"""Generate a table of runtime per epoch for each trial of HPO."""
import os
import pathlib

import pandas
from tqdm import tqdm


def main():
    """Collect per-epoch runtime estimates from all finished HPO studies.

    Walks ``./results`` for study directories (identified by the presence of
    a ``study.json``), opens each study's ``optuna_results.db`` SQLite file,
    and writes three TSV files to ``./runtime``:

    - ``measurement.tsv``: epoch runtime per (experiment_id, trial_id)
    - ``params.tsv``: trial-level and study-level parameters per trial
    - ``experiments.tsv``: mapping from experiment_id to result directory
    """
    results_directory = pathlib.Path('.', 'results')
    output_directory = pathlib.Path('.', 'runtime')
    output_directory.mkdir(exist_ok=True, parents=True)

    # A study counts as finished iff its directory contains a study.json.
    directories = [
        pathlib.Path(directory)
        for directory, _, filenames in os.walk(results_directory)
        if 'study.json' in filenames
    ]

    all_measurements = []
    all_params = []
    # Sort for a deterministic experiment_id assignment across runs.
    experiment_directories = sorted(directories)
    progress = tqdm(experiment_directories)
    for experiment_id, directory in enumerate(progress):
        progress.set_postfix(dict(part=directory.parts[1:3]))
        optuna_results_db = directory / "optuna_results.db"
        connection = f'sqlite:///{str(optuna_results_db)}'

        # The number of training epochs actually run is stored by early
        # stopping as the per-trial user attribute 'stopped_epoch'.
        user_attributes_df = pandas.read_sql_table(
            table_name='trial_user_attributes',
            con=connection,
        )
        stop_data = user_attributes_df.loc[
            user_attributes_df['key'] == 'stopped_epoch'
        ].groupby(
            by='trial_id'
        ).agg(dict(
            value_json='first',
        )).reset_index().rename(
            columns=dict(
                value_json='stopped_epoch'
            ),
        )
        # value_json is stored as text; convert for the division below.
        stop_data['stopped_epoch'] = pandas.to_numeric(stop_data['stopped_epoch'])

        # Wall-clock duration of each completed trial.
        trials_df = pandas.read_sql_table(
            table_name='trials',
            con=connection,
        )
        trials_df = trials_df[trials_df["state"] == "COMPLETE"][["trial_id", "datetime_start", "datetime_complete"]]
        trials_df["trial_time"] = trials_df["datetime_complete"] - trials_df["datetime_start"]
        trials_df = trials_df[["trial_id", "trial_time"]]

        # Inner join: keep only completed trials with a recorded stop epoch.
        measurement_df = pandas.merge(trials_df, stop_data, on="trial_id")
        measurement_df["epoch_time"] = measurement_df["trial_time"] / measurement_df["stopped_epoch"]
        measurement_df["experiment_id"] = experiment_id
        all_measurements.append(
            measurement_df[["trial_id", "experiment_id", "epoch_time"]].set_index(["experiment_id", "trial_id"]),
        )

        # Per-trial (sampled) hyper-parameters ...
        param_df = pandas.read_sql_table(
            table_name='trial_params',
            con=connection,
        )

        # ... plus study-wide fixed settings, replicated once per trial so
        # both kinds share one (trial_id, param_name, param_value) schema.
        # Bookkeeping attributes are excluded since they are not parameters.
        study_df = pandas.read_sql_table(
            table_name='study_user_attributes',
            con=connection,
        )
        study_df = study_df[
            ~study_df["key"].isin(["pykeen_version", "pykeen_git_hash", "metric"])
        ][["key", "value_json"]].rename(columns=dict(key="param_name", value_json="param_value"))
        cat = [param_df[["trial_id", "param_name", "param_value"]]]
        for trial_id in param_df["trial_id"].unique():
            common_param_df = study_df.copy()
            common_param_df["trial_id"] = trial_id
            cat.append(common_param_df)
        param_df = pandas.concat(cat)
        param_df["experiment_id"] = experiment_id
        all_params.append(param_df.set_index(["experiment_id", "trial_id", "param_name"]))

    measurement = pandas.concat(all_measurements).reset_index()
    params = pandas.concat(all_params).reset_index()
    experiments = pandas.DataFrame(data=dict(output_directory=experiment_directories))
    experiments.index.name = 'experiment_id'
    experiments = experiments.reset_index()

    measurement.to_csv(output_directory / 'measurement.tsv', index=False, sep='\t')
    params.to_csv(output_directory / 'params.tsv', index=False, sep='\t')
    experiments.to_csv(output_directory / 'experiments.tsv', index=False, sep='\t')


if __name__ == '__main__':
    main()
results/complex/fb15k237/random/adam/2020-03-09-23-39_b1d90ad8-df0b-4919-a3b3-244bc7fee19d_hpo_complex_fb15k237_lcwa_adam/0003_fb15k237_complex +12 results/complex/fb15k237/random/adam/2020-03-10-04-36_610f748e-afa6-4b5c-96b1-ce86f4fbeff6_hpo_complex_fb15k237_lcwa_crossentropy/0000_fb15k237_complex +13 results/complex/fb15k237/random/adam/2020-03-10-04-36_610f748e-afa6-4b5c-96b1-ce86f4fbeff6_hpo_complex_fb15k237_lcwa_crossentropy/0002_fb15k237_complex +14 results/complex/kinships/random/adadelta/2020-04-05-13-27_26193e53-2baa-406b-a85a-5777e434850e_hpo_complex_kinships_lcwa_crossentropy/0000_kinships_complex +15 results/complex/kinships/random/adadelta/2020-04-05-13-27_26193e53-2baa-406b-a85a-5777e434850e_hpo_complex_kinships_lcwa_crossentropy/0001_kinships_complex +16 results/complex/kinships/random/adadelta/2020-04-05-13-33_17236bbd-c0d6-4006-b4b6-60434b400d84_hpo_complex_kinships_lcwa_adadelta/0000_kinships_complex +17 results/complex/kinships/random/adadelta/2020-04-05-13-33_17236bbd-c0d6-4006-b4b6-60434b400d84_hpo_complex_kinships_lcwa_adadelta/0001_kinships_complex +18 results/complex/kinships/random/adadelta/2020-04-05-13-33_17236bbd-c0d6-4006-b4b6-60434b400d84_hpo_complex_kinships_lcwa_adadelta/0002_kinships_complex +19 results/complex/kinships/random/adadelta/2020-04-05-13-33_17236bbd-c0d6-4006-b4b6-60434b400d84_hpo_complex_kinships_lcwa_adadelta/0003_kinships_complex +20 results/complex/kinships/random/adadelta/2020-04-05-14-50_5344d4ab-961a-489c-bffa-e26ad61d4113_hpo_complex_kinships_owa_adadelta/0000_kinships_complex +21 results/complex/kinships/random/adadelta/2020-04-05-14-50_5344d4ab-961a-489c-bffa-e26ad61d4113_hpo_complex_kinships_owa_adadelta/0001_kinships_complex +22 results/complex/kinships/random/adadelta/2020-04-05-14-50_5344d4ab-961a-489c-bffa-e26ad61d4113_hpo_complex_kinships_owa_adadelta/0002_kinships_complex +23 
results/complex/kinships/random/adadelta/2020-04-05-14-50_5344d4ab-961a-489c-bffa-e26ad61d4113_hpo_complex_kinships_owa_adadelta/0003_kinships_complex +24 results/complex/kinships/random/adadelta/2020-04-05-14-51_5fadf39c-12ec-458b-b926-9aaeb83d0975_hpo_complex_kinships_owa_mrl/0000_kinships_complex +25 results/complex/kinships/random/adadelta/2020-04-05-14-51_5fadf39c-12ec-458b-b926-9aaeb83d0975_hpo_complex_kinships_owa_mrl/0001_kinships_complex +26 results/complex/kinships/random/adadelta/2020-04-06-10-03_821cadc9-c57f-49f2-bfc9-0d53ca98800f_hpo_complex_kinships_owa_nssal/0000_kinships_complex +27 results/complex/kinships/random/adadelta/2020-04-06-10-03_821cadc9-c57f-49f2-bfc9-0d53ca98800f_hpo_complex_kinships_owa_nssal/0001_kinships_complex +28 results/complex/kinships/random/adam/2020-03-11-13-38_38be8abd-b772-4aa6-94bf-9fd95e98e7d7_hpo_complex_kinships_lcwa_adam/0000_kinships_complex +29 results/complex/kinships/random/adam/2020-03-11-13-38_38be8abd-b772-4aa6-94bf-9fd95e98e7d7_hpo_complex_kinships_lcwa_adam/0001_kinships_complex +30 results/complex/kinships/random/adam/2020-03-11-13-38_38be8abd-b772-4aa6-94bf-9fd95e98e7d7_hpo_complex_kinships_lcwa_adam/0002_kinships_complex +31 results/complex/kinships/random/adam/2020-03-11-13-38_38be8abd-b772-4aa6-94bf-9fd95e98e7d7_hpo_complex_kinships_lcwa_adam/0003_kinships_complex +32 results/complex/kinships/random/adam/2020-03-11-13-38_470fd719-b21c-4ad4-909c-2cde456d1e7b_hpo_complex_kinships_lcwa_crossentropy/0000_kinships_complex +33 results/complex/kinships/random/adam/2020-03-11-13-38_470fd719-b21c-4ad4-909c-2cde456d1e7b_hpo_complex_kinships_lcwa_crossentropy/0002_kinships_complex +34 results/complex/kinships/random/adam/2020-03-11-13-40_5f150dc0-38be-471f-876f-a9838ebb8f23_hpo_complex_kinships_owa_adam/0000_kinships_complex +35 results/complex/kinships/random/adam/2020-03-11-13-40_5f150dc0-38be-471f-876f-a9838ebb8f23_hpo_complex_kinships_owa_adam/0001_kinships_complex +36 
results/complex/kinships/random/adam/2020-03-11-13-40_5f150dc0-38be-471f-876f-a9838ebb8f23_hpo_complex_kinships_owa_adam/0002_kinships_complex +37 results/complex/kinships/random/adam/2020-03-11-13-40_5f150dc0-38be-471f-876f-a9838ebb8f23_hpo_complex_kinships_owa_adam/0003_kinships_complex +38 results/complex/kinships/random/adam/2020-03-11-13-41_3f667388-a922-4b40-94a8-a1efb98f27d0_hpo_complex_kinships_owa_nssal/0000_kinships_complex +39 results/complex/kinships/random/adam/2020-03-11-13-41_3f667388-a922-4b40-94a8-a1efb98f27d0_hpo_complex_kinships_owa_nssal/0002_kinships_complex +40 results/complex/kinships/random/adam/2020-03-11-13-41_ea8fb797-0f49-4233-9035-c0c9a957750f_hpo_complex_kinships_owa_mrl/0000_kinships_complex +41 results/complex/kinships/random/adam/2020-03-11-13-41_ea8fb797-0f49-4233-9035-c0c9a957750f_hpo_complex_kinships_owa_mrl/0002_kinships_complex +42 results/complex/wn18rr/random/adam/2020-04-05-17-13_7d8711a4-27c1-4543-a11f-ee6d4aee048f/0000_wn18rr_complex +43 results/complex/wn18rr/random/adam/2020-04-05-17-13_7d8711a4-27c1-4543-a11f-ee6d4aee048f/0001_wn18rr_complex +44 results/complex/wn18rr/random/adam/2020-04-05-17-13_7d8711a4-27c1-4543-a11f-ee6d4aee048f/0002_wn18rr_complex +45 results/complex/wn18rr/random/adam/2020-04-05-17-13_7d8711a4-27c1-4543-a11f-ee6d4aee048f/0003_wn18rr_complex +46 results/complex/wn18rr/random/adam/2020-04-05-17-13_a3970ebc-aa33-4684-bd69-a6fe1ecb6c2e/0000_wn18rr_complex +47 results/complex/wn18rr/random/adam/2020-04-05-17-13_a3970ebc-aa33-4684-bd69-a6fe1ecb6c2e/0001_wn18rr_complex +48 results/complex/wn18rr/random/adam/2020-04-05-17-13_df03d6b2-0061-45e3-a280-b181d6b1f71c/0000_wn18rr_complex +49 results/complex/wn18rr/random/adam/2020-04-05-17-13_df03d6b2-0061-45e3-a280-b181d6b1f71c/0001_wn18rr_complex +50 results/complex/wn18rr/random/adam/2020-04-15-00-33_9f619635-ff8e-4335-86c2-2d327269a10f/0000_wn18rr_complex +51 
results/complex/wn18rr/random/adam/2020-04-15-00-33_9f619635-ff8e-4335-86c2-2d327269a10f/0001_wn18rr_complex +52 results/complex/wn18rr/random/adam/2020-04-25-18-56_55ddb31c-70e6-4fee-a8f3-106aae28ceef/0000_wn18rr_complex +53 results/complex/wn18rr/random/adam/2020-04-25-18-56_55ddb31c-70e6-4fee-a8f3-106aae28ceef/0001_wn18rr_complex +54 results/complex/wn18rr/random/adam/2020-04-25-18-56_b338d5fd-ddac-4809-afa0-506ed77e9a38/0000_wn18rr_complex +55 results/complex/wn18rr/random/adam/2020-04-25-18-56_b338d5fd-ddac-4809-afa0-506ed77e9a38/0001_wn18rr_complex +56 results/complex/yago310/random/adam/2020-05-21-02-46_0f62ea18-89a1-49c1-87f3-904a807c64b2/0000_yago310_complex +57 results/complex/yago310/random/adam/2020-05-21-02-46_0f62ea18-89a1-49c1-87f3-904a807c64b2/0001_yago310_complex +58 results/complex/yago310/random/adam/2020-05-21-02-47_1218c513-997d-483e-8d3f-3d6c144d8fdd/0000_yago310_complex +59 results/complex/yago310/random/adam/2020-05-21-02-47_1218c513-997d-483e-8d3f-3d6c144d8fdd/0001_yago310_complex +60 results/complex/yago310/random/adam/2020-05-21-02-47_6a795261-4c9b-4ac7-bf54-80a6b85bdcc9/0000_yago310_complex +61 results/complex/yago310/random/adam/2020-05-21-02-47_6a795261-4c9b-4ac7-bf54-80a6b85bdcc9/0001_yago310_complex +62 results/complex/yago310/random/adam/2020-05-21-02-47_f6265b37-f100-43d6-9b21-d4f6f2872f78/0000_yago310_complex +63 results/complex/yago310/random/adam/2020-05-21-02-47_f6265b37-f100-43d6-9b21-d4f6f2872f78/0001_yago310_complex +64 results/conve/kinships/random/adadelta/2020-05-11-18-40_71bc3bd5-71fe-4467-ac62-dbd544a000b8_hpo_conve_kinships_lcwa_crossentropy/0000_kinships_conve +65 results/conve/kinships/random/adadelta/2020-05-11-18-40_71bc3bd5-71fe-4467-ac62-dbd544a000b8_hpo_conve_kinships_lcwa_crossentropy/0001_kinships_conve +66 results/conve/kinships/random/adadelta/2020-05-11-18-42_d3a57e7f-85e1-42b6-af0d-b0126b992347_hpo_conve_kinships_owa_mrl/0000_kinships_conve +67 
results/conve/kinships/random/adadelta/2020-05-11-18-42_d3a57e7f-85e1-42b6-af0d-b0126b992347_hpo_conve_kinships_owa_mrl/0001_kinships_conve +68 results/conve/kinships/random/adadelta/2020-05-14-11-32_93e21dc8-dfbc-44ee-8367-339b538156ec_hpo_conve_kinships_owa_nssal/0000_kinships_conve +69 results/conve/kinships/random/adadelta/2020-05-14-11-32_93e21dc8-dfbc-44ee-8367-339b538156ec_hpo_conve_kinships_owa_nssal/0001_kinships_conve +70 results/conve/kinships/random/adadelta/2020-05-14-19-16_78ecfd30-5fbd-4519-84cc-137ab5b817a2_hpo_conve_kinships_lcwa_adadelta/0000_kinships_conve +71 results/conve/kinships/random/adadelta/2020-05-14-19-16_78ecfd30-5fbd-4519-84cc-137ab5b817a2_hpo_conve_kinships_lcwa_adadelta/0001_kinships_conve +72 results/conve/kinships/random/adadelta/2020-05-14-19-16_78ecfd30-5fbd-4519-84cc-137ab5b817a2_hpo_conve_kinships_lcwa_adadelta/0002_kinships_conve +73 results/conve/kinships/random/adadelta/2020-05-14-19-16_78ecfd30-5fbd-4519-84cc-137ab5b817a2_hpo_conve_kinships_lcwa_adadelta/0003_kinships_conve +74 results/conve/kinships/random/adadelta/2020-05-16-04-28_93d23f27-9fce-43c5-8e2b-7172d72f92b6_hpo_conve_kinships_owa_adadelta/0000_kinships_conve +75 results/conve/kinships/random/adadelta/2020-05-16-04-28_93d23f27-9fce-43c5-8e2b-7172d72f92b6_hpo_conve_kinships_owa_adadelta/0001_kinships_conve +76 results/conve/kinships/random/adadelta/2020-05-16-04-28_93d23f27-9fce-43c5-8e2b-7172d72f92b6_hpo_conve_kinships_owa_adadelta/0002_kinships_conve +77 results/conve/kinships/random/adadelta/2020-05-16-04-28_93d23f27-9fce-43c5-8e2b-7172d72f92b6_hpo_conve_kinships_owa_adadelta/0003_kinships_conve +78 results/conve/kinships/random/adam/2020-03-17-17-19_670ea96e-6724-407a-9895-90734932838d_hpo_conve_kinships_lcwa_adam/0000_kinships_conve +79 results/conve/kinships/random/adam/2020-03-17-17-19_670ea96e-6724-407a-9895-90734932838d_hpo_conve_kinships_lcwa_adam/0001_kinships_conve +80 
results/conve/kinships/random/adam/2020-03-17-17-19_670ea96e-6724-407a-9895-90734932838d_hpo_conve_kinships_lcwa_adam/0002_kinships_conve +81 results/conve/kinships/random/adam/2020-03-17-17-19_670ea96e-6724-407a-9895-90734932838d_hpo_conve_kinships_lcwa_adam/0003_kinships_conve +82 results/conve/kinships/random/adam/2020-05-14-19-41_668a6e48-183c-4001-87ad-3dc663c348d3/0000_kinships_conve +83 results/conve/kinships/random/adam/2020-05-14-19-41_668a6e48-183c-4001-87ad-3dc663c348d3/0001_kinships_conve +84 results/conve/kinships/random/adam/2020-05-14-19-41_a4526652-e0c3-42bc-96da-7a3bb00d9c78/0000_kinships_conve +85 results/conve/kinships/random/adam/2020-05-14-19-41_a4526652-e0c3-42bc-96da-7a3bb00d9c78/0001_kinships_conve +86 results/conve/kinships/random/adam/2020-05-14-19-41_c2c260c3-517a-414b-abd1-9b4e9dd54379/0000_kinships_conve +87 results/conve/kinships/random/adam/2020-05-14-19-41_c2c260c3-517a-414b-abd1-9b4e9dd54379/0001_kinships_conve +88 results/conve/kinships/random/adam/2020-05-14-19-41_edc04baa-9e88-4c2f-9144-43949e7ba7be/0000_kinships_conve +89 results/conve/kinships/random/adam/2020-05-14-19-41_edc04baa-9e88-4c2f-9144-43949e7ba7be/0001_kinships_conve +90 results/conve/kinships/random/adam/2020-05-14-19-41_edc04baa-9e88-4c2f-9144-43949e7ba7be/0002_kinships_conve +91 results/conve/kinships/random/adam/2020-05-14-19-41_edc04baa-9e88-4c2f-9144-43949e7ba7be/0003_kinships_conve +92 results/conve/wn18rr/random/adam/2020-05-10-17-28_313c888f-8bb5-4886-8239-68fbc5ff8da9/0000_wn18rr_conve +93 results/conve/wn18rr/random/adam/2020-05-10-17-28_313c888f-8bb5-4886-8239-68fbc5ff8da9/0001_wn18rr_conve +94 results/conve/wn18rr/random/adam/2020-05-10-17-28_4d1d9bab-5d39-4faf-8ffb-736a02be9f5f/0000_wn18rr_conve +95 results/conve/wn18rr/random/adam/2020-05-10-17-28_4d1d9bab-5d39-4faf-8ffb-736a02be9f5f/0001_wn18rr_conve +96 results/conve/wn18rr/random/adam/2020-05-10-17-28_51876bff-fb79-495e-b7a6-e613dcb6b64e/0000_wn18rr_conve +97 
results/conve/wn18rr/random/adam/2020-05-10-17-28_51876bff-fb79-495e-b7a6-e613dcb6b64e/0001_wn18rr_conve +98 results/conve/wn18rr/random/adam/2020-05-10-17-28_53e857de-8017-4f5b-8c9d-1d0e9f4d4fb3/0000_wn18rr_conve +99 results/conve/wn18rr/random/adam/2020-05-10-17-28_53e857de-8017-4f5b-8c9d-1d0e9f4d4fb3/0001_wn18rr_conve +100 results/conve/wn18rr/random/adam/2020-05-10-17-28_85e4fb58-e212-4851-9fbe-7cf9507587b6/0000_wn18rr_conve +101 results/conve/wn18rr/random/adam/2020-05-10-17-28_85e4fb58-e212-4851-9fbe-7cf9507587b6/0001_wn18rr_conve +102 results/conve/wn18rr/random/adam/2020-05-10-17-28_c7a545ee-8b92-4c72-a867-8ab48b87c79b/0000_wn18rr_conve +103 results/conve/wn18rr/random/adam/2020-05-10-17-28_c7a545ee-8b92-4c72-a867-8ab48b87c79b/0001_wn18rr_conve +104 results/conve/wn18rr/random/adam/2020-05-10-17-28_f90f336c-bb65-4bdd-97a0-2cc6b0dc2d9f/0000_wn18rr_conve +105 results/conve/wn18rr/random/adam/2020-05-10-17-28_f90f336c-bb65-4bdd-97a0-2cc6b0dc2d9f/0001_wn18rr_conve +106 results/convkb/kinships/random/adadelta/2020-05-16-04-35_6946cc6f-2166-4dfd-b69f-47a672764b56_hpo_convkb_kinships_lcwa_adadelta/0000_kinships_convkb +107 results/convkb/kinships/random/adadelta/2020-05-16-04-35_6946cc6f-2166-4dfd-b69f-47a672764b56_hpo_convkb_kinships_lcwa_adadelta/0001_kinships_convkb +108 results/convkb/kinships/random/adadelta/2020-05-16-04-35_6946cc6f-2166-4dfd-b69f-47a672764b56_hpo_convkb_kinships_lcwa_adadelta/0002_kinships_convkb +109 results/convkb/kinships/random/adadelta/2020-05-16-04-35_6946cc6f-2166-4dfd-b69f-47a672764b56_hpo_convkb_kinships_lcwa_adadelta/0003_kinships_convkb +110 results/convkb/kinships/random/adadelta/2020-05-20-01-29_38a7d0d4-2079-4833-ac3f-8eea8cc7bb77_hpo_convkb_kinships_lcwa_crossentropy/0000_kinships_convkb +111 results/convkb/kinships/random/adadelta/2020-05-20-01-29_38a7d0d4-2079-4833-ac3f-8eea8cc7bb77_hpo_convkb_kinships_lcwa_crossentropy/0001_kinships_convkb +112 
results/convkb/kinships/random/adadelta/2020-05-21-11-27_d378adf0-8de0-422c-b1f9-53743112f173/0000_kinships_convkb +113 results/convkb/kinships/random/adadelta/2020-05-21-11-27_d378adf0-8de0-422c-b1f9-53743112f173/0001_kinships_convkb +114 results/convkb/kinships/random/adadelta/2020-05-21-11-27_d378adf0-8de0-422c-b1f9-53743112f173/0002_kinships_convkb +115 results/convkb/kinships/random/adadelta/2020-05-21-11-27_d378adf0-8de0-422c-b1f9-53743112f173/0003_kinships_convkb +116 results/convkb/kinships/random/adadelta/2020-05-21-13-30_4c6fce69-e8af-4f6b-ad73-19b7b717404c_hpo_convkb_kinships_owa_mrl/0000_kinships_convkb +117 results/convkb/kinships/random/adadelta/2020-05-21-13-30_4c6fce69-e8af-4f6b-ad73-19b7b717404c_hpo_convkb_kinships_owa_mrl/0001_kinships_convkb +118 results/convkb/kinships/random/adadelta/2020-05-21-13-31_e4476371-df7b-426a-b72b-4cd030377612_hpo_convkb_kinships_owa_nssal/0000_kinships_convkb +119 results/convkb/kinships/random/adadelta/2020-05-21-13-31_e4476371-df7b-426a-b72b-4cd030377612_hpo_convkb_kinships_owa_nssal/0001_kinships_convkb +120 results/convkb/kinships/random/adam/2020-04-03-13-42_34249d25-53d3-4b4b-aabe-d78b00b892ad/0000_kinships_convkb +121 results/convkb/kinships/random/adam/2020-04-03-13-42_34249d25-53d3-4b4b-aabe-d78b00b892ad/0001_kinships_convkb +122 results/convkb/kinships/random/adam/2020-04-03-13-42_34249d25-53d3-4b4b-aabe-d78b00b892ad/0002_kinships_convkb +123 results/convkb/kinships/random/adam/2020-04-03-13-42_34249d25-53d3-4b4b-aabe-d78b00b892ad/0003_kinships_convkb +124 results/convkb/kinships/random/adam/2020-04-03-13-42_7e48d5be-cc65-4c9a-b3bb-5a470629e7cb/0000_kinships_convkb +125 results/convkb/kinships/random/adam/2020-04-03-13-42_7e48d5be-cc65-4c9a-b3bb-5a470629e7cb/0001_kinships_convkb +126 results/convkb/kinships/random/adam/2020-05-14-19-43_11e70d90-e73b-4099-b3c3-07331d3cc2e6/0000_kinships_convkb +127 results/convkb/kinships/random/adam/2020-05-14-19-43_11e70d90-e73b-4099-b3c3-07331d3cc2e6/0001_kinships_convkb 
+128 results/convkb/kinships/random/adam/2020-05-14-19-43_a2a4857f-5c68-4469-bd3b-6a25f7f60f84/0000_kinships_convkb +129 results/convkb/kinships/random/adam/2020-05-14-19-43_a2a4857f-5c68-4469-bd3b-6a25f7f60f84/0001_kinships_convkb +130 results/convkb/kinships/random/adam/2020-05-14-19-43_de4d325b-0a59-4aee-b863-72ae2b0464c9/0000_kinships_convkb +131 results/convkb/kinships/random/adam/2020-05-14-19-43_de4d325b-0a59-4aee-b863-72ae2b0464c9/0001_kinships_convkb +132 results/convkb/kinships/random/adam/2020-05-14-19-43_de4d325b-0a59-4aee-b863-72ae2b0464c9/0002_kinships_convkb +133 results/convkb/kinships/random/adam/2020-05-14-19-43_de4d325b-0a59-4aee-b863-72ae2b0464c9/0003_kinships_convkb +134 results/convkb/wn18rr/random/adam/2020-05-08-21-19_379bb29d-4d8b-4cf4-b62b-d9ec7037fb39/0000_wn18rr_convkb +135 results/convkb/wn18rr/random/adam/2020-05-08-21-19_379bb29d-4d8b-4cf4-b62b-d9ec7037fb39/0001_wn18rr_convkb +136 results/convkb/wn18rr/random/adam/2020-05-08-21-19_8814e434-174f-4d15-b7b5-8334a4389d1e/0000_wn18rr_convkb +137 results/convkb/wn18rr/random/adam/2020-05-08-21-19_8814e434-174f-4d15-b7b5-8334a4389d1e/0001_wn18rr_convkb +138 results/convkb/wn18rr/random/adam/2020-05-08-21-19_9254c02c-7204-430e-aea8-1fd264c49cd9/0000_wn18rr_convkb +139 results/convkb/wn18rr/random/adam/2020-05-08-21-19_9254c02c-7204-430e-aea8-1fd264c49cd9/0001_wn18rr_convkb +140 results/convkb/wn18rr/random/adam/2020-05-08-21-19_bbc10786-009a-4b1d-b19e-fcfb92b01e56/0000_wn18rr_convkb +141 results/convkb/wn18rr/random/adam/2020-05-08-21-19_bbc10786-009a-4b1d-b19e-fcfb92b01e56/0001_wn18rr_convkb +142 results/convkb/yago310/random/adam/2020-05-24-12-46_15946901-b9e5-40ae-b887-f6f26c24d586/0000_yago310_convkb +143 results/convkb/yago310/random/adam/2020-05-24-12-46_15946901-b9e5-40ae-b887-f6f26c24d586/0001_yago310_convkb +144 results/convkb/yago310/random/adam/2020-05-24-12-47_8e9914b5-1c2d-4bc0-b318-e95c43c72219/0000_yago310_convkb +145 
results/convkb/yago310/random/adam/2020-05-24-12-47_8e9914b5-1c2d-4bc0-b318-e95c43c72219/0001_yago310_convkb +146 results/convkb/yago310/random/adam/2020-05-24-13-32_a5d8b694-e4d4-4ade-ae1b-fc43378c2312/0000_yago310_ermlp +147 results/convkb/yago310/random/adam/2020-05-24-13-32_a5d8b694-e4d4-4ade-ae1b-fc43378c2312/0001_yago310_ermlp +148 results/convkb/yago310/random/adam/2020-05-24-13-33_962d1d7d-42e9-4eda-92cd-6b2b567e2174/0000_yago310_ermlp +149 results/convkb/yago310/random/adam/2020-05-24-13-33_962d1d7d-42e9-4eda-92cd-6b2b567e2174/0001_yago310_ermlp +150 results/convkb/yago310/random/adam/2020-05-24-16-42_6b6f5e99-de53-485e-a8fd-c3abf7cc6f85/0000_yago310_convkb +151 results/convkb/yago310/random/adam/2020-05-24-16-42_6b6f5e99-de53-485e-a8fd-c3abf7cc6f85/0001_yago310_convkb +152 results/convkb/yago310/random/adam/2020-05-24-16-42_75b121d7-a48e-4e86-b05b-69dc9dd225f9/0000_yago310_convkb +153 results/convkb/yago310/random/adam/2020-05-24-16-42_75b121d7-a48e-4e86-b05b-69dc9dd225f9/0001_yago310_convkb +154 results/distmult/fb15k237/random/adam/2020-05-12-16-50_1c9ce3ad-f168-46db-abc0-81d2bae92752/0000_fb15k237_distmult +155 results/distmult/fb15k237/random/adam/2020-05-12-16-50_1c9ce3ad-f168-46db-abc0-81d2bae92752/0001_fb15k237_distmult +156 results/distmult/fb15k237/random/adam/2020-05-12-16-50_2786b4ae-8e89-4c01-8eb6-12609c591545/0000_fb15k237_distmult +157 results/distmult/fb15k237/random/adam/2020-05-12-16-50_2786b4ae-8e89-4c01-8eb6-12609c591545/0001_fb15k237_distmult +158 results/distmult/fb15k237/random/adam/2020-05-12-16-51_4715528e-05fe-44dd-96b8-c83b4df42889/0000_fb15k237_distmult +159 results/distmult/fb15k237/random/adam/2020-05-12-16-51_4715528e-05fe-44dd-96b8-c83b4df42889/0001_fb15k237_distmult +160 results/distmult/fb15k237/random/adam/2020-05-12-16-51_ad735abe-2a6a-478e-bdd6-8a8869c4898b/0000_fb15k237_distmult +161 results/distmult/fb15k237/random/adam/2020-05-12-16-51_ad735abe-2a6a-478e-bdd6-8a8869c4898b/0001_fb15k237_distmult +162 
results/distmult/fb15k237/random/adam/2020-05-12-16-54_0e92198e-1d97-4064-91f8-be55c8b42f66/0000_fb15k237_distmult +163 results/distmult/fb15k237/random/adam/2020-05-12-16-54_0e92198e-1d97-4064-91f8-be55c8b42f66/0001_fb15k237_distmult +164 results/distmult/fb15k237/random/adam/2020-05-12-16-55_6f557c76-6bc0-457c-bf01-2bf81dccc09a/0000_fb15k237_distmult +165 results/distmult/fb15k237/random/adam/2020-05-12-16-55_6f557c76-6bc0-457c-bf01-2bf81dccc09a/0001_fb15k237_distmult +166 results/distmult/fb15k237/random/adam/2020-05-12-17-21_f4b38a75-ae62-4bd0-bf97-4990aa9132b5/0000_fb15k237_distmult +167 results/distmult/fb15k237/random/adam/2020-05-12-17-21_f4b38a75-ae62-4bd0-bf97-4990aa9132b5/0001_fb15k237_distmult +168 results/distmult/kinships/random/adadelta/2020-04-05-17-10_beb66eba-4dbe-4360-8778-ae6bb78a7ee1_hpo_distmult_kinships_lcwa_adadelta/0000_kinships_distmult +169 results/distmult/kinships/random/adadelta/2020-04-05-17-10_beb66eba-4dbe-4360-8778-ae6bb78a7ee1_hpo_distmult_kinships_lcwa_adadelta/0001_kinships_distmult +170 results/distmult/kinships/random/adadelta/2020-04-05-17-10_beb66eba-4dbe-4360-8778-ae6bb78a7ee1_hpo_distmult_kinships_lcwa_adadelta/0002_kinships_distmult +171 results/distmult/kinships/random/adadelta/2020-04-05-17-10_beb66eba-4dbe-4360-8778-ae6bb78a7ee1_hpo_distmult_kinships_lcwa_adadelta/0003_kinships_distmult +172 results/distmult/kinships/random/adadelta/2020-04-05-17-11_1cfcf1a4-cbad-4c38-b8ab-0df35a54bf72_hpo_distmult_kinships_lcwa_crossentropy/0000_kinships_distmult +173 results/distmult/kinships/random/adadelta/2020-04-05-17-11_1cfcf1a4-cbad-4c38-b8ab-0df35a54bf72_hpo_distmult_kinships_lcwa_crossentropy/0001_kinships_distmult +174 results/distmult/kinships/random/adadelta/2020-04-05-17-12_54adeae4-7310-46a8-98ef-40ff2edc89f2_hpo_distmult_kinships_owa_adadelta/0000_kinships_distmult +175 
results/distmult/kinships/random/adadelta/2020-04-05-17-12_54adeae4-7310-46a8-98ef-40ff2edc89f2_hpo_distmult_kinships_owa_adadelta/0001_kinships_distmult +176 results/distmult/kinships/random/adadelta/2020-04-05-17-12_54adeae4-7310-46a8-98ef-40ff2edc89f2_hpo_distmult_kinships_owa_adadelta/0002_kinships_distmult +177 results/distmult/kinships/random/adadelta/2020-04-05-17-12_54adeae4-7310-46a8-98ef-40ff2edc89f2_hpo_distmult_kinships_owa_adadelta/0003_kinships_distmult +178 results/distmult/kinships/random/adadelta/2020-04-05-17-13_d54f6011-d3ee-477e-8d39-157905593de6_hpo_distmult_kinships_owa_mrl/0000_kinships_distmult +179 results/distmult/kinships/random/adadelta/2020-04-05-17-13_d54f6011-d3ee-477e-8d39-157905593de6_hpo_distmult_kinships_owa_mrl/0001_kinships_distmult +180 results/distmult/kinships/random/adadelta/2020-04-06-12-22_ecdd2a11-2e5f-4c54-8d9d-26f18a955175_hpo_distmult_kinships_owa_nssal/0000_kinships_distmult +181 results/distmult/kinships/random/adadelta/2020-04-06-12-22_ecdd2a11-2e5f-4c54-8d9d-26f18a955175_hpo_distmult_kinships_owa_nssal/0001_kinships_distmult +182 results/distmult/kinships/random/adam/2020-03-06-09-01_a49e1236-0af4-4ef2-b525-c620945c4cee_hpo_distmult_kinships_lcwa_adam/0000_kinships_distmult +183 results/distmult/kinships/random/adam/2020-03-06-09-01_a49e1236-0af4-4ef2-b525-c620945c4cee_hpo_distmult_kinships_lcwa_adam/0001_kinships_distmult +184 results/distmult/kinships/random/adam/2020-03-06-09-01_a49e1236-0af4-4ef2-b525-c620945c4cee_hpo_distmult_kinships_lcwa_adam/0002_kinships_distmult +185 results/distmult/kinships/random/adam/2020-03-06-09-01_a49e1236-0af4-4ef2-b525-c620945c4cee_hpo_distmult_kinships_lcwa_adam/0003_kinships_distmult +186 results/distmult/kinships/random/adam/2020-03-06-09-02_610dbc50-f4bb-4009-bdd8-fdfcaf8909ef_hpo_distmult_kinships_lcwa_crossentropy/0000_kinships_distmult +187 
results/distmult/kinships/random/adam/2020-03-06-09-02_610dbc50-f4bb-4009-bdd8-fdfcaf8909ef_hpo_distmult_kinships_lcwa_crossentropy/0002_kinships_distmult +188 results/distmult/kinships/random/adam/2020-03-06-09-08_b24cd746-8c26-48bf-9ec2-ee5077d85e25_hpo_distmult_kinships_owa_adam/0000_kinships_distmult +189 results/distmult/kinships/random/adam/2020-03-06-09-08_b24cd746-8c26-48bf-9ec2-ee5077d85e25_hpo_distmult_kinships_owa_adam/0001_kinships_distmult +190 results/distmult/kinships/random/adam/2020-03-06-09-08_b24cd746-8c26-48bf-9ec2-ee5077d85e25_hpo_distmult_kinships_owa_adam/0002_kinships_distmult +191 results/distmult/kinships/random/adam/2020-03-06-09-08_b24cd746-8c26-48bf-9ec2-ee5077d85e25_hpo_distmult_kinships_owa_adam/0003_kinships_distmult +192 results/distmult/kinships/random/adam/2020-03-06-09-10_6c84990a-af64-4734-82b2-7825386fa2da_hpo_distmult_kinships_owa_mrl/0000_kinships_distmult +193 results/distmult/kinships/random/adam/2020-03-06-09-10_6c84990a-af64-4734-82b2-7825386fa2da_hpo_distmult_kinships_owa_mrl/0002_kinships_distmult +194 results/distmult/kinships/random/adam/2020-03-06-09-11_02f8e5e9-65e8-460c-8baa-4002ffdabbf0_hpo_distmult_kinships_owa_nssal/0000_kinships_distmult +195 results/distmult/kinships/random/adam/2020-03-06-09-11_02f8e5e9-65e8-460c-8baa-4002ffdabbf0_hpo_distmult_kinships_owa_nssal/0002_kinships_distmult +196 results/distmult/wn18rr/random/adam/2020-04-25-19-10_0eae1c4e-3252-4ead-91c4-682046e21dc0/0000_wn18rr_distmult +197 results/distmult/wn18rr/random/adam/2020-04-25-19-10_0eae1c4e-3252-4ead-91c4-682046e21dc0/0001_wn18rr_distmult +198 results/distmult/wn18rr/random/adam/2020-04-25-19-10_0eae1c4e-3252-4ead-91c4-682046e21dc0/0002_wn18rr_distmult +199 results/distmult/wn18rr/random/adam/2020-04-25-19-10_0eae1c4e-3252-4ead-91c4-682046e21dc0/0003_wn18rr_distmult +200 results/distmult/wn18rr/random/adam/2020-04-25-19-10_29a01a5d-3467-4a72-9514-ed0d6e58c3eb/0000_wn18rr_distmult +201 
results/distmult/wn18rr/random/adam/2020-04-25-19-10_29a01a5d-3467-4a72-9514-ed0d6e58c3eb/0001_wn18rr_distmult +202 results/distmult/wn18rr/random/adam/2020-04-25-19-10_a0410317-d9b3-4bc5-9d7a-1ef578977ac0/0000_wn18rr_distmult +203 results/distmult/wn18rr/random/adam/2020-04-25-19-10_a0410317-d9b3-4bc5-9d7a-1ef578977ac0/0001_wn18rr_distmult +204 results/distmult/wn18rr/random/adam/2020-04-25-19-10_a0410317-d9b3-4bc5-9d7a-1ef578977ac0/0002_wn18rr_distmult +205 results/distmult/wn18rr/random/adam/2020-04-25-19-10_a0410317-d9b3-4bc5-9d7a-1ef578977ac0/0003_wn18rr_distmult +206 results/distmult/wn18rr/random/adam/2020-04-25-19-10_e3c7345e-8cae-4358-8009-4efc81cb68cc/0000_wn18rr_distmult +207 results/distmult/wn18rr/random/adam/2020-04-25-19-10_e3c7345e-8cae-4358-8009-4efc81cb68cc/0001_wn18rr_distmult +208 results/distmult/wn18rr/random/adam/2020-04-25-19-10_fac17a04-ede9-4227-bc21-62c12ba22886/0000_wn18rr_distmult +209 results/distmult/wn18rr/random/adam/2020-04-25-19-10_fac17a04-ede9-4227-bc21-62c12ba22886/0001_wn18rr_distmult +210 results/distmult/yago310/random/adam/2020-05-21-03-02_3a38b26b-deb7-4140-9393-8cf536490aa0/0000_yago310_distmult +211 results/distmult/yago310/random/adam/2020-05-21-03-02_3a38b26b-deb7-4140-9393-8cf536490aa0/0001_yago310_distmult +212 results/distmult/yago310/random/adam/2020-05-21-03-02_f57bb176-9a10-4c31-8d12-c9498b368cfa/0000_yago310_distmult +213 results/distmult/yago310/random/adam/2020-05-21-03-02_f57bb176-9a10-4c31-8d12-c9498b368cfa/0001_yago310_distmult +214 results/distmult/yago310/random/adam/2020-05-21-03-03_009a75b8-d026-497a-b078-d64df11244a7/0000_yago310_distmult +215 results/distmult/yago310/random/adam/2020-05-21-03-03_009a75b8-d026-497a-b078-d64df11244a7/0001_yago310_distmult +216 results/distmult/yago310/random/adam/2020-05-21-03-03_add18f7b-c19c-434c-b7ee-41deca9bad62/0000_yago310_distmult +217 results/distmult/yago310/random/adam/2020-05-21-03-03_add18f7b-c19c-434c-b7ee-41deca9bad62/0001_yago310_distmult +218 
results/ermlp/fb15k237/random/adam/2020-03-10-09-32_ac1f79f3-e766-4a0d-9780-47fead907de5_hpo_ermlp_fb15k237_owa_mrl/0000_fb15k237_ermlp +219 results/ermlp/fb15k237/random/adam/2020-03-10-09-32_ac1f79f3-e766-4a0d-9780-47fead907de5_hpo_ermlp_fb15k237_owa_mrl/0002_fb15k237_ermlp +220 results/ermlp/fb15k237/random/adam/2020-03-10-09-32_bcd452be-9e6f-46b5-bf6d-e5792cdecb68_hpo_ermlp_fb15k237_owa_adam/0000_fb15k237_ermlp +221 results/ermlp/fb15k237/random/adam/2020-03-10-09-32_bcd452be-9e6f-46b5-bf6d-e5792cdecb68_hpo_ermlp_fb15k237_owa_adam/0001_fb15k237_ermlp +222 results/ermlp/fb15k237/random/adam/2020-03-10-09-32_bcd452be-9e6f-46b5-bf6d-e5792cdecb68_hpo_ermlp_fb15k237_owa_adam/0002_fb15k237_ermlp +223 results/ermlp/fb15k237/random/adam/2020-03-10-09-32_bcd452be-9e6f-46b5-bf6d-e5792cdecb68_hpo_ermlp_fb15k237_owa_adam/0003_fb15k237_ermlp +224 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_13a959e4-d9aa-4e0c-8eb5-38b37284e9fa_hpo_ermlp_fb15k237_lcwa_adam/0000_fb15k237_ermlp +225 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_13a959e4-d9aa-4e0c-8eb5-38b37284e9fa_hpo_ermlp_fb15k237_lcwa_adam/0001_fb15k237_ermlp +226 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_13a959e4-d9aa-4e0c-8eb5-38b37284e9fa_hpo_ermlp_fb15k237_lcwa_adam/0002_fb15k237_ermlp +227 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_13a959e4-d9aa-4e0c-8eb5-38b37284e9fa_hpo_ermlp_fb15k237_lcwa_adam/0003_fb15k237_ermlp +228 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_4fc21782-fe6c-4fe5-b126-4494c59c42d0_hpo_ermlp_fb15k237_lcwa_crossentropy/0000_fb15k237_ermlp +229 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_4fc21782-fe6c-4fe5-b126-4494c59c42d0_hpo_ermlp_fb15k237_lcwa_crossentropy/0002_fb15k237_ermlp +230 results/ermlp/fb15k237/random/adam/2020-03-10-09-33_eb9d42ac-5fa9-4ebf-91e6-e7e0602b348d_hpo_ermlp_fb15k237_owa_nssal/0000_fb15k237_ermlp +231 
results/ermlp/fb15k237/random/adam/2020-03-10-09-33_eb9d42ac-5fa9-4ebf-91e6-e7e0602b348d_hpo_ermlp_fb15k237_owa_nssal/0002_fb15k237_ermlp +232 results/ermlp/kinships/random/adadelta/2020-04-06-12-29_6a7aa508-6135-4e4c-9646-b103ca242369_hpo_ermlp_kinships_lcwa_crossentropy/0000_kinships_ermlp +233 results/ermlp/kinships/random/adadelta/2020-04-06-12-29_6a7aa508-6135-4e4c-9646-b103ca242369_hpo_ermlp_kinships_lcwa_crossentropy/0001_kinships_ermlp +234 results/ermlp/kinships/random/adadelta/2020-04-06-12-29_73b80e66-9a8f-4b35-a17c-656e0c0b8f24_hpo_ermlp_kinships_lcwa_adadelta/0000_kinships_ermlp +235 results/ermlp/kinships/random/adadelta/2020-04-06-12-29_73b80e66-9a8f-4b35-a17c-656e0c0b8f24_hpo_ermlp_kinships_lcwa_adadelta/0001_kinships_ermlp +236 results/ermlp/kinships/random/adadelta/2020-04-06-12-29_73b80e66-9a8f-4b35-a17c-656e0c0b8f24_hpo_ermlp_kinships_lcwa_adadelta/0002_kinships_ermlp +237 results/ermlp/kinships/random/adadelta/2020-04-06-12-29_73b80e66-9a8f-4b35-a17c-656e0c0b8f24_hpo_ermlp_kinships_lcwa_adadelta/0003_kinships_ermlp +238 results/ermlp/kinships/random/adadelta/2020-04-06-12-30_f84562b2-c9e6-42a9-9908-2ac69327df32_hpo_hole_kinships_owa_adadelta/0000_kinships_ermlp +239 results/ermlp/kinships/random/adadelta/2020-04-06-12-30_f84562b2-c9e6-42a9-9908-2ac69327df32_hpo_hole_kinships_owa_adadelta/0001_kinships_ermlp +240 results/ermlp/kinships/random/adadelta/2020-04-06-12-30_f84562b2-c9e6-42a9-9908-2ac69327df32_hpo_hole_kinships_owa_adadelta/0002_kinships_ermlp +241 results/ermlp/kinships/random/adadelta/2020-04-06-12-30_f84562b2-c9e6-42a9-9908-2ac69327df32_hpo_hole_kinships_owa_adadelta/0003_kinships_ermlp +242 results/ermlp/kinships/random/adadelta/2020-04-06-12-31_7eeda42d-7554-4692-863a-716694049caf_hpo_ermlp_kinships_owa_mrl/0000_kinships_ermlp +243 results/ermlp/kinships/random/adadelta/2020-04-06-12-31_7eeda42d-7554-4692-863a-716694049caf_hpo_ermlp_kinships_owa_mrl/0001_kinships_ermlp +244 
results/ermlp/kinships/random/adadelta/2020-04-06-12-31_f0e14299-5e85-4f1a-8472-3131a0b0d312_hpo_ermlp_kinships_owa_nssal/0000_kinships_ermlp +245 results/ermlp/kinships/random/adadelta/2020-04-06-12-31_f0e14299-5e85-4f1a-8472-3131a0b0d312_hpo_ermlp_kinships_owa_nssal/0001_kinships_ermlp +246 results/ermlp/kinships/random/adam/2020-03-16-22-38_bee6622c-dc7f-472e-a084-3bc5c1b6cd90_hpo_ermlp_kinships_lcwa_adam/0000_kinships_ermlp +247 results/ermlp/kinships/random/adam/2020-03-16-22-38_bee6622c-dc7f-472e-a084-3bc5c1b6cd90_hpo_ermlp_kinships_lcwa_adam/0001_kinships_ermlp +248 results/ermlp/kinships/random/adam/2020-03-16-22-38_bee6622c-dc7f-472e-a084-3bc5c1b6cd90_hpo_ermlp_kinships_lcwa_adam/0002_kinships_ermlp +249 results/ermlp/kinships/random/adam/2020-03-16-22-38_bee6622c-dc7f-472e-a084-3bc5c1b6cd90_hpo_ermlp_kinships_lcwa_adam/0003_kinships_ermlp +250 results/ermlp/kinships/random/adam/2020-03-16-22-39_b37ca4cb-6d94-458a-b277-f568b93e5000_hpo_ermlp_kinships_lcwa_crossentropy/0000_kinships_ermlp +251 results/ermlp/kinships/random/adam/2020-03-16-22-39_b37ca4cb-6d94-458a-b277-f568b93e5000_hpo_ermlp_kinships_lcwa_crossentropy/0002_kinships_ermlp +252 results/ermlp/kinships/random/adam/2020-03-17-14-39_9f87953e-b201-41cf-82a8-de12503e40b3_hpo_ermlp_kinships_owa_adam/0000_kinships_ermlp +253 results/ermlp/kinships/random/adam/2020-03-17-14-39_9f87953e-b201-41cf-82a8-de12503e40b3_hpo_ermlp_kinships_owa_adam/0001_kinships_ermlp +254 results/ermlp/kinships/random/adam/2020-03-17-14-39_9f87953e-b201-41cf-82a8-de12503e40b3_hpo_ermlp_kinships_owa_adam/0002_kinships_ermlp +255 results/ermlp/kinships/random/adam/2020-03-17-14-39_9f87953e-b201-41cf-82a8-de12503e40b3_hpo_ermlp_kinships_owa_adam/0003_kinships_ermlp +256 results/ermlp/kinships/random/adam/2020-03-17-14-41_75d99e7b-09d0-41ac-82ce-8661af857c74_hpo_ermlp_kinships_owa_mrl/0000_kinships_ermlp +257 
results/ermlp/kinships/random/adam/2020-03-17-14-41_75d99e7b-09d0-41ac-82ce-8661af857c74_hpo_ermlp_kinships_owa_mrl/0002_kinships_ermlp +258 results/ermlp/kinships/random/adam/2020-03-17-14-42_fe135d8b-3e9c-4973-b7d6-abd74e5a7fc5_hpo_ermlp_kinships_owa_nssal/0000_kinships_ermlp +259 results/ermlp/kinships/random/adam/2020-03-17-14-42_fe135d8b-3e9c-4973-b7d6-abd74e5a7fc5_hpo_ermlp_kinships_owa_nssal/0002_kinships_ermlp +260 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_4fc65df6-599e-4813-8c2c-d8bcacf35903/0000_wn18rr_ermlp +261 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_4fc65df6-599e-4813-8c2c-d8bcacf35903/0002_wn18rr_ermlp +262 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_e461c925-6a8a-4b74-8753-1f885f7029f6/0000_wn18rr_ermlp +263 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_e461c925-6a8a-4b74-8753-1f885f7029f6/0002_wn18rr_ermlp +264 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_f35d0ff1-5230-48d1-8376-ad96ea42973b/0000_wn18rr_ermlp +265 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_f35d0ff1-5230-48d1-8376-ad96ea42973b/0001_wn18rr_ermlp +266 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_f35d0ff1-5230-48d1-8376-ad96ea42973b/0002_wn18rr_ermlp +267 results/ermlp/wn18rr/random/adam/2020-04-01-17-40_f35d0ff1-5230-48d1-8376-ad96ea42973b/0003_wn18rr_ermlp +268 results/ermlp/wn18rr/random/adam/2020-04-16-10-48_57a14f56-8f4a-4b9b-b214-b4527663111a/0000_wn18rr_ermlp +269 results/ermlp/wn18rr/random/adam/2020-04-16-10-48_57a14f56-8f4a-4b9b-b214-b4527663111a/0001_wn18rr_ermlp +270 results/ermlp/wn18rr/random/adam/2020-04-16-10-48_5ab44324-4a2b-416f-8824-c777c689927b/0000_wn18rr_ermlp +271 results/ermlp/wn18rr/random/adam/2020-04-16-10-48_5ab44324-4a2b-416f-8824-c777c689927b/0001_wn18rr_ermlp +272 results/ermlp/wn18rr/random/adam/2020-04-16-10-48_5ab44324-4a2b-416f-8824-c777c689927b/0002_wn18rr_ermlp +273 results/ermlp/wn18rr/random/adam/2020-04-16-10-48_5ab44324-4a2b-416f-8824-c777c689927b/0003_wn18rr_ermlp +274 
results/ermlp/wn18rr/random/adam/2020-04-25-19-10_672766f6-5fd5-4de6-a731-a9bfd174d41f/0000_wn18rr_ermlp +275 results/ermlp/wn18rr/random/adam/2020-04-25-19-10_672766f6-5fd5-4de6-a731-a9bfd174d41f/0001_wn18rr_ermlp +276 results/ermlp/wn18rr/random/adam/2020-04-25-19-10_672766f6-5fd5-4de6-a731-a9bfd174d41f/0002_wn18rr_ermlp +277 results/ermlp/wn18rr/random/adam/2020-04-25-19-10_672766f6-5fd5-4de6-a731-a9bfd174d41f/0003_wn18rr_ermlp +278 results/ermlp/yago310/random/adam/2020-05-24-11-37_94079123-7e78-47a5-9f0c-b3527170bb70/0000_yago310_ermlp +279 results/ermlp/yago310/random/adam/2020-05-24-11-37_94079123-7e78-47a5-9f0c-b3527170bb70/0001_yago310_ermlp +280 results/ermlp/yago310/random/adam/2020-05-24-13-32_a5d8b694-e4d4-4ade-ae1b-fc43378c2312/0000_yago310_ermlp +281 results/ermlp/yago310/random/adam/2020-05-24-13-32_a5d8b694-e4d4-4ade-ae1b-fc43378c2312/0001_yago310_ermlp +282 results/ermlp/yago310/random/adam/2020-05-24-16-45_2c03b79f-cc42-477d-b499-b903332ca062/0000_yago310_ermlp +283 results/ermlp/yago310/random/adam/2020-05-24-16-45_2c03b79f-cc42-477d-b499-b903332ca062/0001_yago310_ermlp +284 results/hole/fb15k237/random/adam/2020-03-27-14-14_8b0a7570-cd5a-40aa-ae87-2a94f9706419/0000_fb15k237_hole +285 results/hole/fb15k237/random/adam/2020-03-27-14-14_8b0a7570-cd5a-40aa-ae87-2a94f9706419/0002_fb15k237_hole +286 results/hole/fb15k237/random/adam/2020-03-27-14-14_b86cc69c-3c7b-4fb7-b303-2e4c68ff4329/0000_fb15k237_hole +287 results/hole/fb15k237/random/adam/2020-03-27-14-14_b86cc69c-3c7b-4fb7-b303-2e4c68ff4329/0001_fb15k237_hole +288 results/hole/fb15k237/random/adam/2020-03-27-14-14_b86cc69c-3c7b-4fb7-b303-2e4c68ff4329/0002_fb15k237_hole +289 results/hole/fb15k237/random/adam/2020-03-27-14-14_b86cc69c-3c7b-4fb7-b303-2e4c68ff4329/0003_fb15k237_hole +290 results/hole/fb15k237/random/adam/2020-03-27-14-14_baabf532-c5ed-49ad-8f1a-a6fba3bf0f58/0000_fb15k237_hole +291 
results/hole/fb15k237/random/adam/2020-03-27-14-14_baabf532-c5ed-49ad-8f1a-a6fba3bf0f58/0002_fb15k237_hole +292 results/hole/fb15k237/random/adam/2020-05-12-16-57_3e15b60a-06cb-47a8-a0e6-78952b1716e1/0000_fb15k237_hole +293 results/hole/fb15k237/random/adam/2020-05-12-16-57_3e15b60a-06cb-47a8-a0e6-78952b1716e1/0001_fb15k237_hole +294 results/hole/fb15k237/random/adam/2020-05-12-16-57_7727b2c3-aecf-407a-b0ca-dcac04556a24/0000_fb15k237_hole +295 results/hole/fb15k237/random/adam/2020-05-12-16-57_7727b2c3-aecf-407a-b0ca-dcac04556a24/0001_fb15k237_hole +296 results/hole/fb15k237/random/adam/2020-05-12-16-57_8829907b-9194-4750-b38e-6a481e5fdff1/0000_fb15k237_hole +297 results/hole/fb15k237/random/adam/2020-05-12-16-57_8829907b-9194-4750-b38e-6a481e5fdff1/0001_fb15k237_hole +298 results/hole/kinships/random/adadelta/2020-04-07-11-37_ed258016-02e5-4407-abfe-20c030585c6f_hpo_hole_kinships_lcwa_adadelta/0000_kinships_hole +299 results/hole/kinships/random/adadelta/2020-04-07-11-37_ed258016-02e5-4407-abfe-20c030585c6f_hpo_hole_kinships_lcwa_adadelta/0001_kinships_hole +300 results/hole/kinships/random/adadelta/2020-04-07-11-37_ed258016-02e5-4407-abfe-20c030585c6f_hpo_hole_kinships_lcwa_adadelta/0002_kinships_hole +301 results/hole/kinships/random/adadelta/2020-04-07-11-37_ed258016-02e5-4407-abfe-20c030585c6f_hpo_hole_kinships_lcwa_adadelta/0003_kinships_hole +302 results/hole/kinships/random/adadelta/2020-04-07-11-38_30dc619f-9c6b-4280-8925-c0e941af5ffd_hpo_hole_kinships_lcwa_crossentropy/0000_kinships_hole +303 results/hole/kinships/random/adadelta/2020-04-07-11-38_30dc619f-9c6b-4280-8925-c0e941af5ffd_hpo_hole_kinships_lcwa_crossentropy/0001_kinships_hole +304 results/hole/kinships/random/adadelta/2020-04-07-11-39_ea42a91a-cc79-4c7c-ab55-afa477511bcc_hpo_hole_kinships_owa_adadelta.json/0000_kinships_hole +305 results/hole/kinships/random/adadelta/2020-04-07-11-39_ea42a91a-cc79-4c7c-ab55-afa477511bcc_hpo_hole_kinships_owa_adadelta.json/0001_kinships_hole +306 
results/hole/kinships/random/adadelta/2020-04-07-11-39_ea42a91a-cc79-4c7c-ab55-afa477511bcc_hpo_hole_kinships_owa_adadelta.json/0002_kinships_hole +307 results/hole/kinships/random/adadelta/2020-04-07-11-39_ea42a91a-cc79-4c7c-ab55-afa477511bcc_hpo_hole_kinships_owa_adadelta.json/0003_kinships_hole +308 results/hole/kinships/random/adadelta/2020-04-07-11-40_326f57e0-09db-41df-bd72-3760688f1f5d_hpo_hole_kinships_owa_nssal/0000_kinships_hole +309 results/hole/kinships/random/adadelta/2020-04-07-11-40_326f57e0-09db-41df-bd72-3760688f1f5d_hpo_hole_kinships_owa_nssal/0001_kinships_hole +310 results/hole/kinships/random/adadelta/2020-04-07-11-40_3cda29d4-57de-44ee-ba83-d41bef2f4d03_hpo_hole_kinships_owa_mrl/0000_kinships_hole +311 results/hole/kinships/random/adadelta/2020-04-07-11-40_3cda29d4-57de-44ee-ba83-d41bef2f4d03_hpo_hole_kinships_owa_mrl/0001_kinships_hole +312 results/hole/kinships/random/adam/2020-03-12-12-53_0baa5fcb-35f2-4439-a31e-40e5e1988ad0_hpo_hole_kinships_lcwa_adam/0000_kinships_hole +313 results/hole/kinships/random/adam/2020-03-12-12-53_0baa5fcb-35f2-4439-a31e-40e5e1988ad0_hpo_hole_kinships_lcwa_adam/0001_kinships_hole +314 results/hole/kinships/random/adam/2020-03-12-12-53_0baa5fcb-35f2-4439-a31e-40e5e1988ad0_hpo_hole_kinships_lcwa_adam/0002_kinships_hole +315 results/hole/kinships/random/adam/2020-03-12-12-53_0baa5fcb-35f2-4439-a31e-40e5e1988ad0_hpo_hole_kinships_lcwa_adam/0003_kinships_hole +316 results/hole/kinships/random/adam/2020-03-12-12-54_7c59afc4-0db0-4d1a-ada6-6a0c6c5c8092_hpo_hole_kinships_lcwa_crossentropy/0000_kinships_hole +317 results/hole/kinships/random/adam/2020-03-12-12-54_7c59afc4-0db0-4d1a-ada6-6a0c6c5c8092_hpo_hole_kinships_lcwa_crossentropy/0002_kinships_hole +318 results/hole/kinships/random/adam/2020-03-12-12-55_8d4da0dd-d153-4704-b9c9-36590fa35c31_hpo_hole_kinships_owa_adam/0000_kinships_hole +319 
results/hole/kinships/random/adam/2020-03-12-12-55_8d4da0dd-d153-4704-b9c9-36590fa35c31_hpo_hole_kinships_owa_adam/0001_kinships_hole +320 results/hole/kinships/random/adam/2020-03-12-12-55_8d4da0dd-d153-4704-b9c9-36590fa35c31_hpo_hole_kinships_owa_adam/0002_kinships_hole +321 results/hole/kinships/random/adam/2020-03-12-12-55_8d4da0dd-d153-4704-b9c9-36590fa35c31_hpo_hole_kinships_owa_adam/0003_kinships_hole +322 results/hole/kinships/random/adam/2020-03-12-12-56_492f3ed6-a762-4705-a6df-5a429f93002d_hpo_hole_kinships_owa_mrl/0000_kinships_hole +323 results/hole/kinships/random/adam/2020-03-12-12-56_492f3ed6-a762-4705-a6df-5a429f93002d_hpo_hole_kinships_owa_mrl/0002_kinships_hole +324 results/hole/kinships/random/adam/2020-03-12-12-57_bccbb6d2-419b-4d65-8271-730e503a8aab_hpo_hole_kinships_owa_nssal/0000_kinships_hole +325 results/hole/kinships/random/adam/2020-03-12-12-57_bccbb6d2-419b-4d65-8271-730e503a8aab_hpo_hole_kinships_owa_nssal/0002_kinships_hole +326 results/hole/wn18rr/random/adam/2020-04-25-19-10_581a86b9-d3f5-4e6a-b366-708fe52465f7/0000_wn18rr_hole +327 results/hole/wn18rr/random/adam/2020-04-25-19-10_581a86b9-d3f5-4e6a-b366-708fe52465f7/0001_wn18rr_hole +328 results/hole/wn18rr/random/adam/2020-04-25-19-10_581a86b9-d3f5-4e6a-b366-708fe52465f7/0002_wn18rr_hole +329 results/hole/wn18rr/random/adam/2020-04-25-19-10_581a86b9-d3f5-4e6a-b366-708fe52465f7/0003_wn18rr_hole +330 results/hole/wn18rr/random/adam/2020-04-25-19-10_71e41439-aad5-4508-98de-022a93259cc4/0000_wn18rr_hole +331 results/hole/wn18rr/random/adam/2020-04-25-19-10_71e41439-aad5-4508-98de-022a93259cc4/0001_wn18rr_hole +332 results/hole/wn18rr/random/adam/2020-04-25-19-10_80ffe3be-5a43-49db-a27f-ba31f8c519f7/0000_wn18rr_hole +333 results/hole/wn18rr/random/adam/2020-04-25-19-10_80ffe3be-5a43-49db-a27f-ba31f8c519f7/0001_wn18rr_hole +334 results/hole/wn18rr/random/adam/2020-04-25-19-11_7af099ba-7d4c-4459-bf60-44b80cff4509/0000_wn18rr_hole +335 
results/hole/wn18rr/random/adam/2020-04-25-19-11_7af099ba-7d4c-4459-bf60-44b80cff4509/0001_wn18rr_hole +336 results/hole/wn18rr/random/adam/2020-05-05-16-11_6cf51e47-47d3-4451-aa29-995f82418abf/0000_wn18rr_hole +337 results/hole/wn18rr/random/adam/2020-05-05-16-11_6cf51e47-47d3-4451-aa29-995f82418abf/0001_wn18rr_hole +338 results/hole/wn18rr/random/adam/2020-05-05-16-12_f93b3cf7-49ea-414b-9257-4dcdea3d1cf6/0000_wn18rr_hole +339 results/hole/wn18rr/random/adam/2020-05-05-16-12_f93b3cf7-49ea-414b-9257-4dcdea3d1cf6/0001_wn18rr_hole +340 results/hole/yago310/random/adam/2020-05-17-19-33_4050c399-dd78-445c-9bad-b772d9771922/0000_yago310_hole +341 results/hole/yago310/random/adam/2020-05-21-02-48_08cf438c-3abe-48a3-94fd-cbb5d18a3d9f/0000_yago310_hole +342 results/hole/yago310/random/adam/2020-05-21-02-48_08cf438c-3abe-48a3-94fd-cbb5d18a3d9f/0001_yago310_hole +343 results/hole/yago310/random/adam/2020-05-21-02-48_4eafa140-201f-4ac3-a207-8a309889e82d/0000_yago310_hole +344 results/hole/yago310/random/adam/2020-05-21-02-48_4eafa140-201f-4ac3-a207-8a309889e82d/0001_yago310_hole +345 results/hole/yago310/random/adam/2020-05-21-02-48_c14160e1-294c-4e83-bd6b-f535625de46e/0000_yago310_hole +346 results/hole/yago310/random/adam/2020-05-21-02-48_c14160e1-294c-4e83-bd6b-f535625de46e/0001_yago310_hole +347 results/kg2e/fb15k237/random/adam/2020-03-10-10-40_3e40acbd-d65c-4c58-b877-ec0c0d01d285_hpo_kg2e_fb15k237_owa_mrl/0000_fb15k237_kg2e +348 results/kg2e/fb15k237/random/adam/2020-03-10-10-40_3e40acbd-d65c-4c58-b877-ec0c0d01d285_hpo_kg2e_fb15k237_owa_mrl/0002_fb15k237_kg2e +349 results/kg2e/fb15k237/random/adam/2020-03-10-10-43_94f76a8f-6faf-459d-85c6-d272cbee7362_hpo_kg2e_fb15k237_owa_adam/0000_fb15k237_kg2e +350 results/kg2e/fb15k237/random/adam/2020-03-10-10-43_94f76a8f-6faf-459d-85c6-d272cbee7362_hpo_kg2e_fb15k237_owa_adam/0001_fb15k237_kg2e +351 results/kg2e/fb15k237/random/adam/2020-03-10-10-43_94f76a8f-6faf-459d-85c6-d272cbee7362_hpo_kg2e_fb15k237_owa_adam/0002_fb15k237_kg2e 
+352 results/kg2e/fb15k237/random/adam/2020-03-10-10-43_94f76a8f-6faf-459d-85c6-d272cbee7362_hpo_kg2e_fb15k237_owa_adam/0003_fb15k237_kg2e +353 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_d6ab117a-ab36-4622-af8f-7f13f2c1351c_hpo_kg2e_fb15k237_owa_nssal/0000_fb15k237_kg2e +354 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_d6ab117a-ab36-4622-af8f-7f13f2c1351c_hpo_kg2e_fb15k237_owa_nssal/0002_fb15k237_kg2e +355 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_e15c47d2-8846-4578-8926-70129feac94e_hpo_kg2e_fb15k237_lcwa_adam/0000_fb15k237_kg2e +356 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_e15c47d2-8846-4578-8926-70129feac94e_hpo_kg2e_fb15k237_lcwa_adam/0001_fb15k237_kg2e +357 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_e15c47d2-8846-4578-8926-70129feac94e_hpo_kg2e_fb15k237_lcwa_adam/0002_fb15k237_kg2e +358 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_e15c47d2-8846-4578-8926-70129feac94e_hpo_kg2e_fb15k237_lcwa_adam/0003_fb15k237_kg2e +359 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_f6c03baf-3d87-4e22-8d40-1e33a508f04d_hpo_kg2e_fb15k237_lcwa_crossentropy/0000_fb15k237_kg2e +360 results/kg2e/fb15k237/random/adam/2020-03-11-10-10_f6c03baf-3d87-4e22-8d40-1e33a508f04d_hpo_kg2e_fb15k237_lcwa_crossentropy/0002_fb15k237_kg2e +361 results/kg2e/kinships/random/adadelta/2020-04-07-14-09_5129f22d-a0fb-4a3f-8d2e-d8f9d236d004_hpo_kg2e_kinships_lcwa_adadelta/0000_kinships_kg2e +362 results/kg2e/kinships/random/adadelta/2020-04-07-14-09_5129f22d-a0fb-4a3f-8d2e-d8f9d236d004_hpo_kg2e_kinships_lcwa_adadelta/0001_kinships_kg2e +363 results/kg2e/kinships/random/adadelta/2020-04-07-14-09_5129f22d-a0fb-4a3f-8d2e-d8f9d236d004_hpo_kg2e_kinships_lcwa_adadelta/0002_kinships_kg2e +364 results/kg2e/kinships/random/adadelta/2020-04-07-14-09_5129f22d-a0fb-4a3f-8d2e-d8f9d236d004_hpo_kg2e_kinships_lcwa_adadelta/0003_kinships_kg2e +365 
results/kg2e/kinships/random/adadelta/2020-04-07-14-10_2b471a4d-9e0a-4090-a900-6717a766afab_hpo_kg2e_kinships_lcwa_crossentropy/0000_kinships_kg2e +366 results/kg2e/kinships/random/adadelta/2020-04-07-14-10_2b471a4d-9e0a-4090-a900-6717a766afab_hpo_kg2e_kinships_lcwa_crossentropy/0001_kinships_kg2e +367 results/kg2e/kinships/random/adadelta/2020-04-07-14-13_7f55ef09-063b-4353-9c32-2974314e5dc5_po_kg2e_kinships_owa_adadelta/0000_kinships_kg2e +368 results/kg2e/kinships/random/adadelta/2020-04-07-14-13_7f55ef09-063b-4353-9c32-2974314e5dc5_po_kg2e_kinships_owa_adadelta/0001_kinships_kg2e +369 results/kg2e/kinships/random/adadelta/2020-04-07-14-13_7f55ef09-063b-4353-9c32-2974314e5dc5_po_kg2e_kinships_owa_adadelta/0002_kinships_kg2e +370 results/kg2e/kinships/random/adadelta/2020-04-07-14-13_7f55ef09-063b-4353-9c32-2974314e5dc5_po_kg2e_kinships_owa_adadelta/0003_kinships_kg2e +371 results/kg2e/kinships/random/adadelta/2020-04-07-14-14_8bd2b720-1c6c-40f4-8102-f36df025031d_hpo_kg2e_kinships_owa_mrl/0000_kinships_kg2e +372 results/kg2e/kinships/random/adadelta/2020-04-07-14-14_8bd2b720-1c6c-40f4-8102-f36df025031d_hpo_kg2e_kinships_owa_mrl/0001_kinships_kg2e +373 results/kg2e/kinships/random/adadelta/2020-04-08-18-23_3961da21-c7f3-440b-b536-06840aa23dc8_hpo_kg2e_kinships_owa_nssal/0000_kinships_kg2e +374 results/kg2e/kinships/random/adadelta/2020-04-08-18-23_3961da21-c7f3-440b-b536-06840aa23dc8_hpo_kg2e_kinships_owa_nssal/0001_kinships_kg2e +375 results/kg2e/kinships/random/adam/2020-03-13-11-41_55ae17f7-78e5-4a04-ba74-838a97b6d129_hpo_kg2e_kinships_lcwa_adam/0000_kinships_kg2e +376 results/kg2e/kinships/random/adam/2020-03-13-11-41_55ae17f7-78e5-4a04-ba74-838a97b6d129_hpo_kg2e_kinships_lcwa_adam/0001_kinships_kg2e +377 results/kg2e/kinships/random/adam/2020-03-13-11-41_55ae17f7-78e5-4a04-ba74-838a97b6d129_hpo_kg2e_kinships_lcwa_adam/0002_kinships_kg2e +378 
results/kg2e/kinships/random/adam/2020-03-13-11-41_55ae17f7-78e5-4a04-ba74-838a97b6d129_hpo_kg2e_kinships_lcwa_adam/0003_kinships_kg2e +379 results/kg2e/kinships/random/adam/2020-03-13-11-42_5e751624-b8da-4880-8430-4de93c5d1ffb_hpo_kg2e_kinships_lcwa_crossentropy/0000_kinships_kg2e +380 results/kg2e/kinships/random/adam/2020-03-13-11-42_5e751624-b8da-4880-8430-4de93c5d1ffb_hpo_kg2e_kinships_lcwa_crossentropy/0002_kinships_kg2e +381 results/kg2e/kinships/random/adam/2020-03-13-11-43_816f55c0-b03c-44cc-870c-b47dea0d59de_hpo_kg2e_kinships_owa_adam/0000_kinships_kg2e +382 results/kg2e/kinships/random/adam/2020-03-13-11-43_816f55c0-b03c-44cc-870c-b47dea0d59de_hpo_kg2e_kinships_owa_adam/0001_kinships_kg2e +383 results/kg2e/kinships/random/adam/2020-03-13-11-43_816f55c0-b03c-44cc-870c-b47dea0d59de_hpo_kg2e_kinships_owa_adam/0002_kinships_kg2e +384 results/kg2e/kinships/random/adam/2020-03-13-11-43_816f55c0-b03c-44cc-870c-b47dea0d59de_hpo_kg2e_kinships_owa_adam/0003_kinships_kg2e +385 results/kg2e/kinships/random/adam/2020-03-13-11-44_dcc54217-5c79-414d-9b45-613200695d52_hpo_kg2e_kinships_owa_mrl/0000_kinships_kg2e +386 results/kg2e/kinships/random/adam/2020-03-13-11-44_dcc54217-5c79-414d-9b45-613200695d52_hpo_kg2e_kinships_owa_mrl/0002_kinships_kg2e +387 results/kg2e/kinships/random/adam/2020-03-16-09-17_36f69559-e08d-4646-90f6-6b558592d1ef_hpo_kg2e_kinships_owa_nssal/0000_kinships_kg2e +388 results/kg2e/kinships/random/adam/2020-03-16-09-17_36f69559-e08d-4646-90f6-6b558592d1ef_hpo_kg2e_kinships_owa_nssal/0002_kinships_kg2e +389 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_00c4bc7a-3beb-441f-9041-e8f92e26b183/0000_wn18rr_kg2e +390 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_00c4bc7a-3beb-441f-9041-e8f92e26b183/0001_wn18rr_kg2e +391 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_6d7b0cf7-f4ac-4a78-9fce-65b15e240fb4/0000_wn18rr_kg2e +392 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_6d7b0cf7-f4ac-4a78-9fce-65b15e240fb4/0001_wn18rr_kg2e +393 
results/kg2e/wn18rr/random/adam/2020-04-25-19-28_bdaf747e-6b9d-42bb-93cd-d8bfbddfe293/0000_wn18rr_kg2e +394 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_bdaf747e-6b9d-42bb-93cd-d8bfbddfe293/0001_wn18rr_kg2e +395 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_bdaf747e-6b9d-42bb-93cd-d8bfbddfe293/0002_wn18rr_kg2e +396 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_bdaf747e-6b9d-42bb-93cd-d8bfbddfe293/0003_wn18rr_kg2e +397 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_c018583b-e413-420f-bf44-30b32cb0181a/0000_wn18rr_kg2e +398 results/kg2e/wn18rr/random/adam/2020-04-25-19-28_c018583b-e413-420f-bf44-30b32cb0181a/0001_wn18rr_kg2e +399 results/kg2e/wn18rr/random/adam/2020-05-05-16-13_a79a02c3-a700-4921-bb21-0037ed3cdff5/0000_wn18rr_kg2e +400 results/kg2e/wn18rr/random/adam/2020-05-05-16-13_a79a02c3-a700-4921-bb21-0037ed3cdff5/0001_wn18rr_kg2e +401 results/kg2e/wn18rr/random/adam/2020-05-05-16-13_d8c5e408-9491-43b7-96a3-9bd74a6360d0/0000_wn18rr_kg2e +402 results/kg2e/wn18rr/random/adam/2020-05-05-16-13_d8c5e408-9491-43b7-96a3-9bd74a6360d0/0001_wn18rr_kg2e +403 results/ntn/fb15k237/random/adam/2020-05-12-17-00_2eb85ac4-67ae-473f-b907-d5a7f0758936/0000_fb15k237_ntn +404 results/ntn/fb15k237/random/adam/2020-05-12-17-00_2eb85ac4-67ae-473f-b907-d5a7f0758936/0001_fb15k237_ntn +405 results/ntn/fb15k237/random/adam/2020-05-12-17-00_3930dbe3-8628-4ff8-9613-2cdae83ede0e/0000_fb15k237_ntn +406 results/ntn/fb15k237/random/adam/2020-05-12-17-00_3930dbe3-8628-4ff8-9613-2cdae83ede0e/0001_fb15k237_ntn +407 results/ntn/fb15k237/random/adam/2020-05-12-17-00_660469f2-18bc-46b0-8a81-374d23e08f43/0000_fb15k237_ntn +408 results/ntn/fb15k237/random/adam/2020-05-12-17-00_660469f2-18bc-46b0-8a81-374d23e08f43/0001_fb15k237_ntn +409 results/ntn/fb15k237/random/adam/2020-05-12-17-00_a5329ea9-7849-46f9-a914-e5983ebfe212/0000_fb15k237_ntn +410 results/ntn/fb15k237/random/adam/2020-05-12-17-00_a5329ea9-7849-46f9-a914-e5983ebfe212/0001_fb15k237_ntn +411 
results/ntn/fb15k237/random/adam/2020-05-12-17-00_b03ee556-e747-4be9-8ab7-4e5cc998ca5e/0000_fb15k237_ntn +412 results/ntn/fb15k237/random/adam/2020-05-12-17-00_b03ee556-e747-4be9-8ab7-4e5cc998ca5e/0001_fb15k237_ntn +413 results/ntn/fb15k237/random/adam/2020-05-12-17-00_f45cadeb-532c-41ae-b99a-cea5192af0ea/0000_fb15k237_ntn +414 results/ntn/fb15k237/random/adam/2020-05-12-17-00_f45cadeb-532c-41ae-b99a-cea5192af0ea/0001_fb15k237_ntn +415 results/ntn/kinships/random/adadelta/2020-04-26-20-04_44502e67-fe75-45dd-9e9c-d1ad9c9255b6_hpo_ntn_kinships_lcwa_crossentropy/0000_kinships_ntn +416 results/ntn/kinships/random/adadelta/2020-04-26-20-04_44502e67-fe75-45dd-9e9c-d1ad9c9255b6_hpo_ntn_kinships_lcwa_crossentropy/0001_kinships_ntn +417 results/ntn/kinships/random/adadelta/2020-04-27-18-26_3414ccd6-a566-4e8b-a9d1-c6d7197d9be7_hpo_ntn_kinships_owa_nssal/0000_kinships_ntn +418 results/ntn/kinships/random/adadelta/2020-04-27-18-26_3414ccd6-a566-4e8b-a9d1-c6d7197d9be7_hpo_ntn_kinships_owa_nssal/0001_kinships_ntn +419 results/ntn/kinships/random/adadelta/2020-05-12-09-53_823f69d7-0315-4212-9d6b-3b5396f1b7d1_hpo_ntn_kinships_owa_mrl/0000_kinships_ntn +420 results/ntn/kinships/random/adadelta/2020-05-12-09-53_823f69d7-0315-4212-9d6b-3b5396f1b7d1_hpo_ntn_kinships_owa_mrl/0001_kinships_ntn +421 results/ntn/kinships/random/adadelta/2020-05-14-11-38_fe7ab101-b4f2-4a1a-b77c-18d4f99a4a9f_hpo_ntn_kinships_lcwa_adadelta/0000_kinships_ntn +422 results/ntn/kinships/random/adadelta/2020-05-14-11-38_fe7ab101-b4f2-4a1a-b77c-18d4f99a4a9f_hpo_ntn_kinships_lcwa_adadelta/0001_kinships_ntn +423 results/ntn/kinships/random/adadelta/2020-05-14-11-38_fe7ab101-b4f2-4a1a-b77c-18d4f99a4a9f_hpo_ntn_kinships_lcwa_adadelta/0002_kinships_ntn +424 results/ntn/kinships/random/adadelta/2020-05-14-11-38_fe7ab101-b4f2-4a1a-b77c-18d4f99a4a9f_hpo_ntn_kinships_lcwa_adadelta/0003_kinships_ntn +425 
results/ntn/kinships/random/adadelta/2020-05-14-17-08_6f557db0-72ea-4488-a78a-1468c01d397a_hpo_ntn_kinships_owa_adadelta/0000_kinships_ntn +426 results/ntn/kinships/random/adadelta/2020-05-14-17-08_6f557db0-72ea-4488-a78a-1468c01d397a_hpo_ntn_kinships_owa_adadelta/0001_kinships_ntn +427 results/ntn/kinships/random/adadelta/2020-05-14-17-08_6f557db0-72ea-4488-a78a-1468c01d397a_hpo_ntn_kinships_owa_adadelta/0002_kinships_ntn +428 results/ntn/kinships/random/adadelta/2020-05-14-17-08_6f557db0-72ea-4488-a78a-1468c01d397a_hpo_ntn_kinships_owa_adadelta/0003_kinships_ntn +429 results/ntn/kinships/random/adam/2020-04-02-18-27_e5cd3031-6aad-4649-9a1a-866584a72bce/0000_kinships_ntn +430 results/ntn/kinships/random/adam/2020-04-02-18-27_e5cd3031-6aad-4649-9a1a-866584a72bce/0001_kinships_ntn +431 results/ntn/kinships/random/adam/2020-04-02-18-27_e5cd3031-6aad-4649-9a1a-866584a72bce/0002_kinships_ntn +432 results/ntn/kinships/random/adam/2020-04-02-18-27_e5cd3031-6aad-4649-9a1a-866584a72bce/0003_kinships_ntn +433 results/ntn/kinships/random/adam/2020-04-02-18-28_92ec4a45-97b6-47d4-8c39-06bec241dddf/0000_kinships_ntn +434 results/ntn/kinships/random/adam/2020-04-02-18-28_92ec4a45-97b6-47d4-8c39-06bec241dddf/0001_kinships_ntn +435 results/ntn/kinships/random/adam/2020-05-18-11-12_26b1bb20-d00f-4b8b-a87c-999d24c67539/0000_kinships_ntn +436 results/ntn/kinships/random/adam/2020-05-18-11-12_26b1bb20-d00f-4b8b-a87c-999d24c67539/0001_kinships_ntn +437 results/ntn/kinships/random/adam/2020-05-18-11-12_588807ff-042e-4c32-baf4-d8853fbca850/0000_kinships_ntn +438 results/ntn/kinships/random/adam/2020-05-18-11-12_588807ff-042e-4c32-baf4-d8853fbca850/0001_kinships_ntn +439 results/ntn/kinships/random/adam/2020-05-18-11-12_588807ff-042e-4c32-baf4-d8853fbca850/0002_kinships_ntn +440 results/ntn/kinships/random/adam/2020-05-18-11-12_588807ff-042e-4c32-baf4-d8853fbca850/0003_kinships_ntn +441 
results/ntn/kinships/random/adam/2020-05-18-11-12_cde53e15-1b66-44d2-a068-ad33f3e1a42a/0000_kinships_ntn +442 results/ntn/kinships/random/adam/2020-05-18-11-12_cde53e15-1b66-44d2-a068-ad33f3e1a42a/0001_kinships_ntn +443 results/ntn/wn18rr/random/adam/2020-04-25-19-28_846489bd-4791-4fa2-8330-16246091d505/0000_wn18rr_ntn +444 results/ntn/wn18rr/random/adam/2020-04-25-19-28_846489bd-4791-4fa2-8330-16246091d505/0001_wn18rr_ntn +445 results/ntn/wn18rr/random/adam/2020-04-25-19-28_b650b078-ff2a-4395-ab5f-602975abde8d/0000_wn18rr_ntn +446 results/ntn/wn18rr/random/adam/2020-04-25-19-28_b650b078-ff2a-4395-ab5f-602975abde8d/0001_wn18rr_ntn +447 results/ntn/wn18rr/random/adam/2020-05-05-16-04_7eec3a19-2ecd-4327-bec7-2dbec928aa5a/0000_wn18rr_ntn +448 results/ntn/wn18rr/random/adam/2020-05-05-16-04_7eec3a19-2ecd-4327-bec7-2dbec928aa5a/0001_wn18rr_ntn +449 results/ntn/wn18rr/random/adam/2020-05-05-16-04_e90e6507-c242-49b1-846e-4d5a941ffb33/0000_wn18rr_ntn +450 results/ntn/wn18rr/random/adam/2020-05-05-16-04_e90e6507-c242-49b1-846e-4d5a941ffb33/0001_wn18rr_ntn +451 results/proje/fb15k237/random/adam/2020-03-14-19-43_f24d4479-c575-4bc7-bcf4-369434fecf9b_hpo_proje_fb15k237_owa_adam/0000_fb15k237_proje +452 results/proje/fb15k237/random/adam/2020-03-14-19-43_f24d4479-c575-4bc7-bcf4-369434fecf9b_hpo_proje_fb15k237_owa_adam/0001_fb15k237_proje +453 results/proje/fb15k237/random/adam/2020-03-14-19-43_f24d4479-c575-4bc7-bcf4-369434fecf9b_hpo_proje_fb15k237_owa_adam/0002_fb15k237_proje +454 results/proje/fb15k237/random/adam/2020-03-14-19-43_f24d4479-c575-4bc7-bcf4-369434fecf9b_hpo_proje_fb15k237_owa_adam/0003_fb15k237_proje +455 results/proje/fb15k237/random/adam/2020-03-14-19-44_19021199-6d9c-4af0-9673-23f4b3a9e12a_hpo_proje_fb15k237_lcwa_adam/0000_fb15k237_proje +456 results/proje/fb15k237/random/adam/2020-03-14-19-44_19021199-6d9c-4af0-9673-23f4b3a9e12a_hpo_proje_fb15k237_lcwa_adam/0001_fb15k237_proje +457 
results/proje/fb15k237/random/adam/2020-03-14-19-44_19021199-6d9c-4af0-9673-23f4b3a9e12a_hpo_proje_fb15k237_lcwa_adam/0002_fb15k237_proje +458 results/proje/fb15k237/random/adam/2020-03-14-19-44_19021199-6d9c-4af0-9673-23f4b3a9e12a_hpo_proje_fb15k237_lcwa_adam/0003_fb15k237_proje +459 results/proje/fb15k237/random/adam/2020-03-14-19-44_1ee08015-b83d-4cc3-a944-10e940dc3f68_hpo_proje_fb15k237_owa_mrl/0000_fb15k237_proje +460 results/proje/fb15k237/random/adam/2020-03-14-19-44_1ee08015-b83d-4cc3-a944-10e940dc3f68_hpo_proje_fb15k237_owa_mrl/0002_fb15k237_proje +461 results/proje/fb15k237/random/adam/2020-03-14-19-44_27364e79-3469-47dc-868e-7f4e5bb929ef_hpo_proje_fb15k237_owa_nssal/0000_fb15k237_proje +462 results/proje/fb15k237/random/adam/2020-03-14-19-44_27364e79-3469-47dc-868e-7f4e5bb929ef_hpo_proje_fb15k237_owa_nssal/0002_fb15k237_proje +463 results/proje/fb15k237/random/adam/2020-03-14-19-44_37e5a52f-9298-4881-9012-e96df920167a_hpo_proje_fb15k237_lcwa_crossentropy/0000_fb15k237_proje +464 results/proje/fb15k237/random/adam/2020-03-14-19-44_37e5a52f-9298-4881-9012-e96df920167a_hpo_proje_fb15k237_lcwa_crossentropy/0002_fb15k237_proje +465 results/proje/kinships/random/adadelta/2020-04-08-16-25_335327ee-129a-4546-a905-0545739c0746_hpo_proje_kinships_lcwa_adadelta/0000_kinships_proje +466 results/proje/kinships/random/adadelta/2020-04-08-16-25_335327ee-129a-4546-a905-0545739c0746_hpo_proje_kinships_lcwa_adadelta/0001_kinships_proje +467 results/proje/kinships/random/adadelta/2020-04-08-16-25_335327ee-129a-4546-a905-0545739c0746_hpo_proje_kinships_lcwa_adadelta/0002_kinships_proje +468 results/proje/kinships/random/adadelta/2020-04-08-16-25_335327ee-129a-4546-a905-0545739c0746_hpo_proje_kinships_lcwa_adadelta/0003_kinships_proje +469 results/proje/kinships/random/adadelta/2020-04-08-16-26_29c14838-9a80-487d-ae43-a3de7a502ecf_hpo_proje_kinships_lcwa_crossentropy/0000_kinships_proje +470 
results/proje/kinships/random/adadelta/2020-04-08-16-26_29c14838-9a80-487d-ae43-a3de7a502ecf_hpo_proje_kinships_lcwa_crossentropy/0001_kinships_proje +471 results/proje/kinships/random/adadelta/2020-04-08-16-28_d202db45-9f0b-4434-938c-98505d01aaf7_hpo_proje_kinships_owa_mrl/0000_kinships_proje +472 results/proje/kinships/random/adadelta/2020-04-08-16-28_d202db45-9f0b-4434-938c-98505d01aaf7_hpo_proje_kinships_owa_mrl/0001_kinships_proje +473 results/proje/kinships/random/adadelta/2020-04-08-16-29_cf7ed813-23b3-4f0b-a6dc-99a0183ae10d_hpo_proje_kinships_owa_adadelta/0000_kinships_proje +474 results/proje/kinships/random/adadelta/2020-04-08-16-29_cf7ed813-23b3-4f0b-a6dc-99a0183ae10d_hpo_proje_kinships_owa_adadelta/0001_kinships_proje +475 results/proje/kinships/random/adadelta/2020-04-08-16-29_cf7ed813-23b3-4f0b-a6dc-99a0183ae10d_hpo_proje_kinships_owa_adadelta/0002_kinships_proje +476 results/proje/kinships/random/adadelta/2020-04-08-16-29_cf7ed813-23b3-4f0b-a6dc-99a0183ae10d_hpo_proje_kinships_owa_adadelta/0003_kinships_proje +477 results/proje/kinships/random/adadelta/2020-05-17-09-40_9c318037-9c09-43d6-8150-72ef10353a84_hpo_proje_kinships_owa_nssal/0000_kinships_proje +478 results/proje/kinships/random/adadelta/2020-05-17-09-40_9c318037-9c09-43d6-8150-72ef10353a84_hpo_proje_kinships_owa_nssal/0001_kinships_proje +479 results/proje/kinships/random/adam/2020-03-13-12-53_3e7443d0-bdec-45d3-be3a-cf06b0ceb6da_hpo_proje_kinships_lcwa_adam/0000_kinships_proje +480 results/proje/kinships/random/adam/2020-03-13-12-53_3e7443d0-bdec-45d3-be3a-cf06b0ceb6da_hpo_proje_kinships_lcwa_adam/0001_kinships_proje +481 results/proje/kinships/random/adam/2020-03-13-12-53_3e7443d0-bdec-45d3-be3a-cf06b0ceb6da_hpo_proje_kinships_lcwa_adam/0002_kinships_proje +482 results/proje/kinships/random/adam/2020-03-13-12-53_3e7443d0-bdec-45d3-be3a-cf06b0ceb6da_hpo_proje_kinships_lcwa_adam/0003_kinships_proje +483 
results/proje/kinships/random/adam/2020-03-13-12-54_cd62fe3b-0995-4179-9f4d-64e59b8b2574_hpo_proje_kinships_lcwa_crossentropy/0000_kinships_proje +484 results/proje/kinships/random/adam/2020-03-13-12-54_cd62fe3b-0995-4179-9f4d-64e59b8b2574_hpo_proje_kinships_lcwa_crossentropy/0002_kinships_proje +485 results/proje/kinships/random/adam/2020-03-13-12-55_e419a6c5-558e-4795-9feb-c790732c1f65_hpo_proje_kinships_owa_adam/0000_kinships_proje +486 results/proje/kinships/random/adam/2020-03-13-12-55_e419a6c5-558e-4795-9feb-c790732c1f65_hpo_proje_kinships_owa_adam/0001_kinships_proje +487 results/proje/kinships/random/adam/2020-03-13-12-55_e419a6c5-558e-4795-9feb-c790732c1f65_hpo_proje_kinships_owa_adam/0002_kinships_proje +488 results/proje/kinships/random/adam/2020-03-13-12-55_e419a6c5-558e-4795-9feb-c790732c1f65_hpo_proje_kinships_owa_adam/0003_kinships_proje +489 results/proje/kinships/random/adam/2020-03-13-12-56_7b59b0bc-1a3a-4819-83fc-92d9318d39a7_hpo_proje_kinships_owa_mrl/0000_kinships_proje +490 results/proje/kinships/random/adam/2020-03-13-12-56_7b59b0bc-1a3a-4819-83fc-92d9318d39a7_hpo_proje_kinships_owa_mrl/0002_kinships_proje +491 results/proje/kinships/random/adam/2020-03-13-12-57_15375ce3-1053-4ea7-a773-558a00473967_hpo_proje_kinships_owa_nssal/0000_kinships_proje +492 results/proje/kinships/random/adam/2020-03-13-12-57_15375ce3-1053-4ea7-a773-558a00473967_hpo_proje_kinships_owa_nssal/0002_kinships_proje +493 results/proje/wn18rr/random/adam/2020-04-01-17-40_34c25a64-8ccd-4a48-a5d5-6a8c60c4cf46/0000_wn18rr_proje +494 results/proje/wn18rr/random/adam/2020-04-01-17-40_34c25a64-8ccd-4a48-a5d5-6a8c60c4cf46/0002_wn18rr_proje +495 results/proje/wn18rr/random/adam/2020-04-01-17-40_3c13f4cf-849e-43a3-a759-91f9e5d4481f/0000_wn18rr_proje +496 results/proje/wn18rr/random/adam/2020-04-01-17-40_3c13f4cf-849e-43a3-a759-91f9e5d4481f/0002_wn18rr_proje +497 results/proje/wn18rr/random/adam/2020-04-01-17-40_5c104bf8-c242-4ed7-aef8-50c7f19e3182/0000_wn18rr_proje +498 
results/proje/wn18rr/random/adam/2020-04-01-17-40_5c104bf8-c242-4ed7-aef8-50c7f19e3182/0001_wn18rr_proje +499 results/proje/wn18rr/random/adam/2020-04-01-17-40_5c104bf8-c242-4ed7-aef8-50c7f19e3182/0002_wn18rr_proje +500 results/proje/wn18rr/random/adam/2020-04-01-17-40_5c104bf8-c242-4ed7-aef8-50c7f19e3182/0003_wn18rr_proje +501 results/proje/wn18rr/random/adam/2020-04-01-17-40_a54cbbcc-ce64-42c5-a183-8628e5cb356c/0000_wn18rr_proje +502 results/proje/wn18rr/random/adam/2020-04-01-17-40_a54cbbcc-ce64-42c5-a183-8628e5cb356c/0001_wn18rr_proje +503 results/proje/wn18rr/random/adam/2020-04-01-17-40_a54cbbcc-ce64-42c5-a183-8628e5cb356c/0002_wn18rr_proje +504 results/proje/wn18rr/random/adam/2020-04-01-17-40_a54cbbcc-ce64-42c5-a183-8628e5cb356c/0003_wn18rr_proje +505 results/proje/wn18rr/random/adam/2020-04-01-17-40_f1f9e4d3-4553-495f-a475-74a871f25792/0000_wn18rr_proje +506 results/proje/wn18rr/random/adam/2020-04-01-17-40_f1f9e4d3-4553-495f-a475-74a871f25792/0002_wn18rr_proje +507 results/rescal/fb15k237/random/adam/2020-03-14-19-33_25660d21-1d27-4ffd-8510-00fa4397b543_hpo_rescal_fb15k237_lcwa_adam/0000_fb15k237_rescal +508 results/rescal/fb15k237/random/adam/2020-03-14-19-33_25660d21-1d27-4ffd-8510-00fa4397b543_hpo_rescal_fb15k237_lcwa_adam/0001_fb15k237_rescal +509 results/rescal/fb15k237/random/adam/2020-03-14-19-33_25660d21-1d27-4ffd-8510-00fa4397b543_hpo_rescal_fb15k237_lcwa_adam/0002_fb15k237_rescal +510 results/rescal/fb15k237/random/adam/2020-03-14-19-33_25660d21-1d27-4ffd-8510-00fa4397b543_hpo_rescal_fb15k237_lcwa_adam/0003_fb15k237_rescal +511 results/rescal/fb15k237/random/adam/2020-03-20-11-15_97996eb3-faca-4448-a948-af74e0fa865f_hpo_rescal_fb15k237_owa_adam/0000_fb15k237_rescal +512 results/rescal/fb15k237/random/adam/2020-03-20-11-15_97996eb3-faca-4448-a948-af74e0fa865f_hpo_rescal_fb15k237_owa_adam/0001_fb15k237_rescal +513 
results/rescal/fb15k237/random/adam/2020-03-20-11-15_97996eb3-faca-4448-a948-af74e0fa865f_hpo_rescal_fb15k237_owa_adam/0002_fb15k237_rescal +514 results/rescal/fb15k237/random/adam/2020-03-20-11-15_97996eb3-faca-4448-a948-af74e0fa865f_hpo_rescal_fb15k237_owa_adam/0003_fb15k237_rescal +515 results/rescal/fb15k237/random/adam/2020-03-20-11-15_b77c7144-1086-462e-84f5-d3d84862a096_hpo_rescal_fb15k237_lcwa_crossentropy/0000_fb15k237_rescal +516 results/rescal/fb15k237/random/adam/2020-03-20-11-15_b77c7144-1086-462e-84f5-d3d84862a096_hpo_rescal_fb15k237_lcwa_crossentropy/0002_fb15k237_rescal +517 results/rescal/fb15k237/random/adam/2020-03-20-11-16_0ad32049-72e6-479d-ba58-c2090b91b9d0_hpo_rescal_fb15k237_owa_adam/0000_fb15k237_rescal +518 results/rescal/fb15k237/random/adam/2020-03-20-11-16_0ad32049-72e6-479d-ba58-c2090b91b9d0_hpo_rescal_fb15k237_owa_adam/0002_fb15k237_rescal +519 results/rescal/fb15k237/random/adam/2020-03-20-11-16_6632fdca-7eee-4c16-b982-e48031b8ccce_hpo_rescal_fb15k237_owa_mrl/0000_fb15k237_rescal +520 results/rescal/fb15k237/random/adam/2020-03-20-11-16_6632fdca-7eee-4c16-b982-e48031b8ccce_hpo_rescal_fb15k237_owa_mrl/0002_fb15k237_rescal +521 results/rescal/kinships/random/adadelta/2020-04-13-12-16_e2ea3f4d-77c7-486e-96c5-3d85b8cd5579_hpo_rescal_kinships_lcwa_adadelta/0000_kinships_rescal +522 results/rescal/kinships/random/adadelta/2020-04-13-12-16_e2ea3f4d-77c7-486e-96c5-3d85b8cd5579_hpo_rescal_kinships_lcwa_adadelta/0001_kinships_rescal +523 results/rescal/kinships/random/adadelta/2020-04-13-12-16_e2ea3f4d-77c7-486e-96c5-3d85b8cd5579_hpo_rescal_kinships_lcwa_adadelta/0002_kinships_rescal +524 results/rescal/kinships/random/adadelta/2020-04-13-12-16_e2ea3f4d-77c7-486e-96c5-3d85b8cd5579_hpo_rescal_kinships_lcwa_adadelta/0003_kinships_rescal +525 results/rescal/kinships/random/adadelta/2020-04-13-12-18_c0ef00e1-03fc-4611-8ccd-35a4cb43c0cd_hpo_rescal_kinships_lcwa_crossentropy/0000_kinships_rescal +526 
results/rescal/kinships/random/adadelta/2020-04-13-12-18_c0ef00e1-03fc-4611-8ccd-35a4cb43c0cd_hpo_rescal_kinships_lcwa_crossentropy/0001_kinships_rescal +527 results/rescal/kinships/random/adadelta/2020-04-13-12-19_b3ff0b6e-e9db-48e5-9ce3-ab131348c624_hpo_rescal_kinships_owa_mrl/0000_kinships_rescal +528 results/rescal/kinships/random/adadelta/2020-04-13-12-19_b3ff0b6e-e9db-48e5-9ce3-ab131348c624_hpo_rescal_kinships_owa_mrl/0001_kinships_rescal +529 results/rescal/kinships/random/adadelta/2020-04-14-14-55_eeb5085e-490c-453c-8acb-5c149a4473f7_hpo_rescal_kinships_owa_nssal/0000_kinships_rescal +530 results/rescal/kinships/random/adadelta/2020-04-14-14-55_eeb5085e-490c-453c-8acb-5c149a4473f7_hpo_rescal_kinships_owa_nssal/0001_kinships_rescal +531 results/rescal/kinships/random/adadelta/2020-04-15-11-16_341afabb-8d8e-4ef4-a0b2-a55ff762d93a_hpo_rescal_kinships_lcwa_crossentropy/0000_kinships_rescal +532 results/rescal/kinships/random/adadelta/2020-04-15-11-16_341afabb-8d8e-4ef4-a0b2-a55ff762d93a_hpo_rescal_kinships_lcwa_crossentropy/0001_kinships_rescal +533 results/rescal/kinships/random/adam/2020-03-16-21-48_411cfc7c-2c1c-4cec-a024-25e6137a9f4f_hpo_rescal_kinships_lcwa_adam/0000_kinships_rescal +534 results/rescal/kinships/random/adam/2020-03-16-21-48_411cfc7c-2c1c-4cec-a024-25e6137a9f4f_hpo_rescal_kinships_lcwa_adam/0001_kinships_rescal +535 results/rescal/kinships/random/adam/2020-03-16-21-48_411cfc7c-2c1c-4cec-a024-25e6137a9f4f_hpo_rescal_kinships_lcwa_adam/0002_kinships_rescal +536 results/rescal/kinships/random/adam/2020-03-16-21-48_411cfc7c-2c1c-4cec-a024-25e6137a9f4f_hpo_rescal_kinships_lcwa_adam/0003_kinships_rescal +537 results/rescal/kinships/random/adam/2020-03-16-21-49_77195c11-01a1-4d1d-b3e8-f5c4603f68e0_hpo_rescal_kinships_lcwa_crossentropy/0000_kinships_rescal +538 results/rescal/kinships/random/adam/2020-03-16-21-49_77195c11-01a1-4d1d-b3e8-f5c4603f68e0_hpo_rescal_kinships_lcwa_crossentropy/0002_kinships_rescal +539 
results/rescal/kinships/random/adam/2020-03-17-13-34_cf7154da-438c-4299-840e-ef11b8972d55_hpo_rescal_kinships_owa_adam/0000_kinships_rescal +540 results/rescal/kinships/random/adam/2020-03-17-13-34_cf7154da-438c-4299-840e-ef11b8972d55_hpo_rescal_kinships_owa_adam/0001_kinships_rescal +541 results/rescal/kinships/random/adam/2020-03-17-13-34_cf7154da-438c-4299-840e-ef11b8972d55_hpo_rescal_kinships_owa_adam/0002_kinships_rescal +542 results/rescal/kinships/random/adam/2020-03-17-13-34_cf7154da-438c-4299-840e-ef11b8972d55_hpo_rescal_kinships_owa_adam/0003_kinships_rescal +543 results/rescal/kinships/random/adam/2020-03-17-13-37_b856eba8-df78-4cda-b519-c60ef3902790_hpo_rescal_kinships_owa_nssal/0000_kinships_rescal +544 results/rescal/kinships/random/adam/2020-03-17-13-37_b856eba8-df78-4cda-b519-c60ef3902790_hpo_rescal_kinships_owa_nssal/0002_kinships_rescal +545 results/rescal/kinships/random/adam/2020-03-20-16-50_e35ba56b-2793-46e0-aa30-98a65f49bdd2_hpo_rescal_kinships_owa_mrl/0000_kinships_rescal +546 results/rescal/kinships/random/adam/2020-03-20-16-50_e35ba56b-2793-46e0-aa30-98a65f49bdd2_hpo_rescal_kinships_owa_mrl/0002_kinships_rescal +547 results/rescal/wn18rr/random/adam/2020-04-05-17-14_51b430bc-9d4d-47ab-8f33-bf8e970e521b/0000_wn18rr_rescal +548 results/rescal/wn18rr/random/adam/2020-04-05-17-14_51b430bc-9d4d-47ab-8f33-bf8e970e521b/0001_wn18rr_rescal +549 results/rescal/wn18rr/random/adam/2020-04-05-17-14_d6a2ee6f-0576-47e1-8486-b5e9a0b4be28/0000_wn18rr_rescal +550 results/rescal/wn18rr/random/adam/2020-04-05-17-14_d6a2ee6f-0576-47e1-8486-b5e9a0b4be28/0001_wn18rr_rescal +551 results/rescal/wn18rr/random/adam/2020-04-16-13-11_05636bac-cd71-409d-8c8a-a8b5674cc756/0000_wn18rr_rescal +552 results/rescal/wn18rr/random/adam/2020-04-16-13-11_05636bac-cd71-409d-8c8a-a8b5674cc756/0001_wn18rr_rescal +553 results/rescal/wn18rr/random/adam/2020-04-16-13-11_05636bac-cd71-409d-8c8a-a8b5674cc756/0002_wn18rr_rescal +554 
results/rescal/wn18rr/random/adam/2020-04-16-13-11_05636bac-cd71-409d-8c8a-a8b5674cc756/0003_wn18rr_rescal +555 results/rescal/wn18rr/random/adam/2020-04-16-13-11_c6c5c623-1d1c-4c21-8479-107984cce146/0000_wn18rr_rescal +556 results/rescal/wn18rr/random/adam/2020-04-16-13-11_c6c5c623-1d1c-4c21-8479-107984cce146/0001_wn18rr_rescal +557 results/rescal/wn18rr/random/adam/2020-04-25-19-10_2354467b-7d30-4200-97b0-b91b5768fea6/0000_wn18rr_rescal +558 results/rescal/wn18rr/random/adam/2020-04-25-19-10_2354467b-7d30-4200-97b0-b91b5768fea6/0001_wn18rr_rescal +559 results/rescal/wn18rr/random/adam/2020-04-25-19-10_52e107f1-4c3e-4866-a37d-372beef49e0a/0000_wn18rr_rescal +560 results/rescal/wn18rr/random/adam/2020-04-25-19-10_52e107f1-4c3e-4866-a37d-372beef49e0a/0001_wn18rr_rescal +561 results/rescal/yago310/random/adam/2020-05-23-19-14_4edc3ffe-e6bd-40bc-acab-a2a600c62151/0000_yago310_rescal +562 results/rescal/yago310/random/adam/2020-05-23-19-14_4edc3ffe-e6bd-40bc-acab-a2a600c62151/0001_yago310_rescal +563 results/rescal/yago310/random/adam/2020-05-23-19-14_6ced0a76-e9b7-4e1b-87b7-a2cff7cf30b4/0000_yago310_rescal +564 results/rescal/yago310/random/adam/2020-05-23-19-14_6ced0a76-e9b7-4e1b-87b7-a2cff7cf30b4/0001_yago310_rescal +565 results/rescal/yago310/random/adam/2020-05-23-19-15_3aef1c91-33a5-4c0f-b852-90296f6b2e29/0000_yago310_rescal +566 results/rescal/yago310/random/adam/2020-05-23-19-15_3aef1c91-33a5-4c0f-b852-90296f6b2e29/0001_yago310_rescal +567 results/rescal/yago310/random/adam/2020-05-23-22-10_8f2e302a-4935-465a-943f-0e083693b8c8/0000_yago310_rescal +568 results/rescal/yago310/random/adam/2020-05-23-22-10_8f2e302a-4935-465a-943f-0e083693b8c8/0001_yago310_rescal +569 results/rotate/fb15k237/random/adam/2020-03-09-09-46_25200436-dfdd-4598-944d-817645fc666a_hpo_rotate_fb15k237_lcwa_crossentropy/0000_fb15k237_rotate +570 results/rotate/fb15k237/random/adam/2020-03-09-09-46_25200436-dfdd-4598-944d-817645fc666a_hpo_rotate_fb15k237_lcwa_crossentropy/0002_fb15k237_rotate 
+571 results/rotate/fb15k237/random/adam/2020-03-09-09-46_57ad5910-5de1-40b5-b48e-8ad1391bd210_hpo_rotate_fb15k237_owa_adam/0000_fb15k237_rotate +572 results/rotate/fb15k237/random/adam/2020-03-09-09-46_57ad5910-5de1-40b5-b48e-8ad1391bd210_hpo_rotate_fb15k237_owa_adam/0001_fb15k237_rotate +573 results/rotate/fb15k237/random/adam/2020-03-09-09-46_57ad5910-5de1-40b5-b48e-8ad1391bd210_hpo_rotate_fb15k237_owa_adam/0002_fb15k237_rotate +574 results/rotate/fb15k237/random/adam/2020-03-09-09-46_57ad5910-5de1-40b5-b48e-8ad1391bd210_hpo_rotate_fb15k237_owa_adam/0003_fb15k237_rotate +575 results/rotate/fb15k237/random/adam/2020-03-09-09-46_9040650f-ce0d-44f8-b26f-d733fb4f6001_hpo_rotate_fb15k237_owa_nssal/0000_fb15k237_rotate +576 results/rotate/fb15k237/random/adam/2020-03-09-09-46_9040650f-ce0d-44f8-b26f-d733fb4f6001_hpo_rotate_fb15k237_owa_nssal/0002_fb15k237_rotate +577 results/rotate/fb15k237/random/adam/2020-03-09-09-46_dab0bd1a-64c5-4879-87b8-a2e6f4899a71_hpo_rotate_fb15k237_lcwa_adam/0000_fb15k237_rotate +578 results/rotate/fb15k237/random/adam/2020-03-09-09-46_dab0bd1a-64c5-4879-87b8-a2e6f4899a71_hpo_rotate_fb15k237_lcwa_adam/0001_fb15k237_rotate +579 results/rotate/fb15k237/random/adam/2020-03-09-09-46_dab0bd1a-64c5-4879-87b8-a2e6f4899a71_hpo_rotate_fb15k237_lcwa_adam/0002_fb15k237_rotate +580 results/rotate/fb15k237/random/adam/2020-03-09-09-46_dab0bd1a-64c5-4879-87b8-a2e6f4899a71_hpo_rotate_fb15k237_lcwa_adam/0003_fb15k237_rotate +581 results/rotate/fb15k237/random/adam/2020-03-09-09-46_fb49a0ad-13a5-4abd-863d-ed3ab743e02d_hpo_rotate_fb15k237_owa_mrl/0000_fb15k237_rotate +582 results/rotate/fb15k237/random/adam/2020-03-09-09-46_fb49a0ad-13a5-4abd-863d-ed3ab743e02d_hpo_rotate_fb15k237_owa_mrl/0002_fb15k237_rotate +583 results/rotate/kinships/random/adadelta/2020-04-14-14-58_58dcaa3a-23c8-4efc-9516-887986a76ca3_hpo_rotate_kinships_lcwa_adadelta/0000_kinships_rotate +584 
results/rotate/kinships/random/adadelta/2020-04-14-14-58_58dcaa3a-23c8-4efc-9516-887986a76ca3_hpo_rotate_kinships_lcwa_adadelta/0001_kinships_rotate +585 results/rotate/kinships/random/adadelta/2020-04-14-14-58_58dcaa3a-23c8-4efc-9516-887986a76ca3_hpo_rotate_kinships_lcwa_adadelta/0002_kinships_rotate +586 results/rotate/kinships/random/adadelta/2020-04-14-14-58_58dcaa3a-23c8-4efc-9516-887986a76ca3_hpo_rotate_kinships_lcwa_adadelta/0003_kinships_rotate +587 results/rotate/kinships/random/adadelta/2020-04-14-14-59_b5d871e7-c567-4bc0-a660-d00732bf8d2f_hpo_rotate_kinships_lcwa_crossentropy/0000_kinships_rotate +588 results/rotate/kinships/random/adadelta/2020-04-14-14-59_b5d871e7-c567-4bc0-a660-d00732bf8d2f_hpo_rotate_kinships_lcwa_crossentropy/0001_kinships_rotate +589 results/rotate/kinships/random/adadelta/2020-04-15-11-19_e1cfdd78-7328-4832-8f5c-e99d342eaddb_hpo_rotate_kinships_owa_adadelta/0000_kinships_rotate +590 results/rotate/kinships/random/adadelta/2020-04-15-11-19_e1cfdd78-7328-4832-8f5c-e99d342eaddb_hpo_rotate_kinships_owa_adadelta/0001_kinships_rotate +591 results/rotate/kinships/random/adadelta/2020-04-15-11-19_e1cfdd78-7328-4832-8f5c-e99d342eaddb_hpo_rotate_kinships_owa_adadelta/0002_kinships_rotate +592 results/rotate/kinships/random/adadelta/2020-04-15-11-19_e1cfdd78-7328-4832-8f5c-e99d342eaddb_hpo_rotate_kinships_owa_adadelta/0003_kinships_rotate +593 results/rotate/kinships/random/adadelta/2020-04-15-11-20_c250d660-4361-4393-82ca-a272d98e484c_hpo_rotate_kinships_owa_mrl/0000_kinships_rotate +594 results/rotate/kinships/random/adadelta/2020-04-15-11-20_c250d660-4361-4393-82ca-a272d98e484c_hpo_rotate_kinships_owa_mrl/0001_kinships_rotate +595 results/rotate/kinships/random/adadelta/2020-04-16-08-52_10571215-9d85-4eeb-8c4e-d7d05f4174e8_hpo_rotate_kinships_owa_nssal/0000_kinships_rotate +596 results/rotate/kinships/random/adadelta/2020-04-16-08-52_10571215-9d85-4eeb-8c4e-d7d05f4174e8_hpo_rotate_kinships_owa_nssal/0001_kinships_rotate +597 
results/rotate/kinships/random/adam/2020-04-02-18-17_2c2b03e0-493a-473a-81c4-c7977019a999_hpo_rotate_kinships_owa_nssal/0000_kinships_rotate +598 results/rotate/kinships/random/adam/2020-04-02-18-17_2c2b03e0-493a-473a-81c4-c7977019a999_hpo_rotate_kinships_owa_nssal/0002_kinships_rotate +599 results/rotate/kinships/random/adam/2020-04-02-18-17_3364cecc-9271-479d-9367-c9c4e05bb045_hpo_rotate_kinships_owa_adam/0000_kinships_rotate +600 results/rotate/kinships/random/adam/2020-04-02-18-17_3364cecc-9271-479d-9367-c9c4e05bb045_hpo_rotate_kinships_owa_adam/0001_kinships_rotate +601 results/rotate/kinships/random/adam/2020-04-02-18-17_3364cecc-9271-479d-9367-c9c4e05bb045_hpo_rotate_kinships_owa_adam/0002_kinships_rotate +602 results/rotate/kinships/random/adam/2020-04-02-18-17_3364cecc-9271-479d-9367-c9c4e05bb045_hpo_rotate_kinships_owa_adam/0003_kinships_rotate +603 results/rotate/kinships/random/adam/2020-04-02-18-17_980ea3f5-8e57-46e4-8ae0-bbeff8ae8350_hpo_rotate_kinships_owa_mrl/0000_kinships_rotate +604 results/rotate/kinships/random/adam/2020-04-02-18-17_980ea3f5-8e57-46e4-8ae0-bbeff8ae8350_hpo_rotate_kinships_owa_mrl/0002_kinships_rotate +605 results/rotate/kinships/random/adam/2020-04-02-18-18_04dd7a9e-8751-4a89-bed6-5bcdae610354_hpo_rotate_kinships_lcwa_crossentropy/0000_kinships_rotate +606 results/rotate/kinships/random/adam/2020-04-02-18-18_04dd7a9e-8751-4a89-bed6-5bcdae610354_hpo_rotate_kinships_lcwa_crossentropy/0002_kinships_rotate +607 results/rotate/kinships/random/adam/2020-04-02-18-18_b94dfb9a-0d84-4190-850c-941e0df8621d_hpo_rotate_kinships_lcwa_adam/0000_kinships_rotate +608 results/rotate/kinships/random/adam/2020-04-02-18-18_b94dfb9a-0d84-4190-850c-941e0df8621d_hpo_rotate_kinships_lcwa_adam/0001_kinships_rotate +609 results/rotate/kinships/random/adam/2020-04-02-18-18_b94dfb9a-0d84-4190-850c-941e0df8621d_hpo_rotate_kinships_lcwa_adam/0002_kinships_rotate +610 
results/rotate/kinships/random/adam/2020-04-02-18-18_b94dfb9a-0d84-4190-850c-941e0df8621d_hpo_rotate_kinships_lcwa_adam/0003_kinships_rotate +611 results/rotate/wn18rr/random/adam/2020-04-05-17-15_566f9a75-2f13-4d87-8df8-7cb2568bc4c0/0000_wn18rr_rotate +612 results/rotate/wn18rr/random/adam/2020-04-05-17-15_566f9a75-2f13-4d87-8df8-7cb2568bc4c0/0001_wn18rr_rotate +613 results/rotate/wn18rr/random/adam/2020-04-05-17-15_59b35fe1-5d49-41d3-9a66-1b40f6c24239/0000_wn18rr_rotate +614 results/rotate/wn18rr/random/adam/2020-04-05-17-15_59b35fe1-5d49-41d3-9a66-1b40f6c24239/0001_wn18rr_rotate +615 results/rotate/wn18rr/random/adam/2020-04-05-17-15_65069c21-a777-4d9c-b3b2-4f389a06831c/0000_wn18rr_rotate +616 results/rotate/wn18rr/random/adam/2020-04-05-17-15_65069c21-a777-4d9c-b3b2-4f389a06831c/0001_wn18rr_rotate +617 results/rotate/wn18rr/random/adam/2020-04-05-17-15_65069c21-a777-4d9c-b3b2-4f389a06831c/0002_wn18rr_rotate +618 results/rotate/wn18rr/random/adam/2020-04-05-17-15_65069c21-a777-4d9c-b3b2-4f389a06831c/0003_wn18rr_rotate +619 results/rotate/wn18rr/random/adam/2020-04-15-00-33_1eb111d7-4f46-4e9e-98d8-36f1c5438ae4/0000_wn18rr_rotate +620 results/rotate/wn18rr/random/adam/2020-04-15-00-33_1eb111d7-4f46-4e9e-98d8-36f1c5438ae4/0001_wn18rr_rotate +621 results/rotate/wn18rr/random/adam/2020-04-25-19-04_217bcf38-2101-461b-9593-b133b4201e6a/0000_wn18rr_rotate +622 results/rotate/wn18rr/random/adam/2020-04-25-19-04_217bcf38-2101-461b-9593-b133b4201e6a/0001_wn18rr_rotate +623 results/rotate/wn18rr/random/adam/2020-04-25-19-04_c99d2c90-4fad-4e45-b5d3-9b04d1a2a9a0/0000_wn18rr_rotate +624 results/rotate/wn18rr/random/adam/2020-04-25-19-04_c99d2c90-4fad-4e45-b5d3-9b04d1a2a9a0/0001_wn18rr_rotate +625 results/rotate/yago310/random/adam/2020-05-21-03-08_27ebfe54-c50d-4e95-9940-8528ec384633/0000_yago310_rotate +626 results/rotate/yago310/random/adam/2020-05-21-03-08_27ebfe54-c50d-4e95-9940-8528ec384633/0001_yago310_rotate +627 
results/rotate/yago310/random/adam/2020-05-21-03-08_fc09f731-e05e-4302-82f0-ca2fef1621d8/0000_yago310_rotate +628 results/rotate/yago310/random/adam/2020-05-21-03-08_fc09f731-e05e-4302-82f0-ca2fef1621d8/0001_yago310_rotate +629 results/rotate/yago310/random/adam/2020-05-21-03-12_b217a008-f283-4ed5-8838-14d3b602f5a9/0000_yago310_rotate +630 results/rotate/yago310/random/adam/2020-05-21-03-12_b217a008-f283-4ed5-8838-14d3b602f5a9/0001_yago310_rotate +631 results/rotate/yago310/random/adam/2020-05-23-09-49_2204bfb4-ba9b-4a46-8f45-eb1d7a4a0a24/0000_yago310_rotate +632 results/rotate/yago310/random/adam/2020-05-23-09-49_2204bfb4-ba9b-4a46-8f45-eb1d7a4a0a24/0001_yago310_rotate +633 results/simple/fb15k237/random/adam/2020-03-01-12-52_362f974c-e8a0-4908-ae8f-dbfff1c944d6_hpo_simple_fb15k237_owa_adam/0000_fb15k237_simple +634 results/simple/fb15k237/random/adam/2020-03-01-12-52_362f974c-e8a0-4908-ae8f-dbfff1c944d6_hpo_simple_fb15k237_owa_adam/0001_fb15k237_simple +635 results/simple/fb15k237/random/adam/2020-03-01-12-52_362f974c-e8a0-4908-ae8f-dbfff1c944d6_hpo_simple_fb15k237_owa_adam/0002_fb15k237_simple +636 results/simple/fb15k237/random/adam/2020-03-01-12-52_362f974c-e8a0-4908-ae8f-dbfff1c944d6_hpo_simple_fb15k237_owa_adam/0003_fb15k237_simple +637 results/simple/fb15k237/random/adam/2020-03-01-12-52_932e77e4-0d02-410d-ae27-3ea5d7ea8398_hpo_simple_fb15k237_owa_nssal/0000_fb15k237_simple +638 results/simple/fb15k237/random/adam/2020-03-01-12-52_932e77e4-0d02-410d-ae27-3ea5d7ea8398_hpo_simple_fb15k237_owa_nssal/0002_fb15k237_simple +639 results/simple/fb15k237/random/adam/2020-03-09-23-34_057cd1ff-b7e7-4043-a4cc-ed364275ccc9_hpo_simple_fb15k237_owa_mrl/0000_fb15k237_simple +640 results/simple/fb15k237/random/adam/2020-03-09-23-34_057cd1ff-b7e7-4043-a4cc-ed364275ccc9_hpo_simple_fb15k237_owa_mrl/0002_fb15k237_simple +641 results/simple/fb15k237/random/adam/2020-03-09-23-34_53745925-d8fa-4aa5-a578-8f5c2adeeea0_hpo_rotate_fb15k237_lcwa_adam/0000_fb15k237_simple +642 
results/simple/fb15k237/random/adam/2020-03-09-23-34_53745925-d8fa-4aa5-a578-8f5c2adeeea0_hpo_rotate_fb15k237_lcwa_adam/0001_fb15k237_simple +643 results/simple/fb15k237/random/adam/2020-03-09-23-34_53745925-d8fa-4aa5-a578-8f5c2adeeea0_hpo_rotate_fb15k237_lcwa_adam/0002_fb15k237_simple +644 results/simple/fb15k237/random/adam/2020-03-09-23-34_53745925-d8fa-4aa5-a578-8f5c2adeeea0_hpo_rotate_fb15k237_lcwa_adam/0003_fb15k237_simple +645 results/simple/fb15k237/random/adam/2020-03-09-23-34_7d44c545-f7a7-44f1-ae18-0d3429095d99_hpo_simple_fb15k237_lcwa_crossentropy/0000_fb15k237_simple +646 results/simple/fb15k237/random/adam/2020-03-09-23-34_7d44c545-f7a7-44f1-ae18-0d3429095d99_hpo_simple_fb15k237_lcwa_crossentropy/0002_fb15k237_simple +647 results/simple/kinships/random/adadelta/2020-04-08-18-33_15cf1a91-8e10-45a1-975b-58493a7d236b_hpo_simple_kinships_lcwa_adadelta/0000_kinships_simple +648 results/simple/kinships/random/adadelta/2020-04-08-18-33_15cf1a91-8e10-45a1-975b-58493a7d236b_hpo_simple_kinships_lcwa_adadelta/0001_kinships_simple +649 results/simple/kinships/random/adadelta/2020-04-08-18-33_15cf1a91-8e10-45a1-975b-58493a7d236b_hpo_simple_kinships_lcwa_adadelta/0002_kinships_simple +650 results/simple/kinships/random/adadelta/2020-04-08-18-33_15cf1a91-8e10-45a1-975b-58493a7d236b_hpo_simple_kinships_lcwa_adadelta/0003_kinships_simple +651 results/simple/kinships/random/adadelta/2020-04-08-18-34_48c661c6-d1d4-4c33-a196-5b421cf3cc23_hpo_simple_kinships_lcwa_crossentropy/0000_kinships_simple +652 results/simple/kinships/random/adadelta/2020-04-08-18-34_48c661c6-d1d4-4c33-a196-5b421cf3cc23_hpo_simple_kinships_lcwa_crossentropy/0001_kinships_simple +653 results/simple/kinships/random/adadelta/2020-04-09-12-22_558cc2db-7d78-4a25-9dac-b16d62deb71c_hpo_simple_kinships_owa_adadelta/0000_kinships_simple +654 results/simple/kinships/random/adadelta/2020-04-09-12-22_558cc2db-7d78-4a25-9dac-b16d62deb71c_hpo_simple_kinships_owa_adadelta/0001_kinships_simple +655 
results/simple/kinships/random/adadelta/2020-04-09-12-22_558cc2db-7d78-4a25-9dac-b16d62deb71c_hpo_simple_kinships_owa_adadelta/0002_kinships_simple +656 results/simple/kinships/random/adadelta/2020-04-09-12-22_558cc2db-7d78-4a25-9dac-b16d62deb71c_hpo_simple_kinships_owa_adadelta/0003_kinships_simple +657 results/simple/kinships/random/adadelta/2020-04-09-12-23_8b4b8288-8dc6-4c0f-a8bb-b44fe1603fe0_hpo_simple_kinships_owa_mrl/0000_kinships_simple +658 results/simple/kinships/random/adadelta/2020-04-09-12-23_8b4b8288-8dc6-4c0f-a8bb-b44fe1603fe0_hpo_simple_kinships_owa_mrl/0001_kinships_simple +659 results/simple/kinships/random/adadelta/2020-04-09-12-23_ee392e61-1c23-4a8f-b760-fa7273e7f6de_hpo_simple_kinships_owa_nssal/0000_kinships_simple +660 results/simple/kinships/random/adadelta/2020-04-09-12-23_ee392e61-1c23-4a8f-b760-fa7273e7f6de_hpo_simple_kinships_owa_nssal/0001_kinships_simple +661 results/simple/kinships/random/adam/2020-04-02-16-17_3ca57c70-9442-4d40-be35-28655b394086_hpo_simple_kinships_lcwa_adam/0000_kinships_simple +662 results/simple/kinships/random/adam/2020-04-02-16-17_3ca57c70-9442-4d40-be35-28655b394086_hpo_simple_kinships_lcwa_adam/0001_kinships_simple +663 results/simple/kinships/random/adam/2020-04-02-16-17_3ca57c70-9442-4d40-be35-28655b394086_hpo_simple_kinships_lcwa_adam/0002_kinships_simple +664 results/simple/kinships/random/adam/2020-04-02-16-17_3ca57c70-9442-4d40-be35-28655b394086_hpo_simple_kinships_lcwa_adam/0003_kinships_simple +665 results/simple/kinships/random/adam/2020-04-02-16-18_a32bada0-dec1-40d0-8ad0-bf694fdd4ead_hpo_simple_kinships_lcwa_crossentropy/0000_kinships_simple +666 results/simple/kinships/random/adam/2020-04-02-16-18_a32bada0-dec1-40d0-8ad0-bf694fdd4ead_hpo_simple_kinships_lcwa_crossentropy/0002_kinships_simple +667 results/simple/kinships/random/adam/2020-05-20-20-42_1aee8181-a2d6-42ed-be13-95673b2f7816/0000_kinships_simple +668 
results/simple/kinships/random/adam/2020-05-20-20-42_1aee8181-a2d6-42ed-be13-95673b2f7816/0001_kinships_simple +669 results/simple/kinships/random/adam/2020-05-20-20-42_aad0469c-27fe-4433-bde9-29509973f35d/0000_kinships_simple +670 results/simple/kinships/random/adam/2020-05-20-20-42_aad0469c-27fe-4433-bde9-29509973f35d/0001_kinships_simple +671 results/simple/kinships/random/adam/2020-05-20-20-42_aafaaca0-db6d-4617-a052-7e029b642b5b/0000_kinships_simple +672 results/simple/kinships/random/adam/2020-05-20-20-42_aafaaca0-db6d-4617-a052-7e029b642b5b/0001_kinships_simple +673 results/simple/kinships/random/adam/2020-05-20-20-42_aafaaca0-db6d-4617-a052-7e029b642b5b/0002_kinships_simple +674 results/simple/kinships/random/adam/2020-05-20-20-42_aafaaca0-db6d-4617-a052-7e029b642b5b/0003_kinships_simple +675 results/simple/wn18rr/random/adam/2020-04-05-17-16_115ed3a7-7f4f-4f07-82cb-2134531cc834/0000_wn18rr_simple +676 results/simple/wn18rr/random/adam/2020-04-05-17-16_115ed3a7-7f4f-4f07-82cb-2134531cc834/0001_wn18rr_simple +677 results/simple/wn18rr/random/adam/2020-04-05-17-16_498b253e-4d8e-4c95-811b-b39c7b5c84a5/0000_wn18rr_simple +678 results/simple/wn18rr/random/adam/2020-04-05-17-16_498b253e-4d8e-4c95-811b-b39c7b5c84a5/0001_wn18rr_simple +679 results/simple/wn18rr/random/adam/2020-04-05-17-16_795b6647-f078-4a4e-8b7a-0a1189c8fd37/0000_wn18rr_simple +680 results/simple/wn18rr/random/adam/2020-04-05-17-16_795b6647-f078-4a4e-8b7a-0a1189c8fd37/0001_wn18rr_simple +681 results/simple/wn18rr/random/adam/2020-04-05-17-16_795b6647-f078-4a4e-8b7a-0a1189c8fd37/0002_wn18rr_simple +682 results/simple/wn18rr/random/adam/2020-04-05-17-16_795b6647-f078-4a4e-8b7a-0a1189c8fd37/0003_wn18rr_simple +683 results/simple/wn18rr/random/adam/2020-04-16-13-11_b1facd4f-880e-4042-82ba-bc351ee29d73/0000_wn18rr_simple +684 results/simple/wn18rr/random/adam/2020-04-16-13-11_b1facd4f-880e-4042-82ba-bc351ee29d73/0001_wn18rr_simple +685 
results/simple/wn18rr/random/adam/2020-04-25-19-03_d2d39b9b-8a50-40bd-b3c2-2f9ef41655d7/0000_wn18rr_simple +686 results/simple/wn18rr/random/adam/2020-04-25-19-03_d2d39b9b-8a50-40bd-b3c2-2f9ef41655d7/0001_wn18rr_simple +687 results/simple/wn18rr/random/adam/2020-04-25-19-04_8b703e86-c77f-4482-b12b-2efe4e27303c/0000_wn18rr_simple +688 results/simple/wn18rr/random/adam/2020-04-25-19-04_8b703e86-c77f-4482-b12b-2efe4e27303c/0001_wn18rr_simple +689 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-24_82da9b80-ede2-4f20-a35c-64c9896f5aa2_hpo_se_kinships_lcwa_adadelta/0000_kinships_structuredembedding +690 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-24_82da9b80-ede2-4f20-a35c-64c9896f5aa2_hpo_se_kinships_lcwa_adadelta/0001_kinships_structuredembedding +691 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-24_82da9b80-ede2-4f20-a35c-64c9896f5aa2_hpo_se_kinships_lcwa_adadelta/0002_kinships_structuredembedding +692 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-24_82da9b80-ede2-4f20-a35c-64c9896f5aa2_hpo_se_kinships_lcwa_adadelta/0003_kinships_structuredembedding +693 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-24_b972f59c-e1fb-461a-958f-09aecf0ed5a3_hpo_se_kinships_lcwa_crossentropy/0000_kinships_structuredembedding +694 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-24_b972f59c-e1fb-461a-958f-09aecf0ed5a3_hpo_se_kinships_lcwa_crossentropy/0001_kinships_structuredembedding +695 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-25_d45db4c5-d841-46b3-8925-3310eb0f4cb1/0000_kinships_structuredembedding +696 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-13-11-25_d45db4c5-d841-46b3-8925-3310eb0f4cb1/0001_kinships_structuredembedding +697 
results/structuredembedding/kinships/random/adadelta/kinships/2020-05-14-10-00_640c1c2b-e305-475c-ba45-e3a6adf8bd6c_hpo_se_kinships_owa_nssal/0000_kinships_structuredembedding +698 results/structuredembedding/kinships/random/adadelta/kinships/2020-05-14-10-00_640c1c2b-e305-475c-ba45-e3a6adf8bd6c_hpo_se_kinships_owa_nssal/0001_kinships_structuredembedding +699 results/structuredembedding/kinships/random/adam/2020-03-17-15-44_10029042-e52a-4730-afb7-099a1da08cdf_hpo_se_kinships_lcwa_adam/0000_kinships_structuredembedding +700 results/structuredembedding/kinships/random/adam/2020-03-17-15-44_10029042-e52a-4730-afb7-099a1da08cdf_hpo_se_kinships_lcwa_adam/0001_kinships_structuredembedding +701 results/structuredembedding/kinships/random/adam/2020-03-17-15-44_10029042-e52a-4730-afb7-099a1da08cdf_hpo_se_kinships_lcwa_adam/0002_kinships_structuredembedding +702 results/structuredembedding/kinships/random/adam/2020-03-17-15-44_10029042-e52a-4730-afb7-099a1da08cdf_hpo_se_kinships_lcwa_adam/0003_kinships_structuredembedding +703 results/structuredembedding/kinships/random/adam/2020-03-17-15-47_eb783b30-9cc5-4481-8df4-c6f5ecb85ff8_hpo_se_kinships_lcwa_crossentropy/0000_kinships_structuredembedding +704 results/structuredembedding/kinships/random/adam/2020-03-17-15-47_eb783b30-9cc5-4481-8df4-c6f5ecb85ff8_hpo_se_kinships_lcwa_crossentropy/0002_kinships_structuredembedding +705 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_4d28c243-fd74-4835-a4ef-cc1dc82ec64d/0000_kinships_structuredembedding +706 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_4d28c243-fd74-4835-a4ef-cc1dc82ec64d/0001_kinships_structuredembedding +707 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_4d28c243-fd74-4835-a4ef-cc1dc82ec64d/0002_kinships_structuredembedding +708 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_4d28c243-fd74-4835-a4ef-cc1dc82ec64d/0003_kinships_structuredembedding +709 
results/structuredembedding/kinships/random/adam/2020-05-23-12-19_8ff9413d-f9b9-4750-bed0-5b48475df51e/0000_kinships_structuredembedding +710 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_8ff9413d-f9b9-4750-bed0-5b48475df51e/0001_kinships_structuredembedding +711 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_fcf94dfc-a4b6-45f2-8d77-56d5999bbd12/0000_kinships_structuredembedding +712 results/structuredembedding/kinships/random/adam/2020-05-23-12-19_fcf94dfc-a4b6-45f2-8d77-56d5999bbd12/0001_kinships_structuredembedding +713 results/structuredembedding/wn18rr/random/adam/2020-05-05-16-18_d5e1d677-12e3-43c1-81b8-849705ff2df4/0000_wn18rr_structuredembedding +714 results/structuredembedding/wn18rr/random/adam/2020-05-05-16-18_d5e1d677-12e3-43c1-81b8-849705ff2df4/0001_wn18rr_structuredembedding +715 results/structuredembedding/wn18rr/random/adam/2020-05-05-16-21_5f8c60f9-eb9a-4ebe-aa2c-bfe41223aa79/0000_wn18rr_structuredembedding +716 results/structuredembedding/wn18rr/random/adam/2020-05-05-16-21_5f8c60f9-eb9a-4ebe-aa2c-bfe41223aa79/0001_wn18rr_structuredembedding +717 results/structuredembedding/wn18rr/random/adam/2020-05-05-16-21_cd1bc041-4851-4a5c-97e2-395589853999/0000_wn18rr_structuredembedding +718 results/structuredembedding/wn18rr/random/adam/2020-05-05-16-21_cd1bc041-4851-4a5c-97e2-395589853999/0001_wn18rr_structuredembedding +719 results/structuredembedding/yago310/random/adam/2020-05-23-12-11_fb10772f-1bad-4aa7-8bbf-2166d4e51411/0000_yago310_structuredembedding +720 results/structuredembedding/yago310/random/adam/2020-05-23-12-11_fb10772f-1bad-4aa7-8bbf-2166d4e51411/0001_yago310_structuredembedding +721 results/structuredembedding/yago310/random/adam/2020-05-23-12-12_c6cd7832-f8a8-4fa9-9c8c-ede7c795d1c4/0000_yago310_structuredembedding +722 results/structuredembedding/yago310/random/adam/2020-05-23-12-12_c6cd7832-f8a8-4fa9-9c8c-ede7c795d1c4/0001_yago310_structuredembedding +723 
results/structuredembedding/yago310/random/adam/2020-05-23-12-14_734e47c9-85b2-4ea7-adc5-da1167f1abc4/0000_yago310_structuredembedding +724 results/structuredembedding/yago310/random/adam/2020-05-23-12-14_734e47c9-85b2-4ea7-adc5-da1167f1abc4/0001_yago310_structuredembedding +725 results/structuredembedding/yago310/random/adam/2020-05-23-12-15_faae248f-dc6f-442f-8c75-813bca873124/0000_yago310_structuredembedding +726 results/structuredembedding/yago310/random/adam/2020-05-23-12-15_faae248f-dc6f-442f-8c75-813bca873124/0001_yago310_structuredembedding +727 results/transd/fb15k237/random/adam/2020-03-01-12-29_8f57aad7-4762-47bd-9f3a-47f950c14072_hpo_transd_fb15k237_owa_adam/0000_fb15k237_transd +728 results/transd/fb15k237/random/adam/2020-03-01-12-29_8f57aad7-4762-47bd-9f3a-47f950c14072_hpo_transd_fb15k237_owa_adam/0001_fb15k237_transd +729 results/transd/fb15k237/random/adam/2020-03-01-12-29_8f57aad7-4762-47bd-9f3a-47f950c14072_hpo_transd_fb15k237_owa_adam/0002_fb15k237_transd +730 results/transd/fb15k237/random/adam/2020-03-01-12-29_8f57aad7-4762-47bd-9f3a-47f950c14072_hpo_transd_fb15k237_owa_adam/0003_fb15k237_transd +731 results/transd/fb15k237/random/adam/2020-03-01-12-31_45d06e4d-c21b-4aa7-aef8-ecf287903c09_hpo_transd_fb15k237_owa_nssal/0000_fb15k237_transd +732 results/transd/fb15k237/random/adam/2020-03-01-12-31_45d06e4d-c21b-4aa7-aef8-ecf287903c09_hpo_transd_fb15k237_owa_nssal/0002_fb15k237_transd +733 results/transd/fb15k237/random/adam/2020-03-10-09-41_966138b3-594e-4e24-966d-0e0363227071_hpo_transd_fb15k237_lcwa_adam/0000_fb15k237_transd +734 results/transd/fb15k237/random/adam/2020-03-10-09-41_966138b3-594e-4e24-966d-0e0363227071_hpo_transd_fb15k237_lcwa_adam/0001_fb15k237_transd +735 results/transd/fb15k237/random/adam/2020-03-10-09-41_966138b3-594e-4e24-966d-0e0363227071_hpo_transd_fb15k237_lcwa_adam/0002_fb15k237_transd +736 
results/transd/fb15k237/random/adam/2020-03-10-09-41_966138b3-594e-4e24-966d-0e0363227071_hpo_transd_fb15k237_lcwa_adam/0003_fb15k237_transd +737 results/transd/fb15k237/random/adam/2020-03-10-09-41_ebe71b31-39c4-4cbb-8e86-031464374b66_hpo_transd_fb15k237_owa_mrl/0000_fb15k237_transd +738 results/transd/fb15k237/random/adam/2020-03-10-09-41_ebe71b31-39c4-4cbb-8e86-031464374b66_hpo_transd_fb15k237_owa_mrl/0002_fb15k237_transd +739 results/transd/fb15k237/random/adam/2020-03-20-16-44_76ee958d-0ba7-489d-837f-afd5a1de4e08_hpo_transd_fb15k237_lcwa_crossentropy/0000_fb15k237_transd +740 results/transd/fb15k237/random/adam/2020-03-20-16-44_76ee958d-0ba7-489d-837f-afd5a1de4e08_hpo_transd_fb15k237_lcwa_crossentropy/0002_fb15k237_transd +741 results/transd/kinships/random/adadelta/2020-04-09-12-25_fdd623a1-fbc2-4a02-aeb9-a226d1c2cee3_hpo_transd_kinships_lcwa_adadelta/0000_kinships_transd +742 results/transd/kinships/random/adadelta/2020-04-09-12-25_fdd623a1-fbc2-4a02-aeb9-a226d1c2cee3_hpo_transd_kinships_lcwa_adadelta/0001_kinships_transd +743 results/transd/kinships/random/adadelta/2020-04-09-12-25_fdd623a1-fbc2-4a02-aeb9-a226d1c2cee3_hpo_transd_kinships_lcwa_adadelta/0002_kinships_transd +744 results/transd/kinships/random/adadelta/2020-04-09-12-25_fdd623a1-fbc2-4a02-aeb9-a226d1c2cee3_hpo_transd_kinships_lcwa_adadelta/0003_kinships_transd +745 results/transd/kinships/random/adadelta/2020-04-09-12-26_a3032e0e-5fcc-48db-ab3d-535ce07f9950_hpo_transd_kinships_lcwa_crossentropy/0000_kinships_transd +746 results/transd/kinships/random/adadelta/2020-04-09-12-26_a3032e0e-5fcc-48db-ab3d-535ce07f9950_hpo_transd_kinships_lcwa_crossentropy/0001_kinships_transd +747 results/transd/kinships/random/adadelta/2020-04-09-12-26_a5f1edf6-c575-45d1-b893-dd0288f09df1_hpo_transd_kinships_owa_adadelta/0000_kinships_transd +748 results/transd/kinships/random/adadelta/2020-04-09-12-26_a5f1edf6-c575-45d1-b893-dd0288f09df1_hpo_transd_kinships_owa_adadelta/0001_kinships_transd +749 
results/transd/kinships/random/adadelta/2020-04-09-12-26_a5f1edf6-c575-45d1-b893-dd0288f09df1_hpo_transd_kinships_owa_adadelta/0002_kinships_transd +750 results/transd/kinships/random/adadelta/2020-04-09-12-26_a5f1edf6-c575-45d1-b893-dd0288f09df1_hpo_transd_kinships_owa_adadelta/0003_kinships_transd +751 results/transd/kinships/random/adadelta/2020-04-09-12-27_42e99eae-99ed-450b-85dc-03b9256fa050_hpo_transd_kinships_owa_mrl/0000_kinships_transd +752 results/transd/kinships/random/adadelta/2020-04-09-12-27_42e99eae-99ed-450b-85dc-03b9256fa050_hpo_transd_kinships_owa_mrl/0001_kinships_transd +753 results/transd/kinships/random/adadelta/2020-04-09-12-28_0aabe350-db04-4112-83b8-35a4613918b1_hpo_transd_kinships_owa_nssal/0000_kinships_transd +754 results/transd/kinships/random/adadelta/2020-04-09-12-28_0aabe350-db04-4112-83b8-35a4613918b1_hpo_transd_kinships_owa_nssal/0001_kinships_transd +755 results/transd/kinships/random/adam/2020-03-12-12-01_696fdfa2-a558-48a9-adf7-f0eeb876b5fc_hpo_transd_kinships_lcwa_crossentropy/0000_kinships_transd +756 results/transd/kinships/random/adam/2020-03-12-12-01_696fdfa2-a558-48a9-adf7-f0eeb876b5fc_hpo_transd_kinships_lcwa_crossentropy/0002_kinships_transd +757 results/transd/kinships/random/adam/2020-03-12-12-01_a33d2f8b-2688-4864-9227-42e42b816e8e_hpo_transd_kinships_lcwa_adam/0000_kinships_transd +758 results/transd/kinships/random/adam/2020-03-12-12-01_a33d2f8b-2688-4864-9227-42e42b816e8e_hpo_transd_kinships_lcwa_adam/0001_kinships_transd +759 results/transd/kinships/random/adam/2020-03-12-12-01_a33d2f8b-2688-4864-9227-42e42b816e8e_hpo_transd_kinships_lcwa_adam/0002_kinships_transd +760 results/transd/kinships/random/adam/2020-03-12-12-01_a33d2f8b-2688-4864-9227-42e42b816e8e_hpo_transd_kinships_lcwa_adam/0003_kinships_transd +761 results/transd/kinships/random/adam/2020-03-12-12-02_ca43b0cc-f1aa-4e56-b8af-0f87c5b95fcc_hpo_transd_kinships_owa_adam/0000_kinships_transd +762 
results/transd/kinships/random/adam/2020-03-12-12-02_ca43b0cc-f1aa-4e56-b8af-0f87c5b95fcc_hpo_transd_kinships_owa_adam/0001_kinships_transd +763 results/transd/kinships/random/adam/2020-03-12-12-02_ca43b0cc-f1aa-4e56-b8af-0f87c5b95fcc_hpo_transd_kinships_owa_adam/0002_kinships_transd +764 results/transd/kinships/random/adam/2020-03-12-12-02_ca43b0cc-f1aa-4e56-b8af-0f87c5b95fcc_hpo_transd_kinships_owa_adam/0003_kinships_transd +765 results/transd/kinships/random/adam/2020-03-13-11-29_76e1261f-a1ea-45b6-8462-cce034073403_hpo_transd_kinships_owa_mrl/0000_kinships_transd +766 results/transd/kinships/random/adam/2020-03-13-11-29_76e1261f-a1ea-45b6-8462-cce034073403_hpo_transd_kinships_owa_mrl/0002_kinships_transd +767 results/transd/kinships/random/adam/2020-03-13-11-29_c8cce359-bc03-4150-9b58-08983d524f10_hpo_transd_kinships_owa_nssal/0000_kinships_transd +768 results/transd/kinships/random/adam/2020-03-13-11-29_c8cce359-bc03-4150-9b58-08983d524f10_hpo_transd_kinships_owa_nssal/0002_kinships_transd +769 results/transd/wn18rr/random/adam/2020-04-05-17-25_0aeae955-2e44-4cce-bf3b-e53dc4821145/0000_wn18rr_transd +770 results/transd/wn18rr/random/adam/2020-04-05-17-25_0aeae955-2e44-4cce-bf3b-e53dc4821145/0001_wn18rr_transd +771 results/transd/wn18rr/random/adam/2020-04-05-17-25_60ca6f18-634c-4576-8813-41ebb2c6aa0e/0000_wn18rr_transd +772 results/transd/wn18rr/random/adam/2020-04-05-17-25_60ca6f18-634c-4576-8813-41ebb2c6aa0e/0001_wn18rr_transd +773 results/transd/wn18rr/random/adam/2020-04-05-17-25_60ca6f18-634c-4576-8813-41ebb2c6aa0e/0002_wn18rr_transd +774 results/transd/wn18rr/random/adam/2020-04-05-17-25_60ca6f18-634c-4576-8813-41ebb2c6aa0e/0003_wn18rr_transd +775 results/transd/wn18rr/random/adam/2020-04-05-17-25_826d3a49-7242-464c-abdd-231042701e14/0000_wn18rr_transd +776 results/transd/wn18rr/random/adam/2020-04-05-17-25_826d3a49-7242-464c-abdd-231042701e14/0001_wn18rr_transd +777 
results/transd/wn18rr/random/adam/2020-04-16-13-12_ae31e099-a7a0-4b73-854c-f2dc0444cc9c/0000_wn18rr_transd +778 results/transd/wn18rr/random/adam/2020-04-16-13-12_ae31e099-a7a0-4b73-854c-f2dc0444cc9c/0001_wn18rr_transd +779 results/transd/wn18rr/random/adam/2020-04-16-13-12_c5b85f3c-3627-403e-935a-923502670cc1/0000_wn18rr_transd +780 results/transd/wn18rr/random/adam/2020-04-16-13-12_c5b85f3c-3627-403e-935a-923502670cc1/0001_wn18rr_transd +781 results/transd/wn18rr/random/adam/2020-04-16-13-12_c5b85f3c-3627-403e-935a-923502670cc1/0002_wn18rr_transd +782 results/transd/wn18rr/random/adam/2020-04-16-13-12_c5b85f3c-3627-403e-935a-923502670cc1/0003_wn18rr_transd +783 results/transd/yago310/random/adam/2020-05-23-09-54_433d541f-3cfb-401a-894d-2c4fb02294b5/0000_yago310_transd +784 results/transd/yago310/random/adam/2020-05-23-09-54_433d541f-3cfb-401a-894d-2c4fb02294b5/0001_yago310_transd +785 results/transd/yago310/random/adam/2020-05-23-09-54_f77b1773-3eb3-444b-9afc-939775755a29/0000_yago310_transd +786 results/transd/yago310/random/adam/2020-05-23-09-54_f77b1773-3eb3-444b-9afc-939775755a29/0001_yago310_transd +787 results/transd/yago310/random/adam/2020-05-23-12-08_3bad8672-a2aa-4e00-8845-b7611f97f91f/0000_yago310_transd +788 results/transd/yago310/random/adam/2020-05-23-12-08_3bad8672-a2aa-4e00-8845-b7611f97f91f/0001_yago310_transd +789 results/transd/yago310/random/adam/2020-05-23-12-20_37bcf335-1bde-429f-be0d-ba32c5e44ac7/0000_yago310_transd +790 results/transd/yago310/random/adam/2020-05-23-12-20_37bcf335-1bde-429f-be0d-ba32c5e44ac7/0001_yago310_transd +791 results/transe/fb15k237/random/adam/2020-02-29-16-36_3fcdf31a-c1dd-4798-8beb-9dd142d0bb57_hpo_transe_fb15k237_owa_adam/0000_fb15k237_transe +792 results/transe/fb15k237/random/adam/2020-02-29-16-36_3fcdf31a-c1dd-4798-8beb-9dd142d0bb57_hpo_transe_fb15k237_owa_adam/0001_fb15k237_transe +793 
results/transe/fb15k237/random/adam/2020-02-29-16-36_3fcdf31a-c1dd-4798-8beb-9dd142d0bb57_hpo_transe_fb15k237_owa_adam/0002_fb15k237_transe +794 results/transe/fb15k237/random/adam/2020-02-29-16-36_3fcdf31a-c1dd-4798-8beb-9dd142d0bb57_hpo_transe_fb15k237_owa_adam/0003_fb15k237_transe +795 results/transe/fb15k237/random/adam/2020-02-29-16-47_f4f11684-52c1-43ea-9e65-63d4151c1af8_hpo_transe_fb15k237_owa_mrl/0000_fb15k237_transe +796 results/transe/fb15k237/random/adam/2020-02-29-16-47_f4f11684-52c1-43ea-9e65-63d4151c1af8_hpo_transe_fb15k237_owa_mrl/0002_fb15k237_transe +797 results/transe/fb15k237/random/adam/2020-02-29-16-55_32544762-ae85-435d-bb40-4f2b79f53dd2_hpo_transe_fb15k237_owa_nssal/0000_fb15k237_transe +798 results/transe/fb15k237/random/adam/2020-02-29-16-55_32544762-ae85-435d-bb40-4f2b79f53dd2_hpo_transe_fb15k237_owa_nssal/0002_fb15k237_transe +799 results/transe/fb15k237/random/adam/2020-02-29-16-58_b8e79b84-9369-4097-b12f-951d8ce3b390_hpo_transe_fb15k237_lcwa_adam/0000_fb15k237_transe +800 results/transe/fb15k237/random/adam/2020-02-29-16-58_b8e79b84-9369-4097-b12f-951d8ce3b390_hpo_transe_fb15k237_lcwa_adam/0001_fb15k237_transe +801 results/transe/fb15k237/random/adam/2020-02-29-16-58_b8e79b84-9369-4097-b12f-951d8ce3b390_hpo_transe_fb15k237_lcwa_adam/0002_fb15k237_transe +802 results/transe/fb15k237/random/adam/2020-02-29-16-58_b8e79b84-9369-4097-b12f-951d8ce3b390_hpo_transe_fb15k237_lcwa_adam/0003_fb15k237_transe +803 results/transe/fb15k237/random/adam/2020-03-10-09-30_43516558-f3a2-4038-b2ed-80d4a106968d_hpo_transe_fb15k237_lcwa_crossentropy/0000_fb15k237_transe +804 results/transe/fb15k237/random/adam/2020-03-10-09-30_43516558-f3a2-4038-b2ed-80d4a106968d_hpo_transe_fb15k237_lcwa_crossentropy/0002_fb15k237_transe +805 results/transe/kinships/random/adadelta/2020-04-10-12-46_cc69e057-12e8-490a-89e4-6b4266298f72_hpo_transe_kinships_lcwa_adadelta/0000_kinships_transe +806 
results/transe/kinships/random/adadelta/2020-04-10-12-46_cc69e057-12e8-490a-89e4-6b4266298f72_hpo_transe_kinships_lcwa_adadelta/0001_kinships_transe +807 results/transe/kinships/random/adadelta/2020-04-10-12-46_cc69e057-12e8-490a-89e4-6b4266298f72_hpo_transe_kinships_lcwa_adadelta/0002_kinships_transe +808 results/transe/kinships/random/adadelta/2020-04-10-12-46_cc69e057-12e8-490a-89e4-6b4266298f72_hpo_transe_kinships_lcwa_adadelta/0003_kinships_transe +809 results/transe/kinships/random/adadelta/2020-04-10-12-47_93a61542-b7c5-4b98-9489-5b606c2e0a50_hpo_transe_kinships_lcwa_crossentropy/0000_kinships_transe +810 results/transe/kinships/random/adadelta/2020-04-10-12-47_93a61542-b7c5-4b98-9489-5b606c2e0a50_hpo_transe_kinships_lcwa_crossentropy/0001_kinships_transe +811 results/transe/kinships/random/adadelta/2020-04-10-12-47_fbc8e56c-e074-4132-a6e5-b4d008f829ef_hpo_transe_kinships_owa_adadelta/0000_kinships_transe +812 results/transe/kinships/random/adadelta/2020-04-10-12-47_fbc8e56c-e074-4132-a6e5-b4d008f829ef_hpo_transe_kinships_owa_adadelta/0001_kinships_transe +813 results/transe/kinships/random/adadelta/2020-04-10-12-47_fbc8e56c-e074-4132-a6e5-b4d008f829ef_hpo_transe_kinships_owa_adadelta/0002_kinships_transe +814 results/transe/kinships/random/adadelta/2020-04-10-12-47_fbc8e56c-e074-4132-a6e5-b4d008f829ef_hpo_transe_kinships_owa_adadelta/0003_kinships_transe +815 results/transe/kinships/random/adadelta/2020-04-10-12-48_11c25671-67b0-49d1-91f1-2dc215ef29f0_hpo_transe_kinships_owa_mrl/0000_kinships_transe +816 results/transe/kinships/random/adadelta/2020-04-10-12-48_11c25671-67b0-49d1-91f1-2dc215ef29f0_hpo_transe_kinships_owa_mrl/0001_kinships_transe +817 results/transe/kinships/random/adadelta/2020-04-10-12-48_854dcdfa-674e-45ef-a99d-54fd3dac248f_hpo_transe_kinships_owa_nssal/0000_kinships_transe +818 results/transe/kinships/random/adadelta/2020-04-10-12-48_854dcdfa-674e-45ef-a99d-54fd3dac248f_hpo_transe_kinships_owa_nssal/0001_kinships_transe +819 
results/transe/kinships/random/adam/2020-03-07-00-01_73c6cf9b-78ad-467d-91d7-05767b9d3c2c_hpo_transe_kinships_lcwa_adam/0000_kinships_transe +820 results/transe/kinships/random/adam/2020-03-07-00-01_73c6cf9b-78ad-467d-91d7-05767b9d3c2c_hpo_transe_kinships_lcwa_adam/0001_kinships_transe +821 results/transe/kinships/random/adam/2020-03-07-00-01_73c6cf9b-78ad-467d-91d7-05767b9d3c2c_hpo_transe_kinships_lcwa_adam/0002_kinships_transe +822 results/transe/kinships/random/adam/2020-03-07-00-01_73c6cf9b-78ad-467d-91d7-05767b9d3c2c_hpo_transe_kinships_lcwa_adam/0003_kinships_transe +823 results/transe/kinships/random/adam/2020-03-07-00-02_63fd7ac9-13c2-46e0-8c83-071e6197cf1e_hpo_transe_kinships_lcwa_crossentropy/0000_kinships_transe +824 results/transe/kinships/random/adam/2020-03-07-00-02_63fd7ac9-13c2-46e0-8c83-071e6197cf1e_hpo_transe_kinships_lcwa_crossentropy/0002_kinships_transe +825 results/transe/kinships/random/adam/2020-03-07-00-05_85d26bfb-c231-4bad-b90f-753e84257a22_hpo_transe_kinships_owa_adam/0000_kinships_transe +826 results/transe/kinships/random/adam/2020-03-07-00-05_85d26bfb-c231-4bad-b90f-753e84257a22_hpo_transe_kinships_owa_adam/0001_kinships_transe +827 results/transe/kinships/random/adam/2020-03-07-00-05_85d26bfb-c231-4bad-b90f-753e84257a22_hpo_transe_kinships_owa_adam/0002_kinships_transe +828 results/transe/kinships/random/adam/2020-03-07-00-05_85d26bfb-c231-4bad-b90f-753e84257a22_hpo_transe_kinships_owa_adam/0003_kinships_transe +829 results/transe/kinships/random/adam/2020-03-07-00-06_9a4f09e4-ca13-4b00-bb2e-47ffe269e7be_hpo_transe_kinships_owa_mrl/0000_kinships_transe +830 results/transe/kinships/random/adam/2020-03-07-00-06_9a4f09e4-ca13-4b00-bb2e-47ffe269e7be_hpo_transe_kinships_owa_mrl/0002_kinships_transe +831 results/transe/kinships/random/adam/2020-03-07-00-07_92c11128-ccb4-440c-aec3-51b2ddf2d019_hpo_transe_kinships_owa_nssal/0000_kinships_transe +832 
results/transe/kinships/random/adam/2020-03-07-00-07_92c11128-ccb4-440c-aec3-51b2ddf2d019_hpo_transe_kinships_owa_nssal/0002_kinships_transe +833 results/transe/wn18rr/random/adam/2020-04-01-17-40_20f2508e-1a82-4c2d-930f-dec39f9089e3/0000_wn18rr_transe +834 results/transe/wn18rr/random/adam/2020-04-01-17-40_20f2508e-1a82-4c2d-930f-dec39f9089e3/0001_wn18rr_transe +835 results/transe/wn18rr/random/adam/2020-04-01-17-40_20f2508e-1a82-4c2d-930f-dec39f9089e3/0002_wn18rr_transe +836 results/transe/wn18rr/random/adam/2020-04-01-17-40_20f2508e-1a82-4c2d-930f-dec39f9089e3/0003_wn18rr_transe +837 results/transe/wn18rr/random/adam/2020-04-01-17-40_2b669219-c610-42c1-97fc-ce1e0fd7fa6f/0000_wn18rr_transe +838 results/transe/wn18rr/random/adam/2020-04-01-17-40_2b669219-c610-42c1-97fc-ce1e0fd7fa6f/0002_wn18rr_transe +839 results/transe/wn18rr/random/adam/2020-04-01-17-40_c4ba8bd5-bb90-42d9-bfbc-c33f0f5c3d90/0000_wn18rr_transe +840 results/transe/wn18rr/random/adam/2020-04-01-17-40_c4ba8bd5-bb90-42d9-bfbc-c33f0f5c3d90/0002_wn18rr_transe +841 results/transe/wn18rr/random/adam/2020-04-11-12-24_bd52ebba-722d-44ed-a6ec-9ac4250c5f45_hpo_transe_wn18rr_lcwa_crossentropy/0000_wn18rr_transe +842 results/transe/wn18rr/random/adam/2020-04-11-12-24_bd52ebba-722d-44ed-a6ec-9ac4250c5f45_hpo_transe_wn18rr_lcwa_crossentropy/0001_wn18rr_transe +843 results/transe/wn18rr/random/adam/2020-05-20-03-15_00b22931-c88f-4d90-a606-c94a31a80516/0000_wn18rr_transe +844 results/transe/wn18rr/random/adam/2020-05-20-03-15_00b22931-c88f-4d90-a606-c94a31a80516/0001_wn18rr_transe +845 results/transe/wn18rr/random/adam/2020-05-20-03-15_00b22931-c88f-4d90-a606-c94a31a80516/0002_wn18rr_transe +846 results/transe/wn18rr/random/adam/2020-05-20-03-15_00b22931-c88f-4d90-a606-c94a31a80516/0003_wn18rr_transe +847 results/transe/yago310/random/adam/2020-05-17-20-18_8d1ff4bf-9929-45a8-949e-63626a1604b4/0000_yago310_transe +848 
results/transe/yago310/random/adam/2020-05-17-20-18_8d1ff4bf-9929-45a8-949e-63626a1604b4/0001_yago310_transe +849 results/transe/yago310/random/adam/2020-05-17-20-18_dc7ab8b6-cf73-4efd-b854-fe35540900ee/0000_yago310_transe +850 results/transe/yago310/random/adam/2020-05-17-20-18_dc7ab8b6-cf73-4efd-b854-fe35540900ee/0001_yago310_transe +851 results/transe/yago310/random/adam/2020-05-21-03-04_795e8b3f-dc46-4538-b201-bb64cbb98cab/0000_yago310_transe +852 results/transe/yago310/random/adam/2020-05-21-03-04_795e8b3f-dc46-4538-b201-bb64cbb98cab/0001_yago310_transe +853 results/transe/yago310/random/adam/2020-05-21-03-04_bc6364c0-27c6-4ba0-b61c-b7bca827c001/0000_yago310_transe +854 results/transe/yago310/random/adam/2020-05-21-03-04_bc6364c0-27c6-4ba0-b61c-b7bca827c001/0001_yago310_transe +855 results/transh/fb15k237/random/adam/2020-02-29-17-07_6a9ae0c2-03e3-436b-976a-1ad225fb2c45_hpo_transh_fb15k237_owa_adam/0000_fb15k237_transh +856 results/transh/fb15k237/random/adam/2020-02-29-17-07_6a9ae0c2-03e3-436b-976a-1ad225fb2c45_hpo_transh_fb15k237_owa_adam/0001_fb15k237_transh +857 results/transh/fb15k237/random/adam/2020-02-29-17-07_6a9ae0c2-03e3-436b-976a-1ad225fb2c45_hpo_transh_fb15k237_owa_adam/0002_fb15k237_transh +858 results/transh/fb15k237/random/adam/2020-02-29-17-07_6a9ae0c2-03e3-436b-976a-1ad225fb2c45_hpo_transh_fb15k237_owa_adam/0003_fb15k237_transh +859 results/transh/fb15k237/random/adam/2020-03-20-17-45_05fcc3e8-5bc4-4098-87f2-f9727aa7e9d6_hpo_transh_fb15k237_owa_nssal/0000_fb15k237_transh +860 results/transh/fb15k237/random/adam/2020-03-20-17-45_05fcc3e8-5bc4-4098-87f2-f9727aa7e9d6_hpo_transh_fb15k237_owa_nssal/0002_fb15k237_transh +861 results/transh/fb15k237/random/adam/2020-03-20-17-45_51d23c2f-78fd-4b45-a6bb-83b4f3569677_hpo_transh_fb15k237_owa_mrl./0000_fb15k237_transh +862 results/transh/fb15k237/random/adam/2020-03-20-17-45_51d23c2f-78fd-4b45-a6bb-83b4f3569677_hpo_transh_fb15k237_owa_mrl./0002_fb15k237_transh +863 
results/transh/fb15k237/random/adam/2020-03-20-17-45_b76b9ec7-0d03-40db-9a73-70ddf2d3d327_hpo_transh_fb15k237_lcwa_adam/0000_fb15k237_transh +864 results/transh/fb15k237/random/adam/2020-03-20-17-45_b76b9ec7-0d03-40db-9a73-70ddf2d3d327_hpo_transh_fb15k237_lcwa_adam/0001_fb15k237_transh +865 results/transh/fb15k237/random/adam/2020-03-20-17-45_b76b9ec7-0d03-40db-9a73-70ddf2d3d327_hpo_transh_fb15k237_lcwa_adam/0002_fb15k237_transh +866 results/transh/fb15k237/random/adam/2020-03-20-17-45_b76b9ec7-0d03-40db-9a73-70ddf2d3d327_hpo_transh_fb15k237_lcwa_adam/0003_fb15k237_transh +867 results/transh/fb15k237/random/adam/2020-03-20-17-45_dcdb977b-4f47-4b16-b44d-59fa7f770f75_hpo_transh_fb15k237_lcwa_crossentropy/0000_fb15k237_transh +868 results/transh/fb15k237/random/adam/2020-03-20-17-45_dcdb977b-4f47-4b16-b44d-59fa7f770f75_hpo_transh_fb15k237_lcwa_crossentropy/0002_fb15k237_transh +869 results/transh/kinships/random/adam/2020-03-07-00-18_d55990be-584a-40fb-86b7-7bb409ff82a2_hpo_transh_kinships_lcwa_adam/0000_kinships_transh +870 results/transh/kinships/random/adam/2020-03-07-00-18_d55990be-584a-40fb-86b7-7bb409ff82a2_hpo_transh_kinships_lcwa_adam/0001_kinships_transh +871 results/transh/kinships/random/adam/2020-03-07-00-18_d55990be-584a-40fb-86b7-7bb409ff82a2_hpo_transh_kinships_lcwa_adam/0002_kinships_transh +872 results/transh/kinships/random/adam/2020-03-07-00-18_d55990be-584a-40fb-86b7-7bb409ff82a2_hpo_transh_kinships_lcwa_adam/0003_kinships_transh +873 results/transh/kinships/random/adam/2020-03-07-00-19_a1a1a0a3-8e50-497d-a9d9-9a1ebc4ab054_hpo_transh_kinships_lcwa_crossentropy/0000_kinships_transh +874 results/transh/kinships/random/adam/2020-03-07-00-19_a1a1a0a3-8e50-497d-a9d9-9a1ebc4ab054_hpo_transh_kinships_lcwa_crossentropy/0002_kinships_transh +875 results/transh/kinships/random/adam/2020-03-08-12-11_68ecbf37-44f1-4849-b472-d04743cf6a18_hpo_transh_kinships_owa_adam/0000_kinships_transh +876 
results/transh/kinships/random/adam/2020-03-08-12-11_68ecbf37-44f1-4849-b472-d04743cf6a18_hpo_transh_kinships_owa_adam/0001_kinships_transh +877 results/transh/kinships/random/adam/2020-03-08-12-11_68ecbf37-44f1-4849-b472-d04743cf6a18_hpo_transh_kinships_owa_adam/0002_kinships_transh +878 results/transh/kinships/random/adam/2020-03-08-12-11_68ecbf37-44f1-4849-b472-d04743cf6a18_hpo_transh_kinships_owa_adam/0003_kinships_transh +879 results/transh/kinships/random/adam/2020-03-08-12-13_dfee2ab0-af5f-4ce0-91e3-a539f3e24173_hpo_transh_kinships_owa_mrl/0000_kinships_transh +880 results/transh/kinships/random/adam/2020-03-08-12-13_dfee2ab0-af5f-4ce0-91e3-a539f3e24173_hpo_transh_kinships_owa_mrl/0002_kinships_transh +881 results/transh/kinships/random/adam/2020-03-08-12-14_38912add-a331-43bb-baa7-38dda0545748_hpo_transh_kinships_owa_nssal/0000_kinships_transh +882 results/transh/kinships/random/adam/2020-03-08-12-14_38912add-a331-43bb-baa7-38dda0545748_hpo_transh_kinships_owa_nssal/0002_kinships_transh +883 results/transh/wn18rr/random/adam/2020-04-05-17-24_34a7a673-d1f3-4169-bc0a-574ce7f0ab57/0000_wn18rr_transh +884 results/transh/wn18rr/random/adam/2020-04-05-17-24_34a7a673-d1f3-4169-bc0a-574ce7f0ab57/0001_wn18rr_transh +885 results/transh/wn18rr/random/adam/2020-04-05-17-24_35433fad-bcee-4e46-851f-d845fdd2a580/0000_wn18rr_transh +886 results/transh/wn18rr/random/adam/2020-04-05-17-24_35433fad-bcee-4e46-851f-d845fdd2a580/0001_wn18rr_transh +887 results/transh/wn18rr/random/adam/2020-04-05-17-24_f0293b5d-be14-4c43-a3bd-e1bf17b352f6/0000_wn18rr_transh +888 results/transh/wn18rr/random/adam/2020-04-05-17-24_f0293b5d-be14-4c43-a3bd-e1bf17b352f6/0001_wn18rr_transh +889 results/transh/wn18rr/random/adam/2020-04-05-17-24_f0293b5d-be14-4c43-a3bd-e1bf17b352f6/0002_wn18rr_transh +890 results/transh/wn18rr/random/adam/2020-04-05-17-24_f0293b5d-be14-4c43-a3bd-e1bf17b352f6/0003_wn18rr_transh +891 
results/transh/wn18rr/random/adam/2020-04-25-19-04_42c59c51-244c-41b5-8f50-74579f4a01cd/0000_wn18rr_transh +892 results/transh/wn18rr/random/adam/2020-04-25-19-04_42c59c51-244c-41b5-8f50-74579f4a01cd/0001_wn18rr_transh +893 results/transh/wn18rr/random/adam/2020-04-25-19-04_5dc85476-1109-46f0-9a03-bfd74187cd13/0000_wn18rr_transh +894 results/transh/wn18rr/random/adam/2020-04-25-19-04_5dc85476-1109-46f0-9a03-bfd74187cd13/0001_wn18rr_transh +895 results/transh/wn18rr/random/adam/2020-04-25-19-04_edafc5eb-b41f-4630-bce5-a03434c97c82/0000_wn18rr_transh +896 results/transh/wn18rr/random/adam/2020-04-25-19-04_edafc5eb-b41f-4630-bce5-a03434c97c82/0001_wn18rr_transh +897 results/transr/fb15k237/random/adam/2020-03-20-17-46_a7a65e41-103d-4b9c-827f-484854813646/0000_fb15k237_transr +898 results/transr/fb15k237/random/adam/2020-03-20-17-46_a7a65e41-103d-4b9c-827f-484854813646/0001_fb15k237_transr +899 results/transr/fb15k237/random/adam/2020-03-20-17-47_b753dd04-81b2-4419-91e1-bec5c695b590/0000_fb15k237_transr +900 results/transr/fb15k237/random/adam/2020-03-20-17-47_b753dd04-81b2-4419-91e1-bec5c695b590/0001_fb15k237_transr +901 results/transr/fb15k237/random/adam/2020-03-20-17-47_b753dd04-81b2-4419-91e1-bec5c695b590/0002_fb15k237_transr +902 results/transr/fb15k237/random/adam/2020-03-20-17-47_b753dd04-81b2-4419-91e1-bec5c695b590/0003_fb15k237_transr +903 results/transr/fb15k237/random/adam/2020-05-12-17-00_c74298ee-6e3e-4e95-9b3f-193cf7ed995f/0000_fb15k237_transr +904 results/transr/fb15k237/random/adam/2020-05-12-17-00_c74298ee-6e3e-4e95-9b3f-193cf7ed995f/0001_fb15k237_transr +905 results/transr/fb15k237/random/adam/2020-05-12-17-00_d0d2524f-f41b-4627-8413-07a4e5d295a9/0000_fb15k237_transr +906 results/transr/fb15k237/random/adam/2020-05-12-17-00_d0d2524f-f41b-4627-8413-07a4e5d295a9/0001_fb15k237_transr +907 results/transr/kinships/random/adadelta/2020-05-17-09-41_a21fa9d0-7598-471b-92a2-a4e5d73d4008_hpo_transr_kinships_lcwa_adadelta/0000_kinships_transr +908 
results/transr/kinships/random/adadelta/2020-05-17-09-41_a21fa9d0-7598-471b-92a2-a4e5d73d4008_hpo_transr_kinships_lcwa_adadelta/0001_kinships_transr +909 results/transr/kinships/random/adadelta/2020-05-17-09-41_a21fa9d0-7598-471b-92a2-a4e5d73d4008_hpo_transr_kinships_lcwa_adadelta/0002_kinships_transr +910 results/transr/kinships/random/adadelta/2020-05-17-09-41_a21fa9d0-7598-471b-92a2-a4e5d73d4008_hpo_transr_kinships_lcwa_adadelta/0003_kinships_transr +911 results/transr/kinships/random/adadelta/2020-05-17-09-43_eca6210c-8d55-4464-a197-f20dd5cbf800_hpo_transr_kinships_lcwa_crossentropy/0000_kinships_transr +912 results/transr/kinships/random/adadelta/2020-05-17-09-43_eca6210c-8d55-4464-a197-f20dd5cbf800_hpo_transr_kinships_lcwa_crossentropy/0001_kinships_transr +913 results/transr/kinships/random/adadelta/2020-05-17-09-44_cc0d0a14-297b-4277-8c6c-b279b9ab167c_hpo_transr_kinships_owa_adadelta/0000_kinships_transr +914 results/transr/kinships/random/adadelta/2020-05-17-09-44_cc0d0a14-297b-4277-8c6c-b279b9ab167c_hpo_transr_kinships_owa_adadelta/0001_kinships_transr +915 results/transr/kinships/random/adadelta/2020-05-17-09-44_cc0d0a14-297b-4277-8c6c-b279b9ab167c_hpo_transr_kinships_owa_adadelta/0002_kinships_transr +916 results/transr/kinships/random/adadelta/2020-05-17-09-44_cc0d0a14-297b-4277-8c6c-b279b9ab167c_hpo_transr_kinships_owa_adadelta/0003_kinships_transr +917 results/transr/kinships/random/adadelta/2020-05-17-09-50_4b4e81c3-733c-41e4-a484-7eadb7234b6d_hpo_transr_kinships_owa_mrl/0000_kinships_transr +918 results/transr/kinships/random/adadelta/2020-05-17-09-50_4b4e81c3-733c-41e4-a484-7eadb7234b6d_hpo_transr_kinships_owa_mrl/0001_kinships_transr +919 results/transr/kinships/random/adadelta/2020-05-18-07-45_a9e05d39-415f-4801-ba2d-569daf00e528_hpo_transr_kinships_owa_nssal/0000_kinships_transr +920 results/transr/kinships/random/adadelta/2020-05-18-07-45_a9e05d39-415f-4801-ba2d-569daf00e528_hpo_transr_kinships_owa_nssal/0001_kinships_transr +921 
results/transr/kinships/random/adam/2020-03-16-10-25_67124f87-75d1-4812-8b17-1a7af3bb9930_hpo_transr_kinships_lcwa_adam/0000_kinships_transr +922 results/transr/kinships/random/adam/2020-03-16-10-25_67124f87-75d1-4812-8b17-1a7af3bb9930_hpo_transr_kinships_lcwa_adam/0001_kinships_transr +923 results/transr/kinships/random/adam/2020-03-16-10-25_67124f87-75d1-4812-8b17-1a7af3bb9930_hpo_transr_kinships_lcwa_adam/0002_kinships_transr +924 results/transr/kinships/random/adam/2020-03-16-10-25_67124f87-75d1-4812-8b17-1a7af3bb9930_hpo_transr_kinships_lcwa_adam/0003_kinships_transr +925 results/transr/kinships/random/adam/2020-03-16-10-26_7f773118-27fb-4f01-887b-c19caa6bf149_hpo_transr_kinships_lcwa_crossentropy/0000_kinships_transr +926 results/transr/kinships/random/adam/2020-03-16-10-26_7f773118-27fb-4f01-887b-c19caa6bf149_hpo_transr_kinships_lcwa_crossentropy/0002_kinships_transr +927 results/transr/kinships/random/adam/2020-04-02-18-34_91ce6943-37df-4605-aab6-cbe9292065a3_hpo_transr_kinships_owa_adam/0000_kinships_transr +928 results/transr/kinships/random/adam/2020-04-02-18-34_91ce6943-37df-4605-aab6-cbe9292065a3_hpo_transr_kinships_owa_adam/0001_kinships_transr +929 results/transr/kinships/random/adam/2020-04-02-18-34_91ce6943-37df-4605-aab6-cbe9292065a3_hpo_transr_kinships_owa_adam/0002_kinships_transr +930 results/transr/kinships/random/adam/2020-04-02-18-34_91ce6943-37df-4605-aab6-cbe9292065a3_hpo_transr_kinships_owa_adam/0003_kinships_transr +931 results/transr/kinships/random/adam/2020-05-21-21-43_1512fa44-9e98-47b4-930b-385019e6d1ee/0000_kinships_transr +932 results/transr/kinships/random/adam/2020-05-21-21-43_1512fa44-9e98-47b4-930b-385019e6d1ee/0001_kinships_transr +933 results/transr/kinships/random/adam/2020-05-21-21-43_619e16e2-81b8-4439-9e44-5c9b5ff22f7c/0000_kinships_transr +934 results/transr/kinships/random/adam/2020-05-21-21-43_619e16e2-81b8-4439-9e44-5c9b5ff22f7c/0001_kinships_transr +935 
results/transr/wn18rr/random/adam/2020-04-25-19-19_33ba321b-f13e-44eb-a2da-43eaee715bab/0000_wn18rr_transr +936 results/transr/wn18rr/random/adam/2020-04-25-19-19_33ba321b-f13e-44eb-a2da-43eaee715bab/0001_wn18rr_transr +937 results/transr/wn18rr/random/adam/2020-04-25-19-19_4c7b9c55-93b8-437f-b811-82b061bd311c/0000_wn18rr_transr +938 results/transr/wn18rr/random/adam/2020-04-25-19-19_4c7b9c55-93b8-437f-b811-82b061bd311c/0001_wn18rr_transr +939 results/transr/wn18rr/random/adam/2020-04-25-19-19_54b3eddf-a1b5-403a-b7f6-6400dce71c6a/0000_wn18rr_transr +940 results/transr/wn18rr/random/adam/2020-04-25-19-19_54b3eddf-a1b5-403a-b7f6-6400dce71c6a/0001_wn18rr_transr +941 results/transr/wn18rr/random/adam/2020-04-25-19-19_599e598a-af62-4dee-a846-9955a046a263/0000_wn18rr_transr +942 results/transr/wn18rr/random/adam/2020-04-25-19-19_599e598a-af62-4dee-a846-9955a046a263/0001_wn18rr_transr +943 results/transr/wn18rr/random/adam/2020-04-25-19-19_7a20ba52-0c83-459e-910b-a4acec40c19a/0000_wn18rr_transr +944 results/transr/wn18rr/random/adam/2020-04-25-19-19_7a20ba52-0c83-459e-910b-a4acec40c19a/0001_wn18rr_transr +945 results/transr/wn18rr/random/adam/2020-04-25-19-19_a362e0d1-aa7a-4d9b-a990-9520137fe6db/0000_wn18rr_transr +946 results/transr/wn18rr/random/adam/2020-04-25-19-19_a362e0d1-aa7a-4d9b-a990-9520137fe6db/0001_wn18rr_transr +947 results/transr/wn18rr/random/adam/2020-05-05-15-55_6f1a7817-3e1a-4e7e-9094-274370f738d2/0000_wn18rr_transr +948 results/transr/wn18rr/random/adam/2020-05-05-15-55_6f1a7817-3e1a-4e7e-9094-274370f738d2/0001_wn18rr_transr +949 results/tucker/fb15k237/random/adam/2020-03-20-11-13_0866235c-09ae-4f29-93d5-e9959f99ff1e/0000_fb15k237_tucker +950 results/tucker/fb15k237/random/adam/2020-03-20-11-13_0866235c-09ae-4f29-93d5-e9959f99ff1e/0001_fb15k237_tucker +951 results/tucker/fb15k237/random/adam/2020-03-20-11-13_f72ba4de-e2bb-47dc-9d89-b672f91b52de/0000_fb15k237_tucker +952 
results/tucker/fb15k237/random/adam/2020-03-20-11-13_f72ba4de-e2bb-47dc-9d89-b672f91b52de/0001_fb15k237_tucker +953 results/tucker/fb15k237/random/adam/2020-03-20-11-13_f72ba4de-e2bb-47dc-9d89-b672f91b52de/0002_fb15k237_tucker +954 results/tucker/fb15k237/random/adam/2020-03-20-11-13_f72ba4de-e2bb-47dc-9d89-b672f91b52de/0003_fb15k237_tucker +955 results/tucker/kinships/random/adadelta/2020-05-18-10-26_7d45fb67-9cfd-4185-8357-64773026b9d8/0000_kinships_tucker +956 results/tucker/kinships/random/adadelta/2020-05-18-10-26_7d45fb67-9cfd-4185-8357-64773026b9d8/0001_kinships_tucker +957 results/tucker/kinships/random/adadelta/2020-05-18-10-26_7d45fb67-9cfd-4185-8357-64773026b9d8/0002_kinships_tucker +958 results/tucker/kinships/random/adadelta/2020-05-18-10-26_7d45fb67-9cfd-4185-8357-64773026b9d8/0003_kinships_tucker +959 results/tucker/kinships/random/adadelta/2020-05-18-10-27_eacbd4bd-4be1-4ce5-a9ea-7174557da600/0000_kinships_tucker +960 results/tucker/kinships/random/adadelta/2020-05-18-10-27_eacbd4bd-4be1-4ce5-a9ea-7174557da600/0001_kinships_tucker +961 results/tucker/kinships/random/adadelta/2020-05-19-08-52_54b5115e-0385-4c48-b0a4-cc6b4509c46b/0000_kinships_tucker +962 results/tucker/kinships/random/adadelta/2020-05-19-08-52_54b5115e-0385-4c48-b0a4-cc6b4509c46b/0001_kinships_tucker +963 results/tucker/kinships/random/adadelta/2020-05-19-08-52_54b5115e-0385-4c48-b0a4-cc6b4509c46b/0002_kinships_tucker +964 results/tucker/kinships/random/adadelta/2020-05-19-10-56_cfd7fae5-0146-4204-b2d0-4a5d9df88680/0000_kinships_tucker +965 results/tucker/kinships/random/adadelta/2020-05-19-10-56_cfd7fae5-0146-4204-b2d0-4a5d9df88680/0001_kinships_tucker +966 results/tucker/kinships/random/adadelta/2020-05-19-12-27_3ce94651-8d84-4261-b0a5-af898a5d34b6/0000_kinships_tucker +967 results/tucker/kinships/random/adam/2020-04-02-18-20_8e1d129d-6cb2-449a-8423-54ab4d682c2b/0000_kinships_tucker +968 
results/tucker/kinships/random/adam/2020-04-02-18-20_8e1d129d-6cb2-449a-8423-54ab4d682c2b/0001_kinships_tucker +969 results/tucker/kinships/random/adam/2020-04-02-18-20_8e1d129d-6cb2-449a-8423-54ab4d682c2b/0002_kinships_tucker +970 results/tucker/kinships/random/adam/2020-04-02-18-20_8e1d129d-6cb2-449a-8423-54ab4d682c2b/0003_kinships_tucker +971 results/tucker/kinships/random/adam/2020-04-02-18-21_4cb7dc77-e4eb-4ce2-a64d-4556c5e19a28/0000_kinships_tucker +972 results/tucker/kinships/random/adam/2020-04-02-18-21_4cb7dc77-e4eb-4ce2-a64d-4556c5e19a28/0001_kinships_tucker +973 results/tucker/kinships/random/adam/2020-04-02-18-21_4cb7dc77-e4eb-4ce2-a64d-4556c5e19a28/0002_kinships_tucker +974 results/tucker/kinships/random/adam/2020-04-02-18-21_4cb7dc77-e4eb-4ce2-a64d-4556c5e19a28/0003_kinships_tucker +975 results/tucker/kinships/random/adam/2020-04-02-18-21_75b56b12-291e-4e9e-85c1-57bfaf83b275/0000_kinships_tucker +976 results/tucker/kinships/random/adam/2020-04-02-18-21_75b56b12-291e-4e9e-85c1-57bfaf83b275/0001_kinships_tucker +977 results/tucker/kinships/random/adam/2020-04-02-18-21_75b56b12-291e-4e9e-85c1-57bfaf83b275/0002_kinships_tucker +978 results/tucker/kinships/random/adam/2020-04-02-18-21_75b56b12-291e-4e9e-85c1-57bfaf83b275/0003_kinships_tucker +979 results/tucker/kinships/random/adam/2020-05-21-21-45_546fac04-86d1-44f8-b241-811b3d7a5ff3/0000_kinships_tucker +980 results/tucker/kinships/random/adam/2020-05-21-21-45_546fac04-86d1-44f8-b241-811b3d7a5ff3/0001_kinships_tucker +981 results/tucker/kinships/random/adam/2020-05-21-21-45_c839deab-3d8f-4cbb-bb13-9637a30a4551/0000_kinships_tucker +982 results/tucker/wn18rr/random/adam/2020-05-05-16-24_a34ed865-6bec-47aa-bedf-ef7a16030f43/0000_wn18rr_tucker +983 results/tucker/wn18rr/random/adam/2020-05-05-16-24_a34ed865-6bec-47aa-bedf-ef7a16030f43/0001_wn18rr_tucker +984 results/tucker/wn18rr/random/adam/2020-05-05-16-24_ab33d046-5fd5-460a-b728-8c4ef549213e/0000_wn18rr_tucker +985 
results/tucker/wn18rr/random/adam/2020-05-05-16-24_ab33d046-5fd5-460a-b728-8c4ef549213e/0001_wn18rr_tucker +986 results/tucker/wn18rr/random/adam/2020-05-05-16-24_e1ab01f7-6d51-4573-85dd-c4ee1f74ab0a/0000_wn18rr_tucker +987 results/tucker/wn18rr/random/adam/2020-05-05-16-24_e1ab01f7-6d51-4573-85dd-c4ee1f74ab0a/0001_wn18rr_tucker +988 results/tucker/wn18rr/random/adam/2020-05-05-16-24_eef69ad8-d197-4128-9562-c8f993860c00/0000_wn18rr_tucker +989 results/tucker/wn18rr/random/adam/2020-05-05-16-24_eef69ad8-d197-4128-9562-c8f993860c00/0001_wn18rr_tucker +990 results/tucker/wn18rr/random/adam/2020-05-05-16-24_ffaf19fb-3246-4221-952b-6e009faea6ef/0000_wn18rr_tucker +991 results/tucker/wn18rr/random/adam/2020-05-05-16-24_ffaf19fb-3246-4221-952b-6e009faea6ef/0001_wn18rr_tucker +992 results/unstructuredmodel/fb15k237/random/adam/2020-03-20-11-09_0a593427-a61b-4ec7-a4d0-d6ea46661421_hpo_unstructured_model_fb15k237_lcwa_adam/0000_fb15k237_unstructuredmodel +993 results/unstructuredmodel/fb15k237/random/adam/2020-03-20-11-09_0a593427-a61b-4ec7-a4d0-d6ea46661421_hpo_unstructured_model_fb15k237_lcwa_adam/0001_fb15k237_unstructuredmodel +994 results/unstructuredmodel/fb15k237/random/adam/2020-03-20-11-09_0a593427-a61b-4ec7-a4d0-d6ea46661421_hpo_unstructured_model_fb15k237_lcwa_adam/0002_fb15k237_unstructuredmodel +995 results/unstructuredmodel/fb15k237/random/adam/2020-03-20-11-09_0a593427-a61b-4ec7-a4d0-d6ea46661421_hpo_unstructured_model_fb15k237_lcwa_adam/0003_fb15k237_unstructuredmodel +996 results/unstructuredmodel/fb15k237/random/adam/2020-03-20-11-09_823a9c49-b877-43a9-93ca-5b5df69a61af_hpo_unstructured_model_fb15k237_lcwa_crossentropy/0000_fb15k237_unstructuredmodel +997 results/unstructuredmodel/fb15k237/random/adam/2020-03-20-11-09_823a9c49-b877-43a9-93ca-5b5df69a61af_hpo_unstructured_model_fb15k237_lcwa_crossentropy/0002_fb15k237_unstructuredmodel +998 
results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_28d553d4-6232-4f6a-8b5f-d4fa5863b54a_hpo_unstructured_model_fb15k237_owa_nssal/0000_fb15k237_unstructuredmodel +999 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_28d553d4-6232-4f6a-8b5f-d4fa5863b54a_hpo_unstructured_model_fb15k237_owa_nssal/0002_fb15k237_unstructuredmodel +1000 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_6a5bf985-cc3b-42ed-8406-3c2407bb900a_hpo_unstructured_model_fb15k237_owa_mrl/0000_fb15k237_unstructuredmodel +1001 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_6a5bf985-cc3b-42ed-8406-3c2407bb900a_hpo_unstructured_model_fb15k237_owa_mrl/0002_fb15k237_unstructuredmodel +1002 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_f34a0b51-e0ec-4b13-8faf-4b44d9e36834_hpo_unstructured_model_fb15k237_owa_adam/0000_fb15k237_unstructuredmodel +1003 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_f34a0b51-e0ec-4b13-8faf-4b44d9e36834_hpo_unstructured_model_fb15k237_owa_adam/0001_fb15k237_unstructuredmodel +1004 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_f34a0b51-e0ec-4b13-8faf-4b44d9e36834_hpo_unstructured_model_fb15k237_owa_adam/0002_fb15k237_unstructuredmodel +1005 results/unstructuredmodel/fb15k237/random/adam/2020-03-23-16-39_f34a0b51-e0ec-4b13-8faf-4b44d9e36834_hpo_unstructured_model_fb15k237_owa_adam/0003_fb15k237_unstructuredmodel +1006 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-49_f78b0db1-05e7-4e5a-8753-a7fa5bfbba1c_hpo_unstructured_model_kinships_lcwa_adadelta/0000_kinships_unstructuredmodel +1007 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-49_f78b0db1-05e7-4e5a-8753-a7fa5bfbba1c_hpo_unstructured_model_kinships_lcwa_adadelta/0001_kinships_unstructuredmodel +1008 
results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-49_f78b0db1-05e7-4e5a-8753-a7fa5bfbba1c_hpo_unstructured_model_kinships_lcwa_adadelta/0002_kinships_unstructuredmodel +1009 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-49_f78b0db1-05e7-4e5a-8753-a7fa5bfbba1c_hpo_unstructured_model_kinships_lcwa_adadelta/0003_kinships_unstructuredmodel +1010 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-50_23b3a55f-9092-4b28-9243-31d886e20e64_hpo_unstructured_model_kinships_lcwa_crossentropy/0000_kinships_unstructuredmodel +1011 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-50_23b3a55f-9092-4b28-9243-31d886e20e64_hpo_unstructured_model_kinships_lcwa_crossentropy/0001_kinships_unstructuredmodel +1012 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-51_a415d2ea-ae01-4e4a-bb89-4b277086687f_hpo_unstructured_model_kinships_owa_adadelta/0000_kinships_unstructuredmodel +1013 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-51_a415d2ea-ae01-4e4a-bb89-4b277086687f_hpo_unstructured_model_kinships_owa_adadelta/0001_kinships_unstructuredmodel +1014 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-51_a415d2ea-ae01-4e4a-bb89-4b277086687f_hpo_unstructured_model_kinships_owa_adadelta/0002_kinships_unstructuredmodel +1015 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-51_a415d2ea-ae01-4e4a-bb89-4b277086687f_hpo_unstructured_model_kinships_owa_adadelta/0003_kinships_unstructuredmodel +1016 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-52_b2c41423-9195-4e04-ac3e-3f698d11a43b_hpo_unstructured_model_kinships_owa_nssal/0000_kinships_unstructuredmodel +1017 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-52_b2c41423-9195-4e04-ac3e-3f698d11a43b_hpo_unstructured_model_kinships_owa_nssal/0001_kinships_unstructuredmodel +1018 
results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-52_c66fcd6e-cfd2-4343-9725-5793497d78af_hpo_unstructured_model_kinships_owa_mrl/0000_kinships_unstructuredmodel +1019 results/unstructuredmodel/kinships/random/adadelta/kinships/2020-05-12-09-52_c66fcd6e-cfd2-4343-9725-5793497d78af_hpo_unstructured_model_kinships_owa_mrl/0001_kinships_unstructuredmodel +1020 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-18_e31851fe-5979-41ec-a129-fdbea1702921_hpo_unstructured_model_kinships_lcwa_adam/0000_kinships_unstructuredmodel +1021 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-18_e31851fe-5979-41ec-a129-fdbea1702921_hpo_unstructured_model_kinships_lcwa_adam/0001_kinships_unstructuredmodel +1022 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-18_e31851fe-5979-41ec-a129-fdbea1702921_hpo_unstructured_model_kinships_lcwa_adam/0002_kinships_unstructuredmodel +1023 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-18_e31851fe-5979-41ec-a129-fdbea1702921_hpo_unstructured_model_kinships_lcwa_adam/0003_kinships_unstructuredmodel +1024 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-18_e9837262-21e6-4187-be5c-da87d895940b_hpo_unstructured_model_kinships_lcwa_crossentropy/0000_kinships_unstructuredmodel +1025 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-18_e9837262-21e6-4187-be5c-da87d895940b_hpo_unstructured_model_kinships_lcwa_crossentropy/0002_kinships_unstructuredmodel +1026 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-19_11b8f717-ce8e-4eef-94f5-c9eab8e14cb4_hpo_unstructured_model_kinships_owa_adam/0000_kinships_unstructuredmodel +1027 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-19_11b8f717-ce8e-4eef-94f5-c9eab8e14cb4_hpo_unstructured_model_kinships_owa_adam/0001_kinships_unstructuredmodel +1028 
results/unstructuredmodel/kinships/random/adam/2020-03-08-12-19_11b8f717-ce8e-4eef-94f5-c9eab8e14cb4_hpo_unstructured_model_kinships_owa_adam/0002_kinships_unstructuredmodel +1029 results/unstructuredmodel/kinships/random/adam/2020-03-08-12-19_11b8f717-ce8e-4eef-94f5-c9eab8e14cb4_hpo_unstructured_model_kinships_owa_adam/0003_kinships_unstructuredmodel +1030 results/unstructuredmodel/kinships/random/adam/2020-03-09-08-19_28728c2f-bcd6-4379-bcb4-2abe31960adf_hpo_unstructured_model_kinships_owa_nssal/0000_kinships_unstructuredmodel +1031 results/unstructuredmodel/kinships/random/adam/2020-03-09-08-19_28728c2f-bcd6-4379-bcb4-2abe31960adf_hpo_unstructured_model_kinships_owa_nssal/0002_kinships_unstructuredmodel +1032 results/unstructuredmodel/kinships/random/adam/2020-03-10-08-39_bb0b916b-bc35-4c90-a939-10a851b8b0c3_hpo_unstructured_model_kinships_owa_mrl/0000_kinships_unstructuredmodel +1033 results/unstructuredmodel/kinships/random/adam/2020-03-10-08-39_bb0b916b-bc35-4c90-a939-10a851b8b0c3_hpo_unstructured_model_kinships_owa_mrl/0002_kinships_unstructuredmodel +1034 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_0a43adbc-3c17-4f87-a554-1f0190aadb85/0000_wn18rr_unstructuredmodel +1035 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_0a43adbc-3c17-4f87-a554-1f0190aadb85/0001_wn18rr_unstructuredmodel +1036 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_0a43adbc-3c17-4f87-a554-1f0190aadb85/0002_wn18rr_unstructuredmodel +1037 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_0a43adbc-3c17-4f87-a554-1f0190aadb85/0003_wn18rr_unstructuredmodel +1038 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_68ded625-40f3-4824-8f7b-4a2719a9a8fd/0000_wn18rr_unstructuredmodel +1039 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_68ded625-40f3-4824-8f7b-4a2719a9a8fd/0002_wn18rr_unstructuredmodel +1040 
results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_7c6a7da5-a5ea-494f-8ca6-a2bd5d925c84/0000_wn18rr_unstructuredmodel +1041 results/unstructuredmodel/wn18rr/random/adam/2020-04-01-17-40_7c6a7da5-a5ea-494f-8ca6-a2bd5d925c84/0002_wn18rr_unstructuredmodel +1042 results/unstructuredmodel/wn18rr/random/adam/2020-05-05-15-54_6efd8889-5d58-482c-9040-25bf2a51f4d7/0000_wn18rr_unstructuredmodel +1043 results/unstructuredmodel/wn18rr/random/adam/2020-05-05-15-54_6efd8889-5d58-482c-9040-25bf2a51f4d7/0001_wn18rr_unstructuredmodel +1044 results/unstructuredmodel/wn18rr/random/adam/2020-05-05-15-54_bf64bef2-98d0-4d32-91a8-ca2adc6ad762/0000_wn18rr_unstructuredmodel +1045 results/unstructuredmodel/wn18rr/random/adam/2020-05-05-15-54_bf64bef2-98d0-4d32-91a8-ca2adc6ad762/0001_wn18rr_unstructuredmodel +1046 results/unstructuredmodel/wn18rr/random/adam/2020-05-05-15-54_e005668d-5a40-4a0b-a22b-9a0428488893/0000_wn18rr_unstructuredmodel +1047 results/unstructuredmodel/wn18rr/random/adam/2020-05-05-15-54_e005668d-5a40-4a0b-a22b-9a0428488893/0001_wn18rr_unstructuredmodel diff --git a/ablation/runtime/measurement.tsv b/ablation/runtime/measurement.tsv new file mode 100644 index 000000000..07dcc42e8 --- /dev/null +++ b/ablation/runtime/measurement.tsv @@ -0,0 +1,58901 @@ +experiment_id trial_id epoch_time +0 1 0 days 00:00:11.461380720 +0 2 0 days 00:00:21.630315660 +0 3 0 days 00:00:12.398622896 +0 4 0 days 00:00:13.070597686 +0 5 0 days 00:00:08.112013713 +0 6 0 days 00:00:25.705436644 +0 7 0 days 00:00:25.304123850 +0 8 0 days 00:00:10.046781146 +0 9 0 days 00:00:17.448964596 +0 10 0 days 00:00:15.965379066 +0 11 0 days 00:00:11.107641426 +0 12 0 days 00:00:10.072645976 +0 13 0 days 00:00:12.446077000 +0 14 0 days 00:00:14.837122408 +0 15 0 days 00:00:27.625268785 +0 16 0 days 00:00:11.435062160 +0 17 0 days 00:00:14.229728460 +0 18 0 days 00:00:13.680163433 +0 19 0 days 00:00:11.751886665 +0 20 0 days 00:00:12.857513365 +0 21 0 days 00:00:30.076121933 +0 22 0 days 
00:00:12.611849415 +0 23 0 days 00:00:11.622217400 +0 24 0 days 00:00:15.661978015 +0 25 0 days 00:00:08.007001793 +0 26 0 days 00:00:16.206931630 +0 27 0 days 00:00:24.865540575 +0 28 0 days 00:00:17.769832260 +0 29 0 days 00:00:14.839960944 +1 1 0 days 00:00:24.394775880 +1 2 0 days 00:00:23.643897426 +1 3 0 days 00:00:15.398611970 +1 4 0 days 00:00:24.146311526 +1 5 0 days 00:00:26.987800956 +1 6 0 days 00:00:09.687704580 +1 7 0 days 00:00:16.739018885 +1 8 0 days 00:00:22.619926933 +1 9 0 days 00:00:16.132080620 +1 10 0 days 00:00:23.586611800 +1 11 0 days 00:00:13.035104426 +1 12 0 days 00:00:16.695540573 +1 13 0 days 00:00:20.063662773 +1 14 0 days 00:00:28.399795933 +1 15 0 days 00:00:16.390855620 +1 16 0 days 00:00:13.562382626 +1 17 0 days 00:00:12.072898120 +1 18 0 days 00:00:17.550989860 +1 19 0 days 00:00:25.531318285 +1 20 0 days 00:00:11.967565443 +1 21 0 days 00:00:22.886616060 +1 22 0 days 00:00:24.424259720 +1 23 0 days 00:00:24.220469486 +1 24 0 days 00:00:11.236995973 +1 25 0 days 00:00:28.854670242 +2 1 0 days 00:00:13.912823308 +2 2 0 days 00:00:08.827296480 +2 3 0 days 00:00:05.032970903 +2 4 0 days 00:00:15.328466033 +2 5 0 days 00:00:11.139160493 +2 6 0 days 00:00:04.977470200 +2 7 0 days 00:00:07.248904075 +2 8 0 days 00:00:13.988536085 +2 9 0 days 00:00:04.403087573 +2 10 0 days 00:00:07.577754625 +2 11 0 days 00:00:14.824067153 +2 12 0 days 00:00:07.250697873 +2 13 0 days 00:00:12.368251295 +2 14 0 days 00:00:05.045915100 +2 15 0 days 00:00:05.621825540 +2 16 0 days 00:00:11.419871875 +2 17 0 days 00:00:07.495534065 +2 18 0 days 00:00:07.424172010 +2 19 0 days 00:00:08.034206884 +2 20 0 days 00:00:08.647012695 +2 21 0 days 00:00:11.243896553 +2 22 0 days 00:00:12.009057766 +2 23 0 days 00:00:12.490678146 +2 24 0 days 00:00:14.126761526 +2 25 0 days 00:00:07.289924170 +2 26 0 days 00:00:04.700983160 +2 27 0 days 00:00:05.459954115 +2 28 0 days 00:00:09.738589736 +2 29 0 days 00:00:13.467758246 +2 30 0 days 00:00:09.940686433 +2 31 0 days 
00:00:07.543331935 +2 32 0 days 00:00:12.724093466 +2 33 0 days 00:00:04.702633400 +2 34 0 days 00:00:07.148583340 +2 35 0 days 00:00:09.396400428 +2 36 0 days 00:00:12.961305860 +2 37 0 days 00:00:11.240998933 +2 38 0 days 00:00:12.359194300 +2 39 0 days 00:00:07.562145586 +2 40 0 days 00:00:12.436313460 +2 41 0 days 00:00:07.957089426 +2 42 0 days 00:00:05.557975885 +2 43 0 days 00:00:07.371575386 +2 44 0 days 00:00:04.970047875 +2 45 0 days 00:00:12.727054230 +2 46 0 days 00:00:07.197120865 +2 47 0 days 00:00:05.218519726 +2 48 0 days 00:00:06.931808884 +2 49 0 days 00:00:13.166151156 +3 1 0 days 00:00:15.068439070 +3 2 0 days 00:00:10.059031660 +3 3 0 days 00:00:14.392594193 +3 4 0 days 00:00:07.769064990 +3 5 0 days 00:00:13.618005708 +3 6 0 days 00:00:07.423836465 +3 7 0 days 00:00:05.496919673 +3 8 0 days 00:00:08.974908285 +3 9 0 days 00:00:09.553573620 +3 10 0 days 00:00:06.367480585 +3 11 0 days 00:00:07.495450440 +3 12 0 days 00:00:14.145148855 +3 13 0 days 00:00:10.293039610 +3 14 0 days 00:00:07.507277804 +3 15 0 days 00:00:07.448250610 +3 16 0 days 00:00:05.649153616 +3 17 0 days 00:00:08.175306148 +3 18 0 days 00:00:14.612975385 +3 19 0 days 00:00:08.313011040 +3 20 0 days 00:00:05.799380655 +3 21 0 days 00:00:12.805704060 +3 22 0 days 00:00:10.518556525 +3 23 0 days 00:00:04.887762285 +3 24 0 days 00:00:05.540030140 +3 25 0 days 00:00:07.394095475 +3 26 0 days 00:00:05.643808192 +3 27 0 days 00:00:08.587994330 +3 28 0 days 00:00:11.073731453 +3 29 0 days 00:00:15.951742731 +3 30 0 days 00:00:12.738994395 +3 31 0 days 00:00:12.420903633 +3 32 0 days 00:00:07.767432825 +3 33 0 days 00:00:13.895462520 +3 34 0 days 00:00:14.090237735 +3 35 0 days 00:00:08.052187706 +3 36 0 days 00:00:16.874641368 +3 37 0 days 00:00:08.939378497 +4 1 0 days 00:00:16.964925732 +4 2 0 days 00:00:25.526679740 +4 3 0 days 00:00:16.185916053 +4 4 0 days 00:00:24.233893986 +4 5 0 days 00:00:25.699729960 +4 6 0 days 00:00:14.841385010 +4 7 0 days 00:00:10.058467640 +4 8 0 days 
00:00:20.566708416 +4 9 0 days 00:00:31.194621605 +4 10 0 days 00:00:09.150092435 +4 11 0 days 00:00:31.404154435 +4 12 0 days 00:00:10.292553205 +4 13 0 days 00:00:18.167525280 +4 14 0 days 00:00:23.265791633 +4 15 0 days 00:00:09.619543048 +4 16 0 days 00:00:27.299562990 +4 17 0 days 00:00:28.353111810 +4 18 0 days 00:00:09.763906180 +4 19 0 days 00:00:16.527153080 +4 20 0 days 00:00:28.120190690 +5 1 0 days 00:00:05.368219848 +5 2 0 days 00:00:07.562422845 +5 3 0 days 00:00:07.936904420 +5 4 0 days 00:00:06.016980843 +5 5 0 days 00:00:07.441006760 +5 6 0 days 00:00:13.053412945 +5 7 0 days 00:00:16.185196662 +5 8 0 days 00:00:16.852507860 +5 9 0 days 00:00:08.803589345 +5 10 0 days 00:00:14.388629850 +5 11 0 days 00:00:12.908730910 +5 12 0 days 00:00:15.546614512 +5 13 0 days 00:00:08.334261223 +5 14 0 days 00:00:07.991820296 +5 15 0 days 00:00:10.669468573 +5 16 0 days 00:00:15.209024140 +5 17 0 days 00:00:08.830125925 +5 18 0 days 00:00:06.818706870 +5 19 0 days 00:00:12.343676850 +5 20 0 days 00:00:13.750662737 +5 21 0 days 00:00:12.886132185 +5 22 0 days 00:00:12.897162085 +5 23 0 days 00:00:07.462093980 +5 24 0 days 00:00:05.395856990 +5 25 0 days 00:00:14.108244500 +5 26 0 days 00:00:11.778020160 +5 27 0 days 00:00:07.039766168 +5 28 0 days 00:00:13.943351820 +5 29 0 days 00:00:09.997986888 +5 30 0 days 00:00:04.752276955 +5 31 0 days 00:00:08.546498900 +5 32 0 days 00:00:05.633424790 +5 33 0 days 00:00:12.462852700 +5 34 0 days 00:00:06.802587262 +6 1 0 days 00:00:22.074454246 +6 2 0 days 00:00:22.948236813 +6 3 0 days 00:00:19.187352560 +6 4 0 days 00:00:25.913579375 +6 5 0 days 00:00:10.530876246 +6 6 0 days 00:00:34.764865500 +6 7 0 days 00:00:19.474391232 +6 8 0 days 00:00:23.304678128 +6 9 0 days 00:00:10.904295984 +6 10 0 days 00:00:17.173184980 +6 11 0 days 00:00:36.539171871 +7 1 0 days 00:00:06.569192340 +7 2 0 days 00:00:08.200445920 +7 3 0 days 00:00:12.336487239 +7 4 0 days 00:00:07.861465840 +7 5 0 days 00:00:05.299603613 +7 6 0 days 
00:00:11.421740600 +7 7 0 days 00:00:16.064670446 +7 8 0 days 00:00:05.677920170 +7 9 0 days 00:00:09.003851746 +7 10 0 days 00:00:07.121395660 +7 11 0 days 00:00:10.174810570 +7 12 0 days 00:00:11.146339508 +7 13 0 days 00:00:14.456724706 +7 14 0 days 00:00:12.497188433 +7 15 0 days 00:00:10.391363120 +7 16 0 days 00:00:06.730634515 +7 17 0 days 00:00:10.371330180 +7 18 0 days 00:00:13.170357617 +7 19 0 days 00:00:06.687061960 +7 20 0 days 00:00:15.498249970 +7 21 0 days 00:00:17.723592625 +7 22 0 days 00:00:15.061000991 +7 23 0 days 00:00:07.366456593 +7 24 0 days 00:00:07.885804606 +8 1 0 days 00:00:35.929837530 +8 2 0 days 00:00:30.976135593 +8 3 0 days 00:00:34.629586055 +8 4 0 days 00:01:16.907746718 +8 5 0 days 00:00:38.303617616 +8 6 0 days 00:00:32.083666166 +8 7 0 days 00:00:35.023147325 +9 1 0 days 00:01:45.065244233 +9 2 0 days 00:01:43.652442300 +9 3 0 days 00:01:43.724765240 +9 4 0 days 00:00:35.301981710 +9 5 0 days 00:00:34.736168250 +9 6 0 days 00:00:35.287945010 +9 7 0 days 00:00:34.723320490 +9 8 0 days 00:01:57.622866815 +10 1 0 days 00:00:23.338357008 +10 2 0 days 00:00:21.666312335 +10 3 0 days 00:01:26.471604526 +10 4 0 days 00:01:12.548258810 +10 5 0 days 00:00:38.773140490 +10 6 0 days 00:00:38.771206440 +10 7 0 days 00:00:24.874763850 +11 1 0 days 00:00:34.695942266 +11 2 0 days 00:01:04.914118786 +11 3 0 days 00:00:20.334527186 +11 4 0 days 00:01:13.585766190 +11 5 0 days 00:00:19.440245680 +11 6 0 days 00:01:13.575502025 +11 7 0 days 00:01:04.882522220 +11 8 0 days 00:01:04.871250480 +11 9 0 days 00:00:35.653689820 +11 10 0 days 00:00:39.221591090 +11 11 0 days 00:00:21.740420220 +12 1 0 days 00:02:04.383975240 +12 2 0 days 00:01:01.999825990 +12 3 0 days 00:01:55.970806815 +12 4 0 days 00:00:38.733694890 +12 5 0 days 00:00:36.782311884 +13 1 0 days 00:01:23.308548782 +13 2 0 days 00:00:21.697037240 +13 3 0 days 00:01:12.572008675 +13 4 0 days 00:00:22.394948585 +13 5 0 days 00:00:21.944477485 +13 6 0 days 00:00:38.710447665 +13 7 0 days 
00:01:17.163941472 +13 8 0 days 00:00:39.535043360 +14 1 0 days 00:00:00.152641082 +14 2 0 days 00:00:00.129210920 +14 3 0 days 00:00:00.143545031 +14 4 0 days 00:00:00.130874256 +14 5 0 days 00:00:00.194372838 +14 6 0 days 00:00:00.134564718 +14 7 0 days 00:00:00.146634122 +14 8 0 days 00:00:00.125122335 +14 9 0 days 00:00:00.125127171 +14 10 0 days 00:00:00.149765856 +14 11 0 days 00:00:00.185291760 +14 12 0 days 00:00:00.150101192 +14 13 0 days 00:00:00.147550391 +14 14 0 days 00:00:00.141470071 +14 15 0 days 00:00:00.146069097 +14 16 0 days 00:00:00.192928962 +14 17 0 days 00:00:00.145807628 +14 18 0 days 00:00:00.185279140 +14 19 0 days 00:00:00.186413566 +14 20 0 days 00:00:00.182610833 +14 21 0 days 00:00:00.181369092 +14 22 0 days 00:00:00.133066888 +14 23 0 days 00:00:00.145510142 +14 24 0 days 00:00:00.123087218 +14 25 0 days 00:00:00.122907290 +14 27 0 days 00:00:00.149502448 +14 28 0 days 00:00:00.180792072 +14 29 0 days 00:00:00.145619045 +14 30 0 days 00:00:00.179565188 +14 31 0 days 00:00:00.154483088 +14 32 0 days 00:00:00.182682712 +14 33 0 days 00:00:00.188504523 +14 34 0 days 00:00:00.142310828 +14 35 0 days 00:00:00.122839870 +14 36 0 days 00:00:00.148856888 +14 37 0 days 00:00:00.147950166 +14 38 0 days 00:00:00.188811573 +14 39 0 days 00:00:00.134562374 +14 40 0 days 00:00:00.124702760 +14 41 0 days 00:00:00.188918633 +14 42 0 days 00:00:00.192815562 +14 43 0 days 00:00:00.123590858 +14 44 0 days 00:00:00.151876380 +14 45 0 days 00:00:00.125742176 +14 46 0 days 00:00:00.193040020 +14 47 0 days 00:00:00.124938387 +14 48 0 days 00:00:00.193360154 +14 49 0 days 00:00:00.143290387 +14 50 0 days 00:00:00.143370432 +14 51 0 days 00:00:00.188053397 +14 52 0 days 00:00:00.142701627 +14 53 0 days 00:00:00.101122793 +14 54 0 days 00:00:00.122630891 +14 55 0 days 00:00:00.121439298 +14 56 0 days 00:00:00.139137177 +14 57 0 days 00:00:00.123304658 +14 58 0 days 00:00:00.177211856 +14 59 0 days 00:00:00.183382086 +14 60 0 days 00:00:00.189432742 +14 61 0 
days 00:00:00.201042433 +14 62 0 days 00:00:00.128730780 +14 63 0 days 00:00:00.125898520 +14 64 0 days 00:00:00.120515867 +14 65 0 days 00:00:00.132781655 +14 66 0 days 00:00:00.120283527 +14 67 0 days 00:00:00.185763106 +14 68 0 days 00:00:00.195571582 +14 69 0 days 00:00:00.134229240 +14 70 0 days 00:00:00.143968972 +14 71 0 days 00:00:00.176056293 +14 72 0 days 00:00:00.139711912 +14 73 0 days 00:00:00.188234600 +14 74 0 days 00:00:00.121030868 +14 75 0 days 00:00:00.138512387 +14 76 0 days 00:00:00.152464455 +14 77 0 days 00:00:00.145666917 +14 78 0 days 00:00:00.190899763 +14 79 0 days 00:00:00.143139580 +14 80 0 days 00:00:00.186699663 +14 81 0 days 00:00:00.143506690 +14 82 0 days 00:00:00.189114214 +14 83 0 days 00:00:00.136683914 +14 84 0 days 00:00:00.122782963 +14 85 0 days 00:00:00.188385926 +14 86 0 days 00:00:00.183997620 +14 87 0 days 00:00:00.177584024 +14 88 0 days 00:00:00.148543084 +14 89 0 days 00:00:00.141159017 +14 90 0 days 00:00:00.179949483 +14 91 0 days 00:00:00.144242932 +14 92 0 days 00:00:00.131506213 +14 93 0 days 00:00:00.177841376 +14 94 0 days 00:00:00.143149524 +14 95 0 days 00:00:00.144979325 +14 96 0 days 00:00:00.145387912 +14 97 0 days 00:00:00.131735405 +14 98 0 days 00:00:00.120251695 +14 99 0 days 00:00:00.128938135 +14 100 0 days 00:00:00.122682221 +15 1 0 days 00:00:00.097525726 +15 2 0 days 00:00:00.082240546 +15 3 0 days 00:00:00.102898897 +15 4 0 days 00:00:00.073373742 +15 5 0 days 00:00:00.066959232 +15 6 0 days 00:00:00.106011933 +15 7 0 days 00:00:00.077657468 +15 8 0 days 00:00:00.108543385 +15 9 0 days 00:00:00.079102587 +15 10 0 days 00:00:00.073695692 +15 11 0 days 00:00:00.051842953 +15 12 0 days 00:00:00.102020774 +15 13 0 days 00:00:00.083239636 +15 14 0 days 00:00:00.081422887 +15 15 0 days 00:00:00.082644715 +15 16 0 days 00:00:00.057481526 +15 17 0 days 00:00:00.068216716 +15 18 0 days 00:00:00.069053033 +15 19 0 days 00:00:00.068568218 +15 20 0 days 00:00:00.079315366 +15 21 0 days 00:00:00.066010464 +15 
22 0 days 00:00:00.066979708 +15 23 0 days 00:00:00.071781102 +15 24 0 days 00:00:00.103799733 +15 25 0 days 00:00:00.068088062 +15 26 0 days 00:00:00.072944229 +15 27 0 days 00:00:00.076456497 +15 28 0 days 00:00:00.084789064 +15 29 0 days 00:00:00.109129654 +15 30 0 days 00:00:00.055484953 +15 31 0 days 00:00:00.105425297 +15 32 0 days 00:00:00.102740305 +15 33 0 days 00:00:00.081889581 +15 34 0 days 00:00:00.080326002 +15 35 0 days 00:00:00.055765700 +15 36 0 days 00:00:00.081813841 +15 37 0 days 00:00:00.069498055 +15 38 0 days 00:00:00.104672960 +15 39 0 days 00:00:00.105196188 +15 40 0 days 00:00:00.082969160 +15 41 0 days 00:00:00.105321990 +15 42 0 days 00:00:00.107291492 +15 43 0 days 00:00:00.109073954 +15 44 0 days 00:00:00.099260810 +15 45 0 days 00:00:00.102317183 +15 46 0 days 00:00:00.111659435 +15 47 0 days 00:00:00.079190146 +15 48 0 days 00:00:00.106823815 +15 49 0 days 00:00:00.066937281 +15 50 0 days 00:00:00.068076264 +15 51 0 days 00:00:00.083326466 +15 52 0 days 00:00:00.079630558 +15 53 0 days 00:00:00.073808569 +15 54 0 days 00:00:00.106792043 +15 55 0 days 00:00:00.054575820 +15 56 0 days 00:00:00.082844033 +15 57 0 days 00:00:00.079202023 +15 58 0 days 00:00:00.101397577 +15 59 0 days 00:00:00.069469570 +15 60 0 days 00:00:00.079761991 +15 61 0 days 00:00:00.083530455 +15 62 0 days 00:00:00.110559540 +15 63 0 days 00:00:00.078855425 +15 64 0 days 00:00:00.067804605 +15 65 0 days 00:00:00.110628466 +15 66 0 days 00:00:00.109314296 +15 67 0 days 00:00:00.077064227 +15 68 0 days 00:00:00.072147892 +15 69 0 days 00:00:00.078375722 +15 71 0 days 00:00:00.079839765 +15 72 0 days 00:00:00.070193852 +15 73 0 days 00:00:00.066581205 +15 74 0 days 00:00:00.107038163 +15 75 0 days 00:00:00.105387840 +15 76 0 days 00:00:00.106729855 +15 77 0 days 00:00:00.109456062 +15 78 0 days 00:00:00.080453408 +15 79 0 days 00:00:00.108674402 +15 80 0 days 00:00:00.060690386 +15 81 0 days 00:00:00.051441500 +15 82 0 days 00:00:00.051899706 +15 83 0 days 
00:00:00.108420382 +15 84 0 days 00:00:00.068013987 +15 85 0 days 00:00:00.067686477 +15 86 0 days 00:00:00.107967122 +15 87 0 days 00:00:00.054694333 +15 88 0 days 00:00:00.103928582 +15 89 0 days 00:00:00.069493194 +15 90 0 days 00:00:00.081007968 +15 91 0 days 00:00:00.066787550 +15 92 0 days 00:00:00.118315009 +15 93 0 days 00:00:00.105600850 +15 94 0 days 00:00:00.078362604 +15 95 0 days 00:00:00.054687926 +15 96 0 days 00:00:00.079379314 +15 97 0 days 00:00:00.084504627 +15 98 0 days 00:00:00.067569276 +15 99 0 days 00:00:00.079314690 +15 100 0 days 00:00:00.078932448 +16 1 0 days 00:00:00.185991240 +16 2 0 days 00:00:00.114313530 +16 3 0 days 00:00:00.125335406 +16 4 0 days 00:00:00.188276066 +16 5 0 days 00:00:00.185110520 +16 6 0 days 00:00:00.195788620 +16 7 0 days 00:00:00.100717200 +16 8 0 days 00:00:00.113492340 +16 9 0 days 00:00:00.102116660 +16 10 0 days 00:00:00.172998753 +16 11 0 days 00:00:00.125005553 +16 12 0 days 00:00:00.191035200 +16 13 0 days 00:00:00.120803833 +16 14 0 days 00:00:00.173130900 +16 15 0 days 00:00:00.165954533 +16 16 0 days 00:00:00.118605360 +16 17 0 days 00:00:00.142257120 +16 18 0 days 00:00:00.120085860 +16 19 0 days 00:00:00.122585473 +16 20 0 days 00:00:00.124015000 +16 21 0 days 00:00:00.222176955 +16 22 0 days 00:00:00.106183686 +16 23 0 days 00:00:00.116476206 +16 24 0 days 00:00:00.104197313 +16 25 0 days 00:00:00.163257966 +16 26 0 days 00:00:00.162362446 +16 27 0 days 00:00:00.115717786 +16 28 0 days 00:00:00.105641246 +16 29 0 days 00:00:00.099120153 +16 30 0 days 00:00:00.120181140 +16 31 0 days 00:00:00.121190133 +16 32 0 days 00:00:00.120139493 +16 33 0 days 00:00:00.109522695 +16 34 0 days 00:00:00.118234686 +16 35 0 days 00:00:00.099261480 +16 36 0 days 00:00:00.184893020 +16 37 0 days 00:00:00.106455473 +16 38 0 days 00:00:00.120080180 +16 39 0 days 00:00:00.132471960 +16 40 0 days 00:00:00.100141806 +16 41 0 days 00:00:00.187176820 +16 42 0 days 00:00:00.121303893 +16 43 0 days 00:00:00.170863426 +16 44 0 
days 00:00:00.196351610 +16 45 0 days 00:00:00.183632720 +16 46 0 days 00:00:00.126814600 +16 47 0 days 00:00:00.127479293 +16 48 0 days 00:00:00.125835940 +16 50 0 days 00:00:00.183838740 +16 51 0 days 00:00:00.202281950 +16 52 0 days 00:00:00.112814095 +16 53 0 days 00:00:00.168653833 +16 54 0 days 00:00:00.174422080 +16 55 0 days 00:00:00.107944260 +16 56 0 days 00:00:00.162103693 +16 58 0 days 00:00:00.120742673 +16 59 0 days 00:00:00.124535553 +16 60 0 days 00:00:00.215906742 +16 61 0 days 00:00:00.115676505 +16 62 0 days 00:00:00.098249720 +16 63 0 days 00:00:00.167741946 +16 64 0 days 00:00:00.170526686 +16 65 0 days 00:00:00.169995286 +16 66 0 days 00:00:00.106256213 +16 67 0 days 00:00:00.168225780 +16 68 0 days 00:00:00.131751880 +16 69 0 days 00:00:00.170802280 +16 70 0 days 00:00:00.102945446 +16 71 0 days 00:00:00.118540266 +16 72 0 days 00:00:00.104551986 +16 73 0 days 00:00:00.142681112 +16 74 0 days 00:00:00.152775516 +16 75 0 days 00:00:00.204027760 +16 77 0 days 00:00:00.117221873 +16 78 0 days 00:00:00.168863620 +16 79 0 days 00:00:00.118947706 +16 80 0 days 00:00:00.175193800 +16 81 0 days 00:00:00.108215766 +16 82 0 days 00:00:00.189343365 +16 83 0 days 00:00:00.103033653 +16 84 0 days 00:00:00.105838460 +16 85 0 days 00:00:00.118732673 +16 86 0 days 00:00:00.095741746 +16 87 0 days 00:00:00.122194176 +16 88 0 days 00:00:00.122420580 +16 89 0 days 00:00:00.165749540 +16 90 0 days 00:00:00.123422000 +16 91 0 days 00:00:00.098523260 +16 92 0 days 00:00:00.135485840 +16 93 0 days 00:00:00.212628884 +16 94 0 days 00:00:00.117244608 +16 96 0 days 00:00:00.117446226 +16 97 0 days 00:00:00.098596646 +16 98 0 days 00:00:00.106108525 +16 99 0 days 00:00:00.165857033 +16 100 0 days 00:00:00.135870412 +17 2 0 days 00:00:00.126485233 +17 3 0 days 00:00:00.110185206 +17 4 0 days 00:00:00.125576433 +17 5 0 days 00:00:00.128701693 +17 6 0 days 00:00:00.123361893 +17 7 0 days 00:00:00.123346353 +17 8 0 days 00:00:00.122449133 +17 9 0 days 00:00:00.148262704 
+17 10 0 days 00:00:00.183262773 +17 11 0 days 00:00:00.129007773 +17 12 0 days 00:00:00.203829485 +17 13 0 days 00:00:00.107801926 +17 14 0 days 00:00:00.123920260 +17 15 0 days 00:00:00.102897140 +17 16 0 days 00:00:00.107877380 +17 18 0 days 00:00:00.103079280 +17 19 0 days 00:00:00.128706133 +17 20 0 days 00:00:00.169924866 +17 21 0 days 00:00:00.127676142 +17 22 0 days 00:00:00.126563260 +17 23 0 days 00:00:00.110097173 +17 24 0 days 00:00:00.104110406 +17 25 0 days 00:00:00.107808600 +17 26 0 days 00:00:00.102306620 +17 27 0 days 00:00:00.183181413 +17 28 0 days 00:00:00.130989306 +17 29 0 days 00:00:00.175572380 +17 30 0 days 00:00:00.127128093 +17 31 0 days 00:00:00.103230240 +17 32 0 days 00:00:00.148492636 +17 33 0 days 00:00:00.104956653 +17 34 0 days 00:00:00.127379600 +17 35 0 days 00:00:00.127962853 +17 37 0 days 00:00:00.189762990 +17 38 0 days 00:00:00.127394273 +17 39 0 days 00:00:00.107948253 +17 40 0 days 00:00:00.127663366 +17 41 0 days 00:00:00.106122913 +17 42 0 days 00:00:00.135300480 +17 44 0 days 00:00:00.158307117 +17 45 0 days 00:00:00.133548700 +17 47 0 days 00:00:00.174306880 +17 48 0 days 00:00:00.125275746 +17 49 0 days 00:00:00.169123533 +17 50 0 days 00:00:00.123119660 +17 51 0 days 00:00:00.129195633 +17 52 0 days 00:00:00.174552400 +17 53 0 days 00:00:00.175063713 +17 54 0 days 00:00:00.106257760 +17 55 0 days 00:00:00.104924740 +17 56 0 days 00:00:00.104762553 +17 57 0 days 00:00:00.124845453 +17 58 0 days 00:00:00.196449925 +17 59 0 days 00:00:00.106006393 +17 60 0 days 00:00:00.177069953 +17 61 0 days 00:00:00.195549115 +17 62 0 days 00:00:00.177769320 +17 63 0 days 00:00:00.171792833 +17 64 0 days 00:00:00.196175155 +17 65 0 days 00:00:00.129121146 +17 67 0 days 00:00:00.132759653 +17 68 0 days 00:00:00.179168633 +17 69 0 days 00:00:00.171179160 +17 70 0 days 00:00:00.172676353 +17 71 0 days 00:00:00.128971853 +17 72 0 days 00:00:00.126184553 +17 73 0 days 00:00:00.173308553 +17 74 0 days 00:00:00.134127980 +17 75 0 days 
00:00:00.125320856 +17 76 0 days 00:00:00.177506280 +17 77 0 days 00:00:00.112618600 +17 78 0 days 00:00:00.132759873 +17 79 0 days 00:00:00.147047850 +17 80 0 days 00:00:00.144245045 +17 81 0 days 00:00:00.108282166 +17 82 0 days 00:00:00.199192130 +17 83 0 days 00:00:00.184600800 +17 84 0 days 00:00:00.180029766 +17 85 0 days 00:00:00.106770653 +17 86 0 days 00:00:00.179096380 +17 87 0 days 00:00:00.129417153 +17 88 0 days 00:00:00.150519896 +17 89 0 days 00:00:00.105155906 +17 91 0 days 00:00:00.127331280 +17 92 0 days 00:00:00.104851326 +17 93 0 days 00:00:00.150928528 +17 94 0 days 00:00:00.135608846 +17 95 0 days 00:00:00.107849386 +17 96 0 days 00:00:00.109948966 +17 97 0 days 00:00:00.168155493 +17 98 0 days 00:00:00.105190400 +17 99 0 days 00:00:00.129065213 +17 100 0 days 00:00:00.173923460 +18 1 0 days 00:00:00.103901470 +18 2 0 days 00:00:00.096725160 +18 3 0 days 00:00:00.098724446 +18 4 0 days 00:00:00.101529166 +18 5 0 days 00:00:00.109163605 +18 6 0 days 00:00:00.072046320 +18 7 0 days 00:00:00.095802060 +18 8 0 days 00:00:00.059336620 +18 9 0 days 00:00:00.069189170 +18 10 0 days 00:00:00.102484253 +18 11 0 days 00:00:00.072240986 +18 12 0 days 00:00:00.066361610 +18 13 0 days 00:00:00.103611860 +18 14 0 days 00:00:00.075173213 +18 15 0 days 00:00:00.059491400 +18 16 0 days 00:00:00.069231013 +18 17 0 days 00:00:00.082696453 +18 18 0 days 00:00:00.096719166 +18 19 0 days 00:00:00.058032026 +18 20 0 days 00:00:00.098551853 +18 21 0 days 00:00:00.107654405 +18 22 0 days 00:00:00.059925400 +18 23 0 days 00:00:00.058163626 +18 24 0 days 00:00:00.097011893 +18 25 0 days 00:00:00.122128988 +18 26 0 days 00:00:00.068458546 +18 27 0 days 00:00:00.068680653 +18 28 0 days 00:00:00.071841086 +18 29 0 days 00:00:00.062783420 +18 30 0 days 00:00:00.098430820 +18 31 0 days 00:00:00.077625415 +18 32 0 days 00:00:00.082080852 +18 33 0 days 00:00:00.059127213 +18 34 0 days 00:00:00.060271526 +18 35 0 days 00:00:00.072600133 +18 36 0 days 00:00:00.071600893 +18 37 0 
days 00:00:00.058462773 +18 38 0 days 00:00:00.106224640 +18 39 0 days 00:00:00.061771193 +18 40 0 days 00:00:00.097092213 +18 41 0 days 00:00:00.062857580 +18 42 0 days 00:00:00.076202445 +18 43 0 days 00:00:00.105934370 +18 44 0 days 00:00:00.058290300 +18 45 0 days 00:00:00.059431766 +18 46 0 days 00:00:00.070227046 +18 47 0 days 00:00:00.059781260 +18 48 0 days 00:00:00.062074013 +18 49 0 days 00:00:00.058865253 +18 50 0 days 00:00:00.105344155 +18 51 0 days 00:00:00.063609880 +18 52 0 days 00:00:00.058890580 +18 53 0 days 00:00:00.106238860 +18 54 0 days 00:00:00.073014173 +18 55 0 days 00:00:00.065830480 +18 56 0 days 00:00:00.101518993 +18 57 0 days 00:00:00.061401360 +18 58 0 days 00:00:00.072128333 +18 59 0 days 00:00:00.102245966 +18 60 0 days 00:00:00.064418620 +18 61 0 days 00:00:00.063474906 +18 62 0 days 00:00:00.081314326 +18 63 0 days 00:00:00.080186672 +18 64 0 days 00:00:00.062962666 +18 65 0 days 00:00:00.068972953 +18 66 0 days 00:00:00.069977966 +18 67 0 days 00:00:00.082664108 +18 68 0 days 00:00:00.101923446 +18 69 0 days 00:00:00.060598826 +18 70 0 days 00:00:00.057988626 +18 72 0 days 00:00:00.095321913 +18 73 0 days 00:00:00.064210150 +18 74 0 days 00:00:00.063573000 +18 75 0 days 00:00:00.070765286 +18 76 0 days 00:00:00.068893540 +18 77 0 days 00:00:00.064163460 +18 78 0 days 00:00:00.059003566 +18 79 0 days 00:00:00.095910166 +18 80 0 days 00:00:00.097066093 +18 81 0 days 00:00:00.057665113 +18 82 0 days 00:00:00.096813680 +18 83 0 days 00:00:00.075233970 +18 84 0 days 00:00:00.062738520 +18 85 0 days 00:00:00.068243112 +18 86 0 days 00:00:00.094542966 +18 87 0 days 00:00:00.096724133 +18 88 0 days 00:00:00.104982870 +18 89 0 days 00:00:00.096059446 +18 90 0 days 00:00:00.096264066 +18 91 0 days 00:00:00.072989896 +18 92 0 days 00:00:00.067178700 +18 93 0 days 00:00:00.096134633 +18 94 0 days 00:00:00.059339580 +18 95 0 days 00:00:00.095849353 +18 96 0 days 00:00:00.075491560 +18 97 0 days 00:00:00.078152400 +18 98 0 days 
00:00:00.070524573 +18 99 0 days 00:00:00.071533053 +18 100 0 days 00:00:00.058379046 +19 1 0 days 00:00:00.060063566 +19 2 0 days 00:00:00.068318310 +19 3 0 days 00:00:00.069060480 +19 4 0 days 00:00:00.071847846 +19 5 0 days 00:00:00.073040033 +19 6 0 days 00:00:00.105273113 +19 7 0 days 00:00:00.124635140 +19 8 0 days 00:00:00.103188693 +19 9 0 days 00:00:00.063699720 +19 10 0 days 00:00:00.074375800 +19 11 0 days 00:00:00.063269640 +19 12 0 days 00:00:00.070199680 +19 13 0 days 00:00:00.064080353 +19 14 0 days 00:00:00.060977186 +19 15 0 days 00:00:00.065393633 +19 16 0 days 00:00:00.105308120 +19 17 0 days 00:00:00.066915660 +19 19 0 days 00:00:00.080166660 +19 20 0 days 00:00:00.067467586 +19 21 0 days 00:00:00.065850866 +19 22 0 days 00:00:00.068659946 +19 23 0 days 00:00:00.091453253 +19 24 0 days 00:00:00.090819560 +19 25 0 days 00:00:00.089687346 +19 26 0 days 00:00:00.098303620 +19 27 0 days 00:00:00.068907973 +19 28 0 days 00:00:00.059984720 +19 29 0 days 00:00:00.102638815 +19 30 0 days 00:00:00.103394075 +19 31 0 days 00:00:00.089320666 +19 32 0 days 00:00:00.101679945 +19 33 0 days 00:00:00.069104930 +19 34 0 days 00:00:00.067099413 +19 35 0 days 00:00:00.105879096 +19 36 0 days 00:00:00.077510172 +19 37 0 days 00:00:00.066847706 +19 38 0 days 00:00:00.059916553 +19 39 0 days 00:00:00.101060970 +19 40 0 days 00:00:00.058433020 +19 41 0 days 00:00:00.091279633 +19 42 0 days 00:00:00.072645010 +19 43 0 days 00:00:00.091151913 +19 44 0 days 00:00:00.066214380 +19 45 0 days 00:00:00.060444380 +19 46 0 days 00:00:00.093788066 +19 47 0 days 00:00:00.104064824 +19 48 0 days 00:00:00.056676386 +19 49 0 days 00:00:00.056785900 +19 50 0 days 00:00:00.096054606 +19 51 0 days 00:00:00.056996813 +19 52 0 days 00:00:00.066267966 +19 53 0 days 00:00:00.060761093 +19 54 0 days 00:00:00.076752712 +19 55 0 days 00:00:00.089871860 +19 56 0 days 00:00:00.111951610 +19 57 0 days 00:00:00.059647346 +19 58 0 days 00:00:00.091388533 +19 59 0 days 00:00:00.077004632 +19 60 0 
days 00:00:00.058143800 +19 61 0 days 00:00:00.065885510 +19 62 0 days 00:00:00.066751320 +19 63 0 days 00:00:00.102309175 +19 64 0 days 00:00:00.100538615 +19 65 0 days 00:00:00.106455044 +19 66 0 days 00:00:00.057669013 +19 67 0 days 00:00:00.094666626 +19 68 0 days 00:00:00.089415800 +19 69 0 days 00:00:00.091595380 +19 70 0 days 00:00:00.057210873 +19 71 0 days 00:00:00.105531736 +19 72 0 days 00:00:00.068718520 +19 73 0 days 00:00:00.091049513 +19 74 0 days 00:00:00.059422573 +19 75 0 days 00:00:00.066229760 +19 76 0 days 00:00:00.089582793 +19 77 0 days 00:00:00.066697286 +19 78 0 days 00:00:00.057813480 +19 79 0 days 00:00:00.099381820 +19 80 0 days 00:00:00.057566373 +19 81 0 days 00:00:00.060049793 +19 82 0 days 00:00:00.056713146 +19 83 0 days 00:00:00.066831473 +19 84 0 days 00:00:00.091987193 +19 85 0 days 00:00:00.092198340 +19 86 0 days 00:00:00.072679490 +19 87 0 days 00:00:00.057681733 +19 88 0 days 00:00:00.091632326 +19 89 0 days 00:00:00.057667800 +19 90 0 days 00:00:00.066331693 +19 91 0 days 00:00:00.060280266 +19 92 0 days 00:00:00.057744233 +19 93 0 days 00:00:00.066994453 +19 94 0 days 00:00:00.056129960 +19 95 0 days 00:00:00.067092406 +19 96 0 days 00:00:00.072536000 +19 97 0 days 00:00:00.068514606 +19 98 0 days 00:00:00.056630006 +19 99 0 days 00:00:00.100481145 +19 100 0 days 00:00:00.068148360 +20 1 0 days 00:00:00.685801646 +20 2 0 days 00:00:00.373266193 +20 3 0 days 00:00:00.275506113 +20 4 0 days 00:00:00.479741597 +20 5 0 days 00:00:01.308655275 +20 6 0 days 00:00:00.560152550 +20 7 0 days 00:00:00.384216986 +20 8 0 days 00:00:00.529562973 +20 9 0 days 00:00:00.565561950 +20 10 0 days 00:00:00.695766666 +20 11 0 days 00:00:01.302174620 +20 12 0 days 00:00:02.335419453 +20 13 0 days 00:00:00.558392900 +20 14 0 days 00:00:00.941435667 +20 15 0 days 00:00:01.506148689 +20 16 0 days 00:00:00.760062633 +20 17 0 days 00:00:01.506647084 +20 18 0 days 00:00:00.671194695 +20 19 0 days 00:00:01.539830547 +20 20 0 days 00:00:01.167046173 +20 
21 0 days 00:00:01.187729225 +20 22 0 days 00:00:00.736627706 +20 23 0 days 00:00:00.517732913 +20 24 0 days 00:00:00.623379140 +20 25 0 days 00:00:00.520854026 +20 26 0 days 00:00:00.448819853 +20 27 0 days 00:00:00.947808134 +20 29 0 days 00:00:00.565702200 +20 30 0 days 00:00:00.442661853 +20 31 0 days 00:00:00.832378944 +20 32 0 days 00:00:00.484416766 +20 33 0 days 00:00:00.722937520 +20 34 0 days 00:00:00.586908540 +20 35 0 days 00:00:00.518067940 +20 36 0 days 00:00:01.190403540 +20 37 0 days 00:00:00.953274574 +20 38 0 days 00:00:00.966471833 +20 39 0 days 00:00:00.353545902 +20 40 0 days 00:00:01.157784513 +20 41 0 days 00:00:01.290962673 +20 42 0 days 00:00:01.758800297 +20 43 0 days 00:00:01.038499973 +20 44 0 days 00:00:00.273434006 +20 45 0 days 00:00:00.869984920 +20 46 0 days 00:00:01.298725433 +20 47 0 days 00:00:00.385395593 +20 48 0 days 00:00:00.316778706 +20 49 0 days 00:00:00.879529726 +20 50 0 days 00:00:01.078881300 +20 51 0 days 00:00:00.719204320 +20 52 0 days 00:00:00.692842713 +20 53 0 days 00:00:00.792226044 +20 54 0 days 00:00:00.571522866 +20 55 0 days 00:00:00.867309516 +20 57 0 days 00:00:01.629259918 +20 58 0 days 00:00:00.656330380 +20 59 0 days 00:00:00.875106313 +20 60 0 days 00:00:01.756434106 +20 62 0 days 00:00:00.539874266 +20 63 0 days 00:00:00.777871660 +20 64 0 days 00:00:00.707875860 +20 65 0 days 00:00:00.951313716 +20 69 0 days 00:00:01.051647240 +20 70 0 days 00:00:01.033621353 +20 71 0 days 00:00:00.352866700 +20 73 0 days 00:00:00.957423400 +20 74 0 days 00:00:00.771511592 +20 75 0 days 00:00:02.005062210 +20 77 0 days 00:00:01.459481103 +20 78 0 days 00:00:01.411327806 +20 79 0 days 00:00:00.548399686 +20 80 0 days 00:00:00.757134586 +20 81 0 days 00:00:01.550304563 +20 82 0 days 00:00:00.945877800 +20 83 0 days 00:00:01.617673788 +20 84 0 days 00:00:01.035305153 +20 85 0 days 00:00:01.534980324 +20 86 0 days 00:00:01.472243580 +20 87 0 days 00:00:00.987204845 +20 88 0 days 00:00:01.476752897 +20 89 0 days 
00:00:01.547386746 +20 90 0 days 00:00:00.726645114 +20 91 0 days 00:00:00.815706200 +20 92 0 days 00:00:00.398364833 +20 94 0 days 00:00:00.949265280 +20 95 0 days 00:00:01.001911146 +20 96 0 days 00:00:00.784822010 +20 97 0 days 00:00:00.421687660 +20 98 0 days 00:00:01.029540046 +20 99 0 days 00:00:00.479128323 +20 100 0 days 00:00:01.520943713 +21 1 0 days 00:00:00.596259833 +21 2 0 days 00:00:00.593385976 +21 3 0 days 00:00:00.514660835 +21 4 0 days 00:00:00.442404805 +21 5 0 days 00:00:00.509920493 +21 7 0 days 00:00:01.207483006 +21 8 0 days 00:00:01.589027256 +21 9 0 days 00:00:00.948765700 +21 10 0 days 00:00:00.961372266 +21 11 0 days 00:00:01.214025744 +21 12 0 days 00:00:00.388163900 +21 13 0 days 00:00:00.465822526 +21 14 0 days 00:00:00.556076132 +21 15 0 days 00:00:01.059511873 +21 16 0 days 00:00:01.723381066 +21 17 0 days 00:00:01.153231033 +21 18 0 days 00:00:00.722071525 +21 19 0 days 00:00:01.385334882 +21 20 0 days 00:00:00.477725993 +21 21 0 days 00:00:00.291942613 +21 22 0 days 00:00:00.248951375 +21 23 0 days 00:00:00.413148033 +21 24 0 days 00:00:00.254258653 +21 25 0 days 00:00:00.216226206 +21 26 0 days 00:00:00.388729646 +21 28 0 days 00:00:00.553954713 +21 29 0 days 00:00:00.781332140 +21 30 0 days 00:00:00.824332353 +21 31 0 days 00:00:00.390627560 +21 32 0 days 00:00:00.447184693 +21 34 0 days 00:00:00.527363916 +21 35 0 days 00:00:00.593350740 +21 36 0 days 00:00:00.233363253 +21 38 0 days 00:00:00.542676400 +21 39 0 days 00:00:00.905961780 +21 40 0 days 00:00:00.593377940 +21 41 0 days 00:00:00.251558220 +21 42 0 days 00:00:00.401780526 +21 43 0 days 00:00:00.816293025 +21 44 0 days 00:00:01.247351415 +21 45 0 days 00:00:00.848998190 +21 46 0 days 00:00:00.462253275 +21 47 0 days 00:00:00.577870286 +21 48 0 days 00:00:00.822580306 +21 49 0 days 00:00:00.331093913 +21 51 0 days 00:00:00.629008960 +21 52 0 days 00:00:01.008263790 +21 54 0 days 00:00:01.146148093 +21 55 0 days 00:00:00.333419286 +21 56 0 days 00:00:01.007768273 +21 57 
0 days 00:00:00.421319175 +21 58 0 days 00:00:00.448176773 +21 59 0 days 00:00:00.234598486 +21 60 0 days 00:00:00.439741400 +21 61 0 days 00:00:00.310004053 +21 62 0 days 00:00:00.518455560 +21 63 0 days 00:00:00.376762253 +21 64 0 days 00:00:00.276146115 +21 65 0 days 00:00:00.796084800 +21 66 0 days 00:00:00.309732246 +21 67 0 days 00:00:00.819033206 +21 68 0 days 00:00:00.531604530 +21 69 0 days 00:00:00.416107960 +21 70 0 days 00:00:01.084666233 +21 71 0 days 00:00:01.071762786 +21 72 0 days 00:00:00.298700780 +21 73 0 days 00:00:00.434523466 +21 74 0 days 00:00:00.323425556 +21 75 0 days 00:00:00.976590745 +21 77 0 days 00:00:00.815249940 +21 78 0 days 00:00:00.845081380 +21 79 0 days 00:00:00.765543226 +21 80 0 days 00:00:00.742390253 +21 81 0 days 00:00:00.359277035 +21 82 0 days 00:00:00.730400220 +21 84 0 days 00:00:00.538766443 +21 85 0 days 00:00:01.001884648 +21 86 0 days 00:00:00.325883040 +21 87 0 days 00:00:00.729826013 +21 88 0 days 00:00:00.425593406 +21 89 0 days 00:00:00.282493266 +21 91 0 days 00:00:00.992427460 +21 92 0 days 00:00:00.423255400 +21 93 0 days 00:00:01.002846517 +21 94 0 days 00:00:00.409620160 +21 95 0 days 00:00:00.815710740 +21 96 0 days 00:00:00.312822153 +21 97 0 days 00:00:00.445934760 +21 98 0 days 00:00:00.748475780 +21 99 0 days 00:00:00.704254773 +21 100 0 days 00:00:01.018716753 +22 1 0 days 00:00:00.405989800 +22 2 0 days 00:00:00.357181713 +22 3 0 days 00:00:00.353477180 +22 4 0 days 00:00:00.191689773 +22 7 0 days 00:00:00.231384553 +22 9 0 days 00:00:00.304483273 +22 10 0 days 00:00:00.360413140 +22 11 0 days 00:00:00.155384828 +22 12 0 days 00:00:00.137483890 +22 13 0 days 00:00:00.249786673 +22 14 0 days 00:00:00.196228440 +22 15 0 days 00:00:00.486570851 +22 16 0 days 00:00:00.347158960 +22 17 0 days 00:00:00.218891133 +22 18 0 days 00:00:00.225956573 +22 19 0 days 00:00:00.128557270 +22 20 0 days 00:00:00.190787240 +22 21 0 days 00:00:00.203246273 +22 22 0 days 00:00:00.181334840 +22 23 0 days 
00:00:00.239213333 +22 24 0 days 00:00:00.289346353 +22 25 0 days 00:00:00.473285302 +22 26 0 days 00:00:00.163080873 +22 27 0 days 00:00:00.240881806 +22 28 0 days 00:00:00.488788035 +22 29 0 days 00:00:00.244908675 +22 30 0 days 00:00:00.123837365 +22 32 0 days 00:00:00.293151213 +22 34 0 days 00:00:00.354056280 +22 35 0 days 00:00:00.440376905 +22 36 0 days 00:00:00.382449406 +22 37 0 days 00:00:00.365444366 +22 38 0 days 00:00:00.262031333 +22 39 0 days 00:00:00.228970073 +22 40 0 days 00:00:00.283581706 +22 41 0 days 00:00:00.351875293 +22 42 0 days 00:00:00.267479426 +22 43 0 days 00:00:00.149330090 +22 44 0 days 00:00:00.479594343 +22 45 0 days 00:00:00.182483588 +22 46 0 days 00:00:00.293280433 +22 47 0 days 00:00:00.209924186 +22 48 0 days 00:00:00.478443067 +22 49 0 days 00:00:00.113880380 +22 50 0 days 00:00:00.205474940 +22 53 0 days 00:00:00.111156253 +22 54 0 days 00:00:00.374528766 +22 55 0 days 00:00:00.131103385 +22 56 0 days 00:00:00.372320820 +22 58 0 days 00:00:00.298870986 +22 59 0 days 00:00:00.219511466 +22 60 0 days 00:00:00.531242153 +22 61 0 days 00:00:00.212934435 +22 62 0 days 00:00:00.131039146 +22 63 0 days 00:00:00.468845606 +22 64 0 days 00:00:00.133616580 +22 65 0 days 00:00:00.437495640 +22 66 0 days 00:00:00.159987192 +22 67 0 days 00:00:00.373277280 +22 68 0 days 00:00:00.419744626 +22 70 0 days 00:00:00.190247426 +22 71 0 days 00:00:00.256956106 +22 72 0 days 00:00:00.259904408 +22 73 0 days 00:00:00.208348973 +22 74 0 days 00:00:00.316687184 +22 75 0 days 00:00:00.209836493 +22 76 0 days 00:00:00.215585940 +22 77 0 days 00:00:00.264166350 +22 78 0 days 00:00:00.485450365 +22 79 0 days 00:00:00.110475980 +22 80 0 days 00:00:00.245079925 +22 81 0 days 00:00:00.373721720 +22 82 0 days 00:00:00.254693210 +22 83 0 days 00:00:00.216157865 +22 84 0 days 00:00:00.365767513 +22 85 0 days 00:00:00.194707746 +22 86 0 days 00:00:00.134174406 +22 87 0 days 00:00:00.302638906 +22 88 0 days 00:00:00.191231800 +22 89 0 days 00:00:00.222772275 
+22 90 0 days 00:00:00.187782456 +22 91 0 days 00:00:00.195937273 +22 92 0 days 00:00:00.365466313 +22 93 0 days 00:00:00.219919720 +22 94 0 days 00:00:00.254904320 +22 95 0 days 00:00:00.243345406 +22 98 0 days 00:00:00.148508633 +22 99 0 days 00:00:00.352745720 +22 100 0 days 00:00:00.319312220 +23 1 0 days 00:00:00.156034526 +23 2 0 days 00:00:00.218360146 +23 3 0 days 00:00:00.369791960 +23 4 0 days 00:00:00.209936046 +23 5 0 days 00:00:00.281961486 +23 6 0 days 00:00:00.504094751 +23 8 0 days 00:00:00.371974973 +23 9 0 days 00:00:00.138445166 +23 10 0 days 00:00:00.301582160 +23 11 0 days 00:00:00.168154316 +23 12 0 days 00:00:00.272797253 +23 13 0 days 00:00:00.503969646 +23 14 0 days 00:00:00.532673160 +23 15 0 days 00:00:00.253060650 +23 17 0 days 00:00:00.397026900 +23 18 0 days 00:00:00.496553409 +23 19 0 days 00:00:00.199053813 +23 20 0 days 00:00:00.364060720 +23 22 0 days 00:00:00.161144913 +23 23 0 days 00:00:00.267382858 +23 24 0 days 00:00:00.500180988 +23 25 0 days 00:00:00.280285420 +23 26 0 days 00:00:00.264186760 +23 27 0 days 00:00:00.264958806 +23 28 0 days 00:00:00.367113033 +23 30 0 days 00:00:00.199517086 +23 31 0 days 00:00:00.183847350 +23 32 0 days 00:00:00.258644426 +23 33 0 days 00:00:00.211950146 +23 34 0 days 00:00:00.188299500 +23 35 0 days 00:00:00.448031413 +23 36 0 days 00:00:00.190700920 +23 38 0 days 00:00:00.266344140 +23 39 0 days 00:00:00.278696466 +23 40 0 days 00:00:00.483977434 +23 41 0 days 00:00:00.213939573 +23 42 0 days 00:00:00.368276900 +23 43 0 days 00:00:00.366666140 +23 44 0 days 00:00:00.200619600 +23 45 0 days 00:00:00.200255493 +23 46 0 days 00:00:00.410248980 +23 47 0 days 00:00:00.285022420 +23 48 0 days 00:00:00.117249386 +23 49 0 days 00:00:00.336621420 +23 50 0 days 00:00:00.509505703 +23 51 0 days 00:00:00.497877364 +23 52 0 days 00:00:00.367151613 +23 53 0 days 00:00:00.365965846 +23 54 0 days 00:00:00.231624333 +23 55 0 days 00:00:00.201292673 +23 56 0 days 00:00:00.200586440 +23 57 0 days 
00:00:00.389473046 +23 58 0 days 00:00:00.507575245 +23 59 0 days 00:00:00.135416400 +23 60 0 days 00:00:00.217864786 +23 61 0 days 00:00:00.234161753 +23 62 0 days 00:00:00.208956580 +23 63 0 days 00:00:00.403572693 +23 64 0 days 00:00:00.427022973 +23 65 0 days 00:00:00.260762450 +23 66 0 days 00:00:00.236043253 +23 67 0 days 00:00:00.369362206 +23 68 0 days 00:00:00.367352660 +23 69 0 days 00:00:00.345331206 +23 70 0 days 00:00:00.492338281 +23 71 0 days 00:00:00.292713506 +23 72 0 days 00:00:00.134134873 +23 73 0 days 00:00:00.347089505 +23 74 0 days 00:00:00.175776813 +23 75 0 days 00:00:00.367383753 +23 76 0 days 00:00:00.216403446 +23 77 0 days 00:00:00.459272190 +23 78 0 days 00:00:00.402875126 +23 79 0 days 00:00:00.502820585 +23 80 0 days 00:00:00.470351140 +23 81 0 days 00:00:00.275420744 +23 82 0 days 00:00:00.575132340 +23 83 0 days 00:00:00.369032326 +23 84 0 days 00:00:00.367647720 +23 85 0 days 00:00:00.149193113 +23 86 0 days 00:00:00.299601266 +23 87 0 days 00:00:00.367523520 +23 88 0 days 00:00:00.220003426 +23 89 0 days 00:00:00.177021333 +23 90 0 days 00:00:00.224134340 +23 91 0 days 00:00:00.219847966 +23 92 0 days 00:00:00.221910960 +23 94 0 days 00:00:00.457094120 +23 95 0 days 00:00:00.161281066 +23 96 0 days 00:00:00.390549360 +23 97 0 days 00:00:00.350958395 +23 98 0 days 00:00:00.237132073 +23 99 0 days 00:00:00.137104826 +23 100 0 days 00:00:00.394503993 +24 1 0 days 00:00:01.230640940 +24 2 0 days 00:00:01.740329772 +24 3 0 days 00:00:00.790847076 +24 4 0 days 00:00:01.366950455 +24 5 0 days 00:00:01.041264615 +24 6 0 days 00:00:01.331007810 +24 7 0 days 00:00:00.585105184 +24 8 0 days 00:00:01.036274410 +24 9 0 days 00:00:00.475388775 +24 10 0 days 00:00:01.188353275 +24 11 0 days 00:00:01.490596032 +24 12 0 days 00:00:00.443364264 +24 13 0 days 00:00:00.675912683 +24 14 0 days 00:00:00.911312634 +24 15 0 days 00:00:01.164397940 +24 16 0 days 00:00:00.355623860 +24 17 0 days 00:00:00.782269983 +24 18 0 days 00:00:00.751557840 +24 19 0 
days 00:00:00.492671098 +24 20 0 days 00:00:00.920019831 +24 21 0 days 00:00:00.465800094 +24 22 0 days 00:00:00.997297275 +24 23 0 days 00:00:00.849851593 +24 24 0 days 00:00:00.825152566 +24 25 0 days 00:00:01.294963496 +24 26 0 days 00:00:00.623459868 +24 27 0 days 00:00:00.485293754 +24 28 0 days 00:00:00.641915702 +24 29 0 days 00:00:01.093564500 +24 30 0 days 00:00:00.382784115 +24 31 0 days 00:00:00.801068442 +24 32 0 days 00:00:00.442660982 +24 33 0 days 00:00:00.748614731 +24 34 0 days 00:00:00.946805300 +24 35 0 days 00:00:01.652233796 +24 36 0 days 00:00:01.239695600 +24 37 0 days 00:00:00.987393580 +24 38 0 days 00:00:01.096521130 +24 39 0 days 00:00:00.465815962 +24 40 0 days 00:00:01.099951070 +24 41 0 days 00:00:01.379095382 +24 42 0 days 00:00:00.991864077 +24 43 0 days 00:00:01.471539920 +24 44 0 days 00:00:01.122637720 +24 45 0 days 00:00:00.481503045 +24 46 0 days 00:00:00.732544980 +24 47 0 days 00:00:01.323383025 +24 48 0 days 00:00:00.577625434 +24 49 0 days 00:00:01.625722408 +24 50 0 days 00:00:00.492112861 +24 51 0 days 00:00:00.579972242 +24 52 0 days 00:00:01.135805672 +24 53 0 days 00:00:00.623213586 +24 54 0 days 00:00:00.741709446 +24 55 0 days 00:00:00.665459005 +24 56 0 days 00:00:01.227507327 +24 57 0 days 00:00:00.347316214 +24 58 0 days 00:00:00.595178132 +24 59 0 days 00:00:00.822746916 +24 60 0 days 00:00:00.771756715 +24 61 0 days 00:00:00.440971897 +24 62 0 days 00:00:01.069119593 +24 63 0 days 00:00:00.893995309 +24 64 0 days 00:00:00.853654405 +24 65 0 days 00:00:00.536973968 +24 66 0 days 00:00:01.576757225 +24 67 0 days 00:00:00.522223407 +24 68 0 days 00:00:01.607220432 +24 69 0 days 00:00:00.910408812 +24 70 0 days 00:00:00.516869114 +24 71 0 days 00:00:00.432806402 +24 72 0 days 00:00:00.514506934 +24 73 0 days 00:00:00.717237166 +24 74 0 days 00:00:00.440984585 +24 75 0 days 00:00:01.475156295 +24 76 0 days 00:00:01.158711935 +24 77 0 days 00:00:01.778267532 +24 78 0 days 00:00:00.896819253 +24 79 0 days 
00:00:01.052823435 +24 80 0 days 00:00:00.818121946 +24 81 0 days 00:00:00.658955567 +24 82 0 days 00:00:01.097902090 +24 83 0 days 00:00:00.781788660 +24 84 0 days 00:00:00.446343098 +24 85 0 days 00:00:00.798880472 +24 86 0 days 00:00:01.673081388 +24 87 0 days 00:00:00.892399830 +24 88 0 days 00:00:00.452055826 +24 89 0 days 00:00:01.412689776 +24 90 0 days 00:00:00.566195065 +24 91 0 days 00:00:00.456908878 +24 92 0 days 00:00:01.567074777 +24 93 0 days 00:00:00.438620834 +24 94 0 days 00:00:00.770548958 +24 95 0 days 00:00:00.470304500 +24 96 0 days 00:00:01.097203492 +24 97 0 days 00:00:01.878331684 +24 98 0 days 00:00:01.351485675 +24 99 0 days 00:00:00.603322464 +24 100 0 days 00:00:01.237712415 +25 1 0 days 00:00:00.368818749 +25 2 0 days 00:00:00.599577904 +25 3 0 days 00:00:00.541903408 +25 4 0 days 00:00:00.945766344 +25 5 0 days 00:00:00.395598982 +25 6 0 days 00:00:00.208944641 +25 7 0 days 00:00:00.338583330 +25 8 0 days 00:00:00.186525800 +25 9 0 days 00:00:00.443308134 +25 10 0 days 00:00:00.484165910 +25 11 0 days 00:00:00.361548183 +25 12 0 days 00:00:00.618465944 +25 13 0 days 00:00:00.418810164 +25 14 0 days 00:00:00.369396827 +25 15 0 days 00:00:00.563804730 +25 16 0 days 00:00:00.251046745 +25 17 0 days 00:00:00.545448716 +25 18 0 days 00:00:00.575854052 +25 19 0 days 00:00:00.234937358 +25 20 0 days 00:00:00.191459171 +25 21 0 days 00:00:00.259053058 +25 22 0 days 00:00:00.614359065 +25 23 0 days 00:00:00.344449631 +25 24 0 days 00:00:01.001916876 +25 25 0 days 00:00:00.595395176 +25 26 0 days 00:00:00.203410923 +25 27 0 days 00:00:00.339657568 +25 28 0 days 00:00:00.255415889 +25 29 0 days 00:00:00.210832292 +25 30 0 days 00:00:00.269424964 +25 31 0 days 00:00:00.368702390 +25 32 0 days 00:00:00.335721066 +25 33 0 days 00:00:00.313354680 +25 34 0 days 00:00:00.427097898 +25 35 0 days 00:00:00.697444364 +25 36 0 days 00:00:00.291015678 +25 37 0 days 00:00:00.616723876 +25 38 0 days 00:00:00.250904033 +25 39 0 days 00:00:00.469282400 +25 40 0 
days 00:00:00.270523625 +25 41 0 days 00:00:00.676915445 +25 42 0 days 00:00:00.815289508 +25 43 0 days 00:00:00.312935850 +25 44 0 days 00:00:00.463661342 +25 45 0 days 00:00:00.442557050 +25 46 0 days 00:00:00.353102256 +25 47 0 days 00:00:00.491758277 +25 48 0 days 00:00:00.506312632 +25 49 0 days 00:00:00.310619303 +25 50 0 days 00:00:00.631786570 +25 51 0 days 00:00:00.581453108 +25 52 0 days 00:00:00.404748242 +25 53 0 days 00:00:00.335092638 +25 54 0 days 00:00:00.432085140 +25 55 0 days 00:00:00.342739564 +25 56 0 days 00:00:00.140393717 +25 57 0 days 00:00:00.340349968 +25 58 0 days 00:00:00.868807866 +25 59 0 days 00:00:00.664240788 +25 60 0 days 00:00:00.466009780 +25 61 0 days 00:00:00.177717074 +25 62 0 days 00:00:00.147711105 +25 63 0 days 00:00:00.534708193 +25 64 0 days 00:00:00.404885594 +25 65 0 days 00:00:00.485046908 +25 66 0 days 00:00:00.639177848 +25 67 0 days 00:00:00.181623246 +25 68 0 days 00:00:00.416904908 +25 69 0 days 00:00:00.643747780 +25 70 0 days 00:00:00.865478608 +25 71 0 days 00:00:00.259948535 +25 72 0 days 00:00:00.405961717 +25 73 0 days 00:00:00.615396468 +25 74 0 days 00:00:00.713128116 +25 75 0 days 00:00:00.349690940 +25 76 0 days 00:00:00.358997796 +25 77 0 days 00:00:00.254493000 +25 78 0 days 00:00:00.892619543 +25 79 0 days 00:00:00.378900468 +25 80 0 days 00:00:00.190654691 +25 81 0 days 00:00:00.605677880 +25 82 0 days 00:00:00.726772744 +25 83 0 days 00:00:00.192815594 +25 84 0 days 00:00:00.927583763 +25 85 0 days 00:00:00.491307400 +25 86 0 days 00:00:00.444996252 +25 87 0 days 00:00:00.223118971 +25 88 0 days 00:00:00.758585162 +25 89 0 days 00:00:00.425127240 +25 90 0 days 00:00:00.293295713 +25 91 0 days 00:00:00.584041424 +25 92 0 days 00:00:00.279225950 +25 93 0 days 00:00:00.427901633 +25 94 0 days 00:00:00.627991368 +25 95 0 days 00:00:00.373891216 +25 96 0 days 00:00:00.504125260 +25 97 0 days 00:00:00.211589757 +25 98 0 days 00:00:00.201611064 +25 99 0 days 00:00:00.600241944 +25 100 0 days 
00:00:00.586218315 +26 1 0 days 00:00:00.706522813 +26 2 0 days 00:00:00.992206620 +26 3 0 days 00:00:00.989430062 +26 4 0 days 00:00:01.006503152 +26 5 0 days 00:00:00.315231260 +26 6 0 days 00:00:01.166140955 +26 7 0 days 00:00:00.605960520 +26 8 0 days 00:00:00.332520473 +26 9 0 days 00:00:00.792194880 +26 10 0 days 00:00:00.976193465 +26 11 0 days 00:00:01.363662675 +26 12 0 days 00:00:00.426178313 +26 13 0 days 00:00:00.588140293 +26 14 0 days 00:00:00.341959900 +26 15 0 days 00:00:00.693251074 +26 16 0 days 00:00:00.548974214 +26 17 0 days 00:00:00.562692877 +26 18 0 days 00:00:01.108498355 +26 19 0 days 00:00:01.133226595 +26 20 0 days 00:00:00.511240088 +26 21 0 days 00:00:01.033397130 +26 22 0 days 00:00:00.377054507 +26 23 0 days 00:00:00.783918729 +26 24 0 days 00:00:00.336640804 +26 25 0 days 00:00:01.067393348 +26 26 0 days 00:00:01.055562257 +26 27 0 days 00:00:00.293340333 +26 28 0 days 00:00:00.385530112 +26 29 0 days 00:00:00.639767453 +26 30 0 days 00:00:01.177654910 +26 31 0 days 00:00:00.610277341 +26 32 0 days 00:00:00.419439637 +26 33 0 days 00:00:00.551558472 +26 34 0 days 00:00:00.679728348 +26 35 0 days 00:00:00.276114893 +26 36 0 days 00:00:00.568572602 +26 37 0 days 00:00:00.569789222 +26 38 0 days 00:00:00.634671662 +26 39 0 days 00:00:00.503033085 +26 40 0 days 00:00:00.700334393 +26 41 0 days 00:00:00.683711932 +26 42 0 days 00:00:00.691247666 +26 43 0 days 00:00:00.440346912 +26 44 0 days 00:00:01.146122325 +26 45 0 days 00:00:00.547475198 +26 46 0 days 00:00:00.317082920 +26 47 0 days 00:00:00.981547017 +26 48 0 days 00:00:01.000732147 +26 49 0 days 00:00:01.051630370 +26 50 0 days 00:00:00.344174180 +26 51 0 days 00:00:00.546254282 +26 53 0 days 00:00:00.246230820 +26 54 0 days 00:00:00.738726893 +26 55 0 days 00:00:01.008430685 +26 56 0 days 00:00:00.560110470 +26 57 0 days 00:00:00.311431913 +26 58 0 days 00:00:00.754811329 +26 59 0 days 00:00:01.015197753 +26 60 0 days 00:00:00.440056140 +26 61 0 days 00:00:00.554740634 +26 62 0 
days 00:00:00.553859488 +26 63 0 days 00:00:01.069931652 +26 64 0 days 00:00:00.262816960 +26 65 0 days 00:00:00.988780020 +26 66 0 days 00:00:00.982184991 +26 67 0 days 00:00:00.423166861 +26 68 0 days 00:00:00.938859630 +26 69 0 days 00:00:00.976406165 +26 70 0 days 00:00:00.615367623 +26 71 0 days 00:00:01.020972605 +26 72 0 days 00:00:00.378816882 +26 73 0 days 00:00:00.970322774 +26 74 0 days 00:00:00.559081430 +26 75 0 days 00:00:00.398508470 +26 76 0 days 00:00:00.773935703 +26 77 0 days 00:00:00.982539700 +26 78 0 days 00:00:00.961655266 +26 80 0 days 00:00:01.365818708 +26 81 0 days 00:00:00.972330242 +26 82 0 days 00:00:00.340270415 +26 84 0 days 00:00:00.386697533 +26 85 0 days 00:00:00.592173080 +26 86 0 days 00:00:00.348744726 +26 87 0 days 00:00:00.972838148 +26 88 0 days 00:00:00.724826238 +26 89 0 days 00:00:00.345477136 +26 90 0 days 00:00:00.612810483 +26 91 0 days 00:00:00.988405087 +26 92 0 days 00:00:00.636140330 +26 93 0 days 00:00:00.545802772 +26 94 0 days 00:00:00.255013606 +26 95 0 days 00:00:00.814491030 +26 96 0 days 00:00:01.221482202 +26 97 0 days 00:00:01.078600322 +26 98 0 days 00:00:00.709690925 +26 99 0 days 00:00:00.320303513 +26 100 0 days 00:00:00.915662271 +27 1 0 days 00:00:00.290386020 +27 3 0 days 00:00:00.126007993 +27 4 0 days 00:00:00.161943740 +27 5 0 days 00:00:00.514590078 +27 6 0 days 00:00:00.527764018 +27 7 0 days 00:00:00.599465208 +27 8 0 days 00:00:00.584330182 +27 9 0 days 00:00:00.712141198 +27 10 0 days 00:00:00.522813865 +27 11 0 days 00:00:00.240914126 +27 12 0 days 00:00:00.535330944 +27 13 0 days 00:00:00.556008003 +27 14 0 days 00:00:00.595585826 +27 15 0 days 00:00:00.307807265 +27 16 0 days 00:00:00.499664212 +27 17 0 days 00:00:00.194901286 +27 18 0 days 00:00:00.367857326 +27 19 0 days 00:00:00.504308612 +27 20 0 days 00:00:00.186479010 +27 21 0 days 00:00:00.499440940 +27 22 0 days 00:00:00.771591010 +27 23 0 days 00:00:00.288831217 +27 24 0 days 00:00:00.133880020 +27 25 0 days 00:00:00.786314754 
+27 26 0 days 00:00:00.505451897 +27 27 0 days 00:00:00.486670936 +27 28 0 days 00:00:00.581424820 +27 29 0 days 00:00:00.156352620 +27 30 0 days 00:00:00.490714300 +27 31 0 days 00:00:00.136517533 +27 32 0 days 00:00:00.367666113 +27 33 0 days 00:00:00.382107060 +27 34 0 days 00:00:00.109495526 +27 35 0 days 00:00:00.331140641 +27 36 0 days 00:00:00.289111860 +27 37 0 days 00:00:00.158807140 +27 39 0 days 00:00:00.714616281 +27 40 0 days 00:00:00.447726920 +27 41 0 days 00:00:00.523550362 +27 42 0 days 00:00:00.124572406 +27 43 0 days 00:00:00.188978486 +27 44 0 days 00:00:00.297190223 +27 45 0 days 00:00:00.163520186 +27 46 0 days 00:00:00.314946258 +27 47 0 days 00:00:00.250626853 +27 48 0 days 00:00:00.144321020 +27 49 0 days 00:00:00.515680566 +27 50 0 days 00:00:00.459935434 +27 51 0 days 00:00:00.294238556 +27 52 0 days 00:00:00.333611228 +27 53 0 days 00:00:00.293194174 +27 54 0 days 00:00:00.802992004 +27 55 0 days 00:00:00.299676710 +27 56 0 days 00:00:00.501945094 +27 57 0 days 00:00:00.357653287 +27 58 0 days 00:00:00.235627020 +27 59 0 days 00:00:00.221028013 +27 60 0 days 00:00:00.299502118 +27 61 0 days 00:00:00.249639933 +27 62 0 days 00:00:00.579340263 +27 63 0 days 00:00:00.124941806 +27 64 0 days 00:00:00.525439760 +27 65 0 days 00:00:00.206504326 +27 66 0 days 00:00:00.520252513 +27 67 0 days 00:00:00.137964106 +27 68 0 days 00:00:00.337354565 +27 69 0 days 00:00:00.419146766 +27 70 0 days 00:00:00.499279362 +27 71 0 days 00:00:00.324441884 +27 72 0 days 00:00:00.226507640 +27 73 0 days 00:00:00.406438145 +27 74 0 days 00:00:00.501379997 +27 75 0 days 00:00:00.283983090 +27 76 0 days 00:00:00.212367246 +27 77 0 days 00:00:00.653110616 +27 78 0 days 00:00:00.192486438 +27 79 0 days 00:00:00.136431426 +27 80 0 days 00:00:00.510730535 +27 81 0 days 00:00:00.295196840 +27 82 0 days 00:00:00.165989760 +27 83 0 days 00:00:00.417882822 +27 84 0 days 00:00:00.185724880 +27 85 0 days 00:00:00.705937200 +27 86 0 days 00:00:00.124690693 +27 87 0 days 
00:00:00.395147822 +27 88 0 days 00:00:00.690558627 +27 89 0 days 00:00:00.210650013 +27 90 0 days 00:00:00.380389226 +27 91 0 days 00:00:00.498385460 +27 92 0 days 00:00:00.553542723 +27 93 0 days 00:00:00.199584993 +27 94 0 days 00:00:00.497820690 +27 95 0 days 00:00:00.283908390 +27 96 0 days 00:00:00.546125525 +27 97 0 days 00:00:00.604060614 +27 98 0 days 00:00:00.498487454 +27 99 0 days 00:00:00.762891080 +27 100 0 days 00:00:00.586818814 +28 1 0 days 00:00:00.211189905 +28 3 0 days 00:00:00.109199785 +28 4 0 days 00:00:00.134529905 +28 5 0 days 00:00:00.135858660 +28 6 0 days 00:00:00.129975157 +28 7 0 days 00:00:00.121595590 +28 8 0 days 00:00:00.111674795 +28 9 0 days 00:00:00.154086137 +28 10 0 days 00:00:00.181084495 +28 11 0 days 00:00:00.170895800 +28 12 0 days 00:00:00.179719665 +28 13 0 days 00:00:00.114727360 +28 14 0 days 00:00:00.176455465 +28 15 0 days 00:00:00.174217620 +28 16 0 days 00:00:00.108565135 +28 17 0 days 00:00:00.125823435 +28 18 0 days 00:00:00.122671000 +28 19 0 days 00:00:00.184456465 +28 20 0 days 00:00:00.110588240 +28 21 0 days 00:00:00.136308692 +28 22 0 days 00:00:00.193292756 +28 23 0 days 00:00:00.141090250 +28 24 0 days 00:00:00.132831425 +28 25 0 days 00:00:00.199691506 +28 26 0 days 00:00:00.200325516 +28 27 0 days 00:00:00.123839984 +28 28 0 days 00:00:00.173929025 +28 29 0 days 00:00:00.139517125 +28 30 0 days 00:00:00.117183040 +28 31 0 days 00:00:00.107217865 +28 32 0 days 00:00:00.133683260 +28 33 0 days 00:00:00.174163800 +28 34 0 days 00:00:00.216664790 +28 35 0 days 00:00:00.149537612 +28 36 0 days 00:00:00.181528605 +28 37 0 days 00:00:00.123379380 +28 38 0 days 00:00:00.133894625 +28 39 0 days 00:00:00.147235723 +28 40 0 days 00:00:00.180683515 +28 41 0 days 00:00:00.134306372 +28 42 0 days 00:00:00.129380245 +28 43 0 days 00:00:00.120968933 +28 44 0 days 00:00:00.134941980 +28 45 0 days 00:00:00.137592000 +28 46 0 days 00:00:00.128358230 +28 47 0 days 00:00:00.181082670 +28 48 0 days 00:00:00.115629965 +28 49 
0 days 00:00:00.137288720 +28 50 0 days 00:00:00.111887290 +28 51 0 days 00:00:00.177155030 +28 52 0 days 00:00:00.204022980 +28 53 0 days 00:00:00.139481984 +28 54 0 days 00:00:00.134214720 +28 55 0 days 00:00:00.108057870 +28 56 0 days 00:00:00.176124300 +28 57 0 days 00:00:00.175976695 +28 58 0 days 00:00:00.201620131 +28 59 0 days 00:00:00.113965905 +28 60 0 days 00:00:00.108575820 +28 61 0 days 00:00:00.107051505 +28 62 0 days 00:00:00.108867005 +28 63 0 days 00:00:00.183406670 +28 64 0 days 00:00:00.186474356 +28 65 0 days 00:00:00.191011176 +28 66 0 days 00:00:00.144705955 +28 67 0 days 00:00:00.132353305 +28 68 0 days 00:00:00.142406624 +28 69 0 days 00:00:00.203560380 +28 70 0 days 00:00:00.178053495 +28 71 0 days 00:00:00.153096225 +28 72 0 days 00:00:00.119349345 +28 73 0 days 00:00:00.177320915 +28 74 0 days 00:00:00.134718880 +28 75 0 days 00:00:00.118096995 +28 76 0 days 00:00:00.123937353 +28 77 0 days 00:00:00.179822800 +28 78 0 days 00:00:00.133261240 +28 79 0 days 00:00:00.140024455 +28 80 0 days 00:00:00.138765012 +28 81 0 days 00:00:00.132359165 +28 82 0 days 00:00:00.192740825 +28 83 0 days 00:00:00.178791560 +28 84 0 days 00:00:00.117108980 +28 85 0 days 00:00:00.155218880 +28 86 0 days 00:00:00.141288785 +28 87 0 days 00:00:00.133030620 +28 88 0 days 00:00:00.118050330 +28 89 0 days 00:00:00.181741050 +28 90 0 days 00:00:00.142925793 +28 91 0 days 00:00:00.131785530 +28 92 0 days 00:00:00.108366270 +28 93 0 days 00:00:00.194223400 +28 94 0 days 00:00:00.151050502 +28 95 0 days 00:00:00.125556120 +28 96 0 days 00:00:00.139717140 +28 97 0 days 00:00:00.132705755 +28 98 0 days 00:00:00.117453630 +28 99 0 days 00:00:00.190714465 +28 100 0 days 00:00:00.110517620 +29 1 0 days 00:00:00.119895776 +29 2 0 days 00:00:00.114650125 +29 3 0 days 00:00:00.142631105 +29 4 0 days 00:00:00.142125475 +29 5 0 days 00:00:00.143019958 +29 6 0 days 00:00:00.188995660 +29 7 0 days 00:00:00.129076976 +29 8 0 days 00:00:00.133866148 +29 9 0 days 00:00:00.112025250 
+29 10 0 days 00:00:00.119506925 +29 11 0 days 00:00:00.118859024 +29 12 0 days 00:00:00.182710550 +29 13 0 days 00:00:00.120100875 +29 14 0 days 00:00:00.181650445 +29 15 0 days 00:00:00.112456745 +29 16 0 days 00:00:00.110647580 +29 17 0 days 00:00:00.131210443 +29 18 0 days 00:00:00.128968095 +29 19 0 days 00:00:00.123733080 +29 20 0 days 00:00:00.134573435 +29 21 0 days 00:00:00.155430664 +29 22 0 days 00:00:00.165223180 +29 23 0 days 00:00:00.111893365 +29 24 0 days 00:00:00.147746790 +29 25 0 days 00:00:00.176928325 +29 26 0 days 00:00:00.141596630 +29 27 0 days 00:00:00.133583850 +29 28 0 days 00:00:00.221289348 +29 29 0 days 00:00:00.133709500 +29 30 0 days 00:00:00.135944045 +29 31 0 days 00:00:00.211958617 +29 32 0 days 00:00:00.136559100 +29 33 0 days 00:00:00.143637105 +29 34 0 days 00:00:00.187537180 +29 35 0 days 00:00:00.119846870 +29 36 0 days 00:00:00.119411035 +29 37 0 days 00:00:00.212146255 +29 38 0 days 00:00:00.155056726 +29 39 0 days 00:00:00.137201175 +29 40 0 days 00:00:00.116175252 +29 41 0 days 00:00:00.180745890 +29 42 0 days 00:00:00.122880611 +29 43 0 days 00:00:00.208693537 +29 44 0 days 00:00:00.181880585 +29 45 0 days 00:00:00.140537535 +29 46 0 days 00:00:00.131309235 +29 47 0 days 00:00:00.141596616 +29 48 0 days 00:00:00.145415025 +29 49 0 days 00:00:00.110697010 +29 50 0 days 00:00:00.187415252 +29 51 0 days 00:00:00.108739015 +29 52 0 days 00:00:00.181495775 +29 53 0 days 00:00:00.132101740 +29 54 0 days 00:00:00.131124545 +29 55 0 days 00:00:00.175297270 +29 56 0 days 00:00:00.123976792 +29 57 0 days 00:00:00.133608475 +29 58 0 days 00:00:00.135056840 +29 59 0 days 00:00:00.117177060 +29 60 0 days 00:00:00.114995052 +29 61 0 days 00:00:00.110797225 +29 62 0 days 00:00:00.125446804 +29 63 0 days 00:00:00.129707280 +29 64 0 days 00:00:00.149833215 +29 65 0 days 00:00:00.120447584 +29 66 0 days 00:00:00.177656065 +29 67 0 days 00:00:00.133065185 +29 68 0 days 00:00:00.121867405 +29 69 0 days 00:00:00.163119028 +29 70 0 days 
00:00:00.190650980 +29 71 0 days 00:00:00.112015300 +29 72 0 days 00:00:00.149968656 +29 73 0 days 00:00:00.133614185 +29 74 0 days 00:00:00.131929760 +29 75 0 days 00:00:00.181439480 +29 76 0 days 00:00:00.173658845 +29 77 0 days 00:00:00.170330635 +29 78 0 days 00:00:00.185728932 +29 79 0 days 00:00:00.154033053 +29 80 0 days 00:00:00.178481475 +29 81 0 days 00:00:00.166336135 +29 82 0 days 00:00:00.172278926 +29 83 0 days 00:00:00.131021265 +29 84 0 days 00:00:00.171577240 +29 85 0 days 00:00:00.173360595 +29 86 0 days 00:00:00.130987320 +29 87 0 days 00:00:00.167499025 +29 88 0 days 00:00:00.170396620 +29 89 0 days 00:00:00.128244430 +29 90 0 days 00:00:00.117461945 +29 91 0 days 00:00:00.128364585 +29 92 0 days 00:00:00.189051245 +29 93 0 days 00:00:00.111153040 +29 94 0 days 00:00:00.160536173 +29 95 0 days 00:00:00.167788050 +29 96 0 days 00:00:00.126663955 +29 97 0 days 00:00:00.174346940 +29 98 0 days 00:00:00.135940780 +29 99 0 days 00:00:00.116673666 +29 100 0 days 00:00:00.126766290 +30 1 0 days 00:00:00.069394576 +30 2 0 days 00:00:00.072235675 +30 3 0 days 00:00:00.100174055 +30 4 0 days 00:00:00.103018325 +30 5 0 days 00:00:00.063267092 +30 6 0 days 00:00:00.069963560 +30 7 0 days 00:00:00.068682132 +30 8 0 days 00:00:00.078986840 +30 9 0 days 00:00:00.063781185 +30 10 0 days 00:00:00.095918735 +30 11 0 days 00:00:00.067606605 +30 12 0 days 00:00:00.072911135 +30 13 0 days 00:00:00.106525271 +30 14 0 days 00:00:00.061057435 +30 15 0 days 00:00:00.095626695 +30 16 0 days 00:00:00.067110224 +30 17 0 days 00:00:00.109789804 +30 18 0 days 00:00:00.075962352 +30 19 0 days 00:00:00.068990255 +30 20 0 days 00:00:00.071829495 +30 21 0 days 00:00:00.066529075 +30 22 0 days 00:00:00.063774415 +30 23 0 days 00:00:00.076399020 +30 24 0 days 00:00:00.059577040 +30 25 0 days 00:00:00.076120493 +30 26 0 days 00:00:00.075558506 +30 27 0 days 00:00:00.070514870 +30 28 0 days 00:00:00.069711340 +30 29 0 days 00:00:00.095555515 +30 30 0 days 00:00:00.098689010 +30 31 0 
days 00:00:00.072813830 +30 32 0 days 00:00:00.114275510 +30 33 0 days 00:00:00.064084395 +30 34 0 days 00:00:00.106016615 +30 35 0 days 00:00:00.070026480 +30 36 0 days 00:00:00.100226450 +30 37 0 days 00:00:00.075691640 +30 38 0 days 00:00:00.099406920 +30 39 0 days 00:00:00.088187848 +30 40 0 days 00:00:00.064220656 +30 41 0 days 00:00:00.095086330 +30 42 0 days 00:00:00.063768920 +30 43 0 days 00:00:00.067459962 +30 44 0 days 00:00:00.100345365 +30 45 0 days 00:00:00.076607528 +30 46 0 days 00:00:00.060975145 +30 47 0 days 00:00:00.100264465 +30 48 0 days 00:00:00.069191495 +30 49 0 days 00:00:00.101425170 +30 50 0 days 00:00:00.078641468 +30 51 0 days 00:00:00.070974845 +30 52 0 days 00:00:00.069151040 +30 53 0 days 00:00:00.072954015 +30 54 0 days 00:00:00.097145384 +30 55 0 days 00:00:00.074671790 +30 56 0 days 00:00:00.080914731 +30 57 0 days 00:00:00.061929120 +30 58 0 days 00:00:00.058705365 +30 59 0 days 00:00:00.090932475 +30 60 0 days 00:00:00.099382950 +30 61 0 days 00:00:00.105016345 +30 62 0 days 00:00:00.080341644 +30 63 0 days 00:00:00.098901495 +30 64 0 days 00:00:00.101380765 +30 65 0 days 00:00:00.070505005 +30 66 0 days 00:00:00.096921740 +30 67 0 days 00:00:00.068723672 +30 68 0 days 00:00:00.100111740 +30 69 0 days 00:00:00.069796985 +30 70 0 days 00:00:00.070874895 +30 71 0 days 00:00:00.069749905 +30 72 0 days 00:00:00.063052332 +30 73 0 days 00:00:00.059296850 +30 74 0 days 00:00:00.059926715 +30 75 0 days 00:00:00.097464760 +30 76 0 days 00:00:00.101943185 +30 77 0 days 00:00:00.061408185 +30 78 0 days 00:00:00.060749425 +30 79 0 days 00:00:00.101951830 +30 80 0 days 00:00:00.089827341 +30 81 0 days 00:00:00.099183955 +30 82 0 days 00:00:00.068960200 +30 83 0 days 00:00:00.097297610 +30 84 0 days 00:00:00.071907785 +30 85 0 days 00:00:00.082657888 +30 86 0 days 00:00:00.071384690 +30 87 0 days 00:00:00.094272090 +30 88 0 days 00:00:00.062013968 +30 89 0 days 00:00:00.105711706 +30 90 0 days 00:00:00.068304225 +30 91 0 days 
00:00:00.069443460 +30 92 0 days 00:00:00.108283750 +30 93 0 days 00:00:00.069509375 +30 94 0 days 00:00:00.074168370 +30 95 0 days 00:00:00.101656344 +30 96 0 days 00:00:00.062714925 +30 97 0 days 00:00:00.066145125 +30 98 0 days 00:00:00.073603823 +30 99 0 days 00:00:00.069404510 +30 100 0 days 00:00:00.108733500 +31 1 0 days 00:00:00.061065840 +31 2 0 days 00:00:00.095043395 +31 3 0 days 00:00:00.053869373 +31 4 0 days 00:00:00.068852650 +31 5 0 days 00:00:00.069232895 +31 6 0 days 00:00:00.120304648 +31 7 0 days 00:00:00.095322390 +31 8 0 days 00:00:00.102402700 +31 9 0 days 00:00:00.060821600 +31 10 0 days 00:00:00.084877120 +31 11 0 days 00:00:00.094321153 +31 12 0 days 00:00:00.083752906 +31 13 0 days 00:00:00.101618985 +31 14 0 days 00:00:00.069729780 +31 15 0 days 00:00:00.074295172 +31 16 0 days 00:00:00.107878497 +31 17 0 days 00:00:00.072813945 +31 18 0 days 00:00:00.081063311 +31 19 0 days 00:00:00.073355085 +31 20 0 days 00:00:00.076102090 +31 21 0 days 00:00:00.098014720 +31 22 0 days 00:00:00.096493060 +31 23 0 days 00:00:00.069306213 +31 24 0 days 00:00:00.088686820 +31 25 0 days 00:00:00.076162980 +31 26 0 days 00:00:00.075006225 +31 27 0 days 00:00:00.071772910 +31 28 0 days 00:00:00.066684505 +31 29 0 days 00:00:00.062625990 +31 30 0 days 00:00:00.072548435 +31 31 0 days 00:00:00.067923100 +31 32 0 days 00:00:00.073665275 +31 34 0 days 00:00:00.060870860 +31 35 0 days 00:00:00.060060520 +31 36 0 days 00:00:00.082591486 +31 37 0 days 00:00:00.075072765 +31 38 0 days 00:00:00.074915645 +31 39 0 days 00:00:00.075702520 +31 40 0 days 00:00:00.088900402 +31 41 0 days 00:00:00.097625320 +31 42 0 days 00:00:00.105578590 +31 43 0 days 00:00:00.094776955 +31 44 0 days 00:00:00.059702525 +31 45 0 days 00:00:00.085630800 +31 46 0 days 00:00:00.079284457 +31 47 0 days 00:00:00.064258020 +31 48 0 days 00:00:00.059271100 +31 49 0 days 00:00:00.061062255 +31 50 0 days 00:00:00.103264825 +31 51 0 days 00:00:00.074275000 +31 52 0 days 00:00:00.095192095 +31 53 0 
days 00:00:00.063694536 +31 54 0 days 00:00:00.059658655 +31 55 0 days 00:00:00.069345590 +31 56 0 days 00:00:00.081548070 +31 57 0 days 00:00:00.070638820 +31 58 0 days 00:00:00.072828745 +31 59 0 days 00:00:00.094389845 +31 60 0 days 00:00:00.103418880 +31 61 0 days 00:00:00.063639248 +31 62 0 days 00:00:00.074227370 +31 63 0 days 00:00:00.064128215 +31 64 0 days 00:00:00.059432605 +31 65 0 days 00:00:00.072021820 +31 66 0 days 00:00:00.109272020 +31 67 0 days 00:00:00.065295824 +31 68 0 days 00:00:00.103884780 +31 69 0 days 00:00:00.061523380 +31 70 0 days 00:00:00.071681450 +31 71 0 days 00:00:00.077540670 +31 72 0 days 00:00:00.070555270 +31 73 0 days 00:00:00.079880631 +31 74 0 days 00:00:00.062464495 +31 75 0 days 00:00:00.071492605 +31 76 0 days 00:00:00.068008240 +31 77 0 days 00:00:00.064754080 +31 78 0 days 00:00:00.097157455 +31 79 0 days 00:00:00.065852086 +31 80 0 days 00:00:00.074754420 +31 81 0 days 00:00:00.072305330 +31 82 0 days 00:00:00.060691125 +31 83 0 days 00:00:00.069442420 +31 84 0 days 00:00:00.097411365 +31 85 0 days 00:00:00.064637780 +31 86 0 days 00:00:00.079433960 +31 87 0 days 00:00:00.072018845 +31 88 0 days 00:00:00.073642440 +31 89 0 days 00:00:00.074464620 +31 90 0 days 00:00:00.068014645 +31 91 0 days 00:00:00.094546050 +31 92 0 days 00:00:00.083053762 +31 93 0 days 00:00:00.066406140 +31 94 0 days 00:00:00.068151632 +31 95 0 days 00:00:00.099877295 +31 96 0 days 00:00:00.101930004 +31 97 0 days 00:00:00.065665955 +31 98 0 days 00:00:00.080102250 +31 99 0 days 00:00:00.061166735 +31 100 0 days 00:00:00.071850675 +32 1 0 days 00:00:00.168433563 +32 2 0 days 00:00:00.123633322 +32 3 0 days 00:00:00.143854525 +32 4 0 days 00:00:00.143024864 +32 5 0 days 00:00:00.145806880 +32 6 0 days 00:00:00.195021628 +32 7 0 days 00:00:00.114313950 +32 8 0 days 00:00:00.162780932 +32 9 0 days 00:00:00.216119910 +32 10 0 days 00:00:00.194724080 +32 11 0 days 00:00:00.230017285 +32 12 0 days 00:00:00.154705895 +32 13 0 days 00:00:00.224781420 +32 
14 0 days 00:00:00.147520690 +32 15 0 days 00:00:00.119687290 +32 16 0 days 00:00:00.117780032 +32 17 0 days 00:00:00.118879112 +32 18 0 days 00:00:00.139114176 +32 19 0 days 00:00:00.119148565 +32 20 0 days 00:00:00.195258026 +32 21 0 days 00:00:00.119889336 +32 22 0 days 00:00:00.199519700 +32 23 0 days 00:00:00.138724376 +32 24 0 days 00:00:00.116214075 +32 25 0 days 00:00:00.194260265 +32 26 0 days 00:00:00.193876490 +32 27 0 days 00:00:00.152256911 +32 28 0 days 00:00:00.110674390 +32 29 0 days 00:00:00.135863655 +32 30 0 days 00:00:00.163522536 +32 31 0 days 00:00:00.175331240 +32 32 0 days 00:00:00.133660535 +32 33 0 days 00:00:00.158702566 +32 34 0 days 00:00:00.153389048 +32 35 0 days 00:00:00.242765900 +32 36 0 days 00:00:00.153718160 +32 37 0 days 00:00:00.121563745 +32 38 0 days 00:00:00.215765970 +32 39 0 days 00:00:00.144132035 +32 40 0 days 00:00:00.143242065 +32 41 0 days 00:00:00.197887785 +32 42 0 days 00:00:00.221049486 +32 43 0 days 00:00:00.163398988 +32 44 0 days 00:00:00.148783912 +32 45 0 days 00:00:00.177836430 +32 46 0 days 00:00:00.123528830 +32 47 0 days 00:00:00.149919597 +32 48 0 days 00:00:00.195946230 +32 49 0 days 00:00:00.143990660 +32 50 0 days 00:00:00.138419960 +32 51 0 days 00:00:00.101808466 +32 52 0 days 00:00:00.133252706 +32 53 0 days 00:00:00.230460212 +32 54 0 days 00:00:00.253276990 +32 55 0 days 00:00:00.271551702 +32 56 0 days 00:00:00.171585750 +32 57 0 days 00:00:00.218232915 +32 58 0 days 00:00:00.136538147 +32 59 0 days 00:00:00.116263095 +32 60 0 days 00:00:00.145929915 +32 61 0 days 00:00:00.135149208 +32 62 0 days 00:00:00.127575915 +32 63 0 days 00:00:00.130783855 +32 64 0 days 00:00:00.252657713 +32 65 0 days 00:00:00.167402190 +32 66 0 days 00:00:00.131218785 +32 67 0 days 00:00:00.156872270 +32 68 0 days 00:00:00.233002485 +32 69 0 days 00:00:00.136964726 +32 70 0 days 00:00:00.153631235 +32 71 0 days 00:00:00.129998205 +32 72 0 days 00:00:00.188795875 +32 73 0 days 00:00:00.137734925 +32 74 0 days 
00:00:00.221525048 +32 75 0 days 00:00:00.165496050 +32 76 0 days 00:00:00.163585620 +32 77 0 days 00:00:00.139853295 +32 78 0 days 00:00:00.193511175 +32 79 0 days 00:00:00.111824395 +32 80 0 days 00:00:00.134942845 +32 81 0 days 00:00:00.177435915 +32 82 0 days 00:00:00.120161648 +32 83 0 days 00:00:00.204666214 +32 84 0 days 00:00:00.204714875 +32 85 0 days 00:00:00.156364186 +32 86 0 days 00:00:00.119780900 +32 87 0 days 00:00:00.111494813 +32 88 0 days 00:00:00.148498070 +32 89 0 days 00:00:00.166371830 +32 90 0 days 00:00:00.126587064 +32 91 0 days 00:00:00.115932195 +32 92 0 days 00:00:00.121364120 +32 93 0 days 00:00:00.135386096 +32 94 0 days 00:00:00.215352556 +32 95 0 days 00:00:00.123517280 +32 96 0 days 00:00:00.143100830 +32 97 0 days 00:00:00.119408885 +32 98 0 days 00:00:00.120709900 +32 99 0 days 00:00:00.137029705 +32 100 0 days 00:00:00.161285316 +33 1 0 days 00:00:00.100097000 +33 2 0 days 00:00:00.073843030 +33 3 0 days 00:00:00.077480646 +33 4 0 days 00:00:00.099742655 +33 5 0 days 00:00:00.074712160 +33 6 0 days 00:00:00.071502630 +33 7 0 days 00:00:00.108866460 +33 8 0 days 00:00:00.069623308 +33 9 0 days 00:00:00.084394990 +33 10 0 days 00:00:00.113706880 +33 11 0 days 00:00:00.068182380 +33 12 0 days 00:00:00.095897656 +33 13 0 days 00:00:00.137071592 +33 14 0 days 00:00:00.092360980 +33 15 0 days 00:00:00.080043390 +33 16 0 days 00:00:00.075219150 +33 17 0 days 00:00:00.130031990 +33 18 0 days 00:00:00.104925870 +33 19 0 days 00:00:00.080758010 +33 20 0 days 00:00:00.071209833 +33 21 0 days 00:00:00.106370850 +33 22 0 days 00:00:00.095874286 +33 23 0 days 00:00:00.103390810 +33 24 0 days 00:00:00.069739115 +33 25 0 days 00:00:00.082396260 +33 26 0 days 00:00:00.068760740 +33 27 0 days 00:00:00.074606170 +33 28 0 days 00:00:00.081331600 +33 29 0 days 00:00:00.119330708 +33 30 0 days 00:00:00.086696044 +33 31 0 days 00:00:00.071782355 +33 32 0 days 00:00:00.087558075 +33 33 0 days 00:00:00.071715805 +33 34 0 days 00:00:00.066142160 +33 35 0 
days 00:00:00.092721900 +33 36 0 days 00:00:00.115497472 +33 37 0 days 00:00:00.072155668 +33 38 0 days 00:00:00.062404500 +33 39 0 days 00:00:00.103213144 +33 40 0 days 00:00:00.085948802 +33 41 0 days 00:00:00.070836055 +33 42 0 days 00:00:00.086133174 +33 43 0 days 00:00:00.111171295 +33 44 0 days 00:00:00.110579215 +33 45 0 days 00:00:00.088655156 +33 46 0 days 00:00:00.132706260 +33 47 0 days 00:00:00.101045474 +33 48 0 days 00:00:00.080551576 +33 49 0 days 00:00:00.083269371 +33 50 0 days 00:00:00.126061675 +33 51 0 days 00:00:00.077611640 +33 52 0 days 00:00:00.083379463 +33 53 0 days 00:00:00.073901452 +33 54 0 days 00:00:00.072533940 +33 55 0 days 00:00:00.120798591 +33 56 0 days 00:00:00.083211676 +33 57 0 days 00:00:00.074178220 +33 58 0 days 00:00:00.098119760 +33 59 0 days 00:00:00.077300716 +33 60 0 days 00:00:00.070951663 +33 61 0 days 00:00:00.112528260 +33 62 0 days 00:00:00.101615925 +33 63 0 days 00:00:00.072065575 +33 64 0 days 00:00:00.098464805 +33 65 0 days 00:00:00.084158550 +33 66 0 days 00:00:00.075213340 +33 67 0 days 00:00:00.123099450 +33 68 0 days 00:00:00.087833575 +33 69 0 days 00:00:00.089093402 +33 70 0 days 00:00:00.072616860 +33 71 0 days 00:00:00.062361615 +33 72 0 days 00:00:00.072308984 +33 73 0 days 00:00:00.109905044 +33 74 0 days 00:00:00.075366742 +33 75 0 days 00:00:00.098247290 +33 76 0 days 00:00:00.098734485 +33 77 0 days 00:00:00.100828990 +33 78 0 days 00:00:00.063282870 +33 79 0 days 00:00:00.063933320 +33 80 0 days 00:00:00.102906024 +33 81 0 days 00:00:00.098993145 +33 82 0 days 00:00:00.102845768 +33 83 0 days 00:00:00.061899435 +33 84 0 days 00:00:00.096750505 +33 85 0 days 00:00:00.099159305 +33 86 0 days 00:00:00.082448215 +33 87 0 days 00:00:00.122447925 +33 88 0 days 00:00:00.089606183 +33 89 0 days 00:00:00.084306932 +33 90 0 days 00:00:00.082413605 +33 91 0 days 00:00:00.079061300 +33 92 0 days 00:00:00.134341990 +33 93 0 days 00:00:00.084724985 +33 94 0 days 00:00:00.119848345 +33 95 0 days 
00:00:00.120018500 +33 96 0 days 00:00:00.119671130 +33 97 0 days 00:00:00.084211855 +33 98 0 days 00:00:00.078364340 +33 99 0 days 00:00:00.071851800 +33 100 0 days 00:00:00.084869810 +34 1 0 days 00:00:00.551789797 +34 2 0 days 00:00:00.437002090 +34 3 0 days 00:00:00.911396751 +34 4 0 days 00:00:00.464096592 +34 5 0 days 00:00:00.714835386 +34 6 0 days 00:00:00.591956560 +34 7 0 days 00:00:00.811548745 +34 8 0 days 00:00:00.816200890 +34 9 0 days 00:00:00.462009215 +34 10 0 days 00:00:01.262162415 +34 11 0 days 00:00:00.352570620 +34 12 0 days 00:00:00.745129420 +34 13 0 days 00:00:00.497393023 +34 14 0 days 00:00:00.326463836 +34 15 0 days 00:00:00.487184930 +34 16 0 days 00:00:01.007275462 +34 17 0 days 00:00:00.816115830 +34 18 0 days 00:00:00.552876472 +34 19 0 days 00:00:00.958319030 +34 20 0 days 00:00:00.922163325 +34 21 0 days 00:00:00.935715005 +34 22 0 days 00:00:00.955518685 +34 23 0 days 00:00:01.002459992 +34 24 0 days 00:00:00.820325905 +34 25 0 days 00:00:00.429726140 +34 26 0 days 00:00:00.534124645 +34 27 0 days 00:00:00.562512460 +34 28 0 days 00:00:00.460803660 +34 29 0 days 00:00:00.406500905 +34 30 0 days 00:00:01.265284825 +34 31 0 days 00:00:00.812957635 +34 32 0 days 00:00:00.374216164 +34 33 0 days 00:00:00.686559555 +34 34 0 days 00:00:00.458813760 +34 35 0 days 00:00:00.809306986 +34 36 0 days 00:00:01.231071575 +34 37 0 days 00:00:00.464803324 +34 38 0 days 00:00:00.459748268 +34 39 0 days 00:00:01.096097175 +34 40 0 days 00:00:00.821333836 +34 41 0 days 00:00:00.470378420 +34 42 0 days 00:00:00.481128855 +34 43 0 days 00:00:01.172413415 +34 44 0 days 00:00:00.479293680 +34 45 0 days 00:00:01.232379430 +34 46 0 days 00:00:00.864129884 +34 47 0 days 00:00:00.457539588 +34 48 0 days 00:00:00.498967502 +34 49 0 days 00:00:00.460480868 +34 50 0 days 00:00:00.985623025 +34 51 0 days 00:00:00.807318710 +34 52 0 days 00:00:00.325961300 +34 53 0 days 00:00:00.856142164 +34 54 0 days 00:00:00.807697055 +34 55 0 days 00:00:00.810001600 +34 56 0 
days 00:00:00.519303845 +34 57 0 days 00:00:00.811956430 +34 58 0 days 00:00:00.721956400 +34 59 0 days 00:00:00.449893060 +34 60 0 days 00:00:00.575759965 +34 61 0 days 00:00:00.438644910 +34 62 0 days 00:00:01.124952543 +34 63 0 days 00:00:00.628516573 +34 64 0 days 00:00:00.839140150 +34 65 0 days 00:00:00.687740424 +34 66 0 days 00:00:00.401120954 +34 67 0 days 00:00:00.449800055 +34 68 0 days 00:00:00.820875340 +34 69 0 days 00:00:01.024662284 +34 70 0 days 00:00:00.822340890 +34 71 0 days 00:00:00.854364490 +34 72 0 days 00:00:01.010280926 +34 73 0 days 00:00:00.511640396 +34 74 0 days 00:00:00.866977285 +34 75 0 days 00:00:00.459014435 +34 76 0 days 00:00:00.671047243 +34 77 0 days 00:00:00.505243765 +34 78 0 days 00:00:00.767870040 +34 79 0 days 00:00:00.563803750 +34 80 0 days 00:00:00.262701410 +34 81 0 days 00:00:00.510899856 +34 82 0 days 00:00:01.132422330 +34 83 0 days 00:00:00.266349385 +34 84 0 days 00:00:00.294665435 +34 85 0 days 00:00:00.296906270 +34 86 0 days 00:00:00.533282743 +34 87 0 days 00:00:00.592565960 +34 88 0 days 00:00:01.032664760 +34 89 0 days 00:00:01.042515395 +34 90 0 days 00:00:00.950884660 +34 91 0 days 00:00:00.921217864 +34 92 0 days 00:00:00.458941655 +34 93 0 days 00:00:00.358361251 +34 94 0 days 00:00:00.904114640 +34 95 0 days 00:00:00.427122485 +34 96 0 days 00:00:01.018624000 +34 97 0 days 00:00:00.876751500 +34 98 0 days 00:00:01.480507510 +34 99 0 days 00:00:00.751748336 +34 100 0 days 00:00:00.338642985 +35 1 0 days 00:00:00.658222072 +35 2 0 days 00:00:00.891555635 +35 3 0 days 00:00:00.312627164 +35 4 0 days 00:00:00.894795728 +35 5 0 days 00:00:00.717539145 +35 6 0 days 00:00:00.569055152 +35 7 0 days 00:00:00.305118550 +35 8 0 days 00:00:00.529217650 +35 9 0 days 00:00:00.862951105 +35 10 0 days 00:00:00.386618090 +35 11 0 days 00:00:00.712394930 +35 12 0 days 00:00:00.335565856 +35 13 0 days 00:00:00.586100336 +35 14 0 days 00:00:00.850288895 +35 15 0 days 00:00:00.945982756 +35 16 0 days 00:00:00.977143636 +35 
17 0 days 00:00:00.981385896 +35 18 0 days 00:00:01.035336265 +35 19 0 days 00:00:00.545033690 +35 20 0 days 00:00:00.541100011 +35 21 0 days 00:00:00.297785316 +35 22 0 days 00:00:00.477038395 +35 23 0 days 00:00:00.292896285 +35 24 0 days 00:00:00.495443005 +35 25 0 days 00:00:00.874323565 +35 26 0 days 00:00:00.871964670 +35 27 0 days 00:00:00.497486335 +35 28 0 days 00:00:01.353490980 +35 29 0 days 00:00:00.538731880 +35 30 0 days 00:00:01.018670950 +35 31 0 days 00:00:00.606803772 +35 32 0 days 00:00:01.004194213 +35 33 0 days 00:00:00.901985504 +35 34 0 days 00:00:00.943303660 +35 35 0 days 00:00:00.592492820 +35 36 0 days 00:00:00.446680375 +35 37 0 days 00:00:00.910753355 +35 38 0 days 00:00:00.473944775 +35 39 0 days 00:00:01.092404600 +35 40 0 days 00:00:00.527437923 +35 41 0 days 00:00:00.469944855 +35 42 0 days 00:00:01.039692930 +35 43 0 days 00:00:00.907490575 +35 44 0 days 00:00:00.450246175 +35 45 0 days 00:00:00.898651865 +35 46 0 days 00:00:00.490245915 +35 47 0 days 00:00:00.976103816 +35 48 0 days 00:00:00.286950530 +35 49 0 days 00:00:01.083870407 +35 50 0 days 00:00:00.891176825 +35 51 0 days 00:00:00.516445005 +35 52 0 days 00:00:00.937132112 +35 53 0 days 00:00:00.463473705 +35 54 0 days 00:00:00.468461360 +35 55 0 days 00:00:00.916525276 +35 56 0 days 00:00:00.348429531 +35 57 0 days 00:00:00.488256390 +35 58 0 days 00:00:00.963743344 +35 59 0 days 00:00:00.770499960 +35 60 0 days 00:00:00.372439051 +35 61 0 days 00:00:00.797647565 +35 62 0 days 00:00:01.000311580 +35 63 0 days 00:00:00.582514096 +35 64 0 days 00:00:00.837649117 +35 65 0 days 00:00:00.453004050 +35 66 0 days 00:00:00.510563345 +35 67 0 days 00:00:00.380963275 +35 68 0 days 00:00:00.481148010 +35 69 0 days 00:00:00.297780653 +35 70 0 days 00:00:00.706393712 +35 71 0 days 00:00:00.523627605 +35 72 0 days 00:00:00.745045030 +35 73 0 days 00:00:00.477031225 +35 74 0 days 00:00:00.573801312 +35 75 0 days 00:00:00.537574930 +35 76 0 days 00:00:00.317600960 +35 77 0 days 
00:00:00.632661188 +35 78 0 days 00:00:00.843564290 +35 79 0 days 00:00:00.628811520 +35 80 0 days 00:00:00.954338900 +35 81 0 days 00:00:00.491683215 +35 82 0 days 00:00:01.018558050 +35 83 0 days 00:00:00.916199300 +35 84 0 days 00:00:00.889431015 +35 85 0 days 00:00:00.557884286 +35 86 0 days 00:00:01.157204648 +35 87 0 days 00:00:00.547822790 +35 88 0 days 00:00:00.863802310 +35 89 0 days 00:00:00.815163840 +35 90 0 days 00:00:00.547541435 +35 91 0 days 00:00:00.716982400 +35 92 0 days 00:00:00.312495430 +35 93 0 days 00:00:00.398224125 +35 94 0 days 00:00:01.403928488 +35 95 0 days 00:00:00.344552065 +35 96 0 days 00:00:00.753397960 +35 97 0 days 00:00:00.507622476 +35 98 0 days 00:00:00.904046365 +35 99 0 days 00:00:00.437791150 +35 100 0 days 00:00:00.875278235 +36 1 0 days 00:00:00.323633105 +36 2 0 days 00:00:00.367210240 +36 3 0 days 00:00:00.417804290 +36 4 0 days 00:00:00.263367255 +36 5 0 days 00:00:00.260992135 +36 6 0 days 00:00:00.497350244 +36 7 0 days 00:00:00.140094505 +36 8 0 days 00:00:00.252451872 +36 9 0 days 00:00:00.201428970 +36 10 0 days 00:00:00.422439400 +36 11 0 days 00:00:00.166108115 +36 12 0 days 00:00:00.275329160 +36 13 0 days 00:00:00.248393685 +36 14 0 days 00:00:00.173470266 +36 15 0 days 00:00:00.168167515 +36 16 0 days 00:00:00.277486872 +36 17 0 days 00:00:00.157678885 +36 18 0 days 00:00:00.267079756 +36 19 0 days 00:00:00.204177208 +36 20 0 days 00:00:00.237904620 +36 21 0 days 00:00:00.320578340 +36 22 0 days 00:00:00.200815908 +36 23 0 days 00:00:00.243521420 +36 24 0 days 00:00:00.347776275 +36 25 0 days 00:00:00.480276034 +36 26 0 days 00:00:00.455193552 +36 27 0 days 00:00:00.151467195 +36 28 0 days 00:00:00.204197154 +36 29 0 days 00:00:00.159761310 +36 30 0 days 00:00:00.230256540 +36 31 0 days 00:00:00.412061390 +36 32 0 days 00:00:00.423171115 +36 33 0 days 00:00:00.526172456 +36 34 0 days 00:00:00.253481512 +36 35 0 days 00:00:00.327738420 +36 36 0 days 00:00:00.327819880 +36 37 0 days 00:00:00.434123905 +36 38 0 
days 00:00:00.352379550 +36 39 0 days 00:00:00.257332372 +36 40 0 days 00:00:00.447318488 +36 41 0 days 00:00:00.234132995 +36 42 0 days 00:00:00.135978405 +36 43 0 days 00:00:00.278826893 +36 44 0 days 00:00:00.431773145 +36 45 0 days 00:00:00.288296267 +36 46 0 days 00:00:00.394079636 +36 47 0 days 00:00:00.592261925 +36 48 0 days 00:00:00.253522550 +36 49 0 days 00:00:00.468199650 +36 50 0 days 00:00:00.482150505 +36 51 0 days 00:00:00.454149732 +36 52 0 days 00:00:00.427349970 +36 53 0 days 00:00:00.430270440 +36 54 0 days 00:00:00.525811440 +36 55 0 days 00:00:00.413793690 +36 56 0 days 00:00:00.252033745 +36 57 0 days 00:00:00.136390615 +36 58 0 days 00:00:00.162365488 +36 59 0 days 00:00:00.229786040 +36 60 0 days 00:00:00.519786105 +36 61 0 days 00:00:00.551380153 +36 62 0 days 00:00:00.262324390 +36 63 0 days 00:00:00.453335034 +36 64 0 days 00:00:00.332403460 +36 65 0 days 00:00:00.145336065 +36 66 0 days 00:00:00.324453580 +36 67 0 days 00:00:00.195722920 +36 68 0 days 00:00:00.275647620 +36 69 0 days 00:00:00.201526696 +36 70 0 days 00:00:00.380878910 +36 71 0 days 00:00:00.190021575 +36 72 0 days 00:00:00.362675800 +36 73 0 days 00:00:00.231614245 +36 74 0 days 00:00:00.459881590 +36 75 0 days 00:00:00.386635625 +36 76 0 days 00:00:00.435700680 +36 77 0 days 00:00:00.228730095 +36 78 0 days 00:00:00.552807005 +36 79 0 days 00:00:00.391450850 +36 80 0 days 00:00:00.576869390 +36 81 0 days 00:00:00.280234385 +36 82 0 days 00:00:00.177580752 +36 83 0 days 00:00:00.263463993 +36 84 0 days 00:00:00.364521592 +36 85 0 days 00:00:00.235639175 +36 86 0 days 00:00:00.272647488 +36 87 0 days 00:00:00.167634080 +36 88 0 days 00:00:00.375311026 +36 89 0 days 00:00:00.442435890 +36 90 0 days 00:00:00.289653570 +36 91 0 days 00:00:00.137410125 +36 92 0 days 00:00:00.307155192 +36 93 0 days 00:00:00.469008705 +36 94 0 days 00:00:00.361961596 +36 95 0 days 00:00:00.424105780 +36 96 0 days 00:00:00.617707628 +36 97 0 days 00:00:00.442300480 +36 98 0 days 
00:00:00.185307280 +36 99 0 days 00:00:00.196218080 +36 100 0 days 00:00:00.477672625 +37 1 0 days 00:00:00.419382892 +37 2 0 days 00:00:00.153188940 +37 3 0 days 00:00:00.192083366 +37 4 0 days 00:00:00.241609400 +37 5 0 days 00:00:00.262048971 +37 6 0 days 00:00:00.643160080 +37 7 0 days 00:00:00.246662540 +37 8 0 days 00:00:00.239794076 +37 9 0 days 00:00:00.269676952 +37 10 0 days 00:00:00.489205715 +37 11 0 days 00:00:00.146838220 +37 12 0 days 00:00:00.481534860 +37 13 0 days 00:00:00.475451980 +37 14 0 days 00:00:00.241076050 +37 15 0 days 00:00:00.238120750 +37 16 0 days 00:00:00.254863345 +37 17 0 days 00:00:00.205330620 +37 18 0 days 00:00:00.477389740 +37 19 0 days 00:00:00.229651465 +37 20 0 days 00:00:00.140668640 +37 21 0 days 00:00:00.264117312 +37 22 0 days 00:00:00.439356825 +37 23 0 days 00:00:00.245461030 +37 24 0 days 00:00:00.519136124 +37 25 0 days 00:00:00.246651165 +37 26 0 days 00:00:00.352050770 +37 27 0 days 00:00:00.437287785 +37 28 0 days 00:00:00.222612160 +37 29 0 days 00:00:00.373054640 +37 30 0 days 00:00:00.436993355 +37 31 0 days 00:00:00.447049735 +37 32 0 days 00:00:00.285157920 +37 33 0 days 00:00:00.237429405 +37 34 0 days 00:00:00.164648560 +37 35 0 days 00:00:00.144124625 +37 36 0 days 00:00:00.288665900 +37 37 0 days 00:00:00.286593715 +37 38 0 days 00:00:00.327821450 +37 39 0 days 00:00:00.252619780 +37 40 0 days 00:00:00.173995555 +37 41 0 days 00:00:00.310130373 +37 42 0 days 00:00:00.184233320 +37 43 0 days 00:00:00.594171844 +37 44 0 days 00:00:00.444604460 +37 45 0 days 00:00:00.434002720 +37 46 0 days 00:00:00.477422675 +37 47 0 days 00:00:00.191914480 +37 48 0 days 00:00:00.630039555 +37 49 0 days 00:00:00.249817145 +37 50 0 days 00:00:00.222207290 +37 51 0 days 00:00:00.437415065 +37 52 0 days 00:00:00.287536412 +37 53 0 days 00:00:00.296083620 +37 54 0 days 00:00:00.499441635 +37 55 0 days 00:00:00.444110200 +37 56 0 days 00:00:00.442704975 +37 57 0 days 00:00:00.456900285 +37 58 0 days 00:00:00.497979724 +37 59 0 
days 00:00:00.237435435 +37 60 0 days 00:00:00.484044096 +37 61 0 days 00:00:00.263958320 +37 62 0 days 00:00:00.398164576 +37 63 0 days 00:00:00.440090935 +37 64 0 days 00:00:00.242481626 +37 65 0 days 00:00:00.138924300 +37 66 0 days 00:00:00.246953910 +37 67 0 days 00:00:00.487362680 +37 68 0 days 00:00:00.264270510 +37 69 0 days 00:00:00.197793220 +37 70 0 days 00:00:00.682277360 +37 71 0 days 00:00:00.546457096 +37 72 0 days 00:00:00.473508708 +37 73 0 days 00:00:00.139798125 +37 74 0 days 00:00:00.544795720 +37 75 0 days 00:00:00.390932000 +37 76 0 days 00:00:00.441726580 +37 77 0 days 00:00:00.241708725 +37 78 0 days 00:00:00.471179208 +37 79 0 days 00:00:00.441373330 +37 80 0 days 00:00:00.178982094 +37 81 0 days 00:00:00.152924130 +37 82 0 days 00:00:00.413017840 +37 83 0 days 00:00:00.390158860 +37 84 0 days 00:00:00.443228325 +37 85 0 days 00:00:00.497242213 +37 86 0 days 00:00:00.266159140 +37 87 0 days 00:00:00.663563770 +37 88 0 days 00:00:00.195894906 +37 89 0 days 00:00:00.286614540 +37 90 0 days 00:00:00.319209415 +37 91 0 days 00:00:00.266778224 +37 92 0 days 00:00:00.171651530 +37 93 0 days 00:00:00.272983163 +37 94 0 days 00:00:00.577023550 +37 95 0 days 00:00:00.171056512 +37 96 0 days 00:00:00.323497553 +37 97 0 days 00:00:00.446905300 +37 98 0 days 00:00:00.183471420 +37 99 0 days 00:00:00.254601715 +37 100 0 days 00:00:00.271495364 +38 1 0 days 00:00:01.076265570 +38 2 0 days 00:00:00.554836840 +38 3 0 days 00:00:00.673078245 +38 4 0 days 00:00:00.701355635 +38 5 0 days 00:00:00.401184875 +38 6 0 days 00:00:00.909505637 +38 7 0 days 00:00:01.150654920 +38 8 0 days 00:00:00.633425131 +38 9 0 days 00:00:00.645573470 +38 10 0 days 00:00:01.177352136 +38 11 0 days 00:00:01.508156360 +38 12 0 days 00:00:00.489330300 +38 13 0 days 00:00:01.249528546 +38 14 0 days 00:00:02.218897230 +38 15 0 days 00:00:01.370129668 +38 16 0 days 00:00:00.362084685 +38 17 0 days 00:00:00.498299220 +38 18 0 days 00:00:00.517892503 +38 19 0 days 00:00:01.077457095 +38 
20 0 days 00:00:00.441132280 +38 21 0 days 00:00:00.830365466 +38 22 0 days 00:00:01.503892178 +38 23 0 days 00:00:01.310950102 +38 24 0 days 00:00:01.318378096 +38 25 0 days 00:00:01.112533530 +38 26 0 days 00:00:01.746630597 +38 27 0 days 00:00:01.215390933 +38 28 0 days 00:00:00.582863345 +38 29 0 days 00:00:00.273401965 +38 30 0 days 00:00:01.330931863 +38 31 0 days 00:00:00.853092646 +38 32 0 days 00:00:01.145255485 +38 33 0 days 00:00:02.006614590 +38 34 0 days 00:00:01.417150135 +38 35 0 days 00:00:01.219837125 +38 36 0 days 00:00:01.093249710 +38 37 0 days 00:00:01.240548683 +38 38 0 days 00:00:01.533937245 +38 39 0 days 00:00:01.914222603 +38 40 0 days 00:00:00.779106905 +38 41 0 days 00:00:00.347038355 +38 42 0 days 00:00:01.575737863 +38 43 0 days 00:00:00.930047410 +38 44 0 days 00:00:01.237953135 +38 45 0 days 00:00:00.952647505 +38 46 0 days 00:00:01.026847730 +38 47 0 days 00:00:01.402038807 +38 48 0 days 00:00:00.690408216 +38 49 0 days 00:00:01.767923825 +38 50 0 days 00:00:01.300153215 +38 51 0 days 00:00:01.859106735 +38 52 0 days 00:00:02.241464116 +38 53 0 days 00:00:00.796698395 +38 54 0 days 00:00:01.020298504 +38 55 0 days 00:00:01.249510810 +38 56 0 days 00:00:01.029639280 +38 57 0 days 00:00:00.486867080 +38 58 0 days 00:00:01.511436285 +38 59 0 days 00:00:00.478362165 +38 60 0 days 00:00:01.459942417 +38 61 0 days 00:00:00.463045880 +38 62 0 days 00:00:01.152850995 +38 63 0 days 00:00:01.293119753 +38 64 0 days 00:00:00.611222195 +38 65 0 days 00:00:00.873089748 +38 66 0 days 00:00:00.353247335 +38 67 0 days 00:00:00.648260290 +38 68 0 days 00:00:01.143194010 +38 69 0 days 00:00:00.860147095 +38 70 0 days 00:00:01.094432865 +38 71 0 days 00:00:01.248412564 +38 72 0 days 00:00:01.384822280 +38 73 0 days 00:00:01.221193720 +38 74 0 days 00:00:00.546090625 +38 75 0 days 00:00:00.887660505 +38 76 0 days 00:00:01.133750755 +38 77 0 days 00:00:01.498508200 +38 78 0 days 00:00:01.118720340 +38 79 0 days 00:00:00.630691493 +38 80 0 days 
00:00:01.489447170 +38 81 0 days 00:00:00.853094220 +38 82 0 days 00:00:00.515025574 +38 83 0 days 00:00:00.382692835 +38 84 0 days 00:00:00.638871020 +38 85 0 days 00:00:00.685556095 +38 86 0 days 00:00:02.028844696 +38 87 0 days 00:00:01.648862690 +38 88 0 days 00:00:01.523438580 +38 89 0 days 00:00:00.770207680 +38 90 0 days 00:00:00.442099940 +38 91 0 days 00:00:00.494520035 +38 92 0 days 00:00:00.639363210 +38 93 0 days 00:00:00.540099605 +38 94 0 days 00:00:00.879247120 +38 95 0 days 00:00:01.236561445 +38 96 0 days 00:00:00.781954130 +38 97 0 days 00:00:00.663613625 +38 98 0 days 00:00:01.105596290 +38 99 0 days 00:00:00.900959655 +38 100 0 days 00:00:01.362848580 +39 1 0 days 00:00:00.309142215 +39 2 0 days 00:00:00.436077025 +39 3 0 days 00:00:00.365670850 +39 4 0 days 00:00:01.220191912 +39 5 0 days 00:00:00.502025690 +39 6 0 days 00:00:01.013742156 +39 7 0 days 00:00:00.350830184 +39 8 0 days 00:00:00.628487910 +39 9 0 days 00:00:00.230460460 +39 10 0 days 00:00:00.186412145 +39 11 0 days 00:00:00.576381875 +39 12 0 days 00:00:00.340748950 +39 13 0 days 00:00:00.311832900 +39 14 0 days 00:00:00.409158253 +39 15 0 days 00:00:00.645683782 +39 16 0 days 00:00:00.317924075 +39 17 0 days 00:00:00.283728000 +39 18 0 days 00:00:00.594896330 +39 19 0 days 00:00:00.588225685 +39 20 0 days 00:00:00.479085995 +39 21 0 days 00:00:00.379199290 +39 22 0 days 00:00:00.721663835 +39 23 0 days 00:00:00.299651675 +39 24 0 days 00:00:00.380509600 +39 25 0 days 00:00:00.835602951 +39 26 0 days 00:00:00.394464590 +39 27 0 days 00:00:00.436825100 +39 28 0 days 00:00:00.696969916 +39 29 0 days 00:00:00.328504790 +39 30 0 days 00:00:00.215427116 +39 31 0 days 00:00:00.575012965 +39 32 0 days 00:00:00.405811228 +39 33 0 days 00:00:00.759177072 +39 34 0 days 00:00:00.183339915 +39 35 0 days 00:00:00.132973555 +39 36 0 days 00:00:00.257265990 +39 37 0 days 00:00:00.182513536 +39 38 0 days 00:00:00.879203493 +39 39 0 days 00:00:00.643025425 +39 40 0 days 00:00:00.724472330 +39 41 0 
days 00:00:00.838277575 +39 42 0 days 00:00:00.648809585 +39 43 0 days 00:00:00.763072012 +39 44 0 days 00:00:00.639144415 +39 45 0 days 00:00:00.331170345 +39 46 0 days 00:00:00.349956650 +39 47 0 days 00:00:00.876652111 +39 48 0 days 00:00:00.515316067 +39 49 0 days 00:00:00.204714244 +39 50 0 days 00:00:00.542388892 +39 51 0 days 00:00:00.486011340 +39 52 0 days 00:00:01.148697137 +39 53 0 days 00:00:00.769588990 +39 54 0 days 00:00:00.312938720 +39 55 0 days 00:00:00.781847065 +39 56 0 days 00:00:00.235906945 +39 57 0 days 00:00:00.438791000 +39 58 0 days 00:00:00.774191317 +39 59 0 days 00:00:00.670839353 +39 60 0 days 00:00:00.275075435 +39 61 0 days 00:00:00.337754371 +39 62 0 days 00:00:00.358281560 +39 63 0 days 00:00:00.197170245 +39 64 0 days 00:00:00.344055945 +39 65 0 days 00:00:00.264401451 +39 66 0 days 00:00:00.446593380 +39 67 0 days 00:00:00.418708568 +39 68 0 days 00:00:00.249980896 +39 69 0 days 00:00:00.350067245 +39 70 0 days 00:00:00.748632627 +39 71 0 days 00:00:00.281885593 +39 72 0 days 00:00:00.770849202 +39 73 0 days 00:00:00.783560670 +39 74 0 days 00:00:00.430701796 +39 75 0 days 00:00:00.368961390 +39 76 0 days 00:00:00.307456534 +39 77 0 days 00:00:00.372581025 +39 78 0 days 00:00:00.237126780 +39 79 0 days 00:00:00.305235620 +39 80 0 days 00:00:00.758329165 +39 81 0 days 00:00:00.511026154 +39 82 0 days 00:00:00.305313788 +39 83 0 days 00:00:00.236411845 +39 84 0 days 00:00:00.233157795 +39 85 0 days 00:00:01.147652003 +39 86 0 days 00:00:00.351798990 +39 87 0 days 00:00:00.277576500 +39 88 0 days 00:00:00.458640580 +39 89 0 days 00:00:00.735436515 +39 90 0 days 00:00:01.073270502 +39 91 0 days 00:00:00.639214570 +39 92 0 days 00:00:00.426599805 +39 93 0 days 00:00:00.785140785 +39 94 0 days 00:00:00.254887852 +39 95 0 days 00:00:00.304406863 +39 96 0 days 00:00:00.937686590 +39 97 0 days 00:00:00.203792188 +39 98 0 days 00:00:00.226839710 +39 99 0 days 00:00:00.563466240 +39 100 0 days 00:00:00.628133390 +40 1 0 days 
00:00:01.530742313 +40 2 0 days 00:00:01.449419284 +40 3 0 days 00:00:00.837708730 +40 4 0 days 00:00:00.577124830 +40 5 0 days 00:00:01.886509286 +40 6 0 days 00:00:01.137369980 +40 7 0 days 00:00:01.229289480 +40 8 0 days 00:00:00.365097164 +40 9 0 days 00:00:01.466533000 +40 10 0 days 00:00:01.162324550 +40 11 0 days 00:00:01.365696575 +40 12 0 days 00:00:00.880799586 +40 13 0 days 00:00:01.019444700 +40 14 0 days 00:00:01.226589285 +40 15 0 days 00:00:01.045052423 +40 16 0 days 00:00:01.125662472 +40 17 0 days 00:00:00.597583028 +40 18 0 days 00:00:01.135511470 +40 19 0 days 00:00:01.339306736 +40 20 0 days 00:00:01.350292156 +40 21 0 days 00:00:02.122336510 +40 22 0 days 00:00:01.130645800 +40 23 0 days 00:00:00.783403550 +40 24 0 days 00:00:01.071705955 +40 25 0 days 00:00:00.381068596 +40 26 0 days 00:00:00.795971503 +40 27 0 days 00:00:00.797268065 +40 28 0 days 00:00:00.920920045 +40 29 0 days 00:00:00.335113426 +40 30 0 days 00:00:00.645583662 +40 31 0 days 00:00:01.012309425 +40 32 0 days 00:00:00.459673655 +40 33 0 days 00:00:00.884868695 +40 34 0 days 00:00:00.809213655 +40 35 0 days 00:00:01.451217795 +40 36 0 days 00:00:01.303328985 +40 37 0 days 00:00:01.366407696 +40 38 0 days 00:00:00.549576370 +40 39 0 days 00:00:00.741289935 +40 40 0 days 00:00:01.884171985 +40 41 0 days 00:00:00.657192972 +40 42 0 days 00:00:01.167690568 +40 43 0 days 00:00:00.445010685 +40 44 0 days 00:00:01.203582764 +40 45 0 days 00:00:01.520821593 +40 46 0 days 00:00:01.895638895 +40 47 0 days 00:00:01.831177813 +40 48 0 days 00:00:00.954030775 +40 49 0 days 00:00:01.222108420 +40 50 0 days 00:00:00.478616640 +40 51 0 days 00:00:01.420509984 +40 52 0 days 00:00:00.332615895 +40 53 0 days 00:00:01.312014070 +40 54 0 days 00:00:00.710836420 +40 55 0 days 00:00:01.093586020 +40 56 0 days 00:00:00.992932673 +40 57 0 days 00:00:00.664961045 +40 58 0 days 00:00:01.248004095 +40 59 0 days 00:00:00.936308460 +40 60 0 days 00:00:00.446324720 +40 61 0 days 00:00:01.630603520 +40 62 0 
days 00:00:00.589906740 +40 63 0 days 00:00:01.024682225 +40 64 0 days 00:00:01.484440525 +40 65 0 days 00:00:01.163649945 +40 66 0 days 00:00:00.581039915 +40 67 0 days 00:00:00.934682715 +40 68 0 days 00:00:00.388039180 +40 69 0 days 00:00:01.100771613 +40 70 0 days 00:00:00.788767460 +40 71 0 days 00:00:01.597674215 +40 72 0 days 00:00:00.642394155 +40 73 0 days 00:00:01.940977360 +40 74 0 days 00:00:00.516757760 +40 75 0 days 00:00:01.405096366 +40 76 0 days 00:00:00.615426290 +40 77 0 days 00:00:01.221470060 +40 78 0 days 00:00:00.697516740 +40 79 0 days 00:00:01.069814250 +40 80 0 days 00:00:01.105898435 +40 81 0 days 00:00:01.378451744 +40 82 0 days 00:00:01.386552165 +40 83 0 days 00:00:01.287501164 +40 84 0 days 00:00:01.420273131 +40 85 0 days 00:00:01.308055880 +40 86 0 days 00:00:01.299868775 +40 87 0 days 00:00:01.321962416 +40 88 0 days 00:00:01.549130970 +40 89 0 days 00:00:00.687719185 +40 90 0 days 00:00:00.347567485 +40 91 0 days 00:00:00.390063246 +40 92 0 days 00:00:01.178561020 +40 93 0 days 00:00:00.711836395 +40 94 0 days 00:00:00.938054224 +40 95 0 days 00:00:02.124260873 +40 96 0 days 00:00:00.778518375 +40 97 0 days 00:00:00.537702892 +40 98 0 days 00:00:01.526605180 +40 99 0 days 00:00:00.959547080 +40 100 0 days 00:00:00.747357000 +41 1 0 days 00:00:00.152378816 +41 2 0 days 00:00:00.319253600 +41 3 0 days 00:00:00.434248724 +41 4 0 days 00:00:00.179309785 +41 5 0 days 00:00:00.641039584 +41 6 0 days 00:00:00.220100326 +41 7 0 days 00:00:00.261013710 +41 8 0 days 00:00:00.480901550 +41 9 0 days 00:00:00.484811990 +41 10 0 days 00:00:00.578166956 +41 11 0 days 00:00:00.595723005 +41 12 0 days 00:00:00.455965935 +41 13 0 days 00:00:00.269428510 +41 14 0 days 00:00:00.442516840 +41 15 0 days 00:00:00.465482425 +41 16 0 days 00:00:00.409935157 +41 17 0 days 00:00:00.271006295 +41 18 0 days 00:00:00.710044750 +41 19 0 days 00:00:00.330061425 +41 20 0 days 00:00:00.764039346 +41 21 0 days 00:00:00.486972104 +41 22 0 days 00:00:00.406064020 +41 
23 0 days 00:00:00.322314765 +41 24 0 days 00:00:00.271172470 +41 25 0 days 00:00:00.221818460 +41 26 0 days 00:00:00.712708290 +41 27 0 days 00:00:00.340299517 +41 28 0 days 00:00:00.521399660 +41 29 0 days 00:00:00.198217310 +41 30 0 days 00:00:00.221046060 +41 31 0 days 00:00:00.193483380 +41 32 0 days 00:00:00.451550120 +41 33 0 days 00:00:00.221088656 +41 34 0 days 00:00:00.228851265 +41 35 0 days 00:00:00.444784820 +41 36 0 days 00:00:00.499279440 +41 37 0 days 00:00:00.155049040 +41 38 0 days 00:00:00.483912795 +41 39 0 days 00:00:00.498255905 +41 40 0 days 00:00:00.160453500 +41 41 0 days 00:00:00.344523268 +41 42 0 days 00:00:00.678835540 +41 43 0 days 00:00:00.500053885 +41 44 0 days 00:00:00.380600564 +41 45 0 days 00:00:00.402719695 +41 46 0 days 00:00:00.530125052 +41 47 0 days 00:00:00.249637120 +41 48 0 days 00:00:00.259214380 +41 49 0 days 00:00:00.301873008 +41 50 0 days 00:00:00.202852270 +41 51 0 days 00:00:00.308902444 +41 52 0 days 00:00:00.250146790 +41 53 0 days 00:00:00.181182005 +41 54 0 days 00:00:00.471411720 +41 55 0 days 00:00:00.303351895 +41 56 0 days 00:00:00.557019560 +41 57 0 days 00:00:00.434090110 +41 58 0 days 00:00:00.411048210 +41 59 0 days 00:00:00.396955980 +41 60 0 days 00:00:00.435968875 +41 61 0 days 00:00:00.312429020 +41 62 0 days 00:00:00.589615415 +41 63 0 days 00:00:00.198680645 +41 64 0 days 00:00:00.235621900 +41 65 0 days 00:00:00.328431480 +41 66 0 days 00:00:00.208248480 +41 67 0 days 00:00:00.168749425 +41 68 0 days 00:00:00.449663270 +41 69 0 days 00:00:00.324417716 +41 70 0 days 00:00:00.123040040 +41 71 0 days 00:00:00.374539960 +41 72 0 days 00:00:00.526311628 +41 73 0 days 00:00:00.384061490 +41 74 0 days 00:00:00.310919345 +41 75 0 days 00:00:00.170921896 +41 76 0 days 00:00:00.215682395 +41 77 0 days 00:00:00.280555240 +41 78 0 days 00:00:00.491628965 +41 79 0 days 00:00:00.168764070 +41 80 0 days 00:00:00.365629232 +41 81 0 days 00:00:00.418600412 +41 82 0 days 00:00:00.386118288 +41 83 0 days 
00:00:00.519484366 +41 84 0 days 00:00:00.487264136 +41 85 0 days 00:00:00.342362000 +41 86 0 days 00:00:00.477061584 +41 87 0 days 00:00:00.258756720 +41 88 0 days 00:00:00.157353975 +41 89 0 days 00:00:00.378028260 +41 90 0 days 00:00:00.212216290 +41 91 0 days 00:00:00.370730340 +41 92 0 days 00:00:00.258823015 +41 93 0 days 00:00:00.500301256 +41 94 0 days 00:00:00.172782996 +41 95 0 days 00:00:00.354215830 +41 96 0 days 00:00:00.381105790 +41 97 0 days 00:00:00.640621844 +41 98 0 days 00:00:00.479743590 +41 99 0 days 00:00:00.580228070 +41 100 0 days 00:00:00.605137936 +42 1 0 days 00:00:05.785865424 +42 2 0 days 00:00:04.105505950 +42 3 0 days 00:00:03.168170795 +42 4 0 days 00:00:05.796417382 +42 5 0 days 00:00:03.093629740 +42 6 0 days 00:00:03.389573808 +42 7 0 days 00:00:10.058321455 +42 8 0 days 00:00:09.471558230 +42 9 0 days 00:00:05.817406875 +42 10 0 days 00:00:10.494748680 +42 11 0 days 00:00:05.858018580 +42 12 0 days 00:00:06.127480347 +42 13 0 days 00:00:04.949626245 +42 14 0 days 00:00:06.031307256 +42 15 0 days 00:00:03.973443800 +42 16 0 days 00:00:10.312839726 +42 17 0 days 00:00:08.816278645 +42 18 0 days 00:00:03.854587113 +42 19 0 days 00:00:06.933633200 +42 20 0 days 00:00:09.276647856 +42 21 0 days 00:00:08.908569265 +42 22 0 days 00:00:09.193192440 +42 23 0 days 00:00:03.586560380 +42 24 0 days 00:00:04.923566705 +42 25 0 days 00:00:06.416620495 +42 26 0 days 00:00:05.993597780 +42 27 0 days 00:00:03.297486916 +42 28 0 days 00:00:04.857276965 +42 29 0 days 00:00:04.646235295 +42 30 0 days 00:00:08.109717180 +42 31 0 days 00:00:08.555824250 +42 32 0 days 00:00:05.656687645 +42 33 0 days 00:00:09.980541971 +42 34 0 days 00:00:05.603293160 +42 35 0 days 00:00:08.153032900 +42 36 0 days 00:00:03.785062896 +42 37 0 days 00:00:10.611350663 +42 38 0 days 00:00:07.161287537 +42 39 0 days 00:00:03.595016915 +42 40 0 days 00:00:05.899514374 +42 41 0 days 00:00:07.099282191 +42 42 0 days 00:00:10.504917082 +42 43 0 days 00:00:10.125839630 +42 44 0 
days 00:00:11.947638525 +42 45 0 days 00:00:05.364411390 +43 1 0 days 00:00:03.850282255 +43 2 0 days 00:00:10.101833682 +43 3 0 days 00:00:05.365657728 +43 4 0 days 00:00:05.106313780 +43 5 0 days 00:00:05.724934490 +43 6 0 days 00:00:05.337385905 +43 7 0 days 00:00:05.630936810 +43 8 0 days 00:00:05.350663065 +43 9 0 days 00:00:05.352641585 +43 10 0 days 00:00:03.867377097 +43 11 0 days 00:00:05.117275910 +43 12 0 days 00:00:13.071360957 +43 13 0 days 00:00:13.160379384 +43 14 0 days 00:00:05.311220065 +43 15 0 days 00:00:09.206307930 +43 16 0 days 00:00:09.628948620 +43 17 0 days 00:00:03.570132270 +43 18 0 days 00:00:05.379636560 +43 19 0 days 00:00:03.929046380 +43 20 0 days 00:00:03.286942485 +43 21 0 days 00:00:06.095563520 +43 22 0 days 00:00:03.755986740 +43 23 0 days 00:00:03.720191030 +43 24 0 days 00:00:05.167100185 +43 25 0 days 00:00:03.324488175 +43 26 0 days 00:00:04.799122095 +43 27 0 days 00:00:03.275332345 +43 28 0 days 00:00:12.560323385 +43 29 0 days 00:00:10.105703344 +43 30 0 days 00:00:03.631790353 +43 31 0 days 00:00:09.711504660 +43 32 0 days 00:00:11.908708580 +43 33 0 days 00:00:08.795742486 +43 34 0 days 00:00:05.461771704 +43 35 0 days 00:00:04.195571248 +43 36 0 days 00:00:04.672645324 +43 37 0 days 00:00:03.383912636 +43 38 0 days 00:00:09.224823085 +43 39 0 days 00:00:06.471553748 +43 40 0 days 00:00:03.700847885 +43 41 0 days 00:00:05.426773770 +43 42 0 days 00:00:03.292258875 +43 43 0 days 00:00:05.640402990 +43 44 0 days 00:00:04.707404885 +43 45 0 days 00:00:08.958762770 +43 46 0 days 00:00:09.801035030 +43 47 0 days 00:00:10.520257980 +43 48 0 days 00:00:07.061604560 +43 49 0 days 00:00:05.714848650 +43 50 0 days 00:00:05.423409144 +43 51 0 days 00:00:09.538576212 +43 52 0 days 00:00:05.505898385 +43 53 0 days 00:00:03.845180540 +43 54 0 days 00:00:09.224305590 +43 55 0 days 00:00:09.950525420 +43 56 0 days 00:00:08.873735055 +43 57 0 days 00:00:08.986225705 +44 1 0 days 00:00:04.775227355 +44 2 0 days 00:00:04.351758375 +44 3 
0 days 00:00:01.699061336 +44 4 0 days 00:00:01.678053005 +44 5 0 days 00:00:03.003472075 +44 6 0 days 00:00:02.656236415 +44 7 0 days 00:00:02.654477445 +44 8 0 days 00:00:02.026721290 +44 9 0 days 00:00:04.541900200 +44 10 0 days 00:00:02.659615190 +44 11 0 days 00:00:04.014046313 +44 12 0 days 00:00:01.910847810 +44 13 0 days 00:00:03.723301530 +44 14 0 days 00:00:03.465599105 +44 15 0 days 00:00:04.310509652 +44 16 0 days 00:00:05.012451484 +44 17 0 days 00:00:02.631599245 +44 18 0 days 00:00:05.482779640 +44 19 0 days 00:00:02.638934665 +44 20 0 days 00:00:04.360042315 +44 21 0 days 00:00:03.133482925 +44 22 0 days 00:00:02.219223780 +44 23 0 days 00:00:02.547730290 +44 24 0 days 00:00:01.736468935 +44 25 0 days 00:00:01.738359283 +44 26 0 days 00:00:02.401878560 +44 27 0 days 00:00:06.414712945 +44 28 0 days 00:00:01.986524090 +44 29 0 days 00:00:03.066026970 +44 30 0 days 00:00:03.256429451 +44 31 0 days 00:00:01.920785000 +44 32 0 days 00:00:04.361307490 +44 33 0 days 00:00:01.992353636 +44 34 0 days 00:00:02.688725365 +44 35 0 days 00:00:02.637482145 +44 36 0 days 00:00:04.302010260 +44 37 0 days 00:00:02.994687242 +44 38 0 days 00:00:03.086203070 +44 39 0 days 00:00:02.496955095 +44 40 0 days 00:00:02.023349317 +44 41 0 days 00:00:02.715541370 +44 42 0 days 00:00:04.745457790 +44 43 0 days 00:00:01.713478588 +44 44 0 days 00:00:01.774867240 +44 45 0 days 00:00:01.727154620 +44 46 0 days 00:00:06.290703968 +44 47 0 days 00:00:02.555690980 +44 48 0 days 00:00:02.648698285 +44 49 0 days 00:00:03.571853185 +44 50 0 days 00:00:02.658836960 +44 51 0 days 00:00:05.259012980 +44 52 0 days 00:00:04.565773596 +44 53 0 days 00:00:02.170379420 +44 54 0 days 00:00:01.696307260 +44 55 0 days 00:00:05.988954390 +44 56 0 days 00:00:04.291473620 +44 57 0 days 00:00:05.073180442 +44 58 0 days 00:00:05.601665615 +44 59 0 days 00:00:05.250471735 +44 60 0 days 00:00:03.035671222 +44 61 0 days 00:00:03.009862030 +44 62 0 days 00:00:01.821349642 +44 63 0 days 00:00:02.817936842 
+44 64 0 days 00:00:04.370920175 +44 65 0 days 00:00:05.619380825 +44 66 0 days 00:00:04.864528280 +44 67 0 days 00:00:01.744825203 +44 68 0 days 00:00:04.654798065 +44 69 0 days 00:00:02.560524355 +44 70 0 days 00:00:02.533071045 +44 71 0 days 00:00:02.012474815 +44 72 0 days 00:00:04.366956130 +44 73 0 days 00:00:03.561188472 +44 74 0 days 00:00:05.229391900 +44 75 0 days 00:00:01.741830380 +44 76 0 days 00:00:05.269817935 +44 77 0 days 00:00:05.641671550 +44 78 0 days 00:00:04.531360440 +44 79 0 days 00:00:02.298755540 +44 80 0 days 00:00:01.659072075 +44 81 0 days 00:00:02.692480055 +44 82 0 days 00:00:01.791036065 +44 83 0 days 00:00:02.892859245 +44 84 0 days 00:00:01.694213175 +44 85 0 days 00:00:02.691425345 +44 86 0 days 00:00:06.626697530 +44 87 0 days 00:00:04.532605730 +44 88 0 days 00:00:04.534326165 +44 89 0 days 00:00:02.523984750 +44 90 0 days 00:00:03.025238173 +44 91 0 days 00:00:02.671149780 +44 92 0 days 00:00:05.249636921 +44 93 0 days 00:00:01.957637314 +44 94 0 days 00:00:01.870429635 +44 95 0 days 00:00:04.500556740 +44 96 0 days 00:00:01.690702292 +44 97 0 days 00:00:01.651353334 +44 98 0 days 00:00:02.918247440 +44 99 0 days 00:00:02.293486646 +44 100 0 days 00:00:03.202011735 +45 1 0 days 00:00:05.368162440 +45 2 0 days 00:00:02.298061985 +45 3 0 days 00:00:04.552874410 +45 4 0 days 00:00:02.754479185 +45 5 0 days 00:00:03.013298840 +45 6 0 days 00:00:04.658981070 +45 7 0 days 00:00:03.696141605 +45 8 0 days 00:00:02.756388975 +45 9 0 days 00:00:04.545222060 +45 10 0 days 00:00:03.279300580 +45 11 0 days 00:00:05.031193990 +45 12 0 days 00:00:01.821969556 +45 13 0 days 00:00:03.988308742 +45 14 0 days 00:00:06.568470205 +45 15 0 days 00:00:01.750218832 +45 16 0 days 00:00:03.488068340 +45 17 0 days 00:00:02.054942488 +45 18 0 days 00:00:03.649250060 +45 19 0 days 00:00:01.746830052 +45 20 0 days 00:00:04.905648675 +45 21 0 days 00:00:01.891950224 +45 22 0 days 00:00:01.947349062 +45 23 0 days 00:00:03.300552190 +45 24 0 days 
00:00:03.061982540 +45 25 0 days 00:00:02.654084145 +45 26 0 days 00:00:03.338566835 +45 27 0 days 00:00:03.651518950 +45 28 0 days 00:00:02.666973936 +45 29 0 days 00:00:03.319708580 +45 30 0 days 00:00:02.638274350 +45 31 0 days 00:00:02.764983925 +45 32 0 days 00:00:03.284697503 +45 33 0 days 00:00:02.791432690 +45 34 0 days 00:00:03.028529033 +45 35 0 days 00:00:02.207694890 +45 36 0 days 00:00:02.622726850 +45 37 0 days 00:00:04.586809664 +45 38 0 days 00:00:02.523729630 +45 39 0 days 00:00:03.882776520 +45 40 0 days 00:00:03.092351310 +45 41 0 days 00:00:01.874276260 +45 42 0 days 00:00:01.819474736 +45 43 0 days 00:00:04.273960843 +45 44 0 days 00:00:01.801281590 +45 45 0 days 00:00:06.046131260 +45 46 0 days 00:00:04.977920084 +45 47 0 days 00:00:04.867829755 +45 48 0 days 00:00:02.910061516 +45 49 0 days 00:00:03.183196455 +45 50 0 days 00:00:05.780295515 +45 51 0 days 00:00:04.671020613 +45 52 0 days 00:00:03.186961400 +45 53 0 days 00:00:04.335793968 +45 54 0 days 00:00:02.753277535 +45 55 0 days 00:00:01.755968324 +45 56 0 days 00:00:02.691394810 +45 57 0 days 00:00:01.914899000 +45 58 0 days 00:00:02.789833120 +45 59 0 days 00:00:04.973676010 +45 60 0 days 00:00:02.556287920 +45 61 0 days 00:00:04.107387125 +45 62 0 days 00:00:04.588800905 +45 63 0 days 00:00:02.789463244 +45 64 0 days 00:00:02.634434540 +45 65 0 days 00:00:01.966692983 +45 66 0 days 00:00:03.691613095 +45 67 0 days 00:00:02.654339790 +45 68 0 days 00:00:02.723356610 +45 69 0 days 00:00:02.917717588 +45 70 0 days 00:00:02.997031508 +45 71 0 days 00:00:03.237888060 +45 72 0 days 00:00:01.808895255 +45 73 0 days 00:00:02.938898871 +45 74 0 days 00:00:01.827489120 +45 75 0 days 00:00:03.995565220 +45 76 0 days 00:00:03.510216746 +45 77 0 days 00:00:04.670474933 +45 78 0 days 00:00:04.087712233 +45 79 0 days 00:00:05.010370092 +45 80 0 days 00:00:03.913912836 +45 81 0 days 00:00:04.941533480 +45 82 0 days 00:00:02.128877424 +45 83 0 days 00:00:03.215826068 +45 84 0 days 00:00:04.849804290 
+45 85 0 days 00:00:05.140137908 +45 86 0 days 00:00:01.805329763 +45 87 0 days 00:00:02.766061245 +45 88 0 days 00:00:02.035688604 +45 89 0 days 00:00:05.628463842 +45 90 0 days 00:00:02.627433550 +45 91 0 days 00:00:03.033388820 +45 92 0 days 00:00:06.250319544 +45 93 0 days 00:00:03.058808985 +45 94 0 days 00:00:02.907988550 +45 95 0 days 00:00:04.790024228 +45 96 0 days 00:00:03.908060552 +45 97 0 days 00:00:01.940351130 +45 98 0 days 00:00:02.256597745 +45 99 0 days 00:00:02.762087651 +45 100 0 days 00:00:03.047280953 +46 1 0 days 00:00:09.704872110 +46 2 0 days 00:00:12.661697660 +46 3 0 days 00:00:10.435945928 +46 4 0 days 00:00:09.948827866 +46 5 0 days 00:00:14.000862576 +46 6 0 days 00:00:15.526371446 +46 7 0 days 00:00:05.422638973 +46 8 0 days 00:00:05.588162590 +46 9 0 days 00:00:12.936783717 +46 10 0 days 00:00:08.930434473 +46 11 0 days 00:00:05.075490440 +46 12 0 days 00:00:10.729660270 +46 13 0 days 00:00:13.506984685 +46 14 0 days 00:00:03.968778380 +46 15 0 days 00:00:06.288019692 +46 16 0 days 00:00:06.913607420 +46 17 0 days 00:00:10.990289938 +46 18 0 days 00:00:05.959481773 +46 19 0 days 00:00:03.387716275 +46 20 0 days 00:00:04.633547185 +46 21 0 days 00:00:03.828721633 +46 22 0 days 00:00:12.286446646 +46 23 0 days 00:00:09.123163584 +46 24 0 days 00:00:04.400250110 +46 25 0 days 00:00:06.466691737 +46 26 0 days 00:00:04.320949218 +46 27 0 days 00:00:11.332660485 +46 28 0 days 00:00:06.451051232 +46 29 0 days 00:00:07.231945605 +46 30 0 days 00:00:12.206629228 +46 31 0 days 00:00:05.836195980 +46 32 0 days 00:00:05.388533820 +46 33 0 days 00:00:07.832170875 +46 34 0 days 00:00:04.338644956 +46 35 0 days 00:00:11.257569110 +46 36 0 days 00:00:10.438252460 +47 1 0 days 00:00:05.686121720 +47 2 0 days 00:00:05.332830476 +47 3 0 days 00:00:03.340231385 +47 4 0 days 00:00:03.323970320 +47 5 0 days 00:00:02.457952290 +47 6 0 days 00:00:02.191115150 +47 7 0 days 00:00:05.389759425 +47 8 0 days 00:00:05.625580660 +47 9 0 days 00:00:03.612613522 +47 
10 0 days 00:00:06.270380985 +47 11 0 days 00:00:02.687842198 +47 12 0 days 00:00:03.073512190 +47 13 0 days 00:00:03.016593241 +47 14 0 days 00:00:01.829076950 +47 15 0 days 00:00:06.537767693 +47 16 0 days 00:00:01.797175005 +47 17 0 days 00:00:02.225203383 +47 18 0 days 00:00:02.276791405 +47 19 0 days 00:00:03.071058000 +47 20 0 days 00:00:02.716227455 +47 21 0 days 00:00:03.088632410 +47 22 0 days 00:00:01.855651845 +47 23 0 days 00:00:03.538776840 +47 24 0 days 00:00:04.728748463 +47 25 0 days 00:00:04.087499885 +47 26 0 days 00:00:04.163977830 +47 27 0 days 00:00:01.877008316 +47 28 0 days 00:00:05.093177692 +47 29 0 days 00:00:01.744873840 +47 30 0 days 00:00:03.265881520 +47 31 0 days 00:00:02.880468748 +47 32 0 days 00:00:02.017615843 +47 33 0 days 00:00:03.232275250 +47 34 0 days 00:00:04.738425485 +47 35 0 days 00:00:08.592165575 +47 36 0 days 00:00:02.106097365 +47 37 0 days 00:00:04.234076877 +47 38 0 days 00:00:04.819234045 +47 39 0 days 00:00:03.069993865 +47 40 0 days 00:00:02.105345848 +47 41 0 days 00:00:05.765081490 +47 42 0 days 00:00:05.792663695 +47 43 0 days 00:00:03.021750808 +47 44 0 days 00:00:06.009356134 +47 45 0 days 00:00:05.523271475 +47 46 0 days 00:00:03.339419990 +47 47 0 days 00:00:02.877627900 +47 48 0 days 00:00:02.385079490 +47 49 0 days 00:00:08.642308088 +47 50 0 days 00:00:01.770863195 +47 51 0 days 00:00:02.710191986 +47 52 0 days 00:00:07.257827800 +47 53 0 days 00:00:03.164021252 +47 54 0 days 00:00:03.097090003 +47 55 0 days 00:00:07.465608036 +47 56 0 days 00:00:03.480303261 +47 57 0 days 00:00:05.913987495 +47 58 0 days 00:00:03.393813792 +47 59 0 days 00:00:05.709839400 +47 60 0 days 00:00:02.878171150 +47 61 0 days 00:00:02.623453635 +47 62 0 days 00:00:04.450460546 +47 63 0 days 00:00:06.690111711 +47 64 0 days 00:00:05.224643230 +47 65 0 days 00:00:05.585923227 +47 66 0 days 00:00:04.049318645 +47 67 0 days 00:00:03.638609855 +47 68 0 days 00:00:06.007784347 +47 69 0 days 00:00:02.712281433 +48 1 0 days 
00:00:09.111985680 +48 2 0 days 00:00:10.506091313 +48 3 0 days 00:00:03.616710300 +48 4 0 days 00:00:09.021103985 +48 5 0 days 00:00:04.060481424 +48 6 0 days 00:00:09.591759885 +48 7 0 days 00:00:05.363131345 +48 8 0 days 00:00:03.736981700 +48 9 0 days 00:00:09.328472850 +48 10 0 days 00:00:09.961412606 +48 11 0 days 00:00:09.904643530 +48 12 0 days 00:00:03.418268472 +48 13 0 days 00:00:03.807069685 +48 14 0 days 00:00:10.296204036 +48 15 0 days 00:00:10.703685545 +48 16 0 days 00:00:05.948936756 +48 17 0 days 00:00:12.926924760 +48 18 0 days 00:00:04.542536075 +48 19 0 days 00:00:04.826676325 +48 20 0 days 00:00:07.428557645 +48 21 0 days 00:00:03.526872112 +48 22 0 days 00:00:05.208521993 +48 23 0 days 00:00:03.818252940 +48 24 0 days 00:00:05.305884275 +48 25 0 days 00:00:12.544530843 +48 26 0 days 00:00:06.587506165 +48 27 0 days 00:00:04.043587922 +48 28 0 days 00:00:09.925056315 +48 29 0 days 00:00:09.099089980 +48 30 0 days 00:00:06.054947597 +48 31 0 days 00:00:06.266929714 +48 32 0 days 00:00:05.488781764 +48 33 0 days 00:00:09.684407725 +48 34 0 days 00:00:03.396378145 +48 35 0 days 00:00:09.088875000 +48 36 0 days 00:00:05.673849690 +48 37 0 days 00:00:09.326978760 +48 38 0 days 00:00:03.470599580 +48 39 0 days 00:00:09.383223605 +48 40 0 days 00:00:03.569707345 +48 41 0 days 00:00:09.627401160 +48 42 0 days 00:00:06.015159480 +48 43 0 days 00:00:05.359369530 +48 44 0 days 00:00:09.973224660 +48 45 0 days 00:00:04.374520655 +48 46 0 days 00:00:05.153209430 +48 47 0 days 00:00:06.072453475 +48 48 0 days 00:00:04.578937495 +48 49 0 days 00:00:09.076855860 +48 50 0 days 00:00:05.248209880 +48 51 0 days 00:00:11.785096190 +48 52 0 days 00:00:13.057049995 +49 1 0 days 00:00:04.848522845 +49 2 0 days 00:00:05.979462640 +49 3 0 days 00:00:02.566638128 +49 4 0 days 00:00:05.452791850 +49 5 0 days 00:00:02.884577290 +49 6 0 days 00:00:02.654248745 +49 7 0 days 00:00:06.021436856 +49 8 0 days 00:00:04.110100350 +49 9 0 days 00:00:02.023211584 +49 10 0 days 
00:00:04.845927424 +49 11 0 days 00:00:02.917046820 +49 12 0 days 00:00:03.483069036 +49 13 0 days 00:00:03.046261300 +49 14 0 days 00:00:02.061281448 +49 15 0 days 00:00:01.917133836 +49 16 0 days 00:00:03.825157604 +49 17 0 days 00:00:02.962528248 +49 18 0 days 00:00:04.609057935 +49 19 0 days 00:00:02.637998160 +49 20 0 days 00:00:05.820344436 +49 21 0 days 00:00:03.637291424 +49 22 0 days 00:00:02.983590676 +49 23 0 days 00:00:02.067879300 +49 24 0 days 00:00:04.771222410 +49 25 0 days 00:00:02.870735104 +49 26 0 days 00:00:04.840732440 +49 27 0 days 00:00:05.565673731 +49 28 0 days 00:00:04.998611800 +49 29 0 days 00:00:04.866122356 +49 30 0 days 00:00:01.933804174 +49 31 0 days 00:00:03.187990370 +49 32 0 days 00:00:01.896140752 +49 33 0 days 00:00:01.912590606 +49 34 0 days 00:00:03.294338825 +49 35 0 days 00:00:01.678891335 +49 36 0 days 00:00:05.478525660 +49 37 0 days 00:00:02.754368960 +49 38 0 days 00:00:01.824905840 +49 39 0 days 00:00:01.889886320 +49 40 0 days 00:00:02.752190836 +49 41 0 days 00:00:04.772066620 +49 42 0 days 00:00:01.797996400 +49 43 0 days 00:00:04.559552145 +49 44 0 days 00:00:02.891948055 +49 45 0 days 00:00:01.908463543 +49 46 0 days 00:00:01.897877324 +49 47 0 days 00:00:02.440307845 +49 48 0 days 00:00:02.861135080 +49 49 0 days 00:00:04.649225010 +49 50 0 days 00:00:02.910205588 +49 51 0 days 00:00:02.898908330 +49 52 0 days 00:00:02.652286315 +49 53 0 days 00:00:04.027873630 +49 54 0 days 00:00:01.750319064 +49 55 0 days 00:00:05.404473440 +49 56 0 days 00:00:02.154559456 +49 57 0 days 00:00:02.766625551 +49 58 0 days 00:00:02.036266973 +49 59 0 days 00:00:05.054488748 +49 60 0 days 00:00:05.081456435 +49 61 0 days 00:00:04.609197765 +49 62 0 days 00:00:03.658928262 +49 63 0 days 00:00:02.851008034 +49 64 0 days 00:00:02.443875735 +49 65 0 days 00:00:05.826791836 +49 66 0 days 00:00:01.970475020 +49 67 0 days 00:00:04.769223325 +49 68 0 days 00:00:05.090699316 +49 69 0 days 00:00:01.916536690 +49 70 0 days 00:00:04.782878275 
+49 71 0 days 00:00:03.234268436 +49 72 0 days 00:00:02.005164176 +49 73 0 days 00:00:02.033267840 +49 74 0 days 00:00:02.786690124 +49 75 0 days 00:00:02.971088474 +49 76 0 days 00:00:04.745129585 +49 77 0 days 00:00:06.248204293 +49 78 0 days 00:00:05.007151076 +49 79 0 days 00:00:02.870992513 +49 80 0 days 00:00:04.602943330 +49 81 0 days 00:00:04.777676795 +49 82 0 days 00:00:03.801120804 +49 83 0 days 00:00:01.952832995 +49 84 0 days 00:00:03.343109555 +49 85 0 days 00:00:05.507200960 +49 86 0 days 00:00:02.670356385 +49 87 0 days 00:00:02.893914936 +49 88 0 days 00:00:02.609082685 +49 89 0 days 00:00:05.018962868 +49 90 0 days 00:00:05.501026210 +49 91 0 days 00:00:03.900613968 +49 92 0 days 00:00:02.439500540 +49 93 0 days 00:00:02.803822244 +49 94 0 days 00:00:03.077437557 +49 95 0 days 00:00:03.128851970 +49 96 0 days 00:00:05.225049680 +49 97 0 days 00:00:02.018017775 +49 98 0 days 00:00:05.487059595 +49 99 0 days 00:00:02.703202294 +50 1 0 days 00:04:23.189886970 +51 1 0 days 00:00:36.878877850 +51 2 0 days 00:01:14.479360044 +51 3 0 days 00:02:25.655278132 +51 4 0 days 00:00:41.592144396 +51 5 0 days 00:00:39.403329468 +51 6 0 days 00:02:16.212407690 +52 1 0 days 00:04:01.110604660 +52 2 0 days 00:04:27.128734963 +53 1 0 days 00:02:15.660017725 +53 2 0 days 00:00:37.870815775 +53 3 0 days 00:01:16.650062450 +53 4 0 days 00:00:37.305540155 +53 5 0 days 00:00:47.431246084 +54 1 0 days 00:02:10.961419456 +54 2 0 days 00:01:54.108627640 +54 3 0 days 00:03:42.024746265 +55 1 0 days 00:00:37.714045845 +55 2 0 days 00:02:14.390057140 +55 3 0 days 00:00:38.117524190 +55 4 0 days 00:01:13.818952104 +55 5 0 days 00:01:10.055758100 +55 6 0 days 00:02:14.404995465 +56 1 0 days 00:02:22.648237508 +56 2 0 days 00:00:49.195437672 +56 3 0 days 00:02:00.662801442 +56 4 0 days 00:01:15.216509860 +56 5 0 days 00:00:44.043971405 +56 6 0 days 00:03:05.674352457 +56 7 0 days 00:01:48.383639775 +56 8 0 days 00:01:40.926781300 +56 9 0 days 00:02:04.313134883 +57 1 0 days 
00:01:13.651983561 +57 2 0 days 00:01:01.502979092 +57 3 0 days 00:00:28.908413500 +57 4 0 days 00:00:53.550939485 +57 5 0 days 00:00:58.710840733 +57 6 0 days 00:00:17.370696989 +57 7 0 days 00:01:45.080095612 +57 8 0 days 00:00:24.485899923 +57 9 0 days 00:00:18.946109730 +57 10 0 days 00:00:58.627913383 +57 11 0 days 00:00:47.401484218 +57 12 0 days 00:00:39.312543022 +57 13 0 days 00:00:45.147257345 +57 14 0 days 00:00:45.670085900 +57 15 0 days 00:00:26.017029007 +57 16 0 days 00:00:43.617192988 +57 17 0 days 00:00:51.871582892 +58 1 0 days 00:01:44.910901371 +58 2 0 days 00:01:03.298235550 +58 3 0 days 00:00:48.249514800 +58 4 0 days 00:01:00.754705066 +58 5 0 days 00:01:29.049543166 +58 6 0 days 00:00:47.230699873 +58 7 0 days 00:01:19.895063508 +58 8 0 days 00:01:30.549050987 +58 9 0 days 00:02:02.557688942 +58 10 0 days 00:01:25.022119527 +59 1 0 days 00:00:27.998271491 +59 2 0 days 00:00:29.943211775 +59 3 0 days 00:00:25.895394023 +59 4 0 days 00:01:12.139346085 +59 5 0 days 00:00:38.333050100 +59 6 0 days 00:00:19.416951531 +59 7 0 days 00:01:06.665910823 +59 8 0 days 00:00:27.323742561 +59 9 0 days 00:00:57.497824454 +59 10 0 days 00:00:22.547162931 +59 11 0 days 00:00:36.775164750 +59 12 0 days 00:01:15.546102066 +59 13 0 days 00:01:00.053903555 +60 1 0 days 00:00:32.074133091 +60 2 0 days 00:00:43.005766618 +60 3 0 days 00:01:01.466680390 +60 4 0 days 00:00:27.108074859 +60 5 0 days 00:00:49.381404405 +60 6 0 days 00:00:41.055575841 +60 7 0 days 00:00:40.693329315 +60 8 0 days 00:00:54.876439584 +60 9 0 days 00:00:48.946744986 +60 10 0 days 00:01:25.681180147 +61 1 0 days 00:00:44.128921587 +61 2 0 days 00:00:43.771801416 +61 3 0 days 00:01:13.107524917 +61 4 0 days 00:01:26.244629288 +61 5 0 days 00:00:30.294491090 +61 6 0 days 00:01:01.884829612 +61 7 0 days 00:00:23.168233878 +61 8 0 days 00:00:52.330606978 +61 9 0 days 00:01:35.818477194 +61 10 0 days 00:00:48.603429117 +61 11 0 days 00:01:00.584698562 +62 1 0 days 00:00:59.283145094 +62 2 0 days 
00:02:02.754916800 +62 3 0 days 00:00:40.386602114 +62 4 0 days 00:01:00.343611010 +62 5 0 days 00:02:10.801441344 +62 6 0 days 00:00:38.841521843 +62 7 0 days 00:01:15.672179525 +63 1 0 days 00:00:29.375702252 +63 2 0 days 00:02:15.265479215 +63 3 0 days 00:00:40.179838900 +63 4 0 days 00:01:13.330477300 +63 5 0 days 00:01:14.456874655 +63 6 0 days 00:00:43.568291740 +63 7 0 days 00:00:57.081993694 +63 8 0 days 00:00:32.003874157 +63 9 0 days 00:00:59.966829112 +63 10 0 days 00:01:15.887605705 +64 1 0 days 00:00:00.141423142 +64 2 0 days 00:00:00.249840926 +64 3 0 days 00:00:00.167829760 +64 4 0 days 00:00:00.187671370 +64 5 0 days 00:00:00.136232855 +64 6 0 days 00:00:00.170160597 +64 7 0 days 00:00:00.137609585 +64 8 0 days 00:00:00.135082580 +64 9 0 days 00:00:00.136647734 +64 10 0 days 00:00:00.163001522 +64 11 0 days 00:00:00.225888668 +64 12 0 days 00:00:00.132968102 +64 13 0 days 00:00:00.248564060 +64 14 0 days 00:00:00.200933389 +64 15 0 days 00:00:00.252010915 +64 16 0 days 00:00:00.154110823 +64 17 0 days 00:00:00.232364591 +64 18 0 days 00:00:00.239822890 +64 19 0 days 00:00:00.228208346 +64 20 0 days 00:00:00.153805208 +64 21 0 days 00:00:00.241371665 +64 22 0 days 00:00:00.174271234 +64 23 0 days 00:00:00.169936927 +64 24 0 days 00:00:00.177337254 +64 25 0 days 00:00:00.135508661 +64 26 0 days 00:00:00.175042492 +64 27 0 days 00:00:00.162714717 +64 28 0 days 00:00:00.237896948 +64 29 0 days 00:00:00.171075180 +64 30 0 days 00:00:00.171906355 +64 31 0 days 00:00:00.166350491 +64 32 0 days 00:00:00.233675880 +64 33 0 days 00:00:00.248665431 +64 34 0 days 00:00:00.169511528 +64 35 0 days 00:00:00.167565924 +64 36 0 days 00:00:00.240791850 +64 37 0 days 00:00:00.230666322 +64 38 0 days 00:00:00.141148927 +64 39 0 days 00:00:00.131195014 +64 40 0 days 00:00:00.186732934 +64 41 0 days 00:00:00.135505892 +64 42 0 days 00:00:00.238015190 +64 43 0 days 00:00:00.234506980 +64 44 0 days 00:00:00.151585472 +64 45 0 days 00:00:00.254244656 +64 46 0 days 
00:00:00.136636981 +64 47 0 days 00:00:00.162190377 +64 48 0 days 00:00:00.170099788 +64 49 0 days 00:00:00.174619437 +64 50 0 days 00:00:00.241111573 +64 51 0 days 00:00:00.188642023 +64 52 0 days 00:00:00.175692881 +64 53 0 days 00:00:00.135621441 +64 54 0 days 00:00:00.175163515 +64 55 0 days 00:00:00.176600761 +64 56 0 days 00:00:00.140727975 +64 57 0 days 00:00:00.181581375 +64 58 0 days 00:00:00.175552193 +64 59 0 days 00:00:00.180296840 +64 60 0 days 00:00:00.176007654 +64 61 0 days 00:00:00.134494894 +64 62 0 days 00:00:00.173968980 +64 63 0 days 00:00:00.180959235 +64 64 0 days 00:00:00.134896951 +64 65 0 days 00:00:00.166974033 +64 66 0 days 00:00:00.181143629 +64 67 0 days 00:00:00.238235917 +64 68 0 days 00:00:00.265626853 +64 69 0 days 00:00:00.177127950 +64 70 0 days 00:00:00.232137996 +64 71 0 days 00:00:00.244568384 +64 72 0 days 00:00:00.183575282 +64 73 0 days 00:00:00.140349198 +64 74 0 days 00:00:00.140591293 +64 75 0 days 00:00:00.172813805 +64 76 0 days 00:00:00.136394064 +64 77 0 days 00:00:00.138502735 +64 78 0 days 00:00:00.239715834 +64 79 0 days 00:00:00.225864736 +64 80 0 days 00:00:00.244742985 +64 81 0 days 00:00:00.185835602 +64 82 0 days 00:00:00.234653452 +64 83 0 days 00:00:00.134421317 +64 84 0 days 00:00:00.235543470 +64 85 0 days 00:00:00.139164648 +64 86 0 days 00:00:00.236319594 +64 87 0 days 00:00:00.232289066 +64 88 0 days 00:00:00.153390726 +64 89 0 days 00:00:00.241764556 +64 90 0 days 00:00:00.130517658 +64 91 0 days 00:00:00.136968457 +64 92 0 days 00:00:00.168427536 +64 93 0 days 00:00:00.176116578 +64 94 0 days 00:00:00.253470065 +64 95 0 days 00:00:00.179596770 +64 96 0 days 00:00:00.136895701 +64 97 0 days 00:00:00.225633076 +64 98 0 days 00:00:00.237698270 +64 99 0 days 00:00:00.254646452 +64 100 0 days 00:00:00.233243356 +65 1 0 days 00:00:00.085523730 +65 2 0 days 00:00:00.139833672 +65 3 0 days 00:00:00.115094727 +65 4 0 days 00:00:00.146581162 +65 5 0 days 00:00:00.081937297 +65 6 0 days 00:00:00.128570480 +65 7 
0 days 00:00:00.152878958 +65 8 0 days 00:00:00.095813012 +65 9 0 days 00:00:00.082670148 +65 10 0 days 00:00:00.080828202 +65 11 0 days 00:00:00.080834452 +65 12 0 days 00:00:00.083246284 +65 13 0 days 00:00:00.102948753 +65 14 0 days 00:00:00.088195027 +65 15 0 days 00:00:00.150571008 +65 16 0 days 00:00:00.112630875 +65 18 0 days 00:00:00.093276744 +65 19 0 days 00:00:00.107200584 +65 20 0 days 00:00:00.084711940 +65 21 0 days 00:00:00.094883814 +65 22 0 days 00:00:00.142426802 +65 23 0 days 00:00:00.083470533 +65 24 0 days 00:00:00.096789324 +65 25 0 days 00:00:00.139437820 +65 27 0 days 00:00:00.082208590 +65 28 0 days 00:00:00.139153253 +65 29 0 days 00:00:00.085432960 +65 30 0 days 00:00:00.103125336 +65 31 0 days 00:00:00.111875715 +65 32 0 days 00:00:00.096792969 +65 33 0 days 00:00:00.108939847 +65 34 0 days 00:00:00.105615246 +65 35 0 days 00:00:00.132338326 +65 36 0 days 00:00:00.105122352 +65 37 0 days 00:00:00.104547570 +65 38 0 days 00:00:00.145621575 +65 39 0 days 00:00:00.083700947 +65 40 0 days 00:00:00.103092891 +65 41 0 days 00:00:00.117010101 +65 42 0 days 00:00:00.090275061 +65 43 0 days 00:00:00.086332200 +65 44 0 days 00:00:00.148627650 +65 45 0 days 00:00:00.101310968 +65 46 0 days 00:00:00.149286825 +65 47 0 days 00:00:00.108493660 +65 48 0 days 00:00:00.102250498 +65 49 0 days 00:00:00.102153480 +65 50 0 days 00:00:00.080925086 +65 51 0 days 00:00:00.079981848 +65 52 0 days 00:00:00.083106666 +65 53 0 days 00:00:00.102575907 +65 54 0 days 00:00:00.116424026 +65 55 0 days 00:00:00.086796843 +65 56 0 days 00:00:00.147239254 +65 57 0 days 00:00:00.144111350 +65 58 0 days 00:00:00.085188614 +65 59 0 days 00:00:00.084347692 +65 60 0 days 00:00:00.084936941 +65 61 0 days 00:00:00.148709255 +65 62 0 days 00:00:00.148710262 +65 63 0 days 00:00:00.152535352 +65 64 0 days 00:00:00.147559160 +65 65 0 days 00:00:00.132485985 +65 66 0 days 00:00:00.132250127 +65 67 0 days 00:00:00.091718105 +65 68 0 days 00:00:00.113501183 +65 69 0 days 
00:00:00.091562868 +65 70 0 days 00:00:00.100313396 +65 71 0 days 00:00:00.144141696 +65 72 0 days 00:00:00.082509854 +65 73 0 days 00:00:00.139881611 +65 74 0 days 00:00:00.097561150 +65 75 0 days 00:00:00.079992427 +65 76 0 days 00:00:00.135359046 +65 77 0 days 00:00:00.140677165 +65 78 0 days 00:00:00.079400255 +65 79 0 days 00:00:00.100701416 +65 80 0 days 00:00:00.081535767 +65 81 0 days 00:00:00.147378523 +65 82 0 days 00:00:00.146247254 +65 83 0 days 00:00:00.147350635 +65 84 0 days 00:00:00.109322706 +65 85 0 days 00:00:00.080986395 +65 86 0 days 00:00:00.142622203 +65 87 0 days 00:00:00.103655898 +65 88 0 days 00:00:00.101692538 +65 89 0 days 00:00:00.148910952 +65 90 0 days 00:00:00.083801613 +65 91 0 days 00:00:00.152800770 +65 92 0 days 00:00:00.104111315 +65 93 0 days 00:00:00.152942552 +65 94 0 days 00:00:00.099784987 +65 95 0 days 00:00:00.142355178 +65 96 0 days 00:00:00.081881789 +65 97 0 days 00:00:00.110708851 +65 98 0 days 00:00:00.106028588 +65 99 0 days 00:00:00.094645192 +65 100 0 days 00:00:00.105818422 +66 1 0 days 00:00:00.471780390 +66 2 0 days 00:00:03.245557611 +66 3 0 days 00:00:01.760317770 +66 4 0 days 00:00:06.320131600 +66 5 0 days 00:00:01.317817607 +66 6 0 days 00:00:04.918591700 +66 7 0 days 00:00:02.121647535 +66 8 0 days 00:00:06.495154890 +66 9 0 days 00:00:04.025089168 +66 10 0 days 00:00:01.790761370 +66 11 0 days 00:00:02.936990850 +66 12 0 days 00:00:05.172756136 +66 13 0 days 00:00:07.066389225 +66 14 0 days 00:00:01.308943180 +66 15 0 days 00:00:02.340371140 +66 16 0 days 00:00:01.424202805 +66 17 0 days 00:00:02.963674724 +66 18 0 days 00:00:03.416731305 +66 19 0 days 00:00:04.789991505 +66 20 0 days 00:00:01.225009340 +66 21 0 days 00:00:03.556540095 +66 22 0 days 00:00:04.824771420 +66 23 0 days 00:00:04.957852545 +66 24 0 days 00:00:08.100937455 +66 25 0 days 00:00:01.087160030 +66 26 0 days 00:00:06.781600200 +66 27 0 days 00:00:01.308586785 +66 29 0 days 00:00:08.763090510 +66 30 0 days 00:00:02.943256672 +66 31 0 
days 00:00:04.928671895 +66 32 0 days 00:00:00.754307280 +66 33 0 days 00:00:02.587538190 +66 34 0 days 00:00:03.610736800 +66 35 0 days 00:00:01.134585311 +66 37 0 days 00:00:01.578158350 +66 38 0 days 00:00:03.538804420 +66 39 0 days 00:00:02.388461904 +66 40 0 days 00:00:03.480389415 +66 41 0 days 00:00:04.274489850 +66 42 0 days 00:00:04.752016485 +66 43 0 days 00:00:03.841950230 +66 44 0 days 00:00:06.522402310 +66 45 0 days 00:00:01.560696065 +66 47 0 days 00:00:01.340077124 +66 48 0 days 00:00:00.619290988 +66 49 0 days 00:00:03.241540370 +66 50 0 days 00:00:01.545336915 +66 51 0 days 00:00:06.808361150 +66 52 0 days 00:00:01.196352840 +66 53 0 days 00:00:05.903298400 +66 55 0 days 00:00:02.383403615 +66 56 0 days 00:00:00.652809090 +66 57 0 days 00:00:03.455433195 +66 58 0 days 00:00:02.324907305 +66 59 0 days 00:00:03.221355240 +66 60 0 days 00:00:01.976047365 +66 61 0 days 00:00:01.306272480 +66 63 0 days 00:00:03.891296620 +66 65 0 days 00:00:03.792321310 +66 66 0 days 00:00:05.796332090 +66 67 0 days 00:00:02.269311985 +66 68 0 days 00:00:02.046670760 +66 69 0 days 00:00:03.465168365 +66 70 0 days 00:00:02.033233210 +66 71 0 days 00:00:01.181035540 +66 72 0 days 00:00:01.944955060 +66 74 0 days 00:00:00.920537365 +66 75 0 days 00:00:06.389582205 +66 76 0 days 00:00:02.730595870 +66 78 0 days 00:00:03.453615616 +66 79 0 days 00:00:04.492290660 +66 80 0 days 00:00:01.865784770 +66 81 0 days 00:00:02.594880516 +66 82 0 days 00:00:01.537622775 +66 83 0 days 00:00:02.982162144 +66 84 0 days 00:00:03.371333880 +66 85 0 days 00:00:01.469209895 +66 86 0 days 00:00:02.299290956 +66 87 0 days 00:00:01.684228740 +66 88 0 days 00:00:02.329541300 +66 89 0 days 00:00:01.381952095 +66 91 0 days 00:00:07.630793390 +66 92 0 days 00:00:01.533681636 +66 93 0 days 00:00:04.862549532 +66 94 0 days 00:00:01.812596832 +66 95 0 days 00:00:03.649707893 +66 96 0 days 00:00:00.892368295 +66 97 0 days 00:00:02.001361170 +66 98 0 days 00:00:08.166712848 +66 99 0 days 
00:00:02.616530735 +66 100 0 days 00:00:04.845296555 +67 1 0 days 00:00:02.143240695 +67 2 0 days 00:00:02.580150012 +67 3 0 days 00:00:00.258289268 +67 4 0 days 00:00:00.550483196 +67 5 0 days 00:00:01.338431664 +67 6 0 days 00:00:03.839492955 +67 8 0 days 00:00:01.417768532 +67 9 0 days 00:00:01.007204955 +67 10 0 days 00:00:01.388418050 +67 11 0 days 00:00:01.991791440 +67 12 0 days 00:00:00.992992158 +67 13 0 days 00:00:00.497405420 +67 14 0 days 00:00:03.580292395 +67 15 0 days 00:00:01.620463665 +67 17 0 days 00:00:01.624711180 +67 18 0 days 00:00:01.636535745 +67 19 0 days 00:00:00.768528285 +67 20 0 days 00:00:01.163035734 +67 21 0 days 00:00:04.172815160 +67 22 0 days 00:00:03.143537015 +67 23 0 days 00:00:01.146781455 +67 24 0 days 00:00:01.717847248 +67 26 0 days 00:00:02.988576975 +67 28 0 days 00:00:01.819173030 +67 29 0 days 00:00:02.022739665 +67 30 0 days 00:00:01.746817825 +67 31 0 days 00:00:02.020002735 +67 32 0 days 00:00:00.711790566 +67 33 0 days 00:00:03.159398810 +67 34 0 days 00:00:01.112666090 +67 35 0 days 00:00:00.724259020 +67 36 0 days 00:00:01.874166995 +67 37 0 days 00:00:01.657358376 +67 38 0 days 00:00:04.080317560 +67 39 0 days 00:00:00.565220920 +67 40 0 days 00:00:00.749754976 +67 42 0 days 00:00:02.376101495 +67 43 0 days 00:00:00.823642955 +67 44 0 days 00:00:01.424947063 +67 46 0 days 00:00:03.047749655 +67 47 0 days 00:00:04.668984180 +67 48 0 days 00:00:01.966914315 +67 49 0 days 00:00:00.848735345 +67 50 0 days 00:00:00.406079384 +67 51 0 days 00:00:01.361668295 +67 52 0 days 00:00:01.716763330 +67 53 0 days 00:00:00.941942390 +67 54 0 days 00:00:00.869383855 +67 55 0 days 00:00:01.169902108 +67 56 0 days 00:00:02.416116550 +67 57 0 days 00:00:01.645523275 +67 58 0 days 00:00:01.402115945 +67 59 0 days 00:00:00.840195246 +67 60 0 days 00:00:00.502308285 +67 61 0 days 00:00:00.952613260 +67 62 0 days 00:00:01.850002020 +67 63 0 days 00:00:01.354080937 +67 64 0 days 00:00:00.368191860 +67 65 0 days 00:00:01.277496732 +67 66 
0 days 00:00:04.487755440 +67 67 0 days 00:00:01.910414531 +67 68 0 days 00:00:02.521325275 +67 70 0 days 00:00:00.581274120 +67 72 0 days 00:00:01.970241750 +67 74 0 days 00:00:00.340294463 +67 75 0 days 00:00:00.946496000 +67 76 0 days 00:00:01.724577325 +67 77 0 days 00:00:01.554047395 +67 78 0 days 00:00:02.415604896 +67 79 0 days 00:00:00.838817392 +67 80 0 days 00:00:01.158040615 +67 81 0 days 00:00:00.846181610 +67 82 0 days 00:00:02.184840430 +67 83 0 days 00:00:03.025206944 +67 84 0 days 00:00:00.859583672 +67 85 0 days 00:00:01.497627015 +67 86 0 days 00:00:00.827350550 +67 87 0 days 00:00:01.400243476 +67 89 0 days 00:00:00.755418950 +67 91 0 days 00:00:00.459486035 +67 92 0 days 00:00:01.478833165 +67 93 0 days 00:00:01.191167685 +67 95 0 days 00:00:01.527224468 +67 96 0 days 00:00:02.988378800 +67 97 0 days 00:00:03.345940765 +67 98 0 days 00:00:02.225672900 +67 99 0 days 00:00:04.534739875 +67 100 0 days 00:00:01.804232656 +68 1 0 days 00:00:01.545195628 +68 2 0 days 00:00:02.261095844 +68 3 0 days 00:00:03.730221615 +68 5 0 days 00:00:01.687161250 +68 6 0 days 00:00:02.200574273 +68 7 0 days 00:00:01.790325510 +68 8 0 days 00:00:00.816202200 +68 9 0 days 00:00:02.404580716 +68 11 0 days 00:00:01.929364561 +68 12 0 days 00:00:03.704775418 +68 13 0 days 00:00:07.211637868 +68 14 0 days 00:00:06.752595690 +68 15 0 days 00:00:02.582768168 +68 16 0 days 00:00:04.355801081 +68 17 0 days 00:00:02.181048653 +68 18 0 days 00:00:06.461775480 +68 19 0 days 00:00:03.993837821 +68 20 0 days 00:00:01.826512310 +68 21 0 days 00:00:03.774852113 +68 22 0 days 00:00:03.561071791 +68 23 0 days 00:00:00.978755848 +68 24 0 days 00:00:04.037243145 +68 25 0 days 00:00:06.565070163 +68 26 0 days 00:00:00.695762644 +68 27 0 days 00:00:01.943137710 +68 28 0 days 00:00:04.855860786 +68 29 0 days 00:00:02.817009808 +68 30 0 days 00:00:03.419649590 +68 31 0 days 00:00:09.193828740 +68 32 0 days 00:00:03.124845930 +68 33 0 days 00:00:03.118496291 +68 34 0 days 00:00:02.499210584 
+68 35 0 days 00:00:05.691510050 +68 36 0 days 00:00:02.223372432 +68 37 0 days 00:00:05.128969030 +68 38 0 days 00:00:01.770992010 +68 39 0 days 00:00:03.973537586 +68 40 0 days 00:00:01.546064880 +68 41 0 days 00:00:00.695867546 +68 42 0 days 00:00:04.487520040 +68 43 0 days 00:00:02.761884735 +68 44 0 days 00:00:01.937574450 +68 45 0 days 00:00:02.679431712 +68 46 0 days 00:00:02.193734228 +68 47 0 days 00:00:02.660897250 +68 48 0 days 00:00:01.514709544 +68 50 0 days 00:00:01.874693450 +68 52 0 days 00:00:01.196555256 +68 53 0 days 00:00:06.170885568 +68 54 0 days 00:00:05.600715223 +68 55 0 days 00:00:11.603369936 +68 57 0 days 00:00:04.826647523 +68 58 0 days 00:00:01.996492915 +68 59 0 days 00:00:05.396599700 +68 60 0 days 00:00:08.305906330 +68 61 0 days 00:00:03.392941216 +68 63 0 days 00:00:04.994877008 +68 64 0 days 00:00:01.815626884 +68 65 0 days 00:00:01.235131807 +68 66 0 days 00:00:02.063783568 +68 67 0 days 00:00:00.737255234 +68 69 0 days 00:00:05.009017660 +68 70 0 days 00:00:04.536461980 +68 71 0 days 00:00:02.550624560 +68 72 0 days 00:00:01.486580456 +68 73 0 days 00:00:06.013199240 +68 74 0 days 00:00:04.153373756 +68 75 0 days 00:00:05.008904522 +68 76 0 days 00:00:01.366129414 +68 77 0 days 00:00:02.295359391 +68 78 0 days 00:00:01.035707506 +68 79 0 days 00:00:02.210982325 +68 80 0 days 00:00:02.603391868 +68 81 0 days 00:00:01.019678632 +68 82 0 days 00:00:01.788048830 +68 83 0 days 00:00:05.917859940 +68 84 0 days 00:00:01.015756284 +68 85 0 days 00:00:01.078701571 +68 87 0 days 00:00:01.197675975 +68 88 0 days 00:00:00.886331002 +68 89 0 days 00:00:02.220907020 +68 90 0 days 00:00:00.673768110 +68 91 0 days 00:00:02.814245783 +68 92 0 days 00:00:01.433597671 +68 93 0 days 00:00:09.964109300 +68 94 0 days 00:00:00.955234467 +68 95 0 days 00:00:03.123344930 +68 96 0 days 00:00:03.163470430 +68 97 0 days 00:00:02.176299455 +68 98 0 days 00:00:03.154701555 +68 99 0 days 00:00:01.562341480 +68 100 0 days 00:00:02.365165004 +69 1 0 days 
00:00:03.018506068 +69 3 0 days 00:00:01.236733165 +69 4 0 days 00:00:00.789319564 +69 5 0 days 00:00:00.586454805 +69 6 0 days 00:00:00.719367189 +69 7 0 days 00:00:00.723140908 +69 8 0 days 00:00:00.353499420 +69 10 0 days 00:00:01.346047658 +69 11 0 days 00:00:00.504025808 +69 12 0 days 00:00:01.516601428 +69 13 0 days 00:00:00.942261120 +69 14 0 days 00:00:00.496439056 +69 15 0 days 00:00:00.715310117 +69 16 0 days 00:00:00.483165395 +69 17 0 days 00:00:02.885011112 +69 18 0 days 00:00:03.133531828 +69 19 0 days 00:00:01.632780102 +69 20 0 days 00:00:03.672548440 +69 21 0 days 00:00:06.212644292 +69 23 0 days 00:00:01.215963482 +69 24 0 days 00:00:01.885910535 +69 25 0 days 00:00:00.832061763 +69 26 0 days 00:00:01.887709585 +69 27 0 days 00:00:00.608391145 +69 28 0 days 00:00:02.210014088 +69 29 0 days 00:00:02.344921482 +69 30 0 days 00:00:02.835344057 +69 31 0 days 00:00:01.485699367 +69 32 0 days 00:00:00.249478941 +69 33 0 days 00:00:01.449020960 +69 34 0 days 00:00:00.673124266 +69 35 0 days 00:00:00.782214704 +69 36 0 days 00:00:02.083216281 +69 37 0 days 00:00:00.960695500 +69 38 0 days 00:00:01.490379382 +69 39 0 days 00:00:02.967913906 +69 41 0 days 00:00:00.764703186 +69 42 0 days 00:00:01.496157460 +69 43 0 days 00:00:00.919075051 +69 44 0 days 00:00:00.776793370 +69 45 0 days 00:00:00.758364375 +69 46 0 days 00:00:00.968556724 +69 47 0 days 00:00:01.670577517 +69 48 0 days 00:00:01.039366418 +69 49 0 days 00:00:01.043350175 +69 50 0 days 00:00:01.730133720 +69 51 0 days 00:00:01.920842617 +69 52 0 days 00:00:01.692273592 +69 53 0 days 00:00:00.606970670 +69 54 0 days 00:00:00.969125165 +69 55 0 days 00:00:03.093736472 +69 57 0 days 00:00:03.468913830 +69 58 0 days 00:00:00.462565936 +69 59 0 days 00:00:03.340471236 +69 60 0 days 00:00:00.348279763 +69 61 0 days 00:00:00.651316762 +69 62 0 days 00:00:05.935552736 +69 64 0 days 00:00:00.709610847 +69 65 0 days 00:00:01.038641476 +69 66 0 days 00:00:01.700686658 +69 67 0 days 00:00:00.871480502 +69 68 
0 days 00:00:01.011300070 +69 69 0 days 00:00:01.353824481 +69 70 0 days 00:00:01.334195868 +69 72 0 days 00:00:01.391585160 +69 73 0 days 00:00:01.363901150 +69 74 0 days 00:00:02.235104690 +69 75 0 days 00:00:00.677595404 +69 77 0 days 00:00:01.201149891 +69 78 0 days 00:00:00.682693036 +69 79 0 days 00:00:01.407859941 +69 80 0 days 00:00:01.366941096 +69 81 0 days 00:00:00.424194874 +69 82 0 days 00:00:00.232193262 +69 83 0 days 00:00:01.905803520 +69 84 0 days 00:00:00.996358148 +69 85 0 days 00:00:01.298460444 +69 86 0 days 00:00:02.097102713 +69 87 0 days 00:00:01.145419160 +69 89 0 days 00:00:02.058524603 +69 90 0 days 00:00:01.540374164 +69 91 0 days 00:00:01.840767904 +69 92 0 days 00:00:01.907172926 +69 93 0 days 00:00:00.378519171 +69 94 0 days 00:00:01.823381863 +69 95 0 days 00:00:02.382068153 +69 96 0 days 00:00:01.699004549 +69 97 0 days 00:00:00.668020047 +69 98 0 days 00:00:02.125783103 +69 99 0 days 00:00:04.095054640 +69 100 0 days 00:00:01.515748201 +70 1 0 days 00:00:00.256186713 +70 2 0 days 00:00:00.174419276 +70 4 0 days 00:00:00.147980113 +70 5 0 days 00:00:00.250410649 +70 6 0 days 00:00:00.253497549 +70 8 0 days 00:00:00.241165738 +70 9 0 days 00:00:00.172900223 +70 11 0 days 00:00:00.174135467 +70 12 0 days 00:00:00.123400320 +70 13 0 days 00:00:00.157996213 +70 14 0 days 00:00:00.183225275 +70 15 0 days 00:00:00.240854650 +70 16 0 days 00:00:00.243068236 +70 17 0 days 00:00:00.132389264 +70 19 0 days 00:00:00.246936115 +70 22 0 days 00:00:00.177675656 +70 23 0 days 00:00:00.245919507 +70 25 0 days 00:00:00.242294901 +70 27 0 days 00:00:00.240968565 +70 28 0 days 00:00:00.242696404 +70 29 0 days 00:00:00.247836177 +70 30 0 days 00:00:00.139894762 +70 31 0 days 00:00:00.140447044 +70 35 0 days 00:00:00.244765250 +70 36 0 days 00:00:00.157509992 +70 37 0 days 00:00:00.246303067 +70 38 0 days 00:00:00.246837078 +70 40 0 days 00:00:00.140035188 +70 44 0 days 00:00:00.241153140 +70 46 0 days 00:00:00.248674134 +70 47 0 days 00:00:00.182541273 
+70 48 0 days 00:00:00.176285132 +70 49 0 days 00:00:00.249074657 +70 52 0 days 00:00:00.137831037 +70 53 0 days 00:00:00.232576522 +70 57 0 days 00:00:00.174933174 +70 59 0 days 00:00:00.232307700 +70 60 0 days 00:00:00.142308197 +70 62 0 days 00:00:00.241283610 +70 64 0 days 00:00:00.235648820 +70 66 0 days 00:00:00.168643897 +70 68 0 days 00:00:00.238551300 +70 69 0 days 00:00:00.177953767 +70 70 0 days 00:00:00.143114970 +70 71 0 days 00:00:00.182648592 +70 72 0 days 00:00:00.146118505 +70 74 0 days 00:00:00.177290705 +70 75 0 days 00:00:00.175916241 +70 77 0 days 00:00:00.242381394 +70 78 0 days 00:00:00.243298093 +70 79 0 days 00:00:00.174633972 +70 81 0 days 00:00:00.244878480 +70 82 0 days 00:00:00.175364241 +70 83 0 days 00:00:00.250728708 +70 84 0 days 00:00:00.236391647 +70 86 0 days 00:00:00.149582862 +70 88 0 days 00:00:00.241476038 +70 89 0 days 00:00:00.244181930 +70 90 0 days 00:00:00.182763200 +70 91 0 days 00:00:00.241464671 +70 92 0 days 00:00:00.240718451 +70 93 0 days 00:00:00.169532381 +70 94 0 days 00:00:00.170046978 +70 95 0 days 00:00:00.245036907 +70 96 0 days 00:00:00.221261744 +70 97 0 days 00:00:00.320808486 +70 98 0 days 00:00:00.209675087 +71 1 0 days 00:00:00.221922264 +71 2 0 days 00:00:00.314824977 +71 3 0 days 00:00:00.264098487 +71 4 0 days 00:00:00.515141942 +71 6 0 days 00:00:00.300567322 +71 7 0 days 00:00:00.503410373 +71 9 0 days 00:00:00.186459560 +71 12 0 days 00:00:00.513445821 +71 13 0 days 00:00:00.471248809 +71 17 0 days 00:00:00.209075113 +71 19 0 days 00:00:00.314888224 +71 20 0 days 00:00:00.208687520 +71 22 0 days 00:00:00.248908285 +71 23 0 days 00:00:00.304519567 +71 24 0 days 00:00:00.509371422 +71 25 0 days 00:00:00.298160169 +71 26 0 days 00:00:00.291410795 +71 27 0 days 00:00:00.469756627 +71 28 0 days 00:00:00.501390622 +71 30 0 days 00:00:00.505238082 +71 33 0 days 00:00:00.279801745 +71 34 0 days 00:00:00.183280213 +71 35 0 days 00:00:00.299211398 +71 36 0 days 00:00:00.225090285 +71 37 0 days 
00:00:00.489969795 +71 38 0 days 00:00:00.228769573 +71 40 0 days 00:00:00.502096382 +71 42 0 days 00:00:00.307989672 +71 44 0 days 00:00:00.234982230 +71 48 0 days 00:00:00.244659800 +71 50 0 days 00:00:00.238273220 +71 51 0 days 00:00:00.203491690 +71 52 0 days 00:00:00.508603062 +71 54 0 days 00:00:00.250951326 +71 55 0 days 00:00:00.474217711 +71 57 0 days 00:00:00.493095788 +71 58 0 days 00:00:00.295959478 +71 59 0 days 00:00:00.213884008 +71 60 0 days 00:00:00.491137840 +71 61 0 days 00:00:00.474095349 +71 62 0 days 00:00:00.500258322 +71 63 0 days 00:00:00.280522095 +71 64 0 days 00:00:00.510417217 +71 66 0 days 00:00:00.483742870 +71 67 0 days 00:00:00.213195277 +71 68 0 days 00:00:00.482783898 +71 69 0 days 00:00:00.456704611 +71 70 0 days 00:00:00.296873876 +71 71 0 days 00:00:00.309087890 +71 72 0 days 00:00:00.239014193 +71 75 0 days 00:00:00.507179334 +71 76 0 days 00:00:00.176376333 +71 77 0 days 00:00:00.487993853 +71 78 0 days 00:00:00.212051257 +71 79 0 days 00:00:00.480526376 +71 81 0 days 00:00:00.209056922 +71 82 0 days 00:00:00.491069002 +71 83 0 days 00:00:00.172203606 +71 84 0 days 00:00:00.487270424 +71 87 0 days 00:00:00.289672001 +71 89 0 days 00:00:00.304206921 +71 90 0 days 00:00:00.481341948 +71 91 0 days 00:00:00.187420366 +71 96 0 days 00:00:00.289050983 +71 97 0 days 00:00:00.296717309 +71 98 0 days 00:00:00.491857380 +71 99 0 days 00:00:00.514239700 +72 1 0 days 00:00:00.112171004 +72 3 0 days 00:00:00.082053715 +72 6 0 days 00:00:00.149079231 +72 7 0 days 00:00:00.144991384 +72 8 0 days 00:00:00.155231188 +72 11 0 days 00:00:00.146458504 +72 13 0 days 00:00:00.079198260 +72 14 0 days 00:00:00.146303306 +72 16 0 days 00:00:00.105868110 +72 18 0 days 00:00:00.149336118 +72 21 0 days 00:00:00.138910806 +72 23 0 days 00:00:00.091124653 +72 24 0 days 00:00:00.143623562 +72 25 0 days 00:00:00.102955745 +72 28 0 days 00:00:00.142897556 +72 30 0 days 00:00:00.143828977 +72 31 0 days 00:00:00.101782005 +72 32 0 days 00:00:00.085192629 +72 
37 0 days 00:00:00.147308641 +72 40 0 days 00:00:00.148847367 +72 45 0 days 00:00:00.149966471 +72 46 0 days 00:00:00.144763192 +72 51 0 days 00:00:00.141844100 +72 52 0 days 00:00:00.099711747 +72 56 0 days 00:00:00.152619460 +72 58 0 days 00:00:00.083130380 +72 59 0 days 00:00:00.149332073 +72 60 0 days 00:00:00.149999362 +72 63 0 days 00:00:00.104603046 +72 64 0 days 00:00:00.145369712 +72 66 0 days 00:00:00.141095306 +72 67 0 days 00:00:00.147396763 +72 71 0 days 00:00:00.146205650 +72 76 0 days 00:00:00.141856338 +72 78 0 days 00:00:00.139223432 +72 81 0 days 00:00:00.082162046 +72 83 0 days 00:00:00.145368734 +72 89 0 days 00:00:00.145168106 +72 91 0 days 00:00:00.145128073 +72 93 0 days 00:00:00.105222431 +72 94 0 days 00:00:00.112731623 +72 97 0 days 00:00:00.146927409 +72 98 0 days 00:00:00.114231200 +73 2 0 days 00:00:00.125991920 +73 5 0 days 00:00:00.300570797 +73 6 0 days 00:00:00.125410197 +73 8 0 days 00:00:00.169337514 +73 9 0 days 00:00:00.286342230 +73 12 0 days 00:00:00.183347294 +73 13 0 days 00:00:00.177657809 +73 14 0 days 00:00:00.285398575 +73 17 0 days 00:00:00.122365915 +73 18 0 days 00:00:00.299747970 +73 19 0 days 00:00:00.311222045 +73 21 0 days 00:00:00.132156536 +73 23 0 days 00:00:00.279901570 +73 24 0 days 00:00:00.179944035 +73 25 0 days 00:00:00.298917610 +73 29 0 days 00:00:00.186064547 +73 30 0 days 00:00:00.162909206 +73 31 0 days 00:00:00.294567940 +73 33 0 days 00:00:00.181459160 +73 36 0 days 00:00:00.145291240 +73 38 0 days 00:00:00.299015892 +73 42 0 days 00:00:00.167348155 +73 44 0 days 00:00:00.282204548 +73 45 0 days 00:00:00.292119340 +73 46 0 days 00:00:00.124826526 +73 47 0 days 00:00:00.289163444 +73 48 0 days 00:00:00.157419333 +73 51 0 days 00:00:00.164551164 +73 52 0 days 00:00:00.159574300 +73 54 0 days 00:00:00.274003231 +73 55 0 days 00:00:00.298559666 +73 58 0 days 00:00:00.237565446 +73 60 0 days 00:00:00.171840508 +73 64 0 days 00:00:00.304024751 +73 66 0 days 00:00:00.175236037 +73 68 0 days 
00:00:00.165156081 +73 70 0 days 00:00:00.128169900 +73 71 0 days 00:00:00.133128715 +73 73 0 days 00:00:00.282841949 +73 74 0 days 00:00:00.298400333 +73 75 0 days 00:00:00.300220720 +73 77 0 days 00:00:00.260081495 +73 78 0 days 00:00:00.161367410 +73 79 0 days 00:00:00.167624420 +73 80 0 days 00:00:00.165992090 +73 81 0 days 00:00:00.284891976 +73 82 0 days 00:00:00.290674908 +73 83 0 days 00:00:00.132602642 +73 86 0 days 00:00:00.172413661 +73 90 0 days 00:00:00.293843721 +73 91 0 days 00:00:00.181145951 +73 92 0 days 00:00:00.133652550 +73 93 0 days 00:00:00.298608541 +73 100 0 days 00:00:00.280352922 +74 1 0 days 00:00:04.439008522 +74 2 0 days 00:00:06.174497342 +74 4 0 days 00:00:01.844152586 +74 5 0 days 00:00:01.715450728 +74 7 0 days 00:00:03.274215446 +74 8 0 days 00:00:05.524997180 +74 10 0 days 00:00:00.383447460 +74 11 0 days 00:00:02.018465113 +74 12 0 days 00:00:02.004697275 +74 13 0 days 00:00:05.345001450 +74 14 0 days 00:00:01.536092883 +74 16 0 days 00:00:01.769629964 +74 17 0 days 00:00:01.409656965 +74 18 0 days 00:00:01.270045760 +74 19 0 days 00:00:06.275323118 +74 21 0 days 00:00:02.493825276 +74 22 0 days 00:00:11.200305770 +74 23 0 days 00:00:04.991325644 +74 24 0 days 00:00:01.484310225 +74 26 0 days 00:00:02.676365204 +74 27 0 days 00:00:01.113676355 +74 28 0 days 00:00:03.826914253 +74 29 0 days 00:00:00.995758005 +74 30 0 days 00:00:04.533473912 +74 31 0 days 00:00:01.677844333 +74 32 0 days 00:00:04.729810881 +74 33 0 days 00:00:07.060468580 +74 34 0 days 00:00:00.493951245 +74 35 0 days 00:00:05.224858481 +74 36 0 days 00:00:03.722724984 +74 37 0 days 00:00:03.184987520 +74 38 0 days 00:00:01.975645536 +74 40 0 days 00:00:03.460543308 +74 41 0 days 00:00:04.556581517 +74 42 0 days 00:00:05.245338588 +74 43 0 days 00:00:03.286842106 +74 44 0 days 00:00:01.056101176 +74 45 0 days 00:00:04.068056460 +74 46 0 days 00:00:05.885508668 +74 47 0 days 00:00:05.384770746 +74 48 0 days 00:00:05.636683305 +74 49 0 days 00:00:00.488659483 +74 
50 0 days 00:00:15.789674060 +74 51 0 days 00:00:03.625701282 +74 52 0 days 00:00:03.122323722 +74 53 0 days 00:00:03.716383921 +75 1 0 days 00:00:04.096856700 +75 4 0 days 00:00:01.528514684 +75 5 0 days 00:00:01.317724648 +75 6 0 days 00:00:02.353801405 +75 8 0 days 00:00:02.743535083 +75 9 0 days 00:00:01.188892467 +75 10 0 days 00:00:00.920530971 +75 12 0 days 00:00:09.733357634 +75 13 0 days 00:00:06.445423773 +75 14 0 days 00:00:04.554720726 +75 15 0 days 00:00:03.722930606 +75 16 0 days 00:00:11.188986896 +75 17 0 days 00:00:01.107114673 +75 18 0 days 00:00:02.540675916 +75 19 0 days 00:00:00.947373188 +75 21 0 days 00:00:02.190550322 +75 22 0 days 00:00:01.159812317 +75 23 0 days 00:00:08.418702080 +75 25 0 days 00:00:04.028178637 +75 27 0 days 00:00:02.063942510 +75 28 0 days 00:00:06.069655365 +75 29 0 days 00:00:01.422186155 +75 30 0 days 00:00:04.198163365 +75 32 0 days 00:00:03.519308625 +75 34 0 days 00:00:03.103385764 +75 35 0 days 00:00:00.937275402 +75 36 0 days 00:00:00.841533687 +75 37 0 days 00:00:04.515536978 +75 39 0 days 00:00:02.836875317 +75 40 0 days 00:00:03.612629962 +75 41 0 days 00:00:01.124420117 +75 42 0 days 00:00:06.732520344 +75 43 0 days 00:00:03.891264205 +75 44 0 days 00:00:03.674715413 +75 45 0 days 00:00:02.090360296 +75 47 0 days 00:00:01.619007658 +75 49 0 days 00:00:01.713399004 +75 50 0 days 00:00:01.322180678 +75 51 0 days 00:00:07.693502251 +75 52 0 days 00:00:03.327313621 +75 53 0 days 00:00:06.793592940 +75 54 0 days 00:00:02.050248563 +75 55 0 days 00:00:15.993303448 +76 1 0 days 00:00:07.946123827 +76 2 0 days 00:00:05.485869166 +76 4 0 days 00:00:01.962291577 +76 5 0 days 00:00:00.812926118 +76 7 0 days 00:00:00.973840164 +76 8 0 days 00:00:00.732767617 +76 11 0 days 00:00:00.820495575 +76 14 0 days 00:00:01.026725937 +76 16 0 days 00:00:00.488169193 +76 17 0 days 00:00:00.842351075 +76 18 0 days 00:00:01.059611965 +76 19 0 days 00:00:00.582012686 +76 20 0 days 00:00:04.562595052 +76 21 0 days 00:00:01.889500957 
+76 22 0 days 00:00:00.659205367 +76 23 0 days 00:00:01.687357013 +76 26 0 days 00:00:06.892508361 +76 29 0 days 00:00:01.784274870 +76 32 0 days 00:00:02.534957715 +76 33 0 days 00:00:02.708160831 +76 35 0 days 00:00:00.379704893 +76 36 0 days 00:00:00.413386892 +76 39 0 days 00:00:00.764833093 +76 42 0 days 00:00:03.158867234 +76 46 0 days 00:00:01.626747868 +76 48 0 days 00:00:00.719900406 +76 50 0 days 00:00:03.478541931 +76 51 0 days 00:00:02.312867424 +76 52 0 days 00:00:01.039848880 +76 53 0 days 00:00:02.323477622 +76 54 0 days 00:00:04.066636320 +76 55 0 days 00:00:01.005993016 +76 56 0 days 00:00:02.256972760 +76 58 0 days 00:00:01.149021487 +76 60 0 days 00:00:00.982128978 +76 61 0 days 00:00:02.484445444 +76 62 0 days 00:00:00.730932866 +76 63 0 days 00:00:02.126080380 +76 64 0 days 00:00:01.316630513 +76 65 0 days 00:00:02.963707592 +76 67 0 days 00:00:02.038397690 +76 68 0 days 00:00:01.163050705 +76 69 0 days 00:00:03.377197051 +77 1 0 days 00:00:00.438885697 +77 2 0 days 00:00:00.260625940 +77 3 0 days 00:00:00.946761196 +77 5 0 days 00:00:01.190612225 +77 6 0 days 00:00:04.263627587 +77 8 0 days 00:00:03.873754060 +77 11 0 days 00:00:01.021487496 +77 12 0 days 00:00:03.052034176 +77 16 0 days 00:00:02.185950202 +77 18 0 days 00:00:00.513241802 +77 19 0 days 00:00:01.932895690 +77 20 0 days 00:00:02.152583830 +77 21 0 days 00:00:00.601617035 +77 22 0 days 00:00:01.304689066 +77 23 0 days 00:00:01.957769406 +77 25 0 days 00:00:02.262776524 +77 26 0 days 00:00:00.841242901 +77 27 0 days 00:00:02.757332192 +77 28 0 days 00:00:02.002318432 +77 29 0 days 00:00:00.790387514 +77 30 0 days 00:00:00.374568968 +77 31 0 days 00:00:00.937646731 +77 32 0 days 00:00:02.020469175 +77 33 0 days 00:00:06.430288056 +77 34 0 days 00:00:00.893420402 +77 35 0 days 00:00:03.021220620 +77 41 0 days 00:00:01.473039428 +77 42 0 days 00:00:00.871932131 +77 43 0 days 00:00:01.106023605 +77 44 0 days 00:00:00.894621433 +77 45 0 days 00:00:00.929629041 +77 47 0 days 
00:00:04.507709590 +77 48 0 days 00:00:02.753625783 +77 49 0 days 00:00:01.750322797 +77 50 0 days 00:00:01.350755616 +77 51 0 days 00:00:01.392653094 +77 52 0 days 00:00:02.673161705 +77 53 0 days 00:00:02.365652852 +77 54 0 days 00:00:01.296175135 +77 55 0 days 00:00:02.286248336 +77 56 0 days 00:00:02.114661128 +77 59 0 days 00:00:04.604520300 +77 60 0 days 00:00:08.231810100 +77 62 0 days 00:00:02.555660760 +77 63 0 days 00:00:00.786839944 +77 65 0 days 00:00:00.319614707 +77 66 0 days 00:00:02.679578044 +77 67 0 days 00:00:01.976433927 +77 68 0 days 00:00:01.254599713 +77 69 0 days 00:00:03.076202696 +77 70 0 days 00:00:00.595075764 +77 71 0 days 00:00:00.475257421 +78 1 0 days 00:00:01.403390374 +78 2 0 days 00:00:01.415743996 +78 3 0 days 00:00:00.889534818 +78 4 0 days 00:00:00.696482740 +78 5 0 days 00:00:00.797533986 +78 6 0 days 00:00:01.482986368 +78 7 0 days 00:00:00.872119146 +78 8 0 days 00:00:00.853391426 +78 9 0 days 00:00:01.765781145 +78 10 0 days 00:00:00.446464028 +78 11 0 days 00:00:00.894813260 +78 12 0 days 00:00:00.745931680 +78 13 0 days 00:00:01.447637740 +78 14 0 days 00:00:01.309292440 +78 15 0 days 00:00:00.760365617 +78 16 0 days 00:00:01.483326460 +78 17 0 days 00:00:01.539949108 +78 18 0 days 00:00:01.603618040 +78 19 0 days 00:00:00.957738057 +78 20 0 days 00:00:00.740454411 +78 21 0 days 00:00:01.567433310 +78 22 0 days 00:00:01.416793326 +78 23 0 days 00:00:00.439478525 +78 24 0 days 00:00:00.818664200 +78 25 0 days 00:00:01.487867444 +78 26 0 days 00:00:01.392540012 +78 27 0 days 00:00:01.284684992 +78 28 0 days 00:00:00.688522993 +78 29 0 days 00:00:00.876776191 +78 30 0 days 00:00:01.662095656 +78 31 0 days 00:00:01.372092400 +78 32 0 days 00:00:01.410391000 +78 33 0 days 00:00:01.519062940 +78 34 0 days 00:00:01.444090773 +78 35 0 days 00:00:00.987864615 +78 36 0 days 00:00:01.144033420 +78 37 0 days 00:00:00.471114758 +78 38 0 days 00:00:00.764060538 +78 39 0 days 00:00:00.485302774 +78 40 0 days 00:00:00.430954771 +78 41 0 
days 00:00:01.612465875 +78 42 0 days 00:00:01.237151390 +78 43 0 days 00:00:00.818593252 +78 44 0 days 00:00:01.408376732 +78 45 0 days 00:00:00.780345480 +78 46 0 days 00:00:01.498661530 +78 47 0 days 00:00:00.780756938 +78 48 0 days 00:00:00.843695284 +78 49 0 days 00:00:01.633500700 +78 50 0 days 00:00:01.747690763 +78 51 0 days 00:00:00.793709672 +78 52 0 days 00:00:01.406680956 +78 53 0 days 00:00:01.363329780 +78 54 0 days 00:00:00.435417927 +78 55 0 days 00:00:00.779287175 +78 56 0 days 00:00:01.811247225 +78 57 0 days 00:00:01.459303542 +78 58 0 days 00:00:00.811881530 +78 59 0 days 00:00:00.779279390 +78 60 0 days 00:00:01.564143428 +78 61 0 days 00:00:01.880343583 +78 62 0 days 00:00:00.460289000 +78 63 0 days 00:00:01.550762492 +78 64 0 days 00:00:01.872913126 +78 65 0 days 00:00:01.491069651 +78 66 0 days 00:00:00.867812915 +78 67 0 days 00:00:01.439326923 +78 68 0 days 00:00:00.841369431 +78 69 0 days 00:00:01.544812068 +78 70 0 days 00:00:00.904988014 +78 71 0 days 00:00:00.468351382 +78 72 0 days 00:00:01.591395383 +78 73 0 days 00:00:00.698524008 +78 74 0 days 00:00:01.445446826 +78 75 0 days 00:00:00.900692994 +78 76 0 days 00:00:00.885822260 +78 77 0 days 00:00:01.423105612 +78 78 0 days 00:00:01.364081600 +78 79 0 days 00:00:00.452049715 +78 80 0 days 00:00:01.488046784 +78 81 0 days 00:00:01.669902496 +78 82 0 days 00:00:01.565648470 +78 83 0 days 00:00:01.341649495 +78 84 0 days 00:00:00.835943475 +78 85 0 days 00:00:01.457826653 +78 86 0 days 00:00:00.520746657 +78 87 0 days 00:00:01.078007273 +78 88 0 days 00:00:01.454363174 +78 89 0 days 00:00:01.304216910 +78 90 0 days 00:00:01.319494165 +78 91 0 days 00:00:00.786257148 +78 92 0 days 00:00:00.700406484 +78 93 0 days 00:00:01.360873708 +78 94 0 days 00:00:01.450452263 +78 95 0 days 00:00:01.695510585 +78 96 0 days 00:00:00.851333024 +78 97 0 days 00:00:00.475160986 +78 98 0 days 00:00:01.448519973 +78 99 0 days 00:00:00.463274634 +78 100 0 days 00:00:01.001568060 +79 1 0 days 
00:00:01.146653513 +79 2 0 days 00:00:01.495293788 +79 3 0 days 00:00:00.801475648 +79 4 0 days 00:00:00.893430163 +79 5 0 days 00:00:01.699683226 +79 6 0 days 00:00:00.913500051 +79 7 0 days 00:00:00.744112485 +79 8 0 days 00:00:01.398555980 +79 9 0 days 00:00:00.449319900 +79 10 0 days 00:00:00.870307320 +79 11 0 days 00:00:01.501216420 +79 12 0 days 00:00:00.863554851 +79 13 0 days 00:00:00.588370480 +79 14 0 days 00:00:01.399592960 +79 15 0 days 00:00:00.720571064 +79 16 0 days 00:00:01.790360972 +79 17 0 days 00:00:00.709333365 +79 18 0 days 00:00:00.894404913 +79 19 0 days 00:00:00.822676603 +79 20 0 days 00:00:01.337697297 +79 21 0 days 00:00:01.434667664 +79 22 0 days 00:00:01.583589506 +79 23 0 days 00:00:00.790511904 +79 24 0 days 00:00:01.469487982 +79 25 0 days 00:00:00.878182164 +79 26 0 days 00:00:01.510101222 +79 27 0 days 00:00:00.856472477 +79 28 0 days 00:00:01.411438280 +79 29 0 days 00:00:00.869071090 +79 30 0 days 00:00:00.745847716 +79 31 0 days 00:00:00.676708795 +79 32 0 days 00:00:00.716123790 +79 33 0 days 00:00:00.780427304 +79 34 0 days 00:00:01.574418335 +79 35 0 days 00:00:00.843866348 +79 36 0 days 00:00:00.715157458 +79 37 0 days 00:00:01.424959506 +79 38 0 days 00:00:01.356903645 +79 39 0 days 00:00:00.949391727 +79 40 0 days 00:00:01.451396170 +79 41 0 days 00:00:01.421050494 +79 42 0 days 00:00:00.953368481 +79 43 0 days 00:00:00.584819146 +79 44 0 days 00:00:01.049850974 +79 45 0 days 00:00:00.734865906 +79 46 0 days 00:00:01.489573060 +79 47 0 days 00:00:00.474632762 +79 48 0 days 00:00:01.514145047 +79 50 0 days 00:00:01.519573560 +79 51 0 days 00:00:01.736895717 +79 52 0 days 00:00:01.324435810 +79 53 0 days 00:00:00.435297571 +79 54 0 days 00:00:01.019999385 +79 55 0 days 00:00:01.351410733 +79 56 0 days 00:00:01.306072355 +79 57 0 days 00:00:01.583408064 +79 58 0 days 00:00:00.813606980 +79 59 0 days 00:00:01.616526196 +79 60 0 days 00:00:01.548129076 +79 61 0 days 00:00:01.482921462 +79 62 0 days 00:00:00.811500811 +79 63 0 
days 00:00:00.802795697 +79 64 0 days 00:00:00.640826946 +79 65 0 days 00:00:01.400289412 +79 66 0 days 00:00:00.999648700 +79 67 0 days 00:00:01.703031392 +79 68 0 days 00:00:01.611638169 +79 69 0 days 00:00:01.414021676 +79 70 0 days 00:00:01.601673771 +79 71 0 days 00:00:00.503744488 +79 72 0 days 00:00:00.810865520 +79 73 0 days 00:00:00.878102980 +79 74 0 days 00:00:00.847331025 +79 75 0 days 00:00:01.531926644 +79 76 0 days 00:00:01.377610764 +79 77 0 days 00:00:00.820740393 +79 78 0 days 00:00:00.543467525 +79 79 0 days 00:00:01.415039540 +79 80 0 days 00:00:00.528729428 +79 81 0 days 00:00:00.687030900 +79 82 0 days 00:00:00.708598064 +79 83 0 days 00:00:00.816846480 +79 84 0 days 00:00:01.442656942 +79 85 0 days 00:00:00.425972885 +79 86 0 days 00:00:01.381582755 +79 87 0 days 00:00:00.782385393 +79 88 0 days 00:00:00.833829133 +79 89 0 days 00:00:00.955975307 +79 90 0 days 00:00:01.765426020 +79 91 0 days 00:00:01.681837105 +79 92 0 days 00:00:01.052329222 +79 93 0 days 00:00:00.484393450 +79 94 0 days 00:00:01.318866920 +79 95 0 days 00:00:00.796431852 +79 96 0 days 00:00:01.500296970 +79 97 0 days 00:00:00.829836322 +79 98 0 days 00:00:00.719617204 +79 99 0 days 00:00:01.118112648 +79 100 0 days 00:00:00.879429670 +80 1 0 days 00:00:01.329974632 +80 2 0 days 00:00:01.405631832 +80 3 0 days 00:00:00.436058123 +80 4 0 days 00:00:01.110132092 +80 5 0 days 00:00:00.420252172 +80 6 0 days 00:00:00.972116486 +80 7 0 days 00:00:00.990540930 +80 8 0 days 00:00:01.047141385 +80 9 0 days 00:00:00.491064920 +80 10 0 days 00:00:00.573198110 +80 11 0 days 00:00:01.284371550 +80 12 0 days 00:00:00.585203018 +80 13 0 days 00:00:00.986825910 +80 14 0 days 00:00:01.068480280 +80 15 0 days 00:00:00.820205225 +80 16 0 days 00:00:01.139626235 +80 17 0 days 00:00:00.678797143 +80 18 0 days 00:00:00.778335653 +80 19 0 days 00:00:00.571902540 +80 20 0 days 00:00:01.274767200 +80 21 0 days 00:00:01.033195736 +80 22 0 days 00:00:00.294112703 +80 23 0 days 00:00:00.552252272 +80 
24 0 days 00:00:01.163900342 +80 25 0 days 00:00:00.505432943 +80 26 0 days 00:00:01.049797916 +80 27 0 days 00:00:01.398866774 +80 28 0 days 00:00:00.941433695 +80 29 0 days 00:00:00.644424212 +80 30 0 days 00:00:00.816853956 +80 31 0 days 00:00:00.822050877 +80 32 0 days 00:00:00.640550784 +80 33 0 days 00:00:01.545776073 +80 34 0 days 00:00:00.617550131 +80 35 0 days 00:00:00.741892522 +80 36 0 days 00:00:00.663764344 +80 37 0 days 00:00:01.431432191 +80 38 0 days 00:00:00.542372051 +80 39 0 days 00:00:01.594663827 +80 40 0 days 00:00:00.541910458 +80 41 0 days 00:00:00.502972180 +80 42 0 days 00:00:01.434764860 +80 43 0 days 00:00:00.903207431 +80 44 0 days 00:00:00.541780892 +80 45 0 days 00:00:00.400142203 +80 46 0 days 00:00:00.733196798 +80 47 0 days 00:00:00.956686764 +80 48 0 days 00:00:01.159095815 +80 49 0 days 00:00:00.550777900 +80 50 0 days 00:00:00.428987690 +80 51 0 days 00:00:00.882402785 +80 52 0 days 00:00:00.974791738 +80 53 0 days 00:00:00.629205550 +80 54 0 days 00:00:00.595709962 +80 55 0 days 00:00:01.270896388 +80 56 0 days 00:00:00.549683215 +80 57 0 days 00:00:00.743679168 +80 58 0 days 00:00:01.608979955 +80 59 0 days 00:00:00.359338578 +80 60 0 days 00:00:01.051172362 +80 61 0 days 00:00:01.281848231 +80 62 0 days 00:00:01.026478405 +80 63 0 days 00:00:00.470798920 +80 64 0 days 00:00:00.945647650 +80 65 0 days 00:00:01.181678425 +80 66 0 days 00:00:00.379181497 +80 67 0 days 00:00:00.383340576 +80 68 0 days 00:00:00.816882310 +80 69 0 days 00:00:01.090573280 +80 70 0 days 00:00:00.363117280 +80 71 0 days 00:00:00.669840811 +80 72 0 days 00:00:00.500254942 +80 73 0 days 00:00:00.754211676 +80 74 0 days 00:00:00.924767122 +80 75 0 days 00:00:00.644249111 +80 76 0 days 00:00:00.730865545 +80 77 0 days 00:00:00.476412551 +80 78 0 days 00:00:01.307238388 +80 79 0 days 00:00:01.386762722 +80 80 0 days 00:00:00.605731676 +80 81 0 days 00:00:00.440332604 +80 82 0 days 00:00:00.657583178 +80 83 0 days 00:00:00.623020152 +80 84 0 days 
00:00:01.225811716 +80 85 0 days 00:00:00.941864505 +80 86 0 days 00:00:01.801765106 +80 87 0 days 00:00:00.455690413 +80 88 0 days 00:00:00.737442596 +80 89 0 days 00:00:00.622681588 +80 90 0 days 00:00:00.334815114 +80 91 0 days 00:00:00.936212857 +80 92 0 days 00:00:01.713867397 +80 93 0 days 00:00:00.906705228 +80 94 0 days 00:00:01.488672145 +80 95 0 days 00:00:00.550351795 +80 96 0 days 00:00:00.757634363 +80 97 0 days 00:00:00.836521983 +80 98 0 days 00:00:00.427515968 +80 99 0 days 00:00:00.840772742 +80 100 0 days 00:00:01.055297526 +81 1 0 days 00:00:01.533523580 +81 2 0 days 00:00:00.545807033 +81 3 0 days 00:00:01.604064945 +81 4 0 days 00:00:01.102187031 +81 5 0 days 00:00:01.142616976 +81 6 0 days 00:00:00.677704400 +81 7 0 days 00:00:01.510501350 +81 8 0 days 00:00:00.427599962 +81 9 0 days 00:00:00.825641745 +81 10 0 days 00:00:00.919401896 +81 11 0 days 00:00:00.374977801 +81 12 0 days 00:00:01.422825377 +81 13 0 days 00:00:00.752532900 +81 15 0 days 00:00:00.565080223 +81 16 0 days 00:00:00.956955541 +81 17 0 days 00:00:01.305667155 +81 18 0 days 00:00:01.266884212 +81 19 0 days 00:00:01.287540496 +81 20 0 days 00:00:00.907370285 +81 21 0 days 00:00:00.825833930 +81 22 0 days 00:00:00.309607762 +81 23 0 days 00:00:00.961704393 +81 24 0 days 00:00:00.441103791 +81 25 0 days 00:00:00.837320326 +81 26 0 days 00:00:00.600053009 +81 27 0 days 00:00:00.876157170 +81 28 0 days 00:00:01.182491058 +81 29 0 days 00:00:01.150242056 +81 30 0 days 00:00:00.434081980 +81 31 0 days 00:00:01.459321415 +81 32 0 days 00:00:00.656023294 +81 33 0 days 00:00:01.569988425 +81 34 0 days 00:00:00.628317046 +81 35 0 days 00:00:00.961731135 +81 36 0 days 00:00:00.374066434 +81 37 0 days 00:00:00.714239192 +81 38 0 days 00:00:01.163135505 +81 39 0 days 00:00:00.833470340 +81 40 0 days 00:00:01.026695570 +81 41 0 days 00:00:01.106677522 +81 42 0 days 00:00:00.952688096 +81 43 0 days 00:00:00.602927788 +81 44 0 days 00:00:01.429955943 +81 45 0 days 00:00:01.099930140 +81 46 0 
days 00:00:01.628330865 +81 47 0 days 00:00:00.478692712 +81 48 0 days 00:00:01.373463456 +81 49 0 days 00:00:01.138413097 +81 50 0 days 00:00:01.332087064 +81 51 0 days 00:00:00.612449686 +81 52 0 days 00:00:01.160472165 +81 53 0 days 00:00:00.836894130 +81 54 0 days 00:00:01.244870302 +81 55 0 days 00:00:00.992233935 +81 56 0 days 00:00:00.633494956 +81 57 0 days 00:00:01.053083140 +81 58 0 days 00:00:01.217923335 +81 59 0 days 00:00:00.898882360 +81 60 0 days 00:00:01.111327993 +81 61 0 days 00:00:01.037795022 +81 62 0 days 00:00:01.631007628 +81 63 0 days 00:00:00.988178355 +81 64 0 days 00:00:00.617788660 +81 65 0 days 00:00:01.363897614 +81 66 0 days 00:00:00.719819576 +81 67 0 days 00:00:01.249990980 +81 68 0 days 00:00:00.545750828 +81 69 0 days 00:00:00.596034632 +81 70 0 days 00:00:01.263873380 +81 71 0 days 00:00:01.376796503 +81 72 0 days 00:00:00.321924097 +81 73 0 days 00:00:00.807027425 +81 74 0 days 00:00:01.127237670 +81 75 0 days 00:00:00.630809896 +81 76 0 days 00:00:01.471158040 +81 77 0 days 00:00:01.269197454 +81 78 0 days 00:00:01.412519635 +81 79 0 days 00:00:01.048088370 +81 80 0 days 00:00:01.172035448 +81 81 0 days 00:00:00.586010801 +81 82 0 days 00:00:01.527541296 +81 83 0 days 00:00:00.872704255 +81 84 0 days 00:00:00.943423780 +81 85 0 days 00:00:01.152914530 +81 86 0 days 00:00:01.110832718 +81 87 0 days 00:00:00.423141202 +81 88 0 days 00:00:01.342175923 +81 89 0 days 00:00:00.865732792 +81 90 0 days 00:00:00.918573448 +81 91 0 days 00:00:01.125942110 +81 92 0 days 00:00:01.397917543 +81 93 0 days 00:00:01.193732150 +81 94 0 days 00:00:00.311193106 +81 95 0 days 00:00:00.810421576 +81 96 0 days 00:00:01.381918831 +81 97 0 days 00:00:00.383510228 +81 98 0 days 00:00:00.394349628 +81 99 0 days 00:00:00.812167797 +81 100 0 days 00:00:01.069895952 +82 2 0 days 00:00:03.894036860 +82 3 0 days 00:00:08.560342840 +82 4 0 days 00:00:01.026309365 +82 5 0 days 00:00:01.893872470 +82 6 0 days 00:00:05.588880390 +82 7 0 days 00:00:07.289538265 
+82 8 0 days 00:00:03.494497305 +82 9 0 days 00:00:01.341939575 +82 10 0 days 00:00:01.214870140 +82 11 0 days 00:00:08.631752645 +82 12 0 days 00:00:09.808243095 +82 13 0 days 00:00:10.683240910 +82 14 0 days 00:00:02.252265000 +82 15 0 days 00:00:02.478042430 +82 16 0 days 00:00:01.612160285 +82 17 0 days 00:00:03.814918520 +82 18 0 days 00:00:02.481836705 +82 19 0 days 00:00:11.134010795 +82 20 0 days 00:00:05.752603665 +82 21 0 days 00:00:04.591049535 +82 22 0 days 00:00:07.640414250 +82 23 0 days 00:00:05.118613786 +82 24 0 days 00:00:05.620339290 +82 25 0 days 00:00:07.268751753 +82 26 0 days 00:00:11.610430013 +82 27 0 days 00:00:01.609126005 +82 28 0 days 00:00:03.302591180 +82 29 0 days 00:00:15.714774073 +82 30 0 days 00:00:05.215212210 +82 31 0 days 00:00:01.960897620 +82 32 0 days 00:00:01.680846730 +82 33 0 days 00:00:02.577732366 +82 34 0 days 00:00:04.543962925 +82 36 0 days 00:00:07.842818126 +82 37 0 days 00:00:10.105865115 +82 38 0 days 00:00:14.845148225 +82 39 0 days 00:00:01.106733050 +82 40 0 days 00:00:01.029200180 +82 41 0 days 00:00:20.957554610 +82 42 0 days 00:00:01.614181790 +82 43 0 days 00:00:04.861483685 +82 44 0 days 00:00:12.479807685 +82 45 0 days 00:00:02.342001613 +82 46 0 days 00:00:05.501123750 +82 47 0 days 00:00:02.931140005 +82 48 0 days 00:00:04.591957346 +82 49 0 days 00:00:03.628466266 +82 50 0 days 00:00:01.488086500 +82 51 0 days 00:00:03.392655120 +82 52 0 days 00:00:03.084300755 +82 53 0 days 00:00:00.953300805 +82 54 0 days 00:00:05.849382130 +82 55 0 days 00:00:01.034306315 +82 56 0 days 00:00:01.172122600 +82 57 0 days 00:00:02.936815244 +82 58 0 days 00:00:02.342351565 +82 59 0 days 00:00:01.812867000 +82 60 0 days 00:00:01.277648510 +82 61 0 days 00:00:00.795186455 +82 62 0 days 00:00:02.665318866 +82 63 0 days 00:00:08.970251593 +82 64 0 days 00:00:04.836623355 +82 65 0 days 00:00:01.246158226 +82 66 0 days 00:00:03.496333248 +82 67 0 days 00:00:05.868849720 +82 68 0 days 00:00:02.386272695 +82 69 0 days 
00:00:02.930842795 +82 70 0 days 00:00:03.367079605 +82 71 0 days 00:00:17.196434815 +82 72 0 days 00:00:06.690701060 +82 73 0 days 00:00:04.388329905 +82 74 0 days 00:00:01.162293616 +82 75 0 days 00:00:01.507821725 +82 76 0 days 00:00:03.208984695 +82 77 0 days 00:00:09.314942780 +82 78 0 days 00:00:11.153040305 +82 79 0 days 00:00:01.632863240 +82 80 0 days 00:00:02.371615860 +82 81 0 days 00:00:04.469855315 +82 82 0 days 00:00:02.298553170 +82 83 0 days 00:00:01.288835120 +82 84 0 days 00:00:09.024382086 +82 85 0 days 00:00:09.008407615 +82 86 0 days 00:00:10.032600460 +82 87 0 days 00:00:03.728990660 +82 88 0 days 00:00:02.330228208 +82 89 0 days 00:00:09.085404905 +82 90 0 days 00:00:01.484635200 +82 91 0 days 00:00:04.567707653 +83 1 0 days 00:00:02.433957960 +83 2 0 days 00:00:03.332400785 +83 3 0 days 00:00:04.020502295 +83 4 0 days 00:00:02.057701828 +83 5 0 days 00:00:00.635741431 +83 6 0 days 00:00:01.103691645 +83 7 0 days 00:00:03.965129595 +83 8 0 days 00:00:03.101158220 +83 9 0 days 00:00:08.904743680 +83 10 0 days 00:00:02.789715045 +83 11 0 days 00:00:01.008806396 +83 12 0 days 00:00:01.275104105 +83 13 0 days 00:00:04.446835940 +83 14 0 days 00:00:03.455159052 +83 15 0 days 00:00:03.563295990 +83 16 0 days 00:00:01.683607960 +83 17 0 days 00:00:00.390471720 +83 18 0 days 00:00:02.652137184 +83 20 0 days 00:00:01.589013986 +83 21 0 days 00:00:01.039785765 +83 22 0 days 00:00:01.738899005 +83 23 0 days 00:00:04.855703760 +83 24 0 days 00:00:01.368865045 +83 25 0 days 00:00:01.890338065 +83 26 0 days 00:00:03.724868200 +83 27 0 days 00:00:01.782229720 +83 29 0 days 00:00:00.610528805 +83 30 0 days 00:00:01.337664595 +83 31 0 days 00:00:01.714271205 +83 33 0 days 00:00:02.685140910 +83 34 0 days 00:00:01.915395955 +83 35 0 days 00:00:02.611641565 +83 36 0 days 00:00:02.473596276 +83 37 0 days 00:00:00.599638175 +83 38 0 days 00:00:01.630460580 +83 39 0 days 00:00:00.673150080 +83 40 0 days 00:00:01.352553740 +83 41 0 days 00:00:00.363810960 +83 42 0 
days 00:00:01.184236895 +83 43 0 days 00:00:02.859795387 +83 44 0 days 00:00:10.970396020 +83 45 0 days 00:00:00.322378115 +83 46 0 days 00:00:00.641710390 +83 47 0 days 00:00:06.453759515 +83 48 0 days 00:00:05.208946980 +83 49 0 days 00:00:01.576551554 +83 50 0 days 00:00:00.607313380 +83 51 0 days 00:00:02.408280830 +83 52 0 days 00:00:01.564882831 +83 53 0 days 00:00:02.655290610 +83 54 0 days 00:00:01.196899860 +83 55 0 days 00:00:02.140991965 +83 56 0 days 00:00:01.501401085 +83 57 0 days 00:00:01.306967780 +83 58 0 days 00:00:02.093535996 +83 59 0 days 00:00:00.663598265 +83 60 0 days 00:00:04.528332035 +83 61 0 days 00:00:03.171388165 +83 62 0 days 00:00:01.950075325 +83 63 0 days 00:00:05.509318510 +83 64 0 days 00:00:01.411858535 +83 65 0 days 00:00:01.718667728 +83 66 0 days 00:00:00.870223240 +83 67 0 days 00:00:00.894204255 +83 68 0 days 00:00:00.614798060 +83 69 0 days 00:00:04.079645035 +83 70 0 days 00:00:02.355208045 +83 71 0 days 00:00:02.251339445 +83 72 0 days 00:00:01.911653345 +83 73 0 days 00:00:04.733531208 +83 74 0 days 00:00:01.573205620 +83 75 0 days 00:00:01.812798635 +83 76 0 days 00:00:00.371718930 +83 77 0 days 00:00:00.704007016 +83 78 0 days 00:00:01.102910065 +83 79 0 days 00:00:03.346015883 +83 80 0 days 00:00:03.446284820 +83 81 0 days 00:00:04.387822265 +83 82 0 days 00:00:04.190676125 +83 83 0 days 00:00:06.902505870 +83 84 0 days 00:00:02.991874635 +83 85 0 days 00:00:02.187839460 +83 86 0 days 00:00:01.727634420 +83 87 0 days 00:00:01.190248056 +83 88 0 days 00:00:01.019812530 +83 89 0 days 00:00:05.168691866 +83 90 0 days 00:00:02.309198520 +83 91 0 days 00:00:03.027275100 +83 92 0 days 00:00:03.094722680 +83 93 0 days 00:00:00.643145476 +83 94 0 days 00:00:04.604496540 +83 96 0 days 00:00:02.660345000 +83 97 0 days 00:00:02.899298300 +83 98 0 days 00:00:01.631941030 +83 99 0 days 00:00:00.917232945 +83 100 0 days 00:00:07.834277485 +84 1 0 days 00:00:07.249233026 +84 2 0 days 00:00:04.209400300 +84 3 0 days 
00:00:09.486128155 +84 4 0 days 00:00:09.872101110 +84 5 0 days 00:00:03.911650352 +84 6 0 days 00:00:01.208280160 +84 7 0 days 00:00:07.716969526 +84 8 0 days 00:00:04.130670513 +84 9 0 days 00:00:13.623123065 +84 10 0 days 00:00:06.055173210 +84 11 0 days 00:00:06.382983005 +84 12 0 days 00:00:00.729199505 +84 13 0 days 00:00:03.534498465 +84 14 0 days 00:00:00.893149290 +84 15 0 days 00:00:06.063882012 +84 16 0 days 00:00:01.806277195 +84 17 0 days 00:00:01.950599076 +84 18 0 days 00:00:08.702642140 +84 19 0 days 00:00:02.348426640 +84 20 0 days 00:00:09.669725688 +84 21 0 days 00:00:03.445474765 +84 22 0 days 00:00:03.655238335 +84 23 0 days 00:00:01.683182365 +84 24 0 days 00:00:16.082401444 +84 25 0 days 00:00:07.400695053 +84 26 0 days 00:00:03.302643405 +84 27 0 days 00:00:05.718035616 +84 28 0 days 00:00:02.339709710 +84 29 0 days 00:00:01.437637536 +84 30 0 days 00:00:04.994049133 +84 31 0 days 00:00:02.150363895 +84 32 0 days 00:00:00.616152120 +84 33 0 days 00:00:03.185362135 +84 34 0 days 00:00:04.916011590 +84 35 0 days 00:00:05.749768240 +84 36 0 days 00:00:04.050075206 +84 37 0 days 00:00:08.845347625 +84 38 0 days 00:00:02.975256855 +84 39 0 days 00:00:03.514390386 +84 40 0 days 00:00:01.721304280 +84 41 0 days 00:00:02.107539131 +84 42 0 days 00:00:15.057181340 +84 43 0 days 00:00:03.179638070 +84 44 0 days 00:00:06.234523135 +84 45 0 days 00:00:03.339037180 +84 46 0 days 00:00:03.721887065 +84 47 0 days 00:00:02.341438950 +84 48 0 days 00:00:03.260047975 +84 49 0 days 00:00:04.954849296 +84 50 0 days 00:00:13.788558490 +84 51 0 days 00:00:13.946866305 +84 52 0 days 00:00:02.332012015 +84 53 0 days 00:00:09.932290160 +84 54 0 days 00:00:02.858975426 +84 55 0 days 00:00:05.788887110 +84 56 0 days 00:00:08.242998832 +84 57 0 days 00:00:01.654365680 +84 58 0 days 00:00:14.576329865 +84 59 0 days 00:00:00.999819390 +84 60 0 days 00:00:03.271980760 +84 61 0 days 00:00:01.958470825 +84 62 0 days 00:00:06.083137005 +84 63 0 days 00:00:10.885873380 +84 64 
0 days 00:00:06.518925150 +84 65 0 days 00:00:08.128478700 +84 66 0 days 00:00:02.847761825 +84 67 0 days 00:00:21.470705485 +84 68 0 days 00:00:05.512320920 +84 69 0 days 00:00:07.473153960 +84 70 0 days 00:00:02.960042015 +84 71 0 days 00:00:09.360550724 +84 72 0 days 00:00:08.468238955 +85 1 0 days 00:00:02.783052316 +85 2 0 days 00:00:03.211929204 +85 3 0 days 00:00:01.110209455 +85 4 0 days 00:00:03.576566935 +85 5 0 days 00:00:01.085457635 +85 6 0 days 00:00:03.380490337 +85 7 0 days 00:00:05.531606800 +85 8 0 days 00:00:08.120365380 +85 9 0 days 00:00:02.021550360 +85 10 0 days 00:00:02.382437680 +85 12 0 days 00:00:00.408953350 +85 13 0 days 00:00:00.292446840 +85 15 0 days 00:00:03.910466755 +85 16 0 days 00:00:00.737789763 +85 17 0 days 00:00:03.631308104 +85 18 0 days 00:00:01.035506107 +85 19 0 days 00:00:04.524228795 +85 20 0 days 00:00:03.779829415 +85 21 0 days 00:00:04.869876463 +85 22 0 days 00:00:01.740164252 +85 23 0 days 00:00:02.423343285 +85 24 0 days 00:00:03.340951075 +85 25 0 days 00:00:03.579688060 +85 26 0 days 00:00:03.247942450 +85 27 0 days 00:00:00.733857595 +85 28 0 days 00:00:05.250036355 +85 29 0 days 00:00:04.497029235 +85 30 0 days 00:00:00.855300700 +85 31 0 days 00:00:01.246568532 +85 32 0 days 00:00:05.240938724 +85 33 0 days 00:00:00.886469300 +85 34 0 days 00:00:08.969594995 +85 35 0 days 00:00:01.598000240 +85 36 0 days 00:00:04.674882695 +85 37 0 days 00:00:04.790037565 +85 38 0 days 00:00:02.467575305 +85 39 0 days 00:00:02.341952140 +85 40 0 days 00:00:02.344888855 +85 41 0 days 00:00:01.453771745 +85 42 0 days 00:00:04.934930510 +85 43 0 days 00:00:02.496596845 +85 45 0 days 00:00:02.490531815 +85 46 0 days 00:00:01.699295084 +85 47 0 days 00:00:01.615729588 +85 48 0 days 00:00:08.739389168 +85 49 0 days 00:00:05.316713533 +85 50 0 days 00:00:01.690745930 +85 51 0 days 00:00:00.765982357 +85 52 0 days 00:00:01.558169860 +85 53 0 days 00:00:01.635504095 +85 54 0 days 00:00:01.573784880 +85 55 0 days 00:00:04.106295096 
+85 56 0 days 00:00:00.676969443 +85 57 0 days 00:00:01.399720217 +85 58 0 days 00:00:01.164072575 +85 59 0 days 00:00:00.779718700 +85 60 0 days 00:00:05.055466355 +85 61 0 days 00:00:02.750671575 +85 62 0 days 00:00:01.788847703 +85 63 0 days 00:00:01.407678925 +85 65 0 days 00:00:04.726046495 +85 66 0 days 00:00:02.901123204 +85 67 0 days 00:00:01.072733660 +85 68 0 days 00:00:04.851795565 +85 69 0 days 00:00:01.150356015 +85 70 0 days 00:00:03.361473155 +85 71 0 days 00:00:02.977469150 +85 72 0 days 00:00:02.703774922 +85 73 0 days 00:00:05.541039740 +85 74 0 days 00:00:01.816178504 +85 75 0 days 00:00:00.665472585 +85 76 0 days 00:00:03.338671676 +85 77 0 days 00:00:01.451041670 +85 78 0 days 00:00:01.527419756 +85 79 0 days 00:00:02.622411165 +85 80 0 days 00:00:00.786051868 +85 81 0 days 00:00:02.235435640 +85 82 0 days 00:00:06.137640645 +85 83 0 days 00:00:01.788803446 +85 85 0 days 00:00:08.474024825 +85 86 0 days 00:00:03.940781625 +85 87 0 days 00:00:05.949050923 +85 88 0 days 00:00:00.999410505 +85 89 0 days 00:00:02.947474132 +85 91 0 days 00:00:00.943695520 +85 92 0 days 00:00:04.694167840 +85 94 0 days 00:00:01.989885048 +85 96 0 days 00:00:01.255936415 +85 97 0 days 00:00:03.424534286 +85 98 0 days 00:00:00.657493775 +85 99 0 days 00:00:08.691552220 +85 100 0 days 00:00:02.050919702 +86 1 0 days 00:00:00.325354865 +86 2 0 days 00:00:00.171979971 +86 3 0 days 00:00:00.200446566 +86 4 0 days 00:00:00.181644406 +86 5 0 days 00:00:00.182298020 +86 6 0 days 00:00:00.267537180 +86 7 0 days 00:00:00.171044551 +86 8 0 days 00:00:00.265993760 +86 9 0 days 00:00:00.263525755 +86 10 0 days 00:00:00.212210672 +86 11 0 days 00:00:00.176529292 +86 12 0 days 00:00:00.283707933 +86 13 0 days 00:00:00.253264255 +86 14 0 days 00:00:00.204776511 +86 15 0 days 00:00:00.180566542 +86 16 0 days 00:00:00.272184295 +86 17 0 days 00:00:00.178272751 +86 18 0 days 00:00:00.200566064 +86 19 0 days 00:00:00.205742151 +86 20 0 days 00:00:00.218411648 +86 21 0 days 
00:00:00.266486577 +86 22 0 days 00:00:00.252710296 +86 23 0 days 00:00:00.204241326 +86 24 0 days 00:00:00.216165248 +86 25 0 days 00:00:00.221905280 +86 26 0 days 00:00:00.170534783 +86 27 0 days 00:00:00.169679217 +86 28 0 days 00:00:00.170076645 +86 29 0 days 00:00:00.259798520 +86 30 0 days 00:00:00.250747080 +86 31 0 days 00:00:00.172923315 +86 32 0 days 00:00:00.218133680 +86 33 0 days 00:00:00.211363933 +86 34 0 days 00:00:00.228453182 +86 35 0 days 00:00:00.212340714 +86 36 0 days 00:00:00.264552324 +86 37 0 days 00:00:00.203286380 +86 38 0 days 00:00:00.193678964 +86 39 0 days 00:00:00.174509156 +86 40 0 days 00:00:00.193051850 +86 41 0 days 00:00:00.170552930 +86 42 0 days 00:00:00.277131317 +86 43 0 days 00:00:00.254354870 +86 44 0 days 00:00:00.217227443 +86 45 0 days 00:00:00.174361257 +86 46 0 days 00:00:00.266456910 +86 47 0 days 00:00:00.162002940 +86 48 0 days 00:00:00.273063142 +86 49 0 days 00:00:00.202764848 +86 50 0 days 00:00:00.258938880 +86 51 0 days 00:00:00.264513864 +86 52 0 days 00:00:00.250488112 +86 53 0 days 00:00:00.215422340 +86 54 0 days 00:00:00.275628966 +86 55 0 days 00:00:00.199895372 +86 56 0 days 00:00:00.259201392 +86 57 0 days 00:00:00.205427808 +86 58 0 days 00:00:00.276146893 +86 59 0 days 00:00:00.171874536 +86 60 0 days 00:00:00.203177262 +86 61 0 days 00:00:00.187354637 +86 62 0 days 00:00:00.213716876 +86 63 0 days 00:00:00.190719596 +86 64 0 days 00:00:00.198609653 +86 65 0 days 00:00:00.188957175 +86 66 0 days 00:00:00.178883124 +86 67 0 days 00:00:00.171419057 +86 68 0 days 00:00:00.209039480 +86 69 0 days 00:00:00.273836320 +86 70 0 days 00:00:00.270260182 +86 71 0 days 00:00:00.264326750 +86 72 0 days 00:00:00.191166820 +86 73 0 days 00:00:00.203605948 +86 74 0 days 00:00:00.173104592 +86 75 0 days 00:00:00.191739582 +86 76 0 days 00:00:00.181417240 +86 77 0 days 00:00:00.280664343 +86 78 0 days 00:00:00.220654156 +86 79 0 days 00:00:00.250379323 +86 80 0 days 00:00:00.191684070 +86 81 0 days 00:00:00.199323056 
+86 82 0 days 00:00:00.220924680 +86 83 0 days 00:00:00.205316005 +86 84 0 days 00:00:00.175126562 +86 85 0 days 00:00:00.275636348 +86 86 0 days 00:00:00.204954248 +86 87 0 days 00:00:00.253720680 +86 88 0 days 00:00:00.178200945 +86 89 0 days 00:00:00.209004003 +86 90 0 days 00:00:00.194781080 +86 91 0 days 00:00:00.178079254 +86 92 0 days 00:00:00.202967076 +86 93 0 days 00:00:00.182583045 +86 94 0 days 00:00:00.193066831 +86 95 0 days 00:00:00.200523346 +86 96 0 days 00:00:00.280985666 +86 97 0 days 00:00:00.203026137 +86 98 0 days 00:00:00.264931708 +86 99 0 days 00:00:00.168814170 +86 100 0 days 00:00:00.276953927 +87 1 0 days 00:00:00.126294115 +87 2 0 days 00:00:00.125542815 +87 3 0 days 00:00:00.164359621 +87 5 0 days 00:00:00.155966115 +87 6 0 days 00:00:00.167888028 +87 7 0 days 00:00:00.137994958 +87 8 0 days 00:00:00.115510891 +87 9 0 days 00:00:00.104256272 +87 10 0 days 00:00:00.104565971 +87 11 0 days 00:00:00.162708840 +87 12 0 days 00:00:00.168531757 +87 13 0 days 00:00:00.165083865 +87 14 0 days 00:00:00.120175433 +87 15 0 days 00:00:00.117078992 +87 16 0 days 00:00:00.138204733 +87 17 0 days 00:00:00.171133972 +87 18 0 days 00:00:00.200634515 +87 19 0 days 00:00:00.133443062 +87 20 0 days 00:00:00.171932545 +87 21 0 days 00:00:00.122696038 +87 22 0 days 00:00:00.129836164 +87 23 0 days 00:00:00.121332612 +87 24 0 days 00:00:00.110690190 +87 25 0 days 00:00:00.148009870 +87 26 0 days 00:00:00.111075946 +87 27 0 days 00:00:00.160637330 +87 28 0 days 00:00:00.116735936 +87 29 0 days 00:00:00.158830510 +87 30 0 days 00:00:00.174715542 +87 31 0 days 00:00:00.133804954 +87 32 0 days 00:00:00.109333700 +87 33 0 days 00:00:00.113848200 +87 34 0 days 00:00:00.160266614 +87 35 0 days 00:00:00.124055737 +87 36 0 days 00:00:00.170357285 +87 37 0 days 00:00:00.131293786 +87 38 0 days 00:00:00.188130546 +87 39 0 days 00:00:00.199676010 +87 40 0 days 00:00:00.128602641 +87 41 0 days 00:00:00.125896570 +87 42 0 days 00:00:00.124486135 +87 43 0 days 
00:00:00.130518977 +87 44 0 days 00:00:00.103290520 +87 45 0 days 00:00:00.163626395 +87 46 0 days 00:00:00.134316994 +87 47 0 days 00:00:00.137451171 +87 48 0 days 00:00:00.120649849 +87 49 0 days 00:00:00.116067423 +87 50 0 days 00:00:00.139693562 +87 51 0 days 00:00:00.113794318 +87 52 0 days 00:00:00.160224764 +87 53 0 days 00:00:00.171870398 +87 54 0 days 00:00:00.192171517 +87 55 0 days 00:00:00.110639768 +87 56 0 days 00:00:00.164438640 +87 57 0 days 00:00:00.158684876 +87 58 0 days 00:00:00.183763229 +87 59 0 days 00:00:00.118094124 +87 60 0 days 00:00:00.133989683 +87 61 0 days 00:00:00.165226040 +87 62 0 days 00:00:00.109748694 +87 63 0 days 00:00:00.136365478 +87 64 0 days 00:00:00.118025021 +87 65 0 days 00:00:00.136435097 +87 66 0 days 00:00:00.141879835 +87 67 0 days 00:00:00.111571104 +87 68 0 days 00:00:00.111041607 +87 69 0 days 00:00:00.109344635 +87 70 0 days 00:00:00.179127890 +87 71 0 days 00:00:00.183852260 +87 72 0 days 00:00:00.131079886 +87 73 0 days 00:00:00.112060928 +87 74 0 days 00:00:00.171402185 +87 75 0 days 00:00:00.166565772 +87 76 0 days 00:00:00.178578480 +87 77 0 days 00:00:00.118094823 +87 78 0 days 00:00:00.127218448 +87 79 0 days 00:00:00.137233126 +87 80 0 days 00:00:00.143847806 +87 81 0 days 00:00:00.157757420 +87 82 0 days 00:00:00.138821574 +87 83 0 days 00:00:00.117126915 +87 84 0 days 00:00:00.112654662 +87 85 0 days 00:00:00.179011827 +87 86 0 days 00:00:00.186146177 +87 87 0 days 00:00:00.118263660 +87 88 0 days 00:00:00.105180910 +87 89 0 days 00:00:00.105589065 +87 90 0 days 00:00:00.166485191 +87 91 0 days 00:00:00.118665449 +87 92 0 days 00:00:00.114950216 +87 93 0 days 00:00:00.116847683 +87 94 0 days 00:00:00.159754330 +87 95 0 days 00:00:00.109544193 +87 96 0 days 00:00:00.136722316 +87 97 0 days 00:00:00.110315948 +87 98 0 days 00:00:00.111507607 +87 99 0 days 00:00:00.129858480 +87 100 0 days 00:00:00.167967694 +88 1 0 days 00:00:08.946078853 +88 3 0 days 00:00:18.558459200 +88 4 0 days 00:00:06.269592995 
+88 5 0 days 00:00:01.744144760 +88 6 0 days 00:00:07.646678000 +88 8 0 days 00:00:04.495623855 +88 9 0 days 00:00:00.428349340 +88 10 0 days 00:00:17.261583100 +88 11 0 days 00:00:04.115695856 +88 12 0 days 00:00:05.520345615 +88 13 0 days 00:00:01.632447175 +88 14 0 days 00:00:01.579252680 +88 15 0 days 00:00:02.180881564 +88 16 0 days 00:00:01.545374775 +88 17 0 days 00:00:01.135142210 +88 18 0 days 00:00:07.700266780 +88 19 0 days 00:00:08.167754130 +88 20 0 days 00:00:06.537482105 +88 21 0 days 00:00:03.058650420 +88 22 0 days 00:00:04.813021520 +88 23 0 days 00:00:03.736841600 +88 24 0 days 00:00:01.008644785 +88 25 0 days 00:00:07.471554605 +88 26 0 days 00:00:06.995168820 +88 27 0 days 00:00:10.411868490 +88 28 0 days 00:00:00.345302240 +88 29 0 days 00:00:10.049224550 +88 30 0 days 00:00:01.686610095 +88 31 0 days 00:00:02.470634590 +88 32 0 days 00:00:01.008947380 +88 33 0 days 00:00:11.344968705 +88 34 0 days 00:00:09.052977768 +88 35 0 days 00:00:07.517869645 +88 36 0 days 00:00:10.151239230 +88 37 0 days 00:00:07.793118280 +88 38 0 days 00:00:06.593227425 +88 39 0 days 00:00:02.364146905 +88 40 0 days 00:00:03.574320455 +88 41 0 days 00:00:09.810539615 +88 42 0 days 00:00:04.433794720 +88 43 0 days 00:00:03.721437715 +88 44 0 days 00:00:02.183716185 +88 45 0 days 00:00:02.371358005 +88 46 0 days 00:00:03.278435880 +88 47 0 days 00:00:01.636657560 +88 49 0 days 00:00:07.532547053 +88 50 0 days 00:00:00.720286745 +88 51 0 days 00:00:06.110658132 +88 52 0 days 00:00:04.915922190 +88 53 0 days 00:00:10.294271210 +88 54 0 days 00:00:04.107936073 +88 55 0 days 00:00:02.341088433 +88 56 0 days 00:00:09.627588130 +88 57 0 days 00:00:08.502713055 +88 58 0 days 00:00:09.457301915 +88 59 0 days 00:00:10.719807430 +88 60 0 days 00:00:07.650627190 +88 61 0 days 00:00:02.394130906 +88 62 0 days 00:00:08.366230420 +88 63 0 days 00:00:07.218944215 +88 65 0 days 00:00:16.781448920 +88 66 0 days 00:00:10.113727365 +88 67 0 days 00:00:03.734800953 +88 68 0 days 
00:00:08.044441295 +88 69 0 days 00:00:05.461678145 +88 70 0 days 00:00:00.528018823 +88 71 0 days 00:00:01.763346340 +88 72 0 days 00:00:05.671991385 +88 73 0 days 00:00:06.868994075 +88 74 0 days 00:00:00.936605870 +88 75 0 days 00:00:04.618331775 +88 76 0 days 00:00:05.319529000 +88 77 0 days 00:00:15.155421320 +89 1 0 days 00:00:02.101252732 +89 2 0 days 00:00:07.109401015 +89 3 0 days 00:00:08.467208686 +89 4 0 days 00:00:07.488198680 +89 5 0 days 00:00:08.295249912 +89 6 0 days 00:00:08.874812790 +89 7 0 days 00:00:05.334864630 +89 8 0 days 00:00:01.526674526 +89 9 0 days 00:00:00.700248860 +89 10 0 days 00:00:05.933845340 +89 11 0 days 00:00:05.182619730 +89 12 0 days 00:00:01.904179595 +89 13 0 days 00:00:05.730011090 +89 15 0 days 00:00:07.361345260 +89 16 0 days 00:00:04.450602180 +89 17 0 days 00:00:03.700288330 +89 18 0 days 00:00:09.066757255 +89 19 0 days 00:00:05.562977600 +89 20 0 days 00:00:01.055575806 +89 21 0 days 00:00:03.663822850 +89 24 0 days 00:00:07.766393320 +89 25 0 days 00:00:16.238998712 +89 26 0 days 00:00:04.626662495 +89 27 0 days 00:00:04.720184168 +89 28 0 days 00:00:04.770440995 +89 29 0 days 00:00:04.585404580 +89 30 0 days 00:00:00.396361140 +89 31 0 days 00:00:04.855212480 +89 32 0 days 00:00:06.587371900 +89 33 0 days 00:00:06.060363065 +89 34 0 days 00:00:03.832215420 +89 35 0 days 00:00:04.190583900 +89 36 0 days 00:00:01.961402655 +89 37 0 days 00:00:04.353311930 +89 38 0 days 00:00:08.140241480 +89 40 0 days 00:00:01.589741970 +89 41 0 days 00:00:11.338557196 +89 42 0 days 00:00:07.662617060 +89 43 0 days 00:00:07.055337720 +89 44 0 days 00:00:09.918564320 +89 45 0 days 00:00:02.676294070 +89 46 0 days 00:00:04.487504520 +89 47 0 days 00:00:08.503172080 +89 48 0 days 00:00:10.241011165 +89 49 0 days 00:00:00.697287080 +89 50 0 days 00:00:03.138482930 +89 51 0 days 00:00:09.605333730 +89 52 0 days 00:00:00.519015340 +89 53 0 days 00:00:02.327288545 +89 54 0 days 00:00:02.745097340 +89 55 0 days 00:00:00.638316595 +89 56 0 
days 00:00:04.390623293 +89 57 0 days 00:00:01.447945050 +89 58 0 days 00:00:03.141139935 +89 59 0 days 00:00:07.340637720 +89 60 0 days 00:00:08.723015195 +89 61 0 days 00:00:04.250839350 +89 62 0 days 00:00:02.777585472 +89 63 0 days 00:00:17.377967065 +89 64 0 days 00:00:01.633768866 +89 65 0 days 00:00:04.872664513 +89 66 0 days 00:00:05.402877735 +89 67 0 days 00:00:02.500229272 +89 68 0 days 00:00:09.295869295 +89 69 0 days 00:00:00.634707865 +89 70 0 days 00:00:04.992378800 +89 71 0 days 00:00:18.324677640 +89 72 0 days 00:00:09.537087665 +89 73 0 days 00:00:06.319068025 +89 74 0 days 00:00:01.035036471 +89 75 0 days 00:00:04.185481453 +89 76 0 days 00:00:02.581278292 +89 78 0 days 00:00:09.701485640 +89 79 0 days 00:00:02.591155020 +89 80 0 days 00:00:04.097810920 +89 81 0 days 00:00:00.965704280 +89 83 0 days 00:00:01.220473246 +89 84 0 days 00:00:04.470457645 +89 85 0 days 00:00:09.845200480 +90 2 0 days 00:00:00.858273765 +90 3 0 days 00:00:00.952036806 +90 4 0 days 00:00:03.443807510 +90 5 0 days 00:00:02.270796485 +90 6 0 days 00:00:06.628897873 +90 7 0 days 00:00:03.521886306 +90 8 0 days 00:00:01.831408560 +90 9 0 days 00:00:02.295398260 +90 10 0 days 00:00:02.918091480 +90 11 0 days 00:00:05.183107485 +90 12 0 days 00:00:00.651065260 +90 13 0 days 00:00:02.733645000 +90 14 0 days 00:00:02.019511775 +90 15 0 days 00:00:04.076479090 +90 16 0 days 00:00:01.540976540 +90 17 0 days 00:00:01.751897208 +90 18 0 days 00:00:01.244469544 +90 19 0 days 00:00:02.758553900 +90 20 0 days 00:00:00.464600525 +90 21 0 days 00:00:03.488533940 +90 22 0 days 00:00:04.337819800 +90 23 0 days 00:00:01.971003370 +90 24 0 days 00:00:03.054566165 +90 26 0 days 00:00:01.663447555 +90 27 0 days 00:00:03.157163424 +90 28 0 days 00:00:04.144205985 +90 29 0 days 00:00:02.538308185 +90 30 0 days 00:00:02.203462490 +90 31 0 days 00:00:02.996797745 +90 32 0 days 00:00:04.191733195 +90 33 0 days 00:00:03.021937966 +90 34 0 days 00:00:05.793969072 +90 35 0 days 00:00:02.390316575 +90 
36 0 days 00:00:05.055451660 +90 37 0 days 00:00:03.487562016 +90 38 0 days 00:00:01.331700580 +90 39 0 days 00:00:04.033056012 +90 40 0 days 00:00:00.947101290 +90 41 0 days 00:00:05.184991685 +90 42 0 days 00:00:04.079721315 +90 43 0 days 00:00:01.640728765 +90 44 0 days 00:00:01.253178720 +90 45 0 days 00:00:00.571675640 +90 46 0 days 00:00:02.112835675 +90 47 0 days 00:00:02.397160228 +90 48 0 days 00:00:03.745653408 +90 49 0 days 00:00:04.457761980 +90 50 0 days 00:00:01.849828735 +90 51 0 days 00:00:02.307261300 +90 53 0 days 00:00:01.883341316 +90 54 0 days 00:00:04.014679420 +90 55 0 days 00:00:01.163936660 +90 56 0 days 00:00:02.570351333 +90 57 0 days 00:00:01.080339040 +90 58 0 days 00:00:01.380143940 +90 59 0 days 00:00:00.854253530 +90 60 0 days 00:00:01.313854176 +90 61 0 days 00:00:04.558306780 +90 62 0 days 00:00:01.127504900 +90 63 0 days 00:00:01.958618208 +90 64 0 days 00:00:01.238420570 +90 65 0 days 00:00:00.925941245 +90 66 0 days 00:00:01.876440552 +90 67 0 days 00:00:01.314440804 +90 68 0 days 00:00:04.078206300 +90 69 0 days 00:00:02.037500390 +90 70 0 days 00:00:00.581413456 +90 71 0 days 00:00:00.658854515 +90 72 0 days 00:00:02.555779396 +90 73 0 days 00:00:00.468327077 +90 74 0 days 00:00:03.286011735 +90 75 0 days 00:00:01.492793725 +90 76 0 days 00:00:02.626855320 +90 77 0 days 00:00:02.267591525 +90 78 0 days 00:00:05.915484633 +90 80 0 days 00:00:01.340149525 +90 81 0 days 00:00:04.211681360 +90 82 0 days 00:00:01.925944046 +90 83 0 days 00:00:05.379011704 +90 84 0 days 00:00:03.774136100 +90 85 0 days 00:00:02.613856552 +90 86 0 days 00:00:01.472314225 +90 87 0 days 00:00:03.208383888 +90 88 0 days 00:00:00.865529670 +90 89 0 days 00:00:03.033707636 +90 90 0 days 00:00:01.494213983 +90 91 0 days 00:00:03.723685650 +90 92 0 days 00:00:03.655325748 +90 93 0 days 00:00:02.721987465 +90 94 0 days 00:00:00.969089845 +90 95 0 days 00:00:01.776909468 +90 96 0 days 00:00:00.825124812 +90 97 0 days 00:00:03.619568645 +90 98 0 days 
00:00:02.378637825 +90 99 0 days 00:00:03.083592910 +90 100 0 days 00:00:06.758953776 +91 1 0 days 00:00:01.260727875 +91 2 0 days 00:00:02.366784940 +91 3 0 days 00:00:01.247414915 +91 4 0 days 00:00:04.270609760 +91 5 0 days 00:00:03.186744843 +91 6 0 days 00:00:04.148397830 +91 7 0 days 00:00:01.430958756 +91 8 0 days 00:00:04.122331460 +91 9 0 days 00:00:01.537070330 +91 10 0 days 00:00:01.651183055 +91 11 0 days 00:00:01.228974290 +91 12 0 days 00:00:01.108087976 +91 13 0 days 00:00:03.867871900 +91 14 0 days 00:00:02.077908280 +91 15 0 days 00:00:01.817243925 +91 16 0 days 00:00:00.649368628 +91 18 0 days 00:00:02.599343550 +91 19 0 days 00:00:01.063059284 +91 21 0 days 00:00:00.465866116 +91 22 0 days 00:00:01.109743720 +91 23 0 days 00:00:02.880333872 +91 24 0 days 00:00:01.005759835 +91 25 0 days 00:00:00.690135102 +91 26 0 days 00:00:00.916839280 +91 27 0 days 00:00:02.107785005 +91 28 0 days 00:00:04.512117160 +91 29 0 days 00:00:03.130106140 +91 31 0 days 00:00:01.004305752 +91 32 0 days 00:00:02.829532625 +91 33 0 days 00:00:01.354507680 +91 34 0 days 00:00:00.378688776 +91 35 0 days 00:00:01.304325325 +91 36 0 days 00:00:01.940424630 +91 38 0 days 00:00:02.168044528 +91 39 0 days 00:00:02.028197190 +91 40 0 days 00:00:02.245613466 +91 41 0 days 00:00:00.773040945 +91 42 0 days 00:00:07.298207905 +91 43 0 days 00:00:04.355844913 +91 44 0 days 00:00:01.082715413 +91 45 0 days 00:00:00.641829335 +91 46 0 days 00:00:03.584990433 +91 47 0 days 00:00:02.764984840 +91 48 0 days 00:00:01.551277654 +91 49 0 days 00:00:04.734172745 +91 50 0 days 00:00:01.700358851 +91 51 0 days 00:00:01.683260080 +91 52 0 days 00:00:01.559653710 +91 53 0 days 00:00:02.816037750 +91 54 0 days 00:00:00.700285450 +91 55 0 days 00:00:04.031624433 +91 56 0 days 00:00:01.669685230 +91 57 0 days 00:00:00.247873682 +91 58 0 days 00:00:01.537232940 +91 59 0 days 00:00:02.918269900 +91 61 0 days 00:00:03.535620940 +91 62 0 days 00:00:01.584112200 +91 63 0 days 00:00:00.812573040 +91 64 0 
days 00:00:00.768807635 +91 65 0 days 00:00:04.170806030 +91 66 0 days 00:00:04.490970475 +91 68 0 days 00:00:00.914120006 +91 69 0 days 00:00:03.011050953 +91 70 0 days 00:00:00.689005900 +91 71 0 days 00:00:02.763224845 +91 72 0 days 00:00:06.612442960 +91 74 0 days 00:00:02.723018347 +91 75 0 days 00:00:01.120521108 +91 76 0 days 00:00:03.347347206 +91 77 0 days 00:00:02.868746880 +91 78 0 days 00:00:02.063673506 +91 79 0 days 00:00:03.034632872 +91 80 0 days 00:00:01.102183320 +91 81 0 days 00:00:04.747793605 +91 82 0 days 00:00:02.236682873 +91 83 0 days 00:00:01.350349866 +91 84 0 days 00:00:01.903014595 +91 85 0 days 00:00:06.765649320 +91 86 0 days 00:00:03.026611475 +91 87 0 days 00:00:02.321578490 +91 88 0 days 00:00:04.448865340 +91 90 0 days 00:00:00.801470112 +91 91 0 days 00:00:02.035663435 +91 93 0 days 00:00:05.588647545 +91 94 0 days 00:00:01.026113956 +91 95 0 days 00:00:02.785220470 +91 96 0 days 00:00:01.144510420 +91 97 0 days 00:00:00.484535245 +91 98 0 days 00:00:04.146808672 +91 99 0 days 00:00:03.050391345 +92 1 0 days 00:00:13.800972033 +92 2 0 days 00:01:14.201214653 +92 3 0 days 00:00:15.966610850 +92 4 0 days 00:00:58.907353386 +92 5 0 days 00:00:16.179397940 +92 6 0 days 00:00:12.135910040 +92 7 0 days 00:00:40.006870773 +92 8 0 days 00:00:20.545072486 +92 9 0 days 00:00:34.297148080 +92 10 0 days 00:01:02.520419080 +92 11 0 days 00:00:06.932381720 +92 12 0 days 00:00:07.849264364 +92 13 0 days 00:00:47.595705113 +92 14 0 days 00:00:50.580489646 +92 15 0 days 00:00:35.779950040 +92 16 0 days 00:00:26.610241100 +92 17 0 days 00:00:17.766682866 +92 18 0 days 00:00:54.457997420 +93 1 0 days 00:00:07.445193823 +93 2 0 days 00:00:25.967291886 +93 3 0 days 00:00:46.406037786 +93 4 0 days 00:00:37.237829793 +93 5 0 days 00:01:25.035744450 +93 6 0 days 00:00:35.182450700 +93 7 0 days 00:00:17.728225160 +93 8 0 days 00:00:22.294764240 +93 9 0 days 00:00:58.854385370 +93 10 0 days 00:00:46.892086440 +93 11 0 days 00:00:29.906301520 +93 12 0 days 
00:00:13.497787706 +93 13 0 days 00:00:48.192696336 +94 1 0 days 00:00:11.392497656 +94 2 0 days 00:00:12.144241423 +94 3 0 days 00:00:11.405871696 +94 4 0 days 00:00:11.959299087 +94 5 0 days 00:00:13.775174970 +94 6 0 days 00:00:12.623583491 +94 7 0 days 00:00:10.777724885 +94 8 0 days 00:00:13.098576123 +94 9 0 days 00:00:13.532879357 +94 10 0 days 00:00:12.496779488 +94 11 0 days 00:00:12.506329901 +94 12 0 days 00:00:14.015289880 +94 13 0 days 00:00:12.891466138 +94 14 0 days 00:00:11.717624540 +94 15 0 days 00:00:12.119833337 +94 16 0 days 00:00:15.736293182 +94 17 0 days 00:00:14.963499427 +95 1 0 days 00:00:08.914180217 +95 2 0 days 00:00:13.815297444 +95 3 0 days 00:00:08.894886577 +95 4 0 days 00:00:09.812460827 +95 5 0 days 00:00:12.351470166 +95 6 0 days 00:00:10.947251277 +95 7 0 days 00:00:08.672537600 +95 8 0 days 00:00:14.211372822 +95 9 0 days 00:00:11.847420484 +95 10 0 days 00:00:10.257192968 +95 11 0 days 00:00:10.559693415 +95 12 0 days 00:00:11.699808821 +95 13 0 days 00:00:14.282345550 +95 14 0 days 00:00:10.232837383 +95 15 0 days 00:00:10.212116954 +96 1 0 days 00:00:26.010457340 +96 2 0 days 00:00:12.975517590 +96 3 0 days 00:00:16.425610955 +96 4 0 days 00:00:56.742676270 +96 5 0 days 00:00:12.670402525 +96 6 0 days 00:00:12.544303180 +96 7 0 days 00:00:19.797168366 +96 8 0 days 00:00:42.814394815 +96 9 0 days 00:01:09.156977626 +96 10 0 days 00:01:14.890179710 +96 11 0 days 00:00:21.732453806 +96 12 0 days 00:00:10.202172293 +96 13 0 days 00:01:00.333145980 +96 14 0 days 00:01:04.471553405 +97 1 0 days 00:00:28.857082032 +97 2 0 days 00:00:28.862818900 +97 3 0 days 00:00:58.866056075 +97 4 0 days 00:00:13.477774175 +97 5 0 days 00:00:33.484269246 +97 6 0 days 00:00:22.408315720 +97 7 0 days 00:00:21.899326670 +97 8 0 days 00:00:14.870831875 +97 9 0 days 00:00:18.545880225 +97 10 0 days 00:01:02.489200785 +97 11 0 days 00:00:25.993949210 +97 12 0 days 00:00:10.805860665 +97 13 0 days 00:00:35.202685585 +97 14 0 days 00:00:08.990487410 +97 
15 0 days 00:01:02.662288920 +98 1 0 days 00:00:10.879169155 +98 2 0 days 00:00:11.554753680 +98 3 0 days 00:00:10.052572940 +98 4 0 days 00:00:12.905687540 +98 5 0 days 00:00:10.660996665 +98 6 0 days 00:00:11.946670303 +98 7 0 days 00:00:12.292721722 +98 8 0 days 00:00:11.990335940 +98 9 0 days 00:00:10.640576210 +98 10 0 days 00:00:12.588510913 +98 11 0 days 00:00:12.844433104 +98 12 0 days 00:00:11.623012745 +98 13 0 days 00:00:11.383967288 +98 14 0 days 00:00:10.598706471 +98 15 0 days 00:00:12.364819320 +98 16 0 days 00:00:10.816263950 +98 17 0 days 00:00:10.760888940 +98 18 0 days 00:00:10.933485625 +98 19 0 days 00:00:11.163983380 +98 20 0 days 00:00:10.797628020 +98 21 0 days 00:00:10.484386715 +98 22 0 days 00:00:12.023934576 +98 23 0 days 00:00:09.738505835 +98 24 0 days 00:00:10.739074830 +98 25 0 days 00:00:12.697399637 +98 26 0 days 00:00:12.504723695 +99 1 0 days 00:00:09.724629050 +99 2 0 days 00:00:09.699184386 +99 3 0 days 00:00:07.666299800 +99 4 0 days 00:00:08.993009420 +99 5 0 days 00:00:09.695864580 +99 7 0 days 00:00:10.982952201 +99 8 0 days 00:00:13.212396502 +99 9 0 days 00:00:10.470108802 +99 10 0 days 00:00:08.848924732 +99 11 0 days 00:00:08.896034701 +99 12 0 days 00:00:13.025819276 +99 13 0 days 00:00:09.810863965 +99 14 0 days 00:00:08.878101272 +99 15 0 days 00:00:09.831098410 +99 16 0 days 00:00:08.533397358 +99 17 0 days 00:00:08.794898812 +99 18 0 days 00:00:09.996466716 +100 1 0 days 00:00:13.189762811 +100 2 0 days 00:00:11.496353095 +100 3 0 days 00:00:12.221167210 +100 4 0 days 00:00:13.070059460 +100 5 0 days 00:00:12.086212640 +100 6 0 days 00:00:11.475157042 +100 7 0 days 00:00:12.871660933 +100 8 0 days 00:00:14.062836957 +100 9 0 days 00:00:10.980132393 +100 10 0 days 00:00:12.715328313 +100 11 0 days 00:00:11.885403727 +100 12 0 days 00:00:11.769967448 +100 13 0 days 00:00:09.042087860 +100 14 0 days 00:00:13.356399300 +100 15 0 days 00:00:13.879586404 +100 16 0 days 00:00:12.704165580 +100 17 0 days 00:00:13.261544648 
+100 18 0 days 00:00:12.007372760 +101 1 0 days 00:00:09.224835280 +101 2 0 days 00:00:09.372054816 +101 3 0 days 00:00:08.671111266 +101 4 0 days 00:00:08.428542617 +101 5 0 days 00:00:09.028493247 +101 6 0 days 00:00:09.700838980 +101 7 0 days 00:00:15.355898408 +101 8 0 days 00:00:13.638065307 +101 9 0 days 00:00:10.154064591 +101 10 0 days 00:00:13.028282848 +101 11 0 days 00:00:14.133507140 +101 12 0 days 00:00:12.771841135 +101 13 0 days 00:00:10.750511307 +101 14 0 days 00:00:10.165836607 +101 15 0 days 00:00:09.172274224 +101 16 0 days 00:00:15.470804653 +101 17 0 days 00:00:07.442338593 +101 18 0 days 00:00:14.648600880 +101 19 0 days 00:00:09.573133866 +101 20 0 days 00:00:14.766081026 +101 21 0 days 00:00:08.894225088 +102 1 0 days 00:00:08.147836766 +102 2 0 days 00:00:28.033011046 +102 3 0 days 00:02:08.905675080 +102 4 0 days 00:00:39.540159100 +102 5 0 days 00:01:06.561132226 +102 6 0 days 00:00:08.984573935 +102 7 0 days 00:00:45.249494633 +102 8 0 days 00:00:37.747917715 +102 9 0 days 00:00:18.766581415 +102 10 0 days 00:00:15.980069066 +102 11 0 days 00:00:12.396084413 +102 12 0 days 00:01:12.834059105 +102 13 0 days 00:00:33.487189895 +102 14 0 days 00:00:20.785849813 +103 1 0 days 00:00:08.769084575 +103 2 0 days 00:00:29.596443360 +103 3 0 days 00:00:19.054327286 +103 4 0 days 00:00:06.584929110 +103 5 0 days 00:00:29.340116260 +103 6 0 days 00:00:09.207833573 +103 7 0 days 00:00:26.246420860 +103 8 0 days 00:00:58.379093300 +103 9 0 days 00:00:13.836015293 +103 10 0 days 00:00:47.567172425 +103 11 0 days 00:00:57.931475190 +103 12 0 days 00:00:09.643494675 +103 13 0 days 00:00:09.113736515 +103 14 0 days 00:00:57.332385880 +103 15 0 days 00:00:12.241500495 +103 16 0 days 00:01:00.920137055 +104 1 0 days 00:00:38.600447793 +104 2 0 days 00:00:29.959975600 +104 3 0 days 00:00:14.784695973 +104 4 0 days 00:00:10.259909215 +104 5 0 days 00:00:06.878059140 +104 6 0 days 00:00:37.777718973 +104 7 0 days 00:01:04.995238993 +104 8 0 days 
00:01:42.837178213 +104 9 0 days 00:00:10.361413508 +104 10 0 days 00:00:09.348448813 +104 11 0 days 00:00:14.138464793 +104 12 0 days 00:00:20.222024660 +104 13 0 days 00:00:41.482508420 +104 14 0 days 00:00:37.369958726 +104 15 0 days 00:02:02.112775120 +105 1 0 days 00:00:21.164620140 +105 2 0 days 00:00:11.242763313 +105 3 0 days 00:00:19.854533136 +105 4 0 days 00:00:05.978282705 +105 5 0 days 00:00:21.038809045 +105 6 0 days 00:00:33.354777940 +105 7 0 days 00:01:04.008972908 +105 8 0 days 00:00:15.550389886 +105 9 0 days 00:00:06.679792080 +105 10 0 days 00:00:28.191860373 +105 11 0 days 00:00:10.779947100 +105 12 0 days 00:00:23.572229600 +105 13 0 days 00:00:06.453779326 +105 14 0 days 00:00:08.115806433 +105 15 0 days 00:00:19.916876486 +105 16 0 days 00:00:05.549836020 +105 17 0 days 00:00:37.696156316 +105 18 0 days 00:00:20.271457633 +105 19 0 days 00:00:18.772958740 +105 20 0 days 00:00:17.212759953 +105 21 0 days 00:00:09.994548295 +105 22 0 days 00:00:23.347165144 +106 1 0 days 00:00:04.774321820 +106 2 0 days 00:00:03.398137071 +106 3 0 days 00:00:00.591163842 +106 4 0 days 00:00:00.386108916 +106 5 0 days 00:00:01.075194048 +106 6 0 days 00:00:01.010462365 +106 7 0 days 00:00:03.921233688 +106 8 0 days 00:00:01.119690017 +106 9 0 days 00:00:01.734588670 +106 10 0 days 00:00:01.916131200 +106 12 0 days 00:00:03.977719395 +106 13 0 days 00:00:03.782178866 +106 14 0 days 00:00:06.718626711 +106 17 0 days 00:00:01.501760700 +106 20 0 days 00:00:00.520433420 +106 21 0 days 00:00:01.891509650 +106 22 0 days 00:00:06.639173442 +106 23 0 days 00:00:01.722975683 +106 24 0 days 00:00:01.763912942 +106 25 0 days 00:00:03.643827696 +106 27 0 days 00:00:01.314989680 +106 28 0 days 00:00:01.367156528 +106 30 0 days 00:00:01.814001258 +106 31 0 days 00:00:01.203601937 +106 32 0 days 00:00:03.570412807 +106 33 0 days 00:00:01.289561384 +106 34 0 days 00:00:02.846297192 +106 35 0 days 00:00:01.481558660 +106 36 0 days 00:00:01.517662124 +106 37 0 days 
00:00:00.956795485 +106 38 0 days 00:00:01.646321768 +106 39 0 days 00:00:01.507047047 +106 41 0 days 00:00:08.254829274 +106 43 0 days 00:00:00.864143615 +106 44 0 days 00:00:07.546637285 +106 45 0 days 00:00:01.684082383 +106 46 0 days 00:00:07.473699122 +106 47 0 days 00:00:01.543584910 +106 48 0 days 00:00:01.383267440 +106 49 0 days 00:00:03.374079631 +106 50 0 days 00:00:09.989616426 +107 1 0 days 00:00:00.792656922 +107 2 0 days 00:00:00.845674755 +107 3 0 days 00:00:06.019237680 +107 4 0 days 00:00:01.819472702 +107 5 0 days 00:00:04.061488398 +107 6 0 days 00:00:02.752983846 +107 8 0 days 00:00:01.635905835 +107 9 0 days 00:00:01.145573796 +107 10 0 days 00:00:01.868072790 +107 11 0 days 00:00:00.485919346 +107 13 0 days 00:00:04.335639712 +107 14 0 days 00:00:01.840282489 +107 15 0 days 00:00:00.455564860 +107 16 0 days 00:00:08.118624390 +107 17 0 days 00:00:01.113577960 +107 18 0 days 00:00:06.910995154 +107 19 0 days 00:00:03.793571133 +107 20 0 days 00:00:03.240685732 +107 21 0 days 00:00:03.666486421 +107 22 0 days 00:00:02.464620734 +107 23 0 days 00:00:00.371026715 +107 24 0 days 00:00:06.645840518 +107 25 0 days 00:00:01.059009322 +107 26 0 days 00:00:00.457083100 +107 27 0 days 00:00:02.424475773 +107 28 0 days 00:00:04.895910505 +107 29 0 days 00:00:09.551420405 +107 30 0 days 00:00:07.387607840 +107 31 0 days 00:00:01.316959020 +107 32 0 days 00:00:02.920629575 +107 33 0 days 00:00:07.753418972 +107 34 0 days 00:00:08.238431088 +107 35 0 days 00:00:06.901875311 +107 36 0 days 00:00:04.975694822 +107 37 0 days 00:00:03.048189826 +107 38 0 days 00:00:00.963215200 +107 39 0 days 00:00:00.914035820 +107 40 0 days 00:00:01.809368988 +107 41 0 days 00:00:01.548301260 +107 42 0 days 00:00:03.541639347 +107 43 0 days 00:00:03.804212994 +107 44 0 days 00:00:02.200630240 +107 45 0 days 00:00:06.038430007 +108 2 0 days 00:00:01.054219295 +108 3 0 days 00:00:00.844788950 +108 4 0 days 00:00:02.454533760 +108 5 0 days 00:00:04.441245328 +108 6 0 days 
00:00:00.310223510 +108 7 0 days 00:00:04.166037037 +108 8 0 days 00:00:00.532874320 +108 10 0 days 00:00:00.527328973 +108 11 0 days 00:00:01.979236784 +108 12 0 days 00:00:01.809859786 +108 13 0 days 00:00:05.374784074 +108 14 0 days 00:00:05.285396755 +108 15 0 days 00:00:00.418562425 +108 16 0 days 00:00:01.308503771 +108 17 0 days 00:00:01.088734517 +108 18 0 days 00:00:02.184575480 +108 19 0 days 00:00:01.186214556 +108 20 0 days 00:00:05.264038641 +108 21 0 days 00:00:02.040815397 +108 23 0 days 00:00:00.841048520 +108 24 0 days 00:00:00.406855936 +108 25 0 days 00:00:00.434695440 +108 27 0 days 00:00:03.646346948 +108 28 0 days 00:00:02.036977244 +108 29 0 days 00:00:01.004136737 +108 30 0 days 00:00:00.400719472 +108 31 0 days 00:00:01.811713812 +108 32 0 days 00:00:05.263561478 +108 33 0 days 00:00:00.447399655 +108 34 0 days 00:00:00.638270180 +108 35 0 days 00:00:03.593298202 +108 36 0 days 00:00:03.593900624 +108 37 0 days 00:00:05.405806837 +108 38 0 days 00:00:01.113476854 +108 39 0 days 00:00:00.930126810 +108 40 0 days 00:00:03.720836051 +108 41 0 days 00:00:02.015865834 +108 43 0 days 00:00:05.060552005 +108 44 0 days 00:00:00.970519413 +108 47 0 days 00:00:01.972871991 +108 48 0 days 00:00:00.332946671 +108 49 0 days 00:00:03.188227525 +108 50 0 days 00:00:01.244172845 +108 51 0 days 00:00:01.456254640 +108 52 0 days 00:00:00.310361071 +108 53 0 days 00:00:00.540584705 +108 54 0 days 00:00:01.212332011 +108 55 0 days 00:00:04.294286066 +108 56 0 days 00:00:04.316967120 +108 57 0 days 00:00:01.075585638 +108 59 0 days 00:00:00.733635660 +108 60 0 days 00:00:03.197711703 +108 61 0 days 00:00:00.968624922 +108 62 0 days 00:00:02.706837700 +108 63 0 days 00:00:00.768147865 +108 64 0 days 00:00:00.308458426 +108 65 0 days 00:00:03.965860863 +108 66 0 days 00:00:01.975349592 +108 67 0 days 00:00:01.986250046 +108 68 0 days 00:00:00.523916841 +108 69 0 days 00:00:00.955833157 +108 70 0 days 00:00:01.548580730 +108 71 0 days 00:00:03.979967520 +109 2 0 
days 00:00:01.729400964 +109 5 0 days 00:00:00.358663085 +109 6 0 days 00:00:04.454438695 +109 7 0 days 00:00:02.680261475 +109 8 0 days 00:00:01.158678131 +109 9 0 days 00:00:04.476785675 +109 10 0 days 00:00:03.013067755 +109 11 0 days 00:00:01.086875166 +109 12 0 days 00:00:01.146772638 +109 13 0 days 00:00:01.876554414 +109 14 0 days 00:00:00.190150324 +109 15 0 days 00:00:00.567432981 +109 16 0 days 00:00:00.782447735 +109 17 0 days 00:00:04.467410500 +109 18 0 days 00:00:00.276051126 +109 19 0 days 00:00:00.589919923 +109 20 0 days 00:00:03.146532209 +109 21 0 days 00:00:02.817257725 +109 22 0 days 00:00:00.737349024 +109 23 0 days 00:00:01.250189110 +109 24 0 days 00:00:02.024404373 +109 25 0 days 00:00:04.027143666 +109 26 0 days 00:00:00.441142266 +109 27 0 days 00:00:00.427270143 +109 28 0 days 00:00:00.981930067 +109 29 0 days 00:00:00.277808093 +109 30 0 days 00:00:01.720420245 +109 31 0 days 00:00:00.568690355 +109 32 0 days 00:00:02.721504792 +109 33 0 days 00:00:00.803187833 +109 34 0 days 00:00:03.533109968 +109 35 0 days 00:00:01.443270103 +109 36 0 days 00:00:02.185596351 +109 37 0 days 00:00:00.506400433 +109 38 0 days 00:00:03.640830657 +109 39 0 days 00:00:00.799347073 +109 40 0 days 00:00:00.225141428 +109 41 0 days 00:00:01.137562552 +109 42 0 days 00:00:00.640378377 +109 43 0 days 00:00:00.643166906 +109 44 0 days 00:00:01.661607325 +109 45 0 days 00:00:00.347589376 +109 46 0 days 00:00:01.103778147 +109 47 0 days 00:00:00.904312050 +109 48 0 days 00:00:05.178925367 +109 49 0 days 00:00:01.108573555 +109 50 0 days 00:00:01.589230561 +109 51 0 days 00:00:00.565147088 +109 52 0 days 00:00:00.875991705 +109 54 0 days 00:00:00.313510262 +109 55 0 days 00:00:00.311588187 +109 56 0 days 00:00:00.538508864 +109 57 0 days 00:00:01.957767588 +109 58 0 days 00:00:01.028083390 +109 59 0 days 00:00:02.810392618 +109 60 0 days 00:00:02.274638711 +109 61 0 days 00:00:00.331722271 +109 62 0 days 00:00:00.411929400 +109 63 0 days 00:00:01.326346469 +109 64 
0 days 00:00:01.490708077 +109 65 0 days 00:00:04.521396254 +109 66 0 days 00:00:00.842712261 +109 67 0 days 00:00:00.611652645 +109 68 0 days 00:00:03.570637776 +109 69 0 days 00:00:00.422727602 +109 70 0 days 00:00:00.923077592 +109 71 0 days 00:00:02.559117627 +109 72 0 days 00:00:00.824457900 +109 73 0 days 00:00:00.789765502 +109 74 0 days 00:00:00.198421576 +109 75 0 days 00:00:04.057624394 +109 76 0 days 00:00:01.406946420 +109 77 0 days 00:00:00.656036182 +109 78 0 days 00:00:02.028266404 +109 79 0 days 00:00:00.957593160 +109 80 0 days 00:00:01.569783958 +109 81 0 days 00:00:02.597365322 +109 82 0 days 00:00:00.532537820 +109 83 0 days 00:00:01.494821905 +109 84 0 days 00:00:00.441075150 +109 85 0 days 00:00:02.603735095 +109 86 0 days 00:00:03.252248173 +109 87 0 days 00:00:00.428400160 +109 88 0 days 00:00:00.717943592 +109 89 0 days 00:00:01.304889724 +109 90 0 days 00:00:02.751937813 +109 91 0 days 00:00:01.995196609 +109 92 0 days 00:00:00.517578751 +109 93 0 days 00:00:00.243306900 +109 94 0 days 00:00:00.289442905 +109 95 0 days 00:00:01.488402480 +109 96 0 days 00:00:04.371441770 +109 97 0 days 00:00:05.586654845 +110 1 0 days 00:00:02.900470494 +110 2 0 days 00:00:00.772362233 +110 4 0 days 00:00:01.401712062 +110 6 0 days 00:00:02.541041941 +110 8 0 days 00:00:01.158830975 +110 9 0 days 00:00:08.708010753 +110 10 0 days 00:00:08.719100606 +110 13 0 days 00:00:06.335703811 +110 14 0 days 00:00:01.377913664 +110 15 0 days 00:00:07.157684461 +110 16 0 days 00:00:02.162950555 +110 18 0 days 00:00:08.741990924 +110 20 0 days 00:00:02.112980444 +110 23 0 days 00:00:00.390304661 +110 25 0 days 00:00:01.179122914 +110 26 0 days 00:00:04.979495230 +110 28 0 days 00:00:03.096921703 +110 29 0 days 00:00:03.855916353 +111 2 0 days 00:00:00.840490208 +111 3 0 days 00:00:01.015857524 +111 4 0 days 00:00:02.924566620 +111 6 0 days 00:00:04.620401234 +111 7 0 days 00:00:01.161386863 +111 8 0 days 00:00:00.361917455 +111 9 0 days 00:00:01.412055207 +111 10 0 days 
00:00:03.638974575 +111 11 0 days 00:00:00.635824344 +111 12 0 days 00:00:02.137492941 +111 13 0 days 00:00:04.554956350 +111 17 0 days 00:00:04.643645795 +111 18 0 days 00:00:02.133682584 +111 19 0 days 00:00:01.249521486 +111 22 0 days 00:00:01.728766261 +111 23 0 days 00:00:00.343626760 +111 24 0 days 00:00:02.768157553 +111 25 0 days 00:00:01.914744283 +111 26 0 days 00:00:00.658257196 +111 27 0 days 00:00:02.797396469 +111 28 0 days 00:00:00.421396292 +111 29 0 days 00:00:02.369821241 +111 30 0 days 00:00:00.328098940 +111 31 0 days 00:00:00.339749112 +111 35 0 days 00:00:01.719241436 +111 36 0 days 00:00:00.576871683 +111 38 0 days 00:00:05.692483060 +111 39 0 days 00:00:00.726720724 +111 40 0 days 00:00:00.439253648 +111 41 0 days 00:00:02.113441696 +111 44 0 days 00:00:00.886364880 +111 45 0 days 00:00:01.694350004 +111 46 0 days 00:00:01.992392817 +111 47 0 days 00:00:00.672876831 +111 48 0 days 00:00:02.799064073 +111 51 0 days 00:00:01.065069354 +111 53 0 days 00:00:00.342749432 +111 55 0 days 00:00:00.211844230 +111 57 0 days 00:00:00.443020107 +111 58 0 days 00:00:00.341864036 +111 60 0 days 00:00:00.600882237 +111 61 0 days 00:00:00.370038784 +111 63 0 days 00:00:04.569244290 +111 65 0 days 00:00:02.920895444 +111 67 0 days 00:00:02.082818843 +111 68 0 days 00:00:01.779748020 +111 69 0 days 00:00:03.834351595 +111 70 0 days 00:00:02.167218968 +112 1 0 days 00:00:01.215165175 +112 2 0 days 00:00:01.310756373 +112 3 0 days 00:00:03.633765586 +112 6 0 days 00:00:15.420647720 +112 7 0 days 00:00:10.590076787 +112 9 0 days 00:00:03.192176154 +112 10 0 days 00:00:06.491079980 +112 11 0 days 00:00:06.711234087 +112 12 0 days 00:00:20.554157102 +112 13 0 days 00:00:25.481124765 +112 14 0 days 00:00:04.642838020 +112 15 0 days 00:00:15.870905448 +112 16 0 days 00:00:01.910702770 +112 17 0 days 00:00:01.652867635 +112 18 0 days 00:00:01.382027913 +112 19 0 days 00:00:01.965984750 +112 20 0 days 00:00:10.745028685 +112 21 0 days 00:00:04.680053346 +112 22 0 days 
00:00:07.378785084 +112 24 0 days 00:00:07.618372211 +112 25 0 days 00:00:07.574552672 +112 27 0 days 00:00:01.069218860 +112 28 0 days 00:00:04.145891610 +112 29 0 days 00:00:08.277648420 +112 30 0 days 00:00:35.393261081 +113 1 0 days 00:00:36.936438672 +113 2 0 days 00:00:22.723789406 +113 3 0 days 00:00:06.380181164 +113 4 0 days 00:00:03.305750515 +113 5 0 days 00:00:28.782580974 +113 6 0 days 00:00:12.184793995 +113 7 0 days 00:00:32.421515236 +113 8 0 days 00:00:04.121647613 +113 9 0 days 00:00:06.654190740 +113 10 0 days 00:00:01.485783797 +113 11 0 days 00:00:01.660620613 +113 12 0 days 00:00:01.840726175 +113 14 0 days 00:00:11.549590390 +113 15 0 days 00:00:08.591425020 +113 16 0 days 00:00:03.824222945 +113 17 0 days 00:00:01.701328686 +113 19 0 days 00:00:03.065446660 +113 20 0 days 00:00:05.290686652 +114 1 0 days 00:00:13.125284854 +114 2 0 days 00:00:01.336667037 +114 4 0 days 00:00:03.239545191 +114 5 0 days 00:00:01.292815921 +114 6 0 days 00:00:06.462809625 +114 7 0 days 00:00:01.639521515 +114 8 0 days 00:00:00.621773880 +114 9 0 days 00:00:01.681499458 +114 10 0 days 00:00:01.229313167 +114 11 0 days 00:00:04.249543786 +114 12 0 days 00:00:02.645979483 +114 13 0 days 00:00:12.377037010 +114 14 0 days 00:00:04.439088080 +114 15 0 days 00:00:04.466478537 +114 17 0 days 00:00:05.970571106 +114 18 0 days 00:00:02.381763544 +114 19 0 days 00:00:00.605022748 +114 20 0 days 00:00:01.134710967 +114 21 0 days 00:00:01.778636278 +114 22 0 days 00:00:02.988808805 +114 23 0 days 00:00:00.512097060 +114 24 0 days 00:00:01.445770420 +114 25 0 days 00:00:07.854905162 +114 26 0 days 00:00:02.742621857 +114 27 0 days 00:00:05.512033983 +114 28 0 days 00:00:03.687235025 +114 29 0 days 00:00:00.871376112 +114 30 0 days 00:00:00.755995586 +114 31 0 days 00:00:02.790318835 +114 32 0 days 00:00:05.268628344 +114 35 0 days 00:00:03.562180200 +114 36 0 days 00:00:07.815705808 +114 37 0 days 00:00:07.286787946 +114 38 0 days 00:00:01.751749585 +114 39 0 days 
00:00:05.059465044 +114 40 0 days 00:00:04.326459255 +114 41 0 days 00:00:02.951592066 +114 42 0 days 00:00:08.310315750 +114 43 0 days 00:00:02.196067230 +114 44 0 days 00:00:01.446175334 +114 45 0 days 00:00:01.715928765 +114 46 0 days 00:00:04.560291098 +114 47 0 days 00:00:03.892710155 +114 48 0 days 00:00:01.412762980 +114 49 0 days 00:00:01.682407608 +114 50 0 days 00:00:11.349290405 +114 51 0 days 00:00:05.901041232 +114 52 0 days 00:00:01.075951700 +114 54 0 days 00:00:02.227502620 +114 55 0 days 00:00:03.648120157 +114 56 0 days 00:00:07.275189820 +114 57 0 days 00:00:01.799564925 +114 58 0 days 00:00:03.184330836 +114 60 0 days 00:00:01.015028970 +114 61 0 days 00:00:04.743738684 +114 62 0 days 00:00:06.762797356 +114 64 0 days 00:00:01.636635500 +114 65 0 days 00:00:08.858152136 +115 1 0 days 00:00:06.994564084 +115 2 0 days 00:00:00.401841923 +115 3 0 days 00:00:09.569902560 +115 4 0 days 00:00:00.745943645 +115 5 0 days 00:00:00.599149443 +115 6 0 days 00:00:01.425131585 +115 7 0 days 00:00:12.356768920 +115 8 0 days 00:00:02.445804504 +115 9 0 days 00:00:01.832470800 +115 10 0 days 00:00:03.698495425 +115 11 0 days 00:00:09.430960130 +115 12 0 days 00:00:01.290085906 +115 13 0 days 00:00:00.732135617 +115 14 0 days 00:00:17.432726832 +115 15 0 days 00:00:00.941365365 +115 16 0 days 00:00:02.823902900 +115 17 0 days 00:00:11.144011860 +115 18 0 days 00:00:03.765961353 +115 19 0 days 00:00:07.614899417 +115 20 0 days 00:00:01.569264877 +115 22 0 days 00:00:12.100650116 +115 23 0 days 00:00:06.733746662 +115 25 0 days 00:00:02.126701097 +115 27 0 days 00:00:00.831184772 +115 28 0 days 00:00:02.703406510 +115 29 0 days 00:00:08.596216652 +115 30 0 days 00:00:04.371837242 +115 31 0 days 00:00:20.305377485 +115 32 0 days 00:00:06.368085366 +115 34 0 days 00:00:03.429852242 +115 35 0 days 00:00:00.842145840 +115 36 0 days 00:00:03.841297810 +115 37 0 days 00:00:00.807412732 +115 38 0 days 00:00:03.188958833 +115 39 0 days 00:00:01.643094465 +115 41 0 days 
00:00:01.145114465 +115 42 0 days 00:00:03.034724153 +115 43 0 days 00:00:14.184381893 +115 44 0 days 00:00:04.824276607 +115 45 0 days 00:00:01.298839651 +115 46 0 days 00:00:00.968663763 +115 47 0 days 00:00:01.815794390 +115 48 0 days 00:00:00.968008275 +115 50 0 days 00:00:18.441437170 +116 1 0 days 00:00:06.155979766 +116 2 0 days 00:00:24.040349126 +116 3 0 days 00:00:01.737656871 +116 4 0 days 00:00:10.965548143 +116 6 0 days 00:00:05.406918008 +116 7 0 days 00:00:04.549323780 +116 8 0 days 00:00:14.956150790 +116 9 0 days 00:00:07.211249726 +116 10 0 days 00:00:02.545335398 +116 11 0 days 00:00:05.153548460 +116 12 0 days 00:00:01.564746060 +116 13 0 days 00:00:27.359353985 +116 14 0 days 00:00:04.882778138 +116 15 0 days 00:00:03.598305652 +116 16 0 days 00:00:06.278055600 +116 17 0 days 00:00:14.582070997 +116 18 0 days 00:00:03.535102985 +116 19 0 days 00:00:03.685755828 +117 1 0 days 00:00:01.370746522 +117 2 0 days 00:00:04.132110887 +117 3 0 days 00:00:01.700589653 +117 4 0 days 00:00:21.679126870 +117 5 0 days 00:00:06.300152830 +117 6 0 days 00:00:05.101847800 +117 7 0 days 00:00:02.963000078 +117 8 0 days 00:00:08.739350225 +117 9 0 days 00:00:02.549634071 +117 10 0 days 00:00:01.233138243 +117 11 0 days 00:00:05.422285632 +117 12 0 days 00:00:04.534697337 +117 13 0 days 00:00:05.631100116 +117 14 0 days 00:00:04.300370863 +117 15 0 days 00:00:05.070383246 +117 16 0 days 00:00:02.763652342 +117 18 0 days 00:00:01.385420764 +117 19 0 days 00:00:00.690808438 +117 20 0 days 00:00:01.565467484 +117 21 0 days 00:00:04.974061689 +117 22 0 days 00:00:01.002235074 +117 23 0 days 00:00:01.474968160 +117 24 0 days 00:00:00.752337832 +117 25 0 days 00:00:04.140463160 +117 26 0 days 00:00:02.786579891 +117 27 0 days 00:00:11.494681866 +117 28 0 days 00:00:08.717164366 +117 29 0 days 00:00:02.112787403 +117 30 0 days 00:00:00.701934024 +117 31 0 days 00:00:02.176765444 +117 32 0 days 00:00:01.961633777 +117 33 0 days 00:00:05.989732805 +117 34 0 days 
00:00:13.936004392 +117 35 0 days 00:00:02.935641166 +117 36 0 days 00:00:01.096145036 +117 37 0 days 00:00:00.795845645 +117 38 0 days 00:00:02.230583972 +117 39 0 days 00:00:00.959374126 +117 40 0 days 00:00:07.439860808 +117 41 0 days 00:00:18.328472052 +118 1 0 days 00:00:00.918284941 +118 2 0 days 00:00:03.358432746 +118 4 0 days 00:00:03.891955556 +118 7 0 days 00:00:31.813443340 +118 8 0 days 00:00:08.009945972 +118 10 0 days 00:00:02.466086615 +118 11 0 days 00:00:13.597094008 +119 1 0 days 00:00:00.697854532 +119 4 0 days 00:00:02.069229512 +119 5 0 days 00:00:11.836771938 +119 6 0 days 00:00:01.991978264 +119 7 0 days 00:00:11.805132689 +119 8 0 days 00:00:05.745417730 +119 11 0 days 00:00:11.492889603 +119 12 0 days 00:00:06.388349683 +119 14 0 days 00:00:08.306085461 +119 15 0 days 00:00:02.685137934 +119 16 0 days 00:00:02.925104564 +119 17 0 days 00:00:03.570758452 +119 18 0 days 00:00:07.380244576 +119 19 0 days 00:00:05.742710935 +119 22 0 days 00:00:05.195685054 +120 1 0 days 00:00:02.297421733 +120 2 0 days 00:00:05.767706372 +120 3 0 days 00:00:02.269348335 +120 4 0 days 00:00:04.578112023 +120 5 0 days 00:00:06.490362573 +120 6 0 days 00:00:05.479917986 +120 7 0 days 00:00:05.080868240 +120 8 0 days 00:00:08.007838566 +120 9 0 days 00:00:07.854775720 +120 10 0 days 00:00:06.040278108 +120 11 0 days 00:00:06.720147612 +120 12 0 days 00:00:02.943625600 +120 13 0 days 00:00:01.525039055 +120 14 0 days 00:00:14.110043000 +120 15 0 days 00:00:05.904959406 +120 16 0 days 00:00:12.318561172 +120 17 0 days 00:00:27.525654266 +120 18 0 days 00:00:14.012894953 +120 19 0 days 00:00:04.212278840 +120 20 0 days 00:00:11.102772251 +120 21 0 days 00:00:08.792643140 +120 22 0 days 00:00:13.457617502 +120 23 0 days 00:00:01.317088900 +120 24 0 days 00:00:02.173035940 +120 25 0 days 00:00:07.161290580 +120 26 0 days 00:00:08.292226120 +120 27 0 days 00:00:26.063489933 +120 28 0 days 00:00:10.300810573 +120 29 0 days 00:00:01.937678133 +120 30 0 days 
00:00:13.073428086 +120 31 0 days 00:00:04.309822372 +120 32 0 days 00:00:02.512574237 +120 33 0 days 00:00:19.153919253 +120 34 0 days 00:00:06.708573346 +120 35 0 days 00:00:10.122765536 +120 36 0 days 00:00:11.277493760 +120 37 0 days 00:00:18.653124540 +120 38 0 days 00:00:02.120203260 +120 39 0 days 00:00:51.128230416 +121 1 0 days 00:00:05.387024035 +121 2 0 days 00:00:03.184750210 +121 3 0 days 00:00:25.005967218 +121 4 0 days 00:00:08.293356065 +121 5 0 days 00:00:04.363568660 +121 6 0 days 00:00:15.547797725 +121 7 0 days 00:00:05.157961690 +121 8 0 days 00:00:20.979433657 +121 9 0 days 00:00:05.959847256 +121 10 0 days 00:00:51.288983655 +121 11 0 days 00:00:05.032626793 +121 12 0 days 00:00:04.006600692 +121 13 0 days 00:00:08.130066287 +121 14 0 days 00:00:01.256502210 +121 15 0 days 00:00:04.349783220 +121 16 0 days 00:00:40.070734635 +121 17 0 days 00:00:05.600429050 +121 18 0 days 00:00:06.134576517 +121 19 0 days 00:00:02.084777137 +121 20 0 days 00:00:16.869027004 +121 21 0 days 00:00:10.852266180 +121 22 0 days 00:00:04.068499512 +121 23 0 days 00:00:00.696993695 +121 24 0 days 00:00:09.584543370 +121 25 0 days 00:00:06.282849904 +121 26 0 days 00:00:08.245803522 +121 27 0 days 00:00:16.071035110 +121 28 0 days 00:00:10.791620136 +121 29 0 days 00:00:05.220950596 +122 1 0 days 00:00:04.003110690 +122 2 0 days 00:00:04.575697776 +122 3 0 days 00:00:05.685217710 +122 4 0 days 00:00:01.935207760 +122 5 0 days 00:00:01.565137545 +122 6 0 days 00:00:04.368325732 +122 7 0 days 00:00:02.362264673 +122 8 0 days 00:00:05.231107860 +122 9 0 days 00:00:12.787996906 +122 10 0 days 00:00:00.651059466 +122 11 0 days 00:00:01.674390873 +122 12 0 days 00:00:04.408895373 +122 13 0 days 00:00:12.607676430 +122 14 0 days 00:00:02.589182153 +122 15 0 days 00:00:04.330997080 +122 16 0 days 00:00:03.429618613 +122 17 0 days 00:00:01.723122240 +122 18 0 days 00:00:02.059101890 +122 19 0 days 00:00:06.869763093 +122 20 0 days 00:00:08.430841360 +122 21 0 days 
00:00:01.086283630 +122 22 0 days 00:00:02.959446060 +122 23 0 days 00:00:07.219402273 +122 24 0 days 00:00:02.318302306 +122 25 0 days 00:00:02.056094457 +122 26 0 days 00:00:00.388394536 +122 27 0 days 00:00:04.017309400 +122 28 0 days 00:00:00.340086566 +122 29 0 days 00:00:02.780102708 +122 30 0 days 00:00:02.054490725 +122 31 0 days 00:00:05.903118211 +122 32 0 days 00:00:02.000650273 +122 33 0 days 00:00:06.787453633 +122 34 0 days 00:00:01.762825740 +122 35 0 days 00:00:01.375857020 +122 36 0 days 00:00:02.009070926 +122 37 0 days 00:00:03.122966900 +122 38 0 days 00:00:01.366174993 +122 39 0 days 00:00:04.430089134 +122 40 0 days 00:00:02.944849540 +122 41 0 days 00:00:02.227672806 +122 42 0 days 00:00:06.622654273 +122 43 0 days 00:00:14.778904186 +122 44 0 days 00:00:02.214405926 +122 45 0 days 00:00:08.935490391 +122 46 0 days 00:00:03.421973937 +122 47 0 days 00:00:10.129422093 +122 48 0 days 00:00:04.344771200 +122 49 0 days 00:00:02.681307353 +122 50 0 days 00:00:10.099967873 +122 51 0 days 00:00:02.853948780 +122 52 0 days 00:00:13.364739973 +122 53 0 days 00:00:03.684256166 +122 54 0 days 00:00:00.779631512 +122 55 0 days 00:00:00.959225670 +122 56 0 days 00:00:04.042393748 +122 57 0 days 00:00:04.454390061 +122 58 0 days 00:00:03.130298588 +122 59 0 days 00:00:03.262494440 +122 60 0 days 00:00:09.130080073 +122 61 0 days 00:00:01.147102520 +122 62 0 days 00:00:12.104520188 +122 63 0 days 00:00:00.944315693 +122 64 0 days 00:00:01.605341940 +122 65 0 days 00:00:04.565923340 +122 66 0 days 00:00:09.713803023 +122 67 0 days 00:00:15.416012540 +122 68 0 days 00:00:05.700475573 +122 69 0 days 00:00:01.125544288 +122 70 0 days 00:00:02.988791583 +122 71 0 days 00:00:03.034615726 +122 72 0 days 00:00:06.605709760 +122 73 0 days 00:00:06.197331940 +123 1 0 days 00:00:01.343125273 +123 2 0 days 00:00:09.267480850 +123 3 0 days 00:00:00.945458260 +123 4 0 days 00:00:02.585093490 +123 5 0 days 00:00:04.433626075 +123 6 0 days 00:00:02.550217469 +123 7 0 days 
00:00:03.146294818 +123 8 0 days 00:00:01.999912588 +123 9 0 days 00:00:02.079295691 +123 10 0 days 00:00:06.373501730 +123 11 0 days 00:00:06.010645820 +123 12 0 days 00:00:01.541211743 +123 13 0 days 00:00:01.038681465 +123 14 0 days 00:00:02.688973495 +123 15 0 days 00:00:06.368076925 +123 16 0 days 00:00:27.818144035 +123 17 0 days 00:00:04.585961078 +123 18 0 days 00:00:10.186421805 +123 19 0 days 00:00:01.820444464 +123 20 0 days 00:00:01.288780160 +123 21 0 days 00:00:07.408879725 +123 22 0 days 00:00:10.364881657 +123 23 0 days 00:00:01.493267885 +123 24 0 days 00:00:01.353404520 +123 25 0 days 00:00:02.223458725 +123 26 0 days 00:00:13.189965080 +123 27 0 days 00:00:05.275745090 +123 28 0 days 00:00:12.515932792 +123 29 0 days 00:00:04.881475560 +123 30 0 days 00:00:05.843514795 +123 31 0 days 00:00:00.386995280 +123 32 0 days 00:00:01.594224900 +123 33 0 days 00:00:04.606580970 +123 34 0 days 00:00:03.429372950 +123 35 0 days 00:00:01.422504520 +123 36 0 days 00:00:12.933086370 +123 37 0 days 00:00:04.851856058 +123 38 0 days 00:00:01.780260345 +123 39 0 days 00:00:01.289580280 +123 40 0 days 00:00:05.868386536 +123 41 0 days 00:00:02.368987596 +123 42 0 days 00:00:01.112435597 +123 43 0 days 00:00:02.178597704 +123 44 0 days 00:00:01.334976370 +123 45 0 days 00:00:00.319520665 +123 46 0 days 00:00:06.732061950 +123 47 0 days 00:00:09.388217112 +123 48 0 days 00:00:03.544284692 +123 49 0 days 00:00:01.640696928 +123 50 0 days 00:00:00.618368848 +123 51 0 days 00:00:04.896572110 +123 52 0 days 00:00:04.742864940 +123 53 0 days 00:00:04.500750270 +123 54 0 days 00:00:19.168978495 +123 55 0 days 00:00:10.155451400 +123 56 0 days 00:00:00.741470045 +123 57 0 days 00:00:06.210733310 +123 58 0 days 00:00:02.052350425 +123 59 0 days 00:00:06.303420495 +123 60 0 days 00:00:01.845038565 +123 61 0 days 00:00:00.825680337 +123 62 0 days 00:00:02.018161446 +124 1 0 days 00:00:06.625810610 +124 2 0 days 00:00:05.647634628 +124 3 0 days 00:00:06.343440928 +124 4 0 days 
00:00:08.758153930 +124 5 0 days 00:00:19.446943840 +124 6 0 days 00:00:13.702018176 +124 7 0 days 00:00:06.043970801 +124 8 0 days 00:00:02.787609645 +124 9 0 days 00:00:41.275753143 +124 10 0 days 00:00:07.923062770 +124 11 0 days 00:00:00.887250756 +124 12 0 days 00:00:08.257890385 +124 13 0 days 00:00:06.901753196 +124 14 0 days 00:00:06.369512505 +124 15 0 days 00:00:11.985626584 +124 16 0 days 00:00:08.989198202 +124 17 0 days 00:00:04.634789105 +124 18 0 days 00:00:20.011206032 +124 19 0 days 00:00:12.806286977 +124 20 0 days 00:00:08.071195550 +124 21 0 days 00:00:07.231542005 +124 22 0 days 00:00:06.067790673 +124 23 0 days 00:00:05.334204440 +124 24 0 days 00:00:02.975907652 +124 25 0 days 00:00:02.244621695 +124 26 0 days 00:00:05.735749945 +124 27 0 days 00:00:06.804175540 +124 28 0 days 00:00:10.218270760 +124 29 0 days 00:00:02.959972027 +124 30 0 days 00:00:06.840271116 +124 31 0 days 00:00:01.341517430 +124 32 0 days 00:00:08.398405406 +124 33 0 days 00:00:05.666264410 +124 34 0 days 00:00:20.641275776 +125 1 0 days 00:00:04.992901310 +125 2 0 days 00:00:12.415489248 +125 3 0 days 00:00:20.128911384 +125 4 0 days 00:00:03.922874480 +125 5 0 days 00:00:01.845283124 +125 6 0 days 00:00:12.334450388 +125 7 0 days 00:00:01.459334302 +125 8 0 days 00:00:00.930200032 +125 9 0 days 00:00:17.886880970 +125 10 0 days 00:00:04.869308682 +125 11 0 days 00:00:00.917502652 +125 12 0 days 00:00:05.889173251 +125 13 0 days 00:00:02.519948777 +125 14 0 days 00:00:00.977845624 +125 15 0 days 00:00:01.198626755 +125 16 0 days 00:00:00.992723935 +125 17 0 days 00:00:02.492739266 +125 18 0 days 00:00:01.138745302 +125 19 0 days 00:00:04.819765694 +125 20 0 days 00:00:01.644408032 +125 21 0 days 00:00:07.136392727 +125 22 0 days 00:00:05.417811160 +125 23 0 days 00:00:01.964952964 +125 24 0 days 00:00:03.599757156 +125 25 0 days 00:00:19.752812716 +125 26 0 days 00:00:02.081149781 +125 27 0 days 00:00:01.886940355 +125 28 0 days 00:00:03.033467322 +125 29 0 days 
00:00:05.838068586 +125 30 0 days 00:00:05.644844070 +125 31 0 days 00:00:13.152514816 +125 32 0 days 00:00:01.085288543 +125 33 0 days 00:00:04.519933552 +125 34 0 days 00:00:10.325173632 +125 35 0 days 00:00:08.719880744 +125 37 0 days 00:00:06.477003596 +125 38 0 days 00:00:01.200584410 +125 39 0 days 00:00:19.435383644 +125 40 0 days 00:00:03.181590640 +125 41 0 days 00:00:09.462205820 +125 42 0 days 00:00:02.823538516 +125 43 0 days 00:00:03.694441250 +125 44 0 days 00:00:01.512315741 +125 45 0 days 00:00:05.870191113 +125 46 0 days 00:00:02.854301455 +125 47 0 days 00:00:01.981879847 +125 48 0 days 00:00:26.732654537 +126 1 0 days 00:00:06.091235966 +126 2 0 days 00:00:04.898768755 +126 3 0 days 00:00:10.014233866 +126 4 0 days 00:00:09.002834892 +126 5 0 days 00:00:07.118263068 +126 6 0 days 00:00:01.578329280 +126 7 0 days 00:00:08.507353640 +126 8 0 days 00:00:09.927303470 +126 9 0 days 00:00:01.982786545 +126 10 0 days 00:00:02.011617567 +126 11 0 days 00:00:03.529452174 +126 12 0 days 00:00:13.072135996 +126 13 0 days 00:00:08.515653112 +126 14 0 days 00:00:01.807282736 +126 15 0 days 00:00:02.131227910 +126 16 0 days 00:00:19.538930885 +126 17 0 days 00:00:02.761417467 +126 18 0 days 00:00:12.716687036 +126 19 0 days 00:00:04.668042245 +126 20 0 days 00:00:02.872225930 +126 21 0 days 00:00:08.031438905 +126 22 0 days 00:00:09.043935775 +126 23 0 days 00:00:15.101801220 +126 24 0 days 00:00:19.380102811 +126 25 0 days 00:00:08.531572425 +126 26 0 days 00:00:27.815364527 +126 27 0 days 00:00:06.034596652 +126 28 0 days 00:00:02.966167150 +126 29 0 days 00:00:17.776708842 +126 30 0 days 00:00:26.541811940 +127 1 0 days 00:00:10.045528005 +127 2 0 days 00:00:03.146900576 +127 3 0 days 00:00:11.981073883 +127 4 0 days 00:00:02.530158990 +127 5 0 days 00:00:04.719980205 +127 6 0 days 00:00:00.602041242 +127 7 0 days 00:00:14.253072682 +127 8 0 days 00:00:08.141606910 +127 9 0 days 00:00:05.044098920 +127 10 0 days 00:00:01.005254580 +127 11 0 days 
00:00:05.280466096 +127 12 0 days 00:00:04.764846178 +127 13 0 days 00:00:36.614397610 +127 14 0 days 00:00:01.157330300 +127 15 0 days 00:00:04.330960590 +127 16 0 days 00:00:12.053973632 +127 17 0 days 00:00:05.823943448 +127 18 0 days 00:00:09.645144730 +127 19 0 days 00:00:11.302107253 +127 20 0 days 00:00:02.871029950 +127 21 0 days 00:00:10.656416571 +127 22 0 days 00:00:04.207836340 +127 23 0 days 00:00:13.578498426 +127 24 0 days 00:00:11.635144940 +128 1 0 days 00:00:06.991535348 +128 2 0 days 00:00:02.363910353 +128 3 0 days 00:00:01.336871818 +128 4 0 days 00:00:13.310612304 +128 5 0 days 00:00:01.728911440 +128 7 0 days 00:00:00.661743800 +128 8 0 days 00:00:01.566958366 +128 9 0 days 00:00:14.683313394 +128 10 0 days 00:00:02.555024764 +128 11 0 days 00:00:02.202906910 +128 13 0 days 00:00:03.812083946 +128 14 0 days 00:00:05.078296500 +128 15 0 days 00:00:02.355522680 +128 16 0 days 00:00:02.068924198 +128 17 0 days 00:00:05.124588163 +128 18 0 days 00:00:05.030297830 +128 19 0 days 00:00:01.355330616 +128 20 0 days 00:00:03.454997531 +128 21 0 days 00:00:09.608673225 +128 22 0 days 00:00:02.355725117 +128 23 0 days 00:00:02.216764022 +128 24 0 days 00:00:01.659614970 +128 25 0 days 00:00:06.114028920 +128 26 0 days 00:00:01.207384295 +128 28 0 days 00:00:11.483499693 +128 29 0 days 00:00:04.856907633 +128 30 0 days 00:00:00.927955405 +128 31 0 days 00:00:06.966836540 +128 32 0 days 00:00:12.532797468 +128 33 0 days 00:00:08.909595256 +128 34 0 days 00:00:02.880756524 +128 35 0 days 00:00:05.553005425 +128 36 0 days 00:00:05.925416689 +128 37 0 days 00:00:02.470153921 +128 38 0 days 00:00:01.773708546 +128 39 0 days 00:00:04.422904651 +128 40 0 days 00:00:01.656345574 +128 41 0 days 00:00:02.249461666 +129 1 0 days 00:00:03.974665589 +129 2 0 days 00:00:02.687283998 +129 3 0 days 00:00:06.214791988 +129 4 0 days 00:00:01.288478035 +129 5 0 days 00:00:00.433627044 +129 6 0 days 00:00:02.682593326 +129 7 0 days 00:00:01.066055601 +129 8 0 days 
00:00:01.416829983 +129 9 0 days 00:00:05.951370542 +129 10 0 days 00:00:03.521703360 +129 11 0 days 00:00:02.416428593 +129 12 0 days 00:00:03.300844608 +129 13 0 days 00:00:07.437092782 +129 14 0 days 00:00:01.230394260 +129 16 0 days 00:00:01.405677983 +129 18 0 days 00:00:02.310729408 +129 19 0 days 00:00:06.182109225 +129 21 0 days 00:00:06.128547345 +129 22 0 days 00:00:01.964820567 +129 23 0 days 00:00:02.635812811 +129 24 0 days 00:00:00.551234113 +129 25 0 days 00:00:01.332432117 +129 26 0 days 00:00:00.801287958 +129 27 0 days 00:00:01.790862350 +129 28 0 days 00:00:03.501439260 +129 29 0 days 00:00:01.544996002 +129 30 0 days 00:00:01.145920500 +129 31 0 days 00:00:05.471498055 +129 32 0 days 00:00:01.476301726 +129 33 0 days 00:00:08.242647994 +129 34 0 days 00:00:02.637506497 +129 35 0 days 00:00:00.517349425 +129 36 0 days 00:00:02.467891660 +129 37 0 days 00:00:03.120715195 +129 38 0 days 00:00:05.151394062 +129 39 0 days 00:00:00.558028048 +129 40 0 days 00:00:01.514121685 +129 41 0 days 00:00:01.958358238 +129 42 0 days 00:00:01.831283463 +129 43 0 days 00:00:07.166300368 +129 44 0 days 00:00:01.768307126 +129 45 0 days 00:00:00.532138540 +129 46 0 days 00:00:04.412351154 +129 47 0 days 00:00:01.093438520 +129 48 0 days 00:00:01.216162691 +129 49 0 days 00:00:01.233479616 +129 50 0 days 00:00:07.272892697 +129 51 0 days 00:00:01.982492578 +129 52 0 days 00:00:03.222964876 +129 53 0 days 00:00:01.181997916 +129 54 0 days 00:00:00.583604200 +129 55 0 days 00:00:04.866917409 +130 1 0 days 00:00:01.924918506 +130 2 0 days 00:00:01.173363926 +130 3 0 days 00:00:01.149971078 +130 4 0 days 00:00:06.108654947 +130 5 0 days 00:00:02.414648438 +130 6 0 days 00:00:06.709132460 +130 7 0 days 00:00:02.055336486 +130 8 0 days 00:00:02.300964835 +130 9 0 days 00:00:05.400307680 +130 10 0 days 00:00:11.300847085 +130 11 0 days 00:00:02.419504124 +130 12 0 days 00:00:05.443458153 +130 13 0 days 00:00:02.381138314 +130 14 0 days 00:00:06.744578253 +130 15 0 days 
00:00:05.073636036 +130 16 0 days 00:00:06.659306626 +130 17 0 days 00:00:03.660829340 +130 18 0 days 00:00:02.762136513 +130 19 0 days 00:00:02.657540584 +130 20 0 days 00:00:05.447235520 +130 21 0 days 00:00:09.064084200 +130 22 0 days 00:00:02.299952080 +130 23 0 days 00:00:05.410372833 +130 24 0 days 00:00:00.810785613 +130 25 0 days 00:00:04.630653966 +130 26 0 days 00:00:00.730017244 +130 27 0 days 00:00:01.665730935 +130 28 0 days 00:00:02.971530005 +130 29 0 days 00:00:04.822810566 +130 30 0 days 00:00:02.872208905 +130 32 0 days 00:00:02.045167640 +130 33 0 days 00:00:00.825539549 +130 34 0 days 00:00:04.538262693 +130 35 0 days 00:00:02.852102540 +130 36 0 days 00:00:03.137989775 +130 37 0 days 00:00:02.350018606 +130 38 0 days 00:00:01.401311790 +130 39 0 days 00:00:01.786592340 +130 40 0 days 00:00:01.792171644 +130 41 0 days 00:00:07.895082520 +130 42 0 days 00:00:09.066364113 +130 43 0 days 00:00:02.818951933 +130 44 0 days 00:00:05.351439360 +130 45 0 days 00:00:05.462357260 +130 46 0 days 00:00:00.966913610 +130 47 0 days 00:00:04.862670120 +130 48 0 days 00:00:02.681438625 +130 49 0 days 00:00:02.738538750 +130 50 0 days 00:00:03.335705240 +130 51 0 days 00:00:01.920542646 +130 52 0 days 00:00:05.832490100 +130 53 0 days 00:00:09.224287904 +130 54 0 days 00:00:00.851723620 +130 55 0 days 00:00:00.677512875 +130 56 0 days 00:00:02.926968552 +130 57 0 days 00:00:03.364879366 +130 58 0 days 00:00:02.745734520 +130 59 0 days 00:00:03.875756493 +130 60 0 days 00:00:01.198534528 +130 61 0 days 00:00:05.545725392 +130 62 0 days 00:00:04.816360660 +130 63 0 days 00:00:06.745912623 +130 64 0 days 00:00:05.878594270 +130 65 0 days 00:00:05.645234832 +130 66 0 days 00:00:01.312534953 +130 68 0 days 00:00:04.074618640 +130 69 0 days 00:00:01.322281442 +130 70 0 days 00:00:08.972752666 +130 71 0 days 00:00:01.564131258 +130 72 0 days 00:00:01.101276152 +130 73 0 days 00:00:02.334227786 +130 75 0 days 00:00:08.730259210 +130 76 0 days 00:00:01.116996183 +130 77 
0 days 00:00:01.749227684 +130 78 0 days 00:00:02.686418012 +131 1 0 days 00:00:02.565542275 +131 2 0 days 00:00:06.882551892 +131 3 0 days 00:00:01.858485920 +131 4 0 days 00:00:10.551091924 +131 5 0 days 00:00:02.441449668 +131 6 0 days 00:00:01.019606468 +131 7 0 days 00:00:04.651240897 +131 8 0 days 00:00:03.413283794 +131 9 0 days 00:00:01.887412537 +131 10 0 days 00:00:06.471302850 +131 11 0 days 00:00:04.208672895 +131 12 0 days 00:00:06.444458376 +131 13 0 days 00:00:01.919433050 +131 14 0 days 00:00:03.493024060 +131 15 0 days 00:00:03.723943745 +131 16 0 days 00:00:07.457291880 +131 17 0 days 00:00:04.408839903 +131 18 0 days 00:00:02.044896885 +131 19 0 days 00:00:12.525692860 +131 20 0 days 00:00:05.908837523 +131 21 0 days 00:00:07.473614904 +131 22 0 days 00:00:05.739429062 +131 23 0 days 00:00:01.061118635 +131 24 0 days 00:00:04.353305980 +131 25 0 days 00:00:01.616718788 +131 26 0 days 00:00:01.882126902 +131 27 0 days 00:00:06.409416175 +131 28 0 days 00:00:04.870972880 +131 29 0 days 00:00:07.388481650 +131 30 0 days 00:00:05.050910030 +131 31 0 days 00:00:09.146812592 +131 32 0 days 00:00:05.381928696 +131 33 0 days 00:00:05.449857000 +131 34 0 days 00:00:00.939484790 +131 35 0 days 00:00:00.817050885 +131 36 0 days 00:00:03.130897165 +131 37 0 days 00:00:01.648233720 +131 38 0 days 00:00:03.991988315 +131 39 0 days 00:00:05.235691075 +131 40 0 days 00:00:00.880272505 +131 41 0 days 00:00:02.976989665 +131 42 0 days 00:00:02.451898060 +131 43 0 days 00:00:08.985772031 +131 44 0 days 00:00:00.687827548 +131 45 0 days 00:00:04.940746785 +131 46 0 days 00:00:02.068396920 +131 47 0 days 00:00:11.302442642 +131 48 0 days 00:00:03.017032796 +131 49 0 days 00:00:05.340612360 +131 50 0 days 00:00:02.013589230 +131 51 0 days 00:00:01.060577973 +131 52 0 days 00:00:10.361884766 +131 53 0 days 00:00:04.349896100 +131 54 0 days 00:00:04.528972570 +131 55 0 days 00:00:05.330838992 +131 56 0 days 00:00:01.901425593 +132 1 0 days 00:00:05.509882660 +132 2 0 
days 00:00:01.278452660 +132 3 0 days 00:00:00.546574400 +132 4 0 days 00:00:05.500312993 +132 5 0 days 00:00:04.970162900 +132 6 0 days 00:00:03.693941420 +132 7 0 days 00:00:01.975671410 +132 8 0 days 00:00:00.709286796 +132 9 0 days 00:00:01.462910985 +132 10 0 days 00:00:00.717501890 +132 11 0 days 00:00:02.707443968 +132 12 0 days 00:00:01.102846853 +132 13 0 days 00:00:00.900304193 +132 14 0 days 00:00:06.935392255 +132 15 0 days 00:00:01.557323266 +132 16 0 days 00:00:01.392147015 +132 17 0 days 00:00:05.720218321 +132 19 0 days 00:00:02.014528193 +132 20 0 days 00:00:01.094985174 +132 21 0 days 00:00:02.634092058 +132 22 0 days 00:00:05.529746930 +132 23 0 days 00:00:01.471023713 +132 24 0 days 00:00:02.099292426 +132 25 0 days 00:00:01.020423786 +132 26 0 days 00:00:00.432061071 +132 27 0 days 00:00:02.146400306 +132 29 0 days 00:00:02.324439400 +132 30 0 days 00:00:01.592616046 +132 31 0 days 00:00:02.692124833 +132 33 0 days 00:00:00.725342451 +132 34 0 days 00:00:02.649777962 +132 35 0 days 00:00:03.694558733 +132 36 0 days 00:00:02.362873084 +132 37 0 days 00:00:01.443032366 +132 38 0 days 00:00:03.257704710 +132 39 0 days 00:00:06.262618340 +132 40 0 days 00:00:05.594304246 +132 41 0 days 00:00:03.332239430 +132 42 0 days 00:00:02.388756406 +132 43 0 days 00:00:04.312023780 +132 44 0 days 00:00:00.705936887 +132 45 0 days 00:00:04.204552560 +132 46 0 days 00:00:00.608052780 +132 47 0 days 00:00:04.180940620 +132 48 0 days 00:00:05.523783610 +132 49 0 days 00:00:01.903960062 +132 50 0 days 00:00:03.387714742 +132 51 0 days 00:00:00.729268090 +132 52 0 days 00:00:01.179852533 +132 53 0 days 00:00:00.598507535 +132 54 0 days 00:00:01.425605786 +132 55 0 days 00:00:01.995912135 +132 56 0 days 00:00:02.113725226 +132 57 0 days 00:00:01.181383633 +132 58 0 days 00:00:01.733605608 +132 59 0 days 00:00:08.556270891 +132 61 0 days 00:00:02.117102460 +132 62 0 days 00:00:02.630732896 +132 63 0 days 00:00:02.659970400 +132 64 0 days 00:00:00.937184658 +132 65 0 
days 00:00:02.030160360 +132 66 0 days 00:00:01.825745616 +132 68 0 days 00:00:03.643492193 +132 69 0 days 00:00:00.984218096 +132 70 0 days 00:00:01.215567710 +132 71 0 days 00:00:00.536222100 +132 72 0 days 00:00:02.589526707 +132 73 0 days 00:00:01.107532486 +132 74 0 days 00:00:01.991131224 +132 75 0 days 00:00:03.207450406 +132 76 0 days 00:00:03.966789290 +132 77 0 days 00:00:01.261710712 +132 78 0 days 00:00:01.355984525 +132 79 0 days 00:00:06.204761645 +132 80 0 days 00:00:01.112425310 +132 81 0 days 00:00:03.355476595 +132 82 0 days 00:00:01.289300245 +132 84 0 days 00:00:02.550778948 +132 85 0 days 00:00:02.815340240 +132 86 0 days 00:00:01.289803250 +132 87 0 days 00:00:03.612178725 +132 89 0 days 00:00:00.546219940 +132 90 0 days 00:00:00.739590366 +132 91 0 days 00:00:01.059390666 +132 92 0 days 00:00:02.143315346 +132 93 0 days 00:00:00.461250348 +132 94 0 days 00:00:07.312078436 +132 95 0 days 00:00:05.501408035 +132 96 0 days 00:00:02.137940713 +132 97 0 days 00:00:02.726835067 +133 1 0 days 00:00:04.833069870 +133 3 0 days 00:00:00.605601335 +133 4 0 days 00:00:00.635302667 +133 5 0 days 00:00:03.802313357 +133 6 0 days 00:00:00.738817073 +133 7 0 days 00:00:02.197707530 +133 8 0 days 00:00:01.097374000 +133 9 0 days 00:00:02.117901313 +133 10 0 days 00:00:01.142539846 +133 11 0 days 00:00:01.117153515 +133 12 0 days 00:00:02.126026340 +133 13 0 days 00:00:00.385357330 +133 14 0 days 00:00:00.556797900 +133 15 0 days 00:00:01.102641953 +133 16 0 days 00:00:07.958756097 +133 17 0 days 00:00:03.123551265 +133 18 0 days 00:00:04.627836272 +133 19 0 days 00:00:02.288533956 +133 20 0 days 00:00:02.339438054 +133 21 0 days 00:00:03.535863450 +133 22 0 days 00:00:02.362739594 +133 23 0 days 00:00:02.342174320 +133 24 0 days 00:00:01.099529640 +133 25 0 days 00:00:07.281033988 +133 26 0 days 00:00:02.196770863 +133 27 0 days 00:00:00.433488515 +133 28 0 days 00:00:00.589918340 +133 29 0 days 00:00:01.479306413 +133 30 0 days 00:00:02.772597700 +133 31 0 
days 00:00:00.815816854 +133 32 0 days 00:00:06.139837970 +133 33 0 days 00:00:01.169449595 +133 34 0 days 00:00:00.871540263 +133 35 0 days 00:00:01.452284400 +133 36 0 days 00:00:02.064356450 +133 37 0 days 00:00:01.367187230 +133 38 0 days 00:00:01.527290631 +133 39 0 days 00:00:03.120838940 +133 40 0 days 00:00:02.144133346 +133 41 0 days 00:00:03.737104876 +133 42 0 days 00:00:01.466700652 +133 43 0 days 00:00:02.564253637 +133 44 0 days 00:00:02.404454962 +133 46 0 days 00:00:01.061198300 +133 47 0 days 00:00:02.761978195 +133 48 0 days 00:00:02.066023175 +133 49 0 days 00:00:01.748441052 +133 50 0 days 00:00:02.719798293 +133 51 0 days 00:00:03.599912183 +133 52 0 days 00:00:01.919671595 +133 53 0 days 00:00:01.732577548 +133 54 0 days 00:00:00.764013420 +133 55 0 days 00:00:02.262194742 +133 56 0 days 00:00:01.673875941 +133 57 0 days 00:00:04.516045905 +133 58 0 days 00:00:02.539972591 +133 59 0 days 00:00:00.972982264 +133 60 0 days 00:00:01.288575122 +133 61 0 days 00:00:01.357335492 +133 62 0 days 00:00:01.222521208 +133 63 0 days 00:00:07.485401870 +133 64 0 days 00:00:02.633039728 +133 65 0 days 00:00:03.201902860 +133 66 0 days 00:00:00.425021577 +133 67 0 days 00:00:03.665492836 +133 68 0 days 00:00:06.453162364 +133 69 0 days 00:00:00.915898963 +133 70 0 days 00:00:03.374184035 +133 71 0 days 00:00:00.711813340 +133 72 0 days 00:00:01.627341591 +133 73 0 days 00:00:07.188238592 +133 74 0 days 00:00:02.614668137 +133 75 0 days 00:00:04.667863330 +133 76 0 days 00:00:07.459756533 +133 77 0 days 00:00:01.395123066 +133 78 0 days 00:00:02.097310133 +133 79 0 days 00:00:05.707857820 +133 80 0 days 00:00:06.785637630 +133 81 0 days 00:00:02.845164151 +133 82 0 days 00:00:01.590578060 +133 83 0 days 00:00:01.470121812 +133 84 0 days 00:00:01.186167033 +133 85 0 days 00:00:00.560515586 +133 86 0 days 00:00:06.088499480 +133 87 0 days 00:00:01.258256720 +133 88 0 days 00:00:01.193147530 +133 89 0 days 00:00:06.119176765 +133 90 0 days 00:00:02.873101669 
+133 91 0 days 00:00:00.724624439 +133 92 0 days 00:00:01.704652196 +133 93 0 days 00:00:01.452555656 +133 94 0 days 00:00:00.564251516 +133 95 0 days 00:00:04.988592640 +134 1 0 days 00:00:08.899671440 +134 2 0 days 00:00:13.172237297 +134 3 0 days 00:00:16.358624943 +134 4 0 days 00:00:13.025007860 +134 5 0 days 00:00:15.211605825 +134 6 0 days 00:00:32.323539444 +134 7 0 days 00:00:07.366385256 +134 8 0 days 00:00:46.805555200 +134 9 0 days 00:00:18.926654544 +134 10 0 days 00:00:23.258828620 +134 11 0 days 00:00:06.344150492 +134 12 0 days 00:00:34.898116970 +134 13 0 days 00:00:14.731582766 +134 14 0 days 00:00:56.928009828 +135 3 0 days 00:00:04.841110093 +135 4 0 days 00:00:06.917116203 +135 5 0 days 00:00:18.952554906 +135 6 0 days 00:00:15.837113686 +135 7 0 days 00:00:09.495583361 +135 8 0 days 00:00:04.506543220 +135 9 0 days 00:00:13.446778606 +135 10 0 days 00:00:08.725934256 +135 11 0 days 00:00:05.621571845 +135 12 0 days 00:00:07.964876075 +135 13 0 days 00:00:10.706265330 +135 14 0 days 00:00:07.916518012 +135 15 0 days 00:00:06.869046820 +135 16 0 days 00:00:19.357703814 +135 17 0 days 00:00:18.019753491 +135 18 0 days 00:00:04.848192467 +135 19 0 days 00:00:04.463100306 +135 20 0 days 00:00:40.363236513 +136 1 0 days 00:00:44.312468432 +136 2 0 days 00:00:17.589527560 +136 3 0 days 00:00:39.264898440 +136 4 0 days 00:00:22.831999564 +136 5 0 days 00:00:14.134342072 +136 6 0 days 00:00:06.448716732 +136 7 0 days 00:00:20.432622792 +136 8 0 days 00:00:12.319201710 +136 9 0 days 00:00:05.994722100 +136 10 0 days 00:00:34.570328252 +136 11 0 days 00:00:22.746787492 +136 12 0 days 00:00:03.955721240 +136 13 0 days 00:00:18.194065636 +137 1 0 days 00:00:11.737881406 +137 2 0 days 00:00:09.809918366 +137 3 0 days 00:00:15.056052595 +137 4 0 days 00:00:24.711781473 +137 5 0 days 00:00:16.980780090 +137 6 0 days 00:00:07.926452200 +137 7 0 days 00:00:03.032046800 +137 8 0 days 00:00:22.235392340 +137 9 0 days 00:00:05.754029824 +137 10 0 days 
00:00:09.750040926 +137 11 0 days 00:00:05.684748900 +137 12 0 days 00:00:07.420054020 +137 13 0 days 00:00:09.754587465 +137 14 0 days 00:00:06.155627844 +137 15 0 days 00:00:14.254082766 +137 16 0 days 00:00:14.683825348 +137 17 0 days 00:00:11.237163765 +137 18 0 days 00:00:45.741934603 +137 19 0 days 00:00:04.063793354 +137 20 0 days 00:00:22.550772896 +137 22 0 days 00:00:02.701086665 +137 23 0 days 00:00:04.353284342 +137 25 0 days 00:00:03.239032720 +137 26 0 days 00:00:11.723311793 +137 28 0 days 00:00:06.721752716 +137 29 0 days 00:00:06.314988501 +138 1 0 days 00:00:17.943948897 +138 2 0 days 00:00:26.433989650 +138 3 0 days 00:00:19.758728835 +138 4 0 days 00:00:19.397572696 +138 5 0 days 00:00:11.477074395 +138 6 0 days 00:00:43.971319263 +138 7 0 days 00:00:14.134339404 +138 8 0 days 00:00:09.377543873 +138 9 0 days 00:00:14.198382033 +138 10 0 days 00:00:19.228510165 +138 11 0 days 00:00:40.174046180 +138 12 0 days 00:00:12.636386330 +138 14 0 days 00:00:15.628076195 +138 15 0 days 00:00:17.603857762 +138 16 0 days 00:00:22.756577405 +138 17 0 days 00:00:29.655363340 +139 1 0 days 00:00:08.972548675 +139 2 0 days 00:00:04.135305908 +139 3 0 days 00:00:08.424642871 +139 4 0 days 00:00:10.213982475 +139 5 0 days 00:00:03.733392093 +139 6 0 days 00:00:11.253106484 +139 7 0 days 00:00:05.917594400 +139 8 0 days 00:00:12.546528605 +139 9 0 days 00:00:07.180411480 +139 10 0 days 00:00:44.099162775 +139 11 0 days 00:00:07.548587980 +139 12 0 days 00:00:11.165912185 +139 13 0 days 00:00:12.404977992 +139 14 0 days 00:00:05.006968190 +139 15 0 days 00:00:16.956306960 +139 16 0 days 00:00:09.806848910 +139 17 0 days 00:00:04.219255855 +139 18 0 days 00:00:06.704842100 +139 19 0 days 00:00:08.790567457 +139 20 0 days 00:00:15.873946045 +139 21 0 days 00:00:35.714490660 +139 22 0 days 00:00:12.638947602 +139 23 0 days 00:00:13.787443593 +139 24 0 days 00:00:15.509269360 +140 1 0 days 00:00:14.996432396 +140 2 0 days 00:00:26.743849374 +140 3 0 days 
00:00:19.374429424 +140 4 0 days 00:00:40.910894970 +140 5 0 days 00:00:37.438643306 +140 6 0 days 00:00:23.971539208 +140 7 0 days 00:00:12.180376294 +140 8 0 days 00:00:39.234382713 +141 1 0 days 00:00:47.379604231 +141 2 0 days 00:00:10.756630346 +141 3 0 days 00:00:37.193455633 +141 5 0 days 00:00:21.325259395 +141 6 0 days 00:00:03.160772022 +141 7 0 days 00:00:05.105584546 +141 8 0 days 00:00:10.595676683 +141 9 0 days 00:00:10.796305769 +142 2 0 days 00:05:50.673838071 +142 15 0 days 00:06:59.177087700 +143 1 0 days 00:03:39.053751200 +143 27 0 days 00:03:12.859635596 +144 9 0 days 00:04:57.725707484 +144 39 0 days 00:03:46.833822270 +145 2 0 days 00:02:48.645997555 +145 3 0 days 00:05:02.498007125 +146 5 0 days 00:00:49.235383836 +146 6 0 days 00:01:03.984361700 +146 7 0 days 00:01:12.149219900 +147 1 0 days 00:00:25.739165000 +147 3 0 days 00:00:09.511185710 +147 6 0 days 00:00:23.373215420 +147 9 0 days 00:00:30.412215766 +147 11 0 days 00:00:16.423297990 +147 13 0 days 00:00:18.332275288 +147 14 0 days 00:00:29.607661783 +147 18 0 days 00:00:31.553130618 +147 19 0 days 00:00:35.117989640 +147 20 0 days 00:00:09.549427734 +147 21 0 days 00:00:09.414346083 +147 22 0 days 00:00:08.567067231 +148 3 0 days 00:01:18.723932516 +148 5 0 days 00:00:23.528316200 +148 6 0 days 00:00:51.444786880 +148 8 0 days 00:00:51.546900013 +148 10 0 days 00:00:54.247664118 +148 11 0 days 00:00:16.893089500 +148 15 0 days 00:00:20.560339686 +148 16 0 days 00:00:36.056483158 +148 17 0 days 00:00:15.607089111 +148 18 0 days 00:00:15.994761311 +148 19 0 days 00:00:21.041569611 +148 24 0 days 00:00:37.006692419 +148 25 0 days 00:01:09.690860666 +149 1 0 days 00:00:37.028803932 +149 2 0 days 00:00:08.954831166 +149 4 0 days 00:00:12.992614025 +149 6 0 days 00:00:37.608763669 +149 7 0 days 00:00:25.281093246 +149 8 0 days 00:00:38.932685360 +149 10 0 days 00:00:12.317665505 +149 11 0 days 00:00:12.497720552 +149 12 0 days 00:00:29.250795300 +149 13 0 days 00:00:09.348238272 +150 5 0 
days 00:02:45.630815134 +150 8 0 days 00:03:14.418172416 +151 7 0 days 00:01:57.220843728 +151 36 0 days 00:04:12.211047627 +151 40 0 days 00:01:49.739359652 +152 3 0 days 00:08:05.215886846 +152 7 0 days 00:09:46.262929583 +153 3 0 days 00:02:23.323538702 +153 8 0 days 00:03:07.774929462 +153 28 0 days 00:03:19.225907006 +154 1 0 days 00:00:11.128737910 +154 2 0 days 00:00:25.785370880 +154 3 0 days 00:00:09.821246450 +154 4 0 days 00:00:13.512020960 +154 5 0 days 00:00:08.351182113 +154 6 0 days 00:00:18.922604976 +154 7 0 days 00:00:10.032302075 +154 8 0 days 00:00:30.491625847 +154 9 0 days 00:00:12.248076906 +154 10 0 days 00:00:11.716670100 +154 11 0 days 00:00:12.236449626 +154 12 0 days 00:00:14.695058370 +154 13 0 days 00:00:09.625122620 +154 14 0 days 00:00:16.278127830 +154 15 0 days 00:00:11.486564677 +154 16 0 days 00:00:26.450342280 +154 17 0 days 00:00:15.930236245 +154 18 0 days 00:00:15.558446900 +154 19 0 days 00:00:10.833255510 +154 20 0 days 00:00:07.986443600 +154 21 0 days 00:00:12.234676096 +154 22 0 days 00:00:12.913841178 +155 1 0 days 00:00:14.144089716 +155 2 0 days 00:00:06.048577272 +155 3 0 days 00:00:14.492908750 +155 4 0 days 00:00:14.621942800 +155 5 0 days 00:00:05.444177882 +155 6 0 days 00:00:06.862132151 +155 7 0 days 00:00:10.347964268 +155 8 0 days 00:00:05.318203145 +155 9 0 days 00:00:05.494766635 +155 10 0 days 00:00:11.731465906 +155 11 0 days 00:00:06.173463884 +155 12 0 days 00:00:09.216143434 +155 13 0 days 00:00:06.061189608 +155 14 0 days 00:00:04.629178793 +155 15 0 days 00:00:14.418399705 +155 16 0 days 00:00:07.476983490 +155 17 0 days 00:00:08.641721442 +155 18 0 days 00:00:11.574956526 +155 19 0 days 00:00:05.627921734 +155 20 0 days 00:00:08.493828165 +155 21 0 days 00:00:06.501142700 +155 22 0 days 00:00:05.342052456 +155 23 0 days 00:00:08.792544856 +155 24 0 days 00:00:07.518927000 +155 25 0 days 00:00:08.739575326 +155 26 0 days 00:00:09.024067261 +155 27 0 days 00:00:09.838813881 +155 28 0 days 
00:00:15.716614460 +155 29 0 days 00:00:07.906163788 +155 30 0 days 00:00:09.176573553 +156 1 0 days 00:00:20.927022160 +156 2 0 days 00:00:31.066100842 +156 3 0 days 00:00:14.970542700 +156 4 0 days 00:00:23.626574846 +156 5 0 days 00:00:17.929681700 +156 6 0 days 00:00:26.303610640 +156 7 0 days 00:00:26.198871345 +156 8 0 days 00:00:17.153400140 +156 9 0 days 00:00:16.822440188 +156 10 0 days 00:00:26.090525765 +156 11 0 days 00:00:28.911529853 +156 12 0 days 00:00:23.302993000 +156 13 0 days 00:00:13.102928345 +156 14 0 days 00:00:12.345052793 +156 15 0 days 00:00:13.113267510 +156 16 0 days 00:00:14.534934064 +156 17 0 days 00:00:13.048346390 +156 18 0 days 00:00:13.613749160 +156 19 0 days 00:00:14.439178096 +156 20 0 days 00:00:24.567892390 +157 1 0 days 00:00:23.013814160 +157 3 0 days 00:00:09.662315405 +157 4 0 days 00:00:08.424926420 +157 5 0 days 00:00:09.349561350 +157 7 0 days 00:00:08.332325840 +157 8 0 days 00:00:08.611474526 +157 9 0 days 00:00:09.693139180 +157 10 0 days 00:00:25.822650290 +157 11 0 days 00:00:22.971375266 +157 13 0 days 00:00:27.319448660 +157 14 0 days 00:00:09.316152790 +157 15 0 days 00:00:25.796328920 +157 18 0 days 00:00:09.921441940 +157 19 0 days 00:00:09.657799495 +157 20 0 days 00:00:25.683725435 +157 21 0 days 00:00:26.080353985 +157 22 0 days 00:00:08.413495580 +157 25 0 days 00:00:26.001771515 +157 26 0 days 00:00:28.344018310 +157 27 0 days 00:00:23.246700746 +157 28 0 days 00:00:09.348727385 +157 29 0 days 00:00:27.483056484 +157 30 0 days 00:00:26.016619340 +157 31 0 days 00:00:25.813179755 +158 1 0 days 00:00:29.082974748 +158 2 0 days 00:00:14.825845808 +158 3 0 days 00:00:29.581346430 +158 6 0 days 00:00:19.539803768 +158 7 0 days 00:00:32.316959640 +159 1 0 days 00:00:10.560584193 +159 2 0 days 00:00:09.131309560 +159 3 0 days 00:00:11.904560960 +159 4 0 days 00:00:09.010894933 +159 5 0 days 00:00:19.104881496 +159 6 0 days 00:00:11.929153727 +159 7 0 days 00:00:18.507242192 +159 8 0 days 00:00:11.787988090 
+159 9 0 days 00:00:11.908861510 +159 10 0 days 00:00:19.446136150 +159 11 0 days 00:00:08.737543088 +159 12 0 days 00:00:17.674847553 +159 13 0 days 00:00:11.908339676 +159 14 0 days 00:00:12.655666148 +159 15 0 days 00:00:17.002284384 +159 16 0 days 00:00:08.185241900 +159 17 0 days 00:00:09.132188050 +159 18 0 days 00:00:11.387053423 +159 19 0 days 00:00:09.011335893 +159 20 0 days 00:00:17.672788883 +159 21 0 days 00:00:18.884080711 +160 1 0 days 00:00:27.988075868 +160 2 0 days 00:00:14.980843913 +160 3 0 days 00:00:16.502949905 +160 4 0 days 00:00:18.553151871 +160 5 0 days 00:00:31.695265500 +160 6 0 days 00:00:30.031026952 +160 7 0 days 00:00:28.872539222 +160 8 0 days 00:00:25.168593273 +160 9 0 days 00:00:15.295297436 +160 10 0 days 00:00:19.967011610 +160 11 0 days 00:00:25.010979160 +160 12 0 days 00:00:25.102935533 +160 13 0 days 00:00:18.618892623 +161 1 0 days 00:00:07.858936176 +161 2 0 days 00:00:07.888103890 +161 3 0 days 00:00:14.100527980 +161 4 0 days 00:00:05.492746290 +161 5 0 days 00:00:10.085448598 +161 6 0 days 00:00:16.449927193 +161 7 0 days 00:00:14.664104476 +161 8 0 days 00:00:07.592963520 +161 9 0 days 00:00:14.105179380 +161 10 0 days 00:00:09.148560087 +161 11 0 days 00:00:05.023186376 +161 12 0 days 00:00:05.458239758 +161 13 0 days 00:00:05.159203360 +161 14 0 days 00:00:05.297495708 +161 15 0 days 00:00:16.966272798 +161 16 0 days 00:00:05.314008880 +161 17 0 days 00:00:12.577997485 +161 18 0 days 00:00:08.069330485 +161 19 0 days 00:00:14.524940910 +161 20 0 days 00:00:12.156543233 +161 21 0 days 00:00:12.550132740 +161 22 0 days 00:00:07.185113965 +161 23 0 days 00:00:07.223183025 +161 24 0 days 00:00:12.249232460 +161 25 0 days 00:00:04.717135556 +161 26 0 days 00:00:12.281306230 +161 27 0 days 00:00:07.562781632 +161 28 0 days 00:00:13.985394090 +162 1 0 days 00:00:24.795427905 +162 2 0 days 00:00:24.090044780 +162 3 0 days 00:00:18.251797845 +162 4 0 days 00:00:08.487446206 +162 5 0 days 00:00:31.500824780 +162 6 0 days 
00:00:14.786085086 +162 7 0 days 00:00:22.502929520 +162 8 0 days 00:00:12.682416900 +162 9 0 days 00:00:08.449497553 +162 10 0 days 00:00:34.138124580 +162 11 0 days 00:00:25.401843155 +162 12 0 days 00:00:09.659533965 +162 13 0 days 00:00:27.065643106 +162 14 0 days 00:00:14.915988924 +162 15 0 days 00:00:08.142819226 +162 16 0 days 00:00:34.465239940 +162 17 0 days 00:00:18.890580506 +162 18 0 days 00:00:19.363245640 +162 19 0 days 00:00:11.897362510 +162 20 0 days 00:00:18.467882045 +162 21 0 days 00:00:28.829600473 +162 22 0 days 00:00:30.676255566 +162 23 0 days 00:00:15.722348200 +162 24 0 days 00:00:13.999448795 +162 25 0 days 00:00:10.437658306 +162 26 0 days 00:00:10.893555060 +162 27 0 days 00:00:12.166293500 +162 28 0 days 00:00:09.933511293 +162 29 0 days 00:00:12.563393125 +163 1 0 days 00:00:14.715505166 +163 2 0 days 00:00:16.371021333 +163 3 0 days 00:00:14.499077825 +163 4 0 days 00:00:08.375284055 +163 5 0 days 00:00:04.728796126 +163 6 0 days 00:00:16.495456266 +163 7 0 days 00:00:10.988789485 +163 8 0 days 00:00:18.720293620 +163 9 0 days 00:00:10.619164300 +163 10 0 days 00:00:06.817303893 +163 11 0 days 00:00:06.446270370 +163 12 0 days 00:00:09.127459646 +163 13 0 days 00:00:18.306435525 +163 14 0 days 00:00:12.735381545 +163 15 0 days 00:00:13.445171930 +163 16 0 days 00:00:13.081244370 +163 17 0 days 00:00:04.939629000 +163 18 0 days 00:00:17.943660590 +163 19 0 days 00:00:04.523049900 +163 20 0 days 00:00:12.373265920 +163 21 0 days 00:00:12.388634426 +163 22 0 days 00:00:11.254211253 +163 23 0 days 00:00:06.988170813 +163 24 0 days 00:00:13.266144513 +163 25 0 days 00:00:17.269893353 +163 26 0 days 00:00:14.154476235 +163 27 0 days 00:00:07.958070095 +163 28 0 days 00:00:11.280750340 +163 29 0 days 00:00:07.467276386 +163 30 0 days 00:00:18.522030790 +163 31 0 days 00:00:09.724464893 +163 32 0 days 00:00:06.407246255 +163 33 0 days 00:00:10.292020885 +163 34 0 days 00:00:12.039699360 +163 35 0 days 00:00:16.181056193 +163 36 0 days 
00:00:10.027273785 +163 37 0 days 00:00:04.817596380 +163 38 0 days 00:00:04.439966646 +163 39 0 days 00:00:13.537985033 +163 40 0 days 00:00:07.958654390 +163 41 0 days 00:00:05.729210955 +163 42 0 days 00:00:17.688762510 +163 43 0 days 00:00:04.728672573 +163 44 0 days 00:00:10.761953815 +164 1 0 days 00:00:10.905715671 +164 2 0 days 00:00:12.121237772 +164 3 0 days 00:00:33.424839472 +164 4 0 days 00:00:31.199854295 +164 5 0 days 00:00:32.887437313 +164 6 0 days 00:00:31.859717357 +164 7 0 days 00:00:18.932944490 +164 8 0 days 00:00:12.040542500 +164 9 0 days 00:00:20.656853404 +164 10 0 days 00:00:22.521187580 +164 11 0 days 00:00:18.636040315 +165 1 0 days 00:00:17.240555406 +165 2 0 days 00:00:16.727775102 +165 3 0 days 00:00:09.999241800 +165 4 0 days 00:00:07.974479235 +165 5 0 days 00:00:11.725208600 +165 6 0 days 00:00:09.785763775 +165 7 0 days 00:00:08.734799306 +165 8 0 days 00:00:13.918949305 +165 9 0 days 00:00:12.512219380 +165 10 0 days 00:00:07.287820310 +165 11 0 days 00:00:08.558579444 +165 12 0 days 00:00:05.529728740 +165 13 0 days 00:00:12.466965366 +165 14 0 days 00:00:12.949620753 +165 15 0 days 00:00:05.507732637 +165 16 0 days 00:00:05.462819033 +165 17 0 days 00:00:05.710501410 +165 18 0 days 00:00:05.197165588 +165 19 0 days 00:00:12.968781700 +165 20 0 days 00:00:14.370379010 +165 21 0 days 00:00:17.430662065 +165 22 0 days 00:00:04.764614975 +165 23 0 days 00:00:06.351217534 +165 24 0 days 00:00:05.527220000 +166 1 0 days 00:00:25.021500415 +166 2 0 days 00:00:29.621003986 +166 3 0 days 00:00:20.457924794 +166 4 0 days 00:00:16.077999882 +166 5 0 days 00:00:20.128928782 +166 6 0 days 00:00:30.786990785 +166 7 0 days 00:00:20.069877526 +166 8 0 days 00:00:19.161900537 +166 9 0 days 00:00:16.888130955 +166 10 0 days 00:00:15.725648140 +166 11 0 days 00:00:19.256082660 +166 12 0 days 00:00:30.376362016 +167 1 0 days 00:00:12.068331276 +167 2 0 days 00:00:18.778229598 +167 3 0 days 00:00:08.387965050 +167 4 0 days 00:00:11.234940852 +167 
5 0 days 00:00:18.614830248 +167 6 0 days 00:00:11.671732316 +167 7 0 days 00:00:09.097726334 +167 8 0 days 00:00:17.648547080 +167 9 0 days 00:00:09.594829013 +167 10 0 days 00:00:11.272545986 +167 11 0 days 00:00:10.583786020 +167 12 0 days 00:00:11.680815256 +167 13 0 days 00:00:19.578495478 +167 14 0 days 00:00:09.206336525 +167 15 0 days 00:00:18.280845052 +167 16 0 days 00:00:09.605023553 +167 17 0 days 00:00:07.975753850 +167 18 0 days 00:00:08.468464388 +167 19 0 days 00:00:19.646017975 +168 1 0 days 00:00:00.104800360 +168 2 0 days 00:00:00.084708860 +168 3 0 days 00:00:00.096782600 +168 4 0 days 00:00:00.130824715 +168 5 0 days 00:00:00.120169386 +168 6 0 days 00:00:00.096511113 +168 7 0 days 00:00:00.096580680 +168 8 0 days 00:00:00.084965046 +168 9 0 days 00:00:00.085326046 +168 10 0 days 00:00:00.117751906 +168 11 0 days 00:00:00.085472086 +168 12 0 days 00:00:00.085222826 +168 13 0 days 00:00:00.114214713 +168 14 0 days 00:00:00.096067953 +168 15 0 days 00:00:00.084370366 +168 16 0 days 00:00:00.097563128 +168 17 0 days 00:00:00.118607886 +168 18 0 days 00:00:00.117798233 +168 19 0 days 00:00:00.137657364 +168 20 0 days 00:00:00.106540565 +168 21 0 days 00:00:00.085381706 +168 22 0 days 00:00:00.083560026 +168 23 0 days 00:00:00.085766526 +168 24 0 days 00:00:00.085844773 +168 25 0 days 00:00:00.098859240 +168 26 0 days 00:00:00.119677820 +168 27 0 days 00:00:00.120744273 +168 28 0 days 00:00:00.119166497 +168 29 0 days 00:00:00.134232880 +168 30 0 days 00:00:00.121525033 +168 31 0 days 00:00:00.135491495 +168 32 0 days 00:00:00.119343813 +168 33 0 days 00:00:00.121427466 +168 34 0 days 00:00:00.085347033 +168 35 0 days 00:00:00.138333912 +168 36 0 days 00:00:00.085702306 +168 37 0 days 00:00:00.120256960 +168 38 0 days 00:00:00.096051533 +168 39 0 days 00:00:00.086832806 +168 40 0 days 00:00:00.085319753 +168 41 0 days 00:00:00.082458593 +168 42 0 days 00:00:00.132332100 +168 43 0 days 00:00:00.114698740 +168 44 0 days 00:00:00.086414606 +168 45 0 
days 00:00:00.127258630 +168 46 0 days 00:00:00.097594786 +168 47 0 days 00:00:00.083818193 +168 48 0 days 00:00:00.096542753 +168 49 0 days 00:00:00.106453110 +168 50 0 days 00:00:00.084792193 +168 51 0 days 00:00:00.085264980 +168 52 0 days 00:00:00.085864773 +168 53 0 days 00:00:00.133608040 +168 54 0 days 00:00:00.123739060 +168 55 0 days 00:00:00.122171340 +168 56 0 days 00:00:00.084356660 +168 57 0 days 00:00:00.122068833 +168 58 0 days 00:00:00.098066133 +168 59 0 days 00:00:00.085871246 +168 60 0 days 00:00:00.085805846 +168 61 0 days 00:00:00.086873060 +168 62 0 days 00:00:00.110549030 +168 63 0 days 00:00:00.133219000 +168 64 0 days 00:00:00.119766846 +168 65 0 days 00:00:00.086982746 +168 66 0 days 00:00:00.086022253 +168 67 0 days 00:00:00.098166980 +168 68 0 days 00:00:00.115879973 +168 69 0 days 00:00:00.085391886 +168 70 0 days 00:00:00.098087013 +168 71 0 days 00:00:00.107980005 +168 72 0 days 00:00:00.085330060 +168 73 0 days 00:00:00.086446386 +168 74 0 days 00:00:00.086456213 +168 75 0 days 00:00:00.096884880 +168 76 0 days 00:00:00.115685300 +168 77 0 days 00:00:00.085166493 +168 78 0 days 00:00:00.127631425 +168 79 0 days 00:00:00.131292100 +168 80 0 days 00:00:00.096515253 +168 81 0 days 00:00:00.084435526 +168 82 0 days 00:00:00.119397080 +168 83 0 days 00:00:00.148738028 +168 84 0 days 00:00:00.085532186 +168 85 0 days 00:00:00.096501380 +168 86 0 days 00:00:00.095577886 +168 87 0 days 00:00:00.120384093 +168 88 0 days 00:00:00.097178340 +168 89 0 days 00:00:00.097585780 +168 90 0 days 00:00:00.119833753 +168 91 0 days 00:00:00.094035926 +168 92 0 days 00:00:00.097114106 +168 93 0 days 00:00:00.084304073 +168 94 0 days 00:00:00.085190133 +168 95 0 days 00:00:00.085398120 +168 96 0 days 00:00:00.119450326 +168 97 0 days 00:00:00.120127393 +168 98 0 days 00:00:00.131828440 +168 99 0 days 00:00:00.122333366 +168 100 0 days 00:00:00.120099586 +169 1 0 days 00:00:00.135181130 +169 2 0 days 00:00:00.089794066 +169 3 0 days 00:00:00.100119286 +169 
4 0 days 00:00:00.138394865 +169 5 0 days 00:00:00.085396440 +169 6 0 days 00:00:00.100209380 +169 7 0 days 00:00:00.135188880 +169 8 0 days 00:00:00.124364366 +169 9 0 days 00:00:00.087328953 +169 10 0 days 00:00:00.124812100 +169 11 0 days 00:00:00.135178555 +169 12 0 days 00:00:00.125912033 +169 13 0 days 00:00:00.087268180 +169 14 0 days 00:00:00.100284393 +169 15 0 days 00:00:00.115676480 +169 16 0 days 00:00:00.108708170 +169 17 0 days 00:00:00.153811497 +169 18 0 days 00:00:00.098897920 +169 19 0 days 00:00:00.086278220 +169 20 0 days 00:00:00.108593220 +169 21 0 days 00:00:00.124783586 +169 22 0 days 00:00:00.122112813 +169 23 0 days 00:00:00.086539080 +169 24 0 days 00:00:00.116831348 +169 25 0 days 00:00:00.101415166 +169 26 0 days 00:00:00.100889493 +169 27 0 days 00:00:00.143364200 +169 28 0 days 00:00:00.126494406 +169 29 0 days 00:00:00.087363300 +169 30 0 days 00:00:00.137949435 +169 31 0 days 00:00:00.125910680 +169 32 0 days 00:00:00.110603525 +169 33 0 days 00:00:00.087030006 +169 34 0 days 00:00:00.126476920 +169 35 0 days 00:00:00.146761020 +169 36 0 days 00:00:00.145026100 +169 37 0 days 00:00:00.137008085 +169 38 0 days 00:00:00.121750493 +169 39 0 days 00:00:00.123253320 +169 40 0 days 00:00:00.132774385 +169 41 0 days 00:00:00.099218860 +169 42 0 days 00:00:00.111041875 +169 43 0 days 00:00:00.099955193 +169 44 0 days 00:00:00.144053156 +169 45 0 days 00:00:00.094300966 +169 46 0 days 00:00:00.114739008 +169 47 0 days 00:00:00.091862713 +169 48 0 days 00:00:00.086099840 +169 49 0 days 00:00:00.109240120 +169 50 0 days 00:00:00.100263140 +169 51 0 days 00:00:00.086845600 +169 52 0 days 00:00:00.100469500 +169 53 0 days 00:00:00.152907896 +169 54 0 days 00:00:00.087523940 +169 55 0 days 00:00:00.087169040 +169 56 0 days 00:00:00.086953946 +169 57 0 days 00:00:00.127903766 +169 58 0 days 00:00:00.086789566 +169 59 0 days 00:00:00.086762926 +169 60 0 days 00:00:00.137070725 +169 61 0 days 00:00:00.100084706 +169 62 0 days 00:00:00.099073504 +169 
63 0 days 00:00:00.087603000 +169 64 0 days 00:00:00.140893955 +169 65 0 days 00:00:00.124434093 +169 66 0 days 00:00:00.087918960 +169 67 0 days 00:00:00.126388313 +169 68 0 days 00:00:00.101153973 +169 69 0 days 00:00:00.124648993 +169 70 0 days 00:00:00.086595026 +169 71 0 days 00:00:00.125697460 +169 72 0 days 00:00:00.101203093 +169 73 0 days 00:00:00.134717060 +169 74 0 days 00:00:00.101324980 +169 75 0 days 00:00:00.086618506 +169 76 0 days 00:00:00.122555740 +169 77 0 days 00:00:00.123319153 +169 78 0 days 00:00:00.101286493 +169 79 0 days 00:00:00.100491780 +169 80 0 days 00:00:00.114592808 +169 81 0 days 00:00:00.086778613 +169 82 0 days 00:00:00.100315266 +169 83 0 days 00:00:00.098557146 +169 84 0 days 00:00:00.087546913 +169 85 0 days 00:00:00.100494600 +169 86 0 days 00:00:00.148526150 +169 87 0 days 00:00:00.086603246 +169 88 0 days 00:00:00.138267355 +169 89 0 days 00:00:00.090531160 +169 90 0 days 00:00:00.087466866 +169 91 0 days 00:00:00.110472465 +169 92 0 days 00:00:00.099629893 +169 93 0 days 00:00:00.125045580 +169 94 0 days 00:00:00.125107166 +169 95 0 days 00:00:00.087892526 +169 96 0 days 00:00:00.086580060 +169 97 0 days 00:00:00.099420373 +169 98 0 days 00:00:00.099701933 +169 99 0 days 00:00:00.099486760 +169 100 0 days 00:00:00.108947905 +170 1 0 days 00:00:00.050389000 +170 2 0 days 00:00:00.085801280 +170 3 0 days 00:00:00.072001660 +170 4 0 days 00:00:00.070643240 +170 5 0 days 00:00:00.049871926 +170 6 0 days 00:00:00.049689640 +170 7 0 days 00:00:00.050060626 +170 8 0 days 00:00:00.050091126 +170 9 0 days 00:00:00.049671173 +170 10 0 days 00:00:00.052963370 +170 11 0 days 00:00:00.080087052 +170 12 0 days 00:00:00.056802280 +170 13 0 days 00:00:00.049485860 +170 14 0 days 00:00:00.070160420 +170 15 0 days 00:00:00.050668820 +170 16 0 days 00:00:00.070145086 +170 17 0 days 00:00:00.080712176 +170 18 0 days 00:00:00.050791593 +170 19 0 days 00:00:00.063032500 +170 20 0 days 00:00:00.049730293 +170 21 0 days 00:00:00.053231670 +170 
22 0 days 00:00:00.056517646 +170 23 0 days 00:00:00.055736026 +170 24 0 days 00:00:00.049128020 +170 25 0 days 00:00:00.049255940 +170 26 0 days 00:00:00.070072853 +170 27 0 days 00:00:00.075998095 +170 28 0 days 00:00:00.055715366 +170 29 0 days 00:00:00.071015646 +170 30 0 days 00:00:00.060471555 +170 31 0 days 00:00:00.049518346 +170 32 0 days 00:00:00.075454720 +170 33 0 days 00:00:00.082770120 +170 34 0 days 00:00:00.055049746 +170 35 0 days 00:00:00.058813540 +170 36 0 days 00:00:00.056338373 +170 37 0 days 00:00:00.049628460 +170 38 0 days 00:00:00.048199453 +170 39 0 days 00:00:00.049499526 +170 40 0 days 00:00:00.082771588 +170 41 0 days 00:00:00.050287146 +170 42 0 days 00:00:00.077905436 +170 43 0 days 00:00:00.068343340 +170 44 0 days 00:00:00.074482760 +170 45 0 days 00:00:00.068528440 +170 46 0 days 00:00:00.068889593 +170 47 0 days 00:00:00.049829273 +170 48 0 days 00:00:00.068485660 +170 49 0 days 00:00:00.068612253 +170 50 0 days 00:00:00.067659760 +170 51 0 days 00:00:00.050836053 +170 52 0 days 00:00:00.056854186 +170 53 0 days 00:00:00.069921773 +170 54 0 days 00:00:00.049030746 +170 55 0 days 00:00:00.060205630 +170 56 0 days 00:00:00.056228686 +170 57 0 days 00:00:00.060101660 +170 58 0 days 00:00:00.080131740 +170 59 0 days 00:00:00.055634526 +170 60 0 days 00:00:00.066946280 +170 61 0 days 00:00:00.049058100 +170 62 0 days 00:00:00.076882845 +170 63 0 days 00:00:00.049801933 +170 64 0 days 00:00:00.068167253 +170 65 0 days 00:00:00.056539346 +170 66 0 days 00:00:00.049993286 +170 67 0 days 00:00:00.059329170 +170 68 0 days 00:00:00.067339573 +170 69 0 days 00:00:00.068883000 +170 70 0 days 00:00:00.069526626 +170 71 0 days 00:00:00.055450280 +170 72 0 days 00:00:00.055782853 +170 73 0 days 00:00:00.069992600 +170 74 0 days 00:00:00.056082600 +170 75 0 days 00:00:00.049801566 +170 76 0 days 00:00:00.055891333 +170 77 0 days 00:00:00.068440766 +170 78 0 days 00:00:00.054729220 +170 79 0 days 00:00:00.049772046 +170 80 0 days 
00:00:00.066246757 +170 81 0 days 00:00:00.080487288 +170 82 0 days 00:00:00.049976240 +170 83 0 days 00:00:00.079947312 +170 84 0 days 00:00:00.050808060 +170 85 0 days 00:00:00.049606300 +170 86 0 days 00:00:00.056947066 +170 87 0 days 00:00:00.057402500 +170 88 0 days 00:00:00.056506513 +170 89 0 days 00:00:00.071510506 +170 90 0 days 00:00:00.053638780 +170 91 0 days 00:00:00.056609333 +170 92 0 days 00:00:00.079893324 +170 93 0 days 00:00:00.056362026 +170 94 0 days 00:00:00.050546286 +170 95 0 days 00:00:00.056938706 +170 96 0 days 00:00:00.050532100 +170 97 0 days 00:00:00.069997613 +170 98 0 days 00:00:00.073675640 +170 99 0 days 00:00:00.049664180 +170 100 0 days 00:00:00.050267860 +171 1 0 days 00:00:00.057481840 +171 2 0 days 00:00:00.057761393 +171 3 0 days 00:00:00.058306046 +171 4 0 days 00:00:00.057365313 +171 5 0 days 00:00:00.056889113 +171 6 0 days 00:00:00.050461746 +171 7 0 days 00:00:00.071841373 +171 8 0 days 00:00:00.064426912 +171 9 0 days 00:00:00.050473940 +171 10 0 days 00:00:00.050567426 +171 11 0 days 00:00:00.057131153 +171 12 0 days 00:00:00.057420546 +171 13 0 days 00:00:00.071758433 +171 14 0 days 00:00:00.050305300 +171 15 0 days 00:00:00.080706072 +171 16 0 days 00:00:00.078944135 +171 17 0 days 00:00:00.050258373 +171 18 0 days 00:00:00.062028770 +171 19 0 days 00:00:00.050556806 +171 20 0 days 00:00:00.057224680 +171 21 0 days 00:00:00.052176933 +171 22 0 days 00:00:00.050586666 +171 23 0 days 00:00:00.050437013 +171 24 0 days 00:00:00.049986660 +171 25 0 days 00:00:00.071823906 +171 26 0 days 00:00:00.057730293 +171 27 0 days 00:00:00.051443793 +171 28 0 days 00:00:00.071260500 +171 29 0 days 00:00:00.057686526 +171 30 0 days 00:00:00.077673945 +171 31 0 days 00:00:00.072547020 +171 32 0 days 00:00:00.071861933 +171 33 0 days 00:00:00.062877960 +171 34 0 days 00:00:00.051106073 +171 35 0 days 00:00:00.057699033 +171 36 0 days 00:00:00.051294740 +171 37 0 days 00:00:00.051199233 +171 38 0 days 00:00:00.051861606 +171 39 0 days 
00:00:00.057741046 +171 40 0 days 00:00:00.057826093 +171 41 0 days 00:00:00.057794600 +171 42 0 days 00:00:00.050431020 +171 43 0 days 00:00:00.051296713 +171 44 0 days 00:00:00.071022486 +171 45 0 days 00:00:00.051226040 +171 46 0 days 00:00:00.051516586 +171 47 0 days 00:00:00.051186473 +171 48 0 days 00:00:00.051285973 +171 49 0 days 00:00:00.070336706 +171 50 0 days 00:00:00.051638693 +171 51 0 days 00:00:00.083165813 +171 52 0 days 00:00:00.050790760 +171 53 0 days 00:00:00.058451180 +171 54 0 days 00:00:00.057765673 +171 55 0 days 00:00:00.050681606 +171 56 0 days 00:00:00.057549260 +171 57 0 days 00:00:00.051670886 +171 58 0 days 00:00:00.050488713 +171 59 0 days 00:00:00.057014413 +171 60 0 days 00:00:00.050632973 +171 61 0 days 00:00:00.051000726 +171 62 0 days 00:00:00.082903284 +171 63 0 days 00:00:00.050859506 +171 64 0 days 00:00:00.058098780 +171 65 0 days 00:00:00.050775693 +171 66 0 days 00:00:00.057247840 +171 67 0 days 00:00:00.072090660 +171 68 0 days 00:00:00.072837666 +171 69 0 days 00:00:00.049809960 +171 70 0 days 00:00:00.056840886 +171 71 0 days 00:00:00.057139946 +171 72 0 days 00:00:00.057871320 +171 73 0 days 00:00:00.057792180 +171 74 0 days 00:00:00.051052066 +171 75 0 days 00:00:00.051431220 +171 76 0 days 00:00:00.057870426 +171 77 0 days 00:00:00.049882473 +171 78 0 days 00:00:00.051467226 +171 79 0 days 00:00:00.051771766 +171 80 0 days 00:00:00.051286353 +171 81 0 days 00:00:00.056845896 +171 82 0 days 00:00:00.056914066 +171 83 0 days 00:00:00.065034780 +171 84 0 days 00:00:00.071017286 +171 85 0 days 00:00:00.057385840 +171 86 0 days 00:00:00.058322653 +171 87 0 days 00:00:00.056891826 +171 88 0 days 00:00:00.057133860 +171 89 0 days 00:00:00.050998500 +171 90 0 days 00:00:00.066546966 +171 91 0 days 00:00:00.051346193 +171 92 0 days 00:00:00.051240873 +171 93 0 days 00:00:00.057574626 +171 94 0 days 00:00:00.058341120 +171 95 0 days 00:00:00.063838780 +171 96 0 days 00:00:00.072760626 +171 97 0 days 00:00:00.069903240 +171 98 
0 days 00:00:00.061723910 +171 99 0 days 00:00:00.077547090 +171 100 0 days 00:00:00.085462246 +172 3 0 days 00:00:00.110440756 +172 4 0 days 00:00:00.169719435 +172 5 0 days 00:00:00.109632740 +172 6 0 days 00:00:00.112110762 +172 8 0 days 00:00:00.111370007 +172 9 0 days 00:00:00.116527463 +172 11 0 days 00:00:00.114665833 +172 13 0 days 00:00:00.126151610 +172 14 0 days 00:00:00.132067601 +172 16 0 days 00:00:00.104868847 +172 17 0 days 00:00:00.120336267 +172 18 0 days 00:00:00.166723446 +172 20 0 days 00:00:00.156870108 +172 21 0 days 00:00:00.111456836 +172 22 0 days 00:00:00.110479480 +172 23 0 days 00:00:00.084096573 +172 24 0 days 00:00:00.165699025 +172 25 0 days 00:00:00.168906153 +172 26 0 days 00:00:00.161744572 +172 27 0 days 00:00:00.110125741 +172 28 0 days 00:00:00.121033980 +172 29 0 days 00:00:00.109542548 +172 30 0 days 00:00:00.110921326 +172 31 0 days 00:00:00.185168361 +172 33 0 days 00:00:00.161040683 +172 34 0 days 00:00:00.132163468 +172 39 0 days 00:00:00.108132960 +172 40 0 days 00:00:00.085744300 +172 41 0 days 00:00:00.117607952 +172 42 0 days 00:00:00.150940297 +172 45 0 days 00:00:00.114494164 +172 46 0 days 00:00:00.107375038 +172 47 0 days 00:00:00.165632700 +172 48 0 days 00:00:00.172747480 +172 49 0 days 00:00:00.104992603 +172 50 0 days 00:00:00.122737732 +172 51 0 days 00:00:00.108422196 +172 52 0 days 00:00:00.160728485 +172 54 0 days 00:00:00.130383741 +172 58 0 days 00:00:00.156605005 +172 59 0 days 00:00:00.151894575 +172 60 0 days 00:00:00.113747946 +172 63 0 days 00:00:00.147348823 +172 65 0 days 00:00:00.116359453 +172 67 0 days 00:00:00.116272975 +172 68 0 days 00:00:00.162873504 +172 69 0 days 00:00:00.120477038 +172 70 0 days 00:00:00.127231501 +172 71 0 days 00:00:00.085820806 +172 72 0 days 00:00:00.139494935 +172 73 0 days 00:00:00.169586705 +172 74 0 days 00:00:00.125426506 +172 75 0 days 00:00:00.127301696 +172 76 0 days 00:00:00.130858046 +172 78 0 days 00:00:00.114881843 +172 80 0 days 00:00:00.109397345 +172 
81 0 days 00:00:00.156217090 +172 82 0 days 00:00:00.162809142 +172 83 0 days 00:00:00.115466136 +172 85 0 days 00:00:00.132385421 +172 86 0 days 00:00:00.128682047 +172 88 0 days 00:00:00.124559307 +172 90 0 days 00:00:00.173906253 +172 91 0 days 00:00:00.151268262 +172 96 0 days 00:00:00.121297494 +172 97 0 days 00:00:00.127878444 +172 98 0 days 00:00:00.112426083 +172 99 0 days 00:00:00.119290497 +173 2 0 days 00:00:00.087730896 +173 3 0 days 00:00:00.094019464 +173 4 0 days 00:00:00.083857420 +173 5 0 days 00:00:00.092301369 +173 6 0 days 00:00:00.087782336 +173 7 0 days 00:00:00.074744710 +173 8 0 days 00:00:00.085590708 +173 10 0 days 00:00:00.086228326 +173 11 0 days 00:00:00.065746154 +173 12 0 days 00:00:00.085723336 +173 13 0 days 00:00:00.067607812 +173 15 0 days 00:00:00.087431091 +173 17 0 days 00:00:00.086604210 +173 18 0 days 00:00:00.062834570 +173 19 0 days 00:00:00.058736750 +173 20 0 days 00:00:00.065260215 +173 21 0 days 00:00:00.057814408 +173 22 0 days 00:00:00.069535640 +173 24 0 days 00:00:00.058900720 +173 25 0 days 00:00:00.060021834 +173 26 0 days 00:00:00.087513617 +173 27 0 days 00:00:00.066472602 +173 28 0 days 00:00:00.059491890 +173 29 0 days 00:00:00.049681786 +173 30 0 days 00:00:00.068461494 +173 31 0 days 00:00:00.085635528 +173 33 0 days 00:00:00.074110317 +173 34 0 days 00:00:00.089537765 +173 35 0 days 00:00:00.066696360 +173 36 0 days 00:00:00.077997082 +173 38 0 days 00:00:00.063151920 +173 40 0 days 00:00:00.070485077 +173 42 0 days 00:00:00.082406236 +173 44 0 days 00:00:00.057071404 +173 45 0 days 00:00:00.068990944 +173 46 0 days 00:00:00.086947000 +173 47 0 days 00:00:00.087170765 +173 49 0 days 00:00:00.061300458 +173 51 0 days 00:00:00.068956016 +173 52 0 days 00:00:00.071022298 +173 53 0 days 00:00:00.081323983 +173 54 0 days 00:00:00.063009344 +173 55 0 days 00:00:00.062650572 +173 59 0 days 00:00:00.061259658 +173 61 0 days 00:00:00.082110982 +173 62 0 days 00:00:00.060803003 +173 63 0 days 00:00:00.071691241 +173 
65 0 days 00:00:00.098600712 +173 66 0 days 00:00:00.071810103 +173 67 0 days 00:00:00.073850220 +173 68 0 days 00:00:00.062480130 +173 69 0 days 00:00:00.075164267 +173 70 0 days 00:00:00.061544910 +173 71 0 days 00:00:00.094564383 +173 72 0 days 00:00:00.069187093 +173 74 0 days 00:00:00.062743932 +173 75 0 days 00:00:00.070332708 +173 76 0 days 00:00:00.086136710 +173 77 0 days 00:00:00.085703766 +173 78 0 days 00:00:00.098235161 +173 79 0 days 00:00:00.070631668 +173 81 0 days 00:00:00.061402912 +173 83 0 days 00:00:00.061645158 +173 84 0 days 00:00:00.064480980 +173 86 0 days 00:00:00.062158452 +173 87 0 days 00:00:00.088261281 +173 88 0 days 00:00:00.082962785 +173 89 0 days 00:00:00.072911880 +173 91 0 days 00:00:00.067237186 +173 93 0 days 00:00:00.085890429 +173 94 0 days 00:00:00.083450522 +173 95 0 days 00:00:00.069375608 +173 96 0 days 00:00:00.085891672 +173 97 0 days 00:00:00.049974186 +173 98 0 days 00:00:00.062355601 +173 99 0 days 00:00:00.058683260 +173 100 0 days 00:00:00.061378889 +174 1 0 days 00:00:00.602710550 +174 2 0 days 00:00:00.331211973 +174 3 0 days 00:00:00.255391593 +174 4 0 days 00:00:00.225607753 +174 5 0 days 00:00:00.520457293 +174 6 0 days 00:00:00.609673766 +174 7 0 days 00:00:00.209189846 +174 8 0 days 00:00:00.614654100 +174 9 0 days 00:00:00.379521080 +174 10 0 days 00:00:00.209366240 +174 11 0 days 00:00:00.332947053 +174 12 0 days 00:00:00.212660960 +174 13 0 days 00:00:00.681734710 +174 14 0 days 00:00:00.600718733 +174 15 0 days 00:00:00.608490566 +174 16 0 days 00:00:00.220931053 +174 17 0 days 00:00:00.590539626 +174 18 0 days 00:00:00.585126373 +174 19 0 days 00:00:00.214967573 +174 20 0 days 00:00:00.382441815 +174 21 0 days 00:00:00.277320462 +174 22 0 days 00:00:00.195705400 +174 23 0 days 00:00:00.495449996 +174 24 0 days 00:00:00.389886728 +174 25 0 days 00:00:00.278691337 +174 26 0 days 00:00:00.246175596 +174 27 0 days 00:00:00.399152432 +174 28 0 days 00:00:00.359134605 +174 29 0 days 00:00:00.195245420 +174 
30 0 days 00:00:00.611922380 +174 31 0 days 00:00:00.205107526 +174 32 0 days 00:00:00.256426580 +174 33 0 days 00:00:00.683147400 +174 34 0 days 00:00:00.581561080 +174 35 0 days 00:00:00.588546880 +174 36 0 days 00:00:00.738997512 +174 37 0 days 00:00:00.204452386 +174 38 0 days 00:00:00.244970550 +174 39 0 days 00:00:00.392685856 +174 40 0 days 00:00:00.203970646 +174 41 0 days 00:00:00.363777730 +174 42 0 days 00:00:00.198000926 +174 43 0 days 00:00:00.232352192 +174 44 0 days 00:00:00.304039722 +174 45 0 days 00:00:00.425565156 +174 46 0 days 00:00:00.682736840 +174 47 0 days 00:00:00.567396840 +174 48 0 days 00:00:00.382232872 +174 49 0 days 00:00:00.214695746 +174 50 0 days 00:00:00.386947270 +174 51 0 days 00:00:00.423948490 +174 52 0 days 00:00:00.581798546 +174 53 0 days 00:00:00.375951440 +174 54 0 days 00:00:00.221137826 +174 55 0 days 00:00:00.220353146 +174 56 0 days 00:00:00.195324706 +174 57 0 days 00:00:00.210225513 +174 58 0 days 00:00:00.599878400 +174 59 0 days 00:00:00.520418734 +174 60 0 days 00:00:00.372595670 +174 61 0 days 00:00:00.580059953 +174 62 0 days 00:00:00.302744224 +174 63 0 days 00:00:00.617706993 +174 64 0 days 00:00:00.594660073 +174 65 0 days 00:00:00.660919010 +174 66 0 days 00:00:00.354353125 +174 67 0 days 00:00:00.596097833 +174 68 0 days 00:00:00.369094905 +174 69 0 days 00:00:00.363174885 +174 70 0 days 00:00:00.235225253 +174 71 0 days 00:00:00.190735073 +174 72 0 days 00:00:00.200213433 +174 73 0 days 00:00:00.357173665 +174 74 0 days 00:00:00.342777580 +174 75 0 days 00:00:00.355117500 +174 76 0 days 00:00:00.400092320 +174 77 0 days 00:00:00.683081255 +174 78 0 days 00:00:00.241236460 +174 79 0 days 00:00:00.595808500 +174 80 0 days 00:00:00.268457353 +174 81 0 days 00:00:00.393541192 +174 82 0 days 00:00:00.457633176 +174 83 0 days 00:00:00.219029453 +174 84 0 days 00:00:00.363916690 +174 85 0 days 00:00:00.198668013 +174 86 0 days 00:00:00.671985795 +174 87 0 days 00:00:00.622429466 +174 88 0 days 
00:00:00.385251020 +174 89 0 days 00:00:00.429984045 +174 90 0 days 00:00:00.616365873 +174 91 0 days 00:00:00.245292312 +174 92 0 days 00:00:00.412324395 +174 93 0 days 00:00:00.201218860 +174 94 0 days 00:00:00.589399880 +174 95 0 days 00:00:00.621090960 +174 96 0 days 00:00:00.221810773 +174 97 0 days 00:00:00.266815550 +174 98 0 days 00:00:00.243696792 +174 99 0 days 00:00:00.234988365 +174 100 0 days 00:00:00.625357833 +175 1 0 days 00:00:00.237198105 +175 2 0 days 00:00:00.260483626 +175 3 0 days 00:00:00.384972320 +175 4 0 days 00:00:00.201425100 +175 5 0 days 00:00:00.620520220 +175 6 0 days 00:00:00.409292988 +175 7 0 days 00:00:00.217353826 +175 8 0 days 00:00:00.226928733 +175 9 0 days 00:00:00.384653890 +175 10 0 days 00:00:00.618260980 +175 11 0 days 00:00:00.628578220 +175 12 0 days 00:00:00.702594950 +175 13 0 days 00:00:00.242698586 +175 14 0 days 00:00:00.638827113 +175 15 0 days 00:00:00.210084686 +175 16 0 days 00:00:00.223186913 +175 17 0 days 00:00:00.611476226 +175 18 0 days 00:00:00.202705766 +175 19 0 days 00:00:00.416646425 +175 20 0 days 00:00:00.411999760 +175 21 0 days 00:00:00.424963152 +175 22 0 days 00:00:00.211373246 +175 23 0 days 00:00:00.232547693 +175 24 0 days 00:00:00.265212983 +175 25 0 days 00:00:00.223859166 +175 26 0 days 00:00:00.649569920 +175 27 0 days 00:00:00.409606825 +175 28 0 days 00:00:00.726311415 +175 29 0 days 00:00:00.542778180 +175 30 0 days 00:00:00.394041765 +175 31 0 days 00:00:00.638516040 +175 32 0 days 00:00:00.219676873 +175 33 0 days 00:00:00.253165708 +175 34 0 days 00:00:00.390629240 +175 35 0 days 00:00:00.628670626 +175 36 0 days 00:00:00.258255460 +175 37 0 days 00:00:00.211710953 +175 38 0 days 00:00:00.217880180 +175 39 0 days 00:00:00.633621180 +175 40 0 days 00:00:00.283167350 +175 41 0 days 00:00:00.262441440 +175 42 0 days 00:00:00.432482660 +175 43 0 days 00:00:00.343799266 +175 44 0 days 00:00:00.246623148 +175 45 0 days 00:00:00.251716080 +175 46 0 days 00:00:00.225286513 +175 47 0 days 
00:00:00.395932220 +175 48 0 days 00:00:00.718326140 +175 49 0 days 00:00:00.690001895 +175 50 0 days 00:00:00.203540573 +175 51 0 days 00:00:00.372659930 +175 52 0 days 00:00:00.597440826 +175 53 0 days 00:00:00.604132006 +175 54 0 days 00:00:00.343762973 +175 55 0 days 00:00:00.624099800 +175 56 0 days 00:00:00.636634126 +175 57 0 days 00:00:00.378110920 +175 58 0 days 00:00:00.622350953 +175 59 0 days 00:00:00.379563575 +175 60 0 days 00:00:00.267804616 +175 61 0 days 00:00:00.343050253 +175 62 0 days 00:00:00.204138660 +175 63 0 days 00:00:00.403860628 +175 64 0 days 00:00:00.645105933 +175 65 0 days 00:00:00.459359260 +175 66 0 days 00:00:00.205705753 +175 67 0 days 00:00:00.212618753 +175 68 0 days 00:00:00.606384860 +175 69 0 days 00:00:00.340272900 +175 70 0 days 00:00:00.365726380 +175 71 0 days 00:00:00.425642420 +175 72 0 days 00:00:00.619450053 +175 73 0 days 00:00:00.697700350 +175 74 0 days 00:00:00.604570653 +175 75 0 days 00:00:00.378154315 +175 76 0 days 00:00:00.626553413 +175 77 0 days 00:00:00.388200960 +175 78 0 days 00:00:00.252010120 +175 79 0 days 00:00:00.725546095 +175 80 0 days 00:00:00.725274535 +175 81 0 days 00:00:00.618283386 +175 82 0 days 00:00:00.229230540 +175 83 0 days 00:00:00.373085610 +175 84 0 days 00:00:00.623142473 +175 85 0 days 00:00:00.391591140 +175 86 0 days 00:00:00.616662686 +175 87 0 days 00:00:00.410284680 +175 88 0 days 00:00:00.608854380 +175 89 0 days 00:00:00.611575780 +175 90 0 days 00:00:00.244020732 +175 91 0 days 00:00:00.399339395 +175 92 0 days 00:00:00.395671040 +175 93 0 days 00:00:00.457093256 +175 94 0 days 00:00:00.209711106 +175 95 0 days 00:00:00.685504620 +175 96 0 days 00:00:00.592284933 +175 97 0 days 00:00:00.220879426 +175 98 0 days 00:00:00.249188986 +175 99 0 days 00:00:00.333221526 +175 100 0 days 00:00:00.253745430 +176 1 0 days 00:00:00.128216940 +176 2 0 days 00:00:00.318401473 +176 3 0 days 00:00:00.126718993 +176 4 0 days 00:00:00.148591912 +176 5 0 days 00:00:00.363819900 +176 6 0 
days 00:00:00.110479886 +176 7 0 days 00:00:00.178650700 +176 8 0 days 00:00:00.202996780 +176 9 0 days 00:00:00.315863526 +176 10 0 days 00:00:00.314518586 +176 11 0 days 00:00:00.175561220 +176 12 0 days 00:00:00.126431615 +176 13 0 days 00:00:00.164019006 +176 14 0 days 00:00:00.179988126 +176 15 0 days 00:00:00.106103826 +176 16 0 days 00:00:00.133286466 +176 17 0 days 00:00:00.308305740 +176 18 0 days 00:00:00.177197593 +176 19 0 days 00:00:00.110263306 +176 20 0 days 00:00:00.308621026 +176 21 0 days 00:00:00.188372860 +176 22 0 days 00:00:00.227825543 +176 23 0 days 00:00:00.120517010 +176 24 0 days 00:00:00.323971866 +176 25 0 days 00:00:00.180328853 +176 26 0 days 00:00:00.194529740 +176 27 0 days 00:00:00.259636805 +176 28 0 days 00:00:00.128169436 +176 29 0 days 00:00:00.117082485 +176 30 0 days 00:00:00.126114668 +176 31 0 days 00:00:00.175566240 +176 32 0 days 00:00:00.134709665 +176 33 0 days 00:00:00.206421940 +176 34 0 days 00:00:00.197076208 +176 35 0 days 00:00:00.198113108 +176 36 0 days 00:00:00.199433256 +176 37 0 days 00:00:00.300941580 +176 38 0 days 00:00:00.371082260 +176 39 0 days 00:00:00.305842486 +176 40 0 days 00:00:00.167209453 +176 41 0 days 00:00:00.128385866 +176 42 0 days 00:00:00.177271855 +176 43 0 days 00:00:00.183408760 +176 44 0 days 00:00:00.201277560 +176 45 0 days 00:00:00.095977904 +176 46 0 days 00:00:00.108089366 +176 47 0 days 00:00:00.106898100 +176 48 0 days 00:00:00.104568386 +176 49 0 days 00:00:00.190073093 +176 50 0 days 00:00:00.189990300 +176 51 0 days 00:00:00.116291020 +176 52 0 days 00:00:00.167021553 +176 53 0 days 00:00:00.337947965 +176 54 0 days 00:00:00.326234825 +176 55 0 days 00:00:00.188938400 +176 56 0 days 00:00:00.361083728 +176 57 0 days 00:00:00.363931196 +176 58 0 days 00:00:00.333275795 +176 59 0 days 00:00:00.280445367 +176 60 0 days 00:00:00.175706440 +176 61 0 days 00:00:00.113586365 +176 62 0 days 00:00:00.281987006 +176 63 0 days 00:00:00.305258913 +176 64 0 days 00:00:00.108837900 +176 
65 0 days 00:00:00.179252993 +176 66 0 days 00:00:00.119130313 +176 67 0 days 00:00:00.125214565 +176 68 0 days 00:00:00.166600173 +176 69 0 days 00:00:00.196948052 +176 70 0 days 00:00:00.117494266 +176 71 0 days 00:00:00.228156223 +176 72 0 days 00:00:00.130630624 +176 73 0 days 00:00:00.193817024 +176 74 0 days 00:00:00.330544375 +176 75 0 days 00:00:00.185188930 +176 76 0 days 00:00:00.109111386 +176 77 0 days 00:00:00.124943865 +176 78 0 days 00:00:00.179722460 +176 79 0 days 00:00:00.188395486 +176 80 0 days 00:00:00.160220626 +176 81 0 days 00:00:00.169318186 +176 82 0 days 00:00:00.106850973 +176 83 0 days 00:00:00.102918173 +176 84 0 days 00:00:00.180126106 +176 85 0 days 00:00:00.103133140 +176 86 0 days 00:00:00.345116365 +176 87 0 days 00:00:00.291706546 +176 88 0 days 00:00:00.328054890 +176 89 0 days 00:00:00.181903960 +176 90 0 days 00:00:00.211233993 +176 91 0 days 00:00:00.100035220 +176 92 0 days 00:00:00.232797720 +176 93 0 days 00:00:00.188099875 +176 94 0 days 00:00:00.292196440 +176 95 0 days 00:00:00.303459986 +176 96 0 days 00:00:00.187009100 +176 97 0 days 00:00:00.106848686 +176 98 0 days 00:00:00.170539793 +176 99 0 days 00:00:00.377338744 +176 100 0 days 00:00:00.116161455 +177 1 0 days 00:00:00.122859526 +177 2 0 days 00:00:00.114174133 +177 3 0 days 00:00:00.424126393 +177 4 0 days 00:00:00.108894973 +177 5 0 days 00:00:00.347846085 +177 6 0 days 00:00:00.197212440 +177 7 0 days 00:00:00.313307433 +177 8 0 days 00:00:00.399847862 +177 9 0 days 00:00:00.312816200 +177 10 0 days 00:00:00.378323144 +177 11 0 days 00:00:00.114248273 +177 12 0 days 00:00:00.224275688 +177 13 0 days 00:00:00.207156512 +177 14 0 days 00:00:00.371029740 +177 15 0 days 00:00:00.344723555 +177 16 0 days 00:00:00.344140715 +177 17 0 days 00:00:00.113947993 +177 18 0 days 00:00:00.111470700 +177 19 0 days 00:00:00.185434086 +177 20 0 days 00:00:00.126535324 +177 21 0 days 00:00:00.192476280 +177 22 0 days 00:00:00.212838860 +177 23 0 days 00:00:00.110662600 +177 
24 0 days 00:00:00.204887924 +177 25 0 days 00:00:00.306579826 +177 26 0 days 00:00:00.217063972 +177 27 0 days 00:00:00.173814280 +177 28 0 days 00:00:00.306966626 +177 29 0 days 00:00:00.305212520 +177 30 0 days 00:00:00.176397873 +177 31 0 days 00:00:00.300856800 +177 32 0 days 00:00:00.105950313 +177 33 0 days 00:00:00.310301953 +177 34 0 days 00:00:00.124305006 +177 35 0 days 00:00:00.228618206 +177 36 0 days 00:00:00.145471127 +177 37 0 days 00:00:00.178439680 +177 38 0 days 00:00:00.209334296 +177 39 0 days 00:00:00.247378451 +177 40 0 days 00:00:00.111129280 +177 41 0 days 00:00:00.355864390 +177 42 0 days 00:00:00.186283626 +177 43 0 days 00:00:00.389074380 +177 44 0 days 00:00:00.133630400 +177 45 0 days 00:00:00.208579216 +177 46 0 days 00:00:00.403114430 +177 47 0 days 00:00:00.137847782 +177 48 0 days 00:00:00.105405613 +177 49 0 days 00:00:00.180514213 +177 50 0 days 00:00:00.312922766 +177 51 0 days 00:00:00.177471473 +177 52 0 days 00:00:00.178897653 +177 53 0 days 00:00:00.211970176 +177 54 0 days 00:00:00.354587835 +177 55 0 days 00:00:00.350244975 +177 56 0 days 00:00:00.310130600 +177 57 0 days 00:00:00.348704565 +177 58 0 days 00:00:00.224096786 +177 59 0 days 00:00:00.356004770 +177 60 0 days 00:00:00.228170370 +177 61 0 days 00:00:00.383382948 +177 62 0 days 00:00:00.374557460 +177 63 0 days 00:00:00.211352952 +177 64 0 days 00:00:00.220956988 +177 65 0 days 00:00:00.127385260 +177 66 0 days 00:00:00.185343800 +177 67 0 days 00:00:00.235612636 +177 68 0 days 00:00:00.314690413 +177 69 0 days 00:00:00.122709410 +177 70 0 days 00:00:00.132077093 +177 71 0 days 00:00:00.373224076 +177 72 0 days 00:00:00.129777560 +177 73 0 days 00:00:00.352126804 +177 74 0 days 00:00:00.103069086 +177 75 0 days 00:00:00.110727160 +177 76 0 days 00:00:00.330094080 +177 77 0 days 00:00:00.191759380 +177 78 0 days 00:00:00.219594068 +177 79 0 days 00:00:00.106175033 +177 80 0 days 00:00:00.356981890 +177 81 0 days 00:00:00.125409486 +177 82 0 days 
00:00:00.202631040 +177 83 0 days 00:00:00.221672710 +177 84 0 days 00:00:00.173036363 +177 85 0 days 00:00:00.230534004 +177 86 0 days 00:00:00.314304340 +177 87 0 days 00:00:00.115130086 +177 88 0 days 00:00:00.316665106 +177 89 0 days 00:00:00.178634253 +177 90 0 days 00:00:00.120512033 +177 91 0 days 00:00:00.118670666 +177 92 0 days 00:00:00.352326555 +177 93 0 days 00:00:00.122917880 +177 94 0 days 00:00:00.177010700 +177 95 0 days 00:00:00.194766910 +177 96 0 days 00:00:00.110668893 +177 97 0 days 00:00:00.321008640 +177 98 0 days 00:00:00.109785580 +177 99 0 days 00:00:00.134539195 +177 100 0 days 00:00:00.357354740 +178 1 0 days 00:00:00.253858906 +178 2 0 days 00:00:00.473459664 +178 3 0 days 00:00:00.264599416 +178 4 0 days 00:00:01.049289251 +178 5 0 days 00:00:00.787599222 +178 6 0 days 00:00:00.303844404 +178 7 0 days 00:00:00.739544005 +178 8 0 days 00:00:00.484623816 +178 11 0 days 00:00:00.272566240 +178 12 0 days 00:00:00.264920660 +178 13 0 days 00:00:01.042359077 +178 14 0 days 00:00:00.410798195 +178 15 0 days 00:00:00.855569011 +178 16 0 days 00:00:00.453404473 +178 17 0 days 00:00:00.710518625 +178 18 0 days 00:00:00.938673352 +178 19 0 days 00:00:00.818393975 +178 20 0 days 00:00:00.229915340 +178 21 0 days 00:00:00.242676260 +178 22 0 days 00:00:00.540137661 +178 23 0 days 00:00:00.307209623 +178 24 0 days 00:00:00.918008740 +178 26 0 days 00:00:00.238154536 +178 27 0 days 00:00:00.320576017 +178 28 0 days 00:00:00.336576070 +178 29 0 days 00:00:00.917579270 +178 30 0 days 00:00:00.978159468 +178 31 0 days 00:00:00.949514607 +178 32 0 days 00:00:00.921276128 +178 33 0 days 00:00:00.550804522 +178 34 0 days 00:00:00.465468508 +178 35 0 days 00:00:00.861518818 +178 36 0 days 00:00:00.350309985 +178 37 0 days 00:00:00.284058176 +178 38 0 days 00:00:00.873066685 +178 39 0 days 00:00:00.283447357 +178 40 0 days 00:00:00.340264473 +178 41 0 days 00:00:00.474010181 +178 42 0 days 00:00:00.259904903 +178 43 0 days 00:00:00.258597656 +178 44 0 days 
00:00:00.433959650 +178 45 0 days 00:00:00.251298536 +178 46 0 days 00:00:00.234941144 +178 47 0 days 00:00:00.943185592 +178 49 0 days 00:00:00.530004112 +178 50 0 days 00:00:00.232077784 +178 51 0 days 00:00:00.266188775 +178 52 0 days 00:00:00.457880680 +178 53 0 days 00:00:00.698294696 +178 54 0 days 00:00:00.830452962 +178 55 0 days 00:00:00.359295985 +178 56 0 days 00:00:00.981918778 +178 58 0 days 00:00:00.821427015 +178 59 0 days 00:00:00.832915248 +178 60 0 days 00:00:00.229504940 +178 61 0 days 00:00:00.772127525 +178 62 0 days 00:00:00.415973106 +178 63 0 days 00:00:00.285068848 +178 64 0 days 00:00:00.229249995 +178 65 0 days 00:00:00.785736197 +178 66 0 days 00:00:00.251714116 +178 67 0 days 00:00:00.431245365 +178 69 0 days 00:00:00.826351769 +178 70 0 days 00:00:00.892352361 +178 71 0 days 00:00:00.534365240 +178 72 0 days 00:00:00.361041880 +178 73 0 days 00:00:00.529375388 +178 74 0 days 00:00:00.387222800 +178 75 0 days 00:00:00.512383860 +178 77 0 days 00:00:00.712224140 +178 78 0 days 00:00:00.931370474 +178 79 0 days 00:00:00.513841630 +178 80 0 days 00:00:00.284957540 +178 81 0 days 00:00:00.783052330 +178 82 0 days 00:00:00.487784593 +178 83 0 days 00:00:00.387031476 +178 84 0 days 00:00:00.710087890 +178 85 0 days 00:00:00.253889922 +178 86 0 days 00:00:00.457976148 +178 87 0 days 00:00:00.279810685 +178 88 0 days 00:00:00.292912636 +178 90 0 days 00:00:00.806664457 +178 91 0 days 00:00:00.303013427 +178 92 0 days 00:00:00.215801592 +178 93 0 days 00:00:00.392945623 +178 94 0 days 00:00:00.633317155 +178 95 0 days 00:00:00.799392938 +178 96 0 days 00:00:00.697932980 +178 98 0 days 00:00:00.813103420 +178 99 0 days 00:00:00.249142344 +178 100 0 days 00:00:00.260050626 +179 1 0 days 00:00:00.245360876 +179 2 0 days 00:00:00.391357101 +179 3 0 days 00:00:00.328256455 +179 4 0 days 00:00:00.427320078 +179 5 0 days 00:00:00.405588768 +179 6 0 days 00:00:00.404470590 +179 7 0 days 00:00:00.339473343 +179 8 0 days 00:00:00.280588995 +179 9 0 days 
00:00:00.278384565 +179 10 0 days 00:00:00.125632497 +179 11 0 days 00:00:00.194497012 +179 13 0 days 00:00:00.108533181 +179 15 0 days 00:00:00.314629424 +179 16 0 days 00:00:00.187298088 +179 17 0 days 00:00:00.280850230 +179 18 0 days 00:00:00.113025734 +179 19 0 days 00:00:00.119945848 +179 20 0 days 00:00:00.190163816 +179 21 0 days 00:00:00.219803170 +179 22 0 days 00:00:00.109037037 +179 23 0 days 00:00:00.360158563 +179 24 0 days 00:00:00.350084323 +179 25 0 days 00:00:00.125810689 +179 26 0 days 00:00:00.387675660 +179 27 0 days 00:00:00.226990340 +179 29 0 days 00:00:00.138759037 +179 31 0 days 00:00:00.390287503 +179 32 0 days 00:00:00.174692855 +179 33 0 days 00:00:00.118732943 +179 34 0 days 00:00:00.188916171 +179 35 0 days 00:00:00.373582848 +179 36 0 days 00:00:00.160798125 +179 37 0 days 00:00:00.135348573 +179 38 0 days 00:00:00.123628622 +179 39 0 days 00:00:00.169054983 +179 40 0 days 00:00:00.324185705 +179 41 0 days 00:00:00.115185536 +179 42 0 days 00:00:00.144643092 +179 44 0 days 00:00:00.222357705 +179 45 0 days 00:00:00.203310083 +179 46 0 days 00:00:00.165605364 +179 47 0 days 00:00:00.331496437 +179 49 0 days 00:00:00.354049155 +179 50 0 days 00:00:00.134178004 +179 51 0 days 00:00:00.396128729 +179 52 0 days 00:00:00.390506785 +179 53 0 days 00:00:00.121947753 +179 55 0 days 00:00:00.202650671 +179 56 0 days 00:00:00.113533322 +179 57 0 days 00:00:00.128078651 +179 58 0 days 00:00:00.215829756 +179 59 0 days 00:00:00.110423740 +179 60 0 days 00:00:00.355293651 +179 61 0 days 00:00:00.325746916 +179 62 0 days 00:00:00.435981217 +179 63 0 days 00:00:00.335971061 +179 64 0 days 00:00:00.367818443 +179 65 0 days 00:00:00.114288285 +179 66 0 days 00:00:00.214047844 +179 67 0 days 00:00:00.371450056 +179 68 0 days 00:00:00.115858816 +179 69 0 days 00:00:00.117268263 +179 70 0 days 00:00:00.189088140 +179 71 0 days 00:00:00.124751945 +179 72 0 days 00:00:00.243053914 +179 73 0 days 00:00:00.108896082 +179 74 0 days 00:00:00.113905142 +179 75 
0 days 00:00:00.412675406 +179 76 0 days 00:00:00.314842910 +179 77 0 days 00:00:00.180579830 +179 78 0 days 00:00:00.256098650 +179 79 0 days 00:00:00.322654138 +179 80 0 days 00:00:00.438121108 +179 81 0 days 00:00:00.332122235 +179 82 0 days 00:00:00.244006288 +179 83 0 days 00:00:00.188413552 +179 84 0 days 00:00:00.130584164 +179 85 0 days 00:00:00.121174188 +179 86 0 days 00:00:00.222357851 +179 87 0 days 00:00:00.222114750 +179 88 0 days 00:00:00.169654908 +179 89 0 days 00:00:00.421915287 +179 90 0 days 00:00:00.173957316 +179 91 0 days 00:00:00.122713460 +179 92 0 days 00:00:00.115971587 +179 93 0 days 00:00:00.293180564 +179 95 0 days 00:00:00.309786365 +179 97 0 days 00:00:00.163987270 +179 98 0 days 00:00:00.125181476 +179 99 0 days 00:00:00.415374763 +179 100 0 days 00:00:00.306807630 +180 1 0 days 00:00:00.210681880 +180 2 0 days 00:00:00.208505940 +180 3 0 days 00:00:00.237216880 +180 4 0 days 00:00:00.188567573 +180 5 0 days 00:00:00.230692736 +180 6 0 days 00:00:00.339638260 +180 7 0 days 00:00:00.346900265 +180 8 0 days 00:00:00.345213200 +180 9 0 days 00:00:00.581905960 +180 10 0 days 00:00:00.209950053 +180 11 0 days 00:00:00.659423800 +180 12 0 days 00:00:00.403244468 +180 13 0 days 00:00:00.382387895 +180 14 0 days 00:00:00.229179100 +180 15 0 days 00:00:00.250843290 +180 16 0 days 00:00:00.218629426 +180 17 0 days 00:00:00.625812213 +180 18 0 days 00:00:00.738071074 +180 19 0 days 00:00:00.703328532 +180 20 0 days 00:00:00.203726033 +180 21 0 days 00:00:00.323491713 +180 22 0 days 00:00:00.406667296 +180 23 0 days 00:00:00.585134940 +180 24 0 days 00:00:00.220234246 +180 25 0 days 00:00:00.416840526 +180 26 0 days 00:00:00.218308985 +180 27 0 days 00:00:00.336520206 +180 28 0 days 00:00:00.669630815 +180 29 0 days 00:00:00.275322626 +180 30 0 days 00:00:00.207455840 +180 31 0 days 00:00:00.265351250 +180 32 0 days 00:00:00.666454725 +180 33 0 days 00:00:00.188876560 +180 34 0 days 00:00:00.236309986 +180 35 0 days 00:00:00.692555780 +180 36 0 
days 00:00:00.322093246 +180 37 0 days 00:00:00.299052686 +180 38 0 days 00:00:00.213639800 +180 39 0 days 00:00:00.385112172 +180 40 0 days 00:00:00.218574430 +180 41 0 days 00:00:00.588704933 +180 42 0 days 00:00:00.320996380 +180 43 0 days 00:00:00.217882585 +180 44 0 days 00:00:00.650572210 +180 45 0 days 00:00:00.369705455 +180 46 0 days 00:00:00.585804760 +180 47 0 days 00:00:00.349458066 +180 48 0 days 00:00:00.186732493 +180 49 0 days 00:00:00.328816033 +180 50 0 days 00:00:00.640832270 +180 51 0 days 00:00:00.713220423 +180 52 0 days 00:00:00.395590886 +180 53 0 days 00:00:00.650157955 +180 54 0 days 00:00:00.326927733 +180 55 0 days 00:00:00.245391040 +180 56 0 days 00:00:00.329926166 +180 57 0 days 00:00:00.690900552 +180 58 0 days 00:00:00.198908740 +180 59 0 days 00:00:00.647037785 +180 60 0 days 00:00:00.677548012 +180 61 0 days 00:00:00.241436675 +180 62 0 days 00:00:00.197587313 +180 63 0 days 00:00:00.328077886 +180 64 0 days 00:00:00.714815132 +180 65 0 days 00:00:00.721976326 +180 66 0 days 00:00:00.787732418 +180 67 0 days 00:00:00.436329400 +180 68 0 days 00:00:00.335680106 +180 69 0 days 00:00:00.661671665 +180 70 0 days 00:00:00.209487486 +180 71 0 days 00:00:00.418307753 +180 72 0 days 00:00:00.411947548 +180 73 0 days 00:00:00.218006126 +180 74 0 days 00:00:00.711577910 +180 75 0 days 00:00:00.429230888 +180 76 0 days 00:00:00.575423160 +180 77 0 days 00:00:00.209876893 +180 78 0 days 00:00:00.222363873 +180 79 0 days 00:00:00.213379520 +180 80 0 days 00:00:00.213510206 +180 81 0 days 00:00:00.420529064 +180 82 0 days 00:00:00.265736032 +180 83 0 days 00:00:00.261139435 +180 84 0 days 00:00:00.722942683 +180 85 0 days 00:00:00.323045826 +180 86 0 days 00:00:00.198168173 +180 87 0 days 00:00:00.201943413 +180 88 0 days 00:00:00.652855140 +180 89 0 days 00:00:00.368785240 +180 90 0 days 00:00:00.245978120 +180 91 0 days 00:00:00.394819075 +180 92 0 days 00:00:00.366487780 +180 93 0 days 00:00:00.370660005 +180 94 0 days 00:00:00.257630384 
+180 95 0 days 00:00:00.355788360 +180 96 0 days 00:00:00.413825510 +180 97 0 days 00:00:00.183278330 +180 98 0 days 00:00:00.412389790 +180 99 0 days 00:00:00.427607944 +180 100 0 days 00:00:00.233794066 +181 1 0 days 00:00:00.106288320 +181 2 0 days 00:00:00.334613780 +181 3 0 days 00:00:00.127815404 +181 4 0 days 00:00:00.366895996 +181 5 0 days 00:00:00.117359853 +181 6 0 days 00:00:00.106171486 +181 7 0 days 00:00:00.321042946 +181 8 0 days 00:00:00.351405480 +181 9 0 days 00:00:00.291007660 +181 10 0 days 00:00:00.172984373 +181 11 0 days 00:00:00.186193560 +181 12 0 days 00:00:00.112627546 +181 13 0 days 00:00:00.303935046 +181 14 0 days 00:00:00.119806212 +181 15 0 days 00:00:00.288005420 +181 16 0 days 00:00:00.197843096 +181 17 0 days 00:00:00.313529060 +181 18 0 days 00:00:00.360327335 +181 19 0 days 00:00:00.106783593 +181 20 0 days 00:00:00.105817720 +181 21 0 days 00:00:00.355207588 +181 22 0 days 00:00:00.169869220 +181 23 0 days 00:00:00.173901460 +181 24 0 days 00:00:00.290184813 +181 25 0 days 00:00:00.375598644 +181 26 0 days 00:00:00.318048766 +181 27 0 days 00:00:00.298132080 +181 28 0 days 00:00:00.143702580 +181 29 0 days 00:00:00.411012415 +181 30 0 days 00:00:00.099674773 +181 31 0 days 00:00:00.367137684 +181 32 0 days 00:00:00.150569720 +181 33 0 days 00:00:00.168921333 +181 34 0 days 00:00:00.353960336 +181 35 0 days 00:00:00.167983033 +181 36 0 days 00:00:00.182766900 +181 37 0 days 00:00:00.173130466 +181 38 0 days 00:00:00.273814153 +181 39 0 days 00:00:00.193994455 +181 40 0 days 00:00:00.132397413 +181 41 0 days 00:00:00.104029573 +181 42 0 days 00:00:00.168465046 +181 43 0 days 00:00:00.166839580 +181 44 0 days 00:00:00.106245346 +181 45 0 days 00:00:00.105835700 +181 46 0 days 00:00:00.100450633 +181 47 0 days 00:00:00.206224763 +181 48 0 days 00:00:00.170193640 +181 49 0 days 00:00:00.114665780 +181 50 0 days 00:00:00.339220076 +181 51 0 days 00:00:00.203410080 +181 52 0 days 00:00:00.345282720 +181 53 0 days 00:00:00.114488310 
+181 54 0 days 00:00:00.230994676 +181 55 0 days 00:00:00.322791780 +181 56 0 days 00:00:00.114053020 +181 57 0 days 00:00:00.199930176 +181 58 0 days 00:00:00.346887372 +181 59 0 days 00:00:00.362428076 +181 60 0 days 00:00:00.170461040 +181 61 0 days 00:00:00.307791506 +181 62 0 days 00:00:00.344717212 +181 63 0 days 00:00:00.311603600 +181 64 0 days 00:00:00.337666796 +181 65 0 days 00:00:00.192688890 +181 66 0 days 00:00:00.292779193 +181 67 0 days 00:00:00.328866065 +181 68 0 days 00:00:00.104179333 +181 69 0 days 00:00:00.197905468 +181 70 0 days 00:00:00.111382700 +181 71 0 days 00:00:00.107425300 +181 72 0 days 00:00:00.161273046 +181 73 0 days 00:00:00.377148728 +181 74 0 days 00:00:00.325694275 +181 75 0 days 00:00:00.372114185 +181 76 0 days 00:00:00.198403952 +181 77 0 days 00:00:00.188226860 +181 78 0 days 00:00:00.333784800 +181 79 0 days 00:00:00.338489855 +181 80 0 days 00:00:00.337643760 +181 81 0 days 00:00:00.289469880 +181 82 0 days 00:00:00.304821220 +181 83 0 days 00:00:00.182818830 +181 84 0 days 00:00:00.178946106 +181 85 0 days 00:00:00.172035713 +181 86 0 days 00:00:00.299152006 +181 87 0 days 00:00:00.405621898 +181 88 0 days 00:00:00.295983566 +181 89 0 days 00:00:00.107925460 +181 90 0 days 00:00:00.114230145 +181 91 0 days 00:00:00.167465713 +181 92 0 days 00:00:00.383381321 +181 93 0 days 00:00:00.358673173 +181 94 0 days 00:00:00.196831232 +181 95 0 days 00:00:00.198118248 +181 96 0 days 00:00:00.322472070 +181 97 0 days 00:00:00.101557700 +181 98 0 days 00:00:00.360230732 +181 99 0 days 00:00:00.296768433 +181 100 0 days 00:00:00.293541206 +182 1 0 days 00:00:00.162924990 +182 2 0 days 00:00:00.112475460 +182 3 0 days 00:00:00.131077345 +182 4 0 days 00:00:00.107460983 +182 5 0 days 00:00:00.131283595 +182 6 0 days 00:00:00.113859544 +182 7 0 days 00:00:00.141815860 +182 8 0 days 00:00:00.119174622 +182 9 0 days 00:00:00.098562736 +182 10 0 days 00:00:00.159464485 +182 11 0 days 00:00:00.167437953 +182 12 0 days 00:00:00.142090360 
+182 13 0 days 00:00:00.161554791 +182 14 0 days 00:00:00.098459630 +182 15 0 days 00:00:00.122906288 +182 16 0 days 00:00:00.171580775 +182 17 0 days 00:00:00.132409682 +182 18 0 days 00:00:00.123702266 +182 19 0 days 00:00:00.136412171 +182 20 0 days 00:00:00.112111868 +182 21 0 days 00:00:00.131621554 +182 22 0 days 00:00:00.134868747 +182 23 0 days 00:00:00.173396300 +182 24 0 days 00:00:00.113534077 +182 25 0 days 00:00:00.103050693 +182 26 0 days 00:00:00.167801353 +182 27 0 days 00:00:00.110799295 +182 28 0 days 00:00:00.116172970 +182 29 0 days 00:00:00.135455123 +182 30 0 days 00:00:00.118157386 +182 31 0 days 00:00:00.131590875 +182 32 0 days 00:00:00.160321574 +182 33 0 days 00:00:00.171585622 +182 34 0 days 00:00:00.099498735 +182 35 0 days 00:00:00.166227116 +182 36 0 days 00:00:00.113952765 +182 37 0 days 00:00:00.113013100 +182 38 0 days 00:00:00.119461790 +182 39 0 days 00:00:00.134070062 +182 40 0 days 00:00:00.138529695 +182 41 0 days 00:00:00.105772125 +182 42 0 days 00:00:00.170566620 +182 43 0 days 00:00:00.119630516 +182 44 0 days 00:00:00.097389755 +182 45 0 days 00:00:00.128271777 +182 46 0 days 00:00:00.139291436 +182 47 0 days 00:00:00.168204971 +182 48 0 days 00:00:00.106662786 +182 49 0 days 00:00:00.102936995 +182 50 0 days 00:00:00.098901380 +182 51 0 days 00:00:00.140655980 +182 52 0 days 00:00:00.139515560 +182 53 0 days 00:00:00.114515878 +182 54 0 days 00:00:00.125610376 +182 55 0 days 00:00:00.154759650 +182 56 0 days 00:00:00.087967973 +182 57 0 days 00:00:00.117434284 +182 58 0 days 00:00:00.113347310 +182 59 0 days 00:00:00.122608633 +182 60 0 days 00:00:00.116244610 +182 61 0 days 00:00:00.134169492 +182 62 0 days 00:00:00.134655416 +182 63 0 days 00:00:00.113155370 +182 64 0 days 00:00:00.172244672 +182 65 0 days 00:00:00.111423220 +182 66 0 days 00:00:00.108659030 +182 67 0 days 00:00:00.121044420 +182 68 0 days 00:00:00.151220812 +182 69 0 days 00:00:00.160585110 +182 70 0 days 00:00:00.116193198 +182 71 0 days 
00:00:00.117087698 +182 72 0 days 00:00:00.167779991 +182 73 0 days 00:00:00.157105088 +182 74 0 days 00:00:00.100363335 +182 75 0 days 00:00:00.167921200 +182 76 0 days 00:00:00.105103748 +182 77 0 days 00:00:00.158171584 +182 78 0 days 00:00:00.168136574 +182 79 0 days 00:00:00.168387507 +182 80 0 days 00:00:00.140876565 +182 81 0 days 00:00:00.128523242 +182 82 0 days 00:00:00.127953303 +182 83 0 days 00:00:00.139372970 +182 84 0 days 00:00:00.104221900 +182 85 0 days 00:00:00.127545577 +182 86 0 days 00:00:00.126559300 +182 87 0 days 00:00:00.167884290 +182 88 0 days 00:00:00.137041985 +182 89 0 days 00:00:00.154970060 +182 90 0 days 00:00:00.120464260 +182 91 0 days 00:00:00.107516230 +182 92 0 days 00:00:00.135298410 +182 93 0 days 00:00:00.110485975 +182 94 0 days 00:00:00.165739350 +182 95 0 days 00:00:00.126975823 +182 96 0 days 00:00:00.116959514 +182 97 0 days 00:00:00.106030960 +182 98 0 days 00:00:00.102620160 +182 99 0 days 00:00:00.113814268 +182 100 0 days 00:00:00.115548372 +183 1 0 days 00:00:00.114172160 +183 2 0 days 00:00:00.132744397 +183 3 0 days 00:00:00.102646352 +183 4 0 days 00:00:00.125321353 +183 5 0 days 00:00:00.110204595 +183 6 0 days 00:00:00.160719210 +183 7 0 days 00:00:00.169106046 +183 8 0 days 00:00:00.089515073 +183 9 0 days 00:00:00.101308093 +183 10 0 days 00:00:00.126966165 +183 11 0 days 00:00:00.127432327 +183 12 0 days 00:00:00.120595440 +183 13 0 days 00:00:00.097224235 +183 14 0 days 00:00:00.157557191 +183 15 0 days 00:00:00.087705780 +183 16 0 days 00:00:00.091396553 +183 17 0 days 00:00:00.101091840 +183 18 0 days 00:00:00.166726951 +183 19 0 days 00:00:00.103216925 +183 20 0 days 00:00:00.168487738 +183 21 0 days 00:00:00.118965244 +183 22 0 days 00:00:00.121500030 +183 23 0 days 00:00:00.122169606 +183 24 0 days 00:00:00.134829310 +183 25 0 days 00:00:00.169915064 +183 26 0 days 00:00:00.149480168 +183 27 0 days 00:00:00.109711280 +183 28 0 days 00:00:00.129064204 +183 29 0 days 00:00:00.119343885 +183 30 0 days 
00:00:00.108389860 +183 31 0 days 00:00:00.154782763 +183 32 0 days 00:00:00.134620815 +183 33 0 days 00:00:00.150395626 +183 34 0 days 00:00:00.151884523 +183 35 0 days 00:00:00.171726761 +183 36 0 days 00:00:00.097224055 +183 37 0 days 00:00:00.123738128 +183 38 0 days 00:00:00.116664232 +183 39 0 days 00:00:00.117182793 +183 40 0 days 00:00:00.156311177 +183 41 0 days 00:00:00.173021930 +183 42 0 days 00:00:00.119189533 +183 43 0 days 00:00:00.122819734 +183 44 0 days 00:00:00.111981646 +183 45 0 days 00:00:00.163414414 +183 47 0 days 00:00:00.141198035 +183 48 0 days 00:00:00.108452726 +183 49 0 days 00:00:00.122554913 +183 50 0 days 00:00:00.110233691 +183 51 0 days 00:00:00.127544151 +183 52 0 days 00:00:00.136355120 +183 53 0 days 00:00:00.112206486 +183 54 0 days 00:00:00.111596756 +183 55 0 days 00:00:00.100009000 +183 56 0 days 00:00:00.160121398 +183 57 0 days 00:00:00.128900845 +183 58 0 days 00:00:00.151459708 +183 59 0 days 00:00:00.111982632 +183 60 0 days 00:00:00.165032814 +183 61 0 days 00:00:00.124538140 +183 62 0 days 00:00:00.121442206 +183 63 0 days 00:00:00.158650935 +183 64 0 days 00:00:00.166153111 +183 65 0 days 00:00:00.103351000 +183 66 0 days 00:00:00.120541260 +183 67 0 days 00:00:00.105142775 +183 68 0 days 00:00:00.117055677 +183 69 0 days 00:00:00.116014868 +183 70 0 days 00:00:00.172276812 +183 71 0 days 00:00:00.145472688 +183 72 0 days 00:00:00.125399983 +183 73 0 days 00:00:00.131298567 +183 74 0 days 00:00:00.148067428 +183 75 0 days 00:00:00.128450543 +183 76 0 days 00:00:00.142921260 +183 77 0 days 00:00:00.168925740 +183 78 0 days 00:00:00.161630845 +183 79 0 days 00:00:00.147780788 +183 80 0 days 00:00:00.153211726 +183 81 0 days 00:00:00.118600997 +183 82 0 days 00:00:00.162678802 +183 83 0 days 00:00:00.106438256 +183 84 0 days 00:00:00.146469936 +183 85 0 days 00:00:00.099954180 +183 86 0 days 00:00:00.150175740 +183 87 0 days 00:00:00.109262000 +183 88 0 days 00:00:00.114651908 +183 89 0 days 00:00:00.131327152 +183 90 
0 days 00:00:00.160445252 +183 91 0 days 00:00:00.112376765 +183 92 0 days 00:00:00.164100302 +183 93 0 days 00:00:00.129066864 +183 94 0 days 00:00:00.121289312 +183 95 0 days 00:00:00.139094475 +183 96 0 days 00:00:00.113600720 +183 97 0 days 00:00:00.136700706 +183 98 0 days 00:00:00.123176733 +183 99 0 days 00:00:00.167871388 +183 100 0 days 00:00:00.140724350 +184 1 0 days 00:00:00.057920256 +184 2 0 days 00:00:00.079205831 +184 4 0 days 00:00:00.068391623 +184 5 0 days 00:00:00.060941810 +184 6 0 days 00:00:00.074480948 +184 7 0 days 00:00:00.062207995 +184 8 0 days 00:00:00.061026360 +184 9 0 days 00:00:00.084720961 +184 10 0 days 00:00:00.071407185 +184 11 0 days 00:00:00.061081806 +184 12 0 days 00:00:00.066396870 +184 13 0 days 00:00:00.070556040 +184 14 0 days 00:00:00.058926845 +184 15 0 days 00:00:00.070555549 +184 16 0 days 00:00:00.083935530 +184 17 0 days 00:00:00.062479355 +184 18 0 days 00:00:00.077933024 +184 19 0 days 00:00:00.059349510 +184 20 0 days 00:00:00.055133628 +184 21 0 days 00:00:00.062322800 +184 22 0 days 00:00:00.055709908 +184 23 0 days 00:00:00.077418806 +184 24 0 days 00:00:00.089633783 +184 25 0 days 00:00:00.060847140 +184 26 0 days 00:00:00.080011753 +184 27 0 days 00:00:00.084718791 +184 28 0 days 00:00:00.062837578 +184 29 0 days 00:00:00.059239095 +184 30 0 days 00:00:00.061630910 +184 31 0 days 00:00:00.063158771 +184 32 0 days 00:00:00.067642382 +184 33 0 days 00:00:00.073910721 +184 34 0 days 00:00:00.075210385 +184 35 0 days 00:00:00.087505005 +184 36 0 days 00:00:00.052341725 +184 37 0 days 00:00:00.063175618 +184 38 0 days 00:00:00.056418793 +184 39 0 days 00:00:00.076973693 +184 40 0 days 00:00:00.048924513 +184 41 0 days 00:00:00.068366325 +184 42 0 days 00:00:00.059549848 +184 43 0 days 00:00:00.068555305 +184 44 0 days 00:00:00.059352060 +184 45 0 days 00:00:00.063237305 +184 46 0 days 00:00:00.076123740 +184 48 0 days 00:00:00.055618816 +184 49 0 days 00:00:00.061939971 +184 50 0 days 00:00:00.080206590 +184 51 
0 days 00:00:00.056036975 +184 52 0 days 00:00:00.082319252 +184 53 0 days 00:00:00.062374827 +184 54 0 days 00:00:00.052536256 +184 55 0 days 00:00:00.052811476 +184 56 0 days 00:00:00.082152102 +184 57 0 days 00:00:00.064152721 +184 58 0 days 00:00:00.062727137 +184 59 0 days 00:00:00.054528730 +184 60 0 days 00:00:00.081042596 +184 61 0 days 00:00:00.077602862 +184 62 0 days 00:00:00.079692046 +184 63 0 days 00:00:00.076768208 +184 64 0 days 00:00:00.053240048 +184 65 0 days 00:00:00.059263153 +184 66 0 days 00:00:00.078224805 +184 67 0 days 00:00:00.061948108 +184 68 0 days 00:00:00.052974956 +184 69 0 days 00:00:00.072912440 +184 70 0 days 00:00:00.060408175 +184 71 0 days 00:00:00.058089332 +184 72 0 days 00:00:00.055434874 +184 73 0 days 00:00:00.055999485 +184 74 0 days 00:00:00.059749241 +184 75 0 days 00:00:00.080826196 +184 76 0 days 00:00:00.053096512 +184 77 0 days 00:00:00.059204686 +184 78 0 days 00:00:00.064571765 +184 79 0 days 00:00:00.063785135 +184 80 0 days 00:00:00.080782308 +184 81 0 days 00:00:00.058278894 +184 82 0 days 00:00:00.058664388 +184 83 0 days 00:00:00.064471021 +184 84 0 days 00:00:00.057896084 +184 85 0 days 00:00:00.045172193 +184 86 0 days 00:00:00.059574564 +184 87 0 days 00:00:00.072006596 +184 89 0 days 00:00:00.062623346 +184 90 0 days 00:00:00.065914635 +184 91 0 days 00:00:00.079257536 +184 92 0 days 00:00:00.061811374 +184 93 0 days 00:00:00.058851662 +184 94 0 days 00:00:00.055264660 +184 95 0 days 00:00:00.080580549 +184 96 0 days 00:00:00.081542806 +184 97 0 days 00:00:00.056393536 +184 98 0 days 00:00:00.062964076 +184 99 0 days 00:00:00.063986217 +184 100 0 days 00:00:00.077717990 +185 1 0 days 00:00:00.065636622 +185 2 0 days 00:00:00.058560294 +185 3 0 days 00:00:00.068854298 +185 4 0 days 00:00:00.058128165 +185 5 0 days 00:00:00.050814850 +185 6 0 days 00:00:00.065052145 +185 7 0 days 00:00:00.082207071 +185 8 0 days 00:00:00.065022005 +185 9 0 days 00:00:00.060547198 +185 10 0 days 00:00:00.081097260 +185 11 0 
days 00:00:00.087048155 +185 12 0 days 00:00:00.086669633 +185 13 0 days 00:00:00.056914240 +185 14 0 days 00:00:00.050517365 +185 15 0 days 00:00:00.071821525 +185 16 0 days 00:00:00.052743853 +185 17 0 days 00:00:00.060387852 +185 18 0 days 00:00:00.065819080 +185 19 0 days 00:00:00.057360026 +185 20 0 days 00:00:00.051591910 +185 21 0 days 00:00:00.085235910 +185 22 0 days 00:00:00.079008371 +185 23 0 days 00:00:00.062467700 +185 24 0 days 00:00:00.055356116 +185 25 0 days 00:00:00.053998910 +185 26 0 days 00:00:00.068443138 +185 27 0 days 00:00:00.051242555 +185 28 0 days 00:00:00.085170490 +185 29 0 days 00:00:00.051961440 +185 30 0 days 00:00:00.068734588 +185 31 0 days 00:00:00.070974805 +185 32 0 days 00:00:00.058980277 +185 33 0 days 00:00:00.056702788 +185 34 0 days 00:00:00.054577020 +185 35 0 days 00:00:00.057556457 +185 36 0 days 00:00:00.051401840 +185 37 0 days 00:00:00.051262805 +185 38 0 days 00:00:00.056902665 +185 39 0 days 00:00:00.057043136 +185 40 0 days 00:00:00.075275008 +185 41 0 days 00:00:00.071945550 +185 42 0 days 00:00:00.061899203 +185 43 0 days 00:00:00.064870728 +185 44 0 days 00:00:00.061927301 +185 45 0 days 00:00:00.054318164 +185 46 0 days 00:00:00.064598745 +185 47 0 days 00:00:00.085405990 +185 48 0 days 00:00:00.083326990 +185 49 0 days 00:00:00.064265236 +185 50 0 days 00:00:00.076355676 +185 51 0 days 00:00:00.078924522 +185 52 0 days 00:00:00.057157495 +185 53 0 days 00:00:00.054433320 +185 54 0 days 00:00:00.072990060 +185 55 0 days 00:00:00.081801957 +185 56 0 days 00:00:00.082013895 +185 57 0 days 00:00:00.057747025 +185 58 0 days 00:00:00.084279156 +185 59 0 days 00:00:00.050997200 +185 60 0 days 00:00:00.051733435 +185 61 0 days 00:00:00.060706352 +185 62 0 days 00:00:00.064071880 +185 63 0 days 00:00:00.068305626 +185 64 0 days 00:00:00.086937125 +185 65 0 days 00:00:00.053221024 +185 66 0 days 00:00:00.087165927 +185 67 0 days 00:00:00.051278930 +185 68 0 days 00:00:00.057172760 +185 69 0 days 00:00:00.065263770 
+185 70 0 days 00:00:00.056532553 +185 71 0 days 00:00:00.083045186 +185 72 0 days 00:00:00.086856081 +185 73 0 days 00:00:00.068119449 +185 74 0 days 00:00:00.059584513 +185 75 0 days 00:00:00.058633143 +185 76 0 days 00:00:00.083026491 +185 77 0 days 00:00:00.064607934 +185 78 0 days 00:00:00.062210811 +185 79 0 days 00:00:00.073440080 +185 80 0 days 00:00:00.076428376 +185 81 0 days 00:00:00.058379767 +185 82 0 days 00:00:00.078590826 +185 83 0 days 00:00:00.064925893 +185 84 0 days 00:00:00.081853317 +185 85 0 days 00:00:00.063744193 +185 86 0 days 00:00:00.051584160 +185 87 0 days 00:00:00.064775495 +185 88 0 days 00:00:00.069033490 +185 89 0 days 00:00:00.046511540 +185 90 0 days 00:00:00.084461162 +185 91 0 days 00:00:00.084249207 +185 92 0 days 00:00:00.066687046 +185 93 0 days 00:00:00.066384240 +185 94 0 days 00:00:00.069041396 +185 95 0 days 00:00:00.070033854 +185 96 0 days 00:00:00.056712275 +185 97 0 days 00:00:00.046270626 +185 98 0 days 00:00:00.063748273 +185 99 0 days 00:00:00.051830725 +185 100 0 days 00:00:00.054800612 +186 1 0 days 00:00:00.123004826 +186 2 0 days 00:00:00.162803650 +186 3 0 days 00:00:00.140383855 +186 4 0 days 00:00:00.133833160 +186 5 0 days 00:00:00.106367630 +186 6 0 days 00:00:00.150321136 +186 7 0 days 00:00:00.157680285 +186 8 0 days 00:00:00.135880635 +186 9 0 days 00:00:00.118253184 +186 10 0 days 00:00:00.112858044 +186 11 0 days 00:00:00.128945102 +186 12 0 days 00:00:00.114038660 +186 13 0 days 00:00:00.149338495 +186 14 0 days 00:00:00.099439015 +186 15 0 days 00:00:00.126503096 +186 16 0 days 00:00:00.106744575 +186 17 0 days 00:00:00.170558898 +186 18 0 days 00:00:00.125457343 +186 19 0 days 00:00:00.143060434 +186 20 0 days 00:00:00.120428160 +186 21 0 days 00:00:00.113244180 +186 22 0 days 00:00:00.113020870 +186 23 0 days 00:00:00.139494630 +186 24 0 days 00:00:00.157307840 +186 25 0 days 00:00:00.100221970 +186 26 0 days 00:00:00.141763965 +186 27 0 days 00:00:00.120212780 +186 28 0 days 00:00:00.137588410 
+186 29 0 days 00:00:00.174466283 +186 30 0 days 00:00:00.148819135 +186 31 0 days 00:00:00.118781491 +186 32 0 days 00:00:00.099439610 +186 33 0 days 00:00:00.123930996 +186 34 0 days 00:00:00.124901466 +186 35 0 days 00:00:00.121425586 +186 36 0 days 00:00:00.119363955 +186 37 0 days 00:00:00.138743864 +186 38 0 days 00:00:00.098865830 +186 39 0 days 00:00:00.099904785 +186 40 0 days 00:00:00.167835652 +186 41 0 days 00:00:00.114963770 +186 42 0 days 00:00:00.139347780 +186 43 0 days 00:00:00.154595923 +186 44 0 days 00:00:00.108792880 +186 45 0 days 00:00:00.126624086 +186 46 0 days 00:00:00.122939451 +186 47 0 days 00:00:00.142871575 +186 48 0 days 00:00:00.141315840 +186 49 0 days 00:00:00.125958624 +186 50 0 days 00:00:00.118770532 +186 51 0 days 00:00:00.108852956 +186 52 0 days 00:00:00.138827281 +186 53 0 days 00:00:00.119321065 +186 54 0 days 00:00:00.141472040 +186 55 0 days 00:00:00.132736092 +186 56 0 days 00:00:00.165555210 +186 57 0 days 00:00:00.123822250 +186 58 0 days 00:00:00.135209170 +186 59 0 days 00:00:00.128007283 +186 60 0 days 00:00:00.163525165 +186 61 0 days 00:00:00.121327005 +186 62 0 days 00:00:00.158030230 +186 63 0 days 00:00:00.153719072 +186 64 0 days 00:00:00.160201466 +186 65 0 days 00:00:00.110138653 +186 66 0 days 00:00:00.114994945 +186 67 0 days 00:00:00.116648433 +186 68 0 days 00:00:00.107306476 +186 69 0 days 00:00:00.141252270 +186 70 0 days 00:00:00.128069785 +186 71 0 days 00:00:00.151494424 +186 72 0 days 00:00:00.133616418 +186 73 0 days 00:00:00.105876968 +186 74 0 days 00:00:00.169701497 +186 75 0 days 00:00:00.168897511 +186 76 0 days 00:00:00.130069247 +186 77 0 days 00:00:00.167794851 +186 78 0 days 00:00:00.165657302 +186 79 0 days 00:00:00.161004902 +186 80 0 days 00:00:00.138351860 +186 81 0 days 00:00:00.138472667 +186 82 0 days 00:00:00.098011435 +186 83 0 days 00:00:00.098194220 +186 84 0 days 00:00:00.100138770 +186 85 0 days 00:00:00.103513604 +186 86 0 days 00:00:00.123232009 +186 87 0 days 
00:00:00.169220822 +186 88 0 days 00:00:00.111616392 +186 89 0 days 00:00:00.119455636 +186 90 0 days 00:00:00.157662168 +186 91 0 days 00:00:00.115197222 +186 92 0 days 00:00:00.121658704 +186 93 0 days 00:00:00.158811723 +186 94 0 days 00:00:00.150403672 +186 95 0 days 00:00:00.123522748 +186 96 0 days 00:00:00.110284636 +186 97 0 days 00:00:00.118606024 +186 98 0 days 00:00:00.109683213 +186 99 0 days 00:00:00.122694261 +186 100 0 days 00:00:00.159255616 +187 1 0 days 00:00:00.065846408 +187 2 0 days 00:00:00.062559585 +187 3 0 days 00:00:00.057756684 +187 4 0 days 00:00:00.077757825 +187 5 0 days 00:00:00.056751345 +187 6 0 days 00:00:00.055621875 +187 7 0 days 00:00:00.055728375 +187 8 0 days 00:00:00.063541191 +187 9 0 days 00:00:00.085509372 +187 10 0 days 00:00:00.062463660 +187 11 0 days 00:00:00.064231860 +187 12 0 days 00:00:00.071637366 +187 13 0 days 00:00:00.088268768 +187 14 0 days 00:00:00.078474960 +187 15 0 days 00:00:00.075726863 +187 16 0 days 00:00:00.070272475 +187 17 0 days 00:00:00.066373130 +187 18 0 days 00:00:00.077166555 +187 19 0 days 00:00:00.085205780 +187 20 0 days 00:00:00.061022805 +187 21 0 days 00:00:00.085049020 +187 22 0 days 00:00:00.067188815 +187 23 0 days 00:00:00.056991320 +187 24 0 days 00:00:00.062522910 +187 25 0 days 00:00:00.067267118 +187 26 0 days 00:00:00.081069420 +187 27 0 days 00:00:00.062218150 +187 28 0 days 00:00:00.060748245 +187 29 0 days 00:00:00.085079020 +187 30 0 days 00:00:00.069735206 +187 31 0 days 00:00:00.070610660 +187 32 0 days 00:00:00.062827875 +187 33 0 days 00:00:00.085758737 +187 34 0 days 00:00:00.075827180 +187 35 0 days 00:00:00.079710660 +187 36 0 days 00:00:00.079647136 +187 37 0 days 00:00:00.073683732 +187 38 0 days 00:00:00.065525440 +187 39 0 days 00:00:00.070200112 +187 40 0 days 00:00:00.069965720 +187 41 0 days 00:00:00.061055015 +187 42 0 days 00:00:00.076056515 +187 43 0 days 00:00:00.060540375 +187 44 0 days 00:00:00.056921892 +187 45 0 days 00:00:00.073816622 +187 46 0 days 
00:00:00.089190606 +187 47 0 days 00:00:00.054782210 +187 48 0 days 00:00:00.061704102 +187 49 0 days 00:00:00.063467658 +187 50 0 days 00:00:00.055761050 +187 51 0 days 00:00:00.055270245 +187 52 0 days 00:00:00.077084475 +187 53 0 days 00:00:00.061237430 +187 54 0 days 00:00:00.062279460 +187 55 0 days 00:00:00.076486980 +187 56 0 days 00:00:00.063746360 +187 57 0 days 00:00:00.091796100 +187 58 0 days 00:00:00.091672295 +187 59 0 days 00:00:00.080917440 +187 60 0 days 00:00:00.083718786 +187 61 0 days 00:00:00.061734545 +187 62 0 days 00:00:00.059287353 +187 63 0 days 00:00:00.074556125 +187 64 0 days 00:00:00.094092816 +187 65 0 days 00:00:00.077137205 +187 66 0 days 00:00:00.079640115 +187 67 0 days 00:00:00.092593236 +187 68 0 days 00:00:00.061908140 +187 69 0 days 00:00:00.077462886 +187 70 0 days 00:00:00.072446885 +187 71 0 days 00:00:00.079356460 +187 72 0 days 00:00:00.061613870 +187 73 0 days 00:00:00.082992516 +187 74 0 days 00:00:00.055737990 +187 75 0 days 00:00:00.076686776 +187 76 0 days 00:00:00.091115977 +187 77 0 days 00:00:00.061590385 +187 78 0 days 00:00:00.081045330 +187 79 0 days 00:00:00.076041628 +187 80 0 days 00:00:00.061187150 +187 81 0 days 00:00:00.061116585 +187 82 0 days 00:00:00.062118257 +187 83 0 days 00:00:00.076401565 +187 84 0 days 00:00:00.060686050 +187 85 0 days 00:00:00.077604845 +187 86 0 days 00:00:00.054742625 +187 87 0 days 00:00:00.081246330 +187 88 0 days 00:00:00.065335228 +187 89 0 days 00:00:00.090617526 +187 90 0 days 00:00:00.068696240 +187 91 0 days 00:00:00.074403968 +187 92 0 days 00:00:00.062513000 +187 93 0 days 00:00:00.061654480 +187 94 0 days 00:00:00.069779300 +187 95 0 days 00:00:00.074617255 +187 96 0 days 00:00:00.060962456 +187 97 0 days 00:00:00.064994655 +187 98 0 days 00:00:00.065607585 +187 99 0 days 00:00:00.072127053 +187 100 0 days 00:00:00.067750114 +188 1 0 days 00:00:00.339456503 +188 2 0 days 00:00:00.269209748 +188 3 0 days 00:00:00.260508320 +188 4 0 days 00:00:00.329296590 +188 5 0 
days 00:00:00.265078836 +188 6 0 days 00:00:00.283127394 +188 7 0 days 00:00:00.784054088 +188 8 0 days 00:00:00.340234070 +188 9 0 days 00:00:00.728778405 +188 10 0 days 00:00:00.460882297 +188 11 0 days 00:00:00.769791376 +188 12 0 days 00:00:00.285329184 +188 13 0 days 00:00:00.256176030 +188 14 0 days 00:00:00.812951563 +188 15 0 days 00:00:00.398737575 +188 16 0 days 00:00:00.486144964 +188 17 0 days 00:00:00.816760030 +188 18 0 days 00:00:00.733054880 +188 19 0 days 00:00:00.269309784 +188 20 0 days 00:00:00.291892645 +188 21 0 days 00:00:00.319321902 +188 22 0 days 00:00:00.294442728 +188 23 0 days 00:00:00.789498364 +188 24 0 days 00:00:00.269918363 +188 25 0 days 00:00:00.232934860 +188 26 0 days 00:00:00.219131570 +188 27 0 days 00:00:00.275474717 +188 28 0 days 00:00:00.782131700 +188 29 0 days 00:00:00.787125872 +188 30 0 days 00:00:00.261062596 +188 31 0 days 00:00:00.783636760 +188 32 0 days 00:00:00.298026385 +188 33 0 days 00:00:00.853678425 +188 34 0 days 00:00:00.466819990 +188 35 0 days 00:00:00.494286648 +188 36 0 days 00:00:00.261413692 +188 37 0 days 00:00:00.866462325 +188 38 0 days 00:00:00.289238924 +188 39 0 days 00:00:00.426564040 +188 40 0 days 00:00:00.794152424 +188 41 0 days 00:00:00.305829880 +188 42 0 days 00:00:00.783461904 +188 43 0 days 00:00:00.802660332 +188 44 0 days 00:00:00.274217537 +188 45 0 days 00:00:00.461207765 +188 46 0 days 00:00:00.823812790 +188 47 0 days 00:00:00.264716770 +188 48 0 days 00:00:00.490406634 +188 49 0 days 00:00:00.834751612 +188 50 0 days 00:00:00.825239828 +188 51 0 days 00:00:00.277823320 +188 52 0 days 00:00:00.437825680 +188 53 0 days 00:00:00.478143327 +188 54 0 days 00:00:00.467867466 +188 55 0 days 00:00:00.267651933 +188 56 0 days 00:00:00.308843240 +188 57 0 days 00:00:00.272902208 +188 58 0 days 00:00:00.285581080 +188 59 0 days 00:00:00.813132176 +188 60 0 days 00:00:00.796465520 +188 61 0 days 00:00:00.290290164 +188 62 0 days 00:00:00.575954874 +188 63 0 days 00:00:00.270995812 +188 64 
0 days 00:00:00.858705813 +188 65 0 days 00:00:00.276472735 +188 66 0 days 00:00:00.350552126 +188 67 0 days 00:00:00.821882040 +188 68 0 days 00:00:00.441622396 +188 69 0 days 00:00:00.830831950 +188 70 0 days 00:00:00.872624240 +188 71 0 days 00:00:00.431768535 +188 72 0 days 00:00:00.442988344 +188 73 0 days 00:00:00.281218980 +188 74 0 days 00:00:00.270079045 +188 75 0 days 00:00:00.484015742 +188 76 0 days 00:00:00.475393060 +188 77 0 days 00:00:00.244990045 +188 78 0 days 00:00:00.412155620 +188 79 0 days 00:00:00.411029085 +188 80 0 days 00:00:00.283499524 +188 81 0 days 00:00:00.452125625 +188 82 0 days 00:00:00.261477240 +188 83 0 days 00:00:00.792309420 +188 84 0 days 00:00:00.289157174 +188 85 0 days 00:00:00.418908344 +188 86 0 days 00:00:00.479015197 +188 87 0 days 00:00:00.367774005 +188 88 0 days 00:00:00.230752995 +188 89 0 days 00:00:00.786827376 +188 90 0 days 00:00:00.458298566 +188 91 0 days 00:00:00.789643608 +188 92 0 days 00:00:00.845401116 +188 93 0 days 00:00:00.423009725 +188 94 0 days 00:00:00.467867697 +188 95 0 days 00:00:00.271033305 +188 96 0 days 00:00:00.764077064 +188 97 0 days 00:00:00.453153225 +188 98 0 days 00:00:00.271505380 +188 99 0 days 00:00:00.788495104 +188 100 0 days 00:00:00.825163430 +189 1 0 days 00:00:00.256350088 +189 2 0 days 00:00:00.764035540 +189 3 0 days 00:00:00.818941305 +189 4 0 days 00:00:00.471607346 +189 5 0 days 00:00:00.195370506 +189 6 0 days 00:00:00.859622670 +189 7 0 days 00:00:00.856883260 +189 8 0 days 00:00:00.270812348 +189 9 0 days 00:00:00.294000405 +189 10 0 days 00:00:00.341395807 +189 11 0 days 00:00:00.273067295 +189 12 0 days 00:00:00.259412600 +189 13 0 days 00:00:00.497734174 +189 14 0 days 00:00:00.427949810 +189 15 0 days 00:00:00.461682270 +189 16 0 days 00:00:00.263362980 +189 17 0 days 00:00:00.775205765 +189 18 0 days 00:00:00.883772247 +189 19 0 days 00:00:00.869538300 +189 20 0 days 00:00:00.879674517 +189 21 0 days 00:00:00.840818164 +189 22 0 days 00:00:00.429206320 +189 23 0 
days 00:00:00.447948728 +189 24 0 days 00:00:00.396612312 +189 25 0 days 00:00:00.211109816 +189 26 0 days 00:00:00.218669600 +189 27 0 days 00:00:00.226927042 +189 28 0 days 00:00:00.230153787 +189 29 0 days 00:00:00.622094264 +189 30 0 days 00:00:00.255480172 +189 31 0 days 00:00:00.574998775 +189 32 0 days 00:00:00.306278120 +189 33 0 days 00:00:00.208558740 +189 34 0 days 00:00:00.231052591 +189 35 0 days 00:00:00.645038520 +189 36 0 days 00:00:00.416726654 +189 37 0 days 00:00:00.233385882 +189 38 0 days 00:00:00.617828260 +189 39 0 days 00:00:00.571684040 +189 40 0 days 00:00:00.282374554 +189 41 0 days 00:00:00.430143558 +189 42 0 days 00:00:00.230013896 +189 43 0 days 00:00:00.222650632 +189 44 0 days 00:00:00.361820794 +189 45 0 days 00:00:00.360807293 +189 46 0 days 00:00:00.273862233 +189 47 0 days 00:00:00.328457010 +189 48 0 days 00:00:00.702444974 +189 49 0 days 00:00:00.329987028 +189 50 0 days 00:00:00.472646756 +189 51 0 days 00:00:00.325048620 +189 52 0 days 00:00:00.234569213 +189 53 0 days 00:00:00.213142765 +189 54 0 days 00:00:00.352373140 +189 55 0 days 00:00:00.344530416 +189 56 0 days 00:00:00.228954428 +189 57 0 days 00:00:00.336537184 +189 58 0 days 00:00:00.485188540 +189 59 0 days 00:00:00.635941446 +189 60 0 days 00:00:00.294866806 +189 61 0 days 00:00:00.210514104 +189 62 0 days 00:00:00.242906750 +189 63 0 days 00:00:00.225648164 +189 64 0 days 00:00:00.212826343 +189 65 0 days 00:00:00.669864390 +189 66 0 days 00:00:00.400598393 +189 67 0 days 00:00:00.341156252 +189 68 0 days 00:00:00.639527804 +189 69 0 days 00:00:00.229457165 +189 70 0 days 00:00:00.336003588 +189 71 0 days 00:00:00.549813030 +189 72 0 days 00:00:00.636226704 +189 73 0 days 00:00:00.313942492 +189 74 0 days 00:00:00.643683380 +189 75 0 days 00:00:00.639122900 +189 76 0 days 00:00:00.304006980 +189 77 0 days 00:00:00.358080760 +189 78 0 days 00:00:00.360583492 +189 79 0 days 00:00:00.356536175 +189 80 0 days 00:00:00.361912157 +189 81 0 days 00:00:00.691389297 
+189 82 0 days 00:00:00.684927668 +189 83 0 days 00:00:00.328477504 +189 84 0 days 00:00:00.599709250 +189 85 0 days 00:00:00.601059255 +189 86 0 days 00:00:00.362411557 +189 87 0 days 00:00:00.324941684 +189 88 0 days 00:00:00.451211760 +189 89 0 days 00:00:00.370062545 +189 90 0 days 00:00:00.666862146 +189 91 0 days 00:00:00.239922783 +189 92 0 days 00:00:00.310598428 +189 93 0 days 00:00:00.347553780 +189 94 0 days 00:00:00.651053684 +189 95 0 days 00:00:00.174219250 +189 96 0 days 00:00:00.258222331 +189 97 0 days 00:00:00.694013055 +189 98 0 days 00:00:00.647449572 +189 99 0 days 00:00:00.604776625 +189 100 0 days 00:00:00.377775131 +190 1 0 days 00:00:00.128601727 +190 2 0 days 00:00:00.182170405 +190 3 0 days 00:00:00.321126073 +190 4 0 days 00:00:00.300623315 +190 5 0 days 00:00:00.165937780 +190 6 0 days 00:00:00.092154848 +190 7 0 days 00:00:00.352720258 +190 8 0 days 00:00:00.331887420 +190 9 0 days 00:00:00.161576255 +190 10 0 days 00:00:00.320025823 +190 11 0 days 00:00:00.173027320 +190 12 0 days 00:00:00.351392348 +190 13 0 days 00:00:00.192224551 +190 14 0 days 00:00:00.101676463 +190 15 0 days 00:00:00.129856792 +190 16 0 days 00:00:00.171491988 +190 17 0 days 00:00:00.338978455 +190 18 0 days 00:00:00.286590495 +190 19 0 days 00:00:00.322566020 +190 20 0 days 00:00:00.200961922 +190 21 0 days 00:00:00.191841112 +190 22 0 days 00:00:00.155115180 +190 23 0 days 00:00:00.285933550 +190 24 0 days 00:00:00.202498506 +190 25 0 days 00:00:00.167431651 +190 26 0 days 00:00:00.175573840 +190 27 0 days 00:00:00.185929552 +190 28 0 days 00:00:00.165589076 +190 29 0 days 00:00:00.191935940 +190 30 0 days 00:00:00.165339020 +190 31 0 days 00:00:00.240475371 +190 32 0 days 00:00:00.298300668 +190 33 0 days 00:00:00.307621752 +190 34 0 days 00:00:00.189789634 +190 35 0 days 00:00:00.160791255 +190 36 0 days 00:00:00.326861642 +190 37 0 days 00:00:00.126781712 +190 38 0 days 00:00:00.304371146 +190 39 0 days 00:00:00.083545557 +190 40 0 days 00:00:00.121762044 
+190 41 0 days 00:00:00.109617610 +190 42 0 days 00:00:00.221058012 +190 43 0 days 00:00:00.114216718 +190 44 0 days 00:00:00.303007192 +190 45 0 days 00:00:00.305925032 +190 46 0 days 00:00:00.340032262 +190 47 0 days 00:00:00.163057670 +190 48 0 days 00:00:00.112222570 +190 49 0 days 00:00:00.109109565 +190 50 0 days 00:00:00.294780555 +190 51 0 days 00:00:00.325680857 +190 52 0 days 00:00:00.156976836 +190 53 0 days 00:00:00.120188888 +190 54 0 days 00:00:00.193880868 +190 55 0 days 00:00:00.196911356 +190 56 0 days 00:00:00.289390560 +190 57 0 days 00:00:00.168933945 +190 58 0 days 00:00:00.180134924 +190 59 0 days 00:00:00.330248120 +190 60 0 days 00:00:00.173928652 +190 61 0 days 00:00:00.311580552 +190 62 0 days 00:00:00.113111085 +190 63 0 days 00:00:00.184179312 +190 64 0 days 00:00:00.330410235 +190 65 0 days 00:00:00.110782005 +190 66 0 days 00:00:00.162373252 +190 67 0 days 00:00:00.311993350 +190 68 0 days 00:00:00.116083912 +190 69 0 days 00:00:00.187735031 +190 70 0 days 00:00:00.351310998 +190 71 0 days 00:00:00.160486845 +190 72 0 days 00:00:00.186292912 +190 73 0 days 00:00:00.227907512 +190 74 0 days 00:00:00.350616107 +190 75 0 days 00:00:00.210071193 +190 76 0 days 00:00:00.123579150 +190 77 0 days 00:00:00.129972205 +190 78 0 days 00:00:00.295393150 +190 79 0 days 00:00:00.131618743 +190 80 0 days 00:00:00.350128406 +190 81 0 days 00:00:00.135252081 +190 82 0 days 00:00:00.349529162 +190 83 0 days 00:00:00.125769780 +190 84 0 days 00:00:00.127929497 +190 85 0 days 00:00:00.172421676 +190 86 0 days 00:00:00.106392420 +190 87 0 days 00:00:00.144807222 +190 88 0 days 00:00:00.102931313 +190 89 0 days 00:00:00.137338177 +190 90 0 days 00:00:00.105177423 +190 91 0 days 00:00:00.131832045 +190 92 0 days 00:00:00.111897385 +190 93 0 days 00:00:00.355998482 +190 94 0 days 00:00:00.127929957 +190 95 0 days 00:00:00.202224931 +190 96 0 days 00:00:00.138077680 +190 97 0 days 00:00:00.144502026 +190 98 0 days 00:00:00.118254442 +190 99 0 days 
00:00:00.306380168 +190 100 0 days 00:00:00.166258264 +191 1 0 days 00:00:00.364224670 +191 2 0 days 00:00:00.360105914 +191 3 0 days 00:00:00.360896922 +191 4 0 days 00:00:00.378777849 +191 5 0 days 00:00:00.132944716 +191 6 0 days 00:00:00.117067029 +191 7 0 days 00:00:00.140972100 +191 8 0 days 00:00:00.134709000 +191 9 0 days 00:00:00.261998103 +191 10 0 days 00:00:00.130017555 +191 11 0 days 00:00:00.124488938 +191 12 0 days 00:00:00.137701990 +191 13 0 days 00:00:00.131160463 +191 14 0 days 00:00:00.348646491 +191 15 0 days 00:00:00.205513900 +191 16 0 days 00:00:00.350957140 +191 17 0 days 00:00:00.127074450 +191 18 0 days 00:00:00.200575700 +191 19 0 days 00:00:00.114812613 +191 20 0 days 00:00:00.362931028 +191 21 0 days 00:00:00.134029100 +191 22 0 days 00:00:00.315424570 +191 23 0 days 00:00:00.196601906 +191 24 0 days 00:00:00.203172697 +191 25 0 days 00:00:00.121456162 +191 26 0 days 00:00:00.182953584 +191 27 0 days 00:00:00.133326940 +191 28 0 days 00:00:00.106171303 +191 29 0 days 00:00:00.190240308 +191 30 0 days 00:00:00.173308420 +191 31 0 days 00:00:00.223957493 +191 32 0 days 00:00:00.135440654 +191 33 0 days 00:00:00.125803008 +191 34 0 days 00:00:00.141804786 +191 35 0 days 00:00:00.178631636 +191 36 0 days 00:00:00.371925062 +191 37 0 days 00:00:00.167460308 +191 38 0 days 00:00:00.214069807 +191 39 0 days 00:00:00.177127965 +191 40 0 days 00:00:00.212074449 +191 41 0 days 00:00:00.194287042 +191 42 0 days 00:00:00.345004805 +191 43 0 days 00:00:00.315145164 +191 44 0 days 00:00:00.175563766 +191 45 0 days 00:00:00.351596632 +191 46 0 days 00:00:00.316101444 +191 47 0 days 00:00:00.194290932 +191 48 0 days 00:00:00.189590277 +191 49 0 days 00:00:00.167250800 +191 50 0 days 00:00:00.182057756 +191 51 0 days 00:00:00.134741381 +191 52 0 days 00:00:00.338778660 +191 53 0 days 00:00:00.292829670 +191 54 0 days 00:00:00.214752104 +191 55 0 days 00:00:00.324892920 +191 56 0 days 00:00:00.288774375 +191 57 0 days 00:00:00.299909936 +191 58 0 days 
00:00:00.309451115 +191 59 0 days 00:00:00.181993986 +191 60 0 days 00:00:00.194688625 +191 61 0 days 00:00:00.116385350 +191 62 0 days 00:00:00.113692369 +191 63 0 days 00:00:00.180873936 +191 64 0 days 00:00:00.133663057 +191 65 0 days 00:00:00.112335337 +191 66 0 days 00:00:00.192014852 +191 67 0 days 00:00:00.312385526 +191 68 0 days 00:00:00.122467334 +191 69 0 days 00:00:00.182044943 +191 70 0 days 00:00:00.175351655 +191 71 0 days 00:00:00.325938496 +191 72 0 days 00:00:00.286511672 +191 73 0 days 00:00:00.348912970 +191 74 0 days 00:00:00.188608471 +191 75 0 days 00:00:00.124479780 +191 76 0 days 00:00:00.119391817 +191 77 0 days 00:00:00.188319302 +191 78 0 days 00:00:00.180963008 +191 79 0 days 00:00:00.126299226 +191 80 0 days 00:00:00.331292147 +191 81 0 days 00:00:00.334119432 +191 82 0 days 00:00:00.184599514 +191 83 0 days 00:00:00.323179837 +191 84 0 days 00:00:00.311550572 +191 85 0 days 00:00:00.124919652 +191 86 0 days 00:00:00.161257908 +191 87 0 days 00:00:00.188868889 +191 88 0 days 00:00:00.119753266 +191 89 0 days 00:00:00.133584562 +191 90 0 days 00:00:00.171121185 +191 91 0 days 00:00:00.184169211 +191 92 0 days 00:00:00.114452574 +191 93 0 days 00:00:00.310909210 +191 94 0 days 00:00:00.257869590 +191 95 0 days 00:00:00.291930595 +191 96 0 days 00:00:00.109621436 +191 97 0 days 00:00:00.113216967 +191 98 0 days 00:00:00.321487988 +191 99 0 days 00:00:00.324455602 +191 100 0 days 00:00:00.170408070 +192 1 0 days 00:00:00.220318320 +192 2 0 days 00:00:00.797589500 +192 3 0 days 00:00:00.249375710 +192 4 0 days 00:00:00.879955080 +192 5 0 days 00:00:00.304959425 +192 6 0 days 00:00:00.308708905 +192 7 0 days 00:00:00.866498580 +192 8 0 days 00:00:00.790636143 +192 9 0 days 00:00:00.510105342 +192 10 0 days 00:00:00.341516293 +192 11 0 days 00:00:00.969335620 +192 12 0 days 00:00:00.320050982 +192 13 0 days 00:00:00.794200010 +192 14 0 days 00:00:00.271033306 +192 15 0 days 00:00:00.701134035 +192 16 0 days 00:00:00.297351340 +192 17 0 days 
00:00:00.887066740 +192 18 0 days 00:00:00.549573316 +192 19 0 days 00:00:00.422647356 +192 20 0 days 00:00:00.326895190 +192 21 0 days 00:00:00.472751290 +192 22 0 days 00:00:00.401522085 +192 23 0 days 00:00:00.823965796 +192 24 0 days 00:00:00.805650910 +192 25 0 days 00:00:00.861125932 +192 26 0 days 00:00:00.455787806 +192 27 0 days 00:00:00.716263510 +192 28 0 days 00:00:00.855935012 +192 29 0 days 00:00:00.262568125 +192 30 0 days 00:00:00.462132855 +192 31 0 days 00:00:00.913320820 +192 32 0 days 00:00:00.524976392 +192 33 0 days 00:00:00.404402370 +192 34 0 days 00:00:00.500221190 +192 35 0 days 00:00:00.865714456 +192 36 0 days 00:00:00.243504655 +192 37 0 days 00:00:00.742665780 +192 38 0 days 00:00:00.833073415 +192 39 0 days 00:00:00.438496945 +192 40 0 days 00:00:00.466289896 +192 41 0 days 00:00:00.309534896 +192 42 0 days 00:00:00.313842597 +192 43 0 days 00:00:00.872732808 +192 44 0 days 00:00:00.485838833 +192 45 0 days 00:00:00.739028740 +192 46 0 days 00:00:00.248495856 +192 47 0 days 00:00:00.300491285 +192 48 0 days 00:00:00.321669228 +192 49 0 days 00:00:00.865416924 +192 50 0 days 00:00:00.258087493 +192 51 0 days 00:00:00.525179840 +192 52 0 days 00:00:00.851358348 +192 53 0 days 00:00:00.829229852 +192 54 0 days 00:00:00.746211560 +192 55 0 days 00:00:00.948467902 +192 56 0 days 00:00:00.367701790 +192 57 0 days 00:00:00.506085270 +192 58 0 days 00:00:00.242721420 +192 59 0 days 00:00:00.530391266 +192 60 0 days 00:00:00.979461548 +192 61 0 days 00:00:00.405160065 +192 62 0 days 00:00:00.239011665 +192 63 0 days 00:00:00.471160990 +192 64 0 days 00:00:00.417338660 +192 65 0 days 00:00:00.477160985 +192 66 0 days 00:00:00.261227713 +192 67 0 days 00:00:00.855892272 +192 68 0 days 00:00:00.516606996 +192 69 0 days 00:00:00.253451980 +192 70 0 days 00:00:00.515561593 +192 71 0 days 00:00:00.271411380 +192 72 0 days 00:00:00.771245655 +192 73 0 days 00:00:00.263091444 +192 74 0 days 00:00:00.255564684 +192 75 0 days 00:00:00.817100312 +192 76 
0 days 00:00:00.284533330 +192 77 0 days 00:00:00.717156180 +192 78 0 days 00:00:00.867743532 +192 79 0 days 00:00:00.446748695 +192 80 0 days 00:00:00.461224860 +192 81 0 days 00:00:00.861374003 +192 82 0 days 00:00:00.250857400 +192 83 0 days 00:00:00.398751225 +192 84 0 days 00:00:00.248785915 +192 85 0 days 00:00:00.248713075 +192 86 0 days 00:00:00.802499896 +192 87 0 days 00:00:00.322541912 +192 88 0 days 00:00:00.289916920 +192 89 0 days 00:00:00.525263596 +192 90 0 days 00:00:00.283610146 +192 91 0 days 00:00:00.237971850 +192 92 0 days 00:00:00.495360534 +192 93 0 days 00:00:00.300199377 +192 94 0 days 00:00:00.269183284 +192 95 0 days 00:00:01.086158905 +192 97 0 days 00:00:00.423509470 +192 98 0 days 00:00:00.265411855 +192 99 0 days 00:00:00.278178323 +192 100 0 days 00:00:00.524299206 +193 1 0 days 00:00:00.172280888 +193 2 0 days 00:00:00.150265060 +193 3 0 days 00:00:00.407933280 +193 4 0 days 00:00:00.270929267 +193 5 0 days 00:00:00.455226108 +193 6 0 days 00:00:00.144369148 +193 7 0 days 00:00:00.252349757 +193 8 0 days 00:00:00.133037940 +193 9 0 days 00:00:00.237467263 +193 10 0 days 00:00:00.170353724 +193 11 0 days 00:00:00.236208708 +193 12 0 days 00:00:00.164099706 +193 13 0 days 00:00:00.140235442 +193 15 0 days 00:00:00.145889690 +193 16 0 days 00:00:00.112329933 +193 17 0 days 00:00:00.404402010 +193 18 0 days 00:00:00.208654206 +193 19 0 days 00:00:00.445120693 +193 20 0 days 00:00:00.208683955 +193 21 0 days 00:00:00.270954192 +193 22 0 days 00:00:00.137781560 +193 23 0 days 00:00:00.245296828 +193 24 0 days 00:00:00.394621865 +193 26 0 days 00:00:00.495228083 +193 27 0 days 00:00:00.433840405 +193 28 0 days 00:00:00.264267014 +193 29 0 days 00:00:00.138568496 +193 30 0 days 00:00:00.117165555 +193 31 0 days 00:00:00.270384711 +193 32 0 days 00:00:00.295634511 +193 33 0 days 00:00:00.246291887 +193 34 0 days 00:00:00.249750950 +193 35 0 days 00:00:00.275462310 +193 36 0 days 00:00:00.461713242 +193 37 0 days 00:00:00.459288160 +193 38 0 
days 00:00:00.126390260 +193 39 0 days 00:00:00.146207723 +193 40 0 days 00:00:00.254586231 +193 41 0 days 00:00:00.135671473 +193 42 0 days 00:00:00.157925110 +193 43 0 days 00:00:00.488013015 +193 44 0 days 00:00:00.283103098 +193 45 0 days 00:00:00.171145156 +193 46 0 days 00:00:00.142497575 +193 47 0 days 00:00:00.249100228 +193 48 0 days 00:00:00.411972530 +193 49 0 days 00:00:00.445408272 +193 50 0 days 00:00:00.142424376 +193 51 0 days 00:00:00.405982008 +193 52 0 days 00:00:00.524548836 +193 53 0 days 00:00:00.261751637 +193 54 0 days 00:00:00.309160296 +193 55 0 days 00:00:00.154847885 +193 56 0 days 00:00:00.481362636 +193 57 0 days 00:00:00.194617526 +193 58 0 days 00:00:00.126666720 +193 59 0 days 00:00:00.261279993 +193 60 0 days 00:00:00.215035080 +193 61 0 days 00:00:00.419512856 +193 62 0 days 00:00:00.299085740 +193 63 0 days 00:00:00.489825668 +193 64 0 days 00:00:00.234544945 +193 65 0 days 00:00:00.235006700 +193 67 0 days 00:00:00.256423240 +193 68 0 days 00:00:00.236580520 +193 69 0 days 00:00:00.237212105 +193 70 0 days 00:00:00.374646575 +193 71 0 days 00:00:00.236864510 +193 72 0 days 00:00:00.251876912 +193 73 0 days 00:00:00.158394788 +193 74 0 days 00:00:00.157701488 +193 75 0 days 00:00:00.426557216 +193 76 0 days 00:00:00.227733160 +193 77 0 days 00:00:00.481818157 +193 78 0 days 00:00:00.182754900 +193 79 0 days 00:00:00.152321115 +193 80 0 days 00:00:00.252088688 +193 81 0 days 00:00:00.170733710 +193 82 0 days 00:00:00.279680197 +193 83 0 days 00:00:00.255274616 +193 84 0 days 00:00:00.154310570 +193 85 0 days 00:00:00.140197210 +193 86 0 days 00:00:00.469856882 +193 87 0 days 00:00:00.290416849 +193 88 0 days 00:00:00.251112172 +193 89 0 days 00:00:00.270265430 +193 90 0 days 00:00:00.126326448 +193 91 0 days 00:00:00.451248130 +193 92 0 days 00:00:00.227563187 +193 93 0 days 00:00:00.197027680 +193 94 0 days 00:00:00.403492045 +193 95 0 days 00:00:00.111649420 +193 96 0 days 00:00:00.216395542 +193 97 0 days 00:00:00.128469175 
+193 98 0 days 00:00:00.193304631 +193 99 0 days 00:00:00.369581990 +193 100 0 days 00:00:00.351935113 +194 1 0 days 00:00:00.875081691 +194 2 0 days 00:00:00.758150930 +194 3 0 days 00:00:00.444260168 +194 4 0 days 00:00:00.490782491 +194 5 0 days 00:00:00.383111680 +194 6 0 days 00:00:00.766673088 +194 7 0 days 00:00:00.146448480 +194 8 0 days 00:00:00.464830932 +194 9 0 days 00:00:00.588297237 +194 10 0 days 00:00:00.853950037 +194 11 0 days 00:00:00.245309600 +194 12 0 days 00:00:00.481279271 +194 13 0 days 00:00:00.310832711 +194 14 0 days 00:00:00.734187530 +194 15 0 days 00:00:00.281727028 +194 16 0 days 00:00:00.278263986 +194 17 0 days 00:00:00.282579176 +194 18 0 days 00:00:00.367603013 +194 19 0 days 00:00:00.321469268 +194 20 0 days 00:00:00.819062948 +194 21 0 days 00:00:00.451978092 +194 22 0 days 00:00:00.772975960 +194 23 0 days 00:00:00.880700778 +194 24 0 days 00:00:00.470804631 +194 25 0 days 00:00:00.271933182 +194 26 0 days 00:00:00.489586255 +194 27 0 days 00:00:00.296687747 +194 28 0 days 00:00:00.372915293 +194 29 0 days 00:00:00.504386492 +194 30 0 days 00:00:00.861911740 +194 31 0 days 00:00:00.746531115 +194 32 0 days 00:00:00.392044120 +194 33 0 days 00:00:00.493409632 +194 34 0 days 00:00:00.471873098 +194 35 0 days 00:00:00.488889311 +194 36 0 days 00:00:00.651400353 +194 37 0 days 00:00:00.498442982 +194 38 0 days 00:00:00.471566112 +194 39 0 days 00:00:00.453468450 +194 40 0 days 00:00:00.510004975 +194 41 0 days 00:00:00.478133402 +194 42 0 days 00:00:00.488462760 +194 43 0 days 00:00:00.501767889 +194 44 0 days 00:00:00.397552560 +194 45 0 days 00:00:00.843391927 +194 46 0 days 00:00:00.440863272 +194 47 0 days 00:00:00.559864216 +194 48 0 days 00:00:00.268505926 +194 49 0 days 00:00:00.276143687 +194 50 0 days 00:00:00.488418002 +194 51 0 days 00:00:00.311229410 +194 52 0 days 00:00:00.495467738 +194 54 0 days 00:00:00.291989758 +194 55 0 days 00:00:00.479887838 +194 56 0 days 00:00:00.476183500 +194 57 0 days 00:00:00.846809536 
+194 58 0 days 00:00:00.204233633 +194 59 0 days 00:00:00.802460816 +194 60 0 days 00:00:00.708065745 +194 61 0 days 00:00:00.456786330 +194 62 0 days 00:00:00.729480080 +194 63 0 days 00:00:00.483584361 +194 64 0 days 00:00:00.453301348 +194 65 0 days 00:00:00.778303232 +194 66 0 days 00:00:00.434971840 +194 67 0 days 00:00:00.402213585 +194 68 0 days 00:00:00.269831148 +194 69 0 days 00:00:00.477036475 +194 70 0 days 00:00:00.780890168 +194 71 0 days 00:00:00.830285445 +194 72 0 days 00:00:00.227888145 +194 73 0 days 00:00:00.233454110 +194 74 0 days 00:00:00.208014106 +194 75 0 days 00:00:00.781055290 +194 76 0 days 00:00:00.874367496 +194 77 0 days 00:00:00.473058951 +194 78 0 days 00:00:00.357721366 +194 79 0 days 00:00:00.602849613 +194 80 0 days 00:00:00.236402655 +194 81 0 days 00:00:00.327071918 +194 82 0 days 00:00:00.271724008 +194 83 0 days 00:00:00.793889025 +194 84 0 days 00:00:00.443589423 +194 85 0 days 00:00:00.497835578 +194 86 0 days 00:00:00.265476860 +194 87 0 days 00:00:00.594173827 +194 88 0 days 00:00:00.240682955 +194 89 0 days 00:00:00.251210366 +194 90 0 days 00:00:00.463883362 +194 91 0 days 00:00:00.492520880 +194 92 0 days 00:00:00.272438244 +194 93 0 days 00:00:00.242057545 +194 94 0 days 00:00:00.259818263 +194 95 0 days 00:00:00.487529597 +194 96 0 days 00:00:00.279807258 +194 97 0 days 00:00:00.462051016 +194 98 0 days 00:00:00.856432927 +194 99 0 days 00:00:00.324572905 +194 100 0 days 00:00:00.456872185 +195 1 0 days 00:00:00.128080370 +195 2 0 days 00:00:00.447191631 +195 3 0 days 00:00:00.132216455 +195 4 0 days 00:00:00.264623392 +195 5 0 days 00:00:00.459198980 +195 6 0 days 00:00:00.141408558 +195 7 0 days 00:00:00.121972393 +195 8 0 days 00:00:00.146783718 +195 9 0 days 00:00:00.435303671 +195 10 0 days 00:00:00.234349220 +195 11 0 days 00:00:00.338220506 +195 12 0 days 00:00:00.214589900 +195 13 0 days 00:00:00.426296400 +195 14 0 days 00:00:00.456616676 +195 15 0 days 00:00:00.268203160 +195 16 0 days 00:00:00.459037056 
+195 17 0 days 00:00:00.441253960 +195 18 0 days 00:00:00.111897633 +195 19 0 days 00:00:00.452571676 +195 20 0 days 00:00:00.146770461 +195 21 0 days 00:00:00.459819050 +195 22 0 days 00:00:00.118496360 +195 23 0 days 00:00:00.410196315 +195 24 0 days 00:00:00.393110230 +195 25 0 days 00:00:00.145390520 +195 26 0 days 00:00:00.378618425 +195 27 0 days 00:00:00.436266260 +195 28 0 days 00:00:00.232074788 +195 29 0 days 00:00:00.467165628 +195 30 0 days 00:00:00.492573926 +195 31 0 days 00:00:00.168976562 +195 32 0 days 00:00:00.125819535 +195 33 0 days 00:00:00.163427797 +195 34 0 days 00:00:00.199811473 +195 35 0 days 00:00:00.113824986 +195 36 0 days 00:00:00.464896527 +195 37 0 days 00:00:00.470908833 +195 38 0 days 00:00:00.248109640 +195 39 0 days 00:00:00.299274111 +195 40 0 days 00:00:00.154933451 +195 41 0 days 00:00:00.232771464 +195 42 0 days 00:00:00.248272640 +195 43 0 days 00:00:00.141437680 +195 44 0 days 00:00:00.228294460 +195 45 0 days 00:00:00.150790697 +195 46 0 days 00:00:00.465249282 +195 47 0 days 00:00:00.140947752 +195 48 0 days 00:00:00.406329144 +195 49 0 days 00:00:00.124413080 +195 50 0 days 00:00:00.111074780 +195 51 0 days 00:00:00.267192852 +195 52 0 days 00:00:00.172437722 +195 53 0 days 00:00:00.392140080 +195 54 0 days 00:00:00.408785312 +195 55 0 days 00:00:00.120244833 +195 56 0 days 00:00:00.248060780 +195 57 0 days 00:00:00.271720916 +195 58 0 days 00:00:00.134792288 +195 59 0 days 00:00:00.247915915 +195 60 0 days 00:00:00.217735083 +195 61 0 days 00:00:00.433059168 +195 62 0 days 00:00:00.252543358 +195 63 0 days 00:00:00.243841006 +195 64 0 days 00:00:00.125209220 +195 65 0 days 00:00:00.470200320 +195 66 0 days 00:00:00.127822390 +195 67 0 days 00:00:00.263942698 +195 68 0 days 00:00:00.254166576 +195 69 0 days 00:00:00.421882216 +195 70 0 days 00:00:00.187256006 +195 71 0 days 00:00:00.222211685 +195 72 0 days 00:00:00.242598220 +195 73 0 days 00:00:00.243021215 +195 74 0 days 00:00:00.122440045 +195 75 0 days 
00:00:00.222391456 +195 76 0 days 00:00:00.441529890 +195 77 0 days 00:00:00.263478435 +195 78 0 days 00:00:00.127826700 +195 79 0 days 00:00:00.422848900 +195 80 0 days 00:00:00.235491946 +195 81 0 days 00:00:00.373030720 +195 82 0 days 00:00:00.147488032 +195 83 0 days 00:00:00.457799468 +195 84 0 days 00:00:00.147504352 +195 85 0 days 00:00:00.448211910 +195 86 0 days 00:00:00.247238077 +195 87 0 days 00:00:00.441051983 +195 88 0 days 00:00:00.258165405 +195 89 0 days 00:00:00.136046906 +195 90 0 days 00:00:00.148489340 +195 91 0 days 00:00:00.109088220 +195 92 0 days 00:00:00.454871753 +195 93 0 days 00:00:00.273704648 +195 94 0 days 00:00:00.259971350 +195 95 0 days 00:00:00.255890548 +195 96 0 days 00:00:00.120860010 +195 97 0 days 00:00:00.253522572 +195 98 0 days 00:00:00.147213664 +195 99 0 days 00:00:00.143833702 +195 100 0 days 00:00:00.408953410 +196 1 0 days 00:00:09.405700565 +196 2 0 days 00:00:03.195323768 +196 3 0 days 00:00:03.066680445 +196 4 0 days 00:00:05.644920260 +196 5 0 days 00:00:03.451009460 +196 6 0 days 00:00:06.814884433 +196 7 0 days 00:00:08.046623623 +196 8 0 days 00:00:02.892287472 +196 9 0 days 00:00:03.075261180 +196 10 0 days 00:00:05.934583437 +196 11 0 days 00:00:06.581465733 +196 12 0 days 00:00:03.688464025 +196 13 0 days 00:00:05.883800516 +196 14 0 days 00:00:08.699408732 +196 15 0 days 00:00:04.673595003 +196 16 0 days 00:00:04.264937700 +196 17 0 days 00:00:03.026672015 +196 18 0 days 00:00:04.430180650 +196 19 0 days 00:00:08.849481448 +196 20 0 days 00:00:03.071948983 +196 21 0 days 00:00:04.826679440 +196 22 0 days 00:00:03.213524746 +196 23 0 days 00:00:05.107444256 +196 24 0 days 00:00:05.316605717 +196 25 0 days 00:00:03.013167626 +196 26 0 days 00:00:08.083685888 +196 27 0 days 00:00:04.433419120 +196 28 0 days 00:00:04.280473815 +196 29 0 days 00:00:08.308106485 +196 30 0 days 00:00:03.308402580 +196 31 0 days 00:00:08.169831735 +196 32 0 days 00:00:03.492654565 +196 33 0 days 00:00:04.820124860 +196 34 0 days 
00:00:04.296693140 +196 35 0 days 00:00:03.251181933 +196 36 0 days 00:00:04.828531086 +196 37 0 days 00:00:04.420538155 +196 38 0 days 00:00:04.732985093 +196 39 0 days 00:00:02.733920525 +196 40 0 days 00:00:02.618112460 +196 41 0 days 00:00:04.795013000 +196 42 0 days 00:00:02.811254045 +196 43 0 days 00:00:06.524087246 +196 44 0 days 00:00:03.363234230 +196 45 0 days 00:00:09.333359125 +196 46 0 days 00:00:04.696520980 +196 47 0 days 00:00:03.511008310 +196 48 0 days 00:00:02.832056095 +196 49 0 days 00:00:02.954755670 +196 50 0 days 00:00:02.861818645 +196 51 0 days 00:00:02.580536700 +196 52 0 days 00:00:04.440745955 +196 53 0 days 00:00:03.319131323 +196 54 0 days 00:00:03.123208656 +196 55 0 days 00:00:03.502307680 +196 56 0 days 00:00:03.199189275 +196 57 0 days 00:00:02.698153875 +196 58 0 days 00:00:08.873565236 +196 59 0 days 00:00:07.281300455 +196 60 0 days 00:00:06.084430573 +196 61 0 days 00:00:05.313486006 +196 62 0 days 00:00:08.679008780 +196 63 0 days 00:00:03.504955570 +196 64 0 days 00:00:09.152318405 +196 65 0 days 00:00:07.410666805 +196 66 0 days 00:00:03.116160335 +196 67 0 days 00:00:04.824402945 +196 68 0 days 00:00:04.716862210 +196 69 0 days 00:00:02.868159110 +196 70 0 days 00:00:03.062248855 +196 71 0 days 00:00:08.214585566 +196 72 0 days 00:00:03.183125185 +196 73 0 days 00:00:03.688578315 +196 74 0 days 00:00:05.315051085 +196 75 0 days 00:00:03.147239032 +196 76 0 days 00:00:04.319730435 +196 77 0 days 00:00:08.620721256 +196 78 0 days 00:00:04.990878792 +197 1 0 days 00:00:09.030982283 +197 2 0 days 00:00:07.953111270 +197 3 0 days 00:00:08.089919593 +197 4 0 days 00:00:03.321132692 +197 5 0 days 00:00:04.988090480 +197 6 0 days 00:00:04.690242225 +197 7 0 days 00:00:03.065958528 +197 8 0 days 00:00:05.093669540 +197 9 0 days 00:00:03.567117088 +197 10 0 days 00:00:05.123024895 +197 11 0 days 00:00:09.692471745 +197 12 0 days 00:00:08.820192540 +197 13 0 days 00:00:07.107821220 +197 14 0 days 00:00:05.566328355 +197 15 0 days 
00:00:05.827474815 +197 16 0 days 00:00:05.030265555 +197 17 0 days 00:00:02.907678380 +197 18 0 days 00:00:05.636713780 +197 19 0 days 00:00:05.170316516 +197 20 0 days 00:00:03.000029440 +197 21 0 days 00:00:04.589869640 +197 22 0 days 00:00:06.683869213 +197 23 0 days 00:00:04.607523240 +197 24 0 days 00:00:03.118935515 +197 25 0 days 00:00:08.792929404 +197 26 0 days 00:00:05.626297393 +197 27 0 days 00:00:08.261540250 +197 28 0 days 00:00:03.582513408 +197 29 0 days 00:00:07.413312106 +197 30 0 days 00:00:03.377834640 +197 31 0 days 00:00:05.163491780 +197 32 0 days 00:00:03.611756495 +197 33 0 days 00:00:09.953622180 +197 34 0 days 00:00:05.000038955 +197 35 0 days 00:00:04.781124595 +197 36 0 days 00:00:08.141995340 +197 37 0 days 00:00:05.505886600 +197 38 0 days 00:00:08.048829715 +197 39 0 days 00:00:02.873784740 +197 40 0 days 00:00:02.999348105 +197 41 0 days 00:00:05.566550910 +197 42 0 days 00:00:08.274661335 +197 43 0 days 00:00:04.640299705 +197 44 0 days 00:00:04.639409470 +197 45 0 days 00:00:09.712684895 +197 46 0 days 00:00:03.624050215 +197 47 0 days 00:00:09.811652160 +197 48 0 days 00:00:06.139139564 +197 49 0 days 00:00:08.773720580 +197 50 0 days 00:00:04.647005753 +197 51 0 days 00:00:09.743484816 +197 52 0 days 00:00:03.303088970 +197 53 0 days 00:00:10.319272564 +197 54 0 days 00:00:08.958285290 +197 55 0 days 00:00:03.150728683 +197 56 0 days 00:00:08.778076570 +197 57 0 days 00:00:04.144535660 +197 58 0 days 00:00:05.164682535 +197 59 0 days 00:00:06.473963580 +197 60 0 days 00:00:05.630485250 +197 61 0 days 00:00:08.290270580 +197 62 0 days 00:00:03.866849205 +197 63 0 days 00:00:07.989404936 +197 64 0 days 00:00:08.023970240 +197 65 0 days 00:00:05.027708120 +197 66 0 days 00:00:07.211732286 +197 67 0 days 00:00:10.459130976 +198 1 0 days 00:00:01.631414935 +198 2 0 days 00:00:02.489984680 +198 3 0 days 00:00:01.462916765 +198 4 0 days 00:00:02.046791206 +198 5 0 days 00:00:02.782598645 +198 6 0 days 00:00:02.305075156 +198 7 0 days 
00:00:02.730014375 +198 8 0 days 00:00:03.039819626 +198 9 0 days 00:00:03.727452245 +198 10 0 days 00:00:02.486677090 +198 11 0 days 00:00:04.240572413 +198 12 0 days 00:00:02.366877340 +198 13 0 days 00:00:01.502514420 +198 14 0 days 00:00:02.597210708 +198 15 0 days 00:00:03.798632213 +198 16 0 days 00:00:02.232845760 +198 17 0 days 00:00:02.246076646 +198 18 0 days 00:00:02.168848565 +198 19 0 days 00:00:04.712466037 +198 20 0 days 00:00:03.456532593 +198 21 0 days 00:00:02.794523185 +198 22 0 days 00:00:01.602235028 +198 23 0 days 00:00:01.509118353 +198 24 0 days 00:00:03.842895773 +198 25 0 days 00:00:02.368683655 +198 26 0 days 00:00:03.839186155 +198 27 0 days 00:00:03.782332446 +198 28 0 days 00:00:02.598060546 +198 29 0 days 00:00:01.720345960 +198 30 0 days 00:00:02.602583153 +198 31 0 days 00:00:02.732189050 +198 32 0 days 00:00:01.663128120 +198 33 0 days 00:00:03.675202586 +198 34 0 days 00:00:02.733376490 +198 35 0 days 00:00:01.574438488 +198 36 0 days 00:00:02.681468710 +198 37 0 days 00:00:04.219306800 +198 38 0 days 00:00:03.481174480 +198 39 0 days 00:00:03.104578777 +198 40 0 days 00:00:01.588461350 +198 41 0 days 00:00:04.225064810 +198 42 0 days 00:00:02.714734577 +198 43 0 days 00:00:01.447661575 +198 44 0 days 00:00:02.071551446 +198 45 0 days 00:00:01.766298112 +198 46 0 days 00:00:02.486367613 +198 47 0 days 00:00:04.166769666 +198 48 0 days 00:00:03.692391730 +198 49 0 days 00:00:02.686956753 +198 50 0 days 00:00:04.212125085 +198 51 0 days 00:00:04.115762070 +198 52 0 days 00:00:04.594820710 +198 53 0 days 00:00:03.296325520 +198 54 0 days 00:00:01.470105170 +198 55 0 days 00:00:04.739971062 +198 56 0 days 00:00:01.874083366 +198 57 0 days 00:00:02.842781890 +198 58 0 days 00:00:02.715647266 +198 59 0 days 00:00:04.113242090 +198 60 0 days 00:00:02.520932824 +198 61 0 days 00:00:03.349791626 +198 62 0 days 00:00:04.645994180 +198 63 0 days 00:00:02.470867995 +198 64 0 days 00:00:04.043840108 +198 65 0 days 00:00:02.460565940 +198 66 0 
days 00:00:02.247501340 +198 67 0 days 00:00:03.782768380 +198 68 0 days 00:00:01.892818208 +198 69 0 days 00:00:03.699394145 +198 70 0 days 00:00:02.378437450 +198 71 0 days 00:00:04.650483570 +198 72 0 days 00:00:01.712034844 +198 73 0 days 00:00:03.259749140 +198 74 0 days 00:00:02.149421200 +198 75 0 days 00:00:03.730664995 +198 76 0 days 00:00:04.213100326 +198 77 0 days 00:00:03.660425920 +198 78 0 days 00:00:03.805987920 +198 79 0 days 00:00:01.456008810 +198 80 0 days 00:00:04.180599245 +198 81 0 days 00:00:01.673407270 +198 82 0 days 00:00:02.589665926 +198 83 0 days 00:00:02.504252760 +198 84 0 days 00:00:02.251011425 +198 85 0 days 00:00:02.379579000 +198 86 0 days 00:00:01.582004268 +198 87 0 days 00:00:01.728736816 +198 88 0 days 00:00:02.270152530 +198 89 0 days 00:00:02.783549030 +198 90 0 days 00:00:01.589978120 +198 91 0 days 00:00:01.638791470 +198 92 0 days 00:00:05.224790974 +198 93 0 days 00:00:02.751154215 +198 94 0 days 00:00:04.466344072 +198 95 0 days 00:00:04.468975008 +198 96 0 days 00:00:04.088702935 +198 97 0 days 00:00:02.250960370 +198 98 0 days 00:00:01.605311210 +198 99 0 days 00:00:02.597001936 +198 100 0 days 00:00:02.477933020 +199 1 0 days 00:00:02.647825185 +199 2 0 days 00:00:02.897298240 +199 3 0 days 00:00:04.595227745 +199 4 0 days 00:00:01.558309880 +199 5 0 days 00:00:04.252982032 +199 6 0 days 00:00:01.724609565 +199 7 0 days 00:00:04.723639956 +199 8 0 days 00:00:02.658677950 +199 9 0 days 00:00:03.726015353 +199 10 0 days 00:00:01.467928870 +199 11 0 days 00:00:04.902094860 +199 12 0 days 00:00:02.546304280 +199 13 0 days 00:00:04.471461333 +199 14 0 days 00:00:01.576882255 +199 15 0 days 00:00:05.424399063 +199 16 0 days 00:00:01.675274520 +199 17 0 days 00:00:02.656765000 +199 18 0 days 00:00:01.734146835 +199 19 0 days 00:00:01.843192316 +199 20 0 days 00:00:04.517828875 +199 21 0 days 00:00:01.829571572 +199 22 0 days 00:00:02.348474195 +199 23 0 days 00:00:04.420198325 +199 24 0 days 00:00:02.124590828 +199 25 0 
days 00:00:02.410940726 +199 26 0 days 00:00:04.589843005 +199 27 0 days 00:00:02.341397980 +199 28 0 days 00:00:02.787944676 +199 29 0 days 00:00:01.691364184 +199 30 0 days 00:00:05.158460651 +199 31 0 days 00:00:01.765744690 +199 32 0 days 00:00:02.851553264 +199 33 0 days 00:00:02.643717230 +199 34 0 days 00:00:04.255611325 +199 35 0 days 00:00:01.684674840 +199 36 0 days 00:00:04.962047530 +199 37 0 days 00:00:01.702522172 +199 38 0 days 00:00:02.221659540 +199 39 0 days 00:00:01.472984955 +199 40 0 days 00:00:02.550462530 +199 41 0 days 00:00:02.801548100 +199 42 0 days 00:00:04.965741320 +199 43 0 days 00:00:04.144002313 +199 44 0 days 00:00:01.584532235 +199 45 0 days 00:00:02.641758950 +199 46 0 days 00:00:01.908284705 +199 47 0 days 00:00:02.541959365 +199 48 0 days 00:00:02.218962580 +199 49 0 days 00:00:04.584842845 +199 50 0 days 00:00:01.547324290 +199 51 0 days 00:00:04.437057925 +199 52 0 days 00:00:04.832380736 +199 53 0 days 00:00:04.946382796 +199 54 0 days 00:00:02.916586840 +199 55 0 days 00:00:04.162831765 +199 56 0 days 00:00:03.039796380 +199 57 0 days 00:00:02.446856190 +199 58 0 days 00:00:01.480994490 +199 59 0 days 00:00:01.886479190 +199 60 0 days 00:00:04.183202080 +199 61 0 days 00:00:03.607730400 +199 62 0 days 00:00:02.475950793 +199 63 0 days 00:00:03.987431900 +199 64 0 days 00:00:03.787444640 +199 65 0 days 00:00:03.029162176 +199 66 0 days 00:00:03.783980545 +199 67 0 days 00:00:02.247871113 +199 68 0 days 00:00:02.638377440 +199 69 0 days 00:00:04.571440180 +199 70 0 days 00:00:04.580026490 +199 71 0 days 00:00:01.682262300 +199 72 0 days 00:00:03.292933020 +199 73 0 days 00:00:04.464808130 +199 74 0 days 00:00:04.320087176 +199 75 0 days 00:00:01.487881615 +199 76 0 days 00:00:01.916463200 +199 77 0 days 00:00:01.476684250 +199 78 0 days 00:00:05.043118325 +199 79 0 days 00:00:04.193292405 +199 80 0 days 00:00:02.126511740 +199 81 0 days 00:00:01.598447070 +199 82 0 days 00:00:02.400520700 +199 83 0 days 00:00:02.887431296 
+199 84 0 days 00:00:02.551427620 +199 85 0 days 00:00:01.768407745 +199 86 0 days 00:00:04.496263713 +199 87 0 days 00:00:02.102775780 +199 88 0 days 00:00:04.186068320 +199 89 0 days 00:00:01.686824330 +199 90 0 days 00:00:01.463934306 +199 91 0 days 00:00:01.795469232 +199 92 0 days 00:00:02.339115715 +199 93 0 days 00:00:02.694651525 +199 94 0 days 00:00:02.487867652 +199 95 0 days 00:00:02.023541596 +199 96 0 days 00:00:02.555987600 +199 97 0 days 00:00:01.832551948 +199 98 0 days 00:00:02.302133140 +199 99 0 days 00:00:02.757724280 +199 100 0 days 00:00:05.275266607 +200 1 0 days 00:00:44.360171100 +200 2 0 days 00:00:44.230577312 +200 3 0 days 00:00:44.173909340 +200 4 0 days 00:00:18.139330444 +200 5 0 days 00:00:44.198058420 +200 6 0 days 00:00:41.576440050 +200 7 0 days 00:00:19.213397474 +200 8 0 days 00:00:26.710291828 +200 9 0 days 00:00:41.142984590 +200 10 0 days 00:00:24.813384080 +200 11 0 days 00:00:41.149347430 +201 1 0 days 00:00:10.497102335 +201 2 0 days 00:00:16.449730300 +201 3 0 days 00:00:10.352496920 +201 4 0 days 00:00:27.217493604 +201 5 0 days 00:00:25.175685090 +201 6 0 days 00:00:15.472529320 +201 7 0 days 00:00:28.289421566 +201 8 0 days 00:00:15.202501520 +201 9 0 days 00:00:10.696444125 +201 10 0 days 00:00:16.193880960 +201 11 0 days 00:00:11.797633516 +201 12 0 days 00:00:27.886521250 +201 13 0 days 00:00:25.200989565 +201 14 0 days 00:00:11.225613320 +201 15 0 days 00:00:11.844263082 +201 16 0 days 00:00:16.248900592 +201 17 0 days 00:00:29.158117242 +201 18 0 days 00:00:28.293265820 +202 1 0 days 00:00:26.940214140 +202 2 0 days 00:00:27.985212186 +202 3 0 days 00:00:18.237221756 +202 4 0 days 00:00:29.029485332 +202 5 0 days 00:00:26.856135496 +202 6 0 days 00:00:28.939077542 +202 7 0 days 00:00:20.743675587 +202 8 0 days 00:00:29.414082165 +202 9 0 days 00:00:50.984431495 +203 1 0 days 00:00:20.080186257 +203 2 0 days 00:00:44.835309332 +203 3 0 days 00:00:21.204593549 +203 4 0 days 00:00:47.775296897 +203 5 0 days 
00:00:48.825983907 +203 6 0 days 00:00:46.631981010 +203 7 0 days 00:00:20.948469112 +204 1 0 days 00:00:13.092661252 +204 2 0 days 00:00:12.623721255 +204 3 0 days 00:00:26.710914264 +204 4 0 days 00:00:18.658768401 +204 5 0 days 00:00:17.252334791 +204 6 0 days 00:00:27.811462550 +204 7 0 days 00:00:11.015705248 +204 8 0 days 00:00:26.727965400 +204 9 0 days 00:00:30.213758184 +204 10 0 days 00:00:13.066389892 +204 11 0 days 00:00:17.865186648 +205 1 0 days 00:00:26.890719396 +205 2 0 days 00:00:17.578785762 +205 3 0 days 00:00:30.063859896 +205 4 0 days 00:00:30.765925671 +205 5 0 days 00:00:11.717046536 +205 6 0 days 00:00:11.294631976 +205 7 0 days 00:00:28.086850663 +205 8 0 days 00:00:18.137657427 +205 9 0 days 00:00:29.424557720 +205 10 0 days 00:00:31.308791180 +206 1 0 days 00:00:03.697031072 +206 2 0 days 00:00:04.743729853 +206 3 0 days 00:00:04.855475485 +206 4 0 days 00:00:07.320332340 +206 5 0 days 00:00:05.469089356 +206 6 0 days 00:00:03.533449332 +206 7 0 days 00:00:05.034103664 +206 8 0 days 00:00:04.886317255 +206 9 0 days 00:00:05.312098840 +206 10 0 days 00:00:05.629050960 +206 11 0 days 00:00:03.126311500 +206 12 0 days 00:00:08.936446192 +206 13 0 days 00:00:11.065892324 +206 14 0 days 00:00:03.690277490 +206 15 0 days 00:00:03.060948393 +206 16 0 days 00:00:10.162757285 +206 17 0 days 00:00:04.366941960 +206 18 0 days 00:00:07.607533520 +206 19 0 days 00:00:06.341239396 +206 20 0 days 00:00:05.135149850 +206 21 0 days 00:00:05.495404632 +206 22 0 days 00:00:05.316267900 +206 23 0 days 00:00:03.625252765 +206 24 0 days 00:00:04.036648356 +206 25 0 days 00:00:04.222961986 +206 26 0 days 00:00:08.950718260 +206 27 0 days 00:00:04.731965326 +206 28 0 days 00:00:08.971206920 +206 29 0 days 00:00:03.459721640 +206 30 0 days 00:00:05.444189460 +206 31 0 days 00:00:09.012322710 +206 32 0 days 00:00:03.037495873 +206 33 0 days 00:00:03.742082985 +206 34 0 days 00:00:08.062239846 +206 35 0 days 00:00:07.355956333 +206 36 0 days 00:00:10.130025383 
+206 37 0 days 00:00:08.022747206 +206 38 0 days 00:00:09.227950215 +206 39 0 days 00:00:10.499016428 +206 40 0 days 00:00:02.754965906 +206 41 0 days 00:00:05.169020232 +206 42 0 days 00:00:02.928619925 +206 43 0 days 00:00:02.969048406 +206 44 0 days 00:00:03.131975208 +206 45 0 days 00:00:03.018817546 +206 46 0 days 00:00:03.396132390 +206 47 0 days 00:00:03.464957500 +206 48 0 days 00:00:05.252913775 +206 49 0 days 00:00:09.902030965 +206 50 0 days 00:00:07.354650906 +206 51 0 days 00:00:09.776014540 +206 52 0 days 00:00:03.543294828 +206 53 0 days 00:00:04.248502933 +206 54 0 days 00:00:02.920008555 +206 55 0 days 00:00:09.314632305 +206 56 0 days 00:00:09.783134072 +206 57 0 days 00:00:03.774129165 +206 58 0 days 00:00:05.492259468 +206 59 0 days 00:00:03.474584980 +206 60 0 days 00:00:03.189374220 +206 61 0 days 00:00:10.211681883 +206 62 0 days 00:00:04.766492846 +206 63 0 days 00:00:03.741377720 +207 1 0 days 00:00:01.638497233 +207 2 0 days 00:00:04.044082600 +207 3 0 days 00:00:01.689083950 +207 4 0 days 00:00:01.819074397 +207 5 0 days 00:00:04.245023700 +207 6 0 days 00:00:01.642048640 +207 7 0 days 00:00:01.672725220 +207 8 0 days 00:00:04.223191626 +207 9 0 days 00:00:03.095466124 +207 10 0 days 00:00:04.700054450 +207 11 0 days 00:00:04.137738420 +207 12 0 days 00:00:01.811233136 +207 13 0 days 00:00:01.367413333 +207 14 0 days 00:00:01.722709940 +207 15 0 days 00:00:01.646679793 +207 16 0 days 00:00:01.621755730 +207 17 0 days 00:00:04.073777246 +207 18 0 days 00:00:04.629909405 +207 19 0 days 00:00:04.561726000 +207 20 0 days 00:00:01.741457295 +207 21 0 days 00:00:02.690392660 +207 22 0 days 00:00:05.587244160 +207 23 0 days 00:00:02.454997093 +207 24 0 days 00:00:02.268154393 +207 25 0 days 00:00:02.340485213 +207 26 0 days 00:00:04.549433496 +207 27 0 days 00:00:01.805332853 +207 28 0 days 00:00:02.686597455 +207 29 0 days 00:00:02.104537248 +207 30 0 days 00:00:01.585186986 +207 31 0 days 00:00:01.819001284 +207 32 0 days 00:00:03.736340250 
+207 33 0 days 00:00:04.911035868 +207 34 0 days 00:00:01.880198355 +207 35 0 days 00:00:02.339944720 +207 36 0 days 00:00:01.931616394 +207 37 0 days 00:00:01.790180360 +207 38 0 days 00:00:01.603411960 +207 39 0 days 00:00:04.396635380 +207 40 0 days 00:00:04.211423226 +207 41 0 days 00:00:02.848437820 +207 42 0 days 00:00:04.608346873 +207 43 0 days 00:00:02.264658080 +207 44 0 days 00:00:01.689895056 +207 45 0 days 00:00:04.953139308 +207 46 0 days 00:00:04.538223435 +207 47 0 days 00:00:04.715866536 +207 48 0 days 00:00:01.581688160 +207 49 0 days 00:00:04.234075726 +207 50 0 days 00:00:06.078360961 +207 51 0 days 00:00:05.049845930 +207 52 0 days 00:00:05.814955877 +207 53 0 days 00:00:01.454779806 +207 54 0 days 00:00:02.690663473 +207 55 0 days 00:00:03.860980553 +207 56 0 days 00:00:04.284822550 +207 57 0 days 00:00:03.006784142 +207 58 0 days 00:00:02.589727240 +207 59 0 days 00:00:05.283836308 +207 60 0 days 00:00:04.621881340 +207 61 0 days 00:00:04.683524015 +207 62 0 days 00:00:01.690029376 +207 63 0 days 00:00:01.693648735 +207 64 0 days 00:00:04.553842346 +207 65 0 days 00:00:04.596843820 +207 66 0 days 00:00:02.373326455 +207 67 0 days 00:00:03.879374726 +207 68 0 days 00:00:04.815432732 +207 69 0 days 00:00:01.770526253 +207 70 0 days 00:00:03.036143940 +207 71 0 days 00:00:04.225891513 +207 72 0 days 00:00:02.880889352 +207 73 0 days 00:00:02.984162515 +207 74 0 days 00:00:01.549103806 +207 75 0 days 00:00:04.212504733 +207 76 0 days 00:00:05.707539351 +207 77 0 days 00:00:01.815477112 +207 78 0 days 00:00:01.859488173 +207 79 0 days 00:00:01.568511430 +207 80 0 days 00:00:04.795954768 +207 81 0 days 00:00:02.246016073 +207 82 0 days 00:00:04.221731946 +207 83 0 days 00:00:02.491546595 +207 84 0 days 00:00:02.470913326 +207 85 0 days 00:00:03.045167470 +207 86 0 days 00:00:01.582307060 +207 87 0 days 00:00:02.230556200 +207 88 0 days 00:00:05.084503813 +207 89 0 days 00:00:01.625468466 +207 90 0 days 00:00:02.519287300 +207 91 0 days 
00:00:01.454558293 +207 92 0 days 00:00:04.672936765 +207 93 0 days 00:00:01.573935820 +207 94 0 days 00:00:02.993353657 +207 95 0 days 00:00:02.329296766 +207 96 0 days 00:00:02.584777280 +207 97 0 days 00:00:02.338793920 +207 98 0 days 00:00:02.693993986 +207 99 0 days 00:00:03.869310073 +207 100 0 days 00:00:03.879280255 +208 1 0 days 00:00:04.095725220 +208 2 0 days 00:00:08.170351795 +208 3 0 days 00:00:13.035778076 +208 4 0 days 00:00:09.834563196 +208 5 0 days 00:00:05.384651725 +208 6 0 days 00:00:10.619937830 +208 7 0 days 00:00:09.232845930 +208 8 0 days 00:00:07.378121310 +208 9 0 days 00:00:03.848400525 +208 10 0 days 00:00:04.118583946 +208 11 0 days 00:00:02.729651833 +208 12 0 days 00:00:04.779571360 +208 13 0 days 00:00:09.008643886 +208 14 0 days 00:00:09.582199100 +208 15 0 days 00:00:09.115260865 +208 16 0 days 00:00:06.881515292 +208 17 0 days 00:00:05.219586210 +208 18 0 days 00:00:03.712691200 +208 19 0 days 00:00:03.349956560 +208 20 0 days 00:00:11.164785845 +208 21 0 days 00:00:04.656870840 +208 22 0 days 00:00:03.319674920 +208 23 0 days 00:00:06.785493950 +208 24 0 days 00:00:04.613776220 +208 25 0 days 00:00:03.017228240 +208 26 0 days 00:00:04.936450700 +208 27 0 days 00:00:03.636623306 +208 28 0 days 00:00:05.068511295 +208 29 0 days 00:00:07.292733048 +208 30 0 days 00:00:09.354071575 +208 31 0 days 00:00:10.355886685 +208 32 0 days 00:00:03.300753420 +208 33 0 days 00:00:03.381447586 +208 34 0 days 00:00:09.308289393 +208 35 0 days 00:00:07.872304364 +208 36 0 days 00:00:10.950325815 +208 37 0 days 00:00:05.187854710 +208 38 0 days 00:00:07.774232526 +208 39 0 days 00:00:06.551240945 +208 40 0 days 00:00:08.066546360 +208 41 0 days 00:00:02.795133893 +208 42 0 days 00:00:08.104846973 +208 43 0 days 00:00:08.302323126 +208 44 0 days 00:00:03.936953700 +208 45 0 days 00:00:03.249711780 +208 46 0 days 00:00:07.325390810 +208 47 0 days 00:00:03.499006135 +208 48 0 days 00:00:11.488920405 +208 49 0 days 00:00:10.968661873 +208 50 0 days 
00:00:05.741265460 +208 51 0 days 00:00:08.127574893 +208 52 0 days 00:00:10.857835880 +208 53 0 days 00:00:06.384526985 +208 54 0 days 00:00:08.001804850 +208 55 0 days 00:00:08.630957000 +208 56 0 days 00:00:04.821236575 +208 57 0 days 00:00:12.917520100 +208 58 0 days 00:00:06.746187610 +208 59 0 days 00:00:10.532293615 +208 60 0 days 00:00:08.304414846 +208 61 0 days 00:00:12.536605855 +209 1 0 days 00:00:03.650903433 +209 2 0 days 00:00:04.577659265 +209 3 0 days 00:00:03.028929260 +209 4 0 days 00:00:02.529101835 +209 5 0 days 00:00:01.450201206 +209 6 0 days 00:00:01.962763365 +209 7 0 days 00:00:03.283112120 +209 8 0 days 00:00:01.543150870 +209 9 0 days 00:00:01.503690753 +209 10 0 days 00:00:03.077515380 +209 11 0 days 00:00:02.088188920 +209 12 0 days 00:00:04.743886615 +209 13 0 days 00:00:02.153184333 +209 14 0 days 00:00:01.965896537 +209 15 0 days 00:00:05.656395268 +209 16 0 days 00:00:06.160513605 +209 17 0 days 00:00:03.189028480 +209 18 0 days 00:00:05.253108775 +209 19 0 days 00:00:01.679466870 +209 20 0 days 00:00:03.179913103 +209 21 0 days 00:00:01.936001292 +209 22 0 days 00:00:05.800178515 +209 23 0 days 00:00:01.610584115 +209 24 0 days 00:00:01.841438105 +209 25 0 days 00:00:03.773731870 +209 26 0 days 00:00:01.549900335 +209 27 0 days 00:00:01.663688920 +209 28 0 days 00:00:03.909989986 +209 29 0 days 00:00:03.418587370 +209 30 0 days 00:00:05.666668770 +209 31 0 days 00:00:03.545451840 +209 32 0 days 00:00:01.516745505 +209 33 0 days 00:00:04.314946855 +209 34 0 days 00:00:01.870867360 +209 35 0 days 00:00:05.833597996 +209 36 0 days 00:00:05.991230055 +209 37 0 days 00:00:01.972377436 +209 38 0 days 00:00:02.006425644 +209 39 0 days 00:00:06.224238228 +209 40 0 days 00:00:02.928779397 +209 41 0 days 00:00:05.487173820 +209 42 0 days 00:00:02.101311780 +209 43 0 days 00:00:02.418001305 +209 44 0 days 00:00:02.552011630 +209 45 0 days 00:00:02.519895265 +209 46 0 days 00:00:02.712417633 +209 47 0 days 00:00:05.755507580 +209 48 0 days 
00:00:06.127450140 +209 49 0 days 00:00:02.274953125 +209 50 0 days 00:00:01.890673530 +209 51 0 days 00:00:04.984502323 +209 52 0 days 00:00:06.193421603 +209 53 0 days 00:00:02.316272100 +209 54 0 days 00:00:03.398669085 +209 55 0 days 00:00:02.122445455 +209 56 0 days 00:00:07.026077036 +209 57 0 days 00:00:03.342043125 +209 58 0 days 00:00:02.793647835 +209 59 0 days 00:00:01.614961245 +209 60 0 days 00:00:02.086476793 +209 61 0 days 00:00:03.183525010 +209 62 0 days 00:00:02.165881110 +209 63 0 days 00:00:04.814200835 +209 64 0 days 00:00:02.257905065 +209 65 0 days 00:00:01.929523393 +209 66 0 days 00:00:02.841542183 +209 67 0 days 00:00:04.329320910 +209 68 0 days 00:00:02.379425452 +209 69 0 days 00:00:03.539803455 +209 70 0 days 00:00:02.841617888 +209 71 0 days 00:00:03.081983556 +209 72 0 days 00:00:02.823641786 +209 73 0 days 00:00:02.547826990 +209 74 0 days 00:00:03.303223830 +209 75 0 days 00:00:02.909153940 +209 76 0 days 00:00:02.965634400 +209 77 0 days 00:00:03.868027656 +209 78 0 days 00:00:06.086438493 +209 79 0 days 00:00:02.193572720 +209 80 0 days 00:00:05.307489955 +209 81 0 days 00:00:03.819266353 +209 82 0 days 00:00:02.021866085 +209 83 0 days 00:00:03.239584940 +209 84 0 days 00:00:01.521799305 +209 85 0 days 00:00:01.814411464 +209 86 0 days 00:00:03.355737160 +209 87 0 days 00:00:02.397015170 +209 88 0 days 00:00:01.876875055 +209 89 0 days 00:00:05.782357232 +209 90 0 days 00:00:02.151224480 +209 91 0 days 00:00:04.477905492 +209 92 0 days 00:00:02.064613945 +209 93 0 days 00:00:01.697114375 +209 94 0 days 00:00:02.053017472 +209 95 0 days 00:00:01.892803290 +209 96 0 days 00:00:02.963965410 +209 97 0 days 00:00:03.287173520 +209 98 0 days 00:00:02.151219066 +209 99 0 days 00:00:02.571427010 +209 100 0 days 00:00:01.764048185 +210 1 0 days 00:00:25.579459366 +210 2 0 days 00:00:37.010593816 +210 3 0 days 00:00:19.692303900 +210 4 0 days 00:00:18.680702357 +210 5 0 days 00:00:17.434346166 +210 6 0 days 00:00:33.363131712 +210 7 0 days 
00:00:30.141076310 +210 8 0 days 00:00:16.886364844 +210 9 0 days 00:00:28.003462875 +210 10 0 days 00:00:21.193094385 +210 11 0 days 00:00:22.749724866 +210 12 0 days 00:00:21.279074230 +210 13 0 days 00:00:16.560077047 +210 14 0 days 00:00:29.769214237 +210 15 0 days 00:00:24.138598650 +210 16 0 days 00:00:51.149758742 +210 17 0 days 00:00:36.016275725 +210 18 0 days 00:00:23.656898614 +210 19 0 days 00:00:40.310998585 +210 20 0 days 00:00:28.229983166 +210 21 0 days 00:00:20.220283455 +210 22 0 days 00:00:18.723776100 +210 23 0 days 00:00:30.354472562 +210 24 0 days 00:00:36.302001200 +210 25 0 days 00:00:51.531851914 +210 26 0 days 00:00:55.141956400 +210 27 0 days 00:00:22.671253576 +210 28 0 days 00:00:21.675622194 +210 29 0 days 00:00:30.439336650 +210 30 0 days 00:00:40.951729833 +210 31 0 days 00:00:31.168180750 +210 32 0 days 00:00:38.301533233 +211 1 0 days 00:00:12.317423605 +211 2 0 days 00:00:15.600323453 +211 3 0 days 00:00:19.478200337 +211 4 0 days 00:00:16.851670500 +211 5 0 days 00:00:14.649493055 +211 6 0 days 00:00:11.750009131 +211 7 0 days 00:00:17.597635006 +211 8 0 days 00:00:16.083924054 +211 9 0 days 00:00:16.592125600 +211 10 0 days 00:00:10.200289210 +211 11 0 days 00:00:17.758243166 +211 12 0 days 00:00:10.663203923 +211 13 0 days 00:00:22.422330345 +211 14 0 days 00:00:18.663471616 +211 15 0 days 00:00:13.286206514 +211 16 0 days 00:00:16.538280846 +211 17 0 days 00:00:12.203080616 +211 18 0 days 00:00:11.116972785 +211 19 0 days 00:00:10.957144855 +211 20 0 days 00:00:31.344048400 +211 21 0 days 00:00:11.250481814 +211 22 0 days 00:00:23.446016109 +211 23 0 days 00:00:22.954357709 +211 24 0 days 00:00:17.495271642 +211 25 0 days 00:00:09.316763170 +211 26 0 days 00:00:16.660066780 +211 27 0 days 00:00:32.441632730 +211 28 0 days 00:00:11.738540540 +211 29 0 days 00:00:12.985249585 +211 30 0 days 00:00:12.645342940 +211 31 0 days 00:00:13.724523629 +211 32 0 days 00:00:27.903645700 +211 33 0 days 00:00:20.458345107 +211 34 0 days 
00:00:16.568509828 +211 35 0 days 00:00:20.109932700 +211 36 0 days 00:00:21.395672757 +211 37 0 days 00:00:14.056964900 +211 38 0 days 00:00:11.063119085 +211 39 0 days 00:00:12.051017925 +211 40 0 days 00:00:12.746880057 +211 41 0 days 00:00:10.826493355 +211 42 0 days 00:00:29.352238016 +211 43 0 days 00:00:11.446418670 +211 44 0 days 00:00:31.159374983 +212 1 0 days 00:00:20.813231466 +212 2 0 days 00:00:18.404487320 +212 3 0 days 00:00:30.521192885 +212 4 0 days 00:00:27.228162350 +212 5 0 days 00:00:15.301972616 +212 6 0 days 00:00:22.793338084 +212 7 0 days 00:00:22.787372557 +212 8 0 days 00:00:36.002668866 +212 9 0 days 00:00:17.675483095 +212 10 0 days 00:00:20.762830000 +212 11 0 days 00:00:38.645443242 +212 12 0 days 00:00:54.226511850 +212 13 0 days 00:00:50.524755683 +212 14 0 days 00:00:24.183821655 +212 15 0 days 00:01:04.068988687 +212 16 0 days 00:00:20.932797785 +212 17 0 days 00:00:21.218817541 +212 18 0 days 00:00:34.089607783 +212 19 0 days 00:00:52.208065383 +212 20 0 days 00:00:55.859177300 +212 21 0 days 00:00:27.287085883 +212 22 0 days 00:00:17.026051542 +212 23 0 days 00:00:20.686949945 +212 24 0 days 00:00:25.383600357 +212 25 0 days 00:00:21.530819841 +212 26 0 days 00:00:14.771572218 +212 27 0 days 00:00:27.680607466 +212 28 0 days 00:00:38.634452220 +212 29 0 days 00:00:18.296421592 +212 30 0 days 00:00:32.684964914 +213 1 0 days 00:00:20.297012328 +213 2 0 days 00:00:15.471004036 +213 3 0 days 00:00:11.884652072 +213 4 0 days 00:00:19.806447012 +213 5 0 days 00:00:10.490351914 +213 6 0 days 00:00:15.203611357 +213 7 0 days 00:00:15.242411816 +213 8 0 days 00:00:14.830508030 +213 9 0 days 00:00:11.442031708 +213 10 0 days 00:00:20.128533012 +213 11 0 days 00:00:16.287931470 +213 12 0 days 00:00:11.058440782 +213 13 0 days 00:00:28.006583516 +213 14 0 days 00:00:14.409714533 +213 15 0 days 00:00:15.940871173 +213 16 0 days 00:00:14.721303252 +213 17 0 days 00:00:17.964271528 +213 18 0 days 00:00:32.312692546 +213 19 0 days 
00:00:11.346177241 +213 20 0 days 00:00:18.792930966 +213 21 0 days 00:00:17.986347568 +213 22 0 days 00:00:28.664077962 +213 23 0 days 00:00:16.030416428 +213 24 0 days 00:00:09.995007106 +213 25 0 days 00:00:11.725458233 +213 26 0 days 00:00:15.929162537 +213 27 0 days 00:00:11.118919033 +213 28 0 days 00:00:13.850555975 +213 29 0 days 00:00:10.224676740 +213 30 0 days 00:00:17.482588613 +213 31 0 days 00:00:17.321137485 +213 32 0 days 00:00:21.506150163 +213 33 0 days 00:00:09.160721811 +213 34 0 days 00:00:14.691917681 +213 35 0 days 00:00:10.277805157 +213 36 0 days 00:00:11.151310358 +213 37 0 days 00:00:10.055558600 +213 38 0 days 00:00:19.061482014 +214 1 0 days 00:01:01.813308314 +214 2 0 days 00:00:30.915216300 +214 3 0 days 00:00:19.144712228 +214 4 0 days 00:00:25.169150422 +214 5 0 days 00:00:33.000659670 +214 6 0 days 00:00:28.551715500 +214 7 0 days 00:00:30.194811900 +214 8 0 days 00:00:29.715856381 +214 9 0 days 00:00:38.414806916 +214 10 0 days 00:00:32.222497433 +214 11 0 days 00:00:36.325421722 +214 12 0 days 00:00:34.590216742 +214 13 0 days 00:00:16.289492600 +214 14 0 days 00:00:21.600151300 +214 15 0 days 00:00:16.754968783 +214 16 0 days 00:00:21.551699650 +214 17 0 days 00:00:16.573682350 +214 18 0 days 00:00:20.152657385 +214 19 0 days 00:00:20.723244400 +214 20 0 days 00:00:22.839448675 +214 21 0 days 00:00:23.352897585 +214 22 0 days 00:00:21.943802283 +214 23 0 days 00:00:21.827280050 +214 24 0 days 00:00:24.969148980 +214 25 0 days 00:00:21.124303045 +214 26 0 days 00:00:14.817866576 +214 27 0 days 00:00:19.441994050 +214 28 0 days 00:00:28.060838750 +214 29 0 days 00:00:24.463770722 +214 30 0 days 00:00:36.899368200 +214 31 0 days 00:00:29.340599355 +214 32 0 days 00:00:25.220046685 +214 33 0 days 00:00:18.179983838 +214 34 0 days 00:00:18.889818883 +214 35 0 days 00:00:24.046237883 +214 36 0 days 00:00:35.178415050 +214 37 0 days 00:00:51.123919957 +214 38 0 days 00:00:38.157780316 +214 39 0 days 00:00:19.376420850 +214 40 0 days 
00:00:33.654149822 +215 1 0 days 00:00:11.944201637 +215 2 0 days 00:00:21.238030633 +215 3 0 days 00:00:11.193496983 +215 4 0 days 00:00:20.026436313 +215 5 0 days 00:00:09.692984011 +215 6 0 days 00:00:10.714190244 +215 7 0 days 00:00:18.284200833 +215 8 0 days 00:00:11.502485266 +215 9 0 days 00:00:14.327791433 +215 10 0 days 00:00:10.243704816 +215 11 0 days 00:00:11.639015446 +215 12 0 days 00:00:13.334034283 +215 13 0 days 00:00:29.379980183 +215 14 0 days 00:00:09.925608857 +215 15 0 days 00:00:28.400851463 +215 16 0 days 00:00:13.862458166 +215 17 0 days 00:00:08.543130416 +215 18 0 days 00:00:13.378019900 +215 19 0 days 00:00:28.332213316 +215 20 0 days 00:00:10.801617616 +215 21 0 days 00:00:19.176934272 +215 22 0 days 00:00:11.368998423 +215 23 0 days 00:00:16.026431857 +215 24 0 days 00:00:12.829135366 +215 25 0 days 00:00:12.245205816 +215 26 0 days 00:00:11.944547466 +215 27 0 days 00:00:14.548168530 +215 28 0 days 00:00:11.435440700 +215 29 0 days 00:00:10.895706833 +215 30 0 days 00:00:17.459191028 +215 31 0 days 00:00:11.549237787 +215 32 0 days 00:00:11.087461614 +215 33 0 days 00:00:10.746724866 +215 34 0 days 00:00:09.050947657 +215 35 0 days 00:00:18.833536566 +215 36 0 days 00:00:09.885583566 +215 37 0 days 00:00:10.359456266 +215 38 0 days 00:00:16.259578700 +215 39 0 days 00:00:16.798526885 +215 40 0 days 00:00:16.825717125 +215 41 0 days 00:00:17.707688866 +215 42 0 days 00:00:11.653616066 +215 43 0 days 00:00:10.351614927 +215 44 0 days 00:00:10.757905266 +215 45 0 days 00:00:23.139793433 +215 46 0 days 00:00:21.573211300 +215 47 0 days 00:00:10.707079457 +215 48 0 days 00:00:18.757145483 +215 49 0 days 00:00:12.501040477 +215 50 0 days 00:00:14.264783657 +215 51 0 days 00:00:10.471932033 +215 52 0 days 00:00:11.749362177 +215 53 0 days 00:00:12.595447386 +215 54 0 days 00:00:11.077633666 +215 55 0 days 00:00:13.749616075 +215 56 0 days 00:00:17.872775109 +215 57 0 days 00:00:28.782862966 +215 58 0 days 00:00:33.771737871 +215 59 0 days 
00:00:11.983787000 +215 60 0 days 00:00:09.957276514 +215 61 0 days 00:00:10.752771485 +215 62 0 days 00:00:14.223351233 +215 63 0 days 00:00:18.669144950 +215 64 0 days 00:00:19.801572150 +215 65 0 days 00:00:21.302622133 +215 66 0 days 00:00:21.678894754 +216 1 0 days 00:01:07.977045915 +216 2 0 days 00:00:20.356193421 +216 3 0 days 00:00:32.026282714 +216 4 0 days 00:00:19.679260611 +216 5 0 days 00:00:41.040700954 +216 6 0 days 00:00:19.125604642 +216 7 0 days 00:00:24.669691214 +216 8 0 days 00:00:32.090509462 +216 9 0 days 00:00:56.427371500 +216 10 0 days 00:00:58.405268011 +216 11 0 days 00:00:41.513775553 +216 12 0 days 00:00:20.695652333 +216 13 0 days 00:00:33.458620854 +216 14 0 days 00:00:41.749267650 +216 15 0 days 00:00:19.970899616 +216 16 0 days 00:00:18.559063425 +216 17 0 days 00:00:20.431235342 +216 18 0 days 00:00:37.657813150 +216 19 0 days 00:00:56.636319512 +217 1 0 days 00:00:11.989661842 +217 2 0 days 00:00:11.661983533 +217 3 0 days 00:00:16.622976828 +217 4 0 days 00:00:10.839507883 +217 5 0 days 00:00:10.388358871 +217 6 0 days 00:00:12.991195300 +217 7 0 days 00:00:31.572919200 +217 8 0 days 00:00:14.951380272 +217 9 0 days 00:00:18.694457466 +217 10 0 days 00:00:30.312000300 +217 11 0 days 00:00:10.089035116 +217 12 0 days 00:00:20.797910066 +217 13 0 days 00:00:10.320775428 +217 14 0 days 00:00:14.034653345 +217 15 0 days 00:00:11.876093783 +217 16 0 days 00:00:11.222845566 +217 17 0 days 00:00:10.912094883 +217 18 0 days 00:00:18.841667266 +217 19 0 days 00:00:18.317405183 +217 20 0 days 00:00:11.041697162 +217 21 0 days 00:00:21.019509612 +217 22 0 days 00:00:22.322127785 +217 23 0 days 00:00:11.370591509 +217 24 0 days 00:00:21.659010016 +217 25 0 days 00:00:13.086629266 +217 26 0 days 00:00:12.063225200 +217 27 0 days 00:00:19.448427550 +217 28 0 days 00:00:12.924332585 +217 29 0 days 00:00:12.778437450 +217 30 0 days 00:00:19.273113571 +217 31 0 days 00:00:17.167045340 +217 32 0 days 00:00:17.387386411 +217 33 0 days 
00:00:18.982908316 +217 34 0 days 00:00:14.032527771 +217 35 0 days 00:00:17.259540357 +217 36 0 days 00:00:12.648042150 +217 37 0 days 00:00:22.853188090 +217 38 0 days 00:00:14.525352683 +217 39 0 days 00:00:17.558736283 +217 40 0 days 00:00:10.191549571 +217 41 0 days 00:00:13.463999150 +217 42 0 days 00:00:29.739472200 +217 43 0 days 00:00:16.382684700 +217 44 0 days 00:00:18.464059342 +217 45 0 days 00:00:16.573228355 +217 46 0 days 00:00:16.704779250 +217 47 0 days 00:00:21.175719028 +217 48 0 days 00:00:12.331001100 +217 49 0 days 00:00:11.170299925 +217 50 0 days 00:00:10.685906316 +217 51 0 days 00:00:10.854172566 +217 52 0 days 00:00:09.747857683 +217 53 0 days 00:00:21.199790742 +217 54 0 days 00:00:23.072188717 +217 55 0 days 00:00:18.568684685 +217 56 0 days 00:00:20.904466816 +217 57 0 days 00:00:08.570827700 +217 58 0 days 00:00:19.751226262 +218 1 0 days 00:00:09.631451124 +218 2 0 days 00:00:26.255100660 +218 3 0 days 00:00:14.192684410 +218 4 0 days 00:00:10.810499176 +218 5 0 days 00:00:23.153087210 +218 6 0 days 00:00:15.655061265 +218 7 0 days 00:00:09.549246973 +218 8 0 days 00:00:25.857609420 +218 9 0 days 00:00:09.811731055 +218 10 0 days 00:00:20.481046000 +218 11 0 days 00:00:07.329702326 +218 12 0 days 00:00:25.552469810 +218 13 0 days 00:00:23.306883016 +218 14 0 days 00:00:10.792936285 +218 15 0 days 00:00:10.337821010 +218 16 0 days 00:00:09.267440356 +218 17 0 days 00:00:24.521955616 +218 18 0 days 00:00:30.715145295 +218 19 0 days 00:00:08.810390895 +218 20 0 days 00:00:24.983194630 +218 21 0 days 00:00:15.302463660 +218 22 0 days 00:00:13.037928650 +218 23 0 days 00:00:16.159386410 +218 24 0 days 00:00:30.288096820 +219 1 0 days 00:00:14.008297745 +219 2 0 days 00:00:11.737221200 +219 3 0 days 00:00:04.497088266 +219 4 0 days 00:00:06.718427668 +219 5 0 days 00:00:05.474580295 +219 6 0 days 00:00:13.390193925 +219 7 0 days 00:00:05.235066372 +219 8 0 days 00:00:07.265279540 +219 9 0 days 00:00:04.505373190 +219 10 0 days 
00:00:07.954555305 +219 11 0 days 00:00:11.698678720 +219 12 0 days 00:00:12.910351215 +219 13 0 days 00:00:07.099547003 +219 14 0 days 00:00:07.005655780 +219 15 0 days 00:00:10.980596660 +219 16 0 days 00:00:07.551633890 +219 17 0 days 00:00:05.201457090 +219 18 0 days 00:00:04.992227003 +219 19 0 days 00:00:10.211582220 +219 20 0 days 00:00:07.580862835 +219 21 0 days 00:00:07.776486100 +219 22 0 days 00:00:12.332433565 +219 23 0 days 00:00:04.608128683 +219 24 0 days 00:00:06.589756415 +219 25 0 days 00:00:07.316212515 +219 26 0 days 00:00:04.741160276 +219 27 0 days 00:00:04.677985792 +219 28 0 days 00:00:04.726471670 +219 29 0 days 00:00:04.767279640 +219 30 0 days 00:00:07.444162614 +219 31 0 days 00:00:10.959668047 +219 32 0 days 00:00:15.650291360 +219 33 0 days 00:00:04.158028313 +219 34 0 days 00:00:07.522903403 +219 35 0 days 00:00:14.029432744 +219 36 0 days 00:00:12.264116460 +219 37 0 days 00:00:06.839370755 +219 38 0 days 00:00:07.843493595 +219 39 0 days 00:00:13.876575905 +219 40 0 days 00:00:04.556505732 +219 41 0 days 00:00:08.333660195 +219 42 0 days 00:00:04.695242575 +219 43 0 days 00:00:08.148045290 +219 44 0 days 00:00:11.884747585 +220 1 0 days 00:00:14.756205587 +220 2 0 days 00:00:11.168671846 +220 3 0 days 00:00:14.230309170 +220 4 0 days 00:00:19.769644586 +220 5 0 days 00:00:09.742573456 +220 6 0 days 00:00:08.034532400 +220 7 0 days 00:00:08.543512145 +220 8 0 days 00:00:19.884698648 +220 9 0 days 00:00:13.215664480 +220 10 0 days 00:00:12.037702920 +220 11 0 days 00:00:11.303458520 +220 12 0 days 00:00:09.182396912 +220 13 0 days 00:00:20.879340600 +220 14 0 days 00:00:23.941161571 +220 15 0 days 00:00:07.440220993 +220 16 0 days 00:00:13.502532374 +220 17 0 days 00:00:11.072196886 +220 18 0 days 00:00:19.522043573 +220 19 0 days 00:00:13.122069175 +220 20 0 days 00:00:08.602736236 +220 21 0 days 00:00:19.850456932 +220 22 0 days 00:00:12.726418086 +220 23 0 days 00:00:21.825874036 +220 24 0 days 00:00:11.346541086 +220 25 0 days 
00:00:12.887378053 +220 26 0 days 00:00:19.297496313 +220 27 0 days 00:00:21.293944780 +221 1 0 days 00:00:09.487976765 +221 2 0 days 00:00:25.143286723 +221 3 0 days 00:00:22.525098200 +221 4 0 days 00:00:12.041509300 +221 5 0 days 00:00:16.825597045 +221 6 0 days 00:00:09.751536300 +221 7 0 days 00:00:23.682293260 +221 8 0 days 00:00:15.566802060 +221 9 0 days 00:00:24.275251790 +221 10 0 days 00:00:13.029729925 +221 11 0 days 00:00:10.777769270 +221 12 0 days 00:00:09.218203475 +221 13 0 days 00:00:13.164259055 +221 14 0 days 00:00:22.560211015 +221 15 0 days 00:00:20.059016580 +221 16 0 days 00:00:22.366686945 +221 17 0 days 00:00:12.853188210 +221 18 0 days 00:00:25.589654980 +221 19 0 days 00:00:10.034040260 +221 20 0 days 00:00:09.824208821 +221 21 0 days 00:00:29.194632540 +222 1 0 days 00:00:04.698692222 +222 2 0 days 00:00:11.023800860 +222 3 0 days 00:00:06.456470055 +222 4 0 days 00:00:06.312416145 +222 5 0 days 00:00:06.013107986 +222 6 0 days 00:00:06.949201703 +222 7 0 days 00:00:12.617755005 +222 8 0 days 00:00:08.209696795 +222 9 0 days 00:00:06.421568485 +222 10 0 days 00:00:07.032340245 +222 11 0 days 00:00:04.733280474 +222 12 0 days 00:00:07.100589122 +222 13 0 days 00:00:05.192034946 +222 14 0 days 00:00:06.368228585 +222 15 0 days 00:00:11.928733300 +222 16 0 days 00:00:05.604977373 +222 17 0 days 00:00:06.987072973 +222 18 0 days 00:00:04.214853435 +222 19 0 days 00:00:11.069487303 +222 20 0 days 00:00:05.432625753 +222 21 0 days 00:00:10.859141980 +222 22 0 days 00:00:12.720516982 +222 23 0 days 00:00:09.759911653 +222 24 0 days 00:00:06.367331215 +222 25 0 days 00:00:07.251072173 +222 26 0 days 00:00:08.043101905 +222 27 0 days 00:00:10.947431185 +222 28 0 days 00:00:04.596097468 +222 29 0 days 00:00:10.552026140 +222 30 0 days 00:00:04.902738084 +222 31 0 days 00:00:09.565783613 +222 32 0 days 00:00:04.122035910 +222 33 0 days 00:00:06.084341840 +222 34 0 days 00:00:07.433119303 +222 35 0 days 00:00:07.649236130 +222 36 0 days 
00:00:10.553685325 +222 37 0 days 00:00:05.468908706 +222 38 0 days 00:00:04.090227585 +222 39 0 days 00:00:04.608683343 +222 40 0 days 00:00:09.775251106 +222 41 0 days 00:00:09.546395773 +222 42 0 days 00:00:07.788746983 +222 43 0 days 00:00:08.636764925 +222 44 0 days 00:00:12.614376514 +223 1 0 days 00:00:06.581325995 +223 2 0 days 00:00:08.064100142 +223 3 0 days 00:00:06.113316408 +223 4 0 days 00:00:08.333953060 +223 5 0 days 00:00:11.586332880 +223 6 0 days 00:00:05.306319220 +223 7 0 days 00:00:09.992291126 +223 8 0 days 00:00:04.493166056 +223 9 0 days 00:00:04.966012890 +223 10 0 days 00:00:06.940735632 +223 11 0 days 00:00:04.743845340 +223 12 0 days 00:00:08.599913253 +223 13 0 days 00:00:05.700123473 +223 14 0 days 00:00:05.226876328 +223 15 0 days 00:00:04.201203115 +223 16 0 days 00:00:07.663380586 +223 17 0 days 00:00:07.376487451 +223 18 0 days 00:00:05.455626475 +223 19 0 days 00:00:11.430958390 +223 20 0 days 00:00:05.223397080 +223 21 0 days 00:00:13.464238952 +223 22 0 days 00:00:06.335344920 +223 23 0 days 00:00:06.915594296 +223 24 0 days 00:00:11.424949450 +223 25 0 days 00:00:05.719391240 +223 26 0 days 00:00:08.581841797 +223 27 0 days 00:00:11.449051215 +223 28 0 days 00:00:09.680829088 +223 29 0 days 00:00:05.893223433 +223 30 0 days 00:00:09.995138766 +223 31 0 days 00:00:06.193672960 +223 32 0 days 00:00:05.545970045 +223 33 0 days 00:00:13.282417475 +223 34 0 days 00:00:06.547288880 +223 35 0 days 00:00:04.744925444 +223 36 0 days 00:00:10.117786573 +223 37 0 days 00:00:04.647702380 +223 38 0 days 00:00:07.660733124 +223 39 0 days 00:00:07.060794545 +223 40 0 days 00:00:04.144697666 +223 41 0 days 00:00:08.632552200 +223 42 0 days 00:00:04.954529303 +223 43 0 days 00:00:05.056645361 +224 1 0 days 00:00:17.873923225 +224 2 0 days 00:00:40.957224080 +224 3 0 days 00:00:28.055400385 +224 4 0 days 00:00:29.353518424 +224 5 0 days 00:00:51.423055986 +224 6 0 days 00:00:41.561720893 +224 7 0 days 00:00:16.967700436 +224 8 0 days 
00:00:22.350048540 +224 9 0 days 00:00:22.336795320 +224 10 0 days 00:00:40.472221390 +224 11 0 days 00:00:44.824561833 +224 12 0 days 00:00:45.904522350 +225 1 0 days 00:00:46.726514975 +225 2 0 days 00:00:39.784069033 +225 3 0 days 00:00:41.225860760 +225 4 0 days 00:00:41.795005240 +225 5 0 days 00:00:16.200814330 +225 6 0 days 00:01:02.440062703 +225 7 0 days 00:00:57.012976500 +225 8 0 days 00:00:25.586071736 +225 9 0 days 00:00:24.483781845 +225 10 0 days 00:00:16.713308260 +225 11 0 days 00:00:22.534987170 +225 12 0 days 00:00:40.787703570 +226 1 0 days 00:00:12.541026526 +226 2 0 days 00:00:17.602314635 +226 3 0 days 00:00:17.507901615 +226 4 0 days 00:00:14.001751745 +226 5 0 days 00:00:15.732025813 +226 6 0 days 00:00:22.995671280 +226 7 0 days 00:00:25.666943635 +226 8 0 days 00:00:29.093846085 +226 9 0 days 00:00:13.978563360 +226 10 0 days 00:00:29.148667065 +226 11 0 days 00:00:32.230492020 +226 12 0 days 00:00:23.013656040 +226 13 0 days 00:00:11.170308520 +226 14 0 days 00:00:09.982664550 +226 15 0 days 00:00:15.181728870 +226 16 0 days 00:00:14.025951465 +226 17 0 days 00:00:25.680240990 +226 18 0 days 00:00:25.671745775 +226 19 0 days 00:00:09.359247870 +226 20 0 days 00:00:11.166402310 +226 21 0 days 00:00:11.868345840 +226 22 0 days 00:00:35.715125345 +226 23 0 days 00:00:13.983452670 +226 24 0 days 00:00:13.988978105 +226 25 0 days 00:00:15.125412340 +227 1 0 days 00:00:17.017815965 +227 2 0 days 00:00:31.033225816 +227 3 0 days 00:00:29.196714155 +227 4 0 days 00:00:14.075401485 +227 5 0 days 00:00:17.781558005 +227 6 0 days 00:00:11.412086560 +227 7 0 days 00:00:25.595053485 +227 8 0 days 00:00:31.391895080 +227 9 0 days 00:00:17.751155875 +227 10 0 days 00:00:36.231752045 +227 11 0 days 00:00:09.431970740 +227 12 0 days 00:00:11.714855724 +227 13 0 days 00:00:10.078479420 +227 14 0 days 00:00:17.791151040 +227 15 0 days 00:00:14.068617495 +227 16 0 days 00:00:11.959800588 +227 17 0 days 00:00:17.779565655 +227 18 0 days 00:00:09.428268200 
+227 19 0 days 00:00:29.189898765 +227 20 0 days 00:00:11.382335015 +227 21 0 days 00:00:09.409083330 +227 22 0 days 00:00:10.066924655 +227 23 0 days 00:00:15.682092733 +228 1 0 days 00:00:22.557593570 +228 2 0 days 00:00:22.284942430 +228 3 0 days 00:00:46.646581255 +228 4 0 days 00:00:46.694189562 +228 5 0 days 00:00:22.448940700 +228 6 0 days 00:00:29.741362808 +228 7 0 days 00:00:46.686536722 +228 8 0 days 00:00:46.483564200 +229 1 0 days 00:00:09.963253804 +229 2 0 days 00:00:11.043865860 +229 3 0 days 00:00:10.640794448 +229 4 0 days 00:00:16.712276673 +229 5 0 days 00:00:20.879405838 +229 6 0 days 00:00:11.157764795 +229 7 0 days 00:00:25.141268665 +229 8 0 days 00:00:10.655890291 +229 9 0 days 00:00:43.552665665 +229 10 0 days 00:00:11.163264420 +229 11 0 days 00:00:38.167478556 +229 12 0 days 00:00:27.332515564 +229 13 0 days 00:00:16.146750976 +229 14 0 days 00:00:19.923453605 +230 1 0 days 00:00:08.107229986 +230 2 0 days 00:00:07.711938406 +230 3 0 days 00:00:09.485942835 +230 4 0 days 00:00:08.919778728 +230 5 0 days 00:00:09.306370025 +230 6 0 days 00:00:25.278205002 +230 7 0 days 00:00:07.533326886 +230 8 0 days 00:00:13.079614766 +230 9 0 days 00:00:08.522762273 +230 10 0 days 00:00:08.485844540 +230 11 0 days 00:00:13.860802620 +230 12 0 days 00:00:24.503022115 +230 13 0 days 00:00:24.658487624 +230 14 0 days 00:00:09.919441364 +230 15 0 days 00:00:08.466886210 +230 16 0 days 00:00:19.946650506 +230 17 0 days 00:00:12.402529485 +230 18 0 days 00:00:12.474705013 +230 19 0 days 00:00:23.352410695 +230 20 0 days 00:00:13.916021625 +230 21 0 days 00:00:20.638625726 +230 22 0 days 00:00:21.122619300 +230 23 0 days 00:00:23.088623130 +230 24 0 days 00:00:13.057355050 +230 25 0 days 00:00:08.747449870 +230 26 0 days 00:00:12.434111546 +230 27 0 days 00:00:08.486470965 +230 28 0 days 00:00:09.083486408 +230 29 0 days 00:00:22.807938045 +230 30 0 days 00:00:20.071235073 +230 31 0 days 00:00:21.186523286 +231 1 0 days 00:00:05.851947786 +231 2 0 days 
00:00:06.122160086 +231 3 0 days 00:00:04.132580880 +231 4 0 days 00:00:03.829684593 +231 5 0 days 00:00:12.244389633 +231 6 0 days 00:00:05.044691364 +231 7 0 days 00:00:03.920569173 +231 8 0 days 00:00:07.062827800 +231 9 0 days 00:00:09.902851465 +231 10 0 days 00:00:06.812219672 +231 11 0 days 00:00:03.958436313 +231 12 0 days 00:00:07.250284810 +231 13 0 days 00:00:11.351842600 +231 14 0 days 00:00:12.647520185 +231 15 0 days 00:00:12.075879285 +231 16 0 days 00:00:04.327393470 +231 17 0 days 00:00:04.566212930 +231 18 0 days 00:00:04.297205550 +231 19 0 days 00:00:11.237457075 +231 20 0 days 00:00:04.185100920 +231 21 0 days 00:00:11.258290090 +231 22 0 days 00:00:07.104720605 +231 23 0 days 00:00:04.776975596 +231 24 0 days 00:00:05.580115338 +231 25 0 days 00:00:06.559692915 +231 26 0 days 00:00:04.128829366 +231 27 0 days 00:00:08.028187460 +231 28 0 days 00:00:10.193280880 +231 29 0 days 00:00:09.924710100 +231 30 0 days 00:00:10.704681093 +231 31 0 days 00:00:04.491357810 +231 32 0 days 00:00:11.993481045 +231 33 0 days 00:00:06.987646600 +231 34 0 days 00:00:11.656395250 +231 35 0 days 00:00:11.989540680 +231 36 0 days 00:00:13.331175824 +231 37 0 days 00:00:04.342643850 +231 38 0 days 00:00:05.444546280 +231 39 0 days 00:00:04.360741670 +231 40 0 days 00:00:04.821206043 +231 41 0 days 00:00:05.934075040 +231 42 0 days 00:00:04.829552976 +231 43 0 days 00:00:10.095467460 +231 44 0 days 00:00:08.375060955 +231 45 0 days 00:00:07.377768962 +231 46 0 days 00:00:05.991895753 +231 47 0 days 00:00:04.369511750 +231 48 0 days 00:00:05.955106886 +231 49 0 days 00:00:11.288676793 +231 50 0 days 00:00:04.818197616 +231 51 0 days 00:00:09.978200306 +231 52 0 days 00:00:05.936205253 +231 53 0 days 00:00:05.953587640 +231 54 0 days 00:00:06.636865486 +231 55 0 days 00:00:04.683830015 +232 1 0 days 00:00:00.171417195 +232 2 0 days 00:00:00.240344235 +232 3 0 days 00:00:00.219105265 +232 4 0 days 00:00:00.176958196 +232 5 0 days 00:00:00.162512540 +232 7 0 days 
00:00:00.170285060 +232 8 0 days 00:00:00.235344482 +232 9 0 days 00:00:00.145666104 +232 10 0 days 00:00:00.231445603 +232 11 0 days 00:00:00.158940978 +232 12 0 days 00:00:00.225992565 +232 13 0 days 00:00:00.155188898 +232 14 0 days 00:00:00.165096682 +232 15 0 days 00:00:00.134235393 +232 16 0 days 00:00:00.132023777 +232 17 0 days 00:00:00.199180535 +232 18 0 days 00:00:00.167086178 +232 19 0 days 00:00:00.158555542 +232 20 0 days 00:00:00.227408252 +232 21 0 days 00:00:00.229029573 +232 24 0 days 00:00:00.148698212 +232 25 0 days 00:00:00.181123160 +232 26 0 days 00:00:00.277102712 +232 27 0 days 00:00:00.173530766 +232 28 0 days 00:00:00.149372680 +232 29 0 days 00:00:00.141590986 +232 30 0 days 00:00:00.146719793 +232 31 0 days 00:00:00.232183250 +232 32 0 days 00:00:00.214706013 +232 33 0 days 00:00:00.138242937 +232 34 0 days 00:00:00.134898515 +232 35 0 days 00:00:00.132847261 +232 36 0 days 00:00:00.247894348 +232 37 0 days 00:00:00.176555414 +232 38 0 days 00:00:00.239493698 +232 39 0 days 00:00:00.163752603 +232 40 0 days 00:00:00.136041177 +232 41 0 days 00:00:00.161617360 +232 42 0 days 00:00:00.158616925 +232 43 0 days 00:00:00.143774943 +232 44 0 days 00:00:00.156094955 +232 45 0 days 00:00:00.177897383 +232 46 0 days 00:00:00.253025045 +232 47 0 days 00:00:00.167095206 +232 48 0 days 00:00:00.241784442 +232 49 0 days 00:00:00.226953443 +232 50 0 days 00:00:00.132689650 +232 51 0 days 00:00:00.163982316 +232 52 0 days 00:00:00.165683110 +232 53 0 days 00:00:00.151617217 +232 54 0 days 00:00:00.146029260 +232 55 0 days 00:00:00.159916394 +232 56 0 days 00:00:00.169623796 +232 57 0 days 00:00:00.225405483 +232 58 0 days 00:00:00.133102026 +232 59 0 days 00:00:00.161140594 +232 60 0 days 00:00:00.191074706 +232 61 0 days 00:00:00.143788587 +232 62 0 days 00:00:00.139504296 +232 63 0 days 00:00:00.150044763 +232 64 0 days 00:00:00.142662994 +232 65 0 days 00:00:00.245959823 +232 66 0 days 00:00:00.163655542 +232 67 0 days 00:00:00.229590722 +232 68 0 
days 00:00:00.166053196 +232 69 0 days 00:00:00.144072840 +232 70 0 days 00:00:00.152502621 +232 71 0 days 00:00:00.239140855 +232 72 0 days 00:00:00.136494860 +232 73 0 days 00:00:00.230113657 +232 74 0 days 00:00:00.228506126 +232 75 0 days 00:00:00.220760560 +232 76 0 days 00:00:00.132186756 +232 77 0 days 00:00:00.168584950 +232 78 0 days 00:00:00.239736355 +232 79 0 days 00:00:00.140836077 +232 80 0 days 00:00:00.138953607 +232 81 0 days 00:00:00.153090906 +232 82 0 days 00:00:00.163586464 +232 83 0 days 00:00:00.135477301 +232 84 0 days 00:00:00.154866515 +232 85 0 days 00:00:00.182336230 +232 87 0 days 00:00:00.140164513 +232 88 0 days 00:00:00.235275127 +232 90 0 days 00:00:00.230690266 +232 91 0 days 00:00:00.167556225 +232 92 0 days 00:00:00.218700526 +232 93 0 days 00:00:00.223518480 +232 94 0 days 00:00:00.223975144 +232 95 0 days 00:00:00.137803355 +232 96 0 days 00:00:00.156654276 +232 97 0 days 00:00:00.138125907 +232 98 0 days 00:00:00.230826358 +232 99 0 days 00:00:00.191131285 +232 100 0 days 00:00:00.164577950 +233 1 0 days 00:00:00.086306653 +233 2 0 days 00:00:00.125846564 +233 3 0 days 00:00:00.080813963 +233 4 0 days 00:00:00.075485711 +233 5 0 days 00:00:00.125783831 +233 6 0 days 00:00:00.091787086 +233 7 0 days 00:00:00.136708126 +233 8 0 days 00:00:00.124856195 +233 9 0 days 00:00:00.074930088 +233 10 0 days 00:00:00.084493602 +233 11 0 days 00:00:00.127834978 +233 12 0 days 00:00:00.080160894 +233 13 0 days 00:00:00.091986484 +233 14 0 days 00:00:00.078247810 +233 15 0 days 00:00:00.076693748 +233 16 0 days 00:00:00.127952888 +233 17 0 days 00:00:00.140391928 +233 18 0 days 00:00:00.087613890 +233 19 0 days 00:00:00.131879676 +233 20 0 days 00:00:00.105729815 +233 21 0 days 00:00:00.091520520 +233 22 0 days 00:00:00.128283884 +233 23 0 days 00:00:00.097538296 +233 25 0 days 00:00:00.123408333 +233 26 0 days 00:00:00.078916254 +233 27 0 days 00:00:00.091407442 +233 28 0 days 00:00:00.125415704 +233 29 0 days 00:00:00.077390457 +233 30 0 
days 00:00:00.129563608 +233 31 0 days 00:00:00.091008828 +233 32 0 days 00:00:00.123204145 +233 33 0 days 00:00:00.071947751 +233 34 0 days 00:00:00.089956908 +233 35 0 days 00:00:00.121088670 +233 36 0 days 00:00:00.082388398 +233 37 0 days 00:00:00.137012680 +233 38 0 days 00:00:00.074672395 +233 39 0 days 00:00:00.131871185 +233 40 0 days 00:00:00.104710571 +233 41 0 days 00:00:00.072727107 +233 42 0 days 00:00:00.098805936 +233 43 0 days 00:00:00.126093225 +233 44 0 days 00:00:00.071934948 +233 45 0 days 00:00:00.122894997 +233 46 0 days 00:00:00.084885150 +233 47 0 days 00:00:00.132255488 +233 48 0 days 00:00:00.092906309 +233 49 0 days 00:00:00.103848995 +233 50 0 days 00:00:00.132514217 +233 51 0 days 00:00:00.096372042 +233 52 0 days 00:00:00.081626578 +233 53 0 days 00:00:00.131731020 +233 54 0 days 00:00:00.074759773 +233 55 0 days 00:00:00.139493326 +233 56 0 days 00:00:00.079313794 +233 57 0 days 00:00:00.073394520 +233 58 0 days 00:00:00.094547494 +233 59 0 days 00:00:00.107804857 +233 60 0 days 00:00:00.122585902 +233 61 0 days 00:00:00.085321351 +233 62 0 days 00:00:00.131820374 +233 63 0 days 00:00:00.076622090 +233 64 0 days 00:00:00.113004446 +233 65 0 days 00:00:00.090638804 +233 66 0 days 00:00:00.091826700 +233 67 0 days 00:00:00.129251145 +233 68 0 days 00:00:00.073475128 +233 69 0 days 00:00:00.092607672 +233 70 0 days 00:00:00.088661664 +233 71 0 days 00:00:00.121916823 +233 72 0 days 00:00:00.078844111 +233 73 0 days 00:00:00.085501936 +233 74 0 days 00:00:00.074672030 +233 75 0 days 00:00:00.126412076 +233 76 0 days 00:00:00.076169738 +233 77 0 days 00:00:00.080286400 +233 78 0 days 00:00:00.086896600 +233 79 0 days 00:00:00.076603160 +233 80 0 days 00:00:00.095457335 +233 81 0 days 00:00:00.131277097 +233 82 0 days 00:00:00.142357830 +233 83 0 days 00:00:00.101093238 +233 84 0 days 00:00:00.073229112 +233 85 0 days 00:00:00.074521925 +233 86 0 days 00:00:00.069050344 +233 87 0 days 00:00:00.078177194 +233 88 0 days 00:00:00.077017384 
+233 89 0 days 00:00:00.125747685 +233 90 0 days 00:00:00.083646135 +233 91 0 days 00:00:00.074805720 +233 92 0 days 00:00:00.092056440 +233 93 0 days 00:00:00.092707998 +233 94 0 days 00:00:00.079982172 +233 95 0 days 00:00:00.090326764 +233 96 0 days 00:00:00.089221024 +233 97 0 days 00:00:00.139969588 +233 98 0 days 00:00:00.078005254 +233 99 0 days 00:00:00.099140620 +233 100 0 days 00:00:00.094530715 +234 2 0 days 00:00:00.137135146 +234 4 0 days 00:00:00.227231484 +234 5 0 days 00:00:00.219548471 +234 6 0 days 00:00:00.165909497 +234 8 0 days 00:00:00.228440913 +234 9 0 days 00:00:00.210348125 +234 12 0 days 00:00:00.166956112 +234 13 0 days 00:00:00.148615017 +234 14 0 days 00:00:00.215869380 +234 16 0 days 00:00:00.230364416 +234 19 0 days 00:00:00.230683077 +234 21 0 days 00:00:00.165654369 +234 23 0 days 00:00:00.210944285 +234 24 0 days 00:00:00.224499921 +234 25 0 days 00:00:00.231695271 +234 26 0 days 00:00:00.219827053 +234 28 0 days 00:00:00.227697338 +234 29 0 days 00:00:00.135739710 +234 30 0 days 00:00:00.105017893 +234 36 0 days 00:00:00.217425146 +234 39 0 days 00:00:00.221326686 +234 42 0 days 00:00:00.230701824 +234 43 0 days 00:00:00.147895108 +234 44 0 days 00:00:00.231692489 +234 45 0 days 00:00:00.224913043 +234 46 0 days 00:00:00.169886853 +234 47 0 days 00:00:00.161239811 +234 48 0 days 00:00:00.214160446 +234 49 0 days 00:00:00.136406001 +234 51 0 days 00:00:00.164674909 +234 53 0 days 00:00:00.168291883 +234 54 0 days 00:00:00.146209940 +234 55 0 days 00:00:00.215384671 +234 57 0 days 00:00:00.148773101 +234 58 0 days 00:00:00.221602995 +234 60 0 days 00:00:00.165305662 +234 61 0 days 00:00:00.162261629 +234 63 0 days 00:00:00.161980812 +234 65 0 days 00:00:00.231547794 +234 66 0 days 00:00:00.222796613 +234 67 0 days 00:00:00.164153033 +234 68 0 days 00:00:00.158150270 +234 70 0 days 00:00:00.214247112 +234 71 0 days 00:00:00.167221605 +234 72 0 days 00:00:00.218620255 +234 73 0 days 00:00:00.222243748 +234 74 0 days 
00:00:00.165477189 +234 75 0 days 00:00:00.163910552 +234 76 0 days 00:00:00.226793169 +234 80 0 days 00:00:00.166200962 +234 81 0 days 00:00:00.230591915 +234 82 0 days 00:00:00.232364046 +234 83 0 days 00:00:00.216027955 +234 84 0 days 00:00:00.222829994 +234 85 0 days 00:00:00.144952645 +234 86 0 days 00:00:00.167040463 +234 91 0 days 00:00:00.214562928 +234 94 0 days 00:00:00.166585129 +234 98 0 days 00:00:00.127146853 +234 100 0 days 00:00:00.164638156 +235 1 0 days 00:00:00.227662650 +235 5 0 days 00:00:00.229318920 +235 6 0 days 00:00:00.145532532 +235 7 0 days 00:00:00.150550860 +235 8 0 days 00:00:00.124220675 +235 9 0 days 00:00:00.130246813 +235 11 0 days 00:00:00.141113717 +235 12 0 days 00:00:00.167916376 +235 13 0 days 00:00:00.170663796 +235 14 0 days 00:00:00.168096094 +235 15 0 days 00:00:00.169478482 +235 16 0 days 00:00:00.162402235 +235 17 0 days 00:00:00.166871246 +235 18 0 days 00:00:00.221707182 +235 19 0 days 00:00:00.163366722 +235 20 0 days 00:00:00.103988506 +235 21 0 days 00:00:00.104279173 +235 24 0 days 00:00:00.164540866 +235 25 0 days 00:00:00.162721273 +235 27 0 days 00:00:00.135503588 +235 30 0 days 00:00:00.142049738 +235 31 0 days 00:00:00.223063027 +235 32 0 days 00:00:00.133871036 +235 33 0 days 00:00:00.201793243 +235 34 0 days 00:00:00.223165215 +235 36 0 days 00:00:00.164617994 +235 39 0 days 00:00:00.172165046 +235 40 0 days 00:00:00.140689369 +235 43 0 days 00:00:00.159773031 +235 44 0 days 00:00:00.135337609 +235 47 0 days 00:00:00.170896506 +235 48 0 days 00:00:00.126435953 +235 49 0 days 00:00:00.165326174 +235 50 0 days 00:00:00.205364634 +235 51 0 days 00:00:00.125750914 +235 52 0 days 00:00:00.167740160 +235 53 0 days 00:00:00.160908960 +235 54 0 days 00:00:00.223962045 +235 57 0 days 00:00:00.219213582 +235 58 0 days 00:00:00.162896489 +235 59 0 days 00:00:00.162624955 +235 60 0 days 00:00:00.164978854 +235 62 0 days 00:00:00.143630398 +235 63 0 days 00:00:00.142871240 +235 65 0 days 00:00:00.161035856 +235 66 0 
days 00:00:00.206439913 +235 69 0 days 00:00:00.214254865 +235 71 0 days 00:00:00.145770551 +235 72 0 days 00:00:00.221150070 +235 73 0 days 00:00:00.185771280 +235 77 0 days 00:00:00.140973210 +235 78 0 days 00:00:00.215798850 +235 79 0 days 00:00:00.113756615 +235 80 0 days 00:00:00.135755934 +235 81 0 days 00:00:00.128769675 +235 82 0 days 00:00:00.142717550 +235 84 0 days 00:00:00.141767327 +235 85 0 days 00:00:00.210543140 +235 86 0 days 00:00:00.214231104 +235 87 0 days 00:00:00.160715631 +235 88 0 days 00:00:00.215319102 +235 90 0 days 00:00:00.191386392 +235 91 0 days 00:00:00.215561220 +235 93 0 days 00:00:00.211885321 +235 96 0 days 00:00:00.193388980 +235 98 0 days 00:00:00.209047831 +236 1 0 days 00:00:00.091792793 +236 4 0 days 00:00:00.083104765 +236 5 0 days 00:00:00.124985829 +236 8 0 days 00:00:00.083578128 +236 9 0 days 00:00:00.091556536 +236 10 0 days 00:00:00.119252017 +236 11 0 days 00:00:00.116283495 +236 12 0 days 00:00:00.083333910 +236 13 0 days 00:00:00.092318820 +236 14 0 days 00:00:00.081387405 +236 17 0 days 00:00:00.078376700 +236 18 0 days 00:00:00.073772011 +236 19 0 days 00:00:00.088536888 +236 20 0 days 00:00:00.114259445 +236 21 0 days 00:00:00.119676040 +236 22 0 days 00:00:00.110628060 +236 24 0 days 00:00:00.075107833 +236 25 0 days 00:00:00.123923041 +236 26 0 days 00:00:00.090926811 +236 27 0 days 00:00:00.121728773 +236 28 0 days 00:00:00.090589382 +236 29 0 days 00:00:00.118089456 +236 33 0 days 00:00:00.125101047 +236 34 0 days 00:00:00.092290073 +236 35 0 days 00:00:00.081306842 +236 37 0 days 00:00:00.091719280 +236 38 0 days 00:00:00.124391045 +236 39 0 days 00:00:00.080846305 +236 40 0 days 00:00:00.116205093 +236 41 0 days 00:00:00.074170620 +236 43 0 days 00:00:00.121063922 +236 44 0 days 00:00:00.125423921 +236 45 0 days 00:00:00.116604006 +236 46 0 days 00:00:00.090651906 +236 47 0 days 00:00:00.081620992 +236 48 0 days 00:00:00.091707833 +236 49 0 days 00:00:00.091727486 +236 51 0 days 00:00:00.116909714 +236 56 
0 days 00:00:00.121083851 +236 58 0 days 00:00:00.090475534 +236 59 0 days 00:00:00.122845210 +236 60 0 days 00:00:00.122711004 +236 61 0 days 00:00:00.118913694 +236 62 0 days 00:00:00.088322684 +236 63 0 days 00:00:00.117599488 +236 64 0 days 00:00:00.081715634 +236 65 0 days 00:00:00.089172495 +236 66 0 days 00:00:00.117603151 +236 69 0 days 00:00:00.091501770 +236 70 0 days 00:00:00.081306811 +236 71 0 days 00:00:00.118350653 +236 73 0 days 00:00:00.092066592 +236 75 0 days 00:00:00.093013860 +236 77 0 days 00:00:00.120568112 +236 79 0 days 00:00:00.082003376 +236 83 0 days 00:00:00.116244248 +236 86 0 days 00:00:00.125822254 +236 87 0 days 00:00:00.115448492 +236 88 0 days 00:00:00.081382796 +236 90 0 days 00:00:00.123455070 +236 91 0 days 00:00:00.122730662 +236 93 0 days 00:00:00.122805548 +236 95 0 days 00:00:00.117595350 +236 97 0 days 00:00:00.105655083 +236 98 0 days 00:00:00.110517109 +236 99 0 days 00:00:00.138070518 +236 100 0 days 00:00:00.101271270 +237 1 0 days 00:00:00.072632640 +237 3 0 days 00:00:00.084409365 +237 4 0 days 00:00:00.095776615 +237 8 0 days 00:00:00.119201886 +237 9 0 days 00:00:00.124236911 +237 12 0 days 00:00:00.084109288 +237 13 0 days 00:00:00.095779433 +237 14 0 days 00:00:00.094377442 +237 15 0 days 00:00:00.119365446 +237 16 0 days 00:00:00.079199606 +237 17 0 days 00:00:00.084352443 +237 21 0 days 00:00:00.073947205 +237 22 0 days 00:00:00.100118464 +237 23 0 days 00:00:00.127524505 +237 24 0 days 00:00:00.124999710 +237 26 0 days 00:00:00.098498357 +237 27 0 days 00:00:00.082937865 +237 29 0 days 00:00:00.091293482 +237 30 0 days 00:00:00.120326674 +237 31 0 days 00:00:00.085250416 +237 32 0 days 00:00:00.067952680 +237 33 0 days 00:00:00.083652860 +237 35 0 days 00:00:00.090420045 +237 37 0 days 00:00:00.084395201 +237 38 0 days 00:00:00.096682606 +237 40 0 days 00:00:00.128224461 +237 41 0 days 00:00:00.090876111 +237 43 0 days 00:00:00.094232403 +237 44 0 days 00:00:00.093010813 +237 46 0 days 00:00:00.129602590 +237 
48 0 days 00:00:00.120485170 +237 49 0 days 00:00:00.081607209 +237 50 0 days 00:00:00.126964909 +237 52 0 days 00:00:00.091035590 +237 53 0 days 00:00:00.125031346 +237 55 0 days 00:00:00.092517600 +237 56 0 days 00:00:00.094351262 +237 57 0 days 00:00:00.074827186 +237 58 0 days 00:00:00.113709712 +237 59 0 days 00:00:00.067363520 +237 60 0 days 00:00:00.094865490 +237 61 0 days 00:00:00.123814264 +237 62 0 days 00:00:00.061129493 +237 64 0 days 00:00:00.119189462 +237 65 0 days 00:00:00.130331734 +237 67 0 days 00:00:00.083332953 +237 69 0 days 00:00:00.124633055 +237 71 0 days 00:00:00.074533220 +237 74 0 days 00:00:00.119247245 +237 75 0 days 00:00:00.093864520 +237 79 0 days 00:00:00.093093489 +237 80 0 days 00:00:00.109469245 +237 81 0 days 00:00:00.092687557 +237 82 0 days 00:00:00.121659680 +237 83 0 days 00:00:00.092558946 +237 86 0 days 00:00:00.117505260 +237 88 0 days 00:00:00.121274480 +237 90 0 days 00:00:00.117042806 +237 92 0 days 00:00:00.124935021 +237 96 0 days 00:00:00.121406847 +237 97 0 days 00:00:00.094104483 +237 98 0 days 00:00:00.125543386 +237 100 0 days 00:00:00.123046771 +238 1 0 days 00:00:01.274616963 +238 2 0 days 00:00:00.410523546 +238 3 0 days 00:00:00.833642656 +238 4 0 days 00:00:01.164902471 +238 5 0 days 00:00:00.609406408 +238 6 0 days 00:00:00.638738969 +238 7 0 days 00:00:01.170676438 +238 8 0 days 00:00:01.342756148 +238 9 0 days 00:00:00.580817760 +238 10 0 days 00:00:00.707769226 +238 12 0 days 00:00:01.151388693 +238 13 0 days 00:00:01.141369240 +238 14 0 days 00:00:00.515665058 +238 15 0 days 00:00:01.283194946 +238 16 0 days 00:00:00.389242500 +238 17 0 days 00:00:00.610632510 +238 18 0 days 00:00:00.451936494 +238 19 0 days 00:00:00.345501326 +238 20 0 days 00:00:00.580320780 +238 21 0 days 00:00:01.286499651 +238 22 0 days 00:00:01.247188598 +238 23 0 days 00:00:00.558997310 +238 24 0 days 00:00:00.485146910 +238 25 0 days 00:00:00.586014686 +238 27 0 days 00:00:00.795832620 +238 29 0 days 00:00:01.139818247 +238 
31 0 days 00:00:01.352808126 +238 33 0 days 00:00:01.234788456 +238 34 0 days 00:00:00.679736338 +238 35 0 days 00:00:00.889615713 +238 36 0 days 00:00:01.060088962 +238 37 0 days 00:00:00.600705970 +238 38 0 days 00:00:00.706683173 +238 39 0 days 00:00:00.351324070 +238 40 0 days 00:00:01.009578307 +238 41 0 days 00:00:00.520914297 +238 42 0 days 00:00:00.411068800 +238 43 0 days 00:00:00.956374014 +238 44 0 days 00:00:00.680417205 +238 46 0 days 00:00:01.165051666 +238 47 0 days 00:00:00.567741481 +238 48 0 days 00:00:00.405070380 +238 49 0 days 00:00:00.453409882 +238 50 0 days 00:00:00.323870120 +238 51 0 days 00:00:00.570287017 +238 52 0 days 00:00:00.334482583 +238 53 0 days 00:00:01.254865168 +238 54 0 days 00:00:00.724769008 +238 55 0 days 00:00:00.929093543 +238 57 0 days 00:00:01.078720423 +238 58 0 days 00:00:00.606837484 +238 59 0 days 00:00:01.192484534 +238 60 0 days 00:00:00.639909067 +238 61 0 days 00:00:00.508867190 +238 63 0 days 00:00:01.075026110 +238 64 0 days 00:00:00.334058074 +238 65 0 days 00:00:00.353305210 +238 66 0 days 00:00:00.720687543 +238 67 0 days 00:00:00.399194550 +238 69 0 days 00:00:00.858910847 +238 70 0 days 00:00:00.473783358 +238 71 0 days 00:00:00.463624758 +238 72 0 days 00:00:00.916238940 +238 73 0 days 00:00:00.946448320 +238 75 0 days 00:00:00.899896508 +238 77 0 days 00:00:00.427511915 +238 78 0 days 00:00:00.466151014 +238 80 0 days 00:00:00.841616146 +238 83 0 days 00:00:00.416565790 +238 84 0 days 00:00:00.465410786 +238 87 0 days 00:00:00.331601300 +238 88 0 days 00:00:00.842186522 +238 89 0 days 00:00:00.490490680 +238 92 0 days 00:00:00.265148473 +238 94 0 days 00:00:00.748004313 +238 96 0 days 00:00:00.481083123 +238 97 0 days 00:00:00.287141089 +238 98 0 days 00:00:00.858444573 +238 99 0 days 00:00:00.297620511 +238 100 0 days 00:00:00.821650791 +239 2 0 days 00:00:00.607387677 +239 4 0 days 00:00:00.485939576 +239 6 0 days 00:00:00.822927170 +239 7 0 days 00:00:00.584199737 +239 8 0 days 00:00:00.884180290 
+239 10 0 days 00:00:00.324911595 +239 11 0 days 00:00:00.564949314 +239 12 0 days 00:00:01.015900220 +239 13 0 days 00:00:00.486679381 +239 14 0 days 00:00:00.815943157 +239 15 0 days 00:00:00.867380855 +239 17 0 days 00:00:00.881977742 +239 18 0 days 00:00:00.444013797 +239 19 0 days 00:00:00.273284925 +239 22 0 days 00:00:00.840530517 +239 23 0 days 00:00:01.062564736 +239 24 0 days 00:00:00.871339298 +239 25 0 days 00:00:00.364225522 +239 26 0 days 00:00:00.331550846 +239 27 0 days 00:00:00.498805977 +239 28 0 days 00:00:00.294364836 +239 29 0 days 00:00:00.471501708 +239 30 0 days 00:00:00.311719980 +239 32 0 days 00:00:00.531734711 +239 33 0 days 00:00:00.632984217 +239 34 0 days 00:00:00.568738337 +239 35 0 days 00:00:00.496577368 +239 36 0 days 00:00:00.482483603 +239 37 0 days 00:00:00.307718268 +239 38 0 days 00:00:00.424976729 +239 39 0 days 00:00:00.943707580 +239 40 0 days 00:00:00.906240460 +239 42 0 days 00:00:00.374498207 +239 43 0 days 00:00:00.602127070 +239 44 0 days 00:00:00.637158456 +239 45 0 days 00:00:00.895433086 +239 46 0 days 00:00:00.887430882 +239 47 0 days 00:00:00.586154092 +239 48 0 days 00:00:00.918773589 +239 49 0 days 00:00:00.515274282 +239 50 0 days 00:00:00.504933038 +239 51 0 days 00:00:00.484462116 +239 52 0 days 00:00:00.583921677 +239 53 0 days 00:00:00.387701615 +239 54 0 days 00:00:00.708798653 +239 55 0 days 00:00:00.890467757 +239 56 0 days 00:00:00.875907863 +239 57 0 days 00:00:00.468604449 +239 58 0 days 00:00:00.818608382 +239 59 0 days 00:00:00.889523480 +239 61 0 days 00:00:00.596445802 +239 62 0 days 00:00:00.358101042 +239 63 0 days 00:00:00.897030288 +239 64 0 days 00:00:00.349379310 +239 65 0 days 00:00:00.715190206 +239 66 0 days 00:00:00.517636537 +239 67 0 days 00:00:00.882977823 +239 68 0 days 00:00:00.270746325 +239 69 0 days 00:00:00.645883128 +239 70 0 days 00:00:00.815516555 +239 71 0 days 00:00:00.436686856 +239 72 0 days 00:00:00.520724810 +239 75 0 days 00:00:00.899063398 +239 77 0 days 
00:00:00.499336688 +239 78 0 days 00:00:00.328137834 +239 79 0 days 00:00:00.609478244 +239 81 0 days 00:00:00.950759240 +239 82 0 days 00:00:00.888513022 +239 83 0 days 00:00:00.813545280 +239 84 0 days 00:00:00.450658390 +239 85 0 days 00:00:00.270229550 +239 86 0 days 00:00:00.431489086 +239 87 0 days 00:00:00.609709075 +239 89 0 days 00:00:00.820784271 +239 90 0 days 00:00:00.494710295 +239 92 0 days 00:00:00.265920757 +239 93 0 days 00:00:00.397687774 +239 94 0 days 00:00:00.700612757 +239 95 0 days 00:00:00.370308761 +239 97 0 days 00:00:00.320853948 +239 98 0 days 00:00:00.865789089 +239 99 0 days 00:00:00.845055454 +239 100 0 days 00:00:00.845694328 +240 1 0 days 00:00:00.273404506 +240 2 0 days 00:00:00.394600010 +240 3 0 days 00:00:00.386990553 +240 4 0 days 00:00:00.256779527 +240 5 0 days 00:00:00.464309635 +240 6 0 days 00:00:00.456664590 +240 7 0 days 00:00:00.167331144 +240 8 0 days 00:00:00.401808141 +240 9 0 days 00:00:00.213233980 +240 10 0 days 00:00:00.223005821 +240 12 0 days 00:00:00.385728324 +240 13 0 days 00:00:00.259231846 +240 14 0 days 00:00:00.403012813 +240 15 0 days 00:00:00.144716245 +240 16 0 days 00:00:00.194396941 +240 17 0 days 00:00:00.427340380 +240 18 0 days 00:00:00.421362362 +240 19 0 days 00:00:00.424574492 +240 20 0 days 00:00:00.176426255 +240 21 0 days 00:00:00.430988621 +240 22 0 days 00:00:00.232899550 +240 23 0 days 00:00:00.216117336 +240 24 0 days 00:00:00.238030205 +240 25 0 days 00:00:00.419712456 +240 26 0 days 00:00:00.242509316 +240 27 0 days 00:00:00.179824172 +240 28 0 days 00:00:00.292989466 +240 30 0 days 00:00:00.429784673 +240 31 0 days 00:00:00.281979160 +240 33 0 days 00:00:00.244402174 +240 34 0 days 00:00:00.409066553 +240 35 0 days 00:00:00.274207297 +240 36 0 days 00:00:00.155849122 +240 38 0 days 00:00:00.484920088 +240 39 0 days 00:00:00.429606640 +240 40 0 days 00:00:00.206447120 +240 41 0 days 00:00:00.235381673 +240 42 0 days 00:00:00.404706312 +240 43 0 days 00:00:00.252338506 +240 44 0 days 
00:00:00.203264593 +240 45 0 days 00:00:00.224867322 +240 46 0 days 00:00:00.289115037 +240 47 0 days 00:00:00.248999784 +240 48 0 days 00:00:00.426493562 +240 49 0 days 00:00:00.239926016 +240 51 0 days 00:00:00.439682493 +240 52 0 days 00:00:00.253421849 +240 53 0 days 00:00:00.225841560 +240 54 0 days 00:00:00.424159550 +240 56 0 days 00:00:00.177752246 +240 58 0 days 00:00:00.251989920 +240 59 0 days 00:00:00.236426070 +240 60 0 days 00:00:00.160009350 +240 61 0 days 00:00:00.424001613 +240 62 0 days 00:00:00.238613595 +240 63 0 days 00:00:00.382407220 +240 64 0 days 00:00:00.423449028 +240 65 0 days 00:00:00.335846915 +240 66 0 days 00:00:00.416216038 +240 68 0 days 00:00:00.234194944 +240 69 0 days 00:00:00.337401800 +240 70 0 days 00:00:00.244334168 +240 71 0 days 00:00:00.266697284 +240 72 0 days 00:00:00.211414676 +240 73 0 days 00:00:00.308173951 +240 74 0 days 00:00:00.174422029 +240 75 0 days 00:00:00.412579352 +240 76 0 days 00:00:00.476186161 +240 77 0 days 00:00:00.439867384 +240 78 0 days 00:00:00.272566198 +240 79 0 days 00:00:00.225483345 +240 80 0 days 00:00:00.440473135 +240 83 0 days 00:00:00.251820940 +240 84 0 days 00:00:00.532551690 +240 86 0 days 00:00:00.291139508 +240 87 0 days 00:00:00.248428390 +240 88 0 days 00:00:00.475194440 +240 89 0 days 00:00:00.271729484 +240 90 0 days 00:00:00.418828632 +240 91 0 days 00:00:00.147990258 +240 92 0 days 00:00:00.277832474 +240 93 0 days 00:00:00.257769941 +240 94 0 days 00:00:00.159098450 +240 95 0 days 00:00:00.406019240 +240 96 0 days 00:00:00.328187694 +240 97 0 days 00:00:00.249820517 +240 98 0 days 00:00:00.168238880 +240 99 0 days 00:00:00.279170487 +240 100 0 days 00:00:00.282799834 +241 1 0 days 00:00:00.139952760 +241 3 0 days 00:00:00.252178010 +241 4 0 days 00:00:00.450373521 +241 5 0 days 00:00:00.164347983 +241 6 0 days 00:00:00.413972262 +241 7 0 days 00:00:00.247039451 +241 8 0 days 00:00:00.510002447 +241 9 0 days 00:00:00.272925941 +241 10 0 days 00:00:00.462571805 +241 11 0 days 
00:00:00.456793887 +241 12 0 days 00:00:00.212221103 +241 13 0 days 00:00:00.155510763 +241 14 0 days 00:00:00.292498634 +241 15 0 days 00:00:00.456051338 +241 17 0 days 00:00:00.469077301 +241 18 0 days 00:00:00.445666575 +241 19 0 days 00:00:00.248966426 +241 20 0 days 00:00:00.230975252 +241 21 0 days 00:00:00.431311820 +241 22 0 days 00:00:00.295203900 +241 23 0 days 00:00:00.451447520 +241 25 0 days 00:00:00.261727562 +241 27 0 days 00:00:00.142987057 +241 28 0 days 00:00:00.216173667 +241 29 0 days 00:00:00.156474030 +241 30 0 days 00:00:00.385375844 +241 31 0 days 00:00:00.230681723 +241 32 0 days 00:00:00.290976185 +241 33 0 days 00:00:00.224079751 +241 34 0 days 00:00:00.227608173 +241 35 0 days 00:00:00.248343963 +241 37 0 days 00:00:00.174875165 +241 39 0 days 00:00:00.413509306 +241 40 0 days 00:00:00.242513554 +241 41 0 days 00:00:00.364774591 +241 42 0 days 00:00:00.450185450 +241 43 0 days 00:00:00.265115102 +241 44 0 days 00:00:00.453976141 +241 45 0 days 00:00:00.276976697 +241 46 0 days 00:00:00.165185184 +241 47 0 days 00:00:00.244742234 +241 52 0 days 00:00:00.459888777 +241 53 0 days 00:00:00.269786125 +241 54 0 days 00:00:00.558508651 +241 55 0 days 00:00:00.431181068 +241 57 0 days 00:00:00.496670905 +241 58 0 days 00:00:00.248529385 +241 59 0 days 00:00:00.315449850 +241 60 0 days 00:00:00.294097025 +241 63 0 days 00:00:00.201036310 +241 64 0 days 00:00:00.289027488 +241 65 0 days 00:00:00.232848300 +241 67 0 days 00:00:00.204243072 +241 68 0 days 00:00:00.457172173 +241 69 0 days 00:00:00.410615463 +241 70 0 days 00:00:00.270068293 +241 74 0 days 00:00:00.362415843 +241 75 0 days 00:00:00.434698082 +241 76 0 days 00:00:00.152611916 +241 77 0 days 00:00:00.251270184 +241 78 0 days 00:00:00.340301164 +241 79 0 days 00:00:00.456297405 +241 80 0 days 00:00:00.461578553 +241 81 0 days 00:00:00.445351327 +241 82 0 days 00:00:00.372087015 +241 83 0 days 00:00:00.434016357 +241 84 0 days 00:00:00.253651341 +241 85 0 days 00:00:00.182757141 +241 86 
0 days 00:00:00.178224812 +241 87 0 days 00:00:00.188689288 +241 88 0 days 00:00:00.136621850 +241 89 0 days 00:00:00.207628174 +241 90 0 days 00:00:00.152283985 +241 91 0 days 00:00:00.304738602 +241 93 0 days 00:00:00.427841108 +241 94 0 days 00:00:00.237469066 +241 95 0 days 00:00:00.238386275 +241 97 0 days 00:00:00.358110165 +241 100 0 days 00:00:00.187613309 +242 1 0 days 00:00:01.015706875 +242 2 0 days 00:00:00.988629290 +242 3 0 days 00:00:00.426520610 +242 4 0 days 00:00:00.430351764 +242 5 0 days 00:00:01.107117256 +242 6 0 days 00:00:00.676146610 +242 7 0 days 00:00:01.114007880 +242 8 0 days 00:00:00.338597686 +242 9 0 days 00:00:01.089896905 +242 10 0 days 00:00:00.330563373 +242 11 0 days 00:00:00.301860733 +242 12 0 days 00:00:00.609280196 +242 13 0 days 00:00:00.724752226 +242 14 0 days 00:00:00.523863305 +242 15 0 days 00:00:01.221323612 +242 16 0 days 00:00:01.085265545 +242 17 0 days 00:00:00.958453250 +242 18 0 days 00:00:00.506089352 +242 19 0 days 00:00:00.533760200 +242 20 0 days 00:00:00.471984100 +242 21 0 days 00:00:01.177113300 +242 22 0 days 00:00:00.371809756 +242 23 0 days 00:00:00.340329860 +242 24 0 days 00:00:01.088039576 +242 25 0 days 00:00:00.846272871 +242 26 0 days 00:00:01.028332300 +242 27 0 days 00:00:00.590568276 +242 28 0 days 00:00:01.051484340 +242 29 0 days 00:00:00.329402112 +242 30 0 days 00:00:00.409444572 +242 31 0 days 00:00:01.056036500 +242 32 0 days 00:00:01.012624995 +242 33 0 days 00:00:00.753481316 +242 34 0 days 00:00:00.631588464 +242 35 0 days 00:00:01.081254756 +242 36 0 days 00:00:01.143050675 +242 37 0 days 00:00:00.429180235 +242 38 0 days 00:00:00.309107310 +242 39 0 days 00:00:00.682396836 +242 40 0 days 00:00:00.340108935 +242 41 0 days 00:00:00.590363960 +242 42 0 days 00:00:01.029988380 +242 43 0 days 00:00:00.381545716 +242 44 0 days 00:00:01.075136555 +242 45 0 days 00:00:00.392488032 +242 46 0 days 00:00:00.325166132 +242 47 0 days 00:00:00.622979592 +242 48 0 days 00:00:00.659289070 +242 49 0 
days 00:00:01.018956844 +242 50 0 days 00:00:01.177401615 +242 51 0 days 00:00:00.539358168 +242 52 0 days 00:00:00.359655764 +242 53 0 days 00:00:01.086884088 +242 54 0 days 00:00:00.387603606 +242 55 0 days 00:00:00.585448093 +242 56 0 days 00:00:00.409704595 +242 57 0 days 00:00:00.614515948 +242 58 0 days 00:00:00.796458500 +242 59 0 days 00:00:00.985479460 +242 60 0 days 00:00:00.571821570 +242 61 0 days 00:00:00.534936948 +242 62 0 days 00:00:00.335570453 +242 63 0 days 00:00:01.275737845 +242 64 0 days 00:00:00.294451872 +242 65 0 days 00:00:00.394148416 +242 66 0 days 00:00:00.351371905 +242 67 0 days 00:00:00.364700168 +242 68 0 days 00:00:00.547103233 +242 69 0 days 00:00:00.341279960 +242 70 0 days 00:00:00.954127375 +242 71 0 days 00:00:01.000415220 +242 72 0 days 00:00:01.000634915 +242 73 0 days 00:00:00.660885580 +242 74 0 days 00:00:00.586968546 +242 75 0 days 00:00:00.370989785 +242 76 0 days 00:00:00.444318033 +242 77 0 days 00:00:00.496500928 +242 78 0 days 00:00:01.042015535 +242 79 0 days 00:00:01.009222685 +242 80 0 days 00:00:01.005481400 +242 81 0 days 00:00:00.518115100 +242 82 0 days 00:00:00.640942946 +242 83 0 days 00:00:00.639840225 +242 84 0 days 00:00:00.306912845 +242 85 0 days 00:00:00.573948848 +242 86 0 days 00:00:00.358111225 +242 87 0 days 00:00:00.414745635 +242 88 0 days 00:00:01.108165563 +242 89 0 days 00:00:00.720904140 +242 90 0 days 00:00:00.624909897 +242 91 0 days 00:00:00.566434735 +242 92 0 days 00:00:00.505985755 +242 93 0 days 00:00:00.373568854 +242 94 0 days 00:00:00.877595665 +242 95 0 days 00:00:00.553518901 +242 96 0 days 00:00:00.929874650 +242 97 0 days 00:00:00.582851375 +242 98 0 days 00:00:00.705346673 +242 99 0 days 00:00:01.031616988 +242 100 0 days 00:00:00.320946970 +243 1 0 days 00:00:00.623809803 +243 2 0 days 00:00:00.554353976 +243 3 0 days 00:00:00.164658315 +243 4 0 days 00:00:00.481897615 +243 5 0 days 00:00:00.461820145 +243 6 0 days 00:00:00.177513624 +243 7 0 days 00:00:00.184078085 +243 8 0 
days 00:00:00.470526365 +243 9 0 days 00:00:00.492972705 +243 10 0 days 00:00:00.155483780 +243 11 0 days 00:00:00.428661355 +243 12 0 days 00:00:00.253616115 +243 13 0 days 00:00:00.466922152 +243 14 0 days 00:00:00.508244555 +243 15 0 days 00:00:00.239524512 +243 16 0 days 00:00:00.302725660 +243 17 0 days 00:00:00.585775035 +243 18 0 days 00:00:00.132128305 +243 19 0 days 00:00:00.239817372 +243 20 0 days 00:00:00.234973010 +243 21 0 days 00:00:00.287960028 +243 22 0 days 00:00:00.138721330 +243 23 0 days 00:00:00.516252815 +243 24 0 days 00:00:00.359968257 +243 25 0 days 00:00:00.160748000 +243 26 0 days 00:00:00.530062144 +243 27 0 days 00:00:00.190796756 +243 28 0 days 00:00:00.201631525 +243 29 0 days 00:00:00.284467208 +243 30 0 days 00:00:00.245603430 +243 31 0 days 00:00:00.169372712 +243 32 0 days 00:00:00.266445296 +243 33 0 days 00:00:00.472228550 +243 34 0 days 00:00:00.432729620 +243 35 0 days 00:00:00.285018936 +243 36 0 days 00:00:00.482658335 +243 37 0 days 00:00:00.459628884 +243 38 0 days 00:00:00.414033800 +243 39 0 days 00:00:00.260503943 +243 40 0 days 00:00:00.548386370 +243 41 0 days 00:00:00.493687610 +243 42 0 days 00:00:00.156575920 +243 43 0 days 00:00:00.581045128 +243 44 0 days 00:00:00.185956570 +243 45 0 days 00:00:00.397715173 +243 46 0 days 00:00:00.203879524 +243 47 0 days 00:00:00.158726285 +243 48 0 days 00:00:00.182515253 +243 49 0 days 00:00:00.320113910 +243 50 0 days 00:00:00.375391800 +243 51 0 days 00:00:00.171617868 +243 52 0 days 00:00:00.329178874 +243 53 0 days 00:00:00.310953735 +243 54 0 days 00:00:00.286619324 +243 55 0 days 00:00:00.433441416 +243 56 0 days 00:00:00.165302696 +243 57 0 days 00:00:00.452218144 +243 58 0 days 00:00:00.144018290 +243 59 0 days 00:00:00.236787080 +243 60 0 days 00:00:00.323684475 +243 61 0 days 00:00:00.286935785 +243 62 0 days 00:00:00.332680312 +243 63 0 days 00:00:00.175982793 +243 64 0 days 00:00:00.247878120 +243 65 0 days 00:00:00.247920645 +243 66 0 days 00:00:00.158810704 +243 
67 0 days 00:00:00.316985228 +243 68 0 days 00:00:00.288822152 +243 69 0 days 00:00:00.592025356 +243 70 0 days 00:00:00.247031340 +243 71 0 days 00:00:00.390326666 +243 72 0 days 00:00:00.305645896 +243 73 0 days 00:00:00.146391250 +243 74 0 days 00:00:00.213086760 +243 75 0 days 00:00:00.552974500 +243 76 0 days 00:00:00.257421828 +243 77 0 days 00:00:00.162584280 +243 78 0 days 00:00:00.252892255 +243 79 0 days 00:00:00.523851405 +243 80 0 days 00:00:00.559644055 +243 81 0 days 00:00:00.324269457 +243 82 0 days 00:00:00.601558220 +243 83 0 days 00:00:00.241594700 +243 84 0 days 00:00:00.284995985 +243 85 0 days 00:00:00.496358656 +243 86 0 days 00:00:00.460677920 +243 87 0 days 00:00:00.157407415 +243 88 0 days 00:00:00.266690592 +243 89 0 days 00:00:00.460422260 +243 90 0 days 00:00:00.314540388 +243 91 0 days 00:00:00.438875830 +243 92 0 days 00:00:00.441771250 +243 93 0 days 00:00:00.142923480 +243 94 0 days 00:00:00.155254464 +243 95 0 days 00:00:00.286565485 +243 96 0 days 00:00:00.439040236 +243 97 0 days 00:00:00.248447145 +243 98 0 days 00:00:00.501005195 +243 99 0 days 00:00:00.149438440 +243 100 0 days 00:00:00.273638485 +244 1 0 days 00:00:00.497113062 +244 2 0 days 00:00:00.520861330 +244 3 0 days 00:00:00.487580330 +244 4 0 days 00:00:00.512166216 +244 5 0 days 00:00:00.905799932 +244 6 0 days 00:00:00.497646377 +244 7 0 days 00:00:00.471781208 +244 8 0 days 00:00:00.799654730 +244 9 0 days 00:00:00.300008792 +244 10 0 days 00:00:00.507243490 +244 11 0 days 00:00:00.777883980 +244 12 0 days 00:00:00.296157588 +244 13 0 days 00:00:00.305925735 +244 14 0 days 00:00:00.858932072 +244 15 0 days 00:00:00.873541344 +244 16 0 days 00:00:00.478480900 +244 17 0 days 00:00:00.452296870 +244 18 0 days 00:00:00.507828388 +244 19 0 days 00:00:00.694982685 +244 20 0 days 00:00:00.460657040 +244 21 0 days 00:00:00.424509505 +244 22 0 days 00:00:00.700340148 +244 23 0 days 00:00:00.503701765 +244 24 0 days 00:00:00.390052328 +244 25 0 days 00:00:00.863807130 +244 
26 0 days 00:00:00.461713340 +244 27 0 days 00:00:00.338202240 +244 28 0 days 00:00:00.291655721 +244 29 0 days 00:00:00.295296045 +244 30 0 days 00:00:00.473928434 +244 31 0 days 00:00:00.369497770 +244 32 0 days 00:00:00.798916816 +244 33 0 days 00:00:00.902576040 +244 34 0 days 00:00:00.718649813 +244 35 0 days 00:00:00.300494276 +244 36 0 days 00:00:00.354581991 +244 37 0 days 00:00:00.910518844 +244 38 0 days 00:00:00.838754722 +244 39 0 days 00:00:00.294458137 +244 40 0 days 00:00:00.451807476 +244 41 0 days 00:00:00.546171644 +244 42 0 days 00:00:00.506986765 +244 43 0 days 00:00:00.504385190 +244 44 0 days 00:00:00.362084132 +244 45 0 days 00:00:00.508995512 +244 46 0 days 00:00:00.464790752 +244 47 0 days 00:00:00.518255760 +244 48 0 days 00:00:00.474995122 +244 49 0 days 00:00:00.324908188 +244 50 0 days 00:00:00.888286050 +244 51 0 days 00:00:00.824626115 +244 52 0 days 00:00:00.829952710 +244 53 0 days 00:00:00.413859633 +244 54 0 days 00:00:00.269706151 +244 55 0 days 00:00:00.463892290 +244 56 0 days 00:00:00.843800220 +244 57 0 days 00:00:00.562296676 +244 58 0 days 00:00:00.306850038 +244 59 0 days 00:00:00.384261132 +244 60 0 days 00:00:00.832485375 +244 61 0 days 00:00:00.910795584 +244 62 0 days 00:00:00.827210400 +244 63 0 days 00:00:00.505083904 +244 64 0 days 00:00:00.308416936 +244 65 0 days 00:00:00.499171281 +244 66 0 days 00:00:00.287195964 +244 67 0 days 00:00:00.749004765 +244 68 0 days 00:00:00.288387595 +244 69 0 days 00:00:00.265862463 +244 70 0 days 00:00:00.273013470 +244 71 0 days 00:00:00.311271028 +244 72 0 days 00:00:00.796083275 +244 73 0 days 00:00:00.857530632 +244 74 0 days 00:00:00.291305014 +244 75 0 days 00:00:00.512112260 +244 76 0 days 00:00:00.288495874 +244 77 0 days 00:00:00.856250885 +244 78 0 days 00:00:00.487117369 +244 79 0 days 00:00:00.857453973 +244 80 0 days 00:00:00.864943116 +244 81 0 days 00:00:00.301130781 +244 82 0 days 00:00:00.325795616 +244 83 0 days 00:00:00.510696949 +244 84 0 days 
00:00:00.296407264 +244 85 0 days 00:00:00.267714142 +244 86 0 days 00:00:00.886285194 +244 87 0 days 00:00:00.374160453 +244 88 0 days 00:00:00.280204711 +244 89 0 days 00:00:00.304593777 +244 90 0 days 00:00:00.505376235 +244 91 0 days 00:00:00.518474920 +244 92 0 days 00:00:00.484346828 +244 93 0 days 00:00:00.517149085 +244 94 0 days 00:00:00.500273415 +244 95 0 days 00:00:00.867991747 +244 96 0 days 00:00:00.491386315 +244 97 0 days 00:00:00.294417293 +244 98 0 days 00:00:00.287370602 +244 99 0 days 00:00:00.507214877 +244 100 0 days 00:00:00.852861851 +245 1 0 days 00:00:00.249106084 +245 2 0 days 00:00:00.245822088 +245 3 0 days 00:00:00.168184360 +245 4 0 days 00:00:00.402514788 +245 5 0 days 00:00:00.152522031 +245 6 0 days 00:00:00.163459500 +245 7 0 days 00:00:00.146202151 +245 8 0 days 00:00:00.150768958 +245 9 0 days 00:00:00.156885237 +245 10 0 days 00:00:00.399633795 +245 11 0 days 00:00:00.412346068 +245 12 0 days 00:00:00.257268220 +245 13 0 days 00:00:00.252725752 +245 14 0 days 00:00:00.150947144 +245 15 0 days 00:00:00.174626215 +245 16 0 days 00:00:00.252457202 +245 17 0 days 00:00:00.329254870 +245 18 0 days 00:00:00.243451706 +245 19 0 days 00:00:00.248624036 +245 20 0 days 00:00:00.408660015 +245 21 0 days 00:00:00.156537115 +245 22 0 days 00:00:00.148268300 +245 23 0 days 00:00:00.269847155 +245 24 0 days 00:00:00.427563155 +245 25 0 days 00:00:00.252202396 +245 26 0 days 00:00:00.406974224 +245 27 0 days 00:00:00.148386941 +245 28 0 days 00:00:00.201296933 +245 29 0 days 00:00:00.225097756 +245 30 0 days 00:00:00.247557005 +245 31 0 days 00:00:00.257644348 +245 32 0 days 00:00:00.153389294 +245 33 0 days 00:00:00.148858936 +245 34 0 days 00:00:00.238609852 +245 35 0 days 00:00:00.136120311 +245 36 0 days 00:00:00.160596207 +245 37 0 days 00:00:00.259932817 +245 38 0 days 00:00:00.201424568 +245 39 0 days 00:00:00.411645560 +245 40 0 days 00:00:00.379925265 +245 41 0 days 00:00:00.158846324 +245 42 0 days 00:00:00.400614770 +245 43 0 days 
00:00:00.221683336 +245 44 0 days 00:00:00.384063905 +245 45 0 days 00:00:00.403567784 +245 46 0 days 00:00:00.250766412 +245 47 0 days 00:00:00.408434905 +245 48 0 days 00:00:00.141386760 +245 49 0 days 00:00:00.454839233 +245 50 0 days 00:00:00.415061843 +245 51 0 days 00:00:00.270988086 +245 52 0 days 00:00:00.133686576 +245 53 0 days 00:00:00.152924409 +245 54 0 days 00:00:00.153181301 +245 55 0 days 00:00:00.388110360 +245 56 0 days 00:00:00.399223520 +245 57 0 days 00:00:00.406507016 +245 58 0 days 00:00:00.142394317 +245 59 0 days 00:00:00.236640373 +245 60 0 days 00:00:00.149025898 +245 61 0 days 00:00:00.200362580 +245 62 0 days 00:00:00.284918372 +245 63 0 days 00:00:00.449568140 +245 64 0 days 00:00:00.389043475 +245 65 0 days 00:00:00.233939648 +245 66 0 days 00:00:00.176436004 +245 67 0 days 00:00:00.247707445 +245 68 0 days 00:00:00.248085208 +245 69 0 days 00:00:00.257186561 +245 70 0 days 00:00:00.202533732 +245 71 0 days 00:00:00.156033757 +245 72 0 days 00:00:00.155047698 +245 73 0 days 00:00:00.437422028 +245 74 0 days 00:00:00.272750552 +245 75 0 days 00:00:00.192199132 +245 76 0 days 00:00:00.150072382 +245 77 0 days 00:00:00.258947250 +245 78 0 days 00:00:00.452403204 +245 79 0 days 00:00:00.412207190 +245 80 0 days 00:00:00.179576425 +245 81 0 days 00:00:00.247903612 +245 82 0 days 00:00:00.255464947 +245 83 0 days 00:00:00.155833291 +245 84 0 days 00:00:00.170108015 +245 85 0 days 00:00:00.408197960 +245 86 0 days 00:00:00.177084907 +245 87 0 days 00:00:00.181731208 +245 88 0 days 00:00:00.385673690 +245 89 0 days 00:00:00.442174420 +245 90 0 days 00:00:00.202096386 +245 91 0 days 00:00:00.418077062 +245 92 0 days 00:00:00.248805430 +245 93 0 days 00:00:00.152017536 +245 94 0 days 00:00:00.178147312 +245 95 0 days 00:00:00.264246470 +245 96 0 days 00:00:00.158844820 +245 97 0 days 00:00:00.176886872 +245 98 0 days 00:00:00.157105846 +245 99 0 days 00:00:00.159938388 +245 100 0 days 00:00:00.238663870 +246 1 0 days 00:00:00.216269891 +246 2 0 
days 00:00:00.128665988 +246 3 0 days 00:00:00.128471595 +246 4 0 days 00:00:00.140455650 +246 5 0 days 00:00:00.166311842 +246 6 0 days 00:00:00.204243140 +246 7 0 days 00:00:00.133795560 +246 8 0 days 00:00:00.127468153 +246 9 0 days 00:00:00.134430163 +246 10 0 days 00:00:00.167370420 +246 11 0 days 00:00:00.136849852 +246 12 0 days 00:00:00.140097985 +246 13 0 days 00:00:00.131417232 +246 14 0 days 00:00:00.121651310 +246 15 0 days 00:00:00.148408370 +246 16 0 days 00:00:00.119551568 +246 17 0 days 00:00:00.118145272 +246 18 0 days 00:00:00.136498060 +246 19 0 days 00:00:00.142530540 +246 20 0 days 00:00:00.153303592 +246 21 0 days 00:00:00.127597391 +246 22 0 days 00:00:00.140040340 +246 23 0 days 00:00:00.148770315 +246 24 0 days 00:00:00.114475364 +246 25 0 days 00:00:00.206093916 +246 26 0 days 00:00:00.146092970 +246 27 0 days 00:00:00.182908495 +246 28 0 days 00:00:00.121976790 +246 29 0 days 00:00:00.183467715 +246 30 0 days 00:00:00.125740523 +246 31 0 days 00:00:00.203138024 +246 32 0 days 00:00:00.150544436 +246 33 0 days 00:00:00.150286813 +246 34 0 days 00:00:00.129407620 +246 35 0 days 00:00:00.202307864 +246 36 0 days 00:00:00.143211345 +246 37 0 days 00:00:00.200373184 +246 38 0 days 00:00:00.132665620 +246 39 0 days 00:00:00.152122775 +246 40 0 days 00:00:00.154090020 +246 41 0 days 00:00:00.196084552 +246 42 0 days 00:00:00.203185086 +246 43 0 days 00:00:00.151304972 +246 44 0 days 00:00:00.136698554 +246 46 0 days 00:00:00.193468185 +246 47 0 days 00:00:00.127156835 +246 48 0 days 00:00:00.130744503 +246 49 0 days 00:00:00.131470970 +246 50 0 days 00:00:00.172591240 +246 51 0 days 00:00:00.183394135 +246 52 0 days 00:00:00.151268080 +246 53 0 days 00:00:00.147611626 +246 54 0 days 00:00:00.139422068 +246 55 0 days 00:00:00.158158696 +246 56 0 days 00:00:00.137894706 +246 57 0 days 00:00:00.217217048 +246 58 0 days 00:00:00.116490192 +246 59 0 days 00:00:00.153874715 +246 60 0 days 00:00:00.135454252 +246 61 0 days 00:00:00.182914955 +246 62 0 
days 00:00:00.170813450 +246 63 0 days 00:00:00.155450705 +246 64 0 days 00:00:00.134890630 +246 65 0 days 00:00:00.126822685 +246 66 0 days 00:00:00.152646055 +246 67 0 days 00:00:00.113020505 +246 68 0 days 00:00:00.158991814 +246 69 0 days 00:00:00.125947990 +246 70 0 days 00:00:00.200190308 +246 71 0 days 00:00:00.126290297 +246 72 0 days 00:00:00.200698092 +246 73 0 days 00:00:00.210461214 +246 74 0 days 00:00:00.193792960 +246 75 0 days 00:00:00.116952944 +246 76 0 days 00:00:00.191632810 +246 77 0 days 00:00:00.126331976 +246 78 0 days 00:00:00.115935900 +246 79 0 days 00:00:00.150738940 +246 80 0 days 00:00:00.188249335 +246 81 0 days 00:00:00.117190576 +246 82 0 days 00:00:00.115529733 +246 83 0 days 00:00:00.137423755 +246 84 0 days 00:00:00.153491077 +246 85 0 days 00:00:00.125499828 +246 86 0 days 00:00:00.140817340 +246 87 0 days 00:00:00.144733813 +246 88 0 days 00:00:00.161555280 +246 89 0 days 00:00:00.123815063 +246 90 0 days 00:00:00.127597340 +246 91 0 days 00:00:00.119218695 +246 92 0 days 00:00:00.143413405 +246 93 0 days 00:00:00.127012160 +246 94 0 days 00:00:00.136150520 +246 95 0 days 00:00:00.123205513 +246 96 0 days 00:00:00.136914325 +246 97 0 days 00:00:00.203405188 +246 98 0 days 00:00:00.189902015 +246 99 0 days 00:00:00.150308883 +246 100 0 days 00:00:00.137952145 +247 1 0 days 00:00:00.122061660 +247 2 0 days 00:00:00.134017376 +247 3 0 days 00:00:00.208236676 +247 4 0 days 00:00:00.121914316 +247 5 0 days 00:00:00.130281080 +247 6 0 days 00:00:00.186284230 +247 7 0 days 00:00:00.202692435 +247 8 0 days 00:00:00.180601480 +247 9 0 days 00:00:00.239789000 +247 10 0 days 00:00:00.188562480 +247 11 0 days 00:00:00.140713045 +247 12 0 days 00:00:00.204055116 +247 13 0 days 00:00:00.191344810 +247 14 0 days 00:00:00.145721555 +247 15 0 days 00:00:00.119325152 +247 16 0 days 00:00:00.131616398 +247 17 0 days 00:00:00.135632896 +247 18 0 days 00:00:00.124904876 +247 19 0 days 00:00:00.143439960 +247 20 0 days 00:00:00.137521011 +247 21 0 
days 00:00:00.133188625 +247 22 0 days 00:00:00.104005640 +247 23 0 days 00:00:00.126455900 +247 24 0 days 00:00:00.201259584 +247 25 0 days 00:00:00.125263380 +247 26 0 days 00:00:00.203964800 +247 27 0 days 00:00:00.204574360 +247 28 0 days 00:00:00.124215780 +247 29 0 days 00:00:00.139300514 +247 30 0 days 00:00:00.200612768 +247 31 0 days 00:00:00.140633520 +247 32 0 days 00:00:00.199249720 +247 33 0 days 00:00:00.158819967 +247 34 0 days 00:00:00.158027080 +247 35 0 days 00:00:00.133815073 +247 36 0 days 00:00:00.227312920 +247 37 0 days 00:00:00.129258300 +247 38 0 days 00:00:00.127047333 +247 39 0 days 00:00:00.155982114 +247 40 0 days 00:00:00.117191104 +247 41 0 days 00:00:00.156838823 +247 42 0 days 00:00:00.162012584 +247 43 0 days 00:00:00.142915915 +247 44 0 days 00:00:00.169826040 +247 45 0 days 00:00:00.153627540 +247 46 0 days 00:00:00.184557960 +247 47 0 days 00:00:00.138991470 +247 48 0 days 00:00:00.197665095 +247 49 0 days 00:00:00.219408085 +247 50 0 days 00:00:00.171666973 +247 51 0 days 00:00:00.120737736 +247 52 0 days 00:00:00.139432022 +247 53 0 days 00:00:00.125765260 +247 54 0 days 00:00:00.210618246 +247 55 0 days 00:00:00.158916475 +247 56 0 days 00:00:00.213224026 +247 57 0 days 00:00:00.134210643 +247 58 0 days 00:00:00.159760568 +247 59 0 days 00:00:00.166189990 +247 60 0 days 00:00:00.156301968 +247 61 0 days 00:00:00.127416522 +247 62 0 days 00:00:00.200210628 +247 63 0 days 00:00:00.173428466 +247 64 0 days 00:00:00.127235890 +247 65 0 days 00:00:00.164821647 +247 66 0 days 00:00:00.135650785 +247 67 0 days 00:00:00.131838338 +247 68 0 days 00:00:00.124499751 +247 69 0 days 00:00:00.120502640 +247 70 0 days 00:00:00.149782040 +247 71 0 days 00:00:00.193321975 +247 72 0 days 00:00:00.201509804 +247 73 0 days 00:00:00.155102846 +247 74 0 days 00:00:00.190971080 +247 75 0 days 00:00:00.186583840 +247 76 0 days 00:00:00.185772090 +247 77 0 days 00:00:00.153877850 +247 78 0 days 00:00:00.186097345 +247 79 0 days 00:00:00.196584590 
+247 80 0 days 00:00:00.186649155 +247 81 0 days 00:00:00.123071948 +247 82 0 days 00:00:00.164040916 +247 83 0 days 00:00:00.127290920 +247 84 0 days 00:00:00.140161265 +247 85 0 days 00:00:00.194981495 +247 86 0 days 00:00:00.133513810 +247 87 0 days 00:00:00.145577488 +247 88 0 days 00:00:00.145822548 +247 89 0 days 00:00:00.119421500 +247 90 0 days 00:00:00.170675346 +247 91 0 days 00:00:00.123132285 +247 92 0 days 00:00:00.218799317 +247 93 0 days 00:00:00.121818611 +247 94 0 days 00:00:00.218710317 +247 95 0 days 00:00:00.136246938 +247 96 0 days 00:00:00.102803593 +247 97 0 days 00:00:00.217844680 +247 98 0 days 00:00:00.190634525 +247 99 0 days 00:00:00.138333808 +247 100 0 days 00:00:00.115826445 +248 1 0 days 00:00:00.088448854 +248 2 0 days 00:00:00.077178415 +248 3 0 days 00:00:00.108371865 +248 4 0 days 00:00:00.069376096 +248 5 0 days 00:00:00.082458780 +248 6 0 days 00:00:00.115651740 +248 7 0 days 00:00:00.075235390 +248 8 0 days 00:00:00.068040824 +248 9 0 days 00:00:00.113315540 +248 10 0 days 00:00:00.064905066 +248 11 0 days 00:00:00.128659552 +248 12 0 days 00:00:00.092401864 +248 13 0 days 00:00:00.115691775 +248 14 0 days 00:00:00.068284296 +248 15 0 days 00:00:00.074014930 +248 16 0 days 00:00:00.065171733 +248 17 0 days 00:00:00.111282355 +248 18 0 days 00:00:00.110699420 +248 19 0 days 00:00:00.086650332 +248 20 0 days 00:00:00.086784651 +248 21 0 days 00:00:00.106825375 +248 22 0 days 00:00:00.110029735 +248 23 0 days 00:00:00.068727020 +248 24 0 days 00:00:00.077716195 +248 25 0 days 00:00:00.068482820 +248 26 0 days 00:00:00.124647288 +248 27 0 days 00:00:00.084465442 +248 28 0 days 00:00:00.079608036 +248 29 0 days 00:00:00.084396460 +248 30 0 days 00:00:00.114461080 +248 31 0 days 00:00:00.112663396 +248 32 0 days 00:00:00.078186760 +248 33 0 days 00:00:00.073986008 +248 34 0 days 00:00:00.067554916 +248 35 0 days 00:00:00.070331776 +248 36 0 days 00:00:00.134804980 +248 37 0 days 00:00:00.083016090 +248 38 0 days 00:00:00.083693640 
+248 39 0 days 00:00:00.104157510 +248 40 0 days 00:00:00.115743585 +248 41 0 days 00:00:00.080580640 +248 42 0 days 00:00:00.072928853 +248 43 0 days 00:00:00.069427065 +248 44 0 days 00:00:00.076550088 +248 45 0 days 00:00:00.096019680 +248 46 0 days 00:00:00.077877444 +248 47 0 days 00:00:00.099451866 +248 48 0 days 00:00:00.073732767 +248 49 0 days 00:00:00.071055880 +248 50 0 days 00:00:00.068173270 +248 51 0 days 00:00:00.084625260 +248 52 0 days 00:00:00.121106820 +248 53 0 days 00:00:00.064623560 +248 54 0 days 00:00:00.080536212 +248 55 0 days 00:00:00.088711306 +248 56 0 days 00:00:00.072639972 +248 57 0 days 00:00:00.080826830 +248 58 0 days 00:00:00.089213909 +248 59 0 days 00:00:00.114180690 +248 60 0 days 00:00:00.102844900 +248 61 0 days 00:00:00.105217745 +248 62 0 days 00:00:00.108126020 +248 63 0 days 00:00:00.073694856 +248 64 0 days 00:00:00.081952160 +248 65 0 days 00:00:00.117097868 +248 66 0 days 00:00:00.080261996 +248 67 0 days 00:00:00.110147973 +248 68 0 days 00:00:00.076751515 +248 69 0 days 00:00:00.082506246 +248 70 0 days 00:00:00.086958992 +248 71 0 days 00:00:00.089707769 +248 72 0 days 00:00:00.089635293 +248 73 0 days 00:00:00.082313753 +248 74 0 days 00:00:00.079533837 +248 75 0 days 00:00:00.103970860 +248 76 0 days 00:00:00.068268636 +248 77 0 days 00:00:00.085257045 +248 78 0 days 00:00:00.081804944 +248 79 0 days 00:00:00.076394040 +248 80 0 days 00:00:00.079574690 +248 81 0 days 00:00:00.111163430 +248 82 0 days 00:00:00.080489815 +248 83 0 days 00:00:00.074487603 +248 84 0 days 00:00:00.100454473 +248 85 0 days 00:00:00.083830000 +248 86 0 days 00:00:00.108682855 +248 87 0 days 00:00:00.107757165 +248 88 0 days 00:00:00.068302564 +248 89 0 days 00:00:00.109556900 +248 90 0 days 00:00:00.111843570 +248 91 0 days 00:00:00.109558725 +248 92 0 days 00:00:00.106332825 +248 93 0 days 00:00:00.070523140 +248 94 0 days 00:00:00.085279840 +248 95 0 days 00:00:00.117125976 +248 96 0 days 00:00:00.081398815 +248 97 0 days 
00:00:00.094630544 +248 98 0 days 00:00:00.073991380 +248 99 0 days 00:00:00.089575606 +248 100 0 days 00:00:00.070240400 +249 1 0 days 00:00:00.130874856 +249 2 0 days 00:00:00.092197460 +249 3 0 days 00:00:00.109527285 +249 4 0 days 00:00:00.089651702 +249 5 0 days 00:00:00.072632333 +249 6 0 days 00:00:00.082776184 +249 7 0 days 00:00:00.083351413 +249 8 0 days 00:00:00.122088550 +249 9 0 days 00:00:00.110765085 +249 10 0 days 00:00:00.120066100 +249 11 0 days 00:00:00.079093885 +249 12 0 days 00:00:00.068411848 +249 13 0 days 00:00:00.087733905 +249 14 0 days 00:00:00.081755165 +249 15 0 days 00:00:00.087576460 +249 16 0 days 00:00:00.090591292 +249 17 0 days 00:00:00.122229073 +249 18 0 days 00:00:00.072770246 +249 19 0 days 00:00:00.086205715 +249 20 0 days 00:00:00.073374648 +249 21 0 days 00:00:00.090384477 +249 22 0 days 00:00:00.109280885 +249 23 0 days 00:00:00.118543288 +249 24 0 days 00:00:00.108778125 +249 25 0 days 00:00:00.084876875 +249 26 0 days 00:00:00.069093972 +249 27 0 days 00:00:00.069578708 +249 28 0 days 00:00:00.090710968 +249 29 0 days 00:00:00.122810696 +249 30 0 days 00:00:00.119072036 +249 31 0 days 00:00:00.080801756 +249 32 0 days 00:00:00.076739770 +249 33 0 days 00:00:00.071460633 +249 34 0 days 00:00:00.090385948 +249 35 0 days 00:00:00.076158876 +249 36 0 days 00:00:00.116869340 +249 37 0 days 00:00:00.086430630 +249 38 0 days 00:00:00.069976134 +249 39 0 days 00:00:00.116641170 +249 40 0 days 00:00:00.077083265 +249 41 0 days 00:00:00.069090464 +249 42 0 days 00:00:00.124106743 +249 43 0 days 00:00:00.074594812 +249 44 0 days 00:00:00.121252208 +249 45 0 days 00:00:00.073284117 +249 46 0 days 00:00:00.110776710 +249 47 0 days 00:00:00.072914342 +249 48 0 days 00:00:00.068982028 +249 49 0 days 00:00:00.093299056 +249 50 0 days 00:00:00.084928552 +249 51 0 days 00:00:00.070650443 +249 52 0 days 00:00:00.069726156 +249 53 0 days 00:00:00.095090565 +249 54 0 days 00:00:00.113522504 +249 55 0 days 00:00:00.113878255 +249 56 0 days 
00:00:00.071701765 +249 57 0 days 00:00:00.078778780 +249 58 0 days 00:00:00.089304800 +249 59 0 days 00:00:00.133044373 +249 60 0 days 00:00:00.084975520 +249 61 0 days 00:00:00.122002400 +249 62 0 days 00:00:00.109642780 +249 63 0 days 00:00:00.088975942 +249 64 0 days 00:00:00.085254404 +249 65 0 days 00:00:00.123132682 +249 66 0 days 00:00:00.074847620 +249 67 0 days 00:00:00.128516180 +249 68 0 days 00:00:00.114391960 +249 69 0 days 00:00:00.081991195 +249 70 0 days 00:00:00.074859856 +249 71 0 days 00:00:00.074748571 +249 72 0 days 00:00:00.090453555 +249 73 0 days 00:00:00.077628225 +249 74 0 days 00:00:00.109774320 +249 75 0 days 00:00:00.108782615 +249 76 0 days 00:00:00.074698552 +249 77 0 days 00:00:00.082925964 +249 78 0 days 00:00:00.109277640 +249 79 0 days 00:00:00.078005668 +249 80 0 days 00:00:00.110727935 +249 81 0 days 00:00:00.119291088 +249 82 0 days 00:00:00.120193392 +249 83 0 days 00:00:00.083014844 +249 84 0 days 00:00:00.098793206 +249 85 0 days 00:00:00.110464905 +249 86 0 days 00:00:00.106545785 +249 87 0 days 00:00:00.089800923 +249 88 0 days 00:00:00.092396417 +249 89 0 days 00:00:00.072955640 +249 90 0 days 00:00:00.108334635 +249 91 0 days 00:00:00.089319480 +249 92 0 days 00:00:00.117964616 +249 93 0 days 00:00:00.085576992 +249 94 0 days 00:00:00.111038715 +249 95 0 days 00:00:00.073349753 +249 96 0 days 00:00:00.110975840 +249 97 0 days 00:00:00.065474670 +249 98 0 days 00:00:00.092522007 +249 99 0 days 00:00:00.085388370 +249 100 0 days 00:00:00.119587395 +250 1 0 days 00:00:00.173074169 +250 2 0 days 00:00:00.130816045 +250 3 0 days 00:00:00.150102440 +250 4 0 days 00:00:00.207712151 +250 5 0 days 00:00:00.162475328 +250 6 0 days 00:00:00.189929335 +250 7 0 days 00:00:00.211223166 +250 8 0 days 00:00:00.204874380 +250 9 0 days 00:00:00.130689140 +250 10 0 days 00:00:00.214062536 +250 11 0 days 00:00:00.147159196 +250 12 0 days 00:00:00.120170675 +250 13 0 days 00:00:00.183348050 +250 14 0 days 00:00:00.212098285 +250 15 0 days 
00:00:00.223924167 +250 16 0 days 00:00:00.132491843 +250 17 0 days 00:00:00.150103976 +250 18 0 days 00:00:00.145395516 +250 19 0 days 00:00:00.135556234 +250 20 0 days 00:00:00.163530865 +250 21 0 days 00:00:00.151891856 +250 22 0 days 00:00:00.154969214 +250 23 0 days 00:00:00.167746373 +250 24 0 days 00:00:00.209340250 +250 25 0 days 00:00:00.218007196 +250 26 0 days 00:00:00.147875612 +250 27 0 days 00:00:00.210856670 +250 28 0 days 00:00:00.175809786 +250 29 0 days 00:00:00.361938700 +250 30 0 days 00:00:00.202761076 +250 31 0 days 00:00:00.202385684 +250 32 0 days 00:00:00.317375705 +250 33 0 days 00:00:00.350981491 +250 34 0 days 00:00:00.209644773 +250 35 0 days 00:00:00.319456325 +250 36 0 days 00:00:00.368598611 +250 37 0 days 00:00:00.212998588 +250 38 0 days 00:00:00.181756525 +250 39 0 days 00:00:00.181590974 +250 40 0 days 00:00:00.147394846 +250 41 0 days 00:00:00.296222095 +250 42 0 days 00:00:00.343396452 +250 43 0 days 00:00:00.303918675 +250 44 0 days 00:00:00.157201887 +250 45 0 days 00:00:00.198071425 +250 46 0 days 00:00:00.196154672 +250 47 0 days 00:00:00.211401871 +250 48 0 days 00:00:00.204276976 +250 49 0 days 00:00:00.159114790 +250 50 0 days 00:00:00.165870996 +250 51 0 days 00:00:00.163033696 +250 52 0 days 00:00:00.201888907 +250 53 0 days 00:00:00.182973880 +250 54 0 days 00:00:00.310662655 +250 55 0 days 00:00:00.335809416 +250 56 0 days 00:00:00.206413928 +250 57 0 days 00:00:00.332135260 +250 58 0 days 00:00:00.344068020 +250 59 0 days 00:00:00.314669110 +250 60 0 days 00:00:00.276033686 +250 61 0 days 00:00:00.194783340 +250 62 0 days 00:00:00.185492260 +250 63 0 days 00:00:00.318859157 +250 64 0 days 00:00:00.207116520 +250 65 0 days 00:00:00.202505966 +250 66 0 days 00:00:00.184687156 +250 67 0 days 00:00:00.230816450 +250 68 0 days 00:00:00.140988140 +250 69 0 days 00:00:00.147844335 +250 70 0 days 00:00:00.132149036 +250 71 0 days 00:00:00.162973820 +250 72 0 days 00:00:00.240113930 +250 73 0 days 00:00:00.151156000 +250 74 
0 days 00:00:00.134948912 +250 75 0 days 00:00:00.136858465 +250 76 0 days 00:00:00.257697635 +250 77 0 days 00:00:00.286024554 +250 78 0 days 00:00:00.237286210 +250 79 0 days 00:00:00.154316720 +250 80 0 days 00:00:00.244560910 +250 81 0 days 00:00:00.157619800 +250 82 0 days 00:00:00.150342816 +250 83 0 days 00:00:00.145477528 +250 84 0 days 00:00:00.127645724 +250 85 0 days 00:00:00.143521070 +250 86 0 days 00:00:00.144825700 +250 87 0 days 00:00:00.222329320 +250 88 0 days 00:00:00.120613276 +250 89 0 days 00:00:00.191201900 +250 90 0 days 00:00:00.194122915 +250 91 0 days 00:00:00.172538936 +250 92 0 days 00:00:00.216344796 +250 93 0 days 00:00:00.188830355 +250 94 0 days 00:00:00.122054410 +250 95 0 days 00:00:00.185777375 +250 96 0 days 00:00:00.118587356 +250 97 0 days 00:00:00.112828530 +250 98 0 days 00:00:00.165771972 +250 99 0 days 00:00:00.189931292 +250 100 0 days 00:00:00.186230115 +251 1 0 days 00:00:00.196387430 +251 2 0 days 00:00:00.190206323 +251 3 0 days 00:00:00.126720507 +251 4 0 days 00:00:00.194006405 +251 5 0 days 00:00:00.174400275 +251 6 0 days 00:00:00.180605940 +251 7 0 days 00:00:00.187453853 +251 8 0 days 00:00:00.167140725 +251 9 0 days 00:00:00.107914768 +251 10 0 days 00:00:00.176092105 +251 11 0 days 00:00:00.173357295 +251 12 0 days 00:00:00.190463012 +251 13 0 days 00:00:00.166394310 +251 14 0 days 00:00:00.113098625 +251 15 0 days 00:00:00.081295735 +251 16 0 days 00:00:00.105841104 +251 17 0 days 00:00:00.120093455 +251 18 0 days 00:00:00.190070623 +251 19 0 days 00:00:00.107252024 +251 20 0 days 00:00:00.175386244 +251 21 0 days 00:00:00.101613470 +251 22 0 days 00:00:00.121771967 +251 23 0 days 00:00:00.199969888 +251 24 0 days 00:00:00.103843644 +251 25 0 days 00:00:00.078092745 +251 26 0 days 00:00:00.102988203 +251 27 0 days 00:00:00.082991556 +251 28 0 days 00:00:00.151165136 +251 29 0 days 00:00:00.159676582 +251 30 0 days 00:00:00.079908668 +251 31 0 days 00:00:00.139805515 +251 32 0 days 00:00:00.138630950 +251 33 0 
days 00:00:00.083900385 +251 34 0 days 00:00:00.076325380 +251 35 0 days 00:00:00.079907908 +251 36 0 days 00:00:00.085557091 +251 37 0 days 00:00:00.082493366 +251 38 0 days 00:00:00.098157965 +251 39 0 days 00:00:00.092928576 +251 40 0 days 00:00:00.074301820 +251 41 0 days 00:00:00.092699422 +251 42 0 days 00:00:00.116646670 +251 43 0 days 00:00:00.067535386 +251 44 0 days 00:00:00.064471390 +251 45 0 days 00:00:00.116179953 +251 46 0 days 00:00:00.132185310 +251 47 0 days 00:00:00.070951708 +251 48 0 days 00:00:00.177154590 +251 49 0 days 00:00:00.090434042 +251 50 0 days 00:00:00.162336030 +251 51 0 days 00:00:00.184315102 +251 52 0 days 00:00:00.162428804 +251 53 0 days 00:00:00.175900168 +251 54 0 days 00:00:00.093396834 +251 55 0 days 00:00:00.100269270 +251 56 0 days 00:00:00.124771492 +251 57 0 days 00:00:00.117940600 +251 58 0 days 00:00:00.103358464 +251 59 0 days 00:00:00.081147304 +251 60 0 days 00:00:00.099440735 +251 61 0 days 00:00:00.087124850 +251 62 0 days 00:00:00.109929350 +251 63 0 days 00:00:00.091441540 +251 64 0 days 00:00:00.118817066 +251 65 0 days 00:00:00.104134310 +251 66 0 days 00:00:00.105132400 +251 67 0 days 00:00:00.081961532 +251 68 0 days 00:00:00.121302948 +251 69 0 days 00:00:00.115820314 +251 70 0 days 00:00:00.171543185 +251 71 0 days 00:00:00.189140866 +251 72 0 days 00:00:00.104759540 +251 73 0 days 00:00:00.116679732 +251 74 0 days 00:00:00.108621370 +251 75 0 days 00:00:00.112148851 +251 76 0 days 00:00:00.190832728 +251 77 0 days 00:00:00.189849897 +251 78 0 days 00:00:00.175666800 +251 79 0 days 00:00:00.095389613 +251 80 0 days 00:00:00.070099525 +251 81 0 days 00:00:00.101312482 +251 82 0 days 00:00:00.148102951 +251 83 0 days 00:00:00.124204895 +251 84 0 days 00:00:00.093825448 +251 85 0 days 00:00:00.136474440 +251 86 0 days 00:00:00.086904504 +251 87 0 days 00:00:00.147421597 +251 88 0 days 00:00:00.095596342 +251 89 0 days 00:00:00.186617690 +251 90 0 days 00:00:00.198146735 +251 91 0 days 00:00:00.116404280 
+251 92 0 days 00:00:00.085107185 +251 93 0 days 00:00:00.176624810 +251 94 0 days 00:00:00.110027088 +251 95 0 days 00:00:00.124609670 +251 96 0 days 00:00:00.116334174 +251 97 0 days 00:00:00.103970095 +251 98 0 days 00:00:00.189150600 +251 99 0 days 00:00:00.087433436 +251 100 0 days 00:00:00.114956660 +252 1 0 days 00:00:00.432471536 +252 2 0 days 00:00:00.334791828 +252 3 0 days 00:00:00.299683540 +252 4 0 days 00:00:00.292330990 +252 5 0 days 00:00:00.378200415 +252 6 0 days 00:00:00.861566810 +252 7 0 days 00:00:00.302401044 +252 8 0 days 00:00:00.694097840 +252 9 0 days 00:00:00.918985903 +252 10 0 days 00:00:00.311278400 +252 11 0 days 00:00:00.368111616 +252 12 0 days 00:00:01.166357896 +252 13 0 days 00:00:00.294663156 +252 14 0 days 00:00:00.284882660 +252 15 0 days 00:00:00.933141750 +252 16 0 days 00:00:00.795464545 +252 17 0 days 00:00:00.926725445 +252 18 0 days 00:00:00.790918625 +252 19 0 days 00:00:00.373018224 +252 20 0 days 00:00:00.564266540 +252 21 0 days 00:00:00.527863293 +252 22 0 days 00:00:00.892567075 +252 23 0 days 00:00:00.915655440 +252 24 0 days 00:00:00.362047010 +252 25 0 days 00:00:00.516944886 +252 26 0 days 00:00:00.927522385 +252 27 0 days 00:00:00.602163055 +252 28 0 days 00:00:00.521897386 +252 29 0 days 00:00:00.469921925 +252 30 0 days 00:00:00.848870505 +252 31 0 days 00:00:00.335608385 +252 32 0 days 00:00:01.005223748 +252 33 0 days 00:00:00.342065276 +252 34 0 days 00:00:00.449126930 +252 35 0 days 00:00:00.525069920 +252 36 0 days 00:00:00.294829760 +252 37 0 days 00:00:00.832746240 +252 38 0 days 00:00:00.813508210 +252 39 0 days 00:00:00.476608910 +252 40 0 days 00:00:00.473189975 +252 41 0 days 00:00:00.317273996 +252 42 0 days 00:00:00.439524580 +252 43 0 days 00:00:00.891790130 +252 44 0 days 00:00:00.531582665 +252 45 0 days 00:00:00.521688735 +252 46 0 days 00:00:00.863733915 +252 47 0 days 00:00:00.308807268 +252 48 0 days 00:00:00.470268235 +252 49 0 days 00:00:00.285282355 +252 50 0 days 00:00:00.342153510 
+252 51 0 days 00:00:00.314172868 +252 52 0 days 00:00:00.944015460 +252 53 0 days 00:00:00.621982925 +252 54 0 days 00:00:00.592708720 +252 55 0 days 00:00:00.345052716 +252 56 0 days 00:00:00.485943025 +252 57 0 days 00:00:00.897717725 +252 58 0 days 00:00:00.262114730 +252 59 0 days 00:00:00.514192320 +252 60 0 days 00:00:00.291804480 +252 61 0 days 00:00:00.420450286 +252 62 0 days 00:00:00.936102245 +252 63 0 days 00:00:00.252640233 +252 64 0 days 00:00:00.487273464 +252 65 0 days 00:00:00.333903016 +252 66 0 days 00:00:00.389617843 +252 67 0 days 00:00:00.560113693 +252 68 0 days 00:00:00.546209222 +252 69 0 days 00:00:00.807582873 +252 70 0 days 00:00:00.836909815 +252 71 0 days 00:00:00.642567660 +252 72 0 days 00:00:00.318127585 +252 73 0 days 00:00:00.621938926 +252 74 0 days 00:00:00.301849403 +252 75 0 days 00:00:00.500264177 +252 76 0 days 00:00:00.332691480 +252 77 0 days 00:00:00.943561045 +252 78 0 days 00:00:00.833273585 +252 79 0 days 00:00:00.581327106 +252 80 0 days 00:00:00.269748824 +252 81 0 days 00:00:00.439927293 +252 82 0 days 00:00:00.497385236 +252 83 0 days 00:00:00.442172910 +252 84 0 days 00:00:00.306324755 +252 85 0 days 00:00:00.512845785 +252 86 0 days 00:00:00.294750525 +252 87 0 days 00:00:00.318771133 +252 88 0 days 00:00:00.566450680 +252 89 0 days 00:00:00.338449935 +252 90 0 days 00:00:00.539110820 +252 91 0 days 00:00:00.347394770 +252 92 0 days 00:00:00.406252486 +252 93 0 days 00:00:00.498519900 +252 94 0 days 00:00:00.535233483 +252 95 0 days 00:00:00.368449906 +252 96 0 days 00:00:00.829737115 +252 97 0 days 00:00:00.599666500 +252 98 0 days 00:00:00.893334144 +252 99 0 days 00:00:00.575367560 +252 100 0 days 00:00:00.911971420 +253 1 0 days 00:00:00.298048756 +253 2 0 days 00:00:01.005477304 +253 3 0 days 00:00:00.463566650 +253 4 0 days 00:00:00.551682380 +253 5 0 days 00:00:00.473004055 +253 6 0 days 00:00:00.628101425 +253 7 0 days 00:00:00.553535135 +253 8 0 days 00:00:00.359162324 +253 9 0 days 00:00:00.748454940 
+253 10 0 days 00:00:00.909309926 +253 11 0 days 00:00:00.937612590 +253 12 0 days 00:00:00.330162583 +253 13 0 days 00:00:00.625470236 +253 14 0 days 00:00:00.410919780 +253 15 0 days 00:00:00.422267200 +253 16 0 days 00:00:00.400171328 +253 17 0 days 00:00:00.311184948 +253 18 0 days 00:00:00.302582895 +253 19 0 days 00:00:00.277244385 +253 20 0 days 00:00:00.382423850 +253 21 0 days 00:00:00.364688364 +253 22 0 days 00:00:00.936597780 +253 23 0 days 00:00:00.907902150 +253 24 0 days 00:00:00.424095708 +253 25 0 days 00:00:00.590257630 +253 26 0 days 00:00:00.983265795 +253 27 0 days 00:00:01.045478806 +253 28 0 days 00:00:00.659733533 +253 29 0 days 00:00:01.049850015 +253 30 0 days 00:00:00.961413065 +253 31 0 days 00:00:00.349763557 +253 32 0 days 00:00:00.327035810 +253 33 0 days 00:00:00.464428325 +253 34 0 days 00:00:01.002701732 +253 35 0 days 00:00:00.314972220 +253 36 0 days 00:00:00.862285130 +253 37 0 days 00:00:00.474564350 +253 38 0 days 00:00:00.496241588 +253 39 0 days 00:00:00.897722632 +253 40 0 days 00:00:00.742939906 +253 41 0 days 00:00:00.663358483 +253 42 0 days 00:00:00.887719996 +253 43 0 days 00:00:00.293171340 +253 44 0 days 00:00:00.625429560 +253 45 0 days 00:00:00.468798355 +253 46 0 days 00:00:00.379163300 +253 47 0 days 00:00:00.312153617 +253 48 0 days 00:00:00.321019484 +253 49 0 days 00:00:00.333165040 +253 50 0 days 00:00:00.477046670 +253 51 0 days 00:00:00.824381245 +253 52 0 days 00:00:00.781220533 +253 53 0 days 00:00:00.794067770 +253 54 0 days 00:00:00.516916700 +253 55 0 days 00:00:00.342615161 +253 56 0 days 00:00:00.298903696 +253 57 0 days 00:00:00.410422364 +253 58 0 days 00:00:00.441187446 +253 59 0 days 00:00:00.890890325 +253 60 0 days 00:00:00.352533673 +253 61 0 days 00:00:00.550711482 +253 62 0 days 00:00:00.419545064 +253 63 0 days 00:00:00.374600070 +253 64 0 days 00:00:00.545191804 +253 65 0 days 00:00:00.462426825 +253 66 0 days 00:00:00.508531720 +253 67 0 days 00:00:00.511238130 +253 68 0 days 
00:00:00.887896395 +253 69 0 days 00:00:00.925783100 +253 70 0 days 00:00:00.924822360 +253 71 0 days 00:00:00.576401868 +253 72 0 days 00:00:00.318017768 +253 73 0 days 00:00:00.355408432 +253 74 0 days 00:00:01.124200732 +253 75 0 days 00:00:00.959661605 +253 76 0 days 00:00:00.334429950 +253 77 0 days 00:00:00.963845300 +253 78 0 days 00:00:00.274593542 +253 79 0 days 00:00:00.510786932 +253 80 0 days 00:00:00.479449605 +253 81 0 days 00:00:00.775501491 +253 82 0 days 00:00:00.897487308 +253 83 0 days 00:00:00.448837600 +253 84 0 days 00:00:00.537008668 +253 85 0 days 00:00:00.292937765 +253 86 0 days 00:00:00.323696903 +253 87 0 days 00:00:00.343315955 +253 88 0 days 00:00:00.317764090 +253 89 0 days 00:00:00.333656468 +253 90 0 days 00:00:00.520339040 +253 91 0 days 00:00:00.281634545 +253 92 0 days 00:00:00.534622916 +253 93 0 days 00:00:00.700891688 +253 94 0 days 00:00:00.453637975 +253 95 0 days 00:00:00.558865440 +253 96 0 days 00:00:00.530369736 +253 97 0 days 00:00:00.520981745 +253 98 0 days 00:00:00.577001526 +253 99 0 days 00:00:00.311171636 +253 100 0 days 00:00:00.485022290 +254 1 0 days 00:00:00.287206702 +254 2 0 days 00:00:00.160754992 +254 3 0 days 00:00:00.237925000 +254 4 0 days 00:00:00.316096060 +254 5 0 days 00:00:00.409523335 +254 6 0 days 00:00:00.241880385 +254 7 0 days 00:00:00.157439228 +254 8 0 days 00:00:00.254251346 +254 9 0 days 00:00:00.162316920 +254 10 0 days 00:00:00.172161787 +254 11 0 days 00:00:00.430449080 +254 12 0 days 00:00:00.448958865 +254 13 0 days 00:00:00.230609875 +254 14 0 days 00:00:00.175404062 +254 15 0 days 00:00:00.264755320 +254 16 0 days 00:00:00.266095260 +254 17 0 days 00:00:00.200657628 +254 18 0 days 00:00:00.198578206 +254 19 0 days 00:00:00.395643830 +254 20 0 days 00:00:00.408677060 +254 21 0 days 00:00:00.170270040 +254 22 0 days 00:00:00.247786767 +254 23 0 days 00:00:00.300657453 +254 24 0 days 00:00:00.240384385 +254 25 0 days 00:00:00.450398108 +254 26 0 days 00:00:00.472004686 +254 27 0 days 
00:00:00.434084085 +254 28 0 days 00:00:00.252594100 +254 29 0 days 00:00:00.195916913 +254 30 0 days 00:00:00.184508051 +254 31 0 days 00:00:00.306090880 +254 32 0 days 00:00:00.153866675 +254 33 0 days 00:00:00.411324946 +254 34 0 days 00:00:00.163027911 +254 35 0 days 00:00:00.422765250 +254 36 0 days 00:00:00.452601324 +254 37 0 days 00:00:00.177676500 +254 38 0 days 00:00:00.426902345 +254 39 0 days 00:00:00.161598052 +254 40 0 days 00:00:00.266374753 +254 41 0 days 00:00:00.169069960 +254 42 0 days 00:00:00.253244610 +254 43 0 days 00:00:00.143570724 +254 44 0 days 00:00:00.160851516 +254 45 0 days 00:00:00.167896260 +254 46 0 days 00:00:00.192434500 +254 47 0 days 00:00:00.238856435 +254 48 0 days 00:00:00.254506672 +254 49 0 days 00:00:00.171784150 +254 50 0 days 00:00:00.504207660 +254 51 0 days 00:00:00.416916005 +254 52 0 days 00:00:00.457265484 +254 53 0 days 00:00:00.299156305 +254 54 0 days 00:00:00.198081195 +254 55 0 days 00:00:00.431018911 +254 56 0 days 00:00:00.264813840 +254 57 0 days 00:00:00.307260410 +254 58 0 days 00:00:00.159750625 +254 59 0 days 00:00:00.402617940 +254 60 0 days 00:00:00.181055232 +254 61 0 days 00:00:00.439377680 +254 62 0 days 00:00:00.132342515 +254 63 0 days 00:00:00.368472026 +254 64 0 days 00:00:00.147032660 +254 65 0 days 00:00:00.272054225 +254 66 0 days 00:00:00.163882815 +254 67 0 days 00:00:00.433862305 +254 68 0 days 00:00:00.253800420 +254 69 0 days 00:00:00.493538975 +254 70 0 days 00:00:00.493957770 +254 71 0 days 00:00:00.426839375 +254 72 0 days 00:00:00.428836160 +254 73 0 days 00:00:00.379316166 +254 74 0 days 00:00:00.269911005 +254 75 0 days 00:00:00.295976523 +254 76 0 days 00:00:00.419528020 +254 77 0 days 00:00:00.432950645 +254 78 0 days 00:00:00.239262295 +254 79 0 days 00:00:00.272714340 +254 80 0 days 00:00:00.451725536 +254 81 0 days 00:00:00.244901726 +254 82 0 days 00:00:00.279137325 +254 83 0 days 00:00:00.448173130 +254 84 0 days 00:00:00.297856206 +254 85 0 days 00:00:00.467243316 +254 86 
0 days 00:00:00.245456315 +254 87 0 days 00:00:00.375797162 +254 88 0 days 00:00:00.264073825 +254 89 0 days 00:00:00.329594295 +254 90 0 days 00:00:00.214181460 +254 91 0 days 00:00:00.288539206 +254 92 0 days 00:00:00.156678180 +254 93 0 days 00:00:00.206132620 +254 94 0 days 00:00:00.141057420 +254 95 0 days 00:00:00.284812982 +254 96 0 days 00:00:00.454162275 +254 97 0 days 00:00:00.149073786 +254 98 0 days 00:00:00.159131375 +254 99 0 days 00:00:00.195390580 +254 100 0 days 00:00:00.206700346 +255 1 0 days 00:00:00.265677700 +255 2 0 days 00:00:00.166927230 +255 3 0 days 00:00:00.537797817 +255 4 0 days 00:00:00.161407284 +255 5 0 days 00:00:00.214412455 +255 6 0 days 00:00:00.269545768 +255 7 0 days 00:00:00.182927540 +255 8 0 days 00:00:00.343493970 +255 9 0 days 00:00:00.245090735 +255 10 0 days 00:00:00.246372765 +255 11 0 days 00:00:00.440430030 +255 12 0 days 00:00:00.507423016 +255 13 0 days 00:00:00.200410932 +255 14 0 days 00:00:00.276875825 +255 15 0 days 00:00:00.433191230 +255 16 0 days 00:00:00.170927646 +255 17 0 days 00:00:00.289703900 +255 18 0 days 00:00:00.186040997 +255 19 0 days 00:00:00.444733380 +255 20 0 days 00:00:00.290804264 +255 21 0 days 00:00:00.221459500 +255 22 0 days 00:00:00.227158476 +255 23 0 days 00:00:00.521215365 +255 24 0 days 00:00:00.356371525 +255 25 0 days 00:00:00.442602860 +255 26 0 days 00:00:00.285983208 +255 27 0 days 00:00:00.152972040 +255 28 0 days 00:00:00.290838104 +255 29 0 days 00:00:00.389375350 +255 30 0 days 00:00:00.522752904 +255 31 0 days 00:00:00.167641010 +255 32 0 days 00:00:00.141365665 +255 33 0 days 00:00:00.243134305 +255 34 0 days 00:00:00.575730888 +255 35 0 days 00:00:00.562069068 +255 36 0 days 00:00:00.292629980 +255 37 0 days 00:00:00.260260680 +255 38 0 days 00:00:00.454187660 +255 39 0 days 00:00:00.473921353 +255 40 0 days 00:00:00.270556650 +255 41 0 days 00:00:00.227238330 +255 42 0 days 00:00:00.400328820 +255 43 0 days 00:00:00.521420295 +255 44 0 days 00:00:00.306684760 +255 45 0 
days 00:00:00.305482465 +255 46 0 days 00:00:00.348640603 +255 47 0 days 00:00:00.141242190 +255 48 0 days 00:00:00.363060295 +255 49 0 days 00:00:00.278846605 +255 50 0 days 00:00:00.443313225 +255 51 0 days 00:00:00.233601480 +255 52 0 days 00:00:00.370662136 +255 53 0 days 00:00:00.176628364 +255 54 0 days 00:00:00.493112248 +255 55 0 days 00:00:00.157917920 +255 56 0 days 00:00:00.268895615 +255 57 0 days 00:00:00.219746800 +255 58 0 days 00:00:00.515378590 +255 59 0 days 00:00:00.257972384 +255 60 0 days 00:00:00.276229395 +255 61 0 days 00:00:00.412426590 +255 62 0 days 00:00:00.266972160 +255 63 0 days 00:00:00.151619953 +255 64 0 days 00:00:00.166512175 +255 65 0 days 00:00:00.163296040 +255 66 0 days 00:00:00.167061742 +255 67 0 days 00:00:00.423933095 +255 68 0 days 00:00:00.402340880 +255 69 0 days 00:00:00.502624722 +255 70 0 days 00:00:00.174397700 +255 71 0 days 00:00:00.322165104 +255 72 0 days 00:00:00.151201044 +255 73 0 days 00:00:00.294311803 +255 74 0 days 00:00:00.461027352 +255 75 0 days 00:00:00.209534855 +255 76 0 days 00:00:00.332242200 +255 77 0 days 00:00:00.138991540 +255 78 0 days 00:00:00.488011096 +255 79 0 days 00:00:00.494078080 +255 80 0 days 00:00:00.503871840 +255 81 0 days 00:00:00.219917420 +255 82 0 days 00:00:00.570120490 +255 83 0 days 00:00:00.414821710 +255 84 0 days 00:00:00.171804396 +255 85 0 days 00:00:00.173659625 +255 86 0 days 00:00:00.476260584 +255 87 0 days 00:00:00.159036366 +255 88 0 days 00:00:00.175158165 +255 89 0 days 00:00:00.166612866 +255 90 0 days 00:00:00.271594120 +255 91 0 days 00:00:00.564510953 +255 92 0 days 00:00:00.266296923 +255 93 0 days 00:00:00.177008780 +255 94 0 days 00:00:00.254295425 +255 95 0 days 00:00:00.269432400 +255 96 0 days 00:00:00.532803471 +255 97 0 days 00:00:00.269019776 +255 98 0 days 00:00:00.193125463 +255 99 0 days 00:00:00.273401365 +255 100 0 days 00:00:00.504683656 +256 1 0 days 00:00:01.068884806 +256 2 0 days 00:00:00.867467865 +256 3 0 days 00:00:00.504587110 +256 
4 0 days 00:00:00.285173775 +256 5 0 days 00:00:00.913432580 +256 6 0 days 00:00:00.313970200 +256 7 0 days 00:00:00.537782156 +256 8 0 days 00:00:00.542015744 +256 9 0 days 00:00:00.522325720 +256 10 0 days 00:00:00.359963520 +256 11 0 days 00:00:00.899490915 +256 12 0 days 00:00:00.313338940 +256 13 0 days 00:00:00.377835043 +256 14 0 days 00:00:00.933864530 +256 15 0 days 00:00:00.613453215 +256 16 0 days 00:00:00.512822028 +256 17 0 days 00:00:00.978035712 +256 18 0 days 00:00:00.275195210 +256 19 0 days 00:00:00.306313170 +256 20 0 days 00:00:00.963624760 +256 21 0 days 00:00:00.918253695 +256 22 0 days 00:00:00.350714525 +256 23 0 days 00:00:00.350805515 +256 24 0 days 00:00:01.093480340 +256 25 0 days 00:00:00.335934180 +256 26 0 days 00:00:00.894118715 +256 27 0 days 00:00:00.277090685 +256 28 0 days 00:00:00.508006840 +256 29 0 days 00:00:00.306322750 +256 30 0 days 00:00:00.493321030 +256 31 0 days 00:00:00.277300335 +256 32 0 days 00:00:00.283579872 +256 33 0 days 00:00:00.976260990 +256 34 0 days 00:00:00.358192283 +256 35 0 days 00:00:00.486823290 +256 36 0 days 00:00:01.092515036 +256 37 0 days 00:00:00.509858905 +256 38 0 days 00:00:01.074517268 +256 39 0 days 00:00:01.058572805 +256 40 0 days 00:00:00.340868120 +256 41 0 days 00:00:00.514544965 +256 42 0 days 00:00:00.911857813 +256 43 0 days 00:00:00.287439665 +256 44 0 days 00:00:00.349422375 +256 45 0 days 00:00:00.916273055 +256 46 0 days 00:00:00.556088890 +256 47 0 days 00:00:00.655622010 +256 48 0 days 00:00:00.552028045 +256 49 0 days 00:00:00.569598485 +256 50 0 days 00:00:00.412366020 +256 51 0 days 00:00:00.549270615 +256 52 0 days 00:00:00.975628432 +256 53 0 days 00:00:01.037852288 +256 54 0 days 00:00:00.306444816 +256 55 0 days 00:00:00.451870110 +256 56 0 days 00:00:00.562382655 +256 57 0 days 00:00:01.355252765 +256 58 0 days 00:00:00.296101130 +256 59 0 days 00:00:01.062006412 +256 60 0 days 00:00:00.504540984 +256 61 0 days 00:00:00.595627248 +256 62 0 days 00:00:00.592087330 +256 
63 0 days 00:00:00.486702650 +256 64 0 days 00:00:00.363640680 +256 65 0 days 00:00:00.614047820 +256 66 0 days 00:00:00.339001940 +256 67 0 days 00:00:00.300258255 +256 68 0 days 00:00:00.543179280 +256 69 0 days 00:00:00.333115576 +256 70 0 days 00:00:00.411427822 +256 71 0 days 00:00:00.408520823 +256 72 0 days 00:00:00.416345393 +256 73 0 days 00:00:01.079476564 +256 74 0 days 00:00:00.601344280 +256 75 0 days 00:00:00.339090485 +256 76 0 days 00:00:00.349235840 +256 77 0 days 00:00:00.852854210 +256 78 0 days 00:00:00.291836220 +256 79 0 days 00:00:01.091079908 +256 80 0 days 00:00:00.510191370 +256 81 0 days 00:00:01.005484636 +256 82 0 days 00:00:00.492642785 +256 83 0 days 00:00:00.289860330 +256 84 0 days 00:00:00.275618720 +256 85 0 days 00:00:00.901051495 +256 86 0 days 00:00:00.679906428 +256 87 0 days 00:00:00.310557648 +256 88 0 days 00:00:01.006393504 +256 89 0 days 00:00:00.864696120 +256 90 0 days 00:00:00.543741468 +256 91 0 days 00:00:00.279603590 +256 92 0 days 00:00:00.944047693 +256 93 0 days 00:00:00.707731446 +256 94 0 days 00:00:00.309880986 +256 95 0 days 00:00:00.283443430 +256 96 0 days 00:00:00.625862293 +256 97 0 days 00:00:00.522387600 +256 98 0 days 00:00:00.513164284 +256 99 0 days 00:00:00.596862065 +256 100 0 days 00:00:00.656173090 +257 1 0 days 00:00:00.495616908 +257 2 0 days 00:00:00.477430570 +257 3 0 days 00:00:00.233384265 +257 4 0 days 00:00:00.247196345 +257 5 0 days 00:00:00.130870330 +257 6 0 days 00:00:00.137650520 +257 7 0 days 00:00:00.151694916 +257 8 0 days 00:00:00.440132535 +257 9 0 days 00:00:00.488839294 +257 10 0 days 00:00:00.395261725 +257 11 0 days 00:00:00.255246160 +257 12 0 days 00:00:00.220012530 +257 13 0 days 00:00:00.255365645 +257 14 0 days 00:00:00.425249904 +257 15 0 days 00:00:00.315201992 +257 16 0 days 00:00:00.114983385 +257 17 0 days 00:00:00.202048125 +257 18 0 days 00:00:00.233118580 +257 19 0 days 00:00:00.149251286 +257 20 0 days 00:00:00.234514522 +257 21 0 days 00:00:00.154116680 +257 
22 0 days 00:00:00.151792050 +257 23 0 days 00:00:00.217654455 +257 24 0 days 00:00:00.179791425 +257 25 0 days 00:00:00.143659370 +257 26 0 days 00:00:00.323040916 +257 27 0 days 00:00:00.423054484 +257 28 0 days 00:00:00.255485208 +257 29 0 days 00:00:00.295747490 +257 30 0 days 00:00:00.240522065 +257 31 0 days 00:00:00.222309292 +257 32 0 days 00:00:00.294631650 +257 33 0 days 00:00:00.239167240 +257 34 0 days 00:00:00.222111225 +257 35 0 days 00:00:00.241918893 +257 36 0 days 00:00:00.397778675 +257 37 0 days 00:00:00.238852949 +257 38 0 days 00:00:00.139214926 +257 39 0 days 00:00:00.350535855 +257 40 0 days 00:00:00.377160555 +257 41 0 days 00:00:00.175925468 +257 42 0 days 00:00:00.244654004 +257 43 0 days 00:00:00.244901745 +257 44 0 days 00:00:00.222854636 +257 45 0 days 00:00:00.145382710 +257 46 0 days 00:00:00.333627280 +257 47 0 days 00:00:00.120754360 +257 48 0 days 00:00:00.250346235 +257 49 0 days 00:00:00.126718480 +257 50 0 days 00:00:00.467096015 +257 51 0 days 00:00:00.361950575 +257 52 0 days 00:00:00.462851700 +257 53 0 days 00:00:00.417650944 +257 54 0 days 00:00:00.371884020 +257 55 0 days 00:00:00.286137790 +257 56 0 days 00:00:00.433692670 +257 57 0 days 00:00:00.410246285 +257 58 0 days 00:00:00.234502534 +257 59 0 days 00:00:00.421158010 +257 60 0 days 00:00:00.126307820 +257 61 0 days 00:00:00.495088611 +257 62 0 days 00:00:00.233624296 +257 63 0 days 00:00:00.429581690 +257 64 0 days 00:00:00.246660811 +257 65 0 days 00:00:00.418144760 +257 66 0 days 00:00:00.375507416 +257 67 0 days 00:00:00.370636490 +257 68 0 days 00:00:00.246508625 +257 69 0 days 00:00:00.425840984 +257 70 0 days 00:00:00.140962030 +257 71 0 days 00:00:00.139473370 +257 72 0 days 00:00:00.287929510 +257 73 0 days 00:00:00.238323308 +257 74 0 days 00:00:00.133492937 +257 75 0 days 00:00:00.211734740 +257 76 0 days 00:00:00.199225495 +257 77 0 days 00:00:00.149091440 +257 78 0 days 00:00:00.393984824 +257 79 0 days 00:00:00.128888128 +257 80 0 days 
00:00:00.362563925 +257 81 0 days 00:00:00.388564120 +257 82 0 days 00:00:00.171804626 +257 83 0 days 00:00:00.464490916 +257 84 0 days 00:00:00.250933755 +257 85 0 days 00:00:00.133780165 +257 86 0 days 00:00:00.168387440 +257 87 0 days 00:00:00.238406300 +257 88 0 days 00:00:00.296442780 +257 89 0 days 00:00:00.232059880 +257 90 0 days 00:00:00.468022502 +257 91 0 days 00:00:00.431860190 +257 92 0 days 00:00:00.408014740 +257 93 0 days 00:00:00.132245024 +257 94 0 days 00:00:00.283092005 +257 95 0 days 00:00:00.268442388 +257 96 0 days 00:00:00.253791730 +257 97 0 days 00:00:00.200165385 +257 98 0 days 00:00:00.416063000 +257 99 0 days 00:00:00.162268175 +257 100 0 days 00:00:00.220457651 +258 1 0 days 00:00:00.786345515 +258 2 0 days 00:00:00.364998600 +258 3 0 days 00:00:00.462384462 +258 4 0 days 00:00:00.267383960 +258 5 0 days 00:00:00.499385850 +258 6 0 days 00:00:00.788360095 +258 7 0 days 00:00:00.318573368 +258 8 0 days 00:00:00.401689665 +258 9 0 days 00:00:00.460590022 +258 10 0 days 00:00:00.337515790 +258 11 0 days 00:00:00.404864230 +258 12 0 days 00:00:00.248177810 +258 13 0 days 00:00:00.390633205 +258 14 0 days 00:00:00.240492430 +258 15 0 days 00:00:00.745144485 +258 16 0 days 00:00:00.725432595 +258 17 0 days 00:00:00.244208080 +258 18 0 days 00:00:00.798479030 +258 19 0 days 00:00:00.880406775 +258 20 0 days 00:00:00.718884993 +258 21 0 days 00:00:00.859867104 +258 22 0 days 00:00:00.756000215 +258 23 0 days 00:00:00.262232800 +258 24 0 days 00:00:00.414428512 +258 25 0 days 00:00:00.360447913 +258 26 0 days 00:00:00.557555093 +258 27 0 days 00:00:00.788788855 +258 28 0 days 00:00:00.282740831 +258 29 0 days 00:00:00.290917792 +258 30 0 days 00:00:00.295934175 +258 31 0 days 00:00:00.798647473 +258 32 0 days 00:00:00.244331800 +258 33 0 days 00:00:00.634076886 +258 34 0 days 00:00:00.789736470 +258 35 0 days 00:00:00.272502568 +258 36 0 days 00:00:00.225499530 +258 37 0 days 00:00:00.523475066 +258 38 0 days 00:00:00.704251390 +258 39 0 days 
00:00:00.433887475 +258 40 0 days 00:00:00.716737984 +258 41 0 days 00:00:00.292056535 +258 42 0 days 00:00:00.388631195 +258 43 0 days 00:00:00.460274402 +258 44 0 days 00:00:00.795282910 +258 45 0 days 00:00:00.242127340 +258 46 0 days 00:00:00.289807510 +258 47 0 days 00:00:00.418652635 +258 48 0 days 00:00:00.736660435 +258 49 0 days 00:00:00.436021910 +258 50 0 days 00:00:00.744805335 +258 51 0 days 00:00:00.319876966 +258 52 0 days 00:00:00.719157780 +258 53 0 days 00:00:00.422306412 +258 54 0 days 00:00:00.466087060 +258 55 0 days 00:00:00.699235553 +258 56 0 days 00:00:00.744817050 +258 57 0 days 00:00:00.395289160 +258 58 0 days 00:00:00.491671770 +258 59 0 days 00:00:00.761613795 +258 60 0 days 00:00:00.388115000 +258 61 0 days 00:00:00.271494726 +258 62 0 days 00:00:00.280821673 +258 63 0 days 00:00:00.296397865 +258 64 0 days 00:00:00.792265400 +258 65 0 days 00:00:00.647688320 +258 66 0 days 00:00:00.761767304 +258 67 0 days 00:00:00.266393016 +258 68 0 days 00:00:00.767760995 +258 69 0 days 00:00:00.337223155 +258 70 0 days 00:00:00.256064940 +258 71 0 days 00:00:00.725939715 +258 72 0 days 00:00:00.259295336 +258 73 0 days 00:00:00.820725530 +258 74 0 days 00:00:00.817541428 +258 75 0 days 00:00:00.358687980 +258 76 0 days 00:00:00.689508266 +258 77 0 days 00:00:00.424450825 +258 78 0 days 00:00:00.268610950 +258 79 0 days 00:00:00.363011950 +258 80 0 days 00:00:00.423546990 +258 81 0 days 00:00:00.822708480 +258 82 0 days 00:00:00.730913025 +258 83 0 days 00:00:00.455894872 +258 84 0 days 00:00:00.425567050 +258 85 0 days 00:00:00.442559280 +258 86 0 days 00:00:00.446023800 +258 87 0 days 00:00:00.412436455 +258 88 0 days 00:00:00.436053146 +258 89 0 days 00:00:00.417328230 +258 90 0 days 00:00:00.271740660 +258 91 0 days 00:00:00.828730100 +258 92 0 days 00:00:00.270240648 +258 93 0 days 00:00:00.734204205 +258 94 0 days 00:00:00.270630134 +258 95 0 days 00:00:00.793496860 +258 96 0 days 00:00:00.271551865 +258 97 0 days 00:00:00.310238015 +258 98 
0 days 00:00:00.255266035 +258 99 0 days 00:00:00.435672860 +258 100 0 days 00:00:00.468136980 +259 1 0 days 00:00:00.139519893 +259 2 0 days 00:00:00.435445465 +259 3 0 days 00:00:00.140667545 +259 4 0 days 00:00:00.142456430 +259 5 0 days 00:00:00.151596086 +259 6 0 days 00:00:00.208848040 +259 7 0 days 00:00:00.223455045 +259 8 0 days 00:00:00.232709645 +259 9 0 days 00:00:00.402712580 +259 10 0 days 00:00:00.249635104 +259 11 0 days 00:00:00.230183044 +259 12 0 days 00:00:00.253097855 +259 13 0 days 00:00:00.158810005 +259 14 0 days 00:00:00.188179120 +259 15 0 days 00:00:00.190350760 +259 16 0 days 00:00:00.214495845 +259 17 0 days 00:00:00.224404240 +259 18 0 days 00:00:00.135477280 +259 19 0 days 00:00:00.267392480 +259 20 0 days 00:00:00.133078085 +259 21 0 days 00:00:00.251225860 +259 22 0 days 00:00:00.163401450 +259 23 0 days 00:00:00.140539904 +259 24 0 days 00:00:00.449617433 +259 25 0 days 00:00:00.236107744 +259 26 0 days 00:00:00.236466640 +259 27 0 days 00:00:00.395713145 +259 28 0 days 00:00:00.216859648 +259 29 0 days 00:00:00.158287583 +259 30 0 days 00:00:00.221950206 +259 31 0 days 00:00:00.201992810 +259 32 0 days 00:00:00.138571405 +259 33 0 days 00:00:00.149050438 +259 34 0 days 00:00:00.397854190 +259 35 0 days 00:00:00.150900525 +259 36 0 days 00:00:00.126440215 +259 37 0 days 00:00:00.135556920 +259 38 0 days 00:00:00.375655725 +259 39 0 days 00:00:00.213895804 +259 40 0 days 00:00:00.270291105 +259 41 0 days 00:00:00.257768570 +259 42 0 days 00:00:00.400156956 +259 43 0 days 00:00:00.226415812 +259 44 0 days 00:00:00.417574240 +259 45 0 days 00:00:00.367821815 +259 46 0 days 00:00:00.202849455 +259 47 0 days 00:00:00.153521830 +259 48 0 days 00:00:00.468104622 +259 49 0 days 00:00:00.239627375 +259 50 0 days 00:00:00.130361565 +259 51 0 days 00:00:00.394403335 +259 52 0 days 00:00:00.128800760 +259 53 0 days 00:00:00.370734975 +259 54 0 days 00:00:00.358498915 +259 55 0 days 00:00:00.417805795 +259 56 0 days 00:00:00.244573345 +259 57 0 
days 00:00:00.215014320 +259 58 0 days 00:00:00.300030400 +259 59 0 days 00:00:00.384240500 +259 60 0 days 00:00:00.243695775 +259 61 0 days 00:00:00.227950225 +259 62 0 days 00:00:00.363178345 +259 63 0 days 00:00:00.135897693 +259 64 0 days 00:00:00.376700536 +259 65 0 days 00:00:00.209335245 +259 66 0 days 00:00:00.139249380 +259 67 0 days 00:00:00.287577333 +259 68 0 days 00:00:00.238553525 +259 69 0 days 00:00:00.174455050 +259 70 0 days 00:00:00.247103985 +259 71 0 days 00:00:00.203706435 +259 72 0 days 00:00:00.220528916 +259 73 0 days 00:00:00.121456690 +259 74 0 days 00:00:00.395873216 +259 75 0 days 00:00:00.209484500 +259 76 0 days 00:00:00.334980226 +259 77 0 days 00:00:00.368216555 +259 78 0 days 00:00:00.214611270 +259 79 0 days 00:00:00.144172656 +259 80 0 days 00:00:00.161368540 +259 81 0 days 00:00:00.132186740 +259 82 0 days 00:00:00.205909160 +259 83 0 days 00:00:00.346041123 +259 84 0 days 00:00:00.373785626 +259 85 0 days 00:00:00.141087262 +259 86 0 days 00:00:00.183980210 +259 87 0 days 00:00:00.216574432 +259 88 0 days 00:00:00.192043460 +259 89 0 days 00:00:00.184110753 +259 90 0 days 00:00:00.371811060 +259 91 0 days 00:00:00.217414985 +259 92 0 days 00:00:00.213290775 +259 93 0 days 00:00:00.134247375 +259 94 0 days 00:00:00.137317895 +259 95 0 days 00:00:00.212187393 +259 96 0 days 00:00:00.220734572 +259 97 0 days 00:00:00.385138490 +259 98 0 days 00:00:00.433673233 +259 99 0 days 00:00:00.127949630 +259 100 0 days 00:00:00.147780195 +260 1 0 days 00:00:08.221345906 +260 2 0 days 00:00:05.054171435 +260 4 0 days 00:00:09.313123387 +260 5 0 days 00:00:03.562884649 +260 6 0 days 00:00:06.441597660 +260 7 0 days 00:00:06.107753060 +260 8 0 days 00:00:03.650332250 +260 9 0 days 00:00:06.141309173 +260 10 0 days 00:00:05.750540916 +260 11 0 days 00:00:09.724266430 +260 12 0 days 00:00:05.854773140 +260 13 0 days 00:00:05.695371265 +260 14 0 days 00:00:10.972095005 +260 15 0 days 00:00:09.661121126 +260 16 0 days 00:00:06.080978620 +260 17 0 
days 00:00:04.257584690 +260 18 0 days 00:00:05.799018740 +260 19 0 days 00:00:07.767918653 +260 20 0 days 00:00:03.683764690 +260 21 0 days 00:00:03.793176362 +260 22 0 days 00:00:04.297283406 +260 23 0 days 00:00:06.525036620 +260 24 0 days 00:00:04.178968782 +260 25 0 days 00:00:06.619294261 +260 26 0 days 00:00:05.876514909 +260 27 0 days 00:00:04.212691856 +260 28 0 days 00:00:11.356955808 +260 29 0 days 00:00:10.237324617 +260 30 0 days 00:00:03.760191778 +260 31 0 days 00:00:05.585677811 +260 32 0 days 00:00:06.124742935 +261 1 0 days 00:00:02.063771452 +261 2 0 days 00:00:05.910149556 +261 3 0 days 00:00:03.101754107 +261 4 0 days 00:00:03.015418285 +261 5 0 days 00:00:05.433860882 +261 6 0 days 00:00:05.186734202 +261 7 0 days 00:00:04.823670153 +261 8 0 days 00:00:01.927559234 +261 9 0 days 00:00:02.929632745 +261 10 0 days 00:00:01.986265300 +261 11 0 days 00:00:05.333042851 +261 12 0 days 00:00:01.859846329 +261 13 0 days 00:00:02.518402995 +261 14 0 days 00:00:01.851529096 +261 15 0 days 00:00:02.665388258 +261 16 0 days 00:00:03.415721261 +261 17 0 days 00:00:03.477854211 +261 18 0 days 00:00:02.108508293 +261 19 0 days 00:00:04.797370525 +261 20 0 days 00:00:03.130988591 +261 21 0 days 00:00:03.553590553 +261 22 0 days 00:00:02.370125250 +261 24 0 days 00:00:03.887062618 +261 25 0 days 00:00:05.493135360 +261 26 0 days 00:00:04.665703294 +261 27 0 days 00:00:03.562639495 +261 28 0 days 00:00:02.155773120 +261 29 0 days 00:00:02.537272050 +261 30 0 days 00:00:05.361572288 +261 31 0 days 00:00:04.998529267 +261 32 0 days 00:00:04.585667296 +261 33 0 days 00:00:03.073165036 +261 34 0 days 00:00:02.762296154 +261 35 0 days 00:00:05.311115787 +261 36 0 days 00:00:03.078294882 +261 37 0 days 00:00:02.968232757 +261 38 0 days 00:00:05.390386451 +261 39 0 days 00:00:01.859989790 +261 40 0 days 00:00:02.572219855 +261 41 0 days 00:00:02.741234313 +261 42 0 days 00:00:01.902077864 +261 43 0 days 00:00:02.817566093 +261 44 0 days 00:00:04.283307770 +261 45 0 
days 00:00:02.159955593 +261 46 0 days 00:00:02.931648377 +261 47 0 days 00:00:05.153284306 +261 48 0 days 00:00:05.496598890 +261 49 0 days 00:00:04.334004495 +261 50 0 days 00:00:01.434271166 +261 51 0 days 00:00:03.921684535 +261 52 0 days 00:00:04.118928307 +262 1 0 days 00:00:03.693578741 +262 2 0 days 00:00:03.514385935 +262 3 0 days 00:00:04.032899426 +262 4 0 days 00:00:04.449026746 +262 5 0 days 00:00:05.178665420 +262 6 0 days 00:00:02.688246333 +262 7 0 days 00:00:07.200636073 +262 8 0 days 00:00:04.821304850 +262 9 0 days 00:00:03.286278940 +262 10 0 days 00:00:02.952288015 +262 11 0 days 00:00:04.629018537 +262 12 0 days 00:00:03.581384996 +262 13 0 days 00:00:07.263049866 +262 14 0 days 00:00:04.292341993 +262 15 0 days 00:00:04.751663340 +262 16 0 days 00:00:05.198774293 +262 17 0 days 00:00:02.718986866 +262 18 0 days 00:00:05.357617700 +262 19 0 days 00:00:03.054497510 +262 20 0 days 00:00:09.516761671 +262 21 0 days 00:00:05.309565252 +262 22 0 days 00:00:04.657756280 +262 23 0 days 00:00:05.283368990 +262 24 0 days 00:00:07.041795086 +262 25 0 days 00:00:07.205925406 +262 26 0 days 00:00:07.968401615 +262 27 0 days 00:00:03.250489836 +262 28 0 days 00:00:05.401157130 +262 29 0 days 00:00:02.738932153 +262 30 0 days 00:00:07.082863286 +262 31 0 days 00:00:04.789651515 +262 32 0 days 00:00:05.474105537 +262 33 0 days 00:00:05.639426175 +262 34 0 days 00:00:04.479996393 +262 35 0 days 00:00:04.074365260 +262 36 0 days 00:00:05.614300410 +262 37 0 days 00:00:03.454700280 +262 38 0 days 00:00:07.208659606 +262 39 0 days 00:00:02.866450493 +262 40 0 days 00:00:08.362191704 +262 41 0 days 00:00:06.247191066 +262 42 0 days 00:00:03.067459675 +262 43 0 days 00:00:07.930490066 +262 44 0 days 00:00:04.901156970 +262 45 0 days 00:00:03.798798455 +262 46 0 days 00:00:07.947557080 +262 47 0 days 00:00:03.194110930 +262 48 0 days 00:00:08.532688752 +262 49 0 days 00:00:05.836868232 +262 50 0 days 00:00:02.663820693 +262 51 0 days 00:00:03.507162780 +262 52 0 
days 00:00:08.799963543 +262 53 0 days 00:00:05.007896533 +262 54 0 days 00:00:04.356195173 +262 55 0 days 00:00:03.324777385 +262 56 0 days 00:00:04.346907313 +262 57 0 days 00:00:08.959137345 +262 58 0 days 00:00:04.167779740 +262 59 0 days 00:00:07.046989346 +262 60 0 days 00:00:08.232370100 +262 61 0 days 00:00:03.387371640 +262 62 0 days 00:00:08.806055980 +262 63 0 days 00:00:04.254724261 +263 1 0 days 00:00:01.837028448 +263 2 0 days 00:00:02.318627234 +263 3 0 days 00:00:01.940604255 +263 4 0 days 00:00:02.876335730 +263 5 0 days 00:00:01.645783460 +263 6 0 days 00:00:01.680168056 +263 7 0 days 00:00:02.420295391 +263 8 0 days 00:00:04.699290434 +263 9 0 days 00:00:02.511893853 +263 10 0 days 00:00:01.408270440 +263 11 0 days 00:00:02.116672446 +263 12 0 days 00:00:01.405036100 +263 13 0 days 00:00:02.753218354 +263 14 0 days 00:00:03.166752553 +263 15 0 days 00:00:02.932273450 +263 16 0 days 00:00:02.419465490 +263 17 0 days 00:00:03.971006053 +263 18 0 days 00:00:01.793729426 +263 19 0 days 00:00:03.548324646 +263 20 0 days 00:00:02.822797745 +263 21 0 days 00:00:04.002778226 +263 22 0 days 00:00:01.679239665 +263 23 0 days 00:00:01.614055545 +263 24 0 days 00:00:02.778555527 +263 25 0 days 00:00:03.965522750 +263 26 0 days 00:00:04.282074504 +263 27 0 days 00:00:03.706047533 +263 29 0 days 00:00:04.594215922 +263 30 0 days 00:00:02.309184415 +263 31 0 days 00:00:04.736324270 +263 32 0 days 00:00:01.770838708 +263 33 0 days 00:00:01.871692392 +263 34 0 days 00:00:04.415403295 +263 35 0 days 00:00:02.334962065 +263 36 0 days 00:00:03.539004040 +263 37 0 days 00:00:03.536940666 +263 38 0 days 00:00:04.111731185 +263 39 0 days 00:00:04.279821136 +263 40 0 days 00:00:03.485866346 +263 42 0 days 00:00:03.852467706 +263 43 0 days 00:00:02.793651198 +263 44 0 days 00:00:04.221500952 +263 45 0 days 00:00:02.335788380 +263 46 0 days 00:00:02.917314225 +263 47 0 days 00:00:01.655987805 +263 48 0 days 00:00:05.491619572 +263 49 0 days 00:00:02.024644111 +263 51 0 
days 00:00:02.081692626 +263 52 0 days 00:00:02.636597400 +263 54 0 days 00:00:02.328134500 +263 55 0 days 00:00:02.655363284 +263 56 0 days 00:00:02.066317991 +263 57 0 days 00:00:03.751288860 +263 58 0 days 00:00:01.415889260 +263 59 0 days 00:00:04.129280266 +263 61 0 days 00:00:01.942529700 +263 62 0 days 00:00:04.018570373 +263 63 0 days 00:00:02.588169080 +263 64 0 days 00:00:03.593466386 +263 65 0 days 00:00:02.191020893 +263 66 0 days 00:00:01.735360142 +263 67 0 days 00:00:01.863458120 +263 68 0 days 00:00:02.659781620 +263 69 0 days 00:00:02.391733805 +263 71 0 days 00:00:01.960420554 +263 72 0 days 00:00:04.453244355 +263 73 0 days 00:00:01.783527135 +263 74 0 days 00:00:02.111733300 +263 75 0 days 00:00:03.686165760 +263 76 0 days 00:00:01.670078663 +263 77 0 days 00:00:01.623626492 +263 78 0 days 00:00:05.448836695 +263 80 0 days 00:00:02.998639250 +263 81 0 days 00:00:01.762648640 +264 1 0 days 00:00:07.679740620 +264 2 0 days 00:00:03.189882023 +264 3 0 days 00:00:03.347596480 +264 4 0 days 00:00:04.925951548 +264 5 0 days 00:00:05.839554750 +264 6 0 days 00:00:03.351832022 +264 7 0 days 00:00:08.392840502 +264 8 0 days 00:00:07.522766840 +264 9 0 days 00:00:06.667492933 +264 10 0 days 00:00:07.031885533 +264 11 0 days 00:00:04.112936153 +264 12 0 days 00:00:04.390817673 +264 13 0 days 00:00:03.107325816 +264 14 0 days 00:00:05.271851838 +264 15 0 days 00:00:07.336098180 +264 16 0 days 00:00:07.520984295 +264 17 0 days 00:00:04.448079820 +264 18 0 days 00:00:08.854356491 +264 19 0 days 00:00:04.009889126 +264 20 0 days 00:00:05.510305004 +264 21 0 days 00:00:04.031360106 +264 22 0 days 00:00:05.261532122 +264 23 0 days 00:00:06.071891880 +264 24 0 days 00:00:03.464757333 +264 25 0 days 00:00:07.396287060 +264 26 0 days 00:00:08.542955955 +264 27 0 days 00:00:05.283789209 +264 28 0 days 00:00:09.330041630 +264 29 0 days 00:00:05.279955150 +264 30 0 days 00:00:03.890859546 +264 31 0 days 00:00:06.506691966 +264 32 0 days 00:00:03.250796340 +264 33 0 
days 00:00:04.360855930 +264 34 0 days 00:00:03.499809499 +264 35 0 days 00:00:03.118994566 +264 36 0 days 00:00:08.218778353 +264 37 0 days 00:00:03.186612187 +264 38 0 days 00:00:02.591337546 +264 39 0 days 00:00:04.484748585 +264 40 0 days 00:00:03.072394296 +264 41 0 days 00:00:08.860436908 +264 42 0 days 00:00:04.491490810 +264 43 0 days 00:00:02.595849246 +264 44 0 days 00:00:02.958035688 +264 45 0 days 00:00:07.553794553 +264 46 0 days 00:00:03.091181988 +264 47 0 days 00:00:03.120277183 +264 48 0 days 00:00:07.359276010 +264 49 0 days 00:00:03.920900220 +264 50 0 days 00:00:08.333429257 +264 51 0 days 00:00:03.160288417 +264 52 0 days 00:00:07.859645748 +264 53 0 days 00:00:03.871552436 +264 54 0 days 00:00:03.453142247 +264 55 0 days 00:00:06.567011733 +265 1 0 days 00:00:09.079770620 +265 2 0 days 00:00:04.425445433 +265 3 0 days 00:00:05.210282470 +265 4 0 days 00:00:04.973439772 +265 5 0 days 00:00:09.276202512 +265 6 0 days 00:00:07.707420670 +265 7 0 days 00:00:03.369456650 +265 8 0 days 00:00:03.404158197 +265 9 0 days 00:00:04.741403012 +265 10 0 days 00:00:03.007015380 +265 11 0 days 00:00:02.831848773 +265 12 0 days 00:00:07.190811273 +265 13 0 days 00:00:09.380072992 +265 14 0 days 00:00:03.549738750 +265 15 0 days 00:00:03.516892913 +265 16 0 days 00:00:08.006300146 +265 17 0 days 00:00:03.590542720 +265 18 0 days 00:00:07.381707893 +265 19 0 days 00:00:05.666852255 +265 20 0 days 00:00:03.495033104 +265 21 0 days 00:00:04.578794260 +265 22 0 days 00:00:07.221647960 +265 23 0 days 00:00:05.082780400 +265 24 0 days 00:00:05.613357891 +265 25 0 days 00:00:08.214626175 +265 26 0 days 00:00:08.248437544 +265 27 0 days 00:00:02.640003340 +265 28 0 days 00:00:06.916624740 +265 29 0 days 00:00:04.252027915 +265 30 0 days 00:00:02.631925066 +265 31 0 days 00:00:02.576774306 +265 32 0 days 00:00:05.362590910 +265 33 0 days 00:00:04.797454285 +265 34 0 days 00:00:09.764009964 +265 35 0 days 00:00:06.887055246 +265 36 0 days 00:00:03.595973188 +265 37 0 
days 00:00:02.666101520 +265 38 0 days 00:00:04.386643153 +265 39 0 days 00:00:09.264218964 +265 40 0 days 00:00:04.074392866 +265 41 0 days 00:00:04.057102666 +265 42 0 days 00:00:09.259293412 +265 43 0 days 00:00:02.865584665 +265 44 0 days 00:00:08.458106252 +265 45 0 days 00:00:08.036058020 +265 46 0 days 00:00:04.157616760 +265 47 0 days 00:00:05.340596036 +265 48 0 days 00:00:07.193684245 +265 49 0 days 00:00:07.667819345 +265 50 0 days 00:00:03.541463052 +266 1 0 days 00:00:01.585475955 +266 2 0 days 00:00:01.824803153 +266 3 0 days 00:00:03.390998334 +266 4 0 days 00:00:02.072214920 +266 5 0 days 00:00:02.429215766 +266 6 0 days 00:00:01.951189046 +266 7 0 days 00:00:02.027444030 +266 8 0 days 00:00:04.158531937 +266 9 0 days 00:00:01.537377406 +266 10 0 days 00:00:01.996488186 +266 11 0 days 00:00:03.586841206 +266 12 0 days 00:00:02.391795820 +266 13 0 days 00:00:03.784397355 +266 14 0 days 00:00:02.359120162 +266 15 0 days 00:00:03.440037664 +266 16 0 days 00:00:01.763590508 +266 17 0 days 00:00:03.532825140 +266 18 0 days 00:00:01.724191114 +266 19 0 days 00:00:02.363500803 +266 20 0 days 00:00:02.513206685 +266 21 0 days 00:00:03.827909920 +266 22 0 days 00:00:03.707359170 +266 23 0 days 00:00:02.787489474 +266 24 0 days 00:00:04.499148357 +266 25 0 days 00:00:04.635637582 +266 26 0 days 00:00:02.674770078 +266 27 0 days 00:00:01.717074586 +266 28 0 days 00:00:02.554267412 +266 29 0 days 00:00:03.726469735 +266 30 0 days 00:00:02.415926536 +266 31 0 days 00:00:04.359936564 +266 32 0 days 00:00:02.072202033 +266 33 0 days 00:00:02.187818800 +266 34 0 days 00:00:01.795175433 +266 35 0 days 00:00:02.279196663 +266 36 0 days 00:00:02.459425557 +266 37 0 days 00:00:02.758504585 +266 39 0 days 00:00:01.313766446 +266 41 0 days 00:00:03.816934020 +266 42 0 days 00:00:03.650141370 +266 43 0 days 00:00:01.643566726 +266 45 0 days 00:00:01.693968265 +266 46 0 days 00:00:01.743714165 +266 47 0 days 00:00:02.067738473 +266 48 0 days 00:00:02.653427000 +266 49 0 
days 00:00:02.561380171 +266 50 0 days 00:00:04.504884835 +266 51 0 days 00:00:04.254883428 +266 52 0 days 00:00:01.749004193 +266 53 0 days 00:00:03.682534560 +266 54 0 days 00:00:02.658379860 +266 55 0 days 00:00:02.205110800 +266 56 0 days 00:00:01.675405583 +266 57 0 days 00:00:01.333306180 +266 58 0 days 00:00:03.876229500 +266 59 0 days 00:00:02.752640752 +266 60 0 days 00:00:01.773235844 +266 61 0 days 00:00:03.566720673 +266 62 0 days 00:00:03.960386200 +266 63 0 days 00:00:03.275779486 +266 64 0 days 00:00:01.701194405 +266 65 0 days 00:00:04.415119640 +266 66 0 days 00:00:04.577969236 +266 67 0 days 00:00:02.474648176 +266 68 0 days 00:00:04.568178554 +266 69 0 days 00:00:02.911630482 +266 70 0 days 00:00:01.874355945 +266 71 0 days 00:00:02.563763235 +266 72 0 days 00:00:03.793240885 +266 73 0 days 00:00:02.615437753 +266 74 0 days 00:00:02.100710366 +266 75 0 days 00:00:02.962135470 +266 76 0 days 00:00:02.892321424 +266 77 0 days 00:00:01.719780982 +266 78 0 days 00:00:02.071865293 +266 79 0 days 00:00:03.910193956 +266 80 0 days 00:00:03.851730170 +266 81 0 days 00:00:03.303115233 +266 82 0 days 00:00:03.330752173 +267 1 0 days 00:00:04.619774220 +267 2 0 days 00:00:02.732182025 +267 3 0 days 00:00:02.062553460 +267 4 0 days 00:00:03.671310533 +267 5 0 days 00:00:04.492399003 +267 6 0 days 00:00:01.511226020 +267 7 0 days 00:00:01.945692416 +267 8 0 days 00:00:02.455866112 +267 9 0 days 00:00:04.909775336 +267 10 0 days 00:00:03.955585460 +267 11 0 days 00:00:04.026083720 +267 12 0 days 00:00:01.730787374 +267 13 0 days 00:00:02.165464000 +267 14 0 days 00:00:01.712772288 +267 15 0 days 00:00:02.168931853 +267 16 0 days 00:00:04.996858462 +267 18 0 days 00:00:01.865162036 +267 19 0 days 00:00:01.766553046 +267 20 0 days 00:00:01.651123545 +267 21 0 days 00:00:02.549152126 +267 22 0 days 00:00:04.178978173 +267 23 0 days 00:00:04.119605720 +267 24 0 days 00:00:02.349188780 +267 25 0 days 00:00:01.581879304 +267 28 0 days 00:00:02.430325989 +267 29 0 
days 00:00:01.756688854 +267 30 0 days 00:00:02.259620135 +267 31 0 days 00:00:02.127543060 +267 32 0 days 00:00:01.701489606 +267 33 0 days 00:00:02.305904008 +267 34 0 days 00:00:01.736513321 +267 35 0 days 00:00:02.800837034 +267 36 0 days 00:00:02.313989020 +267 37 0 days 00:00:04.422177470 +267 38 0 days 00:00:02.531580343 +267 39 0 days 00:00:02.787665312 +267 40 0 days 00:00:02.505036946 +267 41 0 days 00:00:02.008002068 +267 42 0 days 00:00:01.406867193 +267 43 0 days 00:00:04.197702584 +267 44 0 days 00:00:02.930264578 +267 45 0 days 00:00:01.976921866 +267 46 0 days 00:00:04.648866205 +267 47 0 days 00:00:03.069090235 +267 48 0 days 00:00:01.587445230 +267 49 0 days 00:00:01.357487726 +267 51 0 days 00:00:02.547295640 +267 52 0 days 00:00:02.809381772 +267 53 0 days 00:00:04.277037668 +267 54 0 days 00:00:03.864578025 +267 55 0 days 00:00:04.357245433 +267 56 0 days 00:00:03.208898069 +267 57 0 days 00:00:04.358677823 +267 59 0 days 00:00:03.967811130 +267 60 0 days 00:00:02.282744194 +267 61 0 days 00:00:02.862788397 +267 62 0 days 00:00:02.714243333 +267 63 0 days 00:00:02.070794760 +267 64 0 days 00:00:03.266203229 +267 65 0 days 00:00:02.048014646 +267 66 0 days 00:00:02.222461620 +267 68 0 days 00:00:02.611620753 +267 69 0 days 00:00:04.517124671 +267 70 0 days 00:00:04.560017322 +267 71 0 days 00:00:01.963896307 +267 72 0 days 00:00:02.623559395 +267 73 0 days 00:00:04.636960317 +267 74 0 days 00:00:01.528143895 +267 75 0 days 00:00:01.651120786 +267 76 0 days 00:00:03.870389525 +267 77 0 days 00:00:02.165685513 +267 78 0 days 00:00:02.946065602 +268 1 0 days 00:00:42.406180900 +268 2 0 days 00:00:25.760572140 +268 3 0 days 00:02:02.178815486 +268 4 0 days 00:02:27.790778136 +268 5 0 days 00:00:26.871034516 +268 6 0 days 00:00:20.754430246 +269 1 0 days 00:00:32.647576235 +269 2 0 days 00:01:14.484483866 +269 3 0 days 00:00:23.954526840 +269 4 0 days 00:00:15.382618060 +269 5 0 days 00:01:23.985608850 +269 6 0 days 00:00:24.909571524 +269 7 0 days 
00:00:16.085303188 +269 8 0 days 00:01:41.796918580 +270 1 0 days 00:00:27.446412318 +270 2 0 days 00:00:25.825222203 +271 1 0 days 00:00:25.069376040 +271 2 0 days 00:00:40.255622973 +271 3 0 days 00:00:39.405493893 +271 4 0 days 00:00:25.685182148 +271 5 0 days 00:00:24.378950030 +271 6 0 days 00:02:44.107393247 +272 1 0 days 00:00:26.543468410 +272 2 0 days 00:01:15.848748460 +272 3 0 days 00:00:15.568785964 +272 4 0 days 00:00:13.786106170 +272 5 0 days 00:01:15.841368073 +272 6 0 days 00:00:16.172736440 +272 7 0 days 00:00:30.188994571 +272 8 0 days 00:01:16.218884726 +272 9 0 days 00:01:16.198155206 +273 1 0 days 00:01:34.458478306 +273 2 0 days 00:01:15.509596326 +273 3 0 days 00:01:14.860379506 +273 4 0 days 00:00:12.392022740 +273 5 0 days 00:00:26.361233026 +273 6 0 days 00:01:16.145614306 +273 7 0 days 00:00:24.072121940 +273 8 0 days 00:00:16.819108508 +273 9 0 days 00:00:13.148564106 +273 10 0 days 00:00:15.947297291 +274 1 0 days 00:00:50.761880853 +274 2 0 days 00:00:48.436361793 +274 3 0 days 00:01:08.103620165 +274 4 0 days 00:00:54.921610950 +274 5 0 days 00:00:31.687115562 +275 1 0 days 00:02:42.398185507 +275 2 0 days 00:00:54.455531734 +275 3 0 days 00:00:28.076573076 +276 1 0 days 00:00:15.187100360 +276 2 0 days 00:00:16.860759225 +276 3 0 days 00:00:29.286610372 +276 4 0 days 00:00:27.533261850 +276 5 0 days 00:01:43.538544505 +276 6 0 days 00:00:33.370740795 +276 7 0 days 00:01:43.481297425 +276 8 0 days 00:01:55.977880112 +277 1 0 days 00:00:37.850685422 +277 2 0 days 00:02:12.820650712 +278 1 0 days 00:01:14.435457943 +278 2 0 days 00:00:49.321033778 +278 3 0 days 00:00:35.617349100 +278 5 0 days 00:00:26.634461407 +278 6 0 days 00:00:36.349008700 +278 7 0 days 00:00:36.057027953 +278 8 0 days 00:00:58.378437566 +278 10 0 days 00:00:49.890082936 +278 11 0 days 00:00:14.338980228 +279 2 0 days 00:00:25.705273145 +279 3 0 days 00:00:09.313643895 +279 4 0 days 00:00:23.466249859 +279 6 0 days 00:00:37.033963744 +279 7 0 days 
00:00:10.166201825 +279 8 0 days 00:00:25.880579147 +279 9 0 days 00:00:28.331757425 +279 10 0 days 00:00:24.769212218 +279 11 0 days 00:00:30.187227400 +279 12 0 days 00:00:07.706580430 +279 13 0 days 00:00:18.848113275 +279 14 0 days 00:00:36.995132708 +280 5 0 days 00:00:49.235383836 +280 6 0 days 00:01:03.984361700 +280 7 0 days 00:01:12.149219900 +281 1 0 days 00:00:25.739165000 +281 3 0 days 00:00:09.511185710 +281 6 0 days 00:00:23.373215420 +281 9 0 days 00:00:30.412215766 +281 11 0 days 00:00:16.423297990 +281 13 0 days 00:00:18.332275288 +281 14 0 days 00:00:29.607661783 +281 18 0 days 00:00:31.553130618 +281 19 0 days 00:00:35.117989640 +281 20 0 days 00:00:09.549427734 +281 21 0 days 00:00:09.414346083 +281 22 0 days 00:00:08.567067231 +282 13 0 days 00:01:18.828499148 +282 14 0 days 00:00:42.631772395 +283 1 0 days 00:00:35.823471000 +283 3 0 days 00:00:45.165188133 +283 4 0 days 00:00:52.039597600 +283 5 0 days 00:00:39.986748843 +283 6 0 days 00:00:51.176570883 +283 15 0 days 00:00:25.298284364 +283 17 0 days 00:00:15.781932527 +283 20 0 days 00:00:36.672928096 +284 1 0 days 00:00:34.082232751 +284 2 0 days 00:00:20.841318744 +284 3 0 days 00:00:28.745635560 +284 4 0 days 00:00:10.927144610 +284 5 0 days 00:00:13.427911480 +284 6 0 days 00:00:16.531846370 +284 7 0 days 00:00:10.213171480 +284 8 0 days 00:00:09.963034986 +284 9 0 days 00:00:13.908465520 +284 10 0 days 00:00:31.296396116 +284 11 0 days 00:00:19.394868096 +284 12 0 days 00:00:28.986699835 +284 13 0 days 00:00:29.502922335 +284 14 0 days 00:00:26.185008666 +284 15 0 days 00:00:29.511515830 +285 1 0 days 00:00:08.372141530 +285 2 0 days 00:00:16.853103636 +285 3 0 days 00:00:06.344440056 +285 4 0 days 00:00:14.531101415 +285 5 0 days 00:00:17.910564285 +285 6 0 days 00:00:14.741291635 +285 7 0 days 00:00:16.335291332 +285 8 0 days 00:00:07.240632931 +285 9 0 days 00:00:09.031707956 +285 10 0 days 00:00:09.508021360 +285 11 0 days 00:00:06.171195298 +285 12 0 days 00:00:06.254348010 +285 
13 0 days 00:00:08.892520908 +285 14 0 days 00:00:09.039038416 +285 15 0 days 00:00:06.776109908 +285 16 0 days 00:00:14.642099060 +285 17 0 days 00:00:07.502419366 +285 18 0 days 00:00:08.918562180 +285 19 0 days 00:00:17.489285748 +285 20 0 days 00:00:05.287744310 +285 21 0 days 00:00:10.318881772 +285 22 0 days 00:00:06.478647780 +285 23 0 days 00:00:13.104353880 +285 24 0 days 00:00:07.537568790 +285 25 0 days 00:00:17.825029443 +286 1 0 days 00:00:17.187651836 +286 2 0 days 00:00:15.375293825 +286 3 0 days 00:00:17.194706388 +286 4 0 days 00:00:24.362566406 +286 5 0 days 00:00:26.829466280 +286 6 0 days 00:00:23.548016840 +286 7 0 days 00:00:20.262920342 +286 8 0 days 00:00:18.014566370 +286 9 0 days 00:00:17.031680430 +286 10 0 days 00:00:28.465035464 +286 11 0 days 00:00:24.135165193 +286 12 0 days 00:00:17.752978686 +286 13 0 days 00:00:24.289792493 +286 14 0 days 00:00:13.922062571 +286 15 0 days 00:00:17.252861024 +286 16 0 days 00:00:16.511424636 +286 17 0 days 00:00:10.676203496 +286 18 0 days 00:00:17.619549372 +287 1 0 days 00:00:15.758484645 +287 2 0 days 00:00:29.103164272 +287 3 0 days 00:00:31.349208290 +287 4 0 days 00:00:41.834349987 +287 5 0 days 00:00:16.089260230 +287 6 0 days 00:00:18.234310665 +287 7 0 days 00:00:12.453138296 +287 8 0 days 00:00:32.590815656 +287 9 0 days 00:00:17.689513306 +287 10 0 days 00:00:26.063491686 +287 11 0 days 00:00:30.021163860 +287 12 0 days 00:00:16.073308220 +288 1 0 days 00:00:12.022773375 +288 2 0 days 00:00:06.348704165 +288 3 0 days 00:00:04.905996720 +288 4 0 days 00:00:08.424904873 +288 5 0 days 00:00:05.682688976 +288 6 0 days 00:00:06.015049300 +288 7 0 days 00:00:12.593845306 +288 8 0 days 00:00:05.787887515 +288 9 0 days 00:00:15.010966786 +288 10 0 days 00:00:13.653738700 +288 11 0 days 00:00:12.091230253 +288 12 0 days 00:00:11.996208393 +288 13 0 days 00:00:05.819033888 +288 14 0 days 00:00:08.457644290 +288 15 0 days 00:00:14.258740152 +288 16 0 days 00:00:07.844779440 +288 17 0 days 
00:00:08.015743780 +288 18 0 days 00:00:05.276225508 +288 19 0 days 00:00:05.328628550 +288 20 0 days 00:00:13.612730495 +288 21 0 days 00:00:07.931774820 +288 22 0 days 00:00:14.601509620 +288 23 0 days 00:00:06.975606610 +288 24 0 days 00:00:04.950163380 +288 25 0 days 00:00:07.822389085 +288 26 0 days 00:00:13.509009470 +288 27 0 days 00:00:13.806690305 +288 28 0 days 00:00:05.204607816 +288 29 0 days 00:00:14.117274624 +288 30 0 days 00:00:05.479775226 +288 31 0 days 00:00:08.323051288 +288 32 0 days 00:00:15.183294066 +288 33 0 days 00:00:05.161102212 +288 34 0 days 00:00:09.678971564 +288 35 0 days 00:00:13.635724710 +288 36 0 days 00:00:09.719789228 +288 37 0 days 00:00:08.733808612 +288 38 0 days 00:00:07.836775290 +288 39 0 days 00:00:10.180425904 +289 1 0 days 00:00:15.712304940 +289 2 0 days 00:00:08.165056950 +289 3 0 days 00:00:05.104356625 +289 4 0 days 00:00:12.618799113 +289 5 0 days 00:00:08.149168410 +289 6 0 days 00:00:16.012642837 +289 7 0 days 00:00:07.339234265 +289 8 0 days 00:00:14.325906895 +289 9 0 days 00:00:12.597629133 +289 10 0 days 00:00:12.507571946 +289 11 0 days 00:00:15.839614270 +289 12 0 days 00:00:08.893117883 +289 13 0 days 00:00:08.576097976 +289 14 0 days 00:00:15.327333692 +289 15 0 days 00:00:15.661963993 +289 16 0 days 00:00:15.764968120 +289 17 0 days 00:00:05.729033774 +289 18 0 days 00:00:06.793172530 +289 19 0 days 00:00:08.163925520 +289 20 0 days 00:00:05.397670145 +289 21 0 days 00:00:14.172259350 +289 22 0 days 00:00:15.214920376 +289 23 0 days 00:00:06.050094600 +289 24 0 days 00:00:12.704071446 +289 25 0 days 00:00:09.044627688 +289 26 0 days 00:00:12.626064200 +289 27 0 days 00:00:08.595471756 +289 28 0 days 00:00:06.427235326 +289 29 0 days 00:00:06.213185634 +289 30 0 days 00:00:13.114167286 +289 31 0 days 00:00:05.031041645 +289 32 0 days 00:00:05.623671168 +289 33 0 days 00:00:08.980263672 +289 34 0 days 00:00:08.616302360 +289 35 0 days 00:00:09.100551805 +289 36 0 days 00:00:08.586277324 +289 37 0 days 
00:00:14.051677083 +290 1 0 days 00:00:29.351900995 +290 2 0 days 00:00:31.819061820 +290 3 0 days 00:00:31.668738328 +290 4 0 days 00:00:11.087901796 +290 5 0 days 00:00:20.321572056 +290 6 0 days 00:00:33.372702800 +290 7 0 days 00:00:18.484427172 +290 8 0 days 00:00:11.486641468 +290 9 0 days 00:00:33.274082688 +290 10 0 days 00:00:41.526226657 +290 11 0 days 00:00:35.452875800 +290 12 0 days 00:00:15.994913871 +291 1 0 days 00:00:09.041236705 +291 2 0 days 00:00:14.768523830 +291 3 0 days 00:00:10.251572800 +291 4 0 days 00:00:09.301662086 +291 5 0 days 00:00:19.459611495 +291 6 0 days 00:00:05.771895472 +291 7 0 days 00:00:19.439658770 +291 8 0 days 00:00:16.820389060 +291 9 0 days 00:00:05.753333274 +291 10 0 days 00:00:17.147041050 +291 11 0 days 00:00:10.565168236 +291 12 0 days 00:00:14.204404165 +291 13 0 days 00:00:05.764095820 +291 14 0 days 00:00:08.913127432 +291 15 0 days 00:00:06.197820056 +291 16 0 days 00:00:05.578238270 +291 17 0 days 00:00:06.362929440 +291 18 0 days 00:00:06.785614665 +291 19 0 days 00:00:08.886736435 +291 20 0 days 00:00:09.358601250 +291 21 0 days 00:00:06.199146236 +291 22 0 days 00:00:07.174283743 +291 23 0 days 00:00:09.240180980 +291 24 0 days 00:00:05.463350796 +291 25 0 days 00:00:16.982965577 +291 26 0 days 00:00:06.071314756 +291 27 0 days 00:00:10.291491091 +291 28 0 days 00:00:15.312108960 +291 29 0 days 00:00:09.009826103 +291 30 0 days 00:00:19.120092176 +292 1 0 days 00:01:24.339782764 +292 2 0 days 00:00:51.453538471 +292 3 0 days 00:00:46.526761680 +292 4 0 days 00:00:29.554873920 +292 5 0 days 00:01:18.380933145 +292 6 0 days 00:00:25.821631755 +292 7 0 days 00:00:48.695087434 +293 1 0 days 00:00:17.189566548 +293 2 0 days 00:00:30.198353026 +293 3 0 days 00:00:28.443374900 +293 4 0 days 00:00:27.335132130 +293 5 0 days 00:00:55.409065370 +293 6 0 days 00:00:29.053611976 +293 7 0 days 00:00:28.497371620 +293 8 0 days 00:00:56.929602005 +293 9 0 days 00:00:27.326516020 +293 10 0 days 00:00:30.232422808 +294 1 0 
days 00:00:44.491964730 +294 2 0 days 00:00:43.422834905 +294 3 0 days 00:00:51.576102430 +294 4 0 days 00:00:43.301631710 +294 5 0 days 00:00:46.034973952 +294 6 0 days 00:00:49.472210420 +294 7 0 days 00:01:27.649140360 +295 1 0 days 00:00:17.045485960 +295 2 0 days 00:00:18.806951956 +295 3 0 days 00:00:44.107802306 +295 4 0 days 00:00:44.123191006 +295 5 0 days 00:00:18.112884124 +295 6 0 days 00:00:49.517960910 +295 7 0 days 00:00:19.440849892 +295 8 0 days 00:00:17.018787685 +295 9 0 days 00:00:49.318771640 +295 10 0 days 00:00:54.707680963 +295 11 0 days 00:00:29.065890610 +295 12 0 days 00:00:18.266819455 +295 13 0 days 00:00:56.060020136 +296 1 0 days 00:00:27.228994905 +296 2 0 days 00:00:26.472381980 +296 3 0 days 00:00:28.159802920 +296 4 0 days 00:01:23.556056332 +296 5 0 days 00:00:47.578108383 +296 6 0 days 00:00:45.803155475 +296 7 0 days 00:00:28.728236393 +296 8 0 days 00:00:30.347746348 +296 9 0 days 00:00:45.813872910 +297 1 0 days 00:00:27.475597380 +297 2 0 days 00:00:28.637861376 +297 3 0 days 00:00:52.319778940 +297 4 0 days 00:00:27.482183485 +297 5 0 days 00:00:18.459472940 +297 6 0 days 00:00:29.758226493 +297 7 0 days 00:00:18.821752350 +297 8 0 days 00:00:17.289203776 +297 9 0 days 00:00:28.636377025 +297 10 0 days 00:00:16.783590705 +297 11 0 days 00:00:28.647043780 +297 12 0 days 00:00:17.295498428 +297 13 0 days 00:00:53.552895776 +298 2 0 days 00:00:00.167729543 +298 7 0 days 00:00:00.104522833 +298 14 0 days 00:00:00.162448522 +298 18 0 days 00:00:00.219365442 +298 20 0 days 00:00:00.233280028 +298 24 0 days 00:00:00.220096182 +298 25 0 days 00:00:00.164452809 +298 27 0 days 00:00:00.129384881 +298 28 0 days 00:00:00.225265605 +298 31 0 days 00:00:00.226045793 +298 33 0 days 00:00:00.132079002 +298 35 0 days 00:00:00.114845872 +298 36 0 days 00:00:00.095321026 +298 39 0 days 00:00:00.223263453 +298 41 0 days 00:00:00.095677020 +298 42 0 days 00:00:00.213072452 +298 43 0 days 00:00:00.166095522 +298 44 0 days 00:00:00.229136311 +298 
45 0 days 00:00:00.131779896 +298 46 0 days 00:00:00.168824892 +298 47 0 days 00:00:00.133086838 +298 50 0 days 00:00:00.215910657 +298 52 0 days 00:00:00.156803622 +298 53 0 days 00:00:00.160955402 +298 55 0 days 00:00:00.220241664 +298 60 0 days 00:00:00.213716647 +298 62 0 days 00:00:00.109601320 +298 66 0 days 00:00:00.124761245 +298 68 0 days 00:00:00.211977343 +298 69 0 days 00:00:00.103569000 +298 70 0 days 00:00:00.205609769 +298 71 0 days 00:00:00.120181492 +298 76 0 days 00:00:00.217082875 +298 77 0 days 00:00:00.091424420 +298 80 0 days 00:00:00.219633345 +298 81 0 days 00:00:00.202294935 +298 83 0 days 00:00:00.205323516 +298 86 0 days 00:00:00.147729300 +298 87 0 days 00:00:00.146492391 +298 91 0 days 00:00:00.205504965 +298 93 0 days 00:00:00.089755640 +298 100 0 days 00:00:00.090565493 +299 1 0 days 00:00:00.115203790 +299 3 0 days 00:00:00.186291184 +299 5 0 days 00:00:00.118123542 +299 7 0 days 00:00:00.110309610 +299 9 0 days 00:00:00.092881620 +299 10 0 days 00:00:00.235690116 +299 11 0 days 00:00:00.102883606 +299 12 0 days 00:00:00.145767988 +299 13 0 days 00:00:00.226601125 +299 14 0 days 00:00:00.202964393 +299 16 0 days 00:00:00.143822955 +299 17 0 days 00:00:00.097795960 +299 19 0 days 00:00:00.098131366 +299 20 0 days 00:00:00.246563865 +299 21 0 days 00:00:00.242284490 +299 22 0 days 00:00:00.214419825 +299 24 0 days 00:00:00.109759636 +299 25 0 days 00:00:00.210983146 +299 26 0 days 00:00:00.119095050 +299 27 0 days 00:00:00.127566453 +299 28 0 days 00:00:00.120722636 +299 29 0 days 00:00:00.108061893 +299 30 0 days 00:00:00.092254020 +299 31 0 days 00:00:00.204451968 +299 32 0 days 00:00:00.159221385 +299 34 0 days 00:00:00.099525740 +299 37 0 days 00:00:00.110807966 +299 41 0 days 00:00:00.214692383 +299 42 0 days 00:00:00.127879832 +299 44 0 days 00:00:00.102504440 +299 45 0 days 00:00:00.217356393 +299 46 0 days 00:00:00.155439945 +299 47 0 days 00:00:00.116090300 +299 49 0 days 00:00:00.128149355 +299 50 0 days 00:00:00.219708995 
+299 51 0 days 00:00:00.095009046 +299 53 0 days 00:00:00.213529518 +299 57 0 days 00:00:00.098650373 +299 58 0 days 00:00:00.157084322 +299 60 0 days 00:00:00.124667290 +299 66 0 days 00:00:00.136773360 +299 68 0 days 00:00:00.100065000 +299 70 0 days 00:00:00.185032451 +299 72 0 days 00:00:00.163264772 +299 73 0 days 00:00:00.181211812 +299 74 0 days 00:00:00.133122617 +299 75 0 days 00:00:00.128076454 +299 76 0 days 00:00:00.208104340 +299 80 0 days 00:00:00.094098013 +299 81 0 days 00:00:00.135718503 +299 83 0 days 00:00:00.160785894 +299 84 0 days 00:00:00.126787766 +299 85 0 days 00:00:00.221959692 +299 87 0 days 00:00:00.215948271 +299 88 0 days 00:00:00.216924310 +299 90 0 days 00:00:00.200778247 +299 91 0 days 00:00:00.205679210 +299 93 0 days 00:00:00.095817953 +299 94 0 days 00:00:00.202453360 +299 95 0 days 00:00:00.208109552 +299 97 0 days 00:00:00.207434051 +299 99 0 days 00:00:00.202535664 +299 100 0 days 00:00:00.214295639 +300 1 0 days 00:00:00.074178950 +300 2 0 days 00:00:00.078183665 +300 5 0 days 00:00:00.080211072 +300 11 0 days 00:00:00.115501549 +300 12 0 days 00:00:00.081055529 +300 14 0 days 00:00:00.127865814 +300 15 0 days 00:00:00.091009253 +300 16 0 days 00:00:00.091673128 +300 17 0 days 00:00:00.065712753 +300 19 0 days 00:00:00.091452633 +300 20 0 days 00:00:00.119378496 +300 23 0 days 00:00:00.062651853 +300 25 0 days 00:00:00.121503493 +300 28 0 days 00:00:00.072039866 +300 30 0 days 00:00:00.059029560 +300 32 0 days 00:00:00.069346120 +300 33 0 days 00:00:00.064125968 +300 34 0 days 00:00:00.131220202 +300 37 0 days 00:00:00.090915565 +300 39 0 days 00:00:00.136495750 +300 40 0 days 00:00:00.119612332 +300 43 0 days 00:00:00.072116211 +300 44 0 days 00:00:00.052536200 +300 49 0 days 00:00:00.120581679 +300 51 0 days 00:00:00.111942578 +300 52 0 days 00:00:00.110212396 +300 53 0 days 00:00:00.114800161 +300 57 0 days 00:00:00.112851308 +300 63 0 days 00:00:00.083209955 +300 64 0 days 00:00:00.052433866 +300 65 0 days 
00:00:00.111945498 +300 66 0 days 00:00:00.111667945 +300 69 0 days 00:00:00.051429926 +300 74 0 days 00:00:00.120644045 +300 75 0 days 00:00:00.125209118 +300 76 0 days 00:00:00.052011726 +300 77 0 days 00:00:00.125457309 +300 81 0 days 00:00:00.057421915 +300 85 0 days 00:00:00.051978420 +300 86 0 days 00:00:00.112797845 +300 87 0 days 00:00:00.113744651 +300 89 0 days 00:00:00.110140276 +300 91 0 days 00:00:00.113295945 +300 92 0 days 00:00:00.114063634 +300 96 0 days 00:00:00.110658960 +300 97 0 days 00:00:00.053655280 +300 98 0 days 00:00:00.115572357 +300 99 0 days 00:00:00.059072706 +301 1 0 days 00:00:00.052512953 +301 2 0 days 00:00:00.054135293 +301 3 0 days 00:00:00.065176092 +301 4 0 days 00:00:00.051764846 +301 7 0 days 00:00:00.098458032 +301 9 0 days 00:00:00.115358921 +301 12 0 days 00:00:00.118811301 +301 13 0 days 00:00:00.114777052 +301 14 0 days 00:00:00.059918306 +301 16 0 days 00:00:00.051911093 +301 18 0 days 00:00:00.113660003 +301 21 0 days 00:00:00.094264520 +301 25 0 days 00:00:00.054626280 +301 26 0 days 00:00:00.054509700 +301 28 0 days 00:00:00.060744266 +301 30 0 days 00:00:00.052413680 +301 33 0 days 00:00:00.093963510 +301 37 0 days 00:00:00.064499506 +301 40 0 days 00:00:00.051845760 +301 41 0 days 00:00:00.051191773 +301 43 0 days 00:00:00.125565419 +301 44 0 days 00:00:00.052339233 +301 47 0 days 00:00:00.117642050 +301 48 0 days 00:00:00.081098980 +301 51 0 days 00:00:00.112385738 +301 53 0 days 00:00:00.116185421 +301 55 0 days 00:00:00.116112804 +301 57 0 days 00:00:00.060977133 +301 60 0 days 00:00:00.060427566 +301 61 0 days 00:00:00.053047833 +301 62 0 days 00:00:00.054324906 +301 63 0 days 00:00:00.093141660 +301 68 0 days 00:00:00.118295806 +301 71 0 days 00:00:00.105276146 +301 75 0 days 00:00:00.125460273 +301 76 0 days 00:00:00.069544146 +301 78 0 days 00:00:00.088431771 +301 81 0 days 00:00:00.114174529 +301 85 0 days 00:00:00.054168033 +301 86 0 days 00:00:00.057404775 +301 87 0 days 00:00:00.084397173 +301 93 0 days 
00:00:00.115482014 +301 94 0 days 00:00:00.054171946 +301 95 0 days 00:00:00.109443565 +301 96 0 days 00:00:00.052061646 +301 99 0 days 00:00:00.062723770 +302 1 0 days 00:00:00.129638715 +302 2 0 days 00:00:00.218055000 +302 3 0 days 00:00:00.132489948 +302 4 0 days 00:00:00.156844210 +302 5 0 days 00:00:00.130140360 +302 6 0 days 00:00:00.157426883 +302 7 0 days 00:00:00.133074371 +302 8 0 days 00:00:00.137505912 +302 9 0 days 00:00:00.169493440 +302 10 0 days 00:00:00.133950395 +302 11 0 days 00:00:00.157517046 +302 12 0 days 00:00:00.210707872 +302 13 0 days 00:00:00.217607536 +302 14 0 days 00:00:00.137109084 +302 15 0 days 00:00:00.226207562 +302 16 0 days 00:00:00.169746017 +302 17 0 days 00:00:00.220864334 +302 18 0 days 00:00:00.128117902 +302 19 0 days 00:00:00.153520044 +302 20 0 days 00:00:00.197451480 +302 21 0 days 00:00:00.148136602 +302 22 0 days 00:00:00.151072202 +302 23 0 days 00:00:00.127920507 +302 24 0 days 00:00:00.131672284 +302 25 0 days 00:00:00.154481093 +302 26 0 days 00:00:00.132730617 +302 27 0 days 00:00:00.129476725 +302 28 0 days 00:00:00.197971756 +302 29 0 days 00:00:00.123228568 +302 30 0 days 00:00:00.145667853 +302 31 0 days 00:00:00.208544392 +302 32 0 days 00:00:00.152395670 +302 33 0 days 00:00:00.202492417 +302 34 0 days 00:00:00.153718760 +302 35 0 days 00:00:00.128885310 +302 36 0 days 00:00:00.203427646 +302 37 0 days 00:00:00.195406176 +302 38 0 days 00:00:00.127650892 +302 39 0 days 00:00:00.151960164 +302 40 0 days 00:00:00.194903260 +302 41 0 days 00:00:00.128844589 +302 42 0 days 00:00:00.125899631 +302 43 0 days 00:00:00.145629626 +302 44 0 days 00:00:00.194623920 +302 45 0 days 00:00:00.150660602 +302 46 0 days 00:00:00.148655162 +302 47 0 days 00:00:00.124869303 +302 48 0 days 00:00:00.126675631 +302 49 0 days 00:00:00.202429434 +302 50 0 days 00:00:00.128953682 +302 51 0 days 00:00:00.153666905 +302 52 0 days 00:00:00.160697892 +302 53 0 days 00:00:00.204885840 +302 55 0 days 00:00:00.206579811 +302 56 0 days 
00:00:00.201789753 +302 57 0 days 00:00:00.154666024 +302 58 0 days 00:00:00.212607788 +302 59 0 days 00:00:00.151238591 +302 60 0 days 00:00:00.199952373 +302 61 0 days 00:00:00.131461598 +302 62 0 days 00:00:00.130078928 +302 63 0 days 00:00:00.156314368 +302 64 0 days 00:00:00.128436246 +302 65 0 days 00:00:00.149037180 +302 66 0 days 00:00:00.149559431 +302 67 0 days 00:00:00.152845336 +302 68 0 days 00:00:00.187296776 +302 69 0 days 00:00:00.151876272 +302 70 0 days 00:00:00.195249684 +302 71 0 days 00:00:00.215955016 +302 72 0 days 00:00:00.128700518 +302 73 0 days 00:00:00.129850895 +302 74 0 days 00:00:00.205818863 +302 75 0 days 00:00:00.214793230 +302 76 0 days 00:00:00.145851616 +302 77 0 days 00:00:00.201603640 +302 78 0 days 00:00:00.130511198 +302 79 0 days 00:00:00.155585997 +302 80 0 days 00:00:00.129522538 +302 81 0 days 00:00:00.205655934 +302 82 0 days 00:00:00.199007116 +302 83 0 days 00:00:00.161576432 +302 84 0 days 00:00:00.196520864 +302 85 0 days 00:00:00.127627848 +302 86 0 days 00:00:00.132223946 +302 87 0 days 00:00:00.157968982 +302 88 0 days 00:00:00.194277264 +302 89 0 days 00:00:00.208662600 +302 90 0 days 00:00:00.186350860 +302 91 0 days 00:00:00.129426634 +302 92 0 days 00:00:00.157999404 +302 93 0 days 00:00:00.126430342 +302 94 0 days 00:00:00.161251276 +302 95 0 days 00:00:00.148815010 +302 96 0 days 00:00:00.219109961 +302 97 0 days 00:00:00.128386922 +302 98 0 days 00:00:00.151662071 +302 99 0 days 00:00:00.197467493 +302 100 0 days 00:00:00.131954875 +303 1 0 days 00:00:00.075312915 +303 2 0 days 00:00:00.108158212 +303 3 0 days 00:00:00.109643420 +303 4 0 days 00:00:00.102387875 +303 5 0 days 00:00:00.083816762 +303 6 0 days 00:00:00.102716575 +303 7 0 days 00:00:00.113516882 +303 8 0 days 00:00:00.106769260 +303 9 0 days 00:00:00.102157720 +303 10 0 days 00:00:00.101124580 +303 11 0 days 00:00:00.101825535 +303 12 0 days 00:00:00.080894786 +303 13 0 days 00:00:00.075345870 +303 14 0 days 00:00:00.101696030 +303 15 0 days 
00:00:00.066234380 +303 16 0 days 00:00:00.108733068 +303 17 0 days 00:00:00.103474390 +303 18 0 days 00:00:00.107833500 +303 19 0 days 00:00:00.108863104 +303 20 0 days 00:00:00.069520973 +303 21 0 days 00:00:00.070551511 +303 22 0 days 00:00:00.069111220 +303 23 0 days 00:00:00.079568310 +303 24 0 days 00:00:00.075377945 +303 25 0 days 00:00:00.069258405 +303 26 0 days 00:00:00.068187851 +303 27 0 days 00:00:00.067992971 +303 28 0 days 00:00:00.077495532 +303 29 0 days 00:00:00.101676355 +303 30 0 days 00:00:00.106625456 +303 31 0 days 00:00:00.106823944 +303 32 0 days 00:00:00.068934230 +303 33 0 days 00:00:00.077715024 +303 34 0 days 00:00:00.106751740 +303 35 0 days 00:00:00.069556487 +303 36 0 days 00:00:00.074582440 +303 38 0 days 00:00:00.100463925 +303 39 0 days 00:00:00.099742645 +303 40 0 days 00:00:00.067234572 +303 41 0 days 00:00:00.113621250 +303 42 0 days 00:00:00.078206584 +303 43 0 days 00:00:00.079543440 +303 44 0 days 00:00:00.102405015 +303 45 0 days 00:00:00.079690864 +303 46 0 days 00:00:00.079266872 +303 47 0 days 00:00:00.073945695 +303 48 0 days 00:00:00.102561175 +303 49 0 days 00:00:00.078357404 +303 50 0 days 00:00:00.081877945 +303 51 0 days 00:00:00.069600174 +303 52 0 days 00:00:00.078077640 +303 53 0 days 00:00:00.069523306 +303 54 0 days 00:00:00.068738431 +303 55 0 days 00:00:00.067509492 +303 56 0 days 00:00:00.072988280 +303 57 0 days 00:00:00.067634820 +303 58 0 days 00:00:00.076023116 +303 59 0 days 00:00:00.077252420 +303 60 0 days 00:00:00.103397580 +303 61 0 days 00:00:00.069573355 +303 62 0 days 00:00:00.065338068 +303 63 0 days 00:00:00.069012354 +303 64 0 days 00:00:00.078920940 +303 65 0 days 00:00:00.104832955 +303 66 0 days 00:00:00.069747254 +303 67 0 days 00:00:00.110298846 +303 68 0 days 00:00:00.069808590 +303 69 0 days 00:00:00.077598140 +303 70 0 days 00:00:00.065995873 +303 71 0 days 00:00:00.067454420 +303 72 0 days 00:00:00.069691936 +303 73 0 days 00:00:00.067113200 +303 74 0 days 00:00:00.084461420 +303 75 
0 days 00:00:00.066964636 +303 76 0 days 00:00:00.080934286 +303 77 0 days 00:00:00.078331072 +303 78 0 days 00:00:00.079554633 +303 79 0 days 00:00:00.100525625 +303 80 0 days 00:00:00.065345676 +303 81 0 days 00:00:00.080052600 +303 82 0 days 00:00:00.077444220 +303 83 0 days 00:00:00.101348875 +303 84 0 days 00:00:00.079862553 +303 85 0 days 00:00:00.105995272 +303 86 0 days 00:00:00.066425663 +303 87 0 days 00:00:00.102034910 +303 88 0 days 00:00:00.073200360 +303 89 0 days 00:00:00.102017660 +303 90 0 days 00:00:00.081912230 +303 91 0 days 00:00:00.104615325 +303 92 0 days 00:00:00.104366815 +303 93 0 days 00:00:00.071726831 +303 94 0 days 00:00:00.072207252 +303 95 0 days 00:00:00.085806577 +303 96 0 days 00:00:00.069360260 +303 97 0 days 00:00:00.080526304 +303 98 0 days 00:00:00.069615237 +303 99 0 days 00:00:00.104842350 +303 100 0 days 00:00:00.087412304 +304 2 0 days 00:00:01.371637100 +304 3 0 days 00:00:00.396092432 +304 4 0 days 00:00:00.694982470 +304 5 0 days 00:00:01.425472693 +304 6 0 days 00:00:00.407825555 +304 8 0 days 00:00:00.729745723 +304 9 0 days 00:00:01.480322478 +304 10 0 days 00:00:01.310285489 +304 12 0 days 00:00:00.760445311 +304 16 0 days 00:00:00.355104221 +304 18 0 days 00:00:00.298978473 +304 21 0 days 00:00:01.138897454 +304 22 0 days 00:00:01.309173532 +304 24 0 days 00:00:00.507804704 +304 25 0 days 00:00:01.150391910 +304 28 0 days 00:00:01.129955878 +304 29 0 days 00:00:00.850860262 +304 30 0 days 00:00:01.234389265 +304 32 0 days 00:00:01.201030097 +304 33 0 days 00:00:01.170058142 +304 34 0 days 00:00:00.894270582 +304 35 0 days 00:00:00.651544078 +304 37 0 days 00:00:00.608673463 +304 38 0 days 00:00:01.296875470 +304 39 0 days 00:00:00.739265951 +304 40 0 days 00:00:01.033499697 +304 42 0 days 00:00:00.349109961 +304 44 0 days 00:00:01.155684833 +304 45 0 days 00:00:00.389131232 +304 47 0 days 00:00:00.456346558 +304 48 0 days 00:00:00.955769260 +304 49 0 days 00:00:00.927480332 +304 51 0 days 00:00:00.572359617 +304 52 
0 days 00:00:00.290470810 +304 54 0 days 00:00:01.087309862 +304 55 0 days 00:00:00.361744742 +304 56 0 days 00:00:00.782057497 +304 58 0 days 00:00:01.146877660 +304 61 0 days 00:00:01.188455008 +304 62 0 days 00:00:00.698728557 +304 63 0 days 00:00:00.603654161 +304 64 0 days 00:00:00.592447146 +304 69 0 days 00:00:00.587800780 +304 72 0 days 00:00:00.526914477 +304 75 0 days 00:00:00.542994528 +304 77 0 days 00:00:00.321024423 +304 82 0 days 00:00:01.041152601 +304 83 0 days 00:00:00.335815848 +304 84 0 days 00:00:00.562178181 +304 86 0 days 00:00:00.315973170 +304 88 0 days 00:00:01.030412601 +304 89 0 days 00:00:00.355832205 +304 90 0 days 00:00:00.577958968 +304 91 0 days 00:00:00.349368685 +304 92 0 days 00:00:00.374364329 +304 93 0 days 00:00:01.101865228 +304 94 0 days 00:00:00.332775245 +304 95 0 days 00:00:01.029495513 +304 96 0 days 00:00:01.021878337 +304 97 0 days 00:00:00.417289244 +304 99 0 days 00:00:00.563362692 +304 100 0 days 00:00:00.864992511 +305 1 0 days 00:00:00.606398722 +305 3 0 days 00:00:01.187438593 +305 4 0 days 00:00:01.057873595 +305 7 0 days 00:00:00.423767384 +305 8 0 days 00:00:01.091477498 +305 10 0 days 00:00:01.094728940 +305 11 0 days 00:00:00.552208045 +305 13 0 days 00:00:00.490301265 +305 14 0 days 00:00:01.078009341 +305 15 0 days 00:00:01.088129260 +305 16 0 days 00:00:00.980990040 +305 18 0 days 00:00:00.575559127 +305 19 0 days 00:00:00.273111866 +305 22 0 days 00:00:00.604013629 +305 23 0 days 00:00:01.088049360 +305 24 0 days 00:00:00.435302836 +305 25 0 days 00:00:00.348152673 +305 26 0 days 00:00:01.184266597 +305 28 0 days 00:00:01.092216486 +305 29 0 days 00:00:01.096039738 +305 30 0 days 00:00:00.594059367 +305 32 0 days 00:00:00.573459896 +305 33 0 days 00:00:01.173534514 +305 34 0 days 00:00:01.076540977 +305 35 0 days 00:00:00.565434010 +305 37 0 days 00:00:01.072458350 +305 38 0 days 00:00:00.542428584 +305 39 0 days 00:00:00.568656688 +305 40 0 days 00:00:00.286872804 +305 41 0 days 00:00:00.619738235 +305 
43 0 days 00:00:00.365172103 +305 44 0 days 00:00:00.923160454 +305 45 0 days 00:00:00.579128574 +305 46 0 days 00:00:00.719388587 +305 48 0 days 00:00:00.985364402 +305 51 0 days 00:00:00.585796765 +305 52 0 days 00:00:01.022238075 +305 54 0 days 00:00:01.067596833 +305 55 0 days 00:00:00.265459688 +305 57 0 days 00:00:00.352716588 +305 58 0 days 00:00:01.078932618 +305 59 0 days 00:00:00.679647655 +305 61 0 days 00:00:01.048515780 +305 62 0 days 00:00:00.296872050 +305 63 0 days 00:00:00.574538201 +305 64 0 days 00:00:01.066489894 +305 65 0 days 00:00:01.010694967 +305 69 0 days 00:00:00.444577917 +305 70 0 days 00:00:00.582496800 +305 73 0 days 00:00:00.636935964 +305 74 0 days 00:00:01.034436316 +305 75 0 days 00:00:00.340696408 +305 76 0 days 00:00:01.056392326 +305 79 0 days 00:00:00.951378103 +305 81 0 days 00:00:00.363235996 +305 82 0 days 00:00:00.406470721 +305 83 0 days 00:00:01.076120498 +305 84 0 days 00:00:00.450956634 +305 86 0 days 00:00:00.580268016 +305 88 0 days 00:00:01.073605227 +305 90 0 days 00:00:00.267566633 +305 91 0 days 00:00:01.048899120 +305 92 0 days 00:00:01.191262690 +305 93 0 days 00:00:01.175130852 +305 94 0 days 00:00:00.323738873 +305 97 0 days 00:00:00.922764980 +305 98 0 days 00:00:01.151928613 +305 99 0 days 00:00:01.079091804 +305 100 0 days 00:00:00.728017158 +306 1 0 days 00:00:00.306291228 +306 2 0 days 00:00:00.493937214 +306 3 0 days 00:00:00.159842184 +306 5 0 days 00:00:00.189890718 +306 6 0 days 00:00:00.493136202 +306 7 0 days 00:00:00.198751050 +306 8 0 days 00:00:00.535820811 +306 9 0 days 00:00:00.283249066 +306 11 0 days 00:00:00.285676831 +306 12 0 days 00:00:00.283220623 +306 13 0 days 00:00:00.160554791 +306 14 0 days 00:00:00.126369233 +306 15 0 days 00:00:00.513120860 +306 17 0 days 00:00:00.318813210 +306 19 0 days 00:00:00.507912101 +306 21 0 days 00:00:00.523722200 +306 22 0 days 00:00:00.524793649 +306 23 0 days 00:00:00.287268427 +306 25 0 days 00:00:00.384042705 +306 26 0 days 00:00:00.507394073 +306 
28 0 days 00:00:00.528515705 +306 31 0 days 00:00:00.281426285 +306 32 0 days 00:00:00.469610622 +306 34 0 days 00:00:00.287451581 +306 36 0 days 00:00:00.291132651 +306 37 0 days 00:00:00.279714228 +306 38 0 days 00:00:00.312447325 +306 39 0 days 00:00:00.193995556 +306 40 0 days 00:00:00.170525933 +306 41 0 days 00:00:00.290154352 +306 42 0 days 00:00:00.287974302 +306 46 0 days 00:00:00.495024608 +306 47 0 days 00:00:00.503246016 +306 48 0 days 00:00:00.280805750 +306 50 0 days 00:00:00.514413995 +306 51 0 days 00:00:00.175303870 +306 53 0 days 00:00:00.514583672 +306 54 0 days 00:00:00.279257678 +306 57 0 days 00:00:00.442821720 +306 58 0 days 00:00:00.247462265 +306 59 0 days 00:00:00.497973350 +306 62 0 days 00:00:00.281013833 +306 67 0 days 00:00:00.162138795 +306 68 0 days 00:00:00.173869653 +306 69 0 days 00:00:00.137849633 +306 70 0 days 00:00:00.367344942 +306 72 0 days 00:00:00.258140932 +306 73 0 days 00:00:00.336535803 +306 74 0 days 00:00:00.272212578 +306 75 0 days 00:00:00.496128702 +306 76 0 days 00:00:00.520469387 +306 78 0 days 00:00:00.215217323 +306 79 0 days 00:00:00.495299962 +306 80 0 days 00:00:00.499344635 +306 82 0 days 00:00:00.434091408 +306 83 0 days 00:00:00.155250121 +306 84 0 days 00:00:00.279946025 +306 87 0 days 00:00:00.488102442 +306 88 0 days 00:00:00.391937485 +306 89 0 days 00:00:00.521690280 +306 90 0 days 00:00:00.513917781 +306 91 0 days 00:00:00.176337805 +306 92 0 days 00:00:00.516345655 +306 93 0 days 00:00:00.138240900 +306 95 0 days 00:00:00.144983650 +306 96 0 days 00:00:00.405602411 +306 97 0 days 00:00:00.536500944 +306 98 0 days 00:00:00.490653622 +306 100 0 days 00:00:00.153800761 +307 1 0 days 00:00:00.282758990 +307 2 0 days 00:00:00.584956353 +307 4 0 days 00:00:00.292491918 +307 5 0 days 00:00:00.478562364 +307 6 0 days 00:00:00.167232589 +307 7 0 days 00:00:00.544432824 +307 8 0 days 00:00:00.540552131 +307 9 0 days 00:00:00.545160390 +307 10 0 days 00:00:00.539195735 +307 11 0 days 00:00:00.164052014 +307 
14 0 days 00:00:00.174539853 +307 16 0 days 00:00:00.304254387 +307 17 0 days 00:00:00.588849578 +307 18 0 days 00:00:00.279905515 +307 19 0 days 00:00:00.162115087 +307 21 0 days 00:00:00.288991541 +307 22 0 days 00:00:00.521442742 +307 23 0 days 00:00:00.291516529 +307 24 0 days 00:00:00.164208110 +307 27 0 days 00:00:00.171788974 +307 28 0 days 00:00:00.284043420 +307 29 0 days 00:00:00.284115538 +307 30 0 days 00:00:00.279240488 +307 31 0 days 00:00:00.537468135 +307 32 0 days 00:00:00.593505396 +307 33 0 days 00:00:00.535583181 +307 34 0 days 00:00:00.544360126 +307 35 0 days 00:00:00.545793347 +307 39 0 days 00:00:00.549028391 +307 40 0 days 00:00:00.526235893 +307 41 0 days 00:00:00.164358028 +307 42 0 days 00:00:00.287383535 +307 44 0 days 00:00:00.209857059 +307 45 0 days 00:00:00.482948480 +307 46 0 days 00:00:00.143474953 +307 47 0 days 00:00:00.504408662 +307 48 0 days 00:00:00.249454997 +307 50 0 days 00:00:00.567816123 +307 51 0 days 00:00:00.531126788 +307 53 0 days 00:00:00.529577481 +307 54 0 days 00:00:00.574258866 +307 56 0 days 00:00:00.320047064 +307 57 0 days 00:00:00.302377309 +307 58 0 days 00:00:00.250281291 +307 59 0 days 00:00:00.179634961 +307 60 0 days 00:00:00.364598752 +307 62 0 days 00:00:00.342087971 +307 64 0 days 00:00:00.179751247 +307 66 0 days 00:00:00.319268822 +307 68 0 days 00:00:00.342312791 +307 69 0 days 00:00:00.594639404 +307 71 0 days 00:00:00.589684822 +307 72 0 days 00:00:00.167926526 +307 73 0 days 00:00:00.572334154 +307 74 0 days 00:00:00.559264509 +307 76 0 days 00:00:00.575052575 +307 77 0 days 00:00:00.278638657 +307 78 0 days 00:00:00.287837176 +307 79 0 days 00:00:00.318893024 +307 80 0 days 00:00:00.308101364 +307 82 0 days 00:00:00.286705662 +307 84 0 days 00:00:00.201933700 +307 87 0 days 00:00:00.310389576 +307 88 0 days 00:00:00.183527720 +307 90 0 days 00:00:00.230940960 +307 91 0 days 00:00:00.541317842 +307 92 0 days 00:00:00.307345614 +307 93 0 days 00:00:00.316444616 +307 94 0 days 
00:00:00.610375227 +307 96 0 days 00:00:00.558002353 +307 97 0 days 00:00:00.543137303 +307 98 0 days 00:00:00.560366985 +307 100 0 days 00:00:00.311394221 +308 1 0 days 00:00:01.358369398 +308 2 0 days 00:00:00.909645038 +308 3 0 days 00:00:00.716850166 +308 4 0 days 00:00:00.759176001 +308 5 0 days 00:00:01.343461761 +308 6 0 days 00:00:01.225758997 +308 7 0 days 00:00:00.681570905 +308 8 0 days 00:00:01.421142413 +308 9 0 days 00:00:00.521755892 +308 10 0 days 00:00:01.282118537 +308 11 0 days 00:00:01.297562146 +308 12 0 days 00:00:00.847408172 +308 13 0 days 00:00:01.364020707 +308 14 0 days 00:00:00.717670877 +308 15 0 days 00:00:01.375174190 +308 16 0 days 00:00:01.527502832 +308 17 0 days 00:00:00.616314040 +308 18 0 days 00:00:01.408601141 +308 19 0 days 00:00:01.411162723 +308 20 0 days 00:00:00.804699780 +308 21 0 days 00:00:00.735783080 +308 22 0 days 00:00:00.647659355 +308 23 0 days 00:00:01.191857826 +308 24 0 days 00:00:01.494778210 +308 25 0 days 00:00:00.705300288 +308 26 0 days 00:00:01.419672143 +308 27 0 days 00:00:01.505488138 +308 28 0 days 00:00:00.474693824 +308 29 0 days 00:00:01.371773936 +308 30 0 days 00:00:00.457784090 +308 31 0 days 00:00:00.443117495 +308 32 0 days 00:00:00.697796748 +308 33 0 days 00:00:00.838502745 +308 34 0 days 00:00:00.818421515 +308 35 0 days 00:00:00.407832340 +308 36 0 days 00:00:01.440449556 +308 37 0 days 00:00:01.556735886 +308 38 0 days 00:00:00.414939126 +308 39 0 days 00:00:00.628983513 +308 40 0 days 00:00:00.729999770 +308 41 0 days 00:00:00.593169993 +308 42 0 days 00:00:00.840934198 +308 43 0 days 00:00:00.735716805 +308 44 0 days 00:00:00.559915701 +308 45 0 days 00:00:00.660957043 +308 46 0 days 00:00:00.717205475 +308 48 0 days 00:00:00.810334320 +308 49 0 days 00:00:01.518671942 +308 50 0 days 00:00:00.723713529 +308 51 0 days 00:00:01.312048315 +308 52 0 days 00:00:00.760079350 +308 53 0 days 00:00:01.484257312 +308 54 0 days 00:00:00.763109517 +308 56 0 days 00:00:01.220826350 +308 57 0 days 
00:00:01.345814583 +308 58 0 days 00:00:01.499224380 +308 59 0 days 00:00:00.405107848 +308 60 0 days 00:00:01.272157083 +308 61 0 days 00:00:01.023602765 +308 62 0 days 00:00:01.374979630 +308 63 0 days 00:00:00.479225190 +308 64 0 days 00:00:00.772016092 +308 65 0 days 00:00:00.381476700 +308 66 0 days 00:00:01.363586471 +308 67 0 days 00:00:00.826120137 +308 68 0 days 00:00:01.000908144 +308 69 0 days 00:00:00.703394682 +308 70 0 days 00:00:00.808447951 +308 71 0 days 00:00:00.806958126 +308 72 0 days 00:00:00.390335415 +308 73 0 days 00:00:00.789422361 +308 74 0 days 00:00:00.423795783 +308 75 0 days 00:00:01.315228706 +308 76 0 days 00:00:01.266481255 +308 77 0 days 00:00:01.050323007 +308 79 0 days 00:00:00.399213151 +308 80 0 days 00:00:00.496886840 +308 81 0 days 00:00:00.321769744 +308 82 0 days 00:00:00.557063311 +308 83 0 days 00:00:00.424557532 +308 84 0 days 00:00:00.608470160 +308 85 0 days 00:00:00.611235722 +308 86 0 days 00:00:00.561405858 +308 87 0 days 00:00:01.077033456 +308 88 0 days 00:00:00.976051671 +308 89 0 days 00:00:00.348391410 +308 90 0 days 00:00:00.358835220 +308 91 0 days 00:00:00.430873933 +308 92 0 days 00:00:00.570065955 +308 93 0 days 00:00:01.057257295 +308 94 0 days 00:00:00.566288821 +308 95 0 days 00:00:00.346875828 +308 96 0 days 00:00:01.030722710 +308 97 0 days 00:00:00.298798924 +308 98 0 days 00:00:00.285601408 +308 99 0 days 00:00:00.340668520 +308 100 0 days 00:00:00.973703305 +309 1 0 days 00:00:00.282897310 +309 2 0 days 00:00:00.473576091 +309 3 0 days 00:00:00.484476680 +309 4 0 days 00:00:00.493967080 +309 5 0 days 00:00:00.296548523 +309 6 0 days 00:00:00.404579467 +309 7 0 days 00:00:00.498909352 +309 8 0 days 00:00:00.331464988 +309 9 0 days 00:00:00.165598771 +309 10 0 days 00:00:00.165397014 +309 11 0 days 00:00:00.138715595 +309 12 0 days 00:00:00.511934712 +309 13 0 days 00:00:00.321180204 +309 14 0 days 00:00:00.272176862 +309 15 0 days 00:00:00.181505474 +309 17 0 days 00:00:00.520126631 +309 18 0 days 
00:00:00.525701381 +309 20 0 days 00:00:00.288525677 +309 21 0 days 00:00:00.534648931 +309 22 0 days 00:00:00.284669238 +309 23 0 days 00:00:00.288429669 +309 24 0 days 00:00:00.243697800 +309 25 0 days 00:00:00.546576296 +309 26 0 days 00:00:00.279116218 +309 27 0 days 00:00:00.261955250 +309 28 0 days 00:00:00.291037474 +309 29 0 days 00:00:00.519569122 +309 30 0 days 00:00:00.179533776 +309 31 0 days 00:00:00.165623761 +309 32 0 days 00:00:00.270031421 +309 33 0 days 00:00:00.524041636 +309 34 0 days 00:00:00.190222766 +309 35 0 days 00:00:00.176219608 +309 36 0 days 00:00:00.493925144 +309 37 0 days 00:00:00.298955852 +309 38 0 days 00:00:00.159911140 +309 39 0 days 00:00:00.283542824 +309 40 0 days 00:00:00.167014823 +309 41 0 days 00:00:00.471416616 +309 42 0 days 00:00:00.289892042 +309 43 0 days 00:00:00.490515686 +309 44 0 days 00:00:00.219707571 +309 45 0 days 00:00:00.205723175 +309 46 0 days 00:00:00.322767305 +309 47 0 days 00:00:00.167991885 +309 48 0 days 00:00:00.534068537 +309 49 0 days 00:00:00.523502861 +309 50 0 days 00:00:00.170260841 +309 51 0 days 00:00:00.281204361 +309 52 0 days 00:00:00.281568521 +309 53 0 days 00:00:00.165960130 +309 54 0 days 00:00:00.484534970 +309 55 0 days 00:00:00.520343606 +309 57 0 days 00:00:00.548100268 +309 58 0 days 00:00:00.488068377 +309 59 0 days 00:00:00.519015276 +309 61 0 days 00:00:00.529400498 +309 63 0 days 00:00:00.281049010 +309 64 0 days 00:00:00.512935905 +309 66 0 days 00:00:00.340849831 +309 67 0 days 00:00:00.259472183 +309 68 0 days 00:00:00.520344396 +309 69 0 days 00:00:00.224462488 +309 70 0 days 00:00:00.537751468 +309 71 0 days 00:00:00.279178224 +309 72 0 days 00:00:00.292829330 +309 73 0 days 00:00:00.306641600 +309 75 0 days 00:00:00.524144670 +309 77 0 days 00:00:00.530804778 +309 78 0 days 00:00:00.197259087 +309 79 0 days 00:00:00.506400200 +309 80 0 days 00:00:00.494870340 +309 82 0 days 00:00:00.146955525 +309 83 0 days 00:00:00.232224580 +309 84 0 days 00:00:00.486406137 +309 85 
0 days 00:00:00.209306369 +309 86 0 days 00:00:00.319717292 +309 87 0 days 00:00:00.508126120 +309 88 0 days 00:00:00.513449714 +309 89 0 days 00:00:00.307611185 +309 91 0 days 00:00:00.381716148 +309 92 0 days 00:00:00.361608888 +309 93 0 days 00:00:00.487050933 +309 94 0 days 00:00:00.166494844 +309 95 0 days 00:00:00.138910825 +309 96 0 days 00:00:00.275833431 +309 97 0 days 00:00:00.196328808 +309 98 0 days 00:00:00.215908541 +309 99 0 days 00:00:00.517363592 +309 100 0 days 00:00:00.293889030 +310 1 0 days 00:00:00.405295702 +310 2 0 days 00:00:01.203554236 +310 3 0 days 00:00:00.616869900 +310 4 0 days 00:00:00.610878222 +310 5 0 days 00:00:00.724176220 +310 6 0 days 00:00:00.814882752 +310 7 0 days 00:00:01.393741365 +310 8 0 days 00:00:00.690795480 +310 9 0 days 00:00:00.377901223 +310 10 0 days 00:00:01.393430826 +310 11 0 days 00:00:00.758007300 +310 12 0 days 00:00:01.295410546 +310 13 0 days 00:00:00.484336994 +310 14 0 days 00:00:01.238172013 +310 15 0 days 00:00:00.398426245 +310 16 0 days 00:00:00.425711014 +310 17 0 days 00:00:00.778297443 +310 18 0 days 00:00:01.178426190 +310 19 0 days 00:00:00.786181060 +310 20 0 days 00:00:00.423387662 +310 21 0 days 00:00:01.228819180 +310 22 0 days 00:00:00.654253565 +310 23 0 days 00:00:01.227964003 +310 24 0 days 00:00:01.275767460 +310 25 0 days 00:00:00.768627483 +310 26 0 days 00:00:00.405506087 +310 27 0 days 00:00:01.275790220 +310 28 0 days 00:00:01.294467523 +310 29 0 days 00:00:01.358177517 +310 30 0 days 00:00:01.300277133 +310 31 0 days 00:00:00.395142517 +310 32 0 days 00:00:01.212525445 +310 33 0 days 00:00:01.196710540 +310 34 0 days 00:00:01.399873008 +310 35 0 days 00:00:00.369460476 +310 36 0 days 00:00:00.660805135 +310 37 0 days 00:00:00.699254140 +310 38 0 days 00:00:00.592142220 +310 39 0 days 00:00:00.673681086 +310 40 0 days 00:00:00.786352564 +310 41 0 days 00:00:01.158379604 +310 42 0 days 00:00:00.611903917 +310 43 0 days 00:00:00.713149690 +310 44 0 days 00:00:00.388044145 +310 45 0 
days 00:00:00.705034360 +310 46 0 days 00:00:01.064835785 +310 47 0 days 00:00:00.304233245 +310 48 0 days 00:00:01.378539635 +310 49 0 days 00:00:01.661709482 +310 50 0 days 00:00:00.999651833 +310 51 0 days 00:00:00.646013830 +310 52 0 days 00:00:00.440876050 +310 53 0 days 00:00:00.413652615 +310 54 0 days 00:00:00.804127920 +310 55 0 days 00:00:00.566494725 +310 56 0 days 00:00:00.710771052 +310 57 0 days 00:00:01.273504768 +310 58 0 days 00:00:00.645968080 +310 59 0 days 00:00:00.729520930 +310 60 0 days 00:00:01.122624180 +310 61 0 days 00:00:00.811574467 +310 62 0 days 00:00:00.513454455 +310 63 0 days 00:00:01.424483174 +310 64 0 days 00:00:00.383026528 +310 65 0 days 00:00:00.644515596 +310 66 0 days 00:00:00.569686108 +310 67 0 days 00:00:01.091319580 +310 68 0 days 00:00:00.771352510 +310 69 0 days 00:00:00.699885375 +310 70 0 days 00:00:01.453958312 +310 71 0 days 00:00:01.323914837 +310 72 0 days 00:00:01.634609357 +310 73 0 days 00:00:01.267429820 +310 74 0 days 00:00:00.742304705 +310 75 0 days 00:00:01.217164800 +310 76 0 days 00:00:01.264823540 +310 77 0 days 00:00:00.464314544 +310 78 0 days 00:00:00.367037850 +310 79 0 days 00:00:00.412762433 +310 80 0 days 00:00:00.820023323 +310 81 0 days 00:00:00.274035791 +310 82 0 days 00:00:00.496147254 +310 83 0 days 00:00:00.717299847 +310 84 0 days 00:00:01.138614880 +310 85 0 days 00:00:00.598084056 +310 86 0 days 00:00:00.441442234 +310 87 0 days 00:00:01.839254730 +310 88 0 days 00:00:01.208064690 +310 89 0 days 00:00:01.782996920 +310 90 0 days 00:00:00.744271220 +310 91 0 days 00:00:01.507310324 +310 92 0 days 00:00:01.170424065 +310 93 0 days 00:00:00.431662803 +310 94 0 days 00:00:00.377522044 +310 95 0 days 00:00:00.395570590 +310 96 0 days 00:00:00.639396906 +310 97 0 days 00:00:01.038533916 +310 98 0 days 00:00:01.051439405 +310 99 0 days 00:00:01.188524920 +310 100 0 days 00:00:00.395158142 +311 1 0 days 00:00:00.893912612 +311 2 0 days 00:00:00.602818384 +311 3 0 days 00:00:00.540317845 +311 
4 0 days 00:00:00.623978048 +311 5 0 days 00:00:00.194231605 +311 6 0 days 00:00:00.397791703 +311 7 0 days 00:00:00.306785945 +311 8 0 days 00:00:00.239790048 +311 9 0 days 00:00:00.673312915 +311 10 0 days 00:00:00.202455198 +311 11 0 days 00:00:00.351528087 +311 12 0 days 00:00:00.221437026 +311 13 0 days 00:00:00.332268085 +311 14 0 days 00:00:00.640224893 +311 15 0 days 00:00:00.644979313 +311 16 0 days 00:00:00.420495032 +311 17 0 days 00:00:00.227799290 +311 18 0 days 00:00:00.379776610 +311 19 0 days 00:00:00.196285856 +311 20 0 days 00:00:00.714630000 +311 21 0 days 00:00:00.412667945 +311 22 0 days 00:00:00.212101150 +311 23 0 days 00:00:00.291887960 +311 24 0 days 00:00:00.629579004 +311 25 0 days 00:00:00.760359430 +311 27 0 days 00:00:00.286236576 +311 28 0 days 00:00:00.295935875 +311 29 0 days 00:00:00.192335502 +311 30 0 days 00:00:00.191947200 +311 31 0 days 00:00:00.364473050 +311 32 0 days 00:00:00.354468955 +311 33 0 days 00:00:00.336699490 +311 34 0 days 00:00:00.407859380 +311 35 0 days 00:00:00.207161672 +311 36 0 days 00:00:00.205601618 +311 37 0 days 00:00:00.682146050 +311 38 0 days 00:00:00.751965115 +311 39 0 days 00:00:00.335024500 +311 40 0 days 00:00:00.225364464 +311 41 0 days 00:00:00.361825592 +311 42 0 days 00:00:00.327426040 +311 43 0 days 00:00:00.380803247 +311 44 0 days 00:00:00.211050726 +311 45 0 days 00:00:00.500277440 +311 46 0 days 00:00:00.270784660 +311 47 0 days 00:00:00.276531215 +311 48 0 days 00:00:00.363560880 +311 49 0 days 00:00:00.505652330 +311 50 0 days 00:00:00.661616416 +311 51 0 days 00:00:00.544349645 +311 52 0 days 00:00:00.557486745 +311 53 0 days 00:00:00.637001683 +311 54 0 days 00:00:00.657641524 +311 55 0 days 00:00:00.616211544 +311 56 0 days 00:00:00.212069428 +311 57 0 days 00:00:00.675057416 +311 58 0 days 00:00:00.814331100 +311 59 0 days 00:00:00.659365480 +311 60 0 days 00:00:00.296593408 +311 61 0 days 00:00:00.349654457 +311 62 0 days 00:00:00.185430071 +311 63 0 days 00:00:00.565263892 +311 
64 0 days 00:00:00.639390504 +311 65 0 days 00:00:00.295817790 +311 66 0 days 00:00:00.621490255 +311 67 0 days 00:00:00.651748291 +311 68 0 days 00:00:00.536529115 +311 69 0 days 00:00:00.601131476 +311 70 0 days 00:00:00.324895494 +311 71 0 days 00:00:00.349900116 +311 72 0 days 00:00:00.561428445 +311 73 0 days 00:00:00.236397180 +311 74 0 days 00:00:00.203379080 +311 75 0 days 00:00:00.195548728 +311 76 0 days 00:00:00.218204115 +311 77 0 days 00:00:00.211566746 +311 78 0 days 00:00:00.403684665 +311 79 0 days 00:00:00.395282247 +311 80 0 days 00:00:00.817438954 +311 81 0 days 00:00:00.315457500 +311 82 0 days 00:00:00.463537577 +311 83 0 days 00:00:00.416792175 +311 84 0 days 00:00:00.656593188 +311 85 0 days 00:00:00.282855771 +311 86 0 days 00:00:00.249473982 +311 87 0 days 00:00:00.465534908 +311 88 0 days 00:00:00.683754723 +311 89 0 days 00:00:00.177774353 +311 90 0 days 00:00:00.184016592 +311 91 0 days 00:00:00.622685465 +311 92 0 days 00:00:00.278716406 +311 93 0 days 00:00:00.643278012 +311 94 0 days 00:00:00.389038870 +311 95 0 days 00:00:00.373723124 +311 96 0 days 00:00:00.194437822 +311 97 0 days 00:00:00.202257562 +311 98 0 days 00:00:00.357069840 +311 99 0 days 00:00:00.205696967 +311 100 0 days 00:00:00.191756611 +312 1 0 days 00:00:00.132750268 +312 2 0 days 00:00:00.140476512 +312 3 0 days 00:00:00.120599013 +312 4 0 days 00:00:00.192956126 +312 5 0 days 00:00:00.172898220 +312 6 0 days 00:00:00.120524448 +312 7 0 days 00:00:00.132575502 +312 8 0 days 00:00:00.146354342 +312 9 0 days 00:00:00.137103848 +312 10 0 days 00:00:00.129990315 +312 11 0 days 00:00:00.173644325 +312 12 0 days 00:00:00.153370236 +312 13 0 days 00:00:00.119021910 +312 14 0 days 00:00:00.131878775 +312 15 0 days 00:00:00.132124815 +312 16 0 days 00:00:00.193249537 +312 17 0 days 00:00:00.131277115 +312 18 0 days 00:00:00.150558548 +312 19 0 days 00:00:00.129778295 +312 20 0 days 00:00:00.162847435 +312 21 0 days 00:00:00.194970913 +312 22 0 days 00:00:00.134090835 +312 
23 0 days 00:00:00.201094253 +312 24 0 days 00:00:00.171347065 +312 25 0 days 00:00:00.130590596 +312 26 0 days 00:00:00.153750435 +312 27 0 days 00:00:00.167190792 +312 28 0 days 00:00:00.181985884 +312 29 0 days 00:00:00.121007054 +312 30 0 days 00:00:00.114561825 +312 31 0 days 00:00:00.130293852 +312 32 0 days 00:00:00.112246220 +312 33 0 days 00:00:00.111750380 +312 34 0 days 00:00:00.131909220 +312 35 0 days 00:00:00.134223216 +312 36 0 days 00:00:00.177915812 +312 37 0 days 00:00:00.114326960 +312 38 0 days 00:00:00.133515445 +312 39 0 days 00:00:00.115852176 +312 40 0 days 00:00:00.195823368 +312 41 0 days 00:00:00.203104351 +312 42 0 days 00:00:00.139392722 +312 43 0 days 00:00:00.137705508 +312 44 0 days 00:00:00.188000668 +312 45 0 days 00:00:00.192606365 +312 46 0 days 00:00:00.177517585 +312 47 0 days 00:00:00.206442605 +312 48 0 days 00:00:00.175710465 +312 49 0 days 00:00:00.179873515 +312 50 0 days 00:00:00.175944105 +312 51 0 days 00:00:00.132940620 +312 52 0 days 00:00:00.181245268 +312 53 0 days 00:00:00.165872976 +312 54 0 days 00:00:00.121578100 +312 55 0 days 00:00:00.123825568 +312 56 0 days 00:00:00.137892584 +312 57 0 days 00:00:00.178254235 +312 58 0 days 00:00:00.133493735 +312 59 0 days 00:00:00.152847546 +312 60 0 days 00:00:00.198661320 +312 61 0 days 00:00:00.125010106 +312 62 0 days 00:00:00.187333500 +312 63 0 days 00:00:00.150541745 +312 64 0 days 00:00:00.179252120 +312 65 0 days 00:00:00.178841780 +312 66 0 days 00:00:00.195853750 +312 67 0 days 00:00:00.189768620 +312 68 0 days 00:00:00.118784706 +312 69 0 days 00:00:00.190777040 +312 70 0 days 00:00:00.123137528 +312 71 0 days 00:00:00.154711976 +312 72 0 days 00:00:00.178091995 +312 73 0 days 00:00:00.132909950 +312 74 0 days 00:00:00.132348080 +312 75 0 days 00:00:00.194366626 +312 76 0 days 00:00:00.116197104 +312 77 0 days 00:00:00.139521255 +312 78 0 days 00:00:00.174651750 +312 79 0 days 00:00:00.132493425 +312 80 0 days 00:00:00.108796585 +312 81 0 days 
00:00:00.114199655 +312 82 0 days 00:00:00.152332331 +312 83 0 days 00:00:00.124169410 +312 84 0 days 00:00:00.190733933 +312 85 0 days 00:00:00.209012263 +312 86 0 days 00:00:00.185561946 +312 87 0 days 00:00:00.115839960 +312 88 0 days 00:00:00.179249684 +312 89 0 days 00:00:00.133705145 +312 90 0 days 00:00:00.158229182 +312 91 0 days 00:00:00.196243894 +312 92 0 days 00:00:00.143329630 +312 93 0 days 00:00:00.216534213 +312 94 0 days 00:00:00.167253195 +312 95 0 days 00:00:00.184967525 +312 96 0 days 00:00:00.191148090 +312 97 0 days 00:00:00.197442616 +312 98 0 days 00:00:00.174196695 +312 99 0 days 00:00:00.141973948 +312 100 0 days 00:00:00.132909880 +313 1 0 days 00:00:00.134920930 +313 2 0 days 00:00:00.116277215 +313 3 0 days 00:00:00.134268956 +313 4 0 days 00:00:00.131301505 +313 5 0 days 00:00:00.123239100 +313 6 0 days 00:00:00.173490655 +313 7 0 days 00:00:00.122954993 +313 8 0 days 00:00:00.172941150 +313 9 0 days 00:00:00.203263005 +313 10 0 days 00:00:00.140565936 +313 11 0 days 00:00:00.229234880 +313 12 0 days 00:00:00.130102855 +313 13 0 days 00:00:00.130872283 +313 14 0 days 00:00:00.132660802 +313 15 0 days 00:00:00.134968820 +313 16 0 days 00:00:00.136393796 +313 17 0 days 00:00:00.146003682 +313 18 0 days 00:00:00.142607905 +313 19 0 days 00:00:00.208045552 +313 20 0 days 00:00:00.116995770 +313 21 0 days 00:00:00.156609475 +313 22 0 days 00:00:00.146238035 +313 23 0 days 00:00:00.178527860 +313 24 0 days 00:00:00.164311537 +313 25 0 days 00:00:00.190272615 +313 26 0 days 00:00:00.177225660 +313 27 0 days 00:00:00.175104146 +313 28 0 days 00:00:00.129877180 +313 29 0 days 00:00:00.143563788 +313 30 0 days 00:00:00.132278967 +313 31 0 days 00:00:00.163171446 +313 32 0 days 00:00:00.200757105 +313 33 0 days 00:00:00.116791635 +313 34 0 days 00:00:00.130740896 +313 35 0 days 00:00:00.129635612 +313 36 0 days 00:00:00.126902831 +313 37 0 days 00:00:00.197821320 +313 38 0 days 00:00:00.181900005 +313 39 0 days 00:00:00.184436703 +313 40 0 days 
00:00:00.189917324 +313 41 0 days 00:00:00.118543980 +313 42 0 days 00:00:00.123907765 +313 43 0 days 00:00:00.184953550 +313 44 0 days 00:00:00.163833213 +313 45 0 days 00:00:00.111984630 +313 46 0 days 00:00:00.119959640 +313 47 0 days 00:00:00.154294885 +313 48 0 days 00:00:00.209088680 +313 49 0 days 00:00:00.136895816 +313 50 0 days 00:00:00.100664420 +313 51 0 days 00:00:00.143348795 +313 52 0 days 00:00:00.127669856 +313 53 0 days 00:00:00.128138110 +313 54 0 days 00:00:00.113189580 +313 55 0 days 00:00:00.140694580 +313 56 0 days 00:00:00.176349470 +313 57 0 days 00:00:00.124184715 +313 58 0 days 00:00:00.167463475 +313 59 0 days 00:00:00.145454402 +313 60 0 days 00:00:00.191903410 +313 62 0 days 00:00:00.115740786 +313 63 0 days 00:00:00.218713755 +313 64 0 days 00:00:00.149122396 +313 65 0 days 00:00:00.191206226 +313 66 0 days 00:00:00.118711808 +313 67 0 days 00:00:00.186008686 +313 68 0 days 00:00:00.131408835 +313 69 0 days 00:00:00.184286644 +313 70 0 days 00:00:00.151129925 +313 71 0 days 00:00:00.134361410 +313 72 0 days 00:00:00.139150116 +313 73 0 days 00:00:00.109686950 +313 74 0 days 00:00:00.116341036 +313 75 0 days 00:00:00.153791746 +313 76 0 days 00:00:00.133376215 +313 77 0 days 00:00:00.149642150 +313 78 0 days 00:00:00.147751172 +313 79 0 days 00:00:00.174420635 +313 80 0 days 00:00:00.184421216 +313 81 0 days 00:00:00.132619050 +313 82 0 days 00:00:00.190907263 +313 83 0 days 00:00:00.207484053 +313 84 0 days 00:00:00.138638860 +313 85 0 days 00:00:00.137114652 +313 86 0 days 00:00:00.197865292 +313 87 0 days 00:00:00.117915320 +313 88 0 days 00:00:00.129043084 +313 89 0 days 00:00:00.118551800 +313 90 0 days 00:00:00.113375810 +313 91 0 days 00:00:00.111714090 +313 92 0 days 00:00:00.153152398 +313 93 0 days 00:00:00.130832694 +313 94 0 days 00:00:00.133170950 +313 95 0 days 00:00:00.129929335 +313 96 0 days 00:00:00.130607645 +313 97 0 days 00:00:00.117462604 +313 98 0 days 00:00:00.175616585 +313 99 0 days 00:00:00.173549080 +313 100 
0 days 00:00:00.175025830 +314 1 0 days 00:00:00.078322293 +314 2 0 days 00:00:00.071037050 +314 3 0 days 00:00:00.099752133 +314 4 0 days 00:00:00.068703627 +314 5 0 days 00:00:00.097634985 +314 6 0 days 00:00:00.104516896 +314 7 0 days 00:00:00.105048816 +314 8 0 days 00:00:00.104731883 +314 9 0 days 00:00:00.074144375 +314 10 0 days 00:00:00.082171690 +314 11 0 days 00:00:00.105207670 +314 12 0 days 00:00:00.081616055 +314 13 0 days 00:00:00.081491366 +314 14 0 days 00:00:00.107263416 +314 15 0 days 00:00:00.091404416 +314 16 0 days 00:00:00.100965492 +314 17 0 days 00:00:00.104618843 +314 18 0 days 00:00:00.102620635 +314 19 0 days 00:00:00.101111584 +314 20 0 days 00:00:00.097397280 +314 21 0 days 00:00:00.107149608 +314 22 0 days 00:00:00.081289227 +314 23 0 days 00:00:00.096655950 +314 24 0 days 00:00:00.077927076 +314 25 0 days 00:00:00.068987886 +314 26 0 days 00:00:00.082193137 +314 27 0 days 00:00:00.072692620 +314 28 0 days 00:00:00.078601540 +314 29 0 days 00:00:00.097231725 +314 30 0 days 00:00:00.077866457 +314 31 0 days 00:00:00.101950484 +314 32 0 days 00:00:00.067122782 +314 33 0 days 00:00:00.098638820 +314 34 0 days 00:00:00.071581468 +314 35 0 days 00:00:00.099914660 +314 36 0 days 00:00:00.078167533 +314 37 0 days 00:00:00.066317946 +314 38 0 days 00:00:00.072541496 +314 39 0 days 00:00:00.101818876 +314 40 0 days 00:00:00.105018948 +314 41 0 days 00:00:00.084391372 +314 42 0 days 00:00:00.072156649 +314 43 0 days 00:00:00.105481000 +314 44 0 days 00:00:00.078064696 +314 45 0 days 00:00:00.080542890 +314 46 0 days 00:00:00.107967782 +314 47 0 days 00:00:00.062227815 +314 48 0 days 00:00:00.091206035 +314 49 0 days 00:00:00.062892935 +314 50 0 days 00:00:00.079903882 +314 51 0 days 00:00:00.097104445 +314 52 0 days 00:00:00.082864275 +314 53 0 days 00:00:00.107237302 +314 54 0 days 00:00:00.073769685 +314 55 0 days 00:00:00.070985835 +314 56 0 days 00:00:00.065364364 +314 57 0 days 00:00:00.102771264 +314 58 0 days 00:00:00.071820548 +314 59 0 
days 00:00:00.081154627 +314 60 0 days 00:00:00.066348263 +314 61 0 days 00:00:00.083896416 +314 62 0 days 00:00:00.104268766 +314 63 0 days 00:00:00.079412370 +314 64 0 days 00:00:00.109205254 +314 65 0 days 00:00:00.104336730 +314 66 0 days 00:00:00.075015970 +314 67 0 days 00:00:00.111532852 +314 68 0 days 00:00:00.073427142 +314 69 0 days 00:00:00.069786990 +314 70 0 days 00:00:00.079775887 +314 71 0 days 00:00:00.067563405 +314 72 0 days 00:00:00.071440515 +314 73 0 days 00:00:00.086466505 +314 74 0 days 00:00:00.104512410 +314 75 0 days 00:00:00.066866236 +314 76 0 days 00:00:00.106771933 +314 77 0 days 00:00:00.075985703 +314 78 0 days 00:00:00.078327000 +314 79 0 days 00:00:00.105647246 +314 80 0 days 00:00:00.063502530 +314 81 0 days 00:00:00.104300040 +314 82 0 days 00:00:00.106610757 +314 83 0 days 00:00:00.080881705 +314 84 0 days 00:00:00.096049950 +314 85 0 days 00:00:00.098764285 +314 86 0 days 00:00:00.065647156 +314 87 0 days 00:00:00.073223695 +314 88 0 days 00:00:00.072601680 +314 89 0 days 00:00:00.072397225 +314 90 0 days 00:00:00.080278310 +314 91 0 days 00:00:00.068664726 +314 92 0 days 00:00:00.067855500 +314 93 0 days 00:00:00.079287683 +314 94 0 days 00:00:00.072959135 +314 95 0 days 00:00:00.072321122 +314 96 0 days 00:00:00.079767460 +314 97 0 days 00:00:00.079505985 +314 98 0 days 00:00:00.092779396 +314 99 0 days 00:00:00.103654670 +314 100 0 days 00:00:00.075956644 +315 1 0 days 00:00:00.063687450 +315 2 0 days 00:00:00.074886195 +315 3 0 days 00:00:00.069747922 +315 4 0 days 00:00:00.108448926 +315 5 0 days 00:00:00.076662596 +315 6 0 days 00:00:00.074595735 +315 7 0 days 00:00:00.113665566 +315 8 0 days 00:00:00.075721160 +315 9 0 days 00:00:00.080588430 +315 10 0 days 00:00:00.070940115 +315 11 0 days 00:00:00.070335990 +315 12 0 days 00:00:00.083359513 +315 13 0 days 00:00:00.084411842 +315 14 0 days 00:00:00.074202680 +315 15 0 days 00:00:00.070724374 +315 16 0 days 00:00:00.081030740 +315 17 0 days 00:00:00.099431190 +315 18 0 
days 00:00:00.074548760 +315 19 0 days 00:00:00.118804440 +315 20 0 days 00:00:00.102221575 +315 21 0 days 00:00:00.067955176 +315 22 0 days 00:00:00.100020995 +315 23 0 days 00:00:00.117394304 +315 24 0 days 00:00:00.082998912 +315 25 0 days 00:00:00.124381185 +315 27 0 days 00:00:00.071400005 +315 28 0 days 00:00:00.079584830 +315 29 0 days 00:00:00.080891174 +315 30 0 days 00:00:00.085461682 +315 31 0 days 00:00:00.099614330 +315 32 0 days 00:00:00.067626413 +315 33 0 days 00:00:00.068291603 +315 34 0 days 00:00:00.073325605 +315 35 0 days 00:00:00.100927190 +315 36 0 days 00:00:00.067990545 +315 37 0 days 00:00:00.089560203 +315 38 0 days 00:00:00.072949798 +315 39 0 days 00:00:00.068263550 +315 40 0 days 00:00:00.083872791 +315 41 0 days 00:00:00.116963012 +315 42 0 days 00:00:00.087140363 +315 44 0 days 00:00:00.074691326 +315 45 0 days 00:00:00.106213352 +315 46 0 days 00:00:00.084485411 +315 47 0 days 00:00:00.075600555 +315 48 0 days 00:00:00.073515877 +315 49 0 days 00:00:00.073531962 +315 50 0 days 00:00:00.084977018 +315 51 0 days 00:00:00.073944470 +315 52 0 days 00:00:00.064757630 +315 53 0 days 00:00:00.099441685 +315 54 0 days 00:00:00.083508331 +315 55 0 days 00:00:00.079979834 +315 56 0 days 00:00:00.070859420 +315 57 0 days 00:00:00.108356311 +315 58 0 days 00:00:00.095048155 +315 59 0 days 00:00:00.082817782 +315 60 0 days 00:00:00.075555776 +315 61 0 days 00:00:00.074852140 +315 62 0 days 00:00:00.074050100 +315 63 0 days 00:00:00.084056082 +315 64 0 days 00:00:00.103991812 +315 65 0 days 00:00:00.104405260 +315 66 0 days 00:00:00.063036060 +315 67 0 days 00:00:00.075495140 +315 68 0 days 00:00:00.105120712 +315 69 0 days 00:00:00.079956840 +315 70 0 days 00:00:00.101211795 +315 71 0 days 00:00:00.065815332 +315 72 0 days 00:00:00.106448368 +315 73 0 days 00:00:00.081148641 +315 74 0 days 00:00:00.092237049 +315 75 0 days 00:00:00.065281825 +315 76 0 days 00:00:00.061525445 +315 77 0 days 00:00:00.110023390 +315 78 0 days 00:00:00.070226137 
+315 79 0 days 00:00:00.063585225 +315 80 0 days 00:00:00.081519917 +315 81 0 days 00:00:00.072702224 +315 82 0 days 00:00:00.092702773 +315 83 0 days 00:00:00.100448900 +315 84 0 days 00:00:00.066904352 +315 85 0 days 00:00:00.107318786 +315 86 0 days 00:00:00.084970036 +315 87 0 days 00:00:00.100515615 +315 88 0 days 00:00:00.108606317 +315 89 0 days 00:00:00.065557610 +315 90 0 days 00:00:00.099272175 +315 91 0 days 00:00:00.082567565 +315 92 0 days 00:00:00.073229855 +315 93 0 days 00:00:00.065060250 +315 94 0 days 00:00:00.110575605 +315 95 0 days 00:00:00.085575548 +315 96 0 days 00:00:00.074209360 +315 97 0 days 00:00:00.111390928 +315 98 0 days 00:00:00.082775402 +315 99 0 days 00:00:00.065651864 +315 100 0 days 00:00:00.071462617 +316 1 0 days 00:00:00.121962420 +316 2 0 days 00:00:00.171247010 +316 3 0 days 00:00:00.179638112 +316 4 0 days 00:00:00.127796872 +316 5 0 days 00:00:00.126926448 +316 6 0 days 00:00:00.150398940 +316 7 0 days 00:00:00.132827265 +316 8 0 days 00:00:00.186252430 +316 9 0 days 00:00:00.187250783 +316 10 0 days 00:00:00.173030765 +316 11 0 days 00:00:00.120575630 +316 12 0 days 00:00:00.183099228 +316 13 0 days 00:00:00.144957793 +316 14 0 days 00:00:00.132287740 +316 15 0 days 00:00:00.142044248 +316 16 0 days 00:00:00.108693230 +316 17 0 days 00:00:00.209408264 +316 18 0 days 00:00:00.118769146 +316 19 0 days 00:00:00.129749865 +316 20 0 days 00:00:00.141121643 +316 21 0 days 00:00:00.110949800 +316 22 0 days 00:00:00.137228004 +316 23 0 days 00:00:00.131530050 +316 24 0 days 00:00:00.115253544 +316 25 0 days 00:00:00.127379280 +316 26 0 days 00:00:00.128878174 +316 27 0 days 00:00:00.140383795 +316 28 0 days 00:00:00.168270815 +316 29 0 days 00:00:00.107965400 +316 30 0 days 00:00:00.129442500 +316 31 0 days 00:00:00.180212765 +316 32 0 days 00:00:00.173175485 +316 33 0 days 00:00:00.138461833 +316 34 0 days 00:00:00.172717180 +316 35 0 days 00:00:00.149657246 +316 36 0 days 00:00:00.119541296 +316 37 0 days 00:00:00.140979155 
+316 38 0 days 00:00:00.212298575 +316 39 0 days 00:00:00.193527510 +316 40 0 days 00:00:00.110469625 +316 41 0 days 00:00:00.173387340 +316 42 0 days 00:00:00.112196810 +316 43 0 days 00:00:00.128697815 +316 44 0 days 00:00:00.137969110 +316 45 0 days 00:00:00.173207925 +316 46 0 days 00:00:00.147347491 +316 47 0 days 00:00:00.136454524 +316 48 0 days 00:00:00.156919277 +316 49 0 days 00:00:00.120012204 +316 50 0 days 00:00:00.199998723 +316 51 0 days 00:00:00.173176155 +316 52 0 days 00:00:00.143609992 +316 53 0 days 00:00:00.115316252 +316 54 0 days 00:00:00.133210750 +316 55 0 days 00:00:00.140294396 +316 56 0 days 00:00:00.135525225 +316 57 0 days 00:00:00.146269425 +316 58 0 days 00:00:00.108471180 +316 59 0 days 00:00:00.172055830 +316 60 0 days 00:00:00.127228206 +316 61 0 days 00:00:00.115158500 +316 62 0 days 00:00:00.162612660 +316 63 0 days 00:00:00.121188938 +316 64 0 days 00:00:00.131692755 +316 65 0 days 00:00:00.138301548 +316 66 0 days 00:00:00.108561490 +316 67 0 days 00:00:00.200862684 +316 68 0 days 00:00:00.127605568 +316 69 0 days 00:00:00.118837888 +316 70 0 days 00:00:00.113958916 +316 71 0 days 00:00:00.198405180 +316 72 0 days 00:00:00.130046685 +316 73 0 days 00:00:00.112862710 +316 74 0 days 00:00:00.132009830 +316 75 0 days 00:00:00.129921802 +316 76 0 days 00:00:00.182897248 +316 77 0 days 00:00:00.144984855 +316 78 0 days 00:00:00.120968280 +316 79 0 days 00:00:00.143473406 +316 80 0 days 00:00:00.141977553 +316 81 0 days 00:00:00.136401468 +316 82 0 days 00:00:00.155607027 +316 83 0 days 00:00:00.176926130 +316 84 0 days 00:00:00.151319540 +316 85 0 days 00:00:00.147630785 +316 86 0 days 00:00:00.159411385 +316 87 0 days 00:00:00.123707322 +316 88 0 days 00:00:00.142640805 +316 89 0 days 00:00:00.176730953 +316 90 0 days 00:00:00.170675140 +316 91 0 days 00:00:00.172219545 +316 92 0 days 00:00:00.144527440 +316 93 0 days 00:00:00.134569515 +316 94 0 days 00:00:00.124621150 +316 95 0 days 00:00:00.112046920 +316 96 0 days 
00:00:00.114055696 +316 97 0 days 00:00:00.148931922 +316 98 0 days 00:00:00.126196350 +316 99 0 days 00:00:00.130852140 +316 100 0 days 00:00:00.226493908 +317 1 0 days 00:00:00.084369630 +317 2 0 days 00:00:00.098193695 +317 3 0 days 00:00:00.061973105 +317 4 0 days 00:00:00.098159805 +317 5 0 days 00:00:00.080315917 +317 6 0 days 00:00:00.070285214 +317 7 0 days 00:00:00.098159540 +317 8 0 days 00:00:00.073754820 +317 9 0 days 00:00:00.070142760 +317 10 0 days 00:00:00.099365050 +317 11 0 days 00:00:00.072446395 +317 12 0 days 00:00:00.064707175 +317 13 0 days 00:00:00.072995760 +317 14 0 days 00:00:00.096376490 +317 15 0 days 00:00:00.067261670 +317 16 0 days 00:00:00.066842385 +317 17 0 days 00:00:00.072484316 +317 18 0 days 00:00:00.112763640 +317 19 0 days 00:00:00.084410185 +317 20 0 days 00:00:00.082827110 +317 21 0 days 00:00:00.070260040 +317 22 0 days 00:00:00.069132263 +317 23 0 days 00:00:00.072128520 +317 24 0 days 00:00:00.073258595 +317 25 0 days 00:00:00.064397255 +317 26 0 days 00:00:00.099243275 +317 27 0 days 00:00:00.073902970 +317 28 0 days 00:00:00.062970110 +317 29 0 days 00:00:00.100392775 +317 30 0 days 00:00:00.061465225 +317 31 0 days 00:00:00.102003635 +317 32 0 days 00:00:00.079850545 +317 33 0 days 00:00:00.120391605 +317 34 0 days 00:00:00.064720895 +317 35 0 days 00:00:00.073878695 +317 36 0 days 00:00:00.072683450 +317 37 0 days 00:00:00.073105765 +317 38 0 days 00:00:00.068890223 +317 39 0 days 00:00:00.115080695 +317 40 0 days 00:00:00.127734240 +317 41 0 days 00:00:00.084173164 +317 42 0 days 00:00:00.073397905 +317 43 0 days 00:00:00.081575095 +317 44 0 days 00:00:00.089712454 +317 45 0 days 00:00:00.080306530 +317 46 0 days 00:00:00.093518730 +317 47 0 days 00:00:00.098568255 +317 48 0 days 00:00:00.064276005 +317 49 0 days 00:00:00.069223646 +317 50 0 days 00:00:00.072684130 +317 51 0 days 00:00:00.072524068 +317 52 0 days 00:00:00.105655663 +317 53 0 days 00:00:00.061445135 +317 54 0 days 00:00:00.066722610 +317 55 0 days 
00:00:00.073411850 +317 56 0 days 00:00:00.061804810 +317 57 0 days 00:00:00.099773785 +317 58 0 days 00:00:00.061861675 +317 59 0 days 00:00:00.064027655 +317 60 0 days 00:00:00.072683468 +317 61 0 days 00:00:00.067981426 +317 62 0 days 00:00:00.073815530 +317 63 0 days 00:00:00.098371495 +317 64 0 days 00:00:00.097175305 +317 65 0 days 00:00:00.072709265 +317 66 0 days 00:00:00.063737035 +317 67 0 days 00:00:00.097013830 +317 68 0 days 00:00:00.097806310 +317 69 0 days 00:00:00.101689250 +317 70 0 days 00:00:00.079751232 +317 71 0 days 00:00:00.063938475 +317 72 0 days 00:00:00.072408131 +317 73 0 days 00:00:00.062939330 +317 74 0 days 00:00:00.078729720 +317 75 0 days 00:00:00.067977580 +317 76 0 days 00:00:00.076554345 +317 77 0 days 00:00:00.077093350 +317 78 0 days 00:00:00.098148860 +317 79 0 days 00:00:00.076895972 +317 80 0 days 00:00:00.097953695 +317 81 0 days 00:00:00.111085465 +317 82 0 days 00:00:00.070148160 +317 83 0 days 00:00:00.067361190 +317 84 0 days 00:00:00.081889625 +317 85 0 days 00:00:00.078062684 +317 86 0 days 00:00:00.098764990 +317 87 0 days 00:00:00.104374876 +317 88 0 days 00:00:00.072454635 +317 89 0 days 00:00:00.098617075 +317 90 0 days 00:00:00.072896625 +317 91 0 days 00:00:00.073778815 +317 92 0 days 00:00:00.109626620 +317 93 0 days 00:00:00.067965022 +317 94 0 days 00:00:00.096502660 +317 95 0 days 00:00:00.098129665 +317 96 0 days 00:00:00.094409315 +317 97 0 days 00:00:00.072980345 +317 98 0 days 00:00:00.097621175 +317 99 0 days 00:00:00.076691428 +317 100 0 days 00:00:00.101105055 +318 1 0 days 00:00:00.358781768 +318 2 0 days 00:00:00.496604845 +318 3 0 days 00:00:00.820999860 +318 4 0 days 00:00:00.827050180 +318 5 0 days 00:00:00.331181880 +318 6 0 days 00:00:00.295725184 +318 7 0 days 00:00:00.312400520 +318 8 0 days 00:00:00.331483550 +318 9 0 days 00:00:00.857244730 +318 10 0 days 00:00:00.492882920 +318 11 0 days 00:00:00.970525635 +318 12 0 days 00:00:00.555574385 +318 13 0 days 00:00:00.457060880 +318 14 0 days 
00:00:00.498307240 +318 15 0 days 00:00:00.350765755 +318 16 0 days 00:00:00.321321492 +318 17 0 days 00:00:00.237048260 +318 18 0 days 00:00:00.788384475 +318 19 0 days 00:00:00.793025685 +318 20 0 days 00:00:00.920054874 +318 21 0 days 00:00:00.264608570 +318 22 0 days 00:00:00.422890445 +318 23 0 days 00:00:00.258445625 +318 24 0 days 00:00:00.433793140 +318 25 0 days 00:00:00.273742690 +318 26 0 days 00:00:00.777917890 +318 27 0 days 00:00:00.422690570 +318 28 0 days 00:00:00.479451476 +318 29 0 days 00:00:00.864914970 +318 30 0 days 00:00:00.484851030 +318 31 0 days 00:00:00.279386680 +318 32 0 days 00:00:00.488526175 +318 33 0 days 00:00:00.405168224 +318 34 0 days 00:00:00.463057690 +318 35 0 days 00:00:00.336349300 +318 36 0 days 00:00:00.280377210 +318 37 0 days 00:00:00.320319377 +318 38 0 days 00:00:00.295949120 +318 39 0 days 00:00:00.831460080 +318 40 0 days 00:00:00.282546890 +318 41 0 days 00:00:00.802010890 +318 42 0 days 00:00:00.477845966 +318 43 0 days 00:00:00.421211155 +318 44 0 days 00:00:00.825691060 +318 45 0 days 00:00:00.499095985 +318 46 0 days 00:00:00.341972662 +318 47 0 days 00:00:00.283518660 +318 48 0 days 00:00:00.474299286 +318 49 0 days 00:00:00.797600290 +318 50 0 days 00:00:00.483789070 +318 51 0 days 00:00:00.787799220 +318 52 0 days 00:00:00.562777495 +318 53 0 days 00:00:00.349030045 +318 54 0 days 00:00:00.890701074 +318 55 0 days 00:00:00.279229493 +318 56 0 days 00:00:00.772544145 +318 57 0 days 00:00:00.363063753 +318 58 0 days 00:00:00.386752365 +318 59 0 days 00:00:00.830700056 +318 60 0 days 00:00:00.859750140 +318 61 0 days 00:00:00.435079410 +318 62 0 days 00:00:00.782746205 +318 63 0 days 00:00:00.844050010 +318 64 0 days 00:00:00.961964491 +318 65 0 days 00:00:00.802414840 +318 66 0 days 00:00:00.290282363 +318 67 0 days 00:00:00.375286280 +318 68 0 days 00:00:00.339040450 +318 69 0 days 00:00:00.884838045 +318 70 0 days 00:00:00.432852895 +318 71 0 days 00:00:00.520833302 +318 72 0 days 00:00:00.437059185 +318 73 
0 days 00:00:00.349790440 +318 74 0 days 00:00:00.359535872 +318 75 0 days 00:00:00.844642148 +318 76 0 days 00:00:00.792716145 +318 77 0 days 00:00:00.420440265 +318 78 0 days 00:00:00.764318470 +318 79 0 days 00:00:00.431611265 +318 80 0 days 00:00:00.851581720 +318 81 0 days 00:00:00.315262197 +318 82 0 days 00:00:00.879449545 +318 83 0 days 00:00:00.494721990 +318 84 0 days 00:00:00.446224285 +318 85 0 days 00:00:00.792442295 +318 86 0 days 00:00:00.796926955 +318 87 0 days 00:00:00.432602955 +318 88 0 days 00:00:00.448605384 +318 89 0 days 00:00:00.787754195 +318 90 0 days 00:00:00.577656333 +318 91 0 days 00:00:00.437016380 +318 92 0 days 00:00:00.786443505 +318 93 0 days 00:00:00.428100990 +318 94 0 days 00:00:00.460506545 +318 95 0 days 00:00:00.910245940 +318 96 0 days 00:00:00.896020370 +318 97 0 days 00:00:00.899667423 +318 98 0 days 00:00:00.348176510 +318 99 0 days 00:00:00.845804112 +318 100 0 days 00:00:00.442572395 +319 1 0 days 00:00:00.608803695 +319 2 0 days 00:00:00.824034770 +319 3 0 days 00:00:00.566091880 +319 4 0 days 00:00:00.502712533 +319 5 0 days 00:00:00.285073890 +319 6 0 days 00:00:00.505770003 +319 7 0 days 00:00:00.304016783 +319 8 0 days 00:00:00.287419400 +319 9 0 days 00:00:00.468523665 +319 10 0 days 00:00:00.427163084 +319 11 0 days 00:00:00.352423100 +319 12 0 days 00:00:00.447524640 +319 13 0 days 00:00:00.572314380 +319 14 0 days 00:00:00.282298596 +319 15 0 days 00:00:00.830629870 +319 16 0 days 00:00:00.897057646 +319 17 0 days 00:00:00.540598834 +319 18 0 days 00:00:00.281381708 +319 19 0 days 00:00:00.441801285 +319 20 0 days 00:00:00.484556596 +319 21 0 days 00:00:00.796935232 +319 22 0 days 00:00:00.833769764 +319 23 0 days 00:00:00.915382203 +319 24 0 days 00:00:00.823415755 +319 25 0 days 00:00:00.826039750 +319 26 0 days 00:00:00.437489165 +319 27 0 days 00:00:00.880882420 +319 28 0 days 00:00:00.826930610 +319 29 0 days 00:00:00.292500946 +319 30 0 days 00:00:00.268687320 +319 31 0 days 00:00:00.383332015 +319 32 0 
days 00:00:00.445583370 +319 33 0 days 00:00:00.822113835 +319 34 0 days 00:00:00.351587500 +319 35 0 days 00:00:00.442058250 +319 36 0 days 00:00:00.499239396 +319 37 0 days 00:00:01.032375188 +319 38 0 days 00:00:00.325041720 +319 39 0 days 00:00:00.831618890 +319 40 0 days 00:00:00.922563414 +319 41 0 days 00:00:00.324357976 +319 42 0 days 00:00:00.817206420 +319 43 0 days 00:00:00.818480345 +319 44 0 days 00:00:00.323887926 +319 45 0 days 00:00:00.245675570 +319 46 0 days 00:00:00.821496825 +319 47 0 days 00:00:00.476437892 +319 48 0 days 00:00:00.521333031 +319 49 0 days 00:00:00.848360455 +319 50 0 days 00:00:00.484701615 +319 51 0 days 00:00:00.507268400 +319 52 0 days 00:00:00.446638620 +319 53 0 days 00:00:00.735027354 +319 54 0 days 00:00:00.924046168 +319 55 0 days 00:00:00.445348685 +319 56 0 days 00:00:01.032876846 +319 57 0 days 00:00:00.272894610 +319 58 0 days 00:00:00.280047820 +319 59 0 days 00:00:00.308681150 +319 60 0 days 00:00:00.269097495 +319 61 0 days 00:00:00.308509845 +319 62 0 days 00:00:00.447589565 +319 63 0 days 00:00:00.591361145 +319 64 0 days 00:00:00.404972386 +319 65 0 days 00:00:00.823852035 +319 66 0 days 00:00:00.469052065 +319 67 0 days 00:00:00.356293428 +319 68 0 days 00:00:00.312034967 +319 69 0 days 00:00:00.331286413 +319 70 0 days 00:00:00.527550190 +319 71 0 days 00:00:00.697570930 +319 72 0 days 00:00:00.449839640 +319 73 0 days 00:00:00.503900326 +319 74 0 days 00:00:00.315739231 +319 75 0 days 00:00:00.366348370 +319 76 0 days 00:00:00.739266265 +319 77 0 days 00:00:00.436514015 +319 78 0 days 00:00:00.871860448 +319 79 0 days 00:00:00.872818688 +319 80 0 days 00:00:00.280610572 +319 81 0 days 00:00:00.448396035 +319 82 0 days 00:00:00.531887750 +319 83 0 days 00:00:00.449875240 +319 84 0 days 00:00:00.517779860 +319 85 0 days 00:00:00.775361545 +319 86 0 days 00:00:00.298386353 +319 87 0 days 00:00:00.860586555 +319 88 0 days 00:00:00.277385375 +319 89 0 days 00:00:00.484756788 +319 90 0 days 00:00:00.265285755 
+319 91 0 days 00:00:00.877099004 +319 92 0 days 00:00:00.820049405 +319 93 0 days 00:00:00.890860015 +319 94 0 days 00:00:00.267147875 +319 95 0 days 00:00:00.444377300 +319 96 0 days 00:00:00.480567372 +319 97 0 days 00:00:00.451924055 +319 98 0 days 00:00:00.925517300 +319 99 0 days 00:00:00.456629510 +319 100 0 days 00:00:00.269763975 +320 1 0 days 00:00:00.202734780 +320 2 0 days 00:00:00.438662773 +320 3 0 days 00:00:00.159873200 +320 4 0 days 00:00:00.222148523 +320 5 0 days 00:00:00.221976082 +320 6 0 days 00:00:00.388585415 +320 7 0 days 00:00:00.222038195 +320 8 0 days 00:00:00.232924740 +320 9 0 days 00:00:00.454856974 +320 10 0 days 00:00:00.149999380 +320 11 0 days 00:00:00.418375424 +320 12 0 days 00:00:00.244363228 +320 13 0 days 00:00:00.429411928 +320 14 0 days 00:00:00.226300260 +320 15 0 days 00:00:00.286743390 +320 16 0 days 00:00:00.437416580 +320 17 0 days 00:00:00.139829244 +320 18 0 days 00:00:00.489053580 +320 19 0 days 00:00:00.471673746 +320 20 0 days 00:00:00.226370380 +320 21 0 days 00:00:00.254798520 +320 22 0 days 00:00:00.239941152 +320 23 0 days 00:00:00.284974303 +320 24 0 days 00:00:00.146566076 +320 25 0 days 00:00:00.431602823 +320 26 0 days 00:00:00.399915605 +320 27 0 days 00:00:00.168307572 +320 28 0 days 00:00:00.170304680 +320 29 0 days 00:00:00.405035075 +320 30 0 days 00:00:00.241652092 +320 31 0 days 00:00:00.182264700 +320 32 0 days 00:00:00.234634304 +320 33 0 days 00:00:00.221942595 +320 34 0 days 00:00:00.223226660 +320 35 0 days 00:00:00.230186025 +320 36 0 days 00:00:00.421246648 +320 37 0 days 00:00:00.232733755 +320 38 0 days 00:00:00.158411676 +320 39 0 days 00:00:00.140395680 +320 40 0 days 00:00:00.230612465 +320 41 0 days 00:00:00.234624200 +320 42 0 days 00:00:00.246375004 +320 43 0 days 00:00:00.430575680 +320 44 0 days 00:00:00.225621840 +320 45 0 days 00:00:00.322648595 +320 46 0 days 00:00:00.196209470 +320 47 0 days 00:00:00.144743023 +320 48 0 days 00:00:00.248320176 +320 49 0 days 00:00:00.148966230 
+320 50 0 days 00:00:00.434543080 +320 51 0 days 00:00:00.259982295 +320 52 0 days 00:00:00.223422200 +320 53 0 days 00:00:00.402602215 +320 54 0 days 00:00:00.239994004 +320 55 0 days 00:00:00.194698616 +320 56 0 days 00:00:00.138058240 +320 57 0 days 00:00:00.149389344 +320 58 0 days 00:00:00.242831892 +320 59 0 days 00:00:00.243170020 +320 60 0 days 00:00:00.441262830 +320 61 0 days 00:00:00.416946856 +320 62 0 days 00:00:00.240799388 +320 63 0 days 00:00:00.135808490 +320 64 0 days 00:00:00.125267655 +320 65 0 days 00:00:00.401209660 +320 66 0 days 00:00:00.451826632 +320 67 0 days 00:00:00.384305075 +320 68 0 days 00:00:00.258010340 +320 69 0 days 00:00:00.165163890 +320 70 0 days 00:00:00.386688770 +320 71 0 days 00:00:00.146335560 +320 72 0 days 00:00:00.220343073 +320 73 0 days 00:00:00.445513790 +320 74 0 days 00:00:00.272455563 +320 75 0 days 00:00:00.247327912 +320 76 0 days 00:00:00.148748030 +320 77 0 days 00:00:00.146810330 +320 78 0 days 00:00:00.169836908 +320 79 0 days 00:00:00.427792384 +320 80 0 days 00:00:00.174477350 +320 81 0 days 00:00:00.172005532 +320 82 0 days 00:00:00.251186025 +320 83 0 days 00:00:00.425936396 +320 84 0 days 00:00:00.230584410 +320 85 0 days 00:00:00.203636100 +320 86 0 days 00:00:00.246430743 +320 87 0 days 00:00:00.223810420 +320 88 0 days 00:00:00.166284308 +320 89 0 days 00:00:00.215870284 +320 90 0 days 00:00:00.161196025 +320 91 0 days 00:00:00.406593825 +320 92 0 days 00:00:00.227437290 +320 93 0 days 00:00:00.284154742 +320 94 0 days 00:00:00.210362635 +320 95 0 days 00:00:00.439942036 +320 96 0 days 00:00:00.247845960 +320 97 0 days 00:00:00.402809050 +320 98 0 days 00:00:00.268124468 +320 99 0 days 00:00:00.397806470 +320 100 0 days 00:00:00.238527308 +321 1 0 days 00:00:00.152256036 +321 2 0 days 00:00:00.245622250 +321 3 0 days 00:00:00.279333300 +321 4 0 days 00:00:00.269850448 +321 5 0 days 00:00:00.428411796 +321 6 0 days 00:00:00.136346735 +321 7 0 days 00:00:00.496585520 +321 8 0 days 00:00:00.415527595 
+321 9 0 days 00:00:00.425220065 +321 10 0 days 00:00:00.169889008 +321 11 0 days 00:00:00.405756405 +321 12 0 days 00:00:00.207839925 +321 13 0 days 00:00:00.145014992 +321 14 0 days 00:00:00.400009095 +321 15 0 days 00:00:00.232220240 +321 16 0 days 00:00:00.137609630 +321 17 0 days 00:00:00.257908226 +321 18 0 days 00:00:00.163400621 +321 19 0 days 00:00:00.449636826 +321 20 0 days 00:00:00.433388860 +321 21 0 days 00:00:00.233165945 +321 22 0 days 00:00:00.142362580 +321 23 0 days 00:00:00.264481314 +321 24 0 days 00:00:00.266532656 +321 25 0 days 00:00:00.402358725 +321 26 0 days 00:00:00.378424795 +321 27 0 days 00:00:00.173963710 +321 28 0 days 00:00:00.436347405 +321 29 0 days 00:00:00.277887636 +321 30 0 days 00:00:00.160113257 +321 31 0 days 00:00:00.260434450 +321 32 0 days 00:00:00.413273953 +321 33 0 days 00:00:00.166805520 +321 34 0 days 00:00:00.136059415 +321 35 0 days 00:00:00.303256176 +321 36 0 days 00:00:00.278710155 +321 37 0 days 00:00:00.247161668 +321 38 0 days 00:00:00.144691144 +321 39 0 days 00:00:00.256627210 +321 40 0 days 00:00:00.528328057 +321 41 0 days 00:00:00.200216560 +321 42 0 days 00:00:00.444261628 +321 43 0 days 00:00:00.251603320 +321 44 0 days 00:00:00.428042360 +321 45 0 days 00:00:00.442457096 +321 46 0 days 00:00:00.263248240 +321 47 0 days 00:00:00.370947205 +321 48 0 days 00:00:00.473178876 +321 49 0 days 00:00:00.413269805 +321 50 0 days 00:00:00.177693634 +321 51 0 days 00:00:00.152458640 +321 52 0 days 00:00:00.146293065 +321 53 0 days 00:00:00.435200810 +321 54 0 days 00:00:00.317313984 +321 55 0 days 00:00:00.269988356 +321 56 0 days 00:00:00.148755988 +321 57 0 days 00:00:00.438657880 +321 58 0 days 00:00:00.158632512 +321 59 0 days 00:00:00.142081970 +321 60 0 days 00:00:00.454540645 +321 61 0 days 00:00:00.308438593 +321 62 0 days 00:00:00.176113730 +321 63 0 days 00:00:00.264719817 +321 64 0 days 00:00:00.463008430 +321 65 0 days 00:00:00.462549770 +321 66 0 days 00:00:00.232795035 +321 67 0 days 
00:00:00.262282400 +321 68 0 days 00:00:00.447797073 +321 69 0 days 00:00:00.273548588 +321 70 0 days 00:00:00.460133096 +321 71 0 days 00:00:00.145734824 +321 72 0 days 00:00:00.251480272 +321 73 0 days 00:00:00.456122960 +321 74 0 days 00:00:00.437863584 +321 75 0 days 00:00:00.405795955 +321 76 0 days 00:00:00.252589952 +321 77 0 days 00:00:00.403487096 +321 78 0 days 00:00:00.211606843 +321 79 0 days 00:00:00.466663437 +321 80 0 days 00:00:00.250623104 +321 81 0 days 00:00:00.364373610 +321 82 0 days 00:00:00.458740913 +321 83 0 days 00:00:00.468614116 +321 84 0 days 00:00:00.177028998 +321 85 0 days 00:00:00.443522420 +321 86 0 days 00:00:00.254465288 +321 87 0 days 00:00:00.140754625 +321 88 0 days 00:00:00.199762733 +321 89 0 days 00:00:00.153797950 +321 90 0 days 00:00:00.248808788 +321 91 0 days 00:00:00.418778850 +321 92 0 days 00:00:00.198135556 +321 93 0 days 00:00:00.161076184 +321 94 0 days 00:00:00.418670745 +321 95 0 days 00:00:00.161936672 +321 96 0 days 00:00:00.164598015 +321 97 0 days 00:00:00.145248000 +321 98 0 days 00:00:00.149781176 +321 99 0 days 00:00:00.441910404 +321 100 0 days 00:00:00.165891227 +322 1 0 days 00:00:00.509726214 +322 2 0 days 00:00:00.385365088 +322 3 0 days 00:00:00.385701875 +322 4 0 days 00:00:01.222339220 +322 5 0 days 00:00:01.074382260 +322 6 0 days 00:00:01.382756436 +322 7 0 days 00:00:00.389001380 +322 8 0 days 00:00:00.950998780 +322 9 0 days 00:00:00.801234944 +322 10 0 days 00:00:01.340921578 +322 11 0 days 00:00:00.596877405 +322 12 0 days 00:00:00.480478580 +322 13 0 days 00:00:00.246687180 +322 14 0 days 00:00:00.582697940 +322 15 0 days 00:00:00.594570717 +322 16 0 days 00:00:00.387680376 +322 17 0 days 00:00:01.163561744 +322 18 0 days 00:00:00.348806648 +322 19 0 days 00:00:00.422847585 +322 20 0 days 00:00:00.536300920 +322 21 0 days 00:00:00.492639572 +322 22 0 days 00:00:00.467831860 +322 23 0 days 00:00:01.123582635 +322 24 0 days 00:00:00.480156834 +322 25 0 days 00:00:00.411066792 +322 26 0 days 
00:00:00.751595740 +322 27 0 days 00:00:00.614615932 +322 28 0 days 00:00:00.635747796 +322 29 0 days 00:00:01.150784932 +322 30 0 days 00:00:01.179724956 +322 31 0 days 00:00:00.304185425 +322 32 0 days 00:00:00.410310128 +322 33 0 days 00:00:00.490835853 +322 34 0 days 00:00:01.310517646 +322 35 0 days 00:00:00.600509330 +322 36 0 days 00:00:00.650772908 +322 37 0 days 00:00:01.161791120 +322 38 0 days 00:00:00.745221876 +322 39 0 days 00:00:00.732813745 +322 40 0 days 00:00:00.686212712 +322 41 0 days 00:00:00.836403092 +322 42 0 days 00:00:00.579169040 +322 43 0 days 00:00:00.598600870 +322 44 0 days 00:00:01.119892790 +322 45 0 days 00:00:01.068072926 +322 46 0 days 00:00:00.831685280 +322 47 0 days 00:00:00.435370243 +322 48 0 days 00:00:00.821641263 +322 49 0 days 00:00:00.575175715 +322 50 0 days 00:00:00.764936115 +322 51 0 days 00:00:00.532104685 +322 52 0 days 00:00:00.337075770 +322 53 0 days 00:00:01.046057475 +322 54 0 days 00:00:00.556338985 +322 55 0 days 00:00:00.424619720 +322 56 0 days 00:00:01.298506666 +322 57 0 days 00:00:00.483592060 +322 58 0 days 00:00:00.444700510 +322 59 0 days 00:00:01.068085950 +322 60 0 days 00:00:00.346701890 +322 61 0 days 00:00:00.518273285 +322 62 0 days 00:00:00.539017864 +322 63 0 days 00:00:00.455878205 +322 64 0 days 00:00:00.328770180 +322 65 0 days 00:00:01.039057205 +322 66 0 days 00:00:00.631680880 +322 67 0 days 00:00:01.074534733 +322 68 0 days 00:00:01.053268020 +322 69 0 days 00:00:00.586721968 +322 70 0 days 00:00:00.981637875 +322 71 0 days 00:00:01.137807793 +322 72 0 days 00:00:00.338320450 +322 73 0 days 00:00:01.050593750 +322 74 0 days 00:00:01.011263580 +322 75 0 days 00:00:00.338131760 +322 76 0 days 00:00:00.930905882 +322 77 0 days 00:00:00.764657350 +322 78 0 days 00:00:01.016668210 +322 79 0 days 00:00:01.086468710 +322 80 0 days 00:00:00.783490600 +322 81 0 days 00:00:00.344836860 +322 82 0 days 00:00:00.537738300 +322 83 0 days 00:00:00.649860660 +322 84 0 days 00:00:00.355454705 +322 85 
0 days 00:00:00.426945226 +322 86 0 days 00:00:00.718553095 +322 87 0 days 00:00:00.351327396 +322 88 0 days 00:00:01.233754160 +322 89 0 days 00:00:00.737043980 +322 90 0 days 00:00:00.329806391 +322 91 0 days 00:00:00.615820853 +322 92 0 days 00:00:00.382158884 +322 93 0 days 00:00:00.739468524 +322 94 0 days 00:00:00.324964210 +322 95 0 days 00:00:00.348415370 +322 96 0 days 00:00:00.345146890 +322 97 0 days 00:00:00.364015725 +322 98 0 days 00:00:00.421345494 +322 99 0 days 00:00:00.857722090 +322 100 0 days 00:00:01.173845395 +323 1 0 days 00:00:00.188143255 +323 2 0 days 00:00:00.512921526 +323 3 0 days 00:00:00.295707116 +323 4 0 days 00:00:00.500365670 +323 5 0 days 00:00:00.488571325 +323 6 0 days 00:00:00.523409216 +323 7 0 days 00:00:00.430847384 +323 8 0 days 00:00:00.235989055 +323 9 0 days 00:00:00.330122160 +323 10 0 days 00:00:00.190432268 +323 11 0 days 00:00:00.244775405 +323 12 0 days 00:00:00.430944645 +323 13 0 days 00:00:00.195606845 +323 14 0 days 00:00:00.311996270 +323 15 0 days 00:00:00.469521725 +323 16 0 days 00:00:00.212300860 +323 17 0 days 00:00:00.491447204 +323 18 0 days 00:00:00.402116260 +323 19 0 days 00:00:00.249593455 +323 20 0 days 00:00:00.147323865 +323 21 0 days 00:00:00.492125920 +323 22 0 days 00:00:00.464327912 +323 23 0 days 00:00:00.267168392 +323 24 0 days 00:00:00.205439020 +323 25 0 days 00:00:00.417172765 +323 26 0 days 00:00:00.528039240 +323 27 0 days 00:00:00.315420993 +323 28 0 days 00:00:00.236185050 +323 29 0 days 00:00:00.187994520 +323 30 0 days 00:00:00.162866345 +323 31 0 days 00:00:00.247199320 +323 32 0 days 00:00:00.520178163 +323 33 0 days 00:00:00.146219247 +323 34 0 days 00:00:00.220133028 +323 35 0 days 00:00:00.188112650 +323 36 0 days 00:00:00.193295935 +323 37 0 days 00:00:00.176272706 +323 38 0 days 00:00:00.208148500 +323 39 0 days 00:00:00.292405896 +323 40 0 days 00:00:00.144590145 +323 41 0 days 00:00:00.514198113 +323 42 0 days 00:00:00.484311772 +323 43 0 days 00:00:00.475984108 +323 44 0 
days 00:00:00.203008983 +323 45 0 days 00:00:00.298318865 +323 46 0 days 00:00:00.504020417 +323 47 0 days 00:00:00.300225866 +323 48 0 days 00:00:00.511190624 +323 49 0 days 00:00:00.475974320 +323 50 0 days 00:00:00.160666856 +323 51 0 days 00:00:00.300507812 +323 52 0 days 00:00:00.219436665 +323 53 0 days 00:00:00.490596956 +323 54 0 days 00:00:00.143536512 +323 55 0 days 00:00:00.185305488 +323 56 0 days 00:00:00.479575786 +323 57 0 days 00:00:00.158694280 +323 58 0 days 00:00:00.227586855 +323 59 0 days 00:00:00.272301755 +323 60 0 days 00:00:00.491873716 +323 61 0 days 00:00:00.246291295 +323 62 0 days 00:00:00.137489335 +323 63 0 days 00:00:00.536796860 +323 64 0 days 00:00:00.501208284 +323 65 0 days 00:00:00.423139380 +323 66 0 days 00:00:00.439550070 +323 67 0 days 00:00:00.555564636 +323 68 0 days 00:00:00.250644955 +323 69 0 days 00:00:00.230773835 +323 70 0 days 00:00:00.146252546 +323 71 0 days 00:00:00.198052713 +323 72 0 days 00:00:00.231184740 +323 73 0 days 00:00:00.510290505 +323 74 0 days 00:00:00.249494228 +323 75 0 days 00:00:00.237577765 +323 76 0 days 00:00:00.247773665 +323 77 0 days 00:00:00.180745892 +323 78 0 days 00:00:00.150371995 +323 79 0 days 00:00:00.146386895 +323 80 0 days 00:00:00.156330010 +323 81 0 days 00:00:00.424382215 +323 82 0 days 00:00:00.241447600 +323 83 0 days 00:00:00.263120642 +323 84 0 days 00:00:00.408504130 +323 85 0 days 00:00:00.195206600 +323 86 0 days 00:00:00.558330444 +323 87 0 days 00:00:00.464735172 +323 88 0 days 00:00:00.299049077 +323 89 0 days 00:00:00.477392395 +323 90 0 days 00:00:00.432797075 +323 91 0 days 00:00:00.256787500 +323 92 0 days 00:00:00.163320645 +323 93 0 days 00:00:00.160646130 +323 94 0 days 00:00:00.240109075 +323 95 0 days 00:00:00.162695755 +323 96 0 days 00:00:00.167608352 +323 97 0 days 00:00:00.248183423 +323 98 0 days 00:00:00.476868905 +323 99 0 days 00:00:00.530565086 +323 100 0 days 00:00:00.478335465 +324 1 0 days 00:00:00.512989740 +324 2 0 days 00:00:00.392606845 +324 
3 0 days 00:00:00.587279745 +324 4 0 days 00:00:00.962993570 +324 5 0 days 00:00:00.586963796 +324 6 0 days 00:00:00.453426836 +324 7 0 days 00:00:00.462122071 +324 8 0 days 00:00:00.334155100 +324 9 0 days 00:00:00.582022720 +324 10 0 days 00:00:00.759070390 +324 11 0 days 00:00:00.562429375 +324 12 0 days 00:00:00.927460285 +324 13 0 days 00:00:01.157503160 +324 14 0 days 00:00:00.391075488 +324 15 0 days 00:00:00.763767570 +324 16 0 days 00:00:00.554177255 +324 17 0 days 00:00:00.332597140 +324 18 0 days 00:00:00.616354960 +324 19 0 days 00:00:00.645402540 +324 20 0 days 00:00:01.078354865 +324 21 0 days 00:00:01.134709200 +324 22 0 days 00:00:01.441967975 +324 23 0 days 00:00:01.056832480 +324 24 0 days 00:00:00.629139972 +324 25 0 days 00:00:00.377903428 +324 26 0 days 00:00:00.618716835 +324 27 0 days 00:00:00.400087735 +324 28 0 days 00:00:01.270320886 +324 29 0 days 00:00:00.981233345 +324 30 0 days 00:00:00.953931820 +324 31 0 days 00:00:01.132089213 +324 32 0 days 00:00:00.543084976 +324 33 0 days 00:00:00.527287745 +324 34 0 days 00:00:01.257162092 +324 35 0 days 00:00:00.947680270 +324 36 0 days 00:00:00.575661352 +324 37 0 days 00:00:00.387166630 +324 38 0 days 00:00:00.790056716 +324 39 0 days 00:00:00.293683920 +324 40 0 days 00:00:01.391364606 +324 41 0 days 00:00:00.613496145 +324 42 0 days 00:00:00.602432294 +324 43 0 days 00:00:00.600212186 +324 44 0 days 00:00:00.347138580 +324 45 0 days 00:00:00.664239766 +324 46 0 days 00:00:00.510081424 +324 47 0 days 00:00:00.750482512 +324 48 0 days 00:00:01.201214193 +324 49 0 days 00:00:00.343018434 +324 50 0 days 00:00:00.349146234 +324 51 0 days 00:00:00.750234465 +324 52 0 days 00:00:00.727056655 +324 53 0 days 00:00:01.181416926 +324 54 0 days 00:00:00.385403967 +324 55 0 days 00:00:01.230450772 +324 56 0 days 00:00:00.314479516 +324 57 0 days 00:00:01.075478396 +324 58 0 days 00:00:00.814574564 +324 59 0 days 00:00:00.492681240 +324 60 0 days 00:00:00.331086286 +324 61 0 days 00:00:00.467803910 +324 
62 0 days 00:00:01.089368374 +324 63 0 days 00:00:00.554400596 +324 64 0 days 00:00:00.562850812 +324 65 0 days 00:00:00.327586143 +324 66 0 days 00:00:00.368988332 +324 67 0 days 00:00:00.598138013 +324 68 0 days 00:00:00.352121880 +324 69 0 days 00:00:00.460189077 +324 70 0 days 00:00:00.559259985 +324 71 0 days 00:00:00.798946952 +324 72 0 days 00:00:01.158931976 +324 73 0 days 00:00:01.189728615 +324 74 0 days 00:00:00.571534916 +324 75 0 days 00:00:00.349008820 +324 76 0 days 00:00:00.784980904 +324 77 0 days 00:00:00.575211325 +324 78 0 days 00:00:00.421018852 +324 79 0 days 00:00:01.048750955 +324 80 0 days 00:00:00.839374428 +324 81 0 days 00:00:00.604748187 +324 82 0 days 00:00:01.144895225 +324 83 0 days 00:00:00.324710300 +324 84 0 days 00:00:00.342561544 +324 85 0 days 00:00:01.109885446 +324 86 0 days 00:00:00.435328334 +324 87 0 days 00:00:00.729846825 +324 88 0 days 00:00:01.166591720 +324 89 0 days 00:00:00.614219263 +324 90 0 days 00:00:00.558975748 +324 91 0 days 00:00:00.422631296 +324 92 0 days 00:00:01.257730820 +324 93 0 days 00:00:00.356931386 +324 94 0 days 00:00:00.532987256 +324 95 0 days 00:00:00.561717200 +324 96 0 days 00:00:01.057897606 +324 97 0 days 00:00:00.483602530 +324 98 0 days 00:00:00.730609872 +324 99 0 days 00:00:00.354491346 +324 100 0 days 00:00:00.931739075 +325 1 0 days 00:00:00.523084920 +325 2 0 days 00:00:00.335563525 +325 3 0 days 00:00:00.576215423 +325 4 0 days 00:00:00.249887480 +325 5 0 days 00:00:00.297115024 +325 6 0 days 00:00:00.191381271 +325 7 0 days 00:00:00.214425787 +325 8 0 days 00:00:00.334069842 +325 9 0 days 00:00:00.268056885 +325 10 0 days 00:00:00.311693831 +325 11 0 days 00:00:00.319536765 +325 12 0 days 00:00:00.238239395 +325 13 0 days 00:00:00.661827535 +325 14 0 days 00:00:00.346804612 +325 15 0 days 00:00:00.487206568 +325 16 0 days 00:00:00.521592872 +325 17 0 days 00:00:00.331251086 +325 18 0 days 00:00:00.368213115 +325 19 0 days 00:00:00.335894413 +325 20 0 days 00:00:00.270858668 +325 
21 0 days 00:00:00.482769615 +325 22 0 days 00:00:00.236495056 +325 23 0 days 00:00:00.554667706 +325 24 0 days 00:00:00.511009564 +325 25 0 days 00:00:00.391409630 +325 26 0 days 00:00:00.330412915 +325 27 0 days 00:00:00.197493575 +325 28 0 days 00:00:00.199997342 +325 29 0 days 00:00:00.219325234 +325 30 0 days 00:00:00.329543960 +325 31 0 days 00:00:00.201914170 +325 32 0 days 00:00:00.210007240 +325 33 0 days 00:00:00.234600240 +325 34 0 days 00:00:00.324508913 +325 35 0 days 00:00:00.264441354 +325 36 0 days 00:00:00.466425146 +325 37 0 days 00:00:00.686831090 +325 38 0 days 00:00:00.415007528 +325 39 0 days 00:00:00.261464068 +325 40 0 days 00:00:00.709471422 +325 41 0 days 00:00:00.322211873 +325 42 0 days 00:00:00.324123152 +325 43 0 days 00:00:00.208444416 +325 44 0 days 00:00:00.522582712 +325 45 0 days 00:00:00.239688816 +325 46 0 days 00:00:00.213388006 +325 47 0 days 00:00:00.586463993 +325 48 0 days 00:00:00.176442288 +325 49 0 days 00:00:00.419962688 +325 50 0 days 00:00:00.385911540 +325 51 0 days 00:00:00.164608690 +325 52 0 days 00:00:00.431848296 +325 53 0 days 00:00:00.319869808 +325 54 0 days 00:00:00.574769024 +325 55 0 days 00:00:00.179699955 +325 56 0 days 00:00:00.519278336 +325 57 0 days 00:00:00.415548932 +325 58 0 days 00:00:00.196920345 +325 59 0 days 00:00:00.221304856 +325 60 0 days 00:00:00.374195008 +325 61 0 days 00:00:00.312489073 +325 62 0 days 00:00:00.522157495 +325 63 0 days 00:00:00.191930442 +325 64 0 days 00:00:00.336547865 +325 65 0 days 00:00:00.184819437 +325 66 0 days 00:00:00.486394320 +325 67 0 days 00:00:00.217505812 +325 68 0 days 00:00:00.389380076 +325 69 0 days 00:00:00.208682032 +325 70 0 days 00:00:00.289107044 +325 71 0 days 00:00:00.298415450 +325 72 0 days 00:00:00.550083960 +325 73 0 days 00:00:00.172275252 +325 74 0 days 00:00:00.333034833 +325 75 0 days 00:00:00.180063742 +325 76 0 days 00:00:00.253874553 +325 77 0 days 00:00:00.559257991 +325 78 0 days 00:00:00.585128002 +325 79 0 days 
00:00:00.174589377 +325 80 0 days 00:00:00.178023757 +325 81 0 days 00:00:00.280691665 +325 82 0 days 00:00:00.512162492 +325 83 0 days 00:00:00.525156230 +325 84 0 days 00:00:00.538888300 +325 85 0 days 00:00:00.241891636 +325 86 0 days 00:00:00.239464248 +325 87 0 days 00:00:00.552933365 +325 88 0 days 00:00:00.469331335 +325 89 0 days 00:00:00.226218977 +325 90 0 days 00:00:00.479397400 +325 91 0 days 00:00:00.180744821 +325 92 0 days 00:00:00.414092442 +325 93 0 days 00:00:00.213135829 +325 94 0 days 00:00:00.539700848 +325 95 0 days 00:00:00.532052917 +325 96 0 days 00:00:00.578966016 +325 97 0 days 00:00:00.560001652 +325 98 0 days 00:00:00.191972996 +325 99 0 days 00:00:00.602411170 +325 100 0 days 00:00:00.171272593 +326 1 0 days 00:00:08.194134704 +326 2 0 days 00:00:04.882243744 +326 3 0 days 00:00:09.010609344 +326 4 0 days 00:00:16.348877232 +326 5 0 days 00:00:06.395235228 +326 6 0 days 00:00:12.299474560 +326 7 0 days 00:00:12.556747635 +326 8 0 days 00:00:14.899818313 +326 9 0 days 00:00:05.261685756 +326 10 0 days 00:00:13.667932270 +326 11 0 days 00:00:07.097763340 +326 12 0 days 00:00:07.309026560 +326 13 0 days 00:00:05.332474243 +326 14 0 days 00:00:04.964593442 +326 15 0 days 00:00:10.772832320 +326 16 0 days 00:00:13.696752300 +326 17 0 days 00:00:14.730447230 +326 18 0 days 00:00:11.254518986 +326 19 0 days 00:00:09.110150280 +326 20 0 days 00:00:07.438394185 +326 21 0 days 00:00:04.377686860 +326 22 0 days 00:00:07.515257830 +326 23 0 days 00:00:14.257944103 +326 24 0 days 00:00:04.833126293 +326 25 0 days 00:00:14.968054630 +326 26 0 days 00:00:07.362081460 +326 27 0 days 00:00:15.643203480 +326 28 0 days 00:00:04.505702260 +326 29 0 days 00:00:06.417893065 +326 30 0 days 00:00:04.743018532 +326 31 0 days 00:00:11.604080893 +326 32 0 days 00:00:07.834393276 +326 33 0 days 00:00:11.791460595 +326 34 0 days 00:00:12.860802165 +326 35 0 days 00:00:04.912079272 +326 36 0 days 00:00:04.967683435 +327 1 0 days 00:00:09.606360688 +327 2 0 days 
00:00:09.852189930 +327 3 0 days 00:00:12.190041926 +327 4 0 days 00:00:06.109477077 +327 5 0 days 00:00:12.973083366 +327 6 0 days 00:00:17.257559462 +327 7 0 days 00:00:07.632766872 +327 8 0 days 00:00:06.840294987 +327 9 0 days 00:00:04.499143004 +327 10 0 days 00:00:09.108478017 +327 11 0 days 00:00:12.861429246 +327 12 0 days 00:00:13.305660880 +327 13 0 days 00:00:07.900190804 +327 14 0 days 00:00:14.214185616 +327 15 0 days 00:00:04.871221565 +327 16 0 days 00:00:07.138697180 +327 17 0 days 00:00:17.334291817 +327 18 0 days 00:00:04.796769284 +327 19 0 days 00:00:05.717488620 +327 20 0 days 00:00:05.221845184 +327 21 0 days 00:00:07.752050245 +327 22 0 days 00:00:15.543115955 +327 23 0 days 00:00:07.274856100 +327 24 0 days 00:00:17.992994242 +327 25 0 days 00:00:14.686960453 +327 26 0 days 00:00:04.544103690 +327 27 0 days 00:00:17.187155694 +327 28 0 days 00:00:07.710398315 +327 29 0 days 00:00:08.232553750 +327 30 0 days 00:00:15.988724420 +327 31 0 days 00:00:10.580105945 +328 1 0 days 00:00:07.163488470 +328 2 0 days 00:00:06.374233495 +328 3 0 days 00:00:02.365680365 +328 4 0 days 00:00:02.748436188 +328 5 0 days 00:00:02.409542665 +328 6 0 days 00:00:02.385863310 +328 7 0 days 00:00:02.440697118 +328 8 0 days 00:00:02.573465020 +328 9 0 days 00:00:02.428590817 +328 10 0 days 00:00:04.294725431 +328 11 0 days 00:00:07.687349586 +328 12 0 days 00:00:02.457541202 +328 13 0 days 00:00:07.388451580 +328 14 0 days 00:00:06.599813906 +328 15 0 days 00:00:02.297720000 +328 16 0 days 00:00:02.075880290 +328 17 0 days 00:00:03.969066648 +328 18 0 days 00:00:03.978092845 +328 19 0 days 00:00:04.474931492 +328 20 0 days 00:00:03.742005380 +328 21 0 days 00:00:08.282033680 +328 22 0 days 00:00:04.186321143 +328 23 0 days 00:00:06.576418310 +328 24 0 days 00:00:03.480038800 +328 25 0 days 00:00:02.603678813 +328 26 0 days 00:00:06.943123428 +328 27 0 days 00:00:07.507322936 +328 28 0 days 00:00:07.673609380 +328 29 0 days 00:00:03.937769920 +328 30 0 days 
00:00:02.533589474 +328 31 0 days 00:00:04.217854788 +328 32 0 days 00:00:08.314236777 +328 33 0 days 00:00:02.439322462 +328 34 0 days 00:00:06.762128908 +328 35 0 days 00:00:04.990859617 +328 36 0 days 00:00:02.312795968 +328 37 0 days 00:00:02.268705816 +328 38 0 days 00:00:06.927680584 +328 39 0 days 00:00:03.930063040 +328 40 0 days 00:00:02.247633040 +328 41 0 days 00:00:05.001208714 +328 42 0 days 00:00:04.193828468 +328 43 0 days 00:00:06.361305670 +328 44 0 days 00:00:02.706694737 +328 45 0 days 00:00:02.520084164 +328 46 0 days 00:00:02.493655570 +328 47 0 days 00:00:04.141360180 +328 48 0 days 00:00:04.146469426 +328 49 0 days 00:00:06.971410643 +328 50 0 days 00:00:04.733917460 +328 51 0 days 00:00:08.049236726 +328 52 0 days 00:00:03.868272040 +328 53 0 days 00:00:02.594006740 +328 54 0 days 00:00:06.486817035 +328 55 0 days 00:00:02.266800160 +328 56 0 days 00:00:07.917110842 +328 57 0 days 00:00:06.936382332 +328 58 0 days 00:00:05.104080263 +328 59 0 days 00:00:07.209941320 +328 60 0 days 00:00:02.052367348 +328 61 0 days 00:00:06.909067250 +328 62 0 days 00:00:07.702098380 +328 63 0 days 00:00:03.173305976 +328 64 0 days 00:00:02.398777868 +328 65 0 days 00:00:03.493098075 +328 66 0 days 00:00:03.836512976 +328 67 0 days 00:00:03.133614296 +329 1 0 days 00:00:04.245650748 +329 2 0 days 00:00:04.499853567 +329 3 0 days 00:00:07.147351680 +329 4 0 days 00:00:02.361537140 +329 5 0 days 00:00:03.522004444 +329 6 0 days 00:00:03.123714480 +329 7 0 days 00:00:02.454722048 +329 8 0 days 00:00:02.716801588 +329 9 0 days 00:00:07.768625028 +329 10 0 days 00:00:05.979241660 +329 11 0 days 00:00:07.553865236 +329 12 0 days 00:00:08.248144237 +329 13 0 days 00:00:03.646283604 +329 14 0 days 00:00:02.443047127 +329 15 0 days 00:00:02.397584096 +329 16 0 days 00:00:06.886282433 +329 17 0 days 00:00:02.730415748 +329 18 0 days 00:00:02.561637568 +329 19 0 days 00:00:02.851246192 +329 20 0 days 00:00:04.577449425 +329 21 0 days 00:00:04.612307176 +329 22 0 days 
00:00:07.569163592 +329 23 0 days 00:00:06.468434800 +329 24 0 days 00:00:04.217226096 +329 25 0 days 00:00:03.647231600 +329 26 0 days 00:00:04.093627035 +329 27 0 days 00:00:06.572606786 +329 28 0 days 00:00:04.283891056 +329 29 0 days 00:00:08.050196336 +329 30 0 days 00:00:08.420566468 +329 31 0 days 00:00:04.154424845 +329 32 0 days 00:00:03.878404135 +329 33 0 days 00:00:08.117858085 +329 34 0 days 00:00:08.223418965 +329 35 0 days 00:00:02.148107980 +329 36 0 days 00:00:03.632698066 +329 37 0 days 00:00:02.700599737 +329 38 0 days 00:00:02.363926123 +329 39 0 days 00:00:03.812896605 +329 40 0 days 00:00:07.127212268 +329 41 0 days 00:00:03.499845300 +329 42 0 days 00:00:04.026167415 +329 43 0 days 00:00:07.090371286 +329 44 0 days 00:00:04.313568215 +329 45 0 days 00:00:02.390491516 +329 46 0 days 00:00:07.153516812 +329 47 0 days 00:00:02.431447577 +329 48 0 days 00:00:02.339526695 +329 49 0 days 00:00:04.549777743 +329 50 0 days 00:00:07.010270146 +329 51 0 days 00:00:07.911271870 +329 52 0 days 00:00:07.264600625 +329 53 0 days 00:00:02.338501725 +329 54 0 days 00:00:01.938209693 +329 55 0 days 00:00:04.401360973 +329 56 0 days 00:00:07.718495016 +329 57 0 days 00:00:07.292912668 +329 58 0 days 00:00:06.445985455 +329 59 0 days 00:00:03.418551306 +329 60 0 days 00:00:05.308227825 +329 61 0 days 00:00:08.906494452 +329 62 0 days 00:00:04.149764350 +329 63 0 days 00:00:07.130271675 +329 64 0 days 00:00:04.377916265 +329 65 0 days 00:00:02.282040440 +330 1 0 days 00:02:44.381337162 +330 2 0 days 00:02:26.682335010 +331 1 0 days 00:00:51.589139403 +331 2 0 days 00:01:27.398566645 +331 3 0 days 00:00:30.697752212 +331 4 0 days 00:00:49.629382820 +331 5 0 days 00:00:28.326272820 +331 6 0 days 00:01:41.096509548 +332 1 0 days 00:00:16.664769273 +332 2 0 days 00:00:17.700887348 +332 3 0 days 00:00:06.189266168 +332 4 0 days 00:00:15.911967472 +332 5 0 days 00:00:16.405726592 +332 6 0 days 00:00:17.802846993 +332 7 0 days 00:00:09.056442791 +332 8 0 days 
00:00:08.137129904 +332 9 0 days 00:00:19.438143346 +332 10 0 days 00:00:08.722247473 +332 11 0 days 00:00:19.509869676 +332 12 0 days 00:00:20.038335023 +332 13 0 days 00:00:18.481610653 +332 14 0 days 00:00:16.254887745 +332 15 0 days 00:00:13.329235660 +332 16 0 days 00:00:08.282009416 +332 17 0 days 00:00:05.595404013 +332 18 0 days 00:00:09.468031451 +332 19 0 days 00:00:10.903285658 +332 20 0 days 00:00:09.971846696 +332 21 0 days 00:00:06.393740153 +332 22 0 days 00:00:07.693785868 +332 23 0 days 00:00:19.087265911 +333 1 0 days 00:00:02.343680925 +333 2 0 days 00:00:09.143268688 +333 3 0 days 00:00:02.660104508 +333 4 0 days 00:00:02.844689648 +333 5 0 days 00:00:10.043368313 +333 6 0 days 00:00:08.839051840 +333 7 0 days 00:00:03.238755250 +333 8 0 days 00:00:03.105226654 +333 9 0 days 00:00:04.375521873 +333 10 0 days 00:00:04.739537964 +333 11 0 days 00:00:04.442835625 +333 12 0 days 00:00:06.198431326 +333 13 0 days 00:00:04.492490984 +333 14 0 days 00:00:08.113938735 +333 15 0 days 00:00:07.027833155 +333 16 0 days 00:00:04.475817208 +333 17 0 days 00:00:02.226869168 +333 18 0 days 00:00:08.327833093 +333 19 0 days 00:00:04.601325344 +333 20 0 days 00:00:03.122751920 +333 21 0 days 00:00:02.557153750 +333 22 0 days 00:00:05.572594693 +333 23 0 days 00:00:02.873269980 +333 24 0 days 00:00:04.292754860 +333 25 0 days 00:00:08.721274290 +333 26 0 days 00:00:07.512946820 +333 27 0 days 00:00:02.963907730 +333 28 0 days 00:00:04.341232262 +333 29 0 days 00:00:02.744850116 +333 30 0 days 00:00:04.112460264 +333 31 0 days 00:00:04.366288545 +333 32 0 days 00:00:07.852674008 +333 33 0 days 00:00:09.052985057 +333 34 0 days 00:00:04.898228356 +333 35 0 days 00:00:03.675060936 +333 36 0 days 00:00:02.542806157 +333 37 0 days 00:00:03.519398986 +333 38 0 days 00:00:04.860606476 +333 39 0 days 00:00:08.247481207 +333 40 0 days 00:00:05.564092560 +333 41 0 days 00:00:05.785628673 +333 42 0 days 00:00:05.160622632 +333 43 0 days 00:00:03.789166097 +333 44 0 days 
00:00:09.604288216 +333 45 0 days 00:00:07.807550480 +333 46 0 days 00:00:04.828645910 +333 47 0 days 00:00:07.754402962 +333 48 0 days 00:00:10.080338516 +333 49 0 days 00:00:08.152103963 +333 50 0 days 00:00:03.086196156 +333 51 0 days 00:00:05.203582660 +333 52 0 days 00:00:08.784518800 +333 53 0 days 00:00:04.031639680 +333 54 0 days 00:00:03.881954476 +333 55 0 days 00:00:04.457513650 +333 56 0 days 00:00:04.835429648 +333 57 0 days 00:00:08.731236180 +333 58 0 days 00:00:02.887344156 +333 59 0 days 00:00:03.131189510 +333 60 0 days 00:00:09.661805540 +334 1 0 days 00:00:08.814921910 +334 2 0 days 00:00:13.955425400 +334 3 0 days 00:00:13.144740733 +334 4 0 days 00:00:10.531941460 +334 5 0 days 00:00:06.473386506 +334 6 0 days 00:00:05.485709667 +334 7 0 days 00:00:18.084691211 +334 8 0 days 00:00:04.275525753 +334 9 0 days 00:00:12.561843386 +334 10 0 days 00:00:06.010178448 +334 11 0 days 00:00:07.563714905 +334 12 0 days 00:00:07.681251840 +334 13 0 days 00:00:05.238165047 +334 14 0 days 00:00:07.519610490 +334 15 0 days 00:00:05.626946080 +334 16 0 days 00:00:04.036601073 +334 17 0 days 00:00:03.919540266 +334 18 0 days 00:00:08.711992428 +334 19 0 days 00:00:09.207344211 +334 20 0 days 00:00:07.986276840 +334 21 0 days 00:00:14.630785800 +334 22 0 days 00:00:14.984770470 +334 23 0 days 00:00:05.052649105 +334 24 0 days 00:00:13.211726113 +334 25 0 days 00:00:14.731625805 +334 26 0 days 00:00:13.202302006 +334 27 0 days 00:00:08.201924776 +334 28 0 days 00:00:06.940453753 +334 29 0 days 00:00:09.497264346 +334 30 0 days 00:00:15.673154492 +334 31 0 days 00:00:08.664533248 +334 32 0 days 00:00:08.848340713 +334 33 0 days 00:00:05.134292350 +334 34 0 days 00:00:13.169582020 +334 35 0 days 00:00:06.578341307 +334 36 0 days 00:00:17.795997955 +335 1 0 days 00:00:05.135428165 +335 2 0 days 00:00:03.015328705 +335 3 0 days 00:00:02.687604612 +335 4 0 days 00:00:06.769476693 +335 5 0 days 00:00:03.067810904 +335 6 0 days 00:00:03.623656113 +335 7 0 days 
00:00:03.824621533 +335 8 0 days 00:00:04.858989561 +335 9 0 days 00:00:04.391683867 +335 10 0 days 00:00:02.815348980 +335 11 0 days 00:00:07.180229886 +335 12 0 days 00:00:02.735952402 +335 13 0 days 00:00:04.624446053 +335 14 0 days 00:00:03.010480520 +335 15 0 days 00:00:06.630635293 +335 16 0 days 00:00:06.639041746 +335 17 0 days 00:00:08.566442897 +335 18 0 days 00:00:02.860998389 +335 19 0 days 00:00:03.015523326 +335 20 0 days 00:00:08.594855251 +335 21 0 days 00:00:04.216511853 +335 22 0 days 00:00:07.591980536 +335 23 0 days 00:00:06.622402973 +335 24 0 days 00:00:02.364522996 +335 25 0 days 00:00:02.600340162 +335 26 0 days 00:00:04.632076169 +335 27 0 days 00:00:02.136076713 +335 28 0 days 00:00:09.052208732 +335 29 0 days 00:00:08.342464617 +335 30 0 days 00:00:02.517267000 +335 31 0 days 00:00:02.316311410 +335 32 0 days 00:00:04.521962330 +335 33 0 days 00:00:02.398544100 +335 34 0 days 00:00:04.886441603 +335 35 0 days 00:00:08.038582280 +335 36 0 days 00:00:04.306410297 +335 37 0 days 00:00:02.531738837 +335 38 0 days 00:00:04.644028767 +335 39 0 days 00:00:02.659658610 +335 40 0 days 00:00:04.333471996 +335 41 0 days 00:00:02.151208360 +335 42 0 days 00:00:03.719166107 +335 43 0 days 00:00:03.800428306 +335 44 0 days 00:00:03.807930940 +335 45 0 days 00:00:02.944971002 +335 46 0 days 00:00:08.912307605 +335 47 0 days 00:00:04.416039880 +335 48 0 days 00:00:04.884549816 +335 49 0 days 00:00:02.872131108 +335 50 0 days 00:00:02.765776212 +335 51 0 days 00:00:03.823073413 +335 53 0 days 00:00:03.339907133 +336 1 0 days 00:02:52.197948820 +336 2 0 days 00:02:47.953505282 +337 1 0 days 00:00:30.195436005 +337 2 0 days 00:00:30.215808460 +337 3 0 days 00:00:26.466601395 +337 8 0 days 00:00:51.946533193 +337 9 0 days 00:00:29.518601553 +337 10 0 days 00:00:56.502639141 +337 11 0 days 00:00:28.377792912 +338 1 0 days 00:00:49.137700926 +338 2 0 days 00:00:47.300243432 +338 3 0 days 00:01:18.204773180 +338 5 0 days 00:02:44.747454163 +339 1 0 days 
00:00:53.435949160 +339 2 0 days 00:00:29.712855183 +339 4 0 days 00:00:29.708894293 +339 5 0 days 00:01:35.283441256 +339 6 0 days 00:00:42.185184386 +339 7 0 days 00:00:53.407989637 +339 8 0 days 00:00:26.613217415 +340 1 0 days 00:00:50.258708337 +340 2 0 days 00:00:32.856357144 +340 3 0 days 00:00:41.275661833 +340 4 0 days 00:00:30.271539322 +340 5 0 days 00:00:20.438778327 +340 6 0 days 00:00:26.986676814 +340 7 0 days 00:01:00.372974190 +340 8 0 days 00:00:16.904433045 +340 9 0 days 00:00:46.685472400 +340 10 0 days 00:00:22.668140227 +340 11 0 days 00:00:50.358958900 +340 12 0 days 00:00:48.162872075 +340 13 0 days 00:00:18.232884503 +341 1 0 days 00:00:28.992779069 +341 2 0 days 00:01:33.472678791 +341 3 0 days 00:00:44.479080975 +341 4 0 days 00:00:30.310908425 +341 5 0 days 00:01:49.073034180 +341 6 0 days 00:00:46.841487057 +341 7 0 days 00:01:28.099914672 +341 8 0 days 00:01:45.383413900 +341 9 0 days 00:00:32.065634103 +341 10 0 days 00:00:28.606519318 +341 11 0 days 00:01:02.281713931 +342 1 0 days 00:00:48.658650370 +342 2 0 days 00:00:14.785876000 +342 3 0 days 00:00:37.081846716 +342 4 0 days 00:00:17.129300256 +342 5 0 days 00:00:41.738633637 +342 6 0 days 00:00:20.293910533 +342 7 0 days 00:00:28.731310714 +342 8 0 days 00:00:19.964682178 +342 9 0 days 00:00:36.380292530 +342 10 0 days 00:00:52.390816100 +342 11 0 days 00:00:41.831258547 +342 12 0 days 00:00:29.034806548 +342 13 0 days 00:01:17.018105500 +342 14 0 days 00:01:05.284262500 +342 15 0 days 00:00:39.531854655 +342 16 0 days 00:01:09.218621758 +342 17 0 days 00:00:51.316920740 +343 1 0 days 00:00:52.094311612 +343 2 0 days 00:00:34.739288966 +343 3 0 days 00:00:47.498813156 +343 4 0 days 00:00:55.566211220 +343 5 0 days 00:00:34.064831037 +343 6 0 days 00:01:12.349514468 +343 7 0 days 00:00:51.883791413 +343 8 0 days 00:00:30.385478377 +343 9 0 days 00:00:42.415296730 +344 1 0 days 00:00:54.954625842 +344 2 0 days 00:00:36.987723563 +344 3 0 days 00:00:51.000642952 +344 4 0 days 
00:00:16.173192717 +344 5 0 days 00:00:33.384889228 +344 6 0 days 00:00:31.939700820 +344 7 0 days 00:01:10.645659588 +344 8 0 days 00:00:27.557003171 +344 9 0 days 00:01:12.467739560 +344 10 0 days 00:00:22.739123227 +344 11 0 days 00:00:17.256139903 +344 12 0 days 00:00:26.368870333 +344 13 0 days 00:01:06.274195225 +345 1 0 days 00:01:03.999088983 +345 2 0 days 00:00:53.565117194 +345 3 0 days 00:00:34.045046887 +345 4 0 days 00:00:26.648135374 +345 5 0 days 00:01:28.496346676 +345 6 0 days 00:00:53.998418350 +345 7 0 days 00:01:54.659831950 +345 8 0 days 00:00:58.747322615 +345 9 0 days 00:00:37.994782711 +345 10 0 days 00:01:13.405183043 +346 1 0 days 00:00:23.761694033 +346 2 0 days 00:00:27.740653922 +346 3 0 days 00:00:24.244698400 +346 4 0 days 00:00:15.148394329 +346 5 0 days 00:00:17.143323925 +346 6 0 days 00:00:20.215262843 +346 7 0 days 00:01:00.595116176 +346 8 0 days 00:00:18.272835100 +346 9 0 days 00:00:28.438757215 +346 10 0 days 00:00:25.069009030 +346 11 0 days 00:00:14.940676337 +346 12 0 days 00:00:28.863799100 +346 13 0 days 00:00:32.181736481 +346 14 0 days 00:00:12.151847775 +346 15 0 days 00:00:26.729236943 +346 16 0 days 00:00:44.539948833 +346 17 0 days 00:00:25.758807669 +346 18 0 days 00:00:51.570881912 +346 19 0 days 00:00:13.606717100 +346 20 0 days 00:00:27.391397476 +346 21 0 days 00:00:41.028877260 +346 22 0 days 00:00:26.009278553 +346 23 0 days 00:00:16.263163709 +347 1 0 days 00:00:11.862413715 +347 2 0 days 00:00:12.321109286 +347 3 0 days 00:00:40.243186964 +347 4 0 days 00:00:34.867429625 +347 5 0 days 00:00:23.627103250 +347 6 0 days 00:00:13.283318205 +347 7 0 days 00:00:13.206680234 +347 8 0 days 00:00:37.789353415 +347 9 0 days 00:00:10.898499685 +347 10 0 days 00:00:17.856114950 +347 11 0 days 00:00:35.206433000 +347 12 0 days 00:00:40.252808813 +347 13 0 days 00:00:41.372612620 +347 14 0 days 00:00:18.911482875 +347 15 0 days 00:00:40.387009845 +348 1 0 days 00:00:12.522824380 +348 2 0 days 00:00:06.637335760 +348 3 0 
days 00:00:10.841809045 +348 4 0 days 00:00:09.775775755 +348 5 0 days 00:00:12.039624093 +348 6 0 days 00:00:17.103960455 +348 7 0 days 00:00:18.649953505 +348 8 0 days 00:00:15.908701260 +348 9 0 days 00:00:17.770516644 +348 10 0 days 00:00:10.575094495 +348 11 0 days 00:00:05.422149491 +348 12 0 days 00:00:09.778224700 +348 13 0 days 00:00:08.625854546 +348 14 0 days 00:00:10.022189720 +348 15 0 days 00:00:08.819383624 +348 16 0 days 00:00:16.036212465 +348 17 0 days 00:00:09.746135530 +348 18 0 days 00:00:18.350110590 +348 19 0 days 00:00:17.675356010 +348 20 0 days 00:00:06.285610023 +348 21 0 days 00:00:16.895691395 +348 22 0 days 00:00:06.102435264 +348 23 0 days 00:00:10.405274947 +348 24 0 days 00:00:18.767165632 +348 25 0 days 00:00:06.527947871 +348 26 0 days 00:00:19.812859366 +348 27 0 days 00:00:06.405995900 +348 28 0 days 00:00:06.226374884 +348 29 0 days 00:00:09.675483635 +348 30 0 days 00:00:10.115930152 +348 31 0 days 00:00:07.430443812 +348 32 0 days 00:00:10.684222690 +349 1 0 days 00:00:10.024067353 +349 2 0 days 00:00:27.869638693 +349 3 0 days 00:00:15.438656886 +349 4 0 days 00:00:09.730117340 +349 5 0 days 00:00:09.766812040 +349 6 0 days 00:00:27.582678033 +349 7 0 days 00:00:13.218229113 +349 8 0 days 00:00:18.435414873 +349 9 0 days 00:00:28.518766640 +349 10 0 days 00:00:10.289748180 +349 11 0 days 00:00:28.680730233 +349 12 0 days 00:00:27.908721706 +349 13 0 days 00:00:13.516125446 +349 14 0 days 00:00:28.641204353 +349 15 0 days 00:00:16.125353420 +349 16 0 days 00:00:11.168929466 +349 17 0 days 00:00:16.684386426 +349 18 0 days 00:00:29.010454266 +349 19 0 days 00:00:10.124641466 +349 20 0 days 00:00:12.261037060 +349 21 0 days 00:00:28.679393086 +349 22 0 days 00:00:10.707546715 +349 23 0 days 00:00:16.165788946 +349 24 0 days 00:00:28.702114413 +349 25 0 days 00:00:16.281404806 +349 26 0 days 00:00:16.067137300 +349 27 0 days 00:00:28.646869766 +349 28 0 days 00:00:09.738565680 +349 29 0 days 00:00:11.812740706 +349 30 0 days 
00:00:17.416982835 +349 31 0 days 00:00:09.609969093 +349 32 0 days 00:00:09.906981033 +350 1 0 days 00:00:11.097257585 +350 2 0 days 00:00:35.248607368 +350 3 0 days 00:00:18.499184150 +350 4 0 days 00:00:11.999358040 +350 5 0 days 00:00:18.435601495 +350 6 0 days 00:00:11.442642525 +350 7 0 days 00:00:32.889142845 +350 8 0 days 00:00:14.505086370 +350 9 0 days 00:00:11.738341364 +350 10 0 days 00:00:13.909192900 +350 11 0 days 00:00:32.877214035 +350 12 0 days 00:00:34.892872932 +350 13 0 days 00:00:15.551005210 +350 14 0 days 00:00:10.872845905 +350 15 0 days 00:00:11.550994344 +350 16 0 days 00:00:20.556848260 +350 17 0 days 00:00:19.649450312 +350 18 0 days 00:00:11.809556128 +350 19 0 days 00:00:18.137646230 +350 20 0 days 00:00:18.515702225 +350 21 0 days 00:00:19.799666040 +351 1 0 days 00:00:14.175362293 +351 2 0 days 00:00:08.095279686 +351 3 0 days 00:00:05.115559733 +351 4 0 days 00:00:08.367020713 +351 5 0 days 00:00:14.192751653 +351 6 0 days 00:00:14.181532593 +351 7 0 days 00:00:14.500860006 +351 8 0 days 00:00:08.500886933 +351 9 0 days 00:00:06.805527266 +351 10 0 days 00:00:04.974782306 +351 11 0 days 00:00:08.199347540 +351 12 0 days 00:00:05.202026820 +351 13 0 days 00:00:14.505840006 +351 14 0 days 00:00:07.817347033 +351 15 0 days 00:00:14.510652593 +351 16 0 days 00:00:14.501243806 +351 17 0 days 00:00:06.564409093 +351 18 0 days 00:00:05.842830533 +351 19 0 days 00:00:07.362091380 +351 20 0 days 00:00:08.117824426 +351 21 0 days 00:00:05.699083606 +351 22 0 days 00:00:14.577305700 +351 23 0 days 00:00:15.419227440 +351 24 0 days 00:00:14.517493260 +351 25 0 days 00:00:14.365539673 +351 26 0 days 00:00:14.384525860 +351 27 0 days 00:00:08.168765073 +351 28 0 days 00:00:04.974381680 +351 29 0 days 00:00:05.111011966 +351 30 0 days 00:00:11.839745166 +351 31 0 days 00:00:14.495136960 +351 32 0 days 00:00:07.252315380 +351 33 0 days 00:00:05.102297593 +351 34 0 days 00:00:06.180612566 +351 35 0 days 00:00:14.196627060 +351 36 0 days 
00:00:08.168212400 +351 37 0 days 00:00:14.340421886 +351 38 0 days 00:00:05.104793573 +351 39 0 days 00:00:05.555474473 +351 40 0 days 00:00:14.415651940 +351 41 0 days 00:00:08.106739226 +351 42 0 days 00:00:14.501521260 +351 43 0 days 00:00:14.319273173 +351 44 0 days 00:00:05.631470246 +351 45 0 days 00:00:08.190325093 +351 46 0 days 00:00:07.986091966 +351 47 0 days 00:00:14.364349200 +351 48 0 days 00:00:14.043723580 +351 49 0 days 00:00:04.726827295 +351 50 0 days 00:00:06.146835873 +351 51 0 days 00:00:08.081058986 +351 52 0 days 00:00:14.354665233 +351 53 0 days 00:00:14.164969900 +351 54 0 days 00:00:08.085694966 +351 55 0 days 00:00:08.874644380 +351 56 0 days 00:00:05.009101340 +351 57 0 days 00:00:08.513557620 +351 58 0 days 00:00:08.143642860 +351 59 0 days 00:00:05.396742120 +352 1 0 days 00:00:09.351127140 +352 2 0 days 00:00:09.671623805 +352 3 0 days 00:00:16.359945770 +352 4 0 days 00:00:06.489833830 +352 5 0 days 00:00:06.958013820 +352 6 0 days 00:00:06.100425020 +352 7 0 days 00:00:08.378102002 +352 8 0 days 00:00:06.008650076 +352 9 0 days 00:00:18.666868177 +352 10 0 days 00:00:12.477010220 +352 11 0 days 00:00:09.267643750 +352 12 0 days 00:00:06.075490660 +352 13 0 days 00:00:16.567067355 +352 14 0 days 00:00:15.095149116 +352 15 0 days 00:00:10.149556430 +352 16 0 days 00:00:07.331316132 +352 17 0 days 00:00:06.110940200 +352 18 0 days 00:00:09.127638175 +352 19 0 days 00:00:09.213542210 +352 20 0 days 00:00:05.814189612 +352 21 0 days 00:00:09.672551635 +352 22 0 days 00:00:09.228864905 +352 23 0 days 00:00:11.527933570 +352 24 0 days 00:00:10.406434486 +352 25 0 days 00:00:07.578169950 +352 26 0 days 00:00:07.739888763 +352 27 0 days 00:00:09.512774290 +352 28 0 days 00:00:11.163837345 +352 29 0 days 00:00:06.100515936 +352 30 0 days 00:00:06.124310880 +352 31 0 days 00:00:18.374962020 +352 32 0 days 00:00:16.503016895 +352 33 0 days 00:00:18.262505340 +352 34 0 days 00:00:16.386398650 +353 1 0 days 00:00:19.117618570 +353 2 0 days 
00:00:20.382989116 +353 3 0 days 00:00:11.877470140 +353 4 0 days 00:00:21.259594100 +353 5 0 days 00:00:18.480545975 +353 6 0 days 00:00:18.981615155 +353 7 0 days 00:00:12.377798540 +353 8 0 days 00:00:33.042926245 +353 9 0 days 00:00:33.284268390 +353 10 0 days 00:00:36.893276666 +353 11 0 days 00:00:11.142698205 +353 12 0 days 00:00:18.662555565 +353 13 0 days 00:00:20.108411510 +353 14 0 days 00:00:12.377533004 +353 15 0 days 00:00:35.112309392 +353 16 0 days 00:00:14.185956910 +353 17 0 days 00:00:33.372160430 +353 18 0 days 00:00:35.813444040 +354 1 0 days 00:00:08.404972346 +354 2 0 days 00:00:10.178211870 +354 3 0 days 00:00:06.242885460 +354 4 0 days 00:00:09.537540315 +354 5 0 days 00:00:16.879042195 +354 6 0 days 00:00:06.306261116 +354 7 0 days 00:00:06.218438860 +354 8 0 days 00:00:09.338266815 +354 9 0 days 00:00:06.358840380 +354 10 0 days 00:00:05.627794637 +354 11 0 days 00:00:06.237246872 +354 12 0 days 00:00:06.383574640 +354 13 0 days 00:00:06.468245252 +354 14 0 days 00:00:16.439594210 +354 15 0 days 00:00:06.118353032 +354 16 0 days 00:00:08.484653226 +354 17 0 days 00:00:08.619203352 +354 18 0 days 00:00:06.056628836 +354 19 0 days 00:00:16.954919265 +354 20 0 days 00:00:17.617932772 +354 21 0 days 00:00:16.728349315 +354 22 0 days 00:00:08.021938110 +354 23 0 days 00:00:09.784434890 +354 24 0 days 00:00:18.781187864 +354 25 0 days 00:00:06.453790540 +354 26 0 days 00:00:06.027836600 +354 27 0 days 00:00:17.474352448 +354 28 0 days 00:00:06.388082890 +354 29 0 days 00:00:17.610322912 +354 30 0 days 00:00:16.739933910 +354 31 0 days 00:00:09.901860515 +354 32 0 days 00:00:10.799244333 +354 33 0 days 00:00:16.924941735 +354 34 0 days 00:00:06.173862392 +354 35 0 days 00:00:06.181243352 +354 36 0 days 00:00:09.507751710 +355 1 0 days 00:01:02.065305786 +355 2 0 days 00:01:02.050713053 +355 3 0 days 00:01:03.880893486 +355 4 0 days 00:00:36.920376773 +355 5 0 days 00:00:36.915280526 +355 6 0 days 00:00:35.679959160 +355 7 0 days 
00:00:35.682251800 +355 8 0 days 00:00:35.089342806 +355 9 0 days 00:00:35.667008060 +355 10 0 days 00:00:35.088252840 +355 11 0 days 00:01:02.025679953 +355 12 0 days 00:00:35.060514220 +355 13 0 days 00:00:35.661006400 +355 14 0 days 00:01:58.241650400 +356 1 0 days 00:00:43.632015983 +356 2 0 days 00:02:12.592011440 +356 3 0 days 00:01:17.090128426 +356 4 0 days 00:01:21.964432762 +357 1 0 days 00:01:13.076208333 +357 2 0 days 00:00:39.929304820 +357 3 0 days 00:01:13.079587380 +357 4 0 days 00:00:23.093884760 +357 5 0 days 00:00:38.808748153 +357 6 0 days 00:00:38.811798360 +357 7 0 days 00:01:13.839532746 +357 8 0 days 00:01:13.079907233 +357 9 0 days 00:00:38.800646813 +357 10 0 days 00:00:39.937959326 +357 11 0 days 00:01:13.833135406 +358 1 0 days 00:00:50.408175300 +358 2 0 days 00:01:12.974494393 +358 3 0 days 00:00:47.679205216 +358 4 0 days 00:01:32.842298340 +358 5 0 days 00:01:33.092060248 +359 1 0 days 00:02:19.019608452 +359 2 0 days 00:01:17.410964306 +359 3 0 days 00:02:19.022568908 +360 1 0 days 00:00:27.100953343 +360 2 0 days 00:00:46.524949660 +360 3 0 days 00:01:27.756157620 +360 4 0 days 00:01:32.642157757 +360 5 0 days 00:00:44.549102115 +360 6 0 days 00:00:44.528195170 +361 1 0 days 00:00:00.152333633 +361 2 0 days 00:00:00.176993480 +361 3 0 days 00:00:00.142426180 +361 4 0 days 00:00:00.129465653 +361 5 0 days 00:00:00.135689546 +361 6 0 days 00:00:00.128307020 +361 7 0 days 00:00:00.192113366 +361 8 0 days 00:00:00.188247613 +361 9 0 days 00:00:00.186341546 +361 10 0 days 00:00:00.184234560 +361 11 0 days 00:00:00.190796400 +361 12 0 days 00:00:00.188450873 +361 13 0 days 00:00:00.147843700 +361 14 0 days 00:00:00.118043773 +361 15 0 days 00:00:00.135735760 +361 16 0 days 00:00:00.184012913 +361 17 0 days 00:00:00.189606640 +361 18 0 days 00:00:00.186248646 +361 19 0 days 00:00:00.119425480 +361 20 0 days 00:00:00.140717386 +361 21 0 days 00:00:00.145880526 +361 22 0 days 00:00:00.129437426 +361 23 0 days 00:00:00.146271413 +361 24 0 
days 00:00:00.144235413 +361 25 0 days 00:00:00.130400620 +361 26 0 days 00:00:00.180110146 +361 27 0 days 00:00:00.179220726 +361 28 0 days 00:00:00.137119200 +361 29 0 days 00:00:00.182234213 +361 30 0 days 00:00:00.137433140 +361 31 0 days 00:00:00.132200953 +361 32 0 days 00:00:00.187140233 +361 33 0 days 00:00:00.115190246 +361 34 0 days 00:00:00.114783340 +361 35 0 days 00:00:00.179259726 +361 36 0 days 00:00:00.180567880 +361 37 0 days 00:00:00.183021440 +361 38 0 days 00:00:00.132168373 +361 39 0 days 00:00:00.112451206 +361 40 0 days 00:00:00.182891300 +361 41 0 days 00:00:00.128535493 +361 42 0 days 00:00:00.117935380 +361 43 0 days 00:00:00.107800886 +361 44 0 days 00:00:00.179374680 +361 45 0 days 00:00:00.132808346 +361 46 0 days 00:00:00.150717100 +361 47 0 days 00:00:00.116826333 +361 48 0 days 00:00:00.182124766 +361 49 0 days 00:00:00.115934546 +361 50 0 days 00:00:00.132000606 +361 51 0 days 00:00:00.190810193 +361 52 0 days 00:00:00.194768320 +361 53 0 days 00:00:00.148805800 +361 54 0 days 00:00:00.135079146 +361 55 0 days 00:00:00.190302026 +361 56 0 days 00:00:00.142578860 +361 57 0 days 00:00:00.120700593 +361 58 0 days 00:00:00.183640466 +361 59 0 days 00:00:00.138274360 +361 60 0 days 00:00:00.150566353 +361 61 0 days 00:00:00.142424380 +361 62 0 days 00:00:00.184994013 +361 63 0 days 00:00:00.129518973 +361 64 0 days 00:00:00.178758800 +361 65 0 days 00:00:00.111369960 +361 66 0 days 00:00:00.111244866 +361 67 0 days 00:00:00.176195600 +361 68 0 days 00:00:00.128429506 +361 69 0 days 00:00:00.187301186 +361 70 0 days 00:00:00.138923480 +361 71 0 days 00:00:00.196680200 +361 72 0 days 00:00:00.191115566 +361 73 0 days 00:00:00.138322286 +361 74 0 days 00:00:00.110396286 +361 75 0 days 00:00:00.132235093 +361 76 0 days 00:00:00.137786300 +361 77 0 days 00:00:00.120317480 +361 78 0 days 00:00:00.133748786 +361 79 0 days 00:00:00.180189920 +361 80 0 days 00:00:00.182598086 +361 81 0 days 00:00:00.111056053 +361 82 0 days 00:00:00.135907360 
+361 83 0 days 00:00:00.136366006 +361 84 0 days 00:00:00.131147393 +361 85 0 days 00:00:00.119325526 +361 86 0 days 00:00:00.150834780 +361 87 0 days 00:00:00.138086866 +361 88 0 days 00:00:00.144293373 +361 89 0 days 00:00:00.133537293 +361 90 0 days 00:00:00.117074793 +361 91 0 days 00:00:00.194613320 +361 92 0 days 00:00:00.207301026 +361 93 0 days 00:00:00.138688240 +361 94 0 days 00:00:00.192539893 +361 95 0 days 00:00:00.118639106 +361 96 0 days 00:00:00.111248860 +361 97 0 days 00:00:00.133352133 +361 98 0 days 00:00:00.148002826 +361 99 0 days 00:00:00.119099053 +361 100 0 days 00:00:00.131557480 +362 1 0 days 00:00:00.121963435 +362 5 0 days 00:00:00.176868471 +362 7 0 days 00:00:00.139337036 +362 8 0 days 00:00:00.256546458 +362 9 0 days 00:00:00.143356630 +362 10 0 days 00:00:00.162798020 +362 11 0 days 00:00:00.122361400 +362 12 0 days 00:00:00.150312630 +362 13 0 days 00:00:00.135246544 +362 14 0 days 00:00:00.254814303 +362 15 0 days 00:00:00.242647747 +362 16 0 days 00:00:00.247307416 +362 17 0 days 00:00:00.251094698 +362 19 0 days 00:00:00.145501824 +362 20 0 days 00:00:00.189046565 +362 21 0 days 00:00:00.140023486 +362 22 0 days 00:00:00.207106920 +362 24 0 days 00:00:00.153975416 +362 25 0 days 00:00:00.122760853 +362 26 0 days 00:00:00.134098473 +362 27 0 days 00:00:00.147441120 +362 28 0 days 00:00:00.247451594 +362 29 0 days 00:00:00.179866253 +362 30 0 days 00:00:00.170231340 +362 32 0 days 00:00:00.189168326 +362 34 0 days 00:00:00.181834957 +362 36 0 days 00:00:00.151970864 +362 37 0 days 00:00:00.171538775 +362 38 0 days 00:00:00.254796777 +362 39 0 days 00:00:00.207396540 +362 41 0 days 00:00:00.256122043 +362 45 0 days 00:00:00.187065980 +362 48 0 days 00:00:00.265115006 +362 49 0 days 00:00:00.169936256 +362 54 0 days 00:00:00.253148747 +362 55 0 days 00:00:00.164539970 +362 56 0 days 00:00:00.183448365 +362 58 0 days 00:00:00.189159746 +362 60 0 days 00:00:00.261802942 +362 62 0 days 00:00:00.184145946 +362 63 0 days 
00:00:00.170839684 +362 65 0 days 00:00:00.148764770 +362 66 0 days 00:00:00.138628760 +362 67 0 days 00:00:00.170124632 +362 68 0 days 00:00:00.262757858 +362 69 0 days 00:00:00.187788620 +362 70 0 days 00:00:00.116411146 +362 72 0 days 00:00:00.184019995 +362 73 0 days 00:00:00.158876550 +362 75 0 days 00:00:00.191330653 +362 76 0 days 00:00:00.216108136 +362 77 0 days 00:00:00.144833395 +362 80 0 days 00:00:00.132648033 +362 81 0 days 00:00:00.145465130 +362 82 0 days 00:00:00.150870640 +362 83 0 days 00:00:00.154046876 +362 84 0 days 00:00:00.125461900 +362 86 0 days 00:00:00.166127140 +362 87 0 days 00:00:00.163708088 +362 88 0 days 00:00:00.149910425 +362 90 0 days 00:00:00.142397656 +362 91 0 days 00:00:00.164155370 +362 92 0 days 00:00:00.203508360 +362 93 0 days 00:00:00.142663426 +362 95 0 days 00:00:00.118346840 +362 96 0 days 00:00:00.135170600 +362 98 0 days 00:00:00.193712000 +362 99 0 days 00:00:00.251422453 +363 1 0 days 00:00:00.077207133 +363 2 0 days 00:00:00.076755900 +363 3 0 days 00:00:00.069565440 +363 4 0 days 00:00:00.105178086 +363 5 0 days 00:00:00.081739100 +363 6 0 days 00:00:00.067763893 +363 7 0 days 00:00:00.110605766 +363 8 0 days 00:00:00.108252860 +363 9 0 days 00:00:00.067084273 +363 10 0 days 00:00:00.067380840 +363 11 0 days 00:00:00.064634373 +363 12 0 days 00:00:00.064154220 +363 13 0 days 00:00:00.108335400 +363 14 0 days 00:00:00.100918866 +363 15 0 days 00:00:00.078660300 +363 16 0 days 00:00:00.104517326 +363 17 0 days 00:00:00.065028286 +363 18 0 days 00:00:00.080371366 +363 19 0 days 00:00:00.106252280 +363 20 0 days 00:00:00.084715400 +363 21 0 days 00:00:00.110767273 +363 22 0 days 00:00:00.106859553 +363 23 0 days 00:00:00.067898446 +363 24 0 days 00:00:00.076155026 +363 25 0 days 00:00:00.080190993 +363 26 0 days 00:00:00.078439446 +363 27 0 days 00:00:00.070355340 +363 28 0 days 00:00:00.067264586 +363 29 0 days 00:00:00.064955760 +363 30 0 days 00:00:00.082183093 +363 31 0 days 00:00:00.106271060 +363 32 0 days 
00:00:00.083932960 +363 33 0 days 00:00:00.079507093 +363 34 0 days 00:00:00.084659666 +363 35 0 days 00:00:00.070619473 +363 36 0 days 00:00:00.069707813 +363 37 0 days 00:00:00.076555513 +363 38 0 days 00:00:00.074014500 +363 39 0 days 00:00:00.115187720 +363 40 0 days 00:00:00.113341213 +363 41 0 days 00:00:00.079505026 +363 42 0 days 00:00:00.085897500 +363 43 0 days 00:00:00.075172500 +363 44 0 days 00:00:00.078900900 +363 45 0 days 00:00:00.109645520 +363 46 0 days 00:00:00.077494846 +363 47 0 days 00:00:00.076458613 +363 48 0 days 00:00:00.082541073 +363 49 0 days 00:00:00.067382200 +363 50 0 days 00:00:00.069771626 +363 51 0 days 00:00:00.080162446 +363 52 0 days 00:00:00.082050433 +363 53 0 days 00:00:00.102538513 +363 54 0 days 00:00:00.075680020 +363 55 0 days 00:00:00.069429693 +363 56 0 days 00:00:00.082123593 +363 57 0 days 00:00:00.108175006 +363 58 0 days 00:00:00.076806740 +363 59 0 days 00:00:00.107445253 +363 60 0 days 00:00:00.107186066 +363 61 0 days 00:00:00.067066386 +363 62 0 days 00:00:00.069246586 +363 63 0 days 00:00:00.081309193 +363 64 0 days 00:00:00.112880553 +363 65 0 days 00:00:00.084213180 +363 66 0 days 00:00:00.072935966 +363 67 0 days 00:00:00.082034986 +363 68 0 days 00:00:00.081683260 +363 69 0 days 00:00:00.107799366 +363 70 0 days 00:00:00.107922600 +363 71 0 days 00:00:00.075038513 +363 72 0 days 00:00:00.082651706 +363 73 0 days 00:00:00.070369580 +363 74 0 days 00:00:00.106368820 +363 75 0 days 00:00:00.076146600 +363 76 0 days 00:00:00.082180340 +363 77 0 days 00:00:00.070046106 +363 78 0 days 00:00:00.081923220 +363 79 0 days 00:00:00.065618606 +363 80 0 days 00:00:00.071427746 +363 81 0 days 00:00:00.082068460 +363 82 0 days 00:00:00.082288146 +363 83 0 days 00:00:00.080540820 +363 84 0 days 00:00:00.076208373 +363 85 0 days 00:00:00.072603113 +363 86 0 days 00:00:00.078395253 +363 87 0 days 00:00:00.081488600 +363 88 0 days 00:00:00.079169873 +363 89 0 days 00:00:00.067904373 +363 90 0 days 00:00:00.105018780 +363 91 
0 days 00:00:00.104485053 +363 92 0 days 00:00:00.067905600 +363 93 0 days 00:00:00.105425966 +363 94 0 days 00:00:00.108544480 +363 95 0 days 00:00:00.079239660 +363 96 0 days 00:00:00.078961080 +363 97 0 days 00:00:00.066073420 +363 98 0 days 00:00:00.107941426 +363 99 0 days 00:00:00.105345006 +363 100 0 days 00:00:00.106663626 +364 2 0 days 00:00:00.092834191 +364 3 0 days 00:00:00.079789846 +364 4 0 days 00:00:00.082120580 +364 5 0 days 00:00:00.075586610 +364 6 0 days 00:00:00.071308015 +364 11 0 days 00:00:00.095909503 +364 12 0 days 00:00:00.080786865 +364 14 0 days 00:00:00.084788145 +364 15 0 days 00:00:00.073962972 +364 17 0 days 00:00:00.097623320 +364 19 0 days 00:00:00.076394133 +364 20 0 days 00:00:00.088643896 +364 21 0 days 00:00:00.065132533 +364 23 0 days 00:00:00.129768570 +364 24 0 days 00:00:00.087050720 +364 25 0 days 00:00:00.085956000 +364 27 0 days 00:00:00.080287204 +364 29 0 days 00:00:00.088972930 +364 30 0 days 00:00:00.085083200 +364 31 0 days 00:00:00.079097200 +364 32 0 days 00:00:00.071051360 +364 33 0 days 00:00:00.133867198 +364 34 0 days 00:00:00.082031225 +364 35 0 days 00:00:00.083347335 +364 36 0 days 00:00:00.096063483 +364 37 0 days 00:00:00.075787780 +364 38 0 days 00:00:00.079330763 +364 39 0 days 00:00:00.103841512 +364 41 0 days 00:00:00.110227910 +364 42 0 days 00:00:00.079488273 +364 43 0 days 00:00:00.103688493 +364 44 0 days 00:00:00.115914480 +364 45 0 days 00:00:00.097653265 +364 46 0 days 00:00:00.098946274 +364 47 0 days 00:00:00.111203430 +364 48 0 days 00:00:00.067345973 +364 51 0 days 00:00:00.069058855 +364 52 0 days 00:00:00.076672480 +364 53 0 days 00:00:00.066570065 +364 55 0 days 00:00:00.078685866 +364 56 0 days 00:00:00.086924617 +364 58 0 days 00:00:00.107571840 +364 59 0 days 00:00:00.069460568 +364 60 0 days 00:00:00.078804068 +364 61 0 days 00:00:00.099793593 +364 62 0 days 00:00:00.093727336 +364 63 0 days 00:00:00.106078073 +364 64 0 days 00:00:00.064295326 +364 66 0 days 00:00:00.074893640 +364 
67 0 days 00:00:00.075333940 +364 69 0 days 00:00:00.084954084 +364 70 0 days 00:00:00.083152836 +364 71 0 days 00:00:00.077839020 +364 72 0 days 00:00:00.085927525 +364 74 0 days 00:00:00.104750400 +364 76 0 days 00:00:00.089035032 +364 77 0 days 00:00:00.090070665 +364 78 0 days 00:00:00.070091756 +364 79 0 days 00:00:00.073474080 +364 80 0 days 00:00:00.076794033 +364 81 0 days 00:00:00.075885217 +364 82 0 days 00:00:00.088749962 +364 83 0 days 00:00:00.091754485 +364 84 0 days 00:00:00.090645093 +364 85 0 days 00:00:00.074826612 +364 86 0 days 00:00:00.080494820 +364 87 0 days 00:00:00.071710284 +364 89 0 days 00:00:00.101777113 +364 90 0 days 00:00:00.094075520 +364 91 0 days 00:00:00.087816888 +364 92 0 days 00:00:00.097803896 +364 93 0 days 00:00:00.082794185 +364 94 0 days 00:00:00.069447280 +364 96 0 days 00:00:00.067801646 +364 97 0 days 00:00:00.113404165 +364 98 0 days 00:00:00.081161460 +364 99 0 days 00:00:00.076348333 +364 100 0 days 00:00:00.133914047 +365 1 0 days 00:00:00.139562040 +365 2 0 days 00:00:00.143534332 +365 3 0 days 00:00:00.147987838 +365 4 0 days 00:00:00.145023572 +365 5 0 days 00:00:00.164932894 +365 6 0 days 00:00:00.141796657 +365 7 0 days 00:00:00.228260632 +365 8 0 days 00:00:00.172422394 +365 9 0 days 00:00:00.242119438 +365 10 0 days 00:00:00.186045972 +365 11 0 days 00:00:00.233296894 +365 12 0 days 00:00:00.142114940 +365 13 0 days 00:00:00.141244205 +365 14 0 days 00:00:00.183031493 +365 15 0 days 00:00:00.230589896 +365 16 0 days 00:00:00.140781728 +365 17 0 days 00:00:00.229696822 +365 18 0 days 00:00:00.161438757 +365 19 0 days 00:00:00.182518003 +365 20 0 days 00:00:00.237990721 +365 21 0 days 00:00:00.144508467 +365 22 0 days 00:00:00.167724384 +365 23 0 days 00:00:00.147130284 +365 24 0 days 00:00:00.239207909 +365 25 0 days 00:00:00.235250081 +365 26 0 days 00:00:00.162435070 +365 27 0 days 00:00:00.183271840 +365 28 0 days 00:00:00.184684006 +365 30 0 days 00:00:00.159247290 +365 31 0 days 00:00:00.147382866 +365 
32 0 days 00:00:00.235637308 +365 33 0 days 00:00:00.179230571 +365 34 0 days 00:00:00.235364682 +365 35 0 days 00:00:00.173183465 +365 36 0 days 00:00:00.143601555 +365 37 0 days 00:00:00.225343466 +365 38 0 days 00:00:00.168774460 +365 39 0 days 00:00:00.175124055 +365 40 0 days 00:00:00.234619370 +365 41 0 days 00:00:00.174312285 +365 42 0 days 00:00:00.233291561 +365 43 0 days 00:00:00.162051935 +365 44 0 days 00:00:00.178730425 +365 45 0 days 00:00:00.156682182 +365 46 0 days 00:00:00.171847661 +365 47 0 days 00:00:00.146497850 +365 48 0 days 00:00:00.165392750 +365 49 0 days 00:00:00.142051430 +365 50 0 days 00:00:00.181266488 +365 51 0 days 00:00:00.145136801 +365 52 0 days 00:00:00.165040935 +365 53 0 days 00:00:00.166830370 +365 54 0 days 00:00:00.229854306 +365 55 0 days 00:00:00.177586243 +365 56 0 days 00:00:00.223821100 +365 57 0 days 00:00:00.226507697 +365 59 0 days 00:00:00.174267286 +365 60 0 days 00:00:00.159211360 +365 61 0 days 00:00:00.169055883 +365 62 0 days 00:00:00.225525910 +365 63 0 days 00:00:00.236154800 +365 64 0 days 00:00:00.165939060 +365 65 0 days 00:00:00.145081348 +365 66 0 days 00:00:00.173496402 +365 67 0 days 00:00:00.230319762 +365 68 0 days 00:00:00.235908340 +365 69 0 days 00:00:00.173496073 +365 70 0 days 00:00:00.221852377 +365 71 0 days 00:00:00.144395596 +365 72 0 days 00:00:00.147921080 +365 73 0 days 00:00:00.141209313 +365 74 0 days 00:00:00.232564929 +365 75 0 days 00:00:00.184383211 +365 76 0 days 00:00:00.244105848 +365 78 0 days 00:00:00.234367714 +365 79 0 days 00:00:00.232726969 +365 80 0 days 00:00:00.146380316 +365 81 0 days 00:00:00.174871353 +365 82 0 days 00:00:00.238480055 +365 83 0 days 00:00:00.237394009 +365 84 0 days 00:00:00.230836721 +365 85 0 days 00:00:00.172005992 +365 86 0 days 00:00:00.233111664 +365 87 0 days 00:00:00.186931346 +365 88 0 days 00:00:00.239977111 +365 89 0 days 00:00:00.175691290 +365 90 0 days 00:00:00.172669834 +365 91 0 days 00:00:00.174289065 +365 92 0 days 
00:00:00.178189692 +365 93 0 days 00:00:00.145933088 +365 94 0 days 00:00:00.165474160 +365 95 0 days 00:00:00.172082444 +365 96 0 days 00:00:00.143591846 +365 97 0 days 00:00:00.171209971 +365 98 0 days 00:00:00.176703303 +365 99 0 days 00:00:00.187152112 +365 100 0 days 00:00:00.184730068 +366 1 0 days 00:00:00.076233050 +366 2 0 days 00:00:00.125368071 +366 6 0 days 00:00:00.132897614 +366 7 0 days 00:00:00.092221132 +366 8 0 days 00:00:00.092486451 +366 9 0 days 00:00:00.136826468 +366 10 0 days 00:00:00.084247450 +366 11 0 days 00:00:00.098810654 +366 12 0 days 00:00:00.095956240 +366 13 0 days 00:00:00.073733740 +366 14 0 days 00:00:00.090723985 +366 15 0 days 00:00:00.073961876 +366 16 0 days 00:00:00.100996981 +366 17 0 days 00:00:00.096535364 +366 18 0 days 00:00:00.091685003 +366 19 0 days 00:00:00.101844950 +366 20 0 days 00:00:00.126956842 +366 21 0 days 00:00:00.135417481 +366 22 0 days 00:00:00.130690492 +366 23 0 days 00:00:00.091005329 +366 24 0 days 00:00:00.066805570 +366 25 0 days 00:00:00.095878496 +366 26 0 days 00:00:00.095973916 +366 27 0 days 00:00:00.126292781 +366 28 0 days 00:00:00.094858665 +366 29 0 days 00:00:00.079673818 +366 30 0 days 00:00:00.078717955 +366 31 0 days 00:00:00.089663196 +366 32 0 days 00:00:00.091463185 +366 33 0 days 00:00:00.135417346 +366 34 0 days 00:00:00.079990904 +366 35 0 days 00:00:00.131197528 +366 36 0 days 00:00:00.126711052 +366 37 0 days 00:00:00.083319849 +366 38 0 days 00:00:00.100344170 +366 39 0 days 00:00:00.131239926 +366 40 0 days 00:00:00.082498385 +366 41 0 days 00:00:00.097906668 +366 42 0 days 00:00:00.099774773 +366 43 0 days 00:00:00.091586208 +366 44 0 days 00:00:00.082683570 +366 45 0 days 00:00:00.078560777 +366 46 0 days 00:00:00.073642775 +366 47 0 days 00:00:00.132033018 +366 48 0 days 00:00:00.096555918 +366 49 0 days 00:00:00.093905636 +366 50 0 days 00:00:00.081703586 +366 51 0 days 00:00:00.080895440 +366 52 0 days 00:00:00.097961086 +366 53 0 days 00:00:00.099567495 +366 54 0 
days 00:00:00.125409507 +366 55 0 days 00:00:00.074045680 +366 56 0 days 00:00:00.100652813 +366 57 0 days 00:00:00.095650638 +366 58 0 days 00:00:00.081647937 +366 59 0 days 00:00:00.094215138 +366 60 0 days 00:00:00.082581222 +366 61 0 days 00:00:00.095897004 +366 62 0 days 00:00:00.093899053 +366 63 0 days 00:00:00.131106525 +366 65 0 days 00:00:00.101378595 +366 66 0 days 00:00:00.096710160 +366 67 0 days 00:00:00.095009310 +366 68 0 days 00:00:00.131200993 +366 69 0 days 00:00:00.091083583 +366 70 0 days 00:00:00.095644122 +366 71 0 days 00:00:00.093819275 +366 72 0 days 00:00:00.099299690 +366 73 0 days 00:00:00.090257700 +366 74 0 days 00:00:00.129145396 +366 75 0 days 00:00:00.098144405 +366 76 0 days 00:00:00.092894948 +366 77 0 days 00:00:00.095917335 +366 78 0 days 00:00:00.130050436 +366 79 0 days 00:00:00.124526280 +366 80 0 days 00:00:00.133618276 +366 81 0 days 00:00:00.131018441 +366 82 0 days 00:00:00.093986321 +366 83 0 days 00:00:00.131369560 +366 84 0 days 00:00:00.132499203 +366 85 0 days 00:00:00.127563890 +366 86 0 days 00:00:00.092600998 +366 87 0 days 00:00:00.089949008 +366 88 0 days 00:00:00.091314736 +366 89 0 days 00:00:00.078659201 +366 90 0 days 00:00:00.103676575 +366 91 0 days 00:00:00.078863403 +366 92 0 days 00:00:00.101827237 +366 93 0 days 00:00:00.130957666 +366 94 0 days 00:00:00.123666555 +366 95 0 days 00:00:00.096518734 +366 96 0 days 00:00:00.095986223 +366 97 0 days 00:00:00.070864290 +366 98 0 days 00:00:00.078324329 +366 99 0 days 00:00:00.072560125 +366 100 0 days 00:00:00.092631362 +367 1 0 days 00:00:01.196042966 +367 2 0 days 00:00:01.292783440 +367 3 0 days 00:00:00.740284860 +367 4 0 days 00:00:00.373182813 +367 5 0 days 00:00:00.402006160 +367 6 0 days 00:00:00.691076233 +367 7 0 days 00:00:01.265598440 +367 8 0 days 00:00:00.693501126 +367 9 0 days 00:00:00.685414233 +367 10 0 days 00:00:01.199246720 +367 11 0 days 00:00:00.369410653 +367 12 0 days 00:00:01.148897813 +367 13 0 days 00:00:01.167265933 +367 14 0 
days 00:00:00.399121180 +367 15 0 days 00:00:00.348921173 +367 16 0 days 00:00:00.444777686 +367 17 0 days 00:00:01.249696720 +367 18 0 days 00:00:00.383857813 +367 19 0 days 00:00:00.440566666 +367 20 0 days 00:00:00.701250980 +367 21 0 days 00:00:00.404680113 +367 22 0 days 00:00:00.694051213 +367 23 0 days 00:00:01.231271293 +367 24 0 days 00:00:01.245809380 +367 25 0 days 00:00:00.652591566 +367 26 0 days 00:00:00.843616466 +367 27 0 days 00:00:00.638453086 +367 28 0 days 00:00:00.651422920 +367 29 0 days 00:00:00.642273993 +367 30 0 days 00:00:00.731919620 +367 31 0 days 00:00:00.442684840 +367 32 0 days 00:00:00.668129233 +367 33 0 days 00:00:01.232833053 +367 34 0 days 00:00:00.718842393 +367 35 0 days 00:00:01.268243573 +367 36 0 days 00:00:00.473927633 +367 37 0 days 00:00:00.404376126 +367 38 0 days 00:00:00.408298560 +367 39 0 days 00:00:00.638300153 +367 40 0 days 00:00:01.166052166 +367 41 0 days 00:00:01.187416920 +367 42 0 days 00:00:01.176816920 +367 43 0 days 00:00:00.381390326 +367 44 0 days 00:00:01.226897833 +367 45 0 days 00:00:01.212005733 +367 46 0 days 00:00:00.377821280 +367 47 0 days 00:00:01.223418920 +367 48 0 days 00:00:00.498654880 +367 49 0 days 00:00:00.402299126 +367 50 0 days 00:00:00.710078873 +367 51 0 days 00:00:01.322152793 +367 52 0 days 00:00:00.704609926 +367 53 0 days 00:00:00.409782386 +367 54 0 days 00:00:00.724686213 +367 55 0 days 00:00:00.423403280 +367 56 0 days 00:00:00.383494720 +367 57 0 days 00:00:01.368210146 +367 58 0 days 00:00:01.463783900 +367 59 0 days 00:00:00.852879626 +367 60 0 days 00:00:00.883129500 +367 61 0 days 00:00:01.448134826 +367 62 0 days 00:00:01.451097440 +367 63 0 days 00:00:00.575819446 +367 64 0 days 00:00:01.221874360 +367 65 0 days 00:00:00.709027846 +367 66 0 days 00:00:00.663617366 +367 67 0 days 00:00:00.624587633 +367 68 0 days 00:00:00.743682680 +367 69 0 days 00:00:00.795246973 +367 70 0 days 00:00:00.559113180 +367 71 0 days 00:00:00.768514326 +367 72 0 days 00:00:01.163469600 
+367 73 0 days 00:00:00.453195526 +367 74 0 days 00:00:00.789297820 +367 75 0 days 00:00:00.351961633 +367 76 0 days 00:00:00.402275713 +367 77 0 days 00:00:00.604533780 +367 78 0 days 00:00:00.724313040 +367 79 0 days 00:00:01.287446733 +367 80 0 days 00:00:00.881308286 +367 81 0 days 00:00:00.397876353 +367 82 0 days 00:00:00.605696300 +367 83 0 days 00:00:00.536444693 +367 84 0 days 00:00:00.470821260 +367 85 0 days 00:00:00.697735933 +367 86 0 days 00:00:00.387735280 +367 87 0 days 00:00:01.196852013 +367 88 0 days 00:00:00.393432353 +367 89 0 days 00:00:01.199564433 +367 90 0 days 00:00:01.210446700 +367 91 0 days 00:00:00.647417806 +367 92 0 days 00:00:01.163543106 +367 93 0 days 00:00:00.627936280 +367 94 0 days 00:00:00.588391053 +367 95 0 days 00:00:01.274434613 +367 96 0 days 00:00:00.399780273 +367 97 0 days 00:00:01.235105946 +367 98 0 days 00:00:01.206750153 +367 99 0 days 00:00:00.427914053 +367 100 0 days 00:00:00.837799753 +368 1 0 days 00:00:00.975855391 +368 2 0 days 00:00:01.201442897 +368 4 0 days 00:00:01.631947886 +368 5 0 days 00:00:00.891138082 +368 7 0 days 00:00:01.673649700 +368 10 0 days 00:00:01.681222063 +368 11 0 days 00:00:00.872921746 +368 12 0 days 00:00:01.349851415 +368 13 0 days 00:00:01.529224864 +368 14 0 days 00:00:01.879278597 +368 15 0 days 00:00:00.402665833 +368 16 0 days 00:00:00.825867320 +368 17 0 days 00:00:00.737537220 +368 18 0 days 00:00:00.458744406 +368 19 0 days 00:00:01.382778960 +368 20 0 days 00:00:00.482462150 +368 21 0 days 00:00:00.568025995 +368 22 0 days 00:00:01.691991533 +368 23 0 days 00:00:01.672564771 +368 24 0 days 00:00:01.672101476 +368 25 0 days 00:00:00.503398006 +368 26 0 days 00:00:01.652069372 +368 28 0 days 00:00:01.623703533 +368 29 0 days 00:00:00.314570453 +368 30 0 days 00:00:00.493558521 +368 31 0 days 00:00:00.978730397 +368 32 0 days 00:00:01.121924426 +368 34 0 days 00:00:00.928139210 +368 35 0 days 00:00:00.933167368 +368 37 0 days 00:00:01.611537112 +368 38 0 days 
00:00:00.589040260 +368 40 0 days 00:00:01.699567448 +368 41 0 days 00:00:00.389324433 +368 42 0 days 00:00:00.695111720 +368 43 0 days 00:00:00.882835686 +368 44 0 days 00:00:00.381126700 +368 45 0 days 00:00:00.437956328 +368 46 0 days 00:00:00.542226773 +368 47 0 days 00:00:00.356127646 +368 48 0 days 00:00:00.872266808 +368 49 0 days 00:00:01.623466344 +368 50 0 days 00:00:00.914863836 +368 51 0 days 00:00:00.591806195 +368 52 0 days 00:00:00.440351640 +368 53 0 days 00:00:01.658339880 +368 54 0 days 00:00:00.659604946 +368 56 0 days 00:00:00.517268205 +368 58 0 days 00:00:01.639956922 +368 59 0 days 00:00:00.822505140 +368 62 0 days 00:00:01.552530022 +368 63 0 days 00:00:01.689561968 +368 64 0 days 00:00:00.856469523 +368 65 0 days 00:00:01.554028800 +368 66 0 days 00:00:00.864231729 +368 67 0 days 00:00:01.040939316 +368 68 0 days 00:00:00.649912060 +368 69 0 days 00:00:00.701877648 +368 71 0 days 00:00:01.678284263 +368 72 0 days 00:00:01.815003150 +368 73 0 days 00:00:00.843057600 +368 74 0 days 00:00:01.629963954 +368 75 0 days 00:00:00.524876846 +368 77 0 days 00:00:00.922399595 +368 78 0 days 00:00:00.835662046 +368 80 0 days 00:00:01.163664074 +368 81 0 days 00:00:00.617442246 +368 82 0 days 00:00:01.818227525 +368 83 0 days 00:00:00.993262585 +368 84 0 days 00:00:00.537719769 +368 85 0 days 00:00:00.391093135 +368 86 0 days 00:00:00.900620247 +368 88 0 days 00:00:00.529465275 +368 89 0 days 00:00:00.750080200 +368 90 0 days 00:00:00.432796865 +368 92 0 days 00:00:01.798265113 +368 93 0 days 00:00:00.459181845 +368 94 0 days 00:00:01.734463400 +368 95 0 days 00:00:01.164740520 +368 96 0 days 00:00:01.750492590 +368 97 0 days 00:00:00.907586377 +368 98 0 days 00:00:00.422011760 +369 1 0 days 00:00:00.372056386 +369 2 0 days 00:00:00.403650100 +369 3 0 days 00:00:00.403972300 +369 4 0 days 00:00:00.768845120 +369 5 0 days 00:00:00.183344946 +369 6 0 days 00:00:00.567049413 +369 7 0 days 00:00:00.561568313 +369 8 0 days 00:00:00.313197320 +369 9 0 days 
00:00:00.223716366 +369 10 0 days 00:00:00.603666353 +369 11 0 days 00:00:00.651335293 +369 12 0 days 00:00:00.646664453 +369 13 0 days 00:00:00.132813733 +369 14 0 days 00:00:00.660109146 +369 15 0 days 00:00:00.699002653 +369 16 0 days 00:00:00.685321666 +369 17 0 days 00:00:00.356823933 +369 18 0 days 00:00:00.195710420 +369 19 0 days 00:00:00.375005833 +369 20 0 days 00:00:00.349280226 +369 21 0 days 00:00:00.229897846 +369 22 0 days 00:00:00.191051306 +369 23 0 days 00:00:00.226198646 +369 24 0 days 00:00:00.766361460 +369 25 0 days 00:00:00.450618173 +369 26 0 days 00:00:00.217369993 +369 27 0 days 00:00:00.718971600 +369 28 0 days 00:00:00.403982786 +369 29 0 days 00:00:00.390648233 +369 30 0 days 00:00:00.410131853 +369 31 0 days 00:00:00.415083286 +369 32 0 days 00:00:00.232291086 +369 33 0 days 00:00:00.228697680 +369 34 0 days 00:00:00.371639853 +369 35 0 days 00:00:00.404887213 +369 36 0 days 00:00:00.646652820 +369 37 0 days 00:00:00.269643233 +369 38 0 days 00:00:00.556475486 +369 39 0 days 00:00:00.327292373 +369 40 0 days 00:00:00.601991900 +369 41 0 days 00:00:00.213260820 +369 42 0 days 00:00:00.192410620 +369 43 0 days 00:00:00.553611086 +369 44 0 days 00:00:00.303455020 +369 45 0 days 00:00:00.172630333 +369 46 0 days 00:00:00.181068026 +369 47 0 days 00:00:00.192672253 +369 48 0 days 00:00:00.565063313 +369 49 0 days 00:00:00.308990046 +369 50 0 days 00:00:00.320843346 +369 51 0 days 00:00:00.178523973 +369 52 0 days 00:00:00.354434046 +369 53 0 days 00:00:00.179840980 +369 54 0 days 00:00:00.579168053 +369 55 0 days 00:00:00.199335913 +369 56 0 days 00:00:00.593179266 +369 57 0 days 00:00:00.562837486 +369 58 0 days 00:00:00.190838446 +369 59 0 days 00:00:00.608207053 +369 60 0 days 00:00:00.307921113 +369 61 0 days 00:00:00.567749206 +369 62 0 days 00:00:00.176599140 +369 63 0 days 00:00:00.333033280 +369 64 0 days 00:00:00.564520373 +369 65 0 days 00:00:00.318855553 +369 66 0 days 00:00:00.185533113 +369 67 0 days 00:00:00.172307626 +369 68 
0 days 00:00:00.209131026 +369 69 0 days 00:00:00.182572526 +369 70 0 days 00:00:00.198744140 +369 71 0 days 00:00:00.567951926 +369 72 0 days 00:00:00.355722373 +369 73 0 days 00:00:00.555323180 +369 74 0 days 00:00:00.331716253 +369 75 0 days 00:00:00.183017400 +369 76 0 days 00:00:00.565004660 +369 77 0 days 00:00:00.186541573 +369 78 0 days 00:00:00.402924433 +369 79 0 days 00:00:00.174067560 +369 80 0 days 00:00:00.183694133 +369 81 0 days 00:00:00.314465913 +369 82 0 days 00:00:00.329413020 +369 83 0 days 00:00:00.177864893 +369 84 0 days 00:00:00.199101720 +369 85 0 days 00:00:00.181178120 +369 86 0 days 00:00:00.190204086 +369 87 0 days 00:00:00.402714626 +369 88 0 days 00:00:00.172542453 +369 89 0 days 00:00:00.315103673 +369 90 0 days 00:00:00.319212326 +369 91 0 days 00:00:00.573115986 +369 92 0 days 00:00:00.184975040 +369 93 0 days 00:00:00.180334386 +369 94 0 days 00:00:00.557997346 +369 95 0 days 00:00:00.320755253 +369 96 0 days 00:00:00.306400086 +369 97 0 days 00:00:00.313571320 +369 98 0 days 00:00:00.528156026 +369 99 0 days 00:00:00.456252486 +369 100 0 days 00:00:00.503974433 +370 1 0 days 00:00:00.655324482 +370 2 0 days 00:00:00.572967440 +370 3 0 days 00:00:00.291241095 +370 4 0 days 00:00:00.185142495 +370 5 0 days 00:00:00.342025836 +370 6 0 days 00:00:00.597227032 +370 7 0 days 00:00:00.292215790 +370 8 0 days 00:00:00.326349828 +370 9 0 days 00:00:00.347425080 +370 11 0 days 00:00:00.634522706 +370 12 0 days 00:00:00.346045698 +370 13 0 days 00:00:00.331792632 +370 14 0 days 00:00:00.199820710 +370 15 0 days 00:00:00.390027636 +370 17 0 days 00:00:00.259408146 +370 18 0 days 00:00:00.159130453 +370 20 0 days 00:00:00.229541332 +370 21 0 days 00:00:00.329098757 +370 22 0 days 00:00:00.345327986 +370 24 0 days 00:00:00.598629240 +370 25 0 days 00:00:00.332428165 +370 27 0 days 00:00:00.579332880 +370 29 0 days 00:00:00.523906615 +370 31 0 days 00:00:00.620048104 +370 32 0 days 00:00:00.340230905 +370 33 0 days 00:00:00.213837500 +370 34 0 
days 00:00:00.581237031 +370 35 0 days 00:00:00.233053916 +370 36 0 days 00:00:00.679964664 +370 37 0 days 00:00:00.295813250 +370 38 0 days 00:00:00.199165644 +370 39 0 days 00:00:00.322993306 +370 40 0 days 00:00:00.197414541 +370 41 0 days 00:00:00.426731532 +370 42 0 days 00:00:00.347093570 +370 43 0 days 00:00:00.659726531 +370 44 0 days 00:00:00.176779340 +370 45 0 days 00:00:00.651341326 +370 46 0 days 00:00:00.218283444 +370 47 0 days 00:00:00.667083950 +370 48 0 days 00:00:00.357975390 +370 51 0 days 00:00:00.256845546 +370 52 0 days 00:00:00.605337165 +370 55 0 days 00:00:00.230932364 +370 56 0 days 00:00:00.264865473 +370 57 0 days 00:00:00.621061924 +370 58 0 days 00:00:00.197197700 +370 59 0 days 00:00:00.566968786 +370 60 0 days 00:00:00.615833662 +370 61 0 days 00:00:00.634088200 +370 62 0 days 00:00:00.167121010 +370 63 0 days 00:00:00.260340493 +370 64 0 days 00:00:00.338019367 +370 65 0 days 00:00:00.649667581 +370 66 0 days 00:00:00.264471440 +370 67 0 days 00:00:00.634249933 +370 68 0 days 00:00:00.178036640 +370 69 0 days 00:00:00.178986963 +370 70 0 days 00:00:00.563253486 +370 71 0 days 00:00:00.618039446 +370 72 0 days 00:00:00.610049220 +370 73 0 days 00:00:00.596038577 +370 74 0 days 00:00:00.237330482 +370 75 0 days 00:00:00.236869726 +370 76 0 days 00:00:00.342566951 +370 77 0 days 00:00:00.448591113 +370 78 0 days 00:00:00.176436032 +370 79 0 days 00:00:00.634815720 +370 80 0 days 00:00:00.157528553 +370 81 0 days 00:00:00.616889842 +370 82 0 days 00:00:00.327564314 +370 83 0 days 00:00:00.232054067 +370 84 0 days 00:00:00.225589493 +370 85 0 days 00:00:00.418651514 +370 86 0 days 00:00:00.173200080 +370 87 0 days 00:00:00.532839875 +370 88 0 days 00:00:00.388647646 +370 89 0 days 00:00:00.569497125 +370 90 0 days 00:00:00.330775105 +370 91 0 days 00:00:00.341990476 +370 92 0 days 00:00:00.340030997 +370 93 0 days 00:00:00.199184602 +370 94 0 days 00:00:00.341339296 +370 95 0 days 00:00:00.165693175 +370 96 0 days 00:00:00.164642310 
+370 97 0 days 00:00:00.198030313 +370 98 0 days 00:00:00.626027633 +370 99 0 days 00:00:00.326339006 +370 100 0 days 00:00:00.204929552 +371 1 0 days 00:00:01.844228198 +371 2 0 days 00:00:01.544544697 +371 3 0 days 00:00:00.508566771 +371 4 0 days 00:00:00.515696794 +371 5 0 days 00:00:00.953380431 +371 6 0 days 00:00:01.883134330 +371 7 0 days 00:00:00.551063268 +371 8 0 days 00:00:00.511447785 +371 9 0 days 00:00:01.543770140 +371 10 0 days 00:00:01.764495688 +371 11 0 days 00:00:01.886520384 +371 12 0 days 00:00:01.128173748 +371 13 0 days 00:00:01.129910130 +371 14 0 days 00:00:00.595346727 +371 15 0 days 00:00:01.841585020 +371 16 0 days 00:00:02.019713068 +371 17 0 days 00:00:01.695374405 +371 18 0 days 00:00:01.910241148 +371 19 0 days 00:00:01.980980861 +371 20 0 days 00:00:01.674672948 +371 21 0 days 00:00:01.636858145 +371 22 0 days 00:00:00.493295155 +371 23 0 days 00:00:00.585144000 +371 24 0 days 00:00:01.523743743 +371 25 0 days 00:00:00.483559881 +371 26 0 days 00:00:00.821861227 +371 27 0 days 00:00:00.455818665 +371 28 0 days 00:00:01.694056120 +371 29 0 days 00:00:00.897785135 +371 30 0 days 00:00:01.646735351 +371 31 0 days 00:00:00.494134960 +371 32 0 days 00:00:01.633739692 +371 33 0 days 00:00:01.718962470 +371 34 0 days 00:00:01.791957592 +371 35 0 days 00:00:01.733549980 +371 36 0 days 00:00:01.660589125 +371 37 0 days 00:00:01.786551120 +371 38 0 days 00:00:00.764526856 +371 39 0 days 00:00:00.491846312 +371 40 0 days 00:00:02.001804060 +371 41 0 days 00:00:00.795679876 +371 42 0 days 00:00:00.832580989 +371 43 0 days 00:00:00.472608536 +371 44 0 days 00:00:01.688070533 +371 45 0 days 00:00:00.537678058 +371 46 0 days 00:00:01.807257632 +371 47 0 days 00:00:00.556273332 +371 48 0 days 00:00:00.888684146 +371 49 0 days 00:00:00.491052036 +371 50 0 days 00:00:00.784239442 +371 51 0 days 00:00:01.106658400 +371 52 0 days 00:00:01.666759232 +371 53 0 days 00:00:01.673207355 +371 54 0 days 00:00:00.826321910 +371 55 0 days 00:00:00.443113092 
+371 56 0 days 00:00:00.484350081 +371 57 0 days 00:00:00.448622651 +371 58 0 days 00:00:00.533717180 +371 59 0 days 00:00:00.474386040 +371 60 0 days 00:00:00.529192130 +371 61 0 days 00:00:01.571269464 +371 62 0 days 00:00:00.909583241 +371 63 0 days 00:00:00.450110720 +371 64 0 days 00:00:00.835870210 +371 65 0 days 00:00:00.812331514 +371 66 0 days 00:00:01.826977170 +371 67 0 days 00:00:01.813992862 +371 68 0 days 00:00:01.759102750 +371 69 0 days 00:00:00.659278328 +371 70 0 days 00:00:00.789602945 +371 71 0 days 00:00:00.673716674 +371 72 0 days 00:00:01.551564953 +371 73 0 days 00:00:00.439499477 +371 74 0 days 00:00:01.742113740 +371 75 0 days 00:00:00.438170616 +371 76 0 days 00:00:00.484496595 +371 77 0 days 00:00:00.812672954 +371 78 0 days 00:00:00.434941816 +371 79 0 days 00:00:01.695324337 +371 80 0 days 00:00:01.693535892 +371 81 0 days 00:00:01.031788776 +371 82 0 days 00:00:00.510967768 +371 83 0 days 00:00:01.638755360 +371 84 0 days 00:00:00.570485751 +371 85 0 days 00:00:00.443102835 +371 86 0 days 00:00:00.826483328 +371 87 0 days 00:00:00.896850890 +371 88 0 days 00:00:01.065764878 +371 89 0 days 00:00:00.844557784 +371 90 0 days 00:00:00.510156994 +371 91 0 days 00:00:01.661236492 +371 92 0 days 00:00:00.932461103 +371 93 0 days 00:00:01.137988100 +371 94 0 days 00:00:00.515088082 +371 95 0 days 00:00:00.494522095 +371 96 0 days 00:00:00.924473400 +371 97 0 days 00:00:00.481134825 +371 98 0 days 00:00:00.421840460 +371 99 0 days 00:00:00.661726448 +371 100 0 days 00:00:00.908351062 +372 1 0 days 00:00:00.249081770 +372 2 0 days 00:00:00.581997361 +372 3 0 days 00:00:00.442750260 +372 4 0 days 00:00:00.288428203 +372 5 0 days 00:00:00.453104653 +372 6 0 days 00:00:00.821853928 +372 7 0 days 00:00:00.430465396 +372 8 0 days 00:00:00.538119214 +372 9 0 days 00:00:00.233328933 +372 10 0 days 00:00:00.232657450 +372 11 0 days 00:00:00.237446041 +372 12 0 days 00:00:00.500441110 +372 13 0 days 00:00:00.861944968 +372 14 0 days 00:00:00.311302811 
+372 15 0 days 00:00:00.740876317 +372 16 0 days 00:00:00.794244934 +372 17 0 days 00:00:01.012174000 +372 18 0 days 00:00:00.968257957 +372 19 0 days 00:00:00.861606915 +372 20 0 days 00:00:00.890592312 +372 21 0 days 00:00:00.865843609 +372 22 0 days 00:00:00.965963991 +372 23 0 days 00:00:00.290198230 +372 24 0 days 00:00:00.368563795 +372 25 0 days 00:00:00.345366648 +372 26 0 days 00:00:00.474805420 +372 27 0 days 00:00:00.466694022 +372 28 0 days 00:00:00.409163311 +372 29 0 days 00:00:00.275242902 +372 30 0 days 00:00:00.849226497 +372 31 0 days 00:00:00.833679240 +372 32 0 days 00:00:00.459088702 +372 33 0 days 00:00:00.297109310 +372 34 0 days 00:00:00.949699261 +372 35 0 days 00:00:00.548540510 +372 36 0 days 00:00:00.265547960 +372 37 0 days 00:00:00.256151307 +372 38 0 days 00:00:00.850486037 +372 39 0 days 00:00:00.626719507 +372 40 0 days 00:00:01.046292512 +372 41 0 days 00:00:00.590909074 +372 42 0 days 00:00:00.535971520 +372 43 0 days 00:00:00.863468605 +372 44 0 days 00:00:00.238909958 +372 45 0 days 00:00:00.928905123 +372 46 0 days 00:00:00.769193480 +372 47 0 days 00:00:00.291911207 +372 48 0 days 00:00:00.798187064 +372 49 0 days 00:00:00.972389249 +372 50 0 days 00:00:00.907286030 +372 51 0 days 00:00:00.968748513 +372 52 0 days 00:00:00.895002820 +372 53 0 days 00:00:00.932442601 +372 54 0 days 00:00:00.832982108 +372 55 0 days 00:00:00.566744077 +372 56 0 days 00:00:00.239629232 +372 57 0 days 00:00:00.305937670 +372 58 0 days 00:00:00.417891184 +372 59 0 days 00:00:00.918196682 +372 60 0 days 00:00:00.951381141 +372 61 0 days 00:00:00.269841657 +372 62 0 days 00:00:00.456229086 +372 63 0 days 00:00:00.857124392 +372 64 0 days 00:00:00.551237232 +372 65 0 days 00:00:00.241162147 +372 66 0 days 00:00:00.296506225 +372 67 0 days 00:00:00.467237954 +372 68 0 days 00:00:00.410997512 +372 69 0 days 00:00:00.275132568 +372 70 0 days 00:00:00.483469737 +372 71 0 days 00:00:00.210374050 +372 72 0 days 00:00:00.557822270 +372 73 0 days 
00:00:00.418826893 +372 74 0 days 00:00:00.845317701 +372 75 0 days 00:00:00.908608973 +372 76 0 days 00:00:00.463988674 +372 77 0 days 00:00:00.842964270 +372 78 0 days 00:00:00.833559758 +372 79 0 days 00:00:00.890684443 +372 80 0 days 00:00:00.265020680 +372 81 0 days 00:00:00.742865707 +372 82 0 days 00:00:00.940412423 +372 83 0 days 00:00:00.225408347 +372 84 0 days 00:00:00.225784133 +372 85 0 days 00:00:00.368702874 +372 86 0 days 00:00:00.438616766 +372 87 0 days 00:00:00.412273780 +372 88 0 days 00:00:00.848066208 +372 89 0 days 00:00:00.248615177 +372 90 0 days 00:00:00.907584850 +372 91 0 days 00:00:00.441088235 +372 92 0 days 00:00:00.537022304 +372 93 0 days 00:00:00.841644656 +372 94 0 days 00:00:00.953639307 +372 95 0 days 00:00:00.543212460 +372 96 0 days 00:00:00.554783022 +372 97 0 days 00:00:00.430832327 +372 98 0 days 00:00:00.748580968 +372 99 0 days 00:00:00.467876702 +372 100 0 days 00:00:00.239703026 +373 1 0 days 00:00:00.747911834 +373 2 0 days 00:00:01.172201683 +373 3 0 days 00:00:00.323365206 +373 4 0 days 00:00:01.256148435 +373 5 0 days 00:00:01.276381855 +373 6 0 days 00:00:00.758156390 +373 7 0 days 00:00:00.684652786 +373 8 0 days 00:00:01.220410717 +373 9 0 days 00:00:00.730668265 +373 10 0 days 00:00:00.725755225 +373 11 0 days 00:00:00.708441518 +373 12 0 days 00:00:00.684018907 +373 13 0 days 00:00:00.757014086 +373 14 0 days 00:00:00.705552123 +373 15 0 days 00:00:00.666863518 +373 16 0 days 00:00:00.380544503 +373 17 0 days 00:00:00.394963806 +373 18 0 days 00:00:00.572644854 +373 19 0 days 00:00:00.658495449 +373 20 0 days 00:00:01.192544515 +373 21 0 days 00:00:00.348937800 +373 22 0 days 00:00:00.397919126 +373 23 0 days 00:00:01.229126808 +373 24 0 days 00:00:00.300131900 +373 25 0 days 00:00:00.683107674 +373 26 0 days 00:00:00.654947966 +373 27 0 days 00:00:00.809005355 +373 28 0 days 00:00:00.682394991 +373 29 0 days 00:00:00.721712881 +373 30 0 days 00:00:00.342815886 +373 31 0 days 00:00:01.284021625 +373 32 0 days 
00:00:00.650145982 +373 33 0 days 00:00:00.393751678 +373 34 0 days 00:00:01.165866830 +373 35 0 days 00:00:00.314330000 +373 36 0 days 00:00:00.693743801 +373 37 0 days 00:00:00.318180653 +373 38 0 days 00:00:01.354481281 +373 39 0 days 00:00:00.740495738 +373 40 0 days 00:00:01.142139280 +373 41 0 days 00:00:01.223137160 +373 42 0 days 00:00:01.214590880 +373 43 0 days 00:00:01.266151122 +373 44 0 days 00:00:00.324240493 +373 45 0 days 00:00:01.196154965 +373 46 0 days 00:00:01.247871240 +373 47 0 days 00:00:00.415307345 +373 48 0 days 00:00:00.315648800 +373 49 0 days 00:00:01.242309711 +373 50 0 days 00:00:01.135168828 +373 51 0 days 00:00:01.190388410 +373 52 0 days 00:00:00.645388220 +373 53 0 days 00:00:00.701131365 +373 54 0 days 00:00:00.679541192 +373 55 0 days 00:00:00.415910780 +373 56 0 days 00:00:01.075568070 +373 57 0 days 00:00:01.223117880 +373 58 0 days 00:00:00.660153815 +373 59 0 days 00:00:01.221283417 +373 60 0 days 00:00:00.407923575 +373 61 0 days 00:00:01.203810240 +373 62 0 days 00:00:00.293285486 +373 63 0 days 00:00:01.036950551 +373 64 0 days 00:00:00.298623680 +373 65 0 days 00:00:00.673222095 +373 66 0 days 00:00:00.673026558 +373 67 0 days 00:00:00.818378408 +373 68 0 days 00:00:00.333136866 +373 69 0 days 00:00:00.701892942 +373 70 0 days 00:00:00.678242314 +373 71 0 days 00:00:00.550473820 +373 72 0 days 00:00:01.258992632 +373 73 0 days 00:00:01.052837814 +373 74 0 days 00:00:01.242693624 +373 75 0 days 00:00:00.339767740 +373 76 0 days 00:00:00.618276470 +373 77 0 days 00:00:00.296218786 +373 78 0 days 00:00:00.662400243 +373 80 0 days 00:00:01.271204284 +373 81 0 days 00:00:01.215513587 +373 82 0 days 00:00:00.388788306 +373 83 0 days 00:00:01.026309120 +373 84 0 days 00:00:01.223552938 +373 85 0 days 00:00:00.735044625 +373 86 0 days 00:00:00.401788683 +373 87 0 days 00:00:00.723183380 +373 88 0 days 00:00:01.151052053 +373 89 0 days 00:00:00.395264193 +373 90 0 days 00:00:00.427575238 +373 91 0 days 00:00:00.287027460 +373 92 
0 days 00:00:00.294115226 +373 93 0 days 00:00:00.387107009 +373 94 0 days 00:00:00.675368877 +373 95 0 days 00:00:01.167189620 +373 96 0 days 00:00:01.171254891 +373 98 0 days 00:00:01.217055265 +373 99 0 days 00:00:00.689445125 +373 100 0 days 00:00:00.316921690 +374 1 0 days 00:00:00.651742393 +374 2 0 days 00:00:00.160276335 +374 3 0 days 00:00:00.334914066 +374 4 0 days 00:00:00.266539220 +374 5 0 days 00:00:00.601695117 +374 6 0 days 00:00:00.358742468 +374 7 0 days 00:00:00.166016073 +374 8 0 days 00:00:00.347169738 +374 9 0 days 00:00:00.149776920 +374 10 0 days 00:00:00.619830653 +374 11 0 days 00:00:00.330802714 +374 12 0 days 00:00:00.619046540 +374 13 0 days 00:00:00.626403120 +374 14 0 days 00:00:00.624275330 +374 15 0 days 00:00:00.352154702 +374 16 0 days 00:00:00.178894230 +374 17 0 days 00:00:00.170317660 +374 18 0 days 00:00:00.196273595 +374 19 0 days 00:00:00.196752700 +374 20 0 days 00:00:00.349870993 +374 21 0 days 00:00:00.542299469 +374 22 0 days 00:00:00.282738306 +374 23 0 days 00:00:00.633313110 +374 24 0 days 00:00:00.260030310 +374 25 0 days 00:00:00.631179049 +374 26 0 days 00:00:00.164575965 +374 27 0 days 00:00:00.631074514 +374 28 0 days 00:00:00.632475146 +374 29 0 days 00:00:00.266934493 +374 30 0 days 00:00:00.636808832 +374 31 0 days 00:00:00.630592980 +374 32 0 days 00:00:00.256067455 +374 33 0 days 00:00:00.227546760 +374 34 0 days 00:00:00.297340375 +374 35 0 days 00:00:00.233175640 +374 36 0 days 00:00:00.545287675 +374 37 0 days 00:00:00.214469313 +374 38 0 days 00:00:00.255891600 +374 39 0 days 00:00:00.649692133 +374 40 0 days 00:00:00.350092566 +374 41 0 days 00:00:00.608847945 +374 42 0 days 00:00:00.150344413 +374 43 0 days 00:00:00.254679940 +374 44 0 days 00:00:00.656040610 +374 45 0 days 00:00:00.164613400 +374 46 0 days 00:00:00.633024842 +374 47 0 days 00:00:00.273872200 +374 48 0 days 00:00:00.654532886 +374 49 0 days 00:00:00.654661378 +374 50 0 days 00:00:00.641481118 +374 51 0 days 00:00:00.634693327 +374 52 0 
days 00:00:00.543080394 +374 53 0 days 00:00:00.353429036 +374 54 0 days 00:00:00.723715243 +374 55 0 days 00:00:00.160767933 +374 56 0 days 00:00:00.657342940 +374 57 0 days 00:00:00.168606833 +374 59 0 days 00:00:00.676249941 +374 60 0 days 00:00:00.286025106 +374 61 0 days 00:00:00.210408498 +374 62 0 days 00:00:00.350017994 +374 63 0 days 00:00:00.282935260 +374 64 0 days 00:00:00.184319765 +374 65 0 days 00:00:00.298175920 +374 66 0 days 00:00:00.692061304 +374 67 0 days 00:00:00.359249223 +374 68 0 days 00:00:00.399154250 +374 69 0 days 00:00:00.667786916 +374 70 0 days 00:00:00.201062501 +374 71 0 days 00:00:00.156646693 +374 72 0 days 00:00:00.178483375 +374 73 0 days 00:00:00.151484120 +374 74 0 days 00:00:00.196508670 +374 75 0 days 00:00:00.358994800 +374 76 0 days 00:00:00.273887926 +374 77 0 days 00:00:00.182157053 +374 78 0 days 00:00:00.286709286 +374 79 0 days 00:00:00.637419716 +374 80 0 days 00:00:00.199698680 +374 81 0 days 00:00:00.172921384 +374 82 0 days 00:00:00.212234457 +374 84 0 days 00:00:00.386286927 +374 85 0 days 00:00:00.174467865 +374 86 0 days 00:00:00.171000406 +374 87 0 days 00:00:00.274915273 +374 88 0 days 00:00:00.638129090 +374 89 0 days 00:00:00.624184255 +374 90 0 days 00:00:00.645612148 +374 91 0 days 00:00:00.274980080 +374 92 0 days 00:00:00.618918891 +374 93 0 days 00:00:00.205782875 +374 94 0 days 00:00:00.633258436 +374 95 0 days 00:00:00.375150460 +374 96 0 days 00:00:00.164802545 +374 97 0 days 00:00:00.152553880 +374 99 0 days 00:00:00.179056940 +374 100 0 days 00:00:00.273005240 +375 1 0 days 00:00:00.147403480 +375 2 0 days 00:00:00.115645300 +375 3 0 days 00:00:00.102587400 +375 4 0 days 00:00:00.188983080 +375 5 0 days 00:00:00.187379033 +375 6 0 days 00:00:00.119382580 +375 7 0 days 00:00:00.139763140 +375 8 0 days 00:00:00.131029033 +375 9 0 days 00:00:00.129240000 +375 10 0 days 00:00:00.111984973 +375 11 0 days 00:00:00.178796960 +375 12 0 days 00:00:00.103996666 +375 13 0 days 00:00:00.121112420 +375 14 0 
days 00:00:00.141968913 +375 15 0 days 00:00:00.128815793 +375 16 0 days 00:00:00.134339626 +375 17 0 days 00:00:00.182148500 +375 18 0 days 00:00:00.107471666 +375 19 0 days 00:00:00.118455220 +375 20 0 days 00:00:00.113536600 +375 21 0 days 00:00:00.110490100 +375 22 0 days 00:00:00.136796926 +375 23 0 days 00:00:00.122070453 +375 24 0 days 00:00:00.175304146 +375 25 0 days 00:00:00.104877853 +375 26 0 days 00:00:00.174223780 +375 27 0 days 00:00:00.122170486 +375 28 0 days 00:00:00.105994473 +375 29 0 days 00:00:00.134065660 +375 30 0 days 00:00:00.103093513 +375 31 0 days 00:00:00.109095853 +375 32 0 days 00:00:00.133173786 +375 33 0 days 00:00:00.175489786 +375 34 0 days 00:00:00.172871600 +375 35 0 days 00:00:00.109721033 +375 36 0 days 00:00:00.134769093 +375 37 0 days 00:00:00.131683366 +375 38 0 days 00:00:00.111060080 +375 39 0 days 00:00:00.114292880 +375 40 0 days 00:00:00.142737206 +375 41 0 days 00:00:00.110347800 +375 42 0 days 00:00:00.105296740 +375 43 0 days 00:00:00.186533726 +375 44 0 days 00:00:00.186637373 +375 45 0 days 00:00:00.127321633 +375 46 0 days 00:00:00.134737920 +375 47 0 days 00:00:00.135963340 +375 48 0 days 00:00:00.129970033 +375 49 0 days 00:00:00.180631700 +375 50 0 days 00:00:00.109179380 +375 51 0 days 00:00:00.120740113 +375 52 0 days 00:00:00.109314613 +375 53 0 days 00:00:00.132864213 +375 54 0 days 00:00:00.171853773 +375 55 0 days 00:00:00.111514013 +375 56 0 days 00:00:00.141802026 +375 57 0 days 00:00:00.152906153 +375 58 0 days 00:00:00.130404173 +375 59 0 days 00:00:00.172606626 +375 60 0 days 00:00:00.134251313 +375 61 0 days 00:00:00.134234580 +375 62 0 days 00:00:00.176175746 +375 63 0 days 00:00:00.174786800 +375 64 0 days 00:00:00.108062726 +375 65 0 days 00:00:00.108674966 +375 66 0 days 00:00:00.108512466 +375 67 0 days 00:00:00.176314180 +375 68 0 days 00:00:00.132262746 +375 69 0 days 00:00:00.134691833 +375 70 0 days 00:00:00.125251540 +375 71 0 days 00:00:00.113394706 +375 72 0 days 00:00:00.121504133 
+375 73 0 days 00:00:00.147470160 +375 74 0 days 00:00:00.133266446 +375 75 0 days 00:00:00.125218226 +375 76 0 days 00:00:00.188498060 +375 77 0 days 00:00:00.148472533 +375 78 0 days 00:00:00.170936280 +375 79 0 days 00:00:00.118402866 +375 80 0 days 00:00:00.126290973 +375 81 0 days 00:00:00.185773680 +375 82 0 days 00:00:00.132184113 +375 83 0 days 00:00:00.172671220 +375 84 0 days 00:00:00.178067453 +375 85 0 days 00:00:00.145303506 +375 86 0 days 00:00:00.171704386 +375 87 0 days 00:00:00.135458580 +375 88 0 days 00:00:00.193868220 +375 89 0 days 00:00:00.134775760 +375 90 0 days 00:00:00.120033133 +375 91 0 days 00:00:00.134019326 +375 92 0 days 00:00:00.199257733 +375 93 0 days 00:00:00.145600220 +375 94 0 days 00:00:00.114113826 +375 95 0 days 00:00:00.119851026 +375 96 0 days 00:00:00.107401253 +375 97 0 days 00:00:00.137385866 +375 98 0 days 00:00:00.127476773 +375 99 0 days 00:00:00.104592620 +375 100 0 days 00:00:00.173004980 +376 1 0 days 00:00:00.196705370 +376 2 0 days 00:00:00.168599632 +376 3 0 days 00:00:00.135297190 +376 4 0 days 00:00:00.123171540 +376 5 0 days 00:00:00.130965288 +376 6 0 days 00:00:00.152285145 +376 7 0 days 00:00:00.130565426 +376 8 0 days 00:00:00.155485300 +376 9 0 days 00:00:00.155443516 +376 10 0 days 00:00:00.142622553 +376 11 0 days 00:00:00.199986360 +376 12 0 days 00:00:00.148221546 +376 13 0 days 00:00:00.186818033 +376 14 0 days 00:00:00.158531810 +376 15 0 days 00:00:00.136916176 +376 16 0 days 00:00:00.122932360 +376 17 0 days 00:00:00.150567405 +376 18 0 days 00:00:00.146187596 +376 19 0 days 00:00:00.157950908 +376 20 0 days 00:00:00.109378460 +376 21 0 days 00:00:00.141138956 +376 22 0 days 00:00:00.128990080 +376 23 0 days 00:00:00.127431916 +376 24 0 days 00:00:00.117933500 +376 25 0 days 00:00:00.165414596 +376 26 0 days 00:00:00.123246673 +376 27 0 days 00:00:00.178190892 +376 28 0 days 00:00:00.117188895 +376 29 0 days 00:00:00.157324148 +376 30 0 days 00:00:00.109882160 +376 31 0 days 00:00:00.135275297 
+376 32 0 days 00:00:00.111577153 +376 33 0 days 00:00:00.128362326 +376 34 0 days 00:00:00.124056688 +376 35 0 days 00:00:00.153011836 +376 36 0 days 00:00:00.157185460 +376 37 0 days 00:00:00.150840646 +376 38 0 days 00:00:00.164195048 +376 39 0 days 00:00:00.121537090 +376 40 0 days 00:00:00.161148916 +376 41 0 days 00:00:00.215008305 +376 42 0 days 00:00:00.135010448 +376 43 0 days 00:00:00.212312470 +376 44 0 days 00:00:00.135633613 +376 45 0 days 00:00:00.191703473 +376 46 0 days 00:00:00.150032876 +376 47 0 days 00:00:00.218412088 +376 48 0 days 00:00:00.144422793 +376 49 0 days 00:00:00.169135753 +376 50 0 days 00:00:00.178039745 +376 51 0 days 00:00:00.177130922 +376 52 0 days 00:00:00.134929980 +376 53 0 days 00:00:00.153713208 +376 54 0 days 00:00:00.204896305 +376 55 0 days 00:00:00.210570265 +376 56 0 days 00:00:00.130584468 +376 57 0 days 00:00:00.183556746 +376 58 0 days 00:00:00.182026977 +376 59 0 days 00:00:00.139805046 +376 60 0 days 00:00:00.156804300 +376 61 0 days 00:00:00.181053728 +376 62 0 days 00:00:00.157081820 +376 63 0 days 00:00:00.140399100 +376 64 0 days 00:00:00.146776370 +376 65 0 days 00:00:00.179190932 +376 66 0 days 00:00:00.229221816 +376 67 0 days 00:00:00.119894100 +376 68 0 days 00:00:00.120967485 +376 69 0 days 00:00:00.180104960 +376 70 0 days 00:00:00.240491432 +376 71 0 days 00:00:00.166898412 +376 72 0 days 00:00:00.196221720 +376 73 0 days 00:00:00.169323693 +376 74 0 days 00:00:00.140648024 +376 75 0 days 00:00:00.238865563 +376 76 0 days 00:00:00.179003793 +376 77 0 days 00:00:00.187285566 +376 78 0 days 00:00:00.179764217 +376 79 0 days 00:00:00.123488490 +376 80 0 days 00:00:00.136043473 +376 81 0 days 00:00:00.164962950 +376 82 0 days 00:00:00.232953736 +376 83 0 days 00:00:00.132152875 +376 84 0 days 00:00:00.180652260 +376 85 0 days 00:00:00.134389566 +376 86 0 days 00:00:00.158157485 +376 87 0 days 00:00:00.139636173 +376 88 0 days 00:00:00.135414488 +376 89 0 days 00:00:00.181921602 +376 90 0 days 
00:00:00.109828260 +376 91 0 days 00:00:00.167735741 +376 92 0 days 00:00:00.153008450 +376 93 0 days 00:00:00.195594920 +376 94 0 days 00:00:00.143582505 +376 95 0 days 00:00:00.149702560 +376 96 0 days 00:00:00.144907445 +376 97 0 days 00:00:00.105834620 +376 98 0 days 00:00:00.209147820 +376 99 0 days 00:00:00.216666544 +376 100 0 days 00:00:00.216749220 +377 1 0 days 00:00:00.063412613 +377 2 0 days 00:00:00.071457466 +377 3 0 days 00:00:00.097853700 +377 4 0 days 00:00:00.074817073 +377 5 0 days 00:00:00.071650473 +377 6 0 days 00:00:00.078460406 +377 7 0 days 00:00:00.078999106 +377 8 0 days 00:00:00.106053406 +377 9 0 days 00:00:00.074861020 +377 10 0 days 00:00:00.076147026 +377 11 0 days 00:00:00.071637780 +377 12 0 days 00:00:00.069826340 +377 13 0 days 00:00:00.063033286 +377 14 0 days 00:00:00.071163580 +377 15 0 days 00:00:00.068758866 +377 16 0 days 00:00:00.068818700 +377 17 0 days 00:00:00.069336213 +377 18 0 days 00:00:00.069744813 +377 19 0 days 00:00:00.070171460 +377 20 0 days 00:00:00.074967660 +377 21 0 days 00:00:00.098640220 +377 22 0 days 00:00:00.075920740 +377 23 0 days 00:00:00.074505346 +377 24 0 days 00:00:00.104044313 +377 25 0 days 00:00:00.098005746 +377 26 0 days 00:00:00.068970740 +377 27 0 days 00:00:00.062593393 +377 28 0 days 00:00:00.061130366 +377 29 0 days 00:00:00.098950013 +377 30 0 days 00:00:00.061259800 +377 31 0 days 00:00:00.074340853 +377 32 0 days 00:00:00.061635993 +377 33 0 days 00:00:00.067532453 +377 34 0 days 00:00:00.096690093 +377 35 0 days 00:00:00.072376440 +377 36 0 days 00:00:00.097710713 +377 37 0 days 00:00:00.103412446 +377 38 0 days 00:00:00.094948960 +377 39 0 days 00:00:00.067472400 +377 40 0 days 00:00:00.073529846 +377 41 0 days 00:00:00.103246573 +377 42 0 days 00:00:00.064064846 +377 43 0 days 00:00:00.061542740 +377 44 0 days 00:00:00.061846013 +377 45 0 days 00:00:00.067141120 +377 46 0 days 00:00:00.082364180 +377 47 0 days 00:00:00.097486840 +377 48 0 days 00:00:00.073485546 +377 49 0 days 
00:00:00.067323033 +377 50 0 days 00:00:00.073471406 +377 51 0 days 00:00:00.068609840 +377 52 0 days 00:00:00.074424486 +377 53 0 days 00:00:00.074303780 +377 54 0 days 00:00:00.102584740 +377 55 0 days 00:00:00.098815240 +377 56 0 days 00:00:00.101226633 +377 57 0 days 00:00:00.098905493 +377 58 0 days 00:00:00.058601973 +377 59 0 days 00:00:00.061742620 +377 60 0 days 00:00:00.075010606 +377 61 0 days 00:00:00.061314340 +377 62 0 days 00:00:00.074781280 +377 63 0 days 00:00:00.073428740 +377 64 0 days 00:00:00.074673126 +377 65 0 days 00:00:00.063201580 +377 66 0 days 00:00:00.099713486 +377 67 0 days 00:00:00.059044840 +377 68 0 days 00:00:00.074849166 +377 69 0 days 00:00:00.107590846 +377 70 0 days 00:00:00.104569206 +377 71 0 days 00:00:00.068826913 +377 72 0 days 00:00:00.099298053 +377 73 0 days 00:00:00.060655680 +377 74 0 days 00:00:00.108105673 +377 75 0 days 00:00:00.061178940 +377 76 0 days 00:00:00.080540066 +377 77 0 days 00:00:00.069213280 +377 78 0 days 00:00:00.080550946 +377 79 0 days 00:00:00.073350706 +377 80 0 days 00:00:00.074934773 +377 81 0 days 00:00:00.073363740 +377 82 0 days 00:00:00.061188713 +377 83 0 days 00:00:00.069168053 +377 84 0 days 00:00:00.098429526 +377 85 0 days 00:00:00.100152000 +377 86 0 days 00:00:00.059644140 +377 87 0 days 00:00:00.058393640 +377 88 0 days 00:00:00.095264040 +377 89 0 days 00:00:00.067173053 +377 90 0 days 00:00:00.073413433 +377 91 0 days 00:00:00.074686920 +377 92 0 days 00:00:00.068363093 +377 93 0 days 00:00:00.072362573 +377 94 0 days 00:00:00.100149833 +377 95 0 days 00:00:00.069997913 +377 96 0 days 00:00:00.074668686 +377 97 0 days 00:00:00.062950546 +377 98 0 days 00:00:00.060125533 +377 99 0 days 00:00:00.076116640 +377 100 0 days 00:00:00.070913833 +378 1 0 days 00:00:00.102592932 +378 2 0 days 00:00:00.126055156 +378 3 0 days 00:00:00.085803996 +378 4 0 days 00:00:00.112828270 +378 5 0 days 00:00:00.077969760 +378 6 0 days 00:00:00.124433953 +378 7 0 days 00:00:00.075882465 +378 8 0 days 
00:00:00.074925293 +378 9 0 days 00:00:00.097410994 +378 10 0 days 00:00:00.079496890 +378 11 0 days 00:00:00.104104660 +378 12 0 days 00:00:00.102622714 +378 13 0 days 00:00:00.085659440 +378 14 0 days 00:00:00.070602835 +378 15 0 days 00:00:00.081992292 +378 16 0 days 00:00:00.070637095 +378 17 0 days 00:00:00.071162930 +378 18 0 days 00:00:00.092705164 +378 19 0 days 00:00:00.094435185 +378 20 0 days 00:00:00.101318380 +378 21 0 days 00:00:00.074226953 +378 22 0 days 00:00:00.125829640 +378 23 0 days 00:00:00.095422503 +378 24 0 days 00:00:00.078833465 +378 25 0 days 00:00:00.114277935 +378 26 0 days 00:00:00.089813855 +378 27 0 days 00:00:00.088206165 +378 28 0 days 00:00:00.119208715 +378 29 0 days 00:00:00.071665346 +378 30 0 days 00:00:00.127173076 +378 31 0 days 00:00:00.074896820 +378 32 0 days 00:00:00.064158333 +378 33 0 days 00:00:00.128651142 +378 34 0 days 00:00:00.116840765 +378 35 0 days 00:00:00.128533012 +378 36 0 days 00:00:00.098893814 +378 37 0 days 00:00:00.072775833 +378 38 0 days 00:00:00.121708864 +378 39 0 days 00:00:00.100402477 +378 40 0 days 00:00:00.081590605 +378 41 0 days 00:00:00.126153880 +378 42 0 days 00:00:00.127374816 +378 43 0 days 00:00:00.085498335 +378 44 0 days 00:00:00.104676600 +378 45 0 days 00:00:00.132597552 +378 46 0 days 00:00:00.090431415 +378 47 0 days 00:00:00.078400000 +378 48 0 days 00:00:00.097790991 +378 49 0 days 00:00:00.121747013 +378 50 0 days 00:00:00.096257076 +378 51 0 days 00:00:00.075688693 +378 52 0 days 00:00:00.074626133 +378 53 0 days 00:00:00.122733880 +378 54 0 days 00:00:00.091289088 +378 55 0 days 00:00:00.073978120 +378 56 0 days 00:00:00.083335075 +378 57 0 days 00:00:00.122540840 +378 58 0 days 00:00:00.126428284 +378 59 0 days 00:00:00.094624353 +378 60 0 days 00:00:00.075619266 +378 61 0 days 00:00:00.117028160 +378 62 0 days 00:00:00.066961100 +378 63 0 days 00:00:00.124925624 +378 64 0 days 00:00:00.083115735 +378 65 0 days 00:00:00.071116695 +378 66 0 days 00:00:00.063699580 +378 67 0 
days 00:00:00.087048040 +378 68 0 days 00:00:00.064755453 +378 69 0 days 00:00:00.070991873 +378 70 0 days 00:00:00.117382235 +378 71 0 days 00:00:00.116567195 +378 72 0 days 00:00:00.075985585 +378 73 0 days 00:00:00.073422526 +378 74 0 days 00:00:00.095723473 +378 75 0 days 00:00:00.059833713 +378 76 0 days 00:00:00.107608320 +378 77 0 days 00:00:00.117982948 +378 78 0 days 00:00:00.109737165 +378 79 0 days 00:00:00.117302140 +378 80 0 days 00:00:00.081162364 +378 81 0 days 00:00:00.113054225 +378 82 0 days 00:00:00.096757366 +378 83 0 days 00:00:00.080520268 +378 84 0 days 00:00:00.126726965 +378 85 0 days 00:00:00.076284133 +378 86 0 days 00:00:00.078150376 +378 87 0 days 00:00:00.127136066 +378 88 0 days 00:00:00.124493380 +378 89 0 days 00:00:00.093069397 +378 90 0 days 00:00:00.106337925 +378 91 0 days 00:00:00.074118292 +378 92 0 days 00:00:00.083581196 +378 93 0 days 00:00:00.094305153 +378 94 0 days 00:00:00.096198213 +378 95 0 days 00:00:00.098331426 +378 96 0 days 00:00:00.077635620 +378 97 0 days 00:00:00.079026176 +378 98 0 days 00:00:00.109274260 +378 99 0 days 00:00:00.122534462 +378 100 0 days 00:00:00.121489848 +379 1 0 days 00:00:00.173249008 +379 2 0 days 00:00:00.150601852 +379 3 0 days 00:00:00.207966673 +379 4 0 days 00:00:00.138252325 +379 5 0 days 00:00:00.147545488 +379 6 0 days 00:00:00.220618650 +379 7 0 days 00:00:00.192883470 +379 8 0 days 00:00:00.169294770 +379 9 0 days 00:00:00.150493900 +379 10 0 days 00:00:00.219305642 +379 11 0 days 00:00:00.133531980 +379 12 0 days 00:00:00.221615270 +379 13 0 days 00:00:00.210320866 +379 14 0 days 00:00:00.162734865 +379 15 0 days 00:00:00.208925096 +379 16 0 days 00:00:00.153969736 +379 17 0 days 00:00:00.193650350 +379 18 0 days 00:00:00.193022345 +379 19 0 days 00:00:00.223947068 +379 20 0 days 00:00:00.233962468 +379 21 0 days 00:00:00.136062996 +379 22 0 days 00:00:00.115727250 +379 23 0 days 00:00:00.144450400 +379 24 0 days 00:00:00.147916280 +379 25 0 days 00:00:00.136079491 +379 26 0 
days 00:00:00.222034926 +379 27 0 days 00:00:00.144020480 +379 28 0 days 00:00:00.149303925 +379 29 0 days 00:00:00.165566466 +379 30 0 days 00:00:00.142201852 +379 31 0 days 00:00:00.166678220 +379 32 0 days 00:00:00.160675195 +379 33 0 days 00:00:00.215426480 +379 34 0 days 00:00:00.165249251 +379 35 0 days 00:00:00.131688476 +379 36 0 days 00:00:00.172185617 +379 37 0 days 00:00:00.222482894 +379 38 0 days 00:00:00.136010875 +379 39 0 days 00:00:00.129875340 +379 40 0 days 00:00:00.139776074 +379 41 0 days 00:00:00.163890537 +379 42 0 days 00:00:00.152101652 +379 43 0 days 00:00:00.153737273 +379 44 0 days 00:00:00.240686128 +379 45 0 days 00:00:00.159294220 +379 46 0 days 00:00:00.220293012 +379 47 0 days 00:00:00.222655204 +379 48 0 days 00:00:00.123904300 +379 49 0 days 00:00:00.137866100 +379 50 0 days 00:00:00.204211932 +379 51 0 days 00:00:00.130546953 +379 52 0 days 00:00:00.227284564 +379 53 0 days 00:00:00.151814670 +379 54 0 days 00:00:00.210854188 +379 55 0 days 00:00:00.134161727 +379 56 0 days 00:00:00.204647144 +379 57 0 days 00:00:00.192765845 +379 58 0 days 00:00:00.149666516 +379 59 0 days 00:00:00.146240780 +379 60 0 days 00:00:00.132719960 +379 61 0 days 00:00:00.210431623 +379 62 0 days 00:00:00.190243925 +379 63 0 days 00:00:00.199062732 +379 64 0 days 00:00:00.206441826 +379 65 0 days 00:00:00.151457853 +379 66 0 days 00:00:00.165935928 +379 67 0 days 00:00:00.198725164 +379 68 0 days 00:00:00.197720288 +379 69 0 days 00:00:00.134192807 +379 70 0 days 00:00:00.124148356 +379 71 0 days 00:00:00.190518995 +379 72 0 days 00:00:00.151739766 +379 73 0 days 00:00:00.156399436 +379 74 0 days 00:00:00.157236005 +379 75 0 days 00:00:00.151132944 +379 76 0 days 00:00:00.130105695 +379 77 0 days 00:00:00.200310744 +379 78 0 days 00:00:00.153848814 +379 79 0 days 00:00:00.157530010 +379 80 0 days 00:00:00.198818180 +379 81 0 days 00:00:00.154524100 +379 82 0 days 00:00:00.210105968 +379 83 0 days 00:00:00.219952532 +379 84 0 days 00:00:00.225804390 
+379 85 0 days 00:00:00.202758460 +379 86 0 days 00:00:00.157506743 +379 87 0 days 00:00:00.216537440 +379 88 0 days 00:00:00.190502150 +379 89 0 days 00:00:00.208904656 +379 90 0 days 00:00:00.162539051 +379 91 0 days 00:00:00.160946622 +379 92 0 days 00:00:00.159100240 +379 93 0 days 00:00:00.223196646 +379 94 0 days 00:00:00.144894153 +379 95 0 days 00:00:00.162033862 +379 96 0 days 00:00:00.131708408 +379 97 0 days 00:00:00.124397468 +379 98 0 days 00:00:00.208923096 +379 99 0 days 00:00:00.213910020 +379 100 0 days 00:00:00.197593500 +380 1 0 days 00:00:00.065515860 +380 2 0 days 00:00:00.082614156 +380 3 0 days 00:00:00.078790848 +380 4 0 days 00:00:00.074285275 +380 5 0 days 00:00:00.113473032 +380 6 0 days 00:00:00.070931768 +380 7 0 days 00:00:00.084190053 +380 8 0 days 00:00:00.080713624 +380 9 0 days 00:00:00.086043960 +380 10 0 days 00:00:00.064166210 +380 11 0 days 00:00:00.076963056 +380 12 0 days 00:00:00.074441920 +380 13 0 days 00:00:00.114818500 +380 14 0 days 00:00:00.068051585 +380 15 0 days 00:00:00.070447090 +380 16 0 days 00:00:00.071743700 +380 17 0 days 00:00:00.128699322 +380 18 0 days 00:00:00.068223053 +380 19 0 days 00:00:00.108756100 +380 20 0 days 00:00:00.102480550 +380 21 0 days 00:00:00.111312452 +380 22 0 days 00:00:00.111847484 +380 23 0 days 00:00:00.077371816 +380 24 0 days 00:00:00.077422300 +380 25 0 days 00:00:00.084975777 +380 26 0 days 00:00:00.068132513 +380 27 0 days 00:00:00.073280620 +380 28 0 days 00:00:00.107774812 +380 29 0 days 00:00:00.062507925 +380 30 0 days 00:00:00.106786340 +380 31 0 days 00:00:00.063813300 +380 32 0 days 00:00:00.102373550 +380 33 0 days 00:00:00.063997364 +380 34 0 days 00:00:00.103698735 +380 35 0 days 00:00:00.083676700 +380 36 0 days 00:00:00.107896112 +380 37 0 days 00:00:00.081465057 +380 38 0 days 00:00:00.069132540 +380 39 0 days 00:00:00.077595044 +380 40 0 days 00:00:00.107586505 +380 41 0 days 00:00:00.079031670 +380 42 0 days 00:00:00.077136456 +380 43 0 days 00:00:00.076788385 
+380 44 0 days 00:00:00.079143903 +380 45 0 days 00:00:00.080669550 +380 46 0 days 00:00:00.112161356 +380 47 0 days 00:00:00.066781733 +380 48 0 days 00:00:00.083598354 +380 49 0 days 00:00:00.111686208 +380 50 0 days 00:00:00.084946057 +380 51 0 days 00:00:00.074440897 +380 52 0 days 00:00:00.087890785 +380 53 0 days 00:00:00.078048988 +380 54 0 days 00:00:00.119403383 +380 55 0 days 00:00:00.076506164 +380 56 0 days 00:00:00.101608890 +380 57 0 days 00:00:00.079639413 +380 58 0 days 00:00:00.060195940 +380 59 0 days 00:00:00.086828235 +380 60 0 days 00:00:00.081637171 +380 61 0 days 00:00:00.076472124 +380 62 0 days 00:00:00.070888678 +380 63 0 days 00:00:00.068496332 +380 64 0 days 00:00:00.080796668 +380 65 0 days 00:00:00.104739640 +380 66 0 days 00:00:00.108675852 +380 67 0 days 00:00:00.076404510 +380 68 0 days 00:00:00.107421108 +380 69 0 days 00:00:00.077986642 +380 70 0 days 00:00:00.115026200 +380 71 0 days 00:00:00.065238410 +380 72 0 days 00:00:00.082355600 +380 73 0 days 00:00:00.085463771 +380 74 0 days 00:00:00.090499047 +380 75 0 days 00:00:00.063453470 +380 76 0 days 00:00:00.080928788 +380 77 0 days 00:00:00.061539800 +380 78 0 days 00:00:00.076035865 +380 79 0 days 00:00:00.110592096 +380 80 0 days 00:00:00.074294406 +380 81 0 days 00:00:00.087335680 +380 82 0 days 00:00:00.086387288 +380 83 0 days 00:00:00.079415690 +380 84 0 days 00:00:00.072286775 +380 85 0 days 00:00:00.083572546 +380 86 0 days 00:00:00.071289935 +380 87 0 days 00:00:00.077510140 +380 88 0 days 00:00:00.111503106 +380 89 0 days 00:00:00.072649426 +380 90 0 days 00:00:00.109410280 +380 91 0 days 00:00:00.108658316 +380 92 0 days 00:00:00.104254465 +380 93 0 days 00:00:00.078368020 +380 94 0 days 00:00:00.090475689 +380 95 0 days 00:00:00.087633793 +380 96 0 days 00:00:00.086367840 +380 97 0 days 00:00:00.120628956 +380 98 0 days 00:00:00.117184660 +380 99 0 days 00:00:00.089190284 +380 100 0 days 00:00:00.064729585 +381 1 0 days 00:00:00.349265580 +381 2 0 days 
00:00:01.295694320 +381 3 0 days 00:00:00.396752306 +381 4 0 days 00:00:00.394959626 +381 5 0 days 00:00:00.398052260 +381 6 0 days 00:00:00.871680220 +381 7 0 days 00:00:01.282668600 +381 8 0 days 00:00:00.696953826 +381 9 0 days 00:00:00.402621406 +381 10 0 days 00:00:00.412366420 +381 11 0 days 00:00:00.373069633 +381 12 0 days 00:00:00.811845313 +381 13 0 days 00:00:00.460362853 +381 14 0 days 00:00:00.401815606 +381 15 0 days 00:00:01.362126373 +381 16 0 days 00:00:00.893544673 +381 17 0 days 00:00:00.787675626 +381 18 0 days 00:00:00.369842973 +381 19 0 days 00:00:01.351173813 +381 20 0 days 00:00:00.376565373 +381 21 0 days 00:00:00.736623486 +381 22 0 days 00:00:00.390529806 +381 23 0 days 00:00:01.281262760 +381 24 0 days 00:00:00.837333006 +381 25 0 days 00:00:00.614462926 +381 26 0 days 00:00:01.338882100 +381 27 0 days 00:00:01.368033673 +381 28 0 days 00:00:00.899839546 +381 29 0 days 00:00:00.416282753 +381 30 0 days 00:00:00.769002040 +381 31 0 days 00:00:01.350626706 +381 32 0 days 00:00:01.484432440 +381 33 0 days 00:00:00.815850333 +381 34 0 days 00:00:00.733862286 +381 35 0 days 00:00:00.423507986 +381 36 0 days 00:00:00.601079740 +381 37 0 days 00:00:01.397551873 +381 38 0 days 00:00:00.404680820 +381 39 0 days 00:00:00.728533000 +381 40 0 days 00:00:01.402705586 +381 41 0 days 00:00:00.704838813 +381 42 0 days 00:00:00.466600160 +381 43 0 days 00:00:01.269202660 +381 44 0 days 00:00:00.730455906 +381 45 0 days 00:00:00.388341600 +381 46 0 days 00:00:00.429189840 +381 47 0 days 00:00:01.602390333 +381 48 0 days 00:00:01.399717126 +381 49 0 days 00:00:01.520695126 +381 50 0 days 00:00:00.436896300 +381 51 0 days 00:00:01.339149480 +381 52 0 days 00:00:00.768894413 +381 53 0 days 00:00:00.376074160 +381 54 0 days 00:00:00.375433500 +381 55 0 days 00:00:00.831415440 +381 56 0 days 00:00:00.417285926 +381 57 0 days 00:00:00.856682226 +381 58 0 days 00:00:00.365909053 +381 59 0 days 00:00:01.375274460 +381 60 0 days 00:00:01.443295506 +381 61 0 days 
00:00:00.427187733 +381 62 0 days 00:00:01.536530433 +381 63 0 days 00:00:01.276679180 +381 64 0 days 00:00:00.265161513 +381 65 0 days 00:00:00.695874153 +381 66 0 days 00:00:01.282293060 +381 67 0 days 00:00:00.686428633 +381 68 0 days 00:00:01.296540020 +381 69 0 days 00:00:00.697173100 +381 70 0 days 00:00:00.414701333 +381 71 0 days 00:00:00.421546960 +381 72 0 days 00:00:00.736711566 +381 73 0 days 00:00:00.776770013 +381 74 0 days 00:00:00.469620206 +381 75 0 days 00:00:01.407313293 +381 76 0 days 00:00:00.734624926 +381 77 0 days 00:00:00.702973380 +381 78 0 days 00:00:01.027150840 +381 79 0 days 00:00:01.485552773 +381 80 0 days 00:00:01.391603873 +381 81 0 days 00:00:00.729673000 +381 82 0 days 00:00:00.846938560 +381 83 0 days 00:00:01.404033106 +381 84 0 days 00:00:01.659434446 +381 85 0 days 00:00:01.311407806 +381 86 0 days 00:00:00.697372066 +381 87 0 days 00:00:00.427670586 +381 88 0 days 00:00:00.444029626 +381 89 0 days 00:00:00.699139480 +381 90 0 days 00:00:01.344148700 +381 91 0 days 00:00:00.380108693 +381 92 0 days 00:00:01.347394013 +381 93 0 days 00:00:01.309586706 +381 94 0 days 00:00:00.914912340 +381 95 0 days 00:00:00.482621606 +381 96 0 days 00:00:01.381958366 +381 97 0 days 00:00:00.753511720 +381 98 0 days 00:00:00.536976720 +381 99 0 days 00:00:01.410791140 +381 100 0 days 00:00:00.426070813 +382 1 0 days 00:00:00.745749500 +382 2 0 days 00:00:01.437245466 +382 3 0 days 00:00:00.663664437 +382 4 0 days 00:00:01.556440795 +382 5 0 days 00:00:01.415086006 +382 6 0 days 00:00:01.589955965 +382 7 0 days 00:00:00.484926510 +382 8 0 days 00:00:01.535909010 +382 9 0 days 00:00:00.863215815 +382 10 0 days 00:00:00.799149110 +382 11 0 days 00:00:00.776615920 +382 12 0 days 00:00:00.398813106 +382 13 0 days 00:00:01.340790320 +382 14 0 days 00:00:00.418016780 +382 15 0 days 00:00:00.470104972 +382 16 0 days 00:00:01.031670545 +382 17 0 days 00:00:00.427740250 +382 18 0 days 00:00:00.508057336 +382 19 0 days 00:00:00.791366045 +382 20 0 days 
00:00:00.687187560 +382 21 0 days 00:00:01.374723833 +382 22 0 days 00:00:00.502283883 +382 23 0 days 00:00:00.681452180 +382 24 0 days 00:00:01.510278985 +382 25 0 days 00:00:01.097599136 +382 26 0 days 00:00:00.810799988 +382 27 0 days 00:00:00.770738960 +382 28 0 days 00:00:01.453632880 +382 29 0 days 00:00:00.417168693 +382 30 0 days 00:00:00.424924646 +382 31 0 days 00:00:00.478161500 +382 32 0 days 00:00:00.804700228 +382 33 0 days 00:00:00.682560706 +382 34 0 days 00:00:00.818558000 +382 35 0 days 00:00:00.682255053 +382 36 0 days 00:00:00.518811091 +382 37 0 days 00:00:00.528385675 +382 38 0 days 00:00:00.793240373 +382 39 0 days 00:00:00.522336940 +382 40 0 days 00:00:00.497380612 +382 41 0 days 00:00:01.644394264 +382 42 0 days 00:00:01.066470295 +382 43 0 days 00:00:00.499081206 +382 44 0 days 00:00:00.879932612 +382 45 0 days 00:00:01.651126695 +382 46 0 days 00:00:01.580342935 +382 47 0 days 00:00:00.739574026 +382 48 0 days 00:00:01.064256110 +382 49 0 days 00:00:00.501935188 +382 50 0 days 00:00:01.541722080 +382 51 0 days 00:00:00.924755776 +382 52 0 days 00:00:00.825972140 +382 53 0 days 00:00:00.951423945 +382 54 0 days 00:00:00.286922570 +382 55 0 days 00:00:00.370684560 +382 56 0 days 00:00:01.611824640 +382 57 0 days 00:00:01.374816713 +382 58 0 days 00:00:00.469485950 +382 59 0 days 00:00:01.500197033 +382 60 0 days 00:00:01.559628410 +382 61 0 days 00:00:00.888607400 +382 62 0 days 00:00:01.837863904 +382 63 0 days 00:00:00.884428786 +382 64 0 days 00:00:00.804205415 +382 65 0 days 00:00:00.844419210 +382 66 0 days 00:00:00.894452506 +382 67 0 days 00:00:00.833339820 +382 68 0 days 00:00:01.637488605 +382 69 0 days 00:00:00.567570932 +382 70 0 days 00:00:01.511194206 +382 71 0 days 00:00:00.668772283 +382 72 0 days 00:00:00.843059686 +382 73 0 days 00:00:00.513921606 +382 74 0 days 00:00:00.453104593 +382 75 0 days 00:00:00.559864340 +382 76 0 days 00:00:01.734016000 +382 77 0 days 00:00:00.699431270 +382 78 0 days 00:00:00.472856712 +382 79 
0 days 00:00:00.646406412 +382 80 0 days 00:00:00.857355232 +382 81 0 days 00:00:00.647172340 +382 82 0 days 00:00:01.488908613 +382 83 0 days 00:00:00.825808992 +382 84 0 days 00:00:00.505349452 +382 85 0 days 00:00:00.627935556 +382 86 0 days 00:00:01.342210273 +382 87 0 days 00:00:00.472461145 +382 88 0 days 00:00:00.509590993 +382 89 0 days 00:00:00.413981113 +382 90 0 days 00:00:00.705791112 +382 91 0 days 00:00:00.436364265 +382 92 0 days 00:00:00.483392053 +382 93 0 days 00:00:00.776041720 +382 94 0 days 00:00:00.872333872 +382 95 0 days 00:00:01.376155293 +382 96 0 days 00:00:00.766552280 +382 97 0 days 00:00:01.976355982 +382 98 0 days 00:00:00.721334280 +382 99 0 days 00:00:00.822795385 +382 100 0 days 00:00:00.983113760 +383 1 0 days 00:00:00.195389593 +383 2 0 days 00:00:00.352398566 +383 3 0 days 00:00:00.257265033 +383 4 0 days 00:00:00.659390900 +383 5 0 days 00:00:00.348758273 +383 6 0 days 00:00:00.349562926 +383 7 0 days 00:00:00.354933613 +383 8 0 days 00:00:00.381654933 +383 9 0 days 00:00:00.425507286 +383 10 0 days 00:00:00.694736353 +383 11 0 days 00:00:00.126080633 +383 12 0 days 00:00:00.371191800 +383 13 0 days 00:00:00.246040553 +383 14 0 days 00:00:00.355915340 +383 15 0 days 00:00:00.687808120 +383 16 0 days 00:00:00.409179266 +383 17 0 days 00:00:00.413826200 +383 18 0 days 00:00:00.685854973 +383 19 0 days 00:00:00.682413780 +383 20 0 days 00:00:00.203291093 +383 21 0 days 00:00:00.672698526 +383 22 0 days 00:00:00.667498626 +383 23 0 days 00:00:00.200804886 +383 24 0 days 00:00:00.220089406 +383 25 0 days 00:00:00.207628240 +383 26 0 days 00:00:00.260677293 +383 27 0 days 00:00:00.836085566 +383 28 0 days 00:00:00.556708800 +383 29 0 days 00:00:00.566513673 +383 30 0 days 00:00:00.493121893 +383 31 0 days 00:00:01.085754806 +383 32 0 days 00:00:00.327721100 +383 33 0 days 00:00:00.690833593 +383 34 0 days 00:00:00.976456320 +383 35 0 days 00:00:00.316689860 +383 36 0 days 00:00:00.552046133 +383 37 0 days 00:00:00.714201546 +383 38 0 
days 00:00:00.672952660 +383 39 0 days 00:00:00.392699053 +383 40 0 days 00:00:00.195362573 +383 41 0 days 00:00:00.687078613 +383 42 0 days 00:00:00.272766973 +383 43 0 days 00:00:00.673124840 +383 44 0 days 00:00:00.671787620 +383 45 0 days 00:00:00.444798893 +383 46 0 days 00:00:00.363703840 +383 47 0 days 00:00:00.356271360 +383 48 0 days 00:00:00.366854093 +383 49 0 days 00:00:00.692242740 +383 50 0 days 00:00:00.670423646 +383 51 0 days 00:00:00.674363740 +383 52 0 days 00:00:00.227970586 +383 53 0 days 00:00:00.302963260 +383 54 0 days 00:00:00.365956706 +383 55 0 days 00:00:00.380304520 +383 56 0 days 00:00:00.363448586 +383 57 0 days 00:00:00.212184946 +383 58 0 days 00:00:00.198141806 +383 59 0 days 00:00:00.206089913 +383 60 0 days 00:00:00.206400120 +383 61 0 days 00:00:00.277313753 +383 62 0 days 00:00:00.673552493 +383 63 0 days 00:00:00.686028360 +383 64 0 days 00:00:00.358005826 +383 65 0 days 00:00:00.252224786 +383 66 0 days 00:00:00.362543973 +383 67 0 days 00:00:00.202390440 +383 68 0 days 00:00:00.355356633 +383 69 0 days 00:00:00.239179133 +383 70 0 days 00:00:00.669327500 +383 71 0 days 00:00:00.358633433 +383 72 0 days 00:00:00.255323753 +383 73 0 days 00:00:00.383600053 +383 74 0 days 00:00:00.728012266 +383 75 0 days 00:00:00.207710873 +383 76 0 days 00:00:00.760447160 +383 77 0 days 00:00:00.404362026 +383 78 0 days 00:00:00.416032366 +383 79 0 days 00:00:00.425152720 +383 80 0 days 00:00:00.829787106 +383 81 0 days 00:00:00.718208033 +383 82 0 days 00:00:00.393501373 +383 83 0 days 00:00:00.795304306 +383 84 0 days 00:00:00.472897760 +383 85 0 days 00:00:00.243358133 +383 86 0 days 00:00:00.463755773 +383 87 0 days 00:00:00.345755020 +383 88 0 days 00:00:00.326815120 +383 89 0 days 00:00:00.257367533 +383 90 0 days 00:00:00.551646820 +383 91 0 days 00:00:00.347781520 +383 92 0 days 00:00:00.868369400 +383 93 0 days 00:00:00.450587573 +383 94 0 days 00:00:00.214180513 +383 95 0 days 00:00:00.378292066 +383 96 0 days 00:00:00.460936733 
+383 97 0 days 00:00:00.277725606 +383 98 0 days 00:00:00.431328786 +383 99 0 days 00:00:00.697383160 +383 100 0 days 00:00:00.379377946 +384 1 0 days 00:00:00.751064760 +384 2 0 days 00:00:00.331419688 +384 3 0 days 00:00:00.404114290 +384 4 0 days 00:00:00.682514766 +384 5 0 days 00:00:00.304500800 +384 6 0 days 00:00:00.366231453 +384 7 0 days 00:00:00.478796293 +384 8 0 days 00:00:00.314870685 +384 9 0 days 00:00:00.235844460 +384 10 0 days 00:00:00.757713670 +384 11 0 days 00:00:00.416625452 +384 12 0 days 00:00:00.377201746 +384 13 0 days 00:00:00.474857706 +384 14 0 days 00:00:00.426387150 +384 15 0 days 00:00:00.256949413 +384 16 0 days 00:00:00.238529292 +384 17 0 days 00:00:00.684202946 +384 18 0 days 00:00:00.442061770 +384 19 0 days 00:00:00.246496065 +384 20 0 days 00:00:00.680591166 +384 21 0 days 00:00:00.454540253 +384 22 0 days 00:00:00.737530746 +384 23 0 days 00:00:00.305172580 +384 24 0 days 00:00:00.292675010 +384 25 0 days 00:00:00.458073483 +384 26 0 days 00:00:00.243514425 +384 27 0 days 00:00:00.788039206 +384 28 0 days 00:00:00.884756073 +384 29 0 days 00:00:00.340479330 +384 30 0 days 00:00:00.790682744 +384 31 0 days 00:00:00.311677628 +384 32 0 days 00:00:00.441306775 +384 33 0 days 00:00:00.367877153 +384 34 0 days 00:00:00.402488305 +384 35 0 days 00:00:00.870077272 +384 36 0 days 00:00:00.525967115 +384 37 0 days 00:00:00.390587746 +384 38 0 days 00:00:00.460782055 +384 39 0 days 00:00:00.374833524 +384 40 0 days 00:00:00.280622600 +384 41 0 days 00:00:00.197234173 +384 42 0 days 00:00:00.390009620 +384 43 0 days 00:00:00.912366192 +384 44 0 days 00:00:00.809993745 +384 45 0 days 00:00:00.477115150 +384 46 0 days 00:00:00.242409705 +384 47 0 days 00:00:00.266374784 +384 48 0 days 00:00:00.803264240 +384 49 0 days 00:00:00.234680740 +384 50 0 days 00:00:00.451929075 +384 51 0 days 00:00:00.465610745 +384 52 0 days 00:00:00.395521960 +384 53 0 days 00:00:00.492423584 +384 54 0 days 00:00:00.801882625 +384 55 0 days 00:00:00.465124840 
+384 56 0 days 00:00:00.354518106 +384 57 0 days 00:00:00.225917820 +384 58 0 days 00:00:00.283807683 +384 59 0 days 00:00:00.773079773 +384 60 0 days 00:00:00.509595385 +384 61 0 days 00:00:00.802827320 +384 62 0 days 00:00:00.554971590 +384 63 0 days 00:00:00.292434265 +384 64 0 days 00:00:00.587740880 +384 65 0 days 00:00:00.692706966 +384 66 0 days 00:00:00.276367450 +384 67 0 days 00:00:00.260943684 +384 68 0 days 00:00:00.698239593 +384 69 0 days 00:00:00.363824554 +384 70 0 days 00:00:00.446407055 +384 71 0 days 00:00:00.442452786 +384 72 0 days 00:00:00.262515025 +384 73 0 days 00:00:00.290196820 +384 74 0 days 00:00:00.888267104 +384 75 0 days 00:00:00.466562116 +384 76 0 days 00:00:00.815381075 +384 77 0 days 00:00:00.227068775 +384 78 0 days 00:00:00.793127910 +384 79 0 days 00:00:00.844663928 +384 80 0 days 00:00:00.368846986 +384 81 0 days 00:00:00.253154295 +384 82 0 days 00:00:00.364057693 +384 83 0 days 00:00:00.300506596 +384 84 0 days 00:00:00.268011980 +384 85 0 days 00:00:00.789763573 +384 86 0 days 00:00:00.368975466 +384 87 0 days 00:00:00.431622076 +384 88 0 days 00:00:00.738747833 +384 89 0 days 00:00:00.274501740 +384 90 0 days 00:00:00.156965448 +384 91 0 days 00:00:00.692525186 +384 92 0 days 00:00:00.251800020 +384 93 0 days 00:00:00.273305052 +384 94 0 days 00:00:00.284543735 +384 95 0 days 00:00:00.315322251 +384 96 0 days 00:00:00.488606076 +384 97 0 days 00:00:00.285739352 +384 98 0 days 00:00:00.770515720 +384 99 0 days 00:00:00.250143916 +384 100 0 days 00:00:00.321607380 +385 1 0 days 00:00:00.777040675 +385 2 0 days 00:00:01.358228353 +385 3 0 days 00:00:00.892760412 +385 4 0 days 00:00:00.854449928 +385 5 0 days 00:00:00.506328252 +385 6 0 days 00:00:00.528728393 +385 7 0 days 00:00:00.463315408 +385 8 0 days 00:00:00.955457697 +385 9 0 days 00:00:00.914927796 +385 10 0 days 00:00:00.444522956 +385 11 0 days 00:00:01.788394352 +385 12 0 days 00:00:00.467607624 +385 13 0 days 00:00:00.484579590 +385 14 0 days 00:00:00.568018556 
+385 15 0 days 00:00:00.510452395 +385 16 0 days 00:00:00.907391488 +385 17 0 days 00:00:00.990436342 +385 18 0 days 00:00:01.975421702 +385 19 0 days 00:00:00.510883288 +385 20 0 days 00:00:01.610095360 +385 21 0 days 00:00:00.605961250 +385 22 0 days 00:00:00.457810205 +385 23 0 days 00:00:00.842143136 +385 24 0 days 00:00:00.580192360 +385 25 0 days 00:00:00.526872856 +385 26 0 days 00:00:01.830137190 +385 27 0 days 00:00:01.537567695 +385 28 0 days 00:00:00.560369564 +385 29 0 days 00:00:00.881789333 +385 30 0 days 00:00:00.944240418 +385 31 0 days 00:00:00.453087284 +385 32 0 days 00:00:00.426382895 +385 33 0 days 00:00:00.856141605 +385 34 0 days 00:00:00.971924917 +385 35 0 days 00:00:01.589026555 +385 36 0 days 00:00:00.742864940 +385 37 0 days 00:00:00.902563682 +385 38 0 days 00:00:01.747796140 +385 39 0 days 00:00:00.682692842 +385 40 0 days 00:00:00.921437690 +385 41 0 days 00:00:01.654119132 +385 42 0 days 00:00:01.557090265 +385 43 0 days 00:00:01.459683450 +385 44 0 days 00:00:00.562159295 +385 45 0 days 00:00:01.041773924 +385 46 0 days 00:00:00.835275045 +385 47 0 days 00:00:00.468245875 +385 48 0 days 00:00:01.702594944 +385 49 0 days 00:00:01.873638755 +385 50 0 days 00:00:00.914449696 +385 51 0 days 00:00:00.869334273 +385 52 0 days 00:00:01.590968660 +385 53 0 days 00:00:00.825239755 +385 54 0 days 00:00:00.492733024 +385 55 0 days 00:00:00.844099030 +385 56 0 days 00:00:00.884917525 +385 57 0 days 00:00:01.764901765 +385 58 0 days 00:00:00.843257226 +385 59 0 days 00:00:00.986464405 +385 60 0 days 00:00:00.788657325 +385 61 0 days 00:00:01.628200580 +385 62 0 days 00:00:01.093616570 +385 63 0 days 00:00:01.548984375 +385 64 0 days 00:00:01.766422553 +385 65 0 days 00:00:01.815156262 +385 66 0 days 00:00:01.084274713 +385 67 0 days 00:00:01.442836260 +385 68 0 days 00:00:00.434736470 +385 69 0 days 00:00:00.995858605 +385 70 0 days 00:00:00.916198560 +385 71 0 days 00:00:00.479793810 +385 72 0 days 00:00:01.484783015 +385 73 0 days 
00:00:00.852838945 +385 74 0 days 00:00:00.909374400 +385 75 0 days 00:00:00.871527992 +385 76 0 days 00:00:01.688219925 +385 77 0 days 00:00:01.595052970 +385 78 0 days 00:00:01.088093040 +385 79 0 days 00:00:00.492509110 +385 80 0 days 00:00:00.571804234 +385 81 0 days 00:00:01.908751132 +385 82 0 days 00:00:00.856742343 +385 83 0 days 00:00:00.509687792 +385 84 0 days 00:00:01.692191972 +385 85 0 days 00:00:00.512378590 +385 86 0 days 00:00:01.816058860 +385 87 0 days 00:00:02.059092509 +385 88 0 days 00:00:00.898928085 +385 89 0 days 00:00:00.506874522 +385 90 0 days 00:00:00.618674808 +385 91 0 days 00:00:00.476315080 +385 92 0 days 00:00:01.838838856 +385 93 0 days 00:00:00.600212615 +385 94 0 days 00:00:00.609708732 +385 95 0 days 00:00:00.802116812 +385 96 0 days 00:00:02.049386764 +385 97 0 days 00:00:00.854783800 +385 98 0 days 00:00:01.054991908 +385 99 0 days 00:00:01.479867200 +385 100 0 days 00:00:01.105784365 +386 1 0 days 00:00:00.362744102 +386 2 0 days 00:00:00.199206813 +386 3 0 days 00:00:00.196034490 +386 4 0 days 00:00:00.426104254 +386 5 0 days 00:00:00.210705948 +386 6 0 days 00:00:00.394378692 +386 7 0 days 00:00:00.772519240 +386 8 0 days 00:00:00.554230173 +386 9 0 days 00:00:00.427972511 +386 10 0 days 00:00:00.623733260 +386 11 0 days 00:00:00.656956482 +386 12 0 days 00:00:00.207321740 +386 13 0 days 00:00:00.668690848 +386 14 0 days 00:00:00.183743184 +386 15 0 days 00:00:00.321704735 +386 16 0 days 00:00:00.331222792 +386 17 0 days 00:00:00.642664260 +386 18 0 days 00:00:00.232583840 +386 19 0 days 00:00:00.696655582 +386 20 0 days 00:00:00.207380853 +386 21 0 days 00:00:00.691600112 +386 22 0 days 00:00:00.671053500 +386 23 0 days 00:00:00.367076440 +386 24 0 days 00:00:00.613330115 +386 25 0 days 00:00:00.691587110 +386 26 0 days 00:00:00.194547864 +386 27 0 days 00:00:00.250956949 +386 28 0 days 00:00:00.235559957 +386 29 0 days 00:00:00.371261993 +386 30 0 days 00:00:00.227697571 +386 31 0 days 00:00:00.205658857 +386 32 0 days 
00:00:00.409870927 +386 33 0 days 00:00:00.405219570 +386 34 0 days 00:00:00.257809097 +386 35 0 days 00:00:00.406675820 +386 36 0 days 00:00:00.757149930 +386 37 0 days 00:00:00.204818155 +386 38 0 days 00:00:00.643690370 +386 39 0 days 00:00:00.773122022 +386 40 0 days 00:00:00.246154810 +386 41 0 days 00:00:00.385046371 +386 42 0 days 00:00:00.679822456 +386 43 0 days 00:00:00.271092543 +386 44 0 days 00:00:00.767300158 +386 45 0 days 00:00:00.399962808 +386 46 0 days 00:00:00.224180625 +386 47 0 days 00:00:00.374455990 +386 48 0 days 00:00:00.606124440 +386 49 0 days 00:00:00.179260110 +386 50 0 days 00:00:00.651642125 +386 51 0 days 00:00:00.722864583 +386 52 0 days 00:00:00.672430326 +386 53 0 days 00:00:00.411724380 +386 54 0 days 00:00:00.623565068 +386 55 0 days 00:00:00.672015170 +386 56 0 days 00:00:00.555326513 +386 57 0 days 00:00:00.272332831 +386 58 0 days 00:00:00.618427225 +386 59 0 days 00:00:00.417247888 +386 60 0 days 00:00:00.430972085 +386 61 0 days 00:00:00.698620540 +386 62 0 days 00:00:00.391766782 +386 63 0 days 00:00:00.186899965 +386 64 0 days 00:00:00.218749096 +386 65 0 days 00:00:00.421966614 +386 66 0 days 00:00:00.194322405 +386 67 0 days 00:00:00.211609607 +386 68 0 days 00:00:00.239628988 +386 69 0 days 00:00:00.195883696 +386 70 0 days 00:00:00.373308691 +386 71 0 days 00:00:00.724806340 +386 72 0 days 00:00:00.224183310 +386 73 0 days 00:00:00.596660540 +386 74 0 days 00:00:00.558009060 +386 75 0 days 00:00:00.201852500 +386 76 0 days 00:00:00.676885757 +386 77 0 days 00:00:00.574383475 +386 78 0 days 00:00:00.511433048 +386 79 0 days 00:00:00.660695085 +386 80 0 days 00:00:00.212593350 +386 81 0 days 00:00:00.206424271 +386 82 0 days 00:00:00.770731703 +386 83 0 days 00:00:00.581263280 +386 84 0 days 00:00:00.663612920 +386 85 0 days 00:00:00.446062934 +386 86 0 days 00:00:00.447898984 +386 87 0 days 00:00:00.220799905 +386 88 0 days 00:00:00.199952432 +386 89 0 days 00:00:00.199250186 +386 90 0 days 00:00:00.357234170 +386 91 
0 days 00:00:00.183753732 +386 92 0 days 00:00:00.235129025 +386 93 0 days 00:00:00.366881165 +386 94 0 days 00:00:00.390223025 +386 95 0 days 00:00:00.696898322 +386 96 0 days 00:00:00.595514880 +386 97 0 days 00:00:00.693906436 +386 98 0 days 00:00:00.174702930 +386 99 0 days 00:00:00.388691370 +386 100 0 days 00:00:00.400476485 +387 1 0 days 00:00:00.452286485 +387 2 0 days 00:00:00.857652260 +387 3 0 days 00:00:01.070490440 +387 4 0 days 00:00:00.445981700 +387 5 0 days 00:00:00.764765000 +387 6 0 days 00:00:00.578709860 +387 7 0 days 00:00:00.750112028 +387 8 0 days 00:00:00.713720053 +387 9 0 days 00:00:00.459510008 +387 10 0 days 00:00:01.100162353 +387 11 0 days 00:00:00.592101386 +387 12 0 days 00:00:00.664471665 +387 13 0 days 00:00:00.476273817 +387 14 0 days 00:00:01.085243846 +387 15 0 days 00:00:00.643226175 +387 16 0 days 00:00:01.289308396 +387 17 0 days 00:00:00.569009864 +387 18 0 days 00:00:00.421024453 +387 19 0 days 00:00:00.846594420 +387 20 0 days 00:00:01.348953356 +387 21 0 days 00:00:00.433931688 +387 22 0 days 00:00:01.440676990 +387 23 0 days 00:00:00.668031630 +387 24 0 days 00:00:00.510833460 +387 25 0 days 00:00:01.077812746 +387 26 0 days 00:00:01.202933440 +387 27 0 days 00:00:01.412838231 +387 28 0 days 00:00:01.075677340 +387 29 0 days 00:00:00.645371760 +387 30 0 days 00:00:00.617861700 +387 31 0 days 00:00:01.295173616 +387 32 0 days 00:00:00.593144450 +387 33 0 days 00:00:00.592949013 +387 34 0 days 00:00:01.137053120 +387 35 0 days 00:00:00.528480740 +387 36 0 days 00:00:00.339102195 +387 37 0 days 00:00:00.999817200 +387 38 0 days 00:00:00.536894933 +387 39 0 days 00:00:00.385340736 +387 40 0 days 00:00:01.310130557 +387 41 0 days 00:00:00.667456826 +387 42 0 days 00:00:00.515575753 +387 43 0 days 00:00:00.380547588 +387 44 0 days 00:00:00.523022283 +387 45 0 days 00:00:00.993941466 +387 46 0 days 00:00:00.358515588 +387 47 0 days 00:00:00.654259555 +387 48 0 days 00:00:00.741739360 +387 49 0 days 00:00:00.342082000 +387 50 0 
days 00:00:01.218081903 +387 51 0 days 00:00:00.391089860 +387 52 0 days 00:00:00.341121335 +387 53 0 days 00:00:00.651224965 +387 54 0 days 00:00:00.641032105 +387 55 0 days 00:00:00.597323530 +387 56 0 days 00:00:01.063230966 +387 57 0 days 00:00:00.540481360 +387 58 0 days 00:00:00.495157340 +387 59 0 days 00:00:00.684796811 +387 60 0 days 00:00:00.623523065 +387 61 0 days 00:00:00.585166800 +387 62 0 days 00:00:00.398385980 +387 63 0 days 00:00:00.538791232 +387 64 0 days 00:00:00.604146000 +387 65 0 days 00:00:00.656335116 +387 66 0 days 00:00:00.600151095 +387 67 0 days 00:00:01.001756700 +387 68 0 days 00:00:00.419381252 +387 69 0 days 00:00:00.340423700 +387 70 0 days 00:00:00.419795436 +387 71 0 days 00:00:00.639287280 +387 72 0 days 00:00:01.003231100 +387 73 0 days 00:00:00.372300716 +387 74 0 days 00:00:00.629265550 +387 75 0 days 00:00:00.619091400 +387 76 0 days 00:00:00.553046033 +387 77 0 days 00:00:00.701366620 +387 78 0 days 00:00:01.332683822 +387 79 0 days 00:00:00.592004080 +387 80 0 days 00:00:00.751695936 +387 81 0 days 00:00:00.602758835 +387 82 0 days 00:00:00.336825365 +387 83 0 days 00:00:01.123740865 +387 84 0 days 00:00:01.169286564 +387 85 0 days 00:00:00.831807760 +387 86 0 days 00:00:00.648590320 +387 87 0 days 00:00:01.005951246 +387 88 0 days 00:00:00.379498260 +387 89 0 days 00:00:00.614868720 +387 90 0 days 00:00:01.008517720 +387 91 0 days 00:00:00.355380270 +387 92 0 days 00:00:00.686339680 +387 93 0 days 00:00:01.181050528 +387 94 0 days 00:00:00.451010305 +387 95 0 days 00:00:00.584678860 +387 96 0 days 00:00:01.057876913 +387 97 0 days 00:00:01.042705913 +387 98 0 days 00:00:01.175158840 +387 99 0 days 00:00:00.370256845 +387 100 0 days 00:00:00.355978755 +388 1 0 days 00:00:00.212993217 +388 2 0 days 00:00:00.348413524 +388 3 0 days 00:00:00.288833906 +388 4 0 days 00:00:00.288917340 +388 5 0 days 00:00:00.206685830 +388 6 0 days 00:00:00.325468510 +388 7 0 days 00:00:00.440643913 +388 8 0 days 00:00:00.270859273 +388 9 0 
days 00:00:00.548832153 +388 10 0 days 00:00:00.264945376 +388 11 0 days 00:00:00.365746897 +388 12 0 days 00:00:00.294534293 +388 13 0 days 00:00:00.349161783 +388 14 0 days 00:00:00.501656466 +388 15 0 days 00:00:00.173617075 +388 16 0 days 00:00:00.298649870 +388 17 0 days 00:00:00.504418320 +388 18 0 days 00:00:00.195339131 +388 19 0 days 00:00:00.558386630 +388 20 0 days 00:00:00.496932460 +388 21 0 days 00:00:00.335980656 +388 22 0 days 00:00:00.203556751 +388 23 0 days 00:00:00.215073000 +388 24 0 days 00:00:00.330255535 +388 25 0 days 00:00:00.211557697 +388 26 0 days 00:00:00.196167582 +388 27 0 days 00:00:00.171755125 +388 28 0 days 00:00:00.176946780 +388 29 0 days 00:00:00.330374808 +388 30 0 days 00:00:00.255010083 +388 31 0 days 00:00:00.320422264 +388 32 0 days 00:00:00.202465644 +388 33 0 days 00:00:00.349026724 +388 34 0 days 00:00:00.285444386 +388 35 0 days 00:00:00.196118645 +388 36 0 days 00:00:00.323831735 +388 37 0 days 00:00:00.194701164 +388 38 0 days 00:00:00.279020013 +388 39 0 days 00:00:00.293998566 +388 40 0 days 00:00:00.221026026 +388 41 0 days 00:00:00.374660696 +388 42 0 days 00:00:00.565549830 +388 43 0 days 00:00:00.501958993 +388 44 0 days 00:00:00.189745010 +388 45 0 days 00:00:00.567764140 +388 46 0 days 00:00:00.336997496 +388 47 0 days 00:00:00.267552292 +388 48 0 days 00:00:00.337868416 +388 49 0 days 00:00:00.189387826 +388 50 0 days 00:00:00.308727525 +388 51 0 days 00:00:00.329067288 +388 52 0 days 00:00:00.563959650 +388 53 0 days 00:00:00.510381700 +388 54 0 days 00:00:00.240705015 +388 55 0 days 00:00:00.508028233 +388 56 0 days 00:00:00.501923153 +388 57 0 days 00:00:00.327358172 +388 58 0 days 00:00:00.178242390 +388 59 0 days 00:00:00.306385300 +388 60 0 days 00:00:00.511526773 +388 61 0 days 00:00:00.306715885 +388 62 0 days 00:00:00.563044735 +388 63 0 days 00:00:00.181305044 +388 64 0 days 00:00:00.239558140 +388 65 0 days 00:00:00.274784293 +388 66 0 days 00:00:00.192500773 +388 67 0 days 00:00:00.191078313 
+388 68 0 days 00:00:00.303418455 +388 69 0 days 00:00:00.206293073 +388 70 0 days 00:00:00.200357795 +388 71 0 days 00:00:00.319936840 +388 72 0 days 00:00:00.309846250 +388 73 0 days 00:00:00.329175720 +388 74 0 days 00:00:00.598427940 +388 75 0 days 00:00:00.293416140 +388 76 0 days 00:00:00.561915760 +388 77 0 days 00:00:00.560283535 +388 78 0 days 00:00:00.254035690 +388 79 0 days 00:00:00.195955972 +388 80 0 days 00:00:00.221671410 +388 81 0 days 00:00:00.268678502 +388 82 0 days 00:00:00.282746666 +388 83 0 days 00:00:00.212908160 +388 84 0 days 00:00:00.182299872 +388 85 0 days 00:00:00.587976596 +388 86 0 days 00:00:00.261351276 +388 87 0 days 00:00:00.308982925 +388 88 0 days 00:00:00.190873353 +388 89 0 days 00:00:00.625407300 +388 90 0 days 00:00:00.503183580 +388 91 0 days 00:00:00.555024450 +388 92 0 days 00:00:00.190167028 +388 93 0 days 00:00:00.552959830 +388 94 0 days 00:00:00.309443705 +388 95 0 days 00:00:00.185300415 +388 96 0 days 00:00:00.272922920 +388 97 0 days 00:00:00.319440720 +388 98 0 days 00:00:00.315518306 +388 99 0 days 00:00:00.349116488 +388 100 0 days 00:00:00.282722467 +389 1 0 days 00:00:17.704296380 +389 2 0 days 00:00:05.561873150 +389 3 0 days 00:00:05.794726187 +389 4 0 days 00:00:11.576548128 +389 5 0 days 00:00:06.061575900 +389 6 0 days 00:00:16.158866595 +389 7 0 days 00:00:10.419400224 +389 8 0 days 00:00:10.587944886 +389 9 0 days 00:00:18.387088093 +389 10 0 days 00:00:06.143458565 +389 11 0 days 00:00:06.266779120 +389 12 0 days 00:00:12.433182111 +389 13 0 days 00:00:11.488424405 +389 14 0 days 00:00:06.262312292 +389 15 0 days 00:00:18.363223673 +389 16 0 days 00:00:07.738523356 +389 17 0 days 00:00:09.587166224 +389 18 0 days 00:00:06.283363691 +389 19 0 days 00:00:05.462250250 +389 20 0 days 00:00:05.694307593 +389 21 0 days 00:00:11.756557940 +389 22 0 days 00:00:09.696839920 +389 23 0 days 00:00:07.621011637 +389 24 0 days 00:00:18.505237970 +389 25 0 days 00:00:20.239936145 +389 26 0 days 00:00:08.446113228 
+389 27 0 days 00:00:11.562373228 +389 28 0 days 00:00:17.789930105 +389 29 0 days 00:00:18.910562344 +390 1 0 days 00:00:03.125745684 +390 2 0 days 00:00:04.916855826 +390 3 0 days 00:00:10.870079580 +390 4 0 days 00:00:05.160733828 +390 5 0 days 00:00:03.513720235 +390 6 0 days 00:00:05.368178108 +390 7 0 days 00:00:03.186569167 +390 8 0 days 00:00:05.034092416 +390 9 0 days 00:00:02.744288365 +390 10 0 days 00:00:03.365603830 +390 11 0 days 00:00:09.552375154 +390 12 0 days 00:00:06.182674490 +390 13 0 days 00:00:02.797605404 +390 14 0 days 00:00:09.437611940 +390 15 0 days 00:00:04.458473040 +390 16 0 days 00:00:03.034030634 +390 17 0 days 00:00:09.938393083 +390 18 0 days 00:00:10.144038046 +390 19 0 days 00:00:10.260227900 +390 20 0 days 00:00:03.017974690 +390 21 0 days 00:00:02.679182325 +390 22 0 days 00:00:04.328205250 +390 23 0 days 00:00:05.754650234 +390 24 0 days 00:00:05.273729132 +390 25 0 days 00:00:02.790435235 +390 26 0 days 00:00:04.869099740 +390 27 0 days 00:00:02.686898592 +390 28 0 days 00:00:06.504735897 +390 29 0 days 00:00:09.151451115 +390 30 0 days 00:00:08.704277524 +390 31 0 days 00:00:05.609827615 +390 32 0 days 00:00:09.443627040 +390 33 0 days 00:00:09.153378620 +390 34 0 days 00:00:08.521925375 +390 35 0 days 00:00:09.749468916 +390 36 0 days 00:00:05.762505475 +390 37 0 days 00:00:11.413550310 +390 38 0 days 00:00:08.781293716 +390 39 0 days 00:00:03.007836130 +390 40 0 days 00:00:03.411504385 +390 41 0 days 00:00:08.497077867 +390 42 0 days 00:00:05.563390907 +390 43 0 days 00:00:04.863192435 +390 44 0 days 00:00:02.880744386 +390 45 0 days 00:00:03.983578415 +391 1 0 days 00:00:09.428572376 +391 2 0 days 00:00:21.012678210 +391 3 0 days 00:00:23.495437871 +391 4 0 days 00:00:09.470358913 +391 5 0 days 00:00:05.872867682 +391 6 0 days 00:00:14.895997500 +391 7 0 days 00:00:19.174990422 +391 8 0 days 00:00:12.863475056 +391 9 0 days 00:00:10.445313446 +391 10 0 days 00:00:20.174791608 +391 11 0 days 00:00:16.857547120 +391 12 0 
days 00:00:11.545545332 +391 13 0 days 00:00:07.398401414 +391 14 0 days 00:00:07.633107735 +391 15 0 days 00:00:17.186886924 +391 16 0 days 00:00:23.892025483 +391 17 0 days 00:00:22.597825400 +392 1 0 days 00:00:03.609694900 +392 2 0 days 00:00:12.718459917 +392 3 0 days 00:00:07.770276390 +392 4 0 days 00:00:07.428369104 +392 5 0 days 00:00:06.136594280 +392 6 0 days 00:00:07.833412018 +392 7 0 days 00:00:03.378324967 +392 8 0 days 00:00:07.182933145 +392 9 0 days 00:00:12.741973726 +392 10 0 days 00:00:03.331185121 +392 11 0 days 00:00:03.187884572 +392 12 0 days 00:00:03.730170542 +392 13 0 days 00:00:03.460331424 +392 14 0 days 00:00:03.388659823 +392 15 0 days 00:00:03.215942282 +392 16 0 days 00:00:03.057085681 +392 17 0 days 00:00:06.257638006 +392 18 0 days 00:00:03.242756418 +392 19 0 days 00:00:08.185318290 +392 20 0 days 00:00:06.768393640 +392 21 0 days 00:00:10.883973560 +392 22 0 days 00:00:05.726038185 +392 23 0 days 00:00:05.209437760 +392 24 0 days 00:00:11.609623185 +392 25 0 days 00:00:05.472535627 +392 26 0 days 00:00:03.119345727 +392 27 0 days 00:00:04.831216547 +392 28 0 days 00:00:06.161690286 +392 29 0 days 00:00:05.592512647 +392 30 0 days 00:00:06.442991210 +392 31 0 days 00:00:03.135930086 +392 32 0 days 00:00:09.315351092 +392 33 0 days 00:00:07.659445793 +393 1 0 days 00:00:07.539816353 +393 2 0 days 00:00:06.447702566 +393 3 0 days 00:00:17.406972006 +393 4 0 days 00:00:04.060173046 +393 5 0 days 00:00:04.275550613 +393 6 0 days 00:00:08.657010240 +393 7 0 days 00:00:06.605848366 +393 8 0 days 00:00:09.600983560 +393 9 0 days 00:00:08.472054453 +393 10 0 days 00:00:08.276287626 +393 11 0 days 00:00:15.996575186 +393 12 0 days 00:00:09.480135840 +393 13 0 days 00:00:05.201336773 +393 14 0 days 00:00:15.550330720 +393 15 0 days 00:00:04.291618633 +393 16 0 days 00:00:15.673158980 +393 17 0 days 00:00:15.311757286 +393 18 0 days 00:00:07.722800573 +393 19 0 days 00:00:08.663022853 +393 20 0 days 00:00:07.554584626 +393 21 0 days 
00:00:04.766848273 +393 22 0 days 00:00:04.761807266 +393 23 0 days 00:00:15.986261473 +393 24 0 days 00:00:05.261409713 +393 25 0 days 00:00:04.735644993 +393 26 0 days 00:00:09.880729426 +393 27 0 days 00:00:07.499638373 +393 28 0 days 00:00:17.432096380 +393 29 0 days 00:00:08.624180553 +393 30 0 days 00:00:07.695753286 +393 31 0 days 00:00:08.330395786 +393 32 0 days 00:00:07.540064240 +393 33 0 days 00:00:15.255120293 +393 34 0 days 00:00:09.379858193 +393 35 0 days 00:00:08.459877433 +393 36 0 days 00:00:08.448163513 +393 37 0 days 00:00:09.684604473 +393 38 0 days 00:00:04.351152806 +393 39 0 days 00:00:06.592424106 +393 40 0 days 00:00:15.160746706 +393 41 0 days 00:00:06.653183373 +393 42 0 days 00:00:04.327209720 +393 43 0 days 00:00:08.593530313 +393 44 0 days 00:00:04.796467526 +393 45 0 days 00:00:13.912018106 +393 46 0 days 00:00:05.974792406 +393 47 0 days 00:00:17.500470813 +393 48 0 days 00:00:07.541955213 +393 49 0 days 00:00:17.591968533 +393 50 0 days 00:00:15.875891166 +393 51 0 days 00:00:14.130064226 +393 52 0 days 00:00:08.175100486 +393 53 0 days 00:00:08.336096106 +393 54 0 days 00:00:04.942280213 +393 55 0 days 00:00:05.404484706 +393 56 0 days 00:00:08.562063973 +393 57 0 days 00:00:05.340004940 +393 58 0 days 00:00:07.719470766 +393 59 0 days 00:00:08.263844640 +393 60 0 days 00:00:15.543202826 +393 61 0 days 00:00:15.621056420 +393 62 0 days 00:00:08.638303713 +394 1 0 days 00:00:11.119097735 +394 2 0 days 00:00:18.753151284 +394 3 0 days 00:00:18.138596840 +394 4 0 days 00:00:18.244237790 +394 5 0 days 00:00:18.004369840 +394 6 0 days 00:00:10.524805272 +394 7 0 days 00:00:18.489719180 +394 8 0 days 00:00:08.760503590 +394 9 0 days 00:00:05.659996450 +394 10 0 days 00:00:05.821957162 +394 11 0 days 00:00:09.510283288 +394 12 0 days 00:00:05.519291265 +394 13 0 days 00:00:19.608951180 +394 14 0 days 00:00:16.133100575 +394 15 0 days 00:00:05.770724424 +394 16 0 days 00:00:05.441821766 +394 17 0 days 00:00:05.841227732 +394 18 0 days 
00:00:07.366513426 +394 19 0 days 00:00:16.621790415 +394 20 0 days 00:00:09.313542036 +394 21 0 days 00:00:07.490215171 +394 22 0 days 00:00:05.937621096 +394 23 0 days 00:00:06.544404383 +394 24 0 days 00:00:07.868760075 +394 25 0 days 00:00:10.239972456 +394 26 0 days 00:00:06.028469764 +394 27 0 days 00:00:08.241747136 +394 28 0 days 00:00:09.917954425 +394 29 0 days 00:00:11.578738304 +394 30 0 days 00:00:08.876884120 +394 31 0 days 00:00:18.128332755 +394 32 0 days 00:00:17.740620915 +394 33 0 days 00:00:21.710131116 +395 1 0 days 00:00:05.035362500 +395 2 0 days 00:00:07.185384726 +395 3 0 days 00:00:02.811739960 +395 4 0 days 00:00:02.594580246 +395 5 0 days 00:00:02.461671586 +395 6 0 days 00:00:07.225312453 +395 7 0 days 00:00:03.782756893 +395 8 0 days 00:00:08.057427273 +395 9 0 days 00:00:02.720646620 +395 10 0 days 00:00:02.426817706 +395 11 0 days 00:00:04.324641393 +395 12 0 days 00:00:09.095693946 +395 13 0 days 00:00:02.457106226 +395 14 0 days 00:00:02.580538213 +395 15 0 days 00:00:04.164757713 +395 16 0 days 00:00:02.421963546 +395 17 0 days 00:00:07.700813046 +395 18 0 days 00:00:04.326967426 +395 19 0 days 00:00:04.335203026 +395 20 0 days 00:00:04.370362773 +395 21 0 days 00:00:02.320945073 +395 22 0 days 00:00:07.015622180 +395 23 0 days 00:00:08.121741940 +395 24 0 days 00:00:04.194145580 +395 25 0 days 00:00:02.576510873 +395 26 0 days 00:00:05.058212400 +395 27 0 days 00:00:02.186364440 +395 28 0 days 00:00:07.704238600 +395 29 0 days 00:00:02.727563393 +395 30 0 days 00:00:03.287761153 +395 31 0 days 00:00:07.656057893 +395 32 0 days 00:00:03.981374006 +395 33 0 days 00:00:03.129103180 +395 34 0 days 00:00:02.679618906 +395 35 0 days 00:00:08.169070626 +395 36 0 days 00:00:07.137595300 +395 37 0 days 00:00:07.707195020 +395 38 0 days 00:00:08.904040680 +395 39 0 days 00:00:02.560186240 +395 40 0 days 00:00:04.175420206 +395 41 0 days 00:00:08.113220093 +395 42 0 days 00:00:02.751301340 +395 43 0 days 00:00:07.709816700 +395 44 0 days 
00:00:04.198990100 +395 45 0 days 00:00:07.327807840 +395 46 0 days 00:00:09.096459113 +395 47 0 days 00:00:02.402203393 +395 48 0 days 00:00:04.428229313 +395 49 0 days 00:00:03.801496020 +395 50 0 days 00:00:02.438014680 +395 51 0 days 00:00:02.574241466 +395 52 0 days 00:00:03.953193980 +395 53 0 days 00:00:08.980995306 +395 54 0 days 00:00:04.193188013 +395 55 0 days 00:00:03.477813493 +395 56 0 days 00:00:02.580435920 +395 57 0 days 00:00:04.941914100 +395 58 0 days 00:00:03.781684313 +395 59 0 days 00:00:02.426854600 +395 60 0 days 00:00:07.934902166 +395 61 0 days 00:00:03.470755033 +395 62 0 days 00:00:02.636078726 +395 63 0 days 00:00:04.361429113 +395 64 0 days 00:00:04.197875533 +395 65 0 days 00:00:02.396345386 +395 66 0 days 00:00:07.717316366 +395 67 0 days 00:00:07.015869053 +395 68 0 days 00:00:08.131463853 +395 69 0 days 00:00:07.925760886 +395 70 0 days 00:00:06.464579986 +395 71 0 days 00:00:08.119262060 +395 72 0 days 00:00:09.094953624 +395 73 0 days 00:00:07.931259393 +395 74 0 days 00:00:02.649791320 +395 75 0 days 00:00:04.196621666 +395 76 0 days 00:00:06.982142773 +395 77 0 days 00:00:02.328170040 +395 78 0 days 00:00:02.230978393 +395 79 0 days 00:00:03.265278206 +395 80 0 days 00:00:03.490377906 +395 81 0 days 00:00:04.197639973 +395 82 0 days 00:00:03.456342320 +395 83 0 days 00:00:08.167509266 +395 84 0 days 00:00:02.703386173 +395 85 0 days 00:00:05.063740220 +395 86 0 days 00:00:04.472080873 +395 87 0 days 00:00:04.371576546 +395 88 0 days 00:00:02.404452646 +395 89 0 days 00:00:02.813539480 +395 90 0 days 00:00:06.256843640 +395 91 0 days 00:00:02.210927093 +395 92 0 days 00:00:04.371650646 +395 93 0 days 00:00:07.664780193 +395 94 0 days 00:00:07.181648133 +395 95 0 days 00:00:04.360216286 +395 96 0 days 00:00:02.172971220 +395 97 0 days 00:00:02.220815100 +395 98 0 days 00:00:08.120121933 +395 99 0 days 00:00:05.324632506 +395 100 0 days 00:00:04.918889946 +396 1 0 days 00:00:08.971495196 +396 2 0 days 00:00:03.304476136 +396 3 0 
days 00:00:08.942897725 +396 4 0 days 00:00:03.493904205 +396 5 0 days 00:00:10.227895580 +396 6 0 days 00:00:03.065288670 +396 7 0 days 00:00:09.172762080 +396 8 0 days 00:00:04.834386520 +396 9 0 days 00:00:08.472959200 +396 10 0 days 00:00:02.855208685 +396 11 0 days 00:00:10.216544285 +396 12 0 days 00:00:08.298660030 +396 13 0 days 00:00:04.816218835 +396 14 0 days 00:00:03.782225885 +396 15 0 days 00:00:05.408586946 +396 16 0 days 00:00:03.238172636 +396 17 0 days 00:00:08.164308350 +396 18 0 days 00:00:03.296233568 +396 19 0 days 00:00:04.402827235 +396 20 0 days 00:00:02.527633360 +396 21 0 days 00:00:08.048978555 +396 22 0 days 00:00:09.509938680 +396 23 0 days 00:00:08.897195725 +396 24 0 days 00:00:08.562906656 +396 25 0 days 00:00:02.548319900 +396 26 0 days 00:00:05.439276360 +396 27 0 days 00:00:03.000122285 +396 28 0 days 00:00:09.123736070 +396 29 0 days 00:00:06.365113673 +396 30 0 days 00:00:02.896885187 +396 31 0 days 00:00:09.451247140 +396 32 0 days 00:00:03.067894480 +396 33 0 days 00:00:04.674164556 +396 34 0 days 00:00:10.354196910 +396 35 0 days 00:00:05.329501276 +396 36 0 days 00:00:08.388530590 +396 37 0 days 00:00:10.263174765 +396 38 0 days 00:00:03.345324802 +396 39 0 days 00:00:09.358895945 +396 40 0 days 00:00:04.386721450 +396 41 0 days 00:00:04.965753475 +396 42 0 days 00:00:05.748129210 +396 43 0 days 00:00:08.468064725 +396 44 0 days 00:00:08.393069210 +396 45 0 days 00:00:05.713593485 +396 46 0 days 00:00:10.238214220 +396 47 0 days 00:00:02.641185545 +396 48 0 days 00:00:02.953349020 +396 49 0 days 00:00:03.134929432 +396 50 0 days 00:00:08.976841535 +396 51 0 days 00:00:06.604069100 +396 52 0 days 00:00:09.294504395 +396 53 0 days 00:00:02.532960705 +396 54 0 days 00:00:02.961172095 +396 55 0 days 00:00:03.168108355 +396 56 0 days 00:00:05.466721085 +396 57 0 days 00:00:09.403438775 +396 58 0 days 00:00:02.803038305 +396 59 0 days 00:00:08.163811585 +396 60 0 days 00:00:03.053758015 +396 61 0 days 00:00:02.915451600 +396 62 0 
days 00:00:05.437091677 +397 1 0 days 00:01:22.342505026 +397 2 0 days 00:04:43.087530236 +398 1 0 days 00:00:49.746753477 +398 2 0 days 00:00:50.374361988 +398 3 0 days 00:01:37.127112296 +399 1 0 days 00:03:51.358898000 +399 2 0 days 00:03:50.359330540 +399 3 0 days 00:01:55.628010840 +400 1 0 days 00:00:38.057446773 +400 2 0 days 00:02:19.260639440 +400 3 0 days 00:02:19.360410053 +400 4 0 days 00:02:19.578561353 +400 5 0 days 00:01:10.463019660 +400 6 0 days 00:00:37.965515620 +400 7 0 days 00:00:38.007072946 +401 1 0 days 00:01:26.400868990 +401 2 0 days 00:02:23.660940030 +402 1 0 days 00:00:38.152808260 +402 2 0 days 00:01:37.022191228 +402 3 0 days 00:02:19.986041880 +403 1 0 days 00:06:16.635085930 +403 2 0 days 00:04:10.983866823 +404 1 0 days 00:08:16.089925408 +405 1 0 days 00:01:11.085801120 +405 2 0 days 00:00:45.647680046 +405 3 0 days 00:07:12.093197946 +405 4 0 days 00:00:55.933905790 +406 1 0 days 00:01:03.480584313 +406 2 0 days 00:02:45.651530480 +406 3 0 days 00:05:33.220240940 +406 4 0 days 00:00:25.113189106 +407 1 0 days 00:12:59.944775556 +408 1 0 days 00:00:16.141228332 +408 2 0 days 00:02:17.152448166 +408 3 0 days 00:04:55.516112042 +409 2 0 days 00:00:48.791277566 +409 3 0 days 00:01:02.721543140 +409 4 0 days 00:01:04.387053602 +409 5 0 days 00:02:44.496855290 +409 7 0 days 00:01:08.267930409 +410 1 0 days 00:00:20.652559980 +410 2 0 days 00:00:42.694214466 +410 3 0 days 00:03:44.699318093 +410 4 0 days 00:00:17.068580705 +410 5 0 days 00:02:03.863479210 +410 6 0 days 00:00:29.780484411 +410 7 0 days 00:00:32.975873868 +411 1 0 days 00:01:34.930212586 +411 2 0 days 00:01:43.861724132 +412 1 0 days 00:00:30.119688410 +412 2 0 days 00:01:35.041569273 +412 4 0 days 00:01:18.948164406 +413 1 0 days 00:12:04.716650390 +414 1 0 days 00:03:58.718205145 +414 2 0 days 00:04:30.218449485 +415 1 0 days 00:00:00.198866586 +415 2 0 days 00:00:00.211838056 +415 3 0 days 00:00:00.299048935 +415 4 0 days 00:00:00.168953400 +415 5 0 days 
00:00:00.254381400 +415 6 0 days 00:00:00.313306692 +415 7 0 days 00:00:00.146722466 +415 8 0 days 00:00:00.201480784 +415 9 0 days 00:00:00.281832044 +415 10 0 days 00:00:00.262217713 +415 11 0 days 00:00:00.243038026 +415 12 0 days 00:00:00.177562400 +415 13 0 days 00:00:00.194903000 +415 14 0 days 00:00:00.356785335 +415 15 0 days 00:00:00.208187823 +415 16 0 days 00:00:00.296949931 +415 17 0 days 00:00:00.176113666 +415 18 0 days 00:00:00.270117460 +415 19 0 days 00:00:00.153747020 +415 20 0 days 00:00:00.244872573 +415 21 0 days 00:00:00.269998546 +415 22 0 days 00:00:00.226012148 +415 23 0 days 00:00:00.245369913 +415 24 0 days 00:00:00.175818993 +415 25 0 days 00:00:00.145916980 +415 26 0 days 00:00:00.296173062 +415 27 0 days 00:00:00.313098540 +415 28 0 days 00:00:00.281775440 +415 29 0 days 00:00:00.212162735 +415 30 0 days 00:00:00.154546870 +415 31 0 days 00:00:00.276066120 +415 32 0 days 00:00:00.296077408 +415 33 0 days 00:00:00.169550300 +415 34 0 days 00:00:00.143806453 +415 35 0 days 00:00:00.138925940 +415 36 0 days 00:00:00.215874568 +415 37 0 days 00:00:00.197698080 +415 38 0 days 00:00:00.245496226 +415 39 0 days 00:00:00.323696560 +415 40 0 days 00:00:00.274946986 +415 41 0 days 00:00:00.241716853 +415 42 0 days 00:00:00.204921300 +415 43 0 days 00:00:00.267461280 +415 44 0 days 00:00:00.301878124 +415 45 0 days 00:00:00.299081550 +415 46 0 days 00:00:00.196127093 +415 47 0 days 00:00:00.308381992 +415 48 0 days 00:00:00.356844065 +415 49 0 days 00:00:00.148646146 +415 50 0 days 00:00:00.219971146 +415 51 0 days 00:00:00.270544910 +415 52 0 days 00:00:00.173664450 +415 53 0 days 00:00:00.272828895 +415 54 0 days 00:00:00.246125493 +415 55 0 days 00:00:00.306091108 +415 56 0 days 00:00:00.325851713 +415 57 0 days 00:00:00.313054944 +415 58 0 days 00:00:00.286450993 +415 59 0 days 00:00:00.178605373 +415 60 0 days 00:00:00.269930995 +415 61 0 days 00:00:00.231910893 +415 62 0 days 00:00:00.326352853 +415 63 0 days 00:00:00.187065860 +415 64 0 
days 00:00:00.188757380 +415 65 0 days 00:00:00.251038413 +415 66 0 days 00:00:00.291977970 +415 67 0 days 00:00:00.169563476 +415 68 0 days 00:00:00.374812600 +415 69 0 days 00:00:00.291929733 +415 70 0 days 00:00:00.385837423 +415 71 0 days 00:00:00.262993875 +415 72 0 days 00:00:00.287264300 +415 73 0 days 00:00:00.294263732 +415 74 0 days 00:00:00.169924443 +415 75 0 days 00:00:00.146523560 +415 76 0 days 00:00:00.266349520 +415 77 0 days 00:00:00.190370720 +415 78 0 days 00:00:00.265999646 +415 79 0 days 00:00:00.176638093 +415 80 0 days 00:00:00.270964460 +415 81 0 days 00:00:00.245373293 +415 82 0 days 00:00:00.326577733 +415 83 0 days 00:00:00.326092453 +415 84 0 days 00:00:00.357089225 +415 85 0 days 00:00:00.250265820 +415 86 0 days 00:00:00.196496660 +415 87 0 days 00:00:00.245607666 +415 88 0 days 00:00:00.249251873 +415 89 0 days 00:00:00.222159432 +415 90 0 days 00:00:00.243595773 +415 91 0 days 00:00:00.273294273 +415 92 0 days 00:00:00.245735693 +415 93 0 days 00:00:00.183805085 +415 94 0 days 00:00:00.169210966 +415 95 0 days 00:00:00.178259540 +415 96 0 days 00:00:00.250296646 +415 97 0 days 00:00:00.355095560 +415 98 0 days 00:00:00.194285320 +415 99 0 days 00:00:00.290011350 +415 100 0 days 00:00:00.268680155 +416 1 0 days 00:00:00.169437053 +416 2 0 days 00:00:00.126824506 +416 3 0 days 00:00:00.176238352 +416 4 0 days 00:00:00.115345416 +416 5 0 days 00:00:00.167465360 +416 6 0 days 00:00:00.170853870 +416 7 0 days 00:00:00.168049200 +416 8 0 days 00:00:00.092261873 +416 9 0 days 00:00:00.208556200 +416 10 0 days 00:00:00.168492400 +416 11 0 days 00:00:00.152851733 +416 12 0 days 00:00:00.119601482 +416 13 0 days 00:00:00.130646765 +416 14 0 days 00:00:00.151264293 +416 15 0 days 00:00:00.167190426 +416 16 0 days 00:00:00.176938425 +416 17 0 days 00:00:00.109229440 +416 18 0 days 00:00:00.167762926 +416 19 0 days 00:00:00.225844892 +416 20 0 days 00:00:00.118708151 +416 21 0 days 00:00:00.167309680 +416 22 0 days 00:00:00.177317265 +416 23 0 
days 00:00:00.225793160 +416 24 0 days 00:00:00.099919840 +416 25 0 days 00:00:00.224118224 +416 26 0 days 00:00:00.173474990 +416 27 0 days 00:00:00.118894031 +416 28 0 days 00:00:00.178720700 +416 29 0 days 00:00:00.152363580 +416 30 0 days 00:00:00.178549926 +416 31 0 days 00:00:00.168174820 +416 32 0 days 00:00:00.192670076 +416 33 0 days 00:00:00.178691260 +416 34 0 days 00:00:00.131374145 +416 35 0 days 00:00:00.118240136 +416 36 0 days 00:00:00.207721513 +416 37 0 days 00:00:00.115980752 +416 38 0 days 00:00:00.185630900 +416 39 0 days 00:00:00.184934360 +416 40 0 days 00:00:00.156883630 +416 41 0 days 00:00:00.112527393 +416 42 0 days 00:00:00.178458006 +416 43 0 days 00:00:00.149109126 +416 44 0 days 00:00:00.167172672 +416 45 0 days 00:00:00.179085395 +416 46 0 days 00:00:00.116173570 +416 47 0 days 00:00:00.134577540 +416 48 0 days 00:00:00.119375792 +416 49 0 days 00:00:00.163405573 +416 50 0 days 00:00:00.207362780 +416 51 0 days 00:00:00.231789971 +416 52 0 days 00:00:00.126791233 +416 53 0 days 00:00:00.096387566 +416 54 0 days 00:00:00.192966070 +416 55 0 days 00:00:00.128493093 +416 56 0 days 00:00:00.164132252 +416 57 0 days 00:00:00.180441725 +416 58 0 days 00:00:00.115131840 +416 59 0 days 00:00:00.115672260 +416 60 0 days 00:00:00.234844770 +416 61 0 days 00:00:00.166928253 +416 62 0 days 00:00:00.119498972 +416 63 0 days 00:00:00.126197746 +416 64 0 days 00:00:00.226037900 +416 65 0 days 00:00:00.190222856 +416 66 0 days 00:00:00.165659308 +416 67 0 days 00:00:00.202111273 +416 68 0 days 00:00:00.119870944 +416 69 0 days 00:00:00.098755346 +416 70 0 days 00:00:00.131234135 +416 71 0 days 00:00:00.095638786 +416 72 0 days 00:00:00.194719168 +416 73 0 days 00:00:00.178019950 +416 74 0 days 00:00:00.166490486 +416 75 0 days 00:00:00.124076293 +416 76 0 days 00:00:00.208664920 +416 77 0 days 00:00:00.127908493 +416 78 0 days 00:00:00.099846593 +416 79 0 days 00:00:00.096681135 +416 80 0 days 00:00:00.181047042 +416 81 0 days 00:00:00.209536373 
+416 82 0 days 00:00:00.190032760 +416 83 0 days 00:00:00.185548410 +416 84 0 days 00:00:00.111670000 +416 85 0 days 00:00:00.177851880 +416 86 0 days 00:00:00.192086030 +416 87 0 days 00:00:00.178605520 +416 88 0 days 00:00:00.112538680 +416 89 0 days 00:00:00.191352436 +416 90 0 days 00:00:00.099198604 +416 91 0 days 00:00:00.176237405 +416 92 0 days 00:00:00.178693900 +416 93 0 days 00:00:00.178464085 +416 94 0 days 00:00:00.119365400 +416 95 0 days 00:00:00.151833746 +416 96 0 days 00:00:00.118965133 +416 97 0 days 00:00:00.118081375 +416 98 0 days 00:00:00.168460400 +416 99 0 days 00:00:00.111988360 +416 100 0 days 00:00:00.190701844 +417 1 0 days 00:00:05.951379804 +417 2 0 days 00:00:02.778365873 +417 3 0 days 00:00:06.182218300 +417 4 0 days 00:00:03.917998608 +417 5 0 days 00:00:03.638907806 +417 6 0 days 00:00:03.908163955 +417 7 0 days 00:00:04.188086905 +417 8 0 days 00:00:00.796503520 +417 9 0 days 00:00:00.429268153 +417 10 0 days 00:00:01.994215513 +417 11 0 days 00:00:01.346725284 +417 12 0 days 00:00:01.694893915 +417 13 0 days 00:00:20.742556160 +417 14 0 days 00:00:01.290632453 +417 15 0 days 00:00:03.330040416 +417 16 0 days 00:00:11.072665840 +417 17 0 days 00:00:03.004630033 +417 18 0 days 00:00:00.707755193 +417 19 0 days 00:00:04.633406720 +417 20 0 days 00:00:19.639702310 +417 21 0 days 00:00:16.847471546 +417 22 0 days 00:00:04.527877533 +417 23 0 days 00:00:02.939589908 +417 24 0 days 00:00:01.118769975 +417 25 0 days 00:00:22.366507126 +417 26 0 days 00:00:06.081416512 +417 27 0 days 00:00:21.670386693 +417 28 0 days 00:00:02.271330530 +417 29 0 days 00:00:14.132514795 +417 30 0 days 00:00:01.878727168 +417 31 0 days 00:00:00.651420166 +417 32 0 days 00:00:01.601746625 +417 33 0 days 00:00:03.830007853 +417 34 0 days 00:00:01.836682810 +417 35 0 days 00:00:24.821556263 +417 36 0 days 00:00:02.338029266 +417 37 0 days 00:00:02.217984006 +417 38 0 days 00:00:02.565703360 +417 39 0 days 00:00:01.247283820 +417 40 0 days 00:00:02.632814360 
+417 41 0 days 00:00:00.544783820 +417 42 0 days 00:00:01.520470580 +417 43 0 days 00:00:06.331251313 +417 44 0 days 00:00:02.803051066 +417 45 0 days 00:00:01.123045086 +417 46 0 days 00:00:01.619579520 +417 47 0 days 00:00:07.167019106 +417 48 0 days 00:00:18.265464046 +417 49 0 days 00:00:18.298093860 +417 50 0 days 00:00:03.318323272 +417 51 0 days 00:00:01.051362040 +417 52 0 days 00:00:18.704586866 +417 53 0 days 00:00:01.821614400 +417 54 0 days 00:00:01.224644693 +417 55 0 days 00:00:00.794248093 +417 56 0 days 00:00:01.526317653 +417 57 0 days 00:00:01.331504360 +417 58 0 days 00:00:04.112677686 +417 59 0 days 00:00:03.268118315 +417 60 0 days 00:00:02.762573206 +417 61 0 days 00:00:03.145516286 +417 62 0 days 00:00:00.945954636 +417 63 0 days 00:00:00.722867553 +417 64 0 days 00:00:03.121664120 +417 65 0 days 00:00:00.684355386 +417 66 0 days 00:00:03.851283973 +417 67 0 days 00:00:00.843772340 +417 68 0 days 00:00:01.280195340 +417 69 0 days 00:00:08.622909133 +417 70 0 days 00:00:12.172465766 +417 71 0 days 00:00:02.125023925 +417 72 0 days 00:00:07.170427904 +417 73 0 days 00:00:17.580179560 +417 74 0 days 00:00:02.251472480 +417 75 0 days 00:00:01.674257833 +417 76 0 days 00:00:00.732374040 +417 77 0 days 00:00:05.660463850 +417 78 0 days 00:00:29.251696692 +418 1 0 days 00:00:01.310931120 +418 2 0 days 00:00:01.990246166 +418 3 0 days 00:00:05.284517913 +418 4 0 days 00:00:04.092955850 +418 5 0 days 00:00:08.918880340 +418 6 0 days 00:00:09.072782524 +418 7 0 days 00:00:08.438331500 +418 8 0 days 00:00:00.834546866 +418 9 0 days 00:00:08.263607413 +418 10 0 days 00:00:07.012493433 +418 11 0 days 00:00:09.230840573 +418 12 0 days 00:00:12.742614450 +418 13 0 days 00:00:00.272531710 +418 14 0 days 00:00:01.001240893 +418 15 0 days 00:00:02.348194426 +418 16 0 days 00:00:13.246415060 +418 17 0 days 00:00:00.584933136 +418 18 0 days 00:00:01.961335333 +418 19 0 days 00:00:06.745722800 +418 20 0 days 00:00:01.067548773 +418 21 0 days 00:00:01.934007173 
+418 22 0 days 00:00:00.468159436 +418 23 0 days 00:00:01.057716800 +418 24 0 days 00:00:00.683698193 +418 25 0 days 00:00:14.208660980 +418 26 0 days 00:00:07.613115552 +418 27 0 days 00:00:01.578055820 +418 28 0 days 00:00:00.687383012 +418 29 0 days 00:00:01.627145785 +418 30 0 days 00:00:00.749432106 +418 31 0 days 00:00:04.645183070 +418 32 0 days 00:00:05.138901080 +418 33 0 days 00:00:10.579563585 +418 34 0 days 00:00:00.700968536 +418 35 0 days 00:00:01.623640660 +418 36 0 days 00:00:00.439818100 +418 37 0 days 00:00:11.576592033 +418 38 0 days 00:00:02.217801426 +418 39 0 days 00:00:03.010623973 +418 40 0 days 00:00:02.397317426 +418 41 0 days 00:00:10.533380800 +418 42 0 days 00:00:00.848202693 +418 43 0 days 00:00:04.475321500 +418 44 0 days 00:00:02.959308480 +418 45 0 days 00:00:01.315448730 +418 46 0 days 00:00:03.468850046 +418 47 0 days 00:00:00.355032095 +418 48 0 days 00:00:00.753764873 +418 49 0 days 00:00:04.761650813 +418 50 0 days 00:00:01.056362280 +418 51 0 days 00:00:09.692518226 +418 52 0 days 00:00:08.930531433 +418 53 0 days 00:00:00.566087430 +418 54 0 days 00:00:01.115800800 +418 55 0 days 00:00:00.550042315 +418 56 0 days 00:00:00.572990206 +418 57 0 days 00:00:01.259062906 +418 58 0 days 00:00:01.201185815 +418 59 0 days 00:00:01.036349865 +418 60 0 days 00:00:07.319369633 +418 61 0 days 00:00:03.759142490 +418 62 0 days 00:00:00.451286965 +418 63 0 days 00:00:06.981530460 +418 64 0 days 00:00:01.727438760 +418 65 0 days 00:00:00.765492711 +418 66 0 days 00:00:00.248436175 +418 67 0 days 00:00:13.433157120 +418 68 0 days 00:00:02.936463020 +418 69 0 days 00:00:00.461944013 +418 70 0 days 00:00:10.096758904 +418 71 0 days 00:00:07.974723406 +418 72 0 days 00:00:03.466279733 +418 73 0 days 00:00:01.104035773 +418 74 0 days 00:00:00.857119492 +418 75 0 days 00:00:00.754970346 +418 76 0 days 00:00:02.566092703 +418 77 0 days 00:00:01.574118193 +418 78 0 days 00:00:00.945267648 +418 79 0 days 00:00:01.280421940 +418 80 0 days 
00:00:01.454650410 +418 81 0 days 00:00:00.845560440 +418 82 0 days 00:00:00.840955673 +418 83 0 days 00:00:00.632559580 +418 84 0 days 00:00:00.873636186 +418 85 0 days 00:00:02.160339696 +418 86 0 days 00:00:00.927612740 +418 87 0 days 00:00:02.166043252 +418 88 0 days 00:00:00.938469366 +418 89 0 days 00:00:01.635916253 +418 90 0 days 00:00:13.979186704 +418 91 0 days 00:00:00.957573655 +418 92 0 days 00:00:12.059646746 +418 93 0 days 00:00:00.721976425 +418 94 0 days 00:00:02.507876833 +418 95 0 days 00:00:02.564462353 +418 96 0 days 00:00:01.342276116 +418 97 0 days 00:00:00.904414226 +418 98 0 days 00:00:02.282676314 +418 99 0 days 00:00:08.420786900 +418 100 0 days 00:00:00.750101133 +419 1 0 days 00:00:06.058733240 +419 2 0 days 00:00:03.052893493 +419 3 0 days 00:00:03.947903673 +419 4 0 days 00:00:02.446414435 +419 5 0 days 00:00:02.466073596 +419 6 0 days 00:00:01.092187146 +419 7 0 days 00:00:04.567110273 +419 8 0 days 00:00:02.309252406 +419 9 0 days 00:00:01.344705810 +419 10 0 days 00:00:05.499356840 +419 11 0 days 00:00:00.681413053 +419 12 0 days 00:00:12.253622992 +419 13 0 days 00:00:00.907902680 +419 14 0 days 00:00:01.295409450 +419 15 0 days 00:00:01.820030105 +419 16 0 days 00:00:06.415641835 +419 17 0 days 00:00:06.082971815 +419 18 0 days 00:00:05.910352780 +419 19 0 days 00:00:22.389550713 +419 20 0 days 00:00:01.735001060 +419 21 0 days 00:00:24.942379286 +419 22 0 days 00:00:01.205625686 +419 23 0 days 00:00:25.135385926 +419 24 0 days 00:00:01.130255806 +419 25 0 days 00:00:00.860561593 +419 26 0 days 00:00:11.758236070 +419 27 0 days 00:00:03.135499520 +419 28 0 days 00:00:06.975116665 +419 29 0 days 00:00:01.662431813 +419 30 0 days 00:00:01.186949153 +419 31 0 days 00:00:00.782313480 +419 32 0 days 00:00:01.113751010 +419 33 0 days 00:00:06.638493525 +419 34 0 days 00:00:04.998310208 +419 35 0 days 00:00:01.507999313 +419 36 0 days 00:00:02.319927206 +419 37 0 days 00:00:01.990125028 +419 38 0 days 00:00:15.977808125 +419 39 0 days 
00:00:05.554540152 +419 40 0 days 00:00:01.698236433 +419 41 0 days 00:00:00.749829800 +419 42 0 days 00:00:02.307298686 +419 43 0 days 00:00:20.368470806 +419 44 0 days 00:00:02.147328633 +419 45 0 days 00:00:00.467738080 +419 46 0 days 00:00:03.763206113 +419 47 0 days 00:00:02.095053170 +419 48 0 days 00:00:01.546887556 +419 49 0 days 00:00:01.458850684 +419 50 0 days 00:00:01.784831586 +419 51 0 days 00:00:22.521607816 +419 52 0 days 00:00:00.998208586 +419 53 0 days 00:00:01.596741913 +419 54 0 days 00:00:02.182315873 +419 55 0 days 00:00:01.357810866 +419 56 0 days 00:00:02.822720606 +419 57 0 days 00:00:01.728984392 +419 58 0 days 00:00:12.660553213 +419 59 0 days 00:00:07.640772196 +419 60 0 days 00:00:01.391456392 +419 61 0 days 00:00:34.669305075 +419 62 0 days 00:00:00.825480420 +419 63 0 days 00:00:04.227720700 +419 64 0 days 00:00:06.591921893 +419 65 0 days 00:00:02.564798326 +419 66 0 days 00:00:01.347731866 +419 67 0 days 00:00:06.052892940 +419 68 0 days 00:00:01.945661315 +419 69 0 days 00:00:06.921536813 +419 70 0 days 00:00:18.841075348 +419 71 0 days 00:00:01.325282580 +419 72 0 days 00:00:03.455473312 +419 73 0 days 00:00:16.403287276 +419 74 0 days 00:00:26.457165713 +419 75 0 days 00:00:03.382555680 +419 76 0 days 00:00:01.384814050 +419 77 0 days 00:00:01.878404420 +419 78 0 days 00:00:06.696779120 +419 79 0 days 00:00:27.212530824 +420 1 0 days 00:00:02.070117900 +420 2 0 days 00:00:00.457292536 +420 3 0 days 00:00:00.739120570 +420 4 0 days 00:00:05.264647330 +420 5 0 days 00:00:00.944546332 +420 6 0 days 00:00:05.947133125 +420 7 0 days 00:00:01.070728210 +420 8 0 days 00:00:01.136443393 +420 9 0 days 00:00:01.295009753 +420 10 0 days 00:00:03.748484484 +420 11 0 days 00:00:11.893336186 +420 12 0 days 00:00:00.971244020 +420 13 0 days 00:00:04.717316366 +420 14 0 days 00:00:01.277760805 +420 15 0 days 00:00:01.873920500 +420 16 0 days 00:00:00.421320586 +420 17 0 days 00:00:03.193613980 +420 18 0 days 00:00:02.990543686 +420 19 0 days 
00:00:01.251833044 +420 20 0 days 00:00:11.308142720 +420 21 0 days 00:00:00.444664636 +420 22 0 days 00:00:00.226528965 +420 23 0 days 00:00:00.384746653 +420 24 0 days 00:00:01.057878296 +420 25 0 days 00:00:16.571000434 +420 26 0 days 00:00:00.614307566 +420 27 0 days 00:00:06.749391310 +420 28 0 days 00:00:00.889604120 +420 29 0 days 00:00:02.267374084 +420 30 0 days 00:00:03.196765633 +420 31 0 days 00:00:00.584675773 +420 32 0 days 00:00:03.952322786 +420 33 0 days 00:00:06.221305115 +420 34 0 days 00:00:13.352374965 +420 35 0 days 00:00:00.743584080 +420 36 0 days 00:00:01.450187180 +420 37 0 days 00:00:00.601330500 +420 38 0 days 00:00:03.286436975 +420 39 0 days 00:00:00.973579112 +420 40 0 days 00:00:00.956281426 +420 41 0 days 00:00:00.625266720 +420 42 0 days 00:00:01.044157896 +420 43 0 days 00:00:01.950768693 +420 44 0 days 00:00:00.579879850 +420 45 0 days 00:00:00.804527573 +420 46 0 days 00:00:13.843201693 +420 47 0 days 00:00:02.059070173 +420 48 0 days 00:00:01.358072596 +420 49 0 days 00:00:06.199051476 +420 50 0 days 00:00:01.875416492 +420 51 0 days 00:00:02.136265345 +420 52 0 days 00:00:00.893997020 +420 53 0 days 00:00:01.945093133 +420 54 0 days 00:00:00.393119186 +420 55 0 days 00:00:00.328042552 +420 56 0 days 00:00:02.198326540 +420 57 0 days 00:00:01.565894512 +420 58 0 days 00:00:13.955991413 +420 59 0 days 00:00:01.037980780 +420 60 0 days 00:00:11.135229175 +420 61 0 days 00:00:00.323443260 +420 62 0 days 00:00:01.872366692 +420 63 0 days 00:00:00.506621580 +420 64 0 days 00:00:11.076648146 +420 65 0 days 00:00:09.393290780 +420 66 0 days 00:00:01.333441493 +420 67 0 days 00:00:01.203517733 +420 68 0 days 00:00:00.413821266 +420 69 0 days 00:00:00.843509173 +420 70 0 days 00:00:02.151506728 +420 71 0 days 00:00:00.787519733 +420 72 0 days 00:00:03.489828582 +420 73 0 days 00:00:00.908864630 +420 74 0 days 00:00:16.537308335 +420 75 0 days 00:00:08.398389895 +420 76 0 days 00:00:07.527516793 +420 77 0 days 00:00:00.863426366 +420 78 
0 days 00:00:00.499035910 +420 79 0 days 00:00:16.849473670 +420 80 0 days 00:00:00.938410446 +420 81 0 days 00:00:12.480605513 +420 82 0 days 00:00:00.688815133 +420 83 0 days 00:00:01.349225180 +420 84 0 days 00:00:00.510779960 +420 85 0 days 00:00:01.569209104 +420 86 0 days 00:00:00.264768406 +420 87 0 days 00:00:05.077297920 +420 88 0 days 00:00:03.831683560 +420 89 0 days 00:00:00.433798026 +420 90 0 days 00:00:02.740011992 +420 91 0 days 00:00:01.276448766 +420 92 0 days 00:00:00.484568226 +420 93 0 days 00:00:13.259433253 +420 94 0 days 00:00:00.683933865 +420 95 0 days 00:00:12.229624005 +420 96 0 days 00:00:01.418578045 +420 97 0 days 00:00:00.397081726 +420 98 0 days 00:00:00.916822523 +420 99 0 days 00:00:01.194911330 +420 100 0 days 00:00:02.015723540 +421 1 0 days 00:00:00.307066860 +421 2 0 days 00:00:00.246122746 +421 3 0 days 00:00:00.268904210 +421 4 0 days 00:00:00.296553406 +421 5 0 days 00:00:00.271712660 +421 6 0 days 00:00:00.175271885 +421 7 0 days 00:00:00.250428466 +421 8 0 days 00:00:00.318337726 +421 9 0 days 00:00:00.165381133 +421 10 0 days 00:00:00.162291686 +421 11 0 days 00:00:00.327391460 +421 12 0 days 00:00:00.139651000 +421 13 0 days 00:00:00.173314080 +421 14 0 days 00:00:00.246846133 +421 15 0 days 00:00:00.346062755 +421 16 0 days 00:00:00.300284325 +421 17 0 days 00:00:00.166167526 +421 18 0 days 00:00:00.274638205 +421 19 0 days 00:00:00.191847053 +421 20 0 days 00:00:00.270914325 +421 21 0 days 00:00:00.163135013 +421 22 0 days 00:00:00.247712005 +421 23 0 days 00:00:00.249464026 +421 24 0 days 00:00:00.348716755 +421 25 0 days 00:00:00.273164166 +421 26 0 days 00:00:00.273088473 +421 27 0 days 00:00:00.154135256 +421 28 0 days 00:00:00.269286630 +421 29 0 days 00:00:00.188756696 +421 30 0 days 00:00:00.320136853 +421 31 0 days 00:00:00.177286815 +421 32 0 days 00:00:00.277643180 +421 33 0 days 00:00:00.182174836 +421 34 0 days 00:00:00.148346015 +421 35 0 days 00:00:00.249552515 +421 36 0 days 00:00:00.164210680 +421 37 0 
days 00:00:00.343300385 +421 38 0 days 00:00:00.139804080 +421 39 0 days 00:00:00.248649353 +421 40 0 days 00:00:00.268918713 +421 41 0 days 00:00:00.185805626 +421 42 0 days 00:00:00.185072233 +421 43 0 days 00:00:00.225939520 +421 44 0 days 00:00:00.185227356 +421 45 0 days 00:00:00.287224772 +421 46 0 days 00:00:00.218501780 +421 47 0 days 00:00:00.148859055 +421 48 0 days 00:00:00.202578460 +421 49 0 days 00:00:00.247687833 +421 50 0 days 00:00:00.148989305 +421 51 0 days 00:00:00.249503900 +421 52 0 days 00:00:00.298314436 +421 53 0 days 00:00:00.288845220 +421 54 0 days 00:00:00.136876893 +421 55 0 days 00:00:00.272967976 +421 56 0 days 00:00:00.291490795 +421 57 0 days 00:00:00.163352226 +421 58 0 days 00:00:00.272070020 +421 59 0 days 00:00:00.280051560 +421 60 0 days 00:00:00.269754360 +421 61 0 days 00:00:00.377238136 +421 62 0 days 00:00:00.263875166 +421 63 0 days 00:00:00.202216080 +421 64 0 days 00:00:00.259992166 +421 65 0 days 00:00:00.315627124 +421 66 0 days 00:00:00.166275240 +421 67 0 days 00:00:00.256586040 +421 68 0 days 00:00:00.190939386 +421 69 0 days 00:00:00.271308206 +421 70 0 days 00:00:00.163250586 +421 71 0 days 00:00:00.302506411 +421 72 0 days 00:00:00.160247672 +421 73 0 days 00:00:00.286629196 +421 74 0 days 00:00:00.156622632 +421 75 0 days 00:00:00.204524750 +421 76 0 days 00:00:00.190419712 +421 77 0 days 00:00:00.150537460 +421 78 0 days 00:00:00.165218273 +421 79 0 days 00:00:00.163500706 +421 80 0 days 00:00:00.194859271 +421 81 0 days 00:00:00.347416590 +421 82 0 days 00:00:00.319458706 +421 83 0 days 00:00:00.177451433 +421 84 0 days 00:00:00.308618460 +421 85 0 days 00:00:00.273119160 +421 86 0 days 00:00:00.165320993 +421 87 0 days 00:00:00.272291345 +421 88 0 days 00:00:00.352294925 +421 89 0 days 00:00:00.263762032 +421 90 0 days 00:00:00.191171766 +421 91 0 days 00:00:00.186486433 +421 92 0 days 00:00:00.269194980 +421 93 0 days 00:00:00.318212236 +421 94 0 days 00:00:00.138315840 +421 95 0 days 00:00:00.176423780 
+421 96 0 days 00:00:00.270714020 +421 97 0 days 00:00:00.154370680 +421 98 0 days 00:00:00.248807286 +421 99 0 days 00:00:00.233244760 +421 100 0 days 00:00:00.155717772 +422 1 0 days 00:00:00.317133100 +422 2 0 days 00:00:00.289887630 +422 3 0 days 00:00:00.142199266 +422 4 0 days 00:00:00.143863713 +422 5 0 days 00:00:00.142883106 +422 6 0 days 00:00:00.141707146 +422 7 0 days 00:00:00.160477196 +422 8 0 days 00:00:00.328057726 +422 9 0 days 00:00:00.256688833 +422 10 0 days 00:00:00.156549065 +422 11 0 days 00:00:00.165613006 +422 12 0 days 00:00:00.306828304 +422 13 0 days 00:00:00.196346460 +422 14 0 days 00:00:00.327848060 +422 15 0 days 00:00:00.183942405 +422 16 0 days 00:00:00.234283600 +422 17 0 days 00:00:00.275941100 +422 18 0 days 00:00:00.283814748 +422 19 0 days 00:00:00.299557540 +422 20 0 days 00:00:00.208328520 +422 21 0 days 00:00:00.274951285 +422 22 0 days 00:00:00.368575235 +422 23 0 days 00:00:00.383637876 +422 24 0 days 00:00:00.197283544 +422 25 0 days 00:00:00.253619286 +422 26 0 days 00:00:00.143133746 +422 27 0 days 00:00:00.189049966 +422 28 0 days 00:00:00.262235210 +422 29 0 days 00:00:00.256187240 +422 30 0 days 00:00:00.309606473 +422 31 0 days 00:00:00.331301474 +422 32 0 days 00:00:00.271686285 +422 33 0 days 00:00:00.143705500 +422 34 0 days 00:00:00.262079640 +422 35 0 days 00:00:00.276035645 +422 36 0 days 00:00:00.329706573 +422 37 0 days 00:00:00.231095508 +422 38 0 days 00:00:00.277527286 +422 39 0 days 00:00:00.194109556 +422 40 0 days 00:00:00.188940296 +422 41 0 days 00:00:00.330348073 +422 42 0 days 00:00:00.288472773 +422 43 0 days 00:00:00.287442566 +422 44 0 days 00:00:00.276188930 +422 45 0 days 00:00:00.194673780 +422 46 0 days 00:00:00.188062385 +422 47 0 days 00:00:00.186628615 +422 48 0 days 00:00:00.300162825 +422 49 0 days 00:00:00.291508360 +422 50 0 days 00:00:00.261833053 +422 51 0 days 00:00:00.301935372 +422 52 0 days 00:00:00.167745880 +422 53 0 days 00:00:00.209912065 +422 54 0 days 00:00:00.286621060 
+422 55 0 days 00:00:00.203877580 +422 56 0 days 00:00:00.277105500 +422 57 0 days 00:00:00.198935700 +422 58 0 days 00:00:00.209006476 +422 59 0 days 00:00:00.379695272 +422 60 0 days 00:00:00.170719646 +422 61 0 days 00:00:00.316076120 +422 62 0 days 00:00:00.160205593 +422 63 0 days 00:00:00.172831460 +422 64 0 days 00:00:00.143042613 +422 65 0 days 00:00:00.239744766 +422 66 0 days 00:00:00.171358580 +422 67 0 days 00:00:00.172451820 +422 68 0 days 00:00:00.363471260 +422 69 0 days 00:00:00.305829754 +422 70 0 days 00:00:00.260940473 +422 71 0 days 00:00:00.141496386 +422 72 0 days 00:00:00.168078506 +422 73 0 days 00:00:00.284673240 +422 74 0 days 00:00:00.237887466 +422 75 0 days 00:00:00.244010846 +422 76 0 days 00:00:00.190076296 +422 77 0 days 00:00:00.324846288 +422 78 0 days 00:00:00.260607300 +422 79 0 days 00:00:00.182457000 +422 80 0 days 00:00:00.274392640 +422 81 0 days 00:00:00.192834906 +422 82 0 days 00:00:00.178416530 +422 83 0 days 00:00:00.277137446 +422 84 0 days 00:00:00.236722286 +422 85 0 days 00:00:00.146770566 +422 86 0 days 00:00:00.242348073 +422 87 0 days 00:00:00.196307173 +422 88 0 days 00:00:00.294175940 +422 89 0 days 00:00:00.187908390 +422 90 0 days 00:00:00.299927540 +422 91 0 days 00:00:00.203144500 +422 92 0 days 00:00:00.205415393 +422 93 0 days 00:00:00.333347097 +422 94 0 days 00:00:00.336587653 +422 95 0 days 00:00:00.333421273 +422 96 0 days 00:00:00.158580515 +422 97 0 days 00:00:00.360068815 +422 98 0 days 00:00:00.289628773 +422 99 0 days 00:00:00.273886625 +422 100 0 days 00:00:00.192486704 +423 1 0 days 00:00:00.156269833 +423 2 0 days 00:00:00.170327665 +423 3 0 days 00:00:00.211873860 +423 4 0 days 00:00:00.118116806 +423 5 0 days 00:00:00.175267833 +423 6 0 days 00:00:00.170649196 +423 7 0 days 00:00:00.171863615 +423 8 0 days 00:00:00.110556675 +423 9 0 days 00:00:00.178181286 +423 10 0 days 00:00:00.089623066 +423 11 0 days 00:00:00.108069466 +423 12 0 days 00:00:00.108983193 +423 13 0 days 00:00:00.108236000 
+423 14 0 days 00:00:00.218117924 +423 15 0 days 00:00:00.119975395 +423 16 0 days 00:00:00.111133412 +423 17 0 days 00:00:00.175505853 +423 18 0 days 00:00:00.165402335 +423 19 0 days 00:00:00.218925420 +423 20 0 days 00:00:00.097184326 +423 21 0 days 00:00:00.097042880 +423 22 0 days 00:00:00.108233300 +423 23 0 days 00:00:00.166504600 +423 24 0 days 00:00:00.181559134 +423 25 0 days 00:00:00.187992888 +423 26 0 days 00:00:00.157751413 +423 27 0 days 00:00:00.104200253 +423 28 0 days 00:00:00.157534906 +423 29 0 days 00:00:00.113860336 +423 30 0 days 00:00:00.117332306 +423 31 0 days 00:00:00.096050270 +423 32 0 days 00:00:00.106604113 +423 33 0 days 00:00:00.168560106 +423 34 0 days 00:00:00.166061495 +423 35 0 days 00:00:00.089772306 +423 36 0 days 00:00:00.098954880 +423 37 0 days 00:00:00.171862526 +423 38 0 days 00:00:00.206463093 +423 39 0 days 00:00:00.166754853 +423 40 0 days 00:00:00.175760256 +423 41 0 days 00:00:00.174590620 +423 42 0 days 00:00:00.141934426 +423 43 0 days 00:00:00.114752608 +423 44 0 days 00:00:00.105770986 +423 45 0 days 00:00:00.092285740 +423 46 0 days 00:00:00.201472560 +423 47 0 days 00:00:00.176114520 +423 48 0 days 00:00:00.111174846 +423 49 0 days 00:00:00.093533153 +423 50 0 days 00:00:00.223012283 +423 51 0 days 00:00:00.119175760 +423 52 0 days 00:00:00.118661173 +423 53 0 days 00:00:00.092712675 +423 54 0 days 00:00:00.178795460 +423 55 0 days 00:00:00.120267613 +423 56 0 days 00:00:00.175173940 +423 57 0 days 00:00:00.175998904 +423 58 0 days 00:00:00.134564290 +423 59 0 days 00:00:00.112272950 +423 60 0 days 00:00:00.109117966 +423 61 0 days 00:00:00.176979866 +423 62 0 days 00:00:00.205581693 +423 63 0 days 00:00:00.203373080 +423 64 0 days 00:00:00.131426942 +423 65 0 days 00:00:00.096547266 +423 66 0 days 00:00:00.176209184 +423 67 0 days 00:00:00.157244126 +423 68 0 days 00:00:00.171507316 +423 69 0 days 00:00:00.089426186 +423 70 0 days 00:00:00.119738060 +423 71 0 days 00:00:00.113474904 +423 72 0 days 
00:00:00.186059400 +423 73 0 days 00:00:00.118297674 +423 74 0 days 00:00:00.107520613 +423 75 0 days 00:00:00.141614380 +423 76 0 days 00:00:00.110430906 +423 77 0 days 00:00:00.109731193 +423 78 0 days 00:00:00.140620160 +423 79 0 days 00:00:00.189295793 +423 80 0 days 00:00:00.177584853 +423 81 0 days 00:00:00.168670386 +423 82 0 days 00:00:00.122438146 +423 83 0 days 00:00:00.176396680 +423 84 0 days 00:00:00.172979690 +423 85 0 days 00:00:00.219040968 +423 86 0 days 00:00:00.121678626 +423 87 0 days 00:00:00.177261006 +423 88 0 days 00:00:00.094035653 +423 89 0 days 00:00:00.180981586 +423 90 0 days 00:00:00.106759686 +423 91 0 days 00:00:00.108179380 +423 92 0 days 00:00:00.166300245 +423 93 0 days 00:00:00.184548192 +423 94 0 days 00:00:00.158550820 +423 95 0 days 00:00:00.167049246 +423 96 0 days 00:00:00.111758032 +423 97 0 days 00:00:00.158122220 +423 98 0 days 00:00:00.119058360 +423 99 0 days 00:00:00.214724975 +423 100 0 days 00:00:00.174843164 +424 1 0 days 00:00:00.163258176 +424 2 0 days 00:00:00.121212380 +424 3 0 days 00:00:00.121560673 +424 4 0 days 00:00:00.189776448 +424 5 0 days 00:00:00.220472250 +424 6 0 days 00:00:00.148818600 +424 7 0 days 00:00:00.179619080 +424 8 0 days 00:00:00.127070680 +424 9 0 days 00:00:00.098853760 +424 10 0 days 00:00:00.127588205 +424 11 0 days 00:00:00.188611480 +424 12 0 days 00:00:00.149505500 +424 13 0 days 00:00:00.139786617 +424 14 0 days 00:00:00.114044726 +424 15 0 days 00:00:00.121015080 +424 16 0 days 00:00:00.167993640 +424 17 0 days 00:00:00.182666673 +424 18 0 days 00:00:00.123490146 +424 19 0 days 00:00:00.176141453 +424 20 0 days 00:00:00.114779328 +424 21 0 days 00:00:00.127614860 +424 22 0 days 00:00:00.179828860 +424 23 0 days 00:00:00.132006528 +424 24 0 days 00:00:00.126033466 +424 25 0 days 00:00:00.194702636 +424 26 0 days 00:00:00.219753770 +424 27 0 days 00:00:00.114839632 +424 28 0 days 00:00:00.158706853 +424 29 0 days 00:00:00.131355584 +424 30 0 days 00:00:00.119348200 +424 31 0 days 
00:00:00.112633713 +424 32 0 days 00:00:00.107617693 +424 33 0 days 00:00:00.178184390 +424 34 0 days 00:00:00.110732733 +424 35 0 days 00:00:00.169396846 +424 36 0 days 00:00:00.109582760 +424 37 0 days 00:00:00.208330153 +424 38 0 days 00:00:00.180062346 +424 39 0 days 00:00:00.115493410 +424 40 0 days 00:00:00.211110933 +424 41 0 days 00:00:00.210725526 +424 42 0 days 00:00:00.167796740 +424 43 0 days 00:00:00.179947860 +424 44 0 days 00:00:00.185415293 +424 45 0 days 00:00:00.164284706 +424 46 0 days 00:00:00.178327476 +424 47 0 days 00:00:00.180297140 +424 48 0 days 00:00:00.091782706 +424 49 0 days 00:00:00.109613106 +424 50 0 days 00:00:00.115110923 +424 51 0 days 00:00:00.169141340 +424 52 0 days 00:00:00.183554043 +424 53 0 days 00:00:00.180202920 +424 54 0 days 00:00:00.097069400 +424 55 0 days 00:00:00.181829092 +424 56 0 days 00:00:00.193054723 +424 57 0 days 00:00:00.145142253 +424 58 0 days 00:00:00.115914363 +424 59 0 days 00:00:00.095660408 +424 60 0 days 00:00:00.098508804 +424 61 0 days 00:00:00.170745766 +424 62 0 days 00:00:00.174133865 +424 63 0 days 00:00:00.166641773 +424 64 0 days 00:00:00.177730712 +424 65 0 days 00:00:00.162047306 +424 66 0 days 00:00:00.120882331 +424 67 0 days 00:00:00.149395080 +424 68 0 days 00:00:00.228293032 +424 69 0 days 00:00:00.179921010 +424 70 0 days 00:00:00.223413670 +424 71 0 days 00:00:00.167716484 +424 72 0 days 00:00:00.173556400 +424 73 0 days 00:00:00.165546586 +424 74 0 days 00:00:00.161204096 +424 75 0 days 00:00:00.151150173 +424 76 0 days 00:00:00.185736440 +424 77 0 days 00:00:00.109032840 +424 78 0 days 00:00:00.168105046 +424 79 0 days 00:00:00.169486520 +424 80 0 days 00:00:00.190078404 +424 81 0 days 00:00:00.194840290 +424 82 0 days 00:00:00.153055946 +424 83 0 days 00:00:00.116468273 +424 84 0 days 00:00:00.121400626 +424 85 0 days 00:00:00.109974386 +424 86 0 days 00:00:00.121800360 +424 87 0 days 00:00:00.173587210 +424 88 0 days 00:00:00.113868650 +424 89 0 days 00:00:00.166933160 +424 90 
0 days 00:00:00.173490665 +424 91 0 days 00:00:00.188741040 +424 92 0 days 00:00:00.169763700 +424 93 0 days 00:00:00.150675366 +424 94 0 days 00:00:00.165812873 +424 95 0 days 00:00:00.169124566 +424 96 0 days 00:00:00.170843706 +424 97 0 days 00:00:00.114415400 +424 98 0 days 00:00:00.179038053 +424 99 0 days 00:00:00.164166380 +424 100 0 days 00:00:00.162133073 +425 1 0 days 00:00:01.679099525 +425 2 0 days 00:00:01.759724085 +425 3 0 days 00:00:12.424002295 +425 4 0 days 00:00:02.030169733 +425 5 0 days 00:00:19.051799540 +425 6 0 days 00:00:04.563257260 +425 7 0 days 00:00:06.515830040 +425 8 0 days 00:00:01.791075373 +425 9 0 days 00:00:01.050510260 +425 10 0 days 00:00:06.630651652 +425 11 0 days 00:00:01.057451254 +425 12 0 days 00:00:00.938355340 +425 13 0 days 00:00:01.036837910 +425 14 0 days 00:00:23.315736784 +425 15 0 days 00:00:15.217494713 +425 16 0 days 00:00:05.448895606 +425 17 0 days 00:00:21.645038593 +425 18 0 days 00:00:05.184323920 +425 19 0 days 00:00:22.465158313 +425 20 0 days 00:00:02.589648280 +425 21 0 days 00:00:04.391604666 +425 22 0 days 00:00:04.898866366 +425 23 0 days 00:00:23.030988640 +425 24 0 days 00:00:28.530578466 +425 25 0 days 00:00:00.693030093 +425 26 0 days 00:00:06.320288606 +425 27 0 days 00:00:04.538015373 +425 28 0 days 00:00:02.821295806 +425 29 0 days 00:00:13.036600285 +425 30 0 days 00:00:10.626640944 +425 31 0 days 00:00:03.639567353 +425 32 0 days 00:00:01.423873433 +425 33 0 days 00:00:01.843323586 +425 34 0 days 00:00:01.308784593 +425 35 0 days 00:00:01.906254295 +425 36 0 days 00:00:27.260362785 +425 37 0 days 00:00:06.211012846 +425 38 0 days 00:00:00.576059410 +425 39 0 days 00:00:01.069314484 +425 40 0 days 00:00:01.457072273 +425 41 0 days 00:00:03.720357273 +425 42 0 days 00:00:05.651997775 +425 43 0 days 00:00:01.815127366 +425 44 0 days 00:00:01.046564560 +425 45 0 days 00:00:03.394857613 +425 46 0 days 00:00:03.174252115 +425 47 0 days 00:00:07.925724076 +425 48 0 days 00:00:05.242201246 +425 49 0 
days 00:00:00.637566864 +425 50 0 days 00:00:01.994481426 +425 51 0 days 00:00:09.935779466 +425 52 0 days 00:00:04.520025008 +425 53 0 days 00:00:00.628809106 +425 54 0 days 00:00:00.910770445 +425 55 0 days 00:00:01.104087986 +425 56 0 days 00:00:00.661595406 +425 57 0 days 00:00:04.665169033 +425 59 0 days 00:00:05.347496480 +425 60 0 days 00:00:01.001817275 +425 61 0 days 00:00:01.333078020 +425 62 0 days 00:00:22.289386040 +425 63 0 days 00:00:00.703808046 +425 64 0 days 00:00:13.407784643 +425 65 0 days 00:00:06.404472513 +425 66 0 days 00:00:00.745535713 +425 67 0 days 00:00:00.878175992 +425 68 0 days 00:00:22.735523613 +425 69 0 days 00:00:28.533443750 +426 1 0 days 00:00:03.144846696 +426 2 0 days 00:00:01.658778880 +426 3 0 days 00:00:04.790025653 +426 4 0 days 00:00:02.332328546 +426 5 0 days 00:00:01.728187023 +426 6 0 days 00:00:13.161314956 +426 7 0 days 00:00:01.422414190 +426 8 0 days 00:00:32.515393335 +426 9 0 days 00:00:01.969438225 +426 10 0 days 00:00:10.797044490 +426 11 0 days 00:00:01.959379200 +426 12 0 days 00:00:05.864758993 +426 13 0 days 00:00:28.734965685 +426 14 0 days 00:00:05.478168500 +426 15 0 days 00:00:01.453188860 +426 16 0 days 00:00:00.902666720 +426 17 0 days 00:00:01.876051713 +426 18 0 days 00:00:02.728410316 +426 19 0 days 00:00:03.303889086 +426 20 0 days 00:00:06.668872186 +426 21 0 days 00:00:01.578901064 +426 22 0 days 00:00:01.996876413 +426 23 0 days 00:00:07.014504480 +426 24 0 days 00:00:28.686990184 +426 25 0 days 00:00:01.748339686 +426 26 0 days 00:00:11.032910455 +426 27 0 days 00:00:20.928657985 +426 28 0 days 00:00:01.150052046 +426 29 0 days 00:00:04.468867146 +426 30 0 days 00:00:02.159709515 +426 31 0 days 00:00:04.049207380 +426 32 0 days 00:00:21.181298475 +426 33 0 days 00:00:07.387607276 +426 34 0 days 00:00:01.524954070 +426 35 0 days 00:00:01.004964745 +426 36 0 days 00:00:02.230840960 +426 37 0 days 00:00:00.756093586 +426 38 0 days 00:00:24.388732873 +426 39 0 days 00:00:04.592694160 +426 40 0 
days 00:00:25.957851193 +426 41 0 days 00:00:07.297135836 +426 42 0 days 00:00:07.072726430 +426 43 0 days 00:00:19.537659256 +426 44 0 days 00:00:00.560935800 +426 45 0 days 00:00:01.113508226 +426 46 0 days 00:00:02.371194153 +426 47 0 days 00:00:23.547134525 +426 48 0 days 00:00:01.593316700 +426 49 0 days 00:00:04.573029340 +426 50 0 days 00:00:06.352486725 +426 51 0 days 00:00:06.383619873 +426 52 0 days 00:00:07.453824610 +426 53 0 days 00:00:02.054296423 +426 54 0 days 00:00:23.928745653 +426 55 0 days 00:00:01.786363666 +426 56 0 days 00:00:01.095791466 +426 57 0 days 00:00:16.908675753 +427 1 0 days 00:00:01.021010126 +427 2 0 days 00:00:01.125587386 +427 3 0 days 00:00:02.746581345 +427 4 0 days 00:00:02.839591080 +427 5 0 days 00:00:04.308588380 +427 6 0 days 00:00:01.532057690 +427 7 0 days 00:00:00.563351604 +427 8 0 days 00:00:00.275833740 +427 9 0 days 00:00:04.644130600 +427 10 0 days 00:00:00.578706095 +427 11 0 days 00:00:01.454856574 +427 12 0 days 00:00:00.798537343 +427 13 0 days 00:00:00.285288946 +427 14 0 days 00:00:12.154353300 +427 15 0 days 00:00:00.319179704 +427 16 0 days 00:00:08.857701393 +427 17 0 days 00:00:04.538141073 +427 18 0 days 00:00:05.030100270 +427 19 0 days 00:00:03.020165033 +427 20 0 days 00:00:00.341171410 +427 21 0 days 00:00:00.805492126 +427 22 0 days 00:00:00.624513065 +427 23 0 days 00:00:02.676643810 +427 24 0 days 00:00:10.680901970 +427 25 0 days 00:00:06.070964120 +427 26 0 days 00:00:06.036218506 +427 27 0 days 00:00:00.954533986 +427 28 0 days 00:00:14.205221880 +427 29 0 days 00:00:01.893624426 +427 30 0 days 00:00:00.491764400 +427 31 0 days 00:00:00.717701380 +427 32 0 days 00:00:00.486035440 +427 33 0 days 00:00:01.138171072 +427 34 0 days 00:00:12.717508213 +427 35 0 days 00:00:03.835079246 +427 36 0 days 00:00:09.669030793 +427 37 0 days 00:00:06.068051140 +427 38 0 days 00:00:00.243852206 +427 39 0 days 00:00:01.359903312 +427 40 0 days 00:00:01.681250175 +427 41 0 days 00:00:10.637814286 +427 42 0 
days 00:00:00.328787653 +427 43 0 days 00:00:11.267091110 +427 44 0 days 00:00:00.866927873 +427 45 0 days 00:00:06.169312306 +427 46 0 days 00:00:00.883817780 +427 47 0 days 00:00:00.704718584 +427 48 0 days 00:00:09.873488865 +427 49 0 days 00:00:06.869673976 +427 50 0 days 00:00:00.879405440 +427 51 0 days 00:00:01.734731110 +427 52 0 days 00:00:02.653048540 +427 53 0 days 00:00:00.776102405 +427 54 0 days 00:00:00.765396003 +427 55 0 days 00:00:00.595610825 +427 56 0 days 00:00:00.806721980 +427 57 0 days 00:00:02.299031485 +427 58 0 days 00:00:15.377208180 +427 59 0 days 00:00:02.067576305 +427 60 0 days 00:00:07.714271465 +427 61 0 days 00:00:02.872327726 +427 62 0 days 00:00:00.734088206 +427 63 0 days 00:00:01.103596736 +427 64 0 days 00:00:00.375869686 +427 65 0 days 00:00:01.752500056 +427 66 0 days 00:00:16.513711092 +427 67 0 days 00:00:00.445522186 +427 68 0 days 00:00:02.377410013 +427 69 0 days 00:00:03.118470860 +427 70 0 days 00:00:02.154540560 +427 71 0 days 00:00:00.556102620 +427 72 0 days 00:00:01.107848573 +427 73 0 days 00:00:00.910551046 +427 74 0 days 00:00:00.530196693 +427 75 0 days 00:00:08.620143386 +427 76 0 days 00:00:00.666526810 +427 77 0 days 00:00:02.879904846 +427 78 0 days 00:00:00.593996553 +427 79 0 days 00:00:03.177214436 +427 80 0 days 00:00:01.742477593 +427 81 0 days 00:00:05.763155468 +427 82 0 days 00:00:15.630251210 +427 83 0 days 00:00:00.740641126 +427 84 0 days 00:00:02.684091480 +427 85 0 days 00:00:01.556427206 +427 86 0 days 00:00:02.150066008 +427 87 0 days 00:00:00.957267432 +427 88 0 days 00:00:00.437924300 +427 89 0 days 00:00:01.345057300 +427 90 0 days 00:00:00.942587993 +427 91 0 days 00:00:00.753347280 +427 92 0 days 00:00:00.642845873 +427 93 0 days 00:00:03.033178725 +427 94 0 days 00:00:01.062224335 +427 95 0 days 00:00:02.678586526 +427 96 0 days 00:00:06.116858144 +427 97 0 days 00:00:16.554076960 +427 98 0 days 00:00:11.674223660 +427 99 0 days 00:00:00.274811100 +427 100 0 days 00:00:03.044348556 
+428 1 0 days 00:00:01.223572365 +428 2 0 days 00:00:02.516280608 +428 3 0 days 00:00:00.977989306 +428 4 0 days 00:00:00.887491113 +428 5 0 days 00:00:00.756168540 +428 6 0 days 00:00:09.312585280 +428 7 0 days 00:00:00.559254753 +428 8 0 days 00:00:01.000747840 +428 9 0 days 00:00:03.338528756 +428 10 0 days 00:00:09.788011130 +428 11 0 days 00:00:09.627006875 +428 12 0 days 00:00:00.480173593 +428 13 0 days 00:00:01.554852893 +428 14 0 days 00:00:00.511697760 +428 15 0 days 00:00:04.040873522 +428 16 0 days 00:00:12.075921740 +428 17 0 days 00:00:00.993610960 +428 18 0 days 00:00:01.872411766 +428 19 0 days 00:00:00.612460455 +428 20 0 days 00:00:03.843363580 +428 21 0 days 00:00:00.670581080 +428 22 0 days 00:00:00.740710210 +428 23 0 days 00:00:01.136662546 +428 24 0 days 00:00:03.269209932 +428 25 0 days 00:00:02.219919173 +428 26 0 days 00:00:01.362937964 +428 27 0 days 00:00:01.087541570 +428 28 0 days 00:00:01.653450660 +428 29 0 days 00:00:00.625964530 +428 30 0 days 00:00:04.551120820 +428 31 0 days 00:00:10.484381446 +428 32 0 days 00:00:00.734904206 +428 33 0 days 00:00:00.908578650 +428 34 0 days 00:00:03.117425056 +428 35 0 days 00:00:00.446971006 +428 36 0 days 00:00:00.648828968 +428 37 0 days 00:00:00.474632100 +428 38 0 days 00:00:01.431502384 +428 39 0 days 00:00:00.558469246 +428 40 0 days 00:00:01.001130425 +428 41 0 days 00:00:01.302601263 +428 42 0 days 00:00:02.806083133 +428 43 0 days 00:00:02.640743124 +428 44 0 days 00:00:03.550186260 +428 45 0 days 00:00:00.457943613 +428 46 0 days 00:00:10.141369085 +428 47 0 days 00:00:00.514192800 +428 48 0 days 00:00:02.430430406 +428 49 0 days 00:00:02.173735833 +428 50 0 days 00:00:02.791204853 +428 51 0 days 00:00:01.500489555 +428 52 0 days 00:00:02.685429860 +428 53 0 days 00:00:01.964764673 +428 54 0 days 00:00:12.101203568 +428 55 0 days 00:00:01.264787493 +428 56 0 days 00:00:01.066232688 +428 57 0 days 00:00:00.580876420 +428 58 0 days 00:00:00.774680130 +428 59 0 days 00:00:00.551754292 
+428 60 0 days 00:00:00.800935820 +428 61 0 days 00:00:02.086780140 +428 62 0 days 00:00:03.225582653 +428 63 0 days 00:00:02.772265753 +428 64 0 days 00:00:01.344657920 +428 65 0 days 00:00:00.989034793 +428 66 0 days 00:00:00.923451900 +428 67 0 days 00:00:00.951760580 +428 68 0 days 00:00:01.364585455 +428 69 0 days 00:00:01.196696680 +428 70 0 days 00:00:11.588449460 +428 71 0 days 00:00:00.615645300 +428 72 0 days 00:00:01.342921965 +428 73 0 days 00:00:00.594106735 +428 74 0 days 00:00:01.059507135 +428 75 0 days 00:00:03.423837100 +428 76 0 days 00:00:01.186487193 +428 77 0 days 00:00:02.424392320 +428 78 0 days 00:00:01.395097480 +428 79 0 days 00:00:00.419304686 +428 80 0 days 00:00:05.622119073 +428 81 0 days 00:00:00.968045205 +428 82 0 days 00:00:00.673025726 +428 83 0 days 00:00:05.200185866 +428 84 0 days 00:00:00.485589240 +428 85 0 days 00:00:00.858814960 +428 86 0 days 00:00:00.248221440 +428 87 0 days 00:00:01.061973940 +428 88 0 days 00:00:08.632872245 +428 89 0 days 00:00:03.166132048 +428 90 0 days 00:00:01.324837265 +428 91 0 days 00:00:00.498588720 +428 92 0 days 00:00:00.316693590 +428 93 0 days 00:00:03.520734430 +428 94 0 days 00:00:03.446733860 +428 95 0 days 00:00:00.466437020 +428 96 0 days 00:00:00.651713845 +428 97 0 days 00:00:01.092352756 +428 98 0 days 00:00:00.386465266 +428 99 0 days 00:00:00.766598895 +428 100 0 days 00:00:01.091002015 +429 1 0 days 00:00:00.314170693 +429 3 0 days 00:00:00.242817826 +429 4 0 days 00:00:00.216025456 +429 5 0 days 00:00:00.289183526 +429 6 0 days 00:00:00.244088875 +429 7 0 days 00:00:00.314359805 +429 8 0 days 00:00:00.215689930 +429 9 0 days 00:00:00.291379240 +429 10 0 days 00:00:00.316265162 +429 11 0 days 00:00:00.289922413 +429 12 0 days 00:00:00.251743793 +429 13 0 days 00:00:00.328742368 +429 14 0 days 00:00:00.169780313 +429 15 0 days 00:00:00.317843500 +429 16 0 days 00:00:00.374227086 +429 17 0 days 00:00:00.169811513 +429 18 0 days 00:00:00.309301080 +429 19 0 days 00:00:00.315121975 
+429 20 0 days 00:00:00.186334326 +429 21 0 days 00:00:00.339899513 +429 22 0 days 00:00:00.169955153 +429 23 0 days 00:00:00.277924506 +429 24 0 days 00:00:00.303729363 +429 25 0 days 00:00:00.324376774 +429 26 0 days 00:00:00.370598380 +429 27 0 days 00:00:00.353885041 +429 29 0 days 00:00:00.284817422 +429 30 0 days 00:00:00.217562000 +429 31 0 days 00:00:00.340189030 +429 32 0 days 00:00:00.339713683 +429 33 0 days 00:00:00.352060310 +429 34 0 days 00:00:00.322465463 +429 35 0 days 00:00:00.316745767 +429 36 0 days 00:00:00.290086973 +429 37 0 days 00:00:00.327337514 +429 38 0 days 00:00:00.315370845 +429 39 0 days 00:00:00.367083323 +429 40 0 days 00:00:00.241492407 +429 41 0 days 00:00:00.268886200 +429 42 0 days 00:00:00.340283653 +429 43 0 days 00:00:00.347414330 +429 44 0 days 00:00:00.187059366 +429 45 0 days 00:00:00.318791146 +429 46 0 days 00:00:00.351159907 +429 47 0 days 00:00:00.260588444 +429 48 0 days 00:00:00.318441340 +429 49 0 days 00:00:00.318828406 +429 50 0 days 00:00:00.350877865 +429 51 0 days 00:00:00.407749185 +429 52 0 days 00:00:00.347139232 +429 53 0 days 00:00:00.300195795 +429 54 0 days 00:00:00.290828286 +429 55 0 days 00:00:00.252735760 +429 56 0 days 00:00:00.290449880 +429 57 0 days 00:00:00.324934248 +429 58 0 days 00:00:00.216930995 +429 59 0 days 00:00:00.218868485 +429 60 0 days 00:00:00.216507434 +429 61 0 days 00:00:00.225302778 +429 62 0 days 00:00:00.239636948 +429 63 0 days 00:00:00.281923175 +429 64 0 days 00:00:00.226047050 +429 65 0 days 00:00:00.317780302 +429 66 0 days 00:00:00.276814917 +429 67 0 days 00:00:00.276361325 +429 68 0 days 00:00:00.262135367 +429 69 0 days 00:00:00.251673297 +429 70 0 days 00:00:00.277685466 +429 71 0 days 00:00:00.319246880 +429 72 0 days 00:00:00.246853355 +429 73 0 days 00:00:00.219059698 +429 74 0 days 00:00:00.277894120 +429 75 0 days 00:00:00.187736615 +429 76 0 days 00:00:00.319836826 +429 77 0 days 00:00:00.216764946 +429 78 0 days 00:00:00.354511047 +429 79 0 days 
00:00:00.234525140 +429 80 0 days 00:00:00.187463760 +429 81 0 days 00:00:00.313657276 +429 82 0 days 00:00:00.234238715 +429 83 0 days 00:00:00.199170986 +429 84 0 days 00:00:00.258705503 +429 85 0 days 00:00:00.398557010 +429 86 0 days 00:00:00.239981552 +429 87 0 days 00:00:00.234904005 +429 89 0 days 00:00:00.403738225 +429 90 0 days 00:00:00.234152650 +429 91 0 days 00:00:00.294684216 +429 92 0 days 00:00:00.216900180 +429 93 0 days 00:00:00.215578006 +429 94 0 days 00:00:00.230521145 +429 95 0 days 00:00:00.375362958 +429 96 0 days 00:00:00.347364608 +429 97 0 days 00:00:00.246845745 +429 98 0 days 00:00:00.276934460 +429 99 0 days 00:00:00.187814573 +429 100 0 days 00:00:00.290803220 +430 1 0 days 00:00:00.415386986 +430 2 0 days 00:00:00.415605371 +430 3 0 days 00:00:00.256174524 +430 4 0 days 00:00:00.201449720 +430 5 0 days 00:00:00.223299632 +430 6 0 days 00:00:00.270736534 +430 7 0 days 00:00:00.245151314 +430 8 0 days 00:00:00.201171500 +430 9 0 days 00:00:00.287582105 +430 10 0 days 00:00:00.386515503 +430 11 0 days 00:00:00.279602246 +430 12 0 days 00:00:00.304313245 +430 13 0 days 00:00:00.327058373 +430 14 0 days 00:00:00.255650530 +430 15 0 days 00:00:00.353129809 +430 16 0 days 00:00:00.347690576 +430 17 0 days 00:00:00.289542525 +430 18 0 days 00:00:00.364507922 +430 19 0 days 00:00:00.240238083 +430 20 0 days 00:00:00.174449900 +430 21 0 days 00:00:00.221255975 +430 22 0 days 00:00:00.174030553 +430 23 0 days 00:00:00.320784102 +430 24 0 days 00:00:00.248944170 +430 25 0 days 00:00:00.224316309 +430 26 0 days 00:00:00.231644753 +430 27 0 days 00:00:00.280620186 +430 28 0 days 00:00:00.202316406 +430 29 0 days 00:00:00.201845464 +430 30 0 days 00:00:00.192898260 +430 31 0 days 00:00:00.226742969 +430 32 0 days 00:00:00.172508960 +430 33 0 days 00:00:00.280711146 +430 34 0 days 00:00:00.217167360 +430 35 0 days 00:00:00.296308346 +430 36 0 days 00:00:00.263494577 +430 37 0 days 00:00:00.335995106 +430 38 0 days 00:00:00.279876033 +430 39 0 days 
00:00:00.241723245 +430 40 0 days 00:00:00.327361020 +430 41 0 days 00:00:00.378447834 +430 42 0 days 00:00:00.364596628 +430 43 0 days 00:00:00.339012370 +430 44 0 days 00:00:00.328258853 +430 45 0 days 00:00:00.222607613 +430 46 0 days 00:00:00.221539766 +430 47 0 days 00:00:00.296358060 +430 48 0 days 00:00:00.339761845 +430 49 0 days 00:00:00.260495516 +430 50 0 days 00:00:00.245009273 +430 51 0 days 00:00:00.279934180 +430 52 0 days 00:00:00.341015555 +430 53 0 days 00:00:00.245236986 +430 54 0 days 00:00:00.193537960 +430 55 0 days 00:00:00.204634320 +430 56 0 days 00:00:00.232629326 +430 57 0 days 00:00:00.173824126 +430 58 0 days 00:00:00.222916720 +430 59 0 days 00:00:00.231055873 +430 60 0 days 00:00:00.305133390 +430 61 0 days 00:00:00.270388996 +430 62 0 days 00:00:00.292497671 +430 63 0 days 00:00:00.338161040 +430 64 0 days 00:00:00.256959120 +430 65 0 days 00:00:00.328299920 +430 66 0 days 00:00:00.232733820 +430 67 0 days 00:00:00.387614196 +430 68 0 days 00:00:00.262635013 +430 69 0 days 00:00:00.232234550 +430 70 0 days 00:00:00.176401986 +430 71 0 days 00:00:00.375260748 +430 72 0 days 00:00:00.203585033 +430 73 0 days 00:00:00.356697054 +430 74 0 days 00:00:00.223695273 +430 75 0 days 00:00:00.358276308 +430 76 0 days 00:00:00.227692076 +430 77 0 days 00:00:00.248599880 +430 78 0 days 00:00:00.192571140 +430 79 0 days 00:00:00.232538153 +430 80 0 days 00:00:00.222218928 +430 81 0 days 00:00:00.230914945 +430 82 0 days 00:00:00.175806366 +430 83 0 days 00:00:00.174296973 +430 84 0 days 00:00:00.254302910 +430 85 0 days 00:00:00.233468112 +430 86 0 days 00:00:00.282087426 +430 87 0 days 00:00:00.369274208 +430 88 0 days 00:00:00.289641695 +430 89 0 days 00:00:00.341983896 +430 90 0 days 00:00:00.233179313 +430 91 0 days 00:00:00.222511628 +430 92 0 days 00:00:00.257650864 +430 93 0 days 00:00:00.290661615 +430 94 0 days 00:00:00.369496932 +430 95 0 days 00:00:00.245771880 +430 96 0 days 00:00:00.246247055 +430 98 0 days 00:00:00.243727027 +430 99 
0 days 00:00:00.288807817 +430 100 0 days 00:00:00.206565280 +431 1 0 days 00:00:00.163018998 +431 2 0 days 00:00:00.145355926 +431 3 0 days 00:00:00.175199200 +431 4 0 days 00:00:00.173452726 +431 5 0 days 00:00:00.135699927 +431 6 0 days 00:00:00.220574633 +431 7 0 days 00:00:00.163281478 +431 8 0 days 00:00:00.219345788 +431 9 0 days 00:00:00.220291112 +431 10 0 days 00:00:00.210085003 +431 11 0 days 00:00:00.135630126 +431 12 0 days 00:00:00.123188696 +431 14 0 days 00:00:00.212965634 +431 15 0 days 00:00:00.105655000 +431 17 0 days 00:00:00.209250242 +431 18 0 days 00:00:00.153335850 +431 19 0 days 00:00:00.241312842 +431 20 0 days 00:00:00.218279533 +431 21 0 days 00:00:00.124910853 +431 22 0 days 00:00:00.148630890 +431 23 0 days 00:00:00.151632672 +431 24 0 days 00:00:00.227618606 +431 25 0 days 00:00:00.143252773 +431 26 0 days 00:00:00.242693388 +431 27 0 days 00:00:00.226889600 +431 28 0 days 00:00:00.113109246 +431 29 0 days 00:00:00.217567230 +431 30 0 days 00:00:00.159567914 +431 31 0 days 00:00:00.221210197 +431 32 0 days 00:00:00.222015816 +431 33 0 days 00:00:00.242326424 +431 34 0 days 00:00:00.114223306 +431 35 0 days 00:00:00.112588753 +431 36 0 days 00:00:00.124391405 +431 37 0 days 00:00:00.160002538 +431 38 0 days 00:00:00.182090684 +431 39 0 days 00:00:00.157915053 +431 40 0 days 00:00:00.146309320 +431 41 0 days 00:00:00.154835913 +431 42 0 days 00:00:00.218055965 +431 43 0 days 00:00:00.135552173 +431 44 0 days 00:00:00.222236560 +431 45 0 days 00:00:00.160113282 +431 46 0 days 00:00:00.154162320 +431 47 0 days 00:00:00.147466495 +431 48 0 days 00:00:00.121906725 +431 49 0 days 00:00:00.104372900 +431 50 0 days 00:00:00.221334446 +431 51 0 days 00:00:00.114153093 +431 52 0 days 00:00:00.217910460 +431 53 0 days 00:00:00.126005828 +431 55 0 days 00:00:00.242048740 +431 56 0 days 00:00:00.243017193 +431 57 0 days 00:00:00.127029128 +431 58 0 days 00:00:00.182944192 +431 59 0 days 00:00:00.183040960 +431 60 0 days 00:00:00.148754908 +431 61 0 
days 00:00:00.186983142 +431 62 0 days 00:00:00.188474090 +431 63 0 days 00:00:00.129424893 +431 64 0 days 00:00:00.147002473 +431 65 0 days 00:00:00.176433906 +431 66 0 days 00:00:00.229181866 +431 67 0 days 00:00:00.217119900 +431 68 0 days 00:00:00.106887873 +431 69 0 days 00:00:00.218671167 +431 70 0 days 00:00:00.136392286 +431 71 0 days 00:00:00.110425385 +431 72 0 days 00:00:00.218712575 +431 73 0 days 00:00:00.148947276 +431 74 0 days 00:00:00.136175546 +431 75 0 days 00:00:00.134654453 +431 76 0 days 00:00:00.105740920 +431 77 0 days 00:00:00.146989373 +431 78 0 days 00:00:00.147022220 +431 79 0 days 00:00:00.242178856 +431 80 0 days 00:00:00.162593904 +431 81 0 days 00:00:00.222121786 +431 82 0 days 00:00:00.144615453 +431 83 0 days 00:00:00.224200688 +431 84 0 days 00:00:00.242866043 +431 85 0 days 00:00:00.137519166 +431 86 0 days 00:00:00.114382053 +431 87 0 days 00:00:00.153610653 +431 88 0 days 00:00:00.182195216 +431 89 0 days 00:00:00.223325666 +431 90 0 days 00:00:00.125913681 +431 91 0 days 00:00:00.146678853 +431 92 0 days 00:00:00.147517073 +431 93 0 days 00:00:00.243238330 +431 94 0 days 00:00:00.244860553 +431 95 0 days 00:00:00.243025780 +431 96 0 days 00:00:00.176094140 +431 97 0 days 00:00:00.223182693 +431 98 0 days 00:00:00.175551713 +431 99 0 days 00:00:00.219746693 +432 1 0 days 00:00:00.222791760 +432 2 0 days 00:00:00.114102890 +432 3 0 days 00:00:00.191288475 +432 4 0 days 00:00:00.163246417 +432 5 0 days 00:00:00.127900098 +432 6 0 days 00:00:00.149040500 +432 7 0 days 00:00:00.229056420 +432 8 0 days 00:00:00.157938060 +432 9 0 days 00:00:00.221789530 +432 10 0 days 00:00:00.126137198 +432 11 0 days 00:00:00.222984026 +432 12 0 days 00:00:00.148219200 +432 13 0 days 00:00:00.141221226 +432 14 0 days 00:00:00.222539866 +432 15 0 days 00:00:00.105856233 +432 16 0 days 00:00:00.153702446 +432 17 0 days 00:00:00.148640570 +432 18 0 days 00:00:00.158551326 +432 19 0 days 00:00:00.108257733 +432 20 0 days 00:00:00.157528720 +432 21 0 
days 00:00:00.214738142 +432 22 0 days 00:00:00.186821588 +432 23 0 days 00:00:00.213341436 +432 24 0 days 00:00:00.151327080 +432 25 0 days 00:00:00.247420950 +432 26 0 days 00:00:00.116286826 +432 27 0 days 00:00:00.215132700 +432 28 0 days 00:00:00.116583460 +432 29 0 days 00:00:00.114335273 +432 30 0 days 00:00:00.148999425 +432 31 0 days 00:00:00.179246100 +432 32 0 days 00:00:00.151347458 +432 33 0 days 00:00:00.226179224 +432 34 0 days 00:00:00.219300390 +432 35 0 days 00:00:00.223076846 +432 36 0 days 00:00:00.157346640 +432 37 0 days 00:00:00.186569320 +432 38 0 days 00:00:00.148057140 +432 39 0 days 00:00:00.248200640 +432 40 0 days 00:00:00.223942331 +432 41 0 days 00:00:00.186284360 +432 42 0 days 00:00:00.211866944 +432 43 0 days 00:00:00.162529274 +432 44 0 days 00:00:00.106558046 +432 45 0 days 00:00:00.114235553 +432 46 0 days 00:00:00.155282026 +432 47 0 days 00:00:00.229668133 +432 48 0 days 00:00:00.156120906 +432 49 0 days 00:00:00.150694847 +432 50 0 days 00:00:00.150443661 +432 51 0 days 00:00:00.107102526 +432 52 0 days 00:00:00.177051333 +432 53 0 days 00:00:00.186517556 +432 54 0 days 00:00:00.190126631 +432 55 0 days 00:00:00.137163260 +432 56 0 days 00:00:00.211911947 +432 57 0 days 00:00:00.151121900 +432 58 0 days 00:00:00.193704514 +432 59 0 days 00:00:00.150975834 +432 60 0 days 00:00:00.123705790 +432 61 0 days 00:00:00.223136113 +432 62 0 days 00:00:00.154095893 +432 63 0 days 00:00:00.213533162 +432 64 0 days 00:00:00.158750100 +432 65 0 days 00:00:00.248295933 +432 66 0 days 00:00:00.140367572 +432 67 0 days 00:00:00.214779235 +432 68 0 days 00:00:00.129201613 +432 69 0 days 00:00:00.157391746 +432 70 0 days 00:00:00.156077313 +432 71 0 days 00:00:00.167388060 +432 72 0 days 00:00:00.139578793 +432 73 0 days 00:00:00.249632906 +432 74 0 days 00:00:00.117108773 +432 75 0 days 00:00:00.216182750 +432 76 0 days 00:00:00.224961250 +432 77 0 days 00:00:00.107816560 +432 78 0 days 00:00:00.132234165 +432 79 0 days 00:00:00.111757015 
+432 80 0 days 00:00:00.223426646 +432 81 0 days 00:00:00.137858336 +432 82 0 days 00:00:00.116724426 +432 83 0 days 00:00:00.126284838 +432 84 0 days 00:00:00.190215980 +432 85 0 days 00:00:00.189511068 +432 86 0 days 00:00:00.183833525 +432 87 0 days 00:00:00.178817246 +432 89 0 days 00:00:00.193681145 +432 90 0 days 00:00:00.130094853 +432 91 0 days 00:00:00.222231106 +432 92 0 days 00:00:00.138990900 +432 93 0 days 00:00:00.192013764 +432 94 0 days 00:00:00.164284992 +432 95 0 days 00:00:00.105734880 +432 96 0 days 00:00:00.170590508 +432 97 0 days 00:00:00.113831215 +432 98 0 days 00:00:00.163453060 +432 99 0 days 00:00:00.125398700 +432 100 0 days 00:00:00.160153976 +433 1 0 days 00:00:00.361722806 +433 2 0 days 00:00:00.216470032 +433 4 0 days 00:00:00.409326624 +433 5 0 days 00:00:00.287114475 +433 6 0 days 00:00:00.217174228 +433 7 0 days 00:00:00.205863660 +433 8 0 days 00:00:00.244704600 +433 9 0 days 00:00:00.170152980 +433 10 0 days 00:00:00.311898200 +433 11 0 days 00:00:00.208111231 +433 12 0 days 00:00:00.408549224 +433 13 0 days 00:00:00.238715977 +433 14 0 days 00:00:00.271664970 +433 15 0 days 00:00:00.215246650 +433 16 0 days 00:00:00.250590116 +433 17 0 days 00:00:00.272072843 +433 18 0 days 00:00:00.346324135 +433 19 0 days 00:00:00.394275924 +433 20 0 days 00:00:00.238248324 +433 21 0 days 00:00:00.236053710 +433 22 0 days 00:00:00.206751270 +433 23 0 days 00:00:00.226157456 +433 24 0 days 00:00:00.263082752 +433 25 0 days 00:00:00.207074880 +433 26 0 days 00:00:00.215059360 +433 27 0 days 00:00:00.286808397 +433 28 0 days 00:00:00.284150085 +433 29 0 days 00:00:00.246237704 +433 30 0 days 00:00:00.327780540 +433 31 0 days 00:00:00.298390160 +433 32 0 days 00:00:00.269693397 +433 33 0 days 00:00:00.250631988 +433 34 0 days 00:00:00.375790696 +433 35 0 days 00:00:00.196929480 +433 36 0 days 00:00:00.408100130 +433 37 0 days 00:00:00.237720860 +433 38 0 days 00:00:00.197436576 +433 39 0 days 00:00:00.298494115 +433 40 0 days 00:00:00.225325486 
+433 41 0 days 00:00:00.202741560 +433 42 0 days 00:00:00.238200370 +433 43 0 days 00:00:00.394191595 +433 44 0 days 00:00:00.250810936 +433 45 0 days 00:00:00.318258680 +433 46 0 days 00:00:00.170716846 +433 47 0 days 00:00:00.235522547 +433 48 0 days 00:00:00.288290566 +433 49 0 days 00:00:00.346636675 +433 50 0 days 00:00:00.231049937 +433 51 0 days 00:00:00.364800833 +433 52 0 days 00:00:00.225327173 +433 53 0 days 00:00:00.273159082 +433 54 0 days 00:00:00.313330620 +433 55 0 days 00:00:00.255250520 +433 56 0 days 00:00:00.411464410 +433 57 0 days 00:00:00.321959206 +433 58 0 days 00:00:00.314998411 +433 59 0 days 00:00:00.338431980 +433 60 0 days 00:00:00.375649026 +433 61 0 days 00:00:00.238839482 +433 62 0 days 00:00:00.328442471 +433 63 0 days 00:00:00.281004630 +433 64 0 days 00:00:00.318967686 +433 65 0 days 00:00:00.221875069 +433 66 0 days 00:00:00.320042325 +433 67 0 days 00:00:00.217726020 +433 68 0 days 00:00:00.234282023 +433 69 0 days 00:00:00.251102888 +433 70 0 days 00:00:00.246515213 +433 71 0 days 00:00:00.239234486 +433 72 0 days 00:00:00.217711902 +433 73 0 days 00:00:00.225626926 +433 74 0 days 00:00:00.349731588 +433 75 0 days 00:00:00.247988155 +433 76 0 days 00:00:00.252785350 +433 77 0 days 00:00:00.403058182 +433 78 0 days 00:00:00.279414671 +433 79 0 days 00:00:00.402550278 +433 80 0 days 00:00:00.348886285 +433 81 0 days 00:00:00.401695183 +433 82 0 days 00:00:00.369851368 +433 83 0 days 00:00:00.259462500 +433 84 0 days 00:00:00.388022231 +433 85 0 days 00:00:00.342798167 +433 86 0 days 00:00:00.356122887 +433 87 0 days 00:00:00.347608060 +433 88 0 days 00:00:00.197825900 +433 89 0 days 00:00:00.225907993 +433 90 0 days 00:00:00.197191816 +433 91 0 days 00:00:00.265665551 +433 92 0 days 00:00:00.289033780 +433 93 0 days 00:00:00.241414356 +433 94 0 days 00:00:00.170998313 +433 95 0 days 00:00:00.249496953 +433 96 0 days 00:00:00.338707186 +433 97 0 days 00:00:00.291130028 +433 98 0 days 00:00:00.347197760 +433 99 0 days 
00:00:00.320321240 +433 100 0 days 00:00:00.231595442 +434 1 0 days 00:00:00.224600806 +434 2 0 days 00:00:00.217104720 +434 3 0 days 00:00:00.223133920 +434 4 0 days 00:00:00.134560620 +434 5 0 days 00:00:00.147774420 +434 6 0 days 00:00:00.134150553 +434 7 0 days 00:00:00.170024691 +434 8 0 days 00:00:00.153494826 +434 9 0 days 00:00:00.212643296 +434 10 0 days 00:00:00.113790360 +434 11 0 days 00:00:00.135319447 +434 12 0 days 00:00:00.134402337 +434 13 0 days 00:00:00.218839326 +434 14 0 days 00:00:00.217360974 +434 15 0 days 00:00:00.112142010 +434 16 0 days 00:00:00.130089780 +434 17 0 days 00:00:00.191092391 +434 18 0 days 00:00:00.179880680 +434 19 0 days 00:00:00.189489726 +434 20 0 days 00:00:00.222956646 +434 21 0 days 00:00:00.223691586 +434 22 0 days 00:00:00.240768846 +434 23 0 days 00:00:00.146944820 +434 24 0 days 00:00:00.132553182 +434 25 0 days 00:00:00.105193113 +434 26 0 days 00:00:00.148058125 +434 27 0 days 00:00:00.153686813 +434 28 0 days 00:00:00.151599120 +434 29 0 days 00:00:00.219568313 +434 30 0 days 00:00:00.211394316 +434 31 0 days 00:00:00.223818226 +434 32 0 days 00:00:00.210016525 +434 33 0 days 00:00:00.241400110 +434 34 0 days 00:00:00.222840066 +434 35 0 days 00:00:00.240521753 +434 36 0 days 00:00:00.241843384 +434 37 0 days 00:00:00.216645745 +434 38 0 days 00:00:00.154482993 +434 39 0 days 00:00:00.131199937 +434 40 0 days 00:00:00.154251733 +434 41 0 days 00:00:00.150077228 +434 42 0 days 00:00:00.117169273 +434 43 0 days 00:00:00.114778726 +434 44 0 days 00:00:00.157287164 +434 45 0 days 00:00:00.129185290 +434 46 0 days 00:00:00.160239484 +434 47 0 days 00:00:00.123568640 +434 48 0 days 00:00:00.148812480 +434 49 0 days 00:00:00.147715136 +434 50 0 days 00:00:00.106353180 +434 51 0 days 00:00:00.185431446 +434 52 0 days 00:00:00.135356006 +434 53 0 days 00:00:00.168413649 +434 54 0 days 00:00:00.183944772 +434 55 0 days 00:00:00.176800380 +434 56 0 days 00:00:00.219418140 +434 57 0 days 00:00:00.107155966 +434 58 0 days 
00:00:00.184495812 +434 59 0 days 00:00:00.156752168 +434 60 0 days 00:00:00.220063620 +434 61 0 days 00:00:00.212295465 +434 62 0 days 00:00:00.151724893 +434 63 0 days 00:00:00.150926540 +434 64 0 days 00:00:00.180451265 +434 65 0 days 00:00:00.177122320 +434 66 0 days 00:00:00.187212025 +434 67 0 days 00:00:00.127207606 +434 68 0 days 00:00:00.220226233 +434 69 0 days 00:00:00.185241450 +434 70 0 days 00:00:00.192300210 +434 71 0 days 00:00:00.147013433 +434 72 0 days 00:00:00.117430620 +434 73 0 days 00:00:00.243071145 +434 74 0 days 00:00:00.182992236 +434 75 0 days 00:00:00.138825870 +434 76 0 days 00:00:00.124320470 +434 77 0 days 00:00:00.189092104 +434 78 0 days 00:00:00.119447325 +434 79 0 days 00:00:00.119374517 +434 80 0 days 00:00:00.186598954 +434 81 0 days 00:00:00.147593772 +434 82 0 days 00:00:00.116828676 +434 83 0 days 00:00:00.104883493 +434 84 0 days 00:00:00.119025630 +434 85 0 days 00:00:00.219611257 +434 86 0 days 00:00:00.160188295 +434 87 0 days 00:00:00.145827715 +434 88 0 days 00:00:00.160562804 +434 89 0 days 00:00:00.190924657 +434 90 0 days 00:00:00.105463973 +434 91 0 days 00:00:00.217814718 +434 92 0 days 00:00:00.243305360 +434 93 0 days 00:00:00.210922980 +434 94 0 days 00:00:00.142604993 +434 95 0 days 00:00:00.189516312 +434 96 0 days 00:00:00.176152780 +434 97 0 days 00:00:00.242565484 +434 98 0 days 00:00:00.124184662 +434 99 0 days 00:00:00.242674965 +434 100 0 days 00:00:00.146357206 +435 1 0 days 00:00:04.319444025 +435 2 0 days 00:00:08.135823800 +435 3 0 days 00:00:33.149096755 +435 4 0 days 00:00:06.195611933 +435 5 0 days 00:00:01.754210365 +435 6 0 days 00:00:01.192281885 +435 7 0 days 00:00:03.706005280 +435 8 0 days 00:00:03.495335370 +435 9 0 days 00:00:44.552598120 +435 10 0 days 00:00:03.856221947 +435 11 0 days 00:00:08.254532180 +435 12 0 days 00:00:12.593894893 +435 13 0 days 00:00:56.522596692 +435 14 0 days 00:00:03.790635700 +435 15 0 days 00:00:01.942823465 +435 16 0 days 00:00:59.138480417 +435 17 0 days 
00:00:09.549420740 +435 18 0 days 00:00:12.635490375 +435 19 0 days 00:00:46.640307205 +436 1 0 days 00:00:00.377741985 +436 2 0 days 00:00:01.375743002 +436 3 0 days 00:00:16.043724900 +436 4 0 days 00:00:00.336135255 +436 5 0 days 00:00:05.796003540 +436 6 0 days 00:00:02.712525060 +436 7 0 days 00:00:01.467972665 +436 8 0 days 00:00:01.021267140 +436 9 0 days 00:00:21.796019293 +436 10 0 days 00:00:00.731082800 +436 11 0 days 00:00:02.150485055 +436 12 0 days 00:00:13.486808631 +436 13 0 days 00:00:26.426672644 +436 14 0 days 00:00:16.065117880 +436 15 0 days 00:00:10.167505060 +436 16 0 days 00:00:05.781832105 +436 17 0 days 00:00:01.552548510 +436 18 0 days 00:00:00.661613615 +436 19 0 days 00:00:21.484113706 +436 20 0 days 00:00:25.529673490 +436 21 0 days 00:00:06.170664040 +436 22 0 days 00:00:01.317908728 +436 23 0 days 00:00:01.179753912 +436 24 0 days 00:00:06.333362560 +436 25 0 days 00:00:01.031506850 +436 26 0 days 00:00:06.797442073 +436 27 0 days 00:00:26.649306152 +436 28 0 days 00:00:06.296410473 +436 29 0 days 00:00:03.063640110 +436 30 0 days 00:00:01.688481075 +436 31 0 days 00:00:01.179816023 +436 32 0 days 00:00:04.334474033 +436 33 0 days 00:00:05.957211550 +436 34 0 days 00:00:01.125846274 +436 35 0 days 00:00:24.762671755 +436 36 0 days 00:00:00.728985692 +436 37 0 days 00:00:01.730967940 +436 38 0 days 00:00:01.881673180 +436 39 0 days 00:00:01.229162888 +436 40 0 days 00:00:04.684206305 +436 41 0 days 00:00:03.828535144 +436 42 0 days 00:00:01.662392310 +436 43 0 days 00:00:01.309398720 +436 44 0 days 00:00:00.764217084 +436 45 0 days 00:00:02.053171293 +436 46 0 days 00:00:26.077460111 +437 1 0 days 00:00:07.877503400 +437 2 0 days 00:00:11.733652000 +437 3 0 days 00:00:02.012615693 +437 4 0 days 00:00:40.786347520 +437 5 0 days 00:00:08.725088650 +437 6 0 days 00:00:11.404931593 +437 7 0 days 00:00:00.849591955 +437 8 0 days 00:00:17.981853713 +437 9 0 days 00:00:02.624590968 +437 10 0 days 00:00:00.496684056 +437 11 0 days 
00:00:02.330145740 +437 12 0 days 00:00:09.658976586 +437 13 0 days 00:00:13.303627210 +437 14 0 days 00:00:08.087485773 +437 15 0 days 00:00:03.137938913 +437 16 0 days 00:00:02.665456946 +437 17 0 days 00:00:01.393984013 +437 18 0 days 00:00:11.130827086 +437 19 0 days 00:00:03.387074693 +437 20 0 days 00:00:03.369174493 +437 21 0 days 00:00:00.767955806 +437 22 0 days 00:00:15.242349133 +437 23 0 days 00:00:02.444529406 +437 24 0 days 00:00:04.912470920 +437 25 0 days 00:00:09.923368746 +437 26 0 days 00:00:46.294349730 +437 27 0 days 00:00:39.947048923 +437 28 0 days 00:00:03.762786953 +437 29 0 days 00:00:12.875852360 +437 30 0 days 00:00:03.404221633 +437 31 0 days 00:00:30.455896993 +437 32 0 days 00:00:29.605977260 +437 33 0 days 00:00:03.680187793 +437 34 0 days 00:00:02.087227613 +437 35 0 days 00:00:11.029639706 +437 36 0 days 00:00:05.782066633 +437 37 0 days 00:00:02.689916206 +437 38 0 days 00:00:01.352826420 +437 39 0 days 00:00:27.998181920 +437 40 0 days 00:00:01.778853733 +437 41 0 days 00:00:02.086304353 +437 42 0 days 00:00:28.899822613 +437 44 0 days 00:00:02.767331717 +437 45 0 days 00:00:12.058851073 +437 46 0 days 00:00:03.897257500 +437 47 0 days 00:00:10.169735113 +438 1 0 days 00:00:13.161731990 +438 2 0 days 00:00:08.870605000 +438 3 0 days 00:00:01.175415453 +438 4 0 days 00:00:01.852585968 +438 5 0 days 00:00:24.100840133 +438 6 0 days 00:00:21.133385180 +438 7 0 days 00:00:08.067779913 +438 8 0 days 00:00:45.091786240 +438 9 0 days 00:00:02.211893760 +438 10 0 days 00:00:10.160548533 +438 11 0 days 00:00:02.682431420 +438 12 0 days 00:00:22.730587470 +438 13 0 days 00:00:07.155988640 +438 14 0 days 00:00:54.529026555 +438 15 0 days 00:00:03.409207853 +438 16 0 days 00:00:29.457879326 +438 17 0 days 00:00:10.279155106 +438 18 0 days 00:00:10.822625513 +438 19 0 days 00:00:03.074081786 +438 20 0 days 00:01:01.617270366 +438 21 0 days 00:00:41.279477986 +438 22 0 days 00:00:09.185890468 +438 23 0 days 00:00:03.379811000 +438 24 0 days 
00:00:25.325309526 +438 25 0 days 00:00:36.601331686 +438 26 0 days 00:00:02.118100286 +438 27 0 days 00:00:02.294942405 +438 28 0 days 00:00:12.122511666 +438 29 0 days 00:00:01.462062793 +439 1 0 days 00:00:14.804417940 +439 2 0 days 00:00:02.311066717 +439 3 0 days 00:00:23.324162190 +439 4 0 days 00:00:14.263536313 +439 5 0 days 00:00:00.974181060 +439 6 0 days 00:00:00.911884406 +439 7 0 days 00:00:00.439965293 +439 8 0 days 00:00:00.656375293 +439 9 0 days 00:00:02.476111345 +439 10 0 days 00:00:01.712138180 +439 11 0 days 00:00:05.203500545 +439 12 0 days 00:00:02.224011040 +439 13 0 days 00:00:03.244001772 +439 14 0 days 00:00:01.456754893 +439 15 0 days 00:00:00.980027450 +439 16 0 days 00:00:02.287681413 +439 17 0 days 00:00:02.431098180 +439 18 0 days 00:00:06.031873600 +439 19 0 days 00:00:00.533451208 +439 20 0 days 00:00:01.726593186 +439 21 0 days 00:00:08.081614180 +439 22 0 days 00:00:09.238375700 +439 23 0 days 00:00:01.601651706 +439 24 0 days 00:00:15.613286846 +439 25 0 days 00:00:04.571342373 +439 26 0 days 00:00:03.259989346 +439 27 0 days 00:00:05.628610626 +439 28 0 days 00:00:23.580244166 +439 29 0 days 00:00:16.918069860 +439 30 0 days 00:00:00.614340366 +439 31 0 days 00:00:10.825569033 +439 32 0 days 00:00:05.719429740 +439 33 0 days 00:00:21.201254413 +439 34 0 days 00:00:00.834351693 +439 35 0 days 00:00:10.838255853 +439 36 0 days 00:00:00.788901156 +439 37 0 days 00:00:04.729383400 +439 38 0 days 00:00:13.910456040 +439 39 0 days 00:00:01.161002566 +439 40 0 days 00:00:05.318293040 +439 41 0 days 00:00:17.207411960 +439 42 0 days 00:00:03.714355173 +439 43 0 days 00:00:03.639161290 +439 44 0 days 00:00:02.063869983 +439 45 0 days 00:00:01.056882280 +439 46 0 days 00:00:05.428648226 +439 47 0 days 00:00:04.682136406 +439 48 0 days 00:00:01.690016043 +439 49 0 days 00:00:03.097077493 +439 50 0 days 00:00:02.654462706 +439 51 0 days 00:00:02.602049613 +439 52 0 days 00:00:00.619997836 +439 53 0 days 00:00:02.091687984 +439 54 0 days 
00:00:00.303177587 +439 55 0 days 00:00:19.799812880 +439 56 0 days 00:00:01.731088380 +439 57 0 days 00:00:06.056187626 +439 58 0 days 00:00:02.780057200 +439 59 0 days 00:00:14.141803526 +439 60 0 days 00:00:16.242713460 +439 61 0 days 00:00:06.933970793 +439 63 0 days 00:00:19.073306026 +439 64 0 days 00:00:01.343671066 +439 65 0 days 00:00:12.363619233 +439 66 0 days 00:00:03.247908946 +439 67 0 days 00:00:01.606327626 +439 68 0 days 00:00:01.810145795 +439 69 0 days 00:00:01.014496720 +439 70 0 days 00:00:22.430216255 +439 71 0 days 00:00:05.166597773 +439 72 0 days 00:00:01.371237340 +439 73 0 days 00:00:00.533023192 +439 74 0 days 00:00:00.872780953 +439 75 0 days 00:00:04.121126640 +439 76 0 days 00:00:03.752061920 +439 77 0 days 00:00:04.093566620 +439 78 0 days 00:00:23.249675860 +439 79 0 days 00:00:01.026234773 +439 80 0 days 00:00:02.009952025 +439 81 0 days 00:00:00.661991300 +439 82 0 days 00:00:03.271526446 +439 83 0 days 00:00:01.677364573 +439 84 0 days 00:00:06.838242820 +439 85 0 days 00:00:04.483170733 +439 86 0 days 00:00:01.230731034 +439 87 0 days 00:00:02.191919253 +439 88 0 days 00:00:03.925878673 +439 89 0 days 00:00:00.832636066 +439 90 0 days 00:00:00.629486086 +439 91 0 days 00:00:02.541691546 +439 92 0 days 00:00:00.617565393 +439 93 0 days 00:00:14.126926013 +439 94 0 days 00:00:02.106439223 +439 95 0 days 00:00:03.861172606 +440 1 0 days 00:00:18.873403233 +440 2 0 days 00:00:15.899069466 +440 3 0 days 00:00:03.371194140 +440 4 0 days 00:00:00.883987380 +440 5 0 days 00:00:15.339427943 +440 6 0 days 00:00:01.008477933 +440 7 0 days 00:00:01.339563744 +440 8 0 days 00:00:02.693729940 +440 9 0 days 00:00:01.364147366 +440 10 0 days 00:00:04.987874445 +440 11 0 days 00:00:05.131832480 +440 12 0 days 00:00:01.102786220 +440 13 0 days 00:00:05.330659286 +440 14 0 days 00:00:00.929613273 +440 15 0 days 00:00:04.680682220 +440 16 0 days 00:00:02.028086286 +440 17 0 days 00:00:02.526115646 +440 18 0 days 00:00:11.075271873 +440 19 0 days 
00:00:01.130260880 +440 20 0 days 00:00:01.701364426 +440 21 0 days 00:00:08.071417155 +440 22 0 days 00:00:01.096107186 +440 23 0 days 00:00:01.332311917 +440 24 0 days 00:00:01.729612570 +440 25 0 days 00:00:21.711839106 +440 26 0 days 00:00:04.148265786 +440 27 0 days 00:00:14.166265206 +440 28 0 days 00:00:05.969004593 +440 29 0 days 00:00:02.645176066 +440 30 0 days 00:00:20.819707380 +440 31 0 days 00:00:01.151924233 +440 32 0 days 00:00:02.194465566 +440 33 0 days 00:00:02.637913060 +440 34 0 days 00:00:00.886389565 +440 35 0 days 00:00:01.104099394 +440 36 0 days 00:00:05.437026660 +440 37 0 days 00:00:02.372052940 +440 38 0 days 00:00:00.932802253 +440 39 0 days 00:00:00.942051114 +440 40 0 days 00:00:07.156058016 +440 41 0 days 00:00:04.528384250 +440 42 0 days 00:00:01.792155000 +440 43 0 days 00:00:00.428888916 +440 44 0 days 00:00:01.607577313 +440 45 0 days 00:00:00.479634297 +440 46 0 days 00:00:14.878436273 +440 47 0 days 00:00:16.747841093 +440 48 0 days 00:00:03.907421700 +440 49 0 days 00:00:11.902883077 +440 50 0 days 00:00:02.251270626 +440 51 0 days 00:00:01.912796013 +440 52 0 days 00:00:01.555489500 +440 53 0 days 00:00:00.680497295 +440 54 0 days 00:00:00.892448790 +440 55 0 days 00:00:01.439331613 +440 56 0 days 00:00:03.877355226 +440 57 0 days 00:00:19.837550013 +440 58 0 days 00:00:01.486324206 +440 59 0 days 00:00:03.992433046 +440 60 0 days 00:00:01.078313000 +440 61 0 days 00:00:02.788956720 +440 62 0 days 00:00:12.601509140 +440 63 0 days 00:00:03.477418765 +440 64 0 days 00:00:00.973228942 +440 65 0 days 00:00:03.475445240 +440 66 0 days 00:00:07.158426280 +440 67 0 days 00:00:06.081137253 +440 68 0 days 00:00:01.808454246 +440 69 0 days 00:00:00.962676973 +440 70 0 days 00:00:08.347470413 +440 71 0 days 00:00:01.313679260 +440 72 0 days 00:00:17.989701855 +440 73 0 days 00:00:00.624136081 +440 74 0 days 00:00:09.087703825 +440 75 0 days 00:00:01.269568233 +440 76 0 days 00:00:00.907280073 +440 77 0 days 00:00:07.978430366 +440 78 
0 days 00:00:00.959357733 +440 79 0 days 00:00:01.949970906 +440 80 0 days 00:00:05.746449680 +440 81 0 days 00:00:00.832191960 +440 82 0 days 00:00:01.653254353 +440 83 0 days 00:00:01.390000286 +440 84 0 days 00:00:00.619395295 +440 85 0 days 00:00:05.241441753 +440 86 0 days 00:00:01.199591260 +440 87 0 days 00:00:00.851049463 +440 88 0 days 00:00:00.899339033 +440 89 0 days 00:00:05.593653253 +440 90 0 days 00:00:03.651036910 +440 91 0 days 00:00:02.420651386 +440 92 0 days 00:00:00.721478220 +440 93 0 days 00:00:01.104025473 +440 94 0 days 00:00:12.347592066 +440 95 0 days 00:00:16.068800366 +440 96 0 days 00:00:00.887025746 +440 97 0 days 00:00:02.479247773 +440 98 0 days 00:00:00.438019636 +440 99 0 days 00:00:00.870745020 +440 100 0 days 00:00:20.330805793 +441 1 0 days 00:00:08.538171760 +441 2 0 days 00:00:25.514141960 +441 3 0 days 00:00:10.732753180 +441 4 0 days 00:00:40.239075088 +441 5 0 days 00:00:41.152361740 +441 6 0 days 00:00:01.241395897 +441 7 0 days 00:00:02.439445346 +441 8 0 days 00:00:04.847288053 +441 9 0 days 00:00:03.334631453 +441 10 0 days 00:00:00.999295160 +441 11 0 days 00:00:01.241450106 +441 12 0 days 00:00:02.995543386 +441 13 0 days 00:00:03.552384206 +441 14 0 days 00:00:01.008193026 +441 15 0 days 00:00:10.785781113 +441 16 0 days 00:00:01.504423320 +441 17 0 days 00:00:01.646277733 +441 18 0 days 00:00:03.058717073 +441 19 0 days 00:00:02.145127616 +441 20 0 days 00:00:02.860705593 +441 21 0 days 00:00:20.636056640 +441 22 0 days 00:00:15.159109553 +441 23 0 days 00:00:00.802080013 +441 24 0 days 00:00:02.741232660 +441 25 0 days 00:00:01.583655166 +441 26 0 days 00:00:25.420042706 +441 27 0 days 00:00:09.057360073 +441 29 0 days 00:00:03.097717686 +441 30 0 days 00:00:22.650713860 +441 31 0 days 00:00:01.146538160 +441 32 0 days 00:00:37.482420460 +441 33 0 days 00:00:01.343522926 +441 34 0 days 00:00:33.144616346 +441 35 0 days 00:00:02.428662653 +441 36 0 days 00:00:01.550233760 +441 37 0 days 00:00:02.948892741 +441 38 0 
days 00:00:08.982954846 +441 39 0 days 00:00:01.871590733 +441 40 0 days 00:00:35.799176900 +441 41 0 days 00:00:31.461642937 +441 42 0 days 00:00:15.086304760 +441 43 0 days 00:00:14.096600748 +441 44 0 days 00:00:03.287926600 +441 45 0 days 00:00:02.019230586 +441 46 0 days 00:00:02.546418306 +441 47 0 days 00:00:03.526472680 +442 1 0 days 00:00:16.601753193 +442 2 0 days 00:00:02.096178133 +442 3 0 days 00:00:05.523489560 +442 4 0 days 00:00:01.339237500 +442 5 0 days 00:00:05.758881066 +442 6 0 days 00:00:01.648344880 +442 7 0 days 00:00:04.245849213 +442 8 0 days 00:00:14.958830060 +442 9 0 days 00:00:08.927588753 +442 10 0 days 00:00:01.128094026 +442 11 0 days 00:00:01.136793253 +442 12 0 days 00:00:01.165945460 +442 13 0 days 00:00:03.524396026 +442 14 0 days 00:00:02.407901700 +442 15 0 days 00:00:01.031072800 +442 16 0 days 00:00:00.569125713 +442 17 0 days 00:00:24.016648620 +442 18 0 days 00:00:00.721334273 +442 19 0 days 00:00:14.325388193 +442 20 0 days 00:00:19.244736713 +442 21 0 days 00:00:18.380365846 +442 22 0 days 00:00:14.417996066 +442 23 0 days 00:00:15.786144280 +442 24 0 days 00:00:00.840282980 +442 25 0 days 00:00:01.776626166 +442 26 0 days 00:00:00.934098333 +442 27 0 days 00:00:22.555164000 +442 28 0 days 00:00:16.085602213 +442 29 0 days 00:00:05.146966066 +442 30 0 days 00:00:05.927540593 +442 31 0 days 00:00:01.571423620 +442 32 0 days 00:00:05.776463906 +442 33 0 days 00:00:19.989559420 +442 34 0 days 00:00:04.965364286 +442 35 0 days 00:00:03.600348273 +442 36 0 days 00:00:23.980274086 +442 37 0 days 00:00:10.887367280 +442 38 0 days 00:00:09.631794166 +442 39 0 days 00:00:00.866978260 +442 40 0 days 00:00:18.793637693 +442 41 0 days 00:00:14.840935153 +442 42 0 days 00:00:01.138206760 +442 43 0 days 00:00:01.128248066 +442 44 0 days 00:00:03.963609793 +442 45 0 days 00:00:02.853633786 +442 46 0 days 00:00:03.347737213 +442 47 0 days 00:00:01.506626713 +442 48 0 days 00:00:01.153560746 +442 49 0 days 00:00:03.136218200 +442 50 0 
days 00:00:01.349606793 +442 51 0 days 00:00:18.970649853 +442 52 0 days 00:00:01.504569726 +442 53 0 days 00:00:16.332411613 +442 54 0 days 00:00:02.038776453 +442 55 0 days 00:00:05.414290653 +442 56 0 days 00:00:24.324916206 +442 57 0 days 00:00:04.749992480 +442 58 0 days 00:00:00.754296206 +442 59 0 days 00:00:15.545788780 +442 60 0 days 00:00:01.027040624 +442 61 0 days 00:00:00.501928280 +442 62 0 days 00:00:01.285571146 +442 63 0 days 00:00:19.620604346 +442 64 0 days 00:00:03.058242273 +442 65 0 days 00:00:06.028612486 +442 66 0 days 00:00:20.618031320 +442 67 0 days 00:00:02.387788013 +442 68 0 days 00:00:01.682080693 +442 69 0 days 00:00:02.134922780 +442 70 0 days 00:00:14.686961606 +442 71 0 days 00:00:10.980938966 +442 72 0 days 00:00:02.400454193 +442 73 0 days 00:00:21.589898826 +442 74 0 days 00:00:01.393306073 +442 75 0 days 00:00:00.996400180 +442 76 0 days 00:00:02.270701306 +442 77 0 days 00:00:19.763624532 +443 1 0 days 00:10:39.271111846 +444 1 0 days 00:03:56.423106135 +444 2 0 days 00:07:15.442119345 +445 1 0 days 00:03:07.749134693 +445 2 0 days 00:01:14.409261113 +445 3 0 days 00:00:10.304166568 +445 4 0 days 00:00:44.035941526 +445 5 0 days 00:02:47.058068896 +446 1 0 days 00:00:08.987042646 +446 2 0 days 00:00:39.054408960 +446 3 0 days 00:00:07.794054146 +446 4 0 days 00:00:13.380564275 +446 5 0 days 00:02:01.770321803 +446 6 0 days 00:00:51.784017580 +446 7 0 days 00:00:13.420483889 +446 8 0 days 00:00:04.124809993 +446 9 0 days 00:00:29.651794833 +446 10 0 days 00:00:53.631205706 +446 11 0 days 00:00:12.232150946 +446 12 0 days 00:00:10.491600880 +447 1 0 days 00:00:04.879586073 +447 2 0 days 00:00:21.429983946 +447 3 0 days 00:00:21.412633293 +447 5 0 days 00:01:36.002847680 +447 6 0 days 00:00:13.312839593 +447 7 0 days 00:00:09.150629433 +447 8 0 days 00:00:47.740813546 +447 9 0 days 00:00:27.750183826 +447 10 0 days 00:00:28.372796260 +447 11 0 days 00:00:22.275057613 +447 12 0 days 00:00:11.284103740 +447 13 0 days 
00:00:17.735727266 +447 14 0 days 00:00:17.324539860 +447 16 0 days 00:01:31.156766446 +447 17 0 days 00:00:38.896781393 +447 18 0 days 00:03:03.502214286 +448 1 0 days 00:00:59.231756180 +448 2 0 days 00:01:37.380036140 +448 3 0 days 00:00:08.619384026 +448 4 0 days 00:00:12.817359620 +448 5 0 days 00:00:17.149554700 +448 6 0 days 00:01:07.827809773 +448 7 0 days 00:00:10.781877566 +448 8 0 days 00:00:04.516518573 +448 9 0 days 00:01:13.577205420 +448 10 0 days 00:00:07.995266066 +448 11 0 days 00:00:26.492548673 +448 12 0 days 00:00:08.053959320 +448 13 0 days 00:00:10.087421153 +448 14 0 days 00:01:40.888564373 +448 15 0 days 00:00:34.288373260 +448 16 0 days 00:00:16.059420086 +448 17 0 days 00:00:20.739600926 +449 1 0 days 00:00:30.135704666 +449 2 0 days 00:00:45.698144813 +449 3 0 days 00:00:22.245862313 +449 4 0 days 00:00:41.508729600 +449 5 0 days 00:00:04.857632693 +449 6 0 days 00:00:39.614671666 +449 7 0 days 00:00:28.252719040 +449 8 0 days 00:00:17.844596526 +449 9 0 days 00:00:10.685399595 +449 10 0 days 00:00:47.256151560 +449 11 0 days 00:01:33.180393133 +449 12 0 days 00:00:10.866830286 +449 13 0 days 00:00:14.380326313 +449 14 0 days 00:00:21.648404860 +449 15 0 days 00:00:12.448976633 +449 16 0 days 00:00:14.612013480 +449 17 0 days 00:00:11.105542260 +449 18 0 days 00:00:14.587894346 +449 19 0 days 00:00:49.411165020 +449 20 0 days 00:00:26.437298613 +450 1 0 days 00:00:08.173079006 +450 2 0 days 00:00:26.622361706 +450 3 0 days 00:00:08.461685553 +450 4 0 days 00:00:07.591184600 +450 5 0 days 00:00:04.077192146 +450 6 0 days 00:00:04.900692086 +450 7 0 days 00:00:13.893668186 +450 8 0 days 00:00:06.937313220 +450 9 0 days 00:01:12.867529606 +450 10 0 days 00:01:31.230554000 +450 11 0 days 00:00:13.980754426 +450 12 0 days 00:00:07.891977166 +450 13 0 days 00:00:06.103059126 +450 14 0 days 00:00:16.248180966 +450 15 0 days 00:01:37.085933360 +450 16 0 days 00:00:04.545249013 +450 17 0 days 00:00:15.451703380 +450 18 0 days 00:00:07.602851600 
+450 19 0 days 00:00:50.398787833 +450 20 0 days 00:00:10.761302233 +450 21 0 days 00:00:23.937851426 +450 22 0 days 00:01:23.777329326 +451 1 0 days 00:00:24.026056803 +451 2 0 days 00:00:12.691812275 +451 3 0 days 00:00:15.158223911 +451 4 0 days 00:00:10.427753000 +451 5 0 days 00:00:22.371367245 +451 6 0 days 00:00:14.752834177 +451 7 0 days 00:00:11.137144643 +451 8 0 days 00:00:12.832020090 +451 9 0 days 00:00:09.841471146 +451 10 0 days 00:00:14.502571034 +451 12 0 days 00:00:24.016539480 +451 13 0 days 00:00:23.186120616 +451 14 0 days 00:00:09.770230708 +452 1 0 days 00:00:08.563749295 +452 2 0 days 00:00:22.425021913 +452 3 0 days 00:00:21.042188113 +452 4 0 days 00:00:09.435783460 +452 5 0 days 00:00:14.269523356 +452 6 0 days 00:00:08.717320505 +452 7 0 days 00:00:10.281412555 +452 8 0 days 00:00:13.145390885 +452 9 0 days 00:00:14.282532180 +452 10 0 days 00:00:09.428816893 +452 11 0 days 00:00:20.634633340 +452 12 0 days 00:00:27.732803629 +452 13 0 days 00:00:09.828798112 +452 14 0 days 00:00:20.881993333 +452 15 0 days 00:00:24.475529952 +452 16 0 days 00:00:08.493273300 +452 17 0 days 00:00:12.133442388 +452 18 0 days 00:00:16.387631633 +452 19 0 days 00:00:27.093243877 +453 1 0 days 00:00:04.211215090 +453 2 0 days 00:00:10.009696526 +453 3 0 days 00:00:08.100943355 +453 4 0 days 00:00:07.609709607 +453 5 0 days 00:00:09.683098465 +453 6 0 days 00:00:07.092607652 +453 7 0 days 00:00:04.221775165 +453 8 0 days 00:00:10.961049634 +453 9 0 days 00:00:13.406655046 +453 10 0 days 00:00:12.530257822 +453 11 0 days 00:00:11.830492200 +453 12 0 days 00:00:04.175431420 +453 13 0 days 00:00:13.030431217 +453 14 0 days 00:00:12.708032277 +453 15 0 days 00:00:05.986040960 +453 16 0 days 00:00:04.097525410 +453 17 0 days 00:00:06.350503545 +453 18 0 days 00:00:07.087144096 +453 19 0 days 00:00:07.228686388 +453 20 0 days 00:00:06.413663465 +453 21 0 days 00:00:07.110602900 +453 22 0 days 00:00:06.912204788 +453 23 0 days 00:00:06.552301245 +453 24 0 days 
00:00:06.688390550 +453 25 0 days 00:00:12.524094923 +453 26 0 days 00:00:07.872351354 +453 27 0 days 00:00:11.719177448 +453 28 0 days 00:00:06.381499860 +453 29 0 days 00:00:04.492918963 +453 30 0 days 00:00:11.594033624 +453 31 0 days 00:00:04.658527633 +453 32 0 days 00:00:05.799666802 +453 33 0 days 00:00:07.592814167 +453 34 0 days 00:00:04.939730714 +453 35 0 days 00:00:04.175070590 +453 36 0 days 00:00:07.796791912 +454 1 0 days 00:00:07.662714371 +454 2 0 days 00:00:12.102094816 +454 3 0 days 00:00:11.624003475 +454 4 0 days 00:00:05.002490708 +454 5 0 days 00:00:07.849192735 +454 6 0 days 00:00:13.707384984 +454 7 0 days 00:00:04.338055805 +454 8 0 days 00:00:07.881057134 +454 9 0 days 00:00:08.220058313 +454 10 0 days 00:00:13.295530560 +454 11 0 days 00:00:08.104519143 +454 12 0 days 00:00:07.625892145 +454 13 0 days 00:00:12.687585380 +454 14 0 days 00:00:05.100010320 +454 15 0 days 00:00:04.869557503 +454 16 0 days 00:00:13.291914600 +454 17 0 days 00:00:13.230907468 +454 18 0 days 00:00:07.267027520 +454 19 0 days 00:00:04.710216690 +454 20 0 days 00:00:04.909187766 +454 21 0 days 00:00:06.761154020 +454 22 0 days 00:00:08.016723335 +454 23 0 days 00:00:07.810010024 +454 24 0 days 00:00:08.838213838 +454 25 0 days 00:00:13.988496289 +454 26 0 days 00:00:08.325473765 +455 1 0 days 00:00:21.111397645 +455 2 0 days 00:00:27.394555433 +455 3 0 days 00:00:16.421081866 +455 4 0 days 00:00:14.080683665 +455 5 0 days 00:00:14.082376285 +455 6 0 days 00:00:14.084051100 +455 7 0 days 00:00:17.491046540 +455 8 0 days 00:00:12.853853170 +455 9 0 days 00:00:16.399249260 +455 10 0 days 00:00:29.809899066 +455 11 0 days 00:00:24.715961700 +455 12 0 days 00:00:26.611072610 +455 13 0 days 00:00:26.617638310 +455 14 0 days 00:00:18.328274175 +455 15 0 days 00:00:18.335447670 +455 16 0 days 00:00:16.463191450 +455 17 0 days 00:00:15.974249282 +455 18 0 days 00:00:12.265470065 +455 19 0 days 00:00:17.028178645 +456 1 0 days 00:00:25.538163995 +456 2 0 days 
00:00:24.141939626 +456 3 0 days 00:00:12.961052520 +456 4 0 days 00:00:13.866857604 +456 5 0 days 00:00:14.468427415 +456 6 0 days 00:00:22.833637753 +456 7 0 days 00:00:12.409592095 +456 8 0 days 00:00:24.157891846 +456 9 0 days 00:00:11.700572900 +456 10 0 days 00:00:16.608291415 +456 11 0 days 00:00:24.142865913 +456 12 0 days 00:00:16.599238150 +456 13 0 days 00:00:29.012276394 +456 14 0 days 00:00:21.204869806 +456 15 0 days 00:00:13.064406480 +456 16 0 days 00:00:15.417627733 +456 17 0 days 00:00:13.064163930 +456 18 0 days 00:00:12.406244635 +456 19 0 days 00:00:12.419951460 +456 20 0 days 00:00:13.872080412 +456 21 0 days 00:00:28.226935330 +457 1 0 days 00:00:11.496798700 +457 2 0 days 00:00:07.725855180 +457 3 0 days 00:00:13.877320960 +457 4 0 days 00:00:14.935473313 +457 5 0 days 00:00:10.670785760 +457 6 0 days 00:00:09.783837110 +457 7 0 days 00:00:10.311766025 +457 8 0 days 00:00:08.095806305 +457 9 0 days 00:00:08.868326495 +457 10 0 days 00:00:07.736259595 +457 11 0 days 00:00:10.316031485 +457 12 0 days 00:00:10.316284125 +457 13 0 days 00:00:08.096521665 +457 14 0 days 00:00:14.216511226 +457 15 0 days 00:00:08.867913255 +457 16 0 days 00:00:08.522744136 +457 17 0 days 00:00:13.880344720 +457 18 0 days 00:00:09.426092080 +457 19 0 days 00:00:11.496300515 +457 20 0 days 00:00:11.505375820 +457 21 0 days 00:00:08.095439460 +457 22 0 days 00:00:07.724010485 +457 23 0 days 00:00:07.727887760 +457 24 0 days 00:00:14.217734386 +457 25 0 days 00:00:08.868645690 +457 26 0 days 00:00:15.886161325 +457 27 0 days 00:00:10.301845510 +457 28 0 days 00:00:19.863076630 +457 29 0 days 00:00:10.318863820 +457 30 0 days 00:00:08.102038490 +457 31 0 days 00:00:15.507987255 +457 32 0 days 00:00:10.306057900 +457 33 0 days 00:00:15.513768120 +457 34 0 days 00:00:08.872032010 +457 35 0 days 00:00:15.508896530 +457 36 0 days 00:00:08.873246465 +457 37 0 days 00:00:14.950820440 +457 38 0 days 00:00:07.728047595 +458 1 0 days 00:00:10.380815585 +458 2 0 days 
00:00:15.566216795 +458 3 0 days 00:00:15.109030740 +458 4 0 days 00:00:11.689401775 +458 5 0 days 00:00:17.668787450 +458 6 0 days 00:00:10.465846213 +458 7 0 days 00:00:10.761229655 +458 8 0 days 00:00:07.783749400 +458 9 0 days 00:00:08.262674604 +458 10 0 days 00:00:10.374280340 +458 11 0 days 00:00:12.909299840 +458 12 0 days 00:00:09.645358313 +458 13 0 days 00:00:10.463830766 +458 14 0 days 00:00:17.963085720 +458 15 0 days 00:00:18.670829263 +458 16 0 days 00:00:10.462622473 +458 17 0 days 00:00:15.565579010 +458 18 0 days 00:00:11.688686115 +458 19 0 days 00:00:08.190261425 +458 20 0 days 00:00:10.774275160 +458 21 0 days 00:00:07.781725155 +458 22 0 days 00:00:11.023585872 +458 23 0 days 00:00:09.645278906 +458 24 0 days 00:00:15.105809046 +458 25 0 days 00:00:09.293415673 +458 26 0 days 00:00:17.665635996 +458 27 0 days 00:00:15.563744915 +458 28 0 days 00:00:07.777652145 +458 29 0 days 00:00:10.465100120 +458 30 0 days 00:00:10.364140290 +458 31 0 days 00:00:07.354767780 +458 32 0 days 00:00:08.193000305 +458 33 0 days 00:00:08.697829724 +458 34 0 days 00:00:07.783891455 +458 35 0 days 00:00:15.108722173 +458 36 0 days 00:00:07.784340285 +458 37 0 days 00:00:17.959896388 +459 1 0 days 00:00:14.680458666 +459 2 0 days 00:00:15.596355033 +459 3 0 days 00:00:22.442709373 +459 4 0 days 00:00:08.725665513 +459 5 0 days 00:00:08.829402215 +459 6 0 days 00:00:09.626134026 +459 7 0 days 00:00:28.147385240 +459 8 0 days 00:00:09.604832657 +459 9 0 days 00:00:26.843989436 +459 10 0 days 00:00:10.312618306 +459 11 0 days 00:00:09.246319415 +459 12 0 days 00:00:14.395173366 +459 13 0 days 00:00:14.525534115 +459 14 0 days 00:00:10.458331645 +459 15 0 days 00:00:22.143967855 +459 16 0 days 00:00:30.371042173 +459 17 0 days 00:00:08.360997275 +459 18 0 days 00:00:10.225139684 +459 19 0 days 00:00:23.218382435 +459 20 0 days 00:00:20.782000873 +459 21 0 days 00:00:29.550816280 +459 22 0 days 00:00:08.987099980 +459 23 0 days 00:00:12.616356892 +459 24 0 days 
00:00:13.365334366 +460 1 0 days 00:00:07.228379075 +460 2 0 days 00:00:07.393241980 +460 3 0 days 00:00:04.681111430 +460 4 0 days 00:00:04.699081385 +460 5 0 days 00:00:05.159296955 +460 6 0 days 00:00:07.960384755 +460 7 0 days 00:00:14.149910995 +460 8 0 days 00:00:12.756989826 +460 9 0 days 00:00:04.329741635 +460 10 0 days 00:00:05.145119090 +460 11 0 days 00:00:04.393957860 +460 12 0 days 00:00:04.630178100 +460 13 0 days 00:00:04.652412463 +460 14 0 days 00:00:07.426313165 +460 15 0 days 00:00:12.613942566 +460 16 0 days 00:00:13.023401896 +460 17 0 days 00:00:10.877836273 +460 18 0 days 00:00:11.424074960 +460 19 0 days 00:00:04.961189125 +460 20 0 days 00:00:04.786020695 +460 21 0 days 00:00:06.992174470 +460 22 0 days 00:00:06.805190205 +460 23 0 days 00:00:13.946247030 +460 24 0 days 00:00:04.797489640 +460 25 0 days 00:00:04.760175950 +460 26 0 days 00:00:06.958149180 +460 27 0 days 00:00:06.805226105 +460 28 0 days 00:00:08.141687810 +460 29 0 days 00:00:07.341624493 +460 30 0 days 00:00:14.229888880 +460 31 0 days 00:00:04.338271545 +460 32 0 days 00:00:04.294048440 +460 33 0 days 00:00:04.757607906 +460 34 0 days 00:00:07.702238990 +460 35 0 days 00:00:07.698404520 +460 36 0 days 00:00:06.646093190 +460 37 0 days 00:00:14.592384437 +460 38 0 days 00:00:06.357857340 +460 39 0 days 00:00:05.056031260 +460 40 0 days 00:00:12.151614440 +460 41 0 days 00:00:10.891850106 +460 42 0 days 00:00:04.898434732 +460 43 0 days 00:00:04.257211710 +460 44 0 days 00:00:04.586967970 +460 45 0 days 00:00:04.881908134 +460 46 0 days 00:00:13.863915820 +460 47 0 days 00:00:05.732518604 +460 48 0 days 00:00:06.908049284 +460 49 0 days 00:00:05.011984045 +460 50 0 days 00:00:04.165134896 +460 51 0 days 00:00:04.821698960 +460 52 0 days 00:00:07.936145060 +460 53 0 days 00:00:05.025394153 +460 54 0 days 00:00:06.849770452 +460 55 0 days 00:00:04.873387020 +460 56 0 days 00:00:11.410973493 +460 57 0 days 00:00:04.404572380 +460 58 0 days 00:00:05.020375390 +460 59 0 days 
00:00:04.632069820 +461 1 0 days 00:00:23.454733380 +461 2 0 days 00:00:23.095045400 +461 3 0 days 00:00:10.857187644 +461 4 0 days 00:00:23.085890495 +461 5 0 days 00:00:13.560970130 +461 6 0 days 00:00:13.671085395 +461 7 0 days 00:00:20.831545540 +461 8 0 days 00:00:13.593946905 +461 9 0 days 00:00:24.429973980 +461 10 0 days 00:00:23.478952110 +461 11 0 days 00:00:08.557608250 +461 12 0 days 00:00:23.247850285 +461 13 0 days 00:00:08.581169645 +461 14 0 days 00:00:25.303489080 +461 15 0 days 00:00:24.761859572 +461 16 0 days 00:00:13.663597225 +461 17 0 days 00:00:15.429539728 +461 18 0 days 00:00:14.288095700 +461 19 0 days 00:00:08.566567030 +461 20 0 days 00:00:09.516781566 +461 21 0 days 00:00:22.785239890 +461 22 0 days 00:00:09.282459716 +462 1 0 days 00:00:11.905326240 +462 2 0 days 00:00:04.634560360 +462 3 0 days 00:00:07.327057172 +462 4 0 days 00:00:04.351183200 +462 5 0 days 00:00:05.052214500 +462 6 0 days 00:00:11.716991705 +462 7 0 days 00:00:06.903080180 +462 8 0 days 00:00:11.986997150 +462 9 0 days 00:00:04.447709325 +462 10 0 days 00:00:06.114201020 +462 11 0 days 00:00:11.758012600 +462 12 0 days 00:00:04.406691860 +462 13 0 days 00:00:06.170217660 +462 14 0 days 00:00:12.699126626 +462 15 0 days 00:00:06.839765170 +462 16 0 days 00:00:06.814634395 +462 17 0 days 00:00:04.802925456 +462 18 0 days 00:00:04.545757005 +462 19 0 days 00:00:11.724761840 +462 20 0 days 00:00:07.500757576 +462 21 0 days 00:00:07.526942110 +462 22 0 days 00:00:04.301970525 +462 23 0 days 00:00:06.813358975 +462 24 0 days 00:00:10.882905512 +462 25 0 days 00:00:04.416925790 +462 26 0 days 00:00:11.592384480 +462 27 0 days 00:00:06.907234785 +462 28 0 days 00:00:04.278426140 +462 29 0 days 00:00:04.406978640 +462 30 0 days 00:00:05.115921340 +462 31 0 days 00:00:06.810508955 +462 32 0 days 00:00:04.380467520 +462 33 0 days 00:00:11.898675632 +462 34 0 days 00:00:06.790390750 +462 35 0 days 00:00:07.472004528 +462 36 0 days 00:00:14.031438912 +462 37 0 days 
00:00:06.819944525 +462 38 0 days 00:00:04.884747530 +462 39 0 days 00:00:11.898690590 +462 40 0 days 00:00:11.741456975 +462 41 0 days 00:00:04.881051922 +462 42 0 days 00:00:05.192484310 +462 43 0 days 00:00:12.412388220 +462 44 0 days 00:00:11.609743450 +462 45 0 days 00:00:04.415343015 +462 46 0 days 00:00:12.475235860 +462 47 0 days 00:00:05.000738796 +463 1 0 days 00:00:12.377969470 +463 2 0 days 00:00:25.394464660 +463 3 0 days 00:00:20.769146160 +463 4 0 days 00:00:12.921485045 +463 5 0 days 00:00:18.302420045 +463 6 0 days 00:00:25.386070715 +463 7 0 days 00:00:16.606949720 +463 8 0 days 00:00:14.031399715 +463 9 0 days 00:00:16.603726705 +463 10 0 days 00:00:17.133020295 +463 11 0 days 00:00:25.370033370 +463 12 0 days 00:00:12.388509900 +463 13 0 days 00:00:14.040199530 +463 14 0 days 00:00:29.413402547 +463 15 0 days 00:00:26.595376510 +463 16 0 days 00:00:24.783168680 +463 17 0 days 00:00:14.256183050 +463 18 0 days 00:00:27.414744243 +463 19 0 days 00:00:14.038084060 +464 1 0 days 00:00:07.786789530 +464 2 0 days 00:00:08.844584600 +464 3 0 days 00:00:16.531138968 +464 4 0 days 00:00:17.174085683 +464 5 0 days 00:00:15.548513140 +464 6 0 days 00:00:11.463650040 +464 7 0 days 00:00:17.571847106 +464 8 0 days 00:00:17.714337556 +464 9 0 days 00:00:16.902225404 +464 10 0 days 00:00:16.890618340 +464 11 0 days 00:00:10.744984110 +464 12 0 days 00:00:08.276745544 +464 13 0 days 00:00:10.744508255 +464 14 0 days 00:00:08.844361315 +464 15 0 days 00:00:12.026177375 +464 16 0 days 00:00:16.536236132 +464 17 0 days 00:00:16.914263160 +464 18 0 days 00:00:10.741564515 +464 19 0 days 00:00:07.806646165 +464 20 0 days 00:00:11.066055600 +464 21 0 days 00:00:08.269796512 +464 22 0 days 00:00:16.658031670 +464 23 0 days 00:00:11.065507360 +464 24 0 days 00:00:10.743012230 +464 25 0 days 00:00:07.797087260 +464 26 0 days 00:00:11.397137388 +464 27 0 days 00:00:15.562317515 +464 28 0 days 00:00:11.483484585 +464 29 0 days 00:00:10.392351375 +464 30 0 days 
00:00:08.837174060 +465 1 0 days 00:00:00.135996780 +465 3 0 days 00:00:00.115489120 +465 4 0 days 00:00:00.201722565 +465 5 0 days 00:00:00.151935740 +465 6 0 days 00:00:00.205110568 +465 7 0 days 00:00:00.207586685 +465 8 0 days 00:00:00.114302700 +465 11 0 days 00:00:00.090199946 +465 12 0 days 00:00:00.120250830 +465 14 0 days 00:00:00.141620666 +465 15 0 days 00:00:00.194828064 +465 16 0 days 00:00:00.194806823 +465 21 0 days 00:00:00.184808311 +465 23 0 days 00:00:00.107530186 +465 25 0 days 00:00:00.090272966 +465 26 0 days 00:00:00.089684480 +465 27 0 days 00:00:00.089062526 +465 28 0 days 00:00:00.098364885 +465 29 0 days 00:00:00.192913141 +465 30 0 days 00:00:00.147464421 +465 31 0 days 00:00:00.090836780 +465 32 0 days 00:00:00.108485800 +465 33 0 days 00:00:00.193236481 +465 34 0 days 00:00:00.091402820 +465 37 0 days 00:00:00.114186688 +465 39 0 days 00:00:00.201513232 +465 40 0 days 00:00:00.104735616 +465 41 0 days 00:00:00.200226087 +465 42 0 days 00:00:00.109895602 +465 43 0 days 00:00:00.107878346 +465 44 0 days 00:00:00.113401444 +465 45 0 days 00:00:00.103948072 +465 46 0 days 00:00:00.105609500 +465 48 0 days 00:00:00.205077938 +465 49 0 days 00:00:00.194559425 +465 50 0 days 00:00:00.088758486 +465 51 0 days 00:00:00.198338797 +465 53 0 days 00:00:00.195416225 +465 54 0 days 00:00:00.141528180 +465 55 0 days 00:00:00.092687760 +465 56 0 days 00:00:00.198043040 +465 57 0 days 00:00:00.100694806 +465 58 0 days 00:00:00.194390936 +465 60 0 days 00:00:00.155211455 +465 61 0 days 00:00:00.200038298 +465 62 0 days 00:00:00.094026186 +465 63 0 days 00:00:00.112486625 +465 65 0 days 00:00:00.204616223 +465 67 0 days 00:00:00.101974645 +465 68 0 days 00:00:00.141459186 +465 69 0 days 00:00:00.199582885 +465 70 0 days 00:00:00.196016770 +465 71 0 days 00:00:00.201030600 +465 72 0 days 00:00:00.201616697 +465 73 0 days 00:00:00.147955226 +465 74 0 days 00:00:00.101731846 +465 75 0 days 00:00:00.206401950 +465 76 0 days 00:00:00.198256820 +465 77 0 days 
00:00:00.093696173 +465 78 0 days 00:00:00.129548740 +465 79 0 days 00:00:00.119171520 +465 83 0 days 00:00:00.092987380 +465 84 0 days 00:00:00.201666723 +465 85 0 days 00:00:00.198879706 +465 86 0 days 00:00:00.115252566 +465 89 0 days 00:00:00.200843281 +465 90 0 days 00:00:00.205955054 +465 92 0 days 00:00:00.131490860 +465 93 0 days 00:00:00.096528173 +465 95 0 days 00:00:00.116117520 +465 96 0 days 00:00:00.110980708 +465 97 0 days 00:00:00.105936915 +465 100 0 days 00:00:00.098818786 +466 1 0 days 00:00:00.114951233 +466 4 0 days 00:00:00.120903460 +466 5 0 days 00:00:00.210398762 +466 6 0 days 00:00:00.161473751 +466 7 0 days 00:00:00.099486846 +466 9 0 days 00:00:00.216411350 +466 10 0 days 00:00:00.118376010 +466 11 0 days 00:00:00.097040093 +466 12 0 days 00:00:00.210447058 +466 13 0 days 00:00:00.171910040 +466 15 0 days 00:00:00.158129453 +466 16 0 days 00:00:00.129797195 +466 18 0 days 00:00:00.202128945 +466 19 0 days 00:00:00.187551436 +466 20 0 days 00:00:00.113487126 +466 21 0 days 00:00:00.146521586 +466 22 0 days 00:00:00.106465720 +466 23 0 days 00:00:00.200772550 +466 25 0 days 00:00:00.119260397 +466 26 0 days 00:00:00.115082133 +466 27 0 days 00:00:00.096175873 +466 29 0 days 00:00:00.105629810 +466 31 0 days 00:00:00.105477560 +466 32 0 days 00:00:00.113768806 +466 33 0 days 00:00:00.113539773 +466 34 0 days 00:00:00.200905229 +466 37 0 days 00:00:00.149242820 +466 38 0 days 00:00:00.148698140 +466 39 0 days 00:00:00.148474206 +466 40 0 days 00:00:00.125918230 +466 41 0 days 00:00:00.111252708 +466 44 0 days 00:00:00.147027126 +466 45 0 days 00:00:00.111584548 +466 47 0 days 00:00:00.105787595 +466 48 0 days 00:00:00.202419682 +466 50 0 days 00:00:00.148816746 +466 51 0 days 00:00:00.113921953 +466 52 0 days 00:00:00.202626784 +466 53 0 days 00:00:00.198677380 +466 55 0 days 00:00:00.148496846 +466 56 0 days 00:00:00.112434006 +466 57 0 days 00:00:00.133785644 +466 58 0 days 00:00:00.202455709 +466 59 0 days 00:00:00.127446617 +466 60 0 
days 00:00:00.097652493 +466 61 0 days 00:00:00.149918480 +466 64 0 days 00:00:00.153481776 +466 65 0 days 00:00:00.133544992 +466 66 0 days 00:00:00.126496760 +466 69 0 days 00:00:00.104553725 +466 71 0 days 00:00:00.104817640 +466 72 0 days 00:00:00.114074313 +466 73 0 days 00:00:00.095886713 +466 77 0 days 00:00:00.149084473 +466 78 0 days 00:00:00.201551338 +466 80 0 days 00:00:00.096930633 +466 81 0 days 00:00:00.202556761 +466 82 0 days 00:00:00.095979306 +466 83 0 days 00:00:00.202214561 +466 85 0 days 00:00:00.122292148 +466 86 0 days 00:00:00.104304855 +466 87 0 days 00:00:00.096095266 +466 88 0 days 00:00:00.122945824 +466 89 0 days 00:00:00.149741673 +466 91 0 days 00:00:00.200468802 +466 92 0 days 00:00:00.146944513 +466 93 0 days 00:00:00.197558450 +466 96 0 days 00:00:00.112841660 +466 97 0 days 00:00:00.149926346 +466 99 0 days 00:00:00.114136120 +466 100 0 days 00:00:00.113385306 +467 1 0 days 00:00:00.079089262 +467 2 0 days 00:00:00.081101677 +467 3 0 days 00:00:00.106645410 +467 4 0 days 00:00:00.081060945 +467 5 0 days 00:00:00.082362140 +467 6 0 days 00:00:00.054159886 +467 7 0 days 00:00:00.055021813 +467 8 0 days 00:00:00.107309364 +467 9 0 days 00:00:00.108484953 +467 10 0 days 00:00:00.074328870 +467 11 0 days 00:00:00.064141620 +467 12 0 days 00:00:00.054579666 +467 13 0 days 00:00:00.061233160 +467 14 0 days 00:00:00.062985486 +467 15 0 days 00:00:00.054244973 +467 16 0 days 00:00:00.081713220 +467 17 0 days 00:00:00.069998662 +467 18 0 days 00:00:00.107396870 +467 19 0 days 00:00:00.054455686 +467 20 0 days 00:00:00.054003280 +467 21 0 days 00:00:00.062673333 +467 22 0 days 00:00:00.054568520 +467 23 0 days 00:00:00.054581926 +467 24 0 days 00:00:00.054079840 +467 25 0 days 00:00:00.082400213 +467 26 0 days 00:00:00.103782524 +467 27 0 days 00:00:00.081987155 +467 28 0 days 00:00:00.081967152 +467 29 0 days 00:00:00.054578053 +467 30 0 days 00:00:00.089565180 +467 31 0 days 00:00:00.054466420 +467 32 0 days 00:00:00.107464390 +467 33 0 
days 00:00:00.082720182 +467 34 0 days 00:00:00.053869193 +467 35 0 days 00:00:00.062150233 +467 36 0 days 00:00:00.062047400 +467 37 0 days 00:00:00.064030076 +467 38 0 days 00:00:00.059089350 +467 39 0 days 00:00:00.109611996 +467 40 0 days 00:00:00.053820633 +467 41 0 days 00:00:00.061870426 +467 42 0 days 00:00:00.082045186 +467 43 0 days 00:00:00.053685186 +467 44 0 days 00:00:00.105116200 +467 45 0 days 00:00:00.063540540 +467 46 0 days 00:00:00.108785152 +467 47 0 days 00:00:00.107294650 +467 48 0 days 00:00:00.094958408 +467 49 0 days 00:00:00.063147053 +467 50 0 days 00:00:00.081672235 +467 52 0 days 00:00:00.054678800 +467 53 0 days 00:00:00.109523536 +467 54 0 days 00:00:00.054535686 +467 55 0 days 00:00:00.081179026 +467 56 0 days 00:00:00.054185426 +467 57 0 days 00:00:00.054421760 +467 58 0 days 00:00:00.054974020 +467 59 0 days 00:00:00.109894568 +467 60 0 days 00:00:00.054525573 +467 61 0 days 00:00:00.053976446 +467 62 0 days 00:00:00.082149046 +467 63 0 days 00:00:00.058900405 +467 64 0 days 00:00:00.063661480 +467 65 0 days 00:00:00.063979050 +467 66 0 days 00:00:00.082683308 +467 67 0 days 00:00:00.054221633 +467 68 0 days 00:00:00.068384070 +467 69 0 days 00:00:00.062901860 +467 70 0 days 00:00:00.108358567 +467 71 0 days 00:00:00.062603693 +467 72 0 days 00:00:00.109235726 +467 74 0 days 00:00:00.054864660 +467 75 0 days 00:00:00.082791880 +467 76 0 days 00:00:00.054764640 +467 77 0 days 00:00:00.109626416 +467 78 0 days 00:00:00.106295516 +467 79 0 days 00:00:00.062351446 +467 80 0 days 00:00:00.055101226 +467 81 0 days 00:00:00.054674020 +467 82 0 days 00:00:00.103016145 +467 83 0 days 00:00:00.062967966 +467 84 0 days 00:00:00.062951400 +467 85 0 days 00:00:00.061673980 +467 86 0 days 00:00:00.053942686 +467 87 0 days 00:00:00.107837570 +467 88 0 days 00:00:00.074856400 +467 89 0 days 00:00:00.053880913 +467 90 0 days 00:00:00.079289752 +467 92 0 days 00:00:00.095022580 +467 93 0 days 00:00:00.075279743 +467 94 0 days 00:00:00.082594940 
+467 96 0 days 00:00:00.063570740 +467 97 0 days 00:00:00.081012653 +467 98 0 days 00:00:00.053972140 +467 99 0 days 00:00:00.082408846 +467 100 0 days 00:00:00.082069702 +468 1 0 days 00:00:00.055684720 +468 2 0 days 00:00:00.055138926 +468 3 0 days 00:00:00.111716545 +468 4 0 days 00:00:00.071326320 +468 5 0 days 00:00:00.055010513 +468 6 0 days 00:00:00.066638040 +468 7 0 days 00:00:00.093375385 +468 8 0 days 00:00:00.056236146 +468 9 0 days 00:00:00.064316626 +468 10 0 days 00:00:00.064119080 +468 11 0 days 00:00:00.064455320 +468 12 0 days 00:00:00.055230986 +468 13 0 days 00:00:00.110197548 +468 14 0 days 00:00:00.110406581 +468 15 0 days 00:00:00.056022026 +468 16 0 days 00:00:00.083907056 +468 17 0 days 00:00:00.113056627 +468 18 0 days 00:00:00.111230486 +468 19 0 days 00:00:00.113320261 +468 20 0 days 00:00:00.111512785 +468 21 0 days 00:00:00.070585920 +468 22 0 days 00:00:00.110857484 +468 23 0 days 00:00:00.059939630 +468 24 0 days 00:00:00.111961902 +468 25 0 days 00:00:00.064054100 +468 26 0 days 00:00:00.110810366 +468 27 0 days 00:00:00.063728000 +468 28 0 days 00:00:00.075636050 +468 29 0 days 00:00:00.083977813 +468 30 0 days 00:00:00.055248760 +468 31 0 days 00:00:00.084248780 +468 32 0 days 00:00:00.112079994 +468 33 0 days 00:00:00.074397616 +468 34 0 days 00:00:00.073564540 +468 35 0 days 00:00:00.084899113 +468 36 0 days 00:00:00.107826037 +468 37 0 days 00:00:00.111503781 +468 38 0 days 00:00:00.084031840 +468 39 0 days 00:00:00.054774886 +468 40 0 days 00:00:00.098046068 +468 41 0 days 00:00:00.078637022 +468 42 0 days 00:00:00.065594374 +468 43 0 days 00:00:00.065699653 +468 44 0 days 00:00:00.104521680 +468 45 0 days 00:00:00.111099070 +468 46 0 days 00:00:00.054333013 +468 48 0 days 00:00:00.054464886 +468 49 0 days 00:00:00.064598546 +468 50 0 days 00:00:00.084624880 +468 51 0 days 00:00:00.084050933 +468 52 0 days 00:00:00.063304380 +468 53 0 days 00:00:00.109765176 +468 54 0 days 00:00:00.079924340 +468 55 0 days 00:00:00.064840676 
+468 56 0 days 00:00:00.064897813 +468 57 0 days 00:00:00.112385415 +468 58 0 days 00:00:00.078129845 +468 60 0 days 00:00:00.055203786 +468 61 0 days 00:00:00.076210410 +468 62 0 days 00:00:00.082676398 +468 63 0 days 00:00:00.082297566 +468 64 0 days 00:00:00.061253760 +468 65 0 days 00:00:00.107899440 +468 66 0 days 00:00:00.057184580 +468 67 0 days 00:00:00.061672850 +468 68 0 days 00:00:00.081990420 +468 69 0 days 00:00:00.079472606 +468 70 0 days 00:00:00.053218846 +468 71 0 days 00:00:00.053528680 +468 72 0 days 00:00:00.090794030 +468 73 0 days 00:00:00.071429816 +468 74 0 days 00:00:00.092266015 +468 75 0 days 00:00:00.062908720 +468 76 0 days 00:00:00.053399613 +468 77 0 days 00:00:00.110975302 +468 78 0 days 00:00:00.067536730 +468 79 0 days 00:00:00.109075415 +468 80 0 days 00:00:00.061462300 +468 81 0 days 00:00:00.062107800 +468 82 0 days 00:00:00.061070926 +468 83 0 days 00:00:00.053347320 +468 84 0 days 00:00:00.061600572 +468 85 0 days 00:00:00.062731693 +468 86 0 days 00:00:00.110983827 +468 87 0 days 00:00:00.111371562 +468 88 0 days 00:00:00.109233446 +468 89 0 days 00:00:00.111958340 +468 90 0 days 00:00:00.111750678 +468 92 0 days 00:00:00.111060507 +468 93 0 days 00:00:00.069958745 +468 94 0 days 00:00:00.055424440 +468 95 0 days 00:00:00.076386000 +468 96 0 days 00:00:00.063172613 +468 97 0 days 00:00:00.073181820 +468 98 0 days 00:00:00.114761114 +468 100 0 days 00:00:00.063586066 +469 1 0 days 00:00:00.192761847 +469 2 0 days 00:00:00.184254930 +469 3 0 days 00:00:00.111853006 +469 4 0 days 00:00:00.148693870 +469 6 0 days 00:00:00.198602533 +469 7 0 days 00:00:00.124378830 +469 8 0 days 00:00:00.110908724 +469 9 0 days 00:00:00.125779425 +469 10 0 days 00:00:00.193371466 +469 11 0 days 00:00:00.126048670 +469 12 0 days 00:00:00.153563257 +469 13 0 days 00:00:00.192936368 +469 14 0 days 00:00:00.195177474 +469 16 0 days 00:00:00.192639986 +469 18 0 days 00:00:00.197086823 +469 19 0 days 00:00:00.133893612 +469 20 0 days 00:00:00.200263055 
+469 21 0 days 00:00:00.203751413 +469 22 0 days 00:00:00.114557566 +469 23 0 days 00:00:00.127514550 +469 24 0 days 00:00:00.152639905 +469 25 0 days 00:00:00.126456165 +469 26 0 days 00:00:00.201179021 +469 27 0 days 00:00:00.150054852 +469 28 0 days 00:00:00.201614915 +469 29 0 days 00:00:00.150271898 +469 30 0 days 00:00:00.112411712 +469 31 0 days 00:00:00.151620948 +469 32 0 days 00:00:00.134025076 +469 33 0 days 00:00:00.126000160 +469 35 0 days 00:00:00.123039126 +469 36 0 days 00:00:00.151424326 +469 38 0 days 00:00:00.200901441 +469 39 0 days 00:00:00.199634107 +469 40 0 days 00:00:00.202920533 +469 41 0 days 00:00:00.195948058 +469 42 0 days 00:00:00.203402982 +469 44 0 days 00:00:00.111044912 +469 45 0 days 00:00:00.205480266 +469 46 0 days 00:00:00.202599091 +469 47 0 days 00:00:00.154011358 +469 49 0 days 00:00:00.112012300 +469 50 0 days 00:00:00.194810088 +469 51 0 days 00:00:00.198557445 +469 53 0 days 00:00:00.148206438 +469 54 0 days 00:00:00.123530110 +469 56 0 days 00:00:00.190196350 +469 57 0 days 00:00:00.123258705 +469 58 0 days 00:00:00.109441240 +469 59 0 days 00:00:00.151142533 +469 60 0 days 00:00:00.189892636 +469 61 0 days 00:00:00.198608063 +469 62 0 days 00:00:00.112428733 +469 63 0 days 00:00:00.119337515 +469 64 0 days 00:00:00.113994580 +469 65 0 days 00:00:00.113667680 +469 66 0 days 00:00:00.194977961 +469 68 0 days 00:00:00.193636167 +469 69 0 days 00:00:00.195857001 +469 70 0 days 00:00:00.196738473 +469 71 0 days 00:00:00.123582020 +469 72 0 days 00:00:00.114044923 +469 73 0 days 00:00:00.110304184 +469 74 0 days 00:00:00.150030648 +469 75 0 days 00:00:00.108925920 +469 76 0 days 00:00:00.109967688 +469 77 0 days 00:00:00.196008281 +469 78 0 days 00:00:00.193058783 +469 79 0 days 00:00:00.189580655 +469 80 0 days 00:00:00.151294170 +469 81 0 days 00:00:00.148510567 +469 82 0 days 00:00:00.109602508 +469 83 0 days 00:00:00.149119512 +469 84 0 days 00:00:00.161501770 +469 87 0 days 00:00:00.123280275 +469 88 0 days 
00:00:00.109763800 +469 89 0 days 00:00:00.110206952 +469 90 0 days 00:00:00.116262622 +469 91 0 days 00:00:00.195277937 +469 92 0 days 00:00:00.161867210 +469 93 0 days 00:00:00.191359889 +469 94 0 days 00:00:00.192556968 +469 95 0 days 00:00:00.148584495 +469 96 0 days 00:00:00.105802052 +469 97 0 days 00:00:00.107108124 +469 98 0 days 00:00:00.110302573 +469 99 0 days 00:00:00.111290526 +470 1 0 days 00:00:00.062037360 +470 2 0 days 00:00:00.061204528 +470 3 0 days 00:00:00.091725455 +470 4 0 days 00:00:00.066282105 +470 5 0 days 00:00:00.108653464 +470 6 0 days 00:00:00.067292300 +470 7 0 days 00:00:00.067324875 +470 8 0 days 00:00:00.091633625 +470 9 0 days 00:00:00.060244616 +470 10 0 days 00:00:00.062950826 +470 11 0 days 00:00:00.062764553 +470 12 0 days 00:00:00.070648228 +470 13 0 days 00:00:00.065092485 +470 14 0 days 00:00:00.091315540 +470 15 0 days 00:00:00.063722431 +470 16 0 days 00:00:00.091955590 +470 17 0 days 00:00:00.062334013 +470 18 0 days 00:00:00.109162380 +470 19 0 days 00:00:00.064643895 +470 20 0 days 00:00:00.059645244 +470 21 0 days 00:00:00.066874645 +470 22 0 days 00:00:00.095086836 +470 23 0 days 00:00:00.066270345 +470 24 0 days 00:00:00.059319384 +470 25 0 days 00:00:00.090261005 +470 26 0 days 00:00:00.066742370 +470 27 0 days 00:00:00.058530244 +470 28 0 days 00:00:00.081398913 +470 29 0 days 00:00:00.108495855 +470 30 0 days 00:00:00.062290100 +470 31 0 days 00:00:00.069984352 +470 32 0 days 00:00:00.059293608 +470 33 0 days 00:00:00.090416980 +470 34 0 days 00:00:00.091320590 +470 35 0 days 00:00:00.111435070 +470 36 0 days 00:00:00.108825733 +470 37 0 days 00:00:00.062405313 +470 38 0 days 00:00:00.067771010 +470 39 0 days 00:00:00.090719270 +470 40 0 days 00:00:00.061681260 +470 41 0 days 00:00:00.091191035 +470 42 0 days 00:00:00.107834791 +470 43 0 days 00:00:00.090743225 +470 44 0 days 00:00:00.074724542 +470 45 0 days 00:00:00.060903148 +470 46 0 days 00:00:00.071482124 +470 47 0 days 00:00:00.091292580 +470 48 0 days 
00:00:00.092530750 +470 50 0 days 00:00:00.079874466 +470 51 0 days 00:00:00.092083265 +470 52 0 days 00:00:00.097076984 +470 53 0 days 00:00:00.060390196 +470 54 0 days 00:00:00.110495854 +470 55 0 days 00:00:00.091014815 +470 56 0 days 00:00:00.067647360 +470 57 0 days 00:00:00.108722355 +470 58 0 days 00:00:00.067639065 +470 59 0 days 00:00:00.091355355 +470 60 0 days 00:00:00.058689925 +470 61 0 days 00:00:00.060288584 +470 62 0 days 00:00:00.061004256 +470 63 0 days 00:00:00.091378835 +470 64 0 days 00:00:00.091798305 +470 65 0 days 00:00:00.066922590 +470 66 0 days 00:00:00.060553352 +470 67 0 days 00:00:00.091214530 +470 68 0 days 00:00:00.062109810 +470 69 0 days 00:00:00.070838732 +470 70 0 days 00:00:00.067745545 +470 71 0 days 00:00:00.067759315 +470 72 0 days 00:00:00.067672915 +470 73 0 days 00:00:00.061126112 +470 74 0 days 00:00:00.091240960 +470 75 0 days 00:00:00.068131720 +470 76 0 days 00:00:00.067920655 +470 77 0 days 00:00:00.060875896 +470 78 0 days 00:00:00.060682608 +470 79 0 days 00:00:00.067522835 +470 80 0 days 00:00:00.091028510 +470 81 0 days 00:00:00.064747168 +470 82 0 days 00:00:00.091999125 +470 83 0 days 00:00:00.068023050 +470 84 0 days 00:00:00.071534404 +470 85 0 days 00:00:00.061616352 +470 86 0 days 00:00:00.091596370 +470 87 0 days 00:00:00.067285905 +470 88 0 days 00:00:00.068283555 +470 89 0 days 00:00:00.075684865 +470 90 0 days 00:00:00.061795676 +470 91 0 days 00:00:00.068579180 +470 92 0 days 00:00:00.092265730 +470 93 0 days 00:00:00.091988970 +470 94 0 days 00:00:00.067579586 +470 95 0 days 00:00:00.068005545 +470 96 0 days 00:00:00.061498580 +470 97 0 days 00:00:00.071628328 +470 98 0 days 00:00:00.092148020 +470 99 0 days 00:00:00.060971640 +470 100 0 days 00:00:00.091797780 +471 1 0 days 00:00:00.595411188 +471 2 0 days 00:00:01.188745476 +471 3 0 days 00:00:00.371457768 +471 4 0 days 00:00:00.749686326 +471 5 0 days 00:00:00.346355287 +471 6 0 days 00:00:00.575207017 +471 7 0 days 00:00:00.390559808 +471 8 0 days 
00:00:00.642172657 +471 9 0 days 00:00:00.389681665 +471 10 0 days 00:00:01.115892260 +471 11 0 days 00:00:00.365354284 +471 12 0 days 00:00:01.173790996 +471 13 0 days 00:00:01.127885795 +471 14 0 days 00:00:00.556583590 +471 15 0 days 00:00:00.414473172 +471 16 0 days 00:00:00.996644443 +471 17 0 days 00:00:00.670381060 +471 18 0 days 00:00:00.380282950 +471 19 0 days 00:00:00.412686449 +471 21 0 days 00:00:00.438713616 +471 22 0 days 00:00:00.366886041 +471 23 0 days 00:00:00.641998588 +471 24 0 days 00:00:01.101543312 +471 25 0 days 00:00:01.037780376 +471 26 0 days 00:00:00.662786376 +471 27 0 days 00:00:00.584842076 +471 28 0 days 00:00:00.687962113 +471 29 0 days 00:00:00.491680418 +471 30 0 days 00:00:01.254918635 +471 31 0 days 00:00:01.291456900 +471 32 0 days 00:00:01.203793445 +471 33 0 days 00:00:00.358981983 +471 34 0 days 00:00:01.000836536 +471 35 0 days 00:00:00.728483470 +471 36 0 days 00:00:00.397341464 +471 37 0 days 00:00:01.095234724 +471 38 0 days 00:00:00.398267660 +471 39 0 days 00:00:01.150373488 +471 40 0 days 00:00:00.544344197 +471 41 0 days 00:00:00.667474877 +471 42 0 days 00:00:00.431870330 +471 43 0 days 00:00:00.353883346 +471 44 0 days 00:00:01.119962208 +471 45 0 days 00:00:01.143001200 +471 46 0 days 00:00:00.573562656 +471 47 0 days 00:00:00.622818057 +471 48 0 days 00:00:01.181943557 +471 49 0 days 00:00:00.378241304 +471 50 0 days 00:00:00.593730333 +471 51 0 days 00:00:00.631868037 +471 52 0 days 00:00:01.035160568 +471 53 0 days 00:00:00.368404905 +471 54 0 days 00:00:00.636606828 +471 55 0 days 00:00:00.570071114 +471 56 0 days 00:00:00.754783198 +471 57 0 days 00:00:01.099948992 +471 58 0 days 00:00:01.062962170 +471 59 0 days 00:00:01.039304383 +471 60 0 days 00:00:00.398840732 +471 61 0 days 00:00:00.852896824 +471 62 0 days 00:00:00.715416558 +471 63 0 days 00:00:00.390940514 +471 64 0 days 00:00:00.343968802 +471 65 0 days 00:00:01.134679292 +471 66 0 days 00:00:00.379779556 +471 67 0 days 00:00:00.684173850 +471 68 0 
days 00:00:00.584960255 +471 69 0 days 00:00:00.622649982 +471 70 0 days 00:00:00.639693777 +471 71 0 days 00:00:00.457188200 +471 72 0 days 00:00:00.371265756 +471 73 0 days 00:00:01.075872544 +471 74 0 days 00:00:00.444718627 +471 75 0 days 00:00:00.590583245 +471 76 0 days 00:00:00.623962325 +471 77 0 days 00:00:01.102184553 +471 78 0 days 00:00:00.405119232 +471 79 0 days 00:00:01.053584894 +471 80 0 days 00:00:00.327464565 +471 81 0 days 00:00:00.555618582 +471 82 0 days 00:00:01.200219130 +471 83 0 days 00:00:00.333786120 +471 84 0 days 00:00:00.622372397 +471 85 0 days 00:00:01.057340236 +471 86 0 days 00:00:00.362476974 +471 87 0 days 00:00:01.012683220 +471 88 0 days 00:00:01.141237233 +471 89 0 days 00:00:00.635484641 +471 90 0 days 00:00:00.375809240 +471 91 0 days 00:00:00.781335420 +471 92 0 days 00:00:00.594953354 +471 93 0 days 00:00:00.629974506 +471 94 0 days 00:00:00.407142767 +471 95 0 days 00:00:00.396489745 +471 96 0 days 00:00:00.608838127 +471 97 0 days 00:00:01.145722370 +471 98 0 days 00:00:01.196417791 +471 99 0 days 00:00:01.085376748 +471 100 0 days 00:00:00.471988608 +472 1 0 days 00:00:00.559263366 +472 2 0 days 00:00:00.189020275 +472 3 0 days 00:00:00.607274020 +472 4 0 days 00:00:00.205544211 +472 5 0 days 00:00:00.182860985 +472 6 0 days 00:00:00.586769817 +472 7 0 days 00:00:00.198556782 +472 8 0 days 00:00:00.577629572 +472 9 0 days 00:00:00.190127578 +472 10 0 days 00:00:00.596860960 +472 11 0 days 00:00:00.239529565 +472 12 0 days 00:00:00.582014845 +472 13 0 days 00:00:00.550952022 +472 14 0 days 00:00:00.195994092 +472 15 0 days 00:00:00.352511520 +472 16 0 days 00:00:00.193927528 +472 17 0 days 00:00:00.621506310 +472 18 0 days 00:00:00.198956908 +472 19 0 days 00:00:00.186638470 +472 20 0 days 00:00:00.613543977 +472 21 0 days 00:00:00.603579274 +472 22 0 days 00:00:00.208573896 +472 23 0 days 00:00:00.541972886 +472 24 0 days 00:00:00.294624125 +472 25 0 days 00:00:00.193356285 +472 26 0 days 00:00:00.565854327 +472 27 0 
days 00:00:00.217601154 +472 28 0 days 00:00:00.192427541 +472 29 0 days 00:00:00.186588220 +472 30 0 days 00:00:00.173350157 +472 31 0 days 00:00:00.187653692 +472 32 0 days 00:00:00.299446671 +472 33 0 days 00:00:00.595902463 +472 34 0 days 00:00:00.177768130 +472 35 0 days 00:00:00.194266948 +472 36 0 days 00:00:00.344859540 +472 37 0 days 00:00:00.642071810 +472 38 0 days 00:00:00.235643363 +472 39 0 days 00:00:00.353917787 +472 40 0 days 00:00:00.531330803 +472 41 0 days 00:00:00.181192546 +472 42 0 days 00:00:00.366828554 +472 43 0 days 00:00:00.388944600 +472 44 0 days 00:00:00.415045988 +472 45 0 days 00:00:00.292964454 +472 46 0 days 00:00:00.596638702 +472 47 0 days 00:00:00.626478003 +472 48 0 days 00:00:00.303569184 +472 49 0 days 00:00:00.315269813 +472 50 0 days 00:00:00.640659409 +472 51 0 days 00:00:00.321507597 +472 52 0 days 00:00:00.571373605 +472 53 0 days 00:00:00.311618382 +472 54 0 days 00:00:00.583831454 +472 55 0 days 00:00:00.338479900 +472 56 0 days 00:00:00.518363773 +472 57 0 days 00:00:00.343943690 +472 58 0 days 00:00:00.338084332 +472 59 0 days 00:00:00.585406726 +472 60 0 days 00:00:00.534653884 +472 61 0 days 00:00:00.178713544 +472 62 0 days 00:00:00.340956234 +472 63 0 days 00:00:00.570544460 +472 64 0 days 00:00:00.355706900 +472 65 0 days 00:00:00.537142760 +472 66 0 days 00:00:00.574452561 +472 67 0 days 00:00:00.659603641 +472 68 0 days 00:00:00.358608488 +472 70 0 days 00:00:00.568953993 +472 73 0 days 00:00:00.608260477 +472 74 0 days 00:00:00.619066755 +472 75 0 days 00:00:00.444451450 +472 76 0 days 00:00:00.649891466 +472 77 0 days 00:00:00.543438103 +472 78 0 days 00:00:00.581368196 +472 79 0 days 00:00:00.340049000 +472 80 0 days 00:00:00.339717217 +472 81 0 days 00:00:00.377055600 +472 82 0 days 00:00:00.317822376 +472 83 0 days 00:00:00.348826701 +472 84 0 days 00:00:00.325860315 +472 85 0 days 00:00:00.187544564 +472 86 0 days 00:00:00.353092507 +472 87 0 days 00:00:00.585075104 +472 88 0 days 00:00:00.333198367 
+472 89 0 days 00:00:00.302052437 +472 90 0 days 00:00:00.593111107 +472 91 0 days 00:00:00.239502037 +472 92 0 days 00:00:00.188383011 +472 93 0 days 00:00:00.194028062 +472 94 0 days 00:00:00.345571452 +472 95 0 days 00:00:00.527821193 +472 96 0 days 00:00:00.540266760 +472 97 0 days 00:00:00.331289508 +472 98 0 days 00:00:00.293990220 +472 99 0 days 00:00:00.539386070 +472 100 0 days 00:00:00.318075300 +473 1 0 days 00:00:00.295400200 +473 2 0 days 00:00:00.472314546 +473 4 0 days 00:00:00.289027985 +473 5 0 days 00:00:00.296954565 +473 6 0 days 00:00:01.093957740 +473 7 0 days 00:00:01.188589804 +473 8 0 days 00:00:00.262404806 +473 9 0 days 00:00:00.311351948 +473 10 0 days 00:00:00.489156400 +473 11 0 days 00:00:00.280445546 +473 12 0 days 00:00:01.179492984 +473 13 0 days 00:00:00.915032526 +473 14 0 days 00:00:00.561346090 +473 15 0 days 00:00:01.181768025 +473 16 0 days 00:00:01.063020374 +473 17 0 days 00:00:01.026342836 +473 18 0 days 00:00:00.596049216 +473 19 0 days 00:00:00.968695105 +473 20 0 days 00:00:00.864182893 +473 21 0 days 00:00:00.528278293 +473 22 0 days 00:00:00.833444073 +473 23 0 days 00:00:00.318049460 +473 24 0 days 00:00:00.608384222 +473 25 0 days 00:00:00.269929413 +473 26 0 days 00:00:00.266095120 +473 27 0 days 00:00:00.319418092 +473 28 0 days 00:00:00.268466806 +473 29 0 days 00:00:00.750647755 +473 30 0 days 00:00:00.603133776 +473 31 0 days 00:00:01.084911756 +473 32 0 days 00:00:00.867867326 +473 33 0 days 00:00:00.870923853 +473 34 0 days 00:00:01.142822405 +473 35 0 days 00:00:00.392376173 +473 36 0 days 00:00:00.885680253 +473 37 0 days 00:00:00.956671520 +473 38 0 days 00:00:00.652854735 +473 39 0 days 00:00:00.373877536 +473 40 0 days 00:00:01.115001997 +473 41 0 days 00:00:00.504793920 +473 42 0 days 00:00:00.259770046 +473 43 0 days 00:00:00.520031393 +473 44 0 days 00:00:00.484032766 +473 45 0 days 00:00:00.329428210 +473 46 0 days 00:00:00.295089200 +473 47 0 days 00:00:00.542131692 +473 48 0 days 00:00:01.099353448 
+473 49 0 days 00:00:00.547817973 +473 50 0 days 00:00:00.327507230 +473 51 0 days 00:00:00.646874430 +473 52 0 days 00:00:01.122442502 +473 53 0 days 00:00:00.945364834 +473 54 0 days 00:00:00.391229190 +473 55 0 days 00:00:01.101899068 +473 56 0 days 00:00:00.891032653 +473 57 0 days 00:00:00.321809223 +473 58 0 days 00:00:00.277998546 +473 59 0 days 00:00:00.478304186 +473 60 0 days 00:00:00.311424620 +473 61 0 days 00:00:00.307283384 +473 62 0 days 00:00:00.843526320 +473 63 0 days 00:00:01.246455650 +473 64 0 days 00:00:00.576562764 +473 65 0 days 00:00:00.269036786 +473 66 0 days 00:00:01.154950232 +473 67 0 days 00:00:00.627064660 +473 68 0 days 00:00:01.195078334 +473 69 0 days 00:00:01.079034733 +473 70 0 days 00:00:00.615412261 +473 71 0 days 00:00:00.998680968 +473 72 0 days 00:00:01.014412820 +473 73 0 days 00:00:01.221759794 +473 74 0 days 00:00:00.529265845 +473 75 0 days 00:00:00.841360306 +473 76 0 days 00:00:00.260747700 +473 77 0 days 00:00:00.588843287 +473 78 0 days 00:00:00.597228612 +473 79 0 days 00:00:00.593916482 +473 80 0 days 00:00:00.455946673 +473 81 0 days 00:00:00.453712813 +473 82 0 days 00:00:00.297840786 +473 83 0 days 00:00:00.496149780 +473 84 0 days 00:00:00.322681176 +473 85 0 days 00:00:00.856569200 +473 86 0 days 00:00:00.454989486 +473 87 0 days 00:00:00.507128660 +473 88 0 days 00:00:00.501690840 +473 89 0 days 00:00:00.449786833 +473 90 0 days 00:00:00.341340046 +473 91 0 days 00:00:00.370240808 +473 92 0 days 00:00:00.313102432 +473 93 0 days 00:00:00.293189113 +473 94 0 days 00:00:00.492109933 +473 95 0 days 00:00:00.669307764 +473 96 0 days 00:00:00.408821782 +473 97 0 days 00:00:00.317524886 +473 98 0 days 00:00:01.002957676 +473 99 0 days 00:00:00.624036990 +474 1 0 days 00:00:00.969344870 +474 2 0 days 00:00:01.113508791 +474 3 0 days 00:00:00.318095620 +474 4 0 days 00:00:00.497161973 +474 5 0 days 00:00:00.551313872 +474 6 0 days 00:00:00.363338880 +474 7 0 days 00:00:00.877544546 +474 8 0 days 00:00:00.711132560 
+474 9 0 days 00:00:00.488689700 +474 10 0 days 00:00:00.299260600 +474 11 0 days 00:00:00.360642206 +474 12 0 days 00:00:00.282533573 +474 13 0 days 00:00:01.070242768 +474 14 0 days 00:00:00.491653100 +474 15 0 days 00:00:01.038660700 +474 16 0 days 00:00:00.471240526 +474 17 0 days 00:00:00.311194020 +474 18 0 days 00:00:01.174233454 +474 19 0 days 00:00:01.155880786 +474 20 0 days 00:00:01.191253062 +474 21 0 days 00:00:00.424133231 +474 22 0 days 00:00:00.459710506 +474 23 0 days 00:00:00.321501724 +474 24 0 days 00:00:00.275235793 +474 25 0 days 00:00:00.910135040 +474 26 0 days 00:00:00.464853580 +474 27 0 days 00:00:01.202875956 +474 28 0 days 00:00:00.652763868 +474 29 0 days 00:00:00.276731366 +474 30 0 days 00:00:01.228641153 +474 31 0 days 00:00:00.909155006 +474 32 0 days 00:00:00.402035485 +474 33 0 days 00:00:00.401661292 +474 34 0 days 00:00:01.218274778 +474 35 0 days 00:00:00.861880153 +474 37 0 days 00:00:00.617156080 +474 38 0 days 00:00:00.280963093 +474 39 0 days 00:00:00.331155617 +474 40 0 days 00:00:01.172133852 +474 41 0 days 00:00:01.220012218 +474 42 0 days 00:00:00.270077046 +474 43 0 days 00:00:00.297636000 +474 44 0 days 00:00:00.464574113 +474 45 0 days 00:00:00.471920546 +474 46 0 days 00:00:01.249508645 +474 47 0 days 00:00:00.528968060 +474 48 0 days 00:00:00.363686746 +474 49 0 days 00:00:01.148785632 +474 50 0 days 00:00:01.106082217 +474 51 0 days 00:00:00.370298752 +474 52 0 days 00:00:00.476890753 +474 53 0 days 00:00:00.856254600 +474 54 0 days 00:00:00.540339130 +474 55 0 days 00:00:00.876427913 +474 56 0 days 00:00:01.140046055 +474 57 0 days 00:00:01.027988655 +474 58 0 days 00:00:00.660239211 +474 59 0 days 00:00:00.611357088 +474 60 0 days 00:00:00.964987958 +474 61 0 days 00:00:00.988178665 +474 62 0 days 00:00:00.930892273 +474 63 0 days 00:00:00.931943473 +474 64 0 days 00:00:01.174977258 +474 65 0 days 00:00:00.307059555 +474 66 0 days 00:00:01.012020093 +474 67 0 days 00:00:00.594943591 +474 68 0 days 
00:00:00.871951220 +474 69 0 days 00:00:00.867872173 +474 70 0 days 00:00:00.480153706 +474 71 0 days 00:00:00.555647300 +474 72 0 days 00:00:01.057432476 +474 73 0 days 00:00:00.311405900 +474 74 0 days 00:00:00.588971655 +474 75 0 days 00:00:00.553829516 +474 76 0 days 00:00:01.183111002 +474 77 0 days 00:00:00.657905850 +474 78 0 days 00:00:00.318180395 +474 79 0 days 00:00:00.271875386 +474 80 0 days 00:00:00.350612693 +474 81 0 days 00:00:01.141811614 +474 82 0 days 00:00:00.485690540 +474 83 0 days 00:00:00.584514172 +474 84 0 days 00:00:00.518356100 +474 85 0 days 00:00:00.911905593 +474 86 0 days 00:00:00.353667112 +474 87 0 days 00:00:01.054936252 +474 88 0 days 00:00:00.509795046 +474 89 0 days 00:00:00.566735900 +474 90 0 days 00:00:00.549229660 +474 91 0 days 00:00:01.290444588 +474 92 0 days 00:00:00.516216073 +474 93 0 days 00:00:00.883699193 +474 94 0 days 00:00:00.354500128 +474 95 0 days 00:00:01.061690696 +474 96 0 days 00:00:01.044643905 +474 97 0 days 00:00:00.676164804 +474 98 0 days 00:00:00.501558400 +474 99 0 days 00:00:00.993243135 +474 100 0 days 00:00:00.893381653 +475 1 0 days 00:00:00.188102866 +475 2 0 days 00:00:00.336517086 +475 3 0 days 00:00:00.114922453 +475 4 0 days 00:00:00.457786426 +475 5 0 days 00:00:00.374151480 +475 6 0 days 00:00:00.214879575 +475 7 0 days 00:00:00.113442006 +475 8 0 days 00:00:00.459584709 +475 9 0 days 00:00:00.376174125 +475 10 0 days 00:00:00.109057253 +475 11 0 days 00:00:00.342902560 +475 12 0 days 00:00:00.188031993 +475 13 0 days 00:00:00.346139666 +475 14 0 days 00:00:00.112422145 +475 15 0 days 00:00:00.331444193 +475 16 0 days 00:00:00.187043266 +475 17 0 days 00:00:00.097172106 +475 18 0 days 00:00:00.335703626 +475 19 0 days 00:00:00.198096400 +475 20 0 days 00:00:00.108171633 +475 21 0 days 00:00:00.213033625 +475 22 0 days 00:00:00.129715296 +475 23 0 days 00:00:00.223042092 +475 24 0 days 00:00:00.112003506 +475 25 0 days 00:00:00.445975728 +475 26 0 days 00:00:00.399721244 +475 27 0 days 
00:00:00.102880393 +475 28 0 days 00:00:00.334102273 +475 29 0 days 00:00:00.433344702 +475 30 0 days 00:00:00.430215034 +475 31 0 days 00:00:00.340490070 +475 32 0 days 00:00:00.209402975 +475 33 0 days 00:00:00.183314980 +475 34 0 days 00:00:00.130087760 +475 35 0 days 00:00:00.188104720 +475 36 0 days 00:00:00.135953146 +475 37 0 days 00:00:00.376651200 +475 38 0 days 00:00:00.336954873 +475 39 0 days 00:00:00.227909524 +475 40 0 days 00:00:00.460612041 +475 41 0 days 00:00:00.185489426 +475 42 0 days 00:00:00.337367160 +475 43 0 days 00:00:00.137268772 +475 44 0 days 00:00:00.123762573 +475 45 0 days 00:00:00.111780533 +475 46 0 days 00:00:00.201202320 +475 47 0 days 00:00:00.104825420 +475 48 0 days 00:00:00.338226313 +475 49 0 days 00:00:00.202080133 +475 50 0 days 00:00:00.439332531 +475 51 0 days 00:00:00.332989613 +475 52 0 days 00:00:00.242255800 +475 53 0 days 00:00:00.264289306 +475 54 0 days 00:00:00.137389894 +475 56 0 days 00:00:00.105691280 +475 57 0 days 00:00:00.187644706 +475 58 0 days 00:00:00.136760310 +475 59 0 days 00:00:00.108913060 +475 60 0 days 00:00:00.342410640 +475 61 0 days 00:00:00.245150560 +475 62 0 days 00:00:00.131975560 +475 63 0 days 00:00:00.133074552 +475 64 0 days 00:00:00.103521353 +475 65 0 days 00:00:00.236662164 +475 66 0 days 00:00:00.233239916 +475 67 0 days 00:00:00.376686400 +475 68 0 days 00:00:00.454603634 +475 69 0 days 00:00:00.107502693 +475 70 0 days 00:00:00.188543506 +475 71 0 days 00:00:00.240363355 +475 72 0 days 00:00:00.247525116 +475 73 0 days 00:00:00.451615420 +475 74 0 days 00:00:00.188959053 +475 75 0 days 00:00:00.410888969 +475 76 0 days 00:00:00.129849813 +475 77 0 days 00:00:00.341824333 +475 78 0 days 00:00:00.106887806 +475 79 0 days 00:00:00.102531566 +475 80 0 days 00:00:00.448889348 +475 81 0 days 00:00:00.231533343 +475 82 0 days 00:00:00.342568913 +475 83 0 days 00:00:00.111483266 +475 84 0 days 00:00:00.254673708 +475 85 0 days 00:00:00.103031200 +475 86 0 days 00:00:00.138943670 +475 87 
0 days 00:00:00.098220166 +475 88 0 days 00:00:00.222090616 +475 89 0 days 00:00:00.336991153 +475 90 0 days 00:00:00.137203315 +475 91 0 days 00:00:00.108054386 +475 92 0 days 00:00:00.448593938 +475 93 0 days 00:00:00.235497200 +475 94 0 days 00:00:00.108232646 +475 95 0 days 00:00:00.408961842 +475 96 0 days 00:00:00.230973906 +475 97 0 days 00:00:00.133943243 +475 98 0 days 00:00:00.111612593 +475 99 0 days 00:00:00.132688848 +476 1 0 days 00:00:00.103980740 +476 2 0 days 00:00:00.374299853 +476 3 0 days 00:00:00.176424480 +476 4 0 days 00:00:00.371891186 +476 5 0 days 00:00:00.108323020 +476 6 0 days 00:00:00.197810653 +476 7 0 days 00:00:00.096687806 +476 8 0 days 00:00:00.096829020 +476 9 0 days 00:00:00.132257000 +476 10 0 days 00:00:00.124689553 +476 11 0 days 00:00:00.420607350 +476 12 0 days 00:00:00.401757915 +476 13 0 days 00:00:00.436196288 +476 14 0 days 00:00:00.225921380 +476 15 0 days 00:00:00.394445752 +476 16 0 days 00:00:00.240274724 +476 17 0 days 00:00:00.222789497 +476 18 0 days 00:00:00.120443880 +476 19 0 days 00:00:00.488114969 +476 20 0 days 00:00:00.195043706 +476 21 0 days 00:00:00.110536673 +476 24 0 days 00:00:00.193006773 +476 25 0 days 00:00:00.217024476 +476 28 0 days 00:00:00.140861758 +476 29 0 days 00:00:00.207910506 +476 30 0 days 00:00:00.419444116 +476 31 0 days 00:00:00.436200292 +476 32 0 days 00:00:00.371186240 +476 33 0 days 00:00:00.111647146 +476 34 0 days 00:00:00.412912540 +476 35 0 days 00:00:00.206711950 +476 36 0 days 00:00:00.110835706 +476 37 0 days 00:00:00.379946535 +476 38 0 days 00:00:00.242483748 +476 39 0 days 00:00:00.222564236 +476 40 0 days 00:00:00.244353965 +476 41 0 days 00:00:00.454051976 +476 42 0 days 00:00:00.188046060 +476 43 0 days 00:00:00.178104513 +476 44 0 days 00:00:00.434237288 +476 45 0 days 00:00:00.107048700 +476 46 0 days 00:00:00.111741185 +476 47 0 days 00:00:00.120995550 +476 48 0 days 00:00:00.483836887 +476 49 0 days 00:00:00.098739893 +476 50 0 days 00:00:00.360136126 +476 51 0 
days 00:00:00.358738873 +476 52 0 days 00:00:00.247896272 +476 53 0 days 00:00:00.260475609 +476 54 0 days 00:00:00.115605380 +476 55 0 days 00:00:00.209736240 +476 56 0 days 00:00:00.176858240 +476 57 0 days 00:00:00.199448053 +476 58 0 days 00:00:00.366019133 +476 59 0 days 00:00:00.434786300 +476 60 0 days 00:00:00.204122725 +476 61 0 days 00:00:00.198799466 +476 62 0 days 00:00:00.430451231 +476 63 0 days 00:00:00.363932480 +476 64 0 days 00:00:00.134213273 +476 65 0 days 00:00:00.457194457 +476 66 0 days 00:00:00.127583240 +476 67 0 days 00:00:00.186021080 +476 68 0 days 00:00:00.193766580 +476 69 0 days 00:00:00.434685331 +476 70 0 days 00:00:00.197642080 +476 71 0 days 00:00:00.468050664 +476 72 0 days 00:00:00.193918380 +476 73 0 days 00:00:00.132905080 +476 74 0 days 00:00:00.200406146 +476 75 0 days 00:00:00.122846913 +476 76 0 days 00:00:00.154490380 +476 77 0 days 00:00:00.107143560 +476 78 0 days 00:00:00.364034353 +476 79 0 days 00:00:00.210504626 +476 80 0 days 00:00:00.418397362 +476 81 0 days 00:00:00.115168093 +476 82 0 days 00:00:00.358881300 +476 83 0 days 00:00:00.324173500 +476 84 0 days 00:00:00.143696886 +476 85 0 days 00:00:00.366251893 +476 86 0 days 00:00:00.425534866 +476 87 0 days 00:00:00.224199252 +476 88 0 days 00:00:00.198176226 +476 89 0 days 00:00:00.200166506 +476 90 0 days 00:00:00.114231570 +476 91 0 days 00:00:00.189391766 +476 92 0 days 00:00:00.198453435 +476 93 0 days 00:00:00.364053273 +476 94 0 days 00:00:00.159874792 +476 95 0 days 00:00:00.339756986 +476 96 0 days 00:00:00.267660589 +476 97 0 days 00:00:00.108584986 +476 98 0 days 00:00:00.365936033 +476 99 0 days 00:00:00.247742301 +476 100 0 days 00:00:00.260396153 +477 2 0 days 00:00:01.935807253 +477 3 0 days 00:00:00.809999244 +477 4 0 days 00:00:00.469048277 +477 5 0 days 00:00:01.608652660 +477 6 0 days 00:00:01.822512563 +477 8 0 days 00:00:00.739935753 +477 9 0 days 00:00:00.412450593 +477 10 0 days 00:00:00.376840706 +477 11 0 days 00:00:00.738996235 +477 12 0 
days 00:00:01.722363446 +477 13 0 days 00:00:01.000873141 +477 14 0 days 00:00:00.534845572 +477 15 0 days 00:00:00.471117232 +477 16 0 days 00:00:01.789946066 +477 17 0 days 00:00:01.980444166 +477 18 0 days 00:00:01.755599170 +477 19 0 days 00:00:00.371423566 +477 20 0 days 00:00:01.040121796 +477 21 0 days 00:00:01.747018148 +477 23 0 days 00:00:00.569666113 +477 24 0 days 00:00:00.747132745 +477 25 0 days 00:00:00.651900527 +477 26 0 days 00:00:01.679571108 +477 27 0 days 00:00:00.396857440 +477 28 0 days 00:00:00.441868866 +477 29 0 days 00:00:01.843561541 +477 30 0 days 00:00:00.461275026 +477 31 0 days 00:00:01.849288584 +477 32 0 days 00:00:00.472342740 +477 33 0 days 00:00:01.781019155 +477 34 0 days 00:00:01.953419870 +477 35 0 days 00:00:00.814459244 +477 36 0 days 00:00:01.757513348 +477 37 0 days 00:00:01.804104011 +477 38 0 days 00:00:01.430095173 +477 39 0 days 00:00:00.764978740 +477 41 0 days 00:00:00.402752946 +477 42 0 days 00:00:01.826481771 +477 43 0 days 00:00:00.397532973 +477 44 0 days 00:00:00.991690990 +477 45 0 days 00:00:01.053976322 +477 47 0 days 00:00:00.473013406 +477 48 0 days 00:00:00.378738073 +477 49 0 days 00:00:00.497057830 +477 51 0 days 00:00:00.928986932 +477 52 0 days 00:00:00.519205503 +477 55 0 days 00:00:00.749356573 +477 56 0 days 00:00:00.403594673 +477 57 0 days 00:00:01.044040494 +477 58 0 days 00:00:01.819415500 +477 59 0 days 00:00:00.488737542 +477 60 0 days 00:00:02.041196760 +477 61 0 days 00:00:01.947437166 +477 62 0 days 00:00:01.015388440 +477 63 0 days 00:00:00.554720410 +477 64 0 days 00:00:01.928674727 +477 65 0 days 00:00:00.369011360 +477 66 0 days 00:00:00.485135204 +477 67 0 days 00:00:01.813032934 +477 69 0 days 00:00:00.527605173 +477 71 0 days 00:00:00.540450451 +477 72 0 days 00:00:00.505175120 +477 73 0 days 00:00:01.938172529 +477 74 0 days 00:00:01.005275492 +477 75 0 days 00:00:01.489216870 +477 76 0 days 00:00:00.381672606 +477 77 0 days 00:00:01.050011333 +477 80 0 days 00:00:01.801548005 
+477 81 0 days 00:00:00.450084803 +477 82 0 days 00:00:01.044473107 +477 83 0 days 00:00:01.906443255 +477 84 0 days 00:00:01.041680262 +477 85 0 days 00:00:00.392338353 +477 86 0 days 00:00:01.794468753 +477 88 0 days 00:00:00.384978093 +477 91 0 days 00:00:00.579938102 +477 93 0 days 00:00:00.386928146 +477 94 0 days 00:00:00.483729642 +477 95 0 days 00:00:00.838616614 +477 96 0 days 00:00:00.984561915 +477 97 0 days 00:00:02.040316691 +477 98 0 days 00:00:00.466539293 +477 99 0 days 00:00:01.601314716 +477 100 0 days 00:00:00.437258746 +478 1 0 days 00:00:00.762649540 +478 3 0 days 00:00:00.433606320 +478 4 0 days 00:00:01.050915949 +478 5 0 days 00:00:00.290570411 +478 7 0 days 00:00:00.515703128 +478 10 0 days 00:00:00.280703414 +478 11 0 days 00:00:00.207621240 +478 12 0 days 00:00:00.189776420 +478 13 0 days 00:00:00.734357945 +478 14 0 days 00:00:00.232415800 +478 16 0 days 00:00:00.243671357 +478 18 0 days 00:00:00.640114820 +478 20 0 days 00:00:00.459286254 +478 21 0 days 00:00:00.258593673 +478 23 0 days 00:00:00.669762320 +478 24 0 days 00:00:00.389797485 +478 25 0 days 00:00:00.393363336 +478 27 0 days 00:00:00.699788810 +478 28 0 days 00:00:00.247545122 +478 29 0 days 00:00:00.607138833 +478 31 0 days 00:00:00.207273175 +478 33 0 days 00:00:00.642064660 +478 34 0 days 00:00:00.246056643 +478 36 0 days 00:00:00.424654352 +478 37 0 days 00:00:00.485278298 +478 39 0 days 00:00:00.877247951 +478 40 0 days 00:00:00.249259871 +478 41 0 days 00:00:00.252578153 +478 42 0 days 00:00:00.414033684 +478 43 0 days 00:00:00.193987446 +478 44 0 days 00:00:00.465160466 +478 45 0 days 00:00:00.224779782 +478 46 0 days 00:00:00.660372373 +478 48 0 days 00:00:00.201330273 +478 49 0 days 00:00:00.192857900 +478 50 0 days 00:00:00.226082465 +478 51 0 days 00:00:00.202854840 +478 52 0 days 00:00:00.411817816 +478 54 0 days 00:00:00.427037940 +478 56 0 days 00:00:00.826546027 +478 57 0 days 00:00:00.842266092 +478 58 0 days 00:00:00.424731448 +478 59 0 days 
00:00:00.611256260 +478 60 0 days 00:00:00.415640500 +478 62 0 days 00:00:00.227195900 +478 63 0 days 00:00:00.896854150 +478 64 0 days 00:00:00.881431983 +478 65 0 days 00:00:00.899875208 +478 66 0 days 00:00:00.906052024 +478 67 0 days 00:00:00.391117508 +478 68 0 days 00:00:00.384529460 +478 69 0 days 00:00:00.210627546 +478 70 0 days 00:00:00.836670080 +478 72 0 days 00:00:00.196469466 +478 74 0 days 00:00:00.237143637 +478 75 0 days 00:00:00.240398557 +478 76 0 days 00:00:00.195521373 +478 77 0 days 00:00:00.414277040 +478 78 0 days 00:00:00.414085672 +478 79 0 days 00:00:00.897052369 +478 80 0 days 00:00:00.405092624 +478 81 0 days 00:00:00.207331773 +478 84 0 days 00:00:00.201312850 +478 85 0 days 00:00:00.440974980 +478 87 0 days 00:00:00.847393126 +478 88 0 days 00:00:00.382385012 +478 89 0 days 00:00:00.888082255 +478 90 0 days 00:00:00.475932342 +478 91 0 days 00:00:00.911792115 +478 92 0 days 00:00:00.843524458 +478 93 0 days 00:00:00.188832906 +478 94 0 days 00:00:00.224476771 +478 95 0 days 00:00:00.226525548 +478 96 0 days 00:00:00.242138691 +478 97 0 days 00:00:00.424985503 +478 98 0 days 00:00:00.647631473 +478 100 0 days 00:00:00.195491800 +479 1 0 days 00:00:00.126530340 +479 2 0 days 00:00:00.190400077 +479 3 0 days 00:00:00.146867108 +479 4 0 days 00:00:00.154515493 +479 5 0 days 00:00:00.127245940 +479 6 0 days 00:00:00.171717170 +479 7 0 days 00:00:00.123705811 +479 8 0 days 00:00:00.197282540 +479 9 0 days 00:00:00.112637220 +479 10 0 days 00:00:00.129948655 +479 11 0 days 00:00:00.129369283 +479 12 0 days 00:00:00.146502804 +479 13 0 days 00:00:00.128528740 +479 14 0 days 00:00:00.194269217 +479 15 0 days 00:00:00.187151010 +479 16 0 days 00:00:00.175740576 +479 17 0 days 00:00:00.123424268 +479 18 0 days 00:00:00.166007883 +479 19 0 days 00:00:00.126175941 +479 20 0 days 00:00:00.206255955 +479 21 0 days 00:00:00.188733660 +479 22 0 days 00:00:00.172835220 +479 23 0 days 00:00:00.137584156 +479 24 0 days 00:00:00.124460433 +479 25 0 days 
00:00:00.147860933 +479 26 0 days 00:00:00.122527508 +479 27 0 days 00:00:00.107673500 +479 28 0 days 00:00:00.142013566 +479 29 0 days 00:00:00.121761045 +479 30 0 days 00:00:00.154631085 +479 31 0 days 00:00:00.206490784 +479 32 0 days 00:00:00.129698030 +479 33 0 days 00:00:00.196219538 +479 34 0 days 00:00:00.130341158 +479 35 0 days 00:00:00.108325992 +479 36 0 days 00:00:00.126452803 +479 37 0 days 00:00:00.124355302 +479 38 0 days 00:00:00.146029436 +479 39 0 days 00:00:00.168992372 +479 40 0 days 00:00:00.118101034 +479 41 0 days 00:00:00.189107680 +479 42 0 days 00:00:00.124258483 +479 43 0 days 00:00:00.198888778 +479 44 0 days 00:00:00.128504325 +479 45 0 days 00:00:00.128337130 +479 46 0 days 00:00:00.114812577 +479 47 0 days 00:00:00.188244095 +479 48 0 days 00:00:00.148347380 +479 49 0 days 00:00:00.122403196 +479 50 0 days 00:00:00.181222464 +479 51 0 days 00:00:00.211534220 +479 52 0 days 00:00:00.123735933 +479 53 0 days 00:00:00.183549377 +479 54 0 days 00:00:00.134467164 +479 55 0 days 00:00:00.129527975 +479 56 0 days 00:00:00.189490591 +479 57 0 days 00:00:00.174790210 +479 58 0 days 00:00:00.118228268 +479 59 0 days 00:00:00.115100788 +479 60 0 days 00:00:00.138432168 +479 61 0 days 00:00:00.177097332 +479 62 0 days 00:00:00.117249616 +479 63 0 days 00:00:00.127287083 +479 64 0 days 00:00:00.180236404 +479 65 0 days 00:00:00.130132820 +479 66 0 days 00:00:00.175365392 +479 67 0 days 00:00:00.117568748 +479 68 0 days 00:00:00.167900840 +479 69 0 days 00:00:00.178026516 +479 70 0 days 00:00:00.191542360 +479 71 0 days 00:00:00.209083196 +479 72 0 days 00:00:00.185925260 +479 73 0 days 00:00:00.147200441 +479 74 0 days 00:00:00.127444646 +479 75 0 days 00:00:00.137268764 +479 76 0 days 00:00:00.113191940 +479 77 0 days 00:00:00.145384050 +479 78 0 days 00:00:00.172429590 +479 79 0 days 00:00:00.158237661 +479 80 0 days 00:00:00.118763765 +479 81 0 days 00:00:00.163904665 +479 82 0 days 00:00:00.172910410 +479 83 0 days 00:00:00.106657075 +479 84 
0 days 00:00:00.131618564 +479 85 0 days 00:00:00.110693936 +479 86 0 days 00:00:00.180205290 +479 87 0 days 00:00:00.169893212 +479 88 0 days 00:00:00.122841608 +479 89 0 days 00:00:00.104287085 +479 90 0 days 00:00:00.143973196 +479 91 0 days 00:00:00.119382765 +479 92 0 days 00:00:00.142715756 +479 93 0 days 00:00:00.145847225 +479 94 0 days 00:00:00.139119686 +479 95 0 days 00:00:00.169783120 +479 97 0 days 00:00:00.129137595 +479 98 0 days 00:00:00.128278195 +479 99 0 days 00:00:00.156570636 +479 100 0 days 00:00:00.190881931 +480 1 0 days 00:00:00.126769821 +480 2 0 days 00:00:00.158736006 +480 3 0 days 00:00:00.175213765 +480 4 0 days 00:00:00.100002513 +480 5 0 days 00:00:00.195939720 +480 7 0 days 00:00:00.197748056 +480 8 0 days 00:00:00.194690387 +480 9 0 days 00:00:00.141463100 +480 10 0 days 00:00:00.186559064 +480 11 0 days 00:00:00.132089215 +480 12 0 days 00:00:00.129522840 +480 13 0 days 00:00:00.197897671 +480 14 0 days 00:00:00.111754228 +480 15 0 days 00:00:00.178549020 +480 16 0 days 00:00:00.138357484 +480 17 0 days 00:00:00.143724410 +480 18 0 days 00:00:00.121929996 +480 19 0 days 00:00:00.182399800 +480 20 0 days 00:00:00.184138210 +480 21 0 days 00:00:00.146267205 +480 22 0 days 00:00:00.103003593 +480 23 0 days 00:00:00.168281930 +480 24 0 days 00:00:00.160292346 +480 25 0 days 00:00:00.125258120 +480 26 0 days 00:00:00.114452436 +480 27 0 days 00:00:00.132001274 +480 28 0 days 00:00:00.126216454 +480 29 0 days 00:00:00.172690250 +480 30 0 days 00:00:00.108638035 +480 31 0 days 00:00:00.138417332 +480 32 0 days 00:00:00.168557025 +480 33 0 days 00:00:00.199626062 +480 34 0 days 00:00:00.114155280 +480 35 0 days 00:00:00.107630235 +480 36 0 days 00:00:00.124133204 +480 37 0 days 00:00:00.196161250 +480 38 0 days 00:00:00.098386846 +480 39 0 days 00:00:00.188961623 +480 40 0 days 00:00:00.193988617 +480 41 0 days 00:00:00.134053175 +480 42 0 days 00:00:00.152518360 +480 43 0 days 00:00:00.130337365 +480 44 0 days 00:00:00.169254130 +480 45 
0 days 00:00:00.140541880 +480 46 0 days 00:00:00.206015576 +480 47 0 days 00:00:00.153407244 +480 48 0 days 00:00:00.126684965 +480 49 0 days 00:00:00.173083076 +480 50 0 days 00:00:00.198403460 +480 51 0 days 00:00:00.156255731 +480 52 0 days 00:00:00.191420890 +480 53 0 days 00:00:00.153529150 +480 54 0 days 00:00:00.140055752 +480 55 0 days 00:00:00.181066792 +480 56 0 days 00:00:00.123023208 +480 57 0 days 00:00:00.156864986 +480 58 0 days 00:00:00.131148225 +480 59 0 days 00:00:00.121950877 +480 60 0 days 00:00:00.130509195 +480 61 0 days 00:00:00.114290196 +480 62 0 days 00:00:00.175949345 +480 63 0 days 00:00:00.114996192 +480 64 0 days 00:00:00.190183943 +480 65 0 days 00:00:00.155777182 +480 66 0 days 00:00:00.119891782 +480 67 0 days 00:00:00.173248616 +480 68 0 days 00:00:00.096623240 +480 69 0 days 00:00:00.146837822 +480 70 0 days 00:00:00.127390825 +480 71 0 days 00:00:00.098943873 +480 72 0 days 00:00:00.141395468 +480 73 0 days 00:00:00.158267393 +480 74 0 days 00:00:00.149511957 +480 75 0 days 00:00:00.130269880 +480 76 0 days 00:00:00.134162005 +480 77 0 days 00:00:00.186555300 +480 78 0 days 00:00:00.211086296 +480 79 0 days 00:00:00.130796415 +480 80 0 days 00:00:00.126454540 +480 81 0 days 00:00:00.124471820 +480 82 0 days 00:00:00.154730846 +480 83 0 days 00:00:00.136394244 +480 84 0 days 00:00:00.107730055 +480 85 0 days 00:00:00.203956985 +480 86 0 days 00:00:00.140151648 +480 87 0 days 00:00:00.129512750 +480 88 0 days 00:00:00.129007585 +480 89 0 days 00:00:00.204048920 +480 90 0 days 00:00:00.098056693 +480 91 0 days 00:00:00.144971336 +480 92 0 days 00:00:00.188751896 +480 93 0 days 00:00:00.116397340 +480 94 0 days 00:00:00.115517816 +480 95 0 days 00:00:00.129061765 +480 96 0 days 00:00:00.135583648 +480 97 0 days 00:00:00.139551133 +480 98 0 days 00:00:00.140921864 +480 99 0 days 00:00:00.180401492 +480 100 0 days 00:00:00.118202255 +481 1 0 days 00:00:00.072443975 +481 2 0 days 00:00:00.078838072 +481 3 0 days 00:00:00.073824586 
+481 4 0 days 00:00:00.083784016 +481 5 0 days 00:00:00.097833120 +481 6 0 days 00:00:00.078141060 +481 7 0 days 00:00:00.104202750 +481 8 0 days 00:00:00.069833310 +481 9 0 days 00:00:00.097886064 +481 10 0 days 00:00:00.083133562 +481 11 0 days 00:00:00.097285415 +481 12 0 days 00:00:00.092346551 +481 13 0 days 00:00:00.076989536 +481 14 0 days 00:00:00.069828160 +481 15 0 days 00:00:00.063810324 +481 16 0 days 00:00:00.072112100 +481 17 0 days 00:00:00.066990468 +481 18 0 days 00:00:00.077701455 +481 19 0 days 00:00:00.062431540 +481 20 0 days 00:00:00.085780097 +481 21 0 days 00:00:00.076728900 +481 22 0 days 00:00:00.110217460 +481 23 0 days 00:00:00.080726535 +481 24 0 days 00:00:00.106272573 +481 25 0 days 00:00:00.060722070 +481 26 0 days 00:00:00.067847995 +481 27 0 days 00:00:00.060753510 +481 28 0 days 00:00:00.071442925 +481 29 0 days 00:00:00.110683886 +481 30 0 days 00:00:00.068306173 +481 31 0 days 00:00:00.062573132 +481 32 0 days 00:00:00.078020773 +481 33 0 days 00:00:00.077031290 +481 34 0 days 00:00:00.071342648 +481 35 0 days 00:00:00.105109477 +481 36 0 days 00:00:00.097439960 +481 37 0 days 00:00:00.097170135 +481 38 0 days 00:00:00.062149656 +481 39 0 days 00:00:00.072013375 +481 40 0 days 00:00:00.065220126 +481 41 0 days 00:00:00.066475900 +481 42 0 days 00:00:00.059809220 +481 43 0 days 00:00:00.116227591 +481 44 0 days 00:00:00.067143930 +481 45 0 days 00:00:00.074520912 +481 46 0 days 00:00:00.066198216 +481 47 0 days 00:00:00.069571421 +481 48 0 days 00:00:00.072752230 +481 49 0 days 00:00:00.116934856 +481 50 0 days 00:00:00.067382685 +481 51 0 days 00:00:00.067089275 +481 52 0 days 00:00:00.067613085 +481 53 0 days 00:00:00.082474542 +481 54 0 days 00:00:00.072489775 +481 55 0 days 00:00:00.064335108 +481 56 0 days 00:00:00.087817490 +481 57 0 days 00:00:00.096631050 +481 58 0 days 00:00:00.100993568 +481 59 0 days 00:00:00.111902805 +481 60 0 days 00:00:00.061408050 +481 61 0 days 00:00:00.079109426 +481 62 0 days 00:00:00.063772780 
+481 63 0 days 00:00:00.069545996 +481 64 0 days 00:00:00.101187080 +481 65 0 days 00:00:00.070104585 +481 66 0 days 00:00:00.082185130 +481 67 0 days 00:00:00.103030100 +481 68 0 days 00:00:00.115540568 +481 69 0 days 00:00:00.083711180 +481 70 0 days 00:00:00.069272004 +481 71 0 days 00:00:00.093721450 +481 72 0 days 00:00:00.097870395 +481 73 0 days 00:00:00.066510214 +481 74 0 days 00:00:00.061846035 +481 75 0 days 00:00:00.066915917 +481 76 0 days 00:00:00.094685420 +481 77 0 days 00:00:00.100475748 +481 78 0 days 00:00:00.075391090 +481 79 0 days 00:00:00.110502711 +481 80 0 days 00:00:00.100761080 +481 81 0 days 00:00:00.086357668 +481 82 0 days 00:00:00.113410708 +481 83 0 days 00:00:00.087963346 +481 84 0 days 00:00:00.068410585 +481 85 0 days 00:00:00.086306852 +481 86 0 days 00:00:00.080301032 +481 87 0 days 00:00:00.097858264 +481 88 0 days 00:00:00.105583076 +481 89 0 days 00:00:00.079004935 +481 90 0 days 00:00:00.106147010 +481 91 0 days 00:00:00.089559050 +481 92 0 days 00:00:00.095638060 +481 93 0 days 00:00:00.072108755 +481 94 0 days 00:00:00.069839172 +481 95 0 days 00:00:00.102632470 +481 96 0 days 00:00:00.068338864 +481 97 0 days 00:00:00.063888420 +481 98 0 days 00:00:00.099686564 +481 100 0 days 00:00:00.077480796 +482 1 0 days 00:00:00.057449290 +482 2 0 days 00:00:00.102272780 +482 3 0 days 00:00:00.097952005 +482 4 0 days 00:00:00.108779945 +482 5 0 days 00:00:00.070891760 +482 6 0 days 00:00:00.112926070 +482 7 0 days 00:00:00.079551842 +482 8 0 days 00:00:00.064177176 +482 9 0 days 00:00:00.097451660 +482 10 0 days 00:00:00.065535828 +482 11 0 days 00:00:00.065923623 +482 12 0 days 00:00:00.085133260 +482 13 0 days 00:00:00.061776075 +482 14 0 days 00:00:00.068277082 +482 15 0 days 00:00:00.061041225 +482 16 0 days 00:00:00.111330295 +482 17 0 days 00:00:00.076874235 +482 18 0 days 00:00:00.113982106 +482 20 0 days 00:00:00.076318542 +482 21 0 days 00:00:00.116589008 +482 22 0 days 00:00:00.064682308 +482 23 0 days 00:00:00.064341276 
+482 24 0 days 00:00:00.089992515 +482 25 0 days 00:00:00.069966818 +482 26 0 days 00:00:00.126117250 +482 27 0 days 00:00:00.064272992 +482 28 0 days 00:00:00.079668000 +482 29 0 days 00:00:00.073463670 +482 30 0 days 00:00:00.096137366 +482 31 0 days 00:00:00.063600645 +482 32 0 days 00:00:00.066916370 +482 33 0 days 00:00:00.099977428 +482 34 0 days 00:00:00.067830240 +482 35 0 days 00:00:00.080064872 +482 36 0 days 00:00:00.088227960 +482 37 0 days 00:00:00.112159890 +482 38 0 days 00:00:00.072026811 +482 39 0 days 00:00:00.114117198 +482 40 0 days 00:00:00.069658676 +482 41 0 days 00:00:00.099639225 +482 42 0 days 00:00:00.123542050 +482 43 0 days 00:00:00.115122113 +482 44 0 days 00:00:00.065467864 +482 45 0 days 00:00:00.102270468 +482 46 0 days 00:00:00.094995065 +482 47 0 days 00:00:00.066744944 +482 48 0 days 00:00:00.101409105 +482 49 0 days 00:00:00.072340875 +482 50 0 days 00:00:00.061784780 +482 51 0 days 00:00:00.069483315 +482 52 0 days 00:00:00.061518910 +482 53 0 days 00:00:00.074336155 +482 54 0 days 00:00:00.116546255 +482 55 0 days 00:00:00.070206244 +482 56 0 days 00:00:00.074467000 +482 57 0 days 00:00:00.102826780 +482 58 0 days 00:00:00.102799424 +482 59 0 days 00:00:00.074356195 +482 60 0 days 00:00:00.067674588 +482 61 0 days 00:00:00.072482075 +482 62 0 days 00:00:00.064746716 +482 63 0 days 00:00:00.076860565 +482 64 0 days 00:00:00.064401316 +482 65 0 days 00:00:00.121057206 +482 66 0 days 00:00:00.101410672 +482 67 0 days 00:00:00.071303976 +482 68 0 days 00:00:00.111189471 +482 69 0 days 00:00:00.065361532 +482 70 0 days 00:00:00.065970512 +482 71 0 days 00:00:00.108693168 +482 72 0 days 00:00:00.093257013 +482 73 0 days 00:00:00.062264740 +482 74 0 days 00:00:00.063585255 +482 75 0 days 00:00:00.075997037 +482 76 0 days 00:00:00.080834814 +482 77 0 days 00:00:00.074249390 +482 78 0 days 00:00:00.066876813 +482 79 0 days 00:00:00.078646364 +482 80 0 days 00:00:00.097237090 +482 81 0 days 00:00:00.096635288 +482 82 0 days 
00:00:00.088229590 +482 83 0 days 00:00:00.072981190 +482 84 0 days 00:00:00.066530525 +482 85 0 days 00:00:00.082131264 +482 86 0 days 00:00:00.067077600 +482 87 0 days 00:00:00.098080050 +482 88 0 days 00:00:00.071784402 +482 89 0 days 00:00:00.081479716 +482 90 0 days 00:00:00.112559547 +482 91 0 days 00:00:00.066205656 +482 92 0 days 00:00:00.067878153 +482 93 0 days 00:00:00.068859455 +482 94 0 days 00:00:00.103904680 +482 95 0 days 00:00:00.109142550 +482 96 0 days 00:00:00.094708135 +482 97 0 days 00:00:00.065155404 +482 98 0 days 00:00:00.083952185 +482 99 0 days 00:00:00.070481805 +482 100 0 days 00:00:00.067141532 +483 1 0 days 00:00:00.129133640 +483 2 0 days 00:00:00.144859574 +483 3 0 days 00:00:00.204095224 +483 4 0 days 00:00:00.120039157 +483 5 0 days 00:00:00.150065975 +483 6 0 days 00:00:00.188536188 +483 7 0 days 00:00:00.107566525 +483 8 0 days 00:00:00.145903472 +483 9 0 days 00:00:00.198810938 +483 10 0 days 00:00:00.127256323 +483 11 0 days 00:00:00.150636533 +483 12 0 days 00:00:00.152602593 +483 13 0 days 00:00:00.105785465 +483 14 0 days 00:00:00.149521470 +483 15 0 days 00:00:00.198824441 +483 16 0 days 00:00:00.184368450 +483 17 0 days 00:00:00.177841164 +483 18 0 days 00:00:00.200270835 +483 19 0 days 00:00:00.150147688 +483 20 0 days 00:00:00.203332054 +483 21 0 days 00:00:00.142720168 +483 23 0 days 00:00:00.190266820 +483 24 0 days 00:00:00.189853482 +483 25 0 days 00:00:00.148373058 +483 26 0 days 00:00:00.197267205 +483 27 0 days 00:00:00.112196096 +483 28 0 days 00:00:00.124141500 +483 29 0 days 00:00:00.151434728 +483 30 0 days 00:00:00.127814995 +483 31 0 days 00:00:00.173314512 +483 32 0 days 00:00:00.173690328 +483 33 0 days 00:00:00.122390153 +483 34 0 days 00:00:00.121026358 +483 35 0 days 00:00:00.139552953 +483 36 0 days 00:00:00.184796590 +483 37 0 days 00:00:00.121569264 +483 38 0 days 00:00:00.178434800 +483 39 0 days 00:00:00.188196048 +483 40 0 days 00:00:00.188698822 +483 41 0 days 00:00:00.146530370 +483 42 0 days 
00:00:00.194238151 +483 43 0 days 00:00:00.140133785 +483 44 0 days 00:00:00.154563622 +483 45 0 days 00:00:00.150846796 +483 46 0 days 00:00:00.203901169 +483 47 0 days 00:00:00.145381920 +483 48 0 days 00:00:00.189882514 +483 49 0 days 00:00:00.120162850 +483 50 0 days 00:00:00.145584493 +483 51 0 days 00:00:00.120802660 +483 52 0 days 00:00:00.151393326 +483 53 0 days 00:00:00.118578637 +483 54 0 days 00:00:00.152398718 +483 55 0 days 00:00:00.118108782 +483 56 0 days 00:00:00.128529765 +483 58 0 days 00:00:00.184340414 +483 59 0 days 00:00:00.140212386 +483 60 0 days 00:00:00.188339048 +483 61 0 days 00:00:00.201845641 +483 62 0 days 00:00:00.120411864 +483 63 0 days 00:00:00.125320712 +483 64 0 days 00:00:00.144010080 +483 65 0 days 00:00:00.151350606 +483 66 0 days 00:00:00.203819768 +483 68 0 days 00:00:00.190866653 +483 69 0 days 00:00:00.120405207 +483 70 0 days 00:00:00.125074461 +483 71 0 days 00:00:00.190054368 +483 72 0 days 00:00:00.153783605 +483 73 0 days 00:00:00.122502745 +483 74 0 days 00:00:00.122275086 +483 75 0 days 00:00:00.128375730 +483 76 0 days 00:00:00.118472308 +483 77 0 days 00:00:00.124026546 +483 78 0 days 00:00:00.190132537 +483 79 0 days 00:00:00.155297493 +483 80 0 days 00:00:00.127146537 +483 81 0 days 00:00:00.119220867 +483 82 0 days 00:00:00.112865045 +483 83 0 days 00:00:00.146409826 +483 84 0 days 00:00:00.196988111 +483 85 0 days 00:00:00.195534634 +483 86 0 days 00:00:00.154076468 +483 87 0 days 00:00:00.128248055 +483 88 0 days 00:00:00.189184677 +483 89 0 days 00:00:00.182225843 +483 90 0 days 00:00:00.120313675 +483 91 0 days 00:00:00.197146614 +483 93 0 days 00:00:00.147751846 +483 94 0 days 00:00:00.200340532 +483 95 0 days 00:00:00.150547706 +483 96 0 days 00:00:00.202003930 +483 97 0 days 00:00:00.154460897 +483 98 0 days 00:00:00.140480555 +483 99 0 days 00:00:00.145666792 +483 100 0 days 00:00:00.196222120 +484 1 0 days 00:00:00.095279310 +484 2 0 days 00:00:00.104423000 +484 3 0 days 00:00:00.111228335 +484 4 0 
days 00:00:00.066710303 +484 5 0 days 00:00:00.112502716 +484 6 0 days 00:00:00.061782955 +484 7 0 days 00:00:00.106603397 +484 8 0 days 00:00:00.057277360 +484 9 0 days 00:00:00.070135292 +484 10 0 days 00:00:00.070967656 +484 11 0 days 00:00:00.064133940 +484 12 0 days 00:00:00.099517540 +484 13 0 days 00:00:00.111847206 +484 14 0 days 00:00:00.108537918 +484 15 0 days 00:00:00.071710355 +484 16 0 days 00:00:00.079602457 +484 18 0 days 00:00:00.110103832 +484 19 0 days 00:00:00.104741636 +484 20 0 days 00:00:00.107584442 +484 21 0 days 00:00:00.070356438 +484 22 0 days 00:00:00.110167176 +484 23 0 days 00:00:00.083714936 +484 24 0 days 00:00:00.064140688 +484 25 0 days 00:00:00.066548006 +484 26 0 days 00:00:00.075617292 +484 27 0 days 00:00:00.068537060 +484 28 0 days 00:00:00.108253562 +484 29 0 days 00:00:00.101358516 +484 30 0 days 00:00:00.066792553 +484 31 0 days 00:00:00.070849285 +484 32 0 days 00:00:00.103949850 +484 33 0 days 00:00:00.061021795 +484 34 0 days 00:00:00.084899452 +484 35 0 days 00:00:00.061411005 +484 36 0 days 00:00:00.053111473 +484 37 0 days 00:00:00.078442368 +484 38 0 days 00:00:00.068922900 +484 39 0 days 00:00:00.106600568 +484 40 0 days 00:00:00.079454231 +484 42 0 days 00:00:00.075396980 +484 43 0 days 00:00:00.106336357 +484 44 0 days 00:00:00.112597700 +484 45 0 days 00:00:00.078863756 +484 46 0 days 00:00:00.071468497 +484 47 0 days 00:00:00.114427904 +484 48 0 days 00:00:00.066481686 +484 49 0 days 00:00:00.115399126 +484 50 0 days 00:00:00.075628636 +484 51 0 days 00:00:00.061325685 +484 52 0 days 00:00:00.088365333 +484 53 0 days 00:00:00.092039255 +484 54 0 days 00:00:00.097061515 +484 55 0 days 00:00:00.061861470 +484 56 0 days 00:00:00.101247533 +484 57 0 days 00:00:00.087611486 +484 58 0 days 00:00:00.067571080 +484 59 0 days 00:00:00.070384636 +484 60 0 days 00:00:00.062305910 +484 61 0 days 00:00:00.057334606 +484 62 0 days 00:00:00.063683292 +484 63 0 days 00:00:00.066702436 +484 64 0 days 00:00:00.055915353 +484 65 
0 days 00:00:00.056908340 +484 66 0 days 00:00:00.082495244 +484 67 0 days 00:00:00.067834754 +484 68 0 days 00:00:00.069299506 +484 69 0 days 00:00:00.109627126 +484 71 0 days 00:00:00.104650540 +484 72 0 days 00:00:00.075926924 +484 73 0 days 00:00:00.104150863 +484 74 0 days 00:00:00.096948345 +484 75 0 days 00:00:00.067560673 +484 76 0 days 00:00:00.066746570 +484 77 0 days 00:00:00.057809006 +484 78 0 days 00:00:00.105164306 +484 79 0 days 00:00:00.079306396 +484 80 0 days 00:00:00.103456622 +484 81 0 days 00:00:00.064800148 +484 82 0 days 00:00:00.089032960 +484 83 0 days 00:00:00.059135446 +484 84 0 days 00:00:00.068609857 +484 85 0 days 00:00:00.068353435 +484 86 0 days 00:00:00.111289020 +484 87 0 days 00:00:00.097299800 +484 88 0 days 00:00:00.081580640 +484 89 0 days 00:00:00.080488495 +484 90 0 days 00:00:00.083225634 +484 91 0 days 00:00:00.085379296 +484 92 0 days 00:00:00.076377376 +484 93 0 days 00:00:00.110061571 +484 94 0 days 00:00:00.061754160 +484 95 0 days 00:00:00.066530633 +484 96 0 days 00:00:00.058593440 +484 97 0 days 00:00:00.061223110 +484 98 0 days 00:00:00.112037535 +484 99 0 days 00:00:00.108308573 +484 100 0 days 00:00:00.064356944 +485 1 0 days 00:00:00.575731841 +485 2 0 days 00:00:00.564411918 +485 3 0 days 00:00:00.314812930 +485 4 0 days 00:00:00.514644400 +485 5 0 days 00:00:00.403076430 +485 6 0 days 00:00:00.311904726 +485 7 0 days 00:00:00.870582955 +485 8 0 days 00:00:00.374611522 +485 9 0 days 00:00:00.787051025 +485 10 0 days 00:00:00.355420852 +485 11 0 days 00:00:00.571690951 +485 12 0 days 00:00:00.990560362 +485 13 0 days 00:00:00.470879364 +485 14 0 days 00:00:00.938834368 +485 15 0 days 00:00:00.409421816 +485 16 0 days 00:00:00.984599954 +485 17 0 days 00:00:00.425381388 +485 18 0 days 00:00:01.038103236 +485 19 0 days 00:00:00.299897623 +485 20 0 days 00:00:00.990832620 +485 21 0 days 00:00:00.322483897 +485 22 0 days 00:00:00.549979666 +485 23 0 days 00:00:00.528459092 +485 24 0 days 00:00:00.568015963 +485 25 0 
days 00:00:00.970224028 +485 26 0 days 00:00:00.627815492 +485 27 0 days 00:00:00.581252563 +485 28 0 days 00:00:00.938796976 +485 29 0 days 00:00:00.320157250 +485 30 0 days 00:00:00.558423740 +485 31 0 days 00:00:00.365184354 +485 32 0 days 00:00:00.303998805 +485 33 0 days 00:00:00.628864532 +485 34 0 days 00:00:00.574317547 +485 35 0 days 00:00:00.539936753 +485 36 0 days 00:00:00.418225220 +485 37 0 days 00:00:00.534713052 +485 38 0 days 00:00:00.562544233 +485 39 0 days 00:00:00.580352649 +485 40 0 days 00:00:01.038645337 +485 41 0 days 00:00:00.313516814 +485 42 0 days 00:00:00.339471268 +485 43 0 days 00:00:00.244765866 +485 44 0 days 00:00:01.005154984 +485 45 0 days 00:00:00.533471485 +485 47 0 days 00:00:00.316775451 +485 48 0 days 00:00:00.942393271 +485 49 0 days 00:00:00.341231960 +485 50 0 days 00:00:00.552843036 +485 51 0 days 00:00:00.548007420 +485 52 0 days 00:00:00.595655252 +485 53 0 days 00:00:00.320950860 +485 54 0 days 00:00:00.934533613 +485 56 0 days 00:00:01.052048985 +485 57 0 days 00:00:01.019383360 +485 58 0 days 00:00:00.317852220 +485 59 0 days 00:00:00.511152793 +485 60 0 days 00:00:00.976818042 +485 61 0 days 00:00:00.905223012 +485 62 0 days 00:00:00.967471123 +485 63 0 days 00:00:00.951210896 +485 64 0 days 00:00:00.675724370 +485 65 0 days 00:00:00.307701070 +485 66 0 days 00:00:00.342582042 +485 67 0 days 00:00:00.890818708 +485 68 0 days 00:00:00.732914610 +485 69 0 days 00:00:00.969556136 +485 71 0 days 00:00:00.938555840 +485 72 0 days 00:00:00.332030868 +485 73 0 days 00:00:00.572957670 +485 74 0 days 00:00:00.889137736 +485 75 0 days 00:00:00.561136271 +485 76 0 days 00:00:00.795887950 +485 77 0 days 00:00:00.954912223 +485 78 0 days 00:00:00.328326617 +485 79 0 days 00:00:00.579768087 +485 80 0 days 00:00:00.334603987 +485 81 0 days 00:00:01.001071723 +485 82 0 days 00:00:00.452321871 +485 83 0 days 00:00:00.309364660 +485 84 0 days 00:00:00.352418800 +485 85 0 days 00:00:00.592366095 +485 86 0 days 00:00:00.374902720 
+485 87 0 days 00:00:00.351683464 +485 88 0 days 00:00:00.535552863 +485 89 0 days 00:00:00.930310576 +485 90 0 days 00:00:00.370492740 +485 91 0 days 00:00:01.086316077 +485 92 0 days 00:00:00.558600568 +485 93 0 days 00:00:00.611195823 +485 94 0 days 00:00:00.319531628 +485 95 0 days 00:00:00.416058811 +485 96 0 days 00:00:00.611052777 +485 97 0 days 00:00:00.519925613 +485 98 0 days 00:00:00.332728156 +485 99 0 days 00:00:00.967005400 +485 100 0 days 00:00:00.937968512 +486 1 0 days 00:00:00.329142165 +486 2 0 days 00:00:01.105775737 +486 3 0 days 00:00:00.447124395 +486 4 0 days 00:00:00.349234412 +486 5 0 days 00:00:00.565040402 +486 6 0 days 00:00:00.639727508 +486 7 0 days 00:00:00.450288150 +486 8 0 days 00:00:00.583534536 +486 9 0 days 00:00:00.581598745 +486 10 0 days 00:00:00.304711070 +486 11 0 days 00:00:00.562844000 +486 12 0 days 00:00:00.637250026 +486 13 0 days 00:00:00.565210170 +486 14 0 days 00:00:00.950717652 +486 15 0 days 00:00:00.321842011 +486 16 0 days 00:00:00.998358574 +486 17 0 days 00:00:00.549530411 +486 18 0 days 00:00:00.344836200 +486 19 0 days 00:00:00.348466397 +486 20 0 days 00:00:00.471843401 +486 21 0 days 00:00:00.561904807 +486 22 0 days 00:00:00.339659542 +486 23 0 days 00:00:01.058414475 +486 24 0 days 00:00:00.548740736 +486 25 0 days 00:00:00.355822306 +486 26 0 days 00:00:00.370573504 +486 27 0 days 00:00:00.968818633 +486 28 0 days 00:00:00.988086083 +486 29 0 days 00:00:00.530531780 +486 30 0 days 00:00:00.309957410 +486 31 0 days 00:00:00.329984762 +486 32 0 days 00:00:00.342836204 +486 33 0 days 00:00:01.069141914 +486 34 0 days 00:00:00.545727173 +486 35 0 days 00:00:00.563254720 +486 36 0 days 00:00:01.058836110 +486 37 0 days 00:00:00.410876342 +486 38 0 days 00:00:00.351194138 +486 39 0 days 00:00:00.337677262 +486 40 0 days 00:00:01.062385988 +486 41 0 days 00:00:00.958542568 +486 42 0 days 00:00:00.851407217 +486 43 0 days 00:00:00.278299575 +486 44 0 days 00:00:00.260430836 +486 45 0 days 00:00:00.812925276 
+486 46 0 days 00:00:00.276395286 +486 47 0 days 00:00:00.457621691 +486 48 0 days 00:00:00.890377680 +486 49 0 days 00:00:00.418883300 +486 50 0 days 00:00:00.819000302 +486 51 0 days 00:00:00.348376676 +486 52 0 days 00:00:00.885794903 +486 53 0 days 00:00:00.290875693 +486 54 0 days 00:00:00.276924072 +486 55 0 days 00:00:00.833256807 +486 56 0 days 00:00:00.463417500 +486 57 0 days 00:00:00.463780331 +486 58 0 days 00:00:00.790458093 +486 59 0 days 00:00:00.297958728 +486 60 0 days 00:00:00.811830833 +486 61 0 days 00:00:00.286748731 +486 62 0 days 00:00:00.282896235 +486 63 0 days 00:00:00.821572496 +486 64 0 days 00:00:00.378117764 +486 65 0 days 00:00:00.463917912 +486 66 0 days 00:00:00.507468085 +486 67 0 days 00:00:00.295583826 +486 68 0 days 00:00:00.470004727 +486 69 0 days 00:00:00.435266012 +486 70 0 days 00:00:00.457952782 +486 71 0 days 00:00:00.437968520 +486 72 0 days 00:00:00.792027036 +486 73 0 days 00:00:00.841015868 +486 74 0 days 00:00:00.866726693 +486 75 0 days 00:00:00.282849226 +486 76 0 days 00:00:00.310352186 +486 77 0 days 00:00:00.812099022 +486 78 0 days 00:00:00.350230633 +486 79 0 days 00:00:00.470828080 +486 80 0 days 00:00:00.820835923 +486 81 0 days 00:00:00.862659186 +486 82 0 days 00:00:00.474925015 +486 83 0 days 00:00:00.840899535 +486 84 0 days 00:00:00.465996948 +486 85 0 days 00:00:00.904770043 +486 86 0 days 00:00:00.492560588 +486 87 0 days 00:00:00.455544720 +486 88 0 days 00:00:00.299722297 +486 89 0 days 00:00:00.365663750 +486 90 0 days 00:00:00.854344391 +486 91 0 days 00:00:00.470854880 +486 92 0 days 00:00:00.453239563 +486 93 0 days 00:00:00.496515895 +486 94 0 days 00:00:00.271589256 +486 95 0 days 00:00:00.355035734 +486 96 0 days 00:00:00.483693478 +486 97 0 days 00:00:00.315770240 +486 98 0 days 00:00:00.293600847 +486 99 0 days 00:00:00.244131040 +486 100 0 days 00:00:00.450148200 +487 1 0 days 00:00:00.245603522 +487 2 0 days 00:00:00.235288325 +487 3 0 days 00:00:00.129467928 +487 4 0 days 
00:00:00.399181563 +487 5 0 days 00:00:00.236365020 +487 6 0 days 00:00:00.359371486 +487 7 0 days 00:00:00.394152006 +487 8 0 days 00:00:00.398106114 +487 9 0 days 00:00:00.155103156 +487 10 0 days 00:00:00.236220515 +487 11 0 days 00:00:00.241293718 +487 12 0 days 00:00:00.152265335 +487 13 0 days 00:00:00.213495660 +487 14 0 days 00:00:00.429992552 +487 15 0 days 00:00:00.140908626 +487 16 0 days 00:00:00.235843322 +487 17 0 days 00:00:00.161249772 +487 18 0 days 00:00:00.239791435 +487 19 0 days 00:00:00.239581555 +487 20 0 days 00:00:00.137581637 +487 21 0 days 00:00:00.245894472 +487 22 0 days 00:00:00.129682580 +487 23 0 days 00:00:00.153025100 +487 24 0 days 00:00:00.108755266 +487 25 0 days 00:00:00.425351515 +487 26 0 days 00:00:00.404579668 +487 27 0 days 00:00:00.247472111 +487 28 0 days 00:00:00.115917240 +487 29 0 days 00:00:00.130721694 +487 30 0 days 00:00:00.422852557 +487 31 0 days 00:00:00.217038040 +487 32 0 days 00:00:00.421958850 +487 33 0 days 00:00:00.173111655 +487 34 0 days 00:00:00.238489716 +487 35 0 days 00:00:00.314365673 +487 36 0 days 00:00:00.136896690 +487 37 0 days 00:00:00.120986392 +487 38 0 days 00:00:00.222973244 +487 39 0 days 00:00:00.235446946 +487 40 0 days 00:00:00.417309564 +487 41 0 days 00:00:00.401447062 +487 42 0 days 00:00:00.145614814 +487 43 0 days 00:00:00.257037910 +487 44 0 days 00:00:00.246434778 +487 45 0 days 00:00:00.136034425 +487 46 0 days 00:00:00.214749472 +487 47 0 days 00:00:00.145640396 +487 48 0 days 00:00:00.213474536 +487 49 0 days 00:00:00.133734255 +487 50 0 days 00:00:00.233677463 +487 51 0 days 00:00:00.190833433 +487 52 0 days 00:00:00.379315600 +487 53 0 days 00:00:00.216539230 +487 54 0 days 00:00:00.413554008 +487 55 0 days 00:00:00.131158990 +487 56 0 days 00:00:00.375405004 +487 57 0 days 00:00:00.147947606 +487 58 0 days 00:00:00.241980308 +487 59 0 days 00:00:00.145598960 +487 60 0 days 00:00:00.132789406 +487 61 0 days 00:00:00.407332846 +487 62 0 days 00:00:00.355893220 +487 63 0 
days 00:00:00.411156945 +487 64 0 days 00:00:00.392902863 +487 65 0 days 00:00:00.383255536 +487 66 0 days 00:00:00.384362292 +487 67 0 days 00:00:00.154409536 +487 68 0 days 00:00:00.230848652 +487 69 0 days 00:00:00.409920760 +487 70 0 days 00:00:00.277563546 +487 71 0 days 00:00:00.406490848 +487 72 0 days 00:00:00.227548372 +487 73 0 days 00:00:00.389602186 +487 74 0 days 00:00:00.233583017 +487 75 0 days 00:00:00.123509465 +487 76 0 days 00:00:00.381929376 +487 77 0 days 00:00:00.369365272 +487 78 0 days 00:00:00.219458488 +487 79 0 days 00:00:00.381309388 +487 80 0 days 00:00:00.213574084 +487 81 0 days 00:00:00.384424724 +487 82 0 days 00:00:00.131680511 +487 83 0 days 00:00:00.387710583 +487 84 0 days 00:00:00.142073026 +487 85 0 days 00:00:00.415657482 +487 86 0 days 00:00:00.147300826 +487 87 0 days 00:00:00.396875937 +487 88 0 days 00:00:00.210913568 +487 89 0 days 00:00:00.107552300 +487 90 0 days 00:00:00.238901666 +487 91 0 days 00:00:00.126859356 +487 92 0 days 00:00:00.165940380 +487 93 0 days 00:00:00.121978080 +487 94 0 days 00:00:00.257695344 +487 95 0 days 00:00:00.118987024 +487 96 0 days 00:00:00.224699686 +487 97 0 days 00:00:00.144558035 +487 98 0 days 00:00:00.119034305 +487 99 0 days 00:00:00.398402483 +487 100 0 days 00:00:00.141068655 +488 1 0 days 00:00:00.206956973 +488 2 0 days 00:00:00.239218805 +488 3 0 days 00:00:00.416280902 +488 4 0 days 00:00:00.149759996 +488 5 0 days 00:00:00.155510595 +488 6 0 days 00:00:00.443659786 +488 7 0 days 00:00:00.190751246 +488 8 0 days 00:00:00.434342007 +488 9 0 days 00:00:00.125495560 +488 10 0 days 00:00:00.434719451 +488 11 0 days 00:00:00.247955270 +488 12 0 days 00:00:00.423782980 +488 13 0 days 00:00:00.392815460 +488 14 0 days 00:00:00.146633082 +488 15 0 days 00:00:00.141866273 +488 16 0 days 00:00:00.419606292 +488 17 0 days 00:00:00.228423448 +488 18 0 days 00:00:00.257786338 +488 19 0 days 00:00:00.132429116 +488 20 0 days 00:00:00.176231900 +488 21 0 days 00:00:00.225299576 +488 22 0 
days 00:00:00.208651660 +488 23 0 days 00:00:00.419241073 +488 24 0 days 00:00:00.260914500 +488 25 0 days 00:00:00.135581740 +488 26 0 days 00:00:00.239243134 +488 27 0 days 00:00:00.257440722 +488 28 0 days 00:00:00.228056472 +488 29 0 days 00:00:00.417246783 +488 30 0 days 00:00:00.235907597 +488 31 0 days 00:00:00.214434606 +488 32 0 days 00:00:00.129405932 +488 33 0 days 00:00:00.207728865 +488 34 0 days 00:00:00.253500188 +488 35 0 days 00:00:00.407631760 +488 36 0 days 00:00:00.226907805 +488 37 0 days 00:00:00.242558045 +488 38 0 days 00:00:00.411943043 +488 39 0 days 00:00:00.147794391 +488 40 0 days 00:00:00.138082045 +488 41 0 days 00:00:00.159068985 +488 42 0 days 00:00:00.222476716 +488 43 0 days 00:00:00.411470220 +488 44 0 days 00:00:00.423055817 +488 45 0 days 00:00:00.445777946 +488 46 0 days 00:00:00.406248471 +488 47 0 days 00:00:00.358348082 +488 48 0 days 00:00:00.149266945 +488 49 0 days 00:00:00.234520417 +488 50 0 days 00:00:00.245944242 +488 51 0 days 00:00:00.226633893 +488 52 0 days 00:00:00.266222422 +488 53 0 days 00:00:00.151542464 +488 54 0 days 00:00:00.143451255 +488 55 0 days 00:00:00.147555336 +488 56 0 days 00:00:00.247007160 +488 57 0 days 00:00:00.237171231 +488 58 0 days 00:00:00.428829542 +488 59 0 days 00:00:00.375388376 +488 60 0 days 00:00:00.141165056 +488 61 0 days 00:00:00.169804164 +488 62 0 days 00:00:00.253022596 +488 63 0 days 00:00:00.144167768 +488 64 0 days 00:00:00.144169670 +488 65 0 days 00:00:00.254361526 +488 66 0 days 00:00:00.415171226 +488 67 0 days 00:00:00.401365552 +488 68 0 days 00:00:00.265899186 +488 69 0 days 00:00:00.234245780 +488 70 0 days 00:00:00.150832401 +488 71 0 days 00:00:00.135572660 +488 72 0 days 00:00:00.131173232 +488 73 0 days 00:00:00.413332703 +488 74 0 days 00:00:00.368473456 +488 75 0 days 00:00:00.433928777 +488 76 0 days 00:00:00.145435685 +488 77 0 days 00:00:00.232061696 +488 78 0 days 00:00:00.434314270 +488 79 0 days 00:00:00.232021312 +488 80 0 days 00:00:00.174815143 
+488 81 0 days 00:00:00.145422120 +488 82 0 days 00:00:00.139407622 +488 83 0 days 00:00:00.118131640 +488 84 0 days 00:00:00.459174324 +488 85 0 days 00:00:00.234786066 +488 86 0 days 00:00:00.157978236 +488 87 0 days 00:00:00.254041008 +488 88 0 days 00:00:00.168189574 +488 89 0 days 00:00:00.155283737 +488 90 0 days 00:00:00.244014705 +488 91 0 days 00:00:00.395500168 +488 92 0 days 00:00:00.113670286 +488 93 0 days 00:00:00.160876190 +488 94 0 days 00:00:00.132824443 +488 95 0 days 00:00:00.144257824 +488 96 0 days 00:00:00.438074406 +488 97 0 days 00:00:00.422402363 +488 98 0 days 00:00:00.264408235 +488 99 0 days 00:00:00.431510007 +488 100 0 days 00:00:00.411316370 +489 1 0 days 00:00:00.355308242 +489 2 0 days 00:00:00.971353352 +489 3 0 days 00:00:00.488040755 +489 4 0 days 00:00:00.415751505 +489 5 0 days 00:00:00.483982163 +489 6 0 days 00:00:00.535502923 +489 7 0 days 00:00:01.127632634 +489 8 0 days 00:00:00.611306885 +489 9 0 days 00:00:00.416729120 +489 10 0 days 00:00:00.993532292 +489 11 0 days 00:00:01.067840864 +489 12 0 days 00:00:00.457961495 +489 13 0 days 00:00:00.383030588 +489 14 0 days 00:00:00.519043992 +489 15 0 days 00:00:00.944220125 +489 16 0 days 00:00:01.107470543 +489 17 0 days 00:00:00.304516765 +489 18 0 days 00:00:00.544877020 +489 19 0 days 00:00:00.963649960 +489 20 0 days 00:00:00.534187555 +489 21 0 days 00:00:00.897353790 +489 22 0 days 00:00:00.395609735 +489 23 0 days 00:00:00.943432460 +489 24 0 days 00:00:00.406988636 +489 25 0 days 00:00:00.592046692 +489 26 0 days 00:00:00.523905570 +489 27 0 days 00:00:00.348651648 +489 28 0 days 00:00:00.538977612 +489 29 0 days 00:00:01.096798933 +489 30 0 days 00:00:00.550925360 +489 31 0 days 00:00:00.620858250 +489 32 0 days 00:00:00.550860450 +489 33 0 days 00:00:00.499362408 +489 34 0 days 00:00:00.907965515 +489 35 0 days 00:00:00.389664460 +489 36 0 days 00:00:00.574498913 +489 37 0 days 00:00:00.529098765 +489 38 0 days 00:00:01.007762330 +489 39 0 days 00:00:00.967568216 
+489 40 0 days 00:00:00.303672800 +489 41 0 days 00:00:00.927360404 +489 42 0 days 00:00:00.358124411 +489 43 0 days 00:00:00.940076584 +489 44 0 days 00:00:00.323129180 +489 45 0 days 00:00:01.060452683 +489 46 0 days 00:00:00.629059477 +489 47 0 days 00:00:00.333176875 +489 48 0 days 00:00:00.951273892 +489 49 0 days 00:00:01.132855026 +489 50 0 days 00:00:00.289212080 +489 51 0 days 00:00:00.845871470 +489 52 0 days 00:00:00.493890405 +489 53 0 days 00:00:01.074206231 +489 54 0 days 00:00:00.317355816 +489 55 0 days 00:00:00.316591076 +489 56 0 days 00:00:00.686597917 +489 57 0 days 00:00:00.935457145 +489 58 0 days 00:00:01.034618208 +489 59 0 days 00:00:00.842647765 +489 60 0 days 00:00:01.079097603 +489 61 0 days 00:00:01.091136240 +489 62 0 days 00:00:00.563752788 +489 63 0 days 00:00:01.028885210 +489 64 0 days 00:00:00.497452708 +489 65 0 days 00:00:01.006502370 +489 66 0 days 00:00:00.545951467 +489 67 0 days 00:00:00.490182056 +489 68 0 days 00:00:01.005339884 +489 69 0 days 00:00:00.480192025 +489 70 0 days 00:00:00.351985920 +489 71 0 days 00:00:00.890641465 +489 72 0 days 00:00:01.039833450 +489 73 0 days 00:00:00.501015852 +489 74 0 days 00:00:00.350475058 +489 75 0 days 00:00:00.561950772 +489 76 0 days 00:00:00.304671648 +489 77 0 days 00:00:00.361939780 +489 78 0 days 00:00:01.083380936 +489 79 0 days 00:00:00.269306692 +489 80 0 days 00:00:00.495107915 +489 81 0 days 00:00:00.528486046 +489 82 0 days 00:00:00.543135420 +489 83 0 days 00:00:00.496105760 +489 84 0 days 00:00:00.379673755 +489 85 0 days 00:00:00.480674388 +489 86 0 days 00:00:00.548291955 +489 87 0 days 00:00:00.312182160 +489 88 0 days 00:00:00.500200508 +489 89 0 days 00:00:00.322920353 +489 90 0 days 00:00:00.556421224 +489 91 0 days 00:00:00.392124717 +489 92 0 days 00:00:00.300632445 +489 93 0 days 00:00:00.521733015 +489 94 0 days 00:00:00.363091135 +489 95 0 days 00:00:00.503266646 +489 96 0 days 00:00:00.374388320 +489 97 0 days 00:00:00.952260906 +489 98 0 days 
00:00:00.936702703 +489 99 0 days 00:00:00.293250774 +489 100 0 days 00:00:00.322914075 +490 1 0 days 00:00:00.288132670 +490 2 0 days 00:00:00.476065060 +490 3 0 days 00:00:00.329408460 +490 4 0 days 00:00:00.152119220 +490 5 0 days 00:00:00.147257485 +490 6 0 days 00:00:00.281612880 +490 7 0 days 00:00:00.158620308 +490 8 0 days 00:00:00.156958388 +490 9 0 days 00:00:00.153093250 +490 10 0 days 00:00:00.274332674 +490 11 0 days 00:00:00.171608036 +490 12 0 days 00:00:00.274794773 +490 13 0 days 00:00:00.291653016 +490 14 0 days 00:00:00.463462490 +490 15 0 days 00:00:00.510610990 +490 16 0 days 00:00:00.155505900 +490 17 0 days 00:00:00.178272194 +490 18 0 days 00:00:00.276500610 +490 19 0 days 00:00:00.174114825 +490 20 0 days 00:00:00.145253910 +490 21 0 days 00:00:00.571405667 +490 22 0 days 00:00:00.158168365 +490 23 0 days 00:00:00.298894480 +490 24 0 days 00:00:00.167193353 +490 25 0 days 00:00:00.417555990 +490 26 0 days 00:00:00.247675485 +490 27 0 days 00:00:00.153299970 +490 28 0 days 00:00:00.489859735 +490 29 0 days 00:00:00.461389775 +490 30 0 days 00:00:00.168334318 +490 31 0 days 00:00:00.257042448 +490 32 0 days 00:00:00.518116968 +490 33 0 days 00:00:00.161395945 +490 34 0 days 00:00:00.169481790 +490 35 0 days 00:00:00.131164940 +490 36 0 days 00:00:00.149369526 +490 37 0 days 00:00:00.147530670 +490 38 0 days 00:00:00.126048703 +490 39 0 days 00:00:00.456567820 +490 40 0 days 00:00:00.280379435 +490 41 0 days 00:00:00.326772365 +490 42 0 days 00:00:00.470991056 +490 43 0 days 00:00:00.503329632 +490 44 0 days 00:00:00.459512595 +490 45 0 days 00:00:00.179221598 +490 46 0 days 00:00:00.172747745 +490 47 0 days 00:00:00.498419276 +490 48 0 days 00:00:00.597835432 +490 49 0 days 00:00:00.278087583 +490 50 0 days 00:00:00.233793710 +490 51 0 days 00:00:00.310555732 +490 52 0 days 00:00:00.168289156 +490 53 0 days 00:00:00.260638417 +490 54 0 days 00:00:00.147329217 +490 55 0 days 00:00:00.143525520 +490 56 0 days 00:00:00.375122344 +490 57 0 days 
00:00:00.233086485 +490 58 0 days 00:00:00.252016200 +490 59 0 days 00:00:00.161609490 +490 60 0 days 00:00:00.159395472 +490 61 0 days 00:00:00.450917210 +490 62 0 days 00:00:00.358653825 +490 63 0 days 00:00:00.135617590 +490 64 0 days 00:00:00.157113285 +490 65 0 days 00:00:00.434073280 +490 66 0 days 00:00:00.237305270 +490 67 0 days 00:00:00.124025265 +490 68 0 days 00:00:00.196346143 +490 69 0 days 00:00:00.228441280 +490 70 0 days 00:00:00.468215670 +490 71 0 days 00:00:00.369519120 +490 72 0 days 00:00:00.129037000 +490 73 0 days 00:00:00.374313055 +490 74 0 days 00:00:00.250020480 +490 75 0 days 00:00:00.400218175 +490 76 0 days 00:00:00.401660205 +490 77 0 days 00:00:00.477227616 +490 78 0 days 00:00:00.354139975 +490 79 0 days 00:00:00.134919515 +490 80 0 days 00:00:00.412323025 +490 81 0 days 00:00:00.215984870 +490 82 0 days 00:00:00.226775300 +490 83 0 days 00:00:00.126023300 +490 84 0 days 00:00:00.319381650 +490 85 0 days 00:00:00.250439395 +490 86 0 days 00:00:00.365752690 +490 87 0 days 00:00:00.251650844 +490 88 0 days 00:00:00.398526310 +490 89 0 days 00:00:00.180559994 +490 90 0 days 00:00:00.128651555 +490 91 0 days 00:00:00.131957320 +490 92 0 days 00:00:00.248095091 +490 94 0 days 00:00:00.252797053 +490 95 0 days 00:00:00.276092266 +490 96 0 days 00:00:00.389838725 +490 97 0 days 00:00:00.229733596 +490 98 0 days 00:00:00.230086216 +490 99 0 days 00:00:00.427061540 +490 100 0 days 00:00:00.246042495 +491 1 0 days 00:00:00.531296092 +491 2 0 days 00:00:01.092709947 +491 3 0 days 00:00:00.370280736 +491 4 0 days 00:00:00.597579601 +491 5 0 days 00:00:01.098164252 +491 6 0 days 00:00:00.336683130 +491 7 0 days 00:00:01.013146262 +491 8 0 days 00:00:00.533357175 +491 9 0 days 00:00:00.508129205 +491 10 0 days 00:00:00.519921736 +491 11 0 days 00:00:00.304918160 +491 12 0 days 00:00:00.320035145 +491 13 0 days 00:00:00.338279620 +491 14 0 days 00:00:00.519779726 +491 15 0 days 00:00:00.312459422 +491 16 0 days 00:00:01.050785140 +491 17 0 days 
00:00:00.388462985 +491 18 0 days 00:00:00.314755450 +491 19 0 days 00:00:01.018049674 +491 20 0 days 00:00:00.559182390 +491 21 0 days 00:00:00.861047300 +491 22 0 days 00:00:00.572067280 +491 23 0 days 00:00:00.553127077 +491 24 0 days 00:00:00.533991645 +491 25 0 days 00:00:01.028817483 +491 26 0 days 00:00:00.515722576 +491 27 0 days 00:00:00.571018556 +491 28 0 days 00:00:00.351129605 +491 29 0 days 00:00:00.910636580 +491 30 0 days 00:00:00.358289176 +491 31 0 days 00:00:00.302487975 +491 32 0 days 00:00:00.307311346 +491 33 0 days 00:00:00.550831160 +491 34 0 days 00:00:00.518051856 +491 35 0 days 00:00:00.315596940 +491 36 0 days 00:00:00.351528053 +491 37 0 days 00:00:01.084716707 +491 38 0 days 00:00:00.312715386 +491 39 0 days 00:00:00.950536773 +491 40 0 days 00:00:00.936903748 +491 41 0 days 00:00:00.417880233 +491 42 0 days 00:00:00.517449946 +491 43 0 days 00:00:00.566641097 +491 44 0 days 00:00:00.328408355 +491 45 0 days 00:00:00.329107326 +491 46 0 days 00:00:00.323110387 +491 47 0 days 00:00:00.991441011 +491 48 0 days 00:00:00.332668395 +491 49 0 days 00:00:00.315653010 +491 50 0 days 00:00:00.349343762 +491 51 0 days 00:00:00.526025025 +491 52 0 days 00:00:00.260884020 +491 53 0 days 00:00:00.945114264 +491 54 0 days 00:00:00.905787804 +491 55 0 days 00:00:00.964986586 +491 56 0 days 00:00:00.338804895 +491 57 0 days 00:00:00.957769283 +491 58 0 days 00:00:00.971965776 +491 59 0 days 00:00:00.325016982 +491 60 0 days 00:00:00.492877616 +491 61 0 days 00:00:00.557014563 +491 62 0 days 00:00:01.105860807 +491 63 0 days 00:00:00.521745703 +491 64 0 days 00:00:00.300433840 +491 65 0 days 00:00:00.313834097 +491 66 0 days 00:00:00.967350793 +491 67 0 days 00:00:00.250812953 +491 68 0 days 00:00:00.940903852 +491 69 0 days 00:00:00.328103043 +491 70 0 days 00:00:00.800561095 +491 71 0 days 00:00:00.580476167 +491 72 0 days 00:00:00.972676088 +491 73 0 days 00:00:00.869283890 +491 74 0 days 00:00:01.001642705 +491 75 0 days 00:00:00.986465068 +491 76 
0 days 00:00:00.326332484 +491 77 0 days 00:00:01.015066870 +491 78 0 days 00:00:00.561652380 +491 79 0 days 00:00:00.322909072 +491 80 0 days 00:00:00.344790126 +491 81 0 days 00:00:00.526424913 +491 82 0 days 00:00:00.996313405 +491 83 0 days 00:00:00.972265331 +491 84 0 days 00:00:00.545394760 +491 85 0 days 00:00:00.552575327 +491 86 0 days 00:00:00.559876362 +491 87 0 days 00:00:00.532360503 +491 88 0 days 00:00:00.567101010 +491 89 0 days 00:00:00.913168352 +491 90 0 days 00:00:00.567122472 +491 91 0 days 00:00:00.564588727 +491 92 0 days 00:00:00.319686140 +491 93 0 days 00:00:00.936519708 +491 94 0 days 00:00:00.536731260 +491 95 0 days 00:00:00.511917176 +491 96 0 days 00:00:00.995772954 +491 97 0 days 00:00:00.569871772 +491 98 0 days 00:00:00.342060690 +491 99 0 days 00:00:00.525947617 +491 100 0 days 00:00:00.560840220 +492 1 0 days 00:00:00.246920760 +492 2 0 days 00:00:00.465991716 +492 3 0 days 00:00:00.520840962 +492 4 0 days 00:00:00.481647870 +492 5 0 days 00:00:00.199924663 +492 6 0 days 00:00:00.262918100 +492 7 0 days 00:00:00.143509795 +492 8 0 days 00:00:00.258375156 +492 9 0 days 00:00:00.282500572 +492 10 0 days 00:00:00.266184763 +492 11 0 days 00:00:00.439235920 +492 12 0 days 00:00:00.292857738 +492 13 0 days 00:00:00.147794440 +492 14 0 days 00:00:00.159541420 +492 15 0 days 00:00:00.260789346 +492 16 0 days 00:00:00.158575834 +492 17 0 days 00:00:00.277062834 +492 18 0 days 00:00:00.137227273 +492 19 0 days 00:00:00.171682282 +492 20 0 days 00:00:00.144326236 +492 21 0 days 00:00:00.261893680 +492 22 0 days 00:00:00.480852026 +492 23 0 days 00:00:00.498294596 +492 24 0 days 00:00:00.490310344 +492 25 0 days 00:00:00.150951008 +492 26 0 days 00:00:00.178518038 +492 27 0 days 00:00:00.271832876 +492 28 0 days 00:00:00.462606400 +492 29 0 days 00:00:00.252020680 +492 30 0 days 00:00:00.405085972 +492 31 0 days 00:00:00.491692800 +492 32 0 days 00:00:00.538454150 +492 33 0 days 00:00:00.310065963 +492 34 0 days 00:00:00.489423968 +492 35 0 
days 00:00:00.264486496 +492 36 0 days 00:00:00.508101103 +492 37 0 days 00:00:00.491289540 +492 38 0 days 00:00:00.128460646 +492 39 0 days 00:00:00.294294403 +492 40 0 days 00:00:00.545746356 +492 41 0 days 00:00:00.474736868 +492 42 0 days 00:00:00.158920505 +492 43 0 days 00:00:00.167025390 +492 44 0 days 00:00:00.266915288 +492 45 0 days 00:00:00.264521980 +492 46 0 days 00:00:00.254757993 +492 47 0 days 00:00:00.194549368 +492 48 0 days 00:00:00.466483080 +492 49 0 days 00:00:00.294353744 +492 50 0 days 00:00:00.357079972 +492 51 0 days 00:00:00.515997937 +492 52 0 days 00:00:00.507986602 +492 53 0 days 00:00:00.503243880 +492 54 0 days 00:00:00.515643650 +492 55 0 days 00:00:00.477828300 +492 56 0 days 00:00:00.257452820 +492 57 0 days 00:00:00.463844508 +492 58 0 days 00:00:00.178548947 +492 59 0 days 00:00:00.261110184 +492 60 0 days 00:00:00.281344345 +492 61 0 days 00:00:00.299526132 +492 62 0 days 00:00:00.287047795 +492 63 0 days 00:00:00.267494353 +492 64 0 days 00:00:00.445994468 +492 65 0 days 00:00:00.259331292 +492 66 0 days 00:00:00.153044780 +492 67 0 days 00:00:00.264374672 +492 68 0 days 00:00:00.465247476 +492 69 0 days 00:00:00.134507920 +492 70 0 days 00:00:00.172981860 +492 71 0 days 00:00:00.255485848 +492 72 0 days 00:00:00.445450910 +492 73 0 days 00:00:00.162573150 +492 74 0 days 00:00:00.157756231 +492 75 0 days 00:00:00.266414948 +492 76 0 days 00:00:00.477500260 +492 77 0 days 00:00:00.503951625 +492 78 0 days 00:00:00.150559990 +492 79 0 days 00:00:00.458394760 +492 80 0 days 00:00:00.165636851 +492 81 0 days 00:00:00.470292756 +492 82 0 days 00:00:00.441919040 +492 83 0 days 00:00:00.154828010 +492 84 0 days 00:00:00.461427548 +492 85 0 days 00:00:00.253335160 +492 86 0 days 00:00:00.277617470 +492 87 0 days 00:00:00.255327388 +492 88 0 days 00:00:00.170450797 +492 89 0 days 00:00:00.225336546 +492 90 0 days 00:00:00.459066364 +492 91 0 days 00:00:00.169061477 +492 92 0 days 00:00:00.459451240 +492 93 0 days 00:00:00.265861356 
+492 94 0 days 00:00:00.505405560 +492 95 0 days 00:00:00.158626276 +492 96 0 days 00:00:00.521893273 +492 97 0 days 00:00:00.477062540 +492 98 0 days 00:00:00.289116713 +492 99 0 days 00:00:00.477054444 +492 100 0 days 00:00:00.465412980 +493 1 0 days 00:00:25.872607045 +493 2 0 days 00:00:31.228499201 +493 3 0 days 00:00:25.740620620 +493 4 0 days 00:00:20.281715508 +493 5 0 days 00:00:20.994034211 +493 6 0 days 00:00:42.800803585 +493 7 0 days 00:00:21.806601153 +493 8 0 days 00:00:20.737528137 +493 9 0 days 00:00:46.822481516 +494 1 0 days 00:00:27.121146152 +494 2 0 days 00:00:29.785135425 +494 3 0 days 00:00:13.163875756 +494 4 0 days 00:00:17.651388610 +494 5 0 days 00:00:16.741889420 +494 6 0 days 00:00:18.158537857 +494 7 0 days 00:00:25.495356285 +494 8 0 days 00:00:11.504056440 +494 9 0 days 00:00:17.431816110 +494 10 0 days 00:00:27.865949520 +494 11 0 days 00:00:28.247683413 +494 12 0 days 00:00:31.227631704 +494 13 0 days 00:00:15.632618470 +495 1 0 days 00:00:08.497382448 +495 2 0 days 00:00:11.266012097 +495 3 0 days 00:00:03.936874846 +495 4 0 days 00:00:03.618772912 +495 5 0 days 00:00:06.835184931 +495 6 0 days 00:00:05.468805102 +495 7 0 days 00:00:09.733194917 +495 8 0 days 00:00:11.081244637 +495 9 0 days 00:00:06.782587973 +495 10 0 days 00:00:10.008667042 +495 11 0 days 00:00:05.250979845 +495 12 0 days 00:00:05.528056496 +495 13 0 days 00:00:10.675296506 +495 14 0 days 00:00:04.090031894 +495 15 0 days 00:00:10.478355017 +495 16 0 days 00:00:03.397059487 +495 17 0 days 00:00:11.557403752 +495 18 0 days 00:00:03.690580598 +495 19 0 days 00:00:03.499993646 +495 20 0 days 00:00:05.828600596 +495 21 0 days 00:00:05.814206274 +495 22 0 days 00:00:03.790021137 +495 23 0 days 00:00:04.435111150 +495 24 0 days 00:00:09.112731440 +495 25 0 days 00:00:04.861021182 +495 26 0 days 00:00:06.209913702 +495 27 0 days 00:00:03.392000176 +495 28 0 days 00:00:06.413639327 +495 29 0 days 00:00:11.147760122 +495 30 0 days 00:00:04.798635531 +495 31 0 days 
00:00:10.990293900 +495 32 0 days 00:00:06.821823254 +495 33 0 days 00:00:04.358610328 +495 34 0 days 00:00:05.400990416 +496 1 0 days 00:00:05.294196371 +496 2 0 days 00:00:05.345443317 +496 3 0 days 00:00:02.794544880 +496 4 0 days 00:00:02.315371751 +496 5 0 days 00:00:01.793367982 +496 6 0 days 00:00:03.161723616 +496 7 0 days 00:00:03.317202774 +496 8 0 days 00:00:06.070395385 +496 9 0 days 00:00:05.049016372 +496 10 0 days 00:00:01.587850016 +496 11 0 days 00:00:01.948639516 +496 12 0 days 00:00:03.080703435 +496 13 0 days 00:00:05.434289192 +496 14 0 days 00:00:03.142623927 +496 15 0 days 00:00:03.159123307 +496 16 0 days 00:00:03.411644998 +496 17 0 days 00:00:05.058228026 +496 18 0 days 00:00:01.859135317 +496 19 0 days 00:00:05.149506596 +496 20 0 days 00:00:02.863217248 +496 21 0 days 00:00:05.137181805 +496 22 0 days 00:00:03.025101680 +496 23 0 days 00:00:05.130765114 +496 24 0 days 00:00:05.638888955 +496 25 0 days 00:00:02.827221535 +496 26 0 days 00:00:04.725757980 +496 27 0 days 00:00:01.952007363 +496 28 0 days 00:00:01.793554135 +496 29 0 days 00:00:02.528563595 +496 30 0 days 00:00:05.151348998 +496 31 0 days 00:00:01.905184885 +496 32 0 days 00:00:03.121043203 +496 33 0 days 00:00:02.690489905 +496 34 0 days 00:00:02.528962776 +496 35 0 days 00:00:03.232250007 +496 36 0 days 00:00:02.383951875 +496 37 0 days 00:00:02.613376124 +496 38 0 days 00:00:01.750769475 +496 39 0 days 00:00:02.489760005 +496 40 0 days 00:00:04.690220835 +496 41 0 days 00:00:02.192961208 +496 42 0 days 00:00:02.316919134 +496 43 0 days 00:00:05.017171131 +496 44 0 days 00:00:01.768250675 +496 45 0 days 00:00:05.070341260 +496 46 0 days 00:00:01.977611950 +496 47 0 days 00:00:02.811513028 +496 48 0 days 00:00:04.399741836 +496 49 0 days 00:00:02.156444008 +496 50 0 days 00:00:01.736135010 +496 51 0 days 00:00:05.556484565 +496 52 0 days 00:00:03.193948824 +496 53 0 days 00:00:05.336967406 +496 54 0 days 00:00:01.574285210 +496 55 0 days 00:00:01.821454588 +496 56 0 days 
00:00:01.814063924 +496 57 0 days 00:00:01.720465655 +496 58 0 days 00:00:02.785274705 +496 59 0 days 00:00:04.125787834 +496 60 0 days 00:00:01.685029672 +496 61 0 days 00:00:01.774256975 +496 62 0 days 00:00:02.611227065 +496 63 0 days 00:00:02.877350564 +496 64 0 days 00:00:04.627631905 +496 65 0 days 00:00:05.292592744 +496 66 0 days 00:00:01.837174408 +496 67 0 days 00:00:04.605681905 +496 68 0 days 00:00:02.352959095 +496 69 0 days 00:00:04.669293924 +496 70 0 days 00:00:01.742538372 +497 1 0 days 00:00:18.117083400 +497 2 0 days 00:00:25.635247775 +497 3 0 days 00:00:42.696245590 +497 4 0 days 00:00:42.655256850 +497 5 0 days 00:00:42.687070040 +497 6 0 days 00:00:30.215176091 +497 7 0 days 00:00:28.825508400 +497 8 0 days 00:00:30.549218292 +497 9 0 days 00:00:20.853219891 +497 10 0 days 00:00:25.615291330 +497 11 0 days 00:00:20.598083665 +498 1 0 days 00:00:25.784153635 +498 2 0 days 00:00:18.999243124 +498 3 0 days 00:00:18.502134290 +498 4 0 days 00:00:19.484270036 +498 5 0 days 00:00:27.319365172 +498 6 0 days 00:00:38.342129620 +498 7 0 days 00:00:31.446002210 +498 8 0 days 00:00:15.985721880 +498 9 0 days 00:00:19.552799348 +498 10 0 days 00:00:25.739536165 +498 11 0 days 00:00:20.363280257 +498 12 0 days 00:00:26.404371365 +498 13 0 days 00:00:47.197099806 +499 1 0 days 00:00:10.755876125 +499 2 0 days 00:00:30.494783062 +499 3 0 days 00:00:11.798118966 +499 4 0 days 00:00:28.665392263 +499 5 0 days 00:00:15.518877785 +499 6 0 days 00:00:11.873934410 +499 7 0 days 00:00:10.659750710 +499 8 0 days 00:00:29.786349615 +499 9 0 days 00:00:16.502996328 +499 10 0 days 00:00:27.557724184 +499 11 0 days 00:00:16.423859540 +499 12 0 days 00:00:16.494779564 +499 13 0 days 00:00:25.368043950 +499 14 0 days 00:00:16.444712444 +499 15 0 days 00:00:27.583789000 +499 16 0 days 00:00:13.590642654 +500 1 0 days 00:00:15.819612605 +500 2 0 days 00:00:15.884871980 +500 3 0 days 00:00:11.054221775 +500 4 0 days 00:00:11.035691195 +500 5 0 days 00:00:14.129737746 +500 6 
0 days 00:00:09.680479040 +500 7 0 days 00:00:16.915372552 +500 8 0 days 00:00:13.952279466 +500 9 0 days 00:00:15.630154910 +500 10 0 days 00:00:25.715441740 +500 11 0 days 00:00:28.563495633 +500 12 0 days 00:00:15.611115645 +500 13 0 days 00:00:22.967816233 +500 14 0 days 00:00:10.984418285 +500 15 0 days 00:00:27.384497712 +500 16 0 days 00:00:11.670255552 +500 17 0 days 00:00:10.860240130 +500 18 0 days 00:00:15.620465210 +500 19 0 days 00:00:27.752144816 +500 20 0 days 00:00:25.658573865 +500 21 0 days 00:00:26.061219385 +500 22 0 days 00:00:11.003690290 +500 23 0 days 00:00:23.286427880 +500 24 0 days 00:00:25.720070820 +501 1 0 days 00:00:03.243589873 +501 2 0 days 00:00:08.114746800 +501 3 0 days 00:00:07.678285725 +501 4 0 days 00:00:07.791513033 +501 5 0 days 00:00:04.817550848 +501 6 0 days 00:00:04.853195684 +501 7 0 days 00:00:08.353996500 +501 8 0 days 00:00:04.505284410 +501 9 0 days 00:00:04.718948400 +501 10 0 days 00:00:05.212844692 +501 11 0 days 00:00:08.013493980 +501 12 0 days 00:00:08.077372970 +501 13 0 days 00:00:04.859818972 +501 14 0 days 00:00:08.111047788 +501 15 0 days 00:00:05.084224848 +501 16 0 days 00:00:03.129045405 +501 17 0 days 00:00:08.091228900 +501 18 0 days 00:00:03.572178585 +501 19 0 days 00:00:07.792020945 +501 20 0 days 00:00:07.593574785 +501 21 0 days 00:00:03.285936330 +501 22 0 days 00:00:04.935403136 +501 23 0 days 00:00:05.093161674 +501 24 0 days 00:00:04.847806060 +501 25 0 days 00:00:05.034183273 +501 26 0 days 00:00:04.643753825 +501 27 0 days 00:00:04.899995056 +501 28 0 days 00:00:04.641788365 +501 29 0 days 00:00:08.029018510 +501 30 0 days 00:00:03.051741956 +501 31 0 days 00:00:03.453938195 +501 32 0 days 00:00:04.887959032 +501 33 0 days 00:00:04.934836506 +501 34 0 days 00:00:03.599208203 +501 35 0 days 00:00:03.396612344 +501 36 0 days 00:00:02.992798880 +501 37 0 days 00:00:08.373799560 +501 38 0 days 00:00:04.502869215 +501 39 0 days 00:00:04.863441428 +501 40 0 days 00:00:02.868266280 +501 41 0 
days 00:00:02.893816285 +501 42 0 days 00:00:04.928263136 +501 43 0 days 00:00:08.700512583 +501 44 0 days 00:00:07.825092115 +501 45 0 days 00:00:03.384514166 +501 46 0 days 00:00:04.548111160 +501 47 0 days 00:00:09.101183234 +501 48 0 days 00:00:08.660472497 +501 49 0 days 00:00:04.809617344 +501 50 0 days 00:00:05.066799010 +501 51 0 days 00:00:03.244905228 +501 52 0 days 00:00:04.579344570 +501 53 0 days 00:00:07.681290485 +501 54 0 days 00:00:05.294569562 +501 55 0 days 00:00:04.670778345 +501 56 0 days 00:00:02.859527030 +501 57 0 days 00:00:02.913986655 +501 58 0 days 00:00:07.409394200 +501 59 0 days 00:00:08.235212180 +501 60 0 days 00:00:04.930527124 +501 61 0 days 00:00:04.777267784 +501 62 0 days 00:00:08.523621248 +501 63 0 days 00:00:07.628081005 +501 64 0 days 00:00:03.204755713 +501 65 0 days 00:00:07.795087710 +502 1 0 days 00:00:04.709949360 +502 2 0 days 00:00:02.993487785 +502 3 0 days 00:00:08.193874910 +502 4 0 days 00:00:04.763569165 +502 5 0 days 00:00:09.277279497 +502 6 0 days 00:00:08.572347016 +502 7 0 days 00:00:03.369668597 +502 8 0 days 00:00:04.836200855 +502 9 0 days 00:00:03.145982140 +502 10 0 days 00:00:08.113775800 +502 11 0 days 00:00:05.137290486 +502 12 0 days 00:00:08.997894872 +502 13 0 days 00:00:03.711793770 +502 14 0 days 00:00:04.916360765 +502 15 0 days 00:00:05.168034176 +502 16 0 days 00:00:08.921653352 +502 17 0 days 00:00:07.942967795 +502 18 0 days 00:00:05.027962424 +502 19 0 days 00:00:04.718312455 +502 20 0 days 00:00:03.385370911 +502 21 0 days 00:00:08.992843876 +502 22 0 days 00:00:08.053794480 +502 23 0 days 00:00:08.167230220 +502 24 0 days 00:00:08.248279360 +502 25 0 days 00:00:09.276857434 +502 26 0 days 00:00:05.221244053 +502 27 0 days 00:00:05.104808313 +502 28 0 days 00:00:04.601717135 +502 29 0 days 00:00:05.223775926 +502 30 0 days 00:00:04.656776565 +502 31 0 days 00:00:08.116280155 +502 32 0 days 00:00:05.145190008 +502 33 0 days 00:00:02.972756770 +502 34 0 days 00:00:09.091562188 +502 35 0 
days 00:00:08.278723120 +502 36 0 days 00:00:08.015363930 +502 37 0 days 00:00:03.553594368 +502 38 0 days 00:00:03.377687140 +502 39 0 days 00:00:04.777297765 +502 40 0 days 00:00:08.012283560 +502 41 0 days 00:00:07.955058220 +502 42 0 days 00:00:02.969688110 +502 43 0 days 00:00:09.273005553 +502 44 0 days 00:00:03.507098383 +502 45 0 days 00:00:03.559015284 +502 46 0 days 00:00:05.054797640 +502 47 0 days 00:00:04.601493515 +502 48 0 days 00:00:04.767885345 +502 49 0 days 00:00:03.006552265 +502 50 0 days 00:00:08.468967830 +502 51 0 days 00:00:03.283308010 +502 52 0 days 00:00:05.220061780 +502 53 0 days 00:00:02.984349640 +502 54 0 days 00:00:03.376855233 +502 55 0 days 00:00:03.265754976 +502 56 0 days 00:00:04.629499660 +502 57 0 days 00:00:03.963656962 +502 58 0 days 00:00:08.683275152 +502 59 0 days 00:00:07.183211630 +502 60 0 days 00:00:03.178853524 +502 61 0 days 00:00:08.042863130 +502 62 0 days 00:00:03.663186482 +502 63 0 days 00:00:08.380691800 +503 1 0 days 00:00:04.090707584 +503 2 0 days 00:00:04.319932536 +503 3 0 days 00:00:02.296336065 +503 4 0 days 00:00:01.595920545 +503 5 0 days 00:00:01.767907610 +503 6 0 days 00:00:03.851645920 +503 7 0 days 00:00:04.111181925 +503 8 0 days 00:00:02.271894035 +503 9 0 days 00:00:03.834600280 +503 10 0 days 00:00:01.463133440 +503 11 0 days 00:00:02.288244965 +503 12 0 days 00:00:02.429112204 +503 13 0 days 00:00:04.147478506 +503 14 0 days 00:00:03.989655770 +503 15 0 days 00:00:01.538265928 +503 16 0 days 00:00:04.079073224 +503 17 0 days 00:00:02.319220390 +503 18 0 days 00:00:03.950160776 +503 19 0 days 00:00:04.112380185 +503 20 0 days 00:00:04.069908375 +503 21 0 days 00:00:02.150707225 +503 22 0 days 00:00:01.632759226 +503 23 0 days 00:00:03.727180453 +503 24 0 days 00:00:01.757337278 +503 25 0 days 00:00:01.786185530 +503 26 0 days 00:00:01.358495140 +503 27 0 days 00:00:03.850268670 +503 28 0 days 00:00:03.996286740 +503 29 0 days 00:00:03.879585340 +503 30 0 days 00:00:04.105062130 +503 31 0 
days 00:00:01.736067663 +503 32 0 days 00:00:01.592914053 +503 33 0 days 00:00:02.444035492 +503 34 0 days 00:00:03.880315570 +503 35 0 days 00:00:03.855488070 +503 36 0 days 00:00:04.344247937 +503 37 0 days 00:00:02.419436908 +503 38 0 days 00:00:04.125370980 +503 39 0 days 00:00:02.434109545 +503 40 0 days 00:00:02.414243985 +503 41 0 days 00:00:03.916625350 +503 42 0 days 00:00:01.501208750 +503 43 0 days 00:00:02.435148336 +503 44 0 days 00:00:02.398114510 +503 45 0 days 00:00:01.593774803 +503 46 0 days 00:00:04.073419378 +503 47 0 days 00:00:01.620497396 +503 48 0 days 00:00:01.489496465 +503 49 0 days 00:00:02.326370595 +503 50 0 days 00:00:01.678286473 +503 51 0 days 00:00:02.357852705 +503 52 0 days 00:00:01.490745570 +503 53 0 days 00:00:02.498811936 +503 54 0 days 00:00:02.480653615 +503 55 0 days 00:00:02.401826405 +503 56 0 days 00:00:02.338945745 +503 57 0 days 00:00:02.343372060 +503 58 0 days 00:00:01.697712588 +503 59 0 days 00:00:02.404098005 +503 60 0 days 00:00:01.506381045 +503 61 0 days 00:00:01.748304720 +503 62 0 days 00:00:04.358863448 +503 63 0 days 00:00:01.697840960 +503 64 0 days 00:00:02.287081890 +503 65 0 days 00:00:02.462927184 +503 66 0 days 00:00:02.297443480 +503 67 0 days 00:00:03.951132585 +503 68 0 days 00:00:04.122429384 +503 69 0 days 00:00:01.786352472 +503 70 0 days 00:00:01.686038944 +503 71 0 days 00:00:02.301912308 +503 72 0 days 00:00:02.452861848 +503 73 0 days 00:00:01.775796775 +503 74 0 days 00:00:04.163257072 +503 75 0 days 00:00:02.626601916 +503 76 0 days 00:00:04.107324070 +503 77 0 days 00:00:03.816642525 +503 78 0 days 00:00:01.731447208 +503 79 0 days 00:00:02.271390935 +503 80 0 days 00:00:02.749412906 +503 81 0 days 00:00:03.925053140 +503 82 0 days 00:00:02.297169825 +503 83 0 days 00:00:02.335471275 +503 84 0 days 00:00:04.079427445 +503 85 0 days 00:00:01.756134514 +503 86 0 days 00:00:02.517040150 +503 87 0 days 00:00:02.624010845 +503 88 0 days 00:00:02.399996920 +503 89 0 days 00:00:01.522501785 
+503 90 0 days 00:00:02.326728405 +503 91 0 days 00:00:02.617857605 +503 92 0 days 00:00:02.406701560 +503 93 0 days 00:00:04.286471996 +503 94 0 days 00:00:01.778893960 +503 95 0 days 00:00:02.643430972 +503 96 0 days 00:00:03.949979035 +503 97 0 days 00:00:01.588025272 +503 98 0 days 00:00:01.499573730 +503 99 0 days 00:00:02.399644665 +503 100 0 days 00:00:04.108969950 +504 1 0 days 00:00:01.773310235 +504 2 0 days 00:00:02.371202605 +504 3 0 days 00:00:01.758044336 +504 4 0 days 00:00:04.283919060 +504 5 0 days 00:00:04.115362475 +504 6 0 days 00:00:04.069818625 +504 7 0 days 00:00:02.526860495 +504 8 0 days 00:00:01.790935775 +504 9 0 days 00:00:02.482951040 +504 10 0 days 00:00:01.737212632 +504 11 0 days 00:00:01.754931397 +504 12 0 days 00:00:04.742010100 +504 13 0 days 00:00:01.617815236 +504 14 0 days 00:00:02.492438904 +504 15 0 days 00:00:02.688048645 +504 16 0 days 00:00:04.630703162 +504 17 0 days 00:00:04.190375090 +504 18 0 days 00:00:01.751677918 +504 19 0 days 00:00:02.354598745 +504 20 0 days 00:00:01.592937875 +504 21 0 days 00:00:04.079471640 +504 22 0 days 00:00:01.684583492 +504 23 0 days 00:00:02.745178943 +504 24 0 days 00:00:01.824392510 +504 25 0 days 00:00:02.382889190 +504 26 0 days 00:00:01.777520012 +504 27 0 days 00:00:02.772317202 +504 28 0 days 00:00:02.382855300 +504 29 0 days 00:00:04.110416865 +504 30 0 days 00:00:04.457295213 +504 31 0 days 00:00:04.231274715 +504 32 0 days 00:00:02.500282145 +504 33 0 days 00:00:02.648855860 +504 34 0 days 00:00:01.761719056 +504 35 0 days 00:00:01.550075445 +504 36 0 days 00:00:01.833927715 +504 37 0 days 00:00:04.278554750 +504 38 0 days 00:00:01.542242960 +504 39 0 days 00:00:01.681092384 +504 40 0 days 00:00:02.341329785 +504 41 0 days 00:00:04.258852395 +504 42 0 days 00:00:01.648232110 +504 43 0 days 00:00:02.585647255 +504 44 0 days 00:00:02.517631080 +504 45 0 days 00:00:01.648932645 +504 46 0 days 00:00:04.259190380 +504 47 0 days 00:00:01.754026980 +504 48 0 days 00:00:02.440896480 
+504 49 0 days 00:00:04.275465995 +504 50 0 days 00:00:04.737377993 +504 51 0 days 00:00:02.439726605 +504 52 0 days 00:00:02.862459598 +504 53 0 days 00:00:02.385568075 +504 54 0 days 00:00:01.828504616 +504 55 0 days 00:00:04.587220162 +504 56 0 days 00:00:01.747532862 +504 57 0 days 00:00:04.324704776 +504 58 0 days 00:00:04.161397725 +504 59 0 days 00:00:04.435402463 +504 60 0 days 00:00:02.543148344 +504 61 0 days 00:00:02.373750950 +504 62 0 days 00:00:04.402864516 +504 63 0 days 00:00:04.220617150 +504 64 0 days 00:00:01.673229360 +504 65 0 days 00:00:02.471169515 +504 66 0 days 00:00:01.594435180 +504 67 0 days 00:00:02.513542340 +504 68 0 days 00:00:02.739359876 +504 69 0 days 00:00:02.005396876 +504 70 0 days 00:00:04.390581064 +504 71 0 days 00:00:04.649073090 +504 72 0 days 00:00:01.741788936 +504 73 0 days 00:00:02.678496217 +504 74 0 days 00:00:01.533606395 +504 75 0 days 00:00:02.423097290 +504 76 0 days 00:00:04.296478860 +504 77 0 days 00:00:01.610358436 +504 78 0 days 00:00:01.744472837 +504 79 0 days 00:00:04.319299556 +504 80 0 days 00:00:01.891554316 +504 81 0 days 00:00:04.131986825 +504 82 0 days 00:00:01.693449331 +504 83 0 days 00:00:04.418551063 +504 84 0 days 00:00:01.819418664 +504 85 0 days 00:00:01.678690990 +504 86 0 days 00:00:02.463547535 +504 87 0 days 00:00:04.107541735 +504 88 0 days 00:00:04.042982540 +504 89 0 days 00:00:02.799549592 +504 90 0 days 00:00:04.302585045 +504 91 0 days 00:00:02.366048345 +504 92 0 days 00:00:01.859624466 +504 93 0 days 00:00:01.893884036 +504 94 0 days 00:00:02.522803256 +504 95 0 days 00:00:01.606592576 +504 96 0 days 00:00:02.669767113 +504 97 0 days 00:00:04.321111208 +504 98 0 days 00:00:04.044724985 +504 99 0 days 00:00:02.416940455 +504 100 0 days 00:00:04.314522835 +505 1 0 days 00:00:08.229425430 +505 2 0 days 00:00:03.030766445 +505 3 0 days 00:00:03.312419673 +505 4 0 days 00:00:05.092646005 +505 5 0 days 00:00:04.942294855 +505 6 0 days 00:00:05.253306110 +505 7 0 days 00:00:09.576737835 
+505 8 0 days 00:00:08.312901265 +505 9 0 days 00:00:08.784174912 +505 10 0 days 00:00:03.183857560 +505 11 0 days 00:00:03.627043962 +505 12 0 days 00:00:09.082090012 +505 13 0 days 00:00:04.819450555 +505 14 0 days 00:00:05.105215136 +505 15 0 days 00:00:05.128711760 +505 16 0 days 00:00:03.128460760 +505 17 0 days 00:00:03.532692940 +505 18 0 days 00:00:08.834589328 +505 19 0 days 00:00:03.448841062 +505 20 0 days 00:00:05.185938013 +505 21 0 days 00:00:09.497026832 +505 22 0 days 00:00:04.990591680 +505 23 0 days 00:00:08.259205835 +505 24 0 days 00:00:08.009558815 +505 25 0 days 00:00:03.352784848 +505 26 0 days 00:00:05.348012287 +505 27 0 days 00:00:09.132558952 +505 28 0 days 00:00:08.551853190 +505 29 0 days 00:00:03.050448970 +505 30 0 days 00:00:03.145809330 +505 31 0 days 00:00:03.362885571 +505 32 0 days 00:00:08.011463705 +505 33 0 days 00:00:03.870683502 +505 34 0 days 00:00:08.158541865 +505 35 0 days 00:00:03.357624910 +505 36 0 days 00:00:04.779335710 +505 37 0 days 00:00:05.132499523 +505 38 0 days 00:00:08.448806890 +505 39 0 days 00:00:05.495281296 +505 40 0 days 00:00:09.040020208 +505 41 0 days 00:00:05.019826904 +505 42 0 days 00:00:03.332387220 +505 43 0 days 00:00:03.660269457 +505 44 0 days 00:00:08.631521500 +505 45 0 days 00:00:04.950146650 +505 46 0 days 00:00:03.295563910 +505 47 0 days 00:00:09.475344480 +505 48 0 days 00:00:08.038303320 +505 49 0 days 00:00:09.239113457 +505 50 0 days 00:00:03.050755640 +505 51 0 days 00:00:04.725840950 +505 52 0 days 00:00:03.072114655 +505 53 0 days 00:00:03.259421588 +505 54 0 days 00:00:03.401567922 +505 55 0 days 00:00:05.346405444 +505 56 0 days 00:00:08.335624130 +505 57 0 days 00:00:08.836771696 +506 1 0 days 00:00:04.287119032 +506 2 0 days 00:00:04.365692185 +506 3 0 days 00:00:01.868457885 +506 4 0 days 00:00:01.779331216 +506 5 0 days 00:00:02.455554055 +506 6 0 days 00:00:02.598583698 +506 7 0 days 00:00:02.464548470 +506 8 0 days 00:00:02.401542205 +506 9 0 days 00:00:01.816387008 +506 
10 0 days 00:00:02.081922862 +506 11 0 days 00:00:02.632671505 +506 12 0 days 00:00:02.416008765 +506 13 0 days 00:00:04.181384370 +506 14 0 days 00:00:02.933504161 +506 15 0 days 00:00:04.357979145 +506 16 0 days 00:00:01.880140517 +506 17 0 days 00:00:01.598510090 +506 18 0 days 00:00:04.313875930 +506 19 0 days 00:00:04.370122875 +506 20 0 days 00:00:01.700605836 +506 21 0 days 00:00:04.197349582 +506 22 0 days 00:00:02.767565740 +506 23 0 days 00:00:02.516482565 +506 24 0 days 00:00:01.793439153 +506 25 0 days 00:00:01.904812807 +506 26 0 days 00:00:01.777767703 +506 27 0 days 00:00:02.445068880 +506 28 0 days 00:00:01.700245782 +506 29 0 days 00:00:01.795326130 +506 30 0 days 00:00:04.130866130 +506 31 0 days 00:00:04.426818456 +506 32 0 days 00:00:02.805712512 +506 33 0 days 00:00:04.171949255 +506 34 0 days 00:00:01.900740273 +506 35 0 days 00:00:01.719308664 +506 36 0 days 00:00:02.842233376 +506 37 0 days 00:00:04.727186145 +506 38 0 days 00:00:02.460511965 +506 39 0 days 00:00:02.584248776 +506 40 0 days 00:00:01.730184256 +506 41 0 days 00:00:02.688314325 +506 42 0 days 00:00:01.685528153 +506 43 0 days 00:00:01.571199870 +506 44 0 days 00:00:01.901668324 +506 45 0 days 00:00:01.863107145 +506 46 0 days 00:00:04.555205450 +506 47 0 days 00:00:02.416292380 +506 48 0 days 00:00:01.839667520 +506 49 0 days 00:00:01.623271355 +506 50 0 days 00:00:04.517920884 +506 51 0 days 00:00:02.616267430 +506 52 0 days 00:00:02.539422312 +506 53 0 days 00:00:02.774432133 +506 54 0 days 00:00:03.861711820 +506 55 0 days 00:00:04.368700010 +506 56 0 days 00:00:04.601183916 +506 57 0 days 00:00:02.547297464 +506 58 0 days 00:00:04.200115165 +506 59 0 days 00:00:02.581810328 +506 60 0 days 00:00:01.798122145 +506 61 0 days 00:00:02.594794984 +506 62 0 days 00:00:02.540204892 +506 63 0 days 00:00:04.113753510 +506 64 0 days 00:00:01.539475435 +506 65 0 days 00:00:02.735850384 +506 66 0 days 00:00:02.926653977 +506 67 0 days 00:00:04.265905110 +506 68 0 days 
00:00:02.539251905 +506 69 0 days 00:00:02.427165264 +506 70 0 days 00:00:02.808043776 +506 71 0 days 00:00:04.339025270 +506 72 0 days 00:00:04.761322708 +506 73 0 days 00:00:04.466732072 +506 74 0 days 00:00:01.826659925 +506 75 0 days 00:00:02.667736496 +506 76 0 days 00:00:01.927007483 +506 77 0 days 00:00:01.799520944 +506 78 0 days 00:00:04.772664057 +506 79 0 days 00:00:01.630319392 +506 80 0 days 00:00:02.519024470 +506 81 0 days 00:00:02.850039862 +506 82 0 days 00:00:01.723357940 +506 83 0 days 00:00:02.561285930 +506 84 0 days 00:00:02.708154988 +506 85 0 days 00:00:04.117568780 +506 86 0 days 00:00:04.182810550 +506 87 0 days 00:00:01.814759502 +506 88 0 days 00:00:01.734608390 +506 89 0 days 00:00:01.893228576 +506 90 0 days 00:00:02.569328988 +506 91 0 days 00:00:02.728816010 +506 92 0 days 00:00:01.759236720 +506 93 0 days 00:00:02.695764037 +506 94 0 days 00:00:01.752524130 +506 95 0 days 00:00:04.406193816 +506 96 0 days 00:00:02.544671265 +506 97 0 days 00:00:02.393102155 +506 98 0 days 00:00:04.187540960 +506 99 0 days 00:00:02.540703425 +506 100 0 days 00:00:02.717741436 +507 1 0 days 00:00:24.527389606 +507 2 0 days 00:00:15.847954500 +507 3 0 days 00:00:11.002763713 +507 4 0 days 00:00:17.004645813 +507 5 0 days 00:00:25.082749133 +507 6 0 days 00:00:26.006646226 +507 7 0 days 00:00:12.686751225 +507 8 0 days 00:00:12.725756635 +507 9 0 days 00:00:15.799321560 +507 10 0 days 00:00:16.512145386 +507 11 0 days 00:00:11.372479740 +507 12 0 days 00:00:17.626103900 +507 13 0 days 00:00:18.457087080 +507 14 0 days 00:00:13.459680855 +507 15 0 days 00:00:13.556048623 +507 16 0 days 00:00:15.811999113 +507 17 0 days 00:00:16.530445786 +507 18 0 days 00:00:13.044997984 +507 19 0 days 00:00:25.950202140 +507 20 0 days 00:00:24.501960133 +507 21 0 days 00:00:24.996546326 +507 22 0 days 00:00:24.983255960 +507 23 0 days 00:00:13.057489028 +507 24 0 days 00:00:13.469041050 +507 25 0 days 00:00:24.991040093 +507 26 0 days 00:00:11.008330806 +507 27 0 days 
00:00:24.982941626 +507 28 0 days 00:00:13.468277310 +507 29 0 days 00:00:15.809074486 +508 1 0 days 00:00:13.215844640 +508 2 0 days 00:00:12.897876985 +508 3 0 days 00:00:13.702778116 +508 4 0 days 00:00:18.852250910 +508 5 0 days 00:00:16.879633313 +508 6 0 days 00:00:27.530107315 +508 7 0 days 00:00:28.141527980 +508 8 0 days 00:00:17.342420700 +508 9 0 days 00:00:27.517064400 +508 10 0 days 00:00:20.827181173 +508 11 0 days 00:00:17.857864645 +508 12 0 days 00:00:20.371678084 +508 13 0 days 00:00:29.395476670 +508 14 0 days 00:00:12.432270305 +508 15 0 days 00:00:13.729477146 +508 16 0 days 00:00:16.005042400 +508 17 0 days 00:00:17.843003010 +508 18 0 days 00:00:17.835154330 +508 19 0 days 00:00:29.377198220 +509 1 0 days 00:00:15.564446460 +509 2 0 days 00:00:09.654334680 +509 3 0 days 00:00:10.319934980 +509 4 0 days 00:00:10.317011420 +509 5 0 days 00:00:08.471253090 +509 6 0 days 00:00:07.596176693 +509 7 0 days 00:00:07.595014080 +509 8 0 days 00:00:15.556208886 +509 9 0 days 00:00:07.157045126 +509 10 0 days 00:00:16.299439253 +509 11 0 days 00:00:09.342635617 +509 12 0 days 00:00:08.547532816 +509 13 0 days 00:00:09.649859346 +509 14 0 days 00:00:15.317394033 +509 15 0 days 00:00:09.446942538 +509 16 0 days 00:00:09.651336160 +509 17 0 days 00:00:06.951920926 +509 18 0 days 00:00:09.876183093 +509 19 0 days 00:00:15.307955360 +509 20 0 days 00:00:07.972513790 +509 21 0 days 00:00:09.069183306 +509 22 0 days 00:00:08.939633515 +509 23 0 days 00:00:15.552680453 +509 24 0 days 00:00:10.325218860 +509 25 0 days 00:00:16.298313953 +509 26 0 days 00:00:09.003873476 +509 27 0 days 00:00:09.213512617 +509 28 0 days 00:00:11.520241320 +509 29 0 days 00:00:15.548657493 +509 30 0 days 00:00:08.464076076 +509 31 0 days 00:00:15.544281566 +509 32 0 days 00:00:16.287489486 +509 33 0 days 00:00:09.874281320 +509 34 0 days 00:00:16.288571386 +509 35 0 days 00:00:15.305212493 +509 36 0 days 00:00:16.286679560 +509 37 0 days 00:00:09.639812353 +509 38 0 days 
00:00:08.459636572 +509 39 0 days 00:00:10.307803840 +509 40 0 days 00:00:09.866236806 +510 1 0 days 00:00:11.707423260 +510 2 0 days 00:00:08.563808280 +510 3 0 days 00:00:18.358780295 +510 4 0 days 00:00:15.836963053 +510 5 0 days 00:00:11.734420820 +510 6 0 days 00:00:08.061602255 +510 7 0 days 00:00:06.988938986 +510 8 0 days 00:00:17.680072580 +510 9 0 days 00:00:11.722208675 +510 10 0 days 00:00:08.559036112 +510 11 0 days 00:00:18.355324065 +510 12 0 days 00:00:11.124422265 +510 13 0 days 00:00:11.248021550 +510 14 0 days 00:00:11.728188310 +510 15 0 days 00:00:10.500967100 +510 16 0 days 00:00:08.657449755 +510 17 0 days 00:00:11.730189895 +510 18 0 days 00:00:11.247848845 +510 19 0 days 00:00:11.134665925 +510 20 0 days 00:00:11.119581870 +510 21 0 days 00:00:08.661553750 +510 22 0 days 00:00:17.697126500 +510 23 0 days 00:00:08.281053496 +510 24 0 days 00:00:08.071031315 +510 25 0 days 00:00:17.363677170 +510 26 0 days 00:00:11.943473780 +510 27 0 days 00:00:11.741641305 +510 28 0 days 00:00:11.133026360 +510 29 0 days 00:00:11.131917445 +510 30 0 days 00:00:17.376292660 +510 31 0 days 00:00:15.840404080 +510 32 0 days 00:00:08.264925420 +510 33 0 days 00:00:11.737603615 +510 34 0 days 00:00:08.675166315 +510 35 0 days 00:00:09.207794140 +510 36 0 days 00:00:11.826006636 +510 37 0 days 00:00:11.745180395 +511 1 0 days 00:01:31.237913753 +511 2 0 days 00:01:36.976856140 +511 3 0 days 00:01:23.745020573 +511 4 0 days 00:00:09.264259982 +511 5 0 days 00:01:30.281511986 +511 6 0 days 00:00:08.437030533 +511 7 0 days 00:00:59.981802073 +511 8 0 days 00:00:59.992316106 +511 9 0 days 00:00:09.468603950 +511 10 0 days 00:00:10.652586145 +511 11 0 days 00:00:13.536293760 +512 1 0 days 00:00:19.692293140 +512 2 0 days 00:00:41.595463472 +512 3 0 days 00:00:14.046059753 +512 4 0 days 00:00:30.728449926 +512 5 0 days 00:02:02.258702656 +512 6 0 days 00:02:15.934879264 +513 1 0 days 00:00:05.268755544 +513 2 0 days 00:00:26.728175866 +513 3 0 days 00:00:06.207126855 
+513 4 0 days 00:00:42.482013713 +513 5 0 days 00:00:19.212139120 +513 6 0 days 00:00:05.396709493 +513 7 0 days 00:00:10.618787275 +513 8 0 days 00:00:53.144427173 +513 9 0 days 00:00:45.963903170 +513 10 0 days 00:00:12.979008973 +513 11 0 days 00:00:10.452336146 +513 12 0 days 00:00:06.601049920 +513 13 0 days 00:00:11.640867620 +513 14 0 days 00:00:06.560738075 +513 15 0 days 00:00:12.358302900 +513 16 0 days 00:00:08.630529960 +513 17 0 days 00:00:10.493807440 +513 18 0 days 00:00:19.791172737 +513 19 0 days 00:00:05.516634146 +513 20 0 days 00:00:14.930517853 +513 21 0 days 00:00:29.498151640 +513 22 0 days 00:00:05.001794614 +513 23 0 days 00:00:04.613065995 +513 24 0 days 00:00:05.578929793 +513 25 0 days 00:00:09.775736720 +513 26 0 days 00:00:16.326206206 +513 27 0 days 00:00:39.638046860 +513 28 0 days 00:00:12.264459364 +513 29 0 days 00:00:18.209230366 +514 1 0 days 00:00:06.299563084 +514 2 0 days 00:00:32.991337800 +514 3 0 days 00:00:39.920350353 +514 4 0 days 00:00:19.695248770 +514 5 0 days 00:00:13.280866291 +514 6 0 days 00:01:09.561541876 +514 7 0 days 00:00:11.605692740 +514 8 0 days 00:00:16.395411804 +514 9 0 days 00:00:29.686826508 +515 1 0 days 00:00:21.206664666 +515 2 0 days 00:00:22.056409230 +515 3 0 days 00:00:14.186912212 +515 4 0 days 00:00:33.295730113 +515 5 0 days 00:00:20.469472488 +515 6 0 days 00:00:13.084003636 +515 7 0 days 00:00:19.666259928 +515 8 0 days 00:00:13.475528412 +515 9 0 days 00:00:19.656409825 +515 10 0 days 00:00:14.380349560 +515 11 0 days 00:00:19.627618656 +515 12 0 days 00:00:29.234205752 +516 1 0 days 00:00:17.505051330 +516 2 0 days 00:00:13.314691106 +516 4 0 days 00:00:19.019990223 +516 5 0 days 00:00:07.755740380 +516 6 0 days 00:00:08.461982896 +516 7 0 days 00:00:19.013217803 +516 8 0 days 00:00:12.475486302 +516 9 0 days 00:00:13.319903845 +516 10 0 days 00:00:08.517422260 +516 11 0 days 00:00:08.018498665 +516 12 0 days 00:00:09.047220057 +516 13 0 days 00:00:06.972976226 +516 14 0 days 
00:00:19.061319960 +516 15 0 days 00:00:08.254455644 +516 16 0 days 00:00:08.968415672 +516 17 0 days 00:00:19.259380646 +516 18 0 days 00:00:11.484317780 +516 19 0 days 00:00:06.968288353 +516 20 0 days 00:00:21.457961629 +517 1 0 days 00:00:16.621557830 +517 2 0 days 00:02:28.193694086 +518 1 0 days 00:00:54.731844436 +518 2 0 days 00:00:20.188091496 +518 3 0 days 00:00:10.571971844 +518 4 0 days 00:00:21.020639740 +518 5 0 days 00:00:59.255171211 +518 6 0 days 00:00:05.911325368 +518 7 0 days 00:00:17.124883453 +518 8 0 days 00:00:38.982160696 +519 1 0 days 00:01:49.466573265 +519 2 0 days 00:00:33.635447032 +519 3 0 days 00:00:21.756825328 +519 4 0 days 00:00:19.321905870 +519 5 0 days 00:00:22.139627733 +519 6 0 days 00:00:34.133234324 +520 1 0 days 00:00:35.976384066 +520 2 0 days 00:00:18.791282410 +520 3 0 days 00:00:06.774980210 +520 4 0 days 00:01:07.570439692 +520 5 0 days 00:00:16.766532218 +520 6 0 days 00:00:07.864823345 +520 7 0 days 00:00:29.487327914 +521 1 0 days 00:00:00.109972946 +521 3 0 days 00:00:00.131219980 +521 4 0 days 00:00:00.102795493 +521 7 0 days 00:00:00.130466673 +521 9 0 days 00:00:00.097710980 +521 10 0 days 00:00:00.107848353 +521 12 0 days 00:00:00.129894653 +521 13 0 days 00:00:00.098091480 +521 14 0 days 00:00:00.098325226 +521 15 0 days 00:00:00.132167533 +521 16 0 days 00:00:00.105920736 +521 17 0 days 00:00:00.105306966 +521 18 0 days 00:00:00.129409420 +521 20 0 days 00:00:00.138628553 +521 21 0 days 00:00:00.127559346 +521 22 0 days 00:00:00.108691100 +521 23 0 days 00:00:00.115058753 +521 24 0 days 00:00:00.131934680 +521 25 0 days 00:00:00.097786906 +521 26 0 days 00:00:00.089753693 +521 27 0 days 00:00:00.098596735 +521 29 0 days 00:00:00.089083313 +521 30 0 days 00:00:00.098260500 +521 32 0 days 00:00:00.104091893 +521 35 0 days 00:00:00.138250433 +521 36 0 days 00:00:00.092299653 +521 37 0 days 00:00:00.092642740 +521 39 0 days 00:00:00.089005873 +521 43 0 days 00:00:00.100518260 +521 45 0 days 00:00:00.099392070 
+521 46 0 days 00:00:00.133957940 +521 47 0 days 00:00:00.133868593 +521 48 0 days 00:00:00.101024490 +521 50 0 days 00:00:00.093237473 +521 51 0 days 00:00:00.110081060 +521 52 0 days 00:00:00.123681748 +521 53 0 days 00:00:00.117275105 +521 54 0 days 00:00:00.093153506 +521 55 0 days 00:00:00.110685486 +521 56 0 days 00:00:00.111421273 +521 59 0 days 00:00:00.090159726 +521 60 0 days 00:00:00.109978233 +521 62 0 days 00:00:00.100021793 +521 63 0 days 00:00:00.089151946 +521 65 0 days 00:00:00.104114266 +521 66 0 days 00:00:00.124189413 +521 67 0 days 00:00:00.133658146 +521 68 0 days 00:00:00.097995353 +521 69 0 days 00:00:00.107779573 +521 70 0 days 00:00:00.089360446 +521 71 0 days 00:00:00.089529140 +521 72 0 days 00:00:00.123465680 +521 73 0 days 00:00:00.097060286 +521 74 0 days 00:00:00.137402433 +521 75 0 days 00:00:00.110095646 +521 76 0 days 00:00:00.104321546 +521 79 0 days 00:00:00.107465994 +521 80 0 days 00:00:00.130979753 +521 81 0 days 00:00:00.132678446 +521 82 0 days 00:00:00.101896524 +521 83 0 days 00:00:00.092324186 +521 84 0 days 00:00:00.176768906 +521 86 0 days 00:00:00.090608806 +521 87 0 days 00:00:00.105913753 +521 88 0 days 00:00:00.106978213 +521 89 0 days 00:00:00.091620600 +521 90 0 days 00:00:00.099809640 +521 91 0 days 00:00:00.089903286 +521 92 0 days 00:00:00.131986366 +521 93 0 days 00:00:00.105445420 +521 94 0 days 00:00:00.108912513 +521 96 0 days 00:00:00.177781161 +521 97 0 days 00:00:00.134525746 +521 98 0 days 00:00:00.121875010 +521 99 0 days 00:00:00.108105146 +521 100 0 days 00:00:00.109912386 +522 1 0 days 00:00:00.178204160 +522 3 0 days 00:00:00.172555320 +522 4 0 days 00:00:00.179870822 +522 10 0 days 00:00:00.173306820 +522 14 0 days 00:00:00.174853097 +522 15 0 days 00:00:00.171622060 +522 16 0 days 00:00:00.138773571 +522 19 0 days 00:00:00.173828578 +522 20 0 days 00:00:00.099043750 +522 21 0 days 00:00:00.144694522 +522 22 0 days 00:00:00.143736412 +522 25 0 days 00:00:00.177154371 +522 26 0 days 
00:00:00.135037060 +522 28 0 days 00:00:00.097422240 +522 29 0 days 00:00:00.128761067 +522 31 0 days 00:00:00.106324906 +522 33 0 days 00:00:00.178195116 +522 37 0 days 00:00:00.179387987 +522 38 0 days 00:00:00.147207116 +522 41 0 days 00:00:00.145633632 +522 42 0 days 00:00:00.138777172 +522 44 0 days 00:00:00.165314695 +522 52 0 days 00:00:00.173426922 +522 56 0 days 00:00:00.174215692 +522 57 0 days 00:00:00.182444730 +522 58 0 days 00:00:00.182662424 +522 59 0 days 00:00:00.181501452 +522 60 0 days 00:00:00.145654858 +522 61 0 days 00:00:00.137670082 +522 62 0 days 00:00:00.123184508 +522 64 0 days 00:00:00.177366245 +522 65 0 days 00:00:00.173243453 +522 67 0 days 00:00:00.181549004 +522 68 0 days 00:00:00.175058971 +522 69 0 days 00:00:00.145041038 +522 71 0 days 00:00:00.135293253 +522 73 0 days 00:00:00.150900375 +522 77 0 days 00:00:00.145159534 +522 78 0 days 00:00:00.128461295 +522 79 0 days 00:00:00.175182910 +522 80 0 days 00:00:00.141765490 +522 82 0 days 00:00:00.129038377 +522 83 0 days 00:00:00.179588990 +522 87 0 days 00:00:00.171217568 +522 89 0 days 00:00:00.140052243 +522 90 0 days 00:00:00.097844965 +522 92 0 days 00:00:00.176299127 +522 93 0 days 00:00:00.142310528 +522 94 0 days 00:00:00.129729578 +522 95 0 days 00:00:00.142058051 +522 97 0 days 00:00:00.170518438 +523 1 0 days 00:00:00.073620240 +523 2 0 days 00:00:00.059002126 +523 3 0 days 00:00:00.059061760 +523 4 0 days 00:00:00.058397506 +523 5 0 days 00:00:00.050985660 +523 6 0 days 00:00:00.073493833 +523 7 0 days 00:00:00.074532946 +523 8 0 days 00:00:00.057882626 +523 9 0 days 00:00:00.073082533 +523 10 0 days 00:00:00.059942731 +523 11 0 days 00:00:00.051272253 +523 12 0 days 00:00:00.050954260 +523 13 0 days 00:00:00.052427886 +523 15 0 days 00:00:00.062179120 +523 16 0 days 00:00:00.060392206 +523 17 0 days 00:00:00.072002553 +523 18 0 days 00:00:00.072588040 +523 19 0 days 00:00:00.058104628 +523 20 0 days 00:00:00.069684253 +523 21 0 days 00:00:00.057565080 +523 22 0 days 
00:00:00.055175340 +523 24 0 days 00:00:00.060538573 +523 25 0 days 00:00:00.079017445 +523 26 0 days 00:00:00.061617815 +523 27 0 days 00:00:00.058810653 +523 28 0 days 00:00:00.050617726 +523 29 0 days 00:00:00.073902220 +523 30 0 days 00:00:00.059864026 +523 31 0 days 00:00:00.060099700 +523 32 0 days 00:00:00.056656153 +523 33 0 days 00:00:00.072047746 +523 34 0 days 00:00:00.073080346 +523 35 0 days 00:00:00.060193933 +523 36 0 days 00:00:00.063182480 +523 37 0 days 00:00:00.072929373 +523 38 0 days 00:00:00.051370206 +523 39 0 days 00:00:00.075666953 +523 40 0 days 00:00:00.056251133 +523 41 0 days 00:00:00.073594686 +523 42 0 days 00:00:00.060081746 +523 43 0 days 00:00:00.057862040 +523 44 0 days 00:00:00.072359020 +523 45 0 days 00:00:00.072875586 +523 47 0 days 00:00:00.056065933 +523 48 0 days 00:00:00.063334910 +523 49 0 days 00:00:00.074675513 +523 51 0 days 00:00:00.052885526 +523 52 0 days 00:00:00.075115446 +523 53 0 days 00:00:00.051288413 +523 55 0 days 00:00:00.054637273 +523 56 0 days 00:00:00.073110246 +523 58 0 days 00:00:00.064577680 +523 59 0 days 00:00:00.056087445 +523 61 0 days 00:00:00.055305046 +523 62 0 days 00:00:00.050181913 +523 63 0 days 00:00:00.050347193 +523 64 0 days 00:00:00.079352590 +523 65 0 days 00:00:00.050154120 +523 66 0 days 00:00:00.072779400 +523 67 0 days 00:00:00.058215073 +523 68 0 days 00:00:00.070010444 +523 69 0 days 00:00:00.056039426 +523 70 0 days 00:00:00.074270060 +523 71 0 days 00:00:00.074951846 +523 72 0 days 00:00:00.051038540 +523 73 0 days 00:00:00.054737785 +523 74 0 days 00:00:00.056188753 +523 75 0 days 00:00:00.058529806 +523 76 0 days 00:00:00.050255453 +523 77 0 days 00:00:00.072507713 +523 78 0 days 00:00:00.056087780 +523 79 0 days 00:00:00.075223233 +523 80 0 days 00:00:00.051420600 +523 81 0 days 00:00:00.067561740 +523 82 0 days 00:00:00.061108365 +523 83 0 days 00:00:00.086311250 +523 84 0 days 00:00:00.055892780 +523 85 0 days 00:00:00.070031540 +523 86 0 days 00:00:00.079983835 +523 87 
0 days 00:00:00.060908180 +523 88 0 days 00:00:00.075374760 +523 89 0 days 00:00:00.073501220 +523 90 0 days 00:00:00.078060465 +523 91 0 days 00:00:00.062821455 +523 92 0 days 00:00:00.052164960 +523 93 0 days 00:00:00.080316410 +523 94 0 days 00:00:00.050922580 +523 95 0 days 00:00:00.052610346 +523 96 0 days 00:00:00.058091680 +523 97 0 days 00:00:00.057625940 +523 98 0 days 00:00:00.057844793 +523 99 0 days 00:00:00.079602325 +523 100 0 days 00:00:00.082841740 +524 1 0 days 00:00:00.092497578 +524 2 0 days 00:00:00.075867502 +524 6 0 days 00:00:00.090682391 +524 7 0 days 00:00:00.052778006 +524 8 0 days 00:00:00.075666572 +524 9 0 days 00:00:00.074455467 +524 10 0 days 00:00:00.094453085 +524 11 0 days 00:00:00.069491246 +524 13 0 days 00:00:00.075936436 +524 14 0 days 00:00:00.076987600 +524 16 0 days 00:00:00.062313613 +524 17 0 days 00:00:00.068793830 +524 19 0 days 00:00:00.068165510 +524 20 0 days 00:00:00.083733097 +524 21 0 days 00:00:00.052535813 +524 24 0 days 00:00:00.075481020 +524 25 0 days 00:00:00.093995166 +524 26 0 days 00:00:00.074160974 +524 27 0 days 00:00:00.068769831 +524 28 0 days 00:00:00.066340630 +524 29 0 days 00:00:00.068202236 +524 30 0 days 00:00:00.075716478 +524 31 0 days 00:00:00.067251684 +524 32 0 days 00:00:00.094889116 +524 34 0 days 00:00:00.097670548 +524 35 0 days 00:00:00.090106766 +524 38 0 days 00:00:00.068669160 +524 39 0 days 00:00:00.066858980 +524 40 0 days 00:00:00.070730760 +524 42 0 days 00:00:00.069408166 +524 43 0 days 00:00:00.075641347 +524 44 0 days 00:00:00.094442438 +524 46 0 days 00:00:00.051881506 +524 47 0 days 00:00:00.097491307 +524 48 0 days 00:00:00.059842813 +524 49 0 days 00:00:00.095222501 +524 52 0 days 00:00:00.052841066 +524 53 0 days 00:00:00.068874600 +524 55 0 days 00:00:00.061132134 +524 57 0 days 00:00:00.059932526 +524 58 0 days 00:00:00.068968791 +524 59 0 days 00:00:00.094894828 +524 61 0 days 00:00:00.098270036 +524 62 0 days 00:00:00.073699946 +524 63 0 days 00:00:00.076335353 +524 
64 0 days 00:00:00.061822446 +524 65 0 days 00:00:00.094252324 +524 66 0 days 00:00:00.075043120 +524 67 0 days 00:00:00.096360040 +524 71 0 days 00:00:00.087950073 +524 72 0 days 00:00:00.094225924 +524 73 0 days 00:00:00.096064363 +524 75 0 days 00:00:00.095245620 +524 76 0 days 00:00:00.093563620 +524 78 0 days 00:00:00.074890668 +524 80 0 days 00:00:00.076588713 +524 82 0 days 00:00:00.074672554 +524 83 0 days 00:00:00.096494400 +524 85 0 days 00:00:00.075343852 +524 86 0 days 00:00:00.052670313 +524 88 0 days 00:00:00.097150581 +524 90 0 days 00:00:00.074356135 +524 91 0 days 00:00:00.098618966 +524 92 0 days 00:00:00.058873024 +524 93 0 days 00:00:00.060432900 +524 94 0 days 00:00:00.069731921 +524 95 0 days 00:00:00.097499485 +524 96 0 days 00:00:00.063908020 +524 97 0 days 00:00:00.067559929 +524 98 0 days 00:00:00.094943508 +524 99 0 days 00:00:00.095770427 +524 100 0 days 00:00:00.076517856 +525 3 0 days 00:00:00.164821268 +525 4 0 days 00:00:00.135660468 +525 8 0 days 00:00:00.117680522 +525 9 0 days 00:00:00.132312806 +525 10 0 days 00:00:00.161106082 +525 13 0 days 00:00:00.116796830 +525 15 0 days 00:00:00.125518766 +525 16 0 days 00:00:00.113883563 +525 18 0 days 00:00:00.130907140 +525 19 0 days 00:00:00.114055569 +525 21 0 days 00:00:00.164569415 +525 22 0 days 00:00:00.130748995 +525 26 0 days 00:00:00.170611041 +525 28 0 days 00:00:00.133496770 +525 29 0 days 00:00:00.176151345 +525 30 0 days 00:00:00.140816370 +525 32 0 days 00:00:00.166697218 +525 33 0 days 00:00:00.149946775 +525 35 0 days 00:00:00.131562020 +525 36 0 days 00:00:00.132451780 +525 38 0 days 00:00:00.108432353 +525 39 0 days 00:00:00.111521632 +525 40 0 days 00:00:00.132917778 +525 41 0 days 00:00:00.114850184 +525 43 0 days 00:00:00.161464522 +525 44 0 days 00:00:00.123156452 +525 46 0 days 00:00:00.117479236 +525 49 0 days 00:00:00.112957642 +525 50 0 days 00:00:00.119497288 +525 51 0 days 00:00:00.134470643 +525 54 0 days 00:00:00.133849994 +525 55 0 days 00:00:00.115206490 
+525 56 0 days 00:00:00.133757310 +525 57 0 days 00:00:00.132715262 +525 61 0 days 00:00:00.177544622 +525 63 0 days 00:00:00.114450578 +525 65 0 days 00:00:00.126261830 +525 66 0 days 00:00:00.115315982 +525 72 0 days 00:00:00.154404824 +525 74 0 days 00:00:00.135500586 +525 77 0 days 00:00:00.168227155 +525 78 0 days 00:00:00.115005456 +525 79 0 days 00:00:00.116417190 +525 80 0 days 00:00:00.131577253 +525 81 0 days 00:00:00.115085374 +525 82 0 days 00:00:00.129571824 +525 84 0 days 00:00:00.133285168 +525 85 0 days 00:00:00.132044940 +525 86 0 days 00:00:00.145505505 +525 87 0 days 00:00:00.112731454 +525 88 0 days 00:00:00.170425777 +525 91 0 days 00:00:00.137177412 +525 93 0 days 00:00:00.117684248 +525 96 0 days 00:00:00.112963972 +525 97 0 days 00:00:00.124630156 +525 98 0 days 00:00:00.171273460 +525 99 0 days 00:00:00.115415351 +526 1 0 days 00:00:00.093846970 +526 2 0 days 00:00:00.065904717 +526 4 0 days 00:00:00.068185386 +526 5 0 days 00:00:00.062911244 +526 6 0 days 00:00:00.073850156 +526 7 0 days 00:00:00.073801374 +526 9 0 days 00:00:00.064541287 +526 10 0 days 00:00:00.095733735 +526 13 0 days 00:00:00.068938623 +526 14 0 days 00:00:00.095577867 +526 17 0 days 00:00:00.063960390 +526 18 0 days 00:00:00.063065895 +526 19 0 days 00:00:00.071880640 +526 20 0 days 00:00:00.062953508 +526 23 0 days 00:00:00.087785764 +526 24 0 days 00:00:00.052415760 +526 25 0 days 00:00:00.071718697 +526 26 0 days 00:00:00.072842426 +526 29 0 days 00:00:00.069006826 +526 30 0 days 00:00:00.063293740 +526 31 0 days 00:00:00.065923460 +526 32 0 days 00:00:00.062527420 +526 33 0 days 00:00:00.098847248 +526 36 0 days 00:00:00.075930706 +526 37 0 days 00:00:00.071796302 +526 39 0 days 00:00:00.071295290 +526 42 0 days 00:00:00.063462636 +526 43 0 days 00:00:00.095514658 +526 44 0 days 00:00:00.095767086 +526 45 0 days 00:00:00.061470880 +526 47 0 days 00:00:00.075316785 +526 48 0 days 00:00:00.095250544 +526 49 0 days 00:00:00.069868306 +526 50 0 days 00:00:00.076381107 
+526 52 0 days 00:00:00.066573225 +526 53 0 days 00:00:00.071881940 +526 54 0 days 00:00:00.065232025 +526 56 0 days 00:00:00.074726680 +526 57 0 days 00:00:00.065788981 +526 59 0 days 00:00:00.100006735 +526 61 0 days 00:00:00.063540170 +526 62 0 days 00:00:00.065705496 +526 63 0 days 00:00:00.061554840 +526 64 0 days 00:00:00.057874720 +526 66 0 days 00:00:00.069668156 +526 67 0 days 00:00:00.067136784 +526 68 0 days 00:00:00.065910082 +526 69 0 days 00:00:00.071510677 +526 70 0 days 00:00:00.059750568 +526 71 0 days 00:00:00.078163326 +526 73 0 days 00:00:00.063238836 +526 74 0 days 00:00:00.065205160 +526 75 0 days 00:00:00.061408894 +526 76 0 days 00:00:00.066423042 +526 78 0 days 00:00:00.084228115 +526 79 0 days 00:00:00.072820874 +526 80 0 days 00:00:00.057048086 +526 81 0 days 00:00:00.096864978 +526 82 0 days 00:00:00.097919850 +526 85 0 days 00:00:00.065951911 +526 86 0 days 00:00:00.064681103 +526 87 0 days 00:00:00.066454538 +526 88 0 days 00:00:00.070631976 +526 89 0 days 00:00:00.068839568 +526 90 0 days 00:00:00.063456805 +526 91 0 days 00:00:00.065075162 +526 92 0 days 00:00:00.063546918 +526 94 0 days 00:00:00.067286960 +526 97 0 days 00:00:00.071910406 +526 98 0 days 00:00:00.096618034 +526 99 0 days 00:00:00.095792570 +526 100 0 days 00:00:00.096793715 +527 1 0 days 00:00:00.427034580 +527 2 0 days 00:00:03.479906070 +527 3 0 days 00:00:01.194411040 +527 4 0 days 00:00:01.636994125 +527 5 0 days 00:00:00.864957256 +527 6 0 days 00:00:00.288685262 +527 7 0 days 00:00:03.821037670 +527 8 0 days 00:00:03.891827210 +527 9 0 days 00:00:00.248388346 +527 10 0 days 00:00:00.965165120 +527 11 0 days 00:00:00.880810935 +527 12 0 days 00:00:00.789944168 +527 13 0 days 00:00:03.409821275 +527 14 0 days 00:00:00.747234094 +527 15 0 days 00:00:05.004108010 +527 16 0 days 00:00:00.563554058 +527 17 0 days 00:00:00.502291840 +527 18 0 days 00:00:00.517090955 +527 19 0 days 00:00:00.744979700 +527 20 0 days 00:00:00.853946950 +527 21 0 days 00:00:00.581287596 
+527 22 0 days 00:00:00.270468813 +527 23 0 days 00:00:01.900076710 +527 24 0 days 00:00:05.035951575 +527 25 0 days 00:00:02.732277380 +527 26 0 days 00:00:01.423257380 +527 27 0 days 00:00:00.740934535 +527 28 0 days 00:00:02.039587009 +527 29 0 days 00:00:01.348236852 +527 30 0 days 00:00:00.718638642 +527 31 0 days 00:00:04.013642545 +527 32 0 days 00:00:01.120875884 +527 33 0 days 00:00:00.487645944 +527 34 0 days 00:00:00.444872750 +527 35 0 days 00:00:00.706582860 +527 36 0 days 00:00:00.647659404 +527 37 0 days 00:00:04.461294625 +527 38 0 days 00:00:00.493648093 +527 39 0 days 00:00:03.918436170 +527 40 0 days 00:00:05.453104695 +527 41 0 days 00:00:00.992955455 +527 42 0 days 00:00:04.844911280 +527 43 0 days 00:00:00.354583310 +527 44 0 days 00:00:01.297800690 +527 45 0 days 00:00:00.891841074 +527 46 0 days 00:00:00.941751643 +527 47 0 days 00:00:02.094711440 +527 48 0 days 00:00:01.301907594 +527 49 0 days 00:00:01.423977615 +527 50 0 days 00:00:02.571925415 +527 51 0 days 00:00:01.316905892 +527 52 0 days 00:00:00.511174123 +527 53 0 days 00:00:00.721573160 +527 54 0 days 00:00:00.915778368 +527 55 0 days 00:00:00.777700656 +527 56 0 days 00:00:01.887042124 +527 57 0 days 00:00:01.448009630 +527 58 0 days 00:00:00.296087882 +527 59 0 days 00:00:00.622885825 +527 60 0 days 00:00:04.973142068 +527 61 0 days 00:00:01.282579105 +527 62 0 days 00:00:01.149344920 +527 63 0 days 00:00:01.726451942 +527 64 0 days 00:00:01.301322815 +527 65 0 days 00:00:01.419534492 +527 66 0 days 00:00:03.910674810 +527 67 0 days 00:00:04.671104775 +527 68 0 days 00:00:00.270140010 +527 69 0 days 00:00:00.493181397 +527 70 0 days 00:00:00.502973900 +527 71 0 days 00:00:01.059100786 +527 72 0 days 00:00:00.883079815 +527 73 0 days 00:00:01.180139835 +527 74 0 days 00:00:01.763234485 +527 75 0 days 00:00:00.488894656 +527 76 0 days 00:00:00.385527097 +527 77 0 days 00:00:00.525079896 +527 78 0 days 00:00:00.666489596 +527 79 0 days 00:00:01.250728245 +527 80 0 days 
00:00:00.977731480 +527 81 0 days 00:00:02.562728775 +527 82 0 days 00:00:00.402618445 +527 83 0 days 00:00:00.684481666 +527 84 0 days 00:00:02.055088580 +527 85 0 days 00:00:02.267366960 +527 86 0 days 00:00:01.572363070 +527 87 0 days 00:00:02.387035055 +527 88 0 days 00:00:00.377673716 +527 89 0 days 00:00:00.533327571 +527 90 0 days 00:00:00.909980366 +527 91 0 days 00:00:00.850654960 +527 92 0 days 00:00:00.380352604 +527 93 0 days 00:00:00.511426445 +527 94 0 days 00:00:00.685298630 +527 95 0 days 00:00:00.490948652 +527 96 0 days 00:00:00.417748660 +527 97 0 days 00:00:00.679712796 +527 98 0 days 00:00:01.149940155 +527 99 0 days 00:00:01.494743500 +527 100 0 days 00:00:04.208375255 +528 1 0 days 00:00:00.142116132 +528 2 0 days 00:00:00.392177860 +528 3 0 days 00:00:00.298925082 +528 4 0 days 00:00:01.211146945 +528 5 0 days 00:00:00.259144903 +528 6 0 days 00:00:00.433657632 +528 7 0 days 00:00:01.003969960 +528 8 0 days 00:00:00.189785907 +528 9 0 days 00:00:00.298769144 +528 10 0 days 00:00:00.236023933 +528 11 0 days 00:00:01.340280290 +528 12 0 days 00:00:00.221216488 +528 13 0 days 00:00:01.568720775 +528 14 0 days 00:00:00.548902952 +528 15 0 days 00:00:00.552256150 +528 16 0 days 00:00:00.733662110 +528 17 0 days 00:00:01.657171955 +528 18 0 days 00:00:00.330728625 +528 19 0 days 00:00:00.296174457 +528 20 0 days 00:00:00.280968550 +528 21 0 days 00:00:01.649586785 +528 22 0 days 00:00:00.143021622 +528 23 0 days 00:00:00.152432965 +528 24 0 days 00:00:01.618641355 +528 25 0 days 00:00:00.122761653 +528 26 0 days 00:00:02.023601872 +528 27 0 days 00:00:00.270882353 +528 28 0 days 00:00:00.644511120 +528 29 0 days 00:00:00.471633976 +528 30 0 days 00:00:00.235205306 +528 31 0 days 00:00:00.223864467 +528 32 0 days 00:00:00.214061957 +528 33 0 days 00:00:00.219426920 +528 34 0 days 00:00:01.305750270 +528 35 0 days 00:00:01.510384155 +528 36 0 days 00:00:00.379759727 +528 37 0 days 00:00:00.329016784 +528 38 0 days 00:00:01.194336160 +528 39 0 days 
00:00:00.176620449 +528 40 0 days 00:00:00.338739420 +528 41 0 days 00:00:00.248365173 +528 42 0 days 00:00:00.416948674 +528 43 0 days 00:00:00.291268440 +528 44 0 days 00:00:00.276750595 +528 45 0 days 00:00:01.225454625 +528 46 0 days 00:00:02.017463210 +528 47 0 days 00:00:00.998753488 +528 48 0 days 00:00:00.928790864 +528 49 0 days 00:00:00.500167105 +528 50 0 days 00:00:00.271939360 +528 51 0 days 00:00:00.633143850 +528 52 0 days 00:00:00.282567978 +528 53 0 days 00:00:00.587027104 +528 54 0 days 00:00:00.485007085 +528 55 0 days 00:00:01.807938816 +528 56 0 days 00:00:00.166801347 +528 57 0 days 00:00:02.116165356 +528 58 0 days 00:00:01.142719340 +528 59 0 days 00:00:01.028094724 +528 60 0 days 00:00:00.350877065 +528 61 0 days 00:00:00.431282492 +528 62 0 days 00:00:01.856251504 +528 63 0 days 00:00:00.491684890 +528 64 0 days 00:00:00.175660626 +528 65 0 days 00:00:00.309719080 +528 66 0 days 00:00:01.029531560 +528 67 0 days 00:00:00.229996453 +528 68 0 days 00:00:01.340269310 +528 69 0 days 00:00:00.472918060 +528 70 0 days 00:00:02.321208396 +528 71 0 days 00:00:01.435912930 +528 72 0 days 00:00:00.184989100 +528 73 0 days 00:00:00.183803090 +528 74 0 days 00:00:00.152632641 +528 75 0 days 00:00:00.203279128 +528 76 0 days 00:00:00.425902195 +528 77 0 days 00:00:00.532114710 +528 78 0 days 00:00:00.254984824 +528 79 0 days 00:00:00.612626612 +528 80 0 days 00:00:00.511104690 +528 81 0 days 00:00:00.370589256 +528 82 0 days 00:00:00.473048208 +528 83 0 days 00:00:00.283199510 +528 84 0 days 00:00:01.677879755 +528 85 0 days 00:00:00.642280815 +528 86 0 days 00:00:00.365159962 +528 87 0 days 00:00:00.581489660 +528 88 0 days 00:00:00.482981072 +528 89 0 days 00:00:00.534057172 +528 90 0 days 00:00:00.554139505 +528 91 0 days 00:00:00.595135795 +528 92 0 days 00:00:01.338476556 +528 93 0 days 00:00:00.321483850 +528 94 0 days 00:00:02.525484070 +528 95 0 days 00:00:00.475371733 +528 96 0 days 00:00:00.629902800 +528 97 0 days 00:00:00.468926040 +528 98 
0 days 00:00:00.203672131 +528 99 0 days 00:00:01.160160610 +528 100 0 days 00:00:00.695130855 +529 1 0 days 00:00:00.440031375 +529 2 0 days 00:00:01.599711653 +529 3 0 days 00:00:00.513539871 +529 4 0 days 00:00:01.277467167 +529 5 0 days 00:00:00.266311897 +529 6 0 days 00:00:00.819831916 +529 7 0 days 00:00:01.254275830 +529 8 0 days 00:00:00.337314655 +529 9 0 days 00:00:00.549307406 +529 10 0 days 00:00:00.571215214 +529 11 0 days 00:00:02.704038330 +529 12 0 days 00:00:01.076639621 +529 13 0 days 00:00:01.137873833 +529 14 0 days 00:00:00.812431505 +529 15 0 days 00:00:00.541899240 +529 16 0 days 00:00:00.581570761 +529 17 0 days 00:00:04.458734656 +529 18 0 days 00:00:00.754228777 +529 19 0 days 00:00:01.705732384 +529 20 0 days 00:00:00.291125520 +529 21 0 days 00:00:00.372945178 +529 22 0 days 00:00:05.552530800 +529 23 0 days 00:00:03.225807575 +529 24 0 days 00:00:00.260586893 +529 25 0 days 00:00:00.635786512 +529 26 0 days 00:00:00.411045827 +529 27 0 days 00:00:00.896155231 +529 28 0 days 00:00:00.275893267 +529 29 0 days 00:00:05.513968356 +529 30 0 days 00:00:00.213302680 +529 31 0 days 00:00:00.559512542 +529 32 0 days 00:00:00.328824641 +529 33 0 days 00:00:00.558701106 +529 34 0 days 00:00:01.443274534 +529 35 0 days 00:00:00.468486150 +529 36 0 days 00:00:04.677017553 +529 37 0 days 00:00:00.865265034 +529 38 0 days 00:00:00.496048752 +529 39 0 days 00:00:02.290170210 +529 40 0 days 00:00:03.272361805 +529 41 0 days 00:00:01.709122050 +529 42 0 days 00:00:00.606124132 +529 43 0 days 00:00:00.996956202 +529 44 0 days 00:00:00.522129836 +529 45 0 days 00:00:01.676951040 +529 46 0 days 00:00:00.431832521 +529 47 0 days 00:00:01.366303780 +529 48 0 days 00:00:03.986082092 +529 49 0 days 00:00:00.657171507 +529 50 0 days 00:00:01.410325315 +529 51 0 days 00:00:04.459418750 +529 52 0 days 00:00:01.236135870 +529 53 0 days 00:00:00.787601580 +529 54 0 days 00:00:02.425066505 +529 55 0 days 00:00:00.643846790 +529 56 0 days 00:00:00.706217042 +529 57 0 
days 00:00:01.120854100 +529 58 0 days 00:00:00.794282932 +529 59 0 days 00:00:01.418330157 +529 60 0 days 00:00:00.730558140 +529 61 0 days 00:00:00.865979462 +529 62 0 days 00:00:00.412898737 +529 63 0 days 00:00:00.973318135 +529 64 0 days 00:00:00.950065153 +529 65 0 days 00:00:01.204419860 +529 66 0 days 00:00:02.115699046 +529 67 0 days 00:00:01.520891214 +529 68 0 days 00:00:00.744024277 +529 69 0 days 00:00:03.215555595 +529 70 0 days 00:00:00.451928961 +529 71 0 days 00:00:00.845001689 +529 72 0 days 00:00:05.031618673 +529 73 0 days 00:00:00.369859560 +529 74 0 days 00:00:00.308665529 +529 75 0 days 00:00:01.933600010 +529 76 0 days 00:00:04.130387690 +529 77 0 days 00:00:02.251488200 +529 78 0 days 00:00:00.298773141 +529 79 0 days 00:00:01.233803186 +529 80 0 days 00:00:00.845367520 +529 81 0 days 00:00:00.401775045 +529 82 0 days 00:00:01.666104535 +529 83 0 days 00:00:00.467324132 +529 84 0 days 00:00:00.487001717 +529 85 0 days 00:00:00.653345980 +529 86 0 days 00:00:01.285510584 +529 87 0 days 00:00:00.301116201 +529 88 0 days 00:00:00.643703812 +529 89 0 days 00:00:04.561861223 +529 90 0 days 00:00:01.005681526 +529 91 0 days 00:00:01.465843486 +529 92 0 days 00:00:00.413103352 +529 93 0 days 00:00:03.721169475 +529 94 0 days 00:00:00.436055110 +529 95 0 days 00:00:01.844792010 +529 96 0 days 00:00:05.027405580 +529 97 0 days 00:00:00.925988088 +529 98 0 days 00:00:00.381366575 +529 99 0 days 00:00:00.490332334 +529 100 0 days 00:00:01.205860141 +530 1 0 days 00:00:00.227784649 +530 2 0 days 00:00:00.552792656 +530 3 0 days 00:00:02.804702286 +530 4 0 days 00:00:00.377751665 +530 5 0 days 00:00:00.117891380 +530 6 0 days 00:00:00.515128872 +530 7 0 days 00:00:00.144505941 +530 8 0 days 00:00:00.623666076 +530 9 0 days 00:00:00.253538067 +530 10 0 days 00:00:02.463786192 +530 11 0 days 00:00:00.616298556 +530 12 0 days 00:00:00.423704350 +530 13 0 days 00:00:02.165357930 +530 14 0 days 00:00:02.143264330 +530 15 0 days 00:00:00.389967808 +530 16 0 
days 00:00:00.274749610 +530 17 0 days 00:00:00.686345655 +530 18 0 days 00:00:00.416851570 +530 19 0 days 00:00:01.301811580 +530 20 0 days 00:00:00.555387478 +530 21 0 days 00:00:01.054444613 +530 22 0 days 00:00:01.835402665 +530 23 0 days 00:00:00.447377920 +530 24 0 days 00:00:00.193894931 +530 25 0 days 00:00:00.315839316 +530 26 0 days 00:00:00.300709888 +530 27 0 days 00:00:00.433813277 +530 28 0 days 00:00:00.406721473 +530 29 0 days 00:00:00.705390263 +530 30 0 days 00:00:02.733285566 +530 31 0 days 00:00:00.337173027 +530 32 0 days 00:00:00.277968175 +530 33 0 days 00:00:00.758722350 +530 34 0 days 00:00:01.814983660 +530 35 0 days 00:00:00.720575795 +530 36 0 days 00:00:00.356094112 +530 37 0 days 00:00:00.244608783 +530 38 0 days 00:00:00.530817303 +530 39 0 days 00:00:00.369267522 +530 40 0 days 00:00:00.159040518 +530 41 0 days 00:00:00.192121998 +530 42 0 days 00:00:00.421522556 +530 43 0 days 00:00:00.444631677 +530 44 0 days 00:00:00.212234408 +530 45 0 days 00:00:00.513894595 +530 46 0 days 00:00:00.451546877 +530 47 0 days 00:00:00.442553895 +530 48 0 days 00:00:00.292818154 +530 49 0 days 00:00:00.371738522 +530 50 0 days 00:00:00.191929700 +530 51 0 days 00:00:00.711741003 +530 52 0 days 00:00:00.136215955 +530 53 0 days 00:00:00.255067284 +530 54 0 days 00:00:00.127899897 +530 55 0 days 00:00:00.425379888 +530 56 0 days 00:00:00.599245935 +530 57 0 days 00:00:00.399001749 +530 58 0 days 00:00:00.323240081 +530 59 0 days 00:00:00.257482798 +530 60 0 days 00:00:00.609699825 +530 61 0 days 00:00:00.195112597 +530 62 0 days 00:00:00.260630980 +530 63 0 days 00:00:01.088732572 +530 64 0 days 00:00:00.324396422 +530 65 0 days 00:00:00.265546763 +530 66 0 days 00:00:00.239232210 +530 67 0 days 00:00:00.238898361 +530 68 0 days 00:00:00.475764310 +530 69 0 days 00:00:00.417402997 +530 70 0 days 00:00:00.263744580 +530 71 0 days 00:00:00.735351850 +530 72 0 days 00:00:00.192319421 +530 73 0 days 00:00:00.481324845 +530 74 0 days 00:00:00.733723770 
+530 75 0 days 00:00:00.704752867 +530 76 0 days 00:00:00.239898495 +530 77 0 days 00:00:00.234237136 +530 78 0 days 00:00:01.777069216 +530 79 0 days 00:00:00.268076971 +530 80 0 days 00:00:00.717030205 +530 81 0 days 00:00:00.367392820 +530 82 0 days 00:00:00.577697600 +530 83 0 days 00:00:00.529804315 +530 84 0 days 00:00:01.826339743 +530 85 0 days 00:00:00.185999924 +530 86 0 days 00:00:00.233455996 +530 87 0 days 00:00:00.778878810 +530 88 0 days 00:00:00.513949976 +530 89 0 days 00:00:01.943722040 +530 90 0 days 00:00:00.411611275 +530 91 0 days 00:00:00.605914476 +530 92 0 days 00:00:00.268620962 +530 93 0 days 00:00:00.499248793 +530 94 0 days 00:00:00.361794298 +530 95 0 days 00:00:01.892823044 +530 96 0 days 00:00:00.602212024 +530 97 0 days 00:00:01.643744164 +530 98 0 days 00:00:01.630820030 +530 99 0 days 00:00:00.625810390 +530 100 0 days 00:00:00.683757644 +531 1 0 days 00:00:00.128908166 +531 3 0 days 00:00:00.125315305 +531 4 0 days 00:00:00.112675380 +531 5 0 days 00:00:00.134528862 +531 8 0 days 00:00:00.129999070 +531 11 0 days 00:00:00.130923733 +531 12 0 days 00:00:00.136329395 +531 13 0 days 00:00:00.116275420 +531 14 0 days 00:00:00.140645305 +531 16 0 days 00:00:00.158388125 +531 17 0 days 00:00:00.111678567 +531 20 0 days 00:00:00.108824366 +531 22 0 days 00:00:00.113319200 +531 24 0 days 00:00:00.136513623 +531 26 0 days 00:00:00.112982682 +531 28 0 days 00:00:00.128458446 +531 29 0 days 00:00:00.118812967 +531 32 0 days 00:00:00.120237536 +531 34 0 days 00:00:00.114182363 +531 35 0 days 00:00:00.113282877 +531 37 0 days 00:00:00.138672938 +531 39 0 days 00:00:00.127319760 +531 41 0 days 00:00:00.131167214 +531 42 0 days 00:00:00.130406700 +531 44 0 days 00:00:00.121077496 +531 45 0 days 00:00:00.114216471 +531 46 0 days 00:00:00.112283830 +531 48 0 days 00:00:00.133470654 +531 49 0 days 00:00:00.122529176 +531 53 0 days 00:00:00.112518786 +531 56 0 days 00:00:00.130818196 +531 57 0 days 00:00:00.152336868 +531 62 0 days 
00:00:00.108898668 +531 67 0 days 00:00:00.157875462 +531 68 0 days 00:00:00.132892400 +531 69 0 days 00:00:00.115664085 +531 70 0 days 00:00:00.122138576 +531 73 0 days 00:00:00.112235773 +531 74 0 days 00:00:00.157792165 +531 76 0 days 00:00:00.165832709 +531 79 0 days 00:00:00.123942263 +531 81 0 days 00:00:00.110468393 +531 83 0 days 00:00:00.131780475 +531 84 0 days 00:00:00.113827223 +531 85 0 days 00:00:00.112516520 +531 88 0 days 00:00:00.111482080 +531 89 0 days 00:00:00.132427015 +531 95 0 days 00:00:00.155867448 +531 97 0 days 00:00:00.115925757 +532 2 0 days 00:00:00.068156996 +532 3 0 days 00:00:00.073924461 +532 4 0 days 00:00:00.091447670 +532 5 0 days 00:00:00.072948311 +532 6 0 days 00:00:00.072336158 +532 7 0 days 00:00:00.063848141 +532 9 0 days 00:00:00.074395535 +532 13 0 days 00:00:00.091562308 +532 14 0 days 00:00:00.094908592 +532 16 0 days 00:00:00.063387835 +532 17 0 days 00:00:00.070600670 +532 18 0 days 00:00:00.066138993 +532 19 0 days 00:00:00.064085066 +532 20 0 days 00:00:00.071512007 +532 22 0 days 00:00:00.071523340 +532 23 0 days 00:00:00.073456587 +532 24 0 days 00:00:00.070567225 +532 26 0 days 00:00:00.073566584 +532 27 0 days 00:00:00.074202816 +532 28 0 days 00:00:00.072431870 +532 30 0 days 00:00:00.059864793 +532 31 0 days 00:00:00.066196823 +532 32 0 days 00:00:00.072674110 +532 34 0 days 00:00:00.062849294 +532 37 0 days 00:00:00.073045429 +532 38 0 days 00:00:00.063885969 +532 39 0 days 00:00:00.073709662 +532 41 0 days 00:00:00.072821149 +532 42 0 days 00:00:00.074140690 +532 44 0 days 00:00:00.094716280 +532 46 0 days 00:00:00.073723302 +532 47 0 days 00:00:00.068733270 +532 48 0 days 00:00:00.093891980 +532 49 0 days 00:00:00.065692722 +532 50 0 days 00:00:00.085115028 +532 53 0 days 00:00:00.089244702 +532 55 0 days 00:00:00.068572166 +532 56 0 days 00:00:00.055708620 +532 60 0 days 00:00:00.061326064 +532 61 0 days 00:00:00.065209807 +532 63 0 days 00:00:00.062853820 +532 64 0 days 00:00:00.062937428 +532 65 0 days 
00:00:00.065487262 +532 67 0 days 00:00:00.060736210 +532 68 0 days 00:00:00.087053446 +532 70 0 days 00:00:00.064774880 +532 71 0 days 00:00:00.066491174 +532 72 0 days 00:00:00.064145545 +532 73 0 days 00:00:00.063570401 +532 74 0 days 00:00:00.094457158 +532 77 0 days 00:00:00.062227792 +532 79 0 days 00:00:00.062150406 +532 82 0 days 00:00:00.062727227 +532 83 0 days 00:00:00.061144252 +532 84 0 days 00:00:00.093731613 +532 85 0 days 00:00:00.066129220 +532 86 0 days 00:00:00.094781091 +532 87 0 days 00:00:00.064827692 +532 88 0 days 00:00:00.094149533 +532 89 0 days 00:00:00.090687712 +532 90 0 days 00:00:00.083113385 +532 91 0 days 00:00:00.073921986 +532 92 0 days 00:00:00.094266109 +532 93 0 days 00:00:00.062405536 +532 96 0 days 00:00:00.063960755 +532 98 0 days 00:00:00.072687684 +532 100 0 days 00:00:00.093489050 +533 1 0 days 00:00:00.166786780 +533 2 0 days 00:00:00.103287846 +533 3 0 days 00:00:00.143497995 +533 4 0 days 00:00:00.099437880 +533 5 0 days 00:00:00.127844377 +533 6 0 days 00:00:00.133075260 +533 7 0 days 00:00:00.108829335 +533 8 0 days 00:00:00.150237050 +533 9 0 days 00:00:00.101999550 +533 10 0 days 00:00:00.107106804 +533 11 0 days 00:00:00.119548110 +533 12 0 days 00:00:00.113849831 +533 13 0 days 00:00:00.172753091 +533 15 0 days 00:00:00.136452645 +533 16 0 days 00:00:00.112870127 +533 17 0 days 00:00:00.099182393 +533 18 0 days 00:00:00.102906826 +533 19 0 days 00:00:00.099357275 +533 20 0 days 00:00:00.106922205 +533 21 0 days 00:00:00.179507165 +533 22 0 days 00:00:00.099997365 +533 23 0 days 00:00:00.116004085 +533 24 0 days 00:00:00.166720916 +533 25 0 days 00:00:00.116111180 +533 26 0 days 00:00:00.158744806 +533 27 0 days 00:00:00.099246195 +533 28 0 days 00:00:00.100003140 +533 29 0 days 00:00:00.122510968 +533 30 0 days 00:00:00.108220445 +533 31 0 days 00:00:00.134772920 +533 32 0 days 00:00:00.155205760 +533 33 0 days 00:00:00.108950080 +533 34 0 days 00:00:00.099346055 +533 35 0 days 00:00:00.113983340 +533 36 0 days 
00:00:00.109432076 +533 37 0 days 00:00:00.179178478 +533 38 0 days 00:00:00.107687260 +533 39 0 days 00:00:00.102112420 +533 40 0 days 00:00:00.097831910 +533 41 0 days 00:00:00.100385685 +533 42 0 days 00:00:00.101824420 +533 43 0 days 00:00:00.119772902 +533 44 0 days 00:00:00.145449665 +533 45 0 days 00:00:00.114575490 +533 46 0 days 00:00:00.146352800 +533 47 0 days 00:00:00.123192850 +533 48 0 days 00:00:00.121131025 +533 49 0 days 00:00:00.115305580 +533 50 0 days 00:00:00.102848213 +533 51 0 days 00:00:00.124586392 +533 52 0 days 00:00:00.100844805 +533 53 0 days 00:00:00.098903880 +533 54 0 days 00:00:00.108727660 +533 55 0 days 00:00:00.144260905 +533 56 0 days 00:00:00.088277246 +533 57 0 days 00:00:00.098313213 +533 58 0 days 00:00:00.144736029 +533 59 0 days 00:00:00.117719670 +533 60 0 days 00:00:00.146229340 +533 61 0 days 00:00:00.109719370 +533 62 0 days 00:00:00.117884175 +533 64 0 days 00:00:00.103443875 +533 65 0 days 00:00:00.124354870 +533 66 0 days 00:00:00.102307340 +533 67 0 days 00:00:00.146431805 +533 68 0 days 00:00:00.098421580 +533 69 0 days 00:00:00.098109905 +533 70 0 days 00:00:00.089480800 +533 71 0 days 00:00:00.167801304 +533 72 0 days 00:00:00.126025888 +533 73 0 days 00:00:00.095456006 +533 74 0 days 00:00:00.143371693 +533 75 0 days 00:00:00.137360928 +533 76 0 days 00:00:00.111616353 +533 77 0 days 00:00:00.167446491 +533 78 0 days 00:00:00.147577670 +533 79 0 days 00:00:00.103460620 +533 80 0 days 00:00:00.109631395 +533 81 0 days 00:00:00.108691744 +533 82 0 days 00:00:00.126844372 +533 83 0 days 00:00:00.113010820 +533 84 0 days 00:00:00.118082605 +533 85 0 days 00:00:00.119485935 +533 87 0 days 00:00:00.120842480 +533 88 0 days 00:00:00.150989805 +533 89 0 days 00:00:00.180602575 +533 90 0 days 00:00:00.102086520 +533 91 0 days 00:00:00.104275915 +533 92 0 days 00:00:00.151071755 +533 93 0 days 00:00:00.121309780 +533 94 0 days 00:00:00.153845600 +533 95 0 days 00:00:00.113586240 +533 96 0 days 00:00:00.114669926 +533 97 
0 days 00:00:00.136941856 +533 98 0 days 00:00:00.120326986 +533 99 0 days 00:00:00.105488180 +533 100 0 days 00:00:00.106640950 +534 1 0 days 00:00:00.132125705 +534 2 0 days 00:00:00.168279965 +534 3 0 days 00:00:00.182234416 +534 4 0 days 00:00:00.095525866 +534 5 0 days 00:00:00.116841348 +534 6 0 days 00:00:00.125810276 +534 7 0 days 00:00:00.153099908 +534 8 0 days 00:00:00.128752850 +534 9 0 days 00:00:00.161907535 +534 10 0 days 00:00:00.195300002 +534 11 0 days 00:00:00.136405850 +534 13 0 days 00:00:00.157632350 +534 14 0 days 00:00:00.148886156 +534 15 0 days 00:00:00.129929049 +534 16 0 days 00:00:00.107034945 +534 18 0 days 00:00:00.144115635 +534 20 0 days 00:00:00.110693676 +534 21 0 days 00:00:00.140562693 +534 22 0 days 00:00:00.156031505 +534 23 0 days 00:00:00.158021690 +534 24 0 days 00:00:00.138222346 +534 26 0 days 00:00:00.110045760 +534 27 0 days 00:00:00.126881077 +534 29 0 days 00:00:00.194392675 +534 31 0 days 00:00:00.130963060 +534 32 0 days 00:00:00.104575825 +534 34 0 days 00:00:00.128984111 +534 35 0 days 00:00:00.141134291 +534 36 0 days 00:00:00.161402015 +534 38 0 days 00:00:00.117520584 +534 39 0 days 00:00:00.123313940 +534 40 0 days 00:00:00.182860005 +534 41 0 days 00:00:00.123398850 +534 42 0 days 00:00:00.122149109 +534 44 0 days 00:00:00.160518700 +534 45 0 days 00:00:00.131435408 +534 46 0 days 00:00:00.182794054 +534 47 0 days 00:00:00.092697013 +534 48 0 days 00:00:00.185366314 +534 49 0 days 00:00:00.117300875 +534 53 0 days 00:00:00.138447566 +534 54 0 days 00:00:00.130833085 +534 55 0 days 00:00:00.123648716 +534 56 0 days 00:00:00.118924044 +534 57 0 days 00:00:00.117151088 +534 60 0 days 00:00:00.120489933 +534 61 0 days 00:00:00.143854363 +534 62 0 days 00:00:00.120573576 +534 63 0 days 00:00:00.144357997 +534 64 0 days 00:00:00.117818146 +534 65 0 days 00:00:00.134725666 +534 66 0 days 00:00:00.124754670 +534 67 0 days 00:00:00.110294435 +534 69 0 days 00:00:00.125249836 +534 70 0 days 00:00:00.171534105 +534 71 0 
days 00:00:00.134433253 +534 73 0 days 00:00:00.180543843 +534 74 0 days 00:00:00.180058593 +534 78 0 days 00:00:00.121265226 +534 79 0 days 00:00:00.145802185 +534 80 0 days 00:00:00.178975270 +534 82 0 days 00:00:00.151672820 +534 83 0 days 00:00:00.111820105 +534 84 0 days 00:00:00.093173866 +534 85 0 days 00:00:00.138936716 +534 86 0 days 00:00:00.147699350 +534 87 0 days 00:00:00.120691745 +534 88 0 days 00:00:00.143059049 +534 89 0 days 00:00:00.109668413 +534 90 0 days 00:00:00.153836004 +534 91 0 days 00:00:00.111735990 +534 92 0 days 00:00:00.118816010 +534 94 0 days 00:00:00.153951124 +534 95 0 days 00:00:00.164858217 +534 96 0 days 00:00:00.140602731 +534 97 0 days 00:00:00.141643854 +534 98 0 days 00:00:00.117355975 +534 99 0 days 00:00:00.166526897 +534 100 0 days 00:00:00.125364360 +535 1 0 days 00:00:00.086942040 +535 2 0 days 00:00:00.078998390 +535 3 0 days 00:00:00.072341520 +535 4 0 days 00:00:00.051220173 +535 5 0 days 00:00:00.057981713 +535 6 0 days 00:00:00.066536410 +535 7 0 days 00:00:00.080047495 +535 8 0 days 00:00:00.071471040 +535 9 0 days 00:00:00.055643093 +535 10 0 days 00:00:00.059537880 +535 11 0 days 00:00:00.094391345 +535 12 0 days 00:00:00.064702560 +535 13 0 days 00:00:00.059955033 +535 14 0 days 00:00:00.071595260 +535 15 0 days 00:00:00.055316466 +535 16 0 days 00:00:00.079403020 +535 17 0 days 00:00:00.050562073 +535 18 0 days 00:00:00.058547526 +535 19 0 days 00:00:00.062995440 +535 20 0 days 00:00:00.071141080 +535 21 0 days 00:00:00.055695460 +535 22 0 days 00:00:00.072265553 +535 23 0 days 00:00:00.050275680 +535 24 0 days 00:00:00.078523710 +535 25 0 days 00:00:00.055744046 +535 26 0 days 00:00:00.073150886 +535 27 0 days 00:00:00.065245470 +535 28 0 days 00:00:00.072188460 +535 29 0 days 00:00:00.056824580 +535 30 0 days 00:00:00.064112280 +535 31 0 days 00:00:00.060611560 +535 32 0 days 00:00:00.055658070 +535 33 0 days 00:00:00.057707013 +535 34 0 days 00:00:00.080522370 +535 35 0 days 00:00:00.061344533 +535 36 0 
days 00:00:00.051156040 +535 37 0 days 00:00:00.061538640 +535 38 0 days 00:00:00.073478206 +535 39 0 days 00:00:00.082493056 +535 41 0 days 00:00:00.060846686 +535 42 0 days 00:00:00.079239705 +535 43 0 days 00:00:00.091654624 +535 44 0 days 00:00:00.064304996 +535 45 0 days 00:00:00.057155426 +535 46 0 days 00:00:00.074666553 +535 47 0 days 00:00:00.056781100 +535 48 0 days 00:00:00.060880773 +535 49 0 days 00:00:00.079188115 +535 50 0 days 00:00:00.060305446 +535 51 0 days 00:00:00.063928610 +535 52 0 days 00:00:00.079962100 +535 53 0 days 00:00:00.061105185 +535 54 0 days 00:00:00.069175668 +535 55 0 days 00:00:00.059637053 +535 56 0 days 00:00:00.052905526 +535 57 0 days 00:00:00.083493084 +535 58 0 days 00:00:00.079935540 +535 59 0 days 00:00:00.080235120 +535 60 0 days 00:00:00.072386573 +535 61 0 days 00:00:00.064660220 +535 62 0 days 00:00:00.058358186 +535 63 0 days 00:00:00.085075832 +535 64 0 days 00:00:00.073512611 +535 65 0 days 00:00:00.094116833 +535 66 0 days 00:00:00.055590535 +535 67 0 days 00:00:00.062372800 +535 68 0 days 00:00:00.079003170 +535 69 0 days 00:00:00.053944340 +535 70 0 days 00:00:00.051073920 +535 71 0 days 00:00:00.060475613 +535 72 0 days 00:00:00.049990840 +535 73 0 days 00:00:00.079337820 +535 74 0 days 00:00:00.051062493 +535 75 0 days 00:00:00.059708510 +535 76 0 days 00:00:00.065972196 +535 77 0 days 00:00:00.060187360 +535 78 0 days 00:00:00.072221326 +535 79 0 days 00:00:00.087281013 +535 80 0 days 00:00:00.052474980 +535 81 0 days 00:00:00.061203733 +535 82 0 days 00:00:00.058689986 +535 83 0 days 00:00:00.063856165 +535 84 0 days 00:00:00.066604216 +535 85 0 days 00:00:00.079751760 +535 86 0 days 00:00:00.069426080 +535 87 0 days 00:00:00.073114566 +535 88 0 days 00:00:00.063267620 +535 89 0 days 00:00:00.068023882 +535 90 0 days 00:00:00.059389933 +535 91 0 days 00:00:00.065988800 +535 92 0 days 00:00:00.055650746 +535 93 0 days 00:00:00.050836846 +535 94 0 days 00:00:00.095218440 +535 95 0 days 00:00:00.058062465 
+535 96 0 days 00:00:00.089679140 +535 97 0 days 00:00:00.074120073 +535 98 0 days 00:00:00.064431505 +535 99 0 days 00:00:00.072811206 +536 1 0 days 00:00:00.056343913 +536 2 0 days 00:00:00.090093296 +536 3 0 days 00:00:00.083124945 +536 4 0 days 00:00:00.066044178 +536 5 0 days 00:00:00.077518142 +536 7 0 days 00:00:00.052091920 +536 9 0 days 00:00:00.067492195 +536 10 0 days 00:00:00.084625435 +536 11 0 days 00:00:00.075609146 +536 13 0 days 00:00:00.076789318 +536 15 0 days 00:00:00.065386624 +536 16 0 days 00:00:00.083985600 +536 17 0 days 00:00:00.067701053 +536 18 0 days 00:00:00.065664448 +536 22 0 days 00:00:00.078191645 +536 23 0 days 00:00:00.086920025 +536 24 0 days 00:00:00.077414564 +536 26 0 days 00:00:00.083411785 +536 27 0 days 00:00:00.061653995 +536 28 0 days 00:00:00.086127364 +536 29 0 days 00:00:00.076531916 +536 30 0 days 00:00:00.078334514 +536 31 0 days 00:00:00.074943043 +536 33 0 days 00:00:00.090954220 +536 34 0 days 00:00:00.057259935 +536 35 0 days 00:00:00.069108125 +536 36 0 days 00:00:00.087779916 +536 37 0 days 00:00:00.096008600 +536 38 0 days 00:00:00.067704405 +536 40 0 days 00:00:00.083915355 +536 42 0 days 00:00:00.084378315 +536 43 0 days 00:00:00.068239650 +536 44 0 days 00:00:00.095698064 +536 45 0 days 00:00:00.100724158 +536 46 0 days 00:00:00.075487566 +536 47 0 days 00:00:00.069076905 +536 48 0 days 00:00:00.068771577 +536 49 0 days 00:00:00.078366982 +536 50 0 days 00:00:00.099311892 +536 51 0 days 00:00:00.083932630 +536 52 0 days 00:00:00.066477500 +536 53 0 days 00:00:00.059413553 +536 54 0 days 00:00:00.063992490 +536 55 0 days 00:00:00.092114377 +536 56 0 days 00:00:00.083235740 +536 58 0 days 00:00:00.075484400 +536 59 0 days 00:00:00.069677691 +536 60 0 days 00:00:00.072787744 +536 61 0 days 00:00:00.066258701 +536 63 0 days 00:00:00.061940485 +536 64 0 days 00:00:00.075186186 +536 65 0 days 00:00:00.066701223 +536 66 0 days 00:00:00.077875041 +536 67 0 days 00:00:00.090264828 +536 68 0 days 00:00:00.100295581 
+536 69 0 days 00:00:00.077225000 +536 70 0 days 00:00:00.091954950 +536 73 0 days 00:00:00.067893364 +536 74 0 days 00:00:00.070387392 +536 75 0 days 00:00:00.065952142 +536 77 0 days 00:00:00.068845494 +536 78 0 days 00:00:00.069395185 +536 81 0 days 00:00:00.062089880 +536 82 0 days 00:00:00.100041456 +536 83 0 days 00:00:00.067416983 +536 84 0 days 00:00:00.067871185 +536 85 0 days 00:00:00.092526496 +536 86 0 days 00:00:00.092060323 +536 87 0 days 00:00:00.100480412 +536 89 0 days 00:00:00.061943050 +536 90 0 days 00:00:00.068410321 +536 91 0 days 00:00:00.090982783 +536 94 0 days 00:00:00.084488205 +536 95 0 days 00:00:00.078573824 +536 97 0 days 00:00:00.057988735 +536 99 0 days 00:00:00.069708545 +536 100 0 days 00:00:00.069377891 +537 1 0 days 00:00:00.145336340 +537 2 0 days 00:00:00.115990705 +537 4 0 days 00:00:00.125909053 +537 5 0 days 00:00:00.134777217 +537 6 0 days 00:00:00.164113112 +537 7 0 days 00:00:00.124141768 +537 8 0 days 00:00:00.139945174 +537 9 0 days 00:00:00.116287920 +537 10 0 days 00:00:00.159055295 +537 11 0 days 00:00:00.183311690 +537 12 0 days 00:00:00.120950406 +537 13 0 days 00:00:00.108543128 +537 14 0 days 00:00:00.173419802 +537 15 0 days 00:00:00.122932438 +537 16 0 days 00:00:00.118993125 +537 17 0 days 00:00:00.107787910 +537 18 0 days 00:00:00.116092610 +537 20 0 days 00:00:00.127873486 +537 21 0 days 00:00:00.120397186 +537 22 0 days 00:00:00.157042732 +537 24 0 days 00:00:00.128238528 +537 25 0 days 00:00:00.117604195 +537 26 0 days 00:00:00.123350908 +537 27 0 days 00:00:00.163197496 +537 28 0 days 00:00:00.175349562 +537 30 0 days 00:00:00.125225678 +537 31 0 days 00:00:00.145229267 +537 32 0 days 00:00:00.114320931 +537 33 0 days 00:00:00.144546070 +537 34 0 days 00:00:00.138365011 +537 35 0 days 00:00:00.117197649 +537 36 0 days 00:00:00.149728772 +537 37 0 days 00:00:00.130821518 +537 39 0 days 00:00:00.112649340 +537 40 0 days 00:00:00.170972751 +537 41 0 days 00:00:00.131354840 +537 43 0 days 00:00:00.143491020 
+537 45 0 days 00:00:00.123448922 +537 48 0 days 00:00:00.184789333 +537 49 0 days 00:00:00.129480977 +537 51 0 days 00:00:00.146650505 +537 52 0 days 00:00:00.132183374 +537 53 0 days 00:00:00.163013095 +537 56 0 days 00:00:00.133279006 +537 57 0 days 00:00:00.142850070 +537 58 0 days 00:00:00.120917980 +537 59 0 days 00:00:00.115764888 +537 60 0 days 00:00:00.116383974 +537 61 0 days 00:00:00.141460589 +537 62 0 days 00:00:00.148105524 +537 63 0 days 00:00:00.139338654 +537 64 0 days 00:00:00.119140764 +537 65 0 days 00:00:00.172257170 +537 66 0 days 00:00:00.176761933 +537 67 0 days 00:00:00.125161400 +537 69 0 days 00:00:00.174894183 +537 70 0 days 00:00:00.116841310 +537 72 0 days 00:00:00.115477525 +537 73 0 days 00:00:00.137491009 +537 74 0 days 00:00:00.108853992 +537 75 0 days 00:00:00.125971210 +537 76 0 days 00:00:00.142745972 +537 77 0 days 00:00:00.121229390 +537 79 0 days 00:00:00.106799555 +537 80 0 days 00:00:00.130122616 +537 81 0 days 00:00:00.148101710 +537 83 0 days 00:00:00.106309996 +537 84 0 days 00:00:00.090917965 +537 85 0 days 00:00:00.122771102 +537 86 0 days 00:00:00.158435500 +537 87 0 days 00:00:00.164298634 +537 88 0 days 00:00:00.114063650 +537 89 0 days 00:00:00.146426052 +537 90 0 days 00:00:00.107499480 +537 91 0 days 00:00:00.107301994 +537 92 0 days 00:00:00.124003442 +537 93 0 days 00:00:00.139896780 +537 94 0 days 00:00:00.157929592 +537 95 0 days 00:00:00.165251574 +537 96 0 days 00:00:00.151765146 +537 99 0 days 00:00:00.152666722 +537 100 0 days 00:00:00.121194926 +538 2 0 days 00:00:00.065563338 +538 5 0 days 00:00:00.072222785 +538 6 0 days 00:00:00.068196197 +538 7 0 days 00:00:00.095153290 +538 9 0 days 00:00:00.075067448 +538 10 0 days 00:00:00.088170473 +538 11 0 days 00:00:00.068513830 +538 13 0 days 00:00:00.095372947 +538 14 0 days 00:00:00.060514836 +538 15 0 days 00:00:00.066840252 +538 21 0 days 00:00:00.089725657 +538 22 0 days 00:00:00.062551135 +538 24 0 days 00:00:00.061198044 +538 25 0 days 
00:00:00.067722003 +538 26 0 days 00:00:00.091211743 +538 27 0 days 00:00:00.065167752 +538 28 0 days 00:00:00.097196887 +538 29 0 days 00:00:00.068961310 +538 30 0 days 00:00:00.090096420 +538 31 0 days 00:00:00.094509915 +538 34 0 days 00:00:00.094190036 +538 35 0 days 00:00:00.067447223 +538 36 0 days 00:00:00.072861192 +538 37 0 days 00:00:00.073055448 +538 38 0 days 00:00:00.076112116 +538 39 0 days 00:00:00.065125212 +538 40 0 days 00:00:00.093133547 +538 41 0 days 00:00:00.065732128 +538 42 0 days 00:00:00.069848617 +538 43 0 days 00:00:00.063210387 +538 45 0 days 00:00:00.079630115 +538 46 0 days 00:00:00.078231421 +538 47 0 days 00:00:00.064881649 +538 49 0 days 00:00:00.078051471 +538 50 0 days 00:00:00.065606840 +538 51 0 days 00:00:00.093218311 +538 53 0 days 00:00:00.071037305 +538 54 0 days 00:00:00.055160230 +538 56 0 days 00:00:00.074580335 +538 58 0 days 00:00:00.083156535 +538 59 0 days 00:00:00.095511136 +538 60 0 days 00:00:00.092316740 +538 62 0 days 00:00:00.050431993 +538 63 0 days 00:00:00.057706410 +538 64 0 days 00:00:00.070304191 +538 66 0 days 00:00:00.083079088 +538 67 0 days 00:00:00.075587102 +538 68 0 days 00:00:00.097380430 +538 69 0 days 00:00:00.059500520 +538 71 0 days 00:00:00.075170961 +538 76 0 days 00:00:00.093547667 +538 77 0 days 00:00:00.083365752 +538 78 0 days 00:00:00.066638424 +538 79 0 days 00:00:00.097668356 +538 80 0 days 00:00:00.075507523 +538 83 0 days 00:00:00.065024200 +538 84 0 days 00:00:00.097563694 +538 85 0 days 00:00:00.092885643 +538 86 0 days 00:00:00.064305825 +538 89 0 days 00:00:00.066528672 +538 91 0 days 00:00:00.069234205 +538 92 0 days 00:00:00.057847480 +538 93 0 days 00:00:00.077182103 +538 94 0 days 00:00:00.077575400 +538 97 0 days 00:00:00.070777411 +538 100 0 days 00:00:00.063334550 +539 1 0 days 00:00:00.800688346 +539 2 0 days 00:00:00.257955706 +539 3 0 days 00:00:01.430102520 +539 4 0 days 00:00:01.248229553 +539 5 0 days 00:00:00.673199386 +539 6 0 days 00:00:02.625740540 +539 7 0 days 
00:00:02.159151573 +539 8 0 days 00:00:02.710078620 +539 9 0 days 00:00:06.485897313 +539 10 0 days 00:00:00.587653260 +539 11 0 days 00:00:01.071545080 +539 12 0 days 00:00:01.199890240 +539 13 0 days 00:00:00.783100460 +539 14 0 days 00:00:01.334572336 +539 15 0 days 00:00:01.062219940 +539 16 0 days 00:00:04.185938486 +539 17 0 days 00:00:01.800877360 +539 18 0 days 00:00:01.569715746 +539 19 0 days 00:00:02.169536556 +539 20 0 days 00:00:02.088584000 +539 21 0 days 00:00:04.777538100 +539 22 0 days 00:00:00.734244146 +539 23 0 days 00:00:00.807962973 +539 24 0 days 00:00:01.214220480 +539 25 0 days 00:00:00.494834726 +539 26 0 days 00:00:00.953568820 +539 27 0 days 00:00:01.800590686 +539 28 0 days 00:00:02.328643813 +539 29 0 days 00:00:00.449483972 +539 30 0 days 00:00:00.389737300 +539 31 0 days 00:00:01.309333840 +539 32 0 days 00:00:00.450372520 +539 33 0 days 00:00:03.735546160 +539 34 0 days 00:00:00.305343160 +539 35 0 days 00:00:02.075161860 +539 36 0 days 00:00:00.619778260 +539 37 0 days 00:00:00.365934526 +539 38 0 days 00:00:03.875539860 +539 39 0 days 00:00:03.495933565 +539 40 0 days 00:00:00.569257860 +539 41 0 days 00:00:01.094096866 +539 42 0 days 00:00:02.434742113 +539 43 0 days 00:00:00.644329486 +539 44 0 days 00:00:00.393843368 +539 45 0 days 00:00:00.905352066 +539 47 0 days 00:00:02.715583786 +539 48 0 days 00:00:00.357743527 +539 49 0 days 00:00:00.314077626 +539 50 0 days 00:00:01.116344780 +539 51 0 days 00:00:00.495117400 +539 52 0 days 00:00:00.658193766 +539 53 0 days 00:00:00.475775033 +539 54 0 days 00:00:00.626552760 +539 55 0 days 00:00:00.627349173 +539 56 0 days 00:00:03.863502540 +539 57 0 days 00:00:01.048723006 +539 58 0 days 00:00:03.998004066 +539 59 0 days 00:00:04.396054700 +539 60 0 days 00:00:00.868332936 +539 61 0 days 00:00:00.840863320 +539 62 0 days 00:00:00.516426066 +539 63 0 days 00:00:01.386086773 +539 64 0 days 00:00:03.333143106 +539 65 0 days 00:00:01.699844736 +539 66 0 days 00:00:01.964388646 +539 67 0 
days 00:00:00.373518275 +539 68 0 days 00:00:00.668041333 +539 69 0 days 00:00:00.428952566 +539 70 0 days 00:00:03.399283612 +539 71 0 days 00:00:00.394840086 +539 72 0 days 00:00:00.966119996 +539 73 0 days 00:00:00.392318026 +539 74 0 days 00:00:00.527123752 +539 75 0 days 00:00:01.063348200 +539 76 0 days 00:00:00.618887606 +539 77 0 days 00:00:00.349760640 +539 78 0 days 00:00:00.687217500 +539 79 0 days 00:00:00.180828593 +539 80 0 days 00:00:00.423350720 +539 81 0 days 00:00:00.347401020 +539 82 0 days 00:00:02.977924626 +539 83 0 days 00:00:00.275710246 +539 84 0 days 00:00:01.134285406 +539 85 0 days 00:00:04.152803446 +539 86 0 days 00:00:00.667449400 +539 87 0 days 00:00:00.638456506 +539 88 0 days 00:00:00.760760606 +539 89 0 days 00:00:01.668252306 +539 90 0 days 00:00:00.473853173 +539 91 0 days 00:00:00.373306260 +539 92 0 days 00:00:00.252695306 +539 93 0 days 00:00:00.936005113 +539 94 0 days 00:00:00.651419486 +539 95 0 days 00:00:02.184650126 +539 96 0 days 00:00:00.398433386 +539 97 0 days 00:00:01.082866200 +539 98 0 days 00:00:01.185801473 +539 99 0 days 00:00:00.400271520 +539 100 0 days 00:00:00.655332293 +540 1 0 days 00:00:00.236727196 +540 2 0 days 00:00:01.956207126 +540 3 0 days 00:00:00.261288665 +540 4 0 days 00:00:00.723808838 +540 5 0 days 00:00:00.667135460 +540 6 0 days 00:00:00.735958758 +540 7 0 days 00:00:00.751758075 +540 8 0 days 00:00:00.831041680 +540 9 0 days 00:00:00.686390052 +540 10 0 days 00:00:00.463747725 +540 11 0 days 00:00:00.560182971 +540 12 0 days 00:00:00.607434690 +540 13 0 days 00:00:00.830952010 +540 16 0 days 00:00:00.709869380 +540 18 0 days 00:00:00.502620405 +540 19 0 days 00:00:01.144687590 +540 20 0 days 00:00:00.497926388 +540 21 0 days 00:00:02.393214644 +540 22 0 days 00:00:00.825353140 +540 23 0 days 00:00:03.806972696 +540 26 0 days 00:00:00.847597185 +540 27 0 days 00:00:01.632506183 +540 28 0 days 00:00:00.496478305 +540 29 0 days 00:00:02.831623764 +540 30 0 days 00:00:00.839793390 +540 31 0 
days 00:00:01.442126915 +540 32 0 days 00:00:01.116197970 +540 33 0 days 00:00:00.553477586 +540 34 0 days 00:00:05.687222180 +540 35 0 days 00:00:00.512105880 +540 36 0 days 00:00:00.956724417 +540 37 0 days 00:00:05.227098192 +540 39 0 days 00:00:03.893442035 +540 40 0 days 00:00:00.494479462 +540 41 0 days 00:00:03.977152388 +540 42 0 days 00:00:00.787762461 +540 43 0 days 00:00:00.446116186 +540 44 0 days 00:00:00.583869916 +540 45 0 days 00:00:00.897975586 +540 46 0 days 00:00:00.461910608 +540 47 0 days 00:00:00.642241398 +540 48 0 days 00:00:01.761310247 +540 49 0 days 00:00:00.364497400 +540 50 0 days 00:00:05.737185712 +540 51 0 days 00:00:01.285798244 +540 52 0 days 00:00:00.663780112 +540 53 0 days 00:00:00.887331681 +540 54 0 days 00:00:00.460340872 +540 55 0 days 00:00:01.774168597 +540 56 0 days 00:00:00.310754995 +540 57 0 days 00:00:00.890867500 +540 59 0 days 00:00:00.660855325 +540 60 0 days 00:00:00.524739555 +540 61 0 days 00:00:00.339730560 +540 62 0 days 00:00:05.436434754 +540 63 0 days 00:00:01.096260508 +540 64 0 days 00:00:00.337505603 +540 65 0 days 00:00:00.628298326 +540 66 0 days 00:00:00.381016142 +540 67 0 days 00:00:01.813966530 +540 68 0 days 00:00:03.586965317 +540 69 0 days 00:00:01.125467395 +540 70 0 days 00:00:00.383733945 +540 71 0 days 00:00:00.252587675 +540 72 0 days 00:00:02.644524406 +540 73 0 days 00:00:01.921759476 +540 75 0 days 00:00:00.563654776 +540 76 0 days 00:00:00.874078725 +540 77 0 days 00:00:00.394862770 +540 78 0 days 00:00:01.351409435 +540 79 0 days 00:00:01.747191780 +540 80 0 days 00:00:01.104399052 +540 81 0 days 00:00:00.653393905 +540 82 0 days 00:00:05.616644229 +540 83 0 days 00:00:00.654013307 +540 84 0 days 00:00:00.502235471 +540 86 0 days 00:00:00.331621720 +540 88 0 days 00:00:00.911246997 +540 89 0 days 00:00:01.091033704 +540 90 0 days 00:00:01.306114135 +540 91 0 days 00:00:00.208991600 +540 92 0 days 00:00:04.052584104 +540 93 0 days 00:00:00.507324816 +540 94 0 days 00:00:00.849190604 
+540 95 0 days 00:00:00.230880435 +540 96 0 days 00:00:00.910855450 +540 97 0 days 00:00:00.313892540 +540 98 0 days 00:00:03.452077412 +540 99 0 days 00:00:03.585387415 +540 100 0 days 00:00:04.440675351 +541 1 0 days 00:00:00.496816966 +541 2 0 days 00:00:00.317568200 +541 3 0 days 00:00:00.398826246 +541 4 0 days 00:00:00.353816845 +541 5 0 days 00:00:00.331229508 +541 6 0 days 00:00:00.407431833 +541 7 0 days 00:00:00.182674853 +541 8 0 days 00:00:00.453024946 +541 9 0 days 00:00:00.114745740 +541 10 0 days 00:00:00.115695293 +541 11 0 days 00:00:00.605629906 +541 12 0 days 00:00:01.273168666 +541 13 0 days 00:00:02.046828946 +541 14 0 days 00:00:01.695447900 +541 15 0 days 00:00:00.304597373 +541 16 0 days 00:00:00.186293280 +541 17 0 days 00:00:01.138764520 +541 18 0 days 00:00:00.322582806 +541 19 0 days 00:00:00.463733712 +541 20 0 days 00:00:00.328944833 +541 21 0 days 00:00:01.095144633 +541 22 0 days 00:00:01.107832500 +541 23 0 days 00:00:00.261643693 +541 24 0 days 00:00:00.476710216 +541 25 0 days 00:00:01.295609233 +541 26 0 days 00:00:00.389407393 +541 27 0 days 00:00:02.101390020 +541 28 0 days 00:00:00.205390473 +541 29 0 days 00:00:00.493432846 +541 30 0 days 00:00:01.697780026 +541 31 0 days 00:00:00.117946346 +541 32 0 days 00:00:02.335274720 +541 33 0 days 00:00:02.138671733 +541 34 0 days 00:00:00.140105452 +541 35 0 days 00:00:00.167792813 +541 36 0 days 00:00:00.276375420 +541 37 0 days 00:00:00.147190453 +541 38 0 days 00:00:01.963989160 +541 39 0 days 00:00:01.519572340 +541 40 0 days 00:00:00.272905720 +541 41 0 days 00:00:00.651171340 +541 42 0 days 00:00:00.842874895 +541 43 0 days 00:00:00.610995973 +541 44 0 days 00:00:00.139347986 +541 45 0 days 00:00:00.550664100 +541 46 0 days 00:00:00.272036373 +541 47 0 days 00:00:00.390963023 +541 48 0 days 00:00:00.152912985 +541 49 0 days 00:00:01.889745320 +541 50 0 days 00:00:00.411486980 +541 51 0 days 00:00:00.109962451 +541 52 0 days 00:00:00.120834789 +541 53 0 days 00:00:00.649329573 
+541 54 0 days 00:00:00.328349326 +541 55 0 days 00:00:00.554675053 +541 56 0 days 00:00:00.270933646 +541 57 0 days 00:00:00.296975166 +541 58 0 days 00:00:01.809069440 +541 59 0 days 00:00:00.220058746 +541 60 0 days 00:00:00.179699760 +541 61 0 days 00:00:00.589378310 +541 62 0 days 00:00:00.225545046 +541 63 0 days 00:00:01.366477073 +541 65 0 days 00:00:01.643297800 +541 66 0 days 00:00:00.200969644 +541 67 0 days 00:00:00.248399960 +541 68 0 days 00:00:00.265563993 +541 69 0 days 00:00:00.293898226 +541 70 0 days 00:00:00.208190093 +541 71 0 days 00:00:00.324567720 +541 72 0 days 00:00:00.207948080 +541 73 0 days 00:00:01.958097653 +541 74 0 days 00:00:00.269691633 +541 75 0 days 00:00:00.215228433 +541 76 0 days 00:00:00.300283900 +541 77 0 days 00:00:02.039887493 +541 78 0 days 00:00:00.558700953 +541 79 0 days 00:00:00.123356960 +541 80 0 days 00:00:00.465484926 +541 81 0 days 00:00:01.807927480 +541 82 0 days 00:00:00.176690560 +541 83 0 days 00:00:00.409354600 +541 84 0 days 00:00:00.157828855 +541 85 0 days 00:00:00.316380740 +541 86 0 days 00:00:00.423350520 +541 87 0 days 00:00:00.303341480 +541 88 0 days 00:00:00.117123826 +541 89 0 days 00:00:00.260699760 +541 90 0 days 00:00:00.204683013 +541 91 0 days 00:00:00.132024510 +541 92 0 days 00:00:00.228639460 +541 93 0 days 00:00:00.547460273 +541 94 0 days 00:00:00.437532604 +541 95 0 days 00:00:00.321522546 +541 96 0 days 00:00:00.170712906 +541 97 0 days 00:00:00.268891740 +541 98 0 days 00:00:00.486029880 +541 99 0 days 00:00:00.549896173 +541 100 0 days 00:00:00.664985393 +542 1 0 days 00:00:01.487433150 +542 2 0 days 00:00:00.163254075 +542 3 0 days 00:00:00.358881305 +542 4 0 days 00:00:01.635999494 +542 5 0 days 00:00:00.450035847 +542 6 0 days 00:00:00.538460455 +542 7 0 days 00:00:00.154547968 +542 9 0 days 00:00:00.279791200 +542 10 0 days 00:00:00.273463800 +542 13 0 days 00:00:00.326243610 +542 14 0 days 00:00:02.403384378 +542 15 0 days 00:00:00.575701190 +542 16 0 days 00:00:00.286873233 
+542 17 0 days 00:00:00.305780317 +542 18 0 days 00:00:00.841439327 +542 20 0 days 00:00:02.437893410 +542 21 0 days 00:00:00.478113536 +542 22 0 days 00:00:02.272360254 +542 23 0 days 00:00:00.214639787 +542 24 0 days 00:00:00.251093697 +542 25 0 days 00:00:00.384765572 +542 26 0 days 00:00:00.210596412 +542 27 0 days 00:00:00.314807933 +542 30 0 days 00:00:00.714865067 +542 31 0 days 00:00:00.235660248 +542 32 0 days 00:00:00.732568568 +542 35 0 days 00:00:00.337186946 +542 36 0 days 00:00:00.676296280 +542 37 0 days 00:00:00.451275626 +542 38 0 days 00:00:00.189320928 +542 39 0 days 00:00:00.264603422 +542 40 0 days 00:00:00.732304313 +542 41 0 days 00:00:01.484658960 +542 42 0 days 00:00:00.465996371 +542 43 0 days 00:00:00.595748600 +542 44 0 days 00:00:00.508864847 +542 46 0 days 00:00:02.537660882 +542 47 0 days 00:00:01.682111804 +542 49 0 days 00:00:00.814417385 +542 50 0 days 00:00:00.941834960 +542 51 0 days 00:00:02.505833463 +542 52 0 days 00:00:01.392476840 +542 53 0 days 00:00:02.286616193 +542 54 0 days 00:00:00.248860784 +542 55 0 days 00:00:00.293599458 +542 56 0 days 00:00:00.374551081 +542 58 0 days 00:00:00.250354847 +542 59 0 days 00:00:00.269978690 +542 60 0 days 00:00:00.268889908 +542 61 0 days 00:00:02.856124517 +542 62 0 days 00:00:00.333469357 +542 65 0 days 00:00:02.025545370 +542 66 0 days 00:00:00.732820946 +542 68 0 days 00:00:00.993322688 +542 69 0 days 00:00:00.711663306 +542 70 0 days 00:00:00.133823332 +542 71 0 days 00:00:00.283064926 +542 72 0 days 00:00:00.496252572 +542 73 0 days 00:00:00.467266080 +542 74 0 days 00:00:00.504481580 +542 75 0 days 00:00:00.258771161 +542 76 0 days 00:00:00.263531900 +542 78 0 days 00:00:00.279870284 +542 79 0 days 00:00:01.577260128 +542 80 0 days 00:00:00.208503238 +542 81 0 days 00:00:00.296284925 +542 83 0 days 00:00:00.881167508 +542 84 0 days 00:00:02.266789830 +542 86 0 days 00:00:00.821224876 +542 87 0 days 00:00:01.775456880 +542 88 0 days 00:00:00.790682820 +542 89 0 days 
00:00:00.419089356 +542 90 0 days 00:00:01.800902826 +542 92 0 days 00:00:01.735476444 +542 93 0 days 00:00:01.831233100 +542 94 0 days 00:00:01.768475717 +542 96 0 days 00:00:00.227641468 +542 99 0 days 00:00:00.206076591 +542 100 0 days 00:00:01.840663894 +543 1 0 days 00:00:01.519490605 +543 2 0 days 00:00:03.669428413 +543 3 0 days 00:00:08.695325040 +543 4 0 days 00:00:05.398937248 +543 5 0 days 00:00:02.651822088 +543 6 0 days 00:00:02.788751765 +543 7 0 days 00:00:01.020711852 +543 8 0 days 00:00:04.003216975 +543 9 0 days 00:00:00.648377640 +543 10 0 days 00:00:00.870477136 +543 11 0 days 00:00:09.386996490 +543 12 0 days 00:00:02.294814933 +543 13 0 days 00:00:08.554386563 +543 14 0 days 00:00:00.761078984 +543 15 0 days 00:00:04.005795896 +543 16 0 days 00:00:00.544117000 +543 17 0 days 00:00:01.613728504 +543 18 0 days 00:00:02.358848220 +543 19 0 days 00:00:04.767254500 +543 20 0 days 00:00:10.979792480 +543 21 0 days 00:00:01.284234090 +543 22 0 days 00:00:00.584532270 +543 23 0 days 00:00:05.673179930 +543 25 0 days 00:00:00.822834625 +543 26 0 days 00:00:01.115022475 +543 27 0 days 00:00:04.668757102 +543 28 0 days 00:00:01.196513560 +543 29 0 days 00:00:00.774453700 +543 30 0 days 00:00:00.413543804 +543 31 0 days 00:00:00.475801765 +543 32 0 days 00:00:00.478408082 +543 33 0 days 00:00:00.420051352 +543 34 0 days 00:00:00.303867880 +543 35 0 days 00:00:04.914596957 +543 36 0 days 00:00:00.324358576 +543 37 0 days 00:00:03.361593205 +543 38 0 days 00:00:00.481402523 +543 39 0 days 00:00:00.256736300 +543 40 0 days 00:00:00.589018145 +543 41 0 days 00:00:00.779981860 +543 42 0 days 00:00:00.689445282 +543 43 0 days 00:00:01.507698345 +543 44 0 days 00:00:01.524312248 +543 45 0 days 00:00:01.033249200 +543 47 0 days 00:00:01.573297793 +543 48 0 days 00:00:02.163306983 +543 49 0 days 00:00:00.894647120 +543 50 0 days 00:00:00.510947186 +543 52 0 days 00:00:00.909137892 +543 53 0 days 00:00:00.318322977 +543 54 0 days 00:00:00.400246390 +543 55 0 days 
00:00:00.824829335 +543 56 0 days 00:00:03.284225028 +543 58 0 days 00:00:03.633858176 +543 59 0 days 00:00:01.545926508 +543 60 0 days 00:00:00.407140470 +543 61 0 days 00:00:01.478419348 +543 62 0 days 00:00:01.271092112 +543 63 0 days 00:00:00.343280500 +543 64 0 days 00:00:00.791891635 +543 65 0 days 00:00:05.555843044 +543 67 0 days 00:00:05.249243045 +543 68 0 days 00:00:03.316525008 +543 69 0 days 00:00:05.231430625 +543 70 0 days 00:00:00.300234396 +543 71 0 days 00:00:00.579711476 +543 72 0 days 00:00:05.343875290 +543 73 0 days 00:00:00.830884654 +543 74 0 days 00:00:00.243140642 +543 75 0 days 00:00:00.476133992 +543 76 0 days 00:00:00.961460503 +543 77 0 days 00:00:00.741651295 +543 78 0 days 00:00:00.400383080 +543 79 0 days 00:00:02.842113426 +543 80 0 days 00:00:03.920859788 +543 81 0 days 00:00:02.643093552 +543 82 0 days 00:00:00.711212016 +543 83 0 days 00:00:00.964094293 +543 84 0 days 00:00:01.431418891 +543 85 0 days 00:00:00.757523773 +543 87 0 days 00:00:00.588002136 +543 88 0 days 00:00:01.436143542 +543 89 0 days 00:00:04.773830735 +543 90 0 days 00:00:01.435091916 +543 91 0 days 00:00:00.443205006 +543 92 0 days 00:00:00.664146045 +543 93 0 days 00:00:00.330112920 +543 94 0 days 00:00:01.359402257 +543 95 0 days 00:00:04.264280658 +543 96 0 days 00:00:00.491374093 +543 97 0 days 00:00:03.970054623 +543 98 0 days 00:00:00.751567216 +543 99 0 days 00:00:00.766421231 +543 100 0 days 00:00:04.550154884 +544 1 0 days 00:00:00.427924938 +544 2 0 days 00:00:02.006793836 +544 3 0 days 00:00:01.816377053 +544 4 0 days 00:00:02.182823006 +544 5 0 days 00:00:00.175022586 +544 6 0 days 00:00:00.113177785 +544 7 0 days 00:00:00.148914112 +544 8 0 days 00:00:00.496709270 +544 9 0 days 00:00:00.565186776 +544 10 0 days 00:00:00.701371800 +544 11 0 days 00:00:00.180730908 +544 12 0 days 00:00:00.231394440 +544 13 0 days 00:00:00.263758462 +544 14 0 days 00:00:00.421324250 +544 15 0 days 00:00:01.629352464 +544 17 0 days 00:00:01.190374468 +544 18 0 days 
00:00:01.044835568 +544 20 0 days 00:00:00.484479349 +544 21 0 days 00:00:00.382837790 +544 22 0 days 00:00:00.307704180 +544 23 0 days 00:00:00.608050905 +544 24 0 days 00:00:01.357166951 +544 25 0 days 00:00:00.437808069 +544 27 0 days 00:00:00.876626660 +544 28 0 days 00:00:00.458938427 +544 29 0 days 00:00:00.470528375 +544 30 0 days 00:00:00.113109455 +544 31 0 days 00:00:00.753906147 +544 32 0 days 00:00:00.249241222 +544 33 0 days 00:00:00.403152057 +544 34 0 days 00:00:01.463763896 +544 35 0 days 00:00:00.901492605 +544 36 0 days 00:00:00.431132246 +544 37 0 days 00:00:00.244594290 +544 39 0 days 00:00:00.449639580 +544 40 0 days 00:00:01.523364360 +544 41 0 days 00:00:00.392354040 +544 42 0 days 00:00:00.338322570 +544 44 0 days 00:00:01.752333351 +544 45 0 days 00:00:00.180604310 +544 46 0 days 00:00:00.337545552 +544 47 0 days 00:00:00.756165010 +544 48 0 days 00:00:00.259392603 +544 50 0 days 00:00:00.433575702 +544 51 0 days 00:00:00.198705631 +544 52 0 days 00:00:00.259684606 +544 53 0 days 00:00:00.206391080 +544 54 0 days 00:00:00.788246573 +544 55 0 days 00:00:00.360078436 +544 56 0 days 00:00:00.580387240 +544 57 0 days 00:00:00.247332320 +544 58 0 days 00:00:02.260914615 +544 59 0 days 00:00:00.213626145 +544 60 0 days 00:00:01.646054205 +544 61 0 days 00:00:00.131661317 +544 62 0 days 00:00:01.048023156 +544 63 0 days 00:00:00.636620117 +544 64 0 days 00:00:00.614296035 +544 65 0 days 00:00:00.325430640 +544 66 0 days 00:00:00.605985713 +544 67 0 days 00:00:00.345341136 +544 68 0 days 00:00:00.666971020 +544 69 0 days 00:00:00.313948894 +544 70 0 days 00:00:00.132532608 +544 71 0 days 00:00:01.225629974 +544 72 0 days 00:00:00.533080054 +544 73 0 days 00:00:00.678444648 +544 74 0 days 00:00:00.793921560 +544 75 0 days 00:00:00.435158352 +544 76 0 days 00:00:00.544752920 +544 77 0 days 00:00:00.342493473 +544 78 0 days 00:00:00.154517350 +544 79 0 days 00:00:00.236547961 +544 80 0 days 00:00:01.156790680 +544 81 0 days 00:00:01.555647160 +544 82 
0 days 00:00:00.377321912 +544 83 0 days 00:00:01.609993504 +544 84 0 days 00:00:00.287683340 +544 85 0 days 00:00:00.607855794 +544 86 0 days 00:00:00.413189437 +544 87 0 days 00:00:01.419920634 +544 88 0 days 00:00:00.232007986 +544 89 0 days 00:00:02.047871837 +544 90 0 days 00:00:00.462178481 +544 91 0 days 00:00:00.381158460 +544 92 0 days 00:00:00.216936897 +544 93 0 days 00:00:00.337472195 +544 94 0 days 00:00:00.514514995 +544 95 0 days 00:00:00.124245904 +544 96 0 days 00:00:00.586247608 +544 97 0 days 00:00:00.851359820 +544 98 0 days 00:00:01.336760144 +544 99 0 days 00:00:00.157396608 +544 100 0 days 00:00:00.223749888 +545 1 0 days 00:00:00.697808263 +545 2 0 days 00:00:00.534735114 +545 3 0 days 00:00:04.329018753 +545 4 0 days 00:00:02.204832840 +545 5 0 days 00:00:04.026946790 +545 6 0 days 00:00:00.219020828 +545 7 0 days 00:00:00.932597460 +545 8 0 days 00:00:00.343721924 +545 9 0 days 00:00:04.992960480 +545 11 0 days 00:00:01.529902522 +545 12 0 days 00:00:00.961134480 +545 13 0 days 00:00:02.765604444 +545 14 0 days 00:00:03.516402430 +545 15 0 days 00:00:00.836581430 +545 16 0 days 00:00:00.680106527 +545 18 0 days 00:00:00.436246420 +545 19 0 days 00:00:00.882159780 +545 20 0 days 00:00:02.196681033 +545 21 0 days 00:00:01.294307902 +545 22 0 days 00:00:00.230568384 +545 24 0 days 00:00:00.648628870 +545 25 0 days 00:00:00.579854806 +545 26 0 days 00:00:00.577869090 +545 27 0 days 00:00:00.555232490 +545 28 0 days 00:00:00.387725600 +545 29 0 days 00:00:01.094720945 +545 30 0 days 00:00:00.898525596 +545 31 0 days 00:00:04.149908460 +545 32 0 days 00:00:04.275300521 +545 33 0 days 00:00:00.258306740 +545 34 0 days 00:00:00.833721202 +545 35 0 days 00:00:01.835927965 +545 36 0 days 00:00:01.338349224 +545 37 0 days 00:00:00.275166260 +545 38 0 days 00:00:01.748073264 +545 39 0 days 00:00:00.957137933 +545 40 0 days 00:00:00.565628432 +545 41 0 days 00:00:00.208909218 +545 42 0 days 00:00:00.447431850 +545 44 0 days 00:00:01.104619113 +545 45 0 
days 00:00:00.466288965 +545 47 0 days 00:00:00.457669170 +545 48 0 days 00:00:02.123311880 +545 49 0 days 00:00:01.437035196 +545 50 0 days 00:00:00.746150260 +545 51 0 days 00:00:00.268585644 +545 53 0 days 00:00:01.793846950 +545 54 0 days 00:00:04.276923015 +545 56 0 days 00:00:04.107442430 +545 57 0 days 00:00:04.405931484 +545 58 0 days 00:00:01.152131882 +545 59 0 days 00:00:00.583920182 +545 61 0 days 00:00:01.954915412 +545 62 0 days 00:00:00.452921823 +545 64 0 days 00:00:04.157861375 +545 65 0 days 00:00:00.693042940 +545 66 0 days 00:00:00.707804568 +545 67 0 days 00:00:01.407503940 +545 68 0 days 00:00:03.417832633 +545 69 0 days 00:00:00.413266880 +545 70 0 days 00:00:00.868717100 +545 71 0 days 00:00:00.380166622 +545 72 0 days 00:00:00.409901005 +545 73 0 days 00:00:00.238621236 +545 74 0 days 00:00:00.572810100 +545 75 0 days 00:00:00.532858791 +545 76 0 days 00:00:00.512141104 +545 78 0 days 00:00:00.887668291 +545 79 0 days 00:00:03.589968057 +545 80 0 days 00:00:01.686084264 +545 81 0 days 00:00:01.064040716 +545 82 0 days 00:00:00.705621460 +545 83 0 days 00:00:00.961187811 +545 84 0 days 00:00:00.243814264 +545 85 0 days 00:00:00.244679285 +545 86 0 days 00:00:00.991514950 +545 88 0 days 00:00:01.338015950 +545 89 0 days 00:00:00.870921926 +545 90 0 days 00:00:02.324634588 +545 91 0 days 00:00:00.691225410 +545 92 0 days 00:00:00.627095757 +545 93 0 days 00:00:00.644063560 +545 94 0 days 00:00:03.195755390 +545 96 0 days 00:00:00.304738650 +545 97 0 days 00:00:01.547709334 +545 98 0 days 00:00:03.309440414 +545 99 0 days 00:00:00.679513468 +545 100 0 days 00:00:02.182326746 +546 1 0 days 00:00:00.487612983 +546 3 0 days 00:00:00.208775710 +546 4 0 days 00:00:00.237953610 +546 6 0 days 00:00:00.576747580 +546 8 0 days 00:00:00.432493720 +546 9 0 days 00:00:00.460146573 +546 10 0 days 00:00:00.181735200 +546 11 0 days 00:00:00.260660861 +546 12 0 days 00:00:00.841473785 +546 13 0 days 00:00:01.156960572 +546 14 0 days 00:00:00.687793160 +546 16 
0 days 00:00:02.415140780 +546 17 0 days 00:00:00.248414140 +546 18 0 days 00:00:00.166176785 +546 19 0 days 00:00:00.378096084 +546 20 0 days 00:00:00.322609068 +546 21 0 days 00:00:01.711041917 +546 22 0 days 00:00:00.504259656 +546 23 0 days 00:00:00.431695728 +546 24 0 days 00:00:00.627002443 +546 25 0 days 00:00:00.258011172 +546 26 0 days 00:00:00.756012673 +546 27 0 days 00:00:00.276484776 +546 28 0 days 00:00:00.199322822 +546 30 0 days 00:00:00.860627760 +546 31 0 days 00:00:00.917111940 +546 33 0 days 00:00:00.231363907 +546 34 0 days 00:00:00.369185375 +546 35 0 days 00:00:01.908885594 +546 36 0 days 00:00:00.352770520 +546 37 0 days 00:00:00.290125160 +546 38 0 days 00:00:00.208186268 +546 39 0 days 00:00:00.482341544 +546 40 0 days 00:00:00.223554225 +546 41 0 days 00:00:00.511929833 +546 42 0 days 00:00:00.294869360 +546 43 0 days 00:00:00.398805525 +546 44 0 days 00:00:00.328598712 +546 45 0 days 00:00:01.837657068 +546 46 0 days 00:00:00.240977351 +546 48 0 days 00:00:00.367498334 +546 49 0 days 00:00:00.816376104 +546 50 0 days 00:00:02.661984707 +546 52 0 days 00:00:00.386517180 +546 53 0 days 00:00:01.892523487 +546 54 0 days 00:00:00.394994600 +546 55 0 days 00:00:00.303480148 +546 56 0 days 00:00:00.350748895 +546 57 0 days 00:00:00.192489216 +546 58 0 days 00:00:00.387155889 +546 59 0 days 00:00:00.420996540 +546 60 0 days 00:00:01.688492168 +546 61 0 days 00:00:01.254995294 +546 62 0 days 00:00:00.682750328 +546 63 0 days 00:00:00.828847882 +546 64 0 days 00:00:00.316185937 +546 65 0 days 00:00:00.987026397 +546 66 0 days 00:00:00.255422451 +546 67 0 days 00:00:00.551238613 +546 68 0 days 00:00:00.295463170 +546 69 0 days 00:00:00.548364097 +546 70 0 days 00:00:00.214034410 +546 71 0 days 00:00:00.327500292 +546 73 0 days 00:00:00.230601900 +546 74 0 days 00:00:00.533631585 +546 76 0 days 00:00:00.223442496 +546 77 0 days 00:00:00.247024598 +546 79 0 days 00:00:01.009351074 +546 80 0 days 00:00:00.307046140 +546 81 0 days 00:00:00.424609308 
+546 82 0 days 00:00:00.452529300 +546 84 0 days 00:00:00.635668760 +546 85 0 days 00:00:00.890736565 +546 86 0 days 00:00:00.263249377 +546 87 0 days 00:00:00.151830470 +546 88 0 days 00:00:00.621272328 +546 89 0 days 00:00:02.082435071 +546 90 0 days 00:00:00.296725788 +546 92 0 days 00:00:00.210668602 +546 93 0 days 00:00:02.802915104 +546 94 0 days 00:00:01.572479705 +546 95 0 days 00:00:00.320884821 +546 96 0 days 00:00:00.358893240 +546 97 0 days 00:00:00.143244484 +546 98 0 days 00:00:00.561924468 +546 99 0 days 00:00:00.187206197 +546 100 0 days 00:00:00.377645946 +547 1 0 days 00:00:16.302417135 +547 2 0 days 00:00:15.936557308 +547 3 0 days 00:00:04.642394520 +547 4 0 days 00:00:10.782704933 +547 5 0 days 00:00:11.744986210 +547 6 0 days 00:00:34.312379244 +547 7 0 days 00:00:12.568883408 +547 8 0 days 00:00:05.808930702 +547 9 0 days 00:00:03.136551869 +547 10 0 days 00:00:05.654195321 +547 11 0 days 00:00:05.712913366 +547 12 0 days 00:00:16.797221335 +547 13 0 days 00:00:20.637323791 +547 14 0 days 00:00:04.522267152 +547 15 0 days 00:00:03.290748350 +547 16 0 days 00:00:05.018551924 +547 17 0 days 00:00:05.083782868 +548 1 0 days 00:00:07.337573824 +548 3 0 days 00:00:04.572929567 +548 4 0 days 00:00:07.266550517 +548 5 0 days 00:00:05.864385454 +548 6 0 days 00:00:06.804001066 +548 7 0 days 00:00:04.694474104 +548 8 0 days 00:00:04.536268920 +548 9 0 days 00:00:05.702814608 +548 10 0 days 00:00:01.783702980 +548 11 0 days 00:00:04.381549731 +548 12 0 days 00:00:03.077342877 +548 13 0 days 00:00:02.894357504 +548 14 0 days 00:00:03.610956669 +548 15 0 days 00:00:04.695862752 +548 17 0 days 00:00:02.352340534 +548 18 0 days 00:00:04.891270009 +548 19 0 days 00:00:02.707411010 +548 20 0 days 00:00:05.461336470 +548 22 0 days 00:00:04.350619476 +548 23 0 days 00:00:11.327975343 +548 25 0 days 00:00:09.293878426 +549 1 0 days 00:00:04.561427943 +549 2 0 days 00:00:06.592903417 +549 3 0 days 00:00:10.451489494 +549 4 0 days 00:00:07.525658880 +549 5 0 days 
00:00:13.285595880 +549 6 0 days 00:00:10.243694608 +549 7 0 days 00:00:07.437491668 +549 9 0 days 00:00:10.559509203 +549 10 0 days 00:00:08.240364116 +549 11 0 days 00:00:11.364697286 +549 12 0 days 00:00:15.861431874 +549 13 0 days 00:00:25.194123580 +549 14 0 days 00:00:31.828509631 +550 1 0 days 00:00:05.205545585 +550 2 0 days 00:00:03.929652627 +550 3 0 days 00:00:19.896278610 +550 4 0 days 00:00:03.923147330 +550 6 0 days 00:00:03.508711589 +550 7 0 days 00:00:16.408503322 +550 8 0 days 00:00:05.811356820 +550 9 0 days 00:00:03.528086767 +550 10 0 days 00:00:04.330609960 +550 11 0 days 00:00:18.528211956 +550 12 0 days 00:00:05.642977815 +550 13 0 days 00:00:02.377217595 +550 14 0 days 00:00:03.192247765 +550 15 0 days 00:00:02.145952328 +550 16 0 days 00:00:02.722183924 +550 18 0 days 00:00:03.712080585 +550 19 0 days 00:00:04.040705260 +550 20 0 days 00:00:06.581596927 +550 21 0 days 00:00:17.050223625 +550 22 0 days 00:00:01.757277333 +550 23 0 days 00:00:01.768038356 +550 24 0 days 00:00:07.801787219 +550 25 0 days 00:00:04.195168856 +550 26 0 days 00:00:05.966070300 +551 1 0 days 00:00:17.298515853 +551 2 0 days 00:00:27.699511000 +551 3 0 days 00:00:17.202773833 +551 4 0 days 00:00:27.787353613 +551 5 0 days 00:00:59.037223866 +551 6 0 days 00:00:27.834937853 +551 7 0 days 00:00:16.922014646 +551 8 0 days 00:00:27.696027180 +551 9 0 days 00:00:58.026642486 +551 10 0 days 00:00:16.934129340 +551 11 0 days 00:00:59.419364920 +551 12 0 days 00:00:59.259396226 +551 13 0 days 00:00:27.423387360 +551 14 0 days 00:00:28.017079600 +551 15 0 days 00:00:21.797424251 +551 16 0 days 00:00:20.464254440 +551 17 0 days 00:00:27.387524393 +552 1 0 days 00:00:21.649937550 +552 2 0 days 00:01:05.461639790 +552 3 0 days 00:01:05.770181205 +552 4 0 days 00:00:34.093391608 +552 5 0 days 00:01:05.830797280 +552 6 0 days 00:01:13.706366156 +552 7 0 days 00:01:09.065968700 +553 2 0 days 00:00:17.294251640 +553 3 0 days 00:00:17.192428546 +553 4 0 days 00:00:16.835362060 +553 
5 0 days 00:00:10.248469806 +553 6 0 days 00:00:17.213893473 +553 7 0 days 00:00:35.143882953 +553 8 0 days 00:00:36.628742533 +553 9 0 days 00:00:35.829503026 +553 10 0 days 00:00:10.195071693 +553 11 0 days 00:00:14.065801421 +553 12 0 days 00:00:13.200621370 +553 13 0 days 00:00:35.209535266 +553 14 0 days 00:00:17.215309660 +553 15 0 days 00:00:16.541425573 +553 16 0 days 00:00:16.809990566 +553 17 0 days 00:00:10.212322460 +553 18 0 days 00:00:10.218910413 +553 19 0 days 00:00:10.268057153 +553 20 0 days 00:00:16.844397366 +553 21 0 days 00:00:35.959202140 +553 22 0 days 00:00:16.536458186 +554 1 0 days 00:00:39.441526920 +554 2 0 days 00:00:22.028967155 +554 3 0 days 00:00:10.284875386 +554 4 0 days 00:00:40.518486270 +554 5 0 days 00:00:13.154636505 +554 6 0 days 00:00:46.719601588 +554 7 0 days 00:00:14.229788965 +554 8 0 days 00:00:46.506705402 +554 9 0 days 00:00:43.027272472 +555 1 0 days 00:00:20.845118296 +555 2 0 days 00:01:21.689609611 +555 3 0 days 00:00:36.434679097 +555 4 0 days 00:00:38.027966142 +556 1 0 days 00:00:46.460561991 +556 2 0 days 00:00:47.077267105 +556 3 0 days 00:00:13.138645073 +556 4 0 days 00:00:13.440128557 +556 5 0 days 00:00:14.158361888 +557 1 0 days 00:00:03.771631571 +557 2 0 days 00:00:21.812159225 +557 3 0 days 00:00:30.605761236 +557 4 0 days 00:00:39.171858844 +557 5 0 days 00:00:10.557534712 +557 6 0 days 00:00:32.100642856 +557 7 0 days 00:00:03.844527294 +557 8 0 days 00:00:14.147691455 +557 9 0 days 00:00:04.012954040 +557 10 0 days 00:00:32.583337775 +557 11 0 days 00:00:15.070734002 +557 12 0 days 00:00:11.184325003 +557 13 0 days 00:00:22.164573825 +557 14 0 days 00:00:14.371188457 +557 15 0 days 00:00:10.055029772 +557 16 0 days 00:00:17.237510815 +557 17 0 days 00:00:28.783122824 +557 18 0 days 00:00:09.949993777 +557 19 0 days 00:00:15.235778548 +558 1 0 days 00:00:03.990216305 +558 2 0 days 00:00:04.481119368 +558 3 0 days 00:00:04.260182597 +558 4 0 days 00:00:07.553893290 +558 5 0 days 00:00:02.929582197 
+558 6 0 days 00:00:04.241985885 +558 7 0 days 00:00:05.443123680 +558 8 0 days 00:00:04.004820896 +558 9 0 days 00:00:03.442915195 +558 10 0 days 00:00:04.830657057 +558 11 0 days 00:00:08.740168200 +558 12 0 days 00:00:02.074715207 +558 13 0 days 00:00:09.535373284 +558 14 0 days 00:00:07.278923360 +558 15 0 days 00:00:19.323887576 +558 16 0 days 00:00:19.323923537 +558 17 0 days 00:00:02.403651090 +558 18 0 days 00:00:05.475401831 +558 19 0 days 00:00:05.305938354 +558 20 0 days 00:00:06.627548418 +558 21 0 days 00:00:04.767646140 +558 22 0 days 00:00:05.859093520 +558 23 0 days 00:00:19.972956740 +558 24 0 days 00:00:05.723424356 +558 25 0 days 00:00:20.481783883 +558 26 0 days 00:00:21.841151992 +558 27 0 days 00:00:03.088740322 +558 28 0 days 00:00:02.408151640 +558 29 0 days 00:00:03.109584066 +558 30 0 days 00:00:05.020714998 +558 31 0 days 00:00:04.723977760 +558 32 0 days 00:00:03.183260287 +558 33 0 days 00:00:05.206016808 +558 34 0 days 00:00:02.819396236 +558 35 0 days 00:00:04.410992677 +558 36 0 days 00:00:05.492529360 +559 1 0 days 00:00:06.016425020 +559 2 0 days 00:00:06.706446272 +559 3 0 days 00:00:11.130573446 +559 4 0 days 00:00:12.618765386 +559 6 0 days 00:00:07.303098455 +559 7 0 days 00:00:05.528198146 +559 8 0 days 00:00:27.631636020 +559 9 0 days 00:00:24.716286806 +559 10 0 days 00:00:11.259606126 +559 11 0 days 00:00:23.954195148 +559 12 0 days 00:00:14.524614500 +559 13 0 days 00:00:14.092151020 +559 14 0 days 00:00:09.758365673 +559 15 0 days 00:00:18.166927993 +559 16 0 days 00:00:07.929802315 +559 17 0 days 00:00:26.880109890 +559 18 0 days 00:00:11.695683386 +559 19 0 days 00:00:04.698646140 +559 20 0 days 00:00:04.192846236 +559 21 0 days 00:00:05.259019000 +559 22 0 days 00:00:10.133632386 +559 23 0 days 00:00:04.216008475 +559 24 0 days 00:00:27.093328073 +559 25 0 days 00:00:10.418186924 +559 26 0 days 00:00:11.197431653 +559 27 0 days 00:00:04.752862730 +559 28 0 days 00:00:18.990561900 +559 29 0 days 00:00:24.788080426 +559 
30 0 days 00:00:08.366471413 +560 1 0 days 00:00:05.123036642 +560 2 0 days 00:00:04.733264476 +560 3 0 days 00:00:03.521721777 +560 4 0 days 00:00:01.908318793 +560 5 0 days 00:00:01.845298688 +560 6 0 days 00:00:16.814539666 +560 7 0 days 00:00:08.596815573 +560 8 0 days 00:00:02.148690072 +560 9 0 days 00:00:02.626063800 +560 10 0 days 00:00:01.555058246 +560 11 0 days 00:00:04.873572126 +560 12 0 days 00:00:05.580566690 +560 13 0 days 00:00:04.392792040 +560 14 0 days 00:00:14.912430746 +560 15 0 days 00:00:05.475765280 +560 16 0 days 00:00:05.164302208 +560 17 0 days 00:00:03.844076133 +560 18 0 days 00:00:03.915291006 +560 19 0 days 00:00:03.395081873 +560 20 0 days 00:00:10.130216386 +560 21 0 days 00:00:02.880189982 +560 22 0 days 00:00:01.891533081 +560 23 0 days 00:00:08.825831280 +560 24 0 days 00:00:02.797126726 +560 25 0 days 00:00:02.780913265 +560 26 0 days 00:00:07.418024286 +560 27 0 days 00:00:03.636164646 +560 28 0 days 00:00:16.509066560 +560 29 0 days 00:00:03.598992660 +560 30 0 days 00:00:12.129737093 +560 31 0 days 00:00:12.468517590 +560 32 0 days 00:00:03.047882563 +560 33 0 days 00:00:04.460336910 +560 34 0 days 00:00:05.139976980 +560 35 0 days 00:00:13.676349586 +560 36 0 days 00:00:04.827836420 +560 37 0 days 00:00:05.837465580 +560 39 0 days 00:00:04.582671325 +560 40 0 days 00:00:08.302052850 +560 41 0 days 00:00:05.237813626 +560 42 0 days 00:00:05.566657397 +560 43 0 days 00:00:02.951002318 +560 44 0 days 00:00:03.121995194 +560 45 0 days 00:00:06.882132960 +560 46 0 days 00:00:08.787873926 +560 47 0 days 00:00:02.739985383 +560 48 0 days 00:00:14.097728333 +560 49 0 days 00:00:10.682677780 +560 50 0 days 00:00:04.851871613 +560 51 0 days 00:00:05.857663141 +560 52 0 days 00:00:06.005677486 +560 53 0 days 00:00:12.612111606 +560 54 0 days 00:00:04.212443986 +560 55 0 days 00:00:01.832907503 +560 56 0 days 00:00:02.293611555 +560 57 0 days 00:00:02.331643608 +560 58 0 days 00:00:04.506067853 +560 59 0 days 00:00:09.606660893 +560 60 
0 days 00:00:03.438469970 +560 61 0 days 00:00:11.313599800 +560 62 0 days 00:00:05.135971780 +560 63 0 days 00:00:05.204860687 +560 64 0 days 00:00:14.804322680 +560 65 0 days 00:00:01.806054250 +561 1 0 days 00:02:38.611879221 +561 2 0 days 00:02:42.123647507 +561 3 0 days 00:01:01.411838518 +562 1 0 days 00:05:51.971881335 +563 1 0 days 00:11:12.055944900 +563 2 0 days 00:01:10.021001650 +563 3 0 days 00:03:05.169325007 +563 4 0 days 00:11:38.297062070 +564 1 0 days 00:00:25.871462816 +564 2 0 days 00:00:22.763882786 +564 3 0 days 00:00:20.671791827 +564 4 0 days 00:05:55.881297266 +564 5 0 days 00:00:20.278600247 +564 6 0 days 00:00:49.764901166 +564 7 0 days 00:06:41.069805633 +564 8 0 days 00:00:30.013194188 +564 9 0 days 00:00:22.885347145 +564 10 0 days 00:00:18.142264871 +564 11 0 days 00:00:24.426173344 +564 12 0 days 00:00:55.015660957 +565 1 0 days 00:02:02.536459623 +565 2 0 days 00:00:52.646257545 +565 3 0 days 00:00:57.751754737 +565 4 0 days 00:03:12.103482804 +566 1 0 days 00:00:16.951516629 +566 2 0 days 00:06:06.600824861 +567 1 0 days 00:05:34.413807857 +567 2 0 days 00:01:09.659706731 +568 2 0 days 00:00:51.500295472 +568 3 0 days 00:00:27.509596294 +568 4 0 days 00:02:53.140025845 +569 1 0 days 00:00:39.170247017 +569 2 0 days 00:01:04.809238106 +569 3 0 days 00:01:11.601263250 +569 4 0 days 00:01:07.788767014 +570 1 0 days 00:00:13.181701940 +570 2 0 days 00:00:14.814514436 +570 3 0 days 00:00:15.942570303 +570 4 0 days 00:00:36.682639380 +570 5 0 days 00:00:23.718980028 +570 6 0 days 00:00:21.612142070 +570 7 0 days 00:00:14.784660860 +570 8 0 days 00:00:21.639081965 +570 9 0 days 00:00:36.683365995 +570 10 0 days 00:00:22.195810472 +570 11 0 days 00:00:13.930728705 +570 12 0 days 00:00:37.286957735 +570 13 0 days 00:00:39.007040968 +570 14 0 days 00:00:22.493341384 +570 15 0 days 00:00:37.239430015 +571 1 0 days 00:00:07.877721406 +571 2 0 days 00:00:19.247777908 +571 3 0 days 00:00:11.474528753 +571 4 0 days 00:00:12.678283780 +571 5 0 
days 00:00:19.741509806 +571 6 0 days 00:00:26.079191835 +571 7 0 days 00:00:08.596862016 +571 8 0 days 00:00:24.929115446 +571 9 0 days 00:00:20.602499656 +571 10 0 days 00:00:12.725053585 +571 11 0 days 00:00:20.188832260 +571 12 0 days 00:00:15.968899826 +571 13 0 days 00:00:12.698215080 +571 14 0 days 00:00:13.902718013 +571 15 0 days 00:00:19.852655226 +571 16 0 days 00:00:08.423143340 +571 17 0 days 00:00:23.381488708 +571 18 0 days 00:00:19.970091700 +571 19 0 days 00:00:21.076022540 +571 20 0 days 00:00:11.853374686 +571 21 0 days 00:00:24.854999880 +571 22 0 days 00:00:12.978646095 +571 23 0 days 00:00:14.947352128 +571 24 0 days 00:00:19.834204826 +571 25 0 days 00:00:14.215058092 +571 26 0 days 00:00:08.234636985 +571 27 0 days 00:00:20.911244686 +571 28 0 days 00:00:11.423743870 +571 29 0 days 00:00:24.379910856 +572 1 0 days 00:00:09.908833426 +572 2 0 days 00:00:19.735016555 +572 3 0 days 00:00:23.014504200 +572 4 0 days 00:00:20.594332953 +572 5 0 days 00:00:23.785753766 +572 6 0 days 00:00:10.130799375 +572 7 0 days 00:00:20.787713833 +572 8 0 days 00:00:21.890758793 +572 9 0 days 00:00:13.173515240 +572 10 0 days 00:00:21.901399366 +572 11 0 days 00:00:22.917981305 +572 12 0 days 00:00:13.733402905 +572 13 0 days 00:00:26.392616645 +572 14 0 days 00:00:09.951182533 +572 15 0 days 00:00:15.206535345 +572 16 0 days 00:00:27.066070915 +572 17 0 days 00:00:08.465098810 +572 18 0 days 00:00:20.502567966 +572 19 0 days 00:00:08.987325066 +572 20 0 days 00:00:08.293525326 +572 21 0 days 00:00:13.947484405 +572 22 0 days 00:00:08.720584985 +572 23 0 days 00:00:15.297687326 +572 24 0 days 00:00:07.731292366 +572 25 0 days 00:00:13.293311386 +572 26 0 days 00:00:20.624348813 +572 27 0 days 00:00:09.055100105 +572 28 0 days 00:00:08.633896615 +572 29 0 days 00:00:07.649932486 +572 30 0 days 00:00:21.517513310 +572 31 0 days 00:00:16.321257305 +573 1 0 days 00:00:04.855003810 +573 2 0 days 00:00:04.177425175 +573 3 0 days 00:00:08.723271920 +573 4 0 days 
00:00:06.601832265 +573 5 0 days 00:00:10.519214540 +573 6 0 days 00:00:10.301055586 +573 7 0 days 00:00:04.303951240 +573 8 0 days 00:00:12.597684960 +573 9 0 days 00:00:11.340620775 +573 10 0 days 00:00:11.075446585 +573 11 0 days 00:00:06.085359953 +573 12 0 days 00:00:04.303685653 +573 13 0 days 00:00:05.105928580 +573 14 0 days 00:00:10.433283535 +573 15 0 days 00:00:08.698658680 +573 16 0 days 00:00:06.757666940 +573 17 0 days 00:00:13.974267715 +573 18 0 days 00:00:10.146414153 +573 19 0 days 00:00:05.479187726 +573 20 0 days 00:00:06.996270976 +573 21 0 days 00:00:09.881122820 +573 22 0 days 00:00:04.416484946 +573 23 0 days 00:00:11.014434275 +573 24 0 days 00:00:06.480893872 +573 25 0 days 00:00:04.244305360 +573 26 0 days 00:00:08.579006543 +573 27 0 days 00:00:06.831759695 +573 28 0 days 00:00:08.000912252 +573 29 0 days 00:00:04.307226955 +573 30 0 days 00:00:04.895987710 +573 31 0 days 00:00:06.794368246 +573 32 0 days 00:00:07.688796210 +573 33 0 days 00:00:06.076380900 +573 34 0 days 00:00:05.207082752 +573 35 0 days 00:00:08.485921455 +573 36 0 days 00:00:12.569627160 +573 37 0 days 00:00:10.043443610 +573 38 0 days 00:00:10.077837646 +573 39 0 days 00:00:11.101651230 +573 40 0 days 00:00:05.877845426 +573 41 0 days 00:00:04.538498693 +573 42 0 days 00:00:09.092585612 +573 43 0 days 00:00:11.331405380 +573 44 0 days 00:00:08.506207212 +573 45 0 days 00:00:04.023767080 +573 46 0 days 00:00:11.091251630 +573 47 0 days 00:00:04.648063540 +573 48 0 days 00:00:10.156271245 +573 49 0 days 00:00:09.657639060 +573 50 0 days 00:00:07.096600333 +573 51 0 days 00:00:06.729447765 +573 52 0 days 00:00:13.535965992 +573 53 0 days 00:00:04.766492516 +574 1 0 days 00:00:10.544109240 +574 2 0 days 00:00:07.702265390 +574 3 0 days 00:00:10.785276466 +574 4 0 days 00:00:06.322661653 +574 5 0 days 00:00:12.450482160 +574 6 0 days 00:00:04.563814565 +574 7 0 days 00:00:07.074545812 +574 8 0 days 00:00:04.357795535 +574 9 0 days 00:00:09.506727250 +574 10 0 days 
00:00:13.281323133 +574 11 0 days 00:00:12.194160980 +574 12 0 days 00:00:04.020624306 +574 13 0 days 00:00:07.784593845 +574 14 0 days 00:00:07.844791565 +574 15 0 days 00:00:10.478507953 +574 16 0 days 00:00:06.808512145 +574 17 0 days 00:00:04.169795930 +574 18 0 days 00:00:11.943805375 +574 19 0 days 00:00:04.754657608 +574 20 0 days 00:00:06.854456730 +574 21 0 days 00:00:07.880287572 +574 22 0 days 00:00:12.726962400 +574 23 0 days 00:00:04.603978640 +574 24 0 days 00:00:05.889237870 +574 25 0 days 00:00:07.679682553 +574 26 0 days 00:00:06.070914553 +574 27 0 days 00:00:14.526208036 +574 28 0 days 00:00:10.720013825 +574 29 0 days 00:00:12.462626668 +574 30 0 days 00:00:07.252871588 +574 31 0 days 00:00:05.011983533 +574 32 0 days 00:00:09.546975690 +574 33 0 days 00:00:12.476294240 +574 34 0 days 00:00:06.311914533 +574 35 0 days 00:00:07.415221433 +574 36 0 days 00:00:11.776550555 +574 37 0 days 00:00:10.345227820 +574 38 0 days 00:00:11.533190140 +574 39 0 days 00:00:10.812010700 +574 40 0 days 00:00:11.722469640 +574 41 0 days 00:00:15.006436500 +574 42 0 days 00:00:09.318521740 +574 43 0 days 00:00:07.028848495 +574 44 0 days 00:00:11.572822525 +574 45 0 days 00:00:11.545233853 +574 46 0 days 00:00:07.007780950 +574 47 0 days 00:00:07.116169705 +574 48 0 days 00:00:07.740641725 +574 49 0 days 00:00:07.849167860 +574 50 0 days 00:00:03.969904785 +575 1 0 days 00:00:12.908274010 +575 2 0 days 00:00:16.834561075 +575 3 0 days 00:00:10.274709170 +575 4 0 days 00:00:09.366437580 +575 5 0 days 00:00:13.222430213 +575 6 0 days 00:00:12.218799253 +575 7 0 days 00:00:12.730546193 +575 8 0 days 00:00:12.415325520 +575 9 0 days 00:00:09.490984675 +575 10 0 days 00:00:13.708427165 +575 11 0 days 00:00:09.801870688 +575 12 0 days 00:00:21.255607433 +575 13 0 days 00:00:15.177795950 +575 14 0 days 00:00:25.267153732 +575 15 0 days 00:00:12.308763446 +575 16 0 days 00:00:16.159296203 +575 17 0 days 00:00:27.321701900 +575 18 0 days 00:00:20.902738720 +575 19 0 days 
00:00:08.479427440 +575 20 0 days 00:00:12.253553253 +575 21 0 days 00:00:14.844934946 +575 22 0 days 00:00:08.529576306 +575 23 0 days 00:00:12.579665126 +575 24 0 days 00:00:13.257098580 +575 25 0 days 00:00:14.020283780 +575 26 0 days 00:00:20.623585393 +575 27 0 days 00:00:17.232263345 +575 28 0 days 00:00:11.394556755 +575 29 0 days 00:00:24.186641230 +576 1 0 days 00:00:06.883748280 +576 2 0 days 00:00:05.360837907 +576 3 0 days 00:00:06.834154605 +576 4 0 days 00:00:08.149533807 +576 5 0 days 00:00:05.112015928 +576 6 0 days 00:00:08.643739630 +576 7 0 days 00:00:05.105360856 +576 8 0 days 00:00:11.858223485 +576 9 0 days 00:00:08.243130556 +576 10 0 days 00:00:08.355286964 +576 11 0 days 00:00:14.018323505 +576 12 0 days 00:00:07.513490770 +576 13 0 days 00:00:05.763967412 +576 14 0 days 00:00:05.634249380 +576 15 0 days 00:00:07.614460465 +576 16 0 days 00:00:12.647292500 +576 17 0 days 00:00:06.412544055 +576 18 0 days 00:00:11.952889225 +576 19 0 days 00:00:11.725724400 +576 20 0 days 00:00:12.117308713 +576 21 0 days 00:00:09.364688760 +576 22 0 days 00:00:09.009684860 +576 23 0 days 00:00:08.118890550 +576 24 0 days 00:00:05.550000766 +576 25 0 days 00:00:08.290923580 +576 26 0 days 00:00:12.807852628 +576 27 0 days 00:00:12.824358956 +576 28 0 days 00:00:13.718704970 +576 29 0 days 00:00:07.003409800 +576 30 0 days 00:00:07.776977715 +576 31 0 days 00:00:13.432201032 +576 32 0 days 00:00:04.383923615 +576 33 0 days 00:00:04.612585675 +576 34 0 days 00:00:08.674981355 +576 35 0 days 00:00:04.639608660 +576 36 0 days 00:00:08.191906420 +576 37 0 days 00:00:11.704497770 +576 38 0 days 00:00:08.141771648 +576 39 0 days 00:00:13.742670980 +577 1 0 days 00:00:35.977777656 +577 2 0 days 00:00:58.617615685 +577 3 0 days 00:00:35.458707888 +577 4 0 days 00:00:23.739974146 +577 5 0 days 00:00:24.685289453 +577 6 0 days 00:01:06.639515182 +577 7 0 days 00:00:36.900970173 +577 8 0 days 00:01:07.953959634 +578 1 0 days 00:00:58.890931285 +578 2 0 days 
00:01:10.869472334 +578 3 0 days 00:00:53.622279933 +578 4 0 days 00:00:24.139343364 +578 5 0 days 00:00:24.001198091 +578 6 0 days 00:01:05.092931840 +579 1 0 days 00:00:23.716942317 +579 2 0 days 00:00:14.296304780 +579 3 0 days 00:00:42.486446254 +579 4 0 days 00:00:41.755253354 +579 5 0 days 00:00:14.003652000 +579 6 0 days 00:00:39.658814108 +579 7 0 days 00:00:15.779583875 +579 8 0 days 00:00:41.335884363 +579 9 0 days 00:00:24.500620905 +579 10 0 days 00:00:15.952421046 +580 1 0 days 00:00:18.690199526 +580 2 0 days 00:00:23.281266772 +580 3 0 days 00:00:21.888683235 +580 4 0 days 00:00:21.290775985 +580 5 0 days 00:00:19.587026106 +580 6 0 days 00:00:11.840531373 +580 7 0 days 00:00:40.629514850 +580 8 0 days 00:00:22.249650224 +580 9 0 days 00:00:42.774205620 +580 10 0 days 00:00:23.149883820 +580 11 0 days 00:00:22.205469532 +580 12 0 days 00:00:41.479251870 +580 13 0 days 00:00:22.567942404 +580 14 0 days 00:00:12.689916773 +581 1 0 days 00:00:27.987440615 +581 2 0 days 00:00:26.842976830 +581 3 0 days 00:00:23.211409406 +581 4 0 days 00:00:07.636095266 +581 5 0 days 00:00:08.901141433 +581 6 0 days 00:00:32.767092940 +581 7 0 days 00:00:12.497157726 +581 8 0 days 00:00:09.170726253 +581 9 0 days 00:00:16.978365760 +581 10 0 days 00:00:14.212826546 +581 11 0 days 00:00:15.702002615 +581 12 0 days 00:00:20.591852320 +581 13 0 days 00:00:11.290752550 +581 14 0 days 00:00:17.557193440 +581 15 0 days 00:00:09.636239506 +581 16 0 days 00:00:29.568275480 +581 17 0 days 00:00:09.143444870 +581 18 0 days 00:00:12.403120693 +581 19 0 days 00:00:32.974075470 +582 1 0 days 00:00:12.748927700 +582 2 0 days 00:00:05.372257705 +582 3 0 days 00:00:11.763477673 +582 4 0 days 00:00:09.613013300 +582 5 0 days 00:00:05.974651793 +582 6 0 days 00:00:07.468991360 +582 7 0 days 00:00:08.844290695 +582 8 0 days 00:00:05.428337546 +582 9 0 days 00:00:04.671371424 +582 10 0 days 00:00:06.623939500 +582 11 0 days 00:00:07.063293620 +582 12 0 days 00:00:04.619749060 +582 13 0 days 
00:00:09.006262726 +582 14 0 days 00:00:12.551560093 +582 15 0 days 00:00:10.330112566 +582 16 0 days 00:00:08.286110745 +582 17 0 days 00:00:07.505894010 +582 18 0 days 00:00:04.731193660 +582 19 0 days 00:00:04.421108955 +582 20 0 days 00:00:05.210882433 +582 21 0 days 00:00:12.228292733 +582 22 0 days 00:00:04.987177045 +582 23 0 days 00:00:05.966967340 +582 24 0 days 00:00:07.262387072 +582 25 0 days 00:00:13.202337995 +582 26 0 days 00:00:13.461949880 +582 27 0 days 00:00:04.977902760 +582 28 0 days 00:00:11.813915680 +582 29 0 days 00:00:06.025716766 +582 30 0 days 00:00:09.044319200 +582 31 0 days 00:00:12.996204573 +582 32 0 days 00:00:09.339353986 +582 33 0 days 00:00:07.011014530 +582 34 0 days 00:00:11.013511246 +582 35 0 days 00:00:07.406435500 +582 36 0 days 00:00:13.247148680 +582 37 0 days 00:00:07.341257205 +582 38 0 days 00:00:16.706675680 +582 39 0 days 00:00:08.137461446 +582 40 0 days 00:00:08.990753520 +582 41 0 days 00:00:07.183825080 +582 42 0 days 00:00:08.283289360 +582 43 0 days 00:00:07.928069435 +582 44 0 days 00:00:12.539116380 +582 45 0 days 00:00:04.646530760 +582 46 0 days 00:00:15.376412255 +582 47 0 days 00:00:07.254468946 +582 48 0 days 00:00:05.150904625 +582 49 0 days 00:00:12.494239780 +582 50 0 days 00:00:06.351978996 +582 51 0 days 00:00:04.721412770 +582 52 0 days 00:00:13.177257780 +582 53 0 days 00:00:04.967505733 +582 54 0 days 00:00:05.331405640 +582 55 0 days 00:00:08.198704610 +582 56 0 days 00:00:03.948177953 +582 57 0 days 00:00:09.202353240 +582 58 0 days 00:00:06.519496355 +582 59 0 days 00:00:06.329151465 +583 1 0 days 00:00:00.117096160 +583 2 0 days 00:00:00.120271405 +583 3 0 days 00:00:00.142486760 +583 4 0 days 00:00:00.140418580 +583 5 0 days 00:00:00.099212433 +583 6 0 days 00:00:00.094581326 +583 7 0 days 00:00:00.103226435 +583 8 0 days 00:00:00.093911820 +583 9 0 days 00:00:00.163760915 +583 10 0 days 00:00:00.093612753 +583 11 0 days 00:00:00.130188768 +583 12 0 days 00:00:00.099587220 +583 13 0 days 
00:00:00.112622920 +583 14 0 days 00:00:00.144891166 +583 15 0 days 00:00:00.143642360 +583 16 0 days 00:00:00.143925560 +583 17 0 days 00:00:00.087352113 +583 18 0 days 00:00:00.110619653 +583 19 0 days 00:00:00.099217640 +583 20 0 days 00:00:00.142952013 +583 21 0 days 00:00:00.099550126 +583 22 0 days 00:00:00.113449473 +583 23 0 days 00:00:00.148120746 +583 24 0 days 00:00:00.146634940 +583 25 0 days 00:00:00.147227666 +583 26 0 days 00:00:00.095180100 +583 27 0 days 00:00:00.148453300 +583 28 0 days 00:00:00.113024840 +583 29 0 days 00:00:00.098414553 +583 30 0 days 00:00:00.108515395 +583 31 0 days 00:00:00.137710506 +583 32 0 days 00:00:00.112121846 +583 33 0 days 00:00:00.143844040 +583 34 0 days 00:00:00.099453560 +583 35 0 days 00:00:00.093883506 +583 36 0 days 00:00:00.098742286 +583 37 0 days 00:00:00.161966625 +583 38 0 days 00:00:00.113078753 +583 39 0 days 00:00:00.114230360 +583 40 0 days 00:00:00.111883446 +583 41 0 days 00:00:00.148099520 +583 42 0 days 00:00:00.095663906 +583 43 0 days 00:00:00.125012475 +583 44 0 days 00:00:00.094586033 +583 45 0 days 00:00:00.143735260 +583 46 0 days 00:00:00.113099886 +583 47 0 days 00:00:00.093678420 +583 48 0 days 00:00:00.144631800 +583 49 0 days 00:00:00.144936266 +583 50 0 days 00:00:00.144392746 +583 51 0 days 00:00:00.113076133 +583 52 0 days 00:00:00.094267766 +583 53 0 days 00:00:00.146695553 +583 54 0 days 00:00:00.113532066 +583 55 0 days 00:00:00.112008833 +583 56 0 days 00:00:00.144438640 +583 57 0 days 00:00:00.113234846 +583 58 0 days 00:00:00.111712420 +583 59 0 days 00:00:00.148794606 +583 60 0 days 00:00:00.142174160 +583 61 0 days 00:00:00.094293060 +583 62 0 days 00:00:00.135024060 +583 63 0 days 00:00:00.094550413 +583 64 0 days 00:00:00.111026500 +583 65 0 days 00:00:00.095295446 +583 66 0 days 00:00:00.093677333 +583 67 0 days 00:00:00.143870820 +583 68 0 days 00:00:00.111665560 +583 69 0 days 00:00:00.112311573 +583 70 0 days 00:00:00.146485600 +583 71 0 days 00:00:00.145226993 +583 72 
0 days 00:00:00.113644840 +583 73 0 days 00:00:00.113928633 +583 74 0 days 00:00:00.094099760 +583 75 0 days 00:00:00.104538955 +583 76 0 days 00:00:00.099020720 +583 77 0 days 00:00:00.114907333 +583 78 0 days 00:00:00.113191080 +583 79 0 days 00:00:00.145758266 +583 80 0 days 00:00:00.099731080 +583 81 0 days 00:00:00.093763313 +583 82 0 days 00:00:00.094659933 +583 83 0 days 00:00:00.097242393 +583 84 0 days 00:00:00.107776160 +583 85 0 days 00:00:00.129376672 +583 86 0 days 00:00:00.110525353 +583 87 0 days 00:00:00.111963320 +583 88 0 days 00:00:00.109951380 +583 89 0 days 00:00:00.122580485 +583 90 0 days 00:00:00.113685006 +583 91 0 days 00:00:00.145603833 +583 92 0 days 00:00:00.144364720 +583 93 0 days 00:00:00.145491180 +583 94 0 days 00:00:00.111836460 +583 95 0 days 00:00:00.125657213 +583 96 0 days 00:00:00.091881153 +583 97 0 days 00:00:00.144777786 +583 98 0 days 00:00:00.144405606 +583 99 0 days 00:00:00.094067900 +583 100 0 days 00:00:00.093271286 +584 1 0 days 00:00:00.114386746 +584 2 0 days 00:00:00.114125813 +584 3 0 days 00:00:00.095782446 +584 4 0 days 00:00:00.098029933 +584 5 0 days 00:00:00.116241393 +584 6 0 days 00:00:00.109339986 +584 7 0 days 00:00:00.114782006 +584 8 0 days 00:00:00.151136620 +584 9 0 days 00:00:00.112310426 +584 10 0 days 00:00:00.112909246 +584 11 0 days 00:00:00.097017353 +584 12 0 days 00:00:00.150771986 +584 13 0 days 00:00:00.150900226 +584 14 0 days 00:00:00.115006026 +584 15 0 days 00:00:00.096706066 +584 16 0 days 00:00:00.134022806 +584 17 0 days 00:00:00.100543786 +584 18 0 days 00:00:00.096279320 +584 19 0 days 00:00:00.159756085 +584 20 0 days 00:00:00.117593693 +584 21 0 days 00:00:00.115591273 +584 22 0 days 00:00:00.100508933 +584 23 0 days 00:00:00.101106293 +584 24 0 days 00:00:00.146245440 +584 25 0 days 00:00:00.160887460 +584 26 0 days 00:00:00.126301355 +584 27 0 days 00:00:00.145711460 +584 28 0 days 00:00:00.115404733 +584 29 0 days 00:00:00.161460550 +584 30 0 days 00:00:00.140912680 +584 31 0 
days 00:00:00.099847313 +584 32 0 days 00:00:00.096385560 +584 33 0 days 00:00:00.099029580 +584 34 0 days 00:00:00.140874213 +584 35 0 days 00:00:00.095846040 +584 36 0 days 00:00:00.151874793 +584 37 0 days 00:00:00.148001180 +584 38 0 days 00:00:00.173435332 +584 39 0 days 00:00:00.113308086 +584 40 0 days 00:00:00.095534186 +584 41 0 days 00:00:00.098375773 +584 42 0 days 00:00:00.163382945 +584 43 0 days 00:00:00.114060766 +584 44 0 days 00:00:00.148444120 +584 45 0 days 00:00:00.146728313 +584 46 0 days 00:00:00.147939713 +584 47 0 days 00:00:00.146711060 +584 48 0 days 00:00:00.115602693 +584 49 0 days 00:00:00.097683700 +584 50 0 days 00:00:00.146423966 +584 51 0 days 00:00:00.116606593 +584 52 0 days 00:00:00.116584473 +584 53 0 days 00:00:00.095906780 +584 54 0 days 00:00:00.148268420 +584 55 0 days 00:00:00.115506833 +584 56 0 days 00:00:00.161878440 +584 57 0 days 00:00:00.115112880 +584 58 0 days 00:00:00.099974510 +584 59 0 days 00:00:00.095915146 +584 60 0 days 00:00:00.147590820 +584 61 0 days 00:00:00.115741840 +584 62 0 days 00:00:00.101938340 +584 63 0 days 00:00:00.139220833 +584 64 0 days 00:00:00.166783030 +584 65 0 days 00:00:00.125863460 +584 66 0 days 00:00:00.151281726 +584 67 0 days 00:00:00.105500193 +584 68 0 days 00:00:00.109927773 +584 69 0 days 00:00:00.150172760 +584 70 0 days 00:00:00.113836846 +584 71 0 days 00:00:00.123863035 +584 72 0 days 00:00:00.151055760 +584 73 0 days 00:00:00.095739166 +584 74 0 days 00:00:00.114058153 +584 75 0 days 00:00:00.149763506 +584 76 0 days 00:00:00.096649060 +584 77 0 days 00:00:00.116019713 +584 78 0 days 00:00:00.104718805 +584 79 0 days 00:00:00.113941813 +584 80 0 days 00:00:00.168391045 +584 81 0 days 00:00:00.150139213 +584 82 0 days 00:00:00.115085826 +584 83 0 days 00:00:00.139677673 +584 84 0 days 00:00:00.174855904 +584 85 0 days 00:00:00.115894840 +584 86 0 days 00:00:00.095965486 +584 87 0 days 00:00:00.152780340 +584 88 0 days 00:00:00.153470946 +584 89 0 days 00:00:00.149451773 
+584 90 0 days 00:00:00.100348000 +584 91 0 days 00:00:00.096390506 +584 92 0 days 00:00:00.166747133 +584 93 0 days 00:00:00.142911520 +584 94 0 days 00:00:00.119877093 +584 95 0 days 00:00:00.117034853 +584 96 0 days 00:00:00.114819333 +584 97 0 days 00:00:00.115290666 +584 98 0 days 00:00:00.095683240 +584 99 0 days 00:00:00.177889772 +584 100 0 days 00:00:00.094954833 +585 1 0 days 00:00:00.055173040 +585 2 0 days 00:00:00.085184766 +585 3 0 days 00:00:00.054452000 +585 4 0 days 00:00:00.085136240 +585 5 0 days 00:00:00.063919980 +585 6 0 days 00:00:00.091012935 +585 7 0 days 00:00:00.061898900 +585 8 0 days 00:00:00.082813906 +585 9 0 days 00:00:00.063709760 +585 10 0 days 00:00:00.063673840 +585 11 0 days 00:00:00.063635120 +585 12 0 days 00:00:00.064281966 +585 13 0 days 00:00:00.063341236 +585 14 0 days 00:00:00.084848740 +585 15 0 days 00:00:00.084934853 +585 16 0 days 00:00:00.083697026 +585 17 0 days 00:00:00.054893880 +585 18 0 days 00:00:00.082146520 +585 19 0 days 00:00:00.064714493 +585 20 0 days 00:00:00.056268660 +585 21 0 days 00:00:00.090890645 +585 22 0 days 00:00:00.083280440 +585 23 0 days 00:00:00.055520200 +585 24 0 days 00:00:00.081202120 +585 25 0 days 00:00:00.066658051 +585 26 0 days 00:00:00.055655786 +585 27 0 days 00:00:00.063249286 +585 28 0 days 00:00:00.064019026 +585 29 0 days 00:00:00.056647560 +585 30 0 days 00:00:00.054639333 +585 31 0 days 00:00:00.055983280 +585 32 0 days 00:00:00.054400120 +585 33 0 days 00:00:00.071378720 +585 34 0 days 00:00:00.055656000 +585 35 0 days 00:00:00.059307073 +585 36 0 days 00:00:00.054646793 +585 37 0 days 00:00:00.064480426 +585 38 0 days 00:00:00.079609633 +585 39 0 days 00:00:00.056957873 +585 40 0 days 00:00:00.085187493 +585 41 0 days 00:00:00.055129246 +585 42 0 days 00:00:00.056230873 +585 43 0 days 00:00:00.056611840 +585 44 0 days 00:00:00.055749726 +585 45 0 days 00:00:00.083312573 +585 46 0 days 00:00:00.065315553 +585 47 0 days 00:00:00.054189206 +585 48 0 days 00:00:00.089808355 
+585 49 0 days 00:00:00.054428980 +585 50 0 days 00:00:00.082257253 +585 51 0 days 00:00:00.054585653 +585 52 0 days 00:00:00.082357133 +585 53 0 days 00:00:00.054653346 +585 54 0 days 00:00:00.063758400 +585 55 0 days 00:00:00.054366833 +585 56 0 days 00:00:00.056403000 +585 57 0 days 00:00:00.082883260 +585 58 0 days 00:00:00.055573886 +585 59 0 days 00:00:00.081277153 +585 60 0 days 00:00:00.082838913 +585 61 0 days 00:00:00.087111125 +585 62 0 days 00:00:00.082519033 +585 63 0 days 00:00:00.083815000 +585 64 0 days 00:00:00.082234086 +585 65 0 days 00:00:00.066576230 +585 66 0 days 00:00:00.059554140 +585 67 0 days 00:00:00.060133865 +585 68 0 days 00:00:00.055877060 +585 69 0 days 00:00:00.086275793 +585 70 0 days 00:00:00.065231806 +585 71 0 days 00:00:00.071581900 +585 72 0 days 00:00:00.063064633 +585 73 0 days 00:00:00.065292986 +585 74 0 days 00:00:00.057019306 +585 75 0 days 00:00:00.055708500 +585 76 0 days 00:00:00.064757106 +585 77 0 days 00:00:00.073611073 +585 78 0 days 00:00:00.080305253 +585 79 0 days 00:00:00.055653946 +585 80 0 days 00:00:00.084616546 +585 81 0 days 00:00:00.055387166 +585 82 0 days 00:00:00.084747826 +585 83 0 days 00:00:00.064558433 +585 84 0 days 00:00:00.085199946 +585 85 0 days 00:00:00.057464013 +585 86 0 days 00:00:00.060961433 +585 87 0 days 00:00:00.083965986 +585 88 0 days 00:00:00.063872013 +585 89 0 days 00:00:00.055027073 +585 90 0 days 00:00:00.064728106 +585 91 0 days 00:00:00.054918226 +585 92 0 days 00:00:00.054781933 +585 93 0 days 00:00:00.085135700 +585 94 0 days 00:00:00.084769580 +585 95 0 days 00:00:00.062508786 +585 96 0 days 00:00:00.064308740 +585 97 0 days 00:00:00.055252346 +585 98 0 days 00:00:00.063804046 +585 99 0 days 00:00:00.085505293 +585 100 0 days 00:00:00.085422333 +586 1 0 days 00:00:00.086693766 +586 2 0 days 00:00:00.089136706 +586 3 0 days 00:00:00.066504740 +586 4 0 days 00:00:00.055001113 +586 5 0 days 00:00:00.085567133 +586 6 0 days 00:00:00.088899546 +586 7 0 days 00:00:00.056564553 
+586 8 0 days 00:00:00.056121933 +586 9 0 days 00:00:00.055929453 +586 10 0 days 00:00:00.067878340 +586 11 0 days 00:00:00.084813160 +586 12 0 days 00:00:00.057771066 +586 13 0 days 00:00:00.055712573 +586 14 0 days 00:00:00.057543413 +586 15 0 days 00:00:00.056409506 +586 16 0 days 00:00:00.084671386 +586 17 0 days 00:00:00.086467246 +586 18 0 days 00:00:00.065279500 +586 19 0 days 00:00:00.055841240 +586 20 0 days 00:00:00.055307760 +586 21 0 days 00:00:00.057472306 +586 22 0 days 00:00:00.085392586 +586 23 0 days 00:00:00.063329826 +586 24 0 days 00:00:00.057580240 +586 25 0 days 00:00:00.058542253 +586 26 0 days 00:00:00.085737573 +586 27 0 days 00:00:00.065587953 +586 28 0 days 00:00:00.065517673 +586 29 0 days 00:00:00.085200386 +586 30 0 days 00:00:00.086391413 +586 31 0 days 00:00:00.065136453 +586 32 0 days 00:00:00.065055156 +586 33 0 days 00:00:00.057069593 +586 34 0 days 00:00:00.055347320 +586 35 0 days 00:00:00.065577446 +586 36 0 days 00:00:00.066524893 +586 37 0 days 00:00:00.058941020 +586 38 0 days 00:00:00.065599013 +586 39 0 days 00:00:00.057350373 +586 40 0 days 00:00:00.085614080 +586 41 0 days 00:00:00.066287513 +586 42 0 days 00:00:00.055560480 +586 43 0 days 00:00:00.098179308 +586 44 0 days 00:00:00.078222306 +586 45 0 days 00:00:00.055532813 +586 46 0 days 00:00:00.067348680 +586 47 0 days 00:00:00.085376546 +586 48 0 days 00:00:00.066502100 +586 49 0 days 00:00:00.056349286 +586 50 0 days 00:00:00.066557240 +586 51 0 days 00:00:00.056801693 +586 52 0 days 00:00:00.085180293 +586 53 0 days 00:00:00.054771553 +586 54 0 days 00:00:00.071619280 +586 55 0 days 00:00:00.066463866 +586 56 0 days 00:00:00.093819445 +586 57 0 days 00:00:00.057896313 +586 58 0 days 00:00:00.084771273 +586 59 0 days 00:00:00.084802226 +586 60 0 days 00:00:00.088117493 +586 61 0 days 00:00:00.058592660 +586 62 0 days 00:00:00.066965386 +586 63 0 days 00:00:00.057979393 +586 64 0 days 00:00:00.055579680 +586 65 0 days 00:00:00.064325506 +586 66 0 days 
00:00:00.085372240 +586 67 0 days 00:00:00.058850426 +586 68 0 days 00:00:00.063436980 +586 69 0 days 00:00:00.085901006 +586 70 0 days 00:00:00.085468693 +586 71 0 days 00:00:00.072165295 +586 72 0 days 00:00:00.087474760 +586 73 0 days 00:00:00.055016260 +586 74 0 days 00:00:00.061053326 +586 75 0 days 00:00:00.086283666 +586 76 0 days 00:00:00.057748513 +586 77 0 days 00:00:00.064905873 +586 78 0 days 00:00:00.064402886 +586 79 0 days 00:00:00.065817400 +586 80 0 days 00:00:00.103830911 +586 81 0 days 00:00:00.057787126 +586 82 0 days 00:00:00.055226266 +586 83 0 days 00:00:00.086002413 +586 84 0 days 00:00:00.055346160 +586 85 0 days 00:00:00.085922586 +586 86 0 days 00:00:00.084016460 +586 87 0 days 00:00:00.064646593 +586 88 0 days 00:00:00.065480960 +586 89 0 days 00:00:00.093545560 +586 90 0 days 00:00:00.065737753 +586 91 0 days 00:00:00.085185646 +586 92 0 days 00:00:00.056932466 +586 93 0 days 00:00:00.057612160 +586 94 0 days 00:00:00.056527773 +586 95 0 days 00:00:00.085324126 +586 96 0 days 00:00:00.086979506 +586 97 0 days 00:00:00.057702180 +586 98 0 days 00:00:00.055569873 +586 99 0 days 00:00:00.084964933 +586 100 0 days 00:00:00.055921026 +587 2 0 days 00:00:00.190794725 +587 3 0 days 00:00:00.218032150 +587 4 0 days 00:00:00.189370894 +587 5 0 days 00:00:00.208243614 +587 6 0 days 00:00:00.200557484 +587 8 0 days 00:00:00.188181849 +587 9 0 days 00:00:00.194363210 +587 11 0 days 00:00:00.099884953 +587 14 0 days 00:00:00.099990580 +587 16 0 days 00:00:00.201783284 +587 17 0 days 00:00:00.199451576 +587 20 0 days 00:00:00.193489014 +587 21 0 days 00:00:00.195773861 +587 22 0 days 00:00:00.147968974 +587 31 0 days 00:00:00.111487986 +587 32 0 days 00:00:00.094278073 +587 35 0 days 00:00:00.193162608 +587 36 0 days 00:00:00.198581952 +587 37 0 days 00:00:00.197503091 +587 38 0 days 00:00:00.191661032 +587 39 0 days 00:00:00.197171129 +587 40 0 days 00:00:00.099600713 +587 41 0 days 00:00:00.189116960 +587 44 0 days 00:00:00.190399658 +587 47 0 days 
00:00:00.094971153 +587 48 0 days 00:00:00.203385406 +587 49 0 days 00:00:00.142014682 +587 50 0 days 00:00:00.195491243 +587 51 0 days 00:00:00.096011406 +587 54 0 days 00:00:00.100368340 +587 55 0 days 00:00:00.192101891 +587 57 0 days 00:00:00.113004900 +587 60 0 days 00:00:00.112544500 +587 61 0 days 00:00:00.094574733 +587 62 0 days 00:00:00.098324986 +587 64 0 days 00:00:00.149645378 +587 66 0 days 00:00:00.187305376 +587 68 0 days 00:00:00.191344423 +587 70 0 days 00:00:00.150974203 +587 72 0 days 00:00:00.170503055 +587 73 0 days 00:00:00.191768158 +587 74 0 days 00:00:00.189789924 +587 75 0 days 00:00:00.095648173 +587 76 0 days 00:00:00.130297673 +587 77 0 days 00:00:00.195370108 +587 81 0 days 00:00:00.095293926 +587 82 0 days 00:00:00.094781400 +587 83 0 days 00:00:00.150526725 +587 85 0 days 00:00:00.166466872 +587 87 0 days 00:00:00.161249138 +587 89 0 days 00:00:00.186245621 +587 92 0 days 00:00:00.134803020 +587 93 0 days 00:00:00.189232845 +587 94 0 days 00:00:00.096095106 +587 96 0 days 00:00:00.110428740 +587 97 0 days 00:00:00.097323373 +587 98 0 days 00:00:00.204568929 +587 99 0 days 00:00:00.188509281 +587 100 0 days 00:00:00.193187745 +588 1 0 days 00:00:00.105133776 +588 2 0 days 00:00:00.058753995 +588 4 0 days 00:00:00.103664605 +588 5 0 days 00:00:00.071988192 +588 8 0 days 00:00:00.057231586 +588 10 0 days 00:00:00.115598180 +588 13 0 days 00:00:00.056532586 +588 14 0 days 00:00:00.054027333 +588 15 0 days 00:00:00.079958869 +588 17 0 days 00:00:00.056208260 +588 18 0 days 00:00:00.118018173 +588 24 0 days 00:00:00.056236253 +588 27 0 days 00:00:00.121833587 +588 28 0 days 00:00:00.117759874 +588 29 0 days 00:00:00.123303408 +588 30 0 days 00:00:00.058303626 +588 32 0 days 00:00:00.056011400 +588 33 0 days 00:00:00.123162431 +588 35 0 days 00:00:00.108605812 +588 36 0 days 00:00:00.117637630 +588 38 0 days 00:00:00.106409672 +588 39 0 days 00:00:00.060812866 +588 41 0 days 00:00:00.120172739 +588 45 0 days 00:00:00.123217204 +588 46 0 
days 00:00:00.055772460 +588 48 0 days 00:00:00.056451793 +588 49 0 days 00:00:00.054183660 +588 58 0 days 00:00:00.063450433 +588 60 0 days 00:00:00.063473766 +588 63 0 days 00:00:00.072632606 +588 65 0 days 00:00:00.107822222 +588 67 0 days 00:00:00.054483626 +588 70 0 days 00:00:00.124636915 +588 71 0 days 00:00:00.063495693 +588 72 0 days 00:00:00.107458296 +588 73 0 days 00:00:00.056762026 +588 74 0 days 00:00:00.113259326 +588 75 0 days 00:00:00.096677876 +588 78 0 days 00:00:00.063067413 +588 80 0 days 00:00:00.122938572 +588 81 0 days 00:00:00.053877253 +588 82 0 days 00:00:00.104171322 +588 83 0 days 00:00:00.064018153 +588 85 0 days 00:00:00.082550118 +588 86 0 days 00:00:00.054555480 +588 88 0 days 00:00:00.053898680 +588 89 0 days 00:00:00.055513120 +588 90 0 days 00:00:00.115352377 +588 91 0 days 00:00:00.116814496 +588 92 0 days 00:00:00.112884899 +588 94 0 days 00:00:00.065472526 +588 95 0 days 00:00:00.063658100 +588 98 0 days 00:00:00.111759657 +588 100 0 days 00:00:00.057969700 +589 2 0 days 00:00:00.280684640 +589 3 0 days 00:00:00.349063000 +589 4 0 days 00:00:00.898084757 +589 5 0 days 00:00:00.450936226 +589 6 0 days 00:00:00.645061926 +589 7 0 days 00:00:00.454857840 +589 8 0 days 00:00:00.225773566 +589 9 0 days 00:00:00.905153106 +589 10 0 days 00:00:00.857258429 +589 12 0 days 00:00:00.494092380 +589 13 0 days 00:00:00.436664673 +589 14 0 days 00:00:00.505053473 +589 15 0 days 00:00:00.862169628 +589 16 0 days 00:00:00.451595580 +589 17 0 days 00:00:00.259920096 +589 19 0 days 00:00:00.954597794 +589 20 0 days 00:00:00.473872606 +589 21 0 days 00:00:00.721300240 +589 22 0 days 00:00:00.438069945 +589 23 0 days 00:00:00.573993886 +589 24 0 days 00:00:00.834525486 +589 25 0 days 00:00:00.672191280 +589 26 0 days 00:00:00.289099398 +589 28 0 days 00:00:00.881009004 +589 29 0 days 00:00:00.578841640 +589 30 0 days 00:00:00.356265086 +589 31 0 days 00:00:00.404109460 +589 32 0 days 00:00:00.489276682 +589 33 0 days 00:00:00.825928426 +589 34 0 
days 00:00:00.860903900 +589 35 0 days 00:00:00.664157733 +589 36 0 days 00:00:00.517821870 +589 37 0 days 00:00:00.472867520 +589 38 0 days 00:00:00.627356313 +589 39 0 days 00:00:00.271249960 +589 40 0 days 00:00:00.360576866 +589 41 0 days 00:00:00.882175842 +589 42 0 days 00:00:00.234934006 +589 44 0 days 00:00:00.688158926 +589 45 0 days 00:00:00.697315920 +589 46 0 days 00:00:00.441125526 +589 47 0 days 00:00:00.872006174 +589 48 0 days 00:00:00.573913166 +589 49 0 days 00:00:00.659832693 +589 50 0 days 00:00:00.649516913 +589 51 0 days 00:00:00.704817040 +589 52 0 days 00:00:00.435891793 +589 53 0 days 00:00:00.261261806 +589 54 0 days 00:00:00.362326953 +589 55 0 days 00:00:00.397868253 +589 57 0 days 00:00:00.350222506 +589 58 0 days 00:00:00.670871893 +589 59 0 days 00:00:00.705832586 +589 60 0 days 00:00:00.443888205 +589 61 0 days 00:00:00.778358493 +589 62 0 days 00:00:00.492626460 +589 63 0 days 00:00:00.760242050 +589 64 0 days 00:00:00.722331573 +589 65 0 days 00:00:00.368490226 +589 66 0 days 00:00:00.453579560 +589 67 0 days 00:00:00.253670684 +589 68 0 days 00:00:00.388520033 +589 69 0 days 00:00:00.367013080 +589 70 0 days 00:00:00.632427460 +589 71 0 days 00:00:00.665113500 +589 72 0 days 00:00:00.654036273 +589 73 0 days 00:00:00.810001608 +589 74 0 days 00:00:00.700815693 +589 76 0 days 00:00:00.412702320 +589 77 0 days 00:00:00.440675713 +589 78 0 days 00:00:00.276669806 +589 79 0 days 00:00:00.792332572 +589 80 0 days 00:00:00.527010306 +589 81 0 days 00:00:00.418410550 +589 82 0 days 00:00:00.434680253 +589 83 0 days 00:00:00.233719666 +589 84 0 days 00:00:00.662253580 +589 85 0 days 00:00:00.399082906 +589 86 0 days 00:00:00.393241280 +589 87 0 days 00:00:00.804419133 +589 88 0 days 00:00:00.399568506 +589 90 0 days 00:00:00.655766886 +589 91 0 days 00:00:00.472256040 +589 92 0 days 00:00:00.771679106 +589 94 0 days 00:00:00.427026718 +589 95 0 days 00:00:00.437198306 +589 96 0 days 00:00:00.867732473 +589 97 0 days 00:00:00.285574393 
+589 98 0 days 00:00:00.657404053 +589 99 0 days 00:00:00.871677655 +589 100 0 days 00:00:00.679801120 +590 1 0 days 00:00:00.932773561 +590 2 0 days 00:00:00.898075166 +590 3 0 days 00:00:00.430071866 +590 4 0 days 00:00:00.382014606 +590 6 0 days 00:00:00.881535504 +590 7 0 days 00:00:00.902564840 +590 8 0 days 00:00:00.726230193 +590 9 0 days 00:00:00.234400553 +590 10 0 days 00:00:00.563878600 +590 11 0 days 00:00:00.515937480 +590 12 0 days 00:00:00.283167020 +590 13 0 days 00:00:00.584967506 +590 14 0 days 00:00:00.556032224 +590 15 0 days 00:00:00.772998584 +590 16 0 days 00:00:00.249446613 +590 17 0 days 00:00:00.284280166 +590 18 0 days 00:00:00.754555320 +590 20 0 days 00:00:00.813743224 +590 21 0 days 00:00:00.371722770 +590 22 0 days 00:00:00.279794286 +590 23 0 days 00:00:00.512348140 +590 24 0 days 00:00:00.273574693 +590 25 0 days 00:00:00.690480873 +590 27 0 days 00:00:00.650146000 +590 28 0 days 00:00:00.263943800 +590 29 0 days 00:00:00.505534774 +590 30 0 days 00:00:00.675844024 +590 31 0 days 00:00:00.282258326 +590 32 0 days 00:00:00.627477030 +590 33 0 days 00:00:00.466778320 +590 34 0 days 00:00:00.397973146 +590 35 0 days 00:00:00.275261830 +590 36 0 days 00:00:00.383483386 +590 37 0 days 00:00:00.392409653 +590 38 0 days 00:00:00.233436173 +590 39 0 days 00:00:00.386354113 +590 40 0 days 00:00:00.946166874 +590 41 0 days 00:00:00.418104505 +590 42 0 days 00:00:00.386403813 +590 43 0 days 00:00:00.520014780 +590 44 0 days 00:00:00.352287300 +590 45 0 days 00:00:00.402277753 +590 46 0 days 00:00:00.240401733 +590 47 0 days 00:00:00.240122693 +590 48 0 days 00:00:00.509130653 +590 49 0 days 00:00:00.481460005 +590 50 0 days 00:00:00.757283123 +590 51 0 days 00:00:00.322299526 +590 52 0 days 00:00:00.739747536 +590 53 0 days 00:00:00.958833473 +590 54 0 days 00:00:00.378768166 +590 55 0 days 00:00:00.386225640 +590 56 0 days 00:00:00.253558300 +590 57 0 days 00:00:00.412082946 +590 58 0 days 00:00:00.718333060 +590 59 0 days 00:00:00.373496360 
+590 60 0 days 00:00:00.594750313 +590 62 0 days 00:00:00.718806704 +590 63 0 days 00:00:00.675040326 +590 64 0 days 00:00:00.386597186 +590 65 0 days 00:00:00.468589760 +590 67 0 days 00:00:00.379941660 +590 68 0 days 00:00:00.777836610 +590 69 0 days 00:00:00.282066200 +590 70 0 days 00:00:00.687759260 +590 71 0 days 00:00:00.252741380 +590 72 0 days 00:00:00.431330615 +590 73 0 days 00:00:00.213541520 +590 74 0 days 00:00:00.391715786 +590 75 0 days 00:00:00.645525780 +590 76 0 days 00:00:00.690896386 +590 77 0 days 00:00:00.776400208 +590 78 0 days 00:00:00.928812231 +590 81 0 days 00:00:00.305740993 +590 82 0 days 00:00:00.275100740 +590 83 0 days 00:00:00.825837393 +590 84 0 days 00:00:00.775316546 +590 85 0 days 00:00:00.378107773 +590 86 0 days 00:00:00.248125773 +590 87 0 days 00:00:00.259554493 +590 88 0 days 00:00:00.474285190 +590 89 0 days 00:00:00.770438830 +590 90 0 days 00:00:00.247848346 +590 91 0 days 00:00:00.240296473 +590 92 0 days 00:00:00.563430660 +590 93 0 days 00:00:00.535003840 +590 94 0 days 00:00:00.809637405 +590 95 0 days 00:00:00.599923975 +590 96 0 days 00:00:00.514439541 +590 97 0 days 00:00:00.540796304 +590 98 0 days 00:00:00.386822900 +590 99 0 days 00:00:00.731163373 +590 100 0 days 00:00:00.783569060 +591 1 0 days 00:00:00.287551493 +591 2 0 days 00:00:00.346306813 +591 3 0 days 00:00:00.218975746 +591 4 0 days 00:00:00.351931413 +591 5 0 days 00:00:00.193247886 +591 6 0 days 00:00:00.409806592 +591 7 0 days 00:00:00.349938366 +591 8 0 days 00:00:00.193985620 +591 9 0 days 00:00:00.211608086 +591 10 0 days 00:00:00.196617186 +591 11 0 days 00:00:00.344660433 +591 12 0 days 00:00:00.389357706 +591 13 0 days 00:00:00.234604053 +591 14 0 days 00:00:00.224872084 +591 15 0 days 00:00:00.191997440 +591 16 0 days 00:00:00.173127433 +591 17 0 days 00:00:00.217419960 +591 18 0 days 00:00:00.350795733 +591 19 0 days 00:00:00.218363880 +591 20 0 days 00:00:00.239011860 +591 21 0 days 00:00:00.484932515 +591 22 0 days 00:00:00.405357940 
+591 23 0 days 00:00:00.193746353 +591 24 0 days 00:00:00.350912080 +591 25 0 days 00:00:00.297797533 +591 26 0 days 00:00:00.369093660 +591 27 0 days 00:00:00.152953333 +591 28 0 days 00:00:00.305099013 +591 29 0 days 00:00:00.267762673 +591 30 0 days 00:00:00.391230540 +591 31 0 days 00:00:00.486365152 +591 32 0 days 00:00:00.219442650 +591 33 0 days 00:00:00.400649926 +591 34 0 days 00:00:00.141787530 +591 35 0 days 00:00:00.129780400 +591 36 0 days 00:00:00.362132300 +591 37 0 days 00:00:00.222557826 +591 38 0 days 00:00:00.291022400 +591 39 0 days 00:00:00.200469046 +591 40 0 days 00:00:00.320105180 +591 41 0 days 00:00:00.392733905 +591 42 0 days 00:00:00.194837586 +591 43 0 days 00:00:00.133162833 +591 44 0 days 00:00:00.204520926 +591 45 0 days 00:00:00.113748110 +591 46 0 days 00:00:00.194073546 +591 47 0 days 00:00:00.166715200 +591 48 0 days 00:00:00.119454200 +591 49 0 days 00:00:00.281008806 +591 50 0 days 00:00:00.138916908 +591 51 0 days 00:00:00.127086900 +591 53 0 days 00:00:00.331458293 +591 54 0 days 00:00:00.275166480 +591 55 0 days 00:00:00.262661156 +591 56 0 days 00:00:00.150469206 +591 57 0 days 00:00:00.292478786 +591 58 0 days 00:00:00.168417366 +591 59 0 days 00:00:00.333457133 +591 60 0 days 00:00:00.184774975 +591 61 0 days 00:00:00.204366473 +591 62 0 days 00:00:00.422731720 +591 63 0 days 00:00:00.121122620 +591 64 0 days 00:00:00.280402913 +591 65 0 days 00:00:00.332173160 +591 66 0 days 00:00:00.191615593 +591 67 0 days 00:00:00.361383880 +591 68 0 days 00:00:00.138858924 +591 69 0 days 00:00:00.392302373 +591 70 0 days 00:00:00.238522122 +591 71 0 days 00:00:00.174995853 +591 73 0 days 00:00:00.328578086 +591 74 0 days 00:00:00.150808880 +591 75 0 days 00:00:00.183990700 +591 76 0 days 00:00:00.209440626 +591 77 0 days 00:00:00.252181586 +591 78 0 days 00:00:00.139690746 +591 79 0 days 00:00:00.220199033 +591 80 0 days 00:00:00.134738980 +591 81 0 days 00:00:00.424884500 +591 82 0 days 00:00:00.186734386 +591 83 0 days 
00:00:00.344690420 +591 84 0 days 00:00:00.449478112 +591 85 0 days 00:00:00.336401820 +591 86 0 days 00:00:00.167167080 +591 87 0 days 00:00:00.378244986 +591 88 0 days 00:00:00.427364857 +591 89 0 days 00:00:00.188036846 +591 90 0 days 00:00:00.330015473 +591 91 0 days 00:00:00.234713410 +591 92 0 days 00:00:00.192222673 +591 93 0 days 00:00:00.203528240 +591 94 0 days 00:00:00.307575448 +591 95 0 days 00:00:00.158923146 +591 96 0 days 00:00:00.443763326 +591 97 0 days 00:00:00.245140736 +591 98 0 days 00:00:00.197594453 +591 99 0 days 00:00:00.215282173 +591 100 0 days 00:00:00.333855100 +592 1 0 days 00:00:00.203231266 +592 2 0 days 00:00:00.430614468 +592 3 0 days 00:00:00.214836700 +592 4 0 days 00:00:00.221244300 +592 5 0 days 00:00:00.346477420 +592 6 0 days 00:00:00.126703040 +592 7 0 days 00:00:00.205811806 +592 8 0 days 00:00:00.386112572 +592 9 0 days 00:00:00.174372266 +592 10 0 days 00:00:00.353485500 +592 11 0 days 00:00:00.362299300 +592 12 0 days 00:00:00.234925915 +592 13 0 days 00:00:00.197618866 +592 14 0 days 00:00:00.281001680 +592 15 0 days 00:00:00.243144566 +592 17 0 days 00:00:00.234897900 +592 18 0 days 00:00:00.441697366 +592 19 0 days 00:00:00.148573253 +592 20 0 days 00:00:00.390068482 +592 21 0 days 00:00:00.253171993 +592 22 0 days 00:00:00.186020650 +592 23 0 days 00:00:00.160633125 +592 24 0 days 00:00:00.226651413 +592 25 0 days 00:00:00.342864326 +592 26 0 days 00:00:00.342756366 +592 27 0 days 00:00:00.241672346 +592 28 0 days 00:00:00.200453750 +592 29 0 days 00:00:00.274422830 +592 30 0 days 00:00:00.323233520 +592 31 0 days 00:00:00.118484580 +592 32 0 days 00:00:00.344663406 +592 33 0 days 00:00:00.362047933 +592 34 0 days 00:00:00.191412886 +592 35 0 days 00:00:00.254426240 +592 36 0 days 00:00:00.142191673 +592 37 0 days 00:00:00.347279540 +592 38 0 days 00:00:00.375527771 +592 39 0 days 00:00:00.278718770 +592 40 0 days 00:00:00.128664640 +592 41 0 days 00:00:00.179918920 +592 42 0 days 00:00:00.382968460 +592 43 0 days 
00:00:00.259573406 +592 44 0 days 00:00:00.192720253 +592 45 0 days 00:00:00.256838066 +592 46 0 days 00:00:00.346494286 +592 47 0 days 00:00:00.115093173 +592 48 0 days 00:00:00.177834366 +592 49 0 days 00:00:00.149870880 +592 50 0 days 00:00:00.360756073 +592 51 0 days 00:00:00.129750986 +592 52 0 days 00:00:00.214065500 +592 53 0 days 00:00:00.203938020 +592 54 0 days 00:00:00.233105332 +592 55 0 days 00:00:00.308383906 +592 56 0 days 00:00:00.197620846 +592 57 0 days 00:00:00.459072498 +592 58 0 days 00:00:00.357964360 +592 59 0 days 00:00:00.290041433 +592 60 0 days 00:00:00.350247373 +592 61 0 days 00:00:00.352554780 +592 62 0 days 00:00:00.238021873 +592 63 0 days 00:00:00.236826206 +592 64 0 days 00:00:00.195623053 +592 65 0 days 00:00:00.236567553 +592 66 0 days 00:00:00.405851886 +592 67 0 days 00:00:00.235586346 +592 68 0 days 00:00:00.153608486 +592 69 0 days 00:00:00.190124826 +592 70 0 days 00:00:00.401356893 +592 71 0 days 00:00:00.170509653 +592 72 0 days 00:00:00.115076046 +592 73 0 days 00:00:00.421543273 +592 74 0 days 00:00:00.141464920 +592 75 0 days 00:00:00.137627373 +592 77 0 days 00:00:00.234205713 +592 78 0 days 00:00:00.224793746 +592 79 0 days 00:00:00.170969520 +592 80 0 days 00:00:00.212411013 +592 81 0 days 00:00:00.210635213 +592 82 0 days 00:00:00.128447060 +592 83 0 days 00:00:00.197268840 +592 84 0 days 00:00:00.349005860 +592 85 0 days 00:00:00.165947513 +592 86 0 days 00:00:00.349313386 +592 87 0 days 00:00:00.380355963 +592 88 0 days 00:00:00.245923913 +592 89 0 days 00:00:00.451363113 +592 90 0 days 00:00:00.352356786 +592 91 0 days 00:00:00.399432165 +592 92 0 days 00:00:00.475207505 +592 93 0 days 00:00:00.198737026 +592 94 0 days 00:00:00.222542226 +592 95 0 days 00:00:00.348235386 +592 96 0 days 00:00:00.482026556 +592 97 0 days 00:00:00.221799090 +592 98 0 days 00:00:00.109307026 +592 99 0 days 00:00:00.163627954 +592 100 0 days 00:00:00.201963473 +593 1 0 days 00:00:01.049094358 +593 2 0 days 00:00:00.332225040 +593 3 0 
days 00:00:01.110191281 +593 5 0 days 00:00:00.288626188 +593 8 0 days 00:00:00.569539485 +593 9 0 days 00:00:00.865747893 +593 10 0 days 00:00:00.499796980 +593 11 0 days 00:00:00.444438844 +593 12 0 days 00:00:00.920370410 +593 13 0 days 00:00:00.556566628 +593 14 0 days 00:00:01.059528128 +593 15 0 days 00:00:00.748162054 +593 16 0 days 00:00:01.146800574 +593 17 0 days 00:00:01.157740934 +593 20 0 days 00:00:00.521269308 +593 21 0 days 00:00:00.328290887 +593 22 0 days 00:00:01.016138255 +593 24 0 days 00:00:00.679858710 +593 25 0 days 00:00:01.066304326 +593 27 0 days 00:00:00.974133453 +593 28 0 days 00:00:01.015022537 +593 30 0 days 00:00:00.631755432 +593 32 0 days 00:00:00.943341813 +593 33 0 days 00:00:01.057020802 +593 34 0 days 00:00:00.340269735 +593 35 0 days 00:00:00.929811845 +593 37 0 days 00:00:00.348646438 +593 40 0 days 00:00:00.485379550 +593 41 0 days 00:00:01.039837209 +593 44 0 days 00:00:00.616352917 +593 45 0 days 00:00:01.108059672 +593 46 0 days 00:00:01.007141732 +593 47 0 days 00:00:01.113535151 +593 48 0 days 00:00:00.566337523 +593 51 0 days 00:00:00.507794980 +593 52 0 days 00:00:00.965717000 +593 53 0 days 00:00:00.921229845 +593 54 0 days 00:00:00.507844615 +593 55 0 days 00:00:00.606952338 +593 56 0 days 00:00:00.548289442 +593 57 0 days 00:00:01.042329209 +593 59 0 days 00:00:00.861437520 +593 60 0 days 00:00:00.490516003 +593 61 0 days 00:00:01.166317210 +593 62 0 days 00:00:00.345059468 +593 63 0 days 00:00:00.733352507 +593 65 0 days 00:00:00.245448727 +593 66 0 days 00:00:00.526606131 +593 67 0 days 00:00:00.774015349 +593 69 0 days 00:00:00.595439548 +593 71 0 days 00:00:00.963727570 +593 72 0 days 00:00:00.294876445 +593 73 0 days 00:00:00.544240260 +593 75 0 days 00:00:00.716058994 +593 78 0 days 00:00:00.582383774 +593 82 0 days 00:00:00.524184160 +593 83 0 days 00:00:00.986115524 +593 84 0 days 00:00:00.940367252 +593 85 0 days 00:00:00.614660542 +593 87 0 days 00:00:00.352854708 +593 88 0 days 00:00:00.435878298 +593 
91 0 days 00:00:01.186359902 +593 92 0 days 00:00:00.912734164 +593 93 0 days 00:00:01.236767559 +593 94 0 days 00:00:00.953578132 +593 96 0 days 00:00:00.710897061 +593 97 0 days 00:00:01.139385109 +593 99 0 days 00:00:00.677318037 +594 2 0 days 00:00:00.234996908 +594 3 0 days 00:00:00.277099420 +594 4 0 days 00:00:00.583548885 +594 6 0 days 00:00:00.533332300 +594 7 0 days 00:00:00.208629375 +594 8 0 days 00:00:00.312594547 +594 9 0 days 00:00:00.218524210 +594 10 0 days 00:00:00.231295164 +594 11 0 days 00:00:00.186326155 +594 12 0 days 00:00:00.231599560 +594 13 0 days 00:00:00.278059823 +594 14 0 days 00:00:00.156200246 +594 15 0 days 00:00:00.169136869 +594 16 0 days 00:00:00.175947489 +594 17 0 days 00:00:00.269588061 +594 18 0 days 00:00:00.491923590 +594 20 0 days 00:00:00.300179593 +594 21 0 days 00:00:00.319968063 +594 22 0 days 00:00:00.458415697 +594 23 0 days 00:00:00.599626687 +594 24 0 days 00:00:00.477566347 +594 25 0 days 00:00:00.247796940 +594 26 0 days 00:00:00.145294117 +594 27 0 days 00:00:00.223848981 +594 28 0 days 00:00:00.491078753 +594 30 0 days 00:00:00.277483794 +594 31 0 days 00:00:00.166403820 +594 32 0 days 00:00:00.414288887 +594 33 0 days 00:00:00.342200481 +594 34 0 days 00:00:00.182222026 +594 35 0 days 00:00:00.197143840 +594 36 0 days 00:00:00.486047406 +594 37 0 days 00:00:00.318830733 +594 38 0 days 00:00:00.331517871 +594 39 0 days 00:00:00.243517674 +594 40 0 days 00:00:00.211484092 +594 41 0 days 00:00:00.312614166 +594 42 0 days 00:00:00.140746798 +594 43 0 days 00:00:00.215108600 +594 44 0 days 00:00:00.354163953 +594 45 0 days 00:00:00.198483477 +594 46 0 days 00:00:00.463591580 +594 48 0 days 00:00:00.137938317 +594 49 0 days 00:00:00.218172400 +594 52 0 days 00:00:00.542500433 +594 53 0 days 00:00:00.260547956 +594 54 0 days 00:00:00.281799751 +594 57 0 days 00:00:00.467187356 +594 58 0 days 00:00:00.223016662 +594 59 0 days 00:00:00.185009748 +594 61 0 days 00:00:00.200334152 +594 63 0 days 00:00:00.219867262 +594 
64 0 days 00:00:00.538961868 +594 65 0 days 00:00:00.151571437 +594 66 0 days 00:00:00.448302872 +594 68 0 days 00:00:00.142492242 +594 69 0 days 00:00:00.220298634 +594 71 0 days 00:00:00.576500986 +594 73 0 days 00:00:00.391047288 +594 74 0 days 00:00:00.300511971 +594 75 0 days 00:00:00.296849090 +594 76 0 days 00:00:00.583660605 +594 77 0 days 00:00:00.244945371 +594 78 0 days 00:00:00.465047735 +594 79 0 days 00:00:00.150962922 +594 80 0 days 00:00:00.159466425 +594 81 0 days 00:00:00.576519124 +594 82 0 days 00:00:00.514106283 +594 84 0 days 00:00:00.588874331 +594 85 0 days 00:00:00.264864952 +594 86 0 days 00:00:00.264729018 +594 87 0 days 00:00:00.483645297 +594 88 0 days 00:00:00.220982197 +594 89 0 days 00:00:00.195398300 +594 90 0 days 00:00:00.627201964 +594 91 0 days 00:00:00.284035231 +594 92 0 days 00:00:00.149225424 +594 93 0 days 00:00:00.284026507 +594 95 0 days 00:00:00.129970073 +594 97 0 days 00:00:00.330235496 +594 98 0 days 00:00:00.165196618 +594 99 0 days 00:00:00.141427082 +594 100 0 days 00:00:00.166303332 +595 4 0 days 00:00:00.392147806 +595 5 0 days 00:00:00.538992282 +595 6 0 days 00:00:00.991856887 +595 7 0 days 00:00:00.961796006 +595 8 0 days 00:00:00.445605486 +595 9 0 days 00:00:00.301148994 +595 13 0 days 00:00:00.919340950 +595 16 0 days 00:00:00.707850440 +595 18 0 days 00:00:00.258330133 +595 19 0 days 00:00:00.332842566 +595 20 0 days 00:00:00.230981673 +595 23 0 days 00:00:00.248059153 +595 25 0 days 00:00:00.279564866 +595 26 0 days 00:00:00.270019406 +595 27 0 days 00:00:00.347511453 +595 28 0 days 00:00:00.518597440 +595 31 0 days 00:00:00.526785151 +595 32 0 days 00:00:00.474677566 +595 33 0 days 00:00:00.257397206 +595 34 0 days 00:00:00.304484486 +595 36 0 days 00:00:00.567314404 +595 37 0 days 00:00:00.604713605 +595 38 0 days 00:00:01.215950462 +595 40 0 days 00:00:00.551375180 +595 41 0 days 00:00:00.401471806 +595 42 0 days 00:00:00.987673728 +595 43 0 days 00:00:00.254693440 +595 45 0 days 00:00:00.264616200 
+595 47 0 days 00:00:00.897395668 +595 48 0 days 00:00:00.261769213 +595 55 0 days 00:00:00.620679704 +595 56 0 days 00:00:01.100220325 +595 58 0 days 00:00:00.556981723 +595 59 0 days 00:00:00.266989620 +595 60 0 days 00:00:01.024152149 +595 61 0 days 00:00:00.940687870 +595 62 0 days 00:00:00.300700013 +595 68 0 days 00:00:00.412611600 +595 70 0 days 00:00:01.053288546 +595 71 0 days 00:00:00.423654080 +595 74 0 days 00:00:00.250453340 +595 76 0 days 00:00:00.374828912 +595 77 0 days 00:00:00.448331573 +595 80 0 days 00:00:00.342368264 +595 82 0 days 00:00:00.334736926 +595 83 0 days 00:00:00.327846073 +595 84 0 days 00:00:00.286243746 +595 85 0 days 00:00:00.342986413 +595 86 0 days 00:00:00.986922397 +595 88 0 days 00:00:00.407333466 +595 89 0 days 00:00:01.121164810 +595 92 0 days 00:00:00.322984165 +595 93 0 days 00:00:00.418986366 +595 94 0 days 00:00:00.256396566 +595 95 0 days 00:00:01.078577397 +595 96 0 days 00:00:00.724587483 +595 97 0 days 00:00:00.307943966 +595 98 0 days 00:00:00.788602856 +595 100 0 days 00:00:00.383192840 +596 2 0 days 00:00:00.427973533 +596 5 0 days 00:00:00.509646542 +596 6 0 days 00:00:00.573674961 +596 7 0 days 00:00:00.399168400 +596 8 0 days 00:00:00.198497633 +596 9 0 days 00:00:00.269792492 +596 10 0 days 00:00:00.286742822 +596 11 0 days 00:00:00.246611846 +596 12 0 days 00:00:00.132301853 +596 13 0 days 00:00:00.465130954 +596 14 0 days 00:00:00.481977166 +596 15 0 days 00:00:00.243121524 +596 16 0 days 00:00:00.440578120 +596 17 0 days 00:00:00.196972953 +596 20 0 days 00:00:00.280269292 +596 21 0 days 00:00:00.580990496 +596 23 0 days 00:00:00.273561500 +596 25 0 days 00:00:00.170029435 +596 26 0 days 00:00:00.221944796 +596 27 0 days 00:00:00.428347173 +596 29 0 days 00:00:00.323639755 +596 30 0 days 00:00:00.277549665 +596 33 0 days 00:00:00.193512760 +596 35 0 days 00:00:00.235922913 +596 36 0 days 00:00:00.236232540 +596 38 0 days 00:00:00.573582303 +596 39 0 days 00:00:00.273041205 +596 41 0 days 
00:00:00.120829455 +596 43 0 days 00:00:00.217693600 +596 46 0 days 00:00:00.214024900 +596 47 0 days 00:00:00.574199385 +596 49 0 days 00:00:00.217409250 +596 50 0 days 00:00:00.279349340 +596 51 0 days 00:00:00.259613708 +596 55 0 days 00:00:00.172606013 +596 56 0 days 00:00:00.200536146 +596 58 0 days 00:00:00.205915280 +596 61 0 days 00:00:00.283950756 +596 63 0 days 00:00:00.213449826 +596 64 0 days 00:00:00.320191826 +596 66 0 days 00:00:00.461557407 +596 67 0 days 00:00:00.460142566 +596 68 0 days 00:00:00.255219154 +596 69 0 days 00:00:00.477088715 +596 72 0 days 00:00:00.135672926 +596 74 0 days 00:00:00.237816126 +596 75 0 days 00:00:00.164049130 +596 76 0 days 00:00:00.272417950 +596 77 0 days 00:00:00.272077188 +596 78 0 days 00:00:00.199083786 +596 81 0 days 00:00:00.228320192 +596 82 0 days 00:00:00.200719293 +596 83 0 days 00:00:00.478173817 +596 84 0 days 00:00:00.219932995 +596 85 0 days 00:00:00.128654420 +596 86 0 days 00:00:00.124715920 +596 88 0 days 00:00:00.276699485 +596 89 0 days 00:00:00.170998112 +596 90 0 days 00:00:00.217999345 +596 92 0 days 00:00:00.129264595 +596 94 0 days 00:00:00.189047956 +596 95 0 days 00:00:00.247016216 +596 97 0 days 00:00:00.501224416 +596 99 0 days 00:00:00.360911440 +597 1 0 days 00:00:00.326547424 +597 2 0 days 00:00:00.546459665 +597 3 0 days 00:00:00.371682902 +597 4 0 days 00:00:00.521250622 +597 5 0 days 00:00:00.724508393 +597 6 0 days 00:00:00.405493340 +597 7 0 days 00:00:00.881195224 +597 8 0 days 00:00:00.474808660 +597 9 0 days 00:00:00.917452140 +597 10 0 days 00:00:00.810967560 +597 11 0 days 00:00:00.881718340 +597 12 0 days 00:00:00.371186604 +597 13 0 days 00:00:00.356862926 +597 14 0 days 00:00:00.460404770 +597 15 0 days 00:00:00.888173835 +597 16 0 days 00:00:00.963716136 +597 17 0 days 00:00:00.497374184 +597 18 0 days 00:00:00.663013444 +597 19 0 days 00:00:00.927253895 +597 20 0 days 00:00:00.730262317 +597 21 0 days 00:00:00.817706140 +597 22 0 days 00:00:00.495522748 +597 23 0 days 
00:00:00.548212024 +597 24 0 days 00:00:00.816847610 +597 25 0 days 00:00:00.480821784 +597 26 0 days 00:00:00.357787168 +597 27 0 days 00:00:00.468096000 +597 28 0 days 00:00:00.806545340 +597 29 0 days 00:00:00.856223628 +597 30 0 days 00:00:00.932546232 +597 31 0 days 00:00:00.754085188 +597 32 0 days 00:00:00.910339731 +597 33 0 days 00:00:00.802846390 +597 34 0 days 00:00:00.354807847 +597 35 0 days 00:00:00.358182105 +597 36 0 days 00:00:00.297838080 +597 37 0 days 00:00:00.857566435 +597 38 0 days 00:00:00.387070956 +597 39 0 days 00:00:00.315874340 +597 40 0 days 00:00:00.881675623 +597 41 0 days 00:00:00.868429200 +597 42 0 days 00:00:00.568653916 +597 43 0 days 00:00:00.297806775 +597 44 0 days 00:00:00.852706468 +597 45 0 days 00:00:00.342480546 +597 46 0 days 00:00:00.479163235 +597 47 0 days 00:00:00.418689650 +597 48 0 days 00:00:00.852023396 +597 49 0 days 00:00:00.882111464 +597 50 0 days 00:00:00.510299716 +597 51 0 days 00:00:00.517152340 +597 52 0 days 00:00:00.803842245 +597 53 0 days 00:00:00.561503121 +597 54 0 days 00:00:00.888404426 +597 55 0 days 00:00:00.884520315 +597 56 0 days 00:00:00.428443514 +597 57 0 days 00:00:00.604893580 +597 58 0 days 00:00:00.542651942 +597 59 0 days 00:00:00.363131322 +597 60 0 days 00:00:00.887040548 +597 61 0 days 00:00:00.715445670 +597 62 0 days 00:00:00.489927232 +597 63 0 days 00:00:00.337102580 +597 64 0 days 00:00:00.910356334 +597 65 0 days 00:00:00.706190457 +597 66 0 days 00:00:00.296408325 +597 67 0 days 00:00:00.810405910 +597 68 0 days 00:00:00.331302922 +597 69 0 days 00:00:00.497620155 +597 70 0 days 00:00:00.321137568 +597 71 0 days 00:00:00.833091550 +597 72 0 days 00:00:00.293576020 +597 73 0 days 00:00:00.355154038 +597 74 0 days 00:00:00.342710737 +597 75 0 days 00:00:00.807640890 +597 76 0 days 00:00:00.338885014 +597 77 0 days 00:00:00.298020660 +597 78 0 days 00:00:00.643030620 +597 79 0 days 00:00:00.947025544 +597 80 0 days 00:00:00.826500795 +597 81 0 days 00:00:00.491482032 +597 82 
0 days 00:00:00.551494937 +597 83 0 days 00:00:00.342010120 +597 84 0 days 00:00:00.841945244 +597 85 0 days 00:00:00.967375052 +597 86 0 days 00:00:00.451697662 +597 87 0 days 00:00:00.539242976 +597 88 0 days 00:00:00.511435908 +597 89 0 days 00:00:00.466611495 +597 90 0 days 00:00:00.342584782 +597 91 0 days 00:00:00.495370240 +597 92 0 days 00:00:00.479527650 +597 93 0 days 00:00:00.970023976 +597 94 0 days 00:00:00.891648510 +597 95 0 days 00:00:00.599443365 +597 96 0 days 00:00:00.356961836 +597 97 0 days 00:00:00.797855975 +597 98 0 days 00:00:00.388680690 +597 99 0 days 00:00:00.813699140 +597 100 0 days 00:00:00.331019262 +598 1 0 days 00:00:00.445755300 +598 2 0 days 00:00:00.433199524 +598 3 0 days 00:00:00.407063810 +598 4 0 days 00:00:00.176953405 +598 5 0 days 00:00:00.423144100 +598 6 0 days 00:00:00.165728780 +598 7 0 days 00:00:00.158707412 +598 8 0 days 00:00:00.281202192 +598 9 0 days 00:00:00.198170106 +598 10 0 days 00:00:00.263736033 +598 11 0 days 00:00:00.370433452 +598 12 0 days 00:00:00.407198514 +598 13 0 days 00:00:00.288485800 +598 14 0 days 00:00:00.461698613 +598 15 0 days 00:00:00.269730220 +598 16 0 days 00:00:00.457824074 +598 17 0 days 00:00:00.266607520 +598 18 0 days 00:00:00.264327736 +598 19 0 days 00:00:00.294515043 +598 20 0 days 00:00:00.286098512 +598 21 0 days 00:00:00.485116182 +598 22 0 days 00:00:00.281441525 +598 23 0 days 00:00:00.468127150 +598 24 0 days 00:00:00.237925105 +598 25 0 days 00:00:00.445927740 +598 26 0 days 00:00:00.159533855 +598 27 0 days 00:00:00.488457632 +598 28 0 days 00:00:00.424276132 +598 29 0 days 00:00:00.281107752 +598 30 0 days 00:00:00.426038360 +598 31 0 days 00:00:00.169142900 +598 32 0 days 00:00:00.478979660 +598 33 0 days 00:00:00.286315760 +598 34 0 days 00:00:00.400642545 +598 35 0 days 00:00:00.426769172 +598 36 0 days 00:00:00.251748496 +598 37 0 days 00:00:00.165822583 +598 38 0 days 00:00:00.331473627 +598 39 0 days 00:00:00.257863248 +598 40 0 days 00:00:00.249857090 +598 41 0 
days 00:00:00.266495380 +598 42 0 days 00:00:00.285474043 +598 43 0 days 00:00:00.243845175 +598 44 0 days 00:00:00.181018601 +598 45 0 days 00:00:00.407657302 +598 46 0 days 00:00:00.289012924 +598 47 0 days 00:00:00.445887790 +598 48 0 days 00:00:00.457609768 +598 49 0 days 00:00:00.171153908 +598 50 0 days 00:00:00.151719610 +598 51 0 days 00:00:00.290866298 +598 52 0 days 00:00:00.182024162 +598 53 0 days 00:00:00.194030680 +598 54 0 days 00:00:00.408684880 +598 55 0 days 00:00:00.325803565 +598 56 0 days 00:00:00.279912204 +598 57 0 days 00:00:00.216841336 +598 58 0 days 00:00:00.465662322 +598 59 0 days 00:00:00.260981420 +598 60 0 days 00:00:00.452617862 +598 61 0 days 00:00:00.305311468 +598 62 0 days 00:00:00.406372865 +598 63 0 days 00:00:00.465178207 +598 64 0 days 00:00:00.203002385 +598 65 0 days 00:00:00.410166905 +598 66 0 days 00:00:00.290915590 +598 67 0 days 00:00:00.180097964 +598 68 0 days 00:00:00.204644656 +598 69 0 days 00:00:00.287120423 +598 70 0 days 00:00:00.264582745 +598 71 0 days 00:00:00.287605628 +598 72 0 days 00:00:00.203070776 +598 73 0 days 00:00:00.171063816 +598 74 0 days 00:00:00.405833470 +598 75 0 days 00:00:00.141468760 +598 76 0 days 00:00:00.264795896 +598 77 0 days 00:00:00.372858626 +598 78 0 days 00:00:00.237066644 +598 79 0 days 00:00:00.183505968 +598 80 0 days 00:00:00.240256615 +598 81 0 days 00:00:00.267044000 +598 82 0 days 00:00:00.446807012 +598 83 0 days 00:00:00.289720151 +598 84 0 days 00:00:00.431406340 +598 85 0 days 00:00:00.425688260 +598 86 0 days 00:00:00.353100180 +598 87 0 days 00:00:00.235464466 +598 88 0 days 00:00:00.177264011 +598 89 0 days 00:00:00.261772120 +598 90 0 days 00:00:00.245382185 +598 91 0 days 00:00:00.174930688 +598 92 0 days 00:00:00.236343230 +598 93 0 days 00:00:00.444496356 +598 94 0 days 00:00:00.276856600 +598 95 0 days 00:00:00.275309288 +598 96 0 days 00:00:00.246407282 +598 97 0 days 00:00:00.469213905 +598 98 0 days 00:00:00.290164155 +598 99 0 days 00:00:00.354184840 
+598 100 0 days 00:00:00.295237363 +599 1 0 days 00:00:00.536156930 +599 2 0 days 00:00:00.752139770 +599 3 0 days 00:00:00.477489940 +599 4 0 days 00:00:00.811743828 +599 5 0 days 00:00:00.318023687 +599 6 0 days 00:00:00.312969588 +599 7 0 days 00:00:00.962954756 +599 8 0 days 00:00:00.570048060 +599 9 0 days 00:00:00.414255868 +599 10 0 days 00:00:00.295656010 +599 11 0 days 00:00:00.519239132 +599 12 0 days 00:00:00.903526108 +599 13 0 days 00:00:00.467478540 +599 14 0 days 00:00:00.456119224 +599 15 0 days 00:00:00.504501317 +599 16 0 days 00:00:00.748485645 +599 17 0 days 00:00:00.318291025 +599 18 0 days 00:00:00.385165372 +599 19 0 days 00:00:00.310175076 +599 20 0 days 00:00:00.673170046 +599 21 0 days 00:00:00.495225685 +599 22 0 days 00:00:00.739459795 +599 23 0 days 00:00:00.300121075 +599 24 0 days 00:00:00.748341345 +599 25 0 days 00:00:00.309213450 +599 26 0 days 00:00:00.357790042 +599 27 0 days 00:00:00.311513603 +599 28 0 days 00:00:00.377155956 +599 29 0 days 00:00:00.392868983 +599 30 0 days 00:00:00.872477493 +599 31 0 days 00:00:00.307737312 +599 32 0 days 00:00:00.439940805 +599 33 0 days 00:00:00.310177340 +599 34 0 days 00:00:00.816425784 +599 35 0 days 00:00:00.521582610 +599 36 0 days 00:00:00.331142815 +599 37 0 days 00:00:00.554658350 +599 38 0 days 00:00:00.753051340 +599 39 0 days 00:00:00.503086147 +599 40 0 days 00:00:00.814451763 +599 41 0 days 00:00:00.753956050 +599 42 0 days 00:00:00.860881142 +599 43 0 days 00:00:00.336760544 +599 44 0 days 00:00:00.751556380 +599 45 0 days 00:00:00.916750526 +599 46 0 days 00:00:00.299713068 +599 47 0 days 00:00:00.362247640 +599 48 0 days 00:00:00.480033096 +599 49 0 days 00:00:00.746006345 +599 50 0 days 00:00:00.317486765 +599 51 0 days 00:00:00.532857175 +599 52 0 days 00:00:00.462322980 +599 53 0 days 00:00:00.298333508 +599 54 0 days 00:00:00.333911408 +599 55 0 days 00:00:00.435613785 +599 56 0 days 00:00:00.504714594 +599 57 0 days 00:00:00.753675975 +599 58 0 days 00:00:00.784903524 
+599 59 0 days 00:00:00.391816700 +599 60 0 days 00:00:00.750447735 +599 61 0 days 00:00:00.827729370 +599 62 0 days 00:00:00.361012168 +599 63 0 days 00:00:00.746892315 +599 64 0 days 00:00:00.327223465 +599 65 0 days 00:00:00.770529835 +599 66 0 days 00:00:00.306699608 +599 67 0 days 00:00:00.514551340 +599 68 0 days 00:00:00.464813036 +599 69 0 days 00:00:00.439093748 +599 70 0 days 00:00:00.299867844 +599 71 0 days 00:00:00.391067504 +599 72 0 days 00:00:00.322915926 +599 73 0 days 00:00:00.744668935 +599 74 0 days 00:00:00.304700730 +599 75 0 days 00:00:00.299192805 +599 76 0 days 00:00:00.418061224 +599 77 0 days 00:00:00.783819740 +599 78 0 days 00:00:00.828335096 +599 79 0 days 00:00:00.306381310 +599 80 0 days 00:00:00.431191050 +599 81 0 days 00:00:00.283440985 +599 82 0 days 00:00:00.810348325 +599 83 0 days 00:00:00.298560432 +599 84 0 days 00:00:00.695646020 +599 85 0 days 00:00:00.497404596 +599 86 0 days 00:00:00.749412545 +599 87 0 days 00:00:00.830317966 +599 88 0 days 00:00:00.312426420 +599 89 0 days 00:00:00.448554365 +599 90 0 days 00:00:00.703979916 +599 91 0 days 00:00:00.520809042 +599 92 0 days 00:00:00.327369800 +599 93 0 days 00:00:00.703057100 +599 94 0 days 00:00:00.753723660 +599 95 0 days 00:00:00.560248564 +599 96 0 days 00:00:00.347760040 +599 97 0 days 00:00:00.813924456 +599 98 0 days 00:00:00.457386804 +599 99 0 days 00:00:00.704289120 +599 100 0 days 00:00:00.321844700 +600 1 0 days 00:00:00.850772400 +600 2 0 days 00:00:00.835636932 +600 3 0 days 00:00:00.788073740 +600 4 0 days 00:00:00.317886950 +600 5 0 days 00:00:00.385970708 +600 6 0 days 00:00:00.553893680 +600 7 0 days 00:00:00.481756084 +600 8 0 days 00:00:00.557542540 +600 9 0 days 00:00:00.334109743 +600 10 0 days 00:00:00.811323175 +600 11 0 days 00:00:00.817002065 +600 12 0 days 00:00:00.838455710 +600 13 0 days 00:00:00.539335384 +600 14 0 days 00:00:00.554289193 +600 15 0 days 00:00:00.374749765 +600 16 0 days 00:00:00.342604136 +600 17 0 days 00:00:00.511631115 
+600 18 0 days 00:00:00.528923880 +600 19 0 days 00:00:00.535121935 +600 20 0 days 00:00:00.563590426 +600 21 0 days 00:00:00.857613552 +600 22 0 days 00:00:00.371488708 +600 23 0 days 00:00:00.454507670 +600 24 0 days 00:00:00.890389654 +600 25 0 days 00:00:00.505262956 +600 26 0 days 00:00:00.327561184 +600 27 0 days 00:00:00.936186993 +600 28 0 days 00:00:00.438937686 +600 29 0 days 00:00:00.919135295 +600 30 0 days 00:00:00.793858615 +600 31 0 days 00:00:00.491275372 +600 32 0 days 00:00:00.454832915 +600 33 0 days 00:00:00.521584140 +600 34 0 days 00:00:00.539918825 +600 35 0 days 00:00:00.304770988 +600 36 0 days 00:00:00.487841728 +600 37 0 days 00:00:00.683340284 +600 38 0 days 00:00:00.540115448 +600 39 0 days 00:00:00.449245188 +600 40 0 days 00:00:00.334066216 +600 41 0 days 00:00:00.789779420 +600 42 0 days 00:00:00.836225324 +600 43 0 days 00:00:00.961486546 +600 44 0 days 00:00:00.507575332 +600 45 0 days 00:00:00.531599702 +600 46 0 days 00:00:00.933282015 +600 47 0 days 00:00:00.512004792 +600 48 0 days 00:00:00.857608140 +600 49 0 days 00:00:00.836144636 +600 50 0 days 00:00:00.553193044 +600 51 0 days 00:00:00.486150436 +600 52 0 days 00:00:00.459981520 +600 53 0 days 00:00:00.639989344 +600 54 0 days 00:00:00.499457266 +600 55 0 days 00:00:00.485071160 +600 56 0 days 00:00:00.782799300 +600 57 0 days 00:00:00.480559872 +600 58 0 days 00:00:00.469031655 +600 59 0 days 00:00:00.784780675 +600 60 0 days 00:00:00.314408764 +600 61 0 days 00:00:00.672022660 +600 62 0 days 00:00:00.869285836 +600 63 0 days 00:00:00.906684695 +600 64 0 days 00:00:00.547838564 +600 65 0 days 00:00:00.851947993 +600 66 0 days 00:00:00.454981980 +600 67 0 days 00:00:00.867612704 +600 68 0 days 00:00:00.370120925 +600 69 0 days 00:00:00.543926864 +600 70 0 days 00:00:00.404339786 +600 71 0 days 00:00:00.792588770 +600 72 0 days 00:00:00.362176600 +600 73 0 days 00:00:00.843246788 +600 74 0 days 00:00:00.491972905 +600 75 0 days 00:00:00.910164342 +600 76 0 days 
00:00:00.495779525 +600 77 0 days 00:00:00.356413120 +600 78 0 days 00:00:01.112149651 +600 79 0 days 00:00:00.907845995 +600 80 0 days 00:00:00.349799950 +600 81 0 days 00:00:00.869552753 +600 82 0 days 00:00:00.296937690 +600 83 0 days 00:00:00.342664400 +600 84 0 days 00:00:00.807400557 +600 85 0 days 00:00:00.403429400 +600 86 0 days 00:00:00.785161475 +600 87 0 days 00:00:00.459530950 +600 88 0 days 00:00:00.872929976 +600 89 0 days 00:00:00.485433128 +600 90 0 days 00:00:00.380226390 +600 91 0 days 00:00:00.609981720 +600 92 0 days 00:00:00.331696551 +600 93 0 days 00:00:01.022788120 +600 94 0 days 00:00:00.462875185 +600 95 0 days 00:00:00.501594468 +600 96 0 days 00:00:00.788624295 +600 97 0 days 00:00:00.486179450 +600 98 0 days 00:00:00.427026955 +600 99 0 days 00:00:00.700302770 +600 100 0 days 00:00:00.267907745 +601 1 0 days 00:00:00.143273720 +601 2 0 days 00:00:00.210674536 +601 3 0 days 00:00:00.166197096 +601 4 0 days 00:00:00.244149836 +601 5 0 days 00:00:00.380765620 +601 6 0 days 00:00:00.268233524 +601 7 0 days 00:00:00.404181552 +601 8 0 days 00:00:00.332738295 +601 9 0 days 00:00:00.330983955 +601 10 0 days 00:00:00.407907436 +601 11 0 days 00:00:00.223983735 +601 12 0 days 00:00:00.293880803 +601 13 0 days 00:00:00.449072364 +601 14 0 days 00:00:00.333857715 +601 15 0 days 00:00:00.252048540 +601 16 0 days 00:00:00.248740812 +601 17 0 days 00:00:00.228869755 +601 18 0 days 00:00:00.217027428 +601 19 0 days 00:00:00.476158672 +601 20 0 days 00:00:00.190536115 +601 21 0 days 00:00:00.233933762 +601 22 0 days 00:00:00.378377850 +601 23 0 days 00:00:00.420079166 +601 24 0 days 00:00:00.246356504 +601 25 0 days 00:00:00.378104865 +601 26 0 days 00:00:00.214964973 +601 27 0 days 00:00:00.258786805 +601 28 0 days 00:00:00.404658812 +601 29 0 days 00:00:00.420745396 +601 30 0 days 00:00:00.166632608 +601 31 0 days 00:00:00.494613263 +601 32 0 days 00:00:00.159805910 +601 33 0 days 00:00:00.375791465 +601 34 0 days 00:00:00.417942043 +601 35 0 days 
00:00:00.167301106 +601 36 0 days 00:00:00.156295364 +601 37 0 days 00:00:00.205195430 +601 38 0 days 00:00:00.402835652 +601 39 0 days 00:00:00.311495694 +601 40 0 days 00:00:00.420687053 +601 41 0 days 00:00:00.337170745 +601 42 0 days 00:00:00.220727640 +601 43 0 days 00:00:00.392466300 +601 44 0 days 00:00:00.380970010 +601 45 0 days 00:00:00.178022897 +601 46 0 days 00:00:00.253625893 +601 47 0 days 00:00:00.159093168 +601 48 0 days 00:00:00.471477450 +601 49 0 days 00:00:00.230219995 +601 50 0 days 00:00:00.242757560 +601 51 0 days 00:00:00.166455036 +601 52 0 days 00:00:00.477481950 +601 53 0 days 00:00:00.270222716 +601 54 0 days 00:00:00.420531496 +601 55 0 days 00:00:00.181370004 +601 56 0 days 00:00:00.236546990 +601 57 0 days 00:00:00.276591640 +601 58 0 days 00:00:00.251847114 +601 59 0 days 00:00:00.377950640 +601 60 0 days 00:00:00.260209396 +601 61 0 days 00:00:00.154347500 +601 62 0 days 00:00:00.160018086 +601 63 0 days 00:00:00.153912444 +601 64 0 days 00:00:00.320466574 +601 65 0 days 00:00:00.181849997 +601 66 0 days 00:00:00.224504350 +601 67 0 days 00:00:00.286821340 +601 68 0 days 00:00:00.236471976 +601 69 0 days 00:00:00.395621108 +601 70 0 days 00:00:00.294803828 +601 71 0 days 00:00:00.189357182 +601 72 0 days 00:00:00.365229133 +601 73 0 days 00:00:00.334321680 +601 74 0 days 00:00:00.296754788 +601 75 0 days 00:00:00.250618328 +601 76 0 days 00:00:00.163777720 +601 77 0 days 00:00:00.147979425 +601 78 0 days 00:00:00.449563195 +601 79 0 days 00:00:00.177232486 +601 80 0 days 00:00:00.465758126 +601 81 0 days 00:00:00.184699443 +601 82 0 days 00:00:00.284717934 +601 83 0 days 00:00:00.284728560 +601 84 0 days 00:00:00.173275248 +601 85 0 days 00:00:00.148038275 +601 86 0 days 00:00:00.406947005 +601 87 0 days 00:00:00.262931392 +601 88 0 days 00:00:00.364215606 +601 89 0 days 00:00:00.223156150 +601 90 0 days 00:00:00.174829722 +601 91 0 days 00:00:00.173692314 +601 92 0 days 00:00:00.248256774 +601 93 0 days 00:00:00.150880665 +601 94 
0 days 00:00:00.308290625 +601 95 0 days 00:00:00.233733816 +601 96 0 days 00:00:00.503455393 +601 97 0 days 00:00:00.236231040 +601 98 0 days 00:00:00.172041780 +601 99 0 days 00:00:00.452910850 +601 100 0 days 00:00:00.269274113 +602 1 0 days 00:00:00.305936664 +602 2 0 days 00:00:00.177786450 +602 3 0 days 00:00:00.449203908 +602 4 0 days 00:00:00.175591100 +602 5 0 days 00:00:00.357030970 +602 6 0 days 00:00:00.493369260 +602 7 0 days 00:00:00.233512280 +602 8 0 days 00:00:00.234104415 +602 9 0 days 00:00:00.264585020 +602 10 0 days 00:00:00.263842150 +602 11 0 days 00:00:00.202019674 +602 12 0 days 00:00:00.258590876 +602 13 0 days 00:00:00.160542412 +602 14 0 days 00:00:00.339653570 +602 15 0 days 00:00:00.268997162 +602 16 0 days 00:00:00.232996310 +602 17 0 days 00:00:00.169839737 +602 18 0 days 00:00:00.305868160 +602 19 0 days 00:00:00.180803933 +602 20 0 days 00:00:00.474711202 +602 21 0 days 00:00:00.182797888 +602 22 0 days 00:00:00.352221105 +602 23 0 days 00:00:00.160134703 +602 24 0 days 00:00:00.458633162 +602 25 0 days 00:00:00.471185465 +602 26 0 days 00:00:00.242514325 +602 27 0 days 00:00:00.188816865 +602 28 0 days 00:00:00.256207900 +602 29 0 days 00:00:00.170812520 +602 30 0 days 00:00:00.262836140 +602 31 0 days 00:00:00.162209848 +602 32 0 days 00:00:00.452878505 +602 33 0 days 00:00:00.245978156 +602 34 0 days 00:00:00.208189180 +602 35 0 days 00:00:00.246862900 +602 36 0 days 00:00:00.374017832 +602 37 0 days 00:00:00.207551170 +602 38 0 days 00:00:00.391167665 +602 39 0 days 00:00:00.164226556 +602 40 0 days 00:00:00.352535656 +602 41 0 days 00:00:00.233267615 +602 42 0 days 00:00:00.149474810 +602 43 0 days 00:00:00.186010896 +602 44 0 days 00:00:00.234396245 +602 45 0 days 00:00:00.398946605 +602 46 0 days 00:00:00.363383204 +602 47 0 days 00:00:00.431617664 +602 48 0 days 00:00:00.397241155 +602 49 0 days 00:00:00.153317170 +602 50 0 days 00:00:00.501123713 +602 51 0 days 00:00:00.271756242 +602 52 0 days 00:00:00.231264725 +602 53 0 
days 00:00:00.157760976 +602 54 0 days 00:00:00.202371456 +602 55 0 days 00:00:00.468556211 +602 56 0 days 00:00:00.173132156 +602 57 0 days 00:00:00.271306916 +602 58 0 days 00:00:00.243190195 +602 59 0 days 00:00:00.291476675 +602 60 0 days 00:00:00.465761643 +602 61 0 days 00:00:00.249312720 +602 62 0 days 00:00:00.245775052 +602 63 0 days 00:00:00.241173775 +602 64 0 days 00:00:00.420140164 +602 65 0 days 00:00:00.276339160 +602 66 0 days 00:00:00.391901670 +602 67 0 days 00:00:00.177973784 +602 68 0 days 00:00:00.196933670 +602 69 0 days 00:00:00.438283760 +602 70 0 days 00:00:00.221948840 +602 71 0 days 00:00:00.395854975 +602 72 0 days 00:00:00.446756834 +602 73 0 days 00:00:00.397379455 +602 74 0 days 00:00:00.150450825 +602 75 0 days 00:00:00.430647400 +602 76 0 days 00:00:00.450949905 +602 77 0 days 00:00:00.427005585 +602 78 0 days 00:00:00.241215815 +602 79 0 days 00:00:00.164584024 +602 80 0 days 00:00:00.279940370 +602 81 0 days 00:00:00.310311908 +602 82 0 days 00:00:00.281889876 +602 83 0 days 00:00:00.185212491 +602 84 0 days 00:00:00.447827877 +602 85 0 days 00:00:00.425068192 +602 86 0 days 00:00:00.274433386 +602 87 0 days 00:00:00.253140206 +602 88 0 days 00:00:00.296605083 +602 89 0 days 00:00:00.156252520 +602 90 0 days 00:00:00.359912206 +602 91 0 days 00:00:00.353041305 +602 92 0 days 00:00:00.438759743 +602 93 0 days 00:00:00.249862536 +602 94 0 days 00:00:00.474265796 +602 95 0 days 00:00:00.172646936 +602 96 0 days 00:00:00.155241115 +602 97 0 days 00:00:00.170798694 +602 98 0 days 00:00:00.441501210 +602 99 0 days 00:00:00.159213728 +602 100 0 days 00:00:00.157941732 +603 1 0 days 00:00:00.614715740 +603 2 0 days 00:00:00.846177035 +603 3 0 days 00:00:00.803656895 +603 4 0 days 00:00:00.520276300 +603 5 0 days 00:00:00.895172715 +603 6 0 days 00:00:00.392979230 +603 7 0 days 00:00:00.530462775 +603 8 0 days 00:00:00.321308080 +603 9 0 days 00:00:00.522644190 +603 10 0 days 00:00:00.305407205 +603 11 0 days 00:00:00.507158920 +603 12 0 
days 00:00:00.970758972 +603 13 0 days 00:00:00.451038260 +603 14 0 days 00:00:00.565618108 +603 15 0 days 00:00:00.548278340 +603 16 0 days 00:00:00.590856520 +603 17 0 days 00:00:01.029909968 +603 18 0 days 00:00:00.511661545 +603 19 0 days 00:00:00.938797756 +603 20 0 days 00:00:00.968484755 +603 21 0 days 00:00:00.505508930 +603 22 0 days 00:00:00.575017545 +603 23 0 days 00:00:00.913117328 +603 24 0 days 00:00:00.388556652 +603 25 0 days 00:00:00.418787610 +603 26 0 days 00:00:00.988342130 +603 27 0 days 00:00:00.443194395 +603 28 0 days 00:00:00.339219252 +603 29 0 days 00:00:00.995471345 +603 30 0 days 00:00:00.528766305 +603 31 0 days 00:00:00.357716170 +603 32 0 days 00:00:00.525083394 +603 33 0 days 00:00:00.351345100 +603 34 0 days 00:00:00.285921430 +603 35 0 days 00:00:00.580291378 +603 36 0 days 00:00:00.589197915 +603 37 0 days 00:00:00.570964780 +603 38 0 days 00:00:00.450605585 +603 39 0 days 00:00:00.573813010 +603 40 0 days 00:00:00.534656194 +603 41 0 days 00:00:00.419204400 +603 42 0 days 00:00:00.570085492 +603 43 0 days 00:00:00.952673853 +603 44 0 days 00:00:00.445705905 +603 45 0 days 00:00:00.501483075 +603 46 0 days 00:00:00.361596010 +603 47 0 days 00:00:00.760822320 +603 48 0 days 00:00:00.975741525 +603 49 0 days 00:00:01.044069362 +603 50 0 days 00:00:00.860059815 +603 51 0 days 00:00:00.392682675 +603 52 0 days 00:00:00.571438656 +603 53 0 days 00:00:01.148298460 +603 54 0 days 00:00:00.460550075 +603 55 0 days 00:00:00.973867950 +603 56 0 days 00:00:00.639347924 +603 57 0 days 00:00:00.421863237 +603 58 0 days 00:00:00.747718812 +603 59 0 days 00:00:00.849431175 +603 60 0 days 00:00:00.572001010 +603 61 0 days 00:00:00.615997688 +603 62 0 days 00:00:00.391239400 +603 63 0 days 00:00:00.303886695 +603 64 0 days 00:00:00.792994320 +603 65 0 days 00:00:00.995494130 +603 66 0 days 00:00:00.466436370 +603 67 0 days 00:00:00.307031116 +603 68 0 days 00:00:00.395269093 +603 69 0 days 00:00:00.353520245 +603 70 0 days 00:00:00.538441625 
+603 71 0 days 00:00:00.480211893 +603 72 0 days 00:00:00.320646724 +603 73 0 days 00:00:00.540658760 +603 74 0 days 00:00:00.542479130 +603 75 0 days 00:00:01.005951824 +603 76 0 days 00:00:00.313116390 +603 77 0 days 00:00:00.290545035 +603 78 0 days 00:00:00.886120940 +603 79 0 days 00:00:00.499893430 +603 80 0 days 00:00:00.968531165 +603 81 0 days 00:00:00.862695725 +603 82 0 days 00:00:00.391048504 +603 83 0 days 00:00:00.383323280 +603 84 0 days 00:00:00.725651728 +603 85 0 days 00:00:00.811836360 +603 86 0 days 00:00:01.186037516 +603 87 0 days 00:00:00.318562180 +603 88 0 days 00:00:00.838401775 +603 89 0 days 00:00:00.498658985 +603 90 0 days 00:00:01.003029028 +603 91 0 days 00:00:00.963734275 +603 92 0 days 00:00:00.358372477 +603 93 0 days 00:00:00.767670910 +603 94 0 days 00:00:00.895835015 +603 95 0 days 00:00:00.932972116 +603 96 0 days 00:00:00.489480795 +603 97 0 days 00:00:00.986798500 +603 98 0 days 00:00:00.493066155 +603 99 0 days 00:00:00.886486325 +603 100 0 days 00:00:00.327464260 +604 1 0 days 00:00:00.456129292 +604 2 0 days 00:00:00.445164095 +604 3 0 days 00:00:00.313252090 +604 4 0 days 00:00:00.475658950 +604 5 0 days 00:00:00.326187253 +604 6 0 days 00:00:00.515654110 +604 7 0 days 00:00:00.183124101 +604 8 0 days 00:00:00.274222765 +604 9 0 days 00:00:00.160970140 +604 10 0 days 00:00:00.271425982 +604 11 0 days 00:00:00.271944205 +604 12 0 days 00:00:00.354691135 +604 13 0 days 00:00:00.561929325 +604 14 0 days 00:00:00.146971320 +604 15 0 days 00:00:00.252548771 +604 16 0 days 00:00:00.176791880 +604 17 0 days 00:00:00.256465806 +604 18 0 days 00:00:00.374951685 +604 19 0 days 00:00:00.192410305 +604 20 0 days 00:00:00.479380796 +604 21 0 days 00:00:00.621713012 +604 22 0 days 00:00:00.441120333 +604 23 0 days 00:00:00.276329900 +604 24 0 days 00:00:00.171671715 +604 25 0 days 00:00:00.441029560 +604 26 0 days 00:00:00.300155044 +604 27 0 days 00:00:00.442594205 +604 28 0 days 00:00:00.467174670 +604 29 0 days 00:00:00.430520488 
+604 30 0 days 00:00:00.253930600 +604 31 0 days 00:00:00.168820230 +604 32 0 days 00:00:00.276088504 +604 33 0 days 00:00:00.213764089 +604 34 0 days 00:00:00.170831500 +604 35 0 days 00:00:00.552488795 +604 36 0 days 00:00:00.341183143 +604 37 0 days 00:00:00.294927105 +604 38 0 days 00:00:00.195092272 +604 39 0 days 00:00:00.518839190 +604 40 0 days 00:00:00.152920855 +604 41 0 days 00:00:00.183138264 +604 42 0 days 00:00:00.289983330 +604 43 0 days 00:00:00.154380960 +604 44 0 days 00:00:00.467911900 +604 45 0 days 00:00:00.233537975 +604 46 0 days 00:00:00.327652768 +604 47 0 days 00:00:00.216187230 +604 48 0 days 00:00:00.551834735 +604 49 0 days 00:00:00.408486015 +604 50 0 days 00:00:00.336102770 +604 51 0 days 00:00:00.242102380 +604 52 0 days 00:00:00.558849588 +604 53 0 days 00:00:00.301332791 +604 54 0 days 00:00:00.199403435 +604 55 0 days 00:00:00.169559605 +604 56 0 days 00:00:00.257838783 +604 57 0 days 00:00:00.305966388 +604 58 0 days 00:00:00.482688905 +604 59 0 days 00:00:00.186860840 +604 60 0 days 00:00:00.486075405 +604 61 0 days 00:00:00.183023310 +604 62 0 days 00:00:00.552865200 +604 63 0 days 00:00:00.262268306 +604 64 0 days 00:00:00.549617033 +604 65 0 days 00:00:00.448674860 +604 66 0 days 00:00:00.183508610 +604 67 0 days 00:00:00.256449720 +604 68 0 days 00:00:00.280891492 +604 69 0 days 00:00:00.320669640 +604 70 0 days 00:00:00.273841600 +604 71 0 days 00:00:00.459423710 +604 72 0 days 00:00:00.477119340 +604 73 0 days 00:00:00.457341171 +604 74 0 days 00:00:00.314999005 +604 75 0 days 00:00:00.475547328 +604 76 0 days 00:00:00.167606143 +604 77 0 days 00:00:00.211649905 +604 78 0 days 00:00:00.223456225 +604 79 0 days 00:00:00.393619245 +604 80 0 days 00:00:00.451464715 +604 82 0 days 00:00:00.429924872 +604 83 0 days 00:00:00.226641322 +604 84 0 days 00:00:00.312855604 +604 85 0 days 00:00:00.227030375 +604 86 0 days 00:00:00.191988311 +604 87 0 days 00:00:00.391061640 +604 88 0 days 00:00:00.352660512 +604 89 0 days 
00:00:00.317266350 +604 90 0 days 00:00:00.190944195 +604 91 0 days 00:00:00.255491016 +604 92 0 days 00:00:00.281107205 +604 93 0 days 00:00:00.473558571 +604 94 0 days 00:00:00.391588355 +604 95 0 days 00:00:00.491258305 +604 96 0 days 00:00:00.368383240 +604 97 0 days 00:00:00.271824075 +604 98 0 days 00:00:00.509502445 +604 99 0 days 00:00:00.488429870 +604 100 0 days 00:00:00.610218395 +605 1 0 days 00:00:00.211625185 +605 2 0 days 00:00:00.167235745 +605 3 0 days 00:00:00.196204975 +605 4 0 days 00:00:00.158663205 +605 5 0 days 00:00:00.221202740 +605 6 0 days 00:00:00.180917380 +605 7 0 days 00:00:00.158167872 +605 8 0 days 00:00:00.190876173 +605 9 0 days 00:00:00.197149085 +605 10 0 days 00:00:00.181110206 +605 11 0 days 00:00:00.197027165 +605 12 0 days 00:00:00.169829291 +605 13 0 days 00:00:00.181337766 +605 14 0 days 00:00:00.160959144 +605 15 0 days 00:00:00.196272780 +605 16 0 days 00:00:00.188824477 +605 17 0 days 00:00:00.176628290 +605 18 0 days 00:00:00.166318610 +605 19 0 days 00:00:00.207253844 +605 20 0 days 00:00:00.197055080 +605 21 0 days 00:00:00.180940930 +605 22 0 days 00:00:00.174536450 +605 23 0 days 00:00:00.182005803 +605 24 0 days 00:00:00.166867875 +605 25 0 days 00:00:00.150990340 +605 26 0 days 00:00:00.165895060 +605 27 0 days 00:00:00.158417124 +605 28 0 days 00:00:00.178050406 +605 29 0 days 00:00:00.167586951 +605 30 0 days 00:00:00.166642305 +605 31 0 days 00:00:00.193305984 +605 32 0 days 00:00:00.166103625 +605 33 0 days 00:00:00.208977248 +605 34 0 days 00:00:00.151644070 +605 35 0 days 00:00:00.158909296 +605 36 0 days 00:00:00.196800925 +605 37 0 days 00:00:00.175595616 +605 38 0 days 00:00:00.196979255 +605 39 0 days 00:00:00.175609044 +605 40 0 days 00:00:00.185724100 +605 41 0 days 00:00:00.167156045 +605 42 0 days 00:00:00.178002058 +605 43 0 days 00:00:00.176944326 +605 44 0 days 00:00:00.208296108 +605 45 0 days 00:00:00.194977589 +605 46 0 days 00:00:00.166553190 +605 47 0 days 00:00:00.176435166 +605 48 0 days 
00:00:00.181117853 +605 49 0 days 00:00:00.170485805 +605 50 0 days 00:00:00.153364570 +605 51 0 days 00:00:00.196437845 +605 52 0 days 00:00:00.220432011 +605 53 0 days 00:00:00.166767475 +605 54 0 days 00:00:00.197981200 +605 55 0 days 00:00:00.197643285 +605 56 0 days 00:00:00.186182457 +605 57 0 days 00:00:00.167148235 +605 58 0 days 00:00:00.175393648 +605 59 0 days 00:00:00.167035385 +605 60 0 days 00:00:00.208964872 +605 61 0 days 00:00:00.220840065 +605 62 0 days 00:00:00.197940405 +605 63 0 days 00:00:00.198425510 +605 64 0 days 00:00:00.175309415 +605 65 0 days 00:00:00.175437562 +605 66 0 days 00:00:00.151593765 +605 67 0 days 00:00:00.168607917 +605 68 0 days 00:00:00.166766686 +605 69 0 days 00:00:00.208599908 +605 70 0 days 00:00:00.197856320 +605 71 0 days 00:00:00.215911276 +605 72 0 days 00:00:00.198128365 +605 73 0 days 00:00:00.180293858 +605 74 0 days 00:00:00.167715180 +605 75 0 days 00:00:00.168223955 +605 76 0 days 00:00:00.151390960 +605 77 0 days 00:00:00.198024350 +605 78 0 days 00:00:00.197068835 +605 79 0 days 00:00:00.197026995 +605 80 0 days 00:00:00.167465995 +605 81 0 days 00:00:00.151543965 +605 82 0 days 00:00:00.164532666 +605 83 0 days 00:00:00.179884948 +605 84 0 days 00:00:00.196737838 +605 85 0 days 00:00:00.159489532 +605 86 0 days 00:00:00.198079125 +605 87 0 days 00:00:00.197556110 +605 88 0 days 00:00:00.177156168 +605 89 0 days 00:00:00.168104185 +605 90 0 days 00:00:00.154205230 +605 91 0 days 00:00:00.197534015 +605 92 0 days 00:00:00.151511835 +605 93 0 days 00:00:00.170600151 +605 94 0 days 00:00:00.151655875 +605 95 0 days 00:00:00.152016370 +605 96 0 days 00:00:00.221687242 +605 97 0 days 00:00:00.152250745 +605 98 0 days 00:00:00.166894430 +605 99 0 days 00:00:00.199272685 +605 100 0 days 00:00:00.153924130 +606 1 0 days 00:00:00.127478575 +606 2 0 days 00:00:00.128934636 +606 3 0 days 00:00:00.111608042 +606 4 0 days 00:00:00.093449170 +606 5 0 days 00:00:00.086170900 +606 6 0 days 00:00:00.117906316 +606 7 0 days 
00:00:00.093900625 +606 8 0 days 00:00:00.094063696 +606 9 0 days 00:00:00.100691227 +606 10 0 days 00:00:00.111907770 +606 11 0 days 00:00:00.086757900 +606 12 0 days 00:00:00.106514210 +606 13 0 days 00:00:00.144836782 +606 14 0 days 00:00:00.127958462 +606 15 0 days 00:00:00.094475820 +606 16 0 days 00:00:00.100409016 +606 17 0 days 00:00:00.094254265 +606 18 0 days 00:00:00.112496015 +606 19 0 days 00:00:00.096455737 +606 20 0 days 00:00:00.121691476 +606 21 0 days 00:00:00.101944490 +606 22 0 days 00:00:00.108595018 +606 23 0 days 00:00:00.112555180 +606 24 0 days 00:00:00.111343421 +606 25 0 days 00:00:00.099144240 +606 26 0 days 00:00:00.094398230 +606 27 0 days 00:00:00.098744060 +606 28 0 days 00:00:00.095017115 +606 29 0 days 00:00:00.101611403 +606 30 0 days 00:00:00.106756535 +606 31 0 days 00:00:00.126260762 +606 32 0 days 00:00:00.087069020 +606 33 0 days 00:00:00.101318417 +606 34 0 days 00:00:00.094633745 +606 35 0 days 00:00:00.113846140 +606 36 0 days 00:00:00.111959925 +606 37 0 days 00:00:00.101692606 +606 38 0 days 00:00:00.104070622 +606 39 0 days 00:00:00.112936255 +606 40 0 days 00:00:00.112818830 +606 41 0 days 00:00:00.086792945 +606 42 0 days 00:00:00.090771488 +606 43 0 days 00:00:00.102243360 +606 44 0 days 00:00:00.086833060 +606 45 0 days 00:00:00.124490565 +606 46 0 days 00:00:00.086831265 +606 47 0 days 00:00:00.100708945 +606 48 0 days 00:00:00.105757057 +606 49 0 days 00:00:00.099362716 +606 50 0 days 00:00:00.113531145 +606 51 0 days 00:00:00.086724185 +606 52 0 days 00:00:00.094312610 +606 53 0 days 00:00:00.104226560 +606 54 0 days 00:00:00.112322155 +606 55 0 days 00:00:00.095571850 +606 56 0 days 00:00:00.112951465 +606 57 0 days 00:00:00.118356192 +606 58 0 days 00:00:00.100428200 +606 59 0 days 00:00:00.112681840 +606 60 0 days 00:00:00.112274525 +606 61 0 days 00:00:00.097493995 +606 62 0 days 00:00:00.094790380 +606 63 0 days 00:00:00.095246505 +606 64 0 days 00:00:00.087235975 +606 65 0 days 00:00:00.087001410 +606 66 0 
days 00:00:00.112290730 +606 67 0 days 00:00:00.121839443 +606 68 0 days 00:00:00.108139096 +606 69 0 days 00:00:00.099155128 +606 70 0 days 00:00:00.112542195 +606 71 0 days 00:00:00.118582536 +606 72 0 days 00:00:00.112737100 +606 73 0 days 00:00:00.113457870 +606 74 0 days 00:00:00.121887623 +606 75 0 days 00:00:00.122199580 +606 76 0 days 00:00:00.102852330 +606 77 0 days 00:00:00.087245595 +606 78 0 days 00:00:00.090953440 +606 79 0 days 00:00:00.112823470 +606 80 0 days 00:00:00.101710310 +606 81 0 days 00:00:00.095320815 +606 82 0 days 00:00:00.112577835 +606 83 0 days 00:00:00.099469532 +606 84 0 days 00:00:00.130741856 +606 85 0 days 00:00:00.118374588 +606 86 0 days 00:00:00.090960588 +606 87 0 days 00:00:00.093723270 +606 88 0 days 00:00:00.112939825 +606 89 0 days 00:00:00.112433870 +606 90 0 days 00:00:00.095355343 +606 91 0 days 00:00:00.087168110 +606 92 0 days 00:00:00.102151506 +606 93 0 days 00:00:00.091808756 +606 94 0 days 00:00:00.112872650 +606 95 0 days 00:00:00.095217225 +606 96 0 days 00:00:00.112789980 +606 97 0 days 00:00:00.087096095 +606 98 0 days 00:00:00.101989136 +606 99 0 days 00:00:00.112369940 +606 100 0 days 00:00:00.100030794 +607 1 0 days 00:00:00.191710737 +607 2 0 days 00:00:00.166608733 +607 3 0 days 00:00:00.195045385 +607 4 0 days 00:00:00.184643360 +607 5 0 days 00:00:00.205149340 +607 6 0 days 00:00:00.166052725 +607 7 0 days 00:00:00.194150815 +607 8 0 days 00:00:00.225520835 +607 9 0 days 00:00:00.161734456 +607 10 0 days 00:00:00.206564388 +607 11 0 days 00:00:00.194592705 +607 12 0 days 00:00:00.166290490 +607 13 0 days 00:00:00.195032190 +607 14 0 days 00:00:00.153401630 +607 15 0 days 00:00:00.180806546 +607 16 0 days 00:00:00.166779073 +607 17 0 days 00:00:00.161334680 +607 18 0 days 00:00:00.194433620 +607 19 0 days 00:00:00.195619865 +607 20 0 days 00:00:00.176005320 +607 21 0 days 00:00:00.153962200 +607 22 0 days 00:00:00.195690790 +607 23 0 days 00:00:00.166313980 +607 24 0 days 00:00:00.175780676 +607 25 0 
days 00:00:00.195510915 +607 26 0 days 00:00:00.150966995 +607 27 0 days 00:00:00.180770046 +607 28 0 days 00:00:00.166717855 +607 29 0 days 00:00:00.158802212 +607 30 0 days 00:00:00.166978206 +607 31 0 days 00:00:00.213819900 +607 32 0 days 00:00:00.195393960 +607 33 0 days 00:00:00.166084840 +607 34 0 days 00:00:00.194930365 +607 35 0 days 00:00:00.188451947 +607 36 0 days 00:00:00.207288192 +607 37 0 days 00:00:00.170911632 +607 38 0 days 00:00:00.185646108 +607 39 0 days 00:00:00.181921631 +607 40 0 days 00:00:00.176122951 +607 41 0 days 00:00:00.196016620 +607 42 0 days 00:00:00.185559217 +607 43 0 days 00:00:00.166408085 +607 44 0 days 00:00:00.162129836 +607 45 0 days 00:00:00.154650165 +607 46 0 days 00:00:00.181620723 +607 47 0 days 00:00:00.171204057 +607 48 0 days 00:00:00.195245090 +607 49 0 days 00:00:00.181430146 +607 50 0 days 00:00:00.213853410 +607 51 0 days 00:00:00.167067385 +607 52 0 days 00:00:00.207432340 +607 53 0 days 00:00:00.219236168 +607 54 0 days 00:00:00.168792434 +607 55 0 days 00:00:00.225897866 +607 56 0 days 00:00:00.195691490 +607 57 0 days 00:00:00.214067426 +607 58 0 days 00:00:00.214172406 +607 59 0 days 00:00:00.174565370 +607 60 0 days 00:00:00.196269475 +607 61 0 days 00:00:00.196679250 +607 62 0 days 00:00:00.174301305 +607 63 0 days 00:00:00.214303350 +607 64 0 days 00:00:00.206852248 +607 65 0 days 00:00:00.182045766 +607 66 0 days 00:00:00.151655520 +607 67 0 days 00:00:00.162179944 +607 68 0 days 00:00:00.171835874 +607 69 0 days 00:00:00.176313636 +607 70 0 days 00:00:00.207652212 +607 71 0 days 00:00:00.154806995 +607 72 0 days 00:00:00.181557766 +607 73 0 days 00:00:00.195527640 +607 74 0 days 00:00:00.167674630 +607 75 0 days 00:00:00.181756676 +607 76 0 days 00:00:00.196012905 +607 77 0 days 00:00:00.230746232 +607 78 0 days 00:00:00.195994265 +607 79 0 days 00:00:00.169486562 +607 80 0 days 00:00:00.195845500 +607 81 0 days 00:00:00.164789066 +607 82 0 days 00:00:00.151686065 +607 83 0 days 00:00:00.196133740 
+607 84 0 days 00:00:00.181666850 +607 85 0 days 00:00:00.168774445 +607 86 0 days 00:00:00.219470591 +607 87 0 days 00:00:00.181402410 +607 88 0 days 00:00:00.182192646 +607 89 0 days 00:00:00.207375592 +607 90 0 days 00:00:00.175470160 +607 91 0 days 00:00:00.195560032 +607 92 0 days 00:00:00.196064065 +607 93 0 days 00:00:00.196712510 +607 94 0 days 00:00:00.196158190 +607 95 0 days 00:00:00.197744540 +607 96 0 days 00:00:00.153806245 +607 97 0 days 00:00:00.176337728 +607 98 0 days 00:00:00.168102070 +607 99 0 days 00:00:00.160548912 +607 100 0 days 00:00:00.207145300 +608 1 0 days 00:00:00.181304420 +608 2 0 days 00:00:00.166452444 +608 3 0 days 00:00:00.175961452 +608 4 0 days 00:00:00.195090285 +608 5 0 days 00:00:00.155664610 +608 6 0 days 00:00:00.188174900 +608 7 0 days 00:00:00.172938053 +608 8 0 days 00:00:00.192894608 +608 9 0 days 00:00:00.181134424 +608 10 0 days 00:00:00.163372944 +608 11 0 days 00:00:00.168906200 +608 12 0 days 00:00:00.223735820 +608 13 0 days 00:00:00.178541155 +608 14 0 days 00:00:00.166886976 +608 15 0 days 00:00:00.182083888 +608 16 0 days 00:00:00.144382186 +608 17 0 days 00:00:00.226038826 +608 18 0 days 00:00:00.172542655 +608 19 0 days 00:00:00.180205334 +608 20 0 days 00:00:00.188011426 +608 21 0 days 00:00:00.156399765 +608 22 0 days 00:00:00.177106080 +608 23 0 days 00:00:00.215877460 +608 24 0 days 00:00:00.173062680 +608 25 0 days 00:00:00.195711677 +608 26 0 days 00:00:00.169377983 +608 27 0 days 00:00:00.216156588 +608 28 0 days 00:00:00.173317040 +608 29 0 days 00:00:00.206384730 +608 30 0 days 00:00:00.182060114 +608 31 0 days 00:00:00.182841752 +608 32 0 days 00:00:00.217165904 +608 33 0 days 00:00:00.159229475 +608 34 0 days 00:00:00.169710173 +608 35 0 days 00:00:00.173186700 +608 36 0 days 00:00:00.216575020 +608 37 0 days 00:00:00.176588657 +608 38 0 days 00:00:00.159231245 +608 39 0 days 00:00:00.181334913 +608 40 0 days 00:00:00.164043464 +608 41 0 days 00:00:00.167656932 +608 42 0 days 00:00:00.206372025 
+608 43 0 days 00:00:00.182204400 +608 44 0 days 00:00:00.229276114 +608 45 0 days 00:00:00.179538555 +608 46 0 days 00:00:00.164729944 +608 47 0 days 00:00:00.182412264 +608 48 0 days 00:00:00.192435111 +608 49 0 days 00:00:00.234454862 +608 50 0 days 00:00:00.217331780 +608 51 0 days 00:00:00.185236960 +608 52 0 days 00:00:00.188912046 +608 53 0 days 00:00:00.172485430 +608 54 0 days 00:00:00.216438656 +608 55 0 days 00:00:00.199199866 +608 56 0 days 00:00:00.188182283 +608 57 0 days 00:00:00.172525950 +608 58 0 days 00:00:00.188596943 +608 59 0 days 00:00:00.193112208 +608 60 0 days 00:00:00.216775196 +608 61 0 days 00:00:00.205989350 +608 62 0 days 00:00:00.176932817 +608 63 0 days 00:00:00.225129446 +608 64 0 days 00:00:00.173261145 +608 65 0 days 00:00:00.170233970 +608 66 0 days 00:00:00.197014815 +608 67 0 days 00:00:00.182850671 +608 68 0 days 00:00:00.156201420 +608 69 0 days 00:00:00.235091067 +608 70 0 days 00:00:00.205485640 +608 71 0 days 00:00:00.217936276 +608 72 0 days 00:00:00.176877810 +608 73 0 days 00:00:00.206850445 +608 74 0 days 00:00:00.164892832 +608 75 0 days 00:00:00.177601880 +608 76 0 days 00:00:00.171782080 +608 77 0 days 00:00:00.173496085 +608 78 0 days 00:00:00.157422466 +608 79 0 days 00:00:00.237230548 +608 80 0 days 00:00:00.189346573 +608 81 0 days 00:00:00.173258595 +608 82 0 days 00:00:00.177169682 +608 83 0 days 00:00:00.204038191 +608 84 0 days 00:00:00.194089057 +608 85 0 days 00:00:00.206217030 +608 86 0 days 00:00:00.173225465 +608 87 0 days 00:00:00.224712366 +608 88 0 days 00:00:00.206401050 +608 89 0 days 00:00:00.158848866 +608 90 0 days 00:00:00.177229635 +608 91 0 days 00:00:00.173151910 +608 92 0 days 00:00:00.196746902 +608 93 0 days 00:00:00.157065780 +608 94 0 days 00:00:00.158476073 +608 95 0 days 00:00:00.174101920 +608 96 0 days 00:00:00.148586593 +608 97 0 days 00:00:00.206903215 +608 98 0 days 00:00:00.234572782 +608 99 0 days 00:00:00.173830740 +608 100 0 days 00:00:00.156991090 +609 1 0 days 
00:00:00.093548225 +609 2 0 days 00:00:00.124611687 +609 3 0 days 00:00:00.092780186 +609 4 0 days 00:00:00.100797516 +609 5 0 days 00:00:00.110289710 +609 6 0 days 00:00:00.094517113 +609 7 0 days 00:00:00.128844954 +609 8 0 days 00:00:00.103268760 +609 9 0 days 00:00:00.107039990 +609 10 0 days 00:00:00.101036216 +609 11 0 days 00:00:00.090778288 +609 12 0 days 00:00:00.104057223 +609 13 0 days 00:00:00.086364025 +609 14 0 days 00:00:00.126434533 +609 15 0 days 00:00:00.091946936 +609 16 0 days 00:00:00.098693288 +609 17 0 days 00:00:00.097561684 +609 18 0 days 00:00:00.091780932 +609 19 0 days 00:00:00.094647153 +609 20 0 days 00:00:00.090455604 +609 21 0 days 00:00:00.110764595 +609 22 0 days 00:00:00.099007000 +609 23 0 days 00:00:00.090150620 +609 24 0 days 00:00:00.109339832 +609 25 0 days 00:00:00.110779550 +609 26 0 days 00:00:00.121344443 +609 27 0 days 00:00:00.100658758 +609 28 0 days 00:00:00.092345856 +609 29 0 days 00:00:00.098376452 +609 30 0 days 00:00:00.090741576 +609 31 0 days 00:00:00.124960467 +609 32 0 days 00:00:00.094124020 +609 33 0 days 00:00:00.090971040 +609 34 0 days 00:00:00.107062612 +609 35 0 days 00:00:00.098821408 +609 36 0 days 00:00:00.097485322 +609 37 0 days 00:00:00.097617995 +609 38 0 days 00:00:00.094487865 +609 39 0 days 00:00:00.098732436 +609 40 0 days 00:00:00.111542825 +609 41 0 days 00:00:00.110789775 +609 42 0 days 00:00:00.117587676 +609 43 0 days 00:00:00.117187520 +609 44 0 days 00:00:00.101351826 +609 45 0 days 00:00:00.093811496 +609 46 0 days 00:00:00.089463440 +609 47 0 days 00:00:00.111418015 +609 48 0 days 00:00:00.106416680 +609 49 0 days 00:00:00.087064280 +609 50 0 days 00:00:00.111470425 +609 51 0 days 00:00:00.098692764 +609 52 0 days 00:00:00.095111874 +609 53 0 days 00:00:00.110574466 +609 54 0 days 00:00:00.094716665 +609 55 0 days 00:00:00.095469906 +609 56 0 days 00:00:00.086932515 +609 57 0 days 00:00:00.094549045 +609 58 0 days 00:00:00.087239530 +609 59 0 days 00:00:00.098761108 +609 60 0 days 
00:00:00.120681480 +609 61 0 days 00:00:00.120359130 +609 62 0 days 00:00:00.117171156 +609 63 0 days 00:00:00.101647010 +609 64 0 days 00:00:00.125624322 +609 65 0 days 00:00:00.098128086 +609 66 0 days 00:00:00.097329354 +609 67 0 days 00:00:00.099179936 +609 68 0 days 00:00:00.098371895 +609 69 0 days 00:00:00.111908860 +609 70 0 days 00:00:00.117388484 +609 71 0 days 00:00:00.103938088 +609 72 0 days 00:00:00.112031195 +609 73 0 days 00:00:00.095066340 +609 74 0 days 00:00:00.112027370 +609 75 0 days 00:00:00.094790865 +609 76 0 days 00:00:00.111909080 +609 77 0 days 00:00:00.105531075 +609 78 0 days 00:00:00.111244485 +609 79 0 days 00:00:00.098518152 +609 80 0 days 00:00:00.094166696 +609 81 0 days 00:00:00.087693720 +609 82 0 days 00:00:00.124935345 +609 83 0 days 00:00:00.123443377 +609 84 0 days 00:00:00.094647455 +609 85 0 days 00:00:00.108417654 +609 86 0 days 00:00:00.111964765 +609 87 0 days 00:00:00.102167473 +609 88 0 days 00:00:00.112566320 +609 89 0 days 00:00:00.087922640 +609 90 0 days 00:00:00.095280086 +609 91 0 days 00:00:00.097738700 +609 92 0 days 00:00:00.095075365 +609 93 0 days 00:00:00.112654065 +609 94 0 days 00:00:00.101551262 +609 95 0 days 00:00:00.112522515 +609 96 0 days 00:00:00.112784700 +609 97 0 days 00:00:00.091545972 +609 98 0 days 00:00:00.117620332 +609 99 0 days 00:00:00.118010256 +609 100 0 days 00:00:00.100640372 +610 1 0 days 00:00:00.096022323 +610 2 0 days 00:00:00.100262116 +610 3 0 days 00:00:00.115215895 +610 4 0 days 00:00:00.091861264 +610 5 0 days 00:00:00.116544425 +610 6 0 days 00:00:00.094388052 +610 7 0 days 00:00:00.100467844 +610 8 0 days 00:00:00.129348867 +610 9 0 days 00:00:00.105915100 +610 10 0 days 00:00:00.127470605 +610 11 0 days 00:00:00.114779955 +610 12 0 days 00:00:00.124961973 +610 13 0 days 00:00:00.116355650 +610 14 0 days 00:00:00.098550360 +610 15 0 days 00:00:00.127517134 +610 16 0 days 00:00:00.097013480 +610 17 0 days 00:00:00.096802035 +610 18 0 days 00:00:00.100590996 +610 19 0 days 
00:00:00.125094703 +610 20 0 days 00:00:00.120946044 +610 21 0 days 00:00:00.096487445 +610 22 0 days 00:00:00.082484746 +610 23 0 days 00:00:00.110183840 +610 24 0 days 00:00:00.116693265 +610 25 0 days 00:00:00.096857195 +610 26 0 days 00:00:00.106154862 +610 27 0 days 00:00:00.095959775 +610 28 0 days 00:00:00.096404770 +610 29 0 days 00:00:00.094505726 +610 30 0 days 00:00:00.098248451 +610 31 0 days 00:00:00.100652624 +610 32 0 days 00:00:00.098906495 +610 33 0 days 00:00:00.115092150 +610 34 0 days 00:00:00.096392815 +610 35 0 days 00:00:00.109836438 +610 36 0 days 00:00:00.096623911 +610 37 0 days 00:00:00.100323934 +610 38 0 days 00:00:00.106250560 +610 39 0 days 00:00:00.088294855 +610 40 0 days 00:00:00.099363720 +610 41 0 days 00:00:00.109857600 +610 42 0 days 00:00:00.103764860 +610 43 0 days 00:00:00.089557465 +610 44 0 days 00:00:00.115482025 +610 45 0 days 00:00:00.094565296 +610 46 0 days 00:00:00.127762737 +610 47 0 days 00:00:00.121384160 +610 48 0 days 00:00:00.115479315 +610 49 0 days 00:00:00.124801843 +610 50 0 days 00:00:00.089943585 +610 51 0 days 00:00:00.105965922 +610 52 0 days 00:00:00.089388845 +610 53 0 days 00:00:00.088553600 +610 54 0 days 00:00:00.102384456 +610 55 0 days 00:00:00.096882608 +610 56 0 days 00:00:00.098441807 +610 57 0 days 00:00:00.098639915 +610 58 0 days 00:00:00.097050550 +610 59 0 days 00:00:00.090926815 +610 60 0 days 00:00:00.089003473 +610 61 0 days 00:00:00.095221913 +610 62 0 days 00:00:00.104455483 +610 63 0 days 00:00:00.088555425 +610 64 0 days 00:00:00.095393840 +610 65 0 days 00:00:00.095660543 +610 66 0 days 00:00:00.125872930 +610 67 0 days 00:00:00.130399070 +610 68 0 days 00:00:00.094572212 +610 69 0 days 00:00:00.105082273 +610 70 0 days 00:00:00.108352675 +610 71 0 days 00:00:00.128788645 +610 72 0 days 00:00:00.097578074 +610 73 0 days 00:00:00.116899705 +610 74 0 days 00:00:00.116104780 +610 75 0 days 00:00:00.125771553 +610 76 0 days 00:00:00.121966036 +610 77 0 days 00:00:00.126254046 +610 78 
0 days 00:00:00.116604610 +610 79 0 days 00:00:00.095006656 +610 80 0 days 00:00:00.090477466 +610 81 0 days 00:00:00.097610345 +610 82 0 days 00:00:00.122147580 +610 83 0 days 00:00:00.097209380 +610 84 0 days 00:00:00.088962850 +610 85 0 days 00:00:00.121608648 +610 86 0 days 00:00:00.121290764 +610 87 0 days 00:00:00.095776103 +610 88 0 days 00:00:00.101368924 +610 89 0 days 00:00:00.096924923 +610 90 0 days 00:00:00.121965660 +610 91 0 days 00:00:00.096809705 +610 92 0 days 00:00:00.105304116 +610 93 0 days 00:00:00.093578560 +610 94 0 days 00:00:00.091022570 +610 95 0 days 00:00:00.097537430 +610 96 0 days 00:00:00.099679108 +610 97 0 days 00:00:00.115882530 +610 98 0 days 00:00:00.106932231 +610 99 0 days 00:00:00.116862500 +610 100 0 days 00:00:00.101531744 +611 1 0 days 00:00:06.243984890 +611 2 0 days 00:00:09.231831515 +611 3 0 days 00:00:11.033837945 +611 4 0 days 00:00:09.102302265 +611 5 0 days 00:00:12.440516445 +611 6 0 days 00:00:08.299621900 +611 7 0 days 00:00:03.370001455 +611 8 0 days 00:00:03.168009710 +611 9 0 days 00:00:11.163260375 +611 10 0 days 00:00:09.599639824 +611 11 0 days 00:00:05.660245980 +611 12 0 days 00:00:09.509348443 +611 13 0 days 00:00:08.478564245 +611 14 0 days 00:00:09.446646015 +611 15 0 days 00:00:03.436671875 +611 16 0 days 00:00:03.262834250 +611 17 0 days 00:00:03.655283720 +611 18 0 days 00:00:04.272764905 +611 19 0 days 00:00:05.613526835 +611 20 0 days 00:00:04.340162990 +611 21 0 days 00:00:09.932942865 +611 22 0 days 00:00:05.020606900 +611 23 0 days 00:00:06.053895940 +611 24 0 days 00:00:09.381985940 +611 25 0 days 00:00:03.999731690 +611 26 0 days 00:00:04.687953545 +611 27 0 days 00:00:04.604523646 +611 28 0 days 00:00:04.950079530 +611 29 0 days 00:00:04.917032800 +611 30 0 days 00:00:09.776192320 +611 31 0 days 00:00:07.597392795 +611 32 0 days 00:00:06.363108740 +611 33 0 days 00:00:05.985008300 +611 34 0 days 00:00:10.402238210 +611 35 0 days 00:00:05.687151780 +611 36 0 days 00:00:04.510611976 +611 37 0 
days 00:00:04.194380055 +611 38 0 days 00:00:05.567007060 +611 39 0 days 00:00:03.054938865 +611 40 0 days 00:00:03.295990585 +611 41 0 days 00:00:05.686842950 +611 42 0 days 00:00:08.298981135 +611 43 0 days 00:00:06.248438955 +611 44 0 days 00:00:04.887754955 +611 45 0 days 00:00:08.057300375 +611 46 0 days 00:00:12.740626695 +611 47 0 days 00:00:04.953491975 +611 48 0 days 00:00:09.835282100 +611 49 0 days 00:00:11.189379666 +611 50 0 days 00:00:06.333055585 +611 51 0 days 00:00:03.325675765 +611 52 0 days 00:00:09.671643060 +611 53 0 days 00:00:06.145797285 +611 54 0 days 00:00:03.032039505 +611 55 0 days 00:00:10.087149792 +611 56 0 days 00:00:03.742845645 +611 57 0 days 00:00:03.247350045 +611 58 0 days 00:00:05.656100255 +611 59 0 days 00:00:03.849885560 +611 60 0 days 00:00:11.669467335 +611 61 0 days 00:00:03.163421220 +612 1 0 days 00:00:02.468764085 +612 2 0 days 00:00:04.237637995 +612 3 0 days 00:00:04.143251905 +612 4 0 days 00:00:05.003533340 +612 5 0 days 00:00:03.092839180 +612 6 0 days 00:00:02.662009755 +612 7 0 days 00:00:03.299719270 +612 8 0 days 00:00:05.109100255 +612 9 0 days 00:00:02.659258335 +612 10 0 days 00:00:02.107741940 +612 11 0 days 00:00:06.410075135 +612 12 0 days 00:00:01.623193875 +612 13 0 days 00:00:01.948456100 +612 14 0 days 00:00:01.683854685 +612 15 0 days 00:00:04.158105190 +612 16 0 days 00:00:05.319620025 +612 17 0 days 00:00:02.695780430 +612 18 0 days 00:00:02.801318845 +612 19 0 days 00:00:01.592064785 +612 20 0 days 00:00:02.493216750 +612 21 0 days 00:00:01.763937810 +612 22 0 days 00:00:03.170666230 +612 23 0 days 00:00:05.057345684 +612 24 0 days 00:00:01.737704948 +612 25 0 days 00:00:05.334355690 +612 26 0 days 00:00:03.811724960 +612 27 0 days 00:00:04.227669280 +612 28 0 days 00:00:04.239223028 +612 29 0 days 00:00:02.596550508 +612 30 0 days 00:00:05.058605710 +612 31 0 days 00:00:03.288046315 +612 32 0 days 00:00:02.691010868 +612 33 0 days 00:00:02.586414900 +612 34 0 days 00:00:02.986914725 +612 35 0 
days 00:00:03.170203035 +612 36 0 days 00:00:02.095746325 +612 37 0 days 00:00:04.406496276 +612 38 0 days 00:00:02.498638780 +612 39 0 days 00:00:02.793342915 +612 40 0 days 00:00:02.632881245 +612 41 0 days 00:00:01.906875235 +612 42 0 days 00:00:02.856443045 +612 43 0 days 00:00:02.549188095 +612 44 0 days 00:00:01.820393475 +612 45 0 days 00:00:01.795336130 +612 46 0 days 00:00:03.006653335 +612 47 0 days 00:00:02.695662115 +612 48 0 days 00:00:02.920652596 +612 49 0 days 00:00:04.498541270 +612 50 0 days 00:00:02.923964832 +612 51 0 days 00:00:03.978422450 +612 52 0 days 00:00:01.849581040 +612 53 0 days 00:00:05.032287456 +612 54 0 days 00:00:04.336578455 +612 55 0 days 00:00:02.880298060 +612 56 0 days 00:00:02.369854565 +612 57 0 days 00:00:04.037435125 +612 58 0 days 00:00:02.881366211 +612 59 0 days 00:00:01.932115960 +612 60 0 days 00:00:04.302815185 +612 61 0 days 00:00:03.956280150 +612 62 0 days 00:00:04.726404045 +612 63 0 days 00:00:02.597829100 +612 64 0 days 00:00:02.520592592 +612 65 0 days 00:00:01.697157677 +612 66 0 days 00:00:04.588638953 +612 67 0 days 00:00:05.181085410 +612 68 0 days 00:00:02.140305535 +612 69 0 days 00:00:02.663815790 +612 70 0 days 00:00:04.261718330 +612 71 0 days 00:00:04.848950255 +612 72 0 days 00:00:03.592922335 +612 73 0 days 00:00:01.709762435 +612 74 0 days 00:00:05.009017805 +612 75 0 days 00:00:02.855845035 +612 76 0 days 00:00:02.862270670 +612 77 0 days 00:00:04.341475665 +612 78 0 days 00:00:01.892814595 +612 79 0 days 00:00:03.071687600 +612 80 0 days 00:00:02.113934055 +612 81 0 days 00:00:01.675651276 +612 82 0 days 00:00:01.674639030 +612 83 0 days 00:00:06.232887085 +612 84 0 days 00:00:02.447722660 +612 85 0 days 00:00:02.272710060 +612 86 0 days 00:00:02.598981270 +612 87 0 days 00:00:02.384311735 +612 88 0 days 00:00:01.883527250 +612 89 0 days 00:00:02.903948030 +612 90 0 days 00:00:03.281900563 +612 91 0 days 00:00:04.035012020 +612 92 0 days 00:00:01.687063560 +612 93 0 days 00:00:04.871832700 
+612 94 0 days 00:00:01.679579836 +612 95 0 days 00:00:02.547195772 +612 96 0 days 00:00:01.823277416 +612 97 0 days 00:00:01.849245413 +612 98 0 days 00:00:01.669969985 +612 99 0 days 00:00:05.287442465 +612 100 0 days 00:00:01.811952965 +613 1 0 days 00:00:04.081149090 +613 2 0 days 00:00:03.588380790 +613 3 0 days 00:00:05.070980320 +613 4 0 days 00:00:05.759465000 +613 5 0 days 00:00:03.620658364 +613 6 0 days 00:00:03.020998375 +613 7 0 days 00:00:05.268387250 +613 8 0 days 00:00:09.639816640 +613 9 0 days 00:00:03.821567922 +613 10 0 days 00:00:09.022621140 +613 11 0 days 00:00:03.636944733 +613 12 0 days 00:00:03.032736635 +613 13 0 days 00:00:05.442088445 +613 14 0 days 00:00:03.507601805 +613 15 0 days 00:00:03.432858820 +613 16 0 days 00:00:03.424763851 +613 17 0 days 00:00:06.513396610 +613 18 0 days 00:00:03.618912791 +613 19 0 days 00:00:08.151891540 +613 20 0 days 00:00:12.159715750 +613 21 0 days 00:00:05.960152766 +613 22 0 days 00:00:04.938110740 +613 23 0 days 00:00:06.111711205 +613 24 0 days 00:00:05.837707413 +613 25 0 days 00:00:03.836196493 +613 26 0 days 00:00:03.607205402 +613 27 0 days 00:00:05.858222036 +613 28 0 days 00:00:05.116675568 +613 29 0 days 00:00:05.655703808 +613 30 0 days 00:00:09.631895712 +613 31 0 days 00:00:04.350553569 +613 32 0 days 00:00:05.464826134 +613 33 0 days 00:00:03.549473982 +613 34 0 days 00:00:05.792200296 +613 35 0 days 00:00:05.244901416 +613 36 0 days 00:00:08.819827376 +613 37 0 days 00:00:03.921544157 +613 38 0 days 00:00:03.802318235 +613 39 0 days 00:00:03.282429536 +613 40 0 days 00:00:03.584678862 +613 41 0 days 00:00:11.642091096 +613 42 0 days 00:00:05.434653133 +613 43 0 days 00:00:04.784509645 +613 44 0 days 00:00:05.471018665 +613 45 0 days 00:00:06.140217646 +613 46 0 days 00:00:08.505653425 +613 47 0 days 00:00:09.473001946 +613 48 0 days 00:00:09.096940660 +613 49 0 days 00:00:06.137108320 +613 50 0 days 00:00:04.773737230 +613 51 0 days 00:00:05.279770203 +614 1 0 days 00:00:04.759696580 
+614 2 0 days 00:00:05.133428148 +614 3 0 days 00:00:02.228707183 +614 4 0 days 00:00:05.813569032 +614 5 0 days 00:00:02.786145007 +614 6 0 days 00:00:03.898233627 +614 7 0 days 00:00:03.199229102 +614 8 0 days 00:00:04.381809684 +614 9 0 days 00:00:02.918127648 +614 10 0 days 00:00:03.033597940 +614 11 0 days 00:00:02.647614625 +614 12 0 days 00:00:02.276673800 +614 13 0 days 00:00:02.660237188 +614 14 0 days 00:00:01.862064826 +614 15 0 days 00:00:05.809763847 +614 16 0 days 00:00:02.019475018 +614 17 0 days 00:00:02.521390645 +614 18 0 days 00:00:01.627689665 +614 19 0 days 00:00:02.761862348 +614 20 0 days 00:00:06.200641897 +614 21 0 days 00:00:01.914895171 +614 22 0 days 00:00:02.912606163 +614 23 0 days 00:00:01.953729433 +614 24 0 days 00:00:04.225055875 +614 25 0 days 00:00:02.905081304 +614 26 0 days 00:00:02.152407867 +614 27 0 days 00:00:02.756351803 +614 28 0 days 00:00:03.747593610 +614 29 0 days 00:00:03.865106717 +614 30 0 days 00:00:02.566674550 +614 31 0 days 00:00:04.765357584 +614 32 0 days 00:00:05.417285855 +614 33 0 days 00:00:03.032753588 +614 34 0 days 00:00:06.108977916 +614 35 0 days 00:00:01.989448194 +614 36 0 days 00:00:01.821019685 +614 37 0 days 00:00:01.871794322 +614 38 0 days 00:00:02.857130300 +614 39 0 days 00:00:02.440099400 +614 40 0 days 00:00:01.622796965 +614 41 0 days 00:00:02.965942190 +614 42 0 days 00:00:02.483535585 +614 43 0 days 00:00:01.746941140 +614 44 0 days 00:00:05.027026110 +614 45 0 days 00:00:02.521111880 +614 46 0 days 00:00:04.280113050 +614 47 0 days 00:00:01.819222044 +614 48 0 days 00:00:02.805919983 +614 49 0 days 00:00:05.022823296 +614 50 0 days 00:00:04.422475816 +614 51 0 days 00:00:04.612009596 +614 52 0 days 00:00:03.034031354 +614 53 0 days 00:00:03.793042577 +614 54 0 days 00:00:02.961072974 +614 55 0 days 00:00:05.942019280 +614 56 0 days 00:00:01.887453430 +614 57 0 days 00:00:02.499621461 +614 58 0 days 00:00:05.172312660 +614 59 0 days 00:00:01.786263557 +614 60 0 days 00:00:05.478822813 
+614 61 0 days 00:00:03.162702008 +614 62 0 days 00:00:05.577343166 +614 63 0 days 00:00:02.497942584 +614 64 0 days 00:00:03.578351184 +614 65 0 days 00:00:02.418215770 +614 66 0 days 00:00:04.994954760 +614 67 0 days 00:00:02.698021188 +614 68 0 days 00:00:02.862767676 +614 69 0 days 00:00:03.095318385 +614 70 0 days 00:00:04.217291315 +614 71 0 days 00:00:02.698550055 +614 72 0 days 00:00:04.606771912 +614 73 0 days 00:00:04.893730357 +614 74 0 days 00:00:05.686883404 +614 75 0 days 00:00:04.665275260 +614 76 0 days 00:00:01.966637474 +614 77 0 days 00:00:04.138693790 +614 78 0 days 00:00:03.085458146 +615 1 0 days 00:00:02.924147385 +615 2 0 days 00:00:08.321113676 +615 3 0 days 00:00:07.963996000 +615 4 0 days 00:00:03.833397630 +615 5 0 days 00:00:07.732859590 +615 6 0 days 00:00:06.519149844 +615 7 0 days 00:00:09.117096585 +615 8 0 days 00:00:08.082928186 +615 9 0 days 00:00:04.456702665 +615 10 0 days 00:00:09.627962645 +615 11 0 days 00:00:07.916046970 +615 12 0 days 00:00:06.325167570 +615 13 0 days 00:00:09.778318015 +615 14 0 days 00:00:03.083639570 +615 15 0 days 00:00:06.973161286 +615 16 0 days 00:00:07.958481610 +615 17 0 days 00:00:04.516593600 +615 18 0 days 00:00:03.321070790 +615 19 0 days 00:00:08.968550422 +615 20 0 days 00:00:04.553971840 +615 21 0 days 00:00:09.482431100 +615 22 0 days 00:00:05.212175200 +615 23 0 days 00:00:06.259797530 +615 24 0 days 00:00:04.542534290 +615 25 0 days 00:00:08.046570877 +615 26 0 days 00:00:05.522351325 +615 27 0 days 00:00:04.564043485 +615 28 0 days 00:00:09.572070550 +615 29 0 days 00:00:02.884829110 +615 30 0 days 00:00:03.031253475 +615 31 0 days 00:00:07.833006295 +615 32 0 days 00:00:07.790132830 +615 33 0 days 00:00:05.074077773 +615 34 0 days 00:00:04.161653430 +615 35 0 days 00:00:05.356363452 +615 36 0 days 00:00:07.785961025 +615 37 0 days 00:00:07.826007370 +615 38 0 days 00:00:08.143227716 +615 39 0 days 00:00:03.259179785 +615 40 0 days 00:00:10.066089850 +615 41 0 days 00:00:07.787431425 
+615 42 0 days 00:00:08.575449192 +615 43 0 days 00:00:10.488554850 +615 44 0 days 00:00:03.105256805 +615 45 0 days 00:00:04.588664315 +615 46 0 days 00:00:05.521168075 +615 47 0 days 00:00:03.173521765 +615 48 0 days 00:00:03.599684650 +615 49 0 days 00:00:04.998473104 +615 50 0 days 00:00:06.641526125 +615 51 0 days 00:00:02.987757405 +615 52 0 days 00:00:06.584535865 +615 53 0 days 00:00:09.823358820 +615 54 0 days 00:00:08.354266440 +615 55 0 days 00:00:08.877441231 +615 56 0 days 00:00:07.831806965 +615 57 0 days 00:00:03.117308065 +615 58 0 days 00:00:05.748277456 +615 59 0 days 00:00:07.728652245 +615 60 0 days 00:00:10.492111385 +615 61 0 days 00:00:06.862501175 +616 1 0 days 00:00:08.398117835 +616 2 0 days 00:00:12.469540043 +616 3 0 days 00:00:03.093630020 +616 4 0 days 00:00:05.959352495 +616 5 0 days 00:00:08.511379626 +616 6 0 days 00:00:03.097806930 +616 7 0 days 00:00:08.815975325 +616 8 0 days 00:00:03.372812975 +616 9 0 days 00:00:04.912466740 +616 10 0 days 00:00:03.347688520 +616 11 0 days 00:00:08.163577305 +616 12 0 days 00:00:08.336407750 +616 13 0 days 00:00:03.159675265 +616 14 0 days 00:00:03.397426515 +616 15 0 days 00:00:04.005795070 +616 16 0 days 00:00:05.509027205 +616 17 0 days 00:00:08.175392415 +616 18 0 days 00:00:03.338876873 +616 19 0 days 00:00:08.982278576 +616 20 0 days 00:00:04.968569000 +616 21 0 days 00:00:03.157084190 +616 22 0 days 00:00:05.382754645 +616 23 0 days 00:00:05.381228115 +616 24 0 days 00:00:05.837081005 +616 25 0 days 00:00:03.212700495 +616 26 0 days 00:00:04.782950435 +616 27 0 days 00:00:04.798953675 +616 28 0 days 00:00:03.554072544 +616 29 0 days 00:00:04.762370090 +616 30 0 days 00:00:03.004061475 +616 31 0 days 00:00:09.439740595 +616 32 0 days 00:00:09.552561290 +616 33 0 days 00:00:06.503684825 +616 34 0 days 00:00:03.687228545 +616 35 0 days 00:00:08.433292135 +616 36 0 days 00:00:03.656979570 +616 37 0 days 00:00:03.266314060 +616 38 0 days 00:00:11.368837945 +616 39 0 days 00:00:08.753901796 
+616 40 0 days 00:00:10.134915288 +616 41 0 days 00:00:08.237021435 +616 42 0 days 00:00:03.206033760 +616 43 0 days 00:00:09.756338255 +616 44 0 days 00:00:04.880694485 +616 45 0 days 00:00:05.202250350 +616 46 0 days 00:00:09.267441953 +616 47 0 days 00:00:07.563299070 +616 48 0 days 00:00:04.759488360 +616 49 0 days 00:00:03.014488040 +616 50 0 days 00:00:08.549053013 +616 51 0 days 00:00:09.827262830 +616 52 0 days 00:00:08.247830050 +616 53 0 days 00:00:08.242948720 +616 54 0 days 00:00:04.993767015 +616 55 0 days 00:00:07.430638955 +616 56 0 days 00:00:06.918991615 +616 57 0 days 00:00:04.756089835 +616 58 0 days 00:00:03.538960680 +616 59 0 days 00:00:08.330266825 +616 60 0 days 00:00:08.401757815 +616 61 0 days 00:00:08.706656520 +616 62 0 days 00:00:06.506166906 +616 63 0 days 00:00:04.212311205 +616 64 0 days 00:00:07.796997733 +616 65 0 days 00:00:07.084651805 +616 66 0 days 00:00:09.674322796 +616 67 0 days 00:00:05.033427300 +617 1 0 days 00:00:02.327607890 +617 2 0 days 00:00:02.418713990 +617 3 0 days 00:00:04.657396866 +617 4 0 days 00:00:01.756033505 +617 5 0 days 00:00:02.410048480 +617 6 0 days 00:00:01.712628340 +617 7 0 days 00:00:02.311477230 +617 8 0 days 00:00:02.381511470 +617 9 0 days 00:00:02.583613450 +617 10 0 days 00:00:02.693837705 +617 11 0 days 00:00:02.257780920 +617 12 0 days 00:00:02.931196305 +617 13 0 days 00:00:01.541493605 +617 14 0 days 00:00:02.166077290 +617 15 0 days 00:00:01.585843476 +617 16 0 days 00:00:03.945285215 +617 17 0 days 00:00:04.912916875 +617 18 0 days 00:00:03.620794170 +617 19 0 days 00:00:02.385437135 +617 20 0 days 00:00:01.578772550 +617 21 0 days 00:00:02.680283730 +617 22 0 days 00:00:02.558314416 +617 23 0 days 00:00:01.717872650 +617 24 0 days 00:00:02.024759864 +617 25 0 days 00:00:03.950642725 +617 26 0 days 00:00:01.541739970 +617 27 0 days 00:00:01.657831955 +617 28 0 days 00:00:04.330941592 +617 29 0 days 00:00:02.573413395 +617 30 0 days 00:00:04.621851225 +617 31 0 days 00:00:04.051172165 
+617 32 0 days 00:00:04.945808630 +617 33 0 days 00:00:01.673609944 +617 34 0 days 00:00:04.087226115 +617 35 0 days 00:00:04.093583020 +617 36 0 days 00:00:02.985321988 +617 37 0 days 00:00:03.067646425 +617 38 0 days 00:00:03.665197766 +617 39 0 days 00:00:02.346502515 +617 40 0 days 00:00:04.255915275 +617 41 0 days 00:00:01.551864260 +617 42 0 days 00:00:04.621732945 +617 43 0 days 00:00:04.092854595 +617 44 0 days 00:00:02.710837275 +617 45 0 days 00:00:02.305302755 +617 46 0 days 00:00:02.300359355 +617 47 0 days 00:00:02.649713155 +617 48 0 days 00:00:03.619637800 +617 49 0 days 00:00:03.253233873 +617 50 0 days 00:00:03.976077175 +617 51 0 days 00:00:03.947810985 +617 52 0 days 00:00:02.854659204 +617 53 0 days 00:00:02.315502245 +617 54 0 days 00:00:05.058514565 +617 55 0 days 00:00:01.570748188 +617 56 0 days 00:00:02.470665764 +617 57 0 days 00:00:02.140195700 +617 58 0 days 00:00:02.811015880 +617 59 0 days 00:00:01.823655030 +617 60 0 days 00:00:03.761905706 +617 61 0 days 00:00:02.583922215 +617 62 0 days 00:00:02.800488360 +617 63 0 days 00:00:02.400908430 +617 64 0 days 00:00:05.456505992 +617 65 0 days 00:00:02.379991720 +617 66 0 days 00:00:02.412014375 +617 67 0 days 00:00:02.376758065 +617 68 0 days 00:00:01.902632170 +617 69 0 days 00:00:02.327133465 +617 70 0 days 00:00:01.680700520 +617 71 0 days 00:00:01.661427160 +617 72 0 days 00:00:01.606178056 +617 73 0 days 00:00:02.650621015 +617 74 0 days 00:00:04.309012973 +617 75 0 days 00:00:02.504183053 +617 76 0 days 00:00:02.376596500 +617 77 0 days 00:00:05.304420055 +617 78 0 days 00:00:02.105917185 +617 79 0 days 00:00:03.245669190 +617 80 0 days 00:00:01.625804720 +617 81 0 days 00:00:03.045726365 +617 82 0 days 00:00:01.864669780 +617 83 0 days 00:00:02.339957705 +617 84 0 days 00:00:01.802863876 +617 85 0 days 00:00:01.501164555 +617 86 0 days 00:00:04.137731170 +617 87 0 days 00:00:03.952528780 +617 88 0 days 00:00:03.974894460 +617 89 0 days 00:00:01.863033495 +617 90 0 days 
00:00:02.320076670 +617 91 0 days 00:00:04.189184865 +617 92 0 days 00:00:02.945980910 +617 93 0 days 00:00:04.597439765 +617 94 0 days 00:00:02.372834215 +617 95 0 days 00:00:01.672210265 +617 96 0 days 00:00:01.998930320 +617 97 0 days 00:00:01.675529345 +617 98 0 days 00:00:02.319568905 +617 99 0 days 00:00:02.556257585 +617 100 0 days 00:00:02.091221586 +618 1 0 days 00:00:04.417464630 +618 2 0 days 00:00:04.212292290 +618 3 0 days 00:00:02.558288810 +618 4 0 days 00:00:01.601390440 +618 5 0 days 00:00:02.412801440 +618 6 0 days 00:00:02.138429945 +618 7 0 days 00:00:02.179588090 +618 8 0 days 00:00:02.851616100 +618 9 0 days 00:00:01.655111615 +618 10 0 days 00:00:05.123687045 +618 11 0 days 00:00:04.346555613 +618 12 0 days 00:00:04.960485165 +618 13 0 days 00:00:04.294938245 +618 14 0 days 00:00:02.658800580 +618 15 0 days 00:00:03.396919140 +618 16 0 days 00:00:02.552275706 +618 17 0 days 00:00:01.874326625 +618 18 0 days 00:00:04.251034755 +618 19 0 days 00:00:04.957890040 +618 20 0 days 00:00:05.452683795 +618 21 0 days 00:00:04.709774676 +618 22 0 days 00:00:02.415912800 +618 23 0 days 00:00:04.139863940 +618 24 0 days 00:00:03.558047908 +618 25 0 days 00:00:04.680551876 +618 26 0 days 00:00:02.684082025 +618 27 0 days 00:00:02.578597170 +618 28 0 days 00:00:05.751510100 +618 29 0 days 00:00:04.138948150 +618 30 0 days 00:00:02.765812480 +618 31 0 days 00:00:02.430835320 +618 32 0 days 00:00:01.714591730 +618 33 0 days 00:00:02.935623260 +618 34 0 days 00:00:02.853754184 +618 35 0 days 00:00:02.519347630 +618 36 0 days 00:00:02.426406970 +618 37 0 days 00:00:01.591865620 +618 38 0 days 00:00:01.696990070 +618 39 0 days 00:00:03.870197310 +618 40 0 days 00:00:02.911223803 +618 41 0 days 00:00:02.632602888 +618 42 0 days 00:00:02.383336710 +618 43 0 days 00:00:01.989075930 +618 44 0 days 00:00:01.625697770 +618 45 0 days 00:00:05.112510712 +618 46 0 days 00:00:02.663038456 +618 47 0 days 00:00:01.916753888 +618 48 0 days 00:00:03.500113400 +618 49 0 days 
00:00:05.422977855 +618 50 0 days 00:00:02.553744250 +618 51 0 days 00:00:02.491790795 +618 52 0 days 00:00:01.997926060 +618 53 0 days 00:00:04.766350760 +618 54 0 days 00:00:03.381853280 +618 55 0 days 00:00:02.939213435 +618 56 0 days 00:00:02.617000930 +618 57 0 days 00:00:03.115822145 +618 58 0 days 00:00:02.446601030 +618 59 0 days 00:00:02.857380844 +618 60 0 days 00:00:02.275414485 +618 61 0 days 00:00:02.405357775 +618 62 0 days 00:00:04.266213020 +618 63 0 days 00:00:03.983398886 +618 64 0 days 00:00:01.769953290 +618 65 0 days 00:00:01.787384830 +618 66 0 days 00:00:05.351365085 +618 67 0 days 00:00:01.618645455 +618 68 0 days 00:00:02.216865585 +618 69 0 days 00:00:04.435883245 +618 70 0 days 00:00:02.599243365 +618 71 0 days 00:00:01.619496536 +618 72 0 days 00:00:04.144456745 +618 73 0 days 00:00:01.598183365 +618 74 0 days 00:00:01.560297675 +618 75 0 days 00:00:02.658909966 +618 76 0 days 00:00:02.549143440 +618 77 0 days 00:00:02.151768695 +618 78 0 days 00:00:02.893078365 +618 79 0 days 00:00:04.217202650 +618 80 0 days 00:00:01.598480035 +618 81 0 days 00:00:04.788183065 +618 82 0 days 00:00:02.060459330 +618 83 0 days 00:00:02.428209250 +618 84 0 days 00:00:02.287312225 +618 85 0 days 00:00:02.447766550 +618 86 0 days 00:00:04.287790295 +618 87 0 days 00:00:04.318981893 +618 88 0 days 00:00:04.796763200 +618 89 0 days 00:00:03.936547886 +618 90 0 days 00:00:04.341374770 +618 91 0 days 00:00:04.281310180 +618 92 0 days 00:00:04.978616750 +618 93 0 days 00:00:03.843476165 +618 94 0 days 00:00:02.816338745 +618 95 0 days 00:00:02.582365405 +618 96 0 days 00:00:01.474487420 +618 97 0 days 00:00:02.415118345 +618 98 0 days 00:00:02.333312508 +618 99 0 days 00:00:02.459731725 +618 100 0 days 00:00:04.556566143 +619 1 0 days 00:02:11.635916150 +619 2 0 days 00:01:04.919907510 +619 3 0 days 00:01:10.645901824 +620 1 0 days 00:01:14.835772563 +620 2 0 days 00:01:18.095570202 +620 3 0 days 00:00:21.953457232 +620 4 0 days 00:01:18.719282886 +621 2 0 days 
00:02:16.392130167 +622 1 0 days 00:01:22.303894805 +622 3 0 days 00:00:26.669240774 +623 1 0 days 00:00:39.853487260 +623 2 0 days 00:01:09.669877817 +623 3 0 days 00:01:11.788875668 +624 1 0 days 00:01:24.796390140 +625 1 0 days 00:02:17.974852328 +625 2 0 days 00:00:48.836593400 +625 3 0 days 00:00:44.453931371 +625 4 0 days 00:01:55.611190488 +625 5 0 days 00:00:38.786821328 +625 6 0 days 00:00:22.620122171 +625 7 0 days 00:01:12.754203330 +625 8 0 days 00:00:25.300543911 +625 9 0 days 00:00:39.400511487 +625 10 0 days 00:01:10.189466962 +625 11 0 days 00:00:56.142574883 +625 12 0 days 00:01:13.712724150 +625 13 0 days 00:01:09.951849712 +625 14 0 days 00:01:01.059313464 +625 15 0 days 00:00:53.104222192 +625 16 0 days 00:00:25.727199557 +626 1 0 days 00:00:22.046858900 +626 2 0 days 00:01:04.414422015 +626 3 0 days 00:00:15.189244320 +626 4 0 days 00:00:23.252590626 +626 5 0 days 00:00:32.830068392 +626 6 0 days 00:00:17.612585018 +626 7 0 days 00:00:24.555283942 +626 8 0 days 00:00:40.811036250 +626 9 0 days 00:00:37.741324657 +626 10 0 days 00:00:14.019890891 +626 11 0 days 00:00:17.366132200 +626 12 0 days 00:00:17.774969312 +626 13 0 days 00:00:50.309075925 +626 14 0 days 00:00:21.838682700 +626 15 0 days 00:00:23.708513928 +626 16 0 days 00:00:23.248128600 +626 17 0 days 00:00:51.217226975 +626 18 0 days 00:00:17.622862350 +626 19 0 days 00:00:20.899981137 +626 20 0 days 00:00:14.229381542 +626 21 0 days 00:00:29.119792280 +626 22 0 days 00:00:20.234504081 +626 23 0 days 00:00:49.400835854 +626 24 0 days 00:00:29.734753214 +626 25 0 days 00:00:35.193967775 +626 26 0 days 00:00:14.102208130 +626 27 0 days 00:00:23.898323633 +626 28 0 days 00:00:12.916416862 +626 29 0 days 00:00:30.579669216 +626 30 0 days 00:00:17.414461485 +626 31 0 days 00:00:46.377103400 +626 32 0 days 00:00:16.910645085 +626 33 0 days 00:00:25.666322042 +626 34 0 days 00:00:16.925681700 +626 35 0 days 00:00:31.119697000 +627 1 0 days 00:00:21.982516825 +627 2 0 days 00:00:38.182878528 
+627 3 0 days 00:00:34.733158281 +627 4 0 days 00:00:57.313410776 +627 5 0 days 00:00:40.989671510 +627 6 0 days 00:00:33.663669066 +627 7 0 days 00:01:56.713501230 +627 8 0 days 00:01:57.612596642 +627 9 0 days 00:00:37.101183950 +627 10 0 days 00:01:08.938158483 +627 11 0 days 00:00:44.904411971 +627 12 0 days 00:01:25.882515966 +627 13 0 days 00:00:26.517649275 +627 14 0 days 00:01:05.337582062 +627 15 0 days 00:01:38.012620913 +627 16 0 days 00:00:54.777383285 +627 17 0 days 00:00:50.995625100 +628 1 0 days 00:00:24.279823700 +628 2 0 days 00:00:39.950589988 +628 3 0 days 00:00:13.186545600 +628 4 0 days 00:00:28.388258033 +628 5 0 days 00:00:15.264325371 +628 6 0 days 00:00:17.348636907 +628 7 0 days 00:00:17.900743014 +628 8 0 days 00:00:24.458680728 +628 9 0 days 00:00:34.990412750 +628 10 0 days 00:00:34.305132571 +628 11 0 days 00:00:24.134513788 +628 12 0 days 00:00:22.047211383 +628 13 0 days 00:00:23.372572328 +628 14 0 days 00:00:30.094775137 +628 15 0 days 00:00:20.994212144 +628 16 0 days 00:01:06.919526428 +628 17 0 days 00:00:31.412183840 +628 18 0 days 00:00:18.004065828 +628 19 0 days 00:00:35.320997085 +628 20 0 days 00:00:50.752176842 +628 21 0 days 00:00:28.108809466 +628 22 0 days 00:00:25.733785937 +628 23 0 days 00:00:24.157243614 +628 24 0 days 00:00:30.694930660 +628 25 0 days 00:00:37.638497533 +628 26 0 days 00:00:27.186244175 +628 27 0 days 00:00:14.648398181 +628 28 0 days 00:00:20.262377650 +628 29 0 days 00:00:30.417466653 +628 30 0 days 00:00:29.212798837 +628 31 0 days 00:00:13.489671330 +628 32 0 days 00:00:29.956876644 +628 33 0 days 00:00:30.836396685 +628 34 0 days 00:00:35.926412323 +629 1 0 days 00:01:12.875968262 +629 2 0 days 00:01:58.391828764 +629 3 0 days 00:00:55.725040557 +629 4 0 days 00:03:10.702341825 +629 5 0 days 00:00:36.803037146 +630 1 0 days 00:00:23.670606689 +630 2 0 days 00:00:34.630577890 +630 3 0 days 00:00:46.816739340 +630 4 0 days 00:00:20.464326143 +630 5 0 days 00:00:34.392987278 +630 6 0 days 
00:01:23.014874244 +630 7 0 days 00:00:25.498130125 +630 8 0 days 00:01:16.997216891 +630 9 0 days 00:00:43.857381188 +630 10 0 days 00:00:24.416105065 +631 1 0 days 00:01:49.991897878 +631 2 0 days 00:00:41.173544428 +631 3 0 days 00:00:47.542199142 +631 4 0 days 00:00:46.752553263 +631 5 0 days 00:01:00.790104678 +631 6 0 days 00:00:52.226606533 +631 7 0 days 00:00:56.269323041 +631 8 0 days 00:01:03.149880013 +631 9 0 days 00:00:30.799120816 +631 10 0 days 00:01:07.010359100 +631 11 0 days 00:00:34.458644406 +632 1 0 days 00:01:43.562206435 +632 2 0 days 00:00:17.365688477 +632 3 0 days 00:00:31.581309285 +632 4 0 days 00:00:20.726595371 +632 5 0 days 00:00:12.914647928 +632 6 0 days 00:00:29.952163410 +632 7 0 days 00:00:36.059497875 +632 8 0 days 00:00:26.923079896 +632 9 0 days 00:00:46.731630537 +632 10 0 days 00:00:42.654609268 +632 11 0 days 00:01:41.332596620 +632 12 0 days 00:00:21.929758511 +632 13 0 days 00:00:44.288332932 +633 1 0 days 00:00:28.940540755 +633 2 0 days 00:00:13.858821090 +633 3 0 days 00:00:13.723496368 +633 4 0 days 00:00:26.193373412 +633 5 0 days 00:00:14.026864345 +633 6 0 days 00:00:09.768595506 +633 7 0 days 00:00:24.980338515 +633 8 0 days 00:00:26.062874990 +633 9 0 days 00:00:14.409952290 +633 10 0 days 00:00:24.447338545 +633 11 0 days 00:00:14.614490365 +633 12 0 days 00:00:09.180385680 +633 13 0 days 00:00:26.501461616 +633 14 0 days 00:00:14.517221525 +633 15 0 days 00:00:26.280421350 +633 16 0 days 00:00:14.694660760 +633 17 0 days 00:00:16.108633245 +633 18 0 days 00:00:09.781372992 +633 19 0 days 00:00:15.308659492 +634 1 0 days 00:00:16.665092777 +634 2 0 days 00:00:28.986110654 +634 3 0 days 00:00:27.668885232 +634 4 0 days 00:00:25.695668005 +634 5 0 days 00:00:10.450926660 +634 6 0 days 00:00:15.848463385 +634 7 0 days 00:00:15.886834196 +634 8 0 days 00:00:15.754445704 +634 9 0 days 00:00:15.778480336 +634 10 0 days 00:00:25.807470365 +634 11 0 days 00:00:27.210342050 +634 12 0 days 00:00:29.501672147 +634 13 0 
days 00:00:09.451258415 +634 14 0 days 00:00:14.788770465 +634 15 0 days 00:00:15.617556092 +634 16 0 days 00:00:11.327479325 +634 17 0 days 00:00:10.224027895 +634 18 0 days 00:00:10.178984020 +634 19 0 days 00:00:10.318840824 +635 1 0 days 00:00:05.606542450 +635 2 0 days 00:00:04.466319540 +635 3 0 days 00:00:12.423936440 +635 4 0 days 00:00:12.564948475 +635 5 0 days 00:00:04.981486877 +635 6 0 days 00:00:08.243999875 +635 7 0 days 00:00:12.154195295 +635 8 0 days 00:00:12.450897375 +635 9 0 days 00:00:09.743477095 +635 10 0 days 00:00:12.683846175 +635 11 0 days 00:00:05.545534025 +635 12 0 days 00:00:07.668793396 +635 13 0 days 00:00:05.030439005 +635 14 0 days 00:00:13.344690356 +635 15 0 days 00:00:07.151467460 +635 16 0 days 00:00:12.975691388 +635 17 0 days 00:00:13.189936256 +635 18 0 days 00:00:12.464298140 +635 19 0 days 00:00:07.873211192 +635 20 0 days 00:00:04.771326893 +635 21 0 days 00:00:07.208979255 +635 22 0 days 00:00:09.748112705 +635 23 0 days 00:00:11.027026466 +635 24 0 days 00:00:13.655406571 +635 25 0 days 00:00:07.298171395 +635 26 0 days 00:00:13.671347791 +635 27 0 days 00:00:05.422758204 +635 28 0 days 00:00:06.483086586 +635 29 0 days 00:00:04.918450568 +635 30 0 days 00:00:05.452810126 +635 31 0 days 00:00:07.305179795 +635 32 0 days 00:00:07.268501745 +635 33 0 days 00:00:04.706976166 +635 34 0 days 00:00:07.792073332 +635 35 0 days 00:00:04.924637716 +635 36 0 days 00:00:13.614037660 +635 37 0 days 00:00:07.609421280 +635 38 0 days 00:00:12.067613830 +635 39 0 days 00:00:06.361534233 +635 40 0 days 00:00:06.757897194 +635 41 0 days 00:00:04.581519415 +635 42 0 days 00:00:12.223702650 +636 1 0 days 00:00:12.995003650 +636 2 0 days 00:00:13.764225024 +636 3 0 days 00:00:08.364342440 +636 4 0 days 00:00:04.724942485 +636 5 0 days 00:00:08.621660331 +636 6 0 days 00:00:08.151464932 +636 7 0 days 00:00:09.355842787 +636 8 0 days 00:00:05.405671897 +636 9 0 days 00:00:13.085183975 +636 10 0 days 00:00:15.206473617 +636 11 0 days 
00:00:06.151498300 +636 12 0 days 00:00:05.730881648 +636 13 0 days 00:00:12.279520170 +636 14 0 days 00:00:14.312384988 +636 15 0 days 00:00:07.813576840 +636 16 0 days 00:00:13.097229185 +636 17 0 days 00:00:05.174441553 +636 18 0 days 00:00:09.572744794 +636 19 0 days 00:00:05.290069833 +636 20 0 days 00:00:07.782176192 +636 21 0 days 00:00:15.361927029 +636 22 0 days 00:00:04.611446065 +636 23 0 days 00:00:13.368772550 +636 24 0 days 00:00:12.915530745 +636 25 0 days 00:00:07.554400490 +636 26 0 days 00:00:05.203270165 +636 27 0 days 00:00:13.121252240 +636 28 0 days 00:00:04.893863220 +636 29 0 days 00:00:13.144771870 +636 30 0 days 00:00:07.574531380 +636 31 0 days 00:00:08.616507600 +636 32 0 days 00:00:12.980381570 +636 33 0 days 00:00:13.162200615 +636 34 0 days 00:00:13.107447400 +637 1 0 days 00:00:16.056942600 +637 2 0 days 00:00:09.515345675 +637 3 0 days 00:00:26.403882300 +637 4 0 days 00:00:09.991877376 +637 5 0 days 00:00:11.104861911 +637 6 0 days 00:00:15.069092250 +637 7 0 days 00:00:14.664852900 +637 8 0 days 00:00:14.633352895 +637 9 0 days 00:00:26.642306440 +637 10 0 days 00:00:16.049505320 +637 11 0 days 00:00:15.220018325 +637 12 0 days 00:00:11.132604485 +637 13 0 days 00:00:27.430338388 +637 14 0 days 00:00:15.305606890 +637 15 0 days 00:00:17.414224183 +637 16 0 days 00:00:11.403537360 +637 17 0 days 00:00:11.676332905 +637 18 0 days 00:00:23.567080506 +637 19 0 days 00:00:09.706783032 +637 20 0 days 00:00:27.715491516 +637 21 0 days 00:00:28.025464996 +638 1 0 days 00:00:15.566834171 +638 2 0 days 00:00:08.343173543 +638 3 0 days 00:00:13.077745090 +638 4 0 days 00:00:04.884720620 +638 5 0 days 00:00:08.088751350 +638 6 0 days 00:00:05.829380463 +638 7 0 days 00:00:05.535289025 +638 8 0 days 00:00:12.755678760 +638 9 0 days 00:00:14.319261373 +638 10 0 days 00:00:04.666188655 +638 11 0 days 00:00:08.474703145 +638 12 0 days 00:00:13.859450912 +638 13 0 days 00:00:09.173239194 +638 14 0 days 00:00:07.533868255 +638 15 0 days 
00:00:14.895372774 +638 16 0 days 00:00:14.713523273 +638 17 0 days 00:00:09.047343876 +638 18 0 days 00:00:08.230086995 +638 19 0 days 00:00:13.992411340 +638 20 0 days 00:00:04.823478030 +638 21 0 days 00:00:08.143201260 +638 22 0 days 00:00:09.166623857 +638 23 0 days 00:00:05.697928700 +638 24 0 days 00:00:05.802871527 +638 25 0 days 00:00:08.136988124 +638 26 0 days 00:00:10.498055680 +638 27 0 days 00:00:14.675191590 +638 28 0 days 00:00:07.661018635 +638 29 0 days 00:00:08.149164808 +638 30 0 days 00:00:05.727385697 +638 31 0 days 00:00:07.760284570 +638 32 0 days 00:00:05.249471556 +638 33 0 days 00:00:04.843026590 +638 34 0 days 00:00:09.262546522 +639 1 0 days 00:00:26.445125808 +639 2 0 days 00:00:35.565997111 +639 3 0 days 00:00:19.828888880 +639 4 0 days 00:00:09.532106845 +639 5 0 days 00:00:24.736763530 +639 6 0 days 00:00:15.694016805 +639 7 0 days 00:00:12.917285776 +639 8 0 days 00:00:34.713503766 +639 9 0 days 00:00:24.676044015 +639 10 0 days 00:00:30.924411805 +640 1 0 days 00:00:14.569217520 +640 2 0 days 00:00:11.649725400 +640 3 0 days 00:00:07.541679895 +640 4 0 days 00:00:14.775451920 +640 5 0 days 00:00:16.466172089 +640 6 0 days 00:00:06.260169396 +640 7 0 days 00:00:06.688455030 +640 8 0 days 00:00:11.204538052 +640 9 0 days 00:00:07.383140960 +640 10 0 days 00:00:04.643519800 +640 11 0 days 00:00:06.108524525 +640 12 0 days 00:00:07.623636370 +640 13 0 days 00:00:04.768658285 +640 14 0 days 00:00:05.569925830 +640 15 0 days 00:00:09.840825576 +640 16 0 days 00:00:13.539514490 +640 17 0 days 00:00:08.868066035 +640 18 0 days 00:00:06.227266581 +640 19 0 days 00:00:18.573984835 +640 20 0 days 00:00:09.719652524 +640 21 0 days 00:00:13.382053595 +640 22 0 days 00:00:07.703153176 +640 23 0 days 00:00:07.535779456 +640 24 0 days 00:00:16.610891516 +640 25 0 days 00:00:07.564493975 +640 26 0 days 00:00:04.631575210 +640 27 0 days 00:00:13.202009325 +641 1 0 days 00:00:35.461236728 +641 2 0 days 00:00:58.289285275 +641 3 0 days 
00:00:33.351002035 +641 4 0 days 00:01:09.532235874 +641 5 0 days 00:00:33.844327700 +641 6 0 days 00:00:21.273524415 +641 7 0 days 00:00:23.614826183 +641 8 0 days 00:00:21.374966395 +641 9 0 days 00:00:33.899688035 +642 1 0 days 00:00:59.005879715 +642 2 0 days 00:00:18.750133526 +642 3 0 days 00:00:58.989027900 +642 4 0 days 00:01:00.394962640 +642 5 0 days 00:00:22.659920920 +642 6 0 days 00:00:33.683048550 +642 7 0 days 00:00:19.347955606 +642 8 0 days 00:00:35.164462355 +642 9 0 days 00:00:34.072159740 +642 10 0 days 00:00:59.110413540 +642 11 0 days 00:00:33.639939680 +642 12 0 days 00:00:59.091794325 +643 1 0 days 00:00:36.829977995 +643 2 0 days 00:00:21.756049030 +643 3 0 days 00:00:39.108246760 +643 4 0 days 00:00:36.752482710 +643 5 0 days 00:00:15.422209923 +643 6 0 days 00:00:44.165360026 +643 7 0 days 00:00:36.786994350 +643 8 0 days 00:00:44.709891354 +643 9 0 days 00:00:23.186415420 +644 1 0 days 00:00:39.208284748 +644 2 0 days 00:00:14.181704950 +644 3 0 days 00:00:13.494018350 +644 4 0 days 00:00:36.929722045 +644 5 0 days 00:00:15.053816424 +644 6 0 days 00:00:20.967541620 +644 7 0 days 00:00:39.323783680 +644 8 0 days 00:00:36.947578970 +644 9 0 days 00:00:14.478440503 +644 10 0 days 00:00:22.273272796 +644 11 0 days 00:00:20.945027545 +644 12 0 days 00:00:15.353795748 +644 13 0 days 00:00:13.427963680 +644 14 0 days 00:00:13.884818408 +644 15 0 days 00:00:23.770977322 +644 16 0 days 00:00:36.861816635 +645 1 0 days 00:01:06.232620508 +645 2 0 days 00:00:40.232527607 +645 3 0 days 00:00:24.499649630 +645 4 0 days 00:01:04.787874933 +645 5 0 days 00:00:25.937379411 +645 6 0 days 00:00:24.115327427 +646 1 0 days 00:00:15.114952120 +646 2 0 days 00:00:14.452039930 +646 3 0 days 00:00:43.232004022 +646 4 0 days 00:00:14.435613673 +646 5 0 days 00:00:44.538451198 +646 6 0 days 00:00:21.622420535 +646 7 0 days 00:00:21.108832840 +646 8 0 days 00:00:23.046249860 +646 9 0 days 00:00:22.442392556 +646 10 0 days 00:00:21.100364875 +646 11 0 days 
00:00:39.127249192 +647 1 0 days 00:00:00.164764946 +647 3 0 days 00:00:00.145058533 +647 4 0 days 00:00:00.097561246 +647 5 0 days 00:00:00.117459793 +647 6 0 days 00:00:00.116471493 +647 7 0 days 00:00:00.122206130 +647 8 0 days 00:00:00.095028500 +647 9 0 days 00:00:00.175126913 +647 10 0 days 00:00:00.140268525 +647 11 0 days 00:00:00.097668553 +647 12 0 days 00:00:00.167431746 +647 13 0 days 00:00:00.146074620 +647 14 0 days 00:00:00.117442346 +647 15 0 days 00:00:00.116735493 +647 16 0 days 00:00:00.111497253 +647 17 0 days 00:00:00.131075900 +647 18 0 days 00:00:00.145224413 +647 19 0 days 00:00:00.097593140 +647 20 0 days 00:00:00.121807995 +647 21 0 days 00:00:00.108236660 +647 22 0 days 00:00:00.096041433 +647 23 0 days 00:00:00.112661626 +647 24 0 days 00:00:00.113341806 +647 25 0 days 00:00:00.112430900 +647 27 0 days 00:00:00.143512580 +647 29 0 days 00:00:00.114634113 +647 30 0 days 00:00:00.093009493 +647 31 0 days 00:00:00.114912460 +647 32 0 days 00:00:00.094172266 +647 33 0 days 00:00:00.094497773 +647 35 0 days 00:00:00.176575220 +647 36 0 days 00:00:00.123401610 +647 37 0 days 00:00:00.094738293 +647 38 0 days 00:00:00.095623133 +647 39 0 days 00:00:00.115512593 +647 40 0 days 00:00:00.130227440 +647 41 0 days 00:00:00.144570733 +647 42 0 days 00:00:00.094620606 +647 44 0 days 00:00:00.148213386 +647 45 0 days 00:00:00.112105133 +647 46 0 days 00:00:00.098414586 +647 47 0 days 00:00:00.112420740 +647 48 0 days 00:00:00.137684570 +647 49 0 days 00:00:00.112187120 +647 50 0 days 00:00:00.119294837 +647 51 0 days 00:00:00.149015946 +647 52 0 days 00:00:00.150082000 +647 53 0 days 00:00:00.100415453 +647 54 0 days 00:00:00.099213673 +647 55 0 days 00:00:00.098109193 +647 56 0 days 00:00:00.110903660 +647 57 0 days 00:00:00.112697566 +647 58 0 days 00:00:00.139599292 +647 59 0 days 00:00:00.111397640 +647 60 0 days 00:00:00.113386733 +647 61 0 days 00:00:00.133848530 +647 62 0 days 00:00:00.122328360 +647 63 0 days 00:00:00.113088766 +647 64 0 days 
00:00:00.094042040 +647 65 0 days 00:00:00.112749040 +647 66 0 days 00:00:00.162404695 +647 67 0 days 00:00:00.116417380 +647 68 0 days 00:00:00.113475893 +647 69 0 days 00:00:00.104895730 +647 70 0 days 00:00:00.112114360 +647 71 0 days 00:00:00.173255066 +647 72 0 days 00:00:00.146594884 +647 73 0 days 00:00:00.110807020 +647 74 0 days 00:00:00.124868625 +647 75 0 days 00:00:00.103700533 +647 76 0 days 00:00:00.110056500 +647 77 0 days 00:00:00.184488428 +647 78 0 days 00:00:00.126079625 +647 79 0 days 00:00:00.114757660 +647 80 0 days 00:00:00.121362846 +647 81 0 days 00:00:00.095208786 +647 82 0 days 00:00:00.096043200 +647 83 0 days 00:00:00.112913206 +647 84 0 days 00:00:00.095277493 +647 86 0 days 00:00:00.145954426 +647 87 0 days 00:00:00.122994925 +647 88 0 days 00:00:00.136698480 +647 90 0 days 00:00:00.094471413 +647 91 0 days 00:00:00.180611800 +647 92 0 days 00:00:00.132810260 +647 93 0 days 00:00:00.159228345 +647 94 0 days 00:00:00.157962500 +647 95 0 days 00:00:00.109672828 +647 96 0 days 00:00:00.132152375 +647 97 0 days 00:00:00.129338168 +647 98 0 days 00:00:00.113146493 +647 99 0 days 00:00:00.169080360 +647 100 0 days 00:00:00.163291765 +648 1 0 days 00:00:00.141001377 +648 2 0 days 00:00:00.149653140 +648 3 0 days 00:00:00.096459100 +648 4 0 days 00:00:00.130140153 +648 5 0 days 00:00:00.151729285 +648 6 0 days 00:00:00.113102300 +648 7 0 days 00:00:00.145509276 +648 8 0 days 00:00:00.133975706 +648 9 0 days 00:00:00.117295166 +648 10 0 days 00:00:00.134614626 +648 11 0 days 00:00:00.153038453 +648 12 0 days 00:00:00.098624686 +648 13 0 days 00:00:00.149145633 +648 14 0 days 00:00:00.138056310 +648 15 0 days 00:00:00.109243376 +648 16 0 days 00:00:00.128308175 +648 17 0 days 00:00:00.155107666 +648 18 0 days 00:00:00.116665060 +648 19 0 days 00:00:00.114955193 +648 20 0 days 00:00:00.117682886 +648 21 0 days 00:00:00.095420466 +648 22 0 days 00:00:00.151323853 +648 23 0 days 00:00:00.133503728 +648 24 0 days 00:00:00.107943855 +648 25 0 days 
00:00:00.128027606 +648 26 0 days 00:00:00.149093686 +648 27 0 days 00:00:00.169564448 +648 28 0 days 00:00:00.114686953 +648 30 0 days 00:00:00.131815056 +648 31 0 days 00:00:00.115612620 +648 34 0 days 00:00:00.130307122 +648 35 0 days 00:00:00.147506853 +648 36 0 days 00:00:00.096247606 +648 37 0 days 00:00:00.131638120 +648 38 0 days 00:00:00.227858548 +648 39 0 days 00:00:00.162983115 +648 40 0 days 00:00:00.108607990 +648 41 0 days 00:00:00.148389326 +648 42 0 days 00:00:00.112205146 +648 43 0 days 00:00:00.145803473 +648 44 0 days 00:00:00.144876333 +648 45 0 days 00:00:00.115113953 +648 47 0 days 00:00:00.114184860 +648 48 0 days 00:00:00.148688100 +648 51 0 days 00:00:00.171581110 +648 52 0 days 00:00:00.194500035 +648 54 0 days 00:00:00.147257320 +648 55 0 days 00:00:00.180194270 +648 56 0 days 00:00:00.145799746 +648 57 0 days 00:00:00.134262880 +648 59 0 days 00:00:00.150456226 +648 60 0 days 00:00:00.132664066 +648 61 0 days 00:00:00.116439680 +648 62 0 days 00:00:00.132338620 +648 63 0 days 00:00:00.109035940 +648 64 0 days 00:00:00.096743760 +648 65 0 days 00:00:00.144993960 +648 66 0 days 00:00:00.113387560 +648 67 0 days 00:00:00.146284653 +648 68 0 days 00:00:00.094714080 +648 69 0 days 00:00:00.184752385 +648 70 0 days 00:00:00.124800990 +648 71 0 days 00:00:00.163141215 +648 73 0 days 00:00:00.143368731 +648 74 0 days 00:00:00.098026733 +648 75 0 days 00:00:00.111000720 +648 76 0 days 00:00:00.109479480 +648 77 0 days 00:00:00.109667940 +648 78 0 days 00:00:00.120869296 +648 79 0 days 00:00:00.172985216 +648 80 0 days 00:00:00.113762206 +648 81 0 days 00:00:00.110044486 +648 82 0 days 00:00:00.095060186 +648 84 0 days 00:00:00.098898526 +648 85 0 days 00:00:00.111026380 +648 87 0 days 00:00:00.129786255 +648 88 0 days 00:00:00.100843346 +648 89 0 days 00:00:00.151619586 +648 90 0 days 00:00:00.131840040 +648 91 0 days 00:00:00.165591340 +648 92 0 days 00:00:00.125262980 +648 93 0 days 00:00:00.127639525 +648 94 0 days 00:00:00.140458680 +648 96 
0 days 00:00:00.149946326 +648 97 0 days 00:00:00.113861906 +648 98 0 days 00:00:00.132101193 +648 99 0 days 00:00:00.131010244 +648 100 0 days 00:00:00.102063140 +649 1 0 days 00:00:00.057479860 +649 2 0 days 00:00:00.093231980 +649 3 0 days 00:00:00.064277253 +649 4 0 days 00:00:00.064111893 +649 5 0 days 00:00:00.088326600 +649 6 0 days 00:00:00.064287700 +649 7 0 days 00:00:00.064555646 +649 8 0 days 00:00:00.072934280 +649 9 0 days 00:00:00.090730850 +649 10 0 days 00:00:00.064479620 +649 11 0 days 00:00:00.054620546 +649 12 0 days 00:00:00.061410032 +649 13 0 days 00:00:00.064520906 +649 14 0 days 00:00:00.082384066 +649 15 0 days 00:00:00.060268850 +649 16 0 days 00:00:00.074404800 +649 17 0 days 00:00:00.098723356 +649 18 0 days 00:00:00.108471512 +649 19 0 days 00:00:00.054871006 +649 20 0 days 00:00:00.083146153 +649 21 0 days 00:00:00.055356193 +649 22 0 days 00:00:00.065482526 +649 23 0 days 00:00:00.094939856 +649 24 0 days 00:00:00.075086580 +649 25 0 days 00:00:00.061854573 +649 26 0 days 00:00:00.060939652 +649 27 0 days 00:00:00.083116746 +649 28 0 days 00:00:00.075430006 +649 29 0 days 00:00:00.069534890 +649 30 0 days 00:00:00.071443085 +649 31 0 days 00:00:00.085573380 +649 32 0 days 00:00:00.085352786 +649 33 0 days 00:00:00.070509690 +649 34 0 days 00:00:00.055773266 +649 35 0 days 00:00:00.080218420 +649 36 0 days 00:00:00.081949533 +649 37 0 days 00:00:00.073927193 +649 38 0 days 00:00:00.065408953 +649 39 0 days 00:00:00.054472573 +649 40 0 days 00:00:00.053439986 +649 41 0 days 00:00:00.093588380 +649 42 0 days 00:00:00.055017573 +649 43 0 days 00:00:00.078796120 +649 44 0 days 00:00:00.056493673 +649 45 0 days 00:00:00.062930506 +649 46 0 days 00:00:00.054488226 +649 47 0 days 00:00:00.063890133 +649 48 0 days 00:00:00.083122906 +649 49 0 days 00:00:00.094942820 +649 50 0 days 00:00:00.054187940 +649 51 0 days 00:00:00.060320266 +649 52 0 days 00:00:00.064510420 +649 53 0 days 00:00:00.086526693 +649 54 0 days 00:00:00.078408266 +649 55 0 
days 00:00:00.054073973 +649 56 0 days 00:00:00.062673506 +649 57 0 days 00:00:00.056285460 +649 58 0 days 00:00:00.080865440 +649 59 0 days 00:00:00.062851226 +649 60 0 days 00:00:00.072603093 +649 61 0 days 00:00:00.081794880 +649 62 0 days 00:00:00.053652140 +649 63 0 days 00:00:00.054418353 +649 64 0 days 00:00:00.089261505 +649 65 0 days 00:00:00.071531812 +649 66 0 days 00:00:00.081044653 +649 67 0 days 00:00:00.084455973 +649 68 0 days 00:00:00.083947873 +649 69 0 days 00:00:00.083568700 +649 70 0 days 00:00:00.066027150 +649 71 0 days 00:00:00.072669928 +649 72 0 days 00:00:00.100536246 +649 73 0 days 00:00:00.094492093 +649 74 0 days 00:00:00.056114493 +649 75 0 days 00:00:00.065260600 +649 76 0 days 00:00:00.056373153 +649 77 0 days 00:00:00.066677126 +649 78 0 days 00:00:00.086559633 +649 79 0 days 00:00:00.065107220 +649 80 0 days 00:00:00.065107000 +649 81 0 days 00:00:00.089691513 +649 82 0 days 00:00:00.055386033 +649 83 0 days 00:00:00.090206525 +649 84 0 days 00:00:00.081269993 +649 85 0 days 00:00:00.083590046 +649 86 0 days 00:00:00.063732613 +649 87 0 days 00:00:00.054741240 +649 88 0 days 00:00:00.097830435 +649 89 0 days 00:00:00.097213976 +649 90 0 days 00:00:00.063712493 +649 91 0 days 00:00:00.074977262 +649 92 0 days 00:00:00.079513046 +649 93 0 days 00:00:00.058741360 +649 94 0 days 00:00:00.075014626 +649 95 0 days 00:00:00.064188313 +649 97 0 days 00:00:00.057751613 +649 98 0 days 00:00:00.087412313 +649 99 0 days 00:00:00.056686913 +649 100 0 days 00:00:00.064072113 +650 1 0 days 00:00:00.068242408 +650 2 0 days 00:00:00.075302746 +650 3 0 days 00:00:00.075298713 +650 4 0 days 00:00:00.054974793 +650 5 0 days 00:00:00.084324740 +650 6 0 days 00:00:00.056448966 +650 7 0 days 00:00:00.066069120 +650 8 0 days 00:00:00.065060893 +650 10 0 days 00:00:00.066081873 +650 11 0 days 00:00:00.056029253 +650 12 0 days 00:00:00.059497580 +650 13 0 days 00:00:00.064811440 +650 14 0 days 00:00:00.065190526 +650 15 0 days 00:00:00.064554020 +650 16 0 
days 00:00:00.099499513 +650 17 0 days 00:00:00.098887665 +650 18 0 days 00:00:00.117629945 +650 19 0 days 00:00:00.073678093 +650 20 0 days 00:00:00.079657264 +650 21 0 days 00:00:00.078837526 +650 23 0 days 00:00:00.055385873 +650 25 0 days 00:00:00.092018105 +650 26 0 days 00:00:00.092049605 +650 27 0 days 00:00:00.085054793 +650 28 0 days 00:00:00.068008624 +650 29 0 days 00:00:00.055676546 +650 30 0 days 00:00:00.096246100 +650 31 0 days 00:00:00.065694993 +650 32 0 days 00:00:00.057555486 +650 33 0 days 00:00:00.063137464 +650 34 0 days 00:00:00.057177600 +650 35 0 days 00:00:00.065953013 +650 36 0 days 00:00:00.084418620 +650 37 0 days 00:00:00.057210120 +650 38 0 days 00:00:00.086251726 +650 39 0 days 00:00:00.062904473 +650 40 0 days 00:00:00.065322693 +650 41 0 days 00:00:00.056071446 +650 42 0 days 00:00:00.063448863 +650 43 0 days 00:00:00.055446413 +650 44 0 days 00:00:00.057574000 +650 45 0 days 00:00:00.064286200 +650 46 0 days 00:00:00.054442320 +650 47 0 days 00:00:00.059104240 +650 48 0 days 00:00:00.056787246 +650 49 0 days 00:00:00.057866560 +650 50 0 days 00:00:00.055118006 +650 51 0 days 00:00:00.066528686 +650 52 0 days 00:00:00.060445605 +650 53 0 days 00:00:00.059490660 +650 54 0 days 00:00:00.086039586 +650 55 0 days 00:00:00.079571935 +650 56 0 days 00:00:00.063457556 +650 57 0 days 00:00:00.084081940 +650 58 0 days 00:00:00.064574266 +650 59 0 days 00:00:00.098665972 +650 60 0 days 00:00:00.057184986 +650 61 0 days 00:00:00.103420450 +650 62 0 days 00:00:00.059523613 +650 63 0 days 00:00:00.086540440 +650 65 0 days 00:00:00.095671850 +650 66 0 days 00:00:00.073455760 +650 67 0 days 00:00:00.109452202 +650 68 0 days 00:00:00.096479732 +650 69 0 days 00:00:00.086179433 +650 70 0 days 00:00:00.058857120 +650 71 0 days 00:00:00.088275773 +650 72 0 days 00:00:00.080813076 +650 73 0 days 00:00:00.065635606 +650 74 0 days 00:00:00.065598113 +650 75 0 days 00:00:00.056402453 +650 76 0 days 00:00:00.057598933 +650 77 0 days 00:00:00.072045885 
+650 78 0 days 00:00:00.055839940 +650 79 0 days 00:00:00.097069673 +650 80 0 days 00:00:00.101693960 +650 81 0 days 00:00:00.056826153 +650 82 0 days 00:00:00.066025126 +650 83 0 days 00:00:00.074045192 +650 84 0 days 00:00:00.088638860 +650 85 0 days 00:00:00.075327633 +650 86 0 days 00:00:00.067926084 +650 87 0 days 00:00:00.108976560 +650 88 0 days 00:00:00.076327813 +650 89 0 days 00:00:00.071427435 +650 90 0 days 00:00:00.073148908 +650 91 0 days 00:00:00.074667424 +650 92 0 days 00:00:00.109484546 +650 93 0 days 00:00:00.086785746 +650 94 0 days 00:00:00.086878266 +650 95 0 days 00:00:00.057850700 +650 96 0 days 00:00:00.066847826 +650 97 0 days 00:00:00.057866893 +650 98 0 days 00:00:00.086206266 +650 99 0 days 00:00:00.076735393 +650 100 0 days 00:00:00.083130260 +651 1 0 days 00:00:00.149013112 +651 2 0 days 00:00:00.187834212 +651 3 0 days 00:00:00.133481317 +651 4 0 days 00:00:00.147266378 +651 5 0 days 00:00:00.158178034 +651 8 0 days 00:00:00.181056948 +651 9 0 days 00:00:00.182507090 +651 10 0 days 00:00:00.184034409 +651 11 0 days 00:00:00.126313565 +651 12 0 days 00:00:00.195745984 +651 15 0 days 00:00:00.148576066 +651 17 0 days 00:00:00.146371736 +651 20 0 days 00:00:00.144857463 +651 21 0 days 00:00:00.143653646 +651 22 0 days 00:00:00.191087592 +651 23 0 days 00:00:00.187235726 +651 24 0 days 00:00:00.122261220 +651 25 0 days 00:00:00.124278322 +651 27 0 days 00:00:00.184593915 +651 28 0 days 00:00:00.121461050 +651 29 0 days 00:00:00.197751736 +651 30 0 days 00:00:00.122110833 +651 31 0 days 00:00:00.146034137 +651 32 0 days 00:00:00.187203535 +651 33 0 days 00:00:00.146995073 +651 34 0 days 00:00:00.143862338 +651 35 0 days 00:00:00.144620070 +651 36 0 days 00:00:00.189712670 +651 37 0 days 00:00:00.122211964 +651 38 0 days 00:00:00.197241938 +651 39 0 days 00:00:00.122769898 +651 40 0 days 00:00:00.199984585 +651 41 0 days 00:00:00.216185382 +651 44 0 days 00:00:00.192608837 +651 46 0 days 00:00:00.180968467 +651 47 0 days 00:00:00.191146329 
+651 48 0 days 00:00:00.140371554 +651 49 0 days 00:00:00.140646424 +651 50 0 days 00:00:00.144295394 +651 51 0 days 00:00:00.177106767 +651 52 0 days 00:00:00.187260953 +651 53 0 days 00:00:00.140539914 +651 55 0 days 00:00:00.146301831 +651 56 0 days 00:00:00.182064126 +651 57 0 days 00:00:00.153899420 +651 58 0 days 00:00:00.150164703 +651 59 0 days 00:00:00.179405090 +651 60 0 days 00:00:00.179131477 +651 62 0 days 00:00:00.198029642 +651 63 0 days 00:00:00.200985753 +651 64 0 days 00:00:00.153023964 +651 65 0 days 00:00:00.190232268 +651 67 0 days 00:00:00.142984436 +651 68 0 days 00:00:00.201128988 +651 71 0 days 00:00:00.121387674 +651 72 0 days 00:00:00.177665735 +651 73 0 days 00:00:00.156099908 +651 74 0 days 00:00:00.196467058 +651 77 0 days 00:00:00.146695991 +651 79 0 days 00:00:00.142806655 +651 82 0 days 00:00:00.182967752 +651 83 0 days 00:00:00.201150334 +651 84 0 days 00:00:00.142041945 +651 85 0 days 00:00:00.201821872 +651 86 0 days 00:00:00.177575267 +651 87 0 days 00:00:00.186759055 +651 90 0 days 00:00:00.145001028 +651 91 0 days 00:00:00.176856292 +651 92 0 days 00:00:00.124396967 +651 93 0 days 00:00:00.108715873 +651 94 0 days 00:00:00.122078744 +651 95 0 days 00:00:00.193844118 +651 97 0 days 00:00:00.161800152 +651 98 0 days 00:00:00.150457551 +651 100 0 days 00:00:00.191428898 +652 1 0 days 00:00:00.078439801 +652 2 0 days 00:00:00.082551651 +652 4 0 days 00:00:00.080262110 +652 5 0 days 00:00:00.106478364 +652 6 0 days 00:00:00.092085965 +652 11 0 days 00:00:00.110825776 +652 14 0 days 00:00:00.102684129 +652 17 0 days 00:00:00.101305415 +652 22 0 days 00:00:00.104681987 +652 23 0 days 00:00:00.104384193 +652 24 0 days 00:00:00.114442676 +652 25 0 days 00:00:00.084019305 +652 26 0 days 00:00:00.106014383 +652 29 0 days 00:00:00.056340466 +652 30 0 days 00:00:00.114316502 +652 31 0 days 00:00:00.112198985 +652 34 0 days 00:00:00.084231741 +652 35 0 days 00:00:00.110330358 +652 37 0 days 00:00:00.102083610 +652 38 0 days 
00:00:00.116696596 +652 41 0 days 00:00:00.113684977 +652 43 0 days 00:00:00.082446387 +652 45 0 days 00:00:00.078811210 +652 46 0 days 00:00:00.117328464 +652 54 0 days 00:00:00.106887116 +652 56 0 days 00:00:00.054446013 +652 57 0 days 00:00:00.109752471 +652 59 0 days 00:00:00.114596376 +652 61 0 days 00:00:00.080530432 +652 65 0 days 00:00:00.101851730 +652 66 0 days 00:00:00.126499826 +652 67 0 days 00:00:00.120329956 +652 69 0 days 00:00:00.114397158 +652 71 0 days 00:00:00.102722955 +652 72 0 days 00:00:00.118175693 +652 74 0 days 00:00:00.103386784 +652 77 0 days 00:00:00.106994381 +652 80 0 days 00:00:00.115668616 +652 81 0 days 00:00:00.087725901 +652 82 0 days 00:00:00.120690396 +652 84 0 days 00:00:00.089196076 +652 85 0 days 00:00:00.083109497 +652 91 0 days 00:00:00.126671603 +652 93 0 days 00:00:00.105022078 +652 94 0 days 00:00:00.112470045 +652 96 0 days 00:00:00.128924446 +653 1 0 days 00:00:00.329909805 +653 2 0 days 00:00:00.510692953 +653 4 0 days 00:00:00.305845726 +653 5 0 days 00:00:00.937312306 +653 6 0 days 00:00:00.376175306 +653 7 0 days 00:00:00.788820106 +653 8 0 days 00:00:00.313169693 +653 9 0 days 00:00:00.289792626 +653 10 0 days 00:00:00.939811300 +653 11 0 days 00:00:00.307764366 +653 15 0 days 00:00:01.007204160 +653 16 0 days 00:00:00.318657300 +653 18 0 days 00:00:00.675743647 +653 19 0 days 00:00:00.276848380 +653 20 0 days 00:00:00.615326073 +653 21 0 days 00:00:00.975116760 +653 22 0 days 00:00:01.264182913 +653 24 0 days 00:00:00.315940753 +653 25 0 days 00:00:00.310359060 +653 26 0 days 00:00:00.497343760 +653 27 0 days 00:00:00.316841386 +653 28 0 days 00:00:00.514264613 +653 30 0 days 00:00:00.300169820 +653 32 0 days 00:00:00.580975053 +653 33 0 days 00:00:00.286252046 +653 34 0 days 00:00:00.693544882 +653 35 0 days 00:00:01.304746610 +653 36 0 days 00:00:01.341377396 +653 39 0 days 00:00:00.821170021 +653 41 0 days 00:00:00.483111066 +653 42 0 days 00:00:00.419283810 +653 43 0 days 00:00:00.504980140 +653 45 0 days 
00:00:00.589805880 +653 46 0 days 00:00:00.956597433 +653 47 0 days 00:00:00.812799382 +653 48 0 days 00:00:00.285333606 +653 52 0 days 00:00:00.286193840 +653 53 0 days 00:00:00.802877033 +653 54 0 days 00:00:00.362769144 +653 55 0 days 00:00:00.611601040 +653 56 0 days 00:00:00.619933906 +653 57 0 days 00:00:00.586346593 +653 58 0 days 00:00:01.240309142 +653 60 0 days 00:00:01.033089660 +653 61 0 days 00:00:00.306497800 +653 62 0 days 00:00:00.539712200 +653 63 0 days 00:00:00.480558062 +653 64 0 days 00:00:00.395650383 +653 65 0 days 00:00:00.498100913 +653 66 0 days 00:00:00.945843580 +653 67 0 days 00:00:00.378985500 +653 68 0 days 00:00:00.590186053 +653 72 0 days 00:00:00.322129166 +653 73 0 days 00:00:00.523589306 +653 74 0 days 00:00:00.499267700 +653 75 0 days 00:00:01.276487176 +653 76 0 days 00:00:00.293088780 +653 77 0 days 00:00:00.368438450 +653 79 0 days 00:00:00.516213026 +653 80 0 days 00:00:00.510747706 +653 81 0 days 00:00:00.913866726 +653 82 0 days 00:00:00.915681693 +653 83 0 days 00:00:00.360953853 +653 85 0 days 00:00:00.551384575 +653 87 0 days 00:00:00.543904453 +653 88 0 days 00:00:00.943296673 +653 90 0 days 00:00:00.441379944 +653 91 0 days 00:00:00.394111752 +653 92 0 days 00:00:00.362412160 +653 94 0 days 00:00:00.524703433 +653 96 0 days 00:00:00.294718700 +653 97 0 days 00:00:00.305351013 +653 98 0 days 00:00:00.306072800 +653 99 0 days 00:00:00.819238765 +653 100 0 days 00:00:00.386490208 +654 1 0 days 00:00:00.334501406 +654 2 0 days 00:00:00.987983953 +654 3 0 days 00:00:00.549646520 +654 4 0 days 00:00:00.623110905 +654 5 0 days 00:00:00.346818773 +654 8 0 days 00:00:00.360323645 +654 9 0 days 00:00:00.547403000 +654 11 0 days 00:00:01.032880760 +654 12 0 days 00:00:01.003776686 +654 13 0 days 00:00:00.348045345 +654 14 0 days 00:00:00.999588640 +654 15 0 days 00:00:01.280519371 +654 17 0 days 00:00:00.690398332 +654 18 0 days 00:00:00.479293946 +654 19 0 days 00:00:00.402877883 +654 20 0 days 00:00:01.071527733 +654 21 0 days 
00:00:00.671558842 +654 22 0 days 00:00:00.947328793 +654 23 0 days 00:00:00.571357413 +654 24 0 days 00:00:00.403267193 +654 26 0 days 00:00:00.820131996 +654 27 0 days 00:00:00.923515986 +654 30 0 days 00:00:00.401071846 +654 31 0 days 00:00:00.211080740 +654 32 0 days 00:00:00.822053825 +654 33 0 days 00:00:00.729329280 +654 35 0 days 00:00:00.395939693 +654 36 0 days 00:00:00.273316186 +654 38 0 days 00:00:00.715164173 +654 40 0 days 00:00:00.271831530 +654 41 0 days 00:00:00.234407986 +654 42 0 days 00:00:00.995567173 +654 43 0 days 00:00:00.246219326 +654 45 0 days 00:00:00.703559573 +654 46 0 days 00:00:00.243899573 +654 47 0 days 00:00:00.858317624 +654 48 0 days 00:00:00.759663773 +654 49 0 days 00:00:00.462607746 +654 51 0 days 00:00:00.767696468 +654 52 0 days 00:00:00.334976766 +654 53 0 days 00:00:00.237726440 +654 54 0 days 00:00:00.247674080 +654 57 0 days 00:00:00.435857660 +654 60 0 days 00:00:00.241095526 +654 61 0 days 00:00:00.307865560 +654 64 0 days 00:00:00.402306166 +654 66 0 days 00:00:00.247283653 +654 69 0 days 00:00:00.241785373 +654 70 0 days 00:00:00.271629973 +654 71 0 days 00:00:00.426187526 +654 72 0 days 00:00:00.458715080 +654 73 0 days 00:00:00.901687715 +654 74 0 days 00:00:00.507719964 +654 77 0 days 00:00:00.230649593 +654 79 0 days 00:00:00.460216373 +654 80 0 days 00:00:00.304881080 +654 81 0 days 00:00:00.509700636 +654 82 0 days 00:00:00.458023273 +654 83 0 days 00:00:00.237622520 +654 84 0 days 00:00:00.232674906 +654 85 0 days 00:00:00.403744506 +654 86 0 days 00:00:00.458802473 +654 87 0 days 00:00:00.753566440 +654 88 0 days 00:00:00.399987940 +654 89 0 days 00:00:00.731352540 +654 90 0 days 00:00:00.249312540 +654 91 0 days 00:00:00.284631406 +654 93 0 days 00:00:00.781080675 +654 94 0 days 00:00:00.747768040 +654 95 0 days 00:00:00.239229560 +654 96 0 days 00:00:00.243615906 +654 98 0 days 00:00:00.451329005 +654 99 0 days 00:00:00.556581953 +654 100 0 days 00:00:00.533930348 +655 1 0 days 00:00:00.205977320 +655 3 0 
days 00:00:00.433487956 +655 4 0 days 00:00:00.208904673 +655 5 0 days 00:00:00.136875600 +655 6 0 days 00:00:00.430911864 +655 7 0 days 00:00:00.138244340 +655 8 0 days 00:00:00.207384886 +655 9 0 days 00:00:00.237501004 +655 10 0 days 00:00:00.200207773 +655 11 0 days 00:00:00.356555640 +655 12 0 days 00:00:00.135451833 +655 13 0 days 00:00:00.360958713 +655 14 0 days 00:00:00.151587706 +655 15 0 days 00:00:00.205540713 +655 18 0 days 00:00:00.202701920 +655 20 0 days 00:00:00.330714066 +655 21 0 days 00:00:00.191281206 +655 22 0 days 00:00:00.207961006 +655 23 0 days 00:00:00.359457806 +655 24 0 days 00:00:00.142002726 +655 26 0 days 00:00:00.130211186 +655 27 0 days 00:00:00.350712880 +655 29 0 days 00:00:00.110558553 +655 30 0 days 00:00:00.135047360 +655 31 0 days 00:00:00.201930513 +655 32 0 days 00:00:00.365755873 +655 33 0 days 00:00:00.206213793 +655 34 0 days 00:00:00.196533293 +655 35 0 days 00:00:00.125875393 +655 37 0 days 00:00:00.198333653 +655 39 0 days 00:00:00.381443760 +655 40 0 days 00:00:00.501646223 +655 41 0 days 00:00:00.134883880 +655 42 0 days 00:00:00.215522053 +655 43 0 days 00:00:00.127804060 +655 44 0 days 00:00:00.129254126 +655 45 0 days 00:00:00.128318846 +655 46 0 days 00:00:00.249524350 +655 47 0 days 00:00:00.372146053 +655 48 0 days 00:00:00.130312680 +655 49 0 days 00:00:00.227702773 +655 51 0 days 00:00:00.124192673 +655 54 0 days 00:00:00.130644626 +655 55 0 days 00:00:00.439996585 +655 56 0 days 00:00:00.132698120 +655 57 0 days 00:00:00.378439906 +655 59 0 days 00:00:00.374025546 +655 60 0 days 00:00:00.373162080 +655 61 0 days 00:00:00.132945413 +655 62 0 days 00:00:00.244364626 +655 63 0 days 00:00:00.149085610 +655 64 0 days 00:00:00.243185233 +655 66 0 days 00:00:00.140766345 +655 67 0 days 00:00:00.140735613 +655 68 0 days 00:00:00.368400713 +655 70 0 days 00:00:00.214824660 +655 71 0 days 00:00:00.131794686 +655 72 0 days 00:00:00.382872266 +655 73 0 days 00:00:00.384423946 +655 74 0 days 00:00:00.272338091 +655 75 0 
days 00:00:00.388129873 +655 76 0 days 00:00:00.142081226 +655 77 0 days 00:00:00.251549060 +655 78 0 days 00:00:00.214098913 +655 79 0 days 00:00:00.258654313 +655 83 0 days 00:00:00.152020440 +655 85 0 days 00:00:00.141964333 +655 86 0 days 00:00:00.287151160 +655 87 0 days 00:00:00.221279193 +655 88 0 days 00:00:00.221649280 +655 89 0 days 00:00:00.275529204 +655 90 0 days 00:00:00.224598593 +655 91 0 days 00:00:00.223904013 +655 93 0 days 00:00:00.138749793 +655 94 0 days 00:00:00.141999246 +655 95 0 days 00:00:00.388635520 +655 96 0 days 00:00:00.132088093 +655 97 0 days 00:00:00.374012900 +655 99 0 days 00:00:00.143118253 +655 100 0 days 00:00:00.215881086 +656 1 0 days 00:00:00.169469780 +656 2 0 days 00:00:00.225974273 +656 3 0 days 00:00:00.169244886 +656 4 0 days 00:00:00.410352600 +656 6 0 days 00:00:00.452725655 +656 7 0 days 00:00:00.133695653 +656 8 0 days 00:00:00.390553109 +656 9 0 days 00:00:00.387676093 +656 13 0 days 00:00:00.224961366 +656 14 0 days 00:00:00.402607913 +656 15 0 days 00:00:00.142284560 +656 16 0 days 00:00:00.392947420 +656 18 0 days 00:00:00.255929395 +656 19 0 days 00:00:00.401912440 +656 22 0 days 00:00:00.242378553 +656 23 0 days 00:00:00.427803480 +656 25 0 days 00:00:00.140932646 +656 26 0 days 00:00:00.130689660 +656 27 0 days 00:00:00.130700713 +656 28 0 days 00:00:00.132772186 +656 30 0 days 00:00:00.403454021 +656 35 0 days 00:00:00.190999464 +656 37 0 days 00:00:00.217054020 +656 38 0 days 00:00:00.254238866 +656 39 0 days 00:00:00.142319666 +656 40 0 days 00:00:00.225077033 +656 41 0 days 00:00:00.392306260 +656 43 0 days 00:00:00.144744560 +656 44 0 days 00:00:00.223069726 +656 45 0 days 00:00:00.399296333 +656 46 0 days 00:00:00.139167180 +656 47 0 days 00:00:00.402961973 +656 49 0 days 00:00:00.415921526 +656 50 0 days 00:00:00.168113860 +656 51 0 days 00:00:00.137560686 +656 52 0 days 00:00:00.422143760 +656 53 0 days 00:00:00.139367240 +656 54 0 days 00:00:00.153413580 +656 55 0 days 00:00:00.416792266 +656 56 0 
days 00:00:00.143199595 +656 58 0 days 00:00:00.128605526 +656 59 0 days 00:00:00.135744360 +656 60 0 days 00:00:00.386466700 +656 61 0 days 00:00:00.129693080 +656 62 0 days 00:00:00.226659733 +656 64 0 days 00:00:00.224548193 +656 66 0 days 00:00:00.399634160 +656 68 0 days 00:00:00.398573146 +656 69 0 days 00:00:00.134091460 +656 71 0 days 00:00:00.229252900 +656 72 0 days 00:00:00.292183408 +656 73 0 days 00:00:00.538556838 +656 76 0 days 00:00:00.146376720 +656 79 0 days 00:00:00.145975613 +656 83 0 days 00:00:00.477360988 +656 84 0 days 00:00:00.160278660 +656 85 0 days 00:00:00.141580446 +656 87 0 days 00:00:00.224393520 +656 88 0 days 00:00:00.133079706 +656 92 0 days 00:00:00.131591140 +656 93 0 days 00:00:00.171337965 +656 95 0 days 00:00:00.418466240 +656 96 0 days 00:00:00.169221233 +656 97 0 days 00:00:00.129070886 +656 98 0 days 00:00:00.175230226 +656 99 0 days 00:00:00.177788553 +656 100 0 days 00:00:00.404028266 +657 1 0 days 00:00:01.289507960 +657 2 0 days 00:00:00.693839280 +657 3 0 days 00:00:01.301892394 +657 5 0 days 00:00:00.441647546 +657 6 0 days 00:00:00.407949617 +657 7 0 days 00:00:00.704031380 +657 8 0 days 00:00:00.800180505 +657 9 0 days 00:00:00.648422943 +657 10 0 days 00:00:01.130925720 +657 11 0 days 00:00:00.473866746 +657 12 0 days 00:00:00.688076120 +657 13 0 days 00:00:00.447099545 +657 14 0 days 00:00:01.186799433 +657 15 0 days 00:00:00.493209495 +657 16 0 days 00:00:00.739157950 +657 17 0 days 00:00:00.816080455 +657 18 0 days 00:00:00.742405520 +657 19 0 days 00:00:00.420046623 +657 20 0 days 00:00:01.177322166 +657 21 0 days 00:00:00.710567703 +657 22 0 days 00:00:00.692240990 +657 23 0 days 00:00:00.718013170 +657 24 0 days 00:00:00.447960964 +657 25 0 days 00:00:00.421786020 +657 26 0 days 00:00:00.616017782 +657 27 0 days 00:00:01.146428624 +657 28 0 days 00:00:00.772325773 +657 29 0 days 00:00:00.667027285 +657 30 0 days 00:00:00.739445394 +657 31 0 days 00:00:00.385213258 +657 32 0 days 00:00:01.291624126 +657 33 0 
days 00:00:01.150248368 +657 34 0 days 00:00:01.161946360 +657 35 0 days 00:00:00.391195792 +657 36 0 days 00:00:01.191818828 +657 37 0 days 00:00:00.806356848 +657 38 0 days 00:00:00.683675975 +657 39 0 days 00:00:01.223233360 +657 41 0 days 00:00:00.719780831 +657 42 0 days 00:00:01.193604856 +657 43 0 days 00:00:01.222148446 +657 44 0 days 00:00:00.417917115 +657 45 0 days 00:00:00.506084107 +657 46 0 days 00:00:00.665198272 +657 47 0 days 00:00:00.415564844 +657 48 0 days 00:00:00.644117900 +657 49 0 days 00:00:01.199270296 +657 50 0 days 00:00:00.750131787 +657 52 0 days 00:00:00.646630412 +657 53 0 days 00:00:00.377374814 +657 54 0 days 00:00:00.488390432 +657 55 0 days 00:00:01.139958680 +657 56 0 days 00:00:01.068189200 +657 57 0 days 00:00:01.158049250 +657 58 0 days 00:00:01.156630592 +657 59 0 days 00:00:00.715346188 +657 60 0 days 00:00:00.631592362 +657 61 0 days 00:00:01.130139523 +657 62 0 days 00:00:00.647436220 +657 63 0 days 00:00:01.274660731 +657 64 0 days 00:00:01.190301343 +657 65 0 days 00:00:00.627591966 +657 66 0 days 00:00:00.764033712 +657 67 0 days 00:00:00.446830622 +657 68 0 days 00:00:01.196053244 +657 69 0 days 00:00:00.650190494 +657 70 0 days 00:00:01.155994772 +657 71 0 days 00:00:00.723526590 +657 72 0 days 00:00:00.395615103 +657 73 0 days 00:00:00.673760431 +657 74 0 days 00:00:01.304913200 +657 75 0 days 00:00:01.152032004 +657 76 0 days 00:00:00.397257234 +657 77 0 days 00:00:00.825918256 +657 78 0 days 00:00:00.445197257 +657 79 0 days 00:00:00.461873898 +657 80 0 days 00:00:01.112048308 +657 81 0 days 00:00:00.595255202 +657 82 0 days 00:00:00.426100518 +657 83 0 days 00:00:00.802406780 +657 84 0 days 00:00:01.112318348 +657 85 0 days 00:00:00.437344421 +657 86 0 days 00:00:00.619839880 +657 87 0 days 00:00:00.424401687 +657 88 0 days 00:00:00.833248183 +657 89 0 days 00:00:01.220037814 +657 90 0 days 00:00:00.706237194 +657 91 0 days 00:00:00.721193630 +657 92 0 days 00:00:00.467239717 +657 93 0 days 00:00:01.063047752 
+657 94 0 days 00:00:01.119043724 +657 95 0 days 00:00:00.750374143 +657 97 0 days 00:00:01.221163982 +657 98 0 days 00:00:00.703885932 +657 99 0 days 00:00:01.275754824 +657 100 0 days 00:00:00.434457901 +658 1 0 days 00:00:00.633108248 +658 2 0 days 00:00:00.627923560 +658 3 0 days 00:00:00.635490974 +658 4 0 days 00:00:00.393595266 +658 5 0 days 00:00:00.341231480 +658 6 0 days 00:00:00.599587437 +658 7 0 days 00:00:00.401279646 +658 8 0 days 00:00:00.388321911 +658 9 0 days 00:00:00.398458873 +658 10 0 days 00:00:00.195414001 +658 11 0 days 00:00:00.570819466 +658 12 0 days 00:00:00.366499190 +658 13 0 days 00:00:00.601684083 +658 14 0 days 00:00:00.338146429 +658 15 0 days 00:00:00.236714258 +658 16 0 days 00:00:00.607095486 +658 17 0 days 00:00:00.217468976 +658 19 0 days 00:00:00.205662761 +658 20 0 days 00:00:00.371651811 +658 21 0 days 00:00:00.352470107 +658 22 0 days 00:00:00.608800573 +658 23 0 days 00:00:00.211713321 +658 24 0 days 00:00:00.628871236 +658 25 0 days 00:00:00.633380048 +658 26 0 days 00:00:00.361953306 +658 27 0 days 00:00:00.227008455 +658 28 0 days 00:00:00.211688357 +658 29 0 days 00:00:00.399199081 +658 30 0 days 00:00:00.601206203 +658 31 0 days 00:00:00.360876193 +658 32 0 days 00:00:00.227559115 +658 33 0 days 00:00:00.340159944 +658 34 0 days 00:00:00.196799404 +658 35 0 days 00:00:00.413678118 +658 36 0 days 00:00:00.616836790 +658 38 0 days 00:00:00.386639286 +658 39 0 days 00:00:00.388495996 +658 40 0 days 00:00:00.614283080 +658 41 0 days 00:00:00.608209585 +658 42 0 days 00:00:00.285506646 +658 43 0 days 00:00:00.632038354 +658 44 0 days 00:00:00.269866603 +658 45 0 days 00:00:00.657160855 +658 46 0 days 00:00:00.221529812 +658 47 0 days 00:00:00.659295290 +658 48 0 days 00:00:00.369006491 +658 49 0 days 00:00:00.226419427 +658 51 0 days 00:00:00.218106716 +658 52 0 days 00:00:00.224594370 +658 53 0 days 00:00:00.331626850 +658 54 0 days 00:00:00.222235766 +658 55 0 days 00:00:00.638999850 +658 56 0 days 00:00:00.370042226 
+658 57 0 days 00:00:00.687603720 +658 58 0 days 00:00:00.648681580 +658 59 0 days 00:00:00.384635204 +658 60 0 days 00:00:00.555213620 +658 61 0 days 00:00:00.697472357 +658 62 0 days 00:00:00.597526523 +658 63 0 days 00:00:00.588190433 +658 64 0 days 00:00:00.626030697 +658 65 0 days 00:00:00.367235804 +658 66 0 days 00:00:00.225567968 +658 67 0 days 00:00:00.405279840 +658 68 0 days 00:00:00.248479315 +658 70 0 days 00:00:00.380108797 +658 71 0 days 00:00:00.225904245 +658 72 0 days 00:00:00.618423626 +658 73 0 days 00:00:00.643100610 +658 74 0 days 00:00:00.658168774 +658 75 0 days 00:00:00.396576656 +658 76 0 days 00:00:00.357623558 +658 77 0 days 00:00:00.336065276 +658 79 0 days 00:00:00.352962287 +658 80 0 days 00:00:00.209098288 +658 81 0 days 00:00:00.232656755 +658 82 0 days 00:00:00.398262741 +658 83 0 days 00:00:00.406628804 +658 84 0 days 00:00:00.225883292 +658 85 0 days 00:00:00.375454935 +658 86 0 days 00:00:00.395163913 +658 87 0 days 00:00:00.412590726 +658 88 0 days 00:00:00.199361005 +658 89 0 days 00:00:00.635260280 +658 90 0 days 00:00:00.570301972 +658 91 0 days 00:00:00.387021456 +658 92 0 days 00:00:00.215891145 +658 93 0 days 00:00:00.210095798 +658 94 0 days 00:00:00.317539902 +658 95 0 days 00:00:00.243117668 +658 96 0 days 00:00:00.571434185 +658 97 0 days 00:00:00.373310797 +658 98 0 days 00:00:00.663644622 +658 99 0 days 00:00:00.356194683 +658 100 0 days 00:00:00.281796902 +659 1 0 days 00:00:00.674964603 +659 3 0 days 00:00:00.681300220 +659 4 0 days 00:00:00.725292097 +659 6 0 days 00:00:00.682723002 +659 7 0 days 00:00:00.709536887 +659 8 0 days 00:00:00.697532434 +659 9 0 days 00:00:01.236501788 +659 10 0 days 00:00:01.278843216 +659 12 0 days 00:00:00.682568353 +659 13 0 days 00:00:01.207867366 +659 14 0 days 00:00:01.213664965 +659 19 0 days 00:00:00.746917256 +659 20 0 days 00:00:01.282588516 +659 22 0 days 00:00:00.699953960 +659 23 0 days 00:00:01.309244320 +659 24 0 days 00:00:01.286793123 +659 26 0 days 00:00:00.732193675 
+659 27 0 days 00:00:00.678115792 +659 28 0 days 00:00:00.819831193 +659 30 0 days 00:00:00.728441437 +659 35 0 days 00:00:01.230100780 +659 36 0 days 00:00:01.258155291 +659 37 0 days 00:00:01.118141316 +659 38 0 days 00:00:01.144713784 +659 39 0 days 00:00:00.652755645 +659 41 0 days 00:00:00.722878534 +659 42 0 days 00:00:00.626832811 +659 43 0 days 00:00:00.642517915 +659 44 0 days 00:00:01.229892544 +659 45 0 days 00:00:01.182355886 +659 46 0 days 00:00:00.657717841 +659 47 0 days 00:00:01.187327461 +659 48 0 days 00:00:01.163695738 +659 49 0 days 00:00:01.127164914 +659 50 0 days 00:00:01.149876895 +659 52 0 days 00:00:00.407679957 +659 57 0 days 00:00:00.672540283 +659 61 0 days 00:00:01.411145787 +659 62 0 days 00:00:00.666278718 +659 64 0 days 00:00:00.673897592 +659 67 0 days 00:00:01.210833576 +659 69 0 days 00:00:00.805508088 +659 70 0 days 00:00:00.678803268 +659 71 0 days 00:00:01.168864126 +659 72 0 days 00:00:01.193027005 +659 73 0 days 00:00:01.272590177 +659 74 0 days 00:00:01.055597393 +659 75 0 days 00:00:00.550303601 +659 77 0 days 00:00:00.548115028 +659 79 0 days 00:00:00.990716236 +659 81 0 days 00:00:00.950019345 +659 82 0 days 00:00:00.327607159 +659 83 0 days 00:00:00.994502094 +659 87 0 days 00:00:00.331311331 +659 90 0 days 00:00:00.543963014 +659 94 0 days 00:00:00.957360837 +659 96 0 days 00:00:00.965045548 +659 97 0 days 00:00:00.326750316 +659 99 0 days 00:00:00.546662775 +660 2 0 days 00:00:00.499715864 +660 5 0 days 00:00:00.122529566 +660 9 0 days 00:00:00.504680818 +660 12 0 days 00:00:00.134850066 +660 13 0 days 00:00:00.127182553 +660 18 0 days 00:00:00.279344278 +660 20 0 days 00:00:00.510545298 +660 23 0 days 00:00:00.285324545 +660 24 0 days 00:00:00.502563861 +660 25 0 days 00:00:00.497336796 +660 27 0 days 00:00:00.512887853 +660 28 0 days 00:00:00.513620758 +660 30 0 days 00:00:00.136327300 +660 34 0 days 00:00:00.126290766 +660 35 0 days 00:00:00.509714121 +660 44 0 days 00:00:00.281114087 +660 46 0 days 
00:00:00.283996636 +660 47 0 days 00:00:00.504873120 +660 48 0 days 00:00:00.508611852 +660 53 0 days 00:00:00.129447633 +660 54 0 days 00:00:00.498425143 +660 55 0 days 00:00:00.500965583 +660 56 0 days 00:00:00.531106604 +660 58 0 days 00:00:00.141559766 +660 59 0 days 00:00:00.514537323 +660 62 0 days 00:00:00.516017656 +660 64 0 days 00:00:00.124263246 +660 65 0 days 00:00:00.128776040 +660 69 0 days 00:00:00.512019173 +660 71 0 days 00:00:00.125300386 +660 72 0 days 00:00:00.517502440 +660 73 0 days 00:00:00.161364580 +660 74 0 days 00:00:00.532148976 +660 75 0 days 00:00:00.412973815 +660 77 0 days 00:00:00.228495903 +660 83 0 days 00:00:00.123253880 +660 85 0 days 00:00:00.510385325 +660 86 0 days 00:00:00.507168141 +660 87 0 days 00:00:00.271100244 +660 90 0 days 00:00:00.513102414 +660 93 0 days 00:00:00.509099424 +660 96 0 days 00:00:00.280195419 +660 99 0 days 00:00:00.483524064 +661 1 0 days 00:00:00.123865748 +661 2 0 days 00:00:00.114465604 +661 3 0 days 00:00:00.118023870 +661 4 0 days 00:00:00.122947681 +661 5 0 days 00:00:00.135852652 +661 6 0 days 00:00:00.099920735 +661 7 0 days 00:00:00.172506380 +661 8 0 days 00:00:00.153625405 +661 9 0 days 00:00:00.161092100 +661 10 0 days 00:00:00.137825475 +661 11 0 days 00:00:00.152336475 +661 12 0 days 00:00:00.100449130 +661 13 0 days 00:00:00.143488647 +661 14 0 days 00:00:00.138138650 +661 15 0 days 00:00:00.135458537 +661 16 0 days 00:00:00.118441720 +661 17 0 days 00:00:00.117853835 +661 18 0 days 00:00:00.137171242 +661 19 0 days 00:00:00.118796755 +661 20 0 days 00:00:00.162288020 +661 21 0 days 00:00:00.109643096 +661 22 0 days 00:00:00.133834705 +661 23 0 days 00:00:00.186287573 +661 24 0 days 00:00:00.099567980 +661 25 0 days 00:00:00.104007430 +661 26 0 days 00:00:00.153790640 +661 27 0 days 00:00:00.120123385 +661 28 0 days 00:00:00.168220353 +661 29 0 days 00:00:00.136040586 +661 30 0 days 00:00:00.152284475 +661 31 0 days 00:00:00.177799820 +661 32 0 days 00:00:00.174989705 +661 33 0 days 
00:00:00.136731980 +661 34 0 days 00:00:00.151486685 +661 35 0 days 00:00:00.119879705 +661 36 0 days 00:00:00.106964408 +661 37 0 days 00:00:00.102199275 +661 38 0 days 00:00:00.118386958 +661 39 0 days 00:00:00.176018673 +661 40 0 days 00:00:00.128921443 +661 41 0 days 00:00:00.156438145 +661 42 0 days 00:00:00.111441800 +661 43 0 days 00:00:00.119268130 +661 44 0 days 00:00:00.157240380 +661 45 0 days 00:00:00.163998436 +661 46 0 days 00:00:00.104985240 +661 47 0 days 00:00:00.116007146 +661 48 0 days 00:00:00.155890915 +661 49 0 days 00:00:00.135900192 +661 50 0 days 00:00:00.162908604 +661 51 0 days 00:00:00.118304805 +661 52 0 days 00:00:00.118676120 +661 53 0 days 00:00:00.174618997 +661 54 0 days 00:00:00.134294328 +661 55 0 days 00:00:00.100974855 +661 56 0 days 00:00:00.131593056 +661 57 0 days 00:00:00.100718715 +661 58 0 days 00:00:00.114266820 +661 59 0 days 00:00:00.156340015 +661 60 0 days 00:00:00.157096750 +661 61 0 days 00:00:00.169740283 +661 62 0 days 00:00:00.124920630 +661 63 0 days 00:00:00.103873990 +661 64 0 days 00:00:00.130811520 +661 65 0 days 00:00:00.124174125 +661 66 0 days 00:00:00.118926905 +661 67 0 days 00:00:00.120803355 +661 68 0 days 00:00:00.166005584 +661 69 0 days 00:00:00.165550284 +661 70 0 days 00:00:00.123588100 +661 71 0 days 00:00:00.122402945 +661 72 0 days 00:00:00.102691665 +661 73 0 days 00:00:00.110510666 +661 74 0 days 00:00:00.107943780 +661 75 0 days 00:00:00.146802110 +661 76 0 days 00:00:00.184035884 +661 77 0 days 00:00:00.137839417 +661 78 0 days 00:00:00.120586355 +661 79 0 days 00:00:00.124145693 +661 80 0 days 00:00:00.134659533 +661 81 0 days 00:00:00.093557040 +661 82 0 days 00:00:00.129604872 +661 83 0 days 00:00:00.125865927 +661 84 0 days 00:00:00.115687565 +661 85 0 days 00:00:00.171722210 +661 86 0 days 00:00:00.115334080 +661 87 0 days 00:00:00.162366020 +661 88 0 days 00:00:00.136112390 +661 89 0 days 00:00:00.104004835 +661 90 0 days 00:00:00.185242588 +661 91 0 days 00:00:00.121685415 +661 92 
0 days 00:00:00.103855255 +661 93 0 days 00:00:00.103653945 +661 94 0 days 00:00:00.147421967 +661 95 0 days 00:00:00.156351360 +661 96 0 days 00:00:00.120933368 +661 97 0 days 00:00:00.120731785 +661 98 0 days 00:00:00.178489205 +661 99 0 days 00:00:00.154575875 +661 100 0 days 00:00:00.154648975 +662 1 0 days 00:00:00.172691508 +662 2 0 days 00:00:00.181171520 +662 3 0 days 00:00:00.115651026 +662 4 0 days 00:00:00.128916612 +662 5 0 days 00:00:00.107964395 +662 6 0 days 00:00:00.163300920 +662 7 0 days 00:00:00.191743278 +662 8 0 days 00:00:00.115965182 +662 9 0 days 00:00:00.186216191 +662 10 0 days 00:00:00.103254086 +662 11 0 days 00:00:00.124835165 +662 12 0 days 00:00:00.192575671 +662 13 0 days 00:00:00.118839696 +662 14 0 days 00:00:00.158705010 +662 15 0 days 00:00:00.106208455 +662 16 0 days 00:00:00.192749263 +662 17 0 days 00:00:00.145551996 +662 18 0 days 00:00:00.133693908 +662 19 0 days 00:00:00.096583153 +662 20 0 days 00:00:00.134784453 +662 21 0 days 00:00:00.125454615 +662 22 0 days 00:00:00.107631410 +662 23 0 days 00:00:00.134831560 +662 24 0 days 00:00:00.124248930 +662 25 0 days 00:00:00.159667600 +662 26 0 days 00:00:00.158725865 +662 27 0 days 00:00:00.172825096 +662 28 0 days 00:00:00.124048308 +662 29 0 days 00:00:00.088326220 +662 30 0 days 00:00:00.169960803 +662 31 0 days 00:00:00.115080568 +662 32 0 days 00:00:00.099939230 +662 33 0 days 00:00:00.155647010 +662 34 0 days 00:00:00.167613768 +662 35 0 days 00:00:00.124076528 +662 36 0 days 00:00:00.106212573 +662 37 0 days 00:00:00.120084408 +662 38 0 days 00:00:00.166869272 +662 39 0 days 00:00:00.115372965 +662 40 0 days 00:00:00.162287400 +662 41 0 days 00:00:00.096282926 +662 42 0 days 00:00:00.108995108 +662 43 0 days 00:00:00.091085786 +662 44 0 days 00:00:00.155195860 +662 45 0 days 00:00:00.097433000 +662 46 0 days 00:00:00.116363584 +662 47 0 days 00:00:00.155418935 +662 48 0 days 00:00:00.118284195 +662 49 0 days 00:00:00.171852157 +662 50 0 days 00:00:00.155098290 +662 51 0 
days 00:00:00.161552928 +662 52 0 days 00:00:00.120413300 +662 53 0 days 00:00:00.155352425 +662 54 0 days 00:00:00.175343717 +662 55 0 days 00:00:00.116350352 +662 56 0 days 00:00:00.124105645 +662 57 0 days 00:00:00.116602370 +662 58 0 days 00:00:00.098892355 +662 59 0 days 00:00:00.164584190 +662 60 0 days 00:00:00.117270895 +662 61 0 days 00:00:00.115116065 +662 62 0 days 00:00:00.149117255 +662 63 0 days 00:00:00.109072154 +662 64 0 days 00:00:00.114461825 +662 65 0 days 00:00:00.147676130 +662 66 0 days 00:00:00.135462502 +662 67 0 days 00:00:00.097880265 +662 68 0 days 00:00:00.116499895 +662 69 0 days 00:00:00.138316267 +662 70 0 days 00:00:00.152319870 +662 71 0 days 00:00:00.116912630 +662 72 0 days 00:00:00.156662896 +662 73 0 days 00:00:00.163559663 +662 74 0 days 00:00:00.134893802 +662 75 0 days 00:00:00.173403144 +662 76 0 days 00:00:00.147987365 +662 77 0 days 00:00:00.104706933 +662 78 0 days 00:00:00.147321170 +662 79 0 days 00:00:00.121905175 +662 80 0 days 00:00:00.097538490 +662 81 0 days 00:00:00.148031410 +662 82 0 days 00:00:00.132628190 +662 83 0 days 00:00:00.098568510 +662 84 0 days 00:00:00.145639385 +662 85 0 days 00:00:00.146301120 +662 86 0 days 00:00:00.152776580 +662 87 0 days 00:00:00.123841493 +662 88 0 days 00:00:00.144402385 +662 89 0 days 00:00:00.112596885 +662 90 0 days 00:00:00.114164245 +662 91 0 days 00:00:00.105406233 +662 92 0 days 00:00:00.153737248 +662 93 0 days 00:00:00.145882335 +662 94 0 days 00:00:00.096499920 +662 95 0 days 00:00:00.168243430 +662 96 0 days 00:00:00.102453313 +662 97 0 days 00:00:00.159950980 +662 98 0 days 00:00:00.133941831 +662 99 0 days 00:00:00.171731672 +662 100 0 days 00:00:00.096648810 +663 1 0 days 00:00:00.087603240 +663 2 0 days 00:00:00.080051380 +663 3 0 days 00:00:00.080755835 +663 4 0 days 00:00:00.088706491 +663 5 0 days 00:00:00.063555645 +663 6 0 days 00:00:00.049959980 +663 7 0 days 00:00:00.060063277 +663 8 0 days 00:00:00.061214260 +663 9 0 days 00:00:00.060279617 +663 10 0 
days 00:00:00.058604296 +663 11 0 days 00:00:00.055574913 +663 12 0 days 00:00:00.049165560 +663 13 0 days 00:00:00.061939960 +663 14 0 days 00:00:00.061781557 +663 15 0 days 00:00:00.092141117 +663 16 0 days 00:00:00.061435945 +663 17 0 days 00:00:00.093592463 +663 18 0 days 00:00:00.078467790 +663 19 0 days 00:00:00.053244885 +663 20 0 days 00:00:00.055687613 +663 21 0 days 00:00:00.055304753 +663 23 0 days 00:00:00.058034686 +663 24 0 days 00:00:00.083281548 +663 25 0 days 00:00:00.073315720 +663 26 0 days 00:00:00.093576821 +663 27 0 days 00:00:00.069507445 +663 28 0 days 00:00:00.055792633 +663 29 0 days 00:00:00.057449223 +663 30 0 days 00:00:00.073207535 +663 31 0 days 00:00:00.068295702 +663 32 0 days 00:00:00.070728060 +663 33 0 days 00:00:00.080187775 +663 34 0 days 00:00:00.079920735 +663 35 0 days 00:00:00.070852966 +663 36 0 days 00:00:00.080118595 +663 37 0 days 00:00:00.091219945 +663 38 0 days 00:00:00.072003215 +663 39 0 days 00:00:00.062755875 +663 40 0 days 00:00:00.086200992 +663 41 0 days 00:00:00.062446015 +663 42 0 days 00:00:00.054819200 +663 43 0 days 00:00:00.095513126 +663 44 0 days 00:00:00.065541144 +663 45 0 days 00:00:00.062375500 +663 46 0 days 00:00:00.073595024 +663 47 0 days 00:00:00.056807566 +663 48 0 days 00:00:00.081805090 +663 49 0 days 00:00:00.066763528 +663 50 0 days 00:00:00.056171870 +663 51 0 days 00:00:00.090486405 +663 52 0 days 00:00:00.060636877 +663 53 0 days 00:00:00.062444365 +663 54 0 days 00:00:00.061662446 +663 55 0 days 00:00:00.065563347 +663 56 0 days 00:00:00.070564131 +663 57 0 days 00:00:00.092164775 +663 58 0 days 00:00:00.060742220 +663 60 0 days 00:00:00.063004345 +663 61 0 days 00:00:00.063017134 +663 62 0 days 00:00:00.057051000 +663 63 0 days 00:00:00.073264883 +663 64 0 days 00:00:00.067952280 +663 65 0 days 00:00:00.067580371 +663 66 0 days 00:00:00.065736172 +663 67 0 days 00:00:00.062910345 +663 68 0 days 00:00:00.057013866 +663 69 0 days 00:00:00.061804023 +663 70 0 days 00:00:00.063363434 
+663 71 0 days 00:00:00.093402522 +663 72 0 days 00:00:00.091395257 +663 73 0 days 00:00:00.052851193 +663 74 0 days 00:00:00.071845963 +663 75 0 days 00:00:00.061790345 +663 76 0 days 00:00:00.061455208 +663 77 0 days 00:00:00.079543190 +663 78 0 days 00:00:00.072688420 +663 79 0 days 00:00:00.054810475 +663 80 0 days 00:00:00.058150430 +663 81 0 days 00:00:00.070575568 +663 82 0 days 00:00:00.079870485 +663 83 0 days 00:00:00.079841395 +663 84 0 days 00:00:00.092493974 +663 86 0 days 00:00:00.074354746 +663 87 0 days 00:00:00.062597275 +663 88 0 days 00:00:00.086523726 +663 89 0 days 00:00:00.079376855 +663 90 0 days 00:00:00.060232496 +663 91 0 days 00:00:00.071959443 +663 92 0 days 00:00:00.061639215 +663 93 0 days 00:00:00.084550200 +663 94 0 days 00:00:00.071844714 +663 95 0 days 00:00:00.061727770 +663 96 0 days 00:00:00.061833680 +663 97 0 days 00:00:00.071324646 +663 98 0 days 00:00:00.051688500 +663 99 0 days 00:00:00.067119462 +663 100 0 days 00:00:00.056186173 +664 1 0 days 00:00:00.057695892 +664 2 0 days 00:00:00.067900527 +664 3 0 days 00:00:00.075644540 +664 4 0 days 00:00:00.055020865 +664 5 0 days 00:00:00.073404604 +664 6 0 days 00:00:00.050034580 +664 7 0 days 00:00:00.060776108 +664 8 0 days 00:00:00.083043620 +664 9 0 days 00:00:00.100504821 +664 10 0 days 00:00:00.063134964 +664 11 0 days 00:00:00.062851195 +664 12 0 days 00:00:00.049709100 +664 13 0 days 00:00:00.086342880 +664 14 0 days 00:00:00.096249786 +664 15 0 days 00:00:00.068763446 +664 16 0 days 00:00:00.087592268 +664 17 0 days 00:00:00.092671134 +664 18 0 days 00:00:00.074474513 +664 19 0 days 00:00:00.049904133 +664 20 0 days 00:00:00.050056740 +664 21 0 days 00:00:00.075641584 +664 22 0 days 00:00:00.058834232 +664 25 0 days 00:00:00.084989405 +664 26 0 days 00:00:00.059314220 +664 27 0 days 00:00:00.100113351 +664 29 0 days 00:00:00.077054063 +664 30 0 days 00:00:00.100900426 +664 31 0 days 00:00:00.059993066 +664 32 0 days 00:00:00.068535936 +664 33 0 days 00:00:00.055440125 
+664 34 0 days 00:00:00.085373380 +664 35 0 days 00:00:00.064087665 +664 36 0 days 00:00:00.064803310 +664 37 0 days 00:00:00.064805420 +664 38 0 days 00:00:00.075922060 +664 39 0 days 00:00:00.101929940 +664 40 0 days 00:00:00.064964602 +664 41 0 days 00:00:00.083542985 +664 42 0 days 00:00:00.077377300 +664 43 0 days 00:00:00.095672827 +664 44 0 days 00:00:00.098612754 +664 45 0 days 00:00:00.076754233 +664 46 0 days 00:00:00.083690060 +664 47 0 days 00:00:00.069947860 +664 48 0 days 00:00:00.084328965 +664 49 0 days 00:00:00.051384273 +664 50 0 days 00:00:00.055522055 +664 51 0 days 00:00:00.056245545 +664 52 0 days 00:00:00.067237353 +664 53 0 days 00:00:00.061461688 +664 54 0 days 00:00:00.060531304 +664 55 0 days 00:00:00.050447793 +664 56 0 days 00:00:00.075642993 +664 57 0 days 00:00:00.067044020 +664 58 0 days 00:00:00.054135233 +664 59 0 days 00:00:00.051498706 +664 60 0 days 00:00:00.103177816 +664 61 0 days 00:00:00.088427320 +664 62 0 days 00:00:00.084330985 +664 63 0 days 00:00:00.056118535 +664 64 0 days 00:00:00.076910645 +664 65 0 days 00:00:00.070535576 +664 66 0 days 00:00:00.063569537 +664 67 0 days 00:00:00.102147028 +664 68 0 days 00:00:00.088571404 +664 69 0 days 00:00:00.061863631 +664 70 0 days 00:00:00.097696050 +664 71 0 days 00:00:00.096691191 +664 72 0 days 00:00:00.075849440 +664 73 0 days 00:00:00.083848225 +664 74 0 days 00:00:00.065300700 +664 75 0 days 00:00:00.051012706 +664 76 0 days 00:00:00.062467580 +664 77 0 days 00:00:00.059387760 +664 78 0 days 00:00:00.076818317 +664 79 0 days 00:00:00.090824920 +664 80 0 days 00:00:00.064775970 +664 82 0 days 00:00:00.063730557 +664 83 0 days 00:00:00.087669844 +664 84 0 days 00:00:00.091698003 +664 85 0 days 00:00:00.051514113 +664 86 0 days 00:00:00.094692440 +664 87 0 days 00:00:00.068473968 +664 88 0 days 00:00:00.085023935 +664 89 0 days 00:00:00.072110337 +664 90 0 days 00:00:00.072231597 +664 91 0 days 00:00:00.059892460 +664 92 0 days 00:00:00.056607805 +664 93 0 days 
00:00:00.056155480 +664 94 0 days 00:00:00.068231693 +664 95 0 days 00:00:00.099691362 +664 96 0 days 00:00:00.056165915 +664 97 0 days 00:00:00.064302560 +664 98 0 days 00:00:00.084010880 +664 99 0 days 00:00:00.056057710 +664 100 0 days 00:00:00.097947104 +665 1 0 days 00:00:00.143837437 +665 2 0 days 00:00:00.157769984 +665 3 0 days 00:00:00.117895123 +665 4 0 days 00:00:00.170640771 +665 5 0 days 00:00:00.117610932 +665 6 0 days 00:00:00.174587990 +665 7 0 days 00:00:00.139450252 +665 8 0 days 00:00:00.183639301 +665 10 0 days 00:00:00.140799820 +665 11 0 days 00:00:00.123153888 +665 12 0 days 00:00:00.103512724 +665 13 0 days 00:00:00.132455000 +665 14 0 days 00:00:00.117973868 +665 15 0 days 00:00:00.113830338 +665 16 0 days 00:00:00.109564443 +665 17 0 days 00:00:00.160975724 +665 18 0 days 00:00:00.165714610 +665 19 0 days 00:00:00.160333636 +665 21 0 days 00:00:00.178762526 +665 22 0 days 00:00:00.166720623 +665 23 0 days 00:00:00.135483750 +665 24 0 days 00:00:00.168974685 +665 25 0 days 00:00:00.117886754 +665 26 0 days 00:00:00.179072220 +665 27 0 days 00:00:00.166481130 +665 28 0 days 00:00:00.152257015 +665 29 0 days 00:00:00.167250692 +665 30 0 days 00:00:00.129809311 +665 31 0 days 00:00:00.170925002 +665 32 0 days 00:00:00.165634720 +665 33 0 days 00:00:00.187873501 +665 34 0 days 00:00:00.114920720 +665 35 0 days 00:00:00.109584220 +665 36 0 days 00:00:00.135342304 +665 37 0 days 00:00:00.113807130 +665 38 0 days 00:00:00.134869076 +665 40 0 days 00:00:00.182411248 +665 41 0 days 00:00:00.138278565 +665 42 0 days 00:00:00.139285444 +665 43 0 days 00:00:00.155497495 +665 44 0 days 00:00:00.145694230 +665 45 0 days 00:00:00.142375196 +665 46 0 days 00:00:00.123172495 +665 47 0 days 00:00:00.161918805 +665 48 0 days 00:00:00.186685064 +665 49 0 days 00:00:00.133777488 +665 50 0 days 00:00:00.091730793 +665 51 0 days 00:00:00.131549496 +665 52 0 days 00:00:00.123136442 +665 53 0 days 00:00:00.144344303 +665 54 0 days 00:00:00.156779155 +665 55 0 days 
00:00:00.130795696 +665 56 0 days 00:00:00.118594952 +665 57 0 days 00:00:00.091136080 +665 58 0 days 00:00:00.107683696 +665 59 0 days 00:00:00.119101510 +665 61 0 days 00:00:00.105880788 +665 62 0 days 00:00:00.174275762 +665 63 0 days 00:00:00.171666516 +665 64 0 days 00:00:00.185241047 +665 65 0 days 00:00:00.172539188 +665 66 0 days 00:00:00.169981713 +665 67 0 days 00:00:00.134808525 +665 68 0 days 00:00:00.177664697 +665 69 0 days 00:00:00.136145397 +665 70 0 days 00:00:00.163438224 +665 71 0 days 00:00:00.112033948 +665 72 0 days 00:00:00.109086573 +665 73 0 days 00:00:00.142046444 +665 74 0 days 00:00:00.133877740 +665 75 0 days 00:00:00.183609787 +665 76 0 days 00:00:00.141692649 +665 77 0 days 00:00:00.131721842 +665 78 0 days 00:00:00.121605615 +665 80 0 days 00:00:00.139611561 +665 81 0 days 00:00:00.183649783 +665 82 0 days 00:00:00.122905030 +665 83 0 days 00:00:00.123452276 +665 84 0 days 00:00:00.187597011 +665 86 0 days 00:00:00.161221636 +665 87 0 days 00:00:00.097381750 +665 88 0 days 00:00:00.130405502 +665 89 0 days 00:00:00.103331684 +665 90 0 days 00:00:00.171100945 +665 91 0 days 00:00:00.163356856 +665 92 0 days 00:00:00.101968325 +665 93 0 days 00:00:00.120574061 +665 94 0 days 00:00:00.114197905 +665 95 0 days 00:00:00.135011380 +665 96 0 days 00:00:00.109378348 +665 97 0 days 00:00:00.169544774 +665 98 0 days 00:00:00.130719585 +665 99 0 days 00:00:00.175472014 +665 100 0 days 00:00:00.141533252 +666 2 0 days 00:00:00.073974867 +666 4 0 days 00:00:00.066150353 +666 5 0 days 00:00:00.086704212 +666 6 0 days 00:00:00.097548883 +666 7 0 days 00:00:00.087236532 +666 8 0 days 00:00:00.099999768 +666 9 0 days 00:00:00.068756754 +666 11 0 days 00:00:00.064037164 +666 12 0 days 00:00:00.060579831 +666 14 0 days 00:00:00.097227808 +666 16 0 days 00:00:00.061308190 +666 17 0 days 00:00:00.061350710 +666 19 0 days 00:00:00.100423167 +666 20 0 days 00:00:00.082162955 +666 21 0 days 00:00:00.083100950 +666 22 0 days 00:00:00.054810260 +666 23 0 days 
00:00:00.062423018 +666 26 0 days 00:00:00.058982530 +666 27 0 days 00:00:00.066413724 +666 28 0 days 00:00:00.074248909 +666 29 0 days 00:00:00.061173542 +666 30 0 days 00:00:00.098943740 +666 31 0 days 00:00:00.072126346 +666 32 0 days 00:00:00.054884780 +666 33 0 days 00:00:00.091087180 +666 34 0 days 00:00:00.066162958 +666 36 0 days 00:00:00.061857948 +666 37 0 days 00:00:00.094938887 +666 38 0 days 00:00:00.072544084 +666 39 0 days 00:00:00.054207570 +666 40 0 days 00:00:00.059188436 +666 41 0 days 00:00:00.062648806 +666 42 0 days 00:00:00.064860336 +666 43 0 days 00:00:00.097726312 +666 44 0 days 00:00:00.088151364 +666 45 0 days 00:00:00.063976030 +666 46 0 days 00:00:00.060454477 +666 47 0 days 00:00:00.064505140 +666 48 0 days 00:00:00.092806720 +666 49 0 days 00:00:00.092137665 +666 50 0 days 00:00:00.094090607 +666 51 0 days 00:00:00.052979765 +666 52 0 days 00:00:00.074025357 +666 53 0 days 00:00:00.072726546 +666 54 0 days 00:00:00.095851767 +666 55 0 days 00:00:00.065074560 +666 56 0 days 00:00:00.053631345 +666 57 0 days 00:00:00.071569512 +666 58 0 days 00:00:00.057376513 +666 59 0 days 00:00:00.092403605 +666 60 0 days 00:00:00.075014564 +666 61 0 days 00:00:00.072944228 +666 62 0 days 00:00:00.083763635 +666 63 0 days 00:00:00.100801607 +666 64 0 days 00:00:00.097161683 +666 65 0 days 00:00:00.070409606 +666 66 0 days 00:00:00.086454200 +666 67 0 days 00:00:00.061446580 +666 68 0 days 00:00:00.053202525 +666 69 0 days 00:00:00.074298684 +666 70 0 days 00:00:00.098033238 +666 72 0 days 00:00:00.101053506 +666 73 0 days 00:00:00.064161125 +666 75 0 days 00:00:00.074299485 +666 76 0 days 00:00:00.061581294 +666 77 0 days 00:00:00.093760825 +666 80 0 days 00:00:00.094686037 +666 81 0 days 00:00:00.067221604 +666 82 0 days 00:00:00.063324270 +666 83 0 days 00:00:00.084934910 +666 85 0 days 00:00:00.100577081 +666 86 0 days 00:00:00.084053135 +666 88 0 days 00:00:00.074813650 +666 89 0 days 00:00:00.095365577 +666 90 0 days 00:00:00.062177540 +666 92 
0 days 00:00:00.064865090 +666 93 0 days 00:00:00.074105365 +666 94 0 days 00:00:00.064910796 +666 95 0 days 00:00:00.100348426 +666 96 0 days 00:00:00.102187024 +666 97 0 days 00:00:00.072419795 +666 98 0 days 00:00:00.091707040 +666 99 0 days 00:00:00.071691515 +667 1 0 days 00:00:00.867509133 +667 2 0 days 00:00:00.995870230 +667 3 0 days 00:00:01.163775795 +667 4 0 days 00:00:01.440862196 +667 5 0 days 00:00:00.735770408 +667 6 0 days 00:00:00.401915125 +667 7 0 days 00:00:00.662607045 +667 8 0 days 00:00:00.684760856 +667 9 0 days 00:00:00.383512180 +667 10 0 days 00:00:01.302746728 +667 11 0 days 00:00:01.192870704 +667 12 0 days 00:00:00.786776234 +667 13 0 days 00:00:00.598138675 +667 14 0 days 00:00:01.393690720 +667 15 0 days 00:00:01.321689091 +667 16 0 days 00:00:01.545364612 +667 17 0 days 00:00:00.733783306 +667 18 0 days 00:00:00.505512205 +667 19 0 days 00:00:00.733492784 +667 20 0 days 00:00:00.425480788 +667 21 0 days 00:00:00.882051126 +667 22 0 days 00:00:00.652241747 +667 23 0 days 00:00:00.818446980 +667 24 0 days 00:00:00.741246270 +667 25 0 days 00:00:00.700364433 +667 26 0 days 00:00:01.125066545 +667 27 0 days 00:00:01.648293241 +667 28 0 days 00:00:00.715500440 +667 29 0 days 00:00:00.444947560 +667 30 0 days 00:00:00.584394642 +667 31 0 days 00:00:01.380882304 +667 32 0 days 00:00:01.320961650 +667 33 0 days 00:00:00.450544220 +667 34 0 days 00:00:01.089738672 +667 35 0 days 00:00:00.716897755 +667 36 0 days 00:00:00.433297600 +667 37 0 days 00:00:01.524776696 +667 38 0 days 00:00:01.088256970 +667 39 0 days 00:00:01.616529402 +667 40 0 days 00:00:00.452818080 +667 41 0 days 00:00:00.657772216 +667 43 0 days 00:00:00.579431375 +667 44 0 days 00:00:00.640232833 +667 46 0 days 00:00:01.234165290 +667 47 0 days 00:00:00.841751608 +667 48 0 days 00:00:00.872191857 +667 49 0 days 00:00:01.426903546 +667 50 0 days 00:00:01.540479455 +667 52 0 days 00:00:01.425379235 +667 53 0 days 00:00:00.809286697 +667 54 0 days 00:00:01.574154000 +667 55 0 
days 00:00:01.487068670 +667 57 0 days 00:00:01.489251940 +667 58 0 days 00:00:00.564959028 +667 59 0 days 00:00:00.940290235 +667 60 0 days 00:00:00.596041316 +667 61 0 days 00:00:00.696074316 +667 62 0 days 00:00:01.552605502 +667 63 0 days 00:00:00.903895691 +667 65 0 days 00:00:00.884332933 +667 66 0 days 00:00:00.821497358 +667 67 0 days 00:00:00.756564204 +667 68 0 days 00:00:01.437388310 +667 69 0 days 00:00:00.391799210 +667 70 0 days 00:00:00.637839323 +667 71 0 days 00:00:01.241321831 +667 72 0 days 00:00:01.173755740 +667 73 0 days 00:00:01.094313544 +667 74 0 days 00:00:00.612380923 +667 75 0 days 00:00:00.799163246 +667 76 0 days 00:00:00.423194220 +667 77 0 days 00:00:00.690616246 +667 78 0 days 00:00:01.579885921 +667 79 0 days 00:00:01.187186212 +667 80 0 days 00:00:00.653740723 +667 81 0 days 00:00:00.580218395 +667 82 0 days 00:00:00.732516400 +667 83 0 days 00:00:00.592989786 +667 84 0 days 00:00:00.429002060 +667 85 0 days 00:00:01.021129025 +667 86 0 days 00:00:01.214414756 +667 87 0 days 00:00:00.564564616 +667 88 0 days 00:00:01.340445404 +667 89 0 days 00:00:01.580884268 +667 90 0 days 00:00:00.450189273 +667 91 0 days 00:00:00.409735666 +667 92 0 days 00:00:01.480958688 +667 93 0 days 00:00:01.334765552 +667 94 0 days 00:00:01.358841668 +667 95 0 days 00:00:00.645429205 +667 96 0 days 00:00:01.368885680 +667 97 0 days 00:00:00.370148268 +667 98 0 days 00:00:00.737632855 +667 99 0 days 00:00:00.797589600 +667 100 0 days 00:00:00.664155540 +668 1 0 days 00:00:00.695214520 +668 2 0 days 00:00:00.249609904 +668 3 0 days 00:00:00.559719968 +668 4 0 days 00:00:00.671492242 +668 5 0 days 00:00:00.777591726 +668 6 0 days 00:00:00.455859554 +668 8 0 days 00:00:00.759777212 +668 9 0 days 00:00:00.343743983 +668 10 0 days 00:00:00.219332261 +668 11 0 days 00:00:00.774606543 +668 12 0 days 00:00:00.233276300 +668 13 0 days 00:00:00.227201745 +668 14 0 days 00:00:00.451077592 +668 15 0 days 00:00:00.811251556 +668 16 0 days 00:00:00.240628032 +668 17 0 
days 00:00:00.781027840 +668 18 0 days 00:00:00.816246240 +668 19 0 days 00:00:00.317137480 +668 20 0 days 00:00:00.713866140 +668 22 0 days 00:00:00.464604625 +668 23 0 days 00:00:00.315000730 +668 24 0 days 00:00:00.370565498 +668 25 0 days 00:00:00.723382746 +668 26 0 days 00:00:00.218402736 +668 27 0 days 00:00:00.199533110 +668 28 0 days 00:00:00.493888427 +668 29 0 days 00:00:00.214779871 +668 30 0 days 00:00:00.846218048 +668 31 0 days 00:00:00.252320660 +668 32 0 days 00:00:00.744153383 +668 33 0 days 00:00:00.230573126 +668 34 0 days 00:00:00.382336477 +668 35 0 days 00:00:00.753057112 +668 36 0 days 00:00:00.498964395 +668 37 0 days 00:00:00.229769443 +668 38 0 days 00:00:00.373423237 +668 39 0 days 00:00:00.607537095 +668 40 0 days 00:00:00.617547012 +668 41 0 days 00:00:00.732460076 +668 42 0 days 00:00:00.504448035 +668 43 0 days 00:00:00.838517909 +668 45 0 days 00:00:00.788879511 +668 46 0 days 00:00:00.300342096 +668 47 0 days 00:00:00.288409235 +668 48 0 days 00:00:00.493690912 +668 49 0 days 00:00:00.417927056 +668 50 0 days 00:00:00.772465145 +668 52 0 days 00:00:00.313100220 +668 53 0 days 00:00:00.240231490 +668 54 0 days 00:00:00.446352813 +668 55 0 days 00:00:00.681304734 +668 56 0 days 00:00:00.799311748 +668 57 0 days 00:00:00.215753546 +668 58 0 days 00:00:00.445918924 +668 59 0 days 00:00:00.629982583 +668 60 0 days 00:00:00.331670410 +668 61 0 days 00:00:00.202314826 +668 62 0 days 00:00:00.689577509 +668 63 0 days 00:00:00.349214820 +668 64 0 days 00:00:00.408631920 +668 65 0 days 00:00:00.666762293 +668 66 0 days 00:00:00.284932235 +668 67 0 days 00:00:00.434415054 +668 68 0 days 00:00:00.211719897 +668 69 0 days 00:00:00.742331936 +668 70 0 days 00:00:00.649739430 +668 72 0 days 00:00:00.378376278 +668 73 0 days 00:00:00.600038125 +668 74 0 days 00:00:00.203593372 +668 75 0 days 00:00:00.675012880 +668 76 0 days 00:00:00.225950932 +668 77 0 days 00:00:00.426033082 +668 78 0 days 00:00:00.222644568 +668 79 0 days 00:00:00.757391936 
+668 80 0 days 00:00:00.397179309 +668 81 0 days 00:00:00.859316558 +668 83 0 days 00:00:00.326516240 +668 84 0 days 00:00:00.470294616 +668 85 0 days 00:00:00.217064842 +668 86 0 days 00:00:00.289055378 +668 87 0 days 00:00:00.724856620 +668 88 0 days 00:00:00.625307400 +668 89 0 days 00:00:00.306608294 +668 90 0 days 00:00:00.786784888 +668 91 0 days 00:00:00.699007260 +668 92 0 days 00:00:00.639728468 +668 93 0 days 00:00:00.453578344 +668 96 0 days 00:00:00.458275588 +668 97 0 days 00:00:00.587796028 +668 98 0 days 00:00:00.185683270 +668 99 0 days 00:00:00.591360810 +668 100 0 days 00:00:00.441345342 +669 1 0 days 00:00:00.848673028 +669 2 0 days 00:00:01.413927192 +669 3 0 days 00:00:00.376930295 +669 4 0 days 00:00:00.665650150 +669 5 0 days 00:00:00.794873217 +669 6 0 days 00:00:00.540002725 +669 7 0 days 00:00:01.356926772 +669 8 0 days 00:00:00.701060665 +669 9 0 days 00:00:00.410533250 +669 11 0 days 00:00:00.754638630 +669 13 0 days 00:00:01.207269366 +669 14 0 days 00:00:01.283635030 +669 15 0 days 00:00:00.728337356 +669 16 0 days 00:00:00.402373010 +669 17 0 days 00:00:01.159445390 +669 18 0 days 00:00:00.376390895 +669 19 0 days 00:00:00.425910960 +669 20 0 days 00:00:00.631473505 +669 21 0 days 00:00:00.625835060 +669 22 0 days 00:00:00.692258426 +669 23 0 days 00:00:01.547775213 +669 24 0 days 00:00:01.202836185 +669 25 0 days 00:00:00.689689945 +669 26 0 days 00:00:00.757245732 +669 27 0 days 00:00:01.231028140 +669 28 0 days 00:00:00.753000365 +669 29 0 days 00:00:00.643091723 +669 30 0 days 00:00:00.779248226 +669 31 0 days 00:00:00.383748165 +669 33 0 days 00:00:00.605678740 +669 34 0 days 00:00:00.785548820 +669 35 0 days 00:00:00.625435896 +669 36 0 days 00:00:00.372845620 +669 37 0 days 00:00:00.657819011 +669 38 0 days 00:00:00.430795480 +669 39 0 days 00:00:01.203301093 +669 40 0 days 00:00:00.868261997 +669 41 0 days 00:00:00.579716105 +669 42 0 days 00:00:00.423224622 +669 44 0 days 00:00:01.405385260 +669 45 0 days 00:00:00.901933736 
+669 46 0 days 00:00:00.642018948 +669 47 0 days 00:00:00.626343312 +669 48 0 days 00:00:00.647490726 +669 50 0 days 00:00:00.393608236 +669 52 0 days 00:00:00.399270936 +669 53 0 days 00:00:00.391815065 +669 54 0 days 00:00:00.606709240 +669 55 0 days 00:00:00.593254410 +669 56 0 days 00:00:01.168217573 +669 57 0 days 00:00:01.434975500 +669 58 0 days 00:00:01.238128995 +669 59 0 days 00:00:00.468559540 +669 60 0 days 00:00:01.176313248 +669 61 0 days 00:00:00.724786183 +669 62 0 days 00:00:00.694573568 +669 63 0 days 00:00:00.411326588 +669 64 0 days 00:00:01.305423376 +669 65 0 days 00:00:00.430158800 +669 66 0 days 00:00:01.208121580 +669 67 0 days 00:00:00.340700220 +669 68 0 days 00:00:01.081814690 +669 69 0 days 00:00:00.650758390 +669 70 0 days 00:00:01.085434850 +669 71 0 days 00:00:00.471556345 +669 72 0 days 00:00:00.683356969 +669 73 0 days 00:00:00.696260170 +669 74 0 days 00:00:00.870970180 +669 75 0 days 00:00:00.408583852 +669 76 0 days 00:00:01.276647860 +669 77 0 days 00:00:01.097408180 +669 78 0 days 00:00:01.290725712 +669 79 0 days 00:00:00.406425592 +669 81 0 days 00:00:00.388398450 +669 82 0 days 00:00:00.469469662 +669 83 0 days 00:00:01.175408336 +669 84 0 days 00:00:01.093491395 +669 85 0 days 00:00:00.423608067 +669 86 0 days 00:00:00.375502892 +669 87 0 days 00:00:00.649621070 +669 88 0 days 00:00:00.751705416 +669 89 0 days 00:00:00.719629410 +669 90 0 days 00:00:00.428880655 +669 91 0 days 00:00:00.476645146 +669 92 0 days 00:00:01.256581440 +669 93 0 days 00:00:00.585498160 +669 94 0 days 00:00:01.173442924 +669 95 0 days 00:00:00.365168135 +669 96 0 days 00:00:01.338319330 +669 97 0 days 00:00:00.900121765 +669 98 0 days 00:00:01.329718329 +669 99 0 days 00:00:01.094799565 +669 100 0 days 00:00:01.298404550 +670 1 0 days 00:00:00.219874945 +670 2 0 days 00:00:00.281242038 +670 3 0 days 00:00:00.694907290 +670 4 0 days 00:00:00.358595080 +670 5 0 days 00:00:00.398690914 +670 7 0 days 00:00:00.325356960 +670 8 0 days 00:00:00.368116214 
+670 9 0 days 00:00:00.224046415 +670 10 0 days 00:00:00.301977704 +670 11 0 days 00:00:00.353785113 +670 12 0 days 00:00:00.363334357 +670 13 0 days 00:00:00.358725165 +670 14 0 days 00:00:00.211342176 +670 15 0 days 00:00:00.201109760 +670 16 0 days 00:00:00.623465136 +670 17 0 days 00:00:00.664713336 +670 18 0 days 00:00:00.405055046 +670 19 0 days 00:00:00.392313608 +670 20 0 days 00:00:00.294255480 +670 21 0 days 00:00:00.635663665 +670 23 0 days 00:00:00.229167152 +670 24 0 days 00:00:00.273543741 +670 25 0 days 00:00:00.217724329 +670 26 0 days 00:00:00.442136030 +670 27 0 days 00:00:00.664777855 +670 28 0 days 00:00:00.644994945 +670 29 0 days 00:00:00.219364870 +670 30 0 days 00:00:00.712737684 +670 31 0 days 00:00:00.268489183 +670 32 0 days 00:00:00.232292682 +670 33 0 days 00:00:00.701057020 +670 35 0 days 00:00:00.311750843 +670 36 0 days 00:00:00.324151475 +670 37 0 days 00:00:00.299203832 +670 38 0 days 00:00:00.216806323 +670 39 0 days 00:00:00.609971948 +670 41 0 days 00:00:00.630499310 +670 42 0 days 00:00:00.313655861 +670 44 0 days 00:00:00.396409506 +670 45 0 days 00:00:00.228704170 +670 47 0 days 00:00:00.696706805 +670 48 0 days 00:00:00.207610415 +670 49 0 days 00:00:00.357536696 +670 50 0 days 00:00:00.343415740 +670 51 0 days 00:00:00.642056786 +670 52 0 days 00:00:00.332195220 +670 53 0 days 00:00:00.227657431 +670 54 0 days 00:00:00.235660842 +670 55 0 days 00:00:00.412431990 +670 56 0 days 00:00:00.224017144 +670 57 0 days 00:00:00.220835722 +670 58 0 days 00:00:00.364462557 +670 59 0 days 00:00:00.373746552 +670 60 0 days 00:00:00.643047185 +670 61 0 days 00:00:00.317342610 +670 63 0 days 00:00:00.385035256 +670 64 0 days 00:00:00.587857560 +670 65 0 days 00:00:00.727463250 +670 67 0 days 00:00:00.235823474 +670 68 0 days 00:00:00.271722440 +670 69 0 days 00:00:00.601443150 +670 70 0 days 00:00:00.325940130 +670 72 0 days 00:00:00.350148733 +670 73 0 days 00:00:00.436147657 +670 74 0 days 00:00:00.648611750 +670 75 0 days 
00:00:00.494183462 +670 76 0 days 00:00:00.269082508 +670 77 0 days 00:00:00.456863970 +670 78 0 days 00:00:00.364873525 +670 79 0 days 00:00:00.666533120 +670 80 0 days 00:00:00.754427883 +670 81 0 days 00:00:00.631362343 +670 83 0 days 00:00:00.352719155 +670 86 0 days 00:00:00.244681330 +670 87 0 days 00:00:00.778767130 +670 88 0 days 00:00:00.220493463 +670 89 0 days 00:00:00.376327498 +670 90 0 days 00:00:00.442268444 +670 91 0 days 00:00:00.237907754 +670 92 0 days 00:00:00.329699360 +670 93 0 days 00:00:00.357711690 +670 94 0 days 00:00:00.235768189 +670 95 0 days 00:00:00.677168863 +670 96 0 days 00:00:00.631137425 +670 97 0 days 00:00:00.386996794 +670 98 0 days 00:00:00.721209396 +670 99 0 days 00:00:00.219308622 +670 100 0 days 00:00:00.207032500 +671 1 0 days 00:00:01.277578962 +671 2 0 days 00:00:00.518200816 +671 3 0 days 00:00:01.310035695 +671 4 0 days 00:00:01.208907170 +671 5 0 days 00:00:01.213799996 +671 6 0 days 00:00:01.133458395 +671 7 0 days 00:00:01.213867720 +671 8 0 days 00:00:00.506701756 +671 9 0 days 00:00:00.657330744 +671 10 0 days 00:00:01.147106515 +671 11 0 days 00:00:01.524308908 +671 12 0 days 00:00:01.260835395 +671 13 0 days 00:00:00.398345370 +671 14 0 days 00:00:00.850567713 +671 15 0 days 00:00:01.237443790 +671 16 0 days 00:00:00.654392364 +671 17 0 days 00:00:01.318511435 +671 18 0 days 00:00:01.132742760 +671 20 0 days 00:00:00.763328008 +671 21 0 days 00:00:01.120314650 +671 22 0 days 00:00:00.679786900 +671 23 0 days 00:00:00.767816468 +671 24 0 days 00:00:00.425378946 +671 25 0 days 00:00:00.704429680 +671 26 0 days 00:00:00.452879137 +671 27 0 days 00:00:01.169027835 +671 28 0 days 00:00:00.497780740 +671 29 0 days 00:00:01.245081564 +671 30 0 days 00:00:00.643135067 +671 31 0 days 00:00:00.605719090 +671 32 0 days 00:00:00.656784035 +671 33 0 days 00:00:00.781892220 +671 34 0 days 00:00:01.145459212 +671 35 0 days 00:00:01.270347072 +671 36 0 days 00:00:01.271307194 +671 37 0 days 00:00:00.900170022 +671 38 0 days 
00:00:01.217975794 +671 39 0 days 00:00:01.164890010 +671 40 0 days 00:00:01.079156660 +671 41 0 days 00:00:01.208907731 +671 42 0 days 00:00:00.442437211 +671 43 0 days 00:00:00.834063122 +671 44 0 days 00:00:01.139547048 +671 45 0 days 00:00:00.358896800 +671 46 0 days 00:00:00.751717163 +671 47 0 days 00:00:00.473896217 +671 48 0 days 00:00:00.628213712 +671 50 0 days 00:00:01.071993170 +671 52 0 days 00:00:01.164157425 +671 53 0 days 00:00:01.272846704 +671 54 0 days 00:00:00.381105810 +671 55 0 days 00:00:00.449132037 +671 56 0 days 00:00:00.604797096 +671 57 0 days 00:00:01.287685013 +671 58 0 days 00:00:00.722987082 +671 59 0 days 00:00:00.722060000 +671 60 0 days 00:00:01.256064992 +671 61 0 days 00:00:00.428032376 +671 62 0 days 00:00:00.596226357 +671 63 0 days 00:00:01.094868268 +671 64 0 days 00:00:01.205669233 +671 65 0 days 00:00:00.897711147 +671 66 0 days 00:00:00.727601150 +671 67 0 days 00:00:01.041401025 +671 68 0 days 00:00:00.656517240 +671 69 0 days 00:00:00.618564168 +671 70 0 days 00:00:00.594511865 +671 71 0 days 00:00:01.248203208 +671 72 0 days 00:00:01.248187115 +671 73 0 days 00:00:00.579824500 +671 74 0 days 00:00:00.729350800 +671 75 0 days 00:00:00.609788828 +671 76 0 days 00:00:00.347529220 +671 77 0 days 00:00:00.735737232 +671 78 0 days 00:00:00.641990951 +671 79 0 days 00:00:00.432126740 +671 80 0 days 00:00:00.420603493 +671 81 0 days 00:00:00.667160100 +671 82 0 days 00:00:00.409595980 +671 83 0 days 00:00:00.547927552 +671 84 0 days 00:00:01.284285816 +671 86 0 days 00:00:00.619330032 +671 87 0 days 00:00:00.477939473 +671 88 0 days 00:00:00.725208107 +671 89 0 days 00:00:00.398839851 +671 90 0 days 00:00:00.853454322 +671 91 0 days 00:00:01.257047828 +671 93 0 days 00:00:01.193963120 +671 94 0 days 00:00:00.751529340 +671 95 0 days 00:00:00.634081800 +671 96 0 days 00:00:00.699840788 +671 98 0 days 00:00:01.286117983 +671 99 0 days 00:00:01.242553136 +671 100 0 days 00:00:00.612643690 +672 1 0 days 00:00:00.487652114 +672 3 0 
days 00:00:00.695902860 +672 4 0 days 00:00:01.345642840 +672 5 0 days 00:00:00.427399740 +672 6 0 days 00:00:01.455519536 +672 7 0 days 00:00:00.530791016 +672 8 0 days 00:00:00.695882880 +672 9 0 days 00:00:01.165979155 +672 10 0 days 00:00:00.733402535 +672 11 0 days 00:00:00.466053100 +672 12 0 days 00:00:00.749200877 +672 13 0 days 00:00:00.736827784 +672 14 0 days 00:00:01.378578806 +672 15 0 days 00:00:00.493347552 +672 16 0 days 00:00:01.151497710 +672 17 0 days 00:00:00.796530272 +672 18 0 days 00:00:00.704987122 +672 19 0 days 00:00:01.072723835 +672 22 0 days 00:00:00.817310791 +672 23 0 days 00:00:01.343506204 +672 25 0 days 00:00:00.703516755 +672 26 0 days 00:00:01.232921296 +672 27 0 days 00:00:01.349209420 +672 28 0 days 00:00:00.428717300 +672 29 0 days 00:00:00.733862863 +672 30 0 days 00:00:00.382402880 +672 31 0 days 00:00:00.634744845 +672 32 0 days 00:00:00.459144436 +672 33 0 days 00:00:00.631685395 +672 34 0 days 00:00:01.457122909 +672 35 0 days 00:00:00.734384884 +672 36 0 days 00:00:00.518887304 +672 37 0 days 00:00:01.254916373 +672 38 0 days 00:00:01.445168411 +672 39 0 days 00:00:01.361117304 +672 40 0 days 00:00:00.710777990 +672 41 0 days 00:00:00.372418940 +672 42 0 days 00:00:01.232942060 +672 43 0 days 00:00:00.422057845 +672 44 0 days 00:00:01.286749545 +672 45 0 days 00:00:01.387093590 +672 46 0 days 00:00:00.743097373 +672 47 0 days 00:00:00.663986726 +672 48 0 days 00:00:01.332430584 +672 49 0 days 00:00:00.470958102 +672 50 0 days 00:00:00.343035805 +672 52 0 days 00:00:01.248003560 +672 53 0 days 00:00:01.226557602 +672 54 0 days 00:00:00.558766031 +672 55 0 days 00:00:01.436715486 +672 56 0 days 00:00:00.656942330 +672 57 0 days 00:00:01.293767137 +672 58 0 days 00:00:00.669975600 +672 60 0 days 00:00:00.786631327 +672 61 0 days 00:00:00.630082220 +672 62 0 days 00:00:00.352334455 +672 63 0 days 00:00:00.396232380 +672 64 0 days 00:00:00.485592416 +672 65 0 days 00:00:01.517654325 +672 66 0 days 00:00:01.211816040 +672 67 0 
days 00:00:00.711974568 +672 68 0 days 00:00:00.487270374 +672 69 0 days 00:00:00.671342262 +672 70 0 days 00:00:00.462373340 +672 71 0 days 00:00:00.425191786 +672 72 0 days 00:00:00.691760837 +672 73 0 days 00:00:00.652106153 +672 74 0 days 00:00:01.246374615 +672 75 0 days 00:00:01.297901428 +672 76 0 days 00:00:01.219639860 +672 77 0 days 00:00:00.562812594 +672 78 0 days 00:00:00.691399471 +672 79 0 days 00:00:01.122969085 +672 80 0 days 00:00:00.447583222 +672 81 0 days 00:00:01.551330208 +672 82 0 days 00:00:00.414565228 +672 83 0 days 00:00:00.704721276 +672 84 0 days 00:00:00.673309492 +672 85 0 days 00:00:00.434185800 +672 86 0 days 00:00:00.575005610 +672 87 0 days 00:00:00.467893724 +672 88 0 days 00:00:00.674437088 +672 90 0 days 00:00:01.206136608 +672 91 0 days 00:00:01.560197469 +672 92 0 days 00:00:01.198672593 +672 93 0 days 00:00:00.633011532 +672 94 0 days 00:00:01.227224070 +672 95 0 days 00:00:01.307014425 +672 96 0 days 00:00:00.590383740 +672 97 0 days 00:00:00.645063260 +672 98 0 days 00:00:00.397011692 +672 99 0 days 00:00:00.672904568 +672 100 0 days 00:00:01.230418610 +673 1 0 days 00:00:00.305011230 +673 2 0 days 00:00:00.689543754 +673 3 0 days 00:00:00.665653020 +673 4 0 days 00:00:00.617796080 +673 5 0 days 00:00:00.401724462 +673 6 0 days 00:00:00.649172793 +673 7 0 days 00:00:00.244379465 +673 8 0 days 00:00:00.745311990 +673 10 0 days 00:00:00.564782270 +673 11 0 days 00:00:00.298385400 +673 12 0 days 00:00:00.198186890 +673 14 0 days 00:00:00.599017164 +673 15 0 days 00:00:00.445330940 +673 16 0 days 00:00:00.353675506 +673 17 0 days 00:00:00.194451020 +673 18 0 days 00:00:00.661611562 +673 19 0 days 00:00:00.223005874 +673 20 0 days 00:00:00.711366177 +673 21 0 days 00:00:00.182524572 +673 22 0 days 00:00:00.332663430 +673 23 0 days 00:00:00.350405230 +673 24 0 days 00:00:00.194682920 +673 25 0 days 00:00:00.333650446 +673 26 0 days 00:00:00.644769877 +673 27 0 days 00:00:00.388405171 +673 28 0 days 00:00:00.207814221 +673 29 0 
days 00:00:00.183331648 +673 30 0 days 00:00:00.240808391 +673 32 0 days 00:00:00.205130380 +673 33 0 days 00:00:00.636389492 +673 34 0 days 00:00:00.222762653 +673 35 0 days 00:00:00.639307228 +673 36 0 days 00:00:00.673510860 +673 37 0 days 00:00:00.190099776 +673 38 0 days 00:00:00.359995114 +673 39 0 days 00:00:00.212645820 +673 40 0 days 00:00:00.404625835 +673 41 0 days 00:00:00.179760883 +673 42 0 days 00:00:00.211336073 +673 43 0 days 00:00:00.211986358 +673 44 0 days 00:00:00.479194595 +673 45 0 days 00:00:00.365849538 +673 46 0 days 00:00:00.677178110 +673 47 0 days 00:00:00.626877063 +673 48 0 days 00:00:00.311290020 +673 51 0 days 00:00:00.184645085 +673 52 0 days 00:00:00.351120684 +673 54 0 days 00:00:00.662509588 +673 55 0 days 00:00:00.235897157 +673 56 0 days 00:00:00.396621215 +673 57 0 days 00:00:00.221005413 +673 58 0 days 00:00:00.209267620 +673 59 0 days 00:00:00.656283851 +673 61 0 days 00:00:00.717526105 +673 62 0 days 00:00:00.425762415 +673 63 0 days 00:00:00.189678322 +673 64 0 days 00:00:00.244838113 +673 65 0 days 00:00:00.621149672 +673 66 0 days 00:00:00.207153297 +673 67 0 days 00:00:00.613376865 +673 69 0 days 00:00:00.227977318 +673 70 0 days 00:00:00.389175415 +673 71 0 days 00:00:00.328750956 +673 73 0 days 00:00:00.338843988 +673 74 0 days 00:00:00.635177564 +673 76 0 days 00:00:00.174342830 +673 77 0 days 00:00:00.626179376 +673 79 0 days 00:00:00.375530988 +673 80 0 days 00:00:00.678732720 +673 81 0 days 00:00:00.561144125 +673 82 0 days 00:00:00.426156565 +673 83 0 days 00:00:00.402242848 +673 84 0 days 00:00:00.219836237 +673 85 0 days 00:00:00.203219892 +673 87 0 days 00:00:00.340115204 +673 88 0 days 00:00:00.354764471 +673 90 0 days 00:00:00.627476020 +673 91 0 days 00:00:00.573827515 +673 92 0 days 00:00:00.192845365 +673 93 0 days 00:00:00.254630070 +673 94 0 days 00:00:00.191555322 +673 95 0 days 00:00:00.414721010 +673 96 0 days 00:00:00.240809289 +673 97 0 days 00:00:00.239059060 +673 98 0 days 00:00:00.578770335 
+673 100 0 days 00:00:00.692387215 +674 1 0 days 00:00:00.626625344 +674 3 0 days 00:00:00.248924868 +674 4 0 days 00:00:00.206182203 +674 5 0 days 00:00:00.446909880 +674 6 0 days 00:00:00.634143496 +674 7 0 days 00:00:00.222706530 +674 8 0 days 00:00:00.706132422 +674 9 0 days 00:00:00.668448656 +674 10 0 days 00:00:00.719849132 +674 11 0 days 00:00:00.230510476 +674 12 0 days 00:00:00.713295203 +674 14 0 days 00:00:00.215659688 +674 15 0 days 00:00:00.202786365 +674 16 0 days 00:00:00.550308960 +674 17 0 days 00:00:00.200468305 +674 19 0 days 00:00:00.807939300 +674 20 0 days 00:00:00.218388072 +674 23 0 days 00:00:00.711775932 +674 24 0 days 00:00:00.396650500 +674 25 0 days 00:00:00.254258052 +674 26 0 days 00:00:00.697970058 +674 27 0 days 00:00:00.684703760 +674 28 0 days 00:00:00.240132307 +674 29 0 days 00:00:00.652021890 +674 30 0 days 00:00:00.723156482 +674 31 0 days 00:00:00.277654713 +674 32 0 days 00:00:00.228816585 +674 33 0 days 00:00:00.763530286 +674 34 0 days 00:00:00.227881470 +674 35 0 days 00:00:00.374540992 +674 37 0 days 00:00:00.615920204 +674 38 0 days 00:00:00.372786044 +674 39 0 days 00:00:00.355874576 +674 42 0 days 00:00:00.672080264 +674 43 0 days 00:00:00.453240935 +674 44 0 days 00:00:00.678270385 +674 46 0 days 00:00:00.390732456 +674 47 0 days 00:00:00.224255096 +674 48 0 days 00:00:00.664279986 +674 49 0 days 00:00:00.664035171 +674 50 0 days 00:00:00.428894073 +674 51 0 days 00:00:00.199434780 +674 52 0 days 00:00:00.207025986 +674 54 0 days 00:00:00.724824165 +674 55 0 days 00:00:00.343472000 +674 56 0 days 00:00:00.626380420 +674 57 0 days 00:00:00.418435302 +674 58 0 days 00:00:00.396950168 +674 59 0 days 00:00:00.216055940 +674 60 0 days 00:00:00.434578406 +674 61 0 days 00:00:00.211982762 +674 63 0 days 00:00:00.670132304 +674 64 0 days 00:00:00.280886455 +674 65 0 days 00:00:00.376027320 +674 66 0 days 00:00:00.377471536 +674 67 0 days 00:00:00.215752517 +674 68 0 days 00:00:00.275431551 +674 69 0 days 00:00:00.596425455 
+674 70 0 days 00:00:00.715581537 +674 71 0 days 00:00:00.201290370 +674 72 0 days 00:00:00.271074140 +674 73 0 days 00:00:00.621398030 +674 74 0 days 00:00:00.609335396 +674 75 0 days 00:00:00.346617572 +674 76 0 days 00:00:00.370692142 +674 78 0 days 00:00:00.393830996 +674 79 0 days 00:00:00.793406286 +674 80 0 days 00:00:00.350626355 +674 81 0 days 00:00:00.357558345 +674 82 0 days 00:00:00.247279903 +674 83 0 days 00:00:00.223015610 +674 84 0 days 00:00:00.227212395 +674 85 0 days 00:00:00.705495832 +674 87 0 days 00:00:00.279105994 +674 89 0 days 00:00:00.349618646 +674 90 0 days 00:00:00.701757746 +674 91 0 days 00:00:00.376165814 +674 92 0 days 00:00:00.612120472 +674 93 0 days 00:00:00.377817520 +674 94 0 days 00:00:00.260097695 +674 95 0 days 00:00:00.845004154 +674 96 0 days 00:00:00.267572921 +674 97 0 days 00:00:00.706272168 +674 98 0 days 00:00:00.223530595 +674 99 0 days 00:00:00.662374835 +674 100 0 days 00:00:00.783558509 +675 1 0 days 00:00:10.090917806 +675 2 0 days 00:00:11.552966652 +675 3 0 days 00:00:06.661240546 +675 4 0 days 00:00:08.102160097 +675 5 0 days 00:00:11.519717843 +675 6 0 days 00:00:04.044859412 +675 7 0 days 00:00:10.484524886 +675 8 0 days 00:00:03.773666506 +675 9 0 days 00:00:03.782699713 +675 10 0 days 00:00:06.581997040 +675 11 0 days 00:00:06.060773860 +675 12 0 days 00:00:04.057731932 +675 13 0 days 00:00:11.642987625 +675 14 0 days 00:00:03.685813580 +675 15 0 days 00:00:06.636957183 +675 16 0 days 00:00:10.149600612 +675 17 0 days 00:00:05.842757578 +675 18 0 days 00:00:06.986154420 +675 19 0 days 00:00:10.327155537 +675 20 0 days 00:00:04.157322751 +675 21 0 days 00:00:06.521904522 +675 22 0 days 00:00:13.659731840 +675 23 0 days 00:00:05.955926731 +675 24 0 days 00:00:05.800631806 +675 25 0 days 00:00:04.061125894 +675 26 0 days 00:00:04.396898468 +675 27 0 days 00:00:04.295325628 +675 28 0 days 00:00:04.194078244 +675 29 0 days 00:00:11.107335353 +675 30 0 days 00:00:12.519135455 +675 31 0 days 00:00:05.308157262 
+675 32 0 days 00:00:05.279467273 +675 33 0 days 00:00:04.085164431 +676 1 0 days 00:00:05.283786555 +676 2 0 days 00:00:02.120133045 +676 3 0 days 00:00:05.468422687 +676 4 0 days 00:00:01.858214320 +676 5 0 days 00:00:02.236322480 +676 6 0 days 00:00:02.188017914 +676 7 0 days 00:00:02.271161061 +676 8 0 days 00:00:02.007789946 +676 9 0 days 00:00:03.472486825 +676 10 0 days 00:00:03.647798842 +676 11 0 days 00:00:05.666740493 +676 12 0 days 00:00:02.259737750 +676 13 0 days 00:00:02.137846548 +676 14 0 days 00:00:03.869278253 +676 15 0 days 00:00:02.108704237 +676 16 0 days 00:00:03.277915782 +676 17 0 days 00:00:01.916752585 +676 18 0 days 00:00:03.056787360 +676 19 0 days 00:00:03.737037630 +676 20 0 days 00:00:02.984605385 +676 21 0 days 00:00:03.709767252 +676 22 0 days 00:00:03.364248403 +676 23 0 days 00:00:04.649953418 +676 24 0 days 00:00:03.541718871 +676 25 0 days 00:00:05.374348836 +676 26 0 days 00:00:02.103532611 +676 27 0 days 00:00:04.181766984 +676 28 0 days 00:00:02.026278746 +676 29 0 days 00:00:01.937079641 +676 30 0 days 00:00:06.914979768 +676 31 0 days 00:00:02.632805812 +676 32 0 days 00:00:03.112026493 +676 33 0 days 00:00:01.840765817 +676 34 0 days 00:00:05.197159977 +676 35 0 days 00:00:05.206732831 +676 36 0 days 00:00:03.261966335 +676 37 0 days 00:00:03.252165356 +676 38 0 days 00:00:01.851224708 +676 39 0 days 00:00:03.815584358 +676 40 0 days 00:00:03.540404300 +676 41 0 days 00:00:02.818831926 +676 42 0 days 00:00:07.011483738 +676 43 0 days 00:00:03.685430165 +676 44 0 days 00:00:01.929516291 +676 45 0 days 00:00:02.278786962 +676 46 0 days 00:00:02.168722591 +676 47 0 days 00:00:02.933495508 +676 48 0 days 00:00:06.284909340 +676 49 0 days 00:00:06.007295508 +676 50 0 days 00:00:02.267195320 +676 51 0 days 00:00:03.150014374 +676 52 0 days 00:00:01.891931206 +676 53 0 days 00:00:03.228229168 +676 54 0 days 00:00:02.179118990 +676 55 0 days 00:00:03.396230102 +676 56 0 days 00:00:05.678989253 +676 57 0 days 00:00:05.771237737 
+676 58 0 days 00:00:01.897970800 +676 59 0 days 00:00:01.925820538 +676 60 0 days 00:00:02.123582285 +676 61 0 days 00:00:01.983796857 +676 62 0 days 00:00:01.853272683 +676 63 0 days 00:00:02.527592434 +676 64 0 days 00:00:03.661230804 +677 1 0 days 00:00:03.341667955 +677 2 0 days 00:00:05.217818505 +677 3 0 days 00:00:03.908971304 +677 4 0 days 00:00:09.499462980 +677 5 0 days 00:00:05.516535448 +677 6 0 days 00:00:06.228652480 +677 7 0 days 00:00:05.219042210 +677 8 0 days 00:00:09.881733156 +677 9 0 days 00:00:03.888381400 +677 10 0 days 00:00:08.015709505 +677 11 0 days 00:00:03.660787433 +677 12 0 days 00:00:10.533183522 +677 13 0 days 00:00:05.122832775 +677 14 0 days 00:00:06.313040112 +677 15 0 days 00:00:05.505422948 +677 16 0 days 00:00:05.852789980 +677 17 0 days 00:00:07.954114990 +677 18 0 days 00:00:06.324429285 +677 19 0 days 00:00:05.364889915 +677 20 0 days 00:00:09.021629305 +677 21 0 days 00:00:04.961903501 +677 22 0 days 00:00:04.750952476 +677 23 0 days 00:00:05.952494545 +677 24 0 days 00:00:05.183521425 +677 25 0 days 00:00:09.920528350 +677 26 0 days 00:00:03.805783233 +677 27 0 days 00:00:03.751834715 +677 28 0 days 00:00:05.798579056 +677 29 0 days 00:00:03.441502500 +677 30 0 days 00:00:03.509772830 +677 31 0 days 00:00:05.320498480 +677 32 0 days 00:00:04.019797834 +677 33 0 days 00:00:05.706581003 +677 34 0 days 00:00:04.084124074 +677 35 0 days 00:00:05.519361852 +677 36 0 days 00:00:03.265453010 +677 37 0 days 00:00:06.498501373 +677 38 0 days 00:00:05.230500910 +677 39 0 days 00:00:10.058222428 +677 40 0 days 00:00:05.631250300 +677 41 0 days 00:00:09.783818665 +677 42 0 days 00:00:04.296975824 +677 43 0 days 00:00:05.179167725 +677 44 0 days 00:00:08.937110610 +677 45 0 days 00:00:05.119555415 +677 46 0 days 00:00:10.274922156 +677 47 0 days 00:00:10.333340051 +677 48 0 days 00:00:03.521225075 +677 49 0 days 00:00:03.682413510 +677 50 0 days 00:00:05.688083223 +677 51 0 days 00:00:09.563892450 +677 52 0 days 00:00:05.190939700 
+677 53 0 days 00:00:04.207355600 +677 54 0 days 00:00:08.811935540 +678 1 0 days 00:00:01.933914340 +678 2 0 days 00:00:01.811667880 +678 3 0 days 00:00:01.889789020 +678 4 0 days 00:00:02.440764626 +678 5 0 days 00:00:01.676552205 +678 6 0 days 00:00:02.647003625 +678 7 0 days 00:00:05.348461592 +678 8 0 days 00:00:02.724686450 +678 9 0 days 00:00:02.635183245 +678 10 0 days 00:00:01.793637520 +678 11 0 days 00:00:04.953649005 +678 12 0 days 00:00:05.455608886 +678 13 0 days 00:00:03.300246323 +678 14 0 days 00:00:01.931842302 +678 15 0 days 00:00:02.912917566 +678 16 0 days 00:00:02.076377850 +678 17 0 days 00:00:05.548251688 +678 18 0 days 00:00:03.036550760 +678 19 0 days 00:00:02.997198162 +678 20 0 days 00:00:01.886159652 +678 21 0 days 00:00:03.169969690 +678 22 0 days 00:00:01.988876824 +678 23 0 days 00:00:05.126156291 +678 24 0 days 00:00:05.163292442 +678 25 0 days 00:00:01.877074960 +678 26 0 days 00:00:01.910253890 +678 27 0 days 00:00:01.884324508 +678 28 0 days 00:00:02.858238917 +678 29 0 days 00:00:02.986664680 +678 30 0 days 00:00:01.940242942 +678 31 0 days 00:00:02.708315775 +678 32 0 days 00:00:01.815218550 +678 33 0 days 00:00:03.045628225 +678 34 0 days 00:00:04.841597272 +678 35 0 days 00:00:01.826482970 +678 36 0 days 00:00:02.013888324 +678 37 0 days 00:00:02.088902436 +678 38 0 days 00:00:02.042244636 +678 39 0 days 00:00:05.139058668 +678 40 0 days 00:00:03.001409054 +678 41 0 days 00:00:01.955653137 +678 42 0 days 00:00:02.101995897 +678 43 0 days 00:00:05.349510078 +678 44 0 days 00:00:05.552185173 +678 45 0 days 00:00:02.999782888 +678 46 0 days 00:00:05.158602188 +678 47 0 days 00:00:05.566676670 +678 48 0 days 00:00:02.022768944 +678 49 0 days 00:00:04.752568668 +678 50 0 days 00:00:05.397478853 +678 51 0 days 00:00:03.160787143 +678 52 0 days 00:00:01.893971517 +678 53 0 days 00:00:05.202200838 +678 54 0 days 00:00:05.339832332 +678 55 0 days 00:00:04.756782800 +678 56 0 days 00:00:02.208309595 +678 57 0 days 00:00:01.908833094 
+678 58 0 days 00:00:04.615243220 +678 59 0 days 00:00:04.791281588 +678 60 0 days 00:00:04.184600174 +678 61 0 days 00:00:01.891135188 +678 62 0 days 00:00:02.696628144 +678 63 0 days 00:00:02.913657053 +678 64 0 days 00:00:02.890728150 +678 65 0 days 00:00:05.158911774 +678 66 0 days 00:00:05.257873490 +678 67 0 days 00:00:02.110355533 +678 68 0 days 00:00:05.369023476 +678 69 0 days 00:00:02.790566872 +678 70 0 days 00:00:01.888871764 +678 71 0 days 00:00:02.845926570 +678 72 0 days 00:00:02.988799577 +678 73 0 days 00:00:04.513147800 +678 74 0 days 00:00:01.849186006 +678 75 0 days 00:00:01.924382942 +678 76 0 days 00:00:05.066495980 +678 77 0 days 00:00:04.482612465 +678 78 0 days 00:00:01.812026290 +678 79 0 days 00:00:05.158355107 +678 80 0 days 00:00:01.873279762 +678 81 0 days 00:00:02.845790305 +678 82 0 days 00:00:03.284146704 +678 83 0 days 00:00:01.896542725 +678 84 0 days 00:00:02.848819936 +678 85 0 days 00:00:02.244997620 +679 1 0 days 00:00:06.508630243 +679 2 0 days 00:00:08.675727310 +679 3 0 days 00:00:03.102450115 +679 4 0 days 00:00:05.264286936 +679 5 0 days 00:00:05.034394745 +679 6 0 days 00:00:03.697750087 +679 7 0 days 00:00:03.685230806 +679 8 0 days 00:00:03.620757851 +679 9 0 days 00:00:08.580174605 +679 10 0 days 00:00:08.961850716 +679 11 0 days 00:00:03.213209725 +679 12 0 days 00:00:09.845025237 +679 13 0 days 00:00:05.417978690 +679 14 0 days 00:00:03.761570920 +679 15 0 days 00:00:05.972322574 +679 16 0 days 00:00:03.637512220 +679 17 0 days 00:00:04.017493207 +679 18 0 days 00:00:05.494563380 +679 19 0 days 00:00:03.798903900 +679 20 0 days 00:00:04.085907345 +679 21 0 days 00:00:09.425567754 +679 22 0 days 00:00:03.197004930 +679 23 0 days 00:00:08.434779690 +679 24 0 days 00:00:09.935385880 +679 25 0 days 00:00:08.587486910 +679 26 0 days 00:00:03.579330354 +679 27 0 days 00:00:03.611111085 +679 28 0 days 00:00:05.935336225 +679 29 0 days 00:00:09.660018408 +679 30 0 days 00:00:04.615559013 +679 31 0 days 00:00:10.378761340 
+679 32 0 days 00:00:03.178587380 +679 33 0 days 00:00:07.925688920 +679 34 0 days 00:00:06.238651382 +679 35 0 days 00:00:08.608419970 +679 36 0 days 00:00:04.858506360 +679 37 0 days 00:00:08.511041085 +679 38 0 days 00:00:10.864636884 +679 39 0 days 00:00:03.694148844 +679 40 0 days 00:00:05.752919292 +679 41 0 days 00:00:09.439000015 +679 42 0 days 00:00:04.312151120 +679 43 0 days 00:00:05.094933005 +679 44 0 days 00:00:08.552281130 +679 45 0 days 00:00:05.845618822 +679 46 0 days 00:00:04.979489715 +679 47 0 days 00:00:05.192029936 +679 48 0 days 00:00:06.747720655 +679 49 0 days 00:00:05.640005255 +680 1 0 days 00:00:05.968150662 +680 2 0 days 00:00:05.692425866 +680 3 0 days 00:00:06.021970214 +680 4 0 days 00:00:05.323592875 +680 5 0 days 00:00:03.584892890 +680 6 0 days 00:00:05.780613951 +680 7 0 days 00:00:03.267557545 +680 8 0 days 00:00:05.696786300 +680 9 0 days 00:00:09.832296465 +680 10 0 days 00:00:05.181989455 +680 11 0 days 00:00:05.172492660 +680 12 0 days 00:00:05.101153595 +680 13 0 days 00:00:03.734555856 +680 14 0 days 00:00:08.892283840 +680 15 0 days 00:00:05.190073325 +680 16 0 days 00:00:03.484194844 +680 17 0 days 00:00:05.806779316 +680 18 0 days 00:00:05.705941152 +680 19 0 days 00:00:03.462493845 +680 20 0 days 00:00:03.603291886 +680 21 0 days 00:00:03.468861732 +680 22 0 days 00:00:08.736370330 +680 23 0 days 00:00:09.658133380 +680 24 0 days 00:00:06.243294315 +680 25 0 days 00:00:03.978186704 +680 26 0 days 00:00:09.740690445 +680 27 0 days 00:00:09.868206670 +680 28 0 days 00:00:04.444485224 +680 29 0 days 00:00:08.809004925 +680 30 0 days 00:00:03.753305580 +680 31 0 days 00:00:08.997640860 +680 32 0 days 00:00:05.061133333 +680 33 0 days 00:00:04.968654961 +680 34 0 days 00:00:05.478255850 +680 35 0 days 00:00:10.553998136 +680 36 0 days 00:00:03.795522697 +680 37 0 days 00:00:05.127008625 +680 38 0 days 00:00:05.156382725 +680 39 0 days 00:00:04.084343765 +680 40 0 days 00:00:09.490600636 +680 41 0 days 00:00:03.452021716 
+680 42 0 days 00:00:10.040625556 +680 43 0 days 00:00:06.527148646 +680 44 0 days 00:00:10.101359831 +680 45 0 days 00:00:03.734341751 +680 46 0 days 00:00:05.632121192 +680 47 0 days 00:00:04.008229793 +680 48 0 days 00:00:09.262031528 +680 49 0 days 00:00:05.463846952 +680 50 0 days 00:00:09.359238348 +680 51 0 days 00:00:05.406909860 +680 52 0 days 00:00:09.726901020 +680 53 0 days 00:00:05.785205185 +681 1 0 days 00:00:01.897400790 +681 2 0 days 00:00:01.866959188 +681 3 0 days 00:00:04.648927045 +681 4 0 days 00:00:03.468532777 +681 5 0 days 00:00:04.766237882 +681 6 0 days 00:00:05.120080276 +681 7 0 days 00:00:04.261835765 +681 8 0 days 00:00:01.686352520 +681 9 0 days 00:00:01.770234760 +681 10 0 days 00:00:05.344801152 +681 11 0 days 00:00:03.458635920 +681 12 0 days 00:00:01.733976624 +681 13 0 days 00:00:02.154656127 +681 14 0 days 00:00:02.944022096 +681 15 0 days 00:00:01.719518984 +681 16 0 days 00:00:02.886366765 +681 17 0 days 00:00:02.716192945 +681 18 0 days 00:00:03.003560093 +681 19 0 days 00:00:02.067477052 +681 20 0 days 00:00:02.937938856 +681 21 0 days 00:00:02.889500765 +681 22 0 days 00:00:03.020334343 +681 23 0 days 00:00:02.734431766 +681 24 0 days 00:00:05.548752678 +681 25 0 days 00:00:04.975650624 +681 26 0 days 00:00:02.089954717 +681 27 0 days 00:00:01.899735248 +681 28 0 days 00:00:04.575647816 +681 29 0 days 00:00:03.818333208 +681 30 0 days 00:00:04.144548610 +681 31 0 days 00:00:04.637894710 +681 32 0 days 00:00:03.018960835 +681 33 0 days 00:00:02.984089892 +681 34 0 days 00:00:02.635434048 +681 35 0 days 00:00:02.818116731 +681 36 0 days 00:00:03.259951873 +681 37 0 days 00:00:04.354155595 +681 38 0 days 00:00:01.907343714 +681 39 0 days 00:00:04.648145755 +681 40 0 days 00:00:04.308626345 +681 41 0 days 00:00:02.535756200 +681 42 0 days 00:00:04.352853145 +681 43 0 days 00:00:04.613503256 +681 44 0 days 00:00:01.698868080 +681 45 0 days 00:00:03.091383390 +681 46 0 days 00:00:03.379269288 +681 47 0 days 00:00:03.245780142 
+681 48 0 days 00:00:01.643705370 +681 49 0 days 00:00:02.013095632 +681 50 0 days 00:00:01.896670705 +681 51 0 days 00:00:02.021758255 +681 52 0 days 00:00:04.698963470 +681 53 0 days 00:00:01.959665290 +681 54 0 days 00:00:01.765459317 +681 55 0 days 00:00:04.691258300 +681 56 0 days 00:00:05.057240960 +681 57 0 days 00:00:01.874731020 +681 58 0 days 00:00:01.623744220 +681 59 0 days 00:00:02.659121116 +681 60 0 days 00:00:01.878280616 +681 61 0 days 00:00:01.657362945 +681 62 0 days 00:00:05.441264695 +681 63 0 days 00:00:05.664842006 +681 64 0 days 00:00:03.015636750 +681 65 0 days 00:00:04.748863330 +681 66 0 days 00:00:01.880613730 +681 67 0 days 00:00:02.660994772 +681 68 0 days 00:00:04.327967805 +681 69 0 days 00:00:04.323567190 +681 70 0 days 00:00:01.679927840 +681 71 0 days 00:00:05.398570485 +681 72 0 days 00:00:02.834848860 +681 73 0 days 00:00:02.234398820 +681 74 0 days 00:00:03.085076851 +681 75 0 days 00:00:02.898615866 +681 76 0 days 00:00:05.118984480 +681 77 0 days 00:00:02.492309970 +681 78 0 days 00:00:01.789980493 +681 79 0 days 00:00:02.255144445 +681 80 0 days 00:00:02.845218240 +682 1 0 days 00:00:05.633598635 +682 2 0 days 00:00:03.564188067 +682 3 0 days 00:00:03.010891460 +682 4 0 days 00:00:02.144040940 +682 5 0 days 00:00:04.104108580 +682 6 0 days 00:00:02.744250180 +682 7 0 days 00:00:02.685521340 +682 8 0 days 00:00:01.981149773 +682 9 0 days 00:00:04.729993744 +682 10 0 days 00:00:01.823169356 +682 11 0 days 00:00:02.304331672 +682 12 0 days 00:00:02.328600810 +682 13 0 days 00:00:02.647913240 +682 14 0 days 00:00:05.074877911 +682 15 0 days 00:00:01.976115381 +682 16 0 days 00:00:01.850989250 +682 17 0 days 00:00:02.204860448 +682 18 0 days 00:00:04.006323106 +682 19 0 days 00:00:02.380791585 +682 20 0 days 00:00:03.628825051 +682 21 0 days 00:00:02.976385132 +682 22 0 days 00:00:05.009711780 +682 23 0 days 00:00:01.919470746 +682 24 0 days 00:00:05.029312428 +682 25 0 days 00:00:03.457415300 +682 26 0 days 00:00:05.387529904 
+682 27 0 days 00:00:05.326327515 +682 28 0 days 00:00:02.835404596 +682 29 0 days 00:00:04.902624800 +682 30 0 days 00:00:04.483845710 +682 31 0 days 00:00:01.897562085 +682 32 0 days 00:00:03.021233237 +682 33 0 days 00:00:02.727531140 +682 34 0 days 00:00:01.832020563 +682 35 0 days 00:00:05.058557700 +682 36 0 days 00:00:02.566366090 +682 37 0 days 00:00:02.037155503 +682 38 0 days 00:00:01.965117116 +682 39 0 days 00:00:01.938623000 +682 40 0 days 00:00:04.518222040 +682 41 0 days 00:00:04.968995073 +682 42 0 days 00:00:01.931780509 +682 43 0 days 00:00:01.944633271 +682 44 0 days 00:00:04.453459130 +682 45 0 days 00:00:01.781033270 +682 46 0 days 00:00:01.934551060 +682 47 0 days 00:00:01.848863783 +682 48 0 days 00:00:05.121731400 +682 49 0 days 00:00:05.417880502 +682 50 0 days 00:00:05.197902576 +682 51 0 days 00:00:02.716813140 +682 52 0 days 00:00:04.838650088 +682 53 0 days 00:00:01.757403816 +682 54 0 days 00:00:04.777531828 +682 55 0 days 00:00:01.839708045 +682 56 0 days 00:00:02.803920424 +682 57 0 days 00:00:02.740974016 +682 58 0 days 00:00:02.637999395 +682 59 0 days 00:00:01.866336368 +682 60 0 days 00:00:02.832859224 +682 61 0 days 00:00:02.762253304 +682 62 0 days 00:00:02.850282872 +682 63 0 days 00:00:02.928892931 +682 64 0 days 00:00:01.860618464 +682 65 0 days 00:00:02.605905760 +682 66 0 days 00:00:05.390760967 +682 67 0 days 00:00:02.498687700 +682 68 0 days 00:00:01.819431460 +682 69 0 days 00:00:02.835196704 +682 70 0 days 00:00:02.116888318 +682 71 0 days 00:00:02.437602331 +682 72 0 days 00:00:01.795315782 +682 73 0 days 00:00:02.024637442 +682 74 0 days 00:00:05.536263126 +682 75 0 days 00:00:04.863565583 +682 76 0 days 00:00:01.935017420 +682 77 0 days 00:00:03.050634842 +682 78 0 days 00:00:04.972747276 +682 79 0 days 00:00:02.174932561 +682 80 0 days 00:00:01.861295153 +682 81 0 days 00:00:01.943791502 +683 1 0 days 00:01:54.481769756 +683 2 0 days 00:02:13.181055581 +684 1 0 days 00:00:23.537215852 +684 2 0 days 
00:00:37.421555184 +684 3 0 days 00:01:14.087223422 +684 4 0 days 00:00:21.795296280 +684 5 0 days 00:00:34.728459055 +684 6 0 days 00:00:58.123027640 +684 7 0 days 00:00:37.429184912 +684 8 0 days 00:00:39.479135508 +685 1 0 days 00:01:07.262157567 +685 2 0 days 00:02:11.735207196 +686 1 0 days 00:01:18.980518930 +686 2 0 days 00:01:05.174927940 +686 3 0 days 00:00:24.093839005 +686 4 0 days 00:00:35.448001625 +686 5 0 days 00:00:18.655396273 +686 6 0 days 00:01:20.346000140 +687 1 0 days 00:01:56.669701664 +687 2 0 days 00:00:30.482772886 +687 3 0 days 00:00:34.243441610 +687 4 0 days 00:00:58.127676400 +687 5 0 days 00:00:36.400970128 +687 6 0 days 00:00:58.037053670 +687 7 0 days 00:00:34.697267475 +687 8 0 days 00:00:59.081266385 +688 1 0 days 00:00:58.634495720 +688 2 0 days 00:01:06.608779095 +688 3 0 days 00:00:38.013041664 +688 4 0 days 00:01:05.424224730 +688 5 0 days 00:00:35.722668330 +688 6 0 days 00:01:10.554250644 +688 7 0 days 00:00:35.754495690 +688 8 0 days 00:00:59.266472333 +688 9 0 days 00:01:05.431628755 +689 1 0 days 00:00:00.198422933 +689 2 0 days 00:00:00.331798032 +689 3 0 days 00:00:00.762653906 +689 4 0 days 00:00:00.181818236 +689 5 0 days 00:00:00.333997298 +689 7 0 days 00:00:00.974324540 +689 8 0 days 00:00:00.316340242 +689 9 0 days 00:00:00.893130811 +689 10 0 days 00:00:00.291855605 +689 11 0 days 00:00:00.757330200 +689 13 0 days 00:00:00.151450928 +689 14 0 days 00:00:00.825510755 +689 15 0 days 00:00:00.221517870 +689 16 0 days 00:00:00.334177536 +689 17 0 days 00:00:00.270672446 +689 18 0 days 00:00:00.170412062 +689 20 0 days 00:00:00.149667025 +689 22 0 days 00:00:00.201492332 +689 23 0 days 00:00:00.318808637 +689 24 0 days 00:00:00.980336155 +689 25 0 days 00:00:00.874747563 +689 27 0 days 00:00:00.352169140 +689 28 0 days 00:00:00.359574111 +689 29 0 days 00:00:00.160273726 +689 31 0 days 00:00:00.760845546 +689 32 0 days 00:00:00.274248946 +689 33 0 days 00:00:00.322215400 +689 34 0 days 00:00:00.315962457 +689 35 0 
days 00:00:00.198134155 +689 36 0 days 00:00:00.268049813 +689 37 0 days 00:00:00.981996190 +689 38 0 days 00:00:00.154700196 +689 39 0 days 00:00:00.161549526 +689 41 0 days 00:00:00.320302780 +689 45 0 days 00:00:00.896207514 +689 46 0 days 00:00:00.171331396 +689 47 0 days 00:00:00.208431068 +689 48 0 days 00:00:00.970248950 +689 49 0 days 00:00:00.259214510 +689 50 0 days 00:00:00.962923174 +689 51 0 days 00:00:00.874982266 +689 52 0 days 00:00:00.314993737 +689 53 0 days 00:00:00.267565246 +689 54 0 days 00:00:00.759199300 +689 55 0 days 00:00:00.366379385 +689 56 0 days 00:00:00.171425715 +689 57 0 days 00:00:00.967196446 +689 58 0 days 00:00:00.260537385 +689 59 0 days 00:00:00.373790700 +689 61 0 days 00:00:00.288615855 +689 62 0 days 00:00:00.822243895 +689 63 0 days 00:00:00.941964142 +689 64 0 days 00:00:00.269340353 +689 65 0 days 00:00:00.827825600 +689 66 0 days 00:00:00.160579813 +689 67 0 days 00:00:00.173672325 +689 68 0 days 00:00:00.179115205 +689 69 0 days 00:00:00.310796092 +689 71 0 days 00:00:00.322769884 +689 73 0 days 00:00:00.363334797 +689 74 0 days 00:00:00.927089882 +689 75 0 days 00:00:00.367363175 +689 76 0 days 00:00:00.264062940 +689 77 0 days 00:00:00.250177548 +689 78 0 days 00:00:00.802368545 +689 79 0 days 00:00:00.199800095 +689 80 0 days 00:00:00.957305173 +689 83 0 days 00:00:00.171920515 +689 84 0 days 00:00:00.827170035 +689 85 0 days 00:00:00.253000992 +689 86 0 days 00:00:00.293789355 +689 87 0 days 00:00:00.989264620 +689 88 0 days 00:00:00.967790060 +689 89 0 days 00:00:00.843260344 +689 90 0 days 00:00:00.958267152 +689 91 0 days 00:00:00.760375373 +689 92 0 days 00:00:00.925858677 +689 93 0 days 00:00:00.952007596 +689 94 0 days 00:00:00.148769525 +689 95 0 days 00:00:00.822843995 +689 96 0 days 00:00:00.328083266 +689 97 0 days 00:00:00.177073648 +689 99 0 days 00:00:00.252065020 +689 100 0 days 00:00:00.762932386 +690 1 0 days 00:00:00.337354030 +690 2 0 days 00:00:00.179316754 +690 3 0 days 00:00:00.386982048 +690 
4 0 days 00:00:00.987869415 +690 5 0 days 00:00:00.385930870 +690 6 0 days 00:00:00.379050980 +690 7 0 days 00:00:00.341570375 +690 8 0 days 00:00:00.849375135 +690 9 0 days 00:00:00.208266463 +690 10 0 days 00:00:00.347959565 +690 11 0 days 00:00:00.155804770 +690 12 0 days 00:00:00.381237425 +690 13 0 days 00:00:00.773282026 +690 15 0 days 00:00:01.000307102 +690 16 0 days 00:00:00.272808604 +690 17 0 days 00:00:00.916340994 +690 19 0 days 00:00:00.164496980 +690 20 0 days 00:00:00.788789726 +690 21 0 days 00:00:00.867603715 +690 22 0 days 00:00:00.903333573 +690 23 0 days 00:00:00.177156833 +690 24 0 days 00:00:01.014933870 +690 25 0 days 00:00:00.903695823 +690 26 0 days 00:00:00.327825629 +690 29 0 days 00:00:00.361208127 +690 31 0 days 00:00:00.297069410 +690 32 0 days 00:00:00.288906105 +690 33 0 days 00:00:00.167870748 +690 35 0 days 00:00:01.008907351 +690 36 0 days 00:00:00.347552478 +690 37 0 days 00:00:00.770277626 +690 38 0 days 00:00:00.869691945 +690 39 0 days 00:00:00.296248965 +690 40 0 days 00:00:00.969497471 +690 41 0 days 00:00:00.347874230 +690 42 0 days 00:00:00.268479986 +690 43 0 days 00:00:00.806425733 +690 44 0 days 00:00:00.983861762 +690 45 0 days 00:00:00.161354610 +690 46 0 days 00:00:00.330783430 +690 47 0 days 00:00:00.801906673 +690 48 0 days 00:00:00.270713671 +690 49 0 days 00:00:00.153906410 +690 50 0 days 00:00:01.003125691 +690 51 0 days 00:00:00.275931240 +690 53 0 days 00:00:00.182372416 +690 54 0 days 00:00:00.182264709 +690 55 0 days 00:00:00.179870845 +690 56 0 days 00:00:00.327397501 +690 57 0 days 00:00:00.278203086 +690 58 0 days 00:00:00.272200414 +690 59 0 days 00:00:00.987431986 +690 60 0 days 00:00:00.218630526 +690 62 0 days 00:00:00.159927220 +690 63 0 days 00:00:00.912417333 +690 64 0 days 00:00:00.871095085 +690 65 0 days 00:00:00.329933784 +690 66 0 days 00:00:00.768086840 +690 67 0 days 00:00:00.921081696 +690 68 0 days 00:00:00.304332960 +690 69 0 days 00:00:00.997877268 +690 70 0 days 00:00:00.153570595 +690 
71 0 days 00:00:00.948533015 +690 73 0 days 00:00:01.001644560 +690 75 0 days 00:00:00.947192935 +690 76 0 days 00:00:00.329131143 +690 77 0 days 00:00:00.788991766 +690 78 0 days 00:00:00.863295900 +690 79 0 days 00:00:00.876835575 +690 80 0 days 00:00:00.306969220 +690 82 0 days 00:00:00.177902466 +690 85 0 days 00:00:00.980910393 +690 86 0 days 00:00:00.329629643 +690 87 0 days 00:00:00.987756400 +690 88 0 days 00:00:00.336093656 +690 89 0 days 00:00:00.294617000 +690 90 0 days 00:00:01.022874347 +690 91 0 days 00:00:00.299348831 +690 92 0 days 00:00:00.170327400 +690 93 0 days 00:00:00.328009877 +690 94 0 days 00:00:00.965047194 +690 95 0 days 00:00:00.862588660 +690 96 0 days 00:00:00.343087480 +690 97 0 days 00:00:00.295409057 +690 98 0 days 00:00:00.286704243 +690 99 0 days 00:00:01.021584590 +690 100 0 days 00:00:00.209539903 +691 2 0 days 00:00:00.537672956 +691 3 0 days 00:00:00.186573520 +691 4 0 days 00:00:00.504912080 +691 5 0 days 00:00:00.188270025 +691 6 0 days 00:00:00.191481644 +691 7 0 days 00:00:00.170534306 +691 8 0 days 00:00:00.119457582 +691 9 0 days 00:00:00.540801745 +691 10 0 days 00:00:00.477601475 +691 11 0 days 00:00:00.542795428 +691 12 0 days 00:00:00.137637108 +691 14 0 days 00:00:00.512777600 +691 15 0 days 00:00:00.170694573 +691 16 0 days 00:00:00.147985414 +691 17 0 days 00:00:00.508651540 +691 18 0 days 00:00:00.185492250 +691 22 0 days 00:00:00.164860346 +691 23 0 days 00:00:00.168612786 +691 24 0 days 00:00:00.190518529 +691 25 0 days 00:00:00.481591050 +691 26 0 days 00:00:00.544279632 +691 27 0 days 00:00:00.172202084 +691 28 0 days 00:00:00.216457710 +691 30 0 days 00:00:00.544781156 +691 31 0 days 00:00:00.535191364 +691 32 0 days 00:00:00.163032140 +691 33 0 days 00:00:00.120152525 +691 34 0 days 00:00:00.179793216 +691 35 0 days 00:00:00.447619020 +691 36 0 days 00:00:00.124209393 +691 37 0 days 00:00:00.189189580 +691 38 0 days 00:00:00.187548365 +691 39 0 days 00:00:00.505818211 +691 40 0 days 00:00:00.218871804 +691 
41 0 days 00:00:00.444333253 +691 42 0 days 00:00:00.538177632 +691 43 0 days 00:00:00.098694800 +691 44 0 days 00:00:00.186441690 +691 45 0 days 00:00:00.186241400 +691 46 0 days 00:00:00.119893258 +691 47 0 days 00:00:00.188424806 +691 48 0 days 00:00:00.538775313 +691 49 0 days 00:00:00.190914532 +691 50 0 days 00:00:00.205592713 +691 52 0 days 00:00:00.541365564 +691 53 0 days 00:00:00.184168669 +691 54 0 days 00:00:00.514548260 +691 57 0 days 00:00:00.095135733 +691 58 0 days 00:00:00.537512212 +691 59 0 days 00:00:00.192844874 +691 60 0 days 00:00:00.131700186 +691 62 0 days 00:00:00.536105588 +691 63 0 days 00:00:00.095406076 +691 64 0 days 00:00:00.145656664 +691 65 0 days 00:00:00.474906035 +691 66 0 days 00:00:00.539313407 +691 67 0 days 00:00:00.541039889 +691 69 0 days 00:00:00.167370766 +691 71 0 days 00:00:00.185618443 +691 72 0 days 00:00:00.151224821 +691 74 0 days 00:00:00.146159396 +691 75 0 days 00:00:00.212241597 +691 77 0 days 00:00:00.181773600 +691 79 0 days 00:00:00.147411774 +691 80 0 days 00:00:00.474344265 +691 81 0 days 00:00:00.199233025 +691 82 0 days 00:00:00.513595965 +691 83 0 days 00:00:00.176191060 +691 84 0 days 00:00:00.102868540 +691 85 0 days 00:00:00.544169341 +691 86 0 days 00:00:00.168298970 +691 87 0 days 00:00:00.214367753 +691 88 0 days 00:00:00.149087097 +691 89 0 days 00:00:00.178257352 +691 90 0 days 00:00:00.177213971 +691 91 0 days 00:00:00.477964130 +691 92 0 days 00:00:00.171225926 +691 93 0 days 00:00:00.538262428 +691 94 0 days 00:00:00.188610312 +691 95 0 days 00:00:00.146015429 +691 97 0 days 00:00:00.188940513 +691 99 0 days 00:00:00.098481438 +691 100 0 days 00:00:00.152672898 +692 1 0 days 00:00:00.440414793 +692 2 0 days 00:00:00.188054306 +692 3 0 days 00:00:00.496762850 +692 4 0 days 00:00:00.557208484 +692 5 0 days 00:00:00.500322340 +692 6 0 days 00:00:00.102919002 +692 8 0 days 00:00:00.103584906 +692 9 0 days 00:00:00.211353026 +692 10 0 days 00:00:00.496308005 +692 11 0 days 00:00:00.167001746 +692 
13 0 days 00:00:00.100760500 +692 14 0 days 00:00:00.155310226 +692 15 0 days 00:00:00.460920853 +692 16 0 days 00:00:00.450138206 +692 17 0 days 00:00:00.171593240 +692 18 0 days 00:00:00.459299313 +692 19 0 days 00:00:00.184437329 +692 20 0 days 00:00:00.496973135 +692 21 0 days 00:00:00.494972525 +692 22 0 days 00:00:00.102518593 +692 23 0 days 00:00:00.529347936 +692 25 0 days 00:00:00.494869195 +692 26 0 days 00:00:00.181321252 +692 27 0 days 00:00:00.172871465 +692 28 0 days 00:00:00.543406853 +692 30 0 days 00:00:00.170554195 +692 32 0 days 00:00:00.564195014 +692 34 0 days 00:00:00.514851362 +692 36 0 days 00:00:00.160444028 +692 38 0 days 00:00:00.180851095 +692 39 0 days 00:00:00.237498051 +692 40 0 days 00:00:00.158914620 +692 41 0 days 00:00:00.215538064 +692 43 0 days 00:00:00.186475185 +692 44 0 days 00:00:00.474490240 +692 45 0 days 00:00:00.565002770 +692 46 0 days 00:00:00.169227726 +692 47 0 days 00:00:00.562537094 +692 48 0 days 00:00:00.558775467 +692 49 0 days 00:00:00.173769166 +692 50 0 days 00:00:00.109568272 +692 51 0 days 00:00:00.108371676 +692 54 0 days 00:00:00.114015006 +692 55 0 days 00:00:00.182798835 +692 56 0 days 00:00:00.107364253 +692 57 0 days 00:00:00.558648182 +692 58 0 days 00:00:00.550132927 +692 59 0 days 00:00:00.106864724 +692 60 0 days 00:00:00.530598583 +692 61 0 days 00:00:00.196806984 +692 62 0 days 00:00:00.548926218 +692 63 0 days 00:00:00.463644073 +692 64 0 days 00:00:00.469275000 +692 65 0 days 00:00:00.186172309 +692 66 0 days 00:00:00.109814120 +692 67 0 days 00:00:00.175543650 +692 70 0 days 00:00:00.170000493 +692 72 0 days 00:00:00.220116846 +692 73 0 days 00:00:00.551269425 +692 75 0 days 00:00:00.136689980 +692 76 0 days 00:00:00.154838734 +692 77 0 days 00:00:00.498098970 +692 78 0 days 00:00:00.084948673 +692 79 0 days 00:00:00.183223006 +692 80 0 days 00:00:00.122015781 +692 81 0 days 00:00:00.154918380 +692 83 0 days 00:00:00.161839080 +692 84 0 days 00:00:00.163311453 +692 85 0 days 
00:00:00.479175440 +692 86 0 days 00:00:00.552125448 +692 87 0 days 00:00:00.088557593 +692 88 0 days 00:00:00.443161773 +692 89 0 days 00:00:00.174592480 +692 90 0 days 00:00:00.098681613 +692 91 0 days 00:00:00.539617153 +692 92 0 days 00:00:00.196053713 +692 93 0 days 00:00:00.085341100 +692 94 0 days 00:00:00.191521940 +692 96 0 days 00:00:00.098702853 +692 98 0 days 00:00:00.106652406 +692 99 0 days 00:00:00.438033546 +692 100 0 days 00:00:00.547607520 +693 1 0 days 00:00:00.862392890 +693 2 0 days 00:00:00.345040380 +693 3 0 days 00:00:00.296637630 +693 4 0 days 00:00:00.188509552 +693 5 0 days 00:00:00.989364734 +693 6 0 days 00:00:00.286939160 +693 7 0 days 00:00:00.163639382 +693 8 0 days 00:00:00.829448750 +693 9 0 days 00:00:00.285189252 +693 10 0 days 00:00:00.941112914 +693 11 0 days 00:00:00.960504160 +693 12 0 days 00:00:00.838550345 +693 13 0 days 00:00:00.157609008 +693 14 0 days 00:00:00.226900540 +693 15 0 days 00:00:00.209293726 +693 16 0 days 00:00:00.804113320 +693 17 0 days 00:00:00.288768760 +693 18 0 days 00:00:00.337299738 +693 19 0 days 00:00:00.273562366 +693 20 0 days 00:00:00.389657554 +693 21 0 days 00:00:00.264848800 +693 22 0 days 00:00:00.346499755 +693 23 0 days 00:00:01.016686827 +693 24 0 days 00:00:00.162089062 +693 25 0 days 00:00:00.284738612 +693 26 0 days 00:00:00.929070393 +693 27 0 days 00:00:00.995530243 +693 28 0 days 00:00:00.825911065 +693 29 0 days 00:00:00.877100216 +693 30 0 days 00:00:00.209986262 +693 31 0 days 00:00:00.837739745 +693 32 0 days 00:00:00.949730852 +693 33 0 days 00:00:00.184264020 +693 34 0 days 00:00:00.228873385 +693 35 0 days 00:00:00.212033572 +693 36 0 days 00:00:00.293075415 +693 37 0 days 00:00:00.297719040 +693 38 0 days 00:00:00.336855041 +693 39 0 days 00:00:00.836964980 +693 40 0 days 00:00:00.264856037 +693 41 0 days 00:00:00.164686083 +693 42 0 days 00:00:00.291650965 +693 43 0 days 00:00:00.801430280 +693 44 0 days 00:00:00.333414510 +693 45 0 days 00:00:00.302407370 +693 46 0 days 
00:00:00.272860705 +693 47 0 days 00:00:00.300409868 +693 48 0 days 00:00:00.211784502 +693 49 0 days 00:00:01.003271196 +693 50 0 days 00:00:01.008947801 +693 51 0 days 00:00:00.805517255 +693 52 0 days 00:00:00.295493185 +693 53 0 days 00:00:00.241022955 +693 54 0 days 00:00:00.998239203 +693 55 0 days 00:00:00.185751588 +693 56 0 days 00:00:01.021486303 +693 57 0 days 00:00:00.287513056 +693 58 0 days 00:00:00.229622210 +693 59 0 days 00:00:00.828718515 +693 60 0 days 00:00:00.177684835 +693 61 0 days 00:00:01.018017460 +693 62 0 days 00:00:00.309509636 +693 63 0 days 00:00:00.210429475 +693 64 0 days 00:00:00.257832962 +693 65 0 days 00:00:00.876151140 +693 66 0 days 00:00:00.213093326 +693 67 0 days 00:00:00.279646505 +693 68 0 days 00:00:00.300885196 +693 69 0 days 00:00:00.276309612 +693 70 0 days 00:00:00.207737278 +693 71 0 days 00:00:00.349994344 +693 72 0 days 00:00:01.008982356 +693 73 0 days 00:00:00.841123335 +693 74 0 days 00:00:00.270402138 +693 75 0 days 00:00:00.197399095 +693 76 0 days 00:00:00.177796640 +693 77 0 days 00:00:00.974382623 +693 78 0 days 00:00:00.804223060 +693 79 0 days 00:00:00.988576827 +693 80 0 days 00:00:00.176298265 +693 81 0 days 00:00:00.178453453 +693 82 0 days 00:00:00.286408240 +693 83 0 days 00:00:00.205817456 +693 84 0 days 00:00:00.285858416 +693 85 0 days 00:00:00.334633615 +693 86 0 days 00:00:00.335723705 +693 87 0 days 00:00:00.206464176 +693 88 0 days 00:00:00.869672956 +693 89 0 days 00:00:00.285671692 +693 90 0 days 00:00:00.842876630 +693 91 0 days 00:00:00.163884600 +693 92 0 days 00:00:00.336445245 +693 93 0 days 00:00:00.177364627 +693 94 0 days 00:00:00.174502450 +693 95 0 days 00:00:00.971244257 +693 96 0 days 00:00:00.178226310 +693 97 0 days 00:00:00.389006416 +693 98 0 days 00:00:00.240105844 +693 99 0 days 00:00:00.163041000 +693 100 0 days 00:00:00.345130949 +694 1 0 days 00:00:00.150792160 +694 2 0 days 00:00:00.452831980 +694 3 0 days 00:00:00.114803941 +694 4 0 days 00:00:00.221141553 +694 5 0 
days 00:00:00.160653865 +694 6 0 days 00:00:00.219158166 +694 7 0 days 00:00:00.098512982 +694 8 0 days 00:00:00.172024515 +694 9 0 days 00:00:00.115001857 +694 10 0 days 00:00:00.187643433 +694 11 0 days 00:00:00.472666495 +694 12 0 days 00:00:00.095957448 +694 13 0 days 00:00:00.219070526 +694 14 0 days 00:00:00.151787565 +694 15 0 days 00:00:00.102297940 +694 16 0 days 00:00:00.547254685 +694 17 0 days 00:00:00.093092184 +694 18 0 days 00:00:00.464507345 +694 19 0 days 00:00:00.153737821 +694 20 0 days 00:00:00.498864833 +694 21 0 days 00:00:00.168211273 +694 22 0 days 00:00:00.172057865 +694 23 0 days 00:00:00.517861528 +694 24 0 days 00:00:00.512964422 +694 25 0 days 00:00:00.160897080 +694 26 0 days 00:00:00.452752705 +694 27 0 days 00:00:00.193753705 +694 28 0 days 00:00:00.149238411 +694 29 0 days 00:00:00.161394975 +694 30 0 days 00:00:00.174393000 +694 31 0 days 00:00:00.465675505 +694 32 0 days 00:00:00.144151300 +694 33 0 days 00:00:00.452694115 +694 34 0 days 00:00:00.472494780 +694 35 0 days 00:00:00.452210500 +694 36 0 days 00:00:00.161126100 +694 37 0 days 00:00:00.096215863 +694 38 0 days 00:00:00.132504265 +694 39 0 days 00:00:00.165894585 +694 40 0 days 00:00:00.160974340 +694 41 0 days 00:00:00.167560440 +694 42 0 days 00:00:00.468828310 +694 43 0 days 00:00:00.168236525 +694 44 0 days 00:00:00.189542943 +694 45 0 days 00:00:00.090041076 +694 46 0 days 00:00:00.169607226 +694 47 0 days 00:00:00.190832105 +694 48 0 days 00:00:00.114387837 +694 49 0 days 00:00:00.465401310 +694 50 0 days 00:00:00.162960145 +694 51 0 days 00:00:00.167197735 +694 52 0 days 00:00:00.468044125 +694 53 0 days 00:00:00.098124852 +694 54 0 days 00:00:00.514227824 +694 55 0 days 00:00:00.166796165 +694 56 0 days 00:00:00.466470760 +694 57 0 days 00:00:00.465754320 +694 58 0 days 00:00:00.150603522 +694 59 0 days 00:00:00.160756255 +694 60 0 days 00:00:00.174616446 +694 61 0 days 00:00:00.454713220 +694 62 0 days 00:00:00.168260535 +694 63 0 days 00:00:00.090536280 +694 64 
0 days 00:00:00.470391820 +694 65 0 days 00:00:00.096157608 +694 66 0 days 00:00:00.192807195 +694 67 0 days 00:00:00.168876040 +694 68 0 days 00:00:00.467523510 +694 69 0 days 00:00:00.454882385 +694 70 0 days 00:00:00.100998600 +694 71 0 days 00:00:00.485718032 +694 72 0 days 00:00:00.454148875 +694 73 0 days 00:00:00.167368435 +694 74 0 days 00:00:00.545916001 +694 75 0 days 00:00:00.462563695 +694 76 0 days 00:00:00.113015183 +694 77 0 days 00:00:00.103693220 +694 78 0 days 00:00:00.167273805 +694 79 0 days 00:00:00.462062500 +694 80 0 days 00:00:00.217640148 +694 81 0 days 00:00:00.461942360 +694 82 0 days 00:00:00.133868860 +694 83 0 days 00:00:00.529564146 +694 84 0 days 00:00:00.100858245 +694 85 0 days 00:00:00.461406870 +694 86 0 days 00:00:00.547704268 +694 87 0 days 00:00:00.091433751 +694 88 0 days 00:00:00.218105416 +694 89 0 days 00:00:00.547371522 +694 90 0 days 00:00:00.089432976 +694 91 0 days 00:00:00.167831340 +694 92 0 days 00:00:00.088693696 +694 93 0 days 00:00:00.192317305 +694 94 0 days 00:00:00.115196022 +694 95 0 days 00:00:00.193136760 +694 96 0 days 00:00:00.186232156 +694 97 0 days 00:00:00.193941055 +694 98 0 days 00:00:00.196096270 +694 99 0 days 00:00:00.197214620 +694 100 0 days 00:00:00.089736184 +695 1 0 days 00:00:01.390078082 +695 3 0 days 00:00:01.671281675 +695 6 0 days 00:00:08.945430428 +695 7 0 days 00:00:11.675934594 +695 9 0 days 00:00:00.719547038 +695 10 0 days 00:00:01.301191466 +695 11 0 days 00:00:00.515014393 +695 13 0 days 00:00:01.593266880 +695 14 0 days 00:00:06.092382136 +695 15 0 days 00:00:00.798312215 +695 16 0 days 00:00:09.133539405 +695 17 0 days 00:00:00.779426315 +695 18 0 days 00:00:09.448851176 +695 20 0 days 00:00:00.723903230 +695 21 0 days 00:00:02.281645291 +695 22 0 days 00:00:13.225364242 +695 24 0 days 00:00:02.647250925 +695 26 0 days 00:00:00.970778012 +695 28 0 days 00:00:01.087031887 +695 29 0 days 00:00:06.938069437 +695 30 0 days 00:00:07.460432062 +695 32 0 days 00:00:00.687166520 +695 
33 0 days 00:00:01.237894996 +695 34 0 days 00:00:00.666051575 +695 35 0 days 00:00:07.788827344 +695 36 0 days 00:00:02.079393095 +695 37 0 days 00:00:01.179243166 +695 41 0 days 00:00:01.540252388 +695 42 0 days 00:00:05.731783810 +695 43 0 days 00:00:00.745844515 +695 44 0 days 00:00:00.430775625 +695 45 0 days 00:00:07.585226992 +696 1 0 days 00:00:00.784393226 +696 2 0 days 00:00:01.848786498 +696 3 0 days 00:00:01.408500785 +696 4 0 days 00:00:03.460340295 +696 5 0 days 00:00:06.487368602 +696 6 0 days 00:00:13.824865835 +696 7 0 days 00:00:07.838645130 +696 8 0 days 00:00:00.613405864 +696 9 0 days 00:00:12.882478744 +696 12 0 days 00:00:01.183136212 +696 13 0 days 00:00:02.205454085 +696 14 0 days 00:00:01.725913412 +696 16 0 days 00:00:02.224910955 +696 17 0 days 00:00:01.186103756 +696 18 0 days 00:00:01.050204331 +696 20 0 days 00:00:03.070730364 +696 21 0 days 00:00:03.516949700 +696 22 0 days 00:00:02.248705727 +696 23 0 days 00:00:00.929594637 +696 24 0 days 00:00:01.080398875 +696 26 0 days 00:00:14.324087765 +696 27 0 days 00:00:02.683932245 +696 28 0 days 00:00:00.946461120 +696 29 0 days 00:00:01.927706495 +696 30 0 days 00:00:03.599864880 +696 31 0 days 00:00:01.015751228 +696 32 0 days 00:00:00.821584689 +696 33 0 days 00:00:03.128051292 +696 34 0 days 00:00:00.923591856 +696 35 0 days 00:00:00.456265250 +696 36 0 days 00:00:02.056240295 +696 37 0 days 00:00:04.125422538 +696 38 0 days 00:00:03.089779123 +696 39 0 days 00:00:00.792142151 +696 40 0 days 00:00:06.488968525 +696 41 0 days 00:00:01.267334982 +696 42 0 days 00:00:03.248917220 +696 43 0 days 00:00:00.773200623 +696 44 0 days 00:00:07.165651725 +696 46 0 days 00:00:00.988741398 +696 47 0 days 00:00:01.575687311 +696 48 0 days 00:00:00.968480600 +696 49 0 days 00:00:03.189993925 +696 50 0 days 00:00:01.892485650 +696 51 0 days 00:00:08.701832600 +696 52 0 days 00:00:02.221753645 +696 53 0 days 00:00:03.375824772 +696 54 0 days 00:00:01.237885857 +697 2 0 days 00:00:02.914866708 +697 3 0 
days 00:00:06.913150728 +697 4 0 days 00:00:01.948265194 +697 5 0 days 00:00:01.245523942 +697 6 0 days 00:00:07.634334405 +697 7 0 days 00:00:12.668785393 +697 8 0 days 00:00:00.938057954 +697 10 0 days 00:00:03.279390735 +697 11 0 days 00:00:01.831057482 +697 12 0 days 00:00:01.884364881 +697 13 0 days 00:00:01.093501132 +697 14 0 days 00:00:03.393521203 +697 15 0 days 00:00:00.696211026 +697 16 0 days 00:00:01.664686432 +697 17 0 days 00:00:04.753273945 +697 18 0 days 00:00:03.811216204 +697 19 0 days 00:00:02.357291108 +697 20 0 days 00:00:04.851185360 +697 21 0 days 00:00:00.633357472 +697 22 0 days 00:00:01.683817297 +697 23 0 days 00:00:01.474836457 +697 24 0 days 00:00:02.568227370 +697 25 0 days 00:00:00.935817554 +697 26 0 days 00:00:01.042661880 +697 27 0 days 00:00:09.560953640 +697 28 0 days 00:00:01.338627940 +697 29 0 days 00:00:01.860213880 +697 30 0 days 00:00:02.800681122 +697 31 0 days 00:00:13.883899985 +697 32 0 days 00:00:00.996212254 +697 33 0 days 00:00:00.938835038 +697 34 0 days 00:00:01.958105340 +697 35 0 days 00:00:11.443686300 +697 36 0 days 00:00:00.836541916 +697 37 0 days 00:00:02.547739842 +697 38 0 days 00:00:01.669196546 +697 39 0 days 00:00:00.785269780 +697 40 0 days 00:00:01.457843153 +697 41 0 days 00:00:02.031621182 +697 42 0 days 00:00:08.256486360 +697 43 0 days 00:00:01.201704276 +697 44 0 days 00:00:02.140063020 +697 45 0 days 00:00:01.241500305 +697 46 0 days 00:00:01.107365035 +697 47 0 days 00:00:10.907059150 +697 48 0 days 00:00:01.211132977 +697 49 0 days 00:00:01.439353055 +697 50 0 days 00:00:01.277849086 +697 51 0 days 00:00:03.679058816 +697 52 0 days 00:00:00.420539265 +697 53 0 days 00:00:10.766345045 +698 1 0 days 00:00:03.496508362 +698 2 0 days 00:00:00.469392220 +698 3 0 days 00:00:01.143883182 +698 4 0 days 00:00:01.881837137 +698 5 0 days 00:00:01.389744155 +698 6 0 days 00:00:00.866742305 +698 7 0 days 00:00:00.913135322 +698 10 0 days 00:00:02.334767500 +698 11 0 days 00:00:00.948008606 +698 12 0 days 
00:00:01.169050180 +698 13 0 days 00:00:00.733248546 +698 14 0 days 00:00:00.420548981 +698 15 0 days 00:00:02.627398408 +698 16 0 days 00:00:00.538045774 +698 17 0 days 00:00:00.482607327 +698 18 0 days 00:00:00.469558895 +698 19 0 days 00:00:00.857889177 +698 20 0 days 00:00:02.242899665 +698 21 0 days 00:00:01.016694977 +698 22 0 days 00:00:00.898538656 +698 23 0 days 00:00:00.510910851 +698 24 0 days 00:00:00.399488295 +698 25 0 days 00:00:04.259900683 +698 26 0 days 00:00:05.839006800 +698 27 0 days 00:00:00.374740375 +698 28 0 days 00:00:00.415180018 +698 29 0 days 00:00:06.465604867 +698 30 0 days 00:00:00.767107995 +698 31 0 days 00:00:03.830288492 +698 32 0 days 00:00:00.706621874 +698 33 0 days 00:00:06.608130069 +698 34 0 days 00:00:00.815036720 +698 35 0 days 00:00:00.488065818 +698 36 0 days 00:00:00.314845975 +698 37 0 days 00:00:00.287378608 +698 38 0 days 00:00:03.967259584 +698 39 0 days 00:00:00.627641356 +698 40 0 days 00:00:03.457122665 +698 41 0 days 00:00:00.288852201 +698 42 0 days 00:00:03.258242088 +698 43 0 days 00:00:01.218697220 +698 44 0 days 00:00:02.646564235 +698 45 0 days 00:00:07.044778124 +698 46 0 days 00:00:04.063362796 +698 47 0 days 00:00:00.368161677 +698 48 0 days 00:00:00.343849385 +698 49 0 days 00:00:01.012485756 +698 51 0 days 00:00:00.743945554 +698 52 0 days 00:00:06.044032448 +698 53 0 days 00:00:05.844146755 +698 54 0 days 00:00:01.279836712 +698 55 0 days 00:00:00.428233096 +698 56 0 days 00:00:05.506786080 +698 57 0 days 00:00:03.603691727 +698 58 0 days 00:00:01.544032620 +698 59 0 days 00:00:00.791583700 +698 60 0 days 00:00:03.041952051 +698 61 0 days 00:00:01.551730568 +698 62 0 days 00:00:00.650160337 +698 63 0 days 00:00:00.265472186 +698 64 0 days 00:00:03.087661920 +698 65 0 days 00:00:00.879515831 +698 66 0 days 00:00:00.322212258 +698 67 0 days 00:00:00.447742597 +698 68 0 days 00:00:01.062482383 +698 69 0 days 00:00:00.945010758 +698 70 0 days 00:00:00.441854864 +698 71 0 days 00:00:00.760840788 +698 72 
0 days 00:00:00.519888406 +698 73 0 days 00:00:01.554457537 +698 74 0 days 00:00:00.665105780 +698 75 0 days 00:00:01.460797722 +698 76 0 days 00:00:05.889504604 +698 77 0 days 00:00:01.431103976 +698 79 0 days 00:00:00.486520818 +698 81 0 days 00:00:00.384123749 +698 82 0 days 00:00:00.569080215 +698 83 0 days 00:00:00.434618129 +698 84 0 days 00:00:00.447356405 +698 85 0 days 00:00:03.101304750 +698 86 0 days 00:00:00.486923175 +698 87 0 days 00:00:03.670470755 +698 88 0 days 00:00:00.410509907 +698 90 0 days 00:00:01.482947560 +698 91 0 days 00:00:00.602743078 +698 92 0 days 00:00:00.909024033 +698 94 0 days 00:00:01.211573071 +698 95 0 days 00:00:00.485225898 +698 96 0 days 00:00:00.524934124 +698 97 0 days 00:00:00.924560588 +698 98 0 days 00:00:03.255589185 +698 99 0 days 00:00:01.230912880 +698 100 0 days 00:00:01.274486353 +699 1 0 days 00:00:01.091371236 +699 2 0 days 00:00:00.883968453 +699 3 0 days 00:00:00.343486800 +699 4 0 days 00:00:00.195471146 +699 5 0 days 00:00:00.177637515 +699 6 0 days 00:00:00.395539370 +699 7 0 days 00:00:00.172941330 +699 8 0 days 00:00:00.174638173 +699 9 0 days 00:00:01.096768800 +699 10 0 days 00:00:00.165913830 +699 11 0 days 00:00:00.361475012 +699 12 0 days 00:00:01.076792286 +699 13 0 days 00:00:00.405019211 +699 14 0 days 00:00:00.199135682 +699 15 0 days 00:00:00.183585996 +699 16 0 days 00:00:00.186151415 +699 17 0 days 00:00:00.345221885 +699 18 0 days 00:00:01.092645474 +699 19 0 days 00:00:00.203296810 +699 20 0 days 00:00:00.164661665 +699 21 0 days 00:00:00.316866726 +699 22 0 days 00:00:00.171455896 +699 23 0 days 00:00:00.384196812 +699 24 0 days 00:00:00.879079566 +699 25 0 days 00:00:00.336706160 +699 26 0 days 00:00:00.363552815 +699 27 0 days 00:00:01.105050407 +699 28 0 days 00:00:00.162416720 +699 29 0 days 00:00:00.203128605 +699 30 0 days 00:00:00.969310540 +699 31 0 days 00:00:00.980308990 +699 32 0 days 00:00:00.973619980 +699 33 0 days 00:00:00.177407920 +699 34 0 days 00:00:00.968759200 +699 35 0 
days 00:00:01.137357666 +699 36 0 days 00:00:00.198803405 +699 37 0 days 00:00:00.357226515 +699 38 0 days 00:00:00.981443655 +699 39 0 days 00:00:00.881738553 +699 40 0 days 00:00:00.883522486 +699 41 0 days 00:00:00.178809340 +699 42 0 days 00:00:00.321090833 +699 43 0 days 00:00:01.164589388 +699 44 0 days 00:00:01.035194844 +699 45 0 days 00:00:00.373024916 +699 46 0 days 00:00:00.201715144 +699 47 0 days 00:00:00.877079766 +699 48 0 days 00:00:00.187769630 +699 49 0 days 00:00:00.333710693 +699 50 0 days 00:00:00.365787685 +699 51 0 days 00:00:01.036441292 +699 52 0 days 00:00:00.361559704 +699 53 0 days 00:00:00.341840075 +699 54 0 days 00:00:00.162688130 +699 55 0 days 00:00:00.169928340 +699 56 0 days 00:00:00.343683635 +699 57 0 days 00:00:00.352935736 +699 58 0 days 00:00:00.345068750 +699 59 0 days 00:00:00.198357355 +699 60 0 days 00:00:00.175200350 +699 61 0 days 00:00:01.065909023 +699 62 0 days 00:00:00.364116775 +699 63 0 days 00:00:00.202711850 +699 64 0 days 00:00:00.367054122 +699 65 0 days 00:00:00.976091880 +699 66 0 days 00:00:00.211466040 +699 67 0 days 00:00:01.027461476 +699 68 0 days 00:00:00.975307250 +699 69 0 days 00:00:00.355204828 +699 70 0 days 00:00:00.331470320 +699 71 0 days 00:00:00.166587528 +699 72 0 days 00:00:00.974278890 +699 73 0 days 00:00:01.030822888 +699 74 0 days 00:00:00.338086845 +699 75 0 days 00:00:00.973595265 +699 76 0 days 00:00:01.062516820 +699 77 0 days 00:00:00.972041935 +699 78 0 days 00:00:00.332085870 +699 79 0 days 00:00:00.170693516 +699 80 0 days 00:00:00.220178668 +699 81 0 days 00:00:00.194659050 +699 82 0 days 00:00:00.973368475 +699 83 0 days 00:00:01.030155824 +699 84 0 days 00:00:00.343932795 +699 85 0 days 00:00:00.202443520 +699 86 0 days 00:00:00.163564760 +699 87 0 days 00:00:00.177624183 +699 88 0 days 00:00:01.090131525 +699 89 0 days 00:00:01.111791257 +699 90 0 days 00:00:00.345714950 +699 91 0 days 00:00:01.095208357 +699 92 0 days 00:00:00.178247125 +699 93 0 days 00:00:00.199921065 
+699 94 0 days 00:00:01.187283751 +699 95 0 days 00:00:00.335132286 +699 96 0 days 00:00:00.182297280 +699 97 0 days 00:00:00.395188220 +699 98 0 days 00:00:00.386647888 +699 99 0 days 00:00:00.212271644 +699 100 0 days 00:00:00.387995220 +700 1 0 days 00:00:00.347471413 +700 2 0 days 00:00:01.144074831 +700 3 0 days 00:00:00.342494325 +700 4 0 days 00:00:01.084574116 +700 5 0 days 00:00:00.190724808 +700 6 0 days 00:00:00.357872296 +700 7 0 days 00:00:01.142107057 +700 8 0 days 00:00:00.385510393 +700 9 0 days 00:00:00.421438405 +700 10 0 days 00:00:00.352867210 +700 11 0 days 00:00:00.339905540 +700 12 0 days 00:00:00.362027784 +700 13 0 days 00:00:00.210388310 +700 14 0 days 00:00:01.131795232 +700 15 0 days 00:00:00.984143325 +700 16 0 days 00:00:01.040987792 +700 17 0 days 00:00:00.205153167 +700 18 0 days 00:00:00.352902650 +700 19 0 days 00:00:00.893355620 +700 20 0 days 00:00:00.355521935 +700 21 0 days 00:00:00.209296995 +700 22 0 days 00:00:00.352779210 +700 23 0 days 00:00:00.984224540 +700 24 0 days 00:00:00.182885435 +700 25 0 days 00:00:01.042734384 +700 26 0 days 00:00:00.178954230 +700 27 0 days 00:00:00.312948473 +700 28 0 days 00:00:00.166721960 +700 29 0 days 00:00:00.341143295 +700 30 0 days 00:00:00.982546060 +700 31 0 days 00:00:00.353925632 +700 32 0 days 00:00:01.108060277 +700 33 0 days 00:00:00.388669100 +700 34 0 days 00:00:00.392460896 +700 35 0 days 00:00:00.990401260 +700 36 0 days 00:00:00.413926697 +700 37 0 days 00:00:00.897025833 +700 38 0 days 00:00:00.182385625 +700 39 0 days 00:00:00.394944920 +700 40 0 days 00:00:01.086376333 +700 41 0 days 00:00:00.180849175 +700 42 0 days 00:00:00.394522120 +700 43 0 days 00:00:00.389134922 +700 44 0 days 00:00:00.369020363 +700 45 0 days 00:00:00.166991150 +700 46 0 days 00:00:01.036009724 +700 47 0 days 00:00:00.413504571 +700 48 0 days 00:00:00.370147630 +700 49 0 days 00:00:00.231794402 +700 50 0 days 00:00:00.214139248 +700 51 0 days 00:00:00.988065925 +700 52 0 days 00:00:00.986121895 
+700 53 0 days 00:00:00.342282435 +700 54 0 days 00:00:00.990613815 +700 55 0 days 00:00:00.379950553 +700 56 0 days 00:00:00.183915395 +700 57 0 days 00:00:00.175707776 +700 58 0 days 00:00:00.361602720 +700 59 0 days 00:00:00.311311706 +700 60 0 days 00:00:01.076493536 +700 61 0 days 00:00:01.107553951 +700 62 0 days 00:00:00.166287985 +700 63 0 days 00:00:00.374459785 +700 64 0 days 00:00:00.190201046 +700 65 0 days 00:00:00.198748377 +700 66 0 days 00:00:00.985817145 +700 67 0 days 00:00:00.989462950 +700 68 0 days 00:00:00.206878295 +700 69 0 days 00:00:00.357387872 +700 70 0 days 00:00:00.987329450 +700 71 0 days 00:00:01.103090771 +700 72 0 days 00:00:00.171621015 +700 73 0 days 00:00:00.373597954 +700 74 0 days 00:00:00.339543440 +700 75 0 days 00:00:01.124748035 +700 76 0 days 00:00:00.225234740 +700 77 0 days 00:00:00.990398495 +700 78 0 days 00:00:01.043454136 +700 79 0 days 00:00:00.340851830 +700 80 0 days 00:00:00.421788944 +700 81 0 days 00:00:00.215856568 +700 82 0 days 00:00:00.179325370 +700 83 0 days 00:00:00.203145007 +700 84 0 days 00:00:00.374324380 +700 85 0 days 00:00:00.988364665 +700 86 0 days 00:00:00.181611040 +700 87 0 days 00:00:00.199973874 +700 88 0 days 00:00:00.984286305 +700 89 0 days 00:00:00.219687076 +700 90 0 days 00:00:01.037670600 +700 91 0 days 00:00:00.374861310 +700 92 0 days 00:00:00.375584093 +700 93 0 days 00:00:00.175565872 +700 94 0 days 00:00:00.166289180 +700 95 0 days 00:00:00.422254100 +700 96 0 days 00:00:01.049365032 +700 97 0 days 00:00:00.385770794 +700 98 0 days 00:00:00.412612945 +700 99 0 days 00:00:00.207159225 +700 100 0 days 00:00:00.374390180 +701 1 0 days 00:00:00.101852295 +701 2 0 days 00:00:00.115133176 +701 3 0 days 00:00:00.206618626 +701 4 0 days 00:00:00.207742800 +701 5 0 days 00:00:00.561930665 +701 6 0 days 00:00:00.109323620 +701 7 0 days 00:00:00.562116770 +701 8 0 days 00:00:00.565041595 +701 9 0 days 00:00:00.132745194 +701 10 0 days 00:00:00.624262391 +701 11 0 days 00:00:00.105294508 
+701 12 0 days 00:00:00.126417736 +701 13 0 days 00:00:00.559564270 +701 14 0 days 00:00:00.565050240 +701 15 0 days 00:00:00.229724615 +701 16 0 days 00:00:00.566206845 +701 17 0 days 00:00:00.102326245 +701 18 0 days 00:00:00.108365717 +701 19 0 days 00:00:00.105725348 +701 20 0 days 00:00:00.514739880 +701 21 0 days 00:00:00.104427400 +701 22 0 days 00:00:00.207374045 +701 23 0 days 00:00:00.206535790 +701 24 0 days 00:00:00.204304485 +701 25 0 days 00:00:00.207316280 +701 26 0 days 00:00:00.101713555 +701 27 0 days 00:00:00.615516680 +701 28 0 days 00:00:00.208481213 +701 29 0 days 00:00:00.221555685 +701 30 0 days 00:00:00.109596150 +701 31 0 days 00:00:00.564186140 +701 32 0 days 00:00:00.123550755 +701 33 0 days 00:00:00.201825920 +701 34 0 days 00:00:00.205259945 +701 35 0 days 00:00:00.207896115 +701 36 0 days 00:00:00.233187853 +701 37 0 days 00:00:00.594378812 +701 38 0 days 00:00:00.112218404 +701 39 0 days 00:00:00.561159120 +701 40 0 days 00:00:00.124261920 +701 41 0 days 00:00:00.203264245 +701 42 0 days 00:00:00.130006605 +701 43 0 days 00:00:00.565623955 +701 44 0 days 00:00:00.108561568 +701 45 0 days 00:00:00.520241480 +701 46 0 days 00:00:00.613481523 +701 47 0 days 00:00:00.130371936 +701 48 0 days 00:00:00.115119951 +701 49 0 days 00:00:00.515033853 +701 50 0 days 00:00:00.121522245 +701 51 0 days 00:00:00.627643174 +701 52 0 days 00:00:00.566074385 +701 53 0 days 00:00:00.206460420 +701 54 0 days 00:00:00.217227195 +701 55 0 days 00:00:00.217251335 +701 56 0 days 00:00:00.207853775 +701 57 0 days 00:00:00.108506865 +701 58 0 days 00:00:00.516448606 +701 59 0 days 00:00:00.563776645 +701 60 0 days 00:00:00.213507604 +701 61 0 days 00:00:00.121762960 +701 62 0 days 00:00:00.105531615 +701 63 0 days 00:00:00.587128540 +701 64 0 days 00:00:00.590256028 +701 65 0 days 00:00:00.223343540 +701 66 0 days 00:00:00.565459395 +701 67 0 days 00:00:00.230809255 +701 68 0 days 00:00:00.213467404 +701 69 0 days 00:00:00.514216206 +701 70 0 days 
00:00:00.566329795 +701 71 0 days 00:00:00.103157615 +701 72 0 days 00:00:00.100311086 +701 73 0 days 00:00:00.198722480 +701 74 0 days 00:00:00.219533325 +701 75 0 days 00:00:00.128227116 +701 76 0 days 00:00:00.514093986 +701 77 0 days 00:00:00.107361820 +701 78 0 days 00:00:00.559090910 +701 79 0 days 00:00:00.248205610 +701 80 0 days 00:00:00.209325065 +701 81 0 days 00:00:00.207354325 +701 82 0 days 00:00:00.114344370 +701 83 0 days 00:00:00.205009035 +701 84 0 days 00:00:00.560981250 +701 85 0 days 00:00:00.223181166 +701 86 0 days 00:00:00.101316880 +701 87 0 days 00:00:00.560464700 +701 88 0 days 00:00:00.212339715 +701 89 0 days 00:00:00.221027095 +701 90 0 days 00:00:00.563007505 +701 91 0 days 00:00:00.244913866 +701 92 0 days 00:00:00.228386095 +701 93 0 days 00:00:00.564447830 +701 94 0 days 00:00:00.226179460 +701 95 0 days 00:00:00.214253268 +701 96 0 days 00:00:00.228708807 +701 97 0 days 00:00:00.102401570 +701 98 0 days 00:00:00.243995240 +701 99 0 days 00:00:00.221655995 +701 100 0 days 00:00:00.236797903 +702 1 0 days 00:00:00.226828730 +702 2 0 days 00:00:00.122245648 +702 3 0 days 00:00:00.124997760 +702 4 0 days 00:00:00.206566966 +702 5 0 days 00:00:00.615159296 +702 6 0 days 00:00:00.615517436 +702 7 0 days 00:00:00.223016645 +702 8 0 days 00:00:00.209412030 +702 9 0 days 00:00:00.098367866 +702 10 0 days 00:00:00.136739737 +702 11 0 days 00:00:00.229293748 +702 12 0 days 00:00:00.517079713 +702 13 0 days 00:00:00.131898520 +702 14 0 days 00:00:00.101426375 +702 15 0 days 00:00:00.246507022 +702 16 0 days 00:00:00.105480904 +702 17 0 days 00:00:00.105053920 +702 18 0 days 00:00:00.563742020 +702 19 0 days 00:00:00.570617060 +702 20 0 days 00:00:00.564753465 +702 21 0 days 00:00:00.113972336 +702 22 0 days 00:00:00.222793270 +702 23 0 days 00:00:00.614386590 +702 24 0 days 00:00:00.235618986 +702 25 0 days 00:00:00.519781160 +702 26 0 days 00:00:00.589876832 +702 27 0 days 00:00:00.594535968 +702 28 0 days 00:00:00.224546910 +702 29 0 days 
00:00:00.675124106 +702 30 0 days 00:00:00.120239100 +702 31 0 days 00:00:00.626244751 +702 32 0 days 00:00:00.204151280 +702 33 0 days 00:00:00.208988540 +702 34 0 days 00:00:00.222805936 +702 35 0 days 00:00:00.594017480 +702 36 0 days 00:00:00.115130553 +702 37 0 days 00:00:00.129874076 +702 38 0 days 00:00:00.592669660 +702 39 0 days 00:00:00.563326450 +702 40 0 days 00:00:00.559338395 +702 41 0 days 00:00:00.610389263 +702 42 0 days 00:00:00.125941660 +702 43 0 days 00:00:00.107036385 +702 44 0 days 00:00:00.202841165 +702 45 0 days 00:00:00.213881570 +702 46 0 days 00:00:00.214687920 +702 47 0 days 00:00:00.221623530 +702 48 0 days 00:00:00.101220800 +702 49 0 days 00:00:00.558411980 +702 50 0 days 00:00:00.116795595 +702 51 0 days 00:00:00.214564166 +702 52 0 days 00:00:00.610277756 +702 53 0 days 00:00:00.107063535 +702 54 0 days 00:00:00.221801988 +702 55 0 days 00:00:00.558268345 +702 56 0 days 00:00:00.209689056 +702 57 0 days 00:00:00.207807490 +702 58 0 days 00:00:00.225321015 +702 59 0 days 00:00:00.119964229 +702 60 0 days 00:00:00.590196064 +702 61 0 days 00:00:00.208320505 +702 62 0 days 00:00:00.203964415 +702 63 0 days 00:00:00.099500350 +702 64 0 days 00:00:00.204353380 +702 65 0 days 00:00:00.590285384 +702 66 0 days 00:00:00.114658486 +702 67 0 days 00:00:00.562480180 +702 68 0 days 00:00:00.120392715 +702 69 0 days 00:00:00.565044490 +702 70 0 days 00:00:00.107170065 +702 71 0 days 00:00:00.098526095 +702 72 0 days 00:00:00.559215155 +702 73 0 days 00:00:00.564736150 +702 74 0 days 00:00:00.214322136 +702 75 0 days 00:00:00.219721083 +702 76 0 days 00:00:00.128168648 +702 77 0 days 00:00:00.631335502 +702 78 0 days 00:00:00.591628380 +702 79 0 days 00:00:00.214661923 +702 80 0 days 00:00:00.561208590 +702 81 0 days 00:00:00.207518255 +702 82 0 days 00:00:00.564439710 +702 83 0 days 00:00:00.105615504 +702 84 0 days 00:00:00.114690406 +702 85 0 days 00:00:00.114576436 +702 86 0 days 00:00:00.589622364 +702 87 0 days 00:00:00.231059260 +702 88 
0 days 00:00:00.230182920 +702 89 0 days 00:00:00.587964960 +702 90 0 days 00:00:00.104114470 +702 91 0 days 00:00:00.608174800 +702 92 0 days 00:00:00.609298163 +702 93 0 days 00:00:00.100394275 +702 94 0 days 00:00:00.114823240 +702 95 0 days 00:00:00.213165048 +702 96 0 days 00:00:00.218914365 +702 97 0 days 00:00:00.591949584 +702 98 0 days 00:00:00.243821897 +702 99 0 days 00:00:00.562311615 +702 100 0 days 00:00:00.204909006 +703 1 0 days 00:00:00.229053404 +703 2 0 days 00:00:01.056600396 +703 3 0 days 00:00:00.328573775 +703 4 0 days 00:00:01.092280650 +703 5 0 days 00:00:00.370264240 +703 6 0 days 00:00:00.219063830 +703 7 0 days 00:00:00.184073016 +703 8 0 days 00:00:00.358725680 +703 9 0 days 00:00:00.195530755 +703 10 0 days 00:00:00.174810475 +703 11 0 days 00:00:00.959437260 +703 12 0 days 00:00:00.171869525 +703 13 0 days 00:00:01.014575480 +703 14 0 days 00:00:01.052134026 +703 15 0 days 00:00:00.371457130 +703 16 0 days 00:00:00.182273710 +703 17 0 days 00:00:00.184488792 +703 18 0 days 00:00:00.329104755 +703 19 0 days 00:00:00.360970535 +703 20 0 days 00:00:00.338190635 +703 21 0 days 00:00:00.333749260 +703 22 0 days 00:00:00.198347184 +703 23 0 days 00:00:00.957391495 +703 24 0 days 00:00:01.076244665 +703 25 0 days 00:00:01.016344332 +703 26 0 days 00:00:00.962945510 +703 27 0 days 00:00:01.048940143 +703 28 0 days 00:00:00.354174980 +703 29 0 days 00:00:01.017577064 +703 30 0 days 00:00:00.364257651 +703 31 0 days 00:00:00.365104677 +703 32 0 days 00:00:00.218697017 +703 33 0 days 00:00:00.327824560 +703 34 0 days 00:00:00.340488300 +703 35 0 days 00:00:00.401004735 +703 36 0 days 00:00:00.338755135 +703 37 0 days 00:00:01.013721088 +703 38 0 days 00:00:01.048750630 +703 39 0 days 00:00:00.966470370 +703 40 0 days 00:00:00.183156990 +703 41 0 days 00:00:00.337108770 +703 42 0 days 00:00:00.349804744 +703 43 0 days 00:00:00.186299103 +703 44 0 days 00:00:00.346920388 +703 45 0 days 00:00:01.054325640 +703 46 0 days 00:00:01.011735260 +703 47 0 
days 00:00:00.964203160 +703 48 0 days 00:00:00.354704300 +703 49 0 days 00:00:00.956600210 +703 50 0 days 00:00:00.337768975 +703 51 0 days 00:00:01.049105786 +703 52 0 days 00:00:00.962301920 +703 53 0 days 00:00:00.195668580 +703 54 0 days 00:00:00.194241595 +703 55 0 days 00:00:00.962715995 +703 56 0 days 00:00:00.364915423 +703 57 0 days 00:00:00.172613880 +703 58 0 days 00:00:00.960453660 +703 59 0 days 00:00:00.197193565 +703 60 0 days 00:00:00.210433106 +703 61 0 days 00:00:00.360802305 +703 62 0 days 00:00:00.329959620 +703 63 0 days 00:00:00.959287655 +703 64 0 days 00:00:00.958784770 +703 65 0 days 00:00:00.357236815 +703 66 0 days 00:00:00.171498930 +703 67 0 days 00:00:00.336590740 +703 68 0 days 00:00:00.965402125 +703 69 0 days 00:00:00.963027645 +703 70 0 days 00:00:00.344039864 +703 71 0 days 00:00:00.191674992 +703 72 0 days 00:00:00.199437430 +703 73 0 days 00:00:00.963433245 +703 74 0 days 00:00:00.193369505 +703 75 0 days 00:00:00.956384190 +703 76 0 days 00:00:00.389485964 +703 77 0 days 00:00:00.187782657 +703 78 0 days 00:00:01.014773108 +703 79 0 days 00:00:00.391067103 +703 80 0 days 00:00:00.183623896 +703 81 0 days 00:00:00.210207265 +703 82 0 days 00:00:00.184303746 +703 83 0 days 00:00:00.206951276 +703 84 0 days 00:00:00.196029112 +703 85 0 days 00:00:00.170446460 +703 86 0 days 00:00:00.371430203 +703 87 0 days 00:00:00.219817828 +703 88 0 days 00:00:01.053589650 +703 89 0 days 00:00:00.193909050 +703 90 0 days 00:00:00.211314460 +703 91 0 days 00:00:00.199733714 +703 92 0 days 00:00:00.177920540 +703 93 0 days 00:00:00.959269625 +703 94 0 days 00:00:00.364372813 +703 95 0 days 00:00:01.016600436 +703 96 0 days 00:00:00.234252963 +703 97 0 days 00:00:00.215113420 +703 98 0 days 00:00:00.229688105 +703 99 0 days 00:00:00.171152495 +703 100 0 days 00:00:00.211930015 +704 1 0 days 00:00:00.217115970 +704 2 0 days 00:00:00.553241835 +704 3 0 days 00:00:00.106917585 +704 4 0 days 00:00:00.119056115 +704 5 0 days 00:00:00.113012125 +704 6 
0 days 00:00:00.127866875 +704 7 0 days 00:00:00.122939825 +704 8 0 days 00:00:00.123371950 +704 9 0 days 00:00:00.550070365 +704 10 0 days 00:00:00.548684770 +704 11 0 days 00:00:00.548691790 +704 12 0 days 00:00:00.552177845 +704 13 0 days 00:00:00.208626220 +704 14 0 days 00:00:00.213892905 +704 15 0 days 00:00:00.207339820 +704 16 0 days 00:00:00.113362135 +704 17 0 days 00:00:00.129452220 +704 18 0 days 00:00:00.105587595 +704 19 0 days 00:00:00.197169445 +704 20 0 days 00:00:00.103760790 +704 21 0 days 00:00:00.199952520 +704 22 0 days 00:00:00.124090250 +704 23 0 days 00:00:00.203514404 +704 24 0 days 00:00:00.545045425 +704 25 0 days 00:00:00.104887540 +704 26 0 days 00:00:00.550901955 +704 27 0 days 00:00:00.213524330 +704 28 0 days 00:00:00.552276295 +704 29 0 days 00:00:00.551620950 +704 30 0 days 00:00:00.212589220 +704 31 0 days 00:00:00.217537540 +704 32 0 days 00:00:00.545974405 +704 33 0 days 00:00:00.552850820 +704 34 0 days 00:00:00.199215475 +704 35 0 days 00:00:00.204175600 +704 36 0 days 00:00:00.107282780 +704 37 0 days 00:00:00.549770325 +704 38 0 days 00:00:00.550063075 +704 39 0 days 00:00:00.550997565 +704 40 0 days 00:00:00.212326700 +704 41 0 days 00:00:00.208460036 +704 42 0 days 00:00:00.212407790 +704 43 0 days 00:00:00.111950810 +704 44 0 days 00:00:00.210946075 +704 45 0 days 00:00:00.548149890 +704 46 0 days 00:00:00.549229120 +704 47 0 days 00:00:00.201818240 +704 48 0 days 00:00:00.549791270 +704 49 0 days 00:00:00.213888875 +704 50 0 days 00:00:00.108811395 +704 51 0 days 00:00:00.113293042 +704 52 0 days 00:00:00.197835580 +704 53 0 days 00:00:00.196536745 +704 54 0 days 00:00:00.129868590 +704 55 0 days 00:00:00.549880555 +704 56 0 days 00:00:00.196130325 +704 57 0 days 00:00:00.573201680 +704 58 0 days 00:00:00.121032485 +704 59 0 days 00:00:00.548104470 +704 60 0 days 00:00:00.100221505 +704 61 0 days 00:00:00.210991755 +704 62 0 days 00:00:00.211407885 +704 63 0 days 00:00:00.546873270 +704 64 0 days 00:00:00.196278990 +704 
65 0 days 00:00:00.548073805 +704 66 0 days 00:00:00.200180685 +704 67 0 days 00:00:00.550605730 +704 68 0 days 00:00:00.103439010 +704 69 0 days 00:00:00.550833845 +704 70 0 days 00:00:00.550807485 +704 71 0 days 00:00:00.551093320 +704 72 0 days 00:00:00.218623380 +704 73 0 days 00:00:00.207059864 +704 74 0 days 00:00:00.102507208 +704 75 0 days 00:00:00.099832450 +704 76 0 days 00:00:00.612379682 +704 77 0 days 00:00:00.197949695 +704 78 0 days 00:00:00.549273235 +704 79 0 days 00:00:00.199479895 +704 80 0 days 00:00:00.550432715 +704 81 0 days 00:00:00.100715090 +704 82 0 days 00:00:00.113574225 +704 83 0 days 00:00:00.200523685 +704 84 0 days 00:00:00.115999285 +704 85 0 days 00:00:00.198090190 +704 86 0 days 00:00:00.205969772 +704 87 0 days 00:00:00.101694435 +704 88 0 days 00:00:00.550119905 +704 89 0 days 00:00:00.550914545 +704 90 0 days 00:00:00.547344210 +704 91 0 days 00:00:00.226750628 +704 92 0 days 00:00:00.198282360 +704 93 0 days 00:00:00.118700735 +704 94 0 days 00:00:00.201254525 +704 95 0 days 00:00:00.127163708 +704 96 0 days 00:00:00.198849570 +704 97 0 days 00:00:00.572446984 +704 98 0 days 00:00:00.547917120 +704 99 0 days 00:00:00.550841465 +704 100 0 days 00:00:00.096105615 +705 1 0 days 00:00:00.926524160 +705 2 0 days 00:00:05.282359740 +705 3 0 days 00:00:08.852050978 +705 4 0 days 00:00:00.512537480 +705 5 0 days 00:00:01.270987720 +705 6 0 days 00:00:13.116657386 +705 7 0 days 00:00:01.426507306 +705 8 0 days 00:00:04.648998625 +705 9 0 days 00:00:10.656255986 +705 10 0 days 00:00:05.694462280 +705 11 0 days 00:00:01.381982950 +705 12 0 days 00:00:22.284521203 +705 13 0 days 00:00:01.201013475 +705 14 0 days 00:00:04.706029426 +705 15 0 days 00:00:14.823443712 +705 16 0 days 00:00:00.912938604 +705 17 0 days 00:00:01.763407348 +705 18 0 days 00:00:09.224716570 +705 19 0 days 00:00:00.724153660 +705 20 0 days 00:00:12.456770680 +705 21 0 days 00:00:00.615518204 +705 22 0 days 00:00:01.712520260 +705 23 0 days 00:00:21.320453910 +705 
24 0 days 00:00:04.371840915 +705 25 0 days 00:00:00.497658028 +705 26 0 days 00:00:19.019231685 +705 27 0 days 00:00:03.787278533 +705 28 0 days 00:00:01.020116645 +705 29 0 days 00:00:00.602377875 +705 30 0 days 00:00:03.006620625 +705 31 0 days 00:00:01.300783124 +705 32 0 days 00:00:00.736628670 +705 33 0 days 00:00:00.640281480 +705 34 0 days 00:00:06.985013300 +705 35 0 days 00:00:01.896179600 +705 36 0 days 00:00:01.574551080 +705 37 0 days 00:00:00.649524720 +705 38 0 days 00:00:02.357736334 +705 39 0 days 00:00:00.412382960 +705 40 0 days 00:00:00.936382310 +705 41 0 days 00:00:00.797699320 +705 42 0 days 00:00:01.804588930 +705 43 0 days 00:00:02.362548060 +705 44 0 days 00:00:01.714150970 +705 45 0 days 00:00:01.314063220 +705 46 0 days 00:00:04.877779290 +705 47 0 days 00:00:08.204052251 +705 48 0 days 00:00:01.848317270 +705 49 0 days 00:00:01.467351532 +705 50 0 days 00:00:01.257727560 +705 51 0 days 00:00:01.360100275 +705 52 0 days 00:00:04.019184946 +705 53 0 days 00:00:00.938593272 +705 54 0 days 00:00:00.957604786 +705 55 0 days 00:00:03.790503846 +705 56 0 days 00:00:10.925546310 +705 57 0 days 00:00:00.968542272 +705 58 0 days 00:00:02.866543392 +705 59 0 days 00:00:01.774569995 +705 60 0 days 00:00:14.938332290 +705 61 0 days 00:00:01.063095765 +705 62 0 days 00:00:00.618037060 +705 63 0 days 00:00:02.094403556 +705 64 0 days 00:00:00.974712740 +705 65 0 days 00:00:02.793166148 +705 66 0 days 00:00:00.604754237 +705 67 0 days 00:00:04.938814010 +705 68 0 days 00:00:04.872422652 +705 69 0 days 00:00:05.633679228 +705 70 0 days 00:00:13.309101285 +705 71 0 days 00:00:03.544600763 +705 72 0 days 00:00:01.169081077 +705 73 0 days 00:00:03.094851085 +705 74 0 days 00:00:09.184504864 +705 75 0 days 00:00:04.494312825 +705 76 0 days 00:00:03.093116191 +706 1 0 days 00:00:18.090989800 +706 2 0 days 00:00:00.922760060 +706 3 0 days 00:00:01.057708125 +706 4 0 days 00:00:03.784873960 +706 5 0 days 00:00:05.221866764 +706 6 0 days 00:00:01.262482233 +706 
7 0 days 00:00:01.124558080 +706 8 0 days 00:00:01.593060084 +706 9 0 days 00:00:01.231756420 +706 10 0 days 00:00:01.268677228 +706 11 0 days 00:00:03.701083780 +706 12 0 days 00:00:15.843816671 +706 13 0 days 00:00:09.504292390 +706 14 0 days 00:00:03.551148622 +706 15 0 days 00:00:04.176340790 +706 16 0 days 00:00:02.968242493 +706 17 0 days 00:00:02.009166995 +706 18 0 days 00:00:09.277181472 +706 19 0 days 00:00:01.167842460 +706 20 0 days 00:00:01.014376900 +706 21 0 days 00:00:19.742198656 +706 22 0 days 00:00:00.894406995 +706 23 0 days 00:00:01.193893615 +706 24 0 days 00:00:02.394478280 +706 25 0 days 00:00:02.171849360 +706 26 0 days 00:00:19.006791396 +706 27 0 days 00:00:00.945489010 +706 28 0 days 00:00:01.532516360 +706 29 0 days 00:00:01.801711330 +706 30 0 days 00:00:07.514046030 +706 31 0 days 00:00:01.712708400 +706 32 0 days 00:00:08.863483631 +706 33 0 days 00:00:01.242821245 +706 34 0 days 00:00:02.283430888 +706 35 0 days 00:00:00.910410966 +706 36 0 days 00:00:08.324348976 +706 37 0 days 00:00:00.796829851 +706 38 0 days 00:00:01.749815583 +706 39 0 days 00:00:04.874354060 +706 40 0 days 00:00:14.406834635 +706 41 0 days 00:00:09.039538305 +706 42 0 days 00:00:00.934426976 +706 43 0 days 00:00:00.901734430 +706 44 0 days 00:00:09.871699640 +706 45 0 days 00:00:01.712920788 +706 46 0 days 00:00:01.093858144 +706 47 0 days 00:00:00.654722630 +706 48 0 days 00:00:00.693333250 +706 49 0 days 00:00:10.773822215 +706 50 0 days 00:00:01.180711275 +706 51 0 days 00:00:01.855734060 +706 52 0 days 00:00:01.358779173 +706 53 0 days 00:00:01.656790895 +706 54 0 days 00:00:00.366620780 +706 55 0 days 00:00:01.341173512 +706 56 0 days 00:00:01.780785145 +706 57 0 days 00:00:00.690103740 +706 58 0 days 00:00:00.414853095 +706 59 0 days 00:00:02.876925176 +706 60 0 days 00:00:01.885351190 +706 61 0 days 00:00:01.719564793 +706 62 0 days 00:00:01.148744800 +706 63 0 days 00:00:01.239885580 +706 64 0 days 00:00:02.700927446 +706 65 0 days 00:00:09.165462225 
+706 66 0 days 00:00:07.050948236 +706 67 0 days 00:00:01.271678670 +706 68 0 days 00:00:02.683768790 +706 69 0 days 00:00:12.595349076 +706 70 0 days 00:00:02.797015065 +706 71 0 days 00:00:00.877484250 +706 72 0 days 00:00:01.144873464 +706 73 0 days 00:00:09.950689251 +706 74 0 days 00:00:03.318142470 +706 75 0 days 00:00:01.152220855 +706 76 0 days 00:00:03.071547055 +706 77 0 days 00:00:04.962848095 +706 78 0 days 00:00:01.542674620 +706 79 0 days 00:00:17.456842785 +706 80 0 days 00:00:00.387673520 +706 81 0 days 00:00:03.953927292 +706 82 0 days 00:00:04.628031592 +706 83 0 days 00:00:15.865295673 +707 1 0 days 00:00:00.586441445 +707 2 0 days 00:00:01.147525235 +707 3 0 days 00:00:00.241847976 +707 4 0 days 00:00:00.270614635 +707 5 0 days 00:00:00.553956293 +707 6 0 days 00:00:00.655241920 +707 7 0 days 00:00:00.382877583 +707 8 0 days 00:00:00.844385210 +707 9 0 days 00:00:06.261761924 +707 10 0 days 00:00:00.368196045 +707 11 0 days 00:00:02.611403750 +707 12 0 days 00:00:09.406706570 +707 13 0 days 00:00:07.686966148 +707 14 0 days 00:00:00.332593040 +707 15 0 days 00:00:00.722493996 +707 16 0 days 00:00:00.775450390 +707 17 0 days 00:00:01.837589752 +707 18 0 days 00:00:01.715498855 +707 19 0 days 00:00:02.935224065 +707 20 0 days 00:00:02.094751606 +707 21 0 days 00:00:02.133525793 +707 22 0 days 00:00:02.768224256 +707 23 0 days 00:00:01.312861290 +707 24 0 days 00:00:00.681504484 +707 25 0 days 00:00:01.383870710 +707 26 0 days 00:00:00.812651850 +707 27 0 days 00:00:04.603141264 +707 28 0 days 00:00:00.438207336 +707 29 0 days 00:00:00.812523480 +707 30 0 days 00:00:00.839029940 +707 31 0 days 00:00:00.717355904 +707 32 0 days 00:00:05.016171174 +707 33 0 days 00:00:10.351573568 +707 34 0 days 00:00:00.538881956 +707 35 0 days 00:00:00.814153510 +707 36 0 days 00:00:01.337064353 +707 37 0 days 00:00:00.763429500 +707 38 0 days 00:00:00.550896460 +707 39 0 days 00:00:01.056313010 +707 40 0 days 00:00:01.381266985 +707 41 0 days 00:00:10.555634470 
+707 42 0 days 00:00:01.107198395 +707 43 0 days 00:00:08.468635290 +707 44 0 days 00:00:07.301986086 +707 45 0 days 00:00:02.271723625 +707 46 0 days 00:00:00.533858985 +707 47 0 days 00:00:00.959987184 +707 48 0 days 00:00:00.967693006 +707 49 0 days 00:00:00.614586468 +707 50 0 days 00:00:00.368350226 +707 51 0 days 00:00:06.936033468 +707 52 0 days 00:00:00.855132506 +707 53 0 days 00:00:00.655399270 +707 54 0 days 00:00:04.525985540 +707 55 0 days 00:00:00.699844920 +707 56 0 days 00:00:00.536351265 +707 57 0 days 00:00:08.269663748 +707 58 0 days 00:00:07.644090413 +707 59 0 days 00:00:00.881976985 +707 60 0 days 00:00:01.037747848 +707 61 0 days 00:00:01.167122700 +707 62 0 days 00:00:00.758187504 +707 63 0 days 00:00:01.095830930 +707 64 0 days 00:00:05.891248660 +707 65 0 days 00:00:06.260121945 +707 66 0 days 00:00:00.576187685 +707 67 0 days 00:00:00.695124510 +707 68 0 days 00:00:01.493010026 +707 69 0 days 00:00:00.811391463 +707 70 0 days 00:00:01.582985353 +707 71 0 days 00:00:00.668000420 +707 72 0 days 00:00:08.265199228 +707 73 0 days 00:00:01.023338413 +707 74 0 days 00:00:00.618513135 +707 75 0 days 00:00:01.469210110 +707 76 0 days 00:00:00.260079493 +707 77 0 days 00:00:00.431064230 +707 78 0 days 00:00:00.352787316 +707 79 0 days 00:00:00.618857288 +707 80 0 days 00:00:02.404442583 +707 81 0 days 00:00:07.227643870 +707 82 0 days 00:00:01.552740215 +707 83 0 days 00:00:00.790422150 +707 84 0 days 00:00:09.177574705 +707 85 0 days 00:00:01.468780880 +707 86 0 days 00:00:09.671584520 +707 87 0 days 00:00:01.113216090 +707 88 0 days 00:00:01.680209235 +707 89 0 days 00:00:01.004781050 +707 90 0 days 00:00:00.569589995 +707 91 0 days 00:00:01.035202360 +707 92 0 days 00:00:01.907116320 +707 93 0 days 00:00:02.316151170 +707 94 0 days 00:00:02.117382896 +707 95 0 days 00:00:00.422665680 +707 96 0 days 00:00:01.964325700 +707 97 0 days 00:00:08.120205690 +707 98 0 days 00:00:00.546408670 +707 99 0 days 00:00:00.442736170 +707 100 0 days 
00:00:00.199445210 +708 1 0 days 00:00:01.363164825 +708 2 0 days 00:00:02.155943982 +708 3 0 days 00:00:05.033247570 +708 4 0 days 00:00:00.787717600 +708 5 0 days 00:00:09.049384225 +708 6 0 days 00:00:02.665284588 +708 7 0 days 00:00:03.554006520 +708 8 0 days 00:00:02.101177815 +708 9 0 days 00:00:00.695879420 +708 10 0 days 00:00:05.202989100 +708 11 0 days 00:00:01.495213285 +708 12 0 days 00:00:06.188717710 +708 13 0 days 00:00:00.358083125 +708 14 0 days 00:00:02.748452387 +708 15 0 days 00:00:01.241761060 +708 16 0 days 00:00:02.264006880 +708 17 0 days 00:00:08.290623382 +708 18 0 days 00:00:01.891100355 +708 19 0 days 00:00:00.239017190 +708 20 0 days 00:00:04.127582776 +708 21 0 days 00:00:00.379866400 +708 22 0 days 00:00:00.726681852 +708 23 0 days 00:00:00.911060440 +708 24 0 days 00:00:00.441400610 +708 25 0 days 00:00:01.774434844 +708 26 0 days 00:00:02.663071720 +708 27 0 days 00:00:01.198002260 +708 28 0 days 00:00:07.339677885 +708 29 0 days 00:00:00.748024835 +708 30 0 days 00:00:00.670034680 +708 31 0 days 00:00:00.645788850 +708 32 0 days 00:00:03.405731123 +708 33 0 days 00:00:00.413683930 +708 34 0 days 00:00:03.582284105 +708 35 0 days 00:00:00.636245560 +708 36 0 days 00:00:08.177753777 +708 37 0 days 00:00:00.645730051 +708 38 0 days 00:00:00.203203610 +708 39 0 days 00:00:00.808413960 +708 40 0 days 00:00:01.288560404 +708 41 0 days 00:00:09.986368075 +708 42 0 days 00:00:00.798304730 +708 43 0 days 00:00:00.677225656 +708 44 0 days 00:00:01.115678923 +708 45 0 days 00:00:00.854556950 +708 46 0 days 00:00:10.307044500 +708 47 0 days 00:00:00.479181355 +708 48 0 days 00:00:10.568964305 +708 49 0 days 00:00:02.183886820 +708 50 0 days 00:00:00.307370280 +708 51 0 days 00:00:03.154822945 +708 52 0 days 00:00:01.746735156 +708 53 0 days 00:00:02.711076144 +708 54 0 days 00:00:01.815388105 +708 55 0 days 00:00:00.236235968 +708 56 0 days 00:00:00.721679780 +708 57 0 days 00:00:08.626729125 +708 58 0 days 00:00:00.309917760 +708 59 0 days 
00:00:00.834434363 +708 60 0 days 00:00:01.020735423 +708 61 0 days 00:00:05.684814605 +708 62 0 days 00:00:01.847608505 +708 63 0 days 00:00:01.825942570 +708 64 0 days 00:00:02.397223680 +708 65 0 days 00:00:00.976039005 +708 66 0 days 00:00:00.520846975 +708 67 0 days 00:00:01.894203300 +708 68 0 days 00:00:00.652547904 +708 69 0 days 00:00:00.546138440 +708 70 0 days 00:00:00.702215906 +708 71 0 days 00:00:03.034306496 +708 72 0 days 00:00:02.175023184 +708 73 0 days 00:00:04.518693595 +708 74 0 days 00:00:01.434887715 +708 75 0 days 00:00:00.520930020 +708 76 0 days 00:00:09.937531425 +708 77 0 days 00:00:06.517539990 +708 78 0 days 00:00:01.253579284 +708 79 0 days 00:00:00.822944910 +708 80 0 days 00:00:01.556209370 +708 81 0 days 00:00:01.814206255 +708 82 0 days 00:00:01.906455490 +708 83 0 days 00:00:01.703920576 +708 84 0 days 00:00:04.859951520 +708 85 0 days 00:00:00.870998904 +708 86 0 days 00:00:01.966621855 +708 87 0 days 00:00:02.079889780 +708 88 0 days 00:00:01.681675966 +708 89 0 days 00:00:03.302405830 +708 90 0 days 00:00:00.404750196 +708 91 0 days 00:00:09.763426590 +708 92 0 days 00:00:01.197535165 +708 93 0 days 00:00:09.096954435 +708 94 0 days 00:00:01.022115435 +708 95 0 days 00:00:00.342650000 +708 96 0 days 00:00:00.953270185 +708 97 0 days 00:00:07.771519095 +708 98 0 days 00:00:00.968047880 +708 99 0 days 00:00:06.291071036 +708 100 0 days 00:00:01.170489040 +709 1 0 days 00:00:01.605164285 +709 2 0 days 00:00:04.166846330 +709 3 0 days 00:00:14.372743960 +709 4 0 days 00:00:03.195043210 +709 5 0 days 00:00:00.827479780 +709 6 0 days 00:00:01.789783570 +709 7 0 days 00:00:02.788315288 +709 8 0 days 00:00:02.829648164 +709 9 0 days 00:00:03.323969350 +709 10 0 days 00:00:01.227159480 +709 11 0 days 00:00:02.226694505 +709 12 0 days 00:00:01.418121250 +709 13 0 days 00:00:04.619278708 +709 14 0 days 00:00:06.748561210 +709 15 0 days 00:00:03.176865324 +709 16 0 days 00:00:08.915736165 +709 17 0 days 00:00:00.729001670 +709 18 0 days 
00:00:10.253875140 +709 19 0 days 00:00:01.180279750 +709 20 0 days 00:00:16.097582747 +709 21 0 days 00:00:02.897141525 +709 22 0 days 00:00:00.987066680 +709 23 0 days 00:00:02.319340272 +709 24 0 days 00:00:01.356250180 +709 25 0 days 00:00:02.299551636 +709 26 0 days 00:00:00.578611020 +709 27 0 days 00:00:20.745853135 +709 28 0 days 00:00:01.542924466 +709 29 0 days 00:00:03.047208155 +709 30 0 days 00:00:01.358992275 +709 31 0 days 00:00:00.679710248 +709 32 0 days 00:00:00.929130350 +709 33 0 days 00:00:01.368031220 +709 34 0 days 00:00:09.933761165 +709 35 0 days 00:00:04.518325990 +709 36 0 days 00:00:01.508472895 +709 37 0 days 00:00:03.504692363 +709 38 0 days 00:00:02.898659223 +709 39 0 days 00:00:16.556039020 +709 40 0 days 00:00:00.451876865 +709 41 0 days 00:00:01.647850420 +709 42 0 days 00:00:00.850802925 +709 43 0 days 00:00:04.632348328 +709 44 0 days 00:00:01.994646355 +709 45 0 days 00:00:04.720847315 +709 46 0 days 00:00:00.986160455 +709 47 0 days 00:00:00.839214930 +709 48 0 days 00:00:13.940109750 +709 49 0 days 00:00:00.899516750 +709 50 0 days 00:00:03.647623175 +709 51 0 days 00:00:01.008827005 +709 52 0 days 00:00:01.519806320 +709 53 0 days 00:00:03.056786065 +709 54 0 days 00:00:05.302197505 +709 55 0 days 00:00:00.643646515 +709 56 0 days 00:00:01.901547385 +709 57 0 days 00:00:01.707434240 +709 58 0 days 00:00:04.592081365 +709 59 0 days 00:00:00.727173890 +709 60 0 days 00:00:00.675664076 +709 61 0 days 00:00:03.867599724 +709 62 0 days 00:00:16.337275757 +709 63 0 days 00:00:06.592634405 +709 64 0 days 00:00:17.695760925 +709 65 0 days 00:00:19.407729295 +709 66 0 days 00:00:03.056339790 +709 67 0 days 00:00:11.716838305 +709 68 0 days 00:00:00.823861295 +709 69 0 days 00:00:02.224806915 +709 70 0 days 00:00:11.899380695 +709 71 0 days 00:00:06.314478785 +709 72 0 days 00:00:01.113589365 +709 73 0 days 00:00:01.122798075 +709 74 0 days 00:00:00.820097736 +709 75 0 days 00:00:00.689694350 +709 76 0 days 00:00:08.606565415 +709 77 
0 days 00:00:01.478414760 +709 78 0 days 00:00:00.782100252 +709 79 0 days 00:00:02.530164562 +710 1 0 days 00:00:08.958382350 +710 2 0 days 00:00:00.909793565 +710 3 0 days 00:00:00.611927340 +710 4 0 days 00:00:00.829102748 +710 5 0 days 00:00:02.190962825 +710 6 0 days 00:00:07.647422900 +710 7 0 days 00:00:03.498059710 +710 8 0 days 00:00:07.648143805 +710 9 0 days 00:00:09.283948460 +710 10 0 days 00:00:00.861235565 +710 11 0 days 00:00:00.676826386 +710 12 0 days 00:00:04.412256070 +710 13 0 days 00:00:01.020334905 +710 14 0 days 00:00:00.711958651 +710 15 0 days 00:00:10.375944985 +710 16 0 days 00:00:00.540390780 +710 17 0 days 00:00:10.671712375 +710 18 0 days 00:00:00.790715290 +710 19 0 days 00:00:00.467412485 +710 20 0 days 00:00:00.571169960 +710 21 0 days 00:00:01.486833765 +710 22 0 days 00:00:11.017177113 +710 23 0 days 00:00:00.698931205 +710 24 0 days 00:00:05.473459015 +710 25 0 days 00:00:00.646555410 +710 26 0 days 00:00:04.662130660 +710 27 0 days 00:00:00.645261671 +710 28 0 days 00:00:00.470927730 +710 29 0 days 00:00:01.612572025 +710 30 0 days 00:00:00.626707780 +710 31 0 days 00:00:04.941666490 +710 32 0 days 00:00:00.343912730 +710 33 0 days 00:00:01.090709035 +710 34 0 days 00:00:02.406909665 +710 35 0 days 00:00:00.205944760 +710 36 0 days 00:00:00.601949980 +710 37 0 days 00:00:01.403349900 +710 38 0 days 00:00:02.049632255 +710 39 0 days 00:00:10.898860663 +710 40 0 days 00:00:09.962505211 +710 41 0 days 00:00:07.381124785 +710 42 0 days 00:00:04.288760780 +710 43 0 days 00:00:07.461779476 +710 44 0 days 00:00:00.229247596 +710 45 0 days 00:00:08.226386546 +710 46 0 days 00:00:01.009581980 +710 47 0 days 00:00:00.360777025 +710 48 0 days 00:00:00.256779020 +710 49 0 days 00:00:09.527772245 +710 50 0 days 00:00:01.758198395 +710 51 0 days 00:00:01.343657595 +710 52 0 days 00:00:01.453388090 +710 53 0 days 00:00:01.091534976 +710 54 0 days 00:00:00.328878888 +710 55 0 days 00:00:02.544402036 +710 56 0 days 00:00:05.080245625 +710 57 0 
days 00:00:00.701510755 +710 58 0 days 00:00:00.214136940 +710 59 0 days 00:00:00.508951460 +710 60 0 days 00:00:00.756487984 +710 61 0 days 00:00:06.627293940 +710 62 0 days 00:00:02.124540633 +710 63 0 days 00:00:01.664461990 +710 64 0 days 00:00:05.548697024 +710 65 0 days 00:00:01.623640780 +710 66 0 days 00:00:00.687970495 +710 67 0 days 00:00:07.677450610 +710 68 0 days 00:00:00.897414852 +710 69 0 days 00:00:00.453350748 +710 70 0 days 00:00:00.645368860 +710 71 0 days 00:00:00.707283916 +710 72 0 days 00:00:00.739387805 +710 73 0 days 00:00:00.673936120 +710 74 0 days 00:00:00.457446110 +710 75 0 days 00:00:00.655851740 +710 76 0 days 00:00:03.035353390 +710 77 0 days 00:00:06.905788400 +710 78 0 days 00:00:06.528310060 +710 79 0 days 00:00:00.693132412 +710 80 0 days 00:00:00.323692560 +710 81 0 days 00:00:00.554222335 +710 82 0 days 00:00:00.843283545 +710 83 0 days 00:00:02.205455764 +710 84 0 days 00:00:07.320423057 +710 85 0 days 00:00:01.050810525 +710 86 0 days 00:00:01.506245803 +710 87 0 days 00:00:01.737236082 +710 88 0 days 00:00:03.505763904 +710 89 0 days 00:00:02.301193396 +710 90 0 days 00:00:00.342925115 +710 91 0 days 00:00:00.699375480 +710 92 0 days 00:00:09.892111185 +710 93 0 days 00:00:02.395226445 +710 94 0 days 00:00:10.618927836 +710 95 0 days 00:00:01.063309750 +710 96 0 days 00:00:00.809919025 +710 97 0 days 00:00:00.748380735 +710 98 0 days 00:00:00.690084174 +710 99 0 days 00:00:03.513493985 +710 100 0 days 00:00:02.035221500 +711 1 0 days 00:00:01.797483860 +711 2 0 days 00:00:06.594682360 +711 3 0 days 00:00:05.281059957 +711 4 0 days 00:00:01.108523142 +711 5 0 days 00:00:10.273855063 +711 6 0 days 00:00:19.842874255 +711 7 0 days 00:00:01.554587008 +711 8 0 days 00:00:01.056210360 +711 9 0 days 00:00:01.844787645 +711 10 0 days 00:00:12.369217330 +711 11 0 days 00:00:01.197950957 +711 12 0 days 00:00:19.216804930 +711 13 0 days 00:00:01.387862660 +711 14 0 days 00:00:03.463770655 +711 15 0 days 00:00:03.147077095 +711 16 0 
days 00:00:06.164235170 +711 17 0 days 00:00:01.439074210 +711 18 0 days 00:00:02.341683543 +711 19 0 days 00:00:18.506325830 +711 20 0 days 00:00:01.201437728 +711 21 0 days 00:00:02.562231785 +711 22 0 days 00:00:01.799722820 +711 23 0 days 00:00:01.312879615 +711 24 0 days 00:00:01.089241593 +711 25 0 days 00:00:03.801161106 +711 26 0 days 00:00:01.425545705 +711 27 0 days 00:00:12.452072095 +711 28 0 days 00:00:04.641855356 +711 29 0 days 00:00:01.013581600 +711 30 0 days 00:00:00.630196056 +711 31 0 days 00:00:01.463782920 +711 32 0 days 00:00:08.609959215 +711 33 0 days 00:00:01.546044233 +711 34 0 days 00:00:15.650582457 +711 35 0 days 00:00:01.325259780 +711 36 0 days 00:00:00.627053652 +711 37 0 days 00:00:02.721202245 +711 38 0 days 00:00:04.004478340 +711 39 0 days 00:00:03.189861316 +711 40 0 days 00:00:02.119095330 +711 41 0 days 00:00:00.771917266 +711 42 0 days 00:00:01.848277340 +711 43 0 days 00:00:00.922025866 +711 44 0 days 00:00:00.652738740 +711 45 0 days 00:00:00.654433488 +711 46 0 days 00:00:03.601376012 +711 47 0 days 00:00:18.086444625 +711 48 0 days 00:00:03.867361340 +711 49 0 days 00:00:03.801297055 +711 50 0 days 00:00:19.355483428 +711 51 0 days 00:00:01.112640170 +711 52 0 days 00:00:00.672168520 +711 53 0 days 00:00:01.235315060 +711 54 0 days 00:00:02.207849244 +711 55 0 days 00:00:01.519706427 +711 56 0 days 00:00:00.695194715 +711 57 0 days 00:00:01.351813540 +711 58 0 days 00:00:18.144233520 +711 59 0 days 00:00:01.397173828 +711 60 0 days 00:00:01.295634862 +711 61 0 days 00:00:03.755919665 +711 62 0 days 00:00:01.656862332 +711 63 0 days 00:00:01.957698096 +711 64 0 days 00:00:01.015322280 +711 65 0 days 00:00:01.681218662 +711 66 0 days 00:00:03.913245483 +711 67 0 days 00:00:00.770289073 +711 68 0 days 00:00:18.054665235 +711 69 0 days 00:00:13.757685585 +711 70 0 days 00:00:04.836672134 +711 71 0 days 00:00:04.134937227 +711 72 0 days 00:00:01.482308315 +711 73 0 days 00:00:11.024649925 +711 74 0 days 00:00:03.361337185 
+711 75 0 days 00:00:02.181975440 +711 76 0 days 00:00:00.799486013 +711 77 0 days 00:00:20.771657380 +712 1 0 days 00:00:00.545056845 +712 2 0 days 00:00:01.286793736 +712 3 0 days 00:00:02.843660766 +712 4 0 days 00:00:00.342253770 +712 5 0 days 00:00:00.680208176 +712 6 0 days 00:00:00.572648464 +712 7 0 days 00:00:06.080688857 +712 8 0 days 00:00:02.788277165 +712 9 0 days 00:00:00.207471020 +712 10 0 days 00:00:00.850977672 +712 11 0 days 00:00:02.387372165 +712 12 0 days 00:00:00.786856377 +712 13 0 days 00:00:00.898155260 +712 14 0 days 00:00:04.427035253 +712 15 0 days 00:00:02.461766530 +712 16 0 days 00:00:06.997280860 +712 17 0 days 00:00:05.874957860 +712 18 0 days 00:00:00.812578255 +712 19 0 days 00:00:00.208737128 +712 20 0 days 00:00:03.181819928 +712 21 0 days 00:00:00.795287120 +712 22 0 days 00:00:02.273351660 +712 23 0 days 00:00:02.337976937 +712 24 0 days 00:00:02.146933832 +712 25 0 days 00:00:07.595093796 +712 26 0 days 00:00:00.981072090 +712 27 0 days 00:00:09.198416873 +712 28 0 days 00:00:01.316466105 +712 29 0 days 00:00:00.331119960 +712 30 0 days 00:00:01.413401250 +712 31 0 days 00:00:00.649729673 +712 32 0 days 00:00:00.296016030 +712 33 0 days 00:00:06.886280340 +712 34 0 days 00:00:01.295547531 +712 35 0 days 00:00:00.536737905 +712 36 0 days 00:00:00.344865392 +712 37 0 days 00:00:02.308109362 +712 38 0 days 00:00:00.617856605 +712 39 0 days 00:00:02.628874885 +712 40 0 days 00:00:00.804022942 +712 41 0 days 00:00:05.188560530 +712 42 0 days 00:00:00.734042640 +712 43 0 days 00:00:00.552979320 +712 44 0 days 00:00:00.163067895 +712 45 0 days 00:00:08.729141635 +712 46 0 days 00:00:02.061642690 +712 47 0 days 00:00:00.625832695 +712 48 0 days 00:00:01.555905350 +712 49 0 days 00:00:00.313482590 +712 50 0 days 00:00:02.625546363 +712 51 0 days 00:00:03.553362820 +712 52 0 days 00:00:00.978837566 +712 53 0 days 00:00:00.920332082 +712 54 0 days 00:00:06.387266195 +712 55 0 days 00:00:00.268874053 +712 56 0 days 00:00:00.740851852 
+712 57 0 days 00:00:07.143334028 +712 58 0 days 00:00:01.161666792 +712 59 0 days 00:00:00.597679132 +712 60 0 days 00:00:02.361363840 +712 61 0 days 00:00:05.529425093 +712 62 0 days 00:00:00.885646113 +712 63 0 days 00:00:03.478227973 +712 64 0 days 00:00:00.267644166 +712 65 0 days 00:00:06.408566590 +712 66 0 days 00:00:00.624457580 +712 67 0 days 00:00:00.748891253 +712 68 0 days 00:00:08.937337384 +712 69 0 days 00:00:00.481332808 +712 70 0 days 00:00:01.205649170 +712 71 0 days 00:00:00.179017955 +712 72 0 days 00:00:02.766128785 +712 73 0 days 00:00:01.521833345 +712 74 0 days 00:00:00.423641948 +712 75 0 days 00:00:01.274196855 +712 76 0 days 00:00:00.707684000 +712 77 0 days 00:00:03.608611945 +712 78 0 days 00:00:08.444770415 +712 79 0 days 00:00:00.561699255 +712 80 0 days 00:00:04.106747883 +712 81 0 days 00:00:07.064236805 +712 82 0 days 00:00:01.958367260 +712 83 0 days 00:00:00.260055417 +712 84 0 days 00:00:00.900325406 +712 85 0 days 00:00:02.411107255 +712 86 0 days 00:00:00.522989761 +712 87 0 days 00:00:04.316205170 +712 88 0 days 00:00:03.642476455 +712 89 0 days 00:00:00.657067008 +712 90 0 days 00:00:05.052428516 +712 91 0 days 00:00:00.693363390 +712 92 0 days 00:00:01.285318700 +712 93 0 days 00:00:00.861788115 +712 94 0 days 00:00:00.533221165 +712 95 0 days 00:00:06.603260600 +712 96 0 days 00:00:04.327492435 +712 97 0 days 00:00:00.834822197 +712 98 0 days 00:00:08.473313245 +712 99 0 days 00:00:07.781137493 +712 100 0 days 00:00:09.040110910 +713 1 0 days 00:07:18.992718275 +714 1 0 days 00:14:28.794668450 +715 1 0 days 00:00:05.208758096 +715 2 0 days 00:00:07.726074552 +715 3 0 days 00:00:46.053037260 +715 4 0 days 00:00:08.857452882 +715 5 0 days 00:00:44.341987285 +715 6 0 days 00:00:23.506065195 +715 7 0 days 00:00:15.459928820 +715 8 0 days 00:00:05.982556626 +715 9 0 days 00:00:42.267208380 +715 10 0 days 00:00:15.070382606 +715 11 0 days 00:00:18.509914548 +715 12 0 days 00:00:09.338998746 +715 13 0 days 00:01:05.326186086 
+715 14 0 days 00:00:04.950545435 +715 15 0 days 00:00:18.956665180 +715 16 0 days 00:00:23.562680583 +715 17 0 days 00:00:26.655382140 +716 1 0 days 00:00:03.201878212 +716 2 0 days 00:00:03.192357415 +716 3 0 days 00:00:26.206052465 +716 4 0 days 00:00:05.998398573 +716 5 0 days 00:00:07.287276185 +716 6 0 days 00:00:06.587612613 +716 7 0 days 00:00:07.659562513 +716 8 0 days 00:00:07.127148320 +716 9 0 days 00:00:04.145229960 +716 10 0 days 00:00:05.951747485 +716 11 0 days 00:00:10.598123686 +716 12 0 days 00:00:11.882276832 +716 13 0 days 00:00:03.057935191 +716 14 0 days 00:00:04.973684540 +716 15 0 days 00:00:07.686395328 +716 16 0 days 00:00:05.589937246 +716 17 0 days 00:00:18.862355616 +716 18 0 days 00:00:12.240427946 +716 19 0 days 00:00:29.558582900 +716 20 0 days 00:00:06.768140500 +716 21 0 days 00:00:17.826460775 +716 22 0 days 00:00:03.506923533 +716 23 0 days 00:00:33.413448353 +716 24 0 days 00:00:25.245603164 +716 25 0 days 00:00:05.575088886 +716 26 0 days 00:00:02.669121360 +716 27 0 days 00:00:05.839309120 +716 28 0 days 00:00:04.806024653 +716 29 0 days 00:00:10.654984333 +716 30 0 days 00:00:11.243446946 +716 31 0 days 00:00:09.258036625 +716 32 0 days 00:00:08.067377252 +716 33 0 days 00:00:24.107188853 +716 34 0 days 00:00:03.014766620 +716 35 0 days 00:00:03.861635948 +716 36 0 days 00:00:35.972314948 +716 37 0 days 00:00:07.370426224 +716 38 0 days 00:00:14.462192506 +717 1 0 days 00:01:10.102351875 +717 2 0 days 00:00:07.342700740 +717 3 0 days 00:00:17.889953641 +717 4 0 days 00:00:16.090339100 +717 5 0 days 00:00:08.781354286 +717 6 0 days 00:00:16.317561985 +717 7 0 days 00:00:16.900531350 +717 8 0 days 00:00:13.235790166 +717 9 0 days 00:00:46.090840133 +717 10 0 days 00:00:21.973060593 +717 11 0 days 00:00:06.804865753 +717 12 0 days 00:00:12.766604653 +717 13 0 days 00:00:09.987716195 +717 14 0 days 00:00:07.478390992 +717 15 0 days 00:00:12.044777908 +717 16 0 days 00:00:21.135536485 +717 17 0 days 00:00:09.952571760 +717 18 0 
days 00:00:08.064681396 +717 19 0 days 00:00:19.720632552 +717 20 0 days 00:00:10.382695710 +717 21 0 days 00:00:08.440035073 +717 22 0 days 00:01:03.965694640 +718 1 0 days 00:00:41.421379022 +718 2 0 days 00:00:08.510339700 +718 3 0 days 00:00:06.260570335 +718 4 0 days 00:00:41.230971634 +718 5 0 days 00:00:16.565526806 +718 6 0 days 00:00:07.803477013 +718 7 0 days 00:00:12.955591753 +718 8 0 days 00:00:14.191455896 +718 9 0 days 00:00:19.481126266 +718 10 0 days 00:00:05.831641970 +718 11 0 days 00:00:15.462014328 +718 12 0 days 00:00:37.921545610 +718 13 0 days 00:00:04.537440066 +718 14 0 days 00:00:03.909116186 +718 15 0 days 00:00:07.623272485 +718 16 0 days 00:00:03.691899265 +718 17 0 days 00:00:03.156648590 +718 18 0 days 00:00:19.295335000 +718 19 0 days 00:00:09.447841085 +718 20 0 days 00:00:16.026062740 +718 21 0 days 00:00:05.949153760 +719 1 0 days 00:01:05.113424433 +719 2 0 days 00:04:51.400212983 +719 3 0 days 00:00:55.094467271 +719 4 0 days 00:01:41.210690866 +719 5 0 days 00:06:19.083341522 +719 6 0 days 00:01:34.503428337 +719 7 0 days 00:01:26.158637600 +719 8 0 days 00:02:09.566385033 +719 9 0 days 00:01:01.380618833 +720 1 0 days 00:03:02.273166500 +720 2 0 days 00:02:54.248238500 +720 3 0 days 00:05:19.142843300 +720 4 0 days 00:00:51.440380716 +720 5 0 days 00:06:20.981036354 +721 1 0 days 00:01:28.503121020 +721 2 0 days 00:00:48.479043916 +721 3 0 days 00:01:33.834823705 +721 4 0 days 00:11:51.377187100 +721 5 0 days 00:01:14.773941383 +722 1 0 days 00:07:05.207269316 +722 2 0 days 00:02:30.318234385 +722 3 0 days 00:00:41.889133936 +722 4 0 days 00:09:47.546359544 +723 1 0 days 00:10:54.698698585 +723 2 0 days 00:06:51.421395062 +723 3 0 days 00:14:28.264332357 +724 1 0 days 00:00:52.164276812 +724 2 0 days 00:02:41.120535166 +724 3 0 days 00:00:23.949668948 +724 4 0 days 00:00:34.596921771 +724 5 0 days 00:01:31.760199040 +724 6 0 days 00:01:11.238209936 +724 7 0 days 00:13:12.769733028 +725 1 0 days 00:04:05.041668762 +725 2 0 
days 00:06:08.290965841 +725 3 0 days 00:01:37.752103685 +725 4 0 days 00:01:40.377756650 +725 5 0 days 00:01:56.830581866 +725 6 0 days 00:17:22.261344700 +726 1 0 days 00:01:27.574636033 +726 2 0 days 00:12:14.322845875 +726 3 0 days 00:01:00.804543166 +726 4 0 days 00:10:56.238571883 +727 1 0 days 00:00:16.252378445 +727 2 0 days 00:00:38.675531533 +727 3 0 days 00:00:24.620115470 +727 4 0 days 00:00:43.294826800 +727 5 0 days 00:00:13.018454993 +727 6 0 days 00:00:13.112752260 +727 7 0 days 00:00:15.196356393 +727 8 0 days 00:00:22.952684626 +727 9 0 days 00:00:22.586518270 +727 10 0 days 00:00:38.201851486 +727 11 0 days 00:00:39.059978140 +727 12 0 days 00:00:21.597059536 +727 13 0 days 00:00:14.092639995 +727 14 0 days 00:00:14.451683900 +727 15 0 days 00:00:26.357730420 +727 16 0 days 00:00:12.528164753 +727 17 0 days 00:00:15.097362170 +727 18 0 days 00:00:24.408050988 +727 19 0 days 00:00:42.942998575 +728 1 0 days 00:00:39.334186040 +728 2 0 days 00:00:39.904088973 +728 3 0 days 00:00:21.742357093 +728 4 0 days 00:00:23.078470153 +728 5 0 days 00:00:34.468551293 +728 6 0 days 00:00:21.536902340 +728 7 0 days 00:00:39.552515286 +728 8 0 days 00:00:21.066601153 +728 9 0 days 00:00:39.582159826 +728 10 0 days 00:00:40.121997653 +728 11 0 days 00:00:15.143271360 +728 12 0 days 00:00:39.362156186 +728 13 0 days 00:00:25.518120468 +728 14 0 days 00:00:24.126093986 +728 15 0 days 00:00:14.716064528 +728 16 0 days 00:00:39.735173140 +728 17 0 days 00:00:38.350909060 +728 18 0 days 00:00:21.546052440 +728 19 0 days 00:00:39.936026880 +729 1 0 days 00:00:12.658106896 +729 2 0 days 00:00:19.212287066 +729 3 0 days 00:00:18.777614435 +729 4 0 days 00:00:17.049108113 +729 5 0 days 00:00:09.340645650 +729 6 0 days 00:00:18.990670720 +729 7 0 days 00:00:21.577847505 +729 8 0 days 00:00:21.630314450 +729 9 0 days 00:00:19.263951460 +729 10 0 days 00:00:06.239114220 +729 11 0 days 00:00:10.667459693 +729 12 0 days 00:00:11.016132706 +729 13 0 days 00:00:09.529531560 +729 
14 0 days 00:00:07.110264525 +729 15 0 days 00:00:10.665174853 +729 16 0 days 00:00:16.782814900 +729 17 0 days 00:00:12.349934996 +729 18 0 days 00:00:12.341351076 +729 19 0 days 00:00:07.003689920 +729 20 0 days 00:00:10.778272386 +729 21 0 days 00:00:10.830337753 +729 22 0 days 00:00:08.430658910 +729 23 0 days 00:00:19.601559793 +729 24 0 days 00:00:11.833219690 +729 25 0 days 00:00:07.264062390 +729 26 0 days 00:00:21.662037750 +729 27 0 days 00:00:19.424976840 +729 28 0 days 00:00:19.550308713 +729 29 0 days 00:00:21.582753375 +729 30 0 days 00:00:11.097201940 +729 31 0 days 00:00:08.203089893 +729 32 0 days 00:00:18.713526180 +729 33 0 days 00:00:19.023582213 +729 34 0 days 00:00:07.424733860 +729 35 0 days 00:00:18.874294666 +730 1 0 days 00:00:22.356251290 +730 2 0 days 00:00:23.051652664 +730 3 0 days 00:00:12.808463916 +730 4 0 days 00:00:19.918204380 +730 5 0 days 00:00:12.658439225 +730 6 0 days 00:00:21.715887235 +730 7 0 days 00:00:13.881887873 +730 8 0 days 00:00:08.479407205 +730 9 0 days 00:00:12.204229140 +730 10 0 days 00:00:07.248303345 +730 11 0 days 00:00:10.808216953 +730 12 0 days 00:00:19.959834913 +730 13 0 days 00:00:12.356503215 +730 14 0 days 00:00:10.932963820 +730 15 0 days 00:00:06.498265626 +730 16 0 days 00:00:22.069654800 +730 17 0 days 00:00:06.504747000 +730 18 0 days 00:00:12.153232170 +730 19 0 days 00:00:08.183471688 +730 20 0 days 00:00:09.928314346 +730 21 0 days 00:00:11.502350826 +730 22 0 days 00:00:21.632556320 +730 23 0 days 00:00:07.146747570 +730 24 0 days 00:00:20.069431013 +730 25 0 days 00:00:11.008305540 +730 26 0 days 00:00:07.146631675 +730 27 0 days 00:00:06.334191773 +730 28 0 days 00:00:21.792790895 +730 29 0 days 00:00:22.532442055 +730 30 0 days 00:00:23.136191556 +730 31 0 days 00:00:12.564915695 +730 32 0 days 00:00:23.802571532 +731 1 0 days 00:00:21.802688346 +731 2 0 days 00:00:21.930007520 +731 3 0 days 00:00:21.422535686 +731 4 0 days 00:00:40.995344400 +731 5 0 days 00:00:12.548919706 +731 6 0 
days 00:00:21.798999466 +731 7 0 days 00:00:39.685750473 +731 8 0 days 00:00:39.739714833 +731 9 0 days 00:00:13.414201240 +731 10 0 days 00:00:21.699975733 +731 11 0 days 00:00:49.181690140 +731 12 0 days 00:00:21.531086380 +731 13 0 days 00:00:22.254678160 +731 14 0 days 00:00:21.786273413 +731 15 0 days 00:00:21.778412233 +731 16 0 days 00:00:39.811647720 +731 17 0 days 00:00:12.461649086 +731 18 0 days 00:00:21.657846166 +731 19 0 days 00:00:39.600864993 +731 20 0 days 00:00:39.232385746 +732 1 0 days 00:00:06.311192240 +732 2 0 days 00:00:20.231153373 +732 3 0 days 00:00:17.966706906 +732 4 0 days 00:00:12.440961066 +732 5 0 days 00:00:11.145365260 +732 6 0 days 00:00:20.273805226 +732 7 0 days 00:00:06.554878500 +732 8 0 days 00:00:19.693184853 +732 9 0 days 00:00:20.087983433 +732 10 0 days 00:00:19.921609273 +732 11 0 days 00:00:11.568777466 +732 12 0 days 00:00:07.147868006 +732 13 0 days 00:00:07.688744473 +732 14 0 days 00:00:06.658830946 +732 15 0 days 00:00:09.812200406 +732 16 0 days 00:00:05.786095880 +732 17 0 days 00:00:06.667409160 +732 18 0 days 00:00:10.848615840 +732 19 0 days 00:00:06.848931406 +732 20 0 days 00:00:11.072788306 +732 21 0 days 00:00:13.643852333 +732 22 0 days 00:00:19.846532506 +732 23 0 days 00:00:19.547934993 +732 24 0 days 00:00:11.397780526 +732 25 0 days 00:00:07.355443340 +732 26 0 days 00:00:06.584277553 +732 27 0 days 00:00:06.444417313 +732 28 0 days 00:00:07.262017350 +732 29 0 days 00:00:11.133008513 +732 30 0 days 00:00:08.066897086 +732 31 0 days 00:00:20.173814133 +732 32 0 days 00:00:20.133600026 +732 33 0 days 00:00:11.043684226 +732 34 0 days 00:00:06.416007080 +732 35 0 days 00:00:20.356845740 +732 36 0 days 00:00:06.717904346 +732 37 0 days 00:00:11.492591186 +732 38 0 days 00:00:20.449769386 +732 39 0 days 00:00:06.474371740 +732 40 0 days 00:00:19.535104686 +732 41 0 days 00:00:08.742592760 +732 42 0 days 00:00:06.925752840 +732 43 0 days 00:00:20.280764693 +732 44 0 days 00:00:20.496278006 +732 45 0 days 
00:00:11.180542273 +732 46 0 days 00:00:20.315055213 +733 1 0 days 00:01:34.347685480 +733 2 0 days 00:00:46.942817200 +733 3 0 days 00:01:15.865095520 +733 4 0 days 00:01:24.761249006 +733 5 0 days 00:00:46.884068666 +733 6 0 days 00:00:47.845074213 +733 7 0 days 00:01:24.967224926 +733 8 0 days 00:00:47.603515033 +733 9 0 days 00:02:15.039409586 +734 1 0 days 00:00:49.439285906 +734 2 0 days 00:02:41.635560620 +734 3 0 days 00:02:58.879859790 +734 4 0 days 00:02:58.866315750 +735 1 0 days 00:01:39.653081966 +735 2 0 days 00:00:31.077827406 +735 3 0 days 00:01:39.835757793 +735 4 0 days 00:01:25.525452880 +735 5 0 days 00:00:48.809505740 +735 6 0 days 00:00:29.405646853 +735 7 0 days 00:00:29.994970073 +735 8 0 days 00:01:05.135320600 +735 9 0 days 00:00:31.032802560 +736 1 0 days 00:01:24.655530560 +736 2 0 days 00:01:35.938283645 +736 3 0 days 00:01:30.773087000 +736 4 0 days 00:00:29.568468480 +736 5 0 days 00:01:25.695241346 +736 6 0 days 00:00:30.078526006 +736 7 0 days 00:00:47.948675800 +736 8 0 days 00:00:49.028417446 +736 9 0 days 00:00:54.204259806 +737 1 0 days 00:00:55.985790383 +737 2 0 days 00:00:14.842631905 +737 3 0 days 00:00:48.278265116 +737 4 0 days 00:00:33.836344793 +737 5 0 days 00:00:14.403033546 +737 6 0 days 00:00:17.579528440 +737 7 0 days 00:00:42.342581193 +737 8 0 days 00:00:15.546509506 +737 9 0 days 00:00:14.634728633 +737 10 0 days 00:00:42.648758560 +737 11 0 days 00:00:12.898813033 +737 12 0 days 00:00:23.879509013 +737 13 0 days 00:00:18.196308633 +737 14 0 days 00:00:25.895600132 +737 15 0 days 00:00:40.071999773 +737 16 0 days 00:00:13.797073080 +737 17 0 days 00:00:23.563589380 +738 1 0 days 00:00:14.234652860 +738 2 0 days 00:00:06.182775165 +738 3 0 days 00:00:22.234780146 +738 4 0 days 00:00:07.889650433 +738 5 0 days 00:00:20.396892773 +738 6 0 days 00:00:08.776863746 +738 7 0 days 00:00:12.024730653 +738 8 0 days 00:00:12.769588966 +738 9 0 days 00:00:11.494643886 +738 10 0 days 00:00:06.835706605 +738 11 0 days 
00:00:06.652545193 +738 12 0 days 00:00:21.812803466 +738 13 0 days 00:00:20.215213926 +738 14 0 days 00:00:21.167930380 +738 15 0 days 00:00:06.778256580 +738 16 0 days 00:00:21.005348980 +738 17 0 days 00:00:26.208933124 +738 18 0 days 00:00:06.500165066 +738 19 0 days 00:00:21.295436406 +738 20 0 days 00:00:08.432267704 +738 21 0 days 00:00:21.639161860 +738 22 0 days 00:00:11.748143420 +738 23 0 days 00:00:19.403315750 +738 24 0 days 00:00:08.945443766 +738 25 0 days 00:00:08.242136773 +738 26 0 days 00:00:08.776252540 +738 27 0 days 00:00:17.035308980 +738 28 0 days 00:00:12.067871340 +738 29 0 days 00:00:20.181582873 +738 30 0 days 00:00:09.516383506 +738 31 0 days 00:00:06.677249186 +738 32 0 days 00:00:14.401809484 +738 33 0 days 00:00:10.670985606 +738 34 0 days 00:00:14.784419008 +738 35 0 days 00:00:09.178045216 +738 36 0 days 00:00:13.638792556 +738 37 0 days 00:00:11.599361580 +739 1 0 days 00:01:59.649115773 +739 2 0 days 00:04:04.426289132 +739 3 0 days 00:01:58.786194200 +740 1 0 days 00:02:30.295106636 +740 2 0 days 00:00:40.251386326 +740 3 0 days 00:00:48.140872444 +740 4 0 days 00:00:40.736637573 +740 5 0 days 00:01:06.611591493 +740 6 0 days 00:00:50.171369353 +741 1 0 days 00:00:00.161146252 +741 2 0 days 00:00:00.188488770 +741 3 0 days 00:00:00.275733025 +741 4 0 days 00:00:00.197415255 +741 5 0 days 00:00:00.294203460 +741 6 0 days 00:00:00.170433703 +741 7 0 days 00:00:00.178960675 +741 8 0 days 00:00:00.191254815 +741 9 0 days 00:00:00.171475603 +741 10 0 days 00:00:00.264137193 +741 11 0 days 00:00:00.191148672 +741 12 0 days 00:00:00.157011473 +741 13 0 days 00:00:00.182094156 +741 14 0 days 00:00:00.290561698 +741 15 0 days 00:00:00.286468612 +741 16 0 days 00:00:00.292977240 +741 17 0 days 00:00:00.273861900 +741 20 0 days 00:00:00.249189200 +741 22 0 days 00:00:00.198916003 +741 24 0 days 00:00:00.253406776 +741 25 0 days 00:00:00.279534669 +741 26 0 days 00:00:00.175253166 +741 28 0 days 00:00:00.148716595 +741 30 0 days 
00:00:00.190721537 +741 31 0 days 00:00:00.289850704 +741 32 0 days 00:00:00.154040473 +741 34 0 days 00:00:00.181179782 +741 35 0 days 00:00:00.166293106 +741 36 0 days 00:00:00.150814698 +741 37 0 days 00:00:00.150100174 +741 38 0 days 00:00:00.254303172 +741 40 0 days 00:00:00.152416168 +741 41 0 days 00:00:00.170636822 +741 44 0 days 00:00:00.128733720 +741 45 0 days 00:00:00.176665333 +741 47 0 days 00:00:00.189485632 +741 48 0 days 00:00:00.288174370 +741 49 0 days 00:00:00.195224002 +741 51 0 days 00:00:00.139651588 +741 52 0 days 00:00:00.153066348 +741 54 0 days 00:00:00.183891348 +741 55 0 days 00:00:00.289639830 +741 57 0 days 00:00:00.204655121 +741 58 0 days 00:00:00.256338984 +741 60 0 days 00:00:00.277186755 +741 61 0 days 00:00:00.286811834 +741 62 0 days 00:00:00.282392260 +741 63 0 days 00:00:00.281325870 +741 64 0 days 00:00:00.195421410 +741 65 0 days 00:00:00.173312888 +741 66 0 days 00:00:00.186847513 +741 67 0 days 00:00:00.121446313 +741 69 0 days 00:00:00.250246992 +741 70 0 days 00:00:00.199304436 +741 72 0 days 00:00:00.176483904 +741 73 0 days 00:00:00.152325204 +741 74 0 days 00:00:00.191108931 +741 75 0 days 00:00:00.294185708 +741 76 0 days 00:00:00.151468261 +741 77 0 days 00:00:00.290889846 +741 78 0 days 00:00:00.192301154 +741 79 0 days 00:00:00.143033522 +741 81 0 days 00:00:00.280212656 +741 83 0 days 00:00:00.185148054 +741 84 0 days 00:00:00.251295004 +741 85 0 days 00:00:00.194730715 +741 86 0 days 00:00:00.189909451 +741 87 0 days 00:00:00.263210523 +741 88 0 days 00:00:00.168916242 +741 89 0 days 00:00:00.181410242 +741 90 0 days 00:00:00.172001392 +741 91 0 days 00:00:00.151308530 +741 92 0 days 00:00:00.240033712 +741 93 0 days 00:00:00.265071680 +741 94 0 days 00:00:00.251427152 +741 95 0 days 00:00:00.143812510 +741 98 0 days 00:00:00.153929152 +742 2 0 days 00:00:00.261448736 +742 3 0 days 00:00:00.208436758 +742 4 0 days 00:00:00.131943195 +742 6 0 days 00:00:00.161115061 +742 7 0 days 00:00:00.169129440 +742 8 0 days 
00:00:00.292299880 +742 9 0 days 00:00:00.257121044 +742 12 0 days 00:00:00.280135851 +742 13 0 days 00:00:00.313769273 +742 14 0 days 00:00:00.180766042 +742 15 0 days 00:00:00.150079215 +742 19 0 days 00:00:00.186432648 +742 20 0 days 00:00:00.288036077 +742 22 0 days 00:00:00.132081060 +742 23 0 days 00:00:00.173618394 +742 25 0 days 00:00:00.177159497 +742 26 0 days 00:00:00.180512113 +742 27 0 days 00:00:00.194352580 +742 28 0 days 00:00:00.314215409 +742 29 0 days 00:00:00.304565185 +742 31 0 days 00:00:00.190145145 +742 32 0 days 00:00:00.205300645 +742 33 0 days 00:00:00.194443185 +742 34 0 days 00:00:00.259825588 +742 36 0 days 00:00:00.123021533 +742 37 0 days 00:00:00.288224836 +742 38 0 days 00:00:00.297602510 +742 39 0 days 00:00:00.206393607 +742 40 0 days 00:00:00.318435851 +742 41 0 days 00:00:00.176955567 +742 43 0 days 00:00:00.201711502 +742 44 0 days 00:00:00.157843892 +742 45 0 days 00:00:00.295642511 +742 46 0 days 00:00:00.189903402 +742 47 0 days 00:00:00.156062872 +742 48 0 days 00:00:00.194918688 +742 51 0 days 00:00:00.135649200 +742 53 0 days 00:00:00.298789658 +742 54 0 days 00:00:00.284878643 +742 55 0 days 00:00:00.171402395 +742 56 0 days 00:00:00.186933405 +742 57 0 days 00:00:00.118938686 +742 58 0 days 00:00:00.154897820 +742 59 0 days 00:00:00.154081000 +742 60 0 days 00:00:00.201240056 +742 61 0 days 00:00:00.125880530 +742 62 0 days 00:00:00.166637735 +742 65 0 days 00:00:00.186145182 +742 66 0 days 00:00:00.160721474 +742 67 0 days 00:00:00.189269806 +742 68 0 days 00:00:00.260666728 +742 69 0 days 00:00:00.148385170 +742 71 0 days 00:00:00.149875602 +742 72 0 days 00:00:00.202613734 +742 73 0 days 00:00:00.190569267 +742 74 0 days 00:00:00.197168800 +742 76 0 days 00:00:00.192355985 +742 77 0 days 00:00:00.206200751 +742 79 0 days 00:00:00.297952447 +742 80 0 days 00:00:00.183888876 +742 81 0 days 00:00:00.192862742 +742 82 0 days 00:00:00.192122903 +742 83 0 days 00:00:00.146729964 +742 85 0 days 00:00:00.261965372 +742 86 0 
days 00:00:00.203125465 +742 88 0 days 00:00:00.178180926 +742 89 0 days 00:00:00.118174413 +742 90 0 days 00:00:00.192811090 +742 91 0 days 00:00:00.152278753 +742 93 0 days 00:00:00.293073376 +742 94 0 days 00:00:00.204580182 +742 95 0 days 00:00:00.277244462 +742 96 0 days 00:00:00.134774746 +742 97 0 days 00:00:00.285564129 +742 98 0 days 00:00:00.195800995 +742 99 0 days 00:00:00.192418260 +742 100 0 days 00:00:00.154617776 +743 1 0 days 00:00:00.103495823 +743 2 0 days 00:00:00.104769408 +743 3 0 days 00:00:00.083459011 +743 4 0 days 00:00:00.105564271 +743 5 0 days 00:00:00.070949126 +743 6 0 days 00:00:00.123958806 +743 7 0 days 00:00:00.086152203 +743 9 0 days 00:00:00.095205603 +743 10 0 days 00:00:00.101012586 +743 11 0 days 00:00:00.108203665 +743 12 0 days 00:00:00.092020660 +743 14 0 days 00:00:00.167781973 +743 15 0 days 00:00:00.176481486 +743 16 0 days 00:00:00.104321036 +743 18 0 days 00:00:00.094850505 +743 19 0 days 00:00:00.093052362 +743 20 0 days 00:00:00.109080388 +743 21 0 days 00:00:00.081337605 +743 22 0 days 00:00:00.155036212 +743 23 0 days 00:00:00.160826608 +743 25 0 days 00:00:00.103030746 +743 26 0 days 00:00:00.164991198 +743 28 0 days 00:00:00.093341261 +743 29 0 days 00:00:00.101521742 +743 30 0 days 00:00:00.110053700 +743 31 0 days 00:00:00.080855116 +743 32 0 days 00:00:00.161800658 +743 33 0 days 00:00:00.163184213 +743 34 0 days 00:00:00.111494775 +743 35 0 days 00:00:00.104082395 +743 36 0 days 00:00:00.095764571 +743 37 0 days 00:00:00.149017773 +743 38 0 days 00:00:00.098378218 +743 39 0 days 00:00:00.084168720 +743 40 0 days 00:00:00.157002172 +743 41 0 days 00:00:00.086911712 +743 42 0 days 00:00:00.102795971 +743 43 0 days 00:00:00.159628972 +743 44 0 days 00:00:00.087275651 +743 45 0 days 00:00:00.154519720 +743 46 0 days 00:00:00.085558945 +743 47 0 days 00:00:00.111426921 +743 48 0 days 00:00:00.108110344 +743 50 0 days 00:00:00.163768491 +743 52 0 days 00:00:00.097553183 +743 53 0 days 00:00:00.084194188 +743 54 0 
days 00:00:00.086769562 +743 55 0 days 00:00:00.140419412 +743 56 0 days 00:00:00.128182490 +743 57 0 days 00:00:00.143985733 +743 59 0 days 00:00:00.096878124 +743 60 0 days 00:00:00.159820125 +743 61 0 days 00:00:00.086834117 +743 62 0 days 00:00:00.124235993 +743 63 0 days 00:00:00.161066441 +743 64 0 days 00:00:00.143772824 +743 65 0 days 00:00:00.084321385 +743 66 0 days 00:00:00.155984757 +743 68 0 days 00:00:00.154095852 +743 69 0 days 00:00:00.104407520 +743 70 0 days 00:00:00.132725076 +743 71 0 days 00:00:00.100656543 +743 73 0 days 00:00:00.169181281 +743 75 0 days 00:00:00.157861346 +743 76 0 days 00:00:00.164856697 +743 77 0 days 00:00:00.085965826 +743 78 0 days 00:00:00.145325132 +743 79 0 days 00:00:00.104545594 +743 80 0 days 00:00:00.156645430 +743 81 0 days 00:00:00.087616832 +743 82 0 days 00:00:00.091549462 +743 83 0 days 00:00:00.089017651 +743 84 0 days 00:00:00.087814015 +743 85 0 days 00:00:00.102281084 +743 86 0 days 00:00:00.081191820 +743 87 0 days 00:00:00.118379628 +743 88 0 days 00:00:00.089334426 +743 89 0 days 00:00:00.118527895 +743 90 0 days 00:00:00.087234591 +743 92 0 days 00:00:00.155128486 +743 93 0 days 00:00:00.171934562 +743 94 0 days 00:00:00.085042146 +743 95 0 days 00:00:00.089709295 +743 96 0 days 00:00:00.165366884 +743 98 0 days 00:00:00.105287060 +743 99 0 days 00:00:00.141668832 +743 100 0 days 00:00:00.083827357 +744 1 0 days 00:00:00.109016705 +744 2 0 days 00:00:00.086016421 +744 3 0 days 00:00:00.162488194 +744 4 0 days 00:00:00.160160204 +744 5 0 days 00:00:00.078562773 +744 7 0 days 00:00:00.095327932 +744 8 0 days 00:00:00.086421687 +744 9 0 days 00:00:00.126337488 +744 10 0 days 00:00:00.089732330 +744 11 0 days 00:00:00.112942100 +744 12 0 days 00:00:00.110102460 +744 13 0 days 00:00:00.087022257 +744 14 0 days 00:00:00.087089570 +744 16 0 days 00:00:00.106299953 +744 18 0 days 00:00:00.107481685 +744 19 0 days 00:00:00.166129256 +744 20 0 days 00:00:00.083966630 +744 22 0 days 00:00:00.083406993 +744 23 0 
days 00:00:00.086181773 +744 24 0 days 00:00:00.165785968 +744 25 0 days 00:00:00.100911808 +744 26 0 days 00:00:00.098003082 +744 27 0 days 00:00:00.114291035 +744 28 0 days 00:00:00.084820968 +744 29 0 days 00:00:00.164930984 +744 30 0 days 00:00:00.094601455 +744 31 0 days 00:00:00.089224532 +744 32 0 days 00:00:00.108250302 +744 33 0 days 00:00:00.167064725 +744 34 0 days 00:00:00.085644826 +744 35 0 days 00:00:00.166955046 +744 36 0 days 00:00:00.156654401 +744 37 0 days 00:00:00.140651890 +744 38 0 days 00:00:00.155343048 +744 39 0 days 00:00:00.093919228 +744 40 0 days 00:00:00.104941034 +744 41 0 days 00:00:00.088385226 +744 42 0 days 00:00:00.110876464 +744 43 0 days 00:00:00.125392726 +744 44 0 days 00:00:00.150648483 +744 45 0 days 00:00:00.085698775 +744 46 0 days 00:00:00.104844637 +744 48 0 days 00:00:00.154350960 +744 50 0 days 00:00:00.146084284 +744 51 0 days 00:00:00.151513623 +744 52 0 days 00:00:00.156618954 +744 55 0 days 00:00:00.102795824 +744 56 0 days 00:00:00.128061366 +744 57 0 days 00:00:00.088338300 +744 58 0 days 00:00:00.143649116 +744 59 0 days 00:00:00.084564248 +744 60 0 days 00:00:00.066854613 +744 61 0 days 00:00:00.081437300 +744 62 0 days 00:00:00.101032867 +744 63 0 days 00:00:00.085363742 +744 64 0 days 00:00:00.127655846 +744 65 0 days 00:00:00.088179589 +744 67 0 days 00:00:00.169530642 +744 68 0 days 00:00:00.087604042 +744 69 0 days 00:00:00.106346782 +744 71 0 days 00:00:00.087847825 +744 72 0 days 00:00:00.167199481 +744 73 0 days 00:00:00.113503997 +744 74 0 days 00:00:00.146102724 +744 75 0 days 00:00:00.096618040 +744 76 0 days 00:00:00.087408646 +744 77 0 days 00:00:00.087350120 +744 78 0 days 00:00:00.107430732 +744 81 0 days 00:00:00.097675637 +744 82 0 days 00:00:00.082856653 +744 83 0 days 00:00:00.084329776 +744 84 0 days 00:00:00.157758384 +744 85 0 days 00:00:00.085782665 +744 86 0 days 00:00:00.097858355 +744 87 0 days 00:00:00.152042010 +744 88 0 days 00:00:00.087373109 +744 89 0 days 00:00:00.086274295 
+744 90 0 days 00:00:00.091654756 +744 91 0 days 00:00:00.101797765 +744 92 0 days 00:00:00.084668580 +744 94 0 days 00:00:00.146909108 +744 95 0 days 00:00:00.098231175 +744 96 0 days 00:00:00.069526273 +744 97 0 days 00:00:00.110722948 +744 99 0 days 00:00:00.100427693 +744 100 0 days 00:00:00.089557586 +745 1 0 days 00:00:00.281557168 +745 2 0 days 00:00:00.323553910 +745 3 0 days 00:00:00.290054969 +745 4 0 days 00:00:00.280219147 +745 5 0 days 00:00:00.288437214 +745 6 0 days 00:00:00.278930341 +745 8 0 days 00:00:00.289399005 +745 9 0 days 00:00:00.257821211 +745 10 0 days 00:00:00.269704302 +745 11 0 days 00:00:00.156181876 +745 12 0 days 00:00:00.199804363 +745 13 0 days 00:00:00.297803044 +745 14 0 days 00:00:00.141127984 +745 15 0 days 00:00:00.208138295 +745 16 0 days 00:00:00.196637970 +745 17 0 days 00:00:00.195024501 +745 18 0 days 00:00:00.293380270 +745 19 0 days 00:00:00.197687598 +745 20 0 days 00:00:00.152897980 +745 21 0 days 00:00:00.146842533 +745 22 0 days 00:00:00.200254697 +745 23 0 days 00:00:00.217143283 +745 24 0 days 00:00:00.281578912 +745 25 0 days 00:00:00.131222594 +745 28 0 days 00:00:00.277056868 +745 29 0 days 00:00:00.294551065 +745 30 0 days 00:00:00.191520248 +745 31 0 days 00:00:00.286173721 +745 32 0 days 00:00:00.279921340 +745 35 0 days 00:00:00.153475240 +745 36 0 days 00:00:00.197929325 +745 37 0 days 00:00:00.295107926 +745 38 0 days 00:00:00.293852342 +745 39 0 days 00:00:00.279338958 +745 40 0 days 00:00:00.185805661 +745 41 0 days 00:00:00.307961869 +745 42 0 days 00:00:00.294524815 +745 43 0 days 00:00:00.199625231 +745 44 0 days 00:00:00.166468765 +745 46 0 days 00:00:00.213894157 +745 47 0 days 00:00:00.139043808 +745 48 0 days 00:00:00.194901281 +745 49 0 days 00:00:00.284666776 +745 50 0 days 00:00:00.279354858 +745 51 0 days 00:00:00.134868600 +745 52 0 days 00:00:00.175904172 +745 53 0 days 00:00:00.191852989 +745 54 0 days 00:00:00.272990070 +745 55 0 days 00:00:00.195858550 +745 56 0 days 00:00:00.255498896 
+745 57 0 days 00:00:00.151202368 +745 58 0 days 00:00:00.286744312 +745 59 0 days 00:00:00.284176258 +745 60 0 days 00:00:00.199251420 +745 61 0 days 00:00:00.283463350 +745 62 0 days 00:00:00.266251720 +745 64 0 days 00:00:00.182879580 +745 65 0 days 00:00:00.284799546 +745 66 0 days 00:00:00.266291600 +745 67 0 days 00:00:00.140467096 +745 68 0 days 00:00:00.198419912 +745 69 0 days 00:00:00.198134691 +745 70 0 days 00:00:00.196728195 +745 71 0 days 00:00:00.186189994 +745 72 0 days 00:00:00.194095109 +745 73 0 days 00:00:00.164495140 +745 74 0 days 00:00:00.198756633 +745 75 0 days 00:00:00.153551305 +745 76 0 days 00:00:00.159676886 +745 77 0 days 00:00:00.305431305 +745 78 0 days 00:00:00.166203595 +745 80 0 days 00:00:00.292031829 +745 81 0 days 00:00:00.205232497 +745 82 0 days 00:00:00.138082964 +745 83 0 days 00:00:00.178505360 +745 84 0 days 00:00:00.132422552 +745 85 0 days 00:00:00.202409221 +745 86 0 days 00:00:00.140638768 +745 87 0 days 00:00:00.288769015 +745 88 0 days 00:00:00.188027990 +745 89 0 days 00:00:00.136427588 +745 90 0 days 00:00:00.280009135 +745 91 0 days 00:00:00.288450600 +745 92 0 days 00:00:00.183677587 +745 93 0 days 00:00:00.190624988 +745 94 0 days 00:00:00.280919509 +745 95 0 days 00:00:00.192461576 +745 96 0 days 00:00:00.150534541 +745 97 0 days 00:00:00.199349044 +745 98 0 days 00:00:00.200801160 +745 99 0 days 00:00:00.268493452 +745 100 0 days 00:00:00.271019872 +746 1 0 days 00:00:00.163898694 +746 2 0 days 00:00:00.109095568 +746 3 0 days 00:00:00.153296741 +746 4 0 days 00:00:00.138057900 +746 5 0 days 00:00:00.086360662 +746 7 0 days 00:00:00.082395655 +746 8 0 days 00:00:00.153697818 +746 9 0 days 00:00:00.109142422 +746 10 0 days 00:00:00.097091249 +746 11 0 days 00:00:00.101623740 +746 12 0 days 00:00:00.109481767 +746 13 0 days 00:00:00.143445960 +746 14 0 days 00:00:00.155927150 +746 15 0 days 00:00:00.135952300 +746 16 0 days 00:00:00.087355612 +746 19 0 days 00:00:00.096553570 +746 22 0 days 00:00:00.142363692 
+746 23 0 days 00:00:00.161160861 +746 25 0 days 00:00:00.101425945 +746 26 0 days 00:00:00.082582260 +746 27 0 days 00:00:00.095717312 +746 28 0 days 00:00:00.080885003 +746 29 0 days 00:00:00.158118506 +746 30 0 days 00:00:00.084789757 +746 31 0 days 00:00:00.106810033 +746 32 0 days 00:00:00.111643920 +746 33 0 days 00:00:00.145664046 +746 35 0 days 00:00:00.154505770 +746 36 0 days 00:00:00.154315117 +746 37 0 days 00:00:00.096203690 +746 38 0 days 00:00:00.100420164 +746 39 0 days 00:00:00.077645523 +746 40 0 days 00:00:00.137431275 +746 42 0 days 00:00:00.105183375 +746 43 0 days 00:00:00.080436331 +746 44 0 days 00:00:00.158609371 +746 45 0 days 00:00:00.082900540 +746 48 0 days 00:00:00.109820688 +746 49 0 days 00:00:00.110222812 +746 50 0 days 00:00:00.111523598 +746 51 0 days 00:00:00.153377251 +746 52 0 days 00:00:00.079339040 +746 53 0 days 00:00:00.159856207 +746 54 0 days 00:00:00.116715985 +746 55 0 days 00:00:00.099089256 +746 56 0 days 00:00:00.096741516 +746 57 0 days 00:00:00.079049756 +746 58 0 days 00:00:00.086673028 +746 59 0 days 00:00:00.108343085 +746 60 0 days 00:00:00.087638560 +746 61 0 days 00:00:00.102427057 +746 62 0 days 00:00:00.153383345 +746 63 0 days 00:00:00.092590115 +746 64 0 days 00:00:00.107988970 +746 65 0 days 00:00:00.106386258 +746 66 0 days 00:00:00.109378087 +746 67 0 days 00:00:00.176529591 +746 68 0 days 00:00:00.082015092 +746 69 0 days 00:00:00.108663252 +746 70 0 days 00:00:00.102117428 +746 71 0 days 00:00:00.082111980 +746 72 0 days 00:00:00.160440874 +746 73 0 days 00:00:00.112711198 +746 74 0 days 00:00:00.165793993 +746 75 0 days 00:00:00.097298205 +746 77 0 days 00:00:00.167407870 +746 78 0 days 00:00:00.113786492 +746 79 0 days 00:00:00.160905078 +746 80 0 days 00:00:00.103570440 +746 81 0 days 00:00:00.163543035 +746 82 0 days 00:00:00.111301244 +746 84 0 days 00:00:00.116172920 +746 85 0 days 00:00:00.082491196 +746 87 0 days 00:00:00.174420781 +746 88 0 days 00:00:00.167269420 +746 89 0 days 
00:00:00.104953648 +746 90 0 days 00:00:00.160244555 +746 91 0 days 00:00:00.101306438 +746 92 0 days 00:00:00.093084600 +746 93 0 days 00:00:00.089257734 +746 94 0 days 00:00:00.071622544 +746 95 0 days 00:00:00.102017842 +746 96 0 days 00:00:00.110568736 +746 97 0 days 00:00:00.111500933 +746 98 0 days 00:00:00.109805907 +746 99 0 days 00:00:00.079742657 +746 100 0 days 00:00:00.093384120 +747 1 0 days 00:00:00.751857173 +747 2 0 days 00:00:00.566522420 +747 3 0 days 00:00:00.769557713 +747 4 0 days 00:00:00.563605650 +747 5 0 days 00:00:00.751615580 +747 6 0 days 00:00:00.410085026 +747 7 0 days 00:00:01.391288086 +747 8 0 days 00:00:00.734169773 +747 9 0 days 00:00:01.879509794 +747 10 0 days 00:00:00.632577686 +747 11 0 days 00:00:00.825888680 +747 12 0 days 00:00:00.786281580 +747 13 0 days 00:00:01.859008886 +747 14 0 days 00:00:00.839120700 +747 15 0 days 00:00:00.664405900 +747 16 0 days 00:00:00.988997962 +747 17 0 days 00:00:01.589575066 +747 18 0 days 00:00:00.795683413 +747 19 0 days 00:00:00.801222606 +747 20 0 days 00:00:00.894736540 +747 21 0 days 00:00:01.425226406 +747 22 0 days 00:00:01.576591780 +747 23 0 days 00:00:00.888069961 +747 24 0 days 00:00:00.586086220 +747 25 0 days 00:00:00.562057713 +747 26 0 days 00:00:00.922982980 +747 27 0 days 00:00:00.728651580 +747 28 0 days 00:00:01.109998626 +747 29 0 days 00:00:00.820231246 +747 30 0 days 00:00:00.766166873 +747 31 0 days 00:00:01.478521546 +747 32 0 days 00:00:01.140538733 +747 33 0 days 00:00:00.831627966 +747 35 0 days 00:00:00.781200060 +747 36 0 days 00:00:00.843079680 +747 37 0 days 00:00:00.787991520 +747 38 0 days 00:00:01.446978460 +747 39 0 days 00:00:01.871947080 +747 40 0 days 00:00:00.930223046 +747 41 0 days 00:00:01.038453751 +747 42 0 days 00:00:00.492625033 +747 43 0 days 00:00:00.442863560 +747 44 0 days 00:00:01.597123140 +747 45 0 days 00:00:00.749213520 +747 46 0 days 00:00:00.843617780 +747 47 0 days 00:00:01.575338893 +747 48 0 days 00:00:01.924221120 +747 49 0 days 
00:00:00.413959926 +747 50 0 days 00:00:01.517325146 +747 51 0 days 00:00:00.739104400 +747 52 0 days 00:00:00.500916586 +747 53 0 days 00:00:01.438288913 +747 54 0 days 00:00:01.434916240 +747 55 0 days 00:00:00.984271657 +747 56 0 days 00:00:00.830509055 +747 57 0 days 00:00:00.813216706 +747 58 0 days 00:00:00.811433553 +747 59 0 days 00:00:00.784143433 +747 60 0 days 00:00:00.730461826 +747 61 0 days 00:00:01.896573122 +747 62 0 days 00:00:00.429976500 +747 63 0 days 00:00:00.787713903 +747 64 0 days 00:00:00.869704510 +747 65 0 days 00:00:00.796837573 +747 66 0 days 00:00:00.505160800 +747 67 0 days 00:00:00.527965760 +747 68 0 days 00:00:00.459821133 +747 69 0 days 00:00:00.788312980 +747 70 0 days 00:00:01.433148826 +747 71 0 days 00:00:00.799935233 +747 72 0 days 00:00:01.445264340 +747 73 0 days 00:00:00.473527733 +747 74 0 days 00:00:00.733790033 +747 75 0 days 00:00:00.533112933 +747 76 0 days 00:00:00.762249193 +747 77 0 days 00:00:00.803431340 +747 78 0 days 00:00:00.644509546 +747 79 0 days 00:00:00.706864826 +747 80 0 days 00:00:01.578545493 +747 81 0 days 00:00:01.421254080 +747 82 0 days 00:00:00.751464666 +747 83 0 days 00:00:00.567732513 +747 84 0 days 00:00:01.173009986 +747 85 0 days 00:00:01.031939380 +747 86 0 days 00:00:00.446058220 +747 87 0 days 00:00:00.696333520 +747 89 0 days 00:00:01.421719960 +747 90 0 days 00:00:01.667193934 +747 91 0 days 00:00:01.734957224 +747 92 0 days 00:00:00.373785846 +747 93 0 days 00:00:00.720549460 +747 94 0 days 00:00:00.373926500 +747 95 0 days 00:00:00.392870206 +747 96 0 days 00:00:00.493615406 +747 97 0 days 00:00:00.487473780 +747 98 0 days 00:00:00.610708733 +747 99 0 days 00:00:00.421756700 +747 100 0 days 00:00:01.234088300 +748 1 0 days 00:00:00.793023740 +748 2 0 days 00:00:00.745292546 +748 3 0 days 00:00:00.534731760 +748 4 0 days 00:00:00.402877053 +748 5 0 days 00:00:01.302936133 +748 6 0 days 00:00:00.423559646 +748 7 0 days 00:00:00.693164166 +748 8 0 days 00:00:00.381680713 +748 9 0 days 
00:00:00.506015620 +748 10 0 days 00:00:01.782825125 +748 11 0 days 00:00:00.916842093 +748 12 0 days 00:00:01.297404226 +748 14 0 days 00:00:00.693538640 +748 15 0 days 00:00:00.418185093 +748 16 0 days 00:00:00.705340646 +748 17 0 days 00:00:00.411455053 +748 18 0 days 00:00:00.930759186 +748 19 0 days 00:00:00.735145686 +748 20 0 days 00:00:00.783119502 +748 21 0 days 00:00:00.519727654 +748 22 0 days 00:00:01.331856326 +748 23 0 days 00:00:00.699518586 +748 24 0 days 00:00:01.750592483 +748 25 0 days 00:00:00.508972940 +748 26 0 days 00:00:00.446964453 +748 27 0 days 00:00:01.532132632 +748 28 0 days 00:00:00.817965916 +748 29 0 days 00:00:01.766737949 +748 30 0 days 00:00:01.766549717 +748 31 0 days 00:00:00.911787672 +748 32 0 days 00:00:01.309492906 +748 33 0 days 00:00:01.682890786 +748 34 0 days 00:00:01.309990000 +748 35 0 days 00:00:01.307744346 +748 36 0 days 00:00:00.689969386 +748 37 0 days 00:00:00.393663353 +748 38 0 days 00:00:01.711549545 +748 39 0 days 00:00:01.295430393 +748 40 0 days 00:00:01.735147490 +748 41 0 days 00:00:00.708987400 +748 42 0 days 00:00:01.407607646 +748 43 0 days 00:00:00.691482026 +748 44 0 days 00:00:00.556550040 +748 45 0 days 00:00:01.294328146 +748 46 0 days 00:00:00.414525993 +748 47 0 days 00:00:00.672862333 +748 48 0 days 00:00:00.402244040 +748 50 0 days 00:00:00.498299040 +748 51 0 days 00:00:00.447939700 +748 52 0 days 00:00:00.674158240 +748 53 0 days 00:00:01.285167486 +748 54 0 days 00:00:01.312435206 +748 55 0 days 00:00:00.534501706 +748 56 0 days 00:00:01.295971173 +748 57 0 days 00:00:00.705855213 +748 58 0 days 00:00:00.542925700 +748 59 0 days 00:00:00.404746266 +748 60 0 days 00:00:01.305046973 +748 61 0 days 00:00:00.484310380 +748 62 0 days 00:00:00.407897200 +748 63 0 days 00:00:00.693841346 +748 64 0 days 00:00:00.420355535 +748 65 0 days 00:00:00.814926768 +748 66 0 days 00:00:00.415384646 +748 67 0 days 00:00:00.689849833 +748 68 0 days 00:00:00.399334773 +748 69 0 days 00:00:00.852459740 +748 70 
0 days 00:00:01.301691813 +748 71 0 days 00:00:00.693202166 +748 72 0 days 00:00:00.816558940 +748 73 0 days 00:00:01.291310533 +748 74 0 days 00:00:00.417712960 +748 75 0 days 00:00:00.683338946 +748 76 0 days 00:00:01.312164153 +748 77 0 days 00:00:00.514370025 +748 78 0 days 00:00:00.813788968 +748 79 0 days 00:00:00.510660046 +748 80 0 days 00:00:01.302046873 +748 81 0 days 00:00:00.692179646 +748 82 0 days 00:00:01.346276936 +748 83 0 days 00:00:01.753098903 +748 84 0 days 00:00:00.468449526 +748 85 0 days 00:00:00.503399285 +748 86 0 days 00:00:00.793267700 +748 87 0 days 00:00:00.377432860 +748 88 0 days 00:00:00.691103360 +748 89 0 days 00:00:01.306681020 +748 90 0 days 00:00:00.554193453 +748 91 0 days 00:00:00.561716533 +748 92 0 days 00:00:00.462451140 +748 93 0 days 00:00:00.685574886 +748 94 0 days 00:00:01.547760964 +748 95 0 days 00:00:01.299268920 +748 96 0 days 00:00:00.698137160 +748 97 0 days 00:00:00.730623300 +748 98 0 days 00:00:01.292800606 +748 99 0 days 00:00:00.678390026 +748 100 0 days 00:00:00.554194286 +749 1 0 days 00:00:00.299154493 +749 2 0 days 00:00:00.249194785 +749 3 0 days 00:00:00.238469613 +749 4 0 days 00:00:00.197022966 +749 5 0 days 00:00:00.209011380 +749 6 0 days 00:00:00.344381046 +749 7 0 days 00:00:00.639711886 +749 8 0 days 00:00:00.343347413 +749 9 0 days 00:00:00.352561966 +749 10 0 days 00:00:00.416420066 +749 11 0 days 00:00:00.374576433 +749 12 0 days 00:00:00.382344440 +749 13 0 days 00:00:00.258907340 +749 14 0 days 00:00:00.320919453 +749 15 0 days 00:00:00.787761986 +749 16 0 days 00:00:00.194307540 +749 17 0 days 00:00:00.376143453 +749 18 0 days 00:00:00.199687760 +749 19 0 days 00:00:00.211435733 +749 20 0 days 00:00:00.643723180 +749 21 0 days 00:00:00.626078186 +749 22 0 days 00:00:00.382434053 +749 23 0 days 00:00:00.202317060 +749 24 0 days 00:00:00.635700146 +749 25 0 days 00:00:00.238416854 +749 26 0 days 00:00:00.206750956 +749 27 0 days 00:00:00.647846146 +749 28 0 days 00:00:00.463213540 +749 29 0 
days 00:00:00.471116323 +749 30 0 days 00:00:00.668239100 +749 31 0 days 00:00:00.322376993 +749 32 0 days 00:00:00.232371233 +749 33 0 days 00:00:00.660531440 +749 34 0 days 00:00:00.558022633 +749 35 0 days 00:00:00.746850667 +749 36 0 days 00:00:00.645994640 +749 37 0 days 00:00:00.289106546 +749 38 0 days 00:00:00.359065880 +749 39 0 days 00:00:00.410038348 +749 40 0 days 00:00:00.621531906 +749 41 0 days 00:00:00.417805260 +749 42 0 days 00:00:00.640206900 +749 43 0 days 00:00:00.712674633 +749 44 0 days 00:00:00.230847053 +749 45 0 days 00:00:00.664768665 +749 46 0 days 00:00:00.488497058 +749 47 0 days 00:00:00.690449700 +749 48 0 days 00:00:00.228844240 +749 49 0 days 00:00:00.681706413 +749 50 0 days 00:00:00.218507220 +749 51 0 days 00:00:00.352566613 +749 52 0 days 00:00:00.197289040 +749 53 0 days 00:00:00.694357880 +749 54 0 days 00:00:00.633097340 +749 55 0 days 00:00:00.641275006 +749 56 0 days 00:00:00.867849200 +749 57 0 days 00:00:00.297563153 +749 58 0 days 00:00:00.643960580 +749 59 0 days 00:00:00.353891186 +749 60 0 days 00:00:00.198213646 +749 61 0 days 00:00:00.394125980 +749 62 0 days 00:00:00.755885001 +749 63 0 days 00:00:00.851289071 +749 64 0 days 00:00:00.757532573 +749 65 0 days 00:00:00.682127946 +749 66 0 days 00:00:00.186625360 +749 67 0 days 00:00:00.347306526 +749 68 0 days 00:00:00.249235573 +749 69 0 days 00:00:00.644850846 +749 70 0 days 00:00:00.252071313 +749 71 0 days 00:00:00.461067770 +749 72 0 days 00:00:00.729480082 +749 73 0 days 00:00:00.803819406 +749 74 0 days 00:00:00.199605320 +749 75 0 days 00:00:00.244171373 +749 76 0 days 00:00:00.396580084 +749 77 0 days 00:00:00.209633920 +749 78 0 days 00:00:00.293727187 +749 79 0 days 00:00:00.313555720 +749 80 0 days 00:00:00.275880691 +749 81 0 days 00:00:00.398705824 +749 82 0 days 00:00:00.605507480 +749 83 0 days 00:00:00.810268831 +749 84 0 days 00:00:00.322721746 +749 85 0 days 00:00:00.761602056 +749 86 0 days 00:00:00.266802727 +749 87 0 days 00:00:00.350725266 
+749 88 0 days 00:00:00.650745600 +749 89 0 days 00:00:00.450485123 +749 90 0 days 00:00:00.345615086 +749 91 0 days 00:00:00.363303653 +749 92 0 days 00:00:00.407093780 +749 93 0 days 00:00:00.671156386 +749 94 0 days 00:00:00.340788366 +749 95 0 days 00:00:00.794731272 +749 96 0 days 00:00:00.212282360 +749 97 0 days 00:00:00.255751110 +749 98 0 days 00:00:00.367460620 +749 99 0 days 00:00:00.312861060 +749 100 0 days 00:00:00.684776540 +750 1 0 days 00:00:00.259385200 +750 2 0 days 00:00:00.254001482 +750 3 0 days 00:00:00.221226493 +750 4 0 days 00:00:00.360129800 +750 5 0 days 00:00:00.229423826 +750 6 0 days 00:00:00.733118745 +750 7 0 days 00:00:00.228509620 +750 8 0 days 00:00:00.918036481 +750 9 0 days 00:00:00.767323418 +750 10 0 days 00:00:00.249697760 +750 11 0 days 00:00:00.672573913 +750 12 0 days 00:00:00.453898217 +750 13 0 days 00:00:00.475106412 +750 14 0 days 00:00:00.704818453 +750 15 0 days 00:00:00.232007560 +750 16 0 days 00:00:00.674045466 +750 17 0 days 00:00:00.357923606 +750 18 0 days 00:00:00.721401120 +750 19 0 days 00:00:00.663404513 +750 20 0 days 00:00:00.220008133 +750 21 0 days 00:00:00.274965224 +750 22 0 days 00:00:00.221399673 +750 23 0 days 00:00:00.709789500 +750 24 0 days 00:00:00.375407806 +750 25 0 days 00:00:00.908423580 +750 26 0 days 00:00:00.406000460 +750 27 0 days 00:00:00.219923193 +750 28 0 days 00:00:00.231756495 +750 29 0 days 00:00:00.657692186 +750 30 0 days 00:00:00.354342960 +750 31 0 days 00:00:00.446826713 +750 32 0 days 00:00:00.658858813 +750 33 0 days 00:00:00.859074172 +750 34 0 days 00:00:00.434494106 +750 35 0 days 00:00:00.462178855 +750 36 0 days 00:00:00.254078266 +750 37 0 days 00:00:00.445616726 +750 38 0 days 00:00:00.358490820 +750 39 0 days 00:00:00.669858166 +750 40 0 days 00:00:00.301911413 +750 41 0 days 00:00:00.237724780 +750 42 0 days 00:00:00.667688373 +750 43 0 days 00:00:00.201023706 +750 44 0 days 00:00:00.485449618 +750 45 0 days 00:00:00.669188546 +750 46 0 days 00:00:00.697172700 
+750 47 0 days 00:00:00.664311433 +750 48 0 days 00:00:00.360509793 +750 49 0 days 00:00:00.664068180 +750 50 0 days 00:00:00.353611380 +750 51 0 days 00:00:00.663649033 +750 52 0 days 00:00:00.669761840 +750 53 0 days 00:00:00.422260077 +750 54 0 days 00:00:00.251435090 +750 55 0 days 00:00:00.658163073 +750 56 0 days 00:00:00.689304680 +750 57 0 days 00:00:00.216175373 +750 58 0 days 00:00:00.224198860 +750 59 0 days 00:00:00.357556433 +750 60 0 days 00:00:00.363677513 +750 61 0 days 00:00:00.668244546 +750 62 0 days 00:00:00.437459460 +750 63 0 days 00:00:00.202372646 +750 64 0 days 00:00:00.221294626 +750 65 0 days 00:00:00.252098006 +750 66 0 days 00:00:00.269294654 +750 67 0 days 00:00:00.353769940 +750 68 0 days 00:00:00.260722973 +750 69 0 days 00:00:00.262843280 +750 70 0 days 00:00:00.657501420 +750 71 0 days 00:00:00.480554171 +750 72 0 days 00:00:00.901083598 +750 73 0 days 00:00:00.784812500 +750 74 0 days 00:00:00.262106792 +750 75 0 days 00:00:00.713567826 +750 76 0 days 00:00:00.244951791 +750 77 0 days 00:00:00.672741773 +750 78 0 days 00:00:00.216594840 +750 79 0 days 00:00:00.231275500 +750 80 0 days 00:00:00.676462973 +750 81 0 days 00:00:00.866970247 +750 82 0 days 00:00:00.392048446 +750 83 0 days 00:00:00.482549398 +750 84 0 days 00:00:00.732808850 +750 85 0 days 00:00:00.682490566 +750 86 0 days 00:00:00.383397560 +750 87 0 days 00:00:00.377660886 +750 88 0 days 00:00:00.271927945 +750 89 0 days 00:00:00.699816746 +750 90 0 days 00:00:00.847009557 +750 91 0 days 00:00:00.389579020 +750 92 0 days 00:00:00.270710815 +750 93 0 days 00:00:00.243808233 +750 94 0 days 00:00:00.250885644 +750 95 0 days 00:00:00.259840877 +750 96 0 days 00:00:00.298494433 +750 97 0 days 00:00:00.354464893 +750 98 0 days 00:00:00.357110906 +750 99 0 days 00:00:00.354052420 +750 100 0 days 00:00:00.405387276 +751 1 0 days 00:00:00.568055170 +751 2 0 days 00:00:00.668423088 +751 3 0 days 00:00:02.343487362 +751 4 0 days 00:00:00.709729780 +751 5 0 days 
00:00:00.870060365 +751 6 0 days 00:00:01.986000916 +751 7 0 days 00:00:01.098700960 +751 8 0 days 00:00:02.240677540 +751 9 0 days 00:00:01.191201035 +751 10 0 days 00:00:01.877995335 +751 11 0 days 00:00:00.614504868 +751 12 0 days 00:00:00.772395774 +751 13 0 days 00:00:01.186718091 +751 14 0 days 00:00:02.581179232 +751 15 0 days 00:00:01.345735942 +751 16 0 days 00:00:00.783483862 +751 17 0 days 00:00:00.676641983 +751 18 0 days 00:00:01.333774112 +751 19 0 days 00:00:01.061657645 +751 20 0 days 00:00:00.701082910 +751 21 0 days 00:00:00.945458840 +751 22 0 days 00:00:00.491622395 +751 23 0 days 00:00:02.701714706 +751 24 0 days 00:00:01.228009865 +751 25 0 days 00:00:00.971238630 +751 26 0 days 00:00:02.206188820 +751 27 0 days 00:00:00.751628964 +751 28 0 days 00:00:00.564684360 +751 29 0 days 00:00:00.605288830 +751 30 0 days 00:00:02.283483165 +751 31 0 days 00:00:02.248493135 +751 32 0 days 00:00:01.104149714 +751 33 0 days 00:00:02.259219625 +751 34 0 days 00:00:01.228493030 +751 35 0 days 00:00:02.193374232 +751 36 0 days 00:00:00.732622560 +751 37 0 days 00:00:00.997270684 +751 38 0 days 00:00:01.004561710 +751 39 0 days 00:00:01.473077692 +751 40 0 days 00:00:01.230935900 +751 41 0 days 00:00:00.762505924 +751 42 0 days 00:00:02.059607270 +751 43 0 days 00:00:02.480826371 +751 44 0 days 00:00:00.791942810 +751 45 0 days 00:00:02.066174206 +751 46 0 days 00:00:01.326580756 +751 47 0 days 00:00:02.328805694 +751 48 0 days 00:00:01.375767708 +751 49 0 days 00:00:01.885301865 +751 50 0 days 00:00:01.274523094 +751 51 0 days 00:00:00.970175292 +751 52 0 days 00:00:00.498947340 +751 53 0 days 00:00:00.566034950 +751 54 0 days 00:00:01.274000640 +751 55 0 days 00:00:00.844407680 +751 56 0 days 00:00:00.554876480 +751 57 0 days 00:00:01.330419630 +751 58 0 days 00:00:01.966371810 +751 59 0 days 00:00:01.969250415 +751 60 0 days 00:00:00.641677940 +751 61 0 days 00:00:01.450191610 +751 62 0 days 00:00:00.750133856 +751 63 0 days 00:00:01.776303740 +751 64 0 
days 00:00:02.205043895 +751 65 0 days 00:00:01.119674965 +751 66 0 days 00:00:01.654714970 +751 67 0 days 00:00:00.604192536 +751 68 0 days 00:00:01.013077454 +751 69 0 days 00:00:00.900176100 +751 70 0 days 00:00:00.727307016 +751 71 0 days 00:00:01.067220240 +751 72 0 days 00:00:02.114432576 +751 73 0 days 00:00:00.859729340 +751 74 0 days 00:00:01.820144088 +751 75 0 days 00:00:02.163572404 +751 76 0 days 00:00:00.564781190 +751 77 0 days 00:00:00.928292930 +751 78 0 days 00:00:02.565403430 +751 79 0 days 00:00:01.135754230 +751 80 0 days 00:00:02.069894332 +751 81 0 days 00:00:00.696103642 +751 82 0 days 00:00:01.000676654 +751 83 0 days 00:00:02.081582873 +751 84 0 days 00:00:00.521054180 +751 85 0 days 00:00:01.234241585 +751 86 0 days 00:00:01.870667185 +751 87 0 days 00:00:02.074806882 +751 88 0 days 00:00:00.615932223 +751 89 0 days 00:00:00.704662764 +751 90 0 days 00:00:01.094781885 +751 91 0 days 00:00:00.696005380 +751 92 0 days 00:00:02.183037505 +751 93 0 days 00:00:00.607548430 +751 94 0 days 00:00:00.964459900 +751 95 0 days 00:00:02.469090668 +751 96 0 days 00:00:00.527832720 +751 97 0 days 00:00:00.904534980 +751 98 0 days 00:00:01.215407220 +751 99 0 days 00:00:01.846988310 +751 100 0 days 00:00:02.553216555 +752 1 0 days 00:00:00.562274560 +752 2 0 days 00:00:00.486198220 +752 3 0 days 00:00:00.528823088 +752 4 0 days 00:00:00.438541810 +752 5 0 days 00:00:00.526837495 +752 6 0 days 00:00:00.307691708 +752 7 0 days 00:00:00.785986716 +752 8 0 days 00:00:00.583628500 +752 9 0 days 00:00:00.474717730 +752 10 0 days 00:00:01.234867820 +752 11 0 days 00:00:00.596403012 +752 12 0 days 00:00:00.730348508 +752 13 0 days 00:00:01.021902804 +752 14 0 days 00:00:00.331103842 +752 15 0 days 00:00:01.070251436 +752 16 0 days 00:00:01.207866196 +752 17 0 days 00:00:00.724749355 +752 18 0 days 00:00:00.553456248 +752 19 0 days 00:00:00.221251605 +752 20 0 days 00:00:00.512030596 +752 21 0 days 00:00:00.849264822 +752 22 0 days 00:00:00.502793178 +752 23 0 
days 00:00:00.847339783 +752 24 0 days 00:00:00.309210574 +752 25 0 days 00:00:00.415590544 +752 26 0 days 00:00:00.418937776 +752 27 0 days 00:00:00.426657213 +752 28 0 days 00:00:00.255928290 +752 29 0 days 00:00:00.276165080 +752 30 0 days 00:00:00.812797640 +752 31 0 days 00:00:00.227083375 +752 32 0 days 00:00:00.297685700 +752 33 0 days 00:00:00.294508932 +752 34 0 days 00:00:00.508304302 +752 35 0 days 00:00:00.239517696 +752 36 0 days 00:00:00.432898327 +752 37 0 days 00:00:00.494792756 +752 38 0 days 00:00:00.706232296 +752 39 0 days 00:00:00.767162700 +752 40 0 days 00:00:00.271380057 +752 41 0 days 00:00:00.454520328 +752 42 0 days 00:00:00.274715147 +752 43 0 days 00:00:00.326040408 +752 44 0 days 00:00:00.426445715 +752 45 0 days 00:00:00.299026592 +752 46 0 days 00:00:00.777775528 +752 47 0 days 00:00:00.438716216 +752 48 0 days 00:00:00.272164803 +752 49 0 days 00:00:00.790730860 +752 50 0 days 00:00:00.916872471 +752 51 0 days 00:00:00.864575544 +752 52 0 days 00:00:00.891670256 +752 53 0 days 00:00:00.812655368 +752 54 0 days 00:00:00.554397654 +752 55 0 days 00:00:00.312745742 +752 56 0 days 00:00:00.786248005 +752 57 0 days 00:00:00.387718455 +752 58 0 days 00:00:00.242052900 +752 59 0 days 00:00:01.001839697 +752 60 0 days 00:00:00.259202275 +752 61 0 days 00:00:00.449367587 +752 62 0 days 00:00:00.541098898 +752 63 0 days 00:00:00.678094592 +752 64 0 days 00:00:00.918350421 +752 65 0 days 00:00:00.283185160 +752 66 0 days 00:00:00.334758646 +752 67 0 days 00:00:00.970090220 +752 68 0 days 00:00:00.490633314 +752 69 0 days 00:00:00.281974064 +752 70 0 days 00:00:00.321723248 +752 71 0 days 00:00:00.859476684 +752 72 0 days 00:00:00.302985406 +752 73 0 days 00:00:00.909435464 +752 74 0 days 00:00:00.805248433 +752 75 0 days 00:00:00.956348285 +752 76 0 days 00:00:00.448772722 +752 77 0 days 00:00:00.391899200 +752 78 0 days 00:00:00.224361470 +752 79 0 days 00:00:00.504604322 +752 80 0 days 00:00:00.302891011 +752 81 0 days 00:00:00.468108450 
+752 82 0 days 00:00:00.890341934 +752 83 0 days 00:00:00.902850054 +752 84 0 days 00:00:00.443633385 +752 85 0 days 00:00:00.518913576 +752 86 0 days 00:00:00.885040723 +752 87 0 days 00:00:00.804863092 +752 88 0 days 00:00:00.341049360 +752 89 0 days 00:00:00.479610717 +752 90 0 days 00:00:00.257574504 +752 91 0 days 00:00:00.392428620 +752 92 0 days 00:00:00.231823785 +752 93 0 days 00:00:00.814571826 +752 94 0 days 00:00:00.920091664 +752 95 0 days 00:00:00.750056555 +752 96 0 days 00:00:00.554256397 +752 97 0 days 00:00:00.962330570 +752 98 0 days 00:00:00.503959174 +752 99 0 days 00:00:00.902083880 +752 100 0 days 00:00:00.748237530 +753 1 0 days 00:00:01.845684240 +753 2 0 days 00:00:00.964361464 +753 3 0 days 00:00:00.917977164 +753 4 0 days 00:00:00.435360860 +753 5 0 days 00:00:01.026365702 +753 6 0 days 00:00:00.832431394 +753 7 0 days 00:00:00.497942580 +753 8 0 days 00:00:00.588374325 +753 9 0 days 00:00:01.705175665 +753 10 0 days 00:00:00.889934253 +753 11 0 days 00:00:00.905892968 +753 12 0 days 00:00:00.797626636 +753 13 0 days 00:00:00.553518112 +753 14 0 days 00:00:02.100994785 +753 15 0 days 00:00:01.992371048 +753 16 0 days 00:00:02.071360276 +753 17 0 days 00:00:00.555415420 +753 18 0 days 00:00:01.099298130 +753 19 0 days 00:00:00.842272324 +753 20 0 days 00:00:00.984588566 +753 21 0 days 00:00:02.247028506 +753 22 0 days 00:00:02.134921960 +753 23 0 days 00:00:00.610353060 +753 24 0 days 00:00:01.820737796 +753 25 0 days 00:00:00.649774195 +753 26 0 days 00:00:00.599257850 +753 27 0 days 00:00:00.999076017 +753 28 0 days 00:00:01.781213860 +753 29 0 days 00:00:00.882783273 +753 30 0 days 00:00:01.655198073 +753 31 0 days 00:00:01.020359360 +753 32 0 days 00:00:01.036755580 +753 33 0 days 00:00:00.819852166 +753 34 0 days 00:00:00.805342806 +753 35 0 days 00:00:01.043300742 +753 36 0 days 00:00:00.589260100 +753 37 0 days 00:00:01.162622620 +753 38 0 days 00:00:00.598820454 +753 39 0 days 00:00:00.617458702 +753 40 0 days 00:00:00.781668900 
+753 41 0 days 00:00:01.011848284 +753 42 0 days 00:00:00.990826120 +753 43 0 days 00:00:01.717198900 +753 44 0 days 00:00:01.087797247 +753 45 0 days 00:00:01.016529423 +753 46 0 days 00:00:00.963679063 +753 47 0 days 00:00:00.682569792 +753 48 0 days 00:00:00.732430680 +753 49 0 days 00:00:00.988447826 +753 50 0 days 00:00:00.634876165 +753 51 0 days 00:00:01.024667852 +753 52 0 days 00:00:00.811776173 +753 53 0 days 00:00:01.517356193 +753 54 0 days 00:00:01.799300004 +753 55 0 days 00:00:00.763997800 +753 56 0 days 00:00:00.617046646 +753 57 0 days 00:00:01.232661137 +753 58 0 days 00:00:00.501199900 +753 59 0 days 00:00:00.993346700 +753 60 0 days 00:00:01.281495346 +753 61 0 days 00:00:00.708792844 +753 62 0 days 00:00:01.257888744 +753 63 0 days 00:00:01.272881465 +753 64 0 days 00:00:00.700860158 +753 65 0 days 00:00:01.034610752 +753 66 0 days 00:00:00.938695813 +753 67 0 days 00:00:00.796302293 +753 68 0 days 00:00:00.521555735 +753 69 0 days 00:00:00.457301180 +753 70 0 days 00:00:00.781264931 +753 71 0 days 00:00:00.588973900 +753 72 0 days 00:00:01.896250293 +753 73 0 days 00:00:00.673603908 +753 74 0 days 00:00:00.653649990 +753 75 0 days 00:00:00.585705382 +753 76 0 days 00:00:02.136964885 +753 77 0 days 00:00:01.977346762 +753 78 0 days 00:00:00.801663426 +753 79 0 days 00:00:00.514864752 +753 80 0 days 00:00:01.490335813 +753 81 0 days 00:00:01.860442696 +753 82 0 days 00:00:00.765014775 +753 83 0 days 00:00:00.471431906 +753 84 0 days 00:00:00.925550572 +753 85 0 days 00:00:01.706038865 +753 86 0 days 00:00:00.759335991 +753 87 0 days 00:00:00.553883546 +753 88 0 days 00:00:02.040990715 +753 89 0 days 00:00:00.562046680 +753 90 0 days 00:00:01.744759710 +753 91 0 days 00:00:01.140893800 +753 92 0 days 00:00:00.614058608 +753 93 0 days 00:00:01.401455036 +753 94 0 days 00:00:01.059424606 +753 95 0 days 00:00:00.837301744 +753 96 0 days 00:00:01.033358373 +753 97 0 days 00:00:01.134955017 +753 98 0 days 00:00:01.083590768 +753 99 0 days 
00:00:01.666700700 +753 100 0 days 00:00:00.507612715 +754 1 0 days 00:00:00.547088836 +754 2 0 days 00:00:00.541709737 +754 3 0 days 00:00:00.237098473 +754 4 0 days 00:00:00.351410432 +754 5 0 days 00:00:00.704507465 +754 6 0 days 00:00:00.487862632 +754 7 0 days 00:00:00.320282765 +754 8 0 days 00:00:00.303746595 +754 9 0 days 00:00:00.441362440 +754 10 0 days 00:00:00.772801986 +754 11 0 days 00:00:00.903157560 +754 12 0 days 00:00:00.244672726 +754 13 0 days 00:00:00.707527136 +754 14 0 days 00:00:00.958418340 +754 15 0 days 00:00:00.290118504 +754 16 0 days 00:00:00.274456053 +754 17 0 days 00:00:00.528978845 +754 18 0 days 00:00:00.817729933 +754 19 0 days 00:00:00.243626173 +754 20 0 days 00:00:01.106721588 +754 21 0 days 00:00:00.365674727 +754 22 0 days 00:00:00.922783455 +754 23 0 days 00:00:00.965047864 +754 24 0 days 00:00:00.437672832 +754 25 0 days 00:00:00.275098146 +754 26 0 days 00:00:00.555240140 +754 27 0 days 00:00:00.990760216 +754 28 0 days 00:00:00.486655890 +754 29 0 days 00:00:00.321315886 +754 30 0 days 00:00:01.152295883 +754 31 0 days 00:00:00.277825780 +754 32 0 days 00:00:00.385386928 +754 33 0 days 00:00:00.623245036 +754 34 0 days 00:00:01.070346760 +754 35 0 days 00:00:00.998427520 +754 36 0 days 00:00:00.245587693 +754 37 0 days 00:00:00.948935810 +754 38 0 days 00:00:00.571743714 +754 39 0 days 00:00:00.307420405 +754 40 0 days 00:00:00.741447233 +754 41 0 days 00:00:00.619483870 +754 42 0 days 00:00:01.172000953 +754 43 0 days 00:00:00.234820846 +754 44 0 days 00:00:00.948674952 +754 45 0 days 00:00:00.224559413 +754 46 0 days 00:00:00.605703432 +754 47 0 days 00:00:01.028113890 +754 48 0 days 00:00:00.432995300 +754 49 0 days 00:00:00.299203400 +754 50 0 days 00:00:00.352575226 +754 51 0 days 00:00:00.632042122 +754 52 0 days 00:00:00.306053904 +754 53 0 days 00:00:00.685396313 +754 54 0 days 00:00:00.326371068 +754 55 0 days 00:00:00.700277693 +754 56 0 days 00:00:00.463091650 +754 57 0 days 00:00:01.079785883 +754 58 0 days 
00:00:00.203234365 +754 59 0 days 00:00:00.348638913 +754 60 0 days 00:00:00.376104005 +754 61 0 days 00:00:00.420924368 +754 62 0 days 00:00:00.278841724 +754 63 0 days 00:00:01.179515592 +754 64 0 days 00:00:00.711031480 +754 65 0 days 00:00:00.436367256 +754 66 0 days 00:00:00.762152573 +754 67 0 days 00:00:00.250017946 +754 68 0 days 00:00:00.383847606 +754 69 0 days 00:00:00.276213586 +754 70 0 days 00:00:00.372608582 +754 71 0 days 00:00:00.579664386 +754 72 0 days 00:00:01.020210214 +754 73 0 days 00:00:00.473384328 +754 74 0 days 00:00:00.364538663 +754 75 0 days 00:00:00.336273353 +754 76 0 days 00:00:00.372722934 +754 77 0 days 00:00:01.008017333 +754 78 0 days 00:00:00.558335396 +754 79 0 days 00:00:00.454297580 +754 80 0 days 00:00:00.214762440 +754 81 0 days 00:00:01.133611678 +754 82 0 days 00:00:00.224559233 +754 83 0 days 00:00:00.563730430 +754 84 0 days 00:00:00.943570300 +754 85 0 days 00:00:00.233524446 +754 86 0 days 00:00:00.490295508 +754 87 0 days 00:00:00.527172285 +754 88 0 days 00:00:00.268878100 +754 89 0 days 00:00:00.494386685 +754 90 0 days 00:00:00.775644466 +754 91 0 days 00:00:00.354687007 +754 92 0 days 00:00:00.983423473 +754 93 0 days 00:00:00.231630706 +754 94 0 days 00:00:00.656702516 +754 95 0 days 00:00:01.194710984 +754 96 0 days 00:00:01.218249148 +754 97 0 days 00:00:00.382707886 +754 98 0 days 00:00:00.522546120 +754 99 0 days 00:00:00.300333420 +754 100 0 days 00:00:00.780907140 +755 1 0 days 00:00:00.147757168 +755 2 0 days 00:00:00.225503895 +755 3 0 days 00:00:00.182251117 +755 4 0 days 00:00:00.150509920 +755 5 0 days 00:00:00.256089783 +755 6 0 days 00:00:00.144540266 +755 7 0 days 00:00:00.138645443 +755 8 0 days 00:00:00.176461064 +755 9 0 days 00:00:00.134725893 +755 10 0 days 00:00:00.182463050 +755 11 0 days 00:00:00.147343586 +755 12 0 days 00:00:00.256056826 +755 13 0 days 00:00:00.246178084 +755 14 0 days 00:00:00.208538700 +755 15 0 days 00:00:00.171663810 +755 16 0 days 00:00:00.170239132 +755 17 0 days 
00:00:00.232105150 +755 18 0 days 00:00:00.123036410 +755 19 0 days 00:00:00.122429835 +755 20 0 days 00:00:00.154405008 +755 21 0 days 00:00:00.261649142 +755 22 0 days 00:00:00.158986210 +755 23 0 days 00:00:00.257630183 +755 24 0 days 00:00:00.248329624 +755 25 0 days 00:00:00.160539270 +755 26 0 days 00:00:00.113752893 +755 27 0 days 00:00:00.180352485 +755 28 0 days 00:00:00.165638022 +755 29 0 days 00:00:00.247620756 +755 30 0 days 00:00:00.168429105 +755 31 0 days 00:00:00.168162596 +755 32 0 days 00:00:00.169182868 +755 33 0 days 00:00:00.248538952 +755 34 0 days 00:00:00.247504236 +755 35 0 days 00:00:00.163309798 +755 36 0 days 00:00:00.246603336 +755 37 0 days 00:00:00.160691575 +755 38 0 days 00:00:00.124460675 +755 39 0 days 00:00:00.150782738 +755 40 0 days 00:00:00.157062613 +755 41 0 days 00:00:00.284876755 +755 42 0 days 00:00:00.275025168 +755 43 0 days 00:00:00.157569254 +755 44 0 days 00:00:00.273144028 +755 45 0 days 00:00:00.158652315 +755 46 0 days 00:00:00.247046340 +755 47 0 days 00:00:00.274540725 +755 48 0 days 00:00:00.188651980 +755 49 0 days 00:00:00.196069070 +755 50 0 days 00:00:00.240050025 +755 51 0 days 00:00:00.136627212 +755 52 0 days 00:00:00.146517611 +755 53 0 days 00:00:00.145156360 +755 54 0 days 00:00:00.185419265 +755 55 0 days 00:00:00.266375793 +755 56 0 days 00:00:00.253140176 +755 57 0 days 00:00:00.191029306 +755 58 0 days 00:00:00.127161375 +755 59 0 days 00:00:00.194861138 +755 60 0 days 00:00:00.159424110 +755 61 0 days 00:00:00.150541696 +755 62 0 days 00:00:00.175089140 +755 63 0 days 00:00:00.180767005 +755 64 0 days 00:00:00.133561344 +755 65 0 days 00:00:00.189271326 +755 66 0 days 00:00:00.176247360 +755 67 0 days 00:00:00.182482907 +755 68 0 days 00:00:00.267859270 +755 69 0 days 00:00:00.184950072 +755 70 0 days 00:00:00.160312460 +755 71 0 days 00:00:00.143556155 +755 72 0 days 00:00:00.160960425 +755 73 0 days 00:00:00.247822028 +755 74 0 days 00:00:00.188690042 +755 75 0 days 00:00:00.161415622 +755 76 
0 days 00:00:00.158115845 +755 77 0 days 00:00:00.211976920 +755 78 0 days 00:00:00.176534970 +755 79 0 days 00:00:00.233561015 +755 80 0 days 00:00:00.176011166 +755 81 0 days 00:00:00.170649060 +755 82 0 days 00:00:00.149967844 +755 83 0 days 00:00:00.158786040 +755 84 0 days 00:00:00.137439936 +755 85 0 days 00:00:00.168980656 +755 86 0 days 00:00:00.160880795 +755 87 0 days 00:00:00.171193344 +755 88 0 days 00:00:00.235550800 +755 89 0 days 00:00:00.248658276 +755 90 0 days 00:00:00.264758660 +755 91 0 days 00:00:00.252273176 +755 92 0 days 00:00:00.156832336 +755 93 0 days 00:00:00.167671404 +755 94 0 days 00:00:00.178037830 +755 95 0 days 00:00:00.233514070 +755 96 0 days 00:00:00.236406965 +755 97 0 days 00:00:00.170036856 +755 98 0 days 00:00:00.138600316 +755 99 0 days 00:00:00.174945460 +755 100 0 days 00:00:00.303751530 +756 1 0 days 00:00:00.095293854 +756 2 0 days 00:00:00.093396016 +756 3 0 days 00:00:00.082484540 +756 4 0 days 00:00:00.073685080 +756 5 0 days 00:00:00.141501716 +756 6 0 days 00:00:00.150740682 +756 7 0 days 00:00:00.100818666 +756 8 0 days 00:00:00.094711674 +756 9 0 days 00:00:00.129836765 +756 10 0 days 00:00:00.151544648 +756 11 0 days 00:00:00.141706233 +756 12 0 days 00:00:00.136483448 +756 13 0 days 00:00:00.087738670 +756 14 0 days 00:00:00.095652713 +756 15 0 days 00:00:00.092683628 +756 16 0 days 00:00:00.080640855 +756 17 0 days 00:00:00.148098270 +756 18 0 days 00:00:00.095870994 +756 19 0 days 00:00:00.084710305 +756 20 0 days 00:00:00.148900505 +756 21 0 days 00:00:00.095124280 +756 22 0 days 00:00:00.093647930 +756 23 0 days 00:00:00.130254455 +756 24 0 days 00:00:00.145111137 +756 25 0 days 00:00:00.089823480 +756 26 0 days 00:00:00.091107508 +756 27 0 days 00:00:00.128760665 +756 28 0 days 00:00:00.146616334 +756 29 0 days 00:00:00.089854955 +756 30 0 days 00:00:00.079353068 +756 31 0 days 00:00:00.085429882 +756 32 0 days 00:00:00.073989766 +756 33 0 days 00:00:00.097462384 +756 34 0 days 00:00:00.078903401 +756 35 0 
days 00:00:00.068340490 +756 36 0 days 00:00:00.086712300 +756 37 0 days 00:00:00.137750604 +756 38 0 days 00:00:00.145859900 +756 39 0 days 00:00:00.105222415 +756 40 0 days 00:00:00.130292600 +756 41 0 days 00:00:00.142293233 +756 42 0 days 00:00:00.137598808 +756 43 0 days 00:00:00.095278797 +756 44 0 days 00:00:00.090472440 +756 45 0 days 00:00:00.075806600 +756 46 0 days 00:00:00.067741050 +756 47 0 days 00:00:00.152187412 +756 48 0 days 00:00:00.077847130 +756 49 0 days 00:00:00.078217617 +756 50 0 days 00:00:00.083130083 +756 51 0 days 00:00:00.075016380 +756 52 0 days 00:00:00.142775470 +756 53 0 days 00:00:00.080342212 +756 54 0 days 00:00:00.076539297 +756 56 0 days 00:00:00.096291930 +756 57 0 days 00:00:00.152751476 +756 58 0 days 00:00:00.081290860 +756 59 0 days 00:00:00.088536340 +756 60 0 days 00:00:00.151258995 +756 61 0 days 00:00:00.078466942 +756 62 0 days 00:00:00.142973166 +756 63 0 days 00:00:00.076781134 +756 64 0 days 00:00:00.130396245 +756 65 0 days 00:00:00.149725972 +756 66 0 days 00:00:00.141722596 +756 67 0 days 00:00:00.146357471 +756 68 0 days 00:00:00.081309804 +756 69 0 days 00:00:00.093308946 +756 70 0 days 00:00:00.097740817 +756 71 0 days 00:00:00.077538424 +756 72 0 days 00:00:00.147108211 +756 73 0 days 00:00:00.074419066 +756 74 0 days 00:00:00.100917043 +756 75 0 days 00:00:00.087710050 +756 76 0 days 00:00:00.084099103 +756 77 0 days 00:00:00.149311757 +756 78 0 days 00:00:00.097741240 +756 79 0 days 00:00:00.146284394 +756 80 0 days 00:00:00.151274344 +756 81 0 days 00:00:00.095422940 +756 82 0 days 00:00:00.084977112 +756 83 0 days 00:00:00.083648484 +756 84 0 days 00:00:00.093338730 +756 85 0 days 00:00:00.138035544 +756 86 0 days 00:00:00.086346560 +756 87 0 days 00:00:00.069737290 +756 88 0 days 00:00:00.073527720 +756 89 0 days 00:00:00.149576290 +756 90 0 days 00:00:00.119140100 +756 91 0 days 00:00:00.092241356 +756 92 0 days 00:00:00.087223410 +756 93 0 days 00:00:00.130489740 +756 94 0 days 00:00:00.076982382 
+756 95 0 days 00:00:00.096462208 +756 96 0 days 00:00:00.086477635 +756 97 0 days 00:00:00.150521710 +756 98 0 days 00:00:00.077607645 +756 99 0 days 00:00:00.095553143 +756 100 0 days 00:00:00.086633128 +757 1 0 days 00:00:00.286905556 +757 2 0 days 00:00:00.141051328 +757 3 0 days 00:00:00.186253848 +757 4 0 days 00:00:00.190052492 +757 5 0 days 00:00:00.136824984 +757 6 0 days 00:00:00.148273160 +757 7 0 days 00:00:00.133395252 +757 8 0 days 00:00:00.184526200 +757 9 0 days 00:00:00.128039495 +757 10 0 days 00:00:00.143575351 +757 11 0 days 00:00:00.284921697 +757 12 0 days 00:00:00.170054113 +757 13 0 days 00:00:00.139350640 +757 14 0 days 00:00:00.274963650 +757 15 0 days 00:00:00.277724270 +757 16 0 days 00:00:00.256034684 +757 17 0 days 00:00:00.276279168 +757 18 0 days 00:00:00.260323433 +757 19 0 days 00:00:00.127042440 +757 20 0 days 00:00:00.157684345 +757 21 0 days 00:00:00.185281504 +757 22 0 days 00:00:00.180048425 +757 23 0 days 00:00:00.145837068 +757 24 0 days 00:00:00.149826616 +757 25 0 days 00:00:00.199625570 +757 26 0 days 00:00:00.128579816 +757 27 0 days 00:00:00.126290635 +757 28 0 days 00:00:00.187297964 +757 29 0 days 00:00:00.137126120 +757 30 0 days 00:00:00.288469140 +757 31 0 days 00:00:00.252800596 +757 32 0 days 00:00:00.145127586 +757 33 0 days 00:00:00.259445336 +757 34 0 days 00:00:00.186428862 +757 35 0 days 00:00:00.136007808 +757 36 0 days 00:00:00.260336111 +757 37 0 days 00:00:00.209524875 +757 38 0 days 00:00:00.146289628 +757 39 0 days 00:00:00.157392820 +757 40 0 days 00:00:00.143958356 +757 41 0 days 00:00:00.152390656 +757 42 0 days 00:00:00.194903032 +757 43 0 days 00:00:00.176700905 +757 44 0 days 00:00:00.263380192 +757 45 0 days 00:00:00.171717600 +757 46 0 days 00:00:00.244932040 +757 47 0 days 00:00:00.197067030 +757 48 0 days 00:00:00.164185100 +757 49 0 days 00:00:00.262163583 +757 50 0 days 00:00:00.152901545 +757 51 0 days 00:00:00.130337748 +757 52 0 days 00:00:00.199250456 +757 53 0 days 00:00:00.181957592 
+757 54 0 days 00:00:00.179504085 +757 55 0 days 00:00:00.200709220 +757 56 0 days 00:00:00.160972775 +757 57 0 days 00:00:00.187241360 +757 58 0 days 00:00:00.155529875 +757 59 0 days 00:00:00.157747968 +757 60 0 days 00:00:00.162008735 +757 61 0 days 00:00:00.147408316 +757 62 0 days 00:00:00.159695610 +757 63 0 days 00:00:00.144875675 +757 64 0 days 00:00:00.256204330 +757 65 0 days 00:00:00.148499540 +757 66 0 days 00:00:00.192119055 +757 67 0 days 00:00:00.254092944 +757 68 0 days 00:00:00.121948445 +757 69 0 days 00:00:00.243069244 +757 70 0 days 00:00:00.150020176 +757 71 0 days 00:00:00.167475310 +757 72 0 days 00:00:00.288754435 +757 73 0 days 00:00:00.233040680 +757 74 0 days 00:00:00.182476660 +757 75 0 days 00:00:00.159925093 +757 76 0 days 00:00:00.231716115 +757 77 0 days 00:00:00.256394716 +757 78 0 days 00:00:00.140671445 +757 79 0 days 00:00:00.141919457 +757 80 0 days 00:00:00.253799440 +757 81 0 days 00:00:00.143527793 +757 82 0 days 00:00:00.225576886 +757 83 0 days 00:00:00.288395744 +757 84 0 days 00:00:00.183800371 +757 85 0 days 00:00:00.164415680 +757 86 0 days 00:00:00.249695056 +757 87 0 days 00:00:00.143190588 +757 88 0 days 00:00:00.161119550 +757 89 0 days 00:00:00.166844445 +757 90 0 days 00:00:00.134440612 +757 91 0 days 00:00:00.287700621 +757 92 0 days 00:00:00.177250735 +757 93 0 days 00:00:00.269631828 +757 94 0 days 00:00:00.251996132 +757 95 0 days 00:00:00.276831017 +757 96 0 days 00:00:00.193959657 +757 97 0 days 00:00:00.175591685 +757 98 0 days 00:00:00.127375555 +757 99 0 days 00:00:00.181723120 +757 100 0 days 00:00:00.257438260 +758 1 0 days 00:00:00.227673686 +758 2 0 days 00:00:00.178117430 +758 3 0 days 00:00:00.263443512 +758 4 0 days 00:00:00.258755355 +758 5 0 days 00:00:00.238472180 +758 6 0 days 00:00:00.170586505 +758 7 0 days 00:00:00.141913750 +758 8 0 days 00:00:00.129131020 +758 9 0 days 00:00:00.154315986 +758 10 0 days 00:00:00.198997333 +758 11 0 days 00:00:00.187757260 +758 12 0 days 00:00:00.141229450 
+758 13 0 days 00:00:00.141547060 +758 14 0 days 00:00:00.177118644 +758 15 0 days 00:00:00.265492913 +758 16 0 days 00:00:00.142670292 +758 17 0 days 00:00:00.267286736 +758 18 0 days 00:00:00.169249528 +758 19 0 days 00:00:00.176945116 +758 20 0 days 00:00:00.255952612 +758 21 0 days 00:00:00.188860694 +758 22 0 days 00:00:00.258181650 +758 23 0 days 00:00:00.253607905 +758 24 0 days 00:00:00.135215770 +758 25 0 days 00:00:00.256463106 +758 26 0 days 00:00:00.183005846 +758 27 0 days 00:00:00.132944740 +758 28 0 days 00:00:00.177104212 +758 29 0 days 00:00:00.170433045 +758 30 0 days 00:00:00.246388390 +758 31 0 days 00:00:00.148316165 +758 32 0 days 00:00:00.141470652 +758 33 0 days 00:00:00.147934970 +758 34 0 days 00:00:00.272491803 +758 35 0 days 00:00:00.141061716 +758 36 0 days 00:00:00.181867300 +758 37 0 days 00:00:00.197789310 +758 38 0 days 00:00:00.182278792 +758 39 0 days 00:00:00.189379556 +758 40 0 days 00:00:00.165357644 +758 41 0 days 00:00:00.167686584 +758 42 0 days 00:00:00.182059180 +758 43 0 days 00:00:00.133600570 +758 44 0 days 00:00:00.188127770 +758 45 0 days 00:00:00.252583628 +758 46 0 days 00:00:00.150138916 +758 47 0 days 00:00:00.256017896 +758 48 0 days 00:00:00.249161090 +758 49 0 days 00:00:00.139618834 +758 50 0 days 00:00:00.264038100 +758 51 0 days 00:00:00.185569673 +758 52 0 days 00:00:00.196495820 +758 53 0 days 00:00:00.264514533 +758 54 0 days 00:00:00.135575604 +758 55 0 days 00:00:00.145918175 +758 56 0 days 00:00:00.266506076 +758 57 0 days 00:00:00.164535023 +758 58 0 days 00:00:00.190437140 +758 59 0 days 00:00:00.288559077 +758 60 0 days 00:00:00.280542620 +758 61 0 days 00:00:00.156054095 +758 62 0 days 00:00:00.286652140 +758 63 0 days 00:00:00.179780408 +758 64 0 days 00:00:00.185861883 +758 65 0 days 00:00:00.306395900 +758 66 0 days 00:00:00.135880765 +758 67 0 days 00:00:00.277209996 +758 68 0 days 00:00:00.202898242 +758 69 0 days 00:00:00.150585446 +758 70 0 days 00:00:00.256796624 +758 71 0 days 
00:00:00.147114575 +758 72 0 days 00:00:00.276457565 +758 73 0 days 00:00:00.145685880 +758 74 0 days 00:00:00.130194048 +758 75 0 days 00:00:00.244859825 +758 76 0 days 00:00:00.164429348 +758 77 0 days 00:00:00.235705445 +758 78 0 days 00:00:00.197443826 +758 79 0 days 00:00:00.131946340 +758 80 0 days 00:00:00.209475872 +758 81 0 days 00:00:00.264316852 +758 82 0 days 00:00:00.155087157 +758 83 0 days 00:00:00.172867440 +758 84 0 days 00:00:00.199208497 +758 85 0 days 00:00:00.260942632 +758 86 0 days 00:00:00.137862690 +758 87 0 days 00:00:00.151931348 +758 88 0 days 00:00:00.265558040 +758 89 0 days 00:00:00.174351135 +758 90 0 days 00:00:00.266878716 +758 91 0 days 00:00:00.260860384 +758 92 0 days 00:00:00.137262900 +758 93 0 days 00:00:00.153600253 +758 94 0 days 00:00:00.245559605 +758 95 0 days 00:00:00.256919952 +758 96 0 days 00:00:00.254266220 +758 97 0 days 00:00:00.155042966 +758 98 0 days 00:00:00.275162177 +758 99 0 days 00:00:00.266863660 +758 100 0 days 00:00:00.142966060 +759 1 0 days 00:00:00.167761251 +759 2 0 days 00:00:00.095681245 +759 3 0 days 00:00:00.084712055 +759 4 0 days 00:00:00.138851396 +759 5 0 days 00:00:00.076902310 +759 6 0 days 00:00:00.112256332 +759 7 0 days 00:00:00.105779845 +759 8 0 days 00:00:00.082548233 +759 9 0 days 00:00:00.132954455 +759 10 0 days 00:00:00.092271748 +759 11 0 days 00:00:00.112027955 +759 12 0 days 00:00:00.146904332 +759 13 0 days 00:00:00.147387862 +759 14 0 days 00:00:00.110403964 +759 15 0 days 00:00:00.093019625 +759 16 0 days 00:00:00.126485350 +759 17 0 days 00:00:00.079100273 +759 18 0 days 00:00:00.079383308 +759 19 0 days 00:00:00.082259550 +759 20 0 days 00:00:00.110683612 +759 21 0 days 00:00:00.147247747 +759 22 0 days 00:00:00.092686920 +759 23 0 days 00:00:00.108559865 +759 24 0 days 00:00:00.090511283 +759 25 0 days 00:00:00.143837251 +759 26 0 days 00:00:00.127972760 +759 27 0 days 00:00:00.104901468 +759 28 0 days 00:00:00.143774080 +759 29 0 days 00:00:00.140521696 +759 30 0 days 
00:00:00.088117735 +759 31 0 days 00:00:00.080955947 +759 32 0 days 00:00:00.132768550 +759 33 0 days 00:00:00.143213780 +759 34 0 days 00:00:00.147791328 +759 35 0 days 00:00:00.094852700 +759 36 0 days 00:00:00.126606365 +759 37 0 days 00:00:00.097064872 +759 38 0 days 00:00:00.093090695 +759 39 0 days 00:00:00.108434547 +759 40 0 days 00:00:00.124229665 +759 41 0 days 00:00:00.139683233 +759 42 0 days 00:00:00.100362625 +759 43 0 days 00:00:00.078125488 +759 44 0 days 00:00:00.110506803 +759 45 0 days 00:00:00.068640113 +759 46 0 days 00:00:00.143415137 +759 47 0 days 00:00:00.080822753 +759 48 0 days 00:00:00.102477815 +759 49 0 days 00:00:00.092688495 +759 50 0 days 00:00:00.078704808 +759 51 0 days 00:00:00.083948896 +759 52 0 days 00:00:00.074677800 +759 53 0 days 00:00:00.082218916 +759 54 0 days 00:00:00.092857655 +759 55 0 days 00:00:00.092897870 +759 56 0 days 00:00:00.135569843 +759 57 0 days 00:00:00.122821085 +759 58 0 days 00:00:00.135266313 +759 59 0 days 00:00:00.104264622 +759 60 0 days 00:00:00.124949745 +759 61 0 days 00:00:00.081700720 +759 62 0 days 00:00:00.094434308 +759 63 0 days 00:00:00.103094400 +759 64 0 days 00:00:00.135689504 +759 65 0 days 00:00:00.106497134 +759 66 0 days 00:00:00.135405340 +759 67 0 days 00:00:00.080776560 +759 68 0 days 00:00:00.136008088 +759 69 0 days 00:00:00.078872088 +759 70 0 days 00:00:00.077104990 +759 71 0 days 00:00:00.143049362 +759 72 0 days 00:00:00.107792926 +759 73 0 days 00:00:00.099073372 +759 74 0 days 00:00:00.082653194 +759 75 0 days 00:00:00.116558526 +759 76 0 days 00:00:00.088465622 +759 77 0 days 00:00:00.125728200 +759 78 0 days 00:00:00.102315630 +759 79 0 days 00:00:00.092237610 +759 80 0 days 00:00:00.103364443 +759 81 0 days 00:00:00.068918393 +759 82 0 days 00:00:00.140611070 +759 83 0 days 00:00:00.086372078 +759 84 0 days 00:00:00.116507725 +759 85 0 days 00:00:00.079303322 +759 86 0 days 00:00:00.124388540 +759 87 0 days 00:00:00.135188600 +759 88 0 days 00:00:00.089707965 +759 89 
0 days 00:00:00.124134370 +759 90 0 days 00:00:00.128548796 +759 91 0 days 00:00:00.107766073 +759 92 0 days 00:00:00.135271200 +759 93 0 days 00:00:00.076614055 +759 94 0 days 00:00:00.107616185 +759 95 0 days 00:00:00.137634620 +759 96 0 days 00:00:00.099207300 +759 97 0 days 00:00:00.092968715 +759 98 0 days 00:00:00.102756973 +759 99 0 days 00:00:00.125026400 +759 100 0 days 00:00:00.124041780 +760 1 0 days 00:00:00.081380887 +760 2 0 days 00:00:00.148419420 +760 3 0 days 00:00:00.131967625 +760 4 0 days 00:00:00.135570620 +760 5 0 days 00:00:00.107170860 +760 6 0 days 00:00:00.142853226 +760 7 0 days 00:00:00.079666970 +760 8 0 days 00:00:00.067123366 +760 9 0 days 00:00:00.087273510 +760 10 0 days 00:00:00.147081880 +760 11 0 days 00:00:00.143233776 +760 12 0 days 00:00:00.128620605 +760 13 0 days 00:00:00.083632572 +760 14 0 days 00:00:00.084774973 +760 15 0 days 00:00:00.147137385 +760 16 0 days 00:00:00.084755717 +760 17 0 days 00:00:00.107263080 +760 18 0 days 00:00:00.126359040 +760 19 0 days 00:00:00.136883128 +760 20 0 days 00:00:00.141209003 +760 21 0 days 00:00:00.086956324 +760 22 0 days 00:00:00.077473005 +760 23 0 days 00:00:00.067803133 +760 24 0 days 00:00:00.081862000 +760 25 0 days 00:00:00.078757315 +760 26 0 days 00:00:00.091882610 +760 27 0 days 00:00:00.077343580 +760 28 0 days 00:00:00.127592750 +760 29 0 days 00:00:00.080644822 +760 30 0 days 00:00:00.081946784 +760 31 0 days 00:00:00.108337657 +760 32 0 days 00:00:00.098890552 +760 33 0 days 00:00:00.082704873 +760 34 0 days 00:00:00.143376297 +760 35 0 days 00:00:00.135731692 +760 36 0 days 00:00:00.101389600 +760 37 0 days 00:00:00.077631350 +760 38 0 days 00:00:00.098750796 +760 39 0 days 00:00:00.079676948 +760 40 0 days 00:00:00.112589302 +760 41 0 days 00:00:00.077372460 +760 42 0 days 00:00:00.069939153 +760 43 0 days 00:00:00.090081440 +760 44 0 days 00:00:00.085328320 +760 45 0 days 00:00:00.143350045 +760 46 0 days 00:00:00.076736864 +760 47 0 days 00:00:00.143706822 +760 48 0 
days 00:00:00.095364540 +760 49 0 days 00:00:00.130485536 +760 50 0 days 00:00:00.087214905 +760 51 0 days 00:00:00.125492170 +760 52 0 days 00:00:00.062104200 +760 53 0 days 00:00:00.093047593 +760 54 0 days 00:00:00.075698240 +760 55 0 days 00:00:00.075697725 +760 56 0 days 00:00:00.142834462 +760 57 0 days 00:00:00.068633240 +760 58 0 days 00:00:00.123917205 +760 59 0 days 00:00:00.076760426 +760 60 0 days 00:00:00.076515545 +760 61 0 days 00:00:00.079192496 +760 62 0 days 00:00:00.074824600 +760 63 0 days 00:00:00.139721260 +760 64 0 days 00:00:00.072548530 +760 65 0 days 00:00:00.131817496 +760 66 0 days 00:00:00.112625306 +760 67 0 days 00:00:00.140688665 +760 68 0 days 00:00:00.144578746 +760 69 0 days 00:00:00.125273495 +760 70 0 days 00:00:00.068418075 +760 71 0 days 00:00:00.099695957 +760 72 0 days 00:00:00.070627548 +760 73 0 days 00:00:00.077944131 +760 74 0 days 00:00:00.113951766 +760 75 0 days 00:00:00.099594752 +760 76 0 days 00:00:00.073360213 +760 77 0 days 00:00:00.125945210 +760 78 0 days 00:00:00.084305675 +760 79 0 days 00:00:00.088435312 +760 80 0 days 00:00:00.074281860 +760 81 0 days 00:00:00.075548111 +760 82 0 days 00:00:00.095445350 +760 83 0 days 00:00:00.142815090 +760 84 0 days 00:00:00.097551760 +760 85 0 days 00:00:00.090623548 +760 86 0 days 00:00:00.125821960 +760 87 0 days 00:00:00.094669994 +760 88 0 days 00:00:00.099042597 +760 89 0 days 00:00:00.142301082 +760 90 0 days 00:00:00.144796357 +760 91 0 days 00:00:00.075679805 +760 92 0 days 00:00:00.071565912 +760 93 0 days 00:00:00.077244395 +760 94 0 days 00:00:00.131491332 +760 95 0 days 00:00:00.092611996 +760 96 0 days 00:00:00.146523852 +760 97 0 days 00:00:00.083655575 +760 98 0 days 00:00:00.070810200 +760 99 0 days 00:00:00.094890217 +760 100 0 days 00:00:00.133276984 +761 1 0 days 00:00:00.756028416 +761 2 0 days 00:00:00.765405146 +761 3 0 days 00:00:01.026847834 +761 4 0 days 00:00:00.845665270 +761 5 0 days 00:00:01.640809764 +761 6 0 days 00:00:01.648542012 +761 7 0 
days 00:00:01.539833640 +761 8 0 days 00:00:00.875868712 +761 9 0 days 00:00:00.777408093 +761 10 0 days 00:00:00.459060980 +761 11 0 days 00:00:01.402156760 +761 12 0 days 00:00:01.068382088 +761 13 0 days 00:00:01.405383013 +761 14 0 days 00:00:01.864867522 +761 15 0 days 00:00:00.951684916 +761 16 0 days 00:00:00.472198770 +761 17 0 days 00:00:00.830444960 +761 18 0 days 00:00:00.809935953 +761 19 0 days 00:00:00.528151205 +761 20 0 days 00:00:00.829110370 +761 21 0 days 00:00:01.559574670 +761 22 0 days 00:00:00.452511313 +761 23 0 days 00:00:00.936159331 +761 24 0 days 00:00:01.649382340 +761 25 0 days 00:00:01.376256146 +761 26 0 days 00:00:00.479801796 +761 27 0 days 00:00:00.871622544 +761 28 0 days 00:00:00.634167320 +761 29 0 days 00:00:01.731716610 +761 30 0 days 00:00:01.636770604 +761 31 0 days 00:00:01.412436686 +761 32 0 days 00:00:00.800115025 +761 33 0 days 00:00:00.930721333 +761 34 0 days 00:00:01.630013022 +761 35 0 days 00:00:00.583781865 +761 36 0 days 00:00:00.516829464 +761 37 0 days 00:00:00.821766220 +761 38 0 days 00:00:00.826428225 +761 39 0 days 00:00:00.585495996 +761 40 0 days 00:00:00.953890332 +761 41 0 days 00:00:01.658785724 +761 42 0 days 00:00:00.632966412 +761 43 0 days 00:00:01.410273473 +761 44 0 days 00:00:00.908369826 +761 45 0 days 00:00:01.622269420 +761 46 0 days 00:00:00.464843685 +761 47 0 days 00:00:00.519795890 +761 48 0 days 00:00:00.482619070 +761 49 0 days 00:00:01.550818990 +761 50 0 days 00:00:01.591472390 +761 51 0 days 00:00:00.922086420 +761 52 0 days 00:00:00.495092360 +761 53 0 days 00:00:01.556491725 +761 54 0 days 00:00:01.799835292 +761 55 0 days 00:00:00.822012545 +761 56 0 days 00:00:00.884049320 +761 57 0 days 00:00:00.621049480 +761 58 0 days 00:00:01.549180115 +761 59 0 days 00:00:00.612824265 +761 60 0 days 00:00:01.420893946 +761 61 0 days 00:00:00.560767010 +761 62 0 days 00:00:00.945966785 +761 63 0 days 00:00:00.597710714 +761 64 0 days 00:00:01.558583160 +761 65 0 days 00:00:01.671072204 +761 
66 0 days 00:00:00.505328860 +761 67 0 days 00:00:01.526025405 +761 68 0 days 00:00:00.482063684 +761 69 0 days 00:00:00.511243608 +761 70 0 days 00:00:00.546864476 +761 71 0 days 00:00:00.488951632 +761 72 0 days 00:00:00.869929836 +761 73 0 days 00:00:00.939141900 +761 74 0 days 00:00:01.526433665 +761 75 0 days 00:00:01.350695326 +761 76 0 days 00:00:01.803513512 +761 77 0 days 00:00:01.811917008 +761 78 0 days 00:00:01.783625360 +761 79 0 days 00:00:00.441874133 +761 80 0 days 00:00:01.727495112 +761 81 0 days 00:00:01.709149132 +761 82 0 days 00:00:00.491532140 +761 83 0 days 00:00:00.491823365 +761 84 0 days 00:00:00.458856525 +761 85 0 days 00:00:01.740524416 +761 86 0 days 00:00:01.417437346 +761 87 0 days 00:00:00.829147350 +761 88 0 days 00:00:01.784429080 +761 89 0 days 00:00:00.473025265 +761 90 0 days 00:00:01.675702132 +761 91 0 days 00:00:01.585633875 +761 92 0 days 00:00:00.738124753 +761 93 0 days 00:00:00.813829645 +761 94 0 days 00:00:00.566907024 +761 95 0 days 00:00:01.565678095 +761 96 0 days 00:00:00.526181630 +761 97 0 days 00:00:00.771091433 +761 98 0 days 00:00:00.484167228 +761 99 0 days 00:00:00.828109060 +761 100 0 days 00:00:00.820727880 +762 1 0 days 00:00:01.665516560 +762 2 0 days 00:00:00.875244120 +762 3 0 days 00:00:00.967398940 +762 4 0 days 00:00:01.198096389 +762 5 0 days 00:00:00.508367635 +762 6 0 days 00:00:00.486111445 +762 7 0 days 00:00:00.901346516 +762 8 0 days 00:00:00.482044565 +762 9 0 days 00:00:01.148597570 +762 10 0 days 00:00:01.574462353 +762 11 0 days 00:00:01.061869897 +762 12 0 days 00:00:00.698047186 +762 13 0 days 00:00:00.684836596 +762 14 0 days 00:00:01.552277360 +762 15 0 days 00:00:00.934368105 +762 16 0 days 00:00:01.348406455 +762 17 0 days 00:00:01.547797480 +762 18 0 days 00:00:00.514047295 +762 19 0 days 00:00:01.565894940 +762 20 0 days 00:00:01.728076723 +762 21 0 days 00:00:01.698977955 +762 22 0 days 00:00:01.950123791 +762 23 0 days 00:00:01.029616274 +762 24 0 days 00:00:01.082880633 +762 
25 0 days 00:00:01.736784035 +762 26 0 days 00:00:00.634229365 +762 27 0 days 00:00:00.543651984 +762 28 0 days 00:00:01.574125086 +762 29 0 days 00:00:00.989690990 +762 30 0 days 00:00:00.587560977 +762 31 0 days 00:00:00.910359625 +762 32 0 days 00:00:01.176974910 +762 33 0 days 00:00:00.816946340 +762 34 0 days 00:00:00.547071763 +762 35 0 days 00:00:00.519778085 +762 36 0 days 00:00:00.734697636 +762 37 0 days 00:00:01.574756606 +762 38 0 days 00:00:01.739161595 +762 39 0 days 00:00:00.915679548 +762 40 0 days 00:00:00.619435588 +762 41 0 days 00:00:00.505413485 +762 42 0 days 00:00:00.639332930 +762 43 0 days 00:00:00.473919448 +762 44 0 days 00:00:00.706607085 +762 45 0 days 00:00:00.998254448 +762 46 0 days 00:00:01.551227466 +762 47 0 days 00:00:01.735789640 +762 48 0 days 00:00:00.838118197 +762 49 0 days 00:00:01.573692900 +762 50 0 days 00:00:01.742919260 +762 51 0 days 00:00:00.967382880 +762 52 0 days 00:00:01.979633732 +762 53 0 days 00:00:00.620441480 +762 54 0 days 00:00:00.522787940 +762 55 0 days 00:00:00.946446552 +762 56 0 days 00:00:00.527561720 +762 57 0 days 00:00:01.889815803 +762 58 0 days 00:00:01.947447605 +762 59 0 days 00:00:00.462691006 +762 60 0 days 00:00:01.924306986 +762 61 0 days 00:00:00.977246750 +762 62 0 days 00:00:01.863462360 +762 63 0 days 00:00:00.767262060 +762 64 0 days 00:00:00.889098560 +762 65 0 days 00:00:01.853356988 +762 66 0 days 00:00:00.725627205 +762 67 0 days 00:00:00.576458860 +762 68 0 days 00:00:00.587689324 +762 69 0 days 00:00:01.915656424 +762 70 0 days 00:00:00.845768460 +762 71 0 days 00:00:00.606269120 +762 72 0 days 00:00:01.879516708 +762 73 0 days 00:00:00.729102726 +762 74 0 days 00:00:01.566508580 +762 75 0 days 00:00:01.835861148 +762 76 0 days 00:00:00.937907875 +762 77 0 days 00:00:00.509633465 +762 78 0 days 00:00:00.561438684 +762 79 0 days 00:00:01.911935543 +762 80 0 days 00:00:01.127131577 +762 81 0 days 00:00:01.810738925 +762 82 0 days 00:00:00.982114530 +762 83 0 days 
00:00:00.637169195 +762 84 0 days 00:00:01.004924720 +762 85 0 days 00:00:01.870903936 +762 86 0 days 00:00:00.915625840 +762 87 0 days 00:00:01.936428436 +762 88 0 days 00:00:01.785559735 +762 89 0 days 00:00:00.918171920 +762 90 0 days 00:00:00.943956545 +762 91 0 days 00:00:00.619345448 +762 92 0 days 00:00:01.918338000 +762 93 0 days 00:00:01.820859375 +762 94 0 days 00:00:01.656012806 +762 95 0 days 00:00:00.891788000 +762 96 0 days 00:00:00.533512884 +762 97 0 days 00:00:00.729348270 +762 98 0 days 00:00:02.007777793 +762 99 0 days 00:00:00.959480075 +762 100 0 days 00:00:00.930220470 +763 1 0 days 00:00:00.251016593 +763 2 0 days 00:00:00.262788920 +763 3 0 days 00:00:00.268771445 +763 4 0 days 00:00:00.267843315 +763 5 0 days 00:00:00.258499756 +763 6 0 days 00:00:00.938422642 +763 7 0 days 00:00:00.284102986 +763 8 0 days 00:00:00.855190965 +763 9 0 days 00:00:00.351523695 +763 10 0 days 00:00:00.399908566 +763 11 0 days 00:00:00.504766668 +763 12 0 days 00:00:00.832090390 +763 13 0 days 00:00:00.491925120 +763 14 0 days 00:00:00.309318700 +763 15 0 days 00:00:00.457401665 +763 16 0 days 00:00:00.250676140 +763 17 0 days 00:00:00.526916614 +763 18 0 days 00:00:00.363992280 +763 19 0 days 00:00:00.851235985 +763 20 0 days 00:00:00.294403720 +763 21 0 days 00:00:00.258805024 +763 22 0 days 00:00:00.248174420 +763 23 0 days 00:00:00.867650515 +763 24 0 days 00:00:00.935607540 +763 25 0 days 00:00:00.908370148 +763 26 0 days 00:00:00.257030345 +763 27 0 days 00:00:00.446799745 +763 28 0 days 00:00:00.473411905 +763 29 0 days 00:00:00.451759065 +763 30 0 days 00:00:00.529650562 +763 31 0 days 00:00:00.916167750 +763 32 0 days 00:00:00.303434460 +763 33 0 days 00:00:00.278154704 +763 34 0 days 00:00:00.447442425 +763 35 0 days 00:00:00.945972073 +763 36 0 days 00:00:00.885461256 +763 37 0 days 00:00:00.501157730 +763 38 0 days 00:00:00.508271400 +763 39 0 days 00:00:00.516011420 +763 40 0 days 00:00:00.264239395 +763 41 0 days 00:00:00.477577575 +763 42 0 days 
00:00:00.491059356 +763 43 0 days 00:00:00.978054155 +763 44 0 days 00:00:00.834265385 +763 45 0 days 00:00:00.454974355 +763 46 0 days 00:00:00.449726080 +763 47 0 days 00:00:00.950653451 +763 48 0 days 00:00:00.266593493 +763 49 0 days 00:00:00.459631390 +763 50 0 days 00:00:00.528281297 +763 51 0 days 00:00:00.545798940 +763 52 0 days 00:00:00.424036932 +763 53 0 days 00:00:00.323133710 +763 54 0 days 00:00:00.472978260 +763 55 0 days 00:00:00.538700071 +763 56 0 days 00:00:00.839646735 +763 57 0 days 00:00:00.910423988 +763 58 0 days 00:00:00.475977796 +763 59 0 days 00:00:00.278688765 +763 60 0 days 00:00:00.404452113 +763 61 0 days 00:00:00.805059412 +763 62 0 days 00:00:00.962253323 +763 63 0 days 00:00:00.535330770 +763 64 0 days 00:00:00.273534760 +763 65 0 days 00:00:00.334162011 +763 66 0 days 00:00:00.949404400 +763 67 0 days 00:00:00.501632883 +763 68 0 days 00:00:00.451612405 +763 69 0 days 00:00:00.926474436 +763 70 0 days 00:00:00.304118640 +763 71 0 days 00:00:00.297291220 +763 72 0 days 00:00:00.844189625 +763 73 0 days 00:00:00.914175284 +763 74 0 days 00:00:00.986291176 +763 75 0 days 00:00:00.260451030 +763 76 0 days 00:00:00.469211240 +763 77 0 days 00:00:00.541506210 +763 78 0 days 00:00:00.281527446 +763 79 0 days 00:00:00.244657605 +763 80 0 days 00:00:00.262556600 +763 81 0 days 00:00:00.512302874 +763 82 0 days 00:00:00.292772100 +763 83 0 days 00:00:00.999174202 +763 84 0 days 00:00:00.852671750 +763 85 0 days 00:00:00.404769025 +763 86 0 days 00:00:00.502754593 +763 87 0 days 00:00:00.333186296 +763 88 0 days 00:00:00.240884335 +763 89 0 days 00:00:00.834643873 +763 90 0 days 00:00:00.297004486 +763 91 0 days 00:00:00.375175355 +763 92 0 days 00:00:00.763023826 +763 93 0 days 00:00:00.791663215 +763 94 0 days 00:00:00.742729066 +763 95 0 days 00:00:00.456511945 +763 96 0 days 00:00:00.310606660 +763 97 0 days 00:00:00.840985595 +763 98 0 days 00:00:00.513353130 +763 99 0 days 00:00:00.451359105 +763 100 0 days 00:00:00.853505175 +764 1 
0 days 00:00:00.449617086 +764 2 0 days 00:00:00.511729496 +764 3 0 days 00:00:00.240306860 +764 4 0 days 00:00:00.515682034 +764 5 0 days 00:00:00.911517025 +764 6 0 days 00:00:00.962457946 +764 7 0 days 00:00:00.346047368 +764 8 0 days 00:00:00.329868883 +764 9 0 days 00:00:00.953981813 +764 10 0 days 00:00:00.291748050 +764 11 0 days 00:00:00.847304086 +764 12 0 days 00:00:00.452721126 +764 13 0 days 00:00:00.462121955 +764 14 0 days 00:00:00.326108065 +764 15 0 days 00:00:00.894188365 +764 16 0 days 00:00:00.866837600 +764 17 0 days 00:00:00.251018710 +764 18 0 days 00:00:00.972110876 +764 19 0 days 00:00:00.788954160 +764 20 0 days 00:00:00.517769884 +764 21 0 days 00:00:00.863173900 +764 22 0 days 00:00:00.279649924 +764 23 0 days 00:00:00.312696143 +764 24 0 days 00:00:00.943726422 +764 25 0 days 00:00:00.852334346 +764 26 0 days 00:00:00.490440268 +764 27 0 days 00:00:00.903554955 +764 28 0 days 00:00:00.883430870 +764 29 0 days 00:00:00.460279060 +764 30 0 days 00:00:00.917906175 +764 31 0 days 00:00:00.347528293 +764 32 0 days 00:00:00.369016660 +764 33 0 days 00:00:00.514735653 +764 34 0 days 00:00:00.469289650 +764 35 0 days 00:00:00.892378495 +764 36 0 days 00:00:00.599946315 +764 37 0 days 00:00:00.844000055 +764 38 0 days 00:00:00.263578456 +764 39 0 days 00:00:00.472480690 +764 40 0 days 00:00:00.975407800 +764 41 0 days 00:00:00.880072015 +764 42 0 days 00:00:00.484513640 +764 43 0 days 00:00:00.471185915 +764 44 0 days 00:00:00.279467134 +764 45 0 days 00:00:00.252920055 +764 46 0 days 00:00:00.468017630 +764 47 0 days 00:00:01.013993262 +764 48 0 days 00:00:00.544998206 +764 49 0 days 00:00:00.970211506 +764 50 0 days 00:00:00.266326210 +764 51 0 days 00:00:00.527024788 +764 52 0 days 00:00:00.318552465 +764 53 0 days 00:00:00.497412185 +764 54 0 days 00:00:00.306184028 +764 55 0 days 00:00:00.867720165 +764 56 0 days 00:00:00.275311900 +764 57 0 days 00:00:00.484185020 +764 58 0 days 00:00:01.011798085 +764 59 0 days 00:00:00.502014575 +764 60 0 
days 00:00:00.303675900 +764 61 0 days 00:00:00.937995704 +764 62 0 days 00:00:00.550482400 +764 63 0 days 00:00:00.602814531 +764 64 0 days 00:00:00.685935393 +764 65 0 days 00:00:00.565884290 +764 66 0 days 00:00:00.432128043 +764 67 0 days 00:00:00.392625844 +764 68 0 days 00:00:00.537930370 +764 69 0 days 00:00:00.589879126 +764 70 0 days 00:00:00.583618210 +764 71 0 days 00:00:00.383061388 +764 72 0 days 00:00:00.501273012 +764 73 0 days 00:00:00.288232730 +764 74 0 days 00:00:00.363737535 +764 75 0 days 00:00:00.884747350 +764 76 0 days 00:00:00.278303760 +764 77 0 days 00:00:00.510292933 +764 78 0 days 00:00:00.299311263 +764 79 0 days 00:00:00.675422106 +764 80 0 days 00:00:00.930155896 +764 81 0 days 00:00:00.303323720 +764 82 0 days 00:00:00.351977986 +764 83 0 days 00:00:00.537379190 +764 84 0 days 00:00:00.495093436 +764 85 0 days 00:00:00.794045613 +764 86 0 days 00:00:00.865892685 +764 87 0 days 00:00:00.922693220 +764 88 0 days 00:00:00.510630540 +764 89 0 days 00:00:00.318243360 +764 90 0 days 00:00:00.966414570 +764 91 0 days 00:00:00.573072792 +764 92 0 days 00:00:00.473680245 +764 93 0 days 00:00:00.903402946 +764 94 0 days 00:00:00.653721856 +764 95 0 days 00:00:00.320234120 +764 96 0 days 00:00:00.767348693 +764 97 0 days 00:00:00.990184860 +764 98 0 days 00:00:00.604529740 +764 99 0 days 00:00:01.026629760 +764 100 0 days 00:00:00.428012633 +765 1 0 days 00:00:00.645874240 +765 2 0 days 00:00:01.223463002 +765 3 0 days 00:00:00.671611088 +765 4 0 days 00:00:00.608905295 +765 5 0 days 00:00:00.632391300 +765 6 0 days 00:00:01.255747402 +765 7 0 days 00:00:00.552666280 +765 8 0 days 00:00:01.284495050 +765 9 0 days 00:00:01.249949260 +765 10 0 days 00:00:00.694839010 +765 11 0 days 00:00:01.021784465 +765 12 0 days 00:00:02.323552930 +765 13 0 days 00:00:01.885648750 +765 14 0 days 00:00:00.659710466 +765 15 0 days 00:00:00.563601332 +765 16 0 days 00:00:00.612228630 +765 17 0 days 00:00:01.975547990 +765 18 0 days 00:00:02.126276965 +765 19 0 
days 00:00:00.863539540 +765 20 0 days 00:00:02.356153288 +765 21 0 days 00:00:02.012689650 +765 22 0 days 00:00:01.608233300 +765 23 0 days 00:00:01.639223226 +765 24 0 days 00:00:01.206787830 +765 25 0 days 00:00:01.550970120 +765 26 0 days 00:00:00.713723064 +765 27 0 days 00:00:01.278221413 +765 28 0 days 00:00:02.227880645 +765 29 0 days 00:00:02.906374880 +765 30 0 days 00:00:01.530989591 +765 31 0 days 00:00:02.270808216 +765 32 0 days 00:00:00.899899413 +765 33 0 days 00:00:00.967516246 +765 34 0 days 00:00:02.013749535 +765 35 0 days 00:00:02.287208444 +765 36 0 days 00:00:00.534922650 +765 37 0 days 00:00:01.150795620 +765 38 0 days 00:00:01.015915455 +765 39 0 days 00:00:01.066557946 +765 40 0 days 00:00:00.797586244 +765 41 0 days 00:00:01.276571770 +765 42 0 days 00:00:00.546802488 +765 43 0 days 00:00:01.226295485 +765 44 0 days 00:00:02.503956832 +765 45 0 days 00:00:00.640537920 +765 46 0 days 00:00:00.632492810 +765 47 0 days 00:00:01.368948850 +765 48 0 days 00:00:01.250692570 +765 49 0 days 00:00:00.889703986 +765 50 0 days 00:00:02.050947640 +765 51 0 days 00:00:02.567362305 +765 52 0 days 00:00:00.607545310 +765 53 0 days 00:00:00.582870605 +765 54 0 days 00:00:01.036259726 +765 55 0 days 00:00:02.135192940 +765 56 0 days 00:00:00.783350237 +765 57 0 days 00:00:01.060495804 +765 58 0 days 00:00:00.825679282 +765 59 0 days 00:00:02.184613500 +765 60 0 days 00:00:02.132051020 +765 61 0 days 00:00:02.226671443 +765 62 0 days 00:00:00.755291200 +765 63 0 days 00:00:02.151730995 +765 64 0 days 00:00:01.060850966 +765 65 0 days 00:00:01.184406396 +765 66 0 days 00:00:00.604321248 +765 67 0 days 00:00:01.102282236 +765 68 0 days 00:00:02.479980437 +765 69 0 days 00:00:01.961447140 +765 70 0 days 00:00:00.489501760 +765 71 0 days 00:00:00.859934823 +765 72 0 days 00:00:00.663041125 +765 73 0 days 00:00:01.291875165 +765 74 0 days 00:00:00.945387822 +765 75 0 days 00:00:02.221465210 +765 76 0 days 00:00:00.660126644 +765 77 0 days 00:00:02.465625125 
+765 78 0 days 00:00:02.432314155 +765 79 0 days 00:00:00.902354840 +765 80 0 days 00:00:00.717029256 +765 81 0 days 00:00:02.159747353 +765 82 0 days 00:00:02.827343765 +765 83 0 days 00:00:01.206827605 +765 84 0 days 00:00:01.079832448 +765 85 0 days 00:00:00.890336546 +765 86 0 days 00:00:01.415926250 +765 87 0 days 00:00:01.304201800 +765 88 0 days 00:00:00.794406604 +765 89 0 days 00:00:01.030253905 +765 90 0 days 00:00:01.181338900 +765 91 0 days 00:00:01.141158892 +765 92 0 days 00:00:01.263986776 +765 93 0 days 00:00:00.770603760 +765 94 0 days 00:00:00.763092920 +765 95 0 days 00:00:00.867267083 +765 96 0 days 00:00:00.659540200 +765 97 0 days 00:00:01.315611000 +765 98 0 days 00:00:01.980851305 +765 99 0 days 00:00:01.212906480 +765 100 0 days 00:00:01.145465620 +766 1 0 days 00:00:00.577744781 +766 2 0 days 00:00:00.567632156 +766 3 0 days 00:00:00.916182860 +766 4 0 days 00:00:00.607285766 +766 5 0 days 00:00:00.915556575 +766 6 0 days 00:00:00.345282471 +766 7 0 days 00:00:00.238729452 +766 8 0 days 00:00:00.542227236 +766 9 0 days 00:00:00.445338470 +766 10 0 days 00:00:00.552193931 +766 11 0 days 00:00:00.803824740 +766 12 0 days 00:00:00.395537310 +766 13 0 days 00:00:00.560497810 +766 14 0 days 00:00:00.510388752 +766 15 0 days 00:00:00.288076342 +766 16 0 days 00:00:00.299631256 +766 17 0 days 00:00:00.530979456 +766 18 0 days 00:00:00.294023396 +766 19 0 days 00:00:00.570470830 +766 20 0 days 00:00:00.426885620 +766 21 0 days 00:00:00.588086796 +766 22 0 days 00:00:00.293883911 +766 23 0 days 00:00:01.036647751 +766 24 0 days 00:00:00.253842508 +766 25 0 days 00:00:00.329928200 +766 26 0 days 00:00:00.566814496 +766 27 0 days 00:00:00.391944645 +766 28 0 days 00:00:00.868795510 +766 29 0 days 00:00:00.277383126 +766 30 0 days 00:00:00.476602175 +766 31 0 days 00:00:00.331855548 +766 32 0 days 00:00:00.916223010 +766 33 0 days 00:00:00.988891162 +766 34 0 days 00:00:00.701474300 +766 35 0 days 00:00:00.915492450 +766 36 0 days 00:00:00.993370845 
+766 37 0 days 00:00:00.523060330 +766 38 0 days 00:00:00.566396113 +766 39 0 days 00:00:01.045296980 +766 40 0 days 00:00:00.916440520 +766 41 0 days 00:00:00.529356988 +766 42 0 days 00:00:00.883504940 +766 43 0 days 00:00:00.679754465 +766 44 0 days 00:00:00.548825860 +766 45 0 days 00:00:00.305537892 +766 46 0 days 00:00:00.525065806 +766 47 0 days 00:00:00.309651451 +766 48 0 days 00:00:00.524687160 +766 49 0 days 00:00:00.244126670 +766 50 0 days 00:00:00.286141271 +766 51 0 days 00:00:00.502537426 +766 52 0 days 00:00:00.820249480 +766 53 0 days 00:00:00.854321455 +766 54 0 days 00:00:00.544510820 +766 55 0 days 00:00:00.287832332 +766 56 0 days 00:00:00.933848328 +766 57 0 days 00:00:00.241650035 +766 58 0 days 00:00:00.972040040 +766 59 0 days 00:00:00.366178440 +766 60 0 days 00:00:00.987369650 +766 61 0 days 00:00:01.058181748 +766 62 0 days 00:00:00.249509965 +766 63 0 days 00:00:01.005516660 +766 64 0 days 00:00:00.351591176 +766 65 0 days 00:00:00.386946332 +766 66 0 days 00:00:00.698628280 +766 67 0 days 00:00:00.902118315 +766 68 0 days 00:00:00.497417305 +766 69 0 days 00:00:00.980393340 +766 70 0 days 00:00:00.998991216 +766 71 0 days 00:00:00.831629430 +766 72 0 days 00:00:00.875269640 +766 73 0 days 00:00:00.507724250 +766 74 0 days 00:00:00.331041708 +766 75 0 days 00:00:00.956230622 +766 76 0 days 00:00:00.907263135 +766 77 0 days 00:00:00.359341420 +766 78 0 days 00:00:01.095702405 +766 79 0 days 00:00:00.492760596 +766 80 0 days 00:00:00.522598012 +766 81 0 days 00:00:00.546468070 +766 82 0 days 00:00:00.513508940 +766 83 0 days 00:00:00.553017200 +766 84 0 days 00:00:01.031128468 +766 85 0 days 00:00:00.584109360 +766 86 0 days 00:00:00.267256210 +766 87 0 days 00:00:00.276328626 +766 88 0 days 00:00:00.666675496 +766 89 0 days 00:00:00.371372026 +766 90 0 days 00:00:00.732993026 +766 91 0 days 00:00:00.537342933 +766 92 0 days 00:00:00.266541700 +766 93 0 days 00:00:00.760153653 +766 94 0 days 00:00:01.014169000 +766 95 0 days 
00:00:00.240716780 +766 96 0 days 00:00:00.320755600 +766 97 0 days 00:00:00.369168564 +766 98 0 days 00:00:00.277342423 +766 99 0 days 00:00:00.720585951 +766 100 0 days 00:00:00.966283985 +767 1 0 days 00:00:00.601825586 +767 2 0 days 00:00:00.632832276 +767 3 0 days 00:00:01.543657670 +767 4 0 days 00:00:02.689066708 +767 5 0 days 00:00:00.581077253 +767 6 0 days 00:00:00.626901066 +767 7 0 days 00:00:00.555157006 +767 8 0 days 00:00:02.368305213 +767 9 0 days 00:00:02.165947984 +767 10 0 days 00:00:01.098476796 +767 11 0 days 00:00:02.318782513 +767 12 0 days 00:00:00.561424950 +767 13 0 days 00:00:00.596900496 +767 14 0 days 00:00:00.862603473 +767 15 0 days 00:00:00.996529373 +767 16 0 days 00:00:00.687472340 +767 17 0 days 00:00:00.613479247 +767 18 0 days 00:00:01.789340080 +767 19 0 days 00:00:00.509621880 +767 20 0 days 00:00:03.124730693 +767 21 0 days 00:00:01.451173832 +767 22 0 days 00:00:00.582228593 +767 23 0 days 00:00:00.776667188 +767 24 0 days 00:00:00.569117086 +767 25 0 days 00:00:01.198006835 +767 26 0 days 00:00:02.044586690 +767 27 0 days 00:00:00.990420960 +767 28 0 days 00:00:01.971833395 +767 29 0 days 00:00:00.539887046 +767 30 0 days 00:00:00.556331186 +767 31 0 days 00:00:00.829563405 +767 32 0 days 00:00:00.808422760 +767 33 0 days 00:00:00.908314508 +767 34 0 days 00:00:00.416470886 +767 35 0 days 00:00:01.128096420 +767 36 0 days 00:00:00.586569646 +767 37 0 days 00:00:00.589387493 +767 38 0 days 00:00:00.738112816 +767 39 0 days 00:00:02.179424352 +767 40 0 days 00:00:00.922076880 +767 41 0 days 00:00:00.598628735 +767 42 0 days 00:00:01.992602555 +767 43 0 days 00:00:00.523657186 +767 44 0 days 00:00:00.632418953 +767 45 0 days 00:00:02.149506995 +767 46 0 days 00:00:02.152740390 +767 47 0 days 00:00:00.698654140 +767 48 0 days 00:00:02.346236993 +767 49 0 days 00:00:02.340929486 +767 50 0 days 00:00:00.674946208 +767 51 0 days 00:00:00.611178333 +767 52 0 days 00:00:00.636600060 +767 53 0 days 00:00:00.689981424 +767 54 0 days 
00:00:01.283683140 +767 55 0 days 00:00:02.570013046 +767 56 0 days 00:00:00.591009233 +767 57 0 days 00:00:02.254570320 +767 58 0 days 00:00:01.788632206 +767 59 0 days 00:00:00.625931813 +767 60 0 days 00:00:01.232172530 +767 61 0 days 00:00:01.175136116 +767 62 0 days 00:00:00.704969030 +767 63 0 days 00:00:01.175535268 +767 64 0 days 00:00:00.984243135 +767 65 0 days 00:00:00.850736708 +767 66 0 days 00:00:01.093477213 +767 67 0 days 00:00:01.014261965 +767 68 0 days 00:00:02.405692630 +767 69 0 days 00:00:00.678783866 +767 70 0 days 00:00:01.774231013 +767 71 0 days 00:00:00.607892312 +767 72 0 days 00:00:00.975409000 +767 73 0 days 00:00:00.600571420 +767 74 0 days 00:00:02.061853248 +767 75 0 days 00:00:02.587255538 +767 76 0 days 00:00:02.264671305 +767 77 0 days 00:00:00.591554060 +767 78 0 days 00:00:01.194973330 +767 79 0 days 00:00:01.244305400 +767 80 0 days 00:00:02.250552455 +767 81 0 days 00:00:00.776976900 +767 82 0 days 00:00:03.078950806 +767 83 0 days 00:00:00.649669480 +767 84 0 days 00:00:02.615177660 +767 85 0 days 00:00:02.600156366 +767 86 0 days 00:00:01.234632940 +767 87 0 days 00:00:00.547053893 +767 88 0 days 00:00:01.580157450 +767 89 0 days 00:00:01.223440240 +767 90 0 days 00:00:00.832605484 +767 91 0 days 00:00:02.083613540 +767 92 0 days 00:00:02.004483895 +767 93 0 days 00:00:01.419385802 +767 94 0 days 00:00:02.501611460 +767 95 0 days 00:00:00.624783355 +767 96 0 days 00:00:02.264902708 +767 97 0 days 00:00:01.315236545 +767 98 0 days 00:00:01.583333665 +767 99 0 days 00:00:00.796947652 +767 100 0 days 00:00:02.538401832 +768 1 0 days 00:00:00.610777266 +768 2 0 days 00:00:00.705710513 +768 3 0 days 00:00:00.290703606 +768 4 0 days 00:00:00.347044032 +768 5 0 days 00:00:00.589420971 +768 6 0 days 00:00:00.266324746 +768 7 0 days 00:00:00.500136080 +768 8 0 days 00:00:00.341426833 +768 9 0 days 00:00:00.471182668 +768 10 0 days 00:00:00.254555780 +768 11 0 days 00:00:00.263710040 +768 12 0 days 00:00:00.884936313 +768 13 0 days 
00:00:00.910346553 +768 14 0 days 00:00:00.386196760 +768 15 0 days 00:00:00.423613095 +768 16 0 days 00:00:01.019405330 +768 17 0 days 00:00:00.579454596 +768 18 0 days 00:00:00.497451560 +768 19 0 days 00:00:00.884282533 +768 20 0 days 00:00:00.612552752 +768 21 0 days 00:00:00.579352914 +768 22 0 days 00:00:00.884564520 +768 23 0 days 00:00:01.010639170 +768 24 0 days 00:00:00.622029095 +768 25 0 days 00:00:01.031404035 +768 26 0 days 00:00:00.295176040 +768 27 0 days 00:00:00.283235645 +768 28 0 days 00:00:01.447805290 +768 29 0 days 00:00:00.783163960 +768 30 0 days 00:00:01.398365784 +768 31 0 days 00:00:01.349225010 +768 32 0 days 00:00:00.453857982 +768 33 0 days 00:00:00.526906277 +768 34 0 days 00:00:00.613888993 +768 35 0 days 00:00:00.623589160 +768 36 0 days 00:00:00.446846766 +768 37 0 days 00:00:01.146123648 +768 38 0 days 00:00:00.299479153 +768 39 0 days 00:00:00.639164680 +768 40 0 days 00:00:01.317244816 +768 41 0 days 00:00:00.639885595 +768 42 0 days 00:00:00.625405160 +768 43 0 days 00:00:00.692236930 +768 44 0 days 00:00:01.196680905 +768 45 0 days 00:00:00.950418926 +768 46 0 days 00:00:00.719871152 +768 47 0 days 00:00:00.308209972 +768 48 0 days 00:00:00.610713520 +768 49 0 days 00:00:00.539091420 +768 50 0 days 00:00:00.514011453 +768 51 0 days 00:00:00.301659230 +768 52 0 days 00:00:01.002089525 +768 53 0 days 00:00:00.452021392 +768 54 0 days 00:00:01.086589435 +768 55 0 days 00:00:00.465318928 +768 56 0 days 00:00:00.383393193 +768 57 0 days 00:00:01.531993856 +768 58 0 days 00:00:00.563628970 +768 59 0 days 00:00:00.673321800 +768 60 0 days 00:00:00.311886453 +768 61 0 days 00:00:00.540702925 +768 62 0 days 00:00:00.741449460 +768 63 0 days 00:00:00.294803873 +768 64 0 days 00:00:00.725960120 +768 65 0 days 00:00:00.297949093 +768 66 0 days 00:00:00.621537325 +768 67 0 days 00:00:00.344741868 +768 68 0 days 00:00:00.537766246 +768 69 0 days 00:00:00.326682146 +768 70 0 days 00:00:01.160982113 +768 71 0 days 00:00:00.298610160 +768 72 
0 days 00:00:00.332748984 +768 73 0 days 00:00:00.453464392 +768 74 0 days 00:00:00.559360846 +768 75 0 days 00:00:00.488230780 +768 76 0 days 00:00:01.022385055 +768 77 0 days 00:00:00.359404268 +768 78 0 days 00:00:00.382941457 +768 79 0 days 00:00:00.351985630 +768 80 0 days 00:00:00.259587580 +768 81 0 days 00:00:00.326951290 +768 82 0 days 00:00:00.430213213 +768 83 0 days 00:00:01.028662580 +768 84 0 days 00:00:00.491042086 +768 85 0 days 00:00:00.971604480 +768 86 0 days 00:00:01.310098276 +768 87 0 days 00:00:00.317078506 +768 88 0 days 00:00:00.314722460 +768 89 0 days 00:00:01.516450446 +768 90 0 days 00:00:00.327508352 +768 91 0 days 00:00:00.496033786 +768 92 0 days 00:00:01.234596040 +768 93 0 days 00:00:00.551955005 +768 94 0 days 00:00:00.371197086 +768 95 0 days 00:00:00.387784740 +768 96 0 days 00:00:00.883515851 +768 97 0 days 00:00:00.620506050 +768 98 0 days 00:00:00.975889386 +768 99 0 days 00:00:00.947886653 +768 100 0 days 00:00:00.322477000 +769 1 0 days 00:00:15.978968490 +769 2 0 days 00:00:15.661192335 +769 3 0 days 00:00:10.213386670 +769 4 0 days 00:00:09.578331086 +769 5 0 days 00:00:07.938646520 +769 6 0 days 00:00:13.647848520 +769 7 0 days 00:00:14.665978580 +769 8 0 days 00:00:05.182788166 +769 9 0 days 00:00:06.037098920 +769 10 0 days 00:00:04.861287533 +769 11 0 days 00:00:16.843205495 +769 12 0 days 00:00:07.866712286 +769 13 0 days 00:00:15.103758426 +769 14 0 days 00:00:09.606557388 +769 15 0 days 00:00:08.868227800 +769 16 0 days 00:00:16.497503865 +769 17 0 days 00:00:10.393616540 +769 18 0 days 00:00:16.443223264 +769 19 0 days 00:00:07.797477126 +769 20 0 days 00:00:04.590260840 +769 21 0 days 00:00:08.228159480 +769 22 0 days 00:00:15.073276000 +769 23 0 days 00:00:10.442376732 +769 24 0 days 00:00:08.556701026 +769 25 0 days 00:00:15.465485880 +769 26 0 days 00:00:15.681359940 +769 27 0 days 00:00:09.160772440 +769 28 0 days 00:00:07.864634820 +769 29 0 days 00:00:15.053625340 +769 30 0 days 00:00:05.724022780 +769 31 0 
days 00:00:07.475658806 +769 32 0 days 00:00:08.290528520 +769 33 0 days 00:00:09.318801740 +769 34 0 days 00:00:15.680784846 +769 35 0 days 00:00:09.247429465 +769 36 0 days 00:00:13.358495815 +769 37 0 days 00:00:05.305239244 +769 38 0 days 00:00:07.467476560 +769 39 0 days 00:00:05.089374545 +769 40 0 days 00:00:13.513032460 +769 41 0 days 00:00:04.615459493 +769 42 0 days 00:00:08.051488926 +769 43 0 days 00:00:05.717521360 +769 44 0 days 00:00:08.231601160 +769 45 0 days 00:00:08.431269306 +769 46 0 days 00:00:04.663990320 +769 47 0 days 00:00:04.911973653 +769 48 0 days 00:00:07.866544900 +769 49 0 days 00:00:05.200007373 +769 50 0 days 00:00:04.528577046 +769 51 0 days 00:00:14.908211666 +770 1 0 days 00:00:04.259162930 +770 2 0 days 00:00:09.209606770 +770 3 0 days 00:00:02.389551060 +770 4 0 days 00:00:06.422340100 +770 5 0 days 00:00:02.619676520 +770 6 0 days 00:00:02.377752466 +770 7 0 days 00:00:03.078423760 +770 8 0 days 00:00:04.284805946 +770 9 0 days 00:00:02.682795286 +770 10 0 days 00:00:02.803021405 +770 11 0 days 00:00:04.216041815 +770 12 0 days 00:00:07.471689220 +770 13 0 days 00:00:02.827578985 +770 14 0 days 00:00:07.214637475 +770 15 0 days 00:00:08.046843590 +770 16 0 days 00:00:02.694150880 +770 17 0 days 00:00:04.171128986 +770 18 0 days 00:00:04.366665890 +770 19 0 days 00:00:05.448910813 +770 20 0 days 00:00:02.986356296 +770 21 0 days 00:00:07.533424306 +770 22 0 days 00:00:04.210135255 +770 23 0 days 00:00:08.410264033 +770 24 0 days 00:00:02.691036686 +770 25 0 days 00:00:03.508337533 +770 26 0 days 00:00:07.867053033 +770 27 0 days 00:00:04.093096726 +770 28 0 days 00:00:08.179377840 +770 29 0 days 00:00:02.812319255 +770 30 0 days 00:00:07.163076680 +770 31 0 days 00:00:02.597251720 +770 32 0 days 00:00:04.740943456 +770 33 0 days 00:00:07.507494453 +770 34 0 days 00:00:03.914584435 +770 35 0 days 00:00:02.747539135 +770 36 0 days 00:00:04.389621745 +770 37 0 days 00:00:02.565172990 +770 38 0 days 00:00:04.042681833 +770 39 0 
days 00:00:07.416378853 +770 40 0 days 00:00:08.000668873 +770 41 0 days 00:00:02.760471395 +770 42 0 days 00:00:02.480155480 +770 43 0 days 00:00:04.058687413 +770 44 0 days 00:00:04.768448600 +770 45 0 days 00:00:04.792377200 +770 46 0 days 00:00:03.500244553 +770 47 0 days 00:00:07.589104286 +770 48 0 days 00:00:02.378525640 +770 49 0 days 00:00:08.454213780 +770 50 0 days 00:00:04.272072006 +770 51 0 days 00:00:04.696267046 +770 52 0 days 00:00:04.853475713 +770 53 0 days 00:00:07.453430406 +770 54 0 days 00:00:02.627195396 +770 55 0 days 00:00:04.243105315 +770 56 0 days 00:00:07.504237946 +770 57 0 days 00:00:02.421448625 +770 58 0 days 00:00:07.801520335 +770 59 0 days 00:00:02.892674150 +770 60 0 days 00:00:04.317000920 +770 61 0 days 00:00:02.125508613 +770 62 0 days 00:00:02.577324480 +770 63 0 days 00:00:08.174583280 +770 64 0 days 00:00:04.115013260 +770 65 0 days 00:00:02.749329525 +770 66 0 days 00:00:02.441426080 +770 67 0 days 00:00:02.937188160 +770 68 0 days 00:00:04.208923780 +770 69 0 days 00:00:07.387730186 +770 70 0 days 00:00:07.329255153 +770 71 0 days 00:00:05.171667575 +770 72 0 days 00:00:06.714573740 +770 73 0 days 00:00:02.512423373 +770 74 0 days 00:00:02.488644865 +770 75 0 days 00:00:09.056932295 +770 76 0 days 00:00:03.187234226 +770 77 0 days 00:00:04.360838490 +770 78 0 days 00:00:08.807801330 +770 79 0 days 00:00:05.099650556 +770 80 0 days 00:00:02.657823490 +770 81 0 days 00:00:07.708592326 +770 82 0 days 00:00:07.988386813 +770 83 0 days 00:00:02.656107675 +770 84 0 days 00:00:04.285926493 +770 85 0 days 00:00:04.428602986 +770 86 0 days 00:00:07.718122973 +770 87 0 days 00:00:08.211080246 +770 88 0 days 00:00:07.626601046 +770 89 0 days 00:00:02.812233305 +770 90 0 days 00:00:04.206069000 +770 91 0 days 00:00:04.124964700 +770 92 0 days 00:00:04.239633160 +770 93 0 days 00:00:04.093976806 +770 94 0 days 00:00:07.622896973 +770 95 0 days 00:00:07.293632446 +770 96 0 days 00:00:08.070233606 +770 97 0 days 00:00:02.498736780 
+770 98 0 days 00:00:04.655173280 +770 99 0 days 00:00:03.967112646 +770 100 0 days 00:00:06.870742253 +771 1 0 days 00:00:13.660621906 +771 2 0 days 00:00:08.444146240 +771 3 0 days 00:00:08.984515600 +771 4 0 days 00:00:09.782786560 +771 5 0 days 00:00:14.740564600 +771 6 0 days 00:00:07.234413150 +771 7 0 days 00:00:08.211083545 +771 8 0 days 00:00:05.952168490 +771 9 0 days 00:00:14.754991695 +771 10 0 days 00:00:13.477321346 +771 11 0 days 00:00:07.825000153 +771 12 0 days 00:00:16.989659560 +771 13 0 days 00:00:04.904536790 +771 14 0 days 00:00:08.378126920 +771 15 0 days 00:00:07.725804166 +771 16 0 days 00:00:04.940713930 +771 17 0 days 00:00:08.291562650 +771 18 0 days 00:00:08.172470750 +771 19 0 days 00:00:06.782278005 +771 20 0 days 00:00:05.134304285 +771 21 0 days 00:00:17.280448476 +771 22 0 days 00:00:06.444962986 +771 23 0 days 00:00:07.886588246 +771 24 0 days 00:00:16.038120508 +771 25 0 days 00:00:04.301609760 +771 26 0 days 00:00:04.642154150 +771 27 0 days 00:00:09.548098651 +771 28 0 days 00:00:08.838610355 +771 29 0 days 00:00:05.162995680 +771 30 0 days 00:00:07.333562846 +771 31 0 days 00:00:10.130907985 +771 32 0 days 00:00:16.073060945 +771 33 0 days 00:00:09.507314450 +771 34 0 days 00:00:09.091649770 +771 35 0 days 00:00:07.251566905 +771 36 0 days 00:00:08.094525840 +771 37 0 days 00:00:16.427273776 +771 38 0 days 00:00:05.393722311 +771 39 0 days 00:00:14.113358153 +771 40 0 days 00:00:06.236532015 +771 41 0 days 00:00:09.908273953 +771 42 0 days 00:00:14.975845270 +772 1 0 days 00:00:14.916910666 +772 2 0 days 00:00:07.808788846 +772 3 0 days 00:00:05.503851063 +772 4 0 days 00:00:08.173763395 +772 5 0 days 00:00:05.203166940 +772 6 0 days 00:00:05.154540792 +772 7 0 days 00:00:15.270231870 +772 8 0 days 00:00:14.699826140 +772 9 0 days 00:00:06.445289316 +772 10 0 days 00:00:07.598133316 +772 11 0 days 00:00:08.937785540 +772 12 0 days 00:00:16.950132220 +772 13 0 days 00:00:14.564800300 +772 14 0 days 00:00:09.189019312 +772 15 0 
days 00:00:13.740795353 +772 16 0 days 00:00:06.499393290 +772 17 0 days 00:00:13.803201900 +772 18 0 days 00:00:04.710109773 +772 19 0 days 00:00:09.269671405 +772 20 0 days 00:00:08.452674800 +772 21 0 days 00:00:05.036261745 +772 22 0 days 00:00:06.542412576 +772 23 0 days 00:00:08.421403890 +772 24 0 days 00:00:15.980187992 +772 25 0 days 00:00:13.622157770 +772 26 0 days 00:00:06.173227420 +772 27 0 days 00:00:07.570284595 +772 28 0 days 00:00:12.485554740 +772 29 0 days 00:00:07.947370980 +772 30 0 days 00:00:16.096204988 +772 31 0 days 00:00:09.334127113 +772 32 0 days 00:00:08.761865930 +772 33 0 days 00:00:08.357426713 +772 34 0 days 00:00:09.790219003 +772 35 0 days 00:00:13.883686646 +772 36 0 days 00:00:08.597358030 +772 37 0 days 00:00:07.577166300 +772 38 0 days 00:00:12.041841046 +772 39 0 days 00:00:16.430136525 +772 40 0 days 00:00:07.008943275 +772 41 0 days 00:00:05.749614320 +772 42 0 days 00:00:16.627626040 +773 1 0 days 00:00:02.563888700 +773 2 0 days 00:00:06.890189946 +773 3 0 days 00:00:05.041687605 +773 4 0 days 00:00:04.193074820 +773 5 0 days 00:00:02.893993462 +773 6 0 days 00:00:07.217010400 +773 7 0 days 00:00:02.771742820 +773 8 0 days 00:00:04.161559530 +773 9 0 days 00:00:02.523332485 +773 10 0 days 00:00:08.142358270 +773 11 0 days 00:00:04.659494260 +773 12 0 days 00:00:04.406831015 +773 13 0 days 00:00:06.920704486 +773 14 0 days 00:00:02.865301272 +773 15 0 days 00:00:02.354578200 +773 16 0 days 00:00:04.172712055 +773 17 0 days 00:00:07.497437100 +773 18 0 days 00:00:02.440064520 +773 19 0 days 00:00:07.396179166 +773 20 0 days 00:00:07.670008190 +773 21 0 days 00:00:02.519087846 +773 22 0 days 00:00:08.717582896 +773 23 0 days 00:00:02.336365146 +773 24 0 days 00:00:04.667198290 +773 25 0 days 00:00:02.282783853 +773 26 0 days 00:00:04.294494980 +773 27 0 days 00:00:04.252181735 +773 28 0 days 00:00:04.371542250 +773 29 0 days 00:00:07.339904446 +773 30 0 days 00:00:08.126563475 +773 31 0 days 00:00:07.814587625 +773 32 0 
days 00:00:04.130990010 +773 33 0 days 00:00:02.811241393 +773 34 0 days 00:00:05.167335368 +773 35 0 days 00:00:02.502160620 +773 36 0 days 00:00:08.335696690 +773 37 0 days 00:00:02.438471870 +773 38 0 days 00:00:04.876933333 +773 39 0 days 00:00:03.755772793 +773 40 0 days 00:00:04.876760110 +773 41 0 days 00:00:07.848998446 +773 42 0 days 00:00:04.780227895 +773 43 0 days 00:00:07.799166550 +773 44 0 days 00:00:04.420450316 +773 45 0 days 00:00:07.645789505 +773 46 0 days 00:00:03.838414566 +773 47 0 days 00:00:07.350890126 +773 48 0 days 00:00:02.567809855 +773 49 0 days 00:00:02.254616733 +773 50 0 days 00:00:02.723932110 +773 51 0 days 00:00:07.732182095 +773 52 0 days 00:00:08.325835575 +773 53 0 days 00:00:07.285762553 +773 54 0 days 00:00:08.181227940 +773 55 0 days 00:00:04.129230790 +773 56 0 days 00:00:04.441720068 +773 57 0 days 00:00:07.545378740 +773 58 0 days 00:00:02.735817792 +773 59 0 days 00:00:09.424160326 +773 60 0 days 00:00:03.348550333 +773 61 0 days 00:00:04.378904640 +773 62 0 days 00:00:02.617683630 +773 63 0 days 00:00:02.657277333 +773 64 0 days 00:00:02.737424960 +773 65 0 days 00:00:08.147289804 +773 66 0 days 00:00:06.045742226 +773 67 0 days 00:00:02.515386765 +773 68 0 days 00:00:08.118407280 +773 69 0 days 00:00:02.494557150 +773 70 0 days 00:00:07.608859995 +773 71 0 days 00:00:02.526372910 +773 72 0 days 00:00:04.725285865 +773 73 0 days 00:00:02.617598775 +773 74 0 days 00:00:07.219171433 +773 75 0 days 00:00:07.625318795 +773 76 0 days 00:00:04.138680160 +773 77 0 days 00:00:04.205158760 +773 78 0 days 00:00:04.479711410 +773 79 0 days 00:00:04.203081775 +773 80 0 days 00:00:07.514375835 +773 81 0 days 00:00:08.850799473 +774 1 0 days 00:00:03.323408413 +774 2 0 days 00:00:07.005243973 +774 3 0 days 00:00:04.325052225 +774 4 0 days 00:00:08.331692776 +774 5 0 days 00:00:07.881962180 +774 6 0 days 00:00:04.871177625 +774 7 0 days 00:00:02.544610625 +774 8 0 days 00:00:08.367176520 +774 9 0 days 00:00:07.726374470 +774 10 0 
days 00:00:06.078302433 +774 11 0 days 00:00:07.735744140 +774 12 0 days 00:00:04.420252800 +774 13 0 days 00:00:03.202399873 +774 14 0 days 00:00:06.964305060 +774 15 0 days 00:00:02.694072200 +774 16 0 days 00:00:03.901782946 +774 17 0 days 00:00:07.682322775 +774 18 0 days 00:00:02.619169720 +774 19 0 days 00:00:02.892462355 +774 20 0 days 00:00:02.505327605 +774 21 0 days 00:00:05.108767702 +774 22 0 days 00:00:06.886128380 +774 23 0 days 00:00:05.092575950 +774 24 0 days 00:00:08.346606345 +774 25 0 days 00:00:07.024109106 +774 26 0 days 00:00:08.357732340 +774 27 0 days 00:00:02.056598260 +774 28 0 days 00:00:07.984997370 +774 29 0 days 00:00:09.133666080 +774 30 0 days 00:00:07.510384166 +774 31 0 days 00:00:02.670781395 +774 32 0 days 00:00:07.823843430 +774 33 0 days 00:00:08.423971668 +774 34 0 days 00:00:04.649393110 +774 35 0 days 00:00:02.819747726 +774 36 0 days 00:00:05.060112424 +774 37 0 days 00:00:02.517301365 +774 38 0 days 00:00:02.858292360 +774 39 0 days 00:00:04.431760340 +774 40 0 days 00:00:04.663699860 +774 41 0 days 00:00:03.075213963 +774 42 0 days 00:00:07.464525140 +774 43 0 days 00:00:06.961665273 +774 44 0 days 00:00:06.886223000 +774 45 0 days 00:00:08.322338820 +774 46 0 days 00:00:04.576634650 +774 47 0 days 00:00:04.133173013 +774 48 0 days 00:00:04.930512937 +774 49 0 days 00:00:04.103142773 +774 50 0 days 00:00:02.743119060 +774 51 0 days 00:00:03.785656020 +774 52 0 days 00:00:07.701642945 +774 53 0 days 00:00:04.293195750 +774 54 0 days 00:00:03.967703870 +774 55 0 days 00:00:04.250574880 +774 56 0 days 00:00:02.496673913 +774 57 0 days 00:00:04.507769360 +774 58 0 days 00:00:02.547690735 +774 59 0 days 00:00:02.976702035 +774 60 0 days 00:00:04.399379606 +774 61 0 days 00:00:02.555762355 +774 62 0 days 00:00:02.558965400 +774 63 0 days 00:00:02.441767273 +774 64 0 days 00:00:07.877575950 +774 65 0 days 00:00:02.736681570 +774 66 0 days 00:00:04.332685445 +774 67 0 days 00:00:07.354155280 +774 68 0 days 00:00:04.239170380 
+774 69 0 days 00:00:04.442665522 +774 70 0 days 00:00:04.345948515 +774 71 0 days 00:00:03.903800400 +774 72 0 days 00:00:04.303805905 +774 73 0 days 00:00:07.270818428 +774 74 0 days 00:00:04.132428386 +774 75 0 days 00:00:07.121284033 +774 76 0 days 00:00:07.919709875 +774 77 0 days 00:00:07.799660400 +774 78 0 days 00:00:03.233103430 +774 79 0 days 00:00:04.250171055 +774 80 0 days 00:00:04.906616504 +774 81 0 days 00:00:02.525739595 +774 82 0 days 00:00:03.165069130 +775 1 0 days 00:00:04.561432946 +775 2 0 days 00:00:07.699134253 +775 3 0 days 00:00:04.933545055 +775 4 0 days 00:00:07.603709253 +775 5 0 days 00:00:05.759447440 +775 6 0 days 00:00:04.754242726 +775 7 0 days 00:00:07.477437433 +775 8 0 days 00:00:07.562827160 +775 9 0 days 00:00:08.215117300 +775 10 0 days 00:00:14.713364073 +775 11 0 days 00:00:05.028724346 +775 12 0 days 00:00:14.084309140 +775 13 0 days 00:00:04.516596680 +775 14 0 days 00:00:04.578079786 +775 15 0 days 00:00:14.857345006 +775 16 0 days 00:00:14.000042326 +775 17 0 days 00:00:08.226538033 +775 18 0 days 00:00:05.084607786 +775 19 0 days 00:00:04.496814713 +775 20 0 days 00:00:04.861878493 +775 21 0 days 00:00:13.615832426 +775 22 0 days 00:00:07.689959413 +775 23 0 days 00:00:08.888756368 +775 24 0 days 00:00:14.738810646 +775 25 0 days 00:00:13.850412366 +775 26 0 days 00:00:16.685752800 +775 27 0 days 00:00:07.643451300 +775 28 0 days 00:00:05.598005400 +775 29 0 days 00:00:04.469440466 +775 30 0 days 00:00:13.710747293 +775 31 0 days 00:00:13.851347613 +775 32 0 days 00:00:04.319391620 +775 33 0 days 00:00:15.427475350 +775 34 0 days 00:00:08.162247246 +775 35 0 days 00:00:07.609114933 +775 36 0 days 00:00:05.957107920 +775 37 0 days 00:00:06.661019266 +775 38 0 days 00:00:04.321737146 +775 39 0 days 00:00:07.509868473 +775 40 0 days 00:00:04.901388540 +775 41 0 days 00:00:04.461369886 +775 42 0 days 00:00:07.740237526 +775 43 0 days 00:00:05.825671753 +775 44 0 days 00:00:07.756380740 +775 45 0 days 00:00:07.618230120 
+775 46 0 days 00:00:14.576331440 +775 47 0 days 00:00:08.130466360 +775 48 0 days 00:00:04.464029413 +775 49 0 days 00:00:07.957426640 +775 50 0 days 00:00:13.743901453 +775 51 0 days 00:00:07.506793766 +775 52 0 days 00:00:15.430306710 +775 53 0 days 00:00:08.192718973 +775 54 0 days 00:00:04.989945713 +775 55 0 days 00:00:08.858624505 +775 56 0 days 00:00:13.586899160 +775 57 0 days 00:00:14.522768846 +775 58 0 days 00:00:04.439605980 +775 59 0 days 00:00:14.673975000 +775 60 0 days 00:00:14.102344086 +775 61 0 days 00:00:13.874859880 +775 62 0 days 00:00:04.449023700 +775 63 0 days 00:00:07.455278513 +776 1 0 days 00:00:03.977479780 +776 2 0 days 00:00:04.341258246 +776 3 0 days 00:00:04.131603840 +776 4 0 days 00:00:07.022737320 +776 5 0 days 00:00:04.655279180 +776 6 0 days 00:00:08.800229566 +776 7 0 days 00:00:08.357656636 +776 8 0 days 00:00:04.071352480 +776 9 0 days 00:00:08.236761185 +776 10 0 days 00:00:07.091805300 +776 11 0 days 00:00:02.925659226 +776 12 0 days 00:00:02.876379180 +776 13 0 days 00:00:03.883614486 +776 14 0 days 00:00:02.303293546 +776 15 0 days 00:00:02.442440086 +776 16 0 days 00:00:06.927457000 +776 17 0 days 00:00:03.788327640 +776 18 0 days 00:00:03.835091693 +776 19 0 days 00:00:04.797877235 +776 20 0 days 00:00:02.436067165 +776 21 0 days 00:00:07.024467046 +776 22 0 days 00:00:03.836175433 +776 23 0 days 00:00:07.113924880 +776 24 0 days 00:00:02.300399760 +776 25 0 days 00:00:06.998441840 +776 26 0 days 00:00:02.936412486 +776 27 0 days 00:00:04.425486393 +776 28 0 days 00:00:04.446303120 +776 29 0 days 00:00:02.609701646 +776 30 0 days 00:00:02.980852725 +776 31 0 days 00:00:04.138758633 +776 32 0 days 00:00:02.321436133 +776 33 0 days 00:00:04.077649020 +776 34 0 days 00:00:04.130264180 +776 35 0 days 00:00:02.529023526 +776 36 0 days 00:00:04.669737753 +776 37 0 days 00:00:06.964564186 +776 38 0 days 00:00:03.418584100 +776 39 0 days 00:00:04.059494753 +776 40 0 days 00:00:04.066690393 +776 41 0 days 00:00:07.020297500 
+776 42 0 days 00:00:02.821388906 +776 43 0 days 00:00:02.296927853 +776 44 0 days 00:00:02.428906060 +776 45 0 days 00:00:07.810018140 +776 46 0 days 00:00:04.209113286 +776 47 0 days 00:00:07.071082560 +776 48 0 days 00:00:07.153540400 +776 49 0 days 00:00:03.934583766 +776 50 0 days 00:00:07.430124726 +776 51 0 days 00:00:07.037172813 +776 52 0 days 00:00:04.390723646 +776 53 0 days 00:00:04.615243760 +776 54 0 days 00:00:06.161938160 +776 55 0 days 00:00:03.909818980 +776 56 0 days 00:00:06.276432753 +776 57 0 days 00:00:07.108083406 +776 58 0 days 00:00:02.529902985 +776 59 0 days 00:00:04.219794946 +776 60 0 days 00:00:02.400344260 +776 61 0 days 00:00:02.765593506 +776 62 0 days 00:00:06.672510700 +776 63 0 days 00:00:07.514143353 +776 64 0 days 00:00:04.243463773 +776 65 0 days 00:00:07.819419875 +776 66 0 days 00:00:06.987058780 +776 67 0 days 00:00:07.960300815 +776 68 0 days 00:00:03.860689973 +776 69 0 days 00:00:07.605369100 +776 70 0 days 00:00:07.080000513 +776 71 0 days 00:00:03.817389866 +776 72 0 days 00:00:02.316798486 +776 73 0 days 00:00:03.904818313 +776 74 0 days 00:00:07.297763320 +776 75 0 days 00:00:03.677159673 +776 76 0 days 00:00:02.660861153 +776 77 0 days 00:00:02.474910173 +776 78 0 days 00:00:03.163735350 +776 79 0 days 00:00:07.003721473 +776 80 0 days 00:00:02.310283986 +776 81 0 days 00:00:07.038381826 +776 82 0 days 00:00:02.479500100 +776 83 0 days 00:00:08.336118800 +776 84 0 days 00:00:02.668721875 +776 85 0 days 00:00:07.084368066 +776 86 0 days 00:00:02.308518126 +776 87 0 days 00:00:02.598108270 +776 88 0 days 00:00:04.039454773 +776 89 0 days 00:00:04.550060188 +776 90 0 days 00:00:07.036443020 +776 91 0 days 00:00:06.250252533 +776 92 0 days 00:00:07.962430260 +776 93 0 days 00:00:04.992559446 +776 94 0 days 00:00:04.441675420 +776 95 0 days 00:00:02.539793005 +776 96 0 days 00:00:07.072012446 +776 97 0 days 00:00:04.238555390 +776 98 0 days 00:00:02.566392670 +776 99 0 days 00:00:07.081643753 +776 100 0 days 
00:00:02.409813033 +777 1 0 days 00:02:40.029986170 +777 2 0 days 00:03:32.598626806 +778 1 0 days 00:00:57.318408185 +778 2 0 days 00:01:36.625901150 +778 3 0 days 00:02:55.145459855 +778 4 0 days 00:03:51.194428426 +779 1 0 days 00:05:07.865245320 +779 2 0 days 00:02:36.689285926 +779 3 0 days 00:02:37.226701160 +780 1 0 days 00:01:25.183243373 +780 2 0 days 00:02:23.063469420 +780 3 0 days 00:02:21.072558053 +780 4 0 days 00:04:35.739460326 +781 1 0 days 00:02:36.350304346 +781 2 0 days 00:00:52.065530693 +781 3 0 days 00:01:36.620468913 +781 4 0 days 00:02:45.911930640 +781 5 0 days 00:02:36.209678400 +782 1 0 days 00:00:51.436149513 +782 2 0 days 00:02:36.386249946 +782 3 0 days 00:02:46.590905773 +782 4 0 days 00:02:46.589908373 +782 5 0 days 00:00:51.553325666 +783 1 0 days 00:01:16.261210100 +783 2 0 days 00:01:45.876656514 +783 3 0 days 00:01:02.225526466 +783 4 0 days 00:00:48.007474542 +783 5 0 days 00:01:34.623737016 +783 6 0 days 00:01:38.173641275 +783 7 0 days 00:01:54.180787066 +783 8 0 days 00:02:12.048947500 +783 9 0 days 00:01:24.158859583 +783 10 0 days 00:01:25.037188150 +783 11 0 days 00:02:49.659301412 +783 12 0 days 00:02:18.270178428 +783 13 0 days 00:01:24.461817916 +783 14 0 days 00:01:34.676252933 +784 1 0 days 00:00:52.659802057 +784 2 0 days 00:00:48.806881370 +784 3 0 days 00:01:18.989180750 +784 4 0 days 00:00:50.585149642 +784 5 0 days 00:01:36.618454633 +784 6 0 days 00:00:50.234177400 +784 7 0 days 00:01:45.820579885 +784 8 0 days 00:00:50.576083572 +784 9 0 days 00:01:15.834142183 +784 10 0 days 00:01:00.189186316 +784 11 0 days 00:01:01.469726333 +784 12 0 days 00:01:42.648901457 +784 13 0 days 00:01:00.343630025 +784 14 0 days 00:00:30.928490683 +784 15 0 days 00:01:39.479067666 +784 16 0 days 00:01:36.896133028 +784 17 0 days 00:00:50.979892087 +784 18 0 days 00:00:25.315509742 +784 19 0 days 00:01:21.222366627 +785 1 0 days 00:01:05.215854000 +785 2 0 days 00:02:02.959687166 +785 3 0 days 00:01:53.351889750 +785 4 0 days 
00:00:48.562062657 +785 5 0 days 00:01:57.616328150 +785 6 0 days 00:03:54.616418983 +785 7 0 days 00:01:06.616018471 +785 8 0 days 00:01:44.168931666 +785 9 0 days 00:01:08.085395400 +785 10 0 days 00:01:20.080498066 +785 11 0 days 00:01:40.052387971 +785 12 0 days 00:00:59.580275083 +785 13 0 days 00:02:02.401759814 +785 14 0 days 00:00:58.598033450 +786 1 0 days 00:00:48.857558985 +786 2 0 days 00:00:57.652720850 +786 3 0 days 00:00:40.582785142 +786 4 0 days 00:00:38.503483400 +786 5 0 days 00:01:43.782901900 +786 6 0 days 00:00:44.138591783 +786 7 0 days 00:00:36.254614271 +786 8 0 days 00:00:46.609361428 +786 9 0 days 00:00:39.937440200 +786 10 0 days 00:00:40.116583000 +786 11 0 days 00:01:38.902332833 +786 12 0 days 00:00:46.980116716 +786 13 0 days 00:00:35.071066100 +786 14 0 days 00:00:56.446063842 +786 15 0 days 00:00:47.484354085 +786 16 0 days 00:01:58.604287200 +786 17 0 days 00:00:28.177172470 +786 18 0 days 00:00:56.231147850 +786 19 0 days 00:00:59.792499216 +786 20 0 days 00:00:35.692313400 +786 21 0 days 00:00:53.698036711 +786 22 0 days 00:00:23.300900112 +786 23 0 days 00:01:04.833680766 +787 1 0 days 00:03:07.196267985 +787 2 0 days 00:00:44.401227100 +787 3 0 days 00:01:47.434190262 +787 4 0 days 00:01:27.487272900 +787 5 0 days 00:01:36.272343600 +787 6 0 days 00:00:47.305894600 +787 7 0 days 00:00:52.304080914 +787 8 0 days 00:01:20.733141014 +787 9 0 days 00:01:01.367310122 +787 10 0 days 00:01:19.713516950 +787 11 0 days 00:01:16.723751077 +787 12 0 days 00:01:20.033375328 +787 13 0 days 00:00:56.489961233 +787 14 0 days 00:01:41.303222433 +787 15 0 days 00:01:04.369074133 +788 1 0 days 00:00:58.370604787 +788 2 0 days 00:01:09.011242925 +788 3 0 days 00:00:29.015902666 +788 4 0 days 00:00:35.684870485 +788 5 0 days 00:01:22.564584483 +788 6 0 days 00:00:42.258214800 +788 7 0 days 00:00:36.849771028 +788 8 0 days 00:00:49.403013771 +788 9 0 days 00:00:31.711891350 +788 10 0 days 00:00:41.490411583 +788 11 0 days 00:00:48.544816233 +788 
12 0 days 00:01:09.855150257 +788 13 0 days 00:01:07.278582040 +788 14 0 days 00:00:47.016718162 +788 15 0 days 00:00:53.041940512 +788 16 0 days 00:00:21.028830171 +788 17 0 days 00:01:39.029083814 +788 18 0 days 00:00:41.702998083 +788 19 0 days 00:00:51.182667714 +788 20 0 days 00:00:43.996873566 +788 21 0 days 00:00:42.906210783 +788 22 0 days 00:01:38.202666057 +789 1 0 days 00:00:33.638193966 +789 2 0 days 00:01:53.183427816 +789 3 0 days 00:01:27.351531166 +789 4 0 days 00:03:25.389583983 +789 5 0 days 00:02:05.729788200 +789 6 0 days 00:01:52.587999700 +789 7 0 days 00:01:29.965724366 +789 8 0 days 00:01:12.244114700 +789 9 0 days 00:00:33.738136850 +789 10 0 days 00:01:54.092546750 +789 11 0 days 00:00:54.983777916 +789 12 0 days 00:00:33.326016500 +789 13 0 days 00:01:03.802381700 +789 14 0 days 00:00:41.652480533 +789 15 0 days 00:01:26.522714666 +789 16 0 days 00:01:02.220804583 +789 17 0 days 00:00:58.692933366 +789 18 0 days 00:02:56.991385583 +790 1 0 days 00:00:38.921213283 +790 2 0 days 00:00:38.465399250 +790 3 0 days 00:01:00.336824000 +790 4 0 days 00:00:36.443551666 +790 5 0 days 00:00:54.465201533 +790 6 0 days 00:00:56.117504914 +790 7 0 days 00:00:21.497404683 +790 8 0 days 00:00:40.039664100 +790 9 0 days 00:00:33.618959316 +790 10 0 days 00:00:47.094045100 +790 11 0 days 00:00:50.781157350 +790 12 0 days 00:01:02.953248500 +790 13 0 days 00:00:56.506013633 +790 14 0 days 00:00:23.968519014 +790 15 0 days 00:00:26.045328216 +790 16 0 days 00:00:26.718378083 +790 17 0 days 00:01:02.829763800 +790 18 0 days 00:00:52.035741616 +790 19 0 days 00:00:40.625360766 +790 20 0 days 00:01:06.959278033 +790 21 0 days 00:00:36.070828683 +790 22 0 days 00:01:06.084387250 +790 23 0 days 00:00:28.103919033 +790 24 0 days 00:00:27.312808916 +790 25 0 days 00:01:09.475534066 +790 26 0 days 00:00:27.927190633 +790 27 0 days 00:00:42.824877116 +790 28 0 days 00:00:56.548855583 +790 29 0 days 00:00:38.014630250 +790 30 0 days 00:00:26.425736666 +790 31 0 days 
00:00:52.283766050 +790 32 0 days 00:01:03.260921166 +791 1 0 days 00:00:09.065302706 +791 2 0 days 00:00:09.150397293 +791 3 0 days 00:00:15.433096240 +791 4 0 days 00:00:08.658949140 +791 5 0 days 00:00:06.070936966 +791 6 0 days 00:00:06.085179940 +791 7 0 days 00:00:06.254705053 +791 8 0 days 00:00:06.682530285 +791 9 0 days 00:00:07.250284408 +791 10 0 days 00:00:06.697483833 +791 11 0 days 00:00:08.056527616 +791 12 0 days 00:00:08.493327630 +791 13 0 days 00:00:09.949480355 +791 14 0 days 00:00:17.057728316 +791 15 0 days 00:00:14.574293960 +791 16 0 days 00:00:06.146143060 +791 17 0 days 00:00:05.979859953 +791 18 0 days 00:00:08.751644346 +791 19 0 days 00:00:07.186350266 +791 20 0 days 00:00:08.825051506 +791 21 0 days 00:00:08.893044413 +791 22 0 days 00:00:14.373565480 +791 23 0 days 00:00:08.823117833 +791 24 0 days 00:00:06.137434413 +791 25 0 days 00:00:14.312337646 +791 26 0 days 00:00:06.190219386 +791 27 0 days 00:00:10.216257565 +791 28 0 days 00:00:14.560468140 +791 29 0 days 00:00:08.969089766 +791 30 0 days 00:00:09.114891333 +791 31 0 days 00:00:09.964246390 +791 32 0 days 00:00:14.726792893 +791 33 0 days 00:00:06.703486925 +791 34 0 days 00:00:14.666785593 +791 35 0 days 00:00:06.116219893 +791 36 0 days 00:00:14.846668853 +791 37 0 days 00:00:06.423498353 +791 38 0 days 00:00:08.934488380 +791 39 0 days 00:00:14.467878400 +791 40 0 days 00:00:09.107756093 +791 41 0 days 00:00:14.334217986 +791 42 0 days 00:00:09.029457360 +791 43 0 days 00:00:10.429835455 +791 44 0 days 00:00:07.254162200 +791 45 0 days 00:00:09.737643760 +791 46 0 days 00:00:08.897555166 +791 47 0 days 00:00:08.976741906 +791 48 0 days 00:00:14.119252453 +791 49 0 days 00:00:06.147304300 +791 50 0 days 00:00:14.834685080 +791 51 0 days 00:00:08.835962246 +791 52 0 days 00:00:07.182558732 +791 53 0 days 00:00:08.595916006 +791 54 0 days 00:00:06.849941010 +792 1 0 days 00:00:17.198194405 +792 2 0 days 00:00:08.722258316 +792 3 0 days 00:00:09.129834706 +792 4 0 days 
00:00:15.301798020 +792 5 0 days 00:00:06.707967120 +792 6 0 days 00:00:09.656378353 +792 7 0 days 00:00:06.373544866 +792 8 0 days 00:00:10.613766030 +792 9 0 days 00:00:15.954130400 +792 10 0 days 00:00:07.678129480 +792 11 0 days 00:00:15.156378420 +792 12 0 days 00:00:15.543280333 +792 13 0 days 00:00:12.948220540 +792 14 0 days 00:00:09.334149873 +792 15 0 days 00:00:14.816676193 +792 16 0 days 00:00:07.111467870 +792 17 0 days 00:00:12.937784146 +792 18 0 days 00:00:17.256792720 +792 19 0 days 00:00:15.367815606 +792 20 0 days 00:00:10.502054350 +792 21 0 days 00:00:06.254468800 +792 22 0 days 00:00:15.262400853 +792 23 0 days 00:00:06.642156406 +792 24 0 days 00:00:07.486550650 +792 25 0 days 00:00:15.706885693 +792 26 0 days 00:00:09.272617906 +792 27 0 days 00:00:18.629523416 +792 28 0 days 00:00:10.552499275 +792 29 0 days 00:00:07.296168113 +792 30 0 days 00:00:06.293849086 +792 31 0 days 00:00:08.164318033 +792 32 0 days 00:00:15.194480666 +792 33 0 days 00:00:10.488184065 +792 34 0 days 00:00:06.971090560 +792 35 0 days 00:00:07.385320012 +792 36 0 days 00:00:10.605006640 +792 37 0 days 00:00:09.590500140 +792 38 0 days 00:00:10.048032973 +792 39 0 days 00:00:06.685646560 +792 40 0 days 00:00:07.158592233 +792 41 0 days 00:00:19.224254096 +792 42 0 days 00:00:06.842233333 +792 43 0 days 00:00:10.377432275 +792 44 0 days 00:00:17.076160215 +793 1 0 days 00:00:08.453703316 +793 2 0 days 00:00:04.441747700 +793 3 0 days 00:00:09.464436975 +793 4 0 days 00:00:04.605130080 +793 5 0 days 00:00:04.608034473 +793 6 0 days 00:00:08.243743820 +793 7 0 days 00:00:03.104932900 +793 8 0 days 00:00:03.128346926 +793 9 0 days 00:00:07.282636833 +793 10 0 days 00:00:04.425201153 +793 11 0 days 00:00:03.071893400 +793 12 0 days 00:00:03.448982720 +793 13 0 days 00:00:03.053376913 +793 14 0 days 00:00:04.690057466 +793 15 0 days 00:00:07.282084720 +793 16 0 days 00:00:03.722373245 +793 17 0 days 00:00:03.858320312 +793 18 0 days 00:00:04.584984413 +793 19 0 days 
00:00:06.076877140 +793 20 0 days 00:00:04.882726155 +793 21 0 days 00:00:04.659911820 +793 22 0 days 00:00:07.442547180 +793 23 0 days 00:00:04.525512966 +793 24 0 days 00:00:07.309839193 +793 25 0 days 00:00:07.283488060 +793 26 0 days 00:00:07.161198193 +793 27 0 days 00:00:08.597795236 +793 28 0 days 00:00:05.121929085 +793 29 0 days 00:00:04.756077033 +793 30 0 days 00:00:04.431682726 +793 31 0 days 00:00:04.714358780 +793 32 0 days 00:00:07.338038780 +793 33 0 days 00:00:04.984087530 +793 34 0 days 00:00:03.011347906 +793 35 0 days 00:00:06.793657890 +793 36 0 days 00:00:07.244951613 +793 37 0 days 00:00:08.086080685 +793 38 0 days 00:00:05.513834396 +793 39 0 days 00:00:05.713936048 +793 40 0 days 00:00:04.529173180 +793 41 0 days 00:00:04.551505653 +793 42 0 days 00:00:04.490895260 +793 43 0 days 00:00:07.431354446 +793 44 0 days 00:00:04.609947466 +793 45 0 days 00:00:04.658272733 +793 46 0 days 00:00:05.384456244 +793 47 0 days 00:00:03.630695695 +793 48 0 days 00:00:08.614474532 +793 49 0 days 00:00:07.414617980 +793 50 0 days 00:00:04.526270620 +793 51 0 days 00:00:08.060094325 +793 52 0 days 00:00:08.598497524 +793 53 0 days 00:00:03.073163680 +793 54 0 days 00:00:07.195997800 +793 55 0 days 00:00:03.360755366 +793 56 0 days 00:00:04.555241086 +793 57 0 days 00:00:05.328888080 +793 58 0 days 00:00:03.644319152 +793 59 0 days 00:00:07.429516260 +793 60 0 days 00:00:04.547781893 +793 61 0 days 00:00:03.674631006 +793 62 0 days 00:00:05.081967520 +793 63 0 days 00:00:08.669886624 +793 64 0 days 00:00:07.198617666 +793 65 0 days 00:00:04.586956800 +793 66 0 days 00:00:08.265936110 +793 67 0 days 00:00:07.176825256 +793 68 0 days 00:00:04.518617540 +793 69 0 days 00:00:05.035331490 +793 70 0 days 00:00:03.372229295 +793 71 0 days 00:00:04.552340473 +793 72 0 days 00:00:04.550683586 +793 73 0 days 00:00:07.411990466 +793 74 0 days 00:00:03.541730020 +793 75 0 days 00:00:04.525767473 +793 76 0 days 00:00:07.413537826 +793 77 0 days 00:00:07.229722433 +793 78 
0 days 00:00:07.419554080 +793 79 0 days 00:00:08.354767365 +793 80 0 days 00:00:07.424373840 +793 81 0 days 00:00:04.569560933 +793 82 0 days 00:00:07.183336126 +793 83 0 days 00:00:03.093291653 +793 84 0 days 00:00:07.296382493 +793 85 0 days 00:00:03.336631606 +794 1 0 days 00:00:03.280559913 +794 2 0 days 00:00:03.567107690 +794 3 0 days 00:00:04.753386900 +794 4 0 days 00:00:04.786458860 +794 5 0 days 00:00:03.768157932 +794 6 0 days 00:00:03.265333426 +794 7 0 days 00:00:07.645762628 +794 8 0 days 00:00:03.588387965 +794 9 0 days 00:00:07.818608906 +794 10 0 days 00:00:04.832892720 +794 11 0 days 00:00:03.861515776 +794 12 0 days 00:00:05.708747590 +794 13 0 days 00:00:03.600308820 +794 14 0 days 00:00:07.875070460 +794 15 0 days 00:00:08.681308210 +794 16 0 days 00:00:07.759608920 +794 17 0 days 00:00:03.764836546 +794 18 0 days 00:00:03.418089460 +794 19 0 days 00:00:04.768595446 +794 20 0 days 00:00:05.854612213 +794 21 0 days 00:00:03.982685812 +794 22 0 days 00:00:05.314396815 +794 23 0 days 00:00:03.235309373 +794 24 0 days 00:00:04.801503146 +794 25 0 days 00:00:05.714465408 +794 26 0 days 00:00:06.419697273 +794 27 0 days 00:00:05.334786030 +794 28 0 days 00:00:03.239389326 +794 29 0 days 00:00:04.045811796 +794 30 0 days 00:00:07.832720573 +794 31 0 days 00:00:04.750631266 +794 32 0 days 00:00:07.794118440 +794 33 0 days 00:00:07.688768533 +794 34 0 days 00:00:05.623361692 +794 35 0 days 00:00:04.826524746 +794 36 0 days 00:00:08.790810125 +794 37 0 days 00:00:03.667271815 +794 38 0 days 00:00:08.515998490 +794 39 0 days 00:00:04.670452620 +794 40 0 days 00:00:05.901872333 +794 41 0 days 00:00:05.516848552 +794 42 0 days 00:00:03.262722073 +794 43 0 days 00:00:04.738007413 +794 44 0 days 00:00:07.601379740 +794 45 0 days 00:00:07.818028180 +794 46 0 days 00:00:07.651946633 +794 47 0 days 00:00:03.941161980 +794 48 0 days 00:00:03.684615353 +794 49 0 days 00:00:04.303495192 +794 50 0 days 00:00:03.521742930 +794 51 0 days 00:00:05.332499565 +794 52 0 
days 00:00:02.964582413 +794 53 0 days 00:00:05.397700415 +794 54 0 days 00:00:05.345174615 +794 55 0 days 00:00:08.629088555 +794 56 0 days 00:00:05.599463936 +794 57 0 days 00:00:04.761689773 +794 58 0 days 00:00:07.759306586 +794 59 0 days 00:00:03.816947936 +794 60 0 days 00:00:04.759404413 +794 61 0 days 00:00:05.164358080 +794 62 0 days 00:00:07.747711806 +794 63 0 days 00:00:03.650248205 +794 64 0 days 00:00:04.839119933 +794 65 0 days 00:00:03.270047133 +794 66 0 days 00:00:04.707130333 +794 67 0 days 00:00:07.757035586 +794 68 0 days 00:00:09.509104143 +794 69 0 days 00:00:03.260637286 +794 70 0 days 00:00:03.248538246 +794 71 0 days 00:00:08.514172320 +794 72 0 days 00:00:03.751341326 +794 73 0 days 00:00:07.631992200 +794 74 0 days 00:00:04.911894766 +794 75 0 days 00:00:05.897907812 +794 76 0 days 00:00:09.255123640 +794 77 0 days 00:00:04.144730748 +794 78 0 days 00:00:09.247707976 +794 79 0 days 00:00:05.000230166 +794 80 0 days 00:00:03.773577740 +794 81 0 days 00:00:04.952362180 +794 82 0 days 00:00:04.763902866 +794 83 0 days 00:00:05.519624840 +794 84 0 days 00:00:03.809730088 +795 1 0 days 00:00:11.917514384 +795 2 0 days 00:00:08.930352453 +795 3 0 days 00:00:06.868325900 +795 4 0 days 00:00:11.434047680 +795 5 0 days 00:00:07.006133246 +795 6 0 days 00:00:08.633725890 +795 7 0 days 00:00:07.537234213 +795 8 0 days 00:00:09.189302140 +795 9 0 days 00:00:17.982200426 +795 10 0 days 00:00:21.259225880 +795 11 0 days 00:00:10.923515584 +795 12 0 days 00:00:16.172643940 +795 13 0 days 00:00:08.400557885 +795 14 0 days 00:00:19.601817360 +795 15 0 days 00:00:07.251842730 +795 16 0 days 00:00:07.163877400 +795 17 0 days 00:00:13.994322425 +795 18 0 days 00:00:11.712469593 +795 19 0 days 00:00:15.706116393 +795 20 0 days 00:00:06.412966300 +795 21 0 days 00:00:12.316639453 +795 22 0 days 00:00:20.082965773 +795 23 0 days 00:00:07.782468192 +795 24 0 days 00:00:13.672619282 +795 25 0 days 00:00:18.200090710 +795 26 0 days 00:00:11.151903153 +795 27 0 
days 00:00:16.825624975 +795 28 0 days 00:00:18.875463906 +795 29 0 days 00:00:09.365658173 +795 30 0 days 00:00:17.976850666 +795 31 0 days 00:00:06.699477413 +795 32 0 days 00:00:17.474907415 +795 33 0 days 00:00:10.384167090 +795 34 0 days 00:00:11.432604660 +795 35 0 days 00:00:07.491188106 +795 36 0 days 00:00:24.214557568 +795 37 0 days 00:00:11.812633960 +796 1 0 days 00:00:04.217133740 +796 2 0 days 00:00:05.424159613 +796 3 0 days 00:00:07.331107032 +796 4 0 days 00:00:03.367309946 +796 5 0 days 00:00:03.942709130 +796 6 0 days 00:00:07.804928126 +796 7 0 days 00:00:04.313804720 +796 8 0 days 00:00:05.801229508 +796 9 0 days 00:00:07.786733186 +796 10 0 days 00:00:09.930182226 +796 11 0 days 00:00:08.488716746 +796 12 0 days 00:00:04.284363746 +796 13 0 days 00:00:10.111301320 +796 14 0 days 00:00:04.901981780 +796 15 0 days 00:00:05.618264990 +796 16 0 days 00:00:11.689118330 +796 17 0 days 00:00:08.462883366 +796 18 0 days 00:00:05.644827280 +796 19 0 days 00:00:07.238509276 +796 20 0 days 00:00:03.766479026 +796 21 0 days 00:00:09.273999210 +796 22 0 days 00:00:09.222694220 +796 23 0 days 00:00:10.091624480 +796 24 0 days 00:00:06.155598455 +796 25 0 days 00:00:04.115354443 +796 26 0 days 00:00:06.054581166 +796 27 0 days 00:00:04.831658312 +796 28 0 days 00:00:10.106327765 +796 29 0 days 00:00:05.145404500 +796 30 0 days 00:00:10.678088040 +796 31 0 days 00:00:09.960649600 +796 32 0 days 00:00:05.770028052 +796 33 0 days 00:00:05.540795720 +796 34 0 days 00:00:04.128527293 +796 35 0 days 00:00:04.024177333 +796 36 0 days 00:00:03.784754086 +796 37 0 days 00:00:05.523806426 +796 38 0 days 00:00:09.099027184 +796 39 0 days 00:00:05.695186560 +796 40 0 days 00:00:03.193073660 +796 41 0 days 00:00:05.860152140 +796 42 0 days 00:00:05.876728773 +796 43 0 days 00:00:04.524267288 +796 44 0 days 00:00:03.499074953 +796 45 0 days 00:00:04.745465446 +796 46 0 days 00:00:08.374108293 +796 47 0 days 00:00:03.782615885 +796 48 0 days 00:00:05.046016726 +796 49 0 
days 00:00:03.979136020 +796 50 0 days 00:00:03.869694740 +796 51 0 days 00:00:09.800602800 +796 52 0 days 00:00:08.310643286 +796 53 0 days 00:00:10.521004740 +796 54 0 days 00:00:09.045295693 +796 55 0 days 00:00:06.364540200 +796 56 0 days 00:00:05.109231146 +796 57 0 days 00:00:04.962642700 +796 58 0 days 00:00:04.934846973 +796 59 0 days 00:00:09.535362070 +796 60 0 days 00:00:06.501743066 +796 61 0 days 00:00:08.174616446 +796 62 0 days 00:00:11.249815984 +796 63 0 days 00:00:05.545546284 +796 64 0 days 00:00:03.538191833 +796 65 0 days 00:00:03.562878013 +796 66 0 days 00:00:10.726042520 +796 67 0 days 00:00:09.215240492 +796 68 0 days 00:00:03.854700080 +796 69 0 days 00:00:03.580265920 +796 70 0 days 00:00:10.215986526 +797 1 0 days 00:00:09.779482120 +797 2 0 days 00:00:15.817371053 +797 3 0 days 00:00:07.763798043 +797 4 0 days 00:00:09.478463993 +797 5 0 days 00:00:17.666998505 +797 6 0 days 00:00:15.653971306 +797 7 0 days 00:00:09.500767706 +797 8 0 days 00:00:18.604102480 +797 9 0 days 00:00:10.680623840 +797 10 0 days 00:00:07.390922380 +797 11 0 days 00:00:18.613785872 +797 12 0 days 00:00:07.187583700 +797 13 0 days 00:00:15.978026886 +797 14 0 days 00:00:11.210314440 +797 15 0 days 00:00:16.044218200 +797 16 0 days 00:00:16.085077500 +797 17 0 days 00:00:11.202277872 +797 18 0 days 00:00:17.860741490 +797 19 0 days 00:00:07.097637245 +797 20 0 days 00:00:06.395468126 +797 21 0 days 00:00:08.118838497 +797 22 0 days 00:00:10.683138090 +797 23 0 days 00:00:17.494261465 +797 24 0 days 00:00:15.304110686 +797 25 0 days 00:00:17.915685340 +797 26 0 days 00:00:11.957672113 +797 27 0 days 00:00:10.706852230 +797 28 0 days 00:00:09.397863873 +797 29 0 days 00:00:06.967989440 +797 30 0 days 00:00:07.763815726 +797 31 0 days 00:00:06.404369680 +797 32 0 days 00:00:18.261779604 +797 33 0 days 00:00:15.621890680 +797 34 0 days 00:00:06.421989946 +797 35 0 days 00:00:10.938097775 +797 36 0 days 00:00:07.077382665 +798 1 0 days 00:00:05.337775005 +798 2 0 days 
00:00:03.496068060 +798 3 0 days 00:00:05.475215220 +798 4 0 days 00:00:05.970055600 +798 5 0 days 00:00:04.073445910 +798 6 0 days 00:00:09.469608472 +798 7 0 days 00:00:08.693490235 +798 8 0 days 00:00:04.076849120 +798 9 0 days 00:00:04.810046520 +798 10 0 days 00:00:08.076346733 +798 11 0 days 00:00:03.427503786 +798 12 0 days 00:00:09.606824508 +798 13 0 days 00:00:05.459198395 +798 14 0 days 00:00:03.823794760 +798 15 0 days 00:00:03.271721480 +798 16 0 days 00:00:03.620503925 +798 17 0 days 00:00:05.427168280 +798 18 0 days 00:00:07.522495210 +798 19 0 days 00:00:03.680873745 +798 20 0 days 00:00:05.567968530 +798 21 0 days 00:00:03.634672205 +798 22 0 days 00:00:04.048206030 +798 23 0 days 00:00:03.983113768 +798 24 0 days 00:00:09.411336624 +798 25 0 days 00:00:03.237467113 +798 26 0 days 00:00:09.063731145 +798 27 0 days 00:00:08.130674540 +798 28 0 days 00:00:08.898107450 +798 29 0 days 00:00:05.475237455 +798 30 0 days 00:00:04.900537900 +798 31 0 days 00:00:07.915795920 +798 32 0 days 00:00:03.920810933 +798 33 0 days 00:00:09.687853603 +798 34 0 days 00:00:04.959037760 +798 35 0 days 00:00:05.542465735 +798 36 0 days 00:00:08.781016825 +798 37 0 days 00:00:05.335763660 +798 38 0 days 00:00:03.908546992 +798 39 0 days 00:00:03.962029892 +798 40 0 days 00:00:09.917763256 +798 41 0 days 00:00:04.253678413 +798 42 0 days 00:00:03.694646955 +798 43 0 days 00:00:04.010467088 +798 44 0 days 00:00:03.630156520 +798 45 0 days 00:00:05.371165895 +798 46 0 days 00:00:03.669788615 +798 47 0 days 00:00:05.408400495 +798 48 0 days 00:00:10.269477414 +798 49 0 days 00:00:05.662144200 +798 50 0 days 00:00:04.105562182 +798 51 0 days 00:00:03.273983973 +798 52 0 days 00:00:09.613184300 +798 53 0 days 00:00:07.956755740 +798 54 0 days 00:00:07.930842673 +798 55 0 days 00:00:07.868157806 +798 56 0 days 00:00:08.022530373 +798 57 0 days 00:00:07.806095733 +798 58 0 days 00:00:03.883471856 +798 59 0 days 00:00:07.948834073 +798 60 0 days 00:00:08.770732780 +798 61 0 days 
00:00:09.565982143 +798 62 0 days 00:00:08.775029640 +798 63 0 days 00:00:08.677447220 +798 64 0 days 00:00:08.004403080 +798 65 0 days 00:00:04.216450613 +798 66 0 days 00:00:05.379348855 +798 67 0 days 00:00:03.286260620 +798 68 0 days 00:00:05.017161720 +798 69 0 days 00:00:03.873994496 +799 1 0 days 00:00:13.139140180 +799 2 0 days 00:00:16.750845517 +799 3 0 days 00:00:36.419887240 +799 4 0 days 00:00:15.866583475 +799 5 0 days 00:00:15.855758300 +799 6 0 days 00:00:38.122832606 +799 7 0 days 00:00:37.908932116 +799 8 0 days 00:00:14.761889355 +799 9 0 days 00:00:19.588489286 +799 10 0 days 00:00:23.504551016 +799 11 0 days 00:00:16.307419366 +799 12 0 days 00:00:15.120557825 +799 13 0 days 00:00:15.858348000 +799 14 0 days 00:00:16.080410308 +799 15 0 days 00:00:19.587374553 +799 16 0 days 00:00:17.078051657 +799 17 0 days 00:00:31.825628160 +799 18 0 days 00:00:35.559611555 +800 1 0 days 00:00:23.342836726 +800 2 0 days 00:00:25.483931133 +800 3 0 days 00:00:17.303501052 +800 4 0 days 00:00:33.864141760 +800 5 0 days 00:00:17.897734096 +800 6 0 days 00:00:16.406392796 +800 7 0 days 00:00:33.799851430 +800 8 0 days 00:00:21.540152600 +800 9 0 days 00:00:36.835337100 +800 10 0 days 00:00:15.325116920 +800 11 0 days 00:00:17.203464648 +800 12 0 days 00:00:16.273752840 +800 13 0 days 00:00:16.855814037 +800 14 0 days 00:00:37.468340846 +800 15 0 days 00:00:18.831323895 +801 1 0 days 00:00:10.161996172 +801 2 0 days 00:00:15.191263304 +801 3 0 days 00:00:12.781654333 +801 4 0 days 00:00:22.401336004 +801 5 0 days 00:00:09.609317180 +801 6 0 days 00:00:14.551464136 +801 7 0 days 00:00:12.005926526 +801 8 0 days 00:00:13.812270655 +801 9 0 days 00:00:10.103629185 +801 10 0 days 00:00:21.075087585 +801 11 0 days 00:00:13.997761840 +801 12 0 days 00:00:14.982677356 +801 13 0 days 00:00:09.048423786 +801 14 0 days 00:00:23.305768396 +801 15 0 days 00:00:10.664785588 +801 16 0 days 00:00:12.349060646 +801 17 0 days 00:00:13.913642750 +801 18 0 days 00:00:14.786344348 
+801 19 0 days 00:00:15.477077044 +801 20 0 days 00:00:12.130829906 +801 21 0 days 00:00:12.781346433 +801 22 0 days 00:00:14.261807000 +801 23 0 days 00:00:22.315492150 +801 24 0 days 00:00:13.558157380 +801 25 0 days 00:00:23.302908706 +802 1 0 days 00:00:21.246931640 +802 2 0 days 00:00:20.642714633 +802 3 0 days 00:00:13.230515850 +802 4 0 days 00:00:10.252523552 +802 5 0 days 00:00:09.652558440 +802 6 0 days 00:00:09.656284015 +802 7 0 days 00:00:15.351955713 +802 8 0 days 00:00:10.282631285 +802 9 0 days 00:00:21.192028680 +802 10 0 days 00:00:10.250504800 +802 11 0 days 00:00:21.237761435 +802 12 0 days 00:00:21.184660705 +802 13 0 days 00:00:11.294119656 +802 14 0 days 00:00:13.899737595 +802 15 0 days 00:00:10.394570940 +802 16 0 days 00:00:12.958473166 +802 17 0 days 00:00:10.008646224 +802 18 0 days 00:00:10.337492980 +802 19 0 days 00:00:13.621727695 +802 20 0 days 00:00:14.484259665 +802 21 0 days 00:00:12.443736300 +802 22 0 days 00:00:10.009258132 +802 23 0 days 00:00:23.100265745 +802 24 0 days 00:00:12.431680800 +802 25 0 days 00:00:12.614810893 +802 26 0 days 00:00:09.422292530 +802 27 0 days 00:00:09.704769955 +802 28 0 days 00:00:14.483501668 +802 29 0 days 00:00:20.650818746 +802 30 0 days 00:00:09.224871240 +802 31 0 days 00:00:14.492501410 +803 1 0 days 00:00:13.323621400 +803 2 0 days 00:00:19.726801146 +803 3 0 days 00:00:22.697585832 +803 4 0 days 00:00:15.130374000 +803 5 0 days 00:00:13.289931373 +803 6 0 days 00:00:30.023315106 +803 7 0 days 00:00:15.696840520 +803 8 0 days 00:00:24.224097105 +803 9 0 days 00:00:13.297608380 +803 10 0 days 00:00:19.691214440 +803 11 0 days 00:00:14.854892045 +803 12 0 days 00:00:19.087790713 +803 13 0 days 00:00:30.753866306 +803 14 0 days 00:00:15.771072665 +803 15 0 days 00:00:15.189113060 +803 16 0 days 00:00:13.294000306 +803 17 0 days 00:00:31.897087653 +803 18 0 days 00:00:16.073311512 +803 19 0 days 00:00:15.849624505 +803 20 0 days 00:00:19.082920926 +803 21 0 days 00:00:33.696139890 +803 22 0 
days 00:00:30.755724433 +803 23 0 days 00:00:19.078049660 +803 24 0 days 00:00:13.604429173 +804 1 0 days 00:00:14.673166280 +804 2 0 days 00:00:14.620842844 +804 3 0 days 00:00:21.045257160 +804 4 0 days 00:00:12.679885793 +804 5 0 days 00:00:08.577565046 +804 6 0 days 00:00:11.471635877 +804 7 0 days 00:00:18.860378433 +804 8 0 days 00:00:08.386297186 +804 9 0 days 00:00:13.364839430 +804 10 0 days 00:00:19.960433613 +804 11 0 days 00:00:08.518931820 +804 12 0 days 00:00:13.747838800 +804 13 0 days 00:00:14.170011225 +804 14 0 days 00:00:09.976218480 +804 15 0 days 00:00:13.361022035 +804 16 0 days 00:00:25.203355233 +804 17 0 days 00:00:13.548550595 +804 18 0 days 00:00:09.358210440 +804 19 0 days 00:00:09.349234390 +804 20 0 days 00:00:10.783688368 +804 21 0 days 00:00:08.571700566 +804 22 0 days 00:00:09.301430620 +804 23 0 days 00:00:13.806961085 +804 24 0 days 00:00:22.789325595 +804 25 0 days 00:00:25.203431643 +804 26 0 days 00:00:09.926269405 +804 27 0 days 00:00:09.924089825 +804 28 0 days 00:00:09.354549330 +804 29 0 days 00:00:10.495801440 +804 30 0 days 00:00:10.958866220 +805 1 0 days 00:00:00.122897370 +805 2 0 days 00:00:00.167715620 +805 4 0 days 00:00:00.173809104 +805 5 0 days 00:00:00.120785642 +805 6 0 days 00:00:00.122440212 +805 7 0 days 00:00:00.183584275 +805 8 0 days 00:00:00.122731008 +805 9 0 days 00:00:00.174551862 +805 10 0 days 00:00:00.138287182 +805 11 0 days 00:00:00.136622437 +805 12 0 days 00:00:00.122487358 +805 13 0 days 00:00:00.119926237 +805 14 0 days 00:00:00.122589860 +805 15 0 days 00:00:00.138386813 +805 16 0 days 00:00:00.124941056 +805 17 0 days 00:00:00.135063902 +805 18 0 days 00:00:00.173714209 +805 19 0 days 00:00:00.148940733 +805 20 0 days 00:00:00.120951711 +805 22 0 days 00:00:00.145500914 +805 23 0 days 00:00:00.129457210 +805 24 0 days 00:00:00.148540131 +805 25 0 days 00:00:00.182706074 +805 26 0 days 00:00:00.180184511 +805 27 0 days 00:00:00.123454250 +805 29 0 days 00:00:00.103326146 +805 30 0 days 
00:00:00.142013525 +805 32 0 days 00:00:00.187121814 +805 33 0 days 00:00:00.184718273 +805 34 0 days 00:00:00.165300026 +805 37 0 days 00:00:00.168086648 +805 38 0 days 00:00:00.123257058 +805 42 0 days 00:00:00.189882015 +805 43 0 days 00:00:00.119696904 +805 44 0 days 00:00:00.120332358 +805 45 0 days 00:00:00.146636962 +805 46 0 days 00:00:00.171243568 +805 47 0 days 00:00:00.123721224 +805 48 0 days 00:00:00.163567090 +805 49 0 days 00:00:00.161877111 +805 50 0 days 00:00:00.174746622 +805 51 0 days 00:00:00.119346417 +805 53 0 days 00:00:00.123108064 +805 54 0 days 00:00:00.119297770 +805 56 0 days 00:00:00.140203297 +805 57 0 days 00:00:00.129650512 +805 58 0 days 00:00:00.178042461 +805 61 0 days 00:00:00.138862300 +805 65 0 days 00:00:00.124896980 +805 66 0 days 00:00:00.131736170 +805 67 0 days 00:00:00.136414912 +805 68 0 days 00:00:00.146094260 +805 70 0 days 00:00:00.092393260 +805 72 0 days 00:00:00.139436950 +805 73 0 days 00:00:00.120876881 +805 76 0 days 00:00:00.150488354 +805 77 0 days 00:00:00.127254323 +805 78 0 days 00:00:00.164081254 +805 79 0 days 00:00:00.179107461 +805 80 0 days 00:00:00.144794411 +805 82 0 days 00:00:00.152668600 +805 84 0 days 00:00:00.092935133 +805 85 0 days 00:00:00.173494657 +805 86 0 days 00:00:00.116788945 +805 88 0 days 00:00:00.145011914 +805 90 0 days 00:00:00.137461085 +805 92 0 days 00:00:00.183387596 +805 95 0 days 00:00:00.184192827 +805 97 0 days 00:00:00.139799254 +805 98 0 days 00:00:00.173009925 +805 99 0 days 00:00:00.138994197 +805 100 0 days 00:00:00.106820253 +806 2 0 days 00:00:00.145671497 +806 3 0 days 00:00:00.127065660 +806 4 0 days 00:00:00.153816338 +806 6 0 days 00:00:00.143475503 +806 7 0 days 00:00:00.125658535 +806 8 0 days 00:00:00.150688870 +806 9 0 days 00:00:00.112945623 +806 10 0 days 00:00:00.120300581 +806 11 0 days 00:00:00.187939980 +806 12 0 days 00:00:00.193589123 +806 13 0 days 00:00:00.126180972 +806 14 0 days 00:00:00.122948829 +806 16 0 days 00:00:00.196694943 +806 18 0 days 
00:00:00.141613730 +806 21 0 days 00:00:00.092552853 +806 22 0 days 00:00:00.181019711 +806 23 0 days 00:00:00.172659683 +806 24 0 days 00:00:00.162133608 +806 25 0 days 00:00:00.133600575 +806 26 0 days 00:00:00.152721087 +806 27 0 days 00:00:00.193490674 +806 28 0 days 00:00:00.150426281 +806 30 0 days 00:00:00.148611538 +806 31 0 days 00:00:00.096159473 +806 32 0 days 00:00:00.181480192 +806 33 0 days 00:00:00.182047406 +806 34 0 days 00:00:00.168225695 +806 35 0 days 00:00:00.139102825 +806 37 0 days 00:00:00.143405333 +806 38 0 days 00:00:00.123874322 +806 41 0 days 00:00:00.124590601 +806 42 0 days 00:00:00.103655170 +806 44 0 days 00:00:00.177957233 +806 45 0 days 00:00:00.200812756 +806 46 0 days 00:00:00.149607126 +806 47 0 days 00:00:00.201204797 +806 48 0 days 00:00:00.195618826 +806 49 0 days 00:00:00.179660660 +806 50 0 days 00:00:00.126037328 +806 52 0 days 00:00:00.191866872 +806 54 0 days 00:00:00.207150678 +806 55 0 days 00:00:00.093755633 +806 56 0 days 00:00:00.146615120 +806 58 0 days 00:00:00.153898532 +806 59 0 days 00:00:00.188013878 +806 60 0 days 00:00:00.143287720 +806 61 0 days 00:00:00.140901477 +806 62 0 days 00:00:00.181579248 +806 63 0 days 00:00:00.120953788 +806 64 0 days 00:00:00.199437651 +806 65 0 days 00:00:00.163511668 +806 66 0 days 00:00:00.166092108 +806 67 0 days 00:00:00.156637818 +806 68 0 days 00:00:00.109706493 +806 69 0 days 00:00:00.139163035 +806 70 0 days 00:00:00.119633806 +806 71 0 days 00:00:00.140406573 +806 72 0 days 00:00:00.127396030 +806 74 0 days 00:00:00.180894938 +806 77 0 days 00:00:00.127440520 +806 78 0 days 00:00:00.177725617 +806 79 0 days 00:00:00.179994435 +806 80 0 days 00:00:00.141890378 +806 81 0 days 00:00:00.166286660 +806 82 0 days 00:00:00.109468866 +806 83 0 days 00:00:00.190054237 +806 84 0 days 00:00:00.200977644 +806 85 0 days 00:00:00.126433255 +806 86 0 days 00:00:00.141680993 +806 87 0 days 00:00:00.124691612 +806 89 0 days 00:00:00.174150405 +806 90 0 days 00:00:00.162512373 +806 91 
0 days 00:00:00.109423566 +806 92 0 days 00:00:00.136750042 +806 93 0 days 00:00:00.214483613 +806 94 0 days 00:00:00.184312764 +806 96 0 days 00:00:00.139896140 +806 97 0 days 00:00:00.187047447 +806 98 0 days 00:00:00.143157008 +806 99 0 days 00:00:00.201820371 +807 1 0 days 00:00:00.085280047 +807 3 0 days 00:00:00.072526129 +807 4 0 days 00:00:00.095244557 +807 6 0 days 00:00:00.059005855 +807 7 0 days 00:00:00.063166526 +807 8 0 days 00:00:00.096072842 +807 9 0 days 00:00:00.078109092 +807 10 0 days 00:00:00.084815424 +807 11 0 days 00:00:00.114636892 +807 12 0 days 00:00:00.069142325 +807 14 0 days 00:00:00.078299689 +807 16 0 days 00:00:00.100064515 +807 17 0 days 00:00:00.106430280 +807 19 0 days 00:00:00.078754816 +807 20 0 days 00:00:00.068530830 +807 21 0 days 00:00:00.105024900 +807 23 0 days 00:00:00.101186648 +807 24 0 days 00:00:00.055726900 +807 25 0 days 00:00:00.076447085 +807 26 0 days 00:00:00.107924981 +807 27 0 days 00:00:00.059552760 +807 28 0 days 00:00:00.067210547 +807 29 0 days 00:00:00.072980556 +807 31 0 days 00:00:00.054077166 +807 33 0 days 00:00:00.080977995 +807 34 0 days 00:00:00.103429346 +807 35 0 days 00:00:00.053526353 +807 36 0 days 00:00:00.097972822 +807 37 0 days 00:00:00.096124935 +807 38 0 days 00:00:00.058473500 +807 39 0 days 00:00:00.055155540 +807 40 0 days 00:00:00.080117403 +807 41 0 days 00:00:00.098772493 +807 42 0 days 00:00:00.055578080 +807 43 0 days 00:00:00.098660227 +807 44 0 days 00:00:00.054863993 +807 45 0 days 00:00:00.052611360 +807 46 0 days 00:00:00.053691400 +807 47 0 days 00:00:00.080942106 +807 48 0 days 00:00:00.111554517 +807 49 0 days 00:00:00.055258326 +807 50 0 days 00:00:00.070846485 +807 51 0 days 00:00:00.065876650 +807 52 0 days 00:00:00.101975600 +807 53 0 days 00:00:00.101071061 +807 54 0 days 00:00:00.064551420 +807 55 0 days 00:00:00.102448516 +807 56 0 days 00:00:00.071132558 +807 57 0 days 00:00:00.115098455 +807 59 0 days 00:00:00.070003360 +807 61 0 days 00:00:00.079109585 +807 62 
0 days 00:00:00.096784953 +807 63 0 days 00:00:00.080522407 +807 65 0 days 00:00:00.110965234 +807 66 0 days 00:00:00.107383223 +807 67 0 days 00:00:00.072402937 +807 68 0 days 00:00:00.098840800 +807 69 0 days 00:00:00.067695100 +807 70 0 days 00:00:00.099139229 +807 71 0 days 00:00:00.071836047 +807 72 0 days 00:00:00.100053241 +807 73 0 days 00:00:00.079120468 +807 74 0 days 00:00:00.078068957 +807 75 0 days 00:00:00.082686500 +807 76 0 days 00:00:00.087335718 +807 79 0 days 00:00:00.075443913 +807 80 0 days 00:00:00.067143350 +807 81 0 days 00:00:00.080171266 +807 82 0 days 00:00:00.081899221 +807 83 0 days 00:00:00.111065596 +807 84 0 days 00:00:00.096309248 +807 87 0 days 00:00:00.100276825 +807 88 0 days 00:00:00.066531913 +807 89 0 days 00:00:00.070773084 +807 90 0 days 00:00:00.052136386 +807 92 0 days 00:00:00.063417074 +807 93 0 days 00:00:00.100991781 +807 95 0 days 00:00:00.094028265 +807 96 0 days 00:00:00.093443601 +807 97 0 days 00:00:00.096744510 +807 98 0 days 00:00:00.070064646 +807 99 0 days 00:00:00.094466329 +807 100 0 days 00:00:00.066957980 +808 1 0 days 00:00:00.072009880 +808 2 0 days 00:00:00.107366938 +808 3 0 days 00:00:00.099963025 +808 4 0 days 00:00:00.092974793 +808 5 0 days 00:00:00.090616396 +808 6 0 days 00:00:00.057838760 +808 7 0 days 00:00:00.076893827 +808 8 0 days 00:00:00.101030634 +808 9 0 days 00:00:00.076039836 +808 10 0 days 00:00:00.062395100 +808 11 0 days 00:00:00.054199953 +808 12 0 days 00:00:00.107923145 +808 13 0 days 00:00:00.077319871 +808 14 0 days 00:00:00.068907666 +808 15 0 days 00:00:00.060908533 +808 16 0 days 00:00:00.107853402 +808 17 0 days 00:00:00.084173211 +808 19 0 days 00:00:00.078179872 +808 21 0 days 00:00:00.086175723 +808 22 0 days 00:00:00.082125767 +808 24 0 days 00:00:00.052277693 +808 25 0 days 00:00:00.080127113 +808 26 0 days 00:00:00.103843367 +808 27 0 days 00:00:00.101661781 +808 28 0 days 00:00:00.054084606 +808 29 0 days 00:00:00.076171152 +808 30 0 days 00:00:00.099118909 +808 31 0 
days 00:00:00.060203313 +808 33 0 days 00:00:00.076474245 +808 34 0 days 00:00:00.099861725 +808 35 0 days 00:00:00.060726773 +808 36 0 days 00:00:00.099962826 +808 37 0 days 00:00:00.056943573 +808 38 0 days 00:00:00.076741365 +808 39 0 days 00:00:00.097047435 +808 40 0 days 00:00:00.079847254 +808 41 0 days 00:00:00.054143493 +808 42 0 days 00:00:00.083536078 +808 44 0 days 00:00:00.070304452 +808 45 0 days 00:00:00.089119904 +808 46 0 days 00:00:00.058713392 +808 47 0 days 00:00:00.064873796 +808 48 0 days 00:00:00.076812506 +808 49 0 days 00:00:00.052319086 +808 51 0 days 00:00:00.052496193 +808 52 0 days 00:00:00.080386284 +808 53 0 days 00:00:00.107535522 +808 54 0 days 00:00:00.062758277 +808 55 0 days 00:00:00.079599846 +808 56 0 days 00:00:00.104216126 +808 57 0 days 00:00:00.055562106 +808 58 0 days 00:00:00.079143120 +808 59 0 days 00:00:00.051891313 +808 60 0 days 00:00:00.104495396 +808 61 0 days 00:00:00.110398047 +808 62 0 days 00:00:00.067662735 +808 63 0 days 00:00:00.104351725 +808 64 0 days 00:00:00.061437133 +808 65 0 days 00:00:00.059600584 +808 67 0 days 00:00:00.084599590 +808 68 0 days 00:00:00.105915218 +808 69 0 days 00:00:00.076899192 +808 71 0 days 00:00:00.105079720 +808 72 0 days 00:00:00.062880620 +808 73 0 days 00:00:00.086505033 +808 74 0 days 00:00:00.075654663 +808 75 0 days 00:00:00.098965201 +808 76 0 days 00:00:00.077810513 +808 77 0 days 00:00:00.051991713 +808 78 0 days 00:00:00.071972170 +808 79 0 days 00:00:00.068413835 +808 80 0 days 00:00:00.079604472 +808 82 0 days 00:00:00.053873233 +808 85 0 days 00:00:00.062228153 +808 86 0 days 00:00:00.062192193 +808 87 0 days 00:00:00.081233782 +808 88 0 days 00:00:00.066329721 +808 89 0 days 00:00:00.078550789 +808 90 0 days 00:00:00.069954742 +808 92 0 days 00:00:00.052601540 +808 93 0 days 00:00:00.112496737 +808 94 0 days 00:00:00.085730403 +808 95 0 days 00:00:00.060379900 +808 96 0 days 00:00:00.053348033 +808 97 0 days 00:00:00.116920117 +808 98 0 days 00:00:00.051552000 
+808 100 0 days 00:00:00.069367567 +809 1 0 days 00:00:00.142323030 +809 2 0 days 00:00:00.136107978 +809 3 0 days 00:00:00.162956790 +809 4 0 days 00:00:00.177207932 +809 5 0 days 00:00:00.168629711 +809 6 0 days 00:00:00.172226988 +809 7 0 days 00:00:00.140037271 +809 8 0 days 00:00:00.139146948 +809 9 0 days 00:00:00.139001638 +809 10 0 days 00:00:00.172423374 +809 11 0 days 00:00:00.136607330 +809 12 0 days 00:00:00.129363597 +809 13 0 days 00:00:00.134586586 +809 14 0 days 00:00:00.115831965 +809 15 0 days 00:00:00.114976601 +809 16 0 days 00:00:00.118408202 +809 17 0 days 00:00:00.125702913 +809 18 0 days 00:00:00.117004555 +809 19 0 days 00:00:00.132716773 +809 20 0 days 00:00:00.131554627 +809 21 0 days 00:00:00.138544950 +809 22 0 days 00:00:00.163925217 +809 23 0 days 00:00:00.137707700 +809 24 0 days 00:00:00.166045660 +809 25 0 days 00:00:00.171706888 +809 26 0 days 00:00:00.175244372 +809 27 0 days 00:00:00.134562425 +809 28 0 days 00:00:00.131773197 +809 29 0 days 00:00:00.119228137 +809 30 0 days 00:00:00.167422794 +809 31 0 days 00:00:00.176322740 +809 32 0 days 00:00:00.174872955 +809 33 0 days 00:00:00.132000177 +809 34 0 days 00:00:00.170645804 +809 35 0 days 00:00:00.165643011 +809 36 0 days 00:00:00.172606314 +809 37 0 days 00:00:00.129058782 +809 38 0 days 00:00:00.134109935 +809 39 0 days 00:00:00.137860523 +809 40 0 days 00:00:00.111877832 +809 41 0 days 00:00:00.163114254 +809 42 0 days 00:00:00.134103886 +809 43 0 days 00:00:00.127638983 +809 44 0 days 00:00:00.133387115 +809 45 0 days 00:00:00.139216088 +809 46 0 days 00:00:00.134929098 +809 47 0 days 00:00:00.157981380 +809 48 0 days 00:00:00.133300666 +809 49 0 days 00:00:00.118205726 +809 50 0 days 00:00:00.119392068 +809 52 0 days 00:00:00.138505287 +809 53 0 days 00:00:00.118006936 +809 54 0 days 00:00:00.131612546 +809 55 0 days 00:00:00.138607709 +809 56 0 days 00:00:00.116153881 +809 57 0 days 00:00:00.135953286 +809 58 0 days 00:00:00.168701647 +809 59 0 days 00:00:00.115421165 
+809 61 0 days 00:00:00.135556162 +809 62 0 days 00:00:00.116851540 +809 63 0 days 00:00:00.142252242 +809 64 0 days 00:00:00.115649500 +809 65 0 days 00:00:00.137139797 +809 66 0 days 00:00:00.138779014 +809 67 0 days 00:00:00.119051462 +809 68 0 days 00:00:00.116617568 +809 69 0 days 00:00:00.135818591 +809 70 0 days 00:00:00.172648446 +809 71 0 days 00:00:00.113199583 +809 72 0 days 00:00:00.164397291 +809 73 0 days 00:00:00.134673568 +809 74 0 days 00:00:00.132800677 +809 75 0 days 00:00:00.165115845 +809 76 0 days 00:00:00.137264476 +809 77 0 days 00:00:00.169052742 +809 78 0 days 00:00:00.131881310 +809 79 0 days 00:00:00.160424502 +809 80 0 days 00:00:00.171876854 +809 81 0 days 00:00:00.172589410 +809 82 0 days 00:00:00.138027770 +809 83 0 days 00:00:00.116254132 +809 84 0 days 00:00:00.119196734 +809 85 0 days 00:00:00.138786341 +809 86 0 days 00:00:00.165480900 +809 87 0 days 00:00:00.161523270 +809 88 0 days 00:00:00.136512718 +809 89 0 days 00:00:00.161269071 +809 90 0 days 00:00:00.115806554 +809 91 0 days 00:00:00.170071174 +809 92 0 days 00:00:00.134403610 +809 93 0 days 00:00:00.120133484 +809 94 0 days 00:00:00.176084170 +809 95 0 days 00:00:00.138593248 +809 96 0 days 00:00:00.118223329 +809 97 0 days 00:00:00.134603834 +809 98 0 days 00:00:00.137577413 +809 99 0 days 00:00:00.171575454 +809 100 0 days 00:00:00.170774517 +810 1 0 days 00:00:00.065676611 +810 2 0 days 00:00:00.067528480 +810 3 0 days 00:00:00.074167228 +810 4 0 days 00:00:00.076147155 +810 5 0 days 00:00:00.098175332 +810 6 0 days 00:00:00.066191890 +810 7 0 days 00:00:00.093743035 +810 8 0 days 00:00:00.073817608 +810 9 0 days 00:00:00.093041365 +810 10 0 days 00:00:00.097105744 +810 12 0 days 00:00:00.094593062 +810 13 0 days 00:00:00.091551540 +810 14 0 days 00:00:00.073332489 +810 15 0 days 00:00:00.071643655 +810 16 0 days 00:00:00.065834583 +810 17 0 days 00:00:00.066948468 +810 18 0 days 00:00:00.076858256 +810 19 0 days 00:00:00.063568788 +810 20 0 days 00:00:00.075733518 
+810 21 0 days 00:00:00.095256381 +810 22 0 days 00:00:00.075789981 +810 24 0 days 00:00:00.077153341 +810 25 0 days 00:00:00.066368189 +810 26 0 days 00:00:00.075082438 +810 27 0 days 00:00:00.064261835 +810 28 0 days 00:00:00.096548820 +810 29 0 days 00:00:00.077045855 +810 30 0 days 00:00:00.076586760 +810 31 0 days 00:00:00.066278723 +810 32 0 days 00:00:00.095672175 +810 33 0 days 00:00:00.094666533 +810 34 0 days 00:00:00.099430715 +810 35 0 days 00:00:00.093013542 +810 36 0 days 00:00:00.066722168 +810 37 0 days 00:00:00.095304251 +810 38 0 days 00:00:00.094608633 +810 39 0 days 00:00:00.068003649 +810 40 0 days 00:00:00.100558035 +810 41 0 days 00:00:00.062910622 +810 42 0 days 00:00:00.095501413 +810 43 0 days 00:00:00.065029200 +810 44 0 days 00:00:00.097072652 +810 45 0 days 00:00:00.099279791 +810 46 0 days 00:00:00.065455926 +810 47 0 days 00:00:00.076536830 +810 48 0 days 00:00:00.067170405 +810 49 0 days 00:00:00.067061809 +810 50 0 days 00:00:00.065276920 +810 51 0 days 00:00:00.076334158 +810 52 0 days 00:00:00.075972218 +810 54 0 days 00:00:00.097670286 +810 55 0 days 00:00:00.091579890 +810 56 0 days 00:00:00.069626477 +810 57 0 days 00:00:00.074450801 +810 58 0 days 00:00:00.076954588 +810 59 0 days 00:00:00.076743196 +810 61 0 days 00:00:00.089995652 +810 62 0 days 00:00:00.073148003 +810 63 0 days 00:00:00.076375124 +810 64 0 days 00:00:00.096017260 +810 65 0 days 00:00:00.100365132 +810 66 0 days 00:00:00.073081936 +810 67 0 days 00:00:00.098120978 +810 69 0 days 00:00:00.065884599 +810 70 0 days 00:00:00.077532289 +810 71 0 days 00:00:00.065496665 +810 72 0 days 00:00:00.065836634 +810 73 0 days 00:00:00.067729305 +810 74 0 days 00:00:00.064453713 +810 75 0 days 00:00:00.071641790 +810 76 0 days 00:00:00.076293571 +810 77 0 days 00:00:00.076496304 +810 78 0 days 00:00:00.098485744 +810 79 0 days 00:00:00.099422900 +810 80 0 days 00:00:00.076236722 +810 81 0 days 00:00:00.096891022 +810 82 0 days 00:00:00.076558852 +810 83 0 days 
00:00:00.096067435 +810 84 0 days 00:00:00.089697855 +810 85 0 days 00:00:00.064597360 +810 86 0 days 00:00:00.077503898 +810 87 0 days 00:00:00.075285790 +810 88 0 days 00:00:00.094249826 +810 89 0 days 00:00:00.095605464 +810 90 0 days 00:00:00.073314617 +810 91 0 days 00:00:00.063781332 +810 92 0 days 00:00:00.096205995 +810 93 0 days 00:00:00.076714982 +810 94 0 days 00:00:00.065749554 +810 95 0 days 00:00:00.093720995 +810 96 0 days 00:00:00.098127140 +810 97 0 days 00:00:00.070871311 +810 98 0 days 00:00:00.072936340 +810 99 0 days 00:00:00.065566792 +810 100 0 days 00:00:00.073162374 +811 1 0 days 00:00:00.326739946 +811 2 0 days 00:00:00.330443495 +811 4 0 days 00:00:00.243779852 +811 5 0 days 00:00:00.650791665 +811 6 0 days 00:00:00.224570640 +811 7 0 days 00:00:00.433608145 +811 8 0 days 00:00:00.384209022 +811 9 0 days 00:00:00.626255544 +811 10 0 days 00:00:00.425842782 +811 11 0 days 00:00:00.394242798 +811 12 0 days 00:00:00.405310257 +811 15 0 days 00:00:00.626372392 +811 17 0 days 00:00:00.572078915 +811 18 0 days 00:00:00.481441426 +811 20 0 days 00:00:00.622421188 +811 21 0 days 00:00:00.254829371 +811 23 0 days 00:00:00.717322693 +811 24 0 days 00:00:00.240040049 +811 25 0 days 00:00:00.333835520 +811 27 0 days 00:00:00.604811840 +811 30 0 days 00:00:00.407382051 +811 31 0 days 00:00:00.232220466 +811 32 0 days 00:00:00.236081345 +811 33 0 days 00:00:00.632629576 +811 34 0 days 00:00:00.233140474 +811 35 0 days 00:00:00.275002377 +811 36 0 days 00:00:00.279198220 +811 38 0 days 00:00:00.680792027 +811 39 0 days 00:00:00.428510733 +811 40 0 days 00:00:00.426954942 +811 41 0 days 00:00:00.404109050 +811 42 0 days 00:00:00.713034170 +811 43 0 days 00:00:00.671383740 +811 44 0 days 00:00:00.408672195 +811 45 0 days 00:00:00.194593706 +811 46 0 days 00:00:00.566538720 +811 47 0 days 00:00:00.259365278 +811 48 0 days 00:00:00.301791780 +811 49 0 days 00:00:00.202102433 +811 51 0 days 00:00:00.395294274 +811 52 0 days 00:00:00.248740700 +811 53 0 days 
00:00:00.687687237 +811 54 0 days 00:00:00.324447490 +811 55 0 days 00:00:00.249748892 +811 56 0 days 00:00:00.612915556 +811 57 0 days 00:00:00.641653081 +811 58 0 days 00:00:00.248104428 +811 60 0 days 00:00:00.221613393 +811 61 0 days 00:00:00.658142532 +811 62 0 days 00:00:00.362963942 +811 65 0 days 00:00:00.213145257 +811 66 0 days 00:00:00.278262852 +811 67 0 days 00:00:00.279549893 +811 68 0 days 00:00:00.555424312 +811 69 0 days 00:00:00.521121980 +811 70 0 days 00:00:00.216312828 +811 71 0 days 00:00:00.325234404 +811 72 0 days 00:00:00.285358240 +811 73 0 days 00:00:00.623127748 +811 74 0 days 00:00:00.352657175 +811 75 0 days 00:00:00.306601233 +811 76 0 days 00:00:00.469576522 +811 77 0 days 00:00:00.303459130 +811 78 0 days 00:00:00.366155817 +811 79 0 days 00:00:00.254648960 +811 80 0 days 00:00:00.384327093 +811 81 0 days 00:00:00.212094145 +811 82 0 days 00:00:00.208979877 +811 83 0 days 00:00:00.644319728 +811 84 0 days 00:00:00.691511738 +811 85 0 days 00:00:00.366221618 +811 86 0 days 00:00:00.315680876 +811 87 0 days 00:00:00.656704838 +811 88 0 days 00:00:00.174249226 +811 89 0 days 00:00:00.228874750 +811 90 0 days 00:00:00.661509666 +811 91 0 days 00:00:00.364242882 +811 92 0 days 00:00:00.163322206 +811 93 0 days 00:00:00.649021127 +811 94 0 days 00:00:00.202161760 +811 95 0 days 00:00:00.347465348 +811 96 0 days 00:00:00.355298921 +811 97 0 days 00:00:00.523350917 +811 98 0 days 00:00:00.369278257 +811 99 0 days 00:00:00.452052828 +811 100 0 days 00:00:00.350406630 +812 1 0 days 00:00:00.572913728 +812 2 0 days 00:00:00.578668760 +812 4 0 days 00:00:00.595640348 +812 5 0 days 00:00:00.348106535 +812 6 0 days 00:00:00.706212377 +812 7 0 days 00:00:00.215245246 +812 8 0 days 00:00:00.183565600 +812 9 0 days 00:00:00.698604969 +812 10 0 days 00:00:00.403847558 +812 13 0 days 00:00:00.700168316 +812 15 0 days 00:00:00.382306344 +812 17 0 days 00:00:00.340987690 +812 18 0 days 00:00:00.272693553 +812 19 0 days 00:00:00.628184413 +812 21 0 days 
00:00:00.692272743 +812 22 0 days 00:00:00.591965816 +812 23 0 days 00:00:00.206933313 +812 24 0 days 00:00:00.277572706 +812 25 0 days 00:00:00.328770994 +812 26 0 days 00:00:00.344348814 +812 27 0 days 00:00:00.256876806 +812 29 0 days 00:00:00.363404948 +812 30 0 days 00:00:00.399890225 +812 31 0 days 00:00:00.417379807 +812 32 0 days 00:00:00.351701266 +812 33 0 days 00:00:00.565252415 +812 34 0 days 00:00:00.359679588 +812 35 0 days 00:00:00.178714973 +812 36 0 days 00:00:00.661440308 +812 37 0 days 00:00:00.678634945 +812 38 0 days 00:00:00.244629835 +812 39 0 days 00:00:00.221489480 +812 40 0 days 00:00:00.311482086 +812 41 0 days 00:00:00.324929240 +812 42 0 days 00:00:00.353672968 +812 43 0 days 00:00:00.305764046 +812 44 0 days 00:00:00.237748866 +812 45 0 days 00:00:00.548571840 +812 46 0 days 00:00:00.192501606 +812 47 0 days 00:00:00.205425336 +812 48 0 days 00:00:00.631922213 +812 49 0 days 00:00:00.659227938 +812 50 0 days 00:00:00.228522589 +812 51 0 days 00:00:00.251136807 +812 52 0 days 00:00:00.629998160 +812 53 0 days 00:00:00.242696826 +812 54 0 days 00:00:00.718039811 +812 55 0 days 00:00:00.219094066 +812 56 0 days 00:00:00.680740520 +812 57 0 days 00:00:00.457551753 +812 58 0 days 00:00:00.231959381 +812 60 0 days 00:00:00.544928075 +812 61 0 days 00:00:00.287620366 +812 62 0 days 00:00:00.430822883 +812 63 0 days 00:00:00.294042660 +812 64 0 days 00:00:00.392356053 +812 65 0 days 00:00:00.245630306 +812 66 0 days 00:00:00.714093840 +812 69 0 days 00:00:00.222334737 +812 70 0 days 00:00:00.179100233 +812 71 0 days 00:00:00.371748567 +812 73 0 days 00:00:00.363549942 +812 74 0 days 00:00:00.192345170 +812 75 0 days 00:00:00.364940620 +812 76 0 days 00:00:00.198918120 +812 77 0 days 00:00:00.550063410 +812 78 0 days 00:00:00.402649967 +812 79 0 days 00:00:00.590286700 +812 80 0 days 00:00:00.352043103 +812 81 0 days 00:00:00.419845893 +812 82 0 days 00:00:00.186962593 +812 84 0 days 00:00:00.685658042 +812 85 0 days 00:00:00.303333273 +812 87 
0 days 00:00:00.634292414 +812 89 0 days 00:00:00.285159463 +812 90 0 days 00:00:00.382904956 +812 91 0 days 00:00:00.249580186 +812 92 0 days 00:00:00.214750272 +812 93 0 days 00:00:00.605025245 +812 96 0 days 00:00:00.177169940 +812 99 0 days 00:00:00.220693572 +812 100 0 days 00:00:00.643517148 +813 1 0 days 00:00:00.341750515 +813 2 0 days 00:00:00.124158557 +813 3 0 days 00:00:00.088755960 +813 4 0 days 00:00:00.330482110 +813 5 0 days 00:00:00.174108522 +813 6 0 days 00:00:00.116476557 +813 7 0 days 00:00:00.138858138 +813 8 0 days 00:00:00.104380722 +813 9 0 days 00:00:00.267571375 +813 11 0 days 00:00:00.300279117 +813 12 0 days 00:00:00.313246400 +813 13 0 days 00:00:00.208289121 +813 14 0 days 00:00:00.186201713 +813 15 0 days 00:00:00.313704748 +813 17 0 days 00:00:00.180982148 +813 19 0 days 00:00:00.149570112 +813 20 0 days 00:00:00.095967526 +813 21 0 days 00:00:00.186943677 +813 22 0 days 00:00:00.343511078 +813 23 0 days 00:00:00.111918086 +813 25 0 days 00:00:00.119740652 +813 27 0 days 00:00:00.091992633 +813 28 0 days 00:00:00.138589696 +813 29 0 days 00:00:00.318948728 +813 30 0 days 00:00:00.175097233 +813 31 0 days 00:00:00.317107313 +813 32 0 days 00:00:00.327508112 +813 33 0 days 00:00:00.192329700 +813 34 0 days 00:00:00.286932812 +813 35 0 days 00:00:00.132179855 +813 37 0 days 00:00:00.160627416 +813 38 0 days 00:00:00.095880300 +813 39 0 days 00:00:00.342291608 +813 40 0 days 00:00:00.107516713 +813 41 0 days 00:00:00.321930507 +813 42 0 days 00:00:00.125869553 +813 44 0 days 00:00:00.190335851 +813 45 0 days 00:00:00.333145612 +813 46 0 days 00:00:00.195108017 +813 47 0 days 00:00:00.114859541 +813 48 0 days 00:00:00.173302211 +813 49 0 days 00:00:00.320246888 +813 50 0 days 00:00:00.103522313 +813 52 0 days 00:00:00.181783062 +813 53 0 days 00:00:00.297935886 +813 54 0 days 00:00:00.190224094 +813 55 0 days 00:00:00.343080080 +813 57 0 days 00:00:00.185810870 +813 58 0 days 00:00:00.183295198 +813 59 0 days 00:00:00.185592498 +813 60 0 
days 00:00:00.366571745 +813 61 0 days 00:00:00.115338048 +813 62 0 days 00:00:00.331854669 +813 63 0 days 00:00:00.203094682 +813 64 0 days 00:00:00.350062704 +813 65 0 days 00:00:00.322619254 +813 66 0 days 00:00:00.096630146 +813 67 0 days 00:00:00.206956131 +813 68 0 days 00:00:00.098880060 +813 69 0 days 00:00:00.214990986 +813 70 0 days 00:00:00.308151467 +813 71 0 days 00:00:00.121474084 +813 72 0 days 00:00:00.291840133 +813 73 0 days 00:00:00.195873487 +813 74 0 days 00:00:00.156126847 +813 75 0 days 00:00:00.325084111 +813 76 0 days 00:00:00.195571727 +813 77 0 days 00:00:00.318135040 +813 78 0 days 00:00:00.314264120 +813 79 0 days 00:00:00.183514454 +813 80 0 days 00:00:00.123600004 +813 81 0 days 00:00:00.343713847 +813 83 0 days 00:00:00.200469210 +813 85 0 days 00:00:00.090711740 +813 86 0 days 00:00:00.186172964 +813 87 0 days 00:00:00.096074315 +813 88 0 days 00:00:00.171466400 +813 89 0 days 00:00:00.193125181 +813 90 0 days 00:00:00.116469192 +813 91 0 days 00:00:00.131535580 +813 92 0 days 00:00:00.298657730 +813 93 0 days 00:00:00.329484928 +813 95 0 days 00:00:00.156633072 +813 96 0 days 00:00:00.109049693 +813 97 0 days 00:00:00.133188755 +813 98 0 days 00:00:00.092196246 +813 99 0 days 00:00:00.191122144 +813 100 0 days 00:00:00.142620187 +814 1 0 days 00:00:00.111142313 +814 2 0 days 00:00:00.169932032 +814 3 0 days 00:00:00.161273480 +814 4 0 days 00:00:00.121513231 +814 6 0 days 00:00:00.200742174 +814 8 0 days 00:00:00.108598680 +814 10 0 days 00:00:00.340404145 +814 12 0 days 00:00:00.124767030 +814 13 0 days 00:00:00.355448587 +814 14 0 days 00:00:00.203672295 +814 16 0 days 00:00:00.219111524 +814 18 0 days 00:00:00.123519290 +814 19 0 days 00:00:00.186073804 +814 20 0 days 00:00:00.385678375 +814 21 0 days 00:00:00.191081730 +814 22 0 days 00:00:00.353272403 +814 23 0 days 00:00:00.100732980 +814 24 0 days 00:00:00.199898086 +814 25 0 days 00:00:00.111784512 +814 26 0 days 00:00:00.212280760 +814 27 0 days 00:00:00.323972008 +814 28 
0 days 00:00:00.347874005 +814 29 0 days 00:00:00.309166000 +814 30 0 days 00:00:00.136815276 +814 31 0 days 00:00:00.188851888 +814 32 0 days 00:00:00.331466722 +814 33 0 days 00:00:00.120127197 +814 34 0 days 00:00:00.104760393 +814 36 0 days 00:00:00.351897055 +814 38 0 days 00:00:00.190156286 +814 39 0 days 00:00:00.293764500 +814 40 0 days 00:00:00.192601140 +814 41 0 days 00:00:00.194572801 +814 42 0 days 00:00:00.131557665 +814 43 0 days 00:00:00.116743953 +814 44 0 days 00:00:00.321287945 +814 46 0 days 00:00:00.122203734 +814 47 0 days 00:00:00.115352432 +814 49 0 days 00:00:00.200787230 +814 51 0 days 00:00:00.192801218 +814 52 0 days 00:00:00.221155763 +814 53 0 days 00:00:00.203322466 +814 54 0 days 00:00:00.093823053 +814 56 0 days 00:00:00.185344400 +814 57 0 days 00:00:00.100500826 +814 58 0 days 00:00:00.320533760 +814 59 0 days 00:00:00.121004009 +814 60 0 days 00:00:00.301451636 +814 61 0 days 00:00:00.323860810 +814 62 0 days 00:00:00.337228792 +814 63 0 days 00:00:00.094591593 +814 64 0 days 00:00:00.185915745 +814 65 0 days 00:00:00.115519495 +814 66 0 days 00:00:00.335497538 +814 68 0 days 00:00:00.355576336 +814 70 0 days 00:00:00.124480410 +814 71 0 days 00:00:00.350497072 +814 72 0 days 00:00:00.186896218 +814 73 0 days 00:00:00.095200093 +814 74 0 days 00:00:00.278661220 +814 75 0 days 00:00:00.118292861 +814 76 0 days 00:00:00.180331976 +814 77 0 days 00:00:00.210995675 +814 78 0 days 00:00:00.152404760 +814 80 0 days 00:00:00.096457440 +814 82 0 days 00:00:00.111558814 +814 83 0 days 00:00:00.357687908 +814 84 0 days 00:00:00.100126780 +814 86 0 days 00:00:00.195311038 +814 87 0 days 00:00:00.110861174 +814 88 0 days 00:00:00.176411216 +814 89 0 days 00:00:00.196752904 +814 90 0 days 00:00:00.321696556 +814 91 0 days 00:00:00.209445580 +814 92 0 days 00:00:00.096176620 +814 94 0 days 00:00:00.126451980 +814 95 0 days 00:00:00.166297073 +814 96 0 days 00:00:00.344176684 +814 97 0 days 00:00:00.282120790 +814 98 0 days 00:00:00.202449090 
+814 100 0 days 00:00:00.315791240 +815 1 0 days 00:00:00.763641175 +815 2 0 days 00:00:00.788209712 +815 3 0 days 00:00:00.724592485 +815 4 0 days 00:00:00.472851457 +815 5 0 days 00:00:00.401801650 +815 6 0 days 00:00:00.868975332 +815 7 0 days 00:00:00.491535680 +815 8 0 days 00:00:00.836998788 +815 9 0 days 00:00:00.273141416 +815 10 0 days 00:00:00.261956410 +815 11 0 days 00:00:00.764145150 +815 12 0 days 00:00:00.862574864 +815 13 0 days 00:00:00.929698396 +815 14 0 days 00:00:00.262058180 +815 15 0 days 00:00:00.895079344 +815 16 0 days 00:00:00.444425608 +815 17 0 days 00:00:00.281502196 +815 18 0 days 00:00:00.503756173 +815 19 0 days 00:00:00.445795553 +815 20 0 days 00:00:00.859260403 +815 21 0 days 00:00:00.292120910 +815 22 0 days 00:00:00.290117843 +815 23 0 days 00:00:00.309299745 +815 24 0 days 00:00:00.273679430 +815 25 0 days 00:00:00.482480510 +815 26 0 days 00:00:00.239010595 +815 27 0 days 00:00:00.448442005 +815 28 0 days 00:00:00.490707770 +815 29 0 days 00:00:00.971348093 +815 30 0 days 00:00:00.435653340 +815 31 0 days 00:00:00.234635335 +815 32 0 days 00:00:00.854448340 +815 33 0 days 00:00:00.553508108 +815 34 0 days 00:00:00.522761120 +815 35 0 days 00:00:00.264469980 +815 36 0 days 00:00:00.830335084 +815 37 0 days 00:00:00.267542954 +815 38 0 days 00:00:00.715362195 +815 39 0 days 00:00:00.335429402 +815 40 0 days 00:00:00.370440900 +815 41 0 days 00:00:00.800247283 +815 42 0 days 00:00:00.677428820 +815 43 0 days 00:00:00.235325416 +815 44 0 days 00:00:00.443499286 +815 45 0 days 00:00:00.432433960 +815 46 0 days 00:00:00.687634850 +815 47 0 days 00:00:00.443829244 +815 48 0 days 00:00:00.313957400 +815 49 0 days 00:00:00.231044780 +815 50 0 days 00:00:00.450058056 +815 51 0 days 00:00:00.753285008 +815 52 0 days 00:00:00.222524520 +815 53 0 days 00:00:00.418899772 +815 54 0 days 00:00:00.484925926 +815 55 0 days 00:00:00.796508910 +815 56 0 days 00:00:00.437716256 +815 57 0 days 00:00:00.263935042 +815 58 0 days 00:00:00.312375572 
+815 59 0 days 00:00:00.239890760 +815 60 0 days 00:00:00.852275686 +815 61 0 days 00:00:00.425714716 +815 62 0 days 00:00:00.260080030 +815 63 0 days 00:00:00.921222966 +815 64 0 days 00:00:00.393313435 +815 65 0 days 00:00:00.690397970 +815 66 0 days 00:00:00.295244537 +815 67 0 days 00:00:00.388496115 +815 68 0 days 00:00:00.511390345 +815 69 0 days 00:00:00.858892552 +815 70 0 days 00:00:00.331793794 +815 71 0 days 00:00:00.812190193 +815 72 0 days 00:00:00.695562505 +815 73 0 days 00:00:00.806365272 +815 74 0 days 00:00:00.825589005 +815 75 0 days 00:00:00.268690080 +815 76 0 days 00:00:00.873079516 +815 77 0 days 00:00:00.828135690 +815 78 0 days 00:00:00.258974408 +815 79 0 days 00:00:00.250864867 +815 80 0 days 00:00:00.450809610 +815 81 0 days 00:00:00.238058936 +815 82 0 days 00:00:00.365832396 +815 83 0 days 00:00:00.480417856 +815 84 0 days 00:00:00.789309180 +815 85 0 days 00:00:00.261604420 +815 86 0 days 00:00:00.267595533 +815 87 0 days 00:00:00.808791000 +815 88 0 days 00:00:00.553915107 +815 89 0 days 00:00:00.424860922 +815 90 0 days 00:00:00.422711050 +815 91 0 days 00:00:00.798915480 +815 92 0 days 00:00:00.849578104 +815 93 0 days 00:00:00.483112486 +815 94 0 days 00:00:00.446834830 +815 95 0 days 00:00:00.276832777 +815 96 0 days 00:00:00.240132906 +815 97 0 days 00:00:00.370756125 +815 98 0 days 00:00:00.863267292 +815 99 0 days 00:00:00.766250535 +815 100 0 days 00:00:00.800596688 +816 1 0 days 00:00:00.411157750 +816 2 0 days 00:00:00.154701320 +816 3 0 days 00:00:00.135014576 +816 4 0 days 00:00:00.141748320 +816 5 0 days 00:00:00.385134252 +816 6 0 days 00:00:00.165352312 +816 7 0 days 00:00:00.120198948 +816 8 0 days 00:00:00.363759164 +816 9 0 days 00:00:00.406282795 +816 10 0 days 00:00:00.117028592 +816 11 0 days 00:00:00.477187878 +816 12 0 days 00:00:00.139738662 +816 13 0 days 00:00:00.143841345 +816 14 0 days 00:00:00.390922740 +816 15 0 days 00:00:00.147149820 +816 16 0 days 00:00:00.138916080 +816 17 0 days 00:00:00.230320696 
+816 18 0 days 00:00:00.229517275 +816 19 0 days 00:00:00.242248490 +816 20 0 days 00:00:00.239290717 +816 21 0 days 00:00:00.189637890 +816 22 0 days 00:00:00.188296880 +816 23 0 days 00:00:00.398740824 +816 24 0 days 00:00:00.366683540 +816 25 0 days 00:00:00.211299936 +816 26 0 days 00:00:00.183420450 +816 27 0 days 00:00:00.140975928 +816 28 0 days 00:00:00.150121605 +816 29 0 days 00:00:00.442441344 +816 30 0 days 00:00:00.137822434 +816 31 0 days 00:00:00.399845320 +816 32 0 days 00:00:00.218746637 +816 33 0 days 00:00:00.150817294 +816 34 0 days 00:00:00.260922348 +816 35 0 days 00:00:00.242135054 +816 36 0 days 00:00:00.148293480 +816 37 0 days 00:00:00.153989222 +816 38 0 days 00:00:00.481455200 +816 39 0 days 00:00:00.267526297 +816 40 0 days 00:00:00.380875633 +816 41 0 days 00:00:00.150529970 +816 42 0 days 00:00:00.139495145 +816 43 0 days 00:00:00.387368680 +816 44 0 days 00:00:00.208129670 +816 45 0 days 00:00:00.397378553 +816 46 0 days 00:00:00.453888510 +816 47 0 days 00:00:00.445765544 +816 48 0 days 00:00:00.158498956 +816 49 0 days 00:00:00.390628173 +816 50 0 days 00:00:00.266742416 +816 51 0 days 00:00:00.151447062 +816 52 0 days 00:00:00.195632024 +816 53 0 days 00:00:00.410632590 +816 54 0 days 00:00:00.225879612 +816 55 0 days 00:00:00.225983580 +816 56 0 days 00:00:00.221003865 +816 57 0 days 00:00:00.241908211 +816 58 0 days 00:00:00.431551420 +816 59 0 days 00:00:00.262468342 +816 60 0 days 00:00:00.396858880 +816 61 0 days 00:00:00.204357008 +816 62 0 days 00:00:00.427038696 +816 63 0 days 00:00:00.258907483 +816 64 0 days 00:00:00.130829962 +816 65 0 days 00:00:00.244882190 +816 66 0 days 00:00:00.172908868 +816 67 0 days 00:00:00.228809365 +816 68 0 days 00:00:00.214176523 +816 69 0 days 00:00:00.445950606 +816 70 0 days 00:00:00.427883880 +816 71 0 days 00:00:00.196454790 +816 72 0 days 00:00:00.152551274 +816 73 0 days 00:00:00.452871226 +816 74 0 days 00:00:00.221444565 +816 75 0 days 00:00:00.265813636 +816 76 0 days 
00:00:00.383395988 +816 77 0 days 00:00:00.218789584 +816 78 0 days 00:00:00.166083937 +816 79 0 days 00:00:00.219851470 +816 80 0 days 00:00:00.118931315 +816 81 0 days 00:00:00.244452792 +816 82 0 days 00:00:00.368847720 +816 83 0 days 00:00:00.241396936 +816 84 0 days 00:00:00.224091232 +816 85 0 days 00:00:00.441764863 +816 86 0 days 00:00:00.158320705 +816 87 0 days 00:00:00.144289128 +816 88 0 days 00:00:00.203950756 +816 89 0 days 00:00:00.152822280 +816 90 0 days 00:00:00.250060920 +816 91 0 days 00:00:00.232013317 +816 92 0 days 00:00:00.149837160 +816 93 0 days 00:00:00.160006000 +816 94 0 days 00:00:00.248602134 +816 96 0 days 00:00:00.131718484 +816 97 0 days 00:00:00.490366345 +816 98 0 days 00:00:00.449686310 +816 99 0 days 00:00:00.408800465 +816 100 0 days 00:00:00.261316280 +817 1 0 days 00:00:00.429303232 +817 2 0 days 00:00:00.778622016 +817 3 0 days 00:00:00.775910940 +817 4 0 days 00:00:00.471253895 +817 5 0 days 00:00:00.500019473 +817 6 0 days 00:00:00.481715587 +817 7 0 days 00:00:00.730800856 +817 8 0 days 00:00:00.466315970 +817 9 0 days 00:00:00.474075875 +817 10 0 days 00:00:00.777389296 +817 11 0 days 00:00:00.779096770 +817 12 0 days 00:00:00.788901432 +817 13 0 days 00:00:00.419050104 +817 14 0 days 00:00:00.740809413 +817 15 0 days 00:00:00.406034145 +817 16 0 days 00:00:00.778008605 +817 17 0 days 00:00:00.754571188 +817 18 0 days 00:00:00.262509566 +817 19 0 days 00:00:00.422515448 +817 20 0 days 00:00:00.718566492 +817 21 0 days 00:00:00.284218962 +817 22 0 days 00:00:00.273622317 +817 23 0 days 00:00:00.269837536 +817 24 0 days 00:00:00.740782768 +817 25 0 days 00:00:00.274769477 +817 26 0 days 00:00:00.296468078 +817 27 0 days 00:00:00.786993923 +817 28 0 days 00:00:00.444389608 +817 29 0 days 00:00:00.784802336 +817 30 0 days 00:00:00.435184960 +817 31 0 days 00:00:00.803508122 +817 32 0 days 00:00:00.444693493 +817 33 0 days 00:00:00.435477431 +817 34 0 days 00:00:00.814193255 +817 35 0 days 00:00:00.746107840 +817 36 0 days 
00:00:00.693215935 +817 37 0 days 00:00:00.754293336 +817 38 0 days 00:00:00.455004537 +817 39 0 days 00:00:00.236692948 +817 40 0 days 00:00:00.391770900 +817 41 0 days 00:00:00.249859222 +817 42 0 days 00:00:00.434602107 +817 43 0 days 00:00:00.282233895 +817 44 0 days 00:00:00.421912953 +817 45 0 days 00:00:00.306343769 +817 46 0 days 00:00:00.803028560 +817 47 0 days 00:00:00.793134992 +817 48 0 days 00:00:00.273140180 +817 49 0 days 00:00:00.767429820 +817 50 0 days 00:00:00.854212144 +817 51 0 days 00:00:00.838926000 +817 52 0 days 00:00:00.234520936 +817 53 0 days 00:00:00.459182382 +817 54 0 days 00:00:00.276798652 +817 55 0 days 00:00:00.781720202 +817 56 0 days 00:00:00.478484154 +817 57 0 days 00:00:00.241932496 +817 58 0 days 00:00:00.454272621 +817 59 0 days 00:00:00.818943367 +817 60 0 days 00:00:00.277935822 +817 61 0 days 00:00:00.753940076 +817 62 0 days 00:00:00.780011348 +817 63 0 days 00:00:00.713818051 +817 64 0 days 00:00:00.819902904 +817 65 0 days 00:00:00.829624945 +817 66 0 days 00:00:00.296597480 +817 67 0 days 00:00:00.268268658 +817 68 0 days 00:00:00.404610515 +817 69 0 days 00:00:00.200704356 +817 70 0 days 00:00:00.255131863 +817 71 0 days 00:00:00.269874586 +817 72 0 days 00:00:00.844961691 +817 73 0 days 00:00:00.468272378 +817 74 0 days 00:00:00.337508002 +817 75 0 days 00:00:00.271955036 +817 76 0 days 00:00:00.423155188 +817 77 0 days 00:00:00.781488863 +817 78 0 days 00:00:00.433170417 +817 79 0 days 00:00:00.479542392 +817 80 0 days 00:00:00.819086908 +817 81 0 days 00:00:00.471716703 +817 82 0 days 00:00:00.398579124 +817 83 0 days 00:00:00.240330048 +817 84 0 days 00:00:00.806158362 +817 85 0 days 00:00:00.263722558 +817 86 0 days 00:00:00.440296380 +817 87 0 days 00:00:00.433311476 +817 88 0 days 00:00:00.285746808 +817 89 0 days 00:00:00.282194926 +817 90 0 days 00:00:00.268735107 +817 91 0 days 00:00:00.456996068 +817 92 0 days 00:00:00.786589320 +817 93 0 days 00:00:00.278758638 +817 94 0 days 00:00:00.467799830 +817 95 
0 days 00:00:00.255171313 +817 96 0 days 00:00:00.662213080 +817 97 0 days 00:00:00.817530644 +817 98 0 days 00:00:00.804493726 +817 99 0 days 00:00:00.725829084 +817 100 0 days 00:00:00.272928361 +818 1 0 days 00:00:00.242729174 +818 2 0 days 00:00:00.380598744 +818 3 0 days 00:00:00.398743500 +818 4 0 days 00:00:00.143975353 +818 5 0 days 00:00:00.139058992 +818 6 0 days 00:00:00.391376274 +818 7 0 days 00:00:00.141547576 +818 8 0 days 00:00:00.143995998 +818 9 0 days 00:00:00.240169535 +818 10 0 days 00:00:00.403998692 +818 11 0 days 00:00:00.244789735 +818 12 0 days 00:00:00.231064141 +818 13 0 days 00:00:00.214364555 +818 14 0 days 00:00:00.350192965 +818 15 0 days 00:00:00.227667312 +818 16 0 days 00:00:00.402423813 +818 17 0 days 00:00:00.387780613 +818 18 0 days 00:00:00.226619122 +818 19 0 days 00:00:00.410352678 +818 20 0 days 00:00:00.209335328 +818 21 0 days 00:00:00.384496977 +818 22 0 days 00:00:00.359796760 +818 23 0 days 00:00:00.217447204 +818 24 0 days 00:00:00.205942430 +818 25 0 days 00:00:00.153263055 +818 26 0 days 00:00:00.240196164 +818 27 0 days 00:00:00.282470353 +818 28 0 days 00:00:00.165454220 +818 29 0 days 00:00:00.241553971 +818 30 0 days 00:00:00.237349710 +818 31 0 days 00:00:00.249113752 +818 32 0 days 00:00:00.296233964 +818 33 0 days 00:00:00.323339076 +818 34 0 days 00:00:00.127898258 +818 35 0 days 00:00:00.334793875 +818 36 0 days 00:00:00.118925567 +818 37 0 days 00:00:00.192577746 +818 38 0 days 00:00:00.176210789 +818 39 0 days 00:00:00.118394614 +818 40 0 days 00:00:00.192297851 +818 41 0 days 00:00:00.192885687 +818 42 0 days 00:00:00.322949320 +818 43 0 days 00:00:00.124072474 +818 44 0 days 00:00:00.315125360 +818 45 0 days 00:00:00.333270762 +818 46 0 days 00:00:00.191917193 +818 47 0 days 00:00:00.189916902 +818 48 0 days 00:00:00.322123500 +818 49 0 days 00:00:00.309843523 +818 50 0 days 00:00:00.119520595 +818 51 0 days 00:00:00.120984454 +818 52 0 days 00:00:00.178441503 +818 53 0 days 00:00:00.311763868 +818 54 0 
days 00:00:00.111554624 +818 55 0 days 00:00:00.143567744 +818 56 0 days 00:00:00.199147575 +818 57 0 days 00:00:00.115291745 +818 58 0 days 00:00:00.129708776 +818 59 0 days 00:00:00.128665080 +818 60 0 days 00:00:00.298448328 +818 61 0 days 00:00:00.127411367 +818 62 0 days 00:00:00.129291510 +818 63 0 days 00:00:00.177084943 +818 64 0 days 00:00:00.122872864 +818 65 0 days 00:00:00.285510916 +818 66 0 days 00:00:00.143235273 +818 67 0 days 00:00:00.193385578 +818 68 0 days 00:00:00.132181988 +818 69 0 days 00:00:00.139877709 +818 70 0 days 00:00:00.130070280 +818 71 0 days 00:00:00.126119974 +818 72 0 days 00:00:00.121838765 +818 73 0 days 00:00:00.203323004 +818 74 0 days 00:00:00.313645380 +818 75 0 days 00:00:00.117572903 +818 76 0 days 00:00:00.201540870 +818 77 0 days 00:00:00.200870455 +818 78 0 days 00:00:00.205561347 +818 79 0 days 00:00:00.134962968 +818 80 0 days 00:00:00.319533734 +818 81 0 days 00:00:00.187583855 +818 82 0 days 00:00:00.286675590 +818 83 0 days 00:00:00.190013750 +818 84 0 days 00:00:00.129648563 +818 85 0 days 00:00:00.340858547 +818 86 0 days 00:00:00.193183825 +818 87 0 days 00:00:00.291916220 +818 88 0 days 00:00:00.306672736 +818 89 0 days 00:00:00.264458260 +818 90 0 days 00:00:00.180126760 +818 91 0 days 00:00:00.197661872 +818 92 0 days 00:00:00.327727105 +818 93 0 days 00:00:00.305940256 +818 94 0 days 00:00:00.195426160 +818 95 0 days 00:00:00.121515156 +818 96 0 days 00:00:00.124507600 +818 97 0 days 00:00:00.203540024 +818 98 0 days 00:00:00.121870800 +818 99 0 days 00:00:00.333463374 +818 100 0 days 00:00:00.200462697 +819 1 0 days 00:00:00.120676908 +819 2 0 days 00:00:00.132041780 +819 3 0 days 00:00:00.108929440 +819 4 0 days 00:00:00.126302522 +819 5 0 days 00:00:00.117808065 +819 6 0 days 00:00:00.099135060 +819 7 0 days 00:00:00.104972048 +819 8 0 days 00:00:00.099050975 +819 9 0 days 00:00:00.129257600 +819 10 0 days 00:00:00.113686252 +819 11 0 days 00:00:00.172005703 +819 12 0 days 00:00:00.161786460 +819 13 0 
days 00:00:00.159501528 +819 14 0 days 00:00:00.125050064 +819 15 0 days 00:00:00.124389065 +819 16 0 days 00:00:00.145753400 +819 17 0 days 00:00:00.105226970 +819 18 0 days 00:00:00.139299977 +819 19 0 days 00:00:00.124385605 +819 20 0 days 00:00:00.155752032 +819 21 0 days 00:00:00.108367036 +819 22 0 days 00:00:00.171390960 +819 23 0 days 00:00:00.138412235 +819 24 0 days 00:00:00.125503872 +819 25 0 days 00:00:00.125887956 +819 26 0 days 00:00:00.116589527 +819 27 0 days 00:00:00.102384370 +819 28 0 days 00:00:00.164197532 +819 29 0 days 00:00:00.123830376 +819 30 0 days 00:00:00.141847400 +819 31 0 days 00:00:00.121663095 +819 32 0 days 00:00:00.135962033 +819 33 0 days 00:00:00.113185792 +819 34 0 days 00:00:00.142676622 +819 35 0 days 00:00:00.130822540 +819 36 0 days 00:00:00.122583542 +819 37 0 days 00:00:00.163332120 +819 38 0 days 00:00:00.162031248 +819 39 0 days 00:00:00.129718868 +819 40 0 days 00:00:00.173877802 +819 41 0 days 00:00:00.123286090 +819 42 0 days 00:00:00.124506608 +819 43 0 days 00:00:00.139968046 +819 44 0 days 00:00:00.156406025 +819 45 0 days 00:00:00.130507576 +819 46 0 days 00:00:00.171556556 +819 47 0 days 00:00:00.162449376 +819 48 0 days 00:00:00.106955140 +819 49 0 days 00:00:00.115992330 +819 50 0 days 00:00:00.119380477 +819 51 0 days 00:00:00.125356931 +819 52 0 days 00:00:00.116056266 +819 53 0 days 00:00:00.116802817 +819 54 0 days 00:00:00.141879062 +819 55 0 days 00:00:00.174833660 +819 56 0 days 00:00:00.117614348 +819 57 0 days 00:00:00.116095086 +819 58 0 days 00:00:00.176998768 +819 59 0 days 00:00:00.131847986 +819 60 0 days 00:00:00.125289375 +819 61 0 days 00:00:00.121345950 +819 62 0 days 00:00:00.169412173 +819 63 0 days 00:00:00.155422270 +819 64 0 days 00:00:00.129905736 +819 65 0 days 00:00:00.107317595 +819 66 0 days 00:00:00.161539710 +819 67 0 days 00:00:00.142771000 +819 68 0 days 00:00:00.122704905 +819 69 0 days 00:00:00.137683666 +819 70 0 days 00:00:00.131623744 +819 71 0 days 00:00:00.134641590 
+819 72 0 days 00:00:00.106682160 +819 73 0 days 00:00:00.131017144 +819 74 0 days 00:00:00.125146708 +819 75 0 days 00:00:00.180910905 +819 76 0 days 00:00:00.170682683 +819 77 0 days 00:00:00.122429335 +819 78 0 days 00:00:00.121057636 +819 79 0 days 00:00:00.122882430 +819 80 0 days 00:00:00.130339184 +819 81 0 days 00:00:00.153155940 +819 82 0 days 00:00:00.174621242 +819 83 0 days 00:00:00.125946948 +819 84 0 days 00:00:00.161591700 +819 85 0 days 00:00:00.112730548 +819 86 0 days 00:00:00.111960425 +819 87 0 days 00:00:00.109536970 +819 88 0 days 00:00:00.116625905 +819 89 0 days 00:00:00.172100763 +819 90 0 days 00:00:00.139507265 +819 91 0 days 00:00:00.165263880 +819 92 0 days 00:00:00.139485528 +819 93 0 days 00:00:00.170444096 +819 94 0 days 00:00:00.129208295 +819 95 0 days 00:00:00.150724618 +819 96 0 days 00:00:00.126012820 +819 97 0 days 00:00:00.161035335 +819 98 0 days 00:00:00.137415286 +819 99 0 days 00:00:00.125098445 +819 100 0 days 00:00:00.132969864 +820 1 0 days 00:00:00.120553182 +820 2 0 days 00:00:00.182465462 +820 3 0 days 00:00:00.124276435 +820 4 0 days 00:00:00.122310030 +820 5 0 days 00:00:00.136188977 +820 6 0 days 00:00:00.133660030 +820 7 0 days 00:00:00.131860688 +820 8 0 days 00:00:00.121551890 +820 9 0 days 00:00:00.154369155 +820 10 0 days 00:00:00.129642892 +820 11 0 days 00:00:00.129644344 +820 12 0 days 00:00:00.122700185 +820 13 0 days 00:00:00.116692102 +820 14 0 days 00:00:00.177218316 +820 15 0 days 00:00:00.171826086 +820 16 0 days 00:00:00.102293620 +820 17 0 days 00:00:00.153011475 +820 18 0 days 00:00:00.114568802 +820 19 0 days 00:00:00.169837700 +820 20 0 days 00:00:00.140735470 +820 21 0 days 00:00:00.145778913 +820 22 0 days 00:00:00.119455555 +820 23 0 days 00:00:00.114946128 +820 24 0 days 00:00:00.131353580 +820 25 0 days 00:00:00.147063265 +820 26 0 days 00:00:00.123214610 +820 27 0 days 00:00:00.119100950 +820 28 0 days 00:00:00.139892673 +820 29 0 days 00:00:00.179756547 +820 30 0 days 00:00:00.138798394 
+820 31 0 days 00:00:00.131143864 +820 32 0 days 00:00:00.175968828 +820 33 0 days 00:00:00.140249740 +820 34 0 days 00:00:00.124706295 +820 35 0 days 00:00:00.158083305 +820 36 0 days 00:00:00.109484540 +820 37 0 days 00:00:00.109349240 +820 38 0 days 00:00:00.109457816 +820 39 0 days 00:00:00.103861580 +820 40 0 days 00:00:00.118160020 +820 41 0 days 00:00:00.174832360 +820 42 0 days 00:00:00.125339485 +820 43 0 days 00:00:00.169356740 +820 44 0 days 00:00:00.167639340 +820 45 0 days 00:00:00.178523382 +820 46 0 days 00:00:00.137693910 +820 47 0 days 00:00:00.163212020 +820 48 0 days 00:00:00.114399800 +820 49 0 days 00:00:00.129966120 +820 50 0 days 00:00:00.110734526 +820 51 0 days 00:00:00.158892670 +820 52 0 days 00:00:00.170215792 +820 53 0 days 00:00:00.121656530 +820 54 0 days 00:00:00.127148044 +820 55 0 days 00:00:00.108829605 +820 56 0 days 00:00:00.141585845 +820 57 0 days 00:00:00.123338895 +820 58 0 days 00:00:00.141423465 +820 59 0 days 00:00:00.143687626 +820 60 0 days 00:00:00.138786380 +820 61 0 days 00:00:00.142191753 +820 62 0 days 00:00:00.110032528 +820 63 0 days 00:00:00.111028820 +820 64 0 days 00:00:00.123943735 +820 65 0 days 00:00:00.122911452 +820 66 0 days 00:00:00.130887800 +820 67 0 days 00:00:00.133041693 +820 68 0 days 00:00:00.124803440 +820 69 0 days 00:00:00.117925352 +820 70 0 days 00:00:00.108965580 +820 71 0 days 00:00:00.161758400 +820 72 0 days 00:00:00.124222750 +820 73 0 days 00:00:00.157463590 +820 74 0 days 00:00:00.141647985 +820 75 0 days 00:00:00.161898205 +820 76 0 days 00:00:00.128922295 +820 77 0 days 00:00:00.155898160 +820 78 0 days 00:00:00.131791660 +820 79 0 days 00:00:00.156467875 +820 80 0 days 00:00:00.145171353 +820 81 0 days 00:00:00.116102140 +820 82 0 days 00:00:00.117575597 +820 83 0 days 00:00:00.185189053 +820 84 0 days 00:00:00.135157924 +820 85 0 days 00:00:00.130927732 +820 86 0 days 00:00:00.116106810 +820 87 0 days 00:00:00.110172333 +820 88 0 days 00:00:00.113192460 +820 89 0 days 
00:00:00.143909212 +820 90 0 days 00:00:00.114503744 +820 91 0 days 00:00:00.162684392 +820 92 0 days 00:00:00.133177062 +820 93 0 days 00:00:00.102978255 +820 94 0 days 00:00:00.097126466 +820 95 0 days 00:00:00.121066610 +820 96 0 days 00:00:00.168322276 +820 97 0 days 00:00:00.091922160 +820 98 0 days 00:00:00.119167505 +820 99 0 days 00:00:00.101893850 +820 100 0 days 00:00:00.100094485 +821 1 0 days 00:00:00.092618713 +821 2 0 days 00:00:00.056903150 +821 3 0 days 00:00:00.062778795 +821 4 0 days 00:00:00.064137394 +821 5 0 days 00:00:00.065213720 +821 6 0 days 00:00:00.058695430 +821 7 0 days 00:00:00.078064687 +821 8 0 days 00:00:00.063517584 +821 9 0 days 00:00:00.087959575 +821 10 0 days 00:00:00.093078636 +821 11 0 days 00:00:00.091326288 +821 12 0 days 00:00:00.074667168 +821 13 0 days 00:00:00.094461820 +821 14 0 days 00:00:00.096048231 +821 15 0 days 00:00:00.060483364 +821 16 0 days 00:00:00.065937420 +821 17 0 days 00:00:00.085135670 +821 18 0 days 00:00:00.059376325 +821 19 0 days 00:00:00.084424130 +821 20 0 days 00:00:00.066400046 +821 21 0 days 00:00:00.059172105 +821 22 0 days 00:00:00.069218025 +821 23 0 days 00:00:00.065422660 +821 24 0 days 00:00:00.076078557 +821 25 0 days 00:00:00.064520095 +821 26 0 days 00:00:00.088180510 +821 27 0 days 00:00:00.092078920 +821 28 0 days 00:00:00.093376123 +821 29 0 days 00:00:00.096229854 +821 30 0 days 00:00:00.054720173 +821 31 0 days 00:00:00.089770416 +821 32 0 days 00:00:00.070568850 +821 33 0 days 00:00:00.091647316 +821 34 0 days 00:00:00.052809746 +821 35 0 days 00:00:00.067391976 +821 36 0 days 00:00:00.076439598 +821 37 0 days 00:00:00.061349960 +821 38 0 days 00:00:00.077667305 +821 39 0 days 00:00:00.059041488 +821 40 0 days 00:00:00.060648766 +821 41 0 days 00:00:00.055866840 +821 42 0 days 00:00:00.068788010 +821 43 0 days 00:00:00.080947060 +821 44 0 days 00:00:00.062621305 +821 45 0 days 00:00:00.061579524 +821 46 0 days 00:00:00.064940877 +821 47 0 days 00:00:00.059997708 +821 48 0 days 
00:00:00.065350955 +821 49 0 days 00:00:00.091526110 +821 50 0 days 00:00:00.088317904 +821 51 0 days 00:00:00.065749607 +821 52 0 days 00:00:00.059685852 +821 53 0 days 00:00:00.065502750 +821 54 0 days 00:00:00.064511745 +821 55 0 days 00:00:00.074460865 +821 56 0 days 00:00:00.078087370 +821 57 0 days 00:00:00.087274944 +821 58 0 days 00:00:00.095054700 +821 59 0 days 00:00:00.082188430 +821 60 0 days 00:00:00.062579060 +821 61 0 days 00:00:00.071850910 +821 62 0 days 00:00:00.073307397 +821 63 0 days 00:00:00.066520254 +821 64 0 days 00:00:00.067215950 +821 65 0 days 00:00:00.062406293 +821 66 0 days 00:00:00.072565224 +821 67 0 days 00:00:00.064362300 +821 68 0 days 00:00:00.057199700 +821 69 0 days 00:00:00.067070940 +821 70 0 days 00:00:00.064579905 +821 71 0 days 00:00:00.059461973 +821 72 0 days 00:00:00.056863045 +821 73 0 days 00:00:00.075070820 +821 74 0 days 00:00:00.071536926 +821 75 0 days 00:00:00.064011400 +821 76 0 days 00:00:00.084761290 +821 77 0 days 00:00:00.087420132 +821 78 0 days 00:00:00.062669088 +821 79 0 days 00:00:00.056288580 +821 80 0 days 00:00:00.057510285 +821 81 0 days 00:00:00.087916120 +821 82 0 days 00:00:00.088131353 +821 83 0 days 00:00:00.063487402 +821 84 0 days 00:00:00.072399951 +821 85 0 days 00:00:00.077316224 +821 86 0 days 00:00:00.066434388 +821 87 0 days 00:00:00.055889210 +821 88 0 days 00:00:00.052334286 +821 89 0 days 00:00:00.062874802 +821 90 0 days 00:00:00.052009093 +821 91 0 days 00:00:00.077001550 +821 92 0 days 00:00:00.058966490 +821 93 0 days 00:00:00.064745660 +821 94 0 days 00:00:00.059332072 +821 95 0 days 00:00:00.066772916 +821 96 0 days 00:00:00.056376880 +821 97 0 days 00:00:00.073490270 +821 98 0 days 00:00:00.091008804 +821 99 0 days 00:00:00.060693712 +821 100 0 days 00:00:00.061680240 +822 1 0 days 00:00:00.062755668 +822 2 0 days 00:00:00.066792186 +822 3 0 days 00:00:00.077643065 +822 4 0 days 00:00:00.066598938 +822 5 0 days 00:00:00.103199763 +822 6 0 days 00:00:00.077047065 +822 7 0 days 
00:00:00.081528653 +822 8 0 days 00:00:00.066327868 +822 9 0 days 00:00:00.098199140 +822 10 0 days 00:00:00.069516940 +822 11 0 days 00:00:00.073513684 +822 12 0 days 00:00:00.071599080 +822 13 0 days 00:00:00.073656635 +822 14 0 days 00:00:00.059014575 +822 15 0 days 00:00:00.095353423 +822 16 0 days 00:00:00.060468815 +822 17 0 days 00:00:00.059417200 +822 18 0 days 00:00:00.103857853 +822 19 0 days 00:00:00.057691680 +822 20 0 days 00:00:00.096723702 +822 21 0 days 00:00:00.064612514 +822 22 0 days 00:00:00.094713940 +822 23 0 days 00:00:00.064320476 +822 24 0 days 00:00:00.100205750 +822 25 0 days 00:00:00.062314744 +822 26 0 days 00:00:00.053837300 +822 27 0 days 00:00:00.085931860 +822 28 0 days 00:00:00.076173022 +822 29 0 days 00:00:00.060815552 +822 30 0 days 00:00:00.078449200 +822 31 0 days 00:00:00.060515624 +822 32 0 days 00:00:00.073331905 +822 33 0 days 00:00:00.091216900 +822 34 0 days 00:00:00.077721156 +822 35 0 days 00:00:00.065792320 +822 36 0 days 00:00:00.088005735 +822 37 0 days 00:00:00.088397075 +822 38 0 days 00:00:00.074239023 +822 39 0 days 00:00:00.072685215 +822 40 0 days 00:00:00.095404051 +822 41 0 days 00:00:00.066706204 +822 42 0 days 00:00:00.062180060 +822 43 0 days 00:00:00.091362260 +822 44 0 days 00:00:00.057372430 +822 45 0 days 00:00:00.074927577 +822 46 0 days 00:00:00.066027405 +822 47 0 days 00:00:00.089323528 +822 48 0 days 00:00:00.062660644 +822 49 0 days 00:00:00.065760730 +822 50 0 days 00:00:00.061988466 +822 51 0 days 00:00:00.069536828 +822 52 0 days 00:00:00.091496512 +822 53 0 days 00:00:00.096684012 +822 54 0 days 00:00:00.052711806 +822 55 0 days 00:00:00.090686892 +822 56 0 days 00:00:00.072450240 +822 57 0 days 00:00:00.100264442 +822 58 0 days 00:00:00.067483720 +822 59 0 days 00:00:00.069334404 +822 60 0 days 00:00:00.065464065 +822 61 0 days 00:00:00.073560010 +822 62 0 days 00:00:00.095731805 +822 63 0 days 00:00:00.069968298 +822 64 0 days 00:00:00.095258883 +822 65 0 days 00:00:00.060298675 +822 66 0 
days 00:00:00.077528056 +822 67 0 days 00:00:00.058305415 +822 68 0 days 00:00:00.077742726 +822 69 0 days 00:00:00.093000475 +822 70 0 days 00:00:00.062511500 +822 71 0 days 00:00:00.092754150 +822 72 0 days 00:00:00.069826208 +822 73 0 days 00:00:00.093454896 +822 74 0 days 00:00:00.076116982 +822 75 0 days 00:00:00.070400472 +822 76 0 days 00:00:00.088170470 +822 77 0 days 00:00:00.066365245 +822 78 0 days 00:00:00.095229643 +822 79 0 days 00:00:00.097555490 +822 80 0 days 00:00:00.094450012 +822 81 0 days 00:00:00.064899440 +822 82 0 days 00:00:00.069702176 +822 83 0 days 00:00:00.074138020 +822 84 0 days 00:00:00.066843900 +822 85 0 days 00:00:00.078065102 +822 86 0 days 00:00:00.061196672 +822 87 0 days 00:00:00.093272584 +822 88 0 days 00:00:00.051331860 +822 89 0 days 00:00:00.063367073 +822 90 0 days 00:00:00.070818468 +822 91 0 days 00:00:00.066413840 +822 92 0 days 00:00:00.071792990 +822 93 0 days 00:00:00.099187448 +822 94 0 days 00:00:00.060778348 +822 95 0 days 00:00:00.060110313 +822 96 0 days 00:00:00.066198675 +822 97 0 days 00:00:00.064778666 +822 98 0 days 00:00:00.060759108 +822 99 0 days 00:00:00.060380436 +822 100 0 days 00:00:00.066684903 +823 1 0 days 00:00:00.130535432 +823 2 0 days 00:00:00.134849418 +823 3 0 days 00:00:00.167571870 +823 4 0 days 00:00:00.167136154 +823 5 0 days 00:00:00.114487517 +823 6 0 days 00:00:00.143258781 +823 7 0 days 00:00:00.130630824 +823 8 0 days 00:00:00.122586780 +823 9 0 days 00:00:00.168550476 +823 10 0 days 00:00:00.142197794 +823 11 0 days 00:00:00.176369706 +823 12 0 days 00:00:00.116520003 +823 13 0 days 00:00:00.173018450 +823 14 0 days 00:00:00.120242437 +823 15 0 days 00:00:00.160594416 +823 16 0 days 00:00:00.129098160 +823 17 0 days 00:00:00.121474440 +823 18 0 days 00:00:00.167033072 +823 19 0 days 00:00:00.167907850 +823 20 0 days 00:00:00.164433440 +823 21 0 days 00:00:00.118741424 +823 22 0 days 00:00:00.124961355 +823 23 0 days 00:00:00.135182833 +823 24 0 days 00:00:00.115352808 +823 25 0 
days 00:00:00.165382796 +823 26 0 days 00:00:00.122042162 +823 27 0 days 00:00:00.187047635 +823 28 0 days 00:00:00.114876136 +823 29 0 days 00:00:00.114049716 +823 30 0 days 00:00:00.158758110 +823 31 0 days 00:00:00.142970514 +823 32 0 days 00:00:00.138745871 +823 33 0 days 00:00:00.182692860 +823 34 0 days 00:00:00.167340636 +823 35 0 days 00:00:00.125166375 +823 36 0 days 00:00:00.181542902 +823 37 0 days 00:00:00.117227816 +823 38 0 days 00:00:00.181491491 +823 39 0 days 00:00:00.185282095 +823 40 0 days 00:00:00.140766191 +823 41 0 days 00:00:00.120012471 +823 42 0 days 00:00:00.137263950 +823 43 0 days 00:00:00.142014440 +823 44 0 days 00:00:00.116108088 +823 45 0 days 00:00:00.138390503 +823 46 0 days 00:00:00.175497740 +823 47 0 days 00:00:00.144607080 +823 48 0 days 00:00:00.123747592 +823 49 0 days 00:00:00.121893174 +823 50 0 days 00:00:00.114323832 +823 51 0 days 00:00:00.138340786 +823 52 0 days 00:00:00.174539870 +823 53 0 days 00:00:00.124690465 +823 54 0 days 00:00:00.114996016 +823 55 0 days 00:00:00.147669943 +823 56 0 days 00:00:00.123492016 +823 57 0 days 00:00:00.132889150 +823 58 0 days 00:00:00.139364796 +823 59 0 days 00:00:00.150376182 +823 60 0 days 00:00:00.112469844 +823 61 0 days 00:00:00.137428423 +823 62 0 days 00:00:00.111730886 +823 63 0 days 00:00:00.158756375 +823 64 0 days 00:00:00.120120673 +823 65 0 days 00:00:00.127871913 +823 66 0 days 00:00:00.164594320 +823 67 0 days 00:00:00.127993125 +823 68 0 days 00:00:00.129458560 +823 69 0 days 00:00:00.139073374 +823 70 0 days 00:00:00.136509720 +823 71 0 days 00:00:00.127422300 +823 72 0 days 00:00:00.137917173 +823 73 0 days 00:00:00.122848467 +823 74 0 days 00:00:00.172541346 +823 75 0 days 00:00:00.167154648 +823 76 0 days 00:00:00.177904025 +823 77 0 days 00:00:00.179350494 +823 78 0 days 00:00:00.168296160 +823 79 0 days 00:00:00.182673412 +823 80 0 days 00:00:00.137380270 +823 81 0 days 00:00:00.167583548 +823 82 0 days 00:00:00.141577780 +823 83 0 days 00:00:00.167770100 
+823 84 0 days 00:00:00.122992330 +823 85 0 days 00:00:00.127834625 +823 86 0 days 00:00:00.177721240 +823 87 0 days 00:00:00.177040136 +823 88 0 days 00:00:00.123276651 +823 89 0 days 00:00:00.127242432 +823 90 0 days 00:00:00.137281577 +823 91 0 days 00:00:00.169579922 +823 92 0 days 00:00:00.161884132 +823 93 0 days 00:00:00.164820304 +823 94 0 days 00:00:00.123137211 +823 95 0 days 00:00:00.129938623 +823 96 0 days 00:00:00.122753548 +823 97 0 days 00:00:00.118812740 +823 98 0 days 00:00:00.143487309 +823 99 0 days 00:00:00.137267092 +823 100 0 days 00:00:00.135983751 +824 1 0 days 00:00:00.096963502 +824 2 0 days 00:00:00.076795272 +824 3 0 days 00:00:00.067512191 +824 4 0 days 00:00:00.097762234 +824 5 0 days 00:00:00.065462584 +824 6 0 days 00:00:00.086995965 +824 7 0 days 00:00:00.088040720 +824 8 0 days 00:00:00.093756710 +824 9 0 days 00:00:00.082182846 +824 10 0 days 00:00:00.098487772 +824 11 0 days 00:00:00.064398040 +824 12 0 days 00:00:00.078614307 +824 13 0 days 00:00:00.069202104 +824 14 0 days 00:00:00.088499560 +824 15 0 days 00:00:00.062720350 +824 16 0 days 00:00:00.096925577 +824 17 0 days 00:00:00.097230556 +824 18 0 days 00:00:00.074334920 +824 19 0 days 00:00:00.092356400 +824 20 0 days 00:00:00.094284883 +824 21 0 days 00:00:00.068927040 +824 22 0 days 00:00:00.072871522 +824 23 0 days 00:00:00.070335310 +824 24 0 days 00:00:00.090964446 +824 25 0 days 00:00:00.073475220 +824 26 0 days 00:00:00.080722261 +824 27 0 days 00:00:00.095599642 +824 28 0 days 00:00:00.073822770 +824 29 0 days 00:00:00.075597876 +824 30 0 days 00:00:00.092558762 +824 31 0 days 00:00:00.095073704 +824 32 0 days 00:00:00.076919088 +824 33 0 days 00:00:00.080615527 +824 34 0 days 00:00:00.084350804 +824 35 0 days 00:00:00.069422620 +824 36 0 days 00:00:00.091776372 +824 37 0 days 00:00:00.061310006 +824 38 0 days 00:00:00.078543940 +824 39 0 days 00:00:00.081502445 +824 40 0 days 00:00:00.061881170 +824 41 0 days 00:00:00.067724540 +824 42 0 days 00:00:00.066182606 
+824 43 0 days 00:00:00.064777590 +824 44 0 days 00:00:00.092873357 +824 45 0 days 00:00:00.094361854 +824 46 0 days 00:00:00.070771325 +824 47 0 days 00:00:00.065470112 +824 48 0 days 00:00:00.096235845 +824 49 0 days 00:00:00.091257280 +824 50 0 days 00:00:00.077933415 +824 51 0 days 00:00:00.067726982 +824 52 0 days 00:00:00.071282081 +824 53 0 days 00:00:00.093305593 +824 54 0 days 00:00:00.062755237 +824 55 0 days 00:00:00.067499000 +824 56 0 days 00:00:00.063303015 +824 57 0 days 00:00:00.062192031 +824 58 0 days 00:00:00.092808793 +824 59 0 days 00:00:00.062013271 +824 60 0 days 00:00:00.074622045 +824 61 0 days 00:00:00.087459235 +824 62 0 days 00:00:00.085219276 +824 63 0 days 00:00:00.086956520 +824 64 0 days 00:00:00.074536481 +824 65 0 days 00:00:00.063184352 +824 66 0 days 00:00:00.073729588 +824 67 0 days 00:00:00.063846071 +824 68 0 days 00:00:00.091130365 +824 69 0 days 00:00:00.087002896 +824 70 0 days 00:00:00.081190390 +824 71 0 days 00:00:00.067567826 +824 72 0 days 00:00:00.068413424 +824 73 0 days 00:00:00.092908866 +824 74 0 days 00:00:00.064622234 +824 75 0 days 00:00:00.063534067 +824 76 0 days 00:00:00.060293390 +824 77 0 days 00:00:00.095275628 +824 78 0 days 00:00:00.063599633 +824 79 0 days 00:00:00.071408831 +824 80 0 days 00:00:00.075326964 +824 81 0 days 00:00:00.067039008 +824 82 0 days 00:00:00.090979857 +824 83 0 days 00:00:00.063399948 +824 84 0 days 00:00:00.092926864 +824 85 0 days 00:00:00.079001842 +824 86 0 days 00:00:00.092333097 +824 87 0 days 00:00:00.059960992 +824 88 0 days 00:00:00.068188080 +824 89 0 days 00:00:00.095352664 +824 90 0 days 00:00:00.086163436 +824 91 0 days 00:00:00.066335255 +824 92 0 days 00:00:00.070023538 +824 93 0 days 00:00:00.073900960 +824 94 0 days 00:00:00.059854300 +824 95 0 days 00:00:00.062956771 +824 96 0 days 00:00:00.091379347 +824 97 0 days 00:00:00.067258060 +824 98 0 days 00:00:00.062690885 +824 99 0 days 00:00:00.061180836 +824 100 0 days 00:00:00.074808574 +825 1 0 days 
00:00:00.279878735 +825 2 0 days 00:00:00.394694490 +825 3 0 days 00:00:00.461912962 +825 4 0 days 00:00:00.620213193 +825 5 0 days 00:00:00.721635685 +825 6 0 days 00:00:00.781161695 +825 7 0 days 00:00:00.452456920 +825 8 0 days 00:00:00.744029875 +825 9 0 days 00:00:00.772557120 +825 10 0 days 00:00:00.443791620 +825 11 0 days 00:00:00.368368513 +825 12 0 days 00:00:00.243079440 +825 13 0 days 00:00:00.428190736 +825 14 0 days 00:00:00.518950525 +825 15 0 days 00:00:00.787616124 +825 16 0 days 00:00:00.273093860 +825 17 0 days 00:00:00.738086130 +825 18 0 days 00:00:00.239471680 +825 19 0 days 00:00:00.438444510 +825 20 0 days 00:00:00.262348564 +825 21 0 days 00:00:00.201680953 +825 22 0 days 00:00:00.736394015 +825 23 0 days 00:00:00.766565256 +825 24 0 days 00:00:00.281723765 +825 25 0 days 00:00:00.264054953 +825 26 0 days 00:00:00.447641073 +825 27 0 days 00:00:00.858206600 +825 28 0 days 00:00:00.665735986 +825 29 0 days 00:00:00.872124440 +825 30 0 days 00:00:00.483420345 +825 31 0 days 00:00:00.435278408 +825 32 0 days 00:00:00.444679816 +825 33 0 days 00:00:00.800446320 +825 34 0 days 00:00:00.264264046 +825 35 0 days 00:00:00.833959711 +825 36 0 days 00:00:00.232163246 +825 37 0 days 00:00:00.272117877 +825 38 0 days 00:00:00.820486526 +825 39 0 days 00:00:00.251548344 +825 40 0 days 00:00:00.266088940 +825 41 0 days 00:00:00.456844048 +825 42 0 days 00:00:00.442417900 +825 43 0 days 00:00:00.248812596 +825 44 0 days 00:00:00.552934100 +825 45 0 days 00:00:00.808226780 +825 46 0 days 00:00:00.433407096 +825 47 0 days 00:00:00.800141015 +825 48 0 days 00:00:00.255530650 +825 49 0 days 00:00:00.278250605 +825 50 0 days 00:00:00.798709348 +825 51 0 days 00:00:00.243511565 +825 52 0 days 00:00:00.425986608 +825 53 0 days 00:00:00.839583277 +825 54 0 days 00:00:00.300165556 +825 55 0 days 00:00:00.419468095 +825 56 0 days 00:00:00.507154870 +825 57 0 days 00:00:00.262617724 +825 58 0 days 00:00:00.458263036 +825 59 0 days 00:00:00.234178220 +825 60 0 days 
00:00:00.468098791 +825 61 0 days 00:00:00.637587300 +825 62 0 days 00:00:00.433338888 +825 63 0 days 00:00:00.392730485 +825 64 0 days 00:00:00.459008986 +825 65 0 days 00:00:00.429883625 +825 66 0 days 00:00:00.737758868 +825 67 0 days 00:00:00.423996836 +825 68 0 days 00:00:00.625686326 +825 69 0 days 00:00:00.255592716 +825 70 0 days 00:00:00.282702908 +825 71 0 days 00:00:00.253105653 +825 72 0 days 00:00:00.787944806 +825 73 0 days 00:00:00.223960340 +825 74 0 days 00:00:00.860764796 +825 75 0 days 00:00:00.434416888 +825 76 0 days 00:00:00.821325693 +825 77 0 days 00:00:00.397837075 +825 78 0 days 00:00:00.811750576 +825 79 0 days 00:00:00.440288850 +825 80 0 days 00:00:00.271699220 +825 81 0 days 00:00:00.280969160 +825 82 0 days 00:00:00.402431450 +825 83 0 days 00:00:00.428909668 +825 84 0 days 00:00:00.502732420 +825 85 0 days 00:00:00.279853455 +825 86 0 days 00:00:00.830003540 +825 87 0 days 00:00:00.251501415 +825 88 0 days 00:00:00.271700111 +825 89 0 days 00:00:00.409330450 +825 90 0 days 00:00:00.456251304 +825 91 0 days 00:00:00.257032330 +825 92 0 days 00:00:00.196113803 +825 93 0 days 00:00:00.761000915 +825 94 0 days 00:00:00.452014650 +825 95 0 days 00:00:00.778486532 +825 96 0 days 00:00:00.797918740 +825 97 0 days 00:00:00.669811840 +825 98 0 days 00:00:00.439762760 +825 99 0 days 00:00:00.732025515 +825 100 0 days 00:00:00.823872608 +826 1 0 days 00:00:00.838209836 +826 2 0 days 00:00:00.853907784 +826 3 0 days 00:00:00.261128633 +826 4 0 days 00:00:00.760060420 +826 5 0 days 00:00:00.905929280 +826 6 0 days 00:00:00.263821690 +826 7 0 days 00:00:00.267027492 +826 8 0 days 00:00:00.780618240 +826 9 0 days 00:00:00.427940095 +826 10 0 days 00:00:00.831237733 +826 11 0 days 00:00:00.880029400 +826 12 0 days 00:00:00.470897323 +826 13 0 days 00:00:00.458950076 +826 14 0 days 00:00:00.478258600 +826 15 0 days 00:00:00.432526915 +826 16 0 days 00:00:00.279481326 +826 17 0 days 00:00:00.871546360 +826 18 0 days 00:00:00.427103170 +826 19 0 days 
00:00:00.843348810 +826 20 0 days 00:00:00.241090235 +826 21 0 days 00:00:00.923292272 +826 22 0 days 00:00:00.682612480 +826 23 0 days 00:00:00.817391784 +826 24 0 days 00:00:00.277980275 +826 25 0 days 00:00:00.313387334 +826 26 0 days 00:00:00.417015890 +826 27 0 days 00:00:00.370496860 +826 28 0 days 00:00:00.785443395 +826 29 0 days 00:00:00.458963943 +826 30 0 days 00:00:00.752559335 +826 31 0 days 00:00:00.741915830 +826 32 0 days 00:00:00.455519826 +826 33 0 days 00:00:00.849056908 +826 34 0 days 00:00:00.407435315 +826 35 0 days 00:00:00.454504640 +826 36 0 days 00:00:00.259779370 +826 37 0 days 00:00:00.266389436 +826 38 0 days 00:00:00.850198613 +826 39 0 days 00:00:00.269745713 +826 40 0 days 00:00:00.387469140 +826 41 0 days 00:00:00.286810706 +826 42 0 days 00:00:00.414054590 +826 43 0 days 00:00:00.804348344 +826 44 0 days 00:00:00.677514086 +826 45 0 days 00:00:00.416439840 +826 46 0 days 00:00:00.668315606 +826 47 0 days 00:00:00.472063588 +826 48 0 days 00:00:00.711675166 +826 49 0 days 00:00:00.448045380 +826 50 0 days 00:00:00.446065144 +826 51 0 days 00:00:00.274907564 +826 52 0 days 00:00:00.465340802 +826 53 0 days 00:00:00.441288124 +826 54 0 days 00:00:00.314511688 +826 55 0 days 00:00:00.458889753 +826 56 0 days 00:00:00.775654535 +826 57 0 days 00:00:00.460287423 +826 58 0 days 00:00:00.277087624 +826 59 0 days 00:00:00.792424245 +826 60 0 days 00:00:00.468914963 +826 61 0 days 00:00:00.281073520 +826 62 0 days 00:00:00.460573460 +826 63 0 days 00:00:00.267028710 +826 64 0 days 00:00:00.274161544 +826 65 0 days 00:00:00.445944640 +826 66 0 days 00:00:00.367752440 +826 67 0 days 00:00:00.295448110 +826 68 0 days 00:00:00.489082411 +826 69 0 days 00:00:00.287068092 +826 70 0 days 00:00:00.454746535 +826 71 0 days 00:00:00.271419713 +826 72 0 days 00:00:00.277069470 +826 73 0 days 00:00:00.456063344 +826 74 0 days 00:00:00.275991753 +826 75 0 days 00:00:00.456578080 +826 76 0 days 00:00:00.276902433 +826 77 0 days 00:00:00.253708595 +826 78 
0 days 00:00:00.257095500 +826 79 0 days 00:00:00.276043288 +826 80 0 days 00:00:00.287560684 +826 81 0 days 00:00:00.439611940 +826 82 0 days 00:00:00.315435940 +826 83 0 days 00:00:00.272884806 +826 84 0 days 00:00:00.880192483 +826 85 0 days 00:00:00.276959240 +826 86 0 days 00:00:00.443932056 +826 87 0 days 00:00:00.790335795 +826 88 0 days 00:00:00.453065204 +826 89 0 days 00:00:00.720317966 +826 90 0 days 00:00:00.772855020 +826 91 0 days 00:00:00.698597826 +826 92 0 days 00:00:00.781650230 +826 93 0 days 00:00:00.455008310 +826 94 0 days 00:00:00.766942200 +826 95 0 days 00:00:00.274173530 +826 96 0 days 00:00:00.482445813 +826 97 0 days 00:00:00.862983672 +826 98 0 days 00:00:00.825491484 +826 99 0 days 00:00:00.465080436 +826 100 0 days 00:00:00.467417064 +827 1 0 days 00:00:00.214234950 +827 2 0 days 00:00:00.213247620 +827 3 0 days 00:00:00.382660885 +827 4 0 days 00:00:00.223233912 +827 5 0 days 00:00:00.394355916 +827 6 0 days 00:00:00.384238910 +827 7 0 days 00:00:00.192012793 +827 8 0 days 00:00:00.245657585 +827 9 0 days 00:00:00.221883765 +827 10 0 days 00:00:00.405637888 +827 11 0 days 00:00:00.166705313 +827 12 0 days 00:00:00.122040210 +827 13 0 days 00:00:00.128995115 +827 14 0 days 00:00:00.219510160 +827 15 0 days 00:00:00.396237400 +827 16 0 days 00:00:00.329620460 +827 17 0 days 00:00:00.210510315 +827 18 0 days 00:00:00.226054933 +827 19 0 days 00:00:00.433970017 +827 20 0 days 00:00:00.379566395 +827 21 0 days 00:00:00.420376022 +827 22 0 days 00:00:00.137459870 +827 23 0 days 00:00:00.239776660 +827 24 0 days 00:00:00.225068616 +827 25 0 days 00:00:00.386596292 +827 26 0 days 00:00:00.215406172 +827 27 0 days 00:00:00.205170255 +827 28 0 days 00:00:00.238950465 +827 29 0 days 00:00:00.140906240 +827 30 0 days 00:00:00.190473753 +827 31 0 days 00:00:00.401611230 +827 32 0 days 00:00:00.344737253 +827 33 0 days 00:00:00.415620612 +827 34 0 days 00:00:00.237400202 +827 35 0 days 00:00:00.138995226 +827 36 0 days 00:00:00.226677836 +827 37 0 
days 00:00:00.431218010 +827 38 0 days 00:00:00.430673176 +827 39 0 days 00:00:00.138774832 +827 40 0 days 00:00:00.235030904 +827 41 0 days 00:00:00.216023240 +827 42 0 days 00:00:00.212523010 +827 43 0 days 00:00:00.151058844 +827 44 0 days 00:00:00.254555128 +827 45 0 days 00:00:00.118248160 +827 46 0 days 00:00:00.376542805 +827 47 0 days 00:00:00.226009480 +827 48 0 days 00:00:00.247742585 +827 49 0 days 00:00:00.398032435 +827 50 0 days 00:00:00.253178431 +827 51 0 days 00:00:00.453722846 +827 52 0 days 00:00:00.209760075 +827 53 0 days 00:00:00.257863500 +827 54 0 days 00:00:00.120533705 +827 55 0 days 00:00:00.217123460 +827 56 0 days 00:00:00.422203164 +827 57 0 days 00:00:00.132225604 +827 58 0 days 00:00:00.423732200 +827 59 0 days 00:00:00.433410000 +827 60 0 days 00:00:00.392349380 +827 61 0 days 00:00:00.126552870 +827 62 0 days 00:00:00.124519980 +827 63 0 days 00:00:00.214280900 +827 64 0 days 00:00:00.157183553 +827 65 0 days 00:00:00.131744285 +827 66 0 days 00:00:00.227846340 +827 67 0 days 00:00:00.118115260 +827 68 0 days 00:00:00.137465677 +827 69 0 days 00:00:00.132038676 +827 70 0 days 00:00:00.244082073 +827 71 0 days 00:00:00.398986045 +827 72 0 days 00:00:00.199475580 +827 73 0 days 00:00:00.419361612 +827 74 0 days 00:00:00.127860085 +827 75 0 days 00:00:00.324707140 +827 76 0 days 00:00:00.099070543 +827 77 0 days 00:00:00.259308260 +827 78 0 days 00:00:00.167292760 +827 79 0 days 00:00:00.290553840 +827 80 0 days 00:00:00.107347593 +827 81 0 days 00:00:00.321621856 +827 82 0 days 00:00:00.293565095 +827 83 0 days 00:00:00.179678015 +827 84 0 days 00:00:00.179281971 +827 85 0 days 00:00:00.180244966 +827 86 0 days 00:00:00.105870863 +827 87 0 days 00:00:00.294706020 +827 88 0 days 00:00:00.286168455 +827 89 0 days 00:00:00.312110344 +827 90 0 days 00:00:00.104915433 +827 91 0 days 00:00:00.146219613 +827 92 0 days 00:00:00.171614723 +827 93 0 days 00:00:00.130818728 +827 94 0 days 00:00:00.103602484 +827 95 0 days 00:00:00.168575100 
+827 96 0 days 00:00:00.113942632 +827 97 0 days 00:00:00.083078210 +827 98 0 days 00:00:00.089027735 +827 99 0 days 00:00:00.261664366 +827 100 0 days 00:00:00.183039063 +828 1 0 days 00:00:00.348479210 +828 2 0 days 00:00:00.224473960 +828 3 0 days 00:00:00.162885700 +828 4 0 days 00:00:00.124959922 +828 5 0 days 00:00:00.323851064 +828 6 0 days 00:00:00.102440268 +828 7 0 days 00:00:00.346246273 +828 8 0 days 00:00:00.100752585 +828 9 0 days 00:00:00.103467430 +828 10 0 days 00:00:00.112534296 +828 11 0 days 00:00:00.156914436 +828 12 0 days 00:00:00.167814515 +828 13 0 days 00:00:00.161785875 +828 14 0 days 00:00:00.294593488 +828 15 0 days 00:00:00.092433016 +828 16 0 days 00:00:00.264507200 +828 17 0 days 00:00:00.164287060 +828 18 0 days 00:00:00.161105840 +828 19 0 days 00:00:00.105818340 +828 20 0 days 00:00:00.158928875 +828 21 0 days 00:00:00.171559585 +828 22 0 days 00:00:00.332974452 +828 23 0 days 00:00:00.127672008 +828 24 0 days 00:00:00.188372171 +828 25 0 days 00:00:00.366763593 +828 26 0 days 00:00:00.092690265 +828 27 0 days 00:00:00.095889730 +828 28 0 days 00:00:00.277747453 +828 29 0 days 00:00:00.178364125 +828 30 0 days 00:00:00.086711715 +828 31 0 days 00:00:00.168738800 +828 32 0 days 00:00:00.099956073 +828 33 0 days 00:00:00.107048801 +828 34 0 days 00:00:00.096129052 +828 35 0 days 00:00:00.170129704 +828 36 0 days 00:00:00.169007132 +828 37 0 days 00:00:00.314687650 +828 38 0 days 00:00:00.102071490 +828 39 0 days 00:00:00.100323300 +828 40 0 days 00:00:00.319082952 +828 41 0 days 00:00:00.193793030 +828 42 0 days 00:00:00.347937180 +828 43 0 days 00:00:00.327674452 +828 44 0 days 00:00:00.311483736 +828 45 0 days 00:00:00.095227540 +828 46 0 days 00:00:00.168191844 +828 47 0 days 00:00:00.095903170 +828 48 0 days 00:00:00.338071048 +828 49 0 days 00:00:00.265675480 +828 50 0 days 00:00:00.094524595 +828 51 0 days 00:00:00.093182235 +828 52 0 days 00:00:00.100417500 +828 53 0 days 00:00:00.112242892 +828 54 0 days 00:00:00.100860090 
+828 55 0 days 00:00:00.101461010 +828 56 0 days 00:00:00.327809843 +828 57 0 days 00:00:00.245785886 +828 58 0 days 00:00:00.137211870 +828 59 0 days 00:00:00.109992851 +828 60 0 days 00:00:00.105122716 +828 61 0 days 00:00:00.280609140 +828 62 0 days 00:00:00.153894280 +828 63 0 days 00:00:00.165047920 +828 64 0 days 00:00:00.260301980 +828 65 0 days 00:00:00.100971200 +828 66 0 days 00:00:00.162077872 +828 67 0 days 00:00:00.170398695 +828 68 0 days 00:00:00.097067540 +828 69 0 days 00:00:00.104097164 +828 70 0 days 00:00:00.151118528 +828 71 0 days 00:00:00.337315306 +828 72 0 days 00:00:00.102511473 +828 73 0 days 00:00:00.095087086 +828 74 0 days 00:00:00.140762213 +828 75 0 days 00:00:00.190631986 +828 76 0 days 00:00:00.174432884 +828 77 0 days 00:00:00.160516785 +828 78 0 days 00:00:00.104848940 +828 79 0 days 00:00:00.153222705 +828 80 0 days 00:00:00.100700900 +828 81 0 days 00:00:00.108742870 +828 82 0 days 00:00:00.194857843 +828 83 0 days 00:00:00.105497775 +828 84 0 days 00:00:00.089945195 +828 85 0 days 00:00:00.325788560 +828 86 0 days 00:00:00.160119955 +828 87 0 days 00:00:00.348169973 +828 88 0 days 00:00:00.177507650 +828 89 0 days 00:00:00.156794273 +828 90 0 days 00:00:00.106321290 +828 91 0 days 00:00:00.123176640 +828 92 0 days 00:00:00.133375786 +828 93 0 days 00:00:00.183417660 +828 94 0 days 00:00:00.122191537 +828 95 0 days 00:00:00.277356973 +828 96 0 days 00:00:00.176706440 +828 97 0 days 00:00:00.300583660 +828 98 0 days 00:00:00.125096980 +828 99 0 days 00:00:00.312370252 +828 100 0 days 00:00:00.106455640 +829 1 0 days 00:00:00.459292640 +829 2 0 days 00:00:00.481891575 +829 3 0 days 00:00:00.840679435 +829 4 0 days 00:00:00.280416470 +829 5 0 days 00:00:00.727048700 +829 6 0 days 00:00:00.489258265 +829 7 0 days 00:00:00.235728690 +829 8 0 days 00:00:00.457605288 +829 9 0 days 00:00:00.444942516 +829 10 0 days 00:00:00.402653635 +829 11 0 days 00:00:00.409071715 +829 12 0 days 00:00:00.531860824 +829 13 0 days 00:00:00.450297240 
+829 14 0 days 00:00:00.744841926 +829 15 0 days 00:00:00.518795566 +829 16 0 days 00:00:00.425626860 +829 17 0 days 00:00:00.830824576 +829 18 0 days 00:00:00.440671640 +829 19 0 days 00:00:00.263418570 +829 20 0 days 00:00:00.828798383 +829 21 0 days 00:00:00.273432444 +829 22 0 days 00:00:00.237224364 +829 23 0 days 00:00:00.887348168 +829 24 0 days 00:00:00.283201101 +829 25 0 days 00:00:00.938935866 +829 26 0 days 00:00:00.836094973 +829 27 0 days 00:00:00.388022070 +829 28 0 days 00:00:00.233720745 +829 29 0 days 00:00:00.937240036 +829 30 0 days 00:00:00.437635355 +829 31 0 days 00:00:00.296857270 +829 32 0 days 00:00:00.414082075 +829 33 0 days 00:00:00.302805214 +829 34 0 days 00:00:00.818721324 +829 35 0 days 00:00:00.298989872 +829 36 0 days 00:00:00.287545336 +829 37 0 days 00:00:00.452501785 +829 38 0 days 00:00:00.295583796 +829 39 0 days 00:00:00.411416885 +829 40 0 days 00:00:00.926249108 +829 41 0 days 00:00:00.705980925 +829 42 0 days 00:00:00.291036795 +829 43 0 days 00:00:00.895233064 +829 44 0 days 00:00:00.712057320 +829 45 0 days 00:00:00.929447496 +829 46 0 days 00:00:00.783191700 +829 47 0 days 00:00:00.247631245 +829 48 0 days 00:00:00.851694556 +829 49 0 days 00:00:00.508255226 +829 50 0 days 00:00:00.223329015 +829 51 0 days 00:00:00.272233424 +829 52 0 days 00:00:00.256930075 +829 53 0 days 00:00:00.283837835 +829 54 0 days 00:00:00.254638125 +829 55 0 days 00:00:00.260514265 +829 56 0 days 00:00:00.446829845 +829 57 0 days 00:00:00.444011793 +829 58 0 days 00:00:00.467104130 +829 59 0 days 00:00:00.986778490 +829 60 0 days 00:00:00.965239677 +829 61 0 days 00:00:00.458519234 +829 62 0 days 00:00:00.443744953 +829 63 0 days 00:00:00.834907096 +829 64 0 days 00:00:00.435717728 +829 65 0 days 00:00:00.772306128 +829 66 0 days 00:00:00.263979386 +829 67 0 days 00:00:00.247232300 +829 68 0 days 00:00:00.264184500 +829 69 0 days 00:00:00.452035760 +829 70 0 days 00:00:00.694187860 +829 71 0 days 00:00:00.280946720 +829 72 0 days 
00:00:00.863022388 +829 73 0 days 00:00:00.465474290 +829 74 0 days 00:00:00.948564908 +829 75 0 days 00:00:00.520801356 +829 76 0 days 00:00:00.391989356 +829 77 0 days 00:00:00.422490106 +829 78 0 days 00:00:00.466633015 +829 79 0 days 00:00:00.446137660 +829 80 0 days 00:00:00.841528835 +829 81 0 days 00:00:00.788749090 +829 82 0 days 00:00:00.268217016 +829 83 0 days 00:00:00.711638360 +829 84 0 days 00:00:00.845830620 +829 85 0 days 00:00:00.874526188 +829 86 0 days 00:00:00.798149540 +829 87 0 days 00:00:00.459201576 +829 88 0 days 00:00:00.831129660 +829 89 0 days 00:00:00.414884220 +829 90 0 days 00:00:00.774387880 +829 91 0 days 00:00:00.819226730 +829 92 0 days 00:00:00.423684124 +829 93 0 days 00:00:00.262525785 +829 94 0 days 00:00:00.415254092 +829 95 0 days 00:00:00.271225463 +829 96 0 days 00:00:00.303623424 +829 97 0 days 00:00:00.284288364 +829 98 0 days 00:00:00.436448106 +829 99 0 days 00:00:00.861083000 +829 100 0 days 00:00:00.488990631 +830 1 0 days 00:00:00.185403487 +830 2 0 days 00:00:00.484951536 +830 3 0 days 00:00:00.151203948 +830 4 0 days 00:00:00.390619313 +830 5 0 days 00:00:00.262118300 +830 6 0 days 00:00:00.161923084 +830 7 0 days 00:00:00.546555822 +830 8 0 days 00:00:00.275599344 +830 9 0 days 00:00:00.151018088 +830 10 0 days 00:00:00.375895730 +830 11 0 days 00:00:00.514956146 +830 12 0 days 00:00:00.263681546 +830 13 0 days 00:00:00.400600468 +830 14 0 days 00:00:00.141215885 +830 15 0 days 00:00:00.135036010 +830 16 0 days 00:00:00.253681065 +830 17 0 days 00:00:00.427299046 +830 18 0 days 00:00:00.247053990 +830 19 0 days 00:00:00.150435915 +830 20 0 days 00:00:00.158885562 +830 21 0 days 00:00:00.118714395 +830 22 0 days 00:00:00.161674735 +830 23 0 days 00:00:00.160813916 +830 24 0 days 00:00:00.424858993 +830 25 0 days 00:00:00.467307948 +830 26 0 days 00:00:00.153033411 +830 27 0 days 00:00:00.512173385 +830 28 0 days 00:00:00.228023760 +830 29 0 days 00:00:00.240683963 +830 30 0 days 00:00:00.252537496 +830 31 0 days 
00:00:00.430245290 +830 32 0 days 00:00:00.471161165 +830 33 0 days 00:00:00.182369343 +830 34 0 days 00:00:00.480567246 +830 35 0 days 00:00:00.445399757 +830 36 0 days 00:00:00.129652935 +830 37 0 days 00:00:00.237312605 +830 38 0 days 00:00:00.142596150 +830 39 0 days 00:00:00.225550705 +830 40 0 days 00:00:00.470437264 +830 41 0 days 00:00:00.256292114 +830 42 0 days 00:00:00.147908475 +830 43 0 days 00:00:00.249231108 +830 44 0 days 00:00:00.209773005 +830 45 0 days 00:00:00.406924585 +830 46 0 days 00:00:00.447935220 +830 47 0 days 00:00:00.242479593 +830 48 0 days 00:00:00.166412352 +830 49 0 days 00:00:00.461008426 +830 50 0 days 00:00:00.253445048 +830 51 0 days 00:00:00.139953985 +830 52 0 days 00:00:00.410040830 +830 53 0 days 00:00:00.440352300 +830 54 0 days 00:00:00.154140765 +830 55 0 days 00:00:00.244935612 +830 56 0 days 00:00:00.158509377 +830 57 0 days 00:00:00.258612380 +830 58 0 days 00:00:00.138194630 +830 59 0 days 00:00:00.172798451 +830 60 0 days 00:00:00.160481820 +830 61 0 days 00:00:00.254862920 +830 62 0 days 00:00:00.147107976 +830 63 0 days 00:00:00.430985680 +830 64 0 days 00:00:00.427666732 +830 65 0 days 00:00:00.460168810 +830 66 0 days 00:00:00.395366906 +830 67 0 days 00:00:00.466004188 +830 68 0 days 00:00:00.247917285 +830 69 0 days 00:00:00.458386920 +830 70 0 days 00:00:00.468056544 +830 71 0 days 00:00:00.154076240 +830 72 0 days 00:00:00.140882286 +830 73 0 days 00:00:00.415020595 +830 74 0 days 00:00:00.283068094 +830 75 0 days 00:00:00.245615300 +830 76 0 days 00:00:00.130430640 +830 77 0 days 00:00:00.216084126 +830 78 0 days 00:00:00.112718605 +830 79 0 days 00:00:00.214697680 +830 80 0 days 00:00:00.443423830 +830 81 0 days 00:00:00.207352506 +830 82 0 days 00:00:00.161812576 +830 83 0 days 00:00:00.485040003 +830 84 0 days 00:00:00.143065156 +830 85 0 days 00:00:00.239707252 +830 86 0 days 00:00:00.151922200 +830 87 0 days 00:00:00.460662268 +830 88 0 days 00:00:00.129782090 +830 89 0 days 00:00:00.233266432 +830 90 
0 days 00:00:00.420414820 +830 91 0 days 00:00:00.428259925 +830 92 0 days 00:00:00.139572146 +830 93 0 days 00:00:00.486376080 +830 94 0 days 00:00:00.457100540 +830 95 0 days 00:00:00.246355590 +830 96 0 days 00:00:00.136542112 +830 97 0 days 00:00:00.150230884 +830 98 0 days 00:00:00.432029390 +830 99 0 days 00:00:00.135529950 +830 100 0 days 00:00:00.483171508 +831 1 0 days 00:00:00.281225812 +831 2 0 days 00:00:00.266740688 +831 3 0 days 00:00:00.271611228 +831 4 0 days 00:00:00.461098140 +831 5 0 days 00:00:00.485078327 +831 6 0 days 00:00:00.282800496 +831 7 0 days 00:00:00.752642665 +831 8 0 days 00:00:00.257505970 +831 9 0 days 00:00:00.830624020 +831 10 0 days 00:00:00.577972513 +831 11 0 days 00:00:00.443531856 +831 12 0 days 00:00:00.584250140 +831 13 0 days 00:00:00.439950716 +831 14 0 days 00:00:00.411723415 +831 15 0 days 00:00:00.355677840 +831 16 0 days 00:00:00.258737932 +831 17 0 days 00:00:00.839235694 +831 18 0 days 00:00:00.778181944 +831 19 0 days 00:00:00.769849885 +831 20 0 days 00:00:00.272440447 +831 21 0 days 00:00:00.903382123 +831 22 0 days 00:00:00.259680446 +831 23 0 days 00:00:00.838736508 +831 24 0 days 00:00:00.491704600 +831 25 0 days 00:00:00.423099240 +831 26 0 days 00:00:00.519773080 +831 27 0 days 00:00:00.404685685 +831 28 0 days 00:00:00.437204490 +831 29 0 days 00:00:00.376182120 +831 30 0 days 00:00:00.761445990 +831 31 0 days 00:00:00.289787754 +831 32 0 days 00:00:00.271988457 +831 33 0 days 00:00:00.400732220 +831 34 0 days 00:00:00.829281116 +831 35 0 days 00:00:00.251272176 +831 36 0 days 00:00:00.549276652 +831 37 0 days 00:00:00.278987190 +831 38 0 days 00:00:00.209909066 +831 39 0 days 00:00:00.483101460 +831 40 0 days 00:00:00.825677293 +831 41 0 days 00:00:00.588511630 +831 42 0 days 00:00:00.399599570 +831 43 0 days 00:00:00.219931093 +831 44 0 days 00:00:00.814399448 +831 45 0 days 00:00:00.251001584 +831 46 0 days 00:00:00.263579716 +831 47 0 days 00:00:00.281017477 +831 48 0 days 00:00:00.302648672 +831 49 0 
days 00:00:00.765797460 +831 50 0 days 00:00:00.268669973 +831 51 0 days 00:00:00.419243165 +831 52 0 days 00:00:00.272482188 +831 53 0 days 00:00:00.488983746 +831 54 0 days 00:00:00.450800220 +831 55 0 days 00:00:00.222803380 +831 56 0 days 00:00:00.437391936 +831 57 0 days 00:00:00.791702180 +831 58 0 days 00:00:00.751917785 +831 59 0 days 00:00:00.256525066 +831 60 0 days 00:00:00.267663620 +831 61 0 days 00:00:00.681127066 +831 62 0 days 00:00:00.284331110 +831 63 0 days 00:00:00.740820200 +831 64 0 days 00:00:00.805840270 +831 65 0 days 00:00:00.785298624 +831 66 0 days 00:00:00.410328340 +831 67 0 days 00:00:00.788866820 +831 68 0 days 00:00:00.414406525 +831 69 0 days 00:00:00.461384803 +831 70 0 days 00:00:00.278754035 +831 71 0 days 00:00:00.473612796 +831 72 0 days 00:00:00.702139533 +831 73 0 days 00:00:00.243473400 +831 74 0 days 00:00:00.241880664 +831 75 0 days 00:00:00.765262050 +831 76 0 days 00:00:00.494995486 +831 77 0 days 00:00:00.452305856 +831 78 0 days 00:00:00.752833590 +831 79 0 days 00:00:00.763917385 +831 80 0 days 00:00:00.820228456 +831 81 0 days 00:00:00.434419490 +831 82 0 days 00:00:00.768314035 +831 83 0 days 00:00:00.242723020 +831 84 0 days 00:00:00.434046020 +831 85 0 days 00:00:00.250960632 +831 86 0 days 00:00:00.441578496 +831 87 0 days 00:00:00.814455840 +831 88 0 days 00:00:00.671519633 +831 89 0 days 00:00:00.467059114 +831 90 0 days 00:00:00.245585926 +831 91 0 days 00:00:00.253678296 +831 92 0 days 00:00:00.264548354 +831 93 0 days 00:00:00.525348355 +831 94 0 days 00:00:00.742448060 +831 95 0 days 00:00:00.868309428 +831 96 0 days 00:00:00.466234913 +831 97 0 days 00:00:00.459419968 +831 98 0 days 00:00:00.853397346 +831 99 0 days 00:00:00.442720948 +831 100 0 days 00:00:00.245093132 +832 1 0 days 00:00:00.148943657 +832 2 0 days 00:00:00.417870426 +832 3 0 days 00:00:00.433963796 +832 4 0 days 00:00:00.137616500 +832 5 0 days 00:00:00.396026760 +832 6 0 days 00:00:00.210695445 +832 7 0 days 00:00:00.418078833 +832 8 0 
days 00:00:00.407618768 +832 9 0 days 00:00:00.116435270 +832 10 0 days 00:00:00.390308035 +832 11 0 days 00:00:00.381123020 +832 12 0 days 00:00:00.218017040 +832 13 0 days 00:00:00.097607140 +832 14 0 days 00:00:00.449079837 +832 15 0 days 00:00:00.388099090 +832 16 0 days 00:00:00.379593675 +832 17 0 days 00:00:00.123190785 +832 18 0 days 00:00:00.117538910 +832 19 0 days 00:00:00.270343305 +832 20 0 days 00:00:00.139953548 +832 21 0 days 00:00:00.193269080 +832 22 0 days 00:00:00.126840870 +832 23 0 days 00:00:00.442859570 +832 24 0 days 00:00:00.126561930 +832 25 0 days 00:00:00.236198762 +832 26 0 days 00:00:00.223839695 +832 27 0 days 00:00:00.222486460 +832 28 0 days 00:00:00.261364153 +832 29 0 days 00:00:00.139614596 +832 30 0 days 00:00:00.425525048 +832 31 0 days 00:00:00.128791325 +832 32 0 days 00:00:00.246625876 +832 33 0 days 00:00:00.136234108 +832 34 0 days 00:00:00.139306110 +832 35 0 days 00:00:00.234051380 +832 36 0 days 00:00:00.232137844 +832 37 0 days 00:00:00.445309731 +832 38 0 days 00:00:00.131439515 +832 39 0 days 00:00:00.441549650 +832 40 0 days 00:00:00.237313840 +832 41 0 days 00:00:00.259365940 +832 42 0 days 00:00:00.138587950 +832 43 0 days 00:00:00.220018550 +832 44 0 days 00:00:00.407841765 +832 45 0 days 00:00:00.233228884 +832 46 0 days 00:00:00.221624825 +832 47 0 days 00:00:00.424804284 +832 48 0 days 00:00:00.242522346 +832 49 0 days 00:00:00.428134968 +832 50 0 days 00:00:00.140673997 +832 51 0 days 00:00:00.416033484 +832 52 0 days 00:00:00.399660755 +832 53 0 days 00:00:00.466055455 +832 54 0 days 00:00:00.440387223 +832 55 0 days 00:00:00.142867030 +832 56 0 days 00:00:00.125806340 +832 57 0 days 00:00:00.143498473 +832 58 0 days 00:00:00.240739483 +832 59 0 days 00:00:00.409366705 +832 60 0 days 00:00:00.252931720 +832 61 0 days 00:00:00.432794088 +832 62 0 days 00:00:00.248355786 +832 63 0 days 00:00:00.234237316 +832 64 0 days 00:00:00.164464854 +832 65 0 days 00:00:00.128618030 +832 66 0 days 00:00:00.439431253 +832 
67 0 days 00:00:00.235990720 +832 68 0 days 00:00:00.132402650 +832 69 0 days 00:00:00.410480765 +832 70 0 days 00:00:00.232911684 +832 71 0 days 00:00:00.371922770 +832 72 0 days 00:00:00.138269605 +832 73 0 days 00:00:00.391163590 +832 74 0 days 00:00:00.448130050 +832 75 0 days 00:00:00.150320000 +832 76 0 days 00:00:00.257747204 +832 77 0 days 00:00:00.144748303 +832 78 0 days 00:00:00.362252452 +832 79 0 days 00:00:00.195321315 +832 80 0 days 00:00:00.431409812 +832 81 0 days 00:00:00.258672949 +832 82 0 days 00:00:00.248786170 +832 83 0 days 00:00:00.215029265 +832 84 0 days 00:00:00.407400380 +832 85 0 days 00:00:00.117872410 +832 86 0 days 00:00:00.255858417 +832 87 0 days 00:00:00.439893850 +832 88 0 days 00:00:00.482806717 +832 89 0 days 00:00:00.130751605 +832 90 0 days 00:00:00.467415500 +832 91 0 days 00:00:00.238444568 +832 92 0 days 00:00:00.434740192 +832 93 0 days 00:00:00.157724540 +832 94 0 days 00:00:00.142033593 +832 95 0 days 00:00:00.141311275 +832 96 0 days 00:00:00.259828630 +832 97 0 days 00:00:00.440108784 +832 98 0 days 00:00:00.403436530 +832 99 0 days 00:00:00.404809225 +832 100 0 days 00:00:00.258350537 +833 1 0 days 00:00:02.752048150 +833 2 0 days 00:00:02.376596406 +833 3 0 days 00:00:02.522804452 +833 4 0 days 00:00:02.182816573 +833 5 0 days 00:00:04.977624993 +833 6 0 days 00:00:06.024763324 +833 7 0 days 00:00:02.294877420 +833 8 0 days 00:00:03.145308053 +833 9 0 days 00:00:06.077828024 +833 10 0 days 00:00:02.708004806 +833 11 0 days 00:00:05.177895153 +833 12 0 days 00:00:03.484658775 +833 13 0 days 00:00:03.078854746 +833 14 0 days 00:00:05.290713866 +833 15 0 days 00:00:04.238517226 +833 16 0 days 00:00:02.809987380 +833 17 0 days 00:00:06.369459916 +833 18 0 days 00:00:02.186435553 +833 19 0 days 00:00:02.319708446 +833 20 0 days 00:00:03.765043980 +833 21 0 days 00:00:03.696038748 +833 22 0 days 00:00:02.488261755 +833 23 0 days 00:00:02.403720225 +833 24 0 days 00:00:02.239282080 +833 25 0 days 00:00:04.992151793 +833 
26 0 days 00:00:06.295104825 +833 27 0 days 00:00:05.093452546 +833 28 0 days 00:00:02.122913933 +833 29 0 days 00:00:03.244726360 +833 30 0 days 00:00:03.665670020 +833 31 0 days 00:00:05.481912666 +833 32 0 days 00:00:03.434631830 +833 33 0 days 00:00:05.738597065 +833 34 0 days 00:00:02.185843500 +833 35 0 days 00:00:05.183291666 +833 36 0 days 00:00:03.025618713 +833 37 0 days 00:00:02.714976117 +833 38 0 days 00:00:02.742403933 +833 39 0 days 00:00:03.863791300 +833 40 0 days 00:00:06.243490460 +833 41 0 days 00:00:03.536063860 +833 42 0 days 00:00:03.092433526 +833 43 0 days 00:00:05.082879200 +833 44 0 days 00:00:05.785356980 +833 45 0 days 00:00:03.147369346 +833 46 0 days 00:00:02.666503274 +833 47 0 days 00:00:03.606532280 +833 48 0 days 00:00:05.327502646 +833 49 0 days 00:00:05.782341285 +833 50 0 days 00:00:03.928429300 +833 51 0 days 00:00:03.575923575 +833 52 0 days 00:00:01.881383046 +833 53 0 days 00:00:03.878785100 +833 54 0 days 00:00:03.816561540 +833 55 0 days 00:00:02.422262675 +833 56 0 days 00:00:04.154485772 +833 57 0 days 00:00:03.775362848 +833 58 0 days 00:00:02.529298276 +833 59 0 days 00:00:05.911568888 +833 60 0 days 00:00:03.854423553 +833 61 0 days 00:00:02.490031330 +833 62 0 days 00:00:02.169695793 +833 63 0 days 00:00:03.860480245 +833 64 0 days 00:00:03.662746432 +833 65 0 days 00:00:03.431979960 +833 66 0 days 00:00:06.127198620 +833 67 0 days 00:00:03.649796452 +833 68 0 days 00:00:02.349923000 +833 69 0 days 00:00:02.324295425 +833 70 0 days 00:00:05.171850580 +833 71 0 days 00:00:02.203437466 +833 72 0 days 00:00:02.435302145 +833 73 0 days 00:00:02.268693920 +833 74 0 days 00:00:03.951649060 +833 75 0 days 00:00:05.732130045 +833 76 0 days 00:00:03.104910720 +833 77 0 days 00:00:02.503091540 +833 78 0 days 00:00:02.796698260 +833 79 0 days 00:00:03.305513520 +833 80 0 days 00:00:02.346531905 +833 81 0 days 00:00:04.379535200 +833 82 0 days 00:00:02.368120075 +833 83 0 days 00:00:06.058721325 +833 84 0 days 
00:00:03.477236860 +833 85 0 days 00:00:03.129835473 +833 86 0 days 00:00:03.104676906 +833 87 0 days 00:00:03.546267265 +833 88 0 days 00:00:02.719663100 +833 89 0 days 00:00:02.768983620 +833 90 0 days 00:00:02.134537913 +833 91 0 days 00:00:05.280806673 +833 92 0 days 00:00:02.863886524 +833 93 0 days 00:00:05.017358400 +833 94 0 days 00:00:02.668513288 +833 95 0 days 00:00:06.063586070 +833 96 0 days 00:00:03.312520833 +833 97 0 days 00:00:06.483227375 +833 98 0 days 00:00:06.082503212 +833 99 0 days 00:00:02.386503320 +833 100 0 days 00:00:03.898746640 +834 1 0 days 00:00:02.468910535 +834 2 0 days 00:00:05.726170686 +834 3 0 days 00:00:06.151803635 +834 4 0 days 00:00:05.825313286 +834 5 0 days 00:00:02.483559835 +834 6 0 days 00:00:03.446467246 +834 7 0 days 00:00:04.588821093 +834 8 0 days 00:00:02.214328480 +834 9 0 days 00:00:02.853583465 +834 10 0 days 00:00:06.470462050 +834 11 0 days 00:00:02.515140315 +834 12 0 days 00:00:03.619659830 +834 13 0 days 00:00:04.100221505 +834 14 0 days 00:00:03.737884210 +834 15 0 days 00:00:03.525125225 +834 16 0 days 00:00:03.551332306 +834 17 0 days 00:00:03.224295543 +834 18 0 days 00:00:02.461523320 +834 19 0 days 00:00:05.279079706 +834 20 0 days 00:00:05.313699326 +834 21 0 days 00:00:04.136901705 +834 22 0 days 00:00:06.546196272 +834 23 0 days 00:00:03.271586433 +834 24 0 days 00:00:03.895325764 +834 25 0 days 00:00:05.930017950 +834 26 0 days 00:00:03.869265716 +834 27 0 days 00:00:02.783489820 +834 28 0 days 00:00:02.598073840 +834 29 0 days 00:00:05.521569510 +834 30 0 days 00:00:02.744789590 +834 31 0 days 00:00:03.317056226 +834 32 0 days 00:00:05.418440453 +834 33 0 days 00:00:06.120219245 +834 34 0 days 00:00:05.352963500 +834 35 0 days 00:00:05.856338660 +834 36 0 days 00:00:02.507906575 +834 37 0 days 00:00:03.281240320 +834 38 0 days 00:00:06.497487765 +834 39 0 days 00:00:02.854267911 +834 40 0 days 00:00:02.718190863 +834 41 0 days 00:00:02.539872110 +834 42 0 days 00:00:06.166301860 +834 43 0 days 
00:00:03.259525986 +834 44 0 days 00:00:03.704118755 +834 45 0 days 00:00:02.410296400 +834 46 0 days 00:00:03.752825460 +834 47 0 days 00:00:04.044388163 +834 48 0 days 00:00:05.738071546 +834 49 0 days 00:00:03.614005560 +834 50 0 days 00:00:03.792745210 +834 51 0 days 00:00:02.795015520 +834 52 0 days 00:00:03.437673033 +834 53 0 days 00:00:02.515830080 +834 54 0 days 00:00:02.719387600 +834 55 0 days 00:00:06.097888215 +834 56 0 days 00:00:02.545172592 +834 57 0 days 00:00:05.670928873 +834 58 0 days 00:00:02.199249713 +834 59 0 days 00:00:03.526234193 +834 60 0 days 00:00:02.485964860 +834 61 0 days 00:00:02.447545435 +834 62 0 days 00:00:02.472709445 +834 63 0 days 00:00:02.622069788 +834 64 0 days 00:00:02.471372326 +834 65 0 days 00:00:06.050640145 +834 66 0 days 00:00:05.321447166 +834 67 0 days 00:00:05.929409590 +834 68 0 days 00:00:06.708708088 +834 69 0 days 00:00:05.904176750 +834 70 0 days 00:00:02.701169948 +834 71 0 days 00:00:02.335997873 +834 72 0 days 00:00:02.593259545 +834 73 0 days 00:00:03.561799140 +834 74 0 days 00:00:05.042197535 +834 75 0 days 00:00:03.276892406 +834 76 0 days 00:00:03.398773900 +834 77 0 days 00:00:05.827473410 +834 78 0 days 00:00:05.514157306 +834 79 0 days 00:00:02.432329706 +834 80 0 days 00:00:02.626915760 +834 81 0 days 00:00:06.163019905 +834 82 0 days 00:00:06.060529165 +834 83 0 days 00:00:05.474955080 +834 84 0 days 00:00:02.760125352 +834 85 0 days 00:00:02.310698033 +834 86 0 days 00:00:03.713782745 +834 87 0 days 00:00:02.621225055 +834 88 0 days 00:00:05.525294346 +834 89 0 days 00:00:03.335737193 +834 90 0 days 00:00:06.503692992 +834 91 0 days 00:00:03.217114776 +834 92 0 days 00:00:02.600506945 +834 93 0 days 00:00:02.647196132 +834 94 0 days 00:00:06.165061275 +834 95 0 days 00:00:04.096046690 +834 96 0 days 00:00:02.610842655 +834 97 0 days 00:00:03.204160042 +834 98 0 days 00:00:02.626452800 +834 99 0 days 00:00:03.716361245 +834 100 0 days 00:00:02.243014713 +835 1 0 days 00:00:01.167601160 +835 2 0 
days 00:00:01.827677130 +835 3 0 days 00:00:02.243582180 +835 4 0 days 00:00:01.773123140 +835 5 0 days 00:00:01.326278550 +835 6 0 days 00:00:01.206361430 +835 7 0 days 00:00:01.206388705 +835 8 0 days 00:00:01.153696346 +835 9 0 days 00:00:02.938779135 +835 10 0 days 00:00:01.160734726 +835 11 0 days 00:00:03.479185522 +835 12 0 days 00:00:01.436201175 +835 13 0 days 00:00:02.836665820 +835 14 0 days 00:00:02.751028526 +835 15 0 days 00:00:02.855707740 +835 16 0 days 00:00:02.746995680 +835 17 0 days 00:00:02.619766720 +835 18 0 days 00:00:03.120517230 +835 19 0 days 00:00:01.127852440 +835 20 0 days 00:00:01.365329600 +835 21 0 days 00:00:01.291913230 +835 22 0 days 00:00:01.330909420 +835 23 0 days 00:00:01.660515666 +835 24 0 days 00:00:01.116359500 +835 25 0 days 00:00:01.205010300 +835 26 0 days 00:00:01.706027386 +835 27 0 days 00:00:02.593542613 +835 28 0 days 00:00:02.341900126 +835 29 0 days 00:00:01.201570686 +835 30 0 days 00:00:01.794621675 +835 31 0 days 00:00:02.836543030 +835 32 0 days 00:00:01.756910400 +835 33 0 days 00:00:01.297150484 +835 34 0 days 00:00:01.706896366 +835 35 0 days 00:00:01.575265840 +835 36 0 days 00:00:01.736019566 +835 37 0 days 00:00:01.948685630 +835 38 0 days 00:00:01.279687733 +835 39 0 days 00:00:01.744955240 +835 40 0 days 00:00:01.116006306 +835 41 0 days 00:00:02.936730505 +835 42 0 days 00:00:01.311329724 +835 43 0 days 00:00:01.597019913 +835 44 0 days 00:00:02.563741153 +835 45 0 days 00:00:01.584464406 +835 46 0 days 00:00:01.410647800 +835 47 0 days 00:00:01.746780686 +835 48 0 days 00:00:02.874964650 +835 49 0 days 00:00:02.876778905 +835 50 0 days 00:00:01.510231460 +835 51 0 days 00:00:01.245963500 +835 52 0 days 00:00:01.228688795 +835 53 0 days 00:00:02.169277240 +835 54 0 days 00:00:01.908747635 +835 55 0 days 00:00:01.839440980 +835 56 0 days 00:00:02.533298320 +835 57 0 days 00:00:01.297363524 +835 58 0 days 00:00:01.331979295 +835 59 0 days 00:00:01.443875520 +835 60 0 days 00:00:02.017153148 +835 61 0 
days 00:00:01.423907320 +835 62 0 days 00:00:02.755323435 +835 63 0 days 00:00:01.348742004 +835 64 0 days 00:00:01.903491876 +835 65 0 days 00:00:02.757078120 +835 66 0 days 00:00:02.601196106 +835 67 0 days 00:00:01.252167235 +835 68 0 days 00:00:02.600913600 +835 69 0 days 00:00:02.796886073 +835 70 0 days 00:00:01.605949873 +835 71 0 days 00:00:01.795584820 +835 72 0 days 00:00:02.727094713 +835 73 0 days 00:00:01.980633128 +835 74 0 days 00:00:02.877685900 +835 75 0 days 00:00:02.505887553 +835 76 0 days 00:00:01.896567720 +835 77 0 days 00:00:02.497748480 +835 78 0 days 00:00:02.730188293 +835 79 0 days 00:00:02.563115973 +835 80 0 days 00:00:02.715587620 +835 81 0 days 00:00:03.110893084 +835 82 0 days 00:00:02.902198705 +835 83 0 days 00:00:02.805520106 +835 84 0 days 00:00:01.229337920 +835 85 0 days 00:00:01.766760940 +835 86 0 days 00:00:01.790873515 +835 87 0 days 00:00:01.746011230 +835 88 0 days 00:00:01.334414806 +835 89 0 days 00:00:02.204266313 +835 90 0 days 00:00:01.281383108 +835 91 0 days 00:00:02.145944333 +835 92 0 days 00:00:01.121439980 +835 93 0 days 00:00:03.252959376 +835 94 0 days 00:00:02.586586160 +835 95 0 days 00:00:01.821667870 +835 96 0 days 00:00:01.110054406 +835 97 0 days 00:00:02.434532490 +835 98 0 days 00:00:01.209009640 +835 99 0 days 00:00:02.177477793 +835 100 0 days 00:00:02.726904806 +836 1 0 days 00:00:02.897724293 +836 2 0 days 00:00:01.229494753 +836 3 0 days 00:00:02.738528726 +836 4 0 days 00:00:02.758214606 +836 5 0 days 00:00:01.045907026 +836 6 0 days 00:00:01.280261530 +836 7 0 days 00:00:01.422061590 +836 8 0 days 00:00:01.897403545 +836 9 0 days 00:00:01.175605273 +836 10 0 days 00:00:03.122827060 +836 11 0 days 00:00:01.413245355 +836 12 0 days 00:00:02.661290093 +836 13 0 days 00:00:01.874513360 +836 14 0 days 00:00:01.320315940 +836 15 0 days 00:00:03.064252660 +836 16 0 days 00:00:01.925270960 +836 17 0 days 00:00:03.063110240 +836 18 0 days 00:00:01.826107835 +836 19 0 days 00:00:01.342967092 +836 20 0 
days 00:00:02.916790446 +836 21 0 days 00:00:01.384008306 +836 22 0 days 00:00:02.979069845 +836 23 0 days 00:00:02.940642445 +836 24 0 days 00:00:01.340556440 +836 25 0 days 00:00:03.057078480 +836 26 0 days 00:00:01.715260453 +836 27 0 days 00:00:03.178064508 +836 28 0 days 00:00:02.755345613 +836 29 0 days 00:00:01.213532246 +836 30 0 days 00:00:02.055587415 +836 31 0 days 00:00:01.918788540 +836 32 0 days 00:00:03.301401760 +836 33 0 days 00:00:03.060743185 +836 34 0 days 00:00:01.348439092 +836 35 0 days 00:00:01.639131384 +836 36 0 days 00:00:01.286360340 +836 37 0 days 00:00:03.024009235 +836 38 0 days 00:00:01.288353030 +836 39 0 days 00:00:02.114304655 +836 40 0 days 00:00:01.386428324 +836 41 0 days 00:00:03.542842130 +836 42 0 days 00:00:01.688367113 +836 43 0 days 00:00:02.897790846 +836 44 0 days 00:00:01.270470053 +836 45 0 days 00:00:01.926300800 +836 46 0 days 00:00:01.885328225 +836 47 0 days 00:00:01.320103912 +836 48 0 days 00:00:02.701779140 +836 49 0 days 00:00:01.279971285 +836 50 0 days 00:00:01.845684880 +836 51 0 days 00:00:01.148880046 +836 52 0 days 00:00:01.282682620 +836 53 0 days 00:00:02.700764706 +836 54 0 days 00:00:01.681215926 +836 55 0 days 00:00:01.966962360 +836 56 0 days 00:00:02.100907964 +836 57 0 days 00:00:02.047200632 +836 58 0 days 00:00:01.316322960 +836 59 0 days 00:00:03.312655080 +836 60 0 days 00:00:01.216083980 +836 61 0 days 00:00:03.040018760 +836 62 0 days 00:00:01.156735506 +836 63 0 days 00:00:03.229563556 +836 64 0 days 00:00:01.144893860 +836 65 0 days 00:00:01.904811620 +836 66 0 days 00:00:03.128524780 +836 67 0 days 00:00:01.933540805 +836 68 0 days 00:00:02.012260416 +836 69 0 days 00:00:03.131830030 +836 70 0 days 00:00:01.530741495 +836 71 0 days 00:00:01.270260335 +836 72 0 days 00:00:03.244552715 +836 73 0 days 00:00:01.711093720 +836 74 0 days 00:00:01.449788176 +836 75 0 days 00:00:02.880367353 +836 76 0 days 00:00:02.159511284 +836 77 0 days 00:00:02.954009440 +836 78 0 days 00:00:01.267508270 
+836 79 0 days 00:00:01.280959804 +836 80 0 days 00:00:03.363668086 +836 81 0 days 00:00:01.185033400 +836 82 0 days 00:00:01.839613955 +836 83 0 days 00:00:02.772456226 +836 84 0 days 00:00:01.813788053 +836 85 0 days 00:00:01.980941564 +836 86 0 days 00:00:01.920349775 +836 87 0 days 00:00:03.246805364 +836 88 0 days 00:00:02.474874826 +836 89 0 days 00:00:01.960726104 +836 90 0 days 00:00:01.896939400 +836 91 0 days 00:00:01.887428970 +836 92 0 days 00:00:01.653584473 +836 93 0 days 00:00:01.712582026 +836 94 0 days 00:00:03.027639585 +836 95 0 days 00:00:01.468909996 +836 96 0 days 00:00:01.451948811 +836 97 0 days 00:00:02.739344386 +836 98 0 days 00:00:03.126052595 +836 99 0 days 00:00:03.122321195 +836 100 0 days 00:00:01.277147030 +837 1 0 days 00:00:02.497895125 +837 2 0 days 00:00:02.266607640 +837 3 0 days 00:00:08.183404740 +837 4 0 days 00:00:06.484088393 +837 5 0 days 00:00:02.459013826 +837 6 0 days 00:00:02.667006846 +837 7 0 days 00:00:04.076051793 +837 8 0 days 00:00:03.030188650 +837 9 0 days 00:00:03.856959030 +837 10 0 days 00:00:07.464325300 +837 11 0 days 00:00:03.741708933 +837 12 0 days 00:00:07.039569653 +837 13 0 days 00:00:03.260077977 +837 14 0 days 00:00:06.716852295 +837 15 0 days 00:00:07.844594250 +837 16 0 days 00:00:08.222050232 +837 17 0 days 00:00:02.331887413 +837 18 0 days 00:00:03.731316026 +837 19 0 days 00:00:04.438714625 +837 20 0 days 00:00:05.257212210 +837 21 0 days 00:00:02.490786706 +837 22 0 days 00:00:03.350568165 +837 23 0 days 00:00:06.600124206 +837 24 0 days 00:00:05.713346246 +837 25 0 days 00:00:03.987838226 +837 26 0 days 00:00:03.930100990 +837 27 0 days 00:00:04.728806230 +837 28 0 days 00:00:06.191036133 +837 29 0 days 00:00:04.551220295 +837 30 0 days 00:00:02.275888033 +837 31 0 days 00:00:04.189070692 +837 32 0 days 00:00:02.745235050 +837 33 0 days 00:00:06.318849686 +837 34 0 days 00:00:06.647839053 +837 35 0 days 00:00:03.218300110 +837 36 0 days 00:00:04.358237140 +837 37 0 days 00:00:06.217937786 
+837 38 0 days 00:00:03.686546113 +837 39 0 days 00:00:05.933886166 +837 40 0 days 00:00:07.533825765 +837 41 0 days 00:00:07.006585137 +837 42 0 days 00:00:04.168206120 +837 43 0 days 00:00:02.246655340 +837 44 0 days 00:00:02.228044273 +837 45 0 days 00:00:04.220106290 +837 46 0 days 00:00:03.028855946 +837 47 0 days 00:00:02.423882140 +837 48 0 days 00:00:05.937797993 +837 49 0 days 00:00:02.888199566 +837 50 0 days 00:00:07.872191296 +837 51 0 days 00:00:07.761751765 +837 52 0 days 00:00:05.233343280 +837 53 0 days 00:00:07.144782655 +837 54 0 days 00:00:02.701683835 +837 55 0 days 00:00:04.647223964 +837 56 0 days 00:00:05.891951665 +837 57 0 days 00:00:03.549650466 +837 58 0 days 00:00:02.508602012 +837 59 0 days 00:00:07.695387775 +837 60 0 days 00:00:02.328057946 +837 61 0 days 00:00:03.065689456 +837 62 0 days 00:00:05.680731886 +837 63 0 days 00:00:02.790915620 +837 64 0 days 00:00:07.781197460 +837 65 0 days 00:00:04.143421826 +837 66 0 days 00:00:08.685661691 +837 67 0 days 00:00:02.585383100 +837 68 0 days 00:00:04.349096436 +837 69 0 days 00:00:02.886246936 +837 70 0 days 00:00:02.686584305 +837 71 0 days 00:00:06.983613746 +837 72 0 days 00:00:03.488024426 +837 73 0 days 00:00:06.634684760 +837 74 0 days 00:00:04.475088932 +837 75 0 days 00:00:07.043431968 +837 76 0 days 00:00:07.054296410 +837 77 0 days 00:00:05.949258630 +837 78 0 days 00:00:02.692164140 +837 79 0 days 00:00:02.452673800 +837 80 0 days 00:00:04.274489133 +837 81 0 days 00:00:02.395401050 +837 82 0 days 00:00:02.574288635 +837 83 0 days 00:00:06.425288860 +837 84 0 days 00:00:05.785566496 +837 85 0 days 00:00:02.494243680 +837 86 0 days 00:00:04.313322100 +837 87 0 days 00:00:06.568662416 +837 88 0 days 00:00:06.593541512 +837 89 0 days 00:00:04.087476753 +837 90 0 days 00:00:07.310673460 +837 91 0 days 00:00:02.644894805 +837 92 0 days 00:00:07.496598066 +838 1 0 days 00:00:02.669980705 +838 2 0 days 00:00:02.031851970 +838 3 0 days 00:00:01.350349572 +838 4 0 days 
00:00:04.059427535 +838 5 0 days 00:00:01.947225705 +838 6 0 days 00:00:03.773239760 +838 7 0 days 00:00:01.270837880 +838 8 0 days 00:00:01.350568586 +838 9 0 days 00:00:01.271356255 +838 10 0 days 00:00:02.205613440 +838 11 0 days 00:00:01.489945810 +838 12 0 days 00:00:02.777395213 +838 13 0 days 00:00:01.353695493 +838 14 0 days 00:00:02.273627604 +838 15 0 days 00:00:01.539214205 +838 16 0 days 00:00:03.560934813 +838 17 0 days 00:00:01.218884770 +838 18 0 days 00:00:01.385842885 +838 19 0 days 00:00:03.725549515 +838 20 0 days 00:00:02.986275055 +838 21 0 days 00:00:03.730365620 +838 22 0 days 00:00:01.243762073 +838 23 0 days 00:00:01.807672806 +838 24 0 days 00:00:01.485302688 +838 25 0 days 00:00:02.437062503 +838 26 0 days 00:00:01.523353620 +838 27 0 days 00:00:03.352138113 +838 28 0 days 00:00:01.958117920 +838 29 0 days 00:00:01.684704215 +838 30 0 days 00:00:04.046903555 +838 31 0 days 00:00:01.176079893 +838 32 0 days 00:00:03.281754812 +838 33 0 days 00:00:02.131938195 +838 34 0 days 00:00:03.686020173 +838 35 0 days 00:00:03.492332733 +838 36 0 days 00:00:02.266237693 +838 37 0 days 00:00:02.656466236 +838 38 0 days 00:00:02.034499770 +838 39 0 days 00:00:01.606824633 +838 40 0 days 00:00:01.521654140 +838 41 0 days 00:00:01.809148500 +838 42 0 days 00:00:01.473207500 +838 43 0 days 00:00:02.579466928 +838 44 0 days 00:00:01.466059200 +838 45 0 days 00:00:01.249663680 +838 46 0 days 00:00:01.417323745 +838 47 0 days 00:00:01.857756545 +838 48 0 days 00:00:01.467452620 +838 49 0 days 00:00:01.684918636 +838 50 0 days 00:00:03.782144766 +838 51 0 days 00:00:01.363842386 +838 52 0 days 00:00:02.375848446 +838 53 0 days 00:00:01.550085135 +838 54 0 days 00:00:02.098776860 +838 55 0 days 00:00:01.245799693 +838 56 0 days 00:00:03.539789733 +838 57 0 days 00:00:01.815414406 +838 58 0 days 00:00:01.522878870 +838 59 0 days 00:00:01.682265236 +838 60 0 days 00:00:02.716918653 +838 61 0 days 00:00:02.284125230 +838 62 0 days 00:00:03.304407000 +838 63 0 
days 00:00:01.405848615 +838 64 0 days 00:00:03.018058293 +838 65 0 days 00:00:03.980821450 +838 66 0 days 00:00:01.227056053 +838 67 0 days 00:00:03.603043073 +838 68 0 days 00:00:02.871776386 +838 69 0 days 00:00:01.369455066 +838 70 0 days 00:00:01.224629693 +838 71 0 days 00:00:03.472910503 +838 72 0 days 00:00:02.908276573 +838 73 0 days 00:00:02.083432606 +838 74 0 days 00:00:01.290751105 +838 75 0 days 00:00:03.047064160 +838 76 0 days 00:00:02.011809260 +838 77 0 days 00:00:04.024668635 +838 78 0 days 00:00:03.534656873 +838 79 0 days 00:00:02.634051492 +838 80 0 days 00:00:01.456855984 +838 81 0 days 00:00:01.501163166 +838 82 0 days 00:00:03.116013250 +838 83 0 days 00:00:02.325644575 +838 84 0 days 00:00:03.768882444 +838 85 0 days 00:00:03.442196426 +838 86 0 days 00:00:02.073218465 +838 87 0 days 00:00:01.673786346 +838 88 0 days 00:00:04.488307566 +838 89 0 days 00:00:02.098572935 +838 90 0 days 00:00:04.098961150 +838 91 0 days 00:00:02.875264793 +838 92 0 days 00:00:01.326590966 +838 93 0 days 00:00:02.257663515 +838 94 0 days 00:00:03.605676793 +838 95 0 days 00:00:04.031515488 +838 96 0 days 00:00:01.562699380 +838 97 0 days 00:00:01.674552013 +838 98 0 days 00:00:01.711465833 +838 99 0 days 00:00:03.823274733 +838 100 0 days 00:00:01.777337480 +839 1 0 days 00:00:06.929969940 +839 2 0 days 00:00:03.692275565 +839 3 0 days 00:00:03.993653875 +839 4 0 days 00:00:03.735225425 +839 5 0 days 00:00:03.439018386 +839 6 0 days 00:00:06.594037110 +839 7 0 days 00:00:05.543044160 +839 8 0 days 00:00:06.864276230 +839 9 0 days 00:00:02.521451730 +839 10 0 days 00:00:03.313850493 +839 11 0 days 00:00:02.735832230 +839 12 0 days 00:00:04.208121073 +839 13 0 days 00:00:05.497827580 +839 14 0 days 00:00:06.142711880 +839 15 0 days 00:00:03.555110820 +839 16 0 days 00:00:06.489221585 +839 17 0 days 00:00:02.781172993 +839 18 0 days 00:00:04.008435060 +839 19 0 days 00:00:03.431918500 +839 20 0 days 00:00:03.888449140 +839 21 0 days 00:00:05.853235620 +839 22 0 
days 00:00:07.102441027 +839 23 0 days 00:00:06.267595475 +839 24 0 days 00:00:06.203263685 +839 25 0 days 00:00:03.963048280 +839 26 0 days 00:00:02.830643966 +839 27 0 days 00:00:04.028248370 +839 28 0 days 00:00:06.841769946 +839 29 0 days 00:00:06.027720315 +839 30 0 days 00:00:05.474027608 +839 31 0 days 00:00:06.573042485 +839 32 0 days 00:00:03.766583880 +839 33 0 days 00:00:02.666790096 +839 34 0 days 00:00:03.639086280 +839 35 0 days 00:00:03.920966532 +839 36 0 days 00:00:06.848219256 +839 37 0 days 00:00:02.499912370 +839 38 0 days 00:00:05.255287215 +839 39 0 days 00:00:02.733727033 +839 40 0 days 00:00:04.014322412 +839 41 0 days 00:00:03.651950593 +839 42 0 days 00:00:02.684660730 +839 43 0 days 00:00:06.090326425 +839 44 0 days 00:00:07.006328588 +839 45 0 days 00:00:06.560802332 +839 46 0 days 00:00:06.153427335 +839 47 0 days 00:00:02.271074046 +839 48 0 days 00:00:05.455057400 +839 49 0 days 00:00:03.926906970 +839 50 0 days 00:00:06.558157420 +839 51 0 days 00:00:03.724929265 +839 52 0 days 00:00:03.491210586 +839 53 0 days 00:00:02.492928275 +839 54 0 days 00:00:02.568740490 +839 55 0 days 00:00:02.686130160 +839 56 0 days 00:00:03.496565380 +839 57 0 days 00:00:07.283329260 +839 58 0 days 00:00:02.777168191 +839 59 0 days 00:00:05.615208906 +839 60 0 days 00:00:06.372406552 +839 61 0 days 00:00:05.482775980 +839 62 0 days 00:00:02.654300645 +839 63 0 days 00:00:02.244239900 +839 64 0 days 00:00:02.655051332 +839 65 0 days 00:00:05.509013586 +839 66 0 days 00:00:03.764132775 +839 67 0 days 00:00:03.287381506 +839 68 0 days 00:00:06.531972105 +839 69 0 days 00:00:05.529861433 +839 70 0 days 00:00:06.910429213 +839 71 0 days 00:00:02.501052533 +839 72 0 days 00:00:03.703776240 +839 73 0 days 00:00:03.262373205 +839 74 0 days 00:00:02.747502470 +839 75 0 days 00:00:02.452206306 +839 76 0 days 00:00:06.149164590 +839 77 0 days 00:00:03.796720170 +839 78 0 days 00:00:02.801904345 +839 79 0 days 00:00:02.597200840 +839 80 0 days 00:00:02.660559160 
+839 81 0 days 00:00:02.608569290 +839 82 0 days 00:00:03.761631865 +839 83 0 days 00:00:06.796556500 +839 84 0 days 00:00:06.455849452 +839 85 0 days 00:00:03.731602805 +839 86 0 days 00:00:06.226265420 +839 87 0 days 00:00:07.027387748 +840 1 0 days 00:00:03.359588420 +840 2 0 days 00:00:01.904726226 +840 3 0 days 00:00:01.497569056 +840 4 0 days 00:00:02.947262960 +840 5 0 days 00:00:03.064466650 +840 6 0 days 00:00:01.327013525 +840 7 0 days 00:00:03.078458330 +840 8 0 days 00:00:03.306518352 +840 9 0 days 00:00:01.947313600 +840 10 0 days 00:00:01.482519240 +840 11 0 days 00:00:02.022692340 +840 12 0 days 00:00:03.246162095 +840 13 0 days 00:00:01.364758804 +840 14 0 days 00:00:01.286968495 +840 15 0 days 00:00:03.040717760 +840 16 0 days 00:00:03.521214664 +840 17 0 days 00:00:02.861783366 +840 18 0 days 00:00:01.735932020 +840 19 0 days 00:00:01.436750332 +840 20 0 days 00:00:01.930436065 +840 21 0 days 00:00:03.549510944 +840 22 0 days 00:00:02.793706480 +840 23 0 days 00:00:02.061181393 +840 24 0 days 00:00:03.215488144 +840 25 0 days 00:00:03.352961755 +840 26 0 days 00:00:01.283507990 +840 27 0 days 00:00:01.397925676 +840 28 0 days 00:00:01.503970684 +840 29 0 days 00:00:03.113991845 +840 30 0 days 00:00:01.059440480 +840 31 0 days 00:00:02.998413260 +840 32 0 days 00:00:01.754300280 +840 33 0 days 00:00:01.546930093 +840 34 0 days 00:00:01.743631500 +840 35 0 days 00:00:01.287074185 +840 36 0 days 00:00:01.725071440 +840 37 0 days 00:00:01.903185980 +840 38 0 days 00:00:01.306198070 +840 39 0 days 00:00:01.772781206 +840 40 0 days 00:00:01.977715190 +840 41 0 days 00:00:02.879417153 +840 42 0 days 00:00:02.188807636 +840 43 0 days 00:00:03.030693665 +840 44 0 days 00:00:01.813785613 +840 45 0 days 00:00:01.982567428 +840 46 0 days 00:00:01.155047900 +840 47 0 days 00:00:01.189410826 +840 48 0 days 00:00:01.715046306 +840 49 0 days 00:00:01.794382860 +840 50 0 days 00:00:02.221712816 +840 51 0 days 00:00:01.349827748 +840 52 0 days 00:00:01.357262220 
+840 53 0 days 00:00:01.954434795 +840 54 0 days 00:00:01.696448600 +840 55 0 days 00:00:02.010951692 +840 56 0 days 00:00:01.871470325 +840 57 0 days 00:00:01.412257325 +840 58 0 days 00:00:02.048351800 +840 59 0 days 00:00:02.801801333 +840 60 0 days 00:00:01.903570680 +840 61 0 days 00:00:02.036391520 +840 62 0 days 00:00:01.835963333 +840 63 0 days 00:00:02.191323030 +840 64 0 days 00:00:03.040746490 +840 65 0 days 00:00:01.510162770 +840 66 0 days 00:00:01.272580770 +840 67 0 days 00:00:02.353847593 +840 68 0 days 00:00:01.270843073 +840 69 0 days 00:00:01.241786926 +840 70 0 days 00:00:02.067275636 +840 71 0 days 00:00:01.288718870 +840 72 0 days 00:00:02.805529480 +840 73 0 days 00:00:02.809117146 +840 74 0 days 00:00:02.851408993 +840 75 0 days 00:00:01.754229453 +840 76 0 days 00:00:03.359587070 +840 77 0 days 00:00:01.748768486 +840 78 0 days 00:00:01.307423190 +840 79 0 days 00:00:01.880479820 +840 80 0 days 00:00:03.312140095 +840 81 0 days 00:00:02.788290340 +840 82 0 days 00:00:01.808086666 +840 83 0 days 00:00:02.827262866 +840 84 0 days 00:00:01.493989700 +840 85 0 days 00:00:02.863119793 +840 86 0 days 00:00:03.312951440 +840 87 0 days 00:00:01.730334320 +840 88 0 days 00:00:01.361331000 +840 89 0 days 00:00:02.119392348 +840 90 0 days 00:00:01.298907440 +840 91 0 days 00:00:01.517968176 +840 92 0 days 00:00:03.283390100 +840 93 0 days 00:00:01.298359220 +840 94 0 days 00:00:01.144561266 +840 95 0 days 00:00:01.391366484 +840 96 0 days 00:00:01.547715226 +840 97 0 days 00:00:02.938288753 +840 98 0 days 00:00:01.855525045 +840 99 0 days 00:00:01.686877526 +840 100 0 days 00:00:01.913630645 +841 1 0 days 00:00:50.057974716 +841 2 0 days 00:00:46.625554725 +841 3 0 days 00:00:57.146381110 +841 4 0 days 00:01:45.274937090 +841 5 0 days 00:01:00.672212296 +841 6 0 days 00:01:23.140165973 +841 7 0 days 00:01:49.818755386 +842 1 0 days 00:00:56.226819135 +842 2 0 days 00:00:17.550041290 +842 3 0 days 00:00:19.153647200 +842 4 0 days 00:00:22.174810204 
+842 5 0 days 00:00:18.740495200 +842 6 0 days 00:00:30.711905356 +842 7 0 days 00:00:25.532796753 +842 8 0 days 00:00:31.912324220 +842 9 0 days 00:00:22.323432070 +842 10 0 days 00:00:17.569051405 +842 11 0 days 00:00:19.066989613 +842 12 0 days 00:00:19.442123544 +842 13 0 days 00:00:30.021330592 +842 14 0 days 00:00:20.891317251 +842 15 0 days 00:00:30.739155700 +842 16 0 days 00:00:16.452016293 +842 17 0 days 00:00:25.137204466 +843 1 0 days 00:01:20.678023826 +843 2 0 days 00:02:39.028897945 +843 3 0 days 00:01:17.252702496 +844 1 0 days 00:01:08.374189755 +844 2 0 days 00:01:01.058267986 +844 3 0 days 00:02:20.555907025 +844 4 0 days 00:02:05.568929760 +844 5 0 days 00:01:00.790283426 +844 6 0 days 00:02:06.496702445 +845 1 0 days 00:01:17.647127766 +845 2 0 days 00:00:43.558695736 +845 3 0 days 00:01:28.001064096 +845 4 0 days 00:01:02.583331173 +845 5 0 days 00:00:43.629026312 +845 6 0 days 00:01:19.546259280 +846 1 0 days 00:01:42.305162170 +846 2 0 days 00:00:27.369771693 +846 3 0 days 00:00:39.401816313 +846 4 0 days 00:00:44.484053840 +846 5 0 days 00:00:38.739384720 +846 6 0 days 00:01:32.655480396 +847 1 0 days 00:00:20.577596633 +847 2 0 days 00:00:30.792671500 +847 3 0 days 00:00:28.422714522 +847 4 0 days 00:00:20.439819272 +847 5 0 days 00:00:22.082372128 +847 6 0 days 00:00:28.374622557 +847 7 0 days 00:00:18.643063075 +847 8 0 days 00:00:25.947780375 +847 9 0 days 00:00:22.440115888 +847 10 0 days 00:00:28.510497987 +847 11 0 days 00:00:21.314690028 +847 12 0 days 00:00:23.147048500 +847 13 0 days 00:00:18.943595433 +847 14 0 days 00:00:24.780185550 +847 15 0 days 00:00:34.570508300 +847 16 0 days 00:00:23.069983675 +847 17 0 days 00:00:23.250150600 +847 18 0 days 00:00:22.885456475 +847 19 0 days 00:00:29.611466810 +847 20 0 days 00:00:21.455483518 +847 21 0 days 00:00:23.938521766 +847 22 0 days 00:00:19.094043528 +847 23 0 days 00:00:47.117247550 +847 24 0 days 00:00:28.690610942 +847 25 0 days 00:00:18.191141150 +847 26 0 days 
00:00:22.627324014 +847 27 0 days 00:00:19.285321737 +847 28 0 days 00:00:24.302810250 +847 29 0 days 00:00:28.669394233 +847 30 0 days 00:00:40.397891383 +847 31 0 days 00:00:20.967093485 +847 32 0 days 00:00:19.916507416 +847 33 0 days 00:00:19.670028800 +847 34 0 days 00:00:40.645970525 +847 35 0 days 00:00:23.964787685 +847 36 0 days 00:00:30.975732785 +847 37 0 days 00:00:18.216843166 +847 38 0 days 00:00:22.862671012 +847 39 0 days 00:00:31.722169900 +847 40 0 days 00:00:29.318014866 +847 41 0 days 00:00:20.128612666 +847 42 0 days 00:00:20.070271442 +847 43 0 days 00:00:24.865522200 +847 44 0 days 00:00:27.583377814 +847 45 0 days 00:00:39.506847600 +847 46 0 days 00:00:30.300160066 +847 47 0 days 00:00:36.553315133 +848 1 0 days 00:00:10.808389900 +848 2 0 days 00:00:10.255709490 +848 3 0 days 00:00:14.514433066 +848 4 0 days 00:00:10.689547583 +848 5 0 days 00:00:14.403701233 +848 6 0 days 00:00:14.609294750 +848 7 0 days 00:00:11.190858785 +848 8 0 days 00:00:12.873678600 +848 9 0 days 00:00:26.793148416 +848 10 0 days 00:00:10.641506288 +848 11 0 days 00:00:25.446128816 +848 12 0 days 00:00:09.755663000 +848 13 0 days 00:00:29.234779350 +848 14 0 days 00:00:12.533825733 +848 15 0 days 00:00:11.318596442 +848 16 0 days 00:00:09.737290490 +848 17 0 days 00:00:10.079394984 +848 18 0 days 00:00:09.524291183 +848 19 0 days 00:00:11.647663000 +848 20 0 days 00:00:20.558592683 +848 21 0 days 00:00:11.011589822 +848 22 0 days 00:00:17.552305450 +848 23 0 days 00:00:10.502771716 +848 24 0 days 00:00:13.250100122 +848 25 0 days 00:00:10.742249514 +848 26 0 days 00:00:16.133480077 +848 27 0 days 00:00:11.081980366 +848 28 0 days 00:00:17.574483193 +848 29 0 days 00:00:12.498105333 +848 30 0 days 00:00:15.124134535 +848 31 0 days 00:00:13.249532985 +848 32 0 days 00:00:12.621869066 +848 33 0 days 00:00:12.191752728 +848 34 0 days 00:00:10.636288225 +848 35 0 days 00:00:09.932684171 +848 36 0 days 00:00:20.414877766 +848 37 0 days 00:00:14.979006616 +848 38 0 days 
00:00:20.191228100 +848 39 0 days 00:00:27.407948414 +848 40 0 days 00:00:15.258949633 +848 41 0 days 00:00:30.130682807 +848 42 0 days 00:00:10.550622857 +848 43 0 days 00:00:17.136660883 +848 44 0 days 00:00:17.648485100 +848 45 0 days 00:00:15.588687883 +848 46 0 days 00:00:16.612029300 +848 47 0 days 00:00:10.391697633 +848 48 0 days 00:00:19.609566566 +848 49 0 days 00:00:26.571561114 +848 50 0 days 00:00:13.767659942 +848 51 0 days 00:00:10.606143671 +848 52 0 days 00:00:12.288205200 +848 53 0 days 00:00:11.644417216 +848 54 0 days 00:00:19.586118850 +848 55 0 days 00:00:13.940758718 +848 56 0 days 00:00:18.229271863 +848 57 0 days 00:00:10.297552000 +848 58 0 days 00:00:18.672927850 +848 59 0 days 00:00:12.878624466 +848 60 0 days 00:00:13.950312263 +848 61 0 days 00:00:16.258212283 +848 62 0 days 00:00:19.069002363 +848 63 0 days 00:00:19.055724533 +848 64 0 days 00:00:18.968492333 +848 65 0 days 00:00:18.257248200 +848 66 0 days 00:00:12.730452250 +848 67 0 days 00:00:10.119725600 +848 68 0 days 00:00:10.353135300 +848 69 0 days 00:00:09.204527416 +848 70 0 days 00:00:13.915982416 +848 71 0 days 00:00:15.444832266 +848 72 0 days 00:00:20.197620007 +848 73 0 days 00:00:28.215998100 +848 74 0 days 00:00:16.497746216 +849 1 0 days 00:00:30.019293833 +849 2 0 days 00:00:24.743189280 +849 3 0 days 00:00:30.907298983 +849 4 0 days 00:00:18.410760383 +849 5 0 days 00:00:33.493268057 +849 6 0 days 00:00:18.987188714 +849 7 0 days 00:00:20.081286057 +849 8 0 days 00:00:28.288555225 +849 9 0 days 00:00:35.273095571 +849 10 0 days 00:00:24.344594183 +849 11 0 days 00:00:20.807015870 +849 12 0 days 00:00:23.671207044 +849 13 0 days 00:00:20.780466055 +849 14 0 days 00:00:29.868628271 +849 15 0 days 00:00:21.391629300 +849 16 0 days 00:00:26.576060566 +849 17 0 days 00:00:17.952730933 +849 18 0 days 00:00:17.727986825 +849 19 0 days 00:00:50.520007150 +849 20 0 days 00:00:35.939360540 +849 21 0 days 00:00:26.847919342 +849 22 0 days 00:00:19.951925133 +849 23 0 days 
00:00:27.083941728 +849 24 0 days 00:00:22.279701477 +849 25 0 days 00:00:21.549968025 +849 26 0 days 00:00:21.449776000 +849 27 0 days 00:00:22.153900750 +849 28 0 days 00:00:36.968355937 +849 29 0 days 00:00:25.528135742 +849 30 0 days 00:00:25.659653066 +849 31 0 days 00:00:57.011763250 +849 32 0 days 00:00:26.246461133 +849 33 0 days 00:00:25.605717835 +849 34 0 days 00:00:18.695615328 +849 35 0 days 00:00:31.453545694 +849 36 0 days 00:00:38.142370642 +850 1 0 days 00:00:15.225259011 +850 2 0 days 00:00:14.405995471 +850 3 0 days 00:00:20.249421500 +850 4 0 days 00:00:12.569305790 +850 5 0 days 00:00:15.412791800 +850 6 0 days 00:00:10.528763014 +850 7 0 days 00:00:14.358051688 +850 8 0 days 00:00:18.656803650 +850 9 0 days 00:00:13.516209328 +850 10 0 days 00:00:15.881970083 +850 11 0 days 00:00:10.763132587 +850 12 0 days 00:00:10.470705471 +850 13 0 days 00:00:20.735481528 +850 14 0 days 00:00:12.255892388 +850 15 0 days 00:00:17.394563036 +850 16 0 days 00:00:13.575271822 +850 17 0 days 00:00:12.179132654 +850 18 0 days 00:00:15.961764544 +850 19 0 days 00:00:13.956563466 +850 20 0 days 00:00:10.624716028 +850 21 0 days 00:00:11.256491075 +850 22 0 days 00:00:09.711701400 +850 23 0 days 00:00:21.061311488 +850 24 0 days 00:00:14.310546216 +850 25 0 days 00:00:22.853148175 +850 26 0 days 00:00:17.065417766 +850 27 0 days 00:00:18.398984537 +850 28 0 days 00:00:14.017402650 +850 29 0 days 00:00:13.899544987 +850 30 0 days 00:00:12.155131509 +850 31 0 days 00:00:28.304633533 +850 32 0 days 00:00:10.046135966 +850 33 0 days 00:00:12.870433142 +850 34 0 days 00:00:22.494413500 +850 35 0 days 00:00:17.420130700 +850 36 0 days 00:00:12.543295600 +850 37 0 days 00:00:19.613483166 +850 38 0 days 00:00:13.274472775 +850 39 0 days 00:00:13.601778611 +850 40 0 days 00:00:26.489146362 +850 41 0 days 00:00:10.420818400 +850 42 0 days 00:00:17.587778750 +850 43 0 days 00:00:17.431684162 +850 44 0 days 00:00:18.414912140 +850 45 0 days 00:00:11.812056557 +850 46 0 days 
00:00:17.906672314 +850 47 0 days 00:00:10.221216842 +850 48 0 days 00:00:28.151467544 +850 49 0 days 00:00:10.601161571 +850 50 0 days 00:00:11.399756471 +850 51 0 days 00:00:16.681929716 +850 52 0 days 00:00:19.056139700 +850 53 0 days 00:00:09.058722200 +850 54 0 days 00:00:10.402371600 +850 55 0 days 00:00:09.285583175 +850 56 0 days 00:00:10.794560583 +850 57 0 days 00:00:09.754825183 +850 58 0 days 00:00:18.517609000 +850 59 0 days 00:00:15.130406533 +850 60 0 days 00:00:13.874783840 +850 61 0 days 00:00:15.567473427 +850 62 0 days 00:00:17.808304571 +850 63 0 days 00:00:15.384481871 +850 64 0 days 00:00:13.395137222 +850 65 0 days 00:00:10.032586784 +850 66 0 days 00:00:13.950799955 +850 67 0 days 00:00:10.921817000 +850 68 0 days 00:00:11.909529933 +850 69 0 days 00:00:08.578088915 +850 70 0 days 00:00:18.508768966 +850 71 0 days 00:00:12.661621250 +850 72 0 days 00:00:13.518704000 +850 73 0 days 00:00:14.164028585 +851 1 0 days 00:00:38.175843600 +851 2 0 days 00:00:14.977992250 +851 3 0 days 00:00:17.650465250 +851 4 0 days 00:00:19.504806400 +851 5 0 days 00:00:25.055469450 +851 6 0 days 00:00:20.184858730 +851 7 0 days 00:00:51.690309300 +851 8 0 days 00:00:20.145546633 +851 9 0 days 00:00:21.858732966 +851 10 0 days 00:00:23.386349916 +851 11 0 days 00:00:18.881224657 +851 12 0 days 00:00:29.299636916 +851 13 0 days 00:00:27.666002277 +851 14 0 days 00:00:17.380865500 +851 15 0 days 00:00:33.731944450 +851 16 0 days 00:00:58.979888528 +851 17 0 days 00:00:20.905673900 +851 18 0 days 00:00:24.919951633 +851 19 0 days 00:00:31.287038542 +851 20 0 days 00:00:38.872659688 +851 21 0 days 00:00:18.060257887 +851 22 0 days 00:00:29.582478700 +851 23 0 days 00:00:40.011343716 +851 24 0 days 00:00:28.860163466 +851 25 0 days 00:00:36.731314750 +851 26 0 days 00:00:22.479649342 +851 27 0 days 00:00:28.438560450 +851 28 0 days 00:00:33.415583600 +851 29 0 days 00:00:20.071350300 +851 30 0 days 00:00:17.560937500 +851 31 0 days 00:00:28.779898483 +851 32 0 days 
00:00:23.425750166 +851 33 0 days 00:00:31.524517014 +851 34 0 days 00:00:38.882739212 +851 35 0 days 00:00:31.320549837 +851 36 0 days 00:00:22.900910833 +851 37 0 days 00:00:29.188922683 +851 38 0 days 00:00:17.722763342 +851 39 0 days 00:00:19.619737900 +851 40 0 days 00:00:18.132117087 +851 41 0 days 00:00:35.279562087 +851 42 0 days 00:00:19.782476650 +851 43 0 days 00:00:17.490168077 +851 44 0 days 00:00:53.822171090 +852 1 0 days 00:00:14.818491383 +852 2 0 days 00:00:12.356710333 +852 3 0 days 00:00:11.760661171 +852 4 0 days 00:00:09.602589800 +852 5 0 days 00:00:08.632194828 +852 6 0 days 00:00:32.102916050 +852 7 0 days 00:00:10.854237457 +852 8 0 days 00:00:12.451136000 +852 9 0 days 00:00:11.460750900 +852 10 0 days 00:00:13.261824025 +852 11 0 days 00:00:18.440616466 +852 12 0 days 00:00:12.585582012 +852 13 0 days 00:00:13.194965811 +852 14 0 days 00:00:21.445953033 +852 15 0 days 00:00:14.948040562 +852 16 0 days 00:00:11.796180285 +852 17 0 days 00:00:19.242542244 +852 18 0 days 00:00:19.027048650 +852 19 0 days 00:00:11.711571350 +852 20 0 days 00:00:13.098264333 +852 21 0 days 00:00:16.612558457 +852 22 0 days 00:00:13.399706830 +852 23 0 days 00:00:31.512977014 +852 24 0 days 00:00:20.253598933 +852 25 0 days 00:00:15.187441033 +852 26 0 days 00:00:28.715311655 +852 27 0 days 00:00:12.958236957 +852 28 0 days 00:00:16.287426433 +852 29 0 days 00:00:10.771286654 +852 30 0 days 00:00:19.238324350 +852 31 0 days 00:00:12.148510800 +852 32 0 days 00:00:11.603530800 +852 33 0 days 00:00:11.293177408 +852 34 0 days 00:00:09.793117550 +852 35 0 days 00:00:12.632313783 +852 36 0 days 00:00:27.460462300 +852 37 0 days 00:00:14.377617766 +852 38 0 days 00:00:10.390013411 +852 39 0 days 00:00:11.189783483 +852 40 0 days 00:00:18.661793350 +852 41 0 days 00:00:10.953504550 +852 42 0 days 00:00:09.887587933 +852 43 0 days 00:00:08.696054750 +852 44 0 days 00:00:12.851287188 +852 45 0 days 00:00:11.125173181 +852 46 0 days 00:00:19.409318875 +852 47 0 days 
00:00:11.879378275 +852 48 0 days 00:00:21.670297350 +852 49 0 days 00:00:28.760307150 +852 50 0 days 00:00:17.428099033 +852 51 0 days 00:00:18.671679900 +852 52 0 days 00:00:21.433853833 +852 53 0 days 00:00:12.589490828 +852 54 0 days 00:00:08.649053228 +852 55 0 days 00:00:14.685354362 +852 56 0 days 00:00:10.133763770 +852 57 0 days 00:00:16.549207166 +852 58 0 days 00:00:16.894802333 +852 59 0 days 00:00:18.632011933 +852 60 0 days 00:00:31.491231016 +852 61 0 days 00:00:19.309724728 +852 62 0 days 00:00:30.612592866 +852 63 0 days 00:00:15.744657966 +852 64 0 days 00:00:12.988276600 +852 65 0 days 00:00:27.304361700 +852 66 0 days 00:00:11.270302940 +852 67 0 days 00:00:09.819675320 +852 68 0 days 00:00:15.394006622 +852 69 0 days 00:00:10.032810100 +852 70 0 days 00:00:22.179754566 +852 71 0 days 00:00:13.442068488 +852 72 0 days 00:00:21.619625350 +852 73 0 days 00:00:20.311925866 +852 74 0 days 00:00:21.601996537 +852 75 0 days 00:00:19.571639042 +853 1 0 days 00:00:29.260143911 +853 2 0 days 00:00:14.860861314 +853 3 0 days 00:00:22.686168150 +853 4 0 days 00:00:23.267870242 +853 5 0 days 00:00:16.600960233 +853 6 0 days 00:00:18.071139383 +853 7 0 days 00:00:29.189635766 +853 8 0 days 00:00:30.229190937 +853 9 0 days 00:00:14.661830100 +853 10 0 days 00:00:21.214925450 +853 11 0 days 00:00:18.705474600 +853 12 0 days 00:00:19.361504512 +853 13 0 days 00:00:21.545914300 +853 14 0 days 00:00:20.712738800 +853 15 0 days 00:00:41.672007666 +853 16 0 days 00:00:23.995317883 +853 17 0 days 00:00:29.458224512 +853 18 0 days 00:00:32.257133866 +853 19 0 days 00:00:18.486869533 +853 20 0 days 00:00:17.289452957 +853 21 0 days 00:00:26.023729966 +853 22 0 days 00:00:29.602366550 +853 23 0 days 00:00:50.936570266 +853 24 0 days 00:00:19.022629880 +853 25 0 days 00:00:19.086586325 +853 26 0 days 00:00:16.463359128 +853 27 0 days 00:00:17.881684600 +853 28 0 days 00:00:22.285442883 +853 29 0 days 00:00:22.603713883 +853 30 0 days 00:00:34.113581900 +853 31 0 days 
00:00:22.927038100 +853 32 0 days 00:00:38.320970971 +853 33 0 days 00:00:20.694564514 +853 34 0 days 00:00:20.626027357 +853 35 0 days 00:00:34.486935150 +853 36 0 days 00:00:17.939374787 +853 37 0 days 00:00:33.891792775 +853 38 0 days 00:00:19.933019575 +853 39 0 days 00:00:29.848480983 +853 40 0 days 00:00:16.903380414 +853 41 0 days 00:00:20.459185240 +853 42 0 days 00:00:18.299491057 +853 43 0 days 00:00:33.889299750 +853 44 0 days 00:00:21.511018216 +853 45 0 days 00:00:24.470975920 +853 46 0 days 00:00:20.938620833 +853 47 0 days 00:00:23.324554216 +853 48 0 days 00:00:19.441981366 +853 49 0 days 00:00:16.949081871 +853 50 0 days 00:00:14.852323950 +853 51 0 days 00:00:57.334967016 +854 1 0 days 00:00:15.393324457 +854 2 0 days 00:00:17.066263350 +854 3 0 days 00:00:29.715850183 +854 4 0 days 00:00:08.275122050 +854 5 0 days 00:00:15.864806566 +854 6 0 days 00:00:26.041568966 +854 7 0 days 00:00:30.614471570 +854 8 0 days 00:00:14.400779766 +854 9 0 days 00:00:29.712569500 +854 10 0 days 00:00:16.450203350 +854 11 0 days 00:00:11.687360511 +854 12 0 days 00:00:16.507463300 +854 13 0 days 00:00:10.745707366 +854 14 0 days 00:00:12.000921071 +854 15 0 days 00:00:14.824894606 +854 16 0 days 00:00:20.316458014 +854 17 0 days 00:00:15.941277983 +854 18 0 days 00:00:31.481010114 +854 19 0 days 00:00:21.945674833 +854 20 0 days 00:00:15.327284716 +854 21 0 days 00:00:14.807744683 +854 22 0 days 00:00:09.778531266 +854 23 0 days 00:00:19.671819333 +854 24 0 days 00:00:19.107292057 +854 25 0 days 00:00:29.626364642 +854 26 0 days 00:00:11.289806891 +854 27 0 days 00:00:12.674295487 +854 28 0 days 00:00:20.383026950 +854 29 0 days 00:00:20.407113700 +854 30 0 days 00:00:29.311077250 +854 31 0 days 00:00:15.525705571 +854 32 0 days 00:00:20.580556816 +854 33 0 days 00:00:12.480831500 +854 34 0 days 00:00:14.221908016 +854 35 0 days 00:00:12.135399440 +854 36 0 days 00:00:13.035105566 +854 37 0 days 00:00:12.746783016 +854 38 0 days 00:00:13.081957900 +854 39 0 days 
00:00:16.996755472 +854 40 0 days 00:00:11.614737450 +854 41 0 days 00:00:10.300769276 +854 42 0 days 00:00:20.503273516 +854 43 0 days 00:00:10.047503666 +854 44 0 days 00:00:28.361945833 +854 45 0 days 00:00:11.891633733 +854 46 0 days 00:00:16.822566500 +854 47 0 days 00:00:22.495444837 +854 48 0 days 00:00:16.808352957 +854 49 0 days 00:00:13.644039237 +854 50 0 days 00:00:12.898302616 +854 51 0 days 00:00:15.205526871 +854 52 0 days 00:00:10.008358460 +854 53 0 days 00:00:08.924910183 +854 54 0 days 00:00:08.485660883 +854 55 0 days 00:00:10.710903383 +854 56 0 days 00:00:09.736374928 +854 57 0 days 00:00:11.352443333 +854 58 0 days 00:00:30.065002100 +854 59 0 days 00:00:11.998996300 +854 60 0 days 00:00:09.192113185 +854 61 0 days 00:00:15.629191966 +854 62 0 days 00:00:20.520852171 +854 63 0 days 00:00:15.544004014 +854 64 0 days 00:00:10.052657036 +854 65 0 days 00:00:15.897035316 +854 66 0 days 00:00:18.963878528 +854 67 0 days 00:00:15.533942728 +854 68 0 days 00:00:16.158017366 +854 69 0 days 00:00:15.473143316 +854 70 0 days 00:00:12.259961950 +854 71 0 days 00:00:10.938667255 +854 72 0 days 00:00:11.074734750 +854 73 0 days 00:00:16.211510866 +854 74 0 days 00:00:10.130267400 +854 75 0 days 00:00:18.465588000 +854 76 0 days 00:00:14.612346136 +855 1 0 days 00:00:24.126550300 +855 2 0 days 00:00:15.980114153 +855 3 0 days 00:00:24.546646620 +855 4 0 days 00:00:13.086548445 +855 5 0 days 00:00:30.892448511 +855 6 0 days 00:00:15.388757905 +855 7 0 days 00:00:10.493101253 +855 8 0 days 00:00:16.712786488 +855 9 0 days 00:00:18.902572116 +855 10 0 days 00:00:11.671165600 +855 11 0 days 00:00:16.532327452 +855 12 0 days 00:00:15.548764715 +855 13 0 days 00:00:27.174187340 +855 14 0 days 00:00:24.420680460 +855 15 0 days 00:00:10.812294476 +855 16 0 days 00:00:18.010876415 +855 17 0 days 00:00:14.574233746 +855 18 0 days 00:00:14.068362053 +855 19 0 days 00:00:13.861958426 +855 20 0 days 00:00:27.206940620 +855 21 0 days 00:00:21.395741526 +855 22 0 days 
00:00:10.628656104 +856 1 0 days 00:00:18.597381537 +856 2 0 days 00:00:10.411996464 +856 3 0 days 00:00:10.135760640 +856 4 0 days 00:00:29.912984596 +856 5 0 days 00:00:13.367023948 +856 6 0 days 00:00:31.889800168 +856 7 0 days 00:00:10.275916135 +856 8 0 days 00:00:31.287489996 +856 9 0 days 00:00:14.442526753 +856 10 0 days 00:00:18.277489948 +856 11 0 days 00:00:28.290050975 +856 12 0 days 00:00:17.114855824 +856 13 0 days 00:00:28.288769310 +856 14 0 days 00:00:10.572821085 +856 15 0 days 00:00:11.124932880 +856 16 0 days 00:00:24.948161080 +856 17 0 days 00:00:11.110281982 +856 18 0 days 00:00:24.853097053 +857 1 0 days 00:00:07.054436020 +857 2 0 days 00:00:14.293382084 +857 3 0 days 00:00:04.396257520 +857 4 0 days 00:00:06.892601733 +857 5 0 days 00:00:08.766182240 +857 6 0 days 00:00:13.751186010 +857 7 0 days 00:00:12.287908593 +857 8 0 days 00:00:07.960083890 +857 9 0 days 00:00:07.825917365 +857 10 0 days 00:00:13.719420965 +857 11 0 days 00:00:04.950539840 +857 12 0 days 00:00:05.317208066 +857 13 0 days 00:00:07.979295033 +857 14 0 days 00:00:05.931505113 +857 15 0 days 00:00:06.022631820 +857 16 0 days 00:00:06.205806286 +857 17 0 days 00:00:04.952618880 +857 18 0 days 00:00:08.089490080 +857 19 0 days 00:00:15.300580640 +857 20 0 days 00:00:04.468325006 +857 21 0 days 00:00:06.137805305 +857 22 0 days 00:00:08.656090080 +857 23 0 days 00:00:13.701885150 +857 24 0 days 00:00:05.592651446 +857 25 0 days 00:00:08.416883804 +857 26 0 days 00:00:07.423185433 +857 27 0 days 00:00:08.588582710 +857 28 0 days 00:00:06.222042286 +857 29 0 days 00:00:09.372021224 +857 30 0 days 00:00:05.239047940 +857 31 0 days 00:00:08.412800096 +857 32 0 days 00:00:05.716695065 +857 33 0 days 00:00:09.297405402 +857 34 0 days 00:00:15.565557028 +857 35 0 days 00:00:08.108049510 +857 36 0 days 00:00:08.823723031 +857 37 0 days 00:00:08.009115186 +857 38 0 days 00:00:15.582951387 +857 39 0 days 00:00:11.933943240 +857 40 0 days 00:00:05.703748197 +857 41 0 days 
00:00:04.462192486 +857 42 0 days 00:00:12.227987513 +857 43 0 days 00:00:15.391284765 +858 1 0 days 00:00:04.506220093 +858 2 0 days 00:00:15.947556680 +858 3 0 days 00:00:07.356720473 +858 4 0 days 00:00:05.990329385 +858 5 0 days 00:00:07.911722836 +858 6 0 days 00:00:08.050700255 +858 7 0 days 00:00:11.366411173 +858 8 0 days 00:00:07.216174260 +858 9 0 days 00:00:09.424024780 +858 10 0 days 00:00:05.419584755 +858 11 0 days 00:00:07.401774253 +858 12 0 days 00:00:16.056583728 +858 13 0 days 00:00:05.039576770 +858 14 0 days 00:00:07.128164945 +858 15 0 days 00:00:07.641160560 +858 16 0 days 00:00:16.185395725 +858 17 0 days 00:00:14.326801990 +858 18 0 days 00:00:15.132645572 +858 19 0 days 00:00:05.472179493 +858 20 0 days 00:00:05.575515080 +858 21 0 days 00:00:15.713639253 +858 22 0 days 00:00:05.061988075 +858 23 0 days 00:00:10.083082456 +858 24 0 days 00:00:05.082733420 +858 25 0 days 00:00:09.736451192 +858 26 0 days 00:00:05.625608240 +858 27 0 days 00:00:12.653669060 +858 28 0 days 00:00:05.277893380 +858 29 0 days 00:00:05.560049650 +858 30 0 days 00:00:07.129225385 +858 31 0 days 00:00:09.631968090 +858 32 0 days 00:00:05.931497373 +858 33 0 days 00:00:04.975820610 +858 34 0 days 00:00:12.845505600 +858 35 0 days 00:00:08.426245440 +858 36 0 days 00:00:05.252811785 +858 37 0 days 00:00:08.062091040 +858 38 0 days 00:00:10.291098088 +858 39 0 days 00:00:08.581164500 +858 40 0 days 00:00:08.561824260 +858 41 0 days 00:00:07.343452346 +858 42 0 days 00:00:12.843114726 +858 43 0 days 00:00:09.062391168 +859 1 0 days 00:00:18.564599505 +859 2 0 days 00:00:17.872787316 +859 3 0 days 00:00:28.561366415 +859 4 0 days 00:00:09.870825205 +859 5 0 days 00:00:15.950514180 +859 6 0 days 00:00:09.867585865 +859 7 0 days 00:00:17.805589750 +859 8 0 days 00:00:10.621088345 +859 9 0 days 00:00:10.827015520 +859 10 0 days 00:00:16.543593570 +859 11 0 days 00:00:28.108704645 +859 12 0 days 00:00:14.610641020 +859 13 0 days 00:00:12.203006320 +859 14 0 days 
00:00:25.328647486 +859 15 0 days 00:00:09.382997180 +859 16 0 days 00:00:08.820560746 +859 17 0 days 00:00:14.520082806 +859 18 0 days 00:00:11.642794664 +859 19 0 days 00:00:25.865024406 +859 20 0 days 00:00:18.031104246 +859 21 0 days 00:00:31.605919610 +859 22 0 days 00:00:10.036805260 +859 23 0 days 00:00:11.883351511 +859 24 0 days 00:00:14.317965486 +859 25 0 days 00:00:17.061559968 +860 1 0 days 00:00:08.093611980 +860 2 0 days 00:00:05.836840484 +860 3 0 days 00:00:07.355287760 +860 4 0 days 00:00:08.571795410 +860 5 0 days 00:00:08.177426602 +860 6 0 days 00:00:07.512062020 +860 7 0 days 00:00:04.676366533 +860 8 0 days 00:00:07.341947706 +860 9 0 days 00:00:09.054660925 +860 10 0 days 00:00:13.653204280 +860 11 0 days 00:00:12.812292970 +860 12 0 days 00:00:16.088677520 +860 13 0 days 00:00:08.216279830 +860 14 0 days 00:00:04.924586166 +860 15 0 days 00:00:12.828667793 +860 16 0 days 00:00:07.683328113 +860 17 0 days 00:00:05.161442555 +860 18 0 days 00:00:14.434874395 +860 19 0 days 00:00:04.117034540 +860 20 0 days 00:00:05.747874406 +860 21 0 days 00:00:09.149930942 +860 22 0 days 00:00:13.459648333 +860 23 0 days 00:00:15.553316193 +860 24 0 days 00:00:16.156590600 +860 25 0 days 00:00:04.983925875 +860 26 0 days 00:00:07.310141106 +860 27 0 days 00:00:07.346263460 +860 28 0 days 00:00:12.858169186 +860 29 0 days 00:00:12.937719146 +860 30 0 days 00:00:16.171981140 +860 31 0 days 00:00:05.762550848 +860 32 0 days 00:00:05.088732930 +860 33 0 days 00:00:14.263366755 +860 34 0 days 00:00:08.461504175 +860 35 0 days 00:00:05.432691600 +860 36 0 days 00:00:10.194230966 +860 37 0 days 00:00:12.589203440 +860 38 0 days 00:00:08.289936080 +860 39 0 days 00:00:14.269194880 +860 40 0 days 00:00:14.052344580 +860 41 0 days 00:00:10.434268356 +860 42 0 days 00:00:05.820560890 +861 1 0 days 00:00:31.317698520 +861 2 0 days 00:00:16.398024826 +861 3 0 days 00:00:09.179996920 +861 4 0 days 00:00:18.080336100 +861 5 0 days 00:00:10.945090088 +861 6 0 days 
00:00:10.571153555 +861 7 0 days 00:00:09.318643540 +861 8 0 days 00:00:10.367705133 +861 9 0 days 00:00:15.384880950 +861 10 0 days 00:00:29.964656000 +861 11 0 days 00:00:30.738400120 +861 12 0 days 00:00:09.017121066 +861 13 0 days 00:00:32.220424295 +861 14 0 days 00:00:36.788656973 +861 15 0 days 00:00:32.509807596 +861 16 0 days 00:00:15.288193700 +861 17 0 days 00:00:10.950543525 +861 18 0 days 00:00:10.565616500 +861 19 0 days 00:00:11.487417316 +862 1 0 days 00:00:07.859985533 +862 2 0 days 00:00:14.116786320 +862 3 0 days 00:00:15.050273266 +862 4 0 days 00:00:11.041330766 +862 5 0 days 00:00:05.438991860 +862 6 0 days 00:00:06.540750846 +862 7 0 days 00:00:05.222673725 +862 8 0 days 00:00:12.600944506 +862 9 0 days 00:00:16.382928480 +862 10 0 days 00:00:06.481398060 +862 11 0 days 00:00:07.401474693 +862 12 0 days 00:00:05.458773292 +862 13 0 days 00:00:13.072868573 +862 14 0 days 00:00:07.529683873 +862 15 0 days 00:00:09.116797500 +862 16 0 days 00:00:10.331437956 +862 17 0 days 00:00:07.977042206 +862 18 0 days 00:00:05.370970685 +862 19 0 days 00:00:08.086451546 +862 20 0 days 00:00:08.240763925 +862 21 0 days 00:00:10.760765525 +862 22 0 days 00:00:15.080288180 +862 23 0 days 00:00:09.122501540 +862 24 0 days 00:00:17.710589543 +862 25 0 days 00:00:04.952864245 +862 26 0 days 00:00:05.667832124 +862 27 0 days 00:00:06.032958986 +862 28 0 days 00:00:07.769382215 +862 29 0 days 00:00:14.759699870 +862 30 0 days 00:00:05.020154885 +862 31 0 days 00:00:19.739964754 +862 32 0 days 00:00:16.546983710 +862 33 0 days 00:00:06.453586132 +862 34 0 days 00:00:05.324338386 +862 35 0 days 00:00:09.504322985 +862 36 0 days 00:00:04.356403600 +862 37 0 days 00:00:15.519390135 +862 38 0 days 00:00:15.928105417 +863 1 0 days 00:01:12.789226700 +863 2 0 days 00:00:28.617115120 +863 3 0 days 00:01:26.778350372 +863 4 0 days 00:00:30.410147686 +863 5 0 days 00:00:30.465441360 +863 6 0 days 00:00:51.086258336 +863 7 0 days 00:01:31.059483080 +864 1 0 days 
00:00:24.815358740 +864 2 0 days 00:00:50.664712954 +864 3 0 days 00:00:24.199849153 +864 4 0 days 00:00:27.700822160 +864 5 0 days 00:00:46.571407475 +864 6 0 days 00:01:21.961961175 +864 7 0 days 00:00:41.594192546 +864 8 0 days 00:00:46.567752725 +864 9 0 days 00:00:31.840165602 +864 10 0 days 00:00:31.374108315 +865 1 0 days 00:00:27.779743895 +865 2 0 days 00:00:28.882325170 +865 3 0 days 00:00:51.018668380 +865 4 0 days 00:00:35.039421658 +865 5 0 days 00:00:27.760208995 +865 6 0 days 00:00:18.702445050 +865 7 0 days 00:00:44.812800373 +865 8 0 days 00:00:53.270148836 +865 9 0 days 00:00:32.702804826 +866 1 0 days 00:00:16.288966680 +866 2 0 days 00:00:30.959574056 +866 3 0 days 00:00:18.480937200 +866 4 0 days 00:00:45.730131640 +866 5 0 days 00:00:19.276005011 +866 6 0 days 00:00:15.191605013 +866 7 0 days 00:00:54.476133320 +866 8 0 days 00:00:31.312795406 +866 9 0 days 00:00:19.331406836 +866 10 0 days 00:00:27.856530560 +866 11 0 days 00:00:31.313000306 +866 12 0 days 00:00:51.199955425 +866 13 0 days 00:00:44.791229060 +867 1 0 days 00:01:28.124091006 +867 2 0 days 00:00:24.073205353 +867 3 0 days 00:01:26.594776856 +867 4 0 days 00:00:39.648464766 +867 5 0 days 00:01:11.600300046 +867 6 0 days 00:01:20.192722745 +867 7 0 days 00:00:30.348684483 +868 1 0 days 00:00:16.890646515 +868 2 0 days 00:00:18.295777176 +868 3 0 days 00:00:17.950152412 +868 4 0 days 00:00:44.760217853 +868 5 0 days 00:00:59.750660116 +868 6 0 days 00:00:17.218085560 +868 7 0 days 00:00:54.109783560 +868 8 0 days 00:00:27.759261725 +868 9 0 days 00:00:28.768578160 +868 10 0 days 00:00:44.751453686 +868 11 0 days 00:00:45.401972553 +869 1 0 days 00:00:00.270912003 +869 2 0 days 00:00:00.127644940 +869 3 0 days 00:00:00.126038030 +869 4 0 days 00:00:00.282225896 +869 5 0 days 00:00:00.195290409 +869 6 0 days 00:00:00.165787525 +869 7 0 days 00:00:00.171218960 +869 8 0 days 00:00:00.132717528 +869 9 0 days 00:00:00.160567270 +869 11 0 days 00:00:00.272482364 +869 12 0 days 
00:00:00.137697860 +869 14 0 days 00:00:00.148241442 +869 15 0 days 00:00:00.275044674 +869 16 0 days 00:00:00.256106670 +869 17 0 days 00:00:00.129066405 +869 18 0 days 00:00:00.185021055 +869 19 0 days 00:00:00.249386924 +869 20 0 days 00:00:00.179262306 +869 22 0 days 00:00:00.145477465 +869 23 0 days 00:00:00.199294494 +869 24 0 days 00:00:00.188470586 +869 25 0 days 00:00:00.146556320 +869 26 0 days 00:00:00.201989497 +869 27 0 days 00:00:00.232408175 +869 28 0 days 00:00:00.250478972 +869 30 0 days 00:00:00.117492720 +869 31 0 days 00:00:00.197287422 +869 32 0 days 00:00:00.129777840 +869 33 0 days 00:00:00.162441660 +869 35 0 days 00:00:00.166159575 +869 36 0 days 00:00:00.234820870 +869 37 0 days 00:00:00.138185548 +869 38 0 days 00:00:00.148775540 +869 39 0 days 00:00:00.124329445 +869 40 0 days 00:00:00.229842550 +869 41 0 days 00:00:00.273312566 +869 42 0 days 00:00:00.286387918 +869 43 0 days 00:00:00.151073784 +869 44 0 days 00:00:00.284458780 +869 46 0 days 00:00:00.180973950 +869 47 0 days 00:00:00.259902065 +869 48 0 days 00:00:00.126644570 +869 49 0 days 00:00:00.139261750 +869 50 0 days 00:00:00.151977998 +869 51 0 days 00:00:00.173486052 +869 53 0 days 00:00:00.128528110 +869 54 0 days 00:00:00.192582558 +869 55 0 days 00:00:00.150030882 +869 56 0 days 00:00:00.131674508 +869 57 0 days 00:00:00.188939890 +869 58 0 days 00:00:00.147896298 +869 59 0 days 00:00:00.178556515 +869 60 0 days 00:00:00.149878685 +869 61 0 days 00:00:00.245018164 +869 62 0 days 00:00:00.231895660 +869 63 0 days 00:00:00.270156600 +869 64 0 days 00:00:00.279204278 +869 65 0 days 00:00:00.279672823 +869 66 0 days 00:00:00.262659526 +869 67 0 days 00:00:00.150718256 +869 68 0 days 00:00:00.275493650 +869 69 0 days 00:00:00.187913770 +869 70 0 days 00:00:00.146024071 +869 71 0 days 00:00:00.246711176 +869 72 0 days 00:00:00.194689098 +869 73 0 days 00:00:00.184015122 +869 74 0 days 00:00:00.183749422 +869 76 0 days 00:00:00.155737077 +869 77 0 days 00:00:00.162845845 +869 78 
0 days 00:00:00.150299300 +869 79 0 days 00:00:00.140280097 +869 80 0 days 00:00:00.264002868 +869 81 0 days 00:00:00.178425665 +869 83 0 days 00:00:00.146946161 +869 84 0 days 00:00:00.190707148 +869 86 0 days 00:00:00.138094382 +869 87 0 days 00:00:00.177701142 +869 88 0 days 00:00:00.129133975 +869 89 0 days 00:00:00.122147780 +869 90 0 days 00:00:00.225864115 +869 91 0 days 00:00:00.193784828 +869 92 0 days 00:00:00.162771080 +869 93 0 days 00:00:00.234988280 +869 94 0 days 00:00:00.288622822 +869 95 0 days 00:00:00.131590316 +869 96 0 days 00:00:00.146618510 +869 97 0 days 00:00:00.188537726 +869 98 0 days 00:00:00.168876908 +869 99 0 days 00:00:00.174566310 +869 100 0 days 00:00:00.115452220 +870 1 0 days 00:00:00.246999300 +870 2 0 days 00:00:00.125224110 +870 3 0 days 00:00:00.146425918 +870 4 0 days 00:00:00.231027205 +870 5 0 days 00:00:00.165151310 +870 6 0 days 00:00:00.179161588 +870 7 0 days 00:00:00.129626480 +870 8 0 days 00:00:00.256439870 +870 10 0 days 00:00:00.254528780 +870 11 0 days 00:00:00.198409732 +870 12 0 days 00:00:00.233506795 +870 13 0 days 00:00:00.195007995 +870 14 0 days 00:00:00.138445557 +870 15 0 days 00:00:00.141922462 +870 17 0 days 00:00:00.173801385 +870 18 0 days 00:00:00.183536716 +870 19 0 days 00:00:00.170894588 +870 20 0 days 00:00:00.143012388 +870 21 0 days 00:00:00.290352080 +870 22 0 days 00:00:00.257584443 +870 23 0 days 00:00:00.251794090 +870 24 0 days 00:00:00.234914232 +870 25 0 days 00:00:00.198233627 +870 26 0 days 00:00:00.209637426 +870 27 0 days 00:00:00.258563413 +870 28 0 days 00:00:00.134073724 +870 29 0 days 00:00:00.157038755 +870 30 0 days 00:00:00.162040013 +870 31 0 days 00:00:00.281791154 +870 32 0 days 00:00:00.153183385 +870 33 0 days 00:00:00.263312275 +870 35 0 days 00:00:00.128481364 +870 36 0 days 00:00:00.278844232 +870 37 0 days 00:00:00.225798270 +870 38 0 days 00:00:00.229773805 +870 39 0 days 00:00:00.171324810 +870 40 0 days 00:00:00.159716110 +870 41 0 days 00:00:00.156738145 +870 42 
0 days 00:00:00.123173935 +870 43 0 days 00:00:00.159212605 +870 44 0 days 00:00:00.168155240 +870 45 0 days 00:00:00.270763317 +870 46 0 days 00:00:00.188053215 +870 47 0 days 00:00:00.179751756 +870 48 0 days 00:00:00.145517630 +870 49 0 days 00:00:00.153482460 +870 50 0 days 00:00:00.133573676 +870 51 0 days 00:00:00.147329164 +870 52 0 days 00:00:00.174832011 +870 53 0 days 00:00:00.144650160 +870 54 0 days 00:00:00.179122102 +870 55 0 days 00:00:00.146666826 +870 56 0 days 00:00:00.145050762 +870 57 0 days 00:00:00.118001090 +870 58 0 days 00:00:00.182199808 +870 59 0 days 00:00:00.146594591 +870 60 0 days 00:00:00.125796228 +870 61 0 days 00:00:00.150480522 +870 62 0 days 00:00:00.154185986 +870 63 0 days 00:00:00.231521472 +870 64 0 days 00:00:00.198066310 +870 65 0 days 00:00:00.224963495 +870 66 0 days 00:00:00.139440340 +870 67 0 days 00:00:00.176037508 +870 68 0 days 00:00:00.182273192 +870 69 0 days 00:00:00.238328756 +870 70 0 days 00:00:00.200912766 +870 71 0 days 00:00:00.135609355 +870 72 0 days 00:00:00.266947361 +870 73 0 days 00:00:00.185380296 +870 74 0 days 00:00:00.232477284 +870 75 0 days 00:00:00.183429863 +870 76 0 days 00:00:00.160469985 +870 77 0 days 00:00:00.141635322 +870 78 0 days 00:00:00.117725180 +870 79 0 days 00:00:00.229624212 +870 80 0 days 00:00:00.151372660 +870 81 0 days 00:00:00.140433668 +870 82 0 days 00:00:00.261637041 +870 83 0 days 00:00:00.128052448 +870 84 0 days 00:00:00.146818897 +870 85 0 days 00:00:00.140936280 +870 86 0 days 00:00:00.186060276 +870 87 0 days 00:00:00.137437160 +870 88 0 days 00:00:00.184381264 +870 89 0 days 00:00:00.174842951 +870 91 0 days 00:00:00.166538385 +870 92 0 days 00:00:00.127611350 +870 94 0 days 00:00:00.135839348 +870 95 0 days 00:00:00.114793373 +870 96 0 days 00:00:00.216983540 +870 97 0 days 00:00:00.197084986 +870 98 0 days 00:00:00.107891320 +870 99 0 days 00:00:00.236401944 +870 100 0 days 00:00:00.219920720 +871 1 0 days 00:00:00.101553916 +871 2 0 days 00:00:00.122462590 
+871 3 0 days 00:00:00.128238320 +871 4 0 days 00:00:00.070431428 +871 5 0 days 00:00:00.074801873 +871 6 0 days 00:00:00.078876753 +871 7 0 days 00:00:00.083248255 +871 8 0 days 00:00:00.121560580 +871 9 0 days 00:00:00.067864090 +871 10 0 days 00:00:00.076941996 +871 11 0 days 00:00:00.072030823 +871 12 0 days 00:00:00.093792606 +871 13 0 days 00:00:00.078204622 +871 14 0 days 00:00:00.138876460 +871 15 0 days 00:00:00.137522616 +871 16 0 days 00:00:00.074503846 +871 17 0 days 00:00:00.078345025 +871 18 0 days 00:00:00.076413760 +871 19 0 days 00:00:00.147367315 +871 20 0 days 00:00:00.068152505 +871 21 0 days 00:00:00.130402440 +871 22 0 days 00:00:00.130561124 +871 23 0 days 00:00:00.068022790 +871 24 0 days 00:00:00.083776905 +871 25 0 days 00:00:00.078427775 +871 26 0 days 00:00:00.098328720 +871 27 0 days 00:00:00.063774095 +871 28 0 days 00:00:00.081699070 +871 29 0 days 00:00:00.118892310 +871 30 0 days 00:00:00.070491916 +871 31 0 days 00:00:00.131647455 +871 33 0 days 00:00:00.099479468 +871 34 0 days 00:00:00.149235750 +871 35 0 days 00:00:00.150453416 +871 36 0 days 00:00:00.078062250 +871 37 0 days 00:00:00.128131135 +871 38 0 days 00:00:00.102675900 +871 39 0 days 00:00:00.090144335 +871 40 0 days 00:00:00.073068760 +871 41 0 days 00:00:00.083382948 +871 42 0 days 00:00:00.145175044 +871 43 0 days 00:00:00.085088105 +871 44 0 days 00:00:00.069061010 +871 45 0 days 00:00:00.095878436 +871 46 0 days 00:00:00.075015960 +871 47 0 days 00:00:00.123562170 +871 48 0 days 00:00:00.077695056 +871 49 0 days 00:00:00.103854707 +871 50 0 days 00:00:00.150572624 +871 51 0 days 00:00:00.088873240 +871 52 0 days 00:00:00.079690512 +871 53 0 days 00:00:00.069335275 +871 54 0 days 00:00:00.061528120 +871 55 0 days 00:00:00.144912144 +871 57 0 days 00:00:00.090057126 +871 58 0 days 00:00:00.152118508 +871 59 0 days 00:00:00.093846185 +871 60 0 days 00:00:00.074936420 +871 61 0 days 00:00:00.071172060 +871 62 0 days 00:00:00.083527146 +871 64 0 days 00:00:00.125992460 
+871 65 0 days 00:00:00.071663044 +871 66 0 days 00:00:00.105034494 +871 67 0 days 00:00:00.087460213 +871 68 0 days 00:00:00.149709462 +871 69 0 days 00:00:00.067892650 +871 70 0 days 00:00:00.090254310 +871 71 0 days 00:00:00.123316985 +871 72 0 days 00:00:00.070007390 +871 73 0 days 00:00:00.078838168 +871 74 0 days 00:00:00.129844360 +871 75 0 days 00:00:00.086856645 +871 76 0 days 00:00:00.077580195 +871 77 0 days 00:00:00.082561556 +871 78 0 days 00:00:00.069094930 +871 79 0 days 00:00:00.078857105 +871 80 0 days 00:00:00.133542996 +871 82 0 days 00:00:00.121150515 +871 83 0 days 00:00:00.071938664 +871 84 0 days 00:00:00.099813282 +871 85 0 days 00:00:00.083505460 +871 86 0 days 00:00:00.102760122 +871 87 0 days 00:00:00.071675572 +871 88 0 days 00:00:00.078956876 +871 89 0 days 00:00:00.102594923 +871 90 0 days 00:00:00.079834980 +871 91 0 days 00:00:00.076583496 +871 92 0 days 00:00:00.143870668 +871 93 0 days 00:00:00.078972526 +871 94 0 days 00:00:00.084211290 +871 96 0 days 00:00:00.090160116 +871 97 0 days 00:00:00.136084064 +871 98 0 days 00:00:00.154522320 +871 99 0 days 00:00:00.145798915 +871 100 0 days 00:00:00.080941587 +872 1 0 days 00:00:00.092498505 +872 2 0 days 00:00:00.148189944 +872 3 0 days 00:00:00.071617285 +872 4 0 days 00:00:00.126741720 +872 5 0 days 00:00:00.138380023 +872 6 0 days 00:00:00.126175805 +872 7 0 days 00:00:00.081969230 +872 8 0 days 00:00:00.106428179 +872 9 0 days 00:00:00.123945230 +872 10 0 days 00:00:00.134081316 +872 11 0 days 00:00:00.125286790 +872 12 0 days 00:00:00.088437184 +872 13 0 days 00:00:00.146147465 +872 14 0 days 00:00:00.136751800 +872 15 0 days 00:00:00.067682070 +872 16 0 days 00:00:00.072622656 +872 17 0 days 00:00:00.085494753 +872 18 0 days 00:00:00.103689154 +872 19 0 days 00:00:00.132453884 +872 20 0 days 00:00:00.131057592 +872 21 0 days 00:00:00.082249515 +872 22 0 days 00:00:00.066571305 +872 23 0 days 00:00:00.067608570 +872 24 0 days 00:00:00.128258772 +872 25 0 days 00:00:00.099201450 
+872 26 0 days 00:00:00.102625180 +872 27 0 days 00:00:00.116860333 +872 28 0 days 00:00:00.138058660 +872 29 0 days 00:00:00.071191815 +872 30 0 days 00:00:00.080743700 +872 31 0 days 00:00:00.096638985 +872 32 0 days 00:00:00.082974084 +872 33 0 days 00:00:00.097227980 +872 34 0 days 00:00:00.076484034 +872 35 0 days 00:00:00.085000090 +872 37 0 days 00:00:00.126321990 +872 38 0 days 00:00:00.098190820 +872 39 0 days 00:00:00.084233005 +872 40 0 days 00:00:00.061416966 +872 41 0 days 00:00:00.075475113 +872 42 0 days 00:00:00.133098953 +872 43 0 days 00:00:00.147394420 +872 44 0 days 00:00:00.073052370 +872 45 0 days 00:00:00.077100608 +872 46 0 days 00:00:00.092231245 +872 47 0 days 00:00:00.073794993 +872 48 0 days 00:00:00.127802795 +872 49 0 days 00:00:00.066585600 +872 50 0 days 00:00:00.082108465 +872 51 0 days 00:00:00.066504905 +872 52 0 days 00:00:00.145398033 +872 53 0 days 00:00:00.087391995 +872 54 0 days 00:00:00.098842960 +872 55 0 days 00:00:00.099369448 +872 56 0 days 00:00:00.080227375 +872 57 0 days 00:00:00.086211372 +872 58 0 days 00:00:00.092746822 +872 59 0 days 00:00:00.135459611 +872 60 0 days 00:00:00.115109880 +872 61 0 days 00:00:00.072126246 +872 62 0 days 00:00:00.069406563 +872 63 0 days 00:00:00.129176150 +872 64 0 days 00:00:00.087679733 +872 65 0 days 00:00:00.078625610 +872 66 0 days 00:00:00.084262352 +872 67 0 days 00:00:00.142944092 +872 68 0 days 00:00:00.076003511 +872 69 0 days 00:00:00.115869600 +872 70 0 days 00:00:00.079091340 +872 71 0 days 00:00:00.057868206 +872 72 0 days 00:00:00.116374020 +872 73 0 days 00:00:00.114681650 +872 74 0 days 00:00:00.079804410 +872 75 0 days 00:00:00.058860300 +872 76 0 days 00:00:00.082347584 +872 77 0 days 00:00:00.141016067 +872 78 0 days 00:00:00.136821434 +872 79 0 days 00:00:00.072344680 +872 80 0 days 00:00:00.073725046 +872 81 0 days 00:00:00.101189473 +872 82 0 days 00:00:00.137559484 +872 83 0 days 00:00:00.071353282 +872 84 0 days 00:00:00.070900625 +872 85 0 days 
00:00:00.091644132 +872 86 0 days 00:00:00.068098652 +872 87 0 days 00:00:00.066351552 +872 88 0 days 00:00:00.066401204 +872 89 0 days 00:00:00.067150112 +872 90 0 days 00:00:00.064249100 +872 91 0 days 00:00:00.081866440 +872 92 0 days 00:00:00.068245420 +872 93 0 days 00:00:00.071064446 +872 94 0 days 00:00:00.114703840 +872 95 0 days 00:00:00.120635380 +872 96 0 days 00:00:00.091297510 +872 97 0 days 00:00:00.072182073 +872 98 0 days 00:00:00.085460710 +872 99 0 days 00:00:00.132829127 +872 100 0 days 00:00:00.073044924 +873 1 0 days 00:00:00.159617965 +873 2 0 days 00:00:00.280764356 +873 3 0 days 00:00:00.134002590 +873 4 0 days 00:00:00.160449350 +873 5 0 days 00:00:00.188446118 +873 6 0 days 00:00:00.281996802 +873 7 0 days 00:00:00.139100163 +873 8 0 days 00:00:00.274032294 +873 9 0 days 00:00:00.125422065 +873 10 0 days 00:00:00.250047696 +873 11 0 days 00:00:00.273849432 +873 12 0 days 00:00:00.177468443 +873 13 0 days 00:00:00.151107750 +873 14 0 days 00:00:00.190228966 +873 15 0 days 00:00:00.256956573 +873 16 0 days 00:00:00.254282113 +873 17 0 days 00:00:00.272014608 +873 18 0 days 00:00:00.128265165 +873 19 0 days 00:00:00.179706302 +873 20 0 days 00:00:00.242936768 +873 21 0 days 00:00:00.188453631 +873 22 0 days 00:00:00.148297635 +873 23 0 days 00:00:00.266782595 +873 24 0 days 00:00:00.152316076 +873 25 0 days 00:00:00.147967497 +873 26 0 days 00:00:00.126587415 +873 27 0 days 00:00:00.243610972 +873 28 0 days 00:00:00.194062640 +873 29 0 days 00:00:00.228133420 +873 30 0 days 00:00:00.127649855 +873 31 0 days 00:00:00.154908449 +873 32 0 days 00:00:00.171385684 +873 33 0 days 00:00:00.241137700 +873 34 0 days 00:00:00.127213785 +873 35 0 days 00:00:00.276343207 +873 36 0 days 00:00:00.175029326 +873 37 0 days 00:00:00.232917405 +873 38 0 days 00:00:00.181288730 +873 39 0 days 00:00:00.136925145 +873 40 0 days 00:00:00.125295000 +873 41 0 days 00:00:00.144406795 +873 42 0 days 00:00:00.249837845 +873 43 0 days 00:00:00.220002295 +873 45 0 days 
00:00:00.179074940 +873 46 0 days 00:00:00.273061947 +873 47 0 days 00:00:00.142626980 +873 48 0 days 00:00:00.179706550 +873 49 0 days 00:00:00.144798069 +873 50 0 days 00:00:00.245091486 +873 51 0 days 00:00:00.157720555 +873 52 0 days 00:00:00.170306084 +873 53 0 days 00:00:00.155864260 +873 54 0 days 00:00:00.251642503 +873 55 0 days 00:00:00.133375460 +873 56 0 days 00:00:00.121155140 +873 57 0 days 00:00:00.253371305 +873 58 0 days 00:00:00.163229645 +873 59 0 days 00:00:00.245389044 +873 60 0 days 00:00:00.227572285 +873 61 0 days 00:00:00.143777502 +873 62 0 days 00:00:00.138732810 +873 63 0 days 00:00:00.145399437 +873 64 0 days 00:00:00.142351043 +873 65 0 days 00:00:00.163311240 +873 66 0 days 00:00:00.130902170 +873 67 0 days 00:00:00.154126850 +873 68 0 days 00:00:00.275550304 +873 69 0 days 00:00:00.254272060 +873 70 0 days 00:00:00.230601105 +873 71 0 days 00:00:00.171978793 +873 72 0 days 00:00:00.162076870 +873 73 0 days 00:00:00.182615174 +873 74 0 days 00:00:00.181361017 +873 75 0 days 00:00:00.277707860 +873 76 0 days 00:00:00.176845394 +873 77 0 days 00:00:00.156982555 +873 78 0 days 00:00:00.124948225 +873 79 0 days 00:00:00.259725717 +873 80 0 days 00:00:00.223038815 +873 81 0 days 00:00:00.276934690 +873 82 0 days 00:00:00.136058603 +873 83 0 days 00:00:00.268670870 +873 84 0 days 00:00:00.184997405 +873 85 0 days 00:00:00.150147725 +873 86 0 days 00:00:00.169574836 +873 87 0 days 00:00:00.158492145 +873 88 0 days 00:00:00.263775044 +873 89 0 days 00:00:00.228153955 +873 90 0 days 00:00:00.154966468 +873 91 0 days 00:00:00.282660153 +873 92 0 days 00:00:00.192306853 +873 93 0 days 00:00:00.281943566 +873 94 0 days 00:00:00.271854746 +873 95 0 days 00:00:00.187888073 +873 96 0 days 00:00:00.143073822 +873 97 0 days 00:00:00.280624075 +873 98 0 days 00:00:00.230808140 +873 99 0 days 00:00:00.152907971 +873 100 0 days 00:00:00.128079170 +874 2 0 days 00:00:00.145327448 +874 3 0 days 00:00:00.099696086 +874 6 0 days 00:00:00.091663712 +874 7 0 
days 00:00:00.091005092 +874 8 0 days 00:00:00.083933795 +874 9 0 days 00:00:00.125148325 +874 10 0 days 00:00:00.094813290 +874 11 0 days 00:00:00.126311890 +874 12 0 days 00:00:00.087980840 +874 13 0 days 00:00:00.074603248 +874 14 0 days 00:00:00.081205040 +874 15 0 days 00:00:00.099808953 +874 16 0 days 00:00:00.079266422 +874 17 0 days 00:00:00.090850828 +874 18 0 days 00:00:00.080723291 +874 19 0 days 00:00:00.086034650 +874 20 0 days 00:00:00.100431536 +874 21 0 days 00:00:00.079401482 +874 22 0 days 00:00:00.151529437 +874 23 0 days 00:00:00.079191895 +874 24 0 days 00:00:00.085925800 +874 25 0 days 00:00:00.100851162 +874 26 0 days 00:00:00.081686763 +874 27 0 days 00:00:00.147181263 +874 28 0 days 00:00:00.132046252 +874 29 0 days 00:00:00.086290750 +874 30 0 days 00:00:00.144367320 +874 31 0 days 00:00:00.101609534 +874 32 0 days 00:00:00.151207382 +874 33 0 days 00:00:00.098564622 +874 34 0 days 00:00:00.126310495 +874 35 0 days 00:00:00.080604392 +874 36 0 days 00:00:00.073600200 +874 37 0 days 00:00:00.095697856 +874 38 0 days 00:00:00.131681024 +874 39 0 days 00:00:00.124443730 +874 40 0 days 00:00:00.093574610 +874 41 0 days 00:00:00.075791743 +874 42 0 days 00:00:00.077956965 +874 43 0 days 00:00:00.125809270 +874 44 0 days 00:00:00.132189616 +874 45 0 days 00:00:00.078895908 +874 46 0 days 00:00:00.080428568 +874 47 0 days 00:00:00.098542687 +874 48 0 days 00:00:00.125153810 +874 49 0 days 00:00:00.143294950 +874 50 0 days 00:00:00.132074828 +874 51 0 days 00:00:00.072595748 +874 52 0 days 00:00:00.083145249 +874 53 0 days 00:00:00.143971535 +874 54 0 days 00:00:00.145685673 +874 55 0 days 00:00:00.082003258 +874 56 0 days 00:00:00.140206688 +874 57 0 days 00:00:00.125615535 +874 58 0 days 00:00:00.086849510 +874 59 0 days 00:00:00.147951468 +874 60 0 days 00:00:00.145386122 +874 61 0 days 00:00:00.096559177 +874 62 0 days 00:00:00.091499344 +874 63 0 days 00:00:00.082939286 +874 64 0 days 00:00:00.136781040 +874 65 0 days 00:00:00.083721415 +874 
66 0 days 00:00:00.080119975 +874 67 0 days 00:00:00.076201623 +874 68 0 days 00:00:00.086663265 +874 69 0 days 00:00:00.142042695 +874 70 0 days 00:00:00.083426651 +874 71 0 days 00:00:00.145760088 +874 72 0 days 00:00:00.091833664 +874 73 0 days 00:00:00.069864330 +874 74 0 days 00:00:00.074492680 +874 75 0 days 00:00:00.103632064 +874 76 0 days 00:00:00.139399882 +874 77 0 days 00:00:00.083286302 +874 78 0 days 00:00:00.133329060 +874 79 0 days 00:00:00.125214725 +874 80 0 days 00:00:00.098138894 +874 81 0 days 00:00:00.077714565 +874 82 0 days 00:00:00.132096992 +874 84 0 days 00:00:00.087248180 +874 86 0 days 00:00:00.103158202 +874 87 0 days 00:00:00.076595595 +874 88 0 days 00:00:00.099264850 +874 89 0 days 00:00:00.147868467 +874 90 0 days 00:00:00.074667590 +874 91 0 days 00:00:00.078439222 +874 92 0 days 00:00:00.077084727 +874 93 0 days 00:00:00.129692912 +874 94 0 days 00:00:00.132825666 +874 95 0 days 00:00:00.089838528 +874 96 0 days 00:00:00.073925076 +874 97 0 days 00:00:00.098181932 +874 98 0 days 00:00:00.099706756 +874 99 0 days 00:00:00.079728598 +874 100 0 days 00:00:00.091824923 +875 1 0 days 00:00:00.424977910 +875 2 0 days 00:00:01.338819230 +875 3 0 days 00:00:00.420946176 +875 4 0 days 00:00:01.449258965 +875 5 0 days 00:00:01.376291730 +875 6 0 days 00:00:01.386918644 +875 7 0 days 00:00:00.708846517 +875 8 0 days 00:00:00.767411124 +875 9 0 days 00:00:00.790306576 +875 10 0 days 00:00:00.401013375 +875 11 0 days 00:00:00.459860704 +875 12 0 days 00:00:01.351960863 +875 14 0 days 00:00:01.265828320 +875 15 0 days 00:00:00.825025304 +875 16 0 days 00:00:00.659307375 +875 17 0 days 00:00:00.578470268 +875 18 0 days 00:00:01.315125296 +875 19 0 days 00:00:00.523971957 +875 20 0 days 00:00:01.510815795 +875 21 0 days 00:00:00.376178375 +875 22 0 days 00:00:00.613604433 +875 23 0 days 00:00:00.564330460 +875 24 0 days 00:00:00.450038585 +875 25 0 days 00:00:00.462330527 +875 26 0 days 00:00:01.276104792 +875 27 0 days 00:00:01.387240100 +875 
28 0 days 00:00:00.473982836 +875 29 0 days 00:00:01.175435100 +875 30 0 days 00:00:00.756874810 +875 31 0 days 00:00:00.480450005 +875 32 0 days 00:00:00.454197954 +875 33 0 days 00:00:01.561812246 +875 34 0 days 00:00:00.416893480 +875 35 0 days 00:00:00.714224875 +875 36 0 days 00:00:00.691195265 +875 37 0 days 00:00:01.516536350 +875 38 0 days 00:00:01.671074070 +875 39 0 days 00:00:01.402266654 +875 40 0 days 00:00:00.418098520 +875 41 0 days 00:00:00.710221484 +875 42 0 days 00:00:01.319831335 +875 43 0 days 00:00:00.821293910 +875 44 0 days 00:00:00.610841180 +875 45 0 days 00:00:00.795919426 +875 46 0 days 00:00:00.717255405 +875 47 0 days 00:00:01.450123525 +875 48 0 days 00:00:00.760188853 +875 49 0 days 00:00:01.670146820 +875 50 0 days 00:00:00.414890300 +875 51 0 days 00:00:01.374165790 +875 52 0 days 00:00:01.226161906 +875 53 0 days 00:00:00.440168700 +875 54 0 days 00:00:01.638428800 +875 55 0 days 00:00:01.280673810 +875 56 0 days 00:00:00.556229932 +875 57 0 days 00:00:01.493922736 +875 58 0 days 00:00:00.878763745 +875 59 0 days 00:00:00.799438825 +875 60 0 days 00:00:00.404815324 +875 61 0 days 00:00:00.383758090 +875 62 0 days 00:00:00.542228926 +875 63 0 days 00:00:00.523903274 +875 64 0 days 00:00:01.409443386 +875 65 0 days 00:00:01.349176720 +875 66 0 days 00:00:00.746437380 +875 67 0 days 00:00:00.556918185 +875 68 0 days 00:00:00.762680453 +875 69 0 days 00:00:00.691719780 +875 70 0 days 00:00:00.965777988 +875 71 0 days 00:00:00.754229403 +875 72 0 days 00:00:01.540466262 +875 73 0 days 00:00:01.344033130 +875 74 0 days 00:00:01.440179190 +875 75 0 days 00:00:00.374110085 +875 76 0 days 00:00:01.469738033 +875 77 0 days 00:00:01.276442860 +875 78 0 days 00:00:00.844037314 +875 80 0 days 00:00:00.558804135 +875 81 0 days 00:00:00.510481828 +875 82 0 days 00:00:00.426470160 +875 83 0 days 00:00:00.474818526 +875 84 0 days 00:00:00.832146397 +875 85 0 days 00:00:01.196188746 +875 86 0 days 00:00:01.433185143 +875 87 0 days 
00:00:00.464882220 +875 89 0 days 00:00:00.477196026 +875 90 0 days 00:00:01.113287906 +875 91 0 days 00:00:01.112468673 +875 92 0 days 00:00:00.842909853 +875 93 0 days 00:00:01.027714124 +875 94 0 days 00:00:00.869315290 +875 95 0 days 00:00:00.403832985 +875 96 0 days 00:00:01.580896924 +875 97 0 days 00:00:00.494296303 +875 98 0 days 00:00:01.422895512 +875 100 0 days 00:00:00.724769288 +876 1 0 days 00:00:01.099282257 +876 2 0 days 00:00:00.493224263 +876 3 0 days 00:00:00.849716408 +876 4 0 days 00:00:00.696354750 +876 5 0 days 00:00:01.542294450 +876 6 0 days 00:00:01.516365316 +876 7 0 days 00:00:01.483411010 +876 8 0 days 00:00:00.973858155 +876 9 0 days 00:00:00.603987529 +876 10 0 days 00:00:00.846429310 +876 11 0 days 00:00:00.486263151 +876 12 0 days 00:00:01.645989378 +876 13 0 days 00:00:00.998392783 +876 14 0 days 00:00:01.488579332 +876 15 0 days 00:00:01.515203324 +876 16 0 days 00:00:00.987647423 +876 17 0 days 00:00:01.670026063 +876 18 0 days 00:00:00.824951562 +876 19 0 days 00:00:00.462589910 +876 20 0 days 00:00:01.354639230 +876 21 0 days 00:00:00.766726872 +876 22 0 days 00:00:00.367282806 +876 23 0 days 00:00:01.236899193 +876 24 0 days 00:00:00.474180350 +876 25 0 days 00:00:01.620468948 +876 26 0 days 00:00:00.557262925 +876 27 0 days 00:00:00.736986771 +876 28 0 days 00:00:01.015947184 +876 29 0 days 00:00:00.443248590 +876 30 0 days 00:00:00.801145225 +876 31 0 days 00:00:00.760187140 +876 32 0 days 00:00:00.512337486 +876 33 0 days 00:00:00.499297151 +876 34 0 days 00:00:00.817688455 +876 35 0 days 00:00:00.737549660 +876 36 0 days 00:00:01.156125293 +876 37 0 days 00:00:00.555972492 +876 38 0 days 00:00:01.437523556 +876 39 0 days 00:00:00.434786904 +876 40 0 days 00:00:01.462916988 +876 41 0 days 00:00:00.894607344 +876 42 0 days 00:00:01.429260095 +876 43 0 days 00:00:00.503461592 +876 44 0 days 00:00:00.936791604 +876 45 0 days 00:00:01.383421950 +876 46 0 days 00:00:00.512048551 +876 47 0 days 00:00:00.825996590 +876 48 0 days 
00:00:01.488316470 +876 49 0 days 00:00:00.514710030 +876 50 0 days 00:00:01.498120548 +876 51 0 days 00:00:00.995122084 +876 52 0 days 00:00:00.420882700 +876 53 0 days 00:00:01.212666186 +876 54 0 days 00:00:00.681963133 +876 55 0 days 00:00:00.889991300 +876 56 0 days 00:00:00.518102170 +876 57 0 days 00:00:00.449189720 +876 58 0 days 00:00:00.706206850 +876 59 0 days 00:00:00.807707916 +876 60 0 days 00:00:00.521982375 +876 61 0 days 00:00:01.677809807 +876 62 0 days 00:00:01.296058753 +876 63 0 days 00:00:01.049058634 +876 64 0 days 00:00:00.543771678 +876 65 0 days 00:00:01.559136100 +876 66 0 days 00:00:00.826131198 +876 67 0 days 00:00:00.943168860 +876 68 0 days 00:00:01.721257354 +876 69 0 days 00:00:00.526104765 +876 70 0 days 00:00:00.942505389 +876 71 0 days 00:00:01.405612285 +876 72 0 days 00:00:00.555994565 +876 73 0 days 00:00:00.724447120 +876 74 0 days 00:00:00.653647112 +876 75 0 days 00:00:00.801158617 +876 76 0 days 00:00:01.361894200 +876 77 0 days 00:00:01.567724710 +876 78 0 days 00:00:00.692788660 +876 79 0 days 00:00:00.831710680 +876 80 0 days 00:00:00.459790925 +876 81 0 days 00:00:00.395916900 +876 82 0 days 00:00:00.794184293 +876 83 0 days 00:00:00.856267696 +876 84 0 days 00:00:00.471879791 +876 85 0 days 00:00:01.498205097 +876 86 0 days 00:00:01.278422626 +876 87 0 days 00:00:00.521090051 +876 88 0 days 00:00:00.514520020 +876 89 0 days 00:00:00.471093986 +876 90 0 days 00:00:00.415948795 +876 91 0 days 00:00:01.319181030 +876 92 0 days 00:00:01.497822334 +876 93 0 days 00:00:00.875820449 +876 94 0 days 00:00:00.415461085 +876 95 0 days 00:00:00.467135570 +876 96 0 days 00:00:00.750701435 +876 97 0 days 00:00:01.424911524 +876 98 0 days 00:00:00.410642840 +876 99 0 days 00:00:00.407562560 +876 100 0 days 00:00:01.656921878 +877 1 0 days 00:00:00.899142672 +877 2 0 days 00:00:00.660021215 +877 3 0 days 00:00:00.838421521 +877 4 0 days 00:00:00.289184805 +877 5 0 days 00:00:00.348357405 +877 6 0 days 00:00:00.386583443 +877 7 0 days 
00:00:00.324307532 +877 8 0 days 00:00:00.389172765 +877 9 0 days 00:00:00.352545830 +877 10 0 days 00:00:00.839543657 +877 11 0 days 00:00:00.383996286 +877 12 0 days 00:00:00.369659830 +877 13 0 days 00:00:00.227175820 +877 14 0 days 00:00:00.237863983 +877 15 0 days 00:00:00.836735552 +877 16 0 days 00:00:00.356095900 +877 17 0 days 00:00:00.403163390 +877 18 0 days 00:00:00.376603684 +877 19 0 days 00:00:00.707588404 +877 20 0 days 00:00:00.524571096 +877 21 0 days 00:00:00.241206291 +877 22 0 days 00:00:00.732262240 +877 23 0 days 00:00:00.293893690 +877 24 0 days 00:00:00.756149140 +877 25 0 days 00:00:00.326285395 +877 26 0 days 00:00:00.428635492 +877 27 0 days 00:00:00.292685531 +877 28 0 days 00:00:00.267072747 +877 29 0 days 00:00:00.435084991 +877 30 0 days 00:00:00.394697312 +877 31 0 days 00:00:00.362151120 +877 32 0 days 00:00:00.439105776 +877 33 0 days 00:00:00.764121536 +877 34 0 days 00:00:00.428085744 +877 35 0 days 00:00:00.648138600 +877 36 0 days 00:00:00.244625390 +877 37 0 days 00:00:00.664250680 +877 38 0 days 00:00:00.403303945 +877 39 0 days 00:00:00.704625320 +877 40 0 days 00:00:00.646539420 +877 41 0 days 00:00:00.469399218 +877 42 0 days 00:00:00.420253676 +877 43 0 days 00:00:00.668918275 +877 44 0 days 00:00:00.430861698 +877 46 0 days 00:00:00.786587790 +877 47 0 days 00:00:00.254760394 +877 48 0 days 00:00:00.293620184 +877 49 0 days 00:00:00.365435585 +877 50 0 days 00:00:00.775411010 +877 51 0 days 00:00:00.729002782 +877 52 0 days 00:00:00.200067990 +877 53 0 days 00:00:00.348940275 +877 54 0 days 00:00:00.826369507 +877 55 0 days 00:00:00.352980837 +877 56 0 days 00:00:00.269631484 +877 57 0 days 00:00:00.830627475 +877 58 0 days 00:00:00.470399500 +877 59 0 days 00:00:00.649430373 +877 60 0 days 00:00:00.284056995 +877 61 0 days 00:00:00.414346855 +877 62 0 days 00:00:00.213511946 +877 63 0 days 00:00:00.216425068 +877 64 0 days 00:00:00.701099285 +877 65 0 days 00:00:00.408207765 +877 66 0 days 00:00:00.650072665 +877 67 0 
days 00:00:00.454867350 +877 68 0 days 00:00:00.444659957 +877 69 0 days 00:00:00.406337105 +877 70 0 days 00:00:00.431638790 +877 71 0 days 00:00:00.412346640 +877 72 0 days 00:00:00.254879051 +877 73 0 days 00:00:00.698519340 +877 74 0 days 00:00:00.226341110 +877 75 0 days 00:00:00.442735896 +877 76 0 days 00:00:00.310069320 +877 77 0 days 00:00:00.200990825 +877 78 0 days 00:00:00.287330098 +877 79 0 days 00:00:00.387272595 +877 80 0 days 00:00:00.762851565 +877 81 0 days 00:00:00.820636045 +877 83 0 days 00:00:00.373891204 +877 84 0 days 00:00:00.491260615 +877 85 0 days 00:00:00.246646486 +877 86 0 days 00:00:00.597229473 +877 87 0 days 00:00:00.758707142 +877 88 0 days 00:00:00.351889000 +877 89 0 days 00:00:00.432296618 +877 90 0 days 00:00:00.282146482 +877 91 0 days 00:00:00.596480733 +877 92 0 days 00:00:00.441703482 +877 94 0 days 00:00:00.426442837 +877 95 0 days 00:00:00.428377594 +877 96 0 days 00:00:00.348198900 +877 97 0 days 00:00:00.781363580 +877 98 0 days 00:00:00.312774157 +877 99 0 days 00:00:00.421594192 +877 100 0 days 00:00:00.290482710 +878 1 0 days 00:00:00.363814950 +878 2 0 days 00:00:00.302817270 +878 3 0 days 00:00:00.249605523 +878 4 0 days 00:00:00.313168908 +878 5 0 days 00:00:00.200960665 +878 6 0 days 00:00:00.446853943 +878 7 0 days 00:00:00.856767358 +878 8 0 days 00:00:00.361647470 +878 9 0 days 00:00:00.728749465 +878 10 0 days 00:00:00.736479975 +878 11 0 days 00:00:00.824831657 +878 12 0 days 00:00:00.419850296 +878 13 0 days 00:00:00.807898738 +878 14 0 days 00:00:00.379984035 +878 15 0 days 00:00:00.241632965 +878 16 0 days 00:00:00.727324324 +878 17 0 days 00:00:00.681195460 +878 18 0 days 00:00:00.239060286 +878 19 0 days 00:00:00.682819025 +878 20 0 days 00:00:00.536025647 +878 21 0 days 00:00:00.198445835 +878 22 0 days 00:00:00.361169056 +878 23 0 days 00:00:00.789723648 +878 24 0 days 00:00:00.429951244 +878 25 0 days 00:00:00.317012022 +878 26 0 days 00:00:00.310304198 +878 27 0 days 00:00:00.467653491 +878 28 0 
days 00:00:00.224805212 +878 29 0 days 00:00:00.332103130 +878 30 0 days 00:00:00.512550854 +878 31 0 days 00:00:00.822589034 +878 32 0 days 00:00:00.246344005 +878 33 0 days 00:00:00.665932620 +878 34 0 days 00:00:00.468626681 +878 35 0 days 00:00:00.454982251 +878 36 0 days 00:00:00.359094695 +878 37 0 days 00:00:00.247597100 +878 38 0 days 00:00:00.388424472 +878 39 0 days 00:00:00.801953680 +878 40 0 days 00:00:00.360871845 +878 42 0 days 00:00:00.811752610 +878 43 0 days 00:00:00.818587629 +878 44 0 days 00:00:00.252202137 +878 45 0 days 00:00:00.674400665 +878 46 0 days 00:00:00.273432762 +878 47 0 days 00:00:00.239324400 +878 48 0 days 00:00:00.257143790 +878 49 0 days 00:00:00.398940450 +878 50 0 days 00:00:00.453823110 +878 51 0 days 00:00:00.354759895 +878 52 0 days 00:00:00.390445432 +878 53 0 days 00:00:00.229427412 +878 54 0 days 00:00:00.171033915 +878 55 0 days 00:00:00.335750920 +878 56 0 days 00:00:00.321924935 +878 57 0 days 00:00:00.554336440 +878 58 0 days 00:00:00.224037390 +878 59 0 days 00:00:00.262478900 +878 61 0 days 00:00:00.363345547 +878 62 0 days 00:00:00.282427035 +878 64 0 days 00:00:00.304569458 +878 65 0 days 00:00:00.662135628 +878 66 0 days 00:00:00.239481577 +878 67 0 days 00:00:00.211095216 +878 68 0 days 00:00:00.217771585 +878 69 0 days 00:00:00.556552780 +878 70 0 days 00:00:00.238299892 +878 72 0 days 00:00:00.306924955 +878 73 0 days 00:00:00.536044352 +878 74 0 days 00:00:00.485634892 +878 75 0 days 00:00:00.161842015 +878 76 0 days 00:00:00.191069597 +878 77 0 days 00:00:00.213178328 +878 78 0 days 00:00:00.314767391 +878 79 0 days 00:00:00.532421376 +878 80 0 days 00:00:00.324676378 +878 81 0 days 00:00:00.311247965 +878 82 0 days 00:00:00.306355116 +878 83 0 days 00:00:00.171712143 +878 84 0 days 00:00:00.593496801 +878 85 0 days 00:00:00.622225994 +878 86 0 days 00:00:00.253613680 +878 87 0 days 00:00:00.479316195 +878 88 0 days 00:00:00.607145426 +878 89 0 days 00:00:00.191186644 +878 90 0 days 00:00:00.267115485 
+878 91 0 days 00:00:00.155153725 +878 92 0 days 00:00:00.437954730 +878 93 0 days 00:00:00.405329497 +878 94 0 days 00:00:00.552341348 +878 95 0 days 00:00:00.177927380 +878 96 0 days 00:00:00.203429958 +878 97 0 days 00:00:00.279654080 +878 98 0 days 00:00:00.429288733 +878 99 0 days 00:00:00.295271053 +878 100 0 days 00:00:00.198257845 +879 1 0 days 00:00:00.789245388 +879 2 0 days 00:00:00.788841517 +879 3 0 days 00:00:01.362196004 +879 4 0 days 00:00:01.582164622 +879 5 0 days 00:00:01.241219290 +879 6 0 days 00:00:00.470137580 +879 7 0 days 00:00:00.377536248 +879 8 0 days 00:00:00.653259500 +879 9 0 days 00:00:00.636592855 +879 10 0 days 00:00:01.280977275 +879 11 0 days 00:00:01.565607253 +879 12 0 days 00:00:00.690795160 +879 13 0 days 00:00:00.434607690 +879 14 0 days 00:00:00.643796484 +879 15 0 days 00:00:01.559089424 +879 16 0 days 00:00:00.526300650 +879 17 0 days 00:00:01.041401434 +879 18 0 days 00:00:01.267577450 +879 19 0 days 00:00:01.297534468 +879 20 0 days 00:00:01.344935320 +879 21 0 days 00:00:01.542239945 +879 22 0 days 00:00:00.970863147 +879 23 0 days 00:00:01.210176610 +879 24 0 days 00:00:01.440937112 +879 25 0 days 00:00:01.291678790 +879 26 0 days 00:00:00.775310164 +879 27 0 days 00:00:00.530754710 +879 28 0 days 00:00:00.814510650 +879 29 0 days 00:00:00.463875460 +879 30 0 days 00:00:01.290789665 +879 31 0 days 00:00:01.280104620 +879 32 0 days 00:00:00.805292923 +879 33 0 days 00:00:01.420391816 +879 34 0 days 00:00:00.450509680 +879 35 0 days 00:00:01.556416074 +879 36 0 days 00:00:01.420118490 +879 37 0 days 00:00:00.939526228 +879 38 0 days 00:00:00.506356828 +879 39 0 days 00:00:01.095000353 +879 40 0 days 00:00:00.770415480 +879 41 0 days 00:00:01.284786135 +879 42 0 days 00:00:00.393980490 +879 43 0 days 00:00:01.378796373 +879 44 0 days 00:00:01.354659120 +879 45 0 days 00:00:00.374495755 +879 46 0 days 00:00:00.993264345 +879 47 0 days 00:00:00.524652897 +879 48 0 days 00:00:00.470169817 +879 49 0 days 00:00:01.363237064 
+879 50 0 days 00:00:00.801542616 +879 51 0 days 00:00:00.469135865 +879 52 0 days 00:00:01.397494493 +879 53 0 days 00:00:00.379851340 +879 54 0 days 00:00:01.280952515 +879 55 0 days 00:00:00.547643470 +879 56 0 days 00:00:01.525146675 +879 57 0 days 00:00:00.996502644 +879 58 0 days 00:00:00.411868112 +879 59 0 days 00:00:00.968119316 +879 60 0 days 00:00:01.535100822 +879 61 0 days 00:00:01.410288870 +879 62 0 days 00:00:00.589008456 +879 63 0 days 00:00:00.765544190 +879 64 0 days 00:00:00.394744704 +879 65 0 days 00:00:00.407076244 +879 66 0 days 00:00:01.042317473 +879 67 0 days 00:00:00.806181152 +879 68 0 days 00:00:00.718559280 +879 69 0 days 00:00:01.592918005 +879 70 0 days 00:00:00.554414087 +879 71 0 days 00:00:00.767237360 +879 72 0 days 00:00:01.389275196 +879 73 0 days 00:00:00.484735821 +879 74 0 days 00:00:00.535923020 +879 75 0 days 00:00:00.453764648 +879 76 0 days 00:00:01.582100270 +879 77 0 days 00:00:00.925800276 +879 78 0 days 00:00:00.828711282 +879 79 0 days 00:00:00.347464535 +879 80 0 days 00:00:01.590321368 +879 81 0 days 00:00:00.859181657 +879 82 0 days 00:00:00.423976160 +879 83 0 days 00:00:00.670763160 +879 84 0 days 00:00:00.773027220 +879 85 0 days 00:00:00.744553005 +879 86 0 days 00:00:00.489835293 +879 87 0 days 00:00:01.368209880 +879 88 0 days 00:00:00.401929720 +879 89 0 days 00:00:01.692951891 +879 90 0 days 00:00:00.497410000 +879 91 0 days 00:00:01.529234022 +879 92 0 days 00:00:00.652384450 +879 93 0 days 00:00:00.554540232 +879 94 0 days 00:00:01.729780446 +879 95 0 days 00:00:00.889569365 +879 96 0 days 00:00:00.543146132 +879 97 0 days 00:00:00.445297495 +879 98 0 days 00:00:00.797882280 +879 99 0 days 00:00:00.822033717 +879 100 0 days 00:00:00.711698220 +880 1 0 days 00:00:00.247052980 +880 2 0 days 00:00:00.702217925 +880 3 0 days 00:00:00.712314456 +880 4 0 days 00:00:00.423746780 +880 5 0 days 00:00:00.234510653 +880 6 0 days 00:00:00.709320764 +880 7 0 days 00:00:00.918613562 +880 8 0 days 00:00:00.754604540 
+880 9 0 days 00:00:00.761457805 +880 10 0 days 00:00:00.783632836 +880 11 0 days 00:00:00.685712912 +880 12 0 days 00:00:00.357309365 +880 13 0 days 00:00:00.337457375 +880 14 0 days 00:00:00.716844513 +880 15 0 days 00:00:00.428738848 +880 16 0 days 00:00:00.694903933 +880 17 0 days 00:00:00.243918320 +880 18 0 days 00:00:00.738914140 +880 19 0 days 00:00:00.448854115 +880 20 0 days 00:00:00.366875844 +880 21 0 days 00:00:00.291971240 +880 22 0 days 00:00:00.735920962 +880 23 0 days 00:00:00.431921648 +880 24 0 days 00:00:00.324031637 +880 25 0 days 00:00:00.742561280 +880 26 0 days 00:00:00.752742086 +880 27 0 days 00:00:00.722155372 +880 28 0 days 00:00:00.483685588 +880 29 0 days 00:00:00.326228932 +880 30 0 days 00:00:00.237475677 +880 31 0 days 00:00:00.754628668 +880 32 0 days 00:00:00.347135428 +880 33 0 days 00:00:00.399597994 +880 34 0 days 00:00:00.293170120 +880 35 0 days 00:00:00.444101396 +880 36 0 days 00:00:00.360600394 +880 37 0 days 00:00:00.227928835 +880 38 0 days 00:00:00.705992013 +880 39 0 days 00:00:00.356868600 +880 40 0 days 00:00:00.701107768 +880 41 0 days 00:00:00.405436228 +880 42 0 days 00:00:00.284286624 +880 43 0 days 00:00:00.256383621 +880 44 0 days 00:00:00.746840456 +880 45 0 days 00:00:00.425961166 +880 46 0 days 00:00:00.252835980 +880 47 0 days 00:00:00.732300836 +880 48 0 days 00:00:00.821648865 +880 49 0 days 00:00:00.207492540 +880 50 0 days 00:00:00.256519717 +880 51 0 days 00:00:00.248121088 +880 52 0 days 00:00:00.720728568 +880 53 0 days 00:00:00.387503065 +880 54 0 days 00:00:00.774599534 +880 55 0 days 00:00:00.238951111 +880 56 0 days 00:00:00.217301522 +880 57 0 days 00:00:00.156153392 +880 58 0 days 00:00:00.436440560 +880 59 0 days 00:00:00.216207688 +880 60 0 days 00:00:00.198232720 +880 61 0 days 00:00:00.426380053 +880 62 0 days 00:00:00.776652594 +880 63 0 days 00:00:00.382687892 +880 64 0 days 00:00:00.496508994 +880 65 0 days 00:00:00.503158214 +880 66 0 days 00:00:00.435856694 +880 67 0 days 
00:00:00.814065517 +880 68 0 days 00:00:00.240444870 +880 69 0 days 00:00:00.768310700 +880 70 0 days 00:00:00.377175816 +880 71 0 days 00:00:00.393414840 +880 72 0 days 00:00:00.243285825 +880 73 0 days 00:00:00.846997172 +880 74 0 days 00:00:00.474706602 +880 75 0 days 00:00:00.470133800 +880 76 0 days 00:00:00.249317704 +880 77 0 days 00:00:00.737535802 +880 78 0 days 00:00:00.364186604 +880 79 0 days 00:00:00.368920788 +880 80 0 days 00:00:00.223086625 +880 81 0 days 00:00:00.213187284 +880 82 0 days 00:00:00.443835560 +880 83 0 days 00:00:00.412570988 +880 84 0 days 00:00:00.785713494 +880 85 0 days 00:00:00.469702832 +880 86 0 days 00:00:00.383296023 +880 87 0 days 00:00:00.374951964 +880 88 0 days 00:00:00.433420205 +880 89 0 days 00:00:00.408129042 +880 90 0 days 00:00:00.815750804 +880 91 0 days 00:00:00.204489020 +880 92 0 days 00:00:00.652760255 +880 93 0 days 00:00:00.195242035 +880 94 0 days 00:00:00.386455205 +880 95 0 days 00:00:00.406019111 +880 96 0 days 00:00:00.237417465 +880 97 0 days 00:00:00.769490127 +880 98 0 days 00:00:00.252616997 +880 99 0 days 00:00:00.683546588 +880 100 0 days 00:00:00.357267470 +881 1 0 days 00:00:01.435165164 +881 2 0 days 00:00:01.434915876 +881 3 0 days 00:00:00.799215926 +881 4 0 days 00:00:00.791463104 +881 5 0 days 00:00:00.656096620 +881 6 0 days 00:00:00.666645036 +881 7 0 days 00:00:00.831841514 +881 8 0 days 00:00:00.374017944 +881 9 0 days 00:00:00.412259977 +881 10 0 days 00:00:00.825361128 +881 11 0 days 00:00:00.743934236 +881 12 0 days 00:00:00.756204633 +881 13 0 days 00:00:00.401039068 +881 14 0 days 00:00:00.671822073 +881 15 0 days 00:00:00.821748410 +881 16 0 days 00:00:00.421607280 +881 17 0 days 00:00:00.950227262 +881 18 0 days 00:00:00.466472355 +881 19 0 days 00:00:00.517368622 +881 20 0 days 00:00:01.347295050 +881 21 0 days 00:00:01.387832745 +881 22 0 days 00:00:00.730472963 +881 23 0 days 00:00:00.827352052 +881 24 0 days 00:00:00.393695840 +881 25 0 days 00:00:01.524299920 +881 26 0 days 
00:00:00.701283355 +881 27 0 days 00:00:00.624203790 +881 28 0 days 00:00:00.399960340 +881 29 0 days 00:00:00.435196390 +881 30 0 days 00:00:01.326920852 +881 31 0 days 00:00:00.750721175 +881 32 0 days 00:00:01.113469453 +881 33 0 days 00:00:00.512571250 +881 34 0 days 00:00:00.465719765 +881 35 0 days 00:00:00.776049772 +881 36 0 days 00:00:00.475393140 +881 37 0 days 00:00:01.440922848 +881 38 0 days 00:00:01.461272440 +881 39 0 days 00:00:01.568309883 +881 40 0 days 00:00:00.693080845 +881 41 0 days 00:00:01.432795645 +881 42 0 days 00:00:00.739321468 +881 43 0 days 00:00:00.627841108 +881 44 0 days 00:00:00.736992805 +881 45 0 days 00:00:00.448595010 +881 46 0 days 00:00:00.750168808 +881 47 0 days 00:00:00.785015528 +881 48 0 days 00:00:00.961919200 +881 49 0 days 00:00:00.797520097 +881 50 0 days 00:00:01.677642840 +881 51 0 days 00:00:01.577699690 +881 52 0 days 00:00:01.419698450 +881 53 0 days 00:00:00.804463810 +881 54 0 days 00:00:00.515703626 +881 55 0 days 00:00:00.361166715 +881 56 0 days 00:00:00.811904513 +881 57 0 days 00:00:01.651655210 +881 58 0 days 00:00:00.402481320 +881 59 0 days 00:00:00.515879937 +881 60 0 days 00:00:00.464762100 +881 61 0 days 00:00:00.452179935 +881 62 0 days 00:00:00.493988295 +881 63 0 days 00:00:00.547237277 +881 64 0 days 00:00:00.403865144 +881 65 0 days 00:00:00.921755523 +881 66 0 days 00:00:01.409820530 +881 67 0 days 00:00:01.396985000 +881 68 0 days 00:00:00.697271685 +881 69 0 days 00:00:00.437766895 +881 70 0 days 00:00:00.698830005 +881 71 0 days 00:00:00.942312883 +881 72 0 days 00:00:00.480094090 +881 73 0 days 00:00:00.483280780 +881 74 0 days 00:00:01.314495620 +881 75 0 days 00:00:00.844044140 +881 76 0 days 00:00:00.376752740 +881 77 0 days 00:00:00.740788184 +881 78 0 days 00:00:01.472243542 +881 79 0 days 00:00:00.474967269 +881 80 0 days 00:00:00.479141164 +881 81 0 days 00:00:00.746363663 +881 82 0 days 00:00:00.508906200 +881 83 0 days 00:00:00.776836324 +881 84 0 days 00:00:01.006037211 +881 85 
0 days 00:00:00.899133160 +881 86 0 days 00:00:01.468450368 +881 87 0 days 00:00:01.133165360 +881 88 0 days 00:00:00.789752706 +881 89 0 days 00:00:01.497317531 +881 90 0 days 00:00:01.553422428 +881 91 0 days 00:00:00.384769750 +881 92 0 days 00:00:00.811073210 +881 93 0 days 00:00:01.416890724 +881 94 0 days 00:00:00.288315780 +881 95 0 days 00:00:00.658875785 +881 96 0 days 00:00:00.470536455 +881 97 0 days 00:00:01.090308668 +881 98 0 days 00:00:00.534930031 +881 99 0 days 00:00:00.839940690 +881 100 0 days 00:00:01.593818704 +882 1 0 days 00:00:00.771309417 +882 2 0 days 00:00:00.224293284 +882 3 0 days 00:00:00.760352430 +882 4 0 days 00:00:00.704854672 +882 5 0 days 00:00:00.728746920 +882 6 0 days 00:00:00.836735262 +882 7 0 days 00:00:00.224702217 +882 8 0 days 00:00:00.487970112 +882 9 0 days 00:00:00.371693195 +882 10 0 days 00:00:00.653098760 +882 11 0 days 00:00:00.746022660 +882 12 0 days 00:00:00.787659856 +882 13 0 days 00:00:00.226551242 +882 14 0 days 00:00:00.365857076 +882 15 0 days 00:00:00.793709855 +882 16 0 days 00:00:00.711885080 +882 17 0 days 00:00:00.184376613 +882 18 0 days 00:00:00.518056750 +882 19 0 days 00:00:00.223547964 +882 20 0 days 00:00:00.282050876 +882 21 0 days 00:00:00.443891384 +882 22 0 days 00:00:00.354448285 +882 23 0 days 00:00:00.726375494 +882 24 0 days 00:00:00.688561505 +882 25 0 days 00:00:00.640400050 +882 26 0 days 00:00:00.710222252 +882 27 0 days 00:00:00.268679642 +882 28 0 days 00:00:00.382333148 +882 29 0 days 00:00:00.451591488 +882 30 0 days 00:00:00.138387920 +882 31 0 days 00:00:00.780787944 +882 32 0 days 00:00:00.258174800 +882 33 0 days 00:00:00.296438335 +882 34 0 days 00:00:00.460557992 +882 35 0 days 00:00:00.714287180 +882 36 0 days 00:00:00.305645764 +882 37 0 days 00:00:00.245733544 +882 38 0 days 00:00:00.236319047 +882 39 0 days 00:00:00.369759110 +882 40 0 days 00:00:00.429545514 +882 41 0 days 00:00:00.794238018 +882 42 0 days 00:00:00.665908445 +882 43 0 days 00:00:00.386780993 +882 44 0 
days 00:00:00.184541925 +882 45 0 days 00:00:00.684798140 +882 46 0 days 00:00:00.489384672 +882 47 0 days 00:00:00.380855336 +882 48 0 days 00:00:00.418953102 +882 49 0 days 00:00:00.242494615 +882 50 0 days 00:00:00.233198734 +882 51 0 days 00:00:00.212349835 +882 52 0 days 00:00:00.717792416 +882 53 0 days 00:00:00.224255468 +882 54 0 days 00:00:00.343548455 +882 55 0 days 00:00:00.748093920 +882 56 0 days 00:00:00.201351540 +882 57 0 days 00:00:00.754060755 +882 58 0 days 00:00:00.717376566 +882 59 0 days 00:00:00.279403071 +882 60 0 days 00:00:00.725237334 +882 61 0 days 00:00:00.370890660 +882 62 0 days 00:00:00.684052996 +882 63 0 days 00:00:00.198641015 +882 64 0 days 00:00:00.371062310 +882 66 0 days 00:00:00.228236220 +882 67 0 days 00:00:00.439461220 +882 68 0 days 00:00:00.808835260 +882 69 0 days 00:00:00.259578330 +882 70 0 days 00:00:00.751482706 +882 71 0 days 00:00:00.284138120 +882 72 0 days 00:00:00.413963598 +882 73 0 days 00:00:00.420986172 +882 74 0 days 00:00:00.774909145 +882 75 0 days 00:00:00.208801432 +882 76 0 days 00:00:00.189103940 +882 77 0 days 00:00:00.350624270 +882 78 0 days 00:00:00.642257860 +882 79 0 days 00:00:00.365791200 +882 80 0 days 00:00:00.751003305 +882 81 0 days 00:00:00.226862795 +882 82 0 days 00:00:00.405808796 +882 83 0 days 00:00:00.425541566 +882 84 0 days 00:00:00.213094820 +882 85 0 days 00:00:00.480034650 +882 86 0 days 00:00:00.406178652 +882 87 0 days 00:00:00.213989631 +882 88 0 days 00:00:00.226133577 +882 89 0 days 00:00:00.787461002 +882 90 0 days 00:00:00.412865343 +882 91 0 days 00:00:00.494063904 +882 92 0 days 00:00:00.724288033 +882 93 0 days 00:00:00.675441470 +882 94 0 days 00:00:00.209847632 +882 95 0 days 00:00:00.381478245 +882 96 0 days 00:00:00.222703777 +882 97 0 days 00:00:00.238964971 +882 98 0 days 00:00:00.346241585 +882 99 0 days 00:00:00.207058090 +882 100 0 days 00:00:00.743356492 +883 1 0 days 00:00:03.748371004 +883 2 0 days 00:00:09.967612050 +883 3 0 days 00:00:03.240369100 +883 
4 0 days 00:00:05.955019485 +883 5 0 days 00:00:09.211637013 +883 6 0 days 00:00:10.087897040 +883 7 0 days 00:00:03.490596270 +883 8 0 days 00:00:03.708348725 +883 9 0 days 00:00:10.444262664 +883 10 0 days 00:00:08.905371020 +883 11 0 days 00:00:10.199167640 +883 12 0 days 00:00:03.595478606 +883 13 0 days 00:00:09.500561920 +883 14 0 days 00:00:07.070154463 +883 15 0 days 00:00:03.461371360 +883 16 0 days 00:00:02.858354693 +883 17 0 days 00:00:09.461576644 +883 18 0 days 00:00:03.601961245 +883 19 0 days 00:00:08.916911733 +883 20 0 days 00:00:05.775902040 +883 21 0 days 00:00:03.966666564 +883 22 0 days 00:00:05.704264500 +883 23 0 days 00:00:11.202553793 +883 24 0 days 00:00:09.909429575 +883 25 0 days 00:00:05.123633300 +883 26 0 days 00:00:10.372323085 +883 27 0 days 00:00:05.056953453 +883 28 0 days 00:00:06.578742980 +883 29 0 days 00:00:03.475882420 +883 30 0 days 00:00:05.902558160 +883 31 0 days 00:00:05.156770900 +883 32 0 days 00:00:03.486827480 +883 33 0 days 00:00:07.937026846 +883 34 0 days 00:00:08.928060260 +883 35 0 days 00:00:10.070588955 +883 36 0 days 00:00:05.005256212 +883 37 0 days 00:00:04.518865744 +883 38 0 days 00:00:03.620936265 +883 39 0 days 00:00:09.265822253 +883 40 0 days 00:00:04.314755246 +883 41 0 days 00:00:03.907240106 +883 42 0 days 00:00:11.266909112 +883 43 0 days 00:00:05.676703940 +883 44 0 days 00:00:10.984882523 +883 45 0 days 00:00:09.336546293 +883 46 0 days 00:00:05.060347966 +883 47 0 days 00:00:09.153922460 +883 48 0 days 00:00:03.591895350 +883 49 0 days 00:00:09.894803505 +883 50 0 days 00:00:05.208230260 +883 51 0 days 00:00:06.453301971 +883 52 0 days 00:00:03.506923325 +883 53 0 days 00:00:03.752637992 +883 54 0 days 00:00:04.266314115 +883 55 0 days 00:00:03.567736035 +883 56 0 days 00:00:04.600631200 +883 57 0 days 00:00:07.842005966 +883 58 0 days 00:00:09.295257053 +883 59 0 days 00:00:05.847844655 +883 60 0 days 00:00:05.635136200 +883 61 0 days 00:00:09.697602606 +883 62 0 days 00:00:05.103722520 +883 
63 0 days 00:00:09.099591760 +883 64 0 days 00:00:03.972908853 +883 65 0 days 00:00:10.398129710 +883 66 0 days 00:00:09.390986666 +884 1 0 days 00:00:05.074740560 +884 2 0 days 00:00:02.124353584 +884 3 0 days 00:00:04.487151333 +884 4 0 days 00:00:03.343410792 +884 5 0 days 00:00:01.733738540 +884 6 0 days 00:00:04.512301740 +884 7 0 days 00:00:02.962313000 +884 8 0 days 00:00:02.363831772 +884 9 0 days 00:00:04.538008740 +884 10 0 days 00:00:04.999077050 +884 11 0 days 00:00:02.247862080 +884 12 0 days 00:00:02.685510300 +884 13 0 days 00:00:01.940827793 +884 14 0 days 00:00:05.009099495 +884 15 0 days 00:00:05.076947035 +884 16 0 days 00:00:01.804178175 +884 17 0 days 00:00:01.805544000 +884 18 0 days 00:00:05.387472224 +884 19 0 days 00:00:02.968637875 +884 20 0 days 00:00:05.429398656 +884 21 0 days 00:00:03.097537196 +884 22 0 days 00:00:01.870170700 +884 23 0 days 00:00:02.925830695 +884 24 0 days 00:00:04.967589020 +884 25 0 days 00:00:01.776320785 +884 26 0 days 00:00:02.603746833 +884 27 0 days 00:00:03.164992716 +884 28 0 days 00:00:03.542639364 +884 29 0 days 00:00:02.922590200 +884 30 0 days 00:00:04.534917593 +884 31 0 days 00:00:05.425194784 +884 32 0 days 00:00:05.032402050 +884 33 0 days 00:00:01.819438913 +884 34 0 days 00:00:02.483550695 +884 35 0 days 00:00:01.972235035 +884 36 0 days 00:00:03.502479313 +884 37 0 days 00:00:02.373141975 +884 38 0 days 00:00:05.076610390 +884 39 0 days 00:00:05.646038840 +884 40 0 days 00:00:01.871335556 +884 41 0 days 00:00:01.743138706 +884 42 0 days 00:00:04.729173553 +884 43 0 days 00:00:02.014545066 +884 44 0 days 00:00:03.254158855 +884 45 0 days 00:00:01.862355210 +884 46 0 days 00:00:03.455270304 +884 47 0 days 00:00:02.623613760 +884 48 0 days 00:00:05.252563225 +884 49 0 days 00:00:02.607037953 +884 50 0 days 00:00:02.697296066 +884 51 0 days 00:00:01.602171040 +884 52 0 days 00:00:03.165698700 +884 53 0 days 00:00:02.607708413 +884 54 0 days 00:00:02.205539476 +884 55 0 days 00:00:01.994869023 +884 56 
0 days 00:00:02.911471070 +884 57 0 days 00:00:05.705811200 +884 58 0 days 00:00:05.118483775 +884 59 0 days 00:00:02.889848780 +884 60 0 days 00:00:05.610507660 +884 61 0 days 00:00:02.425741533 +884 62 0 days 00:00:04.730099640 +884 63 0 days 00:00:02.740856053 +884 64 0 days 00:00:01.871340745 +884 65 0 days 00:00:03.313656963 +884 66 0 days 00:00:02.965135505 +884 67 0 days 00:00:01.608733660 +884 68 0 days 00:00:05.052870960 +884 69 0 days 00:00:05.210977855 +884 70 0 days 00:00:01.601548413 +884 71 0 days 00:00:04.032497473 +884 72 0 days 00:00:01.872651140 +884 73 0 days 00:00:02.704972186 +884 74 0 days 00:00:02.546236366 +884 75 0 days 00:00:05.212867305 +884 76 0 days 00:00:01.880459756 +884 77 0 days 00:00:02.111115060 +884 78 0 days 00:00:02.783740900 +884 79 0 days 00:00:02.622137460 +884 80 0 days 00:00:01.759976373 +884 81 0 days 00:00:02.974543685 +884 82 0 days 00:00:02.656381966 +884 83 0 days 00:00:02.572932460 +884 84 0 days 00:00:05.042603890 +884 85 0 days 00:00:02.891747400 +884 86 0 days 00:00:02.976118120 +884 87 0 days 00:00:03.270321264 +884 88 0 days 00:00:02.593085200 +884 89 0 days 00:00:02.839015590 +884 90 0 days 00:00:05.522763366 +884 91 0 days 00:00:04.531854886 +884 92 0 days 00:00:01.802189425 +884 93 0 days 00:00:03.104353408 +884 94 0 days 00:00:04.466265700 +884 95 0 days 00:00:01.780746190 +884 96 0 days 00:00:02.626566680 +884 97 0 days 00:00:04.593165373 +884 98 0 days 00:00:01.722358826 +884 99 0 days 00:00:05.181486820 +884 100 0 days 00:00:02.978057610 +885 1 0 days 00:00:06.483479805 +885 2 0 days 00:00:11.828924530 +885 3 0 days 00:00:05.920914005 +885 4 0 days 00:00:06.788424743 +885 5 0 days 00:00:08.794275830 +885 6 0 days 00:00:05.554947820 +885 7 0 days 00:00:11.055709165 +885 8 0 days 00:00:07.642200104 +885 9 0 days 00:00:05.820633492 +885 10 0 days 00:00:11.453266425 +885 11 0 days 00:00:11.651943750 +885 12 0 days 00:00:11.663885155 +885 13 0 days 00:00:10.183302225 +885 14 0 days 00:00:05.856068615 +885 15 0 
days 00:00:03.798905970 +885 16 0 days 00:00:10.887909910 +885 17 0 days 00:00:10.035432295 +885 18 0 days 00:00:08.416791510 +885 19 0 days 00:00:05.588496753 +885 20 0 days 00:00:05.840996935 +885 21 0 days 00:00:06.082951180 +885 22 0 days 00:00:12.706472344 +885 23 0 days 00:00:03.724490170 +885 24 0 days 00:00:04.111122345 +885 25 0 days 00:00:07.089458360 +885 26 0 days 00:00:06.515315684 +885 27 0 days 00:00:04.336942548 +885 28 0 days 00:00:11.815208431 +885 29 0 days 00:00:10.311800255 +885 30 0 days 00:00:10.421968535 +885 31 0 days 00:00:06.463231715 +885 32 0 days 00:00:05.159729540 +885 33 0 days 00:00:06.116119413 +885 34 0 days 00:00:11.460191635 +885 35 0 days 00:00:13.695249770 +885 36 0 days 00:00:03.850436925 +885 37 0 days 00:00:04.324741555 +885 38 0 days 00:00:04.307279416 +885 39 0 days 00:00:04.016175422 +885 40 0 days 00:00:05.847254645 +885 41 0 days 00:00:06.442681680 +885 42 0 days 00:00:09.570493625 +885 43 0 days 00:00:06.505012500 +885 44 0 days 00:00:05.704302965 +885 45 0 days 00:00:03.818768756 +885 46 0 days 00:00:08.988852953 +885 47 0 days 00:00:12.583723550 +885 48 0 days 00:00:10.907601533 +885 49 0 days 00:00:04.098680176 +885 50 0 days 00:00:05.931072035 +886 1 0 days 00:00:01.952563766 +886 2 0 days 00:00:03.625965867 +886 3 0 days 00:00:06.358807560 +886 4 0 days 00:00:05.518772600 +886 5 0 days 00:00:06.297468708 +886 6 0 days 00:00:06.086455390 +886 7 0 days 00:00:01.959524016 +886 8 0 days 00:00:06.371744668 +886 9 0 days 00:00:05.978417960 +886 10 0 days 00:00:02.690814200 +886 11 0 days 00:00:02.651154753 +886 12 0 days 00:00:03.196660628 +886 13 0 days 00:00:02.669843860 +886 14 0 days 00:00:04.935527206 +886 15 0 days 00:00:03.432027960 +886 16 0 days 00:00:02.443700121 +886 17 0 days 00:00:01.950229440 +886 18 0 days 00:00:03.029992915 +886 19 0 days 00:00:02.691286740 +886 20 0 days 00:00:05.604416620 +886 21 0 days 00:00:02.479194510 +886 22 0 days 00:00:05.858221710 +886 23 0 days 00:00:01.994656145 +886 24 0 
days 00:00:05.931986225 +886 25 0 days 00:00:02.277806945 +886 26 0 days 00:00:02.707183268 +886 27 0 days 00:00:03.139288423 +886 28 0 days 00:00:05.972383022 +886 29 0 days 00:00:02.944987050 +886 30 0 days 00:00:01.977294974 +886 31 0 days 00:00:02.064264853 +886 32 0 days 00:00:02.892798555 +886 33 0 days 00:00:03.513431540 +886 34 0 days 00:00:03.565027930 +886 35 0 days 00:00:03.118389566 +886 36 0 days 00:00:01.935440416 +886 37 0 days 00:00:03.538515408 +886 38 0 days 00:00:04.937998793 +886 39 0 days 00:00:02.947169485 +886 40 0 days 00:00:04.353920757 +886 41 0 days 00:00:02.256979497 +886 42 0 days 00:00:03.250098935 +886 43 0 days 00:00:05.822992615 +886 44 0 days 00:00:06.108582656 +886 45 0 days 00:00:05.756182172 +886 46 0 days 00:00:01.935416635 +886 47 0 days 00:00:01.972117963 +886 48 0 days 00:00:02.411159560 +886 49 0 days 00:00:02.197043066 +886 50 0 days 00:00:02.312975708 +886 51 0 days 00:00:03.354591533 +886 52 0 days 00:00:01.991013790 +886 53 0 days 00:00:02.014097640 +886 54 0 days 00:00:01.831755940 +886 55 0 days 00:00:01.635219360 +886 56 0 days 00:00:05.612433666 +886 57 0 days 00:00:02.953829355 +886 58 0 days 00:00:04.505549680 +886 59 0 days 00:00:02.738968330 +886 60 0 days 00:00:06.410634016 +886 61 0 days 00:00:02.889977865 +886 62 0 days 00:00:05.725648365 +886 63 0 days 00:00:02.034760622 +886 64 0 days 00:00:06.474919263 +886 65 0 days 00:00:02.376783618 +886 66 0 days 00:00:05.643245505 +886 67 0 days 00:00:02.514510825 +886 68 0 days 00:00:02.357068466 +886 69 0 days 00:00:02.951677080 +886 70 0 days 00:00:01.665912253 +886 71 0 days 00:00:01.961845910 +886 72 0 days 00:00:02.012897322 +886 73 0 days 00:00:03.677099440 +886 74 0 days 00:00:05.618429150 +886 75 0 days 00:00:03.009814560 +886 76 0 days 00:00:03.003672060 +886 77 0 days 00:00:02.939128420 +886 78 0 days 00:00:05.782278660 +886 79 0 days 00:00:05.706620660 +886 80 0 days 00:00:03.208564140 +886 81 0 days 00:00:03.194806011 +886 82 0 days 00:00:01.656920793 
+886 83 0 days 00:00:03.263812604 +886 84 0 days 00:00:02.018658284 +886 85 0 days 00:00:02.797836866 +886 86 0 days 00:00:04.842992620 +886 87 0 days 00:00:02.166585773 +886 88 0 days 00:00:05.835067720 +886 89 0 days 00:00:03.086753036 +886 90 0 days 00:00:02.920854205 +886 91 0 days 00:00:04.532142320 +886 92 0 days 00:00:02.115802151 +886 93 0 days 00:00:05.813051495 +886 94 0 days 00:00:02.988249795 +886 95 0 days 00:00:05.997266596 +886 96 0 days 00:00:02.111643063 +886 97 0 days 00:00:06.724218956 +887 1 0 days 00:00:03.782253013 +887 2 0 days 00:00:08.400622466 +887 3 0 days 00:00:03.500674960 +887 4 0 days 00:00:06.360401180 +887 5 0 days 00:00:03.556718570 +887 6 0 days 00:00:09.433703990 +887 7 0 days 00:00:08.333218553 +887 8 0 days 00:00:03.717387550 +887 9 0 days 00:00:05.998723744 +887 10 0 days 00:00:04.317967840 +887 11 0 days 00:00:04.130039600 +887 12 0 days 00:00:05.401607065 +887 13 0 days 00:00:04.708538872 +887 14 0 days 00:00:08.448280973 +887 15 0 days 00:00:04.772889880 +887 16 0 days 00:00:10.037247384 +887 17 0 days 00:00:09.578344200 +887 18 0 days 00:00:10.363110156 +887 19 0 days 00:00:03.047522733 +887 20 0 days 00:00:09.970906695 +887 21 0 days 00:00:08.619705285 +887 22 0 days 00:00:03.390187135 +887 23 0 days 00:00:04.536201232 +887 24 0 days 00:00:05.448869600 +887 25 0 days 00:00:09.516354765 +887 26 0 days 00:00:03.567016456 +887 27 0 days 00:00:03.538078133 +887 28 0 days 00:00:05.915856848 +887 29 0 days 00:00:08.646181106 +887 30 0 days 00:00:03.693368730 +887 31 0 days 00:00:08.584230493 +887 32 0 days 00:00:03.329252865 +887 33 0 days 00:00:10.952040680 +887 34 0 days 00:00:08.522348766 +887 35 0 days 00:00:03.217765706 +887 36 0 days 00:00:05.453181740 +887 37 0 days 00:00:10.088266824 +887 38 0 days 00:00:03.651361845 +887 39 0 days 00:00:09.595981480 +887 40 0 days 00:00:02.970334873 +887 41 0 days 00:00:03.349489400 +887 42 0 days 00:00:04.043188736 +887 43 0 days 00:00:10.510091350 +887 44 0 days 00:00:09.456984130 
+887 45 0 days 00:00:09.595949550 +887 46 0 days 00:00:05.658334145 +887 47 0 days 00:00:08.640938160 +887 48 0 days 00:00:03.371373065 +887 49 0 days 00:00:05.409565825 +887 50 0 days 00:00:05.414681995 +887 51 0 days 00:00:08.515040373 +887 52 0 days 00:00:08.639724753 +887 53 0 days 00:00:03.507667430 +887 54 0 days 00:00:09.168969020 +887 55 0 days 00:00:03.360528575 +887 56 0 days 00:00:03.038287786 +887 57 0 days 00:00:03.368131165 +887 58 0 days 00:00:03.068395833 +887 59 0 days 00:00:08.588712053 +887 60 0 days 00:00:05.994806785 +887 61 0 days 00:00:05.036985566 +887 62 0 days 00:00:03.417616440 +887 63 0 days 00:00:08.523691146 +887 64 0 days 00:00:10.506554196 +887 65 0 days 00:00:05.331518575 +887 66 0 days 00:00:05.368878360 +887 67 0 days 00:00:03.045563153 +887 68 0 days 00:00:06.229602373 +887 69 0 days 00:00:05.573810815 +887 70 0 days 00:00:10.103025560 +888 1 0 days 00:00:06.319405535 +888 2 0 days 00:00:05.830075390 +888 3 0 days 00:00:05.104056806 +888 4 0 days 00:00:05.606212080 +888 5 0 days 00:00:05.700682495 +888 6 0 days 00:00:05.587831200 +888 7 0 days 00:00:03.163437500 +888 8 0 days 00:00:11.319503656 +888 9 0 days 00:00:05.034242386 +888 10 0 days 00:00:03.289668806 +888 11 0 days 00:00:05.547795427 +888 12 0 days 00:00:03.489793840 +888 13 0 days 00:00:03.467991370 +888 14 0 days 00:00:08.976602140 +888 15 0 days 00:00:03.260377706 +888 16 0 days 00:00:06.695079835 +888 17 0 days 00:00:05.704900805 +888 18 0 days 00:00:09.826113310 +888 19 0 days 00:00:09.828111760 +888 20 0 days 00:00:03.425179875 +888 21 0 days 00:00:03.263890033 +888 22 0 days 00:00:03.844708925 +888 23 0 days 00:00:09.821039665 +888 24 0 days 00:00:09.959401595 +888 25 0 days 00:00:08.662300006 +888 26 0 days 00:00:03.705134392 +888 27 0 days 00:00:05.601268015 +888 28 0 days 00:00:05.653119070 +888 29 0 days 00:00:10.429456688 +888 30 0 days 00:00:06.667825270 +888 31 0 days 00:00:03.461321790 +888 32 0 days 00:00:08.924824233 +888 33 0 days 00:00:08.969193360 
+888 34 0 days 00:00:03.930494808 +888 35 0 days 00:00:10.434193176 +888 36 0 days 00:00:03.653424136 +888 37 0 days 00:00:10.435895968 +888 38 0 days 00:00:04.128095665 +888 39 0 days 00:00:06.289908628 +888 40 0 days 00:00:03.541186493 +888 41 0 days 00:00:03.945133625 +888 42 0 days 00:00:03.555462765 +888 43 0 days 00:00:06.292566130 +888 44 0 days 00:00:09.157873386 +888 45 0 days 00:00:05.175924586 +888 46 0 days 00:00:05.558590090 +888 47 0 days 00:00:07.042768720 +888 48 0 days 00:00:05.272891170 +888 49 0 days 00:00:03.890154396 +888 50 0 days 00:00:04.988629180 +888 51 0 days 00:00:03.470293255 +888 52 0 days 00:00:03.976915620 +888 53 0 days 00:00:03.994265695 +888 54 0 days 00:00:06.251105692 +888 55 0 days 00:00:04.801476795 +888 56 0 days 00:00:09.672756040 +888 57 0 days 00:00:08.803566660 +888 58 0 days 00:00:06.833512650 +888 59 0 days 00:00:09.904495350 +888 60 0 days 00:00:03.618641060 +888 61 0 days 00:00:10.442135272 +888 62 0 days 00:00:10.854305320 +888 63 0 days 00:00:03.822144572 +888 64 0 days 00:00:09.888914775 +888 65 0 days 00:00:10.785056188 +889 1 0 days 00:00:03.234050680 +889 2 0 days 00:00:01.685647415 +889 3 0 days 00:00:01.639431240 +889 4 0 days 00:00:01.987236600 +889 5 0 days 00:00:02.837280035 +889 6 0 days 00:00:03.027255160 +889 7 0 days 00:00:01.771721980 +889 8 0 days 00:00:04.757346735 +889 9 0 days 00:00:04.865884050 +889 10 0 days 00:00:02.486799986 +889 11 0 days 00:00:05.353381746 +889 12 0 days 00:00:04.790461510 +889 13 0 days 00:00:04.612205260 +889 14 0 days 00:00:04.865638005 +889 15 0 days 00:00:01.697056375 +889 16 0 days 00:00:01.806625650 +889 17 0 days 00:00:05.020618330 +889 18 0 days 00:00:01.731741513 +889 19 0 days 00:00:01.592223886 +889 20 0 days 00:00:04.977894876 +889 21 0 days 00:00:02.384870280 +889 22 0 days 00:00:02.139478752 +889 23 0 days 00:00:05.298673893 +889 24 0 days 00:00:02.979199730 +889 25 0 days 00:00:02.491119373 +889 26 0 days 00:00:01.785556972 +889 27 0 days 00:00:02.794345030 
+889 28 0 days 00:00:03.944725560 +889 29 0 days 00:00:04.269940466 +889 30 0 days 00:00:02.433229393 +889 31 0 days 00:00:01.835873172 +889 32 0 days 00:00:04.379190045 +889 33 0 days 00:00:03.007428492 +889 34 0 days 00:00:02.728841570 +889 35 0 days 00:00:02.428276986 +889 36 0 days 00:00:02.567611133 +889 37 0 days 00:00:05.498283996 +889 38 0 days 00:00:01.563225855 +889 39 0 days 00:00:02.871890552 +889 40 0 days 00:00:01.868394993 +889 41 0 days 00:00:04.790403310 +889 42 0 days 00:00:01.941340140 +889 43 0 days 00:00:03.010199500 +889 44 0 days 00:00:01.562154626 +889 45 0 days 00:00:04.680020265 +889 46 0 days 00:00:01.521995106 +889 47 0 days 00:00:02.851657040 +889 48 0 days 00:00:01.980679140 +889 49 0 days 00:00:02.654588840 +889 50 0 days 00:00:01.907237295 +889 51 0 days 00:00:02.586275346 +889 52 0 days 00:00:01.737530800 +889 53 0 days 00:00:01.818398648 +889 54 0 days 00:00:02.229874076 +889 55 0 days 00:00:05.142717284 +889 56 0 days 00:00:01.740485470 +889 57 0 days 00:00:03.514110976 +889 58 0 days 00:00:02.957462604 +889 59 0 days 00:00:03.595284846 +889 60 0 days 00:00:05.447258240 +889 61 0 days 00:00:04.672108340 +889 62 0 days 00:00:03.055873913 +889 63 0 days 00:00:01.795061240 +889 64 0 days 00:00:01.899350870 +889 65 0 days 00:00:01.632600880 +889 66 0 days 00:00:01.582180126 +889 67 0 days 00:00:04.953211460 +889 68 0 days 00:00:04.566249526 +889 69 0 days 00:00:02.098668720 +889 70 0 days 00:00:04.309789453 +889 71 0 days 00:00:03.404885225 +889 72 0 days 00:00:02.871328945 +889 73 0 days 00:00:04.964889180 +889 74 0 days 00:00:02.781083625 +889 75 0 days 00:00:01.747478205 +889 76 0 days 00:00:04.791363400 +889 77 0 days 00:00:01.855065293 +889 78 0 days 00:00:02.752552995 +889 79 0 days 00:00:02.920283115 +889 80 0 days 00:00:05.131202816 +889 81 0 days 00:00:05.304414392 +889 82 0 days 00:00:02.925861008 +889 83 0 days 00:00:02.947706912 +889 84 0 days 00:00:01.720626260 +889 85 0 days 00:00:01.711230705 +889 86 0 days 
00:00:02.459450006 +889 87 0 days 00:00:05.140238244 +889 88 0 days 00:00:05.505027113 +889 89 0 days 00:00:04.839992385 +889 90 0 days 00:00:03.138766148 +889 91 0 days 00:00:01.621087253 +889 92 0 days 00:00:02.513748586 +889 93 0 days 00:00:02.092709880 +889 94 0 days 00:00:05.411285753 +889 95 0 days 00:00:02.583802573 +889 96 0 days 00:00:04.334249760 +889 97 0 days 00:00:01.755579910 +889 98 0 days 00:00:04.757718060 +889 99 0 days 00:00:03.129363308 +889 100 0 days 00:00:02.235852120 +890 1 0 days 00:00:03.060254800 +890 2 0 days 00:00:01.805633093 +890 3 0 days 00:00:05.297522635 +890 4 0 days 00:00:01.694041420 +890 5 0 days 00:00:02.842005820 +890 6 0 days 00:00:03.252688955 +890 7 0 days 00:00:01.865466255 +890 8 0 days 00:00:02.606637260 +890 9 0 days 00:00:05.506856632 +890 10 0 days 00:00:03.174505422 +890 11 0 days 00:00:04.501355273 +890 12 0 days 00:00:01.940300356 +890 13 0 days 00:00:02.037231020 +890 14 0 days 00:00:05.752849762 +890 15 0 days 00:00:02.844735025 +890 16 0 days 00:00:05.609131146 +890 17 0 days 00:00:02.513610120 +890 18 0 days 00:00:01.706652340 +890 19 0 days 00:00:01.876883310 +890 20 0 days 00:00:01.823347264 +890 21 0 days 00:00:02.843837040 +890 22 0 days 00:00:04.562143975 +890 23 0 days 00:00:03.013325816 +890 24 0 days 00:00:03.809671666 +890 25 0 days 00:00:02.135608823 +890 26 0 days 00:00:03.021344960 +890 27 0 days 00:00:04.413417610 +890 28 0 days 00:00:03.358865043 +890 29 0 days 00:00:02.842329725 +890 30 0 days 00:00:01.608861660 +890 31 0 days 00:00:05.399586336 +890 32 0 days 00:00:01.726405090 +890 33 0 days 00:00:04.536676480 +890 34 0 days 00:00:01.578891946 +890 35 0 days 00:00:05.027133960 +890 36 0 days 00:00:02.301240500 +890 37 0 days 00:00:05.366207480 +890 38 0 days 00:00:02.052376350 +890 39 0 days 00:00:02.585771286 +890 40 0 days 00:00:01.789206280 +890 41 0 days 00:00:02.705260780 +890 42 0 days 00:00:03.323858724 +890 43 0 days 00:00:02.993675890 +890 44 0 days 00:00:02.562155346 +890 45 0 days 
00:00:02.875293640 +890 46 0 days 00:00:02.835646290 +890 47 0 days 00:00:01.959852988 +890 48 0 days 00:00:05.286078345 +890 49 0 days 00:00:05.279311800 +890 50 0 days 00:00:05.448908683 +890 51 0 days 00:00:03.255002062 +890 52 0 days 00:00:02.982632704 +890 53 0 days 00:00:05.820981095 +890 54 0 days 00:00:01.671825026 +890 55 0 days 00:00:01.665400646 +890 56 0 days 00:00:05.020723250 +890 57 0 days 00:00:04.659012880 +890 58 0 days 00:00:01.978917845 +890 59 0 days 00:00:05.482157936 +890 60 0 days 00:00:03.144583828 +890 61 0 days 00:00:02.082006746 +890 62 0 days 00:00:04.917812975 +890 63 0 days 00:00:01.660467726 +890 64 0 days 00:00:05.407675004 +890 65 0 days 00:00:05.665518670 +890 66 0 days 00:00:02.847842580 +890 67 0 days 00:00:03.210475271 +890 68 0 days 00:00:04.439849120 +890 69 0 days 00:00:05.487559292 +890 70 0 days 00:00:03.341364355 +890 71 0 days 00:00:04.815256813 +890 72 0 days 00:00:05.331649260 +890 73 0 days 00:00:04.860491455 +890 74 0 days 00:00:05.253271464 +890 75 0 days 00:00:02.873010540 +890 76 0 days 00:00:02.840996810 +890 77 0 days 00:00:03.459056156 +890 78 0 days 00:00:02.660726346 +890 79 0 days 00:00:02.851530553 +890 80 0 days 00:00:03.217366452 +890 81 0 days 00:00:05.611067380 +890 82 0 days 00:00:01.897072696 +890 83 0 days 00:00:01.836622456 +890 84 0 days 00:00:05.443030820 +890 85 0 days 00:00:02.655762980 +890 86 0 days 00:00:03.306286216 +890 87 0 days 00:00:04.818467453 +890 88 0 days 00:00:02.846309835 +890 89 0 days 00:00:02.821324775 +890 90 0 days 00:00:02.027931585 +890 91 0 days 00:00:02.342739012 +890 92 0 days 00:00:04.502980653 +890 93 0 days 00:00:03.150883565 +890 94 0 days 00:00:01.924086920 +890 95 0 days 00:00:04.508656760 +890 96 0 days 00:00:05.498420844 +890 97 0 days 00:00:02.058286865 +890 98 0 days 00:00:01.982638460 +890 99 0 days 00:00:03.014629564 +890 100 0 days 00:00:01.731992075 +891 1 0 days 00:00:50.549635676 +891 2 0 days 00:01:12.748719420 +891 3 0 days 00:02:49.641675670 +891 4 0 
days 00:00:41.077724320 +891 5 0 days 00:02:16.331198673 +892 1 0 days 00:00:28.681379730 +892 2 0 days 00:01:22.565133733 +892 3 0 days 00:01:22.979186393 +892 4 0 days 00:00:24.898642733 +892 5 0 days 00:01:21.382280713 +892 6 0 days 00:00:27.863624380 +892 7 0 days 00:00:49.202460990 +892 8 0 days 00:00:43.649125286 +892 9 0 days 00:01:38.856564500 +893 1 0 days 00:00:46.794372655 +893 2 0 days 00:01:20.384186165 +893 3 0 days 00:00:41.641239653 +893 4 0 days 00:02:30.863405575 +893 5 0 days 00:02:42.698357792 +894 1 0 days 00:00:55.312156437 +894 2 0 days 00:00:43.476911893 +894 3 0 days 00:00:32.087335588 +894 4 0 days 00:00:49.145225915 +894 5 0 days 00:01:38.806212528 +894 6 0 days 00:00:30.325314360 +894 7 0 days 00:00:25.165725420 +894 8 0 days 00:00:28.478267035 +895 1 0 days 00:02:29.481497685 +895 2 0 days 00:01:11.808247793 +895 3 0 days 00:02:14.094269306 +895 4 0 days 00:00:48.881638776 +895 5 0 days 00:02:14.018890213 +896 1 0 days 00:00:55.331746368 +896 2 0 days 00:00:48.636405940 +896 3 0 days 00:01:38.339315260 +896 4 0 days 00:01:22.409680060 +896 5 0 days 00:00:28.322126800 +896 6 0 days 00:00:25.296521946 +896 7 0 days 00:00:25.302226500 +896 8 0 days 00:01:37.848860532 +897 1 0 days 00:00:56.135025000 +897 2 0 days 00:00:27.504176413 +897 3 0 days 00:00:45.635207133 +897 4 0 days 00:00:47.958978453 +897 5 0 days 00:00:30.310682915 +897 6 0 days 00:00:40.840124066 +897 7 0 days 00:01:45.764747686 +897 8 0 days 00:00:17.708148320 +897 9 0 days 00:00:45.825658848 +897 10 0 days 00:01:24.055844533 +897 11 0 days 00:01:06.196001793 +898 1 0 days 00:00:15.637160606 +898 2 0 days 00:00:50.859171795 +898 3 0 days 00:00:09.656293226 +898 4 0 days 00:00:07.473480613 +898 5 0 days 00:00:54.870548573 +898 6 0 days 00:00:09.735089135 +898 7 0 days 00:00:11.961508948 +898 8 0 days 00:00:06.268328292 +898 9 0 days 00:00:43.376874470 +898 10 0 days 00:00:24.843508873 +898 11 0 days 00:00:05.973488960 +898 12 0 days 00:00:11.505885820 +898 13 0 days 
00:00:07.633992120 +898 14 0 days 00:00:53.712327500 +898 15 0 days 00:00:07.592845473 +898 16 0 days 00:00:18.991327773 +898 17 0 days 00:00:32.609035106 +898 18 0 days 00:00:14.244262620 +898 19 0 days 00:01:30.763161566 +898 20 0 days 00:00:46.923933860 +898 21 0 days 00:00:25.203145653 +899 1 0 days 00:00:57.464233190 +899 2 0 days 00:01:51.003396106 +900 1 0 days 00:02:56.512073232 +900 2 0 days 00:01:03.992892365 +900 3 0 days 00:01:26.317150540 +900 4 0 days 00:02:39.451491894 +901 1 0 days 00:01:20.261813395 +901 2 0 days 00:01:20.149171865 +901 3 0 days 00:01:26.449877425 +901 4 0 days 00:01:50.697844260 +901 5 0 days 00:01:44.029554975 +902 1 0 days 00:01:43.489636175 +902 2 0 days 00:00:33.034076520 +902 3 0 days 00:01:21.242287165 +902 4 0 days 00:01:58.376548668 +903 1 0 days 00:01:16.616003586 +903 2 0 days 00:00:31.593557880 +903 3 0 days 00:01:03.813566413 +903 4 0 days 00:01:07.534823553 +903 5 0 days 00:00:54.724582126 +903 6 0 days 00:02:04.310243480 +903 7 0 days 00:00:38.646302960 +903 8 0 days 00:00:28.820904426 +903 9 0 days 00:01:02.370427313 +903 10 0 days 00:00:42.033286613 +904 1 0 days 00:00:35.637171686 +904 2 0 days 00:00:08.651741166 +904 3 0 days 00:00:15.313992600 +904 4 0 days 00:00:18.218258160 +904 5 0 days 00:00:12.390792033 +904 6 0 days 00:00:09.064924046 +904 8 0 days 00:00:22.819196100 +904 9 0 days 00:00:27.003415933 +904 10 0 days 00:00:35.115534340 +904 11 0 days 00:01:37.057747733 +904 12 0 days 00:00:25.030691260 +904 13 0 days 00:00:29.480716300 +904 14 0 days 00:00:44.591704360 +904 15 0 days 00:00:22.986848233 +904 16 0 days 00:00:29.239467300 +904 17 0 days 00:00:15.095047446 +904 18 0 days 00:00:20.175332516 +904 19 0 days 00:00:09.208286046 +904 20 0 days 00:00:14.332920873 +904 22 0 days 00:00:22.102429700 +904 23 0 days 00:00:48.759054820 +905 1 0 days 00:01:06.782965135 +905 2 0 days 00:01:33.966136700 +905 3 0 days 00:01:03.561849440 +905 4 0 days 00:00:47.889098844 +905 5 0 days 00:01:18.243931860 +905 6 0 
days 00:01:56.732389953 +906 1 0 days 00:00:25.282715066 +906 2 0 days 00:01:56.654500780 +906 3 0 days 00:01:27.122244495 +906 4 0 days 00:00:27.954429360 +906 5 0 days 00:00:32.024211135 +906 6 0 days 00:01:54.971328456 +907 4 0 days 00:00:00.141946340 +907 7 0 days 00:00:00.365966338 +907 8 0 days 00:00:00.177088127 +907 9 0 days 00:00:00.181846255 +907 11 0 days 00:00:00.372099932 +907 14 0 days 00:00:00.140668480 +907 20 0 days 00:00:00.359401342 +907 32 0 days 00:00:00.367668911 +907 33 0 days 00:00:00.172925574 +907 42 0 days 00:00:00.385139957 +907 43 0 days 00:00:00.381005078 +907 45 0 days 00:00:00.204569595 +907 48 0 days 00:00:00.371087663 +907 51 0 days 00:00:00.183329285 +907 52 0 days 00:00:00.180815116 +907 60 0 days 00:00:00.174662031 +907 62 0 days 00:00:00.229244631 +907 67 0 days 00:00:00.383518432 +907 70 0 days 00:00:00.177053350 +907 74 0 days 00:00:00.352700407 +907 79 0 days 00:00:00.180214770 +907 80 0 days 00:00:00.380719108 +907 81 0 days 00:00:00.393037321 +907 84 0 days 00:00:00.186071692 +907 88 0 days 00:00:00.377937265 +907 90 0 days 00:00:00.372602200 +907 92 0 days 00:00:00.240108915 +907 96 0 days 00:00:00.361428921 +907 97 0 days 00:00:00.341721340 +908 6 0 days 00:00:00.195372374 +908 8 0 days 00:00:00.264491777 +908 9 0 days 00:00:00.149761506 +908 10 0 days 00:00:00.302389726 +908 12 0 days 00:00:00.247111760 +908 13 0 days 00:00:00.258993976 +908 14 0 days 00:00:00.165117453 +908 15 0 days 00:00:00.266711588 +908 16 0 days 00:00:00.184122193 +908 18 0 days 00:00:00.151296353 +908 19 0 days 00:00:00.245518267 +908 21 0 days 00:00:00.291670146 +908 23 0 days 00:00:00.145750080 +908 24 0 days 00:00:00.154735526 +908 25 0 days 00:00:00.201088553 +908 26 0 days 00:00:00.149404920 +908 27 0 days 00:00:00.205784073 +908 28 0 days 00:00:00.391260903 +908 34 0 days 00:00:00.185743865 +908 36 0 days 00:00:00.273501076 +908 46 0 days 00:00:00.168809685 +908 50 0 days 00:00:00.154474613 +908 53 0 days 00:00:00.187256155 +908 54 0 days 
00:00:00.186914264 +908 61 0 days 00:00:00.174991148 +908 69 0 days 00:00:00.412124485 +908 70 0 days 00:00:00.363373086 +908 74 0 days 00:00:00.198057260 +908 77 0 days 00:00:00.399785200 +908 78 0 days 00:00:00.357503036 +908 79 0 days 00:00:00.417730003 +908 80 0 days 00:00:00.190013875 +908 81 0 days 00:00:00.151692260 +908 82 0 days 00:00:00.349505736 +908 83 0 days 00:00:00.259823522 +908 85 0 days 00:00:00.150970720 +908 89 0 days 00:00:00.141682006 +908 90 0 days 00:00:00.257286814 +908 91 0 days 00:00:00.319466246 +908 94 0 days 00:00:00.262549163 +908 95 0 days 00:00:00.333513315 +908 96 0 days 00:00:00.249336730 +909 8 0 days 00:00:00.198055660 +909 10 0 days 00:00:00.161116973 +909 15 0 days 00:00:00.100106715 +909 17 0 days 00:00:00.119313308 +909 22 0 days 00:00:00.190940868 +909 25 0 days 00:00:00.129696626 +909 27 0 days 00:00:00.185756864 +909 30 0 days 00:00:00.167373795 +909 35 0 days 00:00:00.084184146 +909 39 0 days 00:00:00.210089268 +909 42 0 days 00:00:00.130920068 +909 46 0 days 00:00:00.097212640 +909 48 0 days 00:00:00.200185320 +909 49 0 days 00:00:00.127219688 +909 51 0 days 00:00:00.080902793 +909 52 0 days 00:00:00.130045525 +909 53 0 days 00:00:00.197508237 +909 54 0 days 00:00:00.125083270 +909 55 0 days 00:00:00.118225122 +909 59 0 days 00:00:00.206503700 +909 62 0 days 00:00:00.123762054 +909 69 0 days 00:00:00.128556166 +909 71 0 days 00:00:00.102300973 +909 79 0 days 00:00:00.101228693 +909 82 0 days 00:00:00.097090017 +909 84 0 days 00:00:00.125244300 +909 89 0 days 00:00:00.088990324 +909 95 0 days 00:00:00.210200081 +909 96 0 days 00:00:00.210438511 +910 1 0 days 00:00:00.130615731 +910 4 0 days 00:00:00.096085485 +910 6 0 days 00:00:00.138435446 +910 7 0 days 00:00:00.086226553 +910 12 0 days 00:00:00.105399163 +910 13 0 days 00:00:00.229613005 +910 14 0 days 00:00:00.221483008 +910 17 0 days 00:00:00.134431790 +910 18 0 days 00:00:00.106221813 +910 20 0 days 00:00:00.126180833 +910 22 0 days 00:00:00.111619380 +910 24 0 
days 00:00:00.171665086 +910 26 0 days 00:00:00.104976536 +910 30 0 days 00:00:00.172004726 +910 31 0 days 00:00:00.176803466 +910 35 0 days 00:00:00.086120706 +910 37 0 days 00:00:00.098641786 +910 39 0 days 00:00:00.133466362 +910 40 0 days 00:00:00.096065113 +910 43 0 days 00:00:00.215643567 +910 47 0 days 00:00:00.135185921 +910 51 0 days 00:00:00.118436250 +910 53 0 days 00:00:00.166892313 +910 55 0 days 00:00:00.213664620 +910 59 0 days 00:00:00.093825666 +910 60 0 days 00:00:00.216181351 +910 69 0 days 00:00:00.087970605 +910 70 0 days 00:00:00.222931208 +910 71 0 days 00:00:00.140575588 +910 80 0 days 00:00:00.178352366 +910 81 0 days 00:00:00.093329592 +910 84 0 days 00:00:00.216278321 +910 85 0 days 00:00:00.121469193 +910 86 0 days 00:00:00.117394204 +910 93 0 days 00:00:00.188847333 +910 96 0 days 00:00:00.242881788 +911 13 0 days 00:00:00.307530176 +911 16 0 days 00:00:00.417383905 +911 21 0 days 00:00:00.291461424 +911 27 0 days 00:00:00.498496555 +911 29 0 days 00:00:00.513208631 +911 36 0 days 00:00:00.304416281 +911 48 0 days 00:00:00.507280971 +911 52 0 days 00:00:00.504489932 +911 60 0 days 00:00:00.493592168 +911 66 0 days 00:00:00.371829266 +911 67 0 days 00:00:00.484511629 +911 89 0 days 00:00:00.312029181 +912 3 0 days 00:00:00.165934291 +912 7 0 days 00:00:00.274020244 +912 8 0 days 00:00:00.116536213 +912 13 0 days 00:00:00.218403437 +912 15 0 days 00:00:00.218375695 +912 19 0 days 00:00:00.153350922 +912 22 0 days 00:00:00.270284332 +912 28 0 days 00:00:00.156213143 +912 30 0 days 00:00:00.278472185 +912 31 0 days 00:00:00.261697084 +912 34 0 days 00:00:00.215708108 +912 36 0 days 00:00:00.208135040 +912 51 0 days 00:00:00.255481092 +912 54 0 days 00:00:00.254046218 +912 57 0 days 00:00:00.247414200 +912 59 0 days 00:00:00.148104727 +912 61 0 days 00:00:00.264986469 +912 64 0 days 00:00:00.145609553 +912 66 0 days 00:00:00.244033395 +912 67 0 days 00:00:00.145512232 +912 72 0 days 00:00:00.264115075 +912 76 0 days 00:00:00.256065265 +912 
84 0 days 00:00:00.255489583 +912 85 0 days 00:00:00.262366381 +912 86 0 days 00:00:00.255857136 +912 89 0 days 00:00:00.259847700 +913 1 0 days 00:00:01.042315251 +913 3 0 days 00:00:00.606005473 +913 5 0 days 00:00:09.197473604 +913 8 0 days 00:00:07.492085673 +913 9 0 days 00:00:00.804337035 +913 10 0 days 00:00:04.140733234 +913 11 0 days 00:00:01.655294480 +913 12 0 days 00:00:02.084128071 +913 13 0 days 00:00:07.391706073 +913 14 0 days 00:00:03.555801220 +913 16 0 days 00:00:10.761415333 +913 17 0 days 00:00:02.287236400 +913 18 0 days 00:00:04.410922206 +913 19 0 days 00:00:09.050515376 +913 21 0 days 00:00:01.159438573 +913 23 0 days 00:00:02.260109540 +913 24 0 days 00:00:02.604905313 +913 25 0 days 00:00:01.375051446 +913 26 0 days 00:00:03.115977586 +913 27 0 days 00:00:03.641993166 +913 28 0 days 00:00:03.986794338 +913 29 0 days 00:00:01.860370666 +913 30 0 days 00:00:01.659843575 +913 32 0 days 00:00:00.809280556 +913 33 0 days 00:00:02.420999826 +913 35 0 days 00:00:02.093234700 +913 36 0 days 00:00:02.021289200 +913 37 0 days 00:00:00.865351080 +913 38 0 days 00:00:02.043760106 +913 39 0 days 00:00:01.299622735 +913 40 0 days 00:00:04.610076236 +913 43 0 days 00:00:06.103285885 +913 45 0 days 00:00:00.689061376 +913 46 0 days 00:00:01.100163573 +913 47 0 days 00:00:02.893356631 +913 48 0 days 00:00:03.883235433 +913 49 0 days 00:00:02.734674160 +913 50 0 days 00:00:01.167887833 +913 52 0 days 00:00:01.444624775 +913 53 0 days 00:00:02.888459770 +913 54 0 days 00:00:11.094667040 +913 55 0 days 00:00:00.871267026 +913 57 0 days 00:00:00.687971255 +913 58 0 days 00:00:11.929577880 +913 59 0 days 00:00:03.319774188 +914 1 0 days 00:00:02.611812597 +914 4 0 days 00:00:02.992769566 +914 5 0 days 00:00:01.692559086 +914 8 0 days 00:00:01.610392193 +914 10 0 days 00:00:01.242765526 +914 12 0 days 00:00:00.928196173 +914 13 0 days 00:00:11.804622220 +914 15 0 days 00:00:03.070276306 +914 17 0 days 00:00:02.333038606 +914 18 0 days 00:00:02.805156377 +914 19 
0 days 00:00:01.160383320 +914 20 0 days 00:00:02.491092300 +914 21 0 days 00:00:03.032327486 +914 22 0 days 00:00:01.469247340 +914 25 0 days 00:00:01.538166786 +914 26 0 days 00:00:18.026584940 +914 27 0 days 00:00:04.816366360 +914 28 0 days 00:00:04.169588713 +914 29 0 days 00:00:04.323862386 +914 30 0 days 00:00:05.064919753 +914 32 0 days 00:00:03.855594298 +914 33 0 days 00:00:01.242769260 +914 35 0 days 00:00:03.894357533 +914 36 0 days 00:00:01.779337166 +914 37 0 days 00:00:04.990786996 +914 38 0 days 00:00:02.077664666 +914 39 0 days 00:00:02.366144806 +914 40 0 days 00:00:02.866967150 +914 42 0 days 00:00:02.029672693 +914 43 0 days 00:00:01.318183853 +914 44 0 days 00:00:00.645814926 +914 45 0 days 00:00:02.014181300 +914 46 0 days 00:00:01.208849546 +914 47 0 days 00:00:01.909142606 +914 48 0 days 00:00:07.101688526 +914 50 0 days 00:00:01.956256993 +914 51 0 days 00:00:02.912976888 +914 52 0 days 00:00:03.576818630 +914 53 0 days 00:00:04.726041880 +914 54 0 days 00:00:01.697422926 +915 2 0 days 00:00:00.712227266 +915 3 0 days 00:00:00.868179033 +915 4 0 days 00:00:02.015209881 +915 5 0 days 00:00:01.439085813 +915 6 0 days 00:00:01.701633816 +915 7 0 days 00:00:04.201004526 +915 8 0 days 00:00:00.747946286 +915 9 0 days 00:00:00.269795010 +915 10 0 days 00:00:02.844958830 +915 11 0 days 00:00:03.110877020 +915 12 0 days 00:00:00.772330181 +915 13 0 days 00:00:03.885727580 +915 16 0 days 00:00:01.856515746 +915 19 0 days 00:00:00.514315873 +915 20 0 days 00:00:00.910505100 +915 22 0 days 00:00:01.521236066 +915 23 0 days 00:00:00.827928460 +915 24 0 days 00:00:02.396631326 +915 25 0 days 00:00:03.675422520 +915 26 0 days 00:00:01.435304387 +915 28 0 days 00:00:01.807224480 +915 29 0 days 00:00:01.199348746 +915 30 0 days 00:00:00.571516013 +915 32 0 days 00:00:01.589347940 +915 33 0 days 00:00:01.155463940 +915 34 0 days 00:00:00.346899353 +915 35 0 days 00:00:00.727465126 +915 36 0 days 00:00:01.873470493 +915 37 0 days 00:00:03.286156281 +915 38 0 
days 00:00:00.644809453 +915 39 0 days 00:00:01.972411687 +915 41 0 days 00:00:01.426858766 +915 42 0 days 00:00:00.593000093 +915 44 0 days 00:00:01.057668180 +915 46 0 days 00:00:00.962149210 +915 47 0 days 00:00:01.608426680 +915 48 0 days 00:00:00.923886328 +915 49 0 days 00:00:01.250630458 +915 50 0 days 00:00:00.738311051 +915 52 0 days 00:00:01.005632173 +915 53 0 days 00:00:03.583315646 +915 54 0 days 00:00:01.101212526 +915 57 0 days 00:00:00.793432740 +915 59 0 days 00:00:00.880710340 +915 60 0 days 00:00:01.556049560 +915 61 0 days 00:00:01.284929993 +915 62 0 days 00:00:00.348703960 +915 63 0 days 00:00:00.544056673 +915 64 0 days 00:00:01.850034143 +915 66 0 days 00:00:00.757492213 +915 68 0 days 00:00:01.023336566 +915 69 0 days 00:00:01.676466733 +915 70 0 days 00:00:01.471795993 +915 71 0 days 00:00:00.359668186 +915 72 0 days 00:00:00.375771086 +915 77 0 days 00:00:00.548544906 +915 78 0 days 00:00:00.451385440 +915 79 0 days 00:00:01.343488295 +915 81 0 days 00:00:01.688048653 +915 83 0 days 00:00:01.023732380 +915 84 0 days 00:00:00.417926786 +915 85 0 days 00:00:00.596913200 +915 86 0 days 00:00:00.680077933 +915 87 0 days 00:00:00.447802926 +915 88 0 days 00:00:01.177634366 +915 89 0 days 00:00:01.049739976 +915 90 0 days 00:00:01.994968280 +915 93 0 days 00:00:00.959717413 +915 94 0 days 00:00:00.799918940 +915 95 0 days 00:00:01.616026246 +915 97 0 days 00:00:01.621022273 +915 98 0 days 00:00:01.076978800 +915 99 0 days 00:00:02.150384906 +915 100 0 days 00:00:00.650820273 +916 2 0 days 00:00:00.498423643 +916 3 0 days 00:00:00.357013940 +916 4 0 days 00:00:03.082468193 +916 5 0 days 00:00:01.668183733 +916 6 0 days 00:00:01.948512960 +916 7 0 days 00:00:01.086873980 +916 10 0 days 00:00:01.096348346 +916 11 0 days 00:00:00.852166985 +916 12 0 days 00:00:00.794450960 +916 13 0 days 00:00:00.650821346 +916 14 0 days 00:00:01.206763358 +916 15 0 days 00:00:03.335516449 +916 16 0 days 00:00:01.108102173 +916 18 0 days 00:00:02.996975070 +916 19 
0 days 00:00:01.064298790 +916 22 0 days 00:00:00.393467573 +916 24 0 days 00:00:00.572928473 +916 25 0 days 00:00:01.681846280 +916 26 0 days 00:00:00.424915340 +916 27 0 days 00:00:01.000145633 +916 28 0 days 00:00:01.312176833 +916 30 0 days 00:00:01.003257766 +916 31 0 days 00:00:00.466518649 +916 32 0 days 00:00:00.625087953 +916 37 0 days 00:00:00.795730073 +916 38 0 days 00:00:00.956678213 +916 39 0 days 00:00:00.336975898 +916 40 0 days 00:00:01.575307407 +916 41 0 days 00:00:01.644617301 +916 42 0 days 00:00:01.147943653 +916 43 0 days 00:00:01.547114300 +916 46 0 days 00:00:01.792238961 +916 47 0 days 00:00:01.528197686 +916 48 0 days 00:00:00.928291580 +916 50 0 days 00:00:00.372705333 +916 51 0 days 00:00:01.245169053 +916 52 0 days 00:00:01.055572860 +916 54 0 days 00:00:00.802881565 +916 55 0 days 00:00:00.937211906 +916 56 0 days 00:00:00.749259993 +916 57 0 days 00:00:00.861388993 +916 58 0 days 00:00:00.613243133 +916 60 0 days 00:00:00.640657473 +916 61 0 days 00:00:03.472569533 +916 65 0 days 00:00:00.787256306 +916 67 0 days 00:00:01.565724045 +916 69 0 days 00:00:00.870145906 +916 70 0 days 00:00:02.247843286 +916 74 0 days 00:00:02.511569680 +916 75 0 days 00:00:02.203311693 +916 76 0 days 00:00:02.096411740 +916 77 0 days 00:00:00.574829083 +916 78 0 days 00:00:02.166459646 +916 79 0 days 00:00:03.718239833 +916 81 0 days 00:00:00.670897500 +916 82 0 days 00:00:00.535008920 +916 85 0 days 00:00:01.414731746 +916 87 0 days 00:00:01.138956426 +916 89 0 days 00:00:00.796677720 +916 91 0 days 00:00:01.642563360 +916 93 0 days 00:00:01.033682240 +916 94 0 days 00:00:01.622791480 +916 95 0 days 00:00:00.891236460 +916 96 0 days 00:00:03.323502140 +916 97 0 days 00:00:01.221183586 +916 98 0 days 00:00:00.959898573 +916 99 0 days 00:00:01.789464122 +916 100 0 days 00:00:01.166773693 +917 1 0 days 00:00:11.710262394 +917 2 0 days 00:00:01.084428362 +917 3 0 days 00:00:03.133566272 +917 4 0 days 00:00:03.907622207 +917 5 0 days 00:00:01.521553520 +917 
7 0 days 00:00:04.597449945 +917 8 0 days 00:00:04.882115236 +917 9 0 days 00:00:02.603587900 +917 10 0 days 00:00:02.021435265 +917 11 0 days 00:00:01.260313544 +917 13 0 days 00:00:03.151857655 +917 14 0 days 00:00:01.227858903 +917 15 0 days 00:00:07.734485044 +917 16 0 days 00:00:02.943364750 +917 17 0 days 00:00:01.685513588 +917 18 0 days 00:00:02.567071506 +917 19 0 days 00:00:05.254403676 +917 20 0 days 00:00:04.051047526 +917 21 0 days 00:00:01.233206628 +917 22 0 days 00:00:02.525152250 +917 23 0 days 00:00:20.007436018 +917 24 0 days 00:00:02.495892317 +917 25 0 days 00:00:02.206899497 +917 27 0 days 00:00:01.155186888 +917 28 0 days 00:00:01.223133530 +917 29 0 days 00:00:02.081403563 +917 30 0 days 00:00:09.154265620 +917 32 0 days 00:00:02.398306813 +917 33 0 days 00:00:01.369488176 +917 35 0 days 00:00:03.817349824 +917 36 0 days 00:00:03.877952788 +917 37 0 days 00:00:04.149218663 +917 38 0 days 00:00:05.101305648 +917 39 0 days 00:00:01.126722158 +917 40 0 days 00:00:03.199204445 +917 41 0 days 00:00:01.272185396 +917 42 0 days 00:00:03.519321896 +917 43 0 days 00:00:03.067587165 +917 44 0 days 00:00:02.176257428 +917 45 0 days 00:00:03.380592960 +917 46 0 days 00:00:00.863842697 +917 47 0 days 00:00:02.534058758 +918 1 0 days 00:00:01.252888884 +918 2 0 days 00:00:02.546833445 +918 3 0 days 00:00:00.988946120 +918 4 0 days 00:00:02.562470028 +918 5 0 days 00:00:01.561414362 +918 6 0 days 00:00:01.626142245 +918 7 0 days 00:00:01.879000076 +918 8 0 days 00:00:00.697782912 +918 9 0 days 00:00:00.727251848 +918 11 0 days 00:00:00.789988756 +918 12 0 days 00:00:01.553131793 +918 14 0 days 00:00:01.180718387 +918 15 0 days 00:00:02.078827386 +918 16 0 days 00:00:00.537174664 +918 17 0 days 00:00:01.331161185 +918 18 0 days 00:00:00.946883275 +918 19 0 days 00:00:01.769004768 +918 20 0 days 00:00:01.397822344 +918 21 0 days 00:00:01.929213320 +918 22 0 days 00:00:00.738310342 +918 23 0 days 00:00:01.582357790 +918 24 0 days 00:00:01.561844397 +918 25 0 
days 00:00:00.609268025 +918 26 0 days 00:00:02.960780618 +918 27 0 days 00:00:00.787690126 +918 28 0 days 00:00:01.406900952 +918 29 0 days 00:00:01.088664602 +918 30 0 days 00:00:01.195751756 +918 31 0 days 00:00:06.277301481 +918 32 0 days 00:00:01.080726176 +918 33 0 days 00:00:00.867017280 +918 34 0 days 00:00:01.495365661 +918 35 0 days 00:00:01.167310738 +918 36 0 days 00:00:01.116684548 +918 37 0 days 00:00:00.844924872 +918 38 0 days 00:00:01.177472200 +918 39 0 days 00:00:02.255397810 +918 40 0 days 00:00:01.477310043 +918 41 0 days 00:00:01.288164840 +918 43 0 days 00:00:01.293039344 +918 44 0 days 00:00:02.047459963 +918 45 0 days 00:00:01.867407015 +918 46 0 days 00:00:02.031996068 +918 47 0 days 00:00:01.234076460 +918 48 0 days 00:00:01.563261212 +918 50 0 days 00:00:00.379671572 +918 51 0 days 00:00:02.541282788 +918 52 0 days 00:00:03.968753805 +918 54 0 days 00:00:03.517705434 +918 55 0 days 00:00:01.370991796 +918 56 0 days 00:00:01.181199272 +918 57 0 days 00:00:00.826311596 +918 58 0 days 00:00:01.735647960 +918 59 0 days 00:00:04.597787475 +918 60 0 days 00:00:00.638805616 +918 61 0 days 00:00:00.901634228 +918 62 0 days 00:00:00.521394955 +918 63 0 days 00:00:00.919157647 +918 64 0 days 00:00:01.133353690 +918 65 0 days 00:00:01.928410205 +918 66 0 days 00:00:01.196720523 +918 67 0 days 00:00:00.744443096 +918 68 0 days 00:00:01.324581889 +918 69 0 days 00:00:00.936622540 +918 70 0 days 00:00:04.118654546 +918 71 0 days 00:00:05.235844811 +918 72 0 days 00:00:01.593973390 +918 73 0 days 00:00:05.390818566 +918 74 0 days 00:00:01.747548360 +918 75 0 days 00:00:04.076986237 +918 76 0 days 00:00:02.557683337 +918 77 0 days 00:00:05.108431450 +918 78 0 days 00:00:02.056508544 +918 79 0 days 00:00:01.698945348 +918 80 0 days 00:00:02.073795880 +918 82 0 days 00:00:04.414061950 +918 83 0 days 00:00:01.717734668 +918 84 0 days 00:00:01.299891902 +918 86 0 days 00:00:01.498131302 +918 87 0 days 00:00:01.272534428 +918 88 0 days 00:00:01.600560562 
+918 89 0 days 00:00:01.428395964 +918 90 0 days 00:00:01.251932241 +918 91 0 days 00:00:01.474756385 +918 92 0 days 00:00:01.073826755 +918 93 0 days 00:00:01.305959625 +918 94 0 days 00:00:01.182062047 +918 95 0 days 00:00:00.568384255 +918 97 0 days 00:00:00.569416750 +918 99 0 days 00:00:01.391226296 +918 100 0 days 00:00:01.885064876 +919 1 0 days 00:00:01.717974560 +919 2 0 days 00:00:05.798306694 +919 3 0 days 00:00:03.416645133 +919 4 0 days 00:00:01.650205586 +919 5 0 days 00:00:06.374622946 +919 7 0 days 00:00:01.229980573 +919 8 0 days 00:00:00.967964315 +919 9 0 days 00:00:02.731340483 +919 10 0 days 00:00:02.366989500 +919 11 0 days 00:00:00.727802660 +919 12 0 days 00:00:01.306234470 +919 13 0 days 00:00:01.754435033 +919 14 0 days 00:00:01.167071805 +919 15 0 days 00:00:03.331523870 +919 16 0 days 00:00:00.817715080 +919 17 0 days 00:00:01.800830286 +919 18 0 days 00:00:00.964825253 +919 19 0 days 00:00:02.256300810 +919 20 0 days 00:00:01.796435293 +919 21 0 days 00:00:04.078902870 +919 22 0 days 00:00:05.257393720 +919 23 0 days 00:00:01.064785035 +919 24 0 days 00:00:01.873891126 +919 25 0 days 00:00:01.756611726 +919 26 0 days 00:00:02.680425206 +919 27 0 days 00:00:01.968970566 +919 29 0 days 00:00:01.062701793 +919 30 0 days 00:00:02.617439750 +919 31 0 days 00:00:02.684889853 +919 32 0 days 00:00:00.872128886 +919 33 0 days 00:00:02.880095626 +919 34 0 days 00:00:01.199949760 +919 35 0 days 00:00:02.082255186 +919 36 0 days 00:00:02.481131280 +919 37 0 days 00:00:00.989046933 +919 38 0 days 00:00:05.010327453 +919 39 0 days 00:00:09.136346853 +919 40 0 days 00:00:02.977199553 +919 41 0 days 00:00:04.973334952 +919 42 0 days 00:00:01.596519573 +919 43 0 days 00:00:01.327487740 +919 44 0 days 00:00:01.085160905 +919 45 0 days 00:00:03.279449524 +919 46 0 days 00:00:01.811535266 +919 47 0 days 00:00:03.299675191 +919 48 0 days 00:00:02.234030926 +919 49 0 days 00:00:03.974017193 +919 50 0 days 00:00:08.634375555 +919 51 0 days 00:00:01.852507596 
+919 52 0 days 00:00:02.800926888 +919 53 0 days 00:00:01.492664186 +919 55 0 days 00:00:01.527048756 +919 57 0 days 00:00:02.323694290 +919 58 0 days 00:00:00.756185673 +919 59 0 days 00:00:11.994416020 +919 60 0 days 00:00:02.816434580 +919 61 0 days 00:00:02.165582552 +919 62 0 days 00:00:04.149484395 +919 63 0 days 00:00:01.204555906 +919 64 0 days 00:00:01.089958673 +919 65 0 days 00:00:00.584828726 +919 66 0 days 00:00:03.839001600 +919 67 0 days 00:00:02.425972020 +919 68 0 days 00:00:02.681711457 +919 69 0 days 00:00:02.505461116 +919 70 0 days 00:00:01.454906913 +919 71 0 days 00:00:02.878897593 +919 73 0 days 00:00:00.725350453 +919 74 0 days 00:00:02.433326156 +919 75 0 days 00:00:02.041277273 +919 76 0 days 00:00:03.575950560 +919 77 0 days 00:00:01.414218152 +919 78 0 days 00:00:00.817413526 +919 79 0 days 00:00:02.109194280 +919 80 0 days 00:00:04.126245232 +919 81 0 days 00:00:01.493319213 +919 82 0 days 00:00:04.722255253 +919 83 0 days 00:00:01.057327580 +919 84 0 days 00:00:13.753423290 +919 85 0 days 00:00:03.756955985 +919 86 0 days 00:00:02.571267222 +919 87 0 days 00:00:09.890815356 +919 88 0 days 00:00:02.320058773 +919 89 0 days 00:00:03.184201046 +919 90 0 days 00:00:01.724541573 +919 91 0 days 00:00:01.466543188 +919 93 0 days 00:00:01.412217253 +919 94 0 days 00:00:01.523292100 +919 95 0 days 00:00:02.653891924 +919 96 0 days 00:00:01.091872850 +919 97 0 days 00:00:02.487359038 +919 98 0 days 00:00:00.778386360 +919 99 0 days 00:00:02.265132626 +919 100 0 days 00:00:06.990043680 +920 1 0 days 00:00:02.070677280 +920 2 0 days 00:00:02.248589060 +920 3 0 days 00:00:01.307863380 +920 4 0 days 00:00:00.830806180 +920 6 0 days 00:00:02.803428315 +920 7 0 days 00:00:00.892441496 +920 8 0 days 00:00:03.588035380 +920 9 0 days 00:00:02.739936263 +920 12 0 days 00:00:01.171272548 +920 13 0 days 00:00:01.514151073 +920 14 0 days 00:00:01.134150322 +920 15 0 days 00:00:01.106277760 +920 16 0 days 00:00:01.201386213 +920 17 0 days 00:00:01.258011310 
+920 18 0 days 00:00:00.782956786 +920 19 0 days 00:00:00.586549013 +920 20 0 days 00:00:04.755110600 +920 21 0 days 00:00:00.460818832 +920 22 0 days 00:00:00.561764813 +920 23 0 days 00:00:00.719851366 +920 24 0 days 00:00:00.584805340 +920 25 0 days 00:00:01.682386690 +920 26 0 days 00:00:01.784979766 +920 28 0 days 00:00:00.675799904 +920 30 0 days 00:00:01.350707260 +920 31 0 days 00:00:01.858480970 +920 32 0 days 00:00:00.927863026 +920 33 0 days 00:00:01.185801315 +920 34 0 days 00:00:01.111486160 +920 36 0 days 00:00:00.494397136 +920 37 0 days 00:00:01.449777846 +920 38 0 days 00:00:01.073761426 +920 39 0 days 00:00:00.918776500 +920 40 0 days 00:00:00.522973233 +920 41 0 days 00:00:00.916115920 +920 42 0 days 00:00:02.356603503 +920 43 0 days 00:00:00.338543316 +920 44 0 days 00:00:00.730860873 +920 45 0 days 00:00:00.612634780 +920 46 0 days 00:00:03.153124126 +920 47 0 days 00:00:01.200462296 +920 48 0 days 00:00:01.511756588 +920 49 0 days 00:00:01.393525585 +920 50 0 days 00:00:00.906819060 +920 51 0 days 00:00:00.549693400 +920 52 0 days 00:00:00.553088906 +920 53 0 days 00:00:00.535953551 +920 54 0 days 00:00:00.746919426 +920 55 0 days 00:00:01.764655972 +920 56 0 days 00:00:00.917731573 +920 57 0 days 00:00:00.827384593 +920 58 0 days 00:00:03.620340215 +920 59 0 days 00:00:00.521675100 +920 60 0 days 00:00:00.742455560 +920 61 0 days 00:00:04.338782780 +920 62 0 days 00:00:00.907142786 +920 64 0 days 00:00:01.354029470 +920 65 0 days 00:00:01.146751806 +920 66 0 days 00:00:01.431311326 +920 67 0 days 00:00:01.241562382 +920 68 0 days 00:00:01.698079802 +920 69 0 days 00:00:00.524436266 +920 70 0 days 00:00:01.592312713 +920 71 0 days 00:00:01.897336165 +920 72 0 days 00:00:00.742258686 +920 73 0 days 00:00:01.254376380 +920 74 0 days 00:00:00.831631700 +920 75 0 days 00:00:00.830475846 +920 76 0 days 00:00:01.007365233 +920 77 0 days 00:00:01.428318994 +920 78 0 days 00:00:00.381333800 +920 79 0 days 00:00:01.261566587 +920 80 0 days 
00:00:00.508693600 +920 81 0 days 00:00:00.264125166 +920 82 0 days 00:00:01.013855800 +920 83 0 days 00:00:00.620127860 +920 84 0 days 00:00:01.003193370 +920 85 0 days 00:00:00.758480213 +920 86 0 days 00:00:00.555851046 +920 87 0 days 00:00:02.599932248 +920 88 0 days 00:00:00.841447868 +920 89 0 days 00:00:00.802284916 +920 90 0 days 00:00:01.496153000 +920 91 0 days 00:00:00.780506280 +920 92 0 days 00:00:00.522223116 +920 93 0 days 00:00:01.236889834 +920 94 0 days 00:00:00.911186066 +920 95 0 days 00:00:03.150759706 +920 96 0 days 00:00:02.202993913 +920 97 0 days 00:00:02.647375165 +920 98 0 days 00:00:01.226916300 +920 99 0 days 00:00:01.300525564 +920 100 0 days 00:00:00.553954000 +921 1 0 days 00:00:00.149750810 +921 2 0 days 00:00:00.175828323 +921 3 0 days 00:00:00.148982250 +921 4 0 days 00:00:00.174177595 +921 5 0 days 00:00:00.148184955 +921 6 0 days 00:00:00.146183305 +921 7 0 days 00:00:00.175095768 +921 8 0 days 00:00:00.143857425 +921 9 0 days 00:00:00.177970945 +921 10 0 days 00:00:00.195398780 +921 11 0 days 00:00:00.164177760 +921 12 0 days 00:00:00.168310500 +921 13 0 days 00:00:00.177747114 +921 14 0 days 00:00:00.216531004 +921 15 0 days 00:00:00.200888970 +921 16 0 days 00:00:00.137766920 +921 17 0 days 00:00:00.196873930 +921 18 0 days 00:00:00.145695690 +921 19 0 days 00:00:00.142965035 +921 20 0 days 00:00:00.156313945 +921 21 0 days 00:00:00.162622590 +921 22 0 days 00:00:00.181089506 +921 23 0 days 00:00:00.147264362 +921 24 0 days 00:00:00.200672135 +921 25 0 days 00:00:00.198546400 +921 26 0 days 00:00:00.225947236 +921 27 0 days 00:00:00.218706015 +921 28 0 days 00:00:00.130291515 +921 29 0 days 00:00:00.180634855 +921 30 0 days 00:00:00.159942220 +921 31 0 days 00:00:00.125333770 +921 32 0 days 00:00:00.149864835 +921 33 0 days 00:00:00.201969150 +921 34 0 days 00:00:00.153749440 +921 35 0 days 00:00:00.243973707 +921 36 0 days 00:00:00.181204790 +921 37 0 days 00:00:00.151781460 +921 38 0 days 00:00:00.155042970 +921 39 0 days 
00:00:00.199024755 +921 40 0 days 00:00:00.138435391 +921 41 0 days 00:00:00.190235880 +921 42 0 days 00:00:00.203196640 +921 43 0 days 00:00:00.155127400 +921 44 0 days 00:00:00.174208685 +921 45 0 days 00:00:00.234655072 +921 46 0 days 00:00:00.147312947 +921 47 0 days 00:00:00.147096776 +921 48 0 days 00:00:00.196913690 +921 49 0 days 00:00:00.150023045 +921 50 0 days 00:00:00.175483157 +921 51 0 days 00:00:00.137010176 +921 52 0 days 00:00:00.138125634 +921 53 0 days 00:00:00.222263155 +921 54 0 days 00:00:00.237626311 +921 55 0 days 00:00:00.129106475 +921 56 0 days 00:00:00.235186548 +921 57 0 days 00:00:00.145362830 +921 58 0 days 00:00:00.204996972 +921 59 0 days 00:00:00.197311575 +921 60 0 days 00:00:00.164910438 +921 61 0 days 00:00:00.154684015 +921 62 0 days 00:00:00.152194060 +921 63 0 days 00:00:00.141325950 +921 64 0 days 00:00:00.245916616 +921 65 0 days 00:00:00.127595880 +921 66 0 days 00:00:00.159954480 +921 67 0 days 00:00:00.160466790 +921 68 0 days 00:00:00.159640924 +921 69 0 days 00:00:00.162834315 +921 70 0 days 00:00:00.148030040 +921 71 0 days 00:00:00.175919311 +921 72 0 days 00:00:00.214272645 +921 73 0 days 00:00:00.145254325 +921 74 0 days 00:00:00.210478805 +921 75 0 days 00:00:00.145872497 +921 76 0 days 00:00:00.186801056 +921 77 0 days 00:00:00.164132813 +921 78 0 days 00:00:00.161308260 +921 79 0 days 00:00:00.184918165 +921 80 0 days 00:00:00.220031000 +921 81 0 days 00:00:00.220591550 +921 82 0 days 00:00:00.204087110 +921 83 0 days 00:00:00.164159830 +921 84 0 days 00:00:00.149936020 +921 85 0 days 00:00:00.213132390 +921 86 0 days 00:00:00.215849408 +921 87 0 days 00:00:00.229486832 +921 88 0 days 00:00:00.148119110 +921 89 0 days 00:00:00.155682045 +921 90 0 days 00:00:00.163144150 +921 91 0 days 00:00:00.149001470 +921 92 0 days 00:00:00.173315655 +921 93 0 days 00:00:00.154492235 +921 94 0 days 00:00:00.153708590 +921 95 0 days 00:00:00.167391635 +921 96 0 days 00:00:00.220282200 +921 97 0 days 00:00:00.118056050 +921 98 
0 days 00:00:00.149471655 +921 99 0 days 00:00:00.202354500 +921 100 0 days 00:00:00.243606710 +922 1 0 days 00:00:00.211071605 +922 2 0 days 00:00:00.154868672 +922 3 0 days 00:00:00.126664130 +922 4 0 days 00:00:00.216110180 +922 5 0 days 00:00:00.152262990 +922 6 0 days 00:00:00.211475510 +922 7 0 days 00:00:00.218952564 +922 8 0 days 00:00:00.170433392 +922 9 0 days 00:00:00.189631343 +922 10 0 days 00:00:00.211028985 +922 11 0 days 00:00:00.122349450 +922 12 0 days 00:00:00.133607833 +922 13 0 days 00:00:00.206962815 +922 14 0 days 00:00:00.123470850 +922 15 0 days 00:00:00.223472445 +922 16 0 days 00:00:00.154336976 +922 17 0 days 00:00:00.132003885 +922 18 0 days 00:00:00.142993720 +922 19 0 days 00:00:00.175937985 +922 20 0 days 00:00:00.212515020 +922 21 0 days 00:00:00.151010980 +922 22 0 days 00:00:00.139454433 +922 23 0 days 00:00:00.119722460 +922 24 0 days 00:00:00.159022015 +922 25 0 days 00:00:00.230574556 +922 26 0 days 00:00:00.237909440 +922 27 0 days 00:00:00.211948420 +922 28 0 days 00:00:00.184739633 +922 29 0 days 00:00:00.135897065 +922 30 0 days 00:00:00.125011105 +922 31 0 days 00:00:00.187581076 +922 32 0 days 00:00:00.228111900 +922 33 0 days 00:00:00.176766355 +922 34 0 days 00:00:00.236562405 +922 35 0 days 00:00:00.219385555 +922 36 0 days 00:00:00.158244170 +922 37 0 days 00:00:00.242120408 +922 38 0 days 00:00:00.164511515 +922 39 0 days 00:00:00.151654960 +922 40 0 days 00:00:00.195416540 +922 41 0 days 00:00:00.174162563 +922 42 0 days 00:00:00.208662295 +922 43 0 days 00:00:00.163778596 +922 44 0 days 00:00:00.166334920 +922 45 0 days 00:00:00.210266120 +922 46 0 days 00:00:00.155471545 +922 47 0 days 00:00:00.150553665 +922 48 0 days 00:00:00.234909310 +922 49 0 days 00:00:00.151598910 +922 50 0 days 00:00:00.146200625 +922 51 0 days 00:00:00.226353365 +922 52 0 days 00:00:00.218978325 +922 53 0 days 00:00:00.157958450 +922 54 0 days 00:00:00.150265670 +922 55 0 days 00:00:00.149704980 +922 56 0 days 00:00:00.134392326 +922 57 0 
days 00:00:00.142038820 +922 58 0 days 00:00:00.159444268 +922 59 0 days 00:00:00.147628538 +922 60 0 days 00:00:00.195671192 +922 61 0 days 00:00:00.210382410 +922 62 0 days 00:00:00.160693480 +922 63 0 days 00:00:00.184154410 +922 64 0 days 00:00:00.144982605 +922 65 0 days 00:00:00.120394100 +922 66 0 days 00:00:00.141277203 +922 67 0 days 00:00:00.210666715 +922 68 0 days 00:00:00.161644245 +922 69 0 days 00:00:00.127877625 +922 70 0 days 00:00:00.124066295 +922 71 0 days 00:00:00.131048536 +922 72 0 days 00:00:00.148755930 +922 73 0 days 00:00:00.158527506 +922 74 0 days 00:00:00.204992535 +922 75 0 days 00:00:00.126426170 +922 76 0 days 00:00:00.121510530 +922 77 0 days 00:00:00.171715090 +922 78 0 days 00:00:00.206497620 +922 79 0 days 00:00:00.241607740 +922 80 0 days 00:00:00.171205300 +922 81 0 days 00:00:00.216082560 +922 82 0 days 00:00:00.167988702 +922 83 0 days 00:00:00.131245975 +922 84 0 days 00:00:00.160048445 +922 85 0 days 00:00:00.152606885 +922 86 0 days 00:00:00.158733070 +922 87 0 days 00:00:00.144565080 +922 88 0 days 00:00:00.216485065 +922 89 0 days 00:00:00.219300710 +922 90 0 days 00:00:00.124572950 +922 91 0 days 00:00:00.170210040 +922 92 0 days 00:00:00.229778040 +922 93 0 days 00:00:00.166212395 +922 94 0 days 00:00:00.185302180 +922 95 0 days 00:00:00.154584245 +922 96 0 days 00:00:00.216929455 +922 97 0 days 00:00:00.150579475 +922 98 0 days 00:00:00.127184124 +922 99 0 days 00:00:00.170583700 +922 100 0 days 00:00:00.171803460 +923 1 0 days 00:00:00.079540180 +923 2 0 days 00:00:00.083208645 +923 3 0 days 00:00:00.091325825 +923 4 0 days 00:00:00.108349370 +923 5 0 days 00:00:00.084089845 +923 6 0 days 00:00:00.114121865 +923 7 0 days 00:00:00.116429000 +923 8 0 days 00:00:00.089385630 +923 9 0 days 00:00:00.116364990 +923 10 0 days 00:00:00.063042073 +923 11 0 days 00:00:00.075523275 +923 12 0 days 00:00:00.101712068 +923 13 0 days 00:00:00.108478550 +923 14 0 days 00:00:00.082419475 +923 15 0 days 00:00:00.108960040 +923 16 0 
days 00:00:00.081257695 +923 17 0 days 00:00:00.072626235 +923 18 0 days 00:00:00.125577035 +923 19 0 days 00:00:00.088444483 +923 20 0 days 00:00:00.117678130 +923 21 0 days 00:00:00.117458315 +923 22 0 days 00:00:00.082829370 +923 23 0 days 00:00:00.118175865 +923 24 0 days 00:00:00.096768570 +923 25 0 days 00:00:00.114868315 +923 26 0 days 00:00:00.078940235 +923 27 0 days 00:00:00.078755890 +923 28 0 days 00:00:00.122349316 +923 29 0 days 00:00:00.079776705 +923 30 0 days 00:00:00.126691235 +923 31 0 days 00:00:00.096666440 +923 32 0 days 00:00:00.098109285 +923 33 0 days 00:00:00.084829445 +923 34 0 days 00:00:00.093107230 +923 35 0 days 00:00:00.133092445 +923 36 0 days 00:00:00.088539735 +923 37 0 days 00:00:00.118287575 +923 38 0 days 00:00:00.084226460 +923 39 0 days 00:00:00.091386031 +923 40 0 days 00:00:00.068014040 +923 41 0 days 00:00:00.078308395 +923 42 0 days 00:00:00.091717573 +923 43 0 days 00:00:00.067453365 +923 44 0 days 00:00:00.090747263 +923 45 0 days 00:00:00.092891293 +923 46 0 days 00:00:00.122931175 +923 47 0 days 00:00:00.073988930 +923 48 0 days 00:00:00.071759660 +923 49 0 days 00:00:00.128844590 +923 50 0 days 00:00:00.096505913 +923 51 0 days 00:00:00.082404913 +923 52 0 days 00:00:00.069113750 +923 53 0 days 00:00:00.094345068 +923 54 0 days 00:00:00.127337128 +923 55 0 days 00:00:00.097769114 +923 56 0 days 00:00:00.082233645 +923 57 0 days 00:00:00.104260720 +923 58 0 days 00:00:00.118354225 +923 59 0 days 00:00:00.073337235 +923 60 0 days 00:00:00.094141952 +923 61 0 days 00:00:00.089321010 +923 62 0 days 00:00:00.131613810 +923 63 0 days 00:00:00.132693080 +923 64 0 days 00:00:00.095226373 +923 65 0 days 00:00:00.100019300 +923 66 0 days 00:00:00.098589300 +923 67 0 days 00:00:00.113997745 +923 68 0 days 00:00:00.069105860 +923 69 0 days 00:00:00.081112700 +923 70 0 days 00:00:00.075985910 +923 71 0 days 00:00:00.085075860 +923 72 0 days 00:00:00.089141395 +923 73 0 days 00:00:00.070386055 +923 74 0 days 00:00:00.090082990 
+923 75 0 days 00:00:00.070479190 +923 76 0 days 00:00:00.070957080 +923 77 0 days 00:00:00.073512605 +923 78 0 days 00:00:00.081961785 +923 79 0 days 00:00:00.095162952 +923 80 0 days 00:00:00.090105925 +923 81 0 days 00:00:00.085123180 +923 82 0 days 00:00:00.076133245 +923 83 0 days 00:00:00.114538490 +923 84 0 days 00:00:00.084059390 +923 85 0 days 00:00:00.085772685 +923 86 0 days 00:00:00.074042120 +923 87 0 days 00:00:00.083577346 +923 88 0 days 00:00:00.086208055 +923 89 0 days 00:00:00.097674457 +923 90 0 days 00:00:00.098844225 +923 91 0 days 00:00:00.092428984 +923 92 0 days 00:00:00.128662124 +923 93 0 days 00:00:00.082203815 +923 94 0 days 00:00:00.125503210 +923 95 0 days 00:00:00.091672065 +923 96 0 days 00:00:00.089887780 +923 97 0 days 00:00:00.090287775 +923 98 0 days 00:00:00.114175235 +923 99 0 days 00:00:00.071240665 +923 100 0 days 00:00:00.070724365 +924 1 0 days 00:00:00.080186820 +924 2 0 days 00:00:00.125722040 +924 3 0 days 00:00:00.120379630 +924 4 0 days 00:00:00.079054995 +924 5 0 days 00:00:00.129487310 +924 6 0 days 00:00:00.111920710 +924 7 0 days 00:00:00.077970055 +924 8 0 days 00:00:00.088384380 +924 9 0 days 00:00:00.081939400 +924 10 0 days 00:00:00.093571780 +924 11 0 days 00:00:00.078032840 +924 12 0 days 00:00:00.075061560 +924 13 0 days 00:00:00.083875480 +924 14 0 days 00:00:00.121069450 +924 15 0 days 00:00:00.118969785 +924 16 0 days 00:00:00.096708676 +924 17 0 days 00:00:00.070882245 +924 18 0 days 00:00:00.117742405 +924 19 0 days 00:00:00.134464340 +924 20 0 days 00:00:00.076054670 +924 21 0 days 00:00:00.123454255 +924 22 0 days 00:00:00.085037415 +924 23 0 days 00:00:00.081288295 +924 24 0 days 00:00:00.093261020 +924 25 0 days 00:00:00.100180908 +924 26 0 days 00:00:00.098984310 +924 27 0 days 00:00:00.118488090 +924 28 0 days 00:00:00.124514310 +924 29 0 days 00:00:00.121636230 +924 30 0 days 00:00:00.091413780 +924 31 0 days 00:00:00.072395365 +924 32 0 days 00:00:00.069731550 +924 33 0 days 00:00:00.121160915 
+924 34 0 days 00:00:00.092664600 +924 35 0 days 00:00:00.117496186 +924 36 0 days 00:00:00.091102815 +924 37 0 days 00:00:00.069093555 +924 38 0 days 00:00:00.120781300 +924 39 0 days 00:00:00.128485460 +924 40 0 days 00:00:00.117518153 +924 41 0 days 00:00:00.084942210 +924 42 0 days 00:00:00.069179995 +924 43 0 days 00:00:00.082808875 +924 44 0 days 00:00:00.128780325 +924 45 0 days 00:00:00.079739121 +924 46 0 days 00:00:00.080702620 +924 47 0 days 00:00:00.120509520 +924 48 0 days 00:00:00.099784731 +924 49 0 days 00:00:00.079393310 +924 50 0 days 00:00:00.091192640 +924 51 0 days 00:00:00.082529183 +924 52 0 days 00:00:00.098747797 +924 53 0 days 00:00:00.129329730 +924 54 0 days 00:00:00.070566370 +924 55 0 days 00:00:00.131109360 +924 56 0 days 00:00:00.118138520 +924 57 0 days 00:00:00.097192948 +924 58 0 days 00:00:00.071281835 +924 59 0 days 00:00:00.127610024 +924 60 0 days 00:00:00.089142450 +924 61 0 days 00:00:00.069111020 +924 62 0 days 00:00:00.137762000 +924 63 0 days 00:00:00.093242123 +924 64 0 days 00:00:00.119823850 +924 65 0 days 00:00:00.079038532 +924 66 0 days 00:00:00.117473490 +924 67 0 days 00:00:00.119481790 +924 68 0 days 00:00:00.091646260 +924 69 0 days 00:00:00.101647502 +924 70 0 days 00:00:00.082120970 +924 71 0 days 00:00:00.083394975 +924 72 0 days 00:00:00.070122805 +924 73 0 days 00:00:00.090028190 +924 74 0 days 00:00:00.125681215 +924 75 0 days 00:00:00.109661813 +924 76 0 days 00:00:00.071072980 +924 77 0 days 00:00:00.082966130 +924 78 0 days 00:00:00.121633635 +924 79 0 days 00:00:00.118386970 +924 80 0 days 00:00:00.083137015 +924 81 0 days 00:00:00.081574855 +924 82 0 days 00:00:00.119514150 +924 83 0 days 00:00:00.086203365 +924 84 0 days 00:00:00.110831360 +924 85 0 days 00:00:00.118623520 +924 86 0 days 00:00:00.081516013 +924 87 0 days 00:00:00.081172620 +924 88 0 days 00:00:00.092248660 +924 89 0 days 00:00:00.100808640 +924 90 0 days 00:00:00.092446540 +924 91 0 days 00:00:00.108752465 +924 92 0 days 
00:00:00.071947913 +924 93 0 days 00:00:00.071868480 +924 94 0 days 00:00:00.123058780 +924 95 0 days 00:00:00.118552310 +924 96 0 days 00:00:00.118716260 +924 97 0 days 00:00:00.119403945 +924 98 0 days 00:00:00.090378208 +924 99 0 days 00:00:00.085381465 +924 100 0 days 00:00:00.092939185 +925 1 0 days 00:00:00.178124004 +925 2 0 days 00:00:00.189153540 +925 3 0 days 00:00:00.173567450 +925 4 0 days 00:00:00.137894040 +925 5 0 days 00:00:00.176148305 +925 6 0 days 00:00:00.182583915 +925 7 0 days 00:00:00.171847155 +925 8 0 days 00:00:00.147908105 +925 9 0 days 00:00:00.214814670 +925 10 0 days 00:00:00.200673775 +925 11 0 days 00:00:00.180482710 +925 12 0 days 00:00:00.173786785 +925 13 0 days 00:00:00.142383912 +925 14 0 days 00:00:00.295195475 +925 15 0 days 00:00:00.216275460 +925 16 0 days 00:00:00.278337270 +925 17 0 days 00:00:00.291520940 +925 18 0 days 00:00:00.265861292 +925 19 0 days 00:00:00.188283550 +925 20 0 days 00:00:00.169081740 +925 21 0 days 00:00:00.178745080 +925 22 0 days 00:00:00.155489275 +925 23 0 days 00:00:00.230541028 +925 24 0 days 00:00:00.170787451 +925 25 0 days 00:00:00.170294685 +925 26 0 days 00:00:00.181489630 +925 27 0 days 00:00:00.156006676 +925 28 0 days 00:00:00.151969805 +925 29 0 days 00:00:00.180112420 +925 30 0 days 00:00:00.196375064 +925 31 0 days 00:00:00.156519496 +925 32 0 days 00:00:00.248016730 +925 33 0 days 00:00:00.185033931 +925 34 0 days 00:00:00.230297780 +925 35 0 days 00:00:00.162080090 +925 36 0 days 00:00:00.207513880 +925 37 0 days 00:00:00.240364500 +925 38 0 days 00:00:00.241509990 +925 39 0 days 00:00:00.272323515 +925 40 0 days 00:00:00.157733735 +925 41 0 days 00:00:00.161017165 +925 42 0 days 00:00:00.250616544 +925 43 0 days 00:00:00.169596968 +925 44 0 days 00:00:00.245771192 +925 45 0 days 00:00:00.243554195 +925 46 0 days 00:00:00.129697835 +925 47 0 days 00:00:00.131863510 +925 48 0 days 00:00:00.217649955 +925 49 0 days 00:00:00.173928688 +925 50 0 days 00:00:00.241626650 +925 51 0 days 
00:00:00.228711885 +925 52 0 days 00:00:00.152493570 +925 53 0 days 00:00:00.156310915 +925 54 0 days 00:00:00.138868780 +925 55 0 days 00:00:00.166943805 +925 56 0 days 00:00:00.162026148 +925 57 0 days 00:00:00.152480020 +925 58 0 days 00:00:00.132038540 +925 59 0 days 00:00:00.229424285 +925 60 0 days 00:00:00.237326735 +925 61 0 days 00:00:00.128024650 +925 62 0 days 00:00:00.247136730 +925 63 0 days 00:00:00.222663850 +925 64 0 days 00:00:00.132710290 +925 65 0 days 00:00:00.128322795 +925 66 0 days 00:00:00.153990330 +925 67 0 days 00:00:00.240407928 +925 68 0 days 00:00:00.180574230 +925 69 0 days 00:00:00.225963725 +925 70 0 days 00:00:00.220790965 +925 71 0 days 00:00:00.234298030 +925 72 0 days 00:00:00.126631105 +925 73 0 days 00:00:00.155658430 +925 74 0 days 00:00:00.134105110 +925 75 0 days 00:00:00.248401740 +925 76 0 days 00:00:00.159880716 +925 77 0 days 00:00:00.223846850 +925 78 0 days 00:00:00.159157325 +925 79 0 days 00:00:00.177389830 +925 80 0 days 00:00:00.155681080 +925 81 0 days 00:00:00.156226630 +925 82 0 days 00:00:00.214254405 +925 83 0 days 00:00:00.153519885 +925 84 0 days 00:00:00.245513385 +925 85 0 days 00:00:00.155095080 +925 86 0 days 00:00:00.223652230 +925 87 0 days 00:00:00.236290728 +925 88 0 days 00:00:00.195182965 +925 89 0 days 00:00:00.220557610 +925 90 0 days 00:00:00.231123024 +925 91 0 days 00:00:00.179255936 +925 92 0 days 00:00:00.125704200 +925 93 0 days 00:00:00.204286670 +925 94 0 days 00:00:00.154920180 +925 95 0 days 00:00:00.159884705 +925 96 0 days 00:00:00.169569895 +925 97 0 days 00:00:00.219327555 +925 98 0 days 00:00:00.244002203 +925 99 0 days 00:00:00.231238948 +925 100 0 days 00:00:00.203888290 +926 1 0 days 00:00:00.163864235 +926 2 0 days 00:00:00.164893405 +926 3 0 days 00:00:00.177076053 +926 4 0 days 00:00:00.100551250 +926 5 0 days 00:00:00.097666052 +926 6 0 days 00:00:00.114863997 +926 7 0 days 00:00:00.161803075 +926 8 0 days 00:00:00.096925946 +926 9 0 days 00:00:00.182025370 +926 10 0 days 
00:00:00.121876727 +926 11 0 days 00:00:00.098977920 +926 12 0 days 00:00:00.180502431 +926 13 0 days 00:00:00.082787095 +926 14 0 days 00:00:00.193229345 +926 15 0 days 00:00:00.170313548 +926 16 0 days 00:00:00.088785915 +926 17 0 days 00:00:00.087696720 +926 18 0 days 00:00:00.164038960 +926 19 0 days 00:00:00.117740170 +926 20 0 days 00:00:00.079154563 +926 21 0 days 00:00:00.085979160 +926 22 0 days 00:00:00.094887170 +926 23 0 days 00:00:00.094602475 +926 24 0 days 00:00:00.137410795 +926 25 0 days 00:00:00.078389696 +926 26 0 days 00:00:00.109429760 +926 27 0 days 00:00:00.085227095 +926 28 0 days 00:00:00.098276415 +926 29 0 days 00:00:00.174316633 +926 30 0 days 00:00:00.156401625 +926 31 0 days 00:00:00.123626995 +926 32 0 days 00:00:00.104881220 +926 33 0 days 00:00:00.105758746 +926 34 0 days 00:00:00.172100036 +926 35 0 days 00:00:00.105868908 +926 36 0 days 00:00:00.154864275 +926 37 0 days 00:00:00.106729576 +926 38 0 days 00:00:00.118178665 +926 39 0 days 00:00:00.121360160 +926 40 0 days 00:00:00.101666148 +926 41 0 days 00:00:00.154869275 +926 42 0 days 00:00:00.141004755 +926 43 0 days 00:00:00.105267984 +926 44 0 days 00:00:00.121492200 +926 45 0 days 00:00:00.151440500 +926 46 0 days 00:00:00.171997605 +926 47 0 days 00:00:00.101147240 +926 48 0 days 00:00:00.113038755 +926 49 0 days 00:00:00.162301630 +926 50 0 days 00:00:00.167374280 +926 51 0 days 00:00:00.094339700 +926 52 0 days 00:00:00.080919435 +926 53 0 days 00:00:00.100468250 +926 54 0 days 00:00:00.122250320 +926 55 0 days 00:00:00.088850108 +926 56 0 days 00:00:00.171475884 +926 57 0 days 00:00:00.163530540 +926 58 0 days 00:00:00.103306455 +926 59 0 days 00:00:00.104806985 +926 60 0 days 00:00:00.184983068 +926 61 0 days 00:00:00.163577525 +926 62 0 days 00:00:00.090589508 +926 63 0 days 00:00:00.082260055 +926 64 0 days 00:00:00.097161888 +926 65 0 days 00:00:00.077028404 +926 66 0 days 00:00:00.112276596 +926 67 0 days 00:00:00.092240100 +926 68 0 days 00:00:00.084336153 +926 69 
0 days 00:00:00.091210470 +926 70 0 days 00:00:00.105206225 +926 71 0 days 00:00:00.133221492 +926 72 0 days 00:00:00.074961475 +926 73 0 days 00:00:00.152121380 +926 74 0 days 00:00:00.157180000 +926 75 0 days 00:00:00.105969730 +926 76 0 days 00:00:00.099222380 +926 77 0 days 00:00:00.104601510 +926 78 0 days 00:00:00.174317215 +926 79 0 days 00:00:00.130759745 +926 80 0 days 00:00:00.116179170 +926 81 0 days 00:00:00.170050270 +926 82 0 days 00:00:00.100341970 +926 83 0 days 00:00:00.106634815 +926 84 0 days 00:00:00.119348182 +926 85 0 days 00:00:00.100442265 +926 86 0 days 00:00:00.103442832 +926 87 0 days 00:00:00.169755856 +926 88 0 days 00:00:00.086772700 +926 89 0 days 00:00:00.156588515 +926 90 0 days 00:00:00.098904855 +926 91 0 days 00:00:00.092607398 +926 92 0 days 00:00:00.099532831 +926 93 0 days 00:00:00.091371600 +926 94 0 days 00:00:00.152349130 +926 95 0 days 00:00:00.156586125 +926 96 0 days 00:00:00.081474035 +926 97 0 days 00:00:00.154750500 +926 98 0 days 00:00:00.096568875 +926 99 0 days 00:00:00.078761680 +926 100 0 days 00:00:00.155478636 +927 1 0 days 00:00:01.827800930 +927 2 0 days 00:00:00.652292080 +927 3 0 days 00:00:00.650681085 +927 4 0 days 00:00:03.760647020 +927 5 0 days 00:00:00.392555365 +927 6 0 days 00:00:00.676577944 +927 7 0 days 00:00:04.462056330 +927 8 0 days 00:00:00.724705595 +927 9 0 days 00:00:02.157363700 +927 10 0 days 00:00:01.148459576 +927 11 0 days 00:00:01.676795215 +927 12 0 days 00:00:01.912087635 +927 13 0 days 00:00:00.487998815 +927 14 0 days 00:00:00.798570668 +927 15 0 days 00:00:00.588512627 +927 16 0 days 00:00:01.098251620 +927 17 0 days 00:00:01.136222130 +927 18 0 days 00:00:01.466819240 +927 19 0 days 00:00:00.552665850 +927 20 0 days 00:00:03.911760250 +927 21 0 days 00:00:03.037371305 +927 22 0 days 00:00:00.709710445 +927 23 0 days 00:00:02.027734316 +927 24 0 days 00:00:01.255776757 +927 25 0 days 00:00:01.192492805 +927 26 0 days 00:00:02.734225175 +927 27 0 days 00:00:02.097988460 +927 28 0 
days 00:00:02.601707780 +927 29 0 days 00:00:00.711083630 +927 30 0 days 00:00:08.931485470 +927 31 0 days 00:00:01.053422360 +927 32 0 days 00:00:00.656282600 +927 33 0 days 00:00:10.294100965 +927 34 0 days 00:00:00.585261845 +927 35 0 days 00:00:02.473552872 +927 36 0 days 00:00:01.249230120 +927 37 0 days 00:00:01.173567295 +927 38 0 days 00:00:05.191539645 +927 39 0 days 00:00:00.640430190 +927 40 0 days 00:00:04.191162700 +927 41 0 days 00:00:00.913722435 +927 42 0 days 00:00:00.487704200 +927 43 0 days 00:00:05.294177330 +927 44 0 days 00:00:03.569953900 +927 45 0 days 00:00:01.830716110 +927 46 0 days 00:00:01.275579493 +927 47 0 days 00:00:02.911889165 +927 48 0 days 00:00:01.193673960 +927 49 0 days 00:00:04.069152930 +927 50 0 days 00:00:02.835677415 +927 51 0 days 00:00:01.024821795 +927 52 0 days 00:00:00.738089385 +927 53 0 days 00:00:00.967834180 +927 54 0 days 00:00:00.700680645 +927 55 0 days 00:00:01.852677543 +927 56 0 days 00:00:05.957136455 +927 57 0 days 00:00:01.906891925 +927 58 0 days 00:00:04.447728176 +927 59 0 days 00:00:00.482786750 +927 60 0 days 00:00:02.950577160 +927 61 0 days 00:00:02.908917620 +927 62 0 days 00:00:02.267129610 +927 63 0 days 00:00:00.544070772 +927 64 0 days 00:00:04.284028820 +927 65 0 days 00:00:01.493432837 +927 66 0 days 00:00:00.337233860 +927 67 0 days 00:00:05.057928450 +927 68 0 days 00:00:01.152903925 +927 69 0 days 00:00:01.349922300 +927 70 0 days 00:00:00.902588515 +927 71 0 days 00:00:00.585678345 +927 72 0 days 00:00:03.804031016 +927 73 0 days 00:00:01.667807980 +927 74 0 days 00:00:00.377518325 +927 75 0 days 00:00:03.439612815 +927 76 0 days 00:00:02.013850645 +927 77 0 days 00:00:03.274406223 +927 78 0 days 00:00:03.198328370 +927 79 0 days 00:00:02.089662548 +927 80 0 days 00:00:02.342406410 +927 81 0 days 00:00:01.061863910 +927 82 0 days 00:00:03.660348660 +927 83 0 days 00:00:02.930647515 +927 84 0 days 00:00:01.015667437 +927 85 0 days 00:00:02.571550060 +927 86 0 days 00:00:00.491606865 
+927 87 0 days 00:00:02.478708530 +927 88 0 days 00:00:00.533738680 +927 89 0 days 00:00:00.469064582 +927 90 0 days 00:00:00.522272645 +927 91 0 days 00:00:02.558083185 +927 92 0 days 00:00:01.862103340 +927 93 0 days 00:00:09.812898935 +927 94 0 days 00:00:01.759880185 +927 95 0 days 00:00:01.835206465 +927 96 0 days 00:00:00.862231800 +927 97 0 days 00:00:02.445941670 +927 98 0 days 00:00:01.671937150 +927 99 0 days 00:00:10.600854025 +927 100 0 days 00:00:00.409798935 +928 1 0 days 00:00:00.793497806 +928 2 0 days 00:00:01.101047290 +928 3 0 days 00:00:02.016312645 +928 4 0 days 00:00:00.823877900 +928 5 0 days 00:00:04.343408080 +928 6 0 days 00:00:01.548741345 +928 7 0 days 00:00:00.521897660 +928 8 0 days 00:00:00.622889825 +928 9 0 days 00:00:00.708384725 +928 10 0 days 00:00:02.465934565 +928 11 0 days 00:00:01.086966470 +928 12 0 days 00:00:02.162374960 +928 13 0 days 00:00:04.048526280 +928 14 0 days 00:00:09.010784640 +928 15 0 days 00:00:02.111979260 +928 16 0 days 00:00:01.524733824 +928 17 0 days 00:00:00.877682625 +928 18 0 days 00:00:00.574812510 +928 19 0 days 00:00:02.355791505 +928 20 0 days 00:00:06.071514170 +928 21 0 days 00:00:00.829199960 +928 22 0 days 00:00:01.001086850 +928 23 0 days 00:00:00.764452945 +928 24 0 days 00:00:01.303134300 +928 25 0 days 00:00:02.240070150 +928 26 0 days 00:00:04.018634135 +928 27 0 days 00:00:01.829555275 +928 28 0 days 00:00:02.498507876 +928 29 0 days 00:00:00.959418280 +928 30 0 days 00:00:04.157722105 +928 31 0 days 00:00:00.734076634 +928 32 0 days 00:00:03.759554235 +928 33 0 days 00:00:01.462616425 +928 34 0 days 00:00:00.960568520 +928 35 0 days 00:00:03.601898160 +928 36 0 days 00:00:02.255340165 +928 37 0 days 00:00:01.522092406 +928 38 0 days 00:00:01.163816825 +928 39 0 days 00:00:01.111676345 +928 40 0 days 00:00:02.651721800 +928 41 0 days 00:00:01.019119443 +928 42 0 days 00:00:03.039838520 +928 43 0 days 00:00:02.348692273 +928 44 0 days 00:00:01.360399905 +928 45 0 days 00:00:00.561775490 
+928 46 0 days 00:00:01.752085120 +928 47 0 days 00:00:02.740579220 +928 48 0 days 00:00:02.588253416 +928 49 0 days 00:00:01.166948345 +928 50 0 days 00:00:03.398657975 +928 51 0 days 00:00:00.612331152 +928 52 0 days 00:00:00.392213035 +928 53 0 days 00:00:00.586489330 +928 54 0 days 00:00:01.763579235 +928 55 0 days 00:00:01.202935536 +928 56 0 days 00:00:01.097183030 +928 57 0 days 00:00:00.857084656 +928 58 0 days 00:00:01.247174370 +928 59 0 days 00:00:00.540093352 +928 60 0 days 00:00:03.537860320 +928 61 0 days 00:00:03.577989700 +928 62 0 days 00:00:02.891447915 +928 63 0 days 00:00:01.122602530 +928 64 0 days 00:00:01.145318525 +928 65 0 days 00:00:03.082283800 +928 66 0 days 00:00:02.010824670 +928 67 0 days 00:00:01.122557905 +928 68 0 days 00:00:01.595634955 +928 69 0 days 00:00:02.981830860 +928 70 0 days 00:00:02.074432780 +928 71 0 days 00:00:00.716436025 +928 72 0 days 00:00:03.126970600 +928 73 0 days 00:00:00.807853923 +928 74 0 days 00:00:01.394793060 +928 75 0 days 00:00:00.720827725 +928 76 0 days 00:00:01.860089890 +928 77 0 days 00:00:02.809656515 +928 78 0 days 00:00:01.153085615 +928 79 0 days 00:00:01.480987060 +928 80 0 days 00:00:10.310638435 +928 81 0 days 00:00:01.278573392 +928 82 0 days 00:00:01.531933385 +928 83 0 days 00:00:03.120617275 +928 84 0 days 00:00:03.701505100 +928 85 0 days 00:00:01.257772650 +928 86 0 days 00:00:02.248630500 +928 87 0 days 00:00:00.995638590 +928 88 0 days 00:00:02.469480295 +928 89 0 days 00:00:04.804594891 +928 90 0 days 00:00:01.583620612 +928 91 0 days 00:00:01.335421665 +928 92 0 days 00:00:00.856396405 +928 93 0 days 00:00:01.044613580 +928 94 0 days 00:00:01.122923410 +928 95 0 days 00:00:01.720381255 +928 96 0 days 00:00:01.368849205 +928 97 0 days 00:00:01.384423600 +928 98 0 days 00:00:01.443129170 +928 99 0 days 00:00:01.143071795 +928 100 0 days 00:00:00.772312600 +929 1 0 days 00:00:01.434349715 +929 2 0 days 00:00:00.775223690 +929 3 0 days 00:00:01.061979960 +929 4 0 days 
00:00:01.094394870 +929 5 0 days 00:00:00.835691985 +929 6 0 days 00:00:00.313549292 +929 7 0 days 00:00:00.765344250 +929 8 0 days 00:00:04.152613440 +929 9 0 days 00:00:00.828068576 +929 10 0 days 00:00:00.899025850 +929 11 0 days 00:00:03.540508895 +929 12 0 days 00:00:00.994019535 +929 13 0 days 00:00:00.606545110 +929 14 0 days 00:00:02.161946080 +929 15 0 days 00:00:01.243255451 +929 16 0 days 00:00:00.905206395 +929 17 0 days 00:00:00.724107045 +929 18 0 days 00:00:03.310820740 +929 19 0 days 00:00:00.863081376 +929 20 0 days 00:00:00.528666955 +929 21 0 days 00:00:00.679547385 +929 22 0 days 00:00:00.496822445 +929 23 0 days 00:00:00.858394735 +929 24 0 days 00:00:00.481273010 +929 25 0 days 00:00:00.497263590 +929 26 0 days 00:00:00.765527745 +929 27 0 days 00:00:00.731198390 +929 28 0 days 00:00:00.644816584 +929 29 0 days 00:00:00.520350765 +929 30 0 days 00:00:01.351280270 +929 31 0 days 00:00:00.767236060 +929 32 0 days 00:00:00.558077450 +929 33 0 days 00:00:00.483502185 +929 34 0 days 00:00:01.851510920 +929 35 0 days 00:00:04.326215864 +929 36 0 days 00:00:00.592124705 +929 37 0 days 00:00:00.898730035 +929 38 0 days 00:00:01.343451316 +929 39 0 days 00:00:00.342839792 +929 40 0 days 00:00:00.912944390 +929 41 0 days 00:00:01.210972295 +929 42 0 days 00:00:00.646908245 +929 43 0 days 00:00:00.768263272 +929 44 0 days 00:00:00.615912900 +929 45 0 days 00:00:02.356562028 +929 46 0 days 00:00:00.675997985 +929 47 0 days 00:00:00.703934445 +929 48 0 days 00:00:00.841759035 +929 49 0 days 00:00:00.656213080 +929 50 0 days 00:00:00.601052265 +929 51 0 days 00:00:00.444608175 +929 52 0 days 00:00:00.421979145 +929 53 0 days 00:00:00.641511692 +929 54 0 days 00:00:00.804594073 +929 55 0 days 00:00:00.929008275 +929 56 0 days 00:00:00.981658845 +929 57 0 days 00:00:01.430754715 +929 58 0 days 00:00:00.492033230 +929 59 0 days 00:00:00.584949620 +929 60 0 days 00:00:00.536567264 +929 61 0 days 00:00:00.430416380 +929 62 0 days 00:00:00.626627620 +929 63 0 
days 00:00:03.395385205 +929 64 0 days 00:00:00.722591305 +929 65 0 days 00:00:00.617204625 +929 66 0 days 00:00:00.390721812 +929 67 0 days 00:00:00.233632590 +929 68 0 days 00:00:00.482852616 +929 69 0 days 00:00:00.248447855 +929 70 0 days 00:00:00.401966548 +929 71 0 days 00:00:00.485957415 +929 72 0 days 00:00:00.369160126 +929 73 0 days 00:00:00.480537160 +929 74 0 days 00:00:00.511865992 +929 75 0 days 00:00:03.068190180 +929 76 0 days 00:00:01.489843975 +929 77 0 days 00:00:00.484887530 +929 78 0 days 00:00:00.330545125 +929 79 0 days 00:00:02.788081625 +929 80 0 days 00:00:00.171965157 +929 81 0 days 00:00:00.427631876 +929 82 0 days 00:00:00.670276845 +929 83 0 days 00:00:01.042910135 +929 84 0 days 00:00:01.099718410 +929 85 0 days 00:00:01.695737375 +929 86 0 days 00:00:01.245278655 +929 87 0 days 00:00:00.743869090 +929 88 0 days 00:00:01.210525415 +929 89 0 days 00:00:00.768545675 +929 90 0 days 00:00:00.175451550 +929 91 0 days 00:00:00.612614030 +929 92 0 days 00:00:00.883905920 +929 93 0 days 00:00:00.674879645 +929 94 0 days 00:00:00.511868328 +929 95 0 days 00:00:00.648992796 +929 96 0 days 00:00:00.466129395 +929 97 0 days 00:00:00.922421040 +929 98 0 days 00:00:01.005087030 +929 99 0 days 00:00:00.211671036 +929 100 0 days 00:00:04.245992604 +930 1 0 days 00:00:00.348794455 +930 2 0 days 00:00:00.669098980 +930 3 0 days 00:00:00.518556405 +930 4 0 days 00:00:01.458480926 +930 5 0 days 00:00:01.263607200 +930 6 0 days 00:00:00.181684770 +930 7 0 days 00:00:01.097689396 +930 8 0 days 00:00:01.251296100 +930 9 0 days 00:00:02.012923172 +930 10 0 days 00:00:00.540056770 +930 11 0 days 00:00:00.362821790 +930 12 0 days 00:00:00.717417265 +930 13 0 days 00:00:02.133582645 +930 14 0 days 00:00:00.580960800 +930 15 0 days 00:00:00.196031756 +930 16 0 days 00:00:00.432759532 +930 17 0 days 00:00:03.696095445 +930 18 0 days 00:00:00.586414120 +930 19 0 days 00:00:02.104485263 +930 20 0 days 00:00:00.305248140 +930 21 0 days 00:00:02.203088575 +930 22 0 
days 00:00:00.965421750 +930 23 0 days 00:00:00.445106360 +930 24 0 days 00:00:00.758830440 +930 25 0 days 00:00:00.643010152 +930 26 0 days 00:00:00.384921910 +930 27 0 days 00:00:01.126124460 +930 28 0 days 00:00:01.533423028 +930 29 0 days 00:00:01.085118660 +930 30 0 days 00:00:01.844947875 +930 31 0 days 00:00:03.484019865 +930 32 0 days 00:00:00.303247000 +930 33 0 days 00:00:01.748866506 +930 34 0 days 00:00:00.349587716 +930 35 0 days 00:00:00.685898535 +930 36 0 days 00:00:00.759141956 +930 37 0 days 00:00:00.321079925 +930 38 0 days 00:00:01.736371992 +930 39 0 days 00:00:00.360961404 +930 40 0 days 00:00:00.367307090 +930 41 0 days 00:00:00.946389673 +930 42 0 days 00:00:00.806392706 +930 43 0 days 00:00:00.610289775 +930 44 0 days 00:00:03.612983690 +930 45 0 days 00:00:02.230162755 +930 46 0 days 00:00:00.859611295 +930 47 0 days 00:00:00.310798788 +930 48 0 days 00:00:03.803054870 +930 49 0 days 00:00:00.226694765 +930 50 0 days 00:00:00.285125540 +930 51 0 days 00:00:00.509805566 +930 52 0 days 00:00:01.423677195 +930 53 0 days 00:00:00.573310348 +930 54 0 days 00:00:00.187268428 +930 55 0 days 00:00:02.365994392 +930 56 0 days 00:00:00.941466240 +930 57 0 days 00:00:00.824951075 +930 58 0 days 00:00:01.005739800 +930 59 0 days 00:00:00.653823652 +930 60 0 days 00:00:01.019225893 +930 61 0 days 00:00:00.401918752 +930 62 0 days 00:00:00.531527004 +930 63 0 days 00:00:00.629075210 +930 64 0 days 00:00:00.611394000 +930 65 0 days 00:00:00.513931415 +930 66 0 days 00:00:01.093764905 +930 67 0 days 00:00:00.480974075 +930 68 0 days 00:00:00.616306340 +930 69 0 days 00:00:01.864436812 +930 70 0 days 00:00:01.161470910 +930 71 0 days 00:00:01.284501805 +930 72 0 days 00:00:00.771294885 +930 73 0 days 00:00:00.948153780 +930 74 0 days 00:00:00.790133080 +930 75 0 days 00:00:00.351181415 +930 76 0 days 00:00:00.988395095 +930 77 0 days 00:00:00.556961325 +930 78 0 days 00:00:00.712633016 +930 79 0 days 00:00:00.443431470 +930 80 0 days 00:00:00.778705985 
+930 81 0 days 00:00:00.386641140 +930 82 0 days 00:00:02.011954665 +930 83 0 days 00:00:01.045038305 +930 84 0 days 00:00:00.715795840 +930 85 0 days 00:00:00.514553600 +930 86 0 days 00:00:02.247165335 +930 87 0 days 00:00:00.237828300 +930 88 0 days 00:00:02.004179260 +930 89 0 days 00:00:01.027117140 +930 90 0 days 00:00:01.333195625 +930 91 0 days 00:00:01.483288870 +930 92 0 days 00:00:00.799246344 +930 93 0 days 00:00:01.634912630 +930 94 0 days 00:00:00.513182880 +930 95 0 days 00:00:00.548157075 +930 96 0 days 00:00:00.529724740 +930 97 0 days 00:00:02.489763917 +930 98 0 days 00:00:01.683478290 +930 99 0 days 00:00:04.159027605 +930 100 0 days 00:00:01.127434920 +931 1 0 days 00:00:03.879432825 +931 2 0 days 00:00:00.940949620 +931 3 0 days 00:00:02.963380503 +931 4 0 days 00:00:09.080776682 +931 5 0 days 00:00:02.618103240 +931 6 0 days 00:00:02.511598020 +931 7 0 days 00:00:01.560878990 +931 8 0 days 00:00:01.713546006 +931 9 0 days 00:00:11.166354100 +931 10 0 days 00:00:00.942153605 +931 11 0 days 00:00:04.557378975 +931 12 0 days 00:00:04.045923245 +931 13 0 days 00:00:02.722695806 +931 14 0 days 00:00:01.529370253 +931 15 0 days 00:00:04.054458565 +931 16 0 days 00:00:01.870045620 +931 17 0 days 00:00:03.834867536 +931 18 0 days 00:00:02.664987628 +931 19 0 days 00:00:09.791274932 +931 20 0 days 00:00:02.485252118 +931 21 0 days 00:00:01.676723328 +931 22 0 days 00:00:14.028933305 +931 23 0 days 00:00:01.509432180 +931 24 0 days 00:00:07.495171925 +931 25 0 days 00:00:12.897873548 +931 26 0 days 00:00:04.341303500 +931 27 0 days 00:00:02.479740433 +931 28 0 days 00:00:02.701757806 +931 29 0 days 00:00:01.177276320 +931 30 0 days 00:00:01.407062810 +931 31 0 days 00:00:01.180328530 +931 32 0 days 00:00:00.649230546 +931 33 0 days 00:00:02.575979046 +931 34 0 days 00:00:00.870633665 +931 35 0 days 00:00:04.169706600 +931 36 0 days 00:00:03.271486428 +931 37 0 days 00:00:01.343253426 +931 38 0 days 00:00:02.384608642 +931 39 0 days 00:00:01.007669224 
+931 40 0 days 00:00:02.158728505 +931 41 0 days 00:00:01.796047213 +931 42 0 days 00:00:02.366942364 +931 43 0 days 00:00:02.045789180 +931 44 0 days 00:00:02.705346328 +931 45 0 days 00:00:07.447827770 +931 46 0 days 00:00:01.676722246 +931 47 0 days 00:00:08.843816663 +931 48 0 days 00:00:02.935699376 +931 49 0 days 00:00:01.828921595 +931 50 0 days 00:00:01.131837180 +931 51 0 days 00:00:09.265512060 +931 52 0 days 00:00:04.112787793 +931 53 0 days 00:00:04.057091145 +931 54 0 days 00:00:01.729558530 +931 55 0 days 00:00:02.182248520 +931 56 0 days 00:00:01.719431630 +931 57 0 days 00:00:00.689547980 +931 58 0 days 00:00:00.939280240 +931 59 0 days 00:00:02.965534352 +931 60 0 days 00:00:02.563419526 +931 61 0 days 00:00:04.032919890 +931 62 0 days 00:00:05.277710332 +931 63 0 days 00:00:04.155289860 +931 64 0 days 00:00:17.797604620 +931 65 0 days 00:00:02.503762810 +931 66 0 days 00:00:02.576580135 +931 67 0 days 00:00:02.427463546 +931 68 0 days 00:00:02.840594374 +931 69 0 days 00:00:01.576087473 +931 70 0 days 00:00:03.476007490 +931 71 0 days 00:00:12.516071355 +931 72 0 days 00:00:04.826354653 +931 73 0 days 00:00:01.628102720 +931 74 0 days 00:00:03.573170450 +931 75 0 days 00:00:02.587084333 +931 76 0 days 00:00:20.428771088 +931 77 0 days 00:00:17.527590306 +931 78 0 days 00:00:03.844422000 +931 79 0 days 00:00:01.831418200 +931 80 0 days 00:00:02.881724690 +931 81 0 days 00:00:20.085102076 +931 82 0 days 00:00:04.842457115 +931 83 0 days 00:00:03.792427405 +931 84 0 days 00:00:01.610909430 +931 85 0 days 00:00:01.200600790 +931 86 0 days 00:00:03.872210120 +931 87 0 days 00:00:00.954702471 +931 88 0 days 00:00:13.583016190 +932 1 0 days 00:00:07.113674876 +932 2 0 days 00:00:00.765298046 +932 3 0 days 00:00:01.008733105 +932 4 0 days 00:00:01.119650905 +932 5 0 days 00:00:01.759502675 +932 6 0 days 00:00:01.902073060 +932 7 0 days 00:00:01.710395160 +932 8 0 days 00:00:01.552657540 +932 9 0 days 00:00:02.111238395 +932 10 0 days 00:00:01.670485411 
+932 11 0 days 00:00:05.007768853 +932 12 0 days 00:00:00.349603575 +932 13 0 days 00:00:01.468872450 +932 14 0 days 00:00:01.569941600 +932 15 0 days 00:00:06.284903415 +932 16 0 days 00:00:00.386401696 +932 17 0 days 00:00:00.929288595 +932 18 0 days 00:00:02.940520868 +932 19 0 days 00:00:02.683317955 +932 20 0 days 00:00:01.645825308 +932 21 0 days 00:00:01.446625568 +932 22 0 days 00:00:01.214957390 +932 23 0 days 00:00:01.085165845 +932 24 0 days 00:00:02.102153550 +932 25 0 days 00:00:01.312608075 +932 26 0 days 00:00:01.219778860 +932 27 0 days 00:00:00.842684175 +932 28 0 days 00:00:00.453559970 +932 29 0 days 00:00:00.773310380 +932 30 0 days 00:00:02.085380633 +932 31 0 days 00:00:01.026573748 +932 32 0 days 00:00:01.382421120 +932 33 0 days 00:00:03.080847670 +932 34 0 days 00:00:00.854730060 +932 35 0 days 00:00:02.961423240 +932 36 0 days 00:00:03.303532750 +932 37 0 days 00:00:01.044692713 +932 38 0 days 00:00:02.255069686 +932 39 0 days 00:00:01.066640473 +932 40 0 days 00:00:01.739637990 +932 41 0 days 00:00:01.588195606 +932 42 0 days 00:00:00.542750735 +932 43 0 days 00:00:04.080628095 +932 44 0 days 00:00:04.983284256 +932 45 0 days 00:00:02.527592346 +932 46 0 days 00:00:00.338962390 +932 47 0 days 00:00:00.710390900 +932 48 0 days 00:00:02.095086445 +932 49 0 days 00:00:02.267813585 +932 50 0 days 00:00:01.392949348 +932 51 0 days 00:00:02.249707006 +932 52 0 days 00:00:03.688476870 +932 53 0 days 00:00:00.771583660 +932 54 0 days 00:00:00.305974888 +932 55 0 days 00:00:00.427154440 +932 56 0 days 00:00:02.172178368 +932 57 0 days 00:00:01.495564693 +932 58 0 days 00:00:00.873235280 +932 59 0 days 00:00:01.879125940 +932 60 0 days 00:00:00.759840465 +932 61 0 days 00:00:00.747353280 +932 62 0 days 00:00:00.322334852 +932 63 0 days 00:00:00.980625334 +932 64 0 days 00:00:02.289311305 +932 65 0 days 00:00:00.548736891 +932 66 0 days 00:00:02.536688426 +932 67 0 days 00:00:01.138081025 +932 68 0 days 00:00:01.128923604 +932 69 0 days 
00:00:02.572084410 +932 70 0 days 00:00:00.583897844 +932 71 0 days 00:00:01.732161000 +932 72 0 days 00:00:02.206675685 +932 73 0 days 00:00:01.736569836 +932 74 0 days 00:00:00.891853260 +932 75 0 days 00:00:01.175958724 +932 76 0 days 00:00:05.939558516 +932 77 0 days 00:00:01.063695945 +932 78 0 days 00:00:02.035321565 +932 79 0 days 00:00:03.759255125 +932 80 0 days 00:00:00.423711180 +932 81 0 days 00:00:00.410828032 +932 82 0 days 00:00:00.886722185 +932 83 0 days 00:00:06.519273166 +932 84 0 days 00:00:01.177027540 +932 85 0 days 00:00:01.694873240 +932 86 0 days 00:00:00.541651952 +932 87 0 days 00:00:01.129942164 +932 88 0 days 00:00:00.637266400 +932 89 0 days 00:00:01.434600153 +932 90 0 days 00:00:00.343566912 +932 91 0 days 00:00:01.199804912 +932 92 0 days 00:00:01.372233093 +932 93 0 days 00:00:01.704232960 +932 94 0 days 00:00:00.601656192 +932 95 0 days 00:00:03.970271095 +932 96 0 days 00:00:03.885567600 +932 97 0 days 00:00:01.044529770 +932 98 0 days 00:00:00.324344815 +932 99 0 days 00:00:00.431007540 +932 100 0 days 00:00:01.193646713 +933 1 0 days 00:00:11.530005665 +933 2 0 days 00:00:01.968854170 +933 3 0 days 00:00:02.042576430 +933 4 0 days 00:00:01.283909463 +933 5 0 days 00:00:08.085857448 +933 6 0 days 00:00:03.881570460 +933 7 0 days 00:00:04.373120868 +933 8 0 days 00:00:01.085836960 +933 9 0 days 00:00:03.829197895 +933 10 0 days 00:00:05.754788305 +933 11 0 days 00:00:09.101165693 +933 13 0 days 00:00:00.824892580 +933 14 0 days 00:00:04.177933300 +933 15 0 days 00:00:02.677911303 +933 16 0 days 00:00:01.504085385 +933 17 0 days 00:00:01.551838084 +933 18 0 days 00:00:09.601095280 +933 19 0 days 00:00:02.760691384 +933 20 0 days 00:00:05.150312075 +933 21 0 days 00:00:12.483542884 +933 22 0 days 00:00:02.228236808 +933 23 0 days 00:00:10.386580345 +933 24 0 days 00:00:02.576936853 +933 25 0 days 00:00:02.976792488 +933 26 0 days 00:00:01.120762440 +933 27 0 days 00:00:01.922854004 +933 28 0 days 00:00:00.967576402 +933 29 0 days 
00:00:01.851071766 +933 30 0 days 00:00:00.651107895 +933 31 0 days 00:00:04.679378462 +933 32 0 days 00:00:05.905921376 +933 33 0 days 00:00:07.382466702 +933 34 0 days 00:00:01.942958473 +933 35 0 days 00:00:05.376523773 +933 36 0 days 00:00:03.324588620 +933 37 0 days 00:00:02.344826366 +933 38 0 days 00:00:05.760457654 +933 39 0 days 00:00:04.999836688 +933 40 0 days 00:00:02.847429345 +933 41 0 days 00:00:05.632855617 +933 42 0 days 00:00:01.205321080 +933 43 0 days 00:00:01.960564060 +933 44 0 days 00:00:08.087446060 +933 45 0 days 00:00:02.719919425 +933 46 0 days 00:00:05.928221280 +933 47 0 days 00:00:13.504398912 +933 48 0 days 00:00:02.317922570 +933 49 0 days 00:00:01.965037450 +933 50 0 days 00:00:01.301396456 +933 51 0 days 00:00:02.788619680 +933 52 0 days 00:00:00.985356225 +933 53 0 days 00:00:01.700559984 +933 54 0 days 00:00:01.784111750 +933 55 0 days 00:00:04.481799716 +933 56 0 days 00:00:06.389071503 +933 57 0 days 00:00:09.201068205 +933 58 0 days 00:00:07.064207060 +933 59 0 days 00:00:01.120718216 +933 60 0 days 00:00:02.137231094 +933 61 0 days 00:00:02.487643408 +933 62 0 days 00:00:00.684268480 +933 63 0 days 00:00:07.563056100 +933 64 0 days 00:00:14.382486412 +933 65 0 days 00:00:04.078293757 +933 66 0 days 00:00:02.375119830 +933 67 0 days 00:00:02.219781055 +933 68 0 days 00:00:02.697712177 +933 69 0 days 00:00:02.553728960 +933 70 0 days 00:00:04.464907940 +933 71 0 days 00:00:01.422396106 +933 72 0 days 00:00:03.633434931 +933 73 0 days 00:00:02.863961850 +933 74 0 days 00:00:02.870212625 +933 75 0 days 00:00:04.417979688 +933 76 0 days 00:00:01.287626117 +934 1 0 days 00:00:00.414494128 +934 2 0 days 00:00:01.071037855 +934 3 0 days 00:00:00.731830945 +934 4 0 days 00:00:01.172595540 +934 5 0 days 00:00:00.577935480 +934 6 0 days 00:00:00.619821444 +934 7 0 days 00:00:01.839887000 +934 8 0 days 00:00:02.570739764 +934 9 0 days 00:00:00.687795723 +934 10 0 days 00:00:01.891386066 +934 11 0 days 00:00:01.418534900 +934 12 0 days 
00:00:04.298361156 +934 13 0 days 00:00:00.394241460 +934 14 0 days 00:00:00.549552135 +934 15 0 days 00:00:02.079956675 +934 16 0 days 00:00:07.522845056 +934 17 0 days 00:00:00.934761547 +934 18 0 days 00:00:02.447740455 +934 19 0 days 00:00:00.910100415 +934 20 0 days 00:00:00.823162533 +934 21 0 days 00:00:03.523002910 +934 22 0 days 00:00:00.857067576 +934 23 0 days 00:00:00.439850051 +934 24 0 days 00:00:02.548727808 +934 25 0 days 00:00:00.434215842 +934 26 0 days 00:00:03.980227240 +934 27 0 days 00:00:00.911479377 +934 28 0 days 00:00:00.988078846 +934 29 0 days 00:00:00.844236990 +934 30 0 days 00:00:00.313124440 +934 31 0 days 00:00:01.655894185 +934 32 0 days 00:00:00.816812370 +934 33 0 days 00:00:02.149382115 +934 34 0 days 00:00:02.321878330 +934 35 0 days 00:00:01.475337435 +934 36 0 days 00:00:02.623343120 +934 37 0 days 00:00:00.889482606 +934 38 0 days 00:00:01.705022340 +934 39 0 days 00:00:00.764117525 +934 40 0 days 00:00:07.230468446 +934 41 0 days 00:00:00.674740400 +934 42 0 days 00:00:02.617072055 +934 43 0 days 00:00:01.173013128 +934 44 0 days 00:00:02.068422030 +934 45 0 days 00:00:01.124110526 +934 46 0 days 00:00:01.374817132 +934 47 0 days 00:00:02.653810343 +934 48 0 days 00:00:01.882060365 +934 49 0 days 00:00:02.649257980 +934 50 0 days 00:00:03.068712980 +934 51 0 days 00:00:00.948312160 +934 52 0 days 00:00:02.294719357 +934 53 0 days 00:00:01.912590556 +934 54 0 days 00:00:03.294499675 +934 55 0 days 00:00:03.141083836 +934 56 0 days 00:00:05.331184975 +934 57 0 days 00:00:00.559991846 +934 58 0 days 00:00:01.224255545 +934 59 0 days 00:00:00.280632603 +934 60 0 days 00:00:02.621822740 +934 61 0 days 00:00:08.454993086 +934 62 0 days 00:00:00.915216314 +934 63 0 days 00:00:02.925477170 +934 64 0 days 00:00:01.891796893 +934 65 0 days 00:00:00.359644666 +934 66 0 days 00:00:01.855106526 +934 67 0 days 00:00:01.042299120 +934 68 0 days 00:00:01.488023760 +934 69 0 days 00:00:00.699160920 +934 70 0 days 00:00:00.271821024 +934 71 
0 days 00:00:03.792276820 +934 72 0 days 00:00:01.459343076 +934 73 0 days 00:00:03.979666388 +934 74 0 days 00:00:03.733343840 +934 75 0 days 00:00:01.662574535 +934 76 0 days 00:00:01.406586675 +934 77 0 days 00:00:01.191171935 +934 78 0 days 00:00:01.860546828 +934 79 0 days 00:00:05.171668930 +934 80 0 days 00:00:00.466713020 +934 81 0 days 00:00:00.462688050 +934 82 0 days 00:00:00.481362077 +934 83 0 days 00:00:01.038515231 +934 84 0 days 00:00:00.651392633 +934 85 0 days 00:00:00.863033980 +934 86 0 days 00:00:00.775545985 +934 87 0 days 00:00:00.740652146 +934 88 0 days 00:00:02.353036985 +934 89 0 days 00:00:01.654519820 +934 90 0 days 00:00:01.467782144 +934 91 0 days 00:00:01.198340000 +934 92 0 days 00:00:01.516804850 +934 93 0 days 00:00:00.705511084 +934 94 0 days 00:00:02.248390224 +934 95 0 days 00:00:02.425106257 +934 96 0 days 00:00:01.588235126 +934 97 0 days 00:00:01.757216270 +934 98 0 days 00:00:01.023934846 +934 99 0 days 00:00:00.995788748 +934 100 0 days 00:00:01.265146310 +935 1 0 days 00:01:23.315106673 +935 2 0 days 00:01:23.348762080 +935 3 0 days 00:04:13.780145446 +935 4 0 days 00:01:43.139217720 +935 5 0 days 00:04:44.919702933 +936 1 0 days 00:01:43.871000815 +936 2 0 days 00:01:18.092513093 +936 3 0 days 00:02:52.649318620 +936 4 0 days 00:02:52.581968153 +936 5 0 days 00:01:01.131087320 +937 1 0 days 00:01:23.073067540 +937 2 0 days 00:04:11.342650326 +937 3 0 days 00:02:24.347805690 +937 4 0 days 00:02:50.982443505 +938 1 0 days 00:01:27.024066395 +938 2 0 days 00:03:11.650337530 +938 3 0 days 00:01:03.863010960 +938 4 0 days 00:01:32.716888560 +939 1 0 days 00:00:09.479074380 +939 2 0 days 00:00:52.641429840 +939 3 0 days 00:00:24.619815386 +939 4 0 days 00:00:10.824148446 +939 5 0 days 00:00:11.656682786 +939 6 0 days 00:00:15.413569740 +939 7 0 days 00:00:05.950166933 +939 8 0 days 00:00:31.919083213 +939 9 0 days 00:00:32.935013506 +939 10 0 days 00:00:10.214003520 +939 11 0 days 00:00:17.773776013 +939 12 0 days 
00:00:34.866077986 +939 13 0 days 00:00:05.870343593 +939 14 0 days 00:00:23.732799120 +939 15 0 days 00:00:16.704679720 +939 16 0 days 00:00:40.176132713 +939 17 0 days 00:00:11.644290860 +939 18 0 days 00:00:17.143134960 +939 19 0 days 00:00:18.388312660 +939 20 0 days 00:00:26.999992033 +939 21 0 days 00:00:05.515492853 +939 22 0 days 00:00:12.146496820 +939 23 0 days 00:00:12.325413986 +939 24 0 days 00:00:07.594831273 +939 25 0 days 00:00:12.443531340 +939 26 0 days 00:00:13.168863560 +939 27 0 days 00:00:22.891033326 +939 28 0 days 00:00:17.610851533 +939 29 0 days 00:00:05.351526120 +939 30 0 days 00:00:05.171087126 +939 31 0 days 00:00:08.894253940 +939 32 0 days 00:00:39.452883186 +940 1 0 days 00:00:05.757598940 +940 2 0 days 00:00:14.441981140 +940 3 0 days 00:00:08.917652980 +940 4 0 days 00:00:10.773506326 +940 5 0 days 00:00:05.586042306 +940 6 0 days 00:00:06.139036025 +940 7 0 days 00:00:05.201442853 +940 8 0 days 00:00:11.533050893 +940 9 0 days 00:00:12.328044946 +940 10 0 days 00:00:29.355750540 +940 11 0 days 00:00:09.648819880 +940 12 0 days 00:00:33.265894836 +940 13 0 days 00:00:05.935398768 +940 14 0 days 00:00:06.066789315 +940 15 0 days 00:00:03.797861346 +940 16 0 days 00:00:03.926354273 +940 17 0 days 00:00:12.568810920 +940 18 0 days 00:00:10.288578896 +940 19 0 days 00:00:11.696384710 +940 20 0 days 00:00:23.493311733 +940 21 0 days 00:00:06.920129160 +940 22 0 days 00:00:10.716885113 +940 23 0 days 00:00:03.733396346 +940 24 0 days 00:00:08.774808120 +940 25 0 days 00:00:13.723004186 +940 26 0 days 00:00:10.301618740 +940 27 0 days 00:00:07.816000300 +940 28 0 days 00:00:03.132426820 +940 29 0 days 00:00:05.561702920 +940 30 0 days 00:00:06.189127800 +940 31 0 days 00:00:05.546101080 +940 32 0 days 00:00:05.282723325 +940 33 0 days 00:00:02.679125840 +940 34 0 days 00:00:05.233118793 +940 35 0 days 00:00:10.047225813 +940 36 0 days 00:00:06.827403913 +940 37 0 days 00:00:07.937367806 +940 38 0 days 00:00:05.609651553 +940 39 0 days 
00:00:04.319718493 +940 40 0 days 00:00:04.027512520 +940 41 0 days 00:00:08.926501493 +940 42 0 days 00:00:08.597210860 +940 43 0 days 00:00:13.199444660 +940 44 0 days 00:00:12.321127833 +940 45 0 days 00:00:04.945539680 +940 46 0 days 00:00:17.157351925 +940 47 0 days 00:00:05.878544008 +940 48 0 days 00:00:11.255780166 +940 49 0 days 00:00:10.311201585 +940 50 0 days 00:00:10.753561973 +940 51 0 days 00:00:12.414718266 +941 1 0 days 00:00:13.173884083 +941 2 0 days 00:01:08.906918345 +941 3 0 days 00:00:20.764058100 +941 4 0 days 00:01:00.355838235 +941 5 0 days 00:00:35.706492100 +941 6 0 days 00:00:09.867675470 +941 7 0 days 00:00:23.168554780 +941 8 0 days 00:00:11.750762665 +941 9 0 days 00:00:14.801145137 +941 10 0 days 00:00:08.849000593 +941 11 0 days 00:00:05.642871475 +941 12 0 days 00:00:15.652094431 +941 13 0 days 00:00:15.849486775 +941 14 0 days 00:00:10.030393233 +941 15 0 days 00:00:14.327805985 +941 16 0 days 00:00:17.529276820 +941 17 0 days 00:00:08.252802546 +941 18 0 days 00:00:18.152705810 +941 19 0 days 00:00:33.642683511 +942 1 0 days 00:00:02.895132490 +942 2 0 days 00:00:35.835637572 +942 3 0 days 00:00:11.182233628 +942 4 0 days 00:00:15.489949605 +942 5 0 days 00:00:05.215926330 +942 6 0 days 00:00:16.700391213 +942 7 0 days 00:00:14.414918926 +942 8 0 days 00:00:05.239633395 +942 9 0 days 00:00:04.940510800 +942 10 0 days 00:00:10.024010506 +942 11 0 days 00:00:10.959898688 +942 12 0 days 00:00:04.712396220 +942 13 0 days 00:00:17.031417092 +942 14 0 days 00:00:03.481792315 +942 15 0 days 00:00:06.326036195 +942 16 0 days 00:00:16.023503205 +942 17 0 days 00:00:09.488682612 +942 18 0 days 00:00:05.005930150 +942 19 0 days 00:00:07.728945980 +942 20 0 days 00:00:05.245950286 +942 21 0 days 00:00:05.681472250 +942 22 0 days 00:00:11.479947145 +942 23 0 days 00:00:05.540732920 +942 24 0 days 00:00:05.608367520 +942 25 0 days 00:00:08.516288235 +942 26 0 days 00:00:07.623940445 +942 27 0 days 00:00:33.316196680 +942 28 0 days 
00:00:05.388644310 +942 29 0 days 00:00:11.593458833 +942 30 0 days 00:00:27.931538806 +942 31 0 days 00:00:11.631436464 +942 32 0 days 00:00:16.276993355 +942 33 0 days 00:00:04.694959540 +942 34 0 days 00:00:06.664711793 +942 35 0 days 00:00:04.233630636 +942 36 0 days 00:00:04.216278240 +942 37 0 days 00:00:06.151388610 +942 38 0 days 00:00:09.895868550 +942 39 0 days 00:00:08.055973820 +942 40 0 days 00:00:10.558589085 +943 1 0 days 00:04:04.029996920 +943 2 0 days 00:04:44.876987440 +943 3 0 days 00:01:23.371961606 +944 1 0 days 00:01:14.378437193 +944 2 0 days 00:00:48.834582366 +944 3 0 days 00:02:33.365748000 +944 4 0 days 00:01:14.775736826 +944 5 0 days 00:01:14.752724653 +944 6 0 days 00:01:00.833327633 +944 7 0 days 00:01:21.729439825 +945 1 0 days 00:00:09.916043853 +945 2 0 days 00:00:14.316177713 +945 3 0 days 00:00:07.614632860 +945 4 0 days 00:00:16.935706660 +945 5 0 days 00:00:15.400165380 +945 6 0 days 00:00:07.002689973 +945 7 0 days 00:00:25.741563525 +945 8 0 days 00:00:28.284832730 +945 9 0 days 00:00:11.987863845 +945 10 0 days 00:00:20.138010200 +945 11 0 days 00:00:35.398110428 +945 12 0 days 00:00:08.434134250 +945 13 0 days 00:00:41.411731056 +945 14 0 days 00:00:36.821524006 +945 15 0 days 00:00:24.908345540 +945 16 0 days 00:00:07.432230590 +945 17 0 days 00:00:20.715470360 +945 18 0 days 00:00:13.999346520 +945 19 0 days 00:00:06.654097116 +945 20 0 days 00:00:16.302966560 +945 21 0 days 00:01:02.152902725 +946 1 0 days 00:00:08.582352240 +946 2 0 days 00:00:08.967397675 +946 3 0 days 00:00:09.283929016 +946 4 0 days 00:00:05.684193013 +946 5 0 days 00:00:19.074871820 +946 6 0 days 00:00:04.889467905 +946 7 0 days 00:00:03.782872160 +946 8 0 days 00:00:04.924676965 +946 9 0 days 00:00:11.764359515 +946 10 0 days 00:00:09.788060720 +946 11 0 days 00:00:06.410749315 +946 12 0 days 00:00:08.005627475 +946 13 0 days 00:00:04.738704225 +946 14 0 days 00:00:09.142613425 +946 15 0 days 00:00:05.273723353 +946 16 0 days 00:00:16.961427596 
+946 17 0 days 00:00:10.284027580 +946 18 0 days 00:00:05.697170040 +946 19 0 days 00:00:03.456515868 +946 20 0 days 00:00:10.334112350 +946 21 0 days 00:00:18.874714086 +946 22 0 days 00:00:07.669426020 +946 23 0 days 00:00:07.282012460 +946 24 0 days 00:00:04.478972128 +946 25 0 days 00:00:10.315513595 +946 26 0 days 00:00:11.942224300 +946 27 0 days 00:00:25.748282564 +946 28 0 days 00:00:15.975913025 +946 29 0 days 00:00:10.628250610 +946 30 0 days 00:00:27.736477920 +946 31 0 days 00:00:15.255021350 +946 32 0 days 00:00:10.334317346 +946 33 0 days 00:00:10.412172790 +946 34 0 days 00:00:09.215484940 +946 35 0 days 00:00:13.909561065 +946 36 0 days 00:00:08.447692180 +946 37 0 days 00:00:05.132845193 +946 38 0 days 00:00:07.098025765 +946 39 0 days 00:00:05.251129008 +946 40 0 days 00:00:07.801813670 +946 41 0 days 00:00:11.688488795 +946 42 0 days 00:00:38.447692628 +947 1 0 days 00:00:08.384995793 +947 2 0 days 00:00:10.939343326 +947 3 0 days 00:00:38.671433940 +947 4 0 days 00:00:26.369690633 +947 5 0 days 00:00:21.135408000 +947 6 0 days 00:00:36.585596506 +947 7 0 days 00:00:23.355111806 +947 8 0 days 00:00:14.370074093 +947 9 0 days 00:00:16.579565060 +947 10 0 days 00:00:11.456196100 +947 11 0 days 00:00:05.750573346 +947 12 0 days 00:00:10.043575266 +947 13 0 days 00:00:09.477614393 +947 14 0 days 00:00:16.157418906 +947 15 0 days 00:00:17.622945613 +947 16 0 days 00:00:08.135561420 +947 17 0 days 00:00:06.624111220 +947 18 0 days 00:00:14.568498180 +947 19 0 days 00:00:28.354981973 +947 20 0 days 00:00:18.648471180 +947 21 0 days 00:00:27.244897753 +947 22 0 days 00:00:10.394981706 +947 23 0 days 00:00:20.649955740 +947 24 0 days 00:00:08.932319293 +947 25 0 days 00:00:20.160118113 +947 26 0 days 00:00:41.209016906 +947 27 0 days 00:00:14.330881013 +947 28 0 days 00:00:14.949448415 +947 29 0 days 00:00:19.926618613 +947 30 0 days 00:00:09.712155946 +947 31 0 days 00:00:23.137631586 +947 32 0 days 00:00:21.135057740 +948 1 0 days 00:00:04.487142733 
+948 2 0 days 00:00:10.200867666 +948 3 0 days 00:00:04.935814006 +948 4 0 days 00:00:15.223654873 +948 5 0 days 00:00:12.722605153 +948 6 0 days 00:00:09.906479313 +948 7 0 days 00:00:13.763250330 +948 8 0 days 00:00:05.691401626 +948 9 0 days 00:00:04.765765726 +948 10 0 days 00:00:06.120892453 +948 11 0 days 00:00:28.407734648 +948 12 0 days 00:00:30.977938130 +948 13 0 days 00:00:08.581752780 +948 14 0 days 00:00:10.662159840 +948 15 0 days 00:00:14.961089300 +948 16 0 days 00:00:15.330449053 +948 17 0 days 00:00:07.534393140 +948 18 0 days 00:00:04.104344680 +948 19 0 days 00:00:08.977861006 +948 20 0 days 00:00:23.016225013 +948 21 0 days 00:00:05.596655610 +948 22 0 days 00:00:04.776771486 +948 23 0 days 00:00:05.147455026 +948 24 0 days 00:00:08.227099360 +948 25 0 days 00:00:07.363138226 +948 26 0 days 00:00:11.584509920 +948 27 0 days 00:00:04.907564693 +948 28 0 days 00:00:14.211533513 +948 29 0 days 00:00:14.773345333 +948 30 0 days 00:00:05.186126506 +948 31 0 days 00:00:14.076148166 +948 32 0 days 00:00:05.058459046 +948 33 0 days 00:00:05.342417653 +948 34 0 days 00:00:08.145673226 +948 35 0 days 00:00:06.482869846 +948 36 0 days 00:00:15.032044240 +948 37 0 days 00:00:29.734858673 +948 38 0 days 00:00:06.667754520 +948 39 0 days 00:00:32.496682120 +948 40 0 days 00:00:09.381529246 +948 41 0 days 00:00:03.609507975 +948 42 0 days 00:00:06.932124886 +948 43 0 days 00:00:03.838730686 +948 44 0 days 00:00:05.616231020 +948 45 0 days 00:00:19.004617013 +948 46 0 days 00:00:13.737073553 +949 1 0 days 00:00:18.351951642 +949 2 0 days 00:01:37.892631228 +949 3 0 days 00:01:34.291297360 +949 4 0 days 00:00:33.036276192 +950 1 0 days 00:00:39.123377108 +950 2 0 days 00:00:13.631327756 +950 3 0 days 00:00:09.448716755 +950 4 0 days 00:00:08.419945775 +950 5 0 days 00:00:11.385033772 +950 6 0 days 00:00:36.572218180 +950 7 0 days 00:00:11.604262205 +950 8 0 days 00:00:29.168501076 +950 9 0 days 00:00:09.487619292 +950 10 0 days 00:00:41.694183128 +950 11 0 days 
00:00:08.936012910 +950 12 0 days 00:00:28.032084836 +950 13 0 days 00:00:18.553437466 +950 14 0 days 00:00:09.490064394 +950 15 0 days 00:00:12.911169965 +950 16 0 days 00:00:29.949788314 +951 1 0 days 00:00:37.093170706 +951 2 0 days 00:00:48.791527360 +951 3 0 days 00:00:16.163205675 +951 4 0 days 00:00:17.022990790 +951 5 0 days 00:00:18.296156766 +951 6 0 days 00:01:13.987504073 +951 7 0 days 00:00:11.862166000 +951 8 0 days 00:00:20.359889385 +951 9 0 days 00:00:41.865044540 +951 10 0 days 00:00:58.247695835 +951 11 0 days 00:00:24.605751350 +951 12 0 days 00:00:47.431410397 +951 13 0 days 00:01:13.825677886 +952 1 0 days 00:01:06.726759625 +952 2 0 days 00:01:02.581440548 +952 3 0 days 00:00:33.030093280 +952 4 0 days 00:00:58.505687832 +952 5 0 days 00:00:46.423740003 +952 6 0 days 00:00:27.883932720 +952 7 0 days 00:00:25.130637170 +953 1 0 days 00:00:58.865017788 +953 2 0 days 00:00:10.038412057 +953 3 0 days 00:00:23.524018900 +953 4 0 days 00:00:23.870399725 +953 5 0 days 00:00:51.888761845 +953 6 0 days 00:00:55.200460544 +953 7 0 days 00:00:08.457630733 +953 8 0 days 00:00:21.245041068 +953 9 0 days 00:00:15.618199095 +953 10 0 days 00:00:09.112896826 +953 11 0 days 00:00:46.291769653 +953 12 0 days 00:00:23.461132306 +954 1 0 days 00:00:41.633504225 +954 2 0 days 00:00:10.568934020 +954 3 0 days 00:00:22.934892166 +954 4 0 days 00:00:40.495044416 +954 5 0 days 00:00:14.238237630 +954 6 0 days 00:00:16.469924128 +954 7 0 days 00:00:29.123165202 +954 8 0 days 00:00:13.651478972 +954 9 0 days 00:00:12.846851075 +954 10 0 days 00:00:57.897034826 +954 11 0 days 00:00:32.746425486 +954 12 0 days 00:00:52.096089180 +955 1 0 days 00:00:00.323275944 +955 2 0 days 00:00:01.867952565 +955 3 0 days 00:00:00.339686285 +955 4 0 days 00:00:00.834991747 +955 5 0 days 00:00:00.872948125 +955 6 0 days 00:00:00.869249492 +955 7 0 days 00:00:00.533969809 +955 8 0 days 00:00:00.845263031 +955 9 0 days 00:00:00.279200616 +955 10 0 days 00:00:00.372925205 +955 12 0 days 
00:00:00.888792096 +955 16 0 days 00:00:00.282369038 +955 17 0 days 00:00:00.247648386 +955 19 0 days 00:00:03.642221321 +955 20 0 days 00:00:01.850282022 +955 21 0 days 00:00:01.870762782 +955 22 0 days 00:00:00.319227718 +955 23 0 days 00:00:00.246281808 +955 24 0 days 00:00:00.327988721 +955 25 0 days 00:00:00.374929226 +955 26 0 days 00:00:00.235729826 +955 29 0 days 00:00:00.275017392 +955 30 0 days 00:00:01.807284903 +955 31 0 days 00:00:00.852928941 +955 32 0 days 00:00:00.248079818 +955 34 0 days 00:00:00.480763449 +955 35 0 days 00:00:00.900750331 +955 36 0 days 00:00:00.326292125 +955 37 0 days 00:00:01.734155072 +955 38 0 days 00:00:00.495905757 +955 39 0 days 00:00:01.834130289 +955 40 0 days 00:00:00.326500371 +955 41 0 days 00:00:00.316130638 +955 43 0 days 00:00:03.432162345 +955 44 0 days 00:00:00.196536912 +955 45 0 days 00:00:00.850669110 +955 46 0 days 00:00:01.837048128 +955 47 0 days 00:00:00.361817000 +955 48 0 days 00:00:00.899313178 +955 50 0 days 00:00:00.371678716 +955 51 0 days 00:00:00.875979948 +955 53 0 days 00:00:00.183869952 +955 55 0 days 00:00:00.311328025 +955 56 0 days 00:00:00.531121707 +955 57 0 days 00:00:01.818283610 +955 58 0 days 00:00:00.366140386 +955 59 0 days 00:00:00.289081906 +955 60 0 days 00:00:00.857835501 +955 61 0 days 00:00:00.307858240 +955 62 0 days 00:00:01.811168321 +955 63 0 days 00:00:00.868689609 +955 64 0 days 00:00:00.487299730 +955 65 0 days 00:00:00.222566850 +955 66 0 days 00:00:03.554741605 +955 67 0 days 00:00:00.886781198 +955 68 0 days 00:00:00.881867060 +955 69 0 days 00:00:01.802737363 +955 70 0 days 00:00:00.882785905 +955 71 0 days 00:00:00.530036229 +955 72 0 days 00:00:01.760884135 +955 74 0 days 00:00:00.910727928 +955 75 0 days 00:00:00.287069263 +955 76 0 days 00:00:00.859709107 +955 77 0 days 00:00:00.867684220 +955 78 0 days 00:00:03.503351880 +955 80 0 days 00:00:01.803245427 +955 81 0 days 00:00:00.837637400 +955 82 0 days 00:00:01.822764604 +955 83 0 days 00:00:00.271151390 +955 85 
0 days 00:00:00.327430152 +955 86 0 days 00:00:01.754929891 +955 87 0 days 00:00:00.292915320 +955 88 0 days 00:00:03.525222405 +955 89 0 days 00:00:00.866656690 +955 90 0 days 00:00:00.873118107 +955 92 0 days 00:00:01.826933009 +955 93 0 days 00:00:00.331708842 +955 94 0 days 00:00:00.223599807 +955 95 0 days 00:00:00.148036046 +955 96 0 days 00:00:00.236342386 +955 97 0 days 00:00:00.369485883 +955 98 0 days 00:00:00.224408860 +955 99 0 days 00:00:00.286537688 +955 100 0 days 00:00:03.471349800 +956 1 0 days 00:00:01.898543596 +956 2 0 days 00:00:00.888723266 +956 3 0 days 00:00:01.839595072 +956 4 0 days 00:00:00.498566897 +956 5 0 days 00:00:00.376827023 +956 8 0 days 00:00:00.538189042 +956 10 0 days 00:00:03.782571295 +956 12 0 days 00:00:00.368114380 +956 13 0 days 00:00:00.299905006 +956 14 0 days 00:00:00.893048114 +956 15 0 days 00:00:01.907456004 +956 16 0 days 00:00:00.676519593 +956 17 0 days 00:00:00.230121671 +956 18 0 days 00:00:00.851227102 +956 19 0 days 00:00:00.379739353 +956 20 0 days 00:00:00.851391430 +956 21 0 days 00:00:00.315180700 +956 22 0 days 00:00:00.204541186 +956 23 0 days 00:00:00.875723630 +956 25 0 days 00:00:00.550932974 +956 26 0 days 00:00:03.752849782 +956 27 0 days 00:00:00.313935171 +956 28 0 days 00:00:00.301843977 +956 29 0 days 00:00:00.314419086 +956 30 0 days 00:00:00.324828801 +956 31 0 days 00:00:03.677530409 +956 32 0 days 00:00:00.493122755 +956 33 0 days 00:00:00.306307462 +956 35 0 days 00:00:01.751216071 +956 36 0 days 00:00:00.271650658 +956 37 0 days 00:00:00.326069641 +956 38 0 days 00:00:00.361322491 +956 39 0 days 00:00:00.500761572 +956 40 0 days 00:00:00.521257747 +956 41 0 days 00:00:00.827211338 +956 42 0 days 00:00:00.174682931 +956 43 0 days 00:00:00.559403474 +956 44 0 days 00:00:03.721486495 +956 47 0 days 00:00:00.196534009 +956 49 0 days 00:00:00.905148612 +956 50 0 days 00:00:00.847667980 +956 51 0 days 00:00:03.630151964 +956 52 0 days 00:00:03.102659750 +956 53 0 days 00:00:00.896687137 +956 
54 0 days 00:00:00.542260598 +956 55 0 days 00:00:00.203672835 +956 56 0 days 00:00:00.337978045 +956 57 0 days 00:00:00.280982546 +956 58 0 days 00:00:03.673331375 +956 59 0 days 00:00:01.896933590 +956 60 0 days 00:00:00.330829960 +956 61 0 days 00:00:00.272387275 +956 62 0 days 00:00:00.237261585 +956 63 0 days 00:00:00.320903255 +956 65 0 days 00:00:00.305219895 +956 66 0 days 00:00:00.259221215 +956 67 0 days 00:00:03.805341936 +956 68 0 days 00:00:02.797500773 +956 70 0 days 00:00:01.790819662 +956 71 0 days 00:00:00.847911051 +956 72 0 days 00:00:00.248188890 +956 73 0 days 00:00:03.641578996 +956 77 0 days 00:00:00.912780168 +956 79 0 days 00:00:01.822592642 +956 80 0 days 00:00:00.204491476 +956 81 0 days 00:00:00.509759752 +956 82 0 days 00:00:01.879312220 +956 83 0 days 00:00:03.724884098 +956 85 0 days 00:00:01.912794170 +956 86 0 days 00:00:00.240620460 +956 87 0 days 00:00:03.135972130 +956 88 0 days 00:00:00.937491926 +956 89 0 days 00:00:00.902293074 +956 90 0 days 00:00:00.913178358 +956 91 0 days 00:00:00.870020850 +956 92 0 days 00:00:00.865671680 +956 93 0 days 00:00:00.331455098 +956 95 0 days 00:00:00.377318785 +956 97 0 days 00:00:00.394105381 +956 98 0 days 00:00:00.316936074 +956 99 0 days 00:00:00.905018108 +956 100 0 days 00:00:00.363062598 +957 1 0 days 00:00:02.036465609 +957 2 0 days 00:00:00.128397096 +957 5 0 days 00:00:00.475010685 +957 6 0 days 00:00:00.187366491 +957 7 0 days 00:00:01.963550641 +957 10 0 days 00:00:00.102825413 +957 11 0 days 00:00:00.182653831 +957 12 0 days 00:00:00.303252176 +957 13 0 days 00:00:00.517855878 +957 20 0 days 00:00:00.282561218 +957 21 0 days 00:00:00.187863373 +957 22 0 days 00:00:01.024397490 +957 23 0 days 00:00:00.995351614 +957 24 0 days 00:00:00.480426038 +957 25 0 days 00:00:01.948999031 +957 26 0 days 00:00:00.207054504 +957 27 0 days 00:00:00.181686372 +957 28 0 days 00:00:00.508316648 +957 29 0 days 00:00:01.986076336 +957 31 0 days 00:00:01.009271117 +957 33 0 days 00:00:00.495686787 
+957 34 0 days 00:00:00.473362700 +957 35 0 days 00:00:01.967075630 +957 36 0 days 00:00:01.016815550 +957 37 0 days 00:00:00.184102245 +957 38 0 days 00:00:01.016581454 +957 39 0 days 00:00:00.219559059 +957 40 0 days 00:00:00.511110395 +957 41 0 days 00:00:00.134110320 +957 43 0 days 00:00:02.017582648 +957 44 0 days 00:00:00.505700083 +957 45 0 days 00:00:00.997552435 +957 47 0 days 00:00:00.192051640 +957 48 0 days 00:00:00.170714315 +957 49 0 days 00:00:00.281667844 +957 50 0 days 00:00:01.885726745 +957 52 0 days 00:00:00.186746005 +957 53 0 days 00:00:00.502935506 +957 55 0 days 00:00:00.488729033 +957 56 0 days 00:00:00.475191174 +957 57 0 days 00:00:00.161387121 +957 58 0 days 00:00:00.207443422 +957 60 0 days 00:00:01.823599143 +957 61 0 days 00:00:00.213905745 +957 62 0 days 00:00:00.217711793 +957 63 0 days 00:00:01.903382164 +957 64 0 days 00:00:01.019592437 +957 65 0 days 00:00:01.011564530 +957 67 0 days 00:00:00.138894782 +957 70 0 days 00:00:00.205845588 +957 71 0 days 00:00:00.180998087 +957 72 0 days 00:00:00.482215172 +957 74 0 days 00:00:00.138750114 +957 76 0 days 00:00:00.166470835 +957 77 0 days 00:00:01.862065494 +957 80 0 days 00:00:02.009306165 +957 82 0 days 00:00:01.464860713 +957 83 0 days 00:00:00.301656712 +957 84 0 days 00:00:00.213433606 +957 85 0 days 00:00:00.515517253 +957 89 0 days 00:00:00.957372120 +957 90 0 days 00:00:00.401509340 +957 92 0 days 00:00:00.470072782 +957 93 0 days 00:00:00.109832289 +957 94 0 days 00:00:00.140121041 +957 95 0 days 00:00:00.306381407 +957 96 0 days 00:00:00.292083650 +957 98 0 days 00:00:02.024577466 +958 2 0 days 00:00:00.219323690 +958 4 0 days 00:00:00.100071040 +958 5 0 days 00:00:00.171236541 +958 6 0 days 00:00:00.253352035 +958 7 0 days 00:00:01.038169815 +958 8 0 days 00:00:00.480193521 +958 9 0 days 00:00:00.100053613 +958 10 0 days 00:00:00.112020011 +958 11 0 days 00:00:02.048304387 +958 12 0 days 00:00:01.046319267 +958 13 0 days 00:00:00.308231172 +958 15 0 days 00:00:00.495012928 
+958 16 0 days 00:00:00.111232635 +958 17 0 days 00:00:00.471637475 +958 18 0 days 00:00:00.518122390 +958 19 0 days 00:00:00.282176423 +958 20 0 days 00:00:00.170670094 +958 22 0 days 00:00:00.501007713 +958 24 0 days 00:00:02.026662960 +958 25 0 days 00:00:00.212993856 +958 26 0 days 00:00:00.396894140 +958 27 0 days 00:00:01.040081812 +958 30 0 days 00:00:00.504116082 +958 32 0 days 00:00:00.129010913 +958 34 0 days 00:00:00.183359793 +958 36 0 days 00:00:00.173172995 +958 37 0 days 00:00:02.029901157 +958 38 0 days 00:00:02.061478207 +958 41 0 days 00:00:00.124394328 +958 43 0 days 00:00:01.037955483 +958 44 0 days 00:00:02.003499700 +958 47 0 days 00:00:01.039991352 +958 52 0 days 00:00:01.027816054 +958 53 0 days 00:00:00.498591158 +958 56 0 days 00:00:00.283437822 +958 57 0 days 00:00:00.283043376 +958 58 0 days 00:00:02.016670386 +958 61 0 days 00:00:00.452024384 +958 62 0 days 00:00:02.043586332 +958 64 0 days 00:00:00.175317931 +958 65 0 days 00:00:00.481161582 +958 66 0 days 00:00:00.218963625 +958 68 0 days 00:00:00.312875418 +958 70 0 days 00:00:02.057047595 +958 71 0 days 00:00:00.170194246 +958 72 0 days 00:00:02.061391722 +958 73 0 days 00:00:00.304692838 +958 74 0 days 00:00:00.227003662 +958 79 0 days 00:00:00.473540449 +958 81 0 days 00:00:00.395484553 +958 83 0 days 00:00:00.286252908 +958 84 0 days 00:00:00.217101062 +958 86 0 days 00:00:00.185756936 +958 87 0 days 00:00:00.283453930 +958 88 0 days 00:00:00.315245450 +958 89 0 days 00:00:01.024448860 +958 91 0 days 00:00:00.158529105 +958 95 0 days 00:00:01.036295143 +958 97 0 days 00:00:00.169853300 +958 99 0 days 00:00:00.131073135 +958 100 0 days 00:00:00.160878061 +959 1 0 days 00:00:00.208638635 +959 2 0 days 00:00:03.262708353 +959 3 0 days 00:00:03.117701704 +959 4 0 days 00:00:00.827114134 +959 5 0 days 00:00:00.246954913 +959 6 0 days 00:00:01.701668420 +959 7 0 days 00:00:01.765431593 +959 8 0 days 00:00:00.832936823 +959 9 0 days 00:00:00.237376562 +959 10 0 days 00:00:00.296356392 
+959 11 0 days 00:00:00.367566033 +959 12 0 days 00:00:00.202739226 +959 13 0 days 00:00:00.185149960 +959 14 0 days 00:00:03.262425266 +959 15 0 days 00:00:00.293778150 +959 16 0 days 00:00:00.220661828 +959 18 0 days 00:00:00.463279355 +959 19 0 days 00:00:00.825515502 +959 20 0 days 00:00:00.308398900 +959 21 0 days 00:00:00.804593572 +959 22 0 days 00:00:00.385320980 +959 23 0 days 00:00:00.328666957 +959 24 0 days 00:00:00.237275231 +959 25 0 days 00:00:03.111000444 +959 26 0 days 00:00:00.306984545 +959 27 0 days 00:00:03.371149394 +959 28 0 days 00:00:00.340045422 +959 29 0 days 00:00:03.256300626 +959 30 0 days 00:00:00.305663626 +959 31 0 days 00:00:00.817622611 +959 32 0 days 00:00:00.314415370 +959 33 0 days 00:00:03.492415902 +959 34 0 days 00:00:00.805233222 +959 35 0 days 00:00:03.159955692 +959 36 0 days 00:00:00.777981216 +959 37 0 days 00:00:00.526898772 +959 38 0 days 00:00:00.793470814 +959 39 0 days 00:00:00.287319155 +959 40 0 days 00:00:00.308868582 +959 41 0 days 00:00:00.807357430 +959 42 0 days 00:00:00.365483040 +959 43 0 days 00:00:00.454745020 +959 44 0 days 00:00:00.491245340 +959 45 0 days 00:00:00.801971235 +959 46 0 days 00:00:00.302120367 +959 47 0 days 00:00:00.306462192 +959 48 0 days 00:00:00.798177943 +959 49 0 days 00:00:00.807167012 +959 50 0 days 00:00:00.189185108 +959 51 0 days 00:00:03.239967203 +959 52 0 days 00:00:00.220519277 +959 53 0 days 00:00:03.360656911 +959 54 0 days 00:00:00.813054960 +959 55 0 days 00:00:01.656572813 +959 56 0 days 00:00:01.684825560 +959 57 0 days 00:00:00.303332610 +959 58 0 days 00:00:03.264675356 +959 59 0 days 00:00:00.179994037 +959 60 0 days 00:00:00.831075530 +959 61 0 days 00:00:01.733405100 +959 62 0 days 00:00:00.462212333 +959 63 0 days 00:00:00.467106480 +959 64 0 days 00:00:00.247723105 +959 65 0 days 00:00:01.677083533 +959 66 0 days 00:00:00.329425142 +959 67 0 days 00:00:00.353538640 +959 68 0 days 00:00:03.418271257 +959 69 0 days 00:00:01.722076491 +959 70 0 days 
00:00:00.344622036 +959 71 0 days 00:00:00.387429231 +959 72 0 days 00:00:01.723180545 +959 73 0 days 00:00:03.541134505 +959 74 0 days 00:00:00.871309422 +959 75 0 days 00:00:00.865752065 +959 76 0 days 00:00:00.520487634 +959 77 0 days 00:00:00.775804333 +959 78 0 days 00:00:00.372607405 +959 79 0 days 00:00:00.802690740 +959 80 0 days 00:00:00.344662858 +959 81 0 days 00:00:00.183230031 +959 82 0 days 00:00:00.514267691 +959 83 0 days 00:00:00.204592885 +959 84 0 days 00:00:00.311135245 +959 85 0 days 00:00:00.343372628 +959 86 0 days 00:00:03.230226440 +959 87 0 days 00:00:00.302052350 +959 88 0 days 00:00:03.143464232 +959 90 0 days 00:00:00.188945292 +959 91 0 days 00:00:00.254273800 +959 93 0 days 00:00:00.247603380 +959 94 0 days 00:00:00.802624130 +959 95 0 days 00:00:00.265161717 +959 96 0 days 00:00:00.828691551 +959 97 0 days 00:00:00.364199282 +959 98 0 days 00:00:00.215894958 +959 99 0 days 00:00:01.656370820 +959 100 0 days 00:00:00.791744171 +960 1 0 days 00:00:00.477517857 +960 2 0 days 00:00:00.268569936 +960 3 0 days 00:00:00.186883240 +960 4 0 days 00:00:00.223674384 +960 5 0 days 00:00:00.272274076 +960 6 0 days 00:00:00.144717298 +960 7 0 days 00:00:00.222546589 +960 8 0 days 00:00:00.272245756 +960 9 0 days 00:00:00.226057585 +960 10 0 days 00:00:00.266856068 +960 11 0 days 00:00:00.995813812 +960 12 0 days 00:00:00.968459248 +960 13 0 days 00:00:00.148529950 +960 14 0 days 00:00:00.130819451 +960 15 0 days 00:00:00.188428678 +960 16 0 days 00:00:00.131868766 +960 17 0 days 00:00:00.477327684 +960 18 0 days 00:00:00.484689382 +960 19 0 days 00:00:00.111086710 +960 20 0 days 00:00:00.465903185 +960 21 0 days 00:00:00.459788131 +960 22 0 days 00:00:00.938052422 +960 23 0 days 00:00:00.947287971 +960 24 0 days 00:00:01.742463380 +960 25 0 days 00:00:00.450200180 +960 26 0 days 00:00:00.173433951 +960 27 0 days 00:00:00.452655880 +960 28 0 days 00:00:00.269016502 +960 29 0 days 00:00:00.216540580 +960 30 0 days 00:00:00.110462378 +960 31 0 days 
00:00:01.837344540 +960 32 0 days 00:00:00.426303924 +960 33 0 days 00:00:00.177724566 +960 34 0 days 00:00:00.276282036 +960 35 0 days 00:00:01.738299216 +960 36 0 days 00:00:00.492665250 +960 37 0 days 00:00:00.178813622 +960 39 0 days 00:00:00.153235758 +960 40 0 days 00:00:00.943349928 +960 41 0 days 00:00:00.939770465 +960 42 0 days 00:00:00.139952833 +960 43 0 days 00:00:00.194466552 +960 44 0 days 00:00:00.179803410 +960 45 0 days 00:00:00.980543302 +960 46 0 days 00:00:01.862300957 +960 47 0 days 00:00:01.913700760 +960 48 0 days 00:00:00.168315684 +960 50 0 days 00:00:00.104378888 +960 51 0 days 00:00:01.829333191 +960 52 0 days 00:00:00.179293648 +960 53 0 days 00:00:01.913616238 +960 54 0 days 00:00:00.174043628 +960 55 0 days 00:00:00.112531075 +960 56 0 days 00:00:00.221169015 +960 57 0 days 00:00:01.868211930 +960 58 0 days 00:00:00.176452468 +960 59 0 days 00:00:00.502436886 +960 60 0 days 00:00:01.733735576 +960 61 0 days 00:00:00.940183674 +960 62 0 days 00:00:00.977593624 +960 63 0 days 00:00:00.175523361 +960 64 0 days 00:00:00.142214632 +960 65 0 days 00:00:00.184844452 +960 66 0 days 00:00:01.804940236 +960 67 0 days 00:00:00.452499117 +960 68 0 days 00:00:00.217223013 +960 69 0 days 00:00:00.436290016 +960 70 0 days 00:00:00.141438751 +960 71 0 days 00:00:00.148939205 +960 72 0 days 00:00:00.467998500 +960 73 0 days 00:00:00.175708128 +960 74 0 days 00:00:00.179171613 +960 75 0 days 00:00:00.274658058 +960 76 0 days 00:00:00.468485173 +960 77 0 days 00:00:00.467371135 +960 78 0 days 00:00:01.856297720 +960 79 0 days 00:00:00.099329796 +960 80 0 days 00:00:00.137613523 +960 81 0 days 00:00:00.178908952 +960 82 0 days 00:00:00.453810057 +960 83 0 days 00:00:00.149838521 +960 84 0 days 00:00:00.227333728 +960 85 0 days 00:00:01.811292933 +960 86 0 days 00:00:00.958448137 +960 87 0 days 00:00:00.990253848 +960 89 0 days 00:00:00.466863277 +960 90 0 days 00:00:01.016456061 +960 91 0 days 00:00:00.185991550 +960 92 0 days 00:00:00.220542440 +960 93 
0 days 00:00:00.197179922 +960 94 0 days 00:00:00.189849134 +960 95 0 days 00:00:00.465912137 +960 96 0 days 00:00:00.131810121 +960 97 0 days 00:00:00.970259933 +960 98 0 days 00:00:00.144907708 +960 99 0 days 00:00:00.468664444 +960 100 0 days 00:00:00.190645918 +961 1 0 days 00:04:24.466928304 +961 2 0 days 00:05:49.898001054 +962 1 0 days 00:01:29.412817565 +962 2 0 days 00:01:39.135238598 +963 1 0 days 00:01:32.525116729 +963 2 0 days 00:00:16.485187448 +963 3 0 days 00:00:06.661538012 +963 4 0 days 00:00:29.643908732 +964 1 0 days 00:00:18.286097908 +964 2 0 days 00:06:29.243697080 +965 1 0 days 00:00:09.431461096 +965 2 0 days 00:00:05.189779460 +965 3 0 days 00:03:17.900983925 +965 4 0 days 00:00:06.753586388 +965 5 0 days 00:00:50.723393510 +966 2 0 days 00:02:23.243818129 +967 1 0 days 00:00:46.480159510 +967 2 0 days 00:01:43.017810542 +967 3 0 days 00:02:27.080113913 +968 1 0 days 00:00:19.273844801 +968 2 0 days 00:01:18.908325113 +968 3 0 days 00:00:10.975566360 +968 4 0 days 00:00:42.807362296 +968 5 0 days 00:01:22.682491125 +968 6 0 days 00:00:11.155153900 +968 7 0 days 00:01:34.944317412 +968 8 0 days 00:01:40.151369611 +969 1 0 days 00:00:03.504018902 +969 2 0 days 00:00:06.663806807 +969 3 0 days 00:00:22.567871100 +969 4 0 days 00:00:05.272358584 +969 5 0 days 00:01:26.295626848 +969 6 0 days 00:00:05.909643380 +969 7 0 days 00:00:13.939199763 +969 8 0 days 00:00:00.757399270 +969 9 0 days 00:03:20.590180213 +969 10 0 days 00:01:22.355408085 +969 11 0 days 00:00:25.060921342 +970 1 0 days 00:00:27.465305312 +970 2 0 days 00:00:44.365111940 +970 3 0 days 00:02:47.044995530 +970 4 0 days 00:02:27.717063630 +971 1 0 days 00:00:00.283288456 +971 2 0 days 00:00:00.194336954 +971 3 0 days 00:00:01.535657548 +971 4 0 days 00:00:01.582126833 +971 5 0 days 00:00:00.363912840 +971 6 0 days 00:00:00.259791095 +971 7 0 days 00:00:01.503773976 +971 8 0 days 00:00:00.507928170 +971 9 0 days 00:00:01.454131725 +971 10 0 days 00:00:00.178622620 +971 11 0 days 
00:00:00.564434012 +971 12 0 days 00:00:00.241247540 +971 13 0 days 00:00:00.194540080 +971 14 0 days 00:00:00.587232387 +971 15 0 days 00:00:00.515239610 +971 16 0 days 00:00:00.242391405 +971 17 0 days 00:00:01.581524283 +971 18 0 days 00:00:00.518843660 +971 19 0 days 00:00:00.240019955 +971 20 0 days 00:00:00.194729600 +971 21 0 days 00:00:00.894369305 +971 22 0 days 00:00:00.369807106 +971 23 0 days 00:00:00.253490520 +971 24 0 days 00:00:00.185416750 +971 25 0 days 00:00:00.907739500 +971 26 0 days 00:00:01.619926662 +971 27 0 days 00:00:00.582020872 +971 28 0 days 00:00:00.232935570 +971 29 0 days 00:00:00.346226243 +971 30 0 days 00:00:00.215923880 +971 31 0 days 00:00:00.516210700 +971 32 0 days 00:00:00.568490116 +971 33 0 days 00:00:00.538428340 +971 34 0 days 00:00:00.265724328 +971 35 0 days 00:00:00.507367315 +971 36 0 days 00:00:00.500416266 +971 37 0 days 00:00:00.287260360 +971 38 0 days 00:00:00.249675723 +971 39 0 days 00:00:00.854610992 +971 40 0 days 00:00:00.893634745 +971 41 0 days 00:00:00.876595603 +971 42 0 days 00:00:00.342402830 +971 43 0 days 00:00:00.546846185 +971 44 0 days 00:00:00.206075588 +971 45 0 days 00:00:01.443030550 +971 46 0 days 00:00:01.462382090 +971 47 0 days 00:00:00.278698033 +971 48 0 days 00:00:00.559305552 +971 49 0 days 00:00:00.546505966 +971 50 0 days 00:00:00.214675840 +971 51 0 days 00:00:00.239648692 +971 52 0 days 00:00:00.228771535 +971 53 0 days 00:00:00.276903940 +971 54 0 days 00:00:00.910528075 +971 55 0 days 00:00:00.334442820 +971 56 0 days 00:00:00.891230070 +971 57 0 days 00:00:00.828716185 +971 58 0 days 00:00:00.361726046 +971 59 0 days 00:00:00.221870328 +971 60 0 days 00:00:00.549558580 +971 61 0 days 00:00:00.256544116 +971 62 0 days 00:00:00.342331770 +971 63 0 days 00:00:00.582669086 +971 64 0 days 00:00:00.389327776 +971 65 0 days 00:00:00.611785622 +971 66 0 days 00:00:00.533080685 +971 67 0 days 00:00:00.186465460 +971 68 0 days 00:00:00.510333660 +971 69 0 days 00:00:01.427988565 +971 70 
0 days 00:00:00.234775028 +971 71 0 days 00:00:01.558431806 +971 72 0 days 00:00:00.250252368 +971 73 0 days 00:00:00.555739313 +971 74 0 days 00:00:00.377637988 +971 75 0 days 00:00:01.454606555 +971 76 0 days 00:00:00.558748396 +971 77 0 days 00:00:00.316198773 +971 78 0 days 00:00:00.252787028 +971 79 0 days 00:00:00.489046555 +971 80 0 days 00:00:00.358773740 +971 81 0 days 00:00:00.272768388 +971 82 0 days 00:00:00.241227900 +971 83 0 days 00:00:00.378176905 +971 84 0 days 00:00:00.554563196 +971 85 0 days 00:00:00.255033852 +971 86 0 days 00:00:00.241250270 +971 87 0 days 00:00:00.205540036 +971 88 0 days 00:00:01.618994771 +971 89 0 days 00:00:00.278704766 +971 90 0 days 00:00:00.579673150 +971 91 0 days 00:00:01.529420816 +971 92 0 days 00:00:00.910219810 +971 93 0 days 00:00:00.200476302 +971 94 0 days 00:00:00.621965650 +971 95 0 days 00:00:00.856800184 +971 96 0 days 00:00:00.244020400 +971 97 0 days 00:00:00.275526270 +971 98 0 days 00:00:00.535268368 +971 99 0 days 00:00:00.176983455 +971 100 0 days 00:00:00.537378855 +972 1 0 days 00:00:00.975793160 +972 2 0 days 00:00:00.227319886 +972 3 0 days 00:00:00.748043713 +972 4 0 days 00:00:00.199709860 +972 5 0 days 00:00:00.167529240 +972 6 0 days 00:00:00.167494620 +972 9 0 days 00:00:00.189598146 +972 10 0 days 00:00:00.206294180 +972 11 0 days 00:00:00.172473546 +972 12 0 days 00:00:00.496087986 +972 13 0 days 00:00:01.079733961 +972 14 0 days 00:00:01.300056753 +972 15 0 days 00:00:01.349106666 +972 16 0 days 00:00:00.224813606 +972 17 0 days 00:00:00.244520513 +972 18 0 days 00:00:01.340231173 +972 19 0 days 00:00:00.201526340 +972 20 0 days 00:00:00.242728686 +972 21 0 days 00:00:00.173407753 +972 22 0 days 00:00:01.358065160 +972 23 0 days 00:00:01.340612680 +972 24 0 days 00:00:00.458595313 +972 25 0 days 00:00:00.209028296 +972 26 0 days 00:00:00.300152540 +972 27 0 days 00:00:01.765346926 +972 28 0 days 00:00:00.316016506 +972 29 0 days 00:00:00.165035800 +972 30 0 days 00:00:00.523988860 +972 31 
0 days 00:00:01.302178800 +972 32 0 days 00:00:01.534159380 +972 33 0 days 00:00:00.317906873 +972 34 0 days 00:00:00.166536026 +972 35 0 days 00:00:01.684824601 +972 36 0 days 00:00:00.479427926 +972 37 0 days 00:00:00.307931680 +972 38 0 days 00:00:00.179740846 +972 39 0 days 00:00:01.343981086 +972 40 0 days 00:00:00.312529753 +972 41 0 days 00:00:00.155247560 +972 42 0 days 00:00:00.244070975 +972 43 0 days 00:00:01.462428185 +972 44 0 days 00:00:00.634689101 +972 45 0 days 00:00:01.343407553 +972 46 0 days 00:00:00.201018240 +972 47 0 days 00:00:00.166965160 +972 48 0 days 00:00:00.317243213 +972 49 0 days 00:00:00.225588046 +972 50 0 days 00:00:00.321132266 +972 51 0 days 00:00:00.320423200 +972 52 0 days 00:00:00.499735406 +972 53 0 days 00:00:00.226454240 +972 54 0 days 00:00:00.169223250 +972 55 0 days 00:00:00.375277306 +972 56 0 days 00:00:00.226219186 +972 57 0 days 00:00:00.164522000 +972 58 0 days 00:00:00.386065993 +972 59 0 days 00:00:00.300624346 +972 60 0 days 00:00:00.476213586 +972 61 0 days 00:00:00.409108272 +972 62 0 days 00:00:00.228891940 +972 63 0 days 00:00:01.455374955 +972 64 0 days 00:00:01.300533986 +972 65 0 days 00:00:00.156372753 +972 66 0 days 00:00:00.173118193 +972 67 0 days 00:00:00.301466653 +972 68 0 days 00:00:00.474499226 +972 69 0 days 00:00:00.227370946 +972 70 0 days 00:00:00.183063380 +972 71 0 days 00:00:00.224900293 +972 72 0 days 00:00:00.750885160 +972 73 0 days 00:00:00.323093213 +972 74 0 days 00:00:00.225323753 +972 75 0 days 00:00:00.189133753 +972 76 0 days 00:00:00.163840240 +972 78 0 days 00:00:00.497118980 +972 79 0 days 00:00:00.155588793 +972 80 0 days 00:00:01.373451986 +972 81 0 days 00:00:00.486945700 +972 82 0 days 00:00:00.789945826 +972 83 0 days 00:00:00.310399206 +972 84 0 days 00:00:00.302078653 +972 85 0 days 00:00:00.165090953 +972 86 0 days 00:00:01.706768969 +972 87 0 days 00:00:00.223585433 +972 88 0 days 00:00:00.313393566 +972 89 0 days 00:00:00.513328513 +972 90 0 days 00:00:00.230174913 
+972 92 0 days 00:00:00.155255280 +972 93 0 days 00:00:00.216502393 +972 94 0 days 00:00:00.752729386 +972 95 0 days 00:00:01.326143880 +972 97 0 days 00:00:00.217977100 +972 98 0 days 00:00:00.382713132 +972 99 0 days 00:00:00.497162053 +972 100 0 days 00:00:00.302948340 +973 1 0 days 00:00:00.839798896 +973 2 0 days 00:00:00.208378537 +973 3 0 days 00:00:00.520558090 +973 4 0 days 00:00:00.320056932 +973 5 0 days 00:00:00.151809026 +973 6 0 days 00:00:00.282138495 +973 7 0 days 00:00:00.196099665 +973 8 0 days 00:00:00.864222103 +973 9 0 days 00:00:00.271727035 +973 10 0 days 00:00:00.145632993 +973 11 0 days 00:00:00.145737825 +973 12 0 days 00:00:00.119514490 +973 13 0 days 00:00:00.134003815 +973 14 0 days 00:00:00.300267856 +973 15 0 days 00:00:00.308449324 +973 16 0 days 00:00:00.203963193 +973 17 0 days 00:00:00.523017635 +973 18 0 days 00:00:00.120712368 +973 19 0 days 00:00:00.155534520 +973 20 0 days 00:00:00.155466800 +973 21 0 days 00:00:00.132402770 +973 22 0 days 00:00:00.884361202 +973 23 0 days 00:00:00.121711025 +973 24 0 days 00:00:00.179233960 +973 25 0 days 00:00:00.886490216 +973 26 0 days 00:00:00.162324537 +973 27 0 days 00:00:00.284023892 +973 28 0 days 00:00:00.179479705 +973 29 0 days 00:00:00.300332535 +973 30 0 days 00:00:00.872598064 +973 31 0 days 00:00:00.880844616 +973 32 0 days 00:00:00.210180528 +973 33 0 days 00:00:00.107433396 +973 34 0 days 00:00:00.514749060 +973 35 0 days 00:00:00.151274608 +973 36 0 days 00:00:00.505274126 +973 37 0 days 00:00:00.199358892 +973 38 0 days 00:00:00.328175225 +973 39 0 days 00:00:00.524652754 +973 40 0 days 00:00:00.331070737 +973 41 0 days 00:00:00.138155220 +973 42 0 days 00:00:00.864779184 +973 43 0 days 00:00:00.830331650 +973 44 0 days 00:00:00.140463080 +973 45 0 days 00:00:00.139496568 +973 46 0 days 00:00:00.141824843 +973 47 0 days 00:00:00.201549356 +973 48 0 days 00:00:00.208213225 +973 49 0 days 00:00:00.514319380 +973 50 0 days 00:00:00.306233265 +973 51 0 days 00:00:00.215167829 
+973 52 0 days 00:00:00.305279352 +973 53 0 days 00:00:00.923155626 +973 54 0 days 00:00:00.916501355 +973 55 0 days 00:00:00.113974062 +973 56 0 days 00:00:00.311606833 +973 57 0 days 00:00:00.284921805 +973 58 0 days 00:00:00.162201585 +973 59 0 days 00:00:00.162351380 +973 60 0 days 00:00:00.116377960 +973 61 0 days 00:00:00.322113678 +973 62 0 days 00:00:00.122193028 +973 63 0 days 00:00:00.479024150 +973 64 0 days 00:00:00.826507245 +973 65 0 days 00:00:00.139014896 +973 66 0 days 00:00:00.142329851 +973 67 0 days 00:00:00.282089440 +973 68 0 days 00:00:00.506063060 +973 69 0 days 00:00:00.110542284 +973 70 0 days 00:00:00.335039391 +973 71 0 days 00:00:00.141456412 +973 72 0 days 00:00:00.131075053 +973 73 0 days 00:00:00.105485462 +973 74 0 days 00:00:00.148868134 +973 75 0 days 00:00:00.507621016 +973 76 0 days 00:00:00.304066765 +973 77 0 days 00:00:00.116078668 +973 78 0 days 00:00:00.126276530 +973 79 0 days 00:00:00.464635255 +973 80 0 days 00:00:00.295632120 +973 81 0 days 00:00:00.492370316 +973 82 0 days 00:00:00.466796330 +973 83 0 days 00:00:00.893415837 +973 84 0 days 00:00:00.337139002 +973 85 0 days 00:00:00.150022270 +973 86 0 days 00:00:00.330164431 +973 87 0 days 00:00:00.132774806 +973 88 0 days 00:00:00.213710647 +973 89 0 days 00:00:00.327066315 +973 90 0 days 00:00:00.153386530 +973 91 0 days 00:00:00.862162410 +973 92 0 days 00:00:00.880665580 +973 93 0 days 00:00:00.199575588 +973 94 0 days 00:00:00.138580180 +973 95 0 days 00:00:00.862055196 +973 96 0 days 00:00:00.538322009 +973 97 0 days 00:00:00.301178105 +973 98 0 days 00:00:00.135620220 +973 99 0 days 00:00:00.148215588 +973 100 0 days 00:00:00.143703875 +974 1 0 days 00:00:00.088251486 +974 2 0 days 00:00:00.139387300 +974 3 0 days 00:00:00.156620170 +974 4 0 days 00:00:00.115049200 +974 6 0 days 00:00:00.465383593 +974 8 0 days 00:00:00.100267526 +974 9 0 days 00:00:00.279686333 +974 10 0 days 00:00:00.093434580 +974 11 0 days 00:00:00.102970626 +974 12 0 days 00:00:00.294385066 
+974 13 0 days 00:00:00.490790280 +974 14 0 days 00:00:00.129685100 +974 15 0 days 00:00:00.100321546 +974 16 0 days 00:00:00.194689720 +974 17 0 days 00:00:00.271209873 +974 18 0 days 00:00:00.298412720 +974 19 0 days 00:00:00.185671000 +974 20 0 days 00:00:00.126128406 +974 21 0 days 00:00:00.294347040 +974 23 0 days 00:00:00.440261046 +974 24 0 days 00:00:00.493771053 +974 25 0 days 00:00:00.100292553 +974 26 0 days 00:00:00.747849093 +974 27 0 days 00:00:00.113653800 +974 29 0 days 00:00:00.268981086 +974 30 0 days 00:00:00.127753166 +974 31 0 days 00:00:00.283024953 +974 32 0 days 00:00:00.281023440 +974 33 0 days 00:00:00.176406586 +974 34 0 days 00:00:00.186890280 +974 36 0 days 00:00:00.464694253 +974 37 0 days 00:00:00.088995586 +974 38 0 days 00:00:00.132395613 +974 39 0 days 00:00:00.336934945 +974 40 0 days 00:00:00.187444680 +974 41 0 days 00:00:00.089809066 +974 42 0 days 00:00:00.296333806 +974 43 0 days 00:00:00.455998586 +974 44 0 days 00:00:00.116208366 +974 45 0 days 00:00:00.130535780 +974 46 0 days 00:00:00.135336873 +974 47 0 days 00:00:00.270290286 +974 48 0 days 00:00:00.772682840 +974 49 0 days 00:00:00.185816173 +974 50 0 days 00:00:00.458284653 +974 52 0 days 00:00:00.739552620 +974 53 0 days 00:00:00.132483553 +974 54 0 days 00:00:00.293728826 +974 56 0 days 00:00:00.101161460 +974 57 0 days 00:00:00.130209946 +974 58 0 days 00:00:00.465619933 +974 59 0 days 00:00:00.307173412 +974 60 0 days 00:00:00.271138573 +974 61 0 days 00:00:00.741699133 +974 62 0 days 00:00:00.090196480 +974 63 0 days 00:00:00.283676480 +974 64 0 days 00:00:00.280860993 +974 65 0 days 00:00:00.141665246 +974 66 0 days 00:00:00.130551326 +974 67 0 days 00:00:00.493304406 +974 68 0 days 00:00:00.430885773 +974 69 0 days 00:00:00.271313920 +974 70 0 days 00:00:00.271646193 +974 71 0 days 00:00:00.269476893 +974 72 0 days 00:00:00.132830866 +974 73 0 days 00:00:00.281399806 +974 74 0 days 00:00:00.141558506 +974 76 0 days 00:00:00.141097353 +974 77 0 days 
00:00:00.482813970 +974 78 0 days 00:00:00.177895306 +974 79 0 days 00:00:00.105568780 +974 80 0 days 00:00:00.132819760 +974 81 0 days 00:00:00.100935853 +974 82 0 days 00:00:00.141464800 +974 83 0 days 00:00:00.134637443 +974 84 0 days 00:00:00.494790120 +974 85 0 days 00:00:00.283173613 +974 86 0 days 00:00:00.110094720 +974 87 0 days 00:00:00.105219913 +974 88 0 days 00:00:00.270845186 +974 89 0 days 00:00:00.185964033 +974 90 0 days 00:00:00.282870826 +974 91 0 days 00:00:00.101993780 +974 92 0 days 00:00:00.089523413 +974 93 0 days 00:00:00.131348826 +974 94 0 days 00:00:00.185494160 +974 95 0 days 00:00:00.756679800 +974 96 0 days 00:00:00.127175466 +974 97 0 days 00:00:00.105304600 +974 99 0 days 00:00:00.432523393 +974 100 0 days 00:00:00.282844646 +975 1 0 days 00:00:00.254569265 +975 2 0 days 00:00:00.259965590 +975 3 0 days 00:00:00.346281085 +975 4 0 days 00:00:00.200214910 +975 5 0 days 00:00:00.347216656 +975 6 0 days 00:00:00.322967465 +975 7 0 days 00:00:00.942964868 +975 8 0 days 00:00:00.294778477 +975 9 0 days 00:00:00.236424010 +975 10 0 days 00:00:00.814802315 +975 11 0 days 00:00:01.297558886 +975 12 0 days 00:00:00.217047060 +975 13 0 days 00:00:00.851832284 +975 14 0 days 00:00:00.250051378 +975 15 0 days 00:00:00.315655400 +975 16 0 days 00:00:00.332409570 +975 17 0 days 00:00:01.299753753 +975 18 0 days 00:00:00.244102136 +975 19 0 days 00:00:00.240100053 +975 20 0 days 00:00:00.516939956 +975 21 0 days 00:00:00.275874372 +975 22 0 days 00:00:00.186270937 +975 23 0 days 00:00:00.275978388 +975 24 0 days 00:00:00.522335400 +975 25 0 days 00:00:00.343482000 +975 26 0 days 00:00:00.581248236 +975 27 0 days 00:00:00.251702673 +975 28 0 days 00:00:00.254256540 +975 29 0 days 00:00:00.252799434 +975 30 0 days 00:00:00.274918125 +975 31 0 days 00:00:00.370122627 +975 32 0 days 00:00:00.243219450 +975 33 0 days 00:00:00.260612390 +975 34 0 days 00:00:00.997667982 +975 35 0 days 00:00:00.815230995 +975 36 0 days 00:00:01.461555640 +975 37 0 days 
00:00:00.280038736 +975 38 0 days 00:00:00.243844860 +975 39 0 days 00:00:01.296740980 +975 40 0 days 00:00:00.198548872 +975 41 0 days 00:00:00.258934360 +975 42 0 days 00:00:00.261162860 +975 43 0 days 00:00:00.520789610 +975 44 0 days 00:00:00.275472664 +975 45 0 days 00:00:00.341119920 +975 46 0 days 00:00:00.237035996 +975 47 0 days 00:00:00.545474360 +975 48 0 days 00:00:01.463356330 +975 49 0 days 00:00:00.208837060 +975 50 0 days 00:00:00.326030205 +975 51 0 days 00:00:00.343730715 +975 52 0 days 00:00:00.262419910 +975 53 0 days 00:00:00.324012460 +975 54 0 days 00:00:00.174711628 +975 55 0 days 00:00:00.254249824 +975 56 0 days 00:00:00.186691950 +975 57 0 days 00:00:01.436961740 +975 58 0 days 00:00:00.535348315 +975 59 0 days 00:00:00.563653180 +975 60 0 days 00:00:00.237705460 +975 61 0 days 00:00:00.229259988 +975 62 0 days 00:00:00.227512664 +975 63 0 days 00:00:00.273294472 +975 64 0 days 00:00:00.251322996 +975 65 0 days 00:00:01.484846240 +975 66 0 days 00:00:00.522826910 +975 67 0 days 00:00:00.592469910 +975 68 0 days 00:00:00.897930795 +975 69 0 days 00:00:00.538760445 +975 70 0 days 00:00:00.455650713 +975 71 0 days 00:00:01.459537165 +975 72 0 days 00:00:00.263393900 +975 73 0 days 00:00:00.342550270 +975 74 0 days 00:00:00.226529357 +975 75 0 days 00:00:00.924559725 +975 76 0 days 00:00:00.323237955 +975 77 0 days 00:00:00.270295990 +975 78 0 days 00:00:00.260788676 +975 79 0 days 00:00:00.495493915 +975 80 0 days 00:00:00.245703160 +975 81 0 days 00:00:00.818086760 +975 82 0 days 00:00:00.218045296 +975 83 0 days 00:00:00.390676758 +975 84 0 days 00:00:00.202084523 +975 85 0 days 00:00:00.197227791 +975 86 0 days 00:00:00.768004486 +975 87 0 days 00:00:00.621315855 +975 88 0 days 00:00:00.200366900 +975 89 0 days 00:00:00.242360155 +975 90 0 days 00:00:00.476364206 +975 91 0 days 00:00:00.568906045 +975 92 0 days 00:00:00.489829975 +975 93 0 days 00:00:00.472709113 +975 94 0 days 00:00:00.856346736 +975 95 0 days 00:00:00.537563348 +975 96 
0 days 00:00:00.228397145 +975 97 0 days 00:00:00.212398315 +975 98 0 days 00:00:00.385153884 +975 99 0 days 00:00:00.219588717 +975 100 0 days 00:00:01.674604331 +976 1 0 days 00:00:01.570268836 +976 2 0 days 00:00:00.299396345 +976 3 0 days 00:00:00.237474308 +976 4 0 days 00:00:00.507422240 +976 5 0 days 00:00:01.444418305 +976 6 0 days 00:00:00.235309855 +976 7 0 days 00:00:00.274175982 +976 8 0 days 00:00:00.348846450 +976 9 0 days 00:00:00.843417535 +976 10 0 days 00:00:01.359528920 +976 11 0 days 00:00:01.645286922 +976 12 0 days 00:00:00.294836266 +976 13 0 days 00:00:01.480373510 +976 14 0 days 00:00:00.273093048 +976 15 0 days 00:00:01.014015945 +976 16 0 days 00:00:00.219944552 +976 17 0 days 00:00:00.522400560 +976 18 0 days 00:00:00.183014340 +976 19 0 days 00:00:00.269320090 +976 20 0 days 00:00:01.518839332 +976 21 0 days 00:00:00.260233232 +976 22 0 days 00:00:00.310836373 +976 23 0 days 00:00:00.228658017 +976 24 0 days 00:00:01.022466475 +976 25 0 days 00:00:00.521590830 +976 26 0 days 00:00:00.221159884 +976 27 0 days 00:00:00.235091102 +976 28 0 days 00:00:00.521341660 +976 29 0 days 00:00:00.274314934 +976 30 0 days 00:00:00.949924200 +976 31 0 days 00:00:01.565135156 +976 32 0 days 00:00:00.223483970 +976 33 0 days 00:00:00.217091168 +976 34 0 days 00:00:00.289468672 +976 35 0 days 00:00:00.529877590 +976 36 0 days 00:00:00.344398012 +976 37 0 days 00:00:00.212926320 +976 38 0 days 00:00:00.573989760 +976 39 0 days 00:00:00.499351795 +976 40 0 days 00:00:00.248403408 +976 41 0 days 00:00:00.248628370 +976 42 0 days 00:00:01.440645105 +976 43 0 days 00:00:00.827481855 +976 44 0 days 00:00:00.544307880 +976 45 0 days 00:00:00.342940670 +976 46 0 days 00:00:00.290436984 +976 47 0 days 00:00:00.307350700 +976 48 0 days 00:00:00.270714412 +976 49 0 days 00:00:00.601614778 +976 50 0 days 00:00:00.545794035 +976 51 0 days 00:00:00.570171308 +976 52 0 days 00:00:00.901430085 +976 53 0 days 00:00:00.379440617 +976 54 0 days 00:00:01.455293130 +976 55 0 
days 00:00:00.250301380 +976 56 0 days 00:00:01.726600452 +976 57 0 days 00:00:00.262602216 +976 58 0 days 00:00:00.538906455 +976 59 0 days 00:00:00.563632416 +976 60 0 days 00:00:00.355947165 +976 61 0 days 00:00:00.362589988 +976 62 0 days 00:00:00.282013677 +976 63 0 days 00:00:00.203395950 +976 64 0 days 00:00:00.276237430 +976 65 0 days 00:00:00.302749657 +976 66 0 days 00:00:00.234133437 +976 67 0 days 00:00:00.228052880 +976 68 0 days 00:00:00.345149715 +976 69 0 days 00:00:00.260500208 +976 70 0 days 00:00:00.903453595 +976 71 0 days 00:00:00.360666764 +976 72 0 days 00:00:00.211975280 +976 73 0 days 00:00:00.261016217 +976 74 0 days 00:00:00.370031167 +976 75 0 days 00:00:01.475440035 +976 76 0 days 00:00:00.562936296 +976 77 0 days 00:00:00.543563248 +976 78 0 days 00:00:00.559940076 +976 79 0 days 00:00:01.479819335 +976 80 0 days 00:00:01.477140960 +976 81 0 days 00:00:00.599897457 +976 82 0 days 00:00:00.536383516 +976 83 0 days 00:00:00.227651626 +976 84 0 days 00:00:00.367082953 +976 85 0 days 00:00:00.264223780 +976 86 0 days 00:00:00.274804028 +976 87 0 days 00:00:01.475724650 +976 88 0 days 00:00:00.908032155 +976 89 0 days 00:00:01.515003544 +976 90 0 days 00:00:00.179831512 +976 91 0 days 00:00:00.228380406 +976 92 0 days 00:00:01.646345834 +976 93 0 days 00:00:00.275693782 +976 94 0 days 00:00:00.227852740 +976 95 0 days 00:00:00.322011860 +976 96 0 days 00:00:00.916277591 +976 97 0 days 00:00:00.201535850 +976 98 0 days 00:00:00.359736730 +976 99 0 days 00:00:00.826159215 +976 100 0 days 00:00:00.212400426 +977 1 0 days 00:00:00.137889917 +977 2 0 days 00:00:00.105988600 +977 3 0 days 00:00:00.106611534 +977 4 0 days 00:00:00.285808860 +977 5 0 days 00:00:00.156308850 +977 6 0 days 00:00:00.147140377 +977 7 0 days 00:00:00.209973786 +977 8 0 days 00:00:00.284592870 +977 9 0 days 00:00:00.151526897 +977 10 0 days 00:00:00.114704835 +977 11 0 days 00:00:00.127279400 +977 12 0 days 00:00:00.121075128 +977 13 0 days 00:00:00.299221710 +977 14 0 
days 00:00:00.151024082 +977 15 0 days 00:00:00.104751105 +977 16 0 days 00:00:00.484761710 +977 17 0 days 00:00:00.166206120 +977 18 0 days 00:00:00.876208980 +977 19 0 days 00:00:00.152891784 +977 20 0 days 00:00:00.193386316 +977 21 0 days 00:00:00.488107910 +977 22 0 days 00:00:00.269389193 +977 23 0 days 00:00:00.299615670 +977 24 0 days 00:00:00.534325202 +977 25 0 days 00:00:00.152304028 +977 26 0 days 00:00:00.122728253 +977 27 0 days 00:00:00.295290904 +977 28 0 days 00:00:00.299192315 +977 29 0 days 00:00:00.429410913 +977 30 0 days 00:00:00.138682004 +977 31 0 days 00:00:00.154038264 +977 32 0 days 00:00:00.883442300 +977 33 0 days 00:00:00.842041528 +977 34 0 days 00:00:00.154078197 +977 35 0 days 00:00:00.939471588 +977 36 0 days 00:00:00.857909404 +977 37 0 days 00:00:00.154426246 +977 38 0 days 00:00:00.107295360 +977 39 0 days 00:00:00.284154435 +977 40 0 days 00:00:00.330941355 +977 41 0 days 00:00:00.303199335 +977 42 0 days 00:00:00.341732325 +977 43 0 days 00:00:00.302027545 +977 44 0 days 00:00:00.160822956 +977 45 0 days 00:00:00.149100860 +977 46 0 days 00:00:00.321192023 +977 47 0 days 00:00:00.485776540 +977 48 0 days 00:00:00.186627980 +977 49 0 days 00:00:00.112452050 +977 50 0 days 00:00:00.300653145 +977 51 0 days 00:00:00.194803895 +977 52 0 days 00:00:00.823551060 +977 53 0 days 00:00:00.114970416 +977 54 0 days 00:00:00.127899093 +977 55 0 days 00:00:00.484974650 +977 56 0 days 00:00:00.891945296 +977 57 0 days 00:00:00.157763928 +977 58 0 days 00:00:00.508117235 +977 59 0 days 00:00:00.108431800 +977 60 0 days 00:00:00.304033325 +977 61 0 days 00:00:00.139299005 +977 62 0 days 00:00:00.310640050 +977 63 0 days 00:00:00.540587285 +977 64 0 days 00:00:00.145270516 +977 65 0 days 00:00:00.204046360 +977 66 0 days 00:00:00.129811530 +977 67 0 days 00:00:00.221190201 +977 68 0 days 00:00:00.216895975 +977 69 0 days 00:00:00.343051463 +977 70 0 days 00:00:00.205513962 +977 71 0 days 00:00:00.484517025 +977 72 0 days 00:00:00.824977345 
+977 73 0 days 00:00:00.319444305 +977 74 0 days 00:00:00.125104340 +977 75 0 days 00:00:00.131405693 +977 76 0 days 00:00:00.304511485 +977 77 0 days 00:00:00.144663772 +977 78 0 days 00:00:00.161421553 +977 79 0 days 00:00:00.114996340 +977 80 0 days 00:00:00.116226624 +977 81 0 days 00:00:00.216609509 +977 82 0 days 00:00:00.112598946 +977 83 0 days 00:00:00.839010675 +977 84 0 days 00:00:00.291932970 +977 85 0 days 00:00:00.320217726 +977 86 0 days 00:00:00.840207095 +977 87 0 days 00:00:00.307520228 +977 88 0 days 00:00:00.142117296 +977 89 0 days 00:00:00.481399650 +977 90 0 days 00:00:00.274544725 +977 91 0 days 00:00:00.838240425 +977 93 0 days 00:00:00.829235980 +977 94 0 days 00:00:00.112722437 +977 95 0 days 00:00:00.811422935 +977 96 0 days 00:00:00.113979020 +977 97 0 days 00:00:00.522908967 +977 98 0 days 00:00:00.282775260 +977 99 0 days 00:00:00.195484625 +977 100 0 days 00:00:00.201777888 +978 1 0 days 00:00:00.301401135 +978 2 0 days 00:00:00.125555590 +978 3 0 days 00:00:00.502665980 +978 4 0 days 00:00:00.157109795 +978 5 0 days 00:00:00.828364510 +978 6 0 days 00:00:00.306438680 +978 7 0 days 00:00:00.165087226 +978 8 0 days 00:00:00.187908015 +978 9 0 days 00:00:00.312561980 +978 10 0 days 00:00:00.168414742 +978 11 0 days 00:00:00.484997080 +978 12 0 days 00:00:00.902035347 +978 14 0 days 00:00:00.155312605 +978 15 0 days 00:00:00.329468997 +978 16 0 days 00:00:00.301609170 +978 17 0 days 00:00:00.144125486 +978 18 0 days 00:00:00.813529955 +978 19 0 days 00:00:00.207529773 +978 20 0 days 00:00:00.504887484 +978 21 0 days 00:00:00.134606932 +978 22 0 days 00:00:00.202246580 +978 23 0 days 00:00:00.292497720 +978 24 0 days 00:00:00.142882367 +978 25 0 days 00:00:00.511664228 +978 26 0 days 00:00:00.137640145 +978 27 0 days 00:00:00.301571675 +978 28 0 days 00:00:00.149283026 +978 29 0 days 00:00:00.843000145 +978 30 0 days 00:00:00.867169260 +978 31 0 days 00:00:00.137855193 +978 32 0 days 00:00:00.199312230 +978 33 0 days 00:00:00.304663564 
+978 34 0 days 00:00:00.134349964 +978 35 0 days 00:00:00.469326565 +978 36 0 days 00:00:00.133278524 +978 37 0 days 00:00:00.868974420 +978 38 0 days 00:00:00.278979986 +978 39 0 days 00:00:00.199295108 +978 40 0 days 00:00:00.123631280 +978 41 0 days 00:00:00.096947830 +978 42 0 days 00:00:00.502021040 +978 43 0 days 00:00:00.147978966 +978 44 0 days 00:00:00.490875426 +978 45 0 days 00:00:00.287977215 +978 46 0 days 00:00:00.123553700 +978 47 0 days 00:00:00.191248144 +978 48 0 days 00:00:00.110386156 +978 49 0 days 00:00:00.133287404 +978 50 0 days 00:00:00.851360140 +978 51 0 days 00:00:00.528091044 +978 52 0 days 00:00:00.312765836 +978 53 0 days 00:00:00.148728122 +978 54 0 days 00:00:00.125473787 +978 55 0 days 00:00:00.137394026 +978 56 0 days 00:00:00.813269000 +978 57 0 days 00:00:00.118799008 +978 58 0 days 00:00:00.157875976 +978 59 0 days 00:00:00.169769600 +978 60 0 days 00:00:00.829323655 +978 61 0 days 00:00:00.125747896 +978 62 0 days 00:00:00.320376584 +978 63 0 days 00:00:00.210666856 +978 64 0 days 00:00:00.151554441 +978 65 0 days 00:00:00.135109738 +978 66 0 days 00:00:00.311041300 +978 67 0 days 00:00:00.167107683 +978 68 0 days 00:00:00.147937904 +978 69 0 days 00:00:00.525062260 +978 70 0 days 00:00:00.305817740 +978 71 0 days 00:00:00.827953935 +978 72 0 days 00:00:00.303772324 +978 73 0 days 00:00:00.149270251 +978 74 0 days 00:00:00.210718645 +978 75 0 days 00:00:00.124427212 +978 76 0 days 00:00:00.143266867 +978 77 0 days 00:00:00.324270603 +978 78 0 days 00:00:00.467788145 +978 79 0 days 00:00:00.095812293 +978 80 0 days 00:00:00.172769028 +978 81 0 days 00:00:00.301185105 +978 82 0 days 00:00:00.190968233 +978 83 0 days 00:00:00.143626040 +978 84 0 days 00:00:00.149164597 +978 85 0 days 00:00:00.190801140 +978 86 0 days 00:00:00.324198086 +978 87 0 days 00:00:00.171350165 +978 88 0 days 00:00:00.127750413 +978 89 0 days 00:00:00.310206245 +978 90 0 days 00:00:00.151164061 +978 91 0 days 00:00:00.219060525 +978 92 0 days 
00:00:00.136318840 +978 93 0 days 00:00:00.129719035 +978 94 0 days 00:00:00.133710226 +978 95 0 days 00:00:00.826927370 +978 96 0 days 00:00:00.189740255 +978 97 0 days 00:00:00.783438353 +978 98 0 days 00:00:00.286131980 +978 99 0 days 00:00:00.293551600 +978 100 0 days 00:00:00.116705643 +979 1 0 days 00:00:52.657682113 +979 2 0 days 00:00:04.538570480 +979 3 0 days 00:05:25.184854600 +979 5 0 days 00:00:50.582703395 +979 6 0 days 00:00:06.972581731 +980 2 0 days 00:00:53.530970315 +980 3 0 days 00:00:27.988163405 +980 4 0 days 00:00:16.537128310 +980 6 0 days 00:01:56.865014530 +980 7 0 days 00:00:11.320256456 +980 8 0 days 00:00:09.340799110 +980 10 0 days 00:00:30.323673545 +980 11 0 days 00:01:35.441540135 +980 12 0 days 00:01:49.452984330 +981 1 0 days 00:01:30.952652751 +981 3 0 days 00:00:15.480857630 +981 4 0 days 00:03:18.860541832 +982 1 0 days 00:08:35.842211068 +983 1 0 days 00:00:41.530995486 +983 2 0 days 00:04:56.148434315 +984 1 0 days 00:00:21.952655608 +984 2 0 days 00:01:01.510555234 +984 3 0 days 00:00:32.063467203 +984 4 0 days 00:00:39.088800690 +984 5 0 days 00:00:21.208069882 +984 6 0 days 00:00:36.469494651 +984 7 0 days 00:01:05.184481350 +985 1 0 days 00:00:13.238534777 +985 2 0 days 00:00:23.068289992 +985 3 0 days 00:00:12.410513200 +985 4 0 days 00:00:12.239152193 +985 5 0 days 00:00:47.019656996 +985 6 0 days 00:00:26.437583993 +985 7 0 days 00:00:21.929034557 +985 8 0 days 00:00:15.288724878 +986 1 0 days 00:03:34.875675843 +987 1 0 days 00:05:16.768095972 +988 1 0 days 00:00:32.252430626 +988 2 0 days 00:00:16.765985513 +988 3 0 days 00:00:28.744018355 +988 4 0 days 00:00:23.194719560 +988 5 0 days 00:00:18.003887406 +988 6 0 days 00:01:11.027778536 +988 7 0 days 00:01:32.893668646 +988 8 0 days 00:00:56.083086780 +988 9 0 days 00:00:16.640841333 +988 10 0 days 00:01:13.658786364 +989 1 0 days 00:00:23.062098600 +989 2 0 days 00:00:23.300285080 +989 3 0 days 00:00:14.588089976 +989 4 0 days 00:00:15.586901108 +989 6 0 days 
00:00:13.256426072 +989 7 0 days 00:00:13.607328816 +989 8 0 days 00:00:44.106893031 +989 9 0 days 00:00:37.933651010 +990 1 0 days 00:01:02.953378935 +990 2 0 days 00:00:57.160362813 +990 3 0 days 00:00:22.338661331 +990 4 0 days 00:01:41.884967391 +991 1 0 days 00:00:15.617144733 +991 2 0 days 00:00:14.307659092 +991 3 0 days 00:00:11.256667326 +991 4 0 days 00:00:45.713126013 +991 5 0 days 00:00:19.636519946 +991 6 0 days 00:00:10.191865000 +991 7 0 days 00:00:16.951437973 +991 8 0 days 00:00:11.398204646 +991 9 0 days 00:00:45.645379833 +991 10 0 days 00:00:44.796414440 +991 11 0 days 00:00:19.770637266 +991 13 0 days 00:00:45.541845133 +991 14 0 days 00:00:11.003394960 +991 15 0 days 00:00:15.009758104 +991 16 0 days 00:00:13.351740360 +991 17 0 days 00:00:34.310382140 +992 1 0 days 00:00:31.537181420 +992 2 0 days 00:00:15.584451965 +992 3 0 days 00:00:33.562080485 +992 4 0 days 00:00:14.801656420 +992 5 0 days 00:00:19.545036553 +992 6 0 days 00:00:13.872650673 +992 7 0 days 00:00:32.218767286 +992 8 0 days 00:00:23.090336008 +992 9 0 days 00:00:21.716606000 +992 10 0 days 00:00:31.731661426 +992 11 0 days 00:00:22.240357172 +992 12 0 days 00:00:31.711036186 +992 13 0 days 00:00:13.842186373 +992 14 0 days 00:00:16.001121428 +992 15 0 days 00:00:13.179798206 +992 16 0 days 00:00:30.424347386 +992 17 0 days 00:00:13.181220913 +992 18 0 days 00:00:29.919568133 +992 19 0 days 00:00:13.161158033 +992 20 0 days 00:00:32.188542726 +992 21 0 days 00:00:19.944225846 +992 22 0 days 00:00:33.519193900 +993 1 0 days 00:00:15.278138380 +993 2 0 days 00:00:31.958550086 +993 3 0 days 00:00:31.942449426 +993 4 0 days 00:00:14.964491490 +993 5 0 days 00:00:38.033501840 +993 6 0 days 00:00:14.970877915 +993 7 0 days 00:00:34.511971230 +993 8 0 days 00:00:34.507531480 +993 9 0 days 00:00:13.410277986 +993 10 0 days 00:00:19.769567740 +993 11 0 days 00:00:22.760381928 +993 12 0 days 00:00:31.888096720 +993 13 0 days 00:00:15.956076905 +993 14 0 days 00:00:15.184489345 +993 15 
0 days 00:00:35.723916570 +993 16 0 days 00:00:14.317686966 +993 17 0 days 00:00:15.982236660 +993 18 0 days 00:00:31.888930606 +993 19 0 days 00:00:15.316822795 +993 20 0 days 00:00:38.831452576 +994 1 0 days 00:00:12.064848213 +994 2 0 days 00:00:19.875319593 +994 3 0 days 00:00:19.860701946 +994 4 0 days 00:00:19.863328946 +994 5 0 days 00:00:20.452800766 +994 6 0 days 00:00:08.920771246 +994 7 0 days 00:00:21.377571190 +994 8 0 days 00:00:20.182152886 +994 9 0 days 00:00:19.852083973 +994 10 0 days 00:00:08.699139540 +994 11 0 days 00:00:20.975771315 +994 12 0 days 00:00:08.440161800 +994 13 0 days 00:00:08.486770886 +994 14 0 days 00:00:11.721957700 +994 15 0 days 00:00:09.464226145 +994 16 0 days 00:00:08.428972826 +994 17 0 days 00:00:08.290659593 +994 18 0 days 00:00:12.156148826 +994 19 0 days 00:00:08.270921346 +994 20 0 days 00:00:12.487675900 +994 21 0 days 00:00:12.190725613 +994 22 0 days 00:00:08.328703166 +994 23 0 days 00:00:20.185346386 +994 24 0 days 00:00:09.451265445 +994 25 0 days 00:00:13.080985510 +994 26 0 days 00:00:13.264079520 +994 27 0 days 00:00:08.714206633 +994 28 0 days 00:00:13.286992910 +994 29 0 days 00:00:12.165136746 +994 30 0 days 00:00:08.323613453 +994 31 0 days 00:00:19.856612360 +994 32 0 days 00:00:13.368592975 +994 33 0 days 00:00:08.433148666 +994 34 0 days 00:00:20.403177720 +994 35 0 days 00:00:08.997455800 +994 36 0 days 00:00:10.683690513 +994 37 0 days 00:00:09.539969070 +994 38 0 days 00:00:18.792950620 +994 39 0 days 00:00:12.046876800 +994 40 0 days 00:00:21.000232385 +995 1 0 days 00:00:19.251612686 +995 2 0 days 00:00:21.087646905 +995 3 0 days 00:00:19.264476200 +995 4 0 days 00:00:09.317815660 +995 5 0 days 00:00:18.858332486 +995 6 0 days 00:00:13.747922935 +995 7 0 days 00:00:11.961695086 +995 8 0 days 00:00:25.366362511 +995 9 0 days 00:00:18.854414073 +995 10 0 days 00:00:10.617943472 +995 11 0 days 00:00:19.942274420 +995 12 0 days 00:00:23.756123912 +995 13 0 days 00:00:12.307282086 +995 14 0 days 
00:00:09.377054170 +995 15 0 days 00:00:18.869767186 +995 16 0 days 00:00:13.808012670 +995 17 0 days 00:00:13.372887535 +995 18 0 days 00:00:19.945522066 +995 19 0 days 00:00:11.761465720 +995 20 0 days 00:00:08.889839493 +995 21 0 days 00:00:19.960092473 +995 22 0 days 00:00:19.279832226 +995 23 0 days 00:00:19.947838600 +995 24 0 days 00:00:10.569367988 +995 25 0 days 00:00:12.129585353 +995 26 0 days 00:00:19.976670766 +995 27 0 days 00:00:13.165973030 +995 28 0 days 00:00:11.775896453 +995 29 0 days 00:00:11.971949473 +995 30 0 days 00:00:09.329773235 +995 31 0 days 00:00:19.290537520 +996 1 0 days 00:00:29.993744526 +996 2 0 days 00:00:18.999705800 +996 3 0 days 00:00:30.400906600 +996 4 0 days 00:00:19.906966173 +996 5 0 days 00:00:22.324507460 +996 6 0 days 00:00:18.810789633 +996 7 0 days 00:00:13.303326120 +996 8 0 days 00:00:33.598148565 +996 9 0 days 00:00:30.037934646 +996 10 0 days 00:00:19.880942220 +996 11 0 days 00:00:19.511441900 +996 12 0 days 00:00:37.595545280 +996 13 0 days 00:00:29.849304180 +996 14 0 days 00:00:32.222299800 +996 15 0 days 00:00:31.803288486 +996 16 0 days 00:00:15.091458450 +996 17 0 days 00:00:30.432148393 +996 18 0 days 00:00:29.797868273 +996 19 0 days 00:00:33.597425295 +996 20 0 days 00:00:19.889145560 +997 1 0 days 00:00:13.191268855 +997 2 0 days 00:00:12.402958626 +997 3 0 days 00:00:08.249978906 +997 4 0 days 00:00:11.808691240 +997 5 0 days 00:00:20.026543153 +997 6 0 days 00:00:20.377041133 +997 7 0 days 00:00:13.878548425 +997 8 0 days 00:00:09.224482580 +997 9 0 days 00:00:22.419421070 +997 10 0 days 00:00:20.048449286 +997 11 0 days 00:00:11.812526813 +997 12 0 days 00:00:15.048407916 +997 13 0 days 00:00:11.828794893 +997 14 0 days 00:00:18.957397746 +997 15 0 days 00:00:09.427583465 +997 16 0 days 00:00:18.954821286 +997 17 0 days 00:00:21.190717335 +997 18 0 days 00:00:13.215612790 +997 19 0 days 00:00:22.440018415 +997 20 0 days 00:00:13.500455470 +997 21 0 days 00:00:11.831888766 +997 22 0 days 
00:00:09.240272825 +997 23 0 days 00:00:09.333909165 +997 24 0 days 00:00:20.051814160 +997 25 0 days 00:00:18.926962193 +997 26 0 days 00:00:08.283553726 +997 27 0 days 00:00:20.905923206 +997 28 0 days 00:00:19.867021646 +997 29 0 days 00:00:14.774086320 +997 30 0 days 00:00:12.407762300 +997 31 0 days 00:00:08.666284426 +997 32 0 days 00:00:11.809920040 +997 33 0 days 00:00:20.038557880 +997 34 0 days 00:00:09.213207095 +997 35 0 days 00:00:12.402082653 +998 1 0 days 00:00:05.195420766 +998 2 0 days 00:00:04.957556900 +998 3 0 days 00:00:05.136305195 +998 4 0 days 00:00:05.128681313 +998 5 0 days 00:00:07.894180360 +998 6 0 days 00:00:16.301399935 +998 7 0 days 00:00:07.377818015 +998 8 0 days 00:00:14.653875246 +998 9 0 days 00:00:07.791545380 +998 10 0 days 00:00:08.333822810 +998 11 0 days 00:00:07.760310760 +998 12 0 days 00:00:16.004514045 +998 13 0 days 00:00:15.928543290 +998 14 0 days 00:00:13.686224153 +998 15 0 days 00:00:08.386421420 +998 16 0 days 00:00:08.962892392 +998 17 0 days 00:00:13.752311960 +998 18 0 days 00:00:06.107080635 +998 19 0 days 00:00:13.930559593 +998 20 0 days 00:00:15.964755015 +998 21 0 days 00:00:13.839861540 +998 22 0 days 00:00:13.629884700 +998 23 0 days 00:00:08.475813635 +998 24 0 days 00:00:04.431656286 +998 25 0 days 00:00:13.790486026 +998 26 0 days 00:00:06.003899033 +998 27 0 days 00:00:15.555759225 +998 28 0 days 00:00:05.332577680 +998 29 0 days 00:00:04.732128253 +998 30 0 days 00:00:17.298944488 +998 31 0 days 00:00:08.208258646 +998 32 0 days 00:00:04.585120240 +998 33 0 days 00:00:14.651962266 +998 34 0 days 00:00:14.611575133 +998 35 0 days 00:00:04.611255226 +998 36 0 days 00:00:07.543267360 +998 37 0 days 00:00:05.166347410 +998 38 0 days 00:00:07.704733146 +998 39 0 days 00:00:14.217201300 +998 40 0 days 00:00:05.772738420 +998 41 0 days 00:00:14.350287113 +998 42 0 days 00:00:16.169575210 +998 43 0 days 00:00:13.687938600 +998 44 0 days 00:00:08.015047566 +998 45 0 days 00:00:15.935433620 +998 46 0 days 
00:00:14.457836053 +998 47 0 days 00:00:14.036109880 +998 48 0 days 00:00:07.902814466 +998 49 0 days 00:00:08.832657250 +999 1 0 days 00:00:02.337182340 +999 2 0 days 00:00:07.178274713 +999 3 0 days 00:00:04.288345305 +999 4 0 days 00:00:07.478443360 +999 5 0 days 00:00:03.059763880 +999 6 0 days 00:00:07.147822106 +999 7 0 days 00:00:07.758298555 +999 8 0 days 00:00:04.036247720 +999 9 0 days 00:00:02.868333480 +999 10 0 days 00:00:08.076513545 +999 11 0 days 00:00:02.567956130 +999 12 0 days 00:00:02.629310325 +999 13 0 days 00:00:05.450371420 +999 14 0 days 00:00:04.314850890 +999 15 0 days 00:00:03.477189140 +999 16 0 days 00:00:02.920257585 +999 17 0 days 00:00:05.048600805 +999 18 0 days 00:00:08.065622370 +999 19 0 days 00:00:03.903336520 +999 20 0 days 00:00:06.978281380 +999 21 0 days 00:00:02.554295530 +999 22 0 days 00:00:07.027770033 +999 23 0 days 00:00:07.808322150 +999 24 0 days 00:00:02.795047385 +999 25 0 days 00:00:04.011541100 +999 26 0 days 00:00:07.066349060 +999 27 0 days 00:00:03.088140715 +999 28 0 days 00:00:04.700330456 +999 29 0 days 00:00:02.414262080 +999 30 0 days 00:00:04.699974352 +999 31 0 days 00:00:03.980373680 +999 32 0 days 00:00:04.515102750 +999 33 0 days 00:00:04.427954865 +999 34 0 days 00:00:03.961412560 +999 35 0 days 00:00:08.153096965 +999 36 0 days 00:00:03.957503480 +999 37 0 days 00:00:05.128946540 +999 38 0 days 00:00:08.073497070 +999 39 0 days 00:00:04.402669790 +999 40 0 days 00:00:07.791389025 +999 41 0 days 00:00:03.096673650 +999 42 0 days 00:00:08.114177970 +999 43 0 days 00:00:08.057077295 +999 44 0 days 00:00:07.819659185 +999 45 0 days 00:00:04.333623230 +999 46 0 days 00:00:06.704152326 +999 47 0 days 00:00:02.471302673 +999 48 0 days 00:00:03.049202480 +999 49 0 days 00:00:04.019919060 +999 50 0 days 00:00:04.390687765 +999 51 0 days 00:00:08.253363205 +999 52 0 days 00:00:02.638917095 +999 53 0 days 00:00:02.618768490 +999 54 0 days 00:00:07.141659560 +999 55 0 days 00:00:04.336117760 +999 56 0 days 
00:00:03.933675773 +999 57 0 days 00:00:03.671451745 +999 58 0 days 00:00:06.977095560 +999 59 0 days 00:00:07.426125006 +999 60 0 days 00:00:02.514336053 +999 61 0 days 00:00:04.390878060 +999 62 0 days 00:00:03.979535100 +999 63 0 days 00:00:02.543178493 +999 64 0 days 00:00:02.620525670 +999 65 0 days 00:00:04.029098586 +999 66 0 days 00:00:07.110442040 +999 67 0 days 00:00:02.839548915 +999 68 0 days 00:00:04.063938993 +999 69 0 days 00:00:03.956343740 +999 70 0 days 00:00:02.756203400 +999 71 0 days 00:00:03.267781360 +999 72 0 days 00:00:02.262821406 +999 73 0 days 00:00:02.473891146 +999 74 0 days 00:00:04.314915215 +999 75 0 days 00:00:07.222232613 +999 76 0 days 00:00:04.437021840 +999 77 0 days 00:00:04.542602126 +999 78 0 days 00:00:04.112748053 +999 79 0 days 00:00:02.548379700 +999 80 0 days 00:00:07.251803006 +999 81 0 days 00:00:03.317755806 +999 82 0 days 00:00:02.628786560 +999 83 0 days 00:00:04.792708135 +999 84 0 days 00:00:07.406973386 +999 85 0 days 00:00:02.866452795 +999 86 0 days 00:00:02.783852590 +999 87 0 days 00:00:02.635938905 +999 88 0 days 00:00:02.519701533 +999 89 0 days 00:00:08.259937505 +999 90 0 days 00:00:04.894324910 +999 91 0 days 00:00:04.295595740 +999 92 0 days 00:00:04.569827520 +999 93 0 days 00:00:04.063157233 +999 94 0 days 00:00:02.686649005 +999 95 0 days 00:00:08.198374985 +999 96 0 days 00:00:07.339700466 +999 97 0 days 00:00:07.656562066 +999 98 0 days 00:00:02.806360506 +999 99 0 days 00:00:08.211762565 +999 100 0 days 00:00:04.159644800 +1000 1 0 days 00:00:15.229990913 +1000 2 0 days 00:00:14.802759373 +1000 3 0 days 00:00:14.426881360 +1000 4 0 days 00:00:14.387110926 +1000 5 0 days 00:00:04.474250126 +1000 6 0 days 00:00:07.496958613 +1000 7 0 days 00:00:07.295855920 +1000 8 0 days 00:00:09.177990520 +1000 9 0 days 00:00:08.120571366 +1000 10 0 days 00:00:14.529569086 +1000 11 0 days 00:00:14.700876640 +1000 12 0 days 00:00:04.816515880 +1000 13 0 days 00:00:15.241794900 +1000 14 0 days 00:00:07.678360746 
+1000 15 0 days 00:00:14.631146806 +1000 16 0 days 00:00:13.703373720 +1000 17 0 days 00:00:08.157242393 +1000 18 0 days 00:00:04.985511846 +1000 19 0 days 00:00:07.501860186 +1000 20 0 days 00:00:04.602598426 +1000 21 0 days 00:00:08.574972346 +1000 22 0 days 00:00:07.848018106 +1000 23 0 days 00:00:04.321021366 +1000 24 0 days 00:00:07.663106130 +1000 25 0 days 00:00:08.914483693 +1000 26 0 days 00:00:07.625418920 +1000 27 0 days 00:00:14.251125353 +1000 28 0 days 00:00:04.768964880 +1000 29 0 days 00:00:05.296450606 +1000 30 0 days 00:00:14.864652986 +1000 31 0 days 00:00:06.367888893 +1000 32 0 days 00:00:15.945634166 +1000 33 0 days 00:00:14.385857486 +1000 34 0 days 00:00:14.664611173 +1000 35 0 days 00:00:14.549686360 +1000 36 0 days 00:00:13.762032713 +1000 37 0 days 00:00:07.601727340 +1000 38 0 days 00:00:14.901981560 +1000 39 0 days 00:00:07.669094013 +1000 40 0 days 00:00:04.604856326 +1000 41 0 days 00:00:05.867805446 +1000 42 0 days 00:00:07.924298173 +1000 43 0 days 00:00:08.016263020 +1000 44 0 days 00:00:14.652954540 +1000 45 0 days 00:00:09.655056553 +1000 46 0 days 00:00:13.993184946 +1000 47 0 days 00:00:05.190629140 +1000 48 0 days 00:00:06.958532953 +1000 49 0 days 00:00:14.713478520 +1000 50 0 days 00:00:07.650798566 +1000 51 0 days 00:00:07.330491353 +1000 52 0 days 00:00:13.848676426 +1000 53 0 days 00:00:06.868166746 +1000 54 0 days 00:00:04.697206340 +1000 55 0 days 00:00:07.939926480 +1000 56 0 days 00:00:06.631649340 +1000 57 0 days 00:00:04.508268673 +1000 58 0 days 00:00:05.522971820 +1000 59 0 days 00:00:04.283022586 +1000 60 0 days 00:00:07.456893826 +1000 61 0 days 00:00:07.732069780 +1001 1 0 days 00:00:03.942331000 +1001 2 0 days 00:00:07.369269100 +1001 3 0 days 00:00:03.996326500 +1001 4 0 days 00:00:03.315859986 +1001 5 0 days 00:00:02.779804333 +1001 6 0 days 00:00:03.926395933 +1001 7 0 days 00:00:03.796844580 +1001 8 0 days 00:00:03.812491400 +1001 9 0 days 00:00:02.608596380 +1001 10 0 days 00:00:07.288985760 +1001 11 0 
days 00:00:07.407253880 +1001 12 0 days 00:00:04.166549873 +1001 13 0 days 00:00:07.536437473 +1001 14 0 days 00:00:03.869058426 +1001 15 0 days 00:00:02.719481986 +1001 16 0 days 00:00:02.487703506 +1001 17 0 days 00:00:07.275065413 +1001 18 0 days 00:00:02.718321760 +1001 19 0 days 00:00:07.043820093 +1001 20 0 days 00:00:07.061998906 +1001 21 0 days 00:00:05.204010033 +1001 22 0 days 00:00:07.139988940 +1001 23 0 days 00:00:08.099089100 +1001 24 0 days 00:00:04.050782520 +1001 25 0 days 00:00:03.045229940 +1001 26 0 days 00:00:02.715521613 +1001 27 0 days 00:00:04.110256193 +1001 28 0 days 00:00:07.083322780 +1001 29 0 days 00:00:07.678787833 +1001 30 0 days 00:00:02.643759973 +1001 31 0 days 00:00:03.877144180 +1001 32 0 days 00:00:04.878784333 +1001 33 0 days 00:00:02.454306906 +1001 34 0 days 00:00:03.895265666 +1001 35 0 days 00:00:03.911800120 +1001 36 0 days 00:00:07.303710260 +1001 37 0 days 00:00:07.324956793 +1001 38 0 days 00:00:02.334886080 +1001 39 0 days 00:00:07.670996406 +1001 40 0 days 00:00:04.949726453 +1001 41 0 days 00:00:02.284790353 +1001 42 0 days 00:00:02.412346000 +1001 43 0 days 00:00:03.867086926 +1001 44 0 days 00:00:07.886898566 +1001 45 0 days 00:00:03.762371060 +1001 46 0 days 00:00:03.996024753 +1001 47 0 days 00:00:02.382677573 +1001 48 0 days 00:00:02.331499733 +1001 49 0 days 00:00:05.039164013 +1001 50 0 days 00:00:02.823713380 +1001 51 0 days 00:00:02.444197705 +1001 52 0 days 00:00:04.591221940 +1001 53 0 days 00:00:03.998732193 +1001 54 0 days 00:00:04.435058680 +1001 55 0 days 00:00:07.266755166 +1001 56 0 days 00:00:04.042381240 +1001 57 0 days 00:00:07.023701020 +1001 58 0 days 00:00:07.349169060 +1001 59 0 days 00:00:03.853785246 +1001 60 0 days 00:00:02.534260193 +1001 61 0 days 00:00:02.689202053 +1001 62 0 days 00:00:02.281510213 +1001 63 0 days 00:00:04.305477573 +1001 64 0 days 00:00:04.202586575 +1001 65 0 days 00:00:07.130770033 +1001 66 0 days 00:00:04.107858840 +1001 67 0 days 00:00:03.882798446 +1001 68 0 days 
00:00:07.709758720 +1001 69 0 days 00:00:07.015032053 +1001 70 0 days 00:00:04.725436800 +1001 71 0 days 00:00:07.309483266 +1001 72 0 days 00:00:04.894517133 +1001 73 0 days 00:00:07.644024480 +1001 74 0 days 00:00:02.697030860 +1001 75 0 days 00:00:02.409838640 +1001 76 0 days 00:00:07.418994853 +1001 77 0 days 00:00:07.080244973 +1001 78 0 days 00:00:03.933208433 +1001 79 0 days 00:00:02.455698833 +1001 80 0 days 00:00:04.004911420 +1001 81 0 days 00:00:07.590899766 +1001 82 0 days 00:00:02.563092386 +1001 83 0 days 00:00:07.049322946 +1001 84 0 days 00:00:07.692606073 +1001 85 0 days 00:00:03.061156615 +1001 86 0 days 00:00:07.650458193 +1001 87 0 days 00:00:02.636195993 +1001 88 0 days 00:00:07.255519800 +1001 89 0 days 00:00:04.277961120 +1001 90 0 days 00:00:03.775235266 +1001 91 0 days 00:00:07.127002146 +1001 92 0 days 00:00:04.119468166 +1001 93 0 days 00:00:02.928472893 +1001 94 0 days 00:00:04.558579386 +1001 95 0 days 00:00:04.332912806 +1001 96 0 days 00:00:03.023866720 +1001 97 0 days 00:00:07.674554260 +1001 98 0 days 00:00:07.835369033 +1001 99 0 days 00:00:03.902888693 +1001 100 0 days 00:00:02.785086993 +1002 1 0 days 00:00:04.317379826 +1002 2 0 days 00:00:07.509685840 +1002 3 0 days 00:00:04.476581920 +1002 4 0 days 00:00:07.514892426 +1002 5 0 days 00:00:04.514771506 +1002 6 0 days 00:00:13.585483580 +1002 7 0 days 00:00:07.529801953 +1002 8 0 days 00:00:06.531821520 +1002 9 0 days 00:00:13.998103280 +1002 10 0 days 00:00:04.435895613 +1002 11 0 days 00:00:05.076099120 +1002 12 0 days 00:00:07.450284613 +1002 13 0 days 00:00:13.622663533 +1002 14 0 days 00:00:04.440812713 +1002 15 0 days 00:00:13.589316786 +1002 16 0 days 00:00:07.432650686 +1002 17 0 days 00:00:04.194143966 +1002 18 0 days 00:00:13.657549153 +1002 19 0 days 00:00:05.128415753 +1002 20 0 days 00:00:04.470046953 +1002 21 0 days 00:00:04.235808813 +1002 22 0 days 00:00:13.482114066 +1002 23 0 days 00:00:13.353191153 +1002 24 0 days 00:00:04.208093533 +1002 25 0 days 
00:00:07.682214693 +1002 26 0 days 00:00:13.412776900 +1002 27 0 days 00:00:07.336351365 +1002 28 0 days 00:00:04.196644720 +1002 29 0 days 00:00:13.756270633 +1002 30 0 days 00:00:04.911124040 +1002 31 0 days 00:00:05.726627685 +1002 32 0 days 00:00:06.145507760 +1002 33 0 days 00:00:13.630756006 +1002 34 0 days 00:00:07.209465780 +1002 35 0 days 00:00:05.040139305 +1002 36 0 days 00:00:05.441924960 +1002 37 0 days 00:00:08.231633580 +1002 38 0 days 00:00:06.817232930 +1002 39 0 days 00:00:13.905030813 +1002 40 0 days 00:00:15.221775515 +1002 41 0 days 00:00:04.965846626 +1002 42 0 days 00:00:14.297532913 +1002 43 0 days 00:00:07.542483966 +1002 44 0 days 00:00:13.333335306 +1002 45 0 days 00:00:13.476502953 +1002 46 0 days 00:00:05.192587025 +1002 47 0 days 00:00:06.948391946 +1002 48 0 days 00:00:04.219975946 +1002 49 0 days 00:00:07.311981213 +1002 50 0 days 00:00:05.197643205 +1002 51 0 days 00:00:13.811898260 +1002 52 0 days 00:00:14.401310113 +1002 53 0 days 00:00:07.411137613 +1002 54 0 days 00:00:07.941603006 +1002 55 0 days 00:00:05.146053193 +1002 56 0 days 00:00:07.420218646 +1002 57 0 days 00:00:13.689552806 +1002 58 0 days 00:00:04.907550673 +1002 59 0 days 00:00:04.685524853 +1002 60 0 days 00:00:04.494568906 +1002 61 0 days 00:00:07.531693920 +1002 62 0 days 00:00:05.193865160 +1002 63 0 days 00:00:15.782668675 +1002 64 0 days 00:00:07.528237673 +1002 65 0 days 00:00:05.248035800 +1002 66 0 days 00:00:04.447313266 +1002 67 0 days 00:00:13.709278960 +1003 1 0 days 00:00:09.035954665 +1003 2 0 days 00:00:08.656680306 +1003 3 0 days 00:00:07.799215153 +1003 4 0 days 00:00:14.371412340 +1003 5 0 days 00:00:08.609699840 +1003 6 0 days 00:00:17.975593763 +1003 7 0 days 00:00:05.318659126 +1003 8 0 days 00:00:13.612073440 +1003 9 0 days 00:00:08.024464586 +1003 10 0 days 00:00:05.146961880 +1003 11 0 days 00:00:07.819187166 +1003 12 0 days 00:00:14.342358960 +1003 13 0 days 00:00:08.010542295 +1003 14 0 days 00:00:04.779708793 +1003 15 0 days 
00:00:07.791506880 +1003 16 0 days 00:00:15.080549920 +1003 17 0 days 00:00:07.862779106 +1003 18 0 days 00:00:05.823069020 +1003 19 0 days 00:00:07.358894473 +1003 20 0 days 00:00:05.420272120 +1003 21 0 days 00:00:08.283584766 +1003 22 0 days 00:00:07.193316660 +1003 23 0 days 00:00:06.405262735 +1003 24 0 days 00:00:08.646956595 +1003 25 0 days 00:00:06.111048940 +1003 26 0 days 00:00:05.468476886 +1003 27 0 days 00:00:09.665116093 +1003 28 0 days 00:00:05.309673085 +1003 29 0 days 00:00:07.832006746 +1003 30 0 days 00:00:13.999540286 +1003 31 0 days 00:00:10.319229940 +1003 32 0 days 00:00:07.922894333 +1003 33 0 days 00:00:08.119387726 +1003 34 0 days 00:00:05.427983233 +1003 35 0 days 00:00:14.197104953 +1003 36 0 days 00:00:04.925964740 +1003 37 0 days 00:00:07.732687833 +1003 38 0 days 00:00:08.160227766 +1003 39 0 days 00:00:08.161134220 +1003 40 0 days 00:00:06.275826193 +1003 41 0 days 00:00:17.017426332 +1003 42 0 days 00:00:07.744434173 +1003 43 0 days 00:00:05.678844460 +1003 44 0 days 00:00:05.200509365 +1003 45 0 days 00:00:04.708518913 +1003 46 0 days 00:00:04.588863406 +1003 47 0 days 00:00:05.877351140 +1003 48 0 days 00:00:14.126351120 +1003 49 0 days 00:00:10.308816790 +1003 50 0 days 00:00:15.677572490 +1003 51 0 days 00:00:08.232864390 +1003 52 0 days 00:00:05.092082032 +1003 53 0 days 00:00:05.679792570 +1003 54 0 days 00:00:04.431849113 +1003 55 0 days 00:00:04.298037360 +1003 56 0 days 00:00:09.402597436 +1003 57 0 days 00:00:14.634462006 +1003 58 0 days 00:00:08.490563775 +1004 1 0 days 00:00:03.854985046 +1004 2 0 days 00:00:02.219446833 +1004 3 0 days 00:00:02.276977340 +1004 4 0 days 00:00:03.622702493 +1004 5 0 days 00:00:02.228763026 +1004 6 0 days 00:00:02.213116346 +1004 7 0 days 00:00:06.851457506 +1004 8 0 days 00:00:03.649235113 +1004 9 0 days 00:00:04.327563746 +1004 10 0 days 00:00:02.222588813 +1004 11 0 days 00:00:03.753215673 +1004 12 0 days 00:00:03.783511713 +1004 13 0 days 00:00:06.861189653 +1004 14 0 days 
00:00:02.396157166 +1004 15 0 days 00:00:02.332221480 +1004 16 0 days 00:00:03.170922506 +1004 17 0 days 00:00:06.984091840 +1004 18 0 days 00:00:04.334696406 +1004 19 0 days 00:00:02.243936160 +1004 20 0 days 00:00:03.680680426 +1004 21 0 days 00:00:03.788650220 +1004 22 0 days 00:00:06.866692426 +1004 23 0 days 00:00:06.843504386 +1004 24 0 days 00:00:06.990654886 +1004 25 0 days 00:00:06.846680346 +1004 26 0 days 00:00:02.712731486 +1004 27 0 days 00:00:02.649723695 +1004 28 0 days 00:00:03.711146633 +1004 29 0 days 00:00:03.813792860 +1004 30 0 days 00:00:02.954787980 +1004 31 0 days 00:00:02.269448466 +1004 32 0 days 00:00:07.068501373 +1004 33 0 days 00:00:04.395761924 +1004 34 0 days 00:00:04.044554120 +1004 35 0 days 00:00:03.592832213 +1004 36 0 days 00:00:04.465470448 +1004 37 0 days 00:00:04.204121735 +1004 38 0 days 00:00:02.231935240 +1004 39 0 days 00:00:07.199814686 +1004 40 0 days 00:00:02.610401725 +1004 41 0 days 00:00:06.980837606 +1004 42 0 days 00:00:06.852660106 +1004 43 0 days 00:00:03.953049733 +1004 44 0 days 00:00:06.987582860 +1004 45 0 days 00:00:02.247425246 +1004 46 0 days 00:00:03.787555240 +1004 47 0 days 00:00:03.932488093 +1004 48 0 days 00:00:02.225540640 +1004 49 0 days 00:00:04.361434733 +1004 50 0 days 00:00:06.803196853 +1004 51 0 days 00:00:03.692276180 +1004 52 0 days 00:00:03.739378833 +1004 53 0 days 00:00:04.157121790 +1004 54 0 days 00:00:02.672373595 +1004 55 0 days 00:00:07.039832286 +1004 56 0 days 00:00:08.110646685 +1004 57 0 days 00:00:03.930876133 +1004 58 0 days 00:00:02.230738953 +1004 59 0 days 00:00:03.787304480 +1004 60 0 days 00:00:06.675870706 +1004 61 0 days 00:00:03.418691466 +1004 62 0 days 00:00:07.645366385 +1004 63 0 days 00:00:06.193765920 +1004 64 0 days 00:00:02.258349786 +1004 65 0 days 00:00:03.859915820 +1004 66 0 days 00:00:02.533359840 +1004 67 0 days 00:00:03.703273833 +1004 68 0 days 00:00:02.632048060 +1004 69 0 days 00:00:03.754129246 +1004 70 0 days 00:00:04.150883685 +1004 71 0 days 
00:00:02.368968246 +1004 72 0 days 00:00:04.010185806 +1004 73 0 days 00:00:04.292199706 +1004 74 0 days 00:00:03.821050593 +1004 75 0 days 00:00:06.810937753 +1004 76 0 days 00:00:07.026875720 +1004 77 0 days 00:00:03.783064360 +1004 78 0 days 00:00:03.778058440 +1004 79 0 days 00:00:03.711867640 +1004 80 0 days 00:00:06.816144920 +1004 81 0 days 00:00:03.987267793 +1004 82 0 days 00:00:03.270614946 +1004 83 0 days 00:00:06.265499960 +1004 84 0 days 00:00:04.292304150 +1004 85 0 days 00:00:06.823145766 +1004 86 0 days 00:00:03.705452920 +1004 87 0 days 00:00:07.766376380 +1004 88 0 days 00:00:03.686151460 +1004 89 0 days 00:00:06.820739013 +1004 90 0 days 00:00:04.672813786 +1004 91 0 days 00:00:04.124706953 +1004 92 0 days 00:00:02.379178380 +1004 93 0 days 00:00:07.835314130 +1004 94 0 days 00:00:02.363547133 +1004 95 0 days 00:00:03.536498020 +1004 96 0 days 00:00:02.475994873 +1004 97 0 days 00:00:04.710423073 +1004 98 0 days 00:00:07.874815775 +1004 99 0 days 00:00:07.896312340 +1004 100 0 days 00:00:07.019970713 +1005 1 0 days 00:00:04.989887610 +1005 2 0 days 00:00:03.125485966 +1005 3 0 days 00:00:02.697607206 +1005 4 0 days 00:00:02.454505800 +1005 5 0 days 00:00:08.280813786 +1005 6 0 days 00:00:04.953416850 +1005 7 0 days 00:00:03.997630573 +1005 8 0 days 00:00:07.325387473 +1005 9 0 days 00:00:05.434253705 +1005 10 0 days 00:00:02.754853645 +1005 11 0 days 00:00:08.127750810 +1005 12 0 days 00:00:02.976862375 +1005 13 0 days 00:00:04.708667120 +1005 14 0 days 00:00:04.043754666 +1005 15 0 days 00:00:02.553961540 +1005 16 0 days 00:00:03.812150415 +1005 17 0 days 00:00:04.439513585 +1005 18 0 days 00:00:04.154370410 +1005 19 0 days 00:00:06.907684006 +1005 20 0 days 00:00:04.192149640 +1005 21 0 days 00:00:03.194407925 +1005 22 0 days 00:00:07.104776866 +1005 23 0 days 00:00:02.541601726 +1005 24 0 days 00:00:04.753968740 +1005 25 0 days 00:00:07.122274886 +1005 26 0 days 00:00:08.128070445 +1005 27 0 days 00:00:07.620955213 +1005 28 0 days 
00:00:07.643844780 +1005 29 0 days 00:00:03.142542390 +1005 30 0 days 00:00:04.709865050 +1005 31 0 days 00:00:07.246630713 +1005 32 0 days 00:00:03.089235560 +1005 33 0 days 00:00:02.837927980 +1005 34 0 days 00:00:03.972677520 +1005 35 0 days 00:00:04.280229506 +1005 36 0 days 00:00:04.110050795 +1005 37 0 days 00:00:02.863171575 +1005 38 0 days 00:00:07.246180380 +1005 39 0 days 00:00:04.058863133 +1005 40 0 days 00:00:02.900208905 +1005 41 0 days 00:00:07.112386460 +1005 42 0 days 00:00:08.531340690 +1005 43 0 days 00:00:07.261488346 +1005 44 0 days 00:00:02.816415253 +1005 45 0 days 00:00:02.485486273 +1005 46 0 days 00:00:02.656908460 +1005 47 0 days 00:00:04.470248180 +1005 48 0 days 00:00:02.507147453 +1005 49 0 days 00:00:04.532337515 +1005 50 0 days 00:00:02.549627260 +1005 51 0 days 00:00:04.414615155 +1005 52 0 days 00:00:09.086199260 +1005 53 0 days 00:00:02.713443440 +1005 54 0 days 00:00:04.069292780 +1005 55 0 days 00:00:07.284474866 +1005 56 0 days 00:00:03.970236533 +1005 57 0 days 00:00:04.648967730 +1005 58 0 days 00:00:04.003625933 +1005 59 0 days 00:00:07.243441293 +1005 60 0 days 00:00:02.837818920 +1005 61 0 days 00:00:07.158013453 +1005 62 0 days 00:00:07.963698745 +1005 63 0 days 00:00:02.723925240 +1005 64 0 days 00:00:07.123621826 +1005 65 0 days 00:00:04.473771325 +1005 66 0 days 00:00:07.360924353 +1005 67 0 days 00:00:04.017079300 +1005 68 0 days 00:00:05.233323990 +1005 69 0 days 00:00:04.074129040 +1005 70 0 days 00:00:03.097560933 +1005 71 0 days 00:00:04.486942090 +1005 72 0 days 00:00:04.476669260 +1005 73 0 days 00:00:03.062548425 +1005 74 0 days 00:00:04.863402100 +1005 75 0 days 00:00:08.469943136 +1005 76 0 days 00:00:02.877993913 +1005 77 0 days 00:00:02.884351806 +1005 78 0 days 00:00:04.085692020 +1005 79 0 days 00:00:02.489324286 +1005 80 0 days 00:00:02.971297840 +1005 81 0 days 00:00:07.150345200 +1005 82 0 days 00:00:03.176686435 +1005 83 0 days 00:00:07.246629240 +1005 84 0 days 00:00:04.597968825 +1005 85 0 days 
00:00:04.537082360 +1005 86 0 days 00:00:02.837219205 +1005 87 0 days 00:00:04.496009840 +1005 88 0 days 00:00:04.575861866 +1005 89 0 days 00:00:03.002119500 +1005 90 0 days 00:00:04.022587873 +1005 91 0 days 00:00:08.084031715 +1005 92 0 days 00:00:04.217386206 +1005 93 0 days 00:00:02.713175386 +1005 94 0 days 00:00:08.285878425 +1005 95 0 days 00:00:04.431555780 +1005 96 0 days 00:00:04.155912126 +1005 97 0 days 00:00:02.812059980 +1005 98 0 days 00:00:02.851431560 +1005 99 0 days 00:00:04.053028453 +1005 100 0 days 00:00:07.110924813 +1006 1 0 days 00:00:00.190482460 +1006 2 0 days 00:00:00.093161593 +1006 3 0 days 00:00:00.115965780 +1006 4 0 days 00:00:00.113286226 +1006 5 0 days 00:00:00.098633400 +1006 6 0 days 00:00:00.105686126 +1006 7 0 days 00:00:00.118633233 +1006 8 0 days 00:00:00.112204580 +1006 9 0 days 00:00:00.121429013 +1006 10 0 days 00:00:00.101955600 +1006 11 0 days 00:00:00.104289400 +1006 12 0 days 00:00:00.126154526 +1006 13 0 days 00:00:00.129939946 +1006 14 0 days 00:00:00.128360340 +1006 15 0 days 00:00:00.165348266 +1006 16 0 days 00:00:00.164862780 +1006 17 0 days 00:00:00.167445820 +1006 18 0 days 00:00:00.108944806 +1006 19 0 days 00:00:00.134442653 +1006 20 0 days 00:00:00.126867426 +1006 21 0 days 00:00:00.162459986 +1006 22 0 days 00:00:00.128001760 +1006 23 0 days 00:00:00.163828606 +1006 24 0 days 00:00:00.112150113 +1006 25 0 days 00:00:00.105426946 +1006 26 0 days 00:00:00.165402133 +1006 27 0 days 00:00:00.113752153 +1006 28 0 days 00:00:00.126922073 +1006 29 0 days 00:00:00.110654746 +1006 30 0 days 00:00:00.165503353 +1006 31 0 days 00:00:00.127641233 +1006 32 0 days 00:00:00.165548873 +1006 33 0 days 00:00:00.169430953 +1006 34 0 days 00:00:00.125028500 +1006 35 0 days 00:00:00.167290526 +1006 36 0 days 00:00:00.110048126 +1006 37 0 days 00:00:00.165158326 +1006 38 0 days 00:00:00.128724326 +1006 39 0 days 00:00:00.111513860 +1006 40 0 days 00:00:00.105346880 +1006 41 0 days 00:00:00.166898993 +1006 42 0 days 
00:00:00.126136746 +1006 43 0 days 00:00:00.164515606 +1006 44 0 days 00:00:00.127992580 +1006 45 0 days 00:00:00.162406160 +1006 46 0 days 00:00:00.162350400 +1006 47 0 days 00:00:00.107827286 +1006 48 0 days 00:00:00.106731820 +1006 49 0 days 00:00:00.166222486 +1006 50 0 days 00:00:00.160899713 +1006 51 0 days 00:00:00.158512166 +1006 52 0 days 00:00:00.124176326 +1006 53 0 days 00:00:00.167028493 +1006 54 0 days 00:00:00.165619266 +1006 55 0 days 00:00:00.110757173 +1006 56 0 days 00:00:00.107403980 +1006 57 0 days 00:00:00.164729986 +1006 58 0 days 00:00:00.106197540 +1006 59 0 days 00:00:00.166729626 +1006 60 0 days 00:00:00.159452726 +1006 61 0 days 00:00:00.129081760 +1006 62 0 days 00:00:00.130642766 +1006 63 0 days 00:00:00.128356460 +1006 64 0 days 00:00:00.163024193 +1006 65 0 days 00:00:00.161423833 +1006 66 0 days 00:00:00.126084773 +1006 67 0 days 00:00:00.113358880 +1006 68 0 days 00:00:00.112361953 +1006 69 0 days 00:00:00.132538760 +1006 70 0 days 00:00:00.108799006 +1006 71 0 days 00:00:00.166769733 +1006 72 0 days 00:00:00.128692600 +1006 73 0 days 00:00:00.115336126 +1006 74 0 days 00:00:00.129337146 +1006 75 0 days 00:00:00.129141753 +1006 76 0 days 00:00:00.124870313 +1006 77 0 days 00:00:00.171700773 +1006 78 0 days 00:00:00.124905180 +1006 79 0 days 00:00:00.126780220 +1006 80 0 days 00:00:00.162630580 +1006 81 0 days 00:00:00.107970073 +1006 82 0 days 00:00:00.125378526 +1006 83 0 days 00:00:00.159750666 +1006 84 0 days 00:00:00.161611226 +1006 85 0 days 00:00:00.162448893 +1006 86 0 days 00:00:00.129717153 +1006 87 0 days 00:00:00.162562200 +1006 88 0 days 00:00:00.105412413 +1006 89 0 days 00:00:00.127995673 +1006 90 0 days 00:00:00.106693000 +1006 91 0 days 00:00:00.104090920 +1006 92 0 days 00:00:00.106809213 +1006 93 0 days 00:00:00.162162320 +1006 94 0 days 00:00:00.125804706 +1006 95 0 days 00:00:00.111019880 +1006 96 0 days 00:00:00.115143626 +1006 97 0 days 00:00:00.108771226 +1006 98 0 days 00:00:00.121607466 +1006 99 0 days 
00:00:00.117341966 +1006 100 0 days 00:00:00.168378453 +1007 1 0 days 00:00:00.138285220 +1007 2 0 days 00:00:00.119370166 +1007 3 0 days 00:00:00.113974146 +1007 4 0 days 00:00:00.136571300 +1007 5 0 days 00:00:00.134775053 +1007 6 0 days 00:00:00.177793626 +1007 7 0 days 00:00:00.180158393 +1007 8 0 days 00:00:00.126987480 +1007 9 0 days 00:00:00.181640793 +1007 10 0 days 00:00:00.178267106 +1007 11 0 days 00:00:00.133569480 +1007 12 0 days 00:00:00.110760186 +1007 13 0 days 00:00:00.114667206 +1007 14 0 days 00:00:00.130964500 +1007 15 0 days 00:00:00.112796266 +1007 16 0 days 00:00:00.134746420 +1007 17 0 days 00:00:00.173983693 +1007 18 0 days 00:00:00.112597446 +1007 19 0 days 00:00:00.112484906 +1007 20 0 days 00:00:00.175526260 +1007 21 0 days 00:00:00.182687626 +1007 22 0 days 00:00:00.132161633 +1007 23 0 days 00:00:00.132936020 +1007 24 0 days 00:00:00.176517106 +1007 25 0 days 00:00:00.178787026 +1007 26 0 days 00:00:00.181065606 +1007 27 0 days 00:00:00.189529255 +1007 28 0 days 00:00:00.177845826 +1007 29 0 days 00:00:00.132022433 +1007 30 0 days 00:00:00.109537773 +1007 31 0 days 00:00:00.138404113 +1007 32 0 days 00:00:00.135690333 +1007 33 0 days 00:00:00.175875513 +1007 34 0 days 00:00:00.132606373 +1007 35 0 days 00:00:00.173122400 +1007 36 0 days 00:00:00.174457346 +1007 37 0 days 00:00:00.180778360 +1007 38 0 days 00:00:00.175901866 +1007 39 0 days 00:00:00.178152786 +1007 40 0 days 00:00:00.179656253 +1007 41 0 days 00:00:00.179446453 +1007 42 0 days 00:00:00.174296486 +1007 43 0 days 00:00:00.110854820 +1007 44 0 days 00:00:00.110284766 +1007 45 0 days 00:00:00.112768406 +1007 46 0 days 00:00:00.137863413 +1007 47 0 days 00:00:00.173433926 +1007 48 0 days 00:00:00.112680726 +1007 49 0 days 00:00:00.178342733 +1007 50 0 days 00:00:00.135517726 +1007 51 0 days 00:00:00.135848826 +1007 52 0 days 00:00:00.112277333 +1007 53 0 days 00:00:00.109963233 +1007 54 0 days 00:00:00.189838070 +1007 55 0 days 00:00:00.114178846 +1007 56 0 days 
00:00:00.134338033 +1007 57 0 days 00:00:00.133757266 +1007 58 0 days 00:00:00.112500960 +1007 59 0 days 00:00:00.110450480 +1007 60 0 days 00:00:00.178250726 +1007 61 0 days 00:00:00.179904313 +1007 62 0 days 00:00:00.112442866 +1007 63 0 days 00:00:00.113067060 +1007 64 0 days 00:00:00.114432380 +1007 65 0 days 00:00:00.113130433 +1007 66 0 days 00:00:00.183125980 +1007 67 0 days 00:00:00.132251233 +1007 68 0 days 00:00:00.109695460 +1007 69 0 days 00:00:00.111437160 +1007 70 0 days 00:00:00.114632193 +1007 71 0 days 00:00:00.131348533 +1007 72 0 days 00:00:00.113338700 +1007 73 0 days 00:00:00.186538486 +1007 74 0 days 00:00:00.112807573 +1007 75 0 days 00:00:00.170583966 +1007 76 0 days 00:00:00.127782073 +1007 77 0 days 00:00:00.166850806 +1007 78 0 days 00:00:00.108772260 +1007 79 0 days 00:00:00.108575280 +1007 80 0 days 00:00:00.173518560 +1007 81 0 days 00:00:00.123151173 +1007 82 0 days 00:00:00.170201573 +1007 83 0 days 00:00:00.162520753 +1007 84 0 days 00:00:00.128174686 +1007 85 0 days 00:00:00.160242406 +1007 86 0 days 00:00:00.171700560 +1007 87 0 days 00:00:00.175146886 +1007 88 0 days 00:00:00.168371826 +1007 89 0 days 00:00:00.162165733 +1007 90 0 days 00:00:00.169634340 +1007 91 0 days 00:00:00.110166246 +1007 92 0 days 00:00:00.107015206 +1007 93 0 days 00:00:00.125141406 +1007 94 0 days 00:00:00.123986966 +1007 95 0 days 00:00:00.112841906 +1007 96 0 days 00:00:00.106168713 +1007 97 0 days 00:00:00.125666900 +1007 98 0 days 00:00:00.103580253 +1007 99 0 days 00:00:00.106151793 +1007 100 0 days 00:00:00.111142093 +1008 1 0 days 00:00:00.074000773 +1008 2 0 days 00:00:00.073405740 +1008 3 0 days 00:00:00.088410173 +1008 4 0 days 00:00:00.089222666 +1008 5 0 days 00:00:00.097099720 +1008 6 0 days 00:00:00.100607240 +1008 7 0 days 00:00:00.072347186 +1008 8 0 days 00:00:00.070743586 +1008 9 0 days 00:00:00.070465166 +1008 10 0 days 00:00:00.092594486 +1008 11 0 days 00:00:00.070738366 +1008 12 0 days 00:00:00.098766253 +1008 13 0 days 
00:00:00.078214493 +1008 14 0 days 00:00:00.090795840 +1008 15 0 days 00:00:00.091655046 +1008 16 0 days 00:00:00.064791766 +1008 17 0 days 00:00:00.066100073 +1008 18 0 days 00:00:00.065771380 +1008 19 0 days 00:00:00.078540753 +1008 20 0 days 00:00:00.073780880 +1008 21 0 days 00:00:00.071978393 +1008 22 0 days 00:00:00.070985426 +1008 23 0 days 00:00:00.071407426 +1008 24 0 days 00:00:00.091857860 +1008 25 0 days 00:00:00.071309420 +1008 26 0 days 00:00:00.073935646 +1008 27 0 days 00:00:00.071345066 +1008 28 0 days 00:00:00.069439933 +1008 29 0 days 00:00:00.073773473 +1008 30 0 days 00:00:00.095663720 +1008 31 0 days 00:00:00.071796626 +1008 32 0 days 00:00:00.080796233 +1008 33 0 days 00:00:00.072638366 +1008 34 0 days 00:00:00.060656393 +1008 35 0 days 00:00:00.062281613 +1008 36 0 days 00:00:00.093408246 +1008 37 0 days 00:00:00.091045886 +1008 38 0 days 00:00:00.071915433 +1008 39 0 days 00:00:00.072022106 +1008 40 0 days 00:00:00.064260326 +1008 41 0 days 00:00:00.063228640 +1008 42 0 days 00:00:00.062861500 +1008 43 0 days 00:00:00.071370280 +1008 44 0 days 00:00:00.071067440 +1008 45 0 days 00:00:00.090973173 +1008 46 0 days 00:00:00.094176360 +1008 47 0 days 00:00:00.070753746 +1008 48 0 days 00:00:00.095964706 +1008 49 0 days 00:00:00.071001860 +1008 50 0 days 00:00:00.071848453 +1008 51 0 days 00:00:00.072747340 +1008 52 0 days 00:00:00.064281113 +1008 53 0 days 00:00:00.063212233 +1008 54 0 days 00:00:00.092554526 +1008 55 0 days 00:00:00.063907460 +1008 56 0 days 00:00:00.093440706 +1008 57 0 days 00:00:00.063565160 +1008 58 0 days 00:00:00.073361006 +1008 59 0 days 00:00:00.063002713 +1008 60 0 days 00:00:00.080416413 +1008 61 0 days 00:00:00.063525140 +1008 62 0 days 00:00:00.062942133 +1008 63 0 days 00:00:00.075115853 +1008 64 0 days 00:00:00.097599733 +1008 65 0 days 00:00:00.071625506 +1008 66 0 days 00:00:00.064592066 +1008 67 0 days 00:00:00.098586060 +1008 68 0 days 00:00:00.093848200 +1008 69 0 days 00:00:00.092319920 +1008 70 0 days 
00:00:00.072142733 +1008 71 0 days 00:00:00.075224606 +1008 72 0 days 00:00:00.090206273 +1008 73 0 days 00:00:00.064135360 +1008 74 0 days 00:00:00.063843873 +1008 75 0 days 00:00:00.063163020 +1008 76 0 days 00:00:00.072309513 +1008 77 0 days 00:00:00.093257373 +1008 78 0 days 00:00:00.071993866 +1008 79 0 days 00:00:00.070967660 +1008 80 0 days 00:00:00.095634446 +1008 81 0 days 00:00:00.094330840 +1008 82 0 days 00:00:00.065112420 +1008 83 0 days 00:00:00.092730453 +1008 84 0 days 00:00:00.092447726 +1008 85 0 days 00:00:00.092418520 +1008 86 0 days 00:00:00.093526340 +1008 87 0 days 00:00:00.066756500 +1008 88 0 days 00:00:00.091755006 +1008 89 0 days 00:00:00.072269706 +1008 90 0 days 00:00:00.072903106 +1008 91 0 days 00:00:00.064085273 +1008 92 0 days 00:00:00.072271613 +1008 93 0 days 00:00:00.092496613 +1008 94 0 days 00:00:00.072106586 +1008 95 0 days 00:00:00.070171093 +1008 96 0 days 00:00:00.071810720 +1008 97 0 days 00:00:00.091429520 +1008 98 0 days 00:00:00.063879593 +1008 99 0 days 00:00:00.068077893 +1008 100 0 days 00:00:00.072145986 +1009 1 0 days 00:00:00.080082466 +1009 2 0 days 00:00:00.075108513 +1009 3 0 days 00:00:00.073441033 +1009 4 0 days 00:00:00.075426580 +1009 5 0 days 00:00:00.080964693 +1009 6 0 days 00:00:00.103834966 +1009 7 0 days 00:00:00.101395606 +1009 8 0 days 00:00:00.066300506 +1009 9 0 days 00:00:00.069532353 +1009 10 0 days 00:00:00.075285753 +1009 11 0 days 00:00:00.075331693 +1009 12 0 days 00:00:00.076132280 +1009 13 0 days 00:00:00.064542560 +1009 14 0 days 00:00:00.073647820 +1009 15 0 days 00:00:00.066316106 +1009 16 0 days 00:00:00.074460173 +1009 17 0 days 00:00:00.074580653 +1009 18 0 days 00:00:00.097910973 +1009 19 0 days 00:00:00.067245766 +1009 20 0 days 00:00:00.067060400 +1009 21 0 days 00:00:00.097451100 +1009 22 0 days 00:00:00.063416446 +1009 23 0 days 00:00:00.073391960 +1009 24 0 days 00:00:00.071642793 +1009 25 0 days 00:00:00.098961653 +1009 26 0 days 00:00:00.074597000 +1009 27 0 days 
00:00:00.098830053 +1009 28 0 days 00:00:00.074455406 +1009 29 0 days 00:00:00.078010386 +1009 30 0 days 00:00:00.065417346 +1009 31 0 days 00:00:00.106146328 +1009 32 0 days 00:00:00.062806413 +1009 33 0 days 00:00:00.064665180 +1009 34 0 days 00:00:00.075503860 +1009 35 0 days 00:00:00.105831110 +1009 36 0 days 00:00:00.098176940 +1009 37 0 days 00:00:00.063466726 +1009 38 0 days 00:00:00.066731866 +1009 39 0 days 00:00:00.065715986 +1009 40 0 days 00:00:00.064162100 +1009 41 0 days 00:00:00.075227633 +1009 42 0 days 00:00:00.105465840 +1009 43 0 days 00:00:00.097156480 +1009 44 0 days 00:00:00.065052540 +1009 45 0 days 00:00:00.069979733 +1009 46 0 days 00:00:00.074903133 +1009 47 0 days 00:00:00.073373100 +1009 48 0 days 00:00:00.083659786 +1009 49 0 days 00:00:00.099910713 +1009 50 0 days 00:00:00.078677393 +1009 51 0 days 00:00:00.099101453 +1009 52 0 days 00:00:00.067109226 +1009 53 0 days 00:00:00.066005466 +1009 54 0 days 00:00:00.073581200 +1009 55 0 days 00:00:00.064257060 +1009 56 0 days 00:00:00.073252760 +1009 57 0 days 00:00:00.067862173 +1009 58 0 days 00:00:00.072618180 +1009 59 0 days 00:00:00.097515340 +1009 60 0 days 00:00:00.072797233 +1009 61 0 days 00:00:00.073886360 +1009 62 0 days 00:00:00.072722866 +1009 63 0 days 00:00:00.073054613 +1009 64 0 days 00:00:00.064286313 +1009 65 0 days 00:00:00.099288473 +1009 66 0 days 00:00:00.071340240 +1009 67 0 days 00:00:00.064135486 +1009 68 0 days 00:00:00.075825393 +1009 69 0 days 00:00:00.067566166 +1009 70 0 days 00:00:00.065880613 +1009 71 0 days 00:00:00.098916093 +1009 72 0 days 00:00:00.072566550 +1009 73 0 days 00:00:00.098045433 +1009 74 0 days 00:00:00.079493866 +1009 75 0 days 00:00:00.073807420 +1009 76 0 days 00:00:00.097720873 +1009 77 0 days 00:00:00.072904646 +1009 78 0 days 00:00:00.095341260 +1009 79 0 days 00:00:00.095760220 +1009 80 0 days 00:00:00.065964133 +1009 81 0 days 00:00:00.096333780 +1009 82 0 days 00:00:00.103054353 +1009 83 0 days 00:00:00.065200720 +1009 84 0 days 
00:00:00.062969380 +1009 85 0 days 00:00:00.065357193 +1009 86 0 days 00:00:00.065990526 +1009 87 0 days 00:00:00.101152305 +1009 88 0 days 00:00:00.097271753 +1009 89 0 days 00:00:00.095271073 +1009 90 0 days 00:00:00.074050533 +1009 91 0 days 00:00:00.098163193 +1009 92 0 days 00:00:00.096370993 +1009 93 0 days 00:00:00.102985426 +1009 94 0 days 00:00:00.064294253 +1009 95 0 days 00:00:00.098138913 +1009 96 0 days 00:00:00.076193546 +1009 97 0 days 00:00:00.098094920 +1009 98 0 days 00:00:00.063953253 +1009 99 0 days 00:00:00.068847700 +1009 100 0 days 00:00:00.077241460 +1010 1 0 days 00:00:00.131203460 +1010 2 0 days 00:00:00.112466386 +1010 3 0 days 00:00:00.113382820 +1010 4 0 days 00:00:00.097278680 +1010 5 0 days 00:00:00.175702928 +1010 6 0 days 00:00:00.100435013 +1010 7 0 days 00:00:00.096037686 +1010 8 0 days 00:00:00.128977770 +1010 9 0 days 00:00:00.098406760 +1010 10 0 days 00:00:00.119786373 +1010 11 0 days 00:00:00.117038646 +1010 12 0 days 00:00:00.120560280 +1010 13 0 days 00:00:00.108785445 +1010 14 0 days 00:00:00.160861213 +1010 15 0 days 00:00:00.119850300 +1010 16 0 days 00:00:00.189625928 +1010 17 0 days 00:00:00.179247708 +1010 18 0 days 00:00:00.100409473 +1010 19 0 days 00:00:00.157788393 +1010 20 0 days 00:00:00.170688225 +1010 21 0 days 00:00:00.118316620 +1010 22 0 days 00:00:00.099796500 +1010 23 0 days 00:00:00.119502220 +1010 24 0 days 00:00:00.099291240 +1010 25 0 days 00:00:00.097262080 +1010 26 0 days 00:00:00.132191804 +1010 27 0 days 00:00:00.111909384 +1010 28 0 days 00:00:00.132077864 +1010 29 0 days 00:00:00.189427354 +1010 30 0 days 00:00:00.122299126 +1010 31 0 days 00:00:00.121683406 +1010 32 0 days 00:00:00.185521653 +1010 33 0 days 00:00:00.121301333 +1010 34 0 days 00:00:00.124646620 +1010 35 0 days 00:00:00.180994508 +1010 36 0 days 00:00:00.101018560 +1010 37 0 days 00:00:00.141120637 +1010 38 0 days 00:00:00.155597973 +1010 39 0 days 00:00:00.120871293 +1010 40 0 days 00:00:00.103706413 +1010 41 0 days 
00:00:00.100514033 +1010 42 0 days 00:00:00.121964740 +1010 43 0 days 00:00:00.111132250 +1010 44 0 days 00:00:00.099991026 +1010 45 0 days 00:00:00.104435913 +1010 46 0 days 00:00:00.123717986 +1010 47 0 days 00:00:00.173462595 +1010 48 0 days 00:00:00.103942480 +1010 49 0 days 00:00:00.101662813 +1010 50 0 days 00:00:00.159573080 +1010 51 0 days 00:00:00.123419020 +1010 52 0 days 00:00:00.160018533 +1010 53 0 days 00:00:00.119965586 +1010 54 0 days 00:00:00.181600496 +1010 55 0 days 00:00:00.130349620 +1010 56 0 days 00:00:00.100846933 +1010 57 0 days 00:00:00.119409133 +1010 58 0 days 00:00:00.105329886 +1010 59 0 days 00:00:00.159878773 +1010 60 0 days 00:00:00.161019133 +1010 61 0 days 00:00:00.174684905 +1010 62 0 days 00:00:00.159006906 +1010 63 0 days 00:00:00.137436508 +1010 64 0 days 00:00:00.121786293 +1010 65 0 days 00:00:00.101886166 +1010 66 0 days 00:00:00.099607686 +1010 67 0 days 00:00:00.102767240 +1010 68 0 days 00:00:00.128312850 +1010 69 0 days 00:00:00.155577113 +1010 70 0 days 00:00:00.174739540 +1010 71 0 days 00:00:00.130294525 +1010 72 0 days 00:00:00.099663933 +1010 73 0 days 00:00:00.161120646 +1010 74 0 days 00:00:00.125112133 +1010 75 0 days 00:00:00.160106886 +1010 76 0 days 00:00:00.100078013 +1010 77 0 days 00:00:00.099163246 +1010 78 0 days 00:00:00.120956013 +1010 79 0 days 00:00:00.159480073 +1010 80 0 days 00:00:00.122206760 +1010 81 0 days 00:00:00.122479153 +1010 82 0 days 00:00:00.120567806 +1010 83 0 days 00:00:00.105104780 +1010 84 0 days 00:00:00.124116113 +1010 85 0 days 00:00:00.123521520 +1010 86 0 days 00:00:00.119363560 +1010 87 0 days 00:00:00.121061146 +1010 88 0 days 00:00:00.103048166 +1010 89 0 days 00:00:00.128790940 +1010 90 0 days 00:00:00.120585633 +1010 91 0 days 00:00:00.122658126 +1010 92 0 days 00:00:00.125565180 +1010 93 0 days 00:00:00.130152530 +1010 94 0 days 00:00:00.160179606 +1010 95 0 days 00:00:00.182643036 +1010 96 0 days 00:00:00.102273406 +1010 97 0 days 00:00:00.102754993 +1010 98 0 days 
00:00:00.101528060 +1010 99 0 days 00:00:00.109130620 +1010 100 0 days 00:00:00.121770626 +1011 1 0 days 00:00:00.071382913 +1011 2 0 days 00:00:00.061314213 +1011 3 0 days 00:00:00.066101145 +1011 4 0 days 00:00:00.071584680 +1011 5 0 days 00:00:00.071096400 +1011 6 0 days 00:00:00.062393506 +1011 7 0 days 00:00:00.063719326 +1011 8 0 days 00:00:00.102334815 +1011 9 0 days 00:00:00.093720020 +1011 10 0 days 00:00:00.097095013 +1011 11 0 days 00:00:00.093616686 +1011 12 0 days 00:00:00.060968740 +1011 13 0 days 00:00:00.081173068 +1011 14 0 days 00:00:00.071963773 +1011 15 0 days 00:00:00.069846793 +1011 16 0 days 00:00:00.076817924 +1011 17 0 days 00:00:00.091544480 +1011 18 0 days 00:00:00.073041753 +1011 19 0 days 00:00:00.060967460 +1011 20 0 days 00:00:00.100891685 +1011 21 0 days 00:00:00.071445173 +1011 22 0 days 00:00:00.072191353 +1011 23 0 days 00:00:00.071338740 +1011 24 0 days 00:00:00.092445220 +1011 25 0 days 00:00:00.060697913 +1011 26 0 days 00:00:00.095806980 +1011 27 0 days 00:00:00.073848926 +1011 28 0 days 00:00:00.063845320 +1011 29 0 days 00:00:00.061449820 +1011 30 0 days 00:00:00.095512080 +1011 31 0 days 00:00:00.070763473 +1011 32 0 days 00:00:00.071665013 +1011 33 0 days 00:00:00.099146520 +1011 34 0 days 00:00:00.061214226 +1011 35 0 days 00:00:00.072611920 +1011 36 0 days 00:00:00.059382940 +1011 37 0 days 00:00:00.061639453 +1011 38 0 days 00:00:00.079527803 +1011 39 0 days 00:00:00.063319813 +1011 40 0 days 00:00:00.063128113 +1011 41 0 days 00:00:00.104103792 +1011 42 0 days 00:00:00.095710080 +1011 43 0 days 00:00:00.059147953 +1011 44 0 days 00:00:00.073600580 +1011 45 0 days 00:00:00.061104413 +1011 46 0 days 00:00:00.062794320 +1011 47 0 days 00:00:00.062781760 +1011 48 0 days 00:00:00.069511193 +1011 49 0 days 00:00:00.060713900 +1011 50 0 days 00:00:00.071989840 +1011 51 0 days 00:00:00.063330913 +1011 52 0 days 00:00:00.075387955 +1011 53 0 days 00:00:00.061779826 +1011 54 0 days 00:00:00.062811053 +1011 55 0 days 
00:00:00.095284206 +1011 56 0 days 00:00:00.095127633 +1011 57 0 days 00:00:00.071672200 +1011 58 0 days 00:00:00.070476053 +1011 59 0 days 00:00:00.069649853 +1011 60 0 days 00:00:00.104912820 +1011 61 0 days 00:00:00.094326126 +1011 62 0 days 00:00:00.063726020 +1011 63 0 days 00:00:00.062805786 +1011 64 0 days 00:00:00.072181706 +1011 65 0 days 00:00:00.095803813 +1011 66 0 days 00:00:00.071454893 +1011 67 0 days 00:00:00.096302133 +1011 68 0 days 00:00:00.064953153 +1011 69 0 days 00:00:00.077139820 +1011 70 0 days 00:00:00.096312333 +1011 71 0 days 00:00:00.072646186 +1011 72 0 days 00:00:00.063730600 +1011 73 0 days 00:00:00.072743326 +1011 74 0 days 00:00:00.095282520 +1011 75 0 days 00:00:00.063128240 +1011 76 0 days 00:00:00.109641193 +1011 77 0 days 00:00:00.080818505 +1011 78 0 days 00:00:00.095575153 +1011 79 0 days 00:00:00.062908060 +1011 80 0 days 00:00:00.070388413 +1011 81 0 days 00:00:00.095518233 +1011 82 0 days 00:00:00.071448753 +1011 83 0 days 00:00:00.096441046 +1011 84 0 days 00:00:00.072501286 +1011 85 0 days 00:00:00.062477433 +1011 86 0 days 00:00:00.062269986 +1011 87 0 days 00:00:00.079537688 +1011 88 0 days 00:00:00.073878740 +1011 89 0 days 00:00:00.095375713 +1011 90 0 days 00:00:00.096627146 +1011 91 0 days 00:00:00.070250673 +1011 92 0 days 00:00:00.072716453 +1011 93 0 days 00:00:00.073956073 +1011 94 0 days 00:00:00.094086300 +1011 95 0 days 00:00:00.060160840 +1011 96 0 days 00:00:00.065119466 +1011 97 0 days 00:00:00.096377866 +1011 98 0 days 00:00:00.096141033 +1011 99 0 days 00:00:00.071327040 +1011 100 0 days 00:00:00.069599140 +1012 1 0 days 00:00:00.436237626 +1012 2 0 days 00:00:00.536831286 +1012 3 0 days 00:00:00.972647573 +1012 4 0 days 00:00:00.821785520 +1012 5 0 days 00:00:00.459345820 +1012 6 0 days 00:00:00.840773260 +1012 7 0 days 00:00:00.445449540 +1012 8 0 days 00:00:00.299208033 +1012 9 0 days 00:00:00.507131353 +1012 10 0 days 00:00:00.262196293 +1012 11 0 days 00:00:00.983452260 +1012 12 0 days 
00:00:00.525446020 +1012 13 0 days 00:00:00.290413446 +1012 14 0 days 00:00:00.515607400 +1012 15 0 days 00:00:00.447476440 +1012 16 0 days 00:00:00.536242560 +1012 17 0 days 00:00:00.532936273 +1012 18 0 days 00:00:00.299471240 +1012 19 0 days 00:00:00.975560366 +1012 20 0 days 00:00:00.268611733 +1012 21 0 days 00:00:00.454740120 +1012 22 0 days 00:00:00.960008780 +1012 23 0 days 00:00:00.453369266 +1012 24 0 days 00:00:00.717542053 +1012 25 0 days 00:00:00.964269420 +1012 26 0 days 00:00:00.257685213 +1012 27 0 days 00:00:00.260269000 +1012 28 0 days 00:00:00.520882860 +1012 29 0 days 00:00:00.264036813 +1012 30 0 days 00:00:00.301822020 +1012 31 0 days 00:00:00.262923626 +1012 32 0 days 00:00:00.260359746 +1012 33 0 days 00:00:00.293031273 +1012 34 0 days 00:00:01.000796053 +1012 35 0 days 00:00:00.441229933 +1012 36 0 days 00:00:00.292575100 +1012 37 0 days 00:00:00.444079573 +1012 38 0 days 00:00:00.840412313 +1012 39 0 days 00:00:00.973965800 +1012 40 0 days 00:00:00.285764506 +1012 41 0 days 00:00:00.263383120 +1012 42 0 days 00:00:00.516467033 +1012 43 0 days 00:00:00.534421373 +1012 44 0 days 00:00:00.436076906 +1012 45 0 days 00:00:00.311094026 +1012 46 0 days 00:00:00.457047260 +1012 47 0 days 00:00:00.987633740 +1012 48 0 days 00:00:00.305580933 +1012 49 0 days 00:00:00.317453706 +1012 50 0 days 00:00:00.978404093 +1012 51 0 days 00:00:00.983177953 +1012 52 0 days 00:00:00.950721766 +1012 53 0 days 00:00:00.440405720 +1012 54 0 days 00:00:00.261388066 +1012 55 0 days 00:00:00.541134626 +1012 56 0 days 00:00:00.318765786 +1012 57 0 days 00:00:00.306245206 +1012 58 0 days 00:00:00.311925393 +1012 59 0 days 00:00:00.437780260 +1012 60 0 days 00:00:00.247929413 +1012 61 0 days 00:00:00.813493400 +1012 62 0 days 00:00:00.824371953 +1012 63 0 days 00:00:00.976814313 +1012 64 0 days 00:00:00.256894866 +1012 65 0 days 00:00:00.311374240 +1012 66 0 days 00:00:00.458951320 +1012 67 0 days 00:00:00.255556393 +1012 68 0 days 00:00:00.518601033 +1012 69 0 days 
00:00:00.300595106 +1012 70 0 days 00:00:00.452232700 +1012 71 0 days 00:00:00.515694840 +1012 72 0 days 00:00:00.981212326 +1012 73 0 days 00:00:00.834967746 +1012 74 0 days 00:00:00.265501633 +1012 75 0 days 00:00:00.523031586 +1012 76 0 days 00:00:00.838920973 +1012 77 0 days 00:00:00.828449946 +1012 78 0 days 00:00:00.553751320 +1012 79 0 days 00:00:00.959512693 +1012 80 0 days 00:00:00.312685693 +1012 81 0 days 00:00:00.968430126 +1012 82 0 days 00:00:00.516156266 +1012 83 0 days 00:00:00.949889800 +1012 84 0 days 00:00:00.837248140 +1012 85 0 days 00:00:00.963228180 +1012 86 0 days 00:00:00.828463853 +1012 87 0 days 00:00:00.458796553 +1012 88 0 days 00:00:00.520171200 +1012 89 0 days 00:00:00.314816906 +1012 90 0 days 00:00:00.932722506 +1012 91 0 days 00:00:00.458507433 +1012 92 0 days 00:00:00.519851066 +1012 93 0 days 00:00:00.811831860 +1012 94 0 days 00:00:00.311607320 +1012 95 0 days 00:00:00.443420813 +1012 96 0 days 00:00:00.263702206 +1012 97 0 days 00:00:00.460462040 +1012 98 0 days 00:00:00.456345080 +1012 99 0 days 00:00:00.465691246 +1012 100 0 days 00:00:00.267630086 +1013 1 0 days 00:00:00.995958813 +1013 2 0 days 00:00:00.544377360 +1013 3 0 days 00:00:01.119375893 +1013 4 0 days 00:00:00.980782520 +1013 5 0 days 00:00:00.991586606 +1013 6 0 days 00:00:00.335767313 +1013 7 0 days 00:00:00.975357393 +1013 8 0 days 00:00:01.002549220 +1013 9 0 days 00:00:00.608656640 +1013 10 0 days 00:00:01.163605780 +1013 11 0 days 00:00:01.129849146 +1013 12 0 days 00:00:00.541834560 +1013 13 0 days 00:00:00.359130873 +1013 14 0 days 00:00:00.354034013 +1013 15 0 days 00:00:00.564707193 +1013 16 0 days 00:00:00.542248533 +1013 17 0 days 00:00:00.574312993 +1013 18 0 days 00:00:00.525654306 +1013 19 0 days 00:00:00.310609826 +1013 20 0 days 00:00:00.562839140 +1013 21 0 days 00:00:00.302724480 +1013 22 0 days 00:00:00.637271866 +1013 23 0 days 00:00:00.542800786 +1013 24 0 days 00:00:00.302308773 +1013 25 0 days 00:00:01.001943720 +1013 26 0 days 
00:00:00.533410220 +1013 27 0 days 00:00:01.147619246 +1013 28 0 days 00:00:01.166444886 +1013 29 0 days 00:00:00.314273273 +1013 30 0 days 00:00:01.187691400 +1013 31 0 days 00:00:00.543035766 +1013 32 0 days 00:00:00.625732213 +1013 33 0 days 00:00:00.565103060 +1013 34 0 days 00:00:01.047421700 +1013 35 0 days 00:00:00.284822586 +1013 36 0 days 00:00:00.630181973 +1013 37 0 days 00:00:00.370370726 +1013 38 0 days 00:00:00.352019120 +1013 39 0 days 00:00:00.547246713 +1013 40 0 days 00:00:00.311244940 +1013 41 0 days 00:00:01.184794026 +1013 42 0 days 00:00:00.363648280 +1013 43 0 days 00:00:00.325626873 +1013 44 0 days 00:00:00.643188480 +1013 45 0 days 00:00:00.648304553 +1013 46 0 days 00:00:00.562982853 +1013 47 0 days 00:00:00.353509320 +1013 48 0 days 00:00:00.315954580 +1013 49 0 days 00:00:00.290180400 +1013 50 0 days 00:00:00.629003566 +1013 51 0 days 00:00:00.622476552 +1013 52 0 days 00:00:00.317348433 +1013 53 0 days 00:00:01.164492686 +1013 54 0 days 00:00:01.141967526 +1013 55 0 days 00:00:00.588155306 +1013 56 0 days 00:00:00.350460566 +1013 57 0 days 00:00:00.278781553 +1013 58 0 days 00:00:00.281021706 +1013 59 0 days 00:00:00.586311126 +1013 60 0 days 00:00:00.329144393 +1013 61 0 days 00:00:00.311969793 +1013 62 0 days 00:00:00.356667446 +1013 63 0 days 00:00:00.297281486 +1013 64 0 days 00:00:00.305660060 +1013 65 0 days 00:00:00.979358413 +1013 66 0 days 00:00:00.603435740 +1013 67 0 days 00:00:00.337298913 +1013 68 0 days 00:00:00.288937440 +1013 69 0 days 00:00:00.590585280 +1013 70 0 days 00:00:00.349495053 +1013 71 0 days 00:00:00.587348046 +1013 72 0 days 00:00:00.327827760 +1013 73 0 days 00:00:01.098698133 +1013 74 0 days 00:00:00.564300060 +1013 75 0 days 00:00:00.557304633 +1013 76 0 days 00:00:00.964261793 +1013 77 0 days 00:00:00.333988966 +1013 78 0 days 00:00:00.847512226 +1013 79 0 days 00:00:00.311633740 +1013 80 0 days 00:00:01.092448466 +1013 81 0 days 00:00:00.346930273 +1013 82 0 days 00:00:00.352285320 +1013 83 0 days 
00:00:00.310269273 +1013 84 0 days 00:00:01.127749080 +1013 85 0 days 00:00:00.337179766 +1013 86 0 days 00:00:00.555044286 +1013 87 0 days 00:00:00.440743786 +1013 88 0 days 00:00:00.976861966 +1013 89 0 days 00:00:00.622339980 +1013 90 0 days 00:00:00.371359326 +1013 91 0 days 00:00:00.335549126 +1013 92 0 days 00:00:00.576426280 +1013 93 0 days 00:00:00.287551206 +1013 94 0 days 00:00:00.944697960 +1013 95 0 days 00:00:00.973032960 +1013 96 0 days 00:00:01.073447166 +1013 97 0 days 00:00:00.314883633 +1013 98 0 days 00:00:00.946231846 +1013 99 0 days 00:00:00.286254313 +1013 100 0 days 00:00:00.604638980 +1014 1 0 days 00:00:00.494827900 +1014 2 0 days 00:00:00.182809833 +1014 3 0 days 00:00:00.393646373 +1014 4 0 days 00:00:00.183731680 +1014 5 0 days 00:00:00.183647186 +1014 6 0 days 00:00:00.190778913 +1014 7 0 days 00:00:00.516662700 +1014 8 0 days 00:00:00.261059420 +1014 9 0 days 00:00:00.262650160 +1014 10 0 days 00:00:00.158455160 +1014 11 0 days 00:00:00.523125593 +1014 12 0 days 00:00:00.161624966 +1014 13 0 days 00:00:00.503029653 +1014 14 0 days 00:00:00.180513053 +1014 15 0 days 00:00:00.256091660 +1014 16 0 days 00:00:00.500968280 +1014 17 0 days 00:00:00.288477573 +1014 18 0 days 00:00:00.463824456 +1014 19 0 days 00:00:00.280249546 +1014 20 0 days 00:00:00.261291120 +1014 21 0 days 00:00:00.183751053 +1014 22 0 days 00:00:00.550334380 +1014 23 0 days 00:00:00.271057186 +1014 24 0 days 00:00:00.172562073 +1014 25 0 days 00:00:00.240809666 +1014 26 0 days 00:00:00.244580640 +1014 27 0 days 00:00:00.173455093 +1014 28 0 days 00:00:00.535313860 +1014 29 0 days 00:00:00.198562500 +1014 30 0 days 00:00:00.472387186 +1014 31 0 days 00:00:00.290613586 +1014 32 0 days 00:00:00.520937700 +1014 33 0 days 00:00:00.264333426 +1014 34 0 days 00:00:00.184226200 +1014 35 0 days 00:00:00.183270600 +1014 36 0 days 00:00:00.157652700 +1014 37 0 days 00:00:00.290288720 +1014 38 0 days 00:00:00.155871766 +1014 39 0 days 00:00:00.287935800 +1014 40 0 days 
00:00:00.157340593 +1014 41 0 days 00:00:00.445398673 +1014 42 0 days 00:00:00.183565100 +1014 43 0 days 00:00:00.157671500 +1014 44 0 days 00:00:00.288788213 +1014 45 0 days 00:00:00.165097113 +1014 46 0 days 00:00:00.149932626 +1014 47 0 days 00:00:00.446037926 +1014 48 0 days 00:00:00.265168053 +1014 49 0 days 00:00:00.126296253 +1014 50 0 days 00:00:00.239018600 +1014 51 0 days 00:00:00.396390433 +1014 52 0 days 00:00:00.420039853 +1014 53 0 days 00:00:00.225929520 +1014 54 0 days 00:00:00.208322680 +1014 55 0 days 00:00:00.240809573 +1014 56 0 days 00:00:00.150141246 +1014 57 0 days 00:00:00.239252866 +1014 58 0 days 00:00:00.420777080 +1014 59 0 days 00:00:00.384456060 +1014 60 0 days 00:00:00.210018166 +1014 61 0 days 00:00:00.239607366 +1014 62 0 days 00:00:00.385141600 +1014 63 0 days 00:00:00.130459566 +1014 64 0 days 00:00:00.233901226 +1014 65 0 days 00:00:00.234899346 +1014 66 0 days 00:00:00.207727566 +1014 67 0 days 00:00:00.235147320 +1014 68 0 days 00:00:00.129489006 +1014 69 0 days 00:00:00.157370200 +1014 70 0 days 00:00:00.390972066 +1014 71 0 days 00:00:00.417025286 +1014 72 0 days 00:00:00.443434726 +1014 73 0 days 00:00:00.139931313 +1014 74 0 days 00:00:00.228788406 +1014 75 0 days 00:00:00.387850613 +1014 76 0 days 00:00:00.418174780 +1014 77 0 days 00:00:00.130343386 +1014 78 0 days 00:00:00.237033826 +1014 79 0 days 00:00:00.234112246 +1014 80 0 days 00:00:00.233137753 +1014 81 0 days 00:00:00.138216133 +1014 82 0 days 00:00:00.125701586 +1014 83 0 days 00:00:00.126854200 +1014 84 0 days 00:00:00.156868860 +1014 85 0 days 00:00:00.129671406 +1014 86 0 days 00:00:00.444592033 +1014 87 0 days 00:00:00.190893446 +1014 88 0 days 00:00:00.225963506 +1014 89 0 days 00:00:00.390955866 +1014 90 0 days 00:00:00.390888866 +1014 91 0 days 00:00:00.133950580 +1014 92 0 days 00:00:00.133465480 +1014 93 0 days 00:00:00.125994373 +1014 94 0 days 00:00:00.413627713 +1014 95 0 days 00:00:00.234324106 +1014 96 0 days 00:00:00.155193173 +1014 97 0 days 
00:00:00.106590060 +1014 98 0 days 00:00:00.440690406 +1014 99 0 days 00:00:00.392092793 +1014 100 0 days 00:00:00.131813473 +1015 1 0 days 00:00:00.450050520 +1015 2 0 days 00:00:00.443769653 +1015 3 0 days 00:00:00.234694506 +1015 4 0 days 00:00:00.221452440 +1015 5 0 days 00:00:00.132156586 +1015 6 0 days 00:00:00.234641813 +1015 7 0 days 00:00:00.258360940 +1015 8 0 days 00:00:00.444490900 +1015 9 0 days 00:00:00.240971606 +1015 10 0 days 00:00:00.253956960 +1015 11 0 days 00:00:00.261217940 +1015 12 0 days 00:00:00.156430046 +1015 13 0 days 00:00:00.260029466 +1015 14 0 days 00:00:00.418155440 +1015 15 0 days 00:00:00.130300193 +1015 16 0 days 00:00:00.417228420 +1015 17 0 days 00:00:00.402536840 +1015 18 0 days 00:00:00.171180320 +1015 19 0 days 00:00:00.472259476 +1015 20 0 days 00:00:00.126274333 +1015 21 0 days 00:00:00.425260346 +1015 22 0 days 00:00:00.439842033 +1015 23 0 days 00:00:00.130429953 +1015 24 0 days 00:00:00.251686145 +1015 25 0 days 00:00:00.443106740 +1015 26 0 days 00:00:00.166717966 +1015 27 0 days 00:00:00.406738093 +1015 28 0 days 00:00:00.348763760 +1015 29 0 days 00:00:00.150640320 +1015 30 0 days 00:00:00.360677780 +1015 31 0 days 00:00:00.233492533 +1015 32 0 days 00:00:00.156034526 +1015 33 0 days 00:00:00.232875826 +1015 34 0 days 00:00:00.444015453 +1015 35 0 days 00:00:00.208550680 +1015 36 0 days 00:00:00.233133973 +1015 37 0 days 00:00:00.443526186 +1015 38 0 days 00:00:00.232762540 +1015 39 0 days 00:00:00.139081666 +1015 40 0 days 00:00:00.226738653 +1015 41 0 days 00:00:00.390738045 +1015 42 0 days 00:00:00.441930266 +1015 43 0 days 00:00:00.230233066 +1015 44 0 days 00:00:00.420043313 +1015 45 0 days 00:00:00.443873393 +1015 46 0 days 00:00:00.389238873 +1015 47 0 days 00:00:00.129758693 +1015 48 0 days 00:00:00.217244260 +1015 49 0 days 00:00:00.217457575 +1015 50 0 days 00:00:00.223877293 +1015 51 0 days 00:00:00.443687533 +1015 52 0 days 00:00:00.260805313 +1015 53 0 days 00:00:00.448404333 +1015 54 0 days 
00:00:00.445600533 +1015 55 0 days 00:00:00.148062033 +1015 56 0 days 00:00:00.112514660 +1015 57 0 days 00:00:00.128430960 +1015 58 0 days 00:00:00.395202213 +1015 59 0 days 00:00:00.436227926 +1015 60 0 days 00:00:00.155343473 +1015 61 0 days 00:00:00.111214500 +1015 62 0 days 00:00:00.138046385 +1015 63 0 days 00:00:00.432603195 +1015 64 0 days 00:00:00.123063360 +1015 65 0 days 00:00:00.153677560 +1015 66 0 days 00:00:00.130514173 +1015 67 0 days 00:00:00.129671386 +1015 68 0 days 00:00:00.238198853 +1015 69 0 days 00:00:00.233615400 +1015 70 0 days 00:00:00.127976080 +1015 71 0 days 00:00:00.391205600 +1015 72 0 days 00:00:00.234180486 +1015 73 0 days 00:00:00.156881066 +1015 74 0 days 00:00:00.417178426 +1015 75 0 days 00:00:00.130880306 +1015 76 0 days 00:00:00.244391826 +1015 77 0 days 00:00:00.127785566 +1015 78 0 days 00:00:00.435708100 +1015 79 0 days 00:00:00.206894053 +1015 80 0 days 00:00:00.234188780 +1015 81 0 days 00:00:00.242560386 +1015 82 0 days 00:00:00.206356746 +1015 83 0 days 00:00:00.124730953 +1015 84 0 days 00:00:00.154268546 +1015 85 0 days 00:00:00.443182626 +1015 86 0 days 00:00:00.139632900 +1015 87 0 days 00:00:00.121196400 +1015 88 0 days 00:00:00.445263255 +1015 89 0 days 00:00:00.211453173 +1015 90 0 days 00:00:00.234169986 +1015 91 0 days 00:00:00.233276453 +1015 92 0 days 00:00:00.442130126 +1015 93 0 days 00:00:00.441232940 +1015 94 0 days 00:00:00.134332880 +1015 95 0 days 00:00:00.395678006 +1015 96 0 days 00:00:00.145537006 +1015 97 0 days 00:00:00.217028406 +1015 98 0 days 00:00:00.127179680 +1015 99 0 days 00:00:00.128436233 +1015 100 0 days 00:00:00.439073900 +1016 1 0 days 00:00:00.853470860 +1016 2 0 days 00:00:00.254435666 +1016 3 0 days 00:00:00.844364466 +1016 4 0 days 00:00:00.256422766 +1016 5 0 days 00:00:00.979213472 +1016 6 0 days 00:00:00.457120793 +1016 7 0 days 00:00:00.434726713 +1016 8 0 days 00:00:00.973908624 +1016 9 0 days 00:00:00.459660900 +1016 10 0 days 00:00:00.273550975 +1016 11 0 days 
00:00:00.795886140 +1016 12 0 days 00:00:00.916548615 +1016 13 0 days 00:00:00.482667470 +1016 14 0 days 00:00:00.808091406 +1016 15 0 days 00:00:00.273943613 +1016 16 0 days 00:00:00.803006073 +1016 17 0 days 00:00:00.458163733 +1016 18 0 days 00:00:00.734577246 +1016 19 0 days 00:00:00.804244246 +1016 20 0 days 00:00:00.919019820 +1016 21 0 days 00:00:00.252617046 +1016 22 0 days 00:00:00.272173733 +1016 23 0 days 00:00:00.275000240 +1016 24 0 days 00:00:00.976221124 +1016 25 0 days 00:00:00.266604680 +1016 26 0 days 00:00:00.510011870 +1016 27 0 days 00:00:00.847889573 +1016 28 0 days 00:00:00.799132400 +1016 29 0 days 00:00:00.782861546 +1016 30 0 days 00:00:00.821787733 +1016 31 0 days 00:00:00.797878933 +1016 32 0 days 00:00:00.433261946 +1016 33 0 days 00:00:00.845562733 +1016 34 0 days 00:00:00.460780533 +1016 35 0 days 00:00:00.527855917 +1016 36 0 days 00:00:00.974671104 +1016 37 0 days 00:00:00.860156120 +1016 38 0 days 00:00:00.844673000 +1016 39 0 days 00:00:00.455240926 +1016 40 0 days 00:00:00.413557280 +1016 41 0 days 00:00:00.282792706 +1016 42 0 days 00:00:00.434178073 +1016 43 0 days 00:00:00.780590793 +1016 44 0 days 00:00:00.535153432 +1016 45 0 days 00:00:00.844089546 +1016 46 0 days 00:00:00.257416060 +1016 47 0 days 00:00:00.514576440 +1016 48 0 days 00:00:00.251655360 +1016 49 0 days 00:00:01.055895665 +1016 50 0 days 00:00:00.827317960 +1016 51 0 days 00:00:00.269424266 +1016 52 0 days 00:00:00.424621946 +1016 53 0 days 00:00:00.460705533 +1016 54 0 days 00:00:00.811073906 +1016 55 0 days 00:00:00.468036013 +1016 56 0 days 00:00:00.272450300 +1016 57 0 days 00:00:00.464157160 +1016 58 0 days 00:00:00.476706853 +1016 59 0 days 00:00:00.446502026 +1016 60 0 days 00:00:00.803468713 +1016 61 0 days 00:00:00.270792453 +1016 62 0 days 00:00:00.810099500 +1016 63 0 days 00:00:00.252536753 +1016 64 0 days 00:00:00.240661366 +1016 65 0 days 00:00:00.849720693 +1016 66 0 days 00:00:00.797892446 +1016 67 0 days 00:00:00.926363865 +1016 68 0 days 
00:00:00.933992472 +1016 69 0 days 00:00:00.473734573 +1016 70 0 days 00:00:00.467467640 +1016 71 0 days 00:00:00.863970880 +1016 72 0 days 00:00:00.853739873 +1016 73 0 days 00:00:00.312038288 +1016 74 0 days 00:00:00.435170166 +1016 75 0 days 00:00:00.841998613 +1016 76 0 days 00:00:00.429638833 +1016 77 0 days 00:00:00.249859420 +1016 78 0 days 00:00:00.310952830 +1016 79 0 days 00:00:00.437447306 +1016 80 0 days 00:00:00.437033333 +1016 81 0 days 00:00:00.830440966 +1016 82 0 days 00:00:00.447402380 +1016 83 0 days 00:00:00.432162813 +1016 84 0 days 00:00:00.848482933 +1016 85 0 days 00:00:00.548779588 +1016 86 0 days 00:00:00.568738580 +1016 87 0 days 00:00:00.514009392 +1016 88 0 days 00:00:00.477352386 +1016 89 0 days 00:00:00.862989080 +1016 90 0 days 00:00:00.472361266 +1016 91 0 days 00:00:00.838969186 +1016 92 0 days 00:00:00.267176380 +1016 93 0 days 00:00:00.855011913 +1016 94 0 days 00:00:00.530687250 +1016 95 0 days 00:00:00.256418460 +1016 96 0 days 00:00:00.856175746 +1016 97 0 days 00:00:00.264234933 +1016 98 0 days 00:00:00.878765006 +1016 99 0 days 00:00:00.823267940 +1016 100 0 days 00:00:00.336899045 +1017 1 0 days 00:00:00.267054080 +1017 2 0 days 00:00:00.255284473 +1017 3 0 days 00:00:00.244027900 +1017 4 0 days 00:00:00.131682933 +1017 5 0 days 00:00:00.150750680 +1017 6 0 days 00:00:00.145782460 +1017 7 0 days 00:00:00.407526786 +1017 8 0 days 00:00:00.252318545 +1017 9 0 days 00:00:00.243711200 +1017 10 0 days 00:00:00.252611320 +1017 11 0 days 00:00:00.146519220 +1017 12 0 days 00:00:00.268999333 +1017 13 0 days 00:00:00.276694546 +1017 14 0 days 00:00:00.267735060 +1017 15 0 days 00:00:00.138287813 +1017 16 0 days 00:00:00.231364300 +1017 17 0 days 00:00:00.248555400 +1017 18 0 days 00:00:00.148310293 +1017 19 0 days 00:00:00.148287660 +1017 20 0 days 00:00:00.233686400 +1017 21 0 days 00:00:00.437753473 +1017 22 0 days 00:00:00.135372880 +1017 23 0 days 00:00:00.485327520 +1017 24 0 days 00:00:00.435911186 +1017 25 0 days 
00:00:00.150289633 +1017 26 0 days 00:00:00.434444460 +1017 27 0 days 00:00:00.270829826 +1017 28 0 days 00:00:00.437454200 +1017 29 0 days 00:00:00.167754780 +1017 30 0 days 00:00:00.233171026 +1017 31 0 days 00:00:00.159939326 +1017 32 0 days 00:00:00.442510046 +1017 33 0 days 00:00:00.435546860 +1017 34 0 days 00:00:00.439020300 +1017 35 0 days 00:00:00.436383020 +1017 36 0 days 00:00:00.155327500 +1017 37 0 days 00:00:00.145996860 +1017 38 0 days 00:00:00.410958726 +1017 39 0 days 00:00:00.246678573 +1017 40 0 days 00:00:00.145653060 +1017 41 0 days 00:00:00.459618360 +1017 42 0 days 00:00:00.436685400 +1017 43 0 days 00:00:00.150482413 +1017 44 0 days 00:00:00.430461066 +1017 45 0 days 00:00:00.128690820 +1017 46 0 days 00:00:00.145636840 +1017 47 0 days 00:00:00.229513593 +1017 48 0 days 00:00:00.228914013 +1017 49 0 days 00:00:00.457468333 +1017 50 0 days 00:00:00.232537526 +1017 51 0 days 00:00:00.131513386 +1017 52 0 days 00:00:00.137129460 +1017 53 0 days 00:00:00.146418713 +1017 54 0 days 00:00:00.379106080 +1017 55 0 days 00:00:00.247514046 +1017 56 0 days 00:00:00.148382726 +1017 57 0 days 00:00:00.409254200 +1017 58 0 days 00:00:00.154205820 +1017 59 0 days 00:00:00.157347120 +1017 60 0 days 00:00:00.442763320 +1017 61 0 days 00:00:00.232294880 +1017 62 0 days 00:00:00.259522393 +1017 63 0 days 00:00:00.247881313 +1017 64 0 days 00:00:00.238741526 +1017 65 0 days 00:00:00.147327980 +1017 66 0 days 00:00:00.246153525 +1017 67 0 days 00:00:00.421165213 +1017 68 0 days 00:00:00.155405790 +1017 69 0 days 00:00:00.269718870 +1017 70 0 days 00:00:00.433145133 +1017 71 0 days 00:00:00.247507513 +1017 72 0 days 00:00:00.262217612 +1017 73 0 days 00:00:00.249362080 +1017 74 0 days 00:00:00.146185560 +1017 75 0 days 00:00:00.436794253 +1017 76 0 days 00:00:00.243697628 +1017 77 0 days 00:00:00.409278306 +1017 78 0 days 00:00:00.142428480 +1017 79 0 days 00:00:00.250038340 +1017 80 0 days 00:00:00.499146280 +1017 81 0 days 00:00:00.452333495 +1017 82 0 days 
00:00:00.150737893 +1017 83 0 days 00:00:00.245314106 +1017 84 0 days 00:00:00.228674066 +1017 85 0 days 00:00:00.235766420 +1017 86 0 days 00:00:00.146906700 +1017 87 0 days 00:00:00.138236066 +1017 88 0 days 00:00:00.145904720 +1017 89 0 days 00:00:00.249224033 +1017 90 0 days 00:00:00.145997786 +1017 91 0 days 00:00:00.415865993 +1017 92 0 days 00:00:00.144973053 +1017 93 0 days 00:00:00.127412906 +1017 94 0 days 00:00:00.139676226 +1017 95 0 days 00:00:00.237485806 +1017 96 0 days 00:00:00.139101580 +1017 97 0 days 00:00:00.484706480 +1017 98 0 days 00:00:00.141431985 +1017 99 0 days 00:00:00.439075833 +1017 100 0 days 00:00:00.233369813 +1018 1 0 days 00:00:00.401159006 +1018 2 0 days 00:00:00.497084500 +1018 3 0 days 00:00:00.336947920 +1018 4 0 days 00:00:00.867336466 +1018 5 0 days 00:00:00.725044152 +1018 6 0 days 00:00:00.333779466 +1018 7 0 days 00:00:00.587172920 +1018 8 0 days 00:00:00.356725486 +1018 9 0 days 00:00:00.323999820 +1018 10 0 days 00:00:01.107220553 +1018 11 0 days 00:00:00.860548053 +1018 12 0 days 00:00:00.482501773 +1018 13 0 days 00:00:00.614114113 +1018 14 0 days 00:00:00.594361033 +1018 15 0 days 00:00:00.839502546 +1018 16 0 days 00:00:00.342247406 +1018 17 0 days 00:00:00.591881393 +1018 18 0 days 00:00:01.154820373 +1018 19 0 days 00:00:00.277767746 +1018 20 0 days 00:00:00.746622223 +1018 21 0 days 00:00:00.456496913 +1018 22 0 days 00:00:00.392372860 +1018 23 0 days 00:00:00.244932813 +1018 24 0 days 00:00:00.488473620 +1018 25 0 days 00:00:00.591916793 +1018 26 0 days 00:00:00.903494266 +1018 27 0 days 00:00:01.140353840 +1018 28 0 days 00:00:00.875732860 +1018 29 0 days 00:00:01.157900713 +1018 30 0 days 00:00:00.875774040 +1018 31 0 days 00:00:00.323475033 +1018 32 0 days 00:00:00.590671226 +1018 33 0 days 00:00:00.580801646 +1018 34 0 days 00:00:00.833617026 +1018 35 0 days 00:00:01.146189140 +1018 36 0 days 00:00:00.582190673 +1018 37 0 days 00:00:01.146059186 +1018 38 0 days 00:00:00.473758206 +1018 39 0 days 
00:00:00.605691440 +1018 40 0 days 00:00:00.699924033 +1018 41 0 days 00:00:00.349729586 +1018 42 0 days 00:00:00.474244226 +1018 43 0 days 00:00:01.106963220 +1018 44 0 days 00:00:00.340252693 +1018 45 0 days 00:00:00.577122526 +1018 46 0 days 00:00:00.458397913 +1018 47 0 days 00:00:00.264266186 +1018 48 0 days 00:00:00.346925333 +1018 49 0 days 00:00:00.898956360 +1018 50 0 days 00:00:00.696107970 +1018 51 0 days 00:00:00.275580453 +1018 52 0 days 00:00:00.667572388 +1018 53 0 days 00:00:00.616930113 +1018 54 0 days 00:00:00.327242506 +1018 55 0 days 00:00:00.343203526 +1018 56 0 days 00:00:00.390947140 +1018 57 0 days 00:00:00.910486715 +1018 58 0 days 00:00:01.437834910 +1018 59 0 days 00:00:00.442987966 +1018 60 0 days 00:00:01.102038913 +1018 61 0 days 00:00:01.161027753 +1018 62 0 days 00:00:01.444368983 +1018 63 0 days 00:00:00.546114975 +1018 64 0 days 00:00:00.905155686 +1018 65 0 days 00:00:00.511406886 +1018 66 0 days 00:00:01.067278292 +1018 67 0 days 00:00:00.497072493 +1018 68 0 days 00:00:00.346314966 +1018 69 0 days 00:00:00.312288860 +1018 70 0 days 00:00:00.498116753 +1018 71 0 days 00:00:00.526264996 +1018 72 0 days 00:00:00.482786213 +1018 73 0 days 00:00:00.368607166 +1018 74 0 days 00:00:00.268886866 +1018 75 0 days 00:00:00.626240260 +1018 76 0 days 00:00:00.364289660 +1018 77 0 days 00:00:01.017516104 +1018 78 0 days 00:00:00.642737260 +1018 79 0 days 00:00:00.895247580 +1018 80 0 days 00:00:00.480606626 +1018 81 0 days 00:00:01.321122016 +1018 82 0 days 00:00:00.625391786 +1018 83 0 days 00:00:00.352512660 +1018 84 0 days 00:00:00.280861006 +1018 85 0 days 00:00:00.618428420 +1018 86 0 days 00:00:00.538654340 +1018 87 0 days 00:00:00.522759006 +1018 88 0 days 00:00:01.176722820 +1018 89 0 days 00:00:00.468774066 +1018 90 0 days 00:00:01.117338983 +1018 91 0 days 00:00:00.860966793 +1018 92 0 days 00:00:00.972386193 +1018 93 0 days 00:00:00.273490166 +1018 94 0 days 00:00:00.939159313 +1018 95 0 days 00:00:01.210085580 +1018 96 0 days 
00:00:00.647782486 +1018 97 0 days 00:00:00.312778233 +1018 98 0 days 00:00:01.147990513 +1018 99 0 days 00:00:00.604814246 +1018 100 0 days 00:00:00.311933933 +1019 1 0 days 00:00:00.495691233 +1019 2 0 days 00:00:00.280850132 +1019 3 0 days 00:00:00.654194380 +1019 4 0 days 00:00:00.192110126 +1019 5 0 days 00:00:00.422241773 +1019 6 0 days 00:00:00.596025593 +1019 7 0 days 00:00:00.416708773 +1019 8 0 days 00:00:00.186666760 +1019 9 0 days 00:00:00.258113060 +1019 10 0 days 00:00:00.251745206 +1019 11 0 days 00:00:00.655730916 +1019 12 0 days 00:00:00.312906593 +1019 13 0 days 00:00:00.441175213 +1019 14 0 days 00:00:00.270382146 +1019 15 0 days 00:00:00.266827873 +1019 16 0 days 00:00:00.601709846 +1019 17 0 days 00:00:00.551767800 +1019 18 0 days 00:00:00.423161956 +1019 19 0 days 00:00:00.197466146 +1019 20 0 days 00:00:00.174637100 +1019 21 0 days 00:00:00.456416933 +1019 22 0 days 00:00:00.183868180 +1019 23 0 days 00:00:00.187105480 +1019 24 0 days 00:00:00.187237880 +1019 25 0 days 00:00:00.661878920 +1019 26 0 days 00:00:00.165323866 +1019 27 0 days 00:00:00.388053612 +1019 28 0 days 00:00:00.711984344 +1019 29 0 days 00:00:00.583435086 +1019 30 0 days 00:00:00.269083406 +1019 31 0 days 00:00:00.249414866 +1019 32 0 days 00:00:00.562533720 +1019 33 0 days 00:00:00.186964346 +1019 34 0 days 00:00:00.208552940 +1019 35 0 days 00:00:00.315378216 +1019 36 0 days 00:00:00.557539360 +1019 37 0 days 00:00:00.275463406 +1019 38 0 days 00:00:00.332901500 +1019 39 0 days 00:00:00.263904406 +1019 40 0 days 00:00:00.189420813 +1019 41 0 days 00:00:00.165109473 +1019 42 0 days 00:00:00.187967933 +1019 43 0 days 00:00:00.602953280 +1019 44 0 days 00:00:00.151750200 +1019 45 0 days 00:00:00.497358733 +1019 46 0 days 00:00:00.471326426 +1019 47 0 days 00:00:00.192586640 +1019 48 0 days 00:00:00.621002753 +1019 49 0 days 00:00:00.192844840 +1019 50 0 days 00:00:00.379455390 +1019 51 0 days 00:00:00.173284180 +1019 52 0 days 00:00:00.186047133 +1019 53 0 days 
00:00:00.188805193 +1019 54 0 days 00:00:00.383999853 +1019 55 0 days 00:00:00.262940073 +1019 56 0 days 00:00:00.604249153 +1019 57 0 days 00:00:00.183479906 +1019 58 0 days 00:00:00.208750006 +1019 59 0 days 00:00:00.289126153 +1019 60 0 days 00:00:00.290231073 +1019 61 0 days 00:00:00.296867313 +1019 62 0 days 00:00:00.190984073 +1019 63 0 days 00:00:00.193853906 +1019 64 0 days 00:00:00.198521013 +1019 65 0 days 00:00:00.368667033 +1019 66 0 days 00:00:00.261764320 +1019 67 0 days 00:00:00.603574930 +1019 68 0 days 00:00:00.341596600 +1019 69 0 days 00:00:00.338160653 +1019 70 0 days 00:00:00.519933820 +1019 71 0 days 00:00:00.378000956 +1019 72 0 days 00:00:00.287701500 +1019 73 0 days 00:00:00.238242992 +1019 74 0 days 00:00:00.162126206 +1019 75 0 days 00:00:00.466308633 +1019 76 0 days 00:00:00.206588926 +1019 77 0 days 00:00:00.328958252 +1019 78 0 days 00:00:00.189364566 +1019 79 0 days 00:00:00.551090820 +1019 80 0 days 00:00:00.238803793 +1019 81 0 days 00:00:00.281793760 +1019 82 0 days 00:00:00.607988020 +1019 83 0 days 00:00:00.179760313 +1019 84 0 days 00:00:00.183876213 +1019 85 0 days 00:00:00.288544100 +1019 86 0 days 00:00:00.519791440 +1019 87 0 days 00:00:00.340401453 +1019 88 0 days 00:00:00.183154300 +1019 89 0 days 00:00:00.721065676 +1019 90 0 days 00:00:00.109497420 +1019 91 0 days 00:00:00.338230180 +1019 92 0 days 00:00:00.340672423 +1019 93 0 days 00:00:00.183320693 +1019 94 0 days 00:00:00.157911793 +1019 95 0 days 00:00:00.182529306 +1019 96 0 days 00:00:00.610793313 +1019 97 0 days 00:00:00.200240013 +1019 98 0 days 00:00:00.507938520 +1019 99 0 days 00:00:00.338428920 +1019 100 0 days 00:00:00.157333540 +1020 1 0 days 00:00:00.157597113 +1020 2 0 days 00:00:00.093338186 +1020 3 0 days 00:00:00.102951253 +1020 4 0 days 00:00:00.089789946 +1020 5 0 days 00:00:00.102360906 +1020 6 0 days 00:00:00.089915153 +1020 7 0 days 00:00:00.107271406 +1020 8 0 days 00:00:00.096838633 +1020 9 0 days 00:00:00.092777480 +1020 10 0 days 
00:00:00.101429586 +1020 11 0 days 00:00:00.112791300 +1020 12 0 days 00:00:00.123263653 +1020 13 0 days 00:00:00.101633175 +1020 14 0 days 00:00:00.122431166 +1020 15 0 days 00:00:00.094309893 +1020 16 0 days 00:00:00.132393613 +1020 17 0 days 00:00:00.120746886 +1020 18 0 days 00:00:00.122403140 +1020 19 0 days 00:00:00.092912066 +1020 20 0 days 00:00:00.122865173 +1020 21 0 days 00:00:00.116140997 +1020 22 0 days 00:00:00.088525906 +1020 23 0 days 00:00:00.093161253 +1020 24 0 days 00:00:00.102756780 +1020 25 0 days 00:00:00.110620420 +1020 26 0 days 00:00:00.116746960 +1020 27 0 days 00:00:00.092731093 +1020 28 0 days 00:00:00.125369813 +1020 29 0 days 00:00:00.102219113 +1020 30 0 days 00:00:00.145275764 +1020 31 0 days 00:00:00.103973320 +1020 32 0 days 00:00:00.124112000 +1020 33 0 days 00:00:00.103742786 +1020 34 0 days 00:00:00.131527846 +1020 35 0 days 00:00:00.130032020 +1020 36 0 days 00:00:00.108573633 +1020 37 0 days 00:00:00.091908833 +1020 38 0 days 00:00:00.101372020 +1020 39 0 days 00:00:00.093529306 +1020 40 0 days 00:00:00.100730233 +1020 41 0 days 00:00:00.134865533 +1020 42 0 days 00:00:00.099758220 +1020 43 0 days 00:00:00.102326446 +1020 44 0 days 00:00:00.126025808 +1020 45 0 days 00:00:00.103239660 +1020 46 0 days 00:00:00.090441626 +1020 47 0 days 00:00:00.104422560 +1020 48 0 days 00:00:00.116728680 +1020 49 0 days 00:00:00.089432626 +1020 50 0 days 00:00:00.124568906 +1020 51 0 days 00:00:00.091896260 +1020 52 0 days 00:00:00.089706093 +1020 53 0 days 00:00:00.099005720 +1020 54 0 days 00:00:00.121345193 +1020 55 0 days 00:00:00.122376366 +1020 56 0 days 00:00:00.105430286 +1020 57 0 days 00:00:00.089869700 +1020 58 0 days 00:00:00.091849493 +1020 59 0 days 00:00:00.123269680 +1020 60 0 days 00:00:00.101393593 +1020 61 0 days 00:00:00.141757933 +1020 62 0 days 00:00:00.128371153 +1020 63 0 days 00:00:00.112713925 +1020 64 0 days 00:00:00.122516700 +1020 65 0 days 00:00:00.123681760 +1020 66 0 days 00:00:00.100889080 +1020 67 0 days 
00:00:00.104080830 +1020 68 0 days 00:00:00.121851973 +1020 69 0 days 00:00:00.095964653 +1020 70 0 days 00:00:00.100331953 +1020 71 0 days 00:00:00.092453373 +1020 72 0 days 00:00:00.099583793 +1020 73 0 days 00:00:00.093453260 +1020 74 0 days 00:00:00.090592473 +1020 75 0 days 00:00:00.103399466 +1020 76 0 days 00:00:00.103114773 +1020 77 0 days 00:00:00.105092220 +1020 78 0 days 00:00:00.091831646 +1020 79 0 days 00:00:00.101326453 +1020 80 0 days 00:00:00.108497546 +1020 81 0 days 00:00:00.103202386 +1020 82 0 days 00:00:00.101364193 +1020 83 0 days 00:00:00.122785913 +1020 84 0 days 00:00:00.126469726 +1020 85 0 days 00:00:00.116129846 +1020 86 0 days 00:00:00.129181406 +1020 87 0 days 00:00:00.092643460 +1020 88 0 days 00:00:00.091538406 +1020 89 0 days 00:00:00.122242500 +1020 90 0 days 00:00:00.098029206 +1020 91 0 days 00:00:00.091179473 +1020 92 0 days 00:00:00.090885826 +1020 93 0 days 00:00:00.129465906 +1020 94 0 days 00:00:00.115337986 +1020 95 0 days 00:00:00.132058513 +1020 96 0 days 00:00:00.103422540 +1020 97 0 days 00:00:00.101058953 +1020 98 0 days 00:00:00.124994580 +1020 99 0 days 00:00:00.093551346 +1020 100 0 days 00:00:00.126972106 +1021 1 0 days 00:00:00.126445986 +1021 2 0 days 00:00:00.113297520 +1021 3 0 days 00:00:00.158433386 +1021 4 0 days 00:00:00.094349213 +1021 5 0 days 00:00:00.093800366 +1021 6 0 days 00:00:00.094366193 +1021 7 0 days 00:00:00.129187520 +1021 8 0 days 00:00:00.108660728 +1021 9 0 days 00:00:00.122883368 +1021 10 0 days 00:00:00.129792200 +1021 11 0 days 00:00:00.139137893 +1021 12 0 days 00:00:00.093427960 +1021 13 0 days 00:00:00.124785380 +1021 14 0 days 00:00:00.132765745 +1021 15 0 days 00:00:00.093492680 +1021 16 0 days 00:00:00.140043420 +1021 17 0 days 00:00:00.103499066 +1021 18 0 days 00:00:00.136813453 +1021 19 0 days 00:00:00.131137813 +1021 20 0 days 00:00:00.107007870 +1021 21 0 days 00:00:00.133772373 +1021 22 0 days 00:00:00.119640085 +1021 23 0 days 00:00:00.129247560 +1021 24 0 days 
00:00:00.124140952 +1021 25 0 days 00:00:00.128564240 +1021 26 0 days 00:00:00.161873751 +1021 27 0 days 00:00:00.091895640 +1021 28 0 days 00:00:00.123376988 +1021 29 0 days 00:00:00.109923886 +1021 30 0 days 00:00:00.129785693 +1021 31 0 days 00:00:00.093947626 +1021 32 0 days 00:00:00.093390100 +1021 33 0 days 00:00:00.106939126 +1021 34 0 days 00:00:00.117763537 +1021 35 0 days 00:00:00.094511286 +1021 36 0 days 00:00:00.124776924 +1021 37 0 days 00:00:00.122265833 +1021 38 0 days 00:00:00.129262213 +1021 39 0 days 00:00:00.156065123 +1021 40 0 days 00:00:00.091216320 +1021 41 0 days 00:00:00.103422993 +1021 42 0 days 00:00:00.100720226 +1021 43 0 days 00:00:00.119253350 +1021 44 0 days 00:00:00.103599620 +1021 45 0 days 00:00:00.095465860 +1021 46 0 days 00:00:00.117561924 +1021 47 0 days 00:00:00.107386693 +1021 48 0 days 00:00:00.093915753 +1021 49 0 days 00:00:00.104909106 +1021 50 0 days 00:00:00.121750000 +1021 51 0 days 00:00:00.093507013 +1021 52 0 days 00:00:00.119981975 +1021 53 0 days 00:00:00.113958640 +1021 54 0 days 00:00:00.114387890 +1021 55 0 days 00:00:00.133819432 +1021 56 0 days 00:00:00.124456326 +1021 57 0 days 00:00:00.093044406 +1021 58 0 days 00:00:00.105085215 +1021 59 0 days 00:00:00.113884226 +1021 60 0 days 00:00:00.158516483 +1021 61 0 days 00:00:00.111946740 +1021 62 0 days 00:00:00.093956280 +1021 63 0 days 00:00:00.104968513 +1021 64 0 days 00:00:00.143185550 +1021 65 0 days 00:00:00.100700340 +1021 66 0 days 00:00:00.133436366 +1021 67 0 days 00:00:00.123665696 +1021 68 0 days 00:00:00.142787780 +1021 69 0 days 00:00:00.093978186 +1021 70 0 days 00:00:00.105031266 +1021 71 0 days 00:00:00.096294760 +1021 72 0 days 00:00:00.093944000 +1021 73 0 days 00:00:00.093334573 +1021 74 0 days 00:00:00.105884113 +1021 75 0 days 00:00:00.107360826 +1021 76 0 days 00:00:00.181063002 +1021 77 0 days 00:00:00.165484537 +1021 78 0 days 00:00:00.093915406 +1021 79 0 days 00:00:00.132924086 +1021 80 0 days 00:00:00.132581853 +1021 81 0 days 
00:00:00.155560390 +1021 82 0 days 00:00:00.106149793 +1021 83 0 days 00:00:00.113377733 +1021 84 0 days 00:00:00.098759773 +1021 85 0 days 00:00:00.109613506 +1021 86 0 days 00:00:00.107194433 +1021 87 0 days 00:00:00.107630313 +1021 88 0 days 00:00:00.116769620 +1021 89 0 days 00:00:00.132314020 +1021 90 0 days 00:00:00.108413886 +1021 91 0 days 00:00:00.103091386 +1021 92 0 days 00:00:00.136179337 +1021 93 0 days 00:00:00.104933700 +1021 94 0 days 00:00:00.135619780 +1021 95 0 days 00:00:00.095016293 +1021 96 0 days 00:00:00.113241000 +1021 97 0 days 00:00:00.109137913 +1021 98 0 days 00:00:00.173379672 +1021 99 0 days 00:00:00.110403580 +1021 100 0 days 00:00:00.106786040 +1022 1 0 days 00:00:00.059679866 +1022 2 0 days 00:00:00.073914960 +1022 3 0 days 00:00:00.075241253 +1022 4 0 days 00:00:00.058120940 +1022 5 0 days 00:00:00.076177626 +1022 6 0 days 00:00:00.072061220 +1022 7 0 days 00:00:00.057201620 +1022 8 0 days 00:00:00.054762386 +1022 9 0 days 00:00:00.056673500 +1022 10 0 days 00:00:00.073869140 +1022 11 0 days 00:00:00.053545780 +1022 12 0 days 00:00:00.066629060 +1022 13 0 days 00:00:00.074270213 +1022 14 0 days 00:00:00.061403772 +1022 15 0 days 00:00:00.056527006 +1022 16 0 days 00:00:00.073748986 +1022 17 0 days 00:00:00.062365540 +1022 18 0 days 00:00:00.065153600 +1022 19 0 days 00:00:00.057312713 +1022 20 0 days 00:00:00.057966840 +1022 21 0 days 00:00:00.052614186 +1022 22 0 days 00:00:00.071174346 +1022 23 0 days 00:00:00.061654693 +1022 24 0 days 00:00:00.072039593 +1022 25 0 days 00:00:00.059424446 +1022 26 0 days 00:00:00.061738473 +1022 27 0 days 00:00:00.077119400 +1022 28 0 days 00:00:00.072735553 +1022 29 0 days 00:00:00.068189928 +1022 30 0 days 00:00:00.078898020 +1022 31 0 days 00:00:00.058783973 +1022 32 0 days 00:00:00.073728586 +1022 33 0 days 00:00:00.073202633 +1022 34 0 days 00:00:00.058249486 +1022 35 0 days 00:00:00.052956573 +1022 36 0 days 00:00:00.052093433 +1022 37 0 days 00:00:00.070360360 +1022 38 0 days 
00:00:00.051737620 +1022 39 0 days 00:00:00.086230352 +1022 40 0 days 00:00:00.057149046 +1022 41 0 days 00:00:00.052950086 +1022 42 0 days 00:00:00.058685240 +1022 43 0 days 00:00:00.073791986 +1022 44 0 days 00:00:00.053587546 +1022 45 0 days 00:00:00.055800840 +1022 46 0 days 00:00:00.071935966 +1022 47 0 days 00:00:00.058984006 +1022 48 0 days 00:00:00.058938840 +1022 49 0 days 00:00:00.073685652 +1022 50 0 days 00:00:00.058645473 +1022 51 0 days 00:00:00.071059666 +1022 52 0 days 00:00:00.052748873 +1022 53 0 days 00:00:00.077012380 +1022 54 0 days 00:00:00.054027573 +1022 55 0 days 00:00:00.073015540 +1022 56 0 days 00:00:00.054034933 +1022 57 0 days 00:00:00.062872993 +1022 58 0 days 00:00:00.052820946 +1022 59 0 days 00:00:00.059966660 +1022 60 0 days 00:00:00.075989093 +1022 61 0 days 00:00:00.052572626 +1022 62 0 days 00:00:00.076096380 +1022 63 0 days 00:00:00.054834573 +1022 64 0 days 00:00:00.052621946 +1022 65 0 days 00:00:00.053843740 +1022 66 0 days 00:00:00.055615333 +1022 67 0 days 00:00:00.059902593 +1022 68 0 days 00:00:00.072800086 +1022 69 0 days 00:00:00.057135760 +1022 70 0 days 00:00:00.071970533 +1022 71 0 days 00:00:00.060509146 +1022 72 0 days 00:00:00.058359740 +1022 73 0 days 00:00:00.052734820 +1022 74 0 days 00:00:00.052612960 +1022 75 0 days 00:00:00.069233926 +1022 76 0 days 00:00:00.066266455 +1022 77 0 days 00:00:00.054595193 +1022 78 0 days 00:00:00.052018453 +1022 79 0 days 00:00:00.074506566 +1022 80 0 days 00:00:00.055947593 +1022 81 0 days 00:00:00.055596053 +1022 82 0 days 00:00:00.053207386 +1022 83 0 days 00:00:00.075165586 +1022 84 0 days 00:00:00.061134273 +1022 85 0 days 00:00:00.072768013 +1022 86 0 days 00:00:00.052932673 +1022 87 0 days 00:00:00.063312513 +1022 88 0 days 00:00:00.058889313 +1022 89 0 days 00:00:00.061807053 +1022 90 0 days 00:00:00.071951166 +1022 91 0 days 00:00:00.084036395 +1022 92 0 days 00:00:00.076126040 +1022 93 0 days 00:00:00.060401520 +1022 94 0 days 00:00:00.072482740 +1022 95 0 days 
00:00:00.059787646 +1022 96 0 days 00:00:00.074420660 +1022 97 0 days 00:00:00.076565266 +1022 98 0 days 00:00:00.060903653 +1022 99 0 days 00:00:00.060660626 +1022 100 0 days 00:00:00.077748780 +1023 1 0 days 00:00:00.077627813 +1023 2 0 days 00:00:00.065553553 +1023 3 0 days 00:00:00.076963500 +1023 4 0 days 00:00:00.087244725 +1023 5 0 days 00:00:00.064633180 +1023 6 0 days 00:00:00.061249220 +1023 7 0 days 00:00:00.079065393 +1023 8 0 days 00:00:00.074885380 +1023 9 0 days 00:00:00.058319680 +1023 10 0 days 00:00:00.091919362 +1023 11 0 days 00:00:00.079246230 +1023 12 0 days 00:00:00.081044770 +1023 13 0 days 00:00:00.087310273 +1023 14 0 days 00:00:00.059605640 +1023 15 0 days 00:00:00.071458426 +1023 16 0 days 00:00:00.059152833 +1023 17 0 days 00:00:00.074157606 +1023 18 0 days 00:00:00.061887980 +1023 19 0 days 00:00:00.064216373 +1023 20 0 days 00:00:00.078840170 +1023 21 0 days 00:00:00.058545646 +1023 22 0 days 00:00:00.071218913 +1023 23 0 days 00:00:00.060327260 +1023 24 0 days 00:00:00.071967052 +1023 25 0 days 00:00:00.058382580 +1023 26 0 days 00:00:00.053040926 +1023 27 0 days 00:00:00.058204320 +1023 28 0 days 00:00:00.058798473 +1023 29 0 days 00:00:00.058028826 +1023 30 0 days 00:00:00.081274053 +1023 31 0 days 00:00:00.057357126 +1023 32 0 days 00:00:00.059163566 +1023 33 0 days 00:00:00.058636533 +1023 34 0 days 00:00:00.052783653 +1023 35 0 days 00:00:00.052423873 +1023 36 0 days 00:00:00.074807946 +1023 37 0 days 00:00:00.058290193 +1023 38 0 days 00:00:00.058360120 +1023 39 0 days 00:00:00.057858820 +1023 40 0 days 00:00:00.058998960 +1023 41 0 days 00:00:00.053333306 +1023 42 0 days 00:00:00.057666980 +1023 43 0 days 00:00:00.071663566 +1023 44 0 days 00:00:00.067397672 +1023 45 0 days 00:00:00.058790980 +1023 46 0 days 00:00:00.052393260 +1023 47 0 days 00:00:00.075641786 +1023 48 0 days 00:00:00.075516473 +1023 49 0 days 00:00:00.090128477 +1023 50 0 days 00:00:00.075126657 +1023 51 0 days 00:00:00.054274840 +1023 52 0 days 
00:00:00.064833185 +1023 53 0 days 00:00:00.053395233 +1023 54 0 days 00:00:00.057551265 +1023 55 0 days 00:00:00.052373166 +1023 56 0 days 00:00:00.074293486 +1023 57 0 days 00:00:00.061509693 +1023 58 0 days 00:00:00.067435940 +1023 59 0 days 00:00:00.055591033 +1023 60 0 days 00:00:00.085804396 +1023 61 0 days 00:00:00.061164440 +1023 62 0 days 00:00:00.053253573 +1023 63 0 days 00:00:00.063989976 +1023 64 0 days 00:00:00.087427160 +1023 65 0 days 00:00:00.081132420 +1023 66 0 days 00:00:00.086103355 +1023 67 0 days 00:00:00.074403596 +1023 68 0 days 00:00:00.060991566 +1023 69 0 days 00:00:00.057313253 +1023 70 0 days 00:00:00.058520120 +1023 71 0 days 00:00:00.070770293 +1023 72 0 days 00:00:00.059574993 +1023 73 0 days 00:00:00.061315566 +1023 74 0 days 00:00:00.069812082 +1023 75 0 days 00:00:00.059909433 +1023 76 0 days 00:00:00.051772293 +1023 77 0 days 00:00:00.068642112 +1023 78 0 days 00:00:00.064285573 +1023 79 0 days 00:00:00.062503706 +1023 80 0 days 00:00:00.059997286 +1023 81 0 days 00:00:00.079336293 +1023 82 0 days 00:00:00.086482896 +1023 83 0 days 00:00:00.073230060 +1023 84 0 days 00:00:00.062490253 +1023 85 0 days 00:00:00.083680364 +1023 86 0 days 00:00:00.053866166 +1023 87 0 days 00:00:00.072447766 +1023 88 0 days 00:00:00.053233940 +1023 89 0 days 00:00:00.058950653 +1023 90 0 days 00:00:00.091550950 +1023 91 0 days 00:00:00.077307353 +1023 92 0 days 00:00:00.054805160 +1023 93 0 days 00:00:00.060553500 +1023 94 0 days 00:00:00.054136226 +1023 95 0 days 00:00:00.088917245 +1023 96 0 days 00:00:00.053306093 +1023 97 0 days 00:00:00.074155686 +1023 98 0 days 00:00:00.077335146 +1023 99 0 days 00:00:00.072809463 +1023 100 0 days 00:00:00.054667180 +1024 1 0 days 00:00:00.135022196 +1024 2 0 days 00:00:00.110463315 +1024 3 0 days 00:00:00.135373185 +1024 4 0 days 00:00:00.090625620 +1024 5 0 days 00:00:00.154889633 +1024 6 0 days 00:00:00.122296660 +1024 7 0 days 00:00:00.119465684 +1024 8 0 days 00:00:00.161068118 +1024 9 0 days 
00:00:00.100197533 +1024 10 0 days 00:00:00.091831133 +1024 11 0 days 00:00:00.102718446 +1024 12 0 days 00:00:00.134547513 +1024 13 0 days 00:00:00.128864648 +1024 14 0 days 00:00:00.100591526 +1024 15 0 days 00:00:00.098371126 +1024 16 0 days 00:00:00.123028760 +1024 17 0 days 00:00:00.142870240 +1024 18 0 days 00:00:00.093935406 +1024 19 0 days 00:00:00.123532472 +1024 20 0 days 00:00:00.114276335 +1024 21 0 days 00:00:00.093300173 +1024 22 0 days 00:00:00.161529211 +1024 23 0 days 00:00:00.142495060 +1024 24 0 days 00:00:00.114705397 +1024 25 0 days 00:00:00.091531113 +1024 26 0 days 00:00:00.117034084 +1024 27 0 days 00:00:00.126011953 +1024 28 0 days 00:00:00.089106253 +1024 29 0 days 00:00:00.123366513 +1024 30 0 days 00:00:00.091508633 +1024 31 0 days 00:00:00.118563780 +1024 32 0 days 00:00:00.102042366 +1024 33 0 days 00:00:00.142619973 +1024 34 0 days 00:00:00.099468286 +1024 35 0 days 00:00:00.129220966 +1024 36 0 days 00:00:00.090231100 +1024 37 0 days 00:00:00.106724993 +1024 38 0 days 00:00:00.142097600 +1024 39 0 days 00:00:00.100589920 +1024 40 0 days 00:00:00.121775888 +1024 41 0 days 00:00:00.123698626 +1024 42 0 days 00:00:00.131885286 +1024 43 0 days 00:00:00.130821157 +1024 44 0 days 00:00:00.102643246 +1024 45 0 days 00:00:00.100393266 +1024 46 0 days 00:00:00.091279353 +1024 47 0 days 00:00:00.127166166 +1024 48 0 days 00:00:00.122667912 +1024 49 0 days 00:00:00.109926780 +1024 50 0 days 00:00:00.112443190 +1024 51 0 days 00:00:00.090362146 +1024 52 0 days 00:00:00.102870793 +1024 53 0 days 00:00:00.119145236 +1024 54 0 days 00:00:00.093692706 +1024 55 0 days 00:00:00.127829500 +1024 56 0 days 00:00:00.110970073 +1024 57 0 days 00:00:00.121633660 +1024 58 0 days 00:00:00.103633386 +1024 59 0 days 00:00:00.148742480 +1024 60 0 days 00:00:00.102210493 +1024 61 0 days 00:00:00.119584972 +1024 62 0 days 00:00:00.126014700 +1024 63 0 days 00:00:00.109995048 +1024 64 0 days 00:00:00.093961120 +1024 65 0 days 00:00:00.103557860 +1024 66 0 days 
00:00:00.123649056 +1024 67 0 days 00:00:00.090998320 +1024 68 0 days 00:00:00.094426120 +1024 69 0 days 00:00:00.159726605 +1024 70 0 days 00:00:00.102114353 +1024 71 0 days 00:00:00.091313220 +1024 72 0 days 00:00:00.132935585 +1024 73 0 days 00:00:00.164287233 +1024 74 0 days 00:00:00.100631646 +1024 75 0 days 00:00:00.111498168 +1024 76 0 days 00:00:00.156552204 +1024 77 0 days 00:00:00.102329166 +1024 78 0 days 00:00:00.154325096 +1024 79 0 days 00:00:00.093478460 +1024 80 0 days 00:00:00.148376840 +1024 81 0 days 00:00:00.092861720 +1024 82 0 days 00:00:00.095379566 +1024 83 0 days 00:00:00.119668372 +1024 84 0 days 00:00:00.091747740 +1024 85 0 days 00:00:00.131966647 +1024 86 0 days 00:00:00.127580940 +1024 87 0 days 00:00:00.105882660 +1024 88 0 days 00:00:00.091898866 +1024 89 0 days 00:00:00.127370733 +1024 90 0 days 00:00:00.138269125 +1024 91 0 days 00:00:00.103520553 +1024 92 0 days 00:00:00.104083313 +1024 93 0 days 00:00:00.144666946 +1024 94 0 days 00:00:00.103348240 +1024 95 0 days 00:00:00.124079766 +1024 96 0 days 00:00:00.099405680 +1024 97 0 days 00:00:00.118278641 +1024 98 0 days 00:00:00.151339043 +1024 99 0 days 00:00:00.156253536 +1024 100 0 days 00:00:00.125899106 +1025 1 0 days 00:00:00.070613853 +1025 2 0 days 00:00:00.069483618 +1025 3 0 days 00:00:00.074166854 +1025 4 0 days 00:00:00.059499813 +1025 5 0 days 00:00:00.059317806 +1025 6 0 days 00:00:00.093933185 +1025 7 0 days 00:00:00.079134680 +1025 8 0 days 00:00:00.060138360 +1025 9 0 days 00:00:00.055202526 +1025 10 0 days 00:00:00.053466006 +1025 11 0 days 00:00:00.060179646 +1025 12 0 days 00:00:00.071411228 +1025 13 0 days 00:00:00.071288993 +1025 14 0 days 00:00:00.073064660 +1025 15 0 days 00:00:00.060470060 +1025 16 0 days 00:00:00.061942053 +1025 17 0 days 00:00:00.061184072 +1025 18 0 days 00:00:00.054196200 +1025 19 0 days 00:00:00.064578506 +1025 20 0 days 00:00:00.058931453 +1025 21 0 days 00:00:00.075359707 +1025 22 0 days 00:00:00.053215993 +1025 23 0 days 
00:00:00.055479320 +1025 24 0 days 00:00:00.055365193 +1025 25 0 days 00:00:00.068293180 +1025 26 0 days 00:00:00.072650753 +1025 27 0 days 00:00:00.059073286 +1025 28 0 days 00:00:00.074108300 +1025 29 0 days 00:00:00.071815480 +1025 30 0 days 00:00:00.060179673 +1025 31 0 days 00:00:00.090551051 +1025 32 0 days 00:00:00.070475436 +1025 33 0 days 00:00:00.058715646 +1025 34 0 days 00:00:00.074777526 +1025 35 0 days 00:00:00.054462866 +1025 36 0 days 00:00:00.074739811 +1025 37 0 days 00:00:00.060338206 +1025 38 0 days 00:00:00.065892097 +1025 39 0 days 00:00:00.063631806 +1025 40 0 days 00:00:00.055700240 +1025 41 0 days 00:00:00.053858413 +1025 42 0 days 00:00:00.092455073 +1025 43 0 days 00:00:00.053811926 +1025 44 0 days 00:00:00.072837500 +1025 45 0 days 00:00:00.092933402 +1025 46 0 days 00:00:00.085446652 +1025 47 0 days 00:00:00.062142213 +1025 48 0 days 00:00:00.072956855 +1025 49 0 days 00:00:00.072731366 +1025 50 0 days 00:00:00.086724460 +1025 51 0 days 00:00:00.094858784 +1025 52 0 days 00:00:00.087174736 +1025 53 0 days 00:00:00.058552960 +1025 54 0 days 00:00:00.062845526 +1025 55 0 days 00:00:00.082780120 +1025 56 0 days 00:00:00.083231806 +1025 57 0 days 00:00:00.084273284 +1025 58 0 days 00:00:00.074195266 +1025 59 0 days 00:00:00.056639806 +1025 60 0 days 00:00:00.057933640 +1025 61 0 days 00:00:00.075279627 +1025 62 0 days 00:00:00.078438693 +1025 63 0 days 00:00:00.053702233 +1025 64 0 days 00:00:00.075836400 +1025 65 0 days 00:00:00.066908537 +1025 66 0 days 00:00:00.058530546 +1025 67 0 days 00:00:00.070238553 +1025 68 0 days 00:00:00.062801906 +1025 69 0 days 00:00:00.059021826 +1025 70 0 days 00:00:00.064466035 +1025 71 0 days 00:00:00.073957506 +1025 72 0 days 00:00:00.057101686 +1025 73 0 days 00:00:00.057124400 +1025 74 0 days 00:00:00.054026926 +1025 75 0 days 00:00:00.068774520 +1025 76 0 days 00:00:00.056042440 +1025 77 0 days 00:00:00.084208008 +1025 78 0 days 00:00:00.069608880 +1025 79 0 days 00:00:00.068080713 +1025 80 0 days 
00:00:00.056768186 +1025 81 0 days 00:00:00.056811073 +1025 82 0 days 00:00:00.057334246 +1025 83 0 days 00:00:00.057587986 +1025 84 0 days 00:00:00.066328686 +1025 85 0 days 00:00:00.059937726 +1025 86 0 days 00:00:00.069419473 +1025 87 0 days 00:00:00.067785167 +1025 88 0 days 00:00:00.070978871 +1025 89 0 days 00:00:00.074835835 +1025 90 0 days 00:00:00.053979693 +1025 91 0 days 00:00:00.057183086 +1025 92 0 days 00:00:00.077674220 +1025 93 0 days 00:00:00.051606813 +1025 94 0 days 00:00:00.070844260 +1025 95 0 days 00:00:00.052433146 +1025 96 0 days 00:00:00.058035566 +1025 97 0 days 00:00:00.080657853 +1025 98 0 days 00:00:00.062227266 +1025 99 0 days 00:00:00.061442700 +1025 100 0 days 00:00:00.060459846 +1026 1 0 days 00:00:00.712075790 +1026 2 0 days 00:00:00.187378426 +1026 3 0 days 00:00:00.302172206 +1026 4 0 days 00:00:00.183019546 +1026 5 0 days 00:00:00.204383013 +1026 6 0 days 00:00:00.559363560 +1026 7 0 days 00:00:00.569448680 +1026 8 0 days 00:00:00.183159193 +1026 9 0 days 00:00:00.270825520 +1026 10 0 days 00:00:00.552368046 +1026 11 0 days 00:00:00.184121460 +1026 12 0 days 00:00:00.564663420 +1026 13 0 days 00:00:00.311879193 +1026 14 0 days 00:00:00.559495100 +1026 15 0 days 00:00:00.696726956 +1026 16 0 days 00:00:00.551053153 +1026 17 0 days 00:00:00.554695366 +1026 18 0 days 00:00:00.193793726 +1026 19 0 days 00:00:00.547573920 +1026 20 0 days 00:00:00.305210500 +1026 21 0 days 00:00:00.311191240 +1026 22 0 days 00:00:00.211315806 +1026 23 0 days 00:00:00.183104160 +1026 24 0 days 00:00:00.181645940 +1026 25 0 days 00:00:00.534805346 +1026 26 0 days 00:00:00.189702480 +1026 27 0 days 00:00:00.565238986 +1026 28 0 days 00:00:00.214155013 +1026 29 0 days 00:00:00.197416206 +1026 30 0 days 00:00:00.192241280 +1026 31 0 days 00:00:00.581356473 +1026 32 0 days 00:00:00.196368620 +1026 33 0 days 00:00:00.566681993 +1026 34 0 days 00:00:00.552964233 +1026 35 0 days 00:00:00.307660033 +1026 36 0 days 00:00:00.544475766 +1026 37 0 days 
00:00:00.311708586 +1026 38 0 days 00:00:00.557034400 +1026 39 0 days 00:00:00.203049646 +1026 40 0 days 00:00:00.477279675 +1026 41 0 days 00:00:00.651468610 +1026 42 0 days 00:00:00.567463313 +1026 43 0 days 00:00:00.311492866 +1026 44 0 days 00:00:00.567718160 +1026 45 0 days 00:00:00.334690740 +1026 46 0 days 00:00:00.191502666 +1026 47 0 days 00:00:00.310927986 +1026 48 0 days 00:00:00.645379405 +1026 49 0 days 00:00:00.219958293 +1026 50 0 days 00:00:00.220505560 +1026 51 0 days 00:00:00.234308432 +1026 52 0 days 00:00:00.552191820 +1026 53 0 days 00:00:00.630402990 +1026 54 0 days 00:00:00.296878553 +1026 55 0 days 00:00:00.239726740 +1026 56 0 days 00:00:00.189199626 +1026 57 0 days 00:00:00.203004600 +1026 58 0 days 00:00:00.177546186 +1026 59 0 days 00:00:00.650332260 +1026 60 0 days 00:00:00.316939540 +1026 61 0 days 00:00:00.354139240 +1026 62 0 days 00:00:00.544157453 +1026 63 0 days 00:00:00.381351610 +1026 64 0 days 00:00:00.747528317 +1026 65 0 days 00:00:00.193290853 +1026 66 0 days 00:00:00.559329760 +1026 67 0 days 00:00:00.615595060 +1026 68 0 days 00:00:00.564032153 +1026 69 0 days 00:00:00.329668753 +1026 70 0 days 00:00:00.141438093 +1026 71 0 days 00:00:00.414167100 +1026 72 0 days 00:00:00.573656273 +1026 73 0 days 00:00:00.299551800 +1026 74 0 days 00:00:00.588864900 +1026 75 0 days 00:00:00.182126253 +1026 76 0 days 00:00:00.184421140 +1026 77 0 days 00:00:00.185904940 +1026 78 0 days 00:00:00.188679680 +1026 79 0 days 00:00:00.553874066 +1026 80 0 days 00:00:00.361229112 +1026 81 0 days 00:00:00.572024820 +1026 82 0 days 00:00:00.535989206 +1026 83 0 days 00:00:00.318386466 +1026 84 0 days 00:00:00.319366786 +1026 85 0 days 00:00:00.310272753 +1026 86 0 days 00:00:00.555763846 +1026 87 0 days 00:00:00.199885286 +1026 88 0 days 00:00:00.563674660 +1026 89 0 days 00:00:00.575845853 +1026 90 0 days 00:00:00.186187026 +1026 91 0 days 00:00:00.299253528 +1026 92 0 days 00:00:00.201880500 +1026 93 0 days 00:00:00.337951453 +1026 94 0 days 
00:00:00.315772286 +1026 95 0 days 00:00:00.176092920 +1026 96 0 days 00:00:00.179125686 +1026 97 0 days 00:00:00.308240960 +1026 98 0 days 00:00:00.555929060 +1026 99 0 days 00:00:00.203170186 +1026 100 0 days 00:00:00.206384986 +1027 1 0 days 00:00:00.241436360 +1027 2 0 days 00:00:00.399078416 +1027 3 0 days 00:00:00.195548386 +1027 4 0 days 00:00:00.440537945 +1027 5 0 days 00:00:00.350135886 +1027 6 0 days 00:00:00.407160212 +1027 7 0 days 00:00:00.259562965 +1027 8 0 days 00:00:00.327635333 +1027 9 0 days 00:00:00.374345115 +1027 10 0 days 00:00:00.405262400 +1027 11 0 days 00:00:00.233466035 +1027 12 0 days 00:00:00.625732306 +1027 13 0 days 00:00:00.211861480 +1027 14 0 days 00:00:00.629491153 +1027 15 0 days 00:00:00.247581790 +1027 16 0 days 00:00:00.411332370 +1027 17 0 days 00:00:00.233758450 +1027 18 0 days 00:00:00.227357033 +1027 19 0 days 00:00:00.624941780 +1027 20 0 days 00:00:00.624223320 +1027 21 0 days 00:00:00.198388820 +1027 22 0 days 00:00:00.399349545 +1027 23 0 days 00:00:00.222106990 +1027 24 0 days 00:00:00.200319913 +1027 25 0 days 00:00:00.601544466 +1027 26 0 days 00:00:00.321450820 +1027 27 0 days 00:00:00.205651373 +1027 28 0 days 00:00:00.681492245 +1027 29 0 days 00:00:00.205718353 +1027 30 0 days 00:00:00.353945066 +1027 31 0 days 00:00:00.673336070 +1027 32 0 days 00:00:00.348879413 +1027 33 0 days 00:00:00.207847760 +1027 34 0 days 00:00:00.405906413 +1027 35 0 days 00:00:00.337009286 +1027 36 0 days 00:00:00.750598400 +1027 37 0 days 00:00:00.253897140 +1027 38 0 days 00:00:00.192307986 +1027 39 0 days 00:00:00.347940373 +1027 40 0 days 00:00:00.622500066 +1027 41 0 days 00:00:00.206778846 +1027 42 0 days 00:00:00.614307880 +1027 43 0 days 00:00:00.616146626 +1027 44 0 days 00:00:00.424887726 +1027 45 0 days 00:00:00.205170593 +1027 46 0 days 00:00:00.328274593 +1027 47 0 days 00:00:00.392698372 +1027 48 0 days 00:00:00.200811766 +1027 49 0 days 00:00:00.250009865 +1027 50 0 days 00:00:00.623502533 +1027 51 0 days 
00:00:00.433907533 +1027 52 0 days 00:00:00.195564033 +1027 53 0 days 00:00:00.618589100 +1027 54 0 days 00:00:00.243750192 +1027 55 0 days 00:00:00.266473742 +1027 56 0 days 00:00:00.628280500 +1027 57 0 days 00:00:00.449554053 +1027 58 0 days 00:00:00.431239557 +1027 59 0 days 00:00:00.623569280 +1027 60 0 days 00:00:00.673781635 +1027 61 0 days 00:00:00.442837373 +1027 62 0 days 00:00:00.235471668 +1027 63 0 days 00:00:00.202985373 +1027 64 0 days 00:00:00.354879593 +1027 65 0 days 00:00:00.249963060 +1027 66 0 days 00:00:00.204243406 +1027 67 0 days 00:00:00.363465440 +1027 68 0 days 00:00:00.250879186 +1027 69 0 days 00:00:00.346692040 +1027 70 0 days 00:00:00.710749345 +1027 71 0 days 00:00:00.201113453 +1027 72 0 days 00:00:00.207138333 +1027 73 0 days 00:00:00.189286033 +1027 74 0 days 00:00:00.269894243 +1027 75 0 days 00:00:00.204725986 +1027 76 0 days 00:00:00.425158228 +1027 77 0 days 00:00:00.722578456 +1027 78 0 days 00:00:00.311905503 +1027 79 0 days 00:00:00.607994353 +1027 80 0 days 00:00:00.368604195 +1027 81 0 days 00:00:00.675223130 +1027 82 0 days 00:00:00.202656993 +1027 83 0 days 00:00:00.333536326 +1027 84 0 days 00:00:00.635917053 +1027 85 0 days 00:00:00.610519506 +1027 86 0 days 00:00:00.200432966 +1027 87 0 days 00:00:00.196101893 +1027 88 0 days 00:00:00.383138835 +1027 89 0 days 00:00:00.449417722 +1027 90 0 days 00:00:00.314486537 +1027 91 0 days 00:00:00.318827746 +1027 92 0 days 00:00:00.433593631 +1027 93 0 days 00:00:00.414938344 +1027 94 0 days 00:00:00.416232188 +1027 95 0 days 00:00:00.618815466 +1027 96 0 days 00:00:00.342636680 +1027 97 0 days 00:00:00.608253000 +1027 98 0 days 00:00:00.436441123 +1027 99 0 days 00:00:00.187438173 +1027 100 0 days 00:00:00.189729080 +1028 1 0 days 00:00:00.168351446 +1028 2 0 days 00:00:00.295231806 +1028 3 0 days 00:00:00.174240653 +1028 4 0 days 00:00:00.170589280 +1028 5 0 days 00:00:00.103390206 +1028 6 0 days 00:00:00.106947626 +1028 7 0 days 00:00:00.297901713 +1028 8 0 days 
00:00:00.303867353 +1028 9 0 days 00:00:00.094839553 +1028 10 0 days 00:00:00.108131566 +1028 11 0 days 00:00:00.289512693 +1028 12 0 days 00:00:00.162206260 +1028 13 0 days 00:00:00.283050146 +1028 14 0 days 00:00:00.166628920 +1028 15 0 days 00:00:00.281432086 +1028 16 0 days 00:00:00.108538686 +1028 17 0 days 00:00:00.104897286 +1028 18 0 days 00:00:00.101553520 +1028 19 0 days 00:00:00.164125453 +1028 20 0 days 00:00:00.159709086 +1028 21 0 days 00:00:00.111155853 +1028 22 0 days 00:00:00.291267840 +1028 23 0 days 00:00:00.287074866 +1028 24 0 days 00:00:00.174661746 +1028 25 0 days 00:00:00.294613966 +1028 26 0 days 00:00:00.281300560 +1028 27 0 days 00:00:00.094318720 +1028 28 0 days 00:00:00.098256053 +1028 29 0 days 00:00:00.167024953 +1028 30 0 days 00:00:00.291441540 +1028 31 0 days 00:00:00.290151780 +1028 32 0 days 00:00:00.166043393 +1028 33 0 days 00:00:00.093651840 +1028 34 0 days 00:00:00.097786853 +1028 35 0 days 00:00:00.292113206 +1028 36 0 days 00:00:00.094636106 +1028 37 0 days 00:00:00.095942313 +1028 38 0 days 00:00:00.297049460 +1028 39 0 days 00:00:00.288250886 +1028 40 0 days 00:00:00.286563080 +1028 41 0 days 00:00:00.105592153 +1028 42 0 days 00:00:00.096194386 +1028 43 0 days 00:00:00.155506513 +1028 44 0 days 00:00:00.096210726 +1028 45 0 days 00:00:00.189150950 +1028 46 0 days 00:00:00.163100566 +1028 47 0 days 00:00:00.160756366 +1028 48 0 days 00:00:00.299651613 +1028 49 0 days 00:00:00.222724300 +1028 50 0 days 00:00:00.158098773 +1028 51 0 days 00:00:00.096259180 +1028 52 0 days 00:00:00.098591713 +1028 53 0 days 00:00:00.164309566 +1028 54 0 days 00:00:00.098724793 +1028 55 0 days 00:00:00.167314800 +1028 56 0 days 00:00:00.287413393 +1028 57 0 days 00:00:00.159588073 +1028 58 0 days 00:00:00.160021306 +1028 59 0 days 00:00:00.096723473 +1028 60 0 days 00:00:00.107342160 +1028 61 0 days 00:00:00.291213820 +1028 62 0 days 00:00:00.096897500 +1028 63 0 days 00:00:00.166552146 +1028 64 0 days 00:00:00.112020093 +1028 65 0 days 
00:00:00.212549713 +1028 66 0 days 00:00:00.160027793 +1028 67 0 days 00:00:00.308244786 +1028 68 0 days 00:00:00.099085786 +1028 69 0 days 00:00:00.168107173 +1028 70 0 days 00:00:00.100507340 +1028 71 0 days 00:00:00.329092300 +1028 72 0 days 00:00:00.160853953 +1028 73 0 days 00:00:00.289023220 +1028 74 0 days 00:00:00.101587146 +1028 75 0 days 00:00:00.167326993 +1028 76 0 days 00:00:00.163985713 +1028 77 0 days 00:00:00.298212240 +1028 78 0 days 00:00:00.098830726 +1028 79 0 days 00:00:00.096798206 +1028 80 0 days 00:00:00.162550200 +1028 81 0 days 00:00:00.095681046 +1028 82 0 days 00:00:00.097800953 +1028 83 0 days 00:00:00.289738226 +1028 84 0 days 00:00:00.104363906 +1028 85 0 days 00:00:00.282562386 +1028 86 0 days 00:00:00.103557860 +1028 87 0 days 00:00:00.161325486 +1028 88 0 days 00:00:00.283576806 +1028 89 0 days 00:00:00.160312553 +1028 90 0 days 00:00:00.097747840 +1028 91 0 days 00:00:00.093631566 +1028 92 0 days 00:00:00.273086200 +1028 93 0 days 00:00:00.141513693 +1028 94 0 days 00:00:00.137763060 +1028 95 0 days 00:00:00.220582413 +1028 96 0 days 00:00:00.135116380 +1028 97 0 days 00:00:00.131371160 +1028 98 0 days 00:00:00.077061433 +1028 99 0 days 00:00:00.236245146 +1028 100 0 days 00:00:00.135455300 +1029 1 0 days 00:00:00.161510883 +1029 2 0 days 00:00:00.148077895 +1029 3 0 days 00:00:00.100653217 +1029 4 0 days 00:00:00.254757806 +1029 5 0 days 00:00:00.174893382 +1029 6 0 days 00:00:00.318952972 +1029 7 0 days 00:00:00.301821166 +1029 8 0 days 00:00:00.135904346 +1029 9 0 days 00:00:00.300387648 +1029 10 0 days 00:00:00.082563966 +1029 11 0 days 00:00:00.086688193 +1029 12 0 days 00:00:00.156874820 +1029 13 0 days 00:00:00.159334500 +1029 14 0 days 00:00:00.254457900 +1029 15 0 days 00:00:00.093808166 +1029 16 0 days 00:00:00.101710142 +1029 17 0 days 00:00:00.252988986 +1029 18 0 days 00:00:00.254652720 +1029 19 0 days 00:00:00.161259752 +1029 20 0 days 00:00:00.278509095 +1029 21 0 days 00:00:00.164305413 +1029 22 0 days 
00:00:00.272174060 +1029 23 0 days 00:00:00.254296753 +1029 24 0 days 00:00:00.283408355 +1029 25 0 days 00:00:00.088484940 +1029 26 0 days 00:00:00.285653295 +1029 27 0 days 00:00:00.254746286 +1029 28 0 days 00:00:00.088098953 +1029 29 0 days 00:00:00.297021864 +1029 30 0 days 00:00:00.174545965 +1029 31 0 days 00:00:00.162886330 +1029 32 0 days 00:00:00.143735993 +1029 33 0 days 00:00:00.144036913 +1029 34 0 days 00:00:00.086040766 +1029 35 0 days 00:00:00.105813843 +1029 36 0 days 00:00:00.139687306 +1029 37 0 days 00:00:00.080404906 +1029 38 0 days 00:00:00.090866380 +1029 39 0 days 00:00:00.082270393 +1029 40 0 days 00:00:00.139015953 +1029 41 0 days 00:00:00.304730640 +1029 42 0 days 00:00:00.143078240 +1029 43 0 days 00:00:00.142482920 +1029 44 0 days 00:00:00.086422640 +1029 45 0 days 00:00:00.130900040 +1029 46 0 days 00:00:00.081599466 +1029 47 0 days 00:00:00.083885640 +1029 48 0 days 00:00:00.137604166 +1029 49 0 days 00:00:00.257820160 +1029 50 0 days 00:00:00.081454833 +1029 51 0 days 00:00:00.144303593 +1029 52 0 days 00:00:00.098612846 +1029 53 0 days 00:00:00.074791380 +1029 54 0 days 00:00:00.253121040 +1029 55 0 days 00:00:00.135372566 +1029 56 0 days 00:00:00.074644580 +1029 57 0 days 00:00:00.101467800 +1029 58 0 days 00:00:00.092785480 +1029 59 0 days 00:00:00.094759200 +1029 60 0 days 00:00:00.092523900 +1029 61 0 days 00:00:00.093443846 +1029 62 0 days 00:00:00.080114360 +1029 63 0 days 00:00:00.142999046 +1029 64 0 days 00:00:00.133252273 +1029 65 0 days 00:00:00.081840533 +1029 66 0 days 00:00:00.096461826 +1029 67 0 days 00:00:00.296502736 +1029 68 0 days 00:00:00.140090493 +1029 69 0 days 00:00:00.286310000 +1029 70 0 days 00:00:00.162898408 +1029 71 0 days 00:00:00.134820826 +1029 72 0 days 00:00:00.153542468 +1029 73 0 days 00:00:00.137035240 +1029 74 0 days 00:00:00.139560680 +1029 75 0 days 00:00:00.282310285 +1029 76 0 days 00:00:00.087548360 +1029 77 0 days 00:00:00.277733190 +1029 78 0 days 00:00:00.082141606 +1029 79 0 days 
00:00:00.248438633 +1029 80 0 days 00:00:00.139087213 +1029 81 0 days 00:00:00.145270346 +1029 82 0 days 00:00:00.246700133 +1029 83 0 days 00:00:00.164032468 +1029 84 0 days 00:00:00.086969253 +1029 85 0 days 00:00:00.106996655 +1029 86 0 days 00:00:00.087555453 +1029 87 0 days 00:00:00.141686146 +1029 88 0 days 00:00:00.102363205 +1029 89 0 days 00:00:00.161461964 +1029 90 0 days 00:00:00.087597260 +1029 91 0 days 00:00:00.144287053 +1029 92 0 days 00:00:00.170450796 +1029 93 0 days 00:00:00.087055466 +1029 94 0 days 00:00:00.136637133 +1029 95 0 days 00:00:00.082396146 +1029 96 0 days 00:00:00.249936906 +1029 97 0 days 00:00:00.250961860 +1029 98 0 days 00:00:00.306069963 +1029 99 0 days 00:00:00.253013640 +1029 100 0 days 00:00:00.081996333 +1030 1 0 days 00:00:00.358351000 +1030 2 0 days 00:00:00.605621560 +1030 3 0 days 00:00:00.568889740 +1030 4 0 days 00:00:00.221354956 +1030 5 0 days 00:00:00.569773793 +1030 6 0 days 00:00:00.318836493 +1030 7 0 days 00:00:00.189091926 +1030 8 0 days 00:00:00.230494150 +1030 9 0 days 00:00:00.188654200 +1030 10 0 days 00:00:00.312359226 +1030 11 0 days 00:00:00.220664212 +1030 12 0 days 00:00:00.447643926 +1030 13 0 days 00:00:00.600264800 +1030 14 0 days 00:00:00.190375580 +1030 15 0 days 00:00:00.603694306 +1030 16 0 days 00:00:00.243217640 +1030 17 0 days 00:00:00.583778066 +1030 18 0 days 00:00:00.592506486 +1030 19 0 days 00:00:00.341151200 +1030 20 0 days 00:00:00.205651893 +1030 21 0 days 00:00:00.224423892 +1030 22 0 days 00:00:00.383214356 +1030 23 0 days 00:00:00.209015653 +1030 24 0 days 00:00:00.768031395 +1030 25 0 days 00:00:00.338603166 +1030 26 0 days 00:00:00.690715316 +1030 27 0 days 00:00:00.611420193 +1030 28 0 days 00:00:00.600022460 +1030 29 0 days 00:00:00.191583040 +1030 30 0 days 00:00:00.227089785 +1030 31 0 days 00:00:00.591027680 +1030 32 0 days 00:00:00.211445493 +1030 33 0 days 00:00:00.660460025 +1030 34 0 days 00:00:00.193662680 +1030 35 0 days 00:00:00.325772240 +1030 36 0 days 
00:00:00.582651053 +1030 37 0 days 00:00:00.183580220 +1030 38 0 days 00:00:00.219872704 +1030 39 0 days 00:00:00.376389076 +1030 40 0 days 00:00:00.182723100 +1030 41 0 days 00:00:00.201594280 +1030 42 0 days 00:00:00.421575064 +1030 43 0 days 00:00:00.210623855 +1030 44 0 days 00:00:00.578407366 +1030 45 0 days 00:00:00.593212393 +1030 46 0 days 00:00:00.326503406 +1030 47 0 days 00:00:00.188207180 +1030 48 0 days 00:00:00.644101855 +1030 49 0 days 00:00:00.387902912 +1030 50 0 days 00:00:00.564931200 +1030 51 0 days 00:00:00.255800868 +1030 52 0 days 00:00:00.201477766 +1030 53 0 days 00:00:00.187423320 +1030 54 0 days 00:00:00.215523025 +1030 55 0 days 00:00:00.396045296 +1030 56 0 days 00:00:00.590159846 +1030 57 0 days 00:00:00.219557728 +1030 58 0 days 00:00:00.191585553 +1030 59 0 days 00:00:00.222389968 +1030 60 0 days 00:00:00.341575986 +1030 61 0 days 00:00:00.196444066 +1030 62 0 days 00:00:00.590900220 +1030 63 0 days 00:00:00.347379490 +1030 64 0 days 00:00:00.324551820 +1030 65 0 days 00:00:00.349233450 +1030 66 0 days 00:00:00.724388066 +1030 67 0 days 00:00:00.222301568 +1030 68 0 days 00:00:00.427743108 +1030 69 0 days 00:00:00.577117573 +1030 70 0 days 00:00:00.226790590 +1030 71 0 days 00:00:00.588435400 +1030 72 0 days 00:00:00.186359513 +1030 73 0 days 00:00:00.409331642 +1030 74 0 days 00:00:00.674217736 +1030 75 0 days 00:00:00.192906306 +1030 76 0 days 00:00:00.376099968 +1030 77 0 days 00:00:00.590758073 +1030 78 0 days 00:00:00.592348593 +1030 79 0 days 00:00:00.594562793 +1030 80 0 days 00:00:00.673584560 +1030 81 0 days 00:00:00.179453700 +1030 82 0 days 00:00:00.334127380 +1030 83 0 days 00:00:00.332584986 +1030 84 0 days 00:00:00.335608180 +1030 85 0 days 00:00:00.196888880 +1030 86 0 days 00:00:00.198702813 +1030 87 0 days 00:00:00.332435293 +1030 88 0 days 00:00:00.180430333 +1030 89 0 days 00:00:00.326772040 +1030 90 0 days 00:00:00.407373205 +1030 91 0 days 00:00:00.596708713 +1030 92 0 days 00:00:00.587226980 +1030 93 0 days 
00:00:00.283937917 +1030 94 0 days 00:00:00.720231660 +1030 95 0 days 00:00:00.322009953 +1030 96 0 days 00:00:00.230020296 +1030 97 0 days 00:00:00.327122126 +1030 98 0 days 00:00:00.571151253 +1030 99 0 days 00:00:00.588750833 +1030 100 0 days 00:00:00.220383484 +1031 1 0 days 00:00:00.168089726 +1031 2 0 days 00:00:00.100331140 +1031 3 0 days 00:00:00.202677263 +1031 4 0 days 00:00:00.170629500 +1031 5 0 days 00:00:00.305133440 +1031 6 0 days 00:00:00.299641720 +1031 7 0 days 00:00:00.222909605 +1031 8 0 days 00:00:00.105374246 +1031 9 0 days 00:00:00.099640980 +1031 10 0 days 00:00:00.097478986 +1031 11 0 days 00:00:00.369207994 +1031 12 0 days 00:00:00.288475193 +1031 13 0 days 00:00:00.124798243 +1031 14 0 days 00:00:00.094632040 +1031 15 0 days 00:00:00.295940973 +1031 16 0 days 00:00:00.118138625 +1031 17 0 days 00:00:00.294311726 +1031 18 0 days 00:00:00.358399320 +1031 19 0 days 00:00:00.095120300 +1031 20 0 days 00:00:00.166684433 +1031 21 0 days 00:00:00.097302226 +1031 22 0 days 00:00:00.168394793 +1031 23 0 days 00:00:00.170483766 +1031 24 0 days 00:00:00.132598384 +1031 25 0 days 00:00:00.303252693 +1031 26 0 days 00:00:00.171178333 +1031 27 0 days 00:00:00.248386750 +1031 28 0 days 00:00:00.362719863 +1031 29 0 days 00:00:00.199716532 +1031 30 0 days 00:00:00.094047626 +1031 31 0 days 00:00:00.297308506 +1031 32 0 days 00:00:00.302994493 +1031 33 0 days 00:00:00.358826186 +1031 34 0 days 00:00:00.206160516 +1031 35 0 days 00:00:00.098077780 +1031 36 0 days 00:00:00.379984586 +1031 37 0 days 00:00:00.103046553 +1031 38 0 days 00:00:00.347518400 +1031 39 0 days 00:00:00.127980427 +1031 40 0 days 00:00:00.301830186 +1031 41 0 days 00:00:00.173522800 +1031 42 0 days 00:00:00.249960050 +1031 43 0 days 00:00:00.332407755 +1031 44 0 days 00:00:00.361129446 +1031 45 0 days 00:00:00.160157453 +1031 46 0 days 00:00:00.328371070 +1031 47 0 days 00:00:00.326260745 +1031 48 0 days 00:00:00.170898713 +1031 49 0 days 00:00:00.099102200 +1031 50 0 days 
00:00:00.218269102 +1031 51 0 days 00:00:00.325273095 +1031 52 0 days 00:00:00.110150725 +1031 53 0 days 00:00:00.168251933 +1031 54 0 days 00:00:00.207321256 +1031 55 0 days 00:00:00.211788400 +1031 56 0 days 00:00:00.097980046 +1031 57 0 days 00:00:00.214947131 +1031 58 0 days 00:00:00.368693797 +1031 59 0 days 00:00:00.100474580 +1031 60 0 days 00:00:00.118168506 +1031 61 0 days 00:00:00.329740185 +1031 62 0 days 00:00:00.121233604 +1031 63 0 days 00:00:00.164437993 +1031 64 0 days 00:00:00.192036732 +1031 65 0 days 00:00:00.104947840 +1031 66 0 days 00:00:00.190086260 +1031 67 0 days 00:00:00.362710020 +1031 68 0 days 00:00:00.164256593 +1031 69 0 days 00:00:00.324507380 +1031 70 0 days 00:00:00.302930346 +1031 71 0 days 00:00:00.249357295 +1031 72 0 days 00:00:00.355462710 +1031 73 0 days 00:00:00.377150888 +1031 74 0 days 00:00:00.207159733 +1031 75 0 days 00:00:00.293531220 +1031 76 0 days 00:00:00.299902673 +1031 77 0 days 00:00:00.201270976 +1031 78 0 days 00:00:00.296441720 +1031 79 0 days 00:00:00.294729893 +1031 80 0 days 00:00:00.328039830 +1031 81 0 days 00:00:00.331379880 +1031 82 0 days 00:00:00.172714720 +1031 83 0 days 00:00:00.294087013 +1031 84 0 days 00:00:00.095718026 +1031 85 0 days 00:00:00.096600040 +1031 86 0 days 00:00:00.106107880 +1031 87 0 days 00:00:00.346569728 +1031 88 0 days 00:00:00.356047580 +1031 89 0 days 00:00:00.292996333 +1031 90 0 days 00:00:00.100854606 +1031 91 0 days 00:00:00.164592726 +1031 92 0 days 00:00:00.099437313 +1031 93 0 days 00:00:00.097935186 +1031 94 0 days 00:00:00.171516266 +1031 95 0 days 00:00:00.095854406 +1031 96 0 days 00:00:00.127512034 +1031 97 0 days 00:00:00.170511073 +1031 98 0 days 00:00:00.102055100 +1031 99 0 days 00:00:00.170341640 +1031 100 0 days 00:00:00.195966016 +1032 1 0 days 00:00:00.359444733 +1032 2 0 days 00:00:00.359471592 +1032 3 0 days 00:00:00.465759940 +1032 4 0 days 00:00:00.518436140 +1032 5 0 days 00:00:00.587731865 +1032 6 0 days 00:00:00.158126273 +1032 7 0 days 
00:00:00.182180766 +1032 8 0 days 00:00:00.169739873 +1032 9 0 days 00:00:00.205933833 +1032 10 0 days 00:00:00.184612420 +1032 11 0 days 00:00:00.220352020 +1032 12 0 days 00:00:00.166114880 +1032 13 0 days 00:00:00.161285340 +1032 14 0 days 00:00:00.554123033 +1032 15 0 days 00:00:00.224734680 +1032 16 0 days 00:00:00.243360240 +1032 17 0 days 00:00:00.290067528 +1032 18 0 days 00:00:00.174980966 +1032 19 0 days 00:00:00.184063513 +1032 20 0 days 00:00:00.260157720 +1032 21 0 days 00:00:00.467493800 +1032 22 0 days 00:00:00.417367006 +1032 23 0 days 00:00:00.304492588 +1032 24 0 days 00:00:00.487054275 +1032 25 0 days 00:00:00.221239706 +1032 26 0 days 00:00:00.524025706 +1032 27 0 days 00:00:00.286564893 +1032 28 0 days 00:00:00.495372646 +1032 29 0 days 00:00:00.194654690 +1032 30 0 days 00:00:00.470075193 +1032 31 0 days 00:00:00.294542886 +1032 32 0 days 00:00:00.185595513 +1032 33 0 days 00:00:00.478001990 +1032 34 0 days 00:00:00.158181406 +1032 35 0 days 00:00:00.197029133 +1032 36 0 days 00:00:00.279746713 +1032 37 0 days 00:00:00.161725126 +1032 38 0 days 00:00:00.164823100 +1032 39 0 days 00:00:00.165587993 +1032 40 0 days 00:00:00.551875693 +1032 41 0 days 00:00:00.523626993 +1032 42 0 days 00:00:00.310797726 +1032 43 0 days 00:00:00.604784983 +1032 44 0 days 00:00:00.432486446 +1032 45 0 days 00:00:00.515087775 +1032 46 0 days 00:00:00.520858226 +1032 47 0 days 00:00:00.162765786 +1032 48 0 days 00:00:00.348384926 +1032 49 0 days 00:00:00.305238450 +1032 50 0 days 00:00:00.195773980 +1032 51 0 days 00:00:00.175334486 +1032 52 0 days 00:00:00.392912585 +1032 53 0 days 00:00:00.157004513 +1032 54 0 days 00:00:00.280068820 +1032 55 0 days 00:00:00.159580126 +1032 56 0 days 00:00:00.130328706 +1032 57 0 days 00:00:00.171029540 +1032 58 0 days 00:00:00.267288413 +1032 59 0 days 00:00:00.189840815 +1032 60 0 days 00:00:00.156420000 +1032 61 0 days 00:00:00.403011855 +1032 62 0 days 00:00:00.176134413 +1032 63 0 days 00:00:00.354177316 +1032 64 0 days 
00:00:00.513418306 +1032 65 0 days 00:00:00.310272242 +1032 66 0 days 00:00:00.489096620 +1032 67 0 days 00:00:00.176288070 +1032 68 0 days 00:00:00.610423095 +1032 69 0 days 00:00:00.172541966 +1032 70 0 days 00:00:00.160653406 +1032 71 0 days 00:00:00.304545452 +1032 72 0 days 00:00:00.541887913 +1032 73 0 days 00:00:00.299194673 +1032 74 0 days 00:00:00.159485133 +1032 75 0 days 00:00:00.625280324 +1032 76 0 days 00:00:00.584736223 +1032 77 0 days 00:00:00.205550868 +1032 78 0 days 00:00:00.195105445 +1032 79 0 days 00:00:00.189134266 +1032 80 0 days 00:00:00.170953013 +1032 81 0 days 00:00:00.603870090 +1032 82 0 days 00:00:00.307513786 +1032 83 0 days 00:00:00.496552340 +1032 84 0 days 00:00:00.331812480 +1032 85 0 days 00:00:00.168565715 +1032 86 0 days 00:00:00.364052117 +1032 87 0 days 00:00:00.127032820 +1032 88 0 days 00:00:00.321618716 +1032 89 0 days 00:00:00.498268860 +1032 90 0 days 00:00:00.317480900 +1032 91 0 days 00:00:00.299481500 +1032 92 0 days 00:00:00.616887985 +1032 93 0 days 00:00:00.442059746 +1032 94 0 days 00:00:00.294498755 +1032 95 0 days 00:00:00.322538353 +1032 96 0 days 00:00:00.436444460 +1032 97 0 days 00:00:00.160947740 +1032 98 0 days 00:00:00.554871585 +1032 99 0 days 00:00:00.448438160 +1032 100 0 days 00:00:00.185830726 +1033 1 0 days 00:00:00.089446506 +1033 2 0 days 00:00:00.076612006 +1033 3 0 days 00:00:00.083454746 +1033 4 0 days 00:00:00.104039933 +1033 5 0 days 00:00:00.346600217 +1033 6 0 days 00:00:00.060603606 +1033 7 0 days 00:00:00.159875093 +1033 8 0 days 00:00:00.157443146 +1033 9 0 days 00:00:00.162363820 +1033 10 0 days 00:00:00.144613346 +1033 11 0 days 00:00:00.234428933 +1033 12 0 days 00:00:00.234233766 +1033 13 0 days 00:00:00.136385880 +1033 14 0 days 00:00:00.221382273 +1033 15 0 days 00:00:00.073015333 +1033 16 0 days 00:00:00.098162106 +1033 17 0 days 00:00:00.142591033 +1033 18 0 days 00:00:00.159943433 +1033 19 0 days 00:00:00.131587286 +1033 20 0 days 00:00:00.155490440 +1033 21 0 days 
00:00:00.089707860 +1033 22 0 days 00:00:00.243354535 +1033 23 0 days 00:00:00.153054220 +1033 24 0 days 00:00:00.102558250 +1033 25 0 days 00:00:00.076862800 +1033 26 0 days 00:00:00.096794466 +1033 27 0 days 00:00:00.184507734 +1033 28 0 days 00:00:00.156144166 +1033 29 0 days 00:00:00.278671813 +1033 30 0 days 00:00:00.171632586 +1033 31 0 days 00:00:00.141661640 +1033 32 0 days 00:00:00.128812753 +1033 33 0 days 00:00:00.174221715 +1033 34 0 days 00:00:00.188197826 +1033 35 0 days 00:00:00.170894487 +1033 36 0 days 00:00:00.241852846 +1033 37 0 days 00:00:00.273106866 +1033 38 0 days 00:00:00.129688528 +1033 39 0 days 00:00:00.103882113 +1033 40 0 days 00:00:00.093561660 +1033 41 0 days 00:00:00.157705925 +1033 42 0 days 00:00:00.311396400 +1033 43 0 days 00:00:00.093101406 +1033 44 0 days 00:00:00.159933720 +1033 45 0 days 00:00:00.192539017 +1033 46 0 days 00:00:00.127275786 +1033 47 0 days 00:00:00.091878260 +1033 48 0 days 00:00:00.255454480 +1033 49 0 days 00:00:00.250648390 +1033 50 0 days 00:00:00.086272990 +1033 51 0 days 00:00:00.144819475 +1033 52 0 days 00:00:00.096514273 +1033 53 0 days 00:00:00.106010126 +1033 54 0 days 00:00:00.262636748 +1033 55 0 days 00:00:00.126807560 +1033 56 0 days 00:00:00.326662870 +1033 57 0 days 00:00:00.091173366 +1033 58 0 days 00:00:00.228891826 +1033 59 0 days 00:00:00.092269826 +1033 60 0 days 00:00:00.073664100 +1033 61 0 days 00:00:00.076537780 +1033 62 0 days 00:00:00.302743046 +1033 63 0 days 00:00:00.068425086 +1033 64 0 days 00:00:00.168902214 +1033 65 0 days 00:00:00.077413160 +1033 66 0 days 00:00:00.136687606 +1033 67 0 days 00:00:00.096868965 +1033 68 0 days 00:00:00.176478250 +1033 69 0 days 00:00:00.320249336 +1033 70 0 days 00:00:00.151905113 +1033 71 0 days 00:00:00.113749482 +1033 72 0 days 00:00:00.159799880 +1033 73 0 days 00:00:00.277455735 +1033 74 0 days 00:00:00.151061080 +1033 75 0 days 00:00:00.081781873 +1033 76 0 days 00:00:00.164794273 +1033 77 0 days 00:00:00.139141973 +1033 78 0 days 
00:00:00.332014526 +1033 79 0 days 00:00:00.104451330 +1033 80 0 days 00:00:00.261945506 +1033 81 0 days 00:00:00.159802386 +1033 82 0 days 00:00:00.250247466 +1033 83 0 days 00:00:00.100626060 +1033 84 0 days 00:00:00.103681646 +1033 85 0 days 00:00:00.282271806 +1033 86 0 days 00:00:00.252759326 +1033 87 0 days 00:00:00.197630472 +1033 88 0 days 00:00:00.263610946 +1033 89 0 days 00:00:00.180463103 +1033 90 0 days 00:00:00.293094492 +1033 91 0 days 00:00:00.182458130 +1033 92 0 days 00:00:00.282757353 +1033 93 0 days 00:00:00.111969720 +1033 94 0 days 00:00:00.225573633 +1033 95 0 days 00:00:00.160387140 +1033 96 0 days 00:00:00.225173133 +1033 97 0 days 00:00:00.150196233 +1033 98 0 days 00:00:00.095291840 +1033 99 0 days 00:00:00.123922860 +1033 100 0 days 00:00:00.176819585 +1034 1 0 days 00:00:04.004457015 +1034 2 0 days 00:00:01.951438926 +1034 3 0 days 00:00:04.437117740 +1034 4 0 days 00:00:02.025978320 +1034 5 0 days 00:00:02.739208406 +1034 6 0 days 00:00:02.697415646 +1034 7 0 days 00:00:02.706130752 +1034 8 0 days 00:00:02.759465026 +1034 9 0 days 00:00:02.096651413 +1034 10 0 days 00:00:02.206237480 +1034 11 0 days 00:00:01.953668713 +1034 12 0 days 00:00:02.063089593 +1034 13 0 days 00:00:02.029064226 +1034 14 0 days 00:00:01.851467753 +1034 15 0 days 00:00:02.819356166 +1034 16 0 days 00:00:02.602987522 +1034 17 0 days 00:00:01.954562746 +1034 18 0 days 00:00:04.318194946 +1034 19 0 days 00:00:02.252870466 +1034 20 0 days 00:00:04.439006313 +1034 21 0 days 00:00:02.189596440 +1034 22 0 days 00:00:02.521378871 +1034 23 0 days 00:00:04.797987993 +1034 24 0 days 00:00:04.442569240 +1034 25 0 days 00:00:01.925698886 +1034 26 0 days 00:00:04.369275293 +1034 27 0 days 00:00:05.537331873 +1034 28 0 days 00:00:02.207297435 +1034 29 0 days 00:00:02.795231926 +1034 30 0 days 00:00:03.005250620 +1034 31 0 days 00:00:01.964092360 +1034 32 0 days 00:00:04.320787080 +1034 33 0 days 00:00:03.795982565 +1034 34 0 days 00:00:02.983636380 +1034 35 0 days 
00:00:02.321007672 +1034 36 0 days 00:00:06.068469571 +1034 37 0 days 00:00:02.031519400 +1034 38 0 days 00:00:05.956859916 +1034 39 0 days 00:00:02.821223306 +1034 40 0 days 00:00:02.399207280 +1034 41 0 days 00:00:04.702231153 +1034 42 0 days 00:00:02.893813546 +1034 43 0 days 00:00:04.386151833 +1034 44 0 days 00:00:04.740191593 +1034 45 0 days 00:00:04.833361050 +1034 46 0 days 00:00:01.938365773 +1034 47 0 days 00:00:02.878568520 +1034 48 0 days 00:00:02.792483740 +1034 49 0 days 00:00:01.985135580 +1034 50 0 days 00:00:05.741789548 +1034 51 0 days 00:00:02.548911572 +1034 52 0 days 00:00:02.595810897 +1034 53 0 days 00:00:02.192908150 +1034 54 0 days 00:00:04.430163906 +1034 55 0 days 00:00:02.774617606 +1034 56 0 days 00:00:02.997491586 +1034 57 0 days 00:00:05.369317032 +1034 58 0 days 00:00:04.785082093 +1034 59 0 days 00:00:02.827067493 +1034 60 0 days 00:00:04.239888426 +1034 61 0 days 00:00:01.988778133 +1034 62 0 days 00:00:02.184568635 +1034 63 0 days 00:00:04.548308066 +1034 64 0 days 00:00:05.383283700 +1034 65 0 days 00:00:02.100312046 +1034 66 0 days 00:00:04.243154626 +1034 67 0 days 00:00:02.105211993 +1034 68 0 days 00:00:02.213945926 +1034 69 0 days 00:00:02.753948320 +1034 70 0 days 00:00:02.741910026 +1034 71 0 days 00:00:02.708635580 +1034 72 0 days 00:00:06.349117314 +1034 73 0 days 00:00:04.358174246 +1034 74 0 days 00:00:06.015054550 +1034 75 0 days 00:00:02.087447220 +1034 76 0 days 00:00:02.412151640 +1034 77 0 days 00:00:02.106155526 +1034 78 0 days 00:00:02.103149740 +1034 79 0 days 00:00:02.603645257 +1034 80 0 days 00:00:02.138602026 +1034 81 0 days 00:00:04.000970462 +1034 82 0 days 00:00:02.526855495 +1034 83 0 days 00:00:02.967511613 +1034 84 0 days 00:00:05.733370592 +1034 85 0 days 00:00:02.998602900 +1034 86 0 days 00:00:03.002692366 +1034 87 0 days 00:00:03.691454243 +1034 88 0 days 00:00:03.541798483 +1034 89 0 days 00:00:02.179652130 +1034 90 0 days 00:00:02.337551886 +1034 91 0 days 00:00:02.761362920 +1034 92 0 days 
00:00:02.244177500 +1034 93 0 days 00:00:02.768321833 +1034 94 0 days 00:00:02.217634000 +1034 95 0 days 00:00:03.013093340 +1034 96 0 days 00:00:03.119082710 +1034 97 0 days 00:00:02.485334977 +1034 98 0 days 00:00:05.553612360 +1034 99 0 days 00:00:03.130841793 +1034 100 0 days 00:00:01.985300713 +1035 1 0 days 00:00:02.256431570 +1035 2 0 days 00:00:03.011197006 +1035 3 0 days 00:00:02.089075500 +1035 4 0 days 00:00:02.933632780 +1035 5 0 days 00:00:02.557706680 +1035 6 0 days 00:00:06.003945146 +1035 7 0 days 00:00:02.938593660 +1035 8 0 days 00:00:05.152505233 +1035 9 0 days 00:00:02.445860968 +1035 10 0 days 00:00:02.983238120 +1035 11 0 days 00:00:03.055321446 +1035 12 0 days 00:00:04.901078886 +1035 13 0 days 00:00:02.300990495 +1035 14 0 days 00:00:05.113990386 +1035 15 0 days 00:00:02.534506575 +1035 16 0 days 00:00:02.844347991 +1035 17 0 days 00:00:04.736042173 +1035 18 0 days 00:00:02.637264266 +1035 19 0 days 00:00:04.775980560 +1035 20 0 days 00:00:02.960792293 +1035 21 0 days 00:00:05.068614140 +1035 22 0 days 00:00:02.299474825 +1035 23 0 days 00:00:03.726043814 +1035 24 0 days 00:00:02.371605096 +1035 25 0 days 00:00:03.292230200 +1035 26 0 days 00:00:03.842688631 +1035 27 0 days 00:00:03.603503540 +1035 28 0 days 00:00:05.165165235 +1035 29 0 days 00:00:03.728277717 +1035 30 0 days 00:00:06.369499853 +1035 31 0 days 00:00:04.755057413 +1035 32 0 days 00:00:04.165489333 +1035 33 0 days 00:00:03.822014764 +1035 34 0 days 00:00:02.327587660 +1035 35 0 days 00:00:03.737523352 +1035 36 0 days 00:00:03.929402270 +1035 37 0 days 00:00:03.751708220 +1035 38 0 days 00:00:04.753721286 +1035 39 0 days 00:00:02.402774836 +1035 40 0 days 00:00:04.685334873 +1035 41 0 days 00:00:02.230960035 +1035 42 0 days 00:00:03.404615640 +1035 43 0 days 00:00:02.066865006 +1035 44 0 days 00:00:02.402683555 +1035 45 0 days 00:00:05.135051380 +1035 46 0 days 00:00:02.582169050 +1035 47 0 days 00:00:02.607198525 +1035 48 0 days 00:00:02.032328553 +1035 49 0 days 
00:00:03.067476986 +1035 50 0 days 00:00:02.856307640 +1035 51 0 days 00:00:04.772411860 +1035 52 0 days 00:00:03.104601646 +1035 53 0 days 00:00:04.655708320 +1035 54 0 days 00:00:02.436819516 +1035 55 0 days 00:00:04.206736046 +1035 56 0 days 00:00:02.448591336 +1035 57 0 days 00:00:03.515894620 +1035 58 0 days 00:00:03.707402663 +1035 59 0 days 00:00:03.295681760 +1035 60 0 days 00:00:03.265753085 +1035 61 0 days 00:00:03.012733313 +1035 62 0 days 00:00:05.111873646 +1035 63 0 days 00:00:05.236886500 +1035 64 0 days 00:00:03.204816753 +1035 65 0 days 00:00:05.412255605 +1035 66 0 days 00:00:02.066737000 +1035 67 0 days 00:00:03.528644690 +1035 68 0 days 00:00:02.027982300 +1035 69 0 days 00:00:02.554452472 +1035 70 0 days 00:00:02.551091956 +1035 71 0 days 00:00:05.665711232 +1035 72 0 days 00:00:06.772515531 +1035 73 0 days 00:00:04.636083093 +1035 74 0 days 00:00:05.632163028 +1035 75 0 days 00:00:03.648266493 +1035 76 0 days 00:00:03.253832665 +1035 77 0 days 00:00:03.813757620 +1035 78 0 days 00:00:02.301438830 +1035 79 0 days 00:00:02.772992263 +1035 80 0 days 00:00:06.258929262 +1035 81 0 days 00:00:02.228846105 +1035 82 0 days 00:00:03.278523160 +1035 83 0 days 00:00:03.453750436 +1035 84 0 days 00:00:04.175720540 +1035 85 0 days 00:00:02.281764025 +1035 86 0 days 00:00:03.143733380 +1035 87 0 days 00:00:04.574719273 +1035 88 0 days 00:00:03.299939586 +1035 89 0 days 00:00:02.523732076 +1035 90 0 days 00:00:03.726897628 +1035 91 0 days 00:00:02.244207160 +1035 92 0 days 00:00:03.120805766 +1035 93 0 days 00:00:03.970176551 +1035 94 0 days 00:00:02.031764480 +1035 95 0 days 00:00:05.656346976 +1035 96 0 days 00:00:04.755087393 +1035 97 0 days 00:00:02.586556495 +1035 98 0 days 00:00:02.571585710 +1035 99 0 days 00:00:02.442400366 +1035 100 0 days 00:00:04.500090160 +1036 1 0 days 00:00:01.459385446 +1036 2 0 days 00:00:02.178836500 +1036 3 0 days 00:00:01.886580763 +1036 4 0 days 00:00:01.186320620 +1036 5 0 days 00:00:03.093546248 +1036 6 0 days 
00:00:01.687021988 +1036 7 0 days 00:00:02.178551553 +1036 8 0 days 00:00:01.042364826 +1036 9 0 days 00:00:02.264322560 +1036 10 0 days 00:00:01.183246136 +1036 11 0 days 00:00:02.215049293 +1036 12 0 days 00:00:02.420082593 +1036 13 0 days 00:00:01.805818855 +1036 14 0 days 00:00:02.306566686 +1036 15 0 days 00:00:02.400278313 +1036 16 0 days 00:00:02.438295626 +1036 17 0 days 00:00:02.221080006 +1036 18 0 days 00:00:01.138734330 +1036 19 0 days 00:00:02.263528460 +1036 20 0 days 00:00:01.367902651 +1036 21 0 days 00:00:02.867684502 +1036 22 0 days 00:00:01.586403645 +1036 23 0 days 00:00:02.191455565 +1036 24 0 days 00:00:01.546354100 +1036 25 0 days 00:00:01.134754835 +1036 26 0 days 00:00:02.322450726 +1036 27 0 days 00:00:01.026837473 +1036 28 0 days 00:00:01.221469366 +1036 29 0 days 00:00:01.425810186 +1036 30 0 days 00:00:01.925630460 +1036 31 0 days 00:00:02.264375526 +1036 32 0 days 00:00:01.476151635 +1036 33 0 days 00:00:01.080912253 +1036 34 0 days 00:00:01.580744406 +1036 35 0 days 00:00:01.185017700 +1036 36 0 days 00:00:01.806988992 +1036 37 0 days 00:00:01.131050375 +1036 38 0 days 00:00:01.021073346 +1036 39 0 days 00:00:01.039796346 +1036 40 0 days 00:00:01.269965897 +1036 41 0 days 00:00:01.097480340 +1036 42 0 days 00:00:01.065095626 +1036 43 0 days 00:00:01.014864253 +1036 44 0 days 00:00:01.407274726 +1036 45 0 days 00:00:01.549188900 +1036 46 0 days 00:00:02.261630886 +1036 47 0 days 00:00:03.078422956 +1036 48 0 days 00:00:01.078783180 +1036 49 0 days 00:00:02.260459426 +1036 50 0 days 00:00:01.392494060 +1036 51 0 days 00:00:01.436392655 +1036 52 0 days 00:00:01.737154584 +1036 53 0 days 00:00:01.070541213 +1036 54 0 days 00:00:01.555194420 +1036 55 0 days 00:00:01.376562096 +1036 56 0 days 00:00:01.438058166 +1036 57 0 days 00:00:01.988810520 +1036 58 0 days 00:00:02.264271946 +1036 59 0 days 00:00:01.126853480 +1036 60 0 days 00:00:02.747763156 +1036 61 0 days 00:00:02.467147100 +1036 62 0 days 00:00:01.005614786 +1036 63 0 days 
00:00:01.192744933 +1036 64 0 days 00:00:01.459647493 +1036 65 0 days 00:00:02.421348640 +1036 66 0 days 00:00:02.507281320 +1036 67 0 days 00:00:01.667545528 +1036 68 0 days 00:00:01.478178606 +1036 69 0 days 00:00:01.271600648 +1036 70 0 days 00:00:01.234803556 +1036 71 0 days 00:00:02.199015113 +1036 72 0 days 00:00:01.839796891 +1036 73 0 days 00:00:01.924586526 +1036 74 0 days 00:00:02.946207830 +1036 75 0 days 00:00:01.919404309 +1036 76 0 days 00:00:01.220229680 +1036 77 0 days 00:00:01.172245840 +1036 78 0 days 00:00:03.095571782 +1036 79 0 days 00:00:01.241045960 +1036 80 0 days 00:00:01.159462710 +1036 81 0 days 00:00:01.170383675 +1036 82 0 days 00:00:03.054807278 +1036 83 0 days 00:00:02.268168473 +1036 84 0 days 00:00:01.206448713 +1036 85 0 days 00:00:01.593566040 +1036 86 0 days 00:00:01.253257112 +1036 87 0 days 00:00:01.837261412 +1036 88 0 days 00:00:01.034431026 +1036 89 0 days 00:00:02.038612013 +1036 90 0 days 00:00:01.559811466 +1036 91 0 days 00:00:01.211776685 +1036 92 0 days 00:00:01.053693586 +1036 93 0 days 00:00:01.451517820 +1036 94 0 days 00:00:02.443272226 +1036 95 0 days 00:00:01.167480940 +1036 96 0 days 00:00:02.676671848 +1036 97 0 days 00:00:01.523430766 +1036 98 0 days 00:00:01.577746240 +1036 99 0 days 00:00:02.222286420 +1036 100 0 days 00:00:01.922073272 +1037 1 0 days 00:00:01.397279784 +1037 2 0 days 00:00:01.067932486 +1037 3 0 days 00:00:02.587697993 +1037 4 0 days 00:00:02.352479260 +1037 5 0 days 00:00:01.060461766 +1037 6 0 days 00:00:02.018011170 +1037 7 0 days 00:00:01.538677726 +1037 8 0 days 00:00:02.643676390 +1037 9 0 days 00:00:01.219133215 +1037 10 0 days 00:00:01.207035455 +1037 11 0 days 00:00:03.013060162 +1037 12 0 days 00:00:02.122507144 +1037 13 0 days 00:00:01.693186195 +1037 14 0 days 00:00:01.716029810 +1037 15 0 days 00:00:01.667359540 +1037 16 0 days 00:00:01.166582765 +1037 17 0 days 00:00:02.629493990 +1037 18 0 days 00:00:01.058768766 +1037 19 0 days 00:00:01.236931748 +1037 20 0 days 
00:00:01.667333460 +1037 21 0 days 00:00:02.442437193 +1037 22 0 days 00:00:02.015881311 +1037 23 0 days 00:00:01.202450140 +1037 24 0 days 00:00:02.858339576 +1037 25 0 days 00:00:01.271747310 +1037 26 0 days 00:00:02.355652640 +1037 27 0 days 00:00:01.133223506 +1037 28 0 days 00:00:02.447004420 +1037 29 0 days 00:00:01.221528070 +1037 30 0 days 00:00:02.419766160 +1037 31 0 days 00:00:01.721144545 +1037 32 0 days 00:00:01.679679815 +1037 33 0 days 00:00:01.164533710 +1037 34 0 days 00:00:01.494150480 +1037 35 0 days 00:00:01.203026655 +1037 36 0 days 00:00:02.761849535 +1037 37 0 days 00:00:01.666185490 +1037 38 0 days 00:00:01.227220965 +1037 39 0 days 00:00:02.696985175 +1037 40 0 days 00:00:01.604189833 +1037 41 0 days 00:00:01.635020693 +1037 42 0 days 00:00:02.772223205 +1037 43 0 days 00:00:02.051499465 +1037 44 0 days 00:00:01.182483905 +1037 45 0 days 00:00:01.619855100 +1037 46 0 days 00:00:01.968248326 +1037 47 0 days 00:00:01.814508906 +1037 48 0 days 00:00:03.225946440 +1037 49 0 days 00:00:01.496406973 +1037 50 0 days 00:00:01.534690620 +1037 51 0 days 00:00:01.813355673 +1037 52 0 days 00:00:01.057277633 +1037 53 0 days 00:00:02.932636085 +1037 54 0 days 00:00:02.039164231 +1037 55 0 days 00:00:02.167265030 +1037 56 0 days 00:00:01.165095115 +1037 57 0 days 00:00:01.522418726 +1037 58 0 days 00:00:01.384489617 +1037 59 0 days 00:00:01.528176473 +1037 60 0 days 00:00:01.522416413 +1037 61 0 days 00:00:02.326144013 +1037 62 0 days 00:00:01.251161812 +1037 63 0 days 00:00:03.109174112 +1037 64 0 days 00:00:01.715465345 +1037 65 0 days 00:00:01.416704782 +1037 66 0 days 00:00:02.387536606 +1037 67 0 days 00:00:01.401110588 +1037 68 0 days 00:00:01.521339893 +1037 69 0 days 00:00:01.807991400 +1037 70 0 days 00:00:01.657087780 +1037 71 0 days 00:00:01.409941562 +1037 72 0 days 00:00:01.161666235 +1037 73 0 days 00:00:03.027448440 +1037 74 0 days 00:00:02.298649366 +1037 75 0 days 00:00:01.154867130 +1037 76 0 days 00:00:01.081045393 +1037 77 0 days 
00:00:01.118466466 +1037 78 0 days 00:00:01.689761635 +1037 79 0 days 00:00:01.519338406 +1037 80 0 days 00:00:01.817452115 +1037 81 0 days 00:00:01.362634240 +1037 82 0 days 00:00:02.624110065 +1037 83 0 days 00:00:01.880144485 +1037 84 0 days 00:00:01.750910452 +1037 85 0 days 00:00:03.135435792 +1037 86 0 days 00:00:01.055102540 +1037 87 0 days 00:00:03.336595760 +1037 88 0 days 00:00:01.064436606 +1037 89 0 days 00:00:01.499691546 +1037 90 0 days 00:00:01.515594986 +1037 91 0 days 00:00:02.872354810 +1037 92 0 days 00:00:01.661658653 +1037 93 0 days 00:00:03.489354124 +1037 94 0 days 00:00:02.371731313 +1037 95 0 days 00:00:01.165222135 +1037 96 0 days 00:00:01.532551293 +1037 97 0 days 00:00:01.599568908 +1037 98 0 days 00:00:01.166675435 +1037 99 0 days 00:00:01.976556146 +1037 100 0 days 00:00:01.951636402 +1038 1 0 days 00:00:03.946881112 +1038 2 0 days 00:00:06.582767975 +1038 3 0 days 00:00:06.979381451 +1038 4 0 days 00:00:03.774052842 +1038 5 0 days 00:00:02.442724730 +1038 6 0 days 00:00:06.599791685 +1038 7 0 days 00:00:04.594682684 +1038 8 0 days 00:00:03.170296312 +1038 9 0 days 00:00:08.192796095 +1038 10 0 days 00:00:03.750741213 +1038 11 0 days 00:00:03.807636550 +1038 12 0 days 00:00:03.693000620 +1038 13 0 days 00:00:05.367188349 +1038 14 0 days 00:00:03.918175540 +1038 15 0 days 00:00:02.643882600 +1038 16 0 days 00:00:07.934292142 +1038 17 0 days 00:00:05.127354786 +1038 18 0 days 00:00:07.114401458 +1038 19 0 days 00:00:06.859122875 +1038 20 0 days 00:00:04.565955160 +1038 21 0 days 00:00:04.786959228 +1038 22 0 days 00:00:04.804342200 +1038 23 0 days 00:00:06.927196711 +1038 24 0 days 00:00:03.218598660 +1038 25 0 days 00:00:02.861456930 +1038 26 0 days 00:00:03.099215308 +1038 27 0 days 00:00:03.474519042 +1038 28 0 days 00:00:04.178119836 +1038 29 0 days 00:00:07.280773385 +1038 30 0 days 00:00:07.081960442 +1038 31 0 days 00:00:04.066514286 +1038 32 0 days 00:00:02.522344004 +1038 33 0 days 00:00:02.396584710 +1038 34 0 days 
00:00:03.140039368 +1038 35 0 days 00:00:04.098311513 +1038 36 0 days 00:00:03.053716432 +1038 37 0 days 00:00:05.790062376 +1038 38 0 days 00:00:03.159306842 +1038 39 0 days 00:00:03.136100793 +1038 40 0 days 00:00:08.187013548 +1038 41 0 days 00:00:04.116937448 +1038 42 0 days 00:00:02.664432562 +1038 43 0 days 00:00:02.956272367 +1038 44 0 days 00:00:07.687497248 +1038 45 0 days 00:00:02.972474262 +1038 46 0 days 00:00:03.107421900 +1038 47 0 days 00:00:07.263385522 +1038 48 0 days 00:00:05.916970108 +1039 1 0 days 00:00:03.083255986 +1039 2 0 days 00:00:02.032608796 +1039 3 0 days 00:00:01.814828227 +1039 4 0 days 00:00:02.394037662 +1039 5 0 days 00:00:02.261520568 +1039 6 0 days 00:00:02.049176650 +1039 7 0 days 00:00:02.355958118 +1039 8 0 days 00:00:04.063746657 +1039 9 0 days 00:00:01.445485977 +1039 10 0 days 00:00:03.732483310 +1039 11 0 days 00:00:03.126376656 +1039 12 0 days 00:00:01.544689834 +1039 13 0 days 00:00:01.876207545 +1039 14 0 days 00:00:01.811191036 +1039 15 0 days 00:00:02.076233200 +1039 16 0 days 00:00:03.391912326 +1039 17 0 days 00:00:01.445952183 +1039 18 0 days 00:00:02.084915767 +1039 19 0 days 00:00:02.423852668 +1039 20 0 days 00:00:01.605321943 +1039 21 0 days 00:00:02.571781381 +1039 22 0 days 00:00:01.947829445 +1039 23 0 days 00:00:02.296606950 +1039 24 0 days 00:00:01.525815231 +1039 25 0 days 00:00:02.408008037 +1039 26 0 days 00:00:02.399297368 +1039 27 0 days 00:00:01.455734673 +1039 28 0 days 00:00:01.325579812 +1039 29 0 days 00:00:01.545160537 +1039 30 0 days 00:00:01.272286456 +1039 31 0 days 00:00:02.656250417 +1039 32 0 days 00:00:01.925923677 +1039 33 0 days 00:00:01.518886460 +1039 34 0 days 00:00:02.805084320 +1039 35 0 days 00:00:01.428544400 +1039 36 0 days 00:00:03.362559773 +1039 37 0 days 00:00:01.665765800 +1039 38 0 days 00:00:02.141956362 +1039 39 0 days 00:00:01.293689498 +1039 40 0 days 00:00:01.905526832 +1039 41 0 days 00:00:01.536582188 +1039 42 0 days 00:00:02.142814188 +1039 43 0 days 
00:00:02.297146710 +1039 44 0 days 00:00:02.569256282 +1039 45 0 days 00:00:01.470665496 +1039 46 0 days 00:00:01.362605320 +1039 47 0 days 00:00:04.425485347 +1039 48 0 days 00:00:02.018330244 +1039 49 0 days 00:00:02.122308094 +1039 50 0 days 00:00:02.947460486 +1039 51 0 days 00:00:02.686620508 +1039 52 0 days 00:00:02.278917477 +1039 53 0 days 00:00:02.103860646 +1039 54 0 days 00:00:04.441352527 +1039 55 0 days 00:00:02.868314712 +1039 56 0 days 00:00:04.215760341 +1039 57 0 days 00:00:01.456854597 +1039 58 0 days 00:00:03.006212232 +1039 59 0 days 00:00:03.770357633 +1039 60 0 days 00:00:01.258546300 +1039 61 0 days 00:00:01.508303520 +1039 62 0 days 00:00:01.590801245 +1039 63 0 days 00:00:01.406948365 +1039 64 0 days 00:00:04.246785618 +1039 65 0 days 00:00:03.676136062 +1039 66 0 days 00:00:01.649757750 +1039 67 0 days 00:00:02.282506380 +1039 68 0 days 00:00:03.671227020 +1039 69 0 days 00:00:01.261858144 +1039 70 0 days 00:00:02.210267564 +1039 71 0 days 00:00:01.622962540 +1039 72 0 days 00:00:01.359759424 +1039 73 0 days 00:00:01.328684895 +1039 74 0 days 00:00:01.478791868 +1039 75 0 days 00:00:01.721047736 +1039 76 0 days 00:00:02.046652327 +1039 77 0 days 00:00:02.552818905 +1039 78 0 days 00:00:02.363354792 +1039 79 0 days 00:00:03.927879668 +1039 80 0 days 00:00:03.937185892 +1039 81 0 days 00:00:01.523144175 +1039 82 0 days 00:00:03.240932112 +1039 83 0 days 00:00:02.052494457 +1039 84 0 days 00:00:01.835320613 +1039 85 0 days 00:00:02.381425442 +1039 86 0 days 00:00:01.996764885 +1039 87 0 days 00:00:02.436493444 +1039 88 0 days 00:00:01.664078354 +1039 89 0 days 00:00:01.913901457 +1039 90 0 days 00:00:02.088454228 +1039 91 0 days 00:00:03.159909785 +1039 92 0 days 00:00:01.734036118 +1039 93 0 days 00:00:03.997882333 +1039 94 0 days 00:00:01.653934067 +1040 1 0 days 00:00:04.939881360 +1040 2 0 days 00:00:05.280842335 +1040 3 0 days 00:00:03.563826884 +1040 4 0 days 00:00:03.282924453 +1040 5 0 days 00:00:04.182825217 +1040 6 0 days 
00:00:02.547345365 +1040 7 0 days 00:00:02.435945860 +1040 8 0 days 00:00:02.208596580 +1040 9 0 days 00:00:05.799473524 +1040 10 0 days 00:00:05.397827165 +1040 11 0 days 00:00:03.767922893 +1040 12 0 days 00:00:03.307184955 +1040 13 0 days 00:00:02.581282328 +1040 14 0 days 00:00:03.696569976 +1040 15 0 days 00:00:05.147214493 +1040 16 0 days 00:00:02.333449765 +1040 17 0 days 00:00:02.273283946 +1040 18 0 days 00:00:04.741502586 +1040 19 0 days 00:00:02.324571073 +1040 20 0 days 00:00:02.688188270 +1040 21 0 days 00:00:04.882604513 +1040 22 0 days 00:00:05.800972220 +1040 23 0 days 00:00:04.891513280 +1040 24 0 days 00:00:02.575726150 +1040 25 0 days 00:00:02.043732626 +1040 26 0 days 00:00:02.261772540 +1040 27 0 days 00:00:06.009399603 +1040 28 0 days 00:00:02.273004160 +1040 29 0 days 00:00:02.049256426 +1040 30 0 days 00:00:05.183395426 +1040 31 0 days 00:00:05.100045046 +1040 32 0 days 00:00:03.488589736 +1040 33 0 days 00:00:03.642146350 +1040 34 0 days 00:00:02.592551268 +1040 35 0 days 00:00:02.323151030 +1040 36 0 days 00:00:03.449535740 +1040 37 0 days 00:00:02.734265980 +1040 38 0 days 00:00:06.359091715 +1040 39 0 days 00:00:03.629263116 +1040 40 0 days 00:00:04.882816353 +1040 41 0 days 00:00:05.589544584 +1040 42 0 days 00:00:03.387616690 +1040 43 0 days 00:00:04.803050040 +1040 44 0 days 00:00:03.366054755 +1040 45 0 days 00:00:04.680791100 +1040 46 0 days 00:00:02.040413166 +1040 47 0 days 00:00:05.759516192 +1040 48 0 days 00:00:04.886810053 +1040 49 0 days 00:00:05.959426310 +1040 50 0 days 00:00:05.193611013 +1040 51 0 days 00:00:02.111514306 +1040 52 0 days 00:00:06.269562804 +1040 53 0 days 00:00:03.622692726 +1040 54 0 days 00:00:03.677876010 +1040 55 0 days 00:00:05.212244026 +1040 56 0 days 00:00:03.646807623 +1040 57 0 days 00:00:02.396523716 +1040 58 0 days 00:00:04.747061260 +1040 59 0 days 00:00:02.907406093 +1040 60 0 days 00:00:05.279168050 +1040 61 0 days 00:00:03.220293933 +1040 62 0 days 00:00:02.053470540 +1040 63 0 days 
00:00:05.200749740 +1040 64 0 days 00:00:05.129812913 +1040 65 0 days 00:00:05.958784913 +1040 66 0 days 00:00:05.656707788 +1040 67 0 days 00:00:04.859139320 +1040 68 0 days 00:00:02.496394664 +1040 69 0 days 00:00:04.249529074 +1040 70 0 days 00:00:02.230234220 +1040 71 0 days 00:00:02.392561665 +1040 72 0 days 00:00:03.155619826 +1040 73 0 days 00:00:02.683572516 +1040 74 0 days 00:00:02.699540254 +1040 75 0 days 00:00:03.365188660 +1040 76 0 days 00:00:05.439981275 +1040 77 0 days 00:00:04.833533320 +1040 78 0 days 00:00:06.112600603 +1040 79 0 days 00:00:05.263174653 +1040 80 0 days 00:00:02.429973668 +1040 81 0 days 00:00:05.257362465 +1040 82 0 days 00:00:05.149696726 +1040 83 0 days 00:00:04.770489300 +1040 84 0 days 00:00:04.857699160 +1040 85 0 days 00:00:05.440142845 +1040 86 0 days 00:00:02.065856300 +1040 87 0 days 00:00:05.246639920 +1040 88 0 days 00:00:05.448505635 +1040 89 0 days 00:00:03.699596940 +1040 90 0 days 00:00:03.208820466 +1040 91 0 days 00:00:02.296989340 +1040 92 0 days 00:00:02.295009940 +1040 93 0 days 00:00:03.446659900 +1040 94 0 days 00:00:02.028057800 +1040 95 0 days 00:00:03.048601980 +1040 96 0 days 00:00:05.565179495 +1040 97 0 days 00:00:05.924279280 +1040 98 0 days 00:00:03.456483570 +1040 99 0 days 00:00:04.160982500 +1040 100 0 days 00:00:03.013096400 +1041 1 0 days 00:00:01.263137400 +1041 2 0 days 00:00:01.334573262 +1041 3 0 days 00:00:01.058311746 +1041 4 0 days 00:00:02.439441146 +1041 5 0 days 00:00:01.654532000 +1041 6 0 days 00:00:02.436652895 +1041 7 0 days 00:00:02.961893310 +1041 8 0 days 00:00:02.625484653 +1041 9 0 days 00:00:02.649893960 +1041 10 0 days 00:00:01.322398930 +1041 11 0 days 00:00:01.353528796 +1041 12 0 days 00:00:01.695279520 +1041 13 0 days 00:00:01.703199925 +1041 14 0 days 00:00:02.959367084 +1041 15 0 days 00:00:01.452905510 +1041 16 0 days 00:00:01.888003993 +1041 17 0 days 00:00:01.919216314 +1041 18 0 days 00:00:01.794227790 +1041 19 0 days 00:00:03.321227065 +1041 20 0 days 
00:00:01.877546233 +1041 21 0 days 00:00:01.919707225 +1041 22 0 days 00:00:01.244588748 +1041 23 0 days 00:00:01.695724750 +1041 24 0 days 00:00:01.275913940 +1041 25 0 days 00:00:03.169130560 +1041 26 0 days 00:00:02.751039415 +1041 27 0 days 00:00:01.197042800 +1041 28 0 days 00:00:01.699118460 +1041 29 0 days 00:00:01.263898740 +1041 30 0 days 00:00:02.945266480 +1041 31 0 days 00:00:01.058790913 +1041 32 0 days 00:00:02.977984560 +1041 33 0 days 00:00:01.356673148 +1041 34 0 days 00:00:01.236124544 +1041 35 0 days 00:00:01.700741173 +1041 36 0 days 00:00:01.830136265 +1041 37 0 days 00:00:02.820252495 +1041 38 0 days 00:00:01.794504860 +1041 39 0 days 00:00:01.254491832 +1041 40 0 days 00:00:01.799152328 +1041 41 0 days 00:00:01.814245176 +1041 42 0 days 00:00:01.517143093 +1041 43 0 days 00:00:01.967197557 +1041 44 0 days 00:00:01.154375766 +1041 45 0 days 00:00:02.891016424 +1041 46 0 days 00:00:03.076455004 +1041 47 0 days 00:00:01.360917688 +1041 48 0 days 00:00:03.341520588 +1041 49 0 days 00:00:01.815315705 +1041 50 0 days 00:00:01.681874340 +1041 51 0 days 00:00:02.897210280 +1041 52 0 days 00:00:02.751081416 +1041 53 0 days 00:00:01.670441695 +1041 54 0 days 00:00:03.153639080 +1041 55 0 days 00:00:01.681137600 +1041 56 0 days 00:00:02.201973446 +1041 57 0 days 00:00:01.223515444 +1041 58 0 days 00:00:02.132228471 +1041 59 0 days 00:00:01.201188666 +1041 60 0 days 00:00:01.757385592 +1041 61 0 days 00:00:01.512477560 +1041 62 0 days 00:00:01.217847526 +1041 63 0 days 00:00:01.244440876 +1041 64 0 days 00:00:01.193108850 +1041 65 0 days 00:00:01.075437266 +1041 66 0 days 00:00:01.705043945 +1041 67 0 days 00:00:01.471290205 +1041 68 0 days 00:00:02.902731030 +1041 69 0 days 00:00:02.732009765 +1041 70 0 days 00:00:01.761426895 +1041 71 0 days 00:00:01.173422485 +1041 72 0 days 00:00:01.321984716 +1041 73 0 days 00:00:02.949076380 +1041 74 0 days 00:00:01.716102690 +1041 75 0 days 00:00:02.916189132 +1041 76 0 days 00:00:01.723790580 +1041 77 0 days 
00:00:01.381646148 +1041 78 0 days 00:00:01.808164356 +1041 79 0 days 00:00:01.730113565 +1041 80 0 days 00:00:01.855296770 +1041 81 0 days 00:00:01.311729450 +1041 82 0 days 00:00:01.346328445 +1041 83 0 days 00:00:01.304267265 +1041 84 0 days 00:00:01.875878423 +1041 85 0 days 00:00:01.708831455 +1041 86 0 days 00:00:02.821616020 +1041 87 0 days 00:00:01.307086270 +1041 88 0 days 00:00:01.215901465 +1041 89 0 days 00:00:01.073735020 +1041 90 0 days 00:00:01.385962231 +1041 91 0 days 00:00:02.601547420 +1041 92 0 days 00:00:01.638976613 +1041 93 0 days 00:00:01.102589913 +1041 94 0 days 00:00:01.235862520 +1041 95 0 days 00:00:01.818746660 +1041 96 0 days 00:00:01.290755620 +1041 97 0 days 00:00:01.266656925 +1041 98 0 days 00:00:01.227379616 +1041 99 0 days 00:00:01.330757062 +1041 100 0 days 00:00:01.700528940 +1042 1 0 days 00:00:39.698459000 +1042 2 0 days 00:00:32.300600680 +1042 3 0 days 00:00:20.504109226 +1042 4 0 days 00:01:03.652788220 +1042 5 0 days 00:00:40.504509622 +1042 6 0 days 00:01:07.321801144 +1042 7 0 days 00:00:32.216739793 +1042 8 0 days 00:00:32.261123293 +1042 9 0 days 00:00:20.401852353 +1042 10 0 days 00:01:03.483559492 +1043 1 0 days 00:00:16.205668957 +1043 2 0 days 00:00:15.374215933 +1043 3 0 days 00:00:21.311584875 +1043 4 0 days 00:00:22.646156540 +1043 5 0 days 00:00:12.343263693 +1043 6 0 days 00:00:19.627704486 +1043 7 0 days 00:00:12.450115580 +1043 8 0 days 00:00:24.052436148 +1043 9 0 days 00:00:12.617174426 +1043 10 0 days 00:00:34.375511500 +1043 11 0 days 00:00:22.481054404 +1043 12 0 days 00:00:12.367716093 +1043 13 0 days 00:00:23.344118220 +1043 14 0 days 00:00:12.460894086 +1043 16 0 days 00:00:15.692102776 +1043 17 0 days 00:00:21.303097990 +1043 18 0 days 00:00:15.087359716 +1043 19 0 days 00:00:40.762628671 +1044 1 0 days 00:01:03.938930125 +1044 2 0 days 00:01:08.169620724 +1044 3 0 days 00:00:24.721823768 +1044 4 0 days 00:00:57.208673766 +1044 5 0 days 00:00:25.839303903 +1044 6 0 days 00:00:35.477575230 +1044 7 
0 days 00:00:21.090272413 +1044 8 0 days 00:00:25.082185560 +1044 9 0 days 00:00:32.358300453 +1044 10 0 days 00:00:34.763857655 +1044 11 0 days 00:00:40.097074296 +1045 1 0 days 00:00:36.388727915 +1045 2 0 days 00:00:15.941001243 +1045 3 0 days 00:00:15.755602140 +1045 4 0 days 00:00:22.253922585 +1045 5 0 days 00:00:23.266361932 +1045 6 0 days 00:00:21.190939950 +1045 8 0 days 00:00:23.779760610 +1045 10 0 days 00:00:19.956161826 +1045 12 0 days 00:00:21.531473300 +1045 13 0 days 00:00:19.906287086 +1045 14 0 days 00:00:14.343805735 +1045 15 0 days 00:00:34.916833446 +1045 16 0 days 00:00:14.342105955 +1045 17 0 days 00:00:14.395458390 +1045 18 0 days 00:00:21.481031420 +1045 19 0 days 00:00:21.196595515 +1045 20 0 days 00:00:14.401369975 +1045 21 0 days 00:00:36.248687215 +1045 23 0 days 00:00:14.271367710 +1045 24 0 days 00:00:14.307989575 +1046 1 0 days 00:00:32.259388226 +1046 2 0 days 00:00:32.292375220 +1046 3 0 days 00:00:20.345997840 +1046 4 0 days 00:00:20.537253286 +1046 5 0 days 00:00:25.441950930 +1046 6 0 days 00:00:32.465020626 +1046 7 0 days 00:00:38.462664626 +1046 8 0 days 00:00:22.773159285 +1046 9 0 days 00:00:20.744844793 +1046 10 0 days 00:00:53.190321826 +1046 11 0 days 00:00:20.622739093 +1046 12 0 days 00:00:23.155782375 +1046 13 0 days 00:00:32.759800213 +1046 14 0 days 00:00:32.471440173 +1046 15 0 days 00:00:36.897113692 +1046 17 0 days 00:00:36.954533168 +1047 1 0 days 00:00:12.635131186 +1047 2 0 days 00:00:19.735568646 +1047 3 0 days 00:00:19.991167966 +1047 4 0 days 00:00:14.024921265 +1047 5 0 days 00:00:12.609716466 +1047 6 0 days 00:00:12.784189546 +1047 7 0 days 00:00:21.420267960 +1047 8 0 days 00:00:21.383382750 +1047 9 0 days 00:00:34.297359513 +1047 10 0 days 00:00:38.702773140 +1047 11 0 days 00:00:22.486542068 +1047 12 0 days 00:00:21.340124425 +1047 13 0 days 00:00:19.730527713 +1047 14 0 days 00:00:19.848785993 +1047 15 0 days 00:00:21.183192445 +1047 16 0 days 00:00:14.994083804 +1047 17 0 days 00:00:14.340523725 +1047 
18 0 days 00:00:12.816762640 +1047 19 0 days 00:00:12.736371986 +1047 21 0 days 00:00:35.994546285 +1047 22 0 days 00:00:12.514546026 +1047 23 0 days 00:00:34.909838433 +1047 24 0 days 00:00:12.274611533 +1047 25 0 days 00:00:35.018565620 diff --git a/ablation/runtime/params.tsv b/ablation/runtime/params.tsv new file mode 100644 index 000000000..554bd776d --- /dev/null +++ b/ablation/runtime/params.tsv @@ -0,0 +1,788728 @@ +experiment_id trial_id param_name param_value +0 1 model.embedding_dim 2.0 +0 1 optimizer.lr 0.0015497131345244333 +0 1 negative_sampler.num_negs_per_pos 23.0 +0 1 training.batch_size 2.0 +0 2 model.embedding_dim 1.0 +0 2 optimizer.lr 0.003981455076008925 +0 2 negative_sampler.num_negs_per_pos 12.0 +0 2 training.batch_size 0.0 +0 3 model.embedding_dim 1.0 +0 3 optimizer.lr 0.021649521934071055 +0 3 negative_sampler.num_negs_per_pos 55.0 +0 3 training.batch_size 2.0 +0 4 model.embedding_dim 2.0 +0 4 optimizer.lr 0.06887822274423355 +0 4 negative_sampler.num_negs_per_pos 11.0 +0 4 training.batch_size 1.0 +0 5 model.embedding_dim 0.0 +0 5 optimizer.lr 0.011607008750134373 +0 5 negative_sampler.num_negs_per_pos 31.0 +0 5 training.batch_size 2.0 +0 6 model.embedding_dim 1.0 +0 6 optimizer.lr 0.045682211669369706 +0 6 negative_sampler.num_negs_per_pos 11.0 +0 6 training.batch_size 0.0 +0 7 model.embedding_dim 2.0 +0 7 optimizer.lr 0.05710936529587552 +0 7 negative_sampler.num_negs_per_pos 28.0 +0 7 training.batch_size 0.0 +0 8 model.embedding_dim 2.0 +0 8 optimizer.lr 0.0032939266788137916 +0 8 negative_sampler.num_negs_per_pos 21.0 +0 8 training.batch_size 2.0 +0 9 model.embedding_dim 2.0 +0 9 optimizer.lr 0.0309028477921421 +0 9 negative_sampler.num_negs_per_pos 24.0 +0 9 training.batch_size 1.0 +0 10 model.embedding_dim 1.0 +0 10 optimizer.lr 0.028272230617271618 +0 10 negative_sampler.num_negs_per_pos 85.0 +0 10 training.batch_size 1.0 +0 11 model.embedding_dim 1.0 +0 11 optimizer.lr 0.09668165156135632 +0 11 negative_sampler.num_negs_per_pos 64.0 
+0 11 training.batch_size 2.0 +0 12 model.embedding_dim 2.0 +0 12 optimizer.lr 0.01761797869118534 +0 12 negative_sampler.num_negs_per_pos 2.0 +0 12 training.batch_size 2.0 +0 13 model.embedding_dim 0.0 +0 13 optimizer.lr 0.006155439161619009 +0 13 negative_sampler.num_negs_per_pos 13.0 +0 13 training.batch_size 1.0 +0 14 model.embedding_dim 0.0 +0 14 optimizer.lr 0.047493364421744746 +0 14 negative_sampler.num_negs_per_pos 25.0 +0 14 training.batch_size 1.0 +0 15 model.embedding_dim 2.0 +0 15 optimizer.lr 0.001014551667426264 +0 15 negative_sampler.num_negs_per_pos 48.0 +0 15 training.batch_size 0.0 +0 16 model.embedding_dim 1.0 +0 16 optimizer.lr 0.00287947087616508 +0 16 negative_sampler.num_negs_per_pos 0.0 +0 16 training.batch_size 1.0 +0 17 model.embedding_dim 1.0 +0 17 optimizer.lr 0.0013894063416783028 +0 17 negative_sampler.num_negs_per_pos 24.0 +0 17 training.batch_size 1.0 +0 18 model.embedding_dim 0.0 +0 18 optimizer.lr 0.09990992186329085 +0 18 negative_sampler.num_negs_per_pos 97.0 +0 18 training.batch_size 1.0 +0 19 model.embedding_dim 1.0 +0 19 optimizer.lr 0.026336687269349478 +0 19 negative_sampler.num_negs_per_pos 56.0 +0 19 training.batch_size 2.0 +0 20 model.embedding_dim 1.0 +0 20 optimizer.lr 0.0020261723396900375 +0 20 negative_sampler.num_negs_per_pos 71.0 +0 20 training.batch_size 2.0 +0 21 model.embedding_dim 2.0 +0 21 optimizer.lr 0.0735334953090647 +0 21 negative_sampler.num_negs_per_pos 93.0 +0 21 training.batch_size 0.0 +0 22 model.embedding_dim 0.0 +0 22 optimizer.lr 0.0022429409405501357 +0 22 negative_sampler.num_negs_per_pos 1.0 +0 22 training.batch_size 1.0 +0 23 model.embedding_dim 1.0 +0 23 optimizer.lr 0.026869788966924435 +0 23 negative_sampler.num_negs_per_pos 54.0 +0 23 training.batch_size 2.0 +0 24 model.embedding_dim 1.0 +0 24 optimizer.lr 0.0013537406055147189 +0 24 negative_sampler.num_negs_per_pos 53.0 +0 24 training.batch_size 1.0 +0 25 model.embedding_dim 0.0 +0 25 optimizer.lr 0.09627142184836633 +0 25 
negative_sampler.num_negs_per_pos 26.0 +0 25 training.batch_size 2.0 +0 26 model.embedding_dim 1.0 +0 26 optimizer.lr 0.00255541561605145 +0 26 negative_sampler.num_negs_per_pos 63.0 +0 26 training.batch_size 1.0 +0 27 model.embedding_dim 2.0 +0 27 optimizer.lr 0.011998661366389315 +0 27 negative_sampler.num_negs_per_pos 15.0 +0 27 training.batch_size 0.0 +0 28 model.embedding_dim 2.0 +0 28 optimizer.lr 0.00789811451843194 +0 28 negative_sampler.num_negs_per_pos 53.0 +0 28 training.batch_size 1.0 +0 29 model.embedding_dim 0.0 +0 29 optimizer.lr 0.08765813395351259 +0 29 negative_sampler.num_negs_per_pos 17.0 +0 29 training.batch_size 1.0 +0 1 dataset """fb15k237""" +0 1 model """complex""" +0 1 loss """bceaftersigmoid""" +0 1 regularizer """no""" +0 1 optimizer """adam""" +0 1 training_loop """owa""" +0 1 negative_sampler """basic""" +0 1 evaluator """rankbased""" +0 2 dataset """fb15k237""" +0 2 model """complex""" +0 2 loss """bceaftersigmoid""" +0 2 regularizer """no""" +0 2 optimizer """adam""" +0 2 training_loop """owa""" +0 2 negative_sampler """basic""" +0 2 evaluator """rankbased""" +0 3 dataset """fb15k237""" +0 3 model """complex""" +0 3 loss """bceaftersigmoid""" +0 3 regularizer """no""" +0 3 optimizer """adam""" +0 3 training_loop """owa""" +0 3 negative_sampler """basic""" +0 3 evaluator """rankbased""" +0 4 dataset """fb15k237""" +0 4 model """complex""" +0 4 loss """bceaftersigmoid""" +0 4 regularizer """no""" +0 4 optimizer """adam""" +0 4 training_loop """owa""" +0 4 negative_sampler """basic""" +0 4 evaluator """rankbased""" +0 5 dataset """fb15k237""" +0 5 model """complex""" +0 5 loss """bceaftersigmoid""" +0 5 regularizer """no""" +0 5 optimizer """adam""" +0 5 training_loop """owa""" +0 5 negative_sampler """basic""" +0 5 evaluator """rankbased""" +0 6 dataset """fb15k237""" +0 6 model """complex""" +0 6 loss """bceaftersigmoid""" +0 6 regularizer """no""" +0 6 optimizer """adam""" +0 6 training_loop """owa""" +0 6 negative_sampler 
"""basic""" +0 6 evaluator """rankbased""" +0 7 dataset """fb15k237""" +0 7 model """complex""" +0 7 loss """bceaftersigmoid""" +0 7 regularizer """no""" +0 7 optimizer """adam""" +0 7 training_loop """owa""" +0 7 negative_sampler """basic""" +0 7 evaluator """rankbased""" +0 8 dataset """fb15k237""" +0 8 model """complex""" +0 8 loss """bceaftersigmoid""" +0 8 regularizer """no""" +0 8 optimizer """adam""" +0 8 training_loop """owa""" +0 8 negative_sampler """basic""" +0 8 evaluator """rankbased""" +0 9 dataset """fb15k237""" +0 9 model """complex""" +0 9 loss """bceaftersigmoid""" +0 9 regularizer """no""" +0 9 optimizer """adam""" +0 9 training_loop """owa""" +0 9 negative_sampler """basic""" +0 9 evaluator """rankbased""" +0 10 dataset """fb15k237""" +0 10 model """complex""" +0 10 loss """bceaftersigmoid""" +0 10 regularizer """no""" +0 10 optimizer """adam""" +0 10 training_loop """owa""" +0 10 negative_sampler """basic""" +0 10 evaluator """rankbased""" +0 11 dataset """fb15k237""" +0 11 model """complex""" +0 11 loss """bceaftersigmoid""" +0 11 regularizer """no""" +0 11 optimizer """adam""" +0 11 training_loop """owa""" +0 11 negative_sampler """basic""" +0 11 evaluator """rankbased""" +0 12 dataset """fb15k237""" +0 12 model """complex""" +0 12 loss """bceaftersigmoid""" +0 12 regularizer """no""" +0 12 optimizer """adam""" +0 12 training_loop """owa""" +0 12 negative_sampler """basic""" +0 12 evaluator """rankbased""" +0 13 dataset """fb15k237""" +0 13 model """complex""" +0 13 loss """bceaftersigmoid""" +0 13 regularizer """no""" +0 13 optimizer """adam""" +0 13 training_loop """owa""" +0 13 negative_sampler """basic""" +0 13 evaluator """rankbased""" +0 14 dataset """fb15k237""" +0 14 model """complex""" +0 14 loss """bceaftersigmoid""" +0 14 regularizer """no""" +0 14 optimizer """adam""" +0 14 training_loop """owa""" +0 14 negative_sampler """basic""" +0 14 evaluator """rankbased""" +0 15 dataset """fb15k237""" +0 15 model """complex""" +0 15 loss 
"""bceaftersigmoid""" +0 15 regularizer """no""" +0 15 optimizer """adam""" +0 15 training_loop """owa""" +0 15 negative_sampler """basic""" +0 15 evaluator """rankbased""" +0 16 dataset """fb15k237""" +0 16 model """complex""" +0 16 loss """bceaftersigmoid""" +0 16 regularizer """no""" +0 16 optimizer """adam""" +0 16 training_loop """owa""" +0 16 negative_sampler """basic""" +0 16 evaluator """rankbased""" +0 17 dataset """fb15k237""" +0 17 model """complex""" +0 17 loss """bceaftersigmoid""" +0 17 regularizer """no""" +0 17 optimizer """adam""" +0 17 training_loop """owa""" +0 17 negative_sampler """basic""" +0 17 evaluator """rankbased""" +0 18 dataset """fb15k237""" +0 18 model """complex""" +0 18 loss """bceaftersigmoid""" +0 18 regularizer """no""" +0 18 optimizer """adam""" +0 18 training_loop """owa""" +0 18 negative_sampler """basic""" +0 18 evaluator """rankbased""" +0 19 dataset """fb15k237""" +0 19 model """complex""" +0 19 loss """bceaftersigmoid""" +0 19 regularizer """no""" +0 19 optimizer """adam""" +0 19 training_loop """owa""" +0 19 negative_sampler """basic""" +0 19 evaluator """rankbased""" +0 20 dataset """fb15k237""" +0 20 model """complex""" +0 20 loss """bceaftersigmoid""" +0 20 regularizer """no""" +0 20 optimizer """adam""" +0 20 training_loop """owa""" +0 20 negative_sampler """basic""" +0 20 evaluator """rankbased""" +0 21 dataset """fb15k237""" +0 21 model """complex""" +0 21 loss """bceaftersigmoid""" +0 21 regularizer """no""" +0 21 optimizer """adam""" +0 21 training_loop """owa""" +0 21 negative_sampler """basic""" +0 21 evaluator """rankbased""" +0 22 dataset """fb15k237""" +0 22 model """complex""" +0 22 loss """bceaftersigmoid""" +0 22 regularizer """no""" +0 22 optimizer """adam""" +0 22 training_loop """owa""" +0 22 negative_sampler """basic""" +0 22 evaluator """rankbased""" +0 23 dataset """fb15k237""" +0 23 model """complex""" +0 23 loss """bceaftersigmoid""" +0 23 regularizer """no""" +0 23 optimizer """adam""" +0 23 
training_loop """owa""" +0 23 negative_sampler """basic""" +0 23 evaluator """rankbased""" +0 24 dataset """fb15k237""" +0 24 model """complex""" +0 24 loss """bceaftersigmoid""" +0 24 regularizer """no""" +0 24 optimizer """adam""" +0 24 training_loop """owa""" +0 24 negative_sampler """basic""" +0 24 evaluator """rankbased""" +0 25 dataset """fb15k237""" +0 25 model """complex""" +0 25 loss """bceaftersigmoid""" +0 25 regularizer """no""" +0 25 optimizer """adam""" +0 25 training_loop """owa""" +0 25 negative_sampler """basic""" +0 25 evaluator """rankbased""" +0 26 dataset """fb15k237""" +0 26 model """complex""" +0 26 loss """bceaftersigmoid""" +0 26 regularizer """no""" +0 26 optimizer """adam""" +0 26 training_loop """owa""" +0 26 negative_sampler """basic""" +0 26 evaluator """rankbased""" +0 27 dataset """fb15k237""" +0 27 model """complex""" +0 27 loss """bceaftersigmoid""" +0 27 regularizer """no""" +0 27 optimizer """adam""" +0 27 training_loop """owa""" +0 27 negative_sampler """basic""" +0 27 evaluator """rankbased""" +0 28 dataset """fb15k237""" +0 28 model """complex""" +0 28 loss """bceaftersigmoid""" +0 28 regularizer """no""" +0 28 optimizer """adam""" +0 28 training_loop """owa""" +0 28 negative_sampler """basic""" +0 28 evaluator """rankbased""" +0 29 dataset """fb15k237""" +0 29 model """complex""" +0 29 loss """bceaftersigmoid""" +0 29 regularizer """no""" +0 29 optimizer """adam""" +0 29 training_loop """owa""" +0 29 negative_sampler """basic""" +0 29 evaluator """rankbased""" +1 1 model.embedding_dim 1.0 +1 1 optimizer.lr 0.0016898943039874916 +1 1 negative_sampler.num_negs_per_pos 82.0 +1 1 training.batch_size 0.0 +1 2 model.embedding_dim 2.0 +1 2 optimizer.lr 0.00270366152039507 +1 2 negative_sampler.num_negs_per_pos 25.0 +1 2 training.batch_size 0.0 +1 3 model.embedding_dim 2.0 +1 3 optimizer.lr 0.07179675858747844 +1 3 negative_sampler.num_negs_per_pos 15.0 +1 3 training.batch_size 1.0 +1 4 model.embedding_dim 1.0 +1 4 optimizer.lr 
0.008140365017861546 +1 4 negative_sampler.num_negs_per_pos 77.0 +1 4 training.batch_size 0.0 +1 5 model.embedding_dim 0.0 +1 5 optimizer.lr 0.010287553795875912 +1 5 negative_sampler.num_negs_per_pos 56.0 +1 5 training.batch_size 0.0 +1 6 model.embedding_dim 0.0 +1 6 optimizer.lr 0.025613682586384114 +1 6 negative_sampler.num_negs_per_pos 16.0 +1 6 training.batch_size 2.0 +1 7 model.embedding_dim 1.0 +1 7 optimizer.lr 0.00115829589818213 +1 7 negative_sampler.num_negs_per_pos 62.0 +1 7 training.batch_size 1.0 +1 8 model.embedding_dim 1.0 +1 8 optimizer.lr 0.0019372226969722023 +1 8 negative_sampler.num_negs_per_pos 9.0 +1 8 training.batch_size 0.0 +1 9 model.embedding_dim 0.0 +1 9 optimizer.lr 0.023743033539185332 +1 9 negative_sampler.num_negs_per_pos 73.0 +1 9 training.batch_size 1.0 +1 10 model.embedding_dim 2.0 +1 10 optimizer.lr 0.006565763158107635 +1 10 negative_sampler.num_negs_per_pos 23.0 +1 10 training.batch_size 0.0 +1 11 model.embedding_dim 1.0 +1 11 optimizer.lr 0.00924125883478061 +1 11 negative_sampler.num_negs_per_pos 4.0 +1 11 training.batch_size 1.0 +1 12 model.embedding_dim 1.0 +1 12 optimizer.lr 0.00668665304135319 +1 12 negative_sampler.num_negs_per_pos 88.0 +1 12 training.batch_size 1.0 +1 13 model.embedding_dim 2.0 +1 13 optimizer.lr 0.01859625692246386 +1 13 negative_sampler.num_negs_per_pos 98.0 +1 13 training.batch_size 2.0 +1 14 model.embedding_dim 1.0 +1 14 optimizer.lr 0.001315929322881302 +1 14 negative_sampler.num_negs_per_pos 33.0 +1 14 training.batch_size 0.0 +1 15 model.embedding_dim 1.0 +1 15 optimizer.lr 0.015465312815956323 +1 15 negative_sampler.num_negs_per_pos 83.0 +1 15 training.batch_size 1.0 +1 16 model.embedding_dim 2.0 +1 16 optimizer.lr 0.0029516241730838102 +1 16 negative_sampler.num_negs_per_pos 7.0 +1 16 training.batch_size 1.0 +1 17 model.embedding_dim 1.0 +1 17 optimizer.lr 0.008760110866631934 +1 17 negative_sampler.num_negs_per_pos 75.0 +1 17 training.batch_size 2.0 +1 18 model.embedding_dim 2.0 +1 18 
optimizer.lr 0.006251924526088945 +1 18 negative_sampler.num_negs_per_pos 78.0 +1 18 training.batch_size 2.0 +1 19 model.embedding_dim 0.0 +1 19 optimizer.lr 0.0015882343412904132 +1 19 negative_sampler.num_negs_per_pos 64.0 +1 19 training.batch_size 0.0 +1 20 model.embedding_dim 2.0 +1 20 optimizer.lr 0.018147809305542963 +1 20 negative_sampler.num_negs_per_pos 17.0 +1 20 training.batch_size 2.0 +1 21 model.embedding_dim 0.0 +1 21 optimizer.lr 0.0076151746295538185 +1 21 negative_sampler.num_negs_per_pos 87.0 +1 21 training.batch_size 0.0 +1 22 model.embedding_dim 2.0 +1 22 optimizer.lr 0.004863202695904888 +1 22 negative_sampler.num_negs_per_pos 34.0 +1 22 training.batch_size 0.0 +1 23 model.embedding_dim 2.0 +1 23 optimizer.lr 0.004445465856818809 +1 23 negative_sampler.num_negs_per_pos 32.0 +1 23 training.batch_size 0.0 +1 24 model.embedding_dim 2.0 +1 24 optimizer.lr 0.005393727017210576 +1 24 negative_sampler.num_negs_per_pos 28.0 +1 24 training.batch_size 2.0 +1 25 model.embedding_dim 0.0 +1 25 optimizer.lr 0.04784792444106548 +1 25 negative_sampler.num_negs_per_pos 57.0 +1 25 training.batch_size 0.0 +1 1 dataset """fb15k237""" +1 1 model """complex""" +1 1 loss """softplus""" +1 1 regularizer """no""" +1 1 optimizer """adam""" +1 1 training_loop """owa""" +1 1 negative_sampler """basic""" +1 1 evaluator """rankbased""" +1 2 dataset """fb15k237""" +1 2 model """complex""" +1 2 loss """softplus""" +1 2 regularizer """no""" +1 2 optimizer """adam""" +1 2 training_loop """owa""" +1 2 negative_sampler """basic""" +1 2 evaluator """rankbased""" +1 3 dataset """fb15k237""" +1 3 model """complex""" +1 3 loss """softplus""" +1 3 regularizer """no""" +1 3 optimizer """adam""" +1 3 training_loop """owa""" +1 3 negative_sampler """basic""" +1 3 evaluator """rankbased""" +1 4 dataset """fb15k237""" +1 4 model """complex""" +1 4 loss """softplus""" +1 4 regularizer """no""" +1 4 optimizer """adam""" +1 4 training_loop """owa""" +1 4 negative_sampler """basic""" +1 4 
evaluator """rankbased""" +1 5 dataset """fb15k237""" +1 5 model """complex""" +1 5 loss """softplus""" +1 5 regularizer """no""" +1 5 optimizer """adam""" +1 5 training_loop """owa""" +1 5 negative_sampler """basic""" +1 5 evaluator """rankbased""" +1 6 dataset """fb15k237""" +1 6 model """complex""" +1 6 loss """softplus""" +1 6 regularizer """no""" +1 6 optimizer """adam""" +1 6 training_loop """owa""" +1 6 negative_sampler """basic""" +1 6 evaluator """rankbased""" +1 7 dataset """fb15k237""" +1 7 model """complex""" +1 7 loss """softplus""" +1 7 regularizer """no""" +1 7 optimizer """adam""" +1 7 training_loop """owa""" +1 7 negative_sampler """basic""" +1 7 evaluator """rankbased""" +1 8 dataset """fb15k237""" +1 8 model """complex""" +1 8 loss """softplus""" +1 8 regularizer """no""" +1 8 optimizer """adam""" +1 8 training_loop """owa""" +1 8 negative_sampler """basic""" +1 8 evaluator """rankbased""" +1 9 dataset """fb15k237""" +1 9 model """complex""" +1 9 loss """softplus""" +1 9 regularizer """no""" +1 9 optimizer """adam""" +1 9 training_loop """owa""" +1 9 negative_sampler """basic""" +1 9 evaluator """rankbased""" +1 10 dataset """fb15k237""" +1 10 model """complex""" +1 10 loss """softplus""" +1 10 regularizer """no""" +1 10 optimizer """adam""" +1 10 training_loop """owa""" +1 10 negative_sampler """basic""" +1 10 evaluator """rankbased""" +1 11 dataset """fb15k237""" +1 11 model """complex""" +1 11 loss """softplus""" +1 11 regularizer """no""" +1 11 optimizer """adam""" +1 11 training_loop """owa""" +1 11 negative_sampler """basic""" +1 11 evaluator """rankbased""" +1 12 dataset """fb15k237""" +1 12 model """complex""" +1 12 loss """softplus""" +1 12 regularizer """no""" +1 12 optimizer """adam""" +1 12 training_loop """owa""" +1 12 negative_sampler """basic""" +1 12 evaluator """rankbased""" +1 13 dataset """fb15k237""" +1 13 model """complex""" +1 13 loss """softplus""" +1 13 regularizer """no""" +1 13 optimizer """adam""" +1 13 training_loop 
"""owa""" +1 13 negative_sampler """basic""" +1 13 evaluator """rankbased""" +1 14 dataset """fb15k237""" +1 14 model """complex""" +1 14 loss """softplus""" +1 14 regularizer """no""" +1 14 optimizer """adam""" +1 14 training_loop """owa""" +1 14 negative_sampler """basic""" +1 14 evaluator """rankbased""" +1 15 dataset """fb15k237""" +1 15 model """complex""" +1 15 loss """softplus""" +1 15 regularizer """no""" +1 15 optimizer """adam""" +1 15 training_loop """owa""" +1 15 negative_sampler """basic""" +1 15 evaluator """rankbased""" +1 16 dataset """fb15k237""" +1 16 model """complex""" +1 16 loss """softplus""" +1 16 regularizer """no""" +1 16 optimizer """adam""" +1 16 training_loop """owa""" +1 16 negative_sampler """basic""" +1 16 evaluator """rankbased""" +1 17 dataset """fb15k237""" +1 17 model """complex""" +1 17 loss """softplus""" +1 17 regularizer """no""" +1 17 optimizer """adam""" +1 17 training_loop """owa""" +1 17 negative_sampler """basic""" +1 17 evaluator """rankbased""" +1 18 dataset """fb15k237""" +1 18 model """complex""" +1 18 loss """softplus""" +1 18 regularizer """no""" +1 18 optimizer """adam""" +1 18 training_loop """owa""" +1 18 negative_sampler """basic""" +1 18 evaluator """rankbased""" +1 19 dataset """fb15k237""" +1 19 model """complex""" +1 19 loss """softplus""" +1 19 regularizer """no""" +1 19 optimizer """adam""" +1 19 training_loop """owa""" +1 19 negative_sampler """basic""" +1 19 evaluator """rankbased""" +1 20 dataset """fb15k237""" +1 20 model """complex""" +1 20 loss """softplus""" +1 20 regularizer """no""" +1 20 optimizer """adam""" +1 20 training_loop """owa""" +1 20 negative_sampler """basic""" +1 20 evaluator """rankbased""" +1 21 dataset """fb15k237""" +1 21 model """complex""" +1 21 loss """softplus""" +1 21 regularizer """no""" +1 21 optimizer """adam""" +1 21 training_loop """owa""" +1 21 negative_sampler """basic""" +1 21 evaluator """rankbased""" +1 22 dataset """fb15k237""" +1 22 model """complex""" +1 22 loss 
"""softplus""" +1 22 regularizer """no""" +1 22 optimizer """adam""" +1 22 training_loop """owa""" +1 22 negative_sampler """basic""" +1 22 evaluator """rankbased""" +1 23 dataset """fb15k237""" +1 23 model """complex""" +1 23 loss """softplus""" +1 23 regularizer """no""" +1 23 optimizer """adam""" +1 23 training_loop """owa""" +1 23 negative_sampler """basic""" +1 23 evaluator """rankbased""" +1 24 dataset """fb15k237""" +1 24 model """complex""" +1 24 loss """softplus""" +1 24 regularizer """no""" +1 24 optimizer """adam""" +1 24 training_loop """owa""" +1 24 negative_sampler """basic""" +1 24 evaluator """rankbased""" +1 25 dataset """fb15k237""" +1 25 model """complex""" +1 25 loss """softplus""" +1 25 regularizer """no""" +1 25 optimizer """adam""" +1 25 training_loop """owa""" +1 25 negative_sampler """basic""" +1 25 evaluator """rankbased""" +2 1 model.embedding_dim 0.0 +2 1 optimizer.lr 0.041827684434297865 +2 1 negative_sampler.num_negs_per_pos 44.0 +2 1 training.batch_size 0.0 +2 2 model.embedding_dim 1.0 +2 2 optimizer.lr 0.02346638312738017 +2 2 negative_sampler.num_negs_per_pos 77.0 +2 2 training.batch_size 1.0 +2 3 model.embedding_dim 0.0 +2 3 optimizer.lr 0.03424249072624925 +2 3 negative_sampler.num_negs_per_pos 28.0 +2 3 training.batch_size 2.0 +2 4 model.embedding_dim 2.0 +2 4 optimizer.lr 0.0037530646699646467 +2 4 negative_sampler.num_negs_per_pos 92.0 +2 4 training.batch_size 0.0 +2 5 model.embedding_dim 0.0 +2 5 optimizer.lr 0.056198875124146945 +2 5 negative_sampler.num_negs_per_pos 98.0 +2 5 training.batch_size 0.0 +2 6 model.embedding_dim 0.0 +2 6 optimizer.lr 0.03426133518780301 +2 6 negative_sampler.num_negs_per_pos 36.0 +2 6 training.batch_size 2.0 +2 7 model.embedding_dim 2.0 +2 7 optimizer.lr 0.0014494924622430467 +2 7 negative_sampler.num_negs_per_pos 40.0 +2 7 training.batch_size 2.0 +2 8 model.embedding_dim 2.0 +2 8 optimizer.lr 0.011370306266253126 +2 8 negative_sampler.num_negs_per_pos 45.0 +2 8 training.batch_size 0.0 +2 9 
model.embedding_dim 1.0 +2 9 optimizer.lr 0.0065427238369255535 +2 9 negative_sampler.num_negs_per_pos 18.0 +2 9 training.batch_size 2.0 +2 10 model.embedding_dim 0.0 +2 10 optimizer.lr 0.011703111340997593 +2 10 negative_sampler.num_negs_per_pos 84.0 +2 10 training.batch_size 1.0 +2 11 model.embedding_dim 2.0 +2 11 optimizer.lr 0.07336958013643376 +2 11 negative_sampler.num_negs_per_pos 84.0 +2 11 training.batch_size 0.0 +2 12 model.embedding_dim 1.0 +2 12 optimizer.lr 0.04773055441195216 +2 12 negative_sampler.num_negs_per_pos 2.0 +2 12 training.batch_size 1.0 +2 13 model.embedding_dim 0.0 +2 13 optimizer.lr 0.0038670345696363473 +2 13 negative_sampler.num_negs_per_pos 75.0 +2 13 training.batch_size 0.0 +2 14 model.embedding_dim 2.0 +2 14 optimizer.lr 0.07339267163231095 +2 14 negative_sampler.num_negs_per_pos 16.0 +2 14 training.batch_size 2.0 +2 15 model.embedding_dim 1.0 +2 15 optimizer.lr 0.060666341871871685 +2 15 negative_sampler.num_negs_per_pos 60.0 +2 15 training.batch_size 2.0 +2 16 model.embedding_dim 2.0 +2 16 optimizer.lr 0.005092783310061479 +2 16 negative_sampler.num_negs_per_pos 99.0 +2 16 training.batch_size 2.0 +2 17 model.embedding_dim 0.0 +2 17 optimizer.lr 0.003696795551616802 +2 17 negative_sampler.num_negs_per_pos 79.0 +2 17 training.batch_size 1.0 +2 18 model.embedding_dim 0.0 +2 18 optimizer.lr 0.0010790030283089245 +2 18 negative_sampler.num_negs_per_pos 74.0 +2 18 training.batch_size 1.0 +2 19 model.embedding_dim 0.0 +2 19 optimizer.lr 0.010019841005206401 +2 19 negative_sampler.num_negs_per_pos 84.0 +2 19 training.batch_size 1.0 +2 20 model.embedding_dim 1.0 +2 20 optimizer.lr 0.0029007722928214375 +2 20 negative_sampler.num_negs_per_pos 72.0 +2 20 training.batch_size 1.0 +2 21 model.embedding_dim 1.0 +2 21 optimizer.lr 0.06209003408662139 +2 21 negative_sampler.num_negs_per_pos 36.0 +2 21 training.batch_size 0.0 +2 22 model.embedding_dim 2.0 +2 22 optimizer.lr 0.05560535158057506 +2 22 negative_sampler.num_negs_per_pos 33.0 +2 22 
training.batch_size 0.0 +2 23 model.embedding_dim 2.0 +2 23 optimizer.lr 0.03940395028474403 +2 23 negative_sampler.num_negs_per_pos 43.0 +2 23 training.batch_size 0.0 +2 24 model.embedding_dim 2.0 +2 24 optimizer.lr 0.008758945698054448 +2 24 negative_sampler.num_negs_per_pos 73.0 +2 24 training.batch_size 0.0 +2 25 model.embedding_dim 1.0 +2 25 optimizer.lr 0.043562525687879626 +2 25 negative_sampler.num_negs_per_pos 18.0 +2 25 training.batch_size 1.0 +2 26 model.embedding_dim 0.0 +2 26 optimizer.lr 0.0038917586595198245 +2 26 negative_sampler.num_negs_per_pos 36.0 +2 26 training.batch_size 2.0 +2 27 model.embedding_dim 0.0 +2 27 optimizer.lr 0.044350121053162 +2 27 negative_sampler.num_negs_per_pos 85.0 +2 27 training.batch_size 2.0 +2 28 model.embedding_dim 1.0 +2 28 optimizer.lr 0.05407052758684125 +2 28 negative_sampler.num_negs_per_pos 87.0 +2 28 training.batch_size 1.0 +2 29 model.embedding_dim 2.0 +2 29 optimizer.lr 0.010774094430481265 +2 29 negative_sampler.num_negs_per_pos 63.0 +2 29 training.batch_size 0.0 +2 30 model.embedding_dim 1.0 +2 30 optimizer.lr 0.002309741490740824 +2 30 negative_sampler.num_negs_per_pos 0.0 +2 30 training.batch_size 0.0 +2 31 model.embedding_dim 2.0 +2 31 optimizer.lr 0.021273429193094308 +2 31 negative_sampler.num_negs_per_pos 6.0 +2 31 training.batch_size 1.0 +2 32 model.embedding_dim 2.0 +2 32 optimizer.lr 0.006053483526409268 +2 32 negative_sampler.num_negs_per_pos 48.0 +2 32 training.batch_size 0.0 +2 33 model.embedding_dim 0.0 +2 33 optimizer.lr 0.05531659109102715 +2 33 negative_sampler.num_negs_per_pos 36.0 +2 33 training.batch_size 2.0 +2 34 model.embedding_dim 0.0 +2 34 optimizer.lr 0.02692819515582961 +2 34 negative_sampler.num_negs_per_pos 33.0 +2 34 training.batch_size 1.0 +2 35 model.embedding_dim 1.0 +2 35 optimizer.lr 0.01438412469516731 +2 35 negative_sampler.num_negs_per_pos 78.0 +2 35 training.batch_size 1.0 +2 36 model.embedding_dim 0.0 +2 36 optimizer.lr 0.01109962191384476 +2 36 
negative_sampler.num_negs_per_pos 30.0 +2 36 training.batch_size 0.0 +2 37 model.embedding_dim 1.0 +2 37 optimizer.lr 0.0033208514678297256 +2 37 negative_sampler.num_negs_per_pos 33.0 +2 37 training.batch_size 0.0 +2 38 model.embedding_dim 0.0 +2 38 optimizer.lr 0.0012035702051290116 +2 38 negative_sampler.num_negs_per_pos 73.0 +2 38 training.batch_size 0.0 +2 39 model.embedding_dim 2.0 +2 39 optimizer.lr 0.04628556125397973 +2 39 negative_sampler.num_negs_per_pos 26.0 +2 39 training.batch_size 1.0 +2 40 model.embedding_dim 2.0 +2 40 optimizer.lr 0.09155238119269839 +2 40 negative_sampler.num_negs_per_pos 42.0 +2 40 training.batch_size 0.0 +2 41 model.embedding_dim 2.0 +2 41 optimizer.lr 0.08386542864891004 +2 41 negative_sampler.num_negs_per_pos 62.0 +2 41 training.batch_size 2.0 +2 42 model.embedding_dim 2.0 +2 42 optimizer.lr 0.030029666643138747 +2 42 negative_sampler.num_negs_per_pos 7.0 +2 42 training.batch_size 2.0 +2 43 model.embedding_dim 2.0 +2 43 optimizer.lr 0.004876213590879976 +2 43 negative_sampler.num_negs_per_pos 22.0 +2 43 training.batch_size 1.0 +2 44 model.embedding_dim 0.0 +2 44 optimizer.lr 0.00914155994659327 +2 44 negative_sampler.num_negs_per_pos 57.0 +2 44 training.batch_size 2.0 +2 45 model.embedding_dim 1.0 +2 45 optimizer.lr 0.024451043051260255 +2 45 negative_sampler.num_negs_per_pos 63.0 +2 45 training.batch_size 0.0 +2 46 model.embedding_dim 0.0 +2 46 optimizer.lr 0.002894206911880821 +2 46 negative_sampler.num_negs_per_pos 48.0 +2 46 training.batch_size 1.0 +2 47 model.embedding_dim 1.0 +2 47 optimizer.lr 0.059570430003694434 +2 47 negative_sampler.num_negs_per_pos 47.0 +2 47 training.batch_size 2.0 +2 48 model.embedding_dim 1.0 +2 48 optimizer.lr 0.0015864038709996451 +2 48 negative_sampler.num_negs_per_pos 69.0 +2 48 training.batch_size 2.0 +2 49 model.embedding_dim 1.0 +2 49 optimizer.lr 0.04550695604158771 +2 49 negative_sampler.num_negs_per_pos 19.0 +2 49 training.batch_size 0.0 +2 1 dataset """fb15k237""" +2 1 model 
"""complex""" +2 1 loss """bceaftersigmoid""" +2 1 regularizer """no""" +2 1 optimizer """adam""" +2 1 training_loop """owa""" +2 1 negative_sampler """basic""" +2 1 evaluator """rankbased""" +2 2 dataset """fb15k237""" +2 2 model """complex""" +2 2 loss """bceaftersigmoid""" +2 2 regularizer """no""" +2 2 optimizer """adam""" +2 2 training_loop """owa""" +2 2 negative_sampler """basic""" +2 2 evaluator """rankbased""" +2 3 dataset """fb15k237""" +2 3 model """complex""" +2 3 loss """bceaftersigmoid""" +2 3 regularizer """no""" +2 3 optimizer """adam""" +2 3 training_loop """owa""" +2 3 negative_sampler """basic""" +2 3 evaluator """rankbased""" +2 4 dataset """fb15k237""" +2 4 model """complex""" +2 4 loss """bceaftersigmoid""" +2 4 regularizer """no""" +2 4 optimizer """adam""" +2 4 training_loop """owa""" +2 4 negative_sampler """basic""" +2 4 evaluator """rankbased""" +2 5 dataset """fb15k237""" +2 5 model """complex""" +2 5 loss """bceaftersigmoid""" +2 5 regularizer """no""" +2 5 optimizer """adam""" +2 5 training_loop """owa""" +2 5 negative_sampler """basic""" +2 5 evaluator """rankbased""" +2 6 dataset """fb15k237""" +2 6 model """complex""" +2 6 loss """bceaftersigmoid""" +2 6 regularizer """no""" +2 6 optimizer """adam""" +2 6 training_loop """owa""" +2 6 negative_sampler """basic""" +2 6 evaluator """rankbased""" +2 7 dataset """fb15k237""" +2 7 model """complex""" +2 7 loss """bceaftersigmoid""" +2 7 regularizer """no""" +2 7 optimizer """adam""" +2 7 training_loop """owa""" +2 7 negative_sampler """basic""" +2 7 evaluator """rankbased""" +2 8 dataset """fb15k237""" +2 8 model """complex""" +2 8 loss """bceaftersigmoid""" +2 8 regularizer """no""" +2 8 optimizer """adam""" +2 8 training_loop """owa""" +2 8 negative_sampler """basic""" +2 8 evaluator """rankbased""" +2 9 dataset """fb15k237""" +2 9 model """complex""" +2 9 loss """bceaftersigmoid""" +2 9 regularizer """no""" +2 9 optimizer """adam""" +2 9 training_loop """owa""" +2 9 negative_sampler 
"""basic""" +2 9 evaluator """rankbased""" +2 10 dataset """fb15k237""" +2 10 model """complex""" +2 10 loss """bceaftersigmoid""" +2 10 regularizer """no""" +2 10 optimizer """adam""" +2 10 training_loop """owa""" +2 10 negative_sampler """basic""" +2 10 evaluator """rankbased""" +2 11 dataset """fb15k237""" +2 11 model """complex""" +2 11 loss """bceaftersigmoid""" +2 11 regularizer """no""" +2 11 optimizer """adam""" +2 11 training_loop """owa""" +2 11 negative_sampler """basic""" +2 11 evaluator """rankbased""" +2 12 dataset """fb15k237""" +2 12 model """complex""" +2 12 loss """bceaftersigmoid""" +2 12 regularizer """no""" +2 12 optimizer """adam""" +2 12 training_loop """owa""" +2 12 negative_sampler """basic""" +2 12 evaluator """rankbased""" +2 13 dataset """fb15k237""" +2 13 model """complex""" +2 13 loss """bceaftersigmoid""" +2 13 regularizer """no""" +2 13 optimizer """adam""" +2 13 training_loop """owa""" +2 13 negative_sampler """basic""" +2 13 evaluator """rankbased""" +2 14 dataset """fb15k237""" +2 14 model """complex""" +2 14 loss """bceaftersigmoid""" +2 14 regularizer """no""" +2 14 optimizer """adam""" +2 14 training_loop """owa""" +2 14 negative_sampler """basic""" +2 14 evaluator """rankbased""" +2 15 dataset """fb15k237""" +2 15 model """complex""" +2 15 loss """bceaftersigmoid""" +2 15 regularizer """no""" +2 15 optimizer """adam""" +2 15 training_loop """owa""" +2 15 negative_sampler """basic""" +2 15 evaluator """rankbased""" +2 16 dataset """fb15k237""" +2 16 model """complex""" +2 16 loss """bceaftersigmoid""" +2 16 regularizer """no""" +2 16 optimizer """adam""" +2 16 training_loop """owa""" +2 16 negative_sampler """basic""" +2 16 evaluator """rankbased""" +2 17 dataset """fb15k237""" +2 17 model """complex""" +2 17 loss """bceaftersigmoid""" +2 17 regularizer """no""" +2 17 optimizer """adam""" +2 17 training_loop """owa""" +2 17 negative_sampler """basic""" +2 17 evaluator """rankbased""" +2 18 dataset """fb15k237""" +2 18 model 
"""complex""" +2 18 loss """bceaftersigmoid""" +2 18 regularizer """no""" +2 18 optimizer """adam""" +2 18 training_loop """owa""" +2 18 negative_sampler """basic""" +2 18 evaluator """rankbased""" +2 19 dataset """fb15k237""" +2 19 model """complex""" +2 19 loss """bceaftersigmoid""" +2 19 regularizer """no""" +2 19 optimizer """adam""" +2 19 training_loop """owa""" +2 19 negative_sampler """basic""" +2 19 evaluator """rankbased""" +2 20 dataset """fb15k237""" +2 20 model """complex""" +2 20 loss """bceaftersigmoid""" +2 20 regularizer """no""" +2 20 optimizer """adam""" +2 20 training_loop """owa""" +2 20 negative_sampler """basic""" +2 20 evaluator """rankbased""" +2 21 dataset """fb15k237""" +2 21 model """complex""" +2 21 loss """bceaftersigmoid""" +2 21 regularizer """no""" +2 21 optimizer """adam""" +2 21 training_loop """owa""" +2 21 negative_sampler """basic""" +2 21 evaluator """rankbased""" +2 22 dataset """fb15k237""" +2 22 model """complex""" +2 22 loss """bceaftersigmoid""" +2 22 regularizer """no""" +2 22 optimizer """adam""" +2 22 training_loop """owa""" +2 22 negative_sampler """basic""" +2 22 evaluator """rankbased""" +2 23 dataset """fb15k237""" +2 23 model """complex""" +2 23 loss """bceaftersigmoid""" +2 23 regularizer """no""" +2 23 optimizer """adam""" +2 23 training_loop """owa""" +2 23 negative_sampler """basic""" +2 23 evaluator """rankbased""" +2 24 dataset """fb15k237""" +2 24 model """complex""" +2 24 loss """bceaftersigmoid""" +2 24 regularizer """no""" +2 24 optimizer """adam""" +2 24 training_loop """owa""" +2 24 negative_sampler """basic""" +2 24 evaluator """rankbased""" +2 25 dataset """fb15k237""" +2 25 model """complex""" +2 25 loss """bceaftersigmoid""" +2 25 regularizer """no""" +2 25 optimizer """adam""" +2 25 training_loop """owa""" +2 25 negative_sampler """basic""" +2 25 evaluator """rankbased""" +2 26 dataset """fb15k237""" +2 26 model """complex""" +2 26 loss """bceaftersigmoid""" +2 26 regularizer """no""" +2 26 
optimizer """adam""" +2 26 training_loop """owa""" +2 26 negative_sampler """basic""" +2 26 evaluator """rankbased""" +2 27 dataset """fb15k237""" +2 27 model """complex""" +2 27 loss """bceaftersigmoid""" +2 27 regularizer """no""" +2 27 optimizer """adam""" +2 27 training_loop """owa""" +2 27 negative_sampler """basic""" +2 27 evaluator """rankbased""" +2 28 dataset """fb15k237""" +2 28 model """complex""" +2 28 loss """bceaftersigmoid""" +2 28 regularizer """no""" +2 28 optimizer """adam""" +2 28 training_loop """owa""" +2 28 negative_sampler """basic""" +2 28 evaluator """rankbased""" +2 29 dataset """fb15k237""" +2 29 model """complex""" +2 29 loss """bceaftersigmoid""" +2 29 regularizer """no""" +2 29 optimizer """adam""" +2 29 training_loop """owa""" +2 29 negative_sampler """basic""" +2 29 evaluator """rankbased""" +2 30 dataset """fb15k237""" +2 30 model """complex""" +2 30 loss """bceaftersigmoid""" +2 30 regularizer """no""" +2 30 optimizer """adam""" +2 30 training_loop """owa""" +2 30 negative_sampler """basic""" +2 30 evaluator """rankbased""" +2 31 dataset """fb15k237""" +2 31 model """complex""" +2 31 loss """bceaftersigmoid""" +2 31 regularizer """no""" +2 31 optimizer """adam""" +2 31 training_loop """owa""" +2 31 negative_sampler """basic""" +2 31 evaluator """rankbased""" +2 32 dataset """fb15k237""" +2 32 model """complex""" +2 32 loss """bceaftersigmoid""" +2 32 regularizer """no""" +2 32 optimizer """adam""" +2 32 training_loop """owa""" +2 32 negative_sampler """basic""" +2 32 evaluator """rankbased""" +2 33 dataset """fb15k237""" +2 33 model """complex""" +2 33 loss """bceaftersigmoid""" +2 33 regularizer """no""" +2 33 optimizer """adam""" +2 33 training_loop """owa""" +2 33 negative_sampler """basic""" +2 33 evaluator """rankbased""" +2 34 dataset """fb15k237""" +2 34 model """complex""" +2 34 loss """bceaftersigmoid""" +2 34 regularizer """no""" +2 34 optimizer """adam""" +2 34 training_loop """owa""" +2 34 negative_sampler """basic""" 
+2 34 evaluator """rankbased""" +2 35 dataset """fb15k237""" +2 35 model """complex""" +2 35 loss """bceaftersigmoid""" +2 35 regularizer """no""" +2 35 optimizer """adam""" +2 35 training_loop """owa""" +2 35 negative_sampler """basic""" +2 35 evaluator """rankbased""" +2 36 dataset """fb15k237""" +2 36 model """complex""" +2 36 loss """bceaftersigmoid""" +2 36 regularizer """no""" +2 36 optimizer """adam""" +2 36 training_loop """owa""" +2 36 negative_sampler """basic""" +2 36 evaluator """rankbased""" +2 37 dataset """fb15k237""" +2 37 model """complex""" +2 37 loss """bceaftersigmoid""" +2 37 regularizer """no""" +2 37 optimizer """adam""" +2 37 training_loop """owa""" +2 37 negative_sampler """basic""" +2 37 evaluator """rankbased""" +2 38 dataset """fb15k237""" +2 38 model """complex""" +2 38 loss """bceaftersigmoid""" +2 38 regularizer """no""" +2 38 optimizer """adam""" +2 38 training_loop """owa""" +2 38 negative_sampler """basic""" +2 38 evaluator """rankbased""" +2 39 dataset """fb15k237""" +2 39 model """complex""" +2 39 loss """bceaftersigmoid""" +2 39 regularizer """no""" +2 39 optimizer """adam""" +2 39 training_loop """owa""" +2 39 negative_sampler """basic""" +2 39 evaluator """rankbased""" +2 40 dataset """fb15k237""" +2 40 model """complex""" +2 40 loss """bceaftersigmoid""" +2 40 regularizer """no""" +2 40 optimizer """adam""" +2 40 training_loop """owa""" +2 40 negative_sampler """basic""" +2 40 evaluator """rankbased""" +2 41 dataset """fb15k237""" +2 41 model """complex""" +2 41 loss """bceaftersigmoid""" +2 41 regularizer """no""" +2 41 optimizer """adam""" +2 41 training_loop """owa""" +2 41 negative_sampler """basic""" +2 41 evaluator """rankbased""" +2 42 dataset """fb15k237""" +2 42 model """complex""" +2 42 loss """bceaftersigmoid""" +2 42 regularizer """no""" +2 42 optimizer """adam""" +2 42 training_loop """owa""" +2 42 negative_sampler """basic""" +2 42 evaluator """rankbased""" +2 43 dataset """fb15k237""" +2 43 model """complex""" 
+2 43 loss """bceaftersigmoid""" +2 43 regularizer """no""" +2 43 optimizer """adam""" +2 43 training_loop """owa""" +2 43 negative_sampler """basic""" +2 43 evaluator """rankbased""" +2 44 dataset """fb15k237""" +2 44 model """complex""" +2 44 loss """bceaftersigmoid""" +2 44 regularizer """no""" +2 44 optimizer """adam""" +2 44 training_loop """owa""" +2 44 negative_sampler """basic""" +2 44 evaluator """rankbased""" +2 45 dataset """fb15k237""" +2 45 model """complex""" +2 45 loss """bceaftersigmoid""" +2 45 regularizer """no""" +2 45 optimizer """adam""" +2 45 training_loop """owa""" +2 45 negative_sampler """basic""" +2 45 evaluator """rankbased""" +2 46 dataset """fb15k237""" +2 46 model """complex""" +2 46 loss """bceaftersigmoid""" +2 46 regularizer """no""" +2 46 optimizer """adam""" +2 46 training_loop """owa""" +2 46 negative_sampler """basic""" +2 46 evaluator """rankbased""" +2 47 dataset """fb15k237""" +2 47 model """complex""" +2 47 loss """bceaftersigmoid""" +2 47 regularizer """no""" +2 47 optimizer """adam""" +2 47 training_loop """owa""" +2 47 negative_sampler """basic""" +2 47 evaluator """rankbased""" +2 48 dataset """fb15k237""" +2 48 model """complex""" +2 48 loss """bceaftersigmoid""" +2 48 regularizer """no""" +2 48 optimizer """adam""" +2 48 training_loop """owa""" +2 48 negative_sampler """basic""" +2 48 evaluator """rankbased""" +2 49 dataset """fb15k237""" +2 49 model """complex""" +2 49 loss """bceaftersigmoid""" +2 49 regularizer """no""" +2 49 optimizer """adam""" +2 49 training_loop """owa""" +2 49 negative_sampler """basic""" +2 49 evaluator """rankbased""" +3 1 model.embedding_dim 2.0 +3 1 optimizer.lr 0.011737609021898906 +3 1 negative_sampler.num_negs_per_pos 55.0 +3 1 training.batch_size 0.0 +3 2 model.embedding_dim 2.0 +3 2 optimizer.lr 0.00537017730942467 +3 2 negative_sampler.num_negs_per_pos 62.0 +3 2 training.batch_size 1.0 +3 3 model.embedding_dim 1.0 +3 3 optimizer.lr 0.0781967091574402 +3 3 
negative_sampler.num_negs_per_pos 43.0 +3 3 training.batch_size 0.0 +3 4 model.embedding_dim 0.0 +3 4 optimizer.lr 0.008613291287166333 +3 4 negative_sampler.num_negs_per_pos 79.0 +3 4 training.batch_size 1.0 +3 5 model.embedding_dim 0.0 +3 5 optimizer.lr 0.07497841115088787 +3 5 negative_sampler.num_negs_per_pos 51.0 +3 5 training.batch_size 0.0 +3 6 model.embedding_dim 0.0 +3 6 optimizer.lr 0.003283617664152116 +3 6 negative_sampler.num_negs_per_pos 39.0 +3 6 training.batch_size 1.0 +3 7 model.embedding_dim 2.0 +3 7 optimizer.lr 0.009753795026244886 +3 7 negative_sampler.num_negs_per_pos 21.0 +3 7 training.batch_size 2.0 +3 8 model.embedding_dim 1.0 +3 8 optimizer.lr 0.0012552764128390046 +3 8 negative_sampler.num_negs_per_pos 73.0 +3 8 training.batch_size 1.0 +3 9 model.embedding_dim 2.0 +3 9 optimizer.lr 0.06432208425166965 +3 9 negative_sampler.num_negs_per_pos 28.0 +3 9 training.batch_size 1.0 +3 10 model.embedding_dim 1.0 +3 10 optimizer.lr 0.020841406866336235 +3 10 negative_sampler.num_negs_per_pos 60.0 +3 10 training.batch_size 2.0 +3 11 model.embedding_dim 0.0 +3 11 optimizer.lr 0.003071046753988392 +3 11 negative_sampler.num_negs_per_pos 55.0 +3 11 training.batch_size 1.0 +3 12 model.embedding_dim 1.0 +3 12 optimizer.lr 0.0032171286422962085 +3 12 negative_sampler.num_negs_per_pos 93.0 +3 12 training.batch_size 0.0 +3 13 model.embedding_dim 2.0 +3 13 optimizer.lr 0.004056269887422708 +3 13 negative_sampler.num_negs_per_pos 81.0 +3 13 training.batch_size 2.0 +3 14 model.embedding_dim 1.0 +3 14 optimizer.lr 0.060351162974314324 +3 14 negative_sampler.num_negs_per_pos 63.0 +3 14 training.batch_size 2.0 +3 15 model.embedding_dim 1.0 +3 15 optimizer.lr 0.008613035938186095 +3 15 negative_sampler.num_negs_per_pos 3.0 +3 15 training.batch_size 1.0 +3 16 model.embedding_dim 2.0 +3 16 optimizer.lr 0.05658035070789325 +3 16 negative_sampler.num_negs_per_pos 11.0 +3 16 training.batch_size 2.0 +3 17 model.embedding_dim 1.0 +3 17 optimizer.lr 0.05981599756161139 +3 
17 negative_sampler.num_negs_per_pos 98.0 +3 17 training.batch_size 2.0 +3 18 model.embedding_dim 0.0 +3 18 optimizer.lr 0.07423037641904479 +3 18 negative_sampler.num_negs_per_pos 68.0 +3 18 training.batch_size 0.0 +3 19 model.embedding_dim 0.0 +3 19 optimizer.lr 0.0260748672770528 +3 19 negative_sampler.num_negs_per_pos 62.0 +3 19 training.batch_size 1.0 +3 20 model.embedding_dim 0.0 +3 20 optimizer.lr 0.017934895497531897 +3 20 negative_sampler.num_negs_per_pos 96.0 +3 20 training.batch_size 2.0 +3 21 model.embedding_dim 0.0 +3 21 optimizer.lr 0.0010208734343496392 +3 21 negative_sampler.num_negs_per_pos 47.0 +3 21 training.batch_size 0.0 +3 22 model.embedding_dim 2.0 +3 22 optimizer.lr 0.00125404397710592 +3 22 negative_sampler.num_negs_per_pos 53.0 +3 22 training.batch_size 1.0 +3 23 model.embedding_dim 1.0 +3 23 optimizer.lr 0.002907766399650996 +3 23 negative_sampler.num_negs_per_pos 15.0 +3 23 training.batch_size 2.0 +3 24 model.embedding_dim 0.0 +3 24 optimizer.lr 0.030580438927582883 +3 24 negative_sampler.num_negs_per_pos 28.0 +3 24 training.batch_size 2.0 +3 25 model.embedding_dim 1.0 +3 25 optimizer.lr 0.0031696060009471267 +3 25 negative_sampler.num_negs_per_pos 89.0 +3 25 training.batch_size 2.0 +3 26 model.embedding_dim 2.0 +3 26 optimizer.lr 0.03269840660060904 +3 26 negative_sampler.num_negs_per_pos 11.0 +3 26 training.batch_size 2.0 +3 27 model.embedding_dim 2.0 +3 27 optimizer.lr 0.003868650059665657 +3 27 negative_sampler.num_negs_per_pos 57.0 +3 27 training.batch_size 2.0 +3 28 model.embedding_dim 2.0 +3 28 optimizer.lr 0.006616264205691584 +3 28 negative_sampler.num_negs_per_pos 78.0 +3 28 training.batch_size 1.0 +3 29 model.embedding_dim 2.0 +3 29 optimizer.lr 0.009561897327556763 +3 29 negative_sampler.num_negs_per_pos 38.0 +3 29 training.batch_size 0.0 +3 30 model.embedding_dim 0.0 +3 30 optimizer.lr 0.0013624991609157474 +3 30 negative_sampler.num_negs_per_pos 21.0 +3 30 training.batch_size 0.0 +3 31 model.embedding_dim 1.0 +3 31 
optimizer.lr 0.00498982911657607 +3 31 negative_sampler.num_negs_per_pos 82.0 +3 31 training.batch_size 0.0 +3 32 model.embedding_dim 1.0 +3 32 optimizer.lr 0.020439900966370182 +3 32 negative_sampler.num_negs_per_pos 34.0 +3 32 training.batch_size 1.0 +3 33 model.embedding_dim 1.0 +3 33 optimizer.lr 0.02533116997228355 +3 33 negative_sampler.num_negs_per_pos 41.0 +3 33 training.batch_size 0.0 +3 34 model.embedding_dim 1.0 +3 34 optimizer.lr 0.0014192298916618606 +3 34 negative_sampler.num_negs_per_pos 90.0 +3 34 training.batch_size 0.0 +3 35 model.embedding_dim 2.0 +3 35 optimizer.lr 0.00933096639110895 +3 35 negative_sampler.num_negs_per_pos 61.0 +3 35 training.batch_size 2.0 +3 36 model.embedding_dim 2.0 +3 36 optimizer.lr 0.07554805514035742 +3 36 negative_sampler.num_negs_per_pos 67.0 +3 36 training.batch_size 0.0 +3 37 model.embedding_dim 1.0 +3 37 optimizer.lr 0.038048833492079076 +3 37 negative_sampler.num_negs_per_pos 39.0 +3 37 training.batch_size 1.0 +3 1 dataset """fb15k237""" +3 1 model """complex""" +3 1 loss """softplus""" +3 1 regularizer """no""" +3 1 optimizer """adam""" +3 1 training_loop """owa""" +3 1 negative_sampler """basic""" +3 1 evaluator """rankbased""" +3 2 dataset """fb15k237""" +3 2 model """complex""" +3 2 loss """softplus""" +3 2 regularizer """no""" +3 2 optimizer """adam""" +3 2 training_loop """owa""" +3 2 negative_sampler """basic""" +3 2 evaluator """rankbased""" +3 3 dataset """fb15k237""" +3 3 model """complex""" +3 3 loss """softplus""" +3 3 regularizer """no""" +3 3 optimizer """adam""" +3 3 training_loop """owa""" +3 3 negative_sampler """basic""" +3 3 evaluator """rankbased""" +3 4 dataset """fb15k237""" +3 4 model """complex""" +3 4 loss """softplus""" +3 4 regularizer """no""" +3 4 optimizer """adam""" +3 4 training_loop """owa""" +3 4 negative_sampler """basic""" +3 4 evaluator """rankbased""" +3 5 dataset """fb15k237""" +3 5 model """complex""" +3 5 loss """softplus""" +3 5 regularizer """no""" +3 5 optimizer 
"""adam""" +3 5 training_loop """owa""" +3 5 negative_sampler """basic""" +3 5 evaluator """rankbased""" +3 6 dataset """fb15k237""" +3 6 model """complex""" +3 6 loss """softplus""" +3 6 regularizer """no""" +3 6 optimizer """adam""" +3 6 training_loop """owa""" +3 6 negative_sampler """basic""" +3 6 evaluator """rankbased""" +3 7 dataset """fb15k237""" +3 7 model """complex""" +3 7 loss """softplus""" +3 7 regularizer """no""" +3 7 optimizer """adam""" +3 7 training_loop """owa""" +3 7 negative_sampler """basic""" +3 7 evaluator """rankbased""" +3 8 dataset """fb15k237""" +3 8 model """complex""" +3 8 loss """softplus""" +3 8 regularizer """no""" +3 8 optimizer """adam""" +3 8 training_loop """owa""" +3 8 negative_sampler """basic""" +3 8 evaluator """rankbased""" +3 9 dataset """fb15k237""" +3 9 model """complex""" +3 9 loss """softplus""" +3 9 regularizer """no""" +3 9 optimizer """adam""" +3 9 training_loop """owa""" +3 9 negative_sampler """basic""" +3 9 evaluator """rankbased""" +3 10 dataset """fb15k237""" +3 10 model """complex""" +3 10 loss """softplus""" +3 10 regularizer """no""" +3 10 optimizer """adam""" +3 10 training_loop """owa""" +3 10 negative_sampler """basic""" +3 10 evaluator """rankbased""" +3 11 dataset """fb15k237""" +3 11 model """complex""" +3 11 loss """softplus""" +3 11 regularizer """no""" +3 11 optimizer """adam""" +3 11 training_loop """owa""" +3 11 negative_sampler """basic""" +3 11 evaluator """rankbased""" +3 12 dataset """fb15k237""" +3 12 model """complex""" +3 12 loss """softplus""" +3 12 regularizer """no""" +3 12 optimizer """adam""" +3 12 training_loop """owa""" +3 12 negative_sampler """basic""" +3 12 evaluator """rankbased""" +3 13 dataset """fb15k237""" +3 13 model """complex""" +3 13 loss """softplus""" +3 13 regularizer """no""" +3 13 optimizer """adam""" +3 13 training_loop """owa""" +3 13 negative_sampler """basic""" +3 13 evaluator """rankbased""" +3 14 dataset """fb15k237""" +3 14 model """complex""" +3 14 loss 
"""softplus""" +3 14 regularizer """no""" +3 14 optimizer """adam""" +3 14 training_loop """owa""" +3 14 negative_sampler """basic""" +3 14 evaluator """rankbased""" +3 15 dataset """fb15k237""" +3 15 model """complex""" +3 15 loss """softplus""" +3 15 regularizer """no""" +3 15 optimizer """adam""" +3 15 training_loop """owa""" +3 15 negative_sampler """basic""" +3 15 evaluator """rankbased""" +3 16 dataset """fb15k237""" +3 16 model """complex""" +3 16 loss """softplus""" +3 16 regularizer """no""" +3 16 optimizer """adam""" +3 16 training_loop """owa""" +3 16 negative_sampler """basic""" +3 16 evaluator """rankbased""" +3 17 dataset """fb15k237""" +3 17 model """complex""" +3 17 loss """softplus""" +3 17 regularizer """no""" +3 17 optimizer """adam""" +3 17 training_loop """owa""" +3 17 negative_sampler """basic""" +3 17 evaluator """rankbased""" +3 18 dataset """fb15k237""" +3 18 model """complex""" +3 18 loss """softplus""" +3 18 regularizer """no""" +3 18 optimizer """adam""" +3 18 training_loop """owa""" +3 18 negative_sampler """basic""" +3 18 evaluator """rankbased""" +3 19 dataset """fb15k237""" +3 19 model """complex""" +3 19 loss """softplus""" +3 19 regularizer """no""" +3 19 optimizer """adam""" +3 19 training_loop """owa""" +3 19 negative_sampler """basic""" +3 19 evaluator """rankbased""" +3 20 dataset """fb15k237""" +3 20 model """complex""" +3 20 loss """softplus""" +3 20 regularizer """no""" +3 20 optimizer """adam""" +3 20 training_loop """owa""" +3 20 negative_sampler """basic""" +3 20 evaluator """rankbased""" +3 21 dataset """fb15k237""" +3 21 model """complex""" +3 21 loss """softplus""" +3 21 regularizer """no""" +3 21 optimizer """adam""" +3 21 training_loop """owa""" +3 21 negative_sampler """basic""" +3 21 evaluator """rankbased""" +3 22 dataset """fb15k237""" +3 22 model """complex""" +3 22 loss """softplus""" +3 22 regularizer """no""" +3 22 optimizer """adam""" +3 22 training_loop """owa""" +3 22 negative_sampler """basic""" +3 22 
evaluator """rankbased""" +3 23 dataset """fb15k237""" +3 23 model """complex""" +3 23 loss """softplus""" +3 23 regularizer """no""" +3 23 optimizer """adam""" +3 23 training_loop """owa""" +3 23 negative_sampler """basic""" +3 23 evaluator """rankbased""" +3 24 dataset """fb15k237""" +3 24 model """complex""" +3 24 loss """softplus""" +3 24 regularizer """no""" +3 24 optimizer """adam""" +3 24 training_loop """owa""" +3 24 negative_sampler """basic""" +3 24 evaluator """rankbased""" +3 25 dataset """fb15k237""" +3 25 model """complex""" +3 25 loss """softplus""" +3 25 regularizer """no""" +3 25 optimizer """adam""" +3 25 training_loop """owa""" +3 25 negative_sampler """basic""" +3 25 evaluator """rankbased""" +3 26 dataset """fb15k237""" +3 26 model """complex""" +3 26 loss """softplus""" +3 26 regularizer """no""" +3 26 optimizer """adam""" +3 26 training_loop """owa""" +3 26 negative_sampler """basic""" +3 26 evaluator """rankbased""" +3 27 dataset """fb15k237""" +3 27 model """complex""" +3 27 loss """softplus""" +3 27 regularizer """no""" +3 27 optimizer """adam""" +3 27 training_loop """owa""" +3 27 negative_sampler """basic""" +3 27 evaluator """rankbased""" +3 28 dataset """fb15k237""" +3 28 model """complex""" +3 28 loss """softplus""" +3 28 regularizer """no""" +3 28 optimizer """adam""" +3 28 training_loop """owa""" +3 28 negative_sampler """basic""" +3 28 evaluator """rankbased""" +3 29 dataset """fb15k237""" +3 29 model """complex""" +3 29 loss """softplus""" +3 29 regularizer """no""" +3 29 optimizer """adam""" +3 29 training_loop """owa""" +3 29 negative_sampler """basic""" +3 29 evaluator """rankbased""" +3 30 dataset """fb15k237""" +3 30 model """complex""" +3 30 loss """softplus""" +3 30 regularizer """no""" +3 30 optimizer """adam""" +3 30 training_loop """owa""" +3 30 negative_sampler """basic""" +3 30 evaluator """rankbased""" +3 31 dataset """fb15k237""" +3 31 model """complex""" +3 31 loss """softplus""" +3 31 regularizer """no""" +3 31 
optimizer """adam""" +3 31 training_loop """owa""" +3 31 negative_sampler """basic""" +3 31 evaluator """rankbased""" +3 32 dataset """fb15k237""" +3 32 model """complex""" +3 32 loss """softplus""" +3 32 regularizer """no""" +3 32 optimizer """adam""" +3 32 training_loop """owa""" +3 32 negative_sampler """basic""" +3 32 evaluator """rankbased""" +3 33 dataset """fb15k237""" +3 33 model """complex""" +3 33 loss """softplus""" +3 33 regularizer """no""" +3 33 optimizer """adam""" +3 33 training_loop """owa""" +3 33 negative_sampler """basic""" +3 33 evaluator """rankbased""" +3 34 dataset """fb15k237""" +3 34 model """complex""" +3 34 loss """softplus""" +3 34 regularizer """no""" +3 34 optimizer """adam""" +3 34 training_loop """owa""" +3 34 negative_sampler """basic""" +3 34 evaluator """rankbased""" +3 35 dataset """fb15k237""" +3 35 model """complex""" +3 35 loss """softplus""" +3 35 regularizer """no""" +3 35 optimizer """adam""" +3 35 training_loop """owa""" +3 35 negative_sampler """basic""" +3 35 evaluator """rankbased""" +3 36 dataset """fb15k237""" +3 36 model """complex""" +3 36 loss """softplus""" +3 36 regularizer """no""" +3 36 optimizer """adam""" +3 36 training_loop """owa""" +3 36 negative_sampler """basic""" +3 36 evaluator """rankbased""" +3 37 dataset """fb15k237""" +3 37 model """complex""" +3 37 loss """softplus""" +3 37 regularizer """no""" +3 37 optimizer """adam""" +3 37 training_loop """owa""" +3 37 negative_sampler """basic""" +3 37 evaluator """rankbased""" +4 1 model.embedding_dim 1.0 +4 1 loss.margin 25.27079402324726 +4 1 loss.adversarial_temperature 0.5065950220805256 +4 1 optimizer.lr 0.05162613250741366 +4 1 negative_sampler.num_negs_per_pos 47.0 +4 1 training.batch_size 1.0 +4 2 model.embedding_dim 0.0 +4 2 loss.margin 17.663363342086637 +4 2 loss.adversarial_temperature 0.48961020284398327 +4 2 optimizer.lr 0.0023531107823058107 +4 2 negative_sampler.num_negs_per_pos 35.0 +4 2 training.batch_size 0.0 +4 3 model.embedding_dim 1.0 
+4 3 loss.margin 1.0392639153334067 +4 3 loss.adversarial_temperature 0.3458558039381531 +4 3 optimizer.lr 0.05265749674901745 +4 3 negative_sampler.num_negs_per_pos 7.0 +4 3 training.batch_size 1.0 +4 4 model.embedding_dim 1.0 +4 4 loss.margin 8.592714196985492 +4 4 loss.adversarial_temperature 0.192546978919181 +4 4 optimizer.lr 0.012752018887739885 +4 4 negative_sampler.num_negs_per_pos 75.0 +4 4 training.batch_size 0.0 +4 5 model.embedding_dim 0.0 +4 5 loss.margin 26.747230777631298 +4 5 loss.adversarial_temperature 0.3730026372961829 +4 5 optimizer.lr 0.0037408810518387518 +4 5 negative_sampler.num_negs_per_pos 70.0 +4 5 training.batch_size 0.0 +4 6 model.embedding_dim 1.0 +4 6 loss.margin 26.690630908982605 +4 6 loss.adversarial_temperature 0.17970916094232628 +4 6 optimizer.lr 0.011688095866462826 +4 6 negative_sampler.num_negs_per_pos 19.0 +4 6 training.batch_size 1.0 +4 7 model.embedding_dim 0.0 +4 7 loss.margin 4.812637783903918 +4 7 loss.adversarial_temperature 0.3084926079991531 +4 7 optimizer.lr 0.06620599927914216 +4 7 negative_sampler.num_negs_per_pos 0.0 +4 7 training.batch_size 2.0 +4 8 model.embedding_dim 2.0 +4 8 loss.margin 16.242033252217063 +4 8 loss.adversarial_temperature 0.5640987618489349 +4 8 optimizer.lr 0.09074151375079016 +4 8 negative_sampler.num_negs_per_pos 79.0 +4 8 training.batch_size 2.0 +4 9 model.embedding_dim 2.0 +4 9 loss.margin 19.312842193512278 +4 9 loss.adversarial_temperature 0.42751181025071466 +4 9 optimizer.lr 0.013528675596904469 +4 9 negative_sampler.num_negs_per_pos 68.0 +4 9 training.batch_size 0.0 +4 10 model.embedding_dim 0.0 +4 10 loss.margin 14.441328417464517 +4 10 loss.adversarial_temperature 0.9427874693785062 +4 10 optimizer.lr 0.026771634326336915 +4 10 negative_sampler.num_negs_per_pos 10.0 +4 10 training.batch_size 2.0 +4 11 model.embedding_dim 2.0 +4 11 loss.margin 11.954510856789286 +4 11 loss.adversarial_temperature 0.266990326370236 +4 11 optimizer.lr 0.0575189406447229 +4 11 
negative_sampler.num_negs_per_pos 70.0 +4 11 training.batch_size 0.0 +4 12 model.embedding_dim 0.0 +4 12 loss.margin 19.513841202002713 +4 12 loss.adversarial_temperature 0.3547141471377825 +4 12 optimizer.lr 0.00887314488870299 +4 12 negative_sampler.num_negs_per_pos 66.0 +4 12 training.batch_size 2.0 +4 13 model.embedding_dim 2.0 +4 13 loss.margin 17.487519536677922 +4 13 loss.adversarial_temperature 0.23463765246582102 +4 13 optimizer.lr 0.0091973483588361 +4 13 negative_sampler.num_negs_per_pos 39.0 +4 13 training.batch_size 1.0 +4 14 model.embedding_dim 1.0 +4 14 loss.margin 3.3214433573104323 +4 14 loss.adversarial_temperature 0.4300951359919926 +4 14 optimizer.lr 0.01115140525929177 +4 14 negative_sampler.num_negs_per_pos 54.0 +4 14 training.batch_size 0.0 +4 15 model.embedding_dim 0.0 +4 15 loss.margin 9.693518422226912 +4 15 loss.adversarial_temperature 0.7137568153073379 +4 15 optimizer.lr 0.013124385646179642 +4 15 negative_sampler.num_negs_per_pos 2.0 +4 15 training.batch_size 2.0 +4 16 model.embedding_dim 1.0 +4 16 loss.margin 6.315437936867262 +4 16 loss.adversarial_temperature 0.21677538392607967 +4 16 optimizer.lr 0.03794428804657612 +4 16 negative_sampler.num_negs_per_pos 79.0 +4 16 training.batch_size 0.0 +4 17 model.embedding_dim 1.0 +4 17 loss.margin 22.476776667168846 +4 17 loss.adversarial_temperature 0.5282482806491444 +4 17 optimizer.lr 0.043797888940074095 +4 17 negative_sampler.num_negs_per_pos 97.0 +4 17 training.batch_size 0.0 +4 18 model.embedding_dim 2.0 +4 18 loss.margin 2.5805584233799843 +4 18 loss.adversarial_temperature 0.33113068562413756 +4 18 optimizer.lr 0.0033898704932379077 +4 18 negative_sampler.num_negs_per_pos 17.0 +4 18 training.batch_size 2.0 +4 19 model.embedding_dim 2.0 +4 19 loss.margin 20.587062284913163 +4 19 loss.adversarial_temperature 0.3567158099247974 +4 19 optimizer.lr 0.02492850273920856 +4 19 negative_sampler.num_negs_per_pos 59.0 +4 19 training.batch_size 2.0 +4 20 model.embedding_dim 0.0 +4 20 loss.margin 
27.775301251240563 +4 20 loss.adversarial_temperature 0.4341620823932548 +4 20 optimizer.lr 0.04279090826805961 +4 20 negative_sampler.num_negs_per_pos 18.0 +4 20 training.batch_size 0.0 +4 1 dataset """fb15k237""" +4 1 model """complex""" +4 1 loss """nssa""" +4 1 regularizer """no""" +4 1 optimizer """adam""" +4 1 training_loop """owa""" +4 1 negative_sampler """basic""" +4 1 evaluator """rankbased""" +4 2 dataset """fb15k237""" +4 2 model """complex""" +4 2 loss """nssa""" +4 2 regularizer """no""" +4 2 optimizer """adam""" +4 2 training_loop """owa""" +4 2 negative_sampler """basic""" +4 2 evaluator """rankbased""" +4 3 dataset """fb15k237""" +4 3 model """complex""" +4 3 loss """nssa""" +4 3 regularizer """no""" +4 3 optimizer """adam""" +4 3 training_loop """owa""" +4 3 negative_sampler """basic""" +4 3 evaluator """rankbased""" +4 4 dataset """fb15k237""" +4 4 model """complex""" +4 4 loss """nssa""" +4 4 regularizer """no""" +4 4 optimizer """adam""" +4 4 training_loop """owa""" +4 4 negative_sampler """basic""" +4 4 evaluator """rankbased""" +4 5 dataset """fb15k237""" +4 5 model """complex""" +4 5 loss """nssa""" +4 5 regularizer """no""" +4 5 optimizer """adam""" +4 5 training_loop """owa""" +4 5 negative_sampler """basic""" +4 5 evaluator """rankbased""" +4 6 dataset """fb15k237""" +4 6 model """complex""" +4 6 loss """nssa""" +4 6 regularizer """no""" +4 6 optimizer """adam""" +4 6 training_loop """owa""" +4 6 negative_sampler """basic""" +4 6 evaluator """rankbased""" +4 7 dataset """fb15k237""" +4 7 model """complex""" +4 7 loss """nssa""" +4 7 regularizer """no""" +4 7 optimizer """adam""" +4 7 training_loop """owa""" +4 7 negative_sampler """basic""" +4 7 evaluator """rankbased""" +4 8 dataset """fb15k237""" +4 8 model """complex""" +4 8 loss """nssa""" +4 8 regularizer """no""" +4 8 optimizer """adam""" +4 8 training_loop """owa""" +4 8 negative_sampler """basic""" +4 8 evaluator """rankbased""" +4 9 dataset """fb15k237""" +4 9 model """complex""" 
+4 9 loss """nssa""" +4 9 regularizer """no""" +4 9 optimizer """adam""" +4 9 training_loop """owa""" +4 9 negative_sampler """basic""" +4 9 evaluator """rankbased""" +4 10 dataset """fb15k237""" +4 10 model """complex""" +4 10 loss """nssa""" +4 10 regularizer """no""" +4 10 optimizer """adam""" +4 10 training_loop """owa""" +4 10 negative_sampler """basic""" +4 10 evaluator """rankbased""" +4 11 dataset """fb15k237""" +4 11 model """complex""" +4 11 loss """nssa""" +4 11 regularizer """no""" +4 11 optimizer """adam""" +4 11 training_loop """owa""" +4 11 negative_sampler """basic""" +4 11 evaluator """rankbased""" +4 12 dataset """fb15k237""" +4 12 model """complex""" +4 12 loss """nssa""" +4 12 regularizer """no""" +4 12 optimizer """adam""" +4 12 training_loop """owa""" +4 12 negative_sampler """basic""" +4 12 evaluator """rankbased""" +4 13 dataset """fb15k237""" +4 13 model """complex""" +4 13 loss """nssa""" +4 13 regularizer """no""" +4 13 optimizer """adam""" +4 13 training_loop """owa""" +4 13 negative_sampler """basic""" +4 13 evaluator """rankbased""" +4 14 dataset """fb15k237""" +4 14 model """complex""" +4 14 loss """nssa""" +4 14 regularizer """no""" +4 14 optimizer """adam""" +4 14 training_loop """owa""" +4 14 negative_sampler """basic""" +4 14 evaluator """rankbased""" +4 15 dataset """fb15k237""" +4 15 model """complex""" +4 15 loss """nssa""" +4 15 regularizer """no""" +4 15 optimizer """adam""" +4 15 training_loop """owa""" +4 15 negative_sampler """basic""" +4 15 evaluator """rankbased""" +4 16 dataset """fb15k237""" +4 16 model """complex""" +4 16 loss """nssa""" +4 16 regularizer """no""" +4 16 optimizer """adam""" +4 16 training_loop """owa""" +4 16 negative_sampler """basic""" +4 16 evaluator """rankbased""" +4 17 dataset """fb15k237""" +4 17 model """complex""" +4 17 loss """nssa""" +4 17 regularizer """no""" +4 17 optimizer """adam""" +4 17 training_loop """owa""" +4 17 negative_sampler """basic""" +4 17 evaluator """rankbased""" +4 18 
dataset """fb15k237""" +4 18 model """complex""" +4 18 loss """nssa""" +4 18 regularizer """no""" +4 18 optimizer """adam""" +4 18 training_loop """owa""" +4 18 negative_sampler """basic""" +4 18 evaluator """rankbased""" +4 19 dataset """fb15k237""" +4 19 model """complex""" +4 19 loss """nssa""" +4 19 regularizer """no""" +4 19 optimizer """adam""" +4 19 training_loop """owa""" +4 19 negative_sampler """basic""" +4 19 evaluator """rankbased""" +4 20 dataset """fb15k237""" +4 20 model """complex""" +4 20 loss """nssa""" +4 20 regularizer """no""" +4 20 optimizer """adam""" +4 20 training_loop """owa""" +4 20 negative_sampler """basic""" +4 20 evaluator """rankbased""" +5 1 model.embedding_dim 0.0 +5 1 loss.margin 22.104687912941078 +5 1 loss.adversarial_temperature 0.3828345598492946 +5 1 optimizer.lr 0.059589178377245644 +5 1 negative_sampler.num_negs_per_pos 30.0 +5 1 training.batch_size 2.0 +5 2 model.embedding_dim 0.0 +5 2 loss.margin 13.946959595667925 +5 2 loss.adversarial_temperature 0.25148857252128803 +5 2 optimizer.lr 0.005987197020322106 +5 2 negative_sampler.num_negs_per_pos 55.0 +5 2 training.batch_size 1.0 +5 3 model.embedding_dim 2.0 +5 3 loss.margin 25.37430167240976 +5 3 loss.adversarial_temperature 0.44061343712422474 +5 3 optimizer.lr 0.04621091676256713 +5 3 negative_sampler.num_negs_per_pos 11.0 +5 3 training.batch_size 1.0 +5 4 model.embedding_dim 0.0 +5 4 loss.margin 22.894213608490013 +5 4 loss.adversarial_temperature 0.8246458638223958 +5 4 optimizer.lr 0.05490830081382404 +5 4 negative_sampler.num_negs_per_pos 82.0 +5 4 training.batch_size 2.0 +5 5 model.embedding_dim 0.0 +5 5 loss.margin 26.93264986053582 +5 5 loss.adversarial_temperature 0.46066735403150083 +5 5 optimizer.lr 0.011669441542093342 +5 5 negative_sampler.num_negs_per_pos 24.0 +5 5 training.batch_size 1.0 +5 6 model.embedding_dim 2.0 +5 6 loss.margin 11.38781419192289 +5 6 loss.adversarial_temperature 0.8151572004959466 +5 6 optimizer.lr 0.0049258219397530735 +5 6 
negative_sampler.num_negs_per_pos 93.0 +5 6 training.batch_size 1.0 +5 7 model.embedding_dim 1.0 +5 7 loss.margin 22.74264470811986 +5 7 loss.adversarial_temperature 0.979460335111638 +5 7 optimizer.lr 0.06312899663722367 +5 7 negative_sampler.num_negs_per_pos 95.0 +5 7 training.batch_size 0.0 +5 8 model.embedding_dim 2.0 +5 8 loss.margin 24.92358350002639 +5 8 loss.adversarial_temperature 0.7108518876484126 +5 8 optimizer.lr 0.026341320381290008 +5 8 negative_sampler.num_negs_per_pos 85.0 +5 8 training.batch_size 0.0 +5 9 model.embedding_dim 1.0 +5 9 loss.margin 22.871604211697687 +5 9 loss.adversarial_temperature 0.7651572288364779 +5 9 optimizer.lr 0.0956978029298372 +5 9 negative_sampler.num_negs_per_pos 70.0 +5 9 training.batch_size 1.0 +5 10 model.embedding_dim 0.0 +5 10 loss.margin 11.615114265829371 +5 10 loss.adversarial_temperature 0.2077505735260824 +5 10 optimizer.lr 0.021247303430116785 +5 10 negative_sampler.num_negs_per_pos 91.0 +5 10 training.batch_size 0.0 +5 11 model.embedding_dim 0.0 +5 11 loss.margin 2.5672777529645403 +5 11 loss.adversarial_temperature 0.9081982751970035 +5 11 optimizer.lr 0.012298090011274862 +5 11 negative_sampler.num_negs_per_pos 51.0 +5 11 training.batch_size 0.0 +5 12 model.embedding_dim 1.0 +5 12 loss.margin 5.948523436499087 +5 12 loss.adversarial_temperature 0.8243637011692361 +5 12 optimizer.lr 0.07284595379403243 +5 12 negative_sampler.num_negs_per_pos 66.0 +5 12 training.batch_size 0.0 +5 13 model.embedding_dim 1.0 +5 13 loss.margin 23.789567653499702 +5 13 loss.adversarial_temperature 0.2631620658122914 +5 13 optimizer.lr 0.03813275491631163 +5 13 negative_sampler.num_negs_per_pos 21.0 +5 13 training.batch_size 1.0 +5 14 model.embedding_dim 1.0 +5 14 loss.margin 14.272749648473912 +5 14 loss.adversarial_temperature 0.9689282592672576 +5 14 optimizer.lr 0.05650173351237898 +5 14 negative_sampler.num_negs_per_pos 11.0 +5 14 training.batch_size 1.0 +5 15 model.embedding_dim 2.0 +5 15 loss.margin 15.63823014935312 +5 15 
loss.adversarial_temperature 0.1920403673163546 +5 15 optimizer.lr 0.030937761581352478 +5 15 negative_sampler.num_negs_per_pos 75.0 +5 15 training.batch_size 2.0 +5 16 model.embedding_dim 2.0 +5 16 loss.margin 18.78886522332616 +5 16 loss.adversarial_temperature 0.5943695325587212 +5 16 optimizer.lr 0.013952577976153655 +5 16 negative_sampler.num_negs_per_pos 56.0 +5 16 training.batch_size 0.0 +5 17 model.embedding_dim 2.0 +5 17 loss.margin 2.724390975606257 +5 17 loss.adversarial_temperature 0.6396066170345708 +5 17 optimizer.lr 0.002286717646217636 +5 17 negative_sampler.num_negs_per_pos 29.0 +5 17 training.batch_size 1.0 +5 18 model.embedding_dim 2.0 +5 18 loss.margin 15.32146536064721 +5 18 loss.adversarial_temperature 0.8547988185669789 +5 18 optimizer.lr 0.0076100743085481225 +5 18 negative_sampler.num_negs_per_pos 34.0 +5 18 training.batch_size 2.0 +5 19 model.embedding_dim 2.0 +5 19 loss.margin 20.169739032675917 +5 19 loss.adversarial_temperature 0.19558488973689406 +5 19 optimizer.lr 0.01878762726140468 +5 19 negative_sampler.num_negs_per_pos 83.0 +5 19 training.batch_size 1.0 +5 20 model.embedding_dim 2.0 +5 20 loss.margin 18.904143619950553 +5 20 loss.adversarial_temperature 0.19231428466536962 +5 20 optimizer.lr 0.09397447450022609 +5 20 negative_sampler.num_negs_per_pos 0.0 +5 20 training.batch_size 0.0 +5 21 model.embedding_dim 0.0 +5 21 loss.margin 21.6300107935637 +5 21 loss.adversarial_temperature 0.39190679525900585 +5 21 optimizer.lr 0.0051476705893249205 +5 21 negative_sampler.num_negs_per_pos 32.0 +5 21 training.batch_size 0.0 +5 22 model.embedding_dim 0.0 +5 22 loss.margin 25.906572124801375 +5 22 loss.adversarial_temperature 0.47246065311374164 +5 22 optimizer.lr 0.0014018825044150478 +5 22 negative_sampler.num_negs_per_pos 43.0 +5 22 training.batch_size 0.0 +5 23 model.embedding_dim 1.0 +5 23 loss.margin 23.045757116669492 +5 23 loss.adversarial_temperature 0.23755862280780393 +5 23 optimizer.lr 0.020348970224269503 +5 23 
negative_sampler.num_negs_per_pos 96.0 +5 23 training.batch_size 2.0 +5 24 model.embedding_dim 0.0 +5 24 loss.margin 5.6412265745749215 +5 24 loss.adversarial_temperature 0.889357189918939 +5 24 optimizer.lr 0.002449148167830076 +5 24 negative_sampler.num_negs_per_pos 76.0 +5 24 training.batch_size 2.0 +5 25 model.embedding_dim 2.0 +5 25 loss.margin 3.5416908418756234 +5 25 loss.adversarial_temperature 0.9536653262101598 +5 25 optimizer.lr 0.003688687126038997 +5 25 negative_sampler.num_negs_per_pos 64.0 +5 25 training.batch_size 0.0 +5 26 model.embedding_dim 1.0 +5 26 loss.margin 3.3875338088054945 +5 26 loss.adversarial_temperature 0.42347791075733504 +5 26 optimizer.lr 0.006575489442229437 +5 26 negative_sampler.num_negs_per_pos 40.0 +5 26 training.batch_size 0.0 +5 27 model.embedding_dim 1.0 +5 27 loss.margin 11.555437455331507 +5 27 loss.adversarial_temperature 0.661628062860538 +5 27 optimizer.lr 0.039809247541957644 +5 27 negative_sampler.num_negs_per_pos 73.0 +5 27 training.batch_size 2.0 +5 28 model.embedding_dim 1.0 +5 28 loss.margin 6.626451604707941 +5 28 loss.adversarial_temperature 0.7545987807350581 +5 28 optimizer.lr 0.0036759050073751384 +5 28 negative_sampler.num_negs_per_pos 83.0 +5 28 training.batch_size 0.0 +5 29 model.embedding_dim 1.0 +5 29 loss.margin 4.099751380513415 +5 29 loss.adversarial_temperature 0.7775511585510555 +5 29 optimizer.lr 0.08003933816421493 +5 29 negative_sampler.num_negs_per_pos 72.0 +5 29 training.batch_size 1.0 +5 30 model.embedding_dim 1.0 +5 30 loss.margin 13.763491704753413 +5 30 loss.adversarial_temperature 0.18268489019614173 +5 30 optimizer.lr 0.009732965148020216 +5 30 negative_sampler.num_negs_per_pos 1.0 +5 30 training.batch_size 2.0 +5 31 model.embedding_dim 1.0 +5 31 loss.margin 8.671387993845098 +5 31 loss.adversarial_temperature 0.7686068177215118 +5 31 optimizer.lr 0.04645116741825777 +5 31 negative_sampler.num_negs_per_pos 19.0 +5 31 training.batch_size 1.0 +5 32 model.embedding_dim 0.0 +5 32 loss.margin 
1.3463964938122115 +5 32 loss.adversarial_temperature 0.10449810223497838 +5 32 optimizer.lr 0.03982908656692517 +5 32 negative_sampler.num_negs_per_pos 94.0 +5 32 training.batch_size 2.0 +5 33 model.embedding_dim 2.0 +5 33 loss.margin 10.874318500227423 +5 33 loss.adversarial_temperature 0.4608970025785158 +5 33 optimizer.lr 0.08905014661629455 +5 33 negative_sampler.num_negs_per_pos 98.0 +5 33 training.batch_size 2.0 +5 34 model.embedding_dim 1.0 +5 34 loss.margin 6.8646272265973405 +5 34 loss.adversarial_temperature 0.9078623950888149 +5 34 optimizer.lr 0.05076340697343637 +5 34 negative_sampler.num_negs_per_pos 51.0 +5 34 training.batch_size 2.0 +5 1 dataset """fb15k237""" +5 1 model """complex""" +5 1 loss """nssa""" +5 1 regularizer """no""" +5 1 optimizer """adam""" +5 1 training_loop """owa""" +5 1 negative_sampler """basic""" +5 1 evaluator """rankbased""" +5 2 dataset """fb15k237""" +5 2 model """complex""" +5 2 loss """nssa""" +5 2 regularizer """no""" +5 2 optimizer """adam""" +5 2 training_loop """owa""" +5 2 negative_sampler """basic""" +5 2 evaluator """rankbased""" +5 3 dataset """fb15k237""" +5 3 model """complex""" +5 3 loss """nssa""" +5 3 regularizer """no""" +5 3 optimizer """adam""" +5 3 training_loop """owa""" +5 3 negative_sampler """basic""" +5 3 evaluator """rankbased""" +5 4 dataset """fb15k237""" +5 4 model """complex""" +5 4 loss """nssa""" +5 4 regularizer """no""" +5 4 optimizer """adam""" +5 4 training_loop """owa""" +5 4 negative_sampler """basic""" +5 4 evaluator """rankbased""" +5 5 dataset """fb15k237""" +5 5 model """complex""" +5 5 loss """nssa""" +5 5 regularizer """no""" +5 5 optimizer """adam""" +5 5 training_loop """owa""" +5 5 negative_sampler """basic""" +5 5 evaluator """rankbased""" +5 6 dataset """fb15k237""" +5 6 model """complex""" +5 6 loss """nssa""" +5 6 regularizer """no""" +5 6 optimizer """adam""" +5 6 training_loop """owa""" +5 6 negative_sampler """basic""" +5 6 evaluator """rankbased""" +5 7 dataset 
"""fb15k237""" +5 7 model """complex""" +5 7 loss """nssa""" +5 7 regularizer """no""" +5 7 optimizer """adam""" +5 7 training_loop """owa""" +5 7 negative_sampler """basic""" +5 7 evaluator """rankbased""" +5 8 dataset """fb15k237""" +5 8 model """complex""" +5 8 loss """nssa""" +5 8 regularizer """no""" +5 8 optimizer """adam""" +5 8 training_loop """owa""" +5 8 negative_sampler """basic""" +5 8 evaluator """rankbased""" +5 9 dataset """fb15k237""" +5 9 model """complex""" +5 9 loss """nssa""" +5 9 regularizer """no""" +5 9 optimizer """adam""" +5 9 training_loop """owa""" +5 9 negative_sampler """basic""" +5 9 evaluator """rankbased""" +5 10 dataset """fb15k237""" +5 10 model """complex""" +5 10 loss """nssa""" +5 10 regularizer """no""" +5 10 optimizer """adam""" +5 10 training_loop """owa""" +5 10 negative_sampler """basic""" +5 10 evaluator """rankbased""" +5 11 dataset """fb15k237""" +5 11 model """complex""" +5 11 loss """nssa""" +5 11 regularizer """no""" +5 11 optimizer """adam""" +5 11 training_loop """owa""" +5 11 negative_sampler """basic""" +5 11 evaluator """rankbased""" +5 12 dataset """fb15k237""" +5 12 model """complex""" +5 12 loss """nssa""" +5 12 regularizer """no""" +5 12 optimizer """adam""" +5 12 training_loop """owa""" +5 12 negative_sampler """basic""" +5 12 evaluator """rankbased""" +5 13 dataset """fb15k237""" +5 13 model """complex""" +5 13 loss """nssa""" +5 13 regularizer """no""" +5 13 optimizer """adam""" +5 13 training_loop """owa""" +5 13 negative_sampler """basic""" +5 13 evaluator """rankbased""" +5 14 dataset """fb15k237""" +5 14 model """complex""" +5 14 loss """nssa""" +5 14 regularizer """no""" +5 14 optimizer """adam""" +5 14 training_loop """owa""" +5 14 negative_sampler """basic""" +5 14 evaluator """rankbased""" +5 15 dataset """fb15k237""" +5 15 model """complex""" +5 15 loss """nssa""" +5 15 regularizer """no""" +5 15 optimizer """adam""" +5 15 training_loop """owa""" +5 15 negative_sampler """basic""" +5 15 evaluator 
"""rankbased""" +5 16 dataset """fb15k237""" +5 16 model """complex""" +5 16 loss """nssa""" +5 16 regularizer """no""" +5 16 optimizer """adam""" +5 16 training_loop """owa""" +5 16 negative_sampler """basic""" +5 16 evaluator """rankbased""" +5 17 dataset """fb15k237""" +5 17 model """complex""" +5 17 loss """nssa""" +5 17 regularizer """no""" +5 17 optimizer """adam""" +5 17 training_loop """owa""" +5 17 negative_sampler """basic""" +5 17 evaluator """rankbased""" +5 18 dataset """fb15k237""" +5 18 model """complex""" +5 18 loss """nssa""" +5 18 regularizer """no""" +5 18 optimizer """adam""" +5 18 training_loop """owa""" +5 18 negative_sampler """basic""" +5 18 evaluator """rankbased""" +5 19 dataset """fb15k237""" +5 19 model """complex""" +5 19 loss """nssa""" +5 19 regularizer """no""" +5 19 optimizer """adam""" +5 19 training_loop """owa""" +5 19 negative_sampler """basic""" +5 19 evaluator """rankbased""" +5 20 dataset """fb15k237""" +5 20 model """complex""" +5 20 loss """nssa""" +5 20 regularizer """no""" +5 20 optimizer """adam""" +5 20 training_loop """owa""" +5 20 negative_sampler """basic""" +5 20 evaluator """rankbased""" +5 21 dataset """fb15k237""" +5 21 model """complex""" +5 21 loss """nssa""" +5 21 regularizer """no""" +5 21 optimizer """adam""" +5 21 training_loop """owa""" +5 21 negative_sampler """basic""" +5 21 evaluator """rankbased""" +5 22 dataset """fb15k237""" +5 22 model """complex""" +5 22 loss """nssa""" +5 22 regularizer """no""" +5 22 optimizer """adam""" +5 22 training_loop """owa""" +5 22 negative_sampler """basic""" +5 22 evaluator """rankbased""" +5 23 dataset """fb15k237""" +5 23 model """complex""" +5 23 loss """nssa""" +5 23 regularizer """no""" +5 23 optimizer """adam""" +5 23 training_loop """owa""" +5 23 negative_sampler """basic""" +5 23 evaluator """rankbased""" +5 24 dataset """fb15k237""" +5 24 model """complex""" +5 24 loss """nssa""" +5 24 regularizer """no""" +5 24 optimizer """adam""" +5 24 training_loop 
"""owa""" +5 24 negative_sampler """basic""" +5 24 evaluator """rankbased""" +5 25 dataset """fb15k237""" +5 25 model """complex""" +5 25 loss """nssa""" +5 25 regularizer """no""" +5 25 optimizer """adam""" +5 25 training_loop """owa""" +5 25 negative_sampler """basic""" +5 25 evaluator """rankbased""" +5 26 dataset """fb15k237""" +5 26 model """complex""" +5 26 loss """nssa""" +5 26 regularizer """no""" +5 26 optimizer """adam""" +5 26 training_loop """owa""" +5 26 negative_sampler """basic""" +5 26 evaluator """rankbased""" +5 27 dataset """fb15k237""" +5 27 model """complex""" +5 27 loss """nssa""" +5 27 regularizer """no""" +5 27 optimizer """adam""" +5 27 training_loop """owa""" +5 27 negative_sampler """basic""" +5 27 evaluator """rankbased""" +5 28 dataset """fb15k237""" +5 28 model """complex""" +5 28 loss """nssa""" +5 28 regularizer """no""" +5 28 optimizer """adam""" +5 28 training_loop """owa""" +5 28 negative_sampler """basic""" +5 28 evaluator """rankbased""" +5 29 dataset """fb15k237""" +5 29 model """complex""" +5 29 loss """nssa""" +5 29 regularizer """no""" +5 29 optimizer """adam""" +5 29 training_loop """owa""" +5 29 negative_sampler """basic""" +5 29 evaluator """rankbased""" +5 30 dataset """fb15k237""" +5 30 model """complex""" +5 30 loss """nssa""" +5 30 regularizer """no""" +5 30 optimizer """adam""" +5 30 training_loop """owa""" +5 30 negative_sampler """basic""" +5 30 evaluator """rankbased""" +5 31 dataset """fb15k237""" +5 31 model """complex""" +5 31 loss """nssa""" +5 31 regularizer """no""" +5 31 optimizer """adam""" +5 31 training_loop """owa""" +5 31 negative_sampler """basic""" +5 31 evaluator """rankbased""" +5 32 dataset """fb15k237""" +5 32 model """complex""" +5 32 loss """nssa""" +5 32 regularizer """no""" +5 32 optimizer """adam""" +5 32 training_loop """owa""" +5 32 negative_sampler """basic""" +5 32 evaluator """rankbased""" +5 33 dataset """fb15k237""" +5 33 model """complex""" +5 33 loss """nssa""" +5 33 regularizer 
"""no""" +5 33 optimizer """adam""" +5 33 training_loop """owa""" +5 33 negative_sampler """basic""" +5 33 evaluator """rankbased""" +5 34 dataset """fb15k237""" +5 34 model """complex""" +5 34 loss """nssa""" +5 34 regularizer """no""" +5 34 optimizer """adam""" +5 34 training_loop """owa""" +5 34 negative_sampler """basic""" +5 34 evaluator """rankbased""" +6 1 model.embedding_dim 1.0 +6 1 loss.margin 5.106335582198125 +6 1 optimizer.lr 0.011322584346264281 +6 1 negative_sampler.num_negs_per_pos 82.0 +6 1 training.batch_size 1.0 +6 2 model.embedding_dim 0.0 +6 2 loss.margin 5.295072857251313 +6 2 optimizer.lr 0.0017679674625618097 +6 2 negative_sampler.num_negs_per_pos 20.0 +6 2 training.batch_size 0.0 +6 3 model.embedding_dim 1.0 +6 3 loss.margin 0.9380957297226693 +6 3 optimizer.lr 0.01952295196972904 +6 3 negative_sampler.num_negs_per_pos 98.0 +6 3 training.batch_size 2.0 +6 4 model.embedding_dim 2.0 +6 4 loss.margin 9.896226110232508 +6 4 optimizer.lr 0.02422670869132154 +6 4 negative_sampler.num_negs_per_pos 58.0 +6 4 training.batch_size 1.0 +6 5 model.embedding_dim 2.0 +6 5 loss.margin 9.37934946473919 +6 5 optimizer.lr 0.004225843158882435 +6 5 negative_sampler.num_negs_per_pos 23.0 +6 5 training.batch_size 2.0 +6 6 model.embedding_dim 2.0 +6 6 loss.margin 5.504815261646759 +6 6 optimizer.lr 0.031126814814474043 +6 6 negative_sampler.num_negs_per_pos 51.0 +6 6 training.batch_size 0.0 +6 7 model.embedding_dim 2.0 +6 7 loss.margin 9.172379932235355 +6 7 optimizer.lr 0.010498718914681337 +6 7 negative_sampler.num_negs_per_pos 22.0 +6 7 training.batch_size 1.0 +6 8 model.embedding_dim 2.0 +6 8 loss.margin 6.578918680676877 +6 8 optimizer.lr 0.06193648365153209 +6 8 negative_sampler.num_negs_per_pos 69.0 +6 8 training.batch_size 2.0 +6 9 model.embedding_dim 0.0 +6 9 loss.margin 4.034791416272579 +6 9 optimizer.lr 0.015340381803415811 +6 9 negative_sampler.num_negs_per_pos 46.0 +6 9 training.batch_size 2.0 +6 10 model.embedding_dim 1.0 +6 10 loss.margin 
9.239283279911339 +6 10 optimizer.lr 0.001977936871399324 +6 10 negative_sampler.num_negs_per_pos 72.0 +6 10 training.batch_size 1.0 +6 11 model.embedding_dim 2.0 +6 11 loss.margin 2.9687187338464933 +6 11 optimizer.lr 0.027649812850097787 +6 11 negative_sampler.num_negs_per_pos 55.0 +6 11 training.batch_size 0.0 +6 1 dataset """fb15k237""" +6 1 model """complex""" +6 1 loss """marginranking""" +6 1 regularizer """no""" +6 1 optimizer """adam""" +6 1 training_loop """owa""" +6 1 negative_sampler """basic""" +6 1 evaluator """rankbased""" +6 2 dataset """fb15k237""" +6 2 model """complex""" +6 2 loss """marginranking""" +6 2 regularizer """no""" +6 2 optimizer """adam""" +6 2 training_loop """owa""" +6 2 negative_sampler """basic""" +6 2 evaluator """rankbased""" +6 3 dataset """fb15k237""" +6 3 model """complex""" +6 3 loss """marginranking""" +6 3 regularizer """no""" +6 3 optimizer """adam""" +6 3 training_loop """owa""" +6 3 negative_sampler """basic""" +6 3 evaluator """rankbased""" +6 4 dataset """fb15k237""" +6 4 model """complex""" +6 4 loss """marginranking""" +6 4 regularizer """no""" +6 4 optimizer """adam""" +6 4 training_loop """owa""" +6 4 negative_sampler """basic""" +6 4 evaluator """rankbased""" +6 5 dataset """fb15k237""" +6 5 model """complex""" +6 5 loss """marginranking""" +6 5 regularizer """no""" +6 5 optimizer """adam""" +6 5 training_loop """owa""" +6 5 negative_sampler """basic""" +6 5 evaluator """rankbased""" +6 6 dataset """fb15k237""" +6 6 model """complex""" +6 6 loss """marginranking""" +6 6 regularizer """no""" +6 6 optimizer """adam""" +6 6 training_loop """owa""" +6 6 negative_sampler """basic""" +6 6 evaluator """rankbased""" +6 7 dataset """fb15k237""" +6 7 model """complex""" +6 7 loss """marginranking""" +6 7 regularizer """no""" +6 7 optimizer """adam""" +6 7 training_loop """owa""" +6 7 negative_sampler """basic""" +6 7 evaluator """rankbased""" +6 8 dataset """fb15k237""" +6 8 model """complex""" +6 8 loss 
"""marginranking""" +6 8 regularizer """no""" +6 8 optimizer """adam""" +6 8 training_loop """owa""" +6 8 negative_sampler """basic""" +6 8 evaluator """rankbased""" +6 9 dataset """fb15k237""" +6 9 model """complex""" +6 9 loss """marginranking""" +6 9 regularizer """no""" +6 9 optimizer """adam""" +6 9 training_loop """owa""" +6 9 negative_sampler """basic""" +6 9 evaluator """rankbased""" +6 10 dataset """fb15k237""" +6 10 model """complex""" +6 10 loss """marginranking""" +6 10 regularizer """no""" +6 10 optimizer """adam""" +6 10 training_loop """owa""" +6 10 negative_sampler """basic""" +6 10 evaluator """rankbased""" +6 11 dataset """fb15k237""" +6 11 model """complex""" +6 11 loss """marginranking""" +6 11 regularizer """no""" +6 11 optimizer """adam""" +6 11 training_loop """owa""" +6 11 negative_sampler """basic""" +6 11 evaluator """rankbased""" +7 1 model.embedding_dim 0.0 +7 1 loss.margin 2.251766100876799 +7 1 optimizer.lr 0.01359369075431818 +7 1 negative_sampler.num_negs_per_pos 65.0 +7 1 training.batch_size 2.0 +7 2 model.embedding_dim 0.0 +7 2 loss.margin 1.1302285588102896 +7 2 optimizer.lr 0.0042698995140344085 +7 2 negative_sampler.num_negs_per_pos 89.0 +7 2 training.batch_size 1.0 +7 3 model.embedding_dim 1.0 +7 3 loss.margin 7.456590907924605 +7 3 optimizer.lr 0.0433100066239531 +7 3 negative_sampler.num_negs_per_pos 76.0 +7 3 training.batch_size 1.0 +7 4 model.embedding_dim 2.0 +7 4 loss.margin 4.832586759696706 +7 4 optimizer.lr 0.00426642888770433 +7 4 negative_sampler.num_negs_per_pos 24.0 +7 4 training.batch_size 1.0 +7 5 model.embedding_dim 1.0 +7 5 loss.margin 4.683941445472948 +7 5 optimizer.lr 0.0020208859012303924 +7 5 negative_sampler.num_negs_per_pos 43.0 +7 5 training.batch_size 2.0 +7 6 model.embedding_dim 2.0 +7 6 loss.margin 5.048600079043628 +7 6 optimizer.lr 0.0019515584443586937 +7 6 negative_sampler.num_negs_per_pos 73.0 +7 6 training.batch_size 1.0 +7 7 model.embedding_dim 2.0 +7 7 loss.margin 2.9637455527287218 +7 7 
optimizer.lr 0.03565001910129386 +7 7 negative_sampler.num_negs_per_pos 95.0 +7 7 training.batch_size 1.0 +7 8 model.embedding_dim 1.0 +7 8 loss.margin 8.57524756765171 +7 8 optimizer.lr 0.0010125373366825217 +7 8 negative_sampler.num_negs_per_pos 38.0 +7 8 training.batch_size 2.0 +7 9 model.embedding_dim 2.0 +7 9 loss.margin 1.8449915771548864 +7 9 optimizer.lr 0.0011361503999883296 +7 9 negative_sampler.num_negs_per_pos 42.0 +7 9 training.batch_size 1.0 +7 10 model.embedding_dim 1.0 +7 10 loss.margin 9.530611349736446 +7 10 optimizer.lr 0.007018301292099652 +7 10 negative_sampler.num_negs_per_pos 31.0 +7 10 training.batch_size 1.0 +7 11 model.embedding_dim 0.0 +7 11 loss.margin 4.196681276756393 +7 11 optimizer.lr 0.01199409441061611 +7 11 negative_sampler.num_negs_per_pos 77.0 +7 11 training.batch_size 1.0 +7 12 model.embedding_dim 1.0 +7 12 loss.margin 9.79893091572708 +7 12 optimizer.lr 0.018992379146795437 +7 12 negative_sampler.num_negs_per_pos 65.0 +7 12 training.batch_size 1.0 +7 13 model.embedding_dim 1.0 +7 13 loss.margin 7.458801114345946 +7 13 optimizer.lr 0.0034092638094167325 +7 13 negative_sampler.num_negs_per_pos 85.0 +7 13 training.batch_size 0.0 +7 14 model.embedding_dim 0.0 +7 14 loss.margin 2.6868184989547563 +7 14 optimizer.lr 0.0028228643710781196 +7 14 negative_sampler.num_negs_per_pos 49.0 +7 14 training.batch_size 0.0 +7 15 model.embedding_dim 2.0 +7 15 loss.margin 8.523268546001848 +7 15 optimizer.lr 0.00652686195902236 +7 15 negative_sampler.num_negs_per_pos 91.0 +7 15 training.batch_size 2.0 +7 16 model.embedding_dim 1.0 +7 16 loss.margin 6.3322736085233275 +7 16 optimizer.lr 0.0012664611410523423 +7 16 negative_sampler.num_negs_per_pos 63.0 +7 16 training.batch_size 2.0 +7 17 model.embedding_dim 0.0 +7 17 loss.margin 1.5431808378286989 +7 17 optimizer.lr 0.007283247053301378 +7 17 negative_sampler.num_negs_per_pos 70.0 +7 17 training.batch_size 1.0 +7 18 model.embedding_dim 2.0 +7 18 loss.margin 3.246806008833256 +7 18 optimizer.lr 
0.059322189061300795 +7 18 negative_sampler.num_negs_per_pos 82.0 +7 18 training.batch_size 2.0 +7 19 model.embedding_dim 0.0 +7 19 loss.margin 9.71614383841113 +7 19 optimizer.lr 0.004289666353237195 +7 19 negative_sampler.num_negs_per_pos 17.0 +7 19 training.batch_size 1.0 +7 20 model.embedding_dim 0.0 +7 20 loss.margin 9.281690439791845 +7 20 optimizer.lr 0.07911811475784315 +7 20 negative_sampler.num_negs_per_pos 19.0 +7 20 training.batch_size 0.0 +7 21 model.embedding_dim 0.0 +7 21 loss.margin 7.772354183315908 +7 21 optimizer.lr 0.006858413620566135 +7 21 negative_sampler.num_negs_per_pos 96.0 +7 21 training.batch_size 0.0 +7 22 model.embedding_dim 0.0 +7 22 loss.margin 3.746514446858141 +7 22 optimizer.lr 0.06593620415206616 +7 22 negative_sampler.num_negs_per_pos 16.0 +7 22 training.batch_size 0.0 +7 23 model.embedding_dim 1.0 +7 23 loss.margin 9.994457737368641 +7 23 optimizer.lr 0.0020852230908385005 +7 23 negative_sampler.num_negs_per_pos 96.0 +7 23 training.batch_size 2.0 +7 24 model.embedding_dim 0.0 +7 24 loss.margin 3.4361279244199623 +7 24 optimizer.lr 0.007772409966901501 +7 24 negative_sampler.num_negs_per_pos 75.0 +7 24 training.batch_size 1.0 +7 1 dataset """fb15k237""" +7 1 model """complex""" +7 1 loss """marginranking""" +7 1 regularizer """no""" +7 1 optimizer """adam""" +7 1 training_loop """owa""" +7 1 negative_sampler """basic""" +7 1 evaluator """rankbased""" +7 2 dataset """fb15k237""" +7 2 model """complex""" +7 2 loss """marginranking""" +7 2 regularizer """no""" +7 2 optimizer """adam""" +7 2 training_loop """owa""" +7 2 negative_sampler """basic""" +7 2 evaluator """rankbased""" +7 3 dataset """fb15k237""" +7 3 model """complex""" +7 3 loss """marginranking""" +7 3 regularizer """no""" +7 3 optimizer """adam""" +7 3 training_loop """owa""" +7 3 negative_sampler """basic""" +7 3 evaluator """rankbased""" +7 4 dataset """fb15k237""" +7 4 model """complex""" +7 4 loss """marginranking""" +7 4 regularizer """no""" +7 4 optimizer 
"""adam""" +7 4 training_loop """owa""" +7 4 negative_sampler """basic""" +7 4 evaluator """rankbased""" +7 5 dataset """fb15k237""" +7 5 model """complex""" +7 5 loss """marginranking""" +7 5 regularizer """no""" +7 5 optimizer """adam""" +7 5 training_loop """owa""" +7 5 negative_sampler """basic""" +7 5 evaluator """rankbased""" +7 6 dataset """fb15k237""" +7 6 model """complex""" +7 6 loss """marginranking""" +7 6 regularizer """no""" +7 6 optimizer """adam""" +7 6 training_loop """owa""" +7 6 negative_sampler """basic""" +7 6 evaluator """rankbased""" +7 7 dataset """fb15k237""" +7 7 model """complex""" +7 7 loss """marginranking""" +7 7 regularizer """no""" +7 7 optimizer """adam""" +7 7 training_loop """owa""" +7 7 negative_sampler """basic""" +7 7 evaluator """rankbased""" +7 8 dataset """fb15k237""" +7 8 model """complex""" +7 8 loss """marginranking""" +7 8 regularizer """no""" +7 8 optimizer """adam""" +7 8 training_loop """owa""" +7 8 negative_sampler """basic""" +7 8 evaluator """rankbased""" +7 9 dataset """fb15k237""" +7 9 model """complex""" +7 9 loss """marginranking""" +7 9 regularizer """no""" +7 9 optimizer """adam""" +7 9 training_loop """owa""" +7 9 negative_sampler """basic""" +7 9 evaluator """rankbased""" +7 10 dataset """fb15k237""" +7 10 model """complex""" +7 10 loss """marginranking""" +7 10 regularizer """no""" +7 10 optimizer """adam""" +7 10 training_loop """owa""" +7 10 negative_sampler """basic""" +7 10 evaluator """rankbased""" +7 11 dataset """fb15k237""" +7 11 model """complex""" +7 11 loss """marginranking""" +7 11 regularizer """no""" +7 11 optimizer """adam""" +7 11 training_loop """owa""" +7 11 negative_sampler """basic""" +7 11 evaluator """rankbased""" +7 12 dataset """fb15k237""" +7 12 model """complex""" +7 12 loss """marginranking""" +7 12 regularizer """no""" +7 12 optimizer """adam""" +7 12 training_loop """owa""" +7 12 negative_sampler """basic""" +7 12 evaluator """rankbased""" +7 13 dataset """fb15k237""" +7 13 
model """complex""" +7 13 loss """marginranking""" +7 13 regularizer """no""" +7 13 optimizer """adam""" +7 13 training_loop """owa""" +7 13 negative_sampler """basic""" +7 13 evaluator """rankbased""" +7 14 dataset """fb15k237""" +7 14 model """complex""" +7 14 loss """marginranking""" +7 14 regularizer """no""" +7 14 optimizer """adam""" +7 14 training_loop """owa""" +7 14 negative_sampler """basic""" +7 14 evaluator """rankbased""" +7 15 dataset """fb15k237""" +7 15 model """complex""" +7 15 loss """marginranking""" +7 15 regularizer """no""" +7 15 optimizer """adam""" +7 15 training_loop """owa""" +7 15 negative_sampler """basic""" +7 15 evaluator """rankbased""" +7 16 dataset """fb15k237""" +7 16 model """complex""" +7 16 loss """marginranking""" +7 16 regularizer """no""" +7 16 optimizer """adam""" +7 16 training_loop """owa""" +7 16 negative_sampler """basic""" +7 16 evaluator """rankbased""" +7 17 dataset """fb15k237""" +7 17 model """complex""" +7 17 loss """marginranking""" +7 17 regularizer """no""" +7 17 optimizer """adam""" +7 17 training_loop """owa""" +7 17 negative_sampler """basic""" +7 17 evaluator """rankbased""" +7 18 dataset """fb15k237""" +7 18 model """complex""" +7 18 loss """marginranking""" +7 18 regularizer """no""" +7 18 optimizer """adam""" +7 18 training_loop """owa""" +7 18 negative_sampler """basic""" +7 18 evaluator """rankbased""" +7 19 dataset """fb15k237""" +7 19 model """complex""" +7 19 loss """marginranking""" +7 19 regularizer """no""" +7 19 optimizer """adam""" +7 19 training_loop """owa""" +7 19 negative_sampler """basic""" +7 19 evaluator """rankbased""" +7 20 dataset """fb15k237""" +7 20 model """complex""" +7 20 loss """marginranking""" +7 20 regularizer """no""" +7 20 optimizer """adam""" +7 20 training_loop """owa""" +7 20 negative_sampler """basic""" +7 20 evaluator """rankbased""" +7 21 dataset """fb15k237""" +7 21 model """complex""" +7 21 loss """marginranking""" +7 21 regularizer """no""" +7 21 optimizer 
"""adam""" +7 21 training_loop """owa""" +7 21 negative_sampler """basic""" +7 21 evaluator """rankbased""" +7 22 dataset """fb15k237""" +7 22 model """complex""" +7 22 loss """marginranking""" +7 22 regularizer """no""" +7 22 optimizer """adam""" +7 22 training_loop """owa""" +7 22 negative_sampler """basic""" +7 22 evaluator """rankbased""" +7 23 dataset """fb15k237""" +7 23 model """complex""" +7 23 loss """marginranking""" +7 23 regularizer """no""" +7 23 optimizer """adam""" +7 23 training_loop """owa""" +7 23 negative_sampler """basic""" +7 23 evaluator """rankbased""" +7 24 dataset """fb15k237""" +7 24 model """complex""" +7 24 loss """marginranking""" +7 24 regularizer """no""" +7 24 optimizer """adam""" +7 24 training_loop """owa""" +7 24 negative_sampler """basic""" +7 24 evaluator """rankbased""" +8 1 model.embedding_dim 0.0 +8 1 optimizer.lr 0.017302339082977097 +8 1 training.batch_size 0.0 +8 1 training.label_smoothing 0.01534228243148804 +8 2 model.embedding_dim 0.0 +8 2 optimizer.lr 0.08479947349745019 +8 2 training.batch_size 2.0 +8 2 training.label_smoothing 0.6659326372662778 +8 3 model.embedding_dim 0.0 +8 3 optimizer.lr 0.0064051276573025865 +8 3 training.batch_size 2.0 +8 3 training.label_smoothing 0.031882221990236964 +8 4 model.embedding_dim 1.0 +8 4 optimizer.lr 0.0015510710094309593 +8 4 training.batch_size 0.0 +8 4 training.label_smoothing 0.07383721554981285 +8 5 model.embedding_dim 0.0 +8 5 optimizer.lr 0.043005682667389536 +8 5 training.batch_size 2.0 +8 5 training.label_smoothing 0.0034894997559133485 +8 6 model.embedding_dim 0.0 +8 6 optimizer.lr 0.09189879160385822 +8 6 training.batch_size 0.0 +8 6 training.label_smoothing 0.7907715528687194 +8 7 model.embedding_dim 0.0 +8 7 optimizer.lr 0.008739285624427535 +8 7 training.batch_size 1.0 +8 7 training.label_smoothing 0.011626744667194969 +8 1 dataset """fb15k237""" +8 1 model """complex""" +8 1 loss """bceaftersigmoid""" +8 1 regularizer """no""" +8 1 optimizer """adam""" +8 1 
training_loop """lcwa""" +8 1 evaluator """rankbased""" +8 2 dataset """fb15k237""" +8 2 model """complex""" +8 2 loss """bceaftersigmoid""" +8 2 regularizer """no""" +8 2 optimizer """adam""" +8 2 training_loop """lcwa""" +8 2 evaluator """rankbased""" +8 3 dataset """fb15k237""" +8 3 model """complex""" +8 3 loss """bceaftersigmoid""" +8 3 regularizer """no""" +8 3 optimizer """adam""" +8 3 training_loop """lcwa""" +8 3 evaluator """rankbased""" +8 4 dataset """fb15k237""" +8 4 model """complex""" +8 4 loss """bceaftersigmoid""" +8 4 regularizer """no""" +8 4 optimizer """adam""" +8 4 training_loop """lcwa""" +8 4 evaluator """rankbased""" +8 5 dataset """fb15k237""" +8 5 model """complex""" +8 5 loss """bceaftersigmoid""" +8 5 regularizer """no""" +8 5 optimizer """adam""" +8 5 training_loop """lcwa""" +8 5 evaluator """rankbased""" +8 6 dataset """fb15k237""" +8 6 model """complex""" +8 6 loss """bceaftersigmoid""" +8 6 regularizer """no""" +8 6 optimizer """adam""" +8 6 training_loop """lcwa""" +8 6 evaluator """rankbased""" +8 7 dataset """fb15k237""" +8 7 model """complex""" +8 7 loss """bceaftersigmoid""" +8 7 regularizer """no""" +8 7 optimizer """adam""" +8 7 training_loop """lcwa""" +8 7 evaluator """rankbased""" +9 1 model.embedding_dim 2.0 +9 1 optimizer.lr 0.030299338288548867 +9 1 training.batch_size 0.0 +9 1 training.label_smoothing 0.026043544379693043 +9 2 model.embedding_dim 2.0 +9 2 optimizer.lr 0.013231778649485848 +9 2 training.batch_size 2.0 +9 2 training.label_smoothing 0.015257552178618782 +9 3 model.embedding_dim 2.0 +9 3 optimizer.lr 0.04559024988876227 +9 3 training.batch_size 1.0 +9 3 training.label_smoothing 0.9088553109658272 +9 4 model.embedding_dim 0.0 +9 4 optimizer.lr 0.018063166059439664 +9 4 training.batch_size 1.0 +9 4 training.label_smoothing 0.009830914477207025 +9 5 model.embedding_dim 0.0 +9 5 optimizer.lr 0.0020767331520833215 +9 5 training.batch_size 2.0 +9 5 training.label_smoothing 0.02553521652522603 +9 6 
model.embedding_dim 0.0 +9 6 optimizer.lr 0.004685775150684246 +9 6 training.batch_size 1.0 +9 6 training.label_smoothing 0.025837667168296424 +9 7 model.embedding_dim 0.0 +9 7 optimizer.lr 0.0776434637174778 +9 7 training.batch_size 2.0 +9 7 training.label_smoothing 0.005509042063461489 +9 8 model.embedding_dim 2.0 +9 8 optimizer.lr 0.004809066135812368 +9 8 training.batch_size 0.0 +9 8 training.label_smoothing 0.0010618917649301504 +9 1 dataset """fb15k237""" +9 1 model """complex""" +9 1 loss """softplus""" +9 1 regularizer """no""" +9 1 optimizer """adam""" +9 1 training_loop """lcwa""" +9 1 evaluator """rankbased""" +9 2 dataset """fb15k237""" +9 2 model """complex""" +9 2 loss """softplus""" +9 2 regularizer """no""" +9 2 optimizer """adam""" +9 2 training_loop """lcwa""" +9 2 evaluator """rankbased""" +9 3 dataset """fb15k237""" +9 3 model """complex""" +9 3 loss """softplus""" +9 3 regularizer """no""" +9 3 optimizer """adam""" +9 3 training_loop """lcwa""" +9 3 evaluator """rankbased""" +9 4 dataset """fb15k237""" +9 4 model """complex""" +9 4 loss """softplus""" +9 4 regularizer """no""" +9 4 optimizer """adam""" +9 4 training_loop """lcwa""" +9 4 evaluator """rankbased""" +9 5 dataset """fb15k237""" +9 5 model """complex""" +9 5 loss """softplus""" +9 5 regularizer """no""" +9 5 optimizer """adam""" +9 5 training_loop """lcwa""" +9 5 evaluator """rankbased""" +9 6 dataset """fb15k237""" +9 6 model """complex""" +9 6 loss """softplus""" +9 6 regularizer """no""" +9 6 optimizer """adam""" +9 6 training_loop """lcwa""" +9 6 evaluator """rankbased""" +9 7 dataset """fb15k237""" +9 7 model """complex""" +9 7 loss """softplus""" +9 7 regularizer """no""" +9 7 optimizer """adam""" +9 7 training_loop """lcwa""" +9 7 evaluator """rankbased""" +9 8 dataset """fb15k237""" +9 8 model """complex""" +9 8 loss """softplus""" +9 8 regularizer """no""" +9 8 optimizer """adam""" +9 8 training_loop """lcwa""" +9 8 evaluator """rankbased""" +10 1 model.embedding_dim 0.0 +10 
1 optimizer.lr 0.001020781162492194 +10 1 training.batch_size 1.0 +10 1 training.label_smoothing 0.8074492001611917 +10 2 model.embedding_dim 0.0 +10 2 optimizer.lr 0.0036106286623297703 +10 2 training.batch_size 2.0 +10 2 training.label_smoothing 0.010925913785340177 +10 3 model.embedding_dim 2.0 +10 3 optimizer.lr 0.009416467638837984 +10 3 training.batch_size 1.0 +10 3 training.label_smoothing 0.058271639328505105 +10 4 model.embedding_dim 2.0 +10 4 optimizer.lr 0.0016993157929369677 +10 4 training.batch_size 1.0 +10 4 training.label_smoothing 0.06907159138867373 +10 5 model.embedding_dim 1.0 +10 5 optimizer.lr 0.019433921852471454 +10 5 training.batch_size 2.0 +10 5 training.label_smoothing 0.014523660588200739 +10 6 model.embedding_dim 1.0 +10 6 optimizer.lr 0.006867569797943046 +10 6 training.batch_size 2.0 +10 6 training.label_smoothing 0.7354366857078304 +10 7 model.embedding_dim 0.0 +10 7 optimizer.lr 0.006108002074877468 +10 7 training.batch_size 0.0 +10 7 training.label_smoothing 0.05334281560317004 +10 1 dataset """fb15k237""" +10 1 model """complex""" +10 1 loss """bceaftersigmoid""" +10 1 regularizer """no""" +10 1 optimizer """adam""" +10 1 training_loop """lcwa""" +10 1 evaluator """rankbased""" +10 2 dataset """fb15k237""" +10 2 model """complex""" +10 2 loss """bceaftersigmoid""" +10 2 regularizer """no""" +10 2 optimizer """adam""" +10 2 training_loop """lcwa""" +10 2 evaluator """rankbased""" +10 3 dataset """fb15k237""" +10 3 model """complex""" +10 3 loss """bceaftersigmoid""" +10 3 regularizer """no""" +10 3 optimizer """adam""" +10 3 training_loop """lcwa""" +10 3 evaluator """rankbased""" +10 4 dataset """fb15k237""" +10 4 model """complex""" +10 4 loss """bceaftersigmoid""" +10 4 regularizer """no""" +10 4 optimizer """adam""" +10 4 training_loop """lcwa""" +10 4 evaluator """rankbased""" +10 5 dataset """fb15k237""" +10 5 model """complex""" +10 5 loss """bceaftersigmoid""" +10 5 regularizer """no""" +10 5 optimizer """adam""" +10 5 
training_loop """lcwa""" +10 5 evaluator """rankbased""" +10 6 dataset """fb15k237""" +10 6 model """complex""" +10 6 loss """bceaftersigmoid""" +10 6 regularizer """no""" +10 6 optimizer """adam""" +10 6 training_loop """lcwa""" +10 6 evaluator """rankbased""" +10 7 dataset """fb15k237""" +10 7 model """complex""" +10 7 loss """bceaftersigmoid""" +10 7 regularizer """no""" +10 7 optimizer """adam""" +10 7 training_loop """lcwa""" +10 7 evaluator """rankbased""" +11 1 model.embedding_dim 1.0 +11 1 optimizer.lr 0.05078796770677431 +11 1 training.batch_size 2.0 +11 1 training.label_smoothing 0.002796979241804866 +11 2 model.embedding_dim 2.0 +11 2 optimizer.lr 0.03420673531272069 +11 2 training.batch_size 2.0 +11 2 training.label_smoothing 0.0032206905939693555 +11 3 model.embedding_dim 0.0 +11 3 optimizer.lr 0.08200434662040924 +11 3 training.batch_size 0.0 +11 3 training.label_smoothing 0.7963380264185074 +11 4 model.embedding_dim 2.0 +11 4 optimizer.lr 0.004135563913615666 +11 4 training.batch_size 0.0 +11 4 training.label_smoothing 0.1993474934635239 +11 5 model.embedding_dim 0.0 +11 5 optimizer.lr 0.06884300357160297 +11 5 training.batch_size 2.0 +11 5 training.label_smoothing 0.2325746278588569 +11 6 model.embedding_dim 2.0 +11 6 optimizer.lr 0.001182630961615016 +11 6 training.batch_size 0.0 +11 6 training.label_smoothing 0.28381875780513627 +11 7 model.embedding_dim 2.0 +11 7 optimizer.lr 0.006547249653873264 +11 7 training.batch_size 2.0 +11 7 training.label_smoothing 0.7668879640807164 +11 8 model.embedding_dim 2.0 +11 8 optimizer.lr 0.025876979352846923 +11 8 training.batch_size 2.0 +11 8 training.label_smoothing 0.26080197602036365 +11 9 model.embedding_dim 1.0 +11 9 optimizer.lr 0.008134287638529141 +11 9 training.batch_size 0.0 +11 9 training.label_smoothing 0.006662855150156815 +11 10 model.embedding_dim 1.0 +11 10 optimizer.lr 0.011381998374305972 +11 10 training.batch_size 1.0 +11 10 training.label_smoothing 0.03367350588063131 +11 11 
model.embedding_dim 0.0 +11 11 optimizer.lr 0.03650783257620589 +11 11 training.batch_size 2.0 +11 11 training.label_smoothing 0.001050525923662119 +11 1 dataset """fb15k237""" +11 1 model """complex""" +11 1 loss """softplus""" +11 1 regularizer """no""" +11 1 optimizer """adam""" +11 1 training_loop """lcwa""" +11 1 evaluator """rankbased""" +11 2 dataset """fb15k237""" +11 2 model """complex""" +11 2 loss """softplus""" +11 2 regularizer """no""" +11 2 optimizer """adam""" +11 2 training_loop """lcwa""" +11 2 evaluator """rankbased""" +11 3 dataset """fb15k237""" +11 3 model """complex""" +11 3 loss """softplus""" +11 3 regularizer """no""" +11 3 optimizer """adam""" +11 3 training_loop """lcwa""" +11 3 evaluator """rankbased""" +11 4 dataset """fb15k237""" +11 4 model """complex""" +11 4 loss """softplus""" +11 4 regularizer """no""" +11 4 optimizer """adam""" +11 4 training_loop """lcwa""" +11 4 evaluator """rankbased""" +11 5 dataset """fb15k237""" +11 5 model """complex""" +11 5 loss """softplus""" +11 5 regularizer """no""" +11 5 optimizer """adam""" +11 5 training_loop """lcwa""" +11 5 evaluator """rankbased""" +11 6 dataset """fb15k237""" +11 6 model """complex""" +11 6 loss """softplus""" +11 6 regularizer """no""" +11 6 optimizer """adam""" +11 6 training_loop """lcwa""" +11 6 evaluator """rankbased""" +11 7 dataset """fb15k237""" +11 7 model """complex""" +11 7 loss """softplus""" +11 7 regularizer """no""" +11 7 optimizer """adam""" +11 7 training_loop """lcwa""" +11 7 evaluator """rankbased""" +11 8 dataset """fb15k237""" +11 8 model """complex""" +11 8 loss """softplus""" +11 8 regularizer """no""" +11 8 optimizer """adam""" +11 8 training_loop """lcwa""" +11 8 evaluator """rankbased""" +11 9 dataset """fb15k237""" +11 9 model """complex""" +11 9 loss """softplus""" +11 9 regularizer """no""" +11 9 optimizer """adam""" +11 9 training_loop """lcwa""" +11 9 evaluator """rankbased""" +11 10 dataset """fb15k237""" +11 10 model """complex""" +11 10 loss 
"""softplus""" +11 10 regularizer """no""" +11 10 optimizer """adam""" +11 10 training_loop """lcwa""" +11 10 evaluator """rankbased""" +11 11 dataset """fb15k237""" +11 11 model """complex""" +11 11 loss """softplus""" +11 11 regularizer """no""" +11 11 optimizer """adam""" +11 11 training_loop """lcwa""" +11 11 evaluator """rankbased""" +12 1 model.embedding_dim 2.0 +12 1 optimizer.lr 0.015873657554281783 +12 1 training.batch_size 0.0 +12 1 training.label_smoothing 0.5638898465994975 +12 2 model.embedding_dim 1.0 +12 2 optimizer.lr 0.015806939110659283 +12 2 training.batch_size 2.0 +12 2 training.label_smoothing 0.18420226804873527 +12 3 model.embedding_dim 2.0 +12 3 optimizer.lr 0.007525067744232913 +12 3 training.batch_size 1.0 +12 3 training.label_smoothing 0.08094657004944494 +12 4 model.embedding_dim 0.0 +12 4 optimizer.lr 0.01730320943605772 +12 4 training.batch_size 1.0 +12 4 training.label_smoothing 0.0870842785316148 +12 5 model.embedding_dim 0.0 +12 5 optimizer.lr 0.005744250893729515 +12 5 training.batch_size 2.0 +12 5 training.label_smoothing 0.07174201286279182 +12 1 dataset """fb15k237""" +12 1 model """complex""" +12 1 loss """crossentropy""" +12 1 regularizer """no""" +12 1 optimizer """adam""" +12 1 training_loop """lcwa""" +12 1 evaluator """rankbased""" +12 2 dataset """fb15k237""" +12 2 model """complex""" +12 2 loss """crossentropy""" +12 2 regularizer """no""" +12 2 optimizer """adam""" +12 2 training_loop """lcwa""" +12 2 evaluator """rankbased""" +12 3 dataset """fb15k237""" +12 3 model """complex""" +12 3 loss """crossentropy""" +12 3 regularizer """no""" +12 3 optimizer """adam""" +12 3 training_loop """lcwa""" +12 3 evaluator """rankbased""" +12 4 dataset """fb15k237""" +12 4 model """complex""" +12 4 loss """crossentropy""" +12 4 regularizer """no""" +12 4 optimizer """adam""" +12 4 training_loop """lcwa""" +12 4 evaluator """rankbased""" +12 5 dataset """fb15k237""" +12 5 model """complex""" +12 5 loss """crossentropy""" +12 5 
regularizer """no""" +12 5 optimizer """adam""" +12 5 training_loop """lcwa""" +12 5 evaluator """rankbased""" +13 1 model.embedding_dim 2.0 +13 1 optimizer.lr 0.07336594870745249 +13 1 training.batch_size 0.0 +13 1 training.label_smoothing 0.7000480421607962 +13 2 model.embedding_dim 0.0 +13 2 optimizer.lr 0.001680768669872992 +13 2 training.batch_size 2.0 +13 2 training.label_smoothing 0.6636377345797028 +13 3 model.embedding_dim 2.0 +13 3 optimizer.lr 0.008795464995580123 +13 3 training.batch_size 1.0 +13 3 training.label_smoothing 0.0023968507749909597 +13 4 model.embedding_dim 0.0 +13 4 optimizer.lr 0.010515189772219964 +13 4 training.batch_size 0.0 +13 4 training.label_smoothing 0.03374490526864947 +13 5 model.embedding_dim 0.0 +13 5 optimizer.lr 0.0011748121133240933 +13 5 training.batch_size 1.0 +13 5 training.label_smoothing 0.6138070936451409 +13 6 model.embedding_dim 1.0 +13 6 optimizer.lr 0.008114578533000077 +13 6 training.batch_size 2.0 +13 6 training.label_smoothing 0.16628676788232175 +13 7 model.embedding_dim 2.0 +13 7 optimizer.lr 0.0020313663970877725 +13 7 training.batch_size 2.0 +13 7 training.label_smoothing 0.023418937512095082 +13 8 model.embedding_dim 1.0 +13 8 optimizer.lr 0.007544380717521153 +13 8 training.batch_size 0.0 +13 8 training.label_smoothing 0.18727706345985817 +13 1 dataset """fb15k237""" +13 1 model """complex""" +13 1 loss """crossentropy""" +13 1 regularizer """no""" +13 1 optimizer """adam""" +13 1 training_loop """lcwa""" +13 1 evaluator """rankbased""" +13 2 dataset """fb15k237""" +13 2 model """complex""" +13 2 loss """crossentropy""" +13 2 regularizer """no""" +13 2 optimizer """adam""" +13 2 training_loop """lcwa""" +13 2 evaluator """rankbased""" +13 3 dataset """fb15k237""" +13 3 model """complex""" +13 3 loss """crossentropy""" +13 3 regularizer """no""" +13 3 optimizer """adam""" +13 3 training_loop """lcwa""" +13 3 evaluator """rankbased""" +13 4 dataset """fb15k237""" +13 4 model """complex""" +13 4 loss 
"""crossentropy""" +13 4 regularizer """no""" +13 4 optimizer """adam""" +13 4 training_loop """lcwa""" +13 4 evaluator """rankbased""" +13 5 dataset """fb15k237""" +13 5 model """complex""" +13 5 loss """crossentropy""" +13 5 regularizer """no""" +13 5 optimizer """adam""" +13 5 training_loop """lcwa""" +13 5 evaluator """rankbased""" +13 6 dataset """fb15k237""" +13 6 model """complex""" +13 6 loss """crossentropy""" +13 6 regularizer """no""" +13 6 optimizer """adam""" +13 6 training_loop """lcwa""" +13 6 evaluator """rankbased""" +13 7 dataset """fb15k237""" +13 7 model """complex""" +13 7 loss """crossentropy""" +13 7 regularizer """no""" +13 7 optimizer """adam""" +13 7 training_loop """lcwa""" +13 7 evaluator """rankbased""" +13 8 dataset """fb15k237""" +13 8 model """complex""" +13 8 loss """crossentropy""" +13 8 regularizer """no""" +13 8 optimizer """adam""" +13 8 training_loop """lcwa""" +13 8 evaluator """rankbased""" +14 1 model.embedding_dim 1.0 +14 1 training.batch_size 1.0 +14 1 training.label_smoothing 0.06892528842220517 +14 2 model.embedding_dim 2.0 +14 2 training.batch_size 2.0 +14 2 training.label_smoothing 0.00886906072241482 +14 3 model.embedding_dim 2.0 +14 3 training.batch_size 1.0 +14 3 training.label_smoothing 0.05175798365678855 +14 4 model.embedding_dim 2.0 +14 4 training.batch_size 2.0 +14 4 training.label_smoothing 0.02953182275756783 +14 5 model.embedding_dim 0.0 +14 5 training.batch_size 0.0 +14 5 training.label_smoothing 0.9327775093646603 +14 6 model.embedding_dim 2.0 +14 6 training.batch_size 2.0 +14 6 training.label_smoothing 0.0052539518180969 +14 7 model.embedding_dim 1.0 +14 7 training.batch_size 1.0 +14 7 training.label_smoothing 0.004141624185836906 +14 8 model.embedding_dim 1.0 +14 8 training.batch_size 2.0 +14 8 training.label_smoothing 0.050300482863696525 +14 9 model.embedding_dim 1.0 +14 9 training.batch_size 2.0 +14 9 training.label_smoothing 0.0038888077668322385 +14 10 model.embedding_dim 2.0 +14 10 
training.batch_size 1.0 +14 10 training.label_smoothing 0.012512195024634394 +14 11 model.embedding_dim 1.0 +14 11 training.batch_size 0.0 +14 11 training.label_smoothing 0.0015578350893133122 +14 12 model.embedding_dim 0.0 +14 12 training.batch_size 1.0 +14 12 training.label_smoothing 0.009352007048836257 +14 13 model.embedding_dim 0.0 +14 13 training.batch_size 1.0 +14 13 training.label_smoothing 0.3862591476224448 +14 14 model.embedding_dim 0.0 +14 14 training.batch_size 1.0 +14 14 training.label_smoothing 0.008224836994956453 +14 15 model.embedding_dim 0.0 +14 15 training.batch_size 1.0 +14 15 training.label_smoothing 0.2949035871104255 +14 16 model.embedding_dim 2.0 +14 16 training.batch_size 0.0 +14 16 training.label_smoothing 0.012563534081665812 +14 17 model.embedding_dim 0.0 +14 17 training.batch_size 1.0 +14 17 training.label_smoothing 0.009784013344310198 +14 18 model.embedding_dim 0.0 +14 18 training.batch_size 0.0 +14 18 training.label_smoothing 0.024534739378114352 +14 19 model.embedding_dim 0.0 +14 19 training.batch_size 0.0 +14 19 training.label_smoothing 0.0014153891474666812 +14 20 model.embedding_dim 1.0 +14 20 training.batch_size 0.0 +14 20 training.label_smoothing 0.014922267376200221 +14 21 model.embedding_dim 0.0 +14 21 training.batch_size 0.0 +14 21 training.label_smoothing 0.1290271018953507 +14 22 model.embedding_dim 2.0 +14 22 training.batch_size 2.0 +14 22 training.label_smoothing 0.007887725702156614 +14 23 model.embedding_dim 0.0 +14 23 training.batch_size 1.0 +14 23 training.label_smoothing 0.010354005344298119 +14 24 model.embedding_dim 0.0 +14 24 training.batch_size 2.0 +14 24 training.label_smoothing 0.0665422778678533 +14 25 model.embedding_dim 0.0 +14 25 training.batch_size 2.0 +14 25 training.label_smoothing 0.002493913061757371 +14 26 model.embedding_dim 2.0 +14 26 training.batch_size 2.0 +14 26 training.label_smoothing 0.7217748130419587 +14 27 model.embedding_dim 1.0 +14 27 training.batch_size 1.0 +14 27 
training.label_smoothing 0.3502871576263855 +14 28 model.embedding_dim 0.0 +14 28 training.batch_size 0.0 +14 28 training.label_smoothing 0.18257524694900365 +14 29 model.embedding_dim 0.0 +14 29 training.batch_size 1.0 +14 29 training.label_smoothing 0.027494735744203795 +14 30 model.embedding_dim 0.0 +14 30 training.batch_size 0.0 +14 30 training.label_smoothing 0.10851827756672086 +14 31 model.embedding_dim 2.0 +14 31 training.batch_size 1.0 +14 31 training.label_smoothing 0.7393581121696607 +14 32 model.embedding_dim 0.0 +14 32 training.batch_size 0.0 +14 32 training.label_smoothing 0.014612323636168286 +14 33 model.embedding_dim 0.0 +14 33 training.batch_size 0.0 +14 33 training.label_smoothing 0.00140521242516022 +14 34 model.embedding_dim 0.0 +14 34 training.batch_size 1.0 +14 34 training.label_smoothing 0.1336381696062631 +14 35 model.embedding_dim 0.0 +14 35 training.batch_size 2.0 +14 35 training.label_smoothing 0.0020732163531655104 +14 36 model.embedding_dim 2.0 +14 36 training.batch_size 1.0 +14 36 training.label_smoothing 0.03917014815214868 +14 37 model.embedding_dim 1.0 +14 37 training.batch_size 1.0 +14 37 training.label_smoothing 0.0012411741600385625 +14 38 model.embedding_dim 0.0 +14 38 training.batch_size 0.0 +14 38 training.label_smoothing 0.025097875679617822 +14 39 model.embedding_dim 2.0 +14 39 training.batch_size 2.0 +14 39 training.label_smoothing 0.004800340964568611 +14 40 model.embedding_dim 0.0 +14 40 training.batch_size 2.0 +14 40 training.label_smoothing 0.2471157592039178 +14 41 model.embedding_dim 1.0 +14 41 training.batch_size 0.0 +14 41 training.label_smoothing 0.43185620463376434 +14 42 model.embedding_dim 2.0 +14 42 training.batch_size 0.0 +14 42 training.label_smoothing 0.31041247652651094 +14 43 model.embedding_dim 0.0 +14 43 training.batch_size 2.0 +14 43 training.label_smoothing 0.04627596257880317 +14 44 model.embedding_dim 2.0 +14 44 training.batch_size 1.0 +14 44 training.label_smoothing 0.2755177416029276 +14 45 
model.embedding_dim 0.0 +14 45 training.batch_size 2.0 +14 45 training.label_smoothing 0.4124643762736425 +14 46 model.embedding_dim 1.0 +14 46 training.batch_size 0.0 +14 46 training.label_smoothing 0.3552044246638563 +14 47 model.embedding_dim 1.0 +14 47 training.batch_size 2.0 +14 47 training.label_smoothing 0.022519454775195437 +14 48 model.embedding_dim 2.0 +14 48 training.batch_size 0.0 +14 48 training.label_smoothing 0.28158700528580294 +14 49 model.embedding_dim 1.0 +14 49 training.batch_size 1.0 +14 49 training.label_smoothing 0.042967693805160606 +14 50 model.embedding_dim 0.0 +14 50 training.batch_size 1.0 +14 50 training.label_smoothing 0.005624235886215987 +14 51 model.embedding_dim 0.0 +14 51 training.batch_size 0.0 +14 51 training.label_smoothing 0.5129899690157573 +14 52 model.embedding_dim 1.0 +14 52 training.batch_size 1.0 +14 52 training.label_smoothing 0.06946709921777496 +14 53 model.embedding_dim 2.0 +14 53 training.batch_size 2.0 +14 53 training.label_smoothing 0.020153894854084052 +14 54 model.embedding_dim 0.0 +14 54 training.batch_size 2.0 +14 54 training.label_smoothing 0.002002992759962149 +14 55 model.embedding_dim 0.0 +14 55 training.batch_size 2.0 +14 55 training.label_smoothing 0.011380952576835331 +14 56 model.embedding_dim 0.0 +14 56 training.batch_size 1.0 +14 56 training.label_smoothing 0.004678741907136634 +14 57 model.embedding_dim 1.0 +14 57 training.batch_size 2.0 +14 57 training.label_smoothing 0.00345849624311445 +14 58 model.embedding_dim 0.0 +14 58 training.batch_size 0.0 +14 58 training.label_smoothing 0.05274493419198379 +14 59 model.embedding_dim 2.0 +14 59 training.batch_size 0.0 +14 59 training.label_smoothing 0.07167884540001286 +14 60 model.embedding_dim 1.0 +14 60 training.batch_size 0.0 +14 60 training.label_smoothing 0.0046553875730405495 +14 61 model.embedding_dim 1.0 +14 61 training.batch_size 0.0 +14 61 training.label_smoothing 0.10909304423708353 +14 62 model.embedding_dim 2.0 +14 62 training.batch_size 2.0 
+14 62 training.label_smoothing 0.024328215420776806 +14 63 model.embedding_dim 1.0 +14 63 training.batch_size 2.0 +14 63 training.label_smoothing 0.011841983279976787 +14 64 model.embedding_dim 1.0 +14 64 training.batch_size 2.0 +14 64 training.label_smoothing 0.004716613962824964 +14 65 model.embedding_dim 2.0 +14 65 training.batch_size 2.0 +14 65 training.label_smoothing 0.1908212928412229 +14 66 model.embedding_dim 0.0 +14 66 training.batch_size 2.0 +14 66 training.label_smoothing 0.005951964393668785 +14 67 model.embedding_dim 1.0 +14 67 training.batch_size 0.0 +14 67 training.label_smoothing 0.5085382023204883 +14 68 model.embedding_dim 2.0 +14 68 training.batch_size 0.0 +14 68 training.label_smoothing 0.0011879379689894829 +14 69 model.embedding_dim 2.0 +14 69 training.batch_size 2.0 +14 69 training.label_smoothing 0.004321820623713291 +14 70 model.embedding_dim 2.0 +14 70 training.batch_size 1.0 +14 70 training.label_smoothing 0.008186105394095106 +14 71 model.embedding_dim 0.0 +14 71 training.batch_size 0.0 +14 71 training.label_smoothing 0.031087786605028898 +14 72 model.embedding_dim 0.0 +14 72 training.batch_size 1.0 +14 72 training.label_smoothing 0.035549138945500734 +14 73 model.embedding_dim 0.0 +14 73 training.batch_size 0.0 +14 73 training.label_smoothing 0.0037160636661676192 +14 74 model.embedding_dim 0.0 +14 74 training.batch_size 2.0 +14 74 training.label_smoothing 0.001665417108440853 +14 75 model.embedding_dim 2.0 +14 75 training.batch_size 2.0 +14 75 training.label_smoothing 0.2395681840836936 +14 76 model.embedding_dim 0.0 +14 76 training.batch_size 1.0 +14 76 training.label_smoothing 0.3136392550633321 +14 77 model.embedding_dim 1.0 +14 77 training.batch_size 1.0 +14 77 training.label_smoothing 0.159677816570101 +14 78 model.embedding_dim 1.0 +14 78 training.batch_size 0.0 +14 78 training.label_smoothing 0.009019478273277879 +14 79 model.embedding_dim 1.0 +14 79 training.batch_size 1.0 +14 79 training.label_smoothing 0.0017905071543136023 
+14 80 model.embedding_dim 1.0 +14 80 training.batch_size 0.0 +14 80 training.label_smoothing 0.014311035516997661 +14 81 model.embedding_dim 1.0 +14 81 training.batch_size 1.0 +14 81 training.label_smoothing 0.02816889515786771 +14 82 model.embedding_dim 2.0 +14 82 training.batch_size 0.0 +14 82 training.label_smoothing 0.0040454678125876825 +14 83 model.embedding_dim 2.0 +14 83 training.batch_size 2.0 +14 83 training.label_smoothing 0.0029475274670119514 +14 84 model.embedding_dim 1.0 +14 84 training.batch_size 2.0 +14 84 training.label_smoothing 0.012584690870893174 +14 85 model.embedding_dim 1.0 +14 85 training.batch_size 0.0 +14 85 training.label_smoothing 0.0014046081715440941 +14 86 model.embedding_dim 1.0 +14 86 training.batch_size 0.0 +14 86 training.label_smoothing 0.2158852500328064 +14 87 model.embedding_dim 0.0 +14 87 training.batch_size 0.0 +14 87 training.label_smoothing 0.023462789923517614 +14 88 model.embedding_dim 1.0 +14 88 training.batch_size 1.0 +14 88 training.label_smoothing 0.011333914578372123 +14 89 model.embedding_dim 0.0 +14 89 training.batch_size 1.0 +14 89 training.label_smoothing 0.005763483731576001 +14 90 model.embedding_dim 0.0 +14 90 training.batch_size 0.0 +14 90 training.label_smoothing 0.015059704969614299 +14 91 model.embedding_dim 2.0 +14 91 training.batch_size 1.0 +14 91 training.label_smoothing 0.007786292558321576 +14 92 model.embedding_dim 2.0 +14 92 training.batch_size 2.0 +14 92 training.label_smoothing 0.031078658229124787 +14 93 model.embedding_dim 1.0 +14 93 training.batch_size 0.0 +14 93 training.label_smoothing 0.007236917794727365 +14 94 model.embedding_dim 0.0 +14 94 training.batch_size 1.0 +14 94 training.label_smoothing 0.0023085053054208472 +14 95 model.embedding_dim 1.0 +14 95 training.batch_size 1.0 +14 95 training.label_smoothing 0.008920980832420176 +14 96 model.embedding_dim 2.0 +14 96 training.batch_size 1.0 +14 96 training.label_smoothing 0.013279228810289626 +14 97 model.embedding_dim 2.0 +14 97 
training.batch_size 2.0 +14 97 training.label_smoothing 0.10642281190911222 +14 98 model.embedding_dim 1.0 +14 98 training.batch_size 2.0 +14 98 training.label_smoothing 0.11094287419087413 +14 99 model.embedding_dim 2.0 +14 99 training.batch_size 2.0 +14 99 training.label_smoothing 0.0010577549401472326 +14 100 model.embedding_dim 0.0 +14 100 training.batch_size 2.0 +14 100 training.label_smoothing 0.1831360891392827 +14 1 dataset """kinships""" +14 1 model """complex""" +14 1 loss """crossentropy""" +14 1 regularizer """no""" +14 1 optimizer """adadelta""" +14 1 training_loop """lcwa""" +14 1 evaluator """rankbased""" +14 2 dataset """kinships""" +14 2 model """complex""" +14 2 loss """crossentropy""" +14 2 regularizer """no""" +14 2 optimizer """adadelta""" +14 2 training_loop """lcwa""" +14 2 evaluator """rankbased""" +14 3 dataset """kinships""" +14 3 model """complex""" +14 3 loss """crossentropy""" +14 3 regularizer """no""" +14 3 optimizer """adadelta""" +14 3 training_loop """lcwa""" +14 3 evaluator """rankbased""" +14 4 dataset """kinships""" +14 4 model """complex""" +14 4 loss """crossentropy""" +14 4 regularizer """no""" +14 4 optimizer """adadelta""" +14 4 training_loop """lcwa""" +14 4 evaluator """rankbased""" +14 5 dataset """kinships""" +14 5 model """complex""" +14 5 loss """crossentropy""" +14 5 regularizer """no""" +14 5 optimizer """adadelta""" +14 5 training_loop """lcwa""" +14 5 evaluator """rankbased""" +14 6 dataset """kinships""" +14 6 model """complex""" +14 6 loss """crossentropy""" +14 6 regularizer """no""" +14 6 optimizer """adadelta""" +14 6 training_loop """lcwa""" +14 6 evaluator """rankbased""" +14 7 dataset """kinships""" +14 7 model """complex""" +14 7 loss """crossentropy""" +14 7 regularizer """no""" +14 7 optimizer """adadelta""" +14 7 training_loop """lcwa""" +14 7 evaluator """rankbased""" +14 8 dataset """kinships""" +14 8 model """complex""" +14 8 loss """crossentropy""" +14 8 regularizer """no""" +14 8 optimizer 
"""adadelta""" +14 8 training_loop """lcwa""" +14 8 evaluator """rankbased""" +14 9 dataset """kinships""" +14 9 model """complex""" +14 9 loss """crossentropy""" +14 9 regularizer """no""" +14 9 optimizer """adadelta""" +14 9 training_loop """lcwa""" +14 9 evaluator """rankbased""" +14 10 dataset """kinships""" +14 10 model """complex""" +14 10 loss """crossentropy""" +14 10 regularizer """no""" +14 10 optimizer """adadelta""" +14 10 training_loop """lcwa""" +14 10 evaluator """rankbased""" +14 11 dataset """kinships""" +14 11 model """complex""" +14 11 loss """crossentropy""" +14 11 regularizer """no""" +14 11 optimizer """adadelta""" +14 11 training_loop """lcwa""" +14 11 evaluator """rankbased""" +14 12 dataset """kinships""" +14 12 model """complex""" +14 12 loss """crossentropy""" +14 12 regularizer """no""" +14 12 optimizer """adadelta""" +14 12 training_loop """lcwa""" +14 12 evaluator """rankbased""" +14 13 dataset """kinships""" +14 13 model """complex""" +14 13 loss """crossentropy""" +14 13 regularizer """no""" +14 13 optimizer """adadelta""" +14 13 training_loop """lcwa""" +14 13 evaluator """rankbased""" +14 14 dataset """kinships""" +14 14 model """complex""" +14 14 loss """crossentropy""" +14 14 regularizer """no""" +14 14 optimizer """adadelta""" +14 14 training_loop """lcwa""" +14 14 evaluator """rankbased""" +14 15 dataset """kinships""" +14 15 model """complex""" +14 15 loss """crossentropy""" +14 15 regularizer """no""" +14 15 optimizer """adadelta""" +14 15 training_loop """lcwa""" +14 15 evaluator """rankbased""" +14 16 dataset """kinships""" +14 16 model """complex""" +14 16 loss """crossentropy""" +14 16 regularizer """no""" +14 16 optimizer """adadelta""" +14 16 training_loop """lcwa""" +14 16 evaluator """rankbased""" +14 17 dataset """kinships""" +14 17 model """complex""" +14 17 loss """crossentropy""" +14 17 regularizer """no""" +14 17 optimizer """adadelta""" +14 17 training_loop """lcwa""" +14 17 evaluator """rankbased""" +14 18 
dataset """kinships""" +14 18 model """complex""" +14 18 loss """crossentropy""" +14 18 regularizer """no""" +14 18 optimizer """adadelta""" +14 18 training_loop """lcwa""" +14 18 evaluator """rankbased""" +14 19 dataset """kinships""" +14 19 model """complex""" +14 19 loss """crossentropy""" +14 19 regularizer """no""" +14 19 optimizer """adadelta""" +14 19 training_loop """lcwa""" +14 19 evaluator """rankbased""" +14 20 dataset """kinships""" +14 20 model """complex""" +14 20 loss """crossentropy""" +14 20 regularizer """no""" +14 20 optimizer """adadelta""" +14 20 training_loop """lcwa""" +14 20 evaluator """rankbased""" +14 21 dataset """kinships""" +14 21 model """complex""" +14 21 loss """crossentropy""" +14 21 regularizer """no""" +14 21 optimizer """adadelta""" +14 21 training_loop """lcwa""" +14 21 evaluator """rankbased""" +14 22 dataset """kinships""" +14 22 model """complex""" +14 22 loss """crossentropy""" +14 22 regularizer """no""" +14 22 optimizer """adadelta""" +14 22 training_loop """lcwa""" +14 22 evaluator """rankbased""" +14 23 dataset """kinships""" +14 23 model """complex""" +14 23 loss """crossentropy""" +14 23 regularizer """no""" +14 23 optimizer """adadelta""" +14 23 training_loop """lcwa""" +14 23 evaluator """rankbased""" +14 24 dataset """kinships""" +14 24 model """complex""" +14 24 loss """crossentropy""" +14 24 regularizer """no""" +14 24 optimizer """adadelta""" +14 24 training_loop """lcwa""" +14 24 evaluator """rankbased""" +14 25 dataset """kinships""" +14 25 model """complex""" +14 25 loss """crossentropy""" +14 25 regularizer """no""" +14 25 optimizer """adadelta""" +14 25 training_loop """lcwa""" +14 25 evaluator """rankbased""" +14 26 dataset """kinships""" +14 26 model """complex""" +14 26 loss """crossentropy""" +14 26 regularizer """no""" +14 26 optimizer """adadelta""" +14 26 training_loop """lcwa""" +14 26 evaluator """rankbased""" +14 27 dataset """kinships""" +14 27 model """complex""" +14 27 loss """crossentropy""" 
+14 27 regularizer """no""" +14 27 optimizer """adadelta""" +14 27 training_loop """lcwa""" +14 27 evaluator """rankbased""" +14 28 dataset """kinships""" +14 28 model """complex""" +14 28 loss """crossentropy""" +14 28 regularizer """no""" +14 28 optimizer """adadelta""" +14 28 training_loop """lcwa""" +14 28 evaluator """rankbased""" +14 29 dataset """kinships""" +14 29 model """complex""" +14 29 loss """crossentropy""" +14 29 regularizer """no""" +14 29 optimizer """adadelta""" +14 29 training_loop """lcwa""" +14 29 evaluator """rankbased""" +14 30 dataset """kinships""" +14 30 model """complex""" +14 30 loss """crossentropy""" +14 30 regularizer """no""" +14 30 optimizer """adadelta""" +14 30 training_loop """lcwa""" +14 30 evaluator """rankbased""" +14 31 dataset """kinships""" +14 31 model """complex""" +14 31 loss """crossentropy""" +14 31 regularizer """no""" +14 31 optimizer """adadelta""" +14 31 training_loop """lcwa""" +14 31 evaluator """rankbased""" +14 32 dataset """kinships""" +14 32 model """complex""" +14 32 loss """crossentropy""" +14 32 regularizer """no""" +14 32 optimizer """adadelta""" +14 32 training_loop """lcwa""" +14 32 evaluator """rankbased""" +14 33 dataset """kinships""" +14 33 model """complex""" +14 33 loss """crossentropy""" +14 33 regularizer """no""" +14 33 optimizer """adadelta""" +14 33 training_loop """lcwa""" +14 33 evaluator """rankbased""" +14 34 dataset """kinships""" +14 34 model """complex""" +14 34 loss """crossentropy""" +14 34 regularizer """no""" +14 34 optimizer """adadelta""" +14 34 training_loop """lcwa""" +14 34 evaluator """rankbased""" +14 35 dataset """kinships""" +14 35 model """complex""" +14 35 loss """crossentropy""" +14 35 regularizer """no""" +14 35 optimizer """adadelta""" +14 35 training_loop """lcwa""" +14 35 evaluator """rankbased""" +14 36 dataset """kinships""" +14 36 model """complex""" +14 36 loss """crossentropy""" +14 36 regularizer """no""" +14 36 optimizer """adadelta""" +14 36 training_loop 
"""lcwa""" +14 36 evaluator """rankbased""" +14 37 dataset """kinships""" +14 37 model """complex""" +14 37 loss """crossentropy""" +14 37 regularizer """no""" +14 37 optimizer """adadelta""" +14 37 training_loop """lcwa""" +14 37 evaluator """rankbased""" +14 38 dataset """kinships""" +14 38 model """complex""" +14 38 loss """crossentropy""" +14 38 regularizer """no""" +14 38 optimizer """adadelta""" +14 38 training_loop """lcwa""" +14 38 evaluator """rankbased""" +14 39 dataset """kinships""" +14 39 model """complex""" +14 39 loss """crossentropy""" +14 39 regularizer """no""" +14 39 optimizer """adadelta""" +14 39 training_loop """lcwa""" +14 39 evaluator """rankbased""" +14 40 dataset """kinships""" +14 40 model """complex""" +14 40 loss """crossentropy""" +14 40 regularizer """no""" +14 40 optimizer """adadelta""" +14 40 training_loop """lcwa""" +14 40 evaluator """rankbased""" +14 41 dataset """kinships""" +14 41 model """complex""" +14 41 loss """crossentropy""" +14 41 regularizer """no""" +14 41 optimizer """adadelta""" +14 41 training_loop """lcwa""" +14 41 evaluator """rankbased""" +14 42 dataset """kinships""" +14 42 model """complex""" +14 42 loss """crossentropy""" +14 42 regularizer """no""" +14 42 optimizer """adadelta""" +14 42 training_loop """lcwa""" +14 42 evaluator """rankbased""" +14 43 dataset """kinships""" +14 43 model """complex""" +14 43 loss """crossentropy""" +14 43 regularizer """no""" +14 43 optimizer """adadelta""" +14 43 training_loop """lcwa""" +14 43 evaluator """rankbased""" +14 44 dataset """kinships""" +14 44 model """complex""" +14 44 loss """crossentropy""" +14 44 regularizer """no""" +14 44 optimizer """adadelta""" +14 44 training_loop """lcwa""" +14 44 evaluator """rankbased""" +14 45 dataset """kinships""" +14 45 model """complex""" +14 45 loss """crossentropy""" +14 45 regularizer """no""" +14 45 optimizer """adadelta""" +14 45 training_loop """lcwa""" +14 45 evaluator """rankbased""" +14 46 dataset """kinships""" +14 46 
model """complex""" +14 46 loss """crossentropy""" +14 46 regularizer """no""" +14 46 optimizer """adadelta""" +14 46 training_loop """lcwa""" +14 46 evaluator """rankbased""" +14 47 dataset """kinships""" +14 47 model """complex""" +14 47 loss """crossentropy""" +14 47 regularizer """no""" +14 47 optimizer """adadelta""" +14 47 training_loop """lcwa""" +14 47 evaluator """rankbased""" +14 48 dataset """kinships""" +14 48 model """complex""" +14 48 loss """crossentropy""" +14 48 regularizer """no""" +14 48 optimizer """adadelta""" +14 48 training_loop """lcwa""" +14 48 evaluator """rankbased""" +14 49 dataset """kinships""" +14 49 model """complex""" +14 49 loss """crossentropy""" +14 49 regularizer """no""" +14 49 optimizer """adadelta""" +14 49 training_loop """lcwa""" +14 49 evaluator """rankbased""" +14 50 dataset """kinships""" +14 50 model """complex""" +14 50 loss """crossentropy""" +14 50 regularizer """no""" +14 50 optimizer """adadelta""" +14 50 training_loop """lcwa""" +14 50 evaluator """rankbased""" +14 51 dataset """kinships""" +14 51 model """complex""" +14 51 loss """crossentropy""" +14 51 regularizer """no""" +14 51 optimizer """adadelta""" +14 51 training_loop """lcwa""" +14 51 evaluator """rankbased""" +14 52 dataset """kinships""" +14 52 model """complex""" +14 52 loss """crossentropy""" +14 52 regularizer """no""" +14 52 optimizer """adadelta""" +14 52 training_loop """lcwa""" +14 52 evaluator """rankbased""" +14 53 dataset """kinships""" +14 53 model """complex""" +14 53 loss """crossentropy""" +14 53 regularizer """no""" +14 53 optimizer """adadelta""" +14 53 training_loop """lcwa""" +14 53 evaluator """rankbased""" +14 54 dataset """kinships""" +14 54 model """complex""" +14 54 loss """crossentropy""" +14 54 regularizer """no""" +14 54 optimizer """adadelta""" +14 54 training_loop """lcwa""" +14 54 evaluator """rankbased""" +14 55 dataset """kinships""" +14 55 model """complex""" +14 55 loss """crossentropy""" +14 55 regularizer """no""" +14 
55 optimizer """adadelta""" +14 55 training_loop """lcwa""" +14 55 evaluator """rankbased""" +14 56 dataset """kinships""" +14 56 model """complex""" +14 56 loss """crossentropy""" +14 56 regularizer """no""" +14 56 optimizer """adadelta""" +14 56 training_loop """lcwa""" +14 56 evaluator """rankbased""" +14 57 dataset """kinships""" +14 57 model """complex""" +14 57 loss """crossentropy""" +14 57 regularizer """no""" +14 57 optimizer """adadelta""" +14 57 training_loop """lcwa""" +14 57 evaluator """rankbased""" +14 58 dataset """kinships""" +14 58 model """complex""" +14 58 loss """crossentropy""" +14 58 regularizer """no""" +14 58 optimizer """adadelta""" +14 58 training_loop """lcwa""" +14 58 evaluator """rankbased""" +14 59 dataset """kinships""" +14 59 model """complex""" +14 59 loss """crossentropy""" +14 59 regularizer """no""" +14 59 optimizer """adadelta""" +14 59 training_loop """lcwa""" +14 59 evaluator """rankbased""" +14 60 dataset """kinships""" +14 60 model """complex""" +14 60 loss """crossentropy""" +14 60 regularizer """no""" +14 60 optimizer """adadelta""" +14 60 training_loop """lcwa""" +14 60 evaluator """rankbased""" +14 61 dataset """kinships""" +14 61 model """complex""" +14 61 loss """crossentropy""" +14 61 regularizer """no""" +14 61 optimizer """adadelta""" +14 61 training_loop """lcwa""" +14 61 evaluator """rankbased""" +14 62 dataset """kinships""" +14 62 model """complex""" +14 62 loss """crossentropy""" +14 62 regularizer """no""" +14 62 optimizer """adadelta""" +14 62 training_loop """lcwa""" +14 62 evaluator """rankbased""" +14 63 dataset """kinships""" +14 63 model """complex""" +14 63 loss """crossentropy""" +14 63 regularizer """no""" +14 63 optimizer """adadelta""" +14 63 training_loop """lcwa""" +14 63 evaluator """rankbased""" +14 64 dataset """kinships""" +14 64 model """complex""" +14 64 loss """crossentropy""" +14 64 regularizer """no""" +14 64 optimizer """adadelta""" +14 64 training_loop """lcwa""" +14 64 evaluator 
"""rankbased""" +14 65 dataset """kinships""" +14 65 model """complex""" +14 65 loss """crossentropy""" +14 65 regularizer """no""" +14 65 optimizer """adadelta""" +14 65 training_loop """lcwa""" +14 65 evaluator """rankbased""" +14 66 dataset """kinships""" +14 66 model """complex""" +14 66 loss """crossentropy""" +14 66 regularizer """no""" +14 66 optimizer """adadelta""" +14 66 training_loop """lcwa""" +14 66 evaluator """rankbased""" +14 67 dataset """kinships""" +14 67 model """complex""" +14 67 loss """crossentropy""" +14 67 regularizer """no""" +14 67 optimizer """adadelta""" +14 67 training_loop """lcwa""" +14 67 evaluator """rankbased""" +14 68 dataset """kinships""" +14 68 model """complex""" +14 68 loss """crossentropy""" +14 68 regularizer """no""" +14 68 optimizer """adadelta""" +14 68 training_loop """lcwa""" +14 68 evaluator """rankbased""" +14 69 dataset """kinships""" +14 69 model """complex""" +14 69 loss """crossentropy""" +14 69 regularizer """no""" +14 69 optimizer """adadelta""" +14 69 training_loop """lcwa""" +14 69 evaluator """rankbased""" +14 70 dataset """kinships""" +14 70 model """complex""" +14 70 loss """crossentropy""" +14 70 regularizer """no""" +14 70 optimizer """adadelta""" +14 70 training_loop """lcwa""" +14 70 evaluator """rankbased""" +14 71 dataset """kinships""" +14 71 model """complex""" +14 71 loss """crossentropy""" +14 71 regularizer """no""" +14 71 optimizer """adadelta""" +14 71 training_loop """lcwa""" +14 71 evaluator """rankbased""" +14 72 dataset """kinships""" +14 72 model """complex""" +14 72 loss """crossentropy""" +14 72 regularizer """no""" +14 72 optimizer """adadelta""" +14 72 training_loop """lcwa""" +14 72 evaluator """rankbased""" +14 73 dataset """kinships""" +14 73 model """complex""" +14 73 loss """crossentropy""" +14 73 regularizer """no""" +14 73 optimizer """adadelta""" +14 73 training_loop """lcwa""" +14 73 evaluator """rankbased""" +14 74 dataset """kinships""" +14 74 model """complex""" +14 74 
loss """crossentropy""" +14 74 regularizer """no""" +14 74 optimizer """adadelta""" +14 74 training_loop """lcwa""" +14 74 evaluator """rankbased""" +14 75 dataset """kinships""" +14 75 model """complex""" +14 75 loss """crossentropy""" +14 75 regularizer """no""" +14 75 optimizer """adadelta""" +14 75 training_loop """lcwa""" +14 75 evaluator """rankbased""" +14 76 dataset """kinships""" +14 76 model """complex""" +14 76 loss """crossentropy""" +14 76 regularizer """no""" +14 76 optimizer """adadelta""" +14 76 training_loop """lcwa""" +14 76 evaluator """rankbased""" +14 77 dataset """kinships""" +14 77 model """complex""" +14 77 loss """crossentropy""" +14 77 regularizer """no""" +14 77 optimizer """adadelta""" +14 77 training_loop """lcwa""" +14 77 evaluator """rankbased""" +14 78 dataset """kinships""" +14 78 model """complex""" +14 78 loss """crossentropy""" +14 78 regularizer """no""" +14 78 optimizer """adadelta""" +14 78 training_loop """lcwa""" +14 78 evaluator """rankbased""" +14 79 dataset """kinships""" +14 79 model """complex""" +14 79 loss """crossentropy""" +14 79 regularizer """no""" +14 79 optimizer """adadelta""" +14 79 training_loop """lcwa""" +14 79 evaluator """rankbased""" +14 80 dataset """kinships""" +14 80 model """complex""" +14 80 loss """crossentropy""" +14 80 regularizer """no""" +14 80 optimizer """adadelta""" +14 80 training_loop """lcwa""" +14 80 evaluator """rankbased""" +14 81 dataset """kinships""" +14 81 model """complex""" +14 81 loss """crossentropy""" +14 81 regularizer """no""" +14 81 optimizer """adadelta""" +14 81 training_loop """lcwa""" +14 81 evaluator """rankbased""" +14 82 dataset """kinships""" +14 82 model """complex""" +14 82 loss """crossentropy""" +14 82 regularizer """no""" +14 82 optimizer """adadelta""" +14 82 training_loop """lcwa""" +14 82 evaluator """rankbased""" +14 83 dataset """kinships""" +14 83 model """complex""" +14 83 loss """crossentropy""" +14 83 regularizer """no""" +14 83 optimizer 
"""adadelta""" +14 83 training_loop """lcwa""" +14 83 evaluator """rankbased""" +14 84 dataset """kinships""" +14 84 model """complex""" +14 84 loss """crossentropy""" +14 84 regularizer """no""" +14 84 optimizer """adadelta""" +14 84 training_loop """lcwa""" +14 84 evaluator """rankbased""" +14 85 dataset """kinships""" +14 85 model """complex""" +14 85 loss """crossentropy""" +14 85 regularizer """no""" +14 85 optimizer """adadelta""" +14 85 training_loop """lcwa""" +14 85 evaluator """rankbased""" +14 86 dataset """kinships""" +14 86 model """complex""" +14 86 loss """crossentropy""" +14 86 regularizer """no""" +14 86 optimizer """adadelta""" +14 86 training_loop """lcwa""" +14 86 evaluator """rankbased""" +14 87 dataset """kinships""" +14 87 model """complex""" +14 87 loss """crossentropy""" +14 87 regularizer """no""" +14 87 optimizer """adadelta""" +14 87 training_loop """lcwa""" +14 87 evaluator """rankbased""" +14 88 dataset """kinships""" +14 88 model """complex""" +14 88 loss """crossentropy""" +14 88 regularizer """no""" +14 88 optimizer """adadelta""" +14 88 training_loop """lcwa""" +14 88 evaluator """rankbased""" +14 89 dataset """kinships""" +14 89 model """complex""" +14 89 loss """crossentropy""" +14 89 regularizer """no""" +14 89 optimizer """adadelta""" +14 89 training_loop """lcwa""" +14 89 evaluator """rankbased""" +14 90 dataset """kinships""" +14 90 model """complex""" +14 90 loss """crossentropy""" +14 90 regularizer """no""" +14 90 optimizer """adadelta""" +14 90 training_loop """lcwa""" +14 90 evaluator """rankbased""" +14 91 dataset """kinships""" +14 91 model """complex""" +14 91 loss """crossentropy""" +14 91 regularizer """no""" +14 91 optimizer """adadelta""" +14 91 training_loop """lcwa""" +14 91 evaluator """rankbased""" +14 92 dataset """kinships""" +14 92 model """complex""" +14 92 loss """crossentropy""" +14 92 regularizer """no""" +14 92 optimizer """adadelta""" +14 92 training_loop """lcwa""" +14 92 evaluator """rankbased""" 
+14 93 dataset """kinships""" +14 93 model """complex""" +14 93 loss """crossentropy""" +14 93 regularizer """no""" +14 93 optimizer """adadelta""" +14 93 training_loop """lcwa""" +14 93 evaluator """rankbased""" +14 94 dataset """kinships""" +14 94 model """complex""" +14 94 loss """crossentropy""" +14 94 regularizer """no""" +14 94 optimizer """adadelta""" +14 94 training_loop """lcwa""" +14 94 evaluator """rankbased""" +14 95 dataset """kinships""" +14 95 model """complex""" +14 95 loss """crossentropy""" +14 95 regularizer """no""" +14 95 optimizer """adadelta""" +14 95 training_loop """lcwa""" +14 95 evaluator """rankbased""" +14 96 dataset """kinships""" +14 96 model """complex""" +14 96 loss """crossentropy""" +14 96 regularizer """no""" +14 96 optimizer """adadelta""" +14 96 training_loop """lcwa""" +14 96 evaluator """rankbased""" +14 97 dataset """kinships""" +14 97 model """complex""" +14 97 loss """crossentropy""" +14 97 regularizer """no""" +14 97 optimizer """adadelta""" +14 97 training_loop """lcwa""" +14 97 evaluator """rankbased""" +14 98 dataset """kinships""" +14 98 model """complex""" +14 98 loss """crossentropy""" +14 98 regularizer """no""" +14 98 optimizer """adadelta""" +14 98 training_loop """lcwa""" +14 98 evaluator """rankbased""" +14 99 dataset """kinships""" +14 99 model """complex""" +14 99 loss """crossentropy""" +14 99 regularizer """no""" +14 99 optimizer """adadelta""" +14 99 training_loop """lcwa""" +14 99 evaluator """rankbased""" +14 100 dataset """kinships""" +14 100 model """complex""" +14 100 loss """crossentropy""" +14 100 regularizer """no""" +14 100 optimizer """adadelta""" +14 100 training_loop """lcwa""" +14 100 evaluator """rankbased""" +15 1 model.embedding_dim 0.0 +15 1 training.batch_size 0.0 +15 1 training.label_smoothing 0.18715744695563064 +15 2 model.embedding_dim 2.0 +15 2 training.batch_size 1.0 +15 2 training.label_smoothing 0.0020028065806875148 +15 3 model.embedding_dim 1.0 +15 3 training.batch_size 0.0 +15 
3 training.label_smoothing 0.10269719213395129 +15 4 model.embedding_dim 2.0 +15 4 training.batch_size 2.0 +15 4 training.label_smoothing 0.05955666663709975 +15 5 model.embedding_dim 0.0 +15 5 training.batch_size 2.0 +15 5 training.label_smoothing 0.014536962504356517 +15 6 model.embedding_dim 0.0 +15 6 training.batch_size 0.0 +15 6 training.label_smoothing 0.002952666728807416 +15 7 model.embedding_dim 0.0 +15 7 training.batch_size 1.0 +15 7 training.label_smoothing 0.0112111505648737 +15 8 model.embedding_dim 2.0 +15 8 training.batch_size 0.0 +15 8 training.label_smoothing 0.0053980012703697495 +15 9 model.embedding_dim 0.0 +15 9 training.batch_size 1.0 +15 9 training.label_smoothing 0.016997915752883168 +15 10 model.embedding_dim 2.0 +15 10 training.batch_size 2.0 +15 10 training.label_smoothing 0.04822983762209544 +15 11 model.embedding_dim 1.0 +15 11 training.batch_size 2.0 +15 11 training.label_smoothing 0.6844770603147472 +15 12 model.embedding_dim 2.0 +15 12 training.batch_size 0.0 +15 12 training.label_smoothing 0.0020159490037848222 +15 13 model.embedding_dim 2.0 +15 13 training.batch_size 1.0 +15 13 training.label_smoothing 0.015464571683392485 +15 14 model.embedding_dim 2.0 +15 14 training.batch_size 1.0 +15 14 training.label_smoothing 0.0032239919019585784 +15 15 model.embedding_dim 2.0 +15 15 training.batch_size 1.0 +15 15 training.label_smoothing 0.756549977104405 +15 16 model.embedding_dim 2.0 +15 16 training.batch_size 2.0 +15 16 training.label_smoothing 0.4632141658456149 +15 17 model.embedding_dim 1.0 +15 17 training.batch_size 2.0 +15 17 training.label_smoothing 0.08366040847266121 +15 18 model.embedding_dim 0.0 +15 18 training.batch_size 2.0 +15 18 training.label_smoothing 0.6905260382005951 +15 19 model.embedding_dim 1.0 +15 19 training.batch_size 2.0 +15 19 training.label_smoothing 0.08592827183397102 +15 20 model.embedding_dim 1.0 +15 20 training.batch_size 1.0 +15 20 training.label_smoothing 0.03086912074341424 +15 21 model.embedding_dim 
1.0 +15 21 training.batch_size 2.0 +15 21 training.label_smoothing 0.0033211194115488427 +15 22 model.embedding_dim 0.0 +15 22 training.batch_size 2.0 +15 22 training.label_smoothing 0.06501921702808194 +15 23 model.embedding_dim 2.0 +15 23 training.batch_size 2.0 +15 23 training.label_smoothing 0.07857283930863813 +15 24 model.embedding_dim 1.0 +15 24 training.batch_size 0.0 +15 24 training.label_smoothing 0.002897731347101516 +15 25 model.embedding_dim 1.0 +15 25 training.batch_size 2.0 +15 25 training.label_smoothing 0.0278870968558719 +15 26 model.embedding_dim 2.0 +15 26 training.batch_size 2.0 +15 26 training.label_smoothing 0.4510388552324105 +15 27 model.embedding_dim 0.0 +15 27 training.batch_size 1.0 +15 27 training.label_smoothing 0.019065402663596076 +15 28 model.embedding_dim 2.0 +15 28 training.batch_size 1.0 +15 28 training.label_smoothing 0.6558265797518343 +15 29 model.embedding_dim 2.0 +15 29 training.batch_size 0.0 +15 29 training.label_smoothing 0.7538441899458264 +15 30 model.embedding_dim 2.0 +15 30 training.batch_size 2.0 +15 30 training.label_smoothing 0.36633889743468384 +15 31 model.embedding_dim 2.0 +15 31 training.batch_size 0.0 +15 31 training.label_smoothing 0.00917419400705371 +15 32 model.embedding_dim 1.0 +15 32 training.batch_size 0.0 +15 32 training.label_smoothing 0.04536950978001183 +15 33 model.embedding_dim 2.0 +15 33 training.batch_size 1.0 +15 33 training.label_smoothing 0.019691872848996327 +15 34 model.embedding_dim 0.0 +15 34 training.batch_size 1.0 +15 34 training.label_smoothing 0.02112151459543635 +15 35 model.embedding_dim 2.0 +15 35 training.batch_size 2.0 +15 35 training.label_smoothing 0.0730595390357329 +15 36 model.embedding_dim 2.0 +15 36 training.batch_size 1.0 +15 36 training.label_smoothing 0.04738839826438132 +15 37 model.embedding_dim 0.0 +15 37 training.batch_size 2.0 +15 37 training.label_smoothing 0.004595098577263557 +15 38 model.embedding_dim 0.0 +15 38 training.batch_size 0.0 +15 38 
training.label_smoothing 0.022529706709570208 +15 39 model.embedding_dim 1.0 +15 39 training.batch_size 0.0 +15 39 training.label_smoothing 0.21973059328794567 +15 40 model.embedding_dim 2.0 +15 40 training.batch_size 1.0 +15 40 training.label_smoothing 0.02675841538888125 +15 41 model.embedding_dim 1.0 +15 41 training.batch_size 0.0 +15 41 training.label_smoothing 0.031337163269605756 +15 42 model.embedding_dim 2.0 +15 42 training.batch_size 0.0 +15 42 training.label_smoothing 0.0016742861888173507 +15 43 model.embedding_dim 0.0 +15 43 training.batch_size 0.0 +15 43 training.label_smoothing 0.002280749153130397 +15 44 model.embedding_dim 0.0 +15 44 training.batch_size 0.0 +15 44 training.label_smoothing 0.015213497667218134 +15 45 model.embedding_dim 0.0 +15 45 training.batch_size 0.0 +15 45 training.label_smoothing 0.017821256925656222 +15 46 model.embedding_dim 2.0 +15 46 training.batch_size 0.0 +15 46 training.label_smoothing 0.08709401042519298 +15 47 model.embedding_dim 0.0 +15 47 training.batch_size 1.0 +15 47 training.label_smoothing 0.2249473718353018 +15 48 model.embedding_dim 2.0 +15 48 training.batch_size 0.0 +15 48 training.label_smoothing 0.14609736214256822 +15 49 model.embedding_dim 0.0 +15 49 training.batch_size 2.0 +15 49 training.label_smoothing 0.02303290258176453 +15 50 model.embedding_dim 1.0 +15 50 training.batch_size 2.0 +15 50 training.label_smoothing 0.31563330856800026 +15 51 model.embedding_dim 2.0 +15 51 training.batch_size 1.0 +15 51 training.label_smoothing 0.0018314216434194835 +15 52 model.embedding_dim 1.0 +15 52 training.batch_size 1.0 +15 52 training.label_smoothing 0.14928413924275818 +15 53 model.embedding_dim 2.0 +15 53 training.batch_size 2.0 +15 53 training.label_smoothing 0.0021871180951583188 +15 54 model.embedding_dim 0.0 +15 54 training.batch_size 0.0 +15 54 training.label_smoothing 0.07572661239335107 +15 55 model.embedding_dim 2.0 +15 55 training.batch_size 2.0 +15 55 training.label_smoothing 0.35912281809269747 +15 56 
model.embedding_dim 2.0 +15 56 training.batch_size 0.0 +15 56 training.label_smoothing 0.9968678612811086 +15 57 model.embedding_dim 1.0 +15 57 training.batch_size 1.0 +15 57 training.label_smoothing 0.09591041513714964 +15 58 model.embedding_dim 1.0 +15 58 training.batch_size 0.0 +15 58 training.label_smoothing 0.09709844931851264 +15 59 model.embedding_dim 1.0 +15 59 training.batch_size 2.0 +15 59 training.label_smoothing 0.03948241869325503 +15 60 model.embedding_dim 0.0 +15 60 training.batch_size 1.0 +15 60 training.label_smoothing 0.002076059312735086 +15 61 model.embedding_dim 2.0 +15 61 training.batch_size 1.0 +15 61 training.label_smoothing 0.7271215020211553 +15 62 model.embedding_dim 2.0 +15 62 training.batch_size 0.0 +15 62 training.label_smoothing 0.266366115068129 +15 63 model.embedding_dim 1.0 +15 63 training.batch_size 1.0 +15 63 training.label_smoothing 0.03737643116450199 +15 64 model.embedding_dim 0.0 +15 64 training.batch_size 2.0 +15 64 training.label_smoothing 0.001920035570082574 +15 65 model.embedding_dim 2.0 +15 65 training.batch_size 0.0 +15 65 training.label_smoothing 0.006702928968218481 +15 66 model.embedding_dim 1.0 +15 66 training.batch_size 0.0 +15 66 training.label_smoothing 0.013842224237478514 +15 67 model.embedding_dim 0.0 +15 67 training.batch_size 1.0 +15 67 training.label_smoothing 0.0019028013644633367 +15 68 model.embedding_dim 2.0 +15 68 training.batch_size 2.0 +15 68 training.label_smoothing 0.060480544732876826 +15 69 model.embedding_dim 0.0 +15 69 training.batch_size 1.0 +15 69 training.label_smoothing 0.009318515120433767 +15 70 model.embedding_dim 1.0 +15 70 training.batch_size 2.0 +15 70 training.label_smoothing 0.7995960731717685 +15 71 model.embedding_dim 2.0 +15 71 training.batch_size 1.0 +15 71 training.label_smoothing 0.0034395322875227334 +15 72 model.embedding_dim 1.0 +15 72 training.batch_size 2.0 +15 72 training.label_smoothing 0.17063005046776505 +15 73 model.embedding_dim 0.0 +15 73 training.batch_size 2.0 
+15 73 training.label_smoothing 0.007248189832218834 +15 74 model.embedding_dim 0.0 +15 74 training.batch_size 0.0 +15 74 training.label_smoothing 0.01653581473220451 +15 75 model.embedding_dim 0.0 +15 75 training.batch_size 0.0 +15 75 training.label_smoothing 0.002841223453618987 +15 76 model.embedding_dim 2.0 +15 76 training.batch_size 0.0 +15 76 training.label_smoothing 0.08187854581322014 +15 77 model.embedding_dim 2.0 +15 77 training.batch_size 0.0 +15 77 training.label_smoothing 0.16780031095213663 +15 78 model.embedding_dim 0.0 +15 78 training.batch_size 1.0 +15 78 training.label_smoothing 0.6670769751327261 +15 79 model.embedding_dim 2.0 +15 79 training.batch_size 0.0 +15 79 training.label_smoothing 0.21550958949683657 +15 80 model.embedding_dim 1.0 +15 80 training.batch_size 1.0 +15 80 training.label_smoothing 0.8456076526074877 +15 81 model.embedding_dim 0.0 +15 81 training.batch_size 2.0 +15 81 training.label_smoothing 0.9155857078755647 +15 82 model.embedding_dim 1.0 +15 82 training.batch_size 2.0 +15 82 training.label_smoothing 0.02042871827732603 +15 83 model.embedding_dim 1.0 +15 83 training.batch_size 0.0 +15 83 training.label_smoothing 0.5525136267699248 +15 84 model.embedding_dim 0.0 +15 84 training.batch_size 2.0 +15 84 training.label_smoothing 0.1385515356600509 +15 85 model.embedding_dim 1.0 +15 85 training.batch_size 2.0 +15 85 training.label_smoothing 0.027208572757186356 +15 86 model.embedding_dim 0.0 +15 86 training.batch_size 0.0 +15 86 training.label_smoothing 0.26670423631420453 +15 87 model.embedding_dim 2.0 +15 87 training.batch_size 2.0 +15 87 training.label_smoothing 0.005074485134402961 +15 88 model.embedding_dim 2.0 +15 88 training.batch_size 0.0 +15 88 training.label_smoothing 0.002073628431045691 +15 89 model.embedding_dim 1.0 +15 89 training.batch_size 2.0 +15 89 training.label_smoothing 0.003323823247477158 +15 90 model.embedding_dim 0.0 +15 90 training.batch_size 1.0 +15 90 training.label_smoothing 0.11614426980672045 +15 91 
model.embedding_dim 1.0 +15 91 training.batch_size 2.0 +15 91 training.label_smoothing 0.0040570372630526486 +15 92 model.embedding_dim 0.0 +15 92 training.batch_size 0.0 +15 92 training.label_smoothing 0.6597664822535754 +15 93 model.embedding_dim 0.0 +15 93 training.batch_size 0.0 +15 93 training.label_smoothing 0.010959321990343388 +15 94 model.embedding_dim 0.0 +15 94 training.batch_size 1.0 +15 94 training.label_smoothing 0.042500607223560666 +15 95 model.embedding_dim 2.0 +15 95 training.batch_size 2.0 +15 95 training.label_smoothing 0.4939102438304145 +15 96 model.embedding_dim 0.0 +15 96 training.batch_size 1.0 +15 96 training.label_smoothing 0.554925392921048 +15 97 model.embedding_dim 2.0 +15 97 training.batch_size 1.0 +15 97 training.label_smoothing 0.021990971845087758 +15 98 model.embedding_dim 0.0 +15 98 training.batch_size 2.0 +15 98 training.label_smoothing 0.05779629183982823 +15 99 model.embedding_dim 0.0 +15 99 training.batch_size 1.0 +15 99 training.label_smoothing 0.004270201920512775 +15 100 model.embedding_dim 1.0 +15 100 training.batch_size 1.0 +15 100 training.label_smoothing 0.019731103561539332 +15 1 dataset """kinships""" +15 1 model """complex""" +15 1 loss """crossentropy""" +15 1 regularizer """no""" +15 1 optimizer """adadelta""" +15 1 training_loop """lcwa""" +15 1 evaluator """rankbased""" +15 2 dataset """kinships""" +15 2 model """complex""" +15 2 loss """crossentropy""" +15 2 regularizer """no""" +15 2 optimizer """adadelta""" +15 2 training_loop """lcwa""" +15 2 evaluator """rankbased""" +15 3 dataset """kinships""" +15 3 model """complex""" +15 3 loss """crossentropy""" +15 3 regularizer """no""" +15 3 optimizer """adadelta""" +15 3 training_loop """lcwa""" +15 3 evaluator """rankbased""" +15 4 dataset """kinships""" +15 4 model """complex""" +15 4 loss """crossentropy""" +15 4 regularizer """no""" +15 4 optimizer """adadelta""" +15 4 training_loop """lcwa""" +15 4 evaluator """rankbased""" +15 5 dataset """kinships""" +15 5 
model """complex""" +15 5 loss """crossentropy""" +15 5 regularizer """no""" +15 5 optimizer """adadelta""" +15 5 training_loop """lcwa""" +15 5 evaluator """rankbased""" +15 6 dataset """kinships""" +15 6 model """complex""" +15 6 loss """crossentropy""" +15 6 regularizer """no""" +15 6 optimizer """adadelta""" +15 6 training_loop """lcwa""" +15 6 evaluator """rankbased""" +15 7 dataset """kinships""" +15 7 model """complex""" +15 7 loss """crossentropy""" +15 7 regularizer """no""" +15 7 optimizer """adadelta""" +15 7 training_loop """lcwa""" +15 7 evaluator """rankbased""" +15 8 dataset """kinships""" +15 8 model """complex""" +15 8 loss """crossentropy""" +15 8 regularizer """no""" +15 8 optimizer """adadelta""" +15 8 training_loop """lcwa""" +15 8 evaluator """rankbased""" +15 9 dataset """kinships""" +15 9 model """complex""" +15 9 loss """crossentropy""" +15 9 regularizer """no""" +15 9 optimizer """adadelta""" +15 9 training_loop """lcwa""" +15 9 evaluator """rankbased""" +15 10 dataset """kinships""" +15 10 model """complex""" +15 10 loss """crossentropy""" +15 10 regularizer """no""" +15 10 optimizer """adadelta""" +15 10 training_loop """lcwa""" +15 10 evaluator """rankbased""" +15 11 dataset """kinships""" +15 11 model """complex""" +15 11 loss """crossentropy""" +15 11 regularizer """no""" +15 11 optimizer """adadelta""" +15 11 training_loop """lcwa""" +15 11 evaluator """rankbased""" +15 12 dataset """kinships""" +15 12 model """complex""" +15 12 loss """crossentropy""" +15 12 regularizer """no""" +15 12 optimizer """adadelta""" +15 12 training_loop """lcwa""" +15 12 evaluator """rankbased""" +15 13 dataset """kinships""" +15 13 model """complex""" +15 13 loss """crossentropy""" +15 13 regularizer """no""" +15 13 optimizer """adadelta""" +15 13 training_loop """lcwa""" +15 13 evaluator """rankbased""" +15 14 dataset """kinships""" +15 14 model """complex""" +15 14 loss """crossentropy""" +15 14 regularizer """no""" +15 14 optimizer """adadelta""" +15 
14 training_loop """lcwa""" +15 14 evaluator """rankbased""" +15 15 dataset """kinships""" +15 15 model """complex""" +15 15 loss """crossentropy""" +15 15 regularizer """no""" +15 15 optimizer """adadelta""" +15 15 training_loop """lcwa""" +15 15 evaluator """rankbased""" +15 16 dataset """kinships""" +15 16 model """complex""" +15 16 loss """crossentropy""" +15 16 regularizer """no""" +15 16 optimizer """adadelta""" +15 16 training_loop """lcwa""" +15 16 evaluator """rankbased""" +15 17 dataset """kinships""" +15 17 model """complex""" +15 17 loss """crossentropy""" +15 17 regularizer """no""" +15 17 optimizer """adadelta""" +15 17 training_loop """lcwa""" +15 17 evaluator """rankbased""" +15 18 dataset """kinships""" +15 18 model """complex""" +15 18 loss """crossentropy""" +15 18 regularizer """no""" +15 18 optimizer """adadelta""" +15 18 training_loop """lcwa""" +15 18 evaluator """rankbased""" +15 19 dataset """kinships""" +15 19 model """complex""" +15 19 loss """crossentropy""" +15 19 regularizer """no""" +15 19 optimizer """adadelta""" +15 19 training_loop """lcwa""" +15 19 evaluator """rankbased""" +15 20 dataset """kinships""" +15 20 model """complex""" +15 20 loss """crossentropy""" +15 20 regularizer """no""" +15 20 optimizer """adadelta""" +15 20 training_loop """lcwa""" +15 20 evaluator """rankbased""" +15 21 dataset """kinships""" +15 21 model """complex""" +15 21 loss """crossentropy""" +15 21 regularizer """no""" +15 21 optimizer """adadelta""" +15 21 training_loop """lcwa""" +15 21 evaluator """rankbased""" +15 22 dataset """kinships""" +15 22 model """complex""" +15 22 loss """crossentropy""" +15 22 regularizer """no""" +15 22 optimizer """adadelta""" +15 22 training_loop """lcwa""" +15 22 evaluator """rankbased""" +15 23 dataset """kinships""" +15 23 model """complex""" +15 23 loss """crossentropy""" +15 23 regularizer """no""" +15 23 optimizer """adadelta""" +15 23 training_loop """lcwa""" +15 23 evaluator """rankbased""" +15 24 dataset 
"""kinships""" +15 24 model """complex""" +15 24 loss """crossentropy""" +15 24 regularizer """no""" +15 24 optimizer """adadelta""" +15 24 training_loop """lcwa""" +15 24 evaluator """rankbased""" +15 25 dataset """kinships""" +15 25 model """complex""" +15 25 loss """crossentropy""" +15 25 regularizer """no""" +15 25 optimizer """adadelta""" +15 25 training_loop """lcwa""" +15 25 evaluator """rankbased""" +15 26 dataset """kinships""" +15 26 model """complex""" +15 26 loss """crossentropy""" +15 26 regularizer """no""" +15 26 optimizer """adadelta""" +15 26 training_loop """lcwa""" +15 26 evaluator """rankbased""" +15 27 dataset """kinships""" +15 27 model """complex""" +15 27 loss """crossentropy""" +15 27 regularizer """no""" +15 27 optimizer """adadelta""" +15 27 training_loop """lcwa""" +15 27 evaluator """rankbased""" +15 28 dataset """kinships""" +15 28 model """complex""" +15 28 loss """crossentropy""" +15 28 regularizer """no""" +15 28 optimizer """adadelta""" +15 28 training_loop """lcwa""" +15 28 evaluator """rankbased""" +15 29 dataset """kinships""" +15 29 model """complex""" +15 29 loss """crossentropy""" +15 29 regularizer """no""" +15 29 optimizer """adadelta""" +15 29 training_loop """lcwa""" +15 29 evaluator """rankbased""" +15 30 dataset """kinships""" +15 30 model """complex""" +15 30 loss """crossentropy""" +15 30 regularizer """no""" +15 30 optimizer """adadelta""" +15 30 training_loop """lcwa""" +15 30 evaluator """rankbased""" +15 31 dataset """kinships""" +15 31 model """complex""" +15 31 loss """crossentropy""" +15 31 regularizer """no""" +15 31 optimizer """adadelta""" +15 31 training_loop """lcwa""" +15 31 evaluator """rankbased""" +15 32 dataset """kinships""" +15 32 model """complex""" +15 32 loss """crossentropy""" +15 32 regularizer """no""" +15 32 optimizer """adadelta""" +15 32 training_loop """lcwa""" +15 32 evaluator """rankbased""" +15 33 dataset """kinships""" +15 33 model """complex""" +15 33 loss """crossentropy""" +15 33 
regularizer """no""" +15 33 optimizer """adadelta""" +15 33 training_loop """lcwa""" +15 33 evaluator """rankbased""" +15 34 dataset """kinships""" +15 34 model """complex""" +15 34 loss """crossentropy""" +15 34 regularizer """no""" +15 34 optimizer """adadelta""" +15 34 training_loop """lcwa""" +15 34 evaluator """rankbased""" +15 35 dataset """kinships""" +15 35 model """complex""" +15 35 loss """crossentropy""" +15 35 regularizer """no""" +15 35 optimizer """adadelta""" +15 35 training_loop """lcwa""" +15 35 evaluator """rankbased""" +15 36 dataset """kinships""" +15 36 model """complex""" +15 36 loss """crossentropy""" +15 36 regularizer """no""" +15 36 optimizer """adadelta""" +15 36 training_loop """lcwa""" +15 36 evaluator """rankbased""" +15 37 dataset """kinships""" +15 37 model """complex""" +15 37 loss """crossentropy""" +15 37 regularizer """no""" +15 37 optimizer """adadelta""" +15 37 training_loop """lcwa""" +15 37 evaluator """rankbased""" +15 38 dataset """kinships""" +15 38 model """complex""" +15 38 loss """crossentropy""" +15 38 regularizer """no""" +15 38 optimizer """adadelta""" +15 38 training_loop """lcwa""" +15 38 evaluator """rankbased""" +15 39 dataset """kinships""" +15 39 model """complex""" +15 39 loss """crossentropy""" +15 39 regularizer """no""" +15 39 optimizer """adadelta""" +15 39 training_loop """lcwa""" +15 39 evaluator """rankbased""" +15 40 dataset """kinships""" +15 40 model """complex""" +15 40 loss """crossentropy""" +15 40 regularizer """no""" +15 40 optimizer """adadelta""" +15 40 training_loop """lcwa""" +15 40 evaluator """rankbased""" +15 41 dataset """kinships""" +15 41 model """complex""" +15 41 loss """crossentropy""" +15 41 regularizer """no""" +15 41 optimizer """adadelta""" +15 41 training_loop """lcwa""" +15 41 evaluator """rankbased""" +15 42 dataset """kinships""" +15 42 model """complex""" +15 42 loss """crossentropy""" +15 42 regularizer """no""" +15 42 optimizer """adadelta""" +15 42 training_loop 
"""lcwa""" +15 42 evaluator """rankbased""" +15 43 dataset """kinships""" +15 43 model """complex""" +15 43 loss """crossentropy""" +15 43 regularizer """no""" +15 43 optimizer """adadelta""" +15 43 training_loop """lcwa""" +15 43 evaluator """rankbased""" +15 44 dataset """kinships""" +15 44 model """complex""" +15 44 loss """crossentropy""" +15 44 regularizer """no""" +15 44 optimizer """adadelta""" +15 44 training_loop """lcwa""" +15 44 evaluator """rankbased""" +15 45 dataset """kinships""" +15 45 model """complex""" +15 45 loss """crossentropy""" +15 45 regularizer """no""" +15 45 optimizer """adadelta""" +15 45 training_loop """lcwa""" +15 45 evaluator """rankbased""" +15 46 dataset """kinships""" +15 46 model """complex""" +15 46 loss """crossentropy""" +15 46 regularizer """no""" +15 46 optimizer """adadelta""" +15 46 training_loop """lcwa""" +15 46 evaluator """rankbased""" +15 47 dataset """kinships""" +15 47 model """complex""" +15 47 loss """crossentropy""" +15 47 regularizer """no""" +15 47 optimizer """adadelta""" +15 47 training_loop """lcwa""" +15 47 evaluator """rankbased""" +15 48 dataset """kinships""" +15 48 model """complex""" +15 48 loss """crossentropy""" +15 48 regularizer """no""" +15 48 optimizer """adadelta""" +15 48 training_loop """lcwa""" +15 48 evaluator """rankbased""" +15 49 dataset """kinships""" +15 49 model """complex""" +15 49 loss """crossentropy""" +15 49 regularizer """no""" +15 49 optimizer """adadelta""" +15 49 training_loop """lcwa""" +15 49 evaluator """rankbased""" +15 50 dataset """kinships""" +15 50 model """complex""" +15 50 loss """crossentropy""" +15 50 regularizer """no""" +15 50 optimizer """adadelta""" +15 50 training_loop """lcwa""" +15 50 evaluator """rankbased""" +15 51 dataset """kinships""" +15 51 model """complex""" +15 51 loss """crossentropy""" +15 51 regularizer """no""" +15 51 optimizer """adadelta""" +15 51 training_loop """lcwa""" +15 51 evaluator """rankbased""" +15 52 dataset """kinships""" +15 52 
model """complex""" +15 52 loss """crossentropy""" +15 52 regularizer """no""" +15 52 optimizer """adadelta""" +15 52 training_loop """lcwa""" +15 52 evaluator """rankbased""" +15 53 dataset """kinships""" +15 53 model """complex""" +15 53 loss """crossentropy""" +15 53 regularizer """no""" +15 53 optimizer """adadelta""" +15 53 training_loop """lcwa""" +15 53 evaluator """rankbased""" +15 54 dataset """kinships""" +15 54 model """complex""" +15 54 loss """crossentropy""" +15 54 regularizer """no""" +15 54 optimizer """adadelta""" +15 54 training_loop """lcwa""" +15 54 evaluator """rankbased""" +15 55 dataset """kinships""" +15 55 model """complex""" +15 55 loss """crossentropy""" +15 55 regularizer """no""" +15 55 optimizer """adadelta""" +15 55 training_loop """lcwa""" +15 55 evaluator """rankbased""" +15 56 dataset """kinships""" +15 56 model """complex""" +15 56 loss """crossentropy""" +15 56 regularizer """no""" +15 56 optimizer """adadelta""" +15 56 training_loop """lcwa""" +15 56 evaluator """rankbased""" +15 57 dataset """kinships""" +15 57 model """complex""" +15 57 loss """crossentropy""" +15 57 regularizer """no""" +15 57 optimizer """adadelta""" +15 57 training_loop """lcwa""" +15 57 evaluator """rankbased""" +15 58 dataset """kinships""" +15 58 model """complex""" +15 58 loss """crossentropy""" +15 58 regularizer """no""" +15 58 optimizer """adadelta""" +15 58 training_loop """lcwa""" +15 58 evaluator """rankbased""" +15 59 dataset """kinships""" +15 59 model """complex""" +15 59 loss """crossentropy""" +15 59 regularizer """no""" +15 59 optimizer """adadelta""" +15 59 training_loop """lcwa""" +15 59 evaluator """rankbased""" +15 60 dataset """kinships""" +15 60 model """complex""" +15 60 loss """crossentropy""" +15 60 regularizer """no""" +15 60 optimizer """adadelta""" +15 60 training_loop """lcwa""" +15 60 evaluator """rankbased""" +15 61 dataset """kinships""" +15 61 model """complex""" +15 61 loss """crossentropy""" +15 61 regularizer """no""" +15 
61 optimizer """adadelta""" +15 61 training_loop """lcwa""" +15 61 evaluator """rankbased""" +15 62 dataset """kinships""" +15 62 model """complex""" +15 62 loss """crossentropy""" +15 62 regularizer """no""" +15 62 optimizer """adadelta""" +15 62 training_loop """lcwa""" +15 62 evaluator """rankbased""" +15 63 dataset """kinships""" +15 63 model """complex""" +15 63 loss """crossentropy""" +15 63 regularizer """no""" +15 63 optimizer """adadelta""" +15 63 training_loop """lcwa""" +15 63 evaluator """rankbased""" +15 64 dataset """kinships""" +15 64 model """complex""" +15 64 loss """crossentropy""" +15 64 regularizer """no""" +15 64 optimizer """adadelta""" +15 64 training_loop """lcwa""" +15 64 evaluator """rankbased""" +15 65 dataset """kinships""" +15 65 model """complex""" +15 65 loss """crossentropy""" +15 65 regularizer """no""" +15 65 optimizer """adadelta""" +15 65 training_loop """lcwa""" +15 65 evaluator """rankbased""" +15 66 dataset """kinships""" +15 66 model """complex""" +15 66 loss """crossentropy""" +15 66 regularizer """no""" +15 66 optimizer """adadelta""" +15 66 training_loop """lcwa""" +15 66 evaluator """rankbased""" +15 67 dataset """kinships""" +15 67 model """complex""" +15 67 loss """crossentropy""" +15 67 regularizer """no""" +15 67 optimizer """adadelta""" +15 67 training_loop """lcwa""" +15 67 evaluator """rankbased""" +15 68 dataset """kinships""" +15 68 model """complex""" +15 68 loss """crossentropy""" +15 68 regularizer """no""" +15 68 optimizer """adadelta""" +15 68 training_loop """lcwa""" +15 68 evaluator """rankbased""" +15 69 dataset """kinships""" +15 69 model """complex""" +15 69 loss """crossentropy""" +15 69 regularizer """no""" +15 69 optimizer """adadelta""" +15 69 training_loop """lcwa""" +15 69 evaluator """rankbased""" +15 70 dataset """kinships""" +15 70 model """complex""" +15 70 loss """crossentropy""" +15 70 regularizer """no""" +15 70 optimizer """adadelta""" +15 70 training_loop """lcwa""" +15 70 evaluator 
"""rankbased""" +15 71 dataset """kinships""" +15 71 model """complex""" +15 71 loss """crossentropy""" +15 71 regularizer """no""" +15 71 optimizer """adadelta""" +15 71 training_loop """lcwa""" +15 71 evaluator """rankbased""" +15 72 dataset """kinships""" +15 72 model """complex""" +15 72 loss """crossentropy""" +15 72 regularizer """no""" +15 72 optimizer """adadelta""" +15 72 training_loop """lcwa""" +15 72 evaluator """rankbased""" +15 73 dataset """kinships""" +15 73 model """complex""" +15 73 loss """crossentropy""" +15 73 regularizer """no""" +15 73 optimizer """adadelta""" +15 73 training_loop """lcwa""" +15 73 evaluator """rankbased""" +15 74 dataset """kinships""" +15 74 model """complex""" +15 74 loss """crossentropy""" +15 74 regularizer """no""" +15 74 optimizer """adadelta""" +15 74 training_loop """lcwa""" +15 74 evaluator """rankbased""" +15 75 dataset """kinships""" +15 75 model """complex""" +15 75 loss """crossentropy""" +15 75 regularizer """no""" +15 75 optimizer """adadelta""" +15 75 training_loop """lcwa""" +15 75 evaluator """rankbased""" +15 76 dataset """kinships""" +15 76 model """complex""" +15 76 loss """crossentropy""" +15 76 regularizer """no""" +15 76 optimizer """adadelta""" +15 76 training_loop """lcwa""" +15 76 evaluator """rankbased""" +15 77 dataset """kinships""" +15 77 model """complex""" +15 77 loss """crossentropy""" +15 77 regularizer """no""" +15 77 optimizer """adadelta""" +15 77 training_loop """lcwa""" +15 77 evaluator """rankbased""" +15 78 dataset """kinships""" +15 78 model """complex""" +15 78 loss """crossentropy""" +15 78 regularizer """no""" +15 78 optimizer """adadelta""" +15 78 training_loop """lcwa""" +15 78 evaluator """rankbased""" +15 79 dataset """kinships""" +15 79 model """complex""" +15 79 loss """crossentropy""" +15 79 regularizer """no""" +15 79 optimizer """adadelta""" +15 79 training_loop """lcwa""" +15 79 evaluator """rankbased""" +15 80 dataset """kinships""" +15 80 model """complex""" +15 80 
loss """crossentropy""" +15 80 regularizer """no""" +15 80 optimizer """adadelta""" +15 80 training_loop """lcwa""" +15 80 evaluator """rankbased""" +15 81 dataset """kinships""" +15 81 model """complex""" +15 81 loss """crossentropy""" +15 81 regularizer """no""" +15 81 optimizer """adadelta""" +15 81 training_loop """lcwa""" +15 81 evaluator """rankbased""" +15 82 dataset """kinships""" +15 82 model """complex""" +15 82 loss """crossentropy""" +15 82 regularizer """no""" +15 82 optimizer """adadelta""" +15 82 training_loop """lcwa""" +15 82 evaluator """rankbased""" +15 83 dataset """kinships""" +15 83 model """complex""" +15 83 loss """crossentropy""" +15 83 regularizer """no""" +15 83 optimizer """adadelta""" +15 83 training_loop """lcwa""" +15 83 evaluator """rankbased""" +15 84 dataset """kinships""" +15 84 model """complex""" +15 84 loss """crossentropy""" +15 84 regularizer """no""" +15 84 optimizer """adadelta""" +15 84 training_loop """lcwa""" +15 84 evaluator """rankbased""" +15 85 dataset """kinships""" +15 85 model """complex""" +15 85 loss """crossentropy""" +15 85 regularizer """no""" +15 85 optimizer """adadelta""" +15 85 training_loop """lcwa""" +15 85 evaluator """rankbased""" +15 86 dataset """kinships""" +15 86 model """complex""" +15 86 loss """crossentropy""" +15 86 regularizer """no""" +15 86 optimizer """adadelta""" +15 86 training_loop """lcwa""" +15 86 evaluator """rankbased""" +15 87 dataset """kinships""" +15 87 model """complex""" +15 87 loss """crossentropy""" +15 87 regularizer """no""" +15 87 optimizer """adadelta""" +15 87 training_loop """lcwa""" +15 87 evaluator """rankbased""" +15 88 dataset """kinships""" +15 88 model """complex""" +15 88 loss """crossentropy""" +15 88 regularizer """no""" +15 88 optimizer """adadelta""" +15 88 training_loop """lcwa""" +15 88 evaluator """rankbased""" +15 89 dataset """kinships""" +15 89 model """complex""" +15 89 loss """crossentropy""" +15 89 regularizer """no""" +15 89 optimizer 
"""adadelta""" +15 89 training_loop """lcwa""" +15 89 evaluator """rankbased""" +15 90 dataset """kinships""" +15 90 model """complex""" +15 90 loss """crossentropy""" +15 90 regularizer """no""" +15 90 optimizer """adadelta""" +15 90 training_loop """lcwa""" +15 90 evaluator """rankbased""" +15 91 dataset """kinships""" +15 91 model """complex""" +15 91 loss """crossentropy""" +15 91 regularizer """no""" +15 91 optimizer """adadelta""" +15 91 training_loop """lcwa""" +15 91 evaluator """rankbased""" +15 92 dataset """kinships""" +15 92 model """complex""" +15 92 loss """crossentropy""" +15 92 regularizer """no""" +15 92 optimizer """adadelta""" +15 92 training_loop """lcwa""" +15 92 evaluator """rankbased""" +15 93 dataset """kinships""" +15 93 model """complex""" +15 93 loss """crossentropy""" +15 93 regularizer """no""" +15 93 optimizer """adadelta""" +15 93 training_loop """lcwa""" +15 93 evaluator """rankbased""" +15 94 dataset """kinships""" +15 94 model """complex""" +15 94 loss """crossentropy""" +15 94 regularizer """no""" +15 94 optimizer """adadelta""" +15 94 training_loop """lcwa""" +15 94 evaluator """rankbased""" +15 95 dataset """kinships""" +15 95 model """complex""" +15 95 loss """crossentropy""" +15 95 regularizer """no""" +15 95 optimizer """adadelta""" +15 95 training_loop """lcwa""" +15 95 evaluator """rankbased""" +15 96 dataset """kinships""" +15 96 model """complex""" +15 96 loss """crossentropy""" +15 96 regularizer """no""" +15 96 optimizer """adadelta""" +15 96 training_loop """lcwa""" +15 96 evaluator """rankbased""" +15 97 dataset """kinships""" +15 97 model """complex""" +15 97 loss """crossentropy""" +15 97 regularizer """no""" +15 97 optimizer """adadelta""" +15 97 training_loop """lcwa""" +15 97 evaluator """rankbased""" +15 98 dataset """kinships""" +15 98 model """complex""" +15 98 loss """crossentropy""" +15 98 regularizer """no""" +15 98 optimizer """adadelta""" +15 98 training_loop """lcwa""" +15 98 evaluator """rankbased""" 
+15 99 dataset """kinships""" +15 99 model """complex""" +15 99 loss """crossentropy""" +15 99 regularizer """no""" +15 99 optimizer """adadelta""" +15 99 training_loop """lcwa""" +15 99 evaluator """rankbased""" +15 100 dataset """kinships""" +15 100 model """complex""" +15 100 loss """crossentropy""" +15 100 regularizer """no""" +15 100 optimizer """adadelta""" +15 100 training_loop """lcwa""" +15 100 evaluator """rankbased""" +16 1 model.embedding_dim 2.0 +16 1 training.batch_size 0.0 +16 1 training.label_smoothing 0.002400511352372244 +16 2 model.embedding_dim 1.0 +16 2 training.batch_size 2.0 +16 2 training.label_smoothing 0.0068992707845054 +16 3 model.embedding_dim 0.0 +16 3 training.batch_size 1.0 +16 3 training.label_smoothing 0.001513742282667123 +16 4 model.embedding_dim 2.0 +16 4 training.batch_size 0.0 +16 4 training.label_smoothing 0.653372753828433 +16 5 model.embedding_dim 1.0 +16 5 training.batch_size 0.0 +16 5 training.label_smoothing 0.021195959171887612 +16 6 model.embedding_dim 2.0 +16 6 training.batch_size 0.0 +16 6 training.label_smoothing 0.12686027402609046 +16 7 model.embedding_dim 0.0 +16 7 training.batch_size 2.0 +16 7 training.label_smoothing 0.005871448096022402 +16 8 model.embedding_dim 2.0 +16 8 training.batch_size 2.0 +16 8 training.label_smoothing 0.0010167691559677214 +16 9 model.embedding_dim 1.0 +16 9 training.batch_size 2.0 +16 9 training.label_smoothing 0.0028264271113568017 +16 10 model.embedding_dim 0.0 +16 10 training.batch_size 0.0 +16 10 training.label_smoothing 0.28187937373587274 +16 11 model.embedding_dim 2.0 +16 11 training.batch_size 1.0 +16 11 training.label_smoothing 0.33706768727096487 +16 12 model.embedding_dim 0.0 +16 12 training.batch_size 0.0 +16 12 training.label_smoothing 0.32002790337132314 +16 13 model.embedding_dim 1.0 +16 13 training.batch_size 1.0 +16 13 training.label_smoothing 0.014810101432624963 +16 14 model.embedding_dim 0.0 +16 14 training.batch_size 0.0 +16 14 training.label_smoothing 
0.3084646513028138 +16 15 model.embedding_dim 0.0 +16 15 training.batch_size 0.0 +16 15 training.label_smoothing 0.06851583211285657 +16 16 model.embedding_dim 1.0 +16 16 training.batch_size 1.0 +16 16 training.label_smoothing 0.6000519312694449 +16 17 model.embedding_dim 2.0 +16 17 training.batch_size 1.0 +16 17 training.label_smoothing 0.002991067261847025 +16 18 model.embedding_dim 1.0 +16 18 training.batch_size 1.0 +16 18 training.label_smoothing 0.0016355936903847334 +16 19 model.embedding_dim 2.0 +16 19 training.batch_size 1.0 +16 19 training.label_smoothing 0.6380657046982033 +16 20 model.embedding_dim 2.0 +16 20 training.batch_size 1.0 +16 20 training.label_smoothing 0.8420119938086582 +16 21 model.embedding_dim 0.0 +16 21 training.batch_size 0.0 +16 21 training.label_smoothing 0.0503182007707147 +16 22 model.embedding_dim 2.0 +16 22 training.batch_size 2.0 +16 22 training.label_smoothing 0.001229890990215181 +16 23 model.embedding_dim 1.0 +16 23 training.batch_size 1.0 +16 23 training.label_smoothing 0.0016995917697665208 +16 24 model.embedding_dim 2.0 +16 24 training.batch_size 2.0 +16 24 training.label_smoothing 0.005002983794138055 +16 25 model.embedding_dim 2.0 +16 25 training.batch_size 0.0 +16 25 training.label_smoothing 0.1904182440344881 +16 26 model.embedding_dim 0.0 +16 26 training.batch_size 0.0 +16 26 training.label_smoothing 0.28673604406129277 +16 27 model.embedding_dim 1.0 +16 27 training.batch_size 1.0 +16 27 training.label_smoothing 0.0030072267287926157 +16 28 model.embedding_dim 2.0 +16 28 training.batch_size 2.0 +16 28 training.label_smoothing 0.06520461446536677 +16 29 model.embedding_dim 1.0 +16 29 training.batch_size 2.0 +16 29 training.label_smoothing 0.0037730131526378906 +16 30 model.embedding_dim 0.0 +16 30 training.batch_size 1.0 +16 30 training.label_smoothing 0.04830905886083055 +16 31 model.embedding_dim 0.0 +16 31 training.batch_size 1.0 +16 31 training.label_smoothing 0.0032819209816913266 +16 32 model.embedding_dim 1.0 +16 
32 training.batch_size 2.0 +16 32 training.label_smoothing 0.10182916567704149 +16 33 model.embedding_dim 1.0 +16 33 training.batch_size 2.0 +16 33 training.label_smoothing 0.2504085667192289 +16 34 model.embedding_dim 1.0 +16 34 training.batch_size 1.0 +16 34 training.label_smoothing 0.18139343060008853 +16 35 model.embedding_dim 1.0 +16 35 training.batch_size 2.0 +16 35 training.label_smoothing 0.003686270488368773 +16 36 model.embedding_dim 2.0 +16 36 training.batch_size 0.0 +16 36 training.label_smoothing 0.002391611469304204 +16 37 model.embedding_dim 2.0 +16 37 training.batch_size 2.0 +16 37 training.label_smoothing 0.032655769892869474 +16 38 model.embedding_dim 0.0 +16 38 training.batch_size 1.0 +16 38 training.label_smoothing 0.0014703751294350312 +16 39 model.embedding_dim 1.0 +16 39 training.batch_size 1.0 +16 39 training.label_smoothing 0.3426510858109415 +16 40 model.embedding_dim 1.0 +16 40 training.batch_size 2.0 +16 40 training.label_smoothing 0.09398496643774423 +16 41 model.embedding_dim 2.0 +16 41 training.batch_size 0.0 +16 41 training.label_smoothing 0.09354286646877184 +16 42 model.embedding_dim 1.0 +16 42 training.batch_size 1.0 +16 42 training.label_smoothing 0.03557944172622391 +16 43 model.embedding_dim 1.0 +16 43 training.batch_size 0.0 +16 43 training.label_smoothing 0.23053660491917913 +16 44 model.embedding_dim 1.0 +16 44 training.batch_size 0.0 +16 44 training.label_smoothing 0.3722889647726086 +16 45 model.embedding_dim 0.0 +16 45 training.batch_size 0.0 +16 45 training.label_smoothing 0.04345095223410089 +16 46 model.embedding_dim 1.0 +16 46 training.batch_size 1.0 +16 46 training.label_smoothing 0.43000625502825035 +16 47 model.embedding_dim 0.0 +16 47 training.batch_size 1.0 +16 47 training.label_smoothing 0.0174382786062621 +16 48 model.embedding_dim 1.0 +16 48 training.batch_size 1.0 +16 48 training.label_smoothing 0.10877625707397214 +16 49 model.embedding_dim 0.0 +16 49 training.batch_size 0.0 +16 49 training.label_smoothing 
0.0017153690419821148 +16 50 model.embedding_dim 1.0 +16 50 training.batch_size 0.0 +16 50 training.label_smoothing 0.004095985164489011 +16 51 model.embedding_dim 1.0 +16 51 training.batch_size 0.0 +16 51 training.label_smoothing 0.006192228798788229 +16 52 model.embedding_dim 1.0 +16 52 training.batch_size 2.0 +16 52 training.label_smoothing 0.010604500189534426 +16 53 model.embedding_dim 1.0 +16 53 training.batch_size 0.0 +16 53 training.label_smoothing 0.018909889752755635 +16 54 model.embedding_dim 1.0 +16 54 training.batch_size 0.0 +16 54 training.label_smoothing 0.0030186391227051516 +16 55 model.embedding_dim 2.0 +16 55 training.batch_size 2.0 +16 55 training.label_smoothing 0.0029880988437480946 +16 56 model.embedding_dim 1.0 +16 56 training.batch_size 0.0 +16 56 training.label_smoothing 0.011165082210682582 +16 57 model.embedding_dim 0.0 +16 57 training.batch_size 0.0 +16 57 training.label_smoothing 0.08383743019555673 +16 58 model.embedding_dim 0.0 +16 58 training.batch_size 1.0 +16 58 training.label_smoothing 0.04623775473176247 +16 59 model.embedding_dim 2.0 +16 59 training.batch_size 1.0 +16 59 training.label_smoothing 0.06082509695026083 +16 60 model.embedding_dim 1.0 +16 60 training.batch_size 0.0 +16 60 training.label_smoothing 0.037043799813809224 +16 61 model.embedding_dim 1.0 +16 61 training.batch_size 2.0 +16 61 training.label_smoothing 0.21196262092213194 +16 62 model.embedding_dim 0.0 +16 62 training.batch_size 2.0 +16 62 training.label_smoothing 0.05311298067032119 +16 63 model.embedding_dim 2.0 +16 63 training.batch_size 0.0 +16 63 training.label_smoothing 0.024335771923046975 +16 64 model.embedding_dim 2.0 +16 64 training.batch_size 0.0 +16 64 training.label_smoothing 0.17194501319775723 +16 65 model.embedding_dim 0.0 +16 65 training.batch_size 0.0 +16 65 training.label_smoothing 0.0036399223070353376 +16 66 model.embedding_dim 2.0 +16 66 training.batch_size 2.0 +16 66 training.label_smoothing 0.42360670199720263 +16 67 model.embedding_dim 
0.0 +16 67 training.batch_size 0.0 +16 67 training.label_smoothing 0.13468504475551693 +16 68 model.embedding_dim 0.0 +16 68 training.batch_size 1.0 +16 68 training.label_smoothing 0.011822778043748662 +16 69 model.embedding_dim 2.0 +16 69 training.batch_size 0.0 +16 69 training.label_smoothing 0.5541515520850793 +16 70 model.embedding_dim 1.0 +16 70 training.batch_size 2.0 +16 70 training.label_smoothing 0.013578133416425539 +16 71 model.embedding_dim 0.0 +16 71 training.batch_size 1.0 +16 71 training.label_smoothing 0.846703677245146 +16 72 model.embedding_dim 1.0 +16 72 training.batch_size 2.0 +16 72 training.label_smoothing 0.1953892365357727 +16 73 model.embedding_dim 0.0 +16 73 training.batch_size 1.0 +16 73 training.label_smoothing 0.7595937086899996 +16 74 model.embedding_dim 2.0 +16 74 training.batch_size 1.0 +16 74 training.label_smoothing 0.39544692226764244 +16 75 model.embedding_dim 2.0 +16 75 training.batch_size 0.0 +16 75 training.label_smoothing 0.09335415242382605 +16 76 model.embedding_dim 0.0 +16 76 training.batch_size 0.0 +16 76 training.label_smoothing 0.020139223072598925 +16 77 model.embedding_dim 0.0 +16 77 training.batch_size 1.0 +16 77 training.label_smoothing 0.06911733324136533 +16 78 model.embedding_dim 2.0 +16 78 training.batch_size 0.0 +16 78 training.label_smoothing 0.0023183806099783284 +16 79 model.embedding_dim 1.0 +16 79 training.batch_size 1.0 +16 79 training.label_smoothing 0.20585883104304084 +16 80 model.embedding_dim 1.0 +16 80 training.batch_size 0.0 +16 80 training.label_smoothing 0.03260597355620068 +16 81 model.embedding_dim 2.0 +16 81 training.batch_size 2.0 +16 81 training.label_smoothing 0.8371359017947492 +16 82 model.embedding_dim 2.0 +16 82 training.batch_size 0.0 +16 82 training.label_smoothing 0.0011754090811937937 +16 83 model.embedding_dim 1.0 +16 83 training.batch_size 2.0 +16 83 training.label_smoothing 0.003916702434083931 +16 84 model.embedding_dim 0.0 +16 84 training.batch_size 2.0 +16 84 
training.label_smoothing 0.007029082659272503 +16 85 model.embedding_dim 0.0 +16 85 training.batch_size 1.0 +16 85 training.label_smoothing 0.28312358486405836 +16 86 model.embedding_dim 0.0 +16 86 training.batch_size 2.0 +16 86 training.label_smoothing 0.10458681407220544 +16 87 model.embedding_dim 2.0 +16 87 training.batch_size 2.0 +16 87 training.label_smoothing 0.0017009987211621669 +16 88 model.embedding_dim 2.0 +16 88 training.batch_size 1.0 +16 88 training.label_smoothing 0.0012230543052033375 +16 89 model.embedding_dim 1.0 +16 89 training.batch_size 0.0 +16 89 training.label_smoothing 0.1713357823485042 +16 90 model.embedding_dim 2.0 +16 90 training.batch_size 1.0 +16 90 training.label_smoothing 0.0010603081036564019 +16 91 model.embedding_dim 1.0 +16 91 training.batch_size 2.0 +16 91 training.label_smoothing 0.11073743795308783 +16 92 model.embedding_dim 2.0 +16 92 training.batch_size 1.0 +16 92 training.label_smoothing 0.007741167142097221 +16 93 model.embedding_dim 1.0 +16 93 training.batch_size 0.0 +16 93 training.label_smoothing 0.056216357629132456 +16 94 model.embedding_dim 0.0 +16 94 training.batch_size 2.0 +16 94 training.label_smoothing 0.6666214124647467 +16 95 model.embedding_dim 0.0 +16 95 training.batch_size 0.0 +16 95 training.label_smoothing 0.04272276623703268 +16 96 model.embedding_dim 0.0 +16 96 training.batch_size 1.0 +16 96 training.label_smoothing 0.005960650317058781 +16 97 model.embedding_dim 1.0 +16 97 training.batch_size 2.0 +16 97 training.label_smoothing 0.021622709285922144 +16 98 model.embedding_dim 0.0 +16 98 training.batch_size 2.0 +16 98 training.label_smoothing 0.0012101212885041247 +16 99 model.embedding_dim 0.0 +16 99 training.batch_size 0.0 +16 99 training.label_smoothing 0.018076251249212284 +16 100 model.embedding_dim 0.0 +16 100 training.batch_size 1.0 +16 100 training.label_smoothing 0.005283921137970819 +16 1 dataset """kinships""" +16 1 model """complex""" +16 1 loss """bceaftersigmoid""" +16 1 regularizer """no""" 
+16 1 optimizer """adadelta""" +16 1 training_loop """lcwa""" +16 1 evaluator """rankbased""" +16 2 dataset """kinships""" +16 2 model """complex""" +16 2 loss """bceaftersigmoid""" +16 2 regularizer """no""" +16 2 optimizer """adadelta""" +16 2 training_loop """lcwa""" +16 2 evaluator """rankbased""" +16 3 dataset """kinships""" +16 3 model """complex""" +16 3 loss """bceaftersigmoid""" +16 3 regularizer """no""" +16 3 optimizer """adadelta""" +16 3 training_loop """lcwa""" +16 3 evaluator """rankbased""" +16 4 dataset """kinships""" +16 4 model """complex""" +16 4 loss """bceaftersigmoid""" +16 4 regularizer """no""" +16 4 optimizer """adadelta""" +16 4 training_loop """lcwa""" +16 4 evaluator """rankbased""" +16 5 dataset """kinships""" +16 5 model """complex""" +16 5 loss """bceaftersigmoid""" +16 5 regularizer """no""" +16 5 optimizer """adadelta""" +16 5 training_loop """lcwa""" +16 5 evaluator """rankbased""" +16 6 dataset """kinships""" +16 6 model """complex""" +16 6 loss """bceaftersigmoid""" +16 6 regularizer """no""" +16 6 optimizer """adadelta""" +16 6 training_loop """lcwa""" +16 6 evaluator """rankbased""" +16 7 dataset """kinships""" +16 7 model """complex""" +16 7 loss """bceaftersigmoid""" +16 7 regularizer """no""" +16 7 optimizer """adadelta""" +16 7 training_loop """lcwa""" +16 7 evaluator """rankbased""" +16 8 dataset """kinships""" +16 8 model """complex""" +16 8 loss """bceaftersigmoid""" +16 8 regularizer """no""" +16 8 optimizer """adadelta""" +16 8 training_loop """lcwa""" +16 8 evaluator """rankbased""" +16 9 dataset """kinships""" +16 9 model """complex""" +16 9 loss """bceaftersigmoid""" +16 9 regularizer """no""" +16 9 optimizer """adadelta""" +16 9 training_loop """lcwa""" +16 9 evaluator """rankbased""" +16 10 dataset """kinships""" +16 10 model """complex""" +16 10 loss """bceaftersigmoid""" +16 10 regularizer """no""" +16 10 optimizer """adadelta""" +16 10 training_loop """lcwa""" +16 10 evaluator """rankbased""" +16 11 dataset 
"""kinships""" +16 11 model """complex""" +16 11 loss """bceaftersigmoid""" +16 11 regularizer """no""" +16 11 optimizer """adadelta""" +16 11 training_loop """lcwa""" +16 11 evaluator """rankbased""" +16 12 dataset """kinships""" +16 12 model """complex""" +16 12 loss """bceaftersigmoid""" +16 12 regularizer """no""" +16 12 optimizer """adadelta""" +16 12 training_loop """lcwa""" +16 12 evaluator """rankbased""" +16 13 dataset """kinships""" +16 13 model """complex""" +16 13 loss """bceaftersigmoid""" +16 13 regularizer """no""" +16 13 optimizer """adadelta""" +16 13 training_loop """lcwa""" +16 13 evaluator """rankbased""" +16 14 dataset """kinships""" +16 14 model """complex""" +16 14 loss """bceaftersigmoid""" +16 14 regularizer """no""" +16 14 optimizer """adadelta""" +16 14 training_loop """lcwa""" +16 14 evaluator """rankbased""" +16 15 dataset """kinships""" +16 15 model """complex""" +16 15 loss """bceaftersigmoid""" +16 15 regularizer """no""" +16 15 optimizer """adadelta""" +16 15 training_loop """lcwa""" +16 15 evaluator """rankbased""" +16 16 dataset """kinships""" +16 16 model """complex""" +16 16 loss """bceaftersigmoid""" +16 16 regularizer """no""" +16 16 optimizer """adadelta""" +16 16 training_loop """lcwa""" +16 16 evaluator """rankbased""" +16 17 dataset """kinships""" +16 17 model """complex""" +16 17 loss """bceaftersigmoid""" +16 17 regularizer """no""" +16 17 optimizer """adadelta""" +16 17 training_loop """lcwa""" +16 17 evaluator """rankbased""" +16 18 dataset """kinships""" +16 18 model """complex""" +16 18 loss """bceaftersigmoid""" +16 18 regularizer """no""" +16 18 optimizer """adadelta""" +16 18 training_loop """lcwa""" +16 18 evaluator """rankbased""" +16 19 dataset """kinships""" +16 19 model """complex""" +16 19 loss """bceaftersigmoid""" +16 19 regularizer """no""" +16 19 optimizer """adadelta""" +16 19 training_loop """lcwa""" +16 19 evaluator """rankbased""" +16 20 dataset """kinships""" +16 20 model """complex""" +16 20 loss 
"""bceaftersigmoid""" +16 20 regularizer """no""" +16 20 optimizer """adadelta""" +16 20 training_loop """lcwa""" +16 20 evaluator """rankbased""" +16 21 dataset """kinships""" +16 21 model """complex""" +16 21 loss """bceaftersigmoid""" +16 21 regularizer """no""" +16 21 optimizer """adadelta""" +16 21 training_loop """lcwa""" +16 21 evaluator """rankbased""" +16 22 dataset """kinships""" +16 22 model """complex""" +16 22 loss """bceaftersigmoid""" +16 22 regularizer """no""" +16 22 optimizer """adadelta""" +16 22 training_loop """lcwa""" +16 22 evaluator """rankbased""" +16 23 dataset """kinships""" +16 23 model """complex""" +16 23 loss """bceaftersigmoid""" +16 23 regularizer """no""" +16 23 optimizer """adadelta""" +16 23 training_loop """lcwa""" +16 23 evaluator """rankbased""" +16 24 dataset """kinships""" +16 24 model """complex""" +16 24 loss """bceaftersigmoid""" +16 24 regularizer """no""" +16 24 optimizer """adadelta""" +16 24 training_loop """lcwa""" +16 24 evaluator """rankbased""" +16 25 dataset """kinships""" +16 25 model """complex""" +16 25 loss """bceaftersigmoid""" +16 25 regularizer """no""" +16 25 optimizer """adadelta""" +16 25 training_loop """lcwa""" +16 25 evaluator """rankbased""" +16 26 dataset """kinships""" +16 26 model """complex""" +16 26 loss """bceaftersigmoid""" +16 26 regularizer """no""" +16 26 optimizer """adadelta""" +16 26 training_loop """lcwa""" +16 26 evaluator """rankbased""" +16 27 dataset """kinships""" +16 27 model """complex""" +16 27 loss """bceaftersigmoid""" +16 27 regularizer """no""" +16 27 optimizer """adadelta""" +16 27 training_loop """lcwa""" +16 27 evaluator """rankbased""" +16 28 dataset """kinships""" +16 28 model """complex""" +16 28 loss """bceaftersigmoid""" +16 28 regularizer """no""" +16 28 optimizer """adadelta""" +16 28 training_loop """lcwa""" +16 28 evaluator """rankbased""" +16 29 dataset """kinships""" +16 29 model """complex""" +16 29 loss """bceaftersigmoid""" +16 29 regularizer """no""" +16 
29 optimizer """adadelta""" +16 29 training_loop """lcwa""" +16 29 evaluator """rankbased""" +16 30 dataset """kinships""" +16 30 model """complex""" +16 30 loss """bceaftersigmoid""" +16 30 regularizer """no""" +16 30 optimizer """adadelta""" +16 30 training_loop """lcwa""" +16 30 evaluator """rankbased""" +16 31 dataset """kinships""" +16 31 model """complex""" +16 31 loss """bceaftersigmoid""" +16 31 regularizer """no""" +16 31 optimizer """adadelta""" +16 31 training_loop """lcwa""" +16 31 evaluator """rankbased""" +16 32 dataset """kinships""" +16 32 model """complex""" +16 32 loss """bceaftersigmoid""" +16 32 regularizer """no""" +16 32 optimizer """adadelta""" +16 32 training_loop """lcwa""" +16 32 evaluator """rankbased""" +16 33 dataset """kinships""" +16 33 model """complex""" +16 33 loss """bceaftersigmoid""" +16 33 regularizer """no""" +16 33 optimizer """adadelta""" +16 33 training_loop """lcwa""" +16 33 evaluator """rankbased""" +16 34 dataset """kinships""" +16 34 model """complex""" +16 34 loss """bceaftersigmoid""" +16 34 regularizer """no""" +16 34 optimizer """adadelta""" +16 34 training_loop """lcwa""" +16 34 evaluator """rankbased""" +16 35 dataset """kinships""" +16 35 model """complex""" +16 35 loss """bceaftersigmoid""" +16 35 regularizer """no""" +16 35 optimizer """adadelta""" +16 35 training_loop """lcwa""" +16 35 evaluator """rankbased""" +16 36 dataset """kinships""" +16 36 model """complex""" +16 36 loss """bceaftersigmoid""" +16 36 regularizer """no""" +16 36 optimizer """adadelta""" +16 36 training_loop """lcwa""" +16 36 evaluator """rankbased""" +16 37 dataset """kinships""" +16 37 model """complex""" +16 37 loss """bceaftersigmoid""" +16 37 regularizer """no""" +16 37 optimizer """adadelta""" +16 37 training_loop """lcwa""" +16 37 evaluator """rankbased""" +16 38 dataset """kinships""" +16 38 model """complex""" +16 38 loss """bceaftersigmoid""" +16 38 regularizer """no""" +16 38 optimizer """adadelta""" +16 38 training_loop 
"""lcwa""" +16 38 evaluator """rankbased""" +16 39 dataset """kinships""" +16 39 model """complex""" +16 39 loss """bceaftersigmoid""" +16 39 regularizer """no""" +16 39 optimizer """adadelta""" +16 39 training_loop """lcwa""" +16 39 evaluator """rankbased""" +16 40 dataset """kinships""" +16 40 model """complex""" +16 40 loss """bceaftersigmoid""" +16 40 regularizer """no""" +16 40 optimizer """adadelta""" +16 40 training_loop """lcwa""" +16 40 evaluator """rankbased""" +16 41 dataset """kinships""" +16 41 model """complex""" +16 41 loss """bceaftersigmoid""" +16 41 regularizer """no""" +16 41 optimizer """adadelta""" +16 41 training_loop """lcwa""" +16 41 evaluator """rankbased""" +16 42 dataset """kinships""" +16 42 model """complex""" +16 42 loss """bceaftersigmoid""" +16 42 regularizer """no""" +16 42 optimizer """adadelta""" +16 42 training_loop """lcwa""" +16 42 evaluator """rankbased""" +16 43 dataset """kinships""" +16 43 model """complex""" +16 43 loss """bceaftersigmoid""" +16 43 regularizer """no""" +16 43 optimizer """adadelta""" +16 43 training_loop """lcwa""" +16 43 evaluator """rankbased""" +16 44 dataset """kinships""" +16 44 model """complex""" +16 44 loss """bceaftersigmoid""" +16 44 regularizer """no""" +16 44 optimizer """adadelta""" +16 44 training_loop """lcwa""" +16 44 evaluator """rankbased""" +16 45 dataset """kinships""" +16 45 model """complex""" +16 45 loss """bceaftersigmoid""" +16 45 regularizer """no""" +16 45 optimizer """adadelta""" +16 45 training_loop """lcwa""" +16 45 evaluator """rankbased""" +16 46 dataset """kinships""" +16 46 model """complex""" +16 46 loss """bceaftersigmoid""" +16 46 regularizer """no""" +16 46 optimizer """adadelta""" +16 46 training_loop """lcwa""" +16 46 evaluator """rankbased""" +16 47 dataset """kinships""" +16 47 model """complex""" +16 47 loss """bceaftersigmoid""" +16 47 regularizer """no""" +16 47 optimizer """adadelta""" +16 47 training_loop """lcwa""" +16 47 evaluator """rankbased""" +16 48 
dataset """kinships""" +16 48 model """complex""" +16 48 loss """bceaftersigmoid""" +16 48 regularizer """no""" +16 48 optimizer """adadelta""" +16 48 training_loop """lcwa""" +16 48 evaluator """rankbased""" +16 49 dataset """kinships""" +16 49 model """complex""" +16 49 loss """bceaftersigmoid""" +16 49 regularizer """no""" +16 49 optimizer """adadelta""" +16 49 training_loop """lcwa""" +16 49 evaluator """rankbased""" +16 50 dataset """kinships""" +16 50 model """complex""" +16 50 loss """bceaftersigmoid""" +16 50 regularizer """no""" +16 50 optimizer """adadelta""" +16 50 training_loop """lcwa""" +16 50 evaluator """rankbased""" +16 51 dataset """kinships""" +16 51 model """complex""" +16 51 loss """bceaftersigmoid""" +16 51 regularizer """no""" +16 51 optimizer """adadelta""" +16 51 training_loop """lcwa""" +16 51 evaluator """rankbased""" +16 52 dataset """kinships""" +16 52 model """complex""" +16 52 loss """bceaftersigmoid""" +16 52 regularizer """no""" +16 52 optimizer """adadelta""" +16 52 training_loop """lcwa""" +16 52 evaluator """rankbased""" +16 53 dataset """kinships""" +16 53 model """complex""" +16 53 loss """bceaftersigmoid""" +16 53 regularizer """no""" +16 53 optimizer """adadelta""" +16 53 training_loop """lcwa""" +16 53 evaluator """rankbased""" +16 54 dataset """kinships""" +16 54 model """complex""" +16 54 loss """bceaftersigmoid""" +16 54 regularizer """no""" +16 54 optimizer """adadelta""" +16 54 training_loop """lcwa""" +16 54 evaluator """rankbased""" +16 55 dataset """kinships""" +16 55 model """complex""" +16 55 loss """bceaftersigmoid""" +16 55 regularizer """no""" +16 55 optimizer """adadelta""" +16 55 training_loop """lcwa""" +16 55 evaluator """rankbased""" +16 56 dataset """kinships""" +16 56 model """complex""" +16 56 loss """bceaftersigmoid""" +16 56 regularizer """no""" +16 56 optimizer """adadelta""" +16 56 training_loop """lcwa""" +16 56 evaluator """rankbased""" +16 57 dataset """kinships""" +16 57 model """complex""" +16 
57 loss """bceaftersigmoid""" +16 57 regularizer """no""" +16 57 optimizer """adadelta""" +16 57 training_loop """lcwa""" +16 57 evaluator """rankbased""" +16 58 dataset """kinships""" +16 58 model """complex""" +16 58 loss """bceaftersigmoid""" +16 58 regularizer """no""" +16 58 optimizer """adadelta""" +16 58 training_loop """lcwa""" +16 58 evaluator """rankbased""" +16 59 dataset """kinships""" +16 59 model """complex""" +16 59 loss """bceaftersigmoid""" +16 59 regularizer """no""" +16 59 optimizer """adadelta""" +16 59 training_loop """lcwa""" +16 59 evaluator """rankbased""" +16 60 dataset """kinships""" +16 60 model """complex""" +16 60 loss """bceaftersigmoid""" +16 60 regularizer """no""" +16 60 optimizer """adadelta""" +16 60 training_loop """lcwa""" +16 60 evaluator """rankbased""" +16 61 dataset """kinships""" +16 61 model """complex""" +16 61 loss """bceaftersigmoid""" +16 61 regularizer """no""" +16 61 optimizer """adadelta""" +16 61 training_loop """lcwa""" +16 61 evaluator """rankbased""" +16 62 dataset """kinships""" +16 62 model """complex""" +16 62 loss """bceaftersigmoid""" +16 62 regularizer """no""" +16 62 optimizer """adadelta""" +16 62 training_loop """lcwa""" +16 62 evaluator """rankbased""" +16 63 dataset """kinships""" +16 63 model """complex""" +16 63 loss """bceaftersigmoid""" +16 63 regularizer """no""" +16 63 optimizer """adadelta""" +16 63 training_loop """lcwa""" +16 63 evaluator """rankbased""" +16 64 dataset """kinships""" +16 64 model """complex""" +16 64 loss """bceaftersigmoid""" +16 64 regularizer """no""" +16 64 optimizer """adadelta""" +16 64 training_loop """lcwa""" +16 64 evaluator """rankbased""" +16 65 dataset """kinships""" +16 65 model """complex""" +16 65 loss """bceaftersigmoid""" +16 65 regularizer """no""" +16 65 optimizer """adadelta""" +16 65 training_loop """lcwa""" +16 65 evaluator """rankbased""" +16 66 dataset """kinships""" +16 66 model """complex""" +16 66 loss """bceaftersigmoid""" +16 66 regularizer 
"""no""" +16 66 optimizer """adadelta""" +16 66 training_loop """lcwa""" +16 66 evaluator """rankbased""" +16 67 dataset """kinships""" +16 67 model """complex""" +16 67 loss """bceaftersigmoid""" +16 67 regularizer """no""" +16 67 optimizer """adadelta""" +16 67 training_loop """lcwa""" +16 67 evaluator """rankbased""" +16 68 dataset """kinships""" +16 68 model """complex""" +16 68 loss """bceaftersigmoid""" +16 68 regularizer """no""" +16 68 optimizer """adadelta""" +16 68 training_loop """lcwa""" +16 68 evaluator """rankbased""" +16 69 dataset """kinships""" +16 69 model """complex""" +16 69 loss """bceaftersigmoid""" +16 69 regularizer """no""" +16 69 optimizer """adadelta""" +16 69 training_loop """lcwa""" +16 69 evaluator """rankbased""" +16 70 dataset """kinships""" +16 70 model """complex""" +16 70 loss """bceaftersigmoid""" +16 70 regularizer """no""" +16 70 optimizer """adadelta""" +16 70 training_loop """lcwa""" +16 70 evaluator """rankbased""" +16 71 dataset """kinships""" +16 71 model """complex""" +16 71 loss """bceaftersigmoid""" +16 71 regularizer """no""" +16 71 optimizer """adadelta""" +16 71 training_loop """lcwa""" +16 71 evaluator """rankbased""" +16 72 dataset """kinships""" +16 72 model """complex""" +16 72 loss """bceaftersigmoid""" +16 72 regularizer """no""" +16 72 optimizer """adadelta""" +16 72 training_loop """lcwa""" +16 72 evaluator """rankbased""" +16 73 dataset """kinships""" +16 73 model """complex""" +16 73 loss """bceaftersigmoid""" +16 73 regularizer """no""" +16 73 optimizer """adadelta""" +16 73 training_loop """lcwa""" +16 73 evaluator """rankbased""" +16 74 dataset """kinships""" +16 74 model """complex""" +16 74 loss """bceaftersigmoid""" +16 74 regularizer """no""" +16 74 optimizer """adadelta""" +16 74 training_loop """lcwa""" +16 74 evaluator """rankbased""" +16 75 dataset """kinships""" +16 75 model """complex""" +16 75 loss """bceaftersigmoid""" +16 75 regularizer """no""" +16 75 optimizer """adadelta""" +16 75 
training_loop """lcwa""" +16 75 evaluator """rankbased""" +16 76 dataset """kinships""" +16 76 model """complex""" +16 76 loss """bceaftersigmoid""" +16 76 regularizer """no""" +16 76 optimizer """adadelta""" +16 76 training_loop """lcwa""" +16 76 evaluator """rankbased""" +16 77 dataset """kinships""" +16 77 model """complex""" +16 77 loss """bceaftersigmoid""" +16 77 regularizer """no""" +16 77 optimizer """adadelta""" +16 77 training_loop """lcwa""" +16 77 evaluator """rankbased""" +16 78 dataset """kinships""" +16 78 model """complex""" +16 78 loss """bceaftersigmoid""" +16 78 regularizer """no""" +16 78 optimizer """adadelta""" +16 78 training_loop """lcwa""" +16 78 evaluator """rankbased""" +16 79 dataset """kinships""" +16 79 model """complex""" +16 79 loss """bceaftersigmoid""" +16 79 regularizer """no""" +16 79 optimizer """adadelta""" +16 79 training_loop """lcwa""" +16 79 evaluator """rankbased""" +16 80 dataset """kinships""" +16 80 model """complex""" +16 80 loss """bceaftersigmoid""" +16 80 regularizer """no""" +16 80 optimizer """adadelta""" +16 80 training_loop """lcwa""" +16 80 evaluator """rankbased""" +16 81 dataset """kinships""" +16 81 model """complex""" +16 81 loss """bceaftersigmoid""" +16 81 regularizer """no""" +16 81 optimizer """adadelta""" +16 81 training_loop """lcwa""" +16 81 evaluator """rankbased""" +16 82 dataset """kinships""" +16 82 model """complex""" +16 82 loss """bceaftersigmoid""" +16 82 regularizer """no""" +16 82 optimizer """adadelta""" +16 82 training_loop """lcwa""" +16 82 evaluator """rankbased""" +16 83 dataset """kinships""" +16 83 model """complex""" +16 83 loss """bceaftersigmoid""" +16 83 regularizer """no""" +16 83 optimizer """adadelta""" +16 83 training_loop """lcwa""" +16 83 evaluator """rankbased""" +16 84 dataset """kinships""" +16 84 model """complex""" +16 84 loss """bceaftersigmoid""" +16 84 regularizer """no""" +16 84 optimizer """adadelta""" +16 84 training_loop """lcwa""" +16 84 evaluator 
"""rankbased""" +16 85 dataset """kinships""" +16 85 model """complex""" +16 85 loss """bceaftersigmoid""" +16 85 regularizer """no""" +16 85 optimizer """adadelta""" +16 85 training_loop """lcwa""" +16 85 evaluator """rankbased""" +16 86 dataset """kinships""" +16 86 model """complex""" +16 86 loss """bceaftersigmoid""" +16 86 regularizer """no""" +16 86 optimizer """adadelta""" +16 86 training_loop """lcwa""" +16 86 evaluator """rankbased""" +16 87 dataset """kinships""" +16 87 model """complex""" +16 87 loss """bceaftersigmoid""" +16 87 regularizer """no""" +16 87 optimizer """adadelta""" +16 87 training_loop """lcwa""" +16 87 evaluator """rankbased""" +16 88 dataset """kinships""" +16 88 model """complex""" +16 88 loss """bceaftersigmoid""" +16 88 regularizer """no""" +16 88 optimizer """adadelta""" +16 88 training_loop """lcwa""" +16 88 evaluator """rankbased""" +16 89 dataset """kinships""" +16 89 model """complex""" +16 89 loss """bceaftersigmoid""" +16 89 regularizer """no""" +16 89 optimizer """adadelta""" +16 89 training_loop """lcwa""" +16 89 evaluator """rankbased""" +16 90 dataset """kinships""" +16 90 model """complex""" +16 90 loss """bceaftersigmoid""" +16 90 regularizer """no""" +16 90 optimizer """adadelta""" +16 90 training_loop """lcwa""" +16 90 evaluator """rankbased""" +16 91 dataset """kinships""" +16 91 model """complex""" +16 91 loss """bceaftersigmoid""" +16 91 regularizer """no""" +16 91 optimizer """adadelta""" +16 91 training_loop """lcwa""" +16 91 evaluator """rankbased""" +16 92 dataset """kinships""" +16 92 model """complex""" +16 92 loss """bceaftersigmoid""" +16 92 regularizer """no""" +16 92 optimizer """adadelta""" +16 92 training_loop """lcwa""" +16 92 evaluator """rankbased""" +16 93 dataset """kinships""" +16 93 model """complex""" +16 93 loss """bceaftersigmoid""" +16 93 regularizer """no""" +16 93 optimizer """adadelta""" +16 93 training_loop """lcwa""" +16 93 evaluator """rankbased""" +16 94 dataset """kinships""" +16 94 
model """complex""" +16 94 loss """bceaftersigmoid""" +16 94 regularizer """no""" +16 94 optimizer """adadelta""" +16 94 training_loop """lcwa""" +16 94 evaluator """rankbased""" +16 95 dataset """kinships""" +16 95 model """complex""" +16 95 loss """bceaftersigmoid""" +16 95 regularizer """no""" +16 95 optimizer """adadelta""" +16 95 training_loop """lcwa""" +16 95 evaluator """rankbased""" +16 96 dataset """kinships""" +16 96 model """complex""" +16 96 loss """bceaftersigmoid""" +16 96 regularizer """no""" +16 96 optimizer """adadelta""" +16 96 training_loop """lcwa""" +16 96 evaluator """rankbased""" +16 97 dataset """kinships""" +16 97 model """complex""" +16 97 loss """bceaftersigmoid""" +16 97 regularizer """no""" +16 97 optimizer """adadelta""" +16 97 training_loop """lcwa""" +16 97 evaluator """rankbased""" +16 98 dataset """kinships""" +16 98 model """complex""" +16 98 loss """bceaftersigmoid""" +16 98 regularizer """no""" +16 98 optimizer """adadelta""" +16 98 training_loop """lcwa""" +16 98 evaluator """rankbased""" +16 99 dataset """kinships""" +16 99 model """complex""" +16 99 loss """bceaftersigmoid""" +16 99 regularizer """no""" +16 99 optimizer """adadelta""" +16 99 training_loop """lcwa""" +16 99 evaluator """rankbased""" +16 100 dataset """kinships""" +16 100 model """complex""" +16 100 loss """bceaftersigmoid""" +16 100 regularizer """no""" +16 100 optimizer """adadelta""" +16 100 training_loop """lcwa""" +16 100 evaluator """rankbased""" +17 1 model.embedding_dim 1.0 +17 1 training.batch_size 0.0 +17 1 training.label_smoothing 0.12337989418178351 +17 2 model.embedding_dim 1.0 +17 2 training.batch_size 1.0 +17 2 training.label_smoothing 0.07053481128242398 +17 3 model.embedding_dim 2.0 +17 3 training.batch_size 2.0 +17 3 training.label_smoothing 0.021172571403733385 +17 4 model.embedding_dim 1.0 +17 4 training.batch_size 1.0 +17 4 training.label_smoothing 0.0027267503852704605 +17 5 model.embedding_dim 2.0 +17 5 training.batch_size 1.0 +17 5 
training.label_smoothing 0.004092451206479156 +17 6 model.embedding_dim 1.0 +17 6 training.batch_size 1.0 +17 6 training.label_smoothing 0.13883346788323553 +17 7 model.embedding_dim 1.0 +17 7 training.batch_size 1.0 +17 7 training.label_smoothing 0.5964343781142689 +17 8 model.embedding_dim 0.0 +17 8 training.batch_size 1.0 +17 8 training.label_smoothing 0.009210547432222547 +17 9 model.embedding_dim 1.0 +17 9 training.batch_size 1.0 +17 9 training.label_smoothing 0.00905201676126337 +17 10 model.embedding_dim 2.0 +17 10 training.batch_size 0.0 +17 10 training.label_smoothing 0.031926737225077044 +17 11 model.embedding_dim 0.0 +17 11 training.batch_size 1.0 +17 11 training.label_smoothing 0.0074038852852184815 +17 12 model.embedding_dim 1.0 +17 12 training.batch_size 0.0 +17 12 training.label_smoothing 0.10125015507069215 +17 13 model.embedding_dim 2.0 +17 13 training.batch_size 2.0 +17 13 training.label_smoothing 0.17280569159568304 +17 14 model.embedding_dim 1.0 +17 14 training.batch_size 1.0 +17 14 training.label_smoothing 0.23576780621736088 +17 15 model.embedding_dim 0.0 +17 15 training.batch_size 2.0 +17 15 training.label_smoothing 0.30057340215107586 +17 16 model.embedding_dim 2.0 +17 16 training.batch_size 2.0 +17 16 training.label_smoothing 0.010086475642141974 +17 17 model.embedding_dim 0.0 +17 17 training.batch_size 0.0 +17 17 training.label_smoothing 0.11237994069678162 +17 18 model.embedding_dim 1.0 +17 18 training.batch_size 2.0 +17 18 training.label_smoothing 0.5488215482125532 +17 19 model.embedding_dim 2.0 +17 19 training.batch_size 1.0 +17 19 training.label_smoothing 0.16498576631632048 +17 20 model.embedding_dim 1.0 +17 20 training.batch_size 0.0 +17 20 training.label_smoothing 0.5779306039878113 +17 21 model.embedding_dim 0.0 +17 21 training.batch_size 2.0 +17 21 training.label_smoothing 0.0738219075230116 +17 22 model.embedding_dim 0.0 +17 22 training.batch_size 1.0 +17 22 training.label_smoothing 0.027865624359850967 +17 23 
model.embedding_dim 2.0 +17 23 training.batch_size 2.0 +17 23 training.label_smoothing 0.6630762798794455 +17 24 model.embedding_dim 0.0 +17 24 training.batch_size 2.0 +17 24 training.label_smoothing 0.35840760900852253 +17 25 model.embedding_dim 2.0 +17 25 training.batch_size 2.0 +17 25 training.label_smoothing 0.009490318617406054 +17 26 model.embedding_dim 0.0 +17 26 training.batch_size 2.0 +17 26 training.label_smoothing 0.18268177475812025 +17 27 model.embedding_dim 0.0 +17 27 training.batch_size 0.0 +17 27 training.label_smoothing 0.45781199118187843 +17 28 model.embedding_dim 2.0 +17 28 training.batch_size 1.0 +17 28 training.label_smoothing 0.008190094181897126 +17 29 model.embedding_dim 1.0 +17 29 training.batch_size 0.0 +17 29 training.label_smoothing 0.15273719257314702 +17 30 model.embedding_dim 2.0 +17 30 training.batch_size 1.0 +17 30 training.label_smoothing 0.05763343405165223 +17 31 model.embedding_dim 1.0 +17 31 training.batch_size 2.0 +17 31 training.label_smoothing 0.024325091490386173 +17 32 model.embedding_dim 2.0 +17 32 training.batch_size 1.0 +17 32 training.label_smoothing 0.33150466606705653 +17 33 model.embedding_dim 1.0 +17 33 training.batch_size 2.0 +17 33 training.label_smoothing 0.17011780027093915 +17 34 model.embedding_dim 2.0 +17 34 training.batch_size 1.0 +17 34 training.label_smoothing 0.005577101972723982 +17 35 model.embedding_dim 2.0 +17 35 training.batch_size 1.0 +17 35 training.label_smoothing 0.009153078544904722 +17 36 model.embedding_dim 0.0 +17 36 training.batch_size 0.0 +17 36 training.label_smoothing 0.0018848373707225877 +17 37 model.embedding_dim 0.0 +17 37 training.batch_size 0.0 +17 37 training.label_smoothing 0.08333711914452155 +17 38 model.embedding_dim 2.0 +17 38 training.batch_size 1.0 +17 38 training.label_smoothing 0.012621541017137784 +17 39 model.embedding_dim 0.0 +17 39 training.batch_size 2.0 +17 39 training.label_smoothing 0.284015334309903 +17 40 model.embedding_dim 1.0 +17 40 training.batch_size 1.0 
+17 40 training.label_smoothing 0.0031630827179823166 +17 41 model.embedding_dim 1.0 +17 41 training.batch_size 2.0 +17 41 training.label_smoothing 0.0035403816860431437 +17 42 model.embedding_dim 2.0 +17 42 training.batch_size 1.0 +17 42 training.label_smoothing 0.008863146432064884 +17 43 model.embedding_dim 1.0 +17 43 training.batch_size 0.0 +17 43 training.label_smoothing 0.03855251002405843 +17 44 model.embedding_dim 1.0 +17 44 training.batch_size 1.0 +17 44 training.label_smoothing 0.015567797571533313 +17 45 model.embedding_dim 2.0 +17 45 training.batch_size 1.0 +17 45 training.label_smoothing 0.011014657893891649 +17 46 model.embedding_dim 0.0 +17 46 training.batch_size 0.0 +17 46 training.label_smoothing 0.018025743969466258 +17 47 model.embedding_dim 2.0 +17 47 training.batch_size 0.0 +17 47 training.label_smoothing 0.4212846054676614 +17 48 model.embedding_dim 1.0 +17 48 training.batch_size 1.0 +17 48 training.label_smoothing 0.546418815915322 +17 49 model.embedding_dim 1.0 +17 49 training.batch_size 0.0 +17 49 training.label_smoothing 0.010548452960255571 +17 50 model.embedding_dim 1.0 +17 50 training.batch_size 1.0 +17 50 training.label_smoothing 0.007651546809820981 +17 51 model.embedding_dim 2.0 +17 51 training.batch_size 1.0 +17 51 training.label_smoothing 0.0019913792891792796 +17 52 model.embedding_dim 2.0 +17 52 training.batch_size 0.0 +17 52 training.label_smoothing 0.0014215679556847409 +17 53 model.embedding_dim 1.0 +17 53 training.batch_size 0.0 +17 53 training.label_smoothing 0.004363690279386796 +17 54 model.embedding_dim 1.0 +17 54 training.batch_size 2.0 +17 54 training.label_smoothing 0.027217834146816042 +17 55 model.embedding_dim 0.0 +17 55 training.batch_size 2.0 +17 55 training.label_smoothing 0.2783716772720877 +17 56 model.embedding_dim 0.0 +17 56 training.batch_size 2.0 +17 56 training.label_smoothing 0.10938023217473503 +17 57 model.embedding_dim 0.0 +17 57 training.batch_size 1.0 +17 57 training.label_smoothing 
0.07836897049816598 +17 58 model.embedding_dim 0.0 +17 58 training.batch_size 0.0 +17 58 training.label_smoothing 0.01071221511964518 +17 59 model.embedding_dim 1.0 +17 59 training.batch_size 2.0 +17 59 training.label_smoothing 0.0010376101679740546 +17 60 model.embedding_dim 1.0 +17 60 training.batch_size 0.0 +17 60 training.label_smoothing 0.0033930080194255006 +17 61 model.embedding_dim 2.0 +17 61 training.batch_size 0.0 +17 61 training.label_smoothing 0.40405840404494986 +17 62 model.embedding_dim 1.0 +17 62 training.batch_size 0.0 +17 62 training.label_smoothing 0.007500863454869131 +17 63 model.embedding_dim 2.0 +17 63 training.batch_size 0.0 +17 63 training.label_smoothing 0.0020213739655368782 +17 64 model.embedding_dim 1.0 +17 64 training.batch_size 0.0 +17 64 training.label_smoothing 0.007732276481681257 +17 65 model.embedding_dim 2.0 +17 65 training.batch_size 1.0 +17 65 training.label_smoothing 0.001189907252104024 +17 66 model.embedding_dim 0.0 +17 66 training.batch_size 0.0 +17 66 training.label_smoothing 0.0015661635614469091 +17 67 model.embedding_dim 0.0 +17 67 training.batch_size 1.0 +17 67 training.label_smoothing 0.03852394743814069 +17 68 model.embedding_dim 1.0 +17 68 training.batch_size 0.0 +17 68 training.label_smoothing 0.0010777922537194907 +17 69 model.embedding_dim 2.0 +17 69 training.batch_size 0.0 +17 69 training.label_smoothing 0.11407501963114512 +17 70 model.embedding_dim 1.0 +17 70 training.batch_size 0.0 +17 70 training.label_smoothing 0.0025511592741736546 +17 71 model.embedding_dim 1.0 +17 71 training.batch_size 1.0 +17 71 training.label_smoothing 0.002804874869114709 +17 72 model.embedding_dim 1.0 +17 72 training.batch_size 1.0 +17 72 training.label_smoothing 0.0010375102913473709 +17 73 model.embedding_dim 2.0 +17 73 training.batch_size 0.0 +17 73 training.label_smoothing 0.21092648711278925 +17 74 model.embedding_dim 2.0 +17 74 training.batch_size 1.0 +17 74 training.label_smoothing 0.0030198551723052826 +17 75 
model.embedding_dim 1.0 +17 75 training.batch_size 2.0 +17 75 training.label_smoothing 0.00125064950107502 +17 76 model.embedding_dim 1.0 +17 76 training.batch_size 0.0 +17 76 training.label_smoothing 0.11845742378813268 +17 77 model.embedding_dim 2.0 +17 77 training.batch_size 2.0 +17 77 training.label_smoothing 0.002073902428939473 +17 78 model.embedding_dim 1.0 +17 78 training.batch_size 1.0 +17 78 training.label_smoothing 0.0185785096034195 +17 79 model.embedding_dim 1.0 +17 79 training.batch_size 1.0 +17 79 training.label_smoothing 0.045749327424820865 +17 80 model.embedding_dim 0.0 +17 80 training.batch_size 1.0 +17 80 training.label_smoothing 0.0021607098035334752 +17 81 model.embedding_dim 1.0 +17 81 training.batch_size 2.0 +17 81 training.label_smoothing 0.016430167893865477 +17 82 model.embedding_dim 0.0 +17 82 training.batch_size 0.0 +17 82 training.label_smoothing 0.08556318649055852 +17 83 model.embedding_dim 2.0 +17 83 training.batch_size 0.0 +17 83 training.label_smoothing 0.05502315323799323 +17 84 model.embedding_dim 2.0 +17 84 training.batch_size 0.0 +17 84 training.label_smoothing 0.12734903966115052 +17 85 model.embedding_dim 1.0 +17 85 training.batch_size 2.0 +17 85 training.label_smoothing 0.0017293434147415464 +17 86 model.embedding_dim 2.0 +17 86 training.batch_size 0.0 +17 86 training.label_smoothing 0.009331231517907412 +17 87 model.embedding_dim 1.0 +17 87 training.batch_size 1.0 +17 87 training.label_smoothing 0.0014335527257385164 +17 88 model.embedding_dim 0.0 +17 88 training.batch_size 1.0 +17 88 training.label_smoothing 0.19572064925122362 +17 89 model.embedding_dim 0.0 +17 89 training.batch_size 2.0 +17 89 training.label_smoothing 0.007049847063786554 +17 90 model.embedding_dim 0.0 +17 90 training.batch_size 0.0 +17 90 training.label_smoothing 0.007415522505802147 +17 91 model.embedding_dim 0.0 +17 91 training.batch_size 1.0 +17 91 training.label_smoothing 0.030268195418736103 +17 92 model.embedding_dim 0.0 +17 92 
training.batch_size 2.0 +17 92 training.label_smoothing 0.009113224072602517 +17 93 model.embedding_dim 0.0 +17 93 training.batch_size 1.0 +17 93 training.label_smoothing 0.12872834224272922 +17 94 model.embedding_dim 2.0 +17 94 training.batch_size 1.0 +17 94 training.label_smoothing 0.03544006638044366 +17 95 model.embedding_dim 1.0 +17 95 training.batch_size 2.0 +17 95 training.label_smoothing 0.6666669819332041 +17 96 model.embedding_dim 2.0 +17 96 training.batch_size 2.0 +17 96 training.label_smoothing 0.0818017797157806 +17 97 model.embedding_dim 0.0 +17 97 training.batch_size 0.0 +17 97 training.label_smoothing 0.12938782764516513 +17 98 model.embedding_dim 0.0 +17 98 training.batch_size 2.0 +17 98 training.label_smoothing 0.0037622635684304855 +17 99 model.embedding_dim 2.0 +17 99 training.batch_size 1.0 +17 99 training.label_smoothing 0.020119571770290893 +17 100 model.embedding_dim 2.0 +17 100 training.batch_size 0.0 +17 100 training.label_smoothing 0.14419563716777148 +17 1 dataset """kinships""" +17 1 model """complex""" +17 1 loss """softplus""" +17 1 regularizer """no""" +17 1 optimizer """adadelta""" +17 1 training_loop """lcwa""" +17 1 evaluator """rankbased""" +17 2 dataset """kinships""" +17 2 model """complex""" +17 2 loss """softplus""" +17 2 regularizer """no""" +17 2 optimizer """adadelta""" +17 2 training_loop """lcwa""" +17 2 evaluator """rankbased""" +17 3 dataset """kinships""" +17 3 model """complex""" +17 3 loss """softplus""" +17 3 regularizer """no""" +17 3 optimizer """adadelta""" +17 3 training_loop """lcwa""" +17 3 evaluator """rankbased""" +17 4 dataset """kinships""" +17 4 model """complex""" +17 4 loss """softplus""" +17 4 regularizer """no""" +17 4 optimizer """adadelta""" +17 4 training_loop """lcwa""" +17 4 evaluator """rankbased""" +17 5 dataset """kinships""" +17 5 model """complex""" +17 5 loss """softplus""" +17 5 regularizer """no""" +17 5 optimizer """adadelta""" +17 5 training_loop """lcwa""" +17 5 evaluator 
"""rankbased""" +17 6 dataset """kinships""" +17 6 model """complex""" +17 6 loss """softplus""" +17 6 regularizer """no""" +17 6 optimizer """adadelta""" +17 6 training_loop """lcwa""" +17 6 evaluator """rankbased""" +17 7 dataset """kinships""" +17 7 model """complex""" +17 7 loss """softplus""" +17 7 regularizer """no""" +17 7 optimizer """adadelta""" +17 7 training_loop """lcwa""" +17 7 evaluator """rankbased""" +17 8 dataset """kinships""" +17 8 model """complex""" +17 8 loss """softplus""" +17 8 regularizer """no""" +17 8 optimizer """adadelta""" +17 8 training_loop """lcwa""" +17 8 evaluator """rankbased""" +17 9 dataset """kinships""" +17 9 model """complex""" +17 9 loss """softplus""" +17 9 regularizer """no""" +17 9 optimizer """adadelta""" +17 9 training_loop """lcwa""" +17 9 evaluator """rankbased""" +17 10 dataset """kinships""" +17 10 model """complex""" +17 10 loss """softplus""" +17 10 regularizer """no""" +17 10 optimizer """adadelta""" +17 10 training_loop """lcwa""" +17 10 evaluator """rankbased""" +17 11 dataset """kinships""" +17 11 model """complex""" +17 11 loss """softplus""" +17 11 regularizer """no""" +17 11 optimizer """adadelta""" +17 11 training_loop """lcwa""" +17 11 evaluator """rankbased""" +17 12 dataset """kinships""" +17 12 model """complex""" +17 12 loss """softplus""" +17 12 regularizer """no""" +17 12 optimizer """adadelta""" +17 12 training_loop """lcwa""" +17 12 evaluator """rankbased""" +17 13 dataset """kinships""" +17 13 model """complex""" +17 13 loss """softplus""" +17 13 regularizer """no""" +17 13 optimizer """adadelta""" +17 13 training_loop """lcwa""" +17 13 evaluator """rankbased""" +17 14 dataset """kinships""" +17 14 model """complex""" +17 14 loss """softplus""" +17 14 regularizer """no""" +17 14 optimizer """adadelta""" +17 14 training_loop """lcwa""" +17 14 evaluator """rankbased""" +17 15 dataset """kinships""" +17 15 model """complex""" +17 15 loss """softplus""" +17 15 regularizer """no""" +17 15 optimizer 
"""adadelta""" +17 15 training_loop """lcwa""" +17 15 evaluator """rankbased""" +17 16 dataset """kinships""" +17 16 model """complex""" +17 16 loss """softplus""" +17 16 regularizer """no""" +17 16 optimizer """adadelta""" +17 16 training_loop """lcwa""" +17 16 evaluator """rankbased""" +17 17 dataset """kinships""" +17 17 model """complex""" +17 17 loss """softplus""" +17 17 regularizer """no""" +17 17 optimizer """adadelta""" +17 17 training_loop """lcwa""" +17 17 evaluator """rankbased""" +17 18 dataset """kinships""" +17 18 model """complex""" +17 18 loss """softplus""" +17 18 regularizer """no""" +17 18 optimizer """adadelta""" +17 18 training_loop """lcwa""" +17 18 evaluator """rankbased""" +17 19 dataset """kinships""" +17 19 model """complex""" +17 19 loss """softplus""" +17 19 regularizer """no""" +17 19 optimizer """adadelta""" +17 19 training_loop """lcwa""" +17 19 evaluator """rankbased""" +17 20 dataset """kinships""" +17 20 model """complex""" +17 20 loss """softplus""" +17 20 regularizer """no""" +17 20 optimizer """adadelta""" +17 20 training_loop """lcwa""" +17 20 evaluator """rankbased""" +17 21 dataset """kinships""" +17 21 model """complex""" +17 21 loss """softplus""" +17 21 regularizer """no""" +17 21 optimizer """adadelta""" +17 21 training_loop """lcwa""" +17 21 evaluator """rankbased""" +17 22 dataset """kinships""" +17 22 model """complex""" +17 22 loss """softplus""" +17 22 regularizer """no""" +17 22 optimizer """adadelta""" +17 22 training_loop """lcwa""" +17 22 evaluator """rankbased""" +17 23 dataset """kinships""" +17 23 model """complex""" +17 23 loss """softplus""" +17 23 regularizer """no""" +17 23 optimizer """adadelta""" +17 23 training_loop """lcwa""" +17 23 evaluator """rankbased""" +17 24 dataset """kinships""" +17 24 model """complex""" +17 24 loss """softplus""" +17 24 regularizer """no""" +17 24 optimizer """adadelta""" +17 24 training_loop """lcwa""" +17 24 evaluator """rankbased""" +17 25 dataset """kinships""" +17 25 
model """complex""" +17 25 loss """softplus""" +17 25 regularizer """no""" +17 25 optimizer """adadelta""" +17 25 training_loop """lcwa""" +17 25 evaluator """rankbased""" +17 26 dataset """kinships""" +17 26 model """complex""" +17 26 loss """softplus""" +17 26 regularizer """no""" +17 26 optimizer """adadelta""" +17 26 training_loop """lcwa""" +17 26 evaluator """rankbased""" +17 27 dataset """kinships""" +17 27 model """complex""" +17 27 loss """softplus""" +17 27 regularizer """no""" +17 27 optimizer """adadelta""" +17 27 training_loop """lcwa""" +17 27 evaluator """rankbased""" +17 28 dataset """kinships""" +17 28 model """complex""" +17 28 loss """softplus""" +17 28 regularizer """no""" +17 28 optimizer """adadelta""" +17 28 training_loop """lcwa""" +17 28 evaluator """rankbased""" +17 29 dataset """kinships""" +17 29 model """complex""" +17 29 loss """softplus""" +17 29 regularizer """no""" +17 29 optimizer """adadelta""" +17 29 training_loop """lcwa""" +17 29 evaluator """rankbased""" +17 30 dataset """kinships""" +17 30 model """complex""" +17 30 loss """softplus""" +17 30 regularizer """no""" +17 30 optimizer """adadelta""" +17 30 training_loop """lcwa""" +17 30 evaluator """rankbased""" +17 31 dataset """kinships""" +17 31 model """complex""" +17 31 loss """softplus""" +17 31 regularizer """no""" +17 31 optimizer """adadelta""" +17 31 training_loop """lcwa""" +17 31 evaluator """rankbased""" +17 32 dataset """kinships""" +17 32 model """complex""" +17 32 loss """softplus""" +17 32 regularizer """no""" +17 32 optimizer """adadelta""" +17 32 training_loop """lcwa""" +17 32 evaluator """rankbased""" +17 33 dataset """kinships""" +17 33 model """complex""" +17 33 loss """softplus""" +17 33 regularizer """no""" +17 33 optimizer """adadelta""" +17 33 training_loop """lcwa""" +17 33 evaluator """rankbased""" +17 34 dataset """kinships""" +17 34 model """complex""" +17 34 loss """softplus""" +17 34 regularizer """no""" +17 34 optimizer """adadelta""" +17 34 
training_loop """lcwa""" +17 34 evaluator """rankbased""" +17 35 dataset """kinships""" +17 35 model """complex""" +17 35 loss """softplus""" +17 35 regularizer """no""" +17 35 optimizer """adadelta""" +17 35 training_loop """lcwa""" +17 35 evaluator """rankbased""" +17 36 dataset """kinships""" +17 36 model """complex""" +17 36 loss """softplus""" +17 36 regularizer """no""" +17 36 optimizer """adadelta""" +17 36 training_loop """lcwa""" +17 36 evaluator """rankbased""" +17 37 dataset """kinships""" +17 37 model """complex""" +17 37 loss """softplus""" +17 37 regularizer """no""" +17 37 optimizer """adadelta""" +17 37 training_loop """lcwa""" +17 37 evaluator """rankbased""" +17 38 dataset """kinships""" +17 38 model """complex""" +17 38 loss """softplus""" +17 38 regularizer """no""" +17 38 optimizer """adadelta""" +17 38 training_loop """lcwa""" +17 38 evaluator """rankbased""" +17 39 dataset """kinships""" +17 39 model """complex""" +17 39 loss """softplus""" +17 39 regularizer """no""" +17 39 optimizer """adadelta""" +17 39 training_loop """lcwa""" +17 39 evaluator """rankbased""" +17 40 dataset """kinships""" +17 40 model """complex""" +17 40 loss """softplus""" +17 40 regularizer """no""" +17 40 optimizer """adadelta""" +17 40 training_loop """lcwa""" +17 40 evaluator """rankbased""" +17 41 dataset """kinships""" +17 41 model """complex""" +17 41 loss """softplus""" +17 41 regularizer """no""" +17 41 optimizer """adadelta""" +17 41 training_loop """lcwa""" +17 41 evaluator """rankbased""" +17 42 dataset """kinships""" +17 42 model """complex""" +17 42 loss """softplus""" +17 42 regularizer """no""" +17 42 optimizer """adadelta""" +17 42 training_loop """lcwa""" +17 42 evaluator """rankbased""" +17 43 dataset """kinships""" +17 43 model """complex""" +17 43 loss """softplus""" +17 43 regularizer """no""" +17 43 optimizer """adadelta""" +17 43 training_loop """lcwa""" +17 43 evaluator """rankbased""" +17 44 dataset """kinships""" +17 44 model """complex""" +17 
44 loss """softplus""" +17 44 regularizer """no""" +17 44 optimizer """adadelta""" +17 44 training_loop """lcwa""" +17 44 evaluator """rankbased""" +17 45 dataset """kinships""" +17 45 model """complex""" +17 45 loss """softplus""" +17 45 regularizer """no""" +17 45 optimizer """adadelta""" +17 45 training_loop """lcwa""" +17 45 evaluator """rankbased""" +17 46 dataset """kinships""" +17 46 model """complex""" +17 46 loss """softplus""" +17 46 regularizer """no""" +17 46 optimizer """adadelta""" +17 46 training_loop """lcwa""" +17 46 evaluator """rankbased""" +17 47 dataset """kinships""" +17 47 model """complex""" +17 47 loss """softplus""" +17 47 regularizer """no""" +17 47 optimizer """adadelta""" +17 47 training_loop """lcwa""" +17 47 evaluator """rankbased""" +17 48 dataset """kinships""" +17 48 model """complex""" +17 48 loss """softplus""" +17 48 regularizer """no""" +17 48 optimizer """adadelta""" +17 48 training_loop """lcwa""" +17 48 evaluator """rankbased""" +17 49 dataset """kinships""" +17 49 model """complex""" +17 49 loss """softplus""" +17 49 regularizer """no""" +17 49 optimizer """adadelta""" +17 49 training_loop """lcwa""" +17 49 evaluator """rankbased""" +17 50 dataset """kinships""" +17 50 model """complex""" +17 50 loss """softplus""" +17 50 regularizer """no""" +17 50 optimizer """adadelta""" +17 50 training_loop """lcwa""" +17 50 evaluator """rankbased""" +17 51 dataset """kinships""" +17 51 model """complex""" +17 51 loss """softplus""" +17 51 regularizer """no""" +17 51 optimizer """adadelta""" +17 51 training_loop """lcwa""" +17 51 evaluator """rankbased""" +17 52 dataset """kinships""" +17 52 model """complex""" +17 52 loss """softplus""" +17 52 regularizer """no""" +17 52 optimizer """adadelta""" +17 52 training_loop """lcwa""" +17 52 evaluator """rankbased""" +17 53 dataset """kinships""" +17 53 model """complex""" +17 53 loss """softplus""" +17 53 regularizer """no""" +17 53 optimizer """adadelta""" +17 53 training_loop """lcwa""" +17 
53 evaluator """rankbased""" +17 54 dataset """kinships""" +17 54 model """complex""" +17 54 loss """softplus""" +17 54 regularizer """no""" +17 54 optimizer """adadelta""" +17 54 training_loop """lcwa""" +17 54 evaluator """rankbased""" +17 55 dataset """kinships""" +17 55 model """complex""" +17 55 loss """softplus""" +17 55 regularizer """no""" +17 55 optimizer """adadelta""" +17 55 training_loop """lcwa""" +17 55 evaluator """rankbased""" +17 56 dataset """kinships""" +17 56 model """complex""" +17 56 loss """softplus""" +17 56 regularizer """no""" +17 56 optimizer """adadelta""" +17 56 training_loop """lcwa""" +17 56 evaluator """rankbased""" +17 57 dataset """kinships""" +17 57 model """complex""" +17 57 loss """softplus""" +17 57 regularizer """no""" +17 57 optimizer """adadelta""" +17 57 training_loop """lcwa""" +17 57 evaluator """rankbased""" +17 58 dataset """kinships""" +17 58 model """complex""" +17 58 loss """softplus""" +17 58 regularizer """no""" +17 58 optimizer """adadelta""" +17 58 training_loop """lcwa""" +17 58 evaluator """rankbased""" +17 59 dataset """kinships""" +17 59 model """complex""" +17 59 loss """softplus""" +17 59 regularizer """no""" +17 59 optimizer """adadelta""" +17 59 training_loop """lcwa""" +17 59 evaluator """rankbased""" +17 60 dataset """kinships""" +17 60 model """complex""" +17 60 loss """softplus""" +17 60 regularizer """no""" +17 60 optimizer """adadelta""" +17 60 training_loop """lcwa""" +17 60 evaluator """rankbased""" +17 61 dataset """kinships""" +17 61 model """complex""" +17 61 loss """softplus""" +17 61 regularizer """no""" +17 61 optimizer """adadelta""" +17 61 training_loop """lcwa""" +17 61 evaluator """rankbased""" +17 62 dataset """kinships""" +17 62 model """complex""" +17 62 loss """softplus""" +17 62 regularizer """no""" +17 62 optimizer """adadelta""" +17 62 training_loop """lcwa""" +17 62 evaluator """rankbased""" +17 63 dataset """kinships""" +17 63 model """complex""" +17 63 loss """softplus""" +17 
63 regularizer """no""" +17 63 optimizer """adadelta""" +17 63 training_loop """lcwa""" +17 63 evaluator """rankbased""" +17 64 dataset """kinships""" +17 64 model """complex""" +17 64 loss """softplus""" +17 64 regularizer """no""" +17 64 optimizer """adadelta""" +17 64 training_loop """lcwa""" +17 64 evaluator """rankbased""" +17 65 dataset """kinships""" +17 65 model """complex""" +17 65 loss """softplus""" +17 65 regularizer """no""" +17 65 optimizer """adadelta""" +17 65 training_loop """lcwa""" +17 65 evaluator """rankbased""" +17 66 dataset """kinships""" +17 66 model """complex""" +17 66 loss """softplus""" +17 66 regularizer """no""" +17 66 optimizer """adadelta""" +17 66 training_loop """lcwa""" +17 66 evaluator """rankbased""" +17 67 dataset """kinships""" +17 67 model """complex""" +17 67 loss """softplus""" +17 67 regularizer """no""" +17 67 optimizer """adadelta""" +17 67 training_loop """lcwa""" +17 67 evaluator """rankbased""" +17 68 dataset """kinships""" +17 68 model """complex""" +17 68 loss """softplus""" +17 68 regularizer """no""" +17 68 optimizer """adadelta""" +17 68 training_loop """lcwa""" +17 68 evaluator """rankbased""" +17 69 dataset """kinships""" +17 69 model """complex""" +17 69 loss """softplus""" +17 69 regularizer """no""" +17 69 optimizer """adadelta""" +17 69 training_loop """lcwa""" +17 69 evaluator """rankbased""" +17 70 dataset """kinships""" +17 70 model """complex""" +17 70 loss """softplus""" +17 70 regularizer """no""" +17 70 optimizer """adadelta""" +17 70 training_loop """lcwa""" +17 70 evaluator """rankbased""" +17 71 dataset """kinships""" +17 71 model """complex""" +17 71 loss """softplus""" +17 71 regularizer """no""" +17 71 optimizer """adadelta""" +17 71 training_loop """lcwa""" +17 71 evaluator """rankbased""" +17 72 dataset """kinships""" +17 72 model """complex""" +17 72 loss """softplus""" +17 72 regularizer """no""" +17 72 optimizer """adadelta""" +17 72 training_loop """lcwa""" +17 72 evaluator 
"""rankbased""" +17 73 dataset """kinships""" +17 73 model """complex""" +17 73 loss """softplus""" +17 73 regularizer """no""" +17 73 optimizer """adadelta""" +17 73 training_loop """lcwa""" +17 73 evaluator """rankbased""" +17 74 dataset """kinships""" +17 74 model """complex""" +17 74 loss """softplus""" +17 74 regularizer """no""" +17 74 optimizer """adadelta""" +17 74 training_loop """lcwa""" +17 74 evaluator """rankbased""" +17 75 dataset """kinships""" +17 75 model """complex""" +17 75 loss """softplus""" +17 75 regularizer """no""" +17 75 optimizer """adadelta""" +17 75 training_loop """lcwa""" +17 75 evaluator """rankbased""" +17 76 dataset """kinships""" +17 76 model """complex""" +17 76 loss """softplus""" +17 76 regularizer """no""" +17 76 optimizer """adadelta""" +17 76 training_loop """lcwa""" +17 76 evaluator """rankbased""" +17 77 dataset """kinships""" +17 77 model """complex""" +17 77 loss """softplus""" +17 77 regularizer """no""" +17 77 optimizer """adadelta""" +17 77 training_loop """lcwa""" +17 77 evaluator """rankbased""" +17 78 dataset """kinships""" +17 78 model """complex""" +17 78 loss """softplus""" +17 78 regularizer """no""" +17 78 optimizer """adadelta""" +17 78 training_loop """lcwa""" +17 78 evaluator """rankbased""" +17 79 dataset """kinships""" +17 79 model """complex""" +17 79 loss """softplus""" +17 79 regularizer """no""" +17 79 optimizer """adadelta""" +17 79 training_loop """lcwa""" +17 79 evaluator """rankbased""" +17 80 dataset """kinships""" +17 80 model """complex""" +17 80 loss """softplus""" +17 80 regularizer """no""" +17 80 optimizer """adadelta""" +17 80 training_loop """lcwa""" +17 80 evaluator """rankbased""" +17 81 dataset """kinships""" +17 81 model """complex""" +17 81 loss """softplus""" +17 81 regularizer """no""" +17 81 optimizer """adadelta""" +17 81 training_loop """lcwa""" +17 81 evaluator """rankbased""" +17 82 dataset """kinships""" +17 82 model """complex""" +17 82 loss """softplus""" +17 82 regularizer 
"""no""" +17 82 optimizer """adadelta""" +17 82 training_loop """lcwa""" +17 82 evaluator """rankbased""" +17 83 dataset """kinships""" +17 83 model """complex""" +17 83 loss """softplus""" +17 83 regularizer """no""" +17 83 optimizer """adadelta""" +17 83 training_loop """lcwa""" +17 83 evaluator """rankbased""" +17 84 dataset """kinships""" +17 84 model """complex""" +17 84 loss """softplus""" +17 84 regularizer """no""" +17 84 optimizer """adadelta""" +17 84 training_loop """lcwa""" +17 84 evaluator """rankbased""" +17 85 dataset """kinships""" +17 85 model """complex""" +17 85 loss """softplus""" +17 85 regularizer """no""" +17 85 optimizer """adadelta""" +17 85 training_loop """lcwa""" +17 85 evaluator """rankbased""" +17 86 dataset """kinships""" +17 86 model """complex""" +17 86 loss """softplus""" +17 86 regularizer """no""" +17 86 optimizer """adadelta""" +17 86 training_loop """lcwa""" +17 86 evaluator """rankbased""" +17 87 dataset """kinships""" +17 87 model """complex""" +17 87 loss """softplus""" +17 87 regularizer """no""" +17 87 optimizer """adadelta""" +17 87 training_loop """lcwa""" +17 87 evaluator """rankbased""" +17 88 dataset """kinships""" +17 88 model """complex""" +17 88 loss """softplus""" +17 88 regularizer """no""" +17 88 optimizer """adadelta""" +17 88 training_loop """lcwa""" +17 88 evaluator """rankbased""" +17 89 dataset """kinships""" +17 89 model """complex""" +17 89 loss """softplus""" +17 89 regularizer """no""" +17 89 optimizer """adadelta""" +17 89 training_loop """lcwa""" +17 89 evaluator """rankbased""" +17 90 dataset """kinships""" +17 90 model """complex""" +17 90 loss """softplus""" +17 90 regularizer """no""" +17 90 optimizer """adadelta""" +17 90 training_loop """lcwa""" +17 90 evaluator """rankbased""" +17 91 dataset """kinships""" +17 91 model """complex""" +17 91 loss """softplus""" +17 91 regularizer """no""" +17 91 optimizer """adadelta""" +17 91 training_loop """lcwa""" +17 91 evaluator """rankbased""" +17 92 
dataset """kinships""" +17 92 model """complex""" +17 92 loss """softplus""" +17 92 regularizer """no""" +17 92 optimizer """adadelta""" +17 92 training_loop """lcwa""" +17 92 evaluator """rankbased""" +17 93 dataset """kinships""" +17 93 model """complex""" +17 93 loss """softplus""" +17 93 regularizer """no""" +17 93 optimizer """adadelta""" +17 93 training_loop """lcwa""" +17 93 evaluator """rankbased""" +17 94 dataset """kinships""" +17 94 model """complex""" +17 94 loss """softplus""" +17 94 regularizer """no""" +17 94 optimizer """adadelta""" +17 94 training_loop """lcwa""" +17 94 evaluator """rankbased""" +17 95 dataset """kinships""" +17 95 model """complex""" +17 95 loss """softplus""" +17 95 regularizer """no""" +17 95 optimizer """adadelta""" +17 95 training_loop """lcwa""" +17 95 evaluator """rankbased""" +17 96 dataset """kinships""" +17 96 model """complex""" +17 96 loss """softplus""" +17 96 regularizer """no""" +17 96 optimizer """adadelta""" +17 96 training_loop """lcwa""" +17 96 evaluator """rankbased""" +17 97 dataset """kinships""" +17 97 model """complex""" +17 97 loss """softplus""" +17 97 regularizer """no""" +17 97 optimizer """adadelta""" +17 97 training_loop """lcwa""" +17 97 evaluator """rankbased""" +17 98 dataset """kinships""" +17 98 model """complex""" +17 98 loss """softplus""" +17 98 regularizer """no""" +17 98 optimizer """adadelta""" +17 98 training_loop """lcwa""" +17 98 evaluator """rankbased""" +17 99 dataset """kinships""" +17 99 model """complex""" +17 99 loss """softplus""" +17 99 regularizer """no""" +17 99 optimizer """adadelta""" +17 99 training_loop """lcwa""" +17 99 evaluator """rankbased""" +17 100 dataset """kinships""" +17 100 model """complex""" +17 100 loss """softplus""" +17 100 regularizer """no""" +17 100 optimizer """adadelta""" +17 100 training_loop """lcwa""" +17 100 evaluator """rankbased""" +18 1 model.embedding_dim 0.0 +18 1 training.batch_size 0.0 +18 1 training.label_smoothing 0.29847144721033825 +18 2 
model.embedding_dim 0.0 +18 2 training.batch_size 0.0 +18 2 training.label_smoothing 0.07055683116871256 +18 3 model.embedding_dim 1.0 +18 3 training.batch_size 0.0 +18 3 training.label_smoothing 0.24997756319187936 +18 4 model.embedding_dim 2.0 +18 4 training.batch_size 0.0 +18 4 training.label_smoothing 0.09364369284996725 +18 5 model.embedding_dim 1.0 +18 5 training.batch_size 0.0 +18 5 training.label_smoothing 0.3453830329891493 +18 6 model.embedding_dim 2.0 +18 6 training.batch_size 1.0 +18 6 training.label_smoothing 0.0019416263818633967 +18 7 model.embedding_dim 0.0 +18 7 training.batch_size 0.0 +18 7 training.label_smoothing 0.15781944932482844 +18 8 model.embedding_dim 0.0 +18 8 training.batch_size 2.0 +18 8 training.label_smoothing 0.001795647509057342 +18 9 model.embedding_dim 2.0 +18 9 training.batch_size 2.0 +18 9 training.label_smoothing 0.062148488132258844 +18 10 model.embedding_dim 1.0 +18 10 training.batch_size 0.0 +18 10 training.label_smoothing 0.9085789823345013 +18 11 model.embedding_dim 1.0 +18 11 training.batch_size 1.0 +18 11 training.label_smoothing 0.042765933033748336 +18 12 model.embedding_dim 0.0 +18 12 training.batch_size 2.0 +18 12 training.label_smoothing 0.6951970479337528 +18 13 model.embedding_dim 1.0 +18 13 training.batch_size 0.0 +18 13 training.label_smoothing 0.039820565051308696 +18 14 model.embedding_dim 2.0 +18 14 training.batch_size 1.0 +18 14 training.label_smoothing 0.1656504451466893 +18 15 model.embedding_dim 0.0 +18 15 training.batch_size 2.0 +18 15 training.label_smoothing 0.09488778045148283 +18 16 model.embedding_dim 0.0 +18 16 training.batch_size 1.0 +18 16 training.label_smoothing 0.006837776839434862 +18 17 model.embedding_dim 1.0 +18 17 training.batch_size 1.0 +18 17 training.label_smoothing 0.002005683215182114 +18 18 model.embedding_dim 0.0 +18 18 training.batch_size 0.0 +18 18 training.label_smoothing 0.0036805817183343647 +18 19 model.embedding_dim 0.0 +18 19 training.batch_size 2.0 +18 19 
training.label_smoothing 0.06966274099889684 +18 20 model.embedding_dim 1.0 +18 20 training.batch_size 0.0 +18 20 training.label_smoothing 0.12139943549878407 +18 21 model.embedding_dim 0.0 +18 21 training.batch_size 0.0 +18 21 training.label_smoothing 0.8187166896000098 +18 22 model.embedding_dim 1.0 +18 22 training.batch_size 2.0 +18 22 training.label_smoothing 0.0029320087351181454 +18 23 model.embedding_dim 0.0 +18 23 training.batch_size 2.0 +18 23 training.label_smoothing 0.0025608889752224443 +18 24 model.embedding_dim 1.0 +18 24 training.batch_size 0.0 +18 24 training.label_smoothing 0.02721879032552857 +18 25 model.embedding_dim 0.0 +18 25 training.batch_size 0.0 +18 25 training.label_smoothing 0.0010188499603553608 +18 26 model.embedding_dim 0.0 +18 26 training.batch_size 2.0 +18 26 training.label_smoothing 0.004730289809257369 +18 27 model.embedding_dim 1.0 +18 27 training.batch_size 1.0 +18 27 training.label_smoothing 0.2966725373574173 +18 28 model.embedding_dim 2.0 +18 28 training.batch_size 1.0 +18 28 training.label_smoothing 0.02515982448963382 +18 29 model.embedding_dim 2.0 +18 29 training.batch_size 2.0 +18 29 training.label_smoothing 0.06785249838381113 +18 30 model.embedding_dim 1.0 +18 30 training.batch_size 0.0 +18 30 training.label_smoothing 0.3578431826031105 +18 31 model.embedding_dim 1.0 +18 31 training.batch_size 1.0 +18 31 training.label_smoothing 0.062264450893417214 +18 32 model.embedding_dim 2.0 +18 32 training.batch_size 1.0 +18 32 training.label_smoothing 0.007647880381282125 +18 33 model.embedding_dim 0.0 +18 33 training.batch_size 2.0 +18 33 training.label_smoothing 0.03000769116717595 +18 34 model.embedding_dim 1.0 +18 34 training.batch_size 2.0 +18 34 training.label_smoothing 0.12138244967773187 +18 35 model.embedding_dim 2.0 +18 35 training.batch_size 1.0 +18 35 training.label_smoothing 0.01293348305546618 +18 36 model.embedding_dim 2.0 +18 36 training.batch_size 1.0 +18 36 training.label_smoothing 0.0015336827881818316 +18 37 
model.embedding_dim 1.0 +18 37 training.batch_size 2.0 +18 37 training.label_smoothing 0.254859132983156 +18 38 model.embedding_dim 0.0 +18 38 training.batch_size 0.0 +18 38 training.label_smoothing 0.009923977184535068 +18 39 model.embedding_dim 2.0 +18 39 training.batch_size 2.0 +18 39 training.label_smoothing 0.3495156974271452 +18 40 model.embedding_dim 0.0 +18 40 training.batch_size 0.0 +18 40 training.label_smoothing 0.005531128428481408 +18 41 model.embedding_dim 2.0 +18 41 training.batch_size 2.0 +18 41 training.label_smoothing 0.012042743665467814 +18 42 model.embedding_dim 2.0 +18 42 training.batch_size 1.0 +18 42 training.label_smoothing 0.14844338231389975 +18 43 model.embedding_dim 1.0 +18 43 training.batch_size 0.0 +18 43 training.label_smoothing 0.0422585827664923 +18 44 model.embedding_dim 1.0 +18 44 training.batch_size 2.0 +18 44 training.label_smoothing 0.006857750582782645 +18 45 model.embedding_dim 1.0 +18 45 training.batch_size 2.0 +18 45 training.label_smoothing 0.016762630170335694 +18 46 model.embedding_dim 1.0 +18 46 training.batch_size 1.0 +18 46 training.label_smoothing 0.014459299673806568 +18 47 model.embedding_dim 1.0 +18 47 training.batch_size 2.0 +18 47 training.label_smoothing 0.10508600681934213 +18 48 model.embedding_dim 2.0 +18 48 training.batch_size 2.0 +18 48 training.label_smoothing 0.07900404340351525 +18 49 model.embedding_dim 0.0 +18 49 training.batch_size 2.0 +18 49 training.label_smoothing 0.509342223429854 +18 50 model.embedding_dim 0.0 +18 50 training.batch_size 0.0 +18 50 training.label_smoothing 0.003822684405377785 +18 51 model.embedding_dim 0.0 +18 51 training.batch_size 2.0 +18 51 training.label_smoothing 0.05504040925054862 +18 52 model.embedding_dim 1.0 +18 52 training.batch_size 2.0 +18 52 training.label_smoothing 0.01772970407487185 +18 53 model.embedding_dim 2.0 +18 53 training.batch_size 0.0 +18 53 training.label_smoothing 0.01442995835148232 +18 54 model.embedding_dim 2.0 +18 54 training.batch_size 1.0 +18 
54 training.label_smoothing 0.33470393920490565 +18 55 model.embedding_dim 1.0 +18 55 training.batch_size 2.0 +18 55 training.label_smoothing 0.6765733623730326 +18 56 model.embedding_dim 1.0 +18 56 training.batch_size 0.0 +18 56 training.label_smoothing 0.11856439299110984 +18 57 model.embedding_dim 1.0 +18 57 training.batch_size 2.0 +18 57 training.label_smoothing 0.21646056972004785 +18 58 model.embedding_dim 0.0 +18 58 training.batch_size 1.0 +18 58 training.label_smoothing 0.39910491691811617 +18 59 model.embedding_dim 0.0 +18 59 training.batch_size 0.0 +18 59 training.label_smoothing 0.01212089602390152 +18 60 model.embedding_dim 1.0 +18 60 training.batch_size 2.0 +18 60 training.label_smoothing 0.001980347749717254 +18 61 model.embedding_dim 2.0 +18 61 training.batch_size 2.0 +18 61 training.label_smoothing 0.0029168398967659957 +18 62 model.embedding_dim 0.0 +18 62 training.batch_size 1.0 +18 62 training.label_smoothing 0.007328110067355971 +18 63 model.embedding_dim 0.0 +18 63 training.batch_size 1.0 +18 63 training.label_smoothing 0.0023760877594247573 +18 64 model.embedding_dim 2.0 +18 64 training.batch_size 2.0 +18 64 training.label_smoothing 0.020267674613139493 +18 65 model.embedding_dim 1.0 +18 65 training.batch_size 1.0 +18 65 training.label_smoothing 0.014619634175807498 +18 66 model.embedding_dim 1.0 +18 66 training.batch_size 1.0 +18 66 training.label_smoothing 0.10094262929440852 +18 67 model.embedding_dim 2.0 +18 67 training.batch_size 1.0 +18 67 training.label_smoothing 0.15559604137203883 +18 68 model.embedding_dim 2.0 +18 68 training.batch_size 0.0 +18 68 training.label_smoothing 0.22739513380154025 +18 69 model.embedding_dim 1.0 +18 69 training.batch_size 2.0 +18 69 training.label_smoothing 0.027654454757390784 +18 70 model.embedding_dim 0.0 +18 70 training.batch_size 2.0 +18 70 training.label_smoothing 0.0010928766429328008 +18 71 model.embedding_dim 0.0 +18 71 training.batch_size 0.0 +18 71 training.label_smoothing 0.14604306276302295 +18 
72 model.embedding_dim 0.0 +18 72 training.batch_size 0.0 +18 72 training.label_smoothing 0.23556075509192673 +18 73 model.embedding_dim 0.0 +18 73 training.batch_size 2.0 +18 73 training.label_smoothing 0.011944409719160625 +18 74 model.embedding_dim 0.0 +18 74 training.batch_size 2.0 +18 74 training.label_smoothing 0.07536025612328234 +18 75 model.embedding_dim 2.0 +18 75 training.batch_size 1.0 +18 75 training.label_smoothing 0.0010918053510061726 +18 76 model.embedding_dim 0.0 +18 76 training.batch_size 1.0 +18 76 training.label_smoothing 0.8641710124789348 +18 77 model.embedding_dim 2.0 +18 77 training.batch_size 2.0 +18 77 training.label_smoothing 0.03644447829285715 +18 78 model.embedding_dim 0.0 +18 78 training.batch_size 2.0 +18 78 training.label_smoothing 0.015743358144071285 +18 79 model.embedding_dim 0.0 +18 79 training.batch_size 0.0 +18 79 training.label_smoothing 0.013670206890131209 +18 80 model.embedding_dim 2.0 +18 80 training.batch_size 0.0 +18 80 training.label_smoothing 0.0030261671450541364 +18 81 model.embedding_dim 0.0 +18 81 training.batch_size 2.0 +18 81 training.label_smoothing 0.4591929706436071 +18 82 model.embedding_dim 2.0 +18 82 training.batch_size 0.0 +18 82 training.label_smoothing 0.0030810966090910608 +18 83 model.embedding_dim 1.0 +18 83 training.batch_size 1.0 +18 83 training.label_smoothing 0.005776809166822901 +18 84 model.embedding_dim 2.0 +18 84 training.batch_size 2.0 +18 84 training.label_smoothing 0.0022610798699596326 +18 85 model.embedding_dim 1.0 +18 85 training.batch_size 2.0 +18 85 training.label_smoothing 0.0011839001016591085 +18 86 model.embedding_dim 1.0 +18 86 training.batch_size 0.0 +18 86 training.label_smoothing 0.13225024714858236 +18 87 model.embedding_dim 0.0 +18 87 training.batch_size 0.0 +18 87 training.label_smoothing 0.0013169729478474924 +18 88 model.embedding_dim 0.0 +18 88 training.batch_size 0.0 +18 88 training.label_smoothing 0.002443790933514041 +18 89 model.embedding_dim 1.0 +18 89 
training.batch_size 0.0 +18 89 training.label_smoothing 0.03517474699453373 +18 90 model.embedding_dim 0.0 +18 90 training.batch_size 0.0 +18 90 training.label_smoothing 0.0018563072272397307 +18 91 model.embedding_dim 2.0 +18 91 training.batch_size 2.0 +18 91 training.label_smoothing 0.20449997056791624 +18 92 model.embedding_dim 0.0 +18 92 training.batch_size 1.0 +18 92 training.label_smoothing 0.0012073347816168487 +18 93 model.embedding_dim 0.0 +18 93 training.batch_size 0.0 +18 93 training.label_smoothing 0.011527803404472875 +18 94 model.embedding_dim 0.0 +18 94 training.batch_size 2.0 +18 94 training.label_smoothing 0.004304654287330382 +18 95 model.embedding_dim 1.0 +18 95 training.batch_size 0.0 +18 95 training.label_smoothing 0.001946138720159082 +18 96 model.embedding_dim 0.0 +18 96 training.batch_size 1.0 +18 96 training.label_smoothing 0.4431763637618181 +18 97 model.embedding_dim 2.0 +18 97 training.batch_size 1.0 +18 97 training.label_smoothing 0.1416630772778874 +18 98 model.embedding_dim 0.0 +18 98 training.batch_size 1.0 +18 98 training.label_smoothing 0.030964830481789458 +18 99 model.embedding_dim 2.0 +18 99 training.batch_size 1.0 +18 99 training.label_smoothing 0.14020754121477433 +18 100 model.embedding_dim 1.0 +18 100 training.batch_size 2.0 +18 100 training.label_smoothing 0.0012061547392208413 +18 1 dataset """kinships""" +18 1 model """complex""" +18 1 loss """bceaftersigmoid""" +18 1 regularizer """no""" +18 1 optimizer """adadelta""" +18 1 training_loop """lcwa""" +18 1 evaluator """rankbased""" +18 2 dataset """kinships""" +18 2 model """complex""" +18 2 loss """bceaftersigmoid""" +18 2 regularizer """no""" +18 2 optimizer """adadelta""" +18 2 training_loop """lcwa""" +18 2 evaluator """rankbased""" +18 3 dataset """kinships""" +18 3 model """complex""" +18 3 loss """bceaftersigmoid""" +18 3 regularizer """no""" +18 3 optimizer """adadelta""" +18 3 training_loop """lcwa""" +18 3 evaluator """rankbased""" +18 4 dataset """kinships""" 
+18 4 model """complex""" +18 4 loss """bceaftersigmoid""" +18 4 regularizer """no""" +18 4 optimizer """adadelta""" +18 4 training_loop """lcwa""" +18 4 evaluator """rankbased""" +18 5 dataset """kinships""" +18 5 model """complex""" +18 5 loss """bceaftersigmoid""" +18 5 regularizer """no""" +18 5 optimizer """adadelta""" +18 5 training_loop """lcwa""" +18 5 evaluator """rankbased""" +18 6 dataset """kinships""" +18 6 model """complex""" +18 6 loss """bceaftersigmoid""" +18 6 regularizer """no""" +18 6 optimizer """adadelta""" +18 6 training_loop """lcwa""" +18 6 evaluator """rankbased""" +18 7 dataset """kinships""" +18 7 model """complex""" +18 7 loss """bceaftersigmoid""" +18 7 regularizer """no""" +18 7 optimizer """adadelta""" +18 7 training_loop """lcwa""" +18 7 evaluator """rankbased""" +18 8 dataset """kinships""" +18 8 model """complex""" +18 8 loss """bceaftersigmoid""" +18 8 regularizer """no""" +18 8 optimizer """adadelta""" +18 8 training_loop """lcwa""" +18 8 evaluator """rankbased""" +18 9 dataset """kinships""" +18 9 model """complex""" +18 9 loss """bceaftersigmoid""" +18 9 regularizer """no""" +18 9 optimizer """adadelta""" +18 9 training_loop """lcwa""" +18 9 evaluator """rankbased""" +18 10 dataset """kinships""" +18 10 model """complex""" +18 10 loss """bceaftersigmoid""" +18 10 regularizer """no""" +18 10 optimizer """adadelta""" +18 10 training_loop """lcwa""" +18 10 evaluator """rankbased""" +18 11 dataset """kinships""" +18 11 model """complex""" +18 11 loss """bceaftersigmoid""" +18 11 regularizer """no""" +18 11 optimizer """adadelta""" +18 11 training_loop """lcwa""" +18 11 evaluator """rankbased""" +18 12 dataset """kinships""" +18 12 model """complex""" +18 12 loss """bceaftersigmoid""" +18 12 regularizer """no""" +18 12 optimizer """adadelta""" +18 12 training_loop """lcwa""" +18 12 evaluator """rankbased""" +18 13 dataset """kinships""" +18 13 model """complex""" +18 13 loss """bceaftersigmoid""" +18 13 regularizer """no""" +18 13 
optimizer """adadelta""" +18 13 training_loop """lcwa""" +18 13 evaluator """rankbased""" +18 14 dataset """kinships""" +18 14 model """complex""" +18 14 loss """bceaftersigmoid""" +18 14 regularizer """no""" +18 14 optimizer """adadelta""" +18 14 training_loop """lcwa""" +18 14 evaluator """rankbased""" +18 15 dataset """kinships""" +18 15 model """complex""" +18 15 loss """bceaftersigmoid""" +18 15 regularizer """no""" +18 15 optimizer """adadelta""" +18 15 training_loop """lcwa""" +18 15 evaluator """rankbased""" +18 16 dataset """kinships""" +18 16 model """complex""" +18 16 loss """bceaftersigmoid""" +18 16 regularizer """no""" +18 16 optimizer """adadelta""" +18 16 training_loop """lcwa""" +18 16 evaluator """rankbased""" +18 17 dataset """kinships""" +18 17 model """complex""" +18 17 loss """bceaftersigmoid""" +18 17 regularizer """no""" +18 17 optimizer """adadelta""" +18 17 training_loop """lcwa""" +18 17 evaluator """rankbased""" +18 18 dataset """kinships""" +18 18 model """complex""" +18 18 loss """bceaftersigmoid""" +18 18 regularizer """no""" +18 18 optimizer """adadelta""" +18 18 training_loop """lcwa""" +18 18 evaluator """rankbased""" +18 19 dataset """kinships""" +18 19 model """complex""" +18 19 loss """bceaftersigmoid""" +18 19 regularizer """no""" +18 19 optimizer """adadelta""" +18 19 training_loop """lcwa""" +18 19 evaluator """rankbased""" +18 20 dataset """kinships""" +18 20 model """complex""" +18 20 loss """bceaftersigmoid""" +18 20 regularizer """no""" +18 20 optimizer """adadelta""" +18 20 training_loop """lcwa""" +18 20 evaluator """rankbased""" +18 21 dataset """kinships""" +18 21 model """complex""" +18 21 loss """bceaftersigmoid""" +18 21 regularizer """no""" +18 21 optimizer """adadelta""" +18 21 training_loop """lcwa""" +18 21 evaluator """rankbased""" +18 22 dataset """kinships""" +18 22 model """complex""" +18 22 loss """bceaftersigmoid""" +18 22 regularizer """no""" +18 22 optimizer """adadelta""" +18 22 training_loop 
"""lcwa""" +18 22 evaluator """rankbased""" +18 23 dataset """kinships""" +18 23 model """complex""" +18 23 loss """bceaftersigmoid""" +18 23 regularizer """no""" +18 23 optimizer """adadelta""" +18 23 training_loop """lcwa""" +18 23 evaluator """rankbased""" +18 24 dataset """kinships""" +18 24 model """complex""" +18 24 loss """bceaftersigmoid""" +18 24 regularizer """no""" +18 24 optimizer """adadelta""" +18 24 training_loop """lcwa""" +18 24 evaluator """rankbased""" +18 25 dataset """kinships""" +18 25 model """complex""" +18 25 loss """bceaftersigmoid""" +18 25 regularizer """no""" +18 25 optimizer """adadelta""" +18 25 training_loop """lcwa""" +18 25 evaluator """rankbased""" +18 26 dataset """kinships""" +18 26 model """complex""" +18 26 loss """bceaftersigmoid""" +18 26 regularizer """no""" +18 26 optimizer """adadelta""" +18 26 training_loop """lcwa""" +18 26 evaluator """rankbased""" +18 27 dataset """kinships""" +18 27 model """complex""" +18 27 loss """bceaftersigmoid""" +18 27 regularizer """no""" +18 27 optimizer """adadelta""" +18 27 training_loop """lcwa""" +18 27 evaluator """rankbased""" +18 28 dataset """kinships""" +18 28 model """complex""" +18 28 loss """bceaftersigmoid""" +18 28 regularizer """no""" +18 28 optimizer """adadelta""" +18 28 training_loop """lcwa""" +18 28 evaluator """rankbased""" +18 29 dataset """kinships""" +18 29 model """complex""" +18 29 loss """bceaftersigmoid""" +18 29 regularizer """no""" +18 29 optimizer """adadelta""" +18 29 training_loop """lcwa""" +18 29 evaluator """rankbased""" +18 30 dataset """kinships""" +18 30 model """complex""" +18 30 loss """bceaftersigmoid""" +18 30 regularizer """no""" +18 30 optimizer """adadelta""" +18 30 training_loop """lcwa""" +18 30 evaluator """rankbased""" +18 31 dataset """kinships""" +18 31 model """complex""" +18 31 loss """bceaftersigmoid""" +18 31 regularizer """no""" +18 31 optimizer """adadelta""" +18 31 training_loop """lcwa""" +18 31 evaluator """rankbased""" +18 32 
dataset """kinships""" +18 32 model """complex""" +18 32 loss """bceaftersigmoid""" +18 32 regularizer """no""" +18 32 optimizer """adadelta""" +18 32 training_loop """lcwa""" +18 32 evaluator """rankbased""" +18 33 dataset """kinships""" +18 33 model """complex""" +18 33 loss """bceaftersigmoid""" +18 33 regularizer """no""" +18 33 optimizer """adadelta""" +18 33 training_loop """lcwa""" +18 33 evaluator """rankbased""" +18 34 dataset """kinships""" +18 34 model """complex""" +18 34 loss """bceaftersigmoid""" +18 34 regularizer """no""" +18 34 optimizer """adadelta""" +18 34 training_loop """lcwa""" +18 34 evaluator """rankbased""" +18 35 dataset """kinships""" +18 35 model """complex""" +18 35 loss """bceaftersigmoid""" +18 35 regularizer """no""" +18 35 optimizer """adadelta""" +18 35 training_loop """lcwa""" +18 35 evaluator """rankbased""" +18 36 dataset """kinships""" +18 36 model """complex""" +18 36 loss """bceaftersigmoid""" +18 36 regularizer """no""" +18 36 optimizer """adadelta""" +18 36 training_loop """lcwa""" +18 36 evaluator """rankbased""" +18 37 dataset """kinships""" +18 37 model """complex""" +18 37 loss """bceaftersigmoid""" +18 37 regularizer """no""" +18 37 optimizer """adadelta""" +18 37 training_loop """lcwa""" +18 37 evaluator """rankbased""" +18 38 dataset """kinships""" +18 38 model """complex""" +18 38 loss """bceaftersigmoid""" +18 38 regularizer """no""" +18 38 optimizer """adadelta""" +18 38 training_loop """lcwa""" +18 38 evaluator """rankbased""" +18 39 dataset """kinships""" +18 39 model """complex""" +18 39 loss """bceaftersigmoid""" +18 39 regularizer """no""" +18 39 optimizer """adadelta""" +18 39 training_loop """lcwa""" +18 39 evaluator """rankbased""" +18 40 dataset """kinships""" +18 40 model """complex""" +18 40 loss """bceaftersigmoid""" +18 40 regularizer """no""" +18 40 optimizer """adadelta""" +18 40 training_loop """lcwa""" +18 40 evaluator """rankbased""" +18 41 dataset """kinships""" +18 41 model """complex""" +18 
41 loss """bceaftersigmoid""" +18 41 regularizer """no""" +18 41 optimizer """adadelta""" +18 41 training_loop """lcwa""" +18 41 evaluator """rankbased""" +18 42 dataset """kinships""" +18 42 model """complex""" +18 42 loss """bceaftersigmoid""" +18 42 regularizer """no""" +18 42 optimizer """adadelta""" +18 42 training_loop """lcwa""" +18 42 evaluator """rankbased""" +18 43 dataset """kinships""" +18 43 model """complex""" +18 43 loss """bceaftersigmoid""" +18 43 regularizer """no""" +18 43 optimizer """adadelta""" +18 43 training_loop """lcwa""" +18 43 evaluator """rankbased""" +18 44 dataset """kinships""" +18 44 model """complex""" +18 44 loss """bceaftersigmoid""" +18 44 regularizer """no""" +18 44 optimizer """adadelta""" +18 44 training_loop """lcwa""" +18 44 evaluator """rankbased""" +18 45 dataset """kinships""" +18 45 model """complex""" +18 45 loss """bceaftersigmoid""" +18 45 regularizer """no""" +18 45 optimizer """adadelta""" +18 45 training_loop """lcwa""" +18 45 evaluator """rankbased""" +18 46 dataset """kinships""" +18 46 model """complex""" +18 46 loss """bceaftersigmoid""" +18 46 regularizer """no""" +18 46 optimizer """adadelta""" +18 46 training_loop """lcwa""" +18 46 evaluator """rankbased""" +18 47 dataset """kinships""" +18 47 model """complex""" +18 47 loss """bceaftersigmoid""" +18 47 regularizer """no""" +18 47 optimizer """adadelta""" +18 47 training_loop """lcwa""" +18 47 evaluator """rankbased""" +18 48 dataset """kinships""" +18 48 model """complex""" +18 48 loss """bceaftersigmoid""" +18 48 regularizer """no""" +18 48 optimizer """adadelta""" +18 48 training_loop """lcwa""" +18 48 evaluator """rankbased""" +18 49 dataset """kinships""" +18 49 model """complex""" +18 49 loss """bceaftersigmoid""" +18 49 regularizer """no""" +18 49 optimizer """adadelta""" +18 49 training_loop """lcwa""" +18 49 evaluator """rankbased""" +18 50 dataset """kinships""" +18 50 model """complex""" +18 50 loss """bceaftersigmoid""" +18 50 regularizer 
"""no""" +18 50 optimizer """adadelta""" +18 50 training_loop """lcwa""" +18 50 evaluator """rankbased""" +18 51 dataset """kinships""" +18 51 model """complex""" +18 51 loss """bceaftersigmoid""" +18 51 regularizer """no""" +18 51 optimizer """adadelta""" +18 51 training_loop """lcwa""" +18 51 evaluator """rankbased""" +18 52 dataset """kinships""" +18 52 model """complex""" +18 52 loss """bceaftersigmoid""" +18 52 regularizer """no""" +18 52 optimizer """adadelta""" +18 52 training_loop """lcwa""" +18 52 evaluator """rankbased""" +18 53 dataset """kinships""" +18 53 model """complex""" +18 53 loss """bceaftersigmoid""" +18 53 regularizer """no""" +18 53 optimizer """adadelta""" +18 53 training_loop """lcwa""" +18 53 evaluator """rankbased""" +18 54 dataset """kinships""" +18 54 model """complex""" +18 54 loss """bceaftersigmoid""" +18 54 regularizer """no""" +18 54 optimizer """adadelta""" +18 54 training_loop """lcwa""" +18 54 evaluator """rankbased""" +18 55 dataset """kinships""" +18 55 model """complex""" +18 55 loss """bceaftersigmoid""" +18 55 regularizer """no""" +18 55 optimizer """adadelta""" +18 55 training_loop """lcwa""" +18 55 evaluator """rankbased""" +18 56 dataset """kinships""" +18 56 model """complex""" +18 56 loss """bceaftersigmoid""" +18 56 regularizer """no""" +18 56 optimizer """adadelta""" +18 56 training_loop """lcwa""" +18 56 evaluator """rankbased""" +18 57 dataset """kinships""" +18 57 model """complex""" +18 57 loss """bceaftersigmoid""" +18 57 regularizer """no""" +18 57 optimizer """adadelta""" +18 57 training_loop """lcwa""" +18 57 evaluator """rankbased""" +18 58 dataset """kinships""" +18 58 model """complex""" +18 58 loss """bceaftersigmoid""" +18 58 regularizer """no""" +18 58 optimizer """adadelta""" +18 58 training_loop """lcwa""" +18 58 evaluator """rankbased""" +18 59 dataset """kinships""" +18 59 model """complex""" +18 59 loss """bceaftersigmoid""" +18 59 regularizer """no""" +18 59 optimizer """adadelta""" +18 59 
training_loop """lcwa""" +18 59 evaluator """rankbased""" +18 60 dataset """kinships""" +18 60 model """complex""" +18 60 loss """bceaftersigmoid""" +18 60 regularizer """no""" +18 60 optimizer """adadelta""" +18 60 training_loop """lcwa""" +18 60 evaluator """rankbased""" +18 61 dataset """kinships""" +18 61 model """complex""" +18 61 loss """bceaftersigmoid""" +18 61 regularizer """no""" +18 61 optimizer """adadelta""" +18 61 training_loop """lcwa""" +18 61 evaluator """rankbased""" +18 62 dataset """kinships""" +18 62 model """complex""" +18 62 loss """bceaftersigmoid""" +18 62 regularizer """no""" +18 62 optimizer """adadelta""" +18 62 training_loop """lcwa""" +18 62 evaluator """rankbased""" +18 63 dataset """kinships""" +18 63 model """complex""" +18 63 loss """bceaftersigmoid""" +18 63 regularizer """no""" +18 63 optimizer """adadelta""" +18 63 training_loop """lcwa""" +18 63 evaluator """rankbased""" +18 64 dataset """kinships""" +18 64 model """complex""" +18 64 loss """bceaftersigmoid""" +18 64 regularizer """no""" +18 64 optimizer """adadelta""" +18 64 training_loop """lcwa""" +18 64 evaluator """rankbased""" +18 65 dataset """kinships""" +18 65 model """complex""" +18 65 loss """bceaftersigmoid""" +18 65 regularizer """no""" +18 65 optimizer """adadelta""" +18 65 training_loop """lcwa""" +18 65 evaluator """rankbased""" +18 66 dataset """kinships""" +18 66 model """complex""" +18 66 loss """bceaftersigmoid""" +18 66 regularizer """no""" +18 66 optimizer """adadelta""" +18 66 training_loop """lcwa""" +18 66 evaluator """rankbased""" +18 67 dataset """kinships""" +18 67 model """complex""" +18 67 loss """bceaftersigmoid""" +18 67 regularizer """no""" +18 67 optimizer """adadelta""" +18 67 training_loop """lcwa""" +18 67 evaluator """rankbased""" +18 68 dataset """kinships""" +18 68 model """complex""" +18 68 loss """bceaftersigmoid""" +18 68 regularizer """no""" +18 68 optimizer """adadelta""" +18 68 training_loop """lcwa""" +18 68 evaluator 
"""rankbased""" +18 69 dataset """kinships""" +18 69 model """complex""" +18 69 loss """bceaftersigmoid""" +18 69 regularizer """no""" +18 69 optimizer """adadelta""" +18 69 training_loop """lcwa""" +18 69 evaluator """rankbased""" +18 70 dataset """kinships""" +18 70 model """complex""" +18 70 loss """bceaftersigmoid""" +18 70 regularizer """no""" +18 70 optimizer """adadelta""" +18 70 training_loop """lcwa""" +18 70 evaluator """rankbased""" +18 71 dataset """kinships""" +18 71 model """complex""" +18 71 loss """bceaftersigmoid""" +18 71 regularizer """no""" +18 71 optimizer """adadelta""" +18 71 training_loop """lcwa""" +18 71 evaluator """rankbased""" +18 72 dataset """kinships""" +18 72 model """complex""" +18 72 loss """bceaftersigmoid""" +18 72 regularizer """no""" +18 72 optimizer """adadelta""" +18 72 training_loop """lcwa""" +18 72 evaluator """rankbased""" +18 73 dataset """kinships""" +18 73 model """complex""" +18 73 loss """bceaftersigmoid""" +18 73 regularizer """no""" +18 73 optimizer """adadelta""" +18 73 training_loop """lcwa""" +18 73 evaluator """rankbased""" +18 74 dataset """kinships""" +18 74 model """complex""" +18 74 loss """bceaftersigmoid""" +18 74 regularizer """no""" +18 74 optimizer """adadelta""" +18 74 training_loop """lcwa""" +18 74 evaluator """rankbased""" +18 75 dataset """kinships""" +18 75 model """complex""" +18 75 loss """bceaftersigmoid""" +18 75 regularizer """no""" +18 75 optimizer """adadelta""" +18 75 training_loop """lcwa""" +18 75 evaluator """rankbased""" +18 76 dataset """kinships""" +18 76 model """complex""" +18 76 loss """bceaftersigmoid""" +18 76 regularizer """no""" +18 76 optimizer """adadelta""" +18 76 training_loop """lcwa""" +18 76 evaluator """rankbased""" +18 77 dataset """kinships""" +18 77 model """complex""" +18 77 loss """bceaftersigmoid""" +18 77 regularizer """no""" +18 77 optimizer """adadelta""" +18 77 training_loop """lcwa""" +18 77 evaluator """rankbased""" +18 78 dataset """kinships""" +18 78 
model """complex""" +18 78 loss """bceaftersigmoid""" +18 78 regularizer """no""" +18 78 optimizer """adadelta""" +18 78 training_loop """lcwa""" +18 78 evaluator """rankbased""" +18 79 dataset """kinships""" +18 79 model """complex""" +18 79 loss """bceaftersigmoid""" +18 79 regularizer """no""" +18 79 optimizer """adadelta""" +18 79 training_loop """lcwa""" +18 79 evaluator """rankbased""" +18 80 dataset """kinships""" +18 80 model """complex""" +18 80 loss """bceaftersigmoid""" +18 80 regularizer """no""" +18 80 optimizer """adadelta""" +18 80 training_loop """lcwa""" +18 80 evaluator """rankbased""" +18 81 dataset """kinships""" +18 81 model """complex""" +18 81 loss """bceaftersigmoid""" +18 81 regularizer """no""" +18 81 optimizer """adadelta""" +18 81 training_loop """lcwa""" +18 81 evaluator """rankbased""" +18 82 dataset """kinships""" +18 82 model """complex""" +18 82 loss """bceaftersigmoid""" +18 82 regularizer """no""" +18 82 optimizer """adadelta""" +18 82 training_loop """lcwa""" +18 82 evaluator """rankbased""" +18 83 dataset """kinships""" +18 83 model """complex""" +18 83 loss """bceaftersigmoid""" +18 83 regularizer """no""" +18 83 optimizer """adadelta""" +18 83 training_loop """lcwa""" +18 83 evaluator """rankbased""" +18 84 dataset """kinships""" +18 84 model """complex""" +18 84 loss """bceaftersigmoid""" +18 84 regularizer """no""" +18 84 optimizer """adadelta""" +18 84 training_loop """lcwa""" +18 84 evaluator """rankbased""" +18 85 dataset """kinships""" +18 85 model """complex""" +18 85 loss """bceaftersigmoid""" +18 85 regularizer """no""" +18 85 optimizer """adadelta""" +18 85 training_loop """lcwa""" +18 85 evaluator """rankbased""" +18 86 dataset """kinships""" +18 86 model """complex""" +18 86 loss """bceaftersigmoid""" +18 86 regularizer """no""" +18 86 optimizer """adadelta""" +18 86 training_loop """lcwa""" +18 86 evaluator """rankbased""" +18 87 dataset """kinships""" +18 87 model """complex""" +18 87 loss """bceaftersigmoid""" 
+18 87 regularizer """no""" +18 87 optimizer """adadelta""" +18 87 training_loop """lcwa""" +18 87 evaluator """rankbased""" +18 88 dataset """kinships""" +18 88 model """complex""" +18 88 loss """bceaftersigmoid""" +18 88 regularizer """no""" +18 88 optimizer """adadelta""" +18 88 training_loop """lcwa""" +18 88 evaluator """rankbased""" +18 89 dataset """kinships""" +18 89 model """complex""" +18 89 loss """bceaftersigmoid""" +18 89 regularizer """no""" +18 89 optimizer """adadelta""" +18 89 training_loop """lcwa""" +18 89 evaluator """rankbased""" +18 90 dataset """kinships""" +18 90 model """complex""" +18 90 loss """bceaftersigmoid""" +18 90 regularizer """no""" +18 90 optimizer """adadelta""" +18 90 training_loop """lcwa""" +18 90 evaluator """rankbased""" +18 91 dataset """kinships""" +18 91 model """complex""" +18 91 loss """bceaftersigmoid""" +18 91 regularizer """no""" +18 91 optimizer """adadelta""" +18 91 training_loop """lcwa""" +18 91 evaluator """rankbased""" +18 92 dataset """kinships""" +18 92 model """complex""" +18 92 loss """bceaftersigmoid""" +18 92 regularizer """no""" +18 92 optimizer """adadelta""" +18 92 training_loop """lcwa""" +18 92 evaluator """rankbased""" +18 93 dataset """kinships""" +18 93 model """complex""" +18 93 loss """bceaftersigmoid""" +18 93 regularizer """no""" +18 93 optimizer """adadelta""" +18 93 training_loop """lcwa""" +18 93 evaluator """rankbased""" +18 94 dataset """kinships""" +18 94 model """complex""" +18 94 loss """bceaftersigmoid""" +18 94 regularizer """no""" +18 94 optimizer """adadelta""" +18 94 training_loop """lcwa""" +18 94 evaluator """rankbased""" +18 95 dataset """kinships""" +18 95 model """complex""" +18 95 loss """bceaftersigmoid""" +18 95 regularizer """no""" +18 95 optimizer """adadelta""" +18 95 training_loop """lcwa""" +18 95 evaluator """rankbased""" +18 96 dataset """kinships""" +18 96 model """complex""" +18 96 loss """bceaftersigmoid""" +18 96 regularizer """no""" +18 96 optimizer 
"""adadelta""" +18 96 training_loop """lcwa""" +18 96 evaluator """rankbased""" +18 97 dataset """kinships""" +18 97 model """complex""" +18 97 loss """bceaftersigmoid""" +18 97 regularizer """no""" +18 97 optimizer """adadelta""" +18 97 training_loop """lcwa""" +18 97 evaluator """rankbased""" +18 98 dataset """kinships""" +18 98 model """complex""" +18 98 loss """bceaftersigmoid""" +18 98 regularizer """no""" +18 98 optimizer """adadelta""" +18 98 training_loop """lcwa""" +18 98 evaluator """rankbased""" +18 99 dataset """kinships""" +18 99 model """complex""" +18 99 loss """bceaftersigmoid""" +18 99 regularizer """no""" +18 99 optimizer """adadelta""" +18 99 training_loop """lcwa""" +18 99 evaluator """rankbased""" +18 100 dataset """kinships""" +18 100 model """complex""" +18 100 loss """bceaftersigmoid""" +18 100 regularizer """no""" +18 100 optimizer """adadelta""" +18 100 training_loop """lcwa""" +18 100 evaluator """rankbased""" +19 1 model.embedding_dim 0.0 +19 1 training.batch_size 2.0 +19 1 training.label_smoothing 0.013169912763816613 +19 2 model.embedding_dim 1.0 +19 2 training.batch_size 2.0 +19 2 training.label_smoothing 0.4110307942443679 +19 3 model.embedding_dim 2.0 +19 3 training.batch_size 2.0 +19 3 training.label_smoothing 0.45131995654888823 +19 4 model.embedding_dim 0.0 +19 4 training.batch_size 1.0 +19 4 training.label_smoothing 0.05199417527797837 +19 5 model.embedding_dim 0.0 +19 5 training.batch_size 1.0 +19 5 training.label_smoothing 0.37633741976490814 +19 6 model.embedding_dim 2.0 +19 6 training.batch_size 0.0 +19 6 training.label_smoothing 0.6449452597064518 +19 7 model.embedding_dim 2.0 +19 7 training.batch_size 0.0 +19 7 training.label_smoothing 0.0011661456079481985 +19 8 model.embedding_dim 2.0 +19 8 training.batch_size 0.0 +19 8 training.label_smoothing 0.003859103266392944 +19 9 model.embedding_dim 2.0 +19 9 training.batch_size 2.0 +19 9 training.label_smoothing 0.7059921190902564 +19 10 model.embedding_dim 2.0 +19 10 
training.batch_size 1.0 +19 10 training.label_smoothing 0.591084314560012 +19 11 model.embedding_dim 2.0 +19 11 training.batch_size 2.0 +19 11 training.label_smoothing 0.06122966975708135 +19 12 model.embedding_dim 0.0 +19 12 training.batch_size 1.0 +19 12 training.label_smoothing 0.8443807375083101 +19 13 model.embedding_dim 2.0 +19 13 training.batch_size 2.0 +19 13 training.label_smoothing 0.07156027498222829 +19 14 model.embedding_dim 0.0 +19 14 training.batch_size 2.0 +19 14 training.label_smoothing 0.06262780982886863 +19 15 model.embedding_dim 2.0 +19 15 training.batch_size 2.0 +19 15 training.label_smoothing 0.005564340179441708 +19 16 model.embedding_dim 2.0 +19 16 training.batch_size 0.0 +19 16 training.label_smoothing 0.5556304111236018 +19 17 model.embedding_dim 0.0 +19 17 training.batch_size 2.0 +19 17 training.label_smoothing 0.048531101758997316 +19 18 model.embedding_dim 1.0 +19 18 training.batch_size 0.0 +19 18 training.label_smoothing 0.0068916185095172135 +19 19 model.embedding_dim 0.0 +19 19 training.batch_size 1.0 +19 19 training.label_smoothing 0.9087839556518171 +19 20 model.embedding_dim 1.0 +19 20 training.batch_size 1.0 +19 20 training.label_smoothing 0.2752845306287321 +19 21 model.embedding_dim 0.0 +19 21 training.batch_size 1.0 +19 21 training.label_smoothing 0.06065096026687357 +19 22 model.embedding_dim 2.0 +19 22 training.batch_size 1.0 +19 22 training.label_smoothing 0.014698643882106496 +19 23 model.embedding_dim 0.0 +19 23 training.batch_size 0.0 +19 23 training.label_smoothing 0.4794507223169861 +19 24 model.embedding_dim 2.0 +19 24 training.batch_size 0.0 +19 24 training.label_smoothing 0.001979704452885614 +19 25 model.embedding_dim 1.0 +19 25 training.batch_size 0.0 +19 25 training.label_smoothing 0.4930100499408585 +19 26 model.embedding_dim 1.0 +19 26 training.batch_size 0.0 +19 26 training.label_smoothing 0.010236631826492197 +19 27 model.embedding_dim 1.0 +19 27 training.batch_size 1.0 +19 27 training.label_smoothing 
0.003957943814371192 +19 28 model.embedding_dim 2.0 +19 28 training.batch_size 2.0 +19 28 training.label_smoothing 0.1063553828750881 +19 29 model.embedding_dim 1.0 +19 29 training.batch_size 0.0 +19 29 training.label_smoothing 0.07002392953290751 +19 30 model.embedding_dim 1.0 +19 30 training.batch_size 0.0 +19 30 training.label_smoothing 0.11284946829707156 +19 31 model.embedding_dim 0.0 +19 31 training.batch_size 0.0 +19 31 training.label_smoothing 0.02824754557825734 +19 32 model.embedding_dim 1.0 +19 32 training.batch_size 0.0 +19 32 training.label_smoothing 0.6754590007823223 +19 33 model.embedding_dim 0.0 +19 33 training.batch_size 2.0 +19 33 training.label_smoothing 0.0027188856933869424 +19 34 model.embedding_dim 1.0 +19 34 training.batch_size 1.0 +19 34 training.label_smoothing 0.520482131738931 +19 35 model.embedding_dim 1.0 +19 35 training.batch_size 0.0 +19 35 training.label_smoothing 0.0167096551194545 +19 36 model.embedding_dim 1.0 +19 36 training.batch_size 1.0 +19 36 training.label_smoothing 0.9392298107797505 +19 37 model.embedding_dim 0.0 +19 37 training.batch_size 1.0 +19 37 training.label_smoothing 0.002264925716884709 +19 38 model.embedding_dim 2.0 +19 38 training.batch_size 2.0 +19 38 training.label_smoothing 0.008045245909757668 +19 39 model.embedding_dim 1.0 +19 39 training.batch_size 0.0 +19 39 training.label_smoothing 0.14638112051136853 +19 40 model.embedding_dim 1.0 +19 40 training.batch_size 2.0 +19 40 training.label_smoothing 0.24609416213431085 +19 41 model.embedding_dim 0.0 +19 41 training.batch_size 0.0 +19 41 training.label_smoothing 0.016323318297030417 +19 42 model.embedding_dim 1.0 +19 42 training.batch_size 1.0 +19 42 training.label_smoothing 0.09724678037164032 +19 43 model.embedding_dim 2.0 +19 43 training.batch_size 0.0 +19 43 training.label_smoothing 0.019082518143743843 +19 44 model.embedding_dim 1.0 +19 44 training.batch_size 1.0 +19 44 training.label_smoothing 0.0012588403048274237 +19 45 model.embedding_dim 2.0 +19 45 
training.batch_size 2.0 +19 45 training.label_smoothing 0.0030324335600935568 +19 46 model.embedding_dim 0.0 +19 46 training.batch_size 0.0 +19 46 training.label_smoothing 0.04169881600925373 +19 47 model.embedding_dim 0.0 +19 47 training.batch_size 0.0 +19 47 training.label_smoothing 0.1546849683458697 +19 48 model.embedding_dim 0.0 +19 48 training.batch_size 2.0 +19 48 training.label_smoothing 0.05900830795393409 +19 49 model.embedding_dim 1.0 +19 49 training.batch_size 2.0 +19 49 training.label_smoothing 0.00813591834310803 +19 50 model.embedding_dim 2.0 +19 50 training.batch_size 0.0 +19 50 training.label_smoothing 0.018744546568540642 +19 51 model.embedding_dim 0.0 +19 51 training.batch_size 2.0 +19 51 training.label_smoothing 0.10439152480876542 +19 52 model.embedding_dim 0.0 +19 52 training.batch_size 1.0 +19 52 training.label_smoothing 0.8387719805760743 +19 53 model.embedding_dim 2.0 +19 53 training.batch_size 2.0 +19 53 training.label_smoothing 0.3458319609959987 +19 54 model.embedding_dim 0.0 +19 54 training.batch_size 1.0 +19 54 training.label_smoothing 0.051464350623451935 +19 55 model.embedding_dim 0.0 +19 55 training.batch_size 0.0 +19 55 training.label_smoothing 0.01662589177412917 +19 56 model.embedding_dim 2.0 +19 56 training.batch_size 0.0 +19 56 training.label_smoothing 0.4308743082866194 +19 57 model.embedding_dim 2.0 +19 57 training.batch_size 2.0 +19 57 training.label_smoothing 0.004561356026794071 +19 58 model.embedding_dim 0.0 +19 58 training.batch_size 0.0 +19 58 training.label_smoothing 0.14065519118249253 +19 59 model.embedding_dim 0.0 +19 59 training.batch_size 1.0 +19 59 training.label_smoothing 0.004677935272503253 +19 60 model.embedding_dim 1.0 +19 60 training.batch_size 2.0 +19 60 training.label_smoothing 0.18042392134072213 +19 61 model.embedding_dim 2.0 +19 61 training.batch_size 2.0 +19 61 training.label_smoothing 0.0037804905206476275 +19 62 model.embedding_dim 0.0 +19 62 training.batch_size 1.0 +19 62 training.label_smoothing 
0.0016822306711618514 +19 63 model.embedding_dim 1.0 +19 63 training.batch_size 0.0 +19 63 training.label_smoothing 0.012466215125794057 +19 64 model.embedding_dim 0.0 +19 64 training.batch_size 0.0 +19 64 training.label_smoothing 0.3866055731449079 +19 65 model.embedding_dim 2.0 +19 65 training.batch_size 0.0 +19 65 training.label_smoothing 0.14887045474606808 +19 66 model.embedding_dim 2.0 +19 66 training.batch_size 2.0 +19 66 training.label_smoothing 0.3797542734756706 +19 67 model.embedding_dim 2.0 +19 67 training.batch_size 0.0 +19 67 training.label_smoothing 0.016630139733921176 +19 68 model.embedding_dim 1.0 +19 68 training.batch_size 0.0 +19 68 training.label_smoothing 0.039801720195142117 +19 69 model.embedding_dim 1.0 +19 69 training.batch_size 0.0 +19 69 training.label_smoothing 0.028787171210355445 +19 70 model.embedding_dim 1.0 +19 70 training.batch_size 2.0 +19 70 training.label_smoothing 0.002043080718993008 +19 71 model.embedding_dim 0.0 +19 71 training.batch_size 0.0 +19 71 training.label_smoothing 0.006446398597337767 +19 72 model.embedding_dim 2.0 +19 72 training.batch_size 1.0 +19 72 training.label_smoothing 0.022663069482226798 +19 73 model.embedding_dim 1.0 +19 73 training.batch_size 0.0 +19 73 training.label_smoothing 0.01121038437821013 +19 74 model.embedding_dim 2.0 +19 74 training.batch_size 2.0 +19 74 training.label_smoothing 0.1845828116772821 +19 75 model.embedding_dim 0.0 +19 75 training.batch_size 1.0 +19 75 training.label_smoothing 0.7828852064123051 +19 76 model.embedding_dim 2.0 +19 76 training.batch_size 0.0 +19 76 training.label_smoothing 0.21226791200085332 +19 77 model.embedding_dim 1.0 +19 77 training.batch_size 1.0 +19 77 training.label_smoothing 0.0010809504519744748 +19 78 model.embedding_dim 1.0 +19 78 training.batch_size 2.0 +19 78 training.label_smoothing 0.003206980828404463 +19 79 model.embedding_dim 1.0 +19 79 training.batch_size 0.0 +19 79 training.label_smoothing 0.03654715882576424 +19 80 model.embedding_dim 1.0 
+19 80 training.batch_size 2.0 +19 80 training.label_smoothing 0.23222732612813615 +19 81 model.embedding_dim 2.0 +19 81 training.batch_size 2.0 +19 81 training.label_smoothing 0.8491240254869409 +19 82 model.embedding_dim 0.0 +19 82 training.batch_size 2.0 +19 82 training.label_smoothing 0.015705966451948584 +19 83 model.embedding_dim 0.0 +19 83 training.batch_size 1.0 +19 83 training.label_smoothing 0.570129818690042 +19 84 model.embedding_dim 1.0 +19 84 training.batch_size 0.0 +19 84 training.label_smoothing 0.0027182499236051835 +19 85 model.embedding_dim 2.0 +19 85 training.batch_size 0.0 +19 85 training.label_smoothing 0.01214934967560814 +19 86 model.embedding_dim 0.0 +19 86 training.batch_size 1.0 +19 86 training.label_smoothing 0.19688197112664038 +19 87 model.embedding_dim 1.0 +19 87 training.batch_size 2.0 +19 87 training.label_smoothing 0.00515368528876566 +19 88 model.embedding_dim 2.0 +19 88 training.batch_size 0.0 +19 88 training.label_smoothing 0.06142813867398245 +19 89 model.embedding_dim 1.0 +19 89 training.batch_size 2.0 +19 89 training.label_smoothing 0.4341079435284791 +19 90 model.embedding_dim 0.0 +19 90 training.batch_size 1.0 +19 90 training.label_smoothing 0.002171073686803466 +19 91 model.embedding_dim 2.0 +19 91 training.batch_size 2.0 +19 91 training.label_smoothing 0.004071927576886857 +19 92 model.embedding_dim 1.0 +19 92 training.batch_size 2.0 +19 92 training.label_smoothing 0.24493287585571225 +19 93 model.embedding_dim 1.0 +19 93 training.batch_size 1.0 +19 93 training.label_smoothing 0.08025133747148379 +19 94 model.embedding_dim 0.0 +19 94 training.batch_size 2.0 +19 94 training.label_smoothing 0.7947753603593161 +19 95 model.embedding_dim 0.0 +19 95 training.batch_size 1.0 +19 95 training.label_smoothing 0.3149146577845543 +19 96 model.embedding_dim 1.0 +19 96 training.batch_size 1.0 +19 96 training.label_smoothing 0.3081163636247516 +19 97 model.embedding_dim 1.0 +19 97 training.batch_size 1.0 +19 97 training.label_smoothing 
0.029364347627808117 +19 98 model.embedding_dim 0.0 +19 98 training.batch_size 2.0 +19 98 training.label_smoothing 0.2969068697813916 +19 99 model.embedding_dim 1.0 +19 99 training.batch_size 0.0 +19 99 training.label_smoothing 0.32467504115415863 +19 100 model.embedding_dim 2.0 +19 100 training.batch_size 1.0 +19 100 training.label_smoothing 0.033891104579478595 +19 1 dataset """kinships""" +19 1 model """complex""" +19 1 loss """softplus""" +19 1 regularizer """no""" +19 1 optimizer """adadelta""" +19 1 training_loop """lcwa""" +19 1 evaluator """rankbased""" +19 2 dataset """kinships""" +19 2 model """complex""" +19 2 loss """softplus""" +19 2 regularizer """no""" +19 2 optimizer """adadelta""" +19 2 training_loop """lcwa""" +19 2 evaluator """rankbased""" +19 3 dataset """kinships""" +19 3 model """complex""" +19 3 loss """softplus""" +19 3 regularizer """no""" +19 3 optimizer """adadelta""" +19 3 training_loop """lcwa""" +19 3 evaluator """rankbased""" +19 4 dataset """kinships""" +19 4 model """complex""" +19 4 loss """softplus""" +19 4 regularizer """no""" +19 4 optimizer """adadelta""" +19 4 training_loop """lcwa""" +19 4 evaluator """rankbased""" +19 5 dataset """kinships""" +19 5 model """complex""" +19 5 loss """softplus""" +19 5 regularizer """no""" +19 5 optimizer """adadelta""" +19 5 training_loop """lcwa""" +19 5 evaluator """rankbased""" +19 6 dataset """kinships""" +19 6 model """complex""" +19 6 loss """softplus""" +19 6 regularizer """no""" +19 6 optimizer """adadelta""" +19 6 training_loop """lcwa""" +19 6 evaluator """rankbased""" +19 7 dataset """kinships""" +19 7 model """complex""" +19 7 loss """softplus""" +19 7 regularizer """no""" +19 7 optimizer """adadelta""" +19 7 training_loop """lcwa""" +19 7 evaluator """rankbased""" +19 8 dataset """kinships""" +19 8 model """complex""" +19 8 loss """softplus""" +19 8 regularizer """no""" +19 8 optimizer """adadelta""" +19 8 training_loop """lcwa""" +19 8 evaluator """rankbased""" +19 9 dataset 
"""kinships""" +19 9 model """complex""" +19 9 loss """softplus""" +19 9 regularizer """no""" +19 9 optimizer """adadelta""" +19 9 training_loop """lcwa""" +19 9 evaluator """rankbased""" +19 10 dataset """kinships""" +19 10 model """complex""" +19 10 loss """softplus""" +19 10 regularizer """no""" +19 10 optimizer """adadelta""" +19 10 training_loop """lcwa""" +19 10 evaluator """rankbased""" +19 11 dataset """kinships""" +19 11 model """complex""" +19 11 loss """softplus""" +19 11 regularizer """no""" +19 11 optimizer """adadelta""" +19 11 training_loop """lcwa""" +19 11 evaluator """rankbased""" +19 12 dataset """kinships""" +19 12 model """complex""" +19 12 loss """softplus""" +19 12 regularizer """no""" +19 12 optimizer """adadelta""" +19 12 training_loop """lcwa""" +19 12 evaluator """rankbased""" +19 13 dataset """kinships""" +19 13 model """complex""" +19 13 loss """softplus""" +19 13 regularizer """no""" +19 13 optimizer """adadelta""" +19 13 training_loop """lcwa""" +19 13 evaluator """rankbased""" +19 14 dataset """kinships""" +19 14 model """complex""" +19 14 loss """softplus""" +19 14 regularizer """no""" +19 14 optimizer """adadelta""" +19 14 training_loop """lcwa""" +19 14 evaluator """rankbased""" +19 15 dataset """kinships""" +19 15 model """complex""" +19 15 loss """softplus""" +19 15 regularizer """no""" +19 15 optimizer """adadelta""" +19 15 training_loop """lcwa""" +19 15 evaluator """rankbased""" +19 16 dataset """kinships""" +19 16 model """complex""" +19 16 loss """softplus""" +19 16 regularizer """no""" +19 16 optimizer """adadelta""" +19 16 training_loop """lcwa""" +19 16 evaluator """rankbased""" +19 17 dataset """kinships""" +19 17 model """complex""" +19 17 loss """softplus""" +19 17 regularizer """no""" +19 17 optimizer """adadelta""" +19 17 training_loop """lcwa""" +19 17 evaluator """rankbased""" +19 18 dataset """kinships""" +19 18 model """complex""" +19 18 loss """softplus""" +19 18 regularizer """no""" +19 18 optimizer 
"""adadelta""" +19 18 training_loop """lcwa""" +19 18 evaluator """rankbased""" +19 19 dataset """kinships""" +19 19 model """complex""" +19 19 loss """softplus""" +19 19 regularizer """no""" +19 19 optimizer """adadelta""" +19 19 training_loop """lcwa""" +19 19 evaluator """rankbased""" +19 20 dataset """kinships""" +19 20 model """complex""" +19 20 loss """softplus""" +19 20 regularizer """no""" +19 20 optimizer """adadelta""" +19 20 training_loop """lcwa""" +19 20 evaluator """rankbased""" +19 21 dataset """kinships""" +19 21 model """complex""" +19 21 loss """softplus""" +19 21 regularizer """no""" +19 21 optimizer """adadelta""" +19 21 training_loop """lcwa""" +19 21 evaluator """rankbased""" +19 22 dataset """kinships""" +19 22 model """complex""" +19 22 loss """softplus""" +19 22 regularizer """no""" +19 22 optimizer """adadelta""" +19 22 training_loop """lcwa""" +19 22 evaluator """rankbased""" +19 23 dataset """kinships""" +19 23 model """complex""" +19 23 loss """softplus""" +19 23 regularizer """no""" +19 23 optimizer """adadelta""" +19 23 training_loop """lcwa""" +19 23 evaluator """rankbased""" +19 24 dataset """kinships""" +19 24 model """complex""" +19 24 loss """softplus""" +19 24 regularizer """no""" +19 24 optimizer """adadelta""" +19 24 training_loop """lcwa""" +19 24 evaluator """rankbased""" +19 25 dataset """kinships""" +19 25 model """complex""" +19 25 loss """softplus""" +19 25 regularizer """no""" +19 25 optimizer """adadelta""" +19 25 training_loop """lcwa""" +19 25 evaluator """rankbased""" +19 26 dataset """kinships""" +19 26 model """complex""" +19 26 loss """softplus""" +19 26 regularizer """no""" +19 26 optimizer """adadelta""" +19 26 training_loop """lcwa""" +19 26 evaluator """rankbased""" +19 27 dataset """kinships""" +19 27 model """complex""" +19 27 loss """softplus""" +19 27 regularizer """no""" +19 27 optimizer """adadelta""" +19 27 training_loop """lcwa""" +19 27 evaluator """rankbased""" +19 28 dataset """kinships""" +19 28 
model """complex""" +19 28 loss """softplus""" +19 28 regularizer """no""" +19 28 optimizer """adadelta""" +19 28 training_loop """lcwa""" +19 28 evaluator """rankbased""" +19 29 dataset """kinships""" +19 29 model """complex""" +19 29 loss """softplus""" +19 29 regularizer """no""" +19 29 optimizer """adadelta""" +19 29 training_loop """lcwa""" +19 29 evaluator """rankbased""" +19 30 dataset """kinships""" +19 30 model """complex""" +19 30 loss """softplus""" +19 30 regularizer """no""" +19 30 optimizer """adadelta""" +19 30 training_loop """lcwa""" +19 30 evaluator """rankbased""" +19 31 dataset """kinships""" +19 31 model """complex""" +19 31 loss """softplus""" +19 31 regularizer """no""" +19 31 optimizer """adadelta""" +19 31 training_loop """lcwa""" +19 31 evaluator """rankbased""" +19 32 dataset """kinships""" +19 32 model """complex""" +19 32 loss """softplus""" +19 32 regularizer """no""" +19 32 optimizer """adadelta""" +19 32 training_loop """lcwa""" +19 32 evaluator """rankbased""" +19 33 dataset """kinships""" +19 33 model """complex""" +19 33 loss """softplus""" +19 33 regularizer """no""" +19 33 optimizer """adadelta""" +19 33 training_loop """lcwa""" +19 33 evaluator """rankbased""" +19 34 dataset """kinships""" +19 34 model """complex""" +19 34 loss """softplus""" +19 34 regularizer """no""" +19 34 optimizer """adadelta""" +19 34 training_loop """lcwa""" +19 34 evaluator """rankbased""" +19 35 dataset """kinships""" +19 35 model """complex""" +19 35 loss """softplus""" +19 35 regularizer """no""" +19 35 optimizer """adadelta""" +19 35 training_loop """lcwa""" +19 35 evaluator """rankbased""" +19 36 dataset """kinships""" +19 36 model """complex""" +19 36 loss """softplus""" +19 36 regularizer """no""" +19 36 optimizer """adadelta""" +19 36 training_loop """lcwa""" +19 36 evaluator """rankbased""" +19 37 dataset """kinships""" +19 37 model """complex""" +19 37 loss """softplus""" +19 37 regularizer """no""" +19 37 optimizer """adadelta""" +19 37 
training_loop """lcwa""" +19 37 evaluator """rankbased""" +19 38 dataset """kinships""" +19 38 model """complex""" +19 38 loss """softplus""" +19 38 regularizer """no""" +19 38 optimizer """adadelta""" +19 38 training_loop """lcwa""" +19 38 evaluator """rankbased""" +19 39 dataset """kinships""" +19 39 model """complex""" +19 39 loss """softplus""" +19 39 regularizer """no""" +19 39 optimizer """adadelta""" +19 39 training_loop """lcwa""" +19 39 evaluator """rankbased""" +19 40 dataset """kinships""" +19 40 model """complex""" +19 40 loss """softplus""" +19 40 regularizer """no""" +19 40 optimizer """adadelta""" +19 40 training_loop """lcwa""" +19 40 evaluator """rankbased""" +19 41 dataset """kinships""" +19 41 model """complex""" +19 41 loss """softplus""" +19 41 regularizer """no""" +19 41 optimizer """adadelta""" +19 41 training_loop """lcwa""" +19 41 evaluator """rankbased""" +19 42 dataset """kinships""" +19 42 model """complex""" +19 42 loss """softplus""" +19 42 regularizer """no""" +19 42 optimizer """adadelta""" +19 42 training_loop """lcwa""" +19 42 evaluator """rankbased""" +19 43 dataset """kinships""" +19 43 model """complex""" +19 43 loss """softplus""" +19 43 regularizer """no""" +19 43 optimizer """adadelta""" +19 43 training_loop """lcwa""" +19 43 evaluator """rankbased""" +19 44 dataset """kinships""" +19 44 model """complex""" +19 44 loss """softplus""" +19 44 regularizer """no""" +19 44 optimizer """adadelta""" +19 44 training_loop """lcwa""" +19 44 evaluator """rankbased""" +19 45 dataset """kinships""" +19 45 model """complex""" +19 45 loss """softplus""" +19 45 regularizer """no""" +19 45 optimizer """adadelta""" +19 45 training_loop """lcwa""" +19 45 evaluator """rankbased""" +19 46 dataset """kinships""" +19 46 model """complex""" +19 46 loss """softplus""" +19 46 regularizer """no""" +19 46 optimizer """adadelta""" +19 46 training_loop """lcwa""" +19 46 evaluator """rankbased""" +19 47 dataset """kinships""" +19 47 model """complex""" +19 
47 loss """softplus""" +19 47 regularizer """no""" +19 47 optimizer """adadelta""" +19 47 training_loop """lcwa""" +19 47 evaluator """rankbased""" +19 48 dataset """kinships""" +19 48 model """complex""" +19 48 loss """softplus""" +19 48 regularizer """no""" +19 48 optimizer """adadelta""" +19 48 training_loop """lcwa""" +19 48 evaluator """rankbased""" +19 49 dataset """kinships""" +19 49 model """complex""" +19 49 loss """softplus""" +19 49 regularizer """no""" +19 49 optimizer """adadelta""" +19 49 training_loop """lcwa""" +19 49 evaluator """rankbased""" +19 50 dataset """kinships""" +19 50 model """complex""" +19 50 loss """softplus""" +19 50 regularizer """no""" +19 50 optimizer """adadelta""" +19 50 training_loop """lcwa""" +19 50 evaluator """rankbased""" +19 51 dataset """kinships""" +19 51 model """complex""" +19 51 loss """softplus""" +19 51 regularizer """no""" +19 51 optimizer """adadelta""" +19 51 training_loop """lcwa""" +19 51 evaluator """rankbased""" +19 52 dataset """kinships""" +19 52 model """complex""" +19 52 loss """softplus""" +19 52 regularizer """no""" +19 52 optimizer """adadelta""" +19 52 training_loop """lcwa""" +19 52 evaluator """rankbased""" +19 53 dataset """kinships""" +19 53 model """complex""" +19 53 loss """softplus""" +19 53 regularizer """no""" +19 53 optimizer """adadelta""" +19 53 training_loop """lcwa""" +19 53 evaluator """rankbased""" +19 54 dataset """kinships""" +19 54 model """complex""" +19 54 loss """softplus""" +19 54 regularizer """no""" +19 54 optimizer """adadelta""" +19 54 training_loop """lcwa""" +19 54 evaluator """rankbased""" +19 55 dataset """kinships""" +19 55 model """complex""" +19 55 loss """softplus""" +19 55 regularizer """no""" +19 55 optimizer """adadelta""" +19 55 training_loop """lcwa""" +19 55 evaluator """rankbased""" +19 56 dataset """kinships""" +19 56 model """complex""" +19 56 loss """softplus""" +19 56 regularizer """no""" +19 56 optimizer """adadelta""" +19 56 training_loop """lcwa""" +19 
56 evaluator """rankbased""" +19 57 dataset """kinships""" +19 57 model """complex""" +19 57 loss """softplus""" +19 57 regularizer """no""" +19 57 optimizer """adadelta""" +19 57 training_loop """lcwa""" +19 57 evaluator """rankbased""" +19 58 dataset """kinships""" +19 58 model """complex""" +19 58 loss """softplus""" +19 58 regularizer """no""" +19 58 optimizer """adadelta""" +19 58 training_loop """lcwa""" +19 58 evaluator """rankbased""" +19 59 dataset """kinships""" +19 59 model """complex""" +19 59 loss """softplus""" +19 59 regularizer """no""" +19 59 optimizer """adadelta""" +19 59 training_loop """lcwa""" +19 59 evaluator """rankbased""" +19 60 dataset """kinships""" +19 60 model """complex""" +19 60 loss """softplus""" +19 60 regularizer """no""" +19 60 optimizer """adadelta""" +19 60 training_loop """lcwa""" +19 60 evaluator """rankbased""" +19 61 dataset """kinships""" +19 61 model """complex""" +19 61 loss """softplus""" +19 61 regularizer """no""" +19 61 optimizer """adadelta""" +19 61 training_loop """lcwa""" +19 61 evaluator """rankbased""" +19 62 dataset """kinships""" +19 62 model """complex""" +19 62 loss """softplus""" +19 62 regularizer """no""" +19 62 optimizer """adadelta""" +19 62 training_loop """lcwa""" +19 62 evaluator """rankbased""" +19 63 dataset """kinships""" +19 63 model """complex""" +19 63 loss """softplus""" +19 63 regularizer """no""" +19 63 optimizer """adadelta""" +19 63 training_loop """lcwa""" +19 63 evaluator """rankbased""" +19 64 dataset """kinships""" +19 64 model """complex""" +19 64 loss """softplus""" +19 64 regularizer """no""" +19 64 optimizer """adadelta""" +19 64 training_loop """lcwa""" +19 64 evaluator """rankbased""" +19 65 dataset """kinships""" +19 65 model """complex""" +19 65 loss """softplus""" +19 65 regularizer """no""" +19 65 optimizer """adadelta""" +19 65 training_loop """lcwa""" +19 65 evaluator """rankbased""" +19 66 dataset """kinships""" +19 66 model """complex""" +19 66 loss """softplus""" +19 
66 regularizer """no""" +19 66 optimizer """adadelta""" +19 66 training_loop """lcwa""" +19 66 evaluator """rankbased""" +19 67 dataset """kinships""" +19 67 model """complex""" +19 67 loss """softplus""" +19 67 regularizer """no""" +19 67 optimizer """adadelta""" +19 67 training_loop """lcwa""" +19 67 evaluator """rankbased""" +19 68 dataset """kinships""" +19 68 model """complex""" +19 68 loss """softplus""" +19 68 regularizer """no""" +19 68 optimizer """adadelta""" +19 68 training_loop """lcwa""" +19 68 evaluator """rankbased""" +19 69 dataset """kinships""" +19 69 model """complex""" +19 69 loss """softplus""" +19 69 regularizer """no""" +19 69 optimizer """adadelta""" +19 69 training_loop """lcwa""" +19 69 evaluator """rankbased""" +19 70 dataset """kinships""" +19 70 model """complex""" +19 70 loss """softplus""" +19 70 regularizer """no""" +19 70 optimizer """adadelta""" +19 70 training_loop """lcwa""" +19 70 evaluator """rankbased""" +19 71 dataset """kinships""" +19 71 model """complex""" +19 71 loss """softplus""" +19 71 regularizer """no""" +19 71 optimizer """adadelta""" +19 71 training_loop """lcwa""" +19 71 evaluator """rankbased""" +19 72 dataset """kinships""" +19 72 model """complex""" +19 72 loss """softplus""" +19 72 regularizer """no""" +19 72 optimizer """adadelta""" +19 72 training_loop """lcwa""" +19 72 evaluator """rankbased""" +19 73 dataset """kinships""" +19 73 model """complex""" +19 73 loss """softplus""" +19 73 regularizer """no""" +19 73 optimizer """adadelta""" +19 73 training_loop """lcwa""" +19 73 evaluator """rankbased""" +19 74 dataset """kinships""" +19 74 model """complex""" +19 74 loss """softplus""" +19 74 regularizer """no""" +19 74 optimizer """adadelta""" +19 74 training_loop """lcwa""" +19 74 evaluator """rankbased""" +19 75 dataset """kinships""" +19 75 model """complex""" +19 75 loss """softplus""" +19 75 regularizer """no""" +19 75 optimizer """adadelta""" +19 75 training_loop """lcwa""" +19 75 evaluator 
"""rankbased""" +19 76 dataset """kinships""" +19 76 model """complex""" +19 76 loss """softplus""" +19 76 regularizer """no""" +19 76 optimizer """adadelta""" +19 76 training_loop """lcwa""" +19 76 evaluator """rankbased""" +19 77 dataset """kinships""" +19 77 model """complex""" +19 77 loss """softplus""" +19 77 regularizer """no""" +19 77 optimizer """adadelta""" +19 77 training_loop """lcwa""" +19 77 evaluator """rankbased""" +19 78 dataset """kinships""" +19 78 model """complex""" +19 78 loss """softplus""" +19 78 regularizer """no""" +19 78 optimizer """adadelta""" +19 78 training_loop """lcwa""" +19 78 evaluator """rankbased""" +19 79 dataset """kinships""" +19 79 model """complex""" +19 79 loss """softplus""" +19 79 regularizer """no""" +19 79 optimizer """adadelta""" +19 79 training_loop """lcwa""" +19 79 evaluator """rankbased""" +19 80 dataset """kinships""" +19 80 model """complex""" +19 80 loss """softplus""" +19 80 regularizer """no""" +19 80 optimizer """adadelta""" +19 80 training_loop """lcwa""" +19 80 evaluator """rankbased""" +19 81 dataset """kinships""" +19 81 model """complex""" +19 81 loss """softplus""" +19 81 regularizer """no""" +19 81 optimizer """adadelta""" +19 81 training_loop """lcwa""" +19 81 evaluator """rankbased""" +19 82 dataset """kinships""" +19 82 model """complex""" +19 82 loss """softplus""" +19 82 regularizer """no""" +19 82 optimizer """adadelta""" +19 82 training_loop """lcwa""" +19 82 evaluator """rankbased""" +19 83 dataset """kinships""" +19 83 model """complex""" +19 83 loss """softplus""" +19 83 regularizer """no""" +19 83 optimizer """adadelta""" +19 83 training_loop """lcwa""" +19 83 evaluator """rankbased""" +19 84 dataset """kinships""" +19 84 model """complex""" +19 84 loss """softplus""" +19 84 regularizer """no""" +19 84 optimizer """adadelta""" +19 84 training_loop """lcwa""" +19 84 evaluator """rankbased""" +19 85 dataset """kinships""" +19 85 model """complex""" +19 85 loss """softplus""" +19 85 regularizer 
"""no""" +19 85 optimizer """adadelta""" +19 85 training_loop """lcwa""" +19 85 evaluator """rankbased""" +19 86 dataset """kinships""" +19 86 model """complex""" +19 86 loss """softplus""" +19 86 regularizer """no""" +19 86 optimizer """adadelta""" +19 86 training_loop """lcwa""" +19 86 evaluator """rankbased""" +19 87 dataset """kinships""" +19 87 model """complex""" +19 87 loss """softplus""" +19 87 regularizer """no""" +19 87 optimizer """adadelta""" +19 87 training_loop """lcwa""" +19 87 evaluator """rankbased""" +19 88 dataset """kinships""" +19 88 model """complex""" +19 88 loss """softplus""" +19 88 regularizer """no""" +19 88 optimizer """adadelta""" +19 88 training_loop """lcwa""" +19 88 evaluator """rankbased""" +19 89 dataset """kinships""" +19 89 model """complex""" +19 89 loss """softplus""" +19 89 regularizer """no""" +19 89 optimizer """adadelta""" +19 89 training_loop """lcwa""" +19 89 evaluator """rankbased""" +19 90 dataset """kinships""" +19 90 model """complex""" +19 90 loss """softplus""" +19 90 regularizer """no""" +19 90 optimizer """adadelta""" +19 90 training_loop """lcwa""" +19 90 evaluator """rankbased""" +19 91 dataset """kinships""" +19 91 model """complex""" +19 91 loss """softplus""" +19 91 regularizer """no""" +19 91 optimizer """adadelta""" +19 91 training_loop """lcwa""" +19 91 evaluator """rankbased""" +19 92 dataset """kinships""" +19 92 model """complex""" +19 92 loss """softplus""" +19 92 regularizer """no""" +19 92 optimizer """adadelta""" +19 92 training_loop """lcwa""" +19 92 evaluator """rankbased""" +19 93 dataset """kinships""" +19 93 model """complex""" +19 93 loss """softplus""" +19 93 regularizer """no""" +19 93 optimizer """adadelta""" +19 93 training_loop """lcwa""" +19 93 evaluator """rankbased""" +19 94 dataset """kinships""" +19 94 model """complex""" +19 94 loss """softplus""" +19 94 regularizer """no""" +19 94 optimizer """adadelta""" +19 94 training_loop """lcwa""" +19 94 evaluator """rankbased""" +19 95 
dataset """kinships""" +19 95 model """complex""" +19 95 loss """softplus""" +19 95 regularizer """no""" +19 95 optimizer """adadelta""" +19 95 training_loop """lcwa""" +19 95 evaluator """rankbased""" +19 96 dataset """kinships""" +19 96 model """complex""" +19 96 loss """softplus""" +19 96 regularizer """no""" +19 96 optimizer """adadelta""" +19 96 training_loop """lcwa""" +19 96 evaluator """rankbased""" +19 97 dataset """kinships""" +19 97 model """complex""" +19 97 loss """softplus""" +19 97 regularizer """no""" +19 97 optimizer """adadelta""" +19 97 training_loop """lcwa""" +19 97 evaluator """rankbased""" +19 98 dataset """kinships""" +19 98 model """complex""" +19 98 loss """softplus""" +19 98 regularizer """no""" +19 98 optimizer """adadelta""" +19 98 training_loop """lcwa""" +19 98 evaluator """rankbased""" +19 99 dataset """kinships""" +19 99 model """complex""" +19 99 loss """softplus""" +19 99 regularizer """no""" +19 99 optimizer """adadelta""" +19 99 training_loop """lcwa""" +19 99 evaluator """rankbased""" +19 100 dataset """kinships""" +19 100 model """complex""" +19 100 loss """softplus""" +19 100 regularizer """no""" +19 100 optimizer """adadelta""" +19 100 training_loop """lcwa""" +19 100 evaluator """rankbased""" +20 1 model.embedding_dim 2.0 +20 1 negative_sampler.num_negs_per_pos 44.0 +20 1 training.batch_size 1.0 +20 2 model.embedding_dim 0.0 +20 2 negative_sampler.num_negs_per_pos 67.0 +20 2 training.batch_size 2.0 +20 3 model.embedding_dim 0.0 +20 3 negative_sampler.num_negs_per_pos 12.0 +20 3 training.batch_size 2.0 +20 4 model.embedding_dim 0.0 +20 4 negative_sampler.num_negs_per_pos 3.0 +20 4 training.batch_size 2.0 +20 5 model.embedding_dim 1.0 +20 5 negative_sampler.num_negs_per_pos 3.0 +20 5 training.batch_size 0.0 +20 6 model.embedding_dim 1.0 +20 6 negative_sampler.num_negs_per_pos 67.0 +20 6 training.batch_size 2.0 +20 7 model.embedding_dim 2.0 +20 7 negative_sampler.num_negs_per_pos 18.0 +20 7 training.batch_size 2.0 +20 8 
model.embedding_dim 0.0 +20 8 negative_sampler.num_negs_per_pos 29.0 +20 8 training.batch_size 1.0 +20 9 model.embedding_dim 0.0 +20 9 negative_sampler.num_negs_per_pos 1.0 +20 9 training.batch_size 1.0 +20 10 model.embedding_dim 0.0 +20 10 negative_sampler.num_negs_per_pos 73.0 +20 10 training.batch_size 2.0 +20 11 model.embedding_dim 2.0 +20 11 negative_sampler.num_negs_per_pos 66.0 +20 11 training.batch_size 2.0 +20 12 model.embedding_dim 2.0 +20 12 negative_sampler.num_negs_per_pos 81.0 +20 12 training.batch_size 0.0 +20 13 model.embedding_dim 1.0 +20 13 negative_sampler.num_negs_per_pos 57.0 +20 13 training.batch_size 2.0 +20 14 model.embedding_dim 1.0 +20 14 negative_sampler.num_negs_per_pos 1.0 +20 14 training.batch_size 0.0 +20 15 model.embedding_dim 0.0 +20 15 negative_sampler.num_negs_per_pos 26.0 +20 15 training.batch_size 0.0 +20 16 model.embedding_dim 0.0 +20 16 negative_sampler.num_negs_per_pos 96.0 +20 16 training.batch_size 2.0 +20 17 model.embedding_dim 0.0 +20 17 negative_sampler.num_negs_per_pos 12.0 +20 17 training.batch_size 0.0 +20 18 model.embedding_dim 1.0 +20 18 negative_sampler.num_negs_per_pos 91.0 +20 18 training.batch_size 2.0 +20 19 model.embedding_dim 1.0 +20 19 negative_sampler.num_negs_per_pos 22.0 +20 19 training.batch_size 0.0 +20 20 model.embedding_dim 2.0 +20 20 negative_sampler.num_negs_per_pos 92.0 +20 20 training.batch_size 1.0 +20 21 model.embedding_dim 1.0 +20 21 negative_sampler.num_negs_per_pos 1.0 +20 21 training.batch_size 0.0 +20 22 model.embedding_dim 2.0 +20 22 negative_sampler.num_negs_per_pos 30.0 +20 22 training.batch_size 2.0 +20 23 model.embedding_dim 2.0 +20 23 negative_sampler.num_negs_per_pos 4.0 +20 23 training.batch_size 2.0 +20 24 model.embedding_dim 0.0 +20 24 negative_sampler.num_negs_per_pos 44.0 +20 24 training.batch_size 1.0 +20 25 model.embedding_dim 1.0 +20 25 negative_sampler.num_negs_per_pos 13.0 +20 25 training.batch_size 1.0 +20 26 model.embedding_dim 0.0 +20 26 negative_sampler.num_negs_per_pos 
63.0 +20 26 training.batch_size 2.0 +20 27 model.embedding_dim 0.0 +20 27 negative_sampler.num_negs_per_pos 16.0 +20 27 training.batch_size 1.0 +20 28 model.embedding_dim 1.0 +20 28 negative_sampler.num_negs_per_pos 50.0 +20 28 training.batch_size 0.0 +20 29 model.embedding_dim 0.0 +20 29 negative_sampler.num_negs_per_pos 24.0 +20 29 training.batch_size 1.0 +20 30 model.embedding_dim 1.0 +20 30 negative_sampler.num_negs_per_pos 47.0 +20 30 training.batch_size 2.0 +20 31 model.embedding_dim 2.0 +20 31 negative_sampler.num_negs_per_pos 57.0 +20 31 training.batch_size 2.0 +20 32 model.embedding_dim 1.0 +20 32 negative_sampler.num_negs_per_pos 13.0 +20 32 training.batch_size 1.0 +20 33 model.embedding_dim 1.0 +20 33 negative_sampler.num_negs_per_pos 83.0 +20 33 training.batch_size 1.0 +20 34 model.embedding_dim 1.0 +20 34 negative_sampler.num_negs_per_pos 44.0 +20 34 training.batch_size 1.0 +20 35 model.embedding_dim 0.0 +20 35 negative_sampler.num_negs_per_pos 22.0 +20 35 training.batch_size 1.0 +20 36 model.embedding_dim 2.0 +20 36 negative_sampler.num_negs_per_pos 95.0 +20 36 training.batch_size 1.0 +20 37 model.embedding_dim 2.0 +20 37 negative_sampler.num_negs_per_pos 3.0 +20 37 training.batch_size 0.0 +20 38 model.embedding_dim 0.0 +20 38 negative_sampler.num_negs_per_pos 36.0 +20 38 training.batch_size 0.0 +20 39 model.embedding_dim 1.0 +20 39 negative_sampler.num_negs_per_pos 0.0 +20 39 training.batch_size 2.0 +20 40 model.embedding_dim 1.0 +20 40 negative_sampler.num_negs_per_pos 73.0 +20 40 training.batch_size 1.0 +20 41 model.embedding_dim 2.0 +20 41 negative_sampler.num_negs_per_pos 95.0 +20 41 training.batch_size 1.0 +20 42 model.embedding_dim 2.0 +20 42 negative_sampler.num_negs_per_pos 11.0 +20 42 training.batch_size 0.0 +20 43 model.embedding_dim 2.0 +20 43 negative_sampler.num_negs_per_pos 79.0 +20 43 training.batch_size 1.0 +20 44 model.embedding_dim 0.0 +20 44 negative_sampler.num_negs_per_pos 8.0 +20 44 training.batch_size 2.0 +20 45 
model.embedding_dim 2.0 +20 45 negative_sampler.num_negs_per_pos 65.0 +20 45 training.batch_size 1.0 +20 46 model.embedding_dim 0.0 +20 46 negative_sampler.num_negs_per_pos 91.0 +20 46 training.batch_size 0.0 +20 47 model.embedding_dim 1.0 +20 47 negative_sampler.num_negs_per_pos 18.0 +20 47 training.batch_size 2.0 +20 48 model.embedding_dim 0.0 +20 48 negative_sampler.num_negs_per_pos 20.0 +20 48 training.batch_size 2.0 +20 49 model.embedding_dim 2.0 +20 49 negative_sampler.num_negs_per_pos 86.0 +20 49 training.batch_size 2.0 +20 50 model.embedding_dim 2.0 +20 50 negative_sampler.num_negs_per_pos 83.0 +20 50 training.batch_size 1.0 +20 51 model.embedding_dim 0.0 +20 51 negative_sampler.num_negs_per_pos 89.0 +20 51 training.batch_size 1.0 +20 52 model.embedding_dim 0.0 +20 52 negative_sampler.num_negs_per_pos 21.0 +20 52 training.batch_size 1.0 +20 53 model.embedding_dim 1.0 +20 53 negative_sampler.num_negs_per_pos 4.0 +20 53 training.batch_size 1.0 +20 54 model.embedding_dim 1.0 +20 54 negative_sampler.num_negs_per_pos 40.0 +20 54 training.batch_size 1.0 +20 55 model.embedding_dim 0.0 +20 55 negative_sampler.num_negs_per_pos 4.0 +20 55 training.batch_size 1.0 +20 56 model.embedding_dim 0.0 +20 56 negative_sampler.num_negs_per_pos 22.0 +20 56 training.batch_size 1.0 +20 57 model.embedding_dim 0.0 +20 57 negative_sampler.num_negs_per_pos 14.0 +20 57 training.batch_size 0.0 +20 58 model.embedding_dim 1.0 +20 58 negative_sampler.num_negs_per_pos 67.0 +20 58 training.batch_size 2.0 +20 59 model.embedding_dim 0.0 +20 59 negative_sampler.num_negs_per_pos 61.0 +20 59 training.batch_size 1.0 +20 60 model.embedding_dim 2.0 +20 60 negative_sampler.num_negs_per_pos 45.0 +20 60 training.batch_size 0.0 +20 61 model.embedding_dim 2.0 +20 61 negative_sampler.num_negs_per_pos 66.0 +20 61 training.batch_size 0.0 +20 62 model.embedding_dim 0.0 +20 62 negative_sampler.num_negs_per_pos 25.0 +20 62 training.batch_size 1.0 +20 63 model.embedding_dim 2.0 +20 63 
negative_sampler.num_negs_per_pos 63.0 +20 63 training.batch_size 2.0 +20 64 model.embedding_dim 1.0 +20 64 negative_sampler.num_negs_per_pos 32.0 +20 64 training.batch_size 1.0 +20 65 model.embedding_dim 0.0 +20 65 negative_sampler.num_negs_per_pos 6.0 +20 65 training.batch_size 1.0 +20 66 model.embedding_dim 0.0 +20 66 negative_sampler.num_negs_per_pos 51.0 +20 66 training.batch_size 1.0 +20 67 model.embedding_dim 1.0 +20 67 negative_sampler.num_negs_per_pos 34.0 +20 67 training.batch_size 0.0 +20 68 model.embedding_dim 1.0 +20 68 negative_sampler.num_negs_per_pos 28.0 +20 68 training.batch_size 1.0 +20 69 model.embedding_dim 2.0 +20 69 negative_sampler.num_negs_per_pos 85.0 +20 69 training.batch_size 1.0 +20 70 model.embedding_dim 1.0 +20 70 negative_sampler.num_negs_per_pos 53.0 +20 70 training.batch_size 0.0 +20 71 model.embedding_dim 1.0 +20 71 negative_sampler.num_negs_per_pos 9.0 +20 71 training.batch_size 2.0 +20 72 model.embedding_dim 0.0 +20 72 negative_sampler.num_negs_per_pos 35.0 +20 72 training.batch_size 1.0 +20 73 model.embedding_dim 1.0 +20 73 negative_sampler.num_negs_per_pos 36.0 +20 73 training.batch_size 1.0 +20 74 model.embedding_dim 0.0 +20 74 negative_sampler.num_negs_per_pos 68.0 +20 74 training.batch_size 2.0 +20 75 model.embedding_dim 0.0 +20 75 negative_sampler.num_negs_per_pos 26.0 +20 75 training.batch_size 0.0 +20 76 model.embedding_dim 2.0 +20 76 negative_sampler.num_negs_per_pos 38.0 +20 76 training.batch_size 0.0 +20 77 model.embedding_dim 1.0 +20 77 negative_sampler.num_negs_per_pos 8.0 +20 77 training.batch_size 0.0 +20 78 model.embedding_dim 0.0 +20 78 negative_sampler.num_negs_per_pos 70.0 +20 78 training.batch_size 0.0 +20 79 model.embedding_dim 2.0 +20 79 negative_sampler.num_negs_per_pos 27.0 +20 79 training.batch_size 2.0 +20 80 model.embedding_dim 2.0 +20 80 negative_sampler.num_negs_per_pos 63.0 +20 80 training.batch_size 2.0 +20 81 model.embedding_dim 0.0 +20 81 negative_sampler.num_negs_per_pos 34.0 +20 81 
training.batch_size 0.0 +20 82 model.embedding_dim 1.0 +20 82 negative_sampler.num_negs_per_pos 25.0 +20 82 training.batch_size 1.0 +20 83 model.embedding_dim 1.0 +20 83 negative_sampler.num_negs_per_pos 20.0 +20 83 training.batch_size 0.0 +20 84 model.embedding_dim 2.0 +20 84 negative_sampler.num_negs_per_pos 56.0 +20 84 training.batch_size 2.0 +20 85 model.embedding_dim 2.0 +20 85 negative_sampler.num_negs_per_pos 70.0 +20 85 training.batch_size 1.0 +20 86 model.embedding_dim 0.0 +20 86 negative_sampler.num_negs_per_pos 76.0 +20 86 training.batch_size 0.0 +20 87 model.embedding_dim 2.0 +20 87 negative_sampler.num_negs_per_pos 81.0 +20 87 training.batch_size 2.0 +20 88 model.embedding_dim 1.0 +20 88 negative_sampler.num_negs_per_pos 22.0 +20 88 training.batch_size 0.0 +20 89 model.embedding_dim 1.0 +20 89 negative_sampler.num_negs_per_pos 88.0 +20 89 training.batch_size 0.0 +20 90 model.embedding_dim 2.0 +20 90 negative_sampler.num_negs_per_pos 4.0 +20 90 training.batch_size 1.0 +20 91 model.embedding_dim 2.0 +20 91 negative_sampler.num_negs_per_pos 68.0 +20 91 training.batch_size 2.0 +20 92 model.embedding_dim 0.0 +20 92 negative_sampler.num_negs_per_pos 36.0 +20 92 training.batch_size 2.0 +20 93 model.embedding_dim 2.0 +20 93 negative_sampler.num_negs_per_pos 36.0 +20 93 training.batch_size 0.0 +20 94 model.embedding_dim 1.0 +20 94 negative_sampler.num_negs_per_pos 81.0 +20 94 training.batch_size 1.0 +20 95 model.embedding_dim 2.0 +20 95 negative_sampler.num_negs_per_pos 47.0 +20 95 training.batch_size 1.0 +20 96 model.embedding_dim 2.0 +20 96 negative_sampler.num_negs_per_pos 4.0 +20 96 training.batch_size 1.0 +20 97 model.embedding_dim 1.0 +20 97 negative_sampler.num_negs_per_pos 12.0 +20 97 training.batch_size 2.0 +20 98 model.embedding_dim 1.0 +20 98 negative_sampler.num_negs_per_pos 78.0 +20 98 training.batch_size 1.0 +20 99 model.embedding_dim 0.0 +20 99 negative_sampler.num_negs_per_pos 2.0 +20 99 training.batch_size 2.0 +20 100 model.embedding_dim 2.0 
+20 100 negative_sampler.num_negs_per_pos 98.0 +20 100 training.batch_size 1.0 +20 1 dataset """kinships""" +20 1 model """complex""" +20 1 loss """bceaftersigmoid""" +20 1 regularizer """no""" +20 1 optimizer """adadelta""" +20 1 training_loop """owa""" +20 1 negative_sampler """basic""" +20 1 evaluator """rankbased""" +20 2 dataset """kinships""" +20 2 model """complex""" +20 2 loss """bceaftersigmoid""" +20 2 regularizer """no""" +20 2 optimizer """adadelta""" +20 2 training_loop """owa""" +20 2 negative_sampler """basic""" +20 2 evaluator """rankbased""" +20 3 dataset """kinships""" +20 3 model """complex""" +20 3 loss """bceaftersigmoid""" +20 3 regularizer """no""" +20 3 optimizer """adadelta""" +20 3 training_loop """owa""" +20 3 negative_sampler """basic""" +20 3 evaluator """rankbased""" +20 4 dataset """kinships""" +20 4 model """complex""" +20 4 loss """bceaftersigmoid""" +20 4 regularizer """no""" +20 4 optimizer """adadelta""" +20 4 training_loop """owa""" +20 4 negative_sampler """basic""" +20 4 evaluator """rankbased""" +20 5 dataset """kinships""" +20 5 model """complex""" +20 5 loss """bceaftersigmoid""" +20 5 regularizer """no""" +20 5 optimizer """adadelta""" +20 5 training_loop """owa""" +20 5 negative_sampler """basic""" +20 5 evaluator """rankbased""" +20 6 dataset """kinships""" +20 6 model """complex""" +20 6 loss """bceaftersigmoid""" +20 6 regularizer """no""" +20 6 optimizer """adadelta""" +20 6 training_loop """owa""" +20 6 negative_sampler """basic""" +20 6 evaluator """rankbased""" +20 7 dataset """kinships""" +20 7 model """complex""" +20 7 loss """bceaftersigmoid""" +20 7 regularizer """no""" +20 7 optimizer """adadelta""" +20 7 training_loop """owa""" +20 7 negative_sampler """basic""" +20 7 evaluator """rankbased""" +20 8 dataset """kinships""" +20 8 model """complex""" +20 8 loss """bceaftersigmoid""" +20 8 regularizer """no""" +20 8 optimizer """adadelta""" +20 8 training_loop """owa""" +20 8 negative_sampler """basic""" +20 8 
evaluator """rankbased""" +20 9 dataset """kinships""" +20 9 model """complex""" +20 9 loss """bceaftersigmoid""" +20 9 regularizer """no""" +20 9 optimizer """adadelta""" +20 9 training_loop """owa""" +20 9 negative_sampler """basic""" +20 9 evaluator """rankbased""" +20 10 dataset """kinships""" +20 10 model """complex""" +20 10 loss """bceaftersigmoid""" +20 10 regularizer """no""" +20 10 optimizer """adadelta""" +20 10 training_loop """owa""" +20 10 negative_sampler """basic""" +20 10 evaluator """rankbased""" +20 11 dataset """kinships""" +20 11 model """complex""" +20 11 loss """bceaftersigmoid""" +20 11 regularizer """no""" +20 11 optimizer """adadelta""" +20 11 training_loop """owa""" +20 11 negative_sampler """basic""" +20 11 evaluator """rankbased""" +20 12 dataset """kinships""" +20 12 model """complex""" +20 12 loss """bceaftersigmoid""" +20 12 regularizer """no""" +20 12 optimizer """adadelta""" +20 12 training_loop """owa""" +20 12 negative_sampler """basic""" +20 12 evaluator """rankbased""" +20 13 dataset """kinships""" +20 13 model """complex""" +20 13 loss """bceaftersigmoid""" +20 13 regularizer """no""" +20 13 optimizer """adadelta""" +20 13 training_loop """owa""" +20 13 negative_sampler """basic""" +20 13 evaluator """rankbased""" +20 14 dataset """kinships""" +20 14 model """complex""" +20 14 loss """bceaftersigmoid""" +20 14 regularizer """no""" +20 14 optimizer """adadelta""" +20 14 training_loop """owa""" +20 14 negative_sampler """basic""" +20 14 evaluator """rankbased""" +20 15 dataset """kinships""" +20 15 model """complex""" +20 15 loss """bceaftersigmoid""" +20 15 regularizer """no""" +20 15 optimizer """adadelta""" +20 15 training_loop """owa""" +20 15 negative_sampler """basic""" +20 15 evaluator """rankbased""" +20 16 dataset """kinships""" +20 16 model """complex""" +20 16 loss """bceaftersigmoid""" +20 16 regularizer """no""" +20 16 optimizer """adadelta""" +20 16 training_loop """owa""" +20 16 negative_sampler """basic""" +20 16 
evaluator """rankbased""" +20 17 dataset """kinships""" +20 17 model """complex""" +20 17 loss """bceaftersigmoid""" +20 17 regularizer """no""" +20 17 optimizer """adadelta""" +20 17 training_loop """owa""" +20 17 negative_sampler """basic""" +20 17 evaluator """rankbased""" +20 18 dataset """kinships""" +20 18 model """complex""" +20 18 loss """bceaftersigmoid""" +20 18 regularizer """no""" +20 18 optimizer """adadelta""" +20 18 training_loop """owa""" +20 18 negative_sampler """basic""" +20 18 evaluator """rankbased""" +20 19 dataset """kinships""" +20 19 model """complex""" +20 19 loss """bceaftersigmoid""" +20 19 regularizer """no""" +20 19 optimizer """adadelta""" +20 19 training_loop """owa""" +20 19 negative_sampler """basic""" +20 19 evaluator """rankbased""" +20 20 dataset """kinships""" +20 20 model """complex""" +20 20 loss """bceaftersigmoid""" +20 20 regularizer """no""" +20 20 optimizer """adadelta""" +20 20 training_loop """owa""" +20 20 negative_sampler """basic""" +20 20 evaluator """rankbased""" +20 21 dataset """kinships""" +20 21 model """complex""" +20 21 loss """bceaftersigmoid""" +20 21 regularizer """no""" +20 21 optimizer """adadelta""" +20 21 training_loop """owa""" +20 21 negative_sampler """basic""" +20 21 evaluator """rankbased""" +20 22 dataset """kinships""" +20 22 model """complex""" +20 22 loss """bceaftersigmoid""" +20 22 regularizer """no""" +20 22 optimizer """adadelta""" +20 22 training_loop """owa""" +20 22 negative_sampler """basic""" +20 22 evaluator """rankbased""" +20 23 dataset """kinships""" +20 23 model """complex""" +20 23 loss """bceaftersigmoid""" +20 23 regularizer """no""" +20 23 optimizer """adadelta""" +20 23 training_loop """owa""" +20 23 negative_sampler """basic""" +20 23 evaluator """rankbased""" +20 24 dataset """kinships""" +20 24 model """complex""" +20 24 loss """bceaftersigmoid""" +20 24 regularizer """no""" +20 24 optimizer """adadelta""" +20 24 training_loop """owa""" +20 24 negative_sampler 
"""basic""" +20 24 evaluator """rankbased""" +20 25 dataset """kinships""" +20 25 model """complex""" +20 25 loss """bceaftersigmoid""" +20 25 regularizer """no""" +20 25 optimizer """adadelta""" +20 25 training_loop """owa""" +20 25 negative_sampler """basic""" +20 25 evaluator """rankbased""" +20 26 dataset """kinships""" +20 26 model """complex""" +20 26 loss """bceaftersigmoid""" +20 26 regularizer """no""" +20 26 optimizer """adadelta""" +20 26 training_loop """owa""" +20 26 negative_sampler """basic""" +20 26 evaluator """rankbased""" +20 27 dataset """kinships""" +20 27 model """complex""" +20 27 loss """bceaftersigmoid""" +20 27 regularizer """no""" +20 27 optimizer """adadelta""" +20 27 training_loop """owa""" +20 27 negative_sampler """basic""" +20 27 evaluator """rankbased""" +20 28 dataset """kinships""" +20 28 model """complex""" +20 28 loss """bceaftersigmoid""" +20 28 regularizer """no""" +20 28 optimizer """adadelta""" +20 28 training_loop """owa""" +20 28 negative_sampler """basic""" +20 28 evaluator """rankbased""" +20 29 dataset """kinships""" +20 29 model """complex""" +20 29 loss """bceaftersigmoid""" +20 29 regularizer """no""" +20 29 optimizer """adadelta""" +20 29 training_loop """owa""" +20 29 negative_sampler """basic""" +20 29 evaluator """rankbased""" +20 30 dataset """kinships""" +20 30 model """complex""" +20 30 loss """bceaftersigmoid""" +20 30 regularizer """no""" +20 30 optimizer """adadelta""" +20 30 training_loop """owa""" +20 30 negative_sampler """basic""" +20 30 evaluator """rankbased""" +20 31 dataset """kinships""" +20 31 model """complex""" +20 31 loss """bceaftersigmoid""" +20 31 regularizer """no""" +20 31 optimizer """adadelta""" +20 31 training_loop """owa""" +20 31 negative_sampler """basic""" +20 31 evaluator """rankbased""" +20 32 dataset """kinships""" +20 32 model """complex""" +20 32 loss """bceaftersigmoid""" +20 32 regularizer """no""" +20 32 optimizer """adadelta""" +20 32 training_loop """owa""" +20 32 
negative_sampler """basic""" +20 32 evaluator """rankbased""" +20 33 dataset """kinships""" +20 33 model """complex""" +20 33 loss """bceaftersigmoid""" +20 33 regularizer """no""" +20 33 optimizer """adadelta""" +20 33 training_loop """owa""" +20 33 negative_sampler """basic""" +20 33 evaluator """rankbased""" +20 34 dataset """kinships""" +20 34 model """complex""" +20 34 loss """bceaftersigmoid""" +20 34 regularizer """no""" +20 34 optimizer """adadelta""" +20 34 training_loop """owa""" +20 34 negative_sampler """basic""" +20 34 evaluator """rankbased""" +20 35 dataset """kinships""" +20 35 model """complex""" +20 35 loss """bceaftersigmoid""" +20 35 regularizer """no""" +20 35 optimizer """adadelta""" +20 35 training_loop """owa""" +20 35 negative_sampler """basic""" +20 35 evaluator """rankbased""" +20 36 dataset """kinships""" +20 36 model """complex""" +20 36 loss """bceaftersigmoid""" +20 36 regularizer """no""" +20 36 optimizer """adadelta""" +20 36 training_loop """owa""" +20 36 negative_sampler """basic""" +20 36 evaluator """rankbased""" +20 37 dataset """kinships""" +20 37 model """complex""" +20 37 loss """bceaftersigmoid""" +20 37 regularizer """no""" +20 37 optimizer """adadelta""" +20 37 training_loop """owa""" +20 37 negative_sampler """basic""" +20 37 evaluator """rankbased""" +20 38 dataset """kinships""" +20 38 model """complex""" +20 38 loss """bceaftersigmoid""" +20 38 regularizer """no""" +20 38 optimizer """adadelta""" +20 38 training_loop """owa""" +20 38 negative_sampler """basic""" +20 38 evaluator """rankbased""" +20 39 dataset """kinships""" +20 39 model """complex""" +20 39 loss """bceaftersigmoid""" +20 39 regularizer """no""" +20 39 optimizer """adadelta""" +20 39 training_loop """owa""" +20 39 negative_sampler """basic""" +20 39 evaluator """rankbased""" +20 40 dataset """kinships""" +20 40 model """complex""" +20 40 loss """bceaftersigmoid""" +20 40 regularizer """no""" +20 40 optimizer """adadelta""" +20 40 training_loop 
"""owa""" +20 40 negative_sampler """basic""" +20 40 evaluator """rankbased""" +20 41 dataset """kinships""" +20 41 model """complex""" +20 41 loss """bceaftersigmoid""" +20 41 regularizer """no""" +20 41 optimizer """adadelta""" +20 41 training_loop """owa""" +20 41 negative_sampler """basic""" +20 41 evaluator """rankbased""" +20 42 dataset """kinships""" +20 42 model """complex""" +20 42 loss """bceaftersigmoid""" +20 42 regularizer """no""" +20 42 optimizer """adadelta""" +20 42 training_loop """owa""" +20 42 negative_sampler """basic""" +20 42 evaluator """rankbased""" +20 43 dataset """kinships""" +20 43 model """complex""" +20 43 loss """bceaftersigmoid""" +20 43 regularizer """no""" +20 43 optimizer """adadelta""" +20 43 training_loop """owa""" +20 43 negative_sampler """basic""" +20 43 evaluator """rankbased""" +20 44 dataset """kinships""" +20 44 model """complex""" +20 44 loss """bceaftersigmoid""" +20 44 regularizer """no""" +20 44 optimizer """adadelta""" +20 44 training_loop """owa""" +20 44 negative_sampler """basic""" +20 44 evaluator """rankbased""" +20 45 dataset """kinships""" +20 45 model """complex""" +20 45 loss """bceaftersigmoid""" +20 45 regularizer """no""" +20 45 optimizer """adadelta""" +20 45 training_loop """owa""" +20 45 negative_sampler """basic""" +20 45 evaluator """rankbased""" +20 46 dataset """kinships""" +20 46 model """complex""" +20 46 loss """bceaftersigmoid""" +20 46 regularizer """no""" +20 46 optimizer """adadelta""" +20 46 training_loop """owa""" +20 46 negative_sampler """basic""" +20 46 evaluator """rankbased""" +20 47 dataset """kinships""" +20 47 model """complex""" +20 47 loss """bceaftersigmoid""" +20 47 regularizer """no""" +20 47 optimizer """adadelta""" +20 47 training_loop """owa""" +20 47 negative_sampler """basic""" +20 47 evaluator """rankbased""" +20 48 dataset """kinships""" +20 48 model """complex""" +20 48 loss """bceaftersigmoid""" +20 48 regularizer """no""" +20 48 optimizer """adadelta""" +20 48 
training_loop """owa""" +20 48 negative_sampler """basic""" +20 48 evaluator """rankbased""" +20 49 dataset """kinships""" +20 49 model """complex""" +20 49 loss """bceaftersigmoid""" +20 49 regularizer """no""" +20 49 optimizer """adadelta""" +20 49 training_loop """owa""" +20 49 negative_sampler """basic""" +20 49 evaluator """rankbased""" +20 50 dataset """kinships""" +20 50 model """complex""" +20 50 loss """bceaftersigmoid""" +20 50 regularizer """no""" +20 50 optimizer """adadelta""" +20 50 training_loop """owa""" +20 50 negative_sampler """basic""" +20 50 evaluator """rankbased""" +20 51 dataset """kinships""" +20 51 model """complex""" +20 51 loss """bceaftersigmoid""" +20 51 regularizer """no""" +20 51 optimizer """adadelta""" +20 51 training_loop """owa""" +20 51 negative_sampler """basic""" +20 51 evaluator """rankbased""" +20 52 dataset """kinships""" +20 52 model """complex""" +20 52 loss """bceaftersigmoid""" +20 52 regularizer """no""" +20 52 optimizer """adadelta""" +20 52 training_loop """owa""" +20 52 negative_sampler """basic""" +20 52 evaluator """rankbased""" +20 53 dataset """kinships""" +20 53 model """complex""" +20 53 loss """bceaftersigmoid""" +20 53 regularizer """no""" +20 53 optimizer """adadelta""" +20 53 training_loop """owa""" +20 53 negative_sampler """basic""" +20 53 evaluator """rankbased""" +20 54 dataset """kinships""" +20 54 model """complex""" +20 54 loss """bceaftersigmoid""" +20 54 regularizer """no""" +20 54 optimizer """adadelta""" +20 54 training_loop """owa""" +20 54 negative_sampler """basic""" +20 54 evaluator """rankbased""" +20 55 dataset """kinships""" +20 55 model """complex""" +20 55 loss """bceaftersigmoid""" +20 55 regularizer """no""" +20 55 optimizer """adadelta""" +20 55 training_loop """owa""" +20 55 negative_sampler """basic""" +20 55 evaluator """rankbased""" +20 56 dataset """kinships""" +20 56 model """complex""" +20 56 loss """bceaftersigmoid""" +20 56 regularizer """no""" +20 56 optimizer 
"""adadelta""" +20 56 training_loop """owa""" +20 56 negative_sampler """basic""" +20 56 evaluator """rankbased""" +20 57 dataset """kinships""" +20 57 model """complex""" +20 57 loss """bceaftersigmoid""" +20 57 regularizer """no""" +20 57 optimizer """adadelta""" +20 57 training_loop """owa""" +20 57 negative_sampler """basic""" +20 57 evaluator """rankbased""" +20 58 dataset """kinships""" +20 58 model """complex""" +20 58 loss """bceaftersigmoid""" +20 58 regularizer """no""" +20 58 optimizer """adadelta""" +20 58 training_loop """owa""" +20 58 negative_sampler """basic""" +20 58 evaluator """rankbased""" +20 59 dataset """kinships""" +20 59 model """complex""" +20 59 loss """bceaftersigmoid""" +20 59 regularizer """no""" +20 59 optimizer """adadelta""" +20 59 training_loop """owa""" +20 59 negative_sampler """basic""" +20 59 evaluator """rankbased""" +20 60 dataset """kinships""" +20 60 model """complex""" +20 60 loss """bceaftersigmoid""" +20 60 regularizer """no""" +20 60 optimizer """adadelta""" +20 60 training_loop """owa""" +20 60 negative_sampler """basic""" +20 60 evaluator """rankbased""" +20 61 dataset """kinships""" +20 61 model """complex""" +20 61 loss """bceaftersigmoid""" +20 61 regularizer """no""" +20 61 optimizer """adadelta""" +20 61 training_loop """owa""" +20 61 negative_sampler """basic""" +20 61 evaluator """rankbased""" +20 62 dataset """kinships""" +20 62 model """complex""" +20 62 loss """bceaftersigmoid""" +20 62 regularizer """no""" +20 62 optimizer """adadelta""" +20 62 training_loop """owa""" +20 62 negative_sampler """basic""" +20 62 evaluator """rankbased""" +20 63 dataset """kinships""" +20 63 model """complex""" +20 63 loss """bceaftersigmoid""" +20 63 regularizer """no""" +20 63 optimizer """adadelta""" +20 63 training_loop """owa""" +20 63 negative_sampler """basic""" +20 63 evaluator """rankbased""" +20 64 dataset """kinships""" +20 64 model """complex""" +20 64 loss """bceaftersigmoid""" +20 64 regularizer """no""" +20 64 
optimizer """adadelta""" +20 64 training_loop """owa""" +20 64 negative_sampler """basic""" +20 64 evaluator """rankbased""" +20 65 dataset """kinships""" +20 65 model """complex""" +20 65 loss """bceaftersigmoid""" +20 65 regularizer """no""" +20 65 optimizer """adadelta""" +20 65 training_loop """owa""" +20 65 negative_sampler """basic""" +20 65 evaluator """rankbased""" +20 66 dataset """kinships""" +20 66 model """complex""" +20 66 loss """bceaftersigmoid""" +20 66 regularizer """no""" +20 66 optimizer """adadelta""" +20 66 training_loop """owa""" +20 66 negative_sampler """basic""" +20 66 evaluator """rankbased""" +20 67 dataset """kinships""" +20 67 model """complex""" +20 67 loss """bceaftersigmoid""" +20 67 regularizer """no""" +20 67 optimizer """adadelta""" +20 67 training_loop """owa""" +20 67 negative_sampler """basic""" +20 67 evaluator """rankbased""" +20 68 dataset """kinships""" +20 68 model """complex""" +20 68 loss """bceaftersigmoid""" +20 68 regularizer """no""" +20 68 optimizer """adadelta""" +20 68 training_loop """owa""" +20 68 negative_sampler """basic""" +20 68 evaluator """rankbased""" +20 69 dataset """kinships""" +20 69 model """complex""" +20 69 loss """bceaftersigmoid""" +20 69 regularizer """no""" +20 69 optimizer """adadelta""" +20 69 training_loop """owa""" +20 69 negative_sampler """basic""" +20 69 evaluator """rankbased""" +20 70 dataset """kinships""" +20 70 model """complex""" +20 70 loss """bceaftersigmoid""" +20 70 regularizer """no""" +20 70 optimizer """adadelta""" +20 70 training_loop """owa""" +20 70 negative_sampler """basic""" +20 70 evaluator """rankbased""" +20 71 dataset """kinships""" +20 71 model """complex""" +20 71 loss """bceaftersigmoid""" +20 71 regularizer """no""" +20 71 optimizer """adadelta""" +20 71 training_loop """owa""" +20 71 negative_sampler """basic""" +20 71 evaluator """rankbased""" +20 72 dataset """kinships""" +20 72 model """complex""" +20 72 loss """bceaftersigmoid""" +20 72 regularizer 
"""no""" +20 72 optimizer """adadelta""" +20 72 training_loop """owa""" +20 72 negative_sampler """basic""" +20 72 evaluator """rankbased""" +20 73 dataset """kinships""" +20 73 model """complex""" +20 73 loss """bceaftersigmoid""" +20 73 regularizer """no""" +20 73 optimizer """adadelta""" +20 73 training_loop """owa""" +20 73 negative_sampler """basic""" +20 73 evaluator """rankbased""" +20 74 dataset """kinships""" +20 74 model """complex""" +20 74 loss """bceaftersigmoid""" +20 74 regularizer """no""" +20 74 optimizer """adadelta""" +20 74 training_loop """owa""" +20 74 negative_sampler """basic""" +20 74 evaluator """rankbased""" +20 75 dataset """kinships""" +20 75 model """complex""" +20 75 loss """bceaftersigmoid""" +20 75 regularizer """no""" +20 75 optimizer """adadelta""" +20 75 training_loop """owa""" +20 75 negative_sampler """basic""" +20 75 evaluator """rankbased""" +20 76 dataset """kinships""" +20 76 model """complex""" +20 76 loss """bceaftersigmoid""" +20 76 regularizer """no""" +20 76 optimizer """adadelta""" +20 76 training_loop """owa""" +20 76 negative_sampler """basic""" +20 76 evaluator """rankbased""" +20 77 dataset """kinships""" +20 77 model """complex""" +20 77 loss """bceaftersigmoid""" +20 77 regularizer """no""" +20 77 optimizer """adadelta""" +20 77 training_loop """owa""" +20 77 negative_sampler """basic""" +20 77 evaluator """rankbased""" +20 78 dataset """kinships""" +20 78 model """complex""" +20 78 loss """bceaftersigmoid""" +20 78 regularizer """no""" +20 78 optimizer """adadelta""" +20 78 training_loop """owa""" +20 78 negative_sampler """basic""" +20 78 evaluator """rankbased""" +20 79 dataset """kinships""" +20 79 model """complex""" +20 79 loss """bceaftersigmoid""" +20 79 regularizer """no""" +20 79 optimizer """adadelta""" +20 79 training_loop """owa""" +20 79 negative_sampler """basic""" +20 79 evaluator """rankbased""" +20 80 dataset """kinships""" +20 80 model """complex""" +20 80 loss """bceaftersigmoid""" +20 80 
regularizer """no""" +20 80 optimizer """adadelta""" +20 80 training_loop """owa""" +20 80 negative_sampler """basic""" +20 80 evaluator """rankbased""" +20 81 dataset """kinships""" +20 81 model """complex""" +20 81 loss """bceaftersigmoid""" +20 81 regularizer """no""" +20 81 optimizer """adadelta""" +20 81 training_loop """owa""" +20 81 negative_sampler """basic""" +20 81 evaluator """rankbased""" +20 82 dataset """kinships""" +20 82 model """complex""" +20 82 loss """bceaftersigmoid""" +20 82 regularizer """no""" +20 82 optimizer """adadelta""" +20 82 training_loop """owa""" +20 82 negative_sampler """basic""" +20 82 evaluator """rankbased""" +20 83 dataset """kinships""" +20 83 model """complex""" +20 83 loss """bceaftersigmoid""" +20 83 regularizer """no""" +20 83 optimizer """adadelta""" +20 83 training_loop """owa""" +20 83 negative_sampler """basic""" +20 83 evaluator """rankbased""" +20 84 dataset """kinships""" +20 84 model """complex""" +20 84 loss """bceaftersigmoid""" +20 84 regularizer """no""" +20 84 optimizer """adadelta""" +20 84 training_loop """owa""" +20 84 negative_sampler """basic""" +20 84 evaluator """rankbased""" +20 85 dataset """kinships""" +20 85 model """complex""" +20 85 loss """bceaftersigmoid""" +20 85 regularizer """no""" +20 85 optimizer """adadelta""" +20 85 training_loop """owa""" +20 85 negative_sampler """basic""" +20 85 evaluator """rankbased""" +20 86 dataset """kinships""" +20 86 model """complex""" +20 86 loss """bceaftersigmoid""" +20 86 regularizer """no""" +20 86 optimizer """adadelta""" +20 86 training_loop """owa""" +20 86 negative_sampler """basic""" +20 86 evaluator """rankbased""" +20 87 dataset """kinships""" +20 87 model """complex""" +20 87 loss """bceaftersigmoid""" +20 87 regularizer """no""" +20 87 optimizer """adadelta""" +20 87 training_loop """owa""" +20 87 negative_sampler """basic""" +20 87 evaluator """rankbased""" +20 88 dataset """kinships""" +20 88 model """complex""" +20 88 loss 
"""bceaftersigmoid""" +20 88 regularizer """no""" +20 88 optimizer """adadelta""" +20 88 training_loop """owa""" +20 88 negative_sampler """basic""" +20 88 evaluator """rankbased""" +20 89 dataset """kinships""" +20 89 model """complex""" +20 89 loss """bceaftersigmoid""" +20 89 regularizer """no""" +20 89 optimizer """adadelta""" +20 89 training_loop """owa""" +20 89 negative_sampler """basic""" +20 89 evaluator """rankbased""" +20 90 dataset """kinships""" +20 90 model """complex""" +20 90 loss """bceaftersigmoid""" +20 90 regularizer """no""" +20 90 optimizer """adadelta""" +20 90 training_loop """owa""" +20 90 negative_sampler """basic""" +20 90 evaluator """rankbased""" +20 91 dataset """kinships""" +20 91 model """complex""" +20 91 loss """bceaftersigmoid""" +20 91 regularizer """no""" +20 91 optimizer """adadelta""" +20 91 training_loop """owa""" +20 91 negative_sampler """basic""" +20 91 evaluator """rankbased""" +20 92 dataset """kinships""" +20 92 model """complex""" +20 92 loss """bceaftersigmoid""" +20 92 regularizer """no""" +20 92 optimizer """adadelta""" +20 92 training_loop """owa""" +20 92 negative_sampler """basic""" +20 92 evaluator """rankbased""" +20 93 dataset """kinships""" +20 93 model """complex""" +20 93 loss """bceaftersigmoid""" +20 93 regularizer """no""" +20 93 optimizer """adadelta""" +20 93 training_loop """owa""" +20 93 negative_sampler """basic""" +20 93 evaluator """rankbased""" +20 94 dataset """kinships""" +20 94 model """complex""" +20 94 loss """bceaftersigmoid""" +20 94 regularizer """no""" +20 94 optimizer """adadelta""" +20 94 training_loop """owa""" +20 94 negative_sampler """basic""" +20 94 evaluator """rankbased""" +20 95 dataset """kinships""" +20 95 model """complex""" +20 95 loss """bceaftersigmoid""" +20 95 regularizer """no""" +20 95 optimizer """adadelta""" +20 95 training_loop """owa""" +20 95 negative_sampler """basic""" +20 95 evaluator """rankbased""" +20 96 dataset """kinships""" +20 96 model """complex""" +20 
96 loss """bceaftersigmoid""" +20 96 regularizer """no""" +20 96 optimizer """adadelta""" +20 96 training_loop """owa""" +20 96 negative_sampler """basic""" +20 96 evaluator """rankbased""" +20 97 dataset """kinships""" +20 97 model """complex""" +20 97 loss """bceaftersigmoid""" +20 97 regularizer """no""" +20 97 optimizer """adadelta""" +20 97 training_loop """owa""" +20 97 negative_sampler """basic""" +20 97 evaluator """rankbased""" +20 98 dataset """kinships""" +20 98 model """complex""" +20 98 loss """bceaftersigmoid""" +20 98 regularizer """no""" +20 98 optimizer """adadelta""" +20 98 training_loop """owa""" +20 98 negative_sampler """basic""" +20 98 evaluator """rankbased""" +20 99 dataset """kinships""" +20 99 model """complex""" +20 99 loss """bceaftersigmoid""" +20 99 regularizer """no""" +20 99 optimizer """adadelta""" +20 99 training_loop """owa""" +20 99 negative_sampler """basic""" +20 99 evaluator """rankbased""" +20 100 dataset """kinships""" +20 100 model """complex""" +20 100 loss """bceaftersigmoid""" +20 100 regularizer """no""" +20 100 optimizer """adadelta""" +20 100 training_loop """owa""" +20 100 negative_sampler """basic""" +20 100 evaluator """rankbased""" +21 1 model.embedding_dim 1.0 +21 1 negative_sampler.num_negs_per_pos 64.0 +21 1 training.batch_size 2.0 +21 2 model.embedding_dim 0.0 +21 2 negative_sampler.num_negs_per_pos 25.0 +21 2 training.batch_size 1.0 +21 3 model.embedding_dim 2.0 +21 3 negative_sampler.num_negs_per_pos 25.0 +21 3 training.batch_size 2.0 +21 4 model.embedding_dim 1.0 +21 4 negative_sampler.num_negs_per_pos 39.0 +21 4 training.batch_size 2.0 +21 5 model.embedding_dim 0.0 +21 5 negative_sampler.num_negs_per_pos 17.0 +21 5 training.batch_size 1.0 +21 6 model.embedding_dim 1.0 +21 6 negative_sampler.num_negs_per_pos 36.0 +21 6 training.batch_size 0.0 +21 7 model.embedding_dim 1.0 +21 7 negative_sampler.num_negs_per_pos 69.0 +21 7 training.batch_size 0.0 +21 8 model.embedding_dim 0.0 +21 8 
negative_sampler.num_negs_per_pos 30.0 +21 8 training.batch_size 0.0 +21 9 model.embedding_dim 2.0 +21 9 negative_sampler.num_negs_per_pos 28.0 +21 9 training.batch_size 1.0 +21 10 model.embedding_dim 0.0 +21 10 negative_sampler.num_negs_per_pos 75.0 +21 10 training.batch_size 1.0 +21 11 model.embedding_dim 1.0 +21 11 negative_sampler.num_negs_per_pos 4.0 +21 11 training.batch_size 0.0 +21 12 model.embedding_dim 1.0 +21 12 negative_sampler.num_negs_per_pos 27.0 +21 12 training.batch_size 2.0 +21 13 model.embedding_dim 0.0 +21 13 negative_sampler.num_negs_per_pos 75.0 +21 13 training.batch_size 2.0 +21 14 model.embedding_dim 0.0 +21 14 negative_sampler.num_negs_per_pos 76.0 +21 14 training.batch_size 2.0 +21 15 model.embedding_dim 2.0 +21 15 negative_sampler.num_negs_per_pos 24.0 +21 15 training.batch_size 1.0 +21 16 model.embedding_dim 1.0 +21 16 negative_sampler.num_negs_per_pos 54.0 +21 16 training.batch_size 0.0 +21 17 model.embedding_dim 0.0 +21 17 negative_sampler.num_negs_per_pos 3.0 +21 17 training.batch_size 1.0 +21 18 model.embedding_dim 2.0 +21 18 negative_sampler.num_negs_per_pos 17.0 +21 18 training.batch_size 2.0 +21 19 model.embedding_dim 2.0 +21 19 negative_sampler.num_negs_per_pos 12.0 +21 19 training.batch_size 0.0 +21 20 model.embedding_dim 1.0 +21 20 negative_sampler.num_negs_per_pos 53.0 +21 20 training.batch_size 1.0 +21 21 model.embedding_dim 1.0 +21 21 negative_sampler.num_negs_per_pos 33.0 +21 21 training.batch_size 2.0 +21 22 model.embedding_dim 0.0 +21 22 negative_sampler.num_negs_per_pos 14.0 +21 22 training.batch_size 2.0 +21 23 model.embedding_dim 0.0 +21 23 negative_sampler.num_negs_per_pos 60.0 +21 23 training.batch_size 1.0 +21 24 model.embedding_dim 1.0 +21 24 negative_sampler.num_negs_per_pos 19.0 +21 24 training.batch_size 2.0 +21 25 model.embedding_dim 0.0 +21 25 negative_sampler.num_negs_per_pos 9.0 +21 25 training.batch_size 2.0 +21 26 model.embedding_dim 0.0 +21 26 negative_sampler.num_negs_per_pos 25.0 +21 26 
training.batch_size 1.0 +21 27 model.embedding_dim 0.0 +21 27 negative_sampler.num_negs_per_pos 27.0 +21 27 training.batch_size 1.0 +21 28 model.embedding_dim 1.0 +21 28 negative_sampler.num_negs_per_pos 78.0 +21 28 training.batch_size 1.0 +21 29 model.embedding_dim 2.0 +21 29 negative_sampler.num_negs_per_pos 33.0 +21 29 training.batch_size 0.0 +21 30 model.embedding_dim 1.0 +21 30 negative_sampler.num_negs_per_pos 87.0 +21 30 training.batch_size 0.0 +21 31 model.embedding_dim 0.0 +21 31 negative_sampler.num_negs_per_pos 31.0 +21 31 training.batch_size 1.0 +21 32 model.embedding_dim 1.0 +21 32 negative_sampler.num_negs_per_pos 44.0 +21 32 training.batch_size 1.0 +21 33 model.embedding_dim 1.0 +21 33 negative_sampler.num_negs_per_pos 86.0 +21 33 training.batch_size 0.0 +21 34 model.embedding_dim 0.0 +21 34 negative_sampler.num_negs_per_pos 86.0 +21 34 training.batch_size 1.0 +21 35 model.embedding_dim 2.0 +21 35 negative_sampler.num_negs_per_pos 54.0 +21 35 training.batch_size 2.0 +21 36 model.embedding_dim 2.0 +21 36 negative_sampler.num_negs_per_pos 6.0 +21 36 training.batch_size 2.0 +21 37 model.embedding_dim 0.0 +21 37 negative_sampler.num_negs_per_pos 26.0 +21 37 training.batch_size 1.0 +21 38 model.embedding_dim 2.0 +21 38 negative_sampler.num_negs_per_pos 56.0 +21 38 training.batch_size 2.0 +21 39 model.embedding_dim 2.0 +21 39 negative_sampler.num_negs_per_pos 95.0 +21 39 training.batch_size 1.0 +21 40 model.embedding_dim 1.0 +21 40 negative_sampler.num_negs_per_pos 91.0 +21 40 training.batch_size 1.0 +21 41 model.embedding_dim 0.0 +21 41 negative_sampler.num_negs_per_pos 22.0 +21 41 training.batch_size 2.0 +21 42 model.embedding_dim 1.0 +21 42 negative_sampler.num_negs_per_pos 26.0 +21 42 training.batch_size 1.0 +21 43 model.embedding_dim 0.0 +21 43 negative_sampler.num_negs_per_pos 78.0 +21 43 training.batch_size 0.0 +21 44 model.embedding_dim 2.0 +21 44 negative_sampler.num_negs_per_pos 90.0 +21 44 training.batch_size 0.0 +21 45 model.embedding_dim 2.0 
+21 45 negative_sampler.num_negs_per_pos 96.0 +21 45 training.batch_size 2.0 +21 46 model.embedding_dim 1.0 +21 46 negative_sampler.num_negs_per_pos 73.0 +21 46 training.batch_size 2.0 +21 47 model.embedding_dim 1.0 +21 47 negative_sampler.num_negs_per_pos 86.0 +21 47 training.batch_size 1.0 +21 48 model.embedding_dim 1.0 +21 48 negative_sampler.num_negs_per_pos 86.0 +21 48 training.batch_size 0.0 +21 49 model.embedding_dim 0.0 +21 49 negative_sampler.num_negs_per_pos 93.0 +21 49 training.batch_size 2.0 +21 50 model.embedding_dim 1.0 +21 50 negative_sampler.num_negs_per_pos 88.0 +21 50 training.batch_size 0.0 +21 51 model.embedding_dim 2.0 +21 51 negative_sampler.num_negs_per_pos 51.0 +21 51 training.batch_size 1.0 +21 52 model.embedding_dim 2.0 +21 52 negative_sampler.num_negs_per_pos 13.0 +21 52 training.batch_size 0.0 +21 53 model.embedding_dim 0.0 +21 53 negative_sampler.num_negs_per_pos 55.0 +21 53 training.batch_size 0.0 +21 54 model.embedding_dim 2.0 +21 54 negative_sampler.num_negs_per_pos 95.0 +21 54 training.batch_size 0.0 +21 55 model.embedding_dim 1.0 +21 55 negative_sampler.num_negs_per_pos 47.0 +21 55 training.batch_size 2.0 +21 56 model.embedding_dim 2.0 +21 56 negative_sampler.num_negs_per_pos 72.0 +21 56 training.batch_size 0.0 +21 57 model.embedding_dim 1.0 +21 57 negative_sampler.num_negs_per_pos 61.0 +21 57 training.batch_size 2.0 +21 58 model.embedding_dim 0.0 +21 58 negative_sampler.num_negs_per_pos 86.0 +21 58 training.batch_size 1.0 +21 59 model.embedding_dim 0.0 +21 59 negative_sampler.num_negs_per_pos 27.0 +21 59 training.batch_size 2.0 +21 60 model.embedding_dim 1.0 +21 60 negative_sampler.num_negs_per_pos 42.0 +21 60 training.batch_size 1.0 +21 61 model.embedding_dim 2.0 +21 61 negative_sampler.num_negs_per_pos 19.0 +21 61 training.batch_size 2.0 +21 62 model.embedding_dim 0.0 +21 62 negative_sampler.num_negs_per_pos 81.0 +21 62 training.batch_size 1.0 +21 63 model.embedding_dim 2.0 +21 63 negative_sampler.num_negs_per_pos 30.0 +21 63 
training.batch_size 2.0 +21 64 model.embedding_dim 2.0 +21 64 negative_sampler.num_negs_per_pos 8.0 +21 64 training.batch_size 2.0 +21 65 model.embedding_dim 1.0 +21 65 negative_sampler.num_negs_per_pos 73.0 +21 65 training.batch_size 0.0 +21 66 model.embedding_dim 2.0 +21 66 negative_sampler.num_negs_per_pos 19.0 +21 66 training.batch_size 2.0 +21 67 model.embedding_dim 1.0 +21 67 negative_sampler.num_negs_per_pos 83.0 +21 67 training.batch_size 0.0 +21 68 model.embedding_dim 2.0 +21 68 negative_sampler.num_negs_per_pos 46.0 +21 68 training.batch_size 2.0 +21 69 model.embedding_dim 1.0 +21 69 negative_sampler.num_negs_per_pos 32.0 +21 69 training.batch_size 1.0 +21 70 model.embedding_dim 2.0 +21 70 negative_sampler.num_negs_per_pos 85.0 +21 70 training.batch_size 0.0 +21 71 model.embedding_dim 2.0 +21 71 negative_sampler.num_negs_per_pos 83.0 +21 71 training.batch_size 0.0 +21 72 model.embedding_dim 1.0 +21 72 negative_sampler.num_negs_per_pos 35.0 +21 72 training.batch_size 2.0 +21 73 model.embedding_dim 1.0 +21 73 negative_sampler.num_negs_per_pos 40.0 +21 73 training.batch_size 1.0 +21 74 model.embedding_dim 1.0 +21 74 negative_sampler.num_negs_per_pos 26.0 +21 74 training.batch_size 2.0 +21 75 model.embedding_dim 1.0 +21 75 negative_sampler.num_negs_per_pos 10.0 +21 75 training.batch_size 0.0 +21 76 model.embedding_dim 2.0 +21 76 negative_sampler.num_negs_per_pos 28.0 +21 76 training.batch_size 0.0 +21 77 model.embedding_dim 0.0 +21 77 negative_sampler.num_negs_per_pos 60.0 +21 77 training.batch_size 0.0 +21 78 model.embedding_dim 1.0 +21 78 negative_sampler.num_negs_per_pos 1.0 +21 78 training.batch_size 0.0 +21 79 model.embedding_dim 1.0 +21 79 negative_sampler.num_negs_per_pos 59.0 +21 79 training.batch_size 0.0 +21 80 model.embedding_dim 0.0 +21 80 negative_sampler.num_negs_per_pos 94.0 +21 80 training.batch_size 0.0 +21 81 model.embedding_dim 0.0 +21 81 negative_sampler.num_negs_per_pos 87.0 +21 81 training.batch_size 2.0 +21 82 model.embedding_dim 1.0 
+21 82 negative_sampler.num_negs_per_pos 41.0 +21 82 training.batch_size 0.0 +21 83 model.embedding_dim 0.0 +21 83 negative_sampler.num_negs_per_pos 76.0 +21 83 training.batch_size 0.0 +21 84 model.embedding_dim 0.0 +21 84 negative_sampler.num_negs_per_pos 11.0 +21 84 training.batch_size 1.0 +21 85 model.embedding_dim 1.0 +21 85 negative_sampler.num_negs_per_pos 17.0 +21 85 training.batch_size 0.0 +21 86 model.embedding_dim 2.0 +21 86 negative_sampler.num_negs_per_pos 22.0 +21 86 training.batch_size 2.0 +21 87 model.embedding_dim 0.0 +21 87 negative_sampler.num_negs_per_pos 84.0 +21 87 training.batch_size 0.0 +21 88 model.embedding_dim 0.0 +21 88 negative_sampler.num_negs_per_pos 72.0 +21 88 training.batch_size 1.0 +21 89 model.embedding_dim 1.0 +21 89 negative_sampler.num_negs_per_pos 29.0 +21 89 training.batch_size 2.0 +21 90 model.embedding_dim 0.0 +21 90 negative_sampler.num_negs_per_pos 75.0 +21 90 training.batch_size 0.0 +21 91 model.embedding_dim 0.0 +21 91 negative_sampler.num_negs_per_pos 16.0 +21 91 training.batch_size 0.0 +21 92 model.embedding_dim 0.0 +21 92 negative_sampler.num_negs_per_pos 69.0 +21 92 training.batch_size 1.0 +21 93 model.embedding_dim 1.0 +21 93 negative_sampler.num_negs_per_pos 16.0 +21 93 training.batch_size 0.0 +21 94 model.embedding_dim 1.0 +21 94 negative_sampler.num_negs_per_pos 29.0 +21 94 training.batch_size 1.0 +21 95 model.embedding_dim 0.0 +21 95 negative_sampler.num_negs_per_pos 66.0 +21 95 training.batch_size 0.0 +21 96 model.embedding_dim 1.0 +21 96 negative_sampler.num_negs_per_pos 40.0 +21 96 training.batch_size 2.0 +21 97 model.embedding_dim 0.0 +21 97 negative_sampler.num_negs_per_pos 88.0 +21 97 training.batch_size 1.0 +21 98 model.embedding_dim 0.0 +21 98 negative_sampler.num_negs_per_pos 98.0 +21 98 training.batch_size 0.0 +21 99 model.embedding_dim 2.0 +21 99 negative_sampler.num_negs_per_pos 64.0 +21 99 training.batch_size 1.0 +21 100 model.embedding_dim 1.0 +21 100 negative_sampler.num_negs_per_pos 25.0 +21 100 
training.batch_size 0.0 +21 1 dataset """kinships""" +21 1 model """complex""" +21 1 loss """softplus""" +21 1 regularizer """no""" +21 1 optimizer """adadelta""" +21 1 training_loop """owa""" +21 1 negative_sampler """basic""" +21 1 evaluator """rankbased""" +21 2 dataset """kinships""" +21 2 model """complex""" +21 2 loss """softplus""" +21 2 regularizer """no""" +21 2 optimizer """adadelta""" +21 2 training_loop """owa""" +21 2 negative_sampler """basic""" +21 2 evaluator """rankbased""" +21 3 dataset """kinships""" +21 3 model """complex""" +21 3 loss """softplus""" +21 3 regularizer """no""" +21 3 optimizer """adadelta""" +21 3 training_loop """owa""" +21 3 negative_sampler """basic""" +21 3 evaluator """rankbased""" +21 4 dataset """kinships""" +21 4 model """complex""" +21 4 loss """softplus""" +21 4 regularizer """no""" +21 4 optimizer """adadelta""" +21 4 training_loop """owa""" +21 4 negative_sampler """basic""" +21 4 evaluator """rankbased""" +21 5 dataset """kinships""" +21 5 model """complex""" +21 5 loss """softplus""" +21 5 regularizer """no""" +21 5 optimizer """adadelta""" +21 5 training_loop """owa""" +21 5 negative_sampler """basic""" +21 5 evaluator """rankbased""" +21 6 dataset """kinships""" +21 6 model """complex""" +21 6 loss """softplus""" +21 6 regularizer """no""" +21 6 optimizer """adadelta""" +21 6 training_loop """owa""" +21 6 negative_sampler """basic""" +21 6 evaluator """rankbased""" +21 7 dataset """kinships""" +21 7 model """complex""" +21 7 loss """softplus""" +21 7 regularizer """no""" +21 7 optimizer """adadelta""" +21 7 training_loop """owa""" +21 7 negative_sampler """basic""" +21 7 evaluator """rankbased""" +21 8 dataset """kinships""" +21 8 model """complex""" +21 8 loss """softplus""" +21 8 regularizer """no""" +21 8 optimizer """adadelta""" +21 8 training_loop """owa""" +21 8 negative_sampler """basic""" +21 8 evaluator """rankbased""" +21 9 dataset """kinships""" +21 9 model """complex""" +21 9 loss """softplus""" +21 9 
regularizer """no""" +21 9 optimizer """adadelta""" +21 9 training_loop """owa""" +21 9 negative_sampler """basic""" +21 9 evaluator """rankbased""" +21 10 dataset """kinships""" +21 10 model """complex""" +21 10 loss """softplus""" +21 10 regularizer """no""" +21 10 optimizer """adadelta""" +21 10 training_loop """owa""" +21 10 negative_sampler """basic""" +21 10 evaluator """rankbased""" +21 11 dataset """kinships""" +21 11 model """complex""" +21 11 loss """softplus""" +21 11 regularizer """no""" +21 11 optimizer """adadelta""" +21 11 training_loop """owa""" +21 11 negative_sampler """basic""" +21 11 evaluator """rankbased""" +21 12 dataset """kinships""" +21 12 model """complex""" +21 12 loss """softplus""" +21 12 regularizer """no""" +21 12 optimizer """adadelta""" +21 12 training_loop """owa""" +21 12 negative_sampler """basic""" +21 12 evaluator """rankbased""" +21 13 dataset """kinships""" +21 13 model """complex""" +21 13 loss """softplus""" +21 13 regularizer """no""" +21 13 optimizer """adadelta""" +21 13 training_loop """owa""" +21 13 negative_sampler """basic""" +21 13 evaluator """rankbased""" +21 14 dataset """kinships""" +21 14 model """complex""" +21 14 loss """softplus""" +21 14 regularizer """no""" +21 14 optimizer """adadelta""" +21 14 training_loop """owa""" +21 14 negative_sampler """basic""" +21 14 evaluator """rankbased""" +21 15 dataset """kinships""" +21 15 model """complex""" +21 15 loss """softplus""" +21 15 regularizer """no""" +21 15 optimizer """adadelta""" +21 15 training_loop """owa""" +21 15 negative_sampler """basic""" +21 15 evaluator """rankbased""" +21 16 dataset """kinships""" +21 16 model """complex""" +21 16 loss """softplus""" +21 16 regularizer """no""" +21 16 optimizer """adadelta""" +21 16 training_loop """owa""" +21 16 negative_sampler """basic""" +21 16 evaluator """rankbased""" +21 17 dataset """kinships""" +21 17 model """complex""" +21 17 loss """softplus""" +21 17 regularizer """no""" +21 17 optimizer 
"""adadelta""" +21 17 training_loop """owa""" +21 17 negative_sampler """basic""" +21 17 evaluator """rankbased""" +21 18 dataset """kinships""" +21 18 model """complex""" +21 18 loss """softplus""" +21 18 regularizer """no""" +21 18 optimizer """adadelta""" +21 18 training_loop """owa""" +21 18 negative_sampler """basic""" +21 18 evaluator """rankbased""" +21 19 dataset """kinships""" +21 19 model """complex""" +21 19 loss """softplus""" +21 19 regularizer """no""" +21 19 optimizer """adadelta""" +21 19 training_loop """owa""" +21 19 negative_sampler """basic""" +21 19 evaluator """rankbased""" +21 20 dataset """kinships""" +21 20 model """complex""" +21 20 loss """softplus""" +21 20 regularizer """no""" +21 20 optimizer """adadelta""" +21 20 training_loop """owa""" +21 20 negative_sampler """basic""" +21 20 evaluator """rankbased""" +21 21 dataset """kinships""" +21 21 model """complex""" +21 21 loss """softplus""" +21 21 regularizer """no""" +21 21 optimizer """adadelta""" +21 21 training_loop """owa""" +21 21 negative_sampler """basic""" +21 21 evaluator """rankbased""" +21 22 dataset """kinships""" +21 22 model """complex""" +21 22 loss """softplus""" +21 22 regularizer """no""" +21 22 optimizer """adadelta""" +21 22 training_loop """owa""" +21 22 negative_sampler """basic""" +21 22 evaluator """rankbased""" +21 23 dataset """kinships""" +21 23 model """complex""" +21 23 loss """softplus""" +21 23 regularizer """no""" +21 23 optimizer """adadelta""" +21 23 training_loop """owa""" +21 23 negative_sampler """basic""" +21 23 evaluator """rankbased""" +21 24 dataset """kinships""" +21 24 model """complex""" +21 24 loss """softplus""" +21 24 regularizer """no""" +21 24 optimizer """adadelta""" +21 24 training_loop """owa""" +21 24 negative_sampler """basic""" +21 24 evaluator """rankbased""" +21 25 dataset """kinships""" +21 25 model """complex""" +21 25 loss """softplus""" +21 25 regularizer """no""" +21 25 optimizer """adadelta""" +21 25 training_loop """owa""" 
+21 25 negative_sampler """basic""" +21 25 evaluator """rankbased""" +21 26 dataset """kinships""" +21 26 model """complex""" +21 26 loss """softplus""" +21 26 regularizer """no""" +21 26 optimizer """adadelta""" +21 26 training_loop """owa""" +21 26 negative_sampler """basic""" +21 26 evaluator """rankbased""" +21 27 dataset """kinships""" +21 27 model """complex""" +21 27 loss """softplus""" +21 27 regularizer """no""" +21 27 optimizer """adadelta""" +21 27 training_loop """owa""" +21 27 negative_sampler """basic""" +21 27 evaluator """rankbased""" +21 28 dataset """kinships""" +21 28 model """complex""" +21 28 loss """softplus""" +21 28 regularizer """no""" +21 28 optimizer """adadelta""" +21 28 training_loop """owa""" +21 28 negative_sampler """basic""" +21 28 evaluator """rankbased""" +21 29 dataset """kinships""" +21 29 model """complex""" +21 29 loss """softplus""" +21 29 regularizer """no""" +21 29 optimizer """adadelta""" +21 29 training_loop """owa""" +21 29 negative_sampler """basic""" +21 29 evaluator """rankbased""" +21 30 dataset """kinships""" +21 30 model """complex""" +21 30 loss """softplus""" +21 30 regularizer """no""" +21 30 optimizer """adadelta""" +21 30 training_loop """owa""" +21 30 negative_sampler """basic""" +21 30 evaluator """rankbased""" +21 31 dataset """kinships""" +21 31 model """complex""" +21 31 loss """softplus""" +21 31 regularizer """no""" +21 31 optimizer """adadelta""" +21 31 training_loop """owa""" +21 31 negative_sampler """basic""" +21 31 evaluator """rankbased""" +21 32 dataset """kinships""" +21 32 model """complex""" +21 32 loss """softplus""" +21 32 regularizer """no""" +21 32 optimizer """adadelta""" +21 32 training_loop """owa""" +21 32 negative_sampler """basic""" +21 32 evaluator """rankbased""" +21 33 dataset """kinships""" +21 33 model """complex""" +21 33 loss """softplus""" +21 33 regularizer """no""" +21 33 optimizer """adadelta""" +21 33 training_loop """owa""" +21 33 negative_sampler """basic""" +21 33 
evaluator """rankbased""" +21 34 dataset """kinships""" +21 34 model """complex""" +21 34 loss """softplus""" +21 34 regularizer """no""" +21 34 optimizer """adadelta""" +21 34 training_loop """owa""" +21 34 negative_sampler """basic""" +21 34 evaluator """rankbased""" +21 35 dataset """kinships""" +21 35 model """complex""" +21 35 loss """softplus""" +21 35 regularizer """no""" +21 35 optimizer """adadelta""" +21 35 training_loop """owa""" +21 35 negative_sampler """basic""" +21 35 evaluator """rankbased""" +21 36 dataset """kinships""" +21 36 model """complex""" +21 36 loss """softplus""" +21 36 regularizer """no""" +21 36 optimizer """adadelta""" +21 36 training_loop """owa""" +21 36 negative_sampler """basic""" +21 36 evaluator """rankbased""" +21 37 dataset """kinships""" +21 37 model """complex""" +21 37 loss """softplus""" +21 37 regularizer """no""" +21 37 optimizer """adadelta""" +21 37 training_loop """owa""" +21 37 negative_sampler """basic""" +21 37 evaluator """rankbased""" +21 38 dataset """kinships""" +21 38 model """complex""" +21 38 loss """softplus""" +21 38 regularizer """no""" +21 38 optimizer """adadelta""" +21 38 training_loop """owa""" +21 38 negative_sampler """basic""" +21 38 evaluator """rankbased""" +21 39 dataset """kinships""" +21 39 model """complex""" +21 39 loss """softplus""" +21 39 regularizer """no""" +21 39 optimizer """adadelta""" +21 39 training_loop """owa""" +21 39 negative_sampler """basic""" +21 39 evaluator """rankbased""" +21 40 dataset """kinships""" +21 40 model """complex""" +21 40 loss """softplus""" +21 40 regularizer """no""" +21 40 optimizer """adadelta""" +21 40 training_loop """owa""" +21 40 negative_sampler """basic""" +21 40 evaluator """rankbased""" +21 41 dataset """kinships""" +21 41 model """complex""" +21 41 loss """softplus""" +21 41 regularizer """no""" +21 41 optimizer """adadelta""" +21 41 training_loop """owa""" +21 41 negative_sampler """basic""" +21 41 evaluator """rankbased""" +21 42 dataset 
"""kinships""" +21 42 model """complex""" +21 42 loss """softplus""" +21 42 regularizer """no""" +21 42 optimizer """adadelta""" +21 42 training_loop """owa""" +21 42 negative_sampler """basic""" +21 42 evaluator """rankbased""" +21 43 dataset """kinships""" +21 43 model """complex""" +21 43 loss """softplus""" +21 43 regularizer """no""" +21 43 optimizer """adadelta""" +21 43 training_loop """owa""" +21 43 negative_sampler """basic""" +21 43 evaluator """rankbased""" +21 44 dataset """kinships""" +21 44 model """complex""" +21 44 loss """softplus""" +21 44 regularizer """no""" +21 44 optimizer """adadelta""" +21 44 training_loop """owa""" +21 44 negative_sampler """basic""" +21 44 evaluator """rankbased""" +21 45 dataset """kinships""" +21 45 model """complex""" +21 45 loss """softplus""" +21 45 regularizer """no""" +21 45 optimizer """adadelta""" +21 45 training_loop """owa""" +21 45 negative_sampler """basic""" +21 45 evaluator """rankbased""" +21 46 dataset """kinships""" +21 46 model """complex""" +21 46 loss """softplus""" +21 46 regularizer """no""" +21 46 optimizer """adadelta""" +21 46 training_loop """owa""" +21 46 negative_sampler """basic""" +21 46 evaluator """rankbased""" +21 47 dataset """kinships""" +21 47 model """complex""" +21 47 loss """softplus""" +21 47 regularizer """no""" +21 47 optimizer """adadelta""" +21 47 training_loop """owa""" +21 47 negative_sampler """basic""" +21 47 evaluator """rankbased""" +21 48 dataset """kinships""" +21 48 model """complex""" +21 48 loss """softplus""" +21 48 regularizer """no""" +21 48 optimizer """adadelta""" +21 48 training_loop """owa""" +21 48 negative_sampler """basic""" +21 48 evaluator """rankbased""" +21 49 dataset """kinships""" +21 49 model """complex""" +21 49 loss """softplus""" +21 49 regularizer """no""" +21 49 optimizer """adadelta""" +21 49 training_loop """owa""" +21 49 negative_sampler """basic""" +21 49 evaluator """rankbased""" +21 50 dataset """kinships""" +21 50 model """complex""" +21 
50 loss """softplus""" +21 50 regularizer """no""" +21 50 optimizer """adadelta""" +21 50 training_loop """owa""" +21 50 negative_sampler """basic""" +21 50 evaluator """rankbased""" +21 51 dataset """kinships""" +21 51 model """complex""" +21 51 loss """softplus""" +21 51 regularizer """no""" +21 51 optimizer """adadelta""" +21 51 training_loop """owa""" +21 51 negative_sampler """basic""" +21 51 evaluator """rankbased""" +21 52 dataset """kinships""" +21 52 model """complex""" +21 52 loss """softplus""" +21 52 regularizer """no""" +21 52 optimizer """adadelta""" +21 52 training_loop """owa""" +21 52 negative_sampler """basic""" +21 52 evaluator """rankbased""" +21 53 dataset """kinships""" +21 53 model """complex""" +21 53 loss """softplus""" +21 53 regularizer """no""" +21 53 optimizer """adadelta""" +21 53 training_loop """owa""" +21 53 negative_sampler """basic""" +21 53 evaluator """rankbased""" +21 54 dataset """kinships""" +21 54 model """complex""" +21 54 loss """softplus""" +21 54 regularizer """no""" +21 54 optimizer """adadelta""" +21 54 training_loop """owa""" +21 54 negative_sampler """basic""" +21 54 evaluator """rankbased""" +21 55 dataset """kinships""" +21 55 model """complex""" +21 55 loss """softplus""" +21 55 regularizer """no""" +21 55 optimizer """adadelta""" +21 55 training_loop """owa""" +21 55 negative_sampler """basic""" +21 55 evaluator """rankbased""" +21 56 dataset """kinships""" +21 56 model """complex""" +21 56 loss """softplus""" +21 56 regularizer """no""" +21 56 optimizer """adadelta""" +21 56 training_loop """owa""" +21 56 negative_sampler """basic""" +21 56 evaluator """rankbased""" +21 57 dataset """kinships""" +21 57 model """complex""" +21 57 loss """softplus""" +21 57 regularizer """no""" +21 57 optimizer """adadelta""" +21 57 training_loop """owa""" +21 57 negative_sampler """basic""" +21 57 evaluator """rankbased""" +21 58 dataset """kinships""" +21 58 model """complex""" +21 58 loss """softplus""" +21 58 regularizer 
"""no""" +21 58 optimizer """adadelta""" +21 58 training_loop """owa""" +21 58 negative_sampler """basic""" +21 58 evaluator """rankbased""" +21 59 dataset """kinships""" +21 59 model """complex""" +21 59 loss """softplus""" +21 59 regularizer """no""" +21 59 optimizer """adadelta""" +21 59 training_loop """owa""" +21 59 negative_sampler """basic""" +21 59 evaluator """rankbased""" +21 60 dataset """kinships""" +21 60 model """complex""" +21 60 loss """softplus""" +21 60 regularizer """no""" +21 60 optimizer """adadelta""" +21 60 training_loop """owa""" +21 60 negative_sampler """basic""" +21 60 evaluator """rankbased""" +21 61 dataset """kinships""" +21 61 model """complex""" +21 61 loss """softplus""" +21 61 regularizer """no""" +21 61 optimizer """adadelta""" +21 61 training_loop """owa""" +21 61 negative_sampler """basic""" +21 61 evaluator """rankbased""" +21 62 dataset """kinships""" +21 62 model """complex""" +21 62 loss """softplus""" +21 62 regularizer """no""" +21 62 optimizer """adadelta""" +21 62 training_loop """owa""" +21 62 negative_sampler """basic""" +21 62 evaluator """rankbased""" +21 63 dataset """kinships""" +21 63 model """complex""" +21 63 loss """softplus""" +21 63 regularizer """no""" +21 63 optimizer """adadelta""" +21 63 training_loop """owa""" +21 63 negative_sampler """basic""" +21 63 evaluator """rankbased""" +21 64 dataset """kinships""" +21 64 model """complex""" +21 64 loss """softplus""" +21 64 regularizer """no""" +21 64 optimizer """adadelta""" +21 64 training_loop """owa""" +21 64 negative_sampler """basic""" +21 64 evaluator """rankbased""" +21 65 dataset """kinships""" +21 65 model """complex""" +21 65 loss """softplus""" +21 65 regularizer """no""" +21 65 optimizer """adadelta""" +21 65 training_loop """owa""" +21 65 negative_sampler """basic""" +21 65 evaluator """rankbased""" +21 66 dataset """kinships""" +21 66 model """complex""" +21 66 loss """softplus""" +21 66 regularizer """no""" +21 66 optimizer """adadelta""" +21 66 
training_loop """owa""" +21 66 negative_sampler """basic""" +21 66 evaluator """rankbased""" +21 67 dataset """kinships""" +21 67 model """complex""" +21 67 loss """softplus""" +21 67 regularizer """no""" +21 67 optimizer """adadelta""" +21 67 training_loop """owa""" +21 67 negative_sampler """basic""" +21 67 evaluator """rankbased""" +21 68 dataset """kinships""" +21 68 model """complex""" +21 68 loss """softplus""" +21 68 regularizer """no""" +21 68 optimizer """adadelta""" +21 68 training_loop """owa""" +21 68 negative_sampler """basic""" +21 68 evaluator """rankbased""" +21 69 dataset """kinships""" +21 69 model """complex""" +21 69 loss """softplus""" +21 69 regularizer """no""" +21 69 optimizer """adadelta""" +21 69 training_loop """owa""" +21 69 negative_sampler """basic""" +21 69 evaluator """rankbased""" +21 70 dataset """kinships""" +21 70 model """complex""" +21 70 loss """softplus""" +21 70 regularizer """no""" +21 70 optimizer """adadelta""" +21 70 training_loop """owa""" +21 70 negative_sampler """basic""" +21 70 evaluator """rankbased""" +21 71 dataset """kinships""" +21 71 model """complex""" +21 71 loss """softplus""" +21 71 regularizer """no""" +21 71 optimizer """adadelta""" +21 71 training_loop """owa""" +21 71 negative_sampler """basic""" +21 71 evaluator """rankbased""" +21 72 dataset """kinships""" +21 72 model """complex""" +21 72 loss """softplus""" +21 72 regularizer """no""" +21 72 optimizer """adadelta""" +21 72 training_loop """owa""" +21 72 negative_sampler """basic""" +21 72 evaluator """rankbased""" +21 73 dataset """kinships""" +21 73 model """complex""" +21 73 loss """softplus""" +21 73 regularizer """no""" +21 73 optimizer """adadelta""" +21 73 training_loop """owa""" +21 73 negative_sampler """basic""" +21 73 evaluator """rankbased""" +21 74 dataset """kinships""" +21 74 model """complex""" +21 74 loss """softplus""" +21 74 regularizer """no""" +21 74 optimizer """adadelta""" +21 74 training_loop """owa""" +21 74 negative_sampler 
"""basic""" +21 74 evaluator """rankbased""" +21 75 dataset """kinships""" +21 75 model """complex""" +21 75 loss """softplus""" +21 75 regularizer """no""" +21 75 optimizer """adadelta""" +21 75 training_loop """owa""" +21 75 negative_sampler """basic""" +21 75 evaluator """rankbased""" +21 76 dataset """kinships""" +21 76 model """complex""" +21 76 loss """softplus""" +21 76 regularizer """no""" +21 76 optimizer """adadelta""" +21 76 training_loop """owa""" +21 76 negative_sampler """basic""" +21 76 evaluator """rankbased""" +21 77 dataset """kinships""" +21 77 model """complex""" +21 77 loss """softplus""" +21 77 regularizer """no""" +21 77 optimizer """adadelta""" +21 77 training_loop """owa""" +21 77 negative_sampler """basic""" +21 77 evaluator """rankbased""" +21 78 dataset """kinships""" +21 78 model """complex""" +21 78 loss """softplus""" +21 78 regularizer """no""" +21 78 optimizer """adadelta""" +21 78 training_loop """owa""" +21 78 negative_sampler """basic""" +21 78 evaluator """rankbased""" +21 79 dataset """kinships""" +21 79 model """complex""" +21 79 loss """softplus""" +21 79 regularizer """no""" +21 79 optimizer """adadelta""" +21 79 training_loop """owa""" +21 79 negative_sampler """basic""" +21 79 evaluator """rankbased""" +21 80 dataset """kinships""" +21 80 model """complex""" +21 80 loss """softplus""" +21 80 regularizer """no""" +21 80 optimizer """adadelta""" +21 80 training_loop """owa""" +21 80 negative_sampler """basic""" +21 80 evaluator """rankbased""" +21 81 dataset """kinships""" +21 81 model """complex""" +21 81 loss """softplus""" +21 81 regularizer """no""" +21 81 optimizer """adadelta""" +21 81 training_loop """owa""" +21 81 negative_sampler """basic""" +21 81 evaluator """rankbased""" +21 82 dataset """kinships""" +21 82 model """complex""" +21 82 loss """softplus""" +21 82 regularizer """no""" +21 82 optimizer """adadelta""" +21 82 training_loop """owa""" +21 82 negative_sampler """basic""" +21 82 evaluator """rankbased""" 
+21 83 dataset """kinships""" +21 83 model """complex""" +21 83 loss """softplus""" +21 83 regularizer """no""" +21 83 optimizer """adadelta""" +21 83 training_loop """owa""" +21 83 negative_sampler """basic""" +21 83 evaluator """rankbased""" +21 84 dataset """kinships""" +21 84 model """complex""" +21 84 loss """softplus""" +21 84 regularizer """no""" +21 84 optimizer """adadelta""" +21 84 training_loop """owa""" +21 84 negative_sampler """basic""" +21 84 evaluator """rankbased""" +21 85 dataset """kinships""" +21 85 model """complex""" +21 85 loss """softplus""" +21 85 regularizer """no""" +21 85 optimizer """adadelta""" +21 85 training_loop """owa""" +21 85 negative_sampler """basic""" +21 85 evaluator """rankbased""" +21 86 dataset """kinships""" +21 86 model """complex""" +21 86 loss """softplus""" +21 86 regularizer """no""" +21 86 optimizer """adadelta""" +21 86 training_loop """owa""" +21 86 negative_sampler """basic""" +21 86 evaluator """rankbased""" +21 87 dataset """kinships""" +21 87 model """complex""" +21 87 loss """softplus""" +21 87 regularizer """no""" +21 87 optimizer """adadelta""" +21 87 training_loop """owa""" +21 87 negative_sampler """basic""" +21 87 evaluator """rankbased""" +21 88 dataset """kinships""" +21 88 model """complex""" +21 88 loss """softplus""" +21 88 regularizer """no""" +21 88 optimizer """adadelta""" +21 88 training_loop """owa""" +21 88 negative_sampler """basic""" +21 88 evaluator """rankbased""" +21 89 dataset """kinships""" +21 89 model """complex""" +21 89 loss """softplus""" +21 89 regularizer """no""" +21 89 optimizer """adadelta""" +21 89 training_loop """owa""" +21 89 negative_sampler """basic""" +21 89 evaluator """rankbased""" +21 90 dataset """kinships""" +21 90 model """complex""" +21 90 loss """softplus""" +21 90 regularizer """no""" +21 90 optimizer """adadelta""" +21 90 training_loop """owa""" +21 90 negative_sampler """basic""" +21 90 evaluator """rankbased""" +21 91 dataset """kinships""" +21 91 model 
"""complex""" +21 91 loss """softplus""" +21 91 regularizer """no""" +21 91 optimizer """adadelta""" +21 91 training_loop """owa""" +21 91 negative_sampler """basic""" +21 91 evaluator """rankbased""" +21 92 dataset """kinships""" +21 92 model """complex""" +21 92 loss """softplus""" +21 92 regularizer """no""" +21 92 optimizer """adadelta""" +21 92 training_loop """owa""" +21 92 negative_sampler """basic""" +21 92 evaluator """rankbased""" +21 93 dataset """kinships""" +21 93 model """complex""" +21 93 loss """softplus""" +21 93 regularizer """no""" +21 93 optimizer """adadelta""" +21 93 training_loop """owa""" +21 93 negative_sampler """basic""" +21 93 evaluator """rankbased""" +21 94 dataset """kinships""" +21 94 model """complex""" +21 94 loss """softplus""" +21 94 regularizer """no""" +21 94 optimizer """adadelta""" +21 94 training_loop """owa""" +21 94 negative_sampler """basic""" +21 94 evaluator """rankbased""" +21 95 dataset """kinships""" +21 95 model """complex""" +21 95 loss """softplus""" +21 95 regularizer """no""" +21 95 optimizer """adadelta""" +21 95 training_loop """owa""" +21 95 negative_sampler """basic""" +21 95 evaluator """rankbased""" +21 96 dataset """kinships""" +21 96 model """complex""" +21 96 loss """softplus""" +21 96 regularizer """no""" +21 96 optimizer """adadelta""" +21 96 training_loop """owa""" +21 96 negative_sampler """basic""" +21 96 evaluator """rankbased""" +21 97 dataset """kinships""" +21 97 model """complex""" +21 97 loss """softplus""" +21 97 regularizer """no""" +21 97 optimizer """adadelta""" +21 97 training_loop """owa""" +21 97 negative_sampler """basic""" +21 97 evaluator """rankbased""" +21 98 dataset """kinships""" +21 98 model """complex""" +21 98 loss """softplus""" +21 98 regularizer """no""" +21 98 optimizer """adadelta""" +21 98 training_loop """owa""" +21 98 negative_sampler """basic""" +21 98 evaluator """rankbased""" +21 99 dataset """kinships""" +21 99 model """complex""" +21 99 loss """softplus""" +21 99 
regularizer """no""" +21 99 optimizer """adadelta""" +21 99 training_loop """owa""" +21 99 negative_sampler """basic""" +21 99 evaluator """rankbased""" +21 100 dataset """kinships""" +21 100 model """complex""" +21 100 loss """softplus""" +21 100 regularizer """no""" +21 100 optimizer """adadelta""" +21 100 training_loop """owa""" +21 100 negative_sampler """basic""" +21 100 evaluator """rankbased""" +22 1 model.embedding_dim 1.0 +22 1 negative_sampler.num_negs_per_pos 91.0 +22 1 training.batch_size 0.0 +22 2 model.embedding_dim 1.0 +22 2 negative_sampler.num_negs_per_pos 43.0 +22 2 training.batch_size 0.0 +22 3 model.embedding_dim 1.0 +22 3 negative_sampler.num_negs_per_pos 34.0 +22 3 training.batch_size 0.0 +22 4 model.embedding_dim 0.0 +22 4 negative_sampler.num_negs_per_pos 13.0 +22 4 training.batch_size 1.0 +22 5 model.embedding_dim 2.0 +22 5 negative_sampler.num_negs_per_pos 33.0 +22 5 training.batch_size 0.0 +22 6 model.embedding_dim 0.0 +22 6 negative_sampler.num_negs_per_pos 17.0 +22 6 training.batch_size 1.0 +22 7 model.embedding_dim 1.0 +22 7 negative_sampler.num_negs_per_pos 88.0 +22 7 training.batch_size 2.0 +22 8 model.embedding_dim 2.0 +22 8 negative_sampler.num_negs_per_pos 20.0 +22 8 training.batch_size 0.0 +22 9 model.embedding_dim 1.0 +22 9 negative_sampler.num_negs_per_pos 98.0 +22 9 training.batch_size 1.0 +22 10 model.embedding_dim 0.0 +22 10 negative_sampler.num_negs_per_pos 89.0 +22 10 training.batch_size 0.0 +22 11 model.embedding_dim 0.0 +22 11 negative_sampler.num_negs_per_pos 49.0 +22 11 training.batch_size 2.0 +22 12 model.embedding_dim 2.0 +22 12 negative_sampler.num_negs_per_pos 9.0 +22 12 training.batch_size 2.0 +22 13 model.embedding_dim 2.0 +22 13 negative_sampler.num_negs_per_pos 31.0 +22 13 training.batch_size 1.0 +22 14 model.embedding_dim 0.0 +22 14 negative_sampler.num_negs_per_pos 97.0 +22 14 training.batch_size 2.0 +22 15 model.embedding_dim 1.0 +22 15 negative_sampler.num_negs_per_pos 18.0 +22 15 training.batch_size 0.0 
+22 16 model.embedding_dim 2.0 +22 16 negative_sampler.num_negs_per_pos 11.0 +22 16 training.batch_size 0.0 +22 17 model.embedding_dim 0.0 +22 17 negative_sampler.num_negs_per_pos 87.0 +22 17 training.batch_size 1.0 +22 18 model.embedding_dim 2.0 +22 18 negative_sampler.num_negs_per_pos 24.0 +22 18 training.batch_size 1.0 +22 19 model.embedding_dim 0.0 +22 19 negative_sampler.num_negs_per_pos 29.0 +22 19 training.batch_size 2.0 +22 20 model.embedding_dim 1.0 +22 20 negative_sampler.num_negs_per_pos 14.0 +22 20 training.batch_size 1.0 +22 21 model.embedding_dim 2.0 +22 21 negative_sampler.num_negs_per_pos 35.0 +22 21 training.batch_size 2.0 +22 22 model.embedding_dim 2.0 +22 22 negative_sampler.num_negs_per_pos 28.0 +22 22 training.batch_size 2.0 +22 23 model.embedding_dim 2.0 +22 23 negative_sampler.num_negs_per_pos 28.0 +22 23 training.batch_size 1.0 +22 24 model.embedding_dim 1.0 +22 24 negative_sampler.num_negs_per_pos 89.0 +22 24 training.batch_size 1.0 +22 25 model.embedding_dim 1.0 +22 25 negative_sampler.num_negs_per_pos 7.0 +22 25 training.batch_size 0.0 +22 26 model.embedding_dim 2.0 +22 26 negative_sampler.num_negs_per_pos 22.0 +22 26 training.batch_size 2.0 +22 27 model.embedding_dim 2.0 +22 27 negative_sampler.num_negs_per_pos 47.0 +22 27 training.batch_size 2.0 +22 28 model.embedding_dim 2.0 +22 28 negative_sampler.num_negs_per_pos 94.0 +22 28 training.batch_size 1.0 +22 29 model.embedding_dim 0.0 +22 29 negative_sampler.num_negs_per_pos 90.0 +22 29 training.batch_size 1.0 +22 30 model.embedding_dim 0.0 +22 30 negative_sampler.num_negs_per_pos 19.0 +22 30 training.batch_size 2.0 +22 31 model.embedding_dim 1.0 +22 31 negative_sampler.num_negs_per_pos 31.0 +22 31 training.batch_size 0.0 +22 32 model.embedding_dim 2.0 +22 32 negative_sampler.num_negs_per_pos 45.0 +22 32 training.batch_size 1.0 +22 33 model.embedding_dim 0.0 +22 33 negative_sampler.num_negs_per_pos 27.0 +22 33 training.batch_size 1.0 +22 34 model.embedding_dim 2.0 +22 34 
negative_sampler.num_negs_per_pos 87.0 +22 34 training.batch_size 2.0 +22 35 model.embedding_dim 2.0 +22 35 negative_sampler.num_negs_per_pos 78.0 +22 35 training.batch_size 1.0 +22 36 model.embedding_dim 1.0 +22 36 negative_sampler.num_negs_per_pos 67.0 +22 36 training.batch_size 0.0 +22 37 model.embedding_dim 1.0 +22 37 negative_sampler.num_negs_per_pos 50.0 +22 37 training.batch_size 0.0 +22 38 model.embedding_dim 2.0 +22 38 negative_sampler.num_negs_per_pos 53.0 +22 38 training.batch_size 2.0 +22 39 model.embedding_dim 1.0 +22 39 negative_sampler.num_negs_per_pos 87.0 +22 39 training.batch_size 2.0 +22 40 model.embedding_dim 2.0 +22 40 negative_sampler.num_negs_per_pos 60.0 +22 40 training.batch_size 2.0 +22 41 model.embedding_dim 0.0 +22 41 negative_sampler.num_negs_per_pos 43.0 +22 41 training.batch_size 0.0 +22 42 model.embedding_dim 1.0 +22 42 negative_sampler.num_negs_per_pos 74.0 +22 42 training.batch_size 1.0 +22 43 model.embedding_dim 0.0 +22 43 negative_sampler.num_negs_per_pos 53.0 +22 43 training.batch_size 2.0 +22 44 model.embedding_dim 1.0 +22 44 negative_sampler.num_negs_per_pos 13.0 +22 44 training.batch_size 0.0 +22 45 model.embedding_dim 0.0 +22 45 negative_sampler.num_negs_per_pos 81.0 +22 45 training.batch_size 2.0 +22 46 model.embedding_dim 2.0 +22 46 negative_sampler.num_negs_per_pos 45.0 +22 46 training.batch_size 1.0 +22 47 model.embedding_dim 1.0 +22 47 negative_sampler.num_negs_per_pos 35.0 +22 47 training.batch_size 1.0 +22 48 model.embedding_dim 0.0 +22 48 negative_sampler.num_negs_per_pos 13.0 +22 48 training.batch_size 0.0 +22 49 model.embedding_dim 0.0 +22 49 negative_sampler.num_negs_per_pos 24.0 +22 49 training.batch_size 2.0 +22 50 model.embedding_dim 2.0 +22 50 negative_sampler.num_negs_per_pos 15.0 +22 50 training.batch_size 1.0 +22 51 model.embedding_dim 1.0 +22 51 negative_sampler.num_negs_per_pos 60.0 +22 51 training.batch_size 0.0 +22 52 model.embedding_dim 0.0 +22 52 negative_sampler.num_negs_per_pos 47.0 +22 52 
training.batch_size 0.0 +22 53 model.embedding_dim 1.0 +22 53 negative_sampler.num_negs_per_pos 4.0 +22 53 training.batch_size 2.0 +22 54 model.embedding_dim 2.0 +22 54 negative_sampler.num_negs_per_pos 72.0 +22 54 training.batch_size 1.0 +22 55 model.embedding_dim 1.0 +22 55 negative_sampler.num_negs_per_pos 15.0 +22 55 training.batch_size 2.0 +22 56 model.embedding_dim 2.0 +22 56 negative_sampler.num_negs_per_pos 71.0 +22 56 training.batch_size 1.0 +22 57 model.embedding_dim 0.0 +22 57 negative_sampler.num_negs_per_pos 3.0 +22 57 training.batch_size 2.0 +22 58 model.embedding_dim 2.0 +22 58 negative_sampler.num_negs_per_pos 47.0 +22 58 training.batch_size 1.0 +22 59 model.embedding_dim 2.0 +22 59 negative_sampler.num_negs_per_pos 22.0 +22 59 training.batch_size 1.0 +22 60 model.embedding_dim 2.0 +22 60 negative_sampler.num_negs_per_pos 86.0 +22 60 training.batch_size 0.0 +22 61 model.embedding_dim 1.0 +22 61 negative_sampler.num_negs_per_pos 64.0 +22 61 training.batch_size 2.0 +22 62 model.embedding_dim 1.0 +22 62 negative_sampler.num_negs_per_pos 23.0 +22 62 training.batch_size 2.0 +22 63 model.embedding_dim 1.0 +22 63 negative_sampler.num_negs_per_pos 8.0 +22 63 training.batch_size 0.0 +22 64 model.embedding_dim 0.0 +22 64 negative_sampler.num_negs_per_pos 50.0 +22 64 training.batch_size 2.0 +22 65 model.embedding_dim 2.0 +22 65 negative_sampler.num_negs_per_pos 92.0 +22 65 training.batch_size 1.0 +22 66 model.embedding_dim 0.0 +22 66 negative_sampler.num_negs_per_pos 55.0 +22 66 training.batch_size 2.0 +22 67 model.embedding_dim 1.0 +22 67 negative_sampler.num_negs_per_pos 61.0 +22 67 training.batch_size 0.0 +22 68 model.embedding_dim 1.0 +22 68 negative_sampler.num_negs_per_pos 0.0 +22 68 training.batch_size 0.0 +22 69 model.embedding_dim 0.0 +22 69 negative_sampler.num_negs_per_pos 10.0 +22 69 training.batch_size 1.0 +22 70 model.embedding_dim 2.0 +22 70 negative_sampler.num_negs_per_pos 31.0 +22 70 training.batch_size 2.0 +22 71 model.embedding_dim 2.0 +22 
71 negative_sampler.num_negs_per_pos 33.0 +22 71 training.batch_size 1.0 +22 72 model.embedding_dim 1.0 +22 72 negative_sampler.num_negs_per_pos 4.0 +22 72 training.batch_size 1.0 +22 73 model.embedding_dim 2.0 +22 73 negative_sampler.num_negs_per_pos 37.0 +22 73 training.batch_size 2.0 +22 74 model.embedding_dim 1.0 +22 74 negative_sampler.num_negs_per_pos 74.0 +22 74 training.batch_size 1.0 +22 75 model.embedding_dim 0.0 +22 75 negative_sampler.num_negs_per_pos 69.0 +22 75 training.batch_size 1.0 +22 76 model.embedding_dim 0.0 +22 76 negative_sampler.num_negs_per_pos 79.0 +22 76 training.batch_size 1.0 +22 77 model.embedding_dim 1.0 +22 77 negative_sampler.num_negs_per_pos 56.0 +22 77 training.batch_size 1.0 +22 78 model.embedding_dim 0.0 +22 78 negative_sampler.num_negs_per_pos 21.0 +22 78 training.batch_size 0.0 +22 79 model.embedding_dim 1.0 +22 79 negative_sampler.num_negs_per_pos 5.0 +22 79 training.batch_size 2.0 +22 80 model.embedding_dim 1.0 +22 80 negative_sampler.num_negs_per_pos 45.0 +22 80 training.batch_size 1.0 +22 81 model.embedding_dim 1.0 +22 81 negative_sampler.num_negs_per_pos 61.0 +22 81 training.batch_size 0.0 +22 82 model.embedding_dim 1.0 +22 82 negative_sampler.num_negs_per_pos 51.0 +22 82 training.batch_size 1.0 +22 83 model.embedding_dim 0.0 +22 83 negative_sampler.num_negs_per_pos 37.0 +22 83 training.batch_size 1.0 +22 84 model.embedding_dim 1.0 +22 84 negative_sampler.num_negs_per_pos 49.0 +22 84 training.batch_size 0.0 +22 85 model.embedding_dim 0.0 +22 85 negative_sampler.num_negs_per_pos 40.0 +22 85 training.batch_size 1.0 +22 86 model.embedding_dim 1.0 +22 86 negative_sampler.num_negs_per_pos 25.0 +22 86 training.batch_size 2.0 +22 87 model.embedding_dim 1.0 +22 87 negative_sampler.num_negs_per_pos 97.0 +22 87 training.batch_size 1.0 +22 88 model.embedding_dim 0.0 +22 88 negative_sampler.num_negs_per_pos 19.0 +22 88 training.batch_size 1.0 +22 89 model.embedding_dim 1.0 +22 89 negative_sampler.num_negs_per_pos 69.0 +22 89 
training.batch_size 2.0 +22 90 model.embedding_dim 0.0 +22 90 negative_sampler.num_negs_per_pos 87.0 +22 90 training.batch_size 2.0 +22 91 model.embedding_dim 1.0 +22 91 negative_sampler.num_negs_per_pos 66.0 +22 91 training.batch_size 2.0 +22 92 model.embedding_dim 0.0 +22 92 negative_sampler.num_negs_per_pos 95.0 +22 92 training.batch_size 0.0 +22 93 model.embedding_dim 2.0 +22 93 negative_sampler.num_negs_per_pos 22.0 +22 93 training.batch_size 1.0 +22 94 model.embedding_dim 1.0 +22 94 negative_sampler.num_negs_per_pos 66.0 +22 94 training.batch_size 1.0 +22 95 model.embedding_dim 1.0 +22 95 negative_sampler.num_negs_per_pos 96.0 +22 95 training.batch_size 2.0 +22 96 model.embedding_dim 1.0 +22 96 negative_sampler.num_negs_per_pos 1.0 +22 96 training.batch_size 2.0 +22 97 model.embedding_dim 1.0 +22 97 negative_sampler.num_negs_per_pos 35.0 +22 97 training.batch_size 0.0 +22 98 model.embedding_dim 1.0 +22 98 negative_sampler.num_negs_per_pos 36.0 +22 98 training.batch_size 2.0 +22 99 model.embedding_dim 2.0 +22 99 negative_sampler.num_negs_per_pos 19.0 +22 99 training.batch_size 0.0 +22 100 model.embedding_dim 2.0 +22 100 negative_sampler.num_negs_per_pos 73.0 +22 100 training.batch_size 2.0 +22 1 dataset """kinships""" +22 1 model """complex""" +22 1 loss """bceaftersigmoid""" +22 1 regularizer """no""" +22 1 optimizer """adadelta""" +22 1 training_loop """owa""" +22 1 negative_sampler """basic""" +22 1 evaluator """rankbased""" +22 2 dataset """kinships""" +22 2 model """complex""" +22 2 loss """bceaftersigmoid""" +22 2 regularizer """no""" +22 2 optimizer """adadelta""" +22 2 training_loop """owa""" +22 2 negative_sampler """basic""" +22 2 evaluator """rankbased""" +22 3 dataset """kinships""" +22 3 model """complex""" +22 3 loss """bceaftersigmoid""" +22 3 regularizer """no""" +22 3 optimizer """adadelta""" +22 3 training_loop """owa""" +22 3 negative_sampler """basic""" +22 3 evaluator """rankbased""" +22 4 dataset """kinships""" +22 4 model """complex""" 
+22 4 loss """bceaftersigmoid""" +22 4 regularizer """no""" +22 4 optimizer """adadelta""" +22 4 training_loop """owa""" +22 4 negative_sampler """basic""" +22 4 evaluator """rankbased""" +22 5 dataset """kinships""" +22 5 model """complex""" +22 5 loss """bceaftersigmoid""" +22 5 regularizer """no""" +22 5 optimizer """adadelta""" +22 5 training_loop """owa""" +22 5 negative_sampler """basic""" +22 5 evaluator """rankbased""" +22 6 dataset """kinships""" +22 6 model """complex""" +22 6 loss """bceaftersigmoid""" +22 6 regularizer """no""" +22 6 optimizer """adadelta""" +22 6 training_loop """owa""" +22 6 negative_sampler """basic""" +22 6 evaluator """rankbased""" +22 7 dataset """kinships""" +22 7 model """complex""" +22 7 loss """bceaftersigmoid""" +22 7 regularizer """no""" +22 7 optimizer """adadelta""" +22 7 training_loop """owa""" +22 7 negative_sampler """basic""" +22 7 evaluator """rankbased""" +22 8 dataset """kinships""" +22 8 model """complex""" +22 8 loss """bceaftersigmoid""" +22 8 regularizer """no""" +22 8 optimizer """adadelta""" +22 8 training_loop """owa""" +22 8 negative_sampler """basic""" +22 8 evaluator """rankbased""" +22 9 dataset """kinships""" +22 9 model """complex""" +22 9 loss """bceaftersigmoid""" +22 9 regularizer """no""" +22 9 optimizer """adadelta""" +22 9 training_loop """owa""" +22 9 negative_sampler """basic""" +22 9 evaluator """rankbased""" +22 10 dataset """kinships""" +22 10 model """complex""" +22 10 loss """bceaftersigmoid""" +22 10 regularizer """no""" +22 10 optimizer """adadelta""" +22 10 training_loop """owa""" +22 10 negative_sampler """basic""" +22 10 evaluator """rankbased""" +22 11 dataset """kinships""" +22 11 model """complex""" +22 11 loss """bceaftersigmoid""" +22 11 regularizer """no""" +22 11 optimizer """adadelta""" +22 11 training_loop """owa""" +22 11 negative_sampler """basic""" +22 11 evaluator """rankbased""" +22 12 dataset """kinships""" +22 12 model """complex""" +22 12 loss """bceaftersigmoid""" +22 
12 regularizer """no""" +22 12 optimizer """adadelta""" +22 12 training_loop """owa""" +22 12 negative_sampler """basic""" +22 12 evaluator """rankbased""" +22 13 dataset """kinships""" +22 13 model """complex""" +22 13 loss """bceaftersigmoid""" +22 13 regularizer """no""" +22 13 optimizer """adadelta""" +22 13 training_loop """owa""" +22 13 negative_sampler """basic""" +22 13 evaluator """rankbased""" +22 14 dataset """kinships""" +22 14 model """complex""" +22 14 loss """bceaftersigmoid""" +22 14 regularizer """no""" +22 14 optimizer """adadelta""" +22 14 training_loop """owa""" +22 14 negative_sampler """basic""" +22 14 evaluator """rankbased""" +22 15 dataset """kinships""" +22 15 model """complex""" +22 15 loss """bceaftersigmoid""" +22 15 regularizer """no""" +22 15 optimizer """adadelta""" +22 15 training_loop """owa""" +22 15 negative_sampler """basic""" +22 15 evaluator """rankbased""" +22 16 dataset """kinships""" +22 16 model """complex""" +22 16 loss """bceaftersigmoid""" +22 16 regularizer """no""" +22 16 optimizer """adadelta""" +22 16 training_loop """owa""" +22 16 negative_sampler """basic""" +22 16 evaluator """rankbased""" +22 17 dataset """kinships""" +22 17 model """complex""" +22 17 loss """bceaftersigmoid""" +22 17 regularizer """no""" +22 17 optimizer """adadelta""" +22 17 training_loop """owa""" +22 17 negative_sampler """basic""" +22 17 evaluator """rankbased""" +22 18 dataset """kinships""" +22 18 model """complex""" +22 18 loss """bceaftersigmoid""" +22 18 regularizer """no""" +22 18 optimizer """adadelta""" +22 18 training_loop """owa""" +22 18 negative_sampler """basic""" +22 18 evaluator """rankbased""" +22 19 dataset """kinships""" +22 19 model """complex""" +22 19 loss """bceaftersigmoid""" +22 19 regularizer """no""" +22 19 optimizer """adadelta""" +22 19 training_loop """owa""" +22 19 negative_sampler """basic""" +22 19 evaluator """rankbased""" +22 20 dataset """kinships""" +22 20 model """complex""" +22 20 loss 
"""bceaftersigmoid""" +22 20 regularizer """no""" +22 20 optimizer """adadelta""" +22 20 training_loop """owa""" +22 20 negative_sampler """basic""" +22 20 evaluator """rankbased""" +22 21 dataset """kinships""" +22 21 model """complex""" +22 21 loss """bceaftersigmoid""" +22 21 regularizer """no""" +22 21 optimizer """adadelta""" +22 21 training_loop """owa""" +22 21 negative_sampler """basic""" +22 21 evaluator """rankbased""" +22 22 dataset """kinships""" +22 22 model """complex""" +22 22 loss """bceaftersigmoid""" +22 22 regularizer """no""" +22 22 optimizer """adadelta""" +22 22 training_loop """owa""" +22 22 negative_sampler """basic""" +22 22 evaluator """rankbased""" +22 23 dataset """kinships""" +22 23 model """complex""" +22 23 loss """bceaftersigmoid""" +22 23 regularizer """no""" +22 23 optimizer """adadelta""" +22 23 training_loop """owa""" +22 23 negative_sampler """basic""" +22 23 evaluator """rankbased""" +22 24 dataset """kinships""" +22 24 model """complex""" +22 24 loss """bceaftersigmoid""" +22 24 regularizer """no""" +22 24 optimizer """adadelta""" +22 24 training_loop """owa""" +22 24 negative_sampler """basic""" +22 24 evaluator """rankbased""" +22 25 dataset """kinships""" +22 25 model """complex""" +22 25 loss """bceaftersigmoid""" +22 25 regularizer """no""" +22 25 optimizer """adadelta""" +22 25 training_loop """owa""" +22 25 negative_sampler """basic""" +22 25 evaluator """rankbased""" +22 26 dataset """kinships""" +22 26 model """complex""" +22 26 loss """bceaftersigmoid""" +22 26 regularizer """no""" +22 26 optimizer """adadelta""" +22 26 training_loop """owa""" +22 26 negative_sampler """basic""" +22 26 evaluator """rankbased""" +22 27 dataset """kinships""" +22 27 model """complex""" +22 27 loss """bceaftersigmoid""" +22 27 regularizer """no""" +22 27 optimizer """adadelta""" +22 27 training_loop """owa""" +22 27 negative_sampler """basic""" +22 27 evaluator """rankbased""" +22 28 dataset """kinships""" +22 28 model """complex""" +22 
28 loss """bceaftersigmoid""" +22 28 regularizer """no""" +22 28 optimizer """adadelta""" +22 28 training_loop """owa""" +22 28 negative_sampler """basic""" +22 28 evaluator """rankbased""" +22 29 dataset """kinships""" +22 29 model """complex""" +22 29 loss """bceaftersigmoid""" +22 29 regularizer """no""" +22 29 optimizer """adadelta""" +22 29 training_loop """owa""" +22 29 negative_sampler """basic""" +22 29 evaluator """rankbased""" +22 30 dataset """kinships""" +22 30 model """complex""" +22 30 loss """bceaftersigmoid""" +22 30 regularizer """no""" +22 30 optimizer """adadelta""" +22 30 training_loop """owa""" +22 30 negative_sampler """basic""" +22 30 evaluator """rankbased""" +22 31 dataset """kinships""" +22 31 model """complex""" +22 31 loss """bceaftersigmoid""" +22 31 regularizer """no""" +22 31 optimizer """adadelta""" +22 31 training_loop """owa""" +22 31 negative_sampler """basic""" +22 31 evaluator """rankbased""" +22 32 dataset """kinships""" +22 32 model """complex""" +22 32 loss """bceaftersigmoid""" +22 32 regularizer """no""" +22 32 optimizer """adadelta""" +22 32 training_loop """owa""" +22 32 negative_sampler """basic""" +22 32 evaluator """rankbased""" +22 33 dataset """kinships""" +22 33 model """complex""" +22 33 loss """bceaftersigmoid""" +22 33 regularizer """no""" +22 33 optimizer """adadelta""" +22 33 training_loop """owa""" +22 33 negative_sampler """basic""" +22 33 evaluator """rankbased""" +22 34 dataset """kinships""" +22 34 model """complex""" +22 34 loss """bceaftersigmoid""" +22 34 regularizer """no""" +22 34 optimizer """adadelta""" +22 34 training_loop """owa""" +22 34 negative_sampler """basic""" +22 34 evaluator """rankbased""" +22 35 dataset """kinships""" +22 35 model """complex""" +22 35 loss """bceaftersigmoid""" +22 35 regularizer """no""" +22 35 optimizer """adadelta""" +22 35 training_loop """owa""" +22 35 negative_sampler """basic""" +22 35 evaluator """rankbased""" +22 36 dataset """kinships""" +22 36 model 
"""complex""" +22 36 loss """bceaftersigmoid""" +22 36 regularizer """no""" +22 36 optimizer """adadelta""" +22 36 training_loop """owa""" +22 36 negative_sampler """basic""" +22 36 evaluator """rankbased""" +22 37 dataset """kinships""" +22 37 model """complex""" +22 37 loss """bceaftersigmoid""" +22 37 regularizer """no""" +22 37 optimizer """adadelta""" +22 37 training_loop """owa""" +22 37 negative_sampler """basic""" +22 37 evaluator """rankbased""" +22 38 dataset """kinships""" +22 38 model """complex""" +22 38 loss """bceaftersigmoid""" +22 38 regularizer """no""" +22 38 optimizer """adadelta""" +22 38 training_loop """owa""" +22 38 negative_sampler """basic""" +22 38 evaluator """rankbased""" +22 39 dataset """kinships""" +22 39 model """complex""" +22 39 loss """bceaftersigmoid""" +22 39 regularizer """no""" +22 39 optimizer """adadelta""" +22 39 training_loop """owa""" +22 39 negative_sampler """basic""" +22 39 evaluator """rankbased""" +22 40 dataset """kinships""" +22 40 model """complex""" +22 40 loss """bceaftersigmoid""" +22 40 regularizer """no""" +22 40 optimizer """adadelta""" +22 40 training_loop """owa""" +22 40 negative_sampler """basic""" +22 40 evaluator """rankbased""" +22 41 dataset """kinships""" +22 41 model """complex""" +22 41 loss """bceaftersigmoid""" +22 41 regularizer """no""" +22 41 optimizer """adadelta""" +22 41 training_loop """owa""" +22 41 negative_sampler """basic""" +22 41 evaluator """rankbased""" +22 42 dataset """kinships""" +22 42 model """complex""" +22 42 loss """bceaftersigmoid""" +22 42 regularizer """no""" +22 42 optimizer """adadelta""" +22 42 training_loop """owa""" +22 42 negative_sampler """basic""" +22 42 evaluator """rankbased""" +22 43 dataset """kinships""" +22 43 model """complex""" +22 43 loss """bceaftersigmoid""" +22 43 regularizer """no""" +22 43 optimizer """adadelta""" +22 43 training_loop """owa""" +22 43 negative_sampler """basic""" +22 43 evaluator """rankbased""" +22 44 dataset """kinships""" +22 
44 model """complex""" +22 44 loss """bceaftersigmoid""" +22 44 regularizer """no""" +22 44 optimizer """adadelta""" +22 44 training_loop """owa""" +22 44 negative_sampler """basic""" +22 44 evaluator """rankbased""" +22 45 dataset """kinships""" +22 45 model """complex""" +22 45 loss """bceaftersigmoid""" +22 45 regularizer """no""" +22 45 optimizer """adadelta""" +22 45 training_loop """owa""" +22 45 negative_sampler """basic""" +22 45 evaluator """rankbased""" +22 46 dataset """kinships""" +22 46 model """complex""" +22 46 loss """bceaftersigmoid""" +22 46 regularizer """no""" +22 46 optimizer """adadelta""" +22 46 training_loop """owa""" +22 46 negative_sampler """basic""" +22 46 evaluator """rankbased""" +22 47 dataset """kinships""" +22 47 model """complex""" +22 47 loss """bceaftersigmoid""" +22 47 regularizer """no""" +22 47 optimizer """adadelta""" +22 47 training_loop """owa""" +22 47 negative_sampler """basic""" +22 47 evaluator """rankbased""" +22 48 dataset """kinships""" +22 48 model """complex""" +22 48 loss """bceaftersigmoid""" +22 48 regularizer """no""" +22 48 optimizer """adadelta""" +22 48 training_loop """owa""" +22 48 negative_sampler """basic""" +22 48 evaluator """rankbased""" +22 49 dataset """kinships""" +22 49 model """complex""" +22 49 loss """bceaftersigmoid""" +22 49 regularizer """no""" +22 49 optimizer """adadelta""" +22 49 training_loop """owa""" +22 49 negative_sampler """basic""" +22 49 evaluator """rankbased""" +22 50 dataset """kinships""" +22 50 model """complex""" +22 50 loss """bceaftersigmoid""" +22 50 regularizer """no""" +22 50 optimizer """adadelta""" +22 50 training_loop """owa""" +22 50 negative_sampler """basic""" +22 50 evaluator """rankbased""" +22 51 dataset """kinships""" +22 51 model """complex""" +22 51 loss """bceaftersigmoid""" +22 51 regularizer """no""" +22 51 optimizer """adadelta""" +22 51 training_loop """owa""" +22 51 negative_sampler """basic""" +22 51 evaluator """rankbased""" +22 52 dataset 
"""kinships""" +22 52 model """complex""" +22 52 loss """bceaftersigmoid""" +22 52 regularizer """no""" +22 52 optimizer """adadelta""" +22 52 training_loop """owa""" +22 52 negative_sampler """basic""" +22 52 evaluator """rankbased""" +22 53 dataset """kinships""" +22 53 model """complex""" +22 53 loss """bceaftersigmoid""" +22 53 regularizer """no""" +22 53 optimizer """adadelta""" +22 53 training_loop """owa""" +22 53 negative_sampler """basic""" +22 53 evaluator """rankbased""" +22 54 dataset """kinships""" +22 54 model """complex""" +22 54 loss """bceaftersigmoid""" +22 54 regularizer """no""" +22 54 optimizer """adadelta""" +22 54 training_loop """owa""" +22 54 negative_sampler """basic""" +22 54 evaluator """rankbased""" +22 55 dataset """kinships""" +22 55 model """complex""" +22 55 loss """bceaftersigmoid""" +22 55 regularizer """no""" +22 55 optimizer """adadelta""" +22 55 training_loop """owa""" +22 55 negative_sampler """basic""" +22 55 evaluator """rankbased""" +22 56 dataset """kinships""" +22 56 model """complex""" +22 56 loss """bceaftersigmoid""" +22 56 regularizer """no""" +22 56 optimizer """adadelta""" +22 56 training_loop """owa""" +22 56 negative_sampler """basic""" +22 56 evaluator """rankbased""" +22 57 dataset """kinships""" +22 57 model """complex""" +22 57 loss """bceaftersigmoid""" +22 57 regularizer """no""" +22 57 optimizer """adadelta""" +22 57 training_loop """owa""" +22 57 negative_sampler """basic""" +22 57 evaluator """rankbased""" +22 58 dataset """kinships""" +22 58 model """complex""" +22 58 loss """bceaftersigmoid""" +22 58 regularizer """no""" +22 58 optimizer """adadelta""" +22 58 training_loop """owa""" +22 58 negative_sampler """basic""" +22 58 evaluator """rankbased""" +22 59 dataset """kinships""" +22 59 model """complex""" +22 59 loss """bceaftersigmoid""" +22 59 regularizer """no""" +22 59 optimizer """adadelta""" +22 59 training_loop """owa""" +22 59 negative_sampler """basic""" +22 59 evaluator """rankbased""" +22 60 
dataset """kinships""" +22 60 model """complex""" +22 60 loss """bceaftersigmoid""" +22 60 regularizer """no""" +22 60 optimizer """adadelta""" +22 60 training_loop """owa""" +22 60 negative_sampler """basic""" +22 60 evaluator """rankbased""" +22 61 dataset """kinships""" +22 61 model """complex""" +22 61 loss """bceaftersigmoid""" +22 61 regularizer """no""" +22 61 optimizer """adadelta""" +22 61 training_loop """owa""" +22 61 negative_sampler """basic""" +22 61 evaluator """rankbased""" +22 62 dataset """kinships""" +22 62 model """complex""" +22 62 loss """bceaftersigmoid""" +22 62 regularizer """no""" +22 62 optimizer """adadelta""" +22 62 training_loop """owa""" +22 62 negative_sampler """basic""" +22 62 evaluator """rankbased""" +22 63 dataset """kinships""" +22 63 model """complex""" +22 63 loss """bceaftersigmoid""" +22 63 regularizer """no""" +22 63 optimizer """adadelta""" +22 63 training_loop """owa""" +22 63 negative_sampler """basic""" +22 63 evaluator """rankbased""" +22 64 dataset """kinships""" +22 64 model """complex""" +22 64 loss """bceaftersigmoid""" +22 64 regularizer """no""" +22 64 optimizer """adadelta""" +22 64 training_loop """owa""" +22 64 negative_sampler """basic""" +22 64 evaluator """rankbased""" +22 65 dataset """kinships""" +22 65 model """complex""" +22 65 loss """bceaftersigmoid""" +22 65 regularizer """no""" +22 65 optimizer """adadelta""" +22 65 training_loop """owa""" +22 65 negative_sampler """basic""" +22 65 evaluator """rankbased""" +22 66 dataset """kinships""" +22 66 model """complex""" +22 66 loss """bceaftersigmoid""" +22 66 regularizer """no""" +22 66 optimizer """adadelta""" +22 66 training_loop """owa""" +22 66 negative_sampler """basic""" +22 66 evaluator """rankbased""" +22 67 dataset """kinships""" +22 67 model """complex""" +22 67 loss """bceaftersigmoid""" +22 67 regularizer """no""" +22 67 optimizer """adadelta""" +22 67 training_loop """owa""" +22 67 negative_sampler """basic""" +22 67 evaluator 
"""rankbased""" +22 68 dataset """kinships""" +22 68 model """complex""" +22 68 loss """bceaftersigmoid""" +22 68 regularizer """no""" +22 68 optimizer """adadelta""" +22 68 training_loop """owa""" +22 68 negative_sampler """basic""" +22 68 evaluator """rankbased""" +22 69 dataset """kinships""" +22 69 model """complex""" +22 69 loss """bceaftersigmoid""" +22 69 regularizer """no""" +22 69 optimizer """adadelta""" +22 69 training_loop """owa""" +22 69 negative_sampler """basic""" +22 69 evaluator """rankbased""" +22 70 dataset """kinships""" +22 70 model """complex""" +22 70 loss """bceaftersigmoid""" +22 70 regularizer """no""" +22 70 optimizer """adadelta""" +22 70 training_loop """owa""" +22 70 negative_sampler """basic""" +22 70 evaluator """rankbased""" +22 71 dataset """kinships""" +22 71 model """complex""" +22 71 loss """bceaftersigmoid""" +22 71 regularizer """no""" +22 71 optimizer """adadelta""" +22 71 training_loop """owa""" +22 71 negative_sampler """basic""" +22 71 evaluator """rankbased""" +22 72 dataset """kinships""" +22 72 model """complex""" +22 72 loss """bceaftersigmoid""" +22 72 regularizer """no""" +22 72 optimizer """adadelta""" +22 72 training_loop """owa""" +22 72 negative_sampler """basic""" +22 72 evaluator """rankbased""" +22 73 dataset """kinships""" +22 73 model """complex""" +22 73 loss """bceaftersigmoid""" +22 73 regularizer """no""" +22 73 optimizer """adadelta""" +22 73 training_loop """owa""" +22 73 negative_sampler """basic""" +22 73 evaluator """rankbased""" +22 74 dataset """kinships""" +22 74 model """complex""" +22 74 loss """bceaftersigmoid""" +22 74 regularizer """no""" +22 74 optimizer """adadelta""" +22 74 training_loop """owa""" +22 74 negative_sampler """basic""" +22 74 evaluator """rankbased""" +22 75 dataset """kinships""" +22 75 model """complex""" +22 75 loss """bceaftersigmoid""" +22 75 regularizer """no""" +22 75 optimizer """adadelta""" +22 75 training_loop """owa""" +22 75 negative_sampler """basic""" +22 75 
evaluator """rankbased""" +22 76 dataset """kinships""" +22 76 model """complex""" +22 76 loss """bceaftersigmoid""" +22 76 regularizer """no""" +22 76 optimizer """adadelta""" +22 76 training_loop """owa""" +22 76 negative_sampler """basic""" +22 76 evaluator """rankbased""" +22 77 dataset """kinships""" +22 77 model """complex""" +22 77 loss """bceaftersigmoid""" +22 77 regularizer """no""" +22 77 optimizer """adadelta""" +22 77 training_loop """owa""" +22 77 negative_sampler """basic""" +22 77 evaluator """rankbased""" +22 78 dataset """kinships""" +22 78 model """complex""" +22 78 loss """bceaftersigmoid""" +22 78 regularizer """no""" +22 78 optimizer """adadelta""" +22 78 training_loop """owa""" +22 78 negative_sampler """basic""" +22 78 evaluator """rankbased""" +22 79 dataset """kinships""" +22 79 model """complex""" +22 79 loss """bceaftersigmoid""" +22 79 regularizer """no""" +22 79 optimizer """adadelta""" +22 79 training_loop """owa""" +22 79 negative_sampler """basic""" +22 79 evaluator """rankbased""" +22 80 dataset """kinships""" +22 80 model """complex""" +22 80 loss """bceaftersigmoid""" +22 80 regularizer """no""" +22 80 optimizer """adadelta""" +22 80 training_loop """owa""" +22 80 negative_sampler """basic""" +22 80 evaluator """rankbased""" +22 81 dataset """kinships""" +22 81 model """complex""" +22 81 loss """bceaftersigmoid""" +22 81 regularizer """no""" +22 81 optimizer """adadelta""" +22 81 training_loop """owa""" +22 81 negative_sampler """basic""" +22 81 evaluator """rankbased""" +22 82 dataset """kinships""" +22 82 model """complex""" +22 82 loss """bceaftersigmoid""" +22 82 regularizer """no""" +22 82 optimizer """adadelta""" +22 82 training_loop """owa""" +22 82 negative_sampler """basic""" +22 82 evaluator """rankbased""" +22 83 dataset """kinships""" +22 83 model """complex""" +22 83 loss """bceaftersigmoid""" +22 83 regularizer """no""" +22 83 optimizer """adadelta""" +22 83 training_loop """owa""" +22 83 negative_sampler 
"""basic""" +22 83 evaluator """rankbased""" +22 84 dataset """kinships""" +22 84 model """complex""" +22 84 loss """bceaftersigmoid""" +22 84 regularizer """no""" +22 84 optimizer """adadelta""" +22 84 training_loop """owa""" +22 84 negative_sampler """basic""" +22 84 evaluator """rankbased""" +22 85 dataset """kinships""" +22 85 model """complex""" +22 85 loss """bceaftersigmoid""" +22 85 regularizer """no""" +22 85 optimizer """adadelta""" +22 85 training_loop """owa""" +22 85 negative_sampler """basic""" +22 85 evaluator """rankbased""" +22 86 dataset """kinships""" +22 86 model """complex""" +22 86 loss """bceaftersigmoid""" +22 86 regularizer """no""" +22 86 optimizer """adadelta""" +22 86 training_loop """owa""" +22 86 negative_sampler """basic""" +22 86 evaluator """rankbased""" +22 87 dataset """kinships""" +22 87 model """complex""" +22 87 loss """bceaftersigmoid""" +22 87 regularizer """no""" +22 87 optimizer """adadelta""" +22 87 training_loop """owa""" +22 87 negative_sampler """basic""" +22 87 evaluator """rankbased""" +22 88 dataset """kinships""" +22 88 model """complex""" +22 88 loss """bceaftersigmoid""" +22 88 regularizer """no""" +22 88 optimizer """adadelta""" +22 88 training_loop """owa""" +22 88 negative_sampler """basic""" +22 88 evaluator """rankbased""" +22 89 dataset """kinships""" +22 89 model """complex""" +22 89 loss """bceaftersigmoid""" +22 89 regularizer """no""" +22 89 optimizer """adadelta""" +22 89 training_loop """owa""" +22 89 negative_sampler """basic""" +22 89 evaluator """rankbased""" +22 90 dataset """kinships""" +22 90 model """complex""" +22 90 loss """bceaftersigmoid""" +22 90 regularizer """no""" +22 90 optimizer """adadelta""" +22 90 training_loop """owa""" +22 90 negative_sampler """basic""" +22 90 evaluator """rankbased""" +22 91 dataset """kinships""" +22 91 model """complex""" +22 91 loss """bceaftersigmoid""" +22 91 regularizer """no""" +22 91 optimizer """adadelta""" +22 91 training_loop """owa""" +22 91 
negative_sampler """basic""" +22 91 evaluator """rankbased""" +22 92 dataset """kinships""" +22 92 model """complex""" +22 92 loss """bceaftersigmoid""" +22 92 regularizer """no""" +22 92 optimizer """adadelta""" +22 92 training_loop """owa""" +22 92 negative_sampler """basic""" +22 92 evaluator """rankbased""" +22 93 dataset """kinships""" +22 93 model """complex""" +22 93 loss """bceaftersigmoid""" +22 93 regularizer """no""" +22 93 optimizer """adadelta""" +22 93 training_loop """owa""" +22 93 negative_sampler """basic""" +22 93 evaluator """rankbased""" +22 94 dataset """kinships""" +22 94 model """complex""" +22 94 loss """bceaftersigmoid""" +22 94 regularizer """no""" +22 94 optimizer """adadelta""" +22 94 training_loop """owa""" +22 94 negative_sampler """basic""" +22 94 evaluator """rankbased""" +22 95 dataset """kinships""" +22 95 model """complex""" +22 95 loss """bceaftersigmoid""" +22 95 regularizer """no""" +22 95 optimizer """adadelta""" +22 95 training_loop """owa""" +22 95 negative_sampler """basic""" +22 95 evaluator """rankbased""" +22 96 dataset """kinships""" +22 96 model """complex""" +22 96 loss """bceaftersigmoid""" +22 96 regularizer """no""" +22 96 optimizer """adadelta""" +22 96 training_loop """owa""" +22 96 negative_sampler """basic""" +22 96 evaluator """rankbased""" +22 97 dataset """kinships""" +22 97 model """complex""" +22 97 loss """bceaftersigmoid""" +22 97 regularizer """no""" +22 97 optimizer """adadelta""" +22 97 training_loop """owa""" +22 97 negative_sampler """basic""" +22 97 evaluator """rankbased""" +22 98 dataset """kinships""" +22 98 model """complex""" +22 98 loss """bceaftersigmoid""" +22 98 regularizer """no""" +22 98 optimizer """adadelta""" +22 98 training_loop """owa""" +22 98 negative_sampler """basic""" +22 98 evaluator """rankbased""" +22 99 dataset """kinships""" +22 99 model """complex""" +22 99 loss """bceaftersigmoid""" +22 99 regularizer """no""" +22 99 optimizer """adadelta""" +22 99 training_loop 
"""owa""" +22 99 negative_sampler """basic""" +22 99 evaluator """rankbased""" +22 100 dataset """kinships""" +22 100 model """complex""" +22 100 loss """bceaftersigmoid""" +22 100 regularizer """no""" +22 100 optimizer """adadelta""" +22 100 training_loop """owa""" +22 100 negative_sampler """basic""" +22 100 evaluator """rankbased""" +23 1 model.embedding_dim 1.0 +23 1 negative_sampler.num_negs_per_pos 38.0 +23 1 training.batch_size 2.0 +23 2 model.embedding_dim 0.0 +23 2 negative_sampler.num_negs_per_pos 73.0 +23 2 training.batch_size 1.0 +23 3 model.embedding_dim 2.0 +23 3 negative_sampler.num_negs_per_pos 68.0 +23 3 training.batch_size 1.0 +23 4 model.embedding_dim 0.0 +23 4 negative_sampler.num_negs_per_pos 58.0 +23 4 training.batch_size 1.0 +23 5 model.embedding_dim 1.0 +23 5 negative_sampler.num_negs_per_pos 78.0 +23 5 training.batch_size 1.0 +23 6 model.embedding_dim 1.0 +23 6 negative_sampler.num_negs_per_pos 13.0 +23 6 training.batch_size 0.0 +23 7 model.embedding_dim 1.0 +23 7 negative_sampler.num_negs_per_pos 1.0 +23 7 training.batch_size 2.0 +23 8 model.embedding_dim 1.0 +23 8 negative_sampler.num_negs_per_pos 47.0 +23 8 training.batch_size 0.0 +23 9 model.embedding_dim 0.0 +23 9 negative_sampler.num_negs_per_pos 52.0 +23 9 training.batch_size 2.0 +23 10 model.embedding_dim 2.0 +23 10 negative_sampler.num_negs_per_pos 45.0 +23 10 training.batch_size 1.0 +23 11 model.embedding_dim 0.0 +23 11 negative_sampler.num_negs_per_pos 58.0 +23 11 training.batch_size 2.0 +23 12 model.embedding_dim 1.0 +23 12 negative_sampler.num_negs_per_pos 72.0 +23 12 training.batch_size 1.0 +23 13 model.embedding_dim 2.0 +23 13 negative_sampler.num_negs_per_pos 71.0 +23 13 training.batch_size 0.0 +23 14 model.embedding_dim 2.0 +23 14 negative_sampler.num_negs_per_pos 80.0 +23 14 training.batch_size 0.0 +23 15 model.embedding_dim 2.0 +23 15 negative_sampler.num_negs_per_pos 41.0 +23 15 training.batch_size 2.0 +23 16 model.embedding_dim 0.0 +23 16 
negative_sampler.num_negs_per_pos 44.0 +23 16 training.batch_size 0.0 +23 17 model.embedding_dim 1.0 +23 17 negative_sampler.num_negs_per_pos 70.0 +23 17 training.batch_size 0.0 +23 18 model.embedding_dim 0.0 +23 18 negative_sampler.num_negs_per_pos 12.0 +23 18 training.batch_size 0.0 +23 19 model.embedding_dim 1.0 +23 19 negative_sampler.num_negs_per_pos 3.0 +23 19 training.batch_size 1.0 +23 20 model.embedding_dim 2.0 +23 20 negative_sampler.num_negs_per_pos 87.0 +23 20 training.batch_size 2.0 +23 21 model.embedding_dim 0.0 +23 21 negative_sampler.num_negs_per_pos 75.0 +23 21 training.batch_size 0.0 +23 22 model.embedding_dim 1.0 +23 22 negative_sampler.num_negs_per_pos 21.0 +23 22 training.batch_size 2.0 +23 23 model.embedding_dim 0.0 +23 23 negative_sampler.num_negs_per_pos 3.0 +23 23 training.batch_size 1.0 +23 24 model.embedding_dim 1.0 +23 24 negative_sampler.num_negs_per_pos 93.0 +23 24 training.batch_size 0.0 +23 25 model.embedding_dim 2.0 +23 25 negative_sampler.num_negs_per_pos 38.0 +23 25 training.batch_size 1.0 +23 26 model.embedding_dim 1.0 +23 26 negative_sampler.num_negs_per_pos 66.0 +23 26 training.batch_size 1.0 +23 27 model.embedding_dim 2.0 +23 27 negative_sampler.num_negs_per_pos 33.0 +23 27 training.batch_size 1.0 +23 28 model.embedding_dim 0.0 +23 28 negative_sampler.num_negs_per_pos 33.0 +23 28 training.batch_size 0.0 +23 29 model.embedding_dim 0.0 +23 29 negative_sampler.num_negs_per_pos 59.0 +23 29 training.batch_size 0.0 +23 30 model.embedding_dim 2.0 +23 30 negative_sampler.num_negs_per_pos 3.0 +23 30 training.batch_size 1.0 +23 31 model.embedding_dim 0.0 +23 31 negative_sampler.num_negs_per_pos 89.0 +23 31 training.batch_size 2.0 +23 32 model.embedding_dim 2.0 +23 32 negative_sampler.num_negs_per_pos 31.0 +23 32 training.batch_size 1.0 +23 33 model.embedding_dim 0.0 +23 33 negative_sampler.num_negs_per_pos 62.0 +23 33 training.batch_size 1.0 +23 34 model.embedding_dim 2.0 +23 34 negative_sampler.num_negs_per_pos 29.0 +23 34 
training.batch_size 2.0 +23 35 model.embedding_dim 2.0 +23 35 negative_sampler.num_negs_per_pos 92.0 +23 35 training.batch_size 1.0 +23 36 model.embedding_dim 1.0 +23 36 negative_sampler.num_negs_per_pos 60.0 +23 36 training.batch_size 2.0 +23 37 model.embedding_dim 1.0 +23 37 negative_sampler.num_negs_per_pos 1.0 +23 37 training.batch_size 2.0 +23 38 model.embedding_dim 2.0 +23 38 negative_sampler.num_negs_per_pos 53.0 +23 38 training.batch_size 2.0 +23 39 model.embedding_dim 2.0 +23 39 negative_sampler.num_negs_per_pos 57.0 +23 39 training.batch_size 2.0 +23 40 model.embedding_dim 0.0 +23 40 negative_sampler.num_negs_per_pos 6.0 +23 40 training.batch_size 0.0 +23 41 model.embedding_dim 0.0 +23 41 negative_sampler.num_negs_per_pos 64.0 +23 41 training.batch_size 1.0 +23 42 model.embedding_dim 2.0 +23 42 negative_sampler.num_negs_per_pos 48.0 +23 42 training.batch_size 1.0 +23 43 model.embedding_dim 1.0 +23 43 negative_sampler.num_negs_per_pos 38.0 +23 43 training.batch_size 0.0 +23 44 model.embedding_dim 1.0 +23 44 negative_sampler.num_negs_per_pos 9.0 +23 44 training.batch_size 1.0 +23 45 model.embedding_dim 1.0 +23 45 negative_sampler.num_negs_per_pos 11.0 +23 45 training.batch_size 1.0 +23 46 model.embedding_dim 2.0 +23 46 negative_sampler.num_negs_per_pos 17.0 +23 46 training.batch_size 0.0 +23 47 model.embedding_dim 2.0 +23 47 negative_sampler.num_negs_per_pos 30.0 +23 47 training.batch_size 1.0 +23 48 model.embedding_dim 1.0 +23 48 negative_sampler.num_negs_per_pos 11.0 +23 48 training.batch_size 2.0 +23 49 model.embedding_dim 2.0 +23 49 negative_sampler.num_negs_per_pos 79.0 +23 49 training.batch_size 2.0 +23 50 model.embedding_dim 0.0 +23 50 negative_sampler.num_negs_per_pos 20.0 +23 50 training.batch_size 0.0 +23 51 model.embedding_dim 2.0 +23 51 negative_sampler.num_negs_per_pos 84.0 +23 51 training.batch_size 1.0 +23 52 model.embedding_dim 0.0 +23 52 negative_sampler.num_negs_per_pos 34.0 +23 52 training.batch_size 0.0 +23 53 model.embedding_dim 2.0 +23 
53 negative_sampler.num_negs_per_pos 13.0 +23 53 training.batch_size 0.0 +23 54 model.embedding_dim 0.0 +23 54 negative_sampler.num_negs_per_pos 90.0 +23 54 training.batch_size 1.0 +23 55 model.embedding_dim 0.0 +23 55 negative_sampler.num_negs_per_pos 19.0 +23 55 training.batch_size 1.0 +23 56 model.embedding_dim 0.0 +23 56 negative_sampler.num_negs_per_pos 27.0 +23 56 training.batch_size 1.0 +23 57 model.embedding_dim 1.0 +23 57 negative_sampler.num_negs_per_pos 62.0 +23 57 training.batch_size 0.0 +23 58 model.embedding_dim 0.0 +23 58 negative_sampler.num_negs_per_pos 18.0 +23 58 training.batch_size 0.0 +23 59 model.embedding_dim 2.0 +23 59 negative_sampler.num_negs_per_pos 11.0 +23 59 training.batch_size 2.0 +23 60 model.embedding_dim 0.0 +23 60 negative_sampler.num_negs_per_pos 71.0 +23 60 training.batch_size 1.0 +23 61 model.embedding_dim 0.0 +23 61 negative_sampler.num_negs_per_pos 93.0 +23 61 training.batch_size 1.0 +23 62 model.embedding_dim 0.0 +23 62 negative_sampler.num_negs_per_pos 54.0 +23 62 training.batch_size 1.0 +23 63 model.embedding_dim 1.0 +23 63 negative_sampler.num_negs_per_pos 75.0 +23 63 training.batch_size 0.0 +23 64 model.embedding_dim 1.0 +23 64 negative_sampler.num_negs_per_pos 97.0 +23 64 training.batch_size 0.0 +23 65 model.embedding_dim 0.0 +23 65 negative_sampler.num_negs_per_pos 94.0 +23 65 training.batch_size 1.0 +23 66 model.embedding_dim 0.0 +23 66 negative_sampler.num_negs_per_pos 98.0 +23 66 training.batch_size 1.0 +23 67 model.embedding_dim 1.0 +23 67 negative_sampler.num_negs_per_pos 44.0 +23 67 training.batch_size 0.0 +23 68 model.embedding_dim 1.0 +23 68 negative_sampler.num_negs_per_pos 30.0 +23 68 training.batch_size 0.0 +23 69 model.embedding_dim 2.0 +23 69 negative_sampler.num_negs_per_pos 82.0 +23 69 training.batch_size 2.0 +23 70 model.embedding_dim 1.0 +23 70 negative_sampler.num_negs_per_pos 7.0 +23 70 training.batch_size 0.0 +23 71 model.embedding_dim 1.0 +23 71 negative_sampler.num_negs_per_pos 84.0 +23 71 
training.batch_size 1.0 +23 72 model.embedding_dim 1.0 +23 72 negative_sampler.num_negs_per_pos 22.0 +23 72 training.batch_size 2.0 +23 73 model.embedding_dim 1.0 +23 73 negative_sampler.num_negs_per_pos 97.0 +23 73 training.batch_size 1.0 +23 74 model.embedding_dim 2.0 +23 74 negative_sampler.num_negs_per_pos 25.0 +23 74 training.batch_size 2.0 +23 75 model.embedding_dim 0.0 +23 75 negative_sampler.num_negs_per_pos 41.0 +23 75 training.batch_size 0.0 +23 76 model.embedding_dim 1.0 +23 76 negative_sampler.num_negs_per_pos 77.0 +23 76 training.batch_size 2.0 +23 77 model.embedding_dim 2.0 +23 77 negative_sampler.num_negs_per_pos 81.0 +23 77 training.batch_size 1.0 +23 78 model.embedding_dim 2.0 +23 78 negative_sampler.num_negs_per_pos 36.0 +23 78 training.batch_size 0.0 +23 79 model.embedding_dim 1.0 +23 79 negative_sampler.num_negs_per_pos 10.0 +23 79 training.batch_size 0.0 +23 80 model.embedding_dim 2.0 +23 80 negative_sampler.num_negs_per_pos 84.0 +23 80 training.batch_size 1.0 +23 81 model.embedding_dim 2.0 +23 81 negative_sampler.num_negs_per_pos 3.0 +23 81 training.batch_size 1.0 +23 82 model.embedding_dim 2.0 +23 82 negative_sampler.num_negs_per_pos 94.0 +23 82 training.batch_size 0.0 +23 83 model.embedding_dim 1.0 +23 83 negative_sampler.num_negs_per_pos 42.0 +23 83 training.batch_size 0.0 +23 84 model.embedding_dim 0.0 +23 84 negative_sampler.num_negs_per_pos 54.0 +23 84 training.batch_size 0.0 +23 85 model.embedding_dim 1.0 +23 85 negative_sampler.num_negs_per_pos 33.0 +23 85 training.batch_size 2.0 +23 86 model.embedding_dim 2.0 +23 86 negative_sampler.num_negs_per_pos 44.0 +23 86 training.batch_size 1.0 +23 87 model.embedding_dim 0.0 +23 87 negative_sampler.num_negs_per_pos 57.0 +23 87 training.batch_size 0.0 +23 88 model.embedding_dim 2.0 +23 88 negative_sampler.num_negs_per_pos 39.0 +23 88 training.batch_size 2.0 +23 89 model.embedding_dim 2.0 +23 89 negative_sampler.num_negs_per_pos 25.0 +23 89 training.batch_size 2.0 +23 90 model.embedding_dim 1.0 
+23 90 negative_sampler.num_negs_per_pos 40.0 +23 90 training.batch_size 1.0 +23 91 model.embedding_dim 1.0 +23 91 negative_sampler.num_negs_per_pos 37.0 +23 91 training.batch_size 1.0 +23 92 model.embedding_dim 2.0 +23 92 negative_sampler.num_negs_per_pos 6.0 +23 92 training.batch_size 1.0 +23 93 model.embedding_dim 0.0 +23 93 negative_sampler.num_negs_per_pos 32.0 +23 93 training.batch_size 0.0 +23 94 model.embedding_dim 2.0 +23 94 negative_sampler.num_negs_per_pos 58.0 +23 94 training.batch_size 0.0 +23 95 model.embedding_dim 0.0 +23 95 negative_sampler.num_negs_per_pos 83.0 +23 95 training.batch_size 2.0 +23 96 model.embedding_dim 1.0 +23 96 negative_sampler.num_negs_per_pos 63.0 +23 96 training.batch_size 0.0 +23 97 model.embedding_dim 1.0 +23 97 negative_sampler.num_negs_per_pos 99.0 +23 97 training.batch_size 1.0 +23 98 model.embedding_dim 2.0 +23 98 negative_sampler.num_negs_per_pos 44.0 +23 98 training.batch_size 2.0 +23 99 model.embedding_dim 1.0 +23 99 negative_sampler.num_negs_per_pos 24.0 +23 99 training.batch_size 2.0 +23 100 model.embedding_dim 2.0 +23 100 negative_sampler.num_negs_per_pos 75.0 +23 100 training.batch_size 1.0 +23 1 dataset """kinships""" +23 1 model """complex""" +23 1 loss """softplus""" +23 1 regularizer """no""" +23 1 optimizer """adadelta""" +23 1 training_loop """owa""" +23 1 negative_sampler """basic""" +23 1 evaluator """rankbased""" +23 2 dataset """kinships""" +23 2 model """complex""" +23 2 loss """softplus""" +23 2 regularizer """no""" +23 2 optimizer """adadelta""" +23 2 training_loop """owa""" +23 2 negative_sampler """basic""" +23 2 evaluator """rankbased""" +23 3 dataset """kinships""" +23 3 model """complex""" +23 3 loss """softplus""" +23 3 regularizer """no""" +23 3 optimizer """adadelta""" +23 3 training_loop """owa""" +23 3 negative_sampler """basic""" +23 3 evaluator """rankbased""" +23 4 dataset """kinships""" +23 4 model """complex""" +23 4 loss """softplus""" +23 4 regularizer """no""" +23 4 optimizer 
"""adadelta""" +23 4 training_loop """owa""" +23 4 negative_sampler """basic""" +23 4 evaluator """rankbased""" +23 5 dataset """kinships""" +23 5 model """complex""" +23 5 loss """softplus""" +23 5 regularizer """no""" +23 5 optimizer """adadelta""" +23 5 training_loop """owa""" +23 5 negative_sampler """basic""" +23 5 evaluator """rankbased""" +23 6 dataset """kinships""" +23 6 model """complex""" +23 6 loss """softplus""" +23 6 regularizer """no""" +23 6 optimizer """adadelta""" +23 6 training_loop """owa""" +23 6 negative_sampler """basic""" +23 6 evaluator """rankbased""" +23 7 dataset """kinships""" +23 7 model """complex""" +23 7 loss """softplus""" +23 7 regularizer """no""" +23 7 optimizer """adadelta""" +23 7 training_loop """owa""" +23 7 negative_sampler """basic""" +23 7 evaluator """rankbased""" +23 8 dataset """kinships""" +23 8 model """complex""" +23 8 loss """softplus""" +23 8 regularizer """no""" +23 8 optimizer """adadelta""" +23 8 training_loop """owa""" +23 8 negative_sampler """basic""" +23 8 evaluator """rankbased""" +23 9 dataset """kinships""" +23 9 model """complex""" +23 9 loss """softplus""" +23 9 regularizer """no""" +23 9 optimizer """adadelta""" +23 9 training_loop """owa""" +23 9 negative_sampler """basic""" +23 9 evaluator """rankbased""" +23 10 dataset """kinships""" +23 10 model """complex""" +23 10 loss """softplus""" +23 10 regularizer """no""" +23 10 optimizer """adadelta""" +23 10 training_loop """owa""" +23 10 negative_sampler """basic""" +23 10 evaluator """rankbased""" +23 11 dataset """kinships""" +23 11 model """complex""" +23 11 loss """softplus""" +23 11 regularizer """no""" +23 11 optimizer """adadelta""" +23 11 training_loop """owa""" +23 11 negative_sampler """basic""" +23 11 evaluator """rankbased""" +23 12 dataset """kinships""" +23 12 model """complex""" +23 12 loss """softplus""" +23 12 regularizer """no""" +23 12 optimizer """adadelta""" +23 12 training_loop """owa""" +23 12 negative_sampler """basic""" +23 12 
evaluator """rankbased""" +23 13 dataset """kinships""" +23 13 model """complex""" +23 13 loss """softplus""" +23 13 regularizer """no""" +23 13 optimizer """adadelta""" +23 13 training_loop """owa""" +23 13 negative_sampler """basic""" +23 13 evaluator """rankbased""" +23 14 dataset """kinships""" +23 14 model """complex""" +23 14 loss """softplus""" +23 14 regularizer """no""" +23 14 optimizer """adadelta""" +23 14 training_loop """owa""" +23 14 negative_sampler """basic""" +23 14 evaluator """rankbased""" +23 15 dataset """kinships""" +23 15 model """complex""" +23 15 loss """softplus""" +23 15 regularizer """no""" +23 15 optimizer """adadelta""" +23 15 training_loop """owa""" +23 15 negative_sampler """basic""" +23 15 evaluator """rankbased""" +23 16 dataset """kinships""" +23 16 model """complex""" +23 16 loss """softplus""" +23 16 regularizer """no""" +23 16 optimizer """adadelta""" +23 16 training_loop """owa""" +23 16 negative_sampler """basic""" +23 16 evaluator """rankbased""" +23 17 dataset """kinships""" +23 17 model """complex""" +23 17 loss """softplus""" +23 17 regularizer """no""" +23 17 optimizer """adadelta""" +23 17 training_loop """owa""" +23 17 negative_sampler """basic""" +23 17 evaluator """rankbased""" +23 18 dataset """kinships""" +23 18 model """complex""" +23 18 loss """softplus""" +23 18 regularizer """no""" +23 18 optimizer """adadelta""" +23 18 training_loop """owa""" +23 18 negative_sampler """basic""" +23 18 evaluator """rankbased""" +23 19 dataset """kinships""" +23 19 model """complex""" +23 19 loss """softplus""" +23 19 regularizer """no""" +23 19 optimizer """adadelta""" +23 19 training_loop """owa""" +23 19 negative_sampler """basic""" +23 19 evaluator """rankbased""" +23 20 dataset """kinships""" +23 20 model """complex""" +23 20 loss """softplus""" +23 20 regularizer """no""" +23 20 optimizer """adadelta""" +23 20 training_loop """owa""" +23 20 negative_sampler """basic""" +23 20 evaluator """rankbased""" +23 21 dataset 
"""kinships""" +23 21 model """complex""" +23 21 loss """softplus""" +23 21 regularizer """no""" +23 21 optimizer """adadelta""" +23 21 training_loop """owa""" +23 21 negative_sampler """basic""" +23 21 evaluator """rankbased""" +23 22 dataset """kinships""" +23 22 model """complex""" +23 22 loss """softplus""" +23 22 regularizer """no""" +23 22 optimizer """adadelta""" +23 22 training_loop """owa""" +23 22 negative_sampler """basic""" +23 22 evaluator """rankbased""" +23 23 dataset """kinships""" +23 23 model """complex""" +23 23 loss """softplus""" +23 23 regularizer """no""" +23 23 optimizer """adadelta""" +23 23 training_loop """owa""" +23 23 negative_sampler """basic""" +23 23 evaluator """rankbased""" +23 24 dataset """kinships""" +23 24 model """complex""" +23 24 loss """softplus""" +23 24 regularizer """no""" +23 24 optimizer """adadelta""" +23 24 training_loop """owa""" +23 24 negative_sampler """basic""" +23 24 evaluator """rankbased""" +23 25 dataset """kinships""" +23 25 model """complex""" +23 25 loss """softplus""" +23 25 regularizer """no""" +23 25 optimizer """adadelta""" +23 25 training_loop """owa""" +23 25 negative_sampler """basic""" +23 25 evaluator """rankbased""" +23 26 dataset """kinships""" +23 26 model """complex""" +23 26 loss """softplus""" +23 26 regularizer """no""" +23 26 optimizer """adadelta""" +23 26 training_loop """owa""" +23 26 negative_sampler """basic""" +23 26 evaluator """rankbased""" +23 27 dataset """kinships""" +23 27 model """complex""" +23 27 loss """softplus""" +23 27 regularizer """no""" +23 27 optimizer """adadelta""" +23 27 training_loop """owa""" +23 27 negative_sampler """basic""" +23 27 evaluator """rankbased""" +23 28 dataset """kinships""" +23 28 model """complex""" +23 28 loss """softplus""" +23 28 regularizer """no""" +23 28 optimizer """adadelta""" +23 28 training_loop """owa""" +23 28 negative_sampler """basic""" +23 28 evaluator """rankbased""" +23 29 dataset """kinships""" +23 29 model """complex""" +23 
29 loss """softplus""" +23 29 regularizer """no""" +23 29 optimizer """adadelta""" +23 29 training_loop """owa""" +23 29 negative_sampler """basic""" +23 29 evaluator """rankbased""" +23 30 dataset """kinships""" +23 30 model """complex""" +23 30 loss """softplus""" +23 30 regularizer """no""" +23 30 optimizer """adadelta""" +23 30 training_loop """owa""" +23 30 negative_sampler """basic""" +23 30 evaluator """rankbased""" +23 31 dataset """kinships""" +23 31 model """complex""" +23 31 loss """softplus""" +23 31 regularizer """no""" +23 31 optimizer """adadelta""" +23 31 training_loop """owa""" +23 31 negative_sampler """basic""" +23 31 evaluator """rankbased""" +23 32 dataset """kinships""" +23 32 model """complex""" +23 32 loss """softplus""" +23 32 regularizer """no""" +23 32 optimizer """adadelta""" +23 32 training_loop """owa""" +23 32 negative_sampler """basic""" +23 32 evaluator """rankbased""" +23 33 dataset """kinships""" +23 33 model """complex""" +23 33 loss """softplus""" +23 33 regularizer """no""" +23 33 optimizer """adadelta""" +23 33 training_loop """owa""" +23 33 negative_sampler """basic""" +23 33 evaluator """rankbased""" +23 34 dataset """kinships""" +23 34 model """complex""" +23 34 loss """softplus""" +23 34 regularizer """no""" +23 34 optimizer """adadelta""" +23 34 training_loop """owa""" +23 34 negative_sampler """basic""" +23 34 evaluator """rankbased""" +23 35 dataset """kinships""" +23 35 model """complex""" +23 35 loss """softplus""" +23 35 regularizer """no""" +23 35 optimizer """adadelta""" +23 35 training_loop """owa""" +23 35 negative_sampler """basic""" +23 35 evaluator """rankbased""" +23 36 dataset """kinships""" +23 36 model """complex""" +23 36 loss """softplus""" +23 36 regularizer """no""" +23 36 optimizer """adadelta""" +23 36 training_loop """owa""" +23 36 negative_sampler """basic""" +23 36 evaluator """rankbased""" +23 37 dataset """kinships""" +23 37 model """complex""" +23 37 loss """softplus""" +23 37 regularizer 
"""no""" +23 37 optimizer """adadelta""" +23 37 training_loop """owa""" +23 37 negative_sampler """basic""" +23 37 evaluator """rankbased""" +23 38 dataset """kinships""" +23 38 model """complex""" +23 38 loss """softplus""" +23 38 regularizer """no""" +23 38 optimizer """adadelta""" +23 38 training_loop """owa""" +23 38 negative_sampler """basic""" +23 38 evaluator """rankbased""" +23 39 dataset """kinships""" +23 39 model """complex""" +23 39 loss """softplus""" +23 39 regularizer """no""" +23 39 optimizer """adadelta""" +23 39 training_loop """owa""" +23 39 negative_sampler """basic""" +23 39 evaluator """rankbased""" +23 40 dataset """kinships""" +23 40 model """complex""" +23 40 loss """softplus""" +23 40 regularizer """no""" +23 40 optimizer """adadelta""" +23 40 training_loop """owa""" +23 40 negative_sampler """basic""" +23 40 evaluator """rankbased""" +23 41 dataset """kinships""" +23 41 model """complex""" +23 41 loss """softplus""" +23 41 regularizer """no""" +23 41 optimizer """adadelta""" +23 41 training_loop """owa""" +23 41 negative_sampler """basic""" +23 41 evaluator """rankbased""" +23 42 dataset """kinships""" +23 42 model """complex""" +23 42 loss """softplus""" +23 42 regularizer """no""" +23 42 optimizer """adadelta""" +23 42 training_loop """owa""" +23 42 negative_sampler """basic""" +23 42 evaluator """rankbased""" +23 43 dataset """kinships""" +23 43 model """complex""" +23 43 loss """softplus""" +23 43 regularizer """no""" +23 43 optimizer """adadelta""" +23 43 training_loop """owa""" +23 43 negative_sampler """basic""" +23 43 evaluator """rankbased""" +23 44 dataset """kinships""" +23 44 model """complex""" +23 44 loss """softplus""" +23 44 regularizer """no""" +23 44 optimizer """adadelta""" +23 44 training_loop """owa""" +23 44 negative_sampler """basic""" +23 44 evaluator """rankbased""" +23 45 dataset """kinships""" +23 45 model """complex""" +23 45 loss """softplus""" +23 45 regularizer """no""" +23 45 optimizer """adadelta""" +23 45 
training_loop """owa""" +23 45 negative_sampler """basic""" +23 45 evaluator """rankbased""" +23 46 dataset """kinships""" +23 46 model """complex""" +23 46 loss """softplus""" +23 46 regularizer """no""" +23 46 optimizer """adadelta""" +23 46 training_loop """owa""" +23 46 negative_sampler """basic""" +23 46 evaluator """rankbased""" +23 47 dataset """kinships""" +23 47 model """complex""" +23 47 loss """softplus""" +23 47 regularizer """no""" +23 47 optimizer """adadelta""" +23 47 training_loop """owa""" +23 47 negative_sampler """basic""" +23 47 evaluator """rankbased""" +23 48 dataset """kinships""" +23 48 model """complex""" +23 48 loss """softplus""" +23 48 regularizer """no""" +23 48 optimizer """adadelta""" +23 48 training_loop """owa""" +23 48 negative_sampler """basic""" +23 48 evaluator """rankbased""" +23 49 dataset """kinships""" +23 49 model """complex""" +23 49 loss """softplus""" +23 49 regularizer """no""" +23 49 optimizer """adadelta""" +23 49 training_loop """owa""" +23 49 negative_sampler """basic""" +23 49 evaluator """rankbased""" +23 50 dataset """kinships""" +23 50 model """complex""" +23 50 loss """softplus""" +23 50 regularizer """no""" +23 50 optimizer """adadelta""" +23 50 training_loop """owa""" +23 50 negative_sampler """basic""" +23 50 evaluator """rankbased""" +23 51 dataset """kinships""" +23 51 model """complex""" +23 51 loss """softplus""" +23 51 regularizer """no""" +23 51 optimizer """adadelta""" +23 51 training_loop """owa""" +23 51 negative_sampler """basic""" +23 51 evaluator """rankbased""" +23 52 dataset """kinships""" +23 52 model """complex""" +23 52 loss """softplus""" +23 52 regularizer """no""" +23 52 optimizer """adadelta""" +23 52 training_loop """owa""" +23 52 negative_sampler """basic""" +23 52 evaluator """rankbased""" +23 53 dataset """kinships""" +23 53 model """complex""" +23 53 loss """softplus""" +23 53 regularizer """no""" +23 53 optimizer """adadelta""" +23 53 training_loop """owa""" +23 53 negative_sampler 
"""basic""" +23 53 evaluator """rankbased""" +23 54 dataset """kinships""" +23 54 model """complex""" +23 54 loss """softplus""" +23 54 regularizer """no""" +23 54 optimizer """adadelta""" +23 54 training_loop """owa""" +23 54 negative_sampler """basic""" +23 54 evaluator """rankbased""" +23 55 dataset """kinships""" +23 55 model """complex""" +23 55 loss """softplus""" +23 55 regularizer """no""" +23 55 optimizer """adadelta""" +23 55 training_loop """owa""" +23 55 negative_sampler """basic""" +23 55 evaluator """rankbased""" +23 56 dataset """kinships""" +23 56 model """complex""" +23 56 loss """softplus""" +23 56 regularizer """no""" +23 56 optimizer """adadelta""" +23 56 training_loop """owa""" +23 56 negative_sampler """basic""" +23 56 evaluator """rankbased""" +23 57 dataset """kinships""" +23 57 model """complex""" +23 57 loss """softplus""" +23 57 regularizer """no""" +23 57 optimizer """adadelta""" +23 57 training_loop """owa""" +23 57 negative_sampler """basic""" +23 57 evaluator """rankbased""" +23 58 dataset """kinships""" +23 58 model """complex""" +23 58 loss """softplus""" +23 58 regularizer """no""" +23 58 optimizer """adadelta""" +23 58 training_loop """owa""" +23 58 negative_sampler """basic""" +23 58 evaluator """rankbased""" +23 59 dataset """kinships""" +23 59 model """complex""" +23 59 loss """softplus""" +23 59 regularizer """no""" +23 59 optimizer """adadelta""" +23 59 training_loop """owa""" +23 59 negative_sampler """basic""" +23 59 evaluator """rankbased""" +23 60 dataset """kinships""" +23 60 model """complex""" +23 60 loss """softplus""" +23 60 regularizer """no""" +23 60 optimizer """adadelta""" +23 60 training_loop """owa""" +23 60 negative_sampler """basic""" +23 60 evaluator """rankbased""" +23 61 dataset """kinships""" +23 61 model """complex""" +23 61 loss """softplus""" +23 61 regularizer """no""" +23 61 optimizer """adadelta""" +23 61 training_loop """owa""" +23 61 negative_sampler """basic""" +23 61 evaluator """rankbased""" 
+23 62 dataset """kinships""" +23 62 model """complex""" +23 62 loss """softplus""" +23 62 regularizer """no""" +23 62 optimizer """adadelta""" +23 62 training_loop """owa""" +23 62 negative_sampler """basic""" +23 62 evaluator """rankbased""" +23 63 dataset """kinships""" +23 63 model """complex""" +23 63 loss """softplus""" +23 63 regularizer """no""" +23 63 optimizer """adadelta""" +23 63 training_loop """owa""" +23 63 negative_sampler """basic""" +23 63 evaluator """rankbased""" +23 64 dataset """kinships""" +23 64 model """complex""" +23 64 loss """softplus""" +23 64 regularizer """no""" +23 64 optimizer """adadelta""" +23 64 training_loop """owa""" +23 64 negative_sampler """basic""" +23 64 evaluator """rankbased""" +23 65 dataset """kinships""" +23 65 model """complex""" +23 65 loss """softplus""" +23 65 regularizer """no""" +23 65 optimizer """adadelta""" +23 65 training_loop """owa""" +23 65 negative_sampler """basic""" +23 65 evaluator """rankbased""" +23 66 dataset """kinships""" +23 66 model """complex""" +23 66 loss """softplus""" +23 66 regularizer """no""" +23 66 optimizer """adadelta""" +23 66 training_loop """owa""" +23 66 negative_sampler """basic""" +23 66 evaluator """rankbased""" +23 67 dataset """kinships""" +23 67 model """complex""" +23 67 loss """softplus""" +23 67 regularizer """no""" +23 67 optimizer """adadelta""" +23 67 training_loop """owa""" +23 67 negative_sampler """basic""" +23 67 evaluator """rankbased""" +23 68 dataset """kinships""" +23 68 model """complex""" +23 68 loss """softplus""" +23 68 regularizer """no""" +23 68 optimizer """adadelta""" +23 68 training_loop """owa""" +23 68 negative_sampler """basic""" +23 68 evaluator """rankbased""" +23 69 dataset """kinships""" +23 69 model """complex""" +23 69 loss """softplus""" +23 69 regularizer """no""" +23 69 optimizer """adadelta""" +23 69 training_loop """owa""" +23 69 negative_sampler """basic""" +23 69 evaluator """rankbased""" +23 70 dataset """kinships""" +23 70 model 
"""complex""" +23 70 loss """softplus""" +23 70 regularizer """no""" +23 70 optimizer """adadelta""" +23 70 training_loop """owa""" +23 70 negative_sampler """basic""" +23 70 evaluator """rankbased""" +23 71 dataset """kinships""" +23 71 model """complex""" +23 71 loss """softplus""" +23 71 regularizer """no""" +23 71 optimizer """adadelta""" +23 71 training_loop """owa""" +23 71 negative_sampler """basic""" +23 71 evaluator """rankbased""" +23 72 dataset """kinships""" +23 72 model """complex""" +23 72 loss """softplus""" +23 72 regularizer """no""" +23 72 optimizer """adadelta""" +23 72 training_loop """owa""" +23 72 negative_sampler """basic""" +23 72 evaluator """rankbased""" +23 73 dataset """kinships""" +23 73 model """complex""" +23 73 loss """softplus""" +23 73 regularizer """no""" +23 73 optimizer """adadelta""" +23 73 training_loop """owa""" +23 73 negative_sampler """basic""" +23 73 evaluator """rankbased""" +23 74 dataset """kinships""" +23 74 model """complex""" +23 74 loss """softplus""" +23 74 regularizer """no""" +23 74 optimizer """adadelta""" +23 74 training_loop """owa""" +23 74 negative_sampler """basic""" +23 74 evaluator """rankbased""" +23 75 dataset """kinships""" +23 75 model """complex""" +23 75 loss """softplus""" +23 75 regularizer """no""" +23 75 optimizer """adadelta""" +23 75 training_loop """owa""" +23 75 negative_sampler """basic""" +23 75 evaluator """rankbased""" +23 76 dataset """kinships""" +23 76 model """complex""" +23 76 loss """softplus""" +23 76 regularizer """no""" +23 76 optimizer """adadelta""" +23 76 training_loop """owa""" +23 76 negative_sampler """basic""" +23 76 evaluator """rankbased""" +23 77 dataset """kinships""" +23 77 model """complex""" +23 77 loss """softplus""" +23 77 regularizer """no""" +23 77 optimizer """adadelta""" +23 77 training_loop """owa""" +23 77 negative_sampler """basic""" +23 77 evaluator """rankbased""" +23 78 dataset """kinships""" +23 78 model """complex""" +23 78 loss """softplus""" +23 78 
regularizer """no""" +23 78 optimizer """adadelta""" +23 78 training_loop """owa""" +23 78 negative_sampler """basic""" +23 78 evaluator """rankbased""" +23 79 dataset """kinships""" +23 79 model """complex""" +23 79 loss """softplus""" +23 79 regularizer """no""" +23 79 optimizer """adadelta""" +23 79 training_loop """owa""" +23 79 negative_sampler """basic""" +23 79 evaluator """rankbased""" +23 80 dataset """kinships""" +23 80 model """complex""" +23 80 loss """softplus""" +23 80 regularizer """no""" +23 80 optimizer """adadelta""" +23 80 training_loop """owa""" +23 80 negative_sampler """basic""" +23 80 evaluator """rankbased""" +23 81 dataset """kinships""" +23 81 model """complex""" +23 81 loss """softplus""" +23 81 regularizer """no""" +23 81 optimizer """adadelta""" +23 81 training_loop """owa""" +23 81 negative_sampler """basic""" +23 81 evaluator """rankbased""" +23 82 dataset """kinships""" +23 82 model """complex""" +23 82 loss """softplus""" +23 82 regularizer """no""" +23 82 optimizer """adadelta""" +23 82 training_loop """owa""" +23 82 negative_sampler """basic""" +23 82 evaluator """rankbased""" +23 83 dataset """kinships""" +23 83 model """complex""" +23 83 loss """softplus""" +23 83 regularizer """no""" +23 83 optimizer """adadelta""" +23 83 training_loop """owa""" +23 83 negative_sampler """basic""" +23 83 evaluator """rankbased""" +23 84 dataset """kinships""" +23 84 model """complex""" +23 84 loss """softplus""" +23 84 regularizer """no""" +23 84 optimizer """adadelta""" +23 84 training_loop """owa""" +23 84 negative_sampler """basic""" +23 84 evaluator """rankbased""" +23 85 dataset """kinships""" +23 85 model """complex""" +23 85 loss """softplus""" +23 85 regularizer """no""" +23 85 optimizer """adadelta""" +23 85 training_loop """owa""" +23 85 negative_sampler """basic""" +23 85 evaluator """rankbased""" +23 86 dataset """kinships""" +23 86 model """complex""" +23 86 loss """softplus""" +23 86 regularizer """no""" +23 86 optimizer 
"""adadelta""" +23 86 training_loop """owa""" +23 86 negative_sampler """basic""" +23 86 evaluator """rankbased""" +23 87 dataset """kinships""" +23 87 model """complex""" +23 87 loss """softplus""" +23 87 regularizer """no""" +23 87 optimizer """adadelta""" +23 87 training_loop """owa""" +23 87 negative_sampler """basic""" +23 87 evaluator """rankbased""" +23 88 dataset """kinships""" +23 88 model """complex""" +23 88 loss """softplus""" +23 88 regularizer """no""" +23 88 optimizer """adadelta""" +23 88 training_loop """owa""" +23 88 negative_sampler """basic""" +23 88 evaluator """rankbased""" +23 89 dataset """kinships""" +23 89 model """complex""" +23 89 loss """softplus""" +23 89 regularizer """no""" +23 89 optimizer """adadelta""" +23 89 training_loop """owa""" +23 89 negative_sampler """basic""" +23 89 evaluator """rankbased""" +23 90 dataset """kinships""" +23 90 model """complex""" +23 90 loss """softplus""" +23 90 regularizer """no""" +23 90 optimizer """adadelta""" +23 90 training_loop """owa""" +23 90 negative_sampler """basic""" +23 90 evaluator """rankbased""" +23 91 dataset """kinships""" +23 91 model """complex""" +23 91 loss """softplus""" +23 91 regularizer """no""" +23 91 optimizer """adadelta""" +23 91 training_loop """owa""" +23 91 negative_sampler """basic""" +23 91 evaluator """rankbased""" +23 92 dataset """kinships""" +23 92 model """complex""" +23 92 loss """softplus""" +23 92 regularizer """no""" +23 92 optimizer """adadelta""" +23 92 training_loop """owa""" +23 92 negative_sampler """basic""" +23 92 evaluator """rankbased""" +23 93 dataset """kinships""" +23 93 model """complex""" +23 93 loss """softplus""" +23 93 regularizer """no""" +23 93 optimizer """adadelta""" +23 93 training_loop """owa""" +23 93 negative_sampler """basic""" +23 93 evaluator """rankbased""" +23 94 dataset """kinships""" +23 94 model """complex""" +23 94 loss """softplus""" +23 94 regularizer """no""" +23 94 optimizer """adadelta""" +23 94 training_loop """owa""" 
+23 94 negative_sampler """basic""" +23 94 evaluator """rankbased""" +23 95 dataset """kinships""" +23 95 model """complex""" +23 95 loss """softplus""" +23 95 regularizer """no""" +23 95 optimizer """adadelta""" +23 95 training_loop """owa""" +23 95 negative_sampler """basic""" +23 95 evaluator """rankbased""" +23 96 dataset """kinships""" +23 96 model """complex""" +23 96 loss """softplus""" +23 96 regularizer """no""" +23 96 optimizer """adadelta""" +23 96 training_loop """owa""" +23 96 negative_sampler """basic""" +23 96 evaluator """rankbased""" +23 97 dataset """kinships""" +23 97 model """complex""" +23 97 loss """softplus""" +23 97 regularizer """no""" +23 97 optimizer """adadelta""" +23 97 training_loop """owa""" +23 97 negative_sampler """basic""" +23 97 evaluator """rankbased""" +23 98 dataset """kinships""" +23 98 model """complex""" +23 98 loss """softplus""" +23 98 regularizer """no""" +23 98 optimizer """adadelta""" +23 98 training_loop """owa""" +23 98 negative_sampler """basic""" +23 98 evaluator """rankbased""" +23 99 dataset """kinships""" +23 99 model """complex""" +23 99 loss """softplus""" +23 99 regularizer """no""" +23 99 optimizer """adadelta""" +23 99 training_loop """owa""" +23 99 negative_sampler """basic""" +23 99 evaluator """rankbased""" +23 100 dataset """kinships""" +23 100 model """complex""" +23 100 loss """softplus""" +23 100 regularizer """no""" +23 100 optimizer """adadelta""" +23 100 training_loop """owa""" +23 100 negative_sampler """basic""" +23 100 evaluator """rankbased""" +24 1 model.embedding_dim 1.0 +24 1 loss.margin 2.0232236343284535 +24 1 negative_sampler.num_negs_per_pos 35.0 +24 1 training.batch_size 0.0 +24 2 model.embedding_dim 2.0 +24 2 loss.margin 3.446297674319948 +24 2 negative_sampler.num_negs_per_pos 91.0 +24 2 training.batch_size 0.0 +24 3 model.embedding_dim 1.0 +24 3 loss.margin 0.6196241474724302 +24 3 negative_sampler.num_negs_per_pos 95.0 +24 3 training.batch_size 2.0 +24 4 model.embedding_dim 0.0 +24 
4 loss.margin 4.6896321453252074 +24 4 negative_sampler.num_negs_per_pos 68.0 +24 4 training.batch_size 0.0 +24 5 model.embedding_dim 1.0 +24 5 loss.margin 9.068218278910852 +24 5 negative_sampler.num_negs_per_pos 36.0 +24 5 training.batch_size 0.0 +24 6 model.embedding_dim 2.0 +24 6 loss.margin 8.46920779719656 +24 6 negative_sampler.num_negs_per_pos 83.0 +24 6 training.batch_size 2.0 +24 7 model.embedding_dim 0.0 +24 7 loss.margin 4.477813349455469 +24 7 negative_sampler.num_negs_per_pos 75.0 +24 7 training.batch_size 2.0 +24 8 model.embedding_dim 1.0 +24 8 loss.margin 1.9370465274002742 +24 8 negative_sampler.num_negs_per_pos 30.0 +24 8 training.batch_size 0.0 +24 9 model.embedding_dim 2.0 +24 9 loss.margin 3.824083795899706 +24 9 negative_sampler.num_negs_per_pos 0.0 +24 9 training.batch_size 1.0 +24 10 model.embedding_dim 0.0 +24 10 loss.margin 6.038582641340213 +24 10 negative_sampler.num_negs_per_pos 93.0 +24 10 training.batch_size 0.0 +24 11 model.embedding_dim 2.0 +24 11 loss.margin 9.196979605799084 +24 11 negative_sampler.num_negs_per_pos 60.0 +24 11 training.batch_size 0.0 +24 12 model.embedding_dim 0.0 +24 12 loss.margin 1.6792790624294565 +24 12 negative_sampler.num_negs_per_pos 50.0 +24 12 training.batch_size 2.0 +24 13 model.embedding_dim 0.0 +24 13 loss.margin 6.801655674929552 +24 13 negative_sampler.num_negs_per_pos 60.0 +24 13 training.batch_size 1.0 +24 14 model.embedding_dim 2.0 +24 14 loss.margin 2.7107076035201665 +24 14 negative_sampler.num_negs_per_pos 60.0 +24 14 training.batch_size 2.0 +24 15 model.embedding_dim 1.0 +24 15 loss.margin 5.00464309069102 +24 15 negative_sampler.num_negs_per_pos 54.0 +24 15 training.batch_size 0.0 +24 16 model.embedding_dim 0.0 +24 16 loss.margin 1.9612018526765183 +24 16 negative_sampler.num_negs_per_pos 16.0 +24 16 training.batch_size 2.0 +24 17 model.embedding_dim 1.0 +24 17 loss.margin 4.741818088833186 +24 17 negative_sampler.num_negs_per_pos 25.0 +24 17 training.batch_size 1.0 +24 18 
model.embedding_dim 0.0 +24 18 loss.margin 4.645369567744402 +24 18 negative_sampler.num_negs_per_pos 90.0 +24 18 training.batch_size 1.0 +24 19 model.embedding_dim 2.0 +24 19 loss.margin 3.5454019708491735 +24 19 negative_sampler.num_negs_per_pos 17.0 +24 19 training.batch_size 2.0 +24 20 model.embedding_dim 1.0 +24 20 loss.margin 4.498980821755286 +24 20 negative_sampler.num_negs_per_pos 78.0 +24 20 training.batch_size 1.0 +24 21 model.embedding_dim 1.0 +24 21 loss.margin 2.8729866360029335 +24 21 negative_sampler.num_negs_per_pos 30.0 +24 21 training.batch_size 2.0 +24 22 model.embedding_dim 0.0 +24 22 loss.margin 6.8161070666391925 +24 22 negative_sampler.num_negs_per_pos 11.0 +24 22 training.batch_size 0.0 +24 23 model.embedding_dim 2.0 +24 23 loss.margin 5.072420803425759 +24 23 negative_sampler.num_negs_per_pos 18.0 +24 23 training.batch_size 1.0 +24 24 model.embedding_dim 2.0 +24 24 loss.margin 4.3459883379543784 +24 24 negative_sampler.num_negs_per_pos 47.0 +24 24 training.batch_size 2.0 +24 25 model.embedding_dim 1.0 +24 25 loss.margin 4.466209446558798 +24 25 negative_sampler.num_negs_per_pos 40.0 +24 25 training.batch_size 0.0 +24 26 model.embedding_dim 1.0 +24 26 loss.margin 8.644526151761243 +24 26 negative_sampler.num_negs_per_pos 63.0 +24 26 training.batch_size 2.0 +24 27 model.embedding_dim 1.0 +24 27 loss.margin 1.6987322164257164 +24 27 negative_sampler.num_negs_per_pos 34.0 +24 27 training.batch_size 2.0 +24 28 model.embedding_dim 1.0 +24 28 loss.margin 3.005095298163315 +24 28 negative_sampler.num_negs_per_pos 63.0 +24 28 training.batch_size 2.0 +24 29 model.embedding_dim 0.0 +24 29 loss.margin 8.77319648573402 +24 29 negative_sampler.num_negs_per_pos 11.0 +24 29 training.batch_size 0.0 +24 30 model.embedding_dim 1.0 +24 30 loss.margin 5.722289853633965 +24 30 negative_sampler.num_negs_per_pos 11.0 +24 30 training.batch_size 2.0 +24 31 model.embedding_dim 1.0 +24 31 loss.margin 6.801408559155251 +24 31 negative_sampler.num_negs_per_pos 88.0 +24 
31 training.batch_size 2.0 +24 32 model.embedding_dim 0.0 +24 32 loss.margin 5.723484629002066 +24 32 negative_sampler.num_negs_per_pos 38.0 +24 32 training.batch_size 2.0 +24 33 model.embedding_dim 2.0 +24 33 loss.margin 8.92851986892702 +24 33 negative_sampler.num_negs_per_pos 16.0 +24 33 training.batch_size 1.0 +24 34 model.embedding_dim 2.0 +24 34 loss.margin 6.087071443787462 +24 34 negative_sampler.num_negs_per_pos 38.0 +24 34 training.batch_size 1.0 +24 35 model.embedding_dim 1.0 +24 35 loss.margin 2.9065469188778295 +24 35 negative_sampler.num_negs_per_pos 64.0 +24 35 training.batch_size 0.0 +24 36 model.embedding_dim 0.0 +24 36 loss.margin 0.6067614236558487 +24 36 negative_sampler.num_negs_per_pos 7.0 +24 36 training.batch_size 0.0 +24 37 model.embedding_dim 0.0 +24 37 loss.margin 6.120520572602361 +24 37 negative_sampler.num_negs_per_pos 85.0 +24 37 training.batch_size 1.0 +24 38 model.embedding_dim 0.0 +24 38 loss.margin 7.55918714448186 +24 38 negative_sampler.num_negs_per_pos 57.0 +24 38 training.batch_size 0.0 +24 39 model.embedding_dim 0.0 +24 39 loss.margin 3.370386724030153 +24 39 negative_sampler.num_negs_per_pos 0.0 +24 39 training.batch_size 1.0 +24 40 model.embedding_dim 0.0 +24 40 loss.margin 2.0914595632586988 +24 40 negative_sampler.num_negs_per_pos 67.0 +24 40 training.batch_size 0.0 +24 41 model.embedding_dim 2.0 +24 41 loss.margin 6.932337145230052 +24 41 negative_sampler.num_negs_per_pos 77.0 +24 41 training.batch_size 1.0 +24 42 model.embedding_dim 2.0 +24 42 loss.margin 2.4190299507874125 +24 42 negative_sampler.num_negs_per_pos 32.0 +24 42 training.batch_size 1.0 +24 43 model.embedding_dim 2.0 +24 43 loss.margin 1.2081082170433777 +24 43 negative_sampler.num_negs_per_pos 95.0 +24 43 training.batch_size 1.0 +24 44 model.embedding_dim 1.0 +24 44 loss.margin 8.806937573550702 +24 44 negative_sampler.num_negs_per_pos 35.0 +24 44 training.batch_size 0.0 +24 45 model.embedding_dim 1.0 +24 45 loss.margin 5.357338708802331 +24 45 
negative_sampler.num_negs_per_pos 32.0 +24 45 training.batch_size 2.0 +24 46 model.embedding_dim 1.0 +24 46 loss.margin 2.940015090770496 +24 46 negative_sampler.num_negs_per_pos 23.0 +24 46 training.batch_size 1.0 +24 47 model.embedding_dim 0.0 +24 47 loss.margin 5.568175518218963 +24 47 negative_sampler.num_negs_per_pos 8.0 +24 47 training.batch_size 0.0 +24 48 model.embedding_dim 0.0 +24 48 loss.margin 4.7256770473533605 +24 48 negative_sampler.num_negs_per_pos 98.0 +24 48 training.batch_size 2.0 +24 49 model.embedding_dim 2.0 +24 49 loss.margin 7.08820105395923 +24 49 negative_sampler.num_negs_per_pos 20.0 +24 49 training.batch_size 0.0 +24 50 model.embedding_dim 2.0 +24 50 loss.margin 6.192213281699549 +24 50 negative_sampler.num_negs_per_pos 16.0 +24 50 training.batch_size 2.0 +24 51 model.embedding_dim 1.0 +24 51 loss.margin 1.015907953284064 +24 51 negative_sampler.num_negs_per_pos 58.0 +24 51 training.batch_size 2.0 +24 52 model.embedding_dim 1.0 +24 52 loss.margin 2.879067858531624 +24 52 negative_sampler.num_negs_per_pos 19.0 +24 52 training.batch_size 0.0 +24 53 model.embedding_dim 1.0 +24 53 loss.margin 4.637479832883562 +24 53 negative_sampler.num_negs_per_pos 11.0 +24 53 training.batch_size 1.0 +24 54 model.embedding_dim 1.0 +24 54 loss.margin 6.423866838876515 +24 54 negative_sampler.num_negs_per_pos 84.0 +24 54 training.batch_size 2.0 +24 55 model.embedding_dim 1.0 +24 55 loss.margin 0.7082656038853977 +24 55 negative_sampler.num_negs_per_pos 21.0 +24 55 training.batch_size 1.0 +24 56 model.embedding_dim 2.0 +24 56 loss.margin 6.502013493436144 +24 56 negative_sampler.num_negs_per_pos 97.0 +24 56 training.batch_size 2.0 +24 57 model.embedding_dim 1.0 +24 57 loss.margin 2.7301482710699148 +24 57 negative_sampler.num_negs_per_pos 1.0 +24 57 training.batch_size 2.0 +24 58 model.embedding_dim 1.0 +24 58 loss.margin 6.653028419494859 +24 58 negative_sampler.num_negs_per_pos 61.0 +24 58 training.batch_size 2.0 +24 59 model.embedding_dim 1.0 +24 59 
loss.margin 1.5267108091377692 +24 59 negative_sampler.num_negs_per_pos 64.0 +24 59 training.batch_size 1.0 +24 60 model.embedding_dim 0.0 +24 60 loss.margin 5.4222776926079685 +24 60 negative_sampler.num_negs_per_pos 2.0 +24 60 training.batch_size 0.0 +24 61 model.embedding_dim 0.0 +24 61 loss.margin 5.944476942869636 +24 61 negative_sampler.num_negs_per_pos 53.0 +24 61 training.batch_size 2.0 +24 62 model.embedding_dim 2.0 +24 62 loss.margin 8.541544884825631 +24 62 negative_sampler.num_negs_per_pos 56.0 +24 62 training.batch_size 1.0 +24 63 model.embedding_dim 2.0 +24 63 loss.margin 3.304387665185745 +24 63 negative_sampler.num_negs_per_pos 49.0 +24 63 training.batch_size 2.0 +24 64 model.embedding_dim 0.0 +24 64 loss.margin 2.348810835217381 +24 64 negative_sampler.num_negs_per_pos 1.0 +24 64 training.batch_size 0.0 +24 65 model.embedding_dim 1.0 +24 65 loss.margin 4.33017491961552 +24 65 negative_sampler.num_negs_per_pos 28.0 +24 65 training.batch_size 2.0 +24 66 model.embedding_dim 2.0 +24 66 loss.margin 1.0615504523113055 +24 66 negative_sampler.num_negs_per_pos 87.0 +24 66 training.batch_size 1.0 +24 67 model.embedding_dim 0.0 +24 67 loss.margin 0.8689636927815831 +24 67 negative_sampler.num_negs_per_pos 58.0 +24 67 training.batch_size 2.0 +24 68 model.embedding_dim 2.0 +24 68 loss.margin 4.6484812967687885 +24 68 negative_sampler.num_negs_per_pos 25.0 +24 68 training.batch_size 0.0 +24 69 model.embedding_dim 1.0 +24 69 loss.margin 9.280407667825532 +24 69 negative_sampler.num_negs_per_pos 2.0 +24 69 training.batch_size 0.0 +24 70 model.embedding_dim 0.0 +24 70 loss.margin 7.517850094092064 +24 70 negative_sampler.num_negs_per_pos 59.0 +24 70 training.batch_size 2.0 +24 71 model.embedding_dim 0.0 +24 71 loss.margin 5.927557905840698 +24 71 negative_sampler.num_negs_per_pos 32.0 +24 71 training.batch_size 2.0 +24 72 model.embedding_dim 0.0 +24 72 loss.margin 3.6924045160589642 +24 72 negative_sampler.num_negs_per_pos 17.0 +24 72 training.batch_size 2.0 +24 
73 model.embedding_dim 1.0 +24 73 loss.margin 2.420310312319623 +24 73 negative_sampler.num_negs_per_pos 79.0 +24 73 training.batch_size 2.0 +24 74 model.embedding_dim 1.0 +24 74 loss.margin 3.935825808414755 +24 74 negative_sampler.num_negs_per_pos 25.0 +24 74 training.batch_size 2.0 +24 75 model.embedding_dim 2.0 +24 75 loss.margin 7.2793628155997325 +24 75 negative_sampler.num_negs_per_pos 94.0 +24 75 training.batch_size 1.0 +24 76 model.embedding_dim 0.0 +24 76 loss.margin 6.969455452524922 +24 76 negative_sampler.num_negs_per_pos 19.0 +24 76 training.batch_size 0.0 +24 77 model.embedding_dim 2.0 +24 77 loss.margin 5.274855263246326 +24 77 negative_sampler.num_negs_per_pos 78.0 +24 77 training.batch_size 0.0 +24 78 model.embedding_dim 2.0 +24 78 loss.margin 1.6361154187844273 +24 78 negative_sampler.num_negs_per_pos 57.0 +24 78 training.batch_size 2.0 +24 79 model.embedding_dim 0.0 +24 79 loss.margin 8.703955068652558 +24 79 negative_sampler.num_negs_per_pos 43.0 +24 79 training.batch_size 0.0 +24 80 model.embedding_dim 1.0 +24 80 loss.margin 9.634143823751577 +24 80 negative_sampler.num_negs_per_pos 96.0 +24 80 training.batch_size 2.0 +24 81 model.embedding_dim 2.0 +24 81 loss.margin 4.291306843243766 +24 81 negative_sampler.num_negs_per_pos 32.0 +24 81 training.batch_size 2.0 +24 82 model.embedding_dim 0.0 +24 82 loss.margin 3.424527584009113 +24 82 negative_sampler.num_negs_per_pos 63.0 +24 82 training.batch_size 0.0 +24 83 model.embedding_dim 2.0 +24 83 loss.margin 3.9965406316182586 +24 83 negative_sampler.num_negs_per_pos 46.0 +24 83 training.batch_size 2.0 +24 84 model.embedding_dim 1.0 +24 84 loss.margin 8.575673741659944 +24 84 negative_sampler.num_negs_per_pos 21.0 +24 84 training.batch_size 2.0 +24 85 model.embedding_dim 0.0 +24 85 loss.margin 3.6821539027329595 +24 85 negative_sampler.num_negs_per_pos 93.0 +24 85 training.batch_size 1.0 +24 86 model.embedding_dim 2.0 +24 86 loss.margin 7.433470182091379 +24 86 negative_sampler.num_negs_per_pos 58.0 
+24 86 training.batch_size 0.0 +24 87 model.embedding_dim 0.0 +24 87 loss.margin 8.651593080530791 +24 87 negative_sampler.num_negs_per_pos 2.0 +24 87 training.batch_size 0.0 +24 88 model.embedding_dim 1.0 +24 88 loss.margin 6.3795551200015925 +24 88 negative_sampler.num_negs_per_pos 23.0 +24 88 training.batch_size 2.0 +24 89 model.embedding_dim 1.0 +24 89 loss.margin 2.0200114310413997 +24 89 negative_sampler.num_negs_per_pos 71.0 +24 89 training.batch_size 0.0 +24 90 model.embedding_dim 0.0 +24 90 loss.margin 2.57385594219549 +24 90 negative_sampler.num_negs_per_pos 91.0 +24 90 training.batch_size 2.0 +24 91 model.embedding_dim 1.0 +24 91 loss.margin 5.754263175832121 +24 91 negative_sampler.num_negs_per_pos 27.0 +24 91 training.batch_size 2.0 +24 92 model.embedding_dim 2.0 +24 92 loss.margin 4.182247154857701 +24 92 negative_sampler.num_negs_per_pos 98.0 +24 92 training.batch_size 1.0 +24 93 model.embedding_dim 1.0 +24 93 loss.margin 6.119638152940755 +24 93 negative_sampler.num_negs_per_pos 25.0 +24 93 training.batch_size 2.0 +24 94 model.embedding_dim 1.0 +24 94 loss.margin 9.402331902220094 +24 94 negative_sampler.num_negs_per_pos 89.0 +24 94 training.batch_size 2.0 +24 95 model.embedding_dim 2.0 +24 95 loss.margin 9.94624069679053 +24 95 negative_sampler.num_negs_per_pos 17.0 +24 95 training.batch_size 2.0 +24 96 model.embedding_dim 2.0 +24 96 loss.margin 6.296808885981301 +24 96 negative_sampler.num_negs_per_pos 83.0 +24 96 training.batch_size 2.0 +24 97 model.embedding_dim 2.0 +24 97 loss.margin 4.037810496282205 +24 97 negative_sampler.num_negs_per_pos 95.0 +24 97 training.batch_size 0.0 +24 98 model.embedding_dim 0.0 +24 98 loss.margin 1.866361064561223 +24 98 negative_sampler.num_negs_per_pos 99.0 +24 98 training.batch_size 0.0 +24 99 model.embedding_dim 0.0 +24 99 loss.margin 7.7166114097951874 +24 99 negative_sampler.num_negs_per_pos 3.0 +24 99 training.batch_size 1.0 +24 100 model.embedding_dim 0.0 +24 100 loss.margin 6.7158429081440465 +24 100 
negative_sampler.num_negs_per_pos 52.0 +24 100 training.batch_size 0.0 +24 1 dataset """kinships""" +24 1 model """complex""" +24 1 loss """marginranking""" +24 1 regularizer """no""" +24 1 optimizer """adadelta""" +24 1 training_loop """owa""" +24 1 negative_sampler """basic""" +24 1 evaluator """rankbased""" +24 2 dataset """kinships""" +24 2 model """complex""" +24 2 loss """marginranking""" +24 2 regularizer """no""" +24 2 optimizer """adadelta""" +24 2 training_loop """owa""" +24 2 negative_sampler """basic""" +24 2 evaluator """rankbased""" +24 3 dataset """kinships""" +24 3 model """complex""" +24 3 loss """marginranking""" +24 3 regularizer """no""" +24 3 optimizer """adadelta""" +24 3 training_loop """owa""" +24 3 negative_sampler """basic""" +24 3 evaluator """rankbased""" +24 4 dataset """kinships""" +24 4 model """complex""" +24 4 loss """marginranking""" +24 4 regularizer """no""" +24 4 optimizer """adadelta""" +24 4 training_loop """owa""" +24 4 negative_sampler """basic""" +24 4 evaluator """rankbased""" +24 5 dataset """kinships""" +24 5 model """complex""" +24 5 loss """marginranking""" +24 5 regularizer """no""" +24 5 optimizer """adadelta""" +24 5 training_loop """owa""" +24 5 negative_sampler """basic""" +24 5 evaluator """rankbased""" +24 6 dataset """kinships""" +24 6 model """complex""" +24 6 loss """marginranking""" +24 6 regularizer """no""" +24 6 optimizer """adadelta""" +24 6 training_loop """owa""" +24 6 negative_sampler """basic""" +24 6 evaluator """rankbased""" +24 7 dataset """kinships""" +24 7 model """complex""" +24 7 loss """marginranking""" +24 7 regularizer """no""" +24 7 optimizer """adadelta""" +24 7 training_loop """owa""" +24 7 negative_sampler """basic""" +24 7 evaluator """rankbased""" +24 8 dataset """kinships""" +24 8 model """complex""" +24 8 loss """marginranking""" +24 8 regularizer """no""" +24 8 optimizer """adadelta""" +24 8 training_loop """owa""" +24 8 negative_sampler """basic""" +24 8 evaluator """rankbased""" 
+24 9 dataset """kinships""" +24 9 model """complex""" +24 9 loss """marginranking""" +24 9 regularizer """no""" +24 9 optimizer """adadelta""" +24 9 training_loop """owa""" +24 9 negative_sampler """basic""" +24 9 evaluator """rankbased""" +24 10 dataset """kinships""" +24 10 model """complex""" +24 10 loss """marginranking""" +24 10 regularizer """no""" +24 10 optimizer """adadelta""" +24 10 training_loop """owa""" +24 10 negative_sampler """basic""" +24 10 evaluator """rankbased""" +24 11 dataset """kinships""" +24 11 model """complex""" +24 11 loss """marginranking""" +24 11 regularizer """no""" +24 11 optimizer """adadelta""" +24 11 training_loop """owa""" +24 11 negative_sampler """basic""" +24 11 evaluator """rankbased""" +24 12 dataset """kinships""" +24 12 model """complex""" +24 12 loss """marginranking""" +24 12 regularizer """no""" +24 12 optimizer """adadelta""" +24 12 training_loop """owa""" +24 12 negative_sampler """basic""" +24 12 evaluator """rankbased""" +24 13 dataset """kinships""" +24 13 model """complex""" +24 13 loss """marginranking""" +24 13 regularizer """no""" +24 13 optimizer """adadelta""" +24 13 training_loop """owa""" +24 13 negative_sampler """basic""" +24 13 evaluator """rankbased""" +24 14 dataset """kinships""" +24 14 model """complex""" +24 14 loss """marginranking""" +24 14 regularizer """no""" +24 14 optimizer """adadelta""" +24 14 training_loop """owa""" +24 14 negative_sampler """basic""" +24 14 evaluator """rankbased""" +24 15 dataset """kinships""" +24 15 model """complex""" +24 15 loss """marginranking""" +24 15 regularizer """no""" +24 15 optimizer """adadelta""" +24 15 training_loop """owa""" +24 15 negative_sampler """basic""" +24 15 evaluator """rankbased""" +24 16 dataset """kinships""" +24 16 model """complex""" +24 16 loss """marginranking""" +24 16 regularizer """no""" +24 16 optimizer """adadelta""" +24 16 training_loop """owa""" +24 16 negative_sampler """basic""" +24 16 evaluator """rankbased""" +24 17 dataset 
"""kinships""" +24 17 model """complex""" +24 17 loss """marginranking""" +24 17 regularizer """no""" +24 17 optimizer """adadelta""" +24 17 training_loop """owa""" +24 17 negative_sampler """basic""" +24 17 evaluator """rankbased""" +24 18 dataset """kinships""" +24 18 model """complex""" +24 18 loss """marginranking""" +24 18 regularizer """no""" +24 18 optimizer """adadelta""" +24 18 training_loop """owa""" +24 18 negative_sampler """basic""" +24 18 evaluator """rankbased""" +24 19 dataset """kinships""" +24 19 model """complex""" +24 19 loss """marginranking""" +24 19 regularizer """no""" +24 19 optimizer """adadelta""" +24 19 training_loop """owa""" +24 19 negative_sampler """basic""" +24 19 evaluator """rankbased""" +24 20 dataset """kinships""" +24 20 model """complex""" +24 20 loss """marginranking""" +24 20 regularizer """no""" +24 20 optimizer """adadelta""" +24 20 training_loop """owa""" +24 20 negative_sampler """basic""" +24 20 evaluator """rankbased""" +24 21 dataset """kinships""" +24 21 model """complex""" +24 21 loss """marginranking""" +24 21 regularizer """no""" +24 21 optimizer """adadelta""" +24 21 training_loop """owa""" +24 21 negative_sampler """basic""" +24 21 evaluator """rankbased""" +24 22 dataset """kinships""" +24 22 model """complex""" +24 22 loss """marginranking""" +24 22 regularizer """no""" +24 22 optimizer """adadelta""" +24 22 training_loop """owa""" +24 22 negative_sampler """basic""" +24 22 evaluator """rankbased""" +24 23 dataset """kinships""" +24 23 model """complex""" +24 23 loss """marginranking""" +24 23 regularizer """no""" +24 23 optimizer """adadelta""" +24 23 training_loop """owa""" +24 23 negative_sampler """basic""" +24 23 evaluator """rankbased""" +24 24 dataset """kinships""" +24 24 model """complex""" +24 24 loss """marginranking""" +24 24 regularizer """no""" +24 24 optimizer """adadelta""" +24 24 training_loop """owa""" +24 24 negative_sampler """basic""" +24 24 evaluator """rankbased""" +24 25 dataset 
"""kinships""" +24 25 model """complex""" +24 25 loss """marginranking""" +24 25 regularizer """no""" +24 25 optimizer """adadelta""" +24 25 training_loop """owa""" +24 25 negative_sampler """basic""" +24 25 evaluator """rankbased""" +24 26 dataset """kinships""" +24 26 model """complex""" +24 26 loss """marginranking""" +24 26 regularizer """no""" +24 26 optimizer """adadelta""" +24 26 training_loop """owa""" +24 26 negative_sampler """basic""" +24 26 evaluator """rankbased""" +24 27 dataset """kinships""" +24 27 model """complex""" +24 27 loss """marginranking""" +24 27 regularizer """no""" +24 27 optimizer """adadelta""" +24 27 training_loop """owa""" +24 27 negative_sampler """basic""" +24 27 evaluator """rankbased""" +24 28 dataset """kinships""" +24 28 model """complex""" +24 28 loss """marginranking""" +24 28 regularizer """no""" +24 28 optimizer """adadelta""" +24 28 training_loop """owa""" +24 28 negative_sampler """basic""" +24 28 evaluator """rankbased""" +24 29 dataset """kinships""" +24 29 model """complex""" +24 29 loss """marginranking""" +24 29 regularizer """no""" +24 29 optimizer """adadelta""" +24 29 training_loop """owa""" +24 29 negative_sampler """basic""" +24 29 evaluator """rankbased""" +24 30 dataset """kinships""" +24 30 model """complex""" +24 30 loss """marginranking""" +24 30 regularizer """no""" +24 30 optimizer """adadelta""" +24 30 training_loop """owa""" +24 30 negative_sampler """basic""" +24 30 evaluator """rankbased""" +24 31 dataset """kinships""" +24 31 model """complex""" +24 31 loss """marginranking""" +24 31 regularizer """no""" +24 31 optimizer """adadelta""" +24 31 training_loop """owa""" +24 31 negative_sampler """basic""" +24 31 evaluator """rankbased""" +24 32 dataset """kinships""" +24 32 model """complex""" +24 32 loss """marginranking""" +24 32 regularizer """no""" +24 32 optimizer """adadelta""" +24 32 training_loop """owa""" +24 32 negative_sampler """basic""" +24 32 evaluator """rankbased""" +24 33 dataset 
"""kinships""" +24 33 model """complex""" +24 33 loss """marginranking""" +24 33 regularizer """no""" +24 33 optimizer """adadelta""" +24 33 training_loop """owa""" +24 33 negative_sampler """basic""" +24 33 evaluator """rankbased""" +24 34 dataset """kinships""" +24 34 model """complex""" +24 34 loss """marginranking""" +24 34 regularizer """no""" +24 34 optimizer """adadelta""" +24 34 training_loop """owa""" +24 34 negative_sampler """basic""" +24 34 evaluator """rankbased""" +24 35 dataset """kinships""" +24 35 model """complex""" +24 35 loss """marginranking""" +24 35 regularizer """no""" +24 35 optimizer """adadelta""" +24 35 training_loop """owa""" +24 35 negative_sampler """basic""" +24 35 evaluator """rankbased""" +24 36 dataset """kinships""" +24 36 model """complex""" +24 36 loss """marginranking""" +24 36 regularizer """no""" +24 36 optimizer """adadelta""" +24 36 training_loop """owa""" +24 36 negative_sampler """basic""" +24 36 evaluator """rankbased""" +24 37 dataset """kinships""" +24 37 model """complex""" +24 37 loss """marginranking""" +24 37 regularizer """no""" +24 37 optimizer """adadelta""" +24 37 training_loop """owa""" +24 37 negative_sampler """basic""" +24 37 evaluator """rankbased""" +24 38 dataset """kinships""" +24 38 model """complex""" +24 38 loss """marginranking""" +24 38 regularizer """no""" +24 38 optimizer """adadelta""" +24 38 training_loop """owa""" +24 38 negative_sampler """basic""" +24 38 evaluator """rankbased""" +24 39 dataset """kinships""" +24 39 model """complex""" +24 39 loss """marginranking""" +24 39 regularizer """no""" +24 39 optimizer """adadelta""" +24 39 training_loop """owa""" +24 39 negative_sampler """basic""" +24 39 evaluator """rankbased""" +24 40 dataset """kinships""" +24 40 model """complex""" +24 40 loss """marginranking""" +24 40 regularizer """no""" +24 40 optimizer """adadelta""" +24 40 training_loop """owa""" +24 40 negative_sampler """basic""" +24 40 evaluator """rankbased""" +24 41 dataset 
"""kinships""" +24 41 model """complex""" +24 41 loss """marginranking""" +24 41 regularizer """no""" +24 41 optimizer """adadelta""" +24 41 training_loop """owa""" +24 41 negative_sampler """basic""" +24 41 evaluator """rankbased""" +24 42 dataset """kinships""" +24 42 model """complex""" +24 42 loss """marginranking""" +24 42 regularizer """no""" +24 42 optimizer """adadelta""" +24 42 training_loop """owa""" +24 42 negative_sampler """basic""" +24 42 evaluator """rankbased""" +24 43 dataset """kinships""" +24 43 model """complex""" +24 43 loss """marginranking""" +24 43 regularizer """no""" +24 43 optimizer """adadelta""" +24 43 training_loop """owa""" +24 43 negative_sampler """basic""" +24 43 evaluator """rankbased""" +24 44 dataset """kinships""" +24 44 model """complex""" +24 44 loss """marginranking""" +24 44 regularizer """no""" +24 44 optimizer """adadelta""" +24 44 training_loop """owa""" +24 44 negative_sampler """basic""" +24 44 evaluator """rankbased""" +24 45 dataset """kinships""" +24 45 model """complex""" +24 45 loss """marginranking""" +24 45 regularizer """no""" +24 45 optimizer """adadelta""" +24 45 training_loop """owa""" +24 45 negative_sampler """basic""" +24 45 evaluator """rankbased""" +24 46 dataset """kinships""" +24 46 model """complex""" +24 46 loss """marginranking""" +24 46 regularizer """no""" +24 46 optimizer """adadelta""" +24 46 training_loop """owa""" +24 46 negative_sampler """basic""" +24 46 evaluator """rankbased""" +24 47 dataset """kinships""" +24 47 model """complex""" +24 47 loss """marginranking""" +24 47 regularizer """no""" +24 47 optimizer """adadelta""" +24 47 training_loop """owa""" +24 47 negative_sampler """basic""" +24 47 evaluator """rankbased""" +24 48 dataset """kinships""" +24 48 model """complex""" +24 48 loss """marginranking""" +24 48 regularizer """no""" +24 48 optimizer """adadelta""" +24 48 training_loop """owa""" +24 48 negative_sampler """basic""" +24 48 evaluator """rankbased""" +24 49 dataset 
"""kinships""" +24 49 model """complex""" +24 49 loss """marginranking""" +24 49 regularizer """no""" +24 49 optimizer """adadelta""" +24 49 training_loop """owa""" +24 49 negative_sampler """basic""" +24 49 evaluator """rankbased""" +24 50 dataset """kinships""" +24 50 model """complex""" +24 50 loss """marginranking""" +24 50 regularizer """no""" +24 50 optimizer """adadelta""" +24 50 training_loop """owa""" +24 50 negative_sampler """basic""" +24 50 evaluator """rankbased""" +24 51 dataset """kinships""" +24 51 model """complex""" +24 51 loss """marginranking""" +24 51 regularizer """no""" +24 51 optimizer """adadelta""" +24 51 training_loop """owa""" +24 51 negative_sampler """basic""" +24 51 evaluator """rankbased""" +24 52 dataset """kinships""" +24 52 model """complex""" +24 52 loss """marginranking""" +24 52 regularizer """no""" +24 52 optimizer """adadelta""" +24 52 training_loop """owa""" +24 52 negative_sampler """basic""" +24 52 evaluator """rankbased""" +24 53 dataset """kinships""" +24 53 model """complex""" +24 53 loss """marginranking""" +24 53 regularizer """no""" +24 53 optimizer """adadelta""" +24 53 training_loop """owa""" +24 53 negative_sampler """basic""" +24 53 evaluator """rankbased""" +24 54 dataset """kinships""" +24 54 model """complex""" +24 54 loss """marginranking""" +24 54 regularizer """no""" +24 54 optimizer """adadelta""" +24 54 training_loop """owa""" +24 54 negative_sampler """basic""" +24 54 evaluator """rankbased""" +24 55 dataset """kinships""" +24 55 model """complex""" +24 55 loss """marginranking""" +24 55 regularizer """no""" +24 55 optimizer """adadelta""" +24 55 training_loop """owa""" +24 55 negative_sampler """basic""" +24 55 evaluator """rankbased""" +24 56 dataset """kinships""" +24 56 model """complex""" +24 56 loss """marginranking""" +24 56 regularizer """no""" +24 56 optimizer """adadelta""" +24 56 training_loop """owa""" +24 56 negative_sampler """basic""" +24 56 evaluator """rankbased""" +24 57 dataset 
"""kinships""" +24 57 model """complex""" +24 57 loss """marginranking""" +24 57 regularizer """no""" +24 57 optimizer """adadelta""" +24 57 training_loop """owa""" +24 57 negative_sampler """basic""" +24 57 evaluator """rankbased""" +24 58 dataset """kinships""" +24 58 model """complex""" +24 58 loss """marginranking""" +24 58 regularizer """no""" +24 58 optimizer """adadelta""" +24 58 training_loop """owa""" +24 58 negative_sampler """basic""" +24 58 evaluator """rankbased""" +24 59 dataset """kinships""" +24 59 model """complex""" +24 59 loss """marginranking""" +24 59 regularizer """no""" +24 59 optimizer """adadelta""" +24 59 training_loop """owa""" +24 59 negative_sampler """basic""" +24 59 evaluator """rankbased""" +24 60 dataset """kinships""" +24 60 model """complex""" +24 60 loss """marginranking""" +24 60 regularizer """no""" +24 60 optimizer """adadelta""" +24 60 training_loop """owa""" +24 60 negative_sampler """basic""" +24 60 evaluator """rankbased""" +24 61 dataset """kinships""" +24 61 model """complex""" +24 61 loss """marginranking""" +24 61 regularizer """no""" +24 61 optimizer """adadelta""" +24 61 training_loop """owa""" +24 61 negative_sampler """basic""" +24 61 evaluator """rankbased""" +24 62 dataset """kinships""" +24 62 model """complex""" +24 62 loss """marginranking""" +24 62 regularizer """no""" +24 62 optimizer """adadelta""" +24 62 training_loop """owa""" +24 62 negative_sampler """basic""" +24 62 evaluator """rankbased""" +24 63 dataset """kinships""" +24 63 model """complex""" +24 63 loss """marginranking""" +24 63 regularizer """no""" +24 63 optimizer """adadelta""" +24 63 training_loop """owa""" +24 63 negative_sampler """basic""" +24 63 evaluator """rankbased""" +24 64 dataset """kinships""" +24 64 model """complex""" +24 64 loss """marginranking""" +24 64 regularizer """no""" +24 64 optimizer """adadelta""" +24 64 training_loop """owa""" +24 64 negative_sampler """basic""" +24 64 evaluator """rankbased""" +24 65 dataset 
"""kinships""" +24 65 model """complex""" +24 65 loss """marginranking""" +24 65 regularizer """no""" +24 65 optimizer """adadelta""" +24 65 training_loop """owa""" +24 65 negative_sampler """basic""" +24 65 evaluator """rankbased""" +24 66 dataset """kinships""" +24 66 model """complex""" +24 66 loss """marginranking""" +24 66 regularizer """no""" +24 66 optimizer """adadelta""" +24 66 training_loop """owa""" +24 66 negative_sampler """basic""" +24 66 evaluator """rankbased""" +24 67 dataset """kinships""" +24 67 model """complex""" +24 67 loss """marginranking""" +24 67 regularizer """no""" +24 67 optimizer """adadelta""" +24 67 training_loop """owa""" +24 67 negative_sampler """basic""" +24 67 evaluator """rankbased""" +24 68 dataset """kinships""" +24 68 model """complex""" +24 68 loss """marginranking""" +24 68 regularizer """no""" +24 68 optimizer """adadelta""" +24 68 training_loop """owa""" +24 68 negative_sampler """basic""" +24 68 evaluator """rankbased""" +24 69 dataset """kinships""" +24 69 model """complex""" +24 69 loss """marginranking""" +24 69 regularizer """no""" +24 69 optimizer """adadelta""" +24 69 training_loop """owa""" +24 69 negative_sampler """basic""" +24 69 evaluator """rankbased""" +24 70 dataset """kinships""" +24 70 model """complex""" +24 70 loss """marginranking""" +24 70 regularizer """no""" +24 70 optimizer """adadelta""" +24 70 training_loop """owa""" +24 70 negative_sampler """basic""" +24 70 evaluator """rankbased""" +24 71 dataset """kinships""" +24 71 model """complex""" +24 71 loss """marginranking""" +24 71 regularizer """no""" +24 71 optimizer """adadelta""" +24 71 training_loop """owa""" +24 71 negative_sampler """basic""" +24 71 evaluator """rankbased""" +24 72 dataset """kinships""" +24 72 model """complex""" +24 72 loss """marginranking""" +24 72 regularizer """no""" +24 72 optimizer """adadelta""" +24 72 training_loop """owa""" +24 72 negative_sampler """basic""" +24 72 evaluator """rankbased""" +24 73 dataset 
"""kinships""" +24 73 model """complex""" +24 73 loss """marginranking""" +24 73 regularizer """no""" +24 73 optimizer """adadelta""" +24 73 training_loop """owa""" +24 73 negative_sampler """basic""" +24 73 evaluator """rankbased""" +24 74 dataset """kinships""" +24 74 model """complex""" +24 74 loss """marginranking""" +24 74 regularizer """no""" +24 74 optimizer """adadelta""" +24 74 training_loop """owa""" +24 74 negative_sampler """basic""" +24 74 evaluator """rankbased""" +24 75 dataset """kinships""" +24 75 model """complex""" +24 75 loss """marginranking""" +24 75 regularizer """no""" +24 75 optimizer """adadelta""" +24 75 training_loop """owa""" +24 75 negative_sampler """basic""" +24 75 evaluator """rankbased""" +24 76 dataset """kinships""" +24 76 model """complex""" +24 76 loss """marginranking""" +24 76 regularizer """no""" +24 76 optimizer """adadelta""" +24 76 training_loop """owa""" +24 76 negative_sampler """basic""" +24 76 evaluator """rankbased""" +24 77 dataset """kinships""" +24 77 model """complex""" +24 77 loss """marginranking""" +24 77 regularizer """no""" +24 77 optimizer """adadelta""" +24 77 training_loop """owa""" +24 77 negative_sampler """basic""" +24 77 evaluator """rankbased""" +24 78 dataset """kinships""" +24 78 model """complex""" +24 78 loss """marginranking""" +24 78 regularizer """no""" +24 78 optimizer """adadelta""" +24 78 training_loop """owa""" +24 78 negative_sampler """basic""" +24 78 evaluator """rankbased""" +24 79 dataset """kinships""" +24 79 model """complex""" +24 79 loss """marginranking""" +24 79 regularizer """no""" +24 79 optimizer """adadelta""" +24 79 training_loop """owa""" +24 79 negative_sampler """basic""" +24 79 evaluator """rankbased""" +24 80 dataset """kinships""" +24 80 model """complex""" +24 80 loss """marginranking""" +24 80 regularizer """no""" +24 80 optimizer """adadelta""" +24 80 training_loop """owa""" +24 80 negative_sampler """basic""" +24 80 evaluator """rankbased""" +24 81 dataset 
"""kinships""" +24 81 model """complex""" +24 81 loss """marginranking""" +24 81 regularizer """no""" +24 81 optimizer """adadelta""" +24 81 training_loop """owa""" +24 81 negative_sampler """basic""" +24 81 evaluator """rankbased""" +24 82 dataset """kinships""" +24 82 model """complex""" +24 82 loss """marginranking""" +24 82 regularizer """no""" +24 82 optimizer """adadelta""" +24 82 training_loop """owa""" +24 82 negative_sampler """basic""" +24 82 evaluator """rankbased""" +24 83 dataset """kinships""" +24 83 model """complex""" +24 83 loss """marginranking""" +24 83 regularizer """no""" +24 83 optimizer """adadelta""" +24 83 training_loop """owa""" +24 83 negative_sampler """basic""" +24 83 evaluator """rankbased""" +24 84 dataset """kinships""" +24 84 model """complex""" +24 84 loss """marginranking""" +24 84 regularizer """no""" +24 84 optimizer """adadelta""" +24 84 training_loop """owa""" +24 84 negative_sampler """basic""" +24 84 evaluator """rankbased""" +24 85 dataset """kinships""" +24 85 model """complex""" +24 85 loss """marginranking""" +24 85 regularizer """no""" +24 85 optimizer """adadelta""" +24 85 training_loop """owa""" +24 85 negative_sampler """basic""" +24 85 evaluator """rankbased""" +24 86 dataset """kinships""" +24 86 model """complex""" +24 86 loss """marginranking""" +24 86 regularizer """no""" +24 86 optimizer """adadelta""" +24 86 training_loop """owa""" +24 86 negative_sampler """basic""" +24 86 evaluator """rankbased""" +24 87 dataset """kinships""" +24 87 model """complex""" +24 87 loss """marginranking""" +24 87 regularizer """no""" +24 87 optimizer """adadelta""" +24 87 training_loop """owa""" +24 87 negative_sampler """basic""" +24 87 evaluator """rankbased""" +24 88 dataset """kinships""" +24 88 model """complex""" +24 88 loss """marginranking""" +24 88 regularizer """no""" +24 88 optimizer """adadelta""" +24 88 training_loop """owa""" +24 88 negative_sampler """basic""" +24 88 evaluator """rankbased""" +24 89 dataset 
"""kinships""" +24 89 model """complex""" +24 89 loss """marginranking""" +24 89 regularizer """no""" +24 89 optimizer """adadelta""" +24 89 training_loop """owa""" +24 89 negative_sampler """basic""" +24 89 evaluator """rankbased""" +24 90 dataset """kinships""" +24 90 model """complex""" +24 90 loss """marginranking""" +24 90 regularizer """no""" +24 90 optimizer """adadelta""" +24 90 training_loop """owa""" +24 90 negative_sampler """basic""" +24 90 evaluator """rankbased""" +24 91 dataset """kinships""" +24 91 model """complex""" +24 91 loss """marginranking""" +24 91 regularizer """no""" +24 91 optimizer """adadelta""" +24 91 training_loop """owa""" +24 91 negative_sampler """basic""" +24 91 evaluator """rankbased""" +24 92 dataset """kinships""" +24 92 model """complex""" +24 92 loss """marginranking""" +24 92 regularizer """no""" +24 92 optimizer """adadelta""" +24 92 training_loop """owa""" +24 92 negative_sampler """basic""" +24 92 evaluator """rankbased""" +24 93 dataset """kinships""" +24 93 model """complex""" +24 93 loss """marginranking""" +24 93 regularizer """no""" +24 93 optimizer """adadelta""" +24 93 training_loop """owa""" +24 93 negative_sampler """basic""" +24 93 evaluator """rankbased""" +24 94 dataset """kinships""" +24 94 model """complex""" +24 94 loss """marginranking""" +24 94 regularizer """no""" +24 94 optimizer """adadelta""" +24 94 training_loop """owa""" +24 94 negative_sampler """basic""" +24 94 evaluator """rankbased""" +24 95 dataset """kinships""" +24 95 model """complex""" +24 95 loss """marginranking""" +24 95 regularizer """no""" +24 95 optimizer """adadelta""" +24 95 training_loop """owa""" +24 95 negative_sampler """basic""" +24 95 evaluator """rankbased""" +24 96 dataset """kinships""" +24 96 model """complex""" +24 96 loss """marginranking""" +24 96 regularizer """no""" +24 96 optimizer """adadelta""" +24 96 training_loop """owa""" +24 96 negative_sampler """basic""" +24 96 evaluator """rankbased""" +24 97 dataset 
"""kinships""" +24 97 model """complex""" +24 97 loss """marginranking""" +24 97 regularizer """no""" +24 97 optimizer """adadelta""" +24 97 training_loop """owa""" +24 97 negative_sampler """basic""" +24 97 evaluator """rankbased""" +24 98 dataset """kinships""" +24 98 model """complex""" +24 98 loss """marginranking""" +24 98 regularizer """no""" +24 98 optimizer """adadelta""" +24 98 training_loop """owa""" +24 98 negative_sampler """basic""" +24 98 evaluator """rankbased""" +24 99 dataset """kinships""" +24 99 model """complex""" +24 99 loss """marginranking""" +24 99 regularizer """no""" +24 99 optimizer """adadelta""" +24 99 training_loop """owa""" +24 99 negative_sampler """basic""" +24 99 evaluator """rankbased""" +24 100 dataset """kinships""" +24 100 model """complex""" +24 100 loss """marginranking""" +24 100 regularizer """no""" +24 100 optimizer """adadelta""" +24 100 training_loop """owa""" +24 100 negative_sampler """basic""" +24 100 evaluator """rankbased""" +25 1 model.embedding_dim 1.0 +25 1 loss.margin 1.458991304033362 +25 1 negative_sampler.num_negs_per_pos 82.0 +25 1 training.batch_size 2.0 +25 2 model.embedding_dim 2.0 +25 2 loss.margin 3.1416959223053245 +25 2 negative_sampler.num_negs_per_pos 66.0 +25 2 training.batch_size 1.0 +25 3 model.embedding_dim 0.0 +25 3 loss.margin 4.394563130881782 +25 3 negative_sampler.num_negs_per_pos 33.0 +25 3 training.batch_size 0.0 +25 4 model.embedding_dim 2.0 +25 4 loss.margin 5.078361621539453 +25 4 negative_sampler.num_negs_per_pos 90.0 +25 4 training.batch_size 0.0 +25 5 model.embedding_dim 0.0 +25 5 loss.margin 2.147091066729666 +25 5 negative_sampler.num_negs_per_pos 82.0 +25 5 training.batch_size 2.0 +25 6 model.embedding_dim 0.0 +25 6 loss.margin 1.022757666589803 +25 6 negative_sampler.num_negs_per_pos 26.0 +25 6 training.batch_size 2.0 +25 7 model.embedding_dim 2.0 +25 7 loss.margin 8.614513708122725 +25 7 negative_sampler.num_negs_per_pos 32.0 +25 7 training.batch_size 2.0 +25 8 
model.embedding_dim 0.0 +25 8 loss.margin 7.327724864667459 +25 8 negative_sampler.num_negs_per_pos 3.0 +25 8 training.batch_size 2.0 +25 9 model.embedding_dim 0.0 +25 9 loss.margin 0.7046159973742767 +25 9 negative_sampler.num_negs_per_pos 90.0 +25 9 training.batch_size 1.0 +25 10 model.embedding_dim 1.0 +25 10 loss.margin 5.708148340421093 +25 10 negative_sampler.num_negs_per_pos 77.0 +25 10 training.batch_size 1.0 +25 11 model.embedding_dim 0.0 +25 11 loss.margin 5.323639429253643 +25 11 negative_sampler.num_negs_per_pos 64.0 +25 11 training.batch_size 1.0 +25 12 model.embedding_dim 0.0 +25 12 loss.margin 5.182803100894085 +25 12 negative_sampler.num_negs_per_pos 63.0 +25 12 training.batch_size 0.0 +25 13 model.embedding_dim 1.0 +25 13 loss.margin 3.068739143924211 +25 13 negative_sampler.num_negs_per_pos 98.0 +25 13 training.batch_size 2.0 +25 14 model.embedding_dim 2.0 +25 14 loss.margin 6.011939894215084 +25 14 negative_sampler.num_negs_per_pos 39.0 +25 14 training.batch_size 2.0 +25 15 model.embedding_dim 2.0 +25 15 loss.margin 3.579122681045646 +25 15 negative_sampler.num_negs_per_pos 78.0 +25 15 training.batch_size 2.0 +25 16 model.embedding_dim 1.0 +25 16 loss.margin 8.66600276926602 +25 16 negative_sampler.num_negs_per_pos 2.0 +25 16 training.batch_size 1.0 +25 17 model.embedding_dim 1.0 +25 17 loss.margin 5.279327965491098 +25 17 negative_sampler.num_negs_per_pos 17.0 +25 17 training.batch_size 0.0 +25 18 model.embedding_dim 1.0 +25 18 loss.margin 3.3486914654042224 +25 18 negative_sampler.num_negs_per_pos 27.0 +25 18 training.batch_size 0.0 +25 19 model.embedding_dim 1.0 +25 19 loss.margin 2.7434648760585887 +25 19 negative_sampler.num_negs_per_pos 30.0 +25 19 training.batch_size 2.0 +25 20 model.embedding_dim 0.0 +25 20 loss.margin 8.183017763211403 +25 20 negative_sampler.num_negs_per_pos 21.0 +25 20 training.batch_size 2.0 +25 21 model.embedding_dim 1.0 +25 21 loss.margin 5.554953783230668 +25 21 negative_sampler.num_negs_per_pos 39.0 +25 21 
training.batch_size 2.0 +25 22 model.embedding_dim 2.0 +25 22 loss.margin 2.259611632055358 +25 22 negative_sampler.num_negs_per_pos 72.0 +25 22 training.batch_size 2.0 +25 23 model.embedding_dim 0.0 +25 23 loss.margin 4.979769767422646 +25 23 negative_sampler.num_negs_per_pos 39.0 +25 23 training.batch_size 2.0 +25 24 model.embedding_dim 2.0 +25 24 loss.margin 5.9997635005948 +25 24 negative_sampler.num_negs_per_pos 99.0 +25 24 training.batch_size 0.0 +25 25 model.embedding_dim 1.0 +25 25 loss.margin 0.9711167231548526 +25 25 negative_sampler.num_negs_per_pos 49.0 +25 25 training.batch_size 1.0 +25 26 model.embedding_dim 1.0 +25 26 loss.margin 9.815585851992537 +25 26 negative_sampler.num_negs_per_pos 1.0 +25 26 training.batch_size 2.0 +25 27 model.embedding_dim 0.0 +25 27 loss.margin 1.0722978854683878 +25 27 negative_sampler.num_negs_per_pos 44.0 +25 27 training.batch_size 1.0 +25 28 model.embedding_dim 0.0 +25 28 loss.margin 9.876997916908522 +25 28 negative_sampler.num_negs_per_pos 64.0 +25 28 training.batch_size 2.0 +25 29 model.embedding_dim 2.0 +25 29 loss.margin 7.796832297682628 +25 29 negative_sampler.num_negs_per_pos 8.0 +25 29 training.batch_size 2.0 +25 30 model.embedding_dim 1.0 +25 30 loss.margin 9.519225607155388 +25 30 negative_sampler.num_negs_per_pos 38.0 +25 30 training.batch_size 2.0 +25 31 model.embedding_dim 1.0 +25 31 loss.margin 7.046061352121088 +25 31 negative_sampler.num_negs_per_pos 74.0 +25 31 training.batch_size 2.0 +25 32 model.embedding_dim 0.0 +25 32 loss.margin 2.3093869807559453 +25 32 negative_sampler.num_negs_per_pos 47.0 +25 32 training.batch_size 1.0 +25 33 model.embedding_dim 0.0 +25 33 loss.margin 1.3483082043305585 +25 33 negative_sampler.num_negs_per_pos 8.0 +25 33 training.batch_size 1.0 +25 34 model.embedding_dim 2.0 +25 34 loss.margin 7.062323236681015 +25 34 negative_sampler.num_negs_per_pos 48.0 +25 34 training.batch_size 2.0 +25 35 model.embedding_dim 1.0 +25 35 loss.margin 6.430100405710736 +25 35 
negative_sampler.num_negs_per_pos 62.0 +25 35 training.batch_size 0.0 +25 36 model.embedding_dim 0.0 +25 36 loss.margin 5.621773789571925 +25 36 negative_sampler.num_negs_per_pos 99.0 +25 36 training.batch_size 2.0 +25 37 model.embedding_dim 2.0 +25 37 loss.margin 7.508118008960724 +25 37 negative_sampler.num_negs_per_pos 22.0 +25 37 training.batch_size 0.0 +25 38 model.embedding_dim 2.0 +25 38 loss.margin 2.0954309139685137 +25 38 negative_sampler.num_negs_per_pos 2.0 +25 38 training.batch_size 1.0 +25 39 model.embedding_dim 0.0 +25 39 loss.margin 4.346179209572632 +25 39 negative_sampler.num_negs_per_pos 9.0 +25 39 training.batch_size 1.0 +25 40 model.embedding_dim 1.0 +25 40 loss.margin 4.6650107302235275 +25 40 negative_sampler.num_negs_per_pos 29.0 +25 40 training.batch_size 2.0 +25 41 model.embedding_dim 2.0 +25 41 loss.margin 3.682184465472623 +25 41 negative_sampler.num_negs_per_pos 97.0 +25 41 training.batch_size 2.0 +25 42 model.embedding_dim 2.0 +25 42 loss.margin 2.4727078660970023 +25 42 negative_sampler.num_negs_per_pos 96.0 +25 42 training.batch_size 1.0 +25 43 model.embedding_dim 1.0 +25 43 loss.margin 4.61276338321255 +25 43 negative_sampler.num_negs_per_pos 49.0 +25 43 training.batch_size 2.0 +25 44 model.embedding_dim 2.0 +25 44 loss.margin 4.504523941118647 +25 44 negative_sampler.num_negs_per_pos 30.0 +25 44 training.batch_size 1.0 +25 45 model.embedding_dim 2.0 +25 45 loss.margin 8.822217876682144 +25 45 negative_sampler.num_negs_per_pos 26.0 +25 45 training.batch_size 1.0 +25 46 model.embedding_dim 1.0 +25 46 loss.margin 1.4966045611887728 +25 46 negative_sampler.num_negs_per_pos 63.0 +25 46 training.batch_size 2.0 +25 47 model.embedding_dim 1.0 +25 47 loss.margin 8.121184012770163 +25 47 negative_sampler.num_negs_per_pos 72.0 +25 47 training.batch_size 1.0 +25 48 model.embedding_dim 2.0 +25 48 loss.margin 4.146508513207655 +25 48 negative_sampler.num_negs_per_pos 40.0 +25 48 training.batch_size 1.0 +25 49 model.embedding_dim 1.0 +25 49 
loss.margin 9.411942237569638 +25 49 negative_sampler.num_negs_per_pos 44.0 +25 49 training.batch_size 2.0 +25 50 model.embedding_dim 2.0 +25 50 loss.margin 1.7590585171341844 +25 50 negative_sampler.num_negs_per_pos 41.0 +25 50 training.batch_size 1.0 +25 51 model.embedding_dim 1.0 +25 51 loss.margin 9.457367699834242 +25 51 negative_sampler.num_negs_per_pos 25.0 +25 51 training.batch_size 0.0 +25 52 model.embedding_dim 1.0 +25 52 loss.margin 9.159662342117402 +25 52 negative_sampler.num_negs_per_pos 50.0 +25 52 training.batch_size 1.0 +25 53 model.embedding_dim 1.0 +25 53 loss.margin 3.7528382532192928 +25 53 negative_sampler.num_negs_per_pos 66.0 +25 53 training.batch_size 2.0 +25 54 model.embedding_dim 2.0 +25 54 loss.margin 2.1840055248111088 +25 54 negative_sampler.num_negs_per_pos 4.0 +25 54 training.batch_size 0.0 +25 55 model.embedding_dim 2.0 +25 55 loss.margin 8.989901111613102 +25 55 negative_sampler.num_negs_per_pos 27.0 +25 55 training.batch_size 2.0 +25 56 model.embedding_dim 0.0 +25 56 loss.margin 7.6056477899626636 +25 56 negative_sampler.num_negs_per_pos 0.0 +25 56 training.batch_size 2.0 +25 57 model.embedding_dim 2.0 +25 57 loss.margin 6.1173224124892975 +25 57 negative_sampler.num_negs_per_pos 12.0 +25 57 training.batch_size 1.0 +25 58 model.embedding_dim 2.0 +25 58 loss.margin 4.935109268606258 +25 58 negative_sampler.num_negs_per_pos 94.0 +25 58 training.batch_size 1.0 +25 59 model.embedding_dim 1.0 +25 59 loss.margin 7.879578518023197 +25 59 negative_sampler.num_negs_per_pos 47.0 +25 59 training.batch_size 0.0 +25 60 model.embedding_dim 2.0 +25 60 loss.margin 4.65756768736039 +25 60 negative_sampler.num_negs_per_pos 39.0 +25 60 training.batch_size 1.0 +25 61 model.embedding_dim 0.0 +25 61 loss.margin 1.593443149579872 +25 61 negative_sampler.num_negs_per_pos 3.0 +25 61 training.batch_size 2.0 +25 62 model.embedding_dim 2.0 +25 62 loss.margin 2.1452985375947864 +25 62 negative_sampler.num_negs_per_pos 0.0 +25 62 training.batch_size 2.0 +25 63 
model.embedding_dim 2.0 +25 63 loss.margin 8.54110628436868 +25 63 negative_sampler.num_negs_per_pos 54.0 +25 63 training.batch_size 1.0 +25 64 model.embedding_dim 1.0 +25 64 loss.margin 9.894759292869713 +25 64 negative_sampler.num_negs_per_pos 53.0 +25 64 training.batch_size 1.0 +25 65 model.embedding_dim 2.0 +25 65 loss.margin 1.6607976123493993 +25 65 negative_sampler.num_negs_per_pos 65.0 +25 65 training.batch_size 2.0 +25 66 model.embedding_dim 0.0 +25 66 loss.margin 9.529665752542538 +25 66 negative_sampler.num_negs_per_pos 89.0 +25 66 training.batch_size 0.0 +25 67 model.embedding_dim 0.0 +25 67 loss.margin 7.066400394207795 +25 67 negative_sampler.num_negs_per_pos 14.0 +25 67 training.batch_size 2.0 +25 68 model.embedding_dim 0.0 +25 68 loss.margin 5.283110040406434 +25 68 negative_sampler.num_negs_per_pos 3.0 +25 68 training.batch_size 0.0 +25 69 model.embedding_dim 1.0 +25 69 loss.margin 0.7248141667135113 +25 69 negative_sampler.num_negs_per_pos 51.0 +25 69 training.batch_size 0.0 +25 70 model.embedding_dim 2.0 +25 70 loss.margin 3.2612873040161903 +25 70 negative_sampler.num_negs_per_pos 77.0 +25 70 training.batch_size 0.0 +25 71 model.embedding_dim 0.0 +25 71 loss.margin 5.906178918247616 +25 71 negative_sampler.num_negs_per_pos 79.0 +25 71 training.batch_size 2.0 +25 72 model.embedding_dim 2.0 +25 72 loss.margin 5.917041627905359 +25 72 negative_sampler.num_negs_per_pos 45.0 +25 72 training.batch_size 2.0 +25 73 model.embedding_dim 1.0 +25 73 loss.margin 2.698859080976566 +25 73 negative_sampler.num_negs_per_pos 24.0 +25 73 training.batch_size 0.0 +25 74 model.embedding_dim 1.0 +25 74 loss.margin 2.9721409233714895 +25 74 negative_sampler.num_negs_per_pos 52.0 +25 74 training.batch_size 0.0 +25 75 model.embedding_dim 1.0 +25 75 loss.margin 7.480892832756661 +25 75 negative_sampler.num_negs_per_pos 13.0 +25 75 training.batch_size 1.0 +25 76 model.embedding_dim 2.0 +25 76 loss.margin 7.641256903150894 +25 76 negative_sampler.num_negs_per_pos 36.0 +25 
76 training.batch_size 2.0 +25 77 model.embedding_dim 1.0 +25 77 loss.margin 6.614595954666421 +25 77 negative_sampler.num_negs_per_pos 34.0 +25 77 training.batch_size 2.0 +25 78 model.embedding_dim 2.0 +25 78 loss.margin 4.494019124588049 +25 78 negative_sampler.num_negs_per_pos 78.0 +25 78 training.batch_size 0.0 +25 79 model.embedding_dim 2.0 +25 79 loss.margin 4.129673924850316 +25 79 negative_sampler.num_negs_per_pos 15.0 +25 79 training.batch_size 1.0 +25 80 model.embedding_dim 0.0 +25 80 loss.margin 7.820103367270887 +25 80 negative_sampler.num_negs_per_pos 15.0 +25 80 training.batch_size 2.0 +25 81 model.embedding_dim 1.0 +25 81 loss.margin 1.9048819015635106 +25 81 negative_sampler.num_negs_per_pos 26.0 +25 81 training.batch_size 0.0 +25 82 model.embedding_dim 1.0 +25 82 loss.margin 9.416083999614012 +25 82 negative_sampler.num_negs_per_pos 88.0 +25 82 training.batch_size 0.0 +25 83 model.embedding_dim 0.0 +25 83 loss.margin 4.73147771250965 +25 83 negative_sampler.num_negs_per_pos 15.0 +25 83 training.batch_size 2.0 +25 84 model.embedding_dim 2.0 +25 84 loss.margin 7.8069169539237615 +25 84 negative_sampler.num_negs_per_pos 82.0 +25 84 training.batch_size 0.0 +25 85 model.embedding_dim 2.0 +25 85 loss.margin 7.076196970320421 +25 85 negative_sampler.num_negs_per_pos 37.0 +25 85 training.batch_size 1.0 +25 86 model.embedding_dim 0.0 +25 86 loss.margin 8.74264627481285 +25 86 negative_sampler.num_negs_per_pos 2.0 +25 86 training.batch_size 0.0 +25 87 model.embedding_dim 2.0 +25 87 loss.margin 4.955426334642533 +25 87 negative_sampler.num_negs_per_pos 10.0 +25 87 training.batch_size 2.0 +25 88 model.embedding_dim 2.0 +25 88 loss.margin 9.11973716152719 +25 88 negative_sampler.num_negs_per_pos 94.0 +25 88 training.batch_size 1.0 +25 89 model.embedding_dim 0.0 +25 89 loss.margin 6.65017625133082 +25 89 negative_sampler.num_negs_per_pos 5.0 +25 89 training.batch_size 0.0 +25 90 model.embedding_dim 0.0 +25 90 loss.margin 9.588409203369936 +25 90 
negative_sampler.num_negs_per_pos 9.0 +25 90 training.batch_size 1.0 +25 91 model.embedding_dim 1.0 +25 91 loss.margin 8.836471835797981 +25 91 negative_sampler.num_negs_per_pos 29.0 +25 91 training.batch_size 0.0 +25 92 model.embedding_dim 0.0 +25 92 loss.margin 9.613170763738854 +25 92 negative_sampler.num_negs_per_pos 88.0 +25 92 training.batch_size 2.0 +25 93 model.embedding_dim 2.0 +25 93 loss.margin 8.444564086185398 +25 93 negative_sampler.num_negs_per_pos 30.0 +25 93 training.batch_size 1.0 +25 94 model.embedding_dim 0.0 +25 94 loss.margin 4.704847946994281 +25 94 negative_sampler.num_negs_per_pos 85.0 +25 94 training.batch_size 0.0 +25 95 model.embedding_dim 1.0 +25 95 loss.margin 8.962216373587836 +25 95 negative_sampler.num_negs_per_pos 74.0 +25 95 training.batch_size 2.0 +25 96 model.embedding_dim 2.0 +25 96 loss.margin 4.086718875591615 +25 96 negative_sampler.num_negs_per_pos 64.0 +25 96 training.batch_size 2.0 +25 97 model.embedding_dim 0.0 +25 97 loss.margin 2.728394702445177 +25 97 negative_sampler.num_negs_per_pos 41.0 +25 97 training.batch_size 2.0 +25 98 model.embedding_dim 0.0 +25 98 loss.margin 6.293296990545474 +25 98 negative_sampler.num_negs_per_pos 34.0 +25 98 training.batch_size 2.0 +25 99 model.embedding_dim 1.0 +25 99 loss.margin 7.55569480864537 +25 99 negative_sampler.num_negs_per_pos 42.0 +25 99 training.batch_size 0.0 +25 100 model.embedding_dim 2.0 +25 100 loss.margin 0.5268741017996201 +25 100 negative_sampler.num_negs_per_pos 40.0 +25 100 training.batch_size 1.0 +25 1 dataset """kinships""" +25 1 model """complex""" +25 1 loss """marginranking""" +25 1 regularizer """no""" +25 1 optimizer """adadelta""" +25 1 training_loop """owa""" +25 1 negative_sampler """basic""" +25 1 evaluator """rankbased""" +25 2 dataset """kinships""" +25 2 model """complex""" +25 2 loss """marginranking""" +25 2 regularizer """no""" +25 2 optimizer """adadelta""" +25 2 training_loop """owa""" +25 2 negative_sampler """basic""" +25 2 evaluator 
"""rankbased""" +25 3 dataset """kinships""" +25 3 model """complex""" +25 3 loss """marginranking""" +25 3 regularizer """no""" +25 3 optimizer """adadelta""" +25 3 training_loop """owa""" +25 3 negative_sampler """basic""" +25 3 evaluator """rankbased""" +25 4 dataset """kinships""" +25 4 model """complex""" +25 4 loss """marginranking""" +25 4 regularizer """no""" +25 4 optimizer """adadelta""" +25 4 training_loop """owa""" +25 4 negative_sampler """basic""" +25 4 evaluator """rankbased""" +25 5 dataset """kinships""" +25 5 model """complex""" +25 5 loss """marginranking""" +25 5 regularizer """no""" +25 5 optimizer """adadelta""" +25 5 training_loop """owa""" +25 5 negative_sampler """basic""" +25 5 evaluator """rankbased""" +25 6 dataset """kinships""" +25 6 model """complex""" +25 6 loss """marginranking""" +25 6 regularizer """no""" +25 6 optimizer """adadelta""" +25 6 training_loop """owa""" +25 6 negative_sampler """basic""" +25 6 evaluator """rankbased""" +25 7 dataset """kinships""" +25 7 model """complex""" +25 7 loss """marginranking""" +25 7 regularizer """no""" +25 7 optimizer """adadelta""" +25 7 training_loop """owa""" +25 7 negative_sampler """basic""" +25 7 evaluator """rankbased""" +25 8 dataset """kinships""" +25 8 model """complex""" +25 8 loss """marginranking""" +25 8 regularizer """no""" +25 8 optimizer """adadelta""" +25 8 training_loop """owa""" +25 8 negative_sampler """basic""" +25 8 evaluator """rankbased""" +25 9 dataset """kinships""" +25 9 model """complex""" +25 9 loss """marginranking""" +25 9 regularizer """no""" +25 9 optimizer """adadelta""" +25 9 training_loop """owa""" +25 9 negative_sampler """basic""" +25 9 evaluator """rankbased""" +25 10 dataset """kinships""" +25 10 model """complex""" +25 10 loss """marginranking""" +25 10 regularizer """no""" +25 10 optimizer """adadelta""" +25 10 training_loop """owa""" +25 10 negative_sampler """basic""" +25 10 evaluator """rankbased""" +25 11 dataset """kinships""" +25 11 model 
"""complex""" +25 11 loss """marginranking""" +25 11 regularizer """no""" +25 11 optimizer """adadelta""" +25 11 training_loop """owa""" +25 11 negative_sampler """basic""" +25 11 evaluator """rankbased""" +25 12 dataset """kinships""" +25 12 model """complex""" +25 12 loss """marginranking""" +25 12 regularizer """no""" +25 12 optimizer """adadelta""" +25 12 training_loop """owa""" +25 12 negative_sampler """basic""" +25 12 evaluator """rankbased""" +25 13 dataset """kinships""" +25 13 model """complex""" +25 13 loss """marginranking""" +25 13 regularizer """no""" +25 13 optimizer """adadelta""" +25 13 training_loop """owa""" +25 13 negative_sampler """basic""" +25 13 evaluator """rankbased""" +25 14 dataset """kinships""" +25 14 model """complex""" +25 14 loss """marginranking""" +25 14 regularizer """no""" +25 14 optimizer """adadelta""" +25 14 training_loop """owa""" +25 14 negative_sampler """basic""" +25 14 evaluator """rankbased""" +25 15 dataset """kinships""" +25 15 model """complex""" +25 15 loss """marginranking""" +25 15 regularizer """no""" +25 15 optimizer """adadelta""" +25 15 training_loop """owa""" +25 15 negative_sampler """basic""" +25 15 evaluator """rankbased""" +25 16 dataset """kinships""" +25 16 model """complex""" +25 16 loss """marginranking""" +25 16 regularizer """no""" +25 16 optimizer """adadelta""" +25 16 training_loop """owa""" +25 16 negative_sampler """basic""" +25 16 evaluator """rankbased""" +25 17 dataset """kinships""" +25 17 model """complex""" +25 17 loss """marginranking""" +25 17 regularizer """no""" +25 17 optimizer """adadelta""" +25 17 training_loop """owa""" +25 17 negative_sampler """basic""" +25 17 evaluator """rankbased""" +25 18 dataset """kinships""" +25 18 model """complex""" +25 18 loss """marginranking""" +25 18 regularizer """no""" +25 18 optimizer """adadelta""" +25 18 training_loop """owa""" +25 18 negative_sampler """basic""" +25 18 evaluator """rankbased""" +25 19 dataset """kinships""" +25 19 model 
"""complex""" +25 19 loss """marginranking""" +25 19 regularizer """no""" +25 19 optimizer """adadelta""" +25 19 training_loop """owa""" +25 19 negative_sampler """basic""" +25 19 evaluator """rankbased""" +25 20 dataset """kinships""" +25 20 model """complex""" +25 20 loss """marginranking""" +25 20 regularizer """no""" +25 20 optimizer """adadelta""" +25 20 training_loop """owa""" +25 20 negative_sampler """basic""" +25 20 evaluator """rankbased""" +25 21 dataset """kinships""" +25 21 model """complex""" +25 21 loss """marginranking""" +25 21 regularizer """no""" +25 21 optimizer """adadelta""" +25 21 training_loop """owa""" +25 21 negative_sampler """basic""" +25 21 evaluator """rankbased""" +25 22 dataset """kinships""" +25 22 model """complex""" +25 22 loss """marginranking""" +25 22 regularizer """no""" +25 22 optimizer """adadelta""" +25 22 training_loop """owa""" +25 22 negative_sampler """basic""" +25 22 evaluator """rankbased""" +25 23 dataset """kinships""" +25 23 model """complex""" +25 23 loss """marginranking""" +25 23 regularizer """no""" +25 23 optimizer """adadelta""" +25 23 training_loop """owa""" +25 23 negative_sampler """basic""" +25 23 evaluator """rankbased""" +25 24 dataset """kinships""" +25 24 model """complex""" +25 24 loss """marginranking""" +25 24 regularizer """no""" +25 24 optimizer """adadelta""" +25 24 training_loop """owa""" +25 24 negative_sampler """basic""" +25 24 evaluator """rankbased""" +25 25 dataset """kinships""" +25 25 model """complex""" +25 25 loss """marginranking""" +25 25 regularizer """no""" +25 25 optimizer """adadelta""" +25 25 training_loop """owa""" +25 25 negative_sampler """basic""" +25 25 evaluator """rankbased""" +25 26 dataset """kinships""" +25 26 model """complex""" +25 26 loss """marginranking""" +25 26 regularizer """no""" +25 26 optimizer """adadelta""" +25 26 training_loop """owa""" +25 26 negative_sampler """basic""" +25 26 evaluator """rankbased""" +25 27 dataset """kinships""" +25 27 model 
"""complex""" +25 27 loss """marginranking""" +25 27 regularizer """no""" +25 27 optimizer """adadelta""" +25 27 training_loop """owa""" +25 27 negative_sampler """basic""" +25 27 evaluator """rankbased""" +25 28 dataset """kinships""" +25 28 model """complex""" +25 28 loss """marginranking""" +25 28 regularizer """no""" +25 28 optimizer """adadelta""" +25 28 training_loop """owa""" +25 28 negative_sampler """basic""" +25 28 evaluator """rankbased""" +25 29 dataset """kinships""" +25 29 model """complex""" +25 29 loss """marginranking""" +25 29 regularizer """no""" +25 29 optimizer """adadelta""" +25 29 training_loop """owa""" +25 29 negative_sampler """basic""" +25 29 evaluator """rankbased""" +25 30 dataset """kinships""" +25 30 model """complex""" +25 30 loss """marginranking""" +25 30 regularizer """no""" +25 30 optimizer """adadelta""" +25 30 training_loop """owa""" +25 30 negative_sampler """basic""" +25 30 evaluator """rankbased""" +25 31 dataset """kinships""" +25 31 model """complex""" +25 31 loss """marginranking""" +25 31 regularizer """no""" +25 31 optimizer """adadelta""" +25 31 training_loop """owa""" +25 31 negative_sampler """basic""" +25 31 evaluator """rankbased""" +25 32 dataset """kinships""" +25 32 model """complex""" +25 32 loss """marginranking""" +25 32 regularizer """no""" +25 32 optimizer """adadelta""" +25 32 training_loop """owa""" +25 32 negative_sampler """basic""" +25 32 evaluator """rankbased""" +25 33 dataset """kinships""" +25 33 model """complex""" +25 33 loss """marginranking""" +25 33 regularizer """no""" +25 33 optimizer """adadelta""" +25 33 training_loop """owa""" +25 33 negative_sampler """basic""" +25 33 evaluator """rankbased""" +25 34 dataset """kinships""" +25 34 model """complex""" +25 34 loss """marginranking""" +25 34 regularizer """no""" +25 34 optimizer """adadelta""" +25 34 training_loop """owa""" +25 34 negative_sampler """basic""" +25 34 evaluator """rankbased""" +25 35 dataset """kinships""" +25 35 model 
"""complex""" +25 35 loss """marginranking""" +25 35 regularizer """no""" +25 35 optimizer """adadelta""" +25 35 training_loop """owa""" +25 35 negative_sampler """basic""" +25 35 evaluator """rankbased""" +25 36 dataset """kinships""" +25 36 model """complex""" +25 36 loss """marginranking""" +25 36 regularizer """no""" +25 36 optimizer """adadelta""" +25 36 training_loop """owa""" +25 36 negative_sampler """basic""" +25 36 evaluator """rankbased""" +25 37 dataset """kinships""" +25 37 model """complex""" +25 37 loss """marginranking""" +25 37 regularizer """no""" +25 37 optimizer """adadelta""" +25 37 training_loop """owa""" +25 37 negative_sampler """basic""" +25 37 evaluator """rankbased""" +25 38 dataset """kinships""" +25 38 model """complex""" +25 38 loss """marginranking""" +25 38 regularizer """no""" +25 38 optimizer """adadelta""" +25 38 training_loop """owa""" +25 38 negative_sampler """basic""" +25 38 evaluator """rankbased""" +25 39 dataset """kinships""" +25 39 model """complex""" +25 39 loss """marginranking""" +25 39 regularizer """no""" +25 39 optimizer """adadelta""" +25 39 training_loop """owa""" +25 39 negative_sampler """basic""" +25 39 evaluator """rankbased""" +25 40 dataset """kinships""" +25 40 model """complex""" +25 40 loss """marginranking""" +25 40 regularizer """no""" +25 40 optimizer """adadelta""" +25 40 training_loop """owa""" +25 40 negative_sampler """basic""" +25 40 evaluator """rankbased""" +25 41 dataset """kinships""" +25 41 model """complex""" +25 41 loss """marginranking""" +25 41 regularizer """no""" +25 41 optimizer """adadelta""" +25 41 training_loop """owa""" +25 41 negative_sampler """basic""" +25 41 evaluator """rankbased""" +25 42 dataset """kinships""" +25 42 model """complex""" +25 42 loss """marginranking""" +25 42 regularizer """no""" +25 42 optimizer """adadelta""" +25 42 training_loop """owa""" +25 42 negative_sampler """basic""" +25 42 evaluator """rankbased""" +25 43 dataset """kinships""" +25 43 model 
"""complex""" +25 43 loss """marginranking""" +25 43 regularizer """no""" +25 43 optimizer """adadelta""" +25 43 training_loop """owa""" +25 43 negative_sampler """basic""" +25 43 evaluator """rankbased""" +25 44 dataset """kinships""" +25 44 model """complex""" +25 44 loss """marginranking""" +25 44 regularizer """no""" +25 44 optimizer """adadelta""" +25 44 training_loop """owa""" +25 44 negative_sampler """basic""" +25 44 evaluator """rankbased""" +25 45 dataset """kinships""" +25 45 model """complex""" +25 45 loss """marginranking""" +25 45 regularizer """no""" +25 45 optimizer """adadelta""" +25 45 training_loop """owa""" +25 45 negative_sampler """basic""" +25 45 evaluator """rankbased""" +25 46 dataset """kinships""" +25 46 model """complex""" +25 46 loss """marginranking""" +25 46 regularizer """no""" +25 46 optimizer """adadelta""" +25 46 training_loop """owa""" +25 46 negative_sampler """basic""" +25 46 evaluator """rankbased""" +25 47 dataset """kinships""" +25 47 model """complex""" +25 47 loss """marginranking""" +25 47 regularizer """no""" +25 47 optimizer """adadelta""" +25 47 training_loop """owa""" +25 47 negative_sampler """basic""" +25 47 evaluator """rankbased""" +25 48 dataset """kinships""" +25 48 model """complex""" +25 48 loss """marginranking""" +25 48 regularizer """no""" +25 48 optimizer """adadelta""" +25 48 training_loop """owa""" +25 48 negative_sampler """basic""" +25 48 evaluator """rankbased""" +25 49 dataset """kinships""" +25 49 model """complex""" +25 49 loss """marginranking""" +25 49 regularizer """no""" +25 49 optimizer """adadelta""" +25 49 training_loop """owa""" +25 49 negative_sampler """basic""" +25 49 evaluator """rankbased""" +25 50 dataset """kinships""" +25 50 model """complex""" +25 50 loss """marginranking""" +25 50 regularizer """no""" +25 50 optimizer """adadelta""" +25 50 training_loop """owa""" +25 50 negative_sampler """basic""" +25 50 evaluator """rankbased""" +25 51 dataset """kinships""" +25 51 model 
"""complex""" +25 51 loss """marginranking""" +25 51 regularizer """no""" +25 51 optimizer """adadelta""" +25 51 training_loop """owa""" +25 51 negative_sampler """basic""" +25 51 evaluator """rankbased""" +25 52 dataset """kinships""" +25 52 model """complex""" +25 52 loss """marginranking""" +25 52 regularizer """no""" +25 52 optimizer """adadelta""" +25 52 training_loop """owa""" +25 52 negative_sampler """basic""" +25 52 evaluator """rankbased""" +25 53 dataset """kinships""" +25 53 model """complex""" +25 53 loss """marginranking""" +25 53 regularizer """no""" +25 53 optimizer """adadelta""" +25 53 training_loop """owa""" +25 53 negative_sampler """basic""" +25 53 evaluator """rankbased""" +25 54 dataset """kinships""" +25 54 model """complex""" +25 54 loss """marginranking""" +25 54 regularizer """no""" +25 54 optimizer """adadelta""" +25 54 training_loop """owa""" +25 54 negative_sampler """basic""" +25 54 evaluator """rankbased""" +25 55 dataset """kinships""" +25 55 model """complex""" +25 55 loss """marginranking""" +25 55 regularizer """no""" +25 55 optimizer """adadelta""" +25 55 training_loop """owa""" +25 55 negative_sampler """basic""" +25 55 evaluator """rankbased""" +25 56 dataset """kinships""" +25 56 model """complex""" +25 56 loss """marginranking""" +25 56 regularizer """no""" +25 56 optimizer """adadelta""" +25 56 training_loop """owa""" +25 56 negative_sampler """basic""" +25 56 evaluator """rankbased""" +25 57 dataset """kinships""" +25 57 model """complex""" +25 57 loss """marginranking""" +25 57 regularizer """no""" +25 57 optimizer """adadelta""" +25 57 training_loop """owa""" +25 57 negative_sampler """basic""" +25 57 evaluator """rankbased""" +25 58 dataset """kinships""" +25 58 model """complex""" +25 58 loss """marginranking""" +25 58 regularizer """no""" +25 58 optimizer """adadelta""" +25 58 training_loop """owa""" +25 58 negative_sampler """basic""" +25 58 evaluator """rankbased""" +25 59 dataset """kinships""" +25 59 model 
"""complex""" +25 59 loss """marginranking""" +25 59 regularizer """no""" +25 59 optimizer """adadelta""" +25 59 training_loop """owa""" +25 59 negative_sampler """basic""" +25 59 evaluator """rankbased""" +25 60 dataset """kinships""" +25 60 model """complex""" +25 60 loss """marginranking""" +25 60 regularizer """no""" +25 60 optimizer """adadelta""" +25 60 training_loop """owa""" +25 60 negative_sampler """basic""" +25 60 evaluator """rankbased""" +25 61 dataset """kinships""" +25 61 model """complex""" +25 61 loss """marginranking""" +25 61 regularizer """no""" +25 61 optimizer """adadelta""" +25 61 training_loop """owa""" +25 61 negative_sampler """basic""" +25 61 evaluator """rankbased""" +25 62 dataset """kinships""" +25 62 model """complex""" +25 62 loss """marginranking""" +25 62 regularizer """no""" +25 62 optimizer """adadelta""" +25 62 training_loop """owa""" +25 62 negative_sampler """basic""" +25 62 evaluator """rankbased""" +25 63 dataset """kinships""" +25 63 model """complex""" +25 63 loss """marginranking""" +25 63 regularizer """no""" +25 63 optimizer """adadelta""" +25 63 training_loop """owa""" +25 63 negative_sampler """basic""" +25 63 evaluator """rankbased""" +25 64 dataset """kinships""" +25 64 model """complex""" +25 64 loss """marginranking""" +25 64 regularizer """no""" +25 64 optimizer """adadelta""" +25 64 training_loop """owa""" +25 64 negative_sampler """basic""" +25 64 evaluator """rankbased""" +25 65 dataset """kinships""" +25 65 model """complex""" +25 65 loss """marginranking""" +25 65 regularizer """no""" +25 65 optimizer """adadelta""" +25 65 training_loop """owa""" +25 65 negative_sampler """basic""" +25 65 evaluator """rankbased""" +25 66 dataset """kinships""" +25 66 model """complex""" +25 66 loss """marginranking""" +25 66 regularizer """no""" +25 66 optimizer """adadelta""" +25 66 training_loop """owa""" +25 66 negative_sampler """basic""" +25 66 evaluator """rankbased""" +25 67 dataset """kinships""" +25 67 model 
"""complex""" +25 67 loss """marginranking""" +25 67 regularizer """no""" +25 67 optimizer """adadelta""" +25 67 training_loop """owa""" +25 67 negative_sampler """basic""" +25 67 evaluator """rankbased""" +25 68 dataset """kinships""" +25 68 model """complex""" +25 68 loss """marginranking""" +25 68 regularizer """no""" +25 68 optimizer """adadelta""" +25 68 training_loop """owa""" +25 68 negative_sampler """basic""" +25 68 evaluator """rankbased""" +25 69 dataset """kinships""" +25 69 model """complex""" +25 69 loss """marginranking""" +25 69 regularizer """no""" +25 69 optimizer """adadelta""" +25 69 training_loop """owa""" +25 69 negative_sampler """basic""" +25 69 evaluator """rankbased""" +25 70 dataset """kinships""" +25 70 model """complex""" +25 70 loss """marginranking""" +25 70 regularizer """no""" +25 70 optimizer """adadelta""" +25 70 training_loop """owa""" +25 70 negative_sampler """basic""" +25 70 evaluator """rankbased""" +25 71 dataset """kinships""" +25 71 model """complex""" +25 71 loss """marginranking""" +25 71 regularizer """no""" +25 71 optimizer """adadelta""" +25 71 training_loop """owa""" +25 71 negative_sampler """basic""" +25 71 evaluator """rankbased""" +25 72 dataset """kinships""" +25 72 model """complex""" +25 72 loss """marginranking""" +25 72 regularizer """no""" +25 72 optimizer """adadelta""" +25 72 training_loop """owa""" +25 72 negative_sampler """basic""" +25 72 evaluator """rankbased""" +25 73 dataset """kinships""" +25 73 model """complex""" +25 73 loss """marginranking""" +25 73 regularizer """no""" +25 73 optimizer """adadelta""" +25 73 training_loop """owa""" +25 73 negative_sampler """basic""" +25 73 evaluator """rankbased""" +25 74 dataset """kinships""" +25 74 model """complex""" +25 74 loss """marginranking""" +25 74 regularizer """no""" +25 74 optimizer """adadelta""" +25 74 training_loop """owa""" +25 74 negative_sampler """basic""" +25 74 evaluator """rankbased""" +25 75 dataset """kinships""" +25 75 model 
"""complex""" +25 75 loss """marginranking""" +25 75 regularizer """no""" +25 75 optimizer """adadelta""" +25 75 training_loop """owa""" +25 75 negative_sampler """basic""" +25 75 evaluator """rankbased""" +25 76 dataset """kinships""" +25 76 model """complex""" +25 76 loss """marginranking""" +25 76 regularizer """no""" +25 76 optimizer """adadelta""" +25 76 training_loop """owa""" +25 76 negative_sampler """basic""" +25 76 evaluator """rankbased""" +25 77 dataset """kinships""" +25 77 model """complex""" +25 77 loss """marginranking""" +25 77 regularizer """no""" +25 77 optimizer """adadelta""" +25 77 training_loop """owa""" +25 77 negative_sampler """basic""" +25 77 evaluator """rankbased""" +25 78 dataset """kinships""" +25 78 model """complex""" +25 78 loss """marginranking""" +25 78 regularizer """no""" +25 78 optimizer """adadelta""" +25 78 training_loop """owa""" +25 78 negative_sampler """basic""" +25 78 evaluator """rankbased""" +25 79 dataset """kinships""" +25 79 model """complex""" +25 79 loss """marginranking""" +25 79 regularizer """no""" +25 79 optimizer """adadelta""" +25 79 training_loop """owa""" +25 79 negative_sampler """basic""" +25 79 evaluator """rankbased""" +25 80 dataset """kinships""" +25 80 model """complex""" +25 80 loss """marginranking""" +25 80 regularizer """no""" +25 80 optimizer """adadelta""" +25 80 training_loop """owa""" +25 80 negative_sampler """basic""" +25 80 evaluator """rankbased""" +25 81 dataset """kinships""" +25 81 model """complex""" +25 81 loss """marginranking""" +25 81 regularizer """no""" +25 81 optimizer """adadelta""" +25 81 training_loop """owa""" +25 81 negative_sampler """basic""" +25 81 evaluator """rankbased""" +25 82 dataset """kinships""" +25 82 model """complex""" +25 82 loss """marginranking""" +25 82 regularizer """no""" +25 82 optimizer """adadelta""" +25 82 training_loop """owa""" +25 82 negative_sampler """basic""" +25 82 evaluator """rankbased""" +25 83 dataset """kinships""" +25 83 model 
"""complex""" +25 83 loss """marginranking""" +25 83 regularizer """no""" +25 83 optimizer """adadelta""" +25 83 training_loop """owa""" +25 83 negative_sampler """basic""" +25 83 evaluator """rankbased""" +25 84 dataset """kinships""" +25 84 model """complex""" +25 84 loss """marginranking""" +25 84 regularizer """no""" +25 84 optimizer """adadelta""" +25 84 training_loop """owa""" +25 84 negative_sampler """basic""" +25 84 evaluator """rankbased""" +25 85 dataset """kinships""" +25 85 model """complex""" +25 85 loss """marginranking""" +25 85 regularizer """no""" +25 85 optimizer """adadelta""" +25 85 training_loop """owa""" +25 85 negative_sampler """basic""" +25 85 evaluator """rankbased""" +25 86 dataset """kinships""" +25 86 model """complex""" +25 86 loss """marginranking""" +25 86 regularizer """no""" +25 86 optimizer """adadelta""" +25 86 training_loop """owa""" +25 86 negative_sampler """basic""" +25 86 evaluator """rankbased""" +25 87 dataset """kinships""" +25 87 model """complex""" +25 87 loss """marginranking""" +25 87 regularizer """no""" +25 87 optimizer """adadelta""" +25 87 training_loop """owa""" +25 87 negative_sampler """basic""" +25 87 evaluator """rankbased""" +25 88 dataset """kinships""" +25 88 model """complex""" +25 88 loss """marginranking""" +25 88 regularizer """no""" +25 88 optimizer """adadelta""" +25 88 training_loop """owa""" +25 88 negative_sampler """basic""" +25 88 evaluator """rankbased""" +25 89 dataset """kinships""" +25 89 model """complex""" +25 89 loss """marginranking""" +25 89 regularizer """no""" +25 89 optimizer """adadelta""" +25 89 training_loop """owa""" +25 89 negative_sampler """basic""" +25 89 evaluator """rankbased""" +25 90 dataset """kinships""" +25 90 model """complex""" +25 90 loss """marginranking""" +25 90 regularizer """no""" +25 90 optimizer """adadelta""" +25 90 training_loop """owa""" +25 90 negative_sampler """basic""" +25 90 evaluator """rankbased""" +25 91 dataset """kinships""" +25 91 model 
"""complex""" +25 91 loss """marginranking""" +25 91 regularizer """no""" +25 91 optimizer """adadelta""" +25 91 training_loop """owa""" +25 91 negative_sampler """basic""" +25 91 evaluator """rankbased""" +25 92 dataset """kinships""" +25 92 model """complex""" +25 92 loss """marginranking""" +25 92 regularizer """no""" +25 92 optimizer """adadelta""" +25 92 training_loop """owa""" +25 92 negative_sampler """basic""" +25 92 evaluator """rankbased""" +25 93 dataset """kinships""" +25 93 model """complex""" +25 93 loss """marginranking""" +25 93 regularizer """no""" +25 93 optimizer """adadelta""" +25 93 training_loop """owa""" +25 93 negative_sampler """basic""" +25 93 evaluator """rankbased""" +25 94 dataset """kinships""" +25 94 model """complex""" +25 94 loss """marginranking""" +25 94 regularizer """no""" +25 94 optimizer """adadelta""" +25 94 training_loop """owa""" +25 94 negative_sampler """basic""" +25 94 evaluator """rankbased""" +25 95 dataset """kinships""" +25 95 model """complex""" +25 95 loss """marginranking""" +25 95 regularizer """no""" +25 95 optimizer """adadelta""" +25 95 training_loop """owa""" +25 95 negative_sampler """basic""" +25 95 evaluator """rankbased""" +25 96 dataset """kinships""" +25 96 model """complex""" +25 96 loss """marginranking""" +25 96 regularizer """no""" +25 96 optimizer """adadelta""" +25 96 training_loop """owa""" +25 96 negative_sampler """basic""" +25 96 evaluator """rankbased""" +25 97 dataset """kinships""" +25 97 model """complex""" +25 97 loss """marginranking""" +25 97 regularizer """no""" +25 97 optimizer """adadelta""" +25 97 training_loop """owa""" +25 97 negative_sampler """basic""" +25 97 evaluator """rankbased""" +25 98 dataset """kinships""" +25 98 model """complex""" +25 98 loss """marginranking""" +25 98 regularizer """no""" +25 98 optimizer """adadelta""" +25 98 training_loop """owa""" +25 98 negative_sampler """basic""" +25 98 evaluator """rankbased""" +25 99 dataset """kinships""" +25 99 model 
"""complex""" +25 99 loss """marginranking""" +25 99 regularizer """no""" +25 99 optimizer """adadelta""" +25 99 training_loop """owa""" +25 99 negative_sampler """basic""" +25 99 evaluator """rankbased""" +25 100 dataset """kinships""" +25 100 model """complex""" +25 100 loss """marginranking""" +25 100 regularizer """no""" +25 100 optimizer """adadelta""" +25 100 training_loop """owa""" +25 100 negative_sampler """basic""" +25 100 evaluator """rankbased""" +26 1 model.embedding_dim 2.0 +26 1 loss.margin 29.5147941859735 +26 1 loss.adversarial_temperature 0.6611156840583818 +26 1 negative_sampler.num_negs_per_pos 81.0 +26 1 training.batch_size 2.0 +26 2 model.embedding_dim 2.0 +26 2 loss.margin 11.017520268239457 +26 2 loss.adversarial_temperature 0.15833527717832477 +26 2 negative_sampler.num_negs_per_pos 4.0 +26 2 training.batch_size 0.0 +26 3 model.embedding_dim 1.0 +26 3 loss.margin 11.254194572117292 +26 3 loss.adversarial_temperature 0.9861179481937566 +26 3 negative_sampler.num_negs_per_pos 21.0 +26 3 training.batch_size 0.0 +26 4 model.embedding_dim 0.0 +26 4 loss.margin 28.115460086191682 +26 4 loss.adversarial_temperature 0.6964139861244245 +26 4 negative_sampler.num_negs_per_pos 89.0 +26 4 training.batch_size 0.0 +26 5 model.embedding_dim 2.0 +26 5 loss.margin 14.355099195171713 +26 5 loss.adversarial_temperature 0.6903782808941464 +26 5 negative_sampler.num_negs_per_pos 7.0 +26 5 training.batch_size 2.0 +26 6 model.embedding_dim 2.0 +26 6 loss.margin 5.67623936988526 +26 6 loss.adversarial_temperature 0.14855717138901425 +26 6 negative_sampler.num_negs_per_pos 45.0 +26 6 training.batch_size 0.0 +26 7 model.embedding_dim 1.0 +26 7 loss.margin 12.828765399822041 +26 7 loss.adversarial_temperature 0.6563516420192653 +26 7 negative_sampler.num_negs_per_pos 31.0 +26 7 training.batch_size 1.0 +26 8 model.embedding_dim 1.0 +26 8 loss.margin 9.303272117287284 +26 8 loss.adversarial_temperature 0.36179695344857143 +26 8 negative_sampler.num_negs_per_pos 46.0 
+26 8 training.batch_size 2.0 +26 9 model.embedding_dim 1.0 +26 9 loss.margin 20.782737074073445 +26 9 loss.adversarial_temperature 0.46498637013434685 +26 9 negative_sampler.num_negs_per_pos 83.0 +26 9 training.batch_size 1.0 +26 10 model.embedding_dim 0.0 +26 10 loss.margin 3.758191858677693 +26 10 loss.adversarial_temperature 0.580353280743148 +26 10 negative_sampler.num_negs_per_pos 6.0 +26 10 training.batch_size 0.0 +26 11 model.embedding_dim 2.0 +26 11 loss.margin 2.1752375716815924 +26 11 loss.adversarial_temperature 0.9439130254772843 +26 11 negative_sampler.num_negs_per_pos 66.0 +26 11 training.batch_size 0.0 +26 12 model.embedding_dim 0.0 +26 12 loss.margin 25.18781802483555 +26 12 loss.adversarial_temperature 0.11725301581544917 +26 12 negative_sampler.num_negs_per_pos 76.0 +26 12 training.batch_size 2.0 +26 13 model.embedding_dim 1.0 +26 13 loss.margin 10.375428472850539 +26 13 loss.adversarial_temperature 0.2803000269832517 +26 13 negative_sampler.num_negs_per_pos 25.0 +26 13 training.batch_size 1.0 +26 14 model.embedding_dim 1.0 +26 14 loss.margin 2.4624887769943973 +26 14 loss.adversarial_temperature 0.35073314261008814 +26 14 negative_sampler.num_negs_per_pos 49.0 +26 14 training.batch_size 2.0 +26 15 model.embedding_dim 2.0 +26 15 loss.margin 22.202656927848288 +26 15 loss.adversarial_temperature 0.1682132642278778 +26 15 negative_sampler.num_negs_per_pos 25.0 +26 15 training.batch_size 1.0 +26 16 model.embedding_dim 0.0 +26 16 loss.margin 20.302502499519978 +26 16 loss.adversarial_temperature 0.1451909135909358 +26 16 negative_sampler.num_negs_per_pos 9.0 +26 16 training.batch_size 1.0 +26 17 model.embedding_dim 1.0 +26 17 loss.margin 12.26534118441122 +26 17 loss.adversarial_temperature 0.40696011662113873 +26 17 negative_sampler.num_negs_per_pos 68.0 +26 17 training.batch_size 2.0 +26 18 model.embedding_dim 1.0 +26 18 loss.margin 24.409302431835734 +26 18 loss.adversarial_temperature 0.8566614263668244 +26 18 negative_sampler.num_negs_per_pos 
64.0 +26 18 training.batch_size 0.0 +26 19 model.embedding_dim 1.0 +26 19 loss.margin 20.06959160161532 +26 19 loss.adversarial_temperature 0.4959071266458277 +26 19 negative_sampler.num_negs_per_pos 86.0 +26 19 training.batch_size 0.0 +26 20 model.embedding_dim 1.0 +26 20 loss.margin 21.55694089256384 +26 20 loss.adversarial_temperature 0.7594801524985199 +26 20 negative_sampler.num_negs_per_pos 58.0 +26 20 training.batch_size 2.0 +26 21 model.embedding_dim 1.0 +26 21 loss.margin 4.772632988613491 +26 21 loss.adversarial_temperature 0.5360166988294343 +26 21 negative_sampler.num_negs_per_pos 16.0 +26 21 training.batch_size 0.0 +26 22 model.embedding_dim 0.0 +26 22 loss.margin 8.035793895954335 +26 22 loss.adversarial_temperature 0.9355531628156316 +26 22 negative_sampler.num_negs_per_pos 44.0 +26 22 training.batch_size 2.0 +26 23 model.embedding_dim 1.0 +26 23 loss.margin 18.370777136915944 +26 23 loss.adversarial_temperature 0.8439943811551952 +26 23 negative_sampler.num_negs_per_pos 83.0 +26 23 training.batch_size 1.0 +26 24 model.embedding_dim 0.0 +26 24 loss.margin 24.562562141038047 +26 24 loss.adversarial_temperature 0.6473215397030058 +26 24 negative_sampler.num_negs_per_pos 10.0 +26 24 training.batch_size 2.0 +26 25 model.embedding_dim 1.0 +26 25 loss.margin 10.868554016394897 +26 25 loss.adversarial_temperature 0.9805376543873618 +26 25 negative_sampler.num_negs_per_pos 67.0 +26 25 training.batch_size 0.0 +26 26 model.embedding_dim 1.0 +26 26 loss.margin 4.988813201533958 +26 26 loss.adversarial_temperature 0.8059961373456777 +26 26 negative_sampler.num_negs_per_pos 59.0 +26 26 training.batch_size 0.0 +26 27 model.embedding_dim 2.0 +26 27 loss.margin 16.903788960048466 +26 27 loss.adversarial_temperature 0.7865775470178967 +26 27 negative_sampler.num_negs_per_pos 10.0 +26 27 training.batch_size 2.0 +26 28 model.embedding_dim 0.0 +26 28 loss.margin 16.6928773908347 +26 28 loss.adversarial_temperature 0.7996201055060428 +26 28 
negative_sampler.num_negs_per_pos 48.0 +26 28 training.batch_size 2.0 +26 29 model.embedding_dim 2.0 +26 29 loss.margin 18.67838220069058 +26 29 loss.adversarial_temperature 0.3606626493497884 +26 29 negative_sampler.num_negs_per_pos 73.0 +26 29 training.batch_size 2.0 +26 30 model.embedding_dim 1.0 +26 30 loss.margin 27.686181147277956 +26 30 loss.adversarial_temperature 0.35717111289556736 +26 30 negative_sampler.num_negs_per_pos 98.0 +26 30 training.batch_size 0.0 +26 31 model.embedding_dim 1.0 +26 31 loss.margin 5.137774283480544 +26 31 loss.adversarial_temperature 0.13466502215280057 +26 31 negative_sampler.num_negs_per_pos 34.0 +26 31 training.batch_size 1.0 +26 32 model.embedding_dim 0.0 +26 32 loss.margin 5.08611973895719 +26 32 loss.adversarial_temperature 0.6718953583664155 +26 32 negative_sampler.num_negs_per_pos 72.0 +26 32 training.batch_size 2.0 +26 33 model.embedding_dim 0.0 +26 33 loss.margin 22.09063990710017 +26 33 loss.adversarial_temperature 0.1685067927699002 +26 33 negative_sampler.num_negs_per_pos 9.0 +26 33 training.batch_size 1.0 +26 34 model.embedding_dim 2.0 +26 34 loss.margin 8.882779968202605 +26 34 loss.adversarial_temperature 0.10033719636543204 +26 34 negative_sampler.num_negs_per_pos 22.0 +26 34 training.batch_size 1.0 +26 35 model.embedding_dim 1.0 +26 35 loss.margin 10.56664792716032 +26 35 loss.adversarial_temperature 0.9387630443696889 +26 35 negative_sampler.num_negs_per_pos 18.0 +26 35 training.batch_size 2.0 +26 36 model.embedding_dim 1.0 +26 36 loss.margin 20.184145941015426 +26 36 loss.adversarial_temperature 0.2867910600762916 +26 36 negative_sampler.num_negs_per_pos 69.0 +26 36 training.batch_size 2.0 +26 37 model.embedding_dim 0.0 +26 37 loss.margin 13.975092364399941 +26 37 loss.adversarial_temperature 0.6615187269938939 +26 37 negative_sampler.num_negs_per_pos 41.0 +26 37 training.batch_size 1.0 +26 38 model.embedding_dim 0.0 +26 38 loss.margin 5.421760146390801 +26 38 loss.adversarial_temperature 0.5730221067814143 
+26 38 negative_sampler.num_negs_per_pos 70.0 +26 38 training.batch_size 1.0 +26 39 model.embedding_dim 2.0 +26 39 loss.margin 1.6030488235674316 +26 39 loss.adversarial_temperature 0.2778406003512549 +26 39 negative_sampler.num_negs_per_pos 41.0 +26 39 training.batch_size 2.0 +26 40 model.embedding_dim 2.0 +26 40 loss.margin 28.690895796390887 +26 40 loss.adversarial_temperature 0.39350104731237534 +26 40 negative_sampler.num_negs_per_pos 26.0 +26 40 training.batch_size 1.0 +26 41 model.embedding_dim 1.0 +26 41 loss.margin 11.802372710653788 +26 41 loss.adversarial_temperature 0.5854821497924947 +26 41 negative_sampler.num_negs_per_pos 54.0 +26 41 training.batch_size 1.0 +26 42 model.embedding_dim 1.0 +26 42 loss.margin 18.084690530519758 +26 42 loss.adversarial_temperature 0.7154930941871588 +26 42 negative_sampler.num_negs_per_pos 55.0 +26 42 training.batch_size 1.0 +26 43 model.embedding_dim 0.0 +26 43 loss.margin 5.732816812196862 +26 43 loss.adversarial_temperature 0.8535797007887767 +26 43 negative_sampler.num_negs_per_pos 77.0 +26 43 training.batch_size 2.0 +26 44 model.embedding_dim 1.0 +26 44 loss.margin 17.541401023426854 +26 44 loss.adversarial_temperature 0.5896280004167191 +26 44 negative_sampler.num_negs_per_pos 85.0 +26 44 training.batch_size 0.0 +26 45 model.embedding_dim 0.0 +26 45 loss.margin 23.194298667009708 +26 45 loss.adversarial_temperature 0.7800750968137234 +26 45 negative_sampler.num_negs_per_pos 20.0 +26 45 training.batch_size 1.0 +26 46 model.embedding_dim 1.0 +26 46 loss.margin 28.41793962601451 +26 46 loss.adversarial_temperature 0.3487608765008323 +26 46 negative_sampler.num_negs_per_pos 33.0 +26 46 training.batch_size 2.0 +26 47 model.embedding_dim 0.0 +26 47 loss.margin 16.80227064207849 +26 47 loss.adversarial_temperature 0.28129747389310633 +26 47 negative_sampler.num_negs_per_pos 13.0 +26 47 training.batch_size 0.0 +26 48 model.embedding_dim 1.0 +26 48 loss.margin 21.3211261521823 +26 48 loss.adversarial_temperature 
0.5306586658642025 +26 48 negative_sampler.num_negs_per_pos 23.0 +26 48 training.batch_size 0.0 +26 49 model.embedding_dim 1.0 +26 49 loss.margin 24.747503739585653 +26 49 loss.adversarial_temperature 0.7318865563591278 +26 49 negative_sampler.num_negs_per_pos 59.0 +26 49 training.batch_size 0.0 +26 50 model.embedding_dim 0.0 +26 50 loss.margin 13.912884963839801 +26 50 loss.adversarial_temperature 0.4230168317752181 +26 50 negative_sampler.num_negs_per_pos 97.0 +26 50 training.batch_size 2.0 +26 51 model.embedding_dim 0.0 +26 51 loss.margin 18.342061354907113 +26 51 loss.adversarial_temperature 0.5510610977686589 +26 51 negative_sampler.num_negs_per_pos 20.0 +26 51 training.batch_size 1.0 +26 52 model.embedding_dim 2.0 +26 52 loss.margin 27.554823677125835 +26 52 loss.adversarial_temperature 0.9321022901779255 +26 52 negative_sampler.num_negs_per_pos 76.0 +26 52 training.batch_size 2.0 +26 53 model.embedding_dim 1.0 +26 53 loss.margin 28.18497043060942 +26 53 loss.adversarial_temperature 0.4943586918140467 +26 53 negative_sampler.num_negs_per_pos 10.0 +26 53 training.batch_size 2.0 +26 54 model.embedding_dim 2.0 +26 54 loss.margin 6.686433121905664 +26 54 loss.adversarial_temperature 0.9922922462973424 +26 54 negative_sampler.num_negs_per_pos 91.0 +26 54 training.batch_size 2.0 +26 55 model.embedding_dim 1.0 +26 55 loss.margin 18.859838752284396 +26 55 loss.adversarial_temperature 0.8974895611520319 +26 55 negative_sampler.num_negs_per_pos 37.0 +26 55 training.batch_size 0.0 +26 56 model.embedding_dim 1.0 +26 56 loss.margin 20.535489542099167 +26 56 loss.adversarial_temperature 0.9982303778198762 +26 56 negative_sampler.num_negs_per_pos 23.0 +26 56 training.batch_size 1.0 +26 57 model.embedding_dim 1.0 +26 57 loss.margin 25.336102259092584 +26 57 loss.adversarial_temperature 0.7479414359089402 +26 57 negative_sampler.num_negs_per_pos 37.0 +26 57 training.batch_size 2.0 +26 58 model.embedding_dim 2.0 +26 58 loss.margin 15.979834948765165 +26 58 
loss.adversarial_temperature 0.9529808779158712 +26 58 negative_sampler.num_negs_per_pos 55.0 +26 58 training.batch_size 2.0 +26 59 model.embedding_dim 2.0 +26 59 loss.margin 10.754918930141553 +26 59 loss.adversarial_temperature 0.7143813862168362 +26 59 negative_sampler.num_negs_per_pos 9.0 +26 59 training.batch_size 0.0 +26 60 model.embedding_dim 0.0 +26 60 loss.margin 10.761647443341248 +26 60 loss.adversarial_temperature 0.9856351685203985 +26 60 negative_sampler.num_negs_per_pos 84.0 +26 60 training.batch_size 2.0 +26 61 model.embedding_dim 0.0 +26 61 loss.margin 4.879481071384886 +26 61 loss.adversarial_temperature 0.4397338145939832 +26 61 negative_sampler.num_negs_per_pos 19.0 +26 61 training.batch_size 1.0 +26 62 model.embedding_dim 0.0 +26 62 loss.margin 12.415104986098248 +26 62 loss.adversarial_temperature 0.6469084599888317 +26 62 negative_sampler.num_negs_per_pos 8.0 +26 62 training.batch_size 1.0 +26 63 model.embedding_dim 1.0 +26 63 loss.margin 28.333570755683358 +26 63 loss.adversarial_temperature 0.5032332647236686 +26 63 negative_sampler.num_negs_per_pos 46.0 +26 63 training.batch_size 0.0 +26 64 model.embedding_dim 0.0 +26 64 loss.margin 2.3250214705120147 +26 64 loss.adversarial_temperature 0.7921752535021492 +26 64 negative_sampler.num_negs_per_pos 28.0 +26 64 training.batch_size 2.0 +26 65 model.embedding_dim 0.0 +26 65 loss.margin 6.438632712261615 +26 65 loss.adversarial_temperature 0.522130142633401 +26 65 negative_sampler.num_negs_per_pos 45.0 +26 65 training.batch_size 0.0 +26 66 model.embedding_dim 0.0 +26 66 loss.margin 12.279617219977624 +26 66 loss.adversarial_temperature 0.8325690279528464 +26 66 negative_sampler.num_negs_per_pos 56.0 +26 66 training.batch_size 0.0 +26 67 model.embedding_dim 0.0 +26 67 loss.margin 25.23978235081999 +26 67 loss.adversarial_temperature 0.8396686637704421 +26 67 negative_sampler.num_negs_per_pos 72.0 +26 67 training.batch_size 2.0 +26 68 model.embedding_dim 1.0 +26 68 loss.margin 25.94717521595685 +26 
68 loss.adversarial_temperature 0.67415396257109 +26 68 negative_sampler.num_negs_per_pos 5.0 +26 68 training.batch_size 0.0 +26 69 model.embedding_dim 0.0 +26 69 loss.margin 12.387452126663442 +26 69 loss.adversarial_temperature 0.6052828183709908 +26 69 negative_sampler.num_negs_per_pos 36.0 +26 69 training.batch_size 0.0 +26 70 model.embedding_dim 1.0 +26 70 loss.margin 22.624347688607685 +26 70 loss.adversarial_temperature 0.39321442026097453 +26 70 negative_sampler.num_negs_per_pos 82.0 +26 70 training.batch_size 2.0 +26 71 model.embedding_dim 2.0 +26 71 loss.margin 14.939460141610013 +26 71 loss.adversarial_temperature 0.851381981826423 +26 71 negative_sampler.num_negs_per_pos 26.0 +26 71 training.batch_size 0.0 +26 72 model.embedding_dim 1.0 +26 72 loss.margin 29.90963287373699 +26 72 loss.adversarial_temperature 0.9559919548155581 +26 72 negative_sampler.num_negs_per_pos 23.0 +26 72 training.batch_size 2.0 +26 73 model.embedding_dim 1.0 +26 73 loss.margin 9.34670005347184 +26 73 loss.adversarial_temperature 0.9505493726409959 +26 73 negative_sampler.num_negs_per_pos 20.0 +26 73 training.batch_size 0.0 +26 74 model.embedding_dim 1.0 +26 74 loss.margin 25.114023215896697 +26 74 loss.adversarial_temperature 0.8881103994812176 +26 74 negative_sampler.num_negs_per_pos 25.0 +26 74 training.batch_size 1.0 +26 75 model.embedding_dim 0.0 +26 75 loss.margin 1.614299830652233 +26 75 loss.adversarial_temperature 0.44563426751625623 +26 75 negative_sampler.num_negs_per_pos 57.0 +26 75 training.batch_size 2.0 +26 76 model.embedding_dim 1.0 +26 76 loss.margin 1.2570275267451476 +26 76 loss.adversarial_temperature 0.6007486134843552 +26 76 negative_sampler.num_negs_per_pos 80.0 +26 76 training.batch_size 1.0 +26 77 model.embedding_dim 0.0 +26 77 loss.margin 24.882917729980335 +26 77 loss.adversarial_temperature 0.7656126484280289 +26 77 negative_sampler.num_negs_per_pos 80.0 +26 77 training.batch_size 0.0 +26 78 model.embedding_dim 0.0 +26 78 loss.margin 10.542176019509343 
+26 78 loss.adversarial_temperature 0.8667970180176386 +26 78 negative_sampler.num_negs_per_pos 89.0 +26 78 training.batch_size 0.0 +26 79 model.embedding_dim 1.0 +26 79 loss.margin 17.309947293524775 +26 79 loss.adversarial_temperature 0.2294317988925675 +26 79 negative_sampler.num_negs_per_pos 8.0 +26 79 training.batch_size 2.0 +26 80 model.embedding_dim 2.0 +26 80 loss.margin 20.211905347118766 +26 80 loss.adversarial_temperature 0.8401290333642701 +26 80 negative_sampler.num_negs_per_pos 69.0 +26 80 training.batch_size 0.0 +26 81 model.embedding_dim 1.0 +26 81 loss.margin 18.413896201756092 +26 81 loss.adversarial_temperature 0.8807836506417317 +26 81 negative_sampler.num_negs_per_pos 17.0 +26 81 training.batch_size 0.0 +26 82 model.embedding_dim 0.0 +26 82 loss.margin 13.372752140662884 +26 82 loss.adversarial_temperature 0.3649217671994269 +26 82 negative_sampler.num_negs_per_pos 20.0 +26 82 training.batch_size 2.0 +26 83 model.embedding_dim 2.0 +26 83 loss.margin 12.979253182866726 +26 83 loss.adversarial_temperature 0.6138067354319786 +26 83 negative_sampler.num_negs_per_pos 18.0 +26 83 training.batch_size 2.0 +26 84 model.embedding_dim 1.0 +26 84 loss.margin 18.294172307343615 +26 84 loss.adversarial_temperature 0.6033628958532904 +26 84 negative_sampler.num_negs_per_pos 64.0 +26 84 training.batch_size 2.0 +26 85 model.embedding_dim 2.0 +26 85 loss.margin 8.386670986308154 +26 85 loss.adversarial_temperature 0.7816377025244858 +26 85 negative_sampler.num_negs_per_pos 65.0 +26 85 training.batch_size 2.0 +26 86 model.embedding_dim 1.0 +26 86 loss.margin 24.9160072313628 +26 86 loss.adversarial_temperature 0.7421872673061467 +26 86 negative_sampler.num_negs_per_pos 52.0 +26 86 training.batch_size 2.0 +26 87 model.embedding_dim 0.0 +26 87 loss.margin 12.43175557450588 +26 87 loss.adversarial_temperature 0.34815085387255645 +26 87 negative_sampler.num_negs_per_pos 28.0 +26 87 training.batch_size 0.0 +26 88 model.embedding_dim 2.0 +26 88 loss.margin 
6.175952007567666 +26 88 loss.adversarial_temperature 0.8456849519191771 +26 88 negative_sampler.num_negs_per_pos 31.0 +26 88 training.batch_size 1.0 +26 89 model.embedding_dim 0.0 +26 89 loss.margin 24.1396058328142 +26 89 loss.adversarial_temperature 0.1282979853703015 +26 89 negative_sampler.num_negs_per_pos 29.0 +26 89 training.batch_size 2.0 +26 90 model.embedding_dim 1.0 +26 90 loss.margin 7.8645403492823345 +26 90 loss.adversarial_temperature 0.9713914358289923 +26 90 negative_sampler.num_negs_per_pos 38.0 +26 90 training.batch_size 1.0 +26 91 model.embedding_dim 1.0 +26 91 loss.margin 21.134026830789434 +26 91 loss.adversarial_temperature 0.9245736880955432 +26 91 negative_sampler.num_negs_per_pos 6.0 +26 91 training.batch_size 0.0 +26 92 model.embedding_dim 0.0 +26 92 loss.margin 29.677089628622376 +26 92 loss.adversarial_temperature 0.45445373490138863 +26 92 negative_sampler.num_negs_per_pos 85.0 +26 92 training.batch_size 1.0 +26 93 model.embedding_dim 0.0 +26 93 loss.margin 21.984066425075127 +26 93 loss.adversarial_temperature 0.32932403933123855 +26 93 negative_sampler.num_negs_per_pos 30.0 +26 93 training.batch_size 1.0 +26 94 model.embedding_dim 1.0 +26 94 loss.margin 20.984549360529762 +26 94 loss.adversarial_temperature 0.404391396663105 +26 94 negative_sampler.num_negs_per_pos 14.0 +26 94 training.batch_size 2.0 +26 95 model.embedding_dim 2.0 +26 95 loss.margin 1.7121850881388752 +26 95 loss.adversarial_temperature 0.5605038812309763 +26 95 negative_sampler.num_negs_per_pos 44.0 +26 95 training.batch_size 1.0 +26 96 model.embedding_dim 2.0 +26 96 loss.margin 19.637880983354936 +26 96 loss.adversarial_temperature 0.9592561102928749 +26 96 negative_sampler.num_negs_per_pos 93.0 +26 96 training.batch_size 1.0 +26 97 model.embedding_dim 2.0 +26 97 loss.margin 23.381905553701348 +26 97 loss.adversarial_temperature 0.4236851370921654 +26 97 negative_sampler.num_negs_per_pos 31.0 +26 97 training.batch_size 0.0 +26 98 model.embedding_dim 1.0 +26 98 
loss.margin 3.703953783532109 +26 98 loss.adversarial_temperature 0.7965607187245014 +26 98 negative_sampler.num_negs_per_pos 61.0 +26 98 training.batch_size 1.0 +26 99 model.embedding_dim 1.0 +26 99 loss.margin 29.22550681054984 +26 99 loss.adversarial_temperature 0.8440412022246213 +26 99 negative_sampler.num_negs_per_pos 41.0 +26 99 training.batch_size 2.0 +26 100 model.embedding_dim 1.0 +26 100 loss.margin 1.1502740499750166 +26 100 loss.adversarial_temperature 0.13580954217452718 +26 100 negative_sampler.num_negs_per_pos 3.0 +26 100 training.batch_size 0.0 +26 1 dataset """kinships""" +26 1 model """complex""" +26 1 loss """nssa""" +26 1 regularizer """no""" +26 1 optimizer """adadelta""" +26 1 training_loop """owa""" +26 1 negative_sampler """basic""" +26 1 evaluator """rankbased""" +26 2 dataset """kinships""" +26 2 model """complex""" +26 2 loss """nssa""" +26 2 regularizer """no""" +26 2 optimizer """adadelta""" +26 2 training_loop """owa""" +26 2 negative_sampler """basic""" +26 2 evaluator """rankbased""" +26 3 dataset """kinships""" +26 3 model """complex""" +26 3 loss """nssa""" +26 3 regularizer """no""" +26 3 optimizer """adadelta""" +26 3 training_loop """owa""" +26 3 negative_sampler """basic""" +26 3 evaluator """rankbased""" +26 4 dataset """kinships""" +26 4 model """complex""" +26 4 loss """nssa""" +26 4 regularizer """no""" +26 4 optimizer """adadelta""" +26 4 training_loop """owa""" +26 4 negative_sampler """basic""" +26 4 evaluator """rankbased""" +26 5 dataset """kinships""" +26 5 model """complex""" +26 5 loss """nssa""" +26 5 regularizer """no""" +26 5 optimizer """adadelta""" +26 5 training_loop """owa""" +26 5 negative_sampler """basic""" +26 5 evaluator """rankbased""" +26 6 dataset """kinships""" +26 6 model """complex""" +26 6 loss """nssa""" +26 6 regularizer """no""" +26 6 optimizer """adadelta""" +26 6 training_loop """owa""" +26 6 negative_sampler """basic""" +26 6 evaluator """rankbased""" +26 7 dataset """kinships""" +26 7 
model """complex""" +26 7 loss """nssa""" +26 7 regularizer """no""" +26 7 optimizer """adadelta""" +26 7 training_loop """owa""" +26 7 negative_sampler """basic""" +26 7 evaluator """rankbased""" +26 8 dataset """kinships""" +26 8 model """complex""" +26 8 loss """nssa""" +26 8 regularizer """no""" +26 8 optimizer """adadelta""" +26 8 training_loop """owa""" +26 8 negative_sampler """basic""" +26 8 evaluator """rankbased""" +26 9 dataset """kinships""" +26 9 model """complex""" +26 9 loss """nssa""" +26 9 regularizer """no""" +26 9 optimizer """adadelta""" +26 9 training_loop """owa""" +26 9 negative_sampler """basic""" +26 9 evaluator """rankbased""" +26 10 dataset """kinships""" +26 10 model """complex""" +26 10 loss """nssa""" +26 10 regularizer """no""" +26 10 optimizer """adadelta""" +26 10 training_loop """owa""" +26 10 negative_sampler """basic""" +26 10 evaluator """rankbased""" +26 11 dataset """kinships""" +26 11 model """complex""" +26 11 loss """nssa""" +26 11 regularizer """no""" +26 11 optimizer """adadelta""" +26 11 training_loop """owa""" +26 11 negative_sampler """basic""" +26 11 evaluator """rankbased""" +26 12 dataset """kinships""" +26 12 model """complex""" +26 12 loss """nssa""" +26 12 regularizer """no""" +26 12 optimizer """adadelta""" +26 12 training_loop """owa""" +26 12 negative_sampler """basic""" +26 12 evaluator """rankbased""" +26 13 dataset """kinships""" +26 13 model """complex""" +26 13 loss """nssa""" +26 13 regularizer """no""" +26 13 optimizer """adadelta""" +26 13 training_loop """owa""" +26 13 negative_sampler """basic""" +26 13 evaluator """rankbased""" +26 14 dataset """kinships""" +26 14 model """complex""" +26 14 loss """nssa""" +26 14 regularizer """no""" +26 14 optimizer """adadelta""" +26 14 training_loop """owa""" +26 14 negative_sampler """basic""" +26 14 evaluator """rankbased""" +26 15 dataset """kinships""" +26 15 model """complex""" +26 15 loss """nssa""" +26 15 regularizer """no""" +26 15 optimizer 
"""adadelta""" +26 15 training_loop """owa""" +26 15 negative_sampler """basic""" +26 15 evaluator """rankbased""" +26 16 dataset """kinships""" +26 16 model """complex""" +26 16 loss """nssa""" +26 16 regularizer """no""" +26 16 optimizer """adadelta""" +26 16 training_loop """owa""" +26 16 negative_sampler """basic""" +26 16 evaluator """rankbased""" +26 17 dataset """kinships""" +26 17 model """complex""" +26 17 loss """nssa""" +26 17 regularizer """no""" +26 17 optimizer """adadelta""" +26 17 training_loop """owa""" +26 17 negative_sampler """basic""" +26 17 evaluator """rankbased""" +26 18 dataset """kinships""" +26 18 model """complex""" +26 18 loss """nssa""" +26 18 regularizer """no""" +26 18 optimizer """adadelta""" +26 18 training_loop """owa""" +26 18 negative_sampler """basic""" +26 18 evaluator """rankbased""" +26 19 dataset """kinships""" +26 19 model """complex""" +26 19 loss """nssa""" +26 19 regularizer """no""" +26 19 optimizer """adadelta""" +26 19 training_loop """owa""" +26 19 negative_sampler """basic""" +26 19 evaluator """rankbased""" +26 20 dataset """kinships""" +26 20 model """complex""" +26 20 loss """nssa""" +26 20 regularizer """no""" +26 20 optimizer """adadelta""" +26 20 training_loop """owa""" +26 20 negative_sampler """basic""" +26 20 evaluator """rankbased""" +26 21 dataset """kinships""" +26 21 model """complex""" +26 21 loss """nssa""" +26 21 regularizer """no""" +26 21 optimizer """adadelta""" +26 21 training_loop """owa""" +26 21 negative_sampler """basic""" +26 21 evaluator """rankbased""" +26 22 dataset """kinships""" +26 22 model """complex""" +26 22 loss """nssa""" +26 22 regularizer """no""" +26 22 optimizer """adadelta""" +26 22 training_loop """owa""" +26 22 negative_sampler """basic""" +26 22 evaluator """rankbased""" +26 23 dataset """kinships""" +26 23 model """complex""" +26 23 loss """nssa""" +26 23 regularizer """no""" +26 23 optimizer """adadelta""" +26 23 training_loop """owa""" +26 23 negative_sampler 
"""basic""" +26 23 evaluator """rankbased""" +26 24 dataset """kinships""" +26 24 model """complex""" +26 24 loss """nssa""" +26 24 regularizer """no""" +26 24 optimizer """adadelta""" +26 24 training_loop """owa""" +26 24 negative_sampler """basic""" +26 24 evaluator """rankbased""" +26 25 dataset """kinships""" +26 25 model """complex""" +26 25 loss """nssa""" +26 25 regularizer """no""" +26 25 optimizer """adadelta""" +26 25 training_loop """owa""" +26 25 negative_sampler """basic""" +26 25 evaluator """rankbased""" +26 26 dataset """kinships""" +26 26 model """complex""" +26 26 loss """nssa""" +26 26 regularizer """no""" +26 26 optimizer """adadelta""" +26 26 training_loop """owa""" +26 26 negative_sampler """basic""" +26 26 evaluator """rankbased""" +26 27 dataset """kinships""" +26 27 model """complex""" +26 27 loss """nssa""" +26 27 regularizer """no""" +26 27 optimizer """adadelta""" +26 27 training_loop """owa""" +26 27 negative_sampler """basic""" +26 27 evaluator """rankbased""" +26 28 dataset """kinships""" +26 28 model """complex""" +26 28 loss """nssa""" +26 28 regularizer """no""" +26 28 optimizer """adadelta""" +26 28 training_loop """owa""" +26 28 negative_sampler """basic""" +26 28 evaluator """rankbased""" +26 29 dataset """kinships""" +26 29 model """complex""" +26 29 loss """nssa""" +26 29 regularizer """no""" +26 29 optimizer """adadelta""" +26 29 training_loop """owa""" +26 29 negative_sampler """basic""" +26 29 evaluator """rankbased""" +26 30 dataset """kinships""" +26 30 model """complex""" +26 30 loss """nssa""" +26 30 regularizer """no""" +26 30 optimizer """adadelta""" +26 30 training_loop """owa""" +26 30 negative_sampler """basic""" +26 30 evaluator """rankbased""" +26 31 dataset """kinships""" +26 31 model """complex""" +26 31 loss """nssa""" +26 31 regularizer """no""" +26 31 optimizer """adadelta""" +26 31 training_loop """owa""" +26 31 negative_sampler """basic""" +26 31 evaluator """rankbased""" +26 32 dataset """kinships""" +26 
32 model """complex""" +26 32 loss """nssa""" +26 32 regularizer """no""" +26 32 optimizer """adadelta""" +26 32 training_loop """owa""" +26 32 negative_sampler """basic""" +26 32 evaluator """rankbased""" +26 33 dataset """kinships""" +26 33 model """complex""" +26 33 loss """nssa""" +26 33 regularizer """no""" +26 33 optimizer """adadelta""" +26 33 training_loop """owa""" +26 33 negative_sampler """basic""" +26 33 evaluator """rankbased""" +26 34 dataset """kinships""" +26 34 model """complex""" +26 34 loss """nssa""" +26 34 regularizer """no""" +26 34 optimizer """adadelta""" +26 34 training_loop """owa""" +26 34 negative_sampler """basic""" +26 34 evaluator """rankbased""" +26 35 dataset """kinships""" +26 35 model """complex""" +26 35 loss """nssa""" +26 35 regularizer """no""" +26 35 optimizer """adadelta""" +26 35 training_loop """owa""" +26 35 negative_sampler """basic""" +26 35 evaluator """rankbased""" +26 36 dataset """kinships""" +26 36 model """complex""" +26 36 loss """nssa""" +26 36 regularizer """no""" +26 36 optimizer """adadelta""" +26 36 training_loop """owa""" +26 36 negative_sampler """basic""" +26 36 evaluator """rankbased""" +26 37 dataset """kinships""" +26 37 model """complex""" +26 37 loss """nssa""" +26 37 regularizer """no""" +26 37 optimizer """adadelta""" +26 37 training_loop """owa""" +26 37 negative_sampler """basic""" +26 37 evaluator """rankbased""" +26 38 dataset """kinships""" +26 38 model """complex""" +26 38 loss """nssa""" +26 38 regularizer """no""" +26 38 optimizer """adadelta""" +26 38 training_loop """owa""" +26 38 negative_sampler """basic""" +26 38 evaluator """rankbased""" +26 39 dataset """kinships""" +26 39 model """complex""" +26 39 loss """nssa""" +26 39 regularizer """no""" +26 39 optimizer """adadelta""" +26 39 training_loop """owa""" +26 39 negative_sampler """basic""" +26 39 evaluator """rankbased""" +26 40 dataset """kinships""" +26 40 model """complex""" +26 40 loss """nssa""" +26 40 regularizer """no""" +26 
40 optimizer """adadelta""" +26 40 training_loop """owa""" +26 40 negative_sampler """basic""" +26 40 evaluator """rankbased""" +26 41 dataset """kinships""" +26 41 model """complex""" +26 41 loss """nssa""" +26 41 regularizer """no""" +26 41 optimizer """adadelta""" +26 41 training_loop """owa""" +26 41 negative_sampler """basic""" +26 41 evaluator """rankbased""" +26 42 dataset """kinships""" +26 42 model """complex""" +26 42 loss """nssa""" +26 42 regularizer """no""" +26 42 optimizer """adadelta""" +26 42 training_loop """owa""" +26 42 negative_sampler """basic""" +26 42 evaluator """rankbased""" +26 43 dataset """kinships""" +26 43 model """complex""" +26 43 loss """nssa""" +26 43 regularizer """no""" +26 43 optimizer """adadelta""" +26 43 training_loop """owa""" +26 43 negative_sampler """basic""" +26 43 evaluator """rankbased""" +26 44 dataset """kinships""" +26 44 model """complex""" +26 44 loss """nssa""" +26 44 regularizer """no""" +26 44 optimizer """adadelta""" +26 44 training_loop """owa""" +26 44 negative_sampler """basic""" +26 44 evaluator """rankbased""" +26 45 dataset """kinships""" +26 45 model """complex""" +26 45 loss """nssa""" +26 45 regularizer """no""" +26 45 optimizer """adadelta""" +26 45 training_loop """owa""" +26 45 negative_sampler """basic""" +26 45 evaluator """rankbased""" +26 46 dataset """kinships""" +26 46 model """complex""" +26 46 loss """nssa""" +26 46 regularizer """no""" +26 46 optimizer """adadelta""" +26 46 training_loop """owa""" +26 46 negative_sampler """basic""" +26 46 evaluator """rankbased""" +26 47 dataset """kinships""" +26 47 model """complex""" +26 47 loss """nssa""" +26 47 regularizer """no""" +26 47 optimizer """adadelta""" +26 47 training_loop """owa""" +26 47 negative_sampler """basic""" +26 47 evaluator """rankbased""" +26 48 dataset """kinships""" +26 48 model """complex""" +26 48 loss """nssa""" +26 48 regularizer """no""" +26 48 optimizer """adadelta""" +26 48 training_loop """owa""" +26 48 
negative_sampler """basic""" +26 48 evaluator """rankbased""" +26 49 dataset """kinships""" +26 49 model """complex""" +26 49 loss """nssa""" +26 49 regularizer """no""" +26 49 optimizer """adadelta""" +26 49 training_loop """owa""" +26 49 negative_sampler """basic""" +26 49 evaluator """rankbased""" +26 50 dataset """kinships""" +26 50 model """complex""" +26 50 loss """nssa""" +26 50 regularizer """no""" +26 50 optimizer """adadelta""" +26 50 training_loop """owa""" +26 50 negative_sampler """basic""" +26 50 evaluator """rankbased""" +26 51 dataset """kinships""" +26 51 model """complex""" +26 51 loss """nssa""" +26 51 regularizer """no""" +26 51 optimizer """adadelta""" +26 51 training_loop """owa""" +26 51 negative_sampler """basic""" +26 51 evaluator """rankbased""" +26 52 dataset """kinships""" +26 52 model """complex""" +26 52 loss """nssa""" +26 52 regularizer """no""" +26 52 optimizer """adadelta""" +26 52 training_loop """owa""" +26 52 negative_sampler """basic""" +26 52 evaluator """rankbased""" +26 53 dataset """kinships""" +26 53 model """complex""" +26 53 loss """nssa""" +26 53 regularizer """no""" +26 53 optimizer """adadelta""" +26 53 training_loop """owa""" +26 53 negative_sampler """basic""" +26 53 evaluator """rankbased""" +26 54 dataset """kinships""" +26 54 model """complex""" +26 54 loss """nssa""" +26 54 regularizer """no""" +26 54 optimizer """adadelta""" +26 54 training_loop """owa""" +26 54 negative_sampler """basic""" +26 54 evaluator """rankbased""" +26 55 dataset """kinships""" +26 55 model """complex""" +26 55 loss """nssa""" +26 55 regularizer """no""" +26 55 optimizer """adadelta""" +26 55 training_loop """owa""" +26 55 negative_sampler """basic""" +26 55 evaluator """rankbased""" +26 56 dataset """kinships""" +26 56 model """complex""" +26 56 loss """nssa""" +26 56 regularizer """no""" +26 56 optimizer """adadelta""" +26 56 training_loop """owa""" +26 56 negative_sampler """basic""" +26 56 evaluator """rankbased""" +26 57 dataset 
"""kinships""" +26 57 model """complex""" +26 57 loss """nssa""" +26 57 regularizer """no""" +26 57 optimizer """adadelta""" +26 57 training_loop """owa""" +26 57 negative_sampler """basic""" +26 57 evaluator """rankbased""" +26 58 dataset """kinships""" +26 58 model """complex""" +26 58 loss """nssa""" +26 58 regularizer """no""" +26 58 optimizer """adadelta""" +26 58 training_loop """owa""" +26 58 negative_sampler """basic""" +26 58 evaluator """rankbased""" +26 59 dataset """kinships""" +26 59 model """complex""" +26 59 loss """nssa""" +26 59 regularizer """no""" +26 59 optimizer """adadelta""" +26 59 training_loop """owa""" +26 59 negative_sampler """basic""" +26 59 evaluator """rankbased""" +26 60 dataset """kinships""" +26 60 model """complex""" +26 60 loss """nssa""" +26 60 regularizer """no""" +26 60 optimizer """adadelta""" +26 60 training_loop """owa""" +26 60 negative_sampler """basic""" +26 60 evaluator """rankbased""" +26 61 dataset """kinships""" +26 61 model """complex""" +26 61 loss """nssa""" +26 61 regularizer """no""" +26 61 optimizer """adadelta""" +26 61 training_loop """owa""" +26 61 negative_sampler """basic""" +26 61 evaluator """rankbased""" +26 62 dataset """kinships""" +26 62 model """complex""" +26 62 loss """nssa""" +26 62 regularizer """no""" +26 62 optimizer """adadelta""" +26 62 training_loop """owa""" +26 62 negative_sampler """basic""" +26 62 evaluator """rankbased""" +26 63 dataset """kinships""" +26 63 model """complex""" +26 63 loss """nssa""" +26 63 regularizer """no""" +26 63 optimizer """adadelta""" +26 63 training_loop """owa""" +26 63 negative_sampler """basic""" +26 63 evaluator """rankbased""" +26 64 dataset """kinships""" +26 64 model """complex""" +26 64 loss """nssa""" +26 64 regularizer """no""" +26 64 optimizer """adadelta""" +26 64 training_loop """owa""" +26 64 negative_sampler """basic""" +26 64 evaluator """rankbased""" +26 65 dataset """kinships""" +26 65 model """complex""" +26 65 loss """nssa""" +26 65 
regularizer """no""" +26 65 optimizer """adadelta""" +26 65 training_loop """owa""" +26 65 negative_sampler """basic""" +26 65 evaluator """rankbased""" +26 66 dataset """kinships""" +26 66 model """complex""" +26 66 loss """nssa""" +26 66 regularizer """no""" +26 66 optimizer """adadelta""" +26 66 training_loop """owa""" +26 66 negative_sampler """basic""" +26 66 evaluator """rankbased""" +26 67 dataset """kinships""" +26 67 model """complex""" +26 67 loss """nssa""" +26 67 regularizer """no""" +26 67 optimizer """adadelta""" +26 67 training_loop """owa""" +26 67 negative_sampler """basic""" +26 67 evaluator """rankbased""" +26 68 dataset """kinships""" +26 68 model """complex""" +26 68 loss """nssa""" +26 68 regularizer """no""" +26 68 optimizer """adadelta""" +26 68 training_loop """owa""" +26 68 negative_sampler """basic""" +26 68 evaluator """rankbased""" +26 69 dataset """kinships""" +26 69 model """complex""" +26 69 loss """nssa""" +26 69 regularizer """no""" +26 69 optimizer """adadelta""" +26 69 training_loop """owa""" +26 69 negative_sampler """basic""" +26 69 evaluator """rankbased""" +26 70 dataset """kinships""" +26 70 model """complex""" +26 70 loss """nssa""" +26 70 regularizer """no""" +26 70 optimizer """adadelta""" +26 70 training_loop """owa""" +26 70 negative_sampler """basic""" +26 70 evaluator """rankbased""" +26 71 dataset """kinships""" +26 71 model """complex""" +26 71 loss """nssa""" +26 71 regularizer """no""" +26 71 optimizer """adadelta""" +26 71 training_loop """owa""" +26 71 negative_sampler """basic""" +26 71 evaluator """rankbased""" +26 72 dataset """kinships""" +26 72 model """complex""" +26 72 loss """nssa""" +26 72 regularizer """no""" +26 72 optimizer """adadelta""" +26 72 training_loop """owa""" +26 72 negative_sampler """basic""" +26 72 evaluator """rankbased""" +26 73 dataset """kinships""" +26 73 model """complex""" +26 73 loss """nssa""" +26 73 regularizer """no""" +26 73 optimizer """adadelta""" +26 73 training_loop 
"""owa""" +26 73 negative_sampler """basic""" +26 73 evaluator """rankbased""" +26 74 dataset """kinships""" +26 74 model """complex""" +26 74 loss """nssa""" +26 74 regularizer """no""" +26 74 optimizer """adadelta""" +26 74 training_loop """owa""" +26 74 negative_sampler """basic""" +26 74 evaluator """rankbased""" +26 75 dataset """kinships""" +26 75 model """complex""" +26 75 loss """nssa""" +26 75 regularizer """no""" +26 75 optimizer """adadelta""" +26 75 training_loop """owa""" +26 75 negative_sampler """basic""" +26 75 evaluator """rankbased""" +26 76 dataset """kinships""" +26 76 model """complex""" +26 76 loss """nssa""" +26 76 regularizer """no""" +26 76 optimizer """adadelta""" +26 76 training_loop """owa""" +26 76 negative_sampler """basic""" +26 76 evaluator """rankbased""" +26 77 dataset """kinships""" +26 77 model """complex""" +26 77 loss """nssa""" +26 77 regularizer """no""" +26 77 optimizer """adadelta""" +26 77 training_loop """owa""" +26 77 negative_sampler """basic""" +26 77 evaluator """rankbased""" +26 78 dataset """kinships""" +26 78 model """complex""" +26 78 loss """nssa""" +26 78 regularizer """no""" +26 78 optimizer """adadelta""" +26 78 training_loop """owa""" +26 78 negative_sampler """basic""" +26 78 evaluator """rankbased""" +26 79 dataset """kinships""" +26 79 model """complex""" +26 79 loss """nssa""" +26 79 regularizer """no""" +26 79 optimizer """adadelta""" +26 79 training_loop """owa""" +26 79 negative_sampler """basic""" +26 79 evaluator """rankbased""" +26 80 dataset """kinships""" +26 80 model """complex""" +26 80 loss """nssa""" +26 80 regularizer """no""" +26 80 optimizer """adadelta""" +26 80 training_loop """owa""" +26 80 negative_sampler """basic""" +26 80 evaluator """rankbased""" +26 81 dataset """kinships""" +26 81 model """complex""" +26 81 loss """nssa""" +26 81 regularizer """no""" +26 81 optimizer """adadelta""" +26 81 training_loop """owa""" +26 81 negative_sampler """basic""" +26 81 evaluator """rankbased""" 
+26 82 dataset """kinships""" +26 82 model """complex""" +26 82 loss """nssa""" +26 82 regularizer """no""" +26 82 optimizer """adadelta""" +26 82 training_loop """owa""" +26 82 negative_sampler """basic""" +26 82 evaluator """rankbased""" +26 83 dataset """kinships""" +26 83 model """complex""" +26 83 loss """nssa""" +26 83 regularizer """no""" +26 83 optimizer """adadelta""" +26 83 training_loop """owa""" +26 83 negative_sampler """basic""" +26 83 evaluator """rankbased""" +26 84 dataset """kinships""" +26 84 model """complex""" +26 84 loss """nssa""" +26 84 regularizer """no""" +26 84 optimizer """adadelta""" +26 84 training_loop """owa""" +26 84 negative_sampler """basic""" +26 84 evaluator """rankbased""" +26 85 dataset """kinships""" +26 85 model """complex""" +26 85 loss """nssa""" +26 85 regularizer """no""" +26 85 optimizer """adadelta""" +26 85 training_loop """owa""" +26 85 negative_sampler """basic""" +26 85 evaluator """rankbased""" +26 86 dataset """kinships""" +26 86 model """complex""" +26 86 loss """nssa""" +26 86 regularizer """no""" +26 86 optimizer """adadelta""" +26 86 training_loop """owa""" +26 86 negative_sampler """basic""" +26 86 evaluator """rankbased""" +26 87 dataset """kinships""" +26 87 model """complex""" +26 87 loss """nssa""" +26 87 regularizer """no""" +26 87 optimizer """adadelta""" +26 87 training_loop """owa""" +26 87 negative_sampler """basic""" +26 87 evaluator """rankbased""" +26 88 dataset """kinships""" +26 88 model """complex""" +26 88 loss """nssa""" +26 88 regularizer """no""" +26 88 optimizer """adadelta""" +26 88 training_loop """owa""" +26 88 negative_sampler """basic""" +26 88 evaluator """rankbased""" +26 89 dataset """kinships""" +26 89 model """complex""" +26 89 loss """nssa""" +26 89 regularizer """no""" +26 89 optimizer """adadelta""" +26 89 training_loop """owa""" +26 89 negative_sampler """basic""" +26 89 evaluator """rankbased""" +26 90 dataset """kinships""" +26 90 model """complex""" +26 90 loss """nssa""" 
+26 90 regularizer """no""" +26 90 optimizer """adadelta""" +26 90 training_loop """owa""" +26 90 negative_sampler """basic""" +26 90 evaluator """rankbased""" +26 91 dataset """kinships""" +26 91 model """complex""" +26 91 loss """nssa""" +26 91 regularizer """no""" +26 91 optimizer """adadelta""" +26 91 training_loop """owa""" +26 91 negative_sampler """basic""" +26 91 evaluator """rankbased""" +26 92 dataset """kinships""" +26 92 model """complex""" +26 92 loss """nssa""" +26 92 regularizer """no""" +26 92 optimizer """adadelta""" +26 92 training_loop """owa""" +26 92 negative_sampler """basic""" +26 92 evaluator """rankbased""" +26 93 dataset """kinships""" +26 93 model """complex""" +26 93 loss """nssa""" +26 93 regularizer """no""" +26 93 optimizer """adadelta""" +26 93 training_loop """owa""" +26 93 negative_sampler """basic""" +26 93 evaluator """rankbased""" +26 94 dataset """kinships""" +26 94 model """complex""" +26 94 loss """nssa""" +26 94 regularizer """no""" +26 94 optimizer """adadelta""" +26 94 training_loop """owa""" +26 94 negative_sampler """basic""" +26 94 evaluator """rankbased""" +26 95 dataset """kinships""" +26 95 model """complex""" +26 95 loss """nssa""" +26 95 regularizer """no""" +26 95 optimizer """adadelta""" +26 95 training_loop """owa""" +26 95 negative_sampler """basic""" +26 95 evaluator """rankbased""" +26 96 dataset """kinships""" +26 96 model """complex""" +26 96 loss """nssa""" +26 96 regularizer """no""" +26 96 optimizer """adadelta""" +26 96 training_loop """owa""" +26 96 negative_sampler """basic""" +26 96 evaluator """rankbased""" +26 97 dataset """kinships""" +26 97 model """complex""" +26 97 loss """nssa""" +26 97 regularizer """no""" +26 97 optimizer """adadelta""" +26 97 training_loop """owa""" +26 97 negative_sampler """basic""" +26 97 evaluator """rankbased""" +26 98 dataset """kinships""" +26 98 model """complex""" +26 98 loss """nssa""" +26 98 regularizer """no""" +26 98 optimizer """adadelta""" +26 98 
training_loop """owa""" +26 98 negative_sampler """basic""" +26 98 evaluator """rankbased""" +26 99 dataset """kinships""" +26 99 model """complex""" +26 99 loss """nssa""" +26 99 regularizer """no""" +26 99 optimizer """adadelta""" +26 99 training_loop """owa""" +26 99 negative_sampler """basic""" +26 99 evaluator """rankbased""" +26 100 dataset """kinships""" +26 100 model """complex""" +26 100 loss """nssa""" +26 100 regularizer """no""" +26 100 optimizer """adadelta""" +26 100 training_loop """owa""" +26 100 negative_sampler """basic""" +26 100 evaluator """rankbased""" +27 1 model.embedding_dim 1.0 +27 1 loss.margin 16.447891262140928 +27 1 loss.adversarial_temperature 0.5820930322358931 +27 1 negative_sampler.num_negs_per_pos 24.0 +27 1 training.batch_size 1.0 +27 2 model.embedding_dim 2.0 +27 2 loss.margin 4.650842434766719 +27 2 loss.adversarial_temperature 0.5643024956502947 +27 2 negative_sampler.num_negs_per_pos 47.0 +27 2 training.batch_size 2.0 +27 3 model.embedding_dim 1.0 +27 3 loss.margin 7.20098898869699 +27 3 loss.adversarial_temperature 0.7280209777701341 +27 3 negative_sampler.num_negs_per_pos 6.0 +27 3 training.batch_size 2.0 +27 4 model.embedding_dim 2.0 +27 4 loss.margin 14.984881401429563 +27 4 loss.adversarial_temperature 0.8210649944719105 +27 4 negative_sampler.num_negs_per_pos 19.0 +27 4 training.batch_size 2.0 +27 5 model.embedding_dim 2.0 +27 5 loss.margin 1.0296971742154006 +27 5 loss.adversarial_temperature 0.3600131114705386 +27 5 negative_sampler.num_negs_per_pos 12.0 +27 5 training.batch_size 0.0 +27 6 model.embedding_dim 1.0 +27 6 loss.margin 15.914981751249067 +27 6 loss.adversarial_temperature 0.7112142571823087 +27 6 negative_sampler.num_negs_per_pos 52.0 +27 6 training.batch_size 0.0 +27 7 model.embedding_dim 1.0 +27 7 loss.margin 20.228377547003813 +27 7 loss.adversarial_temperature 0.873260677269786 +27 7 negative_sampler.num_negs_per_pos 96.0 +27 7 training.batch_size 0.0 +27 8 model.embedding_dim 1.0 +27 8 loss.margin 
20.373751892264927 +27 8 loss.adversarial_temperature 0.5593811857911438 +27 8 negative_sampler.num_negs_per_pos 91.0 +27 8 training.batch_size 0.0 +27 9 model.embedding_dim 2.0 +27 9 loss.margin 10.338616108653886 +27 9 loss.adversarial_temperature 0.429767622208885 +27 9 negative_sampler.num_negs_per_pos 75.0 +27 9 training.batch_size 0.0 +27 10 model.embedding_dim 2.0 +27 10 loss.margin 27.005101288254334 +27 10 loss.adversarial_temperature 0.7446577306667064 +27 10 negative_sampler.num_negs_per_pos 21.0 +27 10 training.batch_size 0.0 +27 11 model.embedding_dim 1.0 +27 11 loss.margin 2.4193352435401048 +27 11 loss.adversarial_temperature 0.7038791919033415 +27 11 negative_sampler.num_negs_per_pos 92.0 +27 11 training.batch_size 2.0 +27 12 model.embedding_dim 1.0 +27 12 loss.margin 4.116981315121642 +27 12 loss.adversarial_temperature 0.4836713785509016 +27 12 negative_sampler.num_negs_per_pos 57.0 +27 12 training.batch_size 0.0 +27 13 model.embedding_dim 2.0 +27 13 loss.margin 29.73218834189026 +27 13 loss.adversarial_temperature 0.49599803353403893 +27 13 negative_sampler.num_negs_per_pos 34.0 +27 13 training.batch_size 0.0 +27 14 model.embedding_dim 1.0 +27 14 loss.margin 26.446190706852697 +27 14 loss.adversarial_temperature 0.6185035779290217 +27 14 negative_sampler.num_negs_per_pos 94.0 +27 14 training.batch_size 0.0 +27 15 model.embedding_dim 2.0 +27 15 loss.margin 1.315594827897384 +27 15 loss.adversarial_temperature 0.6666043213099749 +27 15 negative_sampler.num_negs_per_pos 34.0 +27 15 training.batch_size 1.0 +27 16 model.embedding_dim 0.0 +27 16 loss.margin 25.9012338305991 +27 16 loss.adversarial_temperature 0.6652546135408611 +27 16 negative_sampler.num_negs_per_pos 48.0 +27 16 training.batch_size 0.0 +27 17 model.embedding_dim 2.0 +27 17 loss.margin 29.3360232649791 +27 17 loss.adversarial_temperature 0.5673680416485136 +27 17 negative_sampler.num_negs_per_pos 31.0 +27 17 training.batch_size 2.0 +27 18 model.embedding_dim 2.0 +27 18 loss.margin 
15.724212778262892 +27 18 loss.adversarial_temperature 0.2920472422757334 +27 18 negative_sampler.num_negs_per_pos 90.0 +27 18 training.batch_size 2.0 +27 19 model.embedding_dim 0.0 +27 19 loss.margin 9.067931993369644 +27 19 loss.adversarial_temperature 0.9284296160522563 +27 19 negative_sampler.num_negs_per_pos 81.0 +27 19 training.batch_size 0.0 +27 20 model.embedding_dim 1.0 +27 20 loss.margin 21.142530186282194 +27 20 loss.adversarial_temperature 0.5613211540357794 +27 20 negative_sampler.num_negs_per_pos 45.0 +27 20 training.batch_size 2.0 +27 21 model.embedding_dim 0.0 +27 21 loss.margin 8.947910120530494 +27 21 loss.adversarial_temperature 0.6624700222022116 +27 21 negative_sampler.num_negs_per_pos 28.0 +27 21 training.batch_size 0.0 +27 22 model.embedding_dim 2.0 +27 22 loss.margin 16.110085242451046 +27 22 loss.adversarial_temperature 0.21158015738339409 +27 22 negative_sampler.num_negs_per_pos 90.0 +27 22 training.batch_size 0.0 +27 23 model.embedding_dim 1.0 +27 23 loss.margin 5.767895171909227 +27 23 loss.adversarial_temperature 0.7892313282154262 +27 23 negative_sampler.num_negs_per_pos 18.0 +27 23 training.batch_size 1.0 +27 24 model.embedding_dim 1.0 +27 24 loss.margin 27.57419200864491 +27 24 loss.adversarial_temperature 0.7126561226717157 +27 24 negative_sampler.num_negs_per_pos 17.0 +27 24 training.batch_size 2.0 +27 25 model.embedding_dim 2.0 +27 25 loss.margin 15.397043763173635 +27 25 loss.adversarial_temperature 0.9872699698396677 +27 25 negative_sampler.num_negs_per_pos 96.0 +27 25 training.batch_size 0.0 +27 26 model.embedding_dim 1.0 +27 26 loss.margin 11.98703121242131 +27 26 loss.adversarial_temperature 0.13290201852180092 +27 26 negative_sampler.num_negs_per_pos 27.0 +27 26 training.batch_size 0.0 +27 27 model.embedding_dim 2.0 +27 27 loss.margin 21.934285644048217 +27 27 loss.adversarial_temperature 0.3241208496507591 +27 27 negative_sampler.num_negs_per_pos 2.0 +27 27 training.batch_size 0.0 +27 28 model.embedding_dim 2.0 +27 28 
loss.margin 8.772191570087312 +27 28 loss.adversarial_temperature 0.2010612248447593 +27 28 negative_sampler.num_negs_per_pos 41.0 +27 28 training.batch_size 0.0 +27 29 model.embedding_dim 2.0 +27 29 loss.margin 15.47258404567603 +27 29 loss.adversarial_temperature 0.6594769125027757 +27 29 negative_sampler.num_negs_per_pos 17.0 +27 29 training.batch_size 2.0 +27 30 model.embedding_dim 0.0 +27 30 loss.margin 9.752273407460027 +27 30 loss.adversarial_temperature 0.41631895315232725 +27 30 negative_sampler.num_negs_per_pos 34.0 +27 30 training.batch_size 0.0 +27 31 model.embedding_dim 0.0 +27 31 loss.margin 9.112257597864897 +27 31 loss.adversarial_temperature 0.9977701797377457 +27 31 negative_sampler.num_negs_per_pos 39.0 +27 31 training.batch_size 2.0 +27 32 model.embedding_dim 1.0 +27 32 loss.margin 29.09161211035338 +27 32 loss.adversarial_temperature 0.1314350204971341 +27 32 negative_sampler.num_negs_per_pos 62.0 +27 32 training.batch_size 1.0 +27 33 model.embedding_dim 1.0 +27 33 loss.margin 15.347130089753442 +27 33 loss.adversarial_temperature 0.6859340922176399 +27 33 negative_sampler.num_negs_per_pos 72.0 +27 33 training.batch_size 1.0 +27 34 model.embedding_dim 2.0 +27 34 loss.margin 5.090832887332218 +27 34 loss.adversarial_temperature 0.9321089663632082 +27 34 negative_sampler.num_negs_per_pos 0.0 +27 34 training.batch_size 2.0 +27 35 model.embedding_dim 0.0 +27 35 loss.margin 8.867412968313158 +27 35 loss.adversarial_temperature 0.8871632598245283 +27 35 negative_sampler.num_negs_per_pos 86.0 +27 35 training.batch_size 1.0 +27 36 model.embedding_dim 1.0 +27 36 loss.margin 3.990304023450915 +27 36 loss.adversarial_temperature 0.23951991793704633 +27 36 negative_sampler.num_negs_per_pos 12.0 +27 36 training.batch_size 1.0 +27 37 model.embedding_dim 2.0 +27 37 loss.margin 8.884744807157178 +27 37 loss.adversarial_temperature 0.2724187455946292 +27 37 negative_sampler.num_negs_per_pos 18.0 +27 37 training.batch_size 2.0 +27 38 model.embedding_dim 2.0 +27 
38 loss.margin 23.543225851071984 +27 38 loss.adversarial_temperature 0.6891026326815543 +27 38 negative_sampler.num_negs_per_pos 89.0 +27 38 training.batch_size 1.0 +27 39 model.embedding_dim 2.0 +27 39 loss.margin 21.48360624480662 +27 39 loss.adversarial_temperature 0.5048175046614474 +27 39 negative_sampler.num_negs_per_pos 72.0 +27 39 training.batch_size 0.0 +27 40 model.embedding_dim 2.0 +27 40 loss.margin 4.8058406564298615 +27 40 loss.adversarial_temperature 0.20463879252940587 +27 40 negative_sampler.num_negs_per_pos 54.0 +27 40 training.batch_size 1.0 +27 41 model.embedding_dim 1.0 +27 41 loss.margin 19.717028518196464 +27 41 loss.adversarial_temperature 0.7559918447364143 +27 41 negative_sampler.num_negs_per_pos 53.0 +27 41 training.batch_size 0.0 +27 42 model.embedding_dim 2.0 +27 42 loss.margin 1.3407650695363955 +27 42 loss.adversarial_temperature 0.8543977062861817 +27 42 negative_sampler.num_negs_per_pos 2.0 +27 42 training.batch_size 2.0 +27 43 model.embedding_dim 2.0 +27 43 loss.margin 8.634569817784016 +27 43 loss.adversarial_temperature 0.7844198327748738 +27 43 negative_sampler.num_negs_per_pos 29.0 +27 43 training.batch_size 2.0 +27 44 model.embedding_dim 0.0 +27 44 loss.margin 22.820586757029822 +27 44 loss.adversarial_temperature 0.18040645820126142 +27 44 negative_sampler.num_negs_per_pos 59.0 +27 44 training.batch_size 1.0 +27 45 model.embedding_dim 0.0 +27 45 loss.margin 14.854961118148035 +27 45 loss.adversarial_temperature 0.7273978854228298 +27 45 negative_sampler.num_negs_per_pos 82.0 +27 45 training.batch_size 2.0 +27 46 model.embedding_dim 0.0 +27 46 loss.margin 8.041484760789091 +27 46 loss.adversarial_temperature 0.637804471192575 +27 46 negative_sampler.num_negs_per_pos 76.0 +27 46 training.batch_size 1.0 +27 47 model.embedding_dim 2.0 +27 47 loss.margin 5.448420239938186 +27 47 loss.adversarial_temperature 0.5227056987964523 +27 47 negative_sampler.num_negs_per_pos 48.0 +27 47 training.batch_size 2.0 +27 48 model.embedding_dim 
1.0 +27 48 loss.margin 28.140962222192158 +27 48 loss.adversarial_temperature 0.33857489808568164 +27 48 negative_sampler.num_negs_per_pos 14.0 +27 48 training.batch_size 2.0 +27 49 model.embedding_dim 1.0 +27 49 loss.margin 3.172422845229067 +27 49 loss.adversarial_temperature 0.11680680134321487 +27 49 negative_sampler.num_negs_per_pos 36.0 +27 49 training.batch_size 0.0 +27 50 model.embedding_dim 0.0 +27 50 loss.margin 15.986150280712002 +27 50 loss.adversarial_temperature 0.1608577192197133 +27 50 negative_sampler.num_negs_per_pos 4.0 +27 50 training.batch_size 0.0 +27 51 model.embedding_dim 0.0 +27 51 loss.margin 8.618620176757952 +27 51 loss.adversarial_temperature 0.8051112806624561 +27 51 negative_sampler.num_negs_per_pos 53.0 +27 51 training.batch_size 1.0 +27 52 model.embedding_dim 0.0 +27 52 loss.margin 18.682146506801747 +27 52 loss.adversarial_temperature 0.7509920125617574 +27 52 negative_sampler.num_negs_per_pos 86.0 +27 52 training.batch_size 1.0 +27 53 model.embedding_dim 2.0 +27 53 loss.margin 9.4835655643326 +27 53 loss.adversarial_temperature 0.6713132153870344 +27 53 negative_sampler.num_negs_per_pos 8.0 +27 53 training.batch_size 1.0 +27 54 model.embedding_dim 2.0 +27 54 loss.margin 12.265075159703224 +27 54 loss.adversarial_temperature 0.47600055752024906 +27 54 negative_sampler.num_negs_per_pos 97.0 +27 54 training.batch_size 0.0 +27 55 model.embedding_dim 0.0 +27 55 loss.margin 22.29544230864096 +27 55 loss.adversarial_temperature 0.20672992577285915 +27 55 negative_sampler.num_negs_per_pos 64.0 +27 55 training.batch_size 1.0 +27 56 model.embedding_dim 0.0 +27 56 loss.margin 1.4304688871677107 +27 56 loss.adversarial_temperature 0.9855945938568226 +27 56 negative_sampler.num_negs_per_pos 95.0 +27 56 training.batch_size 0.0 +27 57 model.embedding_dim 1.0 +27 57 loss.margin 14.94676773091891 +27 57 loss.adversarial_temperature 0.8190212613721621 +27 57 negative_sampler.num_negs_per_pos 59.0 +27 57 training.batch_size 1.0 +27 58 
model.embedding_dim 1.0 +27 58 loss.margin 2.900255441848067 +27 58 loss.adversarial_temperature 0.7290788882295475 +27 58 negative_sampler.num_negs_per_pos 39.0 +27 58 training.batch_size 1.0 +27 59 model.embedding_dim 2.0 +27 59 loss.margin 24.97733657906302 +27 59 loss.adversarial_temperature 0.5288277826378895 +27 59 negative_sampler.num_negs_per_pos 40.0 +27 59 training.batch_size 2.0 +27 60 model.embedding_dim 1.0 +27 60 loss.margin 26.272156634181776 +27 60 loss.adversarial_temperature 0.6983123289233119 +27 60 negative_sampler.num_negs_per_pos 28.0 +27 60 training.batch_size 1.0 +27 61 model.embedding_dim 1.0 +27 61 loss.margin 23.089893942347324 +27 61 loss.adversarial_temperature 0.876485668212151 +27 61 negative_sampler.num_negs_per_pos 96.0 +27 61 training.batch_size 2.0 +27 62 model.embedding_dim 2.0 +27 62 loss.margin 16.354341165182095 +27 62 loss.adversarial_temperature 0.5907061693163835 +27 62 negative_sampler.num_negs_per_pos 85.0 +27 62 training.batch_size 1.0 +27 63 model.embedding_dim 1.0 +27 63 loss.margin 24.06969157383829 +27 63 loss.adversarial_temperature 0.4573737211578953 +27 63 negative_sampler.num_negs_per_pos 4.0 +27 63 training.batch_size 2.0 +27 64 model.embedding_dim 1.0 +27 64 loss.margin 29.990376509932467 +27 64 loss.adversarial_temperature 0.2918309144330857 +27 64 negative_sampler.num_negs_per_pos 60.0 +27 64 training.batch_size 0.0 +27 65 model.embedding_dim 1.0 +27 65 loss.margin 29.540985829458027 +27 65 loss.adversarial_temperature 0.7590141472812894 +27 65 negative_sampler.num_negs_per_pos 70.0 +27 65 training.batch_size 2.0 +27 66 model.embedding_dim 1.0 +27 66 loss.margin 7.858416139506491 +27 66 loss.adversarial_temperature 0.17577954728940834 +27 66 negative_sampler.num_negs_per_pos 50.0 +27 66 training.batch_size 0.0 +27 67 model.embedding_dim 0.0 +27 67 loss.margin 16.224487959992032 +27 67 loss.adversarial_temperature 0.7926071283408476 +27 67 negative_sampler.num_negs_per_pos 41.0 +27 67 training.batch_size 2.0 
+27 68 model.embedding_dim 0.0 +27 68 loss.margin 14.013826044870742 +27 68 loss.adversarial_temperature 0.4720005126763529 +27 68 negative_sampler.num_negs_per_pos 94.0 +27 68 training.batch_size 1.0 +27 69 model.embedding_dim 1.0 +27 69 loss.margin 29.62139020862532 +27 69 loss.adversarial_temperature 0.23501683209305319 +27 69 negative_sampler.num_negs_per_pos 92.0 +27 69 training.batch_size 1.0 +27 70 model.embedding_dim 0.0 +27 70 loss.margin 4.954092102437355 +27 70 loss.adversarial_temperature 0.4500347167404226 +27 70 negative_sampler.num_negs_per_pos 27.0 +27 70 training.batch_size 0.0 +27 71 model.embedding_dim 0.0 +27 71 loss.margin 20.967457860337543 +27 71 loss.adversarial_temperature 0.5150243314768099 +27 71 negative_sampler.num_negs_per_pos 81.0 +27 71 training.batch_size 1.0 +27 72 model.embedding_dim 1.0 +27 72 loss.margin 22.608700649654622 +27 72 loss.adversarial_temperature 0.7039250445128497 +27 72 negative_sampler.num_negs_per_pos 83.0 +27 72 training.batch_size 2.0 +27 73 model.embedding_dim 2.0 +27 73 loss.margin 23.632047352363877 +27 73 loss.adversarial_temperature 0.9181034934118504 +27 73 negative_sampler.num_negs_per_pos 41.0 +27 73 training.batch_size 1.0 +27 74 model.embedding_dim 0.0 +27 74 loss.margin 27.985355949553597 +27 74 loss.adversarial_temperature 0.39961083552653354 +27 74 negative_sampler.num_negs_per_pos 57.0 +27 74 training.batch_size 0.0 +27 75 model.embedding_dim 0.0 +27 75 loss.margin 28.050415883208473 +27 75 loss.adversarial_temperature 0.4309846865351875 +27 75 negative_sampler.num_negs_per_pos 31.0 +27 75 training.batch_size 1.0 +27 76 model.embedding_dim 2.0 +27 76 loss.margin 8.019122493359959 +27 76 loss.adversarial_temperature 0.5916240421918054 +27 76 negative_sampler.num_negs_per_pos 4.0 +27 76 training.batch_size 1.0 +27 77 model.embedding_dim 2.0 +27 77 loss.margin 2.796717634178314 +27 77 loss.adversarial_temperature 0.24619278912014794 +27 77 negative_sampler.num_negs_per_pos 57.0 +27 77 
training.batch_size 0.0 +27 78 model.embedding_dim 0.0 +27 78 loss.margin 10.65409361001361 +27 78 loss.adversarial_temperature 0.34302380845186614 +27 78 negative_sampler.num_negs_per_pos 47.0 +27 78 training.batch_size 2.0 +27 79 model.embedding_dim 0.0 +27 79 loss.margin 22.80510074501483 +27 79 loss.adversarial_temperature 0.18684609513381994 +27 79 negative_sampler.num_negs_per_pos 38.0 +27 79 training.batch_size 2.0 +27 80 model.embedding_dim 0.0 +27 80 loss.margin 4.96038999081453 +27 80 loss.adversarial_temperature 0.5857348414335815 +27 80 negative_sampler.num_negs_per_pos 98.0 +27 80 training.batch_size 0.0 +27 81 model.embedding_dim 0.0 +27 81 loss.margin 7.413395429334265 +27 81 loss.adversarial_temperature 0.45897683390426136 +27 81 negative_sampler.num_negs_per_pos 56.0 +27 81 training.batch_size 1.0 +27 82 model.embedding_dim 2.0 +27 82 loss.margin 27.6828211779095 +27 82 loss.adversarial_temperature 0.5755108968738517 +27 82 negative_sampler.num_negs_per_pos 21.0 +27 82 training.batch_size 2.0 +27 83 model.embedding_dim 1.0 +27 83 loss.margin 12.252059730011437 +27 83 loss.adversarial_temperature 0.8961610656253862 +27 83 negative_sampler.num_negs_per_pos 89.0 +27 83 training.batch_size 1.0 +27 84 model.embedding_dim 1.0 +27 84 loss.margin 6.322985426190798 +27 84 loss.adversarial_temperature 0.5347839785086002 +27 84 negative_sampler.num_negs_per_pos 57.0 +27 84 training.batch_size 2.0 +27 85 model.embedding_dim 2.0 +27 85 loss.margin 11.45621046561373 +27 85 loss.adversarial_temperature 0.9412389062231283 +27 85 negative_sampler.num_negs_per_pos 71.0 +27 85 training.batch_size 0.0 +27 86 model.embedding_dim 1.0 +27 86 loss.margin 18.450190271229417 +27 86 loss.adversarial_temperature 0.19250833116304622 +27 86 negative_sampler.num_negs_per_pos 2.0 +27 86 training.batch_size 2.0 +27 87 model.embedding_dim 2.0 +27 87 loss.margin 15.845729240704769 +27 87 loss.adversarial_temperature 0.8148806027990794 +27 87 negative_sampler.num_negs_per_pos 37.0 
+27 87 training.batch_size 1.0 +27 88 model.embedding_dim 2.0 +27 88 loss.margin 22.68037537357299 +27 88 loss.adversarial_temperature 0.531495576389466 +27 88 negative_sampler.num_negs_per_pos 68.0 +27 88 training.batch_size 0.0 +27 89 model.embedding_dim 2.0 +27 89 loss.margin 11.300475497975755 +27 89 loss.adversarial_temperature 0.8561451291879084 +27 89 negative_sampler.num_negs_per_pos 6.0 +27 89 training.batch_size 1.0 +27 90 model.embedding_dim 1.0 +27 90 loss.margin 20.349561850283443 +27 90 loss.adversarial_temperature 0.7901420080919661 +27 90 negative_sampler.num_negs_per_pos 69.0 +27 90 training.batch_size 1.0 +27 91 model.embedding_dim 0.0 +27 91 loss.margin 5.102731070189598 +27 91 loss.adversarial_temperature 0.29374047124618874 +27 91 negative_sampler.num_negs_per_pos 48.0 +27 91 training.batch_size 0.0 +27 92 model.embedding_dim 2.0 +27 92 loss.margin 12.902827593324275 +27 92 loss.adversarial_temperature 0.6459193145639288 +27 92 negative_sampler.num_negs_per_pos 33.0 +27 92 training.batch_size 0.0 +27 93 model.embedding_dim 1.0 +27 93 loss.margin 9.934063820010223 +27 93 loss.adversarial_temperature 0.7379742916536969 +27 93 negative_sampler.num_negs_per_pos 65.0 +27 93 training.batch_size 2.0 +27 94 model.embedding_dim 2.0 +27 94 loss.margin 27.841152170069115 +27 94 loss.adversarial_temperature 0.8704329808625484 +27 94 negative_sampler.num_negs_per_pos 2.0 +27 94 training.batch_size 0.0 +27 95 model.embedding_dim 0.0 +27 95 loss.margin 21.065125158756207 +27 95 loss.adversarial_temperature 0.9632125240947755 +27 95 negative_sampler.num_negs_per_pos 39.0 +27 95 training.batch_size 1.0 +27 96 model.embedding_dim 1.0 +27 96 loss.margin 11.28744944987695 +27 96 loss.adversarial_temperature 0.448811873230736 +27 96 negative_sampler.num_negs_per_pos 73.0 +27 96 training.batch_size 0.0 +27 97 model.embedding_dim 2.0 +27 97 loss.margin 23.20963532808746 +27 97 loss.adversarial_temperature 0.32120761937361675 +27 97 negative_sampler.num_negs_per_pos 
47.0 +27 97 training.batch_size 0.0 +27 98 model.embedding_dim 2.0 +27 98 loss.margin 24.737762434399517 +27 98 loss.adversarial_temperature 0.1429563511385548 +27 98 negative_sampler.num_negs_per_pos 4.0 +27 98 training.batch_size 0.0 +27 99 model.embedding_dim 2.0 +27 99 loss.margin 14.332116049616335 +27 99 loss.adversarial_temperature 0.10163794292436942 +27 99 negative_sampler.num_negs_per_pos 91.0 +27 99 training.batch_size 0.0 +27 100 model.embedding_dim 1.0 +27 100 loss.margin 6.4387003415190485 +27 100 loss.adversarial_temperature 0.37199138263565323 +27 100 negative_sampler.num_negs_per_pos 86.0 +27 100 training.batch_size 0.0 +27 1 dataset """kinships""" +27 1 model """complex""" +27 1 loss """nssa""" +27 1 regularizer """no""" +27 1 optimizer """adadelta""" +27 1 training_loop """owa""" +27 1 negative_sampler """basic""" +27 1 evaluator """rankbased""" +27 2 dataset """kinships""" +27 2 model """complex""" +27 2 loss """nssa""" +27 2 regularizer """no""" +27 2 optimizer """adadelta""" +27 2 training_loop """owa""" +27 2 negative_sampler """basic""" +27 2 evaluator """rankbased""" +27 3 dataset """kinships""" +27 3 model """complex""" +27 3 loss """nssa""" +27 3 regularizer """no""" +27 3 optimizer """adadelta""" +27 3 training_loop """owa""" +27 3 negative_sampler """basic""" +27 3 evaluator """rankbased""" +27 4 dataset """kinships""" +27 4 model """complex""" +27 4 loss """nssa""" +27 4 regularizer """no""" +27 4 optimizer """adadelta""" +27 4 training_loop """owa""" +27 4 negative_sampler """basic""" +27 4 evaluator """rankbased""" +27 5 dataset """kinships""" +27 5 model """complex""" +27 5 loss """nssa""" +27 5 regularizer """no""" +27 5 optimizer """adadelta""" +27 5 training_loop """owa""" +27 5 negative_sampler """basic""" +27 5 evaluator """rankbased""" +27 6 dataset """kinships""" +27 6 model """complex""" +27 6 loss """nssa""" +27 6 regularizer """no""" +27 6 optimizer """adadelta""" +27 6 training_loop """owa""" +27 6 negative_sampler 
"""basic""" +27 6 evaluator """rankbased""" +27 7 dataset """kinships""" +27 7 model """complex""" +27 7 loss """nssa""" +27 7 regularizer """no""" +27 7 optimizer """adadelta""" +27 7 training_loop """owa""" +27 7 negative_sampler """basic""" +27 7 evaluator """rankbased""" +27 8 dataset """kinships""" +27 8 model """complex""" +27 8 loss """nssa""" +27 8 regularizer """no""" +27 8 optimizer """adadelta""" +27 8 training_loop """owa""" +27 8 negative_sampler """basic""" +27 8 evaluator """rankbased""" +27 9 dataset """kinships""" +27 9 model """complex""" +27 9 loss """nssa""" +27 9 regularizer """no""" +27 9 optimizer """adadelta""" +27 9 training_loop """owa""" +27 9 negative_sampler """basic""" +27 9 evaluator """rankbased""" +27 10 dataset """kinships""" +27 10 model """complex""" +27 10 loss """nssa""" +27 10 regularizer """no""" +27 10 optimizer """adadelta""" +27 10 training_loop """owa""" +27 10 negative_sampler """basic""" +27 10 evaluator """rankbased""" +27 11 dataset """kinships""" +27 11 model """complex""" +27 11 loss """nssa""" +27 11 regularizer """no""" +27 11 optimizer """adadelta""" +27 11 training_loop """owa""" +27 11 negative_sampler """basic""" +27 11 evaluator """rankbased""" +27 12 dataset """kinships""" +27 12 model """complex""" +27 12 loss """nssa""" +27 12 regularizer """no""" +27 12 optimizer """adadelta""" +27 12 training_loop """owa""" +27 12 negative_sampler """basic""" +27 12 evaluator """rankbased""" +27 13 dataset """kinships""" +27 13 model """complex""" +27 13 loss """nssa""" +27 13 regularizer """no""" +27 13 optimizer """adadelta""" +27 13 training_loop """owa""" +27 13 negative_sampler """basic""" +27 13 evaluator """rankbased""" +27 14 dataset """kinships""" +27 14 model """complex""" +27 14 loss """nssa""" +27 14 regularizer """no""" +27 14 optimizer """adadelta""" +27 14 training_loop """owa""" +27 14 negative_sampler """basic""" +27 14 evaluator """rankbased""" +27 15 dataset """kinships""" +27 15 model """complex""" 
+27 15 loss """nssa""" +27 15 regularizer """no""" +27 15 optimizer """adadelta""" +27 15 training_loop """owa""" +27 15 negative_sampler """basic""" +27 15 evaluator """rankbased""" +27 16 dataset """kinships""" +27 16 model """complex""" +27 16 loss """nssa""" +27 16 regularizer """no""" +27 16 optimizer """adadelta""" +27 16 training_loop """owa""" +27 16 negative_sampler """basic""" +27 16 evaluator """rankbased""" +27 17 dataset """kinships""" +27 17 model """complex""" +27 17 loss """nssa""" +27 17 regularizer """no""" +27 17 optimizer """adadelta""" +27 17 training_loop """owa""" +27 17 negative_sampler """basic""" +27 17 evaluator """rankbased""" +27 18 dataset """kinships""" +27 18 model """complex""" +27 18 loss """nssa""" +27 18 regularizer """no""" +27 18 optimizer """adadelta""" +27 18 training_loop """owa""" +27 18 negative_sampler """basic""" +27 18 evaluator """rankbased""" +27 19 dataset """kinships""" +27 19 model """complex""" +27 19 loss """nssa""" +27 19 regularizer """no""" +27 19 optimizer """adadelta""" +27 19 training_loop """owa""" +27 19 negative_sampler """basic""" +27 19 evaluator """rankbased""" +27 20 dataset """kinships""" +27 20 model """complex""" +27 20 loss """nssa""" +27 20 regularizer """no""" +27 20 optimizer """adadelta""" +27 20 training_loop """owa""" +27 20 negative_sampler """basic""" +27 20 evaluator """rankbased""" +27 21 dataset """kinships""" +27 21 model """complex""" +27 21 loss """nssa""" +27 21 regularizer """no""" +27 21 optimizer """adadelta""" +27 21 training_loop """owa""" +27 21 negative_sampler """basic""" +27 21 evaluator """rankbased""" +27 22 dataset """kinships""" +27 22 model """complex""" +27 22 loss """nssa""" +27 22 regularizer """no""" +27 22 optimizer """adadelta""" +27 22 training_loop """owa""" +27 22 negative_sampler """basic""" +27 22 evaluator """rankbased""" +27 23 dataset """kinships""" +27 23 model """complex""" +27 23 loss """nssa""" +27 23 regularizer """no""" +27 23 optimizer 
"""adadelta""" +27 23 training_loop """owa""" +27 23 negative_sampler """basic""" +27 23 evaluator """rankbased""" +27 24 dataset """kinships""" +27 24 model """complex""" +27 24 loss """nssa""" +27 24 regularizer """no""" +27 24 optimizer """adadelta""" +27 24 training_loop """owa""" +27 24 negative_sampler """basic""" +27 24 evaluator """rankbased""" +27 25 dataset """kinships""" +27 25 model """complex""" +27 25 loss """nssa""" +27 25 regularizer """no""" +27 25 optimizer """adadelta""" +27 25 training_loop """owa""" +27 25 negative_sampler """basic""" +27 25 evaluator """rankbased""" +27 26 dataset """kinships""" +27 26 model """complex""" +27 26 loss """nssa""" +27 26 regularizer """no""" +27 26 optimizer """adadelta""" +27 26 training_loop """owa""" +27 26 negative_sampler """basic""" +27 26 evaluator """rankbased""" +27 27 dataset """kinships""" +27 27 model """complex""" +27 27 loss """nssa""" +27 27 regularizer """no""" +27 27 optimizer """adadelta""" +27 27 training_loop """owa""" +27 27 negative_sampler """basic""" +27 27 evaluator """rankbased""" +27 28 dataset """kinships""" +27 28 model """complex""" +27 28 loss """nssa""" +27 28 regularizer """no""" +27 28 optimizer """adadelta""" +27 28 training_loop """owa""" +27 28 negative_sampler """basic""" +27 28 evaluator """rankbased""" +27 29 dataset """kinships""" +27 29 model """complex""" +27 29 loss """nssa""" +27 29 regularizer """no""" +27 29 optimizer """adadelta""" +27 29 training_loop """owa""" +27 29 negative_sampler """basic""" +27 29 evaluator """rankbased""" +27 30 dataset """kinships""" +27 30 model """complex""" +27 30 loss """nssa""" +27 30 regularizer """no""" +27 30 optimizer """adadelta""" +27 30 training_loop """owa""" +27 30 negative_sampler """basic""" +27 30 evaluator """rankbased""" +27 31 dataset """kinships""" +27 31 model """complex""" +27 31 loss """nssa""" +27 31 regularizer """no""" +27 31 optimizer """adadelta""" +27 31 training_loop """owa""" +27 31 negative_sampler 
"""basic""" +27 31 evaluator """rankbased""" +27 32 dataset """kinships""" +27 32 model """complex""" +27 32 loss """nssa""" +27 32 regularizer """no""" +27 32 optimizer """adadelta""" +27 32 training_loop """owa""" +27 32 negative_sampler """basic""" +27 32 evaluator """rankbased""" +27 33 dataset """kinships""" +27 33 model """complex""" +27 33 loss """nssa""" +27 33 regularizer """no""" +27 33 optimizer """adadelta""" +27 33 training_loop """owa""" +27 33 negative_sampler """basic""" +27 33 evaluator """rankbased""" +27 34 dataset """kinships""" +27 34 model """complex""" +27 34 loss """nssa""" +27 34 regularizer """no""" +27 34 optimizer """adadelta""" +27 34 training_loop """owa""" +27 34 negative_sampler """basic""" +27 34 evaluator """rankbased""" +27 35 dataset """kinships""" +27 35 model """complex""" +27 35 loss """nssa""" +27 35 regularizer """no""" +27 35 optimizer """adadelta""" +27 35 training_loop """owa""" +27 35 negative_sampler """basic""" +27 35 evaluator """rankbased""" +27 36 dataset """kinships""" +27 36 model """complex""" +27 36 loss """nssa""" +27 36 regularizer """no""" +27 36 optimizer """adadelta""" +27 36 training_loop """owa""" +27 36 negative_sampler """basic""" +27 36 evaluator """rankbased""" +27 37 dataset """kinships""" +27 37 model """complex""" +27 37 loss """nssa""" +27 37 regularizer """no""" +27 37 optimizer """adadelta""" +27 37 training_loop """owa""" +27 37 negative_sampler """basic""" +27 37 evaluator """rankbased""" +27 38 dataset """kinships""" +27 38 model """complex""" +27 38 loss """nssa""" +27 38 regularizer """no""" +27 38 optimizer """adadelta""" +27 38 training_loop """owa""" +27 38 negative_sampler """basic""" +27 38 evaluator """rankbased""" +27 39 dataset """kinships""" +27 39 model """complex""" +27 39 loss """nssa""" +27 39 regularizer """no""" +27 39 optimizer """adadelta""" +27 39 training_loop """owa""" +27 39 negative_sampler """basic""" +27 39 evaluator """rankbased""" +27 40 dataset """kinships""" +27 
40 model """complex""" +27 40 loss """nssa""" +27 40 regularizer """no""" +27 40 optimizer """adadelta""" +27 40 training_loop """owa""" +27 40 negative_sampler """basic""" +27 40 evaluator """rankbased""" +27 41 dataset """kinships""" +27 41 model """complex""" +27 41 loss """nssa""" +27 41 regularizer """no""" +27 41 optimizer """adadelta""" +27 41 training_loop """owa""" +27 41 negative_sampler """basic""" +27 41 evaluator """rankbased""" +27 42 dataset """kinships""" +27 42 model """complex""" +27 42 loss """nssa""" +27 42 regularizer """no""" +27 42 optimizer """adadelta""" +27 42 training_loop """owa""" +27 42 negative_sampler """basic""" +27 42 evaluator """rankbased""" +27 43 dataset """kinships""" +27 43 model """complex""" +27 43 loss """nssa""" +27 43 regularizer """no""" +27 43 optimizer """adadelta""" +27 43 training_loop """owa""" +27 43 negative_sampler """basic""" +27 43 evaluator """rankbased""" +27 44 dataset """kinships""" +27 44 model """complex""" +27 44 loss """nssa""" +27 44 regularizer """no""" +27 44 optimizer """adadelta""" +27 44 training_loop """owa""" +27 44 negative_sampler """basic""" +27 44 evaluator """rankbased""" +27 45 dataset """kinships""" +27 45 model """complex""" +27 45 loss """nssa""" +27 45 regularizer """no""" +27 45 optimizer """adadelta""" +27 45 training_loop """owa""" +27 45 negative_sampler """basic""" +27 45 evaluator """rankbased""" +27 46 dataset """kinships""" +27 46 model """complex""" +27 46 loss """nssa""" +27 46 regularizer """no""" +27 46 optimizer """adadelta""" +27 46 training_loop """owa""" +27 46 negative_sampler """basic""" +27 46 evaluator """rankbased""" +27 47 dataset """kinships""" +27 47 model """complex""" +27 47 loss """nssa""" +27 47 regularizer """no""" +27 47 optimizer """adadelta""" +27 47 training_loop """owa""" +27 47 negative_sampler """basic""" +27 47 evaluator """rankbased""" +27 48 dataset """kinships""" +27 48 model """complex""" +27 48 loss """nssa""" +27 48 regularizer """no""" +27 
48 optimizer """adadelta""" +27 48 training_loop """owa""" +27 48 negative_sampler """basic""" +27 48 evaluator """rankbased""" +27 49 dataset """kinships""" +27 49 model """complex""" +27 49 loss """nssa""" +27 49 regularizer """no""" +27 49 optimizer """adadelta""" +27 49 training_loop """owa""" +27 49 negative_sampler """basic""" +27 49 evaluator """rankbased""" +27 50 dataset """kinships""" +27 50 model """complex""" +27 50 loss """nssa""" +27 50 regularizer """no""" +27 50 optimizer """adadelta""" +27 50 training_loop """owa""" +27 50 negative_sampler """basic""" +27 50 evaluator """rankbased""" +27 51 dataset """kinships""" +27 51 model """complex""" +27 51 loss """nssa""" +27 51 regularizer """no""" +27 51 optimizer """adadelta""" +27 51 training_loop """owa""" +27 51 negative_sampler """basic""" +27 51 evaluator """rankbased""" +27 52 dataset """kinships""" +27 52 model """complex""" +27 52 loss """nssa""" +27 52 regularizer """no""" +27 52 optimizer """adadelta""" +27 52 training_loop """owa""" +27 52 negative_sampler """basic""" +27 52 evaluator """rankbased""" +27 53 dataset """kinships""" +27 53 model """complex""" +27 53 loss """nssa""" +27 53 regularizer """no""" +27 53 optimizer """adadelta""" +27 53 training_loop """owa""" +27 53 negative_sampler """basic""" +27 53 evaluator """rankbased""" +27 54 dataset """kinships""" +27 54 model """complex""" +27 54 loss """nssa""" +27 54 regularizer """no""" +27 54 optimizer """adadelta""" +27 54 training_loop """owa""" +27 54 negative_sampler """basic""" +27 54 evaluator """rankbased""" +27 55 dataset """kinships""" +27 55 model """complex""" +27 55 loss """nssa""" +27 55 regularizer """no""" +27 55 optimizer """adadelta""" +27 55 training_loop """owa""" +27 55 negative_sampler """basic""" +27 55 evaluator """rankbased""" +27 56 dataset """kinships""" +27 56 model """complex""" +27 56 loss """nssa""" +27 56 regularizer """no""" +27 56 optimizer """adadelta""" +27 56 training_loop """owa""" +27 56 
negative_sampler """basic""" +27 56 evaluator """rankbased""" +27 57 dataset """kinships""" +27 57 model """complex""" +27 57 loss """nssa""" +27 57 regularizer """no""" +27 57 optimizer """adadelta""" +27 57 training_loop """owa""" +27 57 negative_sampler """basic""" +27 57 evaluator """rankbased""" +27 58 dataset """kinships""" +27 58 model """complex""" +27 58 loss """nssa""" +27 58 regularizer """no""" +27 58 optimizer """adadelta""" +27 58 training_loop """owa""" +27 58 negative_sampler """basic""" +27 58 evaluator """rankbased""" +27 59 dataset """kinships""" +27 59 model """complex""" +27 59 loss """nssa""" +27 59 regularizer """no""" +27 59 optimizer """adadelta""" +27 59 training_loop """owa""" +27 59 negative_sampler """basic""" +27 59 evaluator """rankbased""" +27 60 dataset """kinships""" +27 60 model """complex""" +27 60 loss """nssa""" +27 60 regularizer """no""" +27 60 optimizer """adadelta""" +27 60 training_loop """owa""" +27 60 negative_sampler """basic""" +27 60 evaluator """rankbased""" +27 61 dataset """kinships""" +27 61 model """complex""" +27 61 loss """nssa""" +27 61 regularizer """no""" +27 61 optimizer """adadelta""" +27 61 training_loop """owa""" +27 61 negative_sampler """basic""" +27 61 evaluator """rankbased""" +27 62 dataset """kinships""" +27 62 model """complex""" +27 62 loss """nssa""" +27 62 regularizer """no""" +27 62 optimizer """adadelta""" +27 62 training_loop """owa""" +27 62 negative_sampler """basic""" +27 62 evaluator """rankbased""" +27 63 dataset """kinships""" +27 63 model """complex""" +27 63 loss """nssa""" +27 63 regularizer """no""" +27 63 optimizer """adadelta""" +27 63 training_loop """owa""" +27 63 negative_sampler """basic""" +27 63 evaluator """rankbased""" +27 64 dataset """kinships""" +27 64 model """complex""" +27 64 loss """nssa""" +27 64 regularizer """no""" +27 64 optimizer """adadelta""" +27 64 training_loop """owa""" +27 64 negative_sampler """basic""" +27 64 evaluator """rankbased""" +27 65 dataset 
"""kinships""" +27 65 model """complex""" +27 65 loss """nssa""" +27 65 regularizer """no""" +27 65 optimizer """adadelta""" +27 65 training_loop """owa""" +27 65 negative_sampler """basic""" +27 65 evaluator """rankbased""" +27 66 dataset """kinships""" +27 66 model """complex""" +27 66 loss """nssa""" +27 66 regularizer """no""" +27 66 optimizer """adadelta""" +27 66 training_loop """owa""" +27 66 negative_sampler """basic""" +27 66 evaluator """rankbased""" +27 67 dataset """kinships""" +27 67 model """complex""" +27 67 loss """nssa""" +27 67 regularizer """no""" +27 67 optimizer """adadelta""" +27 67 training_loop """owa""" +27 67 negative_sampler """basic""" +27 67 evaluator """rankbased""" +27 68 dataset """kinships""" +27 68 model """complex""" +27 68 loss """nssa""" +27 68 regularizer """no""" +27 68 optimizer """adadelta""" +27 68 training_loop """owa""" +27 68 negative_sampler """basic""" +27 68 evaluator """rankbased""" +27 69 dataset """kinships""" +27 69 model """complex""" +27 69 loss """nssa""" +27 69 regularizer """no""" +27 69 optimizer """adadelta""" +27 69 training_loop """owa""" +27 69 negative_sampler """basic""" +27 69 evaluator """rankbased""" +27 70 dataset """kinships""" +27 70 model """complex""" +27 70 loss """nssa""" +27 70 regularizer """no""" +27 70 optimizer """adadelta""" +27 70 training_loop """owa""" +27 70 negative_sampler """basic""" +27 70 evaluator """rankbased""" +27 71 dataset """kinships""" +27 71 model """complex""" +27 71 loss """nssa""" +27 71 regularizer """no""" +27 71 optimizer """adadelta""" +27 71 training_loop """owa""" +27 71 negative_sampler """basic""" +27 71 evaluator """rankbased""" +27 72 dataset """kinships""" +27 72 model """complex""" +27 72 loss """nssa""" +27 72 regularizer """no""" +27 72 optimizer """adadelta""" +27 72 training_loop """owa""" +27 72 negative_sampler """basic""" +27 72 evaluator """rankbased""" +27 73 dataset """kinships""" +27 73 model """complex""" +27 73 loss """nssa""" +27 73 
regularizer """no""" +27 73 optimizer """adadelta""" +27 73 training_loop """owa""" +27 73 negative_sampler """basic""" +27 73 evaluator """rankbased""" +27 74 dataset """kinships""" +27 74 model """complex""" +27 74 loss """nssa""" +27 74 regularizer """no""" +27 74 optimizer """adadelta""" +27 74 training_loop """owa""" +27 74 negative_sampler """basic""" +27 74 evaluator """rankbased""" +27 75 dataset """kinships""" +27 75 model """complex""" +27 75 loss """nssa""" +27 75 regularizer """no""" +27 75 optimizer """adadelta""" +27 75 training_loop """owa""" +27 75 negative_sampler """basic""" +27 75 evaluator """rankbased""" +27 76 dataset """kinships""" +27 76 model """complex""" +27 76 loss """nssa""" +27 76 regularizer """no""" +27 76 optimizer """adadelta""" +27 76 training_loop """owa""" +27 76 negative_sampler """basic""" +27 76 evaluator """rankbased""" +27 77 dataset """kinships""" +27 77 model """complex""" +27 77 loss """nssa""" +27 77 regularizer """no""" +27 77 optimizer """adadelta""" +27 77 training_loop """owa""" +27 77 negative_sampler """basic""" +27 77 evaluator """rankbased""" +27 78 dataset """kinships""" +27 78 model """complex""" +27 78 loss """nssa""" +27 78 regularizer """no""" +27 78 optimizer """adadelta""" +27 78 training_loop """owa""" +27 78 negative_sampler """basic""" +27 78 evaluator """rankbased""" +27 79 dataset """kinships""" +27 79 model """complex""" +27 79 loss """nssa""" +27 79 regularizer """no""" +27 79 optimizer """adadelta""" +27 79 training_loop """owa""" +27 79 negative_sampler """basic""" +27 79 evaluator """rankbased""" +27 80 dataset """kinships""" +27 80 model """complex""" +27 80 loss """nssa""" +27 80 regularizer """no""" +27 80 optimizer """adadelta""" +27 80 training_loop """owa""" +27 80 negative_sampler """basic""" +27 80 evaluator """rankbased""" +27 81 dataset """kinships""" +27 81 model """complex""" +27 81 loss """nssa""" +27 81 regularizer """no""" +27 81 optimizer """adadelta""" +27 81 training_loop 
"""owa""" +27 81 negative_sampler """basic""" +27 81 evaluator """rankbased""" +27 82 dataset """kinships""" +27 82 model """complex""" +27 82 loss """nssa""" +27 82 regularizer """no""" +27 82 optimizer """adadelta""" +27 82 training_loop """owa""" +27 82 negative_sampler """basic""" +27 82 evaluator """rankbased""" +27 83 dataset """kinships""" +27 83 model """complex""" +27 83 loss """nssa""" +27 83 regularizer """no""" +27 83 optimizer """adadelta""" +27 83 training_loop """owa""" +27 83 negative_sampler """basic""" +27 83 evaluator """rankbased""" +27 84 dataset """kinships""" +27 84 model """complex""" +27 84 loss """nssa""" +27 84 regularizer """no""" +27 84 optimizer """adadelta""" +27 84 training_loop """owa""" +27 84 negative_sampler """basic""" +27 84 evaluator """rankbased""" +27 85 dataset """kinships""" +27 85 model """complex""" +27 85 loss """nssa""" +27 85 regularizer """no""" +27 85 optimizer """adadelta""" +27 85 training_loop """owa""" +27 85 negative_sampler """basic""" +27 85 evaluator """rankbased""" +27 86 dataset """kinships""" +27 86 model """complex""" +27 86 loss """nssa""" +27 86 regularizer """no""" +27 86 optimizer """adadelta""" +27 86 training_loop """owa""" +27 86 negative_sampler """basic""" +27 86 evaluator """rankbased""" +27 87 dataset """kinships""" +27 87 model """complex""" +27 87 loss """nssa""" +27 87 regularizer """no""" +27 87 optimizer """adadelta""" +27 87 training_loop """owa""" +27 87 negative_sampler """basic""" +27 87 evaluator """rankbased""" +27 88 dataset """kinships""" +27 88 model """complex""" +27 88 loss """nssa""" +27 88 regularizer """no""" +27 88 optimizer """adadelta""" +27 88 training_loop """owa""" +27 88 negative_sampler """basic""" +27 88 evaluator """rankbased""" +27 89 dataset """kinships""" +27 89 model """complex""" +27 89 loss """nssa""" +27 89 regularizer """no""" +27 89 optimizer """adadelta""" +27 89 training_loop """owa""" +27 89 negative_sampler """basic""" +27 89 evaluator """rankbased""" 
+27 90 dataset """kinships""" +27 90 model """complex""" +27 90 loss """nssa""" +27 90 regularizer """no""" +27 90 optimizer """adadelta""" +27 90 training_loop """owa""" +27 90 negative_sampler """basic""" +27 90 evaluator """rankbased""" +27 91 dataset """kinships""" +27 91 model """complex""" +27 91 loss """nssa""" +27 91 regularizer """no""" +27 91 optimizer """adadelta""" +27 91 training_loop """owa""" +27 91 negative_sampler """basic""" +27 91 evaluator """rankbased""" +27 92 dataset """kinships""" +27 92 model """complex""" +27 92 loss """nssa""" +27 92 regularizer """no""" +27 92 optimizer """adadelta""" +27 92 training_loop """owa""" +27 92 negative_sampler """basic""" +27 92 evaluator """rankbased""" +27 93 dataset """kinships""" +27 93 model """complex""" +27 93 loss """nssa""" +27 93 regularizer """no""" +27 93 optimizer """adadelta""" +27 93 training_loop """owa""" +27 93 negative_sampler """basic""" +27 93 evaluator """rankbased""" +27 94 dataset """kinships""" +27 94 model """complex""" +27 94 loss """nssa""" +27 94 regularizer """no""" +27 94 optimizer """adadelta""" +27 94 training_loop """owa""" +27 94 negative_sampler """basic""" +27 94 evaluator """rankbased""" +27 95 dataset """kinships""" +27 95 model """complex""" +27 95 loss """nssa""" +27 95 regularizer """no""" +27 95 optimizer """adadelta""" +27 95 training_loop """owa""" +27 95 negative_sampler """basic""" +27 95 evaluator """rankbased""" +27 96 dataset """kinships""" +27 96 model """complex""" +27 96 loss """nssa""" +27 96 regularizer """no""" +27 96 optimizer """adadelta""" +27 96 training_loop """owa""" +27 96 negative_sampler """basic""" +27 96 evaluator """rankbased""" +27 97 dataset """kinships""" +27 97 model """complex""" +27 97 loss """nssa""" +27 97 regularizer """no""" +27 97 optimizer """adadelta""" +27 97 training_loop """owa""" +27 97 negative_sampler """basic""" +27 97 evaluator """rankbased""" +27 98 dataset """kinships""" +27 98 model """complex""" +27 98 loss """nssa""" 
+27 98 regularizer """no""" +27 98 optimizer """adadelta""" +27 98 training_loop """owa""" +27 98 negative_sampler """basic""" +27 98 evaluator """rankbased""" +27 99 dataset """kinships""" +27 99 model """complex""" +27 99 loss """nssa""" +27 99 regularizer """no""" +27 99 optimizer """adadelta""" +27 99 training_loop """owa""" +27 99 negative_sampler """basic""" +27 99 evaluator """rankbased""" +27 100 dataset """kinships""" +27 100 model """complex""" +27 100 loss """nssa""" +27 100 regularizer """no""" +27 100 optimizer """adadelta""" +27 100 training_loop """owa""" +27 100 negative_sampler """basic""" +27 100 evaluator """rankbased""" +28 1 model.embedding_dim 2.0 +28 1 optimizer.lr 0.0252268983314358 +28 1 training.batch_size 0.0 +28 1 training.label_smoothing 0.42404377939735294 +28 2 model.embedding_dim 0.0 +28 2 optimizer.lr 0.0018494070591902685 +28 2 training.batch_size 2.0 +28 2 training.label_smoothing 0.9732632496893231 +28 3 model.embedding_dim 1.0 +28 3 optimizer.lr 0.005432410653932049 +28 3 training.batch_size 2.0 +28 3 training.label_smoothing 0.1347547207004005 +28 4 model.embedding_dim 2.0 +28 4 optimizer.lr 0.012502972470680147 +28 4 training.batch_size 1.0 +28 4 training.label_smoothing 0.09870594285975713 +28 5 model.embedding_dim 2.0 +28 5 optimizer.lr 0.0023496586027457003 +28 5 training.batch_size 1.0 +28 5 training.label_smoothing 0.49834863805177604 +28 6 model.embedding_dim 0.0 +28 6 optimizer.lr 0.0012560977043048617 +28 6 training.batch_size 2.0 +28 6 training.label_smoothing 0.05821696970003071 +28 7 model.embedding_dim 0.0 +28 7 optimizer.lr 0.0023840909292900973 +28 7 training.batch_size 2.0 +28 7 training.label_smoothing 0.10851786225579861 +28 8 model.embedding_dim 1.0 +28 8 optimizer.lr 0.028458426030298446 +28 8 training.batch_size 2.0 +28 8 training.label_smoothing 0.007263494072855671 +28 9 model.embedding_dim 2.0 +28 9 optimizer.lr 0.05561139207670833 +28 9 training.batch_size 1.0 +28 9 training.label_smoothing 
0.5991871075809777 +28 10 model.embedding_dim 1.0 +28 10 optimizer.lr 0.006559588714747565 +28 10 training.batch_size 0.0 +28 10 training.label_smoothing 0.004440603052993034 +28 11 model.embedding_dim 2.0 +28 11 optimizer.lr 0.001991186745845425 +28 11 training.batch_size 0.0 +28 11 training.label_smoothing 0.01518931543445914 +28 12 model.embedding_dim 2.0 +28 12 optimizer.lr 0.001023529028717117 +28 12 training.batch_size 0.0 +28 12 training.label_smoothing 0.00854852764389247 +28 13 model.embedding_dim 2.0 +28 13 optimizer.lr 0.005474167843028839 +28 13 training.batch_size 2.0 +28 13 training.label_smoothing 0.23584587706734073 +28 14 model.embedding_dim 0.0 +28 14 optimizer.lr 0.021099273269457805 +28 14 training.batch_size 0.0 +28 14 training.label_smoothing 0.009661067476090551 +28 15 model.embedding_dim 2.0 +28 15 optimizer.lr 0.023354573158277832 +28 15 training.batch_size 0.0 +28 15 training.label_smoothing 0.014341592660122611 +28 16 model.embedding_dim 0.0 +28 16 optimizer.lr 0.008153420339732839 +28 16 training.batch_size 2.0 +28 16 training.label_smoothing 0.0012077927337736992 +28 17 model.embedding_dim 2.0 +28 17 optimizer.lr 0.005946778041352271 +28 17 training.batch_size 2.0 +28 17 training.label_smoothing 0.009592095251648452 +28 18 model.embedding_dim 1.0 +28 18 optimizer.lr 0.001326915263925954 +28 18 training.batch_size 2.0 +28 18 training.label_smoothing 0.5876227534185683 +28 19 model.embedding_dim 1.0 +28 19 optimizer.lr 0.0018910495172688375 +28 19 training.batch_size 0.0 +28 19 training.label_smoothing 0.007862845057268924 +28 20 model.embedding_dim 1.0 +28 20 optimizer.lr 0.021972841892579194 +28 20 training.batch_size 2.0 +28 20 training.label_smoothing 0.591332238051387 +28 21 model.embedding_dim 0.0 +28 21 optimizer.lr 0.09136916853709963 +28 21 training.batch_size 1.0 +28 21 training.label_smoothing 0.005831522959507021 +28 22 model.embedding_dim 1.0 +28 22 optimizer.lr 0.022226067025980928 +28 22 training.batch_size 0.0 +28 22 
training.label_smoothing 0.3877199727158877 +28 23 model.embedding_dim 2.0 +28 23 optimizer.lr 0.002082886457489398 +28 23 training.batch_size 1.0 +28 23 training.label_smoothing 0.13270718175412283 +28 24 model.embedding_dim 2.0 +28 24 optimizer.lr 0.022737306934854703 +28 24 training.batch_size 1.0 +28 24 training.label_smoothing 0.025050324221121426 +28 25 model.embedding_dim 2.0 +28 25 optimizer.lr 0.006552832959397612 +28 25 training.batch_size 0.0 +28 25 training.label_smoothing 0.17247121407162802 +28 26 model.embedding_dim 0.0 +28 26 optimizer.lr 0.03711365874490132 +28 26 training.batch_size 0.0 +28 26 training.label_smoothing 0.7382736419706165 +28 27 model.embedding_dim 2.0 +28 27 optimizer.lr 0.0010021848147626165 +28 27 training.batch_size 2.0 +28 27 training.label_smoothing 0.007527590501446798 +28 28 model.embedding_dim 2.0 +28 28 optimizer.lr 0.002592060625660357 +28 28 training.batch_size 0.0 +28 28 training.label_smoothing 0.006645038377004267 +28 29 model.embedding_dim 0.0 +28 29 optimizer.lr 0.052470068162718805 +28 29 training.batch_size 1.0 +28 29 training.label_smoothing 0.03615725480468513 +28 30 model.embedding_dim 1.0 +28 30 optimizer.lr 0.0020076923065572524 +28 30 training.batch_size 2.0 +28 30 training.label_smoothing 0.04372284814587994 +28 31 model.embedding_dim 0.0 +28 31 optimizer.lr 0.0039424462419132035 +28 31 training.batch_size 2.0 +28 31 training.label_smoothing 0.04044660438033253 +28 32 model.embedding_dim 2.0 +28 32 optimizer.lr 0.06515739145455571 +28 32 training.batch_size 1.0 +28 32 training.label_smoothing 0.1402431134135938 +28 33 model.embedding_dim 1.0 +28 33 optimizer.lr 0.003188986050186657 +28 33 training.batch_size 0.0 +28 33 training.label_smoothing 0.021651938633332087 +28 34 model.embedding_dim 2.0 +28 34 optimizer.lr 0.0686643487635881 +28 34 training.batch_size 0.0 +28 34 training.label_smoothing 0.0022933129549258273 +28 35 model.embedding_dim 0.0 +28 35 optimizer.lr 0.008224180457211742 +28 35 
training.batch_size 1.0 +28 35 training.label_smoothing 0.8308788789714373 +28 36 model.embedding_dim 0.0 +28 36 optimizer.lr 0.0056861790748787 +28 36 training.batch_size 0.0 +28 36 training.label_smoothing 0.003213971495817641 +28 37 model.embedding_dim 2.0 +28 37 optimizer.lr 0.004855313560752009 +28 37 training.batch_size 2.0 +28 37 training.label_smoothing 0.17616809754326568 +28 38 model.embedding_dim 2.0 +28 38 optimizer.lr 0.0012774286395811005 +28 38 training.batch_size 1.0 +28 38 training.label_smoothing 0.026578500830199057 +28 39 model.embedding_dim 2.0 +28 39 optimizer.lr 0.018890150965981126 +28 39 training.batch_size 1.0 +28 39 training.label_smoothing 0.4290781461377226 +28 40 model.embedding_dim 1.0 +28 40 optimizer.lr 0.008825931219739633 +28 40 training.batch_size 0.0 +28 40 training.label_smoothing 0.001229269571093993 +28 41 model.embedding_dim 2.0 +28 41 optimizer.lr 0.02747943046299538 +28 41 training.batch_size 2.0 +28 41 training.label_smoothing 0.21525747915415075 +28 42 model.embedding_dim 1.0 +28 42 optimizer.lr 0.0014927479442466984 +28 42 training.batch_size 1.0 +28 42 training.label_smoothing 0.1830155000455051 +28 43 model.embedding_dim 1.0 +28 43 optimizer.lr 0.042640319317964845 +28 43 training.batch_size 2.0 +28 43 training.label_smoothing 0.6225078956491882 +28 44 model.embedding_dim 2.0 +28 44 optimizer.lr 0.0027173731548599993 +28 44 training.batch_size 1.0 +28 44 training.label_smoothing 0.5642009611006384 +28 45 model.embedding_dim 1.0 +28 45 optimizer.lr 0.004727171644512636 +28 45 training.batch_size 1.0 +28 45 training.label_smoothing 0.0028815725079939306 +28 46 model.embedding_dim 2.0 +28 46 optimizer.lr 0.004736634907758889 +28 46 training.batch_size 2.0 +28 46 training.label_smoothing 0.00801926202004752 +28 47 model.embedding_dim 1.0 +28 47 optimizer.lr 0.015479247424236234 +28 47 training.batch_size 0.0 +28 47 training.label_smoothing 0.004643296394322442 +28 48 model.embedding_dim 0.0 +28 48 optimizer.lr 
0.013253265457696384 +28 48 training.batch_size 2.0 +28 48 training.label_smoothing 0.0014033094361497538 +28 49 model.embedding_dim 0.0 +28 49 optimizer.lr 0.04573632373363562 +28 49 training.batch_size 1.0 +28 49 training.label_smoothing 0.001062981903891826 +28 50 model.embedding_dim 0.0 +28 50 optimizer.lr 0.004769774955038688 +28 50 training.batch_size 2.0 +28 50 training.label_smoothing 0.06672199012167625 +28 51 model.embedding_dim 2.0 +28 51 optimizer.lr 0.004123678140039322 +28 51 training.batch_size 0.0 +28 51 training.label_smoothing 0.004807190517299726 +28 52 model.embedding_dim 2.0 +28 52 optimizer.lr 0.006795302353683959 +28 52 training.batch_size 0.0 +28 52 training.label_smoothing 0.02383662855564187 +28 53 model.embedding_dim 0.0 +28 53 optimizer.lr 0.001714288628938695 +28 53 training.batch_size 1.0 +28 53 training.label_smoothing 0.13057925360317132 +28 54 model.embedding_dim 2.0 +28 54 optimizer.lr 0.002234728850728095 +28 54 training.batch_size 1.0 +28 54 training.label_smoothing 0.031217531199249072 +28 55 model.embedding_dim 1.0 +28 55 optimizer.lr 0.09160228720692097 +28 55 training.batch_size 2.0 +28 55 training.label_smoothing 0.929193581781885 +28 56 model.embedding_dim 1.0 +28 56 optimizer.lr 0.0014028497440116718 +28 56 training.batch_size 0.0 +28 56 training.label_smoothing 0.013841740564227429 +28 57 model.embedding_dim 2.0 +28 57 optimizer.lr 0.0026339739060376372 +28 57 training.batch_size 0.0 +28 57 training.label_smoothing 0.0014865482911193995 +28 58 model.embedding_dim 2.0 +28 58 optimizer.lr 0.023072318998083048 +28 58 training.batch_size 0.0 +28 58 training.label_smoothing 0.588870338006269 +28 59 model.embedding_dim 2.0 +28 59 optimizer.lr 0.009534074992246776 +28 59 training.batch_size 2.0 +28 59 training.label_smoothing 0.4966753329209766 +28 60 model.embedding_dim 0.0 +28 60 optimizer.lr 0.010981409059079785 +28 60 training.batch_size 2.0 +28 60 training.label_smoothing 0.31638266931879205 +28 61 model.embedding_dim 0.0 
+28 61 optimizer.lr 0.044351241874106585 +28 61 training.batch_size 2.0 +28 61 training.label_smoothing 0.0012885692791249122 +28 62 model.embedding_dim 1.0 +28 62 optimizer.lr 0.01663481916431402 +28 62 training.batch_size 2.0 +28 62 training.label_smoothing 0.11637785241511843 +28 63 model.embedding_dim 0.0 +28 63 optimizer.lr 0.008827519826330796 +28 63 training.batch_size 0.0 +28 63 training.label_smoothing 0.20843713868152614 +28 64 model.embedding_dim 0.0 +28 64 optimizer.lr 0.06682967289966142 +28 64 training.batch_size 0.0 +28 64 training.label_smoothing 0.3076745913959208 +28 65 model.embedding_dim 0.0 +28 65 optimizer.lr 0.001969223955917535 +28 65 training.batch_size 0.0 +28 65 training.label_smoothing 0.730173435515465 +28 66 model.embedding_dim 0.0 +28 66 optimizer.lr 0.0034882191425204168 +28 66 training.batch_size 1.0 +28 66 training.label_smoothing 0.025176042895725566 +28 67 model.embedding_dim 0.0 +28 67 optimizer.lr 0.01844342395675061 +28 67 training.batch_size 1.0 +28 67 training.label_smoothing 0.01510760252289717 +28 68 model.embedding_dim 2.0 +28 68 optimizer.lr 0.060942391637110675 +28 68 training.batch_size 1.0 +28 68 training.label_smoothing 0.02473369733677743 +28 69 model.embedding_dim 0.0 +28 69 optimizer.lr 0.0013969152695080013 +28 69 training.batch_size 0.0 +28 69 training.label_smoothing 0.020314548265189775 +28 70 model.embedding_dim 1.0 +28 70 optimizer.lr 0.0017482757507498799 +28 70 training.batch_size 0.0 +28 70 training.label_smoothing 0.019415655267377943 +28 71 model.embedding_dim 1.0 +28 71 optimizer.lr 0.020133304658527133 +28 71 training.batch_size 1.0 +28 71 training.label_smoothing 0.1378696682431695 +28 72 model.embedding_dim 2.0 +28 72 optimizer.lr 0.006670901608599883 +28 72 training.batch_size 2.0 +28 72 training.label_smoothing 0.0018665344329675561 +28 73 model.embedding_dim 2.0 +28 73 optimizer.lr 0.001522172918295041 +28 73 training.batch_size 0.0 +28 73 training.label_smoothing 0.08012364152603456 +28 74 
model.embedding_dim 2.0 +28 74 optimizer.lr 0.0018539297568376145 +28 74 training.batch_size 1.0 +28 74 training.label_smoothing 0.0037417467154281427 +28 75 model.embedding_dim 2.0 +28 75 optimizer.lr 0.003183268037040521 +28 75 training.batch_size 2.0 +28 75 training.label_smoothing 0.01705885955668039 +28 76 model.embedding_dim 1.0 +28 76 optimizer.lr 0.0011042085468071963 +28 76 training.batch_size 2.0 +28 76 training.label_smoothing 0.0010420241946085085 +28 77 model.embedding_dim 1.0 +28 77 optimizer.lr 0.021736213541257557 +28 77 training.batch_size 0.0 +28 77 training.label_smoothing 0.006980233246735561 +28 78 model.embedding_dim 0.0 +28 78 optimizer.lr 0.014857195130565387 +28 78 training.batch_size 1.0 +28 78 training.label_smoothing 0.0021444435398570182 +28 79 model.embedding_dim 0.0 +28 79 optimizer.lr 0.03664877644013249 +28 79 training.batch_size 1.0 +28 79 training.label_smoothing 0.0022231125221496696 +28 80 model.embedding_dim 0.0 +28 80 optimizer.lr 0.0016440139360071513 +28 80 training.batch_size 1.0 +28 80 training.label_smoothing 0.11670337876422489 +28 81 model.embedding_dim 1.0 +28 81 optimizer.lr 0.004311281904405983 +28 81 training.batch_size 1.0 +28 81 training.label_smoothing 0.0023002833055156217 +28 82 model.embedding_dim 0.0 +28 82 optimizer.lr 0.0013498107691973151 +28 82 training.batch_size 0.0 +28 82 training.label_smoothing 0.24257543824832353 +28 83 model.embedding_dim 2.0 +28 83 optimizer.lr 0.0019499586593223735 +28 83 training.batch_size 0.0 +28 83 training.label_smoothing 0.019336065188215207 +28 84 model.embedding_dim 0.0 +28 84 optimizer.lr 0.020379761547378007 +28 84 training.batch_size 2.0 +28 84 training.label_smoothing 0.0107636309262333 +28 85 model.embedding_dim 1.0 +28 85 optimizer.lr 0.024621927192519637 +28 85 training.batch_size 1.0 +28 85 training.label_smoothing 0.12063007252531062 +28 86 model.embedding_dim 0.0 +28 86 optimizer.lr 0.009457306829938629 +28 86 training.batch_size 1.0 +28 86 
training.label_smoothing 0.3789079942837967 +28 87 model.embedding_dim 1.0 +28 87 optimizer.lr 0.006676376919106968 +28 87 training.batch_size 1.0 +28 87 training.label_smoothing 0.006350641427686595 +28 88 model.embedding_dim 0.0 +28 88 optimizer.lr 0.0018401599685221827 +28 88 training.batch_size 2.0 +28 88 training.label_smoothing 0.0466959400229024 +28 89 model.embedding_dim 2.0 +28 89 optimizer.lr 0.013865564433321511 +28 89 training.batch_size 0.0 +28 89 training.label_smoothing 0.7750724854172698 +28 90 model.embedding_dim 0.0 +28 90 optimizer.lr 0.06242687258033406 +28 90 training.batch_size 1.0 +28 90 training.label_smoothing 0.004917095561721602 +28 91 model.embedding_dim 2.0 +28 91 optimizer.lr 0.0024963927061302203 +28 91 training.batch_size 1.0 +28 91 training.label_smoothing 0.07680996110134969 +28 92 model.embedding_dim 0.0 +28 92 optimizer.lr 0.02551703745156793 +28 92 training.batch_size 2.0 +28 92 training.label_smoothing 0.02710245637987424 +28 93 model.embedding_dim 2.0 +28 93 optimizer.lr 0.0034666277558837184 +28 93 training.batch_size 0.0 +28 93 training.label_smoothing 0.11096316122272429 +28 94 model.embedding_dim 1.0 +28 94 optimizer.lr 0.05625685413022534 +28 94 training.batch_size 1.0 +28 94 training.label_smoothing 0.028695837371337206 +28 95 model.embedding_dim 0.0 +28 95 optimizer.lr 0.07077071518324884 +28 95 training.batch_size 2.0 +28 95 training.label_smoothing 0.001477497326403412 +28 96 model.embedding_dim 2.0 +28 96 optimizer.lr 0.0018748022923931473 +28 96 training.batch_size 1.0 +28 96 training.label_smoothing 0.05238911266363772 +28 97 model.embedding_dim 1.0 +28 97 optimizer.lr 0.019088699301061007 +28 97 training.batch_size 1.0 +28 97 training.label_smoothing 0.05792337580572999 +28 98 model.embedding_dim 0.0 +28 98 optimizer.lr 0.010201978900557258 +28 98 training.batch_size 2.0 +28 98 training.label_smoothing 0.002991183820950175 +28 99 model.embedding_dim 1.0 +28 99 optimizer.lr 0.012644362678958144 +28 99 
training.batch_size 0.0 +28 99 training.label_smoothing 0.15040767738081473 +28 100 model.embedding_dim 1.0 +28 100 optimizer.lr 0.007485436013107319 +28 100 training.batch_size 2.0 +28 100 training.label_smoothing 0.027595708316700027 +28 1 dataset """kinships""" +28 1 model """complex""" +28 1 loss """bceaftersigmoid""" +28 1 regularizer """no""" +28 1 optimizer """adam""" +28 1 training_loop """lcwa""" +28 1 evaluator """rankbased""" +28 2 dataset """kinships""" +28 2 model """complex""" +28 2 loss """bceaftersigmoid""" +28 2 regularizer """no""" +28 2 optimizer """adam""" +28 2 training_loop """lcwa""" +28 2 evaluator """rankbased""" +28 3 dataset """kinships""" +28 3 model """complex""" +28 3 loss """bceaftersigmoid""" +28 3 regularizer """no""" +28 3 optimizer """adam""" +28 3 training_loop """lcwa""" +28 3 evaluator """rankbased""" +28 4 dataset """kinships""" +28 4 model """complex""" +28 4 loss """bceaftersigmoid""" +28 4 regularizer """no""" +28 4 optimizer """adam""" +28 4 training_loop """lcwa""" +28 4 evaluator """rankbased""" +28 5 dataset """kinships""" +28 5 model """complex""" +28 5 loss """bceaftersigmoid""" +28 5 regularizer """no""" +28 5 optimizer """adam""" +28 5 training_loop """lcwa""" +28 5 evaluator """rankbased""" +28 6 dataset """kinships""" +28 6 model """complex""" +28 6 loss """bceaftersigmoid""" +28 6 regularizer """no""" +28 6 optimizer """adam""" +28 6 training_loop """lcwa""" +28 6 evaluator """rankbased""" +28 7 dataset """kinships""" +28 7 model """complex""" +28 7 loss """bceaftersigmoid""" +28 7 regularizer """no""" +28 7 optimizer """adam""" +28 7 training_loop """lcwa""" +28 7 evaluator """rankbased""" +28 8 dataset """kinships""" +28 8 model """complex""" +28 8 loss """bceaftersigmoid""" +28 8 regularizer """no""" +28 8 optimizer """adam""" +28 8 training_loop """lcwa""" +28 8 evaluator """rankbased""" +28 9 dataset """kinships""" +28 9 model """complex""" +28 9 loss """bceaftersigmoid""" +28 9 regularizer """no""" +28 9 
optimizer """adam""" +28 9 training_loop """lcwa""" +28 9 evaluator """rankbased""" +28 10 dataset """kinships""" +28 10 model """complex""" +28 10 loss """bceaftersigmoid""" +28 10 regularizer """no""" +28 10 optimizer """adam""" +28 10 training_loop """lcwa""" +28 10 evaluator """rankbased""" +28 11 dataset """kinships""" +28 11 model """complex""" +28 11 loss """bceaftersigmoid""" +28 11 regularizer """no""" +28 11 optimizer """adam""" +28 11 training_loop """lcwa""" +28 11 evaluator """rankbased""" +28 12 dataset """kinships""" +28 12 model """complex""" +28 12 loss """bceaftersigmoid""" +28 12 regularizer """no""" +28 12 optimizer """adam""" +28 12 training_loop """lcwa""" +28 12 evaluator """rankbased""" +28 13 dataset """kinships""" +28 13 model """complex""" +28 13 loss """bceaftersigmoid""" +28 13 regularizer """no""" +28 13 optimizer """adam""" +28 13 training_loop """lcwa""" +28 13 evaluator """rankbased""" +28 14 dataset """kinships""" +28 14 model """complex""" +28 14 loss """bceaftersigmoid""" +28 14 regularizer """no""" +28 14 optimizer """adam""" +28 14 training_loop """lcwa""" +28 14 evaluator """rankbased""" +28 15 dataset """kinships""" +28 15 model """complex""" +28 15 loss """bceaftersigmoid""" +28 15 regularizer """no""" +28 15 optimizer """adam""" +28 15 training_loop """lcwa""" +28 15 evaluator """rankbased""" +28 16 dataset """kinships""" +28 16 model """complex""" +28 16 loss """bceaftersigmoid""" +28 16 regularizer """no""" +28 16 optimizer """adam""" +28 16 training_loop """lcwa""" +28 16 evaluator """rankbased""" +28 17 dataset """kinships""" +28 17 model """complex""" +28 17 loss """bceaftersigmoid""" +28 17 regularizer """no""" +28 17 optimizer """adam""" +28 17 training_loop """lcwa""" +28 17 evaluator """rankbased""" +28 18 dataset """kinships""" +28 18 model """complex""" +28 18 loss """bceaftersigmoid""" +28 18 regularizer """no""" +28 18 optimizer """adam""" +28 18 training_loop """lcwa""" +28 18 evaluator """rankbased""" +28 19 
dataset """kinships""" +28 19 model """complex""" +28 19 loss """bceaftersigmoid""" +28 19 regularizer """no""" +28 19 optimizer """adam""" +28 19 training_loop """lcwa""" +28 19 evaluator """rankbased""" +28 20 dataset """kinships""" +28 20 model """complex""" +28 20 loss """bceaftersigmoid""" +28 20 regularizer """no""" +28 20 optimizer """adam""" +28 20 training_loop """lcwa""" +28 20 evaluator """rankbased""" +28 21 dataset """kinships""" +28 21 model """complex""" +28 21 loss """bceaftersigmoid""" +28 21 regularizer """no""" +28 21 optimizer """adam""" +28 21 training_loop """lcwa""" +28 21 evaluator """rankbased""" +28 22 dataset """kinships""" +28 22 model """complex""" +28 22 loss """bceaftersigmoid""" +28 22 regularizer """no""" +28 22 optimizer """adam""" +28 22 training_loop """lcwa""" +28 22 evaluator """rankbased""" +28 23 dataset """kinships""" +28 23 model """complex""" +28 23 loss """bceaftersigmoid""" +28 23 regularizer """no""" +28 23 optimizer """adam""" +28 23 training_loop """lcwa""" +28 23 evaluator """rankbased""" +28 24 dataset """kinships""" +28 24 model """complex""" +28 24 loss """bceaftersigmoid""" +28 24 regularizer """no""" +28 24 optimizer """adam""" +28 24 training_loop """lcwa""" +28 24 evaluator """rankbased""" +28 25 dataset """kinships""" +28 25 model """complex""" +28 25 loss """bceaftersigmoid""" +28 25 regularizer """no""" +28 25 optimizer """adam""" +28 25 training_loop """lcwa""" +28 25 evaluator """rankbased""" +28 26 dataset """kinships""" +28 26 model """complex""" +28 26 loss """bceaftersigmoid""" +28 26 regularizer """no""" +28 26 optimizer """adam""" +28 26 training_loop """lcwa""" +28 26 evaluator """rankbased""" +28 27 dataset """kinships""" +28 27 model """complex""" +28 27 loss """bceaftersigmoid""" +28 27 regularizer """no""" +28 27 optimizer """adam""" +28 27 training_loop """lcwa""" +28 27 evaluator """rankbased""" +28 28 dataset """kinships""" +28 28 model """complex""" +28 28 loss """bceaftersigmoid""" +28 28 
regularizer """no""" +28 28 optimizer """adam""" +28 28 training_loop """lcwa""" +28 28 evaluator """rankbased""" +28 29 dataset """kinships""" +28 29 model """complex""" +28 29 loss """bceaftersigmoid""" +28 29 regularizer """no""" +28 29 optimizer """adam""" +28 29 training_loop """lcwa""" +28 29 evaluator """rankbased""" +28 30 dataset """kinships""" +28 30 model """complex""" +28 30 loss """bceaftersigmoid""" +28 30 regularizer """no""" +28 30 optimizer """adam""" +28 30 training_loop """lcwa""" +28 30 evaluator """rankbased""" +28 31 dataset """kinships""" +28 31 model """complex""" +28 31 loss """bceaftersigmoid""" +28 31 regularizer """no""" +28 31 optimizer """adam""" +28 31 training_loop """lcwa""" +28 31 evaluator """rankbased""" +28 32 dataset """kinships""" +28 32 model """complex""" +28 32 loss """bceaftersigmoid""" +28 32 regularizer """no""" +28 32 optimizer """adam""" +28 32 training_loop """lcwa""" +28 32 evaluator """rankbased""" +28 33 dataset """kinships""" +28 33 model """complex""" +28 33 loss """bceaftersigmoid""" +28 33 regularizer """no""" +28 33 optimizer """adam""" +28 33 training_loop """lcwa""" +28 33 evaluator """rankbased""" +28 34 dataset """kinships""" +28 34 model """complex""" +28 34 loss """bceaftersigmoid""" +28 34 regularizer """no""" +28 34 optimizer """adam""" +28 34 training_loop """lcwa""" +28 34 evaluator """rankbased""" +28 35 dataset """kinships""" +28 35 model """complex""" +28 35 loss """bceaftersigmoid""" +28 35 regularizer """no""" +28 35 optimizer """adam""" +28 35 training_loop """lcwa""" +28 35 evaluator """rankbased""" +28 36 dataset """kinships""" +28 36 model """complex""" +28 36 loss """bceaftersigmoid""" +28 36 regularizer """no""" +28 36 optimizer """adam""" +28 36 training_loop """lcwa""" +28 36 evaluator """rankbased""" +28 37 dataset """kinships""" +28 37 model """complex""" +28 37 loss """bceaftersigmoid""" +28 37 regularizer """no""" +28 37 optimizer """adam""" +28 37 training_loop """lcwa""" +28 37 
evaluator """rankbased""" +28 38 dataset """kinships""" +28 38 model """complex""" +28 38 loss """bceaftersigmoid""" +28 38 regularizer """no""" +28 38 optimizer """adam""" +28 38 training_loop """lcwa""" +28 38 evaluator """rankbased""" +28 39 dataset """kinships""" +28 39 model """complex""" +28 39 loss """bceaftersigmoid""" +28 39 regularizer """no""" +28 39 optimizer """adam""" +28 39 training_loop """lcwa""" +28 39 evaluator """rankbased""" +28 40 dataset """kinships""" +28 40 model """complex""" +28 40 loss """bceaftersigmoid""" +28 40 regularizer """no""" +28 40 optimizer """adam""" +28 40 training_loop """lcwa""" +28 40 evaluator """rankbased""" +28 41 dataset """kinships""" +28 41 model """complex""" +28 41 loss """bceaftersigmoid""" +28 41 regularizer """no""" +28 41 optimizer """adam""" +28 41 training_loop """lcwa""" +28 41 evaluator """rankbased""" +28 42 dataset """kinships""" +28 42 model """complex""" +28 42 loss """bceaftersigmoid""" +28 42 regularizer """no""" +28 42 optimizer """adam""" +28 42 training_loop """lcwa""" +28 42 evaluator """rankbased""" +28 43 dataset """kinships""" +28 43 model """complex""" +28 43 loss """bceaftersigmoid""" +28 43 regularizer """no""" +28 43 optimizer """adam""" +28 43 training_loop """lcwa""" +28 43 evaluator """rankbased""" +28 44 dataset """kinships""" +28 44 model """complex""" +28 44 loss """bceaftersigmoid""" +28 44 regularizer """no""" +28 44 optimizer """adam""" +28 44 training_loop """lcwa""" +28 44 evaluator """rankbased""" +28 45 dataset """kinships""" +28 45 model """complex""" +28 45 loss """bceaftersigmoid""" +28 45 regularizer """no""" +28 45 optimizer """adam""" +28 45 training_loop """lcwa""" +28 45 evaluator """rankbased""" +28 46 dataset """kinships""" +28 46 model """complex""" +28 46 loss """bceaftersigmoid""" +28 46 regularizer """no""" +28 46 optimizer """adam""" +28 46 training_loop """lcwa""" +28 46 evaluator """rankbased""" +28 47 dataset """kinships""" +28 47 model """complex""" +28 47 
loss """bceaftersigmoid""" +28 47 regularizer """no""" +28 47 optimizer """adam""" +28 47 training_loop """lcwa""" +28 47 evaluator """rankbased""" +28 48 dataset """kinships""" +28 48 model """complex""" +28 48 loss """bceaftersigmoid""" +28 48 regularizer """no""" +28 48 optimizer """adam""" +28 48 training_loop """lcwa""" +28 48 evaluator """rankbased""" +28 49 dataset """kinships""" +28 49 model """complex""" +28 49 loss """bceaftersigmoid""" +28 49 regularizer """no""" +28 49 optimizer """adam""" +28 49 training_loop """lcwa""" +28 49 evaluator """rankbased""" +28 50 dataset """kinships""" +28 50 model """complex""" +28 50 loss """bceaftersigmoid""" +28 50 regularizer """no""" +28 50 optimizer """adam""" +28 50 training_loop """lcwa""" +28 50 evaluator """rankbased""" +28 51 dataset """kinships""" +28 51 model """complex""" +28 51 loss """bceaftersigmoid""" +28 51 regularizer """no""" +28 51 optimizer """adam""" +28 51 training_loop """lcwa""" +28 51 evaluator """rankbased""" +28 52 dataset """kinships""" +28 52 model """complex""" +28 52 loss """bceaftersigmoid""" +28 52 regularizer """no""" +28 52 optimizer """adam""" +28 52 training_loop """lcwa""" +28 52 evaluator """rankbased""" +28 53 dataset """kinships""" +28 53 model """complex""" +28 53 loss """bceaftersigmoid""" +28 53 regularizer """no""" +28 53 optimizer """adam""" +28 53 training_loop """lcwa""" +28 53 evaluator """rankbased""" +28 54 dataset """kinships""" +28 54 model """complex""" +28 54 loss """bceaftersigmoid""" +28 54 regularizer """no""" +28 54 optimizer """adam""" +28 54 training_loop """lcwa""" +28 54 evaluator """rankbased""" +28 55 dataset """kinships""" +28 55 model """complex""" +28 55 loss """bceaftersigmoid""" +28 55 regularizer """no""" +28 55 optimizer """adam""" +28 55 training_loop """lcwa""" +28 55 evaluator """rankbased""" +28 56 dataset """kinships""" +28 56 model """complex""" +28 56 loss """bceaftersigmoid""" +28 56 regularizer """no""" +28 56 optimizer """adam""" +28 56 
training_loop """lcwa""" +28 56 evaluator """rankbased""" +28 57 dataset """kinships""" +28 57 model """complex""" +28 57 loss """bceaftersigmoid""" +28 57 regularizer """no""" +28 57 optimizer """adam""" +28 57 training_loop """lcwa""" +28 57 evaluator """rankbased""" +28 58 dataset """kinships""" +28 58 model """complex""" +28 58 loss """bceaftersigmoid""" +28 58 regularizer """no""" +28 58 optimizer """adam""" +28 58 training_loop """lcwa""" +28 58 evaluator """rankbased""" +28 59 dataset """kinships""" +28 59 model """complex""" +28 59 loss """bceaftersigmoid""" +28 59 regularizer """no""" +28 59 optimizer """adam""" +28 59 training_loop """lcwa""" +28 59 evaluator """rankbased""" +28 60 dataset """kinships""" +28 60 model """complex""" +28 60 loss """bceaftersigmoid""" +28 60 regularizer """no""" +28 60 optimizer """adam""" +28 60 training_loop """lcwa""" +28 60 evaluator """rankbased""" +28 61 dataset """kinships""" +28 61 model """complex""" +28 61 loss """bceaftersigmoid""" +28 61 regularizer """no""" +28 61 optimizer """adam""" +28 61 training_loop """lcwa""" +28 61 evaluator """rankbased""" +28 62 dataset """kinships""" +28 62 model """complex""" +28 62 loss """bceaftersigmoid""" +28 62 regularizer """no""" +28 62 optimizer """adam""" +28 62 training_loop """lcwa""" +28 62 evaluator """rankbased""" +28 63 dataset """kinships""" +28 63 model """complex""" +28 63 loss """bceaftersigmoid""" +28 63 regularizer """no""" +28 63 optimizer """adam""" +28 63 training_loop """lcwa""" +28 63 evaluator """rankbased""" +28 64 dataset """kinships""" +28 64 model """complex""" +28 64 loss """bceaftersigmoid""" +28 64 regularizer """no""" +28 64 optimizer """adam""" +28 64 training_loop """lcwa""" +28 64 evaluator """rankbased""" +28 65 dataset """kinships""" +28 65 model """complex""" +28 65 loss """bceaftersigmoid""" +28 65 regularizer """no""" +28 65 optimizer """adam""" +28 65 training_loop """lcwa""" +28 65 evaluator """rankbased""" +28 66 dataset """kinships""" +28 
66 model """complex""" +28 66 loss """bceaftersigmoid""" +28 66 regularizer """no""" +28 66 optimizer """adam""" +28 66 training_loop """lcwa""" +28 66 evaluator """rankbased""" +28 67 dataset """kinships""" +28 67 model """complex""" +28 67 loss """bceaftersigmoid""" +28 67 regularizer """no""" +28 67 optimizer """adam""" +28 67 training_loop """lcwa""" +28 67 evaluator """rankbased""" +28 68 dataset """kinships""" +28 68 model """complex""" +28 68 loss """bceaftersigmoid""" +28 68 regularizer """no""" +28 68 optimizer """adam""" +28 68 training_loop """lcwa""" +28 68 evaluator """rankbased""" +28 69 dataset """kinships""" +28 69 model """complex""" +28 69 loss """bceaftersigmoid""" +28 69 regularizer """no""" +28 69 optimizer """adam""" +28 69 training_loop """lcwa""" +28 69 evaluator """rankbased""" +28 70 dataset """kinships""" +28 70 model """complex""" +28 70 loss """bceaftersigmoid""" +28 70 regularizer """no""" +28 70 optimizer """adam""" +28 70 training_loop """lcwa""" +28 70 evaluator """rankbased""" +28 71 dataset """kinships""" +28 71 model """complex""" +28 71 loss """bceaftersigmoid""" +28 71 regularizer """no""" +28 71 optimizer """adam""" +28 71 training_loop """lcwa""" +28 71 evaluator """rankbased""" +28 72 dataset """kinships""" +28 72 model """complex""" +28 72 loss """bceaftersigmoid""" +28 72 regularizer """no""" +28 72 optimizer """adam""" +28 72 training_loop """lcwa""" +28 72 evaluator """rankbased""" +28 73 dataset """kinships""" +28 73 model """complex""" +28 73 loss """bceaftersigmoid""" +28 73 regularizer """no""" +28 73 optimizer """adam""" +28 73 training_loop """lcwa""" +28 73 evaluator """rankbased""" +28 74 dataset """kinships""" +28 74 model """complex""" +28 74 loss """bceaftersigmoid""" +28 74 regularizer """no""" +28 74 optimizer """adam""" +28 74 training_loop """lcwa""" +28 74 evaluator """rankbased""" +28 75 dataset """kinships""" +28 75 model """complex""" +28 75 loss """bceaftersigmoid""" +28 75 regularizer """no""" +28 75 
optimizer """adam""" +28 75 training_loop """lcwa""" +28 75 evaluator """rankbased""" +28 76 dataset """kinships""" +28 76 model """complex""" +28 76 loss """bceaftersigmoid""" +28 76 regularizer """no""" +28 76 optimizer """adam""" +28 76 training_loop """lcwa""" +28 76 evaluator """rankbased""" +28 77 dataset """kinships""" +28 77 model """complex""" +28 77 loss """bceaftersigmoid""" +28 77 regularizer """no""" +28 77 optimizer """adam""" +28 77 training_loop """lcwa""" +28 77 evaluator """rankbased""" +28 78 dataset """kinships""" +28 78 model """complex""" +28 78 loss """bceaftersigmoid""" +28 78 regularizer """no""" +28 78 optimizer """adam""" +28 78 training_loop """lcwa""" +28 78 evaluator """rankbased""" +28 79 dataset """kinships""" +28 79 model """complex""" +28 79 loss """bceaftersigmoid""" +28 79 regularizer """no""" +28 79 optimizer """adam""" +28 79 training_loop """lcwa""" +28 79 evaluator """rankbased""" +28 80 dataset """kinships""" +28 80 model """complex""" +28 80 loss """bceaftersigmoid""" +28 80 regularizer """no""" +28 80 optimizer """adam""" +28 80 training_loop """lcwa""" +28 80 evaluator """rankbased""" +28 81 dataset """kinships""" +28 81 model """complex""" +28 81 loss """bceaftersigmoid""" +28 81 regularizer """no""" +28 81 optimizer """adam""" +28 81 training_loop """lcwa""" +28 81 evaluator """rankbased""" +28 82 dataset """kinships""" +28 82 model """complex""" +28 82 loss """bceaftersigmoid""" +28 82 regularizer """no""" +28 82 optimizer """adam""" +28 82 training_loop """lcwa""" +28 82 evaluator """rankbased""" +28 83 dataset """kinships""" +28 83 model """complex""" +28 83 loss """bceaftersigmoid""" +28 83 regularizer """no""" +28 83 optimizer """adam""" +28 83 training_loop """lcwa""" +28 83 evaluator """rankbased""" +28 84 dataset """kinships""" +28 84 model """complex""" +28 84 loss """bceaftersigmoid""" +28 84 regularizer """no""" +28 84 optimizer """adam""" +28 84 training_loop """lcwa""" +28 84 evaluator """rankbased""" +28 
85 dataset """kinships""" +28 85 model """complex""" +28 85 loss """bceaftersigmoid""" +28 85 regularizer """no""" +28 85 optimizer """adam""" +28 85 training_loop """lcwa""" +28 85 evaluator """rankbased""" +28 86 dataset """kinships""" +28 86 model """complex""" +28 86 loss """bceaftersigmoid""" +28 86 regularizer """no""" +28 86 optimizer """adam""" +28 86 training_loop """lcwa""" +28 86 evaluator """rankbased""" +28 87 dataset """kinships""" +28 87 model """complex""" +28 87 loss """bceaftersigmoid""" +28 87 regularizer """no""" +28 87 optimizer """adam""" +28 87 training_loop """lcwa""" +28 87 evaluator """rankbased""" +28 88 dataset """kinships""" +28 88 model """complex""" +28 88 loss """bceaftersigmoid""" +28 88 regularizer """no""" +28 88 optimizer """adam""" +28 88 training_loop """lcwa""" +28 88 evaluator """rankbased""" +28 89 dataset """kinships""" +28 89 model """complex""" +28 89 loss """bceaftersigmoid""" +28 89 regularizer """no""" +28 89 optimizer """adam""" +28 89 training_loop """lcwa""" +28 89 evaluator """rankbased""" +28 90 dataset """kinships""" +28 90 model """complex""" +28 90 loss """bceaftersigmoid""" +28 90 regularizer """no""" +28 90 optimizer """adam""" +28 90 training_loop """lcwa""" +28 90 evaluator """rankbased""" +28 91 dataset """kinships""" +28 91 model """complex""" +28 91 loss """bceaftersigmoid""" +28 91 regularizer """no""" +28 91 optimizer """adam""" +28 91 training_loop """lcwa""" +28 91 evaluator """rankbased""" +28 92 dataset """kinships""" +28 92 model """complex""" +28 92 loss """bceaftersigmoid""" +28 92 regularizer """no""" +28 92 optimizer """adam""" +28 92 training_loop """lcwa""" +28 92 evaluator """rankbased""" +28 93 dataset """kinships""" +28 93 model """complex""" +28 93 loss """bceaftersigmoid""" +28 93 regularizer """no""" +28 93 optimizer """adam""" +28 93 training_loop """lcwa""" +28 93 evaluator """rankbased""" +28 94 dataset """kinships""" +28 94 model """complex""" +28 94 loss """bceaftersigmoid""" +28 
94 regularizer """no""" +28 94 optimizer """adam""" +28 94 training_loop """lcwa""" +28 94 evaluator """rankbased""" +28 95 dataset """kinships""" +28 95 model """complex""" +28 95 loss """bceaftersigmoid""" +28 95 regularizer """no""" +28 95 optimizer """adam""" +28 95 training_loop """lcwa""" +28 95 evaluator """rankbased""" +28 96 dataset """kinships""" +28 96 model """complex""" +28 96 loss """bceaftersigmoid""" +28 96 regularizer """no""" +28 96 optimizer """adam""" +28 96 training_loop """lcwa""" +28 96 evaluator """rankbased""" +28 97 dataset """kinships""" +28 97 model """complex""" +28 97 loss """bceaftersigmoid""" +28 97 regularizer """no""" +28 97 optimizer """adam""" +28 97 training_loop """lcwa""" +28 97 evaluator """rankbased""" +28 98 dataset """kinships""" +28 98 model """complex""" +28 98 loss """bceaftersigmoid""" +28 98 regularizer """no""" +28 98 optimizer """adam""" +28 98 training_loop """lcwa""" +28 98 evaluator """rankbased""" +28 99 dataset """kinships""" +28 99 model """complex""" +28 99 loss """bceaftersigmoid""" +28 99 regularizer """no""" +28 99 optimizer """adam""" +28 99 training_loop """lcwa""" +28 99 evaluator """rankbased""" +28 100 dataset """kinships""" +28 100 model """complex""" +28 100 loss """bceaftersigmoid""" +28 100 regularizer """no""" +28 100 optimizer """adam""" +28 100 training_loop """lcwa""" +28 100 evaluator """rankbased""" +29 1 model.embedding_dim 1.0 +29 1 optimizer.lr 0.001984523064693745 +29 1 training.batch_size 2.0 +29 1 training.label_smoothing 0.018908581180926847 +29 2 model.embedding_dim 0.0 +29 2 optimizer.lr 0.03485621169093798 +29 2 training.batch_size 2.0 +29 2 training.label_smoothing 0.012977148667993522 +29 3 model.embedding_dim 2.0 +29 3 optimizer.lr 0.020919764114922405 +29 3 training.batch_size 2.0 +29 3 training.label_smoothing 0.464783011636998 +29 4 model.embedding_dim 2.0 +29 4 optimizer.lr 0.003258271274966903 +29 4 training.batch_size 1.0 +29 4 training.label_smoothing 0.04653043579863489 
+29 5 model.embedding_dim 1.0 +29 5 optimizer.lr 0.03472462237563877 +29 5 training.batch_size 2.0 +29 5 training.label_smoothing 0.4543016657028202 +29 6 model.embedding_dim 2.0 +29 6 optimizer.lr 0.003379586243115368 +29 6 training.batch_size 0.0 +29 6 training.label_smoothing 0.07803650957239788 +29 7 model.embedding_dim 0.0 +29 7 optimizer.lr 0.002829977731464848 +29 7 training.batch_size 2.0 +29 7 training.label_smoothing 0.00656239522745522 +29 8 model.embedding_dim 0.0 +29 8 optimizer.lr 0.09205265141300045 +29 8 training.batch_size 2.0 +29 8 training.label_smoothing 0.006224312306189892 +29 9 model.embedding_dim 0.0 +29 9 optimizer.lr 0.018306929815955927 +29 9 training.batch_size 2.0 +29 9 training.label_smoothing 0.046862908943121215 +29 10 model.embedding_dim 2.0 +29 10 optimizer.lr 0.04424269294846416 +29 10 training.batch_size 2.0 +29 10 training.label_smoothing 0.016248739443758178 +29 11 model.embedding_dim 0.0 +29 11 optimizer.lr 0.0038859561029746274 +29 11 training.batch_size 2.0 +29 11 training.label_smoothing 0.40046613772317313 +29 12 model.embedding_dim 2.0 +29 12 optimizer.lr 0.005313084986802617 +29 12 training.batch_size 0.0 +29 12 training.label_smoothing 0.0011234465479574302 +29 13 model.embedding_dim 2.0 +29 13 optimizer.lr 0.006444201066141011 +29 13 training.batch_size 2.0 +29 13 training.label_smoothing 0.01189161132980017 +29 14 model.embedding_dim 2.0 +29 14 optimizer.lr 0.011527116969228642 +29 14 training.batch_size 0.0 +29 14 training.label_smoothing 0.0010289784196486533 +29 15 model.embedding_dim 1.0 +29 15 optimizer.lr 0.057259645822406086 +29 15 training.batch_size 2.0 +29 15 training.label_smoothing 0.004507219964519285 +29 16 model.embedding_dim 0.0 +29 16 optimizer.lr 0.0037917547650339507 +29 16 training.batch_size 2.0 +29 16 training.label_smoothing 0.0019385084639086932 +29 17 model.embedding_dim 0.0 +29 17 optimizer.lr 0.001582560740772466 +29 17 training.batch_size 2.0 +29 17 training.label_smoothing 
0.002615239841915183 +29 18 model.embedding_dim 0.0 +29 18 optimizer.lr 0.014776606056760134 +29 18 training.batch_size 2.0 +29 18 training.label_smoothing 0.19432565275425243 +29 19 model.embedding_dim 1.0 +29 19 optimizer.lr 0.07782320775560313 +29 19 training.batch_size 2.0 +29 19 training.label_smoothing 0.04005554900455721 +29 20 model.embedding_dim 0.0 +29 20 optimizer.lr 0.05864530716188622 +29 20 training.batch_size 1.0 +29 20 training.label_smoothing 0.008536505278504244 +29 21 model.embedding_dim 0.0 +29 21 optimizer.lr 0.09692381778633262 +29 21 training.batch_size 1.0 +29 21 training.label_smoothing 0.4161978103000176 +29 22 model.embedding_dim 1.0 +29 22 optimizer.lr 0.013387949767125555 +29 22 training.batch_size 0.0 +29 22 training.label_smoothing 0.670557027178993 +29 23 model.embedding_dim 0.0 +29 23 optimizer.lr 0.02102466647974571 +29 23 training.batch_size 2.0 +29 23 training.label_smoothing 0.0010777971892862372 +29 24 model.embedding_dim 0.0 +29 24 optimizer.lr 0.008258665270631765 +29 24 training.batch_size 1.0 +29 24 training.label_smoothing 0.5059225895965764 +29 25 model.embedding_dim 0.0 +29 25 optimizer.lr 0.004692941440058717 +29 25 training.batch_size 0.0 +29 25 training.label_smoothing 0.028975229410866995 +29 26 model.embedding_dim 0.0 +29 26 optimizer.lr 0.002432159665121559 +29 26 training.batch_size 1.0 +29 26 training.label_smoothing 0.06033748911088521 +29 27 model.embedding_dim 2.0 +29 27 optimizer.lr 0.0025969281517774167 +29 27 training.batch_size 2.0 +29 27 training.label_smoothing 0.7978211274321263 +29 28 model.embedding_dim 0.0 +29 28 optimizer.lr 0.03328918523712509 +29 28 training.batch_size 0.0 +29 28 training.label_smoothing 0.3566942419393335 +29 29 model.embedding_dim 0.0 +29 29 optimizer.lr 0.0017915378907429544 +29 29 training.batch_size 1.0 +29 29 training.label_smoothing 0.035899228593060405 +29 30 model.embedding_dim 0.0 +29 30 optimizer.lr 0.004861939050397308 +29 30 training.batch_size 1.0 +29 30 
training.label_smoothing 0.04709151602780006 +29 31 model.embedding_dim 1.0 +29 31 optimizer.lr 0.05046818340365549 +29 31 training.batch_size 0.0 +29 31 training.label_smoothing 0.08711643506967612 +29 32 model.embedding_dim 1.0 +29 32 optimizer.lr 0.0011475892135790226 +29 32 training.batch_size 1.0 +29 32 training.label_smoothing 0.0038495629336141733 +29 33 model.embedding_dim 2.0 +29 33 optimizer.lr 0.009624025662197172 +29 33 training.batch_size 1.0 +29 33 training.label_smoothing 0.03126273505462075 +29 34 model.embedding_dim 1.0 +29 34 optimizer.lr 0.012994104376980828 +29 34 training.batch_size 0.0 +29 34 training.label_smoothing 0.2519029989249948 +29 35 model.embedding_dim 2.0 +29 35 optimizer.lr 0.05042360649667678 +29 35 training.batch_size 2.0 +29 35 training.label_smoothing 0.0013424484816942411 +29 36 model.embedding_dim 1.0 +29 36 optimizer.lr 0.004249676158371394 +29 36 training.batch_size 2.0 +29 36 training.label_smoothing 0.006441338009457517 +29 37 model.embedding_dim 2.0 +29 37 optimizer.lr 0.04974773795703087 +29 37 training.batch_size 0.0 +29 37 training.label_smoothing 0.0742287463898952 +29 38 model.embedding_dim 1.0 +29 38 optimizer.lr 0.09746192026596363 +29 38 training.batch_size 1.0 +29 38 training.label_smoothing 0.0015795569060010725 +29 39 model.embedding_dim 2.0 +29 39 optimizer.lr 0.0033914929453223363 +29 39 training.batch_size 1.0 +29 39 training.label_smoothing 0.001156543715305987 +29 40 model.embedding_dim 0.0 +29 40 optimizer.lr 0.002040745630197959 +29 40 training.batch_size 2.0 +29 40 training.label_smoothing 0.1514677226086325 +29 41 model.embedding_dim 0.0 +29 41 optimizer.lr 0.019717716573436926 +29 41 training.batch_size 0.0 +29 41 training.label_smoothing 0.27761159067873614 +29 42 model.embedding_dim 0.0 +29 42 optimizer.lr 0.0010750386251474422 +29 42 training.batch_size 2.0 +29 42 training.label_smoothing 0.0022021653362493455 +29 43 model.embedding_dim 0.0 +29 43 optimizer.lr 0.04253646841357687 +29 43 
training.batch_size 0.0 +29 43 training.label_smoothing 0.04521903676352898 +29 44 model.embedding_dim 2.0 +29 44 optimizer.lr 0.0021324991371218243 +29 44 training.batch_size 0.0 +29 44 training.label_smoothing 0.0017479804190002285 +29 45 model.embedding_dim 1.0 +29 45 optimizer.lr 0.018510320340911587 +29 45 training.batch_size 1.0 +29 45 training.label_smoothing 0.0011578791290132204 +29 46 model.embedding_dim 0.0 +29 46 optimizer.lr 0.0106708994966434 +29 46 training.batch_size 1.0 +29 46 training.label_smoothing 0.014324632689858693 +29 47 model.embedding_dim 0.0 +29 47 optimizer.lr 0.0012314119856083085 +29 47 training.batch_size 1.0 +29 47 training.label_smoothing 0.006408279352937835 +29 48 model.embedding_dim 2.0 +29 48 optimizer.lr 0.01786597913854448 +29 48 training.batch_size 1.0 +29 48 training.label_smoothing 0.24278958299594183 +29 49 model.embedding_dim 0.0 +29 49 optimizer.lr 0.015457765606145285 +29 49 training.batch_size 2.0 +29 49 training.label_smoothing 0.11287191538032794 +29 50 model.embedding_dim 1.0 +29 50 optimizer.lr 0.028826008724008076 +29 50 training.batch_size 0.0 +29 50 training.label_smoothing 0.3250768017465838 +29 51 model.embedding_dim 0.0 +29 51 optimizer.lr 0.0042704153571242265 +29 51 training.batch_size 2.0 +29 51 training.label_smoothing 0.08785753417769196 +29 52 model.embedding_dim 1.0 +29 52 optimizer.lr 0.00974981855827822 +29 52 training.batch_size 0.0 +29 52 training.label_smoothing 0.10765731371722463 +29 53 model.embedding_dim 0.0 +29 53 optimizer.lr 0.004747469093639727 +29 53 training.batch_size 1.0 +29 53 training.label_smoothing 0.022357840675116383 +29 54 model.embedding_dim 0.0 +29 54 optimizer.lr 0.0022330748339834134 +29 54 training.batch_size 1.0 +29 54 training.label_smoothing 0.031479619851781056 +29 55 model.embedding_dim 0.0 +29 55 optimizer.lr 0.006109992222209406 +29 55 training.batch_size 0.0 +29 55 training.label_smoothing 0.05534687248734272 +29 56 model.embedding_dim 2.0 +29 56 optimizer.lr 
0.0010006337723403671 +29 56 training.batch_size 2.0 +29 56 training.label_smoothing 0.004337130016467095 +29 57 model.embedding_dim 1.0 +29 57 optimizer.lr 0.017567420888814345 +29 57 training.batch_size 1.0 +29 57 training.label_smoothing 0.0028323006422348025 +29 58 model.embedding_dim 1.0 +29 58 optimizer.lr 0.022524698871786507 +29 58 training.batch_size 1.0 +29 58 training.label_smoothing 0.2696744127193481 +29 59 model.embedding_dim 1.0 +29 59 optimizer.lr 0.002437922588322696 +29 59 training.batch_size 2.0 +29 59 training.label_smoothing 0.939812785813891 +29 60 model.embedding_dim 0.0 +29 60 optimizer.lr 0.00432559782388231 +29 60 training.batch_size 2.0 +29 60 training.label_smoothing 0.42300743904255855 +29 61 model.embedding_dim 0.0 +29 61 optimizer.lr 0.005278539459560616 +29 61 training.batch_size 2.0 +29 61 training.label_smoothing 0.0011804298937760492 +29 62 model.embedding_dim 2.0 +29 62 optimizer.lr 0.0012251489310505404 +29 62 training.batch_size 2.0 +29 62 training.label_smoothing 0.004341122629275378 +29 63 model.embedding_dim 0.0 +29 63 optimizer.lr 0.0027759310168173957 +29 63 training.batch_size 2.0 +29 63 training.label_smoothing 0.18619989549072347 +29 64 model.embedding_dim 0.0 +29 64 optimizer.lr 0.0014382994987099022 +29 64 training.batch_size 1.0 +29 64 training.label_smoothing 0.4846976729265064 +29 65 model.embedding_dim 0.0 +29 65 optimizer.lr 0.002373963141706005 +29 65 training.batch_size 2.0 +29 65 training.label_smoothing 0.005168520698689754 +29 66 model.embedding_dim 0.0 +29 66 optimizer.lr 0.013830001138589478 +29 66 training.batch_size 0.0 +29 66 training.label_smoothing 0.028754812874437513 +29 67 model.embedding_dim 1.0 +29 67 optimizer.lr 0.0045405503627293205 +29 67 training.batch_size 1.0 +29 67 training.label_smoothing 0.002846045794769808 +29 68 model.embedding_dim 1.0 +29 68 optimizer.lr 0.02286700327861432 +29 68 training.batch_size 2.0 +29 68 training.label_smoothing 0.3278167527464325 +29 69 model.embedding_dim 
2.0 +29 69 optimizer.lr 0.005625246974092319 +29 69 training.batch_size 1.0 +29 69 training.label_smoothing 0.4925594798228647 +29 70 model.embedding_dim 0.0 +29 70 optimizer.lr 0.008213601429794888 +29 70 training.batch_size 0.0 +29 70 training.label_smoothing 0.005077546895952044 +29 71 model.embedding_dim 1.0 +29 71 optimizer.lr 0.0028500495684260943 +29 71 training.batch_size 2.0 +29 71 training.label_smoothing 0.012912703172338271 +29 72 model.embedding_dim 0.0 +29 72 optimizer.lr 0.0013171234431567785 +29 72 training.batch_size 1.0 +29 72 training.label_smoothing 0.0042244548337911185 +29 73 model.embedding_dim 1.0 +29 73 optimizer.lr 0.02463522239435029 +29 73 training.batch_size 1.0 +29 73 training.label_smoothing 0.03156889721978314 +29 74 model.embedding_dim 1.0 +29 74 optimizer.lr 0.040613035694241 +29 74 training.batch_size 1.0 +29 74 training.label_smoothing 0.0015853573777786447 +29 75 model.embedding_dim 2.0 +29 75 optimizer.lr 0.01892279549056661 +29 75 training.batch_size 0.0 +29 75 training.label_smoothing 0.10615159474689743 +29 76 model.embedding_dim 1.0 +29 76 optimizer.lr 0.004870425090336687 +29 76 training.batch_size 0.0 +29 76 training.label_smoothing 0.004464384624943992 +29 77 model.embedding_dim 2.0 +29 77 optimizer.lr 0.0013302698830666615 +29 77 training.batch_size 0.0 +29 77 training.label_smoothing 0.008030533514769867 +29 78 model.embedding_dim 1.0 +29 78 optimizer.lr 0.07129455569861139 +29 78 training.batch_size 0.0 +29 78 training.label_smoothing 0.0035630747627380874 +29 79 model.embedding_dim 2.0 +29 79 optimizer.lr 0.06272855954134844 +29 79 training.batch_size 0.0 +29 79 training.label_smoothing 0.0032054525926539547 +29 80 model.embedding_dim 1.0 +29 80 optimizer.lr 0.029145447042252748 +29 80 training.batch_size 0.0 +29 80 training.label_smoothing 0.02784755470495314 +29 81 model.embedding_dim 0.0 +29 81 optimizer.lr 0.016995320901335334 +29 81 training.batch_size 0.0 +29 81 training.label_smoothing 0.0017651690940639364 
+29 82 model.embedding_dim 2.0 +29 82 optimizer.lr 0.0019077079558514393 +29 82 training.batch_size 0.0 +29 82 training.label_smoothing 0.5923742638464358 +29 83 model.embedding_dim 2.0 +29 83 optimizer.lr 0.0012155646032971001 +29 83 training.batch_size 1.0 +29 83 training.label_smoothing 0.021682544021965667 +29 84 model.embedding_dim 2.0 +29 84 optimizer.lr 0.011534313492184589 +29 84 training.batch_size 0.0 +29 84 training.label_smoothing 0.23293270340098396 +29 85 model.embedding_dim 0.0 +29 85 optimizer.lr 0.004946272254397475 +29 85 training.batch_size 0.0 +29 85 training.label_smoothing 0.03774394223681614 +29 86 model.embedding_dim 2.0 +29 86 optimizer.lr 0.01137138900641557 +29 86 training.batch_size 1.0 +29 86 training.label_smoothing 0.008368996341591169 +29 87 model.embedding_dim 1.0 +29 87 optimizer.lr 0.001372729221047578 +29 87 training.batch_size 0.0 +29 87 training.label_smoothing 0.02110452364553692 +29 88 model.embedding_dim 1.0 +29 88 optimizer.lr 0.003140389094637917 +29 88 training.batch_size 0.0 +29 88 training.label_smoothing 0.11259024120859072 +29 89 model.embedding_dim 1.0 +29 89 optimizer.lr 0.005043564131911154 +29 89 training.batch_size 1.0 +29 89 training.label_smoothing 0.014684613346259232 +29 90 model.embedding_dim 2.0 +29 90 optimizer.lr 0.0023348424056326127 +29 90 training.batch_size 2.0 +29 90 training.label_smoothing 0.05075483179122156 +29 91 model.embedding_dim 0.0 +29 91 optimizer.lr 0.03584790128010887 +29 91 training.batch_size 1.0 +29 91 training.label_smoothing 0.2862652245185877 +29 92 model.embedding_dim 1.0 +29 92 optimizer.lr 0.025121749324305085 +29 92 training.batch_size 0.0 +29 92 training.label_smoothing 0.5861028950030408 +29 93 model.embedding_dim 0.0 +29 93 optimizer.lr 0.010593798569731243 +29 93 training.batch_size 2.0 +29 93 training.label_smoothing 0.1036329127509465 +29 94 model.embedding_dim 1.0 +29 94 optimizer.lr 0.001639114230681237 +29 94 training.batch_size 0.0 +29 94 training.label_smoothing 
0.509940186368972 +29 95 model.embedding_dim 1.0 +29 95 optimizer.lr 0.04172728774186108 +29 95 training.batch_size 0.0 +29 95 training.label_smoothing 0.019566351304568744 +29 96 model.embedding_dim 0.0 +29 96 optimizer.lr 0.045467298853168206 +29 96 training.batch_size 1.0 +29 96 training.label_smoothing 0.049998594151469546 +29 97 model.embedding_dim 1.0 +29 97 optimizer.lr 0.004546839039056779 +29 97 training.batch_size 0.0 +29 97 training.label_smoothing 0.7529312614253003 +29 98 model.embedding_dim 2.0 +29 98 optimizer.lr 0.0076570701300189056 +29 98 training.batch_size 2.0 +29 98 training.label_smoothing 0.0018744080243802144 +29 99 model.embedding_dim 1.0 +29 99 optimizer.lr 0.07524814299666822 +29 99 training.batch_size 1.0 +29 99 training.label_smoothing 0.6507574509853805 +29 100 model.embedding_dim 0.0 +29 100 optimizer.lr 0.0023023575344204494 +29 100 training.batch_size 1.0 +29 100 training.label_smoothing 0.08864821503875167 +29 1 dataset """kinships""" +29 1 model """complex""" +29 1 loss """softplus""" +29 1 regularizer """no""" +29 1 optimizer """adam""" +29 1 training_loop """lcwa""" +29 1 evaluator """rankbased""" +29 2 dataset """kinships""" +29 2 model """complex""" +29 2 loss """softplus""" +29 2 regularizer """no""" +29 2 optimizer """adam""" +29 2 training_loop """lcwa""" +29 2 evaluator """rankbased""" +29 3 dataset """kinships""" +29 3 model """complex""" +29 3 loss """softplus""" +29 3 regularizer """no""" +29 3 optimizer """adam""" +29 3 training_loop """lcwa""" +29 3 evaluator """rankbased""" +29 4 dataset """kinships""" +29 4 model """complex""" +29 4 loss """softplus""" +29 4 regularizer """no""" +29 4 optimizer """adam""" +29 4 training_loop """lcwa""" +29 4 evaluator """rankbased""" +29 5 dataset """kinships""" +29 5 model """complex""" +29 5 loss """softplus""" +29 5 regularizer """no""" +29 5 optimizer """adam""" +29 5 training_loop """lcwa""" +29 5 evaluator """rankbased""" +29 6 dataset """kinships""" +29 6 model """complex""" 
+29 6 loss """softplus""" +29 6 regularizer """no""" +29 6 optimizer """adam""" +29 6 training_loop """lcwa""" +29 6 evaluator """rankbased""" +29 7 dataset """kinships""" +29 7 model """complex""" +29 7 loss """softplus""" +29 7 regularizer """no""" +29 7 optimizer """adam""" +29 7 training_loop """lcwa""" +29 7 evaluator """rankbased""" +29 8 dataset """kinships""" +29 8 model """complex""" +29 8 loss """softplus""" +29 8 regularizer """no""" +29 8 optimizer """adam""" +29 8 training_loop """lcwa""" +29 8 evaluator """rankbased""" +29 9 dataset """kinships""" +29 9 model """complex""" +29 9 loss """softplus""" +29 9 regularizer """no""" +29 9 optimizer """adam""" +29 9 training_loop """lcwa""" +29 9 evaluator """rankbased""" +29 10 dataset """kinships""" +29 10 model """complex""" +29 10 loss """softplus""" +29 10 regularizer """no""" +29 10 optimizer """adam""" +29 10 training_loop """lcwa""" +29 10 evaluator """rankbased""" +29 11 dataset """kinships""" +29 11 model """complex""" +29 11 loss """softplus""" +29 11 regularizer """no""" +29 11 optimizer """adam""" +29 11 training_loop """lcwa""" +29 11 evaluator """rankbased""" +29 12 dataset """kinships""" +29 12 model """complex""" +29 12 loss """softplus""" +29 12 regularizer """no""" +29 12 optimizer """adam""" +29 12 training_loop """lcwa""" +29 12 evaluator """rankbased""" +29 13 dataset """kinships""" +29 13 model """complex""" +29 13 loss """softplus""" +29 13 regularizer """no""" +29 13 optimizer """adam""" +29 13 training_loop """lcwa""" +29 13 evaluator """rankbased""" +29 14 dataset """kinships""" +29 14 model """complex""" +29 14 loss """softplus""" +29 14 regularizer """no""" +29 14 optimizer """adam""" +29 14 training_loop """lcwa""" +29 14 evaluator """rankbased""" +29 15 dataset """kinships""" +29 15 model """complex""" +29 15 loss """softplus""" +29 15 regularizer """no""" +29 15 optimizer """adam""" +29 15 training_loop """lcwa""" +29 15 evaluator """rankbased""" +29 16 dataset """kinships""" 
+29 16 model """complex""" +29 16 loss """softplus""" +29 16 regularizer """no""" +29 16 optimizer """adam""" +29 16 training_loop """lcwa""" +29 16 evaluator """rankbased""" +29 17 dataset """kinships""" +29 17 model """complex""" +29 17 loss """softplus""" +29 17 regularizer """no""" +29 17 optimizer """adam""" +29 17 training_loop """lcwa""" +29 17 evaluator """rankbased""" +29 18 dataset """kinships""" +29 18 model """complex""" +29 18 loss """softplus""" +29 18 regularizer """no""" +29 18 optimizer """adam""" +29 18 training_loop """lcwa""" +29 18 evaluator """rankbased""" +29 19 dataset """kinships""" +29 19 model """complex""" +29 19 loss """softplus""" +29 19 regularizer """no""" +29 19 optimizer """adam""" +29 19 training_loop """lcwa""" +29 19 evaluator """rankbased""" +29 20 dataset """kinships""" +29 20 model """complex""" +29 20 loss """softplus""" +29 20 regularizer """no""" +29 20 optimizer """adam""" +29 20 training_loop """lcwa""" +29 20 evaluator """rankbased""" +29 21 dataset """kinships""" +29 21 model """complex""" +29 21 loss """softplus""" +29 21 regularizer """no""" +29 21 optimizer """adam""" +29 21 training_loop """lcwa""" +29 21 evaluator """rankbased""" +29 22 dataset """kinships""" +29 22 model """complex""" +29 22 loss """softplus""" +29 22 regularizer """no""" +29 22 optimizer """adam""" +29 22 training_loop """lcwa""" +29 22 evaluator """rankbased""" +29 23 dataset """kinships""" +29 23 model """complex""" +29 23 loss """softplus""" +29 23 regularizer """no""" +29 23 optimizer """adam""" +29 23 training_loop """lcwa""" +29 23 evaluator """rankbased""" +29 24 dataset """kinships""" +29 24 model """complex""" +29 24 loss """softplus""" +29 24 regularizer """no""" +29 24 optimizer """adam""" +29 24 training_loop """lcwa""" +29 24 evaluator """rankbased""" +29 25 dataset """kinships""" +29 25 model """complex""" +29 25 loss """softplus""" +29 25 regularizer """no""" +29 25 optimizer """adam""" +29 25 training_loop """lcwa""" +29 25 
evaluator """rankbased""" +29 26 dataset """kinships""" +29 26 model """complex""" +29 26 loss """softplus""" +29 26 regularizer """no""" +29 26 optimizer """adam""" +29 26 training_loop """lcwa""" +29 26 evaluator """rankbased""" +29 27 dataset """kinships""" +29 27 model """complex""" +29 27 loss """softplus""" +29 27 regularizer """no""" +29 27 optimizer """adam""" +29 27 training_loop """lcwa""" +29 27 evaluator """rankbased""" +29 28 dataset """kinships""" +29 28 model """complex""" +29 28 loss """softplus""" +29 28 regularizer """no""" +29 28 optimizer """adam""" +29 28 training_loop """lcwa""" +29 28 evaluator """rankbased""" +29 29 dataset """kinships""" +29 29 model """complex""" +29 29 loss """softplus""" +29 29 regularizer """no""" +29 29 optimizer """adam""" +29 29 training_loop """lcwa""" +29 29 evaluator """rankbased""" +29 30 dataset """kinships""" +29 30 model """complex""" +29 30 loss """softplus""" +29 30 regularizer """no""" +29 30 optimizer """adam""" +29 30 training_loop """lcwa""" +29 30 evaluator """rankbased""" +29 31 dataset """kinships""" +29 31 model """complex""" +29 31 loss """softplus""" +29 31 regularizer """no""" +29 31 optimizer """adam""" +29 31 training_loop """lcwa""" +29 31 evaluator """rankbased""" +29 32 dataset """kinships""" +29 32 model """complex""" +29 32 loss """softplus""" +29 32 regularizer """no""" +29 32 optimizer """adam""" +29 32 training_loop """lcwa""" +29 32 evaluator """rankbased""" +29 33 dataset """kinships""" +29 33 model """complex""" +29 33 loss """softplus""" +29 33 regularizer """no""" +29 33 optimizer """adam""" +29 33 training_loop """lcwa""" +29 33 evaluator """rankbased""" +29 34 dataset """kinships""" +29 34 model """complex""" +29 34 loss """softplus""" +29 34 regularizer """no""" +29 34 optimizer """adam""" +29 34 training_loop """lcwa""" +29 34 evaluator """rankbased""" +29 35 dataset """kinships""" +29 35 model """complex""" +29 35 loss """softplus""" +29 35 regularizer """no""" +29 35 optimizer 
"""adam""" +29 35 training_loop """lcwa""" +29 35 evaluator """rankbased""" +29 36 dataset """kinships""" +29 36 model """complex""" +29 36 loss """softplus""" +29 36 regularizer """no""" +29 36 optimizer """adam""" +29 36 training_loop """lcwa""" +29 36 evaluator """rankbased""" +29 37 dataset """kinships""" +29 37 model """complex""" +29 37 loss """softplus""" +29 37 regularizer """no""" +29 37 optimizer """adam""" +29 37 training_loop """lcwa""" +29 37 evaluator """rankbased""" +29 38 dataset """kinships""" +29 38 model """complex""" +29 38 loss """softplus""" +29 38 regularizer """no""" +29 38 optimizer """adam""" +29 38 training_loop """lcwa""" +29 38 evaluator """rankbased""" +29 39 dataset """kinships""" +29 39 model """complex""" +29 39 loss """softplus""" +29 39 regularizer """no""" +29 39 optimizer """adam""" +29 39 training_loop """lcwa""" +29 39 evaluator """rankbased""" +29 40 dataset """kinships""" +29 40 model """complex""" +29 40 loss """softplus""" +29 40 regularizer """no""" +29 40 optimizer """adam""" +29 40 training_loop """lcwa""" +29 40 evaluator """rankbased""" +29 41 dataset """kinships""" +29 41 model """complex""" +29 41 loss """softplus""" +29 41 regularizer """no""" +29 41 optimizer """adam""" +29 41 training_loop """lcwa""" +29 41 evaluator """rankbased""" +29 42 dataset """kinships""" +29 42 model """complex""" +29 42 loss """softplus""" +29 42 regularizer """no""" +29 42 optimizer """adam""" +29 42 training_loop """lcwa""" +29 42 evaluator """rankbased""" +29 43 dataset """kinships""" +29 43 model """complex""" +29 43 loss """softplus""" +29 43 regularizer """no""" +29 43 optimizer """adam""" +29 43 training_loop """lcwa""" +29 43 evaluator """rankbased""" +29 44 dataset """kinships""" +29 44 model """complex""" +29 44 loss """softplus""" +29 44 regularizer """no""" +29 44 optimizer """adam""" +29 44 training_loop """lcwa""" +29 44 evaluator """rankbased""" +29 45 dataset """kinships""" +29 45 model """complex""" +29 45 loss 
"""softplus""" +29 45 regularizer """no""" +29 45 optimizer """adam""" +29 45 training_loop """lcwa""" +29 45 evaluator """rankbased""" +29 46 dataset """kinships""" +29 46 model """complex""" +29 46 loss """softplus""" +29 46 regularizer """no""" +29 46 optimizer """adam""" +29 46 training_loop """lcwa""" +29 46 evaluator """rankbased""" +29 47 dataset """kinships""" +29 47 model """complex""" +29 47 loss """softplus""" +29 47 regularizer """no""" +29 47 optimizer """adam""" +29 47 training_loop """lcwa""" +29 47 evaluator """rankbased""" +29 48 dataset """kinships""" +29 48 model """complex""" +29 48 loss """softplus""" +29 48 regularizer """no""" +29 48 optimizer """adam""" +29 48 training_loop """lcwa""" +29 48 evaluator """rankbased""" +29 49 dataset """kinships""" +29 49 model """complex""" +29 49 loss """softplus""" +29 49 regularizer """no""" +29 49 optimizer """adam""" +29 49 training_loop """lcwa""" +29 49 evaluator """rankbased""" +29 50 dataset """kinships""" +29 50 model """complex""" +29 50 loss """softplus""" +29 50 regularizer """no""" +29 50 optimizer """adam""" +29 50 training_loop """lcwa""" +29 50 evaluator """rankbased""" +29 51 dataset """kinships""" +29 51 model """complex""" +29 51 loss """softplus""" +29 51 regularizer """no""" +29 51 optimizer """adam""" +29 51 training_loop """lcwa""" +29 51 evaluator """rankbased""" +29 52 dataset """kinships""" +29 52 model """complex""" +29 52 loss """softplus""" +29 52 regularizer """no""" +29 52 optimizer """adam""" +29 52 training_loop """lcwa""" +29 52 evaluator """rankbased""" +29 53 dataset """kinships""" +29 53 model """complex""" +29 53 loss """softplus""" +29 53 regularizer """no""" +29 53 optimizer """adam""" +29 53 training_loop """lcwa""" +29 53 evaluator """rankbased""" +29 54 dataset """kinships""" +29 54 model """complex""" +29 54 loss """softplus""" +29 54 regularizer """no""" +29 54 optimizer """adam""" +29 54 training_loop """lcwa""" +29 54 evaluator """rankbased""" +29 55 dataset 
"""kinships""" +29 55 model """complex""" +29 55 loss """softplus""" +29 55 regularizer """no""" +29 55 optimizer """adam""" +29 55 training_loop """lcwa""" +29 55 evaluator """rankbased""" +29 56 dataset """kinships""" +29 56 model """complex""" +29 56 loss """softplus""" +29 56 regularizer """no""" +29 56 optimizer """adam""" +29 56 training_loop """lcwa""" +29 56 evaluator """rankbased""" +29 57 dataset """kinships""" +29 57 model """complex""" +29 57 loss """softplus""" +29 57 regularizer """no""" +29 57 optimizer """adam""" +29 57 training_loop """lcwa""" +29 57 evaluator """rankbased""" +29 58 dataset """kinships""" +29 58 model """complex""" +29 58 loss """softplus""" +29 58 regularizer """no""" +29 58 optimizer """adam""" +29 58 training_loop """lcwa""" +29 58 evaluator """rankbased""" +29 59 dataset """kinships""" +29 59 model """complex""" +29 59 loss """softplus""" +29 59 regularizer """no""" +29 59 optimizer """adam""" +29 59 training_loop """lcwa""" +29 59 evaluator """rankbased""" +29 60 dataset """kinships""" +29 60 model """complex""" +29 60 loss """softplus""" +29 60 regularizer """no""" +29 60 optimizer """adam""" +29 60 training_loop """lcwa""" +29 60 evaluator """rankbased""" +29 61 dataset """kinships""" +29 61 model """complex""" +29 61 loss """softplus""" +29 61 regularizer """no""" +29 61 optimizer """adam""" +29 61 training_loop """lcwa""" +29 61 evaluator """rankbased""" +29 62 dataset """kinships""" +29 62 model """complex""" +29 62 loss """softplus""" +29 62 regularizer """no""" +29 62 optimizer """adam""" +29 62 training_loop """lcwa""" +29 62 evaluator """rankbased""" +29 63 dataset """kinships""" +29 63 model """complex""" +29 63 loss """softplus""" +29 63 regularizer """no""" +29 63 optimizer """adam""" +29 63 training_loop """lcwa""" +29 63 evaluator """rankbased""" +29 64 dataset """kinships""" +29 64 model """complex""" +29 64 loss """softplus""" +29 64 regularizer """no""" +29 64 optimizer """adam""" +29 64 training_loop 
"""lcwa""" +29 64 evaluator """rankbased""" +29 65 dataset """kinships""" +29 65 model """complex""" +29 65 loss """softplus""" +29 65 regularizer """no""" +29 65 optimizer """adam""" +29 65 training_loop """lcwa""" +29 65 evaluator """rankbased""" +29 66 dataset """kinships""" +29 66 model """complex""" +29 66 loss """softplus""" +29 66 regularizer """no""" +29 66 optimizer """adam""" +29 66 training_loop """lcwa""" +29 66 evaluator """rankbased""" +29 67 dataset """kinships""" +29 67 model """complex""" +29 67 loss """softplus""" +29 67 regularizer """no""" +29 67 optimizer """adam""" +29 67 training_loop """lcwa""" +29 67 evaluator """rankbased""" +29 68 dataset """kinships""" +29 68 model """complex""" +29 68 loss """softplus""" +29 68 regularizer """no""" +29 68 optimizer """adam""" +29 68 training_loop """lcwa""" +29 68 evaluator """rankbased""" +29 69 dataset """kinships""" +29 69 model """complex""" +29 69 loss """softplus""" +29 69 regularizer """no""" +29 69 optimizer """adam""" +29 69 training_loop """lcwa""" +29 69 evaluator """rankbased""" +29 70 dataset """kinships""" +29 70 model """complex""" +29 70 loss """softplus""" +29 70 regularizer """no""" +29 70 optimizer """adam""" +29 70 training_loop """lcwa""" +29 70 evaluator """rankbased""" +29 71 dataset """kinships""" +29 71 model """complex""" +29 71 loss """softplus""" +29 71 regularizer """no""" +29 71 optimizer """adam""" +29 71 training_loop """lcwa""" +29 71 evaluator """rankbased""" +29 72 dataset """kinships""" +29 72 model """complex""" +29 72 loss """softplus""" +29 72 regularizer """no""" +29 72 optimizer """adam""" +29 72 training_loop """lcwa""" +29 72 evaluator """rankbased""" +29 73 dataset """kinships""" +29 73 model """complex""" +29 73 loss """softplus""" +29 73 regularizer """no""" +29 73 optimizer """adam""" +29 73 training_loop """lcwa""" +29 73 evaluator """rankbased""" +29 74 dataset """kinships""" +29 74 model """complex""" +29 74 loss """softplus""" +29 74 regularizer 
"""no""" +29 74 optimizer """adam""" +29 74 training_loop """lcwa""" +29 74 evaluator """rankbased""" +29 75 dataset """kinships""" +29 75 model """complex""" +29 75 loss """softplus""" +29 75 regularizer """no""" +29 75 optimizer """adam""" +29 75 training_loop """lcwa""" +29 75 evaluator """rankbased""" +29 76 dataset """kinships""" +29 76 model """complex""" +29 76 loss """softplus""" +29 76 regularizer """no""" +29 76 optimizer """adam""" +29 76 training_loop """lcwa""" +29 76 evaluator """rankbased""" +29 77 dataset """kinships""" +29 77 model """complex""" +29 77 loss """softplus""" +29 77 regularizer """no""" +29 77 optimizer """adam""" +29 77 training_loop """lcwa""" +29 77 evaluator """rankbased""" +29 78 dataset """kinships""" +29 78 model """complex""" +29 78 loss """softplus""" +29 78 regularizer """no""" +29 78 optimizer """adam""" +29 78 training_loop """lcwa""" +29 78 evaluator """rankbased""" +29 79 dataset """kinships""" +29 79 model """complex""" +29 79 loss """softplus""" +29 79 regularizer """no""" +29 79 optimizer """adam""" +29 79 training_loop """lcwa""" +29 79 evaluator """rankbased""" +29 80 dataset """kinships""" +29 80 model """complex""" +29 80 loss """softplus""" +29 80 regularizer """no""" +29 80 optimizer """adam""" +29 80 training_loop """lcwa""" +29 80 evaluator """rankbased""" +29 81 dataset """kinships""" +29 81 model """complex""" +29 81 loss """softplus""" +29 81 regularizer """no""" +29 81 optimizer """adam""" +29 81 training_loop """lcwa""" +29 81 evaluator """rankbased""" +29 82 dataset """kinships""" +29 82 model """complex""" +29 82 loss """softplus""" +29 82 regularizer """no""" +29 82 optimizer """adam""" +29 82 training_loop """lcwa""" +29 82 evaluator """rankbased""" +29 83 dataset """kinships""" +29 83 model """complex""" +29 83 loss """softplus""" +29 83 regularizer """no""" +29 83 optimizer """adam""" +29 83 training_loop """lcwa""" +29 83 evaluator """rankbased""" +29 84 dataset """kinships""" +29 84 model 
"""complex""" +29 84 loss """softplus""" +29 84 regularizer """no""" +29 84 optimizer """adam""" +29 84 training_loop """lcwa""" +29 84 evaluator """rankbased""" +29 85 dataset """kinships""" +29 85 model """complex""" +29 85 loss """softplus""" +29 85 regularizer """no""" +29 85 optimizer """adam""" +29 85 training_loop """lcwa""" +29 85 evaluator """rankbased""" +29 86 dataset """kinships""" +29 86 model """complex""" +29 86 loss """softplus""" +29 86 regularizer """no""" +29 86 optimizer """adam""" +29 86 training_loop """lcwa""" +29 86 evaluator """rankbased""" +29 87 dataset """kinships""" +29 87 model """complex""" +29 87 loss """softplus""" +29 87 regularizer """no""" +29 87 optimizer """adam""" +29 87 training_loop """lcwa""" +29 87 evaluator """rankbased""" +29 88 dataset """kinships""" +29 88 model """complex""" +29 88 loss """softplus""" +29 88 regularizer """no""" +29 88 optimizer """adam""" +29 88 training_loop """lcwa""" +29 88 evaluator """rankbased""" +29 89 dataset """kinships""" +29 89 model """complex""" +29 89 loss """softplus""" +29 89 regularizer """no""" +29 89 optimizer """adam""" +29 89 training_loop """lcwa""" +29 89 evaluator """rankbased""" +29 90 dataset """kinships""" +29 90 model """complex""" +29 90 loss """softplus""" +29 90 regularizer """no""" +29 90 optimizer """adam""" +29 90 training_loop """lcwa""" +29 90 evaluator """rankbased""" +29 91 dataset """kinships""" +29 91 model """complex""" +29 91 loss """softplus""" +29 91 regularizer """no""" +29 91 optimizer """adam""" +29 91 training_loop """lcwa""" +29 91 evaluator """rankbased""" +29 92 dataset """kinships""" +29 92 model """complex""" +29 92 loss """softplus""" +29 92 regularizer """no""" +29 92 optimizer """adam""" +29 92 training_loop """lcwa""" +29 92 evaluator """rankbased""" +29 93 dataset """kinships""" +29 93 model """complex""" +29 93 loss """softplus""" +29 93 regularizer """no""" +29 93 optimizer """adam""" +29 93 training_loop """lcwa""" +29 93 evaluator 
"""rankbased""" +29 94 dataset """kinships""" +29 94 model """complex""" +29 94 loss """softplus""" +29 94 regularizer """no""" +29 94 optimizer """adam""" +29 94 training_loop """lcwa""" +29 94 evaluator """rankbased""" +29 95 dataset """kinships""" +29 95 model """complex""" +29 95 loss """softplus""" +29 95 regularizer """no""" +29 95 optimizer """adam""" +29 95 training_loop """lcwa""" +29 95 evaluator """rankbased""" +29 96 dataset """kinships""" +29 96 model """complex""" +29 96 loss """softplus""" +29 96 regularizer """no""" +29 96 optimizer """adam""" +29 96 training_loop """lcwa""" +29 96 evaluator """rankbased""" +29 97 dataset """kinships""" +29 97 model """complex""" +29 97 loss """softplus""" +29 97 regularizer """no""" +29 97 optimizer """adam""" +29 97 training_loop """lcwa""" +29 97 evaluator """rankbased""" +29 98 dataset """kinships""" +29 98 model """complex""" +29 98 loss """softplus""" +29 98 regularizer """no""" +29 98 optimizer """adam""" +29 98 training_loop """lcwa""" +29 98 evaluator """rankbased""" +29 99 dataset """kinships""" +29 99 model """complex""" +29 99 loss """softplus""" +29 99 regularizer """no""" +29 99 optimizer """adam""" +29 99 training_loop """lcwa""" +29 99 evaluator """rankbased""" +29 100 dataset """kinships""" +29 100 model """complex""" +29 100 loss """softplus""" +29 100 regularizer """no""" +29 100 optimizer """adam""" +29 100 training_loop """lcwa""" +29 100 evaluator """rankbased""" +30 1 model.embedding_dim 1.0 +30 1 optimizer.lr 0.0027391025155623044 +30 1 training.batch_size 2.0 +30 1 training.label_smoothing 0.001658168511246355 +30 2 model.embedding_dim 2.0 +30 2 optimizer.lr 0.008296250039400174 +30 2 training.batch_size 1.0 +30 2 training.label_smoothing 0.0014762998634231996 +30 3 model.embedding_dim 1.0 +30 3 optimizer.lr 0.053853602506487054 +30 3 training.batch_size 0.0 +30 3 training.label_smoothing 0.28288685996057766 +30 4 model.embedding_dim 0.0 +30 4 optimizer.lr 0.002659483526101512 +30 4 
training.batch_size 0.0 +30 4 training.label_smoothing 0.04929559885529786 +30 5 model.embedding_dim 0.0 +30 5 optimizer.lr 0.003213554389858828 +30 5 training.batch_size 2.0 +30 5 training.label_smoothing 0.22405340228782178 +30 6 model.embedding_dim 1.0 +30 6 optimizer.lr 0.019531516425826227 +30 6 training.batch_size 1.0 +30 6 training.label_smoothing 0.24659380149137813 +30 7 model.embedding_dim 1.0 +30 7 optimizer.lr 0.009730129857905855 +30 7 training.batch_size 2.0 +30 7 training.label_smoothing 0.7109131062198143 +30 8 model.embedding_dim 2.0 +30 8 optimizer.lr 0.0023271849682408386 +30 8 training.batch_size 1.0 +30 8 training.label_smoothing 0.2319867634629444 +30 9 model.embedding_dim 2.0 +30 9 optimizer.lr 0.06668209508357933 +30 9 training.batch_size 2.0 +30 9 training.label_smoothing 0.008984176313693512 +30 10 model.embedding_dim 0.0 +30 10 optimizer.lr 0.008200388827276237 +30 10 training.batch_size 0.0 +30 10 training.label_smoothing 0.02858974924877055 +30 11 model.embedding_dim 1.0 +30 11 optimizer.lr 0.010031494466573202 +30 11 training.batch_size 2.0 +30 11 training.label_smoothing 0.10483327489943625 +30 12 model.embedding_dim 2.0 +30 12 optimizer.lr 0.0020732399047095444 +30 12 training.batch_size 1.0 +30 12 training.label_smoothing 0.01070967949649588 +30 13 model.embedding_dim 0.0 +30 13 optimizer.lr 0.09218682840453475 +30 13 training.batch_size 0.0 +30 13 training.label_smoothing 0.12599795951513412 +30 14 model.embedding_dim 1.0 +30 14 optimizer.lr 0.004588983627939618 +30 14 training.batch_size 2.0 +30 14 training.label_smoothing 0.04282465924381676 +30 15 model.embedding_dim 0.0 +30 15 optimizer.lr 0.033494018366557854 +30 15 training.batch_size 0.0 +30 15 training.label_smoothing 0.0011864179147592676 +30 16 model.embedding_dim 2.0 +30 16 optimizer.lr 0.0016169934726265529 +30 16 training.batch_size 2.0 +30 16 training.label_smoothing 0.0031901093377959236 +30 17 model.embedding_dim 1.0 +30 17 optimizer.lr 0.001156276077983256 +30 17 
training.batch_size 0.0 +30 17 training.label_smoothing 0.18776196007906817 +30 18 model.embedding_dim 1.0 +30 18 optimizer.lr 0.034265420032934826 +30 18 training.batch_size 1.0 +30 18 training.label_smoothing 0.20316990759779963 +30 19 model.embedding_dim 0.0 +30 19 optimizer.lr 0.07316967641732623 +30 19 training.batch_size 1.0 +30 19 training.label_smoothing 0.0015017326072613447 +30 20 model.embedding_dim 2.0 +30 20 optimizer.lr 0.01431754167014648 +30 20 training.batch_size 1.0 +30 20 training.label_smoothing 0.01585633923824761 +30 21 model.embedding_dim 2.0 +30 21 optimizer.lr 0.005039886678366898 +30 21 training.batch_size 2.0 +30 21 training.label_smoothing 0.004106353743024164 +30 22 model.embedding_dim 2.0 +30 22 optimizer.lr 0.011324389174680448 +30 22 training.batch_size 2.0 +30 22 training.label_smoothing 0.002271336292919403 +30 23 model.embedding_dim 1.0 +30 23 optimizer.lr 0.01858585466238418 +30 23 training.batch_size 1.0 +30 23 training.label_smoothing 0.0997551959149834 +30 24 model.embedding_dim 0.0 +30 24 optimizer.lr 0.08694157223917308 +30 24 training.batch_size 2.0 +30 24 training.label_smoothing 0.021634196687254883 +30 25 model.embedding_dim 2.0 +30 25 optimizer.lr 0.0058877584096182255 +30 25 training.batch_size 2.0 +30 25 training.label_smoothing 0.9173583568536673 +30 26 model.embedding_dim 1.0 +30 26 optimizer.lr 0.0021208541710662897 +30 26 training.batch_size 1.0 +30 26 training.label_smoothing 0.6530851432819623 +30 27 model.embedding_dim 0.0 +30 27 optimizer.lr 0.005147562493695492 +30 27 training.batch_size 1.0 +30 27 training.label_smoothing 0.006328239769596846 +30 28 model.embedding_dim 0.0 +30 28 optimizer.lr 0.007909329481885113 +30 28 training.batch_size 1.0 +30 28 training.label_smoothing 0.04529417136254793 +30 29 model.embedding_dim 1.0 +30 29 optimizer.lr 0.0077562430004681155 +30 29 training.batch_size 0.0 +30 29 training.label_smoothing 0.0011658537473689273 +30 30 model.embedding_dim 0.0 +30 30 optimizer.lr 
0.0029555125147151824 +30 30 training.batch_size 0.0 +30 30 training.label_smoothing 0.025275878546763244 +30 31 model.embedding_dim 2.0 +30 31 optimizer.lr 0.02956108198753195 +30 31 training.batch_size 1.0 +30 31 training.label_smoothing 0.0015923676131004388 +30 32 model.embedding_dim 2.0 +30 32 optimizer.lr 0.015430702513441122 +30 32 training.batch_size 0.0 +30 32 training.label_smoothing 0.2697775626526628 +30 33 model.embedding_dim 2.0 +30 33 optimizer.lr 0.015023904681824332 +30 33 training.batch_size 2.0 +30 33 training.label_smoothing 0.025059786125703816 +30 34 model.embedding_dim 1.0 +30 34 optimizer.lr 0.00995985406348563 +30 34 training.batch_size 0.0 +30 34 training.label_smoothing 0.0016174057756579493 +30 35 model.embedding_dim 1.0 +30 35 optimizer.lr 0.0068762335042951185 +30 35 training.batch_size 1.0 +30 35 training.label_smoothing 0.004760425024132216 +30 36 model.embedding_dim 0.0 +30 36 optimizer.lr 0.0067723056438057086 +30 36 training.batch_size 0.0 +30 36 training.label_smoothing 0.0031533211240621787 +30 37 model.embedding_dim 1.0 +30 37 optimizer.lr 0.0014230217862453338 +30 37 training.batch_size 1.0 +30 37 training.label_smoothing 0.050791316572609255 +30 38 model.embedding_dim 0.0 +30 38 optimizer.lr 0.006811887764914433 +30 38 training.batch_size 0.0 +30 38 training.label_smoothing 0.014854479226435529 +30 39 model.embedding_dim 2.0 +30 39 optimizer.lr 0.09456283475500968 +30 39 training.batch_size 1.0 +30 39 training.label_smoothing 0.0037254641486994104 +30 40 model.embedding_dim 1.0 +30 40 optimizer.lr 0.04028504355162233 +30 40 training.batch_size 2.0 +30 40 training.label_smoothing 0.435959628964588 +30 41 model.embedding_dim 1.0 +30 41 optimizer.lr 0.01931219156437254 +30 41 training.batch_size 0.0 +30 41 training.label_smoothing 0.002517267370042408 +30 42 model.embedding_dim 1.0 +30 42 optimizer.lr 0.0022656988612203867 +30 42 training.batch_size 2.0 +30 42 training.label_smoothing 0.0013388863451649917 +30 43 
model.embedding_dim 0.0 +30 43 optimizer.lr 0.0018314651933574572 +30 43 training.batch_size 2.0 +30 43 training.label_smoothing 0.2821352736005013 +30 44 model.embedding_dim 2.0 +30 44 optimizer.lr 0.002172168736066482 +30 44 training.batch_size 0.0 +30 44 training.label_smoothing 0.19018159713126223 +30 45 model.embedding_dim 2.0 +30 45 optimizer.lr 0.042879646278650846 +30 45 training.batch_size 1.0 +30 45 training.label_smoothing 0.024454536369672885 +30 46 model.embedding_dim 1.0 +30 46 optimizer.lr 0.06763543512699728 +30 46 training.batch_size 2.0 +30 46 training.label_smoothing 0.014700085903664466 +30 47 model.embedding_dim 2.0 +30 47 optimizer.lr 0.010840137613977431 +30 47 training.batch_size 0.0 +30 47 training.label_smoothing 0.06702814080155728 +30 48 model.embedding_dim 0.0 +30 48 optimizer.lr 0.05165061963020849 +30 48 training.batch_size 1.0 +30 48 training.label_smoothing 0.01376464689916058 +30 49 model.embedding_dim 2.0 +30 49 optimizer.lr 0.017377124474868638 +30 49 training.batch_size 0.0 +30 49 training.label_smoothing 0.00980342684698654 +30 50 model.embedding_dim 0.0 +30 50 optimizer.lr 0.0017967481176400033 +30 50 training.batch_size 1.0 +30 50 training.label_smoothing 0.04074806123543595 +30 51 model.embedding_dim 2.0 +30 51 optimizer.lr 0.0017164534186709078 +30 51 training.batch_size 2.0 +30 51 training.label_smoothing 0.6890991915474941 +30 52 model.embedding_dim 2.0 +30 52 optimizer.lr 0.02005860267458959 +30 52 training.batch_size 2.0 +30 52 training.label_smoothing 0.27985972072452714 +30 53 model.embedding_dim 2.0 +30 53 optimizer.lr 0.004280281144665816 +30 53 training.batch_size 1.0 +30 53 training.label_smoothing 0.0011731610120135675 +30 54 model.embedding_dim 0.0 +30 54 optimizer.lr 0.0012459758632978372 +30 54 training.batch_size 0.0 +30 54 training.label_smoothing 0.4465124294848025 +30 55 model.embedding_dim 1.0 +30 55 optimizer.lr 0.003434042134467379 +30 55 training.batch_size 2.0 +30 55 training.label_smoothing 
0.0031704114327980116 +30 56 model.embedding_dim 0.0 +30 56 optimizer.lr 0.0011071897296431048 +30 56 training.batch_size 1.0 +30 56 training.label_smoothing 0.01725804194250692 +30 57 model.embedding_dim 0.0 +30 57 optimizer.lr 0.009448355307212603 +30 57 training.batch_size 2.0 +30 57 training.label_smoothing 0.008591370066384964 +30 58 model.embedding_dim 0.0 +30 58 optimizer.lr 0.017785608811003092 +30 58 training.batch_size 2.0 +30 58 training.label_smoothing 0.001553876178070771 +30 59 model.embedding_dim 0.0 +30 59 optimizer.lr 0.0055719122549100445 +30 59 training.batch_size 0.0 +30 59 training.label_smoothing 0.050064315731668904 +30 60 model.embedding_dim 2.0 +30 60 optimizer.lr 0.004341210918356349 +30 60 training.batch_size 0.0 +30 60 training.label_smoothing 0.327644655351155 +30 61 model.embedding_dim 1.0 +30 61 optimizer.lr 0.001601339621589068 +30 61 training.batch_size 0.0 +30 61 training.label_smoothing 0.030209891702870462 +30 62 model.embedding_dim 0.0 +30 62 optimizer.lr 0.003023158538839203 +30 62 training.batch_size 1.0 +30 62 training.label_smoothing 0.07057213060989315 +30 63 model.embedding_dim 2.0 +30 63 optimizer.lr 0.007777587277316536 +30 63 training.batch_size 0.0 +30 63 training.label_smoothing 0.04514228366314604 +30 64 model.embedding_dim 1.0 +30 64 optimizer.lr 0.01602613881958139 +30 64 training.batch_size 0.0 +30 64 training.label_smoothing 0.003283058581417611 +30 65 model.embedding_dim 0.0 +30 65 optimizer.lr 0.006720433839684553 +30 65 training.batch_size 1.0 +30 65 training.label_smoothing 0.03646128914720089 +30 66 model.embedding_dim 1.0 +30 66 optimizer.lr 0.01788436524391816 +30 66 training.batch_size 0.0 +30 66 training.label_smoothing 0.0354990202199536 +30 67 model.embedding_dim 0.0 +30 67 optimizer.lr 0.0071035978982384555 +30 67 training.batch_size 2.0 +30 67 training.label_smoothing 0.361127538626334 +30 68 model.embedding_dim 2.0 +30 68 optimizer.lr 0.0022932975499618587 +30 68 training.batch_size 0.0 +30 68 
training.label_smoothing 0.76091340946501 +30 69 model.embedding_dim 0.0 +30 69 optimizer.lr 0.012336513368320885 +30 69 training.batch_size 1.0 +30 69 training.label_smoothing 0.033565124234983916 +30 70 model.embedding_dim 0.0 +30 70 optimizer.lr 0.007811430658126363 +30 70 training.batch_size 1.0 +30 70 training.label_smoothing 0.30658708629634573 +30 71 model.embedding_dim 0.0 +30 71 optimizer.lr 0.02361194884058237 +30 71 training.batch_size 1.0 +30 71 training.label_smoothing 0.03196474104515079 +30 72 model.embedding_dim 0.0 +30 72 optimizer.lr 0.002809240507321888 +30 72 training.batch_size 2.0 +30 72 training.label_smoothing 0.03407766097427147 +30 73 model.embedding_dim 0.0 +30 73 optimizer.lr 0.014555019629055282 +30 73 training.batch_size 2.0 +30 73 training.label_smoothing 0.012464908874860205 +30 74 model.embedding_dim 0.0 +30 74 optimizer.lr 0.023578127705780657 +30 74 training.batch_size 2.0 +30 74 training.label_smoothing 0.016610483753941228 +30 75 model.embedding_dim 2.0 +30 75 optimizer.lr 0.005724394063295254 +30 75 training.batch_size 0.0 +30 75 training.label_smoothing 0.09256102207997051 +30 76 model.embedding_dim 1.0 +30 76 optimizer.lr 0.02019317973332283 +30 76 training.batch_size 0.0 +30 76 training.label_smoothing 0.09481775680918159 +30 77 model.embedding_dim 1.0 +30 77 optimizer.lr 0.013228427984919886 +30 77 training.batch_size 2.0 +30 77 training.label_smoothing 0.06801073508709175 +30 78 model.embedding_dim 1.0 +30 78 optimizer.lr 0.03327049446483261 +30 78 training.batch_size 2.0 +30 78 training.label_smoothing 0.02885843855753249 +30 79 model.embedding_dim 0.0 +30 79 optimizer.lr 0.01307627723689202 +30 79 training.batch_size 0.0 +30 79 training.label_smoothing 0.0014603775722804482 +30 80 model.embedding_dim 2.0 +30 80 optimizer.lr 0.09633997888862571 +30 80 training.batch_size 1.0 +30 80 training.label_smoothing 0.0023170825780213203 +30 81 model.embedding_dim 1.0 +30 81 optimizer.lr 0.001307674611337499 +30 81 
training.batch_size 0.0 +30 81 training.label_smoothing 0.011043654629232702 +30 82 model.embedding_dim 0.0 +30 82 optimizer.lr 0.0022785700472124693 +30 82 training.batch_size 2.0 +30 82 training.label_smoothing 0.07786733084471809 +30 83 model.embedding_dim 2.0 +30 83 optimizer.lr 0.0027876377069245627 +30 83 training.batch_size 0.0 +30 83 training.label_smoothing 0.012298394355448645 +30 84 model.embedding_dim 1.0 +30 84 optimizer.lr 0.002773816144617397 +30 84 training.batch_size 1.0 +30 84 training.label_smoothing 0.0018174916520008598 +30 85 model.embedding_dim 1.0 +30 85 optimizer.lr 0.08901838868884424 +30 85 training.batch_size 1.0 +30 85 training.label_smoothing 0.36036078791850035 +30 86 model.embedding_dim 2.0 +30 86 optimizer.lr 0.00410827479248609 +30 86 training.batch_size 1.0 +30 86 training.label_smoothing 0.06434657973118382 +30 87 model.embedding_dim 0.0 +30 87 optimizer.lr 0.005492924347154794 +30 87 training.batch_size 0.0 +30 87 training.label_smoothing 0.165366986789398 +30 88 model.embedding_dim 0.0 +30 88 optimizer.lr 0.0035226887044687814 +30 88 training.batch_size 2.0 +30 88 training.label_smoothing 0.013927337518323578 +30 89 model.embedding_dim 1.0 +30 89 optimizer.lr 0.04029288507876881 +30 89 training.batch_size 0.0 +30 89 training.label_smoothing 0.14819291310137828 +30 90 model.embedding_dim 1.0 +30 90 optimizer.lr 0.0907793988472945 +30 90 training.batch_size 1.0 +30 90 training.label_smoothing 0.0011431930090850702 +30 91 model.embedding_dim 1.0 +30 91 optimizer.lr 0.009469105705013607 +30 91 training.batch_size 1.0 +30 91 training.label_smoothing 0.007154017319825942 +30 92 model.embedding_dim 1.0 +30 92 optimizer.lr 0.0846617007512158 +30 92 training.batch_size 0.0 +30 92 training.label_smoothing 0.003130705748450064 +30 93 model.embedding_dim 1.0 +30 93 optimizer.lr 0.009308012302023471 +30 93 training.batch_size 1.0 +30 93 training.label_smoothing 0.03374144919953181 +30 94 model.embedding_dim 0.0 +30 94 optimizer.lr 
0.022982726144879374 +30 94 training.batch_size 1.0 +30 94 training.label_smoothing 0.009060663095337314 +30 95 model.embedding_dim 1.0 +30 95 optimizer.lr 0.001012917956585776 +30 95 training.batch_size 0.0 +30 95 training.label_smoothing 0.006048227421378248 +30 96 model.embedding_dim 2.0 +30 96 optimizer.lr 0.020843853840623552 +30 96 training.batch_size 2.0 +30 96 training.label_smoothing 0.3822274522651744 +30 97 model.embedding_dim 1.0 +30 97 optimizer.lr 0.021090878724366102 +30 97 training.batch_size 2.0 +30 97 training.label_smoothing 0.5827162988219906 +30 98 model.embedding_dim 0.0 +30 98 optimizer.lr 0.0012989362325255107 +30 98 training.batch_size 1.0 +30 98 training.label_smoothing 0.00907247129264978 +30 99 model.embedding_dim 1.0 +30 99 optimizer.lr 0.00938958822013367 +30 99 training.batch_size 1.0 +30 99 training.label_smoothing 0.04761310655054523 +30 100 model.embedding_dim 1.0 +30 100 optimizer.lr 0.06404094183748803 +30 100 training.batch_size 0.0 +30 100 training.label_smoothing 0.5408331476641873 +30 1 dataset """kinships""" +30 1 model """complex""" +30 1 loss """bceaftersigmoid""" +30 1 regularizer """no""" +30 1 optimizer """adam""" +30 1 training_loop """lcwa""" +30 1 evaluator """rankbased""" +30 2 dataset """kinships""" +30 2 model """complex""" +30 2 loss """bceaftersigmoid""" +30 2 regularizer """no""" +30 2 optimizer """adam""" +30 2 training_loop """lcwa""" +30 2 evaluator """rankbased""" +30 3 dataset """kinships""" +30 3 model """complex""" +30 3 loss """bceaftersigmoid""" +30 3 regularizer """no""" +30 3 optimizer """adam""" +30 3 training_loop """lcwa""" +30 3 evaluator """rankbased""" +30 4 dataset """kinships""" +30 4 model """complex""" +30 4 loss """bceaftersigmoid""" +30 4 regularizer """no""" +30 4 optimizer """adam""" +30 4 training_loop """lcwa""" +30 4 evaluator """rankbased""" +30 5 dataset """kinships""" +30 5 model """complex""" +30 5 loss """bceaftersigmoid""" +30 5 regularizer """no""" +30 5 optimizer """adam""" 
+30 5 training_loop """lcwa""" +30 5 evaluator """rankbased""" +30 6 dataset """kinships""" +30 6 model """complex""" +30 6 loss """bceaftersigmoid""" +30 6 regularizer """no""" +30 6 optimizer """adam""" +30 6 training_loop """lcwa""" +30 6 evaluator """rankbased""" +30 7 dataset """kinships""" +30 7 model """complex""" +30 7 loss """bceaftersigmoid""" +30 7 regularizer """no""" +30 7 optimizer """adam""" +30 7 training_loop """lcwa""" +30 7 evaluator """rankbased""" +30 8 dataset """kinships""" +30 8 model """complex""" +30 8 loss """bceaftersigmoid""" +30 8 regularizer """no""" +30 8 optimizer """adam""" +30 8 training_loop """lcwa""" +30 8 evaluator """rankbased""" +30 9 dataset """kinships""" +30 9 model """complex""" +30 9 loss """bceaftersigmoid""" +30 9 regularizer """no""" +30 9 optimizer """adam""" +30 9 training_loop """lcwa""" +30 9 evaluator """rankbased""" +30 10 dataset """kinships""" +30 10 model """complex""" +30 10 loss """bceaftersigmoid""" +30 10 regularizer """no""" +30 10 optimizer """adam""" +30 10 training_loop """lcwa""" +30 10 evaluator """rankbased""" +30 11 dataset """kinships""" +30 11 model """complex""" +30 11 loss """bceaftersigmoid""" +30 11 regularizer """no""" +30 11 optimizer """adam""" +30 11 training_loop """lcwa""" +30 11 evaluator """rankbased""" +30 12 dataset """kinships""" +30 12 model """complex""" +30 12 loss """bceaftersigmoid""" +30 12 regularizer """no""" +30 12 optimizer """adam""" +30 12 training_loop """lcwa""" +30 12 evaluator """rankbased""" +30 13 dataset """kinships""" +30 13 model """complex""" +30 13 loss """bceaftersigmoid""" +30 13 regularizer """no""" +30 13 optimizer """adam""" +30 13 training_loop """lcwa""" +30 13 evaluator """rankbased""" +30 14 dataset """kinships""" +30 14 model """complex""" +30 14 loss """bceaftersigmoid""" +30 14 regularizer """no""" +30 14 optimizer """adam""" +30 14 training_loop """lcwa""" +30 14 evaluator """rankbased""" +30 15 dataset """kinships""" +30 15 model """complex""" 
+30 15 loss """bceaftersigmoid""" +30 15 regularizer """no""" +30 15 optimizer """adam""" +30 15 training_loop """lcwa""" +30 15 evaluator """rankbased""" +30 16 dataset """kinships""" +30 16 model """complex""" +30 16 loss """bceaftersigmoid""" +30 16 regularizer """no""" +30 16 optimizer """adam""" +30 16 training_loop """lcwa""" +30 16 evaluator """rankbased""" +30 17 dataset """kinships""" +30 17 model """complex""" +30 17 loss """bceaftersigmoid""" +30 17 regularizer """no""" +30 17 optimizer """adam""" +30 17 training_loop """lcwa""" +30 17 evaluator """rankbased""" +30 18 dataset """kinships""" +30 18 model """complex""" +30 18 loss """bceaftersigmoid""" +30 18 regularizer """no""" +30 18 optimizer """adam""" +30 18 training_loop """lcwa""" +30 18 evaluator """rankbased""" +30 19 dataset """kinships""" +30 19 model """complex""" +30 19 loss """bceaftersigmoid""" +30 19 regularizer """no""" +30 19 optimizer """adam""" +30 19 training_loop """lcwa""" +30 19 evaluator """rankbased""" +30 20 dataset """kinships""" +30 20 model """complex""" +30 20 loss """bceaftersigmoid""" +30 20 regularizer """no""" +30 20 optimizer """adam""" +30 20 training_loop """lcwa""" +30 20 evaluator """rankbased""" +30 21 dataset """kinships""" +30 21 model """complex""" +30 21 loss """bceaftersigmoid""" +30 21 regularizer """no""" +30 21 optimizer """adam""" +30 21 training_loop """lcwa""" +30 21 evaluator """rankbased""" +30 22 dataset """kinships""" +30 22 model """complex""" +30 22 loss """bceaftersigmoid""" +30 22 regularizer """no""" +30 22 optimizer """adam""" +30 22 training_loop """lcwa""" +30 22 evaluator """rankbased""" +30 23 dataset """kinships""" +30 23 model """complex""" +30 23 loss """bceaftersigmoid""" +30 23 regularizer """no""" +30 23 optimizer """adam""" +30 23 training_loop """lcwa""" +30 23 evaluator """rankbased""" +30 24 dataset """kinships""" +30 24 model """complex""" +30 24 loss """bceaftersigmoid""" +30 24 regularizer """no""" +30 24 optimizer """adam""" 
+30 24 training_loop """lcwa""" +30 24 evaluator """rankbased""" +30 25 dataset """kinships""" +30 25 model """complex""" +30 25 loss """bceaftersigmoid""" +30 25 regularizer """no""" +30 25 optimizer """adam""" +30 25 training_loop """lcwa""" +30 25 evaluator """rankbased""" +30 26 dataset """kinships""" +30 26 model """complex""" +30 26 loss """bceaftersigmoid""" +30 26 regularizer """no""" +30 26 optimizer """adam""" +30 26 training_loop """lcwa""" +30 26 evaluator """rankbased""" +30 27 dataset """kinships""" +30 27 model """complex""" +30 27 loss """bceaftersigmoid""" +30 27 regularizer """no""" +30 27 optimizer """adam""" +30 27 training_loop """lcwa""" +30 27 evaluator """rankbased""" +30 28 dataset """kinships""" +30 28 model """complex""" +30 28 loss """bceaftersigmoid""" +30 28 regularizer """no""" +30 28 optimizer """adam""" +30 28 training_loop """lcwa""" +30 28 evaluator """rankbased""" +30 29 dataset """kinships""" +30 29 model """complex""" +30 29 loss """bceaftersigmoid""" +30 29 regularizer """no""" +30 29 optimizer """adam""" +30 29 training_loop """lcwa""" +30 29 evaluator """rankbased""" +30 30 dataset """kinships""" +30 30 model """complex""" +30 30 loss """bceaftersigmoid""" +30 30 regularizer """no""" +30 30 optimizer """adam""" +30 30 training_loop """lcwa""" +30 30 evaluator """rankbased""" +30 31 dataset """kinships""" +30 31 model """complex""" +30 31 loss """bceaftersigmoid""" +30 31 regularizer """no""" +30 31 optimizer """adam""" +30 31 training_loop """lcwa""" +30 31 evaluator """rankbased""" +30 32 dataset """kinships""" +30 32 model """complex""" +30 32 loss """bceaftersigmoid""" +30 32 regularizer """no""" +30 32 optimizer """adam""" +30 32 training_loop """lcwa""" +30 32 evaluator """rankbased""" +30 33 dataset """kinships""" +30 33 model """complex""" +30 33 loss """bceaftersigmoid""" +30 33 regularizer """no""" +30 33 optimizer """adam""" +30 33 training_loop """lcwa""" +30 33 evaluator """rankbased""" +30 34 dataset 
"""kinships""" +30 34 model """complex""" +30 34 loss """bceaftersigmoid""" +30 34 regularizer """no""" +30 34 optimizer """adam""" +30 34 training_loop """lcwa""" +30 34 evaluator """rankbased""" +30 35 dataset """kinships""" +30 35 model """complex""" +30 35 loss """bceaftersigmoid""" +30 35 regularizer """no""" +30 35 optimizer """adam""" +30 35 training_loop """lcwa""" +30 35 evaluator """rankbased""" +30 36 dataset """kinships""" +30 36 model """complex""" +30 36 loss """bceaftersigmoid""" +30 36 regularizer """no""" +30 36 optimizer """adam""" +30 36 training_loop """lcwa""" +30 36 evaluator """rankbased""" +30 37 dataset """kinships""" +30 37 model """complex""" +30 37 loss """bceaftersigmoid""" +30 37 regularizer """no""" +30 37 optimizer """adam""" +30 37 training_loop """lcwa""" +30 37 evaluator """rankbased""" +30 38 dataset """kinships""" +30 38 model """complex""" +30 38 loss """bceaftersigmoid""" +30 38 regularizer """no""" +30 38 optimizer """adam""" +30 38 training_loop """lcwa""" +30 38 evaluator """rankbased""" +30 39 dataset """kinships""" +30 39 model """complex""" +30 39 loss """bceaftersigmoid""" +30 39 regularizer """no""" +30 39 optimizer """adam""" +30 39 training_loop """lcwa""" +30 39 evaluator """rankbased""" +30 40 dataset """kinships""" +30 40 model """complex""" +30 40 loss """bceaftersigmoid""" +30 40 regularizer """no""" +30 40 optimizer """adam""" +30 40 training_loop """lcwa""" +30 40 evaluator """rankbased""" +30 41 dataset """kinships""" +30 41 model """complex""" +30 41 loss """bceaftersigmoid""" +30 41 regularizer """no""" +30 41 optimizer """adam""" +30 41 training_loop """lcwa""" +30 41 evaluator """rankbased""" +30 42 dataset """kinships""" +30 42 model """complex""" +30 42 loss """bceaftersigmoid""" +30 42 regularizer """no""" +30 42 optimizer """adam""" +30 42 training_loop """lcwa""" +30 42 evaluator """rankbased""" +30 43 dataset """kinships""" +30 43 model """complex""" +30 43 loss """bceaftersigmoid""" +30 43 
regularizer """no""" +30 43 optimizer """adam""" +30 43 training_loop """lcwa""" +30 43 evaluator """rankbased""" +30 44 dataset """kinships""" +30 44 model """complex""" +30 44 loss """bceaftersigmoid""" +30 44 regularizer """no""" +30 44 optimizer """adam""" +30 44 training_loop """lcwa""" +30 44 evaluator """rankbased""" +30 45 dataset """kinships""" +30 45 model """complex""" +30 45 loss """bceaftersigmoid""" +30 45 regularizer """no""" +30 45 optimizer """adam""" +30 45 training_loop """lcwa""" +30 45 evaluator """rankbased""" +30 46 dataset """kinships""" +30 46 model """complex""" +30 46 loss """bceaftersigmoid""" +30 46 regularizer """no""" +30 46 optimizer """adam""" +30 46 training_loop """lcwa""" +30 46 evaluator """rankbased""" +30 47 dataset """kinships""" +30 47 model """complex""" +30 47 loss """bceaftersigmoid""" +30 47 regularizer """no""" +30 47 optimizer """adam""" +30 47 training_loop """lcwa""" +30 47 evaluator """rankbased""" +30 48 dataset """kinships""" +30 48 model """complex""" +30 48 loss """bceaftersigmoid""" +30 48 regularizer """no""" +30 48 optimizer """adam""" +30 48 training_loop """lcwa""" +30 48 evaluator """rankbased""" +30 49 dataset """kinships""" +30 49 model """complex""" +30 49 loss """bceaftersigmoid""" +30 49 regularizer """no""" +30 49 optimizer """adam""" +30 49 training_loop """lcwa""" +30 49 evaluator """rankbased""" +30 50 dataset """kinships""" +30 50 model """complex""" +30 50 loss """bceaftersigmoid""" +30 50 regularizer """no""" +30 50 optimizer """adam""" +30 50 training_loop """lcwa""" +30 50 evaluator """rankbased""" +30 51 dataset """kinships""" +30 51 model """complex""" +30 51 loss """bceaftersigmoid""" +30 51 regularizer """no""" +30 51 optimizer """adam""" +30 51 training_loop """lcwa""" +30 51 evaluator """rankbased""" +30 52 dataset """kinships""" +30 52 model """complex""" +30 52 loss """bceaftersigmoid""" +30 52 regularizer """no""" +30 52 optimizer """adam""" +30 52 training_loop """lcwa""" +30 52 
evaluator """rankbased""" +30 53 dataset """kinships""" +30 53 model """complex""" +30 53 loss """bceaftersigmoid""" +30 53 regularizer """no""" +30 53 optimizer """adam""" +30 53 training_loop """lcwa""" +30 53 evaluator """rankbased""" +30 54 dataset """kinships""" +30 54 model """complex""" +30 54 loss """bceaftersigmoid""" +30 54 regularizer """no""" +30 54 optimizer """adam""" +30 54 training_loop """lcwa""" +30 54 evaluator """rankbased""" +30 55 dataset """kinships""" +30 55 model """complex""" +30 55 loss """bceaftersigmoid""" +30 55 regularizer """no""" +30 55 optimizer """adam""" +30 55 training_loop """lcwa""" +30 55 evaluator """rankbased""" +30 56 dataset """kinships""" +30 56 model """complex""" +30 56 loss """bceaftersigmoid""" +30 56 regularizer """no""" +30 56 optimizer """adam""" +30 56 training_loop """lcwa""" +30 56 evaluator """rankbased""" +30 57 dataset """kinships""" +30 57 model """complex""" +30 57 loss """bceaftersigmoid""" +30 57 regularizer """no""" +30 57 optimizer """adam""" +30 57 training_loop """lcwa""" +30 57 evaluator """rankbased""" +30 58 dataset """kinships""" +30 58 model """complex""" +30 58 loss """bceaftersigmoid""" +30 58 regularizer """no""" +30 58 optimizer """adam""" +30 58 training_loop """lcwa""" +30 58 evaluator """rankbased""" +30 59 dataset """kinships""" +30 59 model """complex""" +30 59 loss """bceaftersigmoid""" +30 59 regularizer """no""" +30 59 optimizer """adam""" +30 59 training_loop """lcwa""" +30 59 evaluator """rankbased""" +30 60 dataset """kinships""" +30 60 model """complex""" +30 60 loss """bceaftersigmoid""" +30 60 regularizer """no""" +30 60 optimizer """adam""" +30 60 training_loop """lcwa""" +30 60 evaluator """rankbased""" +30 61 dataset """kinships""" +30 61 model """complex""" +30 61 loss """bceaftersigmoid""" +30 61 regularizer """no""" +30 61 optimizer """adam""" +30 61 training_loop """lcwa""" +30 61 evaluator """rankbased""" +30 62 dataset """kinships""" +30 62 model """complex""" +30 62 
loss """bceaftersigmoid""" +30 62 regularizer """no""" +30 62 optimizer """adam""" +30 62 training_loop """lcwa""" +30 62 evaluator """rankbased""" +30 63 dataset """kinships""" +30 63 model """complex""" +30 63 loss """bceaftersigmoid""" +30 63 regularizer """no""" +30 63 optimizer """adam""" +30 63 training_loop """lcwa""" +30 63 evaluator """rankbased""" +30 64 dataset """kinships""" +30 64 model """complex""" +30 64 loss """bceaftersigmoid""" +30 64 regularizer """no""" +30 64 optimizer """adam""" +30 64 training_loop """lcwa""" +30 64 evaluator """rankbased""" +30 65 dataset """kinships""" +30 65 model """complex""" +30 65 loss """bceaftersigmoid""" +30 65 regularizer """no""" +30 65 optimizer """adam""" +30 65 training_loop """lcwa""" +30 65 evaluator """rankbased""" +30 66 dataset """kinships""" +30 66 model """complex""" +30 66 loss """bceaftersigmoid""" +30 66 regularizer """no""" +30 66 optimizer """adam""" +30 66 training_loop """lcwa""" +30 66 evaluator """rankbased""" +30 67 dataset """kinships""" +30 67 model """complex""" +30 67 loss """bceaftersigmoid""" +30 67 regularizer """no""" +30 67 optimizer """adam""" +30 67 training_loop """lcwa""" +30 67 evaluator """rankbased""" +30 68 dataset """kinships""" +30 68 model """complex""" +30 68 loss """bceaftersigmoid""" +30 68 regularizer """no""" +30 68 optimizer """adam""" +30 68 training_loop """lcwa""" +30 68 evaluator """rankbased""" +30 69 dataset """kinships""" +30 69 model """complex""" +30 69 loss """bceaftersigmoid""" +30 69 regularizer """no""" +30 69 optimizer """adam""" +30 69 training_loop """lcwa""" +30 69 evaluator """rankbased""" +30 70 dataset """kinships""" +30 70 model """complex""" +30 70 loss """bceaftersigmoid""" +30 70 regularizer """no""" +30 70 optimizer """adam""" +30 70 training_loop """lcwa""" +30 70 evaluator """rankbased""" +30 71 dataset """kinships""" +30 71 model """complex""" +30 71 loss """bceaftersigmoid""" +30 71 regularizer """no""" +30 71 optimizer """adam""" +30 71 
training_loop """lcwa""" +30 71 evaluator """rankbased""" +30 72 dataset """kinships""" +30 72 model """complex""" +30 72 loss """bceaftersigmoid""" +30 72 regularizer """no""" +30 72 optimizer """adam""" +30 72 training_loop """lcwa""" +30 72 evaluator """rankbased""" +30 73 dataset """kinships""" +30 73 model """complex""" +30 73 loss """bceaftersigmoid""" +30 73 regularizer """no""" +30 73 optimizer """adam""" +30 73 training_loop """lcwa""" +30 73 evaluator """rankbased""" +30 74 dataset """kinships""" +30 74 model """complex""" +30 74 loss """bceaftersigmoid""" +30 74 regularizer """no""" +30 74 optimizer """adam""" +30 74 training_loop """lcwa""" +30 74 evaluator """rankbased""" +30 75 dataset """kinships""" +30 75 model """complex""" +30 75 loss """bceaftersigmoid""" +30 75 regularizer """no""" +30 75 optimizer """adam""" +30 75 training_loop """lcwa""" +30 75 evaluator """rankbased""" +30 76 dataset """kinships""" +30 76 model """complex""" +30 76 loss """bceaftersigmoid""" +30 76 regularizer """no""" +30 76 optimizer """adam""" +30 76 training_loop """lcwa""" +30 76 evaluator """rankbased""" +30 77 dataset """kinships""" +30 77 model """complex""" +30 77 loss """bceaftersigmoid""" +30 77 regularizer """no""" +30 77 optimizer """adam""" +30 77 training_loop """lcwa""" +30 77 evaluator """rankbased""" +30 78 dataset """kinships""" +30 78 model """complex""" +30 78 loss """bceaftersigmoid""" +30 78 regularizer """no""" +30 78 optimizer """adam""" +30 78 training_loop """lcwa""" +30 78 evaluator """rankbased""" +30 79 dataset """kinships""" +30 79 model """complex""" +30 79 loss """bceaftersigmoid""" +30 79 regularizer """no""" +30 79 optimizer """adam""" +30 79 training_loop """lcwa""" +30 79 evaluator """rankbased""" +30 80 dataset """kinships""" +30 80 model """complex""" +30 80 loss """bceaftersigmoid""" +30 80 regularizer """no""" +30 80 optimizer """adam""" +30 80 training_loop """lcwa""" +30 80 evaluator """rankbased""" +30 81 dataset """kinships""" +30 
81 model """complex""" +30 81 loss """bceaftersigmoid""" +30 81 regularizer """no""" +30 81 optimizer """adam""" +30 81 training_loop """lcwa""" +30 81 evaluator """rankbased""" +30 82 dataset """kinships""" +30 82 model """complex""" +30 82 loss """bceaftersigmoid""" +30 82 regularizer """no""" +30 82 optimizer """adam""" +30 82 training_loop """lcwa""" +30 82 evaluator """rankbased""" +30 83 dataset """kinships""" +30 83 model """complex""" +30 83 loss """bceaftersigmoid""" +30 83 regularizer """no""" +30 83 optimizer """adam""" +30 83 training_loop """lcwa""" +30 83 evaluator """rankbased""" +30 84 dataset """kinships""" +30 84 model """complex""" +30 84 loss """bceaftersigmoid""" +30 84 regularizer """no""" +30 84 optimizer """adam""" +30 84 training_loop """lcwa""" +30 84 evaluator """rankbased""" +30 85 dataset """kinships""" +30 85 model """complex""" +30 85 loss """bceaftersigmoid""" +30 85 regularizer """no""" +30 85 optimizer """adam""" +30 85 training_loop """lcwa""" +30 85 evaluator """rankbased""" +30 86 dataset """kinships""" +30 86 model """complex""" +30 86 loss """bceaftersigmoid""" +30 86 regularizer """no""" +30 86 optimizer """adam""" +30 86 training_loop """lcwa""" +30 86 evaluator """rankbased""" +30 87 dataset """kinships""" +30 87 model """complex""" +30 87 loss """bceaftersigmoid""" +30 87 regularizer """no""" +30 87 optimizer """adam""" +30 87 training_loop """lcwa""" +30 87 evaluator """rankbased""" +30 88 dataset """kinships""" +30 88 model """complex""" +30 88 loss """bceaftersigmoid""" +30 88 regularizer """no""" +30 88 optimizer """adam""" +30 88 training_loop """lcwa""" +30 88 evaluator """rankbased""" +30 89 dataset """kinships""" +30 89 model """complex""" +30 89 loss """bceaftersigmoid""" +30 89 regularizer """no""" +30 89 optimizer """adam""" +30 89 training_loop """lcwa""" +30 89 evaluator """rankbased""" +30 90 dataset """kinships""" +30 90 model """complex""" +30 90 loss """bceaftersigmoid""" +30 90 regularizer """no""" +30 90 
optimizer """adam""" +30 90 training_loop """lcwa""" +30 90 evaluator """rankbased""" +30 91 dataset """kinships""" +30 91 model """complex""" +30 91 loss """bceaftersigmoid""" +30 91 regularizer """no""" +30 91 optimizer """adam""" +30 91 training_loop """lcwa""" +30 91 evaluator """rankbased""" +30 92 dataset """kinships""" +30 92 model """complex""" +30 92 loss """bceaftersigmoid""" +30 92 regularizer """no""" +30 92 optimizer """adam""" +30 92 training_loop """lcwa""" +30 92 evaluator """rankbased""" +30 93 dataset """kinships""" +30 93 model """complex""" +30 93 loss """bceaftersigmoid""" +30 93 regularizer """no""" +30 93 optimizer """adam""" +30 93 training_loop """lcwa""" +30 93 evaluator """rankbased""" +30 94 dataset """kinships""" +30 94 model """complex""" +30 94 loss """bceaftersigmoid""" +30 94 regularizer """no""" +30 94 optimizer """adam""" +30 94 training_loop """lcwa""" +30 94 evaluator """rankbased""" +30 95 dataset """kinships""" +30 95 model """complex""" +30 95 loss """bceaftersigmoid""" +30 95 regularizer """no""" +30 95 optimizer """adam""" +30 95 training_loop """lcwa""" +30 95 evaluator """rankbased""" +30 96 dataset """kinships""" +30 96 model """complex""" +30 96 loss """bceaftersigmoid""" +30 96 regularizer """no""" +30 96 optimizer """adam""" +30 96 training_loop """lcwa""" +30 96 evaluator """rankbased""" +30 97 dataset """kinships""" +30 97 model """complex""" +30 97 loss """bceaftersigmoid""" +30 97 regularizer """no""" +30 97 optimizer """adam""" +30 97 training_loop """lcwa""" +30 97 evaluator """rankbased""" +30 98 dataset """kinships""" +30 98 model """complex""" +30 98 loss """bceaftersigmoid""" +30 98 regularizer """no""" +30 98 optimizer """adam""" +30 98 training_loop """lcwa""" +30 98 evaluator """rankbased""" +30 99 dataset """kinships""" +30 99 model """complex""" +30 99 loss """bceaftersigmoid""" +30 99 regularizer """no""" +30 99 optimizer """adam""" +30 99 training_loop """lcwa""" +30 99 evaluator """rankbased""" +30 
100 dataset """kinships""" +30 100 model """complex""" +30 100 loss """bceaftersigmoid""" +30 100 regularizer """no""" +30 100 optimizer """adam""" +30 100 training_loop """lcwa""" +30 100 evaluator """rankbased""" +31 1 model.embedding_dim 1.0 +31 1 optimizer.lr 0.0522194048355374 +31 1 training.batch_size 2.0 +31 1 training.label_smoothing 0.11789986958419216 +31 2 model.embedding_dim 2.0 +31 2 optimizer.lr 0.004479486353743171 +31 2 training.batch_size 0.0 +31 2 training.label_smoothing 0.006303470874717029 +31 3 model.embedding_dim 0.0 +31 3 optimizer.lr 0.03255166433245733 +31 3 training.batch_size 2.0 +31 3 training.label_smoothing 0.6023904959189067 +31 4 model.embedding_dim 2.0 +31 4 optimizer.lr 0.08413775295243779 +31 4 training.batch_size 2.0 +31 4 training.label_smoothing 0.3253016299519046 +31 5 model.embedding_dim 0.0 +31 5 optimizer.lr 0.05922503007378918 +31 5 training.batch_size 1.0 +31 5 training.label_smoothing 0.0010241685457001383 +31 6 model.embedding_dim 2.0 +31 6 optimizer.lr 0.05278664565376212 +31 6 training.batch_size 0.0 +31 6 training.label_smoothing 0.0031982928685938767 +31 7 model.embedding_dim 1.0 +31 7 optimizer.lr 0.03001251001096813 +31 7 training.batch_size 0.0 +31 7 training.label_smoothing 0.03190819804824472 +31 8 model.embedding_dim 0.0 +31 8 optimizer.lr 0.029760729648328568 +31 8 training.batch_size 0.0 +31 8 training.label_smoothing 0.001232127305405134 +31 9 model.embedding_dim 1.0 +31 9 optimizer.lr 0.0393142027146239 +31 9 training.batch_size 2.0 +31 9 training.label_smoothing 0.03139442618632067 +31 10 model.embedding_dim 0.0 +31 10 optimizer.lr 0.0012600740688040058 +31 10 training.batch_size 1.0 +31 10 training.label_smoothing 0.0010061882819108305 +31 11 model.embedding_dim 1.0 +31 11 optimizer.lr 0.004578462390005253 +31 11 training.batch_size 0.0 +31 11 training.label_smoothing 0.8580175568519883 +31 12 model.embedding_dim 2.0 +31 12 optimizer.lr 0.09015779490409186 +31 12 training.batch_size 1.0 +31 12 
training.label_smoothing 0.3922331824094223 +31 13 model.embedding_dim 2.0 +31 13 optimizer.lr 0.003931304652278063 +31 13 training.batch_size 0.0 +31 13 training.label_smoothing 0.0022708648135396914 +31 14 model.embedding_dim 2.0 +31 14 optimizer.lr 0.004469420003181451 +31 14 training.batch_size 2.0 +31 14 training.label_smoothing 0.007875431762342481 +31 15 model.embedding_dim 0.0 +31 15 optimizer.lr 0.001537163386455438 +31 15 training.batch_size 1.0 +31 15 training.label_smoothing 0.0015235069561206304 +31 16 model.embedding_dim 2.0 +31 16 optimizer.lr 0.07004563198124791 +31 16 training.batch_size 0.0 +31 16 training.label_smoothing 0.08763565290215361 +31 17 model.embedding_dim 1.0 +31 17 optimizer.lr 0.00917465359411845 +31 17 training.batch_size 1.0 +31 17 training.label_smoothing 0.022851993240562685 +31 18 model.embedding_dim 0.0 +31 18 optimizer.lr 0.0011689532581874436 +31 18 training.batch_size 1.0 +31 18 training.label_smoothing 0.01670118329800974 +31 19 model.embedding_dim 2.0 +31 19 optimizer.lr 0.007918562035766375 +31 19 training.batch_size 1.0 +31 19 training.label_smoothing 0.04979571827616469 +31 20 model.embedding_dim 1.0 +31 20 optimizer.lr 0.0010579482841476947 +31 20 training.batch_size 1.0 +31 20 training.label_smoothing 0.022505259650529318 +31 21 model.embedding_dim 1.0 +31 21 optimizer.lr 0.007293614806189049 +31 21 training.batch_size 0.0 +31 21 training.label_smoothing 0.12354820180334704 +31 22 model.embedding_dim 2.0 +31 22 optimizer.lr 0.013109308618513608 +31 22 training.batch_size 0.0 +31 22 training.label_smoothing 0.0020250968011483387 +31 23 model.embedding_dim 0.0 +31 23 optimizer.lr 0.0010344076938920122 +31 23 training.batch_size 2.0 +31 23 training.label_smoothing 0.5464603508619675 +31 24 model.embedding_dim 1.0 +31 24 optimizer.lr 0.06326916583898079 +31 24 training.batch_size 0.0 +31 24 training.label_smoothing 0.7966319966402423 +31 25 model.embedding_dim 2.0 +31 25 optimizer.lr 0.012984337870423033 +31 25 
training.batch_size 1.0 +31 25 training.label_smoothing 0.006753200966833237 +31 26 model.embedding_dim 2.0 +31 26 optimizer.lr 0.0038325329587690225 +31 26 training.batch_size 1.0 +31 26 training.label_smoothing 0.0033175357164697055 +31 27 model.embedding_dim 1.0 +31 27 optimizer.lr 0.023539801122760034 +31 27 training.batch_size 1.0 +31 27 training.label_smoothing 0.34361452688107047 +31 28 model.embedding_dim 2.0 +31 28 optimizer.lr 0.031154005828856062 +31 28 training.batch_size 2.0 +31 28 training.label_smoothing 0.07611934821584461 +31 29 model.embedding_dim 1.0 +31 29 optimizer.lr 0.03219027568546408 +31 29 training.batch_size 2.0 +31 29 training.label_smoothing 0.0011236965806824062 +31 30 model.embedding_dim 1.0 +31 30 optimizer.lr 0.0034013505053779895 +31 30 training.batch_size 1.0 +31 30 training.label_smoothing 0.07229908682762441 +31 31 model.embedding_dim 2.0 +31 31 optimizer.lr 0.0015244098949088944 +31 31 training.batch_size 2.0 +31 31 training.label_smoothing 0.054455630241388074 +31 32 model.embedding_dim 0.0 +31 32 optimizer.lr 0.012209833957011005 +31 32 training.batch_size 1.0 +31 32 training.label_smoothing 0.04179145510191238 +31 33 model.embedding_dim 2.0 +31 33 optimizer.lr 0.0941888919422323 +31 33 training.batch_size 1.0 +31 33 training.label_smoothing 0.007108274670397096 +31 34 model.embedding_dim 1.0 +31 34 optimizer.lr 0.014463098038477222 +31 34 training.batch_size 2.0 +31 34 training.label_smoothing 0.02870256313331304 +31 35 model.embedding_dim 0.0 +31 35 optimizer.lr 0.01707756028710032 +31 35 training.batch_size 2.0 +31 35 training.label_smoothing 0.3774126494149558 +31 36 model.embedding_dim 1.0 +31 36 optimizer.lr 0.0010574819097707439 +31 36 training.batch_size 1.0 +31 36 training.label_smoothing 0.18286862247900704 +31 37 model.embedding_dim 2.0 +31 37 optimizer.lr 0.011584188393890884 +31 37 training.batch_size 1.0 +31 37 training.label_smoothing 0.01518960958741782 +31 38 model.embedding_dim 0.0 +31 38 optimizer.lr 
0.001918032696791805 +31 38 training.batch_size 2.0 +31 38 training.label_smoothing 0.1616860222898485 +31 39 model.embedding_dim 2.0 +31 39 optimizer.lr 0.037900535827801674 +31 39 training.batch_size 1.0 +31 39 training.label_smoothing 0.0182601017730822 +31 40 model.embedding_dim 0.0 +31 40 optimizer.lr 0.07917021510529815 +31 40 training.batch_size 1.0 +31 40 training.label_smoothing 0.005846399911536543 +31 41 model.embedding_dim 2.0 +31 41 optimizer.lr 0.004363013616779507 +31 41 training.batch_size 0.0 +31 41 training.label_smoothing 0.0010091355635144985 +31 42 model.embedding_dim 1.0 +31 42 optimizer.lr 0.03779301946784522 +31 42 training.batch_size 0.0 +31 42 training.label_smoothing 0.1573724640937335 +31 43 model.embedding_dim 0.0 +31 43 optimizer.lr 0.01064470541936195 +31 43 training.batch_size 0.0 +31 43 training.label_smoothing 0.03690906421126687 +31 44 model.embedding_dim 0.0 +31 44 optimizer.lr 0.011241645197420566 +31 44 training.batch_size 2.0 +31 44 training.label_smoothing 0.18769895312182713 +31 45 model.embedding_dim 1.0 +31 45 optimizer.lr 0.08338235267098362 +31 45 training.batch_size 0.0 +31 45 training.label_smoothing 0.8418171718134416 +31 46 model.embedding_dim 0.0 +31 46 optimizer.lr 0.09877104908117013 +31 46 training.batch_size 1.0 +31 46 training.label_smoothing 0.002648459962111206 +31 47 model.embedding_dim 1.0 +31 47 optimizer.lr 0.003856123596819053 +31 47 training.batch_size 1.0 +31 47 training.label_smoothing 0.5914431432754478 +31 48 model.embedding_dim 0.0 +31 48 optimizer.lr 0.008713946748808087 +31 48 training.batch_size 2.0 +31 48 training.label_smoothing 0.005283690781710932 +31 49 model.embedding_dim 1.0 +31 49 optimizer.lr 0.009291644955733118 +31 49 training.batch_size 2.0 +31 49 training.label_smoothing 0.0019843926155634056 +31 50 model.embedding_dim 2.0 +31 50 optimizer.lr 0.00958858075521818 +31 50 training.batch_size 0.0 +31 50 training.label_smoothing 0.09245492029690774 +31 51 model.embedding_dim 1.0 +31 51 
optimizer.lr 0.07820990577455822 +31 51 training.batch_size 1.0 +31 51 training.label_smoothing 0.04232090778639938 +31 52 model.embedding_dim 1.0 +31 52 optimizer.lr 0.021308338960152478 +31 52 training.batch_size 0.0 +31 52 training.label_smoothing 0.08568330573104121 +31 53 model.embedding_dim 0.0 +31 53 optimizer.lr 0.004170486715914446 +31 53 training.batch_size 2.0 +31 53 training.label_smoothing 0.011473061767698934 +31 54 model.embedding_dim 0.0 +31 54 optimizer.lr 0.03728512341146237 +31 54 training.batch_size 2.0 +31 54 training.label_smoothing 0.016716520142312515 +31 55 model.embedding_dim 0.0 +31 55 optimizer.lr 0.005063896142504573 +31 55 training.batch_size 1.0 +31 55 training.label_smoothing 0.31761531860143416 +31 56 model.embedding_dim 1.0 +31 56 optimizer.lr 0.03213353437256338 +31 56 training.batch_size 1.0 +31 56 training.label_smoothing 0.1045100977466397 +31 57 model.embedding_dim 1.0 +31 57 optimizer.lr 0.0019428412374062438 +31 57 training.batch_size 1.0 +31 57 training.label_smoothing 0.009929331292182259 +31 58 model.embedding_dim 2.0 +31 58 optimizer.lr 0.004263433743577964 +31 58 training.batch_size 1.0 +31 58 training.label_smoothing 0.025862664063578997 +31 59 model.embedding_dim 0.0 +31 59 optimizer.lr 0.013827007588153008 +31 59 training.batch_size 0.0 +31 59 training.label_smoothing 0.17104876393624652 +31 60 model.embedding_dim 2.0 +31 60 optimizer.lr 0.01831845067755076 +31 60 training.batch_size 0.0 +31 60 training.label_smoothing 0.851928879683893 +31 61 model.embedding_dim 1.0 +31 61 optimizer.lr 0.0038023653670072248 +31 61 training.batch_size 2.0 +31 61 training.label_smoothing 0.5388402976096555 +31 62 model.embedding_dim 1.0 +31 62 optimizer.lr 0.060790655281393186 +31 62 training.batch_size 2.0 +31 62 training.label_smoothing 0.014506421328144907 +31 63 model.embedding_dim 2.0 +31 63 optimizer.lr 0.0036112510377653852 +31 63 training.batch_size 2.0 +31 63 training.label_smoothing 0.01639941651887406 +31 64 
model.embedding_dim 0.0 +31 64 optimizer.lr 0.021598316729072965 +31 64 training.batch_size 2.0 +31 64 training.label_smoothing 0.0010331840085084881 +31 65 model.embedding_dim 1.0 +31 65 optimizer.lr 0.049382785842004846 +31 65 training.batch_size 1.0 +31 65 training.label_smoothing 0.05100881389617703 +31 66 model.embedding_dim 0.0 +31 66 optimizer.lr 0.0011490367032734143 +31 66 training.batch_size 0.0 +31 66 training.label_smoothing 0.0021652924884424467 +31 67 model.embedding_dim 1.0 +31 67 optimizer.lr 0.09075772579917894 +31 67 training.batch_size 2.0 +31 67 training.label_smoothing 0.020581540853011036 +31 68 model.embedding_dim 1.0 +31 68 optimizer.lr 0.008653612170472938 +31 68 training.batch_size 0.0 +31 68 training.label_smoothing 0.17353508456208766 +31 69 model.embedding_dim 1.0 +31 69 optimizer.lr 0.04846770403928834 +31 69 training.batch_size 2.0 +31 69 training.label_smoothing 0.28688930525583783 +31 70 model.embedding_dim 1.0 +31 70 optimizer.lr 0.026224799011583345 +31 70 training.batch_size 1.0 +31 70 training.label_smoothing 0.0815651864172413 +31 71 model.embedding_dim 2.0 +31 71 optimizer.lr 0.010706736925930814 +31 71 training.batch_size 1.0 +31 71 training.label_smoothing 0.037595222189964445 +31 72 model.embedding_dim 0.0 +31 72 optimizer.lr 0.003832712230933076 +31 72 training.batch_size 1.0 +31 72 training.label_smoothing 0.04443191732269332 +31 73 model.embedding_dim 2.0 +31 73 optimizer.lr 0.07692122344579694 +31 73 training.batch_size 2.0 +31 73 training.label_smoothing 0.0012172955432664479 +31 74 model.embedding_dim 1.0 +31 74 optimizer.lr 0.008063275109671034 +31 74 training.batch_size 2.0 +31 74 training.label_smoothing 0.031206654604204423 +31 75 model.embedding_dim 0.0 +31 75 optimizer.lr 0.02643686311339745 +31 75 training.batch_size 1.0 +31 75 training.label_smoothing 0.00181073680323046 +31 76 model.embedding_dim 0.0 +31 76 optimizer.lr 0.0024885574380307416 +31 76 training.batch_size 2.0 +31 76 training.label_smoothing 
0.0021603381450622464 +31 77 model.embedding_dim 1.0 +31 77 optimizer.lr 0.0029511132644799943 +31 77 training.batch_size 2.0 +31 77 training.label_smoothing 0.002225794352214892 +31 78 model.embedding_dim 1.0 +31 78 optimizer.lr 0.003153098093934993 +31 78 training.batch_size 0.0 +31 78 training.label_smoothing 0.0014782131101363318 +31 79 model.embedding_dim 0.0 +31 79 optimizer.lr 0.0019769313968732443 +31 79 training.batch_size 2.0 +31 79 training.label_smoothing 0.0010062797474284881 +31 80 model.embedding_dim 2.0 +31 80 optimizer.lr 0.004098981491998027 +31 80 training.batch_size 1.0 +31 80 training.label_smoothing 0.34722478780981914 +31 81 model.embedding_dim 1.0 +31 81 optimizer.lr 0.0045697905922128185 +31 81 training.batch_size 1.0 +31 81 training.label_smoothing 0.004639882757550675 +31 82 model.embedding_dim 0.0 +31 82 optimizer.lr 0.05553864656112823 +31 82 training.batch_size 2.0 +31 82 training.label_smoothing 0.025803916349063476 +31 83 model.embedding_dim 2.0 +31 83 optimizer.lr 0.047097383563305444 +31 83 training.batch_size 1.0 +31 83 training.label_smoothing 0.5468887572760459 +31 84 model.embedding_dim 2.0 +31 84 optimizer.lr 0.001334882252726086 +31 84 training.batch_size 0.0 +31 84 training.label_smoothing 0.0027114864541519664 +31 85 model.embedding_dim 2.0 +31 85 optimizer.lr 0.005490258428756033 +31 85 training.batch_size 2.0 +31 85 training.label_smoothing 0.0073890060367370666 +31 86 model.embedding_dim 2.0 +31 86 optimizer.lr 0.016217378028426807 +31 86 training.batch_size 1.0 +31 86 training.label_smoothing 0.3915323790833958 +31 87 model.embedding_dim 1.0 +31 87 optimizer.lr 0.018669560720922296 +31 87 training.batch_size 1.0 +31 87 training.label_smoothing 0.09280565360620407 +31 88 model.embedding_dim 2.0 +31 88 optimizer.lr 0.0034776713224322973 +31 88 training.batch_size 1.0 +31 88 training.label_smoothing 0.0533701380448131 +31 89 model.embedding_dim 2.0 +31 89 optimizer.lr 0.03644716997068823 +31 89 training.batch_size 1.0 +31 
89 training.label_smoothing 0.004664257179966699 +31 90 model.embedding_dim 2.0 +31 90 optimizer.lr 0.005394160677343672 +31 90 training.batch_size 2.0 +31 90 training.label_smoothing 0.0033738572431930855 +31 91 model.embedding_dim 1.0 +31 91 optimizer.lr 0.009249404755879424 +31 91 training.batch_size 0.0 +31 91 training.label_smoothing 0.009470361045463498 +31 92 model.embedding_dim 0.0 +31 92 optimizer.lr 0.06226868534368034 +31 92 training.batch_size 1.0 +31 92 training.label_smoothing 0.22504908334662924 +31 93 model.embedding_dim 0.0 +31 93 optimizer.lr 0.0015791878950286579 +31 93 training.batch_size 2.0 +31 93 training.label_smoothing 0.9825919847830947 +31 94 model.embedding_dim 0.0 +31 94 optimizer.lr 0.004337629842759522 +31 94 training.batch_size 2.0 +31 94 training.label_smoothing 0.013092007533904703 +31 95 model.embedding_dim 1.0 +31 95 optimizer.lr 0.0405148060958458 +31 95 training.batch_size 0.0 +31 95 training.label_smoothing 0.0014968161902225176 +31 96 model.embedding_dim 0.0 +31 96 optimizer.lr 0.06554711576412171 +31 96 training.batch_size 0.0 +31 96 training.label_smoothing 0.16782546400930792 +31 97 model.embedding_dim 2.0 +31 97 optimizer.lr 0.007255234608344353 +31 97 training.batch_size 2.0 +31 97 training.label_smoothing 0.0013618542462431947 +31 98 model.embedding_dim 0.0 +31 98 optimizer.lr 0.008086138082289859 +31 98 training.batch_size 1.0 +31 98 training.label_smoothing 0.037427309021314685 +31 99 model.embedding_dim 0.0 +31 99 optimizer.lr 0.0026615030500220286 +31 99 training.batch_size 2.0 +31 99 training.label_smoothing 0.6644027264646024 +31 100 model.embedding_dim 1.0 +31 100 optimizer.lr 0.02528673704775978 +31 100 training.batch_size 1.0 +31 100 training.label_smoothing 0.3507616133041164 +31 1 dataset """kinships""" +31 1 model """complex""" +31 1 loss """softplus""" +31 1 regularizer """no""" +31 1 optimizer """adam""" +31 1 training_loop """lcwa""" +31 1 evaluator """rankbased""" +31 2 dataset """kinships""" +31 2 model 
"""complex""" +31 2 loss """softplus""" +31 2 regularizer """no""" +31 2 optimizer """adam""" +31 2 training_loop """lcwa""" +31 2 evaluator """rankbased""" +31 3 dataset """kinships""" +31 3 model """complex""" +31 3 loss """softplus""" +31 3 regularizer """no""" +31 3 optimizer """adam""" +31 3 training_loop """lcwa""" +31 3 evaluator """rankbased""" +31 4 dataset """kinships""" +31 4 model """complex""" +31 4 loss """softplus""" +31 4 regularizer """no""" +31 4 optimizer """adam""" +31 4 training_loop """lcwa""" +31 4 evaluator """rankbased""" +31 5 dataset """kinships""" +31 5 model """complex""" +31 5 loss """softplus""" +31 5 regularizer """no""" +31 5 optimizer """adam""" +31 5 training_loop """lcwa""" +31 5 evaluator """rankbased""" +31 6 dataset """kinships""" +31 6 model """complex""" +31 6 loss """softplus""" +31 6 regularizer """no""" +31 6 optimizer """adam""" +31 6 training_loop """lcwa""" +31 6 evaluator """rankbased""" +31 7 dataset """kinships""" +31 7 model """complex""" +31 7 loss """softplus""" +31 7 regularizer """no""" +31 7 optimizer """adam""" +31 7 training_loop """lcwa""" +31 7 evaluator """rankbased""" +31 8 dataset """kinships""" +31 8 model """complex""" +31 8 loss """softplus""" +31 8 regularizer """no""" +31 8 optimizer """adam""" +31 8 training_loop """lcwa""" +31 8 evaluator """rankbased""" +31 9 dataset """kinships""" +31 9 model """complex""" +31 9 loss """softplus""" +31 9 regularizer """no""" +31 9 optimizer """adam""" +31 9 training_loop """lcwa""" +31 9 evaluator """rankbased""" +31 10 dataset """kinships""" +31 10 model """complex""" +31 10 loss """softplus""" +31 10 regularizer """no""" +31 10 optimizer """adam""" +31 10 training_loop """lcwa""" +31 10 evaluator """rankbased""" +31 11 dataset """kinships""" +31 11 model """complex""" +31 11 loss """softplus""" +31 11 regularizer """no""" +31 11 optimizer """adam""" +31 11 training_loop """lcwa""" +31 11 evaluator """rankbased""" +31 12 dataset """kinships""" +31 12 model 
"""complex""" +31 12 loss """softplus""" +31 12 regularizer """no""" +31 12 optimizer """adam""" +31 12 training_loop """lcwa""" +31 12 evaluator """rankbased""" +31 13 dataset """kinships""" +31 13 model """complex""" +31 13 loss """softplus""" +31 13 regularizer """no""" +31 13 optimizer """adam""" +31 13 training_loop """lcwa""" +31 13 evaluator """rankbased""" +31 14 dataset """kinships""" +31 14 model """complex""" +31 14 loss """softplus""" +31 14 regularizer """no""" +31 14 optimizer """adam""" +31 14 training_loop """lcwa""" +31 14 evaluator """rankbased""" +31 15 dataset """kinships""" +31 15 model """complex""" +31 15 loss """softplus""" +31 15 regularizer """no""" +31 15 optimizer """adam""" +31 15 training_loop """lcwa""" +31 15 evaluator """rankbased""" +31 16 dataset """kinships""" +31 16 model """complex""" +31 16 loss """softplus""" +31 16 regularizer """no""" +31 16 optimizer """adam""" +31 16 training_loop """lcwa""" +31 16 evaluator """rankbased""" +31 17 dataset """kinships""" +31 17 model """complex""" +31 17 loss """softplus""" +31 17 regularizer """no""" +31 17 optimizer """adam""" +31 17 training_loop """lcwa""" +31 17 evaluator """rankbased""" +31 18 dataset """kinships""" +31 18 model """complex""" +31 18 loss """softplus""" +31 18 regularizer """no""" +31 18 optimizer """adam""" +31 18 training_loop """lcwa""" +31 18 evaluator """rankbased""" +31 19 dataset """kinships""" +31 19 model """complex""" +31 19 loss """softplus""" +31 19 regularizer """no""" +31 19 optimizer """adam""" +31 19 training_loop """lcwa""" +31 19 evaluator """rankbased""" +31 20 dataset """kinships""" +31 20 model """complex""" +31 20 loss """softplus""" +31 20 regularizer """no""" +31 20 optimizer """adam""" +31 20 training_loop """lcwa""" +31 20 evaluator """rankbased""" +31 21 dataset """kinships""" +31 21 model """complex""" +31 21 loss """softplus""" +31 21 regularizer """no""" +31 21 optimizer """adam""" +31 21 training_loop """lcwa""" +31 21 evaluator 
"""rankbased""" +31 22 dataset """kinships""" +31 22 model """complex""" +31 22 loss """softplus""" +31 22 regularizer """no""" +31 22 optimizer """adam""" +31 22 training_loop """lcwa""" +31 22 evaluator """rankbased""" +31 23 dataset """kinships""" +31 23 model """complex""" +31 23 loss """softplus""" +31 23 regularizer """no""" +31 23 optimizer """adam""" +31 23 training_loop """lcwa""" +31 23 evaluator """rankbased""" +31 24 dataset """kinships""" +31 24 model """complex""" +31 24 loss """softplus""" +31 24 regularizer """no""" +31 24 optimizer """adam""" +31 24 training_loop """lcwa""" +31 24 evaluator """rankbased""" +31 25 dataset """kinships""" +31 25 model """complex""" +31 25 loss """softplus""" +31 25 regularizer """no""" +31 25 optimizer """adam""" +31 25 training_loop """lcwa""" +31 25 evaluator """rankbased""" +31 26 dataset """kinships""" +31 26 model """complex""" +31 26 loss """softplus""" +31 26 regularizer """no""" +31 26 optimizer """adam""" +31 26 training_loop """lcwa""" +31 26 evaluator """rankbased""" +31 27 dataset """kinships""" +31 27 model """complex""" +31 27 loss """softplus""" +31 27 regularizer """no""" +31 27 optimizer """adam""" +31 27 training_loop """lcwa""" +31 27 evaluator """rankbased""" +31 28 dataset """kinships""" +31 28 model """complex""" +31 28 loss """softplus""" +31 28 regularizer """no""" +31 28 optimizer """adam""" +31 28 training_loop """lcwa""" +31 28 evaluator """rankbased""" +31 29 dataset """kinships""" +31 29 model """complex""" +31 29 loss """softplus""" +31 29 regularizer """no""" +31 29 optimizer """adam""" +31 29 training_loop """lcwa""" +31 29 evaluator """rankbased""" +31 30 dataset """kinships""" +31 30 model """complex""" +31 30 loss """softplus""" +31 30 regularizer """no""" +31 30 optimizer """adam""" +31 30 training_loop """lcwa""" +31 30 evaluator """rankbased""" +31 31 dataset """kinships""" +31 31 model """complex""" +31 31 loss """softplus""" +31 31 regularizer """no""" +31 31 optimizer 
"""adam""" +31 31 training_loop """lcwa""" +31 31 evaluator """rankbased""" +31 32 dataset """kinships""" +31 32 model """complex""" +31 32 loss """softplus""" +31 32 regularizer """no""" +31 32 optimizer """adam""" +31 32 training_loop """lcwa""" +31 32 evaluator """rankbased""" +31 33 dataset """kinships""" +31 33 model """complex""" +31 33 loss """softplus""" +31 33 regularizer """no""" +31 33 optimizer """adam""" +31 33 training_loop """lcwa""" +31 33 evaluator """rankbased""" +31 34 dataset """kinships""" +31 34 model """complex""" +31 34 loss """softplus""" +31 34 regularizer """no""" +31 34 optimizer """adam""" +31 34 training_loop """lcwa""" +31 34 evaluator """rankbased""" +31 35 dataset """kinships""" +31 35 model """complex""" +31 35 loss """softplus""" +31 35 regularizer """no""" +31 35 optimizer """adam""" +31 35 training_loop """lcwa""" +31 35 evaluator """rankbased""" +31 36 dataset """kinships""" +31 36 model """complex""" +31 36 loss """softplus""" +31 36 regularizer """no""" +31 36 optimizer """adam""" +31 36 training_loop """lcwa""" +31 36 evaluator """rankbased""" +31 37 dataset """kinships""" +31 37 model """complex""" +31 37 loss """softplus""" +31 37 regularizer """no""" +31 37 optimizer """adam""" +31 37 training_loop """lcwa""" +31 37 evaluator """rankbased""" +31 38 dataset """kinships""" +31 38 model """complex""" +31 38 loss """softplus""" +31 38 regularizer """no""" +31 38 optimizer """adam""" +31 38 training_loop """lcwa""" +31 38 evaluator """rankbased""" +31 39 dataset """kinships""" +31 39 model """complex""" +31 39 loss """softplus""" +31 39 regularizer """no""" +31 39 optimizer """adam""" +31 39 training_loop """lcwa""" +31 39 evaluator """rankbased""" +31 40 dataset """kinships""" +31 40 model """complex""" +31 40 loss """softplus""" +31 40 regularizer """no""" +31 40 optimizer """adam""" +31 40 training_loop """lcwa""" +31 40 evaluator """rankbased""" +31 41 dataset """kinships""" +31 41 model """complex""" +31 41 loss 
"""softplus""" +31 41 regularizer """no""" +31 41 optimizer """adam""" +31 41 training_loop """lcwa""" +31 41 evaluator """rankbased""" +31 42 dataset """kinships""" +31 42 model """complex""" +31 42 loss """softplus""" +31 42 regularizer """no""" +31 42 optimizer """adam""" +31 42 training_loop """lcwa""" +31 42 evaluator """rankbased""" +31 43 dataset """kinships""" +31 43 model """complex""" +31 43 loss """softplus""" +31 43 regularizer """no""" +31 43 optimizer """adam""" +31 43 training_loop """lcwa""" +31 43 evaluator """rankbased""" +31 44 dataset """kinships""" +31 44 model """complex""" +31 44 loss """softplus""" +31 44 regularizer """no""" +31 44 optimizer """adam""" +31 44 training_loop """lcwa""" +31 44 evaluator """rankbased""" +31 45 dataset """kinships""" +31 45 model """complex""" +31 45 loss """softplus""" +31 45 regularizer """no""" +31 45 optimizer """adam""" +31 45 training_loop """lcwa""" +31 45 evaluator """rankbased""" +31 46 dataset """kinships""" +31 46 model """complex""" +31 46 loss """softplus""" +31 46 regularizer """no""" +31 46 optimizer """adam""" +31 46 training_loop """lcwa""" +31 46 evaluator """rankbased""" +31 47 dataset """kinships""" +31 47 model """complex""" +31 47 loss """softplus""" +31 47 regularizer """no""" +31 47 optimizer """adam""" +31 47 training_loop """lcwa""" +31 47 evaluator """rankbased""" +31 48 dataset """kinships""" +31 48 model """complex""" +31 48 loss """softplus""" +31 48 regularizer """no""" +31 48 optimizer """adam""" +31 48 training_loop """lcwa""" +31 48 evaluator """rankbased""" +31 49 dataset """kinships""" +31 49 model """complex""" +31 49 loss """softplus""" +31 49 regularizer """no""" +31 49 optimizer """adam""" +31 49 training_loop """lcwa""" +31 49 evaluator """rankbased""" +31 50 dataset """kinships""" +31 50 model """complex""" +31 50 loss """softplus""" +31 50 regularizer """no""" +31 50 optimizer """adam""" +31 50 training_loop """lcwa""" +31 50 evaluator """rankbased""" +31 51 dataset 
"""kinships""" +31 51 model """complex""" +31 51 loss """softplus""" +31 51 regularizer """no""" +31 51 optimizer """adam""" +31 51 training_loop """lcwa""" +31 51 evaluator """rankbased""" +31 52 dataset """kinships""" +31 52 model """complex""" +31 52 loss """softplus""" +31 52 regularizer """no""" +31 52 optimizer """adam""" +31 52 training_loop """lcwa""" +31 52 evaluator """rankbased""" +31 53 dataset """kinships""" +31 53 model """complex""" +31 53 loss """softplus""" +31 53 regularizer """no""" +31 53 optimizer """adam""" +31 53 training_loop """lcwa""" +31 53 evaluator """rankbased""" +31 54 dataset """kinships""" +31 54 model """complex""" +31 54 loss """softplus""" +31 54 regularizer """no""" +31 54 optimizer """adam""" +31 54 training_loop """lcwa""" +31 54 evaluator """rankbased""" +31 55 dataset """kinships""" +31 55 model """complex""" +31 55 loss """softplus""" +31 55 regularizer """no""" +31 55 optimizer """adam""" +31 55 training_loop """lcwa""" +31 55 evaluator """rankbased""" +31 56 dataset """kinships""" +31 56 model """complex""" +31 56 loss """softplus""" +31 56 regularizer """no""" +31 56 optimizer """adam""" +31 56 training_loop """lcwa""" +31 56 evaluator """rankbased""" +31 57 dataset """kinships""" +31 57 model """complex""" +31 57 loss """softplus""" +31 57 regularizer """no""" +31 57 optimizer """adam""" +31 57 training_loop """lcwa""" +31 57 evaluator """rankbased""" +31 58 dataset """kinships""" +31 58 model """complex""" +31 58 loss """softplus""" +31 58 regularizer """no""" +31 58 optimizer """adam""" +31 58 training_loop """lcwa""" +31 58 evaluator """rankbased""" +31 59 dataset """kinships""" +31 59 model """complex""" +31 59 loss """softplus""" +31 59 regularizer """no""" +31 59 optimizer """adam""" +31 59 training_loop """lcwa""" +31 59 evaluator """rankbased""" +31 60 dataset """kinships""" +31 60 model """complex""" +31 60 loss """softplus""" +31 60 regularizer """no""" +31 60 optimizer """adam""" +31 60 training_loop 
"""lcwa""" +31 60 evaluator """rankbased""" +31 61 dataset """kinships""" +31 61 model """complex""" +31 61 loss """softplus""" +31 61 regularizer """no""" +31 61 optimizer """adam""" +31 61 training_loop """lcwa""" +31 61 evaluator """rankbased""" +31 62 dataset """kinships""" +31 62 model """complex""" +31 62 loss """softplus""" +31 62 regularizer """no""" +31 62 optimizer """adam""" +31 62 training_loop """lcwa""" +31 62 evaluator """rankbased""" +31 63 dataset """kinships""" +31 63 model """complex""" +31 63 loss """softplus""" +31 63 regularizer """no""" +31 63 optimizer """adam""" +31 63 training_loop """lcwa""" +31 63 evaluator """rankbased""" +31 64 dataset """kinships""" +31 64 model """complex""" +31 64 loss """softplus""" +31 64 regularizer """no""" +31 64 optimizer """adam""" +31 64 training_loop """lcwa""" +31 64 evaluator """rankbased""" +31 65 dataset """kinships""" +31 65 model """complex""" +31 65 loss """softplus""" +31 65 regularizer """no""" +31 65 optimizer """adam""" +31 65 training_loop """lcwa""" +31 65 evaluator """rankbased""" +31 66 dataset """kinships""" +31 66 model """complex""" +31 66 loss """softplus""" +31 66 regularizer """no""" +31 66 optimizer """adam""" +31 66 training_loop """lcwa""" +31 66 evaluator """rankbased""" +31 67 dataset """kinships""" +31 67 model """complex""" +31 67 loss """softplus""" +31 67 regularizer """no""" +31 67 optimizer """adam""" +31 67 training_loop """lcwa""" +31 67 evaluator """rankbased""" +31 68 dataset """kinships""" +31 68 model """complex""" +31 68 loss """softplus""" +31 68 regularizer """no""" +31 68 optimizer """adam""" +31 68 training_loop """lcwa""" +31 68 evaluator """rankbased""" +31 69 dataset """kinships""" +31 69 model """complex""" +31 69 loss """softplus""" +31 69 regularizer """no""" +31 69 optimizer """adam""" +31 69 training_loop """lcwa""" +31 69 evaluator """rankbased""" +31 70 dataset """kinships""" +31 70 model """complex""" +31 70 loss """softplus""" +31 70 regularizer 
"""no""" +31 70 optimizer """adam""" +31 70 training_loop """lcwa""" +31 70 evaluator """rankbased""" +31 71 dataset """kinships""" +31 71 model """complex""" +31 71 loss """softplus""" +31 71 regularizer """no""" +31 71 optimizer """adam""" +31 71 training_loop """lcwa""" +31 71 evaluator """rankbased""" +31 72 dataset """kinships""" +31 72 model """complex""" +31 72 loss """softplus""" +31 72 regularizer """no""" +31 72 optimizer """adam""" +31 72 training_loop """lcwa""" +31 72 evaluator """rankbased""" +31 73 dataset """kinships""" +31 73 model """complex""" +31 73 loss """softplus""" +31 73 regularizer """no""" +31 73 optimizer """adam""" +31 73 training_loop """lcwa""" +31 73 evaluator """rankbased""" +31 74 dataset """kinships""" +31 74 model """complex""" +31 74 loss """softplus""" +31 74 regularizer """no""" +31 74 optimizer """adam""" +31 74 training_loop """lcwa""" +31 74 evaluator """rankbased""" +31 75 dataset """kinships""" +31 75 model """complex""" +31 75 loss """softplus""" +31 75 regularizer """no""" +31 75 optimizer """adam""" +31 75 training_loop """lcwa""" +31 75 evaluator """rankbased""" +31 76 dataset """kinships""" +31 76 model """complex""" +31 76 loss """softplus""" +31 76 regularizer """no""" +31 76 optimizer """adam""" +31 76 training_loop """lcwa""" +31 76 evaluator """rankbased""" +31 77 dataset """kinships""" +31 77 model """complex""" +31 77 loss """softplus""" +31 77 regularizer """no""" +31 77 optimizer """adam""" +31 77 training_loop """lcwa""" +31 77 evaluator """rankbased""" +31 78 dataset """kinships""" +31 78 model """complex""" +31 78 loss """softplus""" +31 78 regularizer """no""" +31 78 optimizer """adam""" +31 78 training_loop """lcwa""" +31 78 evaluator """rankbased""" +31 79 dataset """kinships""" +31 79 model """complex""" +31 79 loss """softplus""" +31 79 regularizer """no""" +31 79 optimizer """adam""" +31 79 training_loop """lcwa""" +31 79 evaluator """rankbased""" +31 80 dataset """kinships""" +31 80 model 
"""complex""" +31 80 loss """softplus""" +31 80 regularizer """no""" +31 80 optimizer """adam""" +31 80 training_loop """lcwa""" +31 80 evaluator """rankbased""" +31 81 dataset """kinships""" +31 81 model """complex""" +31 81 loss """softplus""" +31 81 regularizer """no""" +31 81 optimizer """adam""" +31 81 training_loop """lcwa""" +31 81 evaluator """rankbased""" +31 82 dataset """kinships""" +31 82 model """complex""" +31 82 loss """softplus""" +31 82 regularizer """no""" +31 82 optimizer """adam""" +31 82 training_loop """lcwa""" +31 82 evaluator """rankbased""" +31 83 dataset """kinships""" +31 83 model """complex""" +31 83 loss """softplus""" +31 83 regularizer """no""" +31 83 optimizer """adam""" +31 83 training_loop """lcwa""" +31 83 evaluator """rankbased""" +31 84 dataset """kinships""" +31 84 model """complex""" +31 84 loss """softplus""" +31 84 regularizer """no""" +31 84 optimizer """adam""" +31 84 training_loop """lcwa""" +31 84 evaluator """rankbased""" +31 85 dataset """kinships""" +31 85 model """complex""" +31 85 loss """softplus""" +31 85 regularizer """no""" +31 85 optimizer """adam""" +31 85 training_loop """lcwa""" +31 85 evaluator """rankbased""" +31 86 dataset """kinships""" +31 86 model """complex""" +31 86 loss """softplus""" +31 86 regularizer """no""" +31 86 optimizer """adam""" +31 86 training_loop """lcwa""" +31 86 evaluator """rankbased""" +31 87 dataset """kinships""" +31 87 model """complex""" +31 87 loss """softplus""" +31 87 regularizer """no""" +31 87 optimizer """adam""" +31 87 training_loop """lcwa""" +31 87 evaluator """rankbased""" +31 88 dataset """kinships""" +31 88 model """complex""" +31 88 loss """softplus""" +31 88 regularizer """no""" +31 88 optimizer """adam""" +31 88 training_loop """lcwa""" +31 88 evaluator """rankbased""" +31 89 dataset """kinships""" +31 89 model """complex""" +31 89 loss """softplus""" +31 89 regularizer """no""" +31 89 optimizer """adam""" +31 89 training_loop """lcwa""" +31 89 evaluator 
"""rankbased""" +31 90 dataset """kinships""" +31 90 model """complex""" +31 90 loss """softplus""" +31 90 regularizer """no""" +31 90 optimizer """adam""" +31 90 training_loop """lcwa""" +31 90 evaluator """rankbased""" +31 91 dataset """kinships""" +31 91 model """complex""" +31 91 loss """softplus""" +31 91 regularizer """no""" +31 91 optimizer """adam""" +31 91 training_loop """lcwa""" +31 91 evaluator """rankbased""" +31 92 dataset """kinships""" +31 92 model """complex""" +31 92 loss """softplus""" +31 92 regularizer """no""" +31 92 optimizer """adam""" +31 92 training_loop """lcwa""" +31 92 evaluator """rankbased""" +31 93 dataset """kinships""" +31 93 model """complex""" +31 93 loss """softplus""" +31 93 regularizer """no""" +31 93 optimizer """adam""" +31 93 training_loop """lcwa""" +31 93 evaluator """rankbased""" +31 94 dataset """kinships""" +31 94 model """complex""" +31 94 loss """softplus""" +31 94 regularizer """no""" +31 94 optimizer """adam""" +31 94 training_loop """lcwa""" +31 94 evaluator """rankbased""" +31 95 dataset """kinships""" +31 95 model """complex""" +31 95 loss """softplus""" +31 95 regularizer """no""" +31 95 optimizer """adam""" +31 95 training_loop """lcwa""" +31 95 evaluator """rankbased""" +31 96 dataset """kinships""" +31 96 model """complex""" +31 96 loss """softplus""" +31 96 regularizer """no""" +31 96 optimizer """adam""" +31 96 training_loop """lcwa""" +31 96 evaluator """rankbased""" +31 97 dataset """kinships""" +31 97 model """complex""" +31 97 loss """softplus""" +31 97 regularizer """no""" +31 97 optimizer """adam""" +31 97 training_loop """lcwa""" +31 97 evaluator """rankbased""" +31 98 dataset """kinships""" +31 98 model """complex""" +31 98 loss """softplus""" +31 98 regularizer """no""" +31 98 optimizer """adam""" +31 98 training_loop """lcwa""" +31 98 evaluator """rankbased""" +31 99 dataset """kinships""" +31 99 model """complex""" +31 99 loss """softplus""" +31 99 regularizer """no""" +31 99 optimizer 
"""adam""" +31 99 training_loop """lcwa""" +31 99 evaluator """rankbased""" +31 100 dataset """kinships""" +31 100 model """complex""" +31 100 loss """softplus""" +31 100 regularizer """no""" +31 100 optimizer """adam""" +31 100 training_loop """lcwa""" +31 100 evaluator """rankbased""" +32 1 model.embedding_dim 0.0 +32 1 optimizer.lr 0.05535965467840411 +32 1 training.batch_size 1.0 +32 1 training.label_smoothing 0.0050273296265715695 +32 2 model.embedding_dim 1.0 +32 2 optimizer.lr 0.032185985530369554 +32 2 training.batch_size 2.0 +32 2 training.label_smoothing 0.0017433789186058824 +32 3 model.embedding_dim 1.0 +32 3 optimizer.lr 0.032257596431853754 +32 3 training.batch_size 1.0 +32 3 training.label_smoothing 0.1357606023771682 +32 4 model.embedding_dim 0.0 +32 4 optimizer.lr 0.0012289699283577493 +32 4 training.batch_size 1.0 +32 4 training.label_smoothing 0.04516005840540908 +32 5 model.embedding_dim 2.0 +32 5 optimizer.lr 0.0012120810644964909 +32 5 training.batch_size 1.0 +32 5 training.label_smoothing 0.017630715486193885 +32 6 model.embedding_dim 0.0 +32 6 optimizer.lr 0.034661629968494895 +32 6 training.batch_size 0.0 +32 6 training.label_smoothing 0.008755587961216447 +32 7 model.embedding_dim 1.0 +32 7 optimizer.lr 0.002077626923658975 +32 7 training.batch_size 2.0 +32 7 training.label_smoothing 0.004349151695406041 +32 8 model.embedding_dim 1.0 +32 8 optimizer.lr 0.0319435618590828 +32 8 training.batch_size 1.0 +32 8 training.label_smoothing 0.08613648178279877 +32 9 model.embedding_dim 2.0 +32 9 optimizer.lr 0.08706562100767917 +32 9 training.batch_size 0.0 +32 9 training.label_smoothing 0.05589382376057027 +32 10 model.embedding_dim 2.0 +32 10 optimizer.lr 0.041668598185576604 +32 10 training.batch_size 0.0 +32 10 training.label_smoothing 0.16752093037083318 +32 11 model.embedding_dim 1.0 +32 11 optimizer.lr 0.004732314662673953 +32 11 training.batch_size 0.0 +32 11 training.label_smoothing 0.5837834286402273 +32 12 model.embedding_dim 2.0 +32 12 
optimizer.lr 0.002986137776524576 +32 12 training.batch_size 1.0 +32 12 training.label_smoothing 0.004742466492827637 +32 13 model.embedding_dim 0.0 +32 13 optimizer.lr 0.004500128329186306 +32 13 training.batch_size 0.0 +32 13 training.label_smoothing 0.29687575293115376 +32 14 model.embedding_dim 1.0 +32 14 optimizer.lr 0.0016476249397146055 +32 14 training.batch_size 1.0 +32 14 training.label_smoothing 0.0039870616337947026 +32 15 model.embedding_dim 2.0 +32 15 optimizer.lr 0.045024019134797984 +32 15 training.batch_size 2.0 +32 15 training.label_smoothing 0.02164497388510382 +32 16 model.embedding_dim 0.0 +32 16 optimizer.lr 0.0028449896329201555 +32 16 training.batch_size 2.0 +32 16 training.label_smoothing 0.6446152219935439 +32 17 model.embedding_dim 0.0 +32 17 optimizer.lr 0.09990394751699572 +32 17 training.batch_size 2.0 +32 17 training.label_smoothing 0.010692783538754021 +32 18 model.embedding_dim 1.0 +32 18 optimizer.lr 0.09973000566359379 +32 18 training.batch_size 1.0 +32 18 training.label_smoothing 0.02794029875935272 +32 19 model.embedding_dim 2.0 +32 19 optimizer.lr 0.003506507628037419 +32 19 training.batch_size 2.0 +32 19 training.label_smoothing 0.0676098378534345 +32 20 model.embedding_dim 1.0 +32 20 optimizer.lr 0.04774885205211372 +32 20 training.batch_size 0.0 +32 20 training.label_smoothing 0.08605632853050897 +32 21 model.embedding_dim 0.0 +32 21 optimizer.lr 0.0018543631420514734 +32 21 training.batch_size 2.0 +32 21 training.label_smoothing 0.0015362098874822506 +32 22 model.embedding_dim 1.0 +32 22 optimizer.lr 0.004151557950408773 +32 22 training.batch_size 0.0 +32 22 training.label_smoothing 0.0666117660697239 +32 23 model.embedding_dim 2.0 +32 23 optimizer.lr 0.06657568461833001 +32 23 training.batch_size 2.0 +32 23 training.label_smoothing 0.35633529982364304 +32 24 model.embedding_dim 0.0 +32 24 optimizer.lr 0.00825844652375911 +32 24 training.batch_size 2.0 +32 24 training.label_smoothing 0.04729398804631877 +32 25 
model.embedding_dim 2.0 +32 25 optimizer.lr 0.034571423774433195 +32 25 training.batch_size 0.0 +32 25 training.label_smoothing 0.03384009809082132 +32 26 model.embedding_dim 2.0 +32 26 optimizer.lr 0.013573688246119892 +32 26 training.batch_size 0.0 +32 26 training.label_smoothing 0.9652095704480702 +32 27 model.embedding_dim 1.0 +32 27 optimizer.lr 0.020804261567631318 +32 27 training.batch_size 1.0 +32 27 training.label_smoothing 0.0031939147769920982 +32 28 model.embedding_dim 0.0 +32 28 optimizer.lr 0.0051646032693806335 +32 28 training.batch_size 2.0 +32 28 training.label_smoothing 0.21886312472465555 +32 29 model.embedding_dim 2.0 +32 29 optimizer.lr 0.001499195170388985 +32 29 training.batch_size 1.0 +32 29 training.label_smoothing 0.029484768781373547 +32 30 model.embedding_dim 1.0 +32 30 optimizer.lr 0.0010216428358756646 +32 30 training.batch_size 1.0 +32 30 training.label_smoothing 0.003074707634955245 +32 31 model.embedding_dim 0.0 +32 31 optimizer.lr 0.002805240513075439 +32 31 training.batch_size 0.0 +32 31 training.label_smoothing 0.0018458695050401984 +32 32 model.embedding_dim 0.0 +32 32 optimizer.lr 0.006640287279258904 +32 32 training.batch_size 1.0 +32 32 training.label_smoothing 0.11533510195284512 +32 33 model.embedding_dim 0.0 +32 33 optimizer.lr 0.018486643131432147 +32 33 training.batch_size 1.0 +32 33 training.label_smoothing 0.08259021380046294 +32 34 model.embedding_dim 0.0 +32 34 optimizer.lr 0.0024071731589095776 +32 34 training.batch_size 1.0 +32 34 training.label_smoothing 0.2799483536277205 +32 35 model.embedding_dim 2.0 +32 35 optimizer.lr 0.022302102484576144 +32 35 training.batch_size 0.0 +32 35 training.label_smoothing 0.026875993128439003 +32 36 model.embedding_dim 2.0 +32 36 optimizer.lr 0.006979692748490084 +32 36 training.batch_size 2.0 +32 36 training.label_smoothing 0.09372013443569295 +32 37 model.embedding_dim 0.0 +32 37 optimizer.lr 0.003426140697628356 +32 37 training.batch_size 2.0 +32 37 training.label_smoothing 
0.13050001609210482 +32 38 model.embedding_dim 1.0 +32 38 optimizer.lr 0.0014443940811115598 +32 38 training.batch_size 0.0 +32 38 training.label_smoothing 0.39085928097133793 +32 39 model.embedding_dim 0.0 +32 39 optimizer.lr 0.00782333604505672 +32 39 training.batch_size 1.0 +32 39 training.label_smoothing 0.004420466826199779 +32 40 model.embedding_dim 0.0 +32 40 optimizer.lr 0.004476260804359542 +32 40 training.batch_size 1.0 +32 40 training.label_smoothing 0.4834602106360868 +32 41 model.embedding_dim 1.0 +32 41 optimizer.lr 0.0017233349926060809 +32 41 training.batch_size 0.0 +32 41 training.label_smoothing 0.001215625299819768 +32 42 model.embedding_dim 0.0 +32 42 optimizer.lr 0.009681394777035035 +32 42 training.batch_size 0.0 +32 42 training.label_smoothing 0.015152619659186996 +32 43 model.embedding_dim 2.0 +32 43 optimizer.lr 0.008792001894350853 +32 43 training.batch_size 1.0 +32 43 training.label_smoothing 0.04825365526916085 +32 44 model.embedding_dim 2.0 +32 44 optimizer.lr 0.01915805957211306 +32 44 training.batch_size 1.0 +32 44 training.label_smoothing 0.1082431753962839 +32 45 model.embedding_dim 1.0 +32 45 optimizer.lr 0.018010156689378987 +32 45 training.batch_size 0.0 +32 45 training.label_smoothing 0.8357712020961731 +32 46 model.embedding_dim 2.0 +32 46 optimizer.lr 0.03352790606109228 +32 46 training.batch_size 2.0 +32 46 training.label_smoothing 0.08538964884284149 +32 47 model.embedding_dim 2.0 +32 47 optimizer.lr 0.018476769208232994 +32 47 training.batch_size 2.0 +32 47 training.label_smoothing 0.12750577727486093 +32 48 model.embedding_dim 1.0 +32 48 optimizer.lr 0.0013504980048264495 +32 48 training.batch_size 0.0 +32 48 training.label_smoothing 0.009172247470978525 +32 49 model.embedding_dim 2.0 +32 49 optimizer.lr 0.004099144440289005 +32 49 training.batch_size 1.0 +32 49 training.label_smoothing 0.028859548382096274 +32 50 model.embedding_dim 1.0 +32 50 optimizer.lr 0.0024573351495283586 +32 50 training.batch_size 1.0 +32 50 
training.label_smoothing 0.0014437863610577914 +32 51 model.embedding_dim 0.0 +32 51 optimizer.lr 0.002203207588093328 +32 51 training.batch_size 2.0 +32 51 training.label_smoothing 0.9975162783203089 +32 52 model.embedding_dim 1.0 +32 52 optimizer.lr 0.02226483489762017 +32 52 training.batch_size 2.0 +32 52 training.label_smoothing 0.043415729573489284 +32 53 model.embedding_dim 0.0 +32 53 optimizer.lr 0.0345035072013729 +32 53 training.batch_size 0.0 +32 53 training.label_smoothing 0.02629230854280229 +32 54 model.embedding_dim 1.0 +32 54 optimizer.lr 0.037888409324562555 +32 54 training.batch_size 0.0 +32 54 training.label_smoothing 0.15935881184726838 +32 55 model.embedding_dim 1.0 +32 55 optimizer.lr 0.005598700516673096 +32 55 training.batch_size 0.0 +32 55 training.label_smoothing 0.0187473418605368 +32 56 model.embedding_dim 0.0 +32 56 optimizer.lr 0.09232652623657887 +32 56 training.batch_size 1.0 +32 56 training.label_smoothing 0.38998306809466554 +32 57 model.embedding_dim 0.0 +32 57 optimizer.lr 0.001967748676958928 +32 57 training.batch_size 0.0 +32 57 training.label_smoothing 0.02281161402772067 +32 58 model.embedding_dim 0.0 +32 58 optimizer.lr 0.0013939104015359487 +32 58 training.batch_size 2.0 +32 58 training.label_smoothing 0.12316754334199266 +32 59 model.embedding_dim 0.0 +32 59 optimizer.lr 0.0329351653431257 +32 59 training.batch_size 2.0 +32 59 training.label_smoothing 0.003320156908921824 +32 60 model.embedding_dim 2.0 +32 60 optimizer.lr 0.0048926993832380744 +32 60 training.batch_size 1.0 +32 60 training.label_smoothing 0.004493083852743876 +32 61 model.embedding_dim 2.0 +32 61 optimizer.lr 0.024667718339660364 +32 61 training.batch_size 2.0 +32 61 training.label_smoothing 0.0032484872416684487 +32 62 model.embedding_dim 2.0 +32 62 optimizer.lr 0.0013821466503218993 +32 62 training.batch_size 2.0 +32 62 training.label_smoothing 0.32679355044986874 +32 63 model.embedding_dim 1.0 +32 63 optimizer.lr 0.00485494566178385 +32 63 
training.batch_size 2.0 +32 63 training.label_smoothing 0.010957034476947979 +32 64 model.embedding_dim 1.0 +32 64 optimizer.lr 0.011173093420731217 +32 64 training.batch_size 0.0 +32 64 training.label_smoothing 0.003410177787753634 +32 65 model.embedding_dim 2.0 +32 65 optimizer.lr 0.0016545573492357647 +32 65 training.batch_size 1.0 +32 65 training.label_smoothing 0.0595753045632379 +32 66 model.embedding_dim 2.0 +32 66 optimizer.lr 0.0026313019821613084 +32 66 training.batch_size 2.0 +32 66 training.label_smoothing 0.008045485913503906 +32 67 model.embedding_dim 0.0 +32 67 optimizer.lr 0.014527408979470953 +32 67 training.batch_size 1.0 +32 67 training.label_smoothing 0.5296966105180164 +32 68 model.embedding_dim 0.0 +32 68 optimizer.lr 0.0012851824523995301 +32 68 training.batch_size 0.0 +32 68 training.label_smoothing 0.012505264394203177 +32 69 model.embedding_dim 2.0 +32 69 optimizer.lr 0.06832012245474 +32 69 training.batch_size 1.0 +32 69 training.label_smoothing 0.003437693786985972 +32 70 model.embedding_dim 0.0 +32 70 optimizer.lr 0.004927349425795399 +32 70 training.batch_size 1.0 +32 70 training.label_smoothing 0.06783460849477334 +32 71 model.embedding_dim 2.0 +32 71 optimizer.lr 0.04712587663543255 +32 71 training.batch_size 2.0 +32 71 training.label_smoothing 0.2353827364746851 +32 72 model.embedding_dim 2.0 +32 72 optimizer.lr 0.0015715180463032167 +32 72 training.batch_size 0.0 +32 72 training.label_smoothing 0.45778236428687447 +32 73 model.embedding_dim 1.0 +32 73 optimizer.lr 0.0013362978248942375 +32 73 training.batch_size 1.0 +32 73 training.label_smoothing 0.29185961566210017 +32 74 model.embedding_dim 1.0 +32 74 optimizer.lr 0.06316018515515043 +32 74 training.batch_size 0.0 +32 74 training.label_smoothing 0.0024088341675770077 +32 75 model.embedding_dim 1.0 +32 75 optimizer.lr 0.09405510875299361 +32 75 training.batch_size 1.0 +32 75 training.label_smoothing 0.007227218114916424 +32 76 model.embedding_dim 2.0 +32 76 optimizer.lr 
0.05823750574526399 +32 76 training.batch_size 1.0 +32 76 training.label_smoothing 0.0067216354860781665 +32 77 model.embedding_dim 0.0 +32 77 optimizer.lr 0.01668985237138558 +32 77 training.batch_size 1.0 +32 77 training.label_smoothing 0.09760206723611237 +32 78 model.embedding_dim 2.0 +32 78 optimizer.lr 0.002236442331731312 +32 78 training.batch_size 0.0 +32 78 training.label_smoothing 0.4417946103357722 +32 79 model.embedding_dim 0.0 +32 79 optimizer.lr 0.0056971835249462945 +32 79 training.batch_size 2.0 +32 79 training.label_smoothing 0.22490564125870827 +32 80 model.embedding_dim 1.0 +32 80 optimizer.lr 0.0015721882148855154 +32 80 training.batch_size 1.0 +32 80 training.label_smoothing 0.0097358250536216 +32 81 model.embedding_dim 1.0 +32 81 optimizer.lr 0.004432087980693751 +32 81 training.batch_size 0.0 +32 81 training.label_smoothing 0.0027541240453799005 +32 82 model.embedding_dim 0.0 +32 82 optimizer.lr 0.0032035942508222117 +32 82 training.batch_size 2.0 +32 82 training.label_smoothing 0.01068942942510287 +32 83 model.embedding_dim 0.0 +32 83 optimizer.lr 0.03528530677154877 +32 83 training.batch_size 0.0 +32 83 training.label_smoothing 0.14781790616676357 +32 84 model.embedding_dim 2.0 +32 84 optimizer.lr 0.0018723159904327424 +32 84 training.batch_size 0.0 +32 84 training.label_smoothing 0.092558593600174 +32 85 model.embedding_dim 2.0 +32 85 optimizer.lr 0.01664942776917528 +32 85 training.batch_size 1.0 +32 85 training.label_smoothing 0.023353933772218224 +32 86 model.embedding_dim 1.0 +32 86 optimizer.lr 0.0666597160685233 +32 86 training.batch_size 2.0 +32 86 training.label_smoothing 0.055490789555130635 +32 87 model.embedding_dim 2.0 +32 87 optimizer.lr 0.08010182990458084 +32 87 training.batch_size 2.0 +32 87 training.label_smoothing 0.004397251458884741 +32 88 model.embedding_dim 2.0 +32 88 optimizer.lr 0.004774018754053744 +32 88 training.batch_size 1.0 +32 88 training.label_smoothing 0.013051947105526946 +32 89 model.embedding_dim 1.0 +32 
89 optimizer.lr 0.014362912756979158 +32 89 training.batch_size 1.0 +32 89 training.label_smoothing 0.015953570199757027 +32 90 model.embedding_dim 0.0 +32 90 optimizer.lr 0.0021864935123857973 +32 90 training.batch_size 2.0 +32 90 training.label_smoothing 0.050530171001632256 +32 91 model.embedding_dim 0.0 +32 91 optimizer.lr 0.008536138689966064 +32 91 training.batch_size 2.0 +32 91 training.label_smoothing 0.04397593405317406 +32 92 model.embedding_dim 0.0 +32 92 optimizer.lr 0.0017164829085849795 +32 92 training.batch_size 2.0 +32 92 training.label_smoothing 0.03947389769808418 +32 93 model.embedding_dim 2.0 +32 93 optimizer.lr 0.011678916364076074 +32 93 training.batch_size 2.0 +32 93 training.label_smoothing 0.0010915583502328135 +32 94 model.embedding_dim 2.0 +32 94 optimizer.lr 0.025605254869615682 +32 94 training.batch_size 0.0 +32 94 training.label_smoothing 0.0036791661086141756 +32 95 model.embedding_dim 0.0 +32 95 optimizer.lr 0.049435757073526786 +32 95 training.batch_size 2.0 +32 95 training.label_smoothing 0.013482672653723935 +32 96 model.embedding_dim 1.0 +32 96 optimizer.lr 0.08625642718504306 +32 96 training.batch_size 1.0 +32 96 training.label_smoothing 0.035677820022466375 +32 97 model.embedding_dim 1.0 +32 97 optimizer.lr 0.005637526969448739 +32 97 training.batch_size 2.0 +32 97 training.label_smoothing 0.015414148790762144 +32 98 model.embedding_dim 0.0 +32 98 optimizer.lr 0.0076984552400405715 +32 98 training.batch_size 2.0 +32 98 training.label_smoothing 0.0031483298688494785 +32 99 model.embedding_dim 2.0 +32 99 optimizer.lr 0.0069487476359613755 +32 99 training.batch_size 2.0 +32 99 training.label_smoothing 0.0753983651392805 +32 100 model.embedding_dim 1.0 +32 100 optimizer.lr 0.02483872008706759 +32 100 training.batch_size 1.0 +32 100 training.label_smoothing 0.029261074449350794 +32 1 dataset """kinships""" +32 1 model """complex""" +32 1 loss """crossentropy""" +32 1 regularizer """no""" +32 1 optimizer """adam""" +32 1 
training_loop """lcwa""" +32 1 evaluator """rankbased""" +32 2 dataset """kinships""" +32 2 model """complex""" +32 2 loss """crossentropy""" +32 2 regularizer """no""" +32 2 optimizer """adam""" +32 2 training_loop """lcwa""" +32 2 evaluator """rankbased""" +32 3 dataset """kinships""" +32 3 model """complex""" +32 3 loss """crossentropy""" +32 3 regularizer """no""" +32 3 optimizer """adam""" +32 3 training_loop """lcwa""" +32 3 evaluator """rankbased""" +32 4 dataset """kinships""" +32 4 model """complex""" +32 4 loss """crossentropy""" +32 4 regularizer """no""" +32 4 optimizer """adam""" +32 4 training_loop """lcwa""" +32 4 evaluator """rankbased""" +32 5 dataset """kinships""" +32 5 model """complex""" +32 5 loss """crossentropy""" +32 5 regularizer """no""" +32 5 optimizer """adam""" +32 5 training_loop """lcwa""" +32 5 evaluator """rankbased""" +32 6 dataset """kinships""" +32 6 model """complex""" +32 6 loss """crossentropy""" +32 6 regularizer """no""" +32 6 optimizer """adam""" +32 6 training_loop """lcwa""" +32 6 evaluator """rankbased""" +32 7 dataset """kinships""" +32 7 model """complex""" +32 7 loss """crossentropy""" +32 7 regularizer """no""" +32 7 optimizer """adam""" +32 7 training_loop """lcwa""" +32 7 evaluator """rankbased""" +32 8 dataset """kinships""" +32 8 model """complex""" +32 8 loss """crossentropy""" +32 8 regularizer """no""" +32 8 optimizer """adam""" +32 8 training_loop """lcwa""" +32 8 evaluator """rankbased""" +32 9 dataset """kinships""" +32 9 model """complex""" +32 9 loss """crossentropy""" +32 9 regularizer """no""" +32 9 optimizer """adam""" +32 9 training_loop """lcwa""" +32 9 evaluator """rankbased""" +32 10 dataset """kinships""" +32 10 model """complex""" +32 10 loss """crossentropy""" +32 10 regularizer """no""" +32 10 optimizer """adam""" +32 10 training_loop """lcwa""" +32 10 evaluator """rankbased""" +32 11 dataset """kinships""" +32 11 model """complex""" +32 11 loss """crossentropy""" +32 11 regularizer """no""" 
+32 11 optimizer """adam""" +32 11 training_loop """lcwa""" +32 11 evaluator """rankbased""" +32 12 dataset """kinships""" +32 12 model """complex""" +32 12 loss """crossentropy""" +32 12 regularizer """no""" +32 12 optimizer """adam""" +32 12 training_loop """lcwa""" +32 12 evaluator """rankbased""" +32 13 dataset """kinships""" +32 13 model """complex""" +32 13 loss """crossentropy""" +32 13 regularizer """no""" +32 13 optimizer """adam""" +32 13 training_loop """lcwa""" +32 13 evaluator """rankbased""" +32 14 dataset """kinships""" +32 14 model """complex""" +32 14 loss """crossentropy""" +32 14 regularizer """no""" +32 14 optimizer """adam""" +32 14 training_loop """lcwa""" +32 14 evaluator """rankbased""" +32 15 dataset """kinships""" +32 15 model """complex""" +32 15 loss """crossentropy""" +32 15 regularizer """no""" +32 15 optimizer """adam""" +32 15 training_loop """lcwa""" +32 15 evaluator """rankbased""" +32 16 dataset """kinships""" +32 16 model """complex""" +32 16 loss """crossentropy""" +32 16 regularizer """no""" +32 16 optimizer """adam""" +32 16 training_loop """lcwa""" +32 16 evaluator """rankbased""" +32 17 dataset """kinships""" +32 17 model """complex""" +32 17 loss """crossentropy""" +32 17 regularizer """no""" +32 17 optimizer """adam""" +32 17 training_loop """lcwa""" +32 17 evaluator """rankbased""" +32 18 dataset """kinships""" +32 18 model """complex""" +32 18 loss """crossentropy""" +32 18 regularizer """no""" +32 18 optimizer """adam""" +32 18 training_loop """lcwa""" +32 18 evaluator """rankbased""" +32 19 dataset """kinships""" +32 19 model """complex""" +32 19 loss """crossentropy""" +32 19 regularizer """no""" +32 19 optimizer """adam""" +32 19 training_loop """lcwa""" +32 19 evaluator """rankbased""" +32 20 dataset """kinships""" +32 20 model """complex""" +32 20 loss """crossentropy""" +32 20 regularizer """no""" +32 20 optimizer """adam""" +32 20 training_loop """lcwa""" +32 20 evaluator """rankbased""" +32 21 dataset 
"""kinships""" +32 21 model """complex""" +32 21 loss """crossentropy""" +32 21 regularizer """no""" +32 21 optimizer """adam""" +32 21 training_loop """lcwa""" +32 21 evaluator """rankbased""" +32 22 dataset """kinships""" +32 22 model """complex""" +32 22 loss """crossentropy""" +32 22 regularizer """no""" +32 22 optimizer """adam""" +32 22 training_loop """lcwa""" +32 22 evaluator """rankbased""" +32 23 dataset """kinships""" +32 23 model """complex""" +32 23 loss """crossentropy""" +32 23 regularizer """no""" +32 23 optimizer """adam""" +32 23 training_loop """lcwa""" +32 23 evaluator """rankbased""" +32 24 dataset """kinships""" +32 24 model """complex""" +32 24 loss """crossentropy""" +32 24 regularizer """no""" +32 24 optimizer """adam""" +32 24 training_loop """lcwa""" +32 24 evaluator """rankbased""" +32 25 dataset """kinships""" +32 25 model """complex""" +32 25 loss """crossentropy""" +32 25 regularizer """no""" +32 25 optimizer """adam""" +32 25 training_loop """lcwa""" +32 25 evaluator """rankbased""" +32 26 dataset """kinships""" +32 26 model """complex""" +32 26 loss """crossentropy""" +32 26 regularizer """no""" +32 26 optimizer """adam""" +32 26 training_loop """lcwa""" +32 26 evaluator """rankbased""" +32 27 dataset """kinships""" +32 27 model """complex""" +32 27 loss """crossentropy""" +32 27 regularizer """no""" +32 27 optimizer """adam""" +32 27 training_loop """lcwa""" +32 27 evaluator """rankbased""" +32 28 dataset """kinships""" +32 28 model """complex""" +32 28 loss """crossentropy""" +32 28 regularizer """no""" +32 28 optimizer """adam""" +32 28 training_loop """lcwa""" +32 28 evaluator """rankbased""" +32 29 dataset """kinships""" +32 29 model """complex""" +32 29 loss """crossentropy""" +32 29 regularizer """no""" +32 29 optimizer """adam""" +32 29 training_loop """lcwa""" +32 29 evaluator """rankbased""" +32 30 dataset """kinships""" +32 30 model """complex""" +32 30 loss """crossentropy""" +32 30 regularizer """no""" +32 30 optimizer 
"""adam""" +32 30 training_loop """lcwa""" +32 30 evaluator """rankbased""" +32 31 dataset """kinships""" +32 31 model """complex""" +32 31 loss """crossentropy""" +32 31 regularizer """no""" +32 31 optimizer """adam""" +32 31 training_loop """lcwa""" +32 31 evaluator """rankbased""" +32 32 dataset """kinships""" +32 32 model """complex""" +32 32 loss """crossentropy""" +32 32 regularizer """no""" +32 32 optimizer """adam""" +32 32 training_loop """lcwa""" +32 32 evaluator """rankbased""" +32 33 dataset """kinships""" +32 33 model """complex""" +32 33 loss """crossentropy""" +32 33 regularizer """no""" +32 33 optimizer """adam""" +32 33 training_loop """lcwa""" +32 33 evaluator """rankbased""" +32 34 dataset """kinships""" +32 34 model """complex""" +32 34 loss """crossentropy""" +32 34 regularizer """no""" +32 34 optimizer """adam""" +32 34 training_loop """lcwa""" +32 34 evaluator """rankbased""" +32 35 dataset """kinships""" +32 35 model """complex""" +32 35 loss """crossentropy""" +32 35 regularizer """no""" +32 35 optimizer """adam""" +32 35 training_loop """lcwa""" +32 35 evaluator """rankbased""" +32 36 dataset """kinships""" +32 36 model """complex""" +32 36 loss """crossentropy""" +32 36 regularizer """no""" +32 36 optimizer """adam""" +32 36 training_loop """lcwa""" +32 36 evaluator """rankbased""" +32 37 dataset """kinships""" +32 37 model """complex""" +32 37 loss """crossentropy""" +32 37 regularizer """no""" +32 37 optimizer """adam""" +32 37 training_loop """lcwa""" +32 37 evaluator """rankbased""" +32 38 dataset """kinships""" +32 38 model """complex""" +32 38 loss """crossentropy""" +32 38 regularizer """no""" +32 38 optimizer """adam""" +32 38 training_loop """lcwa""" +32 38 evaluator """rankbased""" +32 39 dataset """kinships""" +32 39 model """complex""" +32 39 loss """crossentropy""" +32 39 regularizer """no""" +32 39 optimizer """adam""" +32 39 training_loop """lcwa""" +32 39 evaluator """rankbased""" +32 40 dataset """kinships""" +32 40 model 
"""complex""" +32 40 loss """crossentropy""" +32 40 regularizer """no""" +32 40 optimizer """adam""" +32 40 training_loop """lcwa""" +32 40 evaluator """rankbased""" +32 41 dataset """kinships""" +32 41 model """complex""" +32 41 loss """crossentropy""" +32 41 regularizer """no""" +32 41 optimizer """adam""" +32 41 training_loop """lcwa""" +32 41 evaluator """rankbased""" +32 42 dataset """kinships""" +32 42 model """complex""" +32 42 loss """crossentropy""" +32 42 regularizer """no""" +32 42 optimizer """adam""" +32 42 training_loop """lcwa""" +32 42 evaluator """rankbased""" +32 43 dataset """kinships""" +32 43 model """complex""" +32 43 loss """crossentropy""" +32 43 regularizer """no""" +32 43 optimizer """adam""" +32 43 training_loop """lcwa""" +32 43 evaluator """rankbased""" +32 44 dataset """kinships""" +32 44 model """complex""" +32 44 loss """crossentropy""" +32 44 regularizer """no""" +32 44 optimizer """adam""" +32 44 training_loop """lcwa""" +32 44 evaluator """rankbased""" +32 45 dataset """kinships""" +32 45 model """complex""" +32 45 loss """crossentropy""" +32 45 regularizer """no""" +32 45 optimizer """adam""" +32 45 training_loop """lcwa""" +32 45 evaluator """rankbased""" +32 46 dataset """kinships""" +32 46 model """complex""" +32 46 loss """crossentropy""" +32 46 regularizer """no""" +32 46 optimizer """adam""" +32 46 training_loop """lcwa""" +32 46 evaluator """rankbased""" +32 47 dataset """kinships""" +32 47 model """complex""" +32 47 loss """crossentropy""" +32 47 regularizer """no""" +32 47 optimizer """adam""" +32 47 training_loop """lcwa""" +32 47 evaluator """rankbased""" +32 48 dataset """kinships""" +32 48 model """complex""" +32 48 loss """crossentropy""" +32 48 regularizer """no""" +32 48 optimizer """adam""" +32 48 training_loop """lcwa""" +32 48 evaluator """rankbased""" +32 49 dataset """kinships""" +32 49 model """complex""" +32 49 loss """crossentropy""" +32 49 regularizer """no""" +32 49 optimizer """adam""" +32 49 
training_loop """lcwa""" +32 49 evaluator """rankbased""" +32 50 dataset """kinships""" +32 50 model """complex""" +32 50 loss """crossentropy""" +32 50 regularizer """no""" +32 50 optimizer """adam""" +32 50 training_loop """lcwa""" +32 50 evaluator """rankbased""" +32 51 dataset """kinships""" +32 51 model """complex""" +32 51 loss """crossentropy""" +32 51 regularizer """no""" +32 51 optimizer """adam""" +32 51 training_loop """lcwa""" +32 51 evaluator """rankbased""" +32 52 dataset """kinships""" +32 52 model """complex""" +32 52 loss """crossentropy""" +32 52 regularizer """no""" +32 52 optimizer """adam""" +32 52 training_loop """lcwa""" +32 52 evaluator """rankbased""" +32 53 dataset """kinships""" +32 53 model """complex""" +32 53 loss """crossentropy""" +32 53 regularizer """no""" +32 53 optimizer """adam""" +32 53 training_loop """lcwa""" +32 53 evaluator """rankbased""" +32 54 dataset """kinships""" +32 54 model """complex""" +32 54 loss """crossentropy""" +32 54 regularizer """no""" +32 54 optimizer """adam""" +32 54 training_loop """lcwa""" +32 54 evaluator """rankbased""" +32 55 dataset """kinships""" +32 55 model """complex""" +32 55 loss """crossentropy""" +32 55 regularizer """no""" +32 55 optimizer """adam""" +32 55 training_loop """lcwa""" +32 55 evaluator """rankbased""" +32 56 dataset """kinships""" +32 56 model """complex""" +32 56 loss """crossentropy""" +32 56 regularizer """no""" +32 56 optimizer """adam""" +32 56 training_loop """lcwa""" +32 56 evaluator """rankbased""" +32 57 dataset """kinships""" +32 57 model """complex""" +32 57 loss """crossentropy""" +32 57 regularizer """no""" +32 57 optimizer """adam""" +32 57 training_loop """lcwa""" +32 57 evaluator """rankbased""" +32 58 dataset """kinships""" +32 58 model """complex""" +32 58 loss """crossentropy""" +32 58 regularizer """no""" +32 58 optimizer """adam""" +32 58 training_loop """lcwa""" +32 58 evaluator """rankbased""" +32 59 dataset """kinships""" +32 59 model """complex""" +32 
59 loss """crossentropy""" +32 59 regularizer """no""" +32 59 optimizer """adam""" +32 59 training_loop """lcwa""" +32 59 evaluator """rankbased""" +32 60 dataset """kinships""" +32 60 model """complex""" +32 60 loss """crossentropy""" +32 60 regularizer """no""" +32 60 optimizer """adam""" +32 60 training_loop """lcwa""" +32 60 evaluator """rankbased""" +32 61 dataset """kinships""" +32 61 model """complex""" +32 61 loss """crossentropy""" +32 61 regularizer """no""" +32 61 optimizer """adam""" +32 61 training_loop """lcwa""" +32 61 evaluator """rankbased""" +32 62 dataset """kinships""" +32 62 model """complex""" +32 62 loss """crossentropy""" +32 62 regularizer """no""" +32 62 optimizer """adam""" +32 62 training_loop """lcwa""" +32 62 evaluator """rankbased""" +32 63 dataset """kinships""" +32 63 model """complex""" +32 63 loss """crossentropy""" +32 63 regularizer """no""" +32 63 optimizer """adam""" +32 63 training_loop """lcwa""" +32 63 evaluator """rankbased""" +32 64 dataset """kinships""" +32 64 model """complex""" +32 64 loss """crossentropy""" +32 64 regularizer """no""" +32 64 optimizer """adam""" +32 64 training_loop """lcwa""" +32 64 evaluator """rankbased""" +32 65 dataset """kinships""" +32 65 model """complex""" +32 65 loss """crossentropy""" +32 65 regularizer """no""" +32 65 optimizer """adam""" +32 65 training_loop """lcwa""" +32 65 evaluator """rankbased""" +32 66 dataset """kinships""" +32 66 model """complex""" +32 66 loss """crossentropy""" +32 66 regularizer """no""" +32 66 optimizer """adam""" +32 66 training_loop """lcwa""" +32 66 evaluator """rankbased""" +32 67 dataset """kinships""" +32 67 model """complex""" +32 67 loss """crossentropy""" +32 67 regularizer """no""" +32 67 optimizer """adam""" +32 67 training_loop """lcwa""" +32 67 evaluator """rankbased""" +32 68 dataset """kinships""" +32 68 model """complex""" +32 68 loss """crossentropy""" +32 68 regularizer """no""" +32 68 optimizer """adam""" +32 68 training_loop """lcwa""" +32 
68 evaluator """rankbased""" +32 69 dataset """kinships""" +32 69 model """complex""" +32 69 loss """crossentropy""" +32 69 regularizer """no""" +32 69 optimizer """adam""" +32 69 training_loop """lcwa""" +32 69 evaluator """rankbased""" +32 70 dataset """kinships""" +32 70 model """complex""" +32 70 loss """crossentropy""" +32 70 regularizer """no""" +32 70 optimizer """adam""" +32 70 training_loop """lcwa""" +32 70 evaluator """rankbased""" +32 71 dataset """kinships""" +32 71 model """complex""" +32 71 loss """crossentropy""" +32 71 regularizer """no""" +32 71 optimizer """adam""" +32 71 training_loop """lcwa""" +32 71 evaluator """rankbased""" +32 72 dataset """kinships""" +32 72 model """complex""" +32 72 loss """crossentropy""" +32 72 regularizer """no""" +32 72 optimizer """adam""" +32 72 training_loop """lcwa""" +32 72 evaluator """rankbased""" +32 73 dataset """kinships""" +32 73 model """complex""" +32 73 loss """crossentropy""" +32 73 regularizer """no""" +32 73 optimizer """adam""" +32 73 training_loop """lcwa""" +32 73 evaluator """rankbased""" +32 74 dataset """kinships""" +32 74 model """complex""" +32 74 loss """crossentropy""" +32 74 regularizer """no""" +32 74 optimizer """adam""" +32 74 training_loop """lcwa""" +32 74 evaluator """rankbased""" +32 75 dataset """kinships""" +32 75 model """complex""" +32 75 loss """crossentropy""" +32 75 regularizer """no""" +32 75 optimizer """adam""" +32 75 training_loop """lcwa""" +32 75 evaluator """rankbased""" +32 76 dataset """kinships""" +32 76 model """complex""" +32 76 loss """crossentropy""" +32 76 regularizer """no""" +32 76 optimizer """adam""" +32 76 training_loop """lcwa""" +32 76 evaluator """rankbased""" +32 77 dataset """kinships""" +32 77 model """complex""" +32 77 loss """crossentropy""" +32 77 regularizer """no""" +32 77 optimizer """adam""" +32 77 training_loop """lcwa""" +32 77 evaluator """rankbased""" +32 78 dataset """kinships""" +32 78 model """complex""" +32 78 loss """crossentropy""" 
+32 78 regularizer """no""" +32 78 optimizer """adam""" +32 78 training_loop """lcwa""" +32 78 evaluator """rankbased""" +32 79 dataset """kinships""" +32 79 model """complex""" +32 79 loss """crossentropy""" +32 79 regularizer """no""" +32 79 optimizer """adam""" +32 79 training_loop """lcwa""" +32 79 evaluator """rankbased""" +32 80 dataset """kinships""" +32 80 model """complex""" +32 80 loss """crossentropy""" +32 80 regularizer """no""" +32 80 optimizer """adam""" +32 80 training_loop """lcwa""" +32 80 evaluator """rankbased""" +32 81 dataset """kinships""" +32 81 model """complex""" +32 81 loss """crossentropy""" +32 81 regularizer """no""" +32 81 optimizer """adam""" +32 81 training_loop """lcwa""" +32 81 evaluator """rankbased""" +32 82 dataset """kinships""" +32 82 model """complex""" +32 82 loss """crossentropy""" +32 82 regularizer """no""" +32 82 optimizer """adam""" +32 82 training_loop """lcwa""" +32 82 evaluator """rankbased""" +32 83 dataset """kinships""" +32 83 model """complex""" +32 83 loss """crossentropy""" +32 83 regularizer """no""" +32 83 optimizer """adam""" +32 83 training_loop """lcwa""" +32 83 evaluator """rankbased""" +32 84 dataset """kinships""" +32 84 model """complex""" +32 84 loss """crossentropy""" +32 84 regularizer """no""" +32 84 optimizer """adam""" +32 84 training_loop """lcwa""" +32 84 evaluator """rankbased""" +32 85 dataset """kinships""" +32 85 model """complex""" +32 85 loss """crossentropy""" +32 85 regularizer """no""" +32 85 optimizer """adam""" +32 85 training_loop """lcwa""" +32 85 evaluator """rankbased""" +32 86 dataset """kinships""" +32 86 model """complex""" +32 86 loss """crossentropy""" +32 86 regularizer """no""" +32 86 optimizer """adam""" +32 86 training_loop """lcwa""" +32 86 evaluator """rankbased""" +32 87 dataset """kinships""" +32 87 model """complex""" +32 87 loss """crossentropy""" +32 87 regularizer """no""" +32 87 optimizer """adam""" +32 87 training_loop """lcwa""" +32 87 evaluator 
"""rankbased""" +32 88 dataset """kinships""" +32 88 model """complex""" +32 88 loss """crossentropy""" +32 88 regularizer """no""" +32 88 optimizer """adam""" +32 88 training_loop """lcwa""" +32 88 evaluator """rankbased""" +32 89 dataset """kinships""" +32 89 model """complex""" +32 89 loss """crossentropy""" +32 89 regularizer """no""" +32 89 optimizer """adam""" +32 89 training_loop """lcwa""" +32 89 evaluator """rankbased""" +32 90 dataset """kinships""" +32 90 model """complex""" +32 90 loss """crossentropy""" +32 90 regularizer """no""" +32 90 optimizer """adam""" +32 90 training_loop """lcwa""" +32 90 evaluator """rankbased""" +32 91 dataset """kinships""" +32 91 model """complex""" +32 91 loss """crossentropy""" +32 91 regularizer """no""" +32 91 optimizer """adam""" +32 91 training_loop """lcwa""" +32 91 evaluator """rankbased""" +32 92 dataset """kinships""" +32 92 model """complex""" +32 92 loss """crossentropy""" +32 92 regularizer """no""" +32 92 optimizer """adam""" +32 92 training_loop """lcwa""" +32 92 evaluator """rankbased""" +32 93 dataset """kinships""" +32 93 model """complex""" +32 93 loss """crossentropy""" +32 93 regularizer """no""" +32 93 optimizer """adam""" +32 93 training_loop """lcwa""" +32 93 evaluator """rankbased""" +32 94 dataset """kinships""" +32 94 model """complex""" +32 94 loss """crossentropy""" +32 94 regularizer """no""" +32 94 optimizer """adam""" +32 94 training_loop """lcwa""" +32 94 evaluator """rankbased""" +32 95 dataset """kinships""" +32 95 model """complex""" +32 95 loss """crossentropy""" +32 95 regularizer """no""" +32 95 optimizer """adam""" +32 95 training_loop """lcwa""" +32 95 evaluator """rankbased""" +32 96 dataset """kinships""" +32 96 model """complex""" +32 96 loss """crossentropy""" +32 96 regularizer """no""" +32 96 optimizer """adam""" +32 96 training_loop """lcwa""" +32 96 evaluator """rankbased""" +32 97 dataset """kinships""" +32 97 model """complex""" +32 97 loss """crossentropy""" +32 97 
regularizer """no""" +32 97 optimizer """adam""" +32 97 training_loop """lcwa""" +32 97 evaluator """rankbased""" +32 98 dataset """kinships""" +32 98 model """complex""" +32 98 loss """crossentropy""" +32 98 regularizer """no""" +32 98 optimizer """adam""" +32 98 training_loop """lcwa""" +32 98 evaluator """rankbased""" +32 99 dataset """kinships""" +32 99 model """complex""" +32 99 loss """crossentropy""" +32 99 regularizer """no""" +32 99 optimizer """adam""" +32 99 training_loop """lcwa""" +32 99 evaluator """rankbased""" +32 100 dataset """kinships""" +32 100 model """complex""" +32 100 loss """crossentropy""" +32 100 regularizer """no""" +32 100 optimizer """adam""" +32 100 training_loop """lcwa""" +32 100 evaluator """rankbased""" +33 1 model.embedding_dim 1.0 +33 1 optimizer.lr 0.0028250168609106497 +33 1 training.batch_size 0.0 +33 1 training.label_smoothing 0.2463194260587973 +33 2 model.embedding_dim 0.0 +33 2 optimizer.lr 0.0246227418520588 +33 2 training.batch_size 1.0 +33 2 training.label_smoothing 0.00474826103721028 +33 3 model.embedding_dim 2.0 +33 3 optimizer.lr 0.04465149212109051 +33 3 training.batch_size 2.0 +33 3 training.label_smoothing 0.02366418547654725 +33 4 model.embedding_dim 1.0 +33 4 optimizer.lr 0.00537846879924034 +33 4 training.batch_size 0.0 +33 4 training.label_smoothing 0.6087488971837214 +33 5 model.embedding_dim 1.0 +33 5 optimizer.lr 0.017408869431667556 +33 5 training.batch_size 1.0 +33 5 training.label_smoothing 0.4930508992401986 +33 6 model.embedding_dim 1.0 +33 6 optimizer.lr 0.032831256931013526 +33 6 training.batch_size 2.0 +33 6 training.label_smoothing 0.33173886109195516 +33 7 model.embedding_dim 2.0 +33 7 optimizer.lr 0.0024651372972502223 +33 7 training.batch_size 0.0 +33 7 training.label_smoothing 0.00537170764275346 +33 8 model.embedding_dim 0.0 +33 8 optimizer.lr 0.03580881438168837 +33 8 training.batch_size 2.0 +33 8 training.label_smoothing 0.05903919140290755 +33 9 model.embedding_dim 2.0 +33 9 optimizer.lr 
0.07934660334289027 +33 9 training.batch_size 1.0 +33 9 training.label_smoothing 0.03144297562384498 +33 10 model.embedding_dim 2.0 +33 10 optimizer.lr 0.008090431482454289 +33 10 training.batch_size 0.0 +33 10 training.label_smoothing 0.9122477719995485 +33 11 model.embedding_dim 0.0 +33 11 optimizer.lr 0.007417197361732269 +33 11 training.batch_size 2.0 +33 11 training.label_smoothing 0.18620049258723118 +33 12 model.embedding_dim 0.0 +33 12 optimizer.lr 0.0014966652242179243 +33 12 training.batch_size 1.0 +33 12 training.label_smoothing 0.3423697878933858 +33 13 model.embedding_dim 1.0 +33 13 optimizer.lr 0.06711353981988763 +33 13 training.batch_size 0.0 +33 13 training.label_smoothing 0.06153650774551344 +33 14 model.embedding_dim 2.0 +33 14 optimizer.lr 0.034667217713497185 +33 14 training.batch_size 1.0 +33 14 training.label_smoothing 0.0011591424025282472 +33 15 model.embedding_dim 1.0 +33 15 optimizer.lr 0.04198326190596015 +33 15 training.batch_size 2.0 +33 15 training.label_smoothing 0.0025493881474433428 +33 16 model.embedding_dim 2.0 +33 16 optimizer.lr 0.003458434312823963 +33 16 training.batch_size 2.0 +33 16 training.label_smoothing 0.06746258062271504 +33 17 model.embedding_dim 0.0 +33 17 optimizer.lr 0.05623615456248263 +33 17 training.batch_size 0.0 +33 17 training.label_smoothing 0.7856162512356436 +33 18 model.embedding_dim 0.0 +33 18 optimizer.lr 0.010196255681502711 +33 18 training.batch_size 0.0 +33 18 training.label_smoothing 0.004309510887141179 +33 19 model.embedding_dim 2.0 +33 19 optimizer.lr 0.023738450541235873 +33 19 training.batch_size 1.0 +33 19 training.label_smoothing 0.5358081738734225 +33 20 model.embedding_dim 0.0 +33 20 optimizer.lr 0.0018543495206679147 +33 20 training.batch_size 2.0 +33 20 training.label_smoothing 0.053440159303437415 +33 21 model.embedding_dim 0.0 +33 21 optimizer.lr 0.012647140869833182 +33 21 training.batch_size 0.0 +33 21 training.label_smoothing 0.0014034849702705633 +33 22 model.embedding_dim 2.0 +33 
22 optimizer.lr 0.05422359377283618 +33 22 training.batch_size 0.0 +33 22 training.label_smoothing 0.013805159672123425 +33 23 model.embedding_dim 1.0 +33 23 optimizer.lr 0.02304611282808833 +33 23 training.batch_size 0.0 +33 23 training.label_smoothing 0.5166975016742589 +33 24 model.embedding_dim 2.0 +33 24 optimizer.lr 0.0018022048441965698 +33 24 training.batch_size 2.0 +33 24 training.label_smoothing 0.1079131846396424 +33 25 model.embedding_dim 0.0 +33 25 optimizer.lr 0.00536640492636744 +33 25 training.batch_size 1.0 +33 25 training.label_smoothing 0.6737965951444864 +33 26 model.embedding_dim 2.0 +33 26 optimizer.lr 0.0032492190246399028 +33 26 training.batch_size 2.0 +33 26 training.label_smoothing 0.018528273095504262 +33 27 model.embedding_dim 0.0 +33 27 optimizer.lr 0.002958618538431816 +33 27 training.batch_size 1.0 +33 27 training.label_smoothing 0.44287876787859 +33 28 model.embedding_dim 0.0 +33 28 optimizer.lr 0.00321677637283486 +33 28 training.batch_size 1.0 +33 28 training.label_smoothing 0.008291405973026801 +33 29 model.embedding_dim 0.0 +33 29 optimizer.lr 0.0015442224976818632 +33 29 training.batch_size 0.0 +33 29 training.label_smoothing 0.026719391261017134 +33 30 model.embedding_dim 0.0 +33 30 optimizer.lr 0.0019033334294509328 +33 30 training.batch_size 1.0 +33 30 training.label_smoothing 0.006892863146506905 +33 31 model.embedding_dim 2.0 +33 31 optimizer.lr 0.012069121603329755 +33 31 training.batch_size 2.0 +33 31 training.label_smoothing 0.001187384618526752 +33 32 model.embedding_dim 1.0 +33 32 optimizer.lr 0.0847917839086618 +33 32 training.batch_size 1.0 +33 32 training.label_smoothing 0.003433140986235008 +33 33 model.embedding_dim 2.0 +33 33 optimizer.lr 0.007687270628600801 +33 33 training.batch_size 2.0 +33 33 training.label_smoothing 0.6432975393167698 +33 34 model.embedding_dim 0.0 +33 34 optimizer.lr 0.00599525805130678 +33 34 training.batch_size 2.0 +33 34 training.label_smoothing 0.0014805940029065272 +33 35 
model.embedding_dim 2.0 +33 35 optimizer.lr 0.03838251056839964 +33 35 training.batch_size 1.0 +33 35 training.label_smoothing 0.028134076474581552 +33 36 model.embedding_dim 0.0 +33 36 optimizer.lr 0.04638964612888427 +33 36 training.batch_size 0.0 +33 36 training.label_smoothing 0.49469291226365203 +33 37 model.embedding_dim 0.0 +33 37 optimizer.lr 0.011656121891593272 +33 37 training.batch_size 2.0 +33 37 training.label_smoothing 0.8672720045729372 +33 38 model.embedding_dim 0.0 +33 38 optimizer.lr 0.03644176304900624 +33 38 training.batch_size 2.0 +33 38 training.label_smoothing 0.003230789197762911 +33 39 model.embedding_dim 0.0 +33 39 optimizer.lr 0.0011868023949397073 +33 39 training.batch_size 0.0 +33 39 training.label_smoothing 0.019150634311067803 +33 40 model.embedding_dim 1.0 +33 40 optimizer.lr 0.04268317082423417 +33 40 training.batch_size 1.0 +33 40 training.label_smoothing 0.010943979127072503 +33 41 model.embedding_dim 2.0 +33 41 optimizer.lr 0.005940888475925739 +33 41 training.batch_size 2.0 +33 41 training.label_smoothing 0.030207616808977347 +33 42 model.embedding_dim 1.0 +33 42 optimizer.lr 0.0173891068771359 +33 42 training.batch_size 1.0 +33 42 training.label_smoothing 0.05885885396745272 +33 43 model.embedding_dim 2.0 +33 43 optimizer.lr 0.025070500085308346 +33 43 training.batch_size 0.0 +33 43 training.label_smoothing 0.010686101955186466 +33 44 model.embedding_dim 0.0 +33 44 optimizer.lr 0.08493075520589068 +33 44 training.batch_size 0.0 +33 44 training.label_smoothing 0.0023389297208114344 +33 45 model.embedding_dim 0.0 +33 45 optimizer.lr 0.0017289140602910569 +33 45 training.batch_size 1.0 +33 45 training.label_smoothing 0.007712684220191196 +33 46 model.embedding_dim 1.0 +33 46 optimizer.lr 0.0014772984405311104 +33 46 training.batch_size 0.0 +33 46 training.label_smoothing 0.005309971302100526 +33 47 model.embedding_dim 2.0 +33 47 optimizer.lr 0.007376088146272743 +33 47 training.batch_size 1.0 +33 47 training.label_smoothing 
0.07047366184593762 +33 48 model.embedding_dim 0.0 +33 48 optimizer.lr 0.0032542565563770222 +33 48 training.batch_size 2.0 +33 48 training.label_smoothing 0.001022838013369174 +33 49 model.embedding_dim 0.0 +33 49 optimizer.lr 0.0011658673406522862 +33 49 training.batch_size 2.0 +33 49 training.label_smoothing 0.6980472039266583 +33 50 model.embedding_dim 0.0 +33 50 optimizer.lr 0.023898997456195975 +33 50 training.batch_size 0.0 +33 50 training.label_smoothing 0.007387901004662414 +33 51 model.embedding_dim 0.0 +33 51 optimizer.lr 0.003526243254926661 +33 51 training.batch_size 2.0 +33 51 training.label_smoothing 0.0034090560745233603 +33 52 model.embedding_dim 1.0 +33 52 optimizer.lr 0.03090223468019338 +33 52 training.batch_size 2.0 +33 52 training.label_smoothing 0.0015548327616797491 +33 53 model.embedding_dim 1.0 +33 53 optimizer.lr 0.020708359471881158 +33 53 training.batch_size 2.0 +33 53 training.label_smoothing 0.0072618452674053586 +33 54 model.embedding_dim 2.0 +33 54 optimizer.lr 0.029072353733004812 +33 54 training.batch_size 2.0 +33 54 training.label_smoothing 0.008199985673341457 +33 55 model.embedding_dim 0.0 +33 55 optimizer.lr 0.08713812409388788 +33 55 training.batch_size 0.0 +33 55 training.label_smoothing 0.016093805086861725 +33 56 model.embedding_dim 1.0 +33 56 optimizer.lr 0.03326524124034132 +33 56 training.batch_size 1.0 +33 56 training.label_smoothing 0.01561703243169074 +33 57 model.embedding_dim 0.0 +33 57 optimizer.lr 0.09610224830188818 +33 57 training.batch_size 1.0 +33 57 training.label_smoothing 0.7008933459206236 +33 58 model.embedding_dim 0.0 +33 58 optimizer.lr 0.06550323806851754 +33 58 training.batch_size 0.0 +33 58 training.label_smoothing 0.0015825329942804076 +33 59 model.embedding_dim 2.0 +33 59 optimizer.lr 0.013942275822208625 +33 59 training.batch_size 1.0 +33 59 training.label_smoothing 0.0635091148279029 +33 60 model.embedding_dim 1.0 +33 60 optimizer.lr 0.07414623728544845 +33 60 training.batch_size 2.0 +33 60 
training.label_smoothing 0.001814966471612206 +33 61 model.embedding_dim 1.0 +33 61 optimizer.lr 0.04283219911599574 +33 61 training.batch_size 0.0 +33 61 training.label_smoothing 0.1615672269609509 +33 62 model.embedding_dim 2.0 +33 62 optimizer.lr 0.005060023313368648 +33 62 training.batch_size 0.0 +33 62 training.label_smoothing 0.7107975478885769 +33 63 model.embedding_dim 0.0 +33 63 optimizer.lr 0.007056786351711031 +33 63 training.batch_size 1.0 +33 63 training.label_smoothing 0.005964640510653504 +33 64 model.embedding_dim 2.0 +33 64 optimizer.lr 0.003945282939893658 +33 64 training.batch_size 0.0 +33 64 training.label_smoothing 0.06421737621228041 +33 65 model.embedding_dim 1.0 +33 65 optimizer.lr 0.01590040519091375 +33 65 training.batch_size 1.0 +33 65 training.label_smoothing 0.009587341128053357 +33 66 model.embedding_dim 1.0 +33 66 optimizer.lr 0.008666855385346072 +33 66 training.batch_size 1.0 +33 66 training.label_smoothing 0.005289256465871128 +33 67 model.embedding_dim 1.0 +33 67 optimizer.lr 0.0033401251164606664 +33 67 training.batch_size 0.0 +33 67 training.label_smoothing 0.0017783514086051313 +33 68 model.embedding_dim 1.0 +33 68 optimizer.lr 0.0015445846059600477 +33 68 training.batch_size 1.0 +33 68 training.label_smoothing 0.0017493273586031237 +33 69 model.embedding_dim 0.0 +33 69 optimizer.lr 0.02140015247243095 +33 69 training.batch_size 1.0 +33 69 training.label_smoothing 0.031105925706074847 +33 70 model.embedding_dim 2.0 +33 70 optimizer.lr 0.033958391944192506 +33 70 training.batch_size 2.0 +33 70 training.label_smoothing 0.0027703244865505653 +33 71 model.embedding_dim 1.0 +33 71 optimizer.lr 0.09982423500369063 +33 71 training.batch_size 2.0 +33 71 training.label_smoothing 0.0016771417119868287 +33 72 model.embedding_dim 0.0 +33 72 optimizer.lr 0.020762782623666463 +33 72 training.batch_size 2.0 +33 72 training.label_smoothing 0.3653429084117506 +33 73 model.embedding_dim 1.0 +33 73 optimizer.lr 0.01575473783621318 +33 73 
training.batch_size 0.0 +33 73 training.label_smoothing 0.011541564046523303 +33 74 model.embedding_dim 2.0 +33 74 optimizer.lr 0.01900070859757479 +33 74 training.batch_size 2.0 +33 74 training.label_smoothing 0.005033699272894309 +33 75 model.embedding_dim 1.0 +33 75 optimizer.lr 0.0011027709733859037 +33 75 training.batch_size 0.0 +33 75 training.label_smoothing 0.2946353613063489 +33 76 model.embedding_dim 1.0 +33 76 optimizer.lr 0.008931443442876084 +33 76 training.batch_size 0.0 +33 76 training.label_smoothing 0.001671289892003896 +33 77 model.embedding_dim 2.0 +33 77 optimizer.lr 0.02888504050808402 +33 77 training.batch_size 0.0 +33 77 training.label_smoothing 0.0038734195906598213 +33 78 model.embedding_dim 1.0 +33 78 optimizer.lr 0.004464660662287093 +33 78 training.batch_size 2.0 +33 78 training.label_smoothing 0.0661973225142139 +33 79 model.embedding_dim 1.0 +33 79 optimizer.lr 0.013024211888976617 +33 79 training.batch_size 2.0 +33 79 training.label_smoothing 0.8968775182971843 +33 80 model.embedding_dim 0.0 +33 80 optimizer.lr 0.040168742774811854 +33 80 training.batch_size 0.0 +33 80 training.label_smoothing 0.004442220467076391 +33 81 model.embedding_dim 2.0 +33 81 optimizer.lr 0.0017221174163395624 +33 81 training.batch_size 0.0 +33 81 training.label_smoothing 0.4088067456384279 +33 82 model.embedding_dim 1.0 +33 82 optimizer.lr 0.04732026455778788 +33 82 training.batch_size 0.0 +33 82 training.label_smoothing 0.06868781539487066 +33 83 model.embedding_dim 0.0 +33 83 optimizer.lr 0.07650878510057256 +33 83 training.batch_size 2.0 +33 83 training.label_smoothing 0.09600190397797843 +33 84 model.embedding_dim 0.0 +33 84 optimizer.lr 0.015555785838053117 +33 84 training.batch_size 0.0 +33 84 training.label_smoothing 0.011348125212335384 +33 85 model.embedding_dim 2.0 +33 85 optimizer.lr 0.005144427755124695 +33 85 training.batch_size 0.0 +33 85 training.label_smoothing 0.0037088361138533183 +33 86 model.embedding_dim 1.0 +33 86 optimizer.lr 
0.05205557267289016 +33 86 training.batch_size 1.0 +33 86 training.label_smoothing 0.010811590211200239 +33 87 model.embedding_dim 1.0 +33 87 optimizer.lr 0.0011678856986519193 +33 87 training.batch_size 0.0 +33 87 training.label_smoothing 0.39055147046506106 +33 88 model.embedding_dim 1.0 +33 88 optimizer.lr 0.07662932235119757 +33 88 training.batch_size 1.0 +33 88 training.label_smoothing 0.06721214367571501 +33 89 model.embedding_dim 0.0 +33 89 optimizer.lr 0.05998126317744848 +33 89 training.batch_size 1.0 +33 89 training.label_smoothing 0.13799219807982205 +33 90 model.embedding_dim 0.0 +33 90 optimizer.lr 0.006806203765416519 +33 90 training.batch_size 1.0 +33 90 training.label_smoothing 0.29191956746873277 +33 91 model.embedding_dim 2.0 +33 91 optimizer.lr 0.04971303315566772 +33 91 training.batch_size 2.0 +33 91 training.label_smoothing 0.0010134216429881163 +33 92 model.embedding_dim 0.0 +33 92 optimizer.lr 0.0012040117487262531 +33 92 training.batch_size 0.0 +33 92 training.label_smoothing 0.009737168243103193 +33 93 model.embedding_dim 1.0 +33 93 optimizer.lr 0.007087838468440701 +33 93 training.batch_size 1.0 +33 93 training.label_smoothing 0.006583173642528277 +33 94 model.embedding_dim 1.0 +33 94 optimizer.lr 0.007033482219425606 +33 94 training.batch_size 0.0 +33 94 training.label_smoothing 0.009815719616817146 +33 95 model.embedding_dim 2.0 +33 95 optimizer.lr 0.005463497796566272 +33 95 training.batch_size 0.0 +33 95 training.label_smoothing 0.24633322970075736 +33 96 model.embedding_dim 1.0 +33 96 optimizer.lr 0.0021587605355019228 +33 96 training.batch_size 0.0 +33 96 training.label_smoothing 0.2663557955188265 +33 97 model.embedding_dim 2.0 +33 97 optimizer.lr 0.06474721084084088 +33 97 training.batch_size 1.0 +33 97 training.label_smoothing 0.028914368808228164 +33 98 model.embedding_dim 0.0 +33 98 optimizer.lr 0.0031998698381513263 +33 98 training.batch_size 2.0 +33 98 training.label_smoothing 0.08262465663873797 +33 99 model.embedding_dim 1.0 
+33 99 optimizer.lr 0.016101169335450874 +33 99 training.batch_size 2.0 +33 99 training.label_smoothing 0.003955556251081238 +33 100 model.embedding_dim 2.0 +33 100 optimizer.lr 0.02318151207287899 +33 100 training.batch_size 1.0 +33 100 training.label_smoothing 0.40666636657969957 +33 1 dataset """kinships""" +33 1 model """complex""" +33 1 loss """crossentropy""" +33 1 regularizer """no""" +33 1 optimizer """adam""" +33 1 training_loop """lcwa""" +33 1 evaluator """rankbased""" +33 2 dataset """kinships""" +33 2 model """complex""" +33 2 loss """crossentropy""" +33 2 regularizer """no""" +33 2 optimizer """adam""" +33 2 training_loop """lcwa""" +33 2 evaluator """rankbased""" +33 3 dataset """kinships""" +33 3 model """complex""" +33 3 loss """crossentropy""" +33 3 regularizer """no""" +33 3 optimizer """adam""" +33 3 training_loop """lcwa""" +33 3 evaluator """rankbased""" +33 4 dataset """kinships""" +33 4 model """complex""" +33 4 loss """crossentropy""" +33 4 regularizer """no""" +33 4 optimizer """adam""" +33 4 training_loop """lcwa""" +33 4 evaluator """rankbased""" +33 5 dataset """kinships""" +33 5 model """complex""" +33 5 loss """crossentropy""" +33 5 regularizer """no""" +33 5 optimizer """adam""" +33 5 training_loop """lcwa""" +33 5 evaluator """rankbased""" +33 6 dataset """kinships""" +33 6 model """complex""" +33 6 loss """crossentropy""" +33 6 regularizer """no""" +33 6 optimizer """adam""" +33 6 training_loop """lcwa""" +33 6 evaluator """rankbased""" +33 7 dataset """kinships""" +33 7 model """complex""" +33 7 loss """crossentropy""" +33 7 regularizer """no""" +33 7 optimizer """adam""" +33 7 training_loop """lcwa""" +33 7 evaluator """rankbased""" +33 8 dataset """kinships""" +33 8 model """complex""" +33 8 loss """crossentropy""" +33 8 regularizer """no""" +33 8 optimizer """adam""" +33 8 training_loop """lcwa""" +33 8 evaluator """rankbased""" +33 9 dataset """kinships""" +33 9 model """complex""" +33 9 loss """crossentropy""" +33 9 
regularizer """no""" +33 9 optimizer """adam""" +33 9 training_loop """lcwa""" +33 9 evaluator """rankbased""" +33 10 dataset """kinships""" +33 10 model """complex""" +33 10 loss """crossentropy""" +33 10 regularizer """no""" +33 10 optimizer """adam""" +33 10 training_loop """lcwa""" +33 10 evaluator """rankbased""" +33 11 dataset """kinships""" +33 11 model """complex""" +33 11 loss """crossentropy""" +33 11 regularizer """no""" +33 11 optimizer """adam""" +33 11 training_loop """lcwa""" +33 11 evaluator """rankbased""" +33 12 dataset """kinships""" +33 12 model """complex""" +33 12 loss """crossentropy""" +33 12 regularizer """no""" +33 12 optimizer """adam""" +33 12 training_loop """lcwa""" +33 12 evaluator """rankbased""" +33 13 dataset """kinships""" +33 13 model """complex""" +33 13 loss """crossentropy""" +33 13 regularizer """no""" +33 13 optimizer """adam""" +33 13 training_loop """lcwa""" +33 13 evaluator """rankbased""" +33 14 dataset """kinships""" +33 14 model """complex""" +33 14 loss """crossentropy""" +33 14 regularizer """no""" +33 14 optimizer """adam""" +33 14 training_loop """lcwa""" +33 14 evaluator """rankbased""" +33 15 dataset """kinships""" +33 15 model """complex""" +33 15 loss """crossentropy""" +33 15 regularizer """no""" +33 15 optimizer """adam""" +33 15 training_loop """lcwa""" +33 15 evaluator """rankbased""" +33 16 dataset """kinships""" +33 16 model """complex""" +33 16 loss """crossentropy""" +33 16 regularizer """no""" +33 16 optimizer """adam""" +33 16 training_loop """lcwa""" +33 16 evaluator """rankbased""" +33 17 dataset """kinships""" +33 17 model """complex""" +33 17 loss """crossentropy""" +33 17 regularizer """no""" +33 17 optimizer """adam""" +33 17 training_loop """lcwa""" +33 17 evaluator """rankbased""" +33 18 dataset """kinships""" +33 18 model """complex""" +33 18 loss """crossentropy""" +33 18 regularizer """no""" +33 18 optimizer """adam""" +33 18 training_loop """lcwa""" +33 18 evaluator """rankbased""" +33 19 
dataset """kinships""" +33 19 model """complex""" +33 19 loss """crossentropy""" +33 19 regularizer """no""" +33 19 optimizer """adam""" +33 19 training_loop """lcwa""" +33 19 evaluator """rankbased""" +33 20 dataset """kinships""" +33 20 model """complex""" +33 20 loss """crossentropy""" +33 20 regularizer """no""" +33 20 optimizer """adam""" +33 20 training_loop """lcwa""" +33 20 evaluator """rankbased""" +33 21 dataset """kinships""" +33 21 model """complex""" +33 21 loss """crossentropy""" +33 21 regularizer """no""" +33 21 optimizer """adam""" +33 21 training_loop """lcwa""" +33 21 evaluator """rankbased""" +33 22 dataset """kinships""" +33 22 model """complex""" +33 22 loss """crossentropy""" +33 22 regularizer """no""" +33 22 optimizer """adam""" +33 22 training_loop """lcwa""" +33 22 evaluator """rankbased""" +33 23 dataset """kinships""" +33 23 model """complex""" +33 23 loss """crossentropy""" +33 23 regularizer """no""" +33 23 optimizer """adam""" +33 23 training_loop """lcwa""" +33 23 evaluator """rankbased""" +33 24 dataset """kinships""" +33 24 model """complex""" +33 24 loss """crossentropy""" +33 24 regularizer """no""" +33 24 optimizer """adam""" +33 24 training_loop """lcwa""" +33 24 evaluator """rankbased""" +33 25 dataset """kinships""" +33 25 model """complex""" +33 25 loss """crossentropy""" +33 25 regularizer """no""" +33 25 optimizer """adam""" +33 25 training_loop """lcwa""" +33 25 evaluator """rankbased""" +33 26 dataset """kinships""" +33 26 model """complex""" +33 26 loss """crossentropy""" +33 26 regularizer """no""" +33 26 optimizer """adam""" +33 26 training_loop """lcwa""" +33 26 evaluator """rankbased""" +33 27 dataset """kinships""" +33 27 model """complex""" +33 27 loss """crossentropy""" +33 27 regularizer """no""" +33 27 optimizer """adam""" +33 27 training_loop """lcwa""" +33 27 evaluator """rankbased""" +33 28 dataset """kinships""" +33 28 model """complex""" +33 28 loss """crossentropy""" +33 28 regularizer """no""" +33 28 
optimizer """adam""" +33 28 training_loop """lcwa""" +33 28 evaluator """rankbased""" +33 29 dataset """kinships""" +33 29 model """complex""" +33 29 loss """crossentropy""" +33 29 regularizer """no""" +33 29 optimizer """adam""" +33 29 training_loop """lcwa""" +33 29 evaluator """rankbased""" +33 30 dataset """kinships""" +33 30 model """complex""" +33 30 loss """crossentropy""" +33 30 regularizer """no""" +33 30 optimizer """adam""" +33 30 training_loop """lcwa""" +33 30 evaluator """rankbased""" +33 31 dataset """kinships""" +33 31 model """complex""" +33 31 loss """crossentropy""" +33 31 regularizer """no""" +33 31 optimizer """adam""" +33 31 training_loop """lcwa""" +33 31 evaluator """rankbased""" +33 32 dataset """kinships""" +33 32 model """complex""" +33 32 loss """crossentropy""" +33 32 regularizer """no""" +33 32 optimizer """adam""" +33 32 training_loop """lcwa""" +33 32 evaluator """rankbased""" +33 33 dataset """kinships""" +33 33 model """complex""" +33 33 loss """crossentropy""" +33 33 regularizer """no""" +33 33 optimizer """adam""" +33 33 training_loop """lcwa""" +33 33 evaluator """rankbased""" +33 34 dataset """kinships""" +33 34 model """complex""" +33 34 loss """crossentropy""" +33 34 regularizer """no""" +33 34 optimizer """adam""" +33 34 training_loop """lcwa""" +33 34 evaluator """rankbased""" +33 35 dataset """kinships""" +33 35 model """complex""" +33 35 loss """crossentropy""" +33 35 regularizer """no""" +33 35 optimizer """adam""" +33 35 training_loop """lcwa""" +33 35 evaluator """rankbased""" +33 36 dataset """kinships""" +33 36 model """complex""" +33 36 loss """crossentropy""" +33 36 regularizer """no""" +33 36 optimizer """adam""" +33 36 training_loop """lcwa""" +33 36 evaluator """rankbased""" +33 37 dataset """kinships""" +33 37 model """complex""" +33 37 loss """crossentropy""" +33 37 regularizer """no""" +33 37 optimizer """adam""" +33 37 training_loop """lcwa""" +33 37 evaluator """rankbased""" +33 38 dataset """kinships""" 
+33 38 model """complex""" +33 38 loss """crossentropy""" +33 38 regularizer """no""" +33 38 optimizer """adam""" +33 38 training_loop """lcwa""" +33 38 evaluator """rankbased""" +33 39 dataset """kinships""" +33 39 model """complex""" +33 39 loss """crossentropy""" +33 39 regularizer """no""" +33 39 optimizer """adam""" +33 39 training_loop """lcwa""" +33 39 evaluator """rankbased""" +33 40 dataset """kinships""" +33 40 model """complex""" +33 40 loss """crossentropy""" +33 40 regularizer """no""" +33 40 optimizer """adam""" +33 40 training_loop """lcwa""" +33 40 evaluator """rankbased""" +33 41 dataset """kinships""" +33 41 model """complex""" +33 41 loss """crossentropy""" +33 41 regularizer """no""" +33 41 optimizer """adam""" +33 41 training_loop """lcwa""" +33 41 evaluator """rankbased""" +33 42 dataset """kinships""" +33 42 model """complex""" +33 42 loss """crossentropy""" +33 42 regularizer """no""" +33 42 optimizer """adam""" +33 42 training_loop """lcwa""" +33 42 evaluator """rankbased""" +33 43 dataset """kinships""" +33 43 model """complex""" +33 43 loss """crossentropy""" +33 43 regularizer """no""" +33 43 optimizer """adam""" +33 43 training_loop """lcwa""" +33 43 evaluator """rankbased""" +33 44 dataset """kinships""" +33 44 model """complex""" +33 44 loss """crossentropy""" +33 44 regularizer """no""" +33 44 optimizer """adam""" +33 44 training_loop """lcwa""" +33 44 evaluator """rankbased""" +33 45 dataset """kinships""" +33 45 model """complex""" +33 45 loss """crossentropy""" +33 45 regularizer """no""" +33 45 optimizer """adam""" +33 45 training_loop """lcwa""" +33 45 evaluator """rankbased""" +33 46 dataset """kinships""" +33 46 model """complex""" +33 46 loss """crossentropy""" +33 46 regularizer """no""" +33 46 optimizer """adam""" +33 46 training_loop """lcwa""" +33 46 evaluator """rankbased""" +33 47 dataset """kinships""" +33 47 model """complex""" +33 47 loss """crossentropy""" +33 47 regularizer """no""" +33 47 optimizer """adam""" +33 
47 training_loop """lcwa""" +33 47 evaluator """rankbased""" +33 48 dataset """kinships""" +33 48 model """complex""" +33 48 loss """crossentropy""" +33 48 regularizer """no""" +33 48 optimizer """adam""" +33 48 training_loop """lcwa""" +33 48 evaluator """rankbased""" +33 49 dataset """kinships""" +33 49 model """complex""" +33 49 loss """crossentropy""" +33 49 regularizer """no""" +33 49 optimizer """adam""" +33 49 training_loop """lcwa""" +33 49 evaluator """rankbased""" +33 50 dataset """kinships""" +33 50 model """complex""" +33 50 loss """crossentropy""" +33 50 regularizer """no""" +33 50 optimizer """adam""" +33 50 training_loop """lcwa""" +33 50 evaluator """rankbased""" +33 51 dataset """kinships""" +33 51 model """complex""" +33 51 loss """crossentropy""" +33 51 regularizer """no""" +33 51 optimizer """adam""" +33 51 training_loop """lcwa""" +33 51 evaluator """rankbased""" +33 52 dataset """kinships""" +33 52 model """complex""" +33 52 loss """crossentropy""" +33 52 regularizer """no""" +33 52 optimizer """adam""" +33 52 training_loop """lcwa""" +33 52 evaluator """rankbased""" +33 53 dataset """kinships""" +33 53 model """complex""" +33 53 loss """crossentropy""" +33 53 regularizer """no""" +33 53 optimizer """adam""" +33 53 training_loop """lcwa""" +33 53 evaluator """rankbased""" +33 54 dataset """kinships""" +33 54 model """complex""" +33 54 loss """crossentropy""" +33 54 regularizer """no""" +33 54 optimizer """adam""" +33 54 training_loop """lcwa""" +33 54 evaluator """rankbased""" +33 55 dataset """kinships""" +33 55 model """complex""" +33 55 loss """crossentropy""" +33 55 regularizer """no""" +33 55 optimizer """adam""" +33 55 training_loop """lcwa""" +33 55 evaluator """rankbased""" +33 56 dataset """kinships""" +33 56 model """complex""" +33 56 loss """crossentropy""" +33 56 regularizer """no""" +33 56 optimizer """adam""" +33 56 training_loop """lcwa""" +33 56 evaluator """rankbased""" +33 57 dataset """kinships""" +33 57 model """complex""" 
+33 57 loss """crossentropy""" +33 57 regularizer """no""" +33 57 optimizer """adam""" +33 57 training_loop """lcwa""" +33 57 evaluator """rankbased""" +33 58 dataset """kinships""" +33 58 model """complex""" +33 58 loss """crossentropy""" +33 58 regularizer """no""" +33 58 optimizer """adam""" +33 58 training_loop """lcwa""" +33 58 evaluator """rankbased""" +33 59 dataset """kinships""" +33 59 model """complex""" +33 59 loss """crossentropy""" +33 59 regularizer """no""" +33 59 optimizer """adam""" +33 59 training_loop """lcwa""" +33 59 evaluator """rankbased""" +33 60 dataset """kinships""" +33 60 model """complex""" +33 60 loss """crossentropy""" +33 60 regularizer """no""" +33 60 optimizer """adam""" +33 60 training_loop """lcwa""" +33 60 evaluator """rankbased""" +33 61 dataset """kinships""" +33 61 model """complex""" +33 61 loss """crossentropy""" +33 61 regularizer """no""" +33 61 optimizer """adam""" +33 61 training_loop """lcwa""" +33 61 evaluator """rankbased""" +33 62 dataset """kinships""" +33 62 model """complex""" +33 62 loss """crossentropy""" +33 62 regularizer """no""" +33 62 optimizer """adam""" +33 62 training_loop """lcwa""" +33 62 evaluator """rankbased""" +33 63 dataset """kinships""" +33 63 model """complex""" +33 63 loss """crossentropy""" +33 63 regularizer """no""" +33 63 optimizer """adam""" +33 63 training_loop """lcwa""" +33 63 evaluator """rankbased""" +33 64 dataset """kinships""" +33 64 model """complex""" +33 64 loss """crossentropy""" +33 64 regularizer """no""" +33 64 optimizer """adam""" +33 64 training_loop """lcwa""" +33 64 evaluator """rankbased""" +33 65 dataset """kinships""" +33 65 model """complex""" +33 65 loss """crossentropy""" +33 65 regularizer """no""" +33 65 optimizer """adam""" +33 65 training_loop """lcwa""" +33 65 evaluator """rankbased""" +33 66 dataset """kinships""" +33 66 model """complex""" +33 66 loss """crossentropy""" +33 66 regularizer """no""" +33 66 optimizer """adam""" +33 66 training_loop """lcwa""" 
+33 66 evaluator """rankbased""" +33 67 dataset """kinships""" +33 67 model """complex""" +33 67 loss """crossentropy""" +33 67 regularizer """no""" +33 67 optimizer """adam""" +33 67 training_loop """lcwa""" +33 67 evaluator """rankbased""" +33 68 dataset """kinships""" +33 68 model """complex""" +33 68 loss """crossentropy""" +33 68 regularizer """no""" +33 68 optimizer """adam""" +33 68 training_loop """lcwa""" +33 68 evaluator """rankbased""" +33 69 dataset """kinships""" +33 69 model """complex""" +33 69 loss """crossentropy""" +33 69 regularizer """no""" +33 69 optimizer """adam""" +33 69 training_loop """lcwa""" +33 69 evaluator """rankbased""" +33 70 dataset """kinships""" +33 70 model """complex""" +33 70 loss """crossentropy""" +33 70 regularizer """no""" +33 70 optimizer """adam""" +33 70 training_loop """lcwa""" +33 70 evaluator """rankbased""" +33 71 dataset """kinships""" +33 71 model """complex""" +33 71 loss """crossentropy""" +33 71 regularizer """no""" +33 71 optimizer """adam""" +33 71 training_loop """lcwa""" +33 71 evaluator """rankbased""" +33 72 dataset """kinships""" +33 72 model """complex""" +33 72 loss """crossentropy""" +33 72 regularizer """no""" +33 72 optimizer """adam""" +33 72 training_loop """lcwa""" +33 72 evaluator """rankbased""" +33 73 dataset """kinships""" +33 73 model """complex""" +33 73 loss """crossentropy""" +33 73 regularizer """no""" +33 73 optimizer """adam""" +33 73 training_loop """lcwa""" +33 73 evaluator """rankbased""" +33 74 dataset """kinships""" +33 74 model """complex""" +33 74 loss """crossentropy""" +33 74 regularizer """no""" +33 74 optimizer """adam""" +33 74 training_loop """lcwa""" +33 74 evaluator """rankbased""" +33 75 dataset """kinships""" +33 75 model """complex""" +33 75 loss """crossentropy""" +33 75 regularizer """no""" +33 75 optimizer """adam""" +33 75 training_loop """lcwa""" +33 75 evaluator """rankbased""" +33 76 dataset """kinships""" +33 76 model """complex""" +33 76 loss 
"""crossentropy""" +33 76 regularizer """no""" +33 76 optimizer """adam""" +33 76 training_loop """lcwa""" +33 76 evaluator """rankbased""" +33 77 dataset """kinships""" +33 77 model """complex""" +33 77 loss """crossentropy""" +33 77 regularizer """no""" +33 77 optimizer """adam""" +33 77 training_loop """lcwa""" +33 77 evaluator """rankbased""" +33 78 dataset """kinships""" +33 78 model """complex""" +33 78 loss """crossentropy""" +33 78 regularizer """no""" +33 78 optimizer """adam""" +33 78 training_loop """lcwa""" +33 78 evaluator """rankbased""" +33 79 dataset """kinships""" +33 79 model """complex""" +33 79 loss """crossentropy""" +33 79 regularizer """no""" +33 79 optimizer """adam""" +33 79 training_loop """lcwa""" +33 79 evaluator """rankbased""" +33 80 dataset """kinships""" +33 80 model """complex""" +33 80 loss """crossentropy""" +33 80 regularizer """no""" +33 80 optimizer """adam""" +33 80 training_loop """lcwa""" +33 80 evaluator """rankbased""" +33 81 dataset """kinships""" +33 81 model """complex""" +33 81 loss """crossentropy""" +33 81 regularizer """no""" +33 81 optimizer """adam""" +33 81 training_loop """lcwa""" +33 81 evaluator """rankbased""" +33 82 dataset """kinships""" +33 82 model """complex""" +33 82 loss """crossentropy""" +33 82 regularizer """no""" +33 82 optimizer """adam""" +33 82 training_loop """lcwa""" +33 82 evaluator """rankbased""" +33 83 dataset """kinships""" +33 83 model """complex""" +33 83 loss """crossentropy""" +33 83 regularizer """no""" +33 83 optimizer """adam""" +33 83 training_loop """lcwa""" +33 83 evaluator """rankbased""" +33 84 dataset """kinships""" +33 84 model """complex""" +33 84 loss """crossentropy""" +33 84 regularizer """no""" +33 84 optimizer """adam""" +33 84 training_loop """lcwa""" +33 84 evaluator """rankbased""" +33 85 dataset """kinships""" +33 85 model """complex""" +33 85 loss """crossentropy""" +33 85 regularizer """no""" +33 85 optimizer """adam""" +33 85 training_loop """lcwa""" +33 85 
evaluator """rankbased""" +33 86 dataset """kinships""" +33 86 model """complex""" +33 86 loss """crossentropy""" +33 86 regularizer """no""" +33 86 optimizer """adam""" +33 86 training_loop """lcwa""" +33 86 evaluator """rankbased""" +33 87 dataset """kinships""" +33 87 model """complex""" +33 87 loss """crossentropy""" +33 87 regularizer """no""" +33 87 optimizer """adam""" +33 87 training_loop """lcwa""" +33 87 evaluator """rankbased""" +33 88 dataset """kinships""" +33 88 model """complex""" +33 88 loss """crossentropy""" +33 88 regularizer """no""" +33 88 optimizer """adam""" +33 88 training_loop """lcwa""" +33 88 evaluator """rankbased""" +33 89 dataset """kinships""" +33 89 model """complex""" +33 89 loss """crossentropy""" +33 89 regularizer """no""" +33 89 optimizer """adam""" +33 89 training_loop """lcwa""" +33 89 evaluator """rankbased""" +33 90 dataset """kinships""" +33 90 model """complex""" +33 90 loss """crossentropy""" +33 90 regularizer """no""" +33 90 optimizer """adam""" +33 90 training_loop """lcwa""" +33 90 evaluator """rankbased""" +33 91 dataset """kinships""" +33 91 model """complex""" +33 91 loss """crossentropy""" +33 91 regularizer """no""" +33 91 optimizer """adam""" +33 91 training_loop """lcwa""" +33 91 evaluator """rankbased""" +33 92 dataset """kinships""" +33 92 model """complex""" +33 92 loss """crossentropy""" +33 92 regularizer """no""" +33 92 optimizer """adam""" +33 92 training_loop """lcwa""" +33 92 evaluator """rankbased""" +33 93 dataset """kinships""" +33 93 model """complex""" +33 93 loss """crossentropy""" +33 93 regularizer """no""" +33 93 optimizer """adam""" +33 93 training_loop """lcwa""" +33 93 evaluator """rankbased""" +33 94 dataset """kinships""" +33 94 model """complex""" +33 94 loss """crossentropy""" +33 94 regularizer """no""" +33 94 optimizer """adam""" +33 94 training_loop """lcwa""" +33 94 evaluator """rankbased""" +33 95 dataset """kinships""" +33 95 model """complex""" +33 95 loss """crossentropy""" +33 
95 regularizer """no""" +33 95 optimizer """adam""" +33 95 training_loop """lcwa""" +33 95 evaluator """rankbased""" +33 96 dataset """kinships""" +33 96 model """complex""" +33 96 loss """crossentropy""" +33 96 regularizer """no""" +33 96 optimizer """adam""" +33 96 training_loop """lcwa""" +33 96 evaluator """rankbased""" +33 97 dataset """kinships""" +33 97 model """complex""" +33 97 loss """crossentropy""" +33 97 regularizer """no""" +33 97 optimizer """adam""" +33 97 training_loop """lcwa""" +33 97 evaluator """rankbased""" +33 98 dataset """kinships""" +33 98 model """complex""" +33 98 loss """crossentropy""" +33 98 regularizer """no""" +33 98 optimizer """adam""" +33 98 training_loop """lcwa""" +33 98 evaluator """rankbased""" +33 99 dataset """kinships""" +33 99 model """complex""" +33 99 loss """crossentropy""" +33 99 regularizer """no""" +33 99 optimizer """adam""" +33 99 training_loop """lcwa""" +33 99 evaluator """rankbased""" +33 100 dataset """kinships""" +33 100 model """complex""" +33 100 loss """crossentropy""" +33 100 regularizer """no""" +33 100 optimizer """adam""" +33 100 training_loop """lcwa""" +33 100 evaluator """rankbased""" +34 1 model.embedding_dim 0.0 +34 1 optimizer.lr 0.013508236754792966 +34 1 negative_sampler.num_negs_per_pos 60.0 +34 1 training.batch_size 1.0 +34 2 model.embedding_dim 0.0 +34 2 optimizer.lr 0.019466223555351072 +34 2 negative_sampler.num_negs_per_pos 22.0 +34 2 training.batch_size 1.0 +34 3 model.embedding_dim 2.0 +34 3 optimizer.lr 0.004001798147385264 +34 3 negative_sampler.num_negs_per_pos 66.0 +34 3 training.batch_size 1.0 +34 4 model.embedding_dim 0.0 +34 4 optimizer.lr 0.02792305675306618 +34 4 negative_sampler.num_negs_per_pos 6.0 +34 4 training.batch_size 1.0 +34 5 model.embedding_dim 1.0 +34 5 optimizer.lr 0.007024154413915746 +34 5 negative_sampler.num_negs_per_pos 87.0 +34 5 training.batch_size 1.0 +34 6 model.embedding_dim 2.0 +34 6 optimizer.lr 0.006965930890072681 +34 6 
negative_sampler.num_negs_per_pos 36.0 +34 6 training.batch_size 1.0 +34 7 model.embedding_dim 2.0 +34 7 optimizer.lr 0.0033420068629827246 +34 7 negative_sampler.num_negs_per_pos 89.0 +34 7 training.batch_size 2.0 +34 8 model.embedding_dim 0.0 +34 8 optimizer.lr 0.006259161431239367 +34 8 negative_sampler.num_negs_per_pos 62.0 +34 8 training.batch_size 0.0 +34 9 model.embedding_dim 1.0 +34 9 optimizer.lr 0.0010905194807175535 +34 9 negative_sampler.num_negs_per_pos 31.0 +34 9 training.batch_size 1.0 +34 10 model.embedding_dim 2.0 +34 10 optimizer.lr 0.0010628156330059033 +34 10 negative_sampler.num_negs_per_pos 93.0 +34 10 training.batch_size 0.0 +34 11 model.embedding_dim 1.0 +34 11 optimizer.lr 0.07075808251636777 +34 11 negative_sampler.num_negs_per_pos 26.0 +34 11 training.batch_size 2.0 +34 12 model.embedding_dim 0.0 +34 12 optimizer.lr 0.0032068010114032455 +34 12 negative_sampler.num_negs_per_pos 0.0 +34 12 training.batch_size 0.0 +34 13 model.embedding_dim 0.0 +34 13 optimizer.lr 0.033131728936497545 +34 13 negative_sampler.num_negs_per_pos 51.0 +34 13 training.batch_size 1.0 +34 14 model.embedding_dim 2.0 +34 14 optimizer.lr 0.025433550141322907 +34 14 negative_sampler.num_negs_per_pos 13.0 +34 14 training.batch_size 2.0 +34 15 model.embedding_dim 1.0 +34 15 optimizer.lr 0.0017508425703903082 +34 15 negative_sampler.num_negs_per_pos 78.0 +34 15 training.batch_size 2.0 +34 16 model.embedding_dim 1.0 +34 16 optimizer.lr 0.004090583595553068 +34 16 negative_sampler.num_negs_per_pos 65.0 +34 16 training.batch_size 0.0 +34 17 model.embedding_dim 0.0 +34 17 optimizer.lr 0.0033525781531055024 +34 17 negative_sampler.num_negs_per_pos 81.0 +34 17 training.batch_size 0.0 +34 18 model.embedding_dim 2.0 +34 18 optimizer.lr 0.09168056052428201 +34 18 negative_sampler.num_negs_per_pos 37.0 +34 18 training.batch_size 2.0 +34 19 model.embedding_dim 1.0 +34 19 optimizer.lr 0.007195760999793442 +34 19 negative_sampler.num_negs_per_pos 17.0 +34 19 training.batch_size 0.0 
+34 20 model.embedding_dim 2.0 +34 20 optimizer.lr 0.007477119723443889 +34 20 negative_sampler.num_negs_per_pos 67.0 +34 20 training.batch_size 1.0 +34 21 model.embedding_dim 2.0 +34 21 optimizer.lr 0.004608413988840315 +34 21 negative_sampler.num_negs_per_pos 47.0 +34 21 training.batch_size 0.0 +34 22 model.embedding_dim 1.0 +34 22 optimizer.lr 0.002744439742137525 +34 22 negative_sampler.num_negs_per_pos 96.0 +34 22 training.batch_size 0.0 +34 23 model.embedding_dim 2.0 +34 23 optimizer.lr 0.023046441851081033 +34 23 negative_sampler.num_negs_per_pos 45.0 +34 23 training.batch_size 0.0 +34 24 model.embedding_dim 2.0 +34 24 optimizer.lr 0.002691939138071647 +34 24 negative_sampler.num_negs_per_pos 8.0 +34 24 training.batch_size 0.0 +34 25 model.embedding_dim 1.0 +34 25 optimizer.lr 0.017277905758787643 +34 25 negative_sampler.num_negs_per_pos 56.0 +34 25 training.batch_size 2.0 +34 26 model.embedding_dim 2.0 +34 26 optimizer.lr 0.0274566161333569 +34 26 negative_sampler.num_negs_per_pos 45.0 +34 26 training.batch_size 2.0 +34 27 model.embedding_dim 1.0 +34 27 optimizer.lr 0.004021247683164287 +34 27 negative_sampler.num_negs_per_pos 99.0 +34 27 training.batch_size 2.0 +34 28 model.embedding_dim 1.0 +34 28 optimizer.lr 0.0067005501644358595 +34 28 negative_sampler.num_negs_per_pos 10.0 +34 28 training.batch_size 1.0 +34 29 model.embedding_dim 2.0 +34 29 optimizer.lr 0.07151268461106064 +34 29 negative_sampler.num_negs_per_pos 1.0 +34 29 training.batch_size 1.0 +34 30 model.embedding_dim 2.0 +34 30 optimizer.lr 0.008779010357966046 +34 30 negative_sampler.num_negs_per_pos 96.0 +34 30 training.batch_size 0.0 +34 31 model.embedding_dim 2.0 +34 31 optimizer.lr 0.0015141321774861172 +34 31 negative_sampler.num_negs_per_pos 90.0 +34 31 training.batch_size 2.0 +34 32 model.embedding_dim 1.0 +34 32 optimizer.lr 0.016767544753226612 +34 32 negative_sampler.num_negs_per_pos 42.0 +34 32 training.batch_size 2.0 +34 33 model.embedding_dim 2.0 +34 33 optimizer.lr 
0.015102228280732446 +34 33 negative_sampler.num_negs_per_pos 50.0 +34 33 training.batch_size 1.0 +34 34 model.embedding_dim 1.0 +34 34 optimizer.lr 0.04038299075261112 +34 34 negative_sampler.num_negs_per_pos 14.0 +34 34 training.batch_size 1.0 +34 35 model.embedding_dim 1.0 +34 35 optimizer.lr 0.09372067247851289 +34 35 negative_sampler.num_negs_per_pos 81.0 +34 35 training.batch_size 0.0 +34 36 model.embedding_dim 2.0 +34 36 optimizer.lr 0.0013506322814145623 +34 36 negative_sampler.num_negs_per_pos 91.0 +34 36 training.batch_size 0.0 +34 37 model.embedding_dim 0.0 +34 37 optimizer.lr 0.06839352078705674 +34 37 negative_sampler.num_negs_per_pos 40.0 +34 37 training.batch_size 1.0 +34 38 model.embedding_dim 0.0 +34 38 optimizer.lr 0.048157002795245 +34 38 negative_sampler.num_negs_per_pos 14.0 +34 38 training.batch_size 1.0 +34 39 model.embedding_dim 2.0 +34 39 optimizer.lr 0.003031910767720085 +34 39 negative_sampler.num_negs_per_pos 70.0 +34 39 training.batch_size 0.0 +34 40 model.embedding_dim 2.0 +34 40 optimizer.lr 0.012476309088642308 +34 40 negative_sampler.num_negs_per_pos 5.0 +34 40 training.batch_size 0.0 +34 41 model.embedding_dim 1.0 +34 41 optimizer.lr 0.008302068900963971 +34 41 negative_sampler.num_negs_per_pos 76.0 +34 41 training.batch_size 2.0 +34 42 model.embedding_dim 0.0 +34 42 optimizer.lr 0.0011044251909617636 +34 42 negative_sampler.num_negs_per_pos 79.0 +34 42 training.batch_size 1.0 +34 43 model.embedding_dim 2.0 +34 43 optimizer.lr 0.0011535766830755624 +34 43 negative_sampler.num_negs_per_pos 82.0 +34 43 training.batch_size 0.0 +34 44 model.embedding_dim 0.0 +34 44 optimizer.lr 0.01767257518569352 +34 44 negative_sampler.num_negs_per_pos 18.0 +34 44 training.batch_size 1.0 +34 45 model.embedding_dim 2.0 +34 45 optimizer.lr 0.0014297337227404889 +34 45 negative_sampler.num_negs_per_pos 91.0 +34 45 training.batch_size 0.0 +34 46 model.embedding_dim 1.0 +34 46 optimizer.lr 0.003220186865561512 +34 46 negative_sampler.num_negs_per_pos 30.0 
+34 46 training.batch_size 0.0 +34 47 model.embedding_dim 0.0 +34 47 optimizer.lr 0.006686582905305295 +34 47 negative_sampler.num_negs_per_pos 5.0 +34 47 training.batch_size 1.0 +34 48 model.embedding_dim 1.0 +34 48 optimizer.lr 0.0107476278292071 +34 48 negative_sampler.num_negs_per_pos 62.0 +34 48 training.batch_size 2.0 +34 49 model.embedding_dim 0.0 +34 49 optimizer.lr 0.02425566454905034 +34 49 negative_sampler.num_negs_per_pos 11.0 +34 49 training.batch_size 1.0 +34 50 model.embedding_dim 2.0 +34 50 optimizer.lr 0.002354282233724014 +34 50 negative_sampler.num_negs_per_pos 93.0 +34 50 training.batch_size 1.0 +34 51 model.embedding_dim 2.0 +34 51 optimizer.lr 0.0013480623311767433 +34 51 negative_sampler.num_negs_per_pos 6.0 +34 51 training.batch_size 0.0 +34 52 model.embedding_dim 0.0 +34 52 optimizer.lr 0.001250229730172629 +34 52 negative_sampler.num_negs_per_pos 56.0 +34 52 training.batch_size 2.0 +34 53 model.embedding_dim 0.0 +34 53 optimizer.lr 0.07583718665646132 +34 53 negative_sampler.num_negs_per_pos 14.0 +34 53 training.batch_size 0.0 +34 54 model.embedding_dim 1.0 +34 54 optimizer.lr 0.0012743304075732136 +34 54 negative_sampler.num_negs_per_pos 17.0 +34 54 training.batch_size 0.0 +34 55 model.embedding_dim 1.0 +34 55 optimizer.lr 0.007670113690490968 +34 55 negative_sampler.num_negs_per_pos 21.0 +34 55 training.batch_size 0.0 +34 56 model.embedding_dim 0.0 +34 56 optimizer.lr 0.09117147069109874 +34 56 negative_sampler.num_negs_per_pos 54.0 +34 56 training.batch_size 1.0 +34 57 model.embedding_dim 0.0 +34 57 optimizer.lr 0.0015931902535368226 +34 57 negative_sampler.num_negs_per_pos 42.0 +34 57 training.batch_size 0.0 +34 58 model.embedding_dim 2.0 +34 58 optimizer.lr 0.0951218719311045 +34 58 negative_sampler.num_negs_per_pos 13.0 +34 58 training.batch_size 0.0 +34 59 model.embedding_dim 0.0 +34 59 optimizer.lr 0.002013287484370435 +34 59 negative_sampler.num_negs_per_pos 54.0 +34 59 training.batch_size 1.0 +34 60 model.embedding_dim 2.0 +34 60 
optimizer.lr 0.002354693479276503 +34 60 negative_sampler.num_negs_per_pos 33.0 +34 60 training.batch_size 1.0 +34 61 model.embedding_dim 1.0 +34 61 optimizer.lr 0.002419170311413402 +34 61 negative_sampler.num_negs_per_pos 67.0 +34 61 training.batch_size 2.0 +34 62 model.embedding_dim 2.0 +34 62 optimizer.lr 0.005441031220547776 +34 62 negative_sampler.num_negs_per_pos 98.0 +34 62 training.batch_size 1.0 +34 63 model.embedding_dim 2.0 +34 63 optimizer.lr 0.010969228484260915 +34 63 negative_sampler.num_negs_per_pos 31.0 +34 63 training.batch_size 1.0 +34 64 model.embedding_dim 1.0 +34 64 optimizer.lr 0.024673305662279096 +34 64 negative_sampler.num_negs_per_pos 44.0 +34 64 training.batch_size 0.0 +34 65 model.embedding_dim 2.0 +34 65 optimizer.lr 0.08331487011760337 +34 65 negative_sampler.num_negs_per_pos 63.0 +34 65 training.batch_size 2.0 +34 66 model.embedding_dim 0.0 +34 66 optimizer.lr 0.02794457597422786 +34 66 negative_sampler.num_negs_per_pos 70.0 +34 66 training.batch_size 2.0 +34 67 model.embedding_dim 0.0 +34 67 optimizer.lr 0.07064451061009999 +34 67 negative_sampler.num_negs_per_pos 33.0 +34 67 training.batch_size 1.0 +34 68 model.embedding_dim 0.0 +34 68 optimizer.lr 0.006551014661855654 +34 68 negative_sampler.num_negs_per_pos 56.0 +34 68 training.batch_size 0.0 +34 69 model.embedding_dim 0.0 +34 69 optimizer.lr 0.00928463158427555 +34 69 negative_sampler.num_negs_per_pos 96.0 +34 69 training.batch_size 0.0 +34 70 model.embedding_dim 2.0 +34 70 optimizer.lr 0.020292717530511116 +34 70 negative_sampler.num_negs_per_pos 67.0 +34 70 training.batch_size 1.0 +34 71 model.embedding_dim 0.0 +34 71 optimizer.lr 0.047458688705828 +34 71 negative_sampler.num_negs_per_pos 37.0 +34 71 training.batch_size 0.0 +34 72 model.embedding_dim 2.0 +34 72 optimizer.lr 0.004070880897607341 +34 72 negative_sampler.num_negs_per_pos 70.0 +34 72 training.batch_size 1.0 +34 73 model.embedding_dim 1.0 +34 73 optimizer.lr 0.032422798493380985 +34 73 
negative_sampler.num_negs_per_pos 21.0 +34 73 training.batch_size 1.0 +34 74 model.embedding_dim 2.0 +34 74 optimizer.lr 0.060504580757484556 +34 74 negative_sampler.num_negs_per_pos 98.0 +34 74 training.batch_size 2.0 +34 75 model.embedding_dim 2.0 +34 75 optimizer.lr 0.033554316941766305 +34 75 negative_sampler.num_negs_per_pos 10.0 +34 75 training.batch_size 1.0 +34 76 model.embedding_dim 1.0 +34 76 optimizer.lr 0.014791191224109808 +34 76 negative_sampler.num_negs_per_pos 70.0 +34 76 training.batch_size 1.0 +34 77 model.embedding_dim 1.0 +34 77 optimizer.lr 0.001170255873087402 +34 77 negative_sampler.num_negs_per_pos 84.0 +34 77 training.batch_size 2.0 +34 78 model.embedding_dim 1.0 +34 78 optimizer.lr 0.09318600198450128 +34 78 negative_sampler.num_negs_per_pos 16.0 +34 78 training.batch_size 0.0 +34 79 model.embedding_dim 1.0 +34 79 optimizer.lr 0.005849506581301945 +34 79 negative_sampler.num_negs_per_pos 98.0 +34 79 training.batch_size 2.0 +34 80 model.embedding_dim 0.0 +34 80 optimizer.lr 0.016860078080492315 +34 80 negative_sampler.num_negs_per_pos 3.0 +34 80 training.batch_size 2.0 +34 81 model.embedding_dim 1.0 +34 81 optimizer.lr 0.012809553372435827 +34 81 negative_sampler.num_negs_per_pos 31.0 +34 81 training.batch_size 1.0 +34 82 model.embedding_dim 1.0 +34 82 optimizer.lr 0.005051547227862752 +34 82 negative_sampler.num_negs_per_pos 96.0 +34 82 training.batch_size 0.0 +34 83 model.embedding_dim 0.0 +34 83 optimizer.lr 0.0054433766427060114 +34 83 negative_sampler.num_negs_per_pos 16.0 +34 83 training.batch_size 2.0 +34 84 model.embedding_dim 1.0 +34 84 optimizer.lr 0.0012599582671925511 +34 84 negative_sampler.num_negs_per_pos 13.0 +34 84 training.batch_size 2.0 +34 85 model.embedding_dim 0.0 +34 85 optimizer.lr 0.06423813747024981 +34 85 negative_sampler.num_negs_per_pos 35.0 +34 85 training.batch_size 2.0 +34 86 model.embedding_dim 1.0 +34 86 optimizer.lr 0.032409258132002176 +34 86 negative_sampler.num_negs_per_pos 29.0 +34 86 
training.batch_size 1.0 +34 87 model.embedding_dim 1.0 +34 87 optimizer.lr 0.008135340110572035 +34 87 negative_sampler.num_negs_per_pos 46.0 +34 87 training.batch_size 1.0 +34 88 model.embedding_dim 2.0 +34 88 optimizer.lr 0.01655099452961773 +34 88 negative_sampler.num_negs_per_pos 97.0 +34 88 training.batch_size 1.0 +34 89 model.embedding_dim 2.0 +34 89 optimizer.lr 0.002293633856473054 +34 89 negative_sampler.num_negs_per_pos 97.0 +34 89 training.batch_size 1.0 +34 90 model.embedding_dim 1.0 +34 90 optimizer.lr 0.0031125186577606635 +34 90 negative_sampler.num_negs_per_pos 87.0 +34 90 training.batch_size 0.0 +34 91 model.embedding_dim 0.0 +34 91 optimizer.lr 0.012843105003411414 +34 91 negative_sampler.num_negs_per_pos 87.0 +34 91 training.batch_size 0.0 +34 92 model.embedding_dim 0.0 +34 92 optimizer.lr 0.06146158753894436 +34 92 negative_sampler.num_negs_per_pos 35.0 +34 92 training.batch_size 1.0 +34 93 model.embedding_dim 0.0 +34 93 optimizer.lr 0.024076166296356616 +34 93 negative_sampler.num_negs_per_pos 52.0 +34 93 training.batch_size 2.0 +34 94 model.embedding_dim 1.0 +34 94 optimizer.lr 0.01870061479986768 +34 94 negative_sampler.num_negs_per_pos 21.0 +34 94 training.batch_size 0.0 +34 95 model.embedding_dim 1.0 +34 95 optimizer.lr 0.003292756629322468 +34 95 negative_sampler.num_negs_per_pos 1.0 +34 95 training.batch_size 1.0 +34 96 model.embedding_dim 1.0 +34 96 optimizer.lr 0.006744526959250551 +34 96 negative_sampler.num_negs_per_pos 6.0 +34 96 training.batch_size 0.0 +34 97 model.embedding_dim 1.0 +34 97 optimizer.lr 0.0021777484361612904 +34 97 negative_sampler.num_negs_per_pos 15.0 +34 97 training.batch_size 0.0 +34 98 model.embedding_dim 2.0 +34 98 optimizer.lr 0.008121270251069655 +34 98 negative_sampler.num_negs_per_pos 91.0 +34 98 training.batch_size 0.0 +34 99 model.embedding_dim 2.0 +34 99 optimizer.lr 0.06575974422060238 +34 99 negative_sampler.num_negs_per_pos 71.0 +34 99 training.batch_size 2.0 +34 100 model.embedding_dim 0.0 +34 100 
optimizer.lr 0.0023720318113984924 +34 100 negative_sampler.num_negs_per_pos 67.0 +34 100 training.batch_size 2.0 +34 1 dataset """kinships""" +34 1 model """complex""" +34 1 loss """bceaftersigmoid""" +34 1 regularizer """no""" +34 1 optimizer """adam""" +34 1 training_loop """owa""" +34 1 negative_sampler """basic""" +34 1 evaluator """rankbased""" +34 2 dataset """kinships""" +34 2 model """complex""" +34 2 loss """bceaftersigmoid""" +34 2 regularizer """no""" +34 2 optimizer """adam""" +34 2 training_loop """owa""" +34 2 negative_sampler """basic""" +34 2 evaluator """rankbased""" +34 3 dataset """kinships""" +34 3 model """complex""" +34 3 loss """bceaftersigmoid""" +34 3 regularizer """no""" +34 3 optimizer """adam""" +34 3 training_loop """owa""" +34 3 negative_sampler """basic""" +34 3 evaluator """rankbased""" +34 4 dataset """kinships""" +34 4 model """complex""" +34 4 loss """bceaftersigmoid""" +34 4 regularizer """no""" +34 4 optimizer """adam""" +34 4 training_loop """owa""" +34 4 negative_sampler """basic""" +34 4 evaluator """rankbased""" +34 5 dataset """kinships""" +34 5 model """complex""" +34 5 loss """bceaftersigmoid""" +34 5 regularizer """no""" +34 5 optimizer """adam""" +34 5 training_loop """owa""" +34 5 negative_sampler """basic""" +34 5 evaluator """rankbased""" +34 6 dataset """kinships""" +34 6 model """complex""" +34 6 loss """bceaftersigmoid""" +34 6 regularizer """no""" +34 6 optimizer """adam""" +34 6 training_loop """owa""" +34 6 negative_sampler """basic""" +34 6 evaluator """rankbased""" +34 7 dataset """kinships""" +34 7 model """complex""" +34 7 loss """bceaftersigmoid""" +34 7 regularizer """no""" +34 7 optimizer """adam""" +34 7 training_loop """owa""" +34 7 negative_sampler """basic""" +34 7 evaluator """rankbased""" +34 8 dataset """kinships""" +34 8 model """complex""" +34 8 loss """bceaftersigmoid""" +34 8 regularizer """no""" +34 8 optimizer """adam""" +34 8 training_loop """owa""" +34 8 negative_sampler """basic""" +34 8 
evaluator """rankbased""" +34 9 dataset """kinships""" +34 9 model """complex""" +34 9 loss """bceaftersigmoid""" +34 9 regularizer """no""" +34 9 optimizer """adam""" +34 9 training_loop """owa""" +34 9 negative_sampler """basic""" +34 9 evaluator """rankbased""" +34 10 dataset """kinships""" +34 10 model """complex""" +34 10 loss """bceaftersigmoid""" +34 10 regularizer """no""" +34 10 optimizer """adam""" +34 10 training_loop """owa""" +34 10 negative_sampler """basic""" +34 10 evaluator """rankbased""" +34 11 dataset """kinships""" +34 11 model """complex""" +34 11 loss """bceaftersigmoid""" +34 11 regularizer """no""" +34 11 optimizer """adam""" +34 11 training_loop """owa""" +34 11 negative_sampler """basic""" +34 11 evaluator """rankbased""" +34 12 dataset """kinships""" +34 12 model """complex""" +34 12 loss """bceaftersigmoid""" +34 12 regularizer """no""" +34 12 optimizer """adam""" +34 12 training_loop """owa""" +34 12 negative_sampler """basic""" +34 12 evaluator """rankbased""" +34 13 dataset """kinships""" +34 13 model """complex""" +34 13 loss """bceaftersigmoid""" +34 13 regularizer """no""" +34 13 optimizer """adam""" +34 13 training_loop """owa""" +34 13 negative_sampler """basic""" +34 13 evaluator """rankbased""" +34 14 dataset """kinships""" +34 14 model """complex""" +34 14 loss """bceaftersigmoid""" +34 14 regularizer """no""" +34 14 optimizer """adam""" +34 14 training_loop """owa""" +34 14 negative_sampler """basic""" +34 14 evaluator """rankbased""" +34 15 dataset """kinships""" +34 15 model """complex""" +34 15 loss """bceaftersigmoid""" +34 15 regularizer """no""" +34 15 optimizer """adam""" +34 15 training_loop """owa""" +34 15 negative_sampler """basic""" +34 15 evaluator """rankbased""" +34 16 dataset """kinships""" +34 16 model """complex""" +34 16 loss """bceaftersigmoid""" +34 16 regularizer """no""" +34 16 optimizer """adam""" +34 16 training_loop """owa""" +34 16 negative_sampler """basic""" +34 16 evaluator """rankbased""" +34 
17 dataset """kinships""" +34 17 model """complex""" +34 17 loss """bceaftersigmoid""" +34 17 regularizer """no""" +34 17 optimizer """adam""" +34 17 training_loop """owa""" +34 17 negative_sampler """basic""" +34 17 evaluator """rankbased""" +34 18 dataset """kinships""" +34 18 model """complex""" +34 18 loss """bceaftersigmoid""" +34 18 regularizer """no""" +34 18 optimizer """adam""" +34 18 training_loop """owa""" +34 18 negative_sampler """basic""" +34 18 evaluator """rankbased""" +34 19 dataset """kinships""" +34 19 model """complex""" +34 19 loss """bceaftersigmoid""" +34 19 regularizer """no""" +34 19 optimizer """adam""" +34 19 training_loop """owa""" +34 19 negative_sampler """basic""" +34 19 evaluator """rankbased""" +34 20 dataset """kinships""" +34 20 model """complex""" +34 20 loss """bceaftersigmoid""" +34 20 regularizer """no""" +34 20 optimizer """adam""" +34 20 training_loop """owa""" +34 20 negative_sampler """basic""" +34 20 evaluator """rankbased""" +34 21 dataset """kinships""" +34 21 model """complex""" +34 21 loss """bceaftersigmoid""" +34 21 regularizer """no""" +34 21 optimizer """adam""" +34 21 training_loop """owa""" +34 21 negative_sampler """basic""" +34 21 evaluator """rankbased""" +34 22 dataset """kinships""" +34 22 model """complex""" +34 22 loss """bceaftersigmoid""" +34 22 regularizer """no""" +34 22 optimizer """adam""" +34 22 training_loop """owa""" +34 22 negative_sampler """basic""" +34 22 evaluator """rankbased""" +34 23 dataset """kinships""" +34 23 model """complex""" +34 23 loss """bceaftersigmoid""" +34 23 regularizer """no""" +34 23 optimizer """adam""" +34 23 training_loop """owa""" +34 23 negative_sampler """basic""" +34 23 evaluator """rankbased""" +34 24 dataset """kinships""" +34 24 model """complex""" +34 24 loss """bceaftersigmoid""" +34 24 regularizer """no""" +34 24 optimizer """adam""" +34 24 training_loop """owa""" +34 24 negative_sampler """basic""" +34 24 evaluator """rankbased""" +34 25 dataset 
"""kinships""" +34 25 model """complex""" +34 25 loss """bceaftersigmoid""" +34 25 regularizer """no""" +34 25 optimizer """adam""" +34 25 training_loop """owa""" +34 25 negative_sampler """basic""" +34 25 evaluator """rankbased""" +34 26 dataset """kinships""" +34 26 model """complex""" +34 26 loss """bceaftersigmoid""" +34 26 regularizer """no""" +34 26 optimizer """adam""" +34 26 training_loop """owa""" +34 26 negative_sampler """basic""" +34 26 evaluator """rankbased""" +34 27 dataset """kinships""" +34 27 model """complex""" +34 27 loss """bceaftersigmoid""" +34 27 regularizer """no""" +34 27 optimizer """adam""" +34 27 training_loop """owa""" +34 27 negative_sampler """basic""" +34 27 evaluator """rankbased""" +34 28 dataset """kinships""" +34 28 model """complex""" +34 28 loss """bceaftersigmoid""" +34 28 regularizer """no""" +34 28 optimizer """adam""" +34 28 training_loop """owa""" +34 28 negative_sampler """basic""" +34 28 evaluator """rankbased""" +34 29 dataset """kinships""" +34 29 model """complex""" +34 29 loss """bceaftersigmoid""" +34 29 regularizer """no""" +34 29 optimizer """adam""" +34 29 training_loop """owa""" +34 29 negative_sampler """basic""" +34 29 evaluator """rankbased""" +34 30 dataset """kinships""" +34 30 model """complex""" +34 30 loss """bceaftersigmoid""" +34 30 regularizer """no""" +34 30 optimizer """adam""" +34 30 training_loop """owa""" +34 30 negative_sampler """basic""" +34 30 evaluator """rankbased""" +34 31 dataset """kinships""" +34 31 model """complex""" +34 31 loss """bceaftersigmoid""" +34 31 regularizer """no""" +34 31 optimizer """adam""" +34 31 training_loop """owa""" +34 31 negative_sampler """basic""" +34 31 evaluator """rankbased""" +34 32 dataset """kinships""" +34 32 model """complex""" +34 32 loss """bceaftersigmoid""" +34 32 regularizer """no""" +34 32 optimizer """adam""" +34 32 training_loop """owa""" +34 32 negative_sampler """basic""" +34 32 evaluator """rankbased""" +34 33 dataset """kinships""" +34 33 
model """complex""" +34 33 loss """bceaftersigmoid""" +34 33 regularizer """no""" +34 33 optimizer """adam""" +34 33 training_loop """owa""" +34 33 negative_sampler """basic""" +34 33 evaluator """rankbased""" +34 34 dataset """kinships""" +34 34 model """complex""" +34 34 loss """bceaftersigmoid""" +34 34 regularizer """no""" +34 34 optimizer """adam""" +34 34 training_loop """owa""" +34 34 negative_sampler """basic""" +34 34 evaluator """rankbased""" +34 35 dataset """kinships""" +34 35 model """complex""" +34 35 loss """bceaftersigmoid""" +34 35 regularizer """no""" +34 35 optimizer """adam""" +34 35 training_loop """owa""" +34 35 negative_sampler """basic""" +34 35 evaluator """rankbased""" +34 36 dataset """kinships""" +34 36 model """complex""" +34 36 loss """bceaftersigmoid""" +34 36 regularizer """no""" +34 36 optimizer """adam""" +34 36 training_loop """owa""" +34 36 negative_sampler """basic""" +34 36 evaluator """rankbased""" +34 37 dataset """kinships""" +34 37 model """complex""" +34 37 loss """bceaftersigmoid""" +34 37 regularizer """no""" +34 37 optimizer """adam""" +34 37 training_loop """owa""" +34 37 negative_sampler """basic""" +34 37 evaluator """rankbased""" +34 38 dataset """kinships""" +34 38 model """complex""" +34 38 loss """bceaftersigmoid""" +34 38 regularizer """no""" +34 38 optimizer """adam""" +34 38 training_loop """owa""" +34 38 negative_sampler """basic""" +34 38 evaluator """rankbased""" +34 39 dataset """kinships""" +34 39 model """complex""" +34 39 loss """bceaftersigmoid""" +34 39 regularizer """no""" +34 39 optimizer """adam""" +34 39 training_loop """owa""" +34 39 negative_sampler """basic""" +34 39 evaluator """rankbased""" +34 40 dataset """kinships""" +34 40 model """complex""" +34 40 loss """bceaftersigmoid""" +34 40 regularizer """no""" +34 40 optimizer """adam""" +34 40 training_loop """owa""" +34 40 negative_sampler """basic""" +34 40 evaluator """rankbased""" +34 41 dataset """kinships""" +34 41 model """complex""" +34 
41 loss """bceaftersigmoid""" +34 41 regularizer """no""" +34 41 optimizer """adam""" +34 41 training_loop """owa""" +34 41 negative_sampler """basic""" +34 41 evaluator """rankbased""" +34 42 dataset """kinships""" +34 42 model """complex""" +34 42 loss """bceaftersigmoid""" +34 42 regularizer """no""" +34 42 optimizer """adam""" +34 42 training_loop """owa""" +34 42 negative_sampler """basic""" +34 42 evaluator """rankbased""" +34 43 dataset """kinships""" +34 43 model """complex""" +34 43 loss """bceaftersigmoid""" +34 43 regularizer """no""" +34 43 optimizer """adam""" +34 43 training_loop """owa""" +34 43 negative_sampler """basic""" +34 43 evaluator """rankbased""" +34 44 dataset """kinships""" +34 44 model """complex""" +34 44 loss """bceaftersigmoid""" +34 44 regularizer """no""" +34 44 optimizer """adam""" +34 44 training_loop """owa""" +34 44 negative_sampler """basic""" +34 44 evaluator """rankbased""" +34 45 dataset """kinships""" +34 45 model """complex""" +34 45 loss """bceaftersigmoid""" +34 45 regularizer """no""" +34 45 optimizer """adam""" +34 45 training_loop """owa""" +34 45 negative_sampler """basic""" +34 45 evaluator """rankbased""" +34 46 dataset """kinships""" +34 46 model """complex""" +34 46 loss """bceaftersigmoid""" +34 46 regularizer """no""" +34 46 optimizer """adam""" +34 46 training_loop """owa""" +34 46 negative_sampler """basic""" +34 46 evaluator """rankbased""" +34 47 dataset """kinships""" +34 47 model """complex""" +34 47 loss """bceaftersigmoid""" +34 47 regularizer """no""" +34 47 optimizer """adam""" +34 47 training_loop """owa""" +34 47 negative_sampler """basic""" +34 47 evaluator """rankbased""" +34 48 dataset """kinships""" +34 48 model """complex""" +34 48 loss """bceaftersigmoid""" +34 48 regularizer """no""" +34 48 optimizer """adam""" +34 48 training_loop """owa""" +34 48 negative_sampler """basic""" +34 48 evaluator """rankbased""" +34 49 dataset """kinships""" +34 49 model """complex""" +34 49 loss 
"""bceaftersigmoid""" +34 49 regularizer """no""" +34 49 optimizer """adam""" +34 49 training_loop """owa""" +34 49 negative_sampler """basic""" +34 49 evaluator """rankbased""" +34 50 dataset """kinships""" +34 50 model """complex""" +34 50 loss """bceaftersigmoid""" +34 50 regularizer """no""" +34 50 optimizer """adam""" +34 50 training_loop """owa""" +34 50 negative_sampler """basic""" +34 50 evaluator """rankbased""" +34 51 dataset """kinships""" +34 51 model """complex""" +34 51 loss """bceaftersigmoid""" +34 51 regularizer """no""" +34 51 optimizer """adam""" +34 51 training_loop """owa""" +34 51 negative_sampler """basic""" +34 51 evaluator """rankbased""" +34 52 dataset """kinships""" +34 52 model """complex""" +34 52 loss """bceaftersigmoid""" +34 52 regularizer """no""" +34 52 optimizer """adam""" +34 52 training_loop """owa""" +34 52 negative_sampler """basic""" +34 52 evaluator """rankbased""" +34 53 dataset """kinships""" +34 53 model """complex""" +34 53 loss """bceaftersigmoid""" +34 53 regularizer """no""" +34 53 optimizer """adam""" +34 53 training_loop """owa""" +34 53 negative_sampler """basic""" +34 53 evaluator """rankbased""" +34 54 dataset """kinships""" +34 54 model """complex""" +34 54 loss """bceaftersigmoid""" +34 54 regularizer """no""" +34 54 optimizer """adam""" +34 54 training_loop """owa""" +34 54 negative_sampler """basic""" +34 54 evaluator """rankbased""" +34 55 dataset """kinships""" +34 55 model """complex""" +34 55 loss """bceaftersigmoid""" +34 55 regularizer """no""" +34 55 optimizer """adam""" +34 55 training_loop """owa""" +34 55 negative_sampler """basic""" +34 55 evaluator """rankbased""" +34 56 dataset """kinships""" +34 56 model """complex""" +34 56 loss """bceaftersigmoid""" +34 56 regularizer """no""" +34 56 optimizer """adam""" +34 56 training_loop """owa""" +34 56 negative_sampler """basic""" +34 56 evaluator """rankbased""" +34 57 dataset """kinships""" +34 57 model """complex""" +34 57 loss """bceaftersigmoid""" 
+34 57 regularizer """no""" +34 57 optimizer """adam""" +34 57 training_loop """owa""" +34 57 negative_sampler """basic""" +34 57 evaluator """rankbased""" +34 58 dataset """kinships""" +34 58 model """complex""" +34 58 loss """bceaftersigmoid""" +34 58 regularizer """no""" +34 58 optimizer """adam""" +34 58 training_loop """owa""" +34 58 negative_sampler """basic""" +34 58 evaluator """rankbased""" +34 59 dataset """kinships""" +34 59 model """complex""" +34 59 loss """bceaftersigmoid""" +34 59 regularizer """no""" +34 59 optimizer """adam""" +34 59 training_loop """owa""" +34 59 negative_sampler """basic""" +34 59 evaluator """rankbased""" +34 60 dataset """kinships""" +34 60 model """complex""" +34 60 loss """bceaftersigmoid""" +34 60 regularizer """no""" +34 60 optimizer """adam""" +34 60 training_loop """owa""" +34 60 negative_sampler """basic""" +34 60 evaluator """rankbased""" +34 61 dataset """kinships""" +34 61 model """complex""" +34 61 loss """bceaftersigmoid""" +34 61 regularizer """no""" +34 61 optimizer """adam""" +34 61 training_loop """owa""" +34 61 negative_sampler """basic""" +34 61 evaluator """rankbased""" +34 62 dataset """kinships""" +34 62 model """complex""" +34 62 loss """bceaftersigmoid""" +34 62 regularizer """no""" +34 62 optimizer """adam""" +34 62 training_loop """owa""" +34 62 negative_sampler """basic""" +34 62 evaluator """rankbased""" +34 63 dataset """kinships""" +34 63 model """complex""" +34 63 loss """bceaftersigmoid""" +34 63 regularizer """no""" +34 63 optimizer """adam""" +34 63 training_loop """owa""" +34 63 negative_sampler """basic""" +34 63 evaluator """rankbased""" +34 64 dataset """kinships""" +34 64 model """complex""" +34 64 loss """bceaftersigmoid""" +34 64 regularizer """no""" +34 64 optimizer """adam""" +34 64 training_loop """owa""" +34 64 negative_sampler """basic""" +34 64 evaluator """rankbased""" +34 65 dataset """kinships""" +34 65 model """complex""" +34 65 loss """bceaftersigmoid""" +34 65 regularizer 
"""no""" +34 65 optimizer """adam""" +34 65 training_loop """owa""" +34 65 negative_sampler """basic""" +34 65 evaluator """rankbased""" +34 66 dataset """kinships""" +34 66 model """complex""" +34 66 loss """bceaftersigmoid""" +34 66 regularizer """no""" +34 66 optimizer """adam""" +34 66 training_loop """owa""" +34 66 negative_sampler """basic""" +34 66 evaluator """rankbased""" +34 67 dataset """kinships""" +34 67 model """complex""" +34 67 loss """bceaftersigmoid""" +34 67 regularizer """no""" +34 67 optimizer """adam""" +34 67 training_loop """owa""" +34 67 negative_sampler """basic""" +34 67 evaluator """rankbased""" +34 68 dataset """kinships""" +34 68 model """complex""" +34 68 loss """bceaftersigmoid""" +34 68 regularizer """no""" +34 68 optimizer """adam""" +34 68 training_loop """owa""" +34 68 negative_sampler """basic""" +34 68 evaluator """rankbased""" +34 69 dataset """kinships""" +34 69 model """complex""" +34 69 loss """bceaftersigmoid""" +34 69 regularizer """no""" +34 69 optimizer """adam""" +34 69 training_loop """owa""" +34 69 negative_sampler """basic""" +34 69 evaluator """rankbased""" +34 70 dataset """kinships""" +34 70 model """complex""" +34 70 loss """bceaftersigmoid""" +34 70 regularizer """no""" +34 70 optimizer """adam""" +34 70 training_loop """owa""" +34 70 negative_sampler """basic""" +34 70 evaluator """rankbased""" +34 71 dataset """kinships""" +34 71 model """complex""" +34 71 loss """bceaftersigmoid""" +34 71 regularizer """no""" +34 71 optimizer """adam""" +34 71 training_loop """owa""" +34 71 negative_sampler """basic""" +34 71 evaluator """rankbased""" +34 72 dataset """kinships""" +34 72 model """complex""" +34 72 loss """bceaftersigmoid""" +34 72 regularizer """no""" +34 72 optimizer """adam""" +34 72 training_loop """owa""" +34 72 negative_sampler """basic""" +34 72 evaluator """rankbased""" +34 73 dataset """kinships""" +34 73 model """complex""" +34 73 loss """bceaftersigmoid""" +34 73 regularizer """no""" +34 73 
optimizer """adam""" +34 73 training_loop """owa""" +34 73 negative_sampler """basic""" +34 73 evaluator """rankbased""" +34 74 dataset """kinships""" +34 74 model """complex""" +34 74 loss """bceaftersigmoid""" +34 74 regularizer """no""" +34 74 optimizer """adam""" +34 74 training_loop """owa""" +34 74 negative_sampler """basic""" +34 74 evaluator """rankbased""" +34 75 dataset """kinships""" +34 75 model """complex""" +34 75 loss """bceaftersigmoid""" +34 75 regularizer """no""" +34 75 optimizer """adam""" +34 75 training_loop """owa""" +34 75 negative_sampler """basic""" +34 75 evaluator """rankbased""" +34 76 dataset """kinships""" +34 76 model """complex""" +34 76 loss """bceaftersigmoid""" +34 76 regularizer """no""" +34 76 optimizer """adam""" +34 76 training_loop """owa""" +34 76 negative_sampler """basic""" +34 76 evaluator """rankbased""" +34 77 dataset """kinships""" +34 77 model """complex""" +34 77 loss """bceaftersigmoid""" +34 77 regularizer """no""" +34 77 optimizer """adam""" +34 77 training_loop """owa""" +34 77 negative_sampler """basic""" +34 77 evaluator """rankbased""" +34 78 dataset """kinships""" +34 78 model """complex""" +34 78 loss """bceaftersigmoid""" +34 78 regularizer """no""" +34 78 optimizer """adam""" +34 78 training_loop """owa""" +34 78 negative_sampler """basic""" +34 78 evaluator """rankbased""" +34 79 dataset """kinships""" +34 79 model """complex""" +34 79 loss """bceaftersigmoid""" +34 79 regularizer """no""" +34 79 optimizer """adam""" +34 79 training_loop """owa""" +34 79 negative_sampler """basic""" +34 79 evaluator """rankbased""" +34 80 dataset """kinships""" +34 80 model """complex""" +34 80 loss """bceaftersigmoid""" +34 80 regularizer """no""" +34 80 optimizer """adam""" +34 80 training_loop """owa""" +34 80 negative_sampler """basic""" +34 80 evaluator """rankbased""" +34 81 dataset """kinships""" +34 81 model """complex""" +34 81 loss """bceaftersigmoid""" +34 81 regularizer """no""" +34 81 optimizer """adam""" 
+34 81 training_loop """owa""" +34 81 negative_sampler """basic""" +34 81 evaluator """rankbased""" +34 82 dataset """kinships""" +34 82 model """complex""" +34 82 loss """bceaftersigmoid""" +34 82 regularizer """no""" +34 82 optimizer """adam""" +34 82 training_loop """owa""" +34 82 negative_sampler """basic""" +34 82 evaluator """rankbased""" +34 83 dataset """kinships""" +34 83 model """complex""" +34 83 loss """bceaftersigmoid""" +34 83 regularizer """no""" +34 83 optimizer """adam""" +34 83 training_loop """owa""" +34 83 negative_sampler """basic""" +34 83 evaluator """rankbased""" +34 84 dataset """kinships""" +34 84 model """complex""" +34 84 loss """bceaftersigmoid""" +34 84 regularizer """no""" +34 84 optimizer """adam""" +34 84 training_loop """owa""" +34 84 negative_sampler """basic""" +34 84 evaluator """rankbased""" +34 85 dataset """kinships""" +34 85 model """complex""" +34 85 loss """bceaftersigmoid""" +34 85 regularizer """no""" +34 85 optimizer """adam""" +34 85 training_loop """owa""" +34 85 negative_sampler """basic""" +34 85 evaluator """rankbased""" +34 86 dataset """kinships""" +34 86 model """complex""" +34 86 loss """bceaftersigmoid""" +34 86 regularizer """no""" +34 86 optimizer """adam""" +34 86 training_loop """owa""" +34 86 negative_sampler """basic""" +34 86 evaluator """rankbased""" +34 87 dataset """kinships""" +34 87 model """complex""" +34 87 loss """bceaftersigmoid""" +34 87 regularizer """no""" +34 87 optimizer """adam""" +34 87 training_loop """owa""" +34 87 negative_sampler """basic""" +34 87 evaluator """rankbased""" +34 88 dataset """kinships""" +34 88 model """complex""" +34 88 loss """bceaftersigmoid""" +34 88 regularizer """no""" +34 88 optimizer """adam""" +34 88 training_loop """owa""" +34 88 negative_sampler """basic""" +34 88 evaluator """rankbased""" +34 89 dataset """kinships""" +34 89 model """complex""" +34 89 loss """bceaftersigmoid""" +34 89 regularizer """no""" +34 89 optimizer """adam""" +34 89 training_loop 
"""owa""" +34 89 negative_sampler """basic""" +34 89 evaluator """rankbased""" +34 90 dataset """kinships""" +34 90 model """complex""" +34 90 loss """bceaftersigmoid""" +34 90 regularizer """no""" +34 90 optimizer """adam""" +34 90 training_loop """owa""" +34 90 negative_sampler """basic""" +34 90 evaluator """rankbased""" +34 91 dataset """kinships""" +34 91 model """complex""" +34 91 loss """bceaftersigmoid""" +34 91 regularizer """no""" +34 91 optimizer """adam""" +34 91 training_loop """owa""" +34 91 negative_sampler """basic""" +34 91 evaluator """rankbased""" +34 92 dataset """kinships""" +34 92 model """complex""" +34 92 loss """bceaftersigmoid""" +34 92 regularizer """no""" +34 92 optimizer """adam""" +34 92 training_loop """owa""" +34 92 negative_sampler """basic""" +34 92 evaluator """rankbased""" +34 93 dataset """kinships""" +34 93 model """complex""" +34 93 loss """bceaftersigmoid""" +34 93 regularizer """no""" +34 93 optimizer """adam""" +34 93 training_loop """owa""" +34 93 negative_sampler """basic""" +34 93 evaluator """rankbased""" +34 94 dataset """kinships""" +34 94 model """complex""" +34 94 loss """bceaftersigmoid""" +34 94 regularizer """no""" +34 94 optimizer """adam""" +34 94 training_loop """owa""" +34 94 negative_sampler """basic""" +34 94 evaluator """rankbased""" +34 95 dataset """kinships""" +34 95 model """complex""" +34 95 loss """bceaftersigmoid""" +34 95 regularizer """no""" +34 95 optimizer """adam""" +34 95 training_loop """owa""" +34 95 negative_sampler """basic""" +34 95 evaluator """rankbased""" +34 96 dataset """kinships""" +34 96 model """complex""" +34 96 loss """bceaftersigmoid""" +34 96 regularizer """no""" +34 96 optimizer """adam""" +34 96 training_loop """owa""" +34 96 negative_sampler """basic""" +34 96 evaluator """rankbased""" +34 97 dataset """kinships""" +34 97 model """complex""" +34 97 loss """bceaftersigmoid""" +34 97 regularizer """no""" +34 97 optimizer """adam""" +34 97 training_loop """owa""" +34 97 
negative_sampler """basic""" +34 97 evaluator """rankbased""" +34 98 dataset """kinships""" +34 98 model """complex""" +34 98 loss """bceaftersigmoid""" +34 98 regularizer """no""" +34 98 optimizer """adam""" +34 98 training_loop """owa""" +34 98 negative_sampler """basic""" +34 98 evaluator """rankbased""" +34 99 dataset """kinships""" +34 99 model """complex""" +34 99 loss """bceaftersigmoid""" +34 99 regularizer """no""" +34 99 optimizer """adam""" +34 99 training_loop """owa""" +34 99 negative_sampler """basic""" +34 99 evaluator """rankbased""" +34 100 dataset """kinships""" +34 100 model """complex""" +34 100 loss """bceaftersigmoid""" +34 100 regularizer """no""" +34 100 optimizer """adam""" +34 100 training_loop """owa""" +34 100 negative_sampler """basic""" +34 100 evaluator """rankbased""" +35 1 model.embedding_dim 1.0 +35 1 optimizer.lr 0.015937536675852643 +35 1 negative_sampler.num_negs_per_pos 70.0 +35 1 training.batch_size 1.0 +35 2 model.embedding_dim 0.0 +35 2 optimizer.lr 0.01591858575658802 +35 2 negative_sampler.num_negs_per_pos 88.0 +35 2 training.batch_size 0.0 +35 3 model.embedding_dim 1.0 +35 3 optimizer.lr 0.035225674603860224 +35 3 negative_sampler.num_negs_per_pos 14.0 +35 3 training.batch_size 2.0 +35 4 model.embedding_dim 2.0 +35 4 optimizer.lr 0.03872420083693466 +35 4 negative_sampler.num_negs_per_pos 93.0 +35 4 training.batch_size 2.0 +35 5 model.embedding_dim 2.0 +35 5 optimizer.lr 0.06775172040233242 +35 5 negative_sampler.num_negs_per_pos 72.0 +35 5 training.batch_size 2.0 +35 6 model.embedding_dim 2.0 +35 6 optimizer.lr 0.020769747969280022 +35 6 negative_sampler.num_negs_per_pos 20.0 +35 6 training.batch_size 1.0 +35 7 model.embedding_dim 0.0 +35 7 optimizer.lr 0.0033343363999635365 +35 7 negative_sampler.num_negs_per_pos 41.0 +35 7 training.batch_size 2.0 +35 8 model.embedding_dim 1.0 +35 8 optimizer.lr 0.008017711474199008 +35 8 negative_sampler.num_negs_per_pos 3.0 +35 8 training.batch_size 1.0 +35 9 model.embedding_dim 0.0 
+35 9 optimizer.lr 0.002803762235660027 +35 9 negative_sampler.num_negs_per_pos 8.0 +35 9 training.batch_size 0.0 +35 10 model.embedding_dim 1.0 +35 10 optimizer.lr 0.003993003356247104 +35 10 negative_sampler.num_negs_per_pos 44.0 +35 10 training.batch_size 2.0 +35 11 model.embedding_dim 2.0 +35 11 optimizer.lr 0.00271292687757753 +35 11 negative_sampler.num_negs_per_pos 70.0 +35 11 training.batch_size 2.0 +35 12 model.embedding_dim 0.0 +35 12 optimizer.lr 0.0010710415759286958 +35 12 negative_sampler.num_negs_per_pos 50.0 +35 12 training.batch_size 2.0 +35 13 model.embedding_dim 0.0 +35 13 optimizer.lr 0.010977670507313082 +35 13 negative_sampler.num_negs_per_pos 63.0 +35 13 training.batch_size 1.0 +35 14 model.embedding_dim 2.0 +35 14 optimizer.lr 0.0017103229121612404 +35 14 negative_sampler.num_negs_per_pos 95.0 +35 14 training.batch_size 2.0 +35 15 model.embedding_dim 2.0 +35 15 optimizer.lr 0.004336028848758224 +35 15 negative_sampler.num_negs_per_pos 72.0 +35 15 training.batch_size 1.0 +35 16 model.embedding_dim 1.0 +35 16 optimizer.lr 0.038726114560821 +35 16 negative_sampler.num_negs_per_pos 63.0 +35 16 training.batch_size 0.0 +35 17 model.embedding_dim 0.0 +35 17 optimizer.lr 0.019098834289159285 +35 17 negative_sampler.num_negs_per_pos 92.0 +35 17 training.batch_size 0.0 +35 18 model.embedding_dim 2.0 +35 18 optimizer.lr 0.02836363427939877 +35 18 negative_sampler.num_negs_per_pos 95.0 +35 18 training.batch_size 1.0 +35 19 model.embedding_dim 1.0 +35 19 optimizer.lr 0.03362582042088318 +35 19 negative_sampler.num_negs_per_pos 16.0 +35 19 training.batch_size 1.0 +35 20 model.embedding_dim 1.0 +35 20 optimizer.lr 0.004468554122843349 +35 20 negative_sampler.num_negs_per_pos 4.0 +35 20 training.batch_size 1.0 +35 21 model.embedding_dim 0.0 +35 21 optimizer.lr 0.029798952500173212 +35 21 negative_sampler.num_negs_per_pos 21.0 +35 21 training.batch_size 2.0 +35 22 model.embedding_dim 0.0 +35 22 optimizer.lr 0.001577212360268615 +35 22 
negative_sampler.num_negs_per_pos 3.0 +35 22 training.batch_size 1.0 +35 23 model.embedding_dim 0.0 +35 23 optimizer.lr 0.004648812837661719 +35 23 negative_sampler.num_negs_per_pos 16.0 +35 23 training.batch_size 2.0 +35 24 model.embedding_dim 0.0 +35 24 optimizer.lr 0.009464567161390623 +35 24 negative_sampler.num_negs_per_pos 64.0 +35 24 training.batch_size 1.0 +35 25 model.embedding_dim 0.0 +35 25 optimizer.lr 0.0071470113851583635 +35 25 negative_sampler.num_negs_per_pos 37.0 +35 25 training.batch_size 0.0 +35 26 model.embedding_dim 0.0 +35 26 optimizer.lr 0.06795256338853865 +35 26 negative_sampler.num_negs_per_pos 65.0 +35 26 training.batch_size 0.0 +35 27 model.embedding_dim 2.0 +35 27 optimizer.lr 0.05066733748752292 +35 27 negative_sampler.num_negs_per_pos 38.0 +35 27 training.batch_size 2.0 +35 28 model.embedding_dim 2.0 +35 28 optimizer.lr 0.03294868529335456 +35 28 negative_sampler.num_negs_per_pos 84.0 +35 28 training.batch_size 0.0 +35 29 model.embedding_dim 1.0 +35 29 optimizer.lr 0.003513813315518711 +35 29 negative_sampler.num_negs_per_pos 87.0 +35 29 training.batch_size 2.0 +35 30 model.embedding_dim 2.0 +35 30 optimizer.lr 0.0011174771412680458 +35 30 negative_sampler.num_negs_per_pos 89.0 +35 30 training.batch_size 1.0 +35 31 model.embedding_dim 1.0 +35 31 optimizer.lr 0.027851927695741623 +35 31 negative_sampler.num_negs_per_pos 48.0 +35 31 training.batch_size 1.0 +35 32 model.embedding_dim 1.0 +35 32 optimizer.lr 0.009081611520190819 +35 32 negative_sampler.num_negs_per_pos 22.0 +35 32 training.batch_size 0.0 +35 33 model.embedding_dim 0.0 +35 33 optimizer.lr 0.023981114131927203 +35 33 negative_sampler.num_negs_per_pos 4.0 +35 33 training.batch_size 0.0 +35 34 model.embedding_dim 2.0 +35 34 optimizer.lr 0.004663748424716298 +35 34 negative_sampler.num_negs_per_pos 67.0 +35 34 training.batch_size 1.0 +35 35 model.embedding_dim 2.0 +35 35 optimizer.lr 0.0029402904272237754 +35 35 negative_sampler.num_negs_per_pos 51.0 +35 35 
training.batch_size 2.0 +35 36 model.embedding_dim 0.0 +35 36 optimizer.lr 0.0017026042049566568 +35 36 negative_sampler.num_negs_per_pos 1.0 +35 36 training.batch_size 1.0 +35 37 model.embedding_dim 1.0 +35 37 optimizer.lr 0.029713369500682603 +35 37 negative_sampler.num_negs_per_pos 59.0 +35 37 training.batch_size 0.0 +35 38 model.embedding_dim 0.0 +35 38 optimizer.lr 0.09526708748272433 +35 38 negative_sampler.num_negs_per_pos 17.0 +35 38 training.batch_size 1.0 +35 39 model.embedding_dim 2.0 +35 39 optimizer.lr 0.02613321898049886 +35 39 negative_sampler.num_negs_per_pos 58.0 +35 39 training.batch_size 0.0 +35 40 model.embedding_dim 1.0 +35 40 optimizer.lr 0.04457540126053458 +35 40 negative_sampler.num_negs_per_pos 72.0 +35 40 training.batch_size 2.0 +35 41 model.embedding_dim 2.0 +35 41 optimizer.lr 0.009564547414933901 +35 41 negative_sampler.num_negs_per_pos 10.0 +35 41 training.batch_size 1.0 +35 42 model.embedding_dim 1.0 +35 42 optimizer.lr 0.005962954052395056 +35 42 negative_sampler.num_negs_per_pos 70.0 +35 42 training.batch_size 0.0 +35 43 model.embedding_dim 0.0 +35 43 optimizer.lr 0.04782888703965545 +35 43 negative_sampler.num_negs_per_pos 85.0 +35 43 training.batch_size 0.0 +35 44 model.embedding_dim 0.0 +35 44 optimizer.lr 0.0029778046264887412 +35 44 negative_sampler.num_negs_per_pos 2.0 +35 44 training.batch_size 1.0 +35 45 model.embedding_dim 2.0 +35 45 optimizer.lr 0.004197999185420423 +35 45 negative_sampler.num_negs_per_pos 27.0 +35 45 training.batch_size 0.0 +35 46 model.embedding_dim 0.0 +35 46 optimizer.lr 0.0010682638083334554 +35 46 negative_sampler.num_negs_per_pos 29.0 +35 46 training.batch_size 1.0 +35 47 model.embedding_dim 2.0 +35 47 optimizer.lr 0.03907507357599888 +35 47 negative_sampler.num_negs_per_pos 24.0 +35 47 training.batch_size 0.0 +35 48 model.embedding_dim 0.0 +35 48 optimizer.lr 0.009949876498360726 +35 48 negative_sampler.num_negs_per_pos 30.0 +35 48 training.batch_size 2.0 +35 49 model.embedding_dim 1.0 +35 49 
optimizer.lr 0.09612616319492215 +35 49 negative_sampler.num_negs_per_pos 68.0 +35 49 training.batch_size 0.0 +35 50 model.embedding_dim 0.0 +35 50 optimizer.lr 0.007619554886311154 +35 50 negative_sampler.num_negs_per_pos 32.0 +35 50 training.batch_size 0.0 +35 51 model.embedding_dim 0.0 +35 51 optimizer.lr 0.002966416127360629 +35 51 negative_sampler.num_negs_per_pos 58.0 +35 51 training.batch_size 1.0 +35 52 model.embedding_dim 0.0 +35 52 optimizer.lr 0.05339674805062552 +35 52 negative_sampler.num_negs_per_pos 58.0 +35 52 training.batch_size 0.0 +35 53 model.embedding_dim 0.0 +35 53 optimizer.lr 0.0015115605354424086 +35 53 negative_sampler.num_negs_per_pos 7.0 +35 53 training.batch_size 1.0 +35 54 model.embedding_dim 0.0 +35 54 optimizer.lr 0.05056003258404722 +35 54 negative_sampler.num_negs_per_pos 32.0 +35 54 training.batch_size 1.0 +35 55 model.embedding_dim 2.0 +35 55 optimizer.lr 0.0029682806723571836 +35 55 negative_sampler.num_negs_per_pos 17.0 +35 55 training.batch_size 0.0 +35 56 model.embedding_dim 0.0 +35 56 optimizer.lr 0.02457289072506699 +35 56 negative_sampler.num_negs_per_pos 22.0 +35 56 training.batch_size 2.0 +35 57 model.embedding_dim 0.0 +35 57 optimizer.lr 0.02299992408171524 +35 57 negative_sampler.num_negs_per_pos 64.0 +35 57 training.batch_size 1.0 +35 58 model.embedding_dim 1.0 +35 58 optimizer.lr 0.030812202826647133 +35 58 negative_sampler.num_negs_per_pos 57.0 +35 58 training.batch_size 0.0 +35 59 model.embedding_dim 2.0 +35 59 optimizer.lr 0.0038917604541532894 +35 59 negative_sampler.num_negs_per_pos 81.0 +35 59 training.batch_size 2.0 +35 60 model.embedding_dim 1.0 +35 60 optimizer.lr 0.01969849381508586 +35 60 negative_sampler.num_negs_per_pos 27.0 +35 60 training.batch_size 2.0 +35 61 model.embedding_dim 2.0 +35 61 optimizer.lr 0.004286383618666231 +35 61 negative_sampler.num_negs_per_pos 60.0 +35 61 training.batch_size 1.0 +35 62 model.embedding_dim 2.0 +35 62 optimizer.lr 0.00518872871716781 +35 62 
negative_sampler.num_negs_per_pos 81.0 +35 62 training.batch_size 1.0 +35 63 model.embedding_dim 1.0 +35 63 optimizer.lr 0.017591709783246375 +35 63 negative_sampler.num_negs_per_pos 45.0 +35 63 training.batch_size 1.0 +35 64 model.embedding_dim 1.0 +35 64 optimizer.lr 0.004527609507259991 +35 64 negative_sampler.num_negs_per_pos 94.0 +35 64 training.batch_size 1.0 +35 65 model.embedding_dim 2.0 +35 65 optimizer.lr 0.004154591184301892 +35 65 negative_sampler.num_negs_per_pos 31.0 +35 65 training.batch_size 2.0 +35 66 model.embedding_dim 0.0 +35 66 optimizer.lr 0.0055180392566276356 +35 66 negative_sampler.num_negs_per_pos 48.0 +35 66 training.batch_size 1.0 +35 67 model.embedding_dim 0.0 +35 67 optimizer.lr 0.00920556040171155 +35 67 negative_sampler.num_negs_per_pos 79.0 +35 67 training.batch_size 2.0 +35 68 model.embedding_dim 0.0 +35 68 optimizer.lr 0.004405108717669673 +35 68 negative_sampler.num_negs_per_pos 58.0 +35 68 training.batch_size 1.0 +35 69 model.embedding_dim 1.0 +35 69 optimizer.lr 0.014868739599434739 +35 69 negative_sampler.num_negs_per_pos 3.0 +35 69 training.batch_size 2.0 +35 70 model.embedding_dim 2.0 +35 70 optimizer.lr 0.007969003194126942 +35 70 negative_sampler.num_negs_per_pos 53.0 +35 70 training.batch_size 2.0 +35 71 model.embedding_dim 0.0 +35 71 optimizer.lr 0.0019617730969574924 +35 71 negative_sampler.num_negs_per_pos 13.0 +35 71 training.batch_size 1.0 +35 72 model.embedding_dim 2.0 +35 72 optimizer.lr 0.002025860188882044 +35 72 negative_sampler.num_negs_per_pos 51.0 +35 72 training.batch_size 1.0 +35 73 model.embedding_dim 0.0 +35 73 optimizer.lr 0.0011518115501314352 +35 73 negative_sampler.num_negs_per_pos 51.0 +35 73 training.batch_size 1.0 +35 74 model.embedding_dim 1.0 +35 74 optimizer.lr 0.02762947102315119 +35 74 negative_sampler.num_negs_per_pos 11.0 +35 74 training.batch_size 1.0 +35 75 model.embedding_dim 0.0 +35 75 optimizer.lr 0.05399231105428374 +35 75 negative_sampler.num_negs_per_pos 11.0 +35 75 
training.batch_size 1.0 +35 76 model.embedding_dim 2.0 +35 76 optimizer.lr 0.001222646557167078 +35 76 negative_sampler.num_negs_per_pos 7.0 +35 76 training.batch_size 2.0 +35 77 model.embedding_dim 1.0 +35 77 optimizer.lr 0.02040053500801566 +35 77 negative_sampler.num_negs_per_pos 53.0 +35 77 training.batch_size 1.0 +35 78 model.embedding_dim 2.0 +35 78 optimizer.lr 0.0015799571975749606 +35 78 negative_sampler.num_negs_per_pos 4.0 +35 78 training.batch_size 0.0 +35 79 model.embedding_dim 1.0 +35 79 optimizer.lr 0.008718844006248177 +35 79 negative_sampler.num_negs_per_pos 64.0 +35 79 training.batch_size 1.0 +35 80 model.embedding_dim 0.0 +35 80 optimizer.lr 0.07307385501032636 +35 80 negative_sampler.num_negs_per_pos 96.0 +35 80 training.batch_size 0.0 +35 81 model.embedding_dim 0.0 +35 81 optimizer.lr 0.09247366514549377 +35 81 negative_sampler.num_negs_per_pos 38.0 +35 81 training.batch_size 1.0 +35 82 model.embedding_dim 2.0 +35 82 optimizer.lr 0.013061580108787624 +35 82 negative_sampler.num_negs_per_pos 90.0 +35 82 training.batch_size 1.0 +35 83 model.embedding_dim 1.0 +35 83 optimizer.lr 0.011120529442938294 +35 83 negative_sampler.num_negs_per_pos 55.0 +35 83 training.batch_size 0.0 +35 84 model.embedding_dim 0.0 +35 84 optimizer.lr 0.0020856516151216077 +35 84 negative_sampler.num_negs_per_pos 36.0 +35 84 training.batch_size 0.0 +35 85 model.embedding_dim 1.0 +35 85 optimizer.lr 0.0905998808945347 +35 85 negative_sampler.num_negs_per_pos 32.0 +35 85 training.batch_size 1.0 +35 86 model.embedding_dim 2.0 +35 86 optimizer.lr 0.01264411686155089 +35 86 negative_sampler.num_negs_per_pos 51.0 +35 86 training.batch_size 0.0 +35 87 model.embedding_dim 0.0 +35 87 optimizer.lr 0.003002648671430364 +35 87 negative_sampler.num_negs_per_pos 90.0 +35 87 training.batch_size 1.0 +35 88 model.embedding_dim 2.0 +35 88 optimizer.lr 0.001155139376792627 +35 88 negative_sampler.num_negs_per_pos 9.0 +35 88 training.batch_size 0.0 +35 89 model.embedding_dim 0.0 +35 89 
optimizer.lr 0.0020788299106312053 +35 89 negative_sampler.num_negs_per_pos 1.0 +35 89 training.batch_size 0.0 +35 90 model.embedding_dim 0.0 +35 90 optimizer.lr 0.020196127994545652 +35 90 negative_sampler.num_negs_per_pos 90.0 +35 90 training.batch_size 1.0 +35 91 model.embedding_dim 1.0 +35 91 optimizer.lr 0.018126180722111338 +35 91 negative_sampler.num_negs_per_pos 87.0 +35 91 training.batch_size 1.0 +35 92 model.embedding_dim 0.0 +35 92 optimizer.lr 0.003253415823552682 +35 92 negative_sampler.num_negs_per_pos 46.0 +35 92 training.batch_size 2.0 +35 93 model.embedding_dim 2.0 +35 93 optimizer.lr 0.0021522926762078147 +35 93 negative_sampler.num_negs_per_pos 24.0 +35 93 training.batch_size 2.0 +35 94 model.embedding_dim 2.0 +35 94 optimizer.lr 0.05903671439393222 +35 94 negative_sampler.num_negs_per_pos 91.0 +35 94 training.batch_size 0.0 +35 95 model.embedding_dim 2.0 +35 95 optimizer.lr 0.001350798621127788 +35 95 negative_sampler.num_negs_per_pos 17.0 +35 95 training.batch_size 2.0 +35 96 model.embedding_dim 2.0 +35 96 optimizer.lr 0.002393328744143343 +35 96 negative_sampler.num_negs_per_pos 78.0 +35 96 training.batch_size 2.0 +35 97 model.embedding_dim 0.0 +35 97 optimizer.lr 0.016243495135482827 +35 97 negative_sampler.num_negs_per_pos 25.0 +35 97 training.batch_size 1.0 +35 98 model.embedding_dim 0.0 +35 98 optimizer.lr 0.03485416631581745 +35 98 negative_sampler.num_negs_per_pos 72.0 +35 98 training.batch_size 0.0 +35 99 model.embedding_dim 1.0 +35 99 optimizer.lr 0.0021232300601239248 +35 99 negative_sampler.num_negs_per_pos 0.0 +35 99 training.batch_size 1.0 +35 100 model.embedding_dim 1.0 +35 100 optimizer.lr 0.0013286026773116368 +35 100 negative_sampler.num_negs_per_pos 37.0 +35 100 training.batch_size 0.0 +35 1 dataset """kinships""" +35 1 model """complex""" +35 1 loss """softplus""" +35 1 regularizer """no""" +35 1 optimizer """adam""" +35 1 training_loop """owa""" +35 1 negative_sampler """basic""" +35 1 evaluator """rankbased""" +35 2 dataset 
"""kinships""" +35 2 model """complex""" +35 2 loss """softplus""" +35 2 regularizer """no""" +35 2 optimizer """adam""" +35 2 training_loop """owa""" +35 2 negative_sampler """basic""" +35 2 evaluator """rankbased""" +35 3 dataset """kinships""" +35 3 model """complex""" +35 3 loss """softplus""" +35 3 regularizer """no""" +35 3 optimizer """adam""" +35 3 training_loop """owa""" +35 3 negative_sampler """basic""" +35 3 evaluator """rankbased""" +35 4 dataset """kinships""" +35 4 model """complex""" +35 4 loss """softplus""" +35 4 regularizer """no""" +35 4 optimizer """adam""" +35 4 training_loop """owa""" +35 4 negative_sampler """basic""" +35 4 evaluator """rankbased""" +35 5 dataset """kinships""" +35 5 model """complex""" +35 5 loss """softplus""" +35 5 regularizer """no""" +35 5 optimizer """adam""" +35 5 training_loop """owa""" +35 5 negative_sampler """basic""" +35 5 evaluator """rankbased""" +35 6 dataset """kinships""" +35 6 model """complex""" +35 6 loss """softplus""" +35 6 regularizer """no""" +35 6 optimizer """adam""" +35 6 training_loop """owa""" +35 6 negative_sampler """basic""" +35 6 evaluator """rankbased""" +35 7 dataset """kinships""" +35 7 model """complex""" +35 7 loss """softplus""" +35 7 regularizer """no""" +35 7 optimizer """adam""" +35 7 training_loop """owa""" +35 7 negative_sampler """basic""" +35 7 evaluator """rankbased""" +35 8 dataset """kinships""" +35 8 model """complex""" +35 8 loss """softplus""" +35 8 regularizer """no""" +35 8 optimizer """adam""" +35 8 training_loop """owa""" +35 8 negative_sampler """basic""" +35 8 evaluator """rankbased""" +35 9 dataset """kinships""" +35 9 model """complex""" +35 9 loss """softplus""" +35 9 regularizer """no""" +35 9 optimizer """adam""" +35 9 training_loop """owa""" +35 9 negative_sampler """basic""" +35 9 evaluator """rankbased""" +35 10 dataset """kinships""" +35 10 model """complex""" +35 10 loss """softplus""" +35 10 regularizer """no""" +35 10 optimizer """adam""" +35 10 
training_loop """owa""" +35 10 negative_sampler """basic""" +35 10 evaluator """rankbased""" +35 11 dataset """kinships""" +35 11 model """complex""" +35 11 loss """softplus""" +35 11 regularizer """no""" +35 11 optimizer """adam""" +35 11 training_loop """owa""" +35 11 negative_sampler """basic""" +35 11 evaluator """rankbased""" +35 12 dataset """kinships""" +35 12 model """complex""" +35 12 loss """softplus""" +35 12 regularizer """no""" +35 12 optimizer """adam""" +35 12 training_loop """owa""" +35 12 negative_sampler """basic""" +35 12 evaluator """rankbased""" +35 13 dataset """kinships""" +35 13 model """complex""" +35 13 loss """softplus""" +35 13 regularizer """no""" +35 13 optimizer """adam""" +35 13 training_loop """owa""" +35 13 negative_sampler """basic""" +35 13 evaluator """rankbased""" +35 14 dataset """kinships""" +35 14 model """complex""" +35 14 loss """softplus""" +35 14 regularizer """no""" +35 14 optimizer """adam""" +35 14 training_loop """owa""" +35 14 negative_sampler """basic""" +35 14 evaluator """rankbased""" +35 15 dataset """kinships""" +35 15 model """complex""" +35 15 loss """softplus""" +35 15 regularizer """no""" +35 15 optimizer """adam""" +35 15 training_loop """owa""" +35 15 negative_sampler """basic""" +35 15 evaluator """rankbased""" +35 16 dataset """kinships""" +35 16 model """complex""" +35 16 loss """softplus""" +35 16 regularizer """no""" +35 16 optimizer """adam""" +35 16 training_loop """owa""" +35 16 negative_sampler """basic""" +35 16 evaluator """rankbased""" +35 17 dataset """kinships""" +35 17 model """complex""" +35 17 loss """softplus""" +35 17 regularizer """no""" +35 17 optimizer """adam""" +35 17 training_loop """owa""" +35 17 negative_sampler """basic""" +35 17 evaluator """rankbased""" +35 18 dataset """kinships""" +35 18 model """complex""" +35 18 loss """softplus""" +35 18 regularizer """no""" +35 18 optimizer """adam""" +35 18 training_loop """owa""" +35 18 negative_sampler """basic""" +35 18 evaluator 
"""rankbased""" +35 19 dataset """kinships""" +35 19 model """complex""" +35 19 loss """softplus""" +35 19 regularizer """no""" +35 19 optimizer """adam""" +35 19 training_loop """owa""" +35 19 negative_sampler """basic""" +35 19 evaluator """rankbased""" +35 20 dataset """kinships""" +35 20 model """complex""" +35 20 loss """softplus""" +35 20 regularizer """no""" +35 20 optimizer """adam""" +35 20 training_loop """owa""" +35 20 negative_sampler """basic""" +35 20 evaluator """rankbased""" +35 21 dataset """kinships""" +35 21 model """complex""" +35 21 loss """softplus""" +35 21 regularizer """no""" +35 21 optimizer """adam""" +35 21 training_loop """owa""" +35 21 negative_sampler """basic""" +35 21 evaluator """rankbased""" +35 22 dataset """kinships""" +35 22 model """complex""" +35 22 loss """softplus""" +35 22 regularizer """no""" +35 22 optimizer """adam""" +35 22 training_loop """owa""" +35 22 negative_sampler """basic""" +35 22 evaluator """rankbased""" +35 23 dataset """kinships""" +35 23 model """complex""" +35 23 loss """softplus""" +35 23 regularizer """no""" +35 23 optimizer """adam""" +35 23 training_loop """owa""" +35 23 negative_sampler """basic""" +35 23 evaluator """rankbased""" +35 24 dataset """kinships""" +35 24 model """complex""" +35 24 loss """softplus""" +35 24 regularizer """no""" +35 24 optimizer """adam""" +35 24 training_loop """owa""" +35 24 negative_sampler """basic""" +35 24 evaluator """rankbased""" +35 25 dataset """kinships""" +35 25 model """complex""" +35 25 loss """softplus""" +35 25 regularizer """no""" +35 25 optimizer """adam""" +35 25 training_loop """owa""" +35 25 negative_sampler """basic""" +35 25 evaluator """rankbased""" +35 26 dataset """kinships""" +35 26 model """complex""" +35 26 loss """softplus""" +35 26 regularizer """no""" +35 26 optimizer """adam""" +35 26 training_loop """owa""" +35 26 negative_sampler """basic""" +35 26 evaluator """rankbased""" +35 27 dataset """kinships""" +35 27 model """complex""" +35 27 
loss """softplus""" +35 27 regularizer """no""" +35 27 optimizer """adam""" +35 27 training_loop """owa""" +35 27 negative_sampler """basic""" +35 27 evaluator """rankbased""" +35 28 dataset """kinships""" +35 28 model """complex""" +35 28 loss """softplus""" +35 28 regularizer """no""" +35 28 optimizer """adam""" +35 28 training_loop """owa""" +35 28 negative_sampler """basic""" +35 28 evaluator """rankbased""" +35 29 dataset """kinships""" +35 29 model """complex""" +35 29 loss """softplus""" +35 29 regularizer """no""" +35 29 optimizer """adam""" +35 29 training_loop """owa""" +35 29 negative_sampler """basic""" +35 29 evaluator """rankbased""" +35 30 dataset """kinships""" +35 30 model """complex""" +35 30 loss """softplus""" +35 30 regularizer """no""" +35 30 optimizer """adam""" +35 30 training_loop """owa""" +35 30 negative_sampler """basic""" +35 30 evaluator """rankbased""" +35 31 dataset """kinships""" +35 31 model """complex""" +35 31 loss """softplus""" +35 31 regularizer """no""" +35 31 optimizer """adam""" +35 31 training_loop """owa""" +35 31 negative_sampler """basic""" +35 31 evaluator """rankbased""" +35 32 dataset """kinships""" +35 32 model """complex""" +35 32 loss """softplus""" +35 32 regularizer """no""" +35 32 optimizer """adam""" +35 32 training_loop """owa""" +35 32 negative_sampler """basic""" +35 32 evaluator """rankbased""" +35 33 dataset """kinships""" +35 33 model """complex""" +35 33 loss """softplus""" +35 33 regularizer """no""" +35 33 optimizer """adam""" +35 33 training_loop """owa""" +35 33 negative_sampler """basic""" +35 33 evaluator """rankbased""" +35 34 dataset """kinships""" +35 34 model """complex""" +35 34 loss """softplus""" +35 34 regularizer """no""" +35 34 optimizer """adam""" +35 34 training_loop """owa""" +35 34 negative_sampler """basic""" +35 34 evaluator """rankbased""" +35 35 dataset """kinships""" +35 35 model """complex""" +35 35 loss """softplus""" +35 35 regularizer """no""" +35 35 optimizer """adam""" +35 
35 training_loop """owa""" +35 35 negative_sampler """basic""" +35 35 evaluator """rankbased""" +35 36 dataset """kinships""" +35 36 model """complex""" +35 36 loss """softplus""" +35 36 regularizer """no""" +35 36 optimizer """adam""" +35 36 training_loop """owa""" +35 36 negative_sampler """basic""" +35 36 evaluator """rankbased""" +35 37 dataset """kinships""" +35 37 model """complex""" +35 37 loss """softplus""" +35 37 regularizer """no""" +35 37 optimizer """adam""" +35 37 training_loop """owa""" +35 37 negative_sampler """basic""" +35 37 evaluator """rankbased""" +35 38 dataset """kinships""" +35 38 model """complex""" +35 38 loss """softplus""" +35 38 regularizer """no""" +35 38 optimizer """adam""" +35 38 training_loop """owa""" +35 38 negative_sampler """basic""" +35 38 evaluator """rankbased""" +35 39 dataset """kinships""" +35 39 model """complex""" +35 39 loss """softplus""" +35 39 regularizer """no""" +35 39 optimizer """adam""" +35 39 training_loop """owa""" +35 39 negative_sampler """basic""" +35 39 evaluator """rankbased""" +35 40 dataset """kinships""" +35 40 model """complex""" +35 40 loss """softplus""" +35 40 regularizer """no""" +35 40 optimizer """adam""" +35 40 training_loop """owa""" +35 40 negative_sampler """basic""" +35 40 evaluator """rankbased""" +35 41 dataset """kinships""" +35 41 model """complex""" +35 41 loss """softplus""" +35 41 regularizer """no""" +35 41 optimizer """adam""" +35 41 training_loop """owa""" +35 41 negative_sampler """basic""" +35 41 evaluator """rankbased""" +35 42 dataset """kinships""" +35 42 model """complex""" +35 42 loss """softplus""" +35 42 regularizer """no""" +35 42 optimizer """adam""" +35 42 training_loop """owa""" +35 42 negative_sampler """basic""" +35 42 evaluator """rankbased""" +35 43 dataset """kinships""" +35 43 model """complex""" +35 43 loss """softplus""" +35 43 regularizer """no""" +35 43 optimizer """adam""" +35 43 training_loop """owa""" +35 43 negative_sampler """basic""" +35 43 evaluator 
"""rankbased""" +35 44 dataset """kinships""" +35 44 model """complex""" +35 44 loss """softplus""" +35 44 regularizer """no""" +35 44 optimizer """adam""" +35 44 training_loop """owa""" +35 44 negative_sampler """basic""" +35 44 evaluator """rankbased""" +35 45 dataset """kinships""" +35 45 model """complex""" +35 45 loss """softplus""" +35 45 regularizer """no""" +35 45 optimizer """adam""" +35 45 training_loop """owa""" +35 45 negative_sampler """basic""" +35 45 evaluator """rankbased""" +35 46 dataset """kinships""" +35 46 model """complex""" +35 46 loss """softplus""" +35 46 regularizer """no""" +35 46 optimizer """adam""" +35 46 training_loop """owa""" +35 46 negative_sampler """basic""" +35 46 evaluator """rankbased""" +35 47 dataset """kinships""" +35 47 model """complex""" +35 47 loss """softplus""" +35 47 regularizer """no""" +35 47 optimizer """adam""" +35 47 training_loop """owa""" +35 47 negative_sampler """basic""" +35 47 evaluator """rankbased""" +35 48 dataset """kinships""" +35 48 model """complex""" +35 48 loss """softplus""" +35 48 regularizer """no""" +35 48 optimizer """adam""" +35 48 training_loop """owa""" +35 48 negative_sampler """basic""" +35 48 evaluator """rankbased""" +35 49 dataset """kinships""" +35 49 model """complex""" +35 49 loss """softplus""" +35 49 regularizer """no""" +35 49 optimizer """adam""" +35 49 training_loop """owa""" +35 49 negative_sampler """basic""" +35 49 evaluator """rankbased""" +35 50 dataset """kinships""" +35 50 model """complex""" +35 50 loss """softplus""" +35 50 regularizer """no""" +35 50 optimizer """adam""" +35 50 training_loop """owa""" +35 50 negative_sampler """basic""" +35 50 evaluator """rankbased""" +35 51 dataset """kinships""" +35 51 model """complex""" +35 51 loss """softplus""" +35 51 regularizer """no""" +35 51 optimizer """adam""" +35 51 training_loop """owa""" +35 51 negative_sampler """basic""" +35 51 evaluator """rankbased""" +35 52 dataset """kinships""" +35 52 model """complex""" +35 52 
loss """softplus""" +35 52 regularizer """no""" +35 52 optimizer """adam""" +35 52 training_loop """owa""" +35 52 negative_sampler """basic""" +35 52 evaluator """rankbased""" +35 53 dataset """kinships""" +35 53 model """complex""" +35 53 loss """softplus""" +35 53 regularizer """no""" +35 53 optimizer """adam""" +35 53 training_loop """owa""" +35 53 negative_sampler """basic""" +35 53 evaluator """rankbased""" +35 54 dataset """kinships""" +35 54 model """complex""" +35 54 loss """softplus""" +35 54 regularizer """no""" +35 54 optimizer """adam""" +35 54 training_loop """owa""" +35 54 negative_sampler """basic""" +35 54 evaluator """rankbased""" +35 55 dataset """kinships""" +35 55 model """complex""" +35 55 loss """softplus""" +35 55 regularizer """no""" +35 55 optimizer """adam""" +35 55 training_loop """owa""" +35 55 negative_sampler """basic""" +35 55 evaluator """rankbased""" +35 56 dataset """kinships""" +35 56 model """complex""" +35 56 loss """softplus""" +35 56 regularizer """no""" +35 56 optimizer """adam""" +35 56 training_loop """owa""" +35 56 negative_sampler """basic""" +35 56 evaluator """rankbased""" +35 57 dataset """kinships""" +35 57 model """complex""" +35 57 loss """softplus""" +35 57 regularizer """no""" +35 57 optimizer """adam""" +35 57 training_loop """owa""" +35 57 negative_sampler """basic""" +35 57 evaluator """rankbased""" +35 58 dataset """kinships""" +35 58 model """complex""" +35 58 loss """softplus""" +35 58 regularizer """no""" +35 58 optimizer """adam""" +35 58 training_loop """owa""" +35 58 negative_sampler """basic""" +35 58 evaluator """rankbased""" +35 59 dataset """kinships""" +35 59 model """complex""" +35 59 loss """softplus""" +35 59 regularizer """no""" +35 59 optimizer """adam""" +35 59 training_loop """owa""" +35 59 negative_sampler """basic""" +35 59 evaluator """rankbased""" +35 60 dataset """kinships""" +35 60 model """complex""" +35 60 loss """softplus""" +35 60 regularizer """no""" +35 60 optimizer """adam""" +35 
60 training_loop """owa""" +35 60 negative_sampler """basic""" +35 60 evaluator """rankbased""" +35 61 dataset """kinships""" +35 61 model """complex""" +35 61 loss """softplus""" +35 61 regularizer """no""" +35 61 optimizer """adam""" +35 61 training_loop """owa""" +35 61 negative_sampler """basic""" +35 61 evaluator """rankbased""" +35 62 dataset """kinships""" +35 62 model """complex""" +35 62 loss """softplus""" +35 62 regularizer """no""" +35 62 optimizer """adam""" +35 62 training_loop """owa""" +35 62 negative_sampler """basic""" +35 62 evaluator """rankbased""" +35 63 dataset """kinships""" +35 63 model """complex""" +35 63 loss """softplus""" +35 63 regularizer """no""" +35 63 optimizer """adam""" +35 63 training_loop """owa""" +35 63 negative_sampler """basic""" +35 63 evaluator """rankbased""" +35 64 dataset """kinships""" +35 64 model """complex""" +35 64 loss """softplus""" +35 64 regularizer """no""" +35 64 optimizer """adam""" +35 64 training_loop """owa""" +35 64 negative_sampler """basic""" +35 64 evaluator """rankbased""" +35 65 dataset """kinships""" +35 65 model """complex""" +35 65 loss """softplus""" +35 65 regularizer """no""" +35 65 optimizer """adam""" +35 65 training_loop """owa""" +35 65 negative_sampler """basic""" +35 65 evaluator """rankbased""" +35 66 dataset """kinships""" +35 66 model """complex""" +35 66 loss """softplus""" +35 66 regularizer """no""" +35 66 optimizer """adam""" +35 66 training_loop """owa""" +35 66 negative_sampler """basic""" +35 66 evaluator """rankbased""" +35 67 dataset """kinships""" +35 67 model """complex""" +35 67 loss """softplus""" +35 67 regularizer """no""" +35 67 optimizer """adam""" +35 67 training_loop """owa""" +35 67 negative_sampler """basic""" +35 67 evaluator """rankbased""" +35 68 dataset """kinships""" +35 68 model """complex""" +35 68 loss """softplus""" +35 68 regularizer """no""" +35 68 optimizer """adam""" +35 68 training_loop """owa""" +35 68 negative_sampler """basic""" +35 68 evaluator 
"""rankbased""" +35 69 dataset """kinships""" +35 69 model """complex""" +35 69 loss """softplus""" +35 69 regularizer """no""" +35 69 optimizer """adam""" +35 69 training_loop """owa""" +35 69 negative_sampler """basic""" +35 69 evaluator """rankbased""" +35 70 dataset """kinships""" +35 70 model """complex""" +35 70 loss """softplus""" +35 70 regularizer """no""" +35 70 optimizer """adam""" +35 70 training_loop """owa""" +35 70 negative_sampler """basic""" +35 70 evaluator """rankbased""" +35 71 dataset """kinships""" +35 71 model """complex""" +35 71 loss """softplus""" +35 71 regularizer """no""" +35 71 optimizer """adam""" +35 71 training_loop """owa""" +35 71 negative_sampler """basic""" +35 71 evaluator """rankbased""" +35 72 dataset """kinships""" +35 72 model """complex""" +35 72 loss """softplus""" +35 72 regularizer """no""" +35 72 optimizer """adam""" +35 72 training_loop """owa""" +35 72 negative_sampler """basic""" +35 72 evaluator """rankbased""" +35 73 dataset """kinships""" +35 73 model """complex""" +35 73 loss """softplus""" +35 73 regularizer """no""" +35 73 optimizer """adam""" +35 73 training_loop """owa""" +35 73 negative_sampler """basic""" +35 73 evaluator """rankbased""" +35 74 dataset """kinships""" +35 74 model """complex""" +35 74 loss """softplus""" +35 74 regularizer """no""" +35 74 optimizer """adam""" +35 74 training_loop """owa""" +35 74 negative_sampler """basic""" +35 74 evaluator """rankbased""" +35 75 dataset """kinships""" +35 75 model """complex""" +35 75 loss """softplus""" +35 75 regularizer """no""" +35 75 optimizer """adam""" +35 75 training_loop """owa""" +35 75 negative_sampler """basic""" +35 75 evaluator """rankbased""" +35 76 dataset """kinships""" +35 76 model """complex""" +35 76 loss """softplus""" +35 76 regularizer """no""" +35 76 optimizer """adam""" +35 76 training_loop """owa""" +35 76 negative_sampler """basic""" +35 76 evaluator """rankbased""" +35 77 dataset """kinships""" +35 77 model """complex""" +35 77 
loss """softplus""" +35 77 regularizer """no""" +35 77 optimizer """adam""" +35 77 training_loop """owa""" +35 77 negative_sampler """basic""" +35 77 evaluator """rankbased""" +35 78 dataset """kinships""" +35 78 model """complex""" +35 78 loss """softplus""" +35 78 regularizer """no""" +35 78 optimizer """adam""" +35 78 training_loop """owa""" +35 78 negative_sampler """basic""" +35 78 evaluator """rankbased""" +35 79 dataset """kinships""" +35 79 model """complex""" +35 79 loss """softplus""" +35 79 regularizer """no""" +35 79 optimizer """adam""" +35 79 training_loop """owa""" +35 79 negative_sampler """basic""" +35 79 evaluator """rankbased""" +35 80 dataset """kinships""" +35 80 model """complex""" +35 80 loss """softplus""" +35 80 regularizer """no""" +35 80 optimizer """adam""" +35 80 training_loop """owa""" +35 80 negative_sampler """basic""" +35 80 evaluator """rankbased""" +35 81 dataset """kinships""" +35 81 model """complex""" +35 81 loss """softplus""" +35 81 regularizer """no""" +35 81 optimizer """adam""" +35 81 training_loop """owa""" +35 81 negative_sampler """basic""" +35 81 evaluator """rankbased""" +35 82 dataset """kinships""" +35 82 model """complex""" +35 82 loss """softplus""" +35 82 regularizer """no""" +35 82 optimizer """adam""" +35 82 training_loop """owa""" +35 82 negative_sampler """basic""" +35 82 evaluator """rankbased""" +35 83 dataset """kinships""" +35 83 model """complex""" +35 83 loss """softplus""" +35 83 regularizer """no""" +35 83 optimizer """adam""" +35 83 training_loop """owa""" +35 83 negative_sampler """basic""" +35 83 evaluator """rankbased""" +35 84 dataset """kinships""" +35 84 model """complex""" +35 84 loss """softplus""" +35 84 regularizer """no""" +35 84 optimizer """adam""" +35 84 training_loop """owa""" +35 84 negative_sampler """basic""" +35 84 evaluator """rankbased""" +35 85 dataset """kinships""" +35 85 model """complex""" +35 85 loss """softplus""" +35 85 regularizer """no""" +35 85 optimizer """adam""" +35 
85 training_loop """owa""" +35 85 negative_sampler """basic""" +35 85 evaluator """rankbased""" +35 86 dataset """kinships""" +35 86 model """complex""" +35 86 loss """softplus""" +35 86 regularizer """no""" +35 86 optimizer """adam""" +35 86 training_loop """owa""" +35 86 negative_sampler """basic""" +35 86 evaluator """rankbased""" +35 87 dataset """kinships""" +35 87 model """complex""" +35 87 loss """softplus""" +35 87 regularizer """no""" +35 87 optimizer """adam""" +35 87 training_loop """owa""" +35 87 negative_sampler """basic""" +35 87 evaluator """rankbased""" +35 88 dataset """kinships""" +35 88 model """complex""" +35 88 loss """softplus""" +35 88 regularizer """no""" +35 88 optimizer """adam""" +35 88 training_loop """owa""" +35 88 negative_sampler """basic""" +35 88 evaluator """rankbased""" +35 89 dataset """kinships""" +35 89 model """complex""" +35 89 loss """softplus""" +35 89 regularizer """no""" +35 89 optimizer """adam""" +35 89 training_loop """owa""" +35 89 negative_sampler """basic""" +35 89 evaluator """rankbased""" +35 90 dataset """kinships""" +35 90 model """complex""" +35 90 loss """softplus""" +35 90 regularizer """no""" +35 90 optimizer """adam""" +35 90 training_loop """owa""" +35 90 negative_sampler """basic""" +35 90 evaluator """rankbased""" +35 91 dataset """kinships""" +35 91 model """complex""" +35 91 loss """softplus""" +35 91 regularizer """no""" +35 91 optimizer """adam""" +35 91 training_loop """owa""" +35 91 negative_sampler """basic""" +35 91 evaluator """rankbased""" +35 92 dataset """kinships""" +35 92 model """complex""" +35 92 loss """softplus""" +35 92 regularizer """no""" +35 92 optimizer """adam""" +35 92 training_loop """owa""" +35 92 negative_sampler """basic""" +35 92 evaluator """rankbased""" +35 93 dataset """kinships""" +35 93 model """complex""" +35 93 loss """softplus""" +35 93 regularizer """no""" +35 93 optimizer """adam""" +35 93 training_loop """owa""" +35 93 negative_sampler """basic""" +35 93 evaluator 
"""rankbased""" +35 94 dataset """kinships""" +35 94 model """complex""" +35 94 loss """softplus""" +35 94 regularizer """no""" +35 94 optimizer """adam""" +35 94 training_loop """owa""" +35 94 negative_sampler """basic""" +35 94 evaluator """rankbased""" +35 95 dataset """kinships""" +35 95 model """complex""" +35 95 loss """softplus""" +35 95 regularizer """no""" +35 95 optimizer """adam""" +35 95 training_loop """owa""" +35 95 negative_sampler """basic""" +35 95 evaluator """rankbased""" +35 96 dataset """kinships""" +35 96 model """complex""" +35 96 loss """softplus""" +35 96 regularizer """no""" +35 96 optimizer """adam""" +35 96 training_loop """owa""" +35 96 negative_sampler """basic""" +35 96 evaluator """rankbased""" +35 97 dataset """kinships""" +35 97 model """complex""" +35 97 loss """softplus""" +35 97 regularizer """no""" +35 97 optimizer """adam""" +35 97 training_loop """owa""" +35 97 negative_sampler """basic""" +35 97 evaluator """rankbased""" +35 98 dataset """kinships""" +35 98 model """complex""" +35 98 loss """softplus""" +35 98 regularizer """no""" +35 98 optimizer """adam""" +35 98 training_loop """owa""" +35 98 negative_sampler """basic""" +35 98 evaluator """rankbased""" +35 99 dataset """kinships""" +35 99 model """complex""" +35 99 loss """softplus""" +35 99 regularizer """no""" +35 99 optimizer """adam""" +35 99 training_loop """owa""" +35 99 negative_sampler """basic""" +35 99 evaluator """rankbased""" +35 100 dataset """kinships""" +35 100 model """complex""" +35 100 loss """softplus""" +35 100 regularizer """no""" +35 100 optimizer """adam""" +35 100 training_loop """owa""" +35 100 negative_sampler """basic""" +35 100 evaluator """rankbased""" +36 1 model.embedding_dim 2.0 +36 1 optimizer.lr 0.005148420017449659 +36 1 negative_sampler.num_negs_per_pos 60.0 +36 1 training.batch_size 2.0 +36 2 model.embedding_dim 2.0 +36 2 optimizer.lr 0.07460196887317133 +36 2 negative_sampler.num_negs_per_pos 52.0 +36 2 training.batch_size 1.0 +36 3 
model.embedding_dim 0.0 +36 3 optimizer.lr 0.0017631152073399232 +36 3 negative_sampler.num_negs_per_pos 27.0 +36 3 training.batch_size 0.0 +36 4 model.embedding_dim 0.0 +36 4 optimizer.lr 0.003945409045503099 +36 4 negative_sampler.num_negs_per_pos 90.0 +36 4 training.batch_size 1.0 +36 5 model.embedding_dim 1.0 +36 5 optimizer.lr 0.005331009194391584 +36 5 negative_sampler.num_negs_per_pos 39.0 +36 5 training.batch_size 1.0 +36 6 model.embedding_dim 2.0 +36 6 optimizer.lr 0.04243401693825848 +36 6 negative_sampler.num_negs_per_pos 83.0 +36 6 training.batch_size 1.0 +36 7 model.embedding_dim 0.0 +36 7 optimizer.lr 0.005029746638177817 +36 7 negative_sampler.num_negs_per_pos 4.0 +36 7 training.batch_size 2.0 +36 8 model.embedding_dim 1.0 +36 8 optimizer.lr 0.025938740317337797 +36 8 negative_sampler.num_negs_per_pos 24.0 +36 8 training.batch_size 1.0 +36 9 model.embedding_dim 1.0 +36 9 optimizer.lr 0.005548199397309724 +36 9 negative_sampler.num_negs_per_pos 53.0 +36 9 training.batch_size 2.0 +36 10 model.embedding_dim 0.0 +36 10 optimizer.lr 0.0016592765199850252 +36 10 negative_sampler.num_negs_per_pos 78.0 +36 10 training.batch_size 0.0 +36 11 model.embedding_dim 0.0 +36 11 optimizer.lr 0.011779772486292151 +36 11 negative_sampler.num_negs_per_pos 64.0 +36 11 training.batch_size 2.0 +36 12 model.embedding_dim 0.0 +36 12 optimizer.lr 0.008348689410616226 +36 12 negative_sampler.num_negs_per_pos 87.0 +36 12 training.batch_size 1.0 +36 13 model.embedding_dim 1.0 +36 13 optimizer.lr 0.0017071456466700996 +36 13 negative_sampler.num_negs_per_pos 76.0 +36 13 training.batch_size 2.0 +36 14 model.embedding_dim 0.0 +36 14 optimizer.lr 0.022020364632848774 +36 14 negative_sampler.num_negs_per_pos 52.0 +36 14 training.batch_size 2.0 +36 15 model.embedding_dim 1.0 +36 15 optimizer.lr 0.0023015207432079358 +36 15 negative_sampler.num_negs_per_pos 33.0 +36 15 training.batch_size 2.0 +36 16 model.embedding_dim 1.0 +36 16 optimizer.lr 0.0012618811046777997 +36 16 
negative_sampler.num_negs_per_pos 87.0 +36 16 training.batch_size 2.0 +36 17 model.embedding_dim 1.0 +36 17 optimizer.lr 0.0020539695093623705 +36 17 negative_sampler.num_negs_per_pos 24.0 +36 17 training.batch_size 2.0 +36 18 model.embedding_dim 0.0 +36 18 optimizer.lr 0.06808798913629688 +36 18 negative_sampler.num_negs_per_pos 65.0 +36 18 training.batch_size 1.0 +36 19 model.embedding_dim 0.0 +36 19 optimizer.lr 0.003454505802310193 +36 19 negative_sampler.num_negs_per_pos 92.0 +36 19 training.batch_size 2.0 +36 20 model.embedding_dim 2.0 +36 20 optimizer.lr 0.07841668179700288 +36 20 negative_sampler.num_negs_per_pos 1.0 +36 20 training.batch_size 1.0 +36 21 model.embedding_dim 2.0 +36 21 optimizer.lr 0.005547137523894938 +36 21 negative_sampler.num_negs_per_pos 59.0 +36 21 training.batch_size 2.0 +36 22 model.embedding_dim 0.0 +36 22 optimizer.lr 0.0467969808082604 +36 22 negative_sampler.num_negs_per_pos 69.0 +36 22 training.batch_size 2.0 +36 23 model.embedding_dim 0.0 +36 23 optimizer.lr 0.013976498572022596 +36 23 negative_sampler.num_negs_per_pos 24.0 +36 23 training.batch_size 1.0 +36 24 model.embedding_dim 2.0 +36 24 optimizer.lr 0.00259248386881905 +36 24 negative_sampler.num_negs_per_pos 66.0 +36 24 training.batch_size 2.0 +36 25 model.embedding_dim 0.0 +36 25 optimizer.lr 0.01519251143919273 +36 25 negative_sampler.num_negs_per_pos 38.0 +36 25 training.batch_size 0.0 +36 26 model.embedding_dim 0.0 +36 26 optimizer.lr 0.014162159954986646 +36 26 negative_sampler.num_negs_per_pos 24.0 +36 26 training.batch_size 0.0 +36 27 model.embedding_dim 0.0 +36 27 optimizer.lr 0.01288709704076756 +36 27 negative_sampler.num_negs_per_pos 43.0 +36 27 training.batch_size 2.0 +36 28 model.embedding_dim 0.0 +36 28 optimizer.lr 0.001213164870264985 +36 28 negative_sampler.num_negs_per_pos 83.0 +36 28 training.batch_size 2.0 +36 29 model.embedding_dim 0.0 +36 29 optimizer.lr 0.011264572128342667 +36 29 negative_sampler.num_negs_per_pos 54.0 +36 29 training.batch_size 2.0 
+36 30 model.embedding_dim 2.0 +36 30 optimizer.lr 0.003727822211219096 +36 30 negative_sampler.num_negs_per_pos 9.0 +36 30 training.batch_size 1.0 +36 31 model.embedding_dim 1.0 +36 31 optimizer.lr 0.002991427432947237 +36 31 negative_sampler.num_negs_per_pos 7.0 +36 31 training.batch_size 0.0 +36 32 model.embedding_dim 0.0 +36 32 optimizer.lr 0.004338031380264096 +36 32 negative_sampler.num_negs_per_pos 89.0 +36 32 training.batch_size 0.0 +36 33 model.embedding_dim 1.0 +36 33 optimizer.lr 0.01808998522086072 +36 33 negative_sampler.num_negs_per_pos 69.0 +36 33 training.batch_size 0.0 +36 34 model.embedding_dim 0.0 +36 34 optimizer.lr 0.03605863632075053 +36 34 negative_sampler.num_negs_per_pos 13.0 +36 34 training.batch_size 1.0 +36 35 model.embedding_dim 1.0 +36 35 optimizer.lr 0.0636733056962725 +36 35 negative_sampler.num_negs_per_pos 79.0 +36 35 training.batch_size 1.0 +36 36 model.embedding_dim 1.0 +36 36 optimizer.lr 0.0011223286700826223 +36 36 negative_sampler.num_negs_per_pos 66.0 +36 36 training.batch_size 1.0 +36 37 model.embedding_dim 0.0 +36 37 optimizer.lr 0.03632479924428369 +36 37 negative_sampler.num_negs_per_pos 52.0 +36 37 training.batch_size 0.0 +36 38 model.embedding_dim 2.0 +36 38 optimizer.lr 0.004742895470824887 +36 38 negative_sampler.num_negs_per_pos 49.0 +36 38 training.batch_size 1.0 +36 39 model.embedding_dim 1.0 +36 39 optimizer.lr 0.0017968027271016145 +36 39 negative_sampler.num_negs_per_pos 76.0 +36 39 training.batch_size 2.0 +36 40 model.embedding_dim 2.0 +36 40 optimizer.lr 0.02960026575818487 +36 40 negative_sampler.num_negs_per_pos 92.0 +36 40 training.batch_size 2.0 +36 41 model.embedding_dim 2.0 +36 41 optimizer.lr 0.004433497860030783 +36 41 negative_sampler.num_negs_per_pos 33.0 +36 41 training.batch_size 2.0 +36 42 model.embedding_dim 1.0 +36 42 optimizer.lr 0.004842172505250419 +36 42 negative_sampler.num_negs_per_pos 12.0 +36 42 training.batch_size 2.0 +36 43 model.embedding_dim 0.0 +36 43 optimizer.lr 
0.02216342510069619 +36 43 negative_sampler.num_negs_per_pos 76.0 +36 43 training.batch_size 1.0 +36 44 model.embedding_dim 2.0 +36 44 optimizer.lr 0.007551367359668683 +36 44 negative_sampler.num_negs_per_pos 97.0 +36 44 training.batch_size 2.0 +36 45 model.embedding_dim 0.0 +36 45 optimizer.lr 0.03631228695631755 +36 45 negative_sampler.num_negs_per_pos 70.0 +36 45 training.batch_size 1.0 +36 46 model.embedding_dim 2.0 +36 46 optimizer.lr 0.0163656832069376 +36 46 negative_sampler.num_negs_per_pos 76.0 +36 46 training.batch_size 2.0 +36 47 model.embedding_dim 2.0 +36 47 optimizer.lr 0.020036834707997078 +36 47 negative_sampler.num_negs_per_pos 78.0 +36 47 training.batch_size 0.0 +36 48 model.embedding_dim 0.0 +36 48 optimizer.lr 0.018137569789334458 +36 48 negative_sampler.num_negs_per_pos 74.0 +36 48 training.batch_size 1.0 +36 49 model.embedding_dim 2.0 +36 49 optimizer.lr 0.01691555389828915 +36 49 negative_sampler.num_negs_per_pos 83.0 +36 49 training.batch_size 1.0 +36 50 model.embedding_dim 0.0 +36 50 optimizer.lr 0.04767298037860718 +36 50 negative_sampler.num_negs_per_pos 52.0 +36 50 training.batch_size 0.0 +36 51 model.embedding_dim 0.0 +36 51 optimizer.lr 0.060038638450516 +36 51 negative_sampler.num_negs_per_pos 28.0 +36 51 training.batch_size 0.0 +36 52 model.embedding_dim 2.0 +36 52 optimizer.lr 0.004022005210052508 +36 52 negative_sampler.num_negs_per_pos 23.0 +36 52 training.batch_size 0.0 +36 53 model.embedding_dim 0.0 +36 53 optimizer.lr 0.009886271015134054 +36 53 negative_sampler.num_negs_per_pos 17.0 +36 53 training.batch_size 0.0 +36 54 model.embedding_dim 2.0 +36 54 optimizer.lr 0.0011556444050086633 +36 54 negative_sampler.num_negs_per_pos 99.0 +36 54 training.batch_size 1.0 +36 55 model.embedding_dim 2.0 +36 55 optimizer.lr 0.0037065688579722123 +36 55 negative_sampler.num_negs_per_pos 90.0 +36 55 training.batch_size 2.0 +36 56 model.embedding_dim 0.0 +36 56 optimizer.lr 0.004152445990741535 +36 56 negative_sampler.num_negs_per_pos 69.0 
+36 56 training.batch_size 1.0 +36 57 model.embedding_dim 0.0 +36 57 optimizer.lr 0.015286210751233561 +36 57 negative_sampler.num_negs_per_pos 24.0 +36 57 training.batch_size 2.0 +36 58 model.embedding_dim 1.0 +36 58 optimizer.lr 0.008864377488340154 +36 58 negative_sampler.num_negs_per_pos 16.0 +36 58 training.batch_size 2.0 +36 59 model.embedding_dim 1.0 +36 59 optimizer.lr 0.0020169506390623525 +36 59 negative_sampler.num_negs_per_pos 3.0 +36 59 training.batch_size 1.0 +36 60 model.embedding_dim 2.0 +36 60 optimizer.lr 0.011091568236588741 +36 60 negative_sampler.num_negs_per_pos 56.0 +36 60 training.batch_size 0.0 +36 61 model.embedding_dim 2.0 +36 61 optimizer.lr 0.08641144151230633 +36 61 negative_sampler.num_negs_per_pos 86.0 +36 61 training.batch_size 0.0 +36 62 model.embedding_dim 2.0 +36 62 optimizer.lr 0.0019358986563235052 +36 62 negative_sampler.num_negs_per_pos 15.0 +36 62 training.batch_size 1.0 +36 63 model.embedding_dim 2.0 +36 63 optimizer.lr 0.02025179102894579 +36 63 negative_sampler.num_negs_per_pos 86.0 +36 63 training.batch_size 2.0 +36 64 model.embedding_dim 1.0 +36 64 optimizer.lr 0.0038394229426369423 +36 64 negative_sampler.num_negs_per_pos 87.0 +36 64 training.batch_size 1.0 +36 65 model.embedding_dim 0.0 +36 65 optimizer.lr 0.007351534023133416 +36 65 negative_sampler.num_negs_per_pos 33.0 +36 65 training.batch_size 2.0 +36 66 model.embedding_dim 2.0 +36 66 optimizer.lr 0.03036544958882618 +36 66 negative_sampler.num_negs_per_pos 60.0 +36 66 training.batch_size 2.0 +36 67 model.embedding_dim 0.0 +36 67 optimizer.lr 0.04601265382953147 +36 67 negative_sampler.num_negs_per_pos 68.0 +36 67 training.batch_size 2.0 +36 68 model.embedding_dim 0.0 +36 68 optimizer.lr 0.009125789949902516 +36 68 negative_sampler.num_negs_per_pos 96.0 +36 68 training.batch_size 1.0 +36 69 model.embedding_dim 0.0 +36 69 optimizer.lr 0.09762798277105462 +36 69 negative_sampler.num_negs_per_pos 80.0 +36 69 training.batch_size 2.0 +36 70 model.embedding_dim 1.0 +36 
70 optimizer.lr 0.058799436352181 +36 70 negative_sampler.num_negs_per_pos 90.0 +36 70 training.batch_size 1.0 +36 71 model.embedding_dim 0.0 +36 71 optimizer.lr 0.010526207688130333 +36 71 negative_sampler.num_negs_per_pos 77.0 +36 71 training.batch_size 2.0 +36 72 model.embedding_dim 2.0 +36 72 optimizer.lr 0.0019379806622457586 +36 72 negative_sampler.num_negs_per_pos 71.0 +36 72 training.batch_size 2.0 +36 73 model.embedding_dim 0.0 +36 73 optimizer.lr 0.005496993684572888 +36 73 negative_sampler.num_negs_per_pos 24.0 +36 73 training.batch_size 1.0 +36 74 model.embedding_dim 2.0 +36 74 optimizer.lr 0.019903090648309748 +36 74 negative_sampler.num_negs_per_pos 36.0 +36 74 training.batch_size 0.0 +36 75 model.embedding_dim 1.0 +36 75 optimizer.lr 0.004453857608481607 +36 75 negative_sampler.num_negs_per_pos 2.0 +36 75 training.batch_size 0.0 +36 76 model.embedding_dim 0.0 +36 76 optimizer.lr 0.007835820002888584 +36 76 negative_sampler.num_negs_per_pos 96.0 +36 76 training.batch_size 0.0 +36 77 model.embedding_dim 0.0 +36 77 optimizer.lr 0.006594867383637903 +36 77 negative_sampler.num_negs_per_pos 5.0 +36 77 training.batch_size 1.0 +36 78 model.embedding_dim 2.0 +36 78 optimizer.lr 0.002356167258852877 +36 78 negative_sampler.num_negs_per_pos 65.0 +36 78 training.batch_size 0.0 +36 79 model.embedding_dim 2.0 +36 79 optimizer.lr 0.0043445822452864895 +36 79 negative_sampler.num_negs_per_pos 2.0 +36 79 training.batch_size 0.0 +36 80 model.embedding_dim 2.0 +36 80 optimizer.lr 0.015482206070771588 +36 80 negative_sampler.num_negs_per_pos 73.0 +36 80 training.batch_size 0.0 +36 81 model.embedding_dim 1.0 +36 81 optimizer.lr 0.017048661646196197 +36 81 negative_sampler.num_negs_per_pos 51.0 +36 81 training.batch_size 1.0 +36 82 model.embedding_dim 0.0 +36 82 optimizer.lr 0.0025273830455076257 +36 82 negative_sampler.num_negs_per_pos 64.0 +36 82 training.batch_size 2.0 +36 83 model.embedding_dim 1.0 +36 83 optimizer.lr 0.041660087273450365 +36 83 
negative_sampler.num_negs_per_pos 64.0 +36 83 training.batch_size 2.0 +36 84 model.embedding_dim 2.0 +36 84 optimizer.lr 0.012663933019762954 +36 84 negative_sampler.num_negs_per_pos 65.0 +36 84 training.batch_size 2.0 +36 85 model.embedding_dim 1.0 +36 85 optimizer.lr 0.001436777715712572 +36 85 negative_sampler.num_negs_per_pos 20.0 +36 85 training.batch_size 1.0 +36 86 model.embedding_dim 1.0 +36 86 optimizer.lr 0.09312385149646142 +36 86 negative_sampler.num_negs_per_pos 21.0 +36 86 training.batch_size 1.0 +36 87 model.embedding_dim 1.0 +36 87 optimizer.lr 0.016859473642994328 +36 87 negative_sampler.num_negs_per_pos 20.0 +36 87 training.batch_size 2.0 +36 88 model.embedding_dim 2.0 +36 88 optimizer.lr 0.08675356470691294 +36 88 negative_sampler.num_negs_per_pos 7.0 +36 88 training.batch_size 0.0 +36 89 model.embedding_dim 2.0 +36 89 optimizer.lr 0.024876230150343355 +36 89 negative_sampler.num_negs_per_pos 29.0 +36 89 training.batch_size 0.0 +36 90 model.embedding_dim 2.0 +36 90 optimizer.lr 0.0016652406202285206 +36 90 negative_sampler.num_negs_per_pos 50.0 +36 90 training.batch_size 2.0 +36 91 model.embedding_dim 0.0 +36 91 optimizer.lr 0.058350420713744924 +36 91 negative_sampler.num_negs_per_pos 25.0 +36 91 training.batch_size 2.0 +36 92 model.embedding_dim 2.0 +36 92 optimizer.lr 0.01803165339323255 +36 92 negative_sampler.num_negs_per_pos 50.0 +36 92 training.batch_size 2.0 +36 93 model.embedding_dim 2.0 +36 93 optimizer.lr 0.0492555583569582 +36 93 negative_sampler.num_negs_per_pos 40.0 +36 93 training.batch_size 0.0 +36 94 model.embedding_dim 1.0 +36 94 optimizer.lr 0.011108313844629701 +36 94 negative_sampler.num_negs_per_pos 85.0 +36 94 training.batch_size 1.0 +36 95 model.embedding_dim 0.0 +36 95 optimizer.lr 0.0027196068978419816 +36 95 negative_sampler.num_negs_per_pos 14.0 +36 95 training.batch_size 0.0 +36 96 model.embedding_dim 2.0 +36 96 optimizer.lr 0.03576038297474203 +36 96 negative_sampler.num_negs_per_pos 74.0 +36 96 training.batch_size 
0.0 +36 97 model.embedding_dim 1.0 +36 97 optimizer.lr 0.014732108755321816 +36 97 negative_sampler.num_negs_per_pos 54.0 +36 97 training.batch_size 0.0 +36 98 model.embedding_dim 0.0 +36 98 optimizer.lr 0.07843681123427983 +36 98 negative_sampler.num_negs_per_pos 67.0 +36 98 training.batch_size 2.0 +36 99 model.embedding_dim 0.0 +36 99 optimizer.lr 0.027157413798209518 +36 99 negative_sampler.num_negs_per_pos 95.0 +36 99 training.batch_size 2.0 +36 100 model.embedding_dim 2.0 +36 100 optimizer.lr 0.059247270923167804 +36 100 negative_sampler.num_negs_per_pos 83.0 +36 100 training.batch_size 1.0 +36 1 dataset """kinships""" +36 1 model """complex""" +36 1 loss """bceaftersigmoid""" +36 1 regularizer """no""" +36 1 optimizer """adam""" +36 1 training_loop """owa""" +36 1 negative_sampler """basic""" +36 1 evaluator """rankbased""" +36 2 dataset """kinships""" +36 2 model """complex""" +36 2 loss """bceaftersigmoid""" +36 2 regularizer """no""" +36 2 optimizer """adam""" +36 2 training_loop """owa""" +36 2 negative_sampler """basic""" +36 2 evaluator """rankbased""" +36 3 dataset """kinships""" +36 3 model """complex""" +36 3 loss """bceaftersigmoid""" +36 3 regularizer """no""" +36 3 optimizer """adam""" +36 3 training_loop """owa""" +36 3 negative_sampler """basic""" +36 3 evaluator """rankbased""" +36 4 dataset """kinships""" +36 4 model """complex""" +36 4 loss """bceaftersigmoid""" +36 4 regularizer """no""" +36 4 optimizer """adam""" +36 4 training_loop """owa""" +36 4 negative_sampler """basic""" +36 4 evaluator """rankbased""" +36 5 dataset """kinships""" +36 5 model """complex""" +36 5 loss """bceaftersigmoid""" +36 5 regularizer """no""" +36 5 optimizer """adam""" +36 5 training_loop """owa""" +36 5 negative_sampler """basic""" +36 5 evaluator """rankbased""" +36 6 dataset """kinships""" +36 6 model """complex""" +36 6 loss """bceaftersigmoid""" +36 6 regularizer """no""" +36 6 optimizer """adam""" +36 6 training_loop """owa""" +36 6 negative_sampler 
"""basic""" +36 6 evaluator """rankbased""" +36 7 dataset """kinships""" +36 7 model """complex""" +36 7 loss """bceaftersigmoid""" +36 7 regularizer """no""" +36 7 optimizer """adam""" +36 7 training_loop """owa""" +36 7 negative_sampler """basic""" +36 7 evaluator """rankbased""" +36 8 dataset """kinships""" +36 8 model """complex""" +36 8 loss """bceaftersigmoid""" +36 8 regularizer """no""" +36 8 optimizer """adam""" +36 8 training_loop """owa""" +36 8 negative_sampler """basic""" +36 8 evaluator """rankbased""" +36 9 dataset """kinships""" +36 9 model """complex""" +36 9 loss """bceaftersigmoid""" +36 9 regularizer """no""" +36 9 optimizer """adam""" +36 9 training_loop """owa""" +36 9 negative_sampler """basic""" +36 9 evaluator """rankbased""" +36 10 dataset """kinships""" +36 10 model """complex""" +36 10 loss """bceaftersigmoid""" +36 10 regularizer """no""" +36 10 optimizer """adam""" +36 10 training_loop """owa""" +36 10 negative_sampler """basic""" +36 10 evaluator """rankbased""" +36 11 dataset """kinships""" +36 11 model """complex""" +36 11 loss """bceaftersigmoid""" +36 11 regularizer """no""" +36 11 optimizer """adam""" +36 11 training_loop """owa""" +36 11 negative_sampler """basic""" +36 11 evaluator """rankbased""" +36 12 dataset """kinships""" +36 12 model """complex""" +36 12 loss """bceaftersigmoid""" +36 12 regularizer """no""" +36 12 optimizer """adam""" +36 12 training_loop """owa""" +36 12 negative_sampler """basic""" +36 12 evaluator """rankbased""" +36 13 dataset """kinships""" +36 13 model """complex""" +36 13 loss """bceaftersigmoid""" +36 13 regularizer """no""" +36 13 optimizer """adam""" +36 13 training_loop """owa""" +36 13 negative_sampler """basic""" +36 13 evaluator """rankbased""" +36 14 dataset """kinships""" +36 14 model """complex""" +36 14 loss """bceaftersigmoid""" +36 14 regularizer """no""" +36 14 optimizer """adam""" +36 14 training_loop """owa""" +36 14 negative_sampler """basic""" +36 14 evaluator """rankbased""" +36 
15 dataset """kinships""" +36 15 model """complex""" +36 15 loss """bceaftersigmoid""" +36 15 regularizer """no""" +36 15 optimizer """adam""" +36 15 training_loop """owa""" +36 15 negative_sampler """basic""" +36 15 evaluator """rankbased""" +36 16 dataset """kinships""" +36 16 model """complex""" +36 16 loss """bceaftersigmoid""" +36 16 regularizer """no""" +36 16 optimizer """adam""" +36 16 training_loop """owa""" +36 16 negative_sampler """basic""" +36 16 evaluator """rankbased""" +36 17 dataset """kinships""" +36 17 model """complex""" +36 17 loss """bceaftersigmoid""" +36 17 regularizer """no""" +36 17 optimizer """adam""" +36 17 training_loop """owa""" +36 17 negative_sampler """basic""" +36 17 evaluator """rankbased""" +36 18 dataset """kinships""" +36 18 model """complex""" +36 18 loss """bceaftersigmoid""" +36 18 regularizer """no""" +36 18 optimizer """adam""" +36 18 training_loop """owa""" +36 18 negative_sampler """basic""" +36 18 evaluator """rankbased""" +36 19 dataset """kinships""" +36 19 model """complex""" +36 19 loss """bceaftersigmoid""" +36 19 regularizer """no""" +36 19 optimizer """adam""" +36 19 training_loop """owa""" +36 19 negative_sampler """basic""" +36 19 evaluator """rankbased""" +36 20 dataset """kinships""" +36 20 model """complex""" +36 20 loss """bceaftersigmoid""" +36 20 regularizer """no""" +36 20 optimizer """adam""" +36 20 training_loop """owa""" +36 20 negative_sampler """basic""" +36 20 evaluator """rankbased""" +36 21 dataset """kinships""" +36 21 model """complex""" +36 21 loss """bceaftersigmoid""" +36 21 regularizer """no""" +36 21 optimizer """adam""" +36 21 training_loop """owa""" +36 21 negative_sampler """basic""" +36 21 evaluator """rankbased""" +36 22 dataset """kinships""" +36 22 model """complex""" +36 22 loss """bceaftersigmoid""" +36 22 regularizer """no""" +36 22 optimizer """adam""" +36 22 training_loop """owa""" +36 22 negative_sampler """basic""" +36 22 evaluator """rankbased""" +36 23 dataset 
"""kinships""" +36 23 model """complex""" +36 23 loss """bceaftersigmoid""" +36 23 regularizer """no""" +36 23 optimizer """adam""" +36 23 training_loop """owa""" +36 23 negative_sampler """basic""" +36 23 evaluator """rankbased""" +36 24 dataset """kinships""" +36 24 model """complex""" +36 24 loss """bceaftersigmoid""" +36 24 regularizer """no""" +36 24 optimizer """adam""" +36 24 training_loop """owa""" +36 24 negative_sampler """basic""" +36 24 evaluator """rankbased""" +36 25 dataset """kinships""" +36 25 model """complex""" +36 25 loss """bceaftersigmoid""" +36 25 regularizer """no""" +36 25 optimizer """adam""" +36 25 training_loop """owa""" +36 25 negative_sampler """basic""" +36 25 evaluator """rankbased""" +36 26 dataset """kinships""" +36 26 model """complex""" +36 26 loss """bceaftersigmoid""" +36 26 regularizer """no""" +36 26 optimizer """adam""" +36 26 training_loop """owa""" +36 26 negative_sampler """basic""" +36 26 evaluator """rankbased""" +36 27 dataset """kinships""" +36 27 model """complex""" +36 27 loss """bceaftersigmoid""" +36 27 regularizer """no""" +36 27 optimizer """adam""" +36 27 training_loop """owa""" +36 27 negative_sampler """basic""" +36 27 evaluator """rankbased""" +36 28 dataset """kinships""" +36 28 model """complex""" +36 28 loss """bceaftersigmoid""" +36 28 regularizer """no""" +36 28 optimizer """adam""" +36 28 training_loop """owa""" +36 28 negative_sampler """basic""" +36 28 evaluator """rankbased""" +36 29 dataset """kinships""" +36 29 model """complex""" +36 29 loss """bceaftersigmoid""" +36 29 regularizer """no""" +36 29 optimizer """adam""" +36 29 training_loop """owa""" +36 29 negative_sampler """basic""" +36 29 evaluator """rankbased""" +36 30 dataset """kinships""" +36 30 model """complex""" +36 30 loss """bceaftersigmoid""" +36 30 regularizer """no""" +36 30 optimizer """adam""" +36 30 training_loop """owa""" +36 30 negative_sampler """basic""" +36 30 evaluator """rankbased""" +36 31 dataset """kinships""" +36 31 
model """complex""" +36 31 loss """bceaftersigmoid""" +36 31 regularizer """no""" +36 31 optimizer """adam""" +36 31 training_loop """owa""" +36 31 negative_sampler """basic""" +36 31 evaluator """rankbased""" +36 32 dataset """kinships""" +36 32 model """complex""" +36 32 loss """bceaftersigmoid""" +36 32 regularizer """no""" +36 32 optimizer """adam""" +36 32 training_loop """owa""" +36 32 negative_sampler """basic""" +36 32 evaluator """rankbased""" +36 33 dataset """kinships""" +36 33 model """complex""" +36 33 loss """bceaftersigmoid""" +36 33 regularizer """no""" +36 33 optimizer """adam""" +36 33 training_loop """owa""" +36 33 negative_sampler """basic""" +36 33 evaluator """rankbased""" +36 34 dataset """kinships""" +36 34 model """complex""" +36 34 loss """bceaftersigmoid""" +36 34 regularizer """no""" +36 34 optimizer """adam""" +36 34 training_loop """owa""" +36 34 negative_sampler """basic""" +36 34 evaluator """rankbased""" +36 35 dataset """kinships""" +36 35 model """complex""" +36 35 loss """bceaftersigmoid""" +36 35 regularizer """no""" +36 35 optimizer """adam""" +36 35 training_loop """owa""" +36 35 negative_sampler """basic""" +36 35 evaluator """rankbased""" +36 36 dataset """kinships""" +36 36 model """complex""" +36 36 loss """bceaftersigmoid""" +36 36 regularizer """no""" +36 36 optimizer """adam""" +36 36 training_loop """owa""" +36 36 negative_sampler """basic""" +36 36 evaluator """rankbased""" +36 37 dataset """kinships""" +36 37 model """complex""" +36 37 loss """bceaftersigmoid""" +36 37 regularizer """no""" +36 37 optimizer """adam""" +36 37 training_loop """owa""" +36 37 negative_sampler """basic""" +36 37 evaluator """rankbased""" +36 38 dataset """kinships""" +36 38 model """complex""" +36 38 loss """bceaftersigmoid""" +36 38 regularizer """no""" +36 38 optimizer """adam""" +36 38 training_loop """owa""" +36 38 negative_sampler """basic""" +36 38 evaluator """rankbased""" +36 39 dataset """kinships""" +36 39 model """complex""" +36 
39 loss """bceaftersigmoid""" +36 39 regularizer """no""" +36 39 optimizer """adam""" +36 39 training_loop """owa""" +36 39 negative_sampler """basic""" +36 39 evaluator """rankbased""" +36 40 dataset """kinships""" +36 40 model """complex""" +36 40 loss """bceaftersigmoid""" +36 40 regularizer """no""" +36 40 optimizer """adam""" +36 40 training_loop """owa""" +36 40 negative_sampler """basic""" +36 40 evaluator """rankbased""" +36 41 dataset """kinships""" +36 41 model """complex""" +36 41 loss """bceaftersigmoid""" +36 41 regularizer """no""" +36 41 optimizer """adam""" +36 41 training_loop """owa""" +36 41 negative_sampler """basic""" +36 41 evaluator """rankbased""" +36 42 dataset """kinships""" +36 42 model """complex""" +36 42 loss """bceaftersigmoid""" +36 42 regularizer """no""" +36 42 optimizer """adam""" +36 42 training_loop """owa""" +36 42 negative_sampler """basic""" +36 42 evaluator """rankbased""" +36 43 dataset """kinships""" +36 43 model """complex""" +36 43 loss """bceaftersigmoid""" +36 43 regularizer """no""" +36 43 optimizer """adam""" +36 43 training_loop """owa""" +36 43 negative_sampler """basic""" +36 43 evaluator """rankbased""" +36 44 dataset """kinships""" +36 44 model """complex""" +36 44 loss """bceaftersigmoid""" +36 44 regularizer """no""" +36 44 optimizer """adam""" +36 44 training_loop """owa""" +36 44 negative_sampler """basic""" +36 44 evaluator """rankbased""" +36 45 dataset """kinships""" +36 45 model """complex""" +36 45 loss """bceaftersigmoid""" +36 45 regularizer """no""" +36 45 optimizer """adam""" +36 45 training_loop """owa""" +36 45 negative_sampler """basic""" +36 45 evaluator """rankbased""" +36 46 dataset """kinships""" +36 46 model """complex""" +36 46 loss """bceaftersigmoid""" +36 46 regularizer """no""" +36 46 optimizer """adam""" +36 46 training_loop """owa""" +36 46 negative_sampler """basic""" +36 46 evaluator """rankbased""" +36 47 dataset """kinships""" +36 47 model """complex""" +36 47 loss 
"""bceaftersigmoid""" +36 47 regularizer """no""" +36 47 optimizer """adam""" +36 47 training_loop """owa""" +36 47 negative_sampler """basic""" +36 47 evaluator """rankbased""" +36 48 dataset """kinships""" +36 48 model """complex""" +36 48 loss """bceaftersigmoid""" +36 48 regularizer """no""" +36 48 optimizer """adam""" +36 48 training_loop """owa""" +36 48 negative_sampler """basic""" +36 48 evaluator """rankbased""" +36 49 dataset """kinships""" +36 49 model """complex""" +36 49 loss """bceaftersigmoid""" +36 49 regularizer """no""" +36 49 optimizer """adam""" +36 49 training_loop """owa""" +36 49 negative_sampler """basic""" +36 49 evaluator """rankbased""" +36 50 dataset """kinships""" +36 50 model """complex""" +36 50 loss """bceaftersigmoid""" +36 50 regularizer """no""" +36 50 optimizer """adam""" +36 50 training_loop """owa""" +36 50 negative_sampler """basic""" +36 50 evaluator """rankbased""" +36 51 dataset """kinships""" +36 51 model """complex""" +36 51 loss """bceaftersigmoid""" +36 51 regularizer """no""" +36 51 optimizer """adam""" +36 51 training_loop """owa""" +36 51 negative_sampler """basic""" +36 51 evaluator """rankbased""" +36 52 dataset """kinships""" +36 52 model """complex""" +36 52 loss """bceaftersigmoid""" +36 52 regularizer """no""" +36 52 optimizer """adam""" +36 52 training_loop """owa""" +36 52 negative_sampler """basic""" +36 52 evaluator """rankbased""" +36 53 dataset """kinships""" +36 53 model """complex""" +36 53 loss """bceaftersigmoid""" +36 53 regularizer """no""" +36 53 optimizer """adam""" +36 53 training_loop """owa""" +36 53 negative_sampler """basic""" +36 53 evaluator """rankbased""" +36 54 dataset """kinships""" +36 54 model """complex""" +36 54 loss """bceaftersigmoid""" +36 54 regularizer """no""" +36 54 optimizer """adam""" +36 54 training_loop """owa""" +36 54 negative_sampler """basic""" +36 54 evaluator """rankbased""" +36 55 dataset """kinships""" +36 55 model """complex""" +36 55 loss """bceaftersigmoid""" 
+36 55 regularizer """no""" +36 55 optimizer """adam""" +36 55 training_loop """owa""" +36 55 negative_sampler """basic""" +36 55 evaluator """rankbased""" +36 56 dataset """kinships""" +36 56 model """complex""" +36 56 loss """bceaftersigmoid""" +36 56 regularizer """no""" +36 56 optimizer """adam""" +36 56 training_loop """owa""" +36 56 negative_sampler """basic""" +36 56 evaluator """rankbased""" +36 57 dataset """kinships""" +36 57 model """complex""" +36 57 loss """bceaftersigmoid""" +36 57 regularizer """no""" +36 57 optimizer """adam""" +36 57 training_loop """owa""" +36 57 negative_sampler """basic""" +36 57 evaluator """rankbased""" +36 58 dataset """kinships""" +36 58 model """complex""" +36 58 loss """bceaftersigmoid""" +36 58 regularizer """no""" +36 58 optimizer """adam""" +36 58 training_loop """owa""" +36 58 negative_sampler """basic""" +36 58 evaluator """rankbased""" +36 59 dataset """kinships""" +36 59 model """complex""" +36 59 loss """bceaftersigmoid""" +36 59 regularizer """no""" +36 59 optimizer """adam""" +36 59 training_loop """owa""" +36 59 negative_sampler """basic""" +36 59 evaluator """rankbased""" +36 60 dataset """kinships""" +36 60 model """complex""" +36 60 loss """bceaftersigmoid""" +36 60 regularizer """no""" +36 60 optimizer """adam""" +36 60 training_loop """owa""" +36 60 negative_sampler """basic""" +36 60 evaluator """rankbased""" +36 61 dataset """kinships""" +36 61 model """complex""" +36 61 loss """bceaftersigmoid""" +36 61 regularizer """no""" +36 61 optimizer """adam""" +36 61 training_loop """owa""" +36 61 negative_sampler """basic""" +36 61 evaluator """rankbased""" +36 62 dataset """kinships""" +36 62 model """complex""" +36 62 loss """bceaftersigmoid""" +36 62 regularizer """no""" +36 62 optimizer """adam""" +36 62 training_loop """owa""" +36 62 negative_sampler """basic""" +36 62 evaluator """rankbased""" +36 63 dataset """kinships""" +36 63 model """complex""" +36 63 loss """bceaftersigmoid""" +36 63 regularizer 
"""no""" +36 63 optimizer """adam""" +36 63 training_loop """owa""" +36 63 negative_sampler """basic""" +36 63 evaluator """rankbased""" +36 64 dataset """kinships""" +36 64 model """complex""" +36 64 loss """bceaftersigmoid""" +36 64 regularizer """no""" +36 64 optimizer """adam""" +36 64 training_loop """owa""" +36 64 negative_sampler """basic""" +36 64 evaluator """rankbased""" +36 65 dataset """kinships""" +36 65 model """complex""" +36 65 loss """bceaftersigmoid""" +36 65 regularizer """no""" +36 65 optimizer """adam""" +36 65 training_loop """owa""" +36 65 negative_sampler """basic""" +36 65 evaluator """rankbased""" +36 66 dataset """kinships""" +36 66 model """complex""" +36 66 loss """bceaftersigmoid""" +36 66 regularizer """no""" +36 66 optimizer """adam""" +36 66 training_loop """owa""" +36 66 negative_sampler """basic""" +36 66 evaluator """rankbased""" +36 67 dataset """kinships""" +36 67 model """complex""" +36 67 loss """bceaftersigmoid""" +36 67 regularizer """no""" +36 67 optimizer """adam""" +36 67 training_loop """owa""" +36 67 negative_sampler """basic""" +36 67 evaluator """rankbased""" +36 68 dataset """kinships""" +36 68 model """complex""" +36 68 loss """bceaftersigmoid""" +36 68 regularizer """no""" +36 68 optimizer """adam""" +36 68 training_loop """owa""" +36 68 negative_sampler """basic""" +36 68 evaluator """rankbased""" +36 69 dataset """kinships""" +36 69 model """complex""" +36 69 loss """bceaftersigmoid""" +36 69 regularizer """no""" +36 69 optimizer """adam""" +36 69 training_loop """owa""" +36 69 negative_sampler """basic""" +36 69 evaluator """rankbased""" +36 70 dataset """kinships""" +36 70 model """complex""" +36 70 loss """bceaftersigmoid""" +36 70 regularizer """no""" +36 70 optimizer """adam""" +36 70 training_loop """owa""" +36 70 negative_sampler """basic""" +36 70 evaluator """rankbased""" +36 71 dataset """kinships""" +36 71 model """complex""" +36 71 loss """bceaftersigmoid""" +36 71 regularizer """no""" +36 71 
optimizer """adam""" +36 71 training_loop """owa""" +36 71 negative_sampler """basic""" +36 71 evaluator """rankbased""" +36 72 dataset """kinships""" +36 72 model """complex""" +36 72 loss """bceaftersigmoid""" +36 72 regularizer """no""" +36 72 optimizer """adam""" +36 72 training_loop """owa""" +36 72 negative_sampler """basic""" +36 72 evaluator """rankbased""" +36 73 dataset """kinships""" +36 73 model """complex""" +36 73 loss """bceaftersigmoid""" +36 73 regularizer """no""" +36 73 optimizer """adam""" +36 73 training_loop """owa""" +36 73 negative_sampler """basic""" +36 73 evaluator """rankbased""" +36 74 dataset """kinships""" +36 74 model """complex""" +36 74 loss """bceaftersigmoid""" +36 74 regularizer """no""" +36 74 optimizer """adam""" +36 74 training_loop """owa""" +36 74 negative_sampler """basic""" +36 74 evaluator """rankbased""" +36 75 dataset """kinships""" +36 75 model """complex""" +36 75 loss """bceaftersigmoid""" +36 75 regularizer """no""" +36 75 optimizer """adam""" +36 75 training_loop """owa""" +36 75 negative_sampler """basic""" +36 75 evaluator """rankbased""" +36 76 dataset """kinships""" +36 76 model """complex""" +36 76 loss """bceaftersigmoid""" +36 76 regularizer """no""" +36 76 optimizer """adam""" +36 76 training_loop """owa""" +36 76 negative_sampler """basic""" +36 76 evaluator """rankbased""" +36 77 dataset """kinships""" +36 77 model """complex""" +36 77 loss """bceaftersigmoid""" +36 77 regularizer """no""" +36 77 optimizer """adam""" +36 77 training_loop """owa""" +36 77 negative_sampler """basic""" +36 77 evaluator """rankbased""" +36 78 dataset """kinships""" +36 78 model """complex""" +36 78 loss """bceaftersigmoid""" +36 78 regularizer """no""" +36 78 optimizer """adam""" +36 78 training_loop """owa""" +36 78 negative_sampler """basic""" +36 78 evaluator """rankbased""" +36 79 dataset """kinships""" +36 79 model """complex""" +36 79 loss """bceaftersigmoid""" +36 79 regularizer """no""" +36 79 optimizer """adam""" 
+36 79 training_loop """owa""" +36 79 negative_sampler """basic""" +36 79 evaluator """rankbased""" +36 80 dataset """kinships""" +36 80 model """complex""" +36 80 loss """bceaftersigmoid""" +36 80 regularizer """no""" +36 80 optimizer """adam""" +36 80 training_loop """owa""" +36 80 negative_sampler """basic""" +36 80 evaluator """rankbased""" +36 81 dataset """kinships""" +36 81 model """complex""" +36 81 loss """bceaftersigmoid""" +36 81 regularizer """no""" +36 81 optimizer """adam""" +36 81 training_loop """owa""" +36 81 negative_sampler """basic""" +36 81 evaluator """rankbased""" +36 82 dataset """kinships""" +36 82 model """complex""" +36 82 loss """bceaftersigmoid""" +36 82 regularizer """no""" +36 82 optimizer """adam""" +36 82 training_loop """owa""" +36 82 negative_sampler """basic""" +36 82 evaluator """rankbased""" +36 83 dataset """kinships""" +36 83 model """complex""" +36 83 loss """bceaftersigmoid""" +36 83 regularizer """no""" +36 83 optimizer """adam""" +36 83 training_loop """owa""" +36 83 negative_sampler """basic""" +36 83 evaluator """rankbased""" +36 84 dataset """kinships""" +36 84 model """complex""" +36 84 loss """bceaftersigmoid""" +36 84 regularizer """no""" +36 84 optimizer """adam""" +36 84 training_loop """owa""" +36 84 negative_sampler """basic""" +36 84 evaluator """rankbased""" +36 85 dataset """kinships""" +36 85 model """complex""" +36 85 loss """bceaftersigmoid""" +36 85 regularizer """no""" +36 85 optimizer """adam""" +36 85 training_loop """owa""" +36 85 negative_sampler """basic""" +36 85 evaluator """rankbased""" +36 86 dataset """kinships""" +36 86 model """complex""" +36 86 loss """bceaftersigmoid""" +36 86 regularizer """no""" +36 86 optimizer """adam""" +36 86 training_loop """owa""" +36 86 negative_sampler """basic""" +36 86 evaluator """rankbased""" +36 87 dataset """kinships""" +36 87 model """complex""" +36 87 loss """bceaftersigmoid""" +36 87 regularizer """no""" +36 87 optimizer """adam""" +36 87 training_loop 
"""owa""" +36 87 negative_sampler """basic""" +36 87 evaluator """rankbased""" +36 88 dataset """kinships""" +36 88 model """complex""" +36 88 loss """bceaftersigmoid""" +36 88 regularizer """no""" +36 88 optimizer """adam""" +36 88 training_loop """owa""" +36 88 negative_sampler """basic""" +36 88 evaluator """rankbased""" +36 89 dataset """kinships""" +36 89 model """complex""" +36 89 loss """bceaftersigmoid""" +36 89 regularizer """no""" +36 89 optimizer """adam""" +36 89 training_loop """owa""" +36 89 negative_sampler """basic""" +36 89 evaluator """rankbased""" +36 90 dataset """kinships""" +36 90 model """complex""" +36 90 loss """bceaftersigmoid""" +36 90 regularizer """no""" +36 90 optimizer """adam""" +36 90 training_loop """owa""" +36 90 negative_sampler """basic""" +36 90 evaluator """rankbased""" +36 91 dataset """kinships""" +36 91 model """complex""" +36 91 loss """bceaftersigmoid""" +36 91 regularizer """no""" +36 91 optimizer """adam""" +36 91 training_loop """owa""" +36 91 negative_sampler """basic""" +36 91 evaluator """rankbased""" +36 92 dataset """kinships""" +36 92 model """complex""" +36 92 loss """bceaftersigmoid""" +36 92 regularizer """no""" +36 92 optimizer """adam""" +36 92 training_loop """owa""" +36 92 negative_sampler """basic""" +36 92 evaluator """rankbased""" +36 93 dataset """kinships""" +36 93 model """complex""" +36 93 loss """bceaftersigmoid""" +36 93 regularizer """no""" +36 93 optimizer """adam""" +36 93 training_loop """owa""" +36 93 negative_sampler """basic""" +36 93 evaluator """rankbased""" +36 94 dataset """kinships""" +36 94 model """complex""" +36 94 loss """bceaftersigmoid""" +36 94 regularizer """no""" +36 94 optimizer """adam""" +36 94 training_loop """owa""" +36 94 negative_sampler """basic""" +36 94 evaluator """rankbased""" +36 95 dataset """kinships""" +36 95 model """complex""" +36 95 loss """bceaftersigmoid""" +36 95 regularizer """no""" +36 95 optimizer """adam""" +36 95 training_loop """owa""" +36 95 
negative_sampler """basic""" +36 95 evaluator """rankbased""" +36 96 dataset """kinships""" +36 96 model """complex""" +36 96 loss """bceaftersigmoid""" +36 96 regularizer """no""" +36 96 optimizer """adam""" +36 96 training_loop """owa""" +36 96 negative_sampler """basic""" +36 96 evaluator """rankbased""" +36 97 dataset """kinships""" +36 97 model """complex""" +36 97 loss """bceaftersigmoid""" +36 97 regularizer """no""" +36 97 optimizer """adam""" +36 97 training_loop """owa""" +36 97 negative_sampler """basic""" +36 97 evaluator """rankbased""" +36 98 dataset """kinships""" +36 98 model """complex""" +36 98 loss """bceaftersigmoid""" +36 98 regularizer """no""" +36 98 optimizer """adam""" +36 98 training_loop """owa""" +36 98 negative_sampler """basic""" +36 98 evaluator """rankbased""" +36 99 dataset """kinships""" +36 99 model """complex""" +36 99 loss """bceaftersigmoid""" +36 99 regularizer """no""" +36 99 optimizer """adam""" +36 99 training_loop """owa""" +36 99 negative_sampler """basic""" +36 99 evaluator """rankbased""" +36 100 dataset """kinships""" +36 100 model """complex""" +36 100 loss """bceaftersigmoid""" +36 100 regularizer """no""" +36 100 optimizer """adam""" +36 100 training_loop """owa""" +36 100 negative_sampler """basic""" +36 100 evaluator """rankbased""" +37 1 model.embedding_dim 2.0 +37 1 optimizer.lr 0.08974598910272209 +37 1 negative_sampler.num_negs_per_pos 59.0 +37 1 training.batch_size 1.0 +37 2 model.embedding_dim 0.0 +37 2 optimizer.lr 0.002497936173623666 +37 2 negative_sampler.num_negs_per_pos 40.0 +37 2 training.batch_size 2.0 +37 3 model.embedding_dim 0.0 +37 3 optimizer.lr 0.001520917009141517 +37 3 negative_sampler.num_negs_per_pos 61.0 +37 3 training.batch_size 2.0 +37 4 model.embedding_dim 0.0 +37 4 optimizer.lr 0.002075509218667418 +37 4 negative_sampler.num_negs_per_pos 45.0 +37 4 training.batch_size 1.0 +37 5 model.embedding_dim 1.0 +37 5 optimizer.lr 0.031854397233976206 +37 5 negative_sampler.num_negs_per_pos 0.0 
+37 5 training.batch_size 1.0 +37 6 model.embedding_dim 2.0 +37 6 optimizer.lr 0.0011575446929006652 +37 6 negative_sampler.num_negs_per_pos 87.0 +37 6 training.batch_size 0.0 +37 7 model.embedding_dim 0.0 +37 7 optimizer.lr 0.004573291602953102 +37 7 negative_sampler.num_negs_per_pos 21.0 +37 7 training.batch_size 1.0 +37 8 model.embedding_dim 1.0 +37 8 optimizer.lr 0.0010439356467704256 +37 8 negative_sampler.num_negs_per_pos 56.0 +37 8 training.batch_size 2.0 +37 9 model.embedding_dim 0.0 +37 9 optimizer.lr 0.01746817917569591 +37 9 negative_sampler.num_negs_per_pos 64.0 +37 9 training.batch_size 1.0 +37 10 model.embedding_dim 1.0 +37 10 optimizer.lr 0.005309060945533605 +37 10 negative_sampler.num_negs_per_pos 86.0 +37 10 training.batch_size 0.0 +37 11 model.embedding_dim 2.0 +37 11 optimizer.lr 0.030103086380335528 +37 11 negative_sampler.num_negs_per_pos 2.0 +37 11 training.batch_size 2.0 +37 12 model.embedding_dim 2.0 +37 12 optimizer.lr 0.0424152250216654 +37 12 negative_sampler.num_negs_per_pos 82.0 +37 12 training.batch_size 1.0 +37 13 model.embedding_dim 2.0 +37 13 optimizer.lr 0.011046599516954638 +37 13 negative_sampler.num_negs_per_pos 85.0 +37 13 training.batch_size 2.0 +37 14 model.embedding_dim 0.0 +37 14 optimizer.lr 0.009193419559391037 +37 14 negative_sampler.num_negs_per_pos 46.0 +37 14 training.batch_size 1.0 +37 15 model.embedding_dim 1.0 +37 15 optimizer.lr 0.07428832730630029 +37 15 negative_sampler.num_negs_per_pos 71.0 +37 15 training.batch_size 2.0 +37 16 model.embedding_dim 0.0 +37 16 optimizer.lr 0.02596725806524595 +37 16 negative_sampler.num_negs_per_pos 49.0 +37 16 training.batch_size 1.0 +37 17 model.embedding_dim 1.0 +37 17 optimizer.lr 0.006978563482480401 +37 17 negative_sampler.num_negs_per_pos 48.0 +37 17 training.batch_size 2.0 +37 18 model.embedding_dim 1.0 +37 18 optimizer.lr 0.012780040730593367 +37 18 negative_sampler.num_negs_per_pos 66.0 +37 18 training.batch_size 0.0 +37 19 model.embedding_dim 1.0 +37 19 optimizer.lr 
0.005524793613002135 +37 19 negative_sampler.num_negs_per_pos 63.0 +37 19 training.batch_size 2.0 +37 20 model.embedding_dim 1.0 +37 20 optimizer.lr 0.04516963529070883 +37 20 negative_sampler.num_negs_per_pos 11.0 +37 20 training.batch_size 2.0 +37 21 model.embedding_dim 2.0 +37 21 optimizer.lr 0.008972717286675655 +37 21 negative_sampler.num_negs_per_pos 37.0 +37 21 training.batch_size 2.0 +37 22 model.embedding_dim 2.0 +37 22 optimizer.lr 0.0024873881501788746 +37 22 negative_sampler.num_negs_per_pos 20.0 +37 22 training.batch_size 0.0 +37 23 model.embedding_dim 1.0 +37 23 optimizer.lr 0.005544335602673054 +37 23 negative_sampler.num_negs_per_pos 27.0 +37 23 training.batch_size 1.0 +37 24 model.embedding_dim 2.0 +37 24 optimizer.lr 0.006384833331001781 +37 24 negative_sampler.num_negs_per_pos 42.0 +37 24 training.batch_size 0.0 +37 25 model.embedding_dim 0.0 +37 25 optimizer.lr 0.0038159346718559185 +37 25 negative_sampler.num_negs_per_pos 56.0 +37 25 training.batch_size 1.0 +37 26 model.embedding_dim 2.0 +37 26 optimizer.lr 0.0030165128849296684 +37 26 negative_sampler.num_negs_per_pos 45.0 +37 26 training.batch_size 1.0 +37 27 model.embedding_dim 2.0 +37 27 optimizer.lr 0.03677636468427669 +37 27 negative_sampler.num_negs_per_pos 11.0 +37 27 training.batch_size 0.0 +37 28 model.embedding_dim 0.0 +37 28 optimizer.lr 0.05748619656801368 +37 28 negative_sampler.num_negs_per_pos 0.0 +37 28 training.batch_size 1.0 +37 29 model.embedding_dim 1.0 +37 29 optimizer.lr 0.013533794361561574 +37 29 negative_sampler.num_negs_per_pos 81.0 +37 29 training.batch_size 1.0 +37 30 model.embedding_dim 1.0 +37 30 optimizer.lr 0.08972885066825549 +37 30 negative_sampler.num_negs_per_pos 12.0 +37 30 training.batch_size 0.0 +37 31 model.embedding_dim 1.0 +37 31 optimizer.lr 0.0010227178497463644 +37 31 negative_sampler.num_negs_per_pos 36.0 +37 31 training.batch_size 0.0 +37 32 model.embedding_dim 1.0 +37 32 optimizer.lr 0.012338213335523566 +37 32 negative_sampler.num_negs_per_pos 
79.0 +37 32 training.batch_size 2.0 +37 33 model.embedding_dim 1.0 +37 33 optimizer.lr 0.0029440522823643003 +37 33 negative_sampler.num_negs_per_pos 71.0 +37 33 training.batch_size 2.0 +37 34 model.embedding_dim 2.0 +37 34 optimizer.lr 0.0025889978292182834 +37 34 negative_sampler.num_negs_per_pos 13.0 +37 34 training.batch_size 2.0 +37 35 model.embedding_dim 1.0 +37 35 optimizer.lr 0.0065259804119568105 +37 35 negative_sampler.num_negs_per_pos 13.0 +37 35 training.batch_size 2.0 +37 36 model.embedding_dim 0.0 +37 36 optimizer.lr 0.05911752510081871 +37 36 negative_sampler.num_negs_per_pos 79.0 +37 36 training.batch_size 1.0 +37 37 model.embedding_dim 0.0 +37 37 optimizer.lr 0.00859148285358384 +37 37 negative_sampler.num_negs_per_pos 80.0 +37 37 training.batch_size 1.0 +37 38 model.embedding_dim 2.0 +37 38 optimizer.lr 0.08052809000027074 +37 38 negative_sampler.num_negs_per_pos 60.0 +37 38 training.batch_size 2.0 +37 39 model.embedding_dim 1.0 +37 39 optimizer.lr 0.007887125285686409 +37 39 negative_sampler.num_negs_per_pos 75.0 +37 39 training.batch_size 2.0 +37 40 model.embedding_dim 0.0 +37 40 optimizer.lr 0.008442754613774773 +37 40 negative_sampler.num_negs_per_pos 68.0 +37 40 training.batch_size 2.0 +37 41 model.embedding_dim 1.0 +37 41 optimizer.lr 0.0064340529919054415 +37 41 negative_sampler.num_negs_per_pos 47.0 +37 41 training.batch_size 1.0 +37 42 model.embedding_dim 2.0 +37 42 optimizer.lr 0.010819944876196861 +37 42 negative_sampler.num_negs_per_pos 16.0 +37 42 training.batch_size 2.0 +37 43 model.embedding_dim 2.0 +37 43 optimizer.lr 0.02963015308774389 +37 43 negative_sampler.num_negs_per_pos 61.0 +37 43 training.batch_size 0.0 +37 44 model.embedding_dim 0.0 +37 44 optimizer.lr 0.0043172531196004685 +37 44 negative_sampler.num_negs_per_pos 63.0 +37 44 training.batch_size 0.0 +37 45 model.embedding_dim 2.0 +37 45 optimizer.lr 0.0791092050549636 +37 45 negative_sampler.num_negs_per_pos 11.0 +37 45 training.batch_size 0.0 +37 46 model.embedding_dim 
2.0 +37 46 optimizer.lr 0.09507345253168083 +37 46 negative_sampler.num_negs_per_pos 38.0 +37 46 training.batch_size 0.0 +37 47 model.embedding_dim 1.0 +37 47 optimizer.lr 0.004016124141514559 +37 47 negative_sampler.num_negs_per_pos 44.0 +37 47 training.batch_size 2.0 +37 48 model.embedding_dim 2.0 +37 48 optimizer.lr 0.0010882264976714096 +37 48 negative_sampler.num_negs_per_pos 79.0 +37 48 training.batch_size 0.0 +37 49 model.embedding_dim 1.0 +37 49 optimizer.lr 0.010003361134728404 +37 49 negative_sampler.num_negs_per_pos 73.0 +37 49 training.batch_size 2.0 +37 50 model.embedding_dim 1.0 +37 50 optimizer.lr 0.00550749812699331 +37 50 negative_sampler.num_negs_per_pos 54.0 +37 50 training.batch_size 2.0 +37 51 model.embedding_dim 0.0 +37 51 optimizer.lr 0.006350981446232918 +37 51 negative_sampler.num_negs_per_pos 40.0 +37 51 training.batch_size 0.0 +37 52 model.embedding_dim 2.0 +37 52 optimizer.lr 0.09084722700610703 +37 52 negative_sampler.num_negs_per_pos 44.0 +37 52 training.batch_size 2.0 +37 53 model.embedding_dim 0.0 +37 53 optimizer.lr 0.0030180535490801678 +37 53 negative_sampler.num_negs_per_pos 93.0 +37 53 training.batch_size 1.0 +37 54 model.embedding_dim 2.0 +37 54 optimizer.lr 0.01045782655718001 +37 54 negative_sampler.num_negs_per_pos 44.0 +37 54 training.batch_size 0.0 +37 55 model.embedding_dim 0.0 +37 55 optimizer.lr 0.008208596862706008 +37 55 negative_sampler.num_negs_per_pos 89.0 +37 55 training.batch_size 0.0 +37 56 model.embedding_dim 0.0 +37 56 optimizer.lr 0.029989867005882673 +37 56 negative_sampler.num_negs_per_pos 76.0 +37 56 training.batch_size 0.0 +37 57 model.embedding_dim 0.0 +37 57 optimizer.lr 0.0011727034226328835 +37 57 negative_sampler.num_negs_per_pos 75.0 +37 57 training.batch_size 0.0 +37 58 model.embedding_dim 1.0 +37 58 optimizer.lr 0.04912528424466917 +37 58 negative_sampler.num_negs_per_pos 65.0 +37 58 training.batch_size 0.0 +37 59 model.embedding_dim 1.0 +37 59 optimizer.lr 0.006120794858521879 +37 59 
negative_sampler.num_negs_per_pos 18.0 +37 59 training.batch_size 1.0 +37 60 model.embedding_dim 0.0 +37 60 optimizer.lr 0.01051652784316192 +37 60 negative_sampler.num_negs_per_pos 36.0 +37 60 training.batch_size 0.0 +37 61 model.embedding_dim 1.0 +37 61 optimizer.lr 0.025802502934715615 +37 61 negative_sampler.num_negs_per_pos 12.0 +37 61 training.batch_size 1.0 +37 62 model.embedding_dim 2.0 +37 62 optimizer.lr 0.008562299952113004 +37 62 negative_sampler.num_negs_per_pos 52.0 +37 62 training.batch_size 1.0 +37 63 model.embedding_dim 1.0 +37 63 optimizer.lr 0.005902705458316763 +37 63 negative_sampler.num_negs_per_pos 28.0 +37 63 training.batch_size 0.0 +37 64 model.embedding_dim 2.0 +37 64 optimizer.lr 0.03858645259862949 +37 64 negative_sampler.num_negs_per_pos 29.0 +37 64 training.batch_size 2.0 +37 65 model.embedding_dim 2.0 +37 65 optimizer.lr 0.012236256637471923 +37 65 negative_sampler.num_negs_per_pos 2.0 +37 65 training.batch_size 2.0 +37 66 model.embedding_dim 1.0 +37 66 optimizer.lr 0.06910799076559877 +37 66 negative_sampler.num_negs_per_pos 27.0 +37 66 training.batch_size 1.0 +37 67 model.embedding_dim 0.0 +37 67 optimizer.lr 0.05507594404532056 +37 67 negative_sampler.num_negs_per_pos 20.0 +37 67 training.batch_size 0.0 +37 68 model.embedding_dim 1.0 +37 68 optimizer.lr 0.01304242092428905 +37 68 negative_sampler.num_negs_per_pos 14.0 +37 68 training.batch_size 1.0 +37 69 model.embedding_dim 2.0 +37 69 optimizer.lr 0.007094976382049888 +37 69 negative_sampler.num_negs_per_pos 23.0 +37 69 training.batch_size 2.0 +37 70 model.embedding_dim 2.0 +37 70 optimizer.lr 0.006250356462707451 +37 70 negative_sampler.num_negs_per_pos 81.0 +37 70 training.batch_size 0.0 +37 71 model.embedding_dim 1.0 +37 71 optimizer.lr 0.07006236786244988 +37 71 negative_sampler.num_negs_per_pos 99.0 +37 71 training.batch_size 0.0 +37 72 model.embedding_dim 2.0 +37 72 optimizer.lr 0.0010118150943915017 +37 72 negative_sampler.num_negs_per_pos 98.0 +37 72 training.batch_size 
2.0 +37 73 model.embedding_dim 2.0 +37 73 optimizer.lr 0.006008853742266796 +37 73 negative_sampler.num_negs_per_pos 3.0 +37 73 training.batch_size 2.0 +37 74 model.embedding_dim 2.0 +37 74 optimizer.lr 0.0028842113090187973 +37 74 negative_sampler.num_negs_per_pos 56.0 +37 74 training.batch_size 0.0 +37 75 model.embedding_dim 1.0 +37 75 optimizer.lr 0.0784882852603186 +37 75 negative_sampler.num_negs_per_pos 21.0 +37 75 training.batch_size 0.0 +37 76 model.embedding_dim 0.0 +37 76 optimizer.lr 0.03448140496275903 +37 76 negative_sampler.num_negs_per_pos 35.0 +37 76 training.batch_size 0.0 +37 77 model.embedding_dim 2.0 +37 77 optimizer.lr 0.025133037441377192 +37 77 negative_sampler.num_negs_per_pos 2.0 +37 77 training.batch_size 1.0 +37 78 model.embedding_dim 1.0 +37 78 optimizer.lr 0.009052342712348279 +37 78 negative_sampler.num_negs_per_pos 18.0 +37 78 training.batch_size 0.0 +37 79 model.embedding_dim 2.0 +37 79 optimizer.lr 0.0028712091982723304 +37 79 negative_sampler.num_negs_per_pos 71.0 +37 79 training.batch_size 1.0 +37 80 model.embedding_dim 0.0 +37 80 optimizer.lr 0.03228286909302011 +37 80 negative_sampler.num_negs_per_pos 45.0 +37 80 training.batch_size 2.0 +37 81 model.embedding_dim 0.0 +37 81 optimizer.lr 0.004655962115559649 +37 81 negative_sampler.num_negs_per_pos 36.0 +37 81 training.batch_size 2.0 +37 82 model.embedding_dim 2.0 +37 82 optimizer.lr 0.0028276440620368104 +37 82 negative_sampler.num_negs_per_pos 3.0 +37 82 training.batch_size 0.0 +37 83 model.embedding_dim 2.0 +37 83 optimizer.lr 0.01965188160302636 +37 83 negative_sampler.num_negs_per_pos 68.0 +37 83 training.batch_size 2.0 +37 84 model.embedding_dim 1.0 +37 84 optimizer.lr 0.03625025723755821 +37 84 negative_sampler.num_negs_per_pos 17.0 +37 84 training.batch_size 0.0 +37 85 model.embedding_dim 2.0 +37 85 optimizer.lr 0.09788594679865677 +37 85 negative_sampler.num_negs_per_pos 24.0 +37 85 training.batch_size 0.0 +37 86 model.embedding_dim 1.0 +37 86 optimizer.lr 
0.012998814289168664 +37 86 negative_sampler.num_negs_per_pos 18.0 +37 86 training.batch_size 1.0 +37 87 model.embedding_dim 2.0 +37 87 optimizer.lr 0.0017744946030313737 +37 87 negative_sampler.num_negs_per_pos 92.0 +37 87 training.batch_size 0.0 +37 88 model.embedding_dim 2.0 +37 88 optimizer.lr 0.09804645698094519 +37 88 negative_sampler.num_negs_per_pos 14.0 +37 88 training.batch_size 2.0 +37 89 model.embedding_dim 0.0 +37 89 optimizer.lr 0.0016712925506248216 +37 89 negative_sampler.num_negs_per_pos 81.0 +37 89 training.batch_size 1.0 +37 90 model.embedding_dim 2.0 +37 90 optimizer.lr 0.003873405059218231 +37 90 negative_sampler.num_negs_per_pos 56.0 +37 90 training.batch_size 2.0 +37 91 model.embedding_dim 0.0 +37 91 optimizer.lr 0.0013995325432246692 +37 91 negative_sampler.num_negs_per_pos 54.0 +37 91 training.batch_size 1.0 +37 92 model.embedding_dim 0.0 +37 92 optimizer.lr 0.02183366197467636 +37 92 negative_sampler.num_negs_per_pos 64.0 +37 92 training.batch_size 2.0 +37 93 model.embedding_dim 0.0 +37 93 optimizer.lr 0.018262341378992013 +37 93 negative_sampler.num_negs_per_pos 12.0 +37 93 training.batch_size 1.0 +37 94 model.embedding_dim 2.0 +37 94 optimizer.lr 0.035567573077784 +37 94 negative_sampler.num_negs_per_pos 61.0 +37 94 training.batch_size 0.0 +37 95 model.embedding_dim 0.0 +37 95 optimizer.lr 0.0016240781382438941 +37 95 negative_sampler.num_negs_per_pos 45.0 +37 95 training.batch_size 2.0 +37 96 model.embedding_dim 1.0 +37 96 optimizer.lr 0.012030785885379442 +37 96 negative_sampler.num_negs_per_pos 89.0 +37 96 training.batch_size 2.0 +37 97 model.embedding_dim 2.0 +37 97 optimizer.lr 0.022645673553018192 +37 97 negative_sampler.num_negs_per_pos 15.0 +37 97 training.batch_size 0.0 +37 98 model.embedding_dim 0.0 +37 98 optimizer.lr 0.06114673115571386 +37 98 negative_sampler.num_negs_per_pos 47.0 +37 98 training.batch_size 2.0 +37 99 model.embedding_dim 0.0 +37 99 optimizer.lr 0.004537461098976234 +37 99 negative_sampler.num_negs_per_pos 
19.0 +37 99 training.batch_size 1.0 +37 100 model.embedding_dim 1.0 +37 100 optimizer.lr 0.004552289111728496 +37 100 negative_sampler.num_negs_per_pos 32.0 +37 100 training.batch_size 1.0 +37 1 dataset """kinships""" +37 1 model """complex""" +37 1 loss """softplus""" +37 1 regularizer """no""" +37 1 optimizer """adam""" +37 1 training_loop """owa""" +37 1 negative_sampler """basic""" +37 1 evaluator """rankbased""" +37 2 dataset """kinships""" +37 2 model """complex""" +37 2 loss """softplus""" +37 2 regularizer """no""" +37 2 optimizer """adam""" +37 2 training_loop """owa""" +37 2 negative_sampler """basic""" +37 2 evaluator """rankbased""" +37 3 dataset """kinships""" +37 3 model """complex""" +37 3 loss """softplus""" +37 3 regularizer """no""" +37 3 optimizer """adam""" +37 3 training_loop """owa""" +37 3 negative_sampler """basic""" +37 3 evaluator """rankbased""" +37 4 dataset """kinships""" +37 4 model """complex""" +37 4 loss """softplus""" +37 4 regularizer """no""" +37 4 optimizer """adam""" +37 4 training_loop """owa""" +37 4 negative_sampler """basic""" +37 4 evaluator """rankbased""" +37 5 dataset """kinships""" +37 5 model """complex""" +37 5 loss """softplus""" +37 5 regularizer """no""" +37 5 optimizer """adam""" +37 5 training_loop """owa""" +37 5 negative_sampler """basic""" +37 5 evaluator """rankbased""" +37 6 dataset """kinships""" +37 6 model """complex""" +37 6 loss """softplus""" +37 6 regularizer """no""" +37 6 optimizer """adam""" +37 6 training_loop """owa""" +37 6 negative_sampler """basic""" +37 6 evaluator """rankbased""" +37 7 dataset """kinships""" +37 7 model """complex""" +37 7 loss """softplus""" +37 7 regularizer """no""" +37 7 optimizer """adam""" +37 7 training_loop """owa""" +37 7 negative_sampler """basic""" +37 7 evaluator """rankbased""" +37 8 dataset """kinships""" +37 8 model """complex""" +37 8 loss """softplus""" +37 8 regularizer """no""" +37 8 optimizer """adam""" +37 8 training_loop """owa""" +37 8 
negative_sampler """basic""" +37 8 evaluator """rankbased""" +37 9 dataset """kinships""" +37 9 model """complex""" +37 9 loss """softplus""" +37 9 regularizer """no""" +37 9 optimizer """adam""" +37 9 training_loop """owa""" +37 9 negative_sampler """basic""" +37 9 evaluator """rankbased""" +37 10 dataset """kinships""" +37 10 model """complex""" +37 10 loss """softplus""" +37 10 regularizer """no""" +37 10 optimizer """adam""" +37 10 training_loop """owa""" +37 10 negative_sampler """basic""" +37 10 evaluator """rankbased""" +37 11 dataset """kinships""" +37 11 model """complex""" +37 11 loss """softplus""" +37 11 regularizer """no""" +37 11 optimizer """adam""" +37 11 training_loop """owa""" +37 11 negative_sampler """basic""" +37 11 evaluator """rankbased""" +37 12 dataset """kinships""" +37 12 model """complex""" +37 12 loss """softplus""" +37 12 regularizer """no""" +37 12 optimizer """adam""" +37 12 training_loop """owa""" +37 12 negative_sampler """basic""" +37 12 evaluator """rankbased""" +37 13 dataset """kinships""" +37 13 model """complex""" +37 13 loss """softplus""" +37 13 regularizer """no""" +37 13 optimizer """adam""" +37 13 training_loop """owa""" +37 13 negative_sampler """basic""" +37 13 evaluator """rankbased""" +37 14 dataset """kinships""" +37 14 model """complex""" +37 14 loss """softplus""" +37 14 regularizer """no""" +37 14 optimizer """adam""" +37 14 training_loop """owa""" +37 14 negative_sampler """basic""" +37 14 evaluator """rankbased""" +37 15 dataset """kinships""" +37 15 model """complex""" +37 15 loss """softplus""" +37 15 regularizer """no""" +37 15 optimizer """adam""" +37 15 training_loop """owa""" +37 15 negative_sampler """basic""" +37 15 evaluator """rankbased""" +37 16 dataset """kinships""" +37 16 model """complex""" +37 16 loss """softplus""" +37 16 regularizer """no""" +37 16 optimizer """adam""" +37 16 training_loop """owa""" +37 16 negative_sampler """basic""" +37 16 evaluator """rankbased""" +37 17 dataset 
"""kinships""" +37 17 model """complex""" +37 17 loss """softplus""" +37 17 regularizer """no""" +37 17 optimizer """adam""" +37 17 training_loop """owa""" +37 17 negative_sampler """basic""" +37 17 evaluator """rankbased""" +37 18 dataset """kinships""" +37 18 model """complex""" +37 18 loss """softplus""" +37 18 regularizer """no""" +37 18 optimizer """adam""" +37 18 training_loop """owa""" +37 18 negative_sampler """basic""" +37 18 evaluator """rankbased""" +37 19 dataset """kinships""" +37 19 model """complex""" +37 19 loss """softplus""" +37 19 regularizer """no""" +37 19 optimizer """adam""" +37 19 training_loop """owa""" +37 19 negative_sampler """basic""" +37 19 evaluator """rankbased""" +37 20 dataset """kinships""" +37 20 model """complex""" +37 20 loss """softplus""" +37 20 regularizer """no""" +37 20 optimizer """adam""" +37 20 training_loop """owa""" +37 20 negative_sampler """basic""" +37 20 evaluator """rankbased""" +37 21 dataset """kinships""" +37 21 model """complex""" +37 21 loss """softplus""" +37 21 regularizer """no""" +37 21 optimizer """adam""" +37 21 training_loop """owa""" +37 21 negative_sampler """basic""" +37 21 evaluator """rankbased""" +37 22 dataset """kinships""" +37 22 model """complex""" +37 22 loss """softplus""" +37 22 regularizer """no""" +37 22 optimizer """adam""" +37 22 training_loop """owa""" +37 22 negative_sampler """basic""" +37 22 evaluator """rankbased""" +37 23 dataset """kinships""" +37 23 model """complex""" +37 23 loss """softplus""" +37 23 regularizer """no""" +37 23 optimizer """adam""" +37 23 training_loop """owa""" +37 23 negative_sampler """basic""" +37 23 evaluator """rankbased""" +37 24 dataset """kinships""" +37 24 model """complex""" +37 24 loss """softplus""" +37 24 regularizer """no""" +37 24 optimizer """adam""" +37 24 training_loop """owa""" +37 24 negative_sampler """basic""" +37 24 evaluator """rankbased""" +37 25 dataset """kinships""" +37 25 model """complex""" +37 25 loss """softplus""" +37 25 
regularizer """no""" +37 25 optimizer """adam""" +37 25 training_loop """owa""" +37 25 negative_sampler """basic""" +37 25 evaluator """rankbased""" +37 26 dataset """kinships""" +37 26 model """complex""" +37 26 loss """softplus""" +37 26 regularizer """no""" +37 26 optimizer """adam""" +37 26 training_loop """owa""" +37 26 negative_sampler """basic""" +37 26 evaluator """rankbased""" +37 27 dataset """kinships""" +37 27 model """complex""" +37 27 loss """softplus""" +37 27 regularizer """no""" +37 27 optimizer """adam""" +37 27 training_loop """owa""" +37 27 negative_sampler """basic""" +37 27 evaluator """rankbased""" +37 28 dataset """kinships""" +37 28 model """complex""" +37 28 loss """softplus""" +37 28 regularizer """no""" +37 28 optimizer """adam""" +37 28 training_loop """owa""" +37 28 negative_sampler """basic""" +37 28 evaluator """rankbased""" +37 29 dataset """kinships""" +37 29 model """complex""" +37 29 loss """softplus""" +37 29 regularizer """no""" +37 29 optimizer """adam""" +37 29 training_loop """owa""" +37 29 negative_sampler """basic""" +37 29 evaluator """rankbased""" +37 30 dataset """kinships""" +37 30 model """complex""" +37 30 loss """softplus""" +37 30 regularizer """no""" +37 30 optimizer """adam""" +37 30 training_loop """owa""" +37 30 negative_sampler """basic""" +37 30 evaluator """rankbased""" +37 31 dataset """kinships""" +37 31 model """complex""" +37 31 loss """softplus""" +37 31 regularizer """no""" +37 31 optimizer """adam""" +37 31 training_loop """owa""" +37 31 negative_sampler """basic""" +37 31 evaluator """rankbased""" +37 32 dataset """kinships""" +37 32 model """complex""" +37 32 loss """softplus""" +37 32 regularizer """no""" +37 32 optimizer """adam""" +37 32 training_loop """owa""" +37 32 negative_sampler """basic""" +37 32 evaluator """rankbased""" +37 33 dataset """kinships""" +37 33 model """complex""" +37 33 loss """softplus""" +37 33 regularizer """no""" +37 33 optimizer """adam""" +37 33 training_loop """owa""" 
+37 33 negative_sampler """basic""" +37 33 evaluator """rankbased""" +37 34 dataset """kinships""" +37 34 model """complex""" +37 34 loss """softplus""" +37 34 regularizer """no""" +37 34 optimizer """adam""" +37 34 training_loop """owa""" +37 34 negative_sampler """basic""" +37 34 evaluator """rankbased""" +37 35 dataset """kinships""" +37 35 model """complex""" +37 35 loss """softplus""" +37 35 regularizer """no""" +37 35 optimizer """adam""" +37 35 training_loop """owa""" +37 35 negative_sampler """basic""" +37 35 evaluator """rankbased""" +37 36 dataset """kinships""" +37 36 model """complex""" +37 36 loss """softplus""" +37 36 regularizer """no""" +37 36 optimizer """adam""" +37 36 training_loop """owa""" +37 36 negative_sampler """basic""" +37 36 evaluator """rankbased""" +37 37 dataset """kinships""" +37 37 model """complex""" +37 37 loss """softplus""" +37 37 regularizer """no""" +37 37 optimizer """adam""" +37 37 training_loop """owa""" +37 37 negative_sampler """basic""" +37 37 evaluator """rankbased""" +37 38 dataset """kinships""" +37 38 model """complex""" +37 38 loss """softplus""" +37 38 regularizer """no""" +37 38 optimizer """adam""" +37 38 training_loop """owa""" +37 38 negative_sampler """basic""" +37 38 evaluator """rankbased""" +37 39 dataset """kinships""" +37 39 model """complex""" +37 39 loss """softplus""" +37 39 regularizer """no""" +37 39 optimizer """adam""" +37 39 training_loop """owa""" +37 39 negative_sampler """basic""" +37 39 evaluator """rankbased""" +37 40 dataset """kinships""" +37 40 model """complex""" +37 40 loss """softplus""" +37 40 regularizer """no""" +37 40 optimizer """adam""" +37 40 training_loop """owa""" +37 40 negative_sampler """basic""" +37 40 evaluator """rankbased""" +37 41 dataset """kinships""" +37 41 model """complex""" +37 41 loss """softplus""" +37 41 regularizer """no""" +37 41 optimizer """adam""" +37 41 training_loop """owa""" +37 41 negative_sampler """basic""" +37 41 evaluator """rankbased""" +37 42 
dataset """kinships""" +37 42 model """complex""" +37 42 loss """softplus""" +37 42 regularizer """no""" +37 42 optimizer """adam""" +37 42 training_loop """owa""" +37 42 negative_sampler """basic""" +37 42 evaluator """rankbased""" +37 43 dataset """kinships""" +37 43 model """complex""" +37 43 loss """softplus""" +37 43 regularizer """no""" +37 43 optimizer """adam""" +37 43 training_loop """owa""" +37 43 negative_sampler """basic""" +37 43 evaluator """rankbased""" +37 44 dataset """kinships""" +37 44 model """complex""" +37 44 loss """softplus""" +37 44 regularizer """no""" +37 44 optimizer """adam""" +37 44 training_loop """owa""" +37 44 negative_sampler """basic""" +37 44 evaluator """rankbased""" +37 45 dataset """kinships""" +37 45 model """complex""" +37 45 loss """softplus""" +37 45 regularizer """no""" +37 45 optimizer """adam""" +37 45 training_loop """owa""" +37 45 negative_sampler """basic""" +37 45 evaluator """rankbased""" +37 46 dataset """kinships""" +37 46 model """complex""" +37 46 loss """softplus""" +37 46 regularizer """no""" +37 46 optimizer """adam""" +37 46 training_loop """owa""" +37 46 negative_sampler """basic""" +37 46 evaluator """rankbased""" +37 47 dataset """kinships""" +37 47 model """complex""" +37 47 loss """softplus""" +37 47 regularizer """no""" +37 47 optimizer """adam""" +37 47 training_loop """owa""" +37 47 negative_sampler """basic""" +37 47 evaluator """rankbased""" +37 48 dataset """kinships""" +37 48 model """complex""" +37 48 loss """softplus""" +37 48 regularizer """no""" +37 48 optimizer """adam""" +37 48 training_loop """owa""" +37 48 negative_sampler """basic""" +37 48 evaluator """rankbased""" +37 49 dataset """kinships""" +37 49 model """complex""" +37 49 loss """softplus""" +37 49 regularizer """no""" +37 49 optimizer """adam""" +37 49 training_loop """owa""" +37 49 negative_sampler """basic""" +37 49 evaluator """rankbased""" +37 50 dataset """kinships""" +37 50 model """complex""" +37 50 loss """softplus""" 
+37 50 regularizer """no""" +37 50 optimizer """adam""" +37 50 training_loop """owa""" +37 50 negative_sampler """basic""" +37 50 evaluator """rankbased""" +37 51 dataset """kinships""" +37 51 model """complex""" +37 51 loss """softplus""" +37 51 regularizer """no""" +37 51 optimizer """adam""" +37 51 training_loop """owa""" +37 51 negative_sampler """basic""" +37 51 evaluator """rankbased""" +37 52 dataset """kinships""" +37 52 model """complex""" +37 52 loss """softplus""" +37 52 regularizer """no""" +37 52 optimizer """adam""" +37 52 training_loop """owa""" +37 52 negative_sampler """basic""" +37 52 evaluator """rankbased""" +37 53 dataset """kinships""" +37 53 model """complex""" +37 53 loss """softplus""" +37 53 regularizer """no""" +37 53 optimizer """adam""" +37 53 training_loop """owa""" +37 53 negative_sampler """basic""" +37 53 evaluator """rankbased""" +37 54 dataset """kinships""" +37 54 model """complex""" +37 54 loss """softplus""" +37 54 regularizer """no""" +37 54 optimizer """adam""" +37 54 training_loop """owa""" +37 54 negative_sampler """basic""" +37 54 evaluator """rankbased""" +37 55 dataset """kinships""" +37 55 model """complex""" +37 55 loss """softplus""" +37 55 regularizer """no""" +37 55 optimizer """adam""" +37 55 training_loop """owa""" +37 55 negative_sampler """basic""" +37 55 evaluator """rankbased""" +37 56 dataset """kinships""" +37 56 model """complex""" +37 56 loss """softplus""" +37 56 regularizer """no""" +37 56 optimizer """adam""" +37 56 training_loop """owa""" +37 56 negative_sampler """basic""" +37 56 evaluator """rankbased""" +37 57 dataset """kinships""" +37 57 model """complex""" +37 57 loss """softplus""" +37 57 regularizer """no""" +37 57 optimizer """adam""" +37 57 training_loop """owa""" +37 57 negative_sampler """basic""" +37 57 evaluator """rankbased""" +37 58 dataset """kinships""" +37 58 model """complex""" +37 58 loss """softplus""" +37 58 regularizer """no""" +37 58 optimizer """adam""" +37 58 training_loop 
"""owa""" +37 58 negative_sampler """basic""" +37 58 evaluator """rankbased""" +37 59 dataset """kinships""" +37 59 model """complex""" +37 59 loss """softplus""" +37 59 regularizer """no""" +37 59 optimizer """adam""" +37 59 training_loop """owa""" +37 59 negative_sampler """basic""" +37 59 evaluator """rankbased""" +37 60 dataset """kinships""" +37 60 model """complex""" +37 60 loss """softplus""" +37 60 regularizer """no""" +37 60 optimizer """adam""" +37 60 training_loop """owa""" +37 60 negative_sampler """basic""" +37 60 evaluator """rankbased""" +37 61 dataset """kinships""" +37 61 model """complex""" +37 61 loss """softplus""" +37 61 regularizer """no""" +37 61 optimizer """adam""" +37 61 training_loop """owa""" +37 61 negative_sampler """basic""" +37 61 evaluator """rankbased""" +37 62 dataset """kinships""" +37 62 model """complex""" +37 62 loss """softplus""" +37 62 regularizer """no""" +37 62 optimizer """adam""" +37 62 training_loop """owa""" +37 62 negative_sampler """basic""" +37 62 evaluator """rankbased""" +37 63 dataset """kinships""" +37 63 model """complex""" +37 63 loss """softplus""" +37 63 regularizer """no""" +37 63 optimizer """adam""" +37 63 training_loop """owa""" +37 63 negative_sampler """basic""" +37 63 evaluator """rankbased""" +37 64 dataset """kinships""" +37 64 model """complex""" +37 64 loss """softplus""" +37 64 regularizer """no""" +37 64 optimizer """adam""" +37 64 training_loop """owa""" +37 64 negative_sampler """basic""" +37 64 evaluator """rankbased""" +37 65 dataset """kinships""" +37 65 model """complex""" +37 65 loss """softplus""" +37 65 regularizer """no""" +37 65 optimizer """adam""" +37 65 training_loop """owa""" +37 65 negative_sampler """basic""" +37 65 evaluator """rankbased""" +37 66 dataset """kinships""" +37 66 model """complex""" +37 66 loss """softplus""" +37 66 regularizer """no""" +37 66 optimizer """adam""" +37 66 training_loop """owa""" +37 66 negative_sampler """basic""" +37 66 evaluator """rankbased""" 
+37 67 dataset """kinships""" +37 67 model """complex""" +37 67 loss """softplus""" +37 67 regularizer """no""" +37 67 optimizer """adam""" +37 67 training_loop """owa""" +37 67 negative_sampler """basic""" +37 67 evaluator """rankbased""" +37 68 dataset """kinships""" +37 68 model """complex""" +37 68 loss """softplus""" +37 68 regularizer """no""" +37 68 optimizer """adam""" +37 68 training_loop """owa""" +37 68 negative_sampler """basic""" +37 68 evaluator """rankbased""" +37 69 dataset """kinships""" +37 69 model """complex""" +37 69 loss """softplus""" +37 69 regularizer """no""" +37 69 optimizer """adam""" +37 69 training_loop """owa""" +37 69 negative_sampler """basic""" +37 69 evaluator """rankbased""" +37 70 dataset """kinships""" +37 70 model """complex""" +37 70 loss """softplus""" +37 70 regularizer """no""" +37 70 optimizer """adam""" +37 70 training_loop """owa""" +37 70 negative_sampler """basic""" +37 70 evaluator """rankbased""" +37 71 dataset """kinships""" +37 71 model """complex""" +37 71 loss """softplus""" +37 71 regularizer """no""" +37 71 optimizer """adam""" +37 71 training_loop """owa""" +37 71 negative_sampler """basic""" +37 71 evaluator """rankbased""" +37 72 dataset """kinships""" +37 72 model """complex""" +37 72 loss """softplus""" +37 72 regularizer """no""" +37 72 optimizer """adam""" +37 72 training_loop """owa""" +37 72 negative_sampler """basic""" +37 72 evaluator """rankbased""" +37 73 dataset """kinships""" +37 73 model """complex""" +37 73 loss """softplus""" +37 73 regularizer """no""" +37 73 optimizer """adam""" +37 73 training_loop """owa""" +37 73 negative_sampler """basic""" +37 73 evaluator """rankbased""" +37 74 dataset """kinships""" +37 74 model """complex""" +37 74 loss """softplus""" +37 74 regularizer """no""" +37 74 optimizer """adam""" +37 74 training_loop """owa""" +37 74 negative_sampler """basic""" +37 74 evaluator """rankbased""" +37 75 dataset """kinships""" +37 75 model """complex""" +37 75 loss 
"""softplus""" +37 75 regularizer """no""" +37 75 optimizer """adam""" +37 75 training_loop """owa""" +37 75 negative_sampler """basic""" +37 75 evaluator """rankbased""" +37 76 dataset """kinships""" +37 76 model """complex""" +37 76 loss """softplus""" +37 76 regularizer """no""" +37 76 optimizer """adam""" +37 76 training_loop """owa""" +37 76 negative_sampler """basic""" +37 76 evaluator """rankbased""" +37 77 dataset """kinships""" +37 77 model """complex""" +37 77 loss """softplus""" +37 77 regularizer """no""" +37 77 optimizer """adam""" +37 77 training_loop """owa""" +37 77 negative_sampler """basic""" +37 77 evaluator """rankbased""" +37 78 dataset """kinships""" +37 78 model """complex""" +37 78 loss """softplus""" +37 78 regularizer """no""" +37 78 optimizer """adam""" +37 78 training_loop """owa""" +37 78 negative_sampler """basic""" +37 78 evaluator """rankbased""" +37 79 dataset """kinships""" +37 79 model """complex""" +37 79 loss """softplus""" +37 79 regularizer """no""" +37 79 optimizer """adam""" +37 79 training_loop """owa""" +37 79 negative_sampler """basic""" +37 79 evaluator """rankbased""" +37 80 dataset """kinships""" +37 80 model """complex""" +37 80 loss """softplus""" +37 80 regularizer """no""" +37 80 optimizer """adam""" +37 80 training_loop """owa""" +37 80 negative_sampler """basic""" +37 80 evaluator """rankbased""" +37 81 dataset """kinships""" +37 81 model """complex""" +37 81 loss """softplus""" +37 81 regularizer """no""" +37 81 optimizer """adam""" +37 81 training_loop """owa""" +37 81 negative_sampler """basic""" +37 81 evaluator """rankbased""" +37 82 dataset """kinships""" +37 82 model """complex""" +37 82 loss """softplus""" +37 82 regularizer """no""" +37 82 optimizer """adam""" +37 82 training_loop """owa""" +37 82 negative_sampler """basic""" +37 82 evaluator """rankbased""" +37 83 dataset """kinships""" +37 83 model """complex""" +37 83 loss """softplus""" +37 83 regularizer """no""" +37 83 optimizer """adam""" +37 83 
training_loop """owa""" +37 83 negative_sampler """basic""" +37 83 evaluator """rankbased""" +37 84 dataset """kinships""" +37 84 model """complex""" +37 84 loss """softplus""" +37 84 regularizer """no""" +37 84 optimizer """adam""" +37 84 training_loop """owa""" +37 84 negative_sampler """basic""" +37 84 evaluator """rankbased""" +37 85 dataset """kinships""" +37 85 model """complex""" +37 85 loss """softplus""" +37 85 regularizer """no""" +37 85 optimizer """adam""" +37 85 training_loop """owa""" +37 85 negative_sampler """basic""" +37 85 evaluator """rankbased""" +37 86 dataset """kinships""" +37 86 model """complex""" +37 86 loss """softplus""" +37 86 regularizer """no""" +37 86 optimizer """adam""" +37 86 training_loop """owa""" +37 86 negative_sampler """basic""" +37 86 evaluator """rankbased""" +37 87 dataset """kinships""" +37 87 model """complex""" +37 87 loss """softplus""" +37 87 regularizer """no""" +37 87 optimizer """adam""" +37 87 training_loop """owa""" +37 87 negative_sampler """basic""" +37 87 evaluator """rankbased""" +37 88 dataset """kinships""" +37 88 model """complex""" +37 88 loss """softplus""" +37 88 regularizer """no""" +37 88 optimizer """adam""" +37 88 training_loop """owa""" +37 88 negative_sampler """basic""" +37 88 evaluator """rankbased""" +37 89 dataset """kinships""" +37 89 model """complex""" +37 89 loss """softplus""" +37 89 regularizer """no""" +37 89 optimizer """adam""" +37 89 training_loop """owa""" +37 89 negative_sampler """basic""" +37 89 evaluator """rankbased""" +37 90 dataset """kinships""" +37 90 model """complex""" +37 90 loss """softplus""" +37 90 regularizer """no""" +37 90 optimizer """adam""" +37 90 training_loop """owa""" +37 90 negative_sampler """basic""" +37 90 evaluator """rankbased""" +37 91 dataset """kinships""" +37 91 model """complex""" +37 91 loss """softplus""" +37 91 regularizer """no""" +37 91 optimizer """adam""" +37 91 training_loop """owa""" +37 91 negative_sampler """basic""" +37 91 evaluator 
"""rankbased""" +37 92 dataset """kinships""" +37 92 model """complex""" +37 92 loss """softplus""" +37 92 regularizer """no""" +37 92 optimizer """adam""" +37 92 training_loop """owa""" +37 92 negative_sampler """basic""" +37 92 evaluator """rankbased""" +37 93 dataset """kinships""" +37 93 model """complex""" +37 93 loss """softplus""" +37 93 regularizer """no""" +37 93 optimizer """adam""" +37 93 training_loop """owa""" +37 93 negative_sampler """basic""" +37 93 evaluator """rankbased""" +37 94 dataset """kinships""" +37 94 model """complex""" +37 94 loss """softplus""" +37 94 regularizer """no""" +37 94 optimizer """adam""" +37 94 training_loop """owa""" +37 94 negative_sampler """basic""" +37 94 evaluator """rankbased""" +37 95 dataset """kinships""" +37 95 model """complex""" +37 95 loss """softplus""" +37 95 regularizer """no""" +37 95 optimizer """adam""" +37 95 training_loop """owa""" +37 95 negative_sampler """basic""" +37 95 evaluator """rankbased""" +37 96 dataset """kinships""" +37 96 model """complex""" +37 96 loss """softplus""" +37 96 regularizer """no""" +37 96 optimizer """adam""" +37 96 training_loop """owa""" +37 96 negative_sampler """basic""" +37 96 evaluator """rankbased""" +37 97 dataset """kinships""" +37 97 model """complex""" +37 97 loss """softplus""" +37 97 regularizer """no""" +37 97 optimizer """adam""" +37 97 training_loop """owa""" +37 97 negative_sampler """basic""" +37 97 evaluator """rankbased""" +37 98 dataset """kinships""" +37 98 model """complex""" +37 98 loss """softplus""" +37 98 regularizer """no""" +37 98 optimizer """adam""" +37 98 training_loop """owa""" +37 98 negative_sampler """basic""" +37 98 evaluator """rankbased""" +37 99 dataset """kinships""" +37 99 model """complex""" +37 99 loss """softplus""" +37 99 regularizer """no""" +37 99 optimizer """adam""" +37 99 training_loop """owa""" +37 99 negative_sampler """basic""" +37 99 evaluator """rankbased""" +37 100 dataset """kinships""" +37 100 model """complex""" +37 
100 loss """softplus""" +37 100 regularizer """no""" +37 100 optimizer """adam""" +37 100 training_loop """owa""" +37 100 negative_sampler """basic""" +37 100 evaluator """rankbased""" +38 1 model.embedding_dim 2.0 +38 1 loss.margin 17.59354750891791 +38 1 loss.adversarial_temperature 0.6365326748675281 +38 1 optimizer.lr 0.0011611432758662301 +38 1 negative_sampler.num_negs_per_pos 76.0 +38 1 training.batch_size 1.0 +38 2 model.embedding_dim 2.0 +38 2 loss.margin 15.039930873467224 +38 2 loss.adversarial_temperature 0.10571049015389801 +38 2 optimizer.lr 0.0010340528066759291 +38 2 negative_sampler.num_negs_per_pos 7.0 +38 2 training.batch_size 1.0 +38 3 model.embedding_dim 0.0 +38 3 loss.margin 8.608998053519999 +38 3 loss.adversarial_temperature 0.338050484076955 +38 3 optimizer.lr 0.010948824751059861 +38 3 negative_sampler.num_negs_per_pos 96.0 +38 3 training.batch_size 1.0 +38 4 model.embedding_dim 2.0 +38 4 loss.margin 28.716066464453768 +38 4 loss.adversarial_temperature 0.5420082171766095 +38 4 optimizer.lr 0.014282452353210672 +38 4 negative_sampler.num_negs_per_pos 14.0 +38 4 training.batch_size 1.0 +38 5 model.embedding_dim 1.0 +38 5 loss.margin 22.511761212610462 +38 5 loss.adversarial_temperature 0.6428448047652469 +38 5 optimizer.lr 0.010247721076556214 +38 5 negative_sampler.num_negs_per_pos 34.0 +38 5 training.batch_size 2.0 +38 6 model.embedding_dim 1.0 +38 6 loss.margin 2.049117653746225 +38 6 loss.adversarial_temperature 0.2677407400171873 +38 6 optimizer.lr 0.0034395840020300613 +38 6 negative_sampler.num_negs_per_pos 27.0 +38 6 training.batch_size 1.0 +38 7 model.embedding_dim 2.0 +38 7 loss.margin 25.31842807038665 +38 7 loss.adversarial_temperature 0.55472274639958 +38 7 optimizer.lr 0.0011300112090643708 +38 7 negative_sampler.num_negs_per_pos 87.0 +38 7 training.batch_size 1.0 +38 8 model.embedding_dim 1.0 +38 8 loss.margin 19.793029520890034 +38 8 loss.adversarial_temperature 0.8461653724983599 +38 8 optimizer.lr 0.06377499866312225 +38 8 
negative_sampler.num_negs_per_pos 73.0 +38 8 training.batch_size 2.0 +38 9 model.embedding_dim 1.0 +38 9 loss.margin 26.74268187398656 +38 9 loss.adversarial_temperature 0.6504042560914314 +38 9 optimizer.lr 0.005191089730831145 +38 9 negative_sampler.num_negs_per_pos 23.0 +38 9 training.batch_size 1.0 +38 10 model.embedding_dim 2.0 +38 10 loss.margin 8.641081372361128 +38 10 loss.adversarial_temperature 0.7021696768070743 +38 10 optimizer.lr 0.030104447855604746 +38 10 negative_sampler.num_negs_per_pos 80.0 +38 10 training.batch_size 2.0 +38 11 model.embedding_dim 2.0 +38 11 loss.margin 23.976633636696242 +38 11 loss.adversarial_temperature 0.6170136026032861 +38 11 optimizer.lr 0.0038637526765545453 +38 11 negative_sampler.num_negs_per_pos 53.0 +38 11 training.batch_size 0.0 +38 12 model.embedding_dim 1.0 +38 12 loss.margin 10.058515264061727 +38 12 loss.adversarial_temperature 0.34481557600894347 +38 12 optimizer.lr 0.014069508380388193 +38 12 negative_sampler.num_negs_per_pos 36.0 +38 12 training.batch_size 2.0 +38 13 model.embedding_dim 2.0 +38 13 loss.margin 20.151657696831013 +38 13 loss.adversarial_temperature 0.7419428836150145 +38 13 optimizer.lr 0.003575705001490845 +38 13 negative_sampler.num_negs_per_pos 16.0 +38 13 training.batch_size 0.0 +38 14 model.embedding_dim 2.0 +38 14 loss.margin 14.792281645236658 +38 14 loss.adversarial_temperature 0.16213392825199546 +38 14 optimizer.lr 0.0014843114198419471 +38 14 negative_sampler.num_negs_per_pos 98.0 +38 14 training.batch_size 0.0 +38 15 model.embedding_dim 2.0 +38 15 loss.margin 8.481446933660994 +38 15 loss.adversarial_temperature 0.4548453893342612 +38 15 optimizer.lr 0.07894695409599371 +38 15 negative_sampler.num_negs_per_pos 94.0 +38 15 training.batch_size 1.0 +38 16 model.embedding_dim 0.0 +38 16 loss.margin 26.429294587977672 +38 16 loss.adversarial_temperature 0.5504542331487702 +38 16 optimizer.lr 0.03223429227941722 +38 16 negative_sampler.num_negs_per_pos 7.0 +38 16 training.batch_size 2.0 
+38 17 model.embedding_dim 2.0 +38 17 loss.margin 8.29752193199667 +38 17 loss.adversarial_temperature 0.664419441318341 +38 17 optimizer.lr 0.005196301936396317 +38 17 negative_sampler.num_negs_per_pos 21.0 +38 17 training.batch_size 2.0 +38 18 model.embedding_dim 0.0 +38 18 loss.margin 29.70829428231171 +38 18 loss.adversarial_temperature 0.8588531111581019 +38 18 optimizer.lr 0.001104405404093187 +38 18 negative_sampler.num_negs_per_pos 13.0 +38 18 training.batch_size 2.0 +38 19 model.embedding_dim 2.0 +38 19 loss.margin 5.341431167544636 +38 19 loss.adversarial_temperature 0.8102831386153012 +38 19 optimizer.lr 0.052541292906183514 +38 19 negative_sampler.num_negs_per_pos 65.0 +38 19 training.batch_size 2.0 +38 20 model.embedding_dim 0.0 +38 20 loss.margin 23.63411216625977 +38 20 loss.adversarial_temperature 0.14973939693641786 +38 20 optimizer.lr 0.005920479460594788 +38 20 negative_sampler.num_negs_per_pos 40.0 +38 20 training.batch_size 2.0 +38 21 model.embedding_dim 2.0 +38 21 loss.margin 1.0884061895029613 +38 21 loss.adversarial_temperature 0.510658844233889 +38 21 optimizer.lr 0.028526401287590052 +38 21 negative_sampler.num_negs_per_pos 63.0 +38 21 training.batch_size 2.0 +38 22 model.embedding_dim 1.0 +38 22 loss.margin 22.628510472326145 +38 22 loss.adversarial_temperature 0.5930087522670622 +38 22 optimizer.lr 0.007681298114344837 +38 22 negative_sampler.num_negs_per_pos 63.0 +38 22 training.batch_size 0.0 +38 23 model.embedding_dim 2.0 +38 23 loss.margin 5.746616541271704 +38 23 loss.adversarial_temperature 0.5115401879245896 +38 23 optimizer.lr 0.008610555006106385 +38 23 negative_sampler.num_negs_per_pos 74.0 +38 23 training.batch_size 1.0 +38 24 model.embedding_dim 1.0 +38 24 loss.margin 13.784689322003352 +38 24 loss.adversarial_temperature 0.44670047409916047 +38 24 optimizer.lr 0.0365836456641193 +38 24 negative_sampler.num_negs_per_pos 9.0 +38 24 training.batch_size 0.0 +38 25 model.embedding_dim 0.0 +38 25 loss.margin 19.11350644156137 +38 
25 loss.adversarial_temperature 0.681514509069873 +38 25 optimizer.lr 0.037997914111442516 +38 25 negative_sampler.num_negs_per_pos 91.0 +38 25 training.batch_size 1.0 +38 26 model.embedding_dim 2.0 +38 26 loss.margin 14.32824573992334 +38 26 loss.adversarial_temperature 0.556317240367689 +38 26 optimizer.lr 0.005340127366479814 +38 26 negative_sampler.num_negs_per_pos 33.0 +38 26 training.batch_size 0.0 +38 27 model.embedding_dim 2.0 +38 27 loss.margin 13.382917244908635 +38 27 loss.adversarial_temperature 0.5467117188198406 +38 27 optimizer.lr 0.0549678365176001 +38 27 negative_sampler.num_negs_per_pos 6.0 +38 27 training.batch_size 0.0 +38 28 model.embedding_dim 1.0 +38 28 loss.margin 16.259913636385257 +38 28 loss.adversarial_temperature 0.6236163355820858 +38 28 optimizer.lr 0.0020662573091302138 +38 28 negative_sampler.num_negs_per_pos 63.0 +38 28 training.batch_size 2.0 +38 29 model.embedding_dim 2.0 +38 29 loss.margin 13.060845343777782 +38 29 loss.adversarial_temperature 0.12302877130711487 +38 29 optimizer.lr 0.07581110763418984 +38 29 negative_sampler.num_negs_per_pos 0.0 +38 29 training.batch_size 2.0 +38 30 model.embedding_dim 2.0 +38 30 loss.margin 2.3103222223900994 +38 30 loss.adversarial_temperature 0.9389781035314282 +38 30 optimizer.lr 0.03892897507146392 +38 30 negative_sampler.num_negs_per_pos 58.0 +38 30 training.batch_size 1.0 +38 31 model.embedding_dim 1.0 +38 31 loss.margin 7.396840592616636 +38 31 loss.adversarial_temperature 0.5022622881641456 +38 31 optimizer.lr 0.039136552262097404 +38 31 negative_sampler.num_negs_per_pos 53.0 +38 31 training.batch_size 1.0 +38 32 model.embedding_dim 0.0 +38 32 loss.margin 18.964664945337187 +38 32 loss.adversarial_temperature 0.5745885619182881 +38 32 optimizer.lr 0.0028898066083228244 +38 32 negative_sampler.num_negs_per_pos 38.0 +38 32 training.batch_size 0.0 +38 33 model.embedding_dim 2.0 +38 33 loss.margin 8.086609143585623 +38 33 loss.adversarial_temperature 0.4609073168430352 +38 33 optimizer.lr 
0.08377846380832131 +38 33 negative_sampler.num_negs_per_pos 89.0 +38 33 training.batch_size 0.0 +38 34 model.embedding_dim 2.0 +38 34 loss.margin 2.7856162182788395 +38 34 loss.adversarial_temperature 0.2791041201772915 +38 34 optimizer.lr 0.005773277775992078 +38 34 negative_sampler.num_negs_per_pos 68.0 +38 34 training.batch_size 1.0 +38 35 model.embedding_dim 2.0 +38 35 loss.margin 6.261330786008319 +38 35 loss.adversarial_temperature 0.975549455556289 +38 35 optimizer.lr 0.09313994152157372 +38 35 negative_sampler.num_negs_per_pos 16.0 +38 35 training.batch_size 0.0 +38 36 model.embedding_dim 0.0 +38 36 loss.margin 18.89350156689501 +38 36 loss.adversarial_temperature 0.20639773254310495 +38 36 optimizer.lr 0.00986861222006272 +38 36 negative_sampler.num_negs_per_pos 65.0 +38 36 training.batch_size 0.0 +38 37 model.embedding_dim 2.0 +38 37 loss.margin 25.15167668411318 +38 37 loss.adversarial_temperature 0.7169785012429756 +38 37 optimizer.lr 0.00885627767380885 +38 37 negative_sampler.num_negs_per_pos 15.0 +38 37 training.batch_size 0.0 +38 38 model.embedding_dim 1.0 +38 38 loss.margin 6.905473942503219 +38 38 loss.adversarial_temperature 0.5140299948268949 +38 38 optimizer.lr 0.009396410151312191 +38 38 negative_sampler.num_negs_per_pos 88.0 +38 38 training.batch_size 0.0 +38 39 model.embedding_dim 2.0 +38 39 loss.margin 15.489585019857893 +38 39 loss.adversarial_temperature 0.1776885816275195 +38 39 optimizer.lr 0.03451041442832105 +38 39 negative_sampler.num_negs_per_pos 55.0 +38 39 training.batch_size 0.0 +38 40 model.embedding_dim 1.0 +38 40 loss.margin 2.7377505769642343 +38 40 loss.adversarial_temperature 0.8311351083057742 +38 40 optimizer.lr 0.06284003199308352 +38 40 negative_sampler.num_negs_per_pos 56.0 +38 40 training.batch_size 1.0 +38 41 model.embedding_dim 1.0 +38 41 loss.margin 20.60854163827207 +38 41 loss.adversarial_temperature 0.6192185152573444 +38 41 optimizer.lr 0.005101957960331128 +38 41 negative_sampler.num_negs_per_pos 9.0 +38 41 
training.batch_size 2.0 +38 42 model.embedding_dim 2.0 +38 42 loss.margin 24.187701867809754 +38 42 loss.adversarial_temperature 0.2620709476167136 +38 42 optimizer.lr 0.012338496017124983 +38 42 negative_sampler.num_negs_per_pos 87.0 +38 42 training.batch_size 1.0 +38 43 model.embedding_dim 2.0 +38 43 loss.margin 29.303214829130386 +38 43 loss.adversarial_temperature 0.7368996444232154 +38 43 optimizer.lr 0.0049307959347454945 +38 43 negative_sampler.num_negs_per_pos 84.0 +38 43 training.batch_size 2.0 +38 44 model.embedding_dim 1.0 +38 44 loss.margin 7.569724920136304 +38 44 loss.adversarial_temperature 0.6906406674529166 +38 44 optimizer.lr 0.0022560530455369775 +38 44 negative_sampler.num_negs_per_pos 29.0 +38 44 training.batch_size 0.0 +38 45 model.embedding_dim 1.0 +38 45 loss.margin 14.754493806634477 +38 45 loss.adversarial_temperature 0.8518812595511157 +38 45 optimizer.lr 0.02109895153603609 +38 45 negative_sampler.num_negs_per_pos 38.0 +38 45 training.batch_size 1.0 +38 46 model.embedding_dim 2.0 +38 46 loss.margin 23.052306435668093 +38 46 loss.adversarial_temperature 0.3050216362548826 +38 46 optimizer.lr 0.0017261489272460862 +38 46 negative_sampler.num_negs_per_pos 87.0 +38 46 training.batch_size 2.0 +38 47 model.embedding_dim 0.0 +38 47 loss.margin 2.64258520886324 +38 47 loss.adversarial_temperature 0.890684873355716 +38 47 optimizer.lr 0.012403514566182203 +38 47 negative_sampler.num_negs_per_pos 25.0 +38 47 training.batch_size 0.0 +38 48 model.embedding_dim 2.0 +38 48 loss.margin 9.06538889803325 +38 48 loss.adversarial_temperature 0.9132705329072514 +38 48 optimizer.lr 0.05698661634133686 +38 48 negative_sampler.num_negs_per_pos 47.0 +38 48 training.batch_size 2.0 +38 49 model.embedding_dim 2.0 +38 49 loss.margin 24.44566939919262 +38 49 loss.adversarial_temperature 0.8915733831175732 +38 49 optimizer.lr 0.0018800334831742657 +38 49 negative_sampler.num_negs_per_pos 95.0 +38 49 training.batch_size 0.0 +38 50 model.embedding_dim 0.0 +38 50 
loss.margin 20.367777076830293 +38 50 loss.adversarial_temperature 0.9519337135117044 +38 50 optimizer.lr 0.007713483490296537 +38 50 negative_sampler.num_negs_per_pos 85.0 +38 50 training.batch_size 0.0 +38 51 model.embedding_dim 2.0 +38 51 loss.margin 25.648342846335108 +38 51 loss.adversarial_temperature 0.6248178828458679 +38 51 optimizer.lr 0.001212682505413343 +38 51 negative_sampler.num_negs_per_pos 77.0 +38 51 training.batch_size 0.0 +38 52 model.embedding_dim 2.0 +38 52 loss.margin 20.26325937270069 +38 52 loss.adversarial_temperature 0.46950472465857906 +38 52 optimizer.lr 0.0647152879738273 +38 52 negative_sampler.num_negs_per_pos 98.0 +38 52 training.batch_size 0.0 +38 53 model.embedding_dim 0.0 +38 53 loss.margin 21.11761797604677 +38 53 loss.adversarial_temperature 0.9536282619475853 +38 53 optimizer.lr 0.009675531488276917 +38 53 negative_sampler.num_negs_per_pos 15.0 +38 53 training.batch_size 1.0 +38 54 model.embedding_dim 1.0 +38 54 loss.margin 19.802668746449996 +38 54 loss.adversarial_temperature 0.36033605520706435 +38 54 optimizer.lr 0.013767478568495434 +38 54 negative_sampler.num_negs_per_pos 43.0 +38 54 training.batch_size 1.0 +38 55 model.embedding_dim 2.0 +38 55 loss.margin 23.896004906843753 +38 55 loss.adversarial_temperature 0.6050233630563594 +38 55 optimizer.lr 0.002698337147745168 +38 55 negative_sampler.num_negs_per_pos 88.0 +38 55 training.batch_size 1.0 +38 56 model.embedding_dim 2.0 +38 56 loss.margin 6.123535263535504 +38 56 loss.adversarial_temperature 0.8798377008636554 +38 56 optimizer.lr 0.05496235775187415 +38 56 negative_sampler.num_negs_per_pos 74.0 +38 56 training.batch_size 1.0 +38 57 model.embedding_dim 0.0 +38 57 loss.margin 5.80412319759603 +38 57 loss.adversarial_temperature 0.2246012161635607 +38 57 optimizer.lr 0.009183351049574644 +38 57 negative_sampler.num_negs_per_pos 90.0 +38 57 training.batch_size 2.0 +38 58 model.embedding_dim 2.0 +38 58 loss.margin 9.508318164673629 +38 58 loss.adversarial_temperature 
0.8064596599034477 +38 58 optimizer.lr 0.002943660228824061 +38 58 negative_sampler.num_negs_per_pos 26.0 +38 58 training.batch_size 0.0 +38 59 model.embedding_dim 0.0 +38 59 loss.margin 13.46875943187454 +38 59 loss.adversarial_temperature 0.5596197428652319 +38 59 optimizer.lr 0.00756435837102422 +38 59 negative_sampler.num_negs_per_pos 78.0 +38 59 training.batch_size 2.0 +38 60 model.embedding_dim 0.0 +38 60 loss.margin 29.790516853350617 +38 60 loss.adversarial_temperature 0.9351839872975141 +38 60 optimizer.lr 0.011237455122075 +38 60 negative_sampler.num_negs_per_pos 14.0 +38 60 training.batch_size 0.0 +38 61 model.embedding_dim 1.0 +38 61 loss.margin 27.133338942411683 +38 61 loss.adversarial_temperature 0.3395211191020747 +38 61 optimizer.lr 0.09912395328023381 +38 61 negative_sampler.num_negs_per_pos 20.0 +38 61 training.batch_size 2.0 +38 62 model.embedding_dim 2.0 +38 62 loss.margin 13.129143011249704 +38 62 loss.adversarial_temperature 0.5578211563058747 +38 62 optimizer.lr 0.002805140907912464 +38 62 negative_sampler.num_negs_per_pos 89.0 +38 62 training.batch_size 2.0 +38 63 model.embedding_dim 2.0 +38 63 loss.margin 23.800977243502736 +38 63 loss.adversarial_temperature 0.7782878045970473 +38 63 optimizer.lr 0.06784820370626525 +38 63 negative_sampler.num_negs_per_pos 9.0 +38 63 training.batch_size 0.0 +38 64 model.embedding_dim 0.0 +38 64 loss.margin 11.937138214470552 +38 64 loss.adversarial_temperature 0.8260512430980553 +38 64 optimizer.lr 0.002349346740300043 +38 64 negative_sampler.num_negs_per_pos 18.0 +38 64 training.batch_size 1.0 +38 65 model.embedding_dim 1.0 +38 65 loss.margin 19.89086324149801 +38 65 loss.adversarial_temperature 0.49950312968818056 +38 65 optimizer.lr 0.06446736510044199 +38 65 negative_sampler.num_negs_per_pos 64.0 +38 65 training.batch_size 1.0 +38 66 model.embedding_dim 0.0 +38 66 loss.margin 18.124898042140618 +38 66 loss.adversarial_temperature 0.591297539567854 +38 66 optimizer.lr 0.004507098006798777 +38 66 
negative_sampler.num_negs_per_pos 20.0 +38 66 training.batch_size 2.0 +38 67 model.embedding_dim 1.0 +38 67 loss.margin 23.255574398139892 +38 67 loss.adversarial_temperature 0.508299725596387 +38 67 optimizer.lr 0.0011778679194061126 +38 67 negative_sampler.num_negs_per_pos 24.0 +38 67 training.batch_size 1.0 +38 68 model.embedding_dim 0.0 +38 68 loss.margin 10.35533081073477 +38 68 loss.adversarial_temperature 0.5859597840257159 +38 68 optimizer.lr 0.0015850815563381393 +38 68 negative_sampler.num_negs_per_pos 15.0 +38 68 training.batch_size 0.0 +38 69 model.embedding_dim 1.0 +38 69 loss.margin 20.951860879643064 +38 69 loss.adversarial_temperature 0.6393398527957214 +38 69 optimizer.lr 0.0027145193182396546 +38 69 negative_sampler.num_negs_per_pos 99.0 +38 69 training.batch_size 1.0 +38 70 model.embedding_dim 1.0 +38 70 loss.margin 4.142240262108034 +38 70 loss.adversarial_temperature 0.8830322091909405 +38 70 optimizer.lr 0.002775807084981391 +38 70 negative_sampler.num_negs_per_pos 11.0 +38 70 training.batch_size 0.0 +38 71 model.embedding_dim 2.0 +38 71 loss.margin 6.574856507571568 +38 71 loss.adversarial_temperature 0.12912264493338704 +38 71 optimizer.lr 0.012730485881665267 +38 71 negative_sampler.num_negs_per_pos 23.0 +38 71 training.batch_size 0.0 +38 72 model.embedding_dim 1.0 +38 72 loss.margin 2.23445670156055 +38 72 loss.adversarial_temperature 0.2689291025789431 +38 72 optimizer.lr 0.01415408785307367 +38 72 negative_sampler.num_negs_per_pos 62.0 +38 72 training.batch_size 0.0 +38 73 model.embedding_dim 2.0 +38 73 loss.margin 15.003124587197423 +38 73 loss.adversarial_temperature 0.18314397961875456 +38 73 optimizer.lr 0.0024426286650934587 +38 73 negative_sampler.num_negs_per_pos 28.0 +38 73 training.batch_size 0.0 +38 74 model.embedding_dim 1.0 +38 74 loss.margin 20.400165343445604 +38 74 loss.adversarial_temperature 0.7229122941027285 +38 74 optimizer.lr 0.008931641370475035 +38 74 negative_sampler.num_negs_per_pos 67.0 +38 74 
training.batch_size 2.0 +38 75 model.embedding_dim 1.0 +38 75 loss.margin 17.100831499619886 +38 75 loss.adversarial_temperature 0.8068918385284446 +38 75 optimizer.lr 0.00962983374904934 +38 75 negative_sampler.num_negs_per_pos 43.0 +38 75 training.batch_size 1.0 +38 76 model.embedding_dim 2.0 +38 76 loss.margin 26.499971947019713 +38 76 loss.adversarial_temperature 0.8527443059594526 +38 76 optimizer.lr 0.003409879482416871 +38 76 negative_sampler.num_negs_per_pos 51.0 +38 76 training.batch_size 2.0 +38 77 model.embedding_dim 0.0 +38 77 loss.margin 10.464355052151582 +38 77 loss.adversarial_temperature 0.9264885322542665 +38 77 optimizer.lr 0.008167512987753435 +38 77 negative_sampler.num_negs_per_pos 51.0 +38 77 training.batch_size 0.0 +38 78 model.embedding_dim 2.0 +38 78 loss.margin 10.556594561286115 +38 78 loss.adversarial_temperature 0.9809865467067357 +38 78 optimizer.lr 0.0081512157152062 +38 78 negative_sampler.num_negs_per_pos 51.0 +38 78 training.batch_size 1.0 +38 79 model.embedding_dim 0.0 +38 79 loss.margin 13.231231896395471 +38 79 loss.adversarial_temperature 0.7613582281816038 +38 79 optimizer.lr 0.06554974959169049 +38 79 negative_sampler.num_negs_per_pos 90.0 +38 79 training.batch_size 2.0 +38 80 model.embedding_dim 0.0 +38 80 loss.margin 15.475181954507308 +38 80 loss.adversarial_temperature 0.22078158198367445 +38 80 optimizer.lr 0.007901324344089366 +38 80 negative_sampler.num_negs_per_pos 39.0 +38 80 training.batch_size 0.0 +38 81 model.embedding_dim 1.0 +38 81 loss.margin 11.497529056062724 +38 81 loss.adversarial_temperature 0.40329937970904983 +38 81 optimizer.lr 0.028787165017439532 +38 81 negative_sampler.num_negs_per_pos 5.0 +38 81 training.batch_size 1.0 +38 82 model.embedding_dim 0.0 +38 82 loss.margin 1.4365094844464368 +38 82 loss.adversarial_temperature 0.10366532723153471 +38 82 optimizer.lr 0.019821380408983097 +38 82 negative_sampler.num_negs_per_pos 42.0 +38 82 training.batch_size 2.0 +38 83 model.embedding_dim 0.0 +38 83 
loss.margin 9.224091712888622 +38 83 loss.adversarial_temperature 0.8217147942323947 +38 83 optimizer.lr 0.02358789938831531 +38 83 negative_sampler.num_negs_per_pos 49.0 +38 83 training.batch_size 2.0 +38 84 model.embedding_dim 0.0 +38 84 loss.margin 9.266938035453196 +38 84 loss.adversarial_temperature 0.5326292662655252 +38 84 optimizer.lr 0.0024707339860285146 +38 84 negative_sampler.num_negs_per_pos 55.0 +38 84 training.batch_size 1.0 +38 85 model.embedding_dim 2.0 +38 85 loss.margin 28.24055193929215 +38 85 loss.adversarial_temperature 0.6961550600010474 +38 85 optimizer.lr 0.003278917046916364 +38 85 negative_sampler.num_negs_per_pos 51.0 +38 85 training.batch_size 2.0 +38 86 model.embedding_dim 2.0 +38 86 loss.margin 24.427653304002355 +38 86 loss.adversarial_temperature 0.4122483739055377 +38 86 optimizer.lr 0.029127291392755886 +38 86 negative_sampler.num_negs_per_pos 45.0 +38 86 training.batch_size 0.0 +38 87 model.embedding_dim 1.0 +38 87 loss.margin 1.5147831403186283 +38 87 loss.adversarial_temperature 0.8738672325515708 +38 87 optimizer.lr 0.0030712775842452176 +38 87 negative_sampler.num_negs_per_pos 59.0 +38 87 training.batch_size 0.0 +38 88 model.embedding_dim 0.0 +38 88 loss.margin 20.599609414002543 +38 88 loss.adversarial_temperature 0.44702828803316785 +38 88 optimizer.lr 0.07072035135377967 +38 88 negative_sampler.num_negs_per_pos 16.0 +38 88 training.batch_size 0.0 +38 89 model.embedding_dim 2.0 +38 89 loss.margin 9.671224650512226 +38 89 loss.adversarial_temperature 0.30659703918877534 +38 89 optimizer.lr 0.005284704464069252 +38 89 negative_sampler.num_negs_per_pos 33.0 +38 89 training.batch_size 1.0 +38 90 model.embedding_dim 1.0 +38 90 loss.margin 5.918378072063636 +38 90 loss.adversarial_temperature 0.8666774755095985 +38 90 optimizer.lr 0.0496845087934591 +38 90 negative_sampler.num_negs_per_pos 40.0 +38 90 training.batch_size 2.0 +38 91 model.embedding_dim 0.0 +38 91 loss.margin 12.512285760778225 +38 91 loss.adversarial_temperature 
0.44363704906467083 +38 91 optimizer.lr 0.03436971692835057 +38 91 negative_sampler.num_negs_per_pos 88.0 +38 91 training.batch_size 2.0 +38 92 model.embedding_dim 0.0 +38 92 loss.margin 17.724194829756744 +38 92 loss.adversarial_temperature 0.8862257949553071 +38 92 optimizer.lr 0.020475615876970516 +38 92 negative_sampler.num_negs_per_pos 10.0 +38 92 training.batch_size 1.0 +38 93 model.embedding_dim 2.0 +38 93 loss.margin 2.1426111743543452 +38 93 loss.adversarial_temperature 0.48697325266493274 +38 93 optimizer.lr 0.002198100700998463 +38 93 negative_sampler.num_negs_per_pos 17.0 +38 93 training.batch_size 2.0 +38 94 model.embedding_dim 2.0 +38 94 loss.margin 24.77921579576254 +38 94 loss.adversarial_temperature 0.5567531298856229 +38 94 optimizer.lr 0.06145784721362372 +38 94 negative_sampler.num_negs_per_pos 17.0 +38 94 training.batch_size 1.0 +38 95 model.embedding_dim 1.0 +38 95 loss.margin 28.498957337701075 +38 95 loss.adversarial_temperature 0.35004436991164856 +38 95 optimizer.lr 0.08000439564266656 +38 95 negative_sampler.num_negs_per_pos 50.0 +38 95 training.batch_size 0.0 +38 96 model.embedding_dim 2.0 +38 96 loss.margin 16.50420719382425 +38 96 loss.adversarial_temperature 0.2946465945258886 +38 96 optimizer.lr 0.056974326009009264 +38 96 negative_sampler.num_negs_per_pos 50.0 +38 96 training.batch_size 2.0 +38 97 model.embedding_dim 0.0 +38 97 loss.margin 3.7837258978857577 +38 97 loss.adversarial_temperature 0.7174953293392651 +38 97 optimizer.lr 0.003046903371053653 +38 97 negative_sampler.num_negs_per_pos 52.0 +38 97 training.batch_size 1.0 +38 98 model.embedding_dim 2.0 +38 98 loss.margin 23.619785280019276 +38 98 loss.adversarial_temperature 0.5004211839524347 +38 98 optimizer.lr 0.0034607171951626914 +38 98 negative_sampler.num_negs_per_pos 48.0 +38 98 training.batch_size 1.0 +38 99 model.embedding_dim 0.0 +38 99 loss.margin 21.56302269516773 +38 99 loss.adversarial_temperature 0.43776130542557945 +38 99 optimizer.lr 0.0015005775358446164 +38 
99 negative_sampler.num_negs_per_pos 98.0 +38 99 training.batch_size 1.0 +38 100 model.embedding_dim 0.0 +38 100 loss.margin 8.09066946835108 +38 100 loss.adversarial_temperature 0.8003078604158453 +38 100 optimizer.lr 0.0014865980338285832 +38 100 negative_sampler.num_negs_per_pos 27.0 +38 100 training.batch_size 0.0 +38 1 dataset """kinships""" +38 1 model """complex""" +38 1 loss """nssa""" +38 1 regularizer """no""" +38 1 optimizer """adam""" +38 1 training_loop """owa""" +38 1 negative_sampler """basic""" +38 1 evaluator """rankbased""" +38 2 dataset """kinships""" +38 2 model """complex""" +38 2 loss """nssa""" +38 2 regularizer """no""" +38 2 optimizer """adam""" +38 2 training_loop """owa""" +38 2 negative_sampler """basic""" +38 2 evaluator """rankbased""" +38 3 dataset """kinships""" +38 3 model """complex""" +38 3 loss """nssa""" +38 3 regularizer """no""" +38 3 optimizer """adam""" +38 3 training_loop """owa""" +38 3 negative_sampler """basic""" +38 3 evaluator """rankbased""" +38 4 dataset """kinships""" +38 4 model """complex""" +38 4 loss """nssa""" +38 4 regularizer """no""" +38 4 optimizer """adam""" +38 4 training_loop """owa""" +38 4 negative_sampler """basic""" +38 4 evaluator """rankbased""" +38 5 dataset """kinships""" +38 5 model """complex""" +38 5 loss """nssa""" +38 5 regularizer """no""" +38 5 optimizer """adam""" +38 5 training_loop """owa""" +38 5 negative_sampler """basic""" +38 5 evaluator """rankbased""" +38 6 dataset """kinships""" +38 6 model """complex""" +38 6 loss """nssa""" +38 6 regularizer """no""" +38 6 optimizer """adam""" +38 6 training_loop """owa""" +38 6 negative_sampler """basic""" +38 6 evaluator """rankbased""" +38 7 dataset """kinships""" +38 7 model """complex""" +38 7 loss """nssa""" +38 7 regularizer """no""" +38 7 optimizer """adam""" +38 7 training_loop """owa""" +38 7 negative_sampler """basic""" +38 7 evaluator """rankbased""" +38 8 dataset """kinships""" +38 8 model """complex""" +38 8 loss """nssa""" +38 8 
regularizer """no""" +38 8 optimizer """adam""" +38 8 training_loop """owa""" +38 8 negative_sampler """basic""" +38 8 evaluator """rankbased""" +38 9 dataset """kinships""" +38 9 model """complex""" +38 9 loss """nssa""" +38 9 regularizer """no""" +38 9 optimizer """adam""" +38 9 training_loop """owa""" +38 9 negative_sampler """basic""" +38 9 evaluator """rankbased""" +38 10 dataset """kinships""" +38 10 model """complex""" +38 10 loss """nssa""" +38 10 regularizer """no""" +38 10 optimizer """adam""" +38 10 training_loop """owa""" +38 10 negative_sampler """basic""" +38 10 evaluator """rankbased""" +38 11 dataset """kinships""" +38 11 model """complex""" +38 11 loss """nssa""" +38 11 regularizer """no""" +38 11 optimizer """adam""" +38 11 training_loop """owa""" +38 11 negative_sampler """basic""" +38 11 evaluator """rankbased""" +38 12 dataset """kinships""" +38 12 model """complex""" +38 12 loss """nssa""" +38 12 regularizer """no""" +38 12 optimizer """adam""" +38 12 training_loop """owa""" +38 12 negative_sampler """basic""" +38 12 evaluator """rankbased""" +38 13 dataset """kinships""" +38 13 model """complex""" +38 13 loss """nssa""" +38 13 regularizer """no""" +38 13 optimizer """adam""" +38 13 training_loop """owa""" +38 13 negative_sampler """basic""" +38 13 evaluator """rankbased""" +38 14 dataset """kinships""" +38 14 model """complex""" +38 14 loss """nssa""" +38 14 regularizer """no""" +38 14 optimizer """adam""" +38 14 training_loop """owa""" +38 14 negative_sampler """basic""" +38 14 evaluator """rankbased""" +38 15 dataset """kinships""" +38 15 model """complex""" +38 15 loss """nssa""" +38 15 regularizer """no""" +38 15 optimizer """adam""" +38 15 training_loop """owa""" +38 15 negative_sampler """basic""" +38 15 evaluator """rankbased""" +38 16 dataset """kinships""" +38 16 model """complex""" +38 16 loss """nssa""" +38 16 regularizer """no""" +38 16 optimizer """adam""" +38 16 training_loop """owa""" +38 16 negative_sampler """basic""" +38 16 
evaluator """rankbased""" +38 17 dataset """kinships""" +38 17 model """complex""" +38 17 loss """nssa""" +38 17 regularizer """no""" +38 17 optimizer """adam""" +38 17 training_loop """owa""" +38 17 negative_sampler """basic""" +38 17 evaluator """rankbased""" +38 18 dataset """kinships""" +38 18 model """complex""" +38 18 loss """nssa""" +38 18 regularizer """no""" +38 18 optimizer """adam""" +38 18 training_loop """owa""" +38 18 negative_sampler """basic""" +38 18 evaluator """rankbased""" +38 19 dataset """kinships""" +38 19 model """complex""" +38 19 loss """nssa""" +38 19 regularizer """no""" +38 19 optimizer """adam""" +38 19 training_loop """owa""" +38 19 negative_sampler """basic""" +38 19 evaluator """rankbased""" +38 20 dataset """kinships""" +38 20 model """complex""" +38 20 loss """nssa""" +38 20 regularizer """no""" +38 20 optimizer """adam""" +38 20 training_loop """owa""" +38 20 negative_sampler """basic""" +38 20 evaluator """rankbased""" +38 21 dataset """kinships""" +38 21 model """complex""" +38 21 loss """nssa""" +38 21 regularizer """no""" +38 21 optimizer """adam""" +38 21 training_loop """owa""" +38 21 negative_sampler """basic""" +38 21 evaluator """rankbased""" +38 22 dataset """kinships""" +38 22 model """complex""" +38 22 loss """nssa""" +38 22 regularizer """no""" +38 22 optimizer """adam""" +38 22 training_loop """owa""" +38 22 negative_sampler """basic""" +38 22 evaluator """rankbased""" +38 23 dataset """kinships""" +38 23 model """complex""" +38 23 loss """nssa""" +38 23 regularizer """no""" +38 23 optimizer """adam""" +38 23 training_loop """owa""" +38 23 negative_sampler """basic""" +38 23 evaluator """rankbased""" +38 24 dataset """kinships""" +38 24 model """complex""" +38 24 loss """nssa""" +38 24 regularizer """no""" +38 24 optimizer """adam""" +38 24 training_loop """owa""" +38 24 negative_sampler """basic""" +38 24 evaluator """rankbased""" +38 25 dataset """kinships""" +38 25 model """complex""" +38 25 loss """nssa""" +38 
25 regularizer """no""" +38 25 optimizer """adam""" +38 25 training_loop """owa""" +38 25 negative_sampler """basic""" +38 25 evaluator """rankbased""" +38 26 dataset """kinships""" +38 26 model """complex""" +38 26 loss """nssa""" +38 26 regularizer """no""" +38 26 optimizer """adam""" +38 26 training_loop """owa""" +38 26 negative_sampler """basic""" +38 26 evaluator """rankbased""" +38 27 dataset """kinships""" +38 27 model """complex""" +38 27 loss """nssa""" +38 27 regularizer """no""" +38 27 optimizer """adam""" +38 27 training_loop """owa""" +38 27 negative_sampler """basic""" +38 27 evaluator """rankbased""" +38 28 dataset """kinships""" +38 28 model """complex""" +38 28 loss """nssa""" +38 28 regularizer """no""" +38 28 optimizer """adam""" +38 28 training_loop """owa""" +38 28 negative_sampler """basic""" +38 28 evaluator """rankbased""" +38 29 dataset """kinships""" +38 29 model """complex""" +38 29 loss """nssa""" +38 29 regularizer """no""" +38 29 optimizer """adam""" +38 29 training_loop """owa""" +38 29 negative_sampler """basic""" +38 29 evaluator """rankbased""" +38 30 dataset """kinships""" +38 30 model """complex""" +38 30 loss """nssa""" +38 30 regularizer """no""" +38 30 optimizer """adam""" +38 30 training_loop """owa""" +38 30 negative_sampler """basic""" +38 30 evaluator """rankbased""" +38 31 dataset """kinships""" +38 31 model """complex""" +38 31 loss """nssa""" +38 31 regularizer """no""" +38 31 optimizer """adam""" +38 31 training_loop """owa""" +38 31 negative_sampler """basic""" +38 31 evaluator """rankbased""" +38 32 dataset """kinships""" +38 32 model """complex""" +38 32 loss """nssa""" +38 32 regularizer """no""" +38 32 optimizer """adam""" +38 32 training_loop """owa""" +38 32 negative_sampler """basic""" +38 32 evaluator """rankbased""" +38 33 dataset """kinships""" +38 33 model """complex""" +38 33 loss """nssa""" +38 33 regularizer """no""" +38 33 optimizer """adam""" +38 33 training_loop """owa""" +38 33 negative_sampler 
"""basic""" +38 33 evaluator """rankbased""" +38 34 dataset """kinships""" +38 34 model """complex""" +38 34 loss """nssa""" +38 34 regularizer """no""" +38 34 optimizer """adam""" +38 34 training_loop """owa""" +38 34 negative_sampler """basic""" +38 34 evaluator """rankbased""" +38 35 dataset """kinships""" +38 35 model """complex""" +38 35 loss """nssa""" +38 35 regularizer """no""" +38 35 optimizer """adam""" +38 35 training_loop """owa""" +38 35 negative_sampler """basic""" +38 35 evaluator """rankbased""" +38 36 dataset """kinships""" +38 36 model """complex""" +38 36 loss """nssa""" +38 36 regularizer """no""" +38 36 optimizer """adam""" +38 36 training_loop """owa""" +38 36 negative_sampler """basic""" +38 36 evaluator """rankbased""" +38 37 dataset """kinships""" +38 37 model """complex""" +38 37 loss """nssa""" +38 37 regularizer """no""" +38 37 optimizer """adam""" +38 37 training_loop """owa""" +38 37 negative_sampler """basic""" +38 37 evaluator """rankbased""" +38 38 dataset """kinships""" +38 38 model """complex""" +38 38 loss """nssa""" +38 38 regularizer """no""" +38 38 optimizer """adam""" +38 38 training_loop """owa""" +38 38 negative_sampler """basic""" +38 38 evaluator """rankbased""" +38 39 dataset """kinships""" +38 39 model """complex""" +38 39 loss """nssa""" +38 39 regularizer """no""" +38 39 optimizer """adam""" +38 39 training_loop """owa""" +38 39 negative_sampler """basic""" +38 39 evaluator """rankbased""" +38 40 dataset """kinships""" +38 40 model """complex""" +38 40 loss """nssa""" +38 40 regularizer """no""" +38 40 optimizer """adam""" +38 40 training_loop """owa""" +38 40 negative_sampler """basic""" +38 40 evaluator """rankbased""" +38 41 dataset """kinships""" +38 41 model """complex""" +38 41 loss """nssa""" +38 41 regularizer """no""" +38 41 optimizer """adam""" +38 41 training_loop """owa""" +38 41 negative_sampler """basic""" +38 41 evaluator """rankbased""" +38 42 dataset """kinships""" +38 42 model """complex""" +38 42 
loss """nssa""" +38 42 regularizer """no""" +38 42 optimizer """adam""" +38 42 training_loop """owa""" +38 42 negative_sampler """basic""" +38 42 evaluator """rankbased""" +38 43 dataset """kinships""" +38 43 model """complex""" +38 43 loss """nssa""" +38 43 regularizer """no""" +38 43 optimizer """adam""" +38 43 training_loop """owa""" +38 43 negative_sampler """basic""" +38 43 evaluator """rankbased""" +38 44 dataset """kinships""" +38 44 model """complex""" +38 44 loss """nssa""" +38 44 regularizer """no""" +38 44 optimizer """adam""" +38 44 training_loop """owa""" +38 44 negative_sampler """basic""" +38 44 evaluator """rankbased""" +38 45 dataset """kinships""" +38 45 model """complex""" +38 45 loss """nssa""" +38 45 regularizer """no""" +38 45 optimizer """adam""" +38 45 training_loop """owa""" +38 45 negative_sampler """basic""" +38 45 evaluator """rankbased""" +38 46 dataset """kinships""" +38 46 model """complex""" +38 46 loss """nssa""" +38 46 regularizer """no""" +38 46 optimizer """adam""" +38 46 training_loop """owa""" +38 46 negative_sampler """basic""" +38 46 evaluator """rankbased""" +38 47 dataset """kinships""" +38 47 model """complex""" +38 47 loss """nssa""" +38 47 regularizer """no""" +38 47 optimizer """adam""" +38 47 training_loop """owa""" +38 47 negative_sampler """basic""" +38 47 evaluator """rankbased""" +38 48 dataset """kinships""" +38 48 model """complex""" +38 48 loss """nssa""" +38 48 regularizer """no""" +38 48 optimizer """adam""" +38 48 training_loop """owa""" +38 48 negative_sampler """basic""" +38 48 evaluator """rankbased""" +38 49 dataset """kinships""" +38 49 model """complex""" +38 49 loss """nssa""" +38 49 regularizer """no""" +38 49 optimizer """adam""" +38 49 training_loop """owa""" +38 49 negative_sampler """basic""" +38 49 evaluator """rankbased""" +38 50 dataset """kinships""" +38 50 model """complex""" +38 50 loss """nssa""" +38 50 regularizer """no""" +38 50 optimizer """adam""" +38 50 training_loop """owa""" +38 50 
negative_sampler """basic""" +38 50 evaluator """rankbased""" +38 51 dataset """kinships""" +38 51 model """complex""" +38 51 loss """nssa""" +38 51 regularizer """no""" +38 51 optimizer """adam""" +38 51 training_loop """owa""" +38 51 negative_sampler """basic""" +38 51 evaluator """rankbased""" +38 52 dataset """kinships""" +38 52 model """complex""" +38 52 loss """nssa""" +38 52 regularizer """no""" +38 52 optimizer """adam""" +38 52 training_loop """owa""" +38 52 negative_sampler """basic""" +38 52 evaluator """rankbased""" +38 53 dataset """kinships""" +38 53 model """complex""" +38 53 loss """nssa""" +38 53 regularizer """no""" +38 53 optimizer """adam""" +38 53 training_loop """owa""" +38 53 negative_sampler """basic""" +38 53 evaluator """rankbased""" +38 54 dataset """kinships""" +38 54 model """complex""" +38 54 loss """nssa""" +38 54 regularizer """no""" +38 54 optimizer """adam""" +38 54 training_loop """owa""" +38 54 negative_sampler """basic""" +38 54 evaluator """rankbased""" +38 55 dataset """kinships""" +38 55 model """complex""" +38 55 loss """nssa""" +38 55 regularizer """no""" +38 55 optimizer """adam""" +38 55 training_loop """owa""" +38 55 negative_sampler """basic""" +38 55 evaluator """rankbased""" +38 56 dataset """kinships""" +38 56 model """complex""" +38 56 loss """nssa""" +38 56 regularizer """no""" +38 56 optimizer """adam""" +38 56 training_loop """owa""" +38 56 negative_sampler """basic""" +38 56 evaluator """rankbased""" +38 57 dataset """kinships""" +38 57 model """complex""" +38 57 loss """nssa""" +38 57 regularizer """no""" +38 57 optimizer """adam""" +38 57 training_loop """owa""" +38 57 negative_sampler """basic""" +38 57 evaluator """rankbased""" +38 58 dataset """kinships""" +38 58 model """complex""" +38 58 loss """nssa""" +38 58 regularizer """no""" +38 58 optimizer """adam""" +38 58 training_loop """owa""" +38 58 negative_sampler """basic""" +38 58 evaluator """rankbased""" +38 59 dataset """kinships""" +38 59 model 
"""complex""" +38 59 loss """nssa""" +38 59 regularizer """no""" +38 59 optimizer """adam""" +38 59 training_loop """owa""" +38 59 negative_sampler """basic""" +38 59 evaluator """rankbased""" +38 60 dataset """kinships""" +38 60 model """complex""" +38 60 loss """nssa""" +38 60 regularizer """no""" +38 60 optimizer """adam""" +38 60 training_loop """owa""" +38 60 negative_sampler """basic""" +38 60 evaluator """rankbased""" +38 61 dataset """kinships""" +38 61 model """complex""" +38 61 loss """nssa""" +38 61 regularizer """no""" +38 61 optimizer """adam""" +38 61 training_loop """owa""" +38 61 negative_sampler """basic""" +38 61 evaluator """rankbased""" +38 62 dataset """kinships""" +38 62 model """complex""" +38 62 loss """nssa""" +38 62 regularizer """no""" +38 62 optimizer """adam""" +38 62 training_loop """owa""" +38 62 negative_sampler """basic""" +38 62 evaluator """rankbased""" +38 63 dataset """kinships""" +38 63 model """complex""" +38 63 loss """nssa""" +38 63 regularizer """no""" +38 63 optimizer """adam""" +38 63 training_loop """owa""" +38 63 negative_sampler """basic""" +38 63 evaluator """rankbased""" +38 64 dataset """kinships""" +38 64 model """complex""" +38 64 loss """nssa""" +38 64 regularizer """no""" +38 64 optimizer """adam""" +38 64 training_loop """owa""" +38 64 negative_sampler """basic""" +38 64 evaluator """rankbased""" +38 65 dataset """kinships""" +38 65 model """complex""" +38 65 loss """nssa""" +38 65 regularizer """no""" +38 65 optimizer """adam""" +38 65 training_loop """owa""" +38 65 negative_sampler """basic""" +38 65 evaluator """rankbased""" +38 66 dataset """kinships""" +38 66 model """complex""" +38 66 loss """nssa""" +38 66 regularizer """no""" +38 66 optimizer """adam""" +38 66 training_loop """owa""" +38 66 negative_sampler """basic""" +38 66 evaluator """rankbased""" +38 67 dataset """kinships""" +38 67 model """complex""" +38 67 loss """nssa""" +38 67 regularizer """no""" +38 67 optimizer """adam""" +38 67 
training_loop """owa""" +38 67 negative_sampler """basic""" +38 67 evaluator """rankbased""" +38 68 dataset """kinships""" +38 68 model """complex""" +38 68 loss """nssa""" +38 68 regularizer """no""" +38 68 optimizer """adam""" +38 68 training_loop """owa""" +38 68 negative_sampler """basic""" +38 68 evaluator """rankbased""" +38 69 dataset """kinships""" +38 69 model """complex""" +38 69 loss """nssa""" +38 69 regularizer """no""" +38 69 optimizer """adam""" +38 69 training_loop """owa""" +38 69 negative_sampler """basic""" +38 69 evaluator """rankbased""" +38 70 dataset """kinships""" +38 70 model """complex""" +38 70 loss """nssa""" +38 70 regularizer """no""" +38 70 optimizer """adam""" +38 70 training_loop """owa""" +38 70 negative_sampler """basic""" +38 70 evaluator """rankbased""" +38 71 dataset """kinships""" +38 71 model """complex""" +38 71 loss """nssa""" +38 71 regularizer """no""" +38 71 optimizer """adam""" +38 71 training_loop """owa""" +38 71 negative_sampler """basic""" +38 71 evaluator """rankbased""" +38 72 dataset """kinships""" +38 72 model """complex""" +38 72 loss """nssa""" +38 72 regularizer """no""" +38 72 optimizer """adam""" +38 72 training_loop """owa""" +38 72 negative_sampler """basic""" +38 72 evaluator """rankbased""" +38 73 dataset """kinships""" +38 73 model """complex""" +38 73 loss """nssa""" +38 73 regularizer """no""" +38 73 optimizer """adam""" +38 73 training_loop """owa""" +38 73 negative_sampler """basic""" +38 73 evaluator """rankbased""" +38 74 dataset """kinships""" +38 74 model """complex""" +38 74 loss """nssa""" +38 74 regularizer """no""" +38 74 optimizer """adam""" +38 74 training_loop """owa""" +38 74 negative_sampler """basic""" +38 74 evaluator """rankbased""" +38 75 dataset """kinships""" +38 75 model """complex""" +38 75 loss """nssa""" +38 75 regularizer """no""" +38 75 optimizer """adam""" +38 75 training_loop """owa""" +38 75 negative_sampler """basic""" +38 75 evaluator """rankbased""" +38 76 dataset 
"""kinships""" +38 76 model """complex""" +38 76 loss """nssa""" +38 76 regularizer """no""" +38 76 optimizer """adam""" +38 76 training_loop """owa""" +38 76 negative_sampler """basic""" +38 76 evaluator """rankbased""" +38 77 dataset """kinships""" +38 77 model """complex""" +38 77 loss """nssa""" +38 77 regularizer """no""" +38 77 optimizer """adam""" +38 77 training_loop """owa""" +38 77 negative_sampler """basic""" +38 77 evaluator """rankbased""" +38 78 dataset """kinships""" +38 78 model """complex""" +38 78 loss """nssa""" +38 78 regularizer """no""" +38 78 optimizer """adam""" +38 78 training_loop """owa""" +38 78 negative_sampler """basic""" +38 78 evaluator """rankbased""" +38 79 dataset """kinships""" +38 79 model """complex""" +38 79 loss """nssa""" +38 79 regularizer """no""" +38 79 optimizer """adam""" +38 79 training_loop """owa""" +38 79 negative_sampler """basic""" +38 79 evaluator """rankbased""" +38 80 dataset """kinships""" +38 80 model """complex""" +38 80 loss """nssa""" +38 80 regularizer """no""" +38 80 optimizer """adam""" +38 80 training_loop """owa""" +38 80 negative_sampler """basic""" +38 80 evaluator """rankbased""" +38 81 dataset """kinships""" +38 81 model """complex""" +38 81 loss """nssa""" +38 81 regularizer """no""" +38 81 optimizer """adam""" +38 81 training_loop """owa""" +38 81 negative_sampler """basic""" +38 81 evaluator """rankbased""" +38 82 dataset """kinships""" +38 82 model """complex""" +38 82 loss """nssa""" +38 82 regularizer """no""" +38 82 optimizer """adam""" +38 82 training_loop """owa""" +38 82 negative_sampler """basic""" +38 82 evaluator """rankbased""" +38 83 dataset """kinships""" +38 83 model """complex""" +38 83 loss """nssa""" +38 83 regularizer """no""" +38 83 optimizer """adam""" +38 83 training_loop """owa""" +38 83 negative_sampler """basic""" +38 83 evaluator """rankbased""" +38 84 dataset """kinships""" +38 84 model """complex""" +38 84 loss """nssa""" +38 84 regularizer """no""" +38 84 optimizer 
"""adam""" +38 84 training_loop """owa""" +38 84 negative_sampler """basic""" +38 84 evaluator """rankbased""" +38 85 dataset """kinships""" +38 85 model """complex""" +38 85 loss """nssa""" +38 85 regularizer """no""" +38 85 optimizer """adam""" +38 85 training_loop """owa""" +38 85 negative_sampler """basic""" +38 85 evaluator """rankbased""" +38 86 dataset """kinships""" +38 86 model """complex""" +38 86 loss """nssa""" +38 86 regularizer """no""" +38 86 optimizer """adam""" +38 86 training_loop """owa""" +38 86 negative_sampler """basic""" +38 86 evaluator """rankbased""" +38 87 dataset """kinships""" +38 87 model """complex""" +38 87 loss """nssa""" +38 87 regularizer """no""" +38 87 optimizer """adam""" +38 87 training_loop """owa""" +38 87 negative_sampler """basic""" +38 87 evaluator """rankbased""" +38 88 dataset """kinships""" +38 88 model """complex""" +38 88 loss """nssa""" +38 88 regularizer """no""" +38 88 optimizer """adam""" +38 88 training_loop """owa""" +38 88 negative_sampler """basic""" +38 88 evaluator """rankbased""" +38 89 dataset """kinships""" +38 89 model """complex""" +38 89 loss """nssa""" +38 89 regularizer """no""" +38 89 optimizer """adam""" +38 89 training_loop """owa""" +38 89 negative_sampler """basic""" +38 89 evaluator """rankbased""" +38 90 dataset """kinships""" +38 90 model """complex""" +38 90 loss """nssa""" +38 90 regularizer """no""" +38 90 optimizer """adam""" +38 90 training_loop """owa""" +38 90 negative_sampler """basic""" +38 90 evaluator """rankbased""" +38 91 dataset """kinships""" +38 91 model """complex""" +38 91 loss """nssa""" +38 91 regularizer """no""" +38 91 optimizer """adam""" +38 91 training_loop """owa""" +38 91 negative_sampler """basic""" +38 91 evaluator """rankbased""" +38 92 dataset """kinships""" +38 92 model """complex""" +38 92 loss """nssa""" +38 92 regularizer """no""" +38 92 optimizer """adam""" +38 92 training_loop """owa""" +38 92 negative_sampler """basic""" +38 92 evaluator """rankbased""" 
+38 93 dataset """kinships""" +38 93 model """complex""" +38 93 loss """nssa""" +38 93 regularizer """no""" +38 93 optimizer """adam""" +38 93 training_loop """owa""" +38 93 negative_sampler """basic""" +38 93 evaluator """rankbased""" +38 94 dataset """kinships""" +38 94 model """complex""" +38 94 loss """nssa""" +38 94 regularizer """no""" +38 94 optimizer """adam""" +38 94 training_loop """owa""" +38 94 negative_sampler """basic""" +38 94 evaluator """rankbased""" +38 95 dataset """kinships""" +38 95 model """complex""" +38 95 loss """nssa""" +38 95 regularizer """no""" +38 95 optimizer """adam""" +38 95 training_loop """owa""" +38 95 negative_sampler """basic""" +38 95 evaluator """rankbased""" +38 96 dataset """kinships""" +38 96 model """complex""" +38 96 loss """nssa""" +38 96 regularizer """no""" +38 96 optimizer """adam""" +38 96 training_loop """owa""" +38 96 negative_sampler """basic""" +38 96 evaluator """rankbased""" +38 97 dataset """kinships""" +38 97 model """complex""" +38 97 loss """nssa""" +38 97 regularizer """no""" +38 97 optimizer """adam""" +38 97 training_loop """owa""" +38 97 negative_sampler """basic""" +38 97 evaluator """rankbased""" +38 98 dataset """kinships""" +38 98 model """complex""" +38 98 loss """nssa""" +38 98 regularizer """no""" +38 98 optimizer """adam""" +38 98 training_loop """owa""" +38 98 negative_sampler """basic""" +38 98 evaluator """rankbased""" +38 99 dataset """kinships""" +38 99 model """complex""" +38 99 loss """nssa""" +38 99 regularizer """no""" +38 99 optimizer """adam""" +38 99 training_loop """owa""" +38 99 negative_sampler """basic""" +38 99 evaluator """rankbased""" +38 100 dataset """kinships""" +38 100 model """complex""" +38 100 loss """nssa""" +38 100 regularizer """no""" +38 100 optimizer """adam""" +38 100 training_loop """owa""" +38 100 negative_sampler """basic""" +38 100 evaluator """rankbased""" +39 1 model.embedding_dim 0.0 +39 1 loss.margin 25.918883040549723 +39 1 loss.adversarial_temperature 
0.36291374099499574 +39 1 optimizer.lr 0.027684434645847262 +39 1 negative_sampler.num_negs_per_pos 36.0 +39 1 training.batch_size 2.0 +39 2 model.embedding_dim 1.0 +39 2 loss.margin 12.505337545320508 +39 2 loss.adversarial_temperature 0.8773193509932222 +39 2 optimizer.lr 0.006954704122400858 +39 2 negative_sampler.num_negs_per_pos 91.0 +39 2 training.batch_size 2.0 +39 3 model.embedding_dim 2.0 +39 3 loss.margin 23.79113374610379 +39 3 loss.adversarial_temperature 0.11789691044198934 +39 3 optimizer.lr 0.003954951324604298 +39 3 negative_sampler.num_negs_per_pos 24.0 +39 3 training.batch_size 2.0 +39 4 model.embedding_dim 2.0 +39 4 loss.margin 18.181990532521258 +39 4 loss.adversarial_temperature 0.34593493582635715 +39 4 optimizer.lr 0.08345441079392871 +39 4 negative_sampler.num_negs_per_pos 65.0 +39 4 training.batch_size 0.0 +39 5 model.embedding_dim 2.0 +39 5 loss.margin 29.63887924386953 +39 5 loss.adversarial_temperature 0.3948565183177716 +39 5 optimizer.lr 0.007887608095256293 +39 5 negative_sampler.num_negs_per_pos 66.0 +39 5 training.batch_size 2.0 +39 6 model.embedding_dim 2.0 +39 6 loss.margin 15.391551236462687 +39 6 loss.adversarial_temperature 0.8035609596098443 +39 6 optimizer.lr 0.021716719361263494 +39 6 negative_sampler.num_negs_per_pos 53.0 +39 6 training.batch_size 0.0 +39 7 model.embedding_dim 0.0 +39 7 loss.margin 13.460182763780557 +39 7 loss.adversarial_temperature 0.21378219563186532 +39 7 optimizer.lr 0.0013620252238528536 +39 7 negative_sampler.num_negs_per_pos 37.0 +39 7 training.batch_size 1.0 +39 8 model.embedding_dim 1.0 +39 8 loss.margin 26.027378501530087 +39 8 loss.adversarial_temperature 0.48676127617631515 +39 8 optimizer.lr 0.006585582513039488 +39 8 negative_sampler.num_negs_per_pos 58.0 +39 8 training.batch_size 0.0 +39 9 model.embedding_dim 0.0 +39 9 loss.margin 8.35037814223879 +39 9 loss.adversarial_temperature 0.3127474309570752 +39 9 optimizer.lr 0.0020321104135924035 +39 9 negative_sampler.num_negs_per_pos 78.0 +39 9 
training.batch_size 2.0 +39 10 model.embedding_dim 2.0 +39 10 loss.margin 13.000438598174126 +39 10 loss.adversarial_temperature 0.2266612425225493 +39 10 optimizer.lr 0.006670613007374148 +39 10 negative_sampler.num_negs_per_pos 9.0 +39 10 training.batch_size 2.0 +39 11 model.embedding_dim 0.0 +39 11 loss.margin 25.46476815584402 +39 11 loss.adversarial_temperature 0.8268559542758047 +39 11 optimizer.lr 0.029567120315795208 +39 11 negative_sampler.num_negs_per_pos 63.0 +39 11 training.batch_size 0.0 +39 12 model.embedding_dim 2.0 +39 12 loss.margin 18.239582672618692 +39 12 loss.adversarial_temperature 0.9337951911454316 +39 12 optimizer.lr 0.0023395174148476217 +39 12 negative_sampler.num_negs_per_pos 17.0 +39 12 training.batch_size 1.0 +39 13 model.embedding_dim 0.0 +39 13 loss.margin 17.390750861031027 +39 13 loss.adversarial_temperature 0.6639926081551165 +39 13 optimizer.lr 0.005444001334570873 +39 13 negative_sampler.num_negs_per_pos 35.0 +39 13 training.batch_size 1.0 +39 14 model.embedding_dim 1.0 +39 14 loss.margin 16.969808509526523 +39 14 loss.adversarial_temperature 0.8901551516417829 +39 14 optimizer.lr 0.06563203364828361 +39 14 negative_sampler.num_negs_per_pos 56.0 +39 14 training.batch_size 1.0 +39 15 model.embedding_dim 2.0 +39 15 loss.margin 13.474354244915823 +39 15 loss.adversarial_temperature 0.3373775626485134 +39 15 optimizer.lr 0.012088629523948818 +39 15 negative_sampler.num_negs_per_pos 79.0 +39 15 training.batch_size 1.0 +39 16 model.embedding_dim 1.0 +39 16 loss.margin 27.59008690207903 +39 16 loss.adversarial_temperature 0.191580154249533 +39 16 optimizer.lr 0.007101482189835838 +39 16 negative_sampler.num_negs_per_pos 24.0 +39 16 training.batch_size 1.0 +39 17 model.embedding_dim 2.0 +39 17 loss.margin 24.661589385896878 +39 17 loss.adversarial_temperature 0.3411088181874428 +39 17 optimizer.lr 0.0024699379100779884 +39 17 negative_sampler.num_negs_per_pos 35.0 +39 17 training.batch_size 2.0 +39 18 model.embedding_dim 0.0 +39 18 
loss.margin 4.9179303386693425 +39 18 loss.adversarial_temperature 0.3089749585266425 +39 18 optimizer.lr 0.0018208618590428965 +39 18 negative_sampler.num_negs_per_pos 84.0 +39 18 training.batch_size 0.0 +39 19 model.embedding_dim 0.0 +39 19 loss.margin 28.23414860458971 +39 19 loss.adversarial_temperature 0.6491347328127979 +39 19 optimizer.lr 0.005077104586180737 +39 19 negative_sampler.num_negs_per_pos 48.0 +39 19 training.batch_size 0.0 +39 20 model.embedding_dim 2.0 +39 20 loss.margin 23.697791580516256 +39 20 loss.adversarial_temperature 0.3820967081796113 +39 20 optimizer.lr 0.0030962382801888078 +39 20 negative_sampler.num_negs_per_pos 58.0 +39 20 training.batch_size 1.0 +39 21 model.embedding_dim 2.0 +39 21 loss.margin 29.75107199356777 +39 21 loss.adversarial_temperature 0.15123217266893965 +39 21 optimizer.lr 0.00501486622591188 +39 21 negative_sampler.num_negs_per_pos 29.0 +39 21 training.batch_size 1.0 +39 22 model.embedding_dim 1.0 +39 22 loss.margin 25.558372870535987 +39 22 loss.adversarial_temperature 0.19872059922500737 +39 22 optimizer.lr 0.0354917607752572 +39 22 negative_sampler.num_negs_per_pos 61.0 +39 22 training.batch_size 0.0 +39 23 model.embedding_dim 2.0 +39 23 loss.margin 3.6307759488171536 +39 23 loss.adversarial_temperature 0.9508335263286237 +39 23 optimizer.lr 0.003856723800474317 +39 23 negative_sampler.num_negs_per_pos 37.0 +39 23 training.batch_size 2.0 +39 24 model.embedding_dim 2.0 +39 24 loss.margin 26.338939040936634 +39 24 loss.adversarial_temperature 0.4364477358224699 +39 24 optimizer.lr 0.010216375450917041 +39 24 negative_sampler.num_negs_per_pos 30.0 +39 24 training.batch_size 1.0 +39 25 model.embedding_dim 1.0 +39 25 loss.margin 18.663238198955945 +39 25 loss.adversarial_temperature 0.8021279143681681 +39 25 optimizer.lr 0.014794453891828464 +39 25 negative_sampler.num_negs_per_pos 66.0 +39 25 training.batch_size 0.0 +39 26 model.embedding_dim 2.0 +39 26 loss.margin 2.331316945883423 +39 26 
loss.adversarial_temperature 0.4639202469637075 +39 26 optimizer.lr 0.03136667938528306 +39 26 negative_sampler.num_negs_per_pos 25.0 +39 26 training.batch_size 1.0 +39 27 model.embedding_dim 2.0 +39 27 loss.margin 19.535967134185253 +39 27 loss.adversarial_temperature 0.9801232565171873 +39 27 optimizer.lr 0.007802369350162227 +39 27 negative_sampler.num_negs_per_pos 78.0 +39 27 training.batch_size 2.0 +39 28 model.embedding_dim 0.0 +39 28 loss.margin 28.327014644402624 +39 28 loss.adversarial_temperature 0.2711496982596953 +39 28 optimizer.lr 0.0012094944562743963 +39 28 negative_sampler.num_negs_per_pos 42.0 +39 28 training.batch_size 0.0 +39 29 model.embedding_dim 1.0 +39 29 loss.margin 28.38108135736123 +39 29 loss.adversarial_temperature 0.4788937663625482 +39 29 optimizer.lr 0.018470623257118096 +39 29 negative_sampler.num_negs_per_pos 82.0 +39 29 training.batch_size 2.0 +39 30 model.embedding_dim 0.0 +39 30 loss.margin 18.8040414587943 +39 30 loss.adversarial_temperature 0.5812904578323166 +39 30 optimizer.lr 0.0019905594623931284 +39 30 negative_sampler.num_negs_per_pos 18.0 +39 30 training.batch_size 2.0 +39 31 model.embedding_dim 0.0 +39 31 loss.margin 28.874437317510694 +39 31 loss.adversarial_temperature 0.9942063819084835 +39 31 optimizer.lr 0.0016205650314077768 +39 31 negative_sampler.num_negs_per_pos 92.0 +39 31 training.batch_size 0.0 +39 32 model.embedding_dim 0.0 +39 32 loss.margin 24.63105328747813 +39 32 loss.adversarial_temperature 0.532608114605715 +39 32 optimizer.lr 0.0015751958218069946 +39 32 negative_sampler.num_negs_per_pos 26.0 +39 32 training.batch_size 1.0 +39 33 model.embedding_dim 2.0 +39 33 loss.margin 17.489352311765156 +39 33 loss.adversarial_temperature 0.3900350726334566 +39 33 optimizer.lr 0.0656638599375993 +39 33 negative_sampler.num_negs_per_pos 30.0 +39 33 training.batch_size 0.0 +39 34 model.embedding_dim 0.0 +39 34 loss.margin 20.081653435719268 +39 34 loss.adversarial_temperature 0.8488739733845553 +39 34 optimizer.lr 
0.04771504741732406 +39 34 negative_sampler.num_negs_per_pos 25.0 +39 34 training.batch_size 2.0 +39 35 model.embedding_dim 1.0 +39 35 loss.margin 27.601913648645937 +39 35 loss.adversarial_temperature 0.23611144403671352 +39 35 optimizer.lr 0.04462176601189394 +39 35 negative_sampler.num_negs_per_pos 0.0 +39 35 training.batch_size 2.0 +39 36 model.embedding_dim 1.0 +39 36 loss.margin 12.890113113423208 +39 36 loss.adversarial_temperature 0.14318493535079627 +39 36 optimizer.lr 0.0333789774063937 +39 36 negative_sampler.num_negs_per_pos 48.0 +39 36 training.batch_size 2.0 +39 37 model.embedding_dim 0.0 +39 37 loss.margin 11.206734977009424 +39 37 loss.adversarial_temperature 0.5428232502506425 +39 37 optimizer.lr 0.001455622951108706 +39 37 negative_sampler.num_negs_per_pos 6.0 +39 37 training.batch_size 2.0 +39 38 model.embedding_dim 2.0 +39 38 loss.margin 11.515105284223457 +39 38 loss.adversarial_temperature 0.5269371585150426 +39 38 optimizer.lr 0.0267108990950757 +39 38 negative_sampler.num_negs_per_pos 65.0 +39 38 training.batch_size 0.0 +39 39 model.embedding_dim 0.0 +39 39 loss.margin 26.24234799332402 +39 39 loss.adversarial_temperature 0.47212129440312 +39 39 optimizer.lr 0.00535896332785469 +39 39 negative_sampler.num_negs_per_pos 67.0 +39 39 training.batch_size 0.0 +39 40 model.embedding_dim 2.0 +39 40 loss.margin 14.691067416860172 +39 40 loss.adversarial_temperature 0.3616683878090937 +39 40 optimizer.lr 0.02734576242685517 +39 40 negative_sampler.num_negs_per_pos 20.0 +39 40 training.batch_size 0.0 +39 41 model.embedding_dim 2.0 +39 41 loss.margin 1.3210800256378628 +39 41 loss.adversarial_temperature 0.7110380038315163 +39 41 optimizer.lr 0.02206865141799217 +39 41 negative_sampler.num_negs_per_pos 79.0 +39 41 training.batch_size 0.0 +39 42 model.embedding_dim 0.0 +39 42 loss.margin 21.09935218094961 +39 42 loss.adversarial_temperature 0.6016242373779791 +39 42 optimizer.lr 0.0025439604186185895 +39 42 negative_sampler.num_negs_per_pos 73.0 +39 42 
training.batch_size 0.0 +39 43 model.embedding_dim 1.0 +39 43 loss.margin 1.4842557451797407 +39 43 loss.adversarial_temperature 0.7904677067201497 +39 43 optimizer.lr 0.01843754053118617 +39 43 negative_sampler.num_negs_per_pos 82.0 +39 43 training.batch_size 0.0 +39 44 model.embedding_dim 2.0 +39 44 loss.margin 16.282858477886748 +39 44 loss.adversarial_temperature 0.9021205232950279 +39 44 optimizer.lr 0.0019822436320022937 +39 44 negative_sampler.num_negs_per_pos 12.0 +39 44 training.batch_size 0.0 +39 45 model.embedding_dim 1.0 +39 45 loss.margin 6.01799290553971 +39 45 loss.adversarial_temperature 0.9902478707814318 +39 45 optimizer.lr 0.06124760430481454 +39 45 negative_sampler.num_negs_per_pos 11.0 +39 45 training.batch_size 1.0 +39 46 model.embedding_dim 1.0 +39 46 loss.margin 15.197183126179505 +39 46 loss.adversarial_temperature 0.7842078043697426 +39 46 optimizer.lr 0.00866076095805057 +39 46 negative_sampler.num_negs_per_pos 99.0 +39 46 training.batch_size 2.0 +39 47 model.embedding_dim 1.0 +39 47 loss.margin 15.950234961445627 +39 47 loss.adversarial_temperature 0.6762206715476947 +39 47 optimizer.lr 0.04401619594905831 +39 47 negative_sampler.num_negs_per_pos 66.0 +39 47 training.batch_size 0.0 +39 48 model.embedding_dim 0.0 +39 48 loss.margin 27.091584797465455 +39 48 loss.adversarial_temperature 0.3071779623299362 +39 48 optimizer.lr 0.08304481857260398 +39 48 negative_sampler.num_negs_per_pos 8.0 +39 48 training.batch_size 1.0 +39 49 model.embedding_dim 0.0 +39 49 loss.margin 1.7268013720794129 +39 49 loss.adversarial_temperature 0.48531435133055345 +39 49 optimizer.lr 0.001249719422518913 +39 49 negative_sampler.num_negs_per_pos 30.0 +39 49 training.batch_size 2.0 +39 50 model.embedding_dim 1.0 +39 50 loss.margin 18.7539214880338 +39 50 loss.adversarial_temperature 0.3115639216264696 +39 50 optimizer.lr 0.020718461286133832 +39 50 negative_sampler.num_negs_per_pos 79.0 +39 50 training.batch_size 1.0 +39 51 model.embedding_dim 2.0 +39 51 
loss.margin 6.698778062407833 +39 51 loss.adversarial_temperature 0.20842865596418697 +39 51 optimizer.lr 0.00467985207856886 +39 51 negative_sampler.num_negs_per_pos 75.0 +39 51 training.batch_size 2.0 +39 52 model.embedding_dim 2.0 +39 52 loss.margin 6.160014915551836 +39 52 loss.adversarial_temperature 0.6562335713057745 +39 52 optimizer.lr 0.01067169944409689 +39 52 negative_sampler.num_negs_per_pos 48.0 +39 52 training.batch_size 0.0 +39 53 model.embedding_dim 2.0 +39 53 loss.margin 29.881163510808832 +39 53 loss.adversarial_temperature 0.30402007251743823 +39 53 optimizer.lr 0.0012473798015150885 +39 53 negative_sampler.num_negs_per_pos 95.0 +39 53 training.batch_size 1.0 +39 54 model.embedding_dim 1.0 +39 54 loss.margin 22.21994208774176 +39 54 loss.adversarial_temperature 0.14990389496471263 +39 54 optimizer.lr 0.0017898407061401373 +39 54 negative_sampler.num_negs_per_pos 37.0 +39 54 training.batch_size 2.0 +39 55 model.embedding_dim 1.0 +39 55 loss.margin 1.0372573716299591 +39 55 loss.adversarial_temperature 0.5735364544324029 +39 55 optimizer.lr 0.01217133658769629 +39 55 negative_sampler.num_negs_per_pos 76.0 +39 55 training.batch_size 0.0 +39 56 model.embedding_dim 1.0 +39 56 loss.margin 22.35162802004819 +39 56 loss.adversarial_temperature 0.6876959606094935 +39 56 optimizer.lr 0.027555372987004445 +39 56 negative_sampler.num_negs_per_pos 1.0 +39 56 training.batch_size 1.0 +39 57 model.embedding_dim 0.0 +39 57 loss.margin 24.954230040058896 +39 57 loss.adversarial_temperature 0.7367850334033024 +39 57 optimizer.lr 0.0011050901626348363 +39 57 negative_sampler.num_negs_per_pos 16.0 +39 57 training.batch_size 1.0 +39 58 model.embedding_dim 2.0 +39 58 loss.margin 25.495724219374367 +39 58 loss.adversarial_temperature 0.26315183822606497 +39 58 optimizer.lr 0.005900906397657619 +39 58 negative_sampler.num_negs_per_pos 8.0 +39 58 training.batch_size 0.0 +39 59 model.embedding_dim 2.0 +39 59 loss.margin 20.2710411296283 +39 59 loss.adversarial_temperature 
0.9842308557508316 +39 59 optimizer.lr 0.048179549244469544 +39 59 negative_sampler.num_negs_per_pos 12.0 +39 59 training.batch_size 0.0 +39 60 model.embedding_dim 0.0 +39 60 loss.margin 10.587681489943602 +39 60 loss.adversarial_temperature 0.9108918613101292 +39 60 optimizer.lr 0.06882955642286197 +39 60 negative_sampler.num_negs_per_pos 72.0 +39 60 training.batch_size 2.0 +39 61 model.embedding_dim 1.0 +39 61 loss.margin 28.892450266507556 +39 61 loss.adversarial_temperature 0.6401124665794703 +39 61 optimizer.lr 0.022418625604376706 +39 61 negative_sampler.num_negs_per_pos 74.0 +39 61 training.batch_size 2.0 +39 62 model.embedding_dim 0.0 +39 62 loss.margin 27.389492757458886 +39 62 loss.adversarial_temperature 0.5476796662726994 +39 62 optimizer.lr 0.012431939940306872 +39 62 negative_sampler.num_negs_per_pos 81.0 +39 62 training.batch_size 1.0 +39 63 model.embedding_dim 0.0 +39 63 loss.margin 7.7676502545545585 +39 63 loss.adversarial_temperature 0.7635516641675627 +39 63 optimizer.lr 0.006363142920050361 +39 63 negative_sampler.num_negs_per_pos 4.0 +39 63 training.batch_size 2.0 +39 64 model.embedding_dim 1.0 +39 64 loss.margin 28.655396950957606 +39 64 loss.adversarial_temperature 0.7450929656261961 +39 64 optimizer.lr 0.0039053139527481452 +39 64 negative_sampler.num_negs_per_pos 91.0 +39 64 training.batch_size 2.0 +39 65 model.embedding_dim 2.0 +39 65 loss.margin 1.9456656366446137 +39 65 loss.adversarial_temperature 0.8900805190535385 +39 65 optimizer.lr 0.03106257512637217 +39 65 negative_sampler.num_negs_per_pos 13.0 +39 65 training.batch_size 2.0 +39 66 model.embedding_dim 0.0 +39 66 loss.margin 26.160903211484236 +39 66 loss.adversarial_temperature 0.9728590083410348 +39 66 optimizer.lr 0.0023611548395917367 +39 66 negative_sampler.num_negs_per_pos 95.0 +39 66 training.batch_size 1.0 +39 67 model.embedding_dim 0.0 +39 67 loss.margin 11.91307550190179 +39 67 loss.adversarial_temperature 0.34099054500579434 +39 67 optimizer.lr 0.002029308709197141 +39 
67 negative_sampler.num_negs_per_pos 66.0 +39 67 training.batch_size 1.0 +39 68 model.embedding_dim 0.0 +39 68 loss.margin 19.427970900770152 +39 68 loss.adversarial_temperature 0.9029174377793623 +39 68 optimizer.lr 0.002153168125248037 +39 68 negative_sampler.num_negs_per_pos 68.0 +39 68 training.batch_size 2.0 +39 69 model.embedding_dim 0.0 +39 69 loss.margin 4.683376546078243 +39 69 loss.adversarial_temperature 0.7363022914172556 +39 69 optimizer.lr 0.003398499279281018 +39 69 negative_sampler.num_negs_per_pos 5.0 +39 69 training.batch_size 1.0 +39 70 model.embedding_dim 2.0 +39 70 loss.margin 15.717181444109054 +39 70 loss.adversarial_temperature 0.6922309031422034 +39 70 optimizer.lr 0.03725183356314152 +39 70 negative_sampler.num_negs_per_pos 91.0 +39 70 training.batch_size 1.0 +39 71 model.embedding_dim 0.0 +39 71 loss.margin 23.14182708212804 +39 71 loss.adversarial_temperature 0.7171292721273239 +39 71 optimizer.lr 0.03531691756788518 +39 71 negative_sampler.num_negs_per_pos 89.0 +39 71 training.batch_size 2.0 +39 72 model.embedding_dim 2.0 +39 72 loss.margin 21.334767675028445 +39 72 loss.adversarial_temperature 0.11708373207093609 +39 72 optimizer.lr 0.015151049060037556 +39 72 negative_sampler.num_negs_per_pos 94.0 +39 72 training.batch_size 1.0 +39 73 model.embedding_dim 1.0 +39 73 loss.margin 16.998358680184687 +39 73 loss.adversarial_temperature 0.13922573563924281 +39 73 optimizer.lr 0.005195784942526593 +39 73 negative_sampler.num_negs_per_pos 87.0 +39 73 training.batch_size 0.0 +39 74 model.embedding_dim 0.0 +39 74 loss.margin 24.04759308944208 +39 74 loss.adversarial_temperature 0.3561852053288066 +39 74 optimizer.lr 0.08493477463553865 +39 74 negative_sampler.num_negs_per_pos 68.0 +39 74 training.batch_size 1.0 +39 75 model.embedding_dim 0.0 +39 75 loss.margin 9.518145044055183 +39 75 loss.adversarial_temperature 0.5372924448973976 +39 75 optimizer.lr 0.017817631068909087 +39 75 negative_sampler.num_negs_per_pos 41.0 +39 75 training.batch_size 
1.0 +39 76 model.embedding_dim 1.0 +39 76 loss.margin 15.554095897161947 +39 76 loss.adversarial_temperature 0.14016881983654553 +39 76 optimizer.lr 0.014578853107349967 +39 76 negative_sampler.num_negs_per_pos 52.0 +39 76 training.batch_size 2.0 +39 77 model.embedding_dim 1.0 +39 77 loss.margin 18.356129208276684 +39 77 loss.adversarial_temperature 0.6349996276898708 +39 77 optimizer.lr 0.022975616034596434 +39 77 negative_sampler.num_negs_per_pos 22.0 +39 77 training.batch_size 1.0 +39 78 model.embedding_dim 2.0 +39 78 loss.margin 25.638525677949676 +39 78 loss.adversarial_temperature 0.7821406869225123 +39 78 optimizer.lr 0.006311447460243363 +39 78 negative_sampler.num_negs_per_pos 18.0 +39 78 training.batch_size 2.0 +39 79 model.embedding_dim 0.0 +39 79 loss.margin 9.475562931703102 +39 79 loss.adversarial_temperature 0.45871825489833806 +39 79 optimizer.lr 0.056497295953279056 +39 79 negative_sampler.num_negs_per_pos 55.0 +39 79 training.batch_size 2.0 +39 80 model.embedding_dim 1.0 +39 80 loss.margin 24.176317983922083 +39 80 loss.adversarial_temperature 0.27357740820281073 +39 80 optimizer.lr 0.003190480123611858 +39 80 negative_sampler.num_negs_per_pos 62.0 +39 80 training.batch_size 0.0 +39 81 model.embedding_dim 2.0 +39 81 loss.margin 16.74147400051342 +39 81 loss.adversarial_temperature 0.9759212587948302 +39 81 optimizer.lr 0.022814533604429685 +39 81 negative_sampler.num_negs_per_pos 13.0 +39 81 training.batch_size 1.0 +39 82 model.embedding_dim 1.0 +39 82 loss.margin 14.25244572985482 +39 82 loss.adversarial_temperature 0.11711640318561642 +39 82 optimizer.lr 0.05381557401249692 +39 82 negative_sampler.num_negs_per_pos 32.0 +39 82 training.batch_size 2.0 +39 83 model.embedding_dim 0.0 +39 83 loss.margin 15.932396890748674 +39 83 loss.adversarial_temperature 0.742462107995823 +39 83 optimizer.lr 0.031011224103777428 +39 83 negative_sampler.num_negs_per_pos 36.0 +39 83 training.batch_size 2.0 +39 84 model.embedding_dim 0.0 +39 84 loss.margin 
1.860496089871682 +39 84 loss.adversarial_temperature 0.830707324825975 +39 84 optimizer.lr 0.018493458859808402 +39 84 negative_sampler.num_negs_per_pos 32.0 +39 84 training.batch_size 2.0 +39 85 model.embedding_dim 2.0 +39 85 loss.margin 9.983781349457546 +39 85 loss.adversarial_temperature 0.11197817071011229 +39 85 optimizer.lr 0.007230380996513985 +39 85 negative_sampler.num_negs_per_pos 89.0 +39 85 training.batch_size 0.0 +39 86 model.embedding_dim 1.0 +39 86 loss.margin 17.643628096457242 +39 86 loss.adversarial_temperature 0.6228652777783198 +39 86 optimizer.lr 0.005203257346454988 +39 86 negative_sampler.num_negs_per_pos 87.0 +39 86 training.batch_size 2.0 +39 87 model.embedding_dim 1.0 +39 87 loss.margin 25.71681727671227 +39 87 loss.adversarial_temperature 0.9700119265605655 +39 87 optimizer.lr 0.0011902717863337192 +39 87 negative_sampler.num_negs_per_pos 34.0 +39 87 training.batch_size 2.0 +39 88 model.embedding_dim 0.0 +39 88 loss.margin 16.33351518193193 +39 88 loss.adversarial_temperature 0.43255541041231493 +39 88 optimizer.lr 0.00577247989197207 +39 88 negative_sampler.num_negs_per_pos 5.0 +39 88 training.batch_size 0.0 +39 89 model.embedding_dim 1.0 +39 89 loss.margin 8.888572700265321 +39 89 loss.adversarial_temperature 0.10141963130661769 +39 89 optimizer.lr 0.004581069415016014 +39 89 negative_sampler.num_negs_per_pos 12.0 +39 89 training.batch_size 0.0 +39 90 model.embedding_dim 2.0 +39 90 loss.margin 29.410553405595927 +39 90 loss.adversarial_temperature 0.3547483394680748 +39 90 optimizer.lr 0.018108723989893077 +39 90 negative_sampler.num_negs_per_pos 81.0 +39 90 training.batch_size 0.0 +39 91 model.embedding_dim 1.0 +39 91 loss.margin 22.32925405574688 +39 91 loss.adversarial_temperature 0.7332048033445036 +39 91 optimizer.lr 0.002578024711992274 +39 91 negative_sampler.num_negs_per_pos 11.0 +39 91 training.batch_size 0.0 +39 92 model.embedding_dim 1.0 +39 92 loss.margin 17.49507897066352 +39 92 loss.adversarial_temperature 
0.9386540912975719 +39 92 optimizer.lr 0.0038164420847986917 +39 92 negative_sampler.num_negs_per_pos 94.0 +39 92 training.batch_size 1.0 +39 93 model.embedding_dim 2.0 +39 93 loss.margin 13.301711433066316 +39 93 loss.adversarial_temperature 0.8496825851926324 +39 93 optimizer.lr 0.0018583561827574297 +39 93 negative_sampler.num_negs_per_pos 55.0 +39 93 training.batch_size 0.0 +39 94 model.embedding_dim 1.0 +39 94 loss.margin 11.59518239623898 +39 94 loss.adversarial_temperature 0.9762723240057392 +39 94 optimizer.lr 0.020829105016057652 +39 94 negative_sampler.num_negs_per_pos 1.0 +39 94 training.batch_size 1.0 +39 95 model.embedding_dim 0.0 +39 95 loss.margin 28.23487025214465 +39 95 loss.adversarial_temperature 0.5896224697988061 +39 95 optimizer.lr 0.0019969167875039076 +39 95 negative_sampler.num_negs_per_pos 93.0 +39 95 training.batch_size 2.0 +39 96 model.embedding_dim 2.0 +39 96 loss.margin 14.06033600474205 +39 96 loss.adversarial_temperature 0.17072418777942222 +39 96 optimizer.lr 0.0023411227417205133 +39 96 negative_sampler.num_negs_per_pos 56.0 +39 96 training.batch_size 0.0 +39 97 model.embedding_dim 1.0 +39 97 loss.margin 11.58427218916652 +39 97 loss.adversarial_temperature 0.7061766163374034 +39 97 optimizer.lr 0.0012851974082124882 +39 97 negative_sampler.num_negs_per_pos 10.0 +39 97 training.batch_size 2.0 +39 98 model.embedding_dim 1.0 +39 98 loss.margin 12.019826276405306 +39 98 loss.adversarial_temperature 0.35557148816617834 +39 98 optimizer.lr 0.016954829365697817 +39 98 negative_sampler.num_negs_per_pos 2.0 +39 98 training.batch_size 1.0 +39 99 model.embedding_dim 0.0 +39 99 loss.margin 27.04144583277538 +39 99 loss.adversarial_temperature 0.371542440747902 +39 99 optimizer.lr 0.001892226077024684 +39 99 negative_sampler.num_negs_per_pos 46.0 +39 99 training.batch_size 0.0 +39 100 model.embedding_dim 0.0 +39 100 loss.margin 12.344895509218913 +39 100 loss.adversarial_temperature 0.5005602077147892 +39 100 optimizer.lr 0.0016217993093450938 
+39 100 negative_sampler.num_negs_per_pos 71.0 +39 100 training.batch_size 0.0 +39 1 dataset """kinships""" +39 1 model """complex""" +39 1 loss """nssa""" +39 1 regularizer """no""" +39 1 optimizer """adam""" +39 1 training_loop """owa""" +39 1 negative_sampler """basic""" +39 1 evaluator """rankbased""" +39 2 dataset """kinships""" +39 2 model """complex""" +39 2 loss """nssa""" +39 2 regularizer """no""" +39 2 optimizer """adam""" +39 2 training_loop """owa""" +39 2 negative_sampler """basic""" +39 2 evaluator """rankbased""" +39 3 dataset """kinships""" +39 3 model """complex""" +39 3 loss """nssa""" +39 3 regularizer """no""" +39 3 optimizer """adam""" +39 3 training_loop """owa""" +39 3 negative_sampler """basic""" +39 3 evaluator """rankbased""" +39 4 dataset """kinships""" +39 4 model """complex""" +39 4 loss """nssa""" +39 4 regularizer """no""" +39 4 optimizer """adam""" +39 4 training_loop """owa""" +39 4 negative_sampler """basic""" +39 4 evaluator """rankbased""" +39 5 dataset """kinships""" +39 5 model """complex""" +39 5 loss """nssa""" +39 5 regularizer """no""" +39 5 optimizer """adam""" +39 5 training_loop """owa""" +39 5 negative_sampler """basic""" +39 5 evaluator """rankbased""" +39 6 dataset """kinships""" +39 6 model """complex""" +39 6 loss """nssa""" +39 6 regularizer """no""" +39 6 optimizer """adam""" +39 6 training_loop """owa""" +39 6 negative_sampler """basic""" +39 6 evaluator """rankbased""" +39 7 dataset """kinships""" +39 7 model """complex""" +39 7 loss """nssa""" +39 7 regularizer """no""" +39 7 optimizer """adam""" +39 7 training_loop """owa""" +39 7 negative_sampler """basic""" +39 7 evaluator """rankbased""" +39 8 dataset """kinships""" +39 8 model """complex""" +39 8 loss """nssa""" +39 8 regularizer """no""" +39 8 optimizer """adam""" +39 8 training_loop """owa""" +39 8 negative_sampler """basic""" +39 8 evaluator """rankbased""" +39 9 dataset """kinships""" +39 9 model """complex""" +39 9 loss """nssa""" +39 9 regularizer 
"""no""" +39 9 optimizer """adam""" +39 9 training_loop """owa""" +39 9 negative_sampler """basic""" +39 9 evaluator """rankbased""" +39 10 dataset """kinships""" +39 10 model """complex""" +39 10 loss """nssa""" +39 10 regularizer """no""" +39 10 optimizer """adam""" +39 10 training_loop """owa""" +39 10 negative_sampler """basic""" +39 10 evaluator """rankbased""" +39 11 dataset """kinships""" +39 11 model """complex""" +39 11 loss """nssa""" +39 11 regularizer """no""" +39 11 optimizer """adam""" +39 11 training_loop """owa""" +39 11 negative_sampler """basic""" +39 11 evaluator """rankbased""" +39 12 dataset """kinships""" +39 12 model """complex""" +39 12 loss """nssa""" +39 12 regularizer """no""" +39 12 optimizer """adam""" +39 12 training_loop """owa""" +39 12 negative_sampler """basic""" +39 12 evaluator """rankbased""" +39 13 dataset """kinships""" +39 13 model """complex""" +39 13 loss """nssa""" +39 13 regularizer """no""" +39 13 optimizer """adam""" +39 13 training_loop """owa""" +39 13 negative_sampler """basic""" +39 13 evaluator """rankbased""" +39 14 dataset """kinships""" +39 14 model """complex""" +39 14 loss """nssa""" +39 14 regularizer """no""" +39 14 optimizer """adam""" +39 14 training_loop """owa""" +39 14 negative_sampler """basic""" +39 14 evaluator """rankbased""" +39 15 dataset """kinships""" +39 15 model """complex""" +39 15 loss """nssa""" +39 15 regularizer """no""" +39 15 optimizer """adam""" +39 15 training_loop """owa""" +39 15 negative_sampler """basic""" +39 15 evaluator """rankbased""" +39 16 dataset """kinships""" +39 16 model """complex""" +39 16 loss """nssa""" +39 16 regularizer """no""" +39 16 optimizer """adam""" +39 16 training_loop """owa""" +39 16 negative_sampler """basic""" +39 16 evaluator """rankbased""" +39 17 dataset """kinships""" +39 17 model """complex""" +39 17 loss """nssa""" +39 17 regularizer """no""" +39 17 optimizer """adam""" +39 17 training_loop """owa""" +39 17 negative_sampler """basic""" +39 17 
evaluator """rankbased""" +39 18 dataset """kinships""" +39 18 model """complex""" +39 18 loss """nssa""" +39 18 regularizer """no""" +39 18 optimizer """adam""" +39 18 training_loop """owa""" +39 18 negative_sampler """basic""" +39 18 evaluator """rankbased""" +39 19 dataset """kinships""" +39 19 model """complex""" +39 19 loss """nssa""" +39 19 regularizer """no""" +39 19 optimizer """adam""" +39 19 training_loop """owa""" +39 19 negative_sampler """basic""" +39 19 evaluator """rankbased""" +39 20 dataset """kinships""" +39 20 model """complex""" +39 20 loss """nssa""" +39 20 regularizer """no""" +39 20 optimizer """adam""" +39 20 training_loop """owa""" +39 20 negative_sampler """basic""" +39 20 evaluator """rankbased""" +39 21 dataset """kinships""" +39 21 model """complex""" +39 21 loss """nssa""" +39 21 regularizer """no""" +39 21 optimizer """adam""" +39 21 training_loop """owa""" +39 21 negative_sampler """basic""" +39 21 evaluator """rankbased""" +39 22 dataset """kinships""" +39 22 model """complex""" +39 22 loss """nssa""" +39 22 regularizer """no""" +39 22 optimizer """adam""" +39 22 training_loop """owa""" +39 22 negative_sampler """basic""" +39 22 evaluator """rankbased""" +39 23 dataset """kinships""" +39 23 model """complex""" +39 23 loss """nssa""" +39 23 regularizer """no""" +39 23 optimizer """adam""" +39 23 training_loop """owa""" +39 23 negative_sampler """basic""" +39 23 evaluator """rankbased""" +39 24 dataset """kinships""" +39 24 model """complex""" +39 24 loss """nssa""" +39 24 regularizer """no""" +39 24 optimizer """adam""" +39 24 training_loop """owa""" +39 24 negative_sampler """basic""" +39 24 evaluator """rankbased""" +39 25 dataset """kinships""" +39 25 model """complex""" +39 25 loss """nssa""" +39 25 regularizer """no""" +39 25 optimizer """adam""" +39 25 training_loop """owa""" +39 25 negative_sampler """basic""" +39 25 evaluator """rankbased""" +39 26 dataset """kinships""" +39 26 model """complex""" +39 26 loss """nssa""" +39 
26 regularizer """no""" +39 26 optimizer """adam""" +39 26 training_loop """owa""" +39 26 negative_sampler """basic""" +39 26 evaluator """rankbased""" +39 27 dataset """kinships""" +39 27 model """complex""" +39 27 loss """nssa""" +39 27 regularizer """no""" +39 27 optimizer """adam""" +39 27 training_loop """owa""" +39 27 negative_sampler """basic""" +39 27 evaluator """rankbased""" +39 28 dataset """kinships""" +39 28 model """complex""" +39 28 loss """nssa""" +39 28 regularizer """no""" +39 28 optimizer """adam""" +39 28 training_loop """owa""" +39 28 negative_sampler """basic""" +39 28 evaluator """rankbased""" +39 29 dataset """kinships""" +39 29 model """complex""" +39 29 loss """nssa""" +39 29 regularizer """no""" +39 29 optimizer """adam""" +39 29 training_loop """owa""" +39 29 negative_sampler """basic""" +39 29 evaluator """rankbased""" +39 30 dataset """kinships""" +39 30 model """complex""" +39 30 loss """nssa""" +39 30 regularizer """no""" +39 30 optimizer """adam""" +39 30 training_loop """owa""" +39 30 negative_sampler """basic""" +39 30 evaluator """rankbased""" +39 31 dataset """kinships""" +39 31 model """complex""" +39 31 loss """nssa""" +39 31 regularizer """no""" +39 31 optimizer """adam""" +39 31 training_loop """owa""" +39 31 negative_sampler """basic""" +39 31 evaluator """rankbased""" +39 32 dataset """kinships""" +39 32 model """complex""" +39 32 loss """nssa""" +39 32 regularizer """no""" +39 32 optimizer """adam""" +39 32 training_loop """owa""" +39 32 negative_sampler """basic""" +39 32 evaluator """rankbased""" +39 33 dataset """kinships""" +39 33 model """complex""" +39 33 loss """nssa""" +39 33 regularizer """no""" +39 33 optimizer """adam""" +39 33 training_loop """owa""" +39 33 negative_sampler """basic""" +39 33 evaluator """rankbased""" +39 34 dataset """kinships""" +39 34 model """complex""" +39 34 loss """nssa""" +39 34 regularizer """no""" +39 34 optimizer """adam""" +39 34 training_loop """owa""" +39 34 negative_sampler 
"""basic""" +39 34 evaluator """rankbased""" +39 35 dataset """kinships""" +39 35 model """complex""" +39 35 loss """nssa""" +39 35 regularizer """no""" +39 35 optimizer """adam""" +39 35 training_loop """owa""" +39 35 negative_sampler """basic""" +39 35 evaluator """rankbased""" +39 36 dataset """kinships""" +39 36 model """complex""" +39 36 loss """nssa""" +39 36 regularizer """no""" +39 36 optimizer """adam""" +39 36 training_loop """owa""" +39 36 negative_sampler """basic""" +39 36 evaluator """rankbased""" +39 37 dataset """kinships""" +39 37 model """complex""" +39 37 loss """nssa""" +39 37 regularizer """no""" +39 37 optimizer """adam""" +39 37 training_loop """owa""" +39 37 negative_sampler """basic""" +39 37 evaluator """rankbased""" +39 38 dataset """kinships""" +39 38 model """complex""" +39 38 loss """nssa""" +39 38 regularizer """no""" +39 38 optimizer """adam""" +39 38 training_loop """owa""" +39 38 negative_sampler """basic""" +39 38 evaluator """rankbased""" +39 39 dataset """kinships""" +39 39 model """complex""" +39 39 loss """nssa""" +39 39 regularizer """no""" +39 39 optimizer """adam""" +39 39 training_loop """owa""" +39 39 negative_sampler """basic""" +39 39 evaluator """rankbased""" +39 40 dataset """kinships""" +39 40 model """complex""" +39 40 loss """nssa""" +39 40 regularizer """no""" +39 40 optimizer """adam""" +39 40 training_loop """owa""" +39 40 negative_sampler """basic""" +39 40 evaluator """rankbased""" +39 41 dataset """kinships""" +39 41 model """complex""" +39 41 loss """nssa""" +39 41 regularizer """no""" +39 41 optimizer """adam""" +39 41 training_loop """owa""" +39 41 negative_sampler """basic""" +39 41 evaluator """rankbased""" +39 42 dataset """kinships""" +39 42 model """complex""" +39 42 loss """nssa""" +39 42 regularizer """no""" +39 42 optimizer """adam""" +39 42 training_loop """owa""" +39 42 negative_sampler """basic""" +39 42 evaluator """rankbased""" +39 43 dataset """kinships""" +39 43 model """complex""" +39 43 
loss """nssa""" +39 43 regularizer """no""" +39 43 optimizer """adam""" +39 43 training_loop """owa""" +39 43 negative_sampler """basic""" +39 43 evaluator """rankbased""" +39 44 dataset """kinships""" +39 44 model """complex""" +39 44 loss """nssa""" +39 44 regularizer """no""" +39 44 optimizer """adam""" +39 44 training_loop """owa""" +39 44 negative_sampler """basic""" +39 44 evaluator """rankbased""" +39 45 dataset """kinships""" +39 45 model """complex""" +39 45 loss """nssa""" +39 45 regularizer """no""" +39 45 optimizer """adam""" +39 45 training_loop """owa""" +39 45 negative_sampler """basic""" +39 45 evaluator """rankbased""" +39 46 dataset """kinships""" +39 46 model """complex""" +39 46 loss """nssa""" +39 46 regularizer """no""" +39 46 optimizer """adam""" +39 46 training_loop """owa""" +39 46 negative_sampler """basic""" +39 46 evaluator """rankbased""" +39 47 dataset """kinships""" +39 47 model """complex""" +39 47 loss """nssa""" +39 47 regularizer """no""" +39 47 optimizer """adam""" +39 47 training_loop """owa""" +39 47 negative_sampler """basic""" +39 47 evaluator """rankbased""" +39 48 dataset """kinships""" +39 48 model """complex""" +39 48 loss """nssa""" +39 48 regularizer """no""" +39 48 optimizer """adam""" +39 48 training_loop """owa""" +39 48 negative_sampler """basic""" +39 48 evaluator """rankbased""" +39 49 dataset """kinships""" +39 49 model """complex""" +39 49 loss """nssa""" +39 49 regularizer """no""" +39 49 optimizer """adam""" +39 49 training_loop """owa""" +39 49 negative_sampler """basic""" +39 49 evaluator """rankbased""" +39 50 dataset """kinships""" +39 50 model """complex""" +39 50 loss """nssa""" +39 50 regularizer """no""" +39 50 optimizer """adam""" +39 50 training_loop """owa""" +39 50 negative_sampler """basic""" +39 50 evaluator """rankbased""" +39 51 dataset """kinships""" +39 51 model """complex""" +39 51 loss """nssa""" +39 51 regularizer """no""" +39 51 optimizer """adam""" +39 51 training_loop """owa""" +39 51 
negative_sampler """basic""" +39 51 evaluator """rankbased""" +39 52 dataset """kinships""" +39 52 model """complex""" +39 52 loss """nssa""" +39 52 regularizer """no""" +39 52 optimizer """adam""" +39 52 training_loop """owa""" +39 52 negative_sampler """basic""" +39 52 evaluator """rankbased""" +39 53 dataset """kinships""" +39 53 model """complex""" +39 53 loss """nssa""" +39 53 regularizer """no""" +39 53 optimizer """adam""" +39 53 training_loop """owa""" +39 53 negative_sampler """basic""" +39 53 evaluator """rankbased""" +39 54 dataset """kinships""" +39 54 model """complex""" +39 54 loss """nssa""" +39 54 regularizer """no""" +39 54 optimizer """adam""" +39 54 training_loop """owa""" +39 54 negative_sampler """basic""" +39 54 evaluator """rankbased""" +39 55 dataset """kinships""" +39 55 model """complex""" +39 55 loss """nssa""" +39 55 regularizer """no""" +39 55 optimizer """adam""" +39 55 training_loop """owa""" +39 55 negative_sampler """basic""" +39 55 evaluator """rankbased""" +39 56 dataset """kinships""" +39 56 model """complex""" +39 56 loss """nssa""" +39 56 regularizer """no""" +39 56 optimizer """adam""" +39 56 training_loop """owa""" +39 56 negative_sampler """basic""" +39 56 evaluator """rankbased""" +39 57 dataset """kinships""" +39 57 model """complex""" +39 57 loss """nssa""" +39 57 regularizer """no""" +39 57 optimizer """adam""" +39 57 training_loop """owa""" +39 57 negative_sampler """basic""" +39 57 evaluator """rankbased""" +39 58 dataset """kinships""" +39 58 model """complex""" +39 58 loss """nssa""" +39 58 regularizer """no""" +39 58 optimizer """adam""" +39 58 training_loop """owa""" +39 58 negative_sampler """basic""" +39 58 evaluator """rankbased""" +39 59 dataset """kinships""" +39 59 model """complex""" +39 59 loss """nssa""" +39 59 regularizer """no""" +39 59 optimizer """adam""" +39 59 training_loop """owa""" +39 59 negative_sampler """basic""" +39 59 evaluator """rankbased""" +39 60 dataset """kinships""" +39 60 model 
"""complex""" +39 60 loss """nssa""" +39 60 regularizer """no""" +39 60 optimizer """adam""" +39 60 training_loop """owa""" +39 60 negative_sampler """basic""" +39 60 evaluator """rankbased""" +39 61 dataset """kinships""" +39 61 model """complex""" +39 61 loss """nssa""" +39 61 regularizer """no""" +39 61 optimizer """adam""" +39 61 training_loop """owa""" +39 61 negative_sampler """basic""" +39 61 evaluator """rankbased""" +39 62 dataset """kinships""" +39 62 model """complex""" +39 62 loss """nssa""" +39 62 regularizer """no""" +39 62 optimizer """adam""" +39 62 training_loop """owa""" +39 62 negative_sampler """basic""" +39 62 evaluator """rankbased""" +39 63 dataset """kinships""" +39 63 model """complex""" +39 63 loss """nssa""" +39 63 regularizer """no""" +39 63 optimizer """adam""" +39 63 training_loop """owa""" +39 63 negative_sampler """basic""" +39 63 evaluator """rankbased""" +39 64 dataset """kinships""" +39 64 model """complex""" +39 64 loss """nssa""" +39 64 regularizer """no""" +39 64 optimizer """adam""" +39 64 training_loop """owa""" +39 64 negative_sampler """basic""" +39 64 evaluator """rankbased""" +39 65 dataset """kinships""" +39 65 model """complex""" +39 65 loss """nssa""" +39 65 regularizer """no""" +39 65 optimizer """adam""" +39 65 training_loop """owa""" +39 65 negative_sampler """basic""" +39 65 evaluator """rankbased""" +39 66 dataset """kinships""" +39 66 model """complex""" +39 66 loss """nssa""" +39 66 regularizer """no""" +39 66 optimizer """adam""" +39 66 training_loop """owa""" +39 66 negative_sampler """basic""" +39 66 evaluator """rankbased""" +39 67 dataset """kinships""" +39 67 model """complex""" +39 67 loss """nssa""" +39 67 regularizer """no""" +39 67 optimizer """adam""" +39 67 training_loop """owa""" +39 67 negative_sampler """basic""" +39 67 evaluator """rankbased""" +39 68 dataset """kinships""" +39 68 model """complex""" +39 68 loss """nssa""" +39 68 regularizer """no""" +39 68 optimizer """adam""" +39 68 
training_loop """owa""" +39 68 negative_sampler """basic""" +39 68 evaluator """rankbased""" +39 69 dataset """kinships""" +39 69 model """complex""" +39 69 loss """nssa""" +39 69 regularizer """no""" +39 69 optimizer """adam""" +39 69 training_loop """owa""" +39 69 negative_sampler """basic""" +39 69 evaluator """rankbased""" +39 70 dataset """kinships""" +39 70 model """complex""" +39 70 loss """nssa""" +39 70 regularizer """no""" +39 70 optimizer """adam""" +39 70 training_loop """owa""" +39 70 negative_sampler """basic""" +39 70 evaluator """rankbased""" +39 71 dataset """kinships""" +39 71 model """complex""" +39 71 loss """nssa""" +39 71 regularizer """no""" +39 71 optimizer """adam""" +39 71 training_loop """owa""" +39 71 negative_sampler """basic""" +39 71 evaluator """rankbased""" +39 72 dataset """kinships""" +39 72 model """complex""" +39 72 loss """nssa""" +39 72 regularizer """no""" +39 72 optimizer """adam""" +39 72 training_loop """owa""" +39 72 negative_sampler """basic""" +39 72 evaluator """rankbased""" +39 73 dataset """kinships""" +39 73 model """complex""" +39 73 loss """nssa""" +39 73 regularizer """no""" +39 73 optimizer """adam""" +39 73 training_loop """owa""" +39 73 negative_sampler """basic""" +39 73 evaluator """rankbased""" +39 74 dataset """kinships""" +39 74 model """complex""" +39 74 loss """nssa""" +39 74 regularizer """no""" +39 74 optimizer """adam""" +39 74 training_loop """owa""" +39 74 negative_sampler """basic""" +39 74 evaluator """rankbased""" +39 75 dataset """kinships""" +39 75 model """complex""" +39 75 loss """nssa""" +39 75 regularizer """no""" +39 75 optimizer """adam""" +39 75 training_loop """owa""" +39 75 negative_sampler """basic""" +39 75 evaluator """rankbased""" +39 76 dataset """kinships""" +39 76 model """complex""" +39 76 loss """nssa""" +39 76 regularizer """no""" +39 76 optimizer """adam""" +39 76 training_loop """owa""" +39 76 negative_sampler """basic""" +39 76 evaluator """rankbased""" +39 77 dataset 
"""kinships""" +39 77 model """complex""" +39 77 loss """nssa""" +39 77 regularizer """no""" +39 77 optimizer """adam""" +39 77 training_loop """owa""" +39 77 negative_sampler """basic""" +39 77 evaluator """rankbased""" +39 78 dataset """kinships""" +39 78 model """complex""" +39 78 loss """nssa""" +39 78 regularizer """no""" +39 78 optimizer """adam""" +39 78 training_loop """owa""" +39 78 negative_sampler """basic""" +39 78 evaluator """rankbased""" +39 79 dataset """kinships""" +39 79 model """complex""" +39 79 loss """nssa""" +39 79 regularizer """no""" +39 79 optimizer """adam""" +39 79 training_loop """owa""" +39 79 negative_sampler """basic""" +39 79 evaluator """rankbased""" +39 80 dataset """kinships""" +39 80 model """complex""" +39 80 loss """nssa""" +39 80 regularizer """no""" +39 80 optimizer """adam""" +39 80 training_loop """owa""" +39 80 negative_sampler """basic""" +39 80 evaluator """rankbased""" +39 81 dataset """kinships""" +39 81 model """complex""" +39 81 loss """nssa""" +39 81 regularizer """no""" +39 81 optimizer """adam""" +39 81 training_loop """owa""" +39 81 negative_sampler """basic""" +39 81 evaluator """rankbased""" +39 82 dataset """kinships""" +39 82 model """complex""" +39 82 loss """nssa""" +39 82 regularizer """no""" +39 82 optimizer """adam""" +39 82 training_loop """owa""" +39 82 negative_sampler """basic""" +39 82 evaluator """rankbased""" +39 83 dataset """kinships""" +39 83 model """complex""" +39 83 loss """nssa""" +39 83 regularizer """no""" +39 83 optimizer """adam""" +39 83 training_loop """owa""" +39 83 negative_sampler """basic""" +39 83 evaluator """rankbased""" +39 84 dataset """kinships""" +39 84 model """complex""" +39 84 loss """nssa""" +39 84 regularizer """no""" +39 84 optimizer """adam""" +39 84 training_loop """owa""" +39 84 negative_sampler """basic""" +39 84 evaluator """rankbased""" +39 85 dataset """kinships""" +39 85 model """complex""" +39 85 loss """nssa""" +39 85 regularizer """no""" +39 85 optimizer 
"""adam""" +39 85 training_loop """owa""" +39 85 negative_sampler """basic""" +39 85 evaluator """rankbased""" +39 86 dataset """kinships""" +39 86 model """complex""" +39 86 loss """nssa""" +39 86 regularizer """no""" +39 86 optimizer """adam""" +39 86 training_loop """owa""" +39 86 negative_sampler """basic""" +39 86 evaluator """rankbased""" +39 87 dataset """kinships""" +39 87 model """complex""" +39 87 loss """nssa""" +39 87 regularizer """no""" +39 87 optimizer """adam""" +39 87 training_loop """owa""" +39 87 negative_sampler """basic""" +39 87 evaluator """rankbased""" +39 88 dataset """kinships""" +39 88 model """complex""" +39 88 loss """nssa""" +39 88 regularizer """no""" +39 88 optimizer """adam""" +39 88 training_loop """owa""" +39 88 negative_sampler """basic""" +39 88 evaluator """rankbased""" +39 89 dataset """kinships""" +39 89 model """complex""" +39 89 loss """nssa""" +39 89 regularizer """no""" +39 89 optimizer """adam""" +39 89 training_loop """owa""" +39 89 negative_sampler """basic""" +39 89 evaluator """rankbased""" +39 90 dataset """kinships""" +39 90 model """complex""" +39 90 loss """nssa""" +39 90 regularizer """no""" +39 90 optimizer """adam""" +39 90 training_loop """owa""" +39 90 negative_sampler """basic""" +39 90 evaluator """rankbased""" +39 91 dataset """kinships""" +39 91 model """complex""" +39 91 loss """nssa""" +39 91 regularizer """no""" +39 91 optimizer """adam""" +39 91 training_loop """owa""" +39 91 negative_sampler """basic""" +39 91 evaluator """rankbased""" +39 92 dataset """kinships""" +39 92 model """complex""" +39 92 loss """nssa""" +39 92 regularizer """no""" +39 92 optimizer """adam""" +39 92 training_loop """owa""" +39 92 negative_sampler """basic""" +39 92 evaluator """rankbased""" +39 93 dataset """kinships""" +39 93 model """complex""" +39 93 loss """nssa""" +39 93 regularizer """no""" +39 93 optimizer """adam""" +39 93 training_loop """owa""" +39 93 negative_sampler """basic""" +39 93 evaluator """rankbased""" 
+39 94 dataset """kinships""" +39 94 model """complex""" +39 94 loss """nssa""" +39 94 regularizer """no""" +39 94 optimizer """adam""" +39 94 training_loop """owa""" +39 94 negative_sampler """basic""" +39 94 evaluator """rankbased""" +39 95 dataset """kinships""" +39 95 model """complex""" +39 95 loss """nssa""" +39 95 regularizer """no""" +39 95 optimizer """adam""" +39 95 training_loop """owa""" +39 95 negative_sampler """basic""" +39 95 evaluator """rankbased""" +39 96 dataset """kinships""" +39 96 model """complex""" +39 96 loss """nssa""" +39 96 regularizer """no""" +39 96 optimizer """adam""" +39 96 training_loop """owa""" +39 96 negative_sampler """basic""" +39 96 evaluator """rankbased""" +39 97 dataset """kinships""" +39 97 model """complex""" +39 97 loss """nssa""" +39 97 regularizer """no""" +39 97 optimizer """adam""" +39 97 training_loop """owa""" +39 97 negative_sampler """basic""" +39 97 evaluator """rankbased""" +39 98 dataset """kinships""" +39 98 model """complex""" +39 98 loss """nssa""" +39 98 regularizer """no""" +39 98 optimizer """adam""" +39 98 training_loop """owa""" +39 98 negative_sampler """basic""" +39 98 evaluator """rankbased""" +39 99 dataset """kinships""" +39 99 model """complex""" +39 99 loss """nssa""" +39 99 regularizer """no""" +39 99 optimizer """adam""" +39 99 training_loop """owa""" +39 99 negative_sampler """basic""" +39 99 evaluator """rankbased""" +39 100 dataset """kinships""" +39 100 model """complex""" +39 100 loss """nssa""" +39 100 regularizer """no""" +39 100 optimizer """adam""" +39 100 training_loop """owa""" +39 100 negative_sampler """basic""" +39 100 evaluator """rankbased""" +40 1 model.embedding_dim 2.0 +40 1 loss.margin 2.90795997685906 +40 1 optimizer.lr 0.013409317323379561 +40 1 negative_sampler.num_negs_per_pos 12.0 +40 1 training.batch_size 0.0 +40 2 model.embedding_dim 2.0 +40 2 loss.margin 9.991605961489801 +40 2 optimizer.lr 0.0956839150995541 +40 2 negative_sampler.num_negs_per_pos 14.0 +40 2 
training.batch_size 0.0 +40 3 model.embedding_dim 1.0 +40 3 loss.margin 8.89032796423345 +40 3 optimizer.lr 0.025686060185131202 +40 3 negative_sampler.num_negs_per_pos 76.0 +40 3 training.batch_size 1.0 +40 4 model.embedding_dim 1.0 +40 4 loss.margin 3.5729010862327093 +40 4 optimizer.lr 0.032382562640263034 +40 4 negative_sampler.num_negs_per_pos 69.0 +40 4 training.batch_size 2.0 +40 5 model.embedding_dim 1.0 +40 5 loss.margin 6.085034533510701 +40 5 optimizer.lr 0.007883177708472227 +40 5 negative_sampler.num_negs_per_pos 28.0 +40 5 training.batch_size 0.0 +40 6 model.embedding_dim 2.0 +40 6 loss.margin 3.6406406351711396 +40 6 optimizer.lr 0.006530279938043917 +40 6 negative_sampler.num_negs_per_pos 59.0 +40 6 training.batch_size 2.0 +40 7 model.embedding_dim 2.0 +40 7 loss.margin 1.217682754016065 +40 7 optimizer.lr 0.004265292265649788 +40 7 negative_sampler.num_negs_per_pos 35.0 +40 7 training.batch_size 1.0 +40 8 model.embedding_dim 0.0 +40 8 loss.margin 8.249916760939483 +40 8 optimizer.lr 0.04098313692473368 +40 8 negative_sampler.num_negs_per_pos 2.0 +40 8 training.batch_size 2.0 +40 9 model.embedding_dim 1.0 +40 9 loss.margin 0.7634080525803111 +40 9 optimizer.lr 0.020720436731885703 +40 9 negative_sampler.num_negs_per_pos 88.0 +40 9 training.batch_size 0.0 +40 10 model.embedding_dim 0.0 +40 10 loss.margin 3.919342128863998 +40 10 optimizer.lr 0.019549542542175575 +40 10 negative_sampler.num_negs_per_pos 46.0 +40 10 training.batch_size 0.0 +40 11 model.embedding_dim 0.0 +40 11 loss.margin 1.6667725972123566 +40 11 optimizer.lr 0.0035828555552499803 +40 11 negative_sampler.num_negs_per_pos 98.0 +40 11 training.batch_size 0.0 +40 12 model.embedding_dim 1.0 +40 12 loss.margin 9.004045916944202 +40 12 optimizer.lr 0.045842652875949276 +40 12 negative_sampler.num_negs_per_pos 51.0 +40 12 training.batch_size 1.0 +40 13 model.embedding_dim 2.0 +40 13 loss.margin 9.359168840708248 +40 13 optimizer.lr 0.021922965803819965 +40 13 
negative_sampler.num_negs_per_pos 75.0 +40 13 training.batch_size 2.0 +40 14 model.embedding_dim 0.0 +40 14 loss.margin 0.7736988298865499 +40 14 optimizer.lr 0.005781581677712353 +40 14 negative_sampler.num_negs_per_pos 71.0 +40 14 training.batch_size 1.0 +40 15 model.embedding_dim 1.0 +40 15 loss.margin 9.242582172499 +40 15 optimizer.lr 0.02830176586795721 +40 15 negative_sampler.num_negs_per_pos 46.0 +40 15 training.batch_size 1.0 +40 16 model.embedding_dim 2.0 +40 16 loss.margin 8.886580153010073 +40 16 optimizer.lr 0.04223206389849344 +40 16 negative_sampler.num_negs_per_pos 79.0 +40 16 training.batch_size 2.0 +40 17 model.embedding_dim 1.0 +40 17 loss.margin 0.7906098777293085 +40 17 optimizer.lr 0.020792438516028636 +40 17 negative_sampler.num_negs_per_pos 30.0 +40 17 training.batch_size 2.0 +40 18 model.embedding_dim 0.0 +40 18 loss.margin 3.6093930158169325 +40 18 optimizer.lr 0.03149130602305189 +40 18 negative_sampler.num_negs_per_pos 51.0 +40 18 training.batch_size 1.0 +40 19 model.embedding_dim 2.0 +40 19 loss.margin 2.9569224500082583 +40 19 optimizer.lr 0.0035143499112939183 +40 19 negative_sampler.num_negs_per_pos 23.0 +40 19 training.batch_size 0.0 +40 20 model.embedding_dim 0.0 +40 20 loss.margin 8.20881663190422 +40 20 optimizer.lr 0.029291402561813954 +40 20 negative_sampler.num_negs_per_pos 64.0 +40 20 training.batch_size 0.0 +40 21 model.embedding_dim 1.0 +40 21 loss.margin 1.0456876921805043 +40 21 optimizer.lr 0.007577297068617956 +40 21 negative_sampler.num_negs_per_pos 91.0 +40 21 training.batch_size 0.0 +40 22 model.embedding_dim 1.0 +40 22 loss.margin 7.245533694235871 +40 22 optimizer.lr 0.026670922141147486 +40 22 negative_sampler.num_negs_per_pos 84.0 +40 22 training.batch_size 0.0 +40 23 model.embedding_dim 1.0 +40 23 loss.margin 3.8141859315419273 +40 23 optimizer.lr 0.053568664611665553 +40 23 negative_sampler.num_negs_per_pos 57.0 +40 23 training.batch_size 1.0 +40 24 model.embedding_dim 2.0 +40 24 loss.margin 7.1855877456929855 
+40 24 optimizer.lr 0.0016135032556580408 +40 24 negative_sampler.num_negs_per_pos 92.0 +40 24 training.batch_size 2.0 +40 25 model.embedding_dim 0.0 +40 25 loss.margin 5.75543721710983 +40 25 optimizer.lr 0.017741728697892943 +40 25 negative_sampler.num_negs_per_pos 18.0 +40 25 training.batch_size 2.0 +40 26 model.embedding_dim 0.0 +40 26 loss.margin 8.055670669118568 +40 26 optimizer.lr 0.06218447799464025 +40 26 negative_sampler.num_negs_per_pos 78.0 +40 26 training.batch_size 1.0 +40 27 model.embedding_dim 2.0 +40 27 loss.margin 7.38291707675253 +40 27 optimizer.lr 0.0025318683817151405 +40 27 negative_sampler.num_negs_per_pos 61.0 +40 27 training.batch_size 2.0 +40 28 model.embedding_dim 2.0 +40 28 loss.margin 9.225561588774944 +40 28 optimizer.lr 0.020498321794881973 +40 28 negative_sampler.num_negs_per_pos 78.0 +40 28 training.batch_size 2.0 +40 29 model.embedding_dim 2.0 +40 29 loss.margin 1.1942269277136643 +40 29 optimizer.lr 0.011464777036541942 +40 29 negative_sampler.num_negs_per_pos 2.0 +40 29 training.batch_size 2.0 +40 30 model.embedding_dim 0.0 +40 30 loss.margin 0.6540776283301548 +40 30 optimizer.lr 0.012028400529182016 +40 30 negative_sampler.num_negs_per_pos 23.0 +40 30 training.batch_size 1.0 +40 31 model.embedding_dim 1.0 +40 31 loss.margin 2.487184740023774 +40 31 optimizer.lr 0.0015468965375056774 +40 31 negative_sampler.num_negs_per_pos 72.0 +40 31 training.batch_size 1.0 +40 32 model.embedding_dim 2.0 +40 32 loss.margin 6.469076642741641 +40 32 optimizer.lr 0.005896346868157189 +40 32 negative_sampler.num_negs_per_pos 3.0 +40 32 training.batch_size 2.0 +40 33 model.embedding_dim 2.0 +40 33 loss.margin 9.816644015774207 +40 33 optimizer.lr 0.0036825536460765498 +40 33 negative_sampler.num_negs_per_pos 63.0 +40 33 training.batch_size 2.0 +40 34 model.embedding_dim 2.0 +40 34 loss.margin 4.777699303734752 +40 34 optimizer.lr 0.003521561488856228 +40 34 negative_sampler.num_negs_per_pos 10.0 +40 34 training.batch_size 1.0 +40 35 
model.embedding_dim 2.0 +40 35 loss.margin 8.399063932342093 +40 35 optimizer.lr 0.014966370913095193 +40 35 negative_sampler.num_negs_per_pos 44.0 +40 35 training.batch_size 0.0 +40 36 model.embedding_dim 0.0 +40 36 loss.margin 6.610607361991097 +40 36 optimizer.lr 0.020172741309916312 +40 36 negative_sampler.num_negs_per_pos 28.0 +40 36 training.batch_size 0.0 +40 37 model.embedding_dim 2.0 +40 37 loss.margin 9.280203257463945 +40 37 optimizer.lr 0.028250688553651097 +40 37 negative_sampler.num_negs_per_pos 69.0 +40 37 training.batch_size 1.0 +40 38 model.embedding_dim 0.0 +40 38 loss.margin 4.085974793201085 +40 38 optimizer.lr 0.0016641324739840454 +40 38 negative_sampler.num_negs_per_pos 34.0 +40 38 training.batch_size 2.0 +40 39 model.embedding_dim 2.0 +40 39 loss.margin 6.066265295694909 +40 39 optimizer.lr 0.005423542425135321 +40 39 negative_sampler.num_negs_per_pos 37.0 +40 39 training.batch_size 2.0 +40 40 model.embedding_dim 1.0 +40 40 loss.margin 4.356727608653829 +40 40 optimizer.lr 0.0021627500786965594 +40 40 negative_sampler.num_negs_per_pos 89.0 +40 40 training.batch_size 0.0 +40 41 model.embedding_dim 0.0 +40 41 loss.margin 1.514191875973056 +40 41 optimizer.lr 0.006629459072117812 +40 41 negative_sampler.num_negs_per_pos 57.0 +40 41 training.batch_size 1.0 +40 42 model.embedding_dim 1.0 +40 42 loss.margin 2.1622894087417857 +40 42 optimizer.lr 0.005534734663663027 +40 42 negative_sampler.num_negs_per_pos 26.0 +40 42 training.batch_size 0.0 +40 43 model.embedding_dim 2.0 +40 43 loss.margin 8.050735214304572 +40 43 optimizer.lr 0.002749056521411751 +40 43 negative_sampler.num_negs_per_pos 21.0 +40 43 training.batch_size 2.0 +40 44 model.embedding_dim 2.0 +40 44 loss.margin 4.612214892173912 +40 44 optimizer.lr 0.013185415694253196 +40 44 negative_sampler.num_negs_per_pos 12.0 +40 44 training.batch_size 0.0 +40 45 model.embedding_dim 1.0 +40 45 loss.margin 8.649013208786963 +40 45 optimizer.lr 0.007147053945362111 +40 45 
negative_sampler.num_negs_per_pos 49.0 +40 45 training.batch_size 0.0 +40 46 model.embedding_dim 2.0 +40 46 loss.margin 9.662003780756777 +40 46 optimizer.lr 0.002699803552409779 +40 46 negative_sampler.num_negs_per_pos 87.0 +40 46 training.batch_size 0.0 +40 47 model.embedding_dim 1.0 +40 47 loss.margin 7.11915183096162 +40 47 optimizer.lr 0.072562896088243 +40 47 negative_sampler.num_negs_per_pos 62.0 +40 47 training.batch_size 0.0 +40 48 model.embedding_dim 2.0 +40 48 loss.margin 7.115597993268938 +40 48 optimizer.lr 0.0010347991204821912 +40 48 negative_sampler.num_negs_per_pos 57.0 +40 48 training.batch_size 2.0 +40 49 model.embedding_dim 0.0 +40 49 loss.margin 3.956331144781378 +40 49 optimizer.lr 0.026843989004506297 +40 49 negative_sampler.num_negs_per_pos 68.0 +40 49 training.batch_size 1.0 +40 50 model.embedding_dim 2.0 +40 50 loss.margin 8.456102589514579 +40 50 optimizer.lr 0.007480993868581846 +40 50 negative_sampler.num_negs_per_pos 2.0 +40 50 training.batch_size 1.0 +40 51 model.embedding_dim 1.0 +40 51 loss.margin 1.6002971238306707 +40 51 optimizer.lr 0.016659165850806117 +40 51 negative_sampler.num_negs_per_pos 66.0 +40 51 training.batch_size 0.0 +40 52 model.embedding_dim 1.0 +40 52 loss.margin 3.530536386876324 +40 52 optimizer.lr 0.004273611192663941 +40 52 negative_sampler.num_negs_per_pos 3.0 +40 52 training.batch_size 2.0 +40 53 model.embedding_dim 2.0 +40 53 loss.margin 1.831273608921513 +40 53 optimizer.lr 0.009103381088323552 +40 53 negative_sampler.num_negs_per_pos 68.0 +40 53 training.batch_size 1.0 +40 54 model.embedding_dim 0.0 +40 54 loss.margin 5.342996433065518 +40 54 optimizer.lr 0.02777123078465808 +40 54 negative_sampler.num_negs_per_pos 68.0 +40 54 training.batch_size 2.0 +40 55 model.embedding_dim 1.0 +40 55 loss.margin 8.868303192629833 +40 55 optimizer.lr 0.008139968895983582 +40 55 negative_sampler.num_negs_per_pos 12.0 +40 55 training.batch_size 0.0 +40 56 model.embedding_dim 0.0 +40 56 loss.margin 9.965492859840575 +40 56 
optimizer.lr 0.02929590295057827 +40 56 negative_sampler.num_negs_per_pos 28.0 +40 56 training.batch_size 0.0 +40 57 model.embedding_dim 2.0 +40 57 loss.margin 2.703154829428719 +40 57 optimizer.lr 0.001339706288868445 +40 57 negative_sampler.num_negs_per_pos 20.0 +40 57 training.batch_size 1.0 +40 58 model.embedding_dim 1.0 +40 58 loss.margin 4.405523614621401 +40 58 optimizer.lr 0.0035626355381463623 +40 58 negative_sampler.num_negs_per_pos 49.0 +40 58 training.batch_size 0.0 +40 59 model.embedding_dim 1.0 +40 59 loss.margin 3.272334999904415 +40 59 optimizer.lr 0.0421527649135559 +40 59 negative_sampler.num_negs_per_pos 3.0 +40 59 training.batch_size 0.0 +40 60 model.embedding_dim 2.0 +40 60 loss.margin 1.6138994479518367 +40 60 optimizer.lr 0.0031058565060449552 +40 60 negative_sampler.num_negs_per_pos 5.0 +40 60 training.batch_size 2.0 +40 61 model.embedding_dim 1.0 +40 61 loss.margin 0.9501219131690369 +40 61 optimizer.lr 0.0039862337546607675 +40 61 negative_sampler.num_negs_per_pos 73.0 +40 61 training.batch_size 0.0 +40 62 model.embedding_dim 1.0 +40 62 loss.margin 2.0633067215824976 +40 62 optimizer.lr 0.04100993726001552 +40 62 negative_sampler.num_negs_per_pos 12.0 +40 62 training.batch_size 1.0 +40 63 model.embedding_dim 2.0 +40 63 loss.margin 3.2743137921050183 +40 63 optimizer.lr 0.006447283235515868 +40 63 negative_sampler.num_negs_per_pos 82.0 +40 63 training.batch_size 2.0 +40 64 model.embedding_dim 0.0 +40 64 loss.margin 7.577800963185875 +40 64 optimizer.lr 0.0010339980271473605 +40 64 negative_sampler.num_negs_per_pos 19.0 +40 64 training.batch_size 0.0 +40 65 model.embedding_dim 1.0 +40 65 loss.margin 9.153407319047353 +40 65 optimizer.lr 0.0055499655773645525 +40 65 negative_sampler.num_negs_per_pos 97.0 +40 65 training.batch_size 1.0 +40 66 model.embedding_dim 1.0 +40 66 loss.margin 9.296921439023654 +40 66 optimizer.lr 0.011828019802528785 +40 66 negative_sampler.num_negs_per_pos 44.0 +40 66 training.batch_size 2.0 +40 67 
model.embedding_dim 0.0 +40 67 loss.margin 9.038903107016507 +40 67 optimizer.lr 0.010304418328268682 +40 67 negative_sampler.num_negs_per_pos 62.0 +40 67 training.batch_size 1.0 +40 68 model.embedding_dim 0.0 +40 68 loss.margin 6.768398769666525 +40 68 optimizer.lr 0.0035354179932279373 +40 68 negative_sampler.num_negs_per_pos 18.0 +40 68 training.batch_size 2.0 +40 69 model.embedding_dim 2.0 +40 69 loss.margin 6.0872301734844365 +40 69 optimizer.lr 0.054455886999704016 +40 69 negative_sampler.num_negs_per_pos 88.0 +40 69 training.batch_size 1.0 +40 70 model.embedding_dim 2.0 +40 70 loss.margin 2.551396444709355 +40 70 optimizer.lr 0.005297808113332575 +40 70 negative_sampler.num_negs_per_pos 57.0 +40 70 training.batch_size 2.0 +40 71 model.embedding_dim 2.0 +40 71 loss.margin 2.2026723617175876 +40 71 optimizer.lr 0.0036501428333012126 +40 71 negative_sampler.num_negs_per_pos 39.0 +40 71 training.batch_size 0.0 +40 72 model.embedding_dim 1.0 +40 72 loss.margin 2.070034559615992 +40 72 optimizer.lr 0.0044257823252246255 +40 72 negative_sampler.num_negs_per_pos 16.0 +40 72 training.batch_size 2.0 +40 73 model.embedding_dim 1.0 +40 73 loss.margin 6.612795101900909 +40 73 optimizer.lr 0.0075399596128125 +40 73 negative_sampler.num_negs_per_pos 72.0 +40 73 training.batch_size 0.0 +40 74 model.embedding_dim 2.0 +40 74 loss.margin 7.671455093300932 +40 74 optimizer.lr 0.046650254282439116 +40 74 negative_sampler.num_negs_per_pos 18.0 +40 74 training.batch_size 2.0 +40 75 model.embedding_dim 2.0 +40 75 loss.margin 7.460165522877637 +40 75 optimizer.lr 0.06372870256520861 +40 75 negative_sampler.num_negs_per_pos 51.0 +40 75 training.batch_size 0.0 +40 76 model.embedding_dim 1.0 +40 76 loss.margin 6.766340221238243 +40 76 optimizer.lr 0.0072673838236747175 +40 76 negative_sampler.num_negs_per_pos 77.0 +40 76 training.batch_size 2.0 +40 77 model.embedding_dim 0.0 +40 77 loss.margin 2.459714691996844 +40 77 optimizer.lr 0.00160933730170162 +40 77 
negative_sampler.num_negs_per_pos 79.0 +40 77 training.batch_size 0.0 +40 78 model.embedding_dim 1.0 +40 78 loss.margin 1.1863014203370468 +40 78 optimizer.lr 0.08619973537975593 +40 78 negative_sampler.num_negs_per_pos 59.0 +40 78 training.batch_size 1.0 +40 79 model.embedding_dim 1.0 +40 79 loss.margin 5.266354765959406 +40 79 optimizer.lr 0.09466841094359504 +40 79 negative_sampler.num_negs_per_pos 43.0 +40 79 training.batch_size 1.0 +40 80 model.embedding_dim 2.0 +40 80 loss.margin 6.521119882820081 +40 80 optimizer.lr 0.0024031022905618045 +40 80 negative_sampler.num_negs_per_pos 80.0 +40 80 training.batch_size 2.0 +40 81 model.embedding_dim 0.0 +40 81 loss.margin 5.147409322836663 +40 81 optimizer.lr 0.08634030537754528 +40 81 negative_sampler.num_negs_per_pos 64.0 +40 81 training.batch_size 0.0 +40 82 model.embedding_dim 1.0 +40 82 loss.margin 3.8020424245063285 +40 82 optimizer.lr 0.0026087967284527244 +40 82 negative_sampler.num_negs_per_pos 44.0 +40 82 training.batch_size 0.0 +40 83 model.embedding_dim 0.0 +40 83 loss.margin 5.5544281628435686 +40 83 optimizer.lr 0.0562772353975785 +40 83 negative_sampler.num_negs_per_pos 78.0 +40 83 training.batch_size 0.0 +40 84 model.embedding_dim 1.0 +40 84 loss.margin 2.610909957082193 +40 84 optimizer.lr 0.008509013397704929 +40 84 negative_sampler.num_negs_per_pos 21.0 +40 84 training.batch_size 0.0 +40 85 model.embedding_dim 0.0 +40 85 loss.margin 4.783277960109535 +40 85 optimizer.lr 0.028240386179013404 +40 85 negative_sampler.num_negs_per_pos 85.0 +40 85 training.batch_size 0.0 +40 86 model.embedding_dim 0.0 +40 86 loss.margin 9.437750494318394 +40 86 optimizer.lr 0.0026567735996203003 +40 86 negative_sampler.num_negs_per_pos 63.0 +40 86 training.batch_size 0.0 +40 87 model.embedding_dim 0.0 +40 87 loss.margin 1.9866271121986583 +40 87 optimizer.lr 0.055796229424799115 +40 87 negative_sampler.num_negs_per_pos 72.0 +40 87 training.batch_size 0.0 +40 88 model.embedding_dim 0.0 +40 88 loss.margin 8.115664110297944 
+40 88 optimizer.lr 0.0014345449244869877 +40 88 negative_sampler.num_negs_per_pos 87.0 +40 88 training.batch_size 0.0 +40 89 model.embedding_dim 0.0 +40 89 loss.margin 6.581048991282943 +40 89 optimizer.lr 0.0012959874394640816 +40 89 negative_sampler.num_negs_per_pos 23.0 +40 89 training.batch_size 1.0 +40 90 model.embedding_dim 0.0 +40 90 loss.margin 0.5789194214485018 +40 90 optimizer.lr 0.009433027957022942 +40 90 negative_sampler.num_negs_per_pos 6.0 +40 90 training.batch_size 2.0 +40 91 model.embedding_dim 0.0 +40 91 loss.margin 4.142537236636526 +40 91 optimizer.lr 0.024002726856795133 +40 91 negative_sampler.num_negs_per_pos 11.0 +40 91 training.batch_size 2.0 +40 92 model.embedding_dim 2.0 +40 92 loss.margin 1.1579709069948758 +40 92 optimizer.lr 0.04361378830014105 +40 92 negative_sampler.num_negs_per_pos 79.0 +40 92 training.batch_size 2.0 +40 93 model.embedding_dim 1.0 +40 93 loss.margin 3.163032947654719 +40 93 optimizer.lr 0.024584104757221517 +40 93 negative_sampler.num_negs_per_pos 76.0 +40 93 training.batch_size 2.0 +40 94 model.embedding_dim 0.0 +40 94 loss.margin 3.6744394843948966 +40 94 optimizer.lr 0.09488924410154957 +40 94 negative_sampler.num_negs_per_pos 49.0 +40 94 training.batch_size 1.0 +40 95 model.embedding_dim 2.0 +40 95 loss.margin 6.764315834071988 +40 95 optimizer.lr 0.0038004566433086674 +40 95 negative_sampler.num_negs_per_pos 93.0 +40 95 training.batch_size 0.0 +40 96 model.embedding_dim 2.0 +40 96 loss.margin 3.0056441337898274 +40 96 optimizer.lr 0.001585635249825429 +40 96 negative_sampler.num_negs_per_pos 38.0 +40 96 training.batch_size 1.0 +40 97 model.embedding_dim 2.0 +40 97 loss.margin 6.905232241207993 +40 97 optimizer.lr 0.020115765210782077 +40 97 negative_sampler.num_negs_per_pos 19.0 +40 97 training.batch_size 2.0 +40 98 model.embedding_dim 0.0 +40 98 loss.margin 1.7553481033951455 +40 98 optimizer.lr 0.07045884295386147 +40 98 negative_sampler.num_negs_per_pos 56.0 +40 98 training.batch_size 0.0 +40 99 
model.embedding_dim 1.0 +40 99 loss.margin 8.346812767962575 +40 99 optimizer.lr 0.0016422350635746247 +40 99 negative_sampler.num_negs_per_pos 89.0 +40 99 training.batch_size 1.0 +40 100 model.embedding_dim 2.0 +40 100 loss.margin 3.3633689464464016 +40 100 optimizer.lr 0.08500840314079883 +40 100 negative_sampler.num_negs_per_pos 44.0 +40 100 training.batch_size 2.0 +40 1 dataset """kinships""" +40 1 model """complex""" +40 1 loss """marginranking""" +40 1 regularizer """no""" +40 1 optimizer """adam""" +40 1 training_loop """owa""" +40 1 negative_sampler """basic""" +40 1 evaluator """rankbased""" +40 2 dataset """kinships""" +40 2 model """complex""" +40 2 loss """marginranking""" +40 2 regularizer """no""" +40 2 optimizer """adam""" +40 2 training_loop """owa""" +40 2 negative_sampler """basic""" +40 2 evaluator """rankbased""" +40 3 dataset """kinships""" +40 3 model """complex""" +40 3 loss """marginranking""" +40 3 regularizer """no""" +40 3 optimizer """adam""" +40 3 training_loop """owa""" +40 3 negative_sampler """basic""" +40 3 evaluator """rankbased""" +40 4 dataset """kinships""" +40 4 model """complex""" +40 4 loss """marginranking""" +40 4 regularizer """no""" +40 4 optimizer """adam""" +40 4 training_loop """owa""" +40 4 negative_sampler """basic""" +40 4 evaluator """rankbased""" +40 5 dataset """kinships""" +40 5 model """complex""" +40 5 loss """marginranking""" +40 5 regularizer """no""" +40 5 optimizer """adam""" +40 5 training_loop """owa""" +40 5 negative_sampler """basic""" +40 5 evaluator """rankbased""" +40 6 dataset """kinships""" +40 6 model """complex""" +40 6 loss """marginranking""" +40 6 regularizer """no""" +40 6 optimizer """adam""" +40 6 training_loop """owa""" +40 6 negative_sampler """basic""" +40 6 evaluator """rankbased""" +40 7 dataset """kinships""" +40 7 model """complex""" +40 7 loss """marginranking""" +40 7 regularizer """no""" +40 7 optimizer """adam""" +40 7 training_loop """owa""" +40 7 negative_sampler """basic""" 
+40 7 evaluator """rankbased""" +40 8 dataset """kinships""" +40 8 model """complex""" +40 8 loss """marginranking""" +40 8 regularizer """no""" +40 8 optimizer """adam""" +40 8 training_loop """owa""" +40 8 negative_sampler """basic""" +40 8 evaluator """rankbased""" +40 9 dataset """kinships""" +40 9 model """complex""" +40 9 loss """marginranking""" +40 9 regularizer """no""" +40 9 optimizer """adam""" +40 9 training_loop """owa""" +40 9 negative_sampler """basic""" +40 9 evaluator """rankbased""" +40 10 dataset """kinships""" +40 10 model """complex""" +40 10 loss """marginranking""" +40 10 regularizer """no""" +40 10 optimizer """adam""" +40 10 training_loop """owa""" +40 10 negative_sampler """basic""" +40 10 evaluator """rankbased""" +40 11 dataset """kinships""" +40 11 model """complex""" +40 11 loss """marginranking""" +40 11 regularizer """no""" +40 11 optimizer """adam""" +40 11 training_loop """owa""" +40 11 negative_sampler """basic""" +40 11 evaluator """rankbased""" +40 12 dataset """kinships""" +40 12 model """complex""" +40 12 loss """marginranking""" +40 12 regularizer """no""" +40 12 optimizer """adam""" +40 12 training_loop """owa""" +40 12 negative_sampler """basic""" +40 12 evaluator """rankbased""" +40 13 dataset """kinships""" +40 13 model """complex""" +40 13 loss """marginranking""" +40 13 regularizer """no""" +40 13 optimizer """adam""" +40 13 training_loop """owa""" +40 13 negative_sampler """basic""" +40 13 evaluator """rankbased""" +40 14 dataset """kinships""" +40 14 model """complex""" +40 14 loss """marginranking""" +40 14 regularizer """no""" +40 14 optimizer """adam""" +40 14 training_loop """owa""" +40 14 negative_sampler """basic""" +40 14 evaluator """rankbased""" +40 15 dataset """kinships""" +40 15 model """complex""" +40 15 loss """marginranking""" +40 15 regularizer """no""" +40 15 optimizer """adam""" +40 15 training_loop """owa""" +40 15 negative_sampler """basic""" +40 15 evaluator """rankbased""" +40 16 dataset 
"""kinships""" +40 16 model """complex""" +40 16 loss """marginranking""" +40 16 regularizer """no""" +40 16 optimizer """adam""" +40 16 training_loop """owa""" +40 16 negative_sampler """basic""" +40 16 evaluator """rankbased""" +40 17 dataset """kinships""" +40 17 model """complex""" +40 17 loss """marginranking""" +40 17 regularizer """no""" +40 17 optimizer """adam""" +40 17 training_loop """owa""" +40 17 negative_sampler """basic""" +40 17 evaluator """rankbased""" +40 18 dataset """kinships""" +40 18 model """complex""" +40 18 loss """marginranking""" +40 18 regularizer """no""" +40 18 optimizer """adam""" +40 18 training_loop """owa""" +40 18 negative_sampler """basic""" +40 18 evaluator """rankbased""" +40 19 dataset """kinships""" +40 19 model """complex""" +40 19 loss """marginranking""" +40 19 regularizer """no""" +40 19 optimizer """adam""" +40 19 training_loop """owa""" +40 19 negative_sampler """basic""" +40 19 evaluator """rankbased""" +40 20 dataset """kinships""" +40 20 model """complex""" +40 20 loss """marginranking""" +40 20 regularizer """no""" +40 20 optimizer """adam""" +40 20 training_loop """owa""" +40 20 negative_sampler """basic""" +40 20 evaluator """rankbased""" +40 21 dataset """kinships""" +40 21 model """complex""" +40 21 loss """marginranking""" +40 21 regularizer """no""" +40 21 optimizer """adam""" +40 21 training_loop """owa""" +40 21 negative_sampler """basic""" +40 21 evaluator """rankbased""" +40 22 dataset """kinships""" +40 22 model """complex""" +40 22 loss """marginranking""" +40 22 regularizer """no""" +40 22 optimizer """adam""" +40 22 training_loop """owa""" +40 22 negative_sampler """basic""" +40 22 evaluator """rankbased""" +40 23 dataset """kinships""" +40 23 model """complex""" +40 23 loss """marginranking""" +40 23 regularizer """no""" +40 23 optimizer """adam""" +40 23 training_loop """owa""" +40 23 negative_sampler """basic""" +40 23 evaluator """rankbased""" +40 24 dataset """kinships""" +40 24 model 
"""complex""" +40 24 loss """marginranking""" +40 24 regularizer """no""" +40 24 optimizer """adam""" +40 24 training_loop """owa""" +40 24 negative_sampler """basic""" +40 24 evaluator """rankbased""" +40 25 dataset """kinships""" +40 25 model """complex""" +40 25 loss """marginranking""" +40 25 regularizer """no""" +40 25 optimizer """adam""" +40 25 training_loop """owa""" +40 25 negative_sampler """basic""" +40 25 evaluator """rankbased""" +40 26 dataset """kinships""" +40 26 model """complex""" +40 26 loss """marginranking""" +40 26 regularizer """no""" +40 26 optimizer """adam""" +40 26 training_loop """owa""" +40 26 negative_sampler """basic""" +40 26 evaluator """rankbased""" +40 27 dataset """kinships""" +40 27 model """complex""" +40 27 loss """marginranking""" +40 27 regularizer """no""" +40 27 optimizer """adam""" +40 27 training_loop """owa""" +40 27 negative_sampler """basic""" +40 27 evaluator """rankbased""" +40 28 dataset """kinships""" +40 28 model """complex""" +40 28 loss """marginranking""" +40 28 regularizer """no""" +40 28 optimizer """adam""" +40 28 training_loop """owa""" +40 28 negative_sampler """basic""" +40 28 evaluator """rankbased""" +40 29 dataset """kinships""" +40 29 model """complex""" +40 29 loss """marginranking""" +40 29 regularizer """no""" +40 29 optimizer """adam""" +40 29 training_loop """owa""" +40 29 negative_sampler """basic""" +40 29 evaluator """rankbased""" +40 30 dataset """kinships""" +40 30 model """complex""" +40 30 loss """marginranking""" +40 30 regularizer """no""" +40 30 optimizer """adam""" +40 30 training_loop """owa""" +40 30 negative_sampler """basic""" +40 30 evaluator """rankbased""" +40 31 dataset """kinships""" +40 31 model """complex""" +40 31 loss """marginranking""" +40 31 regularizer """no""" +40 31 optimizer """adam""" +40 31 training_loop """owa""" +40 31 negative_sampler """basic""" +40 31 evaluator """rankbased""" +40 32 dataset """kinships""" +40 32 model """complex""" +40 32 loss 
"""marginranking""" +40 32 regularizer """no""" +40 32 optimizer """adam""" +40 32 training_loop """owa""" +40 32 negative_sampler """basic""" +40 32 evaluator """rankbased""" +40 33 dataset """kinships""" +40 33 model """complex""" +40 33 loss """marginranking""" +40 33 regularizer """no""" +40 33 optimizer """adam""" +40 33 training_loop """owa""" +40 33 negative_sampler """basic""" +40 33 evaluator """rankbased""" +40 34 dataset """kinships""" +40 34 model """complex""" +40 34 loss """marginranking""" +40 34 regularizer """no""" +40 34 optimizer """adam""" +40 34 training_loop """owa""" +40 34 negative_sampler """basic""" +40 34 evaluator """rankbased""" +40 35 dataset """kinships""" +40 35 model """complex""" +40 35 loss """marginranking""" +40 35 regularizer """no""" +40 35 optimizer """adam""" +40 35 training_loop """owa""" +40 35 negative_sampler """basic""" +40 35 evaluator """rankbased""" +40 36 dataset """kinships""" +40 36 model """complex""" +40 36 loss """marginranking""" +40 36 regularizer """no""" +40 36 optimizer """adam""" +40 36 training_loop """owa""" +40 36 negative_sampler """basic""" +40 36 evaluator """rankbased""" +40 37 dataset """kinships""" +40 37 model """complex""" +40 37 loss """marginranking""" +40 37 regularizer """no""" +40 37 optimizer """adam""" +40 37 training_loop """owa""" +40 37 negative_sampler """basic""" +40 37 evaluator """rankbased""" +40 38 dataset """kinships""" +40 38 model """complex""" +40 38 loss """marginranking""" +40 38 regularizer """no""" +40 38 optimizer """adam""" +40 38 training_loop """owa""" +40 38 negative_sampler """basic""" +40 38 evaluator """rankbased""" +40 39 dataset """kinships""" +40 39 model """complex""" +40 39 loss """marginranking""" +40 39 regularizer """no""" +40 39 optimizer """adam""" +40 39 training_loop """owa""" +40 39 negative_sampler """basic""" +40 39 evaluator """rankbased""" +40 40 dataset """kinships""" +40 40 model """complex""" +40 40 loss """marginranking""" +40 40 regularizer 
"""no""" +40 40 optimizer """adam""" +40 40 training_loop """owa""" +40 40 negative_sampler """basic""" +40 40 evaluator """rankbased""" +40 41 dataset """kinships""" +40 41 model """complex""" +40 41 loss """marginranking""" +40 41 regularizer """no""" +40 41 optimizer """adam""" +40 41 training_loop """owa""" +40 41 negative_sampler """basic""" +40 41 evaluator """rankbased""" +40 42 dataset """kinships""" +40 42 model """complex""" +40 42 loss """marginranking""" +40 42 regularizer """no""" +40 42 optimizer """adam""" +40 42 training_loop """owa""" +40 42 negative_sampler """basic""" +40 42 evaluator """rankbased""" +40 43 dataset """kinships""" +40 43 model """complex""" +40 43 loss """marginranking""" +40 43 regularizer """no""" +40 43 optimizer """adam""" +40 43 training_loop """owa""" +40 43 negative_sampler """basic""" +40 43 evaluator """rankbased""" +40 44 dataset """kinships""" +40 44 model """complex""" +40 44 loss """marginranking""" +40 44 regularizer """no""" +40 44 optimizer """adam""" +40 44 training_loop """owa""" +40 44 negative_sampler """basic""" +40 44 evaluator """rankbased""" +40 45 dataset """kinships""" +40 45 model """complex""" +40 45 loss """marginranking""" +40 45 regularizer """no""" +40 45 optimizer """adam""" +40 45 training_loop """owa""" +40 45 negative_sampler """basic""" +40 45 evaluator """rankbased""" +40 46 dataset """kinships""" +40 46 model """complex""" +40 46 loss """marginranking""" +40 46 regularizer """no""" +40 46 optimizer """adam""" +40 46 training_loop """owa""" +40 46 negative_sampler """basic""" +40 46 evaluator """rankbased""" +40 47 dataset """kinships""" +40 47 model """complex""" +40 47 loss """marginranking""" +40 47 regularizer """no""" +40 47 optimizer """adam""" +40 47 training_loop """owa""" +40 47 negative_sampler """basic""" +40 47 evaluator """rankbased""" +40 48 dataset """kinships""" +40 48 model """complex""" +40 48 loss """marginranking""" +40 48 regularizer """no""" +40 48 optimizer """adam""" 
+40 48 training_loop """owa""" +40 48 negative_sampler """basic""" +40 48 evaluator """rankbased""" +40 49 dataset """kinships""" +40 49 model """complex""" +40 49 loss """marginranking""" +40 49 regularizer """no""" +40 49 optimizer """adam""" +40 49 training_loop """owa""" +40 49 negative_sampler """basic""" +40 49 evaluator """rankbased""" +40 50 dataset """kinships""" +40 50 model """complex""" +40 50 loss """marginranking""" +40 50 regularizer """no""" +40 50 optimizer """adam""" +40 50 training_loop """owa""" +40 50 negative_sampler """basic""" +40 50 evaluator """rankbased""" +40 51 dataset """kinships""" +40 51 model """complex""" +40 51 loss """marginranking""" +40 51 regularizer """no""" +40 51 optimizer """adam""" +40 51 training_loop """owa""" +40 51 negative_sampler """basic""" +40 51 evaluator """rankbased""" +40 52 dataset """kinships""" +40 52 model """complex""" +40 52 loss """marginranking""" +40 52 regularizer """no""" +40 52 optimizer """adam""" +40 52 training_loop """owa""" +40 52 negative_sampler """basic""" +40 52 evaluator """rankbased""" +40 53 dataset """kinships""" +40 53 model """complex""" +40 53 loss """marginranking""" +40 53 regularizer """no""" +40 53 optimizer """adam""" +40 53 training_loop """owa""" +40 53 negative_sampler """basic""" +40 53 evaluator """rankbased""" +40 54 dataset """kinships""" +40 54 model """complex""" +40 54 loss """marginranking""" +40 54 regularizer """no""" +40 54 optimizer """adam""" +40 54 training_loop """owa""" +40 54 negative_sampler """basic""" +40 54 evaluator """rankbased""" +40 55 dataset """kinships""" +40 55 model """complex""" +40 55 loss """marginranking""" +40 55 regularizer """no""" +40 55 optimizer """adam""" +40 55 training_loop """owa""" +40 55 negative_sampler """basic""" +40 55 evaluator """rankbased""" +40 56 dataset """kinships""" +40 56 model """complex""" +40 56 loss """marginranking""" +40 56 regularizer """no""" +40 56 optimizer """adam""" +40 56 training_loop """owa""" +40 56 
negative_sampler """basic""" +40 56 evaluator """rankbased""" +40 57 dataset """kinships""" +40 57 model """complex""" +40 57 loss """marginranking""" +40 57 regularizer """no""" +40 57 optimizer """adam""" +40 57 training_loop """owa""" +40 57 negative_sampler """basic""" +40 57 evaluator """rankbased""" +40 58 dataset """kinships""" +40 58 model """complex""" +40 58 loss """marginranking""" +40 58 regularizer """no""" +40 58 optimizer """adam""" +40 58 training_loop """owa""" +40 58 negative_sampler """basic""" +40 58 evaluator """rankbased""" +40 59 dataset """kinships""" +40 59 model """complex""" +40 59 loss """marginranking""" +40 59 regularizer """no""" +40 59 optimizer """adam""" +40 59 training_loop """owa""" +40 59 negative_sampler """basic""" +40 59 evaluator """rankbased""" +40 60 dataset """kinships""" +40 60 model """complex""" +40 60 loss """marginranking""" +40 60 regularizer """no""" +40 60 optimizer """adam""" +40 60 training_loop """owa""" +40 60 negative_sampler """basic""" +40 60 evaluator """rankbased""" +40 61 dataset """kinships""" +40 61 model """complex""" +40 61 loss """marginranking""" +40 61 regularizer """no""" +40 61 optimizer """adam""" +40 61 training_loop """owa""" +40 61 negative_sampler """basic""" +40 61 evaluator """rankbased""" +40 62 dataset """kinships""" +40 62 model """complex""" +40 62 loss """marginranking""" +40 62 regularizer """no""" +40 62 optimizer """adam""" +40 62 training_loop """owa""" +40 62 negative_sampler """basic""" +40 62 evaluator """rankbased""" +40 63 dataset """kinships""" +40 63 model """complex""" +40 63 loss """marginranking""" +40 63 regularizer """no""" +40 63 optimizer """adam""" +40 63 training_loop """owa""" +40 63 negative_sampler """basic""" +40 63 evaluator """rankbased""" +40 64 dataset """kinships""" +40 64 model """complex""" +40 64 loss """marginranking""" +40 64 regularizer """no""" +40 64 optimizer """adam""" +40 64 training_loop """owa""" +40 64 negative_sampler """basic""" +40 64 
evaluator """rankbased""" +40 65 dataset """kinships""" +40 65 model """complex""" +40 65 loss """marginranking""" +40 65 regularizer """no""" +40 65 optimizer """adam""" +40 65 training_loop """owa""" +40 65 negative_sampler """basic""" +40 65 evaluator """rankbased""" +40 66 dataset """kinships""" +40 66 model """complex""" +40 66 loss """marginranking""" +40 66 regularizer """no""" +40 66 optimizer """adam""" +40 66 training_loop """owa""" +40 66 negative_sampler """basic""" +40 66 evaluator """rankbased""" +40 67 dataset """kinships""" +40 67 model """complex""" +40 67 loss """marginranking""" +40 67 regularizer """no""" +40 67 optimizer """adam""" +40 67 training_loop """owa""" +40 67 negative_sampler """basic""" +40 67 evaluator """rankbased""" +40 68 dataset """kinships""" +40 68 model """complex""" +40 68 loss """marginranking""" +40 68 regularizer """no""" +40 68 optimizer """adam""" +40 68 training_loop """owa""" +40 68 negative_sampler """basic""" +40 68 evaluator """rankbased""" +40 69 dataset """kinships""" +40 69 model """complex""" +40 69 loss """marginranking""" +40 69 regularizer """no""" +40 69 optimizer """adam""" +40 69 training_loop """owa""" +40 69 negative_sampler """basic""" +40 69 evaluator """rankbased""" +40 70 dataset """kinships""" +40 70 model """complex""" +40 70 loss """marginranking""" +40 70 regularizer """no""" +40 70 optimizer """adam""" +40 70 training_loop """owa""" +40 70 negative_sampler """basic""" +40 70 evaluator """rankbased""" +40 71 dataset """kinships""" +40 71 model """complex""" +40 71 loss """marginranking""" +40 71 regularizer """no""" +40 71 optimizer """adam""" +40 71 training_loop """owa""" +40 71 negative_sampler """basic""" +40 71 evaluator """rankbased""" +40 72 dataset """kinships""" +40 72 model """complex""" +40 72 loss """marginranking""" +40 72 regularizer """no""" +40 72 optimizer """adam""" +40 72 training_loop """owa""" +40 72 negative_sampler """basic""" +40 72 evaluator """rankbased""" +40 73 
dataset """kinships""" +40 73 model """complex""" +40 73 loss """marginranking""" +40 73 regularizer """no""" +40 73 optimizer """adam""" +40 73 training_loop """owa""" +40 73 negative_sampler """basic""" +40 73 evaluator """rankbased""" +40 74 dataset """kinships""" +40 74 model """complex""" +40 74 loss """marginranking""" +40 74 regularizer """no""" +40 74 optimizer """adam""" +40 74 training_loop """owa""" +40 74 negative_sampler """basic""" +40 74 evaluator """rankbased""" +40 75 dataset """kinships""" +40 75 model """complex""" +40 75 loss """marginranking""" +40 75 regularizer """no""" +40 75 optimizer """adam""" +40 75 training_loop """owa""" +40 75 negative_sampler """basic""" +40 75 evaluator """rankbased""" +40 76 dataset """kinships""" +40 76 model """complex""" +40 76 loss """marginranking""" +40 76 regularizer """no""" +40 76 optimizer """adam""" +40 76 training_loop """owa""" +40 76 negative_sampler """basic""" +40 76 evaluator """rankbased""" +40 77 dataset """kinships""" +40 77 model """complex""" +40 77 loss """marginranking""" +40 77 regularizer """no""" +40 77 optimizer """adam""" +40 77 training_loop """owa""" +40 77 negative_sampler """basic""" +40 77 evaluator """rankbased""" +40 78 dataset """kinships""" +40 78 model """complex""" +40 78 loss """marginranking""" +40 78 regularizer """no""" +40 78 optimizer """adam""" +40 78 training_loop """owa""" +40 78 negative_sampler """basic""" +40 78 evaluator """rankbased""" +40 79 dataset """kinships""" +40 79 model """complex""" +40 79 loss """marginranking""" +40 79 regularizer """no""" +40 79 optimizer """adam""" +40 79 training_loop """owa""" +40 79 negative_sampler """basic""" +40 79 evaluator """rankbased""" +40 80 dataset """kinships""" +40 80 model """complex""" +40 80 loss """marginranking""" +40 80 regularizer """no""" +40 80 optimizer """adam""" +40 80 training_loop """owa""" +40 80 negative_sampler """basic""" +40 80 evaluator """rankbased""" +40 81 dataset """kinships""" +40 81 model 
"""complex""" +40 81 loss """marginranking""" +40 81 regularizer """no""" +40 81 optimizer """adam""" +40 81 training_loop """owa""" +40 81 negative_sampler """basic""" +40 81 evaluator """rankbased""" +40 82 dataset """kinships""" +40 82 model """complex""" +40 82 loss """marginranking""" +40 82 regularizer """no""" +40 82 optimizer """adam""" +40 82 training_loop """owa""" +40 82 negative_sampler """basic""" +40 82 evaluator """rankbased""" +40 83 dataset """kinships""" +40 83 model """complex""" +40 83 loss """marginranking""" +40 83 regularizer """no""" +40 83 optimizer """adam""" +40 83 training_loop """owa""" +40 83 negative_sampler """basic""" +40 83 evaluator """rankbased""" +40 84 dataset """kinships""" +40 84 model """complex""" +40 84 loss """marginranking""" +40 84 regularizer """no""" +40 84 optimizer """adam""" +40 84 training_loop """owa""" +40 84 negative_sampler """basic""" +40 84 evaluator """rankbased""" +40 85 dataset """kinships""" +40 85 model """complex""" +40 85 loss """marginranking""" +40 85 regularizer """no""" +40 85 optimizer """adam""" +40 85 training_loop """owa""" +40 85 negative_sampler """basic""" +40 85 evaluator """rankbased""" +40 86 dataset """kinships""" +40 86 model """complex""" +40 86 loss """marginranking""" +40 86 regularizer """no""" +40 86 optimizer """adam""" +40 86 training_loop """owa""" +40 86 negative_sampler """basic""" +40 86 evaluator """rankbased""" +40 87 dataset """kinships""" +40 87 model """complex""" +40 87 loss """marginranking""" +40 87 regularizer """no""" +40 87 optimizer """adam""" +40 87 training_loop """owa""" +40 87 negative_sampler """basic""" +40 87 evaluator """rankbased""" +40 88 dataset """kinships""" +40 88 model """complex""" +40 88 loss """marginranking""" +40 88 regularizer """no""" +40 88 optimizer """adam""" +40 88 training_loop """owa""" +40 88 negative_sampler """basic""" +40 88 evaluator """rankbased""" +40 89 dataset """kinships""" +40 89 model """complex""" +40 89 loss 
"""marginranking""" +40 89 regularizer """no""" +40 89 optimizer """adam""" +40 89 training_loop """owa""" +40 89 negative_sampler """basic""" +40 89 evaluator """rankbased""" +40 90 dataset """kinships""" +40 90 model """complex""" +40 90 loss """marginranking""" +40 90 regularizer """no""" +40 90 optimizer """adam""" +40 90 training_loop """owa""" +40 90 negative_sampler """basic""" +40 90 evaluator """rankbased""" +40 91 dataset """kinships""" +40 91 model """complex""" +40 91 loss """marginranking""" +40 91 regularizer """no""" +40 91 optimizer """adam""" +40 91 training_loop """owa""" +40 91 negative_sampler """basic""" +40 91 evaluator """rankbased""" +40 92 dataset """kinships""" +40 92 model """complex""" +40 92 loss """marginranking""" +40 92 regularizer """no""" +40 92 optimizer """adam""" +40 92 training_loop """owa""" +40 92 negative_sampler """basic""" +40 92 evaluator """rankbased""" +40 93 dataset """kinships""" +40 93 model """complex""" +40 93 loss """marginranking""" +40 93 regularizer """no""" +40 93 optimizer """adam""" +40 93 training_loop """owa""" +40 93 negative_sampler """basic""" +40 93 evaluator """rankbased""" +40 94 dataset """kinships""" +40 94 model """complex""" +40 94 loss """marginranking""" +40 94 regularizer """no""" +40 94 optimizer """adam""" +40 94 training_loop """owa""" +40 94 negative_sampler """basic""" +40 94 evaluator """rankbased""" +40 95 dataset """kinships""" +40 95 model """complex""" +40 95 loss """marginranking""" +40 95 regularizer """no""" +40 95 optimizer """adam""" +40 95 training_loop """owa""" +40 95 negative_sampler """basic""" +40 95 evaluator """rankbased""" +40 96 dataset """kinships""" +40 96 model """complex""" +40 96 loss """marginranking""" +40 96 regularizer """no""" +40 96 optimizer """adam""" +40 96 training_loop """owa""" +40 96 negative_sampler """basic""" +40 96 evaluator """rankbased""" +40 97 dataset """kinships""" +40 97 model """complex""" +40 97 loss """marginranking""" +40 97 regularizer 
"""no""" +40 97 optimizer """adam""" +40 97 training_loop """owa""" +40 97 negative_sampler """basic""" +40 97 evaluator """rankbased""" +40 98 dataset """kinships""" +40 98 model """complex""" +40 98 loss """marginranking""" +40 98 regularizer """no""" +40 98 optimizer """adam""" +40 98 training_loop """owa""" +40 98 negative_sampler """basic""" +40 98 evaluator """rankbased""" +40 99 dataset """kinships""" +40 99 model """complex""" +40 99 loss """marginranking""" +40 99 regularizer """no""" +40 99 optimizer """adam""" +40 99 training_loop """owa""" +40 99 negative_sampler """basic""" +40 99 evaluator """rankbased""" +40 100 dataset """kinships""" +40 100 model """complex""" +40 100 loss """marginranking""" +40 100 regularizer """no""" +40 100 optimizer """adam""" +40 100 training_loop """owa""" +40 100 negative_sampler """basic""" +40 100 evaluator """rankbased""" +41 1 model.embedding_dim 0.0 +41 1 loss.margin 1.5495050001600452 +41 1 optimizer.lr 0.0014409789271743639 +41 1 negative_sampler.num_negs_per_pos 15.0 +41 1 training.batch_size 2.0 +41 2 model.embedding_dim 0.0 +41 2 loss.margin 7.442304294725707 +41 2 optimizer.lr 0.021089445217186267 +41 2 negative_sampler.num_negs_per_pos 94.0 +41 2 training.batch_size 1.0 +41 3 model.embedding_dim 0.0 +41 3 loss.margin 6.6328036113051665 +41 3 optimizer.lr 0.02617022984448716 +41 3 negative_sampler.num_negs_per_pos 4.0 +41 3 training.batch_size 0.0 +41 4 model.embedding_dim 2.0 +41 4 loss.margin 8.109530013913524 +41 4 optimizer.lr 0.004671892667477428 +41 4 negative_sampler.num_negs_per_pos 17.0 +41 4 training.batch_size 2.0 +41 5 model.embedding_dim 1.0 +41 5 loss.margin 1.0251397976889505 +41 5 optimizer.lr 0.058788378462076504 +41 5 negative_sampler.num_negs_per_pos 91.0 +41 5 training.batch_size 0.0 +41 6 model.embedding_dim 2.0 +41 6 loss.margin 1.860904612151778 +41 6 optimizer.lr 0.007652396415349782 +41 6 negative_sampler.num_negs_per_pos 24.0 +41 6 training.batch_size 2.0 +41 7 model.embedding_dim 2.0 
+41 7 loss.margin 6.5315520553764905 +41 7 optimizer.lr 0.0029650683016680074 +41 7 negative_sampler.num_negs_per_pos 16.0 +41 7 training.batch_size 1.0 +41 8 model.embedding_dim 0.0 +41 8 loss.margin 5.646422452659934 +41 8 optimizer.lr 0.012313871934303725 +41 8 negative_sampler.num_negs_per_pos 75.0 +41 8 training.batch_size 0.0 +41 9 model.embedding_dim 0.0 +41 9 loss.margin 4.492744715368891 +41 9 optimizer.lr 0.055429217162529965 +41 9 negative_sampler.num_negs_per_pos 37.0 +41 9 training.batch_size 0.0 +41 10 model.embedding_dim 0.0 +41 10 loss.margin 1.1900614080635021 +41 10 optimizer.lr 0.014476065032967228 +41 10 negative_sampler.num_negs_per_pos 98.0 +41 10 training.batch_size 0.0 +41 11 model.embedding_dim 1.0 +41 11 loss.margin 4.953467165356463 +41 11 optimizer.lr 0.04326357372866183 +41 11 negative_sampler.num_negs_per_pos 96.0 +41 11 training.batch_size 0.0 +41 12 model.embedding_dim 1.0 +41 12 loss.margin 3.7440553522454096 +41 12 optimizer.lr 0.00452174551429113 +41 12 negative_sampler.num_negs_per_pos 35.0 +41 12 training.batch_size 0.0 +41 13 model.embedding_dim 1.0 +41 13 loss.margin 5.880470694204208 +41 13 optimizer.lr 0.012535487745402809 +41 13 negative_sampler.num_negs_per_pos 78.0 +41 13 training.batch_size 2.0 +41 14 model.embedding_dim 2.0 +41 14 loss.margin 5.194131633307191 +41 14 optimizer.lr 0.004491096069582283 +41 14 negative_sampler.num_negs_per_pos 68.0 +41 14 training.batch_size 1.0 +41 15 model.embedding_dim 0.0 +41 15 loss.margin 3.8715233008872563 +41 15 optimizer.lr 0.005043775969845024 +41 15 negative_sampler.num_negs_per_pos 48.0 +41 15 training.batch_size 0.0 +41 16 model.embedding_dim 2.0 +41 16 loss.margin 4.632751872910319 +41 16 optimizer.lr 0.013929387046587024 +41 16 negative_sampler.num_negs_per_pos 41.0 +41 16 training.batch_size 1.0 +41 17 model.embedding_dim 1.0 +41 17 loss.margin 1.9572428532010873 +41 17 optimizer.lr 0.003941095572371749 +41 17 negative_sampler.num_negs_per_pos 82.0 +41 17 
training.batch_size 2.0 +41 18 model.embedding_dim 2.0 +41 18 loss.margin 4.230679943927978 +41 18 optimizer.lr 0.001714385261377627 +41 18 negative_sampler.num_negs_per_pos 87.0 +41 18 training.batch_size 0.0 +41 19 model.embedding_dim 2.0 +41 19 loss.margin 5.220081391015581 +41 19 optimizer.lr 0.001236025404784433 +41 19 negative_sampler.num_negs_per_pos 57.0 +41 19 training.batch_size 2.0 +41 20 model.embedding_dim 2.0 +41 20 loss.margin 7.716138537355996 +41 20 optimizer.lr 0.016648355360759037 +41 20 negative_sampler.num_negs_per_pos 88.0 +41 20 training.batch_size 0.0 +41 21 model.embedding_dim 0.0 +41 21 loss.margin 1.079396222111072 +41 21 optimizer.lr 0.02035402313753371 +41 21 negative_sampler.num_negs_per_pos 42.0 +41 21 training.batch_size 0.0 +41 22 model.embedding_dim 0.0 +41 22 loss.margin 2.599714647940738 +41 22 optimizer.lr 0.010913202309496542 +41 22 negative_sampler.num_negs_per_pos 2.0 +41 22 training.batch_size 0.0 +41 23 model.embedding_dim 1.0 +41 23 loss.margin 6.252711879489408 +41 23 optimizer.lr 0.0011864272088614265 +41 23 negative_sampler.num_negs_per_pos 60.0 +41 23 training.batch_size 1.0 +41 24 model.embedding_dim 2.0 +41 24 loss.margin 3.6648253805939626 +41 24 optimizer.lr 0.0019953777681001443 +41 24 negative_sampler.num_negs_per_pos 43.0 +41 24 training.batch_size 2.0 +41 25 model.embedding_dim 2.0 +41 25 loss.margin 1.6726361510844603 +41 25 optimizer.lr 0.0040342770246014365 +41 25 negative_sampler.num_negs_per_pos 0.0 +41 25 training.batch_size 1.0 +41 26 model.embedding_dim 1.0 +41 26 loss.margin 3.160636563399595 +41 26 optimizer.lr 0.08659585219428285 +41 26 negative_sampler.num_negs_per_pos 87.0 +41 26 training.batch_size 0.0 +41 27 model.embedding_dim 1.0 +41 27 loss.margin 2.435753369727118 +41 27 optimizer.lr 0.04202542974380497 +41 27 negative_sampler.num_negs_per_pos 84.0 +41 27 training.batch_size 2.0 +41 28 model.embedding_dim 1.0 +41 28 loss.margin 4.799031745005066 +41 28 optimizer.lr 0.09313943284289405 +41 28 
negative_sampler.num_negs_per_pos 53.0 +41 28 training.batch_size 0.0 +41 29 model.embedding_dim 0.0 +41 29 loss.margin 2.502428269603974 +41 29 optimizer.lr 0.007321268130921601 +41 29 negative_sampler.num_negs_per_pos 80.0 +41 29 training.batch_size 2.0 +41 30 model.embedding_dim 0.0 +41 30 loss.margin 9.055550782017658 +41 30 optimizer.lr 0.0029596019609641993 +41 30 negative_sampler.num_negs_per_pos 2.0 +41 30 training.batch_size 1.0 +41 31 model.embedding_dim 2.0 +41 31 loss.margin 4.412807522836877 +41 31 optimizer.lr 0.00687752438313071 +41 31 negative_sampler.num_negs_per_pos 19.0 +41 31 training.batch_size 2.0 +41 32 model.embedding_dim 1.0 +41 32 loss.margin 7.89868869491544 +41 32 optimizer.lr 0.006240871563274088 +41 32 negative_sampler.num_negs_per_pos 30.0 +41 32 training.batch_size 0.0 +41 33 model.embedding_dim 0.0 +41 33 loss.margin 6.254011027418776 +41 33 optimizer.lr 0.014907260085720965 +41 33 negative_sampler.num_negs_per_pos 77.0 +41 33 training.batch_size 2.0 +41 34 model.embedding_dim 2.0 +41 34 loss.margin 6.7567845650386715 +41 34 optimizer.lr 0.002818163679710138 +41 34 negative_sampler.num_negs_per_pos 31.0 +41 34 training.batch_size 2.0 +41 35 model.embedding_dim 0.0 +41 35 loss.margin 3.2088837327985704 +41 35 optimizer.lr 0.001183414751752247 +41 35 negative_sampler.num_negs_per_pos 18.0 +41 35 training.batch_size 0.0 +41 36 model.embedding_dim 1.0 +41 36 loss.margin 1.684306023929966 +41 36 optimizer.lr 0.013560243745359192 +41 36 negative_sampler.num_negs_per_pos 14.0 +41 36 training.batch_size 0.0 +41 37 model.embedding_dim 0.0 +41 37 loss.margin 6.9063332971783264 +41 37 optimizer.lr 0.026100563289517254 +41 37 negative_sampler.num_negs_per_pos 22.0 +41 37 training.batch_size 2.0 +41 38 model.embedding_dim 0.0 +41 38 loss.margin 4.235532580984345 +41 38 optimizer.lr 0.005031045078083598 +41 38 negative_sampler.num_negs_per_pos 38.0 +41 38 training.batch_size 0.0 +41 39 model.embedding_dim 0.0 +41 39 loss.margin 2.654699575735136 
+41 39 optimizer.lr 0.005370342911860316 +41 39 negative_sampler.num_negs_per_pos 55.0 +41 39 training.batch_size 0.0 +41 40 model.embedding_dim 2.0 +41 40 loss.margin 6.521215005400707 +41 40 optimizer.lr 0.048482272053040544 +41 40 negative_sampler.num_negs_per_pos 2.0 +41 40 training.batch_size 2.0 +41 41 model.embedding_dim 1.0 +41 41 loss.margin 4.3340954826647895 +41 41 optimizer.lr 0.0454899323628026 +41 41 negative_sampler.num_negs_per_pos 93.0 +41 41 training.batch_size 2.0 +41 42 model.embedding_dim 2.0 +41 42 loss.margin 7.3703521896090365 +41 42 optimizer.lr 0.0013756276837625936 +41 42 negative_sampler.num_negs_per_pos 65.0 +41 42 training.batch_size 0.0 +41 43 model.embedding_dim 2.0 +41 43 loss.margin 5.877640388022313 +41 43 optimizer.lr 0.004847794864154363 +41 43 negative_sampler.num_negs_per_pos 74.0 +41 43 training.batch_size 1.0 +41 44 model.embedding_dim 0.0 +41 44 loss.margin 3.943672547762054 +41 44 optimizer.lr 0.05073892982029438 +41 44 negative_sampler.num_negs_per_pos 95.0 +41 44 training.batch_size 1.0 +41 45 model.embedding_dim 2.0 +41 45 loss.margin 4.771897345707224 +41 45 optimizer.lr 0.0027435974141927677 +41 45 negative_sampler.num_negs_per_pos 78.0 +41 45 training.batch_size 2.0 +41 46 model.embedding_dim 0.0 +41 46 loss.margin 8.31453183199167 +41 46 optimizer.lr 0.06868128170322213 +41 46 negative_sampler.num_negs_per_pos 83.0 +41 46 training.batch_size 0.0 +41 47 model.embedding_dim 1.0 +41 47 loss.margin 7.762419867010368 +41 47 optimizer.lr 0.03551746079716324 +41 47 negative_sampler.num_negs_per_pos 4.0 +41 47 training.batch_size 1.0 +41 48 model.embedding_dim 0.0 +41 48 loss.margin 3.304923394002116 +41 48 optimizer.lr 0.07857551005328545 +41 48 negative_sampler.num_negs_per_pos 35.0 +41 48 training.batch_size 1.0 +41 49 model.embedding_dim 0.0 +41 49 loss.margin 3.7972038092265086 +41 49 optimizer.lr 0.024136649092857804 +41 49 negative_sampler.num_negs_per_pos 53.0 +41 49 training.batch_size 1.0 +41 50 
model.embedding_dim 0.0 +41 50 loss.margin 9.117567883523561 +41 50 optimizer.lr 0.004435590736860061 +41 50 negative_sampler.num_negs_per_pos 87.0 +41 50 training.batch_size 2.0 +41 51 model.embedding_dim 0.0 +41 51 loss.margin 1.6111081761509598 +41 51 optimizer.lr 0.06841905117130481 +41 51 negative_sampler.num_negs_per_pos 59.0 +41 51 training.batch_size 1.0 +41 52 model.embedding_dim 1.0 +41 52 loss.margin 0.8381198289564527 +41 52 optimizer.lr 0.0045620321062352425 +41 52 negative_sampler.num_negs_per_pos 3.0 +41 52 training.batch_size 1.0 +41 53 model.embedding_dim 0.0 +41 53 loss.margin 3.93655750669073 +41 53 optimizer.lr 0.07012422103892954 +41 53 negative_sampler.num_negs_per_pos 38.0 +41 53 training.batch_size 2.0 +41 54 model.embedding_dim 1.0 +41 54 loss.margin 9.994191903367591 +41 54 optimizer.lr 0.0030739444058574283 +41 54 negative_sampler.num_negs_per_pos 44.0 +41 54 training.batch_size 0.0 +41 55 model.embedding_dim 1.0 +41 55 loss.margin 9.508769198529562 +41 55 optimizer.lr 0.0025126644737972883 +41 55 negative_sampler.num_negs_per_pos 95.0 +41 55 training.batch_size 2.0 +41 56 model.embedding_dim 2.0 +41 56 loss.margin 7.799871590411656 +41 56 optimizer.lr 0.0026605048108044936 +41 56 negative_sampler.num_negs_per_pos 47.0 +41 56 training.batch_size 0.0 +41 57 model.embedding_dim 2.0 +41 57 loss.margin 5.7615671896162866 +41 57 optimizer.lr 0.022252356026396778 +41 57 negative_sampler.num_negs_per_pos 90.0 +41 57 training.batch_size 2.0 +41 58 model.embedding_dim 2.0 +41 58 loss.margin 6.886705559484853 +41 58 optimizer.lr 0.00649588488505353 +41 58 negative_sampler.num_negs_per_pos 2.0 +41 58 training.batch_size 0.0 +41 59 model.embedding_dim 2.0 +41 59 loss.margin 9.665247432772595 +41 59 optimizer.lr 0.002042710749445646 +41 59 negative_sampler.num_negs_per_pos 57.0 +41 59 training.batch_size 1.0 +41 60 model.embedding_dim 1.0 +41 60 loss.margin 6.129514129083931 +41 60 optimizer.lr 0.00449643356316153 +41 60 
negative_sampler.num_negs_per_pos 6.0 +41 60 training.batch_size 0.0 +41 61 model.embedding_dim 2.0 +41 61 loss.margin 7.160822213128348 +41 61 optimizer.lr 0.008107445122006507 +41 61 negative_sampler.num_negs_per_pos 27.0 +41 61 training.batch_size 1.0 +41 62 model.embedding_dim 1.0 +41 62 loss.margin 7.628890319016864 +41 62 optimizer.lr 0.005588139158387394 +41 62 negative_sampler.num_negs_per_pos 91.0 +41 62 training.batch_size 0.0 +41 63 model.embedding_dim 1.0 +41 63 loss.margin 8.39650546343388 +41 63 optimizer.lr 0.032139596453399796 +41 63 negative_sampler.num_negs_per_pos 45.0 +41 63 training.batch_size 2.0 +41 64 model.embedding_dim 1.0 +41 64 loss.margin 5.29389882755283 +41 64 optimizer.lr 0.0019910101434230183 +41 64 negative_sampler.num_negs_per_pos 65.0 +41 64 training.batch_size 2.0 +41 65 model.embedding_dim 2.0 +41 65 loss.margin 3.809720758056157 +41 65 optimizer.lr 0.003117201526160924 +41 65 negative_sampler.num_negs_per_pos 36.0 +41 65 training.batch_size 1.0 +41 66 model.embedding_dim 0.0 +41 66 loss.margin 5.1514973486305875 +41 66 optimizer.lr 0.04131368466718785 +41 66 negative_sampler.num_negs_per_pos 70.0 +41 66 training.batch_size 2.0 +41 67 model.embedding_dim 0.0 +41 67 loss.margin 3.3833825589023645 +41 67 optimizer.lr 0.07749667591772393 +41 67 negative_sampler.num_negs_per_pos 40.0 +41 67 training.batch_size 2.0 +41 68 model.embedding_dim 1.0 +41 68 loss.margin 6.730245347350152 +41 68 optimizer.lr 0.001242456630582546 +41 68 negative_sampler.num_negs_per_pos 21.0 +41 68 training.batch_size 0.0 +41 69 model.embedding_dim 1.0 +41 69 loss.margin 1.2627217042278223 +41 69 optimizer.lr 0.0037394632374858237 +41 69 negative_sampler.num_negs_per_pos 43.0 +41 69 training.batch_size 1.0 +41 70 model.embedding_dim 1.0 +41 70 loss.margin 2.0421539985129558 +41 70 optimizer.lr 0.0028329336135279877 +41 70 negative_sampler.num_negs_per_pos 0.0 +41 70 training.batch_size 2.0 +41 71 model.embedding_dim 2.0 +41 71 loss.margin 7.887532911571376 
+41 71 optimizer.lr 0.03731540343014158 +41 71 negative_sampler.num_negs_per_pos 40.0 +41 71 training.batch_size 1.0 +41 72 model.embedding_dim 1.0 +41 72 loss.margin 9.449055697332346 +41 72 optimizer.lr 0.013314229381031705 +41 72 negative_sampler.num_negs_per_pos 60.0 +41 72 training.batch_size 0.0 +41 73 model.embedding_dim 2.0 +41 73 loss.margin 8.654434220829746 +41 73 optimizer.lr 0.02851880601204514 +41 73 negative_sampler.num_negs_per_pos 77.0 +41 73 training.batch_size 2.0 +41 74 model.embedding_dim 1.0 +41 74 loss.margin 1.3368501421210925 +41 74 optimizer.lr 0.0010541697399351732 +41 74 negative_sampler.num_negs_per_pos 98.0 +41 74 training.batch_size 2.0 +41 75 model.embedding_dim 0.0 +41 75 loss.margin 6.343362414129612 +41 75 optimizer.lr 0.031189417498937756 +41 75 negative_sampler.num_negs_per_pos 39.0 +41 75 training.batch_size 2.0 +41 76 model.embedding_dim 1.0 +41 76 loss.margin 2.0864396278383643 +41 76 optimizer.lr 0.003585089874289637 +41 76 negative_sampler.num_negs_per_pos 54.0 +41 76 training.batch_size 2.0 +41 77 model.embedding_dim 1.0 +41 77 loss.margin 6.632834357372529 +41 77 optimizer.lr 0.07586606621430166 +41 77 negative_sampler.num_negs_per_pos 25.0 +41 77 training.batch_size 1.0 +41 78 model.embedding_dim 2.0 +41 78 loss.margin 3.7542988680018228 +41 78 optimizer.lr 0.00318227079450467 +41 78 negative_sampler.num_negs_per_pos 8.0 +41 78 training.batch_size 0.0 +41 79 model.embedding_dim 0.0 +41 79 loss.margin 8.343171591759326 +41 79 optimizer.lr 0.012371128902362428 +41 79 negative_sampler.num_negs_per_pos 21.0 +41 79 training.batch_size 2.0 +41 80 model.embedding_dim 1.0 +41 80 loss.margin 2.3985582710778885 +41 80 optimizer.lr 0.020409368217756324 +41 80 negative_sampler.num_negs_per_pos 73.0 +41 80 training.batch_size 1.0 +41 81 model.embedding_dim 2.0 +41 81 loss.margin 5.55822749545 +41 81 optimizer.lr 0.04663884640078064 +41 81 negative_sampler.num_negs_per_pos 80.0 +41 81 training.batch_size 2.0 +41 82 model.embedding_dim 
1.0 +41 82 loss.margin 8.521850665631916 +41 82 optimizer.lr 0.04429772970003776 +41 82 negative_sampler.num_negs_per_pos 86.0 +41 82 training.batch_size 1.0 +41 83 model.embedding_dim 0.0 +41 83 loss.margin 1.3172740890435566 +41 83 optimizer.lr 0.06254182604202364 +41 83 negative_sampler.num_negs_per_pos 60.0 +41 83 training.batch_size 0.0 +41 84 model.embedding_dim 1.0 +41 84 loss.margin 3.0690177091742696 +41 84 optimizer.lr 0.00859605858869322 +41 84 negative_sampler.num_negs_per_pos 7.0 +41 84 training.batch_size 0.0 +41 85 model.embedding_dim 2.0 +41 85 loss.margin 1.1428661779934666 +41 85 optimizer.lr 0.0010522513432377515 +41 85 negative_sampler.num_negs_per_pos 63.0 +41 85 training.batch_size 2.0 +41 86 model.embedding_dim 0.0 +41 86 loss.margin 7.925450489568137 +41 86 optimizer.lr 0.023574102746757475 +41 86 negative_sampler.num_negs_per_pos 7.0 +41 86 training.batch_size 0.0 +41 87 model.embedding_dim 2.0 +41 87 loss.margin 1.1952872329427098 +41 87 optimizer.lr 0.00532183154490338 +41 87 negative_sampler.num_negs_per_pos 7.0 +41 87 training.batch_size 1.0 +41 88 model.embedding_dim 2.0 +41 88 loss.margin 8.115774194654632 +41 88 optimizer.lr 0.06913727008418216 +41 88 negative_sampler.num_negs_per_pos 10.0 +41 88 training.batch_size 2.0 +41 89 model.embedding_dim 2.0 +41 89 loss.margin 9.822764976035753 +41 89 optimizer.lr 0.027538701315938423 +41 89 negative_sampler.num_negs_per_pos 52.0 +41 89 training.batch_size 1.0 +41 90 model.embedding_dim 0.0 +41 90 loss.margin 1.5128254522352074 +41 90 optimizer.lr 0.0023570553715664797 +41 90 negative_sampler.num_negs_per_pos 97.0 +41 90 training.batch_size 2.0 +41 91 model.embedding_dim 2.0 +41 91 loss.margin 6.89998728019106 +41 91 optimizer.lr 0.0019686330327380306 +41 91 negative_sampler.num_negs_per_pos 45.0 +41 91 training.batch_size 1.0 +41 92 model.embedding_dim 1.0 +41 92 loss.margin 3.3147656367904106 +41 92 optimizer.lr 0.009477865382785236 +41 92 negative_sampler.num_negs_per_pos 28.0 +41 92 
training.batch_size 1.0 +41 93 model.embedding_dim 1.0 +41 93 loss.margin 9.55883161609803 +41 93 optimizer.lr 0.0037911145411625193 +41 93 negative_sampler.num_negs_per_pos 28.0 +41 93 training.batch_size 0.0 +41 94 model.embedding_dim 1.0 +41 94 loss.margin 2.283294468014889 +41 94 optimizer.lr 0.09503093052502433 +41 94 negative_sampler.num_negs_per_pos 19.0 +41 94 training.batch_size 2.0 +41 95 model.embedding_dim 2.0 +41 95 loss.margin 8.125591102959184 +41 95 optimizer.lr 0.005281595773302154 +41 95 negative_sampler.num_negs_per_pos 63.0 +41 95 training.batch_size 2.0 +41 96 model.embedding_dim 2.0 +41 96 loss.margin 5.753569896770596 +41 96 optimizer.lr 0.003928022991845015 +41 96 negative_sampler.num_negs_per_pos 71.0 +41 96 training.batch_size 2.0 +41 97 model.embedding_dim 0.0 +41 97 loss.margin 5.810771331846446 +41 97 optimizer.lr 0.024992663535717278 +41 97 negative_sampler.num_negs_per_pos 80.0 +41 97 training.batch_size 0.0 +41 98 model.embedding_dim 2.0 +41 98 loss.margin 7.567730598120931 +41 98 optimizer.lr 0.0029862498278911092 +41 98 negative_sampler.num_negs_per_pos 69.0 +41 98 training.batch_size 1.0 +41 99 model.embedding_dim 0.0 +41 99 loss.margin 2.5201671653574405 +41 99 optimizer.lr 0.003010301437677766 +41 99 negative_sampler.num_negs_per_pos 62.0 +41 99 training.batch_size 0.0 +41 100 model.embedding_dim 0.0 +41 100 loss.margin 0.971135451418652 +41 100 optimizer.lr 0.016475771481366273 +41 100 negative_sampler.num_negs_per_pos 32.0 +41 100 training.batch_size 0.0 +41 1 dataset """kinships""" +41 1 model """complex""" +41 1 loss """marginranking""" +41 1 regularizer """no""" +41 1 optimizer """adam""" +41 1 training_loop """owa""" +41 1 negative_sampler """basic""" +41 1 evaluator """rankbased""" +41 2 dataset """kinships""" +41 2 model """complex""" +41 2 loss """marginranking""" +41 2 regularizer """no""" +41 2 optimizer """adam""" +41 2 training_loop """owa""" +41 2 negative_sampler """basic""" +41 2 evaluator """rankbased""" +41 3 
dataset """kinships""" +41 3 model """complex""" +41 3 loss """marginranking""" +41 3 regularizer """no""" +41 3 optimizer """adam""" +41 3 training_loop """owa""" +41 3 negative_sampler """basic""" +41 3 evaluator """rankbased""" +41 4 dataset """kinships""" +41 4 model """complex""" +41 4 loss """marginranking""" +41 4 regularizer """no""" +41 4 optimizer """adam""" +41 4 training_loop """owa""" +41 4 negative_sampler """basic""" +41 4 evaluator """rankbased""" +41 5 dataset """kinships""" +41 5 model """complex""" +41 5 loss """marginranking""" +41 5 regularizer """no""" +41 5 optimizer """adam""" +41 5 training_loop """owa""" +41 5 negative_sampler """basic""" +41 5 evaluator """rankbased""" +41 6 dataset """kinships""" +41 6 model """complex""" +41 6 loss """marginranking""" +41 6 regularizer """no""" +41 6 optimizer """adam""" +41 6 training_loop """owa""" +41 6 negative_sampler """basic""" +41 6 evaluator """rankbased""" +41 7 dataset """kinships""" +41 7 model """complex""" +41 7 loss """marginranking""" +41 7 regularizer """no""" +41 7 optimizer """adam""" +41 7 training_loop """owa""" +41 7 negative_sampler """basic""" +41 7 evaluator """rankbased""" +41 8 dataset """kinships""" +41 8 model """complex""" +41 8 loss """marginranking""" +41 8 regularizer """no""" +41 8 optimizer """adam""" +41 8 training_loop """owa""" +41 8 negative_sampler """basic""" +41 8 evaluator """rankbased""" +41 9 dataset """kinships""" +41 9 model """complex""" +41 9 loss """marginranking""" +41 9 regularizer """no""" +41 9 optimizer """adam""" +41 9 training_loop """owa""" +41 9 negative_sampler """basic""" +41 9 evaluator """rankbased""" +41 10 dataset """kinships""" +41 10 model """complex""" +41 10 loss """marginranking""" +41 10 regularizer """no""" +41 10 optimizer """adam""" +41 10 training_loop """owa""" +41 10 negative_sampler """basic""" +41 10 evaluator """rankbased""" +41 11 dataset """kinships""" +41 11 model """complex""" +41 11 loss """marginranking""" +41 11 
regularizer """no""" +41 11 optimizer """adam""" +41 11 training_loop """owa""" +41 11 negative_sampler """basic""" +41 11 evaluator """rankbased""" +41 12 dataset """kinships""" +41 12 model """complex""" +41 12 loss """marginranking""" +41 12 regularizer """no""" +41 12 optimizer """adam""" +41 12 training_loop """owa""" +41 12 negative_sampler """basic""" +41 12 evaluator """rankbased""" +41 13 dataset """kinships""" +41 13 model """complex""" +41 13 loss """marginranking""" +41 13 regularizer """no""" +41 13 optimizer """adam""" +41 13 training_loop """owa""" +41 13 negative_sampler """basic""" +41 13 evaluator """rankbased""" +41 14 dataset """kinships""" +41 14 model """complex""" +41 14 loss """marginranking""" +41 14 regularizer """no""" +41 14 optimizer """adam""" +41 14 training_loop """owa""" +41 14 negative_sampler """basic""" +41 14 evaluator """rankbased""" +41 15 dataset """kinships""" +41 15 model """complex""" +41 15 loss """marginranking""" +41 15 regularizer """no""" +41 15 optimizer """adam""" +41 15 training_loop """owa""" +41 15 negative_sampler """basic""" +41 15 evaluator """rankbased""" +41 16 dataset """kinships""" +41 16 model """complex""" +41 16 loss """marginranking""" +41 16 regularizer """no""" +41 16 optimizer """adam""" +41 16 training_loop """owa""" +41 16 negative_sampler """basic""" +41 16 evaluator """rankbased""" +41 17 dataset """kinships""" +41 17 model """complex""" +41 17 loss """marginranking""" +41 17 regularizer """no""" +41 17 optimizer """adam""" +41 17 training_loop """owa""" +41 17 negative_sampler """basic""" +41 17 evaluator """rankbased""" +41 18 dataset """kinships""" +41 18 model """complex""" +41 18 loss """marginranking""" +41 18 regularizer """no""" +41 18 optimizer """adam""" +41 18 training_loop """owa""" +41 18 negative_sampler """basic""" +41 18 evaluator """rankbased""" +41 19 dataset """kinships""" +41 19 model """complex""" +41 19 loss """marginranking""" +41 19 regularizer """no""" +41 19 optimizer 
"""adam""" +41 19 training_loop """owa""" +41 19 negative_sampler """basic""" +41 19 evaluator """rankbased""" +41 20 dataset """kinships""" +41 20 model """complex""" +41 20 loss """marginranking""" +41 20 regularizer """no""" +41 20 optimizer """adam""" +41 20 training_loop """owa""" +41 20 negative_sampler """basic""" +41 20 evaluator """rankbased""" +41 21 dataset """kinships""" +41 21 model """complex""" +41 21 loss """marginranking""" +41 21 regularizer """no""" +41 21 optimizer """adam""" +41 21 training_loop """owa""" +41 21 negative_sampler """basic""" +41 21 evaluator """rankbased""" +41 22 dataset """kinships""" +41 22 model """complex""" +41 22 loss """marginranking""" +41 22 regularizer """no""" +41 22 optimizer """adam""" +41 22 training_loop """owa""" +41 22 negative_sampler """basic""" +41 22 evaluator """rankbased""" +41 23 dataset """kinships""" +41 23 model """complex""" +41 23 loss """marginranking""" +41 23 regularizer """no""" +41 23 optimizer """adam""" +41 23 training_loop """owa""" +41 23 negative_sampler """basic""" +41 23 evaluator """rankbased""" +41 24 dataset """kinships""" +41 24 model """complex""" +41 24 loss """marginranking""" +41 24 regularizer """no""" +41 24 optimizer """adam""" +41 24 training_loop """owa""" +41 24 negative_sampler """basic""" +41 24 evaluator """rankbased""" +41 25 dataset """kinships""" +41 25 model """complex""" +41 25 loss """marginranking""" +41 25 regularizer """no""" +41 25 optimizer """adam""" +41 25 training_loop """owa""" +41 25 negative_sampler """basic""" +41 25 evaluator """rankbased""" +41 26 dataset """kinships""" +41 26 model """complex""" +41 26 loss """marginranking""" +41 26 regularizer """no""" +41 26 optimizer """adam""" +41 26 training_loop """owa""" +41 26 negative_sampler """basic""" +41 26 evaluator """rankbased""" +41 27 dataset """kinships""" +41 27 model """complex""" +41 27 loss """marginranking""" +41 27 regularizer """no""" +41 27 optimizer """adam""" +41 27 training_loop 
"""owa""" +41 27 negative_sampler """basic""" +41 27 evaluator """rankbased""" +41 28 dataset """kinships""" +41 28 model """complex""" +41 28 loss """marginranking""" +41 28 regularizer """no""" +41 28 optimizer """adam""" +41 28 training_loop """owa""" +41 28 negative_sampler """basic""" +41 28 evaluator """rankbased""" +41 29 dataset """kinships""" +41 29 model """complex""" +41 29 loss """marginranking""" +41 29 regularizer """no""" +41 29 optimizer """adam""" +41 29 training_loop """owa""" +41 29 negative_sampler """basic""" +41 29 evaluator """rankbased""" +41 30 dataset """kinships""" +41 30 model """complex""" +41 30 loss """marginranking""" +41 30 regularizer """no""" +41 30 optimizer """adam""" +41 30 training_loop """owa""" +41 30 negative_sampler """basic""" +41 30 evaluator """rankbased""" +41 31 dataset """kinships""" +41 31 model """complex""" +41 31 loss """marginranking""" +41 31 regularizer """no""" +41 31 optimizer """adam""" +41 31 training_loop """owa""" +41 31 negative_sampler """basic""" +41 31 evaluator """rankbased""" +41 32 dataset """kinships""" +41 32 model """complex""" +41 32 loss """marginranking""" +41 32 regularizer """no""" +41 32 optimizer """adam""" +41 32 training_loop """owa""" +41 32 negative_sampler """basic""" +41 32 evaluator """rankbased""" +41 33 dataset """kinships""" +41 33 model """complex""" +41 33 loss """marginranking""" +41 33 regularizer """no""" +41 33 optimizer """adam""" +41 33 training_loop """owa""" +41 33 negative_sampler """basic""" +41 33 evaluator """rankbased""" +41 34 dataset """kinships""" +41 34 model """complex""" +41 34 loss """marginranking""" +41 34 regularizer """no""" +41 34 optimizer """adam""" +41 34 training_loop """owa""" +41 34 negative_sampler """basic""" +41 34 evaluator """rankbased""" +41 35 dataset """kinships""" +41 35 model """complex""" +41 35 loss """marginranking""" +41 35 regularizer """no""" +41 35 optimizer """adam""" +41 35 training_loop """owa""" +41 35 negative_sampler 
"""basic""" +41 35 evaluator """rankbased""" +41 36 dataset """kinships""" +41 36 model """complex""" +41 36 loss """marginranking""" +41 36 regularizer """no""" +41 36 optimizer """adam""" +41 36 training_loop """owa""" +41 36 negative_sampler """basic""" +41 36 evaluator """rankbased""" +41 37 dataset """kinships""" +41 37 model """complex""" +41 37 loss """marginranking""" +41 37 regularizer """no""" +41 37 optimizer """adam""" +41 37 training_loop """owa""" +41 37 negative_sampler """basic""" +41 37 evaluator """rankbased""" +41 38 dataset """kinships""" +41 38 model """complex""" +41 38 loss """marginranking""" +41 38 regularizer """no""" +41 38 optimizer """adam""" +41 38 training_loop """owa""" +41 38 negative_sampler """basic""" +41 38 evaluator """rankbased""" +41 39 dataset """kinships""" +41 39 model """complex""" +41 39 loss """marginranking""" +41 39 regularizer """no""" +41 39 optimizer """adam""" +41 39 training_loop """owa""" +41 39 negative_sampler """basic""" +41 39 evaluator """rankbased""" +41 40 dataset """kinships""" +41 40 model """complex""" +41 40 loss """marginranking""" +41 40 regularizer """no""" +41 40 optimizer """adam""" +41 40 training_loop """owa""" +41 40 negative_sampler """basic""" +41 40 evaluator """rankbased""" +41 41 dataset """kinships""" +41 41 model """complex""" +41 41 loss """marginranking""" +41 41 regularizer """no""" +41 41 optimizer """adam""" +41 41 training_loop """owa""" +41 41 negative_sampler """basic""" +41 41 evaluator """rankbased""" +41 42 dataset """kinships""" +41 42 model """complex""" +41 42 loss """marginranking""" +41 42 regularizer """no""" +41 42 optimizer """adam""" +41 42 training_loop """owa""" +41 42 negative_sampler """basic""" +41 42 evaluator """rankbased""" +41 43 dataset """kinships""" +41 43 model """complex""" +41 43 loss """marginranking""" +41 43 regularizer """no""" +41 43 optimizer """adam""" +41 43 training_loop """owa""" +41 43 negative_sampler """basic""" +41 43 evaluator 
"""rankbased""" +41 44 dataset """kinships""" +41 44 model """complex""" +41 44 loss """marginranking""" +41 44 regularizer """no""" +41 44 optimizer """adam""" +41 44 training_loop """owa""" +41 44 negative_sampler """basic""" +41 44 evaluator """rankbased""" +41 45 dataset """kinships""" +41 45 model """complex""" +41 45 loss """marginranking""" +41 45 regularizer """no""" +41 45 optimizer """adam""" +41 45 training_loop """owa""" +41 45 negative_sampler """basic""" +41 45 evaluator """rankbased""" +41 46 dataset """kinships""" +41 46 model """complex""" +41 46 loss """marginranking""" +41 46 regularizer """no""" +41 46 optimizer """adam""" +41 46 training_loop """owa""" +41 46 negative_sampler """basic""" +41 46 evaluator """rankbased""" +41 47 dataset """kinships""" +41 47 model """complex""" +41 47 loss """marginranking""" +41 47 regularizer """no""" +41 47 optimizer """adam""" +41 47 training_loop """owa""" +41 47 negative_sampler """basic""" +41 47 evaluator """rankbased""" +41 48 dataset """kinships""" +41 48 model """complex""" +41 48 loss """marginranking""" +41 48 regularizer """no""" +41 48 optimizer """adam""" +41 48 training_loop """owa""" +41 48 negative_sampler """basic""" +41 48 evaluator """rankbased""" +41 49 dataset """kinships""" +41 49 model """complex""" +41 49 loss """marginranking""" +41 49 regularizer """no""" +41 49 optimizer """adam""" +41 49 training_loop """owa""" +41 49 negative_sampler """basic""" +41 49 evaluator """rankbased""" +41 50 dataset """kinships""" +41 50 model """complex""" +41 50 loss """marginranking""" +41 50 regularizer """no""" +41 50 optimizer """adam""" +41 50 training_loop """owa""" +41 50 negative_sampler """basic""" +41 50 evaluator """rankbased""" +41 51 dataset """kinships""" +41 51 model """complex""" +41 51 loss """marginranking""" +41 51 regularizer """no""" +41 51 optimizer """adam""" +41 51 training_loop """owa""" +41 51 negative_sampler """basic""" +41 51 evaluator """rankbased""" +41 52 dataset 
"""kinships""" +41 52 model """complex""" +41 52 loss """marginranking""" +41 52 regularizer """no""" +41 52 optimizer """adam""" +41 52 training_loop """owa""" +41 52 negative_sampler """basic""" +41 52 evaluator """rankbased""" +41 53 dataset """kinships""" +41 53 model """complex""" +41 53 loss """marginranking""" +41 53 regularizer """no""" +41 53 optimizer """adam""" +41 53 training_loop """owa""" +41 53 negative_sampler """basic""" +41 53 evaluator """rankbased""" +41 54 dataset """kinships""" +41 54 model """complex""" +41 54 loss """marginranking""" +41 54 regularizer """no""" +41 54 optimizer """adam""" +41 54 training_loop """owa""" +41 54 negative_sampler """basic""" +41 54 evaluator """rankbased""" +41 55 dataset """kinships""" +41 55 model """complex""" +41 55 loss """marginranking""" +41 55 regularizer """no""" +41 55 optimizer """adam""" +41 55 training_loop """owa""" +41 55 negative_sampler """basic""" +41 55 evaluator """rankbased""" +41 56 dataset """kinships""" +41 56 model """complex""" +41 56 loss """marginranking""" +41 56 regularizer """no""" +41 56 optimizer """adam""" +41 56 training_loop """owa""" +41 56 negative_sampler """basic""" +41 56 evaluator """rankbased""" +41 57 dataset """kinships""" +41 57 model """complex""" +41 57 loss """marginranking""" +41 57 regularizer """no""" +41 57 optimizer """adam""" +41 57 training_loop """owa""" +41 57 negative_sampler """basic""" +41 57 evaluator """rankbased""" +41 58 dataset """kinships""" +41 58 model """complex""" +41 58 loss """marginranking""" +41 58 regularizer """no""" +41 58 optimizer """adam""" +41 58 training_loop """owa""" +41 58 negative_sampler """basic""" +41 58 evaluator """rankbased""" +41 59 dataset """kinships""" +41 59 model """complex""" +41 59 loss """marginranking""" +41 59 regularizer """no""" +41 59 optimizer """adam""" +41 59 training_loop """owa""" +41 59 negative_sampler """basic""" +41 59 evaluator """rankbased""" +41 60 dataset """kinships""" +41 60 model 
"""complex""" +41 60 loss """marginranking""" +41 60 regularizer """no""" +41 60 optimizer """adam""" +41 60 training_loop """owa""" +41 60 negative_sampler """basic""" +41 60 evaluator """rankbased""" +41 61 dataset """kinships""" +41 61 model """complex""" +41 61 loss """marginranking""" +41 61 regularizer """no""" +41 61 optimizer """adam""" +41 61 training_loop """owa""" +41 61 negative_sampler """basic""" +41 61 evaluator """rankbased""" +41 62 dataset """kinships""" +41 62 model """complex""" +41 62 loss """marginranking""" +41 62 regularizer """no""" +41 62 optimizer """adam""" +41 62 training_loop """owa""" +41 62 negative_sampler """basic""" +41 62 evaluator """rankbased""" +41 63 dataset """kinships""" +41 63 model """complex""" +41 63 loss """marginranking""" +41 63 regularizer """no""" +41 63 optimizer """adam""" +41 63 training_loop """owa""" +41 63 negative_sampler """basic""" +41 63 evaluator """rankbased""" +41 64 dataset """kinships""" +41 64 model """complex""" +41 64 loss """marginranking""" +41 64 regularizer """no""" +41 64 optimizer """adam""" +41 64 training_loop """owa""" +41 64 negative_sampler """basic""" +41 64 evaluator """rankbased""" +41 65 dataset """kinships""" +41 65 model """complex""" +41 65 loss """marginranking""" +41 65 regularizer """no""" +41 65 optimizer """adam""" +41 65 training_loop """owa""" +41 65 negative_sampler """basic""" +41 65 evaluator """rankbased""" +41 66 dataset """kinships""" +41 66 model """complex""" +41 66 loss """marginranking""" +41 66 regularizer """no""" +41 66 optimizer """adam""" +41 66 training_loop """owa""" +41 66 negative_sampler """basic""" +41 66 evaluator """rankbased""" +41 67 dataset """kinships""" +41 67 model """complex""" +41 67 loss """marginranking""" +41 67 regularizer """no""" +41 67 optimizer """adam""" +41 67 training_loop """owa""" +41 67 negative_sampler """basic""" +41 67 evaluator """rankbased""" +41 68 dataset """kinships""" +41 68 model """complex""" +41 68 loss 
"""marginranking""" +41 68 regularizer """no""" +41 68 optimizer """adam""" +41 68 training_loop """owa""" +41 68 negative_sampler """basic""" +41 68 evaluator """rankbased""" +41 69 dataset """kinships""" +41 69 model """complex""" +41 69 loss """marginranking""" +41 69 regularizer """no""" +41 69 optimizer """adam""" +41 69 training_loop """owa""" +41 69 negative_sampler """basic""" +41 69 evaluator """rankbased""" +41 70 dataset """kinships""" +41 70 model """complex""" +41 70 loss """marginranking""" +41 70 regularizer """no""" +41 70 optimizer """adam""" +41 70 training_loop """owa""" +41 70 negative_sampler """basic""" +41 70 evaluator """rankbased""" +41 71 dataset """kinships""" +41 71 model """complex""" +41 71 loss """marginranking""" +41 71 regularizer """no""" +41 71 optimizer """adam""" +41 71 training_loop """owa""" +41 71 negative_sampler """basic""" +41 71 evaluator """rankbased""" +41 72 dataset """kinships""" +41 72 model """complex""" +41 72 loss """marginranking""" +41 72 regularizer """no""" +41 72 optimizer """adam""" +41 72 training_loop """owa""" +41 72 negative_sampler """basic""" +41 72 evaluator """rankbased""" +41 73 dataset """kinships""" +41 73 model """complex""" +41 73 loss """marginranking""" +41 73 regularizer """no""" +41 73 optimizer """adam""" +41 73 training_loop """owa""" +41 73 negative_sampler """basic""" +41 73 evaluator """rankbased""" +41 74 dataset """kinships""" +41 74 model """complex""" +41 74 loss """marginranking""" +41 74 regularizer """no""" +41 74 optimizer """adam""" +41 74 training_loop """owa""" +41 74 negative_sampler """basic""" +41 74 evaluator """rankbased""" +41 75 dataset """kinships""" +41 75 model """complex""" +41 75 loss """marginranking""" +41 75 regularizer """no""" +41 75 optimizer """adam""" +41 75 training_loop """owa""" +41 75 negative_sampler """basic""" +41 75 evaluator """rankbased""" +41 76 dataset """kinships""" +41 76 model """complex""" +41 76 loss """marginranking""" +41 76 regularizer 
"""no""" +41 76 optimizer """adam""" +41 76 training_loop """owa""" +41 76 negative_sampler """basic""" +41 76 evaluator """rankbased""" +41 77 dataset """kinships""" +41 77 model """complex""" +41 77 loss """marginranking""" +41 77 regularizer """no""" +41 77 optimizer """adam""" +41 77 training_loop """owa""" +41 77 negative_sampler """basic""" +41 77 evaluator """rankbased""" +41 78 dataset """kinships""" +41 78 model """complex""" +41 78 loss """marginranking""" +41 78 regularizer """no""" +41 78 optimizer """adam""" +41 78 training_loop """owa""" +41 78 negative_sampler """basic""" +41 78 evaluator """rankbased""" +41 79 dataset """kinships""" +41 79 model """complex""" +41 79 loss """marginranking""" +41 79 regularizer """no""" +41 79 optimizer """adam""" +41 79 training_loop """owa""" +41 79 negative_sampler """basic""" +41 79 evaluator """rankbased""" +41 80 dataset """kinships""" +41 80 model """complex""" +41 80 loss """marginranking""" +41 80 regularizer """no""" +41 80 optimizer """adam""" +41 80 training_loop """owa""" +41 80 negative_sampler """basic""" +41 80 evaluator """rankbased""" +41 81 dataset """kinships""" +41 81 model """complex""" +41 81 loss """marginranking""" +41 81 regularizer """no""" +41 81 optimizer """adam""" +41 81 training_loop """owa""" +41 81 negative_sampler """basic""" +41 81 evaluator """rankbased""" +41 82 dataset """kinships""" +41 82 model """complex""" +41 82 loss """marginranking""" +41 82 regularizer """no""" +41 82 optimizer """adam""" +41 82 training_loop """owa""" +41 82 negative_sampler """basic""" +41 82 evaluator """rankbased""" +41 83 dataset """kinships""" +41 83 model """complex""" +41 83 loss """marginranking""" +41 83 regularizer """no""" +41 83 optimizer """adam""" +41 83 training_loop """owa""" +41 83 negative_sampler """basic""" +41 83 evaluator """rankbased""" +41 84 dataset """kinships""" +41 84 model """complex""" +41 84 loss """marginranking""" +41 84 regularizer """no""" +41 84 optimizer """adam""" 
+41 84 training_loop """owa""" +41 84 negative_sampler """basic""" +41 84 evaluator """rankbased""" +41 85 dataset """kinships""" +41 85 model """complex""" +41 85 loss """marginranking""" +41 85 regularizer """no""" +41 85 optimizer """adam""" +41 85 training_loop """owa""" +41 85 negative_sampler """basic""" +41 85 evaluator """rankbased""" +41 86 dataset """kinships""" +41 86 model """complex""" +41 86 loss """marginranking""" +41 86 regularizer """no""" +41 86 optimizer """adam""" +41 86 training_loop """owa""" +41 86 negative_sampler """basic""" +41 86 evaluator """rankbased""" +41 87 dataset """kinships""" +41 87 model """complex""" +41 87 loss """marginranking""" +41 87 regularizer """no""" +41 87 optimizer """adam""" +41 87 training_loop """owa""" +41 87 negative_sampler """basic""" +41 87 evaluator """rankbased""" +41 88 dataset """kinships""" +41 88 model """complex""" +41 88 loss """marginranking""" +41 88 regularizer """no""" +41 88 optimizer """adam""" +41 88 training_loop """owa""" +41 88 negative_sampler """basic""" +41 88 evaluator """rankbased""" +41 89 dataset """kinships""" +41 89 model """complex""" +41 89 loss """marginranking""" +41 89 regularizer """no""" +41 89 optimizer """adam""" +41 89 training_loop """owa""" +41 89 negative_sampler """basic""" +41 89 evaluator """rankbased""" +41 90 dataset """kinships""" +41 90 model """complex""" +41 90 loss """marginranking""" +41 90 regularizer """no""" +41 90 optimizer """adam""" +41 90 training_loop """owa""" +41 90 negative_sampler """basic""" +41 90 evaluator """rankbased""" +41 91 dataset """kinships""" +41 91 model """complex""" +41 91 loss """marginranking""" +41 91 regularizer """no""" +41 91 optimizer """adam""" +41 91 training_loop """owa""" +41 91 negative_sampler """basic""" +41 91 evaluator """rankbased""" +41 92 dataset """kinships""" +41 92 model """complex""" +41 92 loss """marginranking""" +41 92 regularizer """no""" +41 92 optimizer """adam""" +41 92 training_loop """owa""" +41 92 
negative_sampler """basic""" +41 92 evaluator """rankbased""" +41 93 dataset """kinships""" +41 93 model """complex""" +41 93 loss """marginranking""" +41 93 regularizer """no""" +41 93 optimizer """adam""" +41 93 training_loop """owa""" +41 93 negative_sampler """basic""" +41 93 evaluator """rankbased""" +41 94 dataset """kinships""" +41 94 model """complex""" +41 94 loss """marginranking""" +41 94 regularizer """no""" +41 94 optimizer """adam""" +41 94 training_loop """owa""" +41 94 negative_sampler """basic""" +41 94 evaluator """rankbased""" +41 95 dataset """kinships""" +41 95 model """complex""" +41 95 loss """marginranking""" +41 95 regularizer """no""" +41 95 optimizer """adam""" +41 95 training_loop """owa""" +41 95 negative_sampler """basic""" +41 95 evaluator """rankbased""" +41 96 dataset """kinships""" +41 96 model """complex""" +41 96 loss """marginranking""" +41 96 regularizer """no""" +41 96 optimizer """adam""" +41 96 training_loop """owa""" +41 96 negative_sampler """basic""" +41 96 evaluator """rankbased""" +41 97 dataset """kinships""" +41 97 model """complex""" +41 97 loss """marginranking""" +41 97 regularizer """no""" +41 97 optimizer """adam""" +41 97 training_loop """owa""" +41 97 negative_sampler """basic""" +41 97 evaluator """rankbased""" +41 98 dataset """kinships""" +41 98 model """complex""" +41 98 loss """marginranking""" +41 98 regularizer """no""" +41 98 optimizer """adam""" +41 98 training_loop """owa""" +41 98 negative_sampler """basic""" +41 98 evaluator """rankbased""" +41 99 dataset """kinships""" +41 99 model """complex""" +41 99 loss """marginranking""" +41 99 regularizer """no""" +41 99 optimizer """adam""" +41 99 training_loop """owa""" +41 99 negative_sampler """basic""" +41 99 evaluator """rankbased""" +41 100 dataset """kinships""" +41 100 model """complex""" +41 100 loss """marginranking""" +41 100 regularizer """no""" +41 100 optimizer """adam""" +41 100 training_loop """owa""" +41 100 negative_sampler """basic""" +41 
100 evaluator """rankbased""" +42 1 model.embedding_dim 0.0 +42 1 optimizer.lr 0.002658483988436463 +42 1 negative_sampler.num_negs_per_pos 31.0 +42 1 training.batch_size 1.0 +42 2 model.embedding_dim 1.0 +42 2 optimizer.lr 0.0027598328652434223 +42 2 negative_sampler.num_negs_per_pos 57.0 +42 2 training.batch_size 2.0 +42 3 model.embedding_dim 0.0 +42 3 optimizer.lr 0.0015920045775936324 +42 3 negative_sampler.num_negs_per_pos 33.0 +42 3 training.batch_size 2.0 +42 4 model.embedding_dim 0.0 +42 4 optimizer.lr 0.0346651448503982 +42 4 negative_sampler.num_negs_per_pos 42.0 +42 4 training.batch_size 1.0 +42 5 model.embedding_dim 0.0 +42 5 optimizer.lr 0.00547278340294747 +42 5 negative_sampler.num_negs_per_pos 13.0 +42 5 training.batch_size 2.0 +42 6 model.embedding_dim 0.0 +42 6 optimizer.lr 0.03238319011580265 +42 6 negative_sampler.num_negs_per_pos 37.0 +42 6 training.batch_size 2.0 +42 7 model.embedding_dim 2.0 +42 7 optimizer.lr 0.0036969700407563483 +42 7 negative_sampler.num_negs_per_pos 14.0 +42 7 training.batch_size 0.0 +42 8 model.embedding_dim 1.0 +42 8 optimizer.lr 0.003232596461718021 +42 8 negative_sampler.num_negs_per_pos 91.0 +42 8 training.batch_size 0.0 +42 9 model.embedding_dim 2.0 +42 9 optimizer.lr 0.002753606617365717 +42 9 negative_sampler.num_negs_per_pos 21.0 +42 9 training.batch_size 1.0 +42 10 model.embedding_dim 2.0 +42 10 optimizer.lr 0.0010080956419895187 +42 10 negative_sampler.num_negs_per_pos 38.0 +42 10 training.batch_size 0.0 +42 11 model.embedding_dim 1.0 +42 11 optimizer.lr 0.010789179256659958 +42 11 negative_sampler.num_negs_per_pos 70.0 +42 11 training.batch_size 1.0 +42 12 model.embedding_dim 0.0 +42 12 optimizer.lr 0.050796328396011454 +42 12 negative_sampler.num_negs_per_pos 71.0 +42 12 training.batch_size 1.0 +42 13 model.embedding_dim 0.0 +42 13 optimizer.lr 0.008490202044624837 +42 13 negative_sampler.num_negs_per_pos 41.0 +42 13 training.batch_size 1.0 +42 14 model.embedding_dim 0.0 +42 14 optimizer.lr 
0.06009969284425662 +42 14 negative_sampler.num_negs_per_pos 77.0 +42 14 training.batch_size 1.0 +42 15 model.embedding_dim 2.0 +42 15 optimizer.lr 0.0021131600817558918 +42 15 negative_sampler.num_negs_per_pos 19.0 +42 15 training.batch_size 2.0 +42 16 model.embedding_dim 2.0 +42 16 optimizer.lr 0.005780472486223525 +42 16 negative_sampler.num_negs_per_pos 66.0 +42 16 training.batch_size 0.0 +42 17 model.embedding_dim 1.0 +42 17 optimizer.lr 0.0013543143549228866 +42 17 negative_sampler.num_negs_per_pos 30.0 +42 17 training.batch_size 0.0 +42 18 model.embedding_dim 1.0 +42 18 optimizer.lr 0.04224709277741956 +42 18 negative_sampler.num_negs_per_pos 10.0 +42 18 training.batch_size 2.0 +42 19 model.embedding_dim 2.0 +42 19 optimizer.lr 0.0030564143442875797 +42 19 negative_sampler.num_negs_per_pos 84.0 +42 19 training.batch_size 2.0 +42 20 model.embedding_dim 1.0 +42 20 optimizer.lr 0.017771548034172108 +42 20 negative_sampler.num_negs_per_pos 8.0 +42 20 training.batch_size 0.0 +42 21 model.embedding_dim 1.0 +42 21 optimizer.lr 0.0037734407820312036 +42 21 negative_sampler.num_negs_per_pos 58.0 +42 21 training.batch_size 0.0 +42 22 model.embedding_dim 2.0 +42 22 optimizer.lr 0.009156211058585828 +42 22 negative_sampler.num_negs_per_pos 31.0 +42 22 training.batch_size 0.0 +42 23 model.embedding_dim 0.0 +42 23 optimizer.lr 0.006826252755372979 +42 23 negative_sampler.num_negs_per_pos 79.0 +42 23 training.batch_size 2.0 +42 24 model.embedding_dim 0.0 +42 24 optimizer.lr 0.0026355232664002966 +42 24 negative_sampler.num_negs_per_pos 30.0 +42 24 training.batch_size 1.0 +42 25 model.embedding_dim 2.0 +42 25 optimizer.lr 0.0010657803711796926 +42 25 negative_sampler.num_negs_per_pos 37.0 +42 25 training.batch_size 1.0 +42 26 model.embedding_dim 0.0 +42 26 optimizer.lr 0.05336461649781969 +42 26 negative_sampler.num_negs_per_pos 4.0 +42 26 training.batch_size 1.0 +42 27 model.embedding_dim 0.0 +42 27 optimizer.lr 0.019375007405773095 +42 27 negative_sampler.num_negs_per_pos 
18.0 +42 27 training.batch_size 2.0 +42 28 model.embedding_dim 0.0 +42 28 optimizer.lr 0.09582084641768228 +42 28 negative_sampler.num_negs_per_pos 7.0 +42 28 training.batch_size 1.0 +42 29 model.embedding_dim 1.0 +42 29 optimizer.lr 0.006647476002452001 +42 29 negative_sampler.num_negs_per_pos 1.0 +42 29 training.batch_size 1.0 +42 30 model.embedding_dim 0.0 +42 30 optimizer.lr 0.012902654914593172 +42 30 negative_sampler.num_negs_per_pos 1.0 +42 30 training.batch_size 0.0 +42 31 model.embedding_dim 0.0 +42 31 optimizer.lr 0.005151634750783094 +42 31 negative_sampler.num_negs_per_pos 49.0 +42 31 training.batch_size 0.0 +42 32 model.embedding_dim 2.0 +42 32 optimizer.lr 0.0030189671507459435 +42 32 negative_sampler.num_negs_per_pos 56.0 +42 32 training.batch_size 2.0 +42 33 model.embedding_dim 0.0 +42 33 optimizer.lr 0.024674483337175895 +42 33 negative_sampler.num_negs_per_pos 22.0 +42 33 training.batch_size 0.0 +42 34 model.embedding_dim 2.0 +42 34 optimizer.lr 0.09350807135978129 +42 34 negative_sampler.num_negs_per_pos 15.0 +42 34 training.batch_size 1.0 +42 35 model.embedding_dim 2.0 +42 35 optimizer.lr 0.004548790314286109 +42 35 negative_sampler.num_negs_per_pos 75.0 +42 35 training.batch_size 1.0 +42 36 model.embedding_dim 2.0 +42 36 optimizer.lr 0.022708365036195912 +42 36 negative_sampler.num_negs_per_pos 1.0 +42 36 training.batch_size 2.0 +42 37 model.embedding_dim 1.0 +42 37 optimizer.lr 0.03720248668638285 +42 37 negative_sampler.num_negs_per_pos 33.0 +42 37 training.batch_size 0.0 +42 38 model.embedding_dim 1.0 +42 38 optimizer.lr 0.023372816481175507 +42 38 negative_sampler.num_negs_per_pos 91.0 +42 38 training.batch_size 1.0 +42 39 model.embedding_dim 2.0 +42 39 optimizer.lr 0.0038028876657079593 +42 39 negative_sampler.num_negs_per_pos 9.0 +42 39 training.batch_size 2.0 +42 40 model.embedding_dim 1.0 +42 40 optimizer.lr 0.0994174610131317 +42 40 negative_sampler.num_negs_per_pos 35.0 +42 40 training.batch_size 1.0 +42 41 model.embedding_dim 1.0 +42 
41 optimizer.lr 0.06334240746983258 +42 41 negative_sampler.num_negs_per_pos 79.0 +42 41 training.batch_size 1.0 +42 42 model.embedding_dim 1.0 +42 42 optimizer.lr 0.029812026215482117 +42 42 negative_sampler.num_negs_per_pos 33.0 +42 42 training.batch_size 0.0 +42 43 model.embedding_dim 2.0 +42 43 optimizer.lr 0.01058355568603607 +42 43 negative_sampler.num_negs_per_pos 16.0 +42 43 training.batch_size 0.0 +42 44 model.embedding_dim 2.0 +42 44 optimizer.lr 0.009257430286351146 +42 44 negative_sampler.num_negs_per_pos 77.0 +42 44 training.batch_size 0.0 +42 45 model.embedding_dim 1.0 +42 45 optimizer.lr 0.006013514049474918 +42 45 negative_sampler.num_negs_per_pos 43.0 +42 45 training.batch_size 1.0 +42 1 dataset """wn18rr""" +42 1 model """complex""" +42 1 loss """bceaftersigmoid""" +42 1 regularizer """no""" +42 1 optimizer """adam""" +42 1 training_loop """owa""" +42 1 negative_sampler """basic""" +42 1 evaluator """rankbased""" +42 2 dataset """wn18rr""" +42 2 model """complex""" +42 2 loss """bceaftersigmoid""" +42 2 regularizer """no""" +42 2 optimizer """adam""" +42 2 training_loop """owa""" +42 2 negative_sampler """basic""" +42 2 evaluator """rankbased""" +42 3 dataset """wn18rr""" +42 3 model """complex""" +42 3 loss """bceaftersigmoid""" +42 3 regularizer """no""" +42 3 optimizer """adam""" +42 3 training_loop """owa""" +42 3 negative_sampler """basic""" +42 3 evaluator """rankbased""" +42 4 dataset """wn18rr""" +42 4 model """complex""" +42 4 loss """bceaftersigmoid""" +42 4 regularizer """no""" +42 4 optimizer """adam""" +42 4 training_loop """owa""" +42 4 negative_sampler """basic""" +42 4 evaluator """rankbased""" +42 5 dataset """wn18rr""" +42 5 model """complex""" +42 5 loss """bceaftersigmoid""" +42 5 regularizer """no""" +42 5 optimizer """adam""" +42 5 training_loop """owa""" +42 5 negative_sampler """basic""" +42 5 evaluator """rankbased""" +42 6 dataset """wn18rr""" +42 6 model """complex""" +42 6 loss """bceaftersigmoid""" +42 6 regularizer 
"""no""" +42 6 optimizer """adam""" +42 6 training_loop """owa""" +42 6 negative_sampler """basic""" +42 6 evaluator """rankbased""" +42 7 dataset """wn18rr""" +42 7 model """complex""" +42 7 loss """bceaftersigmoid""" +42 7 regularizer """no""" +42 7 optimizer """adam""" +42 7 training_loop """owa""" +42 7 negative_sampler """basic""" +42 7 evaluator """rankbased""" +42 8 dataset """wn18rr""" +42 8 model """complex""" +42 8 loss """bceaftersigmoid""" +42 8 regularizer """no""" +42 8 optimizer """adam""" +42 8 training_loop """owa""" +42 8 negative_sampler """basic""" +42 8 evaluator """rankbased""" +42 9 dataset """wn18rr""" +42 9 model """complex""" +42 9 loss """bceaftersigmoid""" +42 9 regularizer """no""" +42 9 optimizer """adam""" +42 9 training_loop """owa""" +42 9 negative_sampler """basic""" +42 9 evaluator """rankbased""" +42 10 dataset """wn18rr""" +42 10 model """complex""" +42 10 loss """bceaftersigmoid""" +42 10 regularizer """no""" +42 10 optimizer """adam""" +42 10 training_loop """owa""" +42 10 negative_sampler """basic""" +42 10 evaluator """rankbased""" +42 11 dataset """wn18rr""" +42 11 model """complex""" +42 11 loss """bceaftersigmoid""" +42 11 regularizer """no""" +42 11 optimizer """adam""" +42 11 training_loop """owa""" +42 11 negative_sampler """basic""" +42 11 evaluator """rankbased""" +42 12 dataset """wn18rr""" +42 12 model """complex""" +42 12 loss """bceaftersigmoid""" +42 12 regularizer """no""" +42 12 optimizer """adam""" +42 12 training_loop """owa""" +42 12 negative_sampler """basic""" +42 12 evaluator """rankbased""" +42 13 dataset """wn18rr""" +42 13 model """complex""" +42 13 loss """bceaftersigmoid""" +42 13 regularizer """no""" +42 13 optimizer """adam""" +42 13 training_loop """owa""" +42 13 negative_sampler """basic""" +42 13 evaluator """rankbased""" +42 14 dataset """wn18rr""" +42 14 model """complex""" +42 14 loss """bceaftersigmoid""" +42 14 regularizer """no""" +42 14 optimizer """adam""" +42 14 training_loop """owa""" 
+42 14 negative_sampler """basic""" +42 14 evaluator """rankbased""" +42 15 dataset """wn18rr""" +42 15 model """complex""" +42 15 loss """bceaftersigmoid""" +42 15 regularizer """no""" +42 15 optimizer """adam""" +42 15 training_loop """owa""" +42 15 negative_sampler """basic""" +42 15 evaluator """rankbased""" +42 16 dataset """wn18rr""" +42 16 model """complex""" +42 16 loss """bceaftersigmoid""" +42 16 regularizer """no""" +42 16 optimizer """adam""" +42 16 training_loop """owa""" +42 16 negative_sampler """basic""" +42 16 evaluator """rankbased""" +42 17 dataset """wn18rr""" +42 17 model """complex""" +42 17 loss """bceaftersigmoid""" +42 17 regularizer """no""" +42 17 optimizer """adam""" +42 17 training_loop """owa""" +42 17 negative_sampler """basic""" +42 17 evaluator """rankbased""" +42 18 dataset """wn18rr""" +42 18 model """complex""" +42 18 loss """bceaftersigmoid""" +42 18 regularizer """no""" +42 18 optimizer """adam""" +42 18 training_loop """owa""" +42 18 negative_sampler """basic""" +42 18 evaluator """rankbased""" +42 19 dataset """wn18rr""" +42 19 model """complex""" +42 19 loss """bceaftersigmoid""" +42 19 regularizer """no""" +42 19 optimizer """adam""" +42 19 training_loop """owa""" +42 19 negative_sampler """basic""" +42 19 evaluator """rankbased""" +42 20 dataset """wn18rr""" +42 20 model """complex""" +42 20 loss """bceaftersigmoid""" +42 20 regularizer """no""" +42 20 optimizer """adam""" +42 20 training_loop """owa""" +42 20 negative_sampler """basic""" +42 20 evaluator """rankbased""" +42 21 dataset """wn18rr""" +42 21 model """complex""" +42 21 loss """bceaftersigmoid""" +42 21 regularizer """no""" +42 21 optimizer """adam""" +42 21 training_loop """owa""" +42 21 negative_sampler """basic""" +42 21 evaluator """rankbased""" +42 22 dataset """wn18rr""" +42 22 model """complex""" +42 22 loss """bceaftersigmoid""" +42 22 regularizer """no""" +42 22 optimizer """adam""" +42 22 training_loop """owa""" +42 22 negative_sampler """basic""" +42 
22 evaluator """rankbased""" +42 23 dataset """wn18rr""" +42 23 model """complex""" +42 23 loss """bceaftersigmoid""" +42 23 regularizer """no""" +42 23 optimizer """adam""" +42 23 training_loop """owa""" +42 23 negative_sampler """basic""" +42 23 evaluator """rankbased""" +42 24 dataset """wn18rr""" +42 24 model """complex""" +42 24 loss """bceaftersigmoid""" +42 24 regularizer """no""" +42 24 optimizer """adam""" +42 24 training_loop """owa""" +42 24 negative_sampler """basic""" +42 24 evaluator """rankbased""" +42 25 dataset """wn18rr""" +42 25 model """complex""" +42 25 loss """bceaftersigmoid""" +42 25 regularizer """no""" +42 25 optimizer """adam""" +42 25 training_loop """owa""" +42 25 negative_sampler """basic""" +42 25 evaluator """rankbased""" +42 26 dataset """wn18rr""" +42 26 model """complex""" +42 26 loss """bceaftersigmoid""" +42 26 regularizer """no""" +42 26 optimizer """adam""" +42 26 training_loop """owa""" +42 26 negative_sampler """basic""" +42 26 evaluator """rankbased""" +42 27 dataset """wn18rr""" +42 27 model """complex""" +42 27 loss """bceaftersigmoid""" +42 27 regularizer """no""" +42 27 optimizer """adam""" +42 27 training_loop """owa""" +42 27 negative_sampler """basic""" +42 27 evaluator """rankbased""" +42 28 dataset """wn18rr""" +42 28 model """complex""" +42 28 loss """bceaftersigmoid""" +42 28 regularizer """no""" +42 28 optimizer """adam""" +42 28 training_loop """owa""" +42 28 negative_sampler """basic""" +42 28 evaluator """rankbased""" +42 29 dataset """wn18rr""" +42 29 model """complex""" +42 29 loss """bceaftersigmoid""" +42 29 regularizer """no""" +42 29 optimizer """adam""" +42 29 training_loop """owa""" +42 29 negative_sampler """basic""" +42 29 evaluator """rankbased""" +42 30 dataset """wn18rr""" +42 30 model """complex""" +42 30 loss """bceaftersigmoid""" +42 30 regularizer """no""" +42 30 optimizer """adam""" +42 30 training_loop """owa""" +42 30 negative_sampler """basic""" +42 30 evaluator """rankbased""" +42 31 
dataset """wn18rr""" +42 31 model """complex""" +42 31 loss """bceaftersigmoid""" +42 31 regularizer """no""" +42 31 optimizer """adam""" +42 31 training_loop """owa""" +42 31 negative_sampler """basic""" +42 31 evaluator """rankbased""" +42 32 dataset """wn18rr""" +42 32 model """complex""" +42 32 loss """bceaftersigmoid""" +42 32 regularizer """no""" +42 32 optimizer """adam""" +42 32 training_loop """owa""" +42 32 negative_sampler """basic""" +42 32 evaluator """rankbased""" +42 33 dataset """wn18rr""" +42 33 model """complex""" +42 33 loss """bceaftersigmoid""" +42 33 regularizer """no""" +42 33 optimizer """adam""" +42 33 training_loop """owa""" +42 33 negative_sampler """basic""" +42 33 evaluator """rankbased""" +42 34 dataset """wn18rr""" +42 34 model """complex""" +42 34 loss """bceaftersigmoid""" +42 34 regularizer """no""" +42 34 optimizer """adam""" +42 34 training_loop """owa""" +42 34 negative_sampler """basic""" +42 34 evaluator """rankbased""" +42 35 dataset """wn18rr""" +42 35 model """complex""" +42 35 loss """bceaftersigmoid""" +42 35 regularizer """no""" +42 35 optimizer """adam""" +42 35 training_loop """owa""" +42 35 negative_sampler """basic""" +42 35 evaluator """rankbased""" +42 36 dataset """wn18rr""" +42 36 model """complex""" +42 36 loss """bceaftersigmoid""" +42 36 regularizer """no""" +42 36 optimizer """adam""" +42 36 training_loop """owa""" +42 36 negative_sampler """basic""" +42 36 evaluator """rankbased""" +42 37 dataset """wn18rr""" +42 37 model """complex""" +42 37 loss """bceaftersigmoid""" +42 37 regularizer """no""" +42 37 optimizer """adam""" +42 37 training_loop """owa""" +42 37 negative_sampler """basic""" +42 37 evaluator """rankbased""" +42 38 dataset """wn18rr""" +42 38 model """complex""" +42 38 loss """bceaftersigmoid""" +42 38 regularizer """no""" +42 38 optimizer """adam""" +42 38 training_loop """owa""" +42 38 negative_sampler """basic""" +42 38 evaluator """rankbased""" +42 39 dataset """wn18rr""" +42 39 model 
"""complex""" +42 39 loss """bceaftersigmoid""" +42 39 regularizer """no""" +42 39 optimizer """adam""" +42 39 training_loop """owa""" +42 39 negative_sampler """basic""" +42 39 evaluator """rankbased""" +42 40 dataset """wn18rr""" +42 40 model """complex""" +42 40 loss """bceaftersigmoid""" +42 40 regularizer """no""" +42 40 optimizer """adam""" +42 40 training_loop """owa""" +42 40 negative_sampler """basic""" +42 40 evaluator """rankbased""" +42 41 dataset """wn18rr""" +42 41 model """complex""" +42 41 loss """bceaftersigmoid""" +42 41 regularizer """no""" +42 41 optimizer """adam""" +42 41 training_loop """owa""" +42 41 negative_sampler """basic""" +42 41 evaluator """rankbased""" +42 42 dataset """wn18rr""" +42 42 model """complex""" +42 42 loss """bceaftersigmoid""" +42 42 regularizer """no""" +42 42 optimizer """adam""" +42 42 training_loop """owa""" +42 42 negative_sampler """basic""" +42 42 evaluator """rankbased""" +42 43 dataset """wn18rr""" +42 43 model """complex""" +42 43 loss """bceaftersigmoid""" +42 43 regularizer """no""" +42 43 optimizer """adam""" +42 43 training_loop """owa""" +42 43 negative_sampler """basic""" +42 43 evaluator """rankbased""" +42 44 dataset """wn18rr""" +42 44 model """complex""" +42 44 loss """bceaftersigmoid""" +42 44 regularizer """no""" +42 44 optimizer """adam""" +42 44 training_loop """owa""" +42 44 negative_sampler """basic""" +42 44 evaluator """rankbased""" +42 45 dataset """wn18rr""" +42 45 model """complex""" +42 45 loss """bceaftersigmoid""" +42 45 regularizer """no""" +42 45 optimizer """adam""" +42 45 training_loop """owa""" +42 45 negative_sampler """basic""" +42 45 evaluator """rankbased""" +43 1 model.embedding_dim 1.0 +43 1 optimizer.lr 0.011750082721288567 +43 1 negative_sampler.num_negs_per_pos 41.0 +43 1 training.batch_size 2.0 +43 2 model.embedding_dim 0.0 +43 2 optimizer.lr 0.0014916710957185427 +43 2 negative_sampler.num_negs_per_pos 28.0 +43 2 training.batch_size 0.0 +43 3 model.embedding_dim 0.0 +43 
3 optimizer.lr 0.0034289730665172454 +43 3 negative_sampler.num_negs_per_pos 7.0 +43 3 training.batch_size 1.0 +43 4 model.embedding_dim 0.0 +43 4 optimizer.lr 0.018995325060106698 +43 4 negative_sampler.num_negs_per_pos 22.0 +43 4 training.batch_size 1.0 +43 5 model.embedding_dim 2.0 +43 5 optimizer.lr 0.019134313901161933 +43 5 negative_sampler.num_negs_per_pos 7.0 +43 5 training.batch_size 1.0 +43 6 model.embedding_dim 0.0 +43 6 optimizer.lr 0.010047042746768818 +43 6 negative_sampler.num_negs_per_pos 82.0 +43 6 training.batch_size 1.0 +43 7 model.embedding_dim 2.0 +43 7 optimizer.lr 0.012978172496362012 +43 7 negative_sampler.num_negs_per_pos 53.0 +43 7 training.batch_size 2.0 +43 8 model.embedding_dim 0.0 +43 8 optimizer.lr 0.0063119423945837225 +43 8 negative_sampler.num_negs_per_pos 83.0 +43 8 training.batch_size 1.0 +43 9 model.embedding_dim 0.0 +43 9 optimizer.lr 0.0044129811330726005 +43 9 negative_sampler.num_negs_per_pos 82.0 +43 9 training.batch_size 1.0 +43 10 model.embedding_dim 1.0 +43 10 optimizer.lr 0.0219580216905102 +43 10 negative_sampler.num_negs_per_pos 20.0 +43 10 training.batch_size 2.0 +43 11 model.embedding_dim 0.0 +43 11 optimizer.lr 0.009700916201915422 +43 11 negative_sampler.num_negs_per_pos 28.0 +43 11 training.batch_size 1.0 +43 12 model.embedding_dim 2.0 +43 12 optimizer.lr 0.03326699645066114 +43 12 negative_sampler.num_negs_per_pos 56.0 +43 12 training.batch_size 0.0 +43 13 model.embedding_dim 2.0 +43 13 optimizer.lr 0.031858343637095034 +43 13 negative_sampler.num_negs_per_pos 76.0 +43 13 training.batch_size 0.0 +43 14 model.embedding_dim 1.0 +43 14 optimizer.lr 0.01332021157899014 +43 14 negative_sampler.num_negs_per_pos 27.0 +43 14 training.batch_size 1.0 +43 15 model.embedding_dim 1.0 +43 15 optimizer.lr 0.01000091969055439 +43 15 negative_sampler.num_negs_per_pos 31.0 +43 15 training.batch_size 0.0 +43 16 model.embedding_dim 2.0 +43 16 optimizer.lr 0.08058253927951264 +43 16 negative_sampler.num_negs_per_pos 83.0 +43 16 
training.batch_size 1.0 +43 17 model.embedding_dim 0.0 +43 17 optimizer.lr 0.05160682855214479 +43 17 negative_sampler.num_negs_per_pos 31.0 +43 17 training.batch_size 2.0 +43 18 model.embedding_dim 0.0 +43 18 optimizer.lr 0.0058944425661891795 +43 18 negative_sampler.num_negs_per_pos 85.0 +43 18 training.batch_size 1.0 +43 19 model.embedding_dim 0.0 +43 19 optimizer.lr 0.001889914777714341 +43 19 negative_sampler.num_negs_per_pos 79.0 +43 19 training.batch_size 2.0 +43 20 model.embedding_dim 0.0 +43 20 optimizer.lr 0.001052978126257984 +43 20 negative_sampler.num_negs_per_pos 35.0 +43 20 training.batch_size 2.0 +43 21 model.embedding_dim 2.0 +43 21 optimizer.lr 0.0015502777903477543 +43 21 negative_sampler.num_negs_per_pos 63.0 +43 21 training.batch_size 2.0 +43 22 model.embedding_dim 0.0 +43 22 optimizer.lr 0.0262662608657236 +43 22 negative_sampler.num_negs_per_pos 84.0 +43 22 training.batch_size 2.0 +43 23 model.embedding_dim 0.0 +43 23 optimizer.lr 0.07386847788015445 +43 23 negative_sampler.num_negs_per_pos 81.0 +43 23 training.batch_size 2.0 +43 24 model.embedding_dim 0.0 +43 24 optimizer.lr 0.0027008588637741603 +43 24 negative_sampler.num_negs_per_pos 63.0 +43 24 training.batch_size 1.0 +43 25 model.embedding_dim 1.0 +43 25 optimizer.lr 0.0034068357755303908 +43 25 negative_sampler.num_negs_per_pos 9.0 +43 25 training.batch_size 2.0 +43 26 model.embedding_dim 1.0 +43 26 optimizer.lr 0.013220083426775343 +43 26 negative_sampler.num_negs_per_pos 82.0 +43 26 training.batch_size 2.0 +43 27 model.embedding_dim 0.0 +43 27 optimizer.lr 0.005173041634280357 +43 27 negative_sampler.num_negs_per_pos 34.0 +43 27 training.batch_size 2.0 +43 28 model.embedding_dim 2.0 +43 28 optimizer.lr 0.005094959040044779 +43 28 negative_sampler.num_negs_per_pos 80.0 +43 28 training.batch_size 0.0 +43 29 model.embedding_dim 2.0 +43 29 optimizer.lr 0.0022678181052703797 +43 29 negative_sampler.num_negs_per_pos 1.0 +43 29 training.batch_size 0.0 +43 30 model.embedding_dim 1.0 +43 30 
optimizer.lr 0.05158812894509304 +43 30 negative_sampler.num_negs_per_pos 6.0 +43 30 training.batch_size 2.0 +43 31 model.embedding_dim 2.0 +43 31 optimizer.lr 0.031498967331945425 +43 31 negative_sampler.num_negs_per_pos 92.0 +43 31 training.batch_size 1.0 +43 32 model.embedding_dim 2.0 +43 32 optimizer.lr 0.006051691511201899 +43 32 negative_sampler.num_negs_per_pos 66.0 +43 32 training.batch_size 0.0 +43 33 model.embedding_dim 0.0 +43 33 optimizer.lr 0.013127457285582018 +43 33 negative_sampler.num_negs_per_pos 0.0 +43 33 training.batch_size 0.0 +43 34 model.embedding_dim 1.0 +43 34 optimizer.lr 0.017606975450959527 +43 34 negative_sampler.num_negs_per_pos 97.0 +43 34 training.batch_size 2.0 +43 35 model.embedding_dim 1.0 +43 35 optimizer.lr 0.02992309888422499 +43 35 negative_sampler.num_negs_per_pos 45.0 +43 35 training.batch_size 2.0 +43 36 model.embedding_dim 1.0 +43 36 optimizer.lr 0.05917704511637243 +43 36 negative_sampler.num_negs_per_pos 64.0 +43 36 training.batch_size 2.0 +43 37 model.embedding_dim 0.0 +43 37 optimizer.lr 0.018630136610605794 +43 37 negative_sampler.num_negs_per_pos 10.0 +43 37 training.batch_size 2.0 +43 38 model.embedding_dim 1.0 +43 38 optimizer.lr 0.04448750272854112 +43 38 negative_sampler.num_negs_per_pos 28.0 +43 38 training.batch_size 0.0 +43 39 model.embedding_dim 1.0 +43 39 optimizer.lr 0.007397926612362336 +43 39 negative_sampler.num_negs_per_pos 70.0 +43 39 training.batch_size 1.0 +43 40 model.embedding_dim 1.0 +43 40 optimizer.lr 0.030498670843059714 +43 40 negative_sampler.num_negs_per_pos 34.0 +43 40 training.batch_size 2.0 +43 41 model.embedding_dim 0.0 +43 41 optimizer.lr 0.011138726727337848 +43 41 negative_sampler.num_negs_per_pos 91.0 +43 41 training.batch_size 1.0 +43 42 model.embedding_dim 1.0 +43 42 optimizer.lr 0.00582017003651359 +43 42 negative_sampler.num_negs_per_pos 1.0 +43 42 training.batch_size 2.0 +43 43 model.embedding_dim 0.0 +43 43 optimizer.lr 0.049668667455242906 +43 43 
negative_sampler.num_negs_per_pos 20.0 +43 43 training.batch_size 1.0 +43 44 model.embedding_dim 1.0 +43 44 optimizer.lr 0.016490248573185557 +43 44 negative_sampler.num_negs_per_pos 78.0 +43 44 training.batch_size 2.0 +43 45 model.embedding_dim 0.0 +43 45 optimizer.lr 0.03830562279172783 +43 45 negative_sampler.num_negs_per_pos 48.0 +43 45 training.batch_size 0.0 +43 46 model.embedding_dim 1.0 +43 46 optimizer.lr 0.09940462502274837 +43 46 negative_sampler.num_negs_per_pos 83.0 +43 46 training.batch_size 0.0 +43 47 model.embedding_dim 2.0 +43 47 optimizer.lr 0.011743850933248149 +43 47 negative_sampler.num_negs_per_pos 15.0 +43 47 training.batch_size 0.0 +43 48 model.embedding_dim 2.0 +43 48 optimizer.lr 0.01808894973233684 +43 48 negative_sampler.num_negs_per_pos 83.0 +43 48 training.batch_size 2.0 +43 49 model.embedding_dim 0.0 +43 49 optimizer.lr 0.0790134260496337 +43 49 negative_sampler.num_negs_per_pos 34.0 +43 49 training.batch_size 1.0 +43 50 model.embedding_dim 1.0 +43 50 optimizer.lr 0.0568325536781852 +43 50 negative_sampler.num_negs_per_pos 94.0 +43 50 training.batch_size 2.0 +43 51 model.embedding_dim 0.0 +43 51 optimizer.lr 0.004188974050702495 +43 51 negative_sampler.num_negs_per_pos 29.0 +43 51 training.batch_size 0.0 +43 52 model.embedding_dim 0.0 +43 52 optimizer.lr 0.0015620350927354506 +43 52 negative_sampler.num_negs_per_pos 95.0 +43 52 training.batch_size 1.0 +43 53 model.embedding_dim 0.0 +43 53 optimizer.lr 0.0012726400901908756 +43 53 negative_sampler.num_negs_per_pos 89.0 +43 53 training.batch_size 2.0 +43 54 model.embedding_dim 1.0 +43 54 optimizer.lr 0.004190298473563173 +43 54 negative_sampler.num_negs_per_pos 12.0 +43 54 training.batch_size 0.0 +43 55 model.embedding_dim 0.0 +43 55 optimizer.lr 0.003253938456566068 +43 55 negative_sampler.num_negs_per_pos 59.0 +43 55 training.batch_size 0.0 +43 56 model.embedding_dim 0.0 +43 56 optimizer.lr 0.005452274417770351 +43 56 negative_sampler.num_negs_per_pos 8.0 +43 56 training.batch_size 
0.0 +43 57 model.embedding_dim 0.0 +43 57 optimizer.lr 0.011376447835914304 +43 57 negative_sampler.num_negs_per_pos 40.0 +43 57 training.batch_size 0.0 +43 1 dataset """wn18rr""" +43 1 model """complex""" +43 1 loss """softplus""" +43 1 regularizer """no""" +43 1 optimizer """adam""" +43 1 training_loop """owa""" +43 1 negative_sampler """basic""" +43 1 evaluator """rankbased""" +43 2 dataset """wn18rr""" +43 2 model """complex""" +43 2 loss """softplus""" +43 2 regularizer """no""" +43 2 optimizer """adam""" +43 2 training_loop """owa""" +43 2 negative_sampler """basic""" +43 2 evaluator """rankbased""" +43 3 dataset """wn18rr""" +43 3 model """complex""" +43 3 loss """softplus""" +43 3 regularizer """no""" +43 3 optimizer """adam""" +43 3 training_loop """owa""" +43 3 negative_sampler """basic""" +43 3 evaluator """rankbased""" +43 4 dataset """wn18rr""" +43 4 model """complex""" +43 4 loss """softplus""" +43 4 regularizer """no""" +43 4 optimizer """adam""" +43 4 training_loop """owa""" +43 4 negative_sampler """basic""" +43 4 evaluator """rankbased""" +43 5 dataset """wn18rr""" +43 5 model """complex""" +43 5 loss """softplus""" +43 5 regularizer """no""" +43 5 optimizer """adam""" +43 5 training_loop """owa""" +43 5 negative_sampler """basic""" +43 5 evaluator """rankbased""" +43 6 dataset """wn18rr""" +43 6 model """complex""" +43 6 loss """softplus""" +43 6 regularizer """no""" +43 6 optimizer """adam""" +43 6 training_loop """owa""" +43 6 negative_sampler """basic""" +43 6 evaluator """rankbased""" +43 7 dataset """wn18rr""" +43 7 model """complex""" +43 7 loss """softplus""" +43 7 regularizer """no""" +43 7 optimizer """adam""" +43 7 training_loop """owa""" +43 7 negative_sampler """basic""" +43 7 evaluator """rankbased""" +43 8 dataset """wn18rr""" +43 8 model """complex""" +43 8 loss """softplus""" +43 8 regularizer """no""" +43 8 optimizer """adam""" +43 8 training_loop """owa""" +43 8 negative_sampler """basic""" +43 8 evaluator """rankbased""" +43 9 
dataset """wn18rr""" +43 9 model """complex""" +43 9 loss """softplus""" +43 9 regularizer """no""" +43 9 optimizer """adam""" +43 9 training_loop """owa""" +43 9 negative_sampler """basic""" +43 9 evaluator """rankbased""" +43 10 dataset """wn18rr""" +43 10 model """complex""" +43 10 loss """softplus""" +43 10 regularizer """no""" +43 10 optimizer """adam""" +43 10 training_loop """owa""" +43 10 negative_sampler """basic""" +43 10 evaluator """rankbased""" +43 11 dataset """wn18rr""" +43 11 model """complex""" +43 11 loss """softplus""" +43 11 regularizer """no""" +43 11 optimizer """adam""" +43 11 training_loop """owa""" +43 11 negative_sampler """basic""" +43 11 evaluator """rankbased""" +43 12 dataset """wn18rr""" +43 12 model """complex""" +43 12 loss """softplus""" +43 12 regularizer """no""" +43 12 optimizer """adam""" +43 12 training_loop """owa""" +43 12 negative_sampler """basic""" +43 12 evaluator """rankbased""" +43 13 dataset """wn18rr""" +43 13 model """complex""" +43 13 loss """softplus""" +43 13 regularizer """no""" +43 13 optimizer """adam""" +43 13 training_loop """owa""" +43 13 negative_sampler """basic""" +43 13 evaluator """rankbased""" +43 14 dataset """wn18rr""" +43 14 model """complex""" +43 14 loss """softplus""" +43 14 regularizer """no""" +43 14 optimizer """adam""" +43 14 training_loop """owa""" +43 14 negative_sampler """basic""" +43 14 evaluator """rankbased""" +43 15 dataset """wn18rr""" +43 15 model """complex""" +43 15 loss """softplus""" +43 15 regularizer """no""" +43 15 optimizer """adam""" +43 15 training_loop """owa""" +43 15 negative_sampler """basic""" +43 15 evaluator """rankbased""" +43 16 dataset """wn18rr""" +43 16 model """complex""" +43 16 loss """softplus""" +43 16 regularizer """no""" +43 16 optimizer """adam""" +43 16 training_loop """owa""" +43 16 negative_sampler """basic""" +43 16 evaluator """rankbased""" +43 17 dataset """wn18rr""" +43 17 model """complex""" +43 17 loss """softplus""" +43 17 regularizer """no""" 
+43 17 optimizer """adam""" +43 17 training_loop """owa""" +43 17 negative_sampler """basic""" +43 17 evaluator """rankbased""" +43 18 dataset """wn18rr""" +43 18 model """complex""" +43 18 loss """softplus""" +43 18 regularizer """no""" +43 18 optimizer """adam""" +43 18 training_loop """owa""" +43 18 negative_sampler """basic""" +43 18 evaluator """rankbased""" +43 19 dataset """wn18rr""" +43 19 model """complex""" +43 19 loss """softplus""" +43 19 regularizer """no""" +43 19 optimizer """adam""" +43 19 training_loop """owa""" +43 19 negative_sampler """basic""" +43 19 evaluator """rankbased""" +43 20 dataset """wn18rr""" +43 20 model """complex""" +43 20 loss """softplus""" +43 20 regularizer """no""" +43 20 optimizer """adam""" +43 20 training_loop """owa""" +43 20 negative_sampler """basic""" +43 20 evaluator """rankbased""" +43 21 dataset """wn18rr""" +43 21 model """complex""" +43 21 loss """softplus""" +43 21 regularizer """no""" +43 21 optimizer """adam""" +43 21 training_loop """owa""" +43 21 negative_sampler """basic""" +43 21 evaluator """rankbased""" +43 22 dataset """wn18rr""" +43 22 model """complex""" +43 22 loss """softplus""" +43 22 regularizer """no""" +43 22 optimizer """adam""" +43 22 training_loop """owa""" +43 22 negative_sampler """basic""" +43 22 evaluator """rankbased""" +43 23 dataset """wn18rr""" +43 23 model """complex""" +43 23 loss """softplus""" +43 23 regularizer """no""" +43 23 optimizer """adam""" +43 23 training_loop """owa""" +43 23 negative_sampler """basic""" +43 23 evaluator """rankbased""" +43 24 dataset """wn18rr""" +43 24 model """complex""" +43 24 loss """softplus""" +43 24 regularizer """no""" +43 24 optimizer """adam""" +43 24 training_loop """owa""" +43 24 negative_sampler """basic""" +43 24 evaluator """rankbased""" +43 25 dataset """wn18rr""" +43 25 model """complex""" +43 25 loss """softplus""" +43 25 regularizer """no""" +43 25 optimizer """adam""" +43 25 training_loop """owa""" +43 25 negative_sampler """basic""" 
+43 25 evaluator """rankbased""" +43 26 dataset """wn18rr""" +43 26 model """complex""" +43 26 loss """softplus""" +43 26 regularizer """no""" +43 26 optimizer """adam""" +43 26 training_loop """owa""" +43 26 negative_sampler """basic""" +43 26 evaluator """rankbased""" +43 27 dataset """wn18rr""" +43 27 model """complex""" +43 27 loss """softplus""" +43 27 regularizer """no""" +43 27 optimizer """adam""" +43 27 training_loop """owa""" +43 27 negative_sampler """basic""" +43 27 evaluator """rankbased""" +43 28 dataset """wn18rr""" +43 28 model """complex""" +43 28 loss """softplus""" +43 28 regularizer """no""" +43 28 optimizer """adam""" +43 28 training_loop """owa""" +43 28 negative_sampler """basic""" +43 28 evaluator """rankbased""" +43 29 dataset """wn18rr""" +43 29 model """complex""" +43 29 loss """softplus""" +43 29 regularizer """no""" +43 29 optimizer """adam""" +43 29 training_loop """owa""" +43 29 negative_sampler """basic""" +43 29 evaluator """rankbased""" +43 30 dataset """wn18rr""" +43 30 model """complex""" +43 30 loss """softplus""" +43 30 regularizer """no""" +43 30 optimizer """adam""" +43 30 training_loop """owa""" +43 30 negative_sampler """basic""" +43 30 evaluator """rankbased""" +43 31 dataset """wn18rr""" +43 31 model """complex""" +43 31 loss """softplus""" +43 31 regularizer """no""" +43 31 optimizer """adam""" +43 31 training_loop """owa""" +43 31 negative_sampler """basic""" +43 31 evaluator """rankbased""" +43 32 dataset """wn18rr""" +43 32 model """complex""" +43 32 loss """softplus""" +43 32 regularizer """no""" +43 32 optimizer """adam""" +43 32 training_loop """owa""" +43 32 negative_sampler """basic""" +43 32 evaluator """rankbased""" +43 33 dataset """wn18rr""" +43 33 model """complex""" +43 33 loss """softplus""" +43 33 regularizer """no""" +43 33 optimizer """adam""" +43 33 training_loop """owa""" +43 33 negative_sampler """basic""" +43 33 evaluator """rankbased""" +43 34 dataset """wn18rr""" +43 34 model """complex""" +43 34 
loss """softplus""" +43 34 regularizer """no""" +43 34 optimizer """adam""" +43 34 training_loop """owa""" +43 34 negative_sampler """basic""" +43 34 evaluator """rankbased""" +43 35 dataset """wn18rr""" +43 35 model """complex""" +43 35 loss """softplus""" +43 35 regularizer """no""" +43 35 optimizer """adam""" +43 35 training_loop """owa""" +43 35 negative_sampler """basic""" +43 35 evaluator """rankbased""" +43 36 dataset """wn18rr""" +43 36 model """complex""" +43 36 loss """softplus""" +43 36 regularizer """no""" +43 36 optimizer """adam""" +43 36 training_loop """owa""" +43 36 negative_sampler """basic""" +43 36 evaluator """rankbased""" +43 37 dataset """wn18rr""" +43 37 model """complex""" +43 37 loss """softplus""" +43 37 regularizer """no""" +43 37 optimizer """adam""" +43 37 training_loop """owa""" +43 37 negative_sampler """basic""" +43 37 evaluator """rankbased""" +43 38 dataset """wn18rr""" +43 38 model """complex""" +43 38 loss """softplus""" +43 38 regularizer """no""" +43 38 optimizer """adam""" +43 38 training_loop """owa""" +43 38 negative_sampler """basic""" +43 38 evaluator """rankbased""" +43 39 dataset """wn18rr""" +43 39 model """complex""" +43 39 loss """softplus""" +43 39 regularizer """no""" +43 39 optimizer """adam""" +43 39 training_loop """owa""" +43 39 negative_sampler """basic""" +43 39 evaluator """rankbased""" +43 40 dataset """wn18rr""" +43 40 model """complex""" +43 40 loss """softplus""" +43 40 regularizer """no""" +43 40 optimizer """adam""" +43 40 training_loop """owa""" +43 40 negative_sampler """basic""" +43 40 evaluator """rankbased""" +43 41 dataset """wn18rr""" +43 41 model """complex""" +43 41 loss """softplus""" +43 41 regularizer """no""" +43 41 optimizer """adam""" +43 41 training_loop """owa""" +43 41 negative_sampler """basic""" +43 41 evaluator """rankbased""" +43 42 dataset """wn18rr""" +43 42 model """complex""" +43 42 loss """softplus""" +43 42 regularizer """no""" +43 42 optimizer """adam""" +43 42 
training_loop """owa""" +43 42 negative_sampler """basic""" +43 42 evaluator """rankbased""" +43 43 dataset """wn18rr""" +43 43 model """complex""" +43 43 loss """softplus""" +43 43 regularizer """no""" +43 43 optimizer """adam""" +43 43 training_loop """owa""" +43 43 negative_sampler """basic""" +43 43 evaluator """rankbased""" +43 44 dataset """wn18rr""" +43 44 model """complex""" +43 44 loss """softplus""" +43 44 regularizer """no""" +43 44 optimizer """adam""" +43 44 training_loop """owa""" +43 44 negative_sampler """basic""" +43 44 evaluator """rankbased""" +43 45 dataset """wn18rr""" +43 45 model """complex""" +43 45 loss """softplus""" +43 45 regularizer """no""" +43 45 optimizer """adam""" +43 45 training_loop """owa""" +43 45 negative_sampler """basic""" +43 45 evaluator """rankbased""" +43 46 dataset """wn18rr""" +43 46 model """complex""" +43 46 loss """softplus""" +43 46 regularizer """no""" +43 46 optimizer """adam""" +43 46 training_loop """owa""" +43 46 negative_sampler """basic""" +43 46 evaluator """rankbased""" +43 47 dataset """wn18rr""" +43 47 model """complex""" +43 47 loss """softplus""" +43 47 regularizer """no""" +43 47 optimizer """adam""" +43 47 training_loop """owa""" +43 47 negative_sampler """basic""" +43 47 evaluator """rankbased""" +43 48 dataset """wn18rr""" +43 48 model """complex""" +43 48 loss """softplus""" +43 48 regularizer """no""" +43 48 optimizer """adam""" +43 48 training_loop """owa""" +43 48 negative_sampler """basic""" +43 48 evaluator """rankbased""" +43 49 dataset """wn18rr""" +43 49 model """complex""" +43 49 loss """softplus""" +43 49 regularizer """no""" +43 49 optimizer """adam""" +43 49 training_loop """owa""" +43 49 negative_sampler """basic""" +43 49 evaluator """rankbased""" +43 50 dataset """wn18rr""" +43 50 model """complex""" +43 50 loss """softplus""" +43 50 regularizer """no""" +43 50 optimizer """adam""" +43 50 training_loop """owa""" +43 50 negative_sampler """basic""" +43 50 evaluator """rankbased""" 
+43 51 dataset """wn18rr""" +43 51 model """complex""" +43 51 loss """softplus""" +43 51 regularizer """no""" +43 51 optimizer """adam""" +43 51 training_loop """owa""" +43 51 negative_sampler """basic""" +43 51 evaluator """rankbased""" +43 52 dataset """wn18rr""" +43 52 model """complex""" +43 52 loss """softplus""" +43 52 regularizer """no""" +43 52 optimizer """adam""" +43 52 training_loop """owa""" +43 52 negative_sampler """basic""" +43 52 evaluator """rankbased""" +43 53 dataset """wn18rr""" +43 53 model """complex""" +43 53 loss """softplus""" +43 53 regularizer """no""" +43 53 optimizer """adam""" +43 53 training_loop """owa""" +43 53 negative_sampler """basic""" +43 53 evaluator """rankbased""" +43 54 dataset """wn18rr""" +43 54 model """complex""" +43 54 loss """softplus""" +43 54 regularizer """no""" +43 54 optimizer """adam""" +43 54 training_loop """owa""" +43 54 negative_sampler """basic""" +43 54 evaluator """rankbased""" +43 55 dataset """wn18rr""" +43 55 model """complex""" +43 55 loss """softplus""" +43 55 regularizer """no""" +43 55 optimizer """adam""" +43 55 training_loop """owa""" +43 55 negative_sampler """basic""" +43 55 evaluator """rankbased""" +43 56 dataset """wn18rr""" +43 56 model """complex""" +43 56 loss """softplus""" +43 56 regularizer """no""" +43 56 optimizer """adam""" +43 56 training_loop """owa""" +43 56 negative_sampler """basic""" +43 56 evaluator """rankbased""" +43 57 dataset """wn18rr""" +43 57 model """complex""" +43 57 loss """softplus""" +43 57 regularizer """no""" +43 57 optimizer """adam""" +43 57 training_loop """owa""" +43 57 negative_sampler """basic""" +43 57 evaluator """rankbased""" +44 1 model.embedding_dim 1.0 +44 1 optimizer.lr 0.025637833165095176 +44 1 negative_sampler.num_negs_per_pos 83.0 +44 1 training.batch_size 0.0 +44 2 model.embedding_dim 0.0 +44 2 optimizer.lr 0.021030642189032042 +44 2 negative_sampler.num_negs_per_pos 82.0 +44 2 training.batch_size 0.0 +44 3 model.embedding_dim 0.0 +44 3 
optimizer.lr 0.014326191590176892 +44 3 negative_sampler.num_negs_per_pos 10.0 +44 3 training.batch_size 2.0 +44 4 model.embedding_dim 0.0 +44 4 optimizer.lr 0.04016825507715472 +44 4 negative_sampler.num_negs_per_pos 41.0 +44 4 training.batch_size 2.0 +44 5 model.embedding_dim 2.0 +44 5 optimizer.lr 0.0031882960294461193 +44 5 negative_sampler.num_negs_per_pos 56.0 +44 5 training.batch_size 2.0 +44 6 model.embedding_dim 1.0 +44 6 optimizer.lr 0.01919620858826315 +44 6 negative_sampler.num_negs_per_pos 22.0 +44 6 training.batch_size 1.0 +44 7 model.embedding_dim 1.0 +44 7 optimizer.lr 0.001691423435385629 +44 7 negative_sampler.num_negs_per_pos 23.0 +44 7 training.batch_size 1.0 +44 8 model.embedding_dim 1.0 +44 8 optimizer.lr 0.013560527978143718 +44 8 negative_sampler.num_negs_per_pos 46.0 +44 8 training.batch_size 2.0 +44 9 model.embedding_dim 1.0 +44 9 optimizer.lr 0.0027154600300585774 +44 9 negative_sampler.num_negs_per_pos 41.0 +44 9 training.batch_size 0.0 +44 10 model.embedding_dim 1.0 +44 10 optimizer.lr 0.0015295703610641257 +44 10 negative_sampler.num_negs_per_pos 26.0 +44 10 training.batch_size 1.0 +44 11 model.embedding_dim 2.0 +44 11 optimizer.lr 0.07478498909680437 +44 11 negative_sampler.num_negs_per_pos 82.0 +44 11 training.batch_size 1.0 +44 12 model.embedding_dim 1.0 +44 12 optimizer.lr 0.019230540733797284 +44 12 negative_sampler.num_negs_per_pos 20.0 +44 12 training.batch_size 2.0 +44 13 model.embedding_dim 2.0 +44 13 optimizer.lr 0.07313749881000124 +44 13 negative_sampler.num_negs_per_pos 74.0 +44 13 training.batch_size 2.0 +44 14 model.embedding_dim 2.0 +44 14 optimizer.lr 0.002316634348335226 +44 14 negative_sampler.num_negs_per_pos 76.0 +44 14 training.batch_size 2.0 +44 15 model.embedding_dim 1.0 +44 15 optimizer.lr 0.09682727796717354 +44 15 negative_sampler.num_negs_per_pos 4.0 +44 15 training.batch_size 0.0 +44 16 model.embedding_dim 2.0 +44 16 optimizer.lr 0.006201010783629649 +44 16 negative_sampler.num_negs_per_pos 3.0 +44 16 
training.batch_size 0.0 +44 17 model.embedding_dim 1.0 +44 17 optimizer.lr 0.004953480681909017 +44 17 negative_sampler.num_negs_per_pos 4.0 +44 17 training.batch_size 1.0 +44 18 model.embedding_dim 2.0 +44 18 optimizer.lr 0.08193062650420492 +44 18 negative_sampler.num_negs_per_pos 41.0 +44 18 training.batch_size 0.0 +44 19 model.embedding_dim 2.0 +44 19 optimizer.lr 0.0019809409399172604 +44 19 negative_sampler.num_negs_per_pos 40.0 +44 19 training.batch_size 2.0 +44 20 model.embedding_dim 0.0 +44 20 optimizer.lr 0.0014489433340577483 +44 20 negative_sampler.num_negs_per_pos 55.0 +44 20 training.batch_size 0.0 +44 21 model.embedding_dim 2.0 +44 21 optimizer.lr 0.001256742862583038 +44 21 negative_sampler.num_negs_per_pos 24.0 +44 21 training.batch_size 1.0 +44 22 model.embedding_dim 1.0 +44 22 optimizer.lr 0.03531652887253744 +44 22 negative_sampler.num_negs_per_pos 47.0 +44 22 training.batch_size 2.0 +44 23 model.embedding_dim 0.0 +44 23 optimizer.lr 0.002445162569303241 +44 23 negative_sampler.num_negs_per_pos 45.0 +44 23 training.batch_size 1.0 +44 24 model.embedding_dim 0.0 +44 24 optimizer.lr 0.02313109628232228 +44 24 negative_sampler.num_negs_per_pos 55.0 +44 24 training.batch_size 2.0 +44 25 model.embedding_dim 0.0 +44 25 optimizer.lr 0.026936186317315435 +44 25 negative_sampler.num_negs_per_pos 2.0 +44 25 training.batch_size 2.0 +44 26 model.embedding_dim 1.0 +44 26 optimizer.lr 0.002203433524340552 +44 26 negative_sampler.num_negs_per_pos 68.0 +44 26 training.batch_size 2.0 +44 27 model.embedding_dim 2.0 +44 27 optimizer.lr 0.0025539664220239956 +44 27 negative_sampler.num_negs_per_pos 88.0 +44 27 training.batch_size 0.0 +44 28 model.embedding_dim 2.0 +44 28 optimizer.lr 0.0012205107694003656 +44 28 negative_sampler.num_negs_per_pos 11.0 +44 28 training.batch_size 2.0 +44 29 model.embedding_dim 1.0 +44 29 optimizer.lr 0.0586854886948008 +44 29 negative_sampler.num_negs_per_pos 9.0 +44 29 training.batch_size 1.0 +44 30 model.embedding_dim 2.0 +44 30 
optimizer.lr 0.09080714872211355 +44 30 negative_sampler.num_negs_per_pos 7.0 +44 30 training.batch_size 1.0 +44 31 model.embedding_dim 0.0 +44 31 optimizer.lr 0.0011513999199535849 +44 31 negative_sampler.num_negs_per_pos 61.0 +44 31 training.batch_size 2.0 +44 32 model.embedding_dim 0.0 +44 32 optimizer.lr 0.007599128055143982 +44 32 negative_sampler.num_negs_per_pos 92.0 +44 32 training.batch_size 0.0 +44 33 model.embedding_dim 1.0 +44 33 optimizer.lr 0.02219960742600709 +44 33 negative_sampler.num_negs_per_pos 34.0 +44 33 training.batch_size 2.0 +44 34 model.embedding_dim 2.0 +44 34 optimizer.lr 0.02261240764016017 +44 34 negative_sampler.num_negs_per_pos 42.0 +44 34 training.batch_size 2.0 +44 35 model.embedding_dim 1.0 +44 35 optimizer.lr 0.00821946324144543 +44 35 negative_sampler.num_negs_per_pos 12.0 +44 35 training.batch_size 1.0 +44 36 model.embedding_dim 0.0 +44 36 optimizer.lr 0.010652065263124157 +44 36 negative_sampler.num_negs_per_pos 13.0 +44 36 training.batch_size 0.0 +44 37 model.embedding_dim 0.0 +44 37 optimizer.lr 0.0713077621766949 +44 37 negative_sampler.num_negs_per_pos 40.0 +44 37 training.batch_size 1.0 +44 38 model.embedding_dim 2.0 +44 38 optimizer.lr 0.006640151738404362 +44 38 negative_sampler.num_negs_per_pos 21.0 +44 38 training.batch_size 1.0 +44 39 model.embedding_dim 1.0 +44 39 optimizer.lr 0.010951095738801068 +44 39 negative_sampler.num_negs_per_pos 86.0 +44 39 training.batch_size 2.0 +44 40 model.embedding_dim 1.0 +44 40 optimizer.lr 0.0663026669450254 +44 40 negative_sampler.num_negs_per_pos 14.0 +44 40 training.batch_size 2.0 +44 41 model.embedding_dim 0.0 +44 41 optimizer.lr 0.009034917890116656 +44 41 negative_sampler.num_negs_per_pos 96.0 +44 41 training.batch_size 1.0 +44 42 model.embedding_dim 1.0 +44 42 optimizer.lr 0.004360181538152752 +44 42 negative_sampler.num_negs_per_pos 79.0 +44 42 training.batch_size 0.0 +44 43 model.embedding_dim 0.0 +44 43 optimizer.lr 0.028603396510435985 +44 43 
negative_sampler.num_negs_per_pos 19.0 +44 43 training.batch_size 2.0 +44 44 model.embedding_dim 0.0 +44 44 optimizer.lr 0.040295969577709013 +44 44 negative_sampler.num_negs_per_pos 41.0 +44 44 training.batch_size 2.0 +44 45 model.embedding_dim 0.0 +44 45 optimizer.lr 0.023367818694916144 +44 45 negative_sampler.num_negs_per_pos 31.0 +44 45 training.batch_size 2.0 +44 46 model.embedding_dim 2.0 +44 46 optimizer.lr 0.06800123482880029 +44 46 negative_sampler.num_negs_per_pos 66.0 +44 46 training.batch_size 0.0 +44 47 model.embedding_dim 0.0 +44 47 optimizer.lr 0.008525907861391173 +44 47 negative_sampler.num_negs_per_pos 58.0 +44 47 training.batch_size 1.0 +44 48 model.embedding_dim 1.0 +44 48 optimizer.lr 0.0019448744989178696 +44 48 negative_sampler.num_negs_per_pos 11.0 +44 48 training.batch_size 1.0 +44 49 model.embedding_dim 2.0 +44 49 optimizer.lr 0.005088839324437045 +44 49 negative_sampler.num_negs_per_pos 45.0 +44 49 training.batch_size 1.0 +44 50 model.embedding_dim 1.0 +44 50 optimizer.lr 0.0533745328167049 +44 50 negative_sampler.num_negs_per_pos 59.0 +44 50 training.batch_size 1.0 +44 51 model.embedding_dim 2.0 +44 51 optimizer.lr 0.0025153619440005997 +44 51 negative_sampler.num_negs_per_pos 27.0 +44 51 training.batch_size 0.0 +44 52 model.embedding_dim 0.0 +44 52 optimizer.lr 0.011526496635702854 +44 52 negative_sampler.num_negs_per_pos 12.0 +44 52 training.batch_size 0.0 +44 53 model.embedding_dim 2.0 +44 53 optimizer.lr 0.04748180862597751 +44 53 negative_sampler.num_negs_per_pos 2.0 +44 53 training.batch_size 2.0 +44 54 model.embedding_dim 0.0 +44 54 optimizer.lr 0.0010755797137401576 +44 54 negative_sampler.num_negs_per_pos 4.0 +44 54 training.batch_size 2.0 +44 55 model.embedding_dim 2.0 +44 55 optimizer.lr 0.0016319992787754557 +44 55 negative_sampler.num_negs_per_pos 69.0 +44 55 training.batch_size 0.0 +44 56 model.embedding_dim 0.0 +44 56 optimizer.lr 0.0011881332149231147 +44 56 negative_sampler.num_negs_per_pos 7.0 +44 56 
training.batch_size 0.0 +44 57 model.embedding_dim 0.0 +44 57 optimizer.lr 0.03374944256937029 +44 57 negative_sampler.num_negs_per_pos 28.0 +44 57 training.batch_size 0.0 +44 58 model.embedding_dim 2.0 +44 58 optimizer.lr 0.011113502416466465 +44 58 negative_sampler.num_negs_per_pos 48.0 +44 58 training.batch_size 0.0 +44 59 model.embedding_dim 2.0 +44 59 optimizer.lr 0.01383390483859305 +44 59 negative_sampler.num_negs_per_pos 21.0 +44 59 training.batch_size 0.0 +44 60 model.embedding_dim 1.0 +44 60 optimizer.lr 0.036348596760375496 +44 60 negative_sampler.num_negs_per_pos 24.0 +44 60 training.batch_size 1.0 +44 61 model.embedding_dim 2.0 +44 61 optimizer.lr 0.003009789087404879 +44 61 negative_sampler.num_negs_per_pos 56.0 +44 61 training.batch_size 2.0 +44 62 model.embedding_dim 0.0 +44 62 optimizer.lr 0.019553008753277337 +44 62 negative_sampler.num_negs_per_pos 28.0 +44 62 training.batch_size 2.0 +44 63 model.embedding_dim 1.0 +44 63 optimizer.lr 0.04435134561712724 +44 63 negative_sampler.num_negs_per_pos 79.0 +44 63 training.batch_size 2.0 +44 64 model.embedding_dim 0.0 +44 64 optimizer.lr 0.01241957224292467 +44 64 negative_sampler.num_negs_per_pos 59.0 +44 64 training.batch_size 0.0 +44 65 model.embedding_dim 2.0 +44 65 optimizer.lr 0.0032666025573564066 +44 65 negative_sampler.num_negs_per_pos 49.0 +44 65 training.batch_size 0.0 +44 66 model.embedding_dim 1.0 +44 66 optimizer.lr 0.07022644977680408 +44 66 negative_sampler.num_negs_per_pos 63.0 +44 66 training.batch_size 0.0 +44 67 model.embedding_dim 0.0 +44 67 optimizer.lr 0.0791639405996121 +44 67 negative_sampler.num_negs_per_pos 2.0 +44 67 training.batch_size 2.0 +44 68 model.embedding_dim 1.0 +44 68 optimizer.lr 0.006835832809555187 +44 68 negative_sampler.num_negs_per_pos 68.0 +44 68 training.batch_size 0.0 +44 69 model.embedding_dim 0.0 +44 69 optimizer.lr 0.0018703638297585246 +44 69 negative_sampler.num_negs_per_pos 50.0 +44 69 training.batch_size 1.0 +44 70 model.embedding_dim 0.0 +44 70 
optimizer.lr 0.0013439562377751458 +44 70 negative_sampler.num_negs_per_pos 51.0 +44 70 training.batch_size 1.0 +44 71 model.embedding_dim 1.0 +44 71 optimizer.lr 0.06865312941281143 +44 71 negative_sampler.num_negs_per_pos 23.0 +44 71 training.batch_size 2.0 +44 72 model.embedding_dim 2.0 +44 72 optimizer.lr 0.0012179792425625541 +44 72 negative_sampler.num_negs_per_pos 80.0 +44 72 training.batch_size 1.0 +44 73 model.embedding_dim 2.0 +44 73 optimizer.lr 0.016902061859934904 +44 73 negative_sampler.num_negs_per_pos 73.0 +44 73 training.batch_size 2.0 +44 74 model.embedding_dim 2.0 +44 74 optimizer.lr 0.011143652671789744 +44 74 negative_sampler.num_negs_per_pos 16.0 +44 74 training.batch_size 0.0 +44 75 model.embedding_dim 0.0 +44 75 optimizer.lr 0.010677386315270827 +44 75 negative_sampler.num_negs_per_pos 57.0 +44 75 training.batch_size 2.0 +44 76 model.embedding_dim 0.0 +44 76 optimizer.lr 0.046895860452484055 +44 76 negative_sampler.num_negs_per_pos 94.0 +44 76 training.batch_size 0.0 +44 77 model.embedding_dim 2.0 +44 77 optimizer.lr 0.0054441867685074705 +44 77 negative_sampler.num_negs_per_pos 51.0 +44 77 training.batch_size 0.0 +44 78 model.embedding_dim 1.0 +44 78 optimizer.lr 0.0027087146372949578 +44 78 negative_sampler.num_negs_per_pos 32.0 +44 78 training.batch_size 0.0 +44 79 model.embedding_dim 1.0 +44 79 optimizer.lr 0.0018701338270978387 +44 79 negative_sampler.num_negs_per_pos 60.0 +44 79 training.batch_size 2.0 +44 80 model.embedding_dim 0.0 +44 80 optimizer.lr 0.08872185861394453 +44 80 negative_sampler.num_negs_per_pos 38.0 +44 80 training.batch_size 2.0 +44 81 model.embedding_dim 2.0 +44 81 optimizer.lr 0.05645520898733947 +44 81 negative_sampler.num_negs_per_pos 1.0 +44 81 training.batch_size 1.0 +44 82 model.embedding_dim 1.0 +44 82 optimizer.lr 0.003313601334275578 +44 82 negative_sampler.num_negs_per_pos 24.0 +44 82 training.batch_size 2.0 +44 83 model.embedding_dim 1.0 +44 83 optimizer.lr 0.0015630489735782476 +44 83 
negative_sampler.num_negs_per_pos 55.0 +44 83 training.batch_size 1.0 +44 84 model.embedding_dim 1.0 +44 84 optimizer.lr 0.0031580579000312 +44 84 negative_sampler.num_negs_per_pos 4.0 +44 84 training.batch_size 2.0 +44 85 model.embedding_dim 1.0 +44 85 optimizer.lr 0.0014473286521266046 +44 85 negative_sampler.num_negs_per_pos 33.0 +44 85 training.batch_size 1.0 +44 86 model.embedding_dim 2.0 +44 86 optimizer.lr 0.09398352781064345 +44 86 negative_sampler.num_negs_per_pos 97.0 +44 86 training.batch_size 0.0 +44 87 model.embedding_dim 1.0 +44 87 optimizer.lr 0.012211592864278932 +44 87 negative_sampler.num_negs_per_pos 35.0 +44 87 training.batch_size 0.0 +44 88 model.embedding_dim 1.0 +44 88 optimizer.lr 0.002408278947350616 +44 88 negative_sampler.num_negs_per_pos 44.0 +44 88 training.batch_size 0.0 +44 89 model.embedding_dim 0.0 +44 89 optimizer.lr 0.005422722384352039 +44 89 negative_sampler.num_negs_per_pos 29.0 +44 89 training.batch_size 1.0 +44 90 model.embedding_dim 0.0 +44 90 optimizer.lr 0.04060262369975026 +44 90 negative_sampler.num_negs_per_pos 16.0 +44 90 training.batch_size 1.0 +44 91 model.embedding_dim 0.0 +44 91 optimizer.lr 0.014257965613061705 +44 91 negative_sampler.num_negs_per_pos 90.0 +44 91 training.batch_size 1.0 +44 92 model.embedding_dim 0.0 +44 92 optimizer.lr 0.06339810309854674 +44 92 negative_sampler.num_negs_per_pos 96.0 +44 92 training.batch_size 0.0 +44 93 model.embedding_dim 1.0 +44 93 optimizer.lr 0.04405225786632538 +44 93 negative_sampler.num_negs_per_pos 7.0 +44 93 training.batch_size 2.0 +44 94 model.embedding_dim 0.0 +44 94 optimizer.lr 0.01667294290206075 +44 94 negative_sampler.num_negs_per_pos 82.0 +44 94 training.batch_size 2.0 +44 95 model.embedding_dim 2.0 +44 95 optimizer.lr 0.007431736316753697 +44 95 negative_sampler.num_negs_per_pos 86.0 +44 95 training.batch_size 1.0 +44 96 model.embedding_dim 0.0 +44 96 optimizer.lr 0.003956342099694751 +44 96 negative_sampler.num_negs_per_pos 15.0 +44 96 training.batch_size 2.0 
+44 97 model.embedding_dim 0.0 +44 97 optimizer.lr 0.009843939412862133 +44 97 negative_sampler.num_negs_per_pos 0.0 +44 97 training.batch_size 2.0 +44 98 model.embedding_dim 2.0 +44 98 optimizer.lr 0.009406817574100525 +44 98 negative_sampler.num_negs_per_pos 5.0 +44 98 training.batch_size 1.0 +44 99 model.embedding_dim 0.0 +44 99 optimizer.lr 0.08637924673935492 +44 99 negative_sampler.num_negs_per_pos 43.0 +44 99 training.batch_size 1.0 +44 100 model.embedding_dim 2.0 +44 100 optimizer.lr 0.0200058255680929 +44 100 negative_sampler.num_negs_per_pos 65.0 +44 100 training.batch_size 2.0 +44 1 dataset """wn18rr""" +44 1 model """complex""" +44 1 loss """bceaftersigmoid""" +44 1 regularizer """no""" +44 1 optimizer """adam""" +44 1 training_loop """owa""" +44 1 negative_sampler """basic""" +44 1 evaluator """rankbased""" +44 2 dataset """wn18rr""" +44 2 model """complex""" +44 2 loss """bceaftersigmoid""" +44 2 regularizer """no""" +44 2 optimizer """adam""" +44 2 training_loop """owa""" +44 2 negative_sampler """basic""" +44 2 evaluator """rankbased""" +44 3 dataset """wn18rr""" +44 3 model """complex""" +44 3 loss """bceaftersigmoid""" +44 3 regularizer """no""" +44 3 optimizer """adam""" +44 3 training_loop """owa""" +44 3 negative_sampler """basic""" +44 3 evaluator """rankbased""" +44 4 dataset """wn18rr""" +44 4 model """complex""" +44 4 loss """bceaftersigmoid""" +44 4 regularizer """no""" +44 4 optimizer """adam""" +44 4 training_loop """owa""" +44 4 negative_sampler """basic""" +44 4 evaluator """rankbased""" +44 5 dataset """wn18rr""" +44 5 model """complex""" +44 5 loss """bceaftersigmoid""" +44 5 regularizer """no""" +44 5 optimizer """adam""" +44 5 training_loop """owa""" +44 5 negative_sampler """basic""" +44 5 evaluator """rankbased""" +44 6 dataset """wn18rr""" +44 6 model """complex""" +44 6 loss """bceaftersigmoid""" +44 6 regularizer """no""" +44 6 optimizer """adam""" +44 6 training_loop """owa""" +44 6 negative_sampler """basic""" +44 6 
evaluator """rankbased""" +44 7 dataset """wn18rr""" +44 7 model """complex""" +44 7 loss """bceaftersigmoid""" +44 7 regularizer """no""" +44 7 optimizer """adam""" +44 7 training_loop """owa""" +44 7 negative_sampler """basic""" +44 7 evaluator """rankbased""" +44 8 dataset """wn18rr""" +44 8 model """complex""" +44 8 loss """bceaftersigmoid""" +44 8 regularizer """no""" +44 8 optimizer """adam""" +44 8 training_loop """owa""" +44 8 negative_sampler """basic""" +44 8 evaluator """rankbased""" +44 9 dataset """wn18rr""" +44 9 model """complex""" +44 9 loss """bceaftersigmoid""" +44 9 regularizer """no""" +44 9 optimizer """adam""" +44 9 training_loop """owa""" +44 9 negative_sampler """basic""" +44 9 evaluator """rankbased""" +44 10 dataset """wn18rr""" +44 10 model """complex""" +44 10 loss """bceaftersigmoid""" +44 10 regularizer """no""" +44 10 optimizer """adam""" +44 10 training_loop """owa""" +44 10 negative_sampler """basic""" +44 10 evaluator """rankbased""" +44 11 dataset """wn18rr""" +44 11 model """complex""" +44 11 loss """bceaftersigmoid""" +44 11 regularizer """no""" +44 11 optimizer """adam""" +44 11 training_loop """owa""" +44 11 negative_sampler """basic""" +44 11 evaluator """rankbased""" +44 12 dataset """wn18rr""" +44 12 model """complex""" +44 12 loss """bceaftersigmoid""" +44 12 regularizer """no""" +44 12 optimizer """adam""" +44 12 training_loop """owa""" +44 12 negative_sampler """basic""" +44 12 evaluator """rankbased""" +44 13 dataset """wn18rr""" +44 13 model """complex""" +44 13 loss """bceaftersigmoid""" +44 13 regularizer """no""" +44 13 optimizer """adam""" +44 13 training_loop """owa""" +44 13 negative_sampler """basic""" +44 13 evaluator """rankbased""" +44 14 dataset """wn18rr""" +44 14 model """complex""" +44 14 loss """bceaftersigmoid""" +44 14 regularizer """no""" +44 14 optimizer """adam""" +44 14 training_loop """owa""" +44 14 negative_sampler """basic""" +44 14 evaluator """rankbased""" +44 15 dataset """wn18rr""" +44 15 
model """complex""" +44 15 loss """bceaftersigmoid""" +44 15 regularizer """no""" +44 15 optimizer """adam""" +44 15 training_loop """owa""" +44 15 negative_sampler """basic""" +44 15 evaluator """rankbased""" +44 16 dataset """wn18rr""" +44 16 model """complex""" +44 16 loss """bceaftersigmoid""" +44 16 regularizer """no""" +44 16 optimizer """adam""" +44 16 training_loop """owa""" +44 16 negative_sampler """basic""" +44 16 evaluator """rankbased""" +44 17 dataset """wn18rr""" +44 17 model """complex""" +44 17 loss """bceaftersigmoid""" +44 17 regularizer """no""" +44 17 optimizer """adam""" +44 17 training_loop """owa""" +44 17 negative_sampler """basic""" +44 17 evaluator """rankbased""" +44 18 dataset """wn18rr""" +44 18 model """complex""" +44 18 loss """bceaftersigmoid""" +44 18 regularizer """no""" +44 18 optimizer """adam""" +44 18 training_loop """owa""" +44 18 negative_sampler """basic""" +44 18 evaluator """rankbased""" +44 19 dataset """wn18rr""" +44 19 model """complex""" +44 19 loss """bceaftersigmoid""" +44 19 regularizer """no""" +44 19 optimizer """adam""" +44 19 training_loop """owa""" +44 19 negative_sampler """basic""" +44 19 evaluator """rankbased""" +44 20 dataset """wn18rr""" +44 20 model """complex""" +44 20 loss """bceaftersigmoid""" +44 20 regularizer """no""" +44 20 optimizer """adam""" +44 20 training_loop """owa""" +44 20 negative_sampler """basic""" +44 20 evaluator """rankbased""" +44 21 dataset """wn18rr""" +44 21 model """complex""" +44 21 loss """bceaftersigmoid""" +44 21 regularizer """no""" +44 21 optimizer """adam""" +44 21 training_loop """owa""" +44 21 negative_sampler """basic""" +44 21 evaluator """rankbased""" +44 22 dataset """wn18rr""" +44 22 model """complex""" +44 22 loss """bceaftersigmoid""" +44 22 regularizer """no""" +44 22 optimizer """adam""" +44 22 training_loop """owa""" +44 22 negative_sampler """basic""" +44 22 evaluator """rankbased""" +44 23 dataset """wn18rr""" +44 23 model """complex""" +44 23 loss 
"""bceaftersigmoid""" +44 23 regularizer """no""" +44 23 optimizer """adam""" +44 23 training_loop """owa""" +44 23 negative_sampler """basic""" +44 23 evaluator """rankbased""" +44 24 dataset """wn18rr""" +44 24 model """complex""" +44 24 loss """bceaftersigmoid""" +44 24 regularizer """no""" +44 24 optimizer """adam""" +44 24 training_loop """owa""" +44 24 negative_sampler """basic""" +44 24 evaluator """rankbased""" +44 25 dataset """wn18rr""" +44 25 model """complex""" +44 25 loss """bceaftersigmoid""" +44 25 regularizer """no""" +44 25 optimizer """adam""" +44 25 training_loop """owa""" +44 25 negative_sampler """basic""" +44 25 evaluator """rankbased""" +44 26 dataset """wn18rr""" +44 26 model """complex""" +44 26 loss """bceaftersigmoid""" +44 26 regularizer """no""" +44 26 optimizer """adam""" +44 26 training_loop """owa""" +44 26 negative_sampler """basic""" +44 26 evaluator """rankbased""" +44 27 dataset """wn18rr""" +44 27 model """complex""" +44 27 loss """bceaftersigmoid""" +44 27 regularizer """no""" +44 27 optimizer """adam""" +44 27 training_loop """owa""" +44 27 negative_sampler """basic""" +44 27 evaluator """rankbased""" +44 28 dataset """wn18rr""" +44 28 model """complex""" +44 28 loss """bceaftersigmoid""" +44 28 regularizer """no""" +44 28 optimizer """adam""" +44 28 training_loop """owa""" +44 28 negative_sampler """basic""" +44 28 evaluator """rankbased""" +44 29 dataset """wn18rr""" +44 29 model """complex""" +44 29 loss """bceaftersigmoid""" +44 29 regularizer """no""" +44 29 optimizer """adam""" +44 29 training_loop """owa""" +44 29 negative_sampler """basic""" +44 29 evaluator """rankbased""" +44 30 dataset """wn18rr""" +44 30 model """complex""" +44 30 loss """bceaftersigmoid""" +44 30 regularizer """no""" +44 30 optimizer """adam""" +44 30 training_loop """owa""" +44 30 negative_sampler """basic""" +44 30 evaluator """rankbased""" +44 31 dataset """wn18rr""" +44 31 model """complex""" +44 31 loss """bceaftersigmoid""" +44 31 
regularizer """no""" +44 31 optimizer """adam""" +44 31 training_loop """owa""" +44 31 negative_sampler """basic""" +44 31 evaluator """rankbased""" +44 32 dataset """wn18rr""" +44 32 model """complex""" +44 32 loss """bceaftersigmoid""" +44 32 regularizer """no""" +44 32 optimizer """adam""" +44 32 training_loop """owa""" +44 32 negative_sampler """basic""" +44 32 evaluator """rankbased""" +44 33 dataset """wn18rr""" +44 33 model """complex""" +44 33 loss """bceaftersigmoid""" +44 33 regularizer """no""" +44 33 optimizer """adam""" +44 33 training_loop """owa""" +44 33 negative_sampler """basic""" +44 33 evaluator """rankbased""" +44 34 dataset """wn18rr""" +44 34 model """complex""" +44 34 loss """bceaftersigmoid""" +44 34 regularizer """no""" +44 34 optimizer """adam""" +44 34 training_loop """owa""" +44 34 negative_sampler """basic""" +44 34 evaluator """rankbased""" +44 35 dataset """wn18rr""" +44 35 model """complex""" +44 35 loss """bceaftersigmoid""" +44 35 regularizer """no""" +44 35 optimizer """adam""" +44 35 training_loop """owa""" +44 35 negative_sampler """basic""" +44 35 evaluator """rankbased""" +44 36 dataset """wn18rr""" +44 36 model """complex""" +44 36 loss """bceaftersigmoid""" +44 36 regularizer """no""" +44 36 optimizer """adam""" +44 36 training_loop """owa""" +44 36 negative_sampler """basic""" +44 36 evaluator """rankbased""" +44 37 dataset """wn18rr""" +44 37 model """complex""" +44 37 loss """bceaftersigmoid""" +44 37 regularizer """no""" +44 37 optimizer """adam""" +44 37 training_loop """owa""" +44 37 negative_sampler """basic""" +44 37 evaluator """rankbased""" +44 38 dataset """wn18rr""" +44 38 model """complex""" +44 38 loss """bceaftersigmoid""" +44 38 regularizer """no""" +44 38 optimizer """adam""" +44 38 training_loop """owa""" +44 38 negative_sampler """basic""" +44 38 evaluator """rankbased""" +44 39 dataset """wn18rr""" +44 39 model """complex""" +44 39 loss """bceaftersigmoid""" +44 39 regularizer """no""" +44 39 optimizer 
"""adam""" +44 39 training_loop """owa""" +44 39 negative_sampler """basic""" +44 39 evaluator """rankbased""" +44 40 dataset """wn18rr""" +44 40 model """complex""" +44 40 loss """bceaftersigmoid""" +44 40 regularizer """no""" +44 40 optimizer """adam""" +44 40 training_loop """owa""" +44 40 negative_sampler """basic""" +44 40 evaluator """rankbased""" +44 41 dataset """wn18rr""" +44 41 model """complex""" +44 41 loss """bceaftersigmoid""" +44 41 regularizer """no""" +44 41 optimizer """adam""" +44 41 training_loop """owa""" +44 41 negative_sampler """basic""" +44 41 evaluator """rankbased""" +44 42 dataset """wn18rr""" +44 42 model """complex""" +44 42 loss """bceaftersigmoid""" +44 42 regularizer """no""" +44 42 optimizer """adam""" +44 42 training_loop """owa""" +44 42 negative_sampler """basic""" +44 42 evaluator """rankbased""" +44 43 dataset """wn18rr""" +44 43 model """complex""" +44 43 loss """bceaftersigmoid""" +44 43 regularizer """no""" +44 43 optimizer """adam""" +44 43 training_loop """owa""" +44 43 negative_sampler """basic""" +44 43 evaluator """rankbased""" +44 44 dataset """wn18rr""" +44 44 model """complex""" +44 44 loss """bceaftersigmoid""" +44 44 regularizer """no""" +44 44 optimizer """adam""" +44 44 training_loop """owa""" +44 44 negative_sampler """basic""" +44 44 evaluator """rankbased""" +44 45 dataset """wn18rr""" +44 45 model """complex""" +44 45 loss """bceaftersigmoid""" +44 45 regularizer """no""" +44 45 optimizer """adam""" +44 45 training_loop """owa""" +44 45 negative_sampler """basic""" +44 45 evaluator """rankbased""" +44 46 dataset """wn18rr""" +44 46 model """complex""" +44 46 loss """bceaftersigmoid""" +44 46 regularizer """no""" +44 46 optimizer """adam""" +44 46 training_loop """owa""" +44 46 negative_sampler """basic""" +44 46 evaluator """rankbased""" +44 47 dataset """wn18rr""" +44 47 model """complex""" +44 47 loss """bceaftersigmoid""" +44 47 regularizer """no""" +44 47 optimizer """adam""" +44 47 training_loop 
"""owa""" +44 47 negative_sampler """basic""" +44 47 evaluator """rankbased""" +44 48 dataset """wn18rr""" +44 48 model """complex""" +44 48 loss """bceaftersigmoid""" +44 48 regularizer """no""" +44 48 optimizer """adam""" +44 48 training_loop """owa""" +44 48 negative_sampler """basic""" +44 48 evaluator """rankbased""" +44 49 dataset """wn18rr""" +44 49 model """complex""" +44 49 loss """bceaftersigmoid""" +44 49 regularizer """no""" +44 49 optimizer """adam""" +44 49 training_loop """owa""" +44 49 negative_sampler """basic""" +44 49 evaluator """rankbased""" +44 50 dataset """wn18rr""" +44 50 model """complex""" +44 50 loss """bceaftersigmoid""" +44 50 regularizer """no""" +44 50 optimizer """adam""" +44 50 training_loop """owa""" +44 50 negative_sampler """basic""" +44 50 evaluator """rankbased""" +44 51 dataset """wn18rr""" +44 51 model """complex""" +44 51 loss """bceaftersigmoid""" +44 51 regularizer """no""" +44 51 optimizer """adam""" +44 51 training_loop """owa""" +44 51 negative_sampler """basic""" +44 51 evaluator """rankbased""" +44 52 dataset """wn18rr""" +44 52 model """complex""" +44 52 loss """bceaftersigmoid""" +44 52 regularizer """no""" +44 52 optimizer """adam""" +44 52 training_loop """owa""" +44 52 negative_sampler """basic""" +44 52 evaluator """rankbased""" +44 53 dataset """wn18rr""" +44 53 model """complex""" +44 53 loss """bceaftersigmoid""" +44 53 regularizer """no""" +44 53 optimizer """adam""" +44 53 training_loop """owa""" +44 53 negative_sampler """basic""" +44 53 evaluator """rankbased""" +44 54 dataset """wn18rr""" +44 54 model """complex""" +44 54 loss """bceaftersigmoid""" +44 54 regularizer """no""" +44 54 optimizer """adam""" +44 54 training_loop """owa""" +44 54 negative_sampler """basic""" +44 54 evaluator """rankbased""" +44 55 dataset """wn18rr""" +44 55 model """complex""" +44 55 loss """bceaftersigmoid""" +44 55 regularizer """no""" +44 55 optimizer """adam""" +44 55 training_loop """owa""" +44 55 negative_sampler 
"""basic""" +44 55 evaluator """rankbased""" +44 56 dataset """wn18rr""" +44 56 model """complex""" +44 56 loss """bceaftersigmoid""" +44 56 regularizer """no""" +44 56 optimizer """adam""" +44 56 training_loop """owa""" +44 56 negative_sampler """basic""" +44 56 evaluator """rankbased""" +44 57 dataset """wn18rr""" +44 57 model """complex""" +44 57 loss """bceaftersigmoid""" +44 57 regularizer """no""" +44 57 optimizer """adam""" +44 57 training_loop """owa""" +44 57 negative_sampler """basic""" +44 57 evaluator """rankbased""" +44 58 dataset """wn18rr""" +44 58 model """complex""" +44 58 loss """bceaftersigmoid""" +44 58 regularizer """no""" +44 58 optimizer """adam""" +44 58 training_loop """owa""" +44 58 negative_sampler """basic""" +44 58 evaluator """rankbased""" +44 59 dataset """wn18rr""" +44 59 model """complex""" +44 59 loss """bceaftersigmoid""" +44 59 regularizer """no""" +44 59 optimizer """adam""" +44 59 training_loop """owa""" +44 59 negative_sampler """basic""" +44 59 evaluator """rankbased""" +44 60 dataset """wn18rr""" +44 60 model """complex""" +44 60 loss """bceaftersigmoid""" +44 60 regularizer """no""" +44 60 optimizer """adam""" +44 60 training_loop """owa""" +44 60 negative_sampler """basic""" +44 60 evaluator """rankbased""" +44 61 dataset """wn18rr""" +44 61 model """complex""" +44 61 loss """bceaftersigmoid""" +44 61 regularizer """no""" +44 61 optimizer """adam""" +44 61 training_loop """owa""" +44 61 negative_sampler """basic""" +44 61 evaluator """rankbased""" +44 62 dataset """wn18rr""" +44 62 model """complex""" +44 62 loss """bceaftersigmoid""" +44 62 regularizer """no""" +44 62 optimizer """adam""" +44 62 training_loop """owa""" +44 62 negative_sampler """basic""" +44 62 evaluator """rankbased""" +44 63 dataset """wn18rr""" +44 63 model """complex""" +44 63 loss """bceaftersigmoid""" +44 63 regularizer """no""" +44 63 optimizer """adam""" +44 63 training_loop """owa""" +44 63 negative_sampler """basic""" +44 63 evaluator 
"""rankbased""" +44 64 dataset """wn18rr""" +44 64 model """complex""" +44 64 loss """bceaftersigmoid""" +44 64 regularizer """no""" +44 64 optimizer """adam""" +44 64 training_loop """owa""" +44 64 negative_sampler """basic""" +44 64 evaluator """rankbased""" +44 65 dataset """wn18rr""" +44 65 model """complex""" +44 65 loss """bceaftersigmoid""" +44 65 regularizer """no""" +44 65 optimizer """adam""" +44 65 training_loop """owa""" +44 65 negative_sampler """basic""" +44 65 evaluator """rankbased""" +44 66 dataset """wn18rr""" +44 66 model """complex""" +44 66 loss """bceaftersigmoid""" +44 66 regularizer """no""" +44 66 optimizer """adam""" +44 66 training_loop """owa""" +44 66 negative_sampler """basic""" +44 66 evaluator """rankbased""" +44 67 dataset """wn18rr""" +44 67 model """complex""" +44 67 loss """bceaftersigmoid""" +44 67 regularizer """no""" +44 67 optimizer """adam""" +44 67 training_loop """owa""" +44 67 negative_sampler """basic""" +44 67 evaluator """rankbased""" +44 68 dataset """wn18rr""" +44 68 model """complex""" +44 68 loss """bceaftersigmoid""" +44 68 regularizer """no""" +44 68 optimizer """adam""" +44 68 training_loop """owa""" +44 68 negative_sampler """basic""" +44 68 evaluator """rankbased""" +44 69 dataset """wn18rr""" +44 69 model """complex""" +44 69 loss """bceaftersigmoid""" +44 69 regularizer """no""" +44 69 optimizer """adam""" +44 69 training_loop """owa""" +44 69 negative_sampler """basic""" +44 69 evaluator """rankbased""" +44 70 dataset """wn18rr""" +44 70 model """complex""" +44 70 loss """bceaftersigmoid""" +44 70 regularizer """no""" +44 70 optimizer """adam""" +44 70 training_loop """owa""" +44 70 negative_sampler """basic""" +44 70 evaluator """rankbased""" +44 71 dataset """wn18rr""" +44 71 model """complex""" +44 71 loss """bceaftersigmoid""" +44 71 regularizer """no""" +44 71 optimizer """adam""" +44 71 training_loop """owa""" +44 71 negative_sampler """basic""" +44 71 evaluator """rankbased""" +44 72 dataset 
"""wn18rr""" +44 72 model """complex""" +44 72 loss """bceaftersigmoid""" +44 72 regularizer """no""" +44 72 optimizer """adam""" +44 72 training_loop """owa""" +44 72 negative_sampler """basic""" +44 72 evaluator """rankbased""" +44 73 dataset """wn18rr""" +44 73 model """complex""" +44 73 loss """bceaftersigmoid""" +44 73 regularizer """no""" +44 73 optimizer """adam""" +44 73 training_loop """owa""" +44 73 negative_sampler """basic""" +44 73 evaluator """rankbased""" +44 74 dataset """wn18rr""" +44 74 model """complex""" +44 74 loss """bceaftersigmoid""" +44 74 regularizer """no""" +44 74 optimizer """adam""" +44 74 training_loop """owa""" +44 74 negative_sampler """basic""" +44 74 evaluator """rankbased""" +44 75 dataset """wn18rr""" +44 75 model """complex""" +44 75 loss """bceaftersigmoid""" +44 75 regularizer """no""" +44 75 optimizer """adam""" +44 75 training_loop """owa""" +44 75 negative_sampler """basic""" +44 75 evaluator """rankbased""" +44 76 dataset """wn18rr""" +44 76 model """complex""" +44 76 loss """bceaftersigmoid""" +44 76 regularizer """no""" +44 76 optimizer """adam""" +44 76 training_loop """owa""" +44 76 negative_sampler """basic""" +44 76 evaluator """rankbased""" +44 77 dataset """wn18rr""" +44 77 model """complex""" +44 77 loss """bceaftersigmoid""" +44 77 regularizer """no""" +44 77 optimizer """adam""" +44 77 training_loop """owa""" +44 77 negative_sampler """basic""" +44 77 evaluator """rankbased""" +44 78 dataset """wn18rr""" +44 78 model """complex""" +44 78 loss """bceaftersigmoid""" +44 78 regularizer """no""" +44 78 optimizer """adam""" +44 78 training_loop """owa""" +44 78 negative_sampler """basic""" +44 78 evaluator """rankbased""" +44 79 dataset """wn18rr""" +44 79 model """complex""" +44 79 loss """bceaftersigmoid""" +44 79 regularizer """no""" +44 79 optimizer """adam""" +44 79 training_loop """owa""" +44 79 negative_sampler """basic""" +44 79 evaluator """rankbased""" +44 80 dataset """wn18rr""" +44 80 model """complex""" 
+44 80 loss """bceaftersigmoid""" +44 80 regularizer """no""" +44 80 optimizer """adam""" +44 80 training_loop """owa""" +44 80 negative_sampler """basic""" +44 80 evaluator """rankbased""" +44 81 dataset """wn18rr""" +44 81 model """complex""" +44 81 loss """bceaftersigmoid""" +44 81 regularizer """no""" +44 81 optimizer """adam""" +44 81 training_loop """owa""" +44 81 negative_sampler """basic""" +44 81 evaluator """rankbased""" +44 82 dataset """wn18rr""" +44 82 model """complex""" +44 82 loss """bceaftersigmoid""" +44 82 regularizer """no""" +44 82 optimizer """adam""" +44 82 training_loop """owa""" +44 82 negative_sampler """basic""" +44 82 evaluator """rankbased""" +44 83 dataset """wn18rr""" +44 83 model """complex""" +44 83 loss """bceaftersigmoid""" +44 83 regularizer """no""" +44 83 optimizer """adam""" +44 83 training_loop """owa""" +44 83 negative_sampler """basic""" +44 83 evaluator """rankbased""" +44 84 dataset """wn18rr""" +44 84 model """complex""" +44 84 loss """bceaftersigmoid""" +44 84 regularizer """no""" +44 84 optimizer """adam""" +44 84 training_loop """owa""" +44 84 negative_sampler """basic""" +44 84 evaluator """rankbased""" +44 85 dataset """wn18rr""" +44 85 model """complex""" +44 85 loss """bceaftersigmoid""" +44 85 regularizer """no""" +44 85 optimizer """adam""" +44 85 training_loop """owa""" +44 85 negative_sampler """basic""" +44 85 evaluator """rankbased""" +44 86 dataset """wn18rr""" +44 86 model """complex""" +44 86 loss """bceaftersigmoid""" +44 86 regularizer """no""" +44 86 optimizer """adam""" +44 86 training_loop """owa""" +44 86 negative_sampler """basic""" +44 86 evaluator """rankbased""" +44 87 dataset """wn18rr""" +44 87 model """complex""" +44 87 loss """bceaftersigmoid""" +44 87 regularizer """no""" +44 87 optimizer """adam""" +44 87 training_loop """owa""" +44 87 negative_sampler """basic""" +44 87 evaluator """rankbased""" +44 88 dataset """wn18rr""" +44 88 model """complex""" +44 88 loss """bceaftersigmoid""" +44 
88 regularizer """no""" +44 88 optimizer """adam""" +44 88 training_loop """owa""" +44 88 negative_sampler """basic""" +44 88 evaluator """rankbased""" +44 89 dataset """wn18rr""" +44 89 model """complex""" +44 89 loss """bceaftersigmoid""" +44 89 regularizer """no""" +44 89 optimizer """adam""" +44 89 training_loop """owa""" +44 89 negative_sampler """basic""" +44 89 evaluator """rankbased""" +44 90 dataset """wn18rr""" +44 90 model """complex""" +44 90 loss """bceaftersigmoid""" +44 90 regularizer """no""" +44 90 optimizer """adam""" +44 90 training_loop """owa""" +44 90 negative_sampler """basic""" +44 90 evaluator """rankbased""" +44 91 dataset """wn18rr""" +44 91 model """complex""" +44 91 loss """bceaftersigmoid""" +44 91 regularizer """no""" +44 91 optimizer """adam""" +44 91 training_loop """owa""" +44 91 negative_sampler """basic""" +44 91 evaluator """rankbased""" +44 92 dataset """wn18rr""" +44 92 model """complex""" +44 92 loss """bceaftersigmoid""" +44 92 regularizer """no""" +44 92 optimizer """adam""" +44 92 training_loop """owa""" +44 92 negative_sampler """basic""" +44 92 evaluator """rankbased""" +44 93 dataset """wn18rr""" +44 93 model """complex""" +44 93 loss """bceaftersigmoid""" +44 93 regularizer """no""" +44 93 optimizer """adam""" +44 93 training_loop """owa""" +44 93 negative_sampler """basic""" +44 93 evaluator """rankbased""" +44 94 dataset """wn18rr""" +44 94 model """complex""" +44 94 loss """bceaftersigmoid""" +44 94 regularizer """no""" +44 94 optimizer """adam""" +44 94 training_loop """owa""" +44 94 negative_sampler """basic""" +44 94 evaluator """rankbased""" +44 95 dataset """wn18rr""" +44 95 model """complex""" +44 95 loss """bceaftersigmoid""" +44 95 regularizer """no""" +44 95 optimizer """adam""" +44 95 training_loop """owa""" +44 95 negative_sampler """basic""" +44 95 evaluator """rankbased""" +44 96 dataset """wn18rr""" +44 96 model """complex""" +44 96 loss """bceaftersigmoid""" +44 96 regularizer """no""" +44 96 
optimizer """adam""" +44 96 training_loop """owa""" +44 96 negative_sampler """basic""" +44 96 evaluator """rankbased""" +44 97 dataset """wn18rr""" +44 97 model """complex""" +44 97 loss """bceaftersigmoid""" +44 97 regularizer """no""" +44 97 optimizer """adam""" +44 97 training_loop """owa""" +44 97 negative_sampler """basic""" +44 97 evaluator """rankbased""" +44 98 dataset """wn18rr""" +44 98 model """complex""" +44 98 loss """bceaftersigmoid""" +44 98 regularizer """no""" +44 98 optimizer """adam""" +44 98 training_loop """owa""" +44 98 negative_sampler """basic""" +44 98 evaluator """rankbased""" +44 99 dataset """wn18rr""" +44 99 model """complex""" +44 99 loss """bceaftersigmoid""" +44 99 regularizer """no""" +44 99 optimizer """adam""" +44 99 training_loop """owa""" +44 99 negative_sampler """basic""" +44 99 evaluator """rankbased""" +44 100 dataset """wn18rr""" +44 100 model """complex""" +44 100 loss """bceaftersigmoid""" +44 100 regularizer """no""" +44 100 optimizer """adam""" +44 100 training_loop """owa""" +44 100 negative_sampler """basic""" +44 100 evaluator """rankbased""" +45 1 model.embedding_dim 2.0 +45 1 optimizer.lr 0.004652113665267051 +45 1 negative_sampler.num_negs_per_pos 7.0 +45 1 training.batch_size 0.0 +45 2 model.embedding_dim 1.0 +45 2 optimizer.lr 0.0051700481156334264 +45 2 negative_sampler.num_negs_per_pos 65.0 +45 2 training.batch_size 2.0 +45 3 model.embedding_dim 0.0 +45 3 optimizer.lr 0.005004931213861516 +45 3 negative_sampler.num_negs_per_pos 97.0 +45 3 training.batch_size 0.0 +45 4 model.embedding_dim 0.0 +45 4 optimizer.lr 0.004246093976176378 +45 4 negative_sampler.num_negs_per_pos 87.0 +45 4 training.batch_size 1.0 +45 5 model.embedding_dim 2.0 +45 5 optimizer.lr 0.0020957587868415087 +45 5 negative_sampler.num_negs_per_pos 6.0 +45 5 training.batch_size 1.0 +45 6 model.embedding_dim 1.0 +45 6 optimizer.lr 0.009053110714289823 +45 6 negative_sampler.num_negs_per_pos 13.0 +45 6 training.batch_size 0.0 +45 7 
model.embedding_dim 1.0 +45 7 optimizer.lr 0.04371191346584077 +45 7 negative_sampler.num_negs_per_pos 84.0 +45 7 training.batch_size 1.0 +45 8 model.embedding_dim 1.0 +45 8 optimizer.lr 0.030147913089813818 +45 8 negative_sampler.num_negs_per_pos 30.0 +45 8 training.batch_size 1.0 +45 9 model.embedding_dim 0.0 +45 9 optimizer.lr 0.007904346251911578 +45 9 negative_sampler.num_negs_per_pos 61.0 +45 9 training.batch_size 0.0 +45 10 model.embedding_dim 1.0 +45 10 optimizer.lr 0.0010305601748807284 +45 10 negative_sampler.num_negs_per_pos 82.0 +45 10 training.batch_size 1.0 +45 11 model.embedding_dim 0.0 +45 11 optimizer.lr 0.028820225851497055 +45 11 negative_sampler.num_negs_per_pos 66.0 +45 11 training.batch_size 0.0 +45 12 model.embedding_dim 0.0 +45 12 optimizer.lr 0.0048650823721995565 +45 12 negative_sampler.num_negs_per_pos 44.0 +45 12 training.batch_size 2.0 +45 13 model.embedding_dim 2.0 +45 13 optimizer.lr 0.05528918358279147 +45 13 negative_sampler.num_negs_per_pos 79.0 +45 13 training.batch_size 2.0 +45 14 model.embedding_dim 2.0 +45 14 optimizer.lr 0.0017919351699242979 +45 14 negative_sampler.num_negs_per_pos 85.0 +45 14 training.batch_size 0.0 +45 15 model.embedding_dim 0.0 +45 15 optimizer.lr 0.0034377850705316535 +45 15 negative_sampler.num_negs_per_pos 16.0 +45 15 training.batch_size 2.0 +45 16 model.embedding_dim 2.0 +45 16 optimizer.lr 0.04795238116849049 +45 16 negative_sampler.num_negs_per_pos 29.0 +45 16 training.batch_size 1.0 +45 17 model.embedding_dim 1.0 +45 17 optimizer.lr 0.005435012955794589 +45 17 negative_sampler.num_negs_per_pos 35.0 +45 17 training.batch_size 2.0 +45 18 model.embedding_dim 1.0 +45 18 optimizer.lr 0.041349921066642266 +45 18 negative_sampler.num_negs_per_pos 97.0 +45 18 training.batch_size 1.0 +45 19 model.embedding_dim 0.0 +45 19 optimizer.lr 0.00650394565032217 +45 19 negative_sampler.num_negs_per_pos 16.0 +45 19 training.batch_size 2.0 +45 20 model.embedding_dim 1.0 +45 20 optimizer.lr 0.0013829714749847212 +45 20 
negative_sampler.num_negs_per_pos 77.0 +45 20 training.batch_size 0.0 +45 21 model.embedding_dim 0.0 +45 21 optimizer.lr 0.0010889901337787034 +45 21 negative_sampler.num_negs_per_pos 59.0 +45 21 training.batch_size 2.0 +45 22 model.embedding_dim 0.0 +45 22 optimizer.lr 0.04129886387838782 +45 22 negative_sampler.num_negs_per_pos 34.0 +45 22 training.batch_size 2.0 +45 23 model.embedding_dim 2.0 +45 23 optimizer.lr 0.0030755605934826664 +45 23 negative_sampler.num_negs_per_pos 28.0 +45 23 training.batch_size 1.0 +45 24 model.embedding_dim 2.0 +45 24 optimizer.lr 0.0028686529658820408 +45 24 negative_sampler.num_negs_per_pos 10.0 +45 24 training.batch_size 1.0 +45 25 model.embedding_dim 0.0 +45 25 optimizer.lr 0.01776700632259231 +45 25 negative_sampler.num_negs_per_pos 43.0 +45 25 training.batch_size 1.0 +45 26 model.embedding_dim 1.0 +45 26 optimizer.lr 0.008247203449783939 +45 26 negative_sampler.num_negs_per_pos 85.0 +45 26 training.batch_size 1.0 +45 27 model.embedding_dim 2.0 +45 27 optimizer.lr 0.005396116125715846 +45 27 negative_sampler.num_negs_per_pos 43.0 +45 27 training.batch_size 1.0 +45 28 model.embedding_dim 1.0 +45 28 optimizer.lr 0.09430402464459953 +45 28 negative_sampler.num_negs_per_pos 76.0 +45 28 training.batch_size 2.0 +45 29 model.embedding_dim 2.0 +45 29 optimizer.lr 0.001670547875085496 +45 29 negative_sampler.num_negs_per_pos 28.0 +45 29 training.batch_size 1.0 +45 30 model.embedding_dim 0.0 +45 30 optimizer.lr 0.0025337662209633438 +45 30 negative_sampler.num_negs_per_pos 27.0 +45 30 training.batch_size 1.0 +45 31 model.embedding_dim 0.0 +45 31 optimizer.lr 0.0011656221318561402 +45 31 negative_sampler.num_negs_per_pos 83.0 +45 31 training.batch_size 1.0 +45 32 model.embedding_dim 2.0 +45 32 optimizer.lr 0.0653490843827194 +45 32 negative_sampler.num_negs_per_pos 54.0 +45 32 training.batch_size 2.0 +45 33 model.embedding_dim 0.0 +45 33 optimizer.lr 0.0018636630135746453 +45 33 negative_sampler.num_negs_per_pos 90.0 +45 33 
training.batch_size 1.0 +45 34 model.embedding_dim 0.0 +45 34 optimizer.lr 0.047411197740941304 +45 34 negative_sampler.num_negs_per_pos 83.0 +45 34 training.batch_size 1.0 +45 35 model.embedding_dim 1.0 +45 35 optimizer.lr 0.005009645251261105 +45 35 negative_sampler.num_negs_per_pos 56.0 +45 35 training.batch_size 2.0 +45 36 model.embedding_dim 0.0 +45 36 optimizer.lr 0.0016466148974384656 +45 36 negative_sampler.num_negs_per_pos 9.0 +45 36 training.batch_size 1.0 +45 37 model.embedding_dim 2.0 +45 37 optimizer.lr 0.05639707514159211 +45 37 negative_sampler.num_negs_per_pos 73.0 +45 37 training.batch_size 1.0 +45 38 model.embedding_dim 1.0 +45 38 optimizer.lr 0.002357407687708708 +45 38 negative_sampler.num_negs_per_pos 1.0 +45 38 training.batch_size 1.0 +45 39 model.embedding_dim 2.0 +45 39 optimizer.lr 0.004020561294943233 +45 39 negative_sampler.num_negs_per_pos 53.0 +45 39 training.batch_size 1.0 +45 40 model.embedding_dim 2.0 +45 40 optimizer.lr 0.00614689904784507 +45 40 negative_sampler.num_negs_per_pos 57.0 +45 40 training.batch_size 2.0 +45 41 model.embedding_dim 1.0 +45 41 optimizer.lr 0.0023569830554053795 +45 41 negative_sampler.num_negs_per_pos 26.0 +45 41 training.batch_size 2.0 +45 42 model.embedding_dim 0.0 +45 42 optimizer.lr 0.0017451660952704563 +45 42 negative_sampler.num_negs_per_pos 38.0 +45 42 training.batch_size 2.0 +45 43 model.embedding_dim 2.0 +45 43 optimizer.lr 0.03802512503417061 +45 43 negative_sampler.num_negs_per_pos 93.0 +45 43 training.batch_size 2.0 +45 44 model.embedding_dim 1.0 +45 44 optimizer.lr 0.0068775209495010045 +45 44 negative_sampler.num_negs_per_pos 18.0 +45 44 training.batch_size 2.0 +45 45 model.embedding_dim 2.0 +45 45 optimizer.lr 0.06317191313353146 +45 45 negative_sampler.num_negs_per_pos 42.0 +45 45 training.batch_size 0.0 +45 46 model.embedding_dim 1.0 +45 46 optimizer.lr 0.007973981267256328 +45 46 negative_sampler.num_negs_per_pos 7.0 +45 46 training.batch_size 0.0 +45 47 model.embedding_dim 1.0 +45 47 
optimizer.lr 0.005768238557172809 +45 47 negative_sampler.num_negs_per_pos 69.0 +45 47 training.batch_size 0.0 +45 48 model.embedding_dim 0.0 +45 48 optimizer.lr 0.025219547972032876 +45 48 negative_sampler.num_negs_per_pos 37.0 +45 48 training.batch_size 1.0 +45 49 model.embedding_dim 1.0 +45 49 optimizer.lr 0.002437985819640295 +45 49 negative_sampler.num_negs_per_pos 72.0 +45 49 training.batch_size 1.0 +45 50 model.embedding_dim 2.0 +45 50 optimizer.lr 0.003020685514718745 +45 50 negative_sampler.num_negs_per_pos 46.0 +45 50 training.batch_size 0.0 +45 51 model.embedding_dim 2.0 +45 51 optimizer.lr 0.032157545582285486 +45 51 negative_sampler.num_negs_per_pos 70.0 +45 51 training.batch_size 1.0 +45 52 model.embedding_dim 2.0 +45 52 optimizer.lr 0.0205254881863972 +45 52 negative_sampler.num_negs_per_pos 5.0 +45 52 training.batch_size 1.0 +45 53 model.embedding_dim 2.0 +45 53 optimizer.lr 0.02577978937296399 +45 53 negative_sampler.num_negs_per_pos 63.0 +45 53 training.batch_size 1.0 +45 54 model.embedding_dim 0.0 +45 54 optimizer.lr 0.004971125078682673 +45 54 negative_sampler.num_negs_per_pos 82.0 +45 54 training.batch_size 1.0 +45 55 model.embedding_dim 0.0 +45 55 optimizer.lr 0.006361461141563394 +45 55 negative_sampler.num_negs_per_pos 11.0 +45 55 training.batch_size 2.0 +45 56 model.embedding_dim 0.0 +45 56 optimizer.lr 0.002351352160046702 +45 56 negative_sampler.num_negs_per_pos 68.0 +45 56 training.batch_size 1.0 +45 57 model.embedding_dim 0.0 +45 57 optimizer.lr 0.0038227819668507036 +45 57 negative_sampler.num_negs_per_pos 60.0 +45 57 training.batch_size 2.0 +45 58 model.embedding_dim 0.0 +45 58 optimizer.lr 0.029583459369585032 +45 58 negative_sampler.num_negs_per_pos 26.0 +45 58 training.batch_size 1.0 +45 59 model.embedding_dim 1.0 +45 59 optimizer.lr 0.0062387192437183825 +45 59 negative_sampler.num_negs_per_pos 81.0 +45 59 training.batch_size 0.0 +45 60 model.embedding_dim 1.0 +45 60 optimizer.lr 0.017657817132946802 +45 60 
negative_sampler.num_negs_per_pos 86.0 +45 60 training.batch_size 2.0 +45 61 model.embedding_dim 2.0 +45 61 optimizer.lr 0.0015264968809022977 +45 61 negative_sampler.num_negs_per_pos 63.0 +45 61 training.batch_size 1.0 +45 62 model.embedding_dim 0.0 +45 62 optimizer.lr 0.001910730782828424 +45 62 negative_sampler.num_negs_per_pos 76.0 +45 62 training.batch_size 0.0 +45 63 model.embedding_dim 0.0 +45 63 optimizer.lr 0.07960281200760463 +45 63 negative_sampler.num_negs_per_pos 30.0 +45 63 training.batch_size 1.0 +45 64 model.embedding_dim 0.0 +45 64 optimizer.lr 0.0034817694277317836 +45 64 negative_sampler.num_negs_per_pos 25.0 +45 64 training.batch_size 1.0 +45 65 model.embedding_dim 0.0 +45 65 optimizer.lr 0.03601231854254862 +45 65 negative_sampler.num_negs_per_pos 57.0 +45 65 training.batch_size 2.0 +45 66 model.embedding_dim 2.0 +45 66 optimizer.lr 0.00860166020349113 +45 66 negative_sampler.num_negs_per_pos 45.0 +45 66 training.batch_size 1.0 +45 67 model.embedding_dim 0.0 +45 67 optimizer.lr 0.004226755030189429 +45 67 negative_sampler.num_negs_per_pos 49.0 +45 67 training.batch_size 1.0 +45 68 model.embedding_dim 0.0 +45 68 optimizer.lr 0.001286428565040331 +45 68 negative_sampler.num_negs_per_pos 75.0 +45 68 training.batch_size 1.0 +45 69 model.embedding_dim 1.0 +45 69 optimizer.lr 0.024584502552341667 +45 69 negative_sampler.num_negs_per_pos 25.0 +45 69 training.batch_size 1.0 +45 70 model.embedding_dim 1.0 +45 70 optimizer.lr 0.04109082886208205 +45 70 negative_sampler.num_negs_per_pos 36.0 +45 70 training.batch_size 1.0 +45 71 model.embedding_dim 1.0 +45 71 optimizer.lr 0.020228392962999817 +45 71 negative_sampler.num_negs_per_pos 49.0 +45 71 training.batch_size 1.0 +45 72 model.embedding_dim 1.0 +45 72 optimizer.lr 0.008481722683403383 +45 72 negative_sampler.num_negs_per_pos 19.0 +45 72 training.batch_size 2.0 +45 73 model.embedding_dim 1.0 +45 73 optimizer.lr 0.05396907891906843 +45 73 negative_sampler.num_negs_per_pos 92.0 +45 73 training.batch_size 
2.0 +45 74 model.embedding_dim 0.0 +45 74 optimizer.lr 0.010536885649888075 +45 74 negative_sampler.num_negs_per_pos 40.0 +45 74 training.batch_size 2.0 +45 75 model.embedding_dim 2.0 +45 75 optimizer.lr 0.01475163273831464 +45 75 negative_sampler.num_negs_per_pos 58.0 +45 75 training.batch_size 1.0 +45 76 model.embedding_dim 2.0 +45 76 optimizer.lr 0.018760006015543836 +45 76 negative_sampler.num_negs_per_pos 23.0 +45 76 training.batch_size 1.0 +45 77 model.embedding_dim 2.0 +45 77 optimizer.lr 0.04440104858558693 +45 77 negative_sampler.num_negs_per_pos 70.0 +45 77 training.batch_size 1.0 +45 78 model.embedding_dim 0.0 +45 78 optimizer.lr 0.076069579904433 +45 78 negative_sampler.num_negs_per_pos 25.0 +45 78 training.batch_size 0.0 +45 79 model.embedding_dim 1.0 +45 79 optimizer.lr 0.009258912580584448 +45 79 negative_sampler.num_negs_per_pos 26.0 +45 79 training.batch_size 0.0 +45 80 model.embedding_dim 2.0 +45 80 optimizer.lr 0.0022705309291692222 +45 80 negative_sampler.num_negs_per_pos 84.0 +45 80 training.batch_size 2.0 +45 81 model.embedding_dim 2.0 +45 81 optimizer.lr 0.0062075745092211535 +45 81 negative_sampler.num_negs_per_pos 99.0 +45 81 training.batch_size 1.0 +45 82 model.embedding_dim 1.0 +45 82 optimizer.lr 0.0031909062225435117 +45 82 negative_sampler.num_negs_per_pos 40.0 +45 82 training.batch_size 2.0 +45 83 model.embedding_dim 2.0 +45 83 optimizer.lr 0.04224441457364133 +45 83 negative_sampler.num_negs_per_pos 10.0 +45 83 training.batch_size 1.0 +45 84 model.embedding_dim 2.0 +45 84 optimizer.lr 0.06880660946387528 +45 84 negative_sampler.num_negs_per_pos 95.0 +45 84 training.batch_size 1.0 +45 85 model.embedding_dim 0.0 +45 85 optimizer.lr 0.07311127225468768 +45 85 negative_sampler.num_negs_per_pos 26.0 +45 85 training.batch_size 0.0 +45 86 model.embedding_dim 0.0 +45 86 optimizer.lr 0.0897918675655805 +45 86 negative_sampler.num_negs_per_pos 7.0 +45 86 training.batch_size 2.0 +45 87 model.embedding_dim 0.0 +45 87 optimizer.lr 
0.0056312901857256525 +45 87 negative_sampler.num_negs_per_pos 90.0 +45 87 training.batch_size 1.0 +45 88 model.embedding_dim 0.0 +45 88 optimizer.lr 0.007659454460277928 +45 88 negative_sampler.num_negs_per_pos 83.0 +45 88 training.batch_size 2.0 +45 89 model.embedding_dim 1.0 +45 89 optimizer.lr 0.03636884850715032 +45 89 negative_sampler.num_negs_per_pos 86.0 +45 89 training.batch_size 0.0 +45 90 model.embedding_dim 0.0 +45 90 optimizer.lr 0.009458197445213267 +45 90 negative_sampler.num_negs_per_pos 56.0 +45 90 training.batch_size 1.0 +45 91 model.embedding_dim 2.0 +45 91 optimizer.lr 0.0016191394658758085 +45 91 negative_sampler.num_negs_per_pos 9.0 +45 91 training.batch_size 1.0 +45 92 model.embedding_dim 2.0 +45 92 optimizer.lr 0.0187939174962929 +45 92 negative_sampler.num_negs_per_pos 55.0 +45 92 training.batch_size 0.0 +45 93 model.embedding_dim 0.0 +45 93 optimizer.lr 0.08034446816800778 +45 93 negative_sampler.num_negs_per_pos 6.0 +45 93 training.batch_size 1.0 +45 94 model.embedding_dim 2.0 +45 94 optimizer.lr 0.08930956787880284 +45 94 negative_sampler.num_negs_per_pos 50.0 +45 94 training.batch_size 2.0 +45 95 model.embedding_dim 0.0 +45 95 optimizer.lr 0.021602858858666012 +45 95 negative_sampler.num_negs_per_pos 29.0 +45 95 training.batch_size 0.0 +45 96 model.embedding_dim 2.0 +45 96 optimizer.lr 0.0017700986924562654 +45 96 negative_sampler.num_negs_per_pos 85.0 +45 96 training.batch_size 2.0 +45 97 model.embedding_dim 0.0 +45 97 optimizer.lr 0.060322760606917575 +45 97 negative_sampler.num_negs_per_pos 30.0 +45 97 training.batch_size 2.0 +45 98 model.embedding_dim 1.0 +45 98 optimizer.lr 0.07148883449387522 +45 98 negative_sampler.num_negs_per_pos 62.0 +45 98 training.batch_size 2.0 +45 99 model.embedding_dim 0.0 +45 99 optimizer.lr 0.022145301559033978 +45 99 negative_sampler.num_negs_per_pos 1.0 +45 99 training.batch_size 1.0 +45 100 model.embedding_dim 0.0 +45 100 optimizer.lr 0.03279443045929809 +45 100 negative_sampler.num_negs_per_pos 92.0 
+45 100 training.batch_size 1.0 +45 1 dataset """wn18rr""" +45 1 model """complex""" +45 1 loss """softplus""" +45 1 regularizer """no""" +45 1 optimizer """adam""" +45 1 training_loop """owa""" +45 1 negative_sampler """basic""" +45 1 evaluator """rankbased""" +45 2 dataset """wn18rr""" +45 2 model """complex""" +45 2 loss """softplus""" +45 2 regularizer """no""" +45 2 optimizer """adam""" +45 2 training_loop """owa""" +45 2 negative_sampler """basic""" +45 2 evaluator """rankbased""" +45 3 dataset """wn18rr""" +45 3 model """complex""" +45 3 loss """softplus""" +45 3 regularizer """no""" +45 3 optimizer """adam""" +45 3 training_loop """owa""" +45 3 negative_sampler """basic""" +45 3 evaluator """rankbased""" +45 4 dataset """wn18rr""" +45 4 model """complex""" +45 4 loss """softplus""" +45 4 regularizer """no""" +45 4 optimizer """adam""" +45 4 training_loop """owa""" +45 4 negative_sampler """basic""" +45 4 evaluator """rankbased""" +45 5 dataset """wn18rr""" +45 5 model """complex""" +45 5 loss """softplus""" +45 5 regularizer """no""" +45 5 optimizer """adam""" +45 5 training_loop """owa""" +45 5 negative_sampler """basic""" +45 5 evaluator """rankbased""" +45 6 dataset """wn18rr""" +45 6 model """complex""" +45 6 loss """softplus""" +45 6 regularizer """no""" +45 6 optimizer """adam""" +45 6 training_loop """owa""" +45 6 negative_sampler """basic""" +45 6 evaluator """rankbased""" +45 7 dataset """wn18rr""" +45 7 model """complex""" +45 7 loss """softplus""" +45 7 regularizer """no""" +45 7 optimizer """adam""" +45 7 training_loop """owa""" +45 7 negative_sampler """basic""" +45 7 evaluator """rankbased""" +45 8 dataset """wn18rr""" +45 8 model """complex""" +45 8 loss """softplus""" +45 8 regularizer """no""" +45 8 optimizer """adam""" +45 8 training_loop """owa""" +45 8 negative_sampler """basic""" +45 8 evaluator """rankbased""" +45 9 dataset """wn18rr""" +45 9 model """complex""" +45 9 loss """softplus""" +45 9 regularizer """no""" +45 9 optimizer 
"""adam""" +45 9 training_loop """owa""" +45 9 negative_sampler """basic""" +45 9 evaluator """rankbased""" +45 10 dataset """wn18rr""" +45 10 model """complex""" +45 10 loss """softplus""" +45 10 regularizer """no""" +45 10 optimizer """adam""" +45 10 training_loop """owa""" +45 10 negative_sampler """basic""" +45 10 evaluator """rankbased""" +45 11 dataset """wn18rr""" +45 11 model """complex""" +45 11 loss """softplus""" +45 11 regularizer """no""" +45 11 optimizer """adam""" +45 11 training_loop """owa""" +45 11 negative_sampler """basic""" +45 11 evaluator """rankbased""" +45 12 dataset """wn18rr""" +45 12 model """complex""" +45 12 loss """softplus""" +45 12 regularizer """no""" +45 12 optimizer """adam""" +45 12 training_loop """owa""" +45 12 negative_sampler """basic""" +45 12 evaluator """rankbased""" +45 13 dataset """wn18rr""" +45 13 model """complex""" +45 13 loss """softplus""" +45 13 regularizer """no""" +45 13 optimizer """adam""" +45 13 training_loop """owa""" +45 13 negative_sampler """basic""" +45 13 evaluator """rankbased""" +45 14 dataset """wn18rr""" +45 14 model """complex""" +45 14 loss """softplus""" +45 14 regularizer """no""" +45 14 optimizer """adam""" +45 14 training_loop """owa""" +45 14 negative_sampler """basic""" +45 14 evaluator """rankbased""" +45 15 dataset """wn18rr""" +45 15 model """complex""" +45 15 loss """softplus""" +45 15 regularizer """no""" +45 15 optimizer """adam""" +45 15 training_loop """owa""" +45 15 negative_sampler """basic""" +45 15 evaluator """rankbased""" +45 16 dataset """wn18rr""" +45 16 model """complex""" +45 16 loss """softplus""" +45 16 regularizer """no""" +45 16 optimizer """adam""" +45 16 training_loop """owa""" +45 16 negative_sampler """basic""" +45 16 evaluator """rankbased""" +45 17 dataset """wn18rr""" +45 17 model """complex""" +45 17 loss """softplus""" +45 17 regularizer """no""" +45 17 optimizer """adam""" +45 17 training_loop """owa""" +45 17 negative_sampler """basic""" +45 17 evaluator 
"""rankbased""" +45 18 dataset """wn18rr""" +45 18 model """complex""" +45 18 loss """softplus""" +45 18 regularizer """no""" +45 18 optimizer """adam""" +45 18 training_loop """owa""" +45 18 negative_sampler """basic""" +45 18 evaluator """rankbased""" +45 19 dataset """wn18rr""" +45 19 model """complex""" +45 19 loss """softplus""" +45 19 regularizer """no""" +45 19 optimizer """adam""" +45 19 training_loop """owa""" +45 19 negative_sampler """basic""" +45 19 evaluator """rankbased""" +45 20 dataset """wn18rr""" +45 20 model """complex""" +45 20 loss """softplus""" +45 20 regularizer """no""" +45 20 optimizer """adam""" +45 20 training_loop """owa""" +45 20 negative_sampler """basic""" +45 20 evaluator """rankbased""" +45 21 dataset """wn18rr""" +45 21 model """complex""" +45 21 loss """softplus""" +45 21 regularizer """no""" +45 21 optimizer """adam""" +45 21 training_loop """owa""" +45 21 negative_sampler """basic""" +45 21 evaluator """rankbased""" +45 22 dataset """wn18rr""" +45 22 model """complex""" +45 22 loss """softplus""" +45 22 regularizer """no""" +45 22 optimizer """adam""" +45 22 training_loop """owa""" +45 22 negative_sampler """basic""" +45 22 evaluator """rankbased""" +45 23 dataset """wn18rr""" +45 23 model """complex""" +45 23 loss """softplus""" +45 23 regularizer """no""" +45 23 optimizer """adam""" +45 23 training_loop """owa""" +45 23 negative_sampler """basic""" +45 23 evaluator """rankbased""" +45 24 dataset """wn18rr""" +45 24 model """complex""" +45 24 loss """softplus""" +45 24 regularizer """no""" +45 24 optimizer """adam""" +45 24 training_loop """owa""" +45 24 negative_sampler """basic""" +45 24 evaluator """rankbased""" +45 25 dataset """wn18rr""" +45 25 model """complex""" +45 25 loss """softplus""" +45 25 regularizer """no""" +45 25 optimizer """adam""" +45 25 training_loop """owa""" +45 25 negative_sampler """basic""" +45 25 evaluator """rankbased""" +45 26 dataset """wn18rr""" +45 26 model """complex""" +45 26 loss 
"""softplus""" +45 26 regularizer """no""" +45 26 optimizer """adam""" +45 26 training_loop """owa""" +45 26 negative_sampler """basic""" +45 26 evaluator """rankbased""" +45 27 dataset """wn18rr""" +45 27 model """complex""" +45 27 loss """softplus""" +45 27 regularizer """no""" +45 27 optimizer """adam""" +45 27 training_loop """owa""" +45 27 negative_sampler """basic""" +45 27 evaluator """rankbased""" +45 28 dataset """wn18rr""" +45 28 model """complex""" +45 28 loss """softplus""" +45 28 regularizer """no""" +45 28 optimizer """adam""" +45 28 training_loop """owa""" +45 28 negative_sampler """basic""" +45 28 evaluator """rankbased""" +45 29 dataset """wn18rr""" +45 29 model """complex""" +45 29 loss """softplus""" +45 29 regularizer """no""" +45 29 optimizer """adam""" +45 29 training_loop """owa""" +45 29 negative_sampler """basic""" +45 29 evaluator """rankbased""" +45 30 dataset """wn18rr""" +45 30 model """complex""" +45 30 loss """softplus""" +45 30 regularizer """no""" +45 30 optimizer """adam""" +45 30 training_loop """owa""" +45 30 negative_sampler """basic""" +45 30 evaluator """rankbased""" +45 31 dataset """wn18rr""" +45 31 model """complex""" +45 31 loss """softplus""" +45 31 regularizer """no""" +45 31 optimizer """adam""" +45 31 training_loop """owa""" +45 31 negative_sampler """basic""" +45 31 evaluator """rankbased""" +45 32 dataset """wn18rr""" +45 32 model """complex""" +45 32 loss """softplus""" +45 32 regularizer """no""" +45 32 optimizer """adam""" +45 32 training_loop """owa""" +45 32 negative_sampler """basic""" +45 32 evaluator """rankbased""" +45 33 dataset """wn18rr""" +45 33 model """complex""" +45 33 loss """softplus""" +45 33 regularizer """no""" +45 33 optimizer """adam""" +45 33 training_loop """owa""" +45 33 negative_sampler """basic""" +45 33 evaluator """rankbased""" +45 34 dataset """wn18rr""" +45 34 model """complex""" +45 34 loss """softplus""" +45 34 regularizer """no""" +45 34 optimizer """adam""" +45 34 training_loop 
"""owa""" +45 34 negative_sampler """basic""" +45 34 evaluator """rankbased""" +45 35 dataset """wn18rr""" +45 35 model """complex""" +45 35 loss """softplus""" +45 35 regularizer """no""" +45 35 optimizer """adam""" +45 35 training_loop """owa""" +45 35 negative_sampler """basic""" +45 35 evaluator """rankbased""" +45 36 dataset """wn18rr""" +45 36 model """complex""" +45 36 loss """softplus""" +45 36 regularizer """no""" +45 36 optimizer """adam""" +45 36 training_loop """owa""" +45 36 negative_sampler """basic""" +45 36 evaluator """rankbased""" +45 37 dataset """wn18rr""" +45 37 model """complex""" +45 37 loss """softplus""" +45 37 regularizer """no""" +45 37 optimizer """adam""" +45 37 training_loop """owa""" +45 37 negative_sampler """basic""" +45 37 evaluator """rankbased""" +45 38 dataset """wn18rr""" +45 38 model """complex""" +45 38 loss """softplus""" +45 38 regularizer """no""" +45 38 optimizer """adam""" +45 38 training_loop """owa""" +45 38 negative_sampler """basic""" +45 38 evaluator """rankbased""" +45 39 dataset """wn18rr""" +45 39 model """complex""" +45 39 loss """softplus""" +45 39 regularizer """no""" +45 39 optimizer """adam""" +45 39 training_loop """owa""" +45 39 negative_sampler """basic""" +45 39 evaluator """rankbased""" +45 40 dataset """wn18rr""" +45 40 model """complex""" +45 40 loss """softplus""" +45 40 regularizer """no""" +45 40 optimizer """adam""" +45 40 training_loop """owa""" +45 40 negative_sampler """basic""" +45 40 evaluator """rankbased""" +45 41 dataset """wn18rr""" +45 41 model """complex""" +45 41 loss """softplus""" +45 41 regularizer """no""" +45 41 optimizer """adam""" +45 41 training_loop """owa""" +45 41 negative_sampler """basic""" +45 41 evaluator """rankbased""" +45 42 dataset """wn18rr""" +45 42 model """complex""" +45 42 loss """softplus""" +45 42 regularizer """no""" +45 42 optimizer """adam""" +45 42 training_loop """owa""" +45 42 negative_sampler """basic""" +45 42 evaluator """rankbased""" +45 43 dataset 
"""wn18rr""" +45 43 model """complex""" +45 43 loss """softplus""" +45 43 regularizer """no""" +45 43 optimizer """adam""" +45 43 training_loop """owa""" +45 43 negative_sampler """basic""" +45 43 evaluator """rankbased""" +45 44 dataset """wn18rr""" +45 44 model """complex""" +45 44 loss """softplus""" +45 44 regularizer """no""" +45 44 optimizer """adam""" +45 44 training_loop """owa""" +45 44 negative_sampler """basic""" +45 44 evaluator """rankbased""" +45 45 dataset """wn18rr""" +45 45 model """complex""" +45 45 loss """softplus""" +45 45 regularizer """no""" +45 45 optimizer """adam""" +45 45 training_loop """owa""" +45 45 negative_sampler """basic""" +45 45 evaluator """rankbased""" +45 46 dataset """wn18rr""" +45 46 model """complex""" +45 46 loss """softplus""" +45 46 regularizer """no""" +45 46 optimizer """adam""" +45 46 training_loop """owa""" +45 46 negative_sampler """basic""" +45 46 evaluator """rankbased""" +45 47 dataset """wn18rr""" +45 47 model """complex""" +45 47 loss """softplus""" +45 47 regularizer """no""" +45 47 optimizer """adam""" +45 47 training_loop """owa""" +45 47 negative_sampler """basic""" +45 47 evaluator """rankbased""" +45 48 dataset """wn18rr""" +45 48 model """complex""" +45 48 loss """softplus""" +45 48 regularizer """no""" +45 48 optimizer """adam""" +45 48 training_loop """owa""" +45 48 negative_sampler """basic""" +45 48 evaluator """rankbased""" +45 49 dataset """wn18rr""" +45 49 model """complex""" +45 49 loss """softplus""" +45 49 regularizer """no""" +45 49 optimizer """adam""" +45 49 training_loop """owa""" +45 49 negative_sampler """basic""" +45 49 evaluator """rankbased""" +45 50 dataset """wn18rr""" +45 50 model """complex""" +45 50 loss """softplus""" +45 50 regularizer """no""" +45 50 optimizer """adam""" +45 50 training_loop """owa""" +45 50 negative_sampler """basic""" +45 50 evaluator """rankbased""" +45 51 dataset """wn18rr""" +45 51 model """complex""" +45 51 loss """softplus""" +45 51 regularizer """no""" 
+45 51 optimizer """adam""" +45 51 training_loop """owa""" +45 51 negative_sampler """basic""" +45 51 evaluator """rankbased""" +45 52 dataset """wn18rr""" +45 52 model """complex""" +45 52 loss """softplus""" +45 52 regularizer """no""" +45 52 optimizer """adam""" +45 52 training_loop """owa""" +45 52 negative_sampler """basic""" +45 52 evaluator """rankbased""" +45 53 dataset """wn18rr""" +45 53 model """complex""" +45 53 loss """softplus""" +45 53 regularizer """no""" +45 53 optimizer """adam""" +45 53 training_loop """owa""" +45 53 negative_sampler """basic""" +45 53 evaluator """rankbased""" +45 54 dataset """wn18rr""" +45 54 model """complex""" +45 54 loss """softplus""" +45 54 regularizer """no""" +45 54 optimizer """adam""" +45 54 training_loop """owa""" +45 54 negative_sampler """basic""" +45 54 evaluator """rankbased""" +45 55 dataset """wn18rr""" +45 55 model """complex""" +45 55 loss """softplus""" +45 55 regularizer """no""" +45 55 optimizer """adam""" +45 55 training_loop """owa""" +45 55 negative_sampler """basic""" +45 55 evaluator """rankbased""" +45 56 dataset """wn18rr""" +45 56 model """complex""" +45 56 loss """softplus""" +45 56 regularizer """no""" +45 56 optimizer """adam""" +45 56 training_loop """owa""" +45 56 negative_sampler """basic""" +45 56 evaluator """rankbased""" +45 57 dataset """wn18rr""" +45 57 model """complex""" +45 57 loss """softplus""" +45 57 regularizer """no""" +45 57 optimizer """adam""" +45 57 training_loop """owa""" +45 57 negative_sampler """basic""" +45 57 evaluator """rankbased""" +45 58 dataset """wn18rr""" +45 58 model """complex""" +45 58 loss """softplus""" +45 58 regularizer """no""" +45 58 optimizer """adam""" +45 58 training_loop """owa""" +45 58 negative_sampler """basic""" +45 58 evaluator """rankbased""" +45 59 dataset """wn18rr""" +45 59 model """complex""" +45 59 loss """softplus""" +45 59 regularizer """no""" +45 59 optimizer """adam""" +45 59 training_loop """owa""" +45 59 negative_sampler """basic""" 
+45 59 evaluator """rankbased""" +45 60 dataset """wn18rr""" +45 60 model """complex""" +45 60 loss """softplus""" +45 60 regularizer """no""" +45 60 optimizer """adam""" +45 60 training_loop """owa""" +45 60 negative_sampler """basic""" +45 60 evaluator """rankbased""" +45 61 dataset """wn18rr""" +45 61 model """complex""" +45 61 loss """softplus""" +45 61 regularizer """no""" +45 61 optimizer """adam""" +45 61 training_loop """owa""" +45 61 negative_sampler """basic""" +45 61 evaluator """rankbased""" +45 62 dataset """wn18rr""" +45 62 model """complex""" +45 62 loss """softplus""" +45 62 regularizer """no""" +45 62 optimizer """adam""" +45 62 training_loop """owa""" +45 62 negative_sampler """basic""" +45 62 evaluator """rankbased""" +45 63 dataset """wn18rr""" +45 63 model """complex""" +45 63 loss """softplus""" +45 63 regularizer """no""" +45 63 optimizer """adam""" +45 63 training_loop """owa""" +45 63 negative_sampler """basic""" +45 63 evaluator """rankbased""" +45 64 dataset """wn18rr""" +45 64 model """complex""" +45 64 loss """softplus""" +45 64 regularizer """no""" +45 64 optimizer """adam""" +45 64 training_loop """owa""" +45 64 negative_sampler """basic""" +45 64 evaluator """rankbased""" +45 65 dataset """wn18rr""" +45 65 model """complex""" +45 65 loss """softplus""" +45 65 regularizer """no""" +45 65 optimizer """adam""" +45 65 training_loop """owa""" +45 65 negative_sampler """basic""" +45 65 evaluator """rankbased""" +45 66 dataset """wn18rr""" +45 66 model """complex""" +45 66 loss """softplus""" +45 66 regularizer """no""" +45 66 optimizer """adam""" +45 66 training_loop """owa""" +45 66 negative_sampler """basic""" +45 66 evaluator """rankbased""" +45 67 dataset """wn18rr""" +45 67 model """complex""" +45 67 loss """softplus""" +45 67 regularizer """no""" +45 67 optimizer """adam""" +45 67 training_loop """owa""" +45 67 negative_sampler """basic""" +45 67 evaluator """rankbased""" +45 68 dataset """wn18rr""" +45 68 model """complex""" +45 68 
loss """softplus""" +45 68 regularizer """no""" +45 68 optimizer """adam""" +45 68 training_loop """owa""" +45 68 negative_sampler """basic""" +45 68 evaluator """rankbased""" +45 69 dataset """wn18rr""" +45 69 model """complex""" +45 69 loss """softplus""" +45 69 regularizer """no""" +45 69 optimizer """adam""" +45 69 training_loop """owa""" +45 69 negative_sampler """basic""" +45 69 evaluator """rankbased""" +45 70 dataset """wn18rr""" +45 70 model """complex""" +45 70 loss """softplus""" +45 70 regularizer """no""" +45 70 optimizer """adam""" +45 70 training_loop """owa""" +45 70 negative_sampler """basic""" +45 70 evaluator """rankbased""" +45 71 dataset """wn18rr""" +45 71 model """complex""" +45 71 loss """softplus""" +45 71 regularizer """no""" +45 71 optimizer """adam""" +45 71 training_loop """owa""" +45 71 negative_sampler """basic""" +45 71 evaluator """rankbased""" +45 72 dataset """wn18rr""" +45 72 model """complex""" +45 72 loss """softplus""" +45 72 regularizer """no""" +45 72 optimizer """adam""" +45 72 training_loop """owa""" +45 72 negative_sampler """basic""" +45 72 evaluator """rankbased""" +45 73 dataset """wn18rr""" +45 73 model """complex""" +45 73 loss """softplus""" +45 73 regularizer """no""" +45 73 optimizer """adam""" +45 73 training_loop """owa""" +45 73 negative_sampler """basic""" +45 73 evaluator """rankbased""" +45 74 dataset """wn18rr""" +45 74 model """complex""" +45 74 loss """softplus""" +45 74 regularizer """no""" +45 74 optimizer """adam""" +45 74 training_loop """owa""" +45 74 negative_sampler """basic""" +45 74 evaluator """rankbased""" +45 75 dataset """wn18rr""" +45 75 model """complex""" +45 75 loss """softplus""" +45 75 regularizer """no""" +45 75 optimizer """adam""" +45 75 training_loop """owa""" +45 75 negative_sampler """basic""" +45 75 evaluator """rankbased""" +45 76 dataset """wn18rr""" +45 76 model """complex""" +45 76 loss """softplus""" +45 76 regularizer """no""" +45 76 optimizer """adam""" +45 76 
training_loop """owa""" +45 76 negative_sampler """basic""" +45 76 evaluator """rankbased""" +45 77 dataset """wn18rr""" +45 77 model """complex""" +45 77 loss """softplus""" +45 77 regularizer """no""" +45 77 optimizer """adam""" +45 77 training_loop """owa""" +45 77 negative_sampler """basic""" +45 77 evaluator """rankbased""" +45 78 dataset """wn18rr""" +45 78 model """complex""" +45 78 loss """softplus""" +45 78 regularizer """no""" +45 78 optimizer """adam""" +45 78 training_loop """owa""" +45 78 negative_sampler """basic""" +45 78 evaluator """rankbased""" +45 79 dataset """wn18rr""" +45 79 model """complex""" +45 79 loss """softplus""" +45 79 regularizer """no""" +45 79 optimizer """adam""" +45 79 training_loop """owa""" +45 79 negative_sampler """basic""" +45 79 evaluator """rankbased""" +45 80 dataset """wn18rr""" +45 80 model """complex""" +45 80 loss """softplus""" +45 80 regularizer """no""" +45 80 optimizer """adam""" +45 80 training_loop """owa""" +45 80 negative_sampler """basic""" +45 80 evaluator """rankbased""" +45 81 dataset """wn18rr""" +45 81 model """complex""" +45 81 loss """softplus""" +45 81 regularizer """no""" +45 81 optimizer """adam""" +45 81 training_loop """owa""" +45 81 negative_sampler """basic""" +45 81 evaluator """rankbased""" +45 82 dataset """wn18rr""" +45 82 model """complex""" +45 82 loss """softplus""" +45 82 regularizer """no""" +45 82 optimizer """adam""" +45 82 training_loop """owa""" +45 82 negative_sampler """basic""" +45 82 evaluator """rankbased""" +45 83 dataset """wn18rr""" +45 83 model """complex""" +45 83 loss """softplus""" +45 83 regularizer """no""" +45 83 optimizer """adam""" +45 83 training_loop """owa""" +45 83 negative_sampler """basic""" +45 83 evaluator """rankbased""" +45 84 dataset """wn18rr""" +45 84 model """complex""" +45 84 loss """softplus""" +45 84 regularizer """no""" +45 84 optimizer """adam""" +45 84 training_loop """owa""" +45 84 negative_sampler """basic""" +45 84 evaluator """rankbased""" 
+45 85 dataset """wn18rr""" +45 85 model """complex""" +45 85 loss """softplus""" +45 85 regularizer """no""" +45 85 optimizer """adam""" +45 85 training_loop """owa""" +45 85 negative_sampler """basic""" +45 85 evaluator """rankbased""" +45 86 dataset """wn18rr""" +45 86 model """complex""" +45 86 loss """softplus""" +45 86 regularizer """no""" +45 86 optimizer """adam""" +45 86 training_loop """owa""" +45 86 negative_sampler """basic""" +45 86 evaluator """rankbased""" +45 87 dataset """wn18rr""" +45 87 model """complex""" +45 87 loss """softplus""" +45 87 regularizer """no""" +45 87 optimizer """adam""" +45 87 training_loop """owa""" +45 87 negative_sampler """basic""" +45 87 evaluator """rankbased""" +45 88 dataset """wn18rr""" +45 88 model """complex""" +45 88 loss """softplus""" +45 88 regularizer """no""" +45 88 optimizer """adam""" +45 88 training_loop """owa""" +45 88 negative_sampler """basic""" +45 88 evaluator """rankbased""" +45 89 dataset """wn18rr""" +45 89 model """complex""" +45 89 loss """softplus""" +45 89 regularizer """no""" +45 89 optimizer """adam""" +45 89 training_loop """owa""" +45 89 negative_sampler """basic""" +45 89 evaluator """rankbased""" +45 90 dataset """wn18rr""" +45 90 model """complex""" +45 90 loss """softplus""" +45 90 regularizer """no""" +45 90 optimizer """adam""" +45 90 training_loop """owa""" +45 90 negative_sampler """basic""" +45 90 evaluator """rankbased""" +45 91 dataset """wn18rr""" +45 91 model """complex""" +45 91 loss """softplus""" +45 91 regularizer """no""" +45 91 optimizer """adam""" +45 91 training_loop """owa""" +45 91 negative_sampler """basic""" +45 91 evaluator """rankbased""" +45 92 dataset """wn18rr""" +45 92 model """complex""" +45 92 loss """softplus""" +45 92 regularizer """no""" +45 92 optimizer """adam""" +45 92 training_loop """owa""" +45 92 negative_sampler """basic""" +45 92 evaluator """rankbased""" +45 93 dataset """wn18rr""" +45 93 model """complex""" +45 93 loss """softplus""" +45 93 
regularizer """no""" +45 93 optimizer """adam""" +45 93 training_loop """owa""" +45 93 negative_sampler """basic""" +45 93 evaluator """rankbased""" +45 94 dataset """wn18rr""" +45 94 model """complex""" +45 94 loss """softplus""" +45 94 regularizer """no""" +45 94 optimizer """adam""" +45 94 training_loop """owa""" +45 94 negative_sampler """basic""" +45 94 evaluator """rankbased""" +45 95 dataset """wn18rr""" +45 95 model """complex""" +45 95 loss """softplus""" +45 95 regularizer """no""" +45 95 optimizer """adam""" +45 95 training_loop """owa""" +45 95 negative_sampler """basic""" +45 95 evaluator """rankbased""" +45 96 dataset """wn18rr""" +45 96 model """complex""" +45 96 loss """softplus""" +45 96 regularizer """no""" +45 96 optimizer """adam""" +45 96 training_loop """owa""" +45 96 negative_sampler """basic""" +45 96 evaluator """rankbased""" +45 97 dataset """wn18rr""" +45 97 model """complex""" +45 97 loss """softplus""" +45 97 regularizer """no""" +45 97 optimizer """adam""" +45 97 training_loop """owa""" +45 97 negative_sampler """basic""" +45 97 evaluator """rankbased""" +45 98 dataset """wn18rr""" +45 98 model """complex""" +45 98 loss """softplus""" +45 98 regularizer """no""" +45 98 optimizer """adam""" +45 98 training_loop """owa""" +45 98 negative_sampler """basic""" +45 98 evaluator """rankbased""" +45 99 dataset """wn18rr""" +45 99 model """complex""" +45 99 loss """softplus""" +45 99 regularizer """no""" +45 99 optimizer """adam""" +45 99 training_loop """owa""" +45 99 negative_sampler """basic""" +45 99 evaluator """rankbased""" +45 100 dataset """wn18rr""" +45 100 model """complex""" +45 100 loss """softplus""" +45 100 regularizer """no""" +45 100 optimizer """adam""" +45 100 training_loop """owa""" +45 100 negative_sampler """basic""" +45 100 evaluator """rankbased""" +46 1 model.embedding_dim 0.0 +46 1 loss.margin 8.107023344824238 +46 1 optimizer.lr 0.0033533670277657423 +46 1 negative_sampler.num_negs_per_pos 43.0 +46 1 
training.batch_size 0.0 +46 2 model.embedding_dim 2.0 +46 2 loss.margin 6.312634470992782 +46 2 optimizer.lr 0.007156685775603601 +46 2 negative_sampler.num_negs_per_pos 84.0 +46 2 training.batch_size 0.0 +46 3 model.embedding_dim 0.0 +46 3 loss.margin 5.648668101646599 +46 3 optimizer.lr 0.05133314337517455 +46 3 negative_sampler.num_negs_per_pos 50.0 +46 3 training.batch_size 0.0 +46 4 model.embedding_dim 2.0 +46 4 loss.margin 5.4978129339304935 +46 4 optimizer.lr 0.009053507593924169 +46 4 negative_sampler.num_negs_per_pos 31.0 +46 4 training.batch_size 0.0 +46 5 model.embedding_dim 2.0 +46 5 loss.margin 1.057044150336068 +46 5 optimizer.lr 0.02736623445619993 +46 5 negative_sampler.num_negs_per_pos 67.0 +46 5 training.batch_size 0.0 +46 6 model.embedding_dim 2.0 +46 6 loss.margin 7.4347418532557406 +46 6 optimizer.lr 0.09264104663752207 +46 6 negative_sampler.num_negs_per_pos 68.0 +46 6 training.batch_size 0.0 +46 7 model.embedding_dim 1.0 +46 7 loss.margin 1.3851187095518753 +46 7 optimizer.lr 0.0049421735072683395 +46 7 negative_sampler.num_negs_per_pos 47.0 +46 7 training.batch_size 1.0 +46 8 model.embedding_dim 0.0 +46 8 loss.margin 5.086507677865155 +46 8 optimizer.lr 0.0020896137611215223 +46 8 negative_sampler.num_negs_per_pos 48.0 +46 8 training.batch_size 1.0 +46 9 model.embedding_dim 2.0 +46 9 loss.margin 9.011016410334655 +46 9 optimizer.lr 0.02118575534504117 +46 9 negative_sampler.num_negs_per_pos 37.0 +46 9 training.batch_size 0.0 +46 10 model.embedding_dim 2.0 +46 10 loss.margin 7.658819628180479 +46 10 optimizer.lr 0.010535980546955167 +46 10 negative_sampler.num_negs_per_pos 93.0 +46 10 training.batch_size 1.0 +46 11 model.embedding_dim 2.0 +46 11 loss.margin 8.978863000164697 +46 11 optimizer.lr 0.0029894517061949386 +46 11 negative_sampler.num_negs_per_pos 0.0 +46 11 training.batch_size 1.0 +46 12 model.embedding_dim 0.0 +46 12 loss.margin 6.125109117254678 +46 12 optimizer.lr 0.03494418294706735 +46 12 negative_sampler.num_negs_per_pos 15.0 
+46 12 training.batch_size 0.0 +46 13 model.embedding_dim 1.0 +46 13 loss.margin 5.9494416827381 +46 13 optimizer.lr 0.09132056036410002 +46 13 negative_sampler.num_negs_per_pos 92.0 +46 13 training.batch_size 0.0 +46 14 model.embedding_dim 0.0 +46 14 loss.margin 5.549236341048213 +46 14 optimizer.lr 0.030396434212184617 +46 14 negative_sampler.num_negs_per_pos 59.0 +46 14 training.batch_size 2.0 +46 15 model.embedding_dim 1.0 +46 15 loss.margin 8.721068253556965 +46 15 optimizer.lr 0.06871339518907979 +46 15 negative_sampler.num_negs_per_pos 43.0 +46 15 training.batch_size 1.0 +46 16 model.embedding_dim 2.0 +46 16 loss.margin 5.065969617218462 +46 16 optimizer.lr 0.004304247030243985 +46 16 negative_sampler.num_negs_per_pos 90.0 +46 16 training.batch_size 2.0 +46 17 model.embedding_dim 0.0 +46 17 loss.margin 5.157576241473838 +46 17 optimizer.lr 0.05384053430453872 +46 17 negative_sampler.num_negs_per_pos 26.0 +46 17 training.batch_size 0.0 +46 18 model.embedding_dim 2.0 +46 18 loss.margin 0.8086579502463037 +46 18 optimizer.lr 0.016969785299280126 +46 18 negative_sampler.num_negs_per_pos 46.0 +46 18 training.batch_size 2.0 +46 19 model.embedding_dim 0.0 +46 19 loss.margin 9.902481826445607 +46 19 optimizer.lr 0.0014513097126146294 +46 19 negative_sampler.num_negs_per_pos 33.0 +46 19 training.batch_size 2.0 +46 20 model.embedding_dim 0.0 +46 20 loss.margin 5.263319883042593 +46 20 optimizer.lr 0.003927175297512107 +46 20 negative_sampler.num_negs_per_pos 83.0 +46 20 training.batch_size 2.0 +46 21 model.embedding_dim 1.0 +46 21 loss.margin 9.934491345770347 +46 21 optimizer.lr 0.005749837299496608 +46 21 negative_sampler.num_negs_per_pos 52.0 +46 21 training.batch_size 2.0 +46 22 model.embedding_dim 1.0 +46 22 loss.margin 6.559923645064033 +46 22 optimizer.lr 0.0940426575530539 +46 22 negative_sampler.num_negs_per_pos 78.0 +46 22 training.batch_size 0.0 +46 23 model.embedding_dim 2.0 +46 23 loss.margin 1.7147352328348282 +46 23 optimizer.lr 0.011728115366639194 +46 
23 negative_sampler.num_negs_per_pos 69.0 +46 23 training.batch_size 1.0 +46 24 model.embedding_dim 0.0 +46 24 loss.margin 2.9600191803998923 +46 24 optimizer.lr 0.005190300132733648 +46 24 negative_sampler.num_negs_per_pos 76.0 +46 24 training.batch_size 2.0 +46 25 model.embedding_dim 0.0 +46 25 loss.margin 8.28902429328208 +46 25 optimizer.lr 0.03367537184592484 +46 25 negative_sampler.num_negs_per_pos 44.0 +46 25 training.batch_size 1.0 +46 26 model.embedding_dim 2.0 +46 26 loss.margin 1.4885425077325782 +46 26 optimizer.lr 0.019974659123116766 +46 26 negative_sampler.num_negs_per_pos 9.0 +46 26 training.batch_size 2.0 +46 27 model.embedding_dim 1.0 +46 27 loss.margin 0.5231615880880947 +46 27 optimizer.lr 0.007294362864241728 +46 27 negative_sampler.num_negs_per_pos 36.0 +46 27 training.batch_size 0.0 +46 28 model.embedding_dim 1.0 +46 28 loss.margin 9.275987446584404 +46 28 optimizer.lr 0.08908543315406205 +46 28 negative_sampler.num_negs_per_pos 48.0 +46 28 training.batch_size 1.0 +46 29 model.embedding_dim 0.0 +46 29 loss.margin 8.919736131892632 +46 29 optimizer.lr 0.040059885754003446 +46 29 negative_sampler.num_negs_per_pos 94.0 +46 29 training.batch_size 1.0 +46 30 model.embedding_dim 1.0 +46 30 loss.margin 2.0335483592756267 +46 30 optimizer.lr 0.008781870978679066 +46 30 negative_sampler.num_negs_per_pos 87.0 +46 30 training.batch_size 0.0 +46 31 model.embedding_dim 0.0 +46 31 loss.margin 3.1945907557305078 +46 31 optimizer.lr 0.09161989527144758 +46 31 negative_sampler.num_negs_per_pos 42.0 +46 31 training.batch_size 1.0 +46 32 model.embedding_dim 1.0 +46 32 loss.margin 5.471449185071789 +46 32 optimizer.lr 0.005088358335266608 +46 32 negative_sampler.num_negs_per_pos 46.0 +46 32 training.batch_size 1.0 +46 33 model.embedding_dim 2.0 +46 33 loss.margin 4.97739577539539 +46 33 optimizer.lr 0.06002738140423935 +46 33 negative_sampler.num_negs_per_pos 93.0 +46 33 training.batch_size 2.0 +46 34 model.embedding_dim 1.0 +46 34 loss.margin 9.739333726272392 
+46 34 optimizer.lr 0.06520456707364011 +46 34 negative_sampler.num_negs_per_pos 40.0 +46 34 training.batch_size 2.0 +46 35 model.embedding_dim 0.0 +46 35 loss.margin 8.481557624377734 +46 35 optimizer.lr 0.0205679444865429 +46 35 negative_sampler.num_negs_per_pos 65.0 +46 35 training.batch_size 0.0 +46 36 model.embedding_dim 1.0 +46 36 loss.margin 3.1326503670506205 +46 36 optimizer.lr 0.0013368870760297159 +46 36 negative_sampler.num_negs_per_pos 37.0 +46 36 training.batch_size 0.0 +46 1 dataset """wn18rr""" +46 1 model """complex""" +46 1 loss """marginranking""" +46 1 regularizer """no""" +46 1 optimizer """adam""" +46 1 training_loop """owa""" +46 1 negative_sampler """basic""" +46 1 evaluator """rankbased""" +46 2 dataset """wn18rr""" +46 2 model """complex""" +46 2 loss """marginranking""" +46 2 regularizer """no""" +46 2 optimizer """adam""" +46 2 training_loop """owa""" +46 2 negative_sampler """basic""" +46 2 evaluator """rankbased""" +46 3 dataset """wn18rr""" +46 3 model """complex""" +46 3 loss """marginranking""" +46 3 regularizer """no""" +46 3 optimizer """adam""" +46 3 training_loop """owa""" +46 3 negative_sampler """basic""" +46 3 evaluator """rankbased""" +46 4 dataset """wn18rr""" +46 4 model """complex""" +46 4 loss """marginranking""" +46 4 regularizer """no""" +46 4 optimizer """adam""" +46 4 training_loop """owa""" +46 4 negative_sampler """basic""" +46 4 evaluator """rankbased""" +46 5 dataset """wn18rr""" +46 5 model """complex""" +46 5 loss """marginranking""" +46 5 regularizer """no""" +46 5 optimizer """adam""" +46 5 training_loop """owa""" +46 5 negative_sampler """basic""" +46 5 evaluator """rankbased""" +46 6 dataset """wn18rr""" +46 6 model """complex""" +46 6 loss """marginranking""" +46 6 regularizer """no""" +46 6 optimizer """adam""" +46 6 training_loop """owa""" +46 6 negative_sampler """basic""" +46 6 evaluator """rankbased""" +46 7 dataset """wn18rr""" +46 7 model """complex""" +46 7 loss """marginranking""" +46 7 
regularizer """no""" +46 7 optimizer """adam""" +46 7 training_loop """owa""" +46 7 negative_sampler """basic""" +46 7 evaluator """rankbased""" +46 8 dataset """wn18rr""" +46 8 model """complex""" +46 8 loss """marginranking""" +46 8 regularizer """no""" +46 8 optimizer """adam""" +46 8 training_loop """owa""" +46 8 negative_sampler """basic""" +46 8 evaluator """rankbased""" +46 9 dataset """wn18rr""" +46 9 model """complex""" +46 9 loss """marginranking""" +46 9 regularizer """no""" +46 9 optimizer """adam""" +46 9 training_loop """owa""" +46 9 negative_sampler """basic""" +46 9 evaluator """rankbased""" +46 10 dataset """wn18rr""" +46 10 model """complex""" +46 10 loss """marginranking""" +46 10 regularizer """no""" +46 10 optimizer """adam""" +46 10 training_loop """owa""" +46 10 negative_sampler """basic""" +46 10 evaluator """rankbased""" +46 11 dataset """wn18rr""" +46 11 model """complex""" +46 11 loss """marginranking""" +46 11 regularizer """no""" +46 11 optimizer """adam""" +46 11 training_loop """owa""" +46 11 negative_sampler """basic""" +46 11 evaluator """rankbased""" +46 12 dataset """wn18rr""" +46 12 model """complex""" +46 12 loss """marginranking""" +46 12 regularizer """no""" +46 12 optimizer """adam""" +46 12 training_loop """owa""" +46 12 negative_sampler """basic""" +46 12 evaluator """rankbased""" +46 13 dataset """wn18rr""" +46 13 model """complex""" +46 13 loss """marginranking""" +46 13 regularizer """no""" +46 13 optimizer """adam""" +46 13 training_loop """owa""" +46 13 negative_sampler """basic""" +46 13 evaluator """rankbased""" +46 14 dataset """wn18rr""" +46 14 model """complex""" +46 14 loss """marginranking""" +46 14 regularizer """no""" +46 14 optimizer """adam""" +46 14 training_loop """owa""" +46 14 negative_sampler """basic""" +46 14 evaluator """rankbased""" +46 15 dataset """wn18rr""" +46 15 model """complex""" +46 15 loss """marginranking""" +46 15 regularizer """no""" +46 15 optimizer """adam""" +46 15 training_loop 
"""owa""" +46 15 negative_sampler """basic""" +46 15 evaluator """rankbased""" +46 16 dataset """wn18rr""" +46 16 model """complex""" +46 16 loss """marginranking""" +46 16 regularizer """no""" +46 16 optimizer """adam""" +46 16 training_loop """owa""" +46 16 negative_sampler """basic""" +46 16 evaluator """rankbased""" +46 17 dataset """wn18rr""" +46 17 model """complex""" +46 17 loss """marginranking""" +46 17 regularizer """no""" +46 17 optimizer """adam""" +46 17 training_loop """owa""" +46 17 negative_sampler """basic""" +46 17 evaluator """rankbased""" +46 18 dataset """wn18rr""" +46 18 model """complex""" +46 18 loss """marginranking""" +46 18 regularizer """no""" +46 18 optimizer """adam""" +46 18 training_loop """owa""" +46 18 negative_sampler """basic""" +46 18 evaluator """rankbased""" +46 19 dataset """wn18rr""" +46 19 model """complex""" +46 19 loss """marginranking""" +46 19 regularizer """no""" +46 19 optimizer """adam""" +46 19 training_loop """owa""" +46 19 negative_sampler """basic""" +46 19 evaluator """rankbased""" +46 20 dataset """wn18rr""" +46 20 model """complex""" +46 20 loss """marginranking""" +46 20 regularizer """no""" +46 20 optimizer """adam""" +46 20 training_loop """owa""" +46 20 negative_sampler """basic""" +46 20 evaluator """rankbased""" +46 21 dataset """wn18rr""" +46 21 model """complex""" +46 21 loss """marginranking""" +46 21 regularizer """no""" +46 21 optimizer """adam""" +46 21 training_loop """owa""" +46 21 negative_sampler """basic""" +46 21 evaluator """rankbased""" +46 22 dataset """wn18rr""" +46 22 model """complex""" +46 22 loss """marginranking""" +46 22 regularizer """no""" +46 22 optimizer """adam""" +46 22 training_loop """owa""" +46 22 negative_sampler """basic""" +46 22 evaluator """rankbased""" +46 23 dataset """wn18rr""" +46 23 model """complex""" +46 23 loss """marginranking""" +46 23 regularizer """no""" +46 23 optimizer """adam""" +46 23 training_loop """owa""" +46 23 negative_sampler """basic""" +46 23 
evaluator """rankbased""" +46 24 dataset """wn18rr""" +46 24 model """complex""" +46 24 loss """marginranking""" +46 24 regularizer """no""" +46 24 optimizer """adam""" +46 24 training_loop """owa""" +46 24 negative_sampler """basic""" +46 24 evaluator """rankbased""" +46 25 dataset """wn18rr""" +46 25 model """complex""" +46 25 loss """marginranking""" +46 25 regularizer """no""" +46 25 optimizer """adam""" +46 25 training_loop """owa""" +46 25 negative_sampler """basic""" +46 25 evaluator """rankbased""" +46 26 dataset """wn18rr""" +46 26 model """complex""" +46 26 loss """marginranking""" +46 26 regularizer """no""" +46 26 optimizer """adam""" +46 26 training_loop """owa""" +46 26 negative_sampler """basic""" +46 26 evaluator """rankbased""" +46 27 dataset """wn18rr""" +46 27 model """complex""" +46 27 loss """marginranking""" +46 27 regularizer """no""" +46 27 optimizer """adam""" +46 27 training_loop """owa""" +46 27 negative_sampler """basic""" +46 27 evaluator """rankbased""" +46 28 dataset """wn18rr""" +46 28 model """complex""" +46 28 loss """marginranking""" +46 28 regularizer """no""" +46 28 optimizer """adam""" +46 28 training_loop """owa""" +46 28 negative_sampler """basic""" +46 28 evaluator """rankbased""" +46 29 dataset """wn18rr""" +46 29 model """complex""" +46 29 loss """marginranking""" +46 29 regularizer """no""" +46 29 optimizer """adam""" +46 29 training_loop """owa""" +46 29 negative_sampler """basic""" +46 29 evaluator """rankbased""" +46 30 dataset """wn18rr""" +46 30 model """complex""" +46 30 loss """marginranking""" +46 30 regularizer """no""" +46 30 optimizer """adam""" +46 30 training_loop """owa""" +46 30 negative_sampler """basic""" +46 30 evaluator """rankbased""" +46 31 dataset """wn18rr""" +46 31 model """complex""" +46 31 loss """marginranking""" +46 31 regularizer """no""" +46 31 optimizer """adam""" +46 31 training_loop """owa""" +46 31 negative_sampler """basic""" +46 31 evaluator """rankbased""" +46 32 dataset """wn18rr""" 
+46 32 model """complex""" +46 32 loss """marginranking""" +46 32 regularizer """no""" +46 32 optimizer """adam""" +46 32 training_loop """owa""" +46 32 negative_sampler """basic""" +46 32 evaluator """rankbased""" +46 33 dataset """wn18rr""" +46 33 model """complex""" +46 33 loss """marginranking""" +46 33 regularizer """no""" +46 33 optimizer """adam""" +46 33 training_loop """owa""" +46 33 negative_sampler """basic""" +46 33 evaluator """rankbased""" +46 34 dataset """wn18rr""" +46 34 model """complex""" +46 34 loss """marginranking""" +46 34 regularizer """no""" +46 34 optimizer """adam""" +46 34 training_loop """owa""" +46 34 negative_sampler """basic""" +46 34 evaluator """rankbased""" +46 35 dataset """wn18rr""" +46 35 model """complex""" +46 35 loss """marginranking""" +46 35 regularizer """no""" +46 35 optimizer """adam""" +46 35 training_loop """owa""" +46 35 negative_sampler """basic""" +46 35 evaluator """rankbased""" +46 36 dataset """wn18rr""" +46 36 model """complex""" +46 36 loss """marginranking""" +46 36 regularizer """no""" +46 36 optimizer """adam""" +46 36 training_loop """owa""" +46 36 negative_sampler """basic""" +46 36 evaluator """rankbased""" +47 1 model.embedding_dim 0.0 +47 1 loss.margin 8.809846900616973 +47 1 optimizer.lr 0.009140465545708014 +47 1 negative_sampler.num_negs_per_pos 55.0 +47 1 training.batch_size 0.0 +47 2 model.embedding_dim 0.0 +47 2 loss.margin 8.005211888205846 +47 2 optimizer.lr 0.02033361091036742 +47 2 negative_sampler.num_negs_per_pos 55.0 +47 2 training.batch_size 0.0 +47 3 model.embedding_dim 1.0 +47 3 loss.margin 7.823692231844087 +47 3 optimizer.lr 0.0011782000061308106 +47 3 negative_sampler.num_negs_per_pos 63.0 +47 3 training.batch_size 1.0 +47 4 model.embedding_dim 1.0 +47 4 loss.margin 9.981163509868198 +47 4 optimizer.lr 0.008294218388079807 +47 4 negative_sampler.num_negs_per_pos 62.0 +47 4 training.batch_size 1.0 +47 5 model.embedding_dim 2.0 +47 5 loss.margin 8.374852822452429 +47 5 optimizer.lr 
0.025361171537909438 +47 5 negative_sampler.num_negs_per_pos 23.0 +47 5 training.batch_size 2.0 +47 6 model.embedding_dim 0.0 +47 6 loss.margin 3.659399092557837 +47 6 optimizer.lr 0.0010185851924926878 +47 6 negative_sampler.num_negs_per_pos 95.0 +47 6 training.batch_size 2.0 +47 7 model.embedding_dim 0.0 +47 7 loss.margin 1.1419336763566683 +47 7 optimizer.lr 0.006870711142580844 +47 7 negative_sampler.num_negs_per_pos 32.0 +47 7 training.batch_size 0.0 +47 8 model.embedding_dim 1.0 +47 8 loss.margin 3.4442279860568186 +47 8 optimizer.lr 0.003969232117850188 +47 8 negative_sampler.num_negs_per_pos 77.0 +47 8 training.batch_size 0.0 +47 9 model.embedding_dim 0.0 +47 9 loss.margin 1.2712525068182199 +47 9 optimizer.lr 0.0024180929425267374 +47 9 negative_sampler.num_negs_per_pos 76.0 +47 9 training.batch_size 1.0 +47 10 model.embedding_dim 2.0 +47 10 loss.margin 8.557415128470948 +47 10 optimizer.lr 0.0014604669175052624 +47 10 negative_sampler.num_negs_per_pos 51.0 +47 10 training.batch_size 0.0 +47 11 model.embedding_dim 0.0 +47 11 loss.margin 4.627556117880316 +47 11 optimizer.lr 0.008516744024007738 +47 11 negative_sampler.num_negs_per_pos 0.0 +47 11 training.batch_size 1.0 +47 12 model.embedding_dim 0.0 +47 12 loss.margin 6.1211787999512115 +47 12 optimizer.lr 0.01672972268464053 +47 12 negative_sampler.num_negs_per_pos 77.0 +47 12 training.batch_size 1.0 +47 13 model.embedding_dim 0.0 +47 13 loss.margin 7.360588154584389 +47 13 optimizer.lr 0.006424932243315918 +47 13 negative_sampler.num_negs_per_pos 4.0 +47 13 training.batch_size 1.0 +47 14 model.embedding_dim 0.0 +47 14 loss.margin 1.1038294632124017 +47 14 optimizer.lr 0.024559269625770694 +47 14 negative_sampler.num_negs_per_pos 2.0 +47 14 training.batch_size 2.0 +47 15 model.embedding_dim 2.0 +47 15 loss.margin 4.8855227908849095 +47 15 optimizer.lr 0.004805792993983096 +47 15 negative_sampler.num_negs_per_pos 85.0 +47 15 training.batch_size 0.0 +47 16 model.embedding_dim 0.0 +47 16 loss.margin 
2.164700512516074 +47 16 optimizer.lr 0.004422869343502652 +47 16 negative_sampler.num_negs_per_pos 2.0 +47 16 training.batch_size 2.0 +47 17 model.embedding_dim 2.0 +47 17 loss.margin 8.668531584835968 +47 17 optimizer.lr 0.04109291045621887 +47 17 negative_sampler.num_negs_per_pos 13.0 +47 17 training.batch_size 2.0 +47 18 model.embedding_dim 0.0 +47 18 loss.margin 0.8925530053638836 +47 18 optimizer.lr 0.009022933264451347 +47 18 negative_sampler.num_negs_per_pos 73.0 +47 18 training.batch_size 2.0 +47 19 model.embedding_dim 0.0 +47 19 loss.margin 8.187492198412308 +47 19 optimizer.lr 0.002191550626605886 +47 19 negative_sampler.num_negs_per_pos 77.0 +47 19 training.batch_size 1.0 +47 20 model.embedding_dim 2.0 +47 20 loss.margin 8.659037314857382 +47 20 optimizer.lr 0.0022652728558525325 +47 20 negative_sampler.num_negs_per_pos 40.0 +47 20 training.batch_size 2.0 +47 21 model.embedding_dim 1.0 +47 21 loss.margin 7.028139260595095 +47 21 optimizer.lr 0.002421618100485186 +47 21 negative_sampler.num_negs_per_pos 46.0 +47 21 training.batch_size 1.0 +47 22 model.embedding_dim 0.0 +47 22 loss.margin 3.6047176972592667 +47 22 optimizer.lr 0.0011721679042899353 +47 22 negative_sampler.num_negs_per_pos 49.0 +47 22 training.batch_size 2.0 +47 23 model.embedding_dim 2.0 +47 23 loss.margin 3.374590149609363 +47 23 optimizer.lr 0.0013298738113325871 +47 23 negative_sampler.num_negs_per_pos 72.0 +47 23 training.batch_size 2.0 +47 24 model.embedding_dim 0.0 +47 24 loss.margin 0.7746206941877838 +47 24 optimizer.lr 0.042678825612098764 +47 24 negative_sampler.num_negs_per_pos 1.0 +47 24 training.batch_size 0.0 +47 25 model.embedding_dim 2.0 +47 25 loss.margin 8.263160921766344 +47 25 optimizer.lr 0.05707075737293965 +47 25 negative_sampler.num_negs_per_pos 77.0 +47 25 training.batch_size 2.0 +47 26 model.embedding_dim 2.0 +47 26 loss.margin 8.625952803185779 +47 26 optimizer.lr 0.020705507423740516 +47 26 negative_sampler.num_negs_per_pos 59.0 +47 26 training.batch_size 1.0 
+47 27 model.embedding_dim 1.0 +47 27 loss.margin 9.231230991355067 +47 27 optimizer.lr 0.011021769450195564 +47 27 negative_sampler.num_negs_per_pos 7.0 +47 27 training.batch_size 2.0 +47 28 model.embedding_dim 0.0 +47 28 loss.margin 6.28313751357077 +47 28 optimizer.lr 0.007493363302376416 +47 28 negative_sampler.num_negs_per_pos 7.0 +47 28 training.batch_size 0.0 +47 29 model.embedding_dim 0.0 +47 29 loss.margin 6.216514254243533 +47 29 optimizer.lr 0.0028208767403069624 +47 29 negative_sampler.num_negs_per_pos 33.0 +47 29 training.batch_size 2.0 +47 30 model.embedding_dim 1.0 +47 30 loss.margin 9.200723095325898 +47 30 optimizer.lr 0.0013150389350861397 +47 30 negative_sampler.num_negs_per_pos 58.0 +47 30 training.batch_size 1.0 +47 31 model.embedding_dim 0.0 +47 31 loss.margin 7.110211398876447 +47 31 optimizer.lr 0.004795854181113488 +47 31 negative_sampler.num_negs_per_pos 6.0 +47 31 training.batch_size 1.0 +47 32 model.embedding_dim 0.0 +47 32 loss.margin 5.906454884975333 +47 32 optimizer.lr 0.008464926262297197 +47 32 negative_sampler.num_negs_per_pos 47.0 +47 32 training.batch_size 2.0 +47 33 model.embedding_dim 1.0 +47 33 loss.margin 8.053351716490267 +47 33 optimizer.lr 0.0029891091843629626 +47 33 negative_sampler.num_negs_per_pos 56.0 +47 33 training.batch_size 1.0 +47 34 model.embedding_dim 0.0 +47 34 loss.margin 5.815361043901193 +47 34 optimizer.lr 0.00835588872002325 +47 34 negative_sampler.num_negs_per_pos 2.0 +47 34 training.batch_size 0.0 +47 35 model.embedding_dim 2.0 +47 35 loss.margin 4.418281502465721 +47 35 optimizer.lr 0.09654141930592071 +47 35 negative_sampler.num_negs_per_pos 93.0 +47 35 training.batch_size 0.0 +47 36 model.embedding_dim 1.0 +47 36 loss.margin 2.21322500217337 +47 36 optimizer.lr 0.008779798137771586 +47 36 negative_sampler.num_negs_per_pos 44.0 +47 36 training.batch_size 2.0 +47 37 model.embedding_dim 2.0 +47 37 loss.margin 4.080384454992192 +47 37 optimizer.lr 0.025936728000721086 +47 37 
negative_sampler.num_negs_per_pos 43.0 +47 37 training.batch_size 1.0 +47 38 model.embedding_dim 1.0 +47 38 loss.margin 1.5322344917417552 +47 38 optimizer.lr 0.0012407366923439563 +47 38 negative_sampler.num_negs_per_pos 23.0 +47 38 training.batch_size 0.0 +47 39 model.embedding_dim 1.0 +47 39 loss.margin 3.1386152002399728 +47 39 optimizer.lr 0.011228801485928126 +47 39 negative_sampler.num_negs_per_pos 45.0 +47 39 training.batch_size 1.0 +47 40 model.embedding_dim 2.0 +47 40 loss.margin 6.9854592190038645 +47 40 optimizer.lr 0.020184594151356008 +47 40 negative_sampler.num_negs_per_pos 10.0 +47 40 training.batch_size 2.0 +47 41 model.embedding_dim 0.0 +47 41 loss.margin 3.996401030267181 +47 41 optimizer.lr 0.004607231536383081 +47 41 negative_sampler.num_negs_per_pos 53.0 +47 41 training.batch_size 0.0 +47 42 model.embedding_dim 1.0 +47 42 loss.margin 2.8295569022793337 +47 42 optimizer.lr 0.0035352133761938223 +47 42 negative_sampler.num_negs_per_pos 40.0 +47 42 training.batch_size 0.0 +47 43 model.embedding_dim 0.0 +47 43 loss.margin 3.9914856150724813 +47 43 optimizer.lr 0.08126239575135592 +47 43 negative_sampler.num_negs_per_pos 24.0 +47 43 training.batch_size 1.0 +47 44 model.embedding_dim 0.0 +47 44 loss.margin 7.765418808272704 +47 44 optimizer.lr 0.016894164958457705 +47 44 negative_sampler.num_negs_per_pos 78.0 +47 44 training.batch_size 0.0 +47 45 model.embedding_dim 0.0 +47 45 loss.margin 6.381929290882367 +47 45 optimizer.lr 0.002383327008270162 +47 45 negative_sampler.num_negs_per_pos 94.0 +47 45 training.batch_size 0.0 +47 46 model.embedding_dim 2.0 +47 46 loss.margin 8.136568576106974 +47 46 optimizer.lr 0.0021234862958371824 +47 46 negative_sampler.num_negs_per_pos 64.0 +47 46 training.batch_size 2.0 +47 47 model.embedding_dim 1.0 +47 47 loss.margin 4.4784455805241 +47 47 optimizer.lr 0.020480984428226524 +47 47 negative_sampler.num_negs_per_pos 8.0 +47 47 training.batch_size 1.0 +47 48 model.embedding_dim 2.0 +47 48 loss.margin 
5.706772884071134 +47 48 optimizer.lr 0.02994125971317648 +47 48 negative_sampler.num_negs_per_pos 16.0 +47 48 training.batch_size 2.0 +47 49 model.embedding_dim 2.0 +47 49 loss.margin 7.498645872307055 +47 49 optimizer.lr 0.03892066241228482 +47 49 negative_sampler.num_negs_per_pos 98.0 +47 49 training.batch_size 0.0 +47 50 model.embedding_dim 0.0 +47 50 loss.margin 5.640298100728011 +47 50 optimizer.lr 0.006807890791561752 +47 50 negative_sampler.num_negs_per_pos 36.0 +47 50 training.batch_size 2.0 +47 51 model.embedding_dim 0.0 +47 51 loss.margin 4.313372740367087 +47 51 optimizer.lr 0.029331120144466198 +47 51 negative_sampler.num_negs_per_pos 1.0 +47 51 training.batch_size 1.0 +47 52 model.embedding_dim 2.0 +47 52 loss.margin 6.5504786432428395 +47 52 optimizer.lr 0.0010596798055474443 +47 52 negative_sampler.num_negs_per_pos 85.0 +47 52 training.batch_size 0.0 +47 53 model.embedding_dim 0.0 +47 53 loss.margin 1.1154316951590353 +47 53 optimizer.lr 0.0016964569621506138 +47 53 negative_sampler.num_negs_per_pos 68.0 +47 53 training.batch_size 1.0 +47 54 model.embedding_dim 1.0 +47 54 loss.margin 1.3160435095347371 +47 54 optimizer.lr 0.012982033606979899 +47 54 negative_sampler.num_negs_per_pos 26.0 +47 54 training.batch_size 1.0 +47 55 model.embedding_dim 2.0 +47 55 loss.margin 0.7756300975833378 +47 55 optimizer.lr 0.08097773114356513 +47 55 negative_sampler.num_negs_per_pos 53.0 +47 55 training.batch_size 0.0 +47 56 model.embedding_dim 0.0 +47 56 loss.margin 2.5451650824983587 +47 56 optimizer.lr 0.004483543814693792 +47 56 negative_sampler.num_negs_per_pos 56.0 +47 56 training.batch_size 1.0 +47 57 model.embedding_dim 0.0 +47 57 loss.margin 0.9974339997489678 +47 57 optimizer.lr 0.0194084637880935 +47 57 negative_sampler.num_negs_per_pos 61.0 +47 57 training.batch_size 0.0 +47 58 model.embedding_dim 0.0 +47 58 loss.margin 6.581286846798046 +47 58 optimizer.lr 0.06437121488155594 +47 58 negative_sampler.num_negs_per_pos 88.0 +47 58 training.batch_size 1.0 
+47 59 model.embedding_dim 2.0 +47 59 loss.margin 6.552985329531438 +47 59 optimizer.lr 0.0255049219809813 +47 59 negative_sampler.num_negs_per_pos 10.0 +47 59 training.batch_size 0.0 +47 60 model.embedding_dim 0.0 +47 60 loss.margin 7.196329886333403 +47 60 optimizer.lr 0.001090471023793566 +47 60 negative_sampler.num_negs_per_pos 51.0 +47 60 training.batch_size 1.0 +47 61 model.embedding_dim 2.0 +47 61 loss.margin 9.29757875413028 +47 61 optimizer.lr 0.0016246068354951817 +47 61 negative_sampler.num_negs_per_pos 36.0 +47 61 training.batch_size 2.0 +47 62 model.embedding_dim 2.0 +47 62 loss.margin 4.771058284937468 +47 62 optimizer.lr 0.014741594663195214 +47 62 negative_sampler.num_negs_per_pos 55.0 +47 62 training.batch_size 1.0 +47 63 model.embedding_dim 1.0 +47 63 loss.margin 8.964838488416254 +47 63 optimizer.lr 0.007413669728406162 +47 63 negative_sampler.num_negs_per_pos 90.0 +47 63 training.batch_size 0.0 +47 64 model.embedding_dim 0.0 +47 64 loss.margin 4.151437449316501 +47 64 optimizer.lr 0.015931951759624682 +47 64 negative_sampler.num_negs_per_pos 12.0 +47 64 training.batch_size 0.0 +47 65 model.embedding_dim 1.0 +47 65 loss.margin 7.8387574621677 +47 65 optimizer.lr 0.028949556169051628 +47 65 negative_sampler.num_negs_per_pos 22.0 +47 65 training.batch_size 0.0 +47 66 model.embedding_dim 0.0 +47 66 loss.margin 2.6995894566333445 +47 66 optimizer.lr 0.0020962955590991955 +47 66 negative_sampler.num_negs_per_pos 4.0 +47 66 training.batch_size 0.0 +47 67 model.embedding_dim 2.0 +47 67 loss.margin 3.6582542829653404 +47 67 optimizer.lr 0.004262533139785154 +47 67 negative_sampler.num_negs_per_pos 38.0 +47 67 training.batch_size 1.0 +47 68 model.embedding_dim 0.0 +47 68 loss.margin 1.9136014073115413 +47 68 optimizer.lr 0.0026348738245450393 +47 68 negative_sampler.num_negs_per_pos 66.0 +47 68 training.batch_size 0.0 +47 69 model.embedding_dim 1.0 +47 69 loss.margin 4.4152848474718756 +47 69 optimizer.lr 0.014756010514673486 +47 69 
negative_sampler.num_negs_per_pos 70.0 +47 69 training.batch_size 2.0 +47 1 dataset """wn18rr""" +47 1 model """complex""" +47 1 loss """marginranking""" +47 1 regularizer """no""" +47 1 optimizer """adam""" +47 1 training_loop """owa""" +47 1 negative_sampler """basic""" +47 1 evaluator """rankbased""" +47 2 dataset """wn18rr""" +47 2 model """complex""" +47 2 loss """marginranking""" +47 2 regularizer """no""" +47 2 optimizer """adam""" +47 2 training_loop """owa""" +47 2 negative_sampler """basic""" +47 2 evaluator """rankbased""" +47 3 dataset """wn18rr""" +47 3 model """complex""" +47 3 loss """marginranking""" +47 3 regularizer """no""" +47 3 optimizer """adam""" +47 3 training_loop """owa""" +47 3 negative_sampler """basic""" +47 3 evaluator """rankbased""" +47 4 dataset """wn18rr""" +47 4 model """complex""" +47 4 loss """marginranking""" +47 4 regularizer """no""" +47 4 optimizer """adam""" +47 4 training_loop """owa""" +47 4 negative_sampler """basic""" +47 4 evaluator """rankbased""" +47 5 dataset """wn18rr""" +47 5 model """complex""" +47 5 loss """marginranking""" +47 5 regularizer """no""" +47 5 optimizer """adam""" +47 5 training_loop """owa""" +47 5 negative_sampler """basic""" +47 5 evaluator """rankbased""" +47 6 dataset """wn18rr""" +47 6 model """complex""" +47 6 loss """marginranking""" +47 6 regularizer """no""" +47 6 optimizer """adam""" +47 6 training_loop """owa""" +47 6 negative_sampler """basic""" +47 6 evaluator """rankbased""" +47 7 dataset """wn18rr""" +47 7 model """complex""" +47 7 loss """marginranking""" +47 7 regularizer """no""" +47 7 optimizer """adam""" +47 7 training_loop """owa""" +47 7 negative_sampler """basic""" +47 7 evaluator """rankbased""" +47 8 dataset """wn18rr""" +47 8 model """complex""" +47 8 loss """marginranking""" +47 8 regularizer """no""" +47 8 optimizer """adam""" +47 8 training_loop """owa""" +47 8 negative_sampler """basic""" +47 8 evaluator """rankbased""" +47 9 dataset """wn18rr""" +47 9 model 
"""complex""" +47 9 loss """marginranking""" +47 9 regularizer """no""" +47 9 optimizer """adam""" +47 9 training_loop """owa""" +47 9 negative_sampler """basic""" +47 9 evaluator """rankbased""" +47 10 dataset """wn18rr""" +47 10 model """complex""" +47 10 loss """marginranking""" +47 10 regularizer """no""" +47 10 optimizer """adam""" +47 10 training_loop """owa""" +47 10 negative_sampler """basic""" +47 10 evaluator """rankbased""" +47 11 dataset """wn18rr""" +47 11 model """complex""" +47 11 loss """marginranking""" +47 11 regularizer """no""" +47 11 optimizer """adam""" +47 11 training_loop """owa""" +47 11 negative_sampler """basic""" +47 11 evaluator """rankbased""" +47 12 dataset """wn18rr""" +47 12 model """complex""" +47 12 loss """marginranking""" +47 12 regularizer """no""" +47 12 optimizer """adam""" +47 12 training_loop """owa""" +47 12 negative_sampler """basic""" +47 12 evaluator """rankbased""" +47 13 dataset """wn18rr""" +47 13 model """complex""" +47 13 loss """marginranking""" +47 13 regularizer """no""" +47 13 optimizer """adam""" +47 13 training_loop """owa""" +47 13 negative_sampler """basic""" +47 13 evaluator """rankbased""" +47 14 dataset """wn18rr""" +47 14 model """complex""" +47 14 loss """marginranking""" +47 14 regularizer """no""" +47 14 optimizer """adam""" +47 14 training_loop """owa""" +47 14 negative_sampler """basic""" +47 14 evaluator """rankbased""" +47 15 dataset """wn18rr""" +47 15 model """complex""" +47 15 loss """marginranking""" +47 15 regularizer """no""" +47 15 optimizer """adam""" +47 15 training_loop """owa""" +47 15 negative_sampler """basic""" +47 15 evaluator """rankbased""" +47 16 dataset """wn18rr""" +47 16 model """complex""" +47 16 loss """marginranking""" +47 16 regularizer """no""" +47 16 optimizer """adam""" +47 16 training_loop """owa""" +47 16 negative_sampler """basic""" +47 16 evaluator """rankbased""" +47 17 dataset """wn18rr""" +47 17 model """complex""" +47 17 loss """marginranking""" +47 17 
regularizer """no""" +47 17 optimizer """adam""" +47 17 training_loop """owa""" +47 17 negative_sampler """basic""" +47 17 evaluator """rankbased""" +47 18 dataset """wn18rr""" +47 18 model """complex""" +47 18 loss """marginranking""" +47 18 regularizer """no""" +47 18 optimizer """adam""" +47 18 training_loop """owa""" +47 18 negative_sampler """basic""" +47 18 evaluator """rankbased""" +47 19 dataset """wn18rr""" +47 19 model """complex""" +47 19 loss """marginranking""" +47 19 regularizer """no""" +47 19 optimizer """adam""" +47 19 training_loop """owa""" +47 19 negative_sampler """basic""" +47 19 evaluator """rankbased""" +47 20 dataset """wn18rr""" +47 20 model """complex""" +47 20 loss """marginranking""" +47 20 regularizer """no""" +47 20 optimizer """adam""" +47 20 training_loop """owa""" +47 20 negative_sampler """basic""" +47 20 evaluator """rankbased""" +47 21 dataset """wn18rr""" +47 21 model """complex""" +47 21 loss """marginranking""" +47 21 regularizer """no""" +47 21 optimizer """adam""" +47 21 training_loop """owa""" +47 21 negative_sampler """basic""" +47 21 evaluator """rankbased""" +47 22 dataset """wn18rr""" +47 22 model """complex""" +47 22 loss """marginranking""" +47 22 regularizer """no""" +47 22 optimizer """adam""" +47 22 training_loop """owa""" +47 22 negative_sampler """basic""" +47 22 evaluator """rankbased""" +47 23 dataset """wn18rr""" +47 23 model """complex""" +47 23 loss """marginranking""" +47 23 regularizer """no""" +47 23 optimizer """adam""" +47 23 training_loop """owa""" +47 23 negative_sampler """basic""" +47 23 evaluator """rankbased""" +47 24 dataset """wn18rr""" +47 24 model """complex""" +47 24 loss """marginranking""" +47 24 regularizer """no""" +47 24 optimizer """adam""" +47 24 training_loop """owa""" +47 24 negative_sampler """basic""" +47 24 evaluator """rankbased""" +47 25 dataset """wn18rr""" +47 25 model """complex""" +47 25 loss """marginranking""" +47 25 regularizer """no""" +47 25 optimizer """adam""" +47 25 
training_loop """owa""" +47 25 negative_sampler """basic""" +47 25 evaluator """rankbased""" +47 26 dataset """wn18rr""" +47 26 model """complex""" +47 26 loss """marginranking""" +47 26 regularizer """no""" +47 26 optimizer """adam""" +47 26 training_loop """owa""" +47 26 negative_sampler """basic""" +47 26 evaluator """rankbased""" +47 27 dataset """wn18rr""" +47 27 model """complex""" +47 27 loss """marginranking""" +47 27 regularizer """no""" +47 27 optimizer """adam""" +47 27 training_loop """owa""" +47 27 negative_sampler """basic""" +47 27 evaluator """rankbased""" +47 28 dataset """wn18rr""" +47 28 model """complex""" +47 28 loss """marginranking""" +47 28 regularizer """no""" +47 28 optimizer """adam""" +47 28 training_loop """owa""" +47 28 negative_sampler """basic""" +47 28 evaluator """rankbased""" +47 29 dataset """wn18rr""" +47 29 model """complex""" +47 29 loss """marginranking""" +47 29 regularizer """no""" +47 29 optimizer """adam""" +47 29 training_loop """owa""" +47 29 negative_sampler """basic""" +47 29 evaluator """rankbased""" +47 30 dataset """wn18rr""" +47 30 model """complex""" +47 30 loss """marginranking""" +47 30 regularizer """no""" +47 30 optimizer """adam""" +47 30 training_loop """owa""" +47 30 negative_sampler """basic""" +47 30 evaluator """rankbased""" +47 31 dataset """wn18rr""" +47 31 model """complex""" +47 31 loss """marginranking""" +47 31 regularizer """no""" +47 31 optimizer """adam""" +47 31 training_loop """owa""" +47 31 negative_sampler """basic""" +47 31 evaluator """rankbased""" +47 32 dataset """wn18rr""" +47 32 model """complex""" +47 32 loss """marginranking""" +47 32 regularizer """no""" +47 32 optimizer """adam""" +47 32 training_loop """owa""" +47 32 negative_sampler """basic""" +47 32 evaluator """rankbased""" +47 33 dataset """wn18rr""" +47 33 model """complex""" +47 33 loss """marginranking""" +47 33 regularizer """no""" +47 33 optimizer """adam""" +47 33 training_loop """owa""" +47 33 negative_sampler 
"""basic""" +47 33 evaluator """rankbased""" +47 34 dataset """wn18rr""" +47 34 model """complex""" +47 34 loss """marginranking""" +47 34 regularizer """no""" +47 34 optimizer """adam""" +47 34 training_loop """owa""" +47 34 negative_sampler """basic""" +47 34 evaluator """rankbased""" +47 35 dataset """wn18rr""" +47 35 model """complex""" +47 35 loss """marginranking""" +47 35 regularizer """no""" +47 35 optimizer """adam""" +47 35 training_loop """owa""" +47 35 negative_sampler """basic""" +47 35 evaluator """rankbased""" +47 36 dataset """wn18rr""" +47 36 model """complex""" +47 36 loss """marginranking""" +47 36 regularizer """no""" +47 36 optimizer """adam""" +47 36 training_loop """owa""" +47 36 negative_sampler """basic""" +47 36 evaluator """rankbased""" +47 37 dataset """wn18rr""" +47 37 model """complex""" +47 37 loss """marginranking""" +47 37 regularizer """no""" +47 37 optimizer """adam""" +47 37 training_loop """owa""" +47 37 negative_sampler """basic""" +47 37 evaluator """rankbased""" +47 38 dataset """wn18rr""" +47 38 model """complex""" +47 38 loss """marginranking""" +47 38 regularizer """no""" +47 38 optimizer """adam""" +47 38 training_loop """owa""" +47 38 negative_sampler """basic""" +47 38 evaluator """rankbased""" +47 39 dataset """wn18rr""" +47 39 model """complex""" +47 39 loss """marginranking""" +47 39 regularizer """no""" +47 39 optimizer """adam""" +47 39 training_loop """owa""" +47 39 negative_sampler """basic""" +47 39 evaluator """rankbased""" +47 40 dataset """wn18rr""" +47 40 model """complex""" +47 40 loss """marginranking""" +47 40 regularizer """no""" +47 40 optimizer """adam""" +47 40 training_loop """owa""" +47 40 negative_sampler """basic""" +47 40 evaluator """rankbased""" +47 41 dataset """wn18rr""" +47 41 model """complex""" +47 41 loss """marginranking""" +47 41 regularizer """no""" +47 41 optimizer """adam""" +47 41 training_loop """owa""" +47 41 negative_sampler """basic""" +47 41 evaluator """rankbased""" +47 42 
dataset """wn18rr""" +47 42 model """complex""" +47 42 loss """marginranking""" +47 42 regularizer """no""" +47 42 optimizer """adam""" +47 42 training_loop """owa""" +47 42 negative_sampler """basic""" +47 42 evaluator """rankbased""" +47 43 dataset """wn18rr""" +47 43 model """complex""" +47 43 loss """marginranking""" +47 43 regularizer """no""" +47 43 optimizer """adam""" +47 43 training_loop """owa""" +47 43 negative_sampler """basic""" +47 43 evaluator """rankbased""" +47 44 dataset """wn18rr""" +47 44 model """complex""" +47 44 loss """marginranking""" +47 44 regularizer """no""" +47 44 optimizer """adam""" +47 44 training_loop """owa""" +47 44 negative_sampler """basic""" +47 44 evaluator """rankbased""" +47 45 dataset """wn18rr""" +47 45 model """complex""" +47 45 loss """marginranking""" +47 45 regularizer """no""" +47 45 optimizer """adam""" +47 45 training_loop """owa""" +47 45 negative_sampler """basic""" +47 45 evaluator """rankbased""" +47 46 dataset """wn18rr""" +47 46 model """complex""" +47 46 loss """marginranking""" +47 46 regularizer """no""" +47 46 optimizer """adam""" +47 46 training_loop """owa""" +47 46 negative_sampler """basic""" +47 46 evaluator """rankbased""" +47 47 dataset """wn18rr""" +47 47 model """complex""" +47 47 loss """marginranking""" +47 47 regularizer """no""" +47 47 optimizer """adam""" +47 47 training_loop """owa""" +47 47 negative_sampler """basic""" +47 47 evaluator """rankbased""" +47 48 dataset """wn18rr""" +47 48 model """complex""" +47 48 loss """marginranking""" +47 48 regularizer """no""" +47 48 optimizer """adam""" +47 48 training_loop """owa""" +47 48 negative_sampler """basic""" +47 48 evaluator """rankbased""" +47 49 dataset """wn18rr""" +47 49 model """complex""" +47 49 loss """marginranking""" +47 49 regularizer """no""" +47 49 optimizer """adam""" +47 49 training_loop """owa""" +47 49 negative_sampler """basic""" +47 49 evaluator """rankbased""" +47 50 dataset """wn18rr""" +47 50 model """complex""" +47 50 
loss """marginranking""" +47 50 regularizer """no""" +47 50 optimizer """adam""" +47 50 training_loop """owa""" +47 50 negative_sampler """basic""" +47 50 evaluator """rankbased""" +47 51 dataset """wn18rr""" +47 51 model """complex""" +47 51 loss """marginranking""" +47 51 regularizer """no""" +47 51 optimizer """adam""" +47 51 training_loop """owa""" +47 51 negative_sampler """basic""" +47 51 evaluator """rankbased""" +47 52 dataset """wn18rr""" +47 52 model """complex""" +47 52 loss """marginranking""" +47 52 regularizer """no""" +47 52 optimizer """adam""" +47 52 training_loop """owa""" +47 52 negative_sampler """basic""" +47 52 evaluator """rankbased""" +47 53 dataset """wn18rr""" +47 53 model """complex""" +47 53 loss """marginranking""" +47 53 regularizer """no""" +47 53 optimizer """adam""" +47 53 training_loop """owa""" +47 53 negative_sampler """basic""" +47 53 evaluator """rankbased""" +47 54 dataset """wn18rr""" +47 54 model """complex""" +47 54 loss """marginranking""" +47 54 regularizer """no""" +47 54 optimizer """adam""" +47 54 training_loop """owa""" +47 54 negative_sampler """basic""" +47 54 evaluator """rankbased""" +47 55 dataset """wn18rr""" +47 55 model """complex""" +47 55 loss """marginranking""" +47 55 regularizer """no""" +47 55 optimizer """adam""" +47 55 training_loop """owa""" +47 55 negative_sampler """basic""" +47 55 evaluator """rankbased""" +47 56 dataset """wn18rr""" +47 56 model """complex""" +47 56 loss """marginranking""" +47 56 regularizer """no""" +47 56 optimizer """adam""" +47 56 training_loop """owa""" +47 56 negative_sampler """basic""" +47 56 evaluator """rankbased""" +47 57 dataset """wn18rr""" +47 57 model """complex""" +47 57 loss """marginranking""" +47 57 regularizer """no""" +47 57 optimizer """adam""" +47 57 training_loop """owa""" +47 57 negative_sampler """basic""" +47 57 evaluator """rankbased""" +47 58 dataset """wn18rr""" +47 58 model """complex""" +47 58 loss """marginranking""" +47 58 regularizer """no""" 
+47 58 optimizer """adam""" +47 58 training_loop """owa""" +47 58 negative_sampler """basic""" +47 58 evaluator """rankbased""" +47 59 dataset """wn18rr""" +47 59 model """complex""" +47 59 loss """marginranking""" +47 59 regularizer """no""" +47 59 optimizer """adam""" +47 59 training_loop """owa""" +47 59 negative_sampler """basic""" +47 59 evaluator """rankbased""" +47 60 dataset """wn18rr""" +47 60 model """complex""" +47 60 loss """marginranking""" +47 60 regularizer """no""" +47 60 optimizer """adam""" +47 60 training_loop """owa""" +47 60 negative_sampler """basic""" +47 60 evaluator """rankbased""" +47 61 dataset """wn18rr""" +47 61 model """complex""" +47 61 loss """marginranking""" +47 61 regularizer """no""" +47 61 optimizer """adam""" +47 61 training_loop """owa""" +47 61 negative_sampler """basic""" +47 61 evaluator """rankbased""" +47 62 dataset """wn18rr""" +47 62 model """complex""" +47 62 loss """marginranking""" +47 62 regularizer """no""" +47 62 optimizer """adam""" +47 62 training_loop """owa""" +47 62 negative_sampler """basic""" +47 62 evaluator """rankbased""" +47 63 dataset """wn18rr""" +47 63 model """complex""" +47 63 loss """marginranking""" +47 63 regularizer """no""" +47 63 optimizer """adam""" +47 63 training_loop """owa""" +47 63 negative_sampler """basic""" +47 63 evaluator """rankbased""" +47 64 dataset """wn18rr""" +47 64 model """complex""" +47 64 loss """marginranking""" +47 64 regularizer """no""" +47 64 optimizer """adam""" +47 64 training_loop """owa""" +47 64 negative_sampler """basic""" +47 64 evaluator """rankbased""" +47 65 dataset """wn18rr""" +47 65 model """complex""" +47 65 loss """marginranking""" +47 65 regularizer """no""" +47 65 optimizer """adam""" +47 65 training_loop """owa""" +47 65 negative_sampler """basic""" +47 65 evaluator """rankbased""" +47 66 dataset """wn18rr""" +47 66 model """complex""" +47 66 loss """marginranking""" +47 66 regularizer """no""" +47 66 optimizer """adam""" +47 66 training_loop 
"""owa""" +47 66 negative_sampler """basic""" +47 66 evaluator """rankbased""" +47 67 dataset """wn18rr""" +47 67 model """complex""" +47 67 loss """marginranking""" +47 67 regularizer """no""" +47 67 optimizer """adam""" +47 67 training_loop """owa""" +47 67 negative_sampler """basic""" +47 67 evaluator """rankbased""" +47 68 dataset """wn18rr""" +47 68 model """complex""" +47 68 loss """marginranking""" +47 68 regularizer """no""" +47 68 optimizer """adam""" +47 68 training_loop """owa""" +47 68 negative_sampler """basic""" +47 68 evaluator """rankbased""" +47 69 dataset """wn18rr""" +47 69 model """complex""" +47 69 loss """marginranking""" +47 69 regularizer """no""" +47 69 optimizer """adam""" +47 69 training_loop """owa""" +47 69 negative_sampler """basic""" +47 69 evaluator """rankbased""" +48 1 model.embedding_dim 0.0 +48 1 loss.margin 7.617052414371828 +48 1 loss.adversarial_temperature 0.3551565048760494 +48 1 optimizer.lr 0.003960147871393484 +48 1 negative_sampler.num_negs_per_pos 91.0 +48 1 training.batch_size 0.0 +48 2 model.embedding_dim 2.0 +48 2 loss.margin 8.021540576147837 +48 2 loss.adversarial_temperature 0.6554666090266303 +48 2 optimizer.lr 0.008619529697190452 +48 2 negative_sampler.num_negs_per_pos 59.0 +48 2 training.batch_size 0.0 +48 3 model.embedding_dim 0.0 +48 3 loss.margin 18.538724474953703 +48 3 loss.adversarial_temperature 0.1916135780333952 +48 3 optimizer.lr 0.026379543086518176 +48 3 negative_sampler.num_negs_per_pos 48.0 +48 3 training.batch_size 2.0 +48 4 model.embedding_dim 0.0 +48 4 loss.margin 12.107689744046292 +48 4 loss.adversarial_temperature 0.502122506829174 +48 4 optimizer.lr 0.015383767798472147 +48 4 negative_sampler.num_negs_per_pos 24.0 +48 4 training.batch_size 0.0 +48 5 model.embedding_dim 1.0 +48 5 loss.margin 17.692904819745742 +48 5 loss.adversarial_temperature 0.6529700589711825 +48 5 optimizer.lr 0.0024753126109747694 +48 5 negative_sampler.num_negs_per_pos 41.0 +48 5 training.batch_size 2.0 +48 6 
model.embedding_dim 1.0 +48 6 loss.margin 15.9378352199052 +48 6 loss.adversarial_temperature 0.6989556838118368 +48 6 optimizer.lr 0.02903369366616949 +48 6 negative_sampler.num_negs_per_pos 4.0 +48 6 training.batch_size 0.0 +48 7 model.embedding_dim 1.0 +48 7 loss.margin 4.87919930597479 +48 7 loss.adversarial_temperature 0.7373331637039977 +48 7 optimizer.lr 0.0026646042375454357 +48 7 negative_sampler.num_negs_per_pos 25.0 +48 7 training.batch_size 1.0 +48 8 model.embedding_dim 0.0 +48 8 loss.margin 26.266822452613095 +48 8 loss.adversarial_temperature 0.43921424650172636 +48 8 optimizer.lr 0.002854605712716868 +48 8 negative_sampler.num_negs_per_pos 46.0 +48 8 training.batch_size 2.0 +48 9 model.embedding_dim 1.0 +48 9 loss.margin 27.889349753724296 +48 9 loss.adversarial_temperature 0.8782563320317412 +48 9 optimizer.lr 0.001986234054187251 +48 9 negative_sampler.num_negs_per_pos 29.0 +48 9 training.batch_size 0.0 +48 10 model.embedding_dim 0.0 +48 10 loss.margin 1.597250984867455 +48 10 loss.adversarial_temperature 0.6296111083055173 +48 10 optimizer.lr 0.0018259458323845333 +48 10 negative_sampler.num_negs_per_pos 20.0 +48 10 training.batch_size 0.0 +48 11 model.embedding_dim 1.0 +48 11 loss.margin 13.5533834363764 +48 11 loss.adversarial_temperature 0.44295418563221195 +48 11 optimizer.lr 0.0026070089639421102 +48 11 negative_sampler.num_negs_per_pos 87.0 +48 11 training.batch_size 0.0 +48 12 model.embedding_dim 0.0 +48 12 loss.margin 7.229043873960401 +48 12 loss.adversarial_temperature 0.520523785511468 +48 12 optimizer.lr 0.028232809307207313 +48 12 negative_sampler.num_negs_per_pos 8.0 +48 12 training.batch_size 2.0 +48 13 model.embedding_dim 0.0 +48 13 loss.margin 14.993048564692117 +48 13 loss.adversarial_temperature 0.8208166994103744 +48 13 optimizer.lr 0.0426417894963815 +48 13 negative_sampler.num_negs_per_pos 93.0 +48 13 training.batch_size 2.0 +48 14 model.embedding_dim 1.0 +48 14 loss.margin 2.8967437148818656 +48 14 
loss.adversarial_temperature 0.8256969768824214 +48 14 optimizer.lr 0.08099830459972042 +48 14 negative_sampler.num_negs_per_pos 17.0 +48 14 training.batch_size 0.0 +48 15 model.embedding_dim 2.0 +48 15 loss.margin 29.629237642889727 +48 15 loss.adversarial_temperature 0.5690639400316002 +48 15 optimizer.lr 0.0014616600041617183 +48 15 negative_sampler.num_negs_per_pos 30.0 +48 15 training.batch_size 0.0 +48 16 model.embedding_dim 1.0 +48 16 loss.margin 3.6208539898837495 +48 16 loss.adversarial_temperature 0.38316364207825043 +48 16 optimizer.lr 0.0015715537504032812 +48 16 negative_sampler.num_negs_per_pos 42.0 +48 16 training.batch_size 1.0 +48 17 model.embedding_dim 2.0 +48 17 loss.margin 11.019254584905495 +48 17 loss.adversarial_temperature 0.7520001307852312 +48 17 optimizer.lr 0.01707586331221641 +48 17 negative_sampler.num_negs_per_pos 50.0 +48 17 training.batch_size 0.0 +48 18 model.embedding_dim 1.0 +48 18 loss.margin 21.59001143636971 +48 18 loss.adversarial_temperature 0.23847504691134933 +48 18 optimizer.lr 0.013454567482117288 +48 18 negative_sampler.num_negs_per_pos 75.0 +48 18 training.batch_size 2.0 +48 19 model.embedding_dim 1.0 +48 19 loss.margin 25.247902569931174 +48 19 loss.adversarial_temperature 0.986453662991775 +48 19 optimizer.lr 0.006629168736726603 +48 19 negative_sampler.num_negs_per_pos 88.0 +48 19 training.batch_size 2.0 +48 20 model.embedding_dim 2.0 +48 20 loss.margin 1.8235953602248363 +48 20 loss.adversarial_temperature 0.1198021674373818 +48 20 optimizer.lr 0.007062428987556846 +48 20 negative_sampler.num_negs_per_pos 59.0 +48 20 training.batch_size 1.0 +48 21 model.embedding_dim 0.0 +48 21 loss.margin 9.198929581048892 +48 21 loss.adversarial_temperature 0.6285252807532886 +48 21 optimizer.lr 0.0051626095046675815 +48 21 negative_sampler.num_negs_per_pos 37.0 +48 21 training.batch_size 2.0 +48 22 model.embedding_dim 0.0 +48 22 loss.margin 19.710577009465403 +48 22 loss.adversarial_temperature 0.5625057134496176 +48 22 
optimizer.lr 0.02051253606345498 +48 22 negative_sampler.num_negs_per_pos 1.0 +48 22 training.batch_size 1.0 +48 23 model.embedding_dim 1.0 +48 23 loss.margin 27.940734151496734 +48 23 loss.adversarial_temperature 0.1373162292220874 +48 23 optimizer.lr 0.07204338263682322 +48 23 negative_sampler.num_negs_per_pos 40.0 +48 23 training.batch_size 2.0 +48 24 model.embedding_dim 2.0 +48 24 loss.margin 12.596314567130236 +48 24 loss.adversarial_temperature 0.35571494496912676 +48 24 optimizer.lr 0.004772944849434548 +48 24 negative_sampler.num_negs_per_pos 48.0 +48 24 training.batch_size 2.0 +48 25 model.embedding_dim 2.0 +48 25 loss.margin 1.9471186211933411 +48 25 loss.adversarial_temperature 0.7692239632886604 +48 25 optimizer.lr 0.016261626038792748 +48 25 negative_sampler.num_negs_per_pos 49.0 +48 25 training.batch_size 0.0 +48 26 model.embedding_dim 1.0 +48 26 loss.margin 13.769045993103777 +48 26 loss.adversarial_temperature 0.44518868842085496 +48 26 optimizer.lr 0.0016706119680328267 +48 26 negative_sampler.num_negs_per_pos 97.0 +48 26 training.batch_size 1.0 +48 27 model.embedding_dim 1.0 +48 27 loss.margin 9.566800905317404 +48 27 loss.adversarial_temperature 0.19286521506225307 +48 27 optimizer.lr 0.0018423488080016952 +48 27 negative_sampler.num_negs_per_pos 27.0 +48 27 training.batch_size 2.0 +48 28 model.embedding_dim 1.0 +48 28 loss.margin 29.86829383592216 +48 28 loss.adversarial_temperature 0.23874237326275277 +48 28 optimizer.lr 0.08248845274018012 +48 28 negative_sampler.num_negs_per_pos 88.0 +48 28 training.batch_size 0.0 +48 29 model.embedding_dim 0.0 +48 29 loss.margin 6.738294072649098 +48 29 loss.adversarial_temperature 0.2035353207907245 +48 29 optimizer.lr 0.002766219186810357 +48 29 negative_sampler.num_negs_per_pos 82.0 +48 29 training.batch_size 0.0 +48 30 model.embedding_dim 1.0 +48 30 loss.margin 26.902944938386035 +48 30 loss.adversarial_temperature 0.7271543430359955 +48 30 optimizer.lr 0.05951386356762471 +48 30 
negative_sampler.num_negs_per_pos 8.0 +48 30 training.batch_size 1.0 +48 31 model.embedding_dim 0.0 +48 31 loss.margin 23.30745000466943 +48 31 loss.adversarial_temperature 0.4449210246744428 +48 31 optimizer.lr 0.023085397878810778 +48 31 negative_sampler.num_negs_per_pos 96.0 +48 31 training.batch_size 1.0 +48 32 model.embedding_dim 0.0 +48 32 loss.margin 2.648579650726846 +48 32 loss.adversarial_temperature 0.5298203572733098 +48 32 optimizer.lr 0.03767026513646232 +48 32 negative_sampler.num_negs_per_pos 21.0 +48 32 training.batch_size 1.0 +48 33 model.embedding_dim 2.0 +48 33 loss.margin 26.665659186910545 +48 33 loss.adversarial_temperature 0.3355463131221084 +48 33 optimizer.lr 0.002657886805910865 +48 33 negative_sampler.num_negs_per_pos 1.0 +48 33 training.batch_size 0.0 +48 34 model.embedding_dim 0.0 +48 34 loss.margin 12.039177622095105 +48 34 loss.adversarial_temperature 0.22871757297518153 +48 34 optimizer.lr 0.01203719956409534 +48 34 negative_sampler.num_negs_per_pos 45.0 +48 34 training.batch_size 2.0 +48 35 model.embedding_dim 0.0 +48 35 loss.margin 28.91957259564569 +48 35 loss.adversarial_temperature 0.26491508866645014 +48 35 optimizer.lr 0.0021059646884556012 +48 35 negative_sampler.num_negs_per_pos 65.0 +48 35 training.batch_size 0.0 +48 36 model.embedding_dim 1.0 +48 36 loss.margin 17.39327498444269 +48 36 loss.adversarial_temperature 0.4146404023463031 +48 36 optimizer.lr 0.019778382986913333 +48 36 negative_sampler.num_negs_per_pos 46.0 +48 36 training.batch_size 1.0 +48 37 model.embedding_dim 1.0 +48 37 loss.margin 5.246545150873509 +48 37 loss.adversarial_temperature 0.9926562802127199 +48 37 optimizer.lr 0.0038301148706853375 +48 37 negative_sampler.num_negs_per_pos 17.0 +48 37 training.batch_size 0.0 +48 38 model.embedding_dim 0.0 +48 38 loss.margin 11.902615953304354 +48 38 loss.adversarial_temperature 0.6057358900781212 +48 38 optimizer.lr 0.036858886720991184 +48 38 negative_sampler.num_negs_per_pos 56.0 +48 38 training.batch_size 
2.0 +48 39 model.embedding_dim 1.0 +48 39 loss.margin 11.621183839555984 +48 39 loss.adversarial_temperature 0.6667951698453799 +48 39 optimizer.lr 0.0033926380898662966 +48 39 negative_sampler.num_negs_per_pos 50.0 +48 39 training.batch_size 0.0 +48 40 model.embedding_dim 0.0 +48 40 loss.margin 11.3394200975833 +48 40 loss.adversarial_temperature 0.9166374489101985 +48 40 optimizer.lr 0.005969442357703015 +48 40 negative_sampler.num_negs_per_pos 66.0 +48 40 training.batch_size 2.0 +48 41 model.embedding_dim 0.0 +48 41 loss.margin 22.70741879869767 +48 41 loss.adversarial_temperature 0.6948508917502413 +48 41 optimizer.lr 0.0018301126327112155 +48 41 negative_sampler.num_negs_per_pos 38.0 +48 41 training.batch_size 0.0 +48 42 model.embedding_dim 1.0 +48 42 loss.margin 21.524143493638896 +48 42 loss.adversarial_temperature 0.587006642627736 +48 42 optimizer.lr 0.0013195047275710485 +48 42 negative_sampler.num_negs_per_pos 46.0 +48 42 training.batch_size 1.0 +48 43 model.embedding_dim 1.0 +48 43 loss.margin 7.012766881912998 +48 43 loss.adversarial_temperature 0.5231185342878687 +48 43 optimizer.lr 0.04463662401640231 +48 43 negative_sampler.num_negs_per_pos 22.0 +48 43 training.batch_size 1.0 +48 44 model.embedding_dim 0.0 +48 44 loss.margin 21.739803268457734 +48 44 loss.adversarial_temperature 0.7861026762398402 +48 44 optimizer.lr 0.011923116801703312 +48 44 negative_sampler.num_negs_per_pos 30.0 +48 44 training.batch_size 0.0 +48 45 model.embedding_dim 1.0 +48 45 loss.margin 14.449386947270746 +48 45 loss.adversarial_temperature 0.2849971687337452 +48 45 optimizer.lr 0.03751001876920745 +48 45 negative_sampler.num_negs_per_pos 68.0 +48 45 training.batch_size 2.0 +48 46 model.embedding_dim 0.0 +48 46 loss.margin 4.879875483974443 +48 46 loss.adversarial_temperature 0.6729028966263859 +48 46 optimizer.lr 0.0014299209581222732 +48 46 negative_sampler.num_negs_per_pos 13.0 +48 46 training.batch_size 1.0 +48 47 model.embedding_dim 2.0 +48 47 loss.margin 
24.521302882233826 +48 47 loss.adversarial_temperature 0.8823319824960956 +48 47 optimizer.lr 0.06457664241208864 +48 47 negative_sampler.num_negs_per_pos 21.0 +48 47 training.batch_size 1.0 +48 48 model.embedding_dim 1.0 +48 48 loss.margin 4.193673885101208 +48 48 loss.adversarial_temperature 0.7830008144784151 +48 48 optimizer.lr 0.012253125236314402 +48 48 negative_sampler.num_negs_per_pos 77.0 +48 48 training.batch_size 2.0 +48 49 model.embedding_dim 2.0 +48 49 loss.margin 24.087156201657557 +48 49 loss.adversarial_temperature 0.9513495759767872 +48 49 optimizer.lr 0.04272801968544759 +48 49 negative_sampler.num_negs_per_pos 95.0 +48 49 training.batch_size 1.0 +48 50 model.embedding_dim 0.0 +48 50 loss.margin 12.348192878697542 +48 50 loss.adversarial_temperature 0.2837157417298746 +48 50 optimizer.lr 0.004217790750204024 +48 50 negative_sampler.num_negs_per_pos 64.0 +48 50 training.batch_size 1.0 +48 51 model.embedding_dim 2.0 +48 51 loss.margin 25.011028505887353 +48 51 loss.adversarial_temperature 0.6303908629528115 +48 51 optimizer.lr 0.001239326977089677 +48 51 negative_sampler.num_negs_per_pos 61.0 +48 51 training.batch_size 0.0 +48 52 model.embedding_dim 2.0 +48 52 loss.margin 18.624236937369286 +48 52 loss.adversarial_temperature 0.6510791989461834 +48 52 optimizer.lr 0.009161381102481592 +48 52 negative_sampler.num_negs_per_pos 96.0 +48 52 training.batch_size 0.0 +48 1 dataset """wn18rr""" +48 1 model """complex""" +48 1 loss """nssa""" +48 1 regularizer """no""" +48 1 optimizer """adam""" +48 1 training_loop """owa""" +48 1 negative_sampler """basic""" +48 1 evaluator """rankbased""" +48 2 dataset """wn18rr""" +48 2 model """complex""" +48 2 loss """nssa""" +48 2 regularizer """no""" +48 2 optimizer """adam""" +48 2 training_loop """owa""" +48 2 negative_sampler """basic""" +48 2 evaluator """rankbased""" +48 3 dataset """wn18rr""" +48 3 model """complex""" +48 3 loss """nssa""" +48 3 regularizer """no""" +48 3 optimizer """adam""" +48 3 training_loop 
"""owa""" +48 3 negative_sampler """basic""" +48 3 evaluator """rankbased""" +48 4 dataset """wn18rr""" +48 4 model """complex""" +48 4 loss """nssa""" +48 4 regularizer """no""" +48 4 optimizer """adam""" +48 4 training_loop """owa""" +48 4 negative_sampler """basic""" +48 4 evaluator """rankbased""" +48 5 dataset """wn18rr""" +48 5 model """complex""" +48 5 loss """nssa""" +48 5 regularizer """no""" +48 5 optimizer """adam""" +48 5 training_loop """owa""" +48 5 negative_sampler """basic""" +48 5 evaluator """rankbased""" +48 6 dataset """wn18rr""" +48 6 model """complex""" +48 6 loss """nssa""" +48 6 regularizer """no""" +48 6 optimizer """adam""" +48 6 training_loop """owa""" +48 6 negative_sampler """basic""" +48 6 evaluator """rankbased""" +48 7 dataset """wn18rr""" +48 7 model """complex""" +48 7 loss """nssa""" +48 7 regularizer """no""" +48 7 optimizer """adam""" +48 7 training_loop """owa""" +48 7 negative_sampler """basic""" +48 7 evaluator """rankbased""" +48 8 dataset """wn18rr""" +48 8 model """complex""" +48 8 loss """nssa""" +48 8 regularizer """no""" +48 8 optimizer """adam""" +48 8 training_loop """owa""" +48 8 negative_sampler """basic""" +48 8 evaluator """rankbased""" +48 9 dataset """wn18rr""" +48 9 model """complex""" +48 9 loss """nssa""" +48 9 regularizer """no""" +48 9 optimizer """adam""" +48 9 training_loop """owa""" +48 9 negative_sampler """basic""" +48 9 evaluator """rankbased""" +48 10 dataset """wn18rr""" +48 10 model """complex""" +48 10 loss """nssa""" +48 10 regularizer """no""" +48 10 optimizer """adam""" +48 10 training_loop """owa""" +48 10 negative_sampler """basic""" +48 10 evaluator """rankbased""" +48 11 dataset """wn18rr""" +48 11 model """complex""" +48 11 loss """nssa""" +48 11 regularizer """no""" +48 11 optimizer """adam""" +48 11 training_loop """owa""" +48 11 negative_sampler """basic""" +48 11 evaluator """rankbased""" +48 12 dataset """wn18rr""" +48 12 model """complex""" +48 12 loss """nssa""" +48 12 regularizer 
"""no""" +48 12 optimizer """adam""" +48 12 training_loop """owa""" +48 12 negative_sampler """basic""" +48 12 evaluator """rankbased""" +48 13 dataset """wn18rr""" +48 13 model """complex""" +48 13 loss """nssa""" +48 13 regularizer """no""" +48 13 optimizer """adam""" +48 13 training_loop """owa""" +48 13 negative_sampler """basic""" +48 13 evaluator """rankbased""" +48 14 dataset """wn18rr""" +48 14 model """complex""" +48 14 loss """nssa""" +48 14 regularizer """no""" +48 14 optimizer """adam""" +48 14 training_loop """owa""" +48 14 negative_sampler """basic""" +48 14 evaluator """rankbased""" +48 15 dataset """wn18rr""" +48 15 model """complex""" +48 15 loss """nssa""" +48 15 regularizer """no""" +48 15 optimizer """adam""" +48 15 training_loop """owa""" +48 15 negative_sampler """basic""" +48 15 evaluator """rankbased""" +48 16 dataset """wn18rr""" +48 16 model """complex""" +48 16 loss """nssa""" +48 16 regularizer """no""" +48 16 optimizer """adam""" +48 16 training_loop """owa""" +48 16 negative_sampler """basic""" +48 16 evaluator """rankbased""" +48 17 dataset """wn18rr""" +48 17 model """complex""" +48 17 loss """nssa""" +48 17 regularizer """no""" +48 17 optimizer """adam""" +48 17 training_loop """owa""" +48 17 negative_sampler """basic""" +48 17 evaluator """rankbased""" +48 18 dataset """wn18rr""" +48 18 model """complex""" +48 18 loss """nssa""" +48 18 regularizer """no""" +48 18 optimizer """adam""" +48 18 training_loop """owa""" +48 18 negative_sampler """basic""" +48 18 evaluator """rankbased""" +48 19 dataset """wn18rr""" +48 19 model """complex""" +48 19 loss """nssa""" +48 19 regularizer """no""" +48 19 optimizer """adam""" +48 19 training_loop """owa""" +48 19 negative_sampler """basic""" +48 19 evaluator """rankbased""" +48 20 dataset """wn18rr""" +48 20 model """complex""" +48 20 loss """nssa""" +48 20 regularizer """no""" +48 20 optimizer """adam""" +48 20 training_loop """owa""" +48 20 negative_sampler """basic""" +48 20 evaluator 
"""rankbased""" +48 21 dataset """wn18rr""" +48 21 model """complex""" +48 21 loss """nssa""" +48 21 regularizer """no""" +48 21 optimizer """adam""" +48 21 training_loop """owa""" +48 21 negative_sampler """basic""" +48 21 evaluator """rankbased""" +48 22 dataset """wn18rr""" +48 22 model """complex""" +48 22 loss """nssa""" +48 22 regularizer """no""" +48 22 optimizer """adam""" +48 22 training_loop """owa""" +48 22 negative_sampler """basic""" +48 22 evaluator """rankbased""" +48 23 dataset """wn18rr""" +48 23 model """complex""" +48 23 loss """nssa""" +48 23 regularizer """no""" +48 23 optimizer """adam""" +48 23 training_loop """owa""" +48 23 negative_sampler """basic""" +48 23 evaluator """rankbased""" +48 24 dataset """wn18rr""" +48 24 model """complex""" +48 24 loss """nssa""" +48 24 regularizer """no""" +48 24 optimizer """adam""" +48 24 training_loop """owa""" +48 24 negative_sampler """basic""" +48 24 evaluator """rankbased""" +48 25 dataset """wn18rr""" +48 25 model """complex""" +48 25 loss """nssa""" +48 25 regularizer """no""" +48 25 optimizer """adam""" +48 25 training_loop """owa""" +48 25 negative_sampler """basic""" +48 25 evaluator """rankbased""" +48 26 dataset """wn18rr""" +48 26 model """complex""" +48 26 loss """nssa""" +48 26 regularizer """no""" +48 26 optimizer """adam""" +48 26 training_loop """owa""" +48 26 negative_sampler """basic""" +48 26 evaluator """rankbased""" +48 27 dataset """wn18rr""" +48 27 model """complex""" +48 27 loss """nssa""" +48 27 regularizer """no""" +48 27 optimizer """adam""" +48 27 training_loop """owa""" +48 27 negative_sampler """basic""" +48 27 evaluator """rankbased""" +48 28 dataset """wn18rr""" +48 28 model """complex""" +48 28 loss """nssa""" +48 28 regularizer """no""" +48 28 optimizer """adam""" +48 28 training_loop """owa""" +48 28 negative_sampler """basic""" +48 28 evaluator """rankbased""" +48 29 dataset """wn18rr""" +48 29 model """complex""" +48 29 loss """nssa""" +48 29 regularizer """no""" +48 
29 optimizer """adam""" +48 29 training_loop """owa""" +48 29 negative_sampler """basic""" +48 29 evaluator """rankbased""" +48 30 dataset """wn18rr""" +48 30 model """complex""" +48 30 loss """nssa""" +48 30 regularizer """no""" +48 30 optimizer """adam""" +48 30 training_loop """owa""" +48 30 negative_sampler """basic""" +48 30 evaluator """rankbased""" +48 31 dataset """wn18rr""" +48 31 model """complex""" +48 31 loss """nssa""" +48 31 regularizer """no""" +48 31 optimizer """adam""" +48 31 training_loop """owa""" +48 31 negative_sampler """basic""" +48 31 evaluator """rankbased""" +48 32 dataset """wn18rr""" +48 32 model """complex""" +48 32 loss """nssa""" +48 32 regularizer """no""" +48 32 optimizer """adam""" +48 32 training_loop """owa""" +48 32 negative_sampler """basic""" +48 32 evaluator """rankbased""" +48 33 dataset """wn18rr""" +48 33 model """complex""" +48 33 loss """nssa""" +48 33 regularizer """no""" +48 33 optimizer """adam""" +48 33 training_loop """owa""" +48 33 negative_sampler """basic""" +48 33 evaluator """rankbased""" +48 34 dataset """wn18rr""" +48 34 model """complex""" +48 34 loss """nssa""" +48 34 regularizer """no""" +48 34 optimizer """adam""" +48 34 training_loop """owa""" +48 34 negative_sampler """basic""" +48 34 evaluator """rankbased""" +48 35 dataset """wn18rr""" +48 35 model """complex""" +48 35 loss """nssa""" +48 35 regularizer """no""" +48 35 optimizer """adam""" +48 35 training_loop """owa""" +48 35 negative_sampler """basic""" +48 35 evaluator """rankbased""" +48 36 dataset """wn18rr""" +48 36 model """complex""" +48 36 loss """nssa""" +48 36 regularizer """no""" +48 36 optimizer """adam""" +48 36 training_loop """owa""" +48 36 negative_sampler """basic""" +48 36 evaluator """rankbased""" +48 37 dataset """wn18rr""" +48 37 model """complex""" +48 37 loss """nssa""" +48 37 regularizer """no""" +48 37 optimizer """adam""" +48 37 training_loop """owa""" +48 37 negative_sampler """basic""" +48 37 evaluator """rankbased""" +48 
38 dataset """wn18rr""" +48 38 model """complex""" +48 38 loss """nssa""" +48 38 regularizer """no""" +48 38 optimizer """adam""" +48 38 training_loop """owa""" +48 38 negative_sampler """basic""" +48 38 evaluator """rankbased""" +48 39 dataset """wn18rr""" +48 39 model """complex""" +48 39 loss """nssa""" +48 39 regularizer """no""" +48 39 optimizer """adam""" +48 39 training_loop """owa""" +48 39 negative_sampler """basic""" +48 39 evaluator """rankbased""" +48 40 dataset """wn18rr""" +48 40 model """complex""" +48 40 loss """nssa""" +48 40 regularizer """no""" +48 40 optimizer """adam""" +48 40 training_loop """owa""" +48 40 negative_sampler """basic""" +48 40 evaluator """rankbased""" +48 41 dataset """wn18rr""" +48 41 model """complex""" +48 41 loss """nssa""" +48 41 regularizer """no""" +48 41 optimizer """adam""" +48 41 training_loop """owa""" +48 41 negative_sampler """basic""" +48 41 evaluator """rankbased""" +48 42 dataset """wn18rr""" +48 42 model """complex""" +48 42 loss """nssa""" +48 42 regularizer """no""" +48 42 optimizer """adam""" +48 42 training_loop """owa""" +48 42 negative_sampler """basic""" +48 42 evaluator """rankbased""" +48 43 dataset """wn18rr""" +48 43 model """complex""" +48 43 loss """nssa""" +48 43 regularizer """no""" +48 43 optimizer """adam""" +48 43 training_loop """owa""" +48 43 negative_sampler """basic""" +48 43 evaluator """rankbased""" +48 44 dataset """wn18rr""" +48 44 model """complex""" +48 44 loss """nssa""" +48 44 regularizer """no""" +48 44 optimizer """adam""" +48 44 training_loop """owa""" +48 44 negative_sampler """basic""" +48 44 evaluator """rankbased""" +48 45 dataset """wn18rr""" +48 45 model """complex""" +48 45 loss """nssa""" +48 45 regularizer """no""" +48 45 optimizer """adam""" +48 45 training_loop """owa""" +48 45 negative_sampler """basic""" +48 45 evaluator """rankbased""" +48 46 dataset """wn18rr""" +48 46 model """complex""" +48 46 loss """nssa""" +48 46 regularizer """no""" +48 46 optimizer 
"""adam""" +48 46 training_loop """owa""" +48 46 negative_sampler """basic""" +48 46 evaluator """rankbased""" +48 47 dataset """wn18rr""" +48 47 model """complex""" +48 47 loss """nssa""" +48 47 regularizer """no""" +48 47 optimizer """adam""" +48 47 training_loop """owa""" +48 47 negative_sampler """basic""" +48 47 evaluator """rankbased""" +48 48 dataset """wn18rr""" +48 48 model """complex""" +48 48 loss """nssa""" +48 48 regularizer """no""" +48 48 optimizer """adam""" +48 48 training_loop """owa""" +48 48 negative_sampler """basic""" +48 48 evaluator """rankbased""" +48 49 dataset """wn18rr""" +48 49 model """complex""" +48 49 loss """nssa""" +48 49 regularizer """no""" +48 49 optimizer """adam""" +48 49 training_loop """owa""" +48 49 negative_sampler """basic""" +48 49 evaluator """rankbased""" +48 50 dataset """wn18rr""" +48 50 model """complex""" +48 50 loss """nssa""" +48 50 regularizer """no""" +48 50 optimizer """adam""" +48 50 training_loop """owa""" +48 50 negative_sampler """basic""" +48 50 evaluator """rankbased""" +48 51 dataset """wn18rr""" +48 51 model """complex""" +48 51 loss """nssa""" +48 51 regularizer """no""" +48 51 optimizer """adam""" +48 51 training_loop """owa""" +48 51 negative_sampler """basic""" +48 51 evaluator """rankbased""" +48 52 dataset """wn18rr""" +48 52 model """complex""" +48 52 loss """nssa""" +48 52 regularizer """no""" +48 52 optimizer """adam""" +48 52 training_loop """owa""" +48 52 negative_sampler """basic""" +48 52 evaluator """rankbased""" +49 1 model.embedding_dim 1.0 +49 1 loss.margin 27.952431402403473 +49 1 loss.adversarial_temperature 0.3704608884180404 +49 1 optimizer.lr 0.024987579534106755 +49 1 negative_sampler.num_negs_per_pos 65.0 +49 1 training.batch_size 0.0 +49 2 model.embedding_dim 2.0 +49 2 loss.margin 21.56880663863768 +49 2 loss.adversarial_temperature 0.209737388830857 +49 2 optimizer.lr 0.0015582052743003194 +49 2 negative_sampler.num_negs_per_pos 57.0 +49 2 training.batch_size 0.0 +49 3 
model.embedding_dim 1.0 +49 3 loss.margin 14.657292078243582 +49 3 loss.adversarial_temperature 0.27261773263607825 +49 3 optimizer.lr 0.003511434033858889 +49 3 negative_sampler.num_negs_per_pos 82.0 +49 3 training.batch_size 2.0 +49 4 model.embedding_dim 2.0 +49 4 loss.margin 25.695183771157797 +49 4 loss.adversarial_temperature 0.138685090086126 +49 4 optimizer.lr 0.0014970160347511413 +49 4 negative_sampler.num_negs_per_pos 15.0 +49 4 training.batch_size 0.0 +49 5 model.embedding_dim 0.0 +49 5 loss.margin 12.817458286336224 +49 5 loss.adversarial_temperature 0.722894759161368 +49 5 optimizer.lr 0.03696361478658153 +49 5 negative_sampler.num_negs_per_pos 11.0 +49 5 training.batch_size 1.0 +49 6 model.embedding_dim 0.0 +49 6 loss.margin 28.73678245660777 +49 6 loss.adversarial_temperature 0.4269582183021797 +49 6 optimizer.lr 0.014737024151284148 +49 6 negative_sampler.num_negs_per_pos 42.0 +49 6 training.batch_size 1.0 +49 7 model.embedding_dim 2.0 +49 7 loss.margin 17.12054866024477 +49 7 loss.adversarial_temperature 0.9989737786821752 +49 7 optimizer.lr 0.015460181214791731 +49 7 negative_sampler.num_negs_per_pos 27.0 +49 7 training.batch_size 0.0 +49 8 model.embedding_dim 2.0 +49 8 loss.margin 13.76492429754687 +49 8 loss.adversarial_temperature 0.23333333076342205 +49 8 optimizer.lr 0.0110458899284407 +49 8 negative_sampler.num_negs_per_pos 69.0 +49 8 training.batch_size 1.0 +49 9 model.embedding_dim 0.0 +49 9 loss.margin 21.69808286473479 +49 9 loss.adversarial_temperature 0.6094716292360849 +49 9 optimizer.lr 0.006932397588991409 +49 9 negative_sampler.num_negs_per_pos 83.0 +49 9 training.batch_size 2.0 +49 10 model.embedding_dim 0.0 +49 10 loss.margin 25.115674455438555 +49 10 loss.adversarial_temperature 0.7777505753893091 +49 10 optimizer.lr 0.0038010789515946076 +49 10 negative_sampler.num_negs_per_pos 19.0 +49 10 training.batch_size 0.0 +49 11 model.embedding_dim 1.0 +49 11 loss.margin 17.00213101306187 +49 11 loss.adversarial_temperature 
0.13721333560556911 +49 11 optimizer.lr 0.03958229324305405 +49 11 negative_sampler.num_negs_per_pos 13.0 +49 11 training.batch_size 1.0 +49 12 model.embedding_dim 2.0 +49 12 loss.margin 22.188318505540146 +49 12 loss.adversarial_temperature 0.2108389427911055 +49 12 optimizer.lr 0.046420613264196935 +49 12 negative_sampler.num_negs_per_pos 70.0 +49 12 training.batch_size 2.0 +49 13 model.embedding_dim 1.0 +49 13 loss.margin 25.43462197055179 +49 13 loss.adversarial_temperature 0.880443087023565 +49 13 optimizer.lr 0.02379261654314638 +49 13 negative_sampler.num_negs_per_pos 60.0 +49 13 training.batch_size 1.0 +49 14 model.embedding_dim 2.0 +49 14 loss.margin 14.518634492845369 +49 14 loss.adversarial_temperature 0.6653118992407451 +49 14 optimizer.lr 0.006933102522727684 +49 14 negative_sampler.num_negs_per_pos 7.0 +49 14 training.batch_size 2.0 +49 15 model.embedding_dim 0.0 +49 15 loss.margin 27.852243665758547 +49 15 loss.adversarial_temperature 0.399926514594122 +49 15 optimizer.lr 0.012482281841112107 +49 15 negative_sampler.num_negs_per_pos 62.0 +49 15 training.batch_size 2.0 +49 16 model.embedding_dim 2.0 +49 16 loss.margin 17.359455201416505 +49 16 loss.adversarial_temperature 0.8068083178801826 +49 16 optimizer.lr 0.042168663940091396 +49 16 negative_sampler.num_negs_per_pos 46.0 +49 16 training.batch_size 1.0 +49 17 model.embedding_dim 1.0 +49 17 loss.margin 20.472671245400736 +49 17 loss.adversarial_temperature 0.5475218820888579 +49 17 optimizer.lr 0.060675025858288066 +49 17 negative_sampler.num_negs_per_pos 32.0 +49 17 training.batch_size 1.0 +49 18 model.embedding_dim 0.0 +49 18 loss.margin 17.463683281836236 +49 18 loss.adversarial_temperature 0.6922320846519661 +49 18 optimizer.lr 0.002660527225509239 +49 18 negative_sampler.num_negs_per_pos 68.0 +49 18 training.batch_size 0.0 +49 19 model.embedding_dim 0.0 +49 19 loss.margin 16.34964603665718 +49 19 loss.adversarial_temperature 0.32164572172388517 +49 19 optimizer.lr 0.01197889312562089 +49 19 
negative_sampler.num_negs_per_pos 22.0 +49 19 training.batch_size 1.0 +49 20 model.embedding_dim 2.0 +49 20 loss.margin 4.3121670885173895 +49 20 loss.adversarial_temperature 0.119202489463789 +49 20 optimizer.lr 0.056467300405820486 +49 20 negative_sampler.num_negs_per_pos 28.0 +49 20 training.batch_size 0.0 +49 21 model.embedding_dim 2.0 +49 21 loss.margin 21.617492637232907 +49 21 loss.adversarial_temperature 0.44225250433632735 +49 21 optimizer.lr 0.0024782778012614765 +49 21 negative_sampler.num_negs_per_pos 36.0 +49 21 training.batch_size 1.0 +49 22 model.embedding_dim 1.0 +49 22 loss.margin 16.58339527376945 +49 22 loss.adversarial_temperature 0.10030021888873811 +49 22 optimizer.lr 0.002094639140554962 +49 22 negative_sampler.num_negs_per_pos 35.0 +49 22 training.batch_size 1.0 +49 23 model.embedding_dim 1.0 +49 23 loss.margin 24.187763266839823 +49 23 loss.adversarial_temperature 0.6163999349888997 +49 23 optimizer.lr 0.05490769872677637 +49 23 negative_sampler.num_negs_per_pos 37.0 +49 23 training.batch_size 2.0 +49 24 model.embedding_dim 1.0 +49 24 loss.margin 25.760962641852522 +49 24 loss.adversarial_temperature 0.4643104540789171 +49 24 optimizer.lr 0.09726091062655357 +49 24 negative_sampler.num_negs_per_pos 33.0 +49 24 training.batch_size 0.0 +49 25 model.embedding_dim 0.0 +49 25 loss.margin 3.0475282489897735 +49 25 loss.adversarial_temperature 0.25257194752044465 +49 25 optimizer.lr 0.00259125753489356 +49 25 negative_sampler.num_negs_per_pos 73.0 +49 25 training.batch_size 1.0 +49 26 model.embedding_dim 0.0 +49 26 loss.margin 24.29442146490774 +49 26 loss.adversarial_temperature 0.6286011621265021 +49 26 optimizer.lr 0.0010114861062293244 +49 26 negative_sampler.num_negs_per_pos 27.0 +49 26 training.batch_size 0.0 +49 27 model.embedding_dim 1.0 +49 27 loss.margin 21.546010302487307 +49 27 loss.adversarial_temperature 0.2060017721285549 +49 27 optimizer.lr 0.07902939318945666 +49 27 negative_sampler.num_negs_per_pos 41.0 +49 27 training.batch_size 
0.0 +49 28 model.embedding_dim 2.0 +49 28 loss.margin 20.521283675523865 +49 28 loss.adversarial_temperature 0.10715729864135087 +49 28 optimizer.lr 0.00293344589983746 +49 28 negative_sampler.num_negs_per_pos 2.0 +49 28 training.batch_size 0.0 +49 29 model.embedding_dim 0.0 +49 29 loss.margin 7.184883210218184 +49 29 loss.adversarial_temperature 0.7452278837215035 +49 29 optimizer.lr 0.012426191838389122 +49 29 negative_sampler.num_negs_per_pos 58.0 +49 29 training.batch_size 0.0 +49 30 model.embedding_dim 0.0 +49 30 loss.margin 26.894732820793273 +49 30 loss.adversarial_temperature 0.9194873628725565 +49 30 optimizer.lr 0.0014302928216589383 +49 30 negative_sampler.num_negs_per_pos 14.0 +49 30 training.batch_size 2.0 +49 31 model.embedding_dim 1.0 +49 31 loss.margin 19.277710742308297 +49 31 loss.adversarial_temperature 0.24906587927248877 +49 31 optimizer.lr 0.015823236672806478 +49 31 negative_sampler.num_negs_per_pos 75.0 +49 31 training.batch_size 1.0 +49 32 model.embedding_dim 1.0 +49 32 loss.margin 28.48156051921939 +49 32 loss.adversarial_temperature 0.17421171032436308 +49 32 optimizer.lr 0.0013538617410730309 +49 32 negative_sampler.num_negs_per_pos 19.0 +49 32 training.batch_size 2.0 +49 33 model.embedding_dim 1.0 +49 33 loss.margin 7.7878995054468065 +49 33 loss.adversarial_temperature 0.42406723434022514 +49 33 optimizer.lr 0.028131259093044682 +49 33 negative_sampler.num_negs_per_pos 14.0 +49 33 training.batch_size 2.0 +49 34 model.embedding_dim 1.0 +49 34 loss.margin 26.29898640570945 +49 34 loss.adversarial_temperature 0.5651392694315978 +49 34 optimizer.lr 0.008602894531467599 +49 34 negative_sampler.num_negs_per_pos 87.0 +49 34 training.batch_size 1.0 +49 35 model.embedding_dim 0.0 +49 35 loss.margin 6.725770971592475 +49 35 loss.adversarial_temperature 0.9154864200287383 +49 35 optimizer.lr 0.015705845269723354 +49 35 negative_sampler.num_negs_per_pos 26.0 +49 35 training.batch_size 2.0 +49 36 model.embedding_dim 2.0 +49 36 loss.margin 
2.6155955162612083 +49 36 loss.adversarial_temperature 0.40336396835241906 +49 36 optimizer.lr 0.009802707362121936 +49 36 negative_sampler.num_negs_per_pos 20.0 +49 36 training.batch_size 0.0 +49 37 model.embedding_dim 0.0 +49 37 loss.margin 22.569241406651788 +49 37 loss.adversarial_temperature 0.434574680099419 +49 37 optimizer.lr 0.017899244152271128 +49 37 negative_sampler.num_negs_per_pos 82.0 +49 37 training.batch_size 1.0 +49 38 model.embedding_dim 0.0 +49 38 loss.margin 24.38054334480649 +49 38 loss.adversarial_temperature 0.7446823171918668 +49 38 optimizer.lr 0.007949057972533898 +49 38 negative_sampler.num_negs_per_pos 19.0 +49 38 training.batch_size 2.0 +49 39 model.embedding_dim 0.0 +49 39 loss.margin 14.680061634381232 +49 39 loss.adversarial_temperature 0.1137359430634306 +49 39 optimizer.lr 0.006884531120521792 +49 39 negative_sampler.num_negs_per_pos 56.0 +49 39 training.batch_size 2.0 +49 40 model.embedding_dim 2.0 +49 40 loss.margin 16.099824337863872 +49 40 loss.adversarial_temperature 0.19375649283563232 +49 40 optimizer.lr 0.004448519602808172 +49 40 negative_sampler.num_negs_per_pos 40.0 +49 40 training.batch_size 2.0 +49 41 model.embedding_dim 1.0 +49 41 loss.margin 28.870240979447424 +49 41 loss.adversarial_temperature 0.9910828015768025 +49 41 optimizer.lr 0.007557670361710277 +49 41 negative_sampler.num_negs_per_pos 39.0 +49 41 training.batch_size 0.0 +49 42 model.embedding_dim 0.0 +49 42 loss.margin 16.41829647587397 +49 42 loss.adversarial_temperature 0.9753937437301873 +49 42 optimizer.lr 0.0032894581173986107 +49 42 negative_sampler.num_negs_per_pos 1.0 +49 42 training.batch_size 2.0 +49 43 model.embedding_dim 2.0 +49 43 loss.margin 22.989420257435107 +49 43 loss.adversarial_temperature 0.23202348434558606 +49 43 optimizer.lr 0.0037328442040937336 +49 43 negative_sampler.num_negs_per_pos 89.0 +49 43 training.batch_size 1.0 +49 44 model.embedding_dim 1.0 +49 44 loss.margin 2.8750627343051676 +49 44 loss.adversarial_temperature 
0.46876073060077866 +49 44 optimizer.lr 0.013237727618023104 +49 44 negative_sampler.num_negs_per_pos 42.0 +49 44 training.batch_size 1.0 +49 45 model.embedding_dim 1.0 +49 45 loss.margin 29.19782519233597 +49 45 loss.adversarial_temperature 0.6713163525407639 +49 45 optimizer.lr 0.0016726562164620354 +49 45 negative_sampler.num_negs_per_pos 13.0 +49 45 training.batch_size 2.0 +49 46 model.embedding_dim 0.0 +49 46 loss.margin 8.559259012082 +49 46 loss.adversarial_temperature 0.6991006625096348 +49 46 optimizer.lr 0.006493443166350781 +49 46 negative_sampler.num_negs_per_pos 1.0 +49 46 training.batch_size 2.0 +49 47 model.embedding_dim 2.0 +49 47 loss.margin 8.366920890473237 +49 47 loss.adversarial_temperature 0.30124975594820963 +49 47 optimizer.lr 0.01116910420365285 +49 47 negative_sampler.num_negs_per_pos 32.0 +49 47 training.batch_size 2.0 +49 48 model.embedding_dim 1.0 +49 48 loss.margin 4.840622142712616 +49 48 loss.adversarial_temperature 0.8166307753653538 +49 48 optimizer.lr 0.013625352271771082 +49 48 negative_sampler.num_negs_per_pos 39.0 +49 48 training.batch_size 1.0 +49 49 model.embedding_dim 2.0 +49 49 loss.margin 27.53294873194548 +49 49 loss.adversarial_temperature 0.9009328728439736 +49 49 optimizer.lr 0.04360713968197314 +49 49 negative_sampler.num_negs_per_pos 93.0 +49 49 training.batch_size 1.0 +49 50 model.embedding_dim 0.0 +49 50 loss.margin 7.871806251820068 +49 50 loss.adversarial_temperature 0.3879455872301837 +49 50 optimizer.lr 0.05714895955847644 +49 50 negative_sampler.num_negs_per_pos 81.0 +49 50 training.batch_size 1.0 +49 51 model.embedding_dim 0.0 +49 51 loss.margin 13.680330261103762 +49 51 loss.adversarial_temperature 0.6709621454687272 +49 51 optimizer.lr 0.057323675864182175 +49 51 negative_sampler.num_negs_per_pos 27.0 +49 51 training.batch_size 1.0 +49 52 model.embedding_dim 0.0 +49 52 loss.margin 13.66449559483667 +49 52 loss.adversarial_temperature 0.7618645460056657 +49 52 optimizer.lr 0.01293647122145714 +49 52 
negative_sampler.num_negs_per_pos 33.0 +49 52 training.batch_size 1.0 +49 53 model.embedding_dim 2.0 +49 53 loss.margin 5.7176422261719395 +49 53 loss.adversarial_temperature 0.6668023875018892 +49 53 optimizer.lr 0.03071998048096589 +49 53 negative_sampler.num_negs_per_pos 87.0 +49 53 training.batch_size 2.0 +49 54 model.embedding_dim 0.0 +49 54 loss.margin 21.746532331524442 +49 54 loss.adversarial_temperature 0.21666489634721306 +49 54 optimizer.lr 0.0011326862475780194 +49 54 negative_sampler.num_negs_per_pos 5.0 +49 54 training.batch_size 2.0 +49 55 model.embedding_dim 1.0 +49 55 loss.margin 26.298360913726682 +49 55 loss.adversarial_temperature 0.7839885954040648 +49 55 optimizer.lr 0.07519108320983531 +49 55 negative_sampler.num_negs_per_pos 94.0 +49 55 training.batch_size 0.0 +49 56 model.embedding_dim 1.0 +49 56 loss.margin 10.340883715206118 +49 56 loss.adversarial_temperature 0.14580661814727838 +49 56 optimizer.lr 0.0010618761562451457 +49 56 negative_sampler.num_negs_per_pos 46.0 +49 56 training.batch_size 2.0 +49 57 model.embedding_dim 1.0 +49 57 loss.margin 2.5698108722159074 +49 57 loss.adversarial_temperature 0.9676902041167194 +49 57 optimizer.lr 0.0017996606091567385 +49 57 negative_sampler.num_negs_per_pos 79.0 +49 57 training.batch_size 2.0 +49 58 model.embedding_dim 0.0 +49 58 loss.margin 23.174218500283406 +49 58 loss.adversarial_temperature 0.4605379583058012 +49 58 optimizer.lr 0.0010411872609032355 +49 58 negative_sampler.num_negs_per_pos 50.0 +49 58 training.batch_size 2.0 +49 59 model.embedding_dim 1.0 +49 59 loss.margin 2.4694015259652593 +49 59 loss.adversarial_temperature 0.42323579512944276 +49 59 optimizer.lr 0.0015062009975137056 +49 59 negative_sampler.num_negs_per_pos 32.0 +49 59 training.batch_size 0.0 +49 60 model.embedding_dim 1.0 +49 60 loss.margin 28.566333243349433 +49 60 loss.adversarial_temperature 0.17874142636424428 +49 60 optimizer.lr 0.00233286463871524 +49 60 negative_sampler.num_negs_per_pos 92.0 +49 60 
training.batch_size 0.0 +49 61 model.embedding_dim 0.0 +49 61 loss.margin 5.643273444015074 +49 61 loss.adversarial_temperature 0.7934283629725251 +49 61 optimizer.lr 0.027384939375228422 +49 61 negative_sampler.num_negs_per_pos 68.0 +49 61 training.batch_size 0.0 +49 62 model.embedding_dim 2.0 +49 62 loss.margin 23.852440928843905 +49 62 loss.adversarial_temperature 0.3488619426814904 +49 62 optimizer.lr 0.0017546968564364118 +49 62 negative_sampler.num_negs_per_pos 69.0 +49 62 training.batch_size 2.0 +49 63 model.embedding_dim 1.0 +49 63 loss.margin 24.013033096303662 +49 63 loss.adversarial_temperature 0.30047180639938625 +49 63 optimizer.lr 0.019310643952251693 +49 63 negative_sampler.num_negs_per_pos 2.0 +49 63 training.batch_size 1.0 +49 64 model.embedding_dim 2.0 +49 64 loss.margin 4.736787988180898 +49 64 loss.adversarial_temperature 0.8038589603908212 +49 64 optimizer.lr 0.003141356986469524 +49 64 negative_sampler.num_negs_per_pos 32.0 +49 64 training.batch_size 2.0 +49 65 model.embedding_dim 2.0 +49 65 loss.margin 11.669024601798043 +49 65 loss.adversarial_temperature 0.9517634559034689 +49 65 optimizer.lr 0.04021626793050055 +49 65 negative_sampler.num_negs_per_pos 28.0 +49 65 training.batch_size 0.0 +49 66 model.embedding_dim 2.0 +49 66 loss.margin 12.80262609990965 +49 66 loss.adversarial_temperature 0.9449637862220049 +49 66 optimizer.lr 0.0182675762097941 +49 66 negative_sampler.num_negs_per_pos 6.0 +49 66 training.batch_size 2.0 +49 67 model.embedding_dim 1.0 +49 67 loss.margin 24.242829807400156 +49 67 loss.adversarial_temperature 0.3914568552206939 +49 67 optimizer.lr 0.006041001418706999 +49 67 negative_sampler.num_negs_per_pos 35.0 +49 67 training.batch_size 0.0 +49 68 model.embedding_dim 1.0 +49 68 loss.margin 1.364826632863064 +49 68 loss.adversarial_temperature 0.8612906068580446 +49 68 optimizer.lr 0.010044738064696603 +49 68 negative_sampler.num_negs_per_pos 58.0 +49 68 training.batch_size 0.0 +49 69 model.embedding_dim 1.0 +49 69 
loss.margin 25.662419457210977 +49 69 loss.adversarial_temperature 0.7181694280029313 +49 69 optimizer.lr 0.08902521907984155 +49 69 negative_sampler.num_negs_per_pos 9.0 +49 69 training.batch_size 2.0 +49 70 model.embedding_dim 1.0 +49 70 loss.margin 18.570966400022385 +49 70 loss.adversarial_temperature 0.10196292203838042 +49 70 optimizer.lr 0.002720913536516527 +49 70 negative_sampler.num_negs_per_pos 42.0 +49 70 training.batch_size 0.0 +49 71 model.embedding_dim 2.0 +49 71 loss.margin 18.33522568973207 +49 71 loss.adversarial_temperature 0.6089241888169531 +49 71 optimizer.lr 0.03188115481053446 +49 71 negative_sampler.num_negs_per_pos 14.0 +49 71 training.batch_size 1.0 +49 72 model.embedding_dim 0.0 +49 72 loss.margin 26.8934772711632 +49 72 loss.adversarial_temperature 0.9814453570958498 +49 72 optimizer.lr 0.01330000737960308 +49 72 negative_sampler.num_negs_per_pos 79.0 +49 72 training.batch_size 2.0 +49 73 model.embedding_dim 1.0 +49 73 loss.margin 24.486455180494676 +49 73 loss.adversarial_temperature 0.10458301209716842 +49 73 optimizer.lr 0.042172634255003526 +49 73 negative_sampler.num_negs_per_pos 33.0 +49 73 training.batch_size 2.0 +49 74 model.embedding_dim 0.0 +49 74 loss.margin 26.769076021085795 +49 74 loss.adversarial_temperature 0.31418324823274724 +49 74 optimizer.lr 0.09938867345572265 +49 74 negative_sampler.num_negs_per_pos 13.0 +49 74 training.batch_size 1.0 +49 75 model.embedding_dim 1.0 +49 75 loss.margin 1.8146619592346438 +49 75 loss.adversarial_temperature 0.6851486300741993 +49 75 optimizer.lr 0.04428089455076425 +49 75 negative_sampler.num_negs_per_pos 0.0 +49 75 training.batch_size 1.0 +49 76 model.embedding_dim 1.0 +49 76 loss.margin 2.768480745079546 +49 76 loss.adversarial_temperature 0.8839090220508737 +49 76 optimizer.lr 0.00876619488079304 +49 76 negative_sampler.num_negs_per_pos 10.0 +49 76 training.batch_size 0.0 +49 77 model.embedding_dim 2.0 +49 77 loss.margin 2.8420875888956614 +49 77 loss.adversarial_temperature 
0.22151731638869948 +49 77 optimizer.lr 0.08039254593759163 +49 77 negative_sampler.num_negs_per_pos 40.0 +49 77 training.batch_size 0.0 +49 78 model.embedding_dim 0.0 +49 78 loss.margin 15.935403707010602 +49 78 loss.adversarial_temperature 0.4811295248109306 +49 78 optimizer.lr 0.0016632354059789526 +49 78 negative_sampler.num_negs_per_pos 14.0 +49 78 training.batch_size 0.0 +49 79 model.embedding_dim 2.0 +49 79 loss.margin 22.268207299588372 +49 79 loss.adversarial_temperature 0.434397962282883 +49 79 optimizer.lr 0.002264007032340834 +49 79 negative_sampler.num_negs_per_pos 41.0 +49 79 training.batch_size 2.0 +49 80 model.embedding_dim 0.0 +49 80 loss.margin 6.136994411546729 +49 80 loss.adversarial_temperature 0.6087985105894297 +49 80 optimizer.lr 0.01084390989652785 +49 80 negative_sampler.num_negs_per_pos 86.0 +49 80 training.batch_size 0.0 +49 81 model.embedding_dim 1.0 +49 81 loss.margin 13.83636183877193 +49 81 loss.adversarial_temperature 0.4825709729907457 +49 81 optimizer.lr 0.01944643600841711 +49 81 negative_sampler.num_negs_per_pos 35.0 +49 81 training.batch_size 0.0 +49 82 model.embedding_dim 2.0 +49 82 loss.margin 18.874066362520256 +49 82 loss.adversarial_temperature 0.5500987662033201 +49 82 optimizer.lr 0.001530205676706071 +49 82 negative_sampler.num_negs_per_pos 44.0 +49 82 training.batch_size 1.0 +49 83 model.embedding_dim 0.0 +49 83 loss.margin 16.874493065144335 +49 83 loss.adversarial_temperature 0.8426793887389644 +49 83 optimizer.lr 0.01265215701758257 +49 83 negative_sampler.num_negs_per_pos 90.0 +49 83 training.batch_size 2.0 +49 84 model.embedding_dim 2.0 +49 84 loss.margin 27.0114519684627 +49 84 loss.adversarial_temperature 0.6147851802027414 +49 84 optimizer.lr 0.00807603303718132 +49 84 negative_sampler.num_negs_per_pos 71.0 +49 84 training.batch_size 2.0 +49 85 model.embedding_dim 2.0 +49 85 loss.margin 8.17109272310805 +49 85 loss.adversarial_temperature 0.14402612922360597 +49 85 optimizer.lr 0.002077729417381831 +49 85 
negative_sampler.num_negs_per_pos 26.0 +49 85 training.batch_size 0.0 +49 86 model.embedding_dim 0.0 +49 86 loss.margin 4.161072724473544 +49 86 loss.adversarial_temperature 0.8728446749015587 +49 86 optimizer.lr 0.0141995771253288 +49 86 negative_sampler.num_negs_per_pos 50.0 +49 86 training.batch_size 1.0 +49 87 model.embedding_dim 0.0 +49 87 loss.margin 10.204464912243193 +49 87 loss.adversarial_temperature 0.9207746193744938 +49 87 optimizer.lr 0.07103371645054095 +49 87 negative_sampler.num_negs_per_pos 75.0 +49 87 training.batch_size 1.0 +49 88 model.embedding_dim 1.0 +49 88 loss.margin 3.7787952463565073 +49 88 loss.adversarial_temperature 0.21503672443131128 +49 88 optimizer.lr 0.014632116968698525 +49 88 negative_sampler.num_negs_per_pos 97.0 +49 88 training.batch_size 2.0 +49 89 model.embedding_dim 1.0 +49 89 loss.margin 9.392220090686306 +49 89 loss.adversarial_temperature 0.6955527719193726 +49 89 optimizer.lr 0.06284911163280434 +49 89 negative_sampler.num_negs_per_pos 14.0 +49 89 training.batch_size 0.0 +49 90 model.embedding_dim 2.0 +49 90 loss.margin 20.25060444392563 +49 90 loss.adversarial_temperature 0.8147045617231123 +49 90 optimizer.lr 0.0032199359395074186 +49 90 negative_sampler.num_negs_per_pos 26.0 +49 90 training.batch_size 0.0 +49 91 model.embedding_dim 2.0 +49 91 loss.margin 7.524488963160879 +49 91 loss.adversarial_temperature 0.1996864795353774 +49 91 optimizer.lr 0.06694323736649681 +49 91 negative_sampler.num_negs_per_pos 49.0 +49 91 training.batch_size 1.0 +49 92 model.embedding_dim 1.0 +49 92 loss.margin 1.8525478842497185 +49 92 loss.adversarial_temperature 0.5262560425973022 +49 92 optimizer.lr 0.011020548283234767 +49 92 negative_sampler.num_negs_per_pos 82.0 +49 92 training.batch_size 2.0 +49 93 model.embedding_dim 0.0 +49 93 loss.margin 20.364844387034758 +49 93 loss.adversarial_temperature 0.5808535144292823 +49 93 optimizer.lr 0.07558231650544275 +49 93 negative_sampler.num_negs_per_pos 23.0 +49 93 training.batch_size 1.0 
+49 94 model.embedding_dim 0.0 +49 94 loss.margin 12.20076944101862 +49 94 loss.adversarial_temperature 0.90335830840116 +49 94 optimizer.lr 0.06506649668014021 +49 94 negative_sampler.num_negs_per_pos 29.0 +49 94 training.batch_size 1.0 +49 95 model.embedding_dim 2.0 +49 95 loss.margin 27.77286162720493 +49 95 loss.adversarial_temperature 0.738862129236743 +49 95 optimizer.lr 0.0032079400170678383 +49 95 negative_sampler.num_negs_per_pos 17.0 +49 95 training.batch_size 1.0 +49 96 model.embedding_dim 1.0 +49 96 loss.margin 26.939425635071327 +49 96 loss.adversarial_temperature 0.32640226279566553 +49 96 optimizer.lr 0.08818102056958903 +49 96 negative_sampler.num_negs_per_pos 10.0 +49 96 training.batch_size 0.0 +49 97 model.embedding_dim 1.0 +49 97 loss.margin 16.914860056766717 +49 97 loss.adversarial_temperature 0.730274912262417 +49 97 optimizer.lr 0.013594370864555696 +49 97 negative_sampler.num_negs_per_pos 42.0 +49 97 training.batch_size 2.0 +49 98 model.embedding_dim 2.0 +49 98 loss.margin 15.469481947166788 +49 98 loss.adversarial_temperature 0.5006889527140231 +49 98 optimizer.lr 0.0012708167809691038 +49 98 negative_sampler.num_negs_per_pos 20.0 +49 98 training.batch_size 0.0 +49 99 model.embedding_dim 1.0 +49 99 loss.margin 9.862901314413103 +49 99 loss.adversarial_temperature 0.4156543622168687 +49 99 optimizer.lr 0.001749378906621673 +49 99 negative_sampler.num_negs_per_pos 80.0 +49 99 training.batch_size 2.0 +49 1 dataset """wn18rr""" +49 1 model """complex""" +49 1 loss """nssa""" +49 1 regularizer """no""" +49 1 optimizer """adam""" +49 1 training_loop """owa""" +49 1 negative_sampler """basic""" +49 1 evaluator """rankbased""" +49 2 dataset """wn18rr""" +49 2 model """complex""" +49 2 loss """nssa""" +49 2 regularizer """no""" +49 2 optimizer """adam""" +49 2 training_loop """owa""" +49 2 negative_sampler """basic""" +49 2 evaluator """rankbased""" +49 3 dataset """wn18rr""" +49 3 model """complex""" +49 3 loss """nssa""" +49 3 regularizer """no""" 
+49 3 optimizer """adam""" +49 3 training_loop """owa""" +49 3 negative_sampler """basic""" +49 3 evaluator """rankbased""" +49 4 dataset """wn18rr""" +49 4 model """complex""" +49 4 loss """nssa""" +49 4 regularizer """no""" +49 4 optimizer """adam""" +49 4 training_loop """owa""" +49 4 negative_sampler """basic""" +49 4 evaluator """rankbased""" +49 5 dataset """wn18rr""" +49 5 model """complex""" +49 5 loss """nssa""" +49 5 regularizer """no""" +49 5 optimizer """adam""" +49 5 training_loop """owa""" +49 5 negative_sampler """basic""" +49 5 evaluator """rankbased""" +49 6 dataset """wn18rr""" +49 6 model """complex""" +49 6 loss """nssa""" +49 6 regularizer """no""" +49 6 optimizer """adam""" +49 6 training_loop """owa""" +49 6 negative_sampler """basic""" +49 6 evaluator """rankbased""" +49 7 dataset """wn18rr""" +49 7 model """complex""" +49 7 loss """nssa""" +49 7 regularizer """no""" +49 7 optimizer """adam""" +49 7 training_loop """owa""" +49 7 negative_sampler """basic""" +49 7 evaluator """rankbased""" +49 8 dataset """wn18rr""" +49 8 model """complex""" +49 8 loss """nssa""" +49 8 regularizer """no""" +49 8 optimizer """adam""" +49 8 training_loop """owa""" +49 8 negative_sampler """basic""" +49 8 evaluator """rankbased""" +49 9 dataset """wn18rr""" +49 9 model """complex""" +49 9 loss """nssa""" +49 9 regularizer """no""" +49 9 optimizer """adam""" +49 9 training_loop """owa""" +49 9 negative_sampler """basic""" +49 9 evaluator """rankbased""" +49 10 dataset """wn18rr""" +49 10 model """complex""" +49 10 loss """nssa""" +49 10 regularizer """no""" +49 10 optimizer """adam""" +49 10 training_loop """owa""" +49 10 negative_sampler """basic""" +49 10 evaluator """rankbased""" +49 11 dataset """wn18rr""" +49 11 model """complex""" +49 11 loss """nssa""" +49 11 regularizer """no""" +49 11 optimizer """adam""" +49 11 training_loop """owa""" +49 11 negative_sampler """basic""" +49 11 evaluator """rankbased""" +49 12 dataset """wn18rr""" +49 12 model 
"""complex""" +49 12 loss """nssa""" +49 12 regularizer """no""" +49 12 optimizer """adam""" +49 12 training_loop """owa""" +49 12 negative_sampler """basic""" +49 12 evaluator """rankbased""" +49 13 dataset """wn18rr""" +49 13 model """complex""" +49 13 loss """nssa""" +49 13 regularizer """no""" +49 13 optimizer """adam""" +49 13 training_loop """owa""" +49 13 negative_sampler """basic""" +49 13 evaluator """rankbased""" +49 14 dataset """wn18rr""" +49 14 model """complex""" +49 14 loss """nssa""" +49 14 regularizer """no""" +49 14 optimizer """adam""" +49 14 training_loop """owa""" +49 14 negative_sampler """basic""" +49 14 evaluator """rankbased""" +49 15 dataset """wn18rr""" +49 15 model """complex""" +49 15 loss """nssa""" +49 15 regularizer """no""" +49 15 optimizer """adam""" +49 15 training_loop """owa""" +49 15 negative_sampler """basic""" +49 15 evaluator """rankbased""" +49 16 dataset """wn18rr""" +49 16 model """complex""" +49 16 loss """nssa""" +49 16 regularizer """no""" +49 16 optimizer """adam""" +49 16 training_loop """owa""" +49 16 negative_sampler """basic""" +49 16 evaluator """rankbased""" +49 17 dataset """wn18rr""" +49 17 model """complex""" +49 17 loss """nssa""" +49 17 regularizer """no""" +49 17 optimizer """adam""" +49 17 training_loop """owa""" +49 17 negative_sampler """basic""" +49 17 evaluator """rankbased""" +49 18 dataset """wn18rr""" +49 18 model """complex""" +49 18 loss """nssa""" +49 18 regularizer """no""" +49 18 optimizer """adam""" +49 18 training_loop """owa""" +49 18 negative_sampler """basic""" +49 18 evaluator """rankbased""" +49 19 dataset """wn18rr""" +49 19 model """complex""" +49 19 loss """nssa""" +49 19 regularizer """no""" +49 19 optimizer """adam""" +49 19 training_loop """owa""" +49 19 negative_sampler """basic""" +49 19 evaluator """rankbased""" +49 20 dataset """wn18rr""" +49 20 model """complex""" +49 20 loss """nssa""" +49 20 regularizer """no""" +49 20 optimizer """adam""" +49 20 training_loop """owa""" +49 
20 negative_sampler """basic""" +49 20 evaluator """rankbased""" +49 21 dataset """wn18rr""" +49 21 model """complex""" +49 21 loss """nssa""" +49 21 regularizer """no""" +49 21 optimizer """adam""" +49 21 training_loop """owa""" +49 21 negative_sampler """basic""" +49 21 evaluator """rankbased""" +49 22 dataset """wn18rr""" +49 22 model """complex""" +49 22 loss """nssa""" +49 22 regularizer """no""" +49 22 optimizer """adam""" +49 22 training_loop """owa""" +49 22 negative_sampler """basic""" +49 22 evaluator """rankbased""" +49 23 dataset """wn18rr""" +49 23 model """complex""" +49 23 loss """nssa""" +49 23 regularizer """no""" +49 23 optimizer """adam""" +49 23 training_loop """owa""" +49 23 negative_sampler """basic""" +49 23 evaluator """rankbased""" +49 24 dataset """wn18rr""" +49 24 model """complex""" +49 24 loss """nssa""" +49 24 regularizer """no""" +49 24 optimizer """adam""" +49 24 training_loop """owa""" +49 24 negative_sampler """basic""" +49 24 evaluator """rankbased""" +49 25 dataset """wn18rr""" +49 25 model """complex""" +49 25 loss """nssa""" +49 25 regularizer """no""" +49 25 optimizer """adam""" +49 25 training_loop """owa""" +49 25 negative_sampler """basic""" +49 25 evaluator """rankbased""" +49 26 dataset """wn18rr""" +49 26 model """complex""" +49 26 loss """nssa""" +49 26 regularizer """no""" +49 26 optimizer """adam""" +49 26 training_loop """owa""" +49 26 negative_sampler """basic""" +49 26 evaluator """rankbased""" +49 27 dataset """wn18rr""" +49 27 model """complex""" +49 27 loss """nssa""" +49 27 regularizer """no""" +49 27 optimizer """adam""" +49 27 training_loop """owa""" +49 27 negative_sampler """basic""" +49 27 evaluator """rankbased""" +49 28 dataset """wn18rr""" +49 28 model """complex""" +49 28 loss """nssa""" +49 28 regularizer """no""" +49 28 optimizer """adam""" +49 28 training_loop """owa""" +49 28 negative_sampler """basic""" +49 28 evaluator """rankbased""" +49 29 dataset """wn18rr""" +49 29 model """complex""" +49 29 
loss """nssa""" +49 29 regularizer """no""" +49 29 optimizer """adam""" +49 29 training_loop """owa""" +49 29 negative_sampler """basic""" +49 29 evaluator """rankbased""" +49 30 dataset """wn18rr""" +49 30 model """complex""" +49 30 loss """nssa""" +49 30 regularizer """no""" +49 30 optimizer """adam""" +49 30 training_loop """owa""" +49 30 negative_sampler """basic""" +49 30 evaluator """rankbased""" +49 31 dataset """wn18rr""" +49 31 model """complex""" +49 31 loss """nssa""" +49 31 regularizer """no""" +49 31 optimizer """adam""" +49 31 training_loop """owa""" +49 31 negative_sampler """basic""" +49 31 evaluator """rankbased""" +49 32 dataset """wn18rr""" +49 32 model """complex""" +49 32 loss """nssa""" +49 32 regularizer """no""" +49 32 optimizer """adam""" +49 32 training_loop """owa""" +49 32 negative_sampler """basic""" +49 32 evaluator """rankbased""" +49 33 dataset """wn18rr""" +49 33 model """complex""" +49 33 loss """nssa""" +49 33 regularizer """no""" +49 33 optimizer """adam""" +49 33 training_loop """owa""" +49 33 negative_sampler """basic""" +49 33 evaluator """rankbased""" +49 34 dataset """wn18rr""" +49 34 model """complex""" +49 34 loss """nssa""" +49 34 regularizer """no""" +49 34 optimizer """adam""" +49 34 training_loop """owa""" +49 34 negative_sampler """basic""" +49 34 evaluator """rankbased""" +49 35 dataset """wn18rr""" +49 35 model """complex""" +49 35 loss """nssa""" +49 35 regularizer """no""" +49 35 optimizer """adam""" +49 35 training_loop """owa""" +49 35 negative_sampler """basic""" +49 35 evaluator """rankbased""" +49 36 dataset """wn18rr""" +49 36 model """complex""" +49 36 loss """nssa""" +49 36 regularizer """no""" +49 36 optimizer """adam""" +49 36 training_loop """owa""" +49 36 negative_sampler """basic""" +49 36 evaluator """rankbased""" +49 37 dataset """wn18rr""" +49 37 model """complex""" +49 37 loss """nssa""" +49 37 regularizer """no""" +49 37 optimizer """adam""" +49 37 training_loop """owa""" +49 37 negative_sampler 
"""basic""" +49 37 evaluator """rankbased""" +49 38 dataset """wn18rr""" +49 38 model """complex""" +49 38 loss """nssa""" +49 38 regularizer """no""" +49 38 optimizer """adam""" +49 38 training_loop """owa""" +49 38 negative_sampler """basic""" +49 38 evaluator """rankbased""" +49 39 dataset """wn18rr""" +49 39 model """complex""" +49 39 loss """nssa""" +49 39 regularizer """no""" +49 39 optimizer """adam""" +49 39 training_loop """owa""" +49 39 negative_sampler """basic""" +49 39 evaluator """rankbased""" +49 40 dataset """wn18rr""" +49 40 model """complex""" +49 40 loss """nssa""" +49 40 regularizer """no""" +49 40 optimizer """adam""" +49 40 training_loop """owa""" +49 40 negative_sampler """basic""" +49 40 evaluator """rankbased""" +49 41 dataset """wn18rr""" +49 41 model """complex""" +49 41 loss """nssa""" +49 41 regularizer """no""" +49 41 optimizer """adam""" +49 41 training_loop """owa""" +49 41 negative_sampler """basic""" +49 41 evaluator """rankbased""" +49 42 dataset """wn18rr""" +49 42 model """complex""" +49 42 loss """nssa""" +49 42 regularizer """no""" +49 42 optimizer """adam""" +49 42 training_loop """owa""" +49 42 negative_sampler """basic""" +49 42 evaluator """rankbased""" +49 43 dataset """wn18rr""" +49 43 model """complex""" +49 43 loss """nssa""" +49 43 regularizer """no""" +49 43 optimizer """adam""" +49 43 training_loop """owa""" +49 43 negative_sampler """basic""" +49 43 evaluator """rankbased""" +49 44 dataset """wn18rr""" +49 44 model """complex""" +49 44 loss """nssa""" +49 44 regularizer """no""" +49 44 optimizer """adam""" +49 44 training_loop """owa""" +49 44 negative_sampler """basic""" +49 44 evaluator """rankbased""" +49 45 dataset """wn18rr""" +49 45 model """complex""" +49 45 loss """nssa""" +49 45 regularizer """no""" +49 45 optimizer """adam""" +49 45 training_loop """owa""" +49 45 negative_sampler """basic""" +49 45 evaluator """rankbased""" +49 46 dataset """wn18rr""" +49 46 model """complex""" +49 46 loss """nssa""" +49 
46 regularizer """no""" +49 46 optimizer """adam""" +49 46 training_loop """owa""" +49 46 negative_sampler """basic""" +49 46 evaluator """rankbased""" +49 47 dataset """wn18rr""" +49 47 model """complex""" +49 47 loss """nssa""" +49 47 regularizer """no""" +49 47 optimizer """adam""" +49 47 training_loop """owa""" +49 47 negative_sampler """basic""" +49 47 evaluator """rankbased""" +49 48 dataset """wn18rr""" +49 48 model """complex""" +49 48 loss """nssa""" +49 48 regularizer """no""" +49 48 optimizer """adam""" +49 48 training_loop """owa""" +49 48 negative_sampler """basic""" +49 48 evaluator """rankbased""" +49 49 dataset """wn18rr""" +49 49 model """complex""" +49 49 loss """nssa""" +49 49 regularizer """no""" +49 49 optimizer """adam""" +49 49 training_loop """owa""" +49 49 negative_sampler """basic""" +49 49 evaluator """rankbased""" +49 50 dataset """wn18rr""" +49 50 model """complex""" +49 50 loss """nssa""" +49 50 regularizer """no""" +49 50 optimizer """adam""" +49 50 training_loop """owa""" +49 50 negative_sampler """basic""" +49 50 evaluator """rankbased""" +49 51 dataset """wn18rr""" +49 51 model """complex""" +49 51 loss """nssa""" +49 51 regularizer """no""" +49 51 optimizer """adam""" +49 51 training_loop """owa""" +49 51 negative_sampler """basic""" +49 51 evaluator """rankbased""" +49 52 dataset """wn18rr""" +49 52 model """complex""" +49 52 loss """nssa""" +49 52 regularizer """no""" +49 52 optimizer """adam""" +49 52 training_loop """owa""" +49 52 negative_sampler """basic""" +49 52 evaluator """rankbased""" +49 53 dataset """wn18rr""" +49 53 model """complex""" +49 53 loss """nssa""" +49 53 regularizer """no""" +49 53 optimizer """adam""" +49 53 training_loop """owa""" +49 53 negative_sampler """basic""" +49 53 evaluator """rankbased""" +49 54 dataset """wn18rr""" +49 54 model """complex""" +49 54 loss """nssa""" +49 54 regularizer """no""" +49 54 optimizer """adam""" +49 54 training_loop """owa""" +49 54 negative_sampler """basic""" +49 54 
evaluator """rankbased""" +49 55 dataset """wn18rr""" +49 55 model """complex""" +49 55 loss """nssa""" +49 55 regularizer """no""" +49 55 optimizer """adam""" +49 55 training_loop """owa""" +49 55 negative_sampler """basic""" +49 55 evaluator """rankbased""" +49 56 dataset """wn18rr""" +49 56 model """complex""" +49 56 loss """nssa""" +49 56 regularizer """no""" +49 56 optimizer """adam""" +49 56 training_loop """owa""" +49 56 negative_sampler """basic""" +49 56 evaluator """rankbased""" +49 57 dataset """wn18rr""" +49 57 model """complex""" +49 57 loss """nssa""" +49 57 regularizer """no""" +49 57 optimizer """adam""" +49 57 training_loop """owa""" +49 57 negative_sampler """basic""" +49 57 evaluator """rankbased""" +49 58 dataset """wn18rr""" +49 58 model """complex""" +49 58 loss """nssa""" +49 58 regularizer """no""" +49 58 optimizer """adam""" +49 58 training_loop """owa""" +49 58 negative_sampler """basic""" +49 58 evaluator """rankbased""" +49 59 dataset """wn18rr""" +49 59 model """complex""" +49 59 loss """nssa""" +49 59 regularizer """no""" +49 59 optimizer """adam""" +49 59 training_loop """owa""" +49 59 negative_sampler """basic""" +49 59 evaluator """rankbased""" +49 60 dataset """wn18rr""" +49 60 model """complex""" +49 60 loss """nssa""" +49 60 regularizer """no""" +49 60 optimizer """adam""" +49 60 training_loop """owa""" +49 60 negative_sampler """basic""" +49 60 evaluator """rankbased""" +49 61 dataset """wn18rr""" +49 61 model """complex""" +49 61 loss """nssa""" +49 61 regularizer """no""" +49 61 optimizer """adam""" +49 61 training_loop """owa""" +49 61 negative_sampler """basic""" +49 61 evaluator """rankbased""" +49 62 dataset """wn18rr""" +49 62 model """complex""" +49 62 loss """nssa""" +49 62 regularizer """no""" +49 62 optimizer """adam""" +49 62 training_loop """owa""" +49 62 negative_sampler """basic""" +49 62 evaluator """rankbased""" +49 63 dataset """wn18rr""" +49 63 model """complex""" +49 63 loss """nssa""" +49 63 regularizer 
"""no""" +49 63 optimizer """adam""" +49 63 training_loop """owa""" +49 63 negative_sampler """basic""" +49 63 evaluator """rankbased""" +49 64 dataset """wn18rr""" +49 64 model """complex""" +49 64 loss """nssa""" +49 64 regularizer """no""" +49 64 optimizer """adam""" +49 64 training_loop """owa""" +49 64 negative_sampler """basic""" +49 64 evaluator """rankbased""" +49 65 dataset """wn18rr""" +49 65 model """complex""" +49 65 loss """nssa""" +49 65 regularizer """no""" +49 65 optimizer """adam""" +49 65 training_loop """owa""" +49 65 negative_sampler """basic""" +49 65 evaluator """rankbased""" +49 66 dataset """wn18rr""" +49 66 model """complex""" +49 66 loss """nssa""" +49 66 regularizer """no""" +49 66 optimizer """adam""" +49 66 training_loop """owa""" +49 66 negative_sampler """basic""" +49 66 evaluator """rankbased""" +49 67 dataset """wn18rr""" +49 67 model """complex""" +49 67 loss """nssa""" +49 67 regularizer """no""" +49 67 optimizer """adam""" +49 67 training_loop """owa""" +49 67 negative_sampler """basic""" +49 67 evaluator """rankbased""" +49 68 dataset """wn18rr""" +49 68 model """complex""" +49 68 loss """nssa""" +49 68 regularizer """no""" +49 68 optimizer """adam""" +49 68 training_loop """owa""" +49 68 negative_sampler """basic""" +49 68 evaluator """rankbased""" +49 69 dataset """wn18rr""" +49 69 model """complex""" +49 69 loss """nssa""" +49 69 regularizer """no""" +49 69 optimizer """adam""" +49 69 training_loop """owa""" +49 69 negative_sampler """basic""" +49 69 evaluator """rankbased""" +49 70 dataset """wn18rr""" +49 70 model """complex""" +49 70 loss """nssa""" +49 70 regularizer """no""" +49 70 optimizer """adam""" +49 70 training_loop """owa""" +49 70 negative_sampler """basic""" +49 70 evaluator """rankbased""" +49 71 dataset """wn18rr""" +49 71 model """complex""" +49 71 loss """nssa""" +49 71 regularizer """no""" +49 71 optimizer """adam""" +49 71 training_loop """owa""" +49 71 negative_sampler """basic""" +49 71 evaluator 
"""rankbased""" +49 72 dataset """wn18rr""" +49 72 model """complex""" +49 72 loss """nssa""" +49 72 regularizer """no""" +49 72 optimizer """adam""" +49 72 training_loop """owa""" +49 72 negative_sampler """basic""" +49 72 evaluator """rankbased""" +49 73 dataset """wn18rr""" +49 73 model """complex""" +49 73 loss """nssa""" +49 73 regularizer """no""" +49 73 optimizer """adam""" +49 73 training_loop """owa""" +49 73 negative_sampler """basic""" +49 73 evaluator """rankbased""" +49 74 dataset """wn18rr""" +49 74 model """complex""" +49 74 loss """nssa""" +49 74 regularizer """no""" +49 74 optimizer """adam""" +49 74 training_loop """owa""" +49 74 negative_sampler """basic""" +49 74 evaluator """rankbased""" +49 75 dataset """wn18rr""" +49 75 model """complex""" +49 75 loss """nssa""" +49 75 regularizer """no""" +49 75 optimizer """adam""" +49 75 training_loop """owa""" +49 75 negative_sampler """basic""" +49 75 evaluator """rankbased""" +49 76 dataset """wn18rr""" +49 76 model """complex""" +49 76 loss """nssa""" +49 76 regularizer """no""" +49 76 optimizer """adam""" +49 76 training_loop """owa""" +49 76 negative_sampler """basic""" +49 76 evaluator """rankbased""" +49 77 dataset """wn18rr""" +49 77 model """complex""" +49 77 loss """nssa""" +49 77 regularizer """no""" +49 77 optimizer """adam""" +49 77 training_loop """owa""" +49 77 negative_sampler """basic""" +49 77 evaluator """rankbased""" +49 78 dataset """wn18rr""" +49 78 model """complex""" +49 78 loss """nssa""" +49 78 regularizer """no""" +49 78 optimizer """adam""" +49 78 training_loop """owa""" +49 78 negative_sampler """basic""" +49 78 evaluator """rankbased""" +49 79 dataset """wn18rr""" +49 79 model """complex""" +49 79 loss """nssa""" +49 79 regularizer """no""" +49 79 optimizer """adam""" +49 79 training_loop """owa""" +49 79 negative_sampler """basic""" +49 79 evaluator """rankbased""" +49 80 dataset """wn18rr""" +49 80 model """complex""" +49 80 loss """nssa""" +49 80 regularizer """no""" +49 
80 optimizer """adam""" +49 80 training_loop """owa""" +49 80 negative_sampler """basic""" +49 80 evaluator """rankbased""" +49 81 dataset """wn18rr""" +49 81 model """complex""" +49 81 loss """nssa""" +49 81 regularizer """no""" +49 81 optimizer """adam""" +49 81 training_loop """owa""" +49 81 negative_sampler """basic""" +49 81 evaluator """rankbased""" +49 82 dataset """wn18rr""" +49 82 model """complex""" +49 82 loss """nssa""" +49 82 regularizer """no""" +49 82 optimizer """adam""" +49 82 training_loop """owa""" +49 82 negative_sampler """basic""" +49 82 evaluator """rankbased""" +49 83 dataset """wn18rr""" +49 83 model """complex""" +49 83 loss """nssa""" +49 83 regularizer """no""" +49 83 optimizer """adam""" +49 83 training_loop """owa""" +49 83 negative_sampler """basic""" +49 83 evaluator """rankbased""" +49 84 dataset """wn18rr""" +49 84 model """complex""" +49 84 loss """nssa""" +49 84 regularizer """no""" +49 84 optimizer """adam""" +49 84 training_loop """owa""" +49 84 negative_sampler """basic""" +49 84 evaluator """rankbased""" +49 85 dataset """wn18rr""" +49 85 model """complex""" +49 85 loss """nssa""" +49 85 regularizer """no""" +49 85 optimizer """adam""" +49 85 training_loop """owa""" +49 85 negative_sampler """basic""" +49 85 evaluator """rankbased""" +49 86 dataset """wn18rr""" +49 86 model """complex""" +49 86 loss """nssa""" +49 86 regularizer """no""" +49 86 optimizer """adam""" +49 86 training_loop """owa""" +49 86 negative_sampler """basic""" +49 86 evaluator """rankbased""" +49 87 dataset """wn18rr""" +49 87 model """complex""" +49 87 loss """nssa""" +49 87 regularizer """no""" +49 87 optimizer """adam""" +49 87 training_loop """owa""" +49 87 negative_sampler """basic""" +49 87 evaluator """rankbased""" +49 88 dataset """wn18rr""" +49 88 model """complex""" +49 88 loss """nssa""" +49 88 regularizer """no""" +49 88 optimizer """adam""" +49 88 training_loop """owa""" +49 88 negative_sampler """basic""" +49 88 evaluator """rankbased""" +49 
89 dataset """wn18rr""" +49 89 model """complex""" +49 89 loss """nssa""" +49 89 regularizer """no""" +49 89 optimizer """adam""" +49 89 training_loop """owa""" +49 89 negative_sampler """basic""" +49 89 evaluator """rankbased""" +49 90 dataset """wn18rr""" +49 90 model """complex""" +49 90 loss """nssa""" +49 90 regularizer """no""" +49 90 optimizer """adam""" +49 90 training_loop """owa""" +49 90 negative_sampler """basic""" +49 90 evaluator """rankbased""" +49 91 dataset """wn18rr""" +49 91 model """complex""" +49 91 loss """nssa""" +49 91 regularizer """no""" +49 91 optimizer """adam""" +49 91 training_loop """owa""" +49 91 negative_sampler """basic""" +49 91 evaluator """rankbased""" +49 92 dataset """wn18rr""" +49 92 model """complex""" +49 92 loss """nssa""" +49 92 regularizer """no""" +49 92 optimizer """adam""" +49 92 training_loop """owa""" +49 92 negative_sampler """basic""" +49 92 evaluator """rankbased""" +49 93 dataset """wn18rr""" +49 93 model """complex""" +49 93 loss """nssa""" +49 93 regularizer """no""" +49 93 optimizer """adam""" +49 93 training_loop """owa""" +49 93 negative_sampler """basic""" +49 93 evaluator """rankbased""" +49 94 dataset """wn18rr""" +49 94 model """complex""" +49 94 loss """nssa""" +49 94 regularizer """no""" +49 94 optimizer """adam""" +49 94 training_loop """owa""" +49 94 negative_sampler """basic""" +49 94 evaluator """rankbased""" +49 95 dataset """wn18rr""" +49 95 model """complex""" +49 95 loss """nssa""" +49 95 regularizer """no""" +49 95 optimizer """adam""" +49 95 training_loop """owa""" +49 95 negative_sampler """basic""" +49 95 evaluator """rankbased""" +49 96 dataset """wn18rr""" +49 96 model """complex""" +49 96 loss """nssa""" +49 96 regularizer """no""" +49 96 optimizer """adam""" +49 96 training_loop """owa""" +49 96 negative_sampler """basic""" +49 96 evaluator """rankbased""" +49 97 dataset """wn18rr""" +49 97 model """complex""" +49 97 loss """nssa""" +49 97 regularizer """no""" +49 97 optimizer 
"""adam""" +49 97 training_loop """owa""" +49 97 negative_sampler """basic""" +49 97 evaluator """rankbased""" +49 98 dataset """wn18rr""" +49 98 model """complex""" +49 98 loss """nssa""" +49 98 regularizer """no""" +49 98 optimizer """adam""" +49 98 training_loop """owa""" +49 98 negative_sampler """basic""" +49 98 evaluator """rankbased""" +49 99 dataset """wn18rr""" +49 99 model """complex""" +49 99 loss """nssa""" +49 99 regularizer """no""" +49 99 optimizer """adam""" +49 99 training_loop """owa""" +49 99 negative_sampler """basic""" +49 99 evaluator """rankbased""" +50 1 model.embedding_dim 2.0 +50 1 optimizer.lr 0.08731887029547021 +50 1 training.batch_size 2.0 +50 1 training.label_smoothing 0.7236536833082239 +50 1 dataset """wn18rr""" +50 1 model """complex""" +50 1 loss """crossentropy""" +50 1 regularizer """no""" +50 1 optimizer """adam""" +50 1 training_loop """lcwa""" +50 1 evaluator """rankbased""" +51 1 model.embedding_dim 0.0 +51 1 optimizer.lr 0.0028776037260098523 +51 1 training.batch_size 0.0 +51 1 training.label_smoothing 0.0014687916552482796 +51 2 model.embedding_dim 1.0 +51 2 optimizer.lr 0.0018284636926060019 +51 2 training.batch_size 2.0 +51 2 training.label_smoothing 0.6628146051390186 +51 3 model.embedding_dim 2.0 +51 3 optimizer.lr 0.07249612413800714 +51 3 training.batch_size 2.0 +51 3 training.label_smoothing 0.9194267437558326 +51 4 model.embedding_dim 0.0 +51 4 optimizer.lr 0.004640791021008287 +51 4 training.batch_size 2.0 +51 4 training.label_smoothing 0.006162735902952362 +51 5 model.embedding_dim 0.0 +51 5 optimizer.lr 0.0050029495315466 +51 5 training.batch_size 1.0 +51 5 training.label_smoothing 0.007597555066691428 +51 6 model.embedding_dim 2.0 +51 6 optimizer.lr 0.0034033595089203042 +51 6 training.batch_size 0.0 +51 6 training.label_smoothing 0.006467366065328523 +51 1 dataset """wn18rr""" +51 1 model """complex""" +51 1 loss """crossentropy""" +51 1 regularizer """no""" +51 1 optimizer """adam""" +51 1 training_loop 
"""lcwa""" +51 1 evaluator """rankbased""" +51 2 dataset """wn18rr""" +51 2 model """complex""" +51 2 loss """crossentropy""" +51 2 regularizer """no""" +51 2 optimizer """adam""" +51 2 training_loop """lcwa""" +51 2 evaluator """rankbased""" +51 3 dataset """wn18rr""" +51 3 model """complex""" +51 3 loss """crossentropy""" +51 3 regularizer """no""" +51 3 optimizer """adam""" +51 3 training_loop """lcwa""" +51 3 evaluator """rankbased""" +51 4 dataset """wn18rr""" +51 4 model """complex""" +51 4 loss """crossentropy""" +51 4 regularizer """no""" +51 4 optimizer """adam""" +51 4 training_loop """lcwa""" +51 4 evaluator """rankbased""" +51 5 dataset """wn18rr""" +51 5 model """complex""" +51 5 loss """crossentropy""" +51 5 regularizer """no""" +51 5 optimizer """adam""" +51 5 training_loop """lcwa""" +51 5 evaluator """rankbased""" +51 6 dataset """wn18rr""" +51 6 model """complex""" +51 6 loss """crossentropy""" +51 6 regularizer """no""" +51 6 optimizer """adam""" +51 6 training_loop """lcwa""" +51 6 evaluator """rankbased""" +52 1 model.embedding_dim 2.0 +52 1 optimizer.lr 0.018575366802414724 +52 1 training.batch_size 2.0 +52 1 training.label_smoothing 0.0015281858809029587 +52 2 model.embedding_dim 2.0 +52 2 optimizer.lr 0.021008013195704677 +52 2 training.batch_size 0.0 +52 2 training.label_smoothing 0.2947522391589959 +52 1 dataset """wn18rr""" +52 1 model """complex""" +52 1 loss """bceaftersigmoid""" +52 1 regularizer """no""" +52 1 optimizer """adam""" +52 1 training_loop """lcwa""" +52 1 evaluator """rankbased""" +52 2 dataset """wn18rr""" +52 2 model """complex""" +52 2 loss """bceaftersigmoid""" +52 2 regularizer """no""" +52 2 optimizer """adam""" +52 2 training_loop """lcwa""" +52 2 evaluator """rankbased""" +53 1 model.embedding_dim 2.0 +53 1 optimizer.lr 0.047451960465699286 +53 1 training.batch_size 2.0 +53 1 training.label_smoothing 0.061095100318566097 +53 2 model.embedding_dim 0.0 +53 2 optimizer.lr 0.039638831579037144 +53 2 training.batch_size 
0.0 +53 2 training.label_smoothing 0.005372926669386395 +53 3 model.embedding_dim 1.0 +53 3 optimizer.lr 0.036557890087490844 +53 3 training.batch_size 1.0 +53 3 training.label_smoothing 0.7729519282712272 +53 4 model.embedding_dim 0.0 +53 4 optimizer.lr 0.013006235862613316 +53 4 training.batch_size 2.0 +53 4 training.label_smoothing 0.021651469540395395 +53 5 model.embedding_dim 0.0 +53 5 optimizer.lr 0.0010957666182327872 +53 5 training.batch_size 0.0 +53 5 training.label_smoothing 0.12112199015824969 +53 1 dataset """wn18rr""" +53 1 model """complex""" +53 1 loss """bceaftersigmoid""" +53 1 regularizer """no""" +53 1 optimizer """adam""" +53 1 training_loop """lcwa""" +53 1 evaluator """rankbased""" +53 2 dataset """wn18rr""" +53 2 model """complex""" +53 2 loss """bceaftersigmoid""" +53 2 regularizer """no""" +53 2 optimizer """adam""" +53 2 training_loop """lcwa""" +53 2 evaluator """rankbased""" +53 3 dataset """wn18rr""" +53 3 model """complex""" +53 3 loss """bceaftersigmoid""" +53 3 regularizer """no""" +53 3 optimizer """adam""" +53 3 training_loop """lcwa""" +53 3 evaluator """rankbased""" +53 4 dataset """wn18rr""" +53 4 model """complex""" +53 4 loss """bceaftersigmoid""" +53 4 regularizer """no""" +53 4 optimizer """adam""" +53 4 training_loop """lcwa""" +53 4 evaluator """rankbased""" +53 5 dataset """wn18rr""" +53 5 model """complex""" +53 5 loss """bceaftersigmoid""" +53 5 regularizer """no""" +53 5 optimizer """adam""" +53 5 training_loop """lcwa""" +53 5 evaluator """rankbased""" +54 1 model.embedding_dim 1.0 +54 1 optimizer.lr 0.035378111658505 +54 1 training.batch_size 2.0 +54 1 training.label_smoothing 0.2391565200887023 +54 2 model.embedding_dim 1.0 +54 2 optimizer.lr 0.03611894443736477 +54 2 training.batch_size 1.0 +54 2 training.label_smoothing 0.027159830912608634 +54 3 model.embedding_dim 2.0 +54 3 optimizer.lr 0.015184448688283177 +54 3 training.batch_size 0.0 +54 3 training.label_smoothing 0.08419478220848278 +54 1 dataset 
"""wn18rr""" +54 1 model """complex""" +54 1 loss """softplus""" +54 1 regularizer """no""" +54 1 optimizer """adam""" +54 1 training_loop """lcwa""" +54 1 evaluator """rankbased""" +54 2 dataset """wn18rr""" +54 2 model """complex""" +54 2 loss """softplus""" +54 2 regularizer """no""" +54 2 optimizer """adam""" +54 2 training_loop """lcwa""" +54 2 evaluator """rankbased""" +54 3 dataset """wn18rr""" +54 3 model """complex""" +54 3 loss """softplus""" +54 3 regularizer """no""" +54 3 optimizer """adam""" +54 3 training_loop """lcwa""" +54 3 evaluator """rankbased""" +55 1 model.embedding_dim 0.0 +55 1 optimizer.lr 0.003986965119764869 +55 1 training.batch_size 1.0 +55 1 training.label_smoothing 0.17418575306998021 +55 2 model.embedding_dim 2.0 +55 2 optimizer.lr 0.001112522140246614 +55 2 training.batch_size 0.0 +55 2 training.label_smoothing 0.024973892303643132 +55 3 model.embedding_dim 0.0 +55 3 optimizer.lr 0.058137967632454796 +55 3 training.batch_size 0.0 +55 3 training.label_smoothing 0.2764169196255947 +55 4 model.embedding_dim 1.0 +55 4 optimizer.lr 0.001238973081542888 +55 4 training.batch_size 1.0 +55 4 training.label_smoothing 0.01046840866967714 +55 5 model.embedding_dim 1.0 +55 5 optimizer.lr 0.0020099416116508262 +55 5 training.batch_size 0.0 +55 5 training.label_smoothing 0.017413146793711497 +55 6 model.embedding_dim 2.0 +55 6 optimizer.lr 0.0018716067030737093 +55 6 training.batch_size 0.0 +55 6 training.label_smoothing 0.001879756521891304 +55 1 dataset """wn18rr""" +55 1 model """complex""" +55 1 loss """softplus""" +55 1 regularizer """no""" +55 1 optimizer """adam""" +55 1 training_loop """lcwa""" +55 1 evaluator """rankbased""" +55 2 dataset """wn18rr""" +55 2 model """complex""" +55 2 loss """softplus""" +55 2 regularizer """no""" +55 2 optimizer """adam""" +55 2 training_loop """lcwa""" +55 2 evaluator """rankbased""" +55 3 dataset """wn18rr""" +55 3 model """complex""" +55 3 loss """softplus""" +55 3 regularizer """no""" +55 3 optimizer 
"""adam""" +55 3 training_loop """lcwa""" +55 3 evaluator """rankbased""" +55 4 dataset """wn18rr""" +55 4 model """complex""" +55 4 loss """softplus""" +55 4 regularizer """no""" +55 4 optimizer """adam""" +55 4 training_loop """lcwa""" +55 4 evaluator """rankbased""" +55 5 dataset """wn18rr""" +55 5 model """complex""" +55 5 loss """softplus""" +55 5 regularizer """no""" +55 5 optimizer """adam""" +55 5 training_loop """lcwa""" +55 5 evaluator """rankbased""" +55 6 dataset """wn18rr""" +55 6 model """complex""" +55 6 loss """softplus""" +55 6 regularizer """no""" +55 6 optimizer """adam""" +55 6 training_loop """lcwa""" +55 6 evaluator """rankbased""" +56 1 model.embedding_dim 2.0 +56 1 optimizer.lr 0.0027170383084818947 +56 1 negative_sampler.num_negs_per_pos 48.0 +56 1 training.batch_size 3.0 +56 2 model.embedding_dim 0.0 +56 2 optimizer.lr 0.06288360890904206 +56 2 negative_sampler.num_negs_per_pos 47.0 +56 2 training.batch_size 3.0 +56 3 model.embedding_dim 2.0 +56 3 optimizer.lr 0.03910723460822675 +56 3 negative_sampler.num_negs_per_pos 21.0 +56 3 training.batch_size 1.0 +56 4 model.embedding_dim 1.0 +56 4 optimizer.lr 0.0033644019606552545 +56 4 negative_sampler.num_negs_per_pos 38.0 +56 4 training.batch_size 2.0 +56 5 model.embedding_dim 1.0 +56 5 optimizer.lr 0.026555500592761128 +56 5 negative_sampler.num_negs_per_pos 14.0 +56 5 training.batch_size 3.0 +56 6 model.embedding_dim 2.0 +56 6 optimizer.lr 0.009943215473455367 +56 6 negative_sampler.num_negs_per_pos 26.0 +56 6 training.batch_size 0.0 +56 7 model.embedding_dim 2.0 +56 7 optimizer.lr 0.020670005103560343 +56 7 negative_sampler.num_negs_per_pos 14.0 +56 7 training.batch_size 1.0 +56 8 model.embedding_dim 1.0 +56 8 optimizer.lr 0.01649470338223239 +56 8 negative_sampler.num_negs_per_pos 49.0 +56 8 training.batch_size 1.0 +56 9 model.embedding_dim 1.0 +56 9 optimizer.lr 0.011428914809776666 +56 9 negative_sampler.num_negs_per_pos 41.0 +56 9 training.batch_size 0.0 +56 1 dataset """yago310""" +56 1 
model """complex""" +56 1 loss """bceaftersigmoid""" +56 1 regularizer """no""" +56 1 optimizer """adam""" +56 1 training_loop """owa""" +56 1 negative_sampler """basic""" +56 1 evaluator """rankbased""" +56 2 dataset """yago310""" +56 2 model """complex""" +56 2 loss """bceaftersigmoid""" +56 2 regularizer """no""" +56 2 optimizer """adam""" +56 2 training_loop """owa""" +56 2 negative_sampler """basic""" +56 2 evaluator """rankbased""" +56 3 dataset """yago310""" +56 3 model """complex""" +56 3 loss """bceaftersigmoid""" +56 3 regularizer """no""" +56 3 optimizer """adam""" +56 3 training_loop """owa""" +56 3 negative_sampler """basic""" +56 3 evaluator """rankbased""" +56 4 dataset """yago310""" +56 4 model """complex""" +56 4 loss """bceaftersigmoid""" +56 4 regularizer """no""" +56 4 optimizer """adam""" +56 4 training_loop """owa""" +56 4 negative_sampler """basic""" +56 4 evaluator """rankbased""" +56 5 dataset """yago310""" +56 5 model """complex""" +56 5 loss """bceaftersigmoid""" +56 5 regularizer """no""" +56 5 optimizer """adam""" +56 5 training_loop """owa""" +56 5 negative_sampler """basic""" +56 5 evaluator """rankbased""" +56 6 dataset """yago310""" +56 6 model """complex""" +56 6 loss """bceaftersigmoid""" +56 6 regularizer """no""" +56 6 optimizer """adam""" +56 6 training_loop """owa""" +56 6 negative_sampler """basic""" +56 6 evaluator """rankbased""" +56 7 dataset """yago310""" +56 7 model """complex""" +56 7 loss """bceaftersigmoid""" +56 7 regularizer """no""" +56 7 optimizer """adam""" +56 7 training_loop """owa""" +56 7 negative_sampler """basic""" +56 7 evaluator """rankbased""" +56 8 dataset """yago310""" +56 8 model """complex""" +56 8 loss """bceaftersigmoid""" +56 8 regularizer """no""" +56 8 optimizer """adam""" +56 8 training_loop """owa""" +56 8 negative_sampler """basic""" +56 8 evaluator """rankbased""" +56 9 dataset """yago310""" +56 9 model """complex""" +56 9 loss """bceaftersigmoid""" +56 9 regularizer """no""" +56 9 optimizer 
"""adam""" +56 9 training_loop """owa""" +56 9 negative_sampler """basic""" +56 9 evaluator """rankbased""" +57 1 model.embedding_dim 2.0 +57 1 optimizer.lr 0.01641927072099744 +57 1 negative_sampler.num_negs_per_pos 18.0 +57 1 training.batch_size 1.0 +57 2 model.embedding_dim 2.0 +57 2 optimizer.lr 0.017802537983477716 +57 2 negative_sampler.num_negs_per_pos 18.0 +57 2 training.batch_size 2.0 +57 3 model.embedding_dim 1.0 +57 3 optimizer.lr 0.07609937283792857 +57 3 negative_sampler.num_negs_per_pos 2.0 +57 3 training.batch_size 2.0 +57 4 model.embedding_dim 1.0 +57 4 optimizer.lr 0.05028908176518546 +57 4 negative_sampler.num_negs_per_pos 8.0 +57 4 training.batch_size 0.0 +57 5 model.embedding_dim 1.0 +57 5 optimizer.lr 0.003675509584389205 +57 5 negative_sampler.num_negs_per_pos 23.0 +57 5 training.batch_size 0.0 +57 6 model.embedding_dim 0.0 +57 6 optimizer.lr 0.0064947764329682306 +57 6 negative_sampler.num_negs_per_pos 1.0 +57 6 training.batch_size 3.0 +57 7 model.embedding_dim 2.0 +57 7 optimizer.lr 0.04028728030765366 +57 7 negative_sampler.num_negs_per_pos 48.0 +57 7 training.batch_size 1.0 +57 8 model.embedding_dim 1.0 +57 8 optimizer.lr 0.00449790146917148 +57 8 negative_sampler.num_negs_per_pos 0.0 +57 8 training.batch_size 2.0 +57 9 model.embedding_dim 0.0 +57 9 optimizer.lr 0.010624343909807503 +57 9 negative_sampler.num_negs_per_pos 5.0 +57 9 training.batch_size 3.0 +57 10 model.embedding_dim 2.0 +57 10 optimizer.lr 0.05588611857489831 +57 10 negative_sampler.num_negs_per_pos 16.0 +57 10 training.batch_size 2.0 +57 11 model.embedding_dim 1.0 +57 11 optimizer.lr 0.003584914581178544 +57 11 negative_sampler.num_negs_per_pos 30.0 +57 11 training.batch_size 1.0 +57 12 model.embedding_dim 1.0 +57 12 optimizer.lr 0.07580203791974492 +57 12 negative_sampler.num_negs_per_pos 26.0 +57 12 training.batch_size 3.0 +57 13 model.embedding_dim 1.0 +57 13 optimizer.lr 0.07012142191354595 +57 13 negative_sampler.num_negs_per_pos 42.0 +57 13 training.batch_size 3.0 
+57 14 model.embedding_dim 2.0 +57 14 optimizer.lr 0.048068420486291716 +57 14 negative_sampler.num_negs_per_pos 9.0 +57 14 training.batch_size 3.0 +57 15 model.embedding_dim 0.0 +57 15 optimizer.lr 0.00477563154990791 +57 15 negative_sampler.num_negs_per_pos 31.0 +57 15 training.batch_size 3.0 +57 16 model.embedding_dim 0.0 +57 16 optimizer.lr 0.0031396292612271377 +57 16 negative_sampler.num_negs_per_pos 40.0 +57 16 training.batch_size 0.0 +57 17 model.embedding_dim 2.0 +57 17 optimizer.lr 0.010662306520861989 +57 17 negative_sampler.num_negs_per_pos 12.0 +57 17 training.batch_size 2.0 +57 1 dataset """yago310""" +57 1 model """complex""" +57 1 loss """bceaftersigmoid""" +57 1 regularizer """no""" +57 1 optimizer """adam""" +57 1 training_loop """owa""" +57 1 negative_sampler """basic""" +57 1 evaluator """rankbased""" +57 2 dataset """yago310""" +57 2 model """complex""" +57 2 loss """bceaftersigmoid""" +57 2 regularizer """no""" +57 2 optimizer """adam""" +57 2 training_loop """owa""" +57 2 negative_sampler """basic""" +57 2 evaluator """rankbased""" +57 3 dataset """yago310""" +57 3 model """complex""" +57 3 loss """bceaftersigmoid""" +57 3 regularizer """no""" +57 3 optimizer """adam""" +57 3 training_loop """owa""" +57 3 negative_sampler """basic""" +57 3 evaluator """rankbased""" +57 4 dataset """yago310""" +57 4 model """complex""" +57 4 loss """bceaftersigmoid""" +57 4 regularizer """no""" +57 4 optimizer """adam""" +57 4 training_loop """owa""" +57 4 negative_sampler """basic""" +57 4 evaluator """rankbased""" +57 5 dataset """yago310""" +57 5 model """complex""" +57 5 loss """bceaftersigmoid""" +57 5 regularizer """no""" +57 5 optimizer """adam""" +57 5 training_loop """owa""" +57 5 negative_sampler """basic""" +57 5 evaluator """rankbased""" +57 6 dataset """yago310""" +57 6 model """complex""" +57 6 loss """bceaftersigmoid""" +57 6 regularizer """no""" +57 6 optimizer """adam""" +57 6 training_loop """owa""" +57 6 negative_sampler """basic""" +57 6 
evaluator """rankbased""" +57 7 dataset """yago310""" +57 7 model """complex""" +57 7 loss """bceaftersigmoid""" +57 7 regularizer """no""" +57 7 optimizer """adam""" +57 7 training_loop """owa""" +57 7 negative_sampler """basic""" +57 7 evaluator """rankbased""" +57 8 dataset """yago310""" +57 8 model """complex""" +57 8 loss """bceaftersigmoid""" +57 8 regularizer """no""" +57 8 optimizer """adam""" +57 8 training_loop """owa""" +57 8 negative_sampler """basic""" +57 8 evaluator """rankbased""" +57 9 dataset """yago310""" +57 9 model """complex""" +57 9 loss """bceaftersigmoid""" +57 9 regularizer """no""" +57 9 optimizer """adam""" +57 9 training_loop """owa""" +57 9 negative_sampler """basic""" +57 9 evaluator """rankbased""" +57 10 dataset """yago310""" +57 10 model """complex""" +57 10 loss """bceaftersigmoid""" +57 10 regularizer """no""" +57 10 optimizer """adam""" +57 10 training_loop """owa""" +57 10 negative_sampler """basic""" +57 10 evaluator """rankbased""" +57 11 dataset """yago310""" +57 11 model """complex""" +57 11 loss """bceaftersigmoid""" +57 11 regularizer """no""" +57 11 optimizer """adam""" +57 11 training_loop """owa""" +57 11 negative_sampler """basic""" +57 11 evaluator """rankbased""" +57 12 dataset """yago310""" +57 12 model """complex""" +57 12 loss """bceaftersigmoid""" +57 12 regularizer """no""" +57 12 optimizer """adam""" +57 12 training_loop """owa""" +57 12 negative_sampler """basic""" +57 12 evaluator """rankbased""" +57 13 dataset """yago310""" +57 13 model """complex""" +57 13 loss """bceaftersigmoid""" +57 13 regularizer """no""" +57 13 optimizer """adam""" +57 13 training_loop """owa""" +57 13 negative_sampler """basic""" +57 13 evaluator """rankbased""" +57 14 dataset """yago310""" +57 14 model """complex""" +57 14 loss """bceaftersigmoid""" +57 14 regularizer """no""" +57 14 optimizer """adam""" +57 14 training_loop """owa""" +57 14 negative_sampler """basic""" +57 14 evaluator """rankbased""" +57 15 dataset """yago310""" 
+57 15 model """complex""" +57 15 loss """bceaftersigmoid""" +57 15 regularizer """no""" +57 15 optimizer """adam""" +57 15 training_loop """owa""" +57 15 negative_sampler """basic""" +57 15 evaluator """rankbased""" +57 16 dataset """yago310""" +57 16 model """complex""" +57 16 loss """bceaftersigmoid""" +57 16 regularizer """no""" +57 16 optimizer """adam""" +57 16 training_loop """owa""" +57 16 negative_sampler """basic""" +57 16 evaluator """rankbased""" +57 17 dataset """yago310""" +57 17 model """complex""" +57 17 loss """bceaftersigmoid""" +57 17 regularizer """no""" +57 17 optimizer """adam""" +57 17 training_loop """owa""" +57 17 negative_sampler """basic""" +57 17 evaluator """rankbased""" +58 1 model.embedding_dim 2.0 +58 1 optimizer.lr 0.0020239037344342045 +58 1 negative_sampler.num_negs_per_pos 12.0 +58 1 training.batch_size 1.0 +58 2 model.embedding_dim 0.0 +58 2 optimizer.lr 0.0031203314193578744 +58 2 negative_sampler.num_negs_per_pos 46.0 +58 2 training.batch_size 1.0 +58 3 model.embedding_dim 1.0 +58 3 optimizer.lr 0.004411596732657202 +58 3 negative_sampler.num_negs_per_pos 17.0 +58 3 training.batch_size 3.0 +58 4 model.embedding_dim 0.0 +58 4 optimizer.lr 0.013721174663177124 +58 4 negative_sampler.num_negs_per_pos 11.0 +58 4 training.batch_size 0.0 +58 5 model.embedding_dim 1.0 +58 5 optimizer.lr 0.032757964249224975 +58 5 negative_sampler.num_negs_per_pos 37.0 +58 5 training.batch_size 1.0 +58 6 model.embedding_dim 0.0 +58 6 optimizer.lr 0.03429517380512484 +58 6 negative_sampler.num_negs_per_pos 34.0 +58 6 training.batch_size 2.0 +58 7 model.embedding_dim 1.0 +58 7 optimizer.lr 0.003938378957438002 +58 7 negative_sampler.num_negs_per_pos 47.0 +58 7 training.batch_size 3.0 +58 8 model.embedding_dim 1.0 +58 8 optimizer.lr 0.0054039651424262584 +58 8 negative_sampler.num_negs_per_pos 41.0 +58 8 training.batch_size 1.0 +58 9 model.embedding_dim 1.0 +58 9 optimizer.lr 0.0026063208025757945 +58 9 negative_sampler.num_negs_per_pos 43.0 +58 9 
training.batch_size 0.0 +58 10 model.embedding_dim 1.0 +58 10 optimizer.lr 0.08640152784789773 +58 10 negative_sampler.num_negs_per_pos 32.0 +58 10 training.batch_size 1.0 +58 1 dataset """yago310""" +58 1 model """complex""" +58 1 loss """softplus""" +58 1 regularizer """no""" +58 1 optimizer """adam""" +58 1 training_loop """owa""" +58 1 negative_sampler """basic""" +58 1 evaluator """rankbased""" +58 2 dataset """yago310""" +58 2 model """complex""" +58 2 loss """softplus""" +58 2 regularizer """no""" +58 2 optimizer """adam""" +58 2 training_loop """owa""" +58 2 negative_sampler """basic""" +58 2 evaluator """rankbased""" +58 3 dataset """yago310""" +58 3 model """complex""" +58 3 loss """softplus""" +58 3 regularizer """no""" +58 3 optimizer """adam""" +58 3 training_loop """owa""" +58 3 negative_sampler """basic""" +58 3 evaluator """rankbased""" +58 4 dataset """yago310""" +58 4 model """complex""" +58 4 loss """softplus""" +58 4 regularizer """no""" +58 4 optimizer """adam""" +58 4 training_loop """owa""" +58 4 negative_sampler """basic""" +58 4 evaluator """rankbased""" +58 5 dataset """yago310""" +58 5 model """complex""" +58 5 loss """softplus""" +58 5 regularizer """no""" +58 5 optimizer """adam""" +58 5 training_loop """owa""" +58 5 negative_sampler """basic""" +58 5 evaluator """rankbased""" +58 6 dataset """yago310""" +58 6 model """complex""" +58 6 loss """softplus""" +58 6 regularizer """no""" +58 6 optimizer """adam""" +58 6 training_loop """owa""" +58 6 negative_sampler """basic""" +58 6 evaluator """rankbased""" +58 7 dataset """yago310""" +58 7 model """complex""" +58 7 loss """softplus""" +58 7 regularizer """no""" +58 7 optimizer """adam""" +58 7 training_loop """owa""" +58 7 negative_sampler """basic""" +58 7 evaluator """rankbased""" +58 8 dataset """yago310""" +58 8 model """complex""" +58 8 loss """softplus""" +58 8 regularizer """no""" +58 8 optimizer """adam""" +58 8 training_loop """owa""" +58 8 negative_sampler """basic""" +58 8 
evaluator """rankbased""" +58 9 dataset """yago310""" +58 9 model """complex""" +58 9 loss """softplus""" +58 9 regularizer """no""" +58 9 optimizer """adam""" +58 9 training_loop """owa""" +58 9 negative_sampler """basic""" +58 9 evaluator """rankbased""" +58 10 dataset """yago310""" +58 10 model """complex""" +58 10 loss """softplus""" +58 10 regularizer """no""" +58 10 optimizer """adam""" +58 10 training_loop """owa""" +58 10 negative_sampler """basic""" +58 10 evaluator """rankbased""" +59 1 model.embedding_dim 0.0 +59 1 optimizer.lr 0.07544429567277701 +59 1 negative_sampler.num_negs_per_pos 36.0 +59 1 training.batch_size 3.0 +59 2 model.embedding_dim 0.0 +59 2 optimizer.lr 0.028204289344615177 +59 2 negative_sampler.num_negs_per_pos 42.0 +59 2 training.batch_size 3.0 +59 3 model.embedding_dim 0.0 +59 3 optimizer.lr 0.0018015705532904835 +59 3 negative_sampler.num_negs_per_pos 15.0 +59 3 training.batch_size 2.0 +59 4 model.embedding_dim 2.0 +59 4 optimizer.lr 0.001723135381847608 +59 4 negative_sampler.num_negs_per_pos 31.0 +59 4 training.batch_size 3.0 +59 5 model.embedding_dim 1.0 +59 5 optimizer.lr 0.00364405033368572 +59 5 negative_sampler.num_negs_per_pos 20.0 +59 5 training.batch_size 2.0 +59 6 model.embedding_dim 0.0 +59 6 optimizer.lr 0.05893128478561078 +59 6 negative_sampler.num_negs_per_pos 2.0 +59 6 training.batch_size 3.0 +59 7 model.embedding_dim 2.0 +59 7 optimizer.lr 0.008695580700563706 +59 7 negative_sampler.num_negs_per_pos 27.0 +59 7 training.batch_size 3.0 +59 8 model.embedding_dim 0.0 +59 8 optimizer.lr 0.031709229902761996 +59 8 negative_sampler.num_negs_per_pos 12.0 +59 8 training.batch_size 1.0 +59 9 model.embedding_dim 1.0 +59 9 optimizer.lr 0.013661815075348588 +59 9 negative_sampler.num_negs_per_pos 10.0 +59 9 training.batch_size 0.0 +59 10 model.embedding_dim 0.0 +59 10 optimizer.lr 0.024141283315655203 +59 10 negative_sampler.num_negs_per_pos 9.0 +59 10 training.batch_size 2.0 +59 11 model.embedding_dim 0.0 +59 11 optimizer.lr 
0.004777071329389744 +59 11 negative_sampler.num_negs_per_pos 12.0 +59 11 training.batch_size 0.0 +59 12 model.embedding_dim 2.0 +59 12 optimizer.lr 0.052194877382902 +59 12 negative_sampler.num_negs_per_pos 30.0 +59 12 training.batch_size 2.0 +59 13 model.embedding_dim 1.0 +59 13 optimizer.lr 0.004800412512763575 +59 13 negative_sampler.num_negs_per_pos 19.0 +59 13 training.batch_size 0.0 +59 1 dataset """yago310""" +59 1 model """complex""" +59 1 loss """softplus""" +59 1 regularizer """no""" +59 1 optimizer """adam""" +59 1 training_loop """owa""" +59 1 negative_sampler """basic""" +59 1 evaluator """rankbased""" +59 2 dataset """yago310""" +59 2 model """complex""" +59 2 loss """softplus""" +59 2 regularizer """no""" +59 2 optimizer """adam""" +59 2 training_loop """owa""" +59 2 negative_sampler """basic""" +59 2 evaluator """rankbased""" +59 3 dataset """yago310""" +59 3 model """complex""" +59 3 loss """softplus""" +59 3 regularizer """no""" +59 3 optimizer """adam""" +59 3 training_loop """owa""" +59 3 negative_sampler """basic""" +59 3 evaluator """rankbased""" +59 4 dataset """yago310""" +59 4 model """complex""" +59 4 loss """softplus""" +59 4 regularizer """no""" +59 4 optimizer """adam""" +59 4 training_loop """owa""" +59 4 negative_sampler """basic""" +59 4 evaluator """rankbased""" +59 5 dataset """yago310""" +59 5 model """complex""" +59 5 loss """softplus""" +59 5 regularizer """no""" +59 5 optimizer """adam""" +59 5 training_loop """owa""" +59 5 negative_sampler """basic""" +59 5 evaluator """rankbased""" +59 6 dataset """yago310""" +59 6 model """complex""" +59 6 loss """softplus""" +59 6 regularizer """no""" +59 6 optimizer """adam""" +59 6 training_loop """owa""" +59 6 negative_sampler """basic""" +59 6 evaluator """rankbased""" +59 7 dataset """yago310""" +59 7 model """complex""" +59 7 loss """softplus""" +59 7 regularizer """no""" +59 7 optimizer """adam""" +59 7 training_loop """owa""" +59 7 negative_sampler """basic""" +59 7 evaluator 
"""rankbased""" +59 8 dataset """yago310""" +59 8 model """complex""" +59 8 loss """softplus""" +59 8 regularizer """no""" +59 8 optimizer """adam""" +59 8 training_loop """owa""" +59 8 negative_sampler """basic""" +59 8 evaluator """rankbased""" +59 9 dataset """yago310""" +59 9 model """complex""" +59 9 loss """softplus""" +59 9 regularizer """no""" +59 9 optimizer """adam""" +59 9 training_loop """owa""" +59 9 negative_sampler """basic""" +59 9 evaluator """rankbased""" +59 10 dataset """yago310""" +59 10 model """complex""" +59 10 loss """softplus""" +59 10 regularizer """no""" +59 10 optimizer """adam""" +59 10 training_loop """owa""" +59 10 negative_sampler """basic""" +59 10 evaluator """rankbased""" +59 11 dataset """yago310""" +59 11 model """complex""" +59 11 loss """softplus""" +59 11 regularizer """no""" +59 11 optimizer """adam""" +59 11 training_loop """owa""" +59 11 negative_sampler """basic""" +59 11 evaluator """rankbased""" +59 12 dataset """yago310""" +59 12 model """complex""" +59 12 loss """softplus""" +59 12 regularizer """no""" +59 12 optimizer """adam""" +59 12 training_loop """owa""" +59 12 negative_sampler """basic""" +59 12 evaluator """rankbased""" +59 13 dataset """yago310""" +59 13 model """complex""" +59 13 loss """softplus""" +59 13 regularizer """no""" +59 13 optimizer """adam""" +59 13 training_loop """owa""" +59 13 negative_sampler """basic""" +59 13 evaluator """rankbased""" +60 1 model.embedding_dim 0.0 +60 1 loss.margin 1.445332659037184 +60 1 loss.adversarial_temperature 0.3926542299694883 +60 1 optimizer.lr 0.0050725876581611444 +60 1 negative_sampler.num_negs_per_pos 7.0 +60 1 training.batch_size 2.0 +60 2 model.embedding_dim 0.0 +60 2 loss.margin 10.905903302529603 +60 2 loss.adversarial_temperature 0.21885625317156499 +60 2 optimizer.lr 0.0426246327176668 +60 2 negative_sampler.num_negs_per_pos 29.0 +60 2 training.batch_size 2.0 +60 3 model.embedding_dim 2.0 +60 3 loss.margin 29.548991420665836 +60 3 
loss.adversarial_temperature 0.8072499290698856 +60 3 optimizer.lr 0.023692728863068512 +60 3 negative_sampler.num_negs_per_pos 5.0 +60 3 training.batch_size 2.0 +60 4 model.embedding_dim 0.0 +60 4 loss.margin 14.042280992541832 +60 4 loss.adversarial_temperature 0.26509171997351166 +60 4 optimizer.lr 0.0022717505644724743 +60 4 negative_sampler.num_negs_per_pos 0.0 +60 4 training.batch_size 2.0 +60 5 model.embedding_dim 1.0 +60 5 loss.margin 4.44225093850957 +60 5 loss.adversarial_temperature 0.8643779844790087 +60 5 optimizer.lr 0.009981085984582215 +60 5 negative_sampler.num_negs_per_pos 1.0 +60 5 training.batch_size 1.0 +60 6 model.embedding_dim 1.0 +60 6 loss.margin 13.808995641429332 +60 6 loss.adversarial_temperature 0.1006225350513569 +60 6 optimizer.lr 0.003840883080163123 +60 6 negative_sampler.num_negs_per_pos 6.0 +60 6 training.batch_size 2.0 +60 7 model.embedding_dim 0.0 +60 7 loss.margin 12.708566908853562 +60 7 loss.adversarial_temperature 0.4634975851245928 +60 7 optimizer.lr 0.0526577408542922 +60 7 negative_sampler.num_negs_per_pos 23.0 +60 7 training.batch_size 2.0 +60 8 model.embedding_dim 0.0 +60 8 loss.margin 17.0626599531358 +60 8 loss.adversarial_temperature 0.9933345624394795 +60 8 optimizer.lr 0.041934255735105515 +60 8 negative_sampler.num_negs_per_pos 33.0 +60 8 training.batch_size 1.0 +60 9 model.embedding_dim 0.0 +60 9 loss.margin 10.94884759315115 +60 9 loss.adversarial_temperature 0.35019781024691643 +60 9 optimizer.lr 0.004247943312405998 +60 9 negative_sampler.num_negs_per_pos 37.0 +60 9 training.batch_size 2.0 +60 10 model.embedding_dim 2.0 +60 10 loss.margin 13.006452115247589 +60 10 loss.adversarial_temperature 0.2753096793852254 +60 10 optimizer.lr 0.001650228257788736 +60 10 negative_sampler.num_negs_per_pos 15.0 +60 10 training.batch_size 2.0 +60 1 dataset """yago310""" +60 1 model """complex""" +60 1 loss """nssa""" +60 1 regularizer """no""" +60 1 optimizer """adam""" +60 1 training_loop """owa""" +60 1 negative_sampler 
"""basic""" +60 1 evaluator """rankbased""" +60 2 dataset """yago310""" +60 2 model """complex""" +60 2 loss """nssa""" +60 2 regularizer """no""" +60 2 optimizer """adam""" +60 2 training_loop """owa""" +60 2 negative_sampler """basic""" +60 2 evaluator """rankbased""" +60 3 dataset """yago310""" +60 3 model """complex""" +60 3 loss """nssa""" +60 3 regularizer """no""" +60 3 optimizer """adam""" +60 3 training_loop """owa""" +60 3 negative_sampler """basic""" +60 3 evaluator """rankbased""" +60 4 dataset """yago310""" +60 4 model """complex""" +60 4 loss """nssa""" +60 4 regularizer """no""" +60 4 optimizer """adam""" +60 4 training_loop """owa""" +60 4 negative_sampler """basic""" +60 4 evaluator """rankbased""" +60 5 dataset """yago310""" +60 5 model """complex""" +60 5 loss """nssa""" +60 5 regularizer """no""" +60 5 optimizer """adam""" +60 5 training_loop """owa""" +60 5 negative_sampler """basic""" +60 5 evaluator """rankbased""" +60 6 dataset """yago310""" +60 6 model """complex""" +60 6 loss """nssa""" +60 6 regularizer """no""" +60 6 optimizer """adam""" +60 6 training_loop """owa""" +60 6 negative_sampler """basic""" +60 6 evaluator """rankbased""" +60 7 dataset """yago310""" +60 7 model """complex""" +60 7 loss """nssa""" +60 7 regularizer """no""" +60 7 optimizer """adam""" +60 7 training_loop """owa""" +60 7 negative_sampler """basic""" +60 7 evaluator """rankbased""" +60 8 dataset """yago310""" +60 8 model """complex""" +60 8 loss """nssa""" +60 8 regularizer """no""" +60 8 optimizer """adam""" +60 8 training_loop """owa""" +60 8 negative_sampler """basic""" +60 8 evaluator """rankbased""" +60 9 dataset """yago310""" +60 9 model """complex""" +60 9 loss """nssa""" +60 9 regularizer """no""" +60 9 optimizer """adam""" +60 9 training_loop """owa""" +60 9 negative_sampler """basic""" +60 9 evaluator """rankbased""" +60 10 dataset """yago310""" +60 10 model """complex""" +60 10 loss """nssa""" +60 10 regularizer """no""" +60 10 optimizer """adam""" +60 
10 training_loop """owa""" +60 10 negative_sampler """basic""" +60 10 evaluator """rankbased""" +61 1 model.embedding_dim 0.0 +61 1 loss.margin 24.301776333392507 +61 1 loss.adversarial_temperature 0.22344510367924986 +61 1 optimizer.lr 0.022681906140507926 +61 1 negative_sampler.num_negs_per_pos 44.0 +61 1 training.batch_size 0.0 +61 2 model.embedding_dim 1.0 +61 2 loss.margin 21.53266598618806 +61 2 loss.adversarial_temperature 0.8542427389860672 +61 2 optimizer.lr 0.0027784863589230455 +61 2 negative_sampler.num_negs_per_pos 23.0 +61 2 training.batch_size 1.0 +61 3 model.embedding_dim 2.0 +61 3 loss.margin 26.853015542919852 +61 3 loss.adversarial_temperature 0.7170716602801276 +61 3 optimizer.lr 0.0016718252573185953 +61 3 negative_sampler.num_negs_per_pos 29.0 +61 3 training.batch_size 2.0 +61 4 model.embedding_dim 2.0 +61 4 loss.margin 1.616249428514842 +61 4 loss.adversarial_temperature 0.21653745451402867 +61 4 optimizer.lr 0.004371351411954311 +61 4 negative_sampler.num_negs_per_pos 1.0 +61 4 training.batch_size 0.0 +61 5 model.embedding_dim 0.0 +61 5 loss.margin 14.763712748370803 +61 5 loss.adversarial_temperature 0.7001165076635484 +61 5 optimizer.lr 0.03596656052088639 +61 5 negative_sampler.num_negs_per_pos 46.0 +61 5 training.batch_size 3.0 +61 6 model.embedding_dim 2.0 +61 6 loss.margin 23.659615444804256 +61 6 loss.adversarial_temperature 0.7426964795833407 +61 6 optimizer.lr 0.013648751952245427 +61 6 negative_sampler.num_negs_per_pos 26.0 +61 6 training.batch_size 3.0 +61 7 model.embedding_dim 0.0 +61 7 loss.margin 22.767900179934447 +61 7 loss.adversarial_temperature 0.13768572802515855 +61 7 optimizer.lr 0.02416097315241268 +61 7 negative_sampler.num_negs_per_pos 20.0 +61 7 training.batch_size 3.0 +61 8 model.embedding_dim 1.0 +61 8 loss.margin 6.199847866206633 +61 8 loss.adversarial_temperature 0.98049951114489 +61 8 optimizer.lr 0.005031656128160748 +61 8 negative_sampler.num_negs_per_pos 0.0 +61 8 training.batch_size 0.0 +61 9 
model.embedding_dim 2.0 +61 9 loss.margin 1.842006733637532 +61 9 loss.adversarial_temperature 0.19181589390783893 +61 9 optimizer.lr 0.0018009154218241482 +61 9 negative_sampler.num_negs_per_pos 39.0 +61 9 training.batch_size 1.0 +61 10 model.embedding_dim 1.0 +61 10 loss.margin 7.122990928868852 +61 10 loss.adversarial_temperature 0.843761973315874 +61 10 optimizer.lr 0.00859756758404846 +61 10 negative_sampler.num_negs_per_pos 38.0 +61 10 training.batch_size 2.0 +61 11 model.embedding_dim 2.0 +61 11 loss.margin 25.14695262464104 +61 11 loss.adversarial_temperature 0.7219641603585446 +61 11 optimizer.lr 0.008126569876865428 +61 11 negative_sampler.num_negs_per_pos 7.0 +61 11 training.batch_size 1.0 +61 1 dataset """yago310""" +61 1 model """complex""" +61 1 loss """nssa""" +61 1 regularizer """no""" +61 1 optimizer """adam""" +61 1 training_loop """owa""" +61 1 negative_sampler """basic""" +61 1 evaluator """rankbased""" +61 2 dataset """yago310""" +61 2 model """complex""" +61 2 loss """nssa""" +61 2 regularizer """no""" +61 2 optimizer """adam""" +61 2 training_loop """owa""" +61 2 negative_sampler """basic""" +61 2 evaluator """rankbased""" +61 3 dataset """yago310""" +61 3 model """complex""" +61 3 loss """nssa""" +61 3 regularizer """no""" +61 3 optimizer """adam""" +61 3 training_loop """owa""" +61 3 negative_sampler """basic""" +61 3 evaluator """rankbased""" +61 4 dataset """yago310""" +61 4 model """complex""" +61 4 loss """nssa""" +61 4 regularizer """no""" +61 4 optimizer """adam""" +61 4 training_loop """owa""" +61 4 negative_sampler """basic""" +61 4 evaluator """rankbased""" +61 5 dataset """yago310""" +61 5 model """complex""" +61 5 loss """nssa""" +61 5 regularizer """no""" +61 5 optimizer """adam""" +61 5 training_loop """owa""" +61 5 negative_sampler """basic""" +61 5 evaluator """rankbased""" +61 6 dataset """yago310""" +61 6 model """complex""" +61 6 loss """nssa""" +61 6 regularizer """no""" +61 6 optimizer """adam""" +61 6 training_loop 
"""owa""" +61 6 negative_sampler """basic""" +61 6 evaluator """rankbased""" +61 7 dataset """yago310""" +61 7 model """complex""" +61 7 loss """nssa""" +61 7 regularizer """no""" +61 7 optimizer """adam""" +61 7 training_loop """owa""" +61 7 negative_sampler """basic""" +61 7 evaluator """rankbased""" +61 8 dataset """yago310""" +61 8 model """complex""" +61 8 loss """nssa""" +61 8 regularizer """no""" +61 8 optimizer """adam""" +61 8 training_loop """owa""" +61 8 negative_sampler """basic""" +61 8 evaluator """rankbased""" +61 9 dataset """yago310""" +61 9 model """complex""" +61 9 loss """nssa""" +61 9 regularizer """no""" +61 9 optimizer """adam""" +61 9 training_loop """owa""" +61 9 negative_sampler """basic""" +61 9 evaluator """rankbased""" +61 10 dataset """yago310""" +61 10 model """complex""" +61 10 loss """nssa""" +61 10 regularizer """no""" +61 10 optimizer """adam""" +61 10 training_loop """owa""" +61 10 negative_sampler """basic""" +61 10 evaluator """rankbased""" +61 11 dataset """yago310""" +61 11 model """complex""" +61 11 loss """nssa""" +61 11 regularizer """no""" +61 11 optimizer """adam""" +61 11 training_loop """owa""" +61 11 negative_sampler """basic""" +61 11 evaluator """rankbased""" +62 1 model.embedding_dim 0.0 +62 1 loss.margin 7.924066856734403 +62 1 optimizer.lr 0.029872399686471762 +62 1 negative_sampler.num_negs_per_pos 39.0 +62 1 training.batch_size 1.0 +62 2 model.embedding_dim 2.0 +62 2 loss.margin 5.915418661658161 +62 2 optimizer.lr 0.049207152520208144 +62 2 negative_sampler.num_negs_per_pos 39.0 +62 2 training.batch_size 3.0 +62 3 model.embedding_dim 1.0 +62 3 loss.margin 7.531798972914836 +62 3 optimizer.lr 0.0023697130914649097 +62 3 negative_sampler.num_negs_per_pos 5.0 +62 3 training.batch_size 2.0 +62 4 model.embedding_dim 2.0 +62 4 loss.margin 5.376659357347199 +62 4 optimizer.lr 0.020460819005275202 +62 4 negative_sampler.num_negs_per_pos 3.0 +62 4 training.batch_size 2.0 +62 5 model.embedding_dim 1.0 +62 5 loss.margin 
4.834163383702519 +62 5 optimizer.lr 0.0019698565379195634 +62 5 negative_sampler.num_negs_per_pos 42.0 +62 5 training.batch_size 0.0 +62 6 model.embedding_dim 1.0 +62 6 loss.margin 6.111890166390173 +62 6 optimizer.lr 0.08822169569417096 +62 6 negative_sampler.num_negs_per_pos 4.0 +62 6 training.batch_size 2.0 +62 7 model.embedding_dim 1.0 +62 7 loss.margin 8.78912269822894 +62 7 optimizer.lr 0.05216706105633118 +62 7 negative_sampler.num_negs_per_pos 37.0 +62 7 training.batch_size 2.0 +62 1 dataset """yago310""" +62 1 model """complex""" +62 1 loss """marginranking""" +62 1 regularizer """no""" +62 1 optimizer """adam""" +62 1 training_loop """owa""" +62 1 negative_sampler """basic""" +62 1 evaluator """rankbased""" +62 2 dataset """yago310""" +62 2 model """complex""" +62 2 loss """marginranking""" +62 2 regularizer """no""" +62 2 optimizer """adam""" +62 2 training_loop """owa""" +62 2 negative_sampler """basic""" +62 2 evaluator """rankbased""" +62 3 dataset """yago310""" +62 3 model """complex""" +62 3 loss """marginranking""" +62 3 regularizer """no""" +62 3 optimizer """adam""" +62 3 training_loop """owa""" +62 3 negative_sampler """basic""" +62 3 evaluator """rankbased""" +62 4 dataset """yago310""" +62 4 model """complex""" +62 4 loss """marginranking""" +62 4 regularizer """no""" +62 4 optimizer """adam""" +62 4 training_loop """owa""" +62 4 negative_sampler """basic""" +62 4 evaluator """rankbased""" +62 5 dataset """yago310""" +62 5 model """complex""" +62 5 loss """marginranking""" +62 5 regularizer """no""" +62 5 optimizer """adam""" +62 5 training_loop """owa""" +62 5 negative_sampler """basic""" +62 5 evaluator """rankbased""" +62 6 dataset """yago310""" +62 6 model """complex""" +62 6 loss """marginranking""" +62 6 regularizer """no""" +62 6 optimizer """adam""" +62 6 training_loop """owa""" +62 6 negative_sampler """basic""" +62 6 evaluator """rankbased""" +62 7 dataset """yago310""" +62 7 model """complex""" +62 7 loss """marginranking""" +62 7 
regularizer """no""" +62 7 optimizer """adam""" +62 7 training_loop """owa""" +62 7 negative_sampler """basic""" +62 7 evaluator """rankbased""" +63 1 model.embedding_dim 1.0 +63 1 loss.margin 3.351261532035935 +63 1 optimizer.lr 0.036194542038525436 +63 1 negative_sampler.num_negs_per_pos 10.0 +63 1 training.batch_size 3.0 +63 2 model.embedding_dim 2.0 +63 2 loss.margin 8.938914154537347 +63 2 optimizer.lr 0.06285903363580812 +63 2 negative_sampler.num_negs_per_pos 46.0 +63 2 training.batch_size 0.0 +63 3 model.embedding_dim 0.0 +63 3 loss.margin 4.101956547260322 +63 3 optimizer.lr 0.025600560012617686 +63 3 negative_sampler.num_negs_per_pos 24.0 +63 3 training.batch_size 0.0 +63 4 model.embedding_dim 2.0 +63 4 loss.margin 2.466901322265122 +63 4 optimizer.lr 0.010005229690985416 +63 4 negative_sampler.num_negs_per_pos 30.0 +63 4 training.batch_size 2.0 +63 5 model.embedding_dim 2.0 +63 5 loss.margin 7.051136916334929 +63 5 optimizer.lr 0.0010700422813582928 +63 5 negative_sampler.num_negs_per_pos 19.0 +63 5 training.batch_size 1.0 +63 6 model.embedding_dim 0.0 +63 6 loss.margin 9.936669242851163 +63 6 optimizer.lr 0.020286979112284737 +63 6 negative_sampler.num_negs_per_pos 37.0 +63 6 training.batch_size 0.0 +63 7 model.embedding_dim 1.0 +63 7 loss.margin 2.6062541381352435 +63 7 optimizer.lr 0.08139919665126512 +63 7 negative_sampler.num_negs_per_pos 47.0 +63 7 training.batch_size 1.0 +63 8 model.embedding_dim 1.0 +63 8 loss.margin 1.0437778861615485 +63 8 optimizer.lr 0.001100983819043179 +63 8 negative_sampler.num_negs_per_pos 19.0 +63 8 training.batch_size 3.0 +63 9 model.embedding_dim 2.0 +63 9 loss.margin 9.010824760665612 +63 9 optimizer.lr 0.007546469530215451 +63 9 negative_sampler.num_negs_per_pos 6.0 +63 9 training.batch_size 1.0 +63 10 model.embedding_dim 1.0 +63 10 loss.margin 9.62866541231516 +63 10 optimizer.lr 0.051945106193039645 +63 10 negative_sampler.num_negs_per_pos 48.0 +63 10 training.batch_size 0.0 +63 1 dataset """yago310""" +63 1 model 
"""complex""" +63 1 loss """marginranking""" +63 1 regularizer """no""" +63 1 optimizer """adam""" +63 1 training_loop """owa""" +63 1 negative_sampler """basic""" +63 1 evaluator """rankbased""" +63 2 dataset """yago310""" +63 2 model """complex""" +63 2 loss """marginranking""" +63 2 regularizer """no""" +63 2 optimizer """adam""" +63 2 training_loop """owa""" +63 2 negative_sampler """basic""" +63 2 evaluator """rankbased""" +63 3 dataset """yago310""" +63 3 model """complex""" +63 3 loss """marginranking""" +63 3 regularizer """no""" +63 3 optimizer """adam""" +63 3 training_loop """owa""" +63 3 negative_sampler """basic""" +63 3 evaluator """rankbased""" +63 4 dataset """yago310""" +63 4 model """complex""" +63 4 loss """marginranking""" +63 4 regularizer """no""" +63 4 optimizer """adam""" +63 4 training_loop """owa""" +63 4 negative_sampler """basic""" +63 4 evaluator """rankbased""" +63 5 dataset """yago310""" +63 5 model """complex""" +63 5 loss """marginranking""" +63 5 regularizer """no""" +63 5 optimizer """adam""" +63 5 training_loop """owa""" +63 5 negative_sampler """basic""" +63 5 evaluator """rankbased""" +63 6 dataset """yago310""" +63 6 model """complex""" +63 6 loss """marginranking""" +63 6 regularizer """no""" +63 6 optimizer """adam""" +63 6 training_loop """owa""" +63 6 negative_sampler """basic""" +63 6 evaluator """rankbased""" +63 7 dataset """yago310""" +63 7 model """complex""" +63 7 loss """marginranking""" +63 7 regularizer """no""" +63 7 optimizer """adam""" +63 7 training_loop """owa""" +63 7 negative_sampler """basic""" +63 7 evaluator """rankbased""" +63 8 dataset """yago310""" +63 8 model """complex""" +63 8 loss """marginranking""" +63 8 regularizer """no""" +63 8 optimizer """adam""" +63 8 training_loop """owa""" +63 8 negative_sampler """basic""" +63 8 evaluator """rankbased""" +63 9 dataset """yago310""" +63 9 model """complex""" +63 9 loss """marginranking""" +63 9 regularizer """no""" +63 9 optimizer """adam""" +63 9 
training_loop """owa""" +63 9 negative_sampler """basic""" +63 9 evaluator """rankbased""" +63 10 dataset """yago310""" +63 10 model """complex""" +63 10 loss """marginranking""" +63 10 regularizer """no""" +63 10 optimizer """adam""" +63 10 training_loop """owa""" +63 10 negative_sampler """basic""" +63 10 evaluator """rankbased""" +64 1 model.output_channels 31.0 +64 1 model.input_dropout 0.18649327048420322 +64 1 model.output_dropout 0.4805119402050447 +64 1 model.feature_map_dropout 0.15364145132714419 +64 1 model.embedding_dim 0.0 +64 1 training.batch_size 2.0 +64 1 training.label_smoothing 0.001490940952472274 +64 2 model.output_channels 25.0 +64 2 model.input_dropout 0.20424280700367536 +64 2 model.output_dropout 0.41806294754424894 +64 2 model.feature_map_dropout 0.09005544998925075 +64 2 model.embedding_dim 2.0 +64 2 training.batch_size 0.0 +64 2 training.label_smoothing 0.0023009444344781705 +64 3 model.output_channels 60.0 +64 3 model.input_dropout 0.31308715187190905 +64 3 model.output_dropout 0.18274811788036488 +64 3 model.feature_map_dropout 0.23566910483993458 +64 3 model.embedding_dim 2.0 +64 3 training.batch_size 2.0 +64 3 training.label_smoothing 0.08524256143727249 +64 4 model.output_channels 33.0 +64 4 model.input_dropout 0.43216673131013744 +64 4 model.output_dropout 0.44848601578284614 +64 4 model.feature_map_dropout 0.2414091190457041 +64 4 model.embedding_dim 2.0 +64 4 training.batch_size 1.0 +64 4 training.label_smoothing 0.033414419479676466 +64 5 model.output_channels 52.0 +64 5 model.input_dropout 0.2313073340058751 +64 5 model.output_dropout 0.41842343933766446 +64 5 model.feature_map_dropout 0.2652323909004833 +64 5 model.embedding_dim 0.0 +64 5 training.batch_size 2.0 +64 5 training.label_smoothing 0.03173661757057888 +64 6 model.output_channels 49.0 +64 6 model.input_dropout 0.3996124218982713 +64 6 model.output_dropout 0.11165535666072463 +64 6 model.feature_map_dropout 0.28383704137026894 +64 6 model.embedding_dim 1.0 +64 6 
training.batch_size 1.0 +64 6 training.label_smoothing 0.023870078729602447 +64 7 model.output_channels 53.0 +64 7 model.input_dropout 0.42668244226716356 +64 7 model.output_dropout 0.18842973539463287 +64 7 model.feature_map_dropout 0.4939950189510551 +64 7 model.embedding_dim 0.0 +64 7 training.batch_size 2.0 +64 7 training.label_smoothing 0.006901555570020871 +64 8 model.output_channels 63.0 +64 8 model.input_dropout 0.03853760387607669 +64 8 model.output_dropout 0.16842152409016659 +64 8 model.feature_map_dropout 0.23857276066937055 +64 8 model.embedding_dim 1.0 +64 8 training.batch_size 2.0 +64 8 training.label_smoothing 0.03656390871698281 +64 9 model.output_channels 42.0 +64 9 model.input_dropout 0.12606977981016948 +64 9 model.output_dropout 0.32576703857027134 +64 9 model.feature_map_dropout 0.02305816932411736 +64 9 model.embedding_dim 0.0 +64 9 training.batch_size 2.0 +64 9 training.label_smoothing 0.2875621519382408 +64 10 model.output_channels 46.0 +64 10 model.input_dropout 0.10334550186397468 +64 10 model.output_dropout 0.4895426585911872 +64 10 model.feature_map_dropout 0.31852018524029174 +64 10 model.embedding_dim 1.0 +64 10 training.batch_size 1.0 +64 10 training.label_smoothing 0.004775974035028509 +64 11 model.output_channels 30.0 +64 11 model.input_dropout 0.023473747531903955 +64 11 model.output_dropout 0.2456976747379795 +64 11 model.feature_map_dropout 0.11770480424854518 +64 11 model.embedding_dim 1.0 +64 11 training.batch_size 0.0 +64 11 training.label_smoothing 0.10487421349711447 +64 12 model.output_channels 16.0 +64 12 model.input_dropout 0.17497568427166632 +64 12 model.output_dropout 0.16948732409626788 +64 12 model.feature_map_dropout 0.2777410298221322 +64 12 model.embedding_dim 1.0 +64 12 training.batch_size 2.0 +64 12 training.label_smoothing 0.008015941809092725 +64 13 model.output_channels 33.0 +64 13 model.input_dropout 0.3584289922727892 +64 13 model.output_dropout 0.3455574639709909 +64 13 model.feature_map_dropout 
0.33187320078807225 +64 13 model.embedding_dim 1.0 +64 13 training.batch_size 0.0 +64 13 training.label_smoothing 0.8869310653754059 +64 14 model.output_channels 44.0 +64 14 model.input_dropout 0.4154150482091515 +64 14 model.output_dropout 0.27237409823439745 +64 14 model.feature_map_dropout 0.05983029997164746 +64 14 model.embedding_dim 1.0 +64 14 training.batch_size 1.0 +64 14 training.label_smoothing 0.2522827245351792 +64 15 model.output_channels 40.0 +64 15 model.input_dropout 0.1516111128107947 +64 15 model.output_dropout 0.17712366698103332 +64 15 model.feature_map_dropout 0.32376241299841557 +64 15 model.embedding_dim 0.0 +64 15 training.batch_size 0.0 +64 15 training.label_smoothing 0.003026379885445412 +64 16 model.output_channels 51.0 +64 16 model.input_dropout 0.234871267756538 +64 16 model.output_dropout 0.36168779556509406 +64 16 model.feature_map_dropout 0.304285177357177 +64 16 model.embedding_dim 2.0 +64 16 training.batch_size 2.0 +64 16 training.label_smoothing 0.020590262923705537 +64 17 model.output_channels 30.0 +64 17 model.input_dropout 0.2714864110659995 +64 17 model.output_dropout 0.34418100427903703 +64 17 model.feature_map_dropout 0.17423363396195968 +64 17 model.embedding_dim 1.0 +64 17 training.batch_size 0.0 +64 17 training.label_smoothing 0.2416386528646225 +64 18 model.output_channels 38.0 +64 18 model.input_dropout 0.47739120353824815 +64 18 model.output_dropout 0.09291748919783549 +64 18 model.feature_map_dropout 0.4035924522094403 +64 18 model.embedding_dim 1.0 +64 18 training.batch_size 0.0 +64 18 training.label_smoothing 0.032885089807618006 +64 19 model.output_channels 29.0 +64 19 model.input_dropout 0.4639747297125426 +64 19 model.output_dropout 0.11371646754255071 +64 19 model.feature_map_dropout 0.22558497368026653 +64 19 model.embedding_dim 0.0 +64 19 training.batch_size 0.0 +64 19 training.label_smoothing 0.5340422538680473 +64 20 model.output_channels 44.0 +64 20 model.input_dropout 0.3512068624450603 +64 20 
model.output_dropout 0.41818722926200874 +64 20 model.feature_map_dropout 0.21876331104062852 +64 20 model.embedding_dim 2.0 +64 20 training.batch_size 2.0 +64 20 training.label_smoothing 0.0036547405229763936 +64 21 model.output_channels 17.0 +64 21 model.input_dropout 0.044962694610485576 +64 21 model.output_dropout 0.1751941156179075 +64 21 model.feature_map_dropout 0.3869248967338244 +64 21 model.embedding_dim 0.0 +64 21 training.batch_size 0.0 +64 21 training.label_smoothing 0.0913828708002568 +64 22 model.output_channels 22.0 +64 22 model.input_dropout 0.41907773069161813 +64 22 model.output_dropout 0.2107744714645593 +64 22 model.feature_map_dropout 0.12871158628221674 +64 22 model.embedding_dim 0.0 +64 22 training.batch_size 1.0 +64 22 training.label_smoothing 0.012726412232577911 +64 23 model.output_channels 25.0 +64 23 model.input_dropout 0.45477371241265924 +64 23 model.output_dropout 0.22405677398395585 +64 23 model.feature_map_dropout 0.2588953642976773 +64 23 model.embedding_dim 2.0 +64 23 training.batch_size 1.0 +64 23 training.label_smoothing 0.027940295217445822 +64 24 model.output_channels 62.0 +64 24 model.input_dropout 0.37348122493915265 +64 24 model.output_dropout 0.16712473930566685 +64 24 model.feature_map_dropout 0.05459871005390843 +64 24 model.embedding_dim 1.0 +64 24 training.batch_size 1.0 +64 24 training.label_smoothing 0.0016926582121424363 +64 25 model.output_channels 30.0 +64 25 model.input_dropout 0.24540483017352427 +64 25 model.output_dropout 0.41644985511119953 +64 25 model.feature_map_dropout 0.3970533834630608 +64 25 model.embedding_dim 0.0 +64 25 training.batch_size 2.0 +64 25 training.label_smoothing 0.038904826739076005 +64 26 model.output_channels 47.0 +64 26 model.input_dropout 0.4463224028241073 +64 26 model.output_dropout 0.16759014818257767 +64 26 model.feature_map_dropout 0.4311656693673857 +64 26 model.embedding_dim 0.0 +64 26 training.batch_size 1.0 +64 26 training.label_smoothing 0.023630891269976178 +64 27 
model.output_channels 27.0 +64 27 model.input_dropout 0.03684217313492766 +64 27 model.output_dropout 0.151338318900143 +64 27 model.feature_map_dropout 0.3223005678186204 +64 27 model.embedding_dim 1.0 +64 27 training.batch_size 1.0 +64 27 training.label_smoothing 0.0011431295866126303 +64 28 model.output_channels 57.0 +64 28 model.input_dropout 0.07254716197907057 +64 28 model.output_dropout 0.25546877658999523 +64 28 model.feature_map_dropout 0.11063802513571996 +64 28 model.embedding_dim 2.0 +64 28 training.batch_size 0.0 +64 28 training.label_smoothing 0.0011343850989030202 +64 29 model.output_channels 36.0 +64 29 model.input_dropout 0.039657422181560376 +64 29 model.output_dropout 0.17711869773672612 +64 29 model.feature_map_dropout 0.27666105514162315 +64 29 model.embedding_dim 2.0 +64 29 training.batch_size 1.0 +64 29 training.label_smoothing 0.03728956905795103 +64 30 model.output_channels 38.0 +64 30 model.input_dropout 0.14432552334633247 +64 30 model.output_dropout 0.24166351083940796 +64 30 model.feature_map_dropout 0.12014156639942375 +64 30 model.embedding_dim 2.0 +64 30 training.batch_size 1.0 +64 30 training.label_smoothing 0.07423521005812231 +64 31 model.output_channels 22.0 +64 31 model.input_dropout 0.1517485963931141 +64 31 model.output_dropout 0.1577255490811696 +64 31 model.feature_map_dropout 0.26367813013524716 +64 31 model.embedding_dim 0.0 +64 31 training.batch_size 1.0 +64 31 training.label_smoothing 0.0011344495580198346 +64 32 model.output_channels 22.0 +64 32 model.input_dropout 0.3908194250678014 +64 32 model.output_dropout 0.1561092848660156 +64 32 model.feature_map_dropout 0.06689095742015999 +64 32 model.embedding_dim 1.0 +64 32 training.batch_size 0.0 +64 32 training.label_smoothing 0.0023592595316046907 +64 33 model.output_channels 40.0 +64 33 model.input_dropout 0.40666400246148504 +64 33 model.output_dropout 0.46872001712237515 +64 33 model.feature_map_dropout 0.2683161568319965 +64 33 model.embedding_dim 2.0 +64 33 
training.batch_size 0.0 +64 33 training.label_smoothing 0.0012222195721114434 +64 34 model.output_channels 61.0 +64 34 model.input_dropout 0.3785266800981566 +64 34 model.output_dropout 0.12472263851960402 +64 34 model.feature_map_dropout 0.034899086865353146 +64 34 model.embedding_dim 2.0 +64 34 training.batch_size 2.0 +64 34 training.label_smoothing 0.016987810509971748 +64 35 model.output_channels 61.0 +64 35 model.input_dropout 0.1426159352750686 +64 35 model.output_dropout 0.05758842830240718 +64 35 model.feature_map_dropout 0.12238519465118786 +64 35 model.embedding_dim 0.0 +64 35 training.batch_size 1.0 +64 35 training.label_smoothing 0.0021415063419783728 +64 36 model.output_channels 25.0 +64 36 model.input_dropout 0.06830975809711681 +64 36 model.output_dropout 0.34884328933833975 +64 36 model.feature_map_dropout 0.41645334004996715 +64 36 model.embedding_dim 2.0 +64 36 training.batch_size 0.0 +64 36 training.label_smoothing 0.012146203934888586 +64 37 model.output_channels 43.0 +64 37 model.input_dropout 0.38180198776365754 +64 37 model.output_dropout 0.21751170806331444 +64 37 model.feature_map_dropout 0.42602672097262184 +64 37 model.embedding_dim 1.0 +64 37 training.batch_size 0.0 +64 37 training.label_smoothing 0.0030239542213671487 +64 38 model.output_channels 20.0 +64 38 model.input_dropout 0.4683056621547423 +64 38 model.output_dropout 0.37627110633031396 +64 38 model.feature_map_dropout 0.49444144165606135 +64 38 model.embedding_dim 2.0 +64 38 training.batch_size 2.0 +64 38 training.label_smoothing 0.0037512629388957645 +64 39 model.output_channels 25.0 +64 39 model.input_dropout 0.48470721428405794 +64 39 model.output_dropout 0.03043984894477958 +64 39 model.feature_map_dropout 0.3551611689857297 +64 39 model.embedding_dim 0.0 +64 39 training.batch_size 2.0 +64 39 training.label_smoothing 0.004041172131276778 +64 40 model.output_channels 41.0 +64 40 model.input_dropout 0.49081511473872225 +64 40 model.output_dropout 0.4856833992465404 +64 40 
model.feature_map_dropout 0.24487219357799395 +64 40 model.embedding_dim 0.0 +64 40 training.batch_size 1.0 +64 40 training.label_smoothing 0.002336200078765146 +64 41 model.output_channels 18.0 +64 41 model.input_dropout 0.49359981780389084 +64 41 model.output_dropout 0.008809750198069066 +64 41 model.feature_map_dropout 0.2131021072109308 +64 41 model.embedding_dim 1.0 +64 41 training.batch_size 2.0 +64 41 training.label_smoothing 0.004118629882754678 +64 42 model.output_channels 63.0 +64 42 model.input_dropout 0.43898344573262355 +64 42 model.output_dropout 0.48990493958457937 +64 42 model.feature_map_dropout 0.1315203110198424 +64 42 model.embedding_dim 2.0 +64 42 training.batch_size 0.0 +64 42 training.label_smoothing 0.336069455745821 +64 43 model.output_channels 27.0 +64 43 model.input_dropout 0.4577787742375845 +64 43 model.output_dropout 0.03352753007500925 +64 43 model.feature_map_dropout 0.2556284179525722 +64 43 model.embedding_dim 2.0 +64 43 training.batch_size 0.0 +64 43 training.label_smoothing 0.0010365416882339707 +64 44 model.output_channels 25.0 +64 44 model.input_dropout 0.22716408533936583 +64 44 model.output_dropout 0.05678037949550402 +64 44 model.feature_map_dropout 0.05062259751049969 +64 44 model.embedding_dim 0.0 +64 44 training.batch_size 2.0 +64 44 training.label_smoothing 0.05595447127794273 +64 45 model.output_channels 43.0 +64 45 model.input_dropout 0.141309563694619 +64 45 model.output_dropout 0.3306786646290393 +64 45 model.feature_map_dropout 0.3901711903360622 +64 45 model.embedding_dim 1.0 +64 45 training.batch_size 0.0 +64 45 training.label_smoothing 0.011984772508542789 +64 46 model.output_channels 40.0 +64 46 model.input_dropout 0.35600981879422694 +64 46 model.output_dropout 0.39464816955431015 +64 46 model.feature_map_dropout 0.4852162454805659 +64 46 model.embedding_dim 0.0 +64 46 training.batch_size 2.0 +64 46 training.label_smoothing 0.006106633395700088 +64 47 model.output_channels 37.0 +64 47 model.input_dropout 
0.35216394359011116 +64 47 model.output_dropout 0.09120371236443564 +64 47 model.feature_map_dropout 0.07582024098391121 +64 47 model.embedding_dim 1.0 +64 47 training.batch_size 1.0 +64 47 training.label_smoothing 0.061780823419045845 +64 48 model.output_channels 40.0 +64 48 model.input_dropout 0.00144355700463078 +64 48 model.output_dropout 0.35787092018571287 +64 48 model.feature_map_dropout 0.369153769935935 +64 48 model.embedding_dim 0.0 +64 48 training.batch_size 1.0 +64 48 training.label_smoothing 0.4049060915036181 +64 49 model.output_channels 52.0 +64 49 model.input_dropout 0.24154176235746683 +64 49 model.output_dropout 0.2730161447025393 +64 49 model.feature_map_dropout 0.1946990962152495 +64 49 model.embedding_dim 1.0 +64 49 training.batch_size 1.0 +64 49 training.label_smoothing 0.16488630112790756 +64 50 model.output_channels 51.0 +64 50 model.input_dropout 0.24171897079825044 +64 50 model.output_dropout 0.2966781122354333 +64 50 model.feature_map_dropout 0.2179547355960172 +64 50 model.embedding_dim 2.0 +64 50 training.batch_size 0.0 +64 50 training.label_smoothing 0.006561011606636365 +64 51 model.output_channels 61.0 +64 51 model.input_dropout 0.3306942727641607 +64 51 model.output_dropout 0.39383830100575895 +64 51 model.feature_map_dropout 0.3473388687914307 +64 51 model.embedding_dim 1.0 +64 51 training.batch_size 1.0 +64 51 training.label_smoothing 0.2602341283867238 +64 52 model.output_channels 42.0 +64 52 model.input_dropout 0.2619292242443919 +64 52 model.output_dropout 0.21985272308251197 +64 52 model.feature_map_dropout 0.4114341615273851 +64 52 model.embedding_dim 1.0 +64 52 training.batch_size 1.0 +64 52 training.label_smoothing 0.3475332184383729 +64 53 model.output_channels 51.0 +64 53 model.input_dropout 0.33407338662095315 +64 53 model.output_dropout 0.3984256177813372 +64 53 model.feature_map_dropout 0.3683666839965176 +64 53 model.embedding_dim 1.0 +64 53 training.batch_size 2.0 +64 53 training.label_smoothing 0.8564491472181164 
+64 54 model.output_channels 61.0 +64 54 model.input_dropout 0.2094396327729866 +64 54 model.output_dropout 0.49117231161280633 +64 54 model.feature_map_dropout 0.000719067601354817 +64 54 model.embedding_dim 0.0 +64 54 training.batch_size 1.0 +64 54 training.label_smoothing 0.04737312346016776 +64 55 model.output_channels 55.0 +64 55 model.input_dropout 0.39392156053947 +64 55 model.output_dropout 0.4015300533842502 +64 55 model.feature_map_dropout 0.1653234468571314 +64 55 model.embedding_dim 1.0 +64 55 training.batch_size 1.0 +64 55 training.label_smoothing 0.0017343231478872143 +64 56 model.output_channels 31.0 +64 56 model.input_dropout 0.0986612430504093 +64 56 model.output_dropout 0.34894058537461575 +64 56 model.feature_map_dropout 0.21925890650008695 +64 56 model.embedding_dim 2.0 +64 56 training.batch_size 2.0 +64 56 training.label_smoothing 0.020753969376186183 +64 57 model.output_channels 57.0 +64 57 model.input_dropout 0.46567695635569883 +64 57 model.output_dropout 0.3378297674362342 +64 57 model.feature_map_dropout 0.048254258764402314 +64 57 model.embedding_dim 2.0 +64 57 training.batch_size 1.0 +64 57 training.label_smoothing 0.006285546309067663 +64 58 model.output_channels 54.0 +64 58 model.input_dropout 0.32466055843285535 +64 58 model.output_dropout 0.22261590581077723 +64 58 model.feature_map_dropout 0.35185112320109485 +64 58 model.embedding_dim 2.0 +64 58 training.batch_size 1.0 +64 58 training.label_smoothing 0.8764471293999838 +64 59 model.output_channels 24.0 +64 59 model.input_dropout 0.017746501426998296 +64 59 model.output_dropout 0.307929607687409 +64 59 model.feature_map_dropout 0.4777508576673612 +64 59 model.embedding_dim 2.0 +64 59 training.batch_size 1.0 +64 59 training.label_smoothing 0.025503133288803412 +64 60 model.output_channels 45.0 +64 60 model.input_dropout 0.2632211367533913 +64 60 model.output_dropout 0.08977917965337123 +64 60 model.feature_map_dropout 0.2356591314978405 +64 60 model.embedding_dim 2.0 +64 60 
training.batch_size 1.0 +64 60 training.label_smoothing 0.002314677491128782 +64 61 model.output_channels 58.0 +64 61 model.input_dropout 0.03611854855732466 +64 61 model.output_dropout 0.07286344995067012 +64 61 model.feature_map_dropout 0.001662370605942487 +64 61 model.embedding_dim 1.0 +64 61 training.batch_size 2.0 +64 61 training.label_smoothing 0.16051581926445826 +64 62 model.output_channels 32.0 +64 62 model.input_dropout 0.4025736638368423 +64 62 model.output_dropout 0.13230400815032328 +64 62 model.feature_map_dropout 0.2982350900718344 +64 62 model.embedding_dim 2.0 +64 62 training.batch_size 1.0 +64 62 training.label_smoothing 0.0019337300546395059 +64 63 model.output_channels 54.0 +64 63 model.input_dropout 0.3752759738726497 +64 63 model.output_dropout 0.14618445317918993 +64 63 model.feature_map_dropout 0.32349189300320896 +64 63 model.embedding_dim 2.0 +64 63 training.batch_size 1.0 +64 63 training.label_smoothing 0.010899626111617546 +64 64 model.output_channels 45.0 +64 64 model.input_dropout 0.14916639196914516 +64 64 model.output_dropout 0.38049945051033207 +64 64 model.feature_map_dropout 0.0864198793762701 +64 64 model.embedding_dim 1.0 +64 64 training.batch_size 2.0 +64 64 training.label_smoothing 0.0020025559831430514 +64 65 model.output_channels 17.0 +64 65 model.input_dropout 0.18002644384571426 +64 65 model.output_dropout 0.04228200675417132 +64 65 model.feature_map_dropout 0.39312506810973197 +64 65 model.embedding_dim 0.0 +64 65 training.batch_size 1.0 +64 65 training.label_smoothing 0.05087050105724877 +64 66 model.output_channels 52.0 +64 66 model.input_dropout 0.002478392769003135 +64 66 model.output_dropout 0.290155047966339 +64 66 model.feature_map_dropout 0.43843673956242196 +64 66 model.embedding_dim 0.0 +64 66 training.batch_size 1.0 +64 66 training.label_smoothing 0.13872460816961169 +64 67 model.output_channels 40.0 +64 67 model.input_dropout 0.2785028214695116 +64 67 model.output_dropout 0.4412739804651992 +64 67 
model.feature_map_dropout 0.15246823374777385 +64 67 model.embedding_dim 1.0 +64 67 training.batch_size 0.0 +64 67 training.label_smoothing 0.026237261768287338 +64 68 model.output_channels 53.0 +64 68 model.input_dropout 0.46846451958876767 +64 68 model.output_dropout 0.05239790097590008 +64 68 model.feature_map_dropout 0.20648969129514017 +64 68 model.embedding_dim 2.0 +64 68 training.batch_size 0.0 +64 68 training.label_smoothing 0.08109278110630908 +64 69 model.output_channels 50.0 +64 69 model.input_dropout 0.4416023809567389 +64 69 model.output_dropout 0.3692082807398074 +64 69 model.feature_map_dropout 0.48991955367600326 +64 69 model.embedding_dim 2.0 +64 69 training.batch_size 1.0 +64 69 training.label_smoothing 0.08455069639229092 +64 70 model.output_channels 47.0 +64 70 model.input_dropout 0.006541869625951768 +64 70 model.output_dropout 0.07379227810035194 +64 70 model.feature_map_dropout 0.21074160527430796 +64 70 model.embedding_dim 0.0 +64 70 training.batch_size 0.0 +64 70 training.label_smoothing 0.019780136338187846 +64 71 model.output_channels 59.0 +64 71 model.input_dropout 0.13855800076314845 +64 71 model.output_dropout 0.3064503751629771 +64 71 model.feature_map_dropout 0.4998599372815754 +64 71 model.embedding_dim 0.0 +64 71 training.batch_size 0.0 +64 71 training.label_smoothing 0.28960796536435424 +64 72 model.output_channels 49.0 +64 72 model.input_dropout 0.21706766161821917 +64 72 model.output_dropout 0.41576490190271 +64 72 model.feature_map_dropout 0.23526563676902584 +64 72 model.embedding_dim 2.0 +64 72 training.batch_size 1.0 +64 72 training.label_smoothing 0.008911112673938423 +64 73 model.output_channels 41.0 +64 73 model.input_dropout 0.10379663728910732 +64 73 model.output_dropout 0.032224434878369734 +64 73 model.feature_map_dropout 0.3627587562231335 +64 73 model.embedding_dim 0.0 +64 73 training.batch_size 2.0 +64 73 training.label_smoothing 0.0014775349125971254 +64 74 model.output_channels 45.0 +64 74 model.input_dropout 
0.4518386342862914 +64 74 model.output_dropout 0.18510787106425824 +64 74 model.feature_map_dropout 0.3953243695287089 +64 74 model.embedding_dim 1.0 +64 74 training.batch_size 2.0 +64 74 training.label_smoothing 0.026938766842731866 +64 75 model.output_channels 60.0 +64 75 model.input_dropout 0.16656371013658577 +64 75 model.output_dropout 0.049436902631645185 +64 75 model.feature_map_dropout 0.13140451047102125 +64 75 model.embedding_dim 2.0 +64 75 training.batch_size 1.0 +64 75 training.label_smoothing 0.007744534141810205 +64 76 model.output_channels 53.0 +64 76 model.input_dropout 0.03738691974403102 +64 76 model.output_dropout 0.4856942670599985 +64 76 model.feature_map_dropout 0.01625623758768263 +64 76 model.embedding_dim 1.0 +64 76 training.batch_size 2.0 +64 76 training.label_smoothing 0.03599601853744392 +64 77 model.output_channels 23.0 +64 77 model.input_dropout 0.0008537737748354757 +64 77 model.output_dropout 0.005283966510719107 +64 77 model.feature_map_dropout 0.4080188785166255 +64 77 model.embedding_dim 1.0 +64 77 training.batch_size 2.0 +64 77 training.label_smoothing 0.047603834972197646 +64 78 model.output_channels 43.0 +64 78 model.input_dropout 0.30458550403779416 +64 78 model.output_dropout 0.49226512986873844 +64 78 model.feature_map_dropout 0.12338885545640094 +64 78 model.embedding_dim 1.0 +64 78 training.batch_size 0.0 +64 78 training.label_smoothing 0.03229151050831918 +64 79 model.output_channels 25.0 +64 79 model.input_dropout 0.1079561314525681 +64 79 model.output_dropout 0.13213719168398824 +64 79 model.feature_map_dropout 0.2593787980015315 +64 79 model.embedding_dim 1.0 +64 79 training.batch_size 0.0 +64 79 training.label_smoothing 0.006457507830001369 +64 80 model.output_channels 27.0 +64 80 model.input_dropout 0.34986751282780276 +64 80 model.output_dropout 0.44585339669332197 +64 80 model.feature_map_dropout 0.3402639324222158 +64 80 model.embedding_dim 1.0 +64 80 training.batch_size 0.0 +64 80 training.label_smoothing 
0.014207581422509167 +64 81 model.output_channels 63.0 +64 81 model.input_dropout 0.3526410654561288 +64 81 model.output_dropout 0.4245469640132766 +64 81 model.feature_map_dropout 0.17635613760692676 +64 81 model.embedding_dim 2.0 +64 81 training.batch_size 1.0 +64 81 training.label_smoothing 0.2753844996865914 +64 82 model.output_channels 36.0 +64 82 model.input_dropout 0.051614771078742905 +64 82 model.output_dropout 0.15060026499965973 +64 82 model.feature_map_dropout 0.3619459620263375 +64 82 model.embedding_dim 0.0 +64 82 training.batch_size 0.0 +64 82 training.label_smoothing 0.0011462881724381587 +64 83 model.output_channels 20.0 +64 83 model.input_dropout 0.17508394253823012 +64 83 model.output_dropout 0.1092583099683338 +64 83 model.feature_map_dropout 0.2711899606789271 +64 83 model.embedding_dim 1.0 +64 83 training.batch_size 2.0 +64 83 training.label_smoothing 0.5656205546815856 +64 84 model.output_channels 56.0 +64 84 model.input_dropout 0.09239282044676317 +64 84 model.output_dropout 0.20264965309727723 +64 84 model.feature_map_dropout 0.46092547450534543 +64 84 model.embedding_dim 1.0 +64 84 training.batch_size 0.0 +64 84 training.label_smoothing 0.016200342008263622 +64 85 model.output_channels 30.0 +64 85 model.input_dropout 0.15483292373630575 +64 85 model.output_dropout 0.04760709731694124 +64 85 model.feature_map_dropout 0.17037246823927032 +64 85 model.embedding_dim 1.0 +64 85 training.batch_size 2.0 +64 85 training.label_smoothing 0.3491535435650074 +64 86 model.output_channels 21.0 +64 86 model.input_dropout 0.25246341404902606 +64 86 model.output_dropout 0.4668159146309519 +64 86 model.feature_map_dropout 0.18005702934474122 +64 86 model.embedding_dim 1.0 +64 86 training.batch_size 0.0 +64 86 training.label_smoothing 0.02565803504607864 +64 87 model.output_channels 40.0 +64 87 model.input_dropout 0.03625112883481402 +64 87 model.output_dropout 0.44528256978752795 +64 87 model.feature_map_dropout 0.25156995624248457 +64 87 
model.embedding_dim 2.0 +64 87 training.batch_size 0.0 +64 87 training.label_smoothing 0.6190258081091679 +64 88 model.output_channels 59.0 +64 88 model.input_dropout 0.4087264454434957 +64 88 model.output_dropout 0.02034085978337785 +64 88 model.feature_map_dropout 0.1597569799953854 +64 88 model.embedding_dim 2.0 +64 88 training.batch_size 2.0 +64 88 training.label_smoothing 0.13422426150003805 +64 89 model.output_channels 34.0 +64 89 model.input_dropout 0.4518438867177818 +64 89 model.output_dropout 0.17113322553419874 +64 89 model.feature_map_dropout 0.45443055620746325 +64 89 model.embedding_dim 1.0 +64 89 training.batch_size 0.0 +64 89 training.label_smoothing 0.002694967084667852 +64 90 model.output_channels 30.0 +64 90 model.input_dropout 0.013795652145049686 +64 90 model.output_dropout 0.14385842116131775 +64 90 model.feature_map_dropout 0.3936761885116417 +64 90 model.embedding_dim 1.0 +64 90 training.batch_size 2.0 +64 90 training.label_smoothing 0.3265811037019558 +64 91 model.output_channels 34.0 +64 91 model.input_dropout 0.3529846032424898 +64 91 model.output_dropout 0.059299238314514 +64 91 model.feature_map_dropout 0.4525435123320083 +64 91 model.embedding_dim 1.0 +64 91 training.batch_size 2.0 +64 91 training.label_smoothing 0.06900051442016947 +64 92 model.output_channels 61.0 +64 92 model.input_dropout 0.05421944478780466 +64 92 model.output_dropout 0.06996539390889489 +64 92 model.feature_map_dropout 0.05057892955689608 +64 92 model.embedding_dim 1.0 +64 92 training.batch_size 1.0 +64 92 training.label_smoothing 0.0013176393545750306 +64 93 model.output_channels 23.0 +64 93 model.input_dropout 0.46121496638602666 +64 93 model.output_dropout 0.03925405413134336 +64 93 model.feature_map_dropout 0.36154852701400475 +64 93 model.embedding_dim 2.0 +64 93 training.batch_size 1.0 +64 93 training.label_smoothing 0.9176906795234521 +64 94 model.output_channels 51.0 +64 94 model.input_dropout 0.30145747947927004 +64 94 model.output_dropout 
0.25225210066484477 +64 94 model.feature_map_dropout 0.49763977767054307 +64 94 model.embedding_dim 2.0 +64 94 training.batch_size 0.0 +64 94 training.label_smoothing 0.008905121523749402 +64 95 model.output_channels 18.0 +64 95 model.input_dropout 0.2659808274592378 +64 95 model.output_dropout 0.2521018657953846 +64 95 model.feature_map_dropout 0.38699327675456996 +64 95 model.embedding_dim 0.0 +64 95 training.batch_size 1.0 +64 95 training.label_smoothing 0.05985701219895964 +64 96 model.output_channels 31.0 +64 96 model.input_dropout 0.2684649544128503 +64 96 model.output_dropout 0.4155495302398244 +64 96 model.feature_map_dropout 0.26109079987974504 +64 96 model.embedding_dim 1.0 +64 96 training.batch_size 2.0 +64 96 training.label_smoothing 0.09215512158916796 +64 97 model.output_channels 46.0 +64 97 model.input_dropout 0.1806253166670122 +64 97 model.output_dropout 0.07461167347666975 +64 97 model.feature_map_dropout 0.47572510155345843 +64 97 model.embedding_dim 2.0 +64 97 training.batch_size 0.0 +64 97 training.label_smoothing 0.012606239861539999 +64 98 model.output_channels 62.0 +64 98 model.input_dropout 0.3304678190299257 +64 98 model.output_dropout 0.35597767282447457 +64 98 model.feature_map_dropout 0.09054457311728803 +64 98 model.embedding_dim 1.0 +64 98 training.batch_size 0.0 +64 98 training.label_smoothing 0.00622361092811663 +64 99 model.output_channels 59.0 +64 99 model.input_dropout 0.23383533139586332 +64 99 model.output_dropout 0.2281625904831573 +64 99 model.feature_map_dropout 0.38202855330336744 +64 99 model.embedding_dim 2.0 +64 99 training.batch_size 0.0 +64 99 training.label_smoothing 0.01712880547895656 +64 100 model.output_channels 34.0 +64 100 model.input_dropout 0.342395016071848 +64 100 model.output_dropout 0.35334799118995175 +64 100 model.feature_map_dropout 0.307642018324818 +64 100 model.embedding_dim 2.0 +64 100 training.batch_size 0.0 +64 100 training.label_smoothing 0.32375082990464793 +64 1 dataset """kinships""" +64 1 
model """conve""" +64 1 loss """crossentropy""" +64 1 regularizer """no""" +64 1 optimizer """adadelta""" +64 1 training_loop """lcwa""" +64 1 evaluator """rankbased""" +64 2 dataset """kinships""" +64 2 model """conve""" +64 2 loss """crossentropy""" +64 2 regularizer """no""" +64 2 optimizer """adadelta""" +64 2 training_loop """lcwa""" +64 2 evaluator """rankbased""" +64 3 dataset """kinships""" +64 3 model """conve""" +64 3 loss """crossentropy""" +64 3 regularizer """no""" +64 3 optimizer """adadelta""" +64 3 training_loop """lcwa""" +64 3 evaluator """rankbased""" +64 4 dataset """kinships""" +64 4 model """conve""" +64 4 loss """crossentropy""" +64 4 regularizer """no""" +64 4 optimizer """adadelta""" +64 4 training_loop """lcwa""" +64 4 evaluator """rankbased""" +64 5 dataset """kinships""" +64 5 model """conve""" +64 5 loss """crossentropy""" +64 5 regularizer """no""" +64 5 optimizer """adadelta""" +64 5 training_loop """lcwa""" +64 5 evaluator """rankbased""" +64 6 dataset """kinships""" +64 6 model """conve""" +64 6 loss """crossentropy""" +64 6 regularizer """no""" +64 6 optimizer """adadelta""" +64 6 training_loop """lcwa""" +64 6 evaluator """rankbased""" +64 7 dataset """kinships""" +64 7 model """conve""" +64 7 loss """crossentropy""" +64 7 regularizer """no""" +64 7 optimizer """adadelta""" +64 7 training_loop """lcwa""" +64 7 evaluator """rankbased""" +64 8 dataset """kinships""" +64 8 model """conve""" +64 8 loss """crossentropy""" +64 8 regularizer """no""" +64 8 optimizer """adadelta""" +64 8 training_loop """lcwa""" +64 8 evaluator """rankbased""" +64 9 dataset """kinships""" +64 9 model """conve""" +64 9 loss """crossentropy""" +64 9 regularizer """no""" +64 9 optimizer """adadelta""" +64 9 training_loop """lcwa""" +64 9 evaluator """rankbased""" +64 10 dataset """kinships""" +64 10 model """conve""" +64 10 loss """crossentropy""" +64 10 regularizer """no""" +64 10 optimizer """adadelta""" +64 10 training_loop """lcwa""" +64 10 evaluator 
"""rankbased""" +64 11 dataset """kinships""" +64 11 model """conve""" +64 11 loss """crossentropy""" +64 11 regularizer """no""" +64 11 optimizer """adadelta""" +64 11 training_loop """lcwa""" +64 11 evaluator """rankbased""" +64 12 dataset """kinships""" +64 12 model """conve""" +64 12 loss """crossentropy""" +64 12 regularizer """no""" +64 12 optimizer """adadelta""" +64 12 training_loop """lcwa""" +64 12 evaluator """rankbased""" +64 13 dataset """kinships""" +64 13 model """conve""" +64 13 loss """crossentropy""" +64 13 regularizer """no""" +64 13 optimizer """adadelta""" +64 13 training_loop """lcwa""" +64 13 evaluator """rankbased""" +64 14 dataset """kinships""" +64 14 model """conve""" +64 14 loss """crossentropy""" +64 14 regularizer """no""" +64 14 optimizer """adadelta""" +64 14 training_loop """lcwa""" +64 14 evaluator """rankbased""" +64 15 dataset """kinships""" +64 15 model """conve""" +64 15 loss """crossentropy""" +64 15 regularizer """no""" +64 15 optimizer """adadelta""" +64 15 training_loop """lcwa""" +64 15 evaluator """rankbased""" +64 16 dataset """kinships""" +64 16 model """conve""" +64 16 loss """crossentropy""" +64 16 regularizer """no""" +64 16 optimizer """adadelta""" +64 16 training_loop """lcwa""" +64 16 evaluator """rankbased""" +64 17 dataset """kinships""" +64 17 model """conve""" +64 17 loss """crossentropy""" +64 17 regularizer """no""" +64 17 optimizer """adadelta""" +64 17 training_loop """lcwa""" +64 17 evaluator """rankbased""" +64 18 dataset """kinships""" +64 18 model """conve""" +64 18 loss """crossentropy""" +64 18 regularizer """no""" +64 18 optimizer """adadelta""" +64 18 training_loop """lcwa""" +64 18 evaluator """rankbased""" +64 19 dataset """kinships""" +64 19 model """conve""" +64 19 loss """crossentropy""" +64 19 regularizer """no""" +64 19 optimizer """adadelta""" +64 19 training_loop """lcwa""" +64 19 evaluator """rankbased""" +64 20 dataset """kinships""" +64 20 model """conve""" +64 20 loss 
"""crossentropy""" +64 20 regularizer """no""" +64 20 optimizer """adadelta""" +64 20 training_loop """lcwa""" +64 20 evaluator """rankbased""" +64 21 dataset """kinships""" +64 21 model """conve""" +64 21 loss """crossentropy""" +64 21 regularizer """no""" +64 21 optimizer """adadelta""" +64 21 training_loop """lcwa""" +64 21 evaluator """rankbased""" +64 22 dataset """kinships""" +64 22 model """conve""" +64 22 loss """crossentropy""" +64 22 regularizer """no""" +64 22 optimizer """adadelta""" +64 22 training_loop """lcwa""" +64 22 evaluator """rankbased""" +64 23 dataset """kinships""" +64 23 model """conve""" +64 23 loss """crossentropy""" +64 23 regularizer """no""" +64 23 optimizer """adadelta""" +64 23 training_loop """lcwa""" +64 23 evaluator """rankbased""" +64 24 dataset """kinships""" +64 24 model """conve""" +64 24 loss """crossentropy""" +64 24 regularizer """no""" +64 24 optimizer """adadelta""" +64 24 training_loop """lcwa""" +64 24 evaluator """rankbased""" +64 25 dataset """kinships""" +64 25 model """conve""" +64 25 loss """crossentropy""" +64 25 regularizer """no""" +64 25 optimizer """adadelta""" +64 25 training_loop """lcwa""" +64 25 evaluator """rankbased""" +64 26 dataset """kinships""" +64 26 model """conve""" +64 26 loss """crossentropy""" +64 26 regularizer """no""" +64 26 optimizer """adadelta""" +64 26 training_loop """lcwa""" +64 26 evaluator """rankbased""" +64 27 dataset """kinships""" +64 27 model """conve""" +64 27 loss """crossentropy""" +64 27 regularizer """no""" +64 27 optimizer """adadelta""" +64 27 training_loop """lcwa""" +64 27 evaluator """rankbased""" +64 28 dataset """kinships""" +64 28 model """conve""" +64 28 loss """crossentropy""" +64 28 regularizer """no""" +64 28 optimizer """adadelta""" +64 28 training_loop """lcwa""" +64 28 evaluator """rankbased""" +64 29 dataset """kinships""" +64 29 model """conve""" +64 29 loss """crossentropy""" +64 29 regularizer """no""" +64 29 optimizer """adadelta""" +64 29 training_loop 
"""lcwa""" +64 29 evaluator """rankbased""" +64 30 dataset """kinships""" +64 30 model """conve""" +64 30 loss """crossentropy""" +64 30 regularizer """no""" +64 30 optimizer """adadelta""" +64 30 training_loop """lcwa""" +64 30 evaluator """rankbased""" +64 31 dataset """kinships""" +64 31 model """conve""" +64 31 loss """crossentropy""" +64 31 regularizer """no""" +64 31 optimizer """adadelta""" +64 31 training_loop """lcwa""" +64 31 evaluator """rankbased""" +64 32 dataset """kinships""" +64 32 model """conve""" +64 32 loss """crossentropy""" +64 32 regularizer """no""" +64 32 optimizer """adadelta""" +64 32 training_loop """lcwa""" +64 32 evaluator """rankbased""" +64 33 dataset """kinships""" +64 33 model """conve""" +64 33 loss """crossentropy""" +64 33 regularizer """no""" +64 33 optimizer """adadelta""" +64 33 training_loop """lcwa""" +64 33 evaluator """rankbased""" +64 34 dataset """kinships""" +64 34 model """conve""" +64 34 loss """crossentropy""" +64 34 regularizer """no""" +64 34 optimizer """adadelta""" +64 34 training_loop """lcwa""" +64 34 evaluator """rankbased""" +64 35 dataset """kinships""" +64 35 model """conve""" +64 35 loss """crossentropy""" +64 35 regularizer """no""" +64 35 optimizer """adadelta""" +64 35 training_loop """lcwa""" +64 35 evaluator """rankbased""" +64 36 dataset """kinships""" +64 36 model """conve""" +64 36 loss """crossentropy""" +64 36 regularizer """no""" +64 36 optimizer """adadelta""" +64 36 training_loop """lcwa""" +64 36 evaluator """rankbased""" +64 37 dataset """kinships""" +64 37 model """conve""" +64 37 loss """crossentropy""" +64 37 regularizer """no""" +64 37 optimizer """adadelta""" +64 37 training_loop """lcwa""" +64 37 evaluator """rankbased""" +64 38 dataset """kinships""" +64 38 model """conve""" +64 38 loss """crossentropy""" +64 38 regularizer """no""" +64 38 optimizer """adadelta""" +64 38 training_loop """lcwa""" +64 38 evaluator """rankbased""" +64 39 dataset """kinships""" +64 39 model """conve""" 
+64 39 loss """crossentropy""" +64 39 regularizer """no""" +64 39 optimizer """adadelta""" +64 39 training_loop """lcwa""" +64 39 evaluator """rankbased""" +64 40 dataset """kinships""" +64 40 model """conve""" +64 40 loss """crossentropy""" +64 40 regularizer """no""" +64 40 optimizer """adadelta""" +64 40 training_loop """lcwa""" +64 40 evaluator """rankbased""" +64 41 dataset """kinships""" +64 41 model """conve""" +64 41 loss """crossentropy""" +64 41 regularizer """no""" +64 41 optimizer """adadelta""" +64 41 training_loop """lcwa""" +64 41 evaluator """rankbased""" +64 42 dataset """kinships""" +64 42 model """conve""" +64 42 loss """crossentropy""" +64 42 regularizer """no""" +64 42 optimizer """adadelta""" +64 42 training_loop """lcwa""" +64 42 evaluator """rankbased""" +64 43 dataset """kinships""" +64 43 model """conve""" +64 43 loss """crossentropy""" +64 43 regularizer """no""" +64 43 optimizer """adadelta""" +64 43 training_loop """lcwa""" +64 43 evaluator """rankbased""" +64 44 dataset """kinships""" +64 44 model """conve""" +64 44 loss """crossentropy""" +64 44 regularizer """no""" +64 44 optimizer """adadelta""" +64 44 training_loop """lcwa""" +64 44 evaluator """rankbased""" +64 45 dataset """kinships""" +64 45 model """conve""" +64 45 loss """crossentropy""" +64 45 regularizer """no""" +64 45 optimizer """adadelta""" +64 45 training_loop """lcwa""" +64 45 evaluator """rankbased""" +64 46 dataset """kinships""" +64 46 model """conve""" +64 46 loss """crossentropy""" +64 46 regularizer """no""" +64 46 optimizer """adadelta""" +64 46 training_loop """lcwa""" +64 46 evaluator """rankbased""" +64 47 dataset """kinships""" +64 47 model """conve""" +64 47 loss """crossentropy""" +64 47 regularizer """no""" +64 47 optimizer """adadelta""" +64 47 training_loop """lcwa""" +64 47 evaluator """rankbased""" +64 48 dataset """kinships""" +64 48 model """conve""" +64 48 loss """crossentropy""" +64 48 regularizer """no""" +64 48 optimizer """adadelta""" +64 48 
training_loop """lcwa""" +64 48 evaluator """rankbased""" +64 49 dataset """kinships""" +64 49 model """conve""" +64 49 loss """crossentropy""" +64 49 regularizer """no""" +64 49 optimizer """adadelta""" +64 49 training_loop """lcwa""" +64 49 evaluator """rankbased""" +64 50 dataset """kinships""" +64 50 model """conve""" +64 50 loss """crossentropy""" +64 50 regularizer """no""" +64 50 optimizer """adadelta""" +64 50 training_loop """lcwa""" +64 50 evaluator """rankbased""" +64 51 dataset """kinships""" +64 51 model """conve""" +64 51 loss """crossentropy""" +64 51 regularizer """no""" +64 51 optimizer """adadelta""" +64 51 training_loop """lcwa""" +64 51 evaluator """rankbased""" +64 52 dataset """kinships""" +64 52 model """conve""" +64 52 loss """crossentropy""" +64 52 regularizer """no""" +64 52 optimizer """adadelta""" +64 52 training_loop """lcwa""" +64 52 evaluator """rankbased""" +64 53 dataset """kinships""" +64 53 model """conve""" +64 53 loss """crossentropy""" +64 53 regularizer """no""" +64 53 optimizer """adadelta""" +64 53 training_loop """lcwa""" +64 53 evaluator """rankbased""" +64 54 dataset """kinships""" +64 54 model """conve""" +64 54 loss """crossentropy""" +64 54 regularizer """no""" +64 54 optimizer """adadelta""" +64 54 training_loop """lcwa""" +64 54 evaluator """rankbased""" +64 55 dataset """kinships""" +64 55 model """conve""" +64 55 loss """crossentropy""" +64 55 regularizer """no""" +64 55 optimizer """adadelta""" +64 55 training_loop """lcwa""" +64 55 evaluator """rankbased""" +64 56 dataset """kinships""" +64 56 model """conve""" +64 56 loss """crossentropy""" +64 56 regularizer """no""" +64 56 optimizer """adadelta""" +64 56 training_loop """lcwa""" +64 56 evaluator """rankbased""" +64 57 dataset """kinships""" +64 57 model """conve""" +64 57 loss """crossentropy""" +64 57 regularizer """no""" +64 57 optimizer """adadelta""" +64 57 training_loop """lcwa""" +64 57 evaluator """rankbased""" +64 58 dataset """kinships""" +64 58 model 
"""conve""" +64 58 loss """crossentropy""" +64 58 regularizer """no""" +64 58 optimizer """adadelta""" +64 58 training_loop """lcwa""" +64 58 evaluator """rankbased""" +64 59 dataset """kinships""" +64 59 model """conve""" +64 59 loss """crossentropy""" +64 59 regularizer """no""" +64 59 optimizer """adadelta""" +64 59 training_loop """lcwa""" +64 59 evaluator """rankbased""" +64 60 dataset """kinships""" +64 60 model """conve""" +64 60 loss """crossentropy""" +64 60 regularizer """no""" +64 60 optimizer """adadelta""" +64 60 training_loop """lcwa""" +64 60 evaluator """rankbased""" +64 61 dataset """kinships""" +64 61 model """conve""" +64 61 loss """crossentropy""" +64 61 regularizer """no""" +64 61 optimizer """adadelta""" +64 61 training_loop """lcwa""" +64 61 evaluator """rankbased""" +64 62 dataset """kinships""" +64 62 model """conve""" +64 62 loss """crossentropy""" +64 62 regularizer """no""" +64 62 optimizer """adadelta""" +64 62 training_loop """lcwa""" +64 62 evaluator """rankbased""" +64 63 dataset """kinships""" +64 63 model """conve""" +64 63 loss """crossentropy""" +64 63 regularizer """no""" +64 63 optimizer """adadelta""" +64 63 training_loop """lcwa""" +64 63 evaluator """rankbased""" +64 64 dataset """kinships""" +64 64 model """conve""" +64 64 loss """crossentropy""" +64 64 regularizer """no""" +64 64 optimizer """adadelta""" +64 64 training_loop """lcwa""" +64 64 evaluator """rankbased""" +64 65 dataset """kinships""" +64 65 model """conve""" +64 65 loss """crossentropy""" +64 65 regularizer """no""" +64 65 optimizer """adadelta""" +64 65 training_loop """lcwa""" +64 65 evaluator """rankbased""" +64 66 dataset """kinships""" +64 66 model """conve""" +64 66 loss """crossentropy""" +64 66 regularizer """no""" +64 66 optimizer """adadelta""" +64 66 training_loop """lcwa""" +64 66 evaluator """rankbased""" +64 67 dataset """kinships""" +64 67 model """conve""" +64 67 loss """crossentropy""" +64 67 regularizer """no""" +64 67 optimizer 
"""adadelta""" +64 67 training_loop """lcwa""" +64 67 evaluator """rankbased""" +64 68 dataset """kinships""" +64 68 model """conve""" +64 68 loss """crossentropy""" +64 68 regularizer """no""" +64 68 optimizer """adadelta""" +64 68 training_loop """lcwa""" +64 68 evaluator """rankbased""" +64 69 dataset """kinships""" +64 69 model """conve""" +64 69 loss """crossentropy""" +64 69 regularizer """no""" +64 69 optimizer """adadelta""" +64 69 training_loop """lcwa""" +64 69 evaluator """rankbased""" +64 70 dataset """kinships""" +64 70 model """conve""" +64 70 loss """crossentropy""" +64 70 regularizer """no""" +64 70 optimizer """adadelta""" +64 70 training_loop """lcwa""" +64 70 evaluator """rankbased""" +64 71 dataset """kinships""" +64 71 model """conve""" +64 71 loss """crossentropy""" +64 71 regularizer """no""" +64 71 optimizer """adadelta""" +64 71 training_loop """lcwa""" +64 71 evaluator """rankbased""" +64 72 dataset """kinships""" +64 72 model """conve""" +64 72 loss """crossentropy""" +64 72 regularizer """no""" +64 72 optimizer """adadelta""" +64 72 training_loop """lcwa""" +64 72 evaluator """rankbased""" +64 73 dataset """kinships""" +64 73 model """conve""" +64 73 loss """crossentropy""" +64 73 regularizer """no""" +64 73 optimizer """adadelta""" +64 73 training_loop """lcwa""" +64 73 evaluator """rankbased""" +64 74 dataset """kinships""" +64 74 model """conve""" +64 74 loss """crossentropy""" +64 74 regularizer """no""" +64 74 optimizer """adadelta""" +64 74 training_loop """lcwa""" +64 74 evaluator """rankbased""" +64 75 dataset """kinships""" +64 75 model """conve""" +64 75 loss """crossentropy""" +64 75 regularizer """no""" +64 75 optimizer """adadelta""" +64 75 training_loop """lcwa""" +64 75 evaluator """rankbased""" +64 76 dataset """kinships""" +64 76 model """conve""" +64 76 loss """crossentropy""" +64 76 regularizer """no""" +64 76 optimizer """adadelta""" +64 76 training_loop """lcwa""" +64 76 evaluator """rankbased""" +64 77 dataset 
"""kinships""" +64 77 model """conve""" +64 77 loss """crossentropy""" +64 77 regularizer """no""" +64 77 optimizer """adadelta""" +64 77 training_loop """lcwa""" +64 77 evaluator """rankbased""" +64 78 dataset """kinships""" +64 78 model """conve""" +64 78 loss """crossentropy""" +64 78 regularizer """no""" +64 78 optimizer """adadelta""" +64 78 training_loop """lcwa""" +64 78 evaluator """rankbased""" +64 79 dataset """kinships""" +64 79 model """conve""" +64 79 loss """crossentropy""" +64 79 regularizer """no""" +64 79 optimizer """adadelta""" +64 79 training_loop """lcwa""" +64 79 evaluator """rankbased""" +64 80 dataset """kinships""" +64 80 model """conve""" +64 80 loss """crossentropy""" +64 80 regularizer """no""" +64 80 optimizer """adadelta""" +64 80 training_loop """lcwa""" +64 80 evaluator """rankbased""" +64 81 dataset """kinships""" +64 81 model """conve""" +64 81 loss """crossentropy""" +64 81 regularizer """no""" +64 81 optimizer """adadelta""" +64 81 training_loop """lcwa""" +64 81 evaluator """rankbased""" +64 82 dataset """kinships""" +64 82 model """conve""" +64 82 loss """crossentropy""" +64 82 regularizer """no""" +64 82 optimizer """adadelta""" +64 82 training_loop """lcwa""" +64 82 evaluator """rankbased""" +64 83 dataset """kinships""" +64 83 model """conve""" +64 83 loss """crossentropy""" +64 83 regularizer """no""" +64 83 optimizer """adadelta""" +64 83 training_loop """lcwa""" +64 83 evaluator """rankbased""" +64 84 dataset """kinships""" +64 84 model """conve""" +64 84 loss """crossentropy""" +64 84 regularizer """no""" +64 84 optimizer """adadelta""" +64 84 training_loop """lcwa""" +64 84 evaluator """rankbased""" +64 85 dataset """kinships""" +64 85 model """conve""" +64 85 loss """crossentropy""" +64 85 regularizer """no""" +64 85 optimizer """adadelta""" +64 85 training_loop """lcwa""" +64 85 evaluator """rankbased""" +64 86 dataset """kinships""" +64 86 model """conve""" +64 86 loss """crossentropy""" +64 86 regularizer """no""" 
+64 86 optimizer """adadelta""" +64 86 training_loop """lcwa""" +64 86 evaluator """rankbased""" +64 87 dataset """kinships""" +64 87 model """conve""" +64 87 loss """crossentropy""" +64 87 regularizer """no""" +64 87 optimizer """adadelta""" +64 87 training_loop """lcwa""" +64 87 evaluator """rankbased""" +64 88 dataset """kinships""" +64 88 model """conve""" +64 88 loss """crossentropy""" +64 88 regularizer """no""" +64 88 optimizer """adadelta""" +64 88 training_loop """lcwa""" +64 88 evaluator """rankbased""" +64 89 dataset """kinships""" +64 89 model """conve""" +64 89 loss """crossentropy""" +64 89 regularizer """no""" +64 89 optimizer """adadelta""" +64 89 training_loop """lcwa""" +64 89 evaluator """rankbased""" +64 90 dataset """kinships""" +64 90 model """conve""" +64 90 loss """crossentropy""" +64 90 regularizer """no""" +64 90 optimizer """adadelta""" +64 90 training_loop """lcwa""" +64 90 evaluator """rankbased""" +64 91 dataset """kinships""" +64 91 model """conve""" +64 91 loss """crossentropy""" +64 91 regularizer """no""" +64 91 optimizer """adadelta""" +64 91 training_loop """lcwa""" +64 91 evaluator """rankbased""" +64 92 dataset """kinships""" +64 92 model """conve""" +64 92 loss """crossentropy""" +64 92 regularizer """no""" +64 92 optimizer """adadelta""" +64 92 training_loop """lcwa""" +64 92 evaluator """rankbased""" +64 93 dataset """kinships""" +64 93 model """conve""" +64 93 loss """crossentropy""" +64 93 regularizer """no""" +64 93 optimizer """adadelta""" +64 93 training_loop """lcwa""" +64 93 evaluator """rankbased""" +64 94 dataset """kinships""" +64 94 model """conve""" +64 94 loss """crossentropy""" +64 94 regularizer """no""" +64 94 optimizer """adadelta""" +64 94 training_loop """lcwa""" +64 94 evaluator """rankbased""" +64 95 dataset """kinships""" +64 95 model """conve""" +64 95 loss """crossentropy""" +64 95 regularizer """no""" +64 95 optimizer """adadelta""" +64 95 training_loop """lcwa""" +64 95 evaluator """rankbased""" +64 
96 dataset """kinships""" +64 96 model """conve""" +64 96 loss """crossentropy""" +64 96 regularizer """no""" +64 96 optimizer """adadelta""" +64 96 training_loop """lcwa""" +64 96 evaluator """rankbased""" +64 97 dataset """kinships""" +64 97 model """conve""" +64 97 loss """crossentropy""" +64 97 regularizer """no""" +64 97 optimizer """adadelta""" +64 97 training_loop """lcwa""" +64 97 evaluator """rankbased""" +64 98 dataset """kinships""" +64 98 model """conve""" +64 98 loss """crossentropy""" +64 98 regularizer """no""" +64 98 optimizer """adadelta""" +64 98 training_loop """lcwa""" +64 98 evaluator """rankbased""" +64 99 dataset """kinships""" +64 99 model """conve""" +64 99 loss """crossentropy""" +64 99 regularizer """no""" +64 99 optimizer """adadelta""" +64 99 training_loop """lcwa""" +64 99 evaluator """rankbased""" +64 100 dataset """kinships""" +64 100 model """conve""" +64 100 loss """crossentropy""" +64 100 regularizer """no""" +64 100 optimizer """adadelta""" +64 100 training_loop """lcwa""" +64 100 evaluator """rankbased""" +65 1 model.output_channels 53.0 +65 1 model.input_dropout 0.09515309053802629 +65 1 model.output_dropout 0.4168361820720068 +65 1 model.feature_map_dropout 0.2515137164356805 +65 1 model.embedding_dim 1.0 +65 1 training.batch_size 2.0 +65 1 training.label_smoothing 0.0010744873577499636 +65 2 model.output_channels 38.0 +65 2 model.input_dropout 0.2497745728989611 +65 2 model.output_dropout 0.23983642203936256 +65 2 model.feature_map_dropout 0.1050881710610786 +65 2 model.embedding_dim 1.0 +65 2 training.batch_size 0.0 +65 2 training.label_smoothing 0.5400129055259103 +65 3 model.output_channels 56.0 +65 3 model.input_dropout 0.4594026970263692 +65 3 model.output_dropout 0.1862759034039324 +65 3 model.feature_map_dropout 0.028943145569373663 +65 3 model.embedding_dim 0.0 +65 3 training.batch_size 1.0 +65 3 training.label_smoothing 0.11581951575001118 +65 4 model.output_channels 50.0 +65 4 model.input_dropout 0.30141193357474755 
+65 4 model.output_dropout 0.3189437364204703 +65 4 model.feature_map_dropout 0.0014485689298845972 +65 4 model.embedding_dim 1.0 +65 4 training.batch_size 0.0 +65 4 training.label_smoothing 0.001265802112242786 +65 5 model.output_channels 39.0 +65 5 model.input_dropout 0.03262667507922984 +65 5 model.output_dropout 0.025586743505882192 +65 5 model.feature_map_dropout 0.12939326760611541 +65 5 model.embedding_dim 1.0 +65 5 training.batch_size 2.0 +65 5 training.label_smoothing 0.07130422062425057 +65 6 model.output_channels 50.0 +65 6 model.input_dropout 0.029492225885700607 +65 6 model.output_dropout 0.09186135458329875 +65 6 model.feature_map_dropout 0.07571959605053169 +65 6 model.embedding_dim 0.0 +65 6 training.batch_size 0.0 +65 6 training.label_smoothing 0.9974308720485623 +65 7 model.output_channels 63.0 +65 7 model.input_dropout 0.39791809584844773 +65 7 model.output_dropout 0.46127287126331984 +65 7 model.feature_map_dropout 0.3961557000932035 +65 7 model.embedding_dim 2.0 +65 7 training.batch_size 0.0 +65 7 training.label_smoothing 0.11953607941214886 +65 8 model.output_channels 43.0 +65 8 model.input_dropout 0.005602283447809553 +65 8 model.output_dropout 0.09348031001550883 +65 8 model.feature_map_dropout 0.4137440580024657 +65 8 model.embedding_dim 2.0 +65 8 training.batch_size 2.0 +65 8 training.label_smoothing 0.05118158903705586 +65 9 model.output_channels 30.0 +65 9 model.input_dropout 0.21476164268733977 +65 9 model.output_dropout 0.3063121856408568 +65 9 model.feature_map_dropout 0.15748132991923447 +65 9 model.embedding_dim 1.0 +65 9 training.batch_size 2.0 +65 9 training.label_smoothing 0.6665322879509195 +65 10 model.output_channels 30.0 +65 10 model.input_dropout 0.4451230922820861 +65 10 model.output_dropout 0.1579911403044898 +65 10 model.feature_map_dropout 0.4317879475128165 +65 10 model.embedding_dim 0.0 +65 10 training.batch_size 2.0 +65 10 training.label_smoothing 0.01440365119027736 +65 11 model.output_channels 50.0 +65 11 
model.input_dropout 0.214649361842428 +65 11 model.output_dropout 0.06897054426560251 +65 11 model.feature_map_dropout 0.2930398807629018 +65 11 model.embedding_dim 0.0 +65 11 training.batch_size 2.0 +65 11 training.label_smoothing 0.0038082140487554544 +65 12 model.output_channels 28.0 +65 12 model.input_dropout 0.39887584655786057 +65 12 model.output_dropout 0.48813729915427845 +65 12 model.feature_map_dropout 0.2309252846010375 +65 12 model.embedding_dim 1.0 +65 12 training.batch_size 2.0 +65 12 training.label_smoothing 0.012861054132826021 +65 13 model.output_channels 53.0 +65 13 model.input_dropout 0.4013525950748115 +65 13 model.output_dropout 0.4722216401098381 +65 13 model.feature_map_dropout 0.31671152912980627 +65 13 model.embedding_dim 2.0 +65 13 training.batch_size 2.0 +65 13 training.label_smoothing 0.16168083261295374 +65 14 model.output_channels 58.0 +65 14 model.input_dropout 0.43285175645630675 +65 14 model.output_dropout 0.06186992325360652 +65 14 model.feature_map_dropout 0.35640290389973256 +65 14 model.embedding_dim 1.0 +65 14 training.batch_size 2.0 +65 14 training.label_smoothing 0.16122261072084906 +65 15 model.output_channels 37.0 +65 15 model.input_dropout 0.2296883253726406 +65 15 model.output_dropout 0.16312776889175762 +65 15 model.feature_map_dropout 0.4516178527186306 +65 15 model.embedding_dim 2.0 +65 15 training.batch_size 0.0 +65 15 training.label_smoothing 0.05016391500206391 +65 16 model.output_channels 52.0 +65 16 model.input_dropout 0.4477249802374705 +65 16 model.output_dropout 0.28281838105229934 +65 16 model.feature_map_dropout 0.4227384315306106 +65 16 model.embedding_dim 2.0 +65 16 training.batch_size 1.0 +65 16 training.label_smoothing 0.005057067619910347 +65 17 model.output_channels 57.0 +65 17 model.input_dropout 0.3843469861043805 +65 17 model.output_dropout 0.4723972083361421 +65 17 model.feature_map_dropout 0.1784332593199503 +65 17 model.embedding_dim 2.0 +65 17 training.batch_size 2.0 +65 17 
training.label_smoothing 0.9638930359883884 +65 18 model.output_channels 42.0 +65 18 model.input_dropout 0.4002715698323879 +65 18 model.output_dropout 0.15312224722865925 +65 18 model.feature_map_dropout 0.20795837287439967 +65 18 model.embedding_dim 2.0 +65 18 training.batch_size 2.0 +65 18 training.label_smoothing 0.021098790098018048 +65 19 model.output_channels 20.0 +65 19 model.input_dropout 0.29745878956577176 +65 19 model.output_dropout 0.46745790689411987 +65 19 model.feature_map_dropout 0.4788908450962282 +65 19 model.embedding_dim 0.0 +65 19 training.batch_size 1.0 +65 19 training.label_smoothing 0.018937352257437937 +65 20 model.output_channels 29.0 +65 20 model.input_dropout 0.3462596325657728 +65 20 model.output_dropout 0.42716898558069344 +65 20 model.feature_map_dropout 0.30798554440590703 +65 20 model.embedding_dim 1.0 +65 20 training.batch_size 2.0 +65 20 training.label_smoothing 0.05939160442517752 +65 21 model.output_channels 53.0 +65 21 model.input_dropout 0.1812391507772736 +65 21 model.output_dropout 0.0358691166042342 +65 21 model.feature_map_dropout 0.4938671974853374 +65 21 model.embedding_dim 0.0 +65 21 training.batch_size 1.0 +65 21 training.label_smoothing 0.2787130985359168 +65 22 model.output_channels 47.0 +65 22 model.input_dropout 0.07509478742105274 +65 22 model.output_dropout 0.43103432118548335 +65 22 model.feature_map_dropout 0.20107534425034684 +65 22 model.embedding_dim 1.0 +65 22 training.batch_size 0.0 +65 22 training.label_smoothing 0.015899432404968043 +65 23 model.output_channels 22.0 +65 23 model.input_dropout 0.19531237102399435 +65 23 model.output_dropout 0.3250765394519348 +65 23 model.feature_map_dropout 0.2969621993823257 +65 23 model.embedding_dim 1.0 +65 23 training.batch_size 2.0 +65 23 training.label_smoothing 0.13544566551007753 +65 24 model.output_channels 48.0 +65 24 model.input_dropout 0.3761489742033843 +65 24 model.output_dropout 0.27326122668355113 +65 24 model.feature_map_dropout 0.2952358445287889 +65 
24 model.embedding_dim 2.0 +65 24 training.batch_size 2.0 +65 24 training.label_smoothing 0.0013578786024932607 +65 25 model.output_channels 56.0 +65 25 model.input_dropout 0.2887033246431949 +65 25 model.output_dropout 0.08242106972649182 +65 25 model.feature_map_dropout 0.29300738823761774 +65 25 model.embedding_dim 0.0 +65 25 training.batch_size 0.0 +65 25 training.label_smoothing 0.01805401998143581 +65 26 model.output_channels 34.0 +65 26 model.input_dropout 0.48470978536555137 +65 26 model.output_dropout 0.4826601872389703 +65 26 model.feature_map_dropout 0.245568421097904 +65 26 model.embedding_dim 1.0 +65 26 training.batch_size 2.0 +65 26 training.label_smoothing 0.10279282868484298 +65 27 model.output_channels 55.0 +65 27 model.input_dropout 0.3840260971425135 +65 27 model.output_dropout 0.11362127965411423 +65 27 model.feature_map_dropout 0.3078472829115104 +65 27 model.embedding_dim 1.0 +65 27 training.batch_size 2.0 +65 27 training.label_smoothing 0.012426439651188058 +65 28 model.output_channels 34.0 +65 28 model.input_dropout 0.2607940826262062 +65 28 model.output_dropout 0.3474996785572688 +65 28 model.feature_map_dropout 0.13502745147681855 +65 28 model.embedding_dim 1.0 +65 28 training.batch_size 0.0 +65 28 training.label_smoothing 0.05881042838701328 +65 29 model.output_channels 30.0 +65 29 model.input_dropout 0.4894237341891004 +65 29 model.output_dropout 0.19118748153392517 +65 29 model.feature_map_dropout 0.10388255899232651 +65 29 model.embedding_dim 2.0 +65 29 training.batch_size 2.0 +65 29 training.label_smoothing 0.0015924528054939754 +65 30 model.output_channels 62.0 +65 30 model.input_dropout 0.2569867902055591 +65 30 model.output_dropout 0.2415596493521665 +65 30 model.feature_map_dropout 0.30889386241740024 +65 30 model.embedding_dim 1.0 +65 30 training.batch_size 1.0 +65 30 training.label_smoothing 0.2299551247581701 +65 31 model.output_channels 56.0 +65 31 model.input_dropout 0.14392680443075057 +65 31 model.output_dropout 
0.15771244048561112 +65 31 model.feature_map_dropout 0.03071613204887652 +65 31 model.embedding_dim 2.0 +65 31 training.batch_size 1.0 +65 31 training.label_smoothing 0.0031947573861507983 +65 32 model.output_channels 53.0 +65 32 model.input_dropout 0.29095236668238095 +65 32 model.output_dropout 0.18272280147141906 +65 32 model.feature_map_dropout 0.09561045457458822 +65 32 model.embedding_dim 2.0 +65 32 training.batch_size 2.0 +65 32 training.label_smoothing 0.0036649237289321695 +65 33 model.output_channels 30.0 +65 33 model.input_dropout 0.2804234784793495 +65 33 model.output_dropout 0.358731027414144 +65 33 model.feature_map_dropout 0.11381446616416707 +65 33 model.embedding_dim 1.0 +65 33 training.batch_size 1.0 +65 33 training.label_smoothing 0.002914900208988368 +65 34 model.output_channels 39.0 +65 34 model.input_dropout 0.027960581749917224 +65 34 model.output_dropout 0.12150651676390023 +65 34 model.feature_map_dropout 0.24398405319661987 +65 34 model.embedding_dim 2.0 +65 34 training.batch_size 1.0 +65 34 training.label_smoothing 0.016914910735636036 +65 35 model.output_channels 28.0 +65 35 model.input_dropout 0.27540870573301335 +65 35 model.output_dropout 0.00633660232754385 +65 35 model.feature_map_dropout 0.02797802962976037 +65 35 model.embedding_dim 0.0 +65 35 training.batch_size 0.0 +65 35 training.label_smoothing 0.06398127186173323 +65 36 model.output_channels 50.0 +65 36 model.input_dropout 0.4421325713882502 +65 36 model.output_dropout 0.44020376721997184 +65 36 model.feature_map_dropout 0.23129434556883272 +65 36 model.embedding_dim 1.0 +65 36 training.batch_size 1.0 +65 36 training.label_smoothing 0.003300009704704696 +65 37 model.output_channels 58.0 +65 37 model.input_dropout 0.02600010166757971 +65 37 model.output_dropout 0.04325724355620081 +65 37 model.feature_map_dropout 0.19865171937104215 +65 37 model.embedding_dim 1.0 +65 37 training.batch_size 1.0 +65 37 training.label_smoothing 0.006547252072343543 +65 38 model.output_channels 
16.0 +65 38 model.input_dropout 0.15983351997899586 +65 38 model.output_dropout 0.003099321352549478 +65 38 model.feature_map_dropout 0.35638970718036306 +65 38 model.embedding_dim 2.0 +65 38 training.batch_size 0.0 +65 38 training.label_smoothing 0.003391491046318294 +65 39 model.output_channels 46.0 +65 39 model.input_dropout 0.18831249707530895 +65 39 model.output_dropout 0.3316291575006171 +65 39 model.feature_map_dropout 0.17863523208002724 +65 39 model.embedding_dim 1.0 +65 39 training.batch_size 2.0 +65 39 training.label_smoothing 0.0021453664568551155 +65 40 model.output_channels 38.0 +65 40 model.input_dropout 0.39891894138858625 +65 40 model.output_dropout 0.12156056903030954 +65 40 model.feature_map_dropout 0.20973406563575075 +65 40 model.embedding_dim 1.0 +65 40 training.batch_size 1.0 +65 40 training.label_smoothing 0.45835072593828946 +65 41 model.output_channels 40.0 +65 41 model.input_dropout 0.29377534938984 +65 41 model.output_dropout 0.27069079112760164 +65 41 model.feature_map_dropout 0.08889684948839327 +65 41 model.embedding_dim 2.0 +65 41 training.batch_size 1.0 +65 41 training.label_smoothing 0.0014931787570748368 +65 42 model.output_channels 34.0 +65 42 model.input_dropout 0.3670722667845649 +65 42 model.output_dropout 0.4133589046411074 +65 42 model.feature_map_dropout 0.29317332368343835 +65 42 model.embedding_dim 2.0 +65 42 training.batch_size 2.0 +65 42 training.label_smoothing 0.0022431392428649147 +65 43 model.output_channels 43.0 +65 43 model.input_dropout 0.45576560922844683 +65 43 model.output_dropout 0.318751506562396 +65 43 model.feature_map_dropout 0.22105094185865787 +65 43 model.embedding_dim 0.0 +65 43 training.batch_size 2.0 +65 43 training.label_smoothing 0.45235609470913185 +65 44 model.output_channels 36.0 +65 44 model.input_dropout 0.34419099446789136 +65 44 model.output_dropout 0.15493704526467678 +65 44 model.feature_map_dropout 0.07697432278688021 +65 44 model.embedding_dim 2.0 +65 44 training.batch_size 0.0 +65 44 
training.label_smoothing 0.1574457948330939 +65 45 model.output_channels 64.0 +65 45 model.input_dropout 0.24092246112999233 +65 45 model.output_dropout 0.1486178849223596 +65 45 model.feature_map_dropout 0.009600465618560095 +65 45 model.embedding_dim 2.0 +65 45 training.batch_size 2.0 +65 45 training.label_smoothing 0.5784335075454973 +65 46 model.output_channels 21.0 +65 46 model.input_dropout 0.4230746411712488 +65 46 model.output_dropout 0.028488177117792057 +65 46 model.feature_map_dropout 0.4375331767461787 +65 46 model.embedding_dim 1.0 +65 46 training.batch_size 0.0 +65 46 training.label_smoothing 0.0013523182423564569 +65 47 model.output_channels 21.0 +65 47 model.input_dropout 0.33182831963496995 +65 47 model.output_dropout 0.3025334454614135 +65 47 model.feature_map_dropout 0.3696036592900611 +65 47 model.embedding_dim 1.0 +65 47 training.batch_size 1.0 +65 47 training.label_smoothing 0.004817680159351905 +65 48 model.output_channels 61.0 +65 48 model.input_dropout 0.2298102317487663 +65 48 model.output_dropout 0.3802642198884625 +65 48 model.feature_map_dropout 0.023564622626075393 +65 48 model.embedding_dim 2.0 +65 48 training.batch_size 2.0 +65 48 training.label_smoothing 0.07483263553374175 +65 49 model.output_channels 17.0 +65 49 model.input_dropout 0.26492327538520705 +65 49 model.output_dropout 0.036570682736075666 +65 49 model.feature_map_dropout 0.07722559813135549 +65 49 model.embedding_dim 0.0 +65 49 training.batch_size 1.0 +65 49 training.label_smoothing 0.05705442330191708 +65 50 model.output_channels 35.0 +65 50 model.input_dropout 0.35099751124021533 +65 50 model.output_dropout 0.28621247741198613 +65 50 model.feature_map_dropout 0.3458897884506515 +65 50 model.embedding_dim 0.0 +65 50 training.batch_size 2.0 +65 50 training.label_smoothing 0.131665275715182 +65 51 model.output_channels 49.0 +65 51 model.input_dropout 0.06533451859641826 +65 51 model.output_dropout 0.0024271563333386204 +65 51 model.feature_map_dropout 0.22926317037979327 
+65 51 model.embedding_dim 0.0 +65 51 training.batch_size 2.0 +65 51 training.label_smoothing 0.30678551173809976 +65 52 model.output_channels 41.0 +65 52 model.input_dropout 0.48693310715671956 +65 52 model.output_dropout 0.0711346937921768 +65 52 model.feature_map_dropout 0.1906601319936297 +65 52 model.embedding_dim 1.0 +65 52 training.batch_size 2.0 +65 52 training.label_smoothing 0.06764581021979549 +65 53 model.output_channels 41.0 +65 53 model.input_dropout 0.11716544445426452 +65 53 model.output_dropout 0.026828910139195195 +65 53 model.feature_map_dropout 0.29209643764063 +65 53 model.embedding_dim 0.0 +65 53 training.batch_size 1.0 +65 53 training.label_smoothing 0.034458567136778424 +65 54 model.output_channels 22.0 +65 54 model.input_dropout 0.41178576930969163 +65 54 model.output_dropout 0.08399776837302902 +65 54 model.feature_map_dropout 0.09156689034896398 +65 54 model.embedding_dim 2.0 +65 54 training.batch_size 1.0 +65 54 training.label_smoothing 0.3304970548283287 +65 55 model.output_channels 64.0 +65 55 model.input_dropout 0.43512761896076646 +65 55 model.output_dropout 0.37297332792983146 +65 55 model.feature_map_dropout 0.20503829795974327 +65 55 model.embedding_dim 1.0 +65 55 training.batch_size 2.0 +65 55 training.label_smoothing 0.06929128658272117 +65 56 model.output_channels 53.0 +65 56 model.input_dropout 0.20045821729480356 +65 56 model.output_dropout 0.03903846280482154 +65 56 model.feature_map_dropout 0.39126978678168794 +65 56 model.embedding_dim 2.0 +65 56 training.batch_size 0.0 +65 56 training.label_smoothing 0.3890565698706632 +65 57 model.output_channels 29.0 +65 57 model.input_dropout 0.010673530859346081 +65 57 model.output_dropout 0.3089048991756804 +65 57 model.feature_map_dropout 0.04404967391190612 +65 57 model.embedding_dim 2.0 +65 57 training.batch_size 0.0 +65 57 training.label_smoothing 0.3041092460146535 +65 58 model.output_channels 17.0 +65 58 model.input_dropout 0.05368222841328546 +65 58 model.output_dropout 
0.49321443215095695 +65 58 model.feature_map_dropout 0.3719272567594538 +65 58 model.embedding_dim 2.0 +65 58 training.batch_size 2.0 +65 58 training.label_smoothing 0.0015000593015226714 +65 59 model.output_channels 47.0 +65 59 model.input_dropout 0.28544330190562806 +65 59 model.output_dropout 0.03599461056161607 +65 59 model.feature_map_dropout 0.2405318886859965 +65 59 model.embedding_dim 1.0 +65 59 training.batch_size 2.0 +65 59 training.label_smoothing 0.03442064312742287 +65 60 model.output_channels 43.0 +65 60 model.input_dropout 0.14599931583464903 +65 60 model.output_dropout 0.19704959311767078 +65 60 model.feature_map_dropout 0.17536966108726365 +65 60 model.embedding_dim 1.0 +65 60 training.batch_size 2.0 +65 60 training.label_smoothing 0.03340984756089193 +65 61 model.output_channels 56.0 +65 61 model.input_dropout 0.14317710607012601 +65 61 model.output_dropout 0.2574246216666473 +65 61 model.feature_map_dropout 0.16666446569953525 +65 61 model.embedding_dim 1.0 +65 61 training.batch_size 0.0 +65 61 training.label_smoothing 0.7265156825115483 +65 62 model.output_channels 61.0 +65 62 model.input_dropout 0.14371944101566553 +65 62 model.output_dropout 0.23184601968915447 +65 62 model.feature_map_dropout 0.2166913195775395 +65 62 model.embedding_dim 2.0 +65 62 training.batch_size 0.0 +65 62 training.label_smoothing 0.3660435406558794 +65 63 model.output_channels 59.0 +65 63 model.input_dropout 0.36070292656893055 +65 63 model.output_dropout 0.23847724480365062 +65 63 model.feature_map_dropout 0.10906711729506274 +65 63 model.embedding_dim 0.0 +65 63 training.batch_size 0.0 +65 63 training.label_smoothing 0.8760670308371233 +65 64 model.output_channels 64.0 +65 64 model.input_dropout 0.18337128339198322 +65 64 model.output_dropout 0.18473847459042575 +65 64 model.feature_map_dropout 0.0424753853893709 +65 64 model.embedding_dim 1.0 +65 64 training.batch_size 0.0 +65 64 training.label_smoothing 0.005401660122196082 +65 65 model.output_channels 29.0 +65 65 
model.input_dropout 0.25898973109457873 +65 65 model.output_dropout 0.014776281905675537 +65 65 model.feature_map_dropout 0.02410447065398358 +65 65 model.embedding_dim 0.0 +65 65 training.batch_size 0.0 +65 65 training.label_smoothing 0.21256528709983089 +65 66 model.output_channels 37.0 +65 66 model.input_dropout 0.39797723885954533 +65 66 model.output_dropout 0.032315882979112864 +65 66 model.feature_map_dropout 0.46410588625399585 +65 66 model.embedding_dim 0.0 +65 66 training.batch_size 0.0 +65 66 training.label_smoothing 0.5310315784635832 +65 67 model.output_channels 34.0 +65 67 model.input_dropout 0.16971118015858488 +65 67 model.output_dropout 0.29020521709987207 +65 67 model.feature_map_dropout 0.15755300968598318 +65 67 model.embedding_dim 2.0 +65 67 training.batch_size 2.0 +65 67 training.label_smoothing 0.008173125007944393 +65 68 model.output_channels 50.0 +65 68 model.input_dropout 0.19252212279603403 +65 68 model.output_dropout 0.031364653652351415 +65 68 model.feature_map_dropout 0.4452007240945517 +65 68 model.embedding_dim 2.0 +65 68 training.batch_size 1.0 +65 68 training.label_smoothing 0.32919955503799064 +65 69 model.output_channels 35.0 +65 69 model.input_dropout 0.41187318499164044 +65 69 model.output_dropout 0.44779235564368786 +65 69 model.feature_map_dropout 0.40742041493905395 +65 69 model.embedding_dim 2.0 +65 69 training.batch_size 2.0 +65 69 training.label_smoothing 0.09795979566933616 +65 70 model.output_channels 51.0 +65 70 model.input_dropout 0.4375802360926118 +65 70 model.output_dropout 0.19037174930510764 +65 70 model.feature_map_dropout 0.24824404257222804 +65 70 model.embedding_dim 2.0 +65 70 training.batch_size 2.0 +65 70 training.label_smoothing 0.07054567623484592 +65 71 model.output_channels 34.0 +65 71 model.input_dropout 0.44875602777560664 +65 71 model.output_dropout 0.3577610402026846 +65 71 model.feature_map_dropout 0.4522369067064722 +65 71 model.embedding_dim 0.0 +65 71 training.batch_size 0.0 +65 71 
training.label_smoothing 0.009823128016322444 +65 72 model.output_channels 30.0 +65 72 model.input_dropout 0.4891360653586265 +65 72 model.output_dropout 0.472163037508711 +65 72 model.feature_map_dropout 0.21976024418388213 +65 72 model.embedding_dim 1.0 +65 72 training.batch_size 2.0 +65 72 training.label_smoothing 0.0061697291440053585 +65 73 model.output_channels 60.0 +65 73 model.input_dropout 0.30433214019707944 +65 73 model.output_dropout 0.19278624360847202 +65 73 model.feature_map_dropout 0.10572905925375953 +65 73 model.embedding_dim 0.0 +65 73 training.batch_size 0.0 +65 73 training.label_smoothing 0.6044875478102928 +65 74 model.output_channels 48.0 +65 74 model.input_dropout 0.1724294789593535 +65 74 model.output_dropout 0.19042030436829005 +65 74 model.feature_map_dropout 0.34275433686465273 +65 74 model.embedding_dim 2.0 +65 74 training.batch_size 2.0 +65 74 training.label_smoothing 0.007088696108075146 +65 75 model.output_channels 20.0 +65 75 model.input_dropout 0.28094616242937565 +65 75 model.output_dropout 0.4931204054363505 +65 75 model.feature_map_dropout 0.3179430770424871 +65 75 model.embedding_dim 0.0 +65 75 training.batch_size 2.0 +65 75 training.label_smoothing 0.04595079004780914 +65 76 model.output_channels 44.0 +65 76 model.input_dropout 0.3350641704995562 +65 76 model.output_dropout 0.39935773707043604 +65 76 model.feature_map_dropout 0.4668086100392499 +65 76 model.embedding_dim 1.0 +65 76 training.batch_size 0.0 +65 76 training.label_smoothing 0.19179618391877964 +65 77 model.output_channels 17.0 +65 77 model.input_dropout 0.0229870449428195 +65 77 model.output_dropout 0.4196290260114757 +65 77 model.feature_map_dropout 0.32288373302493006 +65 77 model.embedding_dim 2.0 +65 77 training.batch_size 0.0 +65 77 training.label_smoothing 0.16436782900214014 +65 78 model.output_channels 18.0 +65 78 model.input_dropout 0.3413063437122853 +65 78 model.output_dropout 0.33333912931833465 +65 78 model.feature_map_dropout 0.23884018201768514 +65 
78 model.embedding_dim 0.0 +65 78 training.batch_size 2.0 +65 78 training.label_smoothing 0.03188532051195365 +65 79 model.output_channels 51.0 +65 79 model.input_dropout 0.09500155825495793 +65 79 model.output_dropout 0.12742885337850368 +65 79 model.feature_map_dropout 0.15271680861963394 +65 79 model.embedding_dim 1.0 +65 79 training.batch_size 1.0 +65 79 training.label_smoothing 0.05710846756058042 +65 80 model.output_channels 49.0 +65 80 model.input_dropout 0.33566710738022093 +65 80 model.output_dropout 0.21519507720266506 +65 80 model.feature_map_dropout 0.12560245913158946 +65 80 model.embedding_dim 0.0 +65 80 training.batch_size 2.0 +65 80 training.label_smoothing 0.0029370099651188472 +65 81 model.output_channels 59.0 +65 81 model.input_dropout 0.17526999301960394 +65 81 model.output_dropout 0.4509967874947802 +65 81 model.feature_map_dropout 0.47325748150053604 +65 81 model.embedding_dim 1.0 +65 81 training.batch_size 0.0 +65 81 training.label_smoothing 0.0037895594015615068 +65 82 model.output_channels 33.0 +65 82 model.input_dropout 0.27006764984016485 +65 82 model.output_dropout 0.051601002313931776 +65 82 model.feature_map_dropout 0.12662251892436455 +65 82 model.embedding_dim 1.0 +65 82 training.batch_size 0.0 +65 82 training.label_smoothing 0.027547414127514634 +65 83 model.output_channels 26.0 +65 83 model.input_dropout 0.3837887355182242 +65 83 model.output_dropout 0.40734505404543053 +65 83 model.feature_map_dropout 0.3566815707239522 +65 83 model.embedding_dim 1.0 +65 83 training.batch_size 0.0 +65 83 training.label_smoothing 0.013152947548672371 +65 84 model.output_channels 36.0 +65 84 model.input_dropout 0.17536784528551252 +65 84 model.output_dropout 0.24391524819507815 +65 84 model.feature_map_dropout 0.08943756237748424 +65 84 model.embedding_dim 1.0 +65 84 training.batch_size 1.0 +65 84 training.label_smoothing 0.04396235166463839 +65 85 model.output_channels 17.0 +65 85 model.input_dropout 0.2393826158317987 +65 85 model.output_dropout 
0.19601002590994993 +65 85 model.feature_map_dropout 0.1721363933118633 +65 85 model.embedding_dim 0.0 +65 85 training.batch_size 2.0 +65 85 training.label_smoothing 0.028594609076565556 +65 86 model.output_channels 39.0 +65 86 model.input_dropout 0.15387928706162618 +65 86 model.output_dropout 0.2779654043431632 +65 86 model.feature_map_dropout 0.08649510212905792 +65 86 model.embedding_dim 0.0 +65 86 training.batch_size 0.0 +65 86 training.label_smoothing 0.19448906433271612 +65 87 model.output_channels 64.0 +65 87 model.input_dropout 0.08997356716969801 +65 87 model.output_dropout 0.20786647376083361 +65 87 model.feature_map_dropout 0.3870586211916569 +65 87 model.embedding_dim 1.0 +65 87 training.batch_size 1.0 +65 87 training.label_smoothing 0.13129133458531933 +65 88 model.output_channels 19.0 +65 88 model.input_dropout 0.34426949003041335 +65 88 model.output_dropout 0.3391128240606971 +65 88 model.feature_map_dropout 0.24956184299226303 +65 88 model.embedding_dim 1.0 +65 88 training.batch_size 1.0 +65 88 training.label_smoothing 0.001127736895715043 +65 89 model.output_channels 34.0 +65 89 model.input_dropout 0.05550835837871115 +65 89 model.output_dropout 0.15014797532139523 +65 89 model.feature_map_dropout 0.2153334401559307 +65 89 model.embedding_dim 2.0 +65 89 training.batch_size 0.0 +65 89 training.label_smoothing 0.00315292185631313 +65 90 model.output_channels 61.0 +65 90 model.input_dropout 0.4832391487905214 +65 90 model.output_dropout 0.14550212903280224 +65 90 model.feature_map_dropout 0.3482953028256516 +65 90 model.embedding_dim 1.0 +65 90 training.batch_size 2.0 +65 90 training.label_smoothing 0.0020798775519183803 +65 91 model.output_channels 44.0 +65 91 model.input_dropout 0.24075134423516037 +65 91 model.output_dropout 0.1188690898907756 +65 91 model.feature_map_dropout 0.38552425977739585 +65 91 model.embedding_dim 2.0 +65 91 training.batch_size 0.0 +65 91 training.label_smoothing 0.006625808732946055 +65 92 model.output_channels 30.0 +65 
92 model.input_dropout 0.2282564836429451 +65 92 model.output_dropout 0.2199948217429838 +65 92 model.feature_map_dropout 0.18132647020608983 +65 92 model.embedding_dim 0.0 +65 92 training.batch_size 1.0 +65 92 training.label_smoothing 0.0055674402529782676 +65 93 model.output_channels 38.0 +65 93 model.input_dropout 0.15670726186449274 +65 93 model.output_dropout 0.3565002505844434 +65 93 model.feature_map_dropout 0.3101903915396584 +65 93 model.embedding_dim 2.0 +65 93 training.batch_size 0.0 +65 93 training.label_smoothing 0.08931874812591913 +65 94 model.output_channels 31.0 +65 94 model.input_dropout 0.3249269087608381 +65 94 model.output_dropout 0.18604738123865533 +65 94 model.feature_map_dropout 0.10472341374472327 +65 94 model.embedding_dim 0.0 +65 94 training.batch_size 1.0 +65 94 training.label_smoothing 0.0015285034976729101 +65 95 model.output_channels 44.0 +65 95 model.input_dropout 0.4864935336774559 +65 95 model.output_dropout 0.2539956292377931 +65 95 model.feature_map_dropout 0.24405651667558842 +65 95 model.embedding_dim 0.0 +65 95 training.batch_size 0.0 +65 95 training.label_smoothing 0.04995079460288994 +65 96 model.output_channels 24.0 +65 96 model.input_dropout 0.03347196244192441 +65 96 model.output_dropout 0.36438384668351537 +65 96 model.feature_map_dropout 0.4541344603492656 +65 96 model.embedding_dim 0.0 +65 96 training.batch_size 2.0 +65 96 training.label_smoothing 0.001052677587709341 +65 97 model.output_channels 41.0 +65 97 model.input_dropout 0.16552913476691622 +65 97 model.output_dropout 0.4022736361998491 +65 97 model.feature_map_dropout 0.02154672998460727 +65 97 model.embedding_dim 2.0 +65 97 training.batch_size 1.0 +65 97 training.label_smoothing 0.5996026678102734 +65 98 model.output_channels 63.0 +65 98 model.input_dropout 0.25651957139454357 +65 98 model.output_dropout 0.02899840727618741 +65 98 model.feature_map_dropout 0.09371186474677162 +65 98 model.embedding_dim 1.0 +65 98 training.batch_size 1.0 +65 98 
training.label_smoothing 0.07410536962418811 +65 99 model.output_channels 37.0 +65 99 model.input_dropout 0.004823797662163565 +65 99 model.output_dropout 0.2830978561346502 +65 99 model.feature_map_dropout 0.4700443955885017 +65 99 model.embedding_dim 2.0 +65 99 training.batch_size 2.0 +65 99 training.label_smoothing 0.0034933502171439037 +65 100 model.output_channels 40.0 +65 100 model.input_dropout 0.34358005630153465 +65 100 model.output_dropout 0.4294048506237142 +65 100 model.feature_map_dropout 0.09987298085545404 +65 100 model.embedding_dim 1.0 +65 100 training.batch_size 1.0 +65 100 training.label_smoothing 0.04238584714237026 +65 1 dataset """kinships""" +65 1 model """conve""" +65 1 loss """crossentropy""" +65 1 regularizer """no""" +65 1 optimizer """adadelta""" +65 1 training_loop """lcwa""" +65 1 evaluator """rankbased""" +65 2 dataset """kinships""" +65 2 model """conve""" +65 2 loss """crossentropy""" +65 2 regularizer """no""" +65 2 optimizer """adadelta""" +65 2 training_loop """lcwa""" +65 2 evaluator """rankbased""" +65 3 dataset """kinships""" +65 3 model """conve""" +65 3 loss """crossentropy""" +65 3 regularizer """no""" +65 3 optimizer """adadelta""" +65 3 training_loop """lcwa""" +65 3 evaluator """rankbased""" +65 4 dataset """kinships""" +65 4 model """conve""" +65 4 loss """crossentropy""" +65 4 regularizer """no""" +65 4 optimizer """adadelta""" +65 4 training_loop """lcwa""" +65 4 evaluator """rankbased""" +65 5 dataset """kinships""" +65 5 model """conve""" +65 5 loss """crossentropy""" +65 5 regularizer """no""" +65 5 optimizer """adadelta""" +65 5 training_loop """lcwa""" +65 5 evaluator """rankbased""" +65 6 dataset """kinships""" +65 6 model """conve""" +65 6 loss """crossentropy""" +65 6 regularizer """no""" +65 6 optimizer """adadelta""" +65 6 training_loop """lcwa""" +65 6 evaluator """rankbased""" +65 7 dataset """kinships""" +65 7 model """conve""" +65 7 loss """crossentropy""" +65 7 regularizer """no""" +65 7 optimizer 
"""adadelta""" +65 7 training_loop """lcwa""" +65 7 evaluator """rankbased""" +65 8 dataset """kinships""" +65 8 model """conve""" +65 8 loss """crossentropy""" +65 8 regularizer """no""" +65 8 optimizer """adadelta""" +65 8 training_loop """lcwa""" +65 8 evaluator """rankbased""" +65 9 dataset """kinships""" +65 9 model """conve""" +65 9 loss """crossentropy""" +65 9 regularizer """no""" +65 9 optimizer """adadelta""" +65 9 training_loop """lcwa""" +65 9 evaluator """rankbased""" +65 10 dataset """kinships""" +65 10 model """conve""" +65 10 loss """crossentropy""" +65 10 regularizer """no""" +65 10 optimizer """adadelta""" +65 10 training_loop """lcwa""" +65 10 evaluator """rankbased""" +65 11 dataset """kinships""" +65 11 model """conve""" +65 11 loss """crossentropy""" +65 11 regularizer """no""" +65 11 optimizer """adadelta""" +65 11 training_loop """lcwa""" +65 11 evaluator """rankbased""" +65 12 dataset """kinships""" +65 12 model """conve""" +65 12 loss """crossentropy""" +65 12 regularizer """no""" +65 12 optimizer """adadelta""" +65 12 training_loop """lcwa""" +65 12 evaluator """rankbased""" +65 13 dataset """kinships""" +65 13 model """conve""" +65 13 loss """crossentropy""" +65 13 regularizer """no""" +65 13 optimizer """adadelta""" +65 13 training_loop """lcwa""" +65 13 evaluator """rankbased""" +65 14 dataset """kinships""" +65 14 model """conve""" +65 14 loss """crossentropy""" +65 14 regularizer """no""" +65 14 optimizer """adadelta""" +65 14 training_loop """lcwa""" +65 14 evaluator """rankbased""" +65 15 dataset """kinships""" +65 15 model """conve""" +65 15 loss """crossentropy""" +65 15 regularizer """no""" +65 15 optimizer """adadelta""" +65 15 training_loop """lcwa""" +65 15 evaluator """rankbased""" +65 16 dataset """kinships""" +65 16 model """conve""" +65 16 loss """crossentropy""" +65 16 regularizer """no""" +65 16 optimizer """adadelta""" +65 16 training_loop """lcwa""" +65 16 evaluator """rankbased""" +65 17 dataset """kinships""" +65 17 
model """conve""" +65 17 loss """crossentropy""" +65 17 regularizer """no""" +65 17 optimizer """adadelta""" +65 17 training_loop """lcwa""" +65 17 evaluator """rankbased""" +65 18 dataset """kinships""" +65 18 model """conve""" +65 18 loss """crossentropy""" +65 18 regularizer """no""" +65 18 optimizer """adadelta""" +65 18 training_loop """lcwa""" +65 18 evaluator """rankbased""" +65 19 dataset """kinships""" +65 19 model """conve""" +65 19 loss """crossentropy""" +65 19 regularizer """no""" +65 19 optimizer """adadelta""" +65 19 training_loop """lcwa""" +65 19 evaluator """rankbased""" +65 20 dataset """kinships""" +65 20 model """conve""" +65 20 loss """crossentropy""" +65 20 regularizer """no""" +65 20 optimizer """adadelta""" +65 20 training_loop """lcwa""" +65 20 evaluator """rankbased""" +65 21 dataset """kinships""" +65 21 model """conve""" +65 21 loss """crossentropy""" +65 21 regularizer """no""" +65 21 optimizer """adadelta""" +65 21 training_loop """lcwa""" +65 21 evaluator """rankbased""" +65 22 dataset """kinships""" +65 22 model """conve""" +65 22 loss """crossentropy""" +65 22 regularizer """no""" +65 22 optimizer """adadelta""" +65 22 training_loop """lcwa""" +65 22 evaluator """rankbased""" +65 23 dataset """kinships""" +65 23 model """conve""" +65 23 loss """crossentropy""" +65 23 regularizer """no""" +65 23 optimizer """adadelta""" +65 23 training_loop """lcwa""" +65 23 evaluator """rankbased""" +65 24 dataset """kinships""" +65 24 model """conve""" +65 24 loss """crossentropy""" +65 24 regularizer """no""" +65 24 optimizer """adadelta""" +65 24 training_loop """lcwa""" +65 24 evaluator """rankbased""" +65 25 dataset """kinships""" +65 25 model """conve""" +65 25 loss """crossentropy""" +65 25 regularizer """no""" +65 25 optimizer """adadelta""" +65 25 training_loop """lcwa""" +65 25 evaluator """rankbased""" +65 26 dataset """kinships""" +65 26 model """conve""" +65 26 loss """crossentropy""" +65 26 regularizer """no""" +65 26 optimizer 
"""adadelta""" +65 26 training_loop """lcwa""" +65 26 evaluator """rankbased""" +65 27 dataset """kinships""" +65 27 model """conve""" +65 27 loss """crossentropy""" +65 27 regularizer """no""" +65 27 optimizer """adadelta""" +65 27 training_loop """lcwa""" +65 27 evaluator """rankbased""" +65 28 dataset """kinships""" +65 28 model """conve""" +65 28 loss """crossentropy""" +65 28 regularizer """no""" +65 28 optimizer """adadelta""" +65 28 training_loop """lcwa""" +65 28 evaluator """rankbased""" +65 29 dataset """kinships""" +65 29 model """conve""" +65 29 loss """crossentropy""" +65 29 regularizer """no""" +65 29 optimizer """adadelta""" +65 29 training_loop """lcwa""" +65 29 evaluator """rankbased""" +65 30 dataset """kinships""" +65 30 model """conve""" +65 30 loss """crossentropy""" +65 30 regularizer """no""" +65 30 optimizer """adadelta""" +65 30 training_loop """lcwa""" +65 30 evaluator """rankbased""" +65 31 dataset """kinships""" +65 31 model """conve""" +65 31 loss """crossentropy""" +65 31 regularizer """no""" +65 31 optimizer """adadelta""" +65 31 training_loop """lcwa""" +65 31 evaluator """rankbased""" +65 32 dataset """kinships""" +65 32 model """conve""" +65 32 loss """crossentropy""" +65 32 regularizer """no""" +65 32 optimizer """adadelta""" +65 32 training_loop """lcwa""" +65 32 evaluator """rankbased""" +65 33 dataset """kinships""" +65 33 model """conve""" +65 33 loss """crossentropy""" +65 33 regularizer """no""" +65 33 optimizer """adadelta""" +65 33 training_loop """lcwa""" +65 33 evaluator """rankbased""" +65 34 dataset """kinships""" +65 34 model """conve""" +65 34 loss """crossentropy""" +65 34 regularizer """no""" +65 34 optimizer """adadelta""" +65 34 training_loop """lcwa""" +65 34 evaluator """rankbased""" +65 35 dataset """kinships""" +65 35 model """conve""" +65 35 loss """crossentropy""" +65 35 regularizer """no""" +65 35 optimizer """adadelta""" +65 35 training_loop """lcwa""" +65 35 evaluator """rankbased""" +65 36 dataset 
"""kinships""" +65 36 model """conve""" +65 36 loss """crossentropy""" +65 36 regularizer """no""" +65 36 optimizer """adadelta""" +65 36 training_loop """lcwa""" +65 36 evaluator """rankbased""" +65 37 dataset """kinships""" +65 37 model """conve""" +65 37 loss """crossentropy""" +65 37 regularizer """no""" +65 37 optimizer """adadelta""" +65 37 training_loop """lcwa""" +65 37 evaluator """rankbased""" +65 38 dataset """kinships""" +65 38 model """conve""" +65 38 loss """crossentropy""" +65 38 regularizer """no""" +65 38 optimizer """adadelta""" +65 38 training_loop """lcwa""" +65 38 evaluator """rankbased""" +65 39 dataset """kinships""" +65 39 model """conve""" +65 39 loss """crossentropy""" +65 39 regularizer """no""" +65 39 optimizer """adadelta""" +65 39 training_loop """lcwa""" +65 39 evaluator """rankbased""" +65 40 dataset """kinships""" +65 40 model """conve""" +65 40 loss """crossentropy""" +65 40 regularizer """no""" +65 40 optimizer """adadelta""" +65 40 training_loop """lcwa""" +65 40 evaluator """rankbased""" +65 41 dataset """kinships""" +65 41 model """conve""" +65 41 loss """crossentropy""" +65 41 regularizer """no""" +65 41 optimizer """adadelta""" +65 41 training_loop """lcwa""" +65 41 evaluator """rankbased""" +65 42 dataset """kinships""" +65 42 model """conve""" +65 42 loss """crossentropy""" +65 42 regularizer """no""" +65 42 optimizer """adadelta""" +65 42 training_loop """lcwa""" +65 42 evaluator """rankbased""" +65 43 dataset """kinships""" +65 43 model """conve""" +65 43 loss """crossentropy""" +65 43 regularizer """no""" +65 43 optimizer """adadelta""" +65 43 training_loop """lcwa""" +65 43 evaluator """rankbased""" +65 44 dataset """kinships""" +65 44 model """conve""" +65 44 loss """crossentropy""" +65 44 regularizer """no""" +65 44 optimizer """adadelta""" +65 44 training_loop """lcwa""" +65 44 evaluator """rankbased""" +65 45 dataset """kinships""" +65 45 model """conve""" +65 45 loss """crossentropy""" +65 45 regularizer """no""" 
+65 45 optimizer """adadelta""" +65 45 training_loop """lcwa""" +65 45 evaluator """rankbased""" +65 46 dataset """kinships""" +65 46 model """conve""" +65 46 loss """crossentropy""" +65 46 regularizer """no""" +65 46 optimizer """adadelta""" +65 46 training_loop """lcwa""" +65 46 evaluator """rankbased""" +65 47 dataset """kinships""" +65 47 model """conve""" +65 47 loss """crossentropy""" +65 47 regularizer """no""" +65 47 optimizer """adadelta""" +65 47 training_loop """lcwa""" +65 47 evaluator """rankbased""" +65 48 dataset """kinships""" +65 48 model """conve""" +65 48 loss """crossentropy""" +65 48 regularizer """no""" +65 48 optimizer """adadelta""" +65 48 training_loop """lcwa""" +65 48 evaluator """rankbased""" +65 49 dataset """kinships""" +65 49 model """conve""" +65 49 loss """crossentropy""" +65 49 regularizer """no""" +65 49 optimizer """adadelta""" +65 49 training_loop """lcwa""" +65 49 evaluator """rankbased""" +65 50 dataset """kinships""" +65 50 model """conve""" +65 50 loss """crossentropy""" +65 50 regularizer """no""" +65 50 optimizer """adadelta""" +65 50 training_loop """lcwa""" +65 50 evaluator """rankbased""" +65 51 dataset """kinships""" +65 51 model """conve""" +65 51 loss """crossentropy""" +65 51 regularizer """no""" +65 51 optimizer """adadelta""" +65 51 training_loop """lcwa""" +65 51 evaluator """rankbased""" +65 52 dataset """kinships""" +65 52 model """conve""" +65 52 loss """crossentropy""" +65 52 regularizer """no""" +65 52 optimizer """adadelta""" +65 52 training_loop """lcwa""" +65 52 evaluator """rankbased""" +65 53 dataset """kinships""" +65 53 model """conve""" +65 53 loss """crossentropy""" +65 53 regularizer """no""" +65 53 optimizer """adadelta""" +65 53 training_loop """lcwa""" +65 53 evaluator """rankbased""" +65 54 dataset """kinships""" +65 54 model """conve""" +65 54 loss """crossentropy""" +65 54 regularizer """no""" +65 54 optimizer """adadelta""" +65 54 training_loop """lcwa""" +65 54 evaluator """rankbased""" +65 
55 dataset """kinships""" +65 55 model """conve""" +65 55 loss """crossentropy""" +65 55 regularizer """no""" +65 55 optimizer """adadelta""" +65 55 training_loop """lcwa""" +65 55 evaluator """rankbased""" +65 56 dataset """kinships""" +65 56 model """conve""" +65 56 loss """crossentropy""" +65 56 regularizer """no""" +65 56 optimizer """adadelta""" +65 56 training_loop """lcwa""" +65 56 evaluator """rankbased""" +65 57 dataset """kinships""" +65 57 model """conve""" +65 57 loss """crossentropy""" +65 57 regularizer """no""" +65 57 optimizer """adadelta""" +65 57 training_loop """lcwa""" +65 57 evaluator """rankbased""" +65 58 dataset """kinships""" +65 58 model """conve""" +65 58 loss """crossentropy""" +65 58 regularizer """no""" +65 58 optimizer """adadelta""" +65 58 training_loop """lcwa""" +65 58 evaluator """rankbased""" +65 59 dataset """kinships""" +65 59 model """conve""" +65 59 loss """crossentropy""" +65 59 regularizer """no""" +65 59 optimizer """adadelta""" +65 59 training_loop """lcwa""" +65 59 evaluator """rankbased""" +65 60 dataset """kinships""" +65 60 model """conve""" +65 60 loss """crossentropy""" +65 60 regularizer """no""" +65 60 optimizer """adadelta""" +65 60 training_loop """lcwa""" +65 60 evaluator """rankbased""" +65 61 dataset """kinships""" +65 61 model """conve""" +65 61 loss """crossentropy""" +65 61 regularizer """no""" +65 61 optimizer """adadelta""" +65 61 training_loop """lcwa""" +65 61 evaluator """rankbased""" +65 62 dataset """kinships""" +65 62 model """conve""" +65 62 loss """crossentropy""" +65 62 regularizer """no""" +65 62 optimizer """adadelta""" +65 62 training_loop """lcwa""" +65 62 evaluator """rankbased""" +65 63 dataset """kinships""" +65 63 model """conve""" +65 63 loss """crossentropy""" +65 63 regularizer """no""" +65 63 optimizer """adadelta""" +65 63 training_loop """lcwa""" +65 63 evaluator """rankbased""" +65 64 dataset """kinships""" +65 64 model """conve""" +65 64 loss """crossentropy""" +65 64 regularizer 
"""no""" +65 64 optimizer """adadelta""" +65 64 training_loop """lcwa""" +65 64 evaluator """rankbased""" +65 65 dataset """kinships""" +65 65 model """conve""" +65 65 loss """crossentropy""" +65 65 regularizer """no""" +65 65 optimizer """adadelta""" +65 65 training_loop """lcwa""" +65 65 evaluator """rankbased""" +65 66 dataset """kinships""" +65 66 model """conve""" +65 66 loss """crossentropy""" +65 66 regularizer """no""" +65 66 optimizer """adadelta""" +65 66 training_loop """lcwa""" +65 66 evaluator """rankbased""" +65 67 dataset """kinships""" +65 67 model """conve""" +65 67 loss """crossentropy""" +65 67 regularizer """no""" +65 67 optimizer """adadelta""" +65 67 training_loop """lcwa""" +65 67 evaluator """rankbased""" +65 68 dataset """kinships""" +65 68 model """conve""" +65 68 loss """crossentropy""" +65 68 regularizer """no""" +65 68 optimizer """adadelta""" +65 68 training_loop """lcwa""" +65 68 evaluator """rankbased""" +65 69 dataset """kinships""" +65 69 model """conve""" +65 69 loss """crossentropy""" +65 69 regularizer """no""" +65 69 optimizer """adadelta""" +65 69 training_loop """lcwa""" +65 69 evaluator """rankbased""" +65 70 dataset """kinships""" +65 70 model """conve""" +65 70 loss """crossentropy""" +65 70 regularizer """no""" +65 70 optimizer """adadelta""" +65 70 training_loop """lcwa""" +65 70 evaluator """rankbased""" +65 71 dataset """kinships""" +65 71 model """conve""" +65 71 loss """crossentropy""" +65 71 regularizer """no""" +65 71 optimizer """adadelta""" +65 71 training_loop """lcwa""" +65 71 evaluator """rankbased""" +65 72 dataset """kinships""" +65 72 model """conve""" +65 72 loss """crossentropy""" +65 72 regularizer """no""" +65 72 optimizer """adadelta""" +65 72 training_loop """lcwa""" +65 72 evaluator """rankbased""" +65 73 dataset """kinships""" +65 73 model """conve""" +65 73 loss """crossentropy""" +65 73 regularizer """no""" +65 73 optimizer """adadelta""" +65 73 training_loop """lcwa""" +65 73 evaluator 
"""rankbased""" +65 74 dataset """kinships""" +65 74 model """conve""" +65 74 loss """crossentropy""" +65 74 regularizer """no""" +65 74 optimizer """adadelta""" +65 74 training_loop """lcwa""" +65 74 evaluator """rankbased""" +65 75 dataset """kinships""" +65 75 model """conve""" +65 75 loss """crossentropy""" +65 75 regularizer """no""" +65 75 optimizer """adadelta""" +65 75 training_loop """lcwa""" +65 75 evaluator """rankbased""" +65 76 dataset """kinships""" +65 76 model """conve""" +65 76 loss """crossentropy""" +65 76 regularizer """no""" +65 76 optimizer """adadelta""" +65 76 training_loop """lcwa""" +65 76 evaluator """rankbased""" +65 77 dataset """kinships""" +65 77 model """conve""" +65 77 loss """crossentropy""" +65 77 regularizer """no""" +65 77 optimizer """adadelta""" +65 77 training_loop """lcwa""" +65 77 evaluator """rankbased""" +65 78 dataset """kinships""" +65 78 model """conve""" +65 78 loss """crossentropy""" +65 78 regularizer """no""" +65 78 optimizer """adadelta""" +65 78 training_loop """lcwa""" +65 78 evaluator """rankbased""" +65 79 dataset """kinships""" +65 79 model """conve""" +65 79 loss """crossentropy""" +65 79 regularizer """no""" +65 79 optimizer """adadelta""" +65 79 training_loop """lcwa""" +65 79 evaluator """rankbased""" +65 80 dataset """kinships""" +65 80 model """conve""" +65 80 loss """crossentropy""" +65 80 regularizer """no""" +65 80 optimizer """adadelta""" +65 80 training_loop """lcwa""" +65 80 evaluator """rankbased""" +65 81 dataset """kinships""" +65 81 model """conve""" +65 81 loss """crossentropy""" +65 81 regularizer """no""" +65 81 optimizer """adadelta""" +65 81 training_loop """lcwa""" +65 81 evaluator """rankbased""" +65 82 dataset """kinships""" +65 82 model """conve""" +65 82 loss """crossentropy""" +65 82 regularizer """no""" +65 82 optimizer """adadelta""" +65 82 training_loop """lcwa""" +65 82 evaluator """rankbased""" +65 83 dataset """kinships""" +65 83 model """conve""" +65 83 loss 
"""crossentropy""" +65 83 regularizer """no""" +65 83 optimizer """adadelta""" +65 83 training_loop """lcwa""" +65 83 evaluator """rankbased""" +65 84 dataset """kinships""" +65 84 model """conve""" +65 84 loss """crossentropy""" +65 84 regularizer """no""" +65 84 optimizer """adadelta""" +65 84 training_loop """lcwa""" +65 84 evaluator """rankbased""" +65 85 dataset """kinships""" +65 85 model """conve""" +65 85 loss """crossentropy""" +65 85 regularizer """no""" +65 85 optimizer """adadelta""" +65 85 training_loop """lcwa""" +65 85 evaluator """rankbased""" +65 86 dataset """kinships""" +65 86 model """conve""" +65 86 loss """crossentropy""" +65 86 regularizer """no""" +65 86 optimizer """adadelta""" +65 86 training_loop """lcwa""" +65 86 evaluator """rankbased""" +65 87 dataset """kinships""" +65 87 model """conve""" +65 87 loss """crossentropy""" +65 87 regularizer """no""" +65 87 optimizer """adadelta""" +65 87 training_loop """lcwa""" +65 87 evaluator """rankbased""" +65 88 dataset """kinships""" +65 88 model """conve""" +65 88 loss """crossentropy""" +65 88 regularizer """no""" +65 88 optimizer """adadelta""" +65 88 training_loop """lcwa""" +65 88 evaluator """rankbased""" +65 89 dataset """kinships""" +65 89 model """conve""" +65 89 loss """crossentropy""" +65 89 regularizer """no""" +65 89 optimizer """adadelta""" +65 89 training_loop """lcwa""" +65 89 evaluator """rankbased""" +65 90 dataset """kinships""" +65 90 model """conve""" +65 90 loss """crossentropy""" +65 90 regularizer """no""" +65 90 optimizer """adadelta""" +65 90 training_loop """lcwa""" +65 90 evaluator """rankbased""" +65 91 dataset """kinships""" +65 91 model """conve""" +65 91 loss """crossentropy""" +65 91 regularizer """no""" +65 91 optimizer """adadelta""" +65 91 training_loop """lcwa""" +65 91 evaluator """rankbased""" +65 92 dataset """kinships""" +65 92 model """conve""" +65 92 loss """crossentropy""" +65 92 regularizer """no""" +65 92 optimizer """adadelta""" +65 92 training_loop 
"""lcwa""" +65 92 evaluator """rankbased""" +65 93 dataset """kinships""" +65 93 model """conve""" +65 93 loss """crossentropy""" +65 93 regularizer """no""" +65 93 optimizer """adadelta""" +65 93 training_loop """lcwa""" +65 93 evaluator """rankbased""" +65 94 dataset """kinships""" +65 94 model """conve""" +65 94 loss """crossentropy""" +65 94 regularizer """no""" +65 94 optimizer """adadelta""" +65 94 training_loop """lcwa""" +65 94 evaluator """rankbased""" +65 95 dataset """kinships""" +65 95 model """conve""" +65 95 loss """crossentropy""" +65 95 regularizer """no""" +65 95 optimizer """adadelta""" +65 95 training_loop """lcwa""" +65 95 evaluator """rankbased""" +65 96 dataset """kinships""" +65 96 model """conve""" +65 96 loss """crossentropy""" +65 96 regularizer """no""" +65 96 optimizer """adadelta""" +65 96 training_loop """lcwa""" +65 96 evaluator """rankbased""" +65 97 dataset """kinships""" +65 97 model """conve""" +65 97 loss """crossentropy""" +65 97 regularizer """no""" +65 97 optimizer """adadelta""" +65 97 training_loop """lcwa""" +65 97 evaluator """rankbased""" +65 98 dataset """kinships""" +65 98 model """conve""" +65 98 loss """crossentropy""" +65 98 regularizer """no""" +65 98 optimizer """adadelta""" +65 98 training_loop """lcwa""" +65 98 evaluator """rankbased""" +65 99 dataset """kinships""" +65 99 model """conve""" +65 99 loss """crossentropy""" +65 99 regularizer """no""" +65 99 optimizer """adadelta""" +65 99 training_loop """lcwa""" +65 99 evaluator """rankbased""" +65 100 dataset """kinships""" +65 100 model """conve""" +65 100 loss """crossentropy""" +65 100 regularizer """no""" +65 100 optimizer """adadelta""" +65 100 training_loop """lcwa""" +65 100 evaluator """rankbased""" +66 1 model.output_channels 18.0 +66 1 model.input_dropout 0.4992635700781148 +66 1 model.output_dropout 0.21602976901819854 +66 1 model.feature_map_dropout 0.35432833205264325 +66 1 model.embedding_dim 0.0 +66 1 loss.margin 6.784036920780925 +66 1 
negative_sampler.num_negs_per_pos 2.0 +66 1 training.batch_size 2.0 +66 2 model.output_channels 38.0 +66 2 model.input_dropout 0.46876923458473924 +66 2 model.output_dropout 0.1618411998699798 +66 2 model.feature_map_dropout 0.4640218918520602 +66 2 model.embedding_dim 2.0 +66 2 loss.margin 3.9566954887939683 +66 2 negative_sampler.num_negs_per_pos 27.0 +66 2 training.batch_size 2.0 +66 3 model.output_channels 57.0 +66 3 model.input_dropout 0.04141637451988606 +66 3 model.output_dropout 0.3144684884670363 +66 3 model.feature_map_dropout 0.3847387226735873 +66 3 model.embedding_dim 0.0 +66 3 loss.margin 3.609826904362965 +66 3 negative_sampler.num_negs_per_pos 44.0 +66 3 training.batch_size 2.0 +66 4 model.output_channels 56.0 +66 4 model.input_dropout 0.49175351369651854 +66 4 model.output_dropout 0.15860823330626367 +66 4 model.feature_map_dropout 0.12812420775429356 +66 4 model.embedding_dim 2.0 +66 4 loss.margin 0.6890510868560626 +66 4 negative_sampler.num_negs_per_pos 35.0 +66 4 training.batch_size 0.0 +66 5 model.output_channels 44.0 +66 5 model.input_dropout 0.4611866650458954 +66 5 model.output_dropout 0.2821184160427845 +66 5 model.feature_map_dropout 0.062044620627040026 +66 5 model.embedding_dim 1.0 +66 5 loss.margin 1.7131052694551618 +66 5 negative_sampler.num_negs_per_pos 0.0 +66 5 training.batch_size 0.0 +66 6 model.output_channels 16.0 +66 6 model.input_dropout 0.4127397350012917 +66 6 model.output_dropout 0.2695194756318921 +66 6 model.feature_map_dropout 0.31329190180634053 +66 6 model.embedding_dim 2.0 +66 6 loss.margin 2.723332103150074 +66 6 negative_sampler.num_negs_per_pos 70.0 +66 6 training.batch_size 0.0 +66 7 model.output_channels 26.0 +66 7 model.input_dropout 0.05018772123037235 +66 7 model.output_dropout 0.47674042896156027 +66 7 model.feature_map_dropout 0.2560317954173481 +66 7 model.embedding_dim 2.0 +66 7 loss.margin 4.8782393093119465 +66 7 negative_sampler.num_negs_per_pos 21.0 +66 7 training.batch_size 1.0 +66 8 
model.output_channels 23.0 +66 8 model.input_dropout 0.3817813648876998 +66 8 model.output_dropout 0.2901438795503498 +66 8 model.feature_map_dropout 0.14997756195853584 +66 8 model.embedding_dim 2.0 +66 8 loss.margin 6.2560074228182225 +66 8 negative_sampler.num_negs_per_pos 80.0 +66 8 training.batch_size 0.0 +66 9 model.output_channels 31.0 +66 9 model.input_dropout 0.396467058111429 +66 9 model.output_dropout 0.17404197757658463 +66 9 model.feature_map_dropout 0.44556778194074076 +66 9 model.embedding_dim 1.0 +66 9 loss.margin 3.651336452044701 +66 9 negative_sampler.num_negs_per_pos 91.0 +66 9 training.batch_size 2.0 +66 10 model.output_channels 45.0 +66 10 model.input_dropout 0.026793048113310958 +66 10 model.output_dropout 0.46206111980944087 +66 10 model.feature_map_dropout 0.3927630645828381 +66 10 model.embedding_dim 1.0 +66 10 loss.margin 0.7012917222685906 +66 10 negative_sampler.num_negs_per_pos 15.0 +66 10 training.batch_size 0.0 +66 11 model.output_channels 54.0 +66 11 model.input_dropout 0.2936847471391564 +66 11 model.output_dropout 0.2908667687774911 +66 11 model.feature_map_dropout 0.010546769261019218 +66 11 model.embedding_dim 0.0 +66 11 loss.margin 6.71341741527333 +66 11 negative_sampler.num_negs_per_pos 53.0 +66 11 training.batch_size 0.0 +66 12 model.output_channels 46.0 +66 12 model.input_dropout 0.489551708154683 +66 12 model.output_dropout 0.0722032713258477 +66 12 model.feature_map_dropout 0.3314046566665853 +66 12 model.embedding_dim 2.0 +66 12 loss.margin 2.5249703445105043 +66 12 negative_sampler.num_negs_per_pos 41.0 +66 12 training.batch_size 1.0 +66 13 model.output_channels 50.0 +66 13 model.input_dropout 0.006032410883421657 +66 13 model.output_dropout 0.1456245832192533 +66 13 model.feature_map_dropout 0.0646984889573115 +66 13 model.embedding_dim 2.0 +66 13 loss.margin 3.273547733192549 +66 13 negative_sampler.num_negs_per_pos 48.0 +66 13 training.batch_size 0.0 +66 14 model.output_channels 39.0 +66 14 model.input_dropout 
0.01163051290360373 +66 14 model.output_dropout 0.41598289422410045 +66 14 model.feature_map_dropout 0.44477095836484676 +66 14 model.embedding_dim 1.0 +66 14 loss.margin 4.223590559632651 +66 14 negative_sampler.num_negs_per_pos 13.0 +66 14 training.batch_size 1.0 +66 15 model.output_channels 17.0 +66 15 model.input_dropout 0.2840386026593866 +66 15 model.output_dropout 0.2344867194281514 +66 15 model.feature_map_dropout 0.23992763789876376 +66 15 model.embedding_dim 2.0 +66 15 loss.margin 6.656293463289489 +66 15 negative_sampler.num_negs_per_pos 32.0 +66 15 training.batch_size 1.0 +66 16 model.output_channels 40.0 +66 16 model.input_dropout 0.06603754479895668 +66 16 model.output_dropout 0.10977450335129463 +66 16 model.feature_map_dropout 0.4099449368867644 +66 16 model.embedding_dim 2.0 +66 16 loss.margin 6.372148827277944 +66 16 negative_sampler.num_negs_per_pos 6.0 +66 16 training.batch_size 1.0 +66 17 model.output_channels 23.0 +66 17 model.input_dropout 0.27515040592735873 +66 17 model.output_dropout 0.05810288503972405 +66 17 model.feature_map_dropout 0.3514537113109383 +66 17 model.embedding_dim 1.0 +66 17 loss.margin 1.8352620629643486 +66 17 negative_sampler.num_negs_per_pos 85.0 +66 17 training.batch_size 2.0 +66 18 model.output_channels 34.0 +66 18 model.input_dropout 0.3802759163099473 +66 18 model.output_dropout 0.32063453986980606 +66 18 model.feature_map_dropout 0.32864188978152725 +66 18 model.embedding_dim 2.0 +66 18 loss.margin 8.335977279958907 +66 18 negative_sampler.num_negs_per_pos 31.0 +66 18 training.batch_size 1.0 +66 19 model.output_channels 55.0 +66 19 model.input_dropout 0.022495846895496274 +66 19 model.output_dropout 0.20294464809830848 +66 19 model.feature_map_dropout 0.30999681047955613 +66 19 model.embedding_dim 2.0 +66 19 loss.margin 2.785931112026678 +66 19 negative_sampler.num_negs_per_pos 27.0 +66 19 training.batch_size 0.0 +66 20 model.output_channels 44.0 +66 20 model.input_dropout 0.07822471316399843 +66 20 
model.output_dropout 0.4997247824295475 +66 20 model.feature_map_dropout 0.4838754870153326 +66 20 model.embedding_dim 1.0 +66 20 loss.margin 2.810112063497323 +66 20 negative_sampler.num_negs_per_pos 4.0 +66 20 training.batch_size 0.0 +66 21 model.output_channels 60.0 +66 21 model.input_dropout 0.41798432088066834 +66 21 model.output_dropout 0.12644038627977638 +66 21 model.feature_map_dropout 0.09355529784561861 +66 21 model.embedding_dim 0.0 +66 21 loss.margin 4.537828070397886 +66 21 negative_sampler.num_negs_per_pos 90.0 +66 21 training.batch_size 2.0 +66 22 model.output_channels 57.0 +66 22 model.input_dropout 0.39805345075937487 +66 22 model.output_dropout 0.030526921543654884 +66 22 model.feature_map_dropout 0.23732836932345563 +66 22 model.embedding_dim 1.0 +66 22 loss.margin 8.631433764025827 +66 22 negative_sampler.num_negs_per_pos 69.0 +66 22 training.batch_size 0.0 +66 23 model.output_channels 52.0 +66 23 model.input_dropout 0.44752247311066434 +66 23 model.output_dropout 0.37655517610822015 +66 23 model.feature_map_dropout 0.13032594091118055 +66 23 model.embedding_dim 2.0 +66 23 loss.margin 4.680253566483734 +66 23 negative_sampler.num_negs_per_pos 33.0 +66 23 training.batch_size 1.0 +66 24 model.output_channels 42.0 +66 24 model.input_dropout 0.03925557203254576 +66 24 model.output_dropout 0.2747034873574614 +66 24 model.feature_map_dropout 0.3938363650003491 +66 24 model.embedding_dim 2.0 +66 24 loss.margin 0.5595542877784676 +66 24 negative_sampler.num_negs_per_pos 76.0 +66 24 training.batch_size 1.0 +66 25 model.output_channels 27.0 +66 25 model.input_dropout 0.1872546998131725 +66 25 model.output_dropout 0.4499632087586383 +66 25 model.feature_map_dropout 0.3319422432411406 +66 25 model.embedding_dim 1.0 +66 25 loss.margin 8.974698039055596 +66 25 negative_sampler.num_negs_per_pos 15.0 +66 25 training.batch_size 1.0 +66 26 model.output_channels 38.0 +66 26 model.input_dropout 0.39993371006382455 +66 26 model.output_dropout 0.38594109616715866 
+66 26 model.feature_map_dropout 0.3112971410878053 +66 26 model.embedding_dim 2.0 +66 26 loss.margin 6.68289337072131 +66 26 negative_sampler.num_negs_per_pos 68.0 +66 26 training.batch_size 1.0 +66 27 model.output_channels 21.0 +66 27 model.input_dropout 0.21239409561384265 +66 27 model.output_dropout 0.13084854893586034 +66 27 model.feature_map_dropout 0.17017606405866753 +66 27 model.embedding_dim 1.0 +66 27 loss.margin 2.7697066246968025 +66 27 negative_sampler.num_negs_per_pos 7.0 +66 27 training.batch_size 0.0 +66 28 model.output_channels 41.0 +66 28 model.input_dropout 0.20247668973597094 +66 28 model.output_dropout 0.17969555831841838 +66 28 model.feature_map_dropout 0.2497519598204887 +66 28 model.embedding_dim 2.0 +66 28 loss.margin 5.821697786938962 +66 28 negative_sampler.num_negs_per_pos 52.0 +66 28 training.batch_size 2.0 +66 29 model.output_channels 46.0 +66 29 model.input_dropout 0.02859011326223454 +66 29 model.output_dropout 0.24450299122845687 +66 29 model.feature_map_dropout 0.4130055050880921 +66 29 model.embedding_dim 2.0 +66 29 loss.margin 5.6341499960916215 +66 29 negative_sampler.num_negs_per_pos 77.0 +66 29 training.batch_size 1.0 +66 30 model.output_channels 35.0 +66 30 model.input_dropout 0.16911073625807665 +66 30 model.output_dropout 0.4134209870157584 +66 30 model.feature_map_dropout 0.460907659205094 +66 30 model.embedding_dim 2.0 +66 30 loss.margin 4.545832209677191 +66 30 negative_sampler.num_negs_per_pos 21.0 +66 30 training.batch_size 1.0 +66 31 model.output_channels 29.0 +66 31 model.input_dropout 0.1716165029300698 +66 31 model.output_dropout 0.08086667925307456 +66 31 model.feature_map_dropout 0.2230920170735437 +66 31 model.embedding_dim 2.0 +66 31 loss.margin 2.9889019976503746 +66 31 negative_sampler.num_negs_per_pos 54.0 +66 31 training.batch_size 1.0 +66 32 model.output_channels 38.0 +66 32 model.input_dropout 0.04134144038813048 +66 32 model.output_dropout 0.323400889335664 +66 32 model.feature_map_dropout 
0.2922631825561196 +66 32 model.embedding_dim 1.0 +66 32 loss.margin 8.348192284202728 +66 32 negative_sampler.num_negs_per_pos 4.0 +66 32 training.batch_size 1.0 +66 33 model.output_channels 25.0 +66 33 model.input_dropout 0.036560171954425624 +66 33 model.output_dropout 0.49153071904602164 +66 33 model.feature_map_dropout 0.4844440538273074 +66 33 model.embedding_dim 1.0 +66 33 loss.margin 8.297094613874563 +66 33 negative_sampler.num_negs_per_pos 70.0 +66 33 training.batch_size 1.0 +66 34 model.output_channels 41.0 +66 34 model.input_dropout 0.07509676978108909 +66 34 model.output_dropout 0.3411261445778007 +66 34 model.feature_map_dropout 0.029562517430666457 +66 34 model.embedding_dim 1.0 +66 34 loss.margin 9.062578407298318 +66 34 negative_sampler.num_negs_per_pos 66.0 +66 34 training.batch_size 0.0 +66 35 model.output_channels 32.0 +66 35 model.input_dropout 0.39721873116350076 +66 35 model.output_dropout 0.20724956949301693 +66 35 model.feature_map_dropout 0.40064822327366184 +66 35 model.embedding_dim 0.0 +66 35 loss.margin 6.1155746037546725 +66 35 negative_sampler.num_negs_per_pos 32.0 +66 35 training.batch_size 2.0 +66 36 model.output_channels 59.0 +66 36 model.input_dropout 0.3261954589207182 +66 36 model.output_dropout 0.420861394540096 +66 36 model.feature_map_dropout 0.21463272926832266 +66 36 model.embedding_dim 2.0 +66 36 loss.margin 5.9365190808906325 +66 36 negative_sampler.num_negs_per_pos 54.0 +66 36 training.batch_size 2.0 +66 37 model.output_channels 58.0 +66 37 model.input_dropout 0.23108181744002781 +66 37 model.output_dropout 0.20418304607347176 +66 37 model.feature_map_dropout 0.4854597575318953 +66 37 model.embedding_dim 0.0 +66 37 loss.margin 9.194661915196777 +66 37 negative_sampler.num_negs_per_pos 16.0 +66 37 training.batch_size 0.0 +66 38 model.output_channels 45.0 +66 38 model.input_dropout 0.275264780437034 +66 38 model.output_dropout 0.2686439644920474 +66 38 model.feature_map_dropout 0.31937292055589944 +66 38 
model.embedding_dim 2.0 +66 38 loss.margin 8.961205265231516 +66 38 negative_sampler.num_negs_per_pos 30.0 +66 38 training.batch_size 2.0 +66 39 model.output_channels 23.0 +66 39 model.input_dropout 0.023357746486354913 +66 39 model.output_dropout 0.1972056707784663 +66 39 model.feature_map_dropout 0.10997553073842964 +66 39 model.embedding_dim 1.0 +66 39 loss.margin 9.841619429793846 +66 39 negative_sampler.num_negs_per_pos 62.0 +66 39 training.batch_size 1.0 +66 40 model.output_channels 54.0 +66 40 model.input_dropout 0.2540192791894726 +66 40 model.output_dropout 0.15456299843659316 +66 40 model.feature_map_dropout 0.4109136522000401 +66 40 model.embedding_dim 1.0 +66 40 loss.margin 5.003819976505639 +66 40 negative_sampler.num_negs_per_pos 45.0 +66 40 training.batch_size 0.0 +66 41 model.output_channels 36.0 +66 41 model.input_dropout 0.14847597014749858 +66 41 model.output_dropout 0.2078842805888415 +66 41 model.feature_map_dropout 0.36740994513833847 +66 41 model.embedding_dim 1.0 +66 41 loss.margin 9.898991145598128 +66 41 negative_sampler.num_negs_per_pos 83.0 +66 41 training.batch_size 1.0 +66 42 model.output_channels 44.0 +66 42 model.input_dropout 0.3209589038723929 +66 42 model.output_dropout 0.146373783108112 +66 42 model.feature_map_dropout 0.49191157664350715 +66 42 model.embedding_dim 1.0 +66 42 loss.margin 7.10669354232483 +66 42 negative_sampler.num_negs_per_pos 86.0 +66 42 training.batch_size 0.0 +66 43 model.output_channels 62.0 +66 43 model.input_dropout 0.3611407515171492 +66 43 model.output_dropout 0.3321337400369716 +66 43 model.feature_map_dropout 0.03592989847499228 +66 43 model.embedding_dim 1.0 +66 43 loss.margin 5.8495803475821235 +66 43 negative_sampler.num_negs_per_pos 51.0 +66 43 training.batch_size 0.0 +66 44 model.output_channels 26.0 +66 44 model.input_dropout 0.1956286922260388 +66 44 model.output_dropout 0.4052715909570016 +66 44 model.feature_map_dropout 0.13845038370991553 +66 44 model.embedding_dim 2.0 +66 44 loss.margin 
6.561111685893388 +66 44 negative_sampler.num_negs_per_pos 75.0 +66 44 training.batch_size 0.0 +66 45 model.output_channels 24.0 +66 45 model.input_dropout 0.39857145232499497 +66 45 model.output_dropout 0.4563177675726511 +66 45 model.feature_map_dropout 0.42848208521669484 +66 45 model.embedding_dim 2.0 +66 45 loss.margin 2.4929738517590225 +66 45 negative_sampler.num_negs_per_pos 7.0 +66 45 training.batch_size 0.0 +66 46 model.output_channels 29.0 +66 46 model.input_dropout 0.24778179405168538 +66 46 model.output_dropout 0.16237148191875161 +66 46 model.feature_map_dropout 0.4882842219231106 +66 46 model.embedding_dim 2.0 +66 46 loss.margin 6.150449641622304 +66 46 negative_sampler.num_negs_per_pos 96.0 +66 46 training.batch_size 2.0 +66 47 model.output_channels 59.0 +66 47 model.input_dropout 0.4968767550314706 +66 47 model.output_dropout 0.031156681193465763 +66 47 model.feature_map_dropout 0.24293575042424798 +66 47 model.embedding_dim 1.0 +66 47 loss.margin 2.254337192040576 +66 47 negative_sampler.num_negs_per_pos 11.0 +66 47 training.batch_size 1.0 +66 48 model.output_channels 30.0 +66 48 model.input_dropout 0.40369653298189656 +66 48 model.output_dropout 0.018618196131973963 +66 48 model.feature_map_dropout 0.3673644389234722 +66 48 model.embedding_dim 2.0 +66 48 loss.margin 9.827667095304236 +66 48 negative_sampler.num_negs_per_pos 2.0 +66 48 training.batch_size 2.0 +66 49 model.output_channels 28.0 +66 49 model.input_dropout 0.21522422904904115 +66 49 model.output_dropout 0.32830283385357606 +66 49 model.feature_map_dropout 0.28240428671331314 +66 49 model.embedding_dim 1.0 +66 49 loss.margin 1.090738835776783 +66 49 negative_sampler.num_negs_per_pos 65.0 +66 49 training.batch_size 0.0 +66 50 model.output_channels 17.0 +66 50 model.input_dropout 0.37211316308613235 +66 50 model.output_dropout 0.24656504995112088 +66 50 model.feature_map_dropout 0.05478386023280574 +66 50 model.embedding_dim 2.0 +66 50 loss.margin 3.860889375794351 +66 50 
negative_sampler.num_negs_per_pos 15.0 +66 50 training.batch_size 1.0 +66 51 model.output_channels 45.0 +66 51 model.input_dropout 0.23432411996444363 +66 51 model.output_dropout 0.3036490574913651 +66 51 model.feature_map_dropout 0.189568968791327 +66 51 model.embedding_dim 2.0 +66 51 loss.margin 1.3765935753463945 +66 51 negative_sampler.num_negs_per_pos 60.0 +66 51 training.batch_size 1.0 +66 52 model.output_channels 26.0 +66 52 model.input_dropout 0.3320047842257578 +66 52 model.output_dropout 0.14866665592356415 +66 52 model.feature_map_dropout 0.04398241949713988 +66 52 model.embedding_dim 1.0 +66 52 loss.margin 5.547909189682302 +66 52 negative_sampler.num_negs_per_pos 18.0 +66 52 training.batch_size 1.0 +66 53 model.output_channels 37.0 +66 53 model.input_dropout 0.03193413778717774 +66 53 model.output_dropout 0.45879719891625803 +66 53 model.feature_map_dropout 0.11606858880045606 +66 53 model.embedding_dim 2.0 +66 53 loss.margin 8.90570925428789 +66 53 negative_sampler.num_negs_per_pos 51.0 +66 53 training.batch_size 0.0 +66 54 model.output_channels 64.0 +66 54 model.input_dropout 0.42039659892058284 +66 54 model.output_dropout 0.43981421282977934 +66 54 model.feature_map_dropout 0.1152719985969049 +66 54 model.embedding_dim 2.0 +66 54 loss.margin 3.426478014144043 +66 54 negative_sampler.num_negs_per_pos 90.0 +66 54 training.batch_size 2.0 +66 55 model.output_channels 30.0 +66 55 model.input_dropout 0.263817767743843 +66 55 model.output_dropout 0.27554263770079446 +66 55 model.feature_map_dropout 0.31557393622652913 +66 55 model.embedding_dim 2.0 +66 55 loss.margin 8.698765123498474 +66 55 negative_sampler.num_negs_per_pos 24.0 +66 55 training.batch_size 2.0 +66 56 model.output_channels 18.0 +66 56 model.input_dropout 0.41263595049480517 +66 56 model.output_dropout 0.2947397726872736 +66 56 model.feature_map_dropout 0.10642315822402837 +66 56 model.embedding_dim 2.0 +66 56 loss.margin 9.896076831833216 +66 56 negative_sampler.num_negs_per_pos 0.0 +66 56 
training.batch_size 1.0 +66 57 model.output_channels 16.0 +66 57 model.input_dropout 0.2599636841047561 +66 57 model.output_dropout 0.04161717432415485 +66 57 model.feature_map_dropout 0.31854818432779136 +66 57 model.embedding_dim 2.0 +66 57 loss.margin 2.474214942183166 +66 57 negative_sampler.num_negs_per_pos 53.0 +66 57 training.batch_size 1.0 +66 58 model.output_channels 53.0 +66 58 model.input_dropout 0.037033363601428115 +66 58 model.output_dropout 0.3457688965830558 +66 58 model.feature_map_dropout 0.031836979873902105 +66 58 model.embedding_dim 0.0 +66 58 loss.margin 9.317139235402825 +66 58 negative_sampler.num_negs_per_pos 64.0 +66 58 training.batch_size 2.0 +66 59 model.output_channels 59.0 +66 59 model.input_dropout 0.43951229524322416 +66 59 model.output_dropout 0.16133640370347857 +66 59 model.feature_map_dropout 0.21065010638682663 +66 59 model.embedding_dim 0.0 +66 59 loss.margin 6.91905344699975 +66 59 negative_sampler.num_negs_per_pos 74.0 +66 59 training.batch_size 1.0 +66 60 model.output_channels 40.0 +66 60 model.input_dropout 0.19373840053369024 +66 60 model.output_dropout 0.33049286163862407 +66 60 model.feature_map_dropout 0.36187176560151935 +66 60 model.embedding_dim 1.0 +66 60 loss.margin 9.086196500184112 +66 60 negative_sampler.num_negs_per_pos 26.0 +66 60 training.batch_size 1.0 +66 61 model.output_channels 34.0 +66 61 model.input_dropout 0.11055613802885772 +66 61 model.output_dropout 0.3778670783878167 +66 61 model.feature_map_dropout 0.1729474322054511 +66 61 model.embedding_dim 1.0 +66 61 loss.margin 2.0720397723574253 +66 61 negative_sampler.num_negs_per_pos 18.0 +66 61 training.batch_size 1.0 +66 62 model.output_channels 59.0 +66 62 model.input_dropout 0.26238378115505445 +66 62 model.output_dropout 0.16775207929340707 +66 62 model.feature_map_dropout 0.0981409214229273 +66 62 model.embedding_dim 1.0 +66 62 loss.margin 9.238815012330935 +66 62 negative_sampler.num_negs_per_pos 81.0 +66 62 training.batch_size 2.0 +66 63 
model.output_channels 32.0 +66 63 model.input_dropout 0.40516990061764546 +66 63 model.output_dropout 0.44634831577320805 +66 63 model.feature_map_dropout 0.12160318101015738 +66 63 model.embedding_dim 2.0 +66 63 loss.margin 2.2691144392394533 +66 63 negative_sampler.num_negs_per_pos 33.0 +66 63 training.batch_size 0.0 +66 64 model.output_channels 50.0 +66 64 model.input_dropout 0.024494230313939436 +66 64 model.output_dropout 0.04950908869758913 +66 64 model.feature_map_dropout 0.14822100715527797 +66 64 model.embedding_dim 1.0 +66 64 loss.margin 0.6267572127416288 +66 64 negative_sampler.num_negs_per_pos 98.0 +66 64 training.batch_size 2.0 +66 65 model.output_channels 34.0 +66 65 model.input_dropout 0.00755993345775785 +66 65 model.output_dropout 0.4931919212567625 +66 65 model.feature_map_dropout 0.32426882113750594 +66 65 model.embedding_dim 2.0 +66 65 loss.margin 2.1940273896763016 +66 65 negative_sampler.num_negs_per_pos 41.0 +66 65 training.batch_size 2.0 +66 66 model.output_channels 54.0 +66 66 model.input_dropout 0.40744751821049136 +66 66 model.output_dropout 0.03922186359769758 +66 66 model.feature_map_dropout 0.4562788500742206 +66 66 model.embedding_dim 2.0 +66 66 loss.margin 8.247433305370652 +66 66 negative_sampler.num_negs_per_pos 43.0 +66 66 training.batch_size 1.0 +66 67 model.output_channels 44.0 +66 67 model.input_dropout 0.36066056452828393 +66 67 model.output_dropout 0.4087020213620538 +66 67 model.feature_map_dropout 0.46635225054094176 +66 67 model.embedding_dim 1.0 +66 67 loss.margin 9.419108440517983 +66 67 negative_sampler.num_negs_per_pos 39.0 +66 67 training.batch_size 1.0 +66 68 model.output_channels 42.0 +66 68 model.input_dropout 0.24235825409093736 +66 68 model.output_dropout 0.24895017655798146 +66 68 model.feature_map_dropout 0.21258265990256664 +66 68 model.embedding_dim 2.0 +66 68 loss.margin 3.023995328670346 +66 68 negative_sampler.num_negs_per_pos 15.0 +66 68 training.batch_size 2.0 +66 69 model.output_channels 31.0 +66 69 
model.input_dropout 0.35384541157339067 +66 69 model.output_dropout 0.430916127715856 +66 69 model.feature_map_dropout 0.2587876319297049 +66 69 model.embedding_dim 2.0 +66 69 loss.margin 9.293964557469964 +66 69 negative_sampler.num_negs_per_pos 41.0 +66 69 training.batch_size 2.0 +66 70 model.output_channels 43.0 +66 70 model.input_dropout 0.07887612141707062 +66 70 model.output_dropout 0.42704493146334066 +66 70 model.feature_map_dropout 0.0763589108775376 +66 70 model.embedding_dim 1.0 +66 70 loss.margin 1.1973473906286025 +66 70 negative_sampler.num_negs_per_pos 36.0 +66 70 training.batch_size 1.0 +66 71 model.output_channels 28.0 +66 71 model.input_dropout 0.4495564315638591 +66 71 model.output_dropout 0.3012424755788773 +66 71 model.feature_map_dropout 0.02256493005696586 +66 71 model.embedding_dim 1.0 +66 71 loss.margin 0.8328231329950297 +66 71 negative_sampler.num_negs_per_pos 22.0 +66 71 training.batch_size 2.0 +66 72 model.output_channels 16.0 +66 72 model.input_dropout 0.4620716281194675 +66 72 model.output_dropout 0.00805140738188781 +66 72 model.feature_map_dropout 0.3633420546540423 +66 72 model.embedding_dim 0.0 +66 72 loss.margin 1.0569856099738937 +66 72 negative_sampler.num_negs_per_pos 75.0 +66 72 training.batch_size 1.0 +66 73 model.output_channels 26.0 +66 73 model.input_dropout 0.13222523034741862 +66 73 model.output_dropout 0.38215710979372086 +66 73 model.feature_map_dropout 0.16902038624806492 +66 73 model.embedding_dim 2.0 +66 73 loss.margin 6.117682242547298 +66 73 negative_sampler.num_negs_per_pos 84.0 +66 73 training.batch_size 2.0 +66 74 model.output_channels 24.0 +66 74 model.input_dropout 0.06464005012245627 +66 74 model.output_dropout 0.043673320388529946 +66 74 model.feature_map_dropout 0.4941496086123896 +66 74 model.embedding_dim 0.0 +66 74 loss.margin 6.631461360618178 +66 74 negative_sampler.num_negs_per_pos 19.0 +66 74 training.batch_size 1.0 +66 75 model.output_channels 18.0 +66 75 model.input_dropout 0.13467851869246533 
+66 75 model.output_dropout 0.02991577062136047 +66 75 model.feature_map_dropout 0.2487054122774467 +66 75 model.embedding_dim 2.0 +66 75 loss.margin 5.280816645770097 +66 75 negative_sampler.num_negs_per_pos 99.0 +66 75 training.batch_size 1.0 +66 76 model.output_channels 42.0 +66 76 model.input_dropout 0.2555096573497287 +66 76 model.output_dropout 0.145931129001021 +66 76 model.feature_map_dropout 0.09494813510968886 +66 76 model.embedding_dim 0.0 +66 76 loss.margin 4.725684411366493 +66 76 negative_sampler.num_negs_per_pos 61.0 +66 76 training.batch_size 0.0 +66 77 model.output_channels 63.0 +66 77 model.input_dropout 0.20856949296958854 +66 77 model.output_dropout 0.25904460535077817 +66 77 model.feature_map_dropout 0.2503480521966884 +66 77 model.embedding_dim 2.0 +66 77 loss.margin 9.19729330985077 +66 77 negative_sampler.num_negs_per_pos 71.0 +66 77 training.batch_size 1.0 +66 78 model.output_channels 45.0 +66 78 model.input_dropout 0.43300745369959537 +66 78 model.output_dropout 0.30111477216023663 +66 78 model.feature_map_dropout 0.4326473100398718 +66 78 model.embedding_dim 0.0 +66 78 loss.margin 9.532876218293053 +66 78 negative_sampler.num_negs_per_pos 99.0 +66 78 training.batch_size 2.0 +66 79 model.output_channels 54.0 +66 79 model.input_dropout 0.09423596607425172 +66 79 model.output_dropout 0.303977845653983 +66 79 model.feature_map_dropout 0.053779597079705976 +66 79 model.embedding_dim 1.0 +66 79 loss.margin 4.2859822809878 +66 79 negative_sampler.num_negs_per_pos 66.0 +66 79 training.batch_size 0.0 +66 80 model.output_channels 43.0 +66 80 model.input_dropout 0.03675062637561616 +66 80 model.output_dropout 0.026314467667956642 +66 80 model.feature_map_dropout 0.17046433546484474 +66 80 model.embedding_dim 0.0 +66 80 loss.margin 9.151966804420958 +66 80 negative_sampler.num_negs_per_pos 50.0 +66 80 training.batch_size 1.0 +66 81 model.output_channels 51.0 +66 81 model.input_dropout 0.08671168590655448 +66 81 model.output_dropout 0.3875843363862906 
+66 81 model.feature_map_dropout 0.102045719319374 +66 81 model.embedding_dim 1.0 +66 81 loss.margin 5.827348475021714 +66 81 negative_sampler.num_negs_per_pos 38.0 +66 81 training.batch_size 1.0 +66 82 model.output_channels 51.0 +66 82 model.input_dropout 0.4150623276061088 +66 82 model.output_dropout 0.4047605816403991 +66 82 model.feature_map_dropout 0.3144465331941494 +66 82 model.embedding_dim 1.0 +66 82 loss.margin 5.114763213232752 +66 82 negative_sampler.num_negs_per_pos 10.0 +66 82 training.batch_size 0.0 +66 83 model.output_channels 30.0 +66 83 model.input_dropout 0.42730034940662204 +66 83 model.output_dropout 0.3653834568453568 +66 83 model.feature_map_dropout 0.20256120420189638 +66 83 model.embedding_dim 0.0 +66 83 loss.margin 1.8665387633314303 +66 83 negative_sampler.num_negs_per_pos 81.0 +66 83 training.batch_size 0.0 +66 84 model.output_channels 61.0 +66 84 model.input_dropout 0.05004556930713 +66 84 model.output_dropout 0.4880066691257286 +66 84 model.feature_map_dropout 0.4011041250037675 +66 84 model.embedding_dim 1.0 +66 84 loss.margin 4.800111006810945 +66 84 negative_sampler.num_negs_per_pos 39.0 +66 84 training.batch_size 0.0 +66 85 model.output_channels 41.0 +66 85 model.input_dropout 0.06391009908744799 +66 85 model.output_dropout 0.0668740013769597 +66 85 model.feature_map_dropout 0.0007283765170572054 +66 85 model.embedding_dim 0.0 +66 85 loss.margin 1.6060703904807934 +66 85 negative_sampler.num_negs_per_pos 36.0 +66 85 training.batch_size 1.0 +66 86 model.output_channels 29.0 +66 86 model.input_dropout 0.32036028589419424 +66 86 model.output_dropout 0.20298398589590078 +66 86 model.feature_map_dropout 0.34300316591718494 +66 86 model.embedding_dim 0.0 +66 86 loss.margin 6.690142643476748 +66 86 negative_sampler.num_negs_per_pos 52.0 +66 86 training.batch_size 0.0 +66 87 model.output_channels 39.0 +66 87 model.input_dropout 0.3380534144372389 +66 87 model.output_dropout 0.48021947183835484 +66 87 model.feature_map_dropout 
0.21962094286611716 +66 87 model.embedding_dim 1.0 +66 87 loss.margin 2.635191479029973 +66 87 negative_sampler.num_negs_per_pos 15.0 +66 87 training.batch_size 0.0 +66 88 model.output_channels 29.0 +66 88 model.input_dropout 0.30503103573871065 +66 88 model.output_dropout 0.3068467033982679 +66 88 model.feature_map_dropout 0.4523003022029657 +66 88 model.embedding_dim 0.0 +66 88 loss.margin 9.30948136903311 +66 88 negative_sampler.num_negs_per_pos 84.0 +66 88 training.batch_size 1.0 +66 89 model.output_channels 63.0 +66 89 model.input_dropout 0.277587479818238 +66 89 model.output_dropout 0.028556269026188796 +66 89 model.feature_map_dropout 0.4974110299638643 +66 89 model.embedding_dim 0.0 +66 89 loss.margin 7.644259822125579 +66 89 negative_sampler.num_negs_per_pos 22.0 +66 89 training.batch_size 1.0 +66 90 model.output_channels 58.0 +66 90 model.input_dropout 0.37527599750855034 +66 90 model.output_dropout 0.4286078589169252 +66 90 model.feature_map_dropout 0.38386116281748434 +66 90 model.embedding_dim 1.0 +66 90 loss.margin 6.804716249088143 +66 90 negative_sampler.num_negs_per_pos 75.0 +66 90 training.batch_size 2.0 +66 91 model.output_channels 37.0 +66 91 model.input_dropout 0.0865532600777299 +66 91 model.output_dropout 0.05939495031847436 +66 91 model.feature_map_dropout 0.1579698010515923 +66 91 model.embedding_dim 2.0 +66 91 loss.margin 2.2665847241054573 +66 91 negative_sampler.num_negs_per_pos 68.0 +66 91 training.batch_size 0.0 +66 92 model.output_channels 23.0 +66 92 model.input_dropout 0.43685432078840575 +66 92 model.output_dropout 0.24072635895755368 +66 92 model.feature_map_dropout 0.2830087615877888 +66 92 model.embedding_dim 1.0 +66 92 loss.margin 6.73656088405357 +66 92 negative_sampler.num_negs_per_pos 28.0 +66 92 training.batch_size 1.0 +66 93 model.output_channels 45.0 +66 93 model.input_dropout 0.44931171635737127 +66 93 model.output_dropout 0.10896886139495171 +66 93 model.feature_map_dropout 0.317292430866712 +66 93 model.embedding_dim 
1.0 +66 93 loss.margin 1.9082612532355159 +66 93 negative_sampler.num_negs_per_pos 97.0 +66 93 training.batch_size 2.0 +66 94 model.output_channels 36.0 +66 94 model.input_dropout 0.39278488928022326 +66 94 model.output_dropout 0.10348635438409093 +66 94 model.feature_map_dropout 0.12574358171332672 +66 94 model.embedding_dim 0.0 +66 94 loss.margin 2.8339035887045347 +66 94 negative_sampler.num_negs_per_pos 50.0 +66 94 training.batch_size 1.0 +66 95 model.output_channels 37.0 +66 95 model.input_dropout 0.3569007134207661 +66 95 model.output_dropout 0.08338567721646267 +66 95 model.feature_map_dropout 0.4873473079435685 +66 95 model.embedding_dim 0.0 +66 95 loss.margin 6.02287243924376 +66 95 negative_sampler.num_negs_per_pos 93.0 +66 95 training.batch_size 0.0 +66 96 model.output_channels 64.0 +66 96 model.input_dropout 0.019385191249062406 +66 96 model.output_dropout 0.3939331022740082 +66 96 model.feature_map_dropout 0.4433133717799212 +66 96 model.embedding_dim 1.0 +66 96 loss.margin 6.395974255917257 +66 96 negative_sampler.num_negs_per_pos 8.0 +66 96 training.batch_size 2.0 +66 97 model.output_channels 43.0 +66 97 model.input_dropout 0.40510398989163293 +66 97 model.output_dropout 0.15457759420861178 +66 97 model.feature_map_dropout 0.09132272899968524 +66 97 model.embedding_dim 2.0 +66 97 loss.margin 7.597081879791527 +66 97 negative_sampler.num_negs_per_pos 14.0 +66 97 training.batch_size 2.0 +66 98 model.output_channels 63.0 +66 98 model.input_dropout 0.026238018438899335 +66 98 model.output_dropout 0.21120569277203227 +66 98 model.feature_map_dropout 0.3232871688212554 +66 98 model.embedding_dim 2.0 +66 98 loss.margin 5.406261240690835 +66 98 negative_sampler.num_negs_per_pos 50.0 +66 98 training.batch_size 1.0 +66 99 model.output_channels 39.0 +66 99 model.input_dropout 0.13459587386493554 +66 99 model.output_dropout 0.4708764004082219 +66 99 model.feature_map_dropout 0.029751779514536947 +66 99 model.embedding_dim 2.0 +66 99 loss.margin 
2.0424816974370907 +66 99 negative_sampler.num_negs_per_pos 20.0 +66 99 training.batch_size 1.0 +66 100 model.output_channels 16.0 +66 100 model.input_dropout 0.151321662659694 +66 100 model.output_dropout 0.1483953265312094 +66 100 model.feature_map_dropout 0.3834363468441186 +66 100 model.embedding_dim 2.0 +66 100 loss.margin 8.291517113573377 +66 100 negative_sampler.num_negs_per_pos 88.0 +66 100 training.batch_size 2.0 +66 1 dataset """kinships""" +66 1 model """conve""" +66 1 loss """marginranking""" +66 1 regularizer """no""" +66 1 optimizer """adadelta""" +66 1 training_loop """owa""" +66 1 negative_sampler """basic""" +66 1 evaluator """rankbased""" +66 2 dataset """kinships""" +66 2 model """conve""" +66 2 loss """marginranking""" +66 2 regularizer """no""" +66 2 optimizer """adadelta""" +66 2 training_loop """owa""" +66 2 negative_sampler """basic""" +66 2 evaluator """rankbased""" +66 3 dataset """kinships""" +66 3 model """conve""" +66 3 loss """marginranking""" +66 3 regularizer """no""" +66 3 optimizer """adadelta""" +66 3 training_loop """owa""" +66 3 negative_sampler """basic""" +66 3 evaluator """rankbased""" +66 4 dataset """kinships""" +66 4 model """conve""" +66 4 loss """marginranking""" +66 4 regularizer """no""" +66 4 optimizer """adadelta""" +66 4 training_loop """owa""" +66 4 negative_sampler """basic""" +66 4 evaluator """rankbased""" +66 5 dataset """kinships""" +66 5 model """conve""" +66 5 loss """marginranking""" +66 5 regularizer """no""" +66 5 optimizer """adadelta""" +66 5 training_loop """owa""" +66 5 negative_sampler """basic""" +66 5 evaluator """rankbased""" +66 6 dataset """kinships""" +66 6 model """conve""" +66 6 loss """marginranking""" +66 6 regularizer """no""" +66 6 optimizer """adadelta""" +66 6 training_loop """owa""" +66 6 negative_sampler """basic""" +66 6 evaluator """rankbased""" +66 7 dataset """kinships""" +66 7 model """conve""" +66 7 loss """marginranking""" +66 7 regularizer """no""" +66 7 optimizer 
"""adadelta""" +66 7 training_loop """owa""" +66 7 negative_sampler """basic""" +66 7 evaluator """rankbased""" +66 8 dataset """kinships""" +66 8 model """conve""" +66 8 loss """marginranking""" +66 8 regularizer """no""" +66 8 optimizer """adadelta""" +66 8 training_loop """owa""" +66 8 negative_sampler """basic""" +66 8 evaluator """rankbased""" +66 9 dataset """kinships""" +66 9 model """conve""" +66 9 loss """marginranking""" +66 9 regularizer """no""" +66 9 optimizer """adadelta""" +66 9 training_loop """owa""" +66 9 negative_sampler """basic""" +66 9 evaluator """rankbased""" +66 10 dataset """kinships""" +66 10 model """conve""" +66 10 loss """marginranking""" +66 10 regularizer """no""" +66 10 optimizer """adadelta""" +66 10 training_loop """owa""" +66 10 negative_sampler """basic""" +66 10 evaluator """rankbased""" +66 11 dataset """kinships""" +66 11 model """conve""" +66 11 loss """marginranking""" +66 11 regularizer """no""" +66 11 optimizer """adadelta""" +66 11 training_loop """owa""" +66 11 negative_sampler """basic""" +66 11 evaluator """rankbased""" +66 12 dataset """kinships""" +66 12 model """conve""" +66 12 loss """marginranking""" +66 12 regularizer """no""" +66 12 optimizer """adadelta""" +66 12 training_loop """owa""" +66 12 negative_sampler """basic""" +66 12 evaluator """rankbased""" +66 13 dataset """kinships""" +66 13 model """conve""" +66 13 loss """marginranking""" +66 13 regularizer """no""" +66 13 optimizer """adadelta""" +66 13 training_loop """owa""" +66 13 negative_sampler """basic""" +66 13 evaluator """rankbased""" +66 14 dataset """kinships""" +66 14 model """conve""" +66 14 loss """marginranking""" +66 14 regularizer """no""" +66 14 optimizer """adadelta""" +66 14 training_loop """owa""" +66 14 negative_sampler """basic""" +66 14 evaluator """rankbased""" +66 15 dataset """kinships""" +66 15 model """conve""" +66 15 loss """marginranking""" +66 15 regularizer """no""" +66 15 optimizer """adadelta""" +66 15 training_loop 
"""owa""" +66 15 negative_sampler """basic""" +66 15 evaluator """rankbased""" +66 16 dataset """kinships""" +66 16 model """conve""" +66 16 loss """marginranking""" +66 16 regularizer """no""" +66 16 optimizer """adadelta""" +66 16 training_loop """owa""" +66 16 negative_sampler """basic""" +66 16 evaluator """rankbased""" +66 17 dataset """kinships""" +66 17 model """conve""" +66 17 loss """marginranking""" +66 17 regularizer """no""" +66 17 optimizer """adadelta""" +66 17 training_loop """owa""" +66 17 negative_sampler """basic""" +66 17 evaluator """rankbased""" +66 18 dataset """kinships""" +66 18 model """conve""" +66 18 loss """marginranking""" +66 18 regularizer """no""" +66 18 optimizer """adadelta""" +66 18 training_loop """owa""" +66 18 negative_sampler """basic""" +66 18 evaluator """rankbased""" +66 19 dataset """kinships""" +66 19 model """conve""" +66 19 loss """marginranking""" +66 19 regularizer """no""" +66 19 optimizer """adadelta""" +66 19 training_loop """owa""" +66 19 negative_sampler """basic""" +66 19 evaluator """rankbased""" +66 20 dataset """kinships""" +66 20 model """conve""" +66 20 loss """marginranking""" +66 20 regularizer """no""" +66 20 optimizer """adadelta""" +66 20 training_loop """owa""" +66 20 negative_sampler """basic""" +66 20 evaluator """rankbased""" +66 21 dataset """kinships""" +66 21 model """conve""" +66 21 loss """marginranking""" +66 21 regularizer """no""" +66 21 optimizer """adadelta""" +66 21 training_loop """owa""" +66 21 negative_sampler """basic""" +66 21 evaluator """rankbased""" +66 22 dataset """kinships""" +66 22 model """conve""" +66 22 loss """marginranking""" +66 22 regularizer """no""" +66 22 optimizer """adadelta""" +66 22 training_loop """owa""" +66 22 negative_sampler """basic""" +66 22 evaluator """rankbased""" +66 23 dataset """kinships""" +66 23 model """conve""" +66 23 loss """marginranking""" +66 23 regularizer """no""" +66 23 optimizer """adadelta""" +66 23 training_loop """owa""" +66 23 
negative_sampler """basic""" +66 23 evaluator """rankbased""" +66 24 dataset """kinships""" +66 24 model """conve""" +66 24 loss """marginranking""" +66 24 regularizer """no""" +66 24 optimizer """adadelta""" +66 24 training_loop """owa""" +66 24 negative_sampler """basic""" +66 24 evaluator """rankbased""" +66 25 dataset """kinships""" +66 25 model """conve""" +66 25 loss """marginranking""" +66 25 regularizer """no""" +66 25 optimizer """adadelta""" +66 25 training_loop """owa""" +66 25 negative_sampler """basic""" +66 25 evaluator """rankbased""" +66 26 dataset """kinships""" +66 26 model """conve""" +66 26 loss """marginranking""" +66 26 regularizer """no""" +66 26 optimizer """adadelta""" +66 26 training_loop """owa""" +66 26 negative_sampler """basic""" +66 26 evaluator """rankbased""" +66 27 dataset """kinships""" +66 27 model """conve""" +66 27 loss """marginranking""" +66 27 regularizer """no""" +66 27 optimizer """adadelta""" +66 27 training_loop """owa""" +66 27 negative_sampler """basic""" +66 27 evaluator """rankbased""" +66 28 dataset """kinships""" +66 28 model """conve""" +66 28 loss """marginranking""" +66 28 regularizer """no""" +66 28 optimizer """adadelta""" +66 28 training_loop """owa""" +66 28 negative_sampler """basic""" +66 28 evaluator """rankbased""" +66 29 dataset """kinships""" +66 29 model """conve""" +66 29 loss """marginranking""" +66 29 regularizer """no""" +66 29 optimizer """adadelta""" +66 29 training_loop """owa""" +66 29 negative_sampler """basic""" +66 29 evaluator """rankbased""" +66 30 dataset """kinships""" +66 30 model """conve""" +66 30 loss """marginranking""" +66 30 regularizer """no""" +66 30 optimizer """adadelta""" +66 30 training_loop """owa""" +66 30 negative_sampler """basic""" +66 30 evaluator """rankbased""" +66 31 dataset """kinships""" +66 31 model """conve""" +66 31 loss """marginranking""" +66 31 regularizer """no""" +66 31 optimizer """adadelta""" +66 31 training_loop """owa""" +66 31 negative_sampler 
"""basic""" +66 31 evaluator """rankbased""" +66 32 dataset """kinships""" +66 32 model """conve""" +66 32 loss """marginranking""" +66 32 regularizer """no""" +66 32 optimizer """adadelta""" +66 32 training_loop """owa""" +66 32 negative_sampler """basic""" +66 32 evaluator """rankbased""" +66 33 dataset """kinships""" +66 33 model """conve""" +66 33 loss """marginranking""" +66 33 regularizer """no""" +66 33 optimizer """adadelta""" +66 33 training_loop """owa""" +66 33 negative_sampler """basic""" +66 33 evaluator """rankbased""" +66 34 dataset """kinships""" +66 34 model """conve""" +66 34 loss """marginranking""" +66 34 regularizer """no""" +66 34 optimizer """adadelta""" +66 34 training_loop """owa""" +66 34 negative_sampler """basic""" +66 34 evaluator """rankbased""" +66 35 dataset """kinships""" +66 35 model """conve""" +66 35 loss """marginranking""" +66 35 regularizer """no""" +66 35 optimizer """adadelta""" +66 35 training_loop """owa""" +66 35 negative_sampler """basic""" +66 35 evaluator """rankbased""" +66 36 dataset """kinships""" +66 36 model """conve""" +66 36 loss """marginranking""" +66 36 regularizer """no""" +66 36 optimizer """adadelta""" +66 36 training_loop """owa""" +66 36 negative_sampler """basic""" +66 36 evaluator """rankbased""" +66 37 dataset """kinships""" +66 37 model """conve""" +66 37 loss """marginranking""" +66 37 regularizer """no""" +66 37 optimizer """adadelta""" +66 37 training_loop """owa""" +66 37 negative_sampler """basic""" +66 37 evaluator """rankbased""" +66 38 dataset """kinships""" +66 38 model """conve""" +66 38 loss """marginranking""" +66 38 regularizer """no""" +66 38 optimizer """adadelta""" +66 38 training_loop """owa""" +66 38 negative_sampler """basic""" +66 38 evaluator """rankbased""" +66 39 dataset """kinships""" +66 39 model """conve""" +66 39 loss """marginranking""" +66 39 regularizer """no""" +66 39 optimizer """adadelta""" +66 39 training_loop """owa""" +66 39 negative_sampler """basic""" +66 39 
evaluator """rankbased""" +66 40 dataset """kinships""" +66 40 model """conve""" +66 40 loss """marginranking""" +66 40 regularizer """no""" +66 40 optimizer """adadelta""" +66 40 training_loop """owa""" +66 40 negative_sampler """basic""" +66 40 evaluator """rankbased""" +66 41 dataset """kinships""" +66 41 model """conve""" +66 41 loss """marginranking""" +66 41 regularizer """no""" +66 41 optimizer """adadelta""" +66 41 training_loop """owa""" +66 41 negative_sampler """basic""" +66 41 evaluator """rankbased""" +66 42 dataset """kinships""" +66 42 model """conve""" +66 42 loss """marginranking""" +66 42 regularizer """no""" +66 42 optimizer """adadelta""" +66 42 training_loop """owa""" +66 42 negative_sampler """basic""" +66 42 evaluator """rankbased""" +66 43 dataset """kinships""" +66 43 model """conve""" +66 43 loss """marginranking""" +66 43 regularizer """no""" +66 43 optimizer """adadelta""" +66 43 training_loop """owa""" +66 43 negative_sampler """basic""" +66 43 evaluator """rankbased""" +66 44 dataset """kinships""" +66 44 model """conve""" +66 44 loss """marginranking""" +66 44 regularizer """no""" +66 44 optimizer """adadelta""" +66 44 training_loop """owa""" +66 44 negative_sampler """basic""" +66 44 evaluator """rankbased""" +66 45 dataset """kinships""" +66 45 model """conve""" +66 45 loss """marginranking""" +66 45 regularizer """no""" +66 45 optimizer """adadelta""" +66 45 training_loop """owa""" +66 45 negative_sampler """basic""" +66 45 evaluator """rankbased""" +66 46 dataset """kinships""" +66 46 model """conve""" +66 46 loss """marginranking""" +66 46 regularizer """no""" +66 46 optimizer """adadelta""" +66 46 training_loop """owa""" +66 46 negative_sampler """basic""" +66 46 evaluator """rankbased""" +66 47 dataset """kinships""" +66 47 model """conve""" +66 47 loss """marginranking""" +66 47 regularizer """no""" +66 47 optimizer """adadelta""" +66 47 training_loop """owa""" +66 47 negative_sampler """basic""" +66 47 evaluator 
"""rankbased""" +66 48 dataset """kinships""" +66 48 model """conve""" +66 48 loss """marginranking""" +66 48 regularizer """no""" +66 48 optimizer """adadelta""" +66 48 training_loop """owa""" +66 48 negative_sampler """basic""" +66 48 evaluator """rankbased""" +66 49 dataset """kinships""" +66 49 model """conve""" +66 49 loss """marginranking""" +66 49 regularizer """no""" +66 49 optimizer """adadelta""" +66 49 training_loop """owa""" +66 49 negative_sampler """basic""" +66 49 evaluator """rankbased""" +66 50 dataset """kinships""" +66 50 model """conve""" +66 50 loss """marginranking""" +66 50 regularizer """no""" +66 50 optimizer """adadelta""" +66 50 training_loop """owa""" +66 50 negative_sampler """basic""" +66 50 evaluator """rankbased""" +66 51 dataset """kinships""" +66 51 model """conve""" +66 51 loss """marginranking""" +66 51 regularizer """no""" +66 51 optimizer """adadelta""" +66 51 training_loop """owa""" +66 51 negative_sampler """basic""" +66 51 evaluator """rankbased""" +66 52 dataset """kinships""" +66 52 model """conve""" +66 52 loss """marginranking""" +66 52 regularizer """no""" +66 52 optimizer """adadelta""" +66 52 training_loop """owa""" +66 52 negative_sampler """basic""" +66 52 evaluator """rankbased""" +66 53 dataset """kinships""" +66 53 model """conve""" +66 53 loss """marginranking""" +66 53 regularizer """no""" +66 53 optimizer """adadelta""" +66 53 training_loop """owa""" +66 53 negative_sampler """basic""" +66 53 evaluator """rankbased""" +66 54 dataset """kinships""" +66 54 model """conve""" +66 54 loss """marginranking""" +66 54 regularizer """no""" +66 54 optimizer """adadelta""" +66 54 training_loop """owa""" +66 54 negative_sampler """basic""" +66 54 evaluator """rankbased""" +66 55 dataset """kinships""" +66 55 model """conve""" +66 55 loss """marginranking""" +66 55 regularizer """no""" +66 55 optimizer """adadelta""" +66 55 training_loop """owa""" +66 55 negative_sampler """basic""" +66 55 evaluator """rankbased""" +66 56 
dataset """kinships""" +66 56 model """conve""" +66 56 loss """marginranking""" +66 56 regularizer """no""" +66 56 optimizer """adadelta""" +66 56 training_loop """owa""" +66 56 negative_sampler """basic""" +66 56 evaluator """rankbased""" +66 57 dataset """kinships""" +66 57 model """conve""" +66 57 loss """marginranking""" +66 57 regularizer """no""" +66 57 optimizer """adadelta""" +66 57 training_loop """owa""" +66 57 negative_sampler """basic""" +66 57 evaluator """rankbased""" +66 58 dataset """kinships""" +66 58 model """conve""" +66 58 loss """marginranking""" +66 58 regularizer """no""" +66 58 optimizer """adadelta""" +66 58 training_loop """owa""" +66 58 negative_sampler """basic""" +66 58 evaluator """rankbased""" +66 59 dataset """kinships""" +66 59 model """conve""" +66 59 loss """marginranking""" +66 59 regularizer """no""" +66 59 optimizer """adadelta""" +66 59 training_loop """owa""" +66 59 negative_sampler """basic""" +66 59 evaluator """rankbased""" +66 60 dataset """kinships""" +66 60 model """conve""" +66 60 loss """marginranking""" +66 60 regularizer """no""" +66 60 optimizer """adadelta""" +66 60 training_loop """owa""" +66 60 negative_sampler """basic""" +66 60 evaluator """rankbased""" +66 61 dataset """kinships""" +66 61 model """conve""" +66 61 loss """marginranking""" +66 61 regularizer """no""" +66 61 optimizer """adadelta""" +66 61 training_loop """owa""" +66 61 negative_sampler """basic""" +66 61 evaluator """rankbased""" +66 62 dataset """kinships""" +66 62 model """conve""" +66 62 loss """marginranking""" +66 62 regularizer """no""" +66 62 optimizer """adadelta""" +66 62 training_loop """owa""" +66 62 negative_sampler """basic""" +66 62 evaluator """rankbased""" +66 63 dataset """kinships""" +66 63 model """conve""" +66 63 loss """marginranking""" +66 63 regularizer """no""" +66 63 optimizer """adadelta""" +66 63 training_loop """owa""" +66 63 negative_sampler """basic""" +66 63 evaluator """rankbased""" +66 64 dataset """kinships""" 
+66 64 model """conve""" +66 64 loss """marginranking""" +66 64 regularizer """no""" +66 64 optimizer """adadelta""" +66 64 training_loop """owa""" +66 64 negative_sampler """basic""" +66 64 evaluator """rankbased""" +66 65 dataset """kinships""" +66 65 model """conve""" +66 65 loss """marginranking""" +66 65 regularizer """no""" +66 65 optimizer """adadelta""" +66 65 training_loop """owa""" +66 65 negative_sampler """basic""" +66 65 evaluator """rankbased""" +66 66 dataset """kinships""" +66 66 model """conve""" +66 66 loss """marginranking""" +66 66 regularizer """no""" +66 66 optimizer """adadelta""" +66 66 training_loop """owa""" +66 66 negative_sampler """basic""" +66 66 evaluator """rankbased""" +66 67 dataset """kinships""" +66 67 model """conve""" +66 67 loss """marginranking""" +66 67 regularizer """no""" +66 67 optimizer """adadelta""" +66 67 training_loop """owa""" +66 67 negative_sampler """basic""" +66 67 evaluator """rankbased""" +66 68 dataset """kinships""" +66 68 model """conve""" +66 68 loss """marginranking""" +66 68 regularizer """no""" +66 68 optimizer """adadelta""" +66 68 training_loop """owa""" +66 68 negative_sampler """basic""" +66 68 evaluator """rankbased""" +66 69 dataset """kinships""" +66 69 model """conve""" +66 69 loss """marginranking""" +66 69 regularizer """no""" +66 69 optimizer """adadelta""" +66 69 training_loop """owa""" +66 69 negative_sampler """basic""" +66 69 evaluator """rankbased""" +66 70 dataset """kinships""" +66 70 model """conve""" +66 70 loss """marginranking""" +66 70 regularizer """no""" +66 70 optimizer """adadelta""" +66 70 training_loop """owa""" +66 70 negative_sampler """basic""" +66 70 evaluator """rankbased""" +66 71 dataset """kinships""" +66 71 model """conve""" +66 71 loss """marginranking""" +66 71 regularizer """no""" +66 71 optimizer """adadelta""" +66 71 training_loop """owa""" +66 71 negative_sampler """basic""" +66 71 evaluator """rankbased""" +66 72 dataset """kinships""" +66 72 model 
"""conve""" +66 72 loss """marginranking""" +66 72 regularizer """no""" +66 72 optimizer """adadelta""" +66 72 training_loop """owa""" +66 72 negative_sampler """basic""" +66 72 evaluator """rankbased""" +66 73 dataset """kinships""" +66 73 model """conve""" +66 73 loss """marginranking""" +66 73 regularizer """no""" +66 73 optimizer """adadelta""" +66 73 training_loop """owa""" +66 73 negative_sampler """basic""" +66 73 evaluator """rankbased""" +66 74 dataset """kinships""" +66 74 model """conve""" +66 74 loss """marginranking""" +66 74 regularizer """no""" +66 74 optimizer """adadelta""" +66 74 training_loop """owa""" +66 74 negative_sampler """basic""" +66 74 evaluator """rankbased""" +66 75 dataset """kinships""" +66 75 model """conve""" +66 75 loss """marginranking""" +66 75 regularizer """no""" +66 75 optimizer """adadelta""" +66 75 training_loop """owa""" +66 75 negative_sampler """basic""" +66 75 evaluator """rankbased""" +66 76 dataset """kinships""" +66 76 model """conve""" +66 76 loss """marginranking""" +66 76 regularizer """no""" +66 76 optimizer """adadelta""" +66 76 training_loop """owa""" +66 76 negative_sampler """basic""" +66 76 evaluator """rankbased""" +66 77 dataset """kinships""" +66 77 model """conve""" +66 77 loss """marginranking""" +66 77 regularizer """no""" +66 77 optimizer """adadelta""" +66 77 training_loop """owa""" +66 77 negative_sampler """basic""" +66 77 evaluator """rankbased""" +66 78 dataset """kinships""" +66 78 model """conve""" +66 78 loss """marginranking""" +66 78 regularizer """no""" +66 78 optimizer """adadelta""" +66 78 training_loop """owa""" +66 78 negative_sampler """basic""" +66 78 evaluator """rankbased""" +66 79 dataset """kinships""" +66 79 model """conve""" +66 79 loss """marginranking""" +66 79 regularizer """no""" +66 79 optimizer """adadelta""" +66 79 training_loop """owa""" +66 79 negative_sampler """basic""" +66 79 evaluator """rankbased""" +66 80 dataset """kinships""" +66 80 model """conve""" +66 80 loss 
"""marginranking""" +66 80 regularizer """no""" +66 80 optimizer """adadelta""" +66 80 training_loop """owa""" +66 80 negative_sampler """basic""" +66 80 evaluator """rankbased""" +66 81 dataset """kinships""" +66 81 model """conve""" +66 81 loss """marginranking""" +66 81 regularizer """no""" +66 81 optimizer """adadelta""" +66 81 training_loop """owa""" +66 81 negative_sampler """basic""" +66 81 evaluator """rankbased""" +66 82 dataset """kinships""" +66 82 model """conve""" +66 82 loss """marginranking""" +66 82 regularizer """no""" +66 82 optimizer """adadelta""" +66 82 training_loop """owa""" +66 82 negative_sampler """basic""" +66 82 evaluator """rankbased""" +66 83 dataset """kinships""" +66 83 model """conve""" +66 83 loss """marginranking""" +66 83 regularizer """no""" +66 83 optimizer """adadelta""" +66 83 training_loop """owa""" +66 83 negative_sampler """basic""" +66 83 evaluator """rankbased""" +66 84 dataset """kinships""" +66 84 model """conve""" +66 84 loss """marginranking""" +66 84 regularizer """no""" +66 84 optimizer """adadelta""" +66 84 training_loop """owa""" +66 84 negative_sampler """basic""" +66 84 evaluator """rankbased""" +66 85 dataset """kinships""" +66 85 model """conve""" +66 85 loss """marginranking""" +66 85 regularizer """no""" +66 85 optimizer """adadelta""" +66 85 training_loop """owa""" +66 85 negative_sampler """basic""" +66 85 evaluator """rankbased""" +66 86 dataset """kinships""" +66 86 model """conve""" +66 86 loss """marginranking""" +66 86 regularizer """no""" +66 86 optimizer """adadelta""" +66 86 training_loop """owa""" +66 86 negative_sampler """basic""" +66 86 evaluator """rankbased""" +66 87 dataset """kinships""" +66 87 model """conve""" +66 87 loss """marginranking""" +66 87 regularizer """no""" +66 87 optimizer """adadelta""" +66 87 training_loop """owa""" +66 87 negative_sampler """basic""" +66 87 evaluator """rankbased""" +66 88 dataset """kinships""" +66 88 model """conve""" +66 88 loss """marginranking""" +66 
88 regularizer """no""" +66 88 optimizer """adadelta""" +66 88 training_loop """owa""" +66 88 negative_sampler """basic""" +66 88 evaluator """rankbased""" +66 89 dataset """kinships""" +66 89 model """conve""" +66 89 loss """marginranking""" +66 89 regularizer """no""" +66 89 optimizer """adadelta""" +66 89 training_loop """owa""" +66 89 negative_sampler """basic""" +66 89 evaluator """rankbased""" +66 90 dataset """kinships""" +66 90 model """conve""" +66 90 loss """marginranking""" +66 90 regularizer """no""" +66 90 optimizer """adadelta""" +66 90 training_loop """owa""" +66 90 negative_sampler """basic""" +66 90 evaluator """rankbased""" +66 91 dataset """kinships""" +66 91 model """conve""" +66 91 loss """marginranking""" +66 91 regularizer """no""" +66 91 optimizer """adadelta""" +66 91 training_loop """owa""" +66 91 negative_sampler """basic""" +66 91 evaluator """rankbased""" +66 92 dataset """kinships""" +66 92 model """conve""" +66 92 loss """marginranking""" +66 92 regularizer """no""" +66 92 optimizer """adadelta""" +66 92 training_loop """owa""" +66 92 negative_sampler """basic""" +66 92 evaluator """rankbased""" +66 93 dataset """kinships""" +66 93 model """conve""" +66 93 loss """marginranking""" +66 93 regularizer """no""" +66 93 optimizer """adadelta""" +66 93 training_loop """owa""" +66 93 negative_sampler """basic""" +66 93 evaluator """rankbased""" +66 94 dataset """kinships""" +66 94 model """conve""" +66 94 loss """marginranking""" +66 94 regularizer """no""" +66 94 optimizer """adadelta""" +66 94 training_loop """owa""" +66 94 negative_sampler """basic""" +66 94 evaluator """rankbased""" +66 95 dataset """kinships""" +66 95 model """conve""" +66 95 loss """marginranking""" +66 95 regularizer """no""" +66 95 optimizer """adadelta""" +66 95 training_loop """owa""" +66 95 negative_sampler """basic""" +66 95 evaluator """rankbased""" +66 96 dataset """kinships""" +66 96 model """conve""" +66 96 loss """marginranking""" +66 96 regularizer """no""" 
+66 96 optimizer """adadelta""" +66 96 training_loop """owa""" +66 96 negative_sampler """basic""" +66 96 evaluator """rankbased""" +66 97 dataset """kinships""" +66 97 model """conve""" +66 97 loss """marginranking""" +66 97 regularizer """no""" +66 97 optimizer """adadelta""" +66 97 training_loop """owa""" +66 97 negative_sampler """basic""" +66 97 evaluator """rankbased""" +66 98 dataset """kinships""" +66 98 model """conve""" +66 98 loss """marginranking""" +66 98 regularizer """no""" +66 98 optimizer """adadelta""" +66 98 training_loop """owa""" +66 98 negative_sampler """basic""" +66 98 evaluator """rankbased""" +66 99 dataset """kinships""" +66 99 model """conve""" +66 99 loss """marginranking""" +66 99 regularizer """no""" +66 99 optimizer """adadelta""" +66 99 training_loop """owa""" +66 99 negative_sampler """basic""" +66 99 evaluator """rankbased""" +66 100 dataset """kinships""" +66 100 model """conve""" +66 100 loss """marginranking""" +66 100 regularizer """no""" +66 100 optimizer """adadelta""" +66 100 training_loop """owa""" +66 100 negative_sampler """basic""" +66 100 evaluator """rankbased""" +67 1 model.output_channels 43.0 +67 1 model.input_dropout 0.3551672421600843 +67 1 model.output_dropout 0.17055188393982718 +67 1 model.feature_map_dropout 0.4947964504357717 +67 1 model.embedding_dim 2.0 +67 1 loss.margin 4.409254626293949 +67 1 negative_sampler.num_negs_per_pos 29.0 +67 1 training.batch_size 0.0 +67 2 model.output_channels 47.0 +67 2 model.input_dropout 0.3706980972216934 +67 2 model.output_dropout 0.16815317255172613 +67 2 model.feature_map_dropout 0.3875450724375436 +67 2 model.embedding_dim 1.0 +67 2 loss.margin 1.7761859315904558 +67 2 negative_sampler.num_negs_per_pos 82.0 +67 2 training.batch_size 0.0 +67 3 model.output_channels 26.0 +67 3 model.input_dropout 0.14462942356073227 +67 3 model.output_dropout 0.147427384063417 +67 3 model.feature_map_dropout 0.48455045065945374 +67 3 model.embedding_dim 0.0 +67 3 loss.margin 
2.2599409575662484 +67 3 negative_sampler.num_negs_per_pos 4.0 +67 3 training.batch_size 2.0 +67 4 model.output_channels 28.0 +67 4 model.input_dropout 0.420985689088979 +67 4 model.output_dropout 0.22650416333093493 +67 4 model.feature_map_dropout 0.34869849708026307 +67 4 model.embedding_dim 2.0 +67 4 loss.margin 9.37251607043411 +67 4 negative_sampler.num_negs_per_pos 7.0 +67 4 training.batch_size 2.0 +67 5 model.output_channels 27.0 +67 5 model.input_dropout 0.45697709237007694 +67 5 model.output_dropout 0.36943751604734837 +67 5 model.feature_map_dropout 0.28655104636071893 +67 5 model.embedding_dim 0.0 +67 5 loss.margin 9.594187304246324 +67 5 negative_sampler.num_negs_per_pos 68.0 +67 5 training.batch_size 0.0 +67 6 model.output_channels 26.0 +67 6 model.input_dropout 0.3519562954113931 +67 6 model.output_dropout 0.05205303360903868 +67 6 model.feature_map_dropout 0.18316683701450498 +67 6 model.embedding_dim 2.0 +67 6 loss.margin 1.5231698118561963 +67 6 negative_sampler.num_negs_per_pos 88.0 +67 6 training.batch_size 0.0 +67 7 model.output_channels 62.0 +67 7 model.input_dropout 0.3377946795013598 +67 7 model.output_dropout 0.4218685889232608 +67 7 model.feature_map_dropout 0.41909119443425463 +67 7 model.embedding_dim 2.0 +67 7 loss.margin 5.240676896366024 +67 7 negative_sampler.num_negs_per_pos 72.0 +67 7 training.batch_size 2.0 +67 8 model.output_channels 33.0 +67 8 model.input_dropout 0.2992094893985132 +67 8 model.output_dropout 0.09470821505136473 +67 8 model.feature_map_dropout 0.3211256570260528 +67 8 model.embedding_dim 0.0 +67 8 loss.margin 6.046919629607022 +67 8 negative_sampler.num_negs_per_pos 96.0 +67 8 training.batch_size 2.0 +67 9 model.output_channels 48.0 +67 9 model.input_dropout 0.09574388221200741 +67 9 model.output_dropout 0.4525584149557056 +67 9 model.feature_map_dropout 0.21640415503579535 +67 9 model.embedding_dim 0.0 +67 9 loss.margin 4.431058956551565 +67 9 negative_sampler.num_negs_per_pos 60.0 +67 9 training.batch_size 2.0 
+67 10 model.output_channels 36.0 +67 10 model.input_dropout 0.11875330436060333 +67 10 model.output_dropout 0.29166569525558467 +67 10 model.feature_map_dropout 0.3517703853199422 +67 10 model.embedding_dim 1.0 +67 10 loss.margin 1.970037262053332 +67 10 negative_sampler.num_negs_per_pos 42.0 +67 10 training.batch_size 0.0 +67 11 model.output_channels 57.0 +67 11 model.input_dropout 0.04179712310590439 +67 11 model.output_dropout 0.09027206343113647 +67 11 model.feature_map_dropout 0.13754263514651277 +67 11 model.embedding_dim 1.0 +67 11 loss.margin 6.419092243948277 +67 11 negative_sampler.num_negs_per_pos 57.0 +67 11 training.batch_size 0.0 +67 12 model.output_channels 35.0 +67 12 model.input_dropout 0.4609377451531205 +67 12 model.output_dropout 0.21551248861686728 +67 12 model.feature_map_dropout 0.24167792115994557 +67 12 model.embedding_dim 0.0 +67 12 loss.margin 6.540974505181787 +67 12 negative_sampler.num_negs_per_pos 59.0 +67 12 training.batch_size 2.0 +67 13 model.output_channels 31.0 +67 13 model.input_dropout 0.29419906735740364 +67 13 model.output_dropout 0.08286065713801172 +67 13 model.feature_map_dropout 0.4624010594583305 +67 13 model.embedding_dim 0.0 +67 13 loss.margin 2.114280146325764 +67 13 negative_sampler.num_negs_per_pos 11.0 +67 13 training.batch_size 1.0 +67 14 model.output_channels 64.0 +67 14 model.input_dropout 0.017803070435744694 +67 14 model.output_dropout 0.1634543697002378 +67 14 model.feature_map_dropout 0.004302624855168191 +67 14 model.embedding_dim 2.0 +67 14 loss.margin 4.020930931298401 +67 14 negative_sampler.num_negs_per_pos 46.0 +67 14 training.batch_size 1.0 +67 15 model.output_channels 61.0 +67 15 model.input_dropout 0.46522691469045413 +67 15 model.output_dropout 0.3573308871640801 +67 15 model.feature_map_dropout 0.09458057174691303 +67 15 model.embedding_dim 2.0 +67 15 loss.margin 2.0538755394098955 +67 15 negative_sampler.num_negs_per_pos 15.0 +67 15 training.batch_size 2.0 +67 16 model.output_channels 63.0 +67 
16 model.input_dropout 0.017670136132765468 +67 16 model.output_dropout 0.28601649159964765 +67 16 model.feature_map_dropout 0.24298927215809452 +67 16 model.embedding_dim 2.0 +67 16 loss.margin 2.8076413006504426 +67 16 negative_sampler.num_negs_per_pos 88.0 +67 16 training.batch_size 1.0 +67 17 model.output_channels 44.0 +67 17 model.input_dropout 0.20743283824305708 +67 17 model.output_dropout 0.29506272325274724 +67 17 model.feature_map_dropout 0.3666422711388801 +67 17 model.embedding_dim 0.0 +67 17 loss.margin 8.043783442523125 +67 17 negative_sampler.num_negs_per_pos 80.0 +67 17 training.batch_size 0.0 +67 18 model.output_channels 59.0 +67 18 model.input_dropout 0.06939087488759055 +67 18 model.output_dropout 0.4068540599979482 +67 18 model.feature_map_dropout 0.4073298521961966 +67 18 model.embedding_dim 0.0 +67 18 loss.margin 5.791141089157686 +67 18 negative_sampler.num_negs_per_pos 68.0 +67 18 training.batch_size 0.0 +67 19 model.output_channels 19.0 +67 19 model.input_dropout 0.0077549820419803295 +67 19 model.output_dropout 0.2498514776779439 +67 19 model.feature_map_dropout 0.17924103701437138 +67 19 model.embedding_dim 0.0 +67 19 loss.margin 4.15746691643476 +67 19 negative_sampler.num_negs_per_pos 64.0 +67 19 training.batch_size 1.0 +67 20 model.output_channels 20.0 +67 20 model.input_dropout 0.45614761200236614 +67 20 model.output_dropout 0.035446985160346445 +67 20 model.feature_map_dropout 0.20930396227569537 +67 20 model.embedding_dim 0.0 +67 20 loss.margin 4.781511266358426 +67 20 negative_sampler.num_negs_per_pos 60.0 +67 20 training.batch_size 0.0 +67 21 model.output_channels 34.0 +67 21 model.input_dropout 0.46681464562770203 +67 21 model.output_dropout 0.30692174707503933 +67 21 model.feature_map_dropout 0.05083090337492796 +67 21 model.embedding_dim 2.0 +67 21 loss.margin 6.163291344708483 +67 21 negative_sampler.num_negs_per_pos 81.0 +67 21 training.batch_size 0.0 +67 22 model.output_channels 60.0 +67 22 model.input_dropout 
0.0914351818070605 +67 22 model.output_dropout 0.41942867524464966 +67 22 model.feature_map_dropout 0.035265347833590144 +67 22 model.embedding_dim 1.0 +67 22 loss.margin 5.6234646054555695 +67 22 negative_sampler.num_negs_per_pos 92.0 +67 22 training.batch_size 0.0 +67 23 model.output_channels 51.0 +67 23 model.input_dropout 0.28719690570870227 +67 23 model.output_dropout 0.49086346123594915 +67 23 model.feature_map_dropout 0.2370317612041447 +67 23 model.embedding_dim 0.0 +67 23 loss.margin 8.109542823527299 +67 23 negative_sampler.num_negs_per_pos 63.0 +67 23 training.batch_size 1.0 +67 24 model.output_channels 50.0 +67 24 model.input_dropout 0.3470399911327007 +67 24 model.output_dropout 0.10257029270135748 +67 24 model.feature_map_dropout 0.2957795173501878 +67 24 model.embedding_dim 0.0 +67 24 loss.margin 7.500514413091126 +67 24 negative_sampler.num_negs_per_pos 73.0 +67 24 training.batch_size 0.0 +67 25 model.output_channels 40.0 +67 25 model.input_dropout 0.07385674636238088 +67 25 model.output_dropout 0.0648069222421313 +67 25 model.feature_map_dropout 0.06493588258290467 +67 25 model.embedding_dim 2.0 +67 25 loss.margin 7.197881819747938 +67 25 negative_sampler.num_negs_per_pos 85.0 +67 25 training.batch_size 2.0 +67 26 model.output_channels 48.0 +67 26 model.input_dropout 0.28762616807248526 +67 26 model.output_dropout 0.06089989234100046 +67 26 model.feature_map_dropout 0.24681737831108663 +67 26 model.embedding_dim 2.0 +67 26 loss.margin 8.317164393446156 +67 26 negative_sampler.num_negs_per_pos 49.0 +67 26 training.batch_size 1.0 +67 27 model.output_channels 58.0 +67 27 model.input_dropout 0.22291624623013873 +67 27 model.output_dropout 0.38739680361509254 +67 27 model.feature_map_dropout 0.36388022451304325 +67 27 model.embedding_dim 2.0 +67 27 loss.margin 4.203039803024712 +67 27 negative_sampler.num_negs_per_pos 63.0 +67 27 training.batch_size 2.0 +67 28 model.output_channels 49.0 +67 28 model.input_dropout 0.30433859817735615 +67 28 
model.output_dropout 0.17679823817232204 +67 28 model.feature_map_dropout 0.4376906256428861 +67 28 model.embedding_dim 1.0 +67 28 loss.margin 2.6741459695279834 +67 28 negative_sampler.num_negs_per_pos 57.0 +67 28 training.batch_size 0.0 +67 29 model.output_channels 63.0 +67 29 model.input_dropout 0.019195665585955035 +67 29 model.output_dropout 0.3550863813411113 +67 29 model.feature_map_dropout 0.4628082800302221 +67 29 model.embedding_dim 1.0 +67 29 loss.margin 8.835729422676346 +67 29 negative_sampler.num_negs_per_pos 52.0 +67 29 training.batch_size 0.0 +67 30 model.output_channels 31.0 +67 30 model.input_dropout 0.2412498161152659 +67 30 model.output_dropout 0.03562575147264746 +67 30 model.feature_map_dropout 0.21745086550015202 +67 30 model.embedding_dim 1.0 +67 30 loss.margin 7.372990139139446 +67 30 negative_sampler.num_negs_per_pos 96.0 +67 30 training.batch_size 2.0 +67 31 model.output_channels 16.0 +67 31 model.input_dropout 0.4047729845370317 +67 31 model.output_dropout 0.4602423327340379 +67 31 model.feature_map_dropout 0.1277405949229165 +67 31 model.embedding_dim 2.0 +67 31 loss.margin 3.193440522641439 +67 31 negative_sampler.num_negs_per_pos 56.0 +67 31 training.batch_size 0.0 +67 32 model.output_channels 46.0 +67 32 model.input_dropout 0.31059425062584756 +67 32 model.output_dropout 0.30608119327841804 +67 32 model.feature_map_dropout 0.22907370267351618 +67 32 model.embedding_dim 1.0 +67 32 loss.margin 3.119472807992363 +67 32 negative_sampler.num_negs_per_pos 21.0 +67 32 training.batch_size 2.0 +67 33 model.output_channels 35.0 +67 33 model.input_dropout 0.32668486167062716 +67 33 model.output_dropout 0.4952066984012473 +67 33 model.feature_map_dropout 0.07942596791019496 +67 33 model.embedding_dim 2.0 +67 33 loss.margin 8.969817680295677 +67 33 negative_sampler.num_negs_per_pos 56.0 +67 33 training.batch_size 0.0 +67 34 model.output_channels 50.0 +67 34 model.input_dropout 0.026224474954763433 +67 34 model.output_dropout 0.4720463597182593 
+67 34 model.feature_map_dropout 0.06629672339425036 +67 34 model.embedding_dim 0.0 +67 34 loss.margin 4.481508240082707 +67 34 negative_sampler.num_negs_per_pos 39.0 +67 34 training.batch_size 0.0 +67 35 model.output_channels 32.0 +67 35 model.input_dropout 0.0155941262986945 +67 35 model.output_dropout 0.3449779405256125 +67 35 model.feature_map_dropout 0.20002995827965014 +67 35 model.embedding_dim 2.0 +67 35 loss.margin 2.918387520531641 +67 35 negative_sampler.num_negs_per_pos 5.0 +67 35 training.batch_size 0.0 +67 36 model.output_channels 38.0 +67 36 model.input_dropout 0.1194876131453762 +67 36 model.output_dropout 0.43932245482332105 +67 36 model.feature_map_dropout 0.47200408386709647 +67 36 model.embedding_dim 1.0 +67 36 loss.margin 8.855567172860441 +67 36 negative_sampler.num_negs_per_pos 79.0 +67 36 training.batch_size 1.0 +67 37 model.output_channels 50.0 +67 37 model.input_dropout 0.4577904016614836 +67 37 model.output_dropout 0.3669464349708523 +67 37 model.feature_map_dropout 0.20283685732380774 +67 37 model.embedding_dim 1.0 +67 37 loss.margin 7.3575924599094105 +67 37 negative_sampler.num_negs_per_pos 54.0 +67 37 training.batch_size 2.0 +67 38 model.output_channels 54.0 +67 38 model.input_dropout 0.031840056496358626 +67 38 model.output_dropout 0.10721674582754659 +67 38 model.feature_map_dropout 0.0673721534575979 +67 38 model.embedding_dim 2.0 +67 38 loss.margin 4.159342614541819 +67 38 negative_sampler.num_negs_per_pos 52.0 +67 38 training.batch_size 0.0 +67 39 model.output_channels 16.0 +67 39 model.input_dropout 0.49105459842783855 +67 39 model.output_dropout 0.4956308962292718 +67 39 model.feature_map_dropout 0.018743296966969414 +67 39 model.embedding_dim 1.0 +67 39 loss.margin 2.8124661643299222 +67 39 negative_sampler.num_negs_per_pos 20.0 +67 39 training.batch_size 1.0 +67 40 model.output_channels 53.0 +67 40 model.input_dropout 0.2728976713278794 +67 40 model.output_dropout 0.2931149234445147 +67 40 model.feature_map_dropout 
0.17627229663997157 +67 40 model.embedding_dim 0.0 +67 40 loss.margin 5.845445490810829 +67 40 negative_sampler.num_negs_per_pos 26.0 +67 40 training.batch_size 1.0 +67 41 model.output_channels 59.0 +67 41 model.input_dropout 0.4658535434302121 +67 41 model.output_dropout 0.2769364616866137 +67 41 model.feature_map_dropout 0.03895491987999489 +67 41 model.embedding_dim 2.0 +67 41 loss.margin 1.8799360989861849 +67 41 negative_sampler.num_negs_per_pos 92.0 +67 41 training.batch_size 1.0 +67 42 model.output_channels 25.0 +67 42 model.input_dropout 0.09237230592501255 +67 42 model.output_dropout 0.18656721619320454 +67 42 model.feature_map_dropout 0.01894804713381326 +67 42 model.embedding_dim 2.0 +67 42 loss.margin 7.565934710570429 +67 42 negative_sampler.num_negs_per_pos 57.0 +67 42 training.batch_size 1.0 +67 43 model.output_channels 53.0 +67 43 model.input_dropout 0.03294254615861336 +67 43 model.output_dropout 0.25291328332163604 +67 43 model.feature_map_dropout 0.08448361602773524 +67 43 model.embedding_dim 0.0 +67 43 loss.margin 7.193416131651948 +67 43 negative_sampler.num_negs_per_pos 33.0 +67 43 training.batch_size 1.0 +67 44 model.output_channels 44.0 +67 44 model.input_dropout 0.35323611106712116 +67 44 model.output_dropout 0.4337861250231216 +67 44 model.feature_map_dropout 0.4587253571205746 +67 44 model.embedding_dim 0.0 +67 44 loss.margin 6.814224288926329 +67 44 negative_sampler.num_negs_per_pos 81.0 +67 44 training.batch_size 2.0 +67 45 model.output_channels 61.0 +67 45 model.input_dropout 0.23245130817892717 +67 45 model.output_dropout 0.10120821349479575 +67 45 model.feature_map_dropout 0.197192051757828 +67 45 model.embedding_dim 1.0 +67 45 loss.margin 9.316519494785153 +67 45 negative_sampler.num_negs_per_pos 98.0 +67 45 training.batch_size 2.0 +67 46 model.output_channels 62.0 +67 46 model.input_dropout 0.2003981538958849 +67 46 model.output_dropout 0.19776451478523854 +67 46 model.feature_map_dropout 0.09497468322309466 +67 46 
model.embedding_dim 1.0 +67 46 loss.margin 3.265531041575384 +67 46 negative_sampler.num_negs_per_pos 87.0 +67 46 training.batch_size 0.0 +67 47 model.output_channels 61.0 +67 47 model.input_dropout 0.12305760140426453 +67 47 model.output_dropout 0.07250443879579155 +67 47 model.feature_map_dropout 0.4426187277714531 +67 47 model.embedding_dim 2.0 +67 47 loss.margin 2.4474859750815012 +67 47 negative_sampler.num_negs_per_pos 63.0 +67 47 training.batch_size 1.0 +67 48 model.output_channels 55.0 +67 48 model.input_dropout 0.17978049033678528 +67 48 model.output_dropout 0.2293075308828635 +67 48 model.feature_map_dropout 0.23281173966709667 +67 48 model.embedding_dim 0.0 +67 48 loss.margin 9.39233013114634 +67 48 negative_sampler.num_negs_per_pos 92.0 +67 48 training.batch_size 0.0 +67 49 model.output_channels 63.0 +67 49 model.input_dropout 0.2279097471499109 +67 49 model.output_dropout 0.43705904386657046 +67 49 model.feature_map_dropout 0.049907630825507976 +67 49 model.embedding_dim 0.0 +67 49 loss.margin 0.9361275067026882 +67 49 negative_sampler.num_negs_per_pos 33.0 +67 49 training.batch_size 2.0 +67 50 model.output_channels 49.0 +67 50 model.input_dropout 0.23829121988383994 +67 50 model.output_dropout 0.4410161893224236 +67 50 model.feature_map_dropout 0.4682972194361592 +67 50 model.embedding_dim 0.0 +67 50 loss.margin 6.7817384506917655 +67 50 negative_sampler.num_negs_per_pos 4.0 +67 50 training.batch_size 1.0 +67 51 model.output_channels 26.0 +67 51 model.input_dropout 0.2715950456429816 +67 51 model.output_dropout 0.19186117219571014 +67 51 model.feature_map_dropout 0.0328013573476893 +67 51 model.embedding_dim 1.0 +67 51 loss.margin 1.8757250794849427 +67 51 negative_sampler.num_negs_per_pos 48.0 +67 51 training.batch_size 0.0 +67 52 model.output_channels 47.0 +67 52 model.input_dropout 0.28606713890489316 +67 52 model.output_dropout 0.09205997030151297 +67 52 model.feature_map_dropout 0.2502891087358312 +67 52 model.embedding_dim 1.0 +67 52 loss.margin 
4.2093752491767304 +67 52 negative_sampler.num_negs_per_pos 61.0 +67 52 training.batch_size 1.0 +67 53 model.output_channels 55.0 +67 53 model.input_dropout 0.24377695626150153 +67 53 model.output_dropout 0.17200598709437415 +67 53 model.feature_map_dropout 0.43383442876460016 +67 53 model.embedding_dim 1.0 +67 53 loss.margin 2.6752519518355395 +67 53 negative_sampler.num_negs_per_pos 14.0 +67 53 training.batch_size 0.0 +67 54 model.output_channels 49.0 +67 54 model.input_dropout 0.003523829307713555 +67 54 model.output_dropout 0.36019450260282326 +67 54 model.feature_map_dropout 0.014537454495063051 +67 54 model.embedding_dim 0.0 +67 54 loss.margin 9.95612983556162 +67 54 negative_sampler.num_negs_per_pos 38.0 +67 54 training.batch_size 1.0 +67 55 model.output_channels 34.0 +67 55 model.input_dropout 0.4032783425532583 +67 55 model.output_dropout 0.311512839985591 +67 55 model.feature_map_dropout 0.2262801401069665 +67 55 model.embedding_dim 0.0 +67 55 loss.margin 8.275466378753858 +67 55 negative_sampler.num_negs_per_pos 47.0 +67 55 training.batch_size 0.0 +67 56 model.output_channels 31.0 +67 56 model.input_dropout 0.08837202456266258 +67 56 model.output_dropout 0.14789691867414628 +67 56 model.feature_map_dropout 0.2500015153081839 +67 56 model.embedding_dim 2.0 +67 56 loss.margin 8.180118021527818 +67 56 negative_sampler.num_negs_per_pos 50.0 +67 56 training.batch_size 1.0 +67 57 model.output_channels 35.0 +67 57 model.input_dropout 0.3050785264355717 +67 57 model.output_dropout 0.14095840339965965 +67 57 model.feature_map_dropout 0.33127271528144175 +67 57 model.embedding_dim 2.0 +67 57 loss.margin 8.121284350883656 +67 57 negative_sampler.num_negs_per_pos 28.0 +67 57 training.batch_size 1.0 +67 58 model.output_channels 44.0 +67 58 model.input_dropout 0.15667390149747462 +67 58 model.output_dropout 0.08512460847051956 +67 58 model.feature_map_dropout 0.027211455430727782 +67 58 model.embedding_dim 1.0 +67 58 loss.margin 3.4872803138097552 +67 58 
negative_sampler.num_negs_per_pos 54.0 +67 58 training.batch_size 2.0 +67 59 model.output_channels 27.0 +67 59 model.input_dropout 0.4373195844576745 +67 59 model.output_dropout 0.13553039796131716 +67 59 model.feature_map_dropout 0.3544329221532544 +67 59 model.embedding_dim 1.0 +67 59 loss.margin 8.897095580536439 +67 59 negative_sampler.num_negs_per_pos 38.0 +67 59 training.batch_size 2.0 +67 60 model.output_channels 46.0 +67 60 model.input_dropout 0.07489827552539957 +67 60 model.output_dropout 0.48539266922349195 +67 60 model.feature_map_dropout 0.43653729416747555 +67 60 model.embedding_dim 1.0 +67 60 loss.margin 7.571759582794285 +67 60 negative_sampler.num_negs_per_pos 8.0 +67 60 training.batch_size 1.0 +67 61 model.output_channels 26.0 +67 61 model.input_dropout 0.19287318796371278 +67 61 model.output_dropout 0.07155605648745694 +67 61 model.feature_map_dropout 0.3863335159619847 +67 61 model.embedding_dim 0.0 +67 61 loss.margin 0.6201137215602761 +67 61 negative_sampler.num_negs_per_pos 70.0 +67 61 training.batch_size 2.0 +67 62 model.output_channels 39.0 +67 62 model.input_dropout 0.28862500443237626 +67 62 model.output_dropout 0.2224324357298187 +67 62 model.feature_map_dropout 0.35521845443498645 +67 62 model.embedding_dim 2.0 +67 62 loss.margin 2.68784385121028 +67 62 negative_sampler.num_negs_per_pos 25.0 +67 62 training.batch_size 0.0 +67 63 model.output_channels 18.0 +67 63 model.input_dropout 0.43088214252580903 +67 63 model.output_dropout 0.2540265567371262 +67 63 model.feature_map_dropout 0.3200988209693111 +67 63 model.embedding_dim 0.0 +67 63 loss.margin 9.867428798039864 +67 63 negative_sampler.num_negs_per_pos 75.0 +67 63 training.batch_size 0.0 +67 64 model.output_channels 31.0 +67 64 model.input_dropout 0.40365778066025537 +67 64 model.output_dropout 0.36723253986121446 +67 64 model.feature_map_dropout 0.20186186099672232 +67 64 model.embedding_dim 2.0 +67 64 loss.margin 2.9970645358198897 +67 64 negative_sampler.num_negs_per_pos 0.0 +67 
64 training.batch_size 1.0 +67 65 model.output_channels 29.0 +67 65 model.input_dropout 0.3148238321841803 +67 65 model.output_dropout 0.3862885165693694 +67 65 model.feature_map_dropout 0.4200603178655032 +67 65 model.embedding_dim 0.0 +67 65 loss.margin 3.969761348842875 +67 65 negative_sampler.num_negs_per_pos 96.0 +67 65 training.batch_size 1.0 +67 66 model.output_channels 42.0 +67 66 model.input_dropout 0.2209922452303092 +67 66 model.output_dropout 0.4948483265916204 +67 66 model.feature_map_dropout 0.2230995672953488 +67 66 model.embedding_dim 2.0 +67 66 loss.margin 8.213741093010068 +67 66 negative_sampler.num_negs_per_pos 74.0 +67 66 training.batch_size 0.0 +67 67 model.output_channels 62.0 +67 67 model.input_dropout 0.29509150778700405 +67 67 model.output_dropout 0.25881631076962935 +67 67 model.feature_map_dropout 0.3971410704987041 +67 67 model.embedding_dim 0.0 +67 67 loss.margin 1.260498988496411 +67 67 negative_sampler.num_negs_per_pos 77.0 +67 67 training.batch_size 1.0 +67 68 model.output_channels 20.0 +67 68 model.input_dropout 0.34884334901360703 +67 68 model.output_dropout 0.3775598039815988 +67 68 model.feature_map_dropout 0.11876360850980461 +67 68 model.embedding_dim 2.0 +67 68 loss.margin 9.12128154025963 +67 68 negative_sampler.num_negs_per_pos 70.0 +67 68 training.batch_size 1.0 +67 69 model.output_channels 59.0 +67 69 model.input_dropout 0.19492806087823505 +67 69 model.output_dropout 0.3638175220618303 +67 69 model.feature_map_dropout 0.02834058505141468 +67 69 model.embedding_dim 2.0 +67 69 loss.margin 7.648531892637903 +67 69 negative_sampler.num_negs_per_pos 85.0 +67 69 training.batch_size 1.0 +67 70 model.output_channels 19.0 +67 70 model.input_dropout 0.345604349072765 +67 70 model.output_dropout 0.4136392013875117 +67 70 model.feature_map_dropout 0.009779886479993394 +67 70 model.embedding_dim 0.0 +67 70 loss.margin 9.452165668299578 +67 70 negative_sampler.num_negs_per_pos 7.0 +67 70 training.batch_size 0.0 +67 71 
model.output_channels 59.0 +67 71 model.input_dropout 0.3461887659093981 +67 71 model.output_dropout 0.3679164793165318 +67 71 model.feature_map_dropout 0.32712028748437133 +67 71 model.embedding_dim 2.0 +67 71 loss.margin 4.609025987307087 +67 71 negative_sampler.num_negs_per_pos 51.0 +67 71 training.batch_size 2.0 +67 72 model.output_channels 48.0 +67 72 model.input_dropout 0.19761182046555487 +67 72 model.output_dropout 0.45969636454694807 +67 72 model.feature_map_dropout 0.12961392147721085 +67 72 model.embedding_dim 1.0 +67 72 loss.margin 3.2260406300821503 +67 72 negative_sampler.num_negs_per_pos 69.0 +67 72 training.batch_size 2.0 +67 73 model.output_channels 43.0 +67 73 model.input_dropout 0.15940962698305444 +67 73 model.output_dropout 0.25558154146623757 +67 73 model.feature_map_dropout 0.2647676628302821 +67 73 model.embedding_dim 2.0 +67 73 loss.margin 8.714453281053915 +67 73 negative_sampler.num_negs_per_pos 96.0 +67 73 training.batch_size 2.0 +67 74 model.output_channels 64.0 +67 74 model.input_dropout 0.037574753330037014 +67 74 model.output_dropout 0.19824358969349687 +67 74 model.feature_map_dropout 0.32646310132018097 +67 74 model.embedding_dim 1.0 +67 74 loss.margin 2.4818818733970516 +67 74 negative_sampler.num_negs_per_pos 3.0 +67 74 training.batch_size 2.0 +67 75 model.output_channels 55.0 +67 75 model.input_dropout 0.1297991007555237 +67 75 model.output_dropout 0.3756702186647862 +67 75 model.feature_map_dropout 0.11647568697164995 +67 75 model.embedding_dim 1.0 +67 75 loss.margin 1.5841708243625352 +67 75 negative_sampler.num_negs_per_pos 26.0 +67 75 training.batch_size 2.0 +67 76 model.output_channels 36.0 +67 76 model.input_dropout 0.027395088692762304 +67 76 model.output_dropout 0.4167317689458142 +67 76 model.feature_map_dropout 0.46278894287466965 +67 76 model.embedding_dim 1.0 +67 76 loss.margin 7.19174771304848 +67 76 negative_sampler.num_negs_per_pos 76.0 +67 76 training.batch_size 1.0 +67 77 model.output_channels 62.0 +67 77 
model.input_dropout 0.11849446697580507 +67 77 model.output_dropout 0.1007864129173881 +67 77 model.feature_map_dropout 0.026117919632794084 +67 77 model.embedding_dim 1.0 +67 77 loss.margin 6.997092697158936 +67 77 negative_sampler.num_negs_per_pos 45.0 +67 77 training.batch_size 2.0 +67 78 model.output_channels 49.0 +67 78 model.input_dropout 0.25023179346018154 +67 78 model.output_dropout 0.33014194274529673 +67 78 model.feature_map_dropout 0.3691939260193418 +67 78 model.embedding_dim 1.0 +67 78 loss.margin 5.85532563534143 +67 78 negative_sampler.num_negs_per_pos 79.0 +67 78 training.batch_size 2.0 +67 79 model.output_channels 35.0 +67 79 model.input_dropout 0.01893864782439053 +67 79 model.output_dropout 0.21564711650437163 +67 79 model.feature_map_dropout 0.4218551895409218 +67 79 model.embedding_dim 0.0 +67 79 loss.margin 0.9698096393703344 +67 79 negative_sampler.num_negs_per_pos 51.0 +67 79 training.batch_size 2.0 +67 80 model.output_channels 54.0 +67 80 model.input_dropout 0.22406445362808397 +67 80 model.output_dropout 0.4548887980178987 +67 80 model.feature_map_dropout 0.22651135949270118 +67 80 model.embedding_dim 1.0 +67 80 loss.margin 2.947304791370492 +67 80 negative_sampler.num_negs_per_pos 36.0 +67 80 training.batch_size 2.0 +67 81 model.output_channels 62.0 +67 81 model.input_dropout 0.06362642230448567 +67 81 model.output_dropout 0.4315411653307001 +67 81 model.feature_map_dropout 0.21691731228450772 +67 81 model.embedding_dim 1.0 +67 81 loss.margin 8.74713039257595 +67 81 negative_sampler.num_negs_per_pos 17.0 +67 81 training.batch_size 1.0 +67 82 model.output_channels 54.0 +67 82 model.input_dropout 0.2663287439168225 +67 82 model.output_dropout 0.13042823875432485 +67 82 model.feature_map_dropout 0.11793024437138516 +67 82 model.embedding_dim 2.0 +67 82 loss.margin 9.018477180504437 +67 82 negative_sampler.num_negs_per_pos 19.0 +67 82 training.batch_size 0.0 +67 83 model.output_channels 22.0 +67 83 model.input_dropout 0.2699224553021262 +67 
83 model.output_dropout 0.060025759479129415 +67 83 model.feature_map_dropout 0.4482428463344045 +67 83 model.embedding_dim 2.0 +67 83 loss.margin 2.937257127205625 +67 83 negative_sampler.num_negs_per_pos 88.0 +67 83 training.batch_size 2.0 +67 84 model.output_channels 41.0 +67 84 model.input_dropout 0.4389128151809103 +67 84 model.output_dropout 0.12236147244427237 +67 84 model.feature_map_dropout 0.006279522892281841 +67 84 model.embedding_dim 2.0 +67 84 loss.margin 5.1134189876049305 +67 84 negative_sampler.num_negs_per_pos 11.0 +67 84 training.batch_size 2.0 +67 85 model.output_channels 64.0 +67 85 model.input_dropout 0.37069843874469766 +67 85 model.output_dropout 0.41971025123661415 +67 85 model.feature_map_dropout 0.09062707593315172 +67 85 model.embedding_dim 2.0 +67 85 loss.margin 8.728408084665578 +67 85 negative_sampler.num_negs_per_pos 15.0 +67 85 training.batch_size 2.0 +67 86 model.output_channels 52.0 +67 86 model.input_dropout 0.4872904501114334 +67 86 model.output_dropout 0.4563549506397427 +67 86 model.feature_map_dropout 0.17717986526866586 +67 86 model.embedding_dim 1.0 +67 86 loss.margin 9.815795310169921 +67 86 negative_sampler.num_negs_per_pos 11.0 +67 86 training.batch_size 0.0 +67 87 model.output_channels 39.0 +67 87 model.input_dropout 0.1638163899042283 +67 87 model.output_dropout 0.07307014418097546 +67 87 model.feature_map_dropout 0.43047361585660854 +67 87 model.embedding_dim 1.0 +67 87 loss.margin 2.7082811819826365 +67 87 negative_sampler.num_negs_per_pos 55.0 +67 87 training.batch_size 2.0 +67 88 model.output_channels 28.0 +67 88 model.input_dropout 0.3120246438709717 +67 88 model.output_dropout 0.3972138582282098 +67 88 model.feature_map_dropout 0.42117227343451064 +67 88 model.embedding_dim 2.0 +67 88 loss.margin 3.492550247876728 +67 88 negative_sampler.num_negs_per_pos 78.0 +67 88 training.batch_size 2.0 +67 89 model.output_channels 21.0 +67 89 model.input_dropout 0.08577809593428859 +67 89 model.output_dropout 
0.017977485839394514 +67 89 model.feature_map_dropout 0.22924976517170953 +67 89 model.embedding_dim 0.0 +67 89 loss.margin 5.701482767026676 +67 89 negative_sampler.num_negs_per_pos 52.0 +67 89 training.batch_size 1.0 +67 90 model.output_channels 56.0 +67 90 model.input_dropout 0.49395474977421533 +67 90 model.output_dropout 0.25698967550474383 +67 90 model.feature_map_dropout 0.015308916080114199 +67 90 model.embedding_dim 1.0 +67 90 loss.margin 2.890925363816324 +67 90 negative_sampler.num_negs_per_pos 79.0 +67 90 training.batch_size 2.0 +67 91 model.output_channels 58.0 +67 91 model.input_dropout 0.08097549535927967 +67 91 model.output_dropout 0.045577127548750374 +67 91 model.feature_map_dropout 0.31253337807952186 +67 91 model.embedding_dim 2.0 +67 91 loss.margin 8.350183125060077 +67 91 negative_sampler.num_negs_per_pos 2.0 +67 91 training.batch_size 2.0 +67 92 model.output_channels 60.0 +67 92 model.input_dropout 0.07687915389392946 +67 92 model.output_dropout 0.035900113500203557 +67 92 model.feature_map_dropout 0.18868915248419793 +67 92 model.embedding_dim 0.0 +67 92 loss.margin 4.497854807372576 +67 92 negative_sampler.num_negs_per_pos 55.0 +67 92 training.batch_size 0.0 +67 93 model.output_channels 19.0 +67 93 model.input_dropout 0.36836142822221457 +67 93 model.output_dropout 0.19512397140967913 +67 93 model.feature_map_dropout 0.16142177555084197 +67 93 model.embedding_dim 0.0 +67 93 loss.margin 0.7629930428748594 +67 93 negative_sampler.num_negs_per_pos 58.0 +67 93 training.batch_size 0.0 +67 94 model.output_channels 48.0 +67 94 model.input_dropout 0.48870017378536373 +67 94 model.output_dropout 0.03151056558543919 +67 94 model.feature_map_dropout 0.39244161306954245 +67 94 model.embedding_dim 2.0 +67 94 loss.margin 7.962007478247766 +67 94 negative_sampler.num_negs_per_pos 48.0 +67 94 training.batch_size 2.0 +67 95 model.output_channels 47.0 +67 95 model.input_dropout 0.3153597506274068 +67 95 model.output_dropout 0.06835041719773222 +67 95 
model.feature_map_dropout 0.09002085814693173 +67 95 model.embedding_dim 0.0 +67 95 loss.margin 6.760886493058415 +67 95 negative_sampler.num_negs_per_pos 83.0 +67 95 training.batch_size 1.0 +67 96 model.output_channels 60.0 +67 96 model.input_dropout 0.23424543365445416 +67 96 model.output_dropout 0.23055575385767624 +67 96 model.feature_map_dropout 0.41660845840786676 +67 96 model.embedding_dim 2.0 +67 96 loss.margin 6.683999618518521 +67 96 negative_sampler.num_negs_per_pos 33.0 +67 96 training.batch_size 0.0 +67 97 model.output_channels 35.0 +67 97 model.input_dropout 0.16662551175118423 +67 97 model.output_dropout 0.37691140598169065 +67 97 model.feature_map_dropout 0.019261387803181984 +67 97 model.embedding_dim 2.0 +67 97 loss.margin 3.7158495986006685 +67 97 negative_sampler.num_negs_per_pos 70.0 +67 97 training.batch_size 1.0 +67 98 model.output_channels 60.0 +67 98 model.input_dropout 0.4243096937947771 +67 98 model.output_dropout 0.4134299537487595 +67 98 model.feature_map_dropout 0.33750746363253364 +67 98 model.embedding_dim 0.0 +67 98 loss.margin 4.414896183165149 +67 98 negative_sampler.num_negs_per_pos 92.0 +67 98 training.batch_size 0.0 +67 99 model.output_channels 39.0 +67 99 model.input_dropout 0.1380563289957487 +67 99 model.output_dropout 0.22824417330624508 +67 99 model.feature_map_dropout 0.26484415738867073 +67 99 model.embedding_dim 2.0 +67 99 loss.margin 0.7882696192197018 +67 99 negative_sampler.num_negs_per_pos 79.0 +67 99 training.batch_size 0.0 +67 100 model.output_channels 51.0 +67 100 model.input_dropout 0.29506003800899544 +67 100 model.output_dropout 0.2427942563063249 +67 100 model.feature_map_dropout 0.40320058064597525 +67 100 model.embedding_dim 2.0 +67 100 loss.margin 8.25163665768735 +67 100 negative_sampler.num_negs_per_pos 22.0 +67 100 training.batch_size 2.0 +67 1 dataset """kinships""" +67 1 model """conve""" +67 1 loss """marginranking""" +67 1 regularizer """no""" +67 1 optimizer """adadelta""" +67 1 training_loop 
"""owa""" +67 1 negative_sampler """basic""" +67 1 evaluator """rankbased""" +67 2 dataset """kinships""" +67 2 model """conve""" +67 2 loss """marginranking""" +67 2 regularizer """no""" +67 2 optimizer """adadelta""" +67 2 training_loop """owa""" +67 2 negative_sampler """basic""" +67 2 evaluator """rankbased""" +67 3 dataset """kinships""" +67 3 model """conve""" +67 3 loss """marginranking""" +67 3 regularizer """no""" +67 3 optimizer """adadelta""" +67 3 training_loop """owa""" +67 3 negative_sampler """basic""" +67 3 evaluator """rankbased""" +67 4 dataset """kinships""" +67 4 model """conve""" +67 4 loss """marginranking""" +67 4 regularizer """no""" +67 4 optimizer """adadelta""" +67 4 training_loop """owa""" +67 4 negative_sampler """basic""" +67 4 evaluator """rankbased""" +67 5 dataset """kinships""" +67 5 model """conve""" +67 5 loss """marginranking""" +67 5 regularizer """no""" +67 5 optimizer """adadelta""" +67 5 training_loop """owa""" +67 5 negative_sampler """basic""" +67 5 evaluator """rankbased""" +67 6 dataset """kinships""" +67 6 model """conve""" +67 6 loss """marginranking""" +67 6 regularizer """no""" +67 6 optimizer """adadelta""" +67 6 training_loop """owa""" +67 6 negative_sampler """basic""" +67 6 evaluator """rankbased""" +67 7 dataset """kinships""" +67 7 model """conve""" +67 7 loss """marginranking""" +67 7 regularizer """no""" +67 7 optimizer """adadelta""" +67 7 training_loop """owa""" +67 7 negative_sampler """basic""" +67 7 evaluator """rankbased""" +67 8 dataset """kinships""" +67 8 model """conve""" +67 8 loss """marginranking""" +67 8 regularizer """no""" +67 8 optimizer """adadelta""" +67 8 training_loop """owa""" +67 8 negative_sampler """basic""" +67 8 evaluator """rankbased""" +67 9 dataset """kinships""" +67 9 model """conve""" +67 9 loss """marginranking""" +67 9 regularizer """no""" +67 9 optimizer """adadelta""" +67 9 training_loop """owa""" +67 9 negative_sampler """basic""" +67 9 evaluator """rankbased""" +67 10 
dataset """kinships""" +67 10 model """conve""" +67 10 loss """marginranking""" +67 10 regularizer """no""" +67 10 optimizer """adadelta""" +67 10 training_loop """owa""" +67 10 negative_sampler """basic""" +67 10 evaluator """rankbased""" +67 11 dataset """kinships""" +67 11 model """conve""" +67 11 loss """marginranking""" +67 11 regularizer """no""" +67 11 optimizer """adadelta""" +67 11 training_loop """owa""" +67 11 negative_sampler """basic""" +67 11 evaluator """rankbased""" +67 12 dataset """kinships""" +67 12 model """conve""" +67 12 loss """marginranking""" +67 12 regularizer """no""" +67 12 optimizer """adadelta""" +67 12 training_loop """owa""" +67 12 negative_sampler """basic""" +67 12 evaluator """rankbased""" +67 13 dataset """kinships""" +67 13 model """conve""" +67 13 loss """marginranking""" +67 13 regularizer """no""" +67 13 optimizer """adadelta""" +67 13 training_loop """owa""" +67 13 negative_sampler """basic""" +67 13 evaluator """rankbased""" +67 14 dataset """kinships""" +67 14 model """conve""" +67 14 loss """marginranking""" +67 14 regularizer """no""" +67 14 optimizer """adadelta""" +67 14 training_loop """owa""" +67 14 negative_sampler """basic""" +67 14 evaluator """rankbased""" +67 15 dataset """kinships""" +67 15 model """conve""" +67 15 loss """marginranking""" +67 15 regularizer """no""" +67 15 optimizer """adadelta""" +67 15 training_loop """owa""" +67 15 negative_sampler """basic""" +67 15 evaluator """rankbased""" +67 16 dataset """kinships""" +67 16 model """conve""" +67 16 loss """marginranking""" +67 16 regularizer """no""" +67 16 optimizer """adadelta""" +67 16 training_loop """owa""" +67 16 negative_sampler """basic""" +67 16 evaluator """rankbased""" +67 17 dataset """kinships""" +67 17 model """conve""" +67 17 loss """marginranking""" +67 17 regularizer """no""" +67 17 optimizer """adadelta""" +67 17 training_loop """owa""" +67 17 negative_sampler """basic""" +67 17 evaluator """rankbased""" +67 18 dataset """kinships""" 
+67 18 model """conve""" +67 18 loss """marginranking""" +67 18 regularizer """no""" +67 18 optimizer """adadelta""" +67 18 training_loop """owa""" +67 18 negative_sampler """basic""" +67 18 evaluator """rankbased""" +67 19 dataset """kinships""" +67 19 model """conve""" +67 19 loss """marginranking""" +67 19 regularizer """no""" +67 19 optimizer """adadelta""" +67 19 training_loop """owa""" +67 19 negative_sampler """basic""" +67 19 evaluator """rankbased""" +67 20 dataset """kinships""" +67 20 model """conve""" +67 20 loss """marginranking""" +67 20 regularizer """no""" +67 20 optimizer """adadelta""" +67 20 training_loop """owa""" +67 20 negative_sampler """basic""" +67 20 evaluator """rankbased""" +67 21 dataset """kinships""" +67 21 model """conve""" +67 21 loss """marginranking""" +67 21 regularizer """no""" +67 21 optimizer """adadelta""" +67 21 training_loop """owa""" +67 21 negative_sampler """basic""" +67 21 evaluator """rankbased""" +67 22 dataset """kinships""" +67 22 model """conve""" +67 22 loss """marginranking""" +67 22 regularizer """no""" +67 22 optimizer """adadelta""" +67 22 training_loop """owa""" +67 22 negative_sampler """basic""" +67 22 evaluator """rankbased""" +67 23 dataset """kinships""" +67 23 model """conve""" +67 23 loss """marginranking""" +67 23 regularizer """no""" +67 23 optimizer """adadelta""" +67 23 training_loop """owa""" +67 23 negative_sampler """basic""" +67 23 evaluator """rankbased""" +67 24 dataset """kinships""" +67 24 model """conve""" +67 24 loss """marginranking""" +67 24 regularizer """no""" +67 24 optimizer """adadelta""" +67 24 training_loop """owa""" +67 24 negative_sampler """basic""" +67 24 evaluator """rankbased""" +67 25 dataset """kinships""" +67 25 model """conve""" +67 25 loss """marginranking""" +67 25 regularizer """no""" +67 25 optimizer """adadelta""" +67 25 training_loop """owa""" +67 25 negative_sampler """basic""" +67 25 evaluator """rankbased""" +67 26 dataset """kinships""" +67 26 model 
"""conve""" +67 26 loss """marginranking""" +67 26 regularizer """no""" +67 26 optimizer """adadelta""" +67 26 training_loop """owa""" +67 26 negative_sampler """basic""" +67 26 evaluator """rankbased""" +67 27 dataset """kinships""" +67 27 model """conve""" +67 27 loss """marginranking""" +67 27 regularizer """no""" +67 27 optimizer """adadelta""" +67 27 training_loop """owa""" +67 27 negative_sampler """basic""" +67 27 evaluator """rankbased""" +67 28 dataset """kinships""" +67 28 model """conve""" +67 28 loss """marginranking""" +67 28 regularizer """no""" +67 28 optimizer """adadelta""" +67 28 training_loop """owa""" +67 28 negative_sampler """basic""" +67 28 evaluator """rankbased""" +67 29 dataset """kinships""" +67 29 model """conve""" +67 29 loss """marginranking""" +67 29 regularizer """no""" +67 29 optimizer """adadelta""" +67 29 training_loop """owa""" +67 29 negative_sampler """basic""" +67 29 evaluator """rankbased""" +67 30 dataset """kinships""" +67 30 model """conve""" +67 30 loss """marginranking""" +67 30 regularizer """no""" +67 30 optimizer """adadelta""" +67 30 training_loop """owa""" +67 30 negative_sampler """basic""" +67 30 evaluator """rankbased""" +67 31 dataset """kinships""" +67 31 model """conve""" +67 31 loss """marginranking""" +67 31 regularizer """no""" +67 31 optimizer """adadelta""" +67 31 training_loop """owa""" +67 31 negative_sampler """basic""" +67 31 evaluator """rankbased""" +67 32 dataset """kinships""" +67 32 model """conve""" +67 32 loss """marginranking""" +67 32 regularizer """no""" +67 32 optimizer """adadelta""" +67 32 training_loop """owa""" +67 32 negative_sampler """basic""" +67 32 evaluator """rankbased""" +67 33 dataset """kinships""" +67 33 model """conve""" +67 33 loss """marginranking""" +67 33 regularizer """no""" +67 33 optimizer """adadelta""" +67 33 training_loop """owa""" +67 33 negative_sampler """basic""" +67 33 evaluator """rankbased""" +67 34 dataset """kinships""" +67 34 model """conve""" +67 34 loss 
"""marginranking""" +67 34 regularizer """no""" +67 34 optimizer """adadelta""" +67 34 training_loop """owa""" +67 34 negative_sampler """basic""" +67 34 evaluator """rankbased""" +67 35 dataset """kinships""" +67 35 model """conve""" +67 35 loss """marginranking""" +67 35 regularizer """no""" +67 35 optimizer """adadelta""" +67 35 training_loop """owa""" +67 35 negative_sampler """basic""" +67 35 evaluator """rankbased""" +67 36 dataset """kinships""" +67 36 model """conve""" +67 36 loss """marginranking""" +67 36 regularizer """no""" +67 36 optimizer """adadelta""" +67 36 training_loop """owa""" +67 36 negative_sampler """basic""" +67 36 evaluator """rankbased""" +67 37 dataset """kinships""" +67 37 model """conve""" +67 37 loss """marginranking""" +67 37 regularizer """no""" +67 37 optimizer """adadelta""" +67 37 training_loop """owa""" +67 37 negative_sampler """basic""" +67 37 evaluator """rankbased""" +67 38 dataset """kinships""" +67 38 model """conve""" +67 38 loss """marginranking""" +67 38 regularizer """no""" +67 38 optimizer """adadelta""" +67 38 training_loop """owa""" +67 38 negative_sampler """basic""" +67 38 evaluator """rankbased""" +67 39 dataset """kinships""" +67 39 model """conve""" +67 39 loss """marginranking""" +67 39 regularizer """no""" +67 39 optimizer """adadelta""" +67 39 training_loop """owa""" +67 39 negative_sampler """basic""" +67 39 evaluator """rankbased""" +67 40 dataset """kinships""" +67 40 model """conve""" +67 40 loss """marginranking""" +67 40 regularizer """no""" +67 40 optimizer """adadelta""" +67 40 training_loop """owa""" +67 40 negative_sampler """basic""" +67 40 evaluator """rankbased""" +67 41 dataset """kinships""" +67 41 model """conve""" +67 41 loss """marginranking""" +67 41 regularizer """no""" +67 41 optimizer """adadelta""" +67 41 training_loop """owa""" +67 41 negative_sampler """basic""" +67 41 evaluator """rankbased""" +67 42 dataset """kinships""" +67 42 model """conve""" +67 42 loss """marginranking""" +67 
42 regularizer """no""" +67 42 optimizer """adadelta""" +67 42 training_loop """owa""" +67 42 negative_sampler """basic""" +67 42 evaluator """rankbased""" +67 43 dataset """kinships""" +67 43 model """conve""" +67 43 loss """marginranking""" +67 43 regularizer """no""" +67 43 optimizer """adadelta""" +67 43 training_loop """owa""" +67 43 negative_sampler """basic""" +67 43 evaluator """rankbased""" +67 44 dataset """kinships""" +67 44 model """conve""" +67 44 loss """marginranking""" +67 44 regularizer """no""" +67 44 optimizer """adadelta""" +67 44 training_loop """owa""" +67 44 negative_sampler """basic""" +67 44 evaluator """rankbased""" +67 45 dataset """kinships""" +67 45 model """conve""" +67 45 loss """marginranking""" +67 45 regularizer """no""" +67 45 optimizer """adadelta""" +67 45 training_loop """owa""" +67 45 negative_sampler """basic""" +67 45 evaluator """rankbased""" +67 46 dataset """kinships""" +67 46 model """conve""" +67 46 loss """marginranking""" +67 46 regularizer """no""" +67 46 optimizer """adadelta""" +67 46 training_loop """owa""" +67 46 negative_sampler """basic""" +67 46 evaluator """rankbased""" +67 47 dataset """kinships""" +67 47 model """conve""" +67 47 loss """marginranking""" +67 47 regularizer """no""" +67 47 optimizer """adadelta""" +67 47 training_loop """owa""" +67 47 negative_sampler """basic""" +67 47 evaluator """rankbased""" +67 48 dataset """kinships""" +67 48 model """conve""" +67 48 loss """marginranking""" +67 48 regularizer """no""" +67 48 optimizer """adadelta""" +67 48 training_loop """owa""" +67 48 negative_sampler """basic""" +67 48 evaluator """rankbased""" +67 49 dataset """kinships""" +67 49 model """conve""" +67 49 loss """marginranking""" +67 49 regularizer """no""" +67 49 optimizer """adadelta""" +67 49 training_loop """owa""" +67 49 negative_sampler """basic""" +67 49 evaluator """rankbased""" +67 50 dataset """kinships""" +67 50 model """conve""" +67 50 loss """marginranking""" +67 50 regularizer """no""" 
+67 50 optimizer """adadelta""" +67 50 training_loop """owa""" +67 50 negative_sampler """basic""" +67 50 evaluator """rankbased""" +67 51 dataset """kinships""" +67 51 model """conve""" +67 51 loss """marginranking""" +67 51 regularizer """no""" +67 51 optimizer """adadelta""" +67 51 training_loop """owa""" +67 51 negative_sampler """basic""" +67 51 evaluator """rankbased""" +67 52 dataset """kinships""" +67 52 model """conve""" +67 52 loss """marginranking""" +67 52 regularizer """no""" +67 52 optimizer """adadelta""" +67 52 training_loop """owa""" +67 52 negative_sampler """basic""" +67 52 evaluator """rankbased""" +67 53 dataset """kinships""" +67 53 model """conve""" +67 53 loss """marginranking""" +67 53 regularizer """no""" +67 53 optimizer """adadelta""" +67 53 training_loop """owa""" +67 53 negative_sampler """basic""" +67 53 evaluator """rankbased""" +67 54 dataset """kinships""" +67 54 model """conve""" +67 54 loss """marginranking""" +67 54 regularizer """no""" +67 54 optimizer """adadelta""" +67 54 training_loop """owa""" +67 54 negative_sampler """basic""" +67 54 evaluator """rankbased""" +67 55 dataset """kinships""" +67 55 model """conve""" +67 55 loss """marginranking""" +67 55 regularizer """no""" +67 55 optimizer """adadelta""" +67 55 training_loop """owa""" +67 55 negative_sampler """basic""" +67 55 evaluator """rankbased""" +67 56 dataset """kinships""" +67 56 model """conve""" +67 56 loss """marginranking""" +67 56 regularizer """no""" +67 56 optimizer """adadelta""" +67 56 training_loop """owa""" +67 56 negative_sampler """basic""" +67 56 evaluator """rankbased""" +67 57 dataset """kinships""" +67 57 model """conve""" +67 57 loss """marginranking""" +67 57 regularizer """no""" +67 57 optimizer """adadelta""" +67 57 training_loop """owa""" +67 57 negative_sampler """basic""" +67 57 evaluator """rankbased""" +67 58 dataset """kinships""" +67 58 model """conve""" +67 58 loss """marginranking""" +67 58 regularizer """no""" +67 58 optimizer 
"""adadelta""" +67 58 training_loop """owa""" +67 58 negative_sampler """basic""" +67 58 evaluator """rankbased""" +67 59 dataset """kinships""" +67 59 model """conve""" +67 59 loss """marginranking""" +67 59 regularizer """no""" +67 59 optimizer """adadelta""" +67 59 training_loop """owa""" +67 59 negative_sampler """basic""" +67 59 evaluator """rankbased""" +67 60 dataset """kinships""" +67 60 model """conve""" +67 60 loss """marginranking""" +67 60 regularizer """no""" +67 60 optimizer """adadelta""" +67 60 training_loop """owa""" +67 60 negative_sampler """basic""" +67 60 evaluator """rankbased""" +67 61 dataset """kinships""" +67 61 model """conve""" +67 61 loss """marginranking""" +67 61 regularizer """no""" +67 61 optimizer """adadelta""" +67 61 training_loop """owa""" +67 61 negative_sampler """basic""" +67 61 evaluator """rankbased""" +67 62 dataset """kinships""" +67 62 model """conve""" +67 62 loss """marginranking""" +67 62 regularizer """no""" +67 62 optimizer """adadelta""" +67 62 training_loop """owa""" +67 62 negative_sampler """basic""" +67 62 evaluator """rankbased""" +67 63 dataset """kinships""" +67 63 model """conve""" +67 63 loss """marginranking""" +67 63 regularizer """no""" +67 63 optimizer """adadelta""" +67 63 training_loop """owa""" +67 63 negative_sampler """basic""" +67 63 evaluator """rankbased""" +67 64 dataset """kinships""" +67 64 model """conve""" +67 64 loss """marginranking""" +67 64 regularizer """no""" +67 64 optimizer """adadelta""" +67 64 training_loop """owa""" +67 64 negative_sampler """basic""" +67 64 evaluator """rankbased""" +67 65 dataset """kinships""" +67 65 model """conve""" +67 65 loss """marginranking""" +67 65 regularizer """no""" +67 65 optimizer """adadelta""" +67 65 training_loop """owa""" +67 65 negative_sampler """basic""" +67 65 evaluator """rankbased""" +67 66 dataset """kinships""" +67 66 model """conve""" +67 66 loss """marginranking""" +67 66 regularizer """no""" +67 66 optimizer """adadelta""" +67 66 
training_loop """owa""" +67 66 negative_sampler """basic""" +67 66 evaluator """rankbased""" +67 67 dataset """kinships""" +67 67 model """conve""" +67 67 loss """marginranking""" +67 67 regularizer """no""" +67 67 optimizer """adadelta""" +67 67 training_loop """owa""" +67 67 negative_sampler """basic""" +67 67 evaluator """rankbased""" +67 68 dataset """kinships""" +67 68 model """conve""" +67 68 loss """marginranking""" +67 68 regularizer """no""" +67 68 optimizer """adadelta""" +67 68 training_loop """owa""" +67 68 negative_sampler """basic""" +67 68 evaluator """rankbased""" +67 69 dataset """kinships""" +67 69 model """conve""" +67 69 loss """marginranking""" +67 69 regularizer """no""" +67 69 optimizer """adadelta""" +67 69 training_loop """owa""" +67 69 negative_sampler """basic""" +67 69 evaluator """rankbased""" +67 70 dataset """kinships""" +67 70 model """conve""" +67 70 loss """marginranking""" +67 70 regularizer """no""" +67 70 optimizer """adadelta""" +67 70 training_loop """owa""" +67 70 negative_sampler """basic""" +67 70 evaluator """rankbased""" +67 71 dataset """kinships""" +67 71 model """conve""" +67 71 loss """marginranking""" +67 71 regularizer """no""" +67 71 optimizer """adadelta""" +67 71 training_loop """owa""" +67 71 negative_sampler """basic""" +67 71 evaluator """rankbased""" +67 72 dataset """kinships""" +67 72 model """conve""" +67 72 loss """marginranking""" +67 72 regularizer """no""" +67 72 optimizer """adadelta""" +67 72 training_loop """owa""" +67 72 negative_sampler """basic""" +67 72 evaluator """rankbased""" +67 73 dataset """kinships""" +67 73 model """conve""" +67 73 loss """marginranking""" +67 73 regularizer """no""" +67 73 optimizer """adadelta""" +67 73 training_loop """owa""" +67 73 negative_sampler """basic""" +67 73 evaluator """rankbased""" +67 74 dataset """kinships""" +67 74 model """conve""" +67 74 loss """marginranking""" +67 74 regularizer """no""" +67 74 optimizer """adadelta""" +67 74 training_loop """owa""" 
+67 74 negative_sampler """basic""" +67 74 evaluator """rankbased""" +67 75 dataset """kinships""" +67 75 model """conve""" +67 75 loss """marginranking""" +67 75 regularizer """no""" +67 75 optimizer """adadelta""" +67 75 training_loop """owa""" +67 75 negative_sampler """basic""" +67 75 evaluator """rankbased""" +67 76 dataset """kinships""" +67 76 model """conve""" +67 76 loss """marginranking""" +67 76 regularizer """no""" +67 76 optimizer """adadelta""" +67 76 training_loop """owa""" +67 76 negative_sampler """basic""" +67 76 evaluator """rankbased""" +67 77 dataset """kinships""" +67 77 model """conve""" +67 77 loss """marginranking""" +67 77 regularizer """no""" +67 77 optimizer """adadelta""" +67 77 training_loop """owa""" +67 77 negative_sampler """basic""" +67 77 evaluator """rankbased""" +67 78 dataset """kinships""" +67 78 model """conve""" +67 78 loss """marginranking""" +67 78 regularizer """no""" +67 78 optimizer """adadelta""" +67 78 training_loop """owa""" +67 78 negative_sampler """basic""" +67 78 evaluator """rankbased""" +67 79 dataset """kinships""" +67 79 model """conve""" +67 79 loss """marginranking""" +67 79 regularizer """no""" +67 79 optimizer """adadelta""" +67 79 training_loop """owa""" +67 79 negative_sampler """basic""" +67 79 evaluator """rankbased""" +67 80 dataset """kinships""" +67 80 model """conve""" +67 80 loss """marginranking""" +67 80 regularizer """no""" +67 80 optimizer """adadelta""" +67 80 training_loop """owa""" +67 80 negative_sampler """basic""" +67 80 evaluator """rankbased""" +67 81 dataset """kinships""" +67 81 model """conve""" +67 81 loss """marginranking""" +67 81 regularizer """no""" +67 81 optimizer """adadelta""" +67 81 training_loop """owa""" +67 81 negative_sampler """basic""" +67 81 evaluator """rankbased""" +67 82 dataset """kinships""" +67 82 model """conve""" +67 82 loss """marginranking""" +67 82 regularizer """no""" +67 82 optimizer """adadelta""" +67 82 training_loop """owa""" +67 82 negative_sampler 
"""basic""" +67 82 evaluator """rankbased""" +67 83 dataset """kinships""" +67 83 model """conve""" +67 83 loss """marginranking""" +67 83 regularizer """no""" +67 83 optimizer """adadelta""" +67 83 training_loop """owa""" +67 83 negative_sampler """basic""" +67 83 evaluator """rankbased""" +67 84 dataset """kinships""" +67 84 model """conve""" +67 84 loss """marginranking""" +67 84 regularizer """no""" +67 84 optimizer """adadelta""" +67 84 training_loop """owa""" +67 84 negative_sampler """basic""" +67 84 evaluator """rankbased""" +67 85 dataset """kinships""" +67 85 model """conve""" +67 85 loss """marginranking""" +67 85 regularizer """no""" +67 85 optimizer """adadelta""" +67 85 training_loop """owa""" +67 85 negative_sampler """basic""" +67 85 evaluator """rankbased""" +67 86 dataset """kinships""" +67 86 model """conve""" +67 86 loss """marginranking""" +67 86 regularizer """no""" +67 86 optimizer """adadelta""" +67 86 training_loop """owa""" +67 86 negative_sampler """basic""" +67 86 evaluator """rankbased""" +67 87 dataset """kinships""" +67 87 model """conve""" +67 87 loss """marginranking""" +67 87 regularizer """no""" +67 87 optimizer """adadelta""" +67 87 training_loop """owa""" +67 87 negative_sampler """basic""" +67 87 evaluator """rankbased""" +67 88 dataset """kinships""" +67 88 model """conve""" +67 88 loss """marginranking""" +67 88 regularizer """no""" +67 88 optimizer """adadelta""" +67 88 training_loop """owa""" +67 88 negative_sampler """basic""" +67 88 evaluator """rankbased""" +67 89 dataset """kinships""" +67 89 model """conve""" +67 89 loss """marginranking""" +67 89 regularizer """no""" +67 89 optimizer """adadelta""" +67 89 training_loop """owa""" +67 89 negative_sampler """basic""" +67 89 evaluator """rankbased""" +67 90 dataset """kinships""" +67 90 model """conve""" +67 90 loss """marginranking""" +67 90 regularizer """no""" +67 90 optimizer """adadelta""" +67 90 training_loop """owa""" +67 90 negative_sampler """basic""" +67 90 
evaluator """rankbased""" +67 91 dataset """kinships""" +67 91 model """conve""" +67 91 loss """marginranking""" +67 91 regularizer """no""" +67 91 optimizer """adadelta""" +67 91 training_loop """owa""" +67 91 negative_sampler """basic""" +67 91 evaluator """rankbased""" +67 92 dataset """kinships""" +67 92 model """conve""" +67 92 loss """marginranking""" +67 92 regularizer """no""" +67 92 optimizer """adadelta""" +67 92 training_loop """owa""" +67 92 negative_sampler """basic""" +67 92 evaluator """rankbased""" +67 93 dataset """kinships""" +67 93 model """conve""" +67 93 loss """marginranking""" +67 93 regularizer """no""" +67 93 optimizer """adadelta""" +67 93 training_loop """owa""" +67 93 negative_sampler """basic""" +67 93 evaluator """rankbased""" +67 94 dataset """kinships""" +67 94 model """conve""" +67 94 loss """marginranking""" +67 94 regularizer """no""" +67 94 optimizer """adadelta""" +67 94 training_loop """owa""" +67 94 negative_sampler """basic""" +67 94 evaluator """rankbased""" +67 95 dataset """kinships""" +67 95 model """conve""" +67 95 loss """marginranking""" +67 95 regularizer """no""" +67 95 optimizer """adadelta""" +67 95 training_loop """owa""" +67 95 negative_sampler """basic""" +67 95 evaluator """rankbased""" +67 96 dataset """kinships""" +67 96 model """conve""" +67 96 loss """marginranking""" +67 96 regularizer """no""" +67 96 optimizer """adadelta""" +67 96 training_loop """owa""" +67 96 negative_sampler """basic""" +67 96 evaluator """rankbased""" +67 97 dataset """kinships""" +67 97 model """conve""" +67 97 loss """marginranking""" +67 97 regularizer """no""" +67 97 optimizer """adadelta""" +67 97 training_loop """owa""" +67 97 negative_sampler """basic""" +67 97 evaluator """rankbased""" +67 98 dataset """kinships""" +67 98 model """conve""" +67 98 loss """marginranking""" +67 98 regularizer """no""" +67 98 optimizer """adadelta""" +67 98 training_loop """owa""" +67 98 negative_sampler """basic""" +67 98 evaluator 
"""rankbased""" +67 99 dataset """kinships""" +67 99 model """conve""" +67 99 loss """marginranking""" +67 99 regularizer """no""" +67 99 optimizer """adadelta""" +67 99 training_loop """owa""" +67 99 negative_sampler """basic""" +67 99 evaluator """rankbased""" +67 100 dataset """kinships""" +67 100 model """conve""" +67 100 loss """marginranking""" +67 100 regularizer """no""" +67 100 optimizer """adadelta""" +67 100 training_loop """owa""" +67 100 negative_sampler """basic""" +67 100 evaluator """rankbased""" +68 1 model.output_channels 43.0 +68 1 model.input_dropout 0.18732890485742315 +68 1 model.output_dropout 0.4567907618146445 +68 1 model.feature_map_dropout 0.28701755952927843 +68 1 model.embedding_dim 0.0 +68 1 loss.margin 18.531230593154213 +68 1 loss.adversarial_temperature 0.49660451956538354 +68 1 negative_sampler.num_negs_per_pos 29.0 +68 1 training.batch_size 1.0 +68 2 model.output_channels 36.0 +68 2 model.input_dropout 0.2571463540274683 +68 2 model.output_dropout 0.43692271115303183 +68 2 model.feature_map_dropout 0.18731908610679765 +68 2 model.embedding_dim 0.0 +68 2 loss.margin 4.796542446772368 +68 2 loss.adversarial_temperature 0.7482758847935556 +68 2 negative_sampler.num_negs_per_pos 41.0 +68 2 training.batch_size 0.0 +68 3 model.output_channels 53.0 +68 3 model.input_dropout 0.14645427619807327 +68 3 model.output_dropout 0.3656293303341433 +68 3 model.feature_map_dropout 0.12917055733402866 +68 3 model.embedding_dim 1.0 +68 3 loss.margin 10.475724034827069 +68 3 loss.adversarial_temperature 0.7454222624523223 +68 3 negative_sampler.num_negs_per_pos 61.0 +68 3 training.batch_size 1.0 +68 4 model.output_channels 55.0 +68 4 model.input_dropout 0.4726746977182685 +68 4 model.output_dropout 0.42528281761138687 +68 4 model.feature_map_dropout 0.09504424844177545 +68 4 model.embedding_dim 2.0 +68 4 loss.margin 6.6934812456851684 +68 4 loss.adversarial_temperature 0.2904434314828065 +68 4 negative_sampler.num_negs_per_pos 81.0 +68 4 
training.batch_size 2.0 +68 5 model.output_channels 34.0 +68 5 model.input_dropout 0.14951571318417672 +68 5 model.output_dropout 0.26485271005505484 +68 5 model.feature_map_dropout 0.4751974014463836 +68 5 model.embedding_dim 1.0 +68 5 loss.margin 6.968189962059503 +68 5 loss.adversarial_temperature 0.21604926103606914 +68 5 negative_sampler.num_negs_per_pos 16.0 +68 5 training.batch_size 0.0 +68 6 model.output_channels 22.0 +68 6 model.input_dropout 0.2539071706475129 +68 6 model.output_dropout 0.018520232048881646 +68 6 model.feature_map_dropout 0.48150124334051664 +68 6 model.embedding_dim 1.0 +68 6 loss.margin 7.002193206042482 +68 6 loss.adversarial_temperature 0.9511270894654092 +68 6 negative_sampler.num_negs_per_pos 43.0 +68 6 training.batch_size 1.0 +68 7 model.output_channels 48.0 +68 7 model.input_dropout 0.2318775767074842 +68 7 model.output_dropout 0.14407163260692235 +68 7 model.feature_map_dropout 0.2851146251203341 +68 7 model.embedding_dim 1.0 +68 7 loss.margin 19.918104542791983 +68 7 loss.adversarial_temperature 0.1472814664432917 +68 7 negative_sampler.num_negs_per_pos 23.0 +68 7 training.batch_size 1.0 +68 8 model.output_channels 19.0 +68 8 model.input_dropout 0.3385877469756641 +68 8 model.output_dropout 0.4912478710811637 +68 8 model.feature_map_dropout 0.28620099213078753 +68 8 model.embedding_dim 1.0 +68 8 loss.margin 17.20927126689231 +68 8 loss.adversarial_temperature 0.16185108260004155 +68 8 negative_sampler.num_negs_per_pos 4.0 +68 8 training.batch_size 1.0 +68 9 model.output_channels 22.0 +68 9 model.input_dropout 0.1141944530286712 +68 9 model.output_dropout 0.11997519154348463 +68 9 model.feature_map_dropout 0.25509436965994914 +68 9 model.embedding_dim 1.0 +68 9 loss.margin 16.159380703184606 +68 9 loss.adversarial_temperature 0.8607263251722275 +68 9 negative_sampler.num_negs_per_pos 68.0 +68 9 training.batch_size 2.0 +68 10 model.output_channels 43.0 +68 10 model.input_dropout 0.030818230345106445 +68 10 model.output_dropout 
0.14547043968730722 +68 10 model.feature_map_dropout 0.14836040936903694 +68 10 model.embedding_dim 2.0 +68 10 loss.margin 5.880536627819952 +68 10 loss.adversarial_temperature 0.20400315358683896 +68 10 negative_sampler.num_negs_per_pos 71.0 +68 10 training.batch_size 2.0 +68 11 model.output_channels 49.0 +68 11 model.input_dropout 0.44403694113921066 +68 11 model.output_dropout 0.4780134097872114 +68 11 model.feature_map_dropout 0.004781886833717941 +68 11 model.embedding_dim 0.0 +68 11 loss.margin 15.110092330098867 +68 11 loss.adversarial_temperature 0.20115183616765603 +68 11 negative_sampler.num_negs_per_pos 15.0 +68 11 training.batch_size 0.0 +68 12 model.output_channels 45.0 +68 12 model.input_dropout 0.48488791876920273 +68 12 model.output_dropout 0.29937809594806203 +68 12 model.feature_map_dropout 0.49948565674086925 +68 12 model.embedding_dim 0.0 +68 12 loss.margin 4.0224376784601885 +68 12 loss.adversarial_temperature 0.3649845627405628 +68 12 negative_sampler.num_negs_per_pos 93.0 +68 12 training.batch_size 1.0 +68 13 model.output_channels 59.0 +68 13 model.input_dropout 0.3666680764067073 +68 13 model.output_dropout 0.0530092097273514 +68 13 model.feature_map_dropout 0.2201320876523693 +68 13 model.embedding_dim 2.0 +68 13 loss.margin 7.147604020963247 +68 13 loss.adversarial_temperature 0.10716135076641067 +68 13 negative_sampler.num_negs_per_pos 39.0 +68 13 training.batch_size 0.0 +68 14 model.output_channels 58.0 +68 14 model.input_dropout 0.35521250108659697 +68 14 model.output_dropout 0.33814518375136365 +68 14 model.feature_map_dropout 0.03190308129019326 +68 14 model.embedding_dim 2.0 +68 14 loss.margin 13.310323688500189 +68 14 loss.adversarial_temperature 0.5895782642861894 +68 14 negative_sampler.num_negs_per_pos 42.0 +68 14 training.batch_size 1.0 +68 15 model.output_channels 38.0 +68 15 model.input_dropout 0.10813965866930181 +68 15 model.output_dropout 0.3442964456348627 +68 15 model.feature_map_dropout 0.06974381564170179 +68 15 
model.embedding_dim 0.0 +68 15 loss.margin 1.9018120139611745 +68 15 loss.adversarial_temperature 0.6718398313816614 +68 15 negative_sampler.num_negs_per_pos 86.0 +68 15 training.batch_size 2.0 +68 16 model.output_channels 50.0 +68 16 model.input_dropout 0.33263359797948944 +68 16 model.output_dropout 0.45063008456199266 +68 16 model.feature_map_dropout 0.026030112817412354 +68 16 model.embedding_dim 0.0 +68 16 loss.margin 24.03769941755882 +68 16 loss.adversarial_temperature 0.3413122351066732 +68 16 negative_sampler.num_negs_per_pos 94.0 +68 16 training.batch_size 0.0 +68 17 model.output_channels 26.0 +68 17 model.input_dropout 0.2948159821449379 +68 17 model.output_dropout 0.25106489468391063 +68 17 model.feature_map_dropout 0.49051874338917356 +68 17 model.embedding_dim 0.0 +68 17 loss.margin 13.336113980015027 +68 17 loss.adversarial_temperature 0.6284848030640331 +68 17 negative_sampler.num_negs_per_pos 46.0 +68 17 training.batch_size 0.0 +68 18 model.output_channels 58.0 +68 18 model.input_dropout 0.09362267024736604 +68 18 model.output_dropout 0.21007463174951235 +68 18 model.feature_map_dropout 0.2322086726838013 +68 18 model.embedding_dim 2.0 +68 18 loss.margin 22.99371845638725 +68 18 loss.adversarial_temperature 0.9969494920234561 +68 18 negative_sampler.num_negs_per_pos 45.0 +68 18 training.batch_size 1.0 +68 19 model.output_channels 51.0 +68 19 model.input_dropout 0.4549503561580944 +68 19 model.output_dropout 0.2053688316656207 +68 19 model.feature_map_dropout 0.4024054529806868 +68 19 model.embedding_dim 0.0 +68 19 loss.margin 6.821363527850547 +68 19 loss.adversarial_temperature 0.6056647891819361 +68 19 negative_sampler.num_negs_per_pos 93.0 +68 19 training.batch_size 1.0 +68 20 model.output_channels 32.0 +68 20 model.input_dropout 0.43375883010147365 +68 20 model.output_dropout 0.16763392239645541 +68 20 model.feature_map_dropout 0.3411696647784516 +68 20 model.embedding_dim 1.0 +68 20 loss.margin 21.154875779132276 +68 20 
loss.adversarial_temperature 0.6953523903345388 +68 20 negative_sampler.num_negs_per_pos 35.0 +68 20 training.batch_size 2.0 +68 21 model.output_channels 38.0 +68 21 model.input_dropout 0.1406844097319372 +68 21 model.output_dropout 0.29272161898453725 +68 21 model.feature_map_dropout 0.29185685039165893 +68 21 model.embedding_dim 0.0 +68 21 loss.margin 2.558271435536525 +68 21 loss.adversarial_temperature 0.7410460813168195 +68 21 negative_sampler.num_negs_per_pos 95.0 +68 21 training.batch_size 0.0 +68 22 model.output_channels 47.0 +68 22 model.input_dropout 0.26069900731499795 +68 22 model.output_dropout 0.19766562637326396 +68 22 model.feature_map_dropout 0.025590974902568064 +68 22 model.embedding_dim 0.0 +68 22 loss.margin 17.31417799160897 +68 22 loss.adversarial_temperature 0.6319860887009947 +68 22 negative_sampler.num_negs_per_pos 95.0 +68 22 training.batch_size 1.0 +68 23 model.output_channels 16.0 +68 23 model.input_dropout 0.049607020046113504 +68 23 model.output_dropout 0.34429258365991633 +68 23 model.feature_map_dropout 0.1574771362972781 +68 23 model.embedding_dim 0.0 +68 23 loss.margin 28.798202004542425 +68 23 loss.adversarial_temperature 0.6350409904277847 +68 23 negative_sampler.num_negs_per_pos 41.0 +68 23 training.batch_size 2.0 +68 24 model.output_channels 37.0 +68 24 model.input_dropout 0.05178706415139006 +68 24 model.output_dropout 0.2908408170176896 +68 24 model.feature_map_dropout 0.20171924394853458 +68 24 model.embedding_dim 2.0 +68 24 loss.margin 13.276375340651743 +68 24 loss.adversarial_temperature 0.29411233572497275 +68 24 negative_sampler.num_negs_per_pos 41.0 +68 24 training.batch_size 2.0 +68 25 model.output_channels 25.0 +68 25 model.input_dropout 0.35198438708490104 +68 25 model.output_dropout 0.37913446452338284 +68 25 model.feature_map_dropout 0.18566488645256773 +68 25 model.embedding_dim 2.0 +68 25 loss.margin 27.169864478425975 +68 25 loss.adversarial_temperature 0.5877636156065551 +68 25 
negative_sampler.num_negs_per_pos 74.0 +68 25 training.batch_size 1.0 +68 26 model.output_channels 43.0 +68 26 model.input_dropout 0.1053794690928721 +68 26 model.output_dropout 0.28719956723465334 +68 26 model.feature_map_dropout 0.16664458226696527 +68 26 model.embedding_dim 0.0 +68 26 loss.margin 24.870184019880476 +68 26 loss.adversarial_temperature 0.3726040742705794 +68 26 negative_sampler.num_negs_per_pos 9.0 +68 26 training.batch_size 2.0 +68 27 model.output_channels 37.0 +68 27 model.input_dropout 0.1825790171946532 +68 27 model.output_dropout 0.10523012519796038 +68 27 model.feature_map_dropout 0.22653385803038179 +68 27 model.embedding_dim 0.0 +68 27 loss.margin 20.50732759371219 +68 27 loss.adversarial_temperature 0.7923965563358075 +68 27 negative_sampler.num_negs_per_pos 37.0 +68 27 training.batch_size 0.0 +68 28 model.output_channels 43.0 +68 28 model.input_dropout 0.22838578406610094 +68 28 model.output_dropout 0.451527705278099 +68 28 model.feature_map_dropout 0.04092775480679389 +68 28 model.embedding_dim 2.0 +68 28 loss.margin 1.4728428537017866 +68 28 loss.adversarial_temperature 0.7752305450350103 +68 28 negative_sampler.num_negs_per_pos 40.0 +68 28 training.batch_size 2.0 +68 29 model.output_channels 38.0 +68 29 model.input_dropout 0.2452009816114612 +68 29 model.output_dropout 0.3553720027136221 +68 29 model.feature_map_dropout 0.3464287171917795 +68 29 model.embedding_dim 0.0 +68 29 loss.margin 8.66860355058499 +68 29 loss.adversarial_temperature 0.5479360044195043 +68 29 negative_sampler.num_negs_per_pos 96.0 +68 29 training.batch_size 2.0 +68 30 model.output_channels 41.0 +68 30 model.input_dropout 0.025572506913437043 +68 30 model.output_dropout 0.1470225334711464 +68 30 model.feature_map_dropout 0.21996392180726482 +68 30 model.embedding_dim 1.0 +68 30 loss.margin 13.430033698180635 +68 30 loss.adversarial_temperature 0.23651810928281777 +68 30 negative_sampler.num_negs_per_pos 52.0 +68 30 training.batch_size 0.0 +68 31 
model.output_channels 40.0 +68 31 model.input_dropout 0.15349514321429708 +68 31 model.output_dropout 0.08224694226733453 +68 31 model.feature_map_dropout 0.10076025913139908 +68 31 model.embedding_dim 2.0 +68 31 loss.margin 11.644258509850088 +68 31 loss.adversarial_temperature 0.8118093680371881 +68 31 negative_sampler.num_negs_per_pos 91.0 +68 31 training.batch_size 1.0 +68 32 model.output_channels 46.0 +68 32 model.input_dropout 0.43341168849215267 +68 32 model.output_dropout 0.09018785201478824 +68 32 model.feature_map_dropout 0.4191901165256532 +68 32 model.embedding_dim 0.0 +68 32 loss.margin 9.105224315474251 +68 32 loss.adversarial_temperature 0.9308256634832294 +68 32 negative_sampler.num_negs_per_pos 75.0 +68 32 training.batch_size 1.0 +68 33 model.output_channels 21.0 +68 33 model.input_dropout 0.4408104722160879 +68 33 model.output_dropout 0.46134899210896735 +68 33 model.feature_map_dropout 0.1553035043646998 +68 33 model.embedding_dim 1.0 +68 33 loss.margin 20.342620221168964 +68 33 loss.adversarial_temperature 0.1105142764584123 +68 33 negative_sampler.num_negs_per_pos 90.0 +68 33 training.batch_size 2.0 +68 34 model.output_channels 28.0 +68 34 model.input_dropout 0.22744831705985058 +68 34 model.output_dropout 0.09350227582759912 +68 34 model.feature_map_dropout 0.05509830340652577 +68 34 model.embedding_dim 0.0 +68 34 loss.margin 26.66690275976028 +68 34 loss.adversarial_temperature 0.8720273873597818 +68 34 negative_sampler.num_negs_per_pos 96.0 +68 34 training.batch_size 1.0 +68 35 model.output_channels 50.0 +68 35 model.input_dropout 0.05692499298766074 +68 35 model.output_dropout 0.3029621026553332 +68 35 model.feature_map_dropout 0.14824364984057736 +68 35 model.embedding_dim 1.0 +68 35 loss.margin 21.967135916061846 +68 35 loss.adversarial_temperature 0.35061798217253715 +68 35 negative_sampler.num_negs_per_pos 97.0 +68 35 training.batch_size 0.0 +68 36 model.output_channels 59.0 +68 36 model.input_dropout 0.36012974168365164 +68 36 
model.output_dropout 0.4075218516086167 +68 36 model.feature_map_dropout 0.07085618071760974 +68 36 model.embedding_dim 2.0 +68 36 loss.margin 21.538845193427036 +68 36 loss.adversarial_temperature 0.9635377640358168 +68 36 negative_sampler.num_negs_per_pos 6.0 +68 36 training.batch_size 0.0 +68 37 model.output_channels 40.0 +68 37 model.input_dropout 0.2816985788681355 +68 37 model.output_dropout 0.32883950483447 +68 37 model.feature_map_dropout 0.12195356048416428 +68 37 model.embedding_dim 2.0 +68 37 loss.margin 29.394862439078704 +68 37 loss.adversarial_temperature 0.3303507282934746 +68 37 negative_sampler.num_negs_per_pos 44.0 +68 37 training.batch_size 1.0 +68 38 model.output_channels 45.0 +68 38 model.input_dropout 0.36553525268495873 +68 38 model.output_dropout 0.48876221542335363 +68 38 model.feature_map_dropout 0.07555268962579004 +68 38 model.embedding_dim 2.0 +68 38 loss.margin 13.691787296209062 +68 38 loss.adversarial_temperature 0.37843432941894684 +68 38 negative_sampler.num_negs_per_pos 10.0 +68 38 training.batch_size 2.0 +68 39 model.output_channels 51.0 +68 39 model.input_dropout 0.45899952357386076 +68 39 model.output_dropout 0.4210069463034899 +68 39 model.feature_map_dropout 0.06818273502838934 +68 39 model.embedding_dim 1.0 +68 39 loss.margin 26.15241256056326 +68 39 loss.adversarial_temperature 0.9750891635106979 +68 39 negative_sampler.num_negs_per_pos 77.0 +68 39 training.batch_size 1.0 +68 40 model.output_channels 41.0 +68 40 model.input_dropout 0.20091889707825206 +68 40 model.output_dropout 0.011915671225937396 +68 40 model.feature_map_dropout 0.050224960082425274 +68 40 model.embedding_dim 0.0 +68 40 loss.margin 9.34738000101677 +68 40 loss.adversarial_temperature 0.9882991428124228 +68 40 negative_sampler.num_negs_per_pos 28.0 +68 40 training.batch_size 1.0 +68 41 model.output_channels 52.0 +68 41 model.input_dropout 0.28952081863653795 +68 41 model.output_dropout 0.1044062165937088 +68 41 model.feature_map_dropout 
0.10297098426489942 +68 41 model.embedding_dim 1.0 +68 41 loss.margin 18.282311814324697 +68 41 loss.adversarial_temperature 0.7694017802185411 +68 41 negative_sampler.num_negs_per_pos 0.0 +68 41 training.batch_size 1.0 +68 42 model.output_channels 43.0 +68 42 model.input_dropout 0.13765358016246343 +68 42 model.output_dropout 0.21353803159320212 +68 42 model.feature_map_dropout 0.27985494979745307 +68 42 model.embedding_dim 1.0 +68 42 loss.margin 27.172627814096657 +68 42 loss.adversarial_temperature 0.8001640101382624 +68 42 negative_sampler.num_negs_per_pos 90.0 +68 42 training.batch_size 1.0 +68 43 model.output_channels 64.0 +68 43 model.input_dropout 0.01760249743946385 +68 43 model.output_dropout 0.46939409943717847 +68 43 model.feature_map_dropout 0.3782090723959003 +68 43 model.embedding_dim 1.0 +68 43 loss.margin 18.694936417756054 +68 43 loss.adversarial_temperature 0.2879114855063354 +68 43 negative_sampler.num_negs_per_pos 39.0 +68 43 training.batch_size 2.0 +68 44 model.output_channels 36.0 +68 44 model.input_dropout 0.1569548965380385 +68 44 model.output_dropout 0.2505125339413425 +68 44 model.feature_map_dropout 0.03318796398733159 +68 44 model.embedding_dim 0.0 +68 44 loss.margin 15.56875712481215 +68 44 loss.adversarial_temperature 0.724227001748386 +68 44 negative_sampler.num_negs_per_pos 62.0 +68 44 training.batch_size 2.0 +68 45 model.output_channels 46.0 +68 45 model.input_dropout 0.4558834911771816 +68 45 model.output_dropout 0.2531060039850781 +68 45 model.feature_map_dropout 0.21108286748123212 +68 45 model.embedding_dim 2.0 +68 45 loss.margin 15.42045280691585 +68 45 loss.adversarial_temperature 0.3751685032117691 +68 45 negative_sampler.num_negs_per_pos 10.0 +68 45 training.batch_size 0.0 +68 46 model.output_channels 22.0 +68 46 model.input_dropout 0.2971123194167422 +68 46 model.output_dropout 0.40922769176197427 +68 46 model.feature_map_dropout 0.3029185956381054 +68 46 model.embedding_dim 1.0 +68 46 loss.margin 17.47755865459752 +68 46 
loss.adversarial_temperature 0.5086108680201292 +68 46 negative_sampler.num_negs_per_pos 33.0 +68 46 training.batch_size 0.0 +68 47 model.output_channels 28.0 +68 47 model.input_dropout 0.10092828972621976 +68 47 model.output_dropout 0.22660356117892305 +68 47 model.feature_map_dropout 0.4388231159096841 +68 47 model.embedding_dim 1.0 +68 47 loss.margin 9.189302832510661 +68 47 loss.adversarial_temperature 0.37315940492157007 +68 47 negative_sampler.num_negs_per_pos 67.0 +68 47 training.batch_size 1.0 +68 48 model.output_channels 17.0 +68 48 model.input_dropout 0.3278029890739311 +68 48 model.output_dropout 0.1661229433041128 +68 48 model.feature_map_dropout 0.1236266047511288 +68 48 model.embedding_dim 0.0 +68 48 loss.margin 9.262303709796859 +68 48 loss.adversarial_temperature 0.9496993693632833 +68 48 negative_sampler.num_negs_per_pos 40.0 +68 48 training.batch_size 1.0 +68 49 model.output_channels 62.0 +68 49 model.input_dropout 0.12447306284221571 +68 49 model.output_dropout 0.48885442307163535 +68 49 model.feature_map_dropout 0.14288088405501592 +68 49 model.embedding_dim 2.0 +68 49 loss.margin 4.119366441729863 +68 49 loss.adversarial_temperature 0.8621561175360857 +68 49 negative_sampler.num_negs_per_pos 86.0 +68 49 training.batch_size 1.0 +68 50 model.output_channels 19.0 +68 50 model.input_dropout 0.02792104245842314 +68 50 model.output_dropout 0.1358200778670321 +68 50 model.feature_map_dropout 0.14381481158736958 +68 50 model.embedding_dim 1.0 +68 50 loss.margin 29.959748335839695 +68 50 loss.adversarial_temperature 0.7886091553703382 +68 50 negative_sampler.num_negs_per_pos 53.0 +68 50 training.batch_size 1.0 +68 51 model.output_channels 63.0 +68 51 model.input_dropout 0.3186478098300853 +68 51 model.output_dropout 0.16921533429682006 +68 51 model.feature_map_dropout 0.3346997757289493 +68 51 model.embedding_dim 2.0 +68 51 loss.margin 3.871137607059367 +68 51 loss.adversarial_temperature 0.7100397370627312 +68 51 negative_sampler.num_negs_per_pos 90.0 
+68 51 training.batch_size 2.0 +68 52 model.output_channels 22.0 +68 52 model.input_dropout 0.08671118876391803 +68 52 model.output_dropout 0.12334404235567398 +68 52 model.feature_map_dropout 0.27879888220826593 +68 52 model.embedding_dim 0.0 +68 52 loss.margin 12.157877699556673 +68 52 loss.adversarial_temperature 0.8359917111341544 +68 52 negative_sampler.num_negs_per_pos 50.0 +68 52 training.batch_size 2.0 +68 53 model.output_channels 57.0 +68 53 model.input_dropout 0.2539512745847506 +68 53 model.output_dropout 0.47394611752512983 +68 53 model.feature_map_dropout 0.23827834126349673 +68 53 model.embedding_dim 1.0 +68 53 loss.margin 22.919871786621357 +68 53 loss.adversarial_temperature 0.9892238880630934 +68 53 negative_sampler.num_negs_per_pos 89.0 +68 53 training.batch_size 0.0 +68 54 model.output_channels 61.0 +68 54 model.input_dropout 0.3394765598064481 +68 54 model.output_dropout 0.42754835986825335 +68 54 model.feature_map_dropout 0.021390583840239008 +68 54 model.embedding_dim 1.0 +68 54 loss.margin 22.563594178907866 +68 54 loss.adversarial_temperature 0.4528999844557392 +68 54 negative_sampler.num_negs_per_pos 76.0 +68 54 training.batch_size 1.0 +68 55 model.output_channels 43.0 +68 55 model.input_dropout 0.3817229784838143 +68 55 model.output_dropout 0.36793124867928617 +68 55 model.feature_map_dropout 0.409023046210403 +68 55 model.embedding_dim 2.0 +68 55 loss.margin 21.24558302886475 +68 55 loss.adversarial_temperature 0.10093646827595655 +68 55 negative_sampler.num_negs_per_pos 97.0 +68 55 training.batch_size 0.0 +68 56 model.output_channels 37.0 +68 56 model.input_dropout 0.43484439536567393 +68 56 model.output_dropout 0.1433891185779992 +68 56 model.feature_map_dropout 0.4144344274014343 +68 56 model.embedding_dim 2.0 +68 56 loss.margin 24.964534937257472 +68 56 loss.adversarial_temperature 0.2712659887539012 +68 56 negative_sampler.num_negs_per_pos 94.0 +68 56 training.batch_size 2.0 +68 57 model.output_channels 26.0 +68 57 
model.input_dropout 0.4209387205200269 +68 57 model.output_dropout 0.4158867636415268 +68 57 model.feature_map_dropout 0.1855934041791315 +68 57 model.embedding_dim 2.0 +68 57 loss.margin 18.003904592244783 +68 57 loss.adversarial_temperature 0.8861534386032924 +68 57 negative_sampler.num_negs_per_pos 51.0 +68 57 training.batch_size 1.0 +68 58 model.output_channels 59.0 +68 58 model.input_dropout 0.26655980302874704 +68 58 model.output_dropout 0.32532998648084405 +68 58 model.feature_map_dropout 0.2841574036561219 +68 58 model.embedding_dim 0.0 +68 58 loss.margin 12.272989694533406 +68 58 loss.adversarial_temperature 0.8125078953020896 +68 58 negative_sampler.num_negs_per_pos 41.0 +68 58 training.batch_size 2.0 +68 59 model.output_channels 50.0 +68 59 model.input_dropout 0.22616974675882534 +68 59 model.output_dropout 0.27096189117357944 +68 59 model.feature_map_dropout 0.021716287955748037 +68 59 model.embedding_dim 2.0 +68 59 loss.margin 8.522572845856278 +68 59 loss.adversarial_temperature 0.609723583051585 +68 59 negative_sampler.num_negs_per_pos 35.0 +68 59 training.batch_size 0.0 +68 60 model.output_channels 33.0 +68 60 model.input_dropout 0.37025375258678384 +68 60 model.output_dropout 0.030654310450243638 +68 60 model.feature_map_dropout 0.23567924136937213 +68 60 model.embedding_dim 2.0 +68 60 loss.margin 26.98109023163131 +68 60 loss.adversarial_temperature 0.8482463880080506 +68 60 negative_sampler.num_negs_per_pos 73.0 +68 60 training.batch_size 0.0 +68 61 model.output_channels 37.0 +68 61 model.input_dropout 0.48528765990589867 +68 61 model.output_dropout 0.06147493760860551 +68 61 model.feature_map_dropout 0.07398016196100993 +68 61 model.embedding_dim 0.0 +68 61 loss.margin 17.12493000605125 +68 61 loss.adversarial_temperature 0.71596464533653 +68 61 negative_sampler.num_negs_per_pos 69.0 +68 61 training.batch_size 0.0 +68 62 model.output_channels 64.0 +68 62 model.input_dropout 0.14026232310851122 +68 62 model.output_dropout 0.46092786576510447 +68 
62 model.feature_map_dropout 0.1400455851079856 +68 62 model.embedding_dim 2.0 +68 62 loss.margin 15.792817508124397 +68 62 loss.adversarial_temperature 0.4770394612594472 +68 62 negative_sampler.num_negs_per_pos 33.0 +68 62 training.batch_size 2.0 +68 63 model.output_channels 61.0 +68 63 model.input_dropout 0.39572509765241337 +68 63 model.output_dropout 0.38180363783615734 +68 63 model.feature_map_dropout 0.16048867173056813 +68 63 model.embedding_dim 1.0 +68 63 loss.margin 6.676723049097592 +68 63 loss.adversarial_temperature 0.42476022415514114 +68 63 negative_sampler.num_negs_per_pos 63.0 +68 63 training.batch_size 2.0 +68 64 model.output_channels 24.0 +68 64 model.input_dropout 0.016339609591060178 +68 64 model.output_dropout 0.06119604057171868 +68 64 model.feature_map_dropout 0.2658030222699878 +68 64 model.embedding_dim 0.0 +68 64 loss.margin 11.924084969882836 +68 64 loss.adversarial_temperature 0.6284409528648763 +68 64 negative_sampler.num_negs_per_pos 81.0 +68 64 training.batch_size 2.0 +68 65 model.output_channels 16.0 +68 65 model.input_dropout 0.4433499722846891 +68 65 model.output_dropout 0.22177889384439742 +68 65 model.feature_map_dropout 0.27714923614676257 +68 65 model.embedding_dim 1.0 +68 65 loss.margin 15.958263477837264 +68 65 loss.adversarial_temperature 0.6038219745253618 +68 65 negative_sampler.num_negs_per_pos 31.0 +68 65 training.batch_size 2.0 +68 66 model.output_channels 20.0 +68 66 model.input_dropout 0.23901548273770745 +68 66 model.output_dropout 0.3575387851849756 +68 66 model.feature_map_dropout 0.20295329922734168 +68 66 model.embedding_dim 0.0 +68 66 loss.margin 2.0424459841392997 +68 66 loss.adversarial_temperature 0.9431375333275064 +68 66 negative_sampler.num_negs_per_pos 91.0 +68 66 training.batch_size 2.0 +68 67 model.output_channels 17.0 +68 67 model.input_dropout 0.19226744707274462 +68 67 model.output_dropout 0.44752793524074175 +68 67 model.feature_map_dropout 0.49892483915500807 +68 67 model.embedding_dim 0.0 +68 67 
loss.margin 21.790359216703717 +68 67 loss.adversarial_temperature 0.5569281513140667 +68 67 negative_sampler.num_negs_per_pos 3.0 +68 67 training.batch_size 1.0 +68 68 model.output_channels 42.0 +68 68 model.input_dropout 0.4755849287173257 +68 68 model.output_dropout 0.4980398747475332 +68 68 model.feature_map_dropout 0.24790180750460972 +68 68 model.embedding_dim 2.0 +68 68 loss.margin 14.051789067801844 +68 68 loss.adversarial_temperature 0.1961783999065059 +68 68 negative_sampler.num_negs_per_pos 87.0 +68 68 training.batch_size 2.0 +68 69 model.output_channels 31.0 +68 69 model.input_dropout 0.47769151748362954 +68 69 model.output_dropout 0.04530687637858549 +68 69 model.feature_map_dropout 0.3116567026560788 +68 69 model.embedding_dim 2.0 +68 69 loss.margin 3.055504993718552 +68 69 loss.adversarial_temperature 0.8263151771060212 +68 69 negative_sampler.num_negs_per_pos 45.0 +68 69 training.batch_size 0.0 +68 70 model.output_channels 45.0 +68 70 model.input_dropout 0.4784264401666582 +68 70 model.output_dropout 0.2237061427345513 +68 70 model.feature_map_dropout 0.23456552577586126 +68 70 model.embedding_dim 1.0 +68 70 loss.margin 26.242165092068042 +68 70 loss.adversarial_temperature 0.4439020518136072 +68 70 negative_sampler.num_negs_per_pos 68.0 +68 70 training.batch_size 0.0 +68 71 model.output_channels 29.0 +68 71 model.input_dropout 0.4747610037112089 +68 71 model.output_dropout 0.20177609415914755 +68 71 model.feature_map_dropout 0.035574414635009655 +68 71 model.embedding_dim 1.0 +68 71 loss.margin 4.393953155241452 +68 71 loss.adversarial_temperature 0.29329929945666483 +68 71 negative_sampler.num_negs_per_pos 53.0 +68 71 training.batch_size 1.0 +68 72 model.output_channels 19.0 +68 72 model.input_dropout 0.2400696494914495 +68 72 model.output_dropout 0.14812490147890206 +68 72 model.feature_map_dropout 0.3093868891840306 +68 72 model.embedding_dim 1.0 +68 72 loss.margin 28.834909038339607 +68 72 loss.adversarial_temperature 0.7202821128787952 +68 72 
negative_sampler.num_negs_per_pos 30.0 +68 72 training.batch_size 1.0 +68 73 model.output_channels 59.0 +68 73 model.input_dropout 0.26767468802821237 +68 73 model.output_dropout 0.1697374664682047 +68 73 model.feature_map_dropout 0.22799032468045394 +68 73 model.embedding_dim 2.0 +68 73 loss.margin 7.605806256096011 +68 73 loss.adversarial_temperature 0.8955413531467301 +68 73 negative_sampler.num_negs_per_pos 34.0 +68 73 training.batch_size 0.0 +68 74 model.output_channels 21.0 +68 74 model.input_dropout 0.21745732809314466 +68 74 model.output_dropout 0.10795329308188761 +68 74 model.feature_map_dropout 0.2623746039347936 +68 74 model.embedding_dim 2.0 +68 74 loss.margin 10.24824824672873 +68 74 loss.adversarial_temperature 0.7366060176412571 +68 74 negative_sampler.num_negs_per_pos 52.0 +68 74 training.batch_size 1.0 +68 75 model.output_channels 59.0 +68 75 model.input_dropout 0.31659718568750156 +68 75 model.output_dropout 0.004129401342154504 +68 75 model.feature_map_dropout 0.12106128700947227 +68 75 model.embedding_dim 1.0 +68 75 loss.margin 26.574562829239536 +68 75 loss.adversarial_temperature 0.6328603047042763 +68 75 negative_sampler.num_negs_per_pos 66.0 +68 75 training.batch_size 2.0 +68 76 model.output_channels 59.0 +68 76 model.input_dropout 0.2872314321822525 +68 76 model.output_dropout 0.13191390904753125 +68 76 model.feature_map_dropout 0.4196383019126118 +68 76 model.embedding_dim 0.0 +68 76 loss.margin 19.52649111308072 +68 76 loss.adversarial_temperature 0.520836061040271 +68 76 negative_sampler.num_negs_per_pos 26.0 +68 76 training.batch_size 2.0 +68 77 model.output_channels 61.0 +68 77 model.input_dropout 0.3416518453908629 +68 77 model.output_dropout 0.3955540074916949 +68 77 model.feature_map_dropout 0.3775441138588687 +68 77 model.embedding_dim 0.0 +68 77 loss.margin 3.356067785102187 +68 77 loss.adversarial_temperature 0.7994112770034525 +68 77 negative_sampler.num_negs_per_pos 44.0 +68 77 training.batch_size 1.0 +68 78 
model.output_channels 29.0 +68 78 model.input_dropout 0.2542315079819257 +68 78 model.output_dropout 0.15970141646696112 +68 78 model.feature_map_dropout 0.43025388979059037 +68 78 model.embedding_dim 0.0 +68 78 loss.margin 17.101730868222333 +68 78 loss.adversarial_temperature 0.514980262411724 +68 78 negative_sampler.num_negs_per_pos 16.0 +68 78 training.batch_size 1.0 +68 79 model.output_channels 38.0 +68 79 model.input_dropout 0.31459929719224605 +68 79 model.output_dropout 0.4419124454667052 +68 79 model.feature_map_dropout 0.1677155031478485 +68 79 model.embedding_dim 0.0 +68 79 loss.margin 29.996580385384117 +68 79 loss.adversarial_temperature 0.8523652411293196 +68 79 negative_sampler.num_negs_per_pos 36.0 +68 79 training.batch_size 0.0 +68 80 model.output_channels 19.0 +68 80 model.input_dropout 0.30423907371607517 +68 80 model.output_dropout 0.015815238900388384 +68 80 model.feature_map_dropout 0.32815227206212055 +68 80 model.embedding_dim 1.0 +68 80 loss.margin 7.642906809963543 +68 80 loss.adversarial_temperature 0.8761143950482402 +68 80 negative_sampler.num_negs_per_pos 71.0 +68 80 training.batch_size 1.0 +68 81 model.output_channels 18.0 +68 81 model.input_dropout 0.20666087022977592 +68 81 model.output_dropout 0.1582858253591814 +68 81 model.feature_map_dropout 0.016157482288375824 +68 81 model.embedding_dim 0.0 +68 81 loss.margin 10.20081179472499 +68 81 loss.adversarial_temperature 0.711619422693665 +68 81 negative_sampler.num_negs_per_pos 35.0 +68 81 training.batch_size 2.0 +68 82 model.output_channels 33.0 +68 82 model.input_dropout 0.036382110772762544 +68 82 model.output_dropout 0.16869923215701188 +68 82 model.feature_map_dropout 0.40866125571476825 +68 82 model.embedding_dim 0.0 +68 82 loss.margin 3.233631803514764 +68 82 loss.adversarial_temperature 0.34352909155311506 +68 82 negative_sampler.num_negs_per_pos 34.0 +68 82 training.batch_size 0.0 +68 83 model.output_channels 43.0 +68 83 model.input_dropout 0.029143459610439293 +68 83 
model.output_dropout 0.48523574086812243 +68 83 model.feature_map_dropout 0.47580532200754444 +68 83 model.embedding_dim 2.0 +68 83 loss.margin 18.996711657029167 +68 83 loss.adversarial_temperature 0.13401744113914085 +68 83 negative_sampler.num_negs_per_pos 54.0 +68 83 training.batch_size 1.0 +68 84 model.output_channels 46.0 +68 84 model.input_dropout 0.2628622824852918 +68 84 model.output_dropout 0.09669001625371026 +68 84 model.feature_map_dropout 0.01937857694362921 +68 84 model.embedding_dim 0.0 +68 84 loss.margin 20.869954991696265 +68 84 loss.adversarial_temperature 0.3597126509361719 +68 84 negative_sampler.num_negs_per_pos 14.0 +68 84 training.batch_size 1.0 +68 85 model.output_channels 24.0 +68 85 model.input_dropout 0.4089174808550292 +68 85 model.output_dropout 0.13248878543857223 +68 85 model.feature_map_dropout 0.22876921242298792 +68 85 model.embedding_dim 0.0 +68 85 loss.margin 14.03366326215105 +68 85 loss.adversarial_temperature 0.7481520660432274 +68 85 negative_sampler.num_negs_per_pos 32.0 +68 85 training.batch_size 2.0 +68 86 model.output_channels 47.0 +68 86 model.input_dropout 0.4410238611179464 +68 86 model.output_dropout 0.45985381468638953 +68 86 model.feature_map_dropout 0.2352694894568692 +68 86 model.embedding_dim 2.0 +68 86 loss.margin 4.630504037360913 +68 86 loss.adversarial_temperature 0.7285516699771248 +68 86 negative_sampler.num_negs_per_pos 74.0 +68 86 training.batch_size 2.0 +68 87 model.output_channels 21.0 +68 87 model.input_dropout 0.011269270114932783 +68 87 model.output_dropout 0.20430289495876863 +68 87 model.feature_map_dropout 0.4007094587435242 +68 87 model.embedding_dim 0.0 +68 87 loss.margin 11.704911017613831 +68 87 loss.adversarial_temperature 0.8503904313223529 +68 87 negative_sampler.num_negs_per_pos 38.0 +68 87 training.batch_size 1.0 +68 88 model.output_channels 32.0 +68 88 model.input_dropout 0.37883239627496873 +68 88 model.output_dropout 0.3570319064796074 +68 88 model.feature_map_dropout 
0.32873860332093235 +68 88 model.embedding_dim 1.0 +68 88 loss.margin 2.4763346617972477 +68 88 loss.adversarial_temperature 0.9351180301270449 +68 88 negative_sampler.num_negs_per_pos 5.0 +68 88 training.batch_size 1.0 +68 89 model.output_channels 20.0 +68 89 model.input_dropout 0.10074846697132522 +68 89 model.output_dropout 0.0656431671036466 +68 89 model.feature_map_dropout 0.27077658935334387 +68 89 model.embedding_dim 0.0 +68 89 loss.margin 25.522249130983386 +68 89 loss.adversarial_temperature 0.9614613557919289 +68 89 negative_sampler.num_negs_per_pos 72.0 +68 89 training.batch_size 0.0 +68 90 model.output_channels 20.0 +68 90 model.input_dropout 0.035814737852537804 +68 90 model.output_dropout 0.31624705441382556 +68 90 model.feature_map_dropout 0.09097979758680513 +68 90 model.embedding_dim 2.0 +68 90 loss.margin 25.335793161581975 +68 90 loss.adversarial_temperature 0.30419163493112333 +68 90 negative_sampler.num_negs_per_pos 4.0 +68 90 training.batch_size 2.0 +68 91 model.output_channels 64.0 +68 91 model.input_dropout 0.417916520480292 +68 91 model.output_dropout 0.3190573252666618 +68 91 model.feature_map_dropout 0.2865050153000715 +68 91 model.embedding_dim 2.0 +68 91 loss.margin 17.430461371391903 +68 91 loss.adversarial_temperature 0.956632965558747 +68 91 negative_sampler.num_negs_per_pos 12.0 +68 91 training.batch_size 1.0 +68 92 model.output_channels 22.0 +68 92 model.input_dropout 0.38566464623878455 +68 92 model.output_dropout 0.33307891140065055 +68 92 model.feature_map_dropout 0.1881563719226798 +68 92 model.embedding_dim 1.0 +68 92 loss.margin 4.6594753981973245 +68 92 loss.adversarial_temperature 0.6679918065772339 +68 92 negative_sampler.num_negs_per_pos 23.0 +68 92 training.batch_size 1.0 +68 93 model.output_channels 38.0 +68 93 model.input_dropout 0.08688275532714484 +68 93 model.output_dropout 0.11597744026301571 +68 93 model.feature_map_dropout 0.15197102191808615 +68 93 model.embedding_dim 2.0 +68 93 loss.margin 4.941135086724467 +68 
93 loss.adversarial_temperature 0.23135331533477652 +68 93 negative_sampler.num_negs_per_pos 91.0 +68 93 training.batch_size 0.0 +68 94 model.output_channels 25.0 +68 94 model.input_dropout 0.3822062657707473 +68 94 model.output_dropout 0.13621120363329042 +68 94 model.feature_map_dropout 0.4796611042332125 +68 94 model.embedding_dim 1.0 +68 94 loss.margin 17.36086239576283 +68 94 loss.adversarial_temperature 0.4818878207753432 +68 94 negative_sampler.num_negs_per_pos 14.0 +68 94 training.batch_size 2.0 +68 95 model.output_channels 40.0 +68 95 model.input_dropout 0.025033505180133975 +68 95 model.output_dropout 0.060016557469952936 +68 95 model.feature_map_dropout 0.20766270161531342 +68 95 model.embedding_dim 1.0 +68 95 loss.margin 7.334037930807093 +68 95 loss.adversarial_temperature 0.7247351727523522 +68 95 negative_sampler.num_negs_per_pos 64.0 +68 95 training.batch_size 1.0 +68 96 model.output_channels 63.0 +68 96 model.input_dropout 0.24400204597909597 +68 96 model.output_dropout 0.042173788756105834 +68 96 model.feature_map_dropout 0.2944943598251583 +68 96 model.embedding_dim 0.0 +68 96 loss.margin 26.09588462078388 +68 96 loss.adversarial_temperature 0.3090074583450999 +68 96 negative_sampler.num_negs_per_pos 67.0 +68 96 training.batch_size 1.0 +68 97 model.output_channels 37.0 +68 97 model.input_dropout 0.2712095329076724 +68 97 model.output_dropout 0.3771663837775263 +68 97 model.feature_map_dropout 0.10446704290657388 +68 97 model.embedding_dim 2.0 +68 97 loss.margin 18.642053549968438 +68 97 loss.adversarial_temperature 0.21809130325490855 +68 97 negative_sampler.num_negs_per_pos 16.0 +68 97 training.batch_size 1.0 +68 98 model.output_channels 16.0 +68 98 model.input_dropout 0.2082057447430855 +68 98 model.output_dropout 0.4088585632323252 +68 98 model.feature_map_dropout 0.13732923373963063 +68 98 model.embedding_dim 2.0 +68 98 loss.margin 19.08384591478392 +68 98 loss.adversarial_temperature 0.6782205653958675 +68 98 
negative_sampler.num_negs_per_pos 48.0 +68 98 training.batch_size 1.0 +68 99 model.output_channels 23.0 +68 99 model.input_dropout 0.28470090573193835 +68 99 model.output_dropout 0.26060449168562055 +68 99 model.feature_map_dropout 0.4496996392240586 +68 99 model.embedding_dim 1.0 +68 99 loss.margin 12.940665765105958 +68 99 loss.adversarial_temperature 0.9662032451701674 +68 99 negative_sampler.num_negs_per_pos 27.0 +68 99 training.batch_size 1.0 +68 100 model.output_channels 41.0 +68 100 model.input_dropout 0.33958361426804257 +68 100 model.output_dropout 0.39174986005422235 +68 100 model.feature_map_dropout 0.12521474295886276 +68 100 model.embedding_dim 0.0 +68 100 loss.margin 18.10780776672174 +68 100 loss.adversarial_temperature 0.13250326827108186 +68 100 negative_sampler.num_negs_per_pos 67.0 +68 100 training.batch_size 2.0 +68 1 dataset """kinships""" +68 1 model """conve""" +68 1 loss """nssa""" +68 1 regularizer """no""" +68 1 optimizer """adadelta""" +68 1 training_loop """owa""" +68 1 negative_sampler """basic""" +68 1 evaluator """rankbased""" +68 2 dataset """kinships""" +68 2 model """conve""" +68 2 loss """nssa""" +68 2 regularizer """no""" +68 2 optimizer """adadelta""" +68 2 training_loop """owa""" +68 2 negative_sampler """basic""" +68 2 evaluator """rankbased""" +68 3 dataset """kinships""" +68 3 model """conve""" +68 3 loss """nssa""" +68 3 regularizer """no""" +68 3 optimizer """adadelta""" +68 3 training_loop """owa""" +68 3 negative_sampler """basic""" +68 3 evaluator """rankbased""" +68 4 dataset """kinships""" +68 4 model """conve""" +68 4 loss """nssa""" +68 4 regularizer """no""" +68 4 optimizer """adadelta""" +68 4 training_loop """owa""" +68 4 negative_sampler """basic""" +68 4 evaluator """rankbased""" +68 5 dataset """kinships""" +68 5 model """conve""" +68 5 loss """nssa""" +68 5 regularizer """no""" +68 5 optimizer """adadelta""" +68 5 training_loop """owa""" +68 5 negative_sampler """basic""" +68 5 evaluator """rankbased""" +68 6 
dataset """kinships""" +68 6 model """conve""" +68 6 loss """nssa""" +68 6 regularizer """no""" +68 6 optimizer """adadelta""" +68 6 training_loop """owa""" +68 6 negative_sampler """basic""" +68 6 evaluator """rankbased""" +68 7 dataset """kinships""" +68 7 model """conve""" +68 7 loss """nssa""" +68 7 regularizer """no""" +68 7 optimizer """adadelta""" +68 7 training_loop """owa""" +68 7 negative_sampler """basic""" +68 7 evaluator """rankbased""" +68 8 dataset """kinships""" +68 8 model """conve""" +68 8 loss """nssa""" +68 8 regularizer """no""" +68 8 optimizer """adadelta""" +68 8 training_loop """owa""" +68 8 negative_sampler """basic""" +68 8 evaluator """rankbased""" +68 9 dataset """kinships""" +68 9 model """conve""" +68 9 loss """nssa""" +68 9 regularizer """no""" +68 9 optimizer """adadelta""" +68 9 training_loop """owa""" +68 9 negative_sampler """basic""" +68 9 evaluator """rankbased""" +68 10 dataset """kinships""" +68 10 model """conve""" +68 10 loss """nssa""" +68 10 regularizer """no""" +68 10 optimizer """adadelta""" +68 10 training_loop """owa""" +68 10 negative_sampler """basic""" +68 10 evaluator """rankbased""" +68 11 dataset """kinships""" +68 11 model """conve""" +68 11 loss """nssa""" +68 11 regularizer """no""" +68 11 optimizer """adadelta""" +68 11 training_loop """owa""" +68 11 negative_sampler """basic""" +68 11 evaluator """rankbased""" +68 12 dataset """kinships""" +68 12 model """conve""" +68 12 loss """nssa""" +68 12 regularizer """no""" +68 12 optimizer """adadelta""" +68 12 training_loop """owa""" +68 12 negative_sampler """basic""" +68 12 evaluator """rankbased""" +68 13 dataset """kinships""" +68 13 model """conve""" +68 13 loss """nssa""" +68 13 regularizer """no""" +68 13 optimizer """adadelta""" +68 13 training_loop """owa""" +68 13 negative_sampler """basic""" +68 13 evaluator """rankbased""" +68 14 dataset """kinships""" +68 14 model """conve""" +68 14 loss """nssa""" +68 14 regularizer """no""" +68 14 optimizer 
"""adadelta""" +68 14 training_loop """owa""" +68 14 negative_sampler """basic""" +68 14 evaluator """rankbased""" +68 15 dataset """kinships""" +68 15 model """conve""" +68 15 loss """nssa""" +68 15 regularizer """no""" +68 15 optimizer """adadelta""" +68 15 training_loop """owa""" +68 15 negative_sampler """basic""" +68 15 evaluator """rankbased""" +68 16 dataset """kinships""" +68 16 model """conve""" +68 16 loss """nssa""" +68 16 regularizer """no""" +68 16 optimizer """adadelta""" +68 16 training_loop """owa""" +68 16 negative_sampler """basic""" +68 16 evaluator """rankbased""" +68 17 dataset """kinships""" +68 17 model """conve""" +68 17 loss """nssa""" +68 17 regularizer """no""" +68 17 optimizer """adadelta""" +68 17 training_loop """owa""" +68 17 negative_sampler """basic""" +68 17 evaluator """rankbased""" +68 18 dataset """kinships""" +68 18 model """conve""" +68 18 loss """nssa""" +68 18 regularizer """no""" +68 18 optimizer """adadelta""" +68 18 training_loop """owa""" +68 18 negative_sampler """basic""" +68 18 evaluator """rankbased""" +68 19 dataset """kinships""" +68 19 model """conve""" +68 19 loss """nssa""" +68 19 regularizer """no""" +68 19 optimizer """adadelta""" +68 19 training_loop """owa""" +68 19 negative_sampler """basic""" +68 19 evaluator """rankbased""" +68 20 dataset """kinships""" +68 20 model """conve""" +68 20 loss """nssa""" +68 20 regularizer """no""" +68 20 optimizer """adadelta""" +68 20 training_loop """owa""" +68 20 negative_sampler """basic""" +68 20 evaluator """rankbased""" +68 21 dataset """kinships""" +68 21 model """conve""" +68 21 loss """nssa""" +68 21 regularizer """no""" +68 21 optimizer """adadelta""" +68 21 training_loop """owa""" +68 21 negative_sampler """basic""" +68 21 evaluator """rankbased""" +68 22 dataset """kinships""" +68 22 model """conve""" +68 22 loss """nssa""" +68 22 regularizer """no""" +68 22 optimizer """adadelta""" +68 22 training_loop """owa""" +68 22 negative_sampler """basic""" +68 22 
evaluator """rankbased""" +68 23 dataset """kinships""" +68 23 model """conve""" +68 23 loss """nssa""" +68 23 regularizer """no""" +68 23 optimizer """adadelta""" +68 23 training_loop """owa""" +68 23 negative_sampler """basic""" +68 23 evaluator """rankbased""" +68 24 dataset """kinships""" +68 24 model """conve""" +68 24 loss """nssa""" +68 24 regularizer """no""" +68 24 optimizer """adadelta""" +68 24 training_loop """owa""" +68 24 negative_sampler """basic""" +68 24 evaluator """rankbased""" +68 25 dataset """kinships""" +68 25 model """conve""" +68 25 loss """nssa""" +68 25 regularizer """no""" +68 25 optimizer """adadelta""" +68 25 training_loop """owa""" +68 25 negative_sampler """basic""" +68 25 evaluator """rankbased""" +68 26 dataset """kinships""" +68 26 model """conve""" +68 26 loss """nssa""" +68 26 regularizer """no""" +68 26 optimizer """adadelta""" +68 26 training_loop """owa""" +68 26 negative_sampler """basic""" +68 26 evaluator """rankbased""" +68 27 dataset """kinships""" +68 27 model """conve""" +68 27 loss """nssa""" +68 27 regularizer """no""" +68 27 optimizer """adadelta""" +68 27 training_loop """owa""" +68 27 negative_sampler """basic""" +68 27 evaluator """rankbased""" +68 28 dataset """kinships""" +68 28 model """conve""" +68 28 loss """nssa""" +68 28 regularizer """no""" +68 28 optimizer """adadelta""" +68 28 training_loop """owa""" +68 28 negative_sampler """basic""" +68 28 evaluator """rankbased""" +68 29 dataset """kinships""" +68 29 model """conve""" +68 29 loss """nssa""" +68 29 regularizer """no""" +68 29 optimizer """adadelta""" +68 29 training_loop """owa""" +68 29 negative_sampler """basic""" +68 29 evaluator """rankbased""" +68 30 dataset """kinships""" +68 30 model """conve""" +68 30 loss """nssa""" +68 30 regularizer """no""" +68 30 optimizer """adadelta""" +68 30 training_loop """owa""" +68 30 negative_sampler """basic""" +68 30 evaluator """rankbased""" +68 31 dataset """kinships""" +68 31 model """conve""" +68 31 loss 
"""nssa""" +68 31 regularizer """no""" +68 31 optimizer """adadelta""" +68 31 training_loop """owa""" +68 31 negative_sampler """basic""" +68 31 evaluator """rankbased""" +68 32 dataset """kinships""" +68 32 model """conve""" +68 32 loss """nssa""" +68 32 regularizer """no""" +68 32 optimizer """adadelta""" +68 32 training_loop """owa""" +68 32 negative_sampler """basic""" +68 32 evaluator """rankbased""" +68 33 dataset """kinships""" +68 33 model """conve""" +68 33 loss """nssa""" +68 33 regularizer """no""" +68 33 optimizer """adadelta""" +68 33 training_loop """owa""" +68 33 negative_sampler """basic""" +68 33 evaluator """rankbased""" +68 34 dataset """kinships""" +68 34 model """conve""" +68 34 loss """nssa""" +68 34 regularizer """no""" +68 34 optimizer """adadelta""" +68 34 training_loop """owa""" +68 34 negative_sampler """basic""" +68 34 evaluator """rankbased""" +68 35 dataset """kinships""" +68 35 model """conve""" +68 35 loss """nssa""" +68 35 regularizer """no""" +68 35 optimizer """adadelta""" +68 35 training_loop """owa""" +68 35 negative_sampler """basic""" +68 35 evaluator """rankbased""" +68 36 dataset """kinships""" +68 36 model """conve""" +68 36 loss """nssa""" +68 36 regularizer """no""" +68 36 optimizer """adadelta""" +68 36 training_loop """owa""" +68 36 negative_sampler """basic""" +68 36 evaluator """rankbased""" +68 37 dataset """kinships""" +68 37 model """conve""" +68 37 loss """nssa""" +68 37 regularizer """no""" +68 37 optimizer """adadelta""" +68 37 training_loop """owa""" +68 37 negative_sampler """basic""" +68 37 evaluator """rankbased""" +68 38 dataset """kinships""" +68 38 model """conve""" +68 38 loss """nssa""" +68 38 regularizer """no""" +68 38 optimizer """adadelta""" +68 38 training_loop """owa""" +68 38 negative_sampler """basic""" +68 38 evaluator """rankbased""" +68 39 dataset """kinships""" +68 39 model """conve""" +68 39 loss """nssa""" +68 39 regularizer """no""" +68 39 optimizer """adadelta""" +68 39 training_loop 
"""owa""" +68 39 negative_sampler """basic""" +68 39 evaluator """rankbased""" +68 40 dataset """kinships""" +68 40 model """conve""" +68 40 loss """nssa""" +68 40 regularizer """no""" +68 40 optimizer """adadelta""" +68 40 training_loop """owa""" +68 40 negative_sampler """basic""" +68 40 evaluator """rankbased""" +68 41 dataset """kinships""" +68 41 model """conve""" +68 41 loss """nssa""" +68 41 regularizer """no""" +68 41 optimizer """adadelta""" +68 41 training_loop """owa""" +68 41 negative_sampler """basic""" +68 41 evaluator """rankbased""" +68 42 dataset """kinships""" +68 42 model """conve""" +68 42 loss """nssa""" +68 42 regularizer """no""" +68 42 optimizer """adadelta""" +68 42 training_loop """owa""" +68 42 negative_sampler """basic""" +68 42 evaluator """rankbased""" +68 43 dataset """kinships""" +68 43 model """conve""" +68 43 loss """nssa""" +68 43 regularizer """no""" +68 43 optimizer """adadelta""" +68 43 training_loop """owa""" +68 43 negative_sampler """basic""" +68 43 evaluator """rankbased""" +68 44 dataset """kinships""" +68 44 model """conve""" +68 44 loss """nssa""" +68 44 regularizer """no""" +68 44 optimizer """adadelta""" +68 44 training_loop """owa""" +68 44 negative_sampler """basic""" +68 44 evaluator """rankbased""" +68 45 dataset """kinships""" +68 45 model """conve""" +68 45 loss """nssa""" +68 45 regularizer """no""" +68 45 optimizer """adadelta""" +68 45 training_loop """owa""" +68 45 negative_sampler """basic""" +68 45 evaluator """rankbased""" +68 46 dataset """kinships""" +68 46 model """conve""" +68 46 loss """nssa""" +68 46 regularizer """no""" +68 46 optimizer """adadelta""" +68 46 training_loop """owa""" +68 46 negative_sampler """basic""" +68 46 evaluator """rankbased""" +68 47 dataset """kinships""" +68 47 model """conve""" +68 47 loss """nssa""" +68 47 regularizer """no""" +68 47 optimizer """adadelta""" +68 47 training_loop """owa""" +68 47 negative_sampler """basic""" +68 47 evaluator """rankbased""" +68 48 dataset 
"""kinships""" +68 48 model """conve""" +68 48 loss """nssa""" +68 48 regularizer """no""" +68 48 optimizer """adadelta""" +68 48 training_loop """owa""" +68 48 negative_sampler """basic""" +68 48 evaluator """rankbased""" +68 49 dataset """kinships""" +68 49 model """conve""" +68 49 loss """nssa""" +68 49 regularizer """no""" +68 49 optimizer """adadelta""" +68 49 training_loop """owa""" +68 49 negative_sampler """basic""" +68 49 evaluator """rankbased""" +68 50 dataset """kinships""" +68 50 model """conve""" +68 50 loss """nssa""" +68 50 regularizer """no""" +68 50 optimizer """adadelta""" +68 50 training_loop """owa""" +68 50 negative_sampler """basic""" +68 50 evaluator """rankbased""" +68 51 dataset """kinships""" +68 51 model """conve""" +68 51 loss """nssa""" +68 51 regularizer """no""" +68 51 optimizer """adadelta""" +68 51 training_loop """owa""" +68 51 negative_sampler """basic""" +68 51 evaluator """rankbased""" +68 52 dataset """kinships""" +68 52 model """conve""" +68 52 loss """nssa""" +68 52 regularizer """no""" +68 52 optimizer """adadelta""" +68 52 training_loop """owa""" +68 52 negative_sampler """basic""" +68 52 evaluator """rankbased""" +68 53 dataset """kinships""" +68 53 model """conve""" +68 53 loss """nssa""" +68 53 regularizer """no""" +68 53 optimizer """adadelta""" +68 53 training_loop """owa""" +68 53 negative_sampler """basic""" +68 53 evaluator """rankbased""" +68 54 dataset """kinships""" +68 54 model """conve""" +68 54 loss """nssa""" +68 54 regularizer """no""" +68 54 optimizer """adadelta""" +68 54 training_loop """owa""" +68 54 negative_sampler """basic""" +68 54 evaluator """rankbased""" +68 55 dataset """kinships""" +68 55 model """conve""" +68 55 loss """nssa""" +68 55 regularizer """no""" +68 55 optimizer """adadelta""" +68 55 training_loop """owa""" +68 55 negative_sampler """basic""" +68 55 evaluator """rankbased""" +68 56 dataset """kinships""" +68 56 model """conve""" +68 56 loss """nssa""" +68 56 regularizer """no""" +68 
56 optimizer """adadelta""" +68 56 training_loop """owa""" +68 56 negative_sampler """basic""" +68 56 evaluator """rankbased""" +68 57 dataset """kinships""" +68 57 model """conve""" +68 57 loss """nssa""" +68 57 regularizer """no""" +68 57 optimizer """adadelta""" +68 57 training_loop """owa""" +68 57 negative_sampler """basic""" +68 57 evaluator """rankbased""" +68 58 dataset """kinships""" +68 58 model """conve""" +68 58 loss """nssa""" +68 58 regularizer """no""" +68 58 optimizer """adadelta""" +68 58 training_loop """owa""" +68 58 negative_sampler """basic""" +68 58 evaluator """rankbased""" +68 59 dataset """kinships""" +68 59 model """conve""" +68 59 loss """nssa""" +68 59 regularizer """no""" +68 59 optimizer """adadelta""" +68 59 training_loop """owa""" +68 59 negative_sampler """basic""" +68 59 evaluator """rankbased""" +68 60 dataset """kinships""" +68 60 model """conve""" +68 60 loss """nssa""" +68 60 regularizer """no""" +68 60 optimizer """adadelta""" +68 60 training_loop """owa""" +68 60 negative_sampler """basic""" +68 60 evaluator """rankbased""" +68 61 dataset """kinships""" +68 61 model """conve""" +68 61 loss """nssa""" +68 61 regularizer """no""" +68 61 optimizer """adadelta""" +68 61 training_loop """owa""" +68 61 negative_sampler """basic""" +68 61 evaluator """rankbased""" +68 62 dataset """kinships""" +68 62 model """conve""" +68 62 loss """nssa""" +68 62 regularizer """no""" +68 62 optimizer """adadelta""" +68 62 training_loop """owa""" +68 62 negative_sampler """basic""" +68 62 evaluator """rankbased""" +68 63 dataset """kinships""" +68 63 model """conve""" +68 63 loss """nssa""" +68 63 regularizer """no""" +68 63 optimizer """adadelta""" +68 63 training_loop """owa""" +68 63 negative_sampler """basic""" +68 63 evaluator """rankbased""" +68 64 dataset """kinships""" +68 64 model """conve""" +68 64 loss """nssa""" +68 64 regularizer """no""" +68 64 optimizer """adadelta""" +68 64 training_loop """owa""" +68 64 negative_sampler """basic""" 
+68 64 evaluator """rankbased""" +68 65 dataset """kinships""" +68 65 model """conve""" +68 65 loss """nssa""" +68 65 regularizer """no""" +68 65 optimizer """adadelta""" +68 65 training_loop """owa""" +68 65 negative_sampler """basic""" +68 65 evaluator """rankbased""" +68 66 dataset """kinships""" +68 66 model """conve""" +68 66 loss """nssa""" +68 66 regularizer """no""" +68 66 optimizer """adadelta""" +68 66 training_loop """owa""" +68 66 negative_sampler """basic""" +68 66 evaluator """rankbased""" +68 67 dataset """kinships""" +68 67 model """conve""" +68 67 loss """nssa""" +68 67 regularizer """no""" +68 67 optimizer """adadelta""" +68 67 training_loop """owa""" +68 67 negative_sampler """basic""" +68 67 evaluator """rankbased""" +68 68 dataset """kinships""" +68 68 model """conve""" +68 68 loss """nssa""" +68 68 regularizer """no""" +68 68 optimizer """adadelta""" +68 68 training_loop """owa""" +68 68 negative_sampler """basic""" +68 68 evaluator """rankbased""" +68 69 dataset """kinships""" +68 69 model """conve""" +68 69 loss """nssa""" +68 69 regularizer """no""" +68 69 optimizer """adadelta""" +68 69 training_loop """owa""" +68 69 negative_sampler """basic""" +68 69 evaluator """rankbased""" +68 70 dataset """kinships""" +68 70 model """conve""" +68 70 loss """nssa""" +68 70 regularizer """no""" +68 70 optimizer """adadelta""" +68 70 training_loop """owa""" +68 70 negative_sampler """basic""" +68 70 evaluator """rankbased""" +68 71 dataset """kinships""" +68 71 model """conve""" +68 71 loss """nssa""" +68 71 regularizer """no""" +68 71 optimizer """adadelta""" +68 71 training_loop """owa""" +68 71 negative_sampler """basic""" +68 71 evaluator """rankbased""" +68 72 dataset """kinships""" +68 72 model """conve""" +68 72 loss """nssa""" +68 72 regularizer """no""" +68 72 optimizer """adadelta""" +68 72 training_loop """owa""" +68 72 negative_sampler """basic""" +68 72 evaluator """rankbased""" +68 73 dataset """kinships""" +68 73 model """conve""" +68 73 
loss """nssa""" +68 73 regularizer """no""" +68 73 optimizer """adadelta""" +68 73 training_loop """owa""" +68 73 negative_sampler """basic""" +68 73 evaluator """rankbased""" +68 74 dataset """kinships""" +68 74 model """conve""" +68 74 loss """nssa""" +68 74 regularizer """no""" +68 74 optimizer """adadelta""" +68 74 training_loop """owa""" +68 74 negative_sampler """basic""" +68 74 evaluator """rankbased""" +68 75 dataset """kinships""" +68 75 model """conve""" +68 75 loss """nssa""" +68 75 regularizer """no""" +68 75 optimizer """adadelta""" +68 75 training_loop """owa""" +68 75 negative_sampler """basic""" +68 75 evaluator """rankbased""" +68 76 dataset """kinships""" +68 76 model """conve""" +68 76 loss """nssa""" +68 76 regularizer """no""" +68 76 optimizer """adadelta""" +68 76 training_loop """owa""" +68 76 negative_sampler """basic""" +68 76 evaluator """rankbased""" +68 77 dataset """kinships""" +68 77 model """conve""" +68 77 loss """nssa""" +68 77 regularizer """no""" +68 77 optimizer """adadelta""" +68 77 training_loop """owa""" +68 77 negative_sampler """basic""" +68 77 evaluator """rankbased""" +68 78 dataset """kinships""" +68 78 model """conve""" +68 78 loss """nssa""" +68 78 regularizer """no""" +68 78 optimizer """adadelta""" +68 78 training_loop """owa""" +68 78 negative_sampler """basic""" +68 78 evaluator """rankbased""" +68 79 dataset """kinships""" +68 79 model """conve""" +68 79 loss """nssa""" +68 79 regularizer """no""" +68 79 optimizer """adadelta""" +68 79 training_loop """owa""" +68 79 negative_sampler """basic""" +68 79 evaluator """rankbased""" +68 80 dataset """kinships""" +68 80 model """conve""" +68 80 loss """nssa""" +68 80 regularizer """no""" +68 80 optimizer """adadelta""" +68 80 training_loop """owa""" +68 80 negative_sampler """basic""" +68 80 evaluator """rankbased""" +68 81 dataset """kinships""" +68 81 model """conve""" +68 81 loss """nssa""" +68 81 regularizer """no""" +68 81 optimizer """adadelta""" +68 81 
training_loop """owa""" +68 81 negative_sampler """basic""" +68 81 evaluator """rankbased""" +68 82 dataset """kinships""" +68 82 model """conve""" +68 82 loss """nssa""" +68 82 regularizer """no""" +68 82 optimizer """adadelta""" +68 82 training_loop """owa""" +68 82 negative_sampler """basic""" +68 82 evaluator """rankbased""" +68 83 dataset """kinships""" +68 83 model """conve""" +68 83 loss """nssa""" +68 83 regularizer """no""" +68 83 optimizer """adadelta""" +68 83 training_loop """owa""" +68 83 negative_sampler """basic""" +68 83 evaluator """rankbased""" +68 84 dataset """kinships""" +68 84 model """conve""" +68 84 loss """nssa""" +68 84 regularizer """no""" +68 84 optimizer """adadelta""" +68 84 training_loop """owa""" +68 84 negative_sampler """basic""" +68 84 evaluator """rankbased""" +68 85 dataset """kinships""" +68 85 model """conve""" +68 85 loss """nssa""" +68 85 regularizer """no""" +68 85 optimizer """adadelta""" +68 85 training_loop """owa""" +68 85 negative_sampler """basic""" +68 85 evaluator """rankbased""" +68 86 dataset """kinships""" +68 86 model """conve""" +68 86 loss """nssa""" +68 86 regularizer """no""" +68 86 optimizer """adadelta""" +68 86 training_loop """owa""" +68 86 negative_sampler """basic""" +68 86 evaluator """rankbased""" +68 87 dataset """kinships""" +68 87 model """conve""" +68 87 loss """nssa""" +68 87 regularizer """no""" +68 87 optimizer """adadelta""" +68 87 training_loop """owa""" +68 87 negative_sampler """basic""" +68 87 evaluator """rankbased""" +68 88 dataset """kinships""" +68 88 model """conve""" +68 88 loss """nssa""" +68 88 regularizer """no""" +68 88 optimizer """adadelta""" +68 88 training_loop """owa""" +68 88 negative_sampler """basic""" +68 88 evaluator """rankbased""" +68 89 dataset """kinships""" +68 89 model """conve""" +68 89 loss """nssa""" +68 89 regularizer """no""" +68 89 optimizer """adadelta""" +68 89 training_loop """owa""" +68 89 negative_sampler """basic""" +68 89 evaluator """rankbased""" 
+68 90 dataset """kinships""" +68 90 model """conve""" +68 90 loss """nssa""" +68 90 regularizer """no""" +68 90 optimizer """adadelta""" +68 90 training_loop """owa""" +68 90 negative_sampler """basic""" +68 90 evaluator """rankbased""" +68 91 dataset """kinships""" +68 91 model """conve""" +68 91 loss """nssa""" +68 91 regularizer """no""" +68 91 optimizer """adadelta""" +68 91 training_loop """owa""" +68 91 negative_sampler """basic""" +68 91 evaluator """rankbased""" +68 92 dataset """kinships""" +68 92 model """conve""" +68 92 loss """nssa""" +68 92 regularizer """no""" +68 92 optimizer """adadelta""" +68 92 training_loop """owa""" +68 92 negative_sampler """basic""" +68 92 evaluator """rankbased""" +68 93 dataset """kinships""" +68 93 model """conve""" +68 93 loss """nssa""" +68 93 regularizer """no""" +68 93 optimizer """adadelta""" +68 93 training_loop """owa""" +68 93 negative_sampler """basic""" +68 93 evaluator """rankbased""" +68 94 dataset """kinships""" +68 94 model """conve""" +68 94 loss """nssa""" +68 94 regularizer """no""" +68 94 optimizer """adadelta""" +68 94 training_loop """owa""" +68 94 negative_sampler """basic""" +68 94 evaluator """rankbased""" +68 95 dataset """kinships""" +68 95 model """conve""" +68 95 loss """nssa""" +68 95 regularizer """no""" +68 95 optimizer """adadelta""" +68 95 training_loop """owa""" +68 95 negative_sampler """basic""" +68 95 evaluator """rankbased""" +68 96 dataset """kinships""" +68 96 model """conve""" +68 96 loss """nssa""" +68 96 regularizer """no""" +68 96 optimizer """adadelta""" +68 96 training_loop """owa""" +68 96 negative_sampler """basic""" +68 96 evaluator """rankbased""" +68 97 dataset """kinships""" +68 97 model """conve""" +68 97 loss """nssa""" +68 97 regularizer """no""" +68 97 optimizer """adadelta""" +68 97 training_loop """owa""" +68 97 negative_sampler """basic""" +68 97 evaluator """rankbased""" +68 98 dataset """kinships""" +68 98 model """conve""" +68 98 loss """nssa""" +68 98 
regularizer """no""" +68 98 optimizer """adadelta""" +68 98 training_loop """owa""" +68 98 negative_sampler """basic""" +68 98 evaluator """rankbased""" +68 99 dataset """kinships""" +68 99 model """conve""" +68 99 loss """nssa""" +68 99 regularizer """no""" +68 99 optimizer """adadelta""" +68 99 training_loop """owa""" +68 99 negative_sampler """basic""" +68 99 evaluator """rankbased""" +68 100 dataset """kinships""" +68 100 model """conve""" +68 100 loss """nssa""" +68 100 regularizer """no""" +68 100 optimizer """adadelta""" +68 100 training_loop """owa""" +68 100 negative_sampler """basic""" +68 100 evaluator """rankbased""" +69 1 model.output_channels 35.0 +69 1 model.input_dropout 0.39568950671397557 +69 1 model.output_dropout 0.2873147608907749 +69 1 model.feature_map_dropout 0.26592005132963153 +69 1 model.embedding_dim 2.0 +69 1 loss.margin 22.005925902514566 +69 1 loss.adversarial_temperature 0.5899062320055367 +69 1 negative_sampler.num_negs_per_pos 50.0 +69 1 training.batch_size 0.0 +69 2 model.output_channels 49.0 +69 2 model.input_dropout 0.040035865237932344 +69 2 model.output_dropout 0.15418938599557364 +69 2 model.feature_map_dropout 0.04826437740227979 +69 2 model.embedding_dim 2.0 +69 2 loss.margin 23.021203721418402 +69 2 loss.adversarial_temperature 0.6664543423045117 +69 2 negative_sampler.num_negs_per_pos 91.0 +69 2 training.batch_size 1.0 +69 3 model.output_channels 50.0 +69 3 model.input_dropout 0.1404196981846293 +69 3 model.output_dropout 0.3143948877986882 +69 3 model.feature_map_dropout 0.12437064536804027 +69 3 model.embedding_dim 0.0 +69 3 loss.margin 24.007244068966063 +69 3 loss.adversarial_temperature 0.9614498093842125 +69 3 negative_sampler.num_negs_per_pos 62.0 +69 3 training.batch_size 2.0 +69 4 model.output_channels 41.0 +69 4 model.input_dropout 0.3053800889629833 +69 4 model.output_dropout 0.17542616080148937 +69 4 model.feature_map_dropout 0.07842963249618828 +69 4 model.embedding_dim 0.0 +69 4 loss.margin 2.298242274642453 
+69 4 loss.adversarial_temperature 0.6929502801290784 +69 4 negative_sampler.num_negs_per_pos 6.0 +69 4 training.batch_size 0.0 +69 5 model.output_channels 35.0 +69 5 model.input_dropout 0.11100307547904947 +69 5 model.output_dropout 0.1421528730385957 +69 5 model.feature_map_dropout 0.409456377854014 +69 5 model.embedding_dim 0.0 +69 5 loss.margin 14.851802430170432 +69 5 loss.adversarial_temperature 0.2655175916048907 +69 5 negative_sampler.num_negs_per_pos 19.0 +69 5 training.batch_size 1.0 +69 6 model.output_channels 39.0 +69 6 model.input_dropout 0.2603020041305569 +69 6 model.output_dropout 0.18302665673364688 +69 6 model.feature_map_dropout 0.062038191563375256 +69 6 model.embedding_dim 0.0 +69 6 loss.margin 4.995505300440754 +69 6 loss.adversarial_temperature 0.35900925163144193 +69 6 negative_sampler.num_negs_per_pos 24.0 +69 6 training.batch_size 1.0 +69 7 model.output_channels 18.0 +69 7 model.input_dropout 0.29803490602044636 +69 7 model.output_dropout 0.32766752359168305 +69 7 model.feature_map_dropout 0.3716790736228898 +69 7 model.embedding_dim 0.0 +69 7 loss.margin 4.40799951342069 +69 7 loss.adversarial_temperature 0.7082195702966521 +69 7 negative_sampler.num_negs_per_pos 59.0 +69 7 training.batch_size 2.0 +69 8 model.output_channels 42.0 +69 8 model.input_dropout 0.035685858431115824 +69 8 model.output_dropout 0.001991922194089002 +69 8 model.feature_map_dropout 0.47180946812401303 +69 8 model.embedding_dim 2.0 +69 8 loss.margin 4.566117805762239 +69 8 loss.adversarial_temperature 0.9109241799101229 +69 8 negative_sampler.num_negs_per_pos 1.0 +69 8 training.batch_size 2.0 +69 9 model.output_channels 41.0 +69 9 model.input_dropout 0.22143916286012638 +69 9 model.output_dropout 0.03312512435311432 +69 9 model.feature_map_dropout 0.2588667780592321 +69 9 model.embedding_dim 2.0 +69 9 loss.margin 23.3898341612123 +69 9 loss.adversarial_temperature 0.22560423273403768 +69 9 negative_sampler.num_negs_per_pos 90.0 +69 9 training.batch_size 2.0 +69 10 
model.output_channels 35.0 +69 10 model.input_dropout 0.3628817589159202 +69 10 model.output_dropout 0.4341050267374149 +69 10 model.feature_map_dropout 0.0021027437975712493 +69 10 model.embedding_dim 0.0 +69 10 loss.margin 23.979606079291603 +69 10 loss.adversarial_temperature 0.39955883119423374 +69 10 negative_sampler.num_negs_per_pos 82.0 +69 10 training.batch_size 2.0 +69 11 model.output_channels 32.0 +69 11 model.input_dropout 0.20323131944987255 +69 11 model.output_dropout 0.2849600309538825 +69 11 model.feature_map_dropout 0.01798191197848731 +69 11 model.embedding_dim 0.0 +69 11 loss.margin 18.04103709163501 +69 11 loss.adversarial_temperature 0.5985981404369812 +69 11 negative_sampler.num_negs_per_pos 23.0 +69 11 training.batch_size 2.0 +69 12 model.output_channels 17.0 +69 12 model.input_dropout 0.39587970635315384 +69 12 model.output_dropout 0.2818816451179968 +69 12 model.feature_map_dropout 0.3627916001298213 +69 12 model.embedding_dim 1.0 +69 12 loss.margin 16.59527304848576 +69 12 loss.adversarial_temperature 0.10312785908005251 +69 12 negative_sampler.num_negs_per_pos 62.0 +69 12 training.batch_size 0.0 +69 13 model.output_channels 31.0 +69 13 model.input_dropout 0.26950892369478785 +69 13 model.output_dropout 0.09931539692731628 +69 13 model.feature_map_dropout 0.13538874962004505 +69 13 model.embedding_dim 0.0 +69 13 loss.margin 22.114203118479658 +69 13 loss.adversarial_temperature 0.17076097633495863 +69 13 negative_sampler.num_negs_per_pos 50.0 +69 13 training.batch_size 1.0 +69 14 model.output_channels 25.0 +69 14 model.input_dropout 0.06983676238520559 +69 14 model.output_dropout 0.4810386877116662 +69 14 model.feature_map_dropout 0.451313352413037 +69 14 model.embedding_dim 0.0 +69 14 loss.margin 6.451662386975385 +69 14 loss.adversarial_temperature 0.9275441129838993 +69 14 negative_sampler.num_negs_per_pos 16.0 +69 14 training.batch_size 1.0 +69 15 model.output_channels 60.0 +69 15 model.input_dropout 0.41384239655191984 +69 15 
model.output_dropout 0.49822315679660106 +69 15 model.feature_map_dropout 0.4109638918594836 +69 15 model.embedding_dim 0.0 +69 15 loss.margin 29.94404814843638 +69 15 loss.adversarial_temperature 0.3226512678274028 +69 15 negative_sampler.num_negs_per_pos 2.0 +69 15 training.batch_size 0.0 +69 16 model.output_channels 18.0 +69 16 model.input_dropout 0.3009402484589055 +69 16 model.output_dropout 0.2275905820303999 +69 16 model.feature_map_dropout 0.42732323613997814 +69 16 model.embedding_dim 1.0 +69 16 loss.margin 21.262191410082934 +69 16 loss.adversarial_temperature 0.5866024021439143 +69 16 negative_sampler.num_negs_per_pos 9.0 +69 16 training.batch_size 1.0 +69 17 model.output_channels 62.0 +69 17 model.input_dropout 0.05731005147585638 +69 17 model.output_dropout 0.36474104972504695 +69 17 model.feature_map_dropout 0.44570808768183034 +69 17 model.embedding_dim 1.0 +69 17 loss.margin 28.30045487558347 +69 17 loss.adversarial_temperature 0.3031139358490733 +69 17 negative_sampler.num_negs_per_pos 76.0 +69 17 training.batch_size 0.0 +69 18 model.output_channels 17.0 +69 18 model.input_dropout 0.28073807982498716 +69 18 model.output_dropout 0.1246881293228368 +69 18 model.feature_map_dropout 0.1860872480414199 +69 18 model.embedding_dim 2.0 +69 18 loss.margin 26.985183547513827 +69 18 loss.adversarial_temperature 0.4784860227900256 +69 18 negative_sampler.num_negs_per_pos 86.0 +69 18 training.batch_size 0.0 +69 19 model.output_channels 52.0 +69 19 model.input_dropout 0.4091161279067257 +69 19 model.output_dropout 0.46836990102260045 +69 19 model.feature_map_dropout 0.15664958279199914 +69 19 model.embedding_dim 0.0 +69 19 loss.margin 16.3326174719089 +69 19 loss.adversarial_temperature 0.755177309580185 +69 19 negative_sampler.num_negs_per_pos 55.0 +69 19 training.batch_size 0.0 +69 20 model.output_channels 53.0 +69 20 model.input_dropout 0.1224595128428494 +69 20 model.output_dropout 0.0981166443470452 +69 20 model.feature_map_dropout 0.19035951442032134 +69 
20 model.embedding_dim 2.0 +69 20 loss.margin 1.4963274361316223 +69 20 loss.adversarial_temperature 0.5501251960986625 +69 20 negative_sampler.num_negs_per_pos 42.0 +69 20 training.batch_size 0.0 +69 21 model.output_channels 57.0 +69 21 model.input_dropout 0.054800627417417536 +69 21 model.output_dropout 0.28645435175651746 +69 21 model.feature_map_dropout 0.22710472193405146 +69 21 model.embedding_dim 2.0 +69 21 loss.margin 7.827038868345248 +69 21 loss.adversarial_temperature 0.29438002206856984 +69 21 negative_sampler.num_negs_per_pos 77.0 +69 21 training.batch_size 0.0 +69 22 model.output_channels 32.0 +69 22 model.input_dropout 0.4895707441954523 +69 22 model.output_dropout 0.17812650835196453 +69 22 model.feature_map_dropout 0.03840675039608421 +69 22 model.embedding_dim 2.0 +69 22 loss.margin 3.683601799699519 +69 22 loss.adversarial_temperature 0.6988634221499366 +69 22 negative_sampler.num_negs_per_pos 86.0 +69 22 training.batch_size 2.0 +69 23 model.output_channels 23.0 +69 23 model.input_dropout 0.4919158163907871 +69 23 model.output_dropout 0.43654564498210696 +69 23 model.feature_map_dropout 0.19370343402523776 +69 23 model.embedding_dim 2.0 +69 23 loss.margin 23.299109486349476 +69 23 loss.adversarial_temperature 0.3016514475079513 +69 23 negative_sampler.num_negs_per_pos 22.0 +69 23 training.batch_size 1.0 +69 24 model.output_channels 49.0 +69 24 model.input_dropout 0.1574926107495463 +69 24 model.output_dropout 0.014501485777610235 +69 24 model.feature_map_dropout 0.15126266665002913 +69 24 model.embedding_dim 2.0 +69 24 loss.margin 24.201109673565174 +69 24 loss.adversarial_temperature 0.7711200129401782 +69 24 negative_sampler.num_negs_per_pos 22.0 +69 24 training.batch_size 0.0 +69 25 model.output_channels 51.0 +69 25 model.input_dropout 0.34366341730904637 +69 25 model.output_dropout 0.4281007520890496 +69 25 model.feature_map_dropout 0.10509338396751217 +69 25 model.embedding_dim 1.0 +69 25 loss.margin 20.155091520319566 +69 25 
loss.adversarial_temperature 0.1843960561009775 +69 25 negative_sampler.num_negs_per_pos 17.0 +69 25 training.batch_size 1.0 +69 26 model.output_channels 30.0 +69 26 model.input_dropout 0.06765780563659868 +69 26 model.output_dropout 0.0033122016119767617 +69 26 model.feature_map_dropout 0.274533150200987 +69 26 model.embedding_dim 1.0 +69 26 loss.margin 23.80060460159941 +69 26 loss.adversarial_temperature 0.7954618910321424 +69 26 negative_sampler.num_negs_per_pos 85.0 +69 26 training.batch_size 2.0 +69 27 model.output_channels 20.0 +69 27 model.input_dropout 0.27824665164572704 +69 27 model.output_dropout 0.4712558661590039 +69 27 model.feature_map_dropout 0.49204707515061014 +69 27 model.embedding_dim 0.0 +69 27 loss.margin 15.368469578081198 +69 27 loss.adversarial_temperature 0.9939280198478715 +69 27 negative_sampler.num_negs_per_pos 45.0 +69 27 training.batch_size 2.0 +69 28 model.output_channels 49.0 +69 28 model.input_dropout 0.07291678352912889 +69 28 model.output_dropout 0.1878199893379905 +69 28 model.feature_map_dropout 0.39265821185524336 +69 28 model.embedding_dim 2.0 +69 28 loss.margin 15.28180605556103 +69 28 loss.adversarial_temperature 0.23154297343588526 +69 28 negative_sampler.num_negs_per_pos 23.0 +69 28 training.batch_size 0.0 +69 29 model.output_channels 16.0 +69 29 model.input_dropout 0.23023794333107445 +69 29 model.output_dropout 0.2372008544058437 +69 29 model.feature_map_dropout 0.34573971149357396 +69 29 model.embedding_dim 2.0 +69 29 loss.margin 29.930790684873404 +69 29 loss.adversarial_temperature 0.5049330271421147 +69 29 negative_sampler.num_negs_per_pos 71.0 +69 29 training.batch_size 2.0 +69 30 model.output_channels 57.0 +69 30 model.input_dropout 0.18432044548133553 +69 30 model.output_dropout 0.4466601628683159 +69 30 model.feature_map_dropout 0.22547682018430187 +69 30 model.embedding_dim 1.0 +69 30 loss.margin 3.5236680618675438 +69 30 loss.adversarial_temperature 0.5111516917583208 +69 30 negative_sampler.num_negs_per_pos 
75.0 +69 30 training.batch_size 0.0 +69 31 model.output_channels 59.0 +69 31 model.input_dropout 0.23397508826541225 +69 31 model.output_dropout 0.2813845723494864 +69 31 model.feature_map_dropout 0.3191192478328294 +69 31 model.embedding_dim 1.0 +69 31 loss.margin 4.218489117470439 +69 31 loss.adversarial_temperature 0.7353165913713505 +69 31 negative_sampler.num_negs_per_pos 37.0 +69 31 training.batch_size 1.0 +69 32 model.output_channels 46.0 +69 32 model.input_dropout 0.3422992945987695 +69 32 model.output_dropout 0.021065470587225132 +69 32 model.feature_map_dropout 0.3769152822554099 +69 32 model.embedding_dim 1.0 +69 32 loss.margin 9.933785473809467 +69 32 loss.adversarial_temperature 0.28636264928888044 +69 32 negative_sampler.num_negs_per_pos 1.0 +69 32 training.batch_size 2.0 +69 33 model.output_channels 60.0 +69 33 model.input_dropout 0.24039893500636267 +69 33 model.output_dropout 0.45998693608491475 +69 33 model.feature_map_dropout 0.20662434014862252 +69 33 model.embedding_dim 0.0 +69 33 loss.margin 29.67979263372318 +69 33 loss.adversarial_temperature 0.8289655570282894 +69 33 negative_sampler.num_negs_per_pos 44.0 +69 33 training.batch_size 0.0 +69 34 model.output_channels 35.0 +69 34 model.input_dropout 0.43108685054455803 +69 34 model.output_dropout 0.1721356008649303 +69 34 model.feature_map_dropout 0.4095550967433837 +69 34 model.embedding_dim 1.0 +69 34 loss.margin 20.69139773641489 +69 34 loss.adversarial_temperature 0.8835466150951088 +69 34 negative_sampler.num_negs_per_pos 0.0 +69 34 training.batch_size 0.0 +69 35 model.output_channels 34.0 +69 35 model.input_dropout 0.3152061671939604 +69 35 model.output_dropout 0.2991505256926777 +69 35 model.feature_map_dropout 0.4476852445286637 +69 35 model.embedding_dim 0.0 +69 35 loss.margin 27.832042217657744 +69 35 loss.adversarial_temperature 0.6513152852016596 +69 35 negative_sampler.num_negs_per_pos 30.0 +69 35 training.batch_size 1.0 +69 36 model.output_channels 53.0 +69 36 model.input_dropout 
0.33315011736675676 +69 36 model.output_dropout 0.41306523989112987 +69 36 model.feature_map_dropout 0.15561861656240966 +69 36 model.embedding_dim 0.0 +69 36 loss.margin 15.057997126354651 +69 36 loss.adversarial_temperature 0.24360327673140003 +69 36 negative_sampler.num_negs_per_pos 82.0 +69 36 training.batch_size 0.0 +69 37 model.output_channels 19.0 +69 37 model.input_dropout 0.030649440532110717 +69 37 model.output_dropout 0.47099315652143764 +69 37 model.feature_map_dropout 0.40259114488338615 +69 37 model.embedding_dim 1.0 +69 37 loss.margin 28.18067483459225 +69 37 loss.adversarial_temperature 0.6087684841872257 +69 37 negative_sampler.num_negs_per_pos 46.0 +69 37 training.batch_size 1.0 +69 38 model.output_channels 40.0 +69 38 model.input_dropout 0.40064355175681615 +69 38 model.output_dropout 0.26486984552163695 +69 38 model.feature_map_dropout 0.4374873606452479 +69 38 model.embedding_dim 1.0 +69 38 loss.margin 6.076628186880398 +69 38 loss.adversarial_temperature 0.44043903273024654 +69 38 negative_sampler.num_negs_per_pos 34.0 +69 38 training.batch_size 0.0 +69 39 model.output_channels 29.0 +69 39 model.input_dropout 0.008362405500078551 +69 39 model.output_dropout 0.1569857623231734 +69 39 model.feature_map_dropout 0.3387478151068157 +69 39 model.embedding_dim 2.0 +69 39 loss.margin 7.5956524703596155 +69 39 loss.adversarial_temperature 0.6901877153783124 +69 39 negative_sampler.num_negs_per_pos 60.0 +69 39 training.batch_size 1.0 +69 40 model.output_channels 56.0 +69 40 model.input_dropout 0.4532382343387826 +69 40 model.output_dropout 0.02130214806009645 +69 40 model.feature_map_dropout 0.27878088631415865 +69 40 model.embedding_dim 1.0 +69 40 loss.margin 18.467817101704348 +69 40 loss.adversarial_temperature 0.7191701827377486 +69 40 negative_sampler.num_negs_per_pos 90.0 +69 40 training.batch_size 2.0 +69 41 model.output_channels 47.0 +69 41 model.input_dropout 0.22469026081759702 +69 41 model.output_dropout 0.3301892116851303 +69 41 
model.feature_map_dropout 0.16769847755704215 +69 41 model.embedding_dim 0.0 +69 41 loss.margin 4.561450468345534 +69 41 loss.adversarial_temperature 0.21020393672834584 +69 41 negative_sampler.num_negs_per_pos 37.0 +69 41 training.batch_size 2.0 +69 42 model.output_channels 20.0 +69 42 model.input_dropout 0.40772756545933014 +69 42 model.output_dropout 0.19459847429906169 +69 42 model.feature_map_dropout 0.37718161374812903 +69 42 model.embedding_dim 1.0 +69 42 loss.margin 29.431934103700588 +69 42 loss.adversarial_temperature 0.49283819654922034 +69 42 negative_sampler.num_negs_per_pos 52.0 +69 42 training.batch_size 0.0 +69 43 model.output_channels 24.0 +69 43 model.input_dropout 0.027662460813981993 +69 43 model.output_dropout 0.3626450743329154 +69 43 model.feature_map_dropout 0.03596565999912671 +69 43 model.embedding_dim 2.0 +69 43 loss.margin 10.559016376242452 +69 43 loss.adversarial_temperature 0.294121969374154 +69 43 negative_sampler.num_negs_per_pos 8.0 +69 43 training.batch_size 0.0 +69 44 model.output_channels 28.0 +69 44 model.input_dropout 0.11293040643174773 +69 44 model.output_dropout 0.22458372337036686 +69 44 model.feature_map_dropout 0.17169449843101803 +69 44 model.embedding_dim 1.0 +69 44 loss.margin 24.518004104875285 +69 44 loss.adversarial_temperature 0.5425902810868732 +69 44 negative_sampler.num_negs_per_pos 15.0 +69 44 training.batch_size 0.0 +69 45 model.output_channels 21.0 +69 45 model.input_dropout 0.25115758237771996 +69 45 model.output_dropout 0.28306591045088914 +69 45 model.feature_map_dropout 0.48275662289051163 +69 45 model.embedding_dim 2.0 +69 45 loss.margin 17.55856453866523 +69 45 loss.adversarial_temperature 0.49290170706575553 +69 45 negative_sampler.num_negs_per_pos 15.0 +69 45 training.batch_size 2.0 +69 46 model.output_channels 31.0 +69 46 model.input_dropout 0.3909556497401969 +69 46 model.output_dropout 0.44384223352505214 +69 46 model.feature_map_dropout 0.3468201015600907 +69 46 model.embedding_dim 1.0 +69 46 
loss.margin 9.012483414504967 +69 46 loss.adversarial_temperature 0.7565998620678112 +69 46 negative_sampler.num_negs_per_pos 39.0 +69 46 training.batch_size 2.0 +69 47 model.output_channels 63.0 +69 47 model.input_dropout 0.19996871560116208 +69 47 model.output_dropout 0.41918465894991674 +69 47 model.feature_map_dropout 0.190929588224929 +69 47 model.embedding_dim 0.0 +69 47 loss.margin 16.28146677231208 +69 47 loss.adversarial_temperature 0.4441309064805028 +69 47 negative_sampler.num_negs_per_pos 71.0 +69 47 training.batch_size 2.0 +69 48 model.output_channels 36.0 +69 48 model.input_dropout 0.41658356209994357 +69 48 model.output_dropout 0.07397239436627434 +69 48 model.feature_map_dropout 0.40873923880300334 +69 48 model.embedding_dim 0.0 +69 48 loss.margin 20.559759964357067 +69 48 loss.adversarial_temperature 0.9928825398634913 +69 48 negative_sampler.num_negs_per_pos 52.0 +69 48 training.batch_size 1.0 +69 49 model.output_channels 31.0 +69 49 model.input_dropout 0.25083234354490286 +69 49 model.output_dropout 0.3296321370761437 +69 49 model.feature_map_dropout 0.19695973656863486 +69 49 model.embedding_dim 1.0 +69 49 loss.margin 1.3090278918189089 +69 49 loss.adversarial_temperature 0.7327813565618099 +69 49 negative_sampler.num_negs_per_pos 43.0 +69 49 training.batch_size 2.0 +69 50 model.output_channels 47.0 +69 50 model.input_dropout 0.11780495949017733 +69 50 model.output_dropout 0.18775938455076607 +69 50 model.feature_map_dropout 0.21245125104031298 +69 50 model.embedding_dim 2.0 +69 50 loss.margin 23.486258579507382 +69 50 loss.adversarial_temperature 0.9777594074582163 +69 50 negative_sampler.num_negs_per_pos 19.0 +69 50 training.batch_size 0.0 +69 51 model.output_channels 45.0 +69 51 model.input_dropout 0.3246411263668859 +69 51 model.output_dropout 0.1632196434145904 +69 51 model.feature_map_dropout 0.42519348977474547 +69 51 model.embedding_dim 1.0 +69 51 loss.margin 25.657140721525366 +69 51 loss.adversarial_temperature 0.8041377440907185 +69 
51 negative_sampler.num_negs_per_pos 61.0 +69 51 training.batch_size 1.0 +69 52 model.output_channels 29.0 +69 52 model.input_dropout 0.08736463296673813 +69 52 model.output_dropout 0.4034651943525933 +69 52 model.feature_map_dropout 0.20300103832743727 +69 52 model.embedding_dim 2.0 +69 52 loss.margin 23.475683657548135 +69 52 loss.adversarial_temperature 0.16314579643899008 +69 52 negative_sampler.num_negs_per_pos 39.0 +69 52 training.batch_size 2.0 +69 53 model.output_channels 58.0 +69 53 model.input_dropout 0.29977759337802534 +69 53 model.output_dropout 0.4143366030597655 +69 53 model.feature_map_dropout 0.47623301522175826 +69 53 model.embedding_dim 1.0 +69 53 loss.margin 17.796848535964113 +69 53 loss.adversarial_temperature 0.8460359564376011 +69 53 negative_sampler.num_negs_per_pos 8.0 +69 53 training.batch_size 1.0 +69 54 model.output_channels 24.0 +69 54 model.input_dropout 0.41126711237124824 +69 54 model.output_dropout 0.20256714791371377 +69 54 model.feature_map_dropout 0.05034492503190502 +69 54 model.embedding_dim 0.0 +69 54 loss.margin 8.473273828976744 +69 54 loss.adversarial_temperature 0.8295197259652578 +69 54 negative_sampler.num_negs_per_pos 31.0 +69 54 training.batch_size 0.0 +69 55 model.output_channels 40.0 +69 55 model.input_dropout 0.47233165053958387 +69 55 model.output_dropout 0.2803095036659577 +69 55 model.feature_map_dropout 0.3166709758206479 +69 55 model.embedding_dim 2.0 +69 55 loss.margin 18.063467747099743 +69 55 loss.adversarial_temperature 0.9110874333458184 +69 55 negative_sampler.num_negs_per_pos 47.0 +69 55 training.batch_size 0.0 +69 56 model.output_channels 47.0 +69 56 model.input_dropout 0.006984177790463675 +69 56 model.output_dropout 0.006591557388392899 +69 56 model.feature_map_dropout 0.047189228651402626 +69 56 model.embedding_dim 2.0 +69 56 loss.margin 16.495285534761962 +69 56 loss.adversarial_temperature 0.927974561579944 +69 56 negative_sampler.num_negs_per_pos 91.0 +69 56 training.batch_size 1.0 +69 57 
model.output_channels 39.0 +69 57 model.input_dropout 0.16534380084642064 +69 57 model.output_dropout 0.4520200100729564 +69 57 model.feature_map_dropout 0.34967513441339315 +69 57 model.embedding_dim 2.0 +69 57 loss.margin 28.524455172904236 +69 57 loss.adversarial_temperature 0.13773543989423426 +69 57 negative_sampler.num_negs_per_pos 68.0 +69 57 training.batch_size 1.0 +69 58 model.output_channels 57.0 +69 58 model.input_dropout 0.05303279695308749 +69 58 model.output_dropout 0.34373268638558585 +69 58 model.feature_map_dropout 0.03890970727911902 +69 58 model.embedding_dim 2.0 +69 58 loss.margin 25.240539763226934 +69 58 loss.adversarial_temperature 0.614101431942396 +69 58 negative_sampler.num_negs_per_pos 1.0 +69 58 training.batch_size 1.0 +69 59 model.output_channels 22.0 +69 59 model.input_dropout 0.24332807578578813 +69 59 model.output_dropout 0.22969081445910483 +69 59 model.feature_map_dropout 0.30112740034591545 +69 59 model.embedding_dim 2.0 +69 59 loss.margin 14.242156726218484 +69 59 loss.adversarial_temperature 0.7089892003132883 +69 59 negative_sampler.num_negs_per_pos 98.0 +69 59 training.batch_size 1.0 +69 60 model.output_channels 34.0 +69 60 model.input_dropout 0.10834839540282531 +69 60 model.output_dropout 0.3197865767565106 +69 60 model.feature_map_dropout 0.32733370335944845 +69 60 model.embedding_dim 1.0 +69 60 loss.margin 28.658248914212926 +69 60 loss.adversarial_temperature 0.4831325912216097 +69 60 negative_sampler.num_negs_per_pos 0.0 +69 60 training.batch_size 1.0 +69 61 model.output_channels 41.0 +69 61 model.input_dropout 0.24948727170237478 +69 61 model.output_dropout 0.07721415946039006 +69 61 model.feature_map_dropout 0.49249930901506295 +69 61 model.embedding_dim 1.0 +69 61 loss.margin 3.006848280878753 +69 61 loss.adversarial_temperature 0.7711290924373585 +69 61 negative_sampler.num_negs_per_pos 2.0 +69 61 training.batch_size 0.0 +69 62 model.output_channels 64.0 +69 62 model.input_dropout 0.020648008919226746 +69 62 
model.output_dropout 0.39608335318979215 +69 62 model.feature_map_dropout 0.39075054403378273 +69 62 model.embedding_dim 2.0 +69 62 loss.margin 4.313242285405803 +69 62 loss.adversarial_temperature 0.5794895354421489 +69 62 negative_sampler.num_negs_per_pos 70.0 +69 62 training.batch_size 0.0 +69 63 model.output_channels 60.0 +69 63 model.input_dropout 0.0674420165083458 +69 63 model.output_dropout 0.33337177916647526 +69 63 model.feature_map_dropout 0.42761471892985997 +69 63 model.embedding_dim 1.0 +69 63 loss.margin 12.195136184563149 +69 63 loss.adversarial_temperature 0.618469624505973 +69 63 negative_sampler.num_negs_per_pos 81.0 +69 63 training.batch_size 2.0 +69 64 model.output_channels 52.0 +69 64 model.input_dropout 0.0858726466058124 +69 64 model.output_dropout 0.12731335812147426 +69 64 model.feature_map_dropout 0.12033800525366906 +69 64 model.embedding_dim 0.0 +69 64 loss.margin 28.727191384717774 +69 64 loss.adversarial_temperature 0.9186245022571221 +69 64 negative_sampler.num_negs_per_pos 31.0 +69 64 training.batch_size 2.0 +69 65 model.output_channels 22.0 +69 65 model.input_dropout 0.4679601812702997 +69 65 model.output_dropout 0.37260531967802185 +69 65 model.feature_map_dropout 0.3521080054390681 +69 65 model.embedding_dim 1.0 +69 65 loss.margin 1.916204099521138 +69 65 loss.adversarial_temperature 0.5647676687553761 +69 65 negative_sampler.num_negs_per_pos 41.0 +69 65 training.batch_size 1.0 +69 66 model.output_channels 42.0 +69 66 model.input_dropout 0.4232765608033128 +69 66 model.output_dropout 0.3877161070004306 +69 66 model.feature_map_dropout 0.1872692005335551 +69 66 model.embedding_dim 0.0 +69 66 loss.margin 9.1633269514281 +69 66 loss.adversarial_temperature 0.14572348012666653 +69 66 negative_sampler.num_negs_per_pos 94.0 +69 66 training.batch_size 2.0 +69 67 model.output_channels 26.0 +69 67 model.input_dropout 0.4082743638173051 +69 67 model.output_dropout 0.009626758512293887 +69 67 model.feature_map_dropout 0.2813816056123582 +69 
67 model.embedding_dim 2.0 +69 67 loss.margin 19.96750617235878 +69 67 loss.adversarial_temperature 0.37879946047635604 +69 67 negative_sampler.num_negs_per_pos 6.0 +69 67 training.batch_size 0.0 +69 68 model.output_channels 34.0 +69 68 model.input_dropout 0.0559174217147545 +69 68 model.output_dropout 0.13531597515543747 +69 68 model.feature_map_dropout 0.43349889541736514 +69 68 model.embedding_dim 0.0 +69 68 loss.margin 24.128328246172654 +69 68 loss.adversarial_temperature 0.27477551806740097 +69 68 negative_sampler.num_negs_per_pos 59.0 +69 68 training.batch_size 1.0 +69 69 model.output_channels 32.0 +69 69 model.input_dropout 0.4519730480284708 +69 69 model.output_dropout 0.15944723749320155 +69 69 model.feature_map_dropout 0.10509080014346128 +69 69 model.embedding_dim 0.0 +69 69 loss.margin 29.47973289020249 +69 69 loss.adversarial_temperature 0.7602746428333008 +69 69 negative_sampler.num_negs_per_pos 88.0 +69 69 training.batch_size 2.0 +69 70 model.output_channels 58.0 +69 70 model.input_dropout 0.3022403830792157 +69 70 model.output_dropout 0.37077925155692637 +69 70 model.feature_map_dropout 0.21642538521069155 +69 70 model.embedding_dim 1.0 +69 70 loss.margin 22.477069455965317 +69 70 loss.adversarial_temperature 0.39913971805797244 +69 70 negative_sampler.num_negs_per_pos 24.0 +69 70 training.batch_size 0.0 +69 71 model.output_channels 16.0 +69 71 model.input_dropout 0.48687200645287093 +69 71 model.output_dropout 0.2908680466423948 +69 71 model.feature_map_dropout 0.4131340475278493 +69 71 model.embedding_dim 0.0 +69 71 loss.margin 23.04802324862063 +69 71 loss.adversarial_temperature 0.6548709818006259 +69 71 negative_sampler.num_negs_per_pos 49.0 +69 71 training.batch_size 2.0 +69 72 model.output_channels 20.0 +69 72 model.input_dropout 0.046608567427202596 +69 72 model.output_dropout 0.0893158678139081 +69 72 model.feature_map_dropout 0.3906692016426993 +69 72 model.embedding_dim 1.0 +69 72 loss.margin 15.18916020501867 +69 72 
loss.adversarial_temperature 0.601623762786317 +69 72 negative_sampler.num_negs_per_pos 49.0 +69 72 training.batch_size 0.0 +69 73 model.output_channels 45.0 +69 73 model.input_dropout 0.3345217129099683 +69 73 model.output_dropout 0.058391836971426025 +69 73 model.feature_map_dropout 0.19359897974672746 +69 73 model.embedding_dim 0.0 +69 73 loss.margin 13.108690091650992 +69 73 loss.adversarial_temperature 0.6683724655423404 +69 73 negative_sampler.num_negs_per_pos 67.0 +69 73 training.batch_size 1.0 +69 74 model.output_channels 62.0 +69 74 model.input_dropout 0.07085564222045349 +69 74 model.output_dropout 0.04292843959555931 +69 74 model.feature_map_dropout 0.03236642353103125 +69 74 model.embedding_dim 0.0 +69 74 loss.margin 25.039980915432928 +69 74 loss.adversarial_temperature 0.814338862178433 +69 74 negative_sampler.num_negs_per_pos 89.0 +69 74 training.batch_size 0.0 +69 75 model.output_channels 38.0 +69 75 model.input_dropout 0.4252232295167515 +69 75 model.output_dropout 0.12131516856786578 +69 75 model.feature_map_dropout 0.4934833823650671 +69 75 model.embedding_dim 1.0 +69 75 loss.margin 6.201413237771342 +69 75 loss.adversarial_temperature 0.7179381458955375 +69 75 negative_sampler.num_negs_per_pos 2.0 +69 75 training.batch_size 0.0 +69 76 model.output_channels 39.0 +69 76 model.input_dropout 0.21053330755018262 +69 76 model.output_dropout 0.07368810582035679 +69 76 model.feature_map_dropout 0.352350486215181 +69 76 model.embedding_dim 2.0 +69 76 loss.margin 9.467120473115397 +69 76 loss.adversarial_temperature 0.7692414753438336 +69 76 negative_sampler.num_negs_per_pos 67.0 +69 76 training.batch_size 2.0 +69 77 model.output_channels 34.0 +69 77 model.input_dropout 0.10686834328910921 +69 77 model.output_dropout 0.0262282693465532 +69 77 model.feature_map_dropout 0.47518797949487096 +69 77 model.embedding_dim 0.0 +69 77 loss.margin 6.283055011476395 +69 77 loss.adversarial_temperature 0.8284465963539288 +69 77 negative_sampler.num_negs_per_pos 48.0 
+69 77 training.batch_size 0.0 +69 78 model.output_channels 49.0 +69 78 model.input_dropout 0.33071116188974137 +69 78 model.output_dropout 0.2488023813143284 +69 78 model.feature_map_dropout 0.1439357416309101 +69 78 model.embedding_dim 0.0 +69 78 loss.margin 10.877270670498113 +69 78 loss.adversarial_temperature 0.465970690191416 +69 78 negative_sampler.num_negs_per_pos 18.0 +69 78 training.batch_size 1.0 +69 79 model.output_channels 49.0 +69 79 model.input_dropout 0.37829430684324017 +69 79 model.output_dropout 0.18837467436029276 +69 79 model.feature_map_dropout 0.24560055567229921 +69 79 model.embedding_dim 0.0 +69 79 loss.margin 11.683021167962531 +69 79 loss.adversarial_temperature 0.2188127644022334 +69 79 negative_sampler.num_negs_per_pos 45.0 +69 79 training.batch_size 0.0 +69 80 model.output_channels 36.0 +69 80 model.input_dropout 0.1903518880214618 +69 80 model.output_dropout 0.44532049079240227 +69 80 model.feature_map_dropout 0.1285549866613871 +69 80 model.embedding_dim 1.0 +69 80 loss.margin 19.421171199799794 +69 80 loss.adversarial_temperature 0.4165656305986011 +69 80 negative_sampler.num_negs_per_pos 37.0 +69 80 training.batch_size 0.0 +69 81 model.output_channels 19.0 +69 81 model.input_dropout 0.393183420859265 +69 81 model.output_dropout 0.05253867822967401 +69 81 model.feature_map_dropout 0.23949366321140786 +69 81 model.embedding_dim 1.0 +69 81 loss.margin 25.678525350202822 +69 81 loss.adversarial_temperature 0.29546120155777544 +69 81 negative_sampler.num_negs_per_pos 3.0 +69 81 training.batch_size 1.0 +69 82 model.output_channels 41.0 +69 82 model.input_dropout 0.0805330283585905 +69 82 model.output_dropout 0.23361016610437502 +69 82 model.feature_map_dropout 0.3532192476838789 +69 82 model.embedding_dim 0.0 +69 82 loss.margin 15.344438791766803 +69 82 loss.adversarial_temperature 0.3537455233668918 +69 82 negative_sampler.num_negs_per_pos 1.0 +69 82 training.batch_size 2.0 +69 83 model.output_channels 37.0 +69 83 model.input_dropout 
0.3772622063633648 +69 83 model.output_dropout 0.2729176896481793 +69 83 model.feature_map_dropout 0.269909338292288 +69 83 model.embedding_dim 0.0 +69 83 loss.margin 19.862903876057743 +69 83 loss.adversarial_temperature 0.4570324931338927 +69 83 negative_sampler.num_negs_per_pos 93.0 +69 83 training.batch_size 0.0 +69 84 model.output_channels 25.0 +69 84 model.input_dropout 0.16954664715843965 +69 84 model.output_dropout 0.0373024783335153 +69 84 model.feature_map_dropout 0.48092585181393793 +69 84 model.embedding_dim 1.0 +69 84 loss.margin 27.125372304954695 +69 84 loss.adversarial_temperature 0.18055492883526614 +69 84 negative_sampler.num_negs_per_pos 23.0 +69 84 training.batch_size 0.0 +69 85 model.output_channels 30.0 +69 85 model.input_dropout 0.45768786467409245 +69 85 model.output_dropout 0.42295403348072885 +69 85 model.feature_map_dropout 0.3593883208086576 +69 85 model.embedding_dim 1.0 +69 85 loss.margin 11.468525020293626 +69 85 loss.adversarial_temperature 0.9469979226397132 +69 85 negative_sampler.num_negs_per_pos 55.0 +69 85 training.batch_size 2.0 +69 86 model.output_channels 44.0 +69 86 model.input_dropout 0.28072281102089863 +69 86 model.output_dropout 0.49509495122616665 +69 86 model.feature_map_dropout 0.2695114193407048 +69 86 model.embedding_dim 1.0 +69 86 loss.margin 22.046013715080626 +69 86 loss.adversarial_temperature 0.4105867590150498 +69 86 negative_sampler.num_negs_per_pos 73.0 +69 86 training.batch_size 1.0 +69 87 model.output_channels 23.0 +69 87 model.input_dropout 0.09077301117639608 +69 87 model.output_dropout 0.48280371088955754 +69 87 model.feature_map_dropout 0.0652581358862509 +69 87 model.embedding_dim 0.0 +69 87 loss.margin 17.281313164187466 +69 87 loss.adversarial_temperature 0.17115011939287078 +69 87 negative_sampler.num_negs_per_pos 64.0 +69 87 training.batch_size 0.0 +69 88 model.output_channels 46.0 +69 88 model.input_dropout 0.45168705785755836 +69 88 model.output_dropout 0.34787471891832544 +69 88 
model.feature_map_dropout 0.32273079723007037 +69 88 model.embedding_dim 2.0 +69 88 loss.margin 16.145112317361363 +69 88 loss.adversarial_temperature 0.8952835657285609 +69 88 negative_sampler.num_negs_per_pos 52.0 +69 88 training.batch_size 2.0 +69 89 model.output_channels 62.0 +69 89 model.input_dropout 0.3465050023613877 +69 89 model.output_dropout 0.3867054188374029 +69 89 model.feature_map_dropout 0.10065278384476606 +69 89 model.embedding_dim 1.0 +69 89 loss.margin 9.807470297278023 +69 89 loss.adversarial_temperature 0.14243306107740744 +69 89 negative_sampler.num_negs_per_pos 44.0 +69 89 training.batch_size 0.0 +69 90 model.output_channels 48.0 +69 90 model.input_dropout 0.4749787430065761 +69 90 model.output_dropout 0.42255407627725594 +69 90 model.feature_map_dropout 0.47624946164048687 +69 90 model.embedding_dim 1.0 +69 90 loss.margin 13.887372947070888 +69 90 loss.adversarial_temperature 0.3569833845972359 +69 90 negative_sampler.num_negs_per_pos 31.0 +69 90 training.batch_size 0.0 +69 91 model.output_channels 62.0 +69 91 model.input_dropout 0.2811031359175886 +69 91 model.output_dropout 0.3560535506227817 +69 91 model.feature_map_dropout 0.15044558599542746 +69 91 model.embedding_dim 0.0 +69 91 loss.margin 7.889216547695796 +69 91 loss.adversarial_temperature 0.21567455976042982 +69 91 negative_sampler.num_negs_per_pos 75.0 +69 91 training.batch_size 1.0 +69 92 model.output_channels 39.0 +69 92 model.input_dropout 0.22384283844780328 +69 92 model.output_dropout 0.3612956314425663 +69 92 model.feature_map_dropout 0.02907077216977716 +69 92 model.embedding_dim 1.0 +69 92 loss.margin 22.88685410865454 +69 92 loss.adversarial_temperature 0.8301741675607949 +69 92 negative_sampler.num_negs_per_pos 58.0 +69 92 training.batch_size 0.0 +69 93 model.output_channels 46.0 +69 93 model.input_dropout 0.28991441236189863 +69 93 model.output_dropout 0.12601313688126037 +69 93 model.feature_map_dropout 0.13471937756855268 +69 93 model.embedding_dim 1.0 +69 93 
loss.margin 8.5173527247609 +69 93 loss.adversarial_temperature 0.29550382184002544 +69 93 negative_sampler.num_negs_per_pos 1.0 +69 93 training.batch_size 1.0 +69 94 model.output_channels 48.0 +69 94 model.input_dropout 0.0029170443135788093 +69 94 model.output_dropout 0.37759894928831983 +69 94 model.feature_map_dropout 0.3528650712559649 +69 94 model.embedding_dim 2.0 +69 94 loss.margin 26.885507853080586 +69 94 loss.adversarial_temperature 0.7312056117401337 +69 94 negative_sampler.num_negs_per_pos 26.0 +69 94 training.batch_size 2.0 +69 95 model.output_channels 57.0 +69 95 model.input_dropout 0.09030219495084901 +69 95 model.output_dropout 0.4329708103709056 +69 95 model.feature_map_dropout 0.035223900718844625 +69 95 model.embedding_dim 1.0 +69 95 loss.margin 12.994210048763234 +69 95 loss.adversarial_temperature 0.48068700858299074 +69 95 negative_sampler.num_negs_per_pos 66.0 +69 95 training.batch_size 2.0 +69 96 model.output_channels 57.0 +69 96 model.input_dropout 0.16314082395723634 +69 96 model.output_dropout 0.13381931118510326 +69 96 model.feature_map_dropout 0.228929143908436 +69 96 model.embedding_dim 0.0 +69 96 loss.margin 21.2973189248062 +69 96 loss.adversarial_temperature 0.1814167761414353 +69 96 negative_sampler.num_negs_per_pos 75.0 +69 96 training.batch_size 2.0 +69 97 model.output_channels 18.0 +69 97 model.input_dropout 0.3246327141096136 +69 97 model.output_dropout 0.1270629367673093 +69 97 model.feature_map_dropout 0.4231993949686396 +69 97 model.embedding_dim 0.0 +69 97 loss.margin 24.37859101221804 +69 97 loss.adversarial_temperature 0.2132548139485847 +69 97 negative_sampler.num_negs_per_pos 33.0 +69 97 training.batch_size 1.0 +69 98 model.output_channels 45.0 +69 98 model.input_dropout 0.27019463388500164 +69 98 model.output_dropout 0.11891323979689866 +69 98 model.feature_map_dropout 0.3088914210584645 +69 98 model.embedding_dim 1.0 +69 98 loss.margin 8.218648304289509 +69 98 loss.adversarial_temperature 0.592127164465079 +69 98 
negative_sampler.num_negs_per_pos 66.0 +69 98 training.batch_size 0.0 +69 99 model.output_channels 25.0 +69 99 model.input_dropout 0.45828058257353416 +69 99 model.output_dropout 0.28839845743465625 +69 99 model.feature_map_dropout 0.2011827896054154 +69 99 model.embedding_dim 2.0 +69 99 loss.margin 27.1259527078071 +69 99 loss.adversarial_temperature 0.33270061111983623 +69 99 negative_sampler.num_negs_per_pos 93.0 +69 99 training.batch_size 0.0 +69 100 model.output_channels 26.0 +69 100 model.input_dropout 0.30289301819820386 +69 100 model.output_dropout 0.39845370841643096 +69 100 model.feature_map_dropout 0.47174834916723085 +69 100 model.embedding_dim 0.0 +69 100 loss.margin 21.035804868101057 +69 100 loss.adversarial_temperature 0.6031197901472635 +69 100 negative_sampler.num_negs_per_pos 76.0 +69 100 training.batch_size 0.0 +69 1 dataset """kinships""" +69 1 model """conve""" +69 1 loss """nssa""" +69 1 regularizer """no""" +69 1 optimizer """adadelta""" +69 1 training_loop """owa""" +69 1 negative_sampler """basic""" +69 1 evaluator """rankbased""" +69 2 dataset """kinships""" +69 2 model """conve""" +69 2 loss """nssa""" +69 2 regularizer """no""" +69 2 optimizer """adadelta""" +69 2 training_loop """owa""" +69 2 negative_sampler """basic""" +69 2 evaluator """rankbased""" +69 3 dataset """kinships""" +69 3 model """conve""" +69 3 loss """nssa""" +69 3 regularizer """no""" +69 3 optimizer """adadelta""" +69 3 training_loop """owa""" +69 3 negative_sampler """basic""" +69 3 evaluator """rankbased""" +69 4 dataset """kinships""" +69 4 model """conve""" +69 4 loss """nssa""" +69 4 regularizer """no""" +69 4 optimizer """adadelta""" +69 4 training_loop """owa""" +69 4 negative_sampler """basic""" +69 4 evaluator """rankbased""" +69 5 dataset """kinships""" +69 5 model """conve""" +69 5 loss """nssa""" +69 5 regularizer """no""" +69 5 optimizer """adadelta""" +69 5 training_loop """owa""" +69 5 negative_sampler """basic""" +69 5 evaluator """rankbased""" +69 6 
dataset """kinships""" +69 6 model """conve""" +69 6 loss """nssa""" +69 6 regularizer """no""" +69 6 optimizer """adadelta""" +69 6 training_loop """owa""" +69 6 negative_sampler """basic""" +69 6 evaluator """rankbased""" +69 7 dataset """kinships""" +69 7 model """conve""" +69 7 loss """nssa""" +69 7 regularizer """no""" +69 7 optimizer """adadelta""" +69 7 training_loop """owa""" +69 7 negative_sampler """basic""" +69 7 evaluator """rankbased""" +69 8 dataset """kinships""" +69 8 model """conve""" +69 8 loss """nssa""" +69 8 regularizer """no""" +69 8 optimizer """adadelta""" +69 8 training_loop """owa""" +69 8 negative_sampler """basic""" +69 8 evaluator """rankbased""" +69 9 dataset """kinships""" +69 9 model """conve""" +69 9 loss """nssa""" +69 9 regularizer """no""" +69 9 optimizer """adadelta""" +69 9 training_loop """owa""" +69 9 negative_sampler """basic""" +69 9 evaluator """rankbased""" +69 10 dataset """kinships""" +69 10 model """conve""" +69 10 loss """nssa""" +69 10 regularizer """no""" +69 10 optimizer """adadelta""" +69 10 training_loop """owa""" +69 10 negative_sampler """basic""" +69 10 evaluator """rankbased""" +69 11 dataset """kinships""" +69 11 model """conve""" +69 11 loss """nssa""" +69 11 regularizer """no""" +69 11 optimizer """adadelta""" +69 11 training_loop """owa""" +69 11 negative_sampler """basic""" +69 11 evaluator """rankbased""" +69 12 dataset """kinships""" +69 12 model """conve""" +69 12 loss """nssa""" +69 12 regularizer """no""" +69 12 optimizer """adadelta""" +69 12 training_loop """owa""" +69 12 negative_sampler """basic""" +69 12 evaluator """rankbased""" +69 13 dataset """kinships""" +69 13 model """conve""" +69 13 loss """nssa""" +69 13 regularizer """no""" +69 13 optimizer """adadelta""" +69 13 training_loop """owa""" +69 13 negative_sampler """basic""" +69 13 evaluator """rankbased""" +69 14 dataset """kinships""" +69 14 model """conve""" +69 14 loss """nssa""" +69 14 regularizer """no""" +69 14 optimizer 
"""adadelta""" +69 14 training_loop """owa""" +69 14 negative_sampler """basic""" +69 14 evaluator """rankbased""" +69 15 dataset """kinships""" +69 15 model """conve""" +69 15 loss """nssa""" +69 15 regularizer """no""" +69 15 optimizer """adadelta""" +69 15 training_loop """owa""" +69 15 negative_sampler """basic""" +69 15 evaluator """rankbased""" +69 16 dataset """kinships""" +69 16 model """conve""" +69 16 loss """nssa""" +69 16 regularizer """no""" +69 16 optimizer """adadelta""" +69 16 training_loop """owa""" +69 16 negative_sampler """basic""" +69 16 evaluator """rankbased""" +69 17 dataset """kinships""" +69 17 model """conve""" +69 17 loss """nssa""" +69 17 regularizer """no""" +69 17 optimizer """adadelta""" +69 17 training_loop """owa""" +69 17 negative_sampler """basic""" +69 17 evaluator """rankbased""" +69 18 dataset """kinships""" +69 18 model """conve""" +69 18 loss """nssa""" +69 18 regularizer """no""" +69 18 optimizer """adadelta""" +69 18 training_loop """owa""" +69 18 negative_sampler """basic""" +69 18 evaluator """rankbased""" +69 19 dataset """kinships""" +69 19 model """conve""" +69 19 loss """nssa""" +69 19 regularizer """no""" +69 19 optimizer """adadelta""" +69 19 training_loop """owa""" +69 19 negative_sampler """basic""" +69 19 evaluator """rankbased""" +69 20 dataset """kinships""" +69 20 model """conve""" +69 20 loss """nssa""" +69 20 regularizer """no""" +69 20 optimizer """adadelta""" +69 20 training_loop """owa""" +69 20 negative_sampler """basic""" +69 20 evaluator """rankbased""" +69 21 dataset """kinships""" +69 21 model """conve""" +69 21 loss """nssa""" +69 21 regularizer """no""" +69 21 optimizer """adadelta""" +69 21 training_loop """owa""" +69 21 negative_sampler """basic""" +69 21 evaluator """rankbased""" +69 22 dataset """kinships""" +69 22 model """conve""" +69 22 loss """nssa""" +69 22 regularizer """no""" +69 22 optimizer """adadelta""" +69 22 training_loop """owa""" +69 22 negative_sampler """basic""" +69 22 
evaluator """rankbased""" +69 23 dataset """kinships""" +69 23 model """conve""" +69 23 loss """nssa""" +69 23 regularizer """no""" +69 23 optimizer """adadelta""" +69 23 training_loop """owa""" +69 23 negative_sampler """basic""" +69 23 evaluator """rankbased""" +69 24 dataset """kinships""" +69 24 model """conve""" +69 24 loss """nssa""" +69 24 regularizer """no""" +69 24 optimizer """adadelta""" +69 24 training_loop """owa""" +69 24 negative_sampler """basic""" +69 24 evaluator """rankbased""" +69 25 dataset """kinships""" +69 25 model """conve""" +69 25 loss """nssa""" +69 25 regularizer """no""" +69 25 optimizer """adadelta""" +69 25 training_loop """owa""" +69 25 negative_sampler """basic""" +69 25 evaluator """rankbased""" +69 26 dataset """kinships""" +69 26 model """conve""" +69 26 loss """nssa""" +69 26 regularizer """no""" +69 26 optimizer """adadelta""" +69 26 training_loop """owa""" +69 26 negative_sampler """basic""" +69 26 evaluator """rankbased""" +69 27 dataset """kinships""" +69 27 model """conve""" +69 27 loss """nssa""" +69 27 regularizer """no""" +69 27 optimizer """adadelta""" +69 27 training_loop """owa""" +69 27 negative_sampler """basic""" +69 27 evaluator """rankbased""" +69 28 dataset """kinships""" +69 28 model """conve""" +69 28 loss """nssa""" +69 28 regularizer """no""" +69 28 optimizer """adadelta""" +69 28 training_loop """owa""" +69 28 negative_sampler """basic""" +69 28 evaluator """rankbased""" +69 29 dataset """kinships""" +69 29 model """conve""" +69 29 loss """nssa""" +69 29 regularizer """no""" +69 29 optimizer """adadelta""" +69 29 training_loop """owa""" +69 29 negative_sampler """basic""" +69 29 evaluator """rankbased""" +69 30 dataset """kinships""" +69 30 model """conve""" +69 30 loss """nssa""" +69 30 regularizer """no""" +69 30 optimizer """adadelta""" +69 30 training_loop """owa""" +69 30 negative_sampler """basic""" +69 30 evaluator """rankbased""" +69 31 dataset """kinships""" +69 31 model """conve""" +69 31 loss 
"""nssa""" +69 31 regularizer """no""" +69 31 optimizer """adadelta""" +69 31 training_loop """owa""" +69 31 negative_sampler """basic""" +69 31 evaluator """rankbased""" +69 32 dataset """kinships""" +69 32 model """conve""" +69 32 loss """nssa""" +69 32 regularizer """no""" +69 32 optimizer """adadelta""" +69 32 training_loop """owa""" +69 32 negative_sampler """basic""" +69 32 evaluator """rankbased""" +69 33 dataset """kinships""" +69 33 model """conve""" +69 33 loss """nssa""" +69 33 regularizer """no""" +69 33 optimizer """adadelta""" +69 33 training_loop """owa""" +69 33 negative_sampler """basic""" +69 33 evaluator """rankbased""" +69 34 dataset """kinships""" +69 34 model """conve""" +69 34 loss """nssa""" +69 34 regularizer """no""" +69 34 optimizer """adadelta""" +69 34 training_loop """owa""" +69 34 negative_sampler """basic""" +69 34 evaluator """rankbased""" +69 35 dataset """kinships""" +69 35 model """conve""" +69 35 loss """nssa""" +69 35 regularizer """no""" +69 35 optimizer """adadelta""" +69 35 training_loop """owa""" +69 35 negative_sampler """basic""" +69 35 evaluator """rankbased""" +69 36 dataset """kinships""" +69 36 model """conve""" +69 36 loss """nssa""" +69 36 regularizer """no""" +69 36 optimizer """adadelta""" +69 36 training_loop """owa""" +69 36 negative_sampler """basic""" +69 36 evaluator """rankbased""" +69 37 dataset """kinships""" +69 37 model """conve""" +69 37 loss """nssa""" +69 37 regularizer """no""" +69 37 optimizer """adadelta""" +69 37 training_loop """owa""" +69 37 negative_sampler """basic""" +69 37 evaluator """rankbased""" +69 38 dataset """kinships""" +69 38 model """conve""" +69 38 loss """nssa""" +69 38 regularizer """no""" +69 38 optimizer """adadelta""" +69 38 training_loop """owa""" +69 38 negative_sampler """basic""" +69 38 evaluator """rankbased""" +69 39 dataset """kinships""" +69 39 model """conve""" +69 39 loss """nssa""" +69 39 regularizer """no""" +69 39 optimizer """adadelta""" +69 39 training_loop 
"""owa""" +69 39 negative_sampler """basic""" +69 39 evaluator """rankbased""" +69 40 dataset """kinships""" +69 40 model """conve""" +69 40 loss """nssa""" +69 40 regularizer """no""" +69 40 optimizer """adadelta""" +69 40 training_loop """owa""" +69 40 negative_sampler """basic""" +69 40 evaluator """rankbased""" +69 41 dataset """kinships""" +69 41 model """conve""" +69 41 loss """nssa""" +69 41 regularizer """no""" +69 41 optimizer """adadelta""" +69 41 training_loop """owa""" +69 41 negative_sampler """basic""" +69 41 evaluator """rankbased""" +69 42 dataset """kinships""" +69 42 model """conve""" +69 42 loss """nssa""" +69 42 regularizer """no""" +69 42 optimizer """adadelta""" +69 42 training_loop """owa""" +69 42 negative_sampler """basic""" +69 42 evaluator """rankbased""" +69 43 dataset """kinships""" +69 43 model """conve""" +69 43 loss """nssa""" +69 43 regularizer """no""" +69 43 optimizer """adadelta""" +69 43 training_loop """owa""" +69 43 negative_sampler """basic""" +69 43 evaluator """rankbased""" +69 44 dataset """kinships""" +69 44 model """conve""" +69 44 loss """nssa""" +69 44 regularizer """no""" +69 44 optimizer """adadelta""" +69 44 training_loop """owa""" +69 44 negative_sampler """basic""" +69 44 evaluator """rankbased""" +69 45 dataset """kinships""" +69 45 model """conve""" +69 45 loss """nssa""" +69 45 regularizer """no""" +69 45 optimizer """adadelta""" +69 45 training_loop """owa""" +69 45 negative_sampler """basic""" +69 45 evaluator """rankbased""" +69 46 dataset """kinships""" +69 46 model """conve""" +69 46 loss """nssa""" +69 46 regularizer """no""" +69 46 optimizer """adadelta""" +69 46 training_loop """owa""" +69 46 negative_sampler """basic""" +69 46 evaluator """rankbased""" +69 47 dataset """kinships""" +69 47 model """conve""" +69 47 loss """nssa""" +69 47 regularizer """no""" +69 47 optimizer """adadelta""" +69 47 training_loop """owa""" +69 47 negative_sampler """basic""" +69 47 evaluator """rankbased""" +69 48 dataset 
"""kinships""" +69 48 model """conve""" +69 48 loss """nssa""" +69 48 regularizer """no""" +69 48 optimizer """adadelta""" +69 48 training_loop """owa""" +69 48 negative_sampler """basic""" +69 48 evaluator """rankbased""" +69 49 dataset """kinships""" +69 49 model """conve""" +69 49 loss """nssa""" +69 49 regularizer """no""" +69 49 optimizer """adadelta""" +69 49 training_loop """owa""" +69 49 negative_sampler """basic""" +69 49 evaluator """rankbased""" +69 50 dataset """kinships""" +69 50 model """conve""" +69 50 loss """nssa""" +69 50 regularizer """no""" +69 50 optimizer """adadelta""" +69 50 training_loop """owa""" +69 50 negative_sampler """basic""" +69 50 evaluator """rankbased""" +69 51 dataset """kinships""" +69 51 model """conve""" +69 51 loss """nssa""" +69 51 regularizer """no""" +69 51 optimizer """adadelta""" +69 51 training_loop """owa""" +69 51 negative_sampler """basic""" +69 51 evaluator """rankbased""" +69 52 dataset """kinships""" +69 52 model """conve""" +69 52 loss """nssa""" +69 52 regularizer """no""" +69 52 optimizer """adadelta""" +69 52 training_loop """owa""" +69 52 negative_sampler """basic""" +69 52 evaluator """rankbased""" +69 53 dataset """kinships""" +69 53 model """conve""" +69 53 loss """nssa""" +69 53 regularizer """no""" +69 53 optimizer """adadelta""" +69 53 training_loop """owa""" +69 53 negative_sampler """basic""" +69 53 evaluator """rankbased""" +69 54 dataset """kinships""" +69 54 model """conve""" +69 54 loss """nssa""" +69 54 regularizer """no""" +69 54 optimizer """adadelta""" +69 54 training_loop """owa""" +69 54 negative_sampler """basic""" +69 54 evaluator """rankbased""" +69 55 dataset """kinships""" +69 55 model """conve""" +69 55 loss """nssa""" +69 55 regularizer """no""" +69 55 optimizer """adadelta""" +69 55 training_loop """owa""" +69 55 negative_sampler """basic""" +69 55 evaluator """rankbased""" +69 56 dataset """kinships""" +69 56 model """conve""" +69 56 loss """nssa""" +69 56 regularizer """no""" +69 
56 optimizer """adadelta""" +69 56 training_loop """owa""" +69 56 negative_sampler """basic""" +69 56 evaluator """rankbased""" +69 57 dataset """kinships""" +69 57 model """conve""" +69 57 loss """nssa""" +69 57 regularizer """no""" +69 57 optimizer """adadelta""" +69 57 training_loop """owa""" +69 57 negative_sampler """basic""" +69 57 evaluator """rankbased""" +69 58 dataset """kinships""" +69 58 model """conve""" +69 58 loss """nssa""" +69 58 regularizer """no""" +69 58 optimizer """adadelta""" +69 58 training_loop """owa""" +69 58 negative_sampler """basic""" +69 58 evaluator """rankbased""" +69 59 dataset """kinships""" +69 59 model """conve""" +69 59 loss """nssa""" +69 59 regularizer """no""" +69 59 optimizer """adadelta""" +69 59 training_loop """owa""" +69 59 negative_sampler """basic""" +69 59 evaluator """rankbased""" +69 60 dataset """kinships""" +69 60 model """conve""" +69 60 loss """nssa""" +69 60 regularizer """no""" +69 60 optimizer """adadelta""" +69 60 training_loop """owa""" +69 60 negative_sampler """basic""" +69 60 evaluator """rankbased""" +69 61 dataset """kinships""" +69 61 model """conve""" +69 61 loss """nssa""" +69 61 regularizer """no""" +69 61 optimizer """adadelta""" +69 61 training_loop """owa""" +69 61 negative_sampler """basic""" +69 61 evaluator """rankbased""" +69 62 dataset """kinships""" +69 62 model """conve""" +69 62 loss """nssa""" +69 62 regularizer """no""" +69 62 optimizer """adadelta""" +69 62 training_loop """owa""" +69 62 negative_sampler """basic""" +69 62 evaluator """rankbased""" +69 63 dataset """kinships""" +69 63 model """conve""" +69 63 loss """nssa""" +69 63 regularizer """no""" +69 63 optimizer """adadelta""" +69 63 training_loop """owa""" +69 63 negative_sampler """basic""" +69 63 evaluator """rankbased""" +69 64 dataset """kinships""" +69 64 model """conve""" +69 64 loss """nssa""" +69 64 regularizer """no""" +69 64 optimizer """adadelta""" +69 64 training_loop """owa""" +69 64 negative_sampler """basic""" 
+69 64 evaluator """rankbased""" +69 65 dataset """kinships""" +69 65 model """conve""" +69 65 loss """nssa""" +69 65 regularizer """no""" +69 65 optimizer """adadelta""" +69 65 training_loop """owa""" +69 65 negative_sampler """basic""" +69 65 evaluator """rankbased""" +69 66 dataset """kinships""" +69 66 model """conve""" +69 66 loss """nssa""" +69 66 regularizer """no""" +69 66 optimizer """adadelta""" +69 66 training_loop """owa""" +69 66 negative_sampler """basic""" +69 66 evaluator """rankbased""" +69 67 dataset """kinships""" +69 67 model """conve""" +69 67 loss """nssa""" +69 67 regularizer """no""" +69 67 optimizer """adadelta""" +69 67 training_loop """owa""" +69 67 negative_sampler """basic""" +69 67 evaluator """rankbased""" +69 68 dataset """kinships""" +69 68 model """conve""" +69 68 loss """nssa""" +69 68 regularizer """no""" +69 68 optimizer """adadelta""" +69 68 training_loop """owa""" +69 68 negative_sampler """basic""" +69 68 evaluator """rankbased""" +69 69 dataset """kinships""" +69 69 model """conve""" +69 69 loss """nssa""" +69 69 regularizer """no""" +69 69 optimizer """adadelta""" +69 69 training_loop """owa""" +69 69 negative_sampler """basic""" +69 69 evaluator """rankbased""" +69 70 dataset """kinships""" +69 70 model """conve""" +69 70 loss """nssa""" +69 70 regularizer """no""" +69 70 optimizer """adadelta""" +69 70 training_loop """owa""" +69 70 negative_sampler """basic""" +69 70 evaluator """rankbased""" +69 71 dataset """kinships""" +69 71 model """conve""" +69 71 loss """nssa""" +69 71 regularizer """no""" +69 71 optimizer """adadelta""" +69 71 training_loop """owa""" +69 71 negative_sampler """basic""" +69 71 evaluator """rankbased""" +69 72 dataset """kinships""" +69 72 model """conve""" +69 72 loss """nssa""" +69 72 regularizer """no""" +69 72 optimizer """adadelta""" +69 72 training_loop """owa""" +69 72 negative_sampler """basic""" +69 72 evaluator """rankbased""" +69 73 dataset """kinships""" +69 73 model """conve""" +69 73 
loss """nssa""" +69 73 regularizer """no""" +69 73 optimizer """adadelta""" +69 73 training_loop """owa""" +69 73 negative_sampler """basic""" +69 73 evaluator """rankbased""" +69 74 dataset """kinships""" +69 74 model """conve""" +69 74 loss """nssa""" +69 74 regularizer """no""" +69 74 optimizer """adadelta""" +69 74 training_loop """owa""" +69 74 negative_sampler """basic""" +69 74 evaluator """rankbased""" +69 75 dataset """kinships""" +69 75 model """conve""" +69 75 loss """nssa""" +69 75 regularizer """no""" +69 75 optimizer """adadelta""" +69 75 training_loop """owa""" +69 75 negative_sampler """basic""" +69 75 evaluator """rankbased""" +69 76 dataset """kinships""" +69 76 model """conve""" +69 76 loss """nssa""" +69 76 regularizer """no""" +69 76 optimizer """adadelta""" +69 76 training_loop """owa""" +69 76 negative_sampler """basic""" +69 76 evaluator """rankbased""" +69 77 dataset """kinships""" +69 77 model """conve""" +69 77 loss """nssa""" +69 77 regularizer """no""" +69 77 optimizer """adadelta""" +69 77 training_loop """owa""" +69 77 negative_sampler """basic""" +69 77 evaluator """rankbased""" +69 78 dataset """kinships""" +69 78 model """conve""" +69 78 loss """nssa""" +69 78 regularizer """no""" +69 78 optimizer """adadelta""" +69 78 training_loop """owa""" +69 78 negative_sampler """basic""" +69 78 evaluator """rankbased""" +69 79 dataset """kinships""" +69 79 model """conve""" +69 79 loss """nssa""" +69 79 regularizer """no""" +69 79 optimizer """adadelta""" +69 79 training_loop """owa""" +69 79 negative_sampler """basic""" +69 79 evaluator """rankbased""" +69 80 dataset """kinships""" +69 80 model """conve""" +69 80 loss """nssa""" +69 80 regularizer """no""" +69 80 optimizer """adadelta""" +69 80 training_loop """owa""" +69 80 negative_sampler """basic""" +69 80 evaluator """rankbased""" +69 81 dataset """kinships""" +69 81 model """conve""" +69 81 loss """nssa""" +69 81 regularizer """no""" +69 81 optimizer """adadelta""" +69 81 
training_loop """owa""" +69 81 negative_sampler """basic""" +69 81 evaluator """rankbased""" +69 82 dataset """kinships""" +69 82 model """conve""" +69 82 loss """nssa""" +69 82 regularizer """no""" +69 82 optimizer """adadelta""" +69 82 training_loop """owa""" +69 82 negative_sampler """basic""" +69 82 evaluator """rankbased""" +69 83 dataset """kinships""" +69 83 model """conve""" +69 83 loss """nssa""" +69 83 regularizer """no""" +69 83 optimizer """adadelta""" +69 83 training_loop """owa""" +69 83 negative_sampler """basic""" +69 83 evaluator """rankbased""" +69 84 dataset """kinships""" +69 84 model """conve""" +69 84 loss """nssa""" +69 84 regularizer """no""" +69 84 optimizer """adadelta""" +69 84 training_loop """owa""" +69 84 negative_sampler """basic""" +69 84 evaluator """rankbased""" +69 85 dataset """kinships""" +69 85 model """conve""" +69 85 loss """nssa""" +69 85 regularizer """no""" +69 85 optimizer """adadelta""" +69 85 training_loop """owa""" +69 85 negative_sampler """basic""" +69 85 evaluator """rankbased""" +69 86 dataset """kinships""" +69 86 model """conve""" +69 86 loss """nssa""" +69 86 regularizer """no""" +69 86 optimizer """adadelta""" +69 86 training_loop """owa""" +69 86 negative_sampler """basic""" +69 86 evaluator """rankbased""" +69 87 dataset """kinships""" +69 87 model """conve""" +69 87 loss """nssa""" +69 87 regularizer """no""" +69 87 optimizer """adadelta""" +69 87 training_loop """owa""" +69 87 negative_sampler """basic""" +69 87 evaluator """rankbased""" +69 88 dataset """kinships""" +69 88 model """conve""" +69 88 loss """nssa""" +69 88 regularizer """no""" +69 88 optimizer """adadelta""" +69 88 training_loop """owa""" +69 88 negative_sampler """basic""" +69 88 evaluator """rankbased""" +69 89 dataset """kinships""" +69 89 model """conve""" +69 89 loss """nssa""" +69 89 regularizer """no""" +69 89 optimizer """adadelta""" +69 89 training_loop """owa""" +69 89 negative_sampler """basic""" +69 89 evaluator """rankbased""" 
+69 90 dataset """kinships""" +69 90 model """conve""" +69 90 loss """nssa""" +69 90 regularizer """no""" +69 90 optimizer """adadelta""" +69 90 training_loop """owa""" +69 90 negative_sampler """basic""" +69 90 evaluator """rankbased""" +69 91 dataset """kinships""" +69 91 model """conve""" +69 91 loss """nssa""" +69 91 regularizer """no""" +69 91 optimizer """adadelta""" +69 91 training_loop """owa""" +69 91 negative_sampler """basic""" +69 91 evaluator """rankbased""" +69 92 dataset """kinships""" +69 92 model """conve""" +69 92 loss """nssa""" +69 92 regularizer """no""" +69 92 optimizer """adadelta""" +69 92 training_loop """owa""" +69 92 negative_sampler """basic""" +69 92 evaluator """rankbased""" +69 93 dataset """kinships""" +69 93 model """conve""" +69 93 loss """nssa""" +69 93 regularizer """no""" +69 93 optimizer """adadelta""" +69 93 training_loop """owa""" +69 93 negative_sampler """basic""" +69 93 evaluator """rankbased""" +69 94 dataset """kinships""" +69 94 model """conve""" +69 94 loss """nssa""" +69 94 regularizer """no""" +69 94 optimizer """adadelta""" +69 94 training_loop """owa""" +69 94 negative_sampler """basic""" +69 94 evaluator """rankbased""" +69 95 dataset """kinships""" +69 95 model """conve""" +69 95 loss """nssa""" +69 95 regularizer """no""" +69 95 optimizer """adadelta""" +69 95 training_loop """owa""" +69 95 negative_sampler """basic""" +69 95 evaluator """rankbased""" +69 96 dataset """kinships""" +69 96 model """conve""" +69 96 loss """nssa""" +69 96 regularizer """no""" +69 96 optimizer """adadelta""" +69 96 training_loop """owa""" +69 96 negative_sampler """basic""" +69 96 evaluator """rankbased""" +69 97 dataset """kinships""" +69 97 model """conve""" +69 97 loss """nssa""" +69 97 regularizer """no""" +69 97 optimizer """adadelta""" +69 97 training_loop """owa""" +69 97 negative_sampler """basic""" +69 97 evaluator """rankbased""" +69 98 dataset """kinships""" +69 98 model """conve""" +69 98 loss """nssa""" +69 98 
regularizer """no""" +69 98 optimizer """adadelta""" +69 98 training_loop """owa""" +69 98 negative_sampler """basic""" +69 98 evaluator """rankbased""" +69 99 dataset """kinships""" +69 99 model """conve""" +69 99 loss """nssa""" +69 99 regularizer """no""" +69 99 optimizer """adadelta""" +69 99 training_loop """owa""" +69 99 negative_sampler """basic""" +69 99 evaluator """rankbased""" +69 100 dataset """kinships""" +69 100 model """conve""" +69 100 loss """nssa""" +69 100 regularizer """no""" +69 100 optimizer """adadelta""" +69 100 training_loop """owa""" +69 100 negative_sampler """basic""" +69 100 evaluator """rankbased""" +70 1 model.output_channels 57.0 +70 1 model.input_dropout 0.014661004710646353 +70 1 model.output_dropout 0.29359201262683043 +70 1 model.feature_map_dropout 0.3596911319766277 +70 1 model.embedding_dim 2.0 +70 1 training.batch_size 0.0 +70 1 training.label_smoothing 0.13765495478598155 +70 2 model.output_channels 26.0 +70 2 model.input_dropout 0.46402364489018966 +70 2 model.output_dropout 0.23064812196159762 +70 2 model.feature_map_dropout 0.05765988422275142 +70 2 model.embedding_dim 2.0 +70 2 training.batch_size 1.0 +70 2 training.label_smoothing 0.0030350958706467372 +70 3 model.output_channels 27.0 +70 3 model.input_dropout 0.16986698259171046 +70 3 model.output_dropout 0.1345135547774997 +70 3 model.feature_map_dropout 0.3744692009198409 +70 3 model.embedding_dim 1.0 +70 3 training.batch_size 2.0 +70 3 training.label_smoothing 0.0024449732088073794 +70 4 model.output_channels 30.0 +70 4 model.input_dropout 0.10628328258453346 +70 4 model.output_dropout 0.010589483296975732 +70 4 model.feature_map_dropout 0.0807683148574645 +70 4 model.embedding_dim 2.0 +70 4 training.batch_size 2.0 +70 4 training.label_smoothing 0.594572629000462 +70 5 model.output_channels 64.0 +70 5 model.input_dropout 0.4753084942479594 +70 5 model.output_dropout 0.07477601391030986 +70 5 model.feature_map_dropout 0.41333746674065147 +70 5 model.embedding_dim 1.0 
+70 5 training.batch_size 0.0 +70 5 training.label_smoothing 0.0028001552063428004 +70 6 model.output_channels 42.0 +70 6 model.input_dropout 0.03367303971329111 +70 6 model.output_dropout 0.30304803983097434 +70 6 model.feature_map_dropout 0.26031929903849194 +70 6 model.embedding_dim 1.0 +70 6 training.batch_size 0.0 +70 6 training.label_smoothing 0.5227436358033756 +70 7 model.output_channels 26.0 +70 7 model.input_dropout 0.35505259819613477 +70 7 model.output_dropout 0.4602891812126561 +70 7 model.feature_map_dropout 0.20338626924348252 +70 7 model.embedding_dim 1.0 +70 7 training.batch_size 2.0 +70 7 training.label_smoothing 0.025242306558789028 +70 8 model.output_channels 18.0 +70 8 model.input_dropout 0.16279088106724504 +70 8 model.output_dropout 0.4779362350689377 +70 8 model.feature_map_dropout 0.07063692683363659 +70 8 model.embedding_dim 0.0 +70 8 training.batch_size 0.0 +70 8 training.label_smoothing 0.16460286771234445 +70 9 model.output_channels 37.0 +70 9 model.input_dropout 0.4475303159771416 +70 9 model.output_dropout 0.05347416335019445 +70 9 model.feature_map_dropout 0.0011483741837599593 +70 9 model.embedding_dim 0.0 +70 9 training.batch_size 1.0 +70 9 training.label_smoothing 0.00352539099912422 +70 10 model.output_channels 56.0 +70 10 model.input_dropout 0.15700295218923604 +70 10 model.output_dropout 0.19610152872976389 +70 10 model.feature_map_dropout 0.2092163072593714 +70 10 model.embedding_dim 0.0 +70 10 training.batch_size 2.0 +70 10 training.label_smoothing 0.3841711792701082 +70 11 model.output_channels 50.0 +70 11 model.input_dropout 0.06114106314268508 +70 11 model.output_dropout 0.4903961150623271 +70 11 model.feature_map_dropout 0.1380056472336127 +70 11 model.embedding_dim 1.0 +70 11 training.batch_size 1.0 +70 11 training.label_smoothing 0.0025548218465044283 +70 12 model.output_channels 36.0 +70 12 model.input_dropout 0.2793804551588851 +70 12 model.output_dropout 0.3408656518805773 +70 12 model.feature_map_dropout 
0.08283610915620815 +70 12 model.embedding_dim 2.0 +70 12 training.batch_size 2.0 +70 12 training.label_smoothing 0.9557474165502473 +70 13 model.output_channels 55.0 +70 13 model.input_dropout 0.10283669944084117 +70 13 model.output_dropout 0.3356535046304028 +70 13 model.feature_map_dropout 0.2361058328546416 +70 13 model.embedding_dim 2.0 +70 13 training.batch_size 2.0 +70 13 training.label_smoothing 0.0020173432654827426 +70 14 model.output_channels 53.0 +70 14 model.input_dropout 0.335639152034389 +70 14 model.output_dropout 0.4870765589853029 +70 14 model.feature_map_dropout 0.44881621517931186 +70 14 model.embedding_dim 2.0 +70 14 training.batch_size 1.0 +70 14 training.label_smoothing 0.19487655703044637 +70 15 model.output_channels 26.0 +70 15 model.input_dropout 0.3871377229397329 +70 15 model.output_dropout 0.335760027130171 +70 15 model.feature_map_dropout 0.3889263557018353 +70 15 model.embedding_dim 2.0 +70 15 training.batch_size 0.0 +70 15 training.label_smoothing 0.015443193332627178 +70 16 model.output_channels 40.0 +70 16 model.input_dropout 0.4473033842122223 +70 16 model.output_dropout 0.2928414442522881 +70 16 model.feature_map_dropout 0.1129673162771212 +70 16 model.embedding_dim 2.0 +70 16 training.batch_size 0.0 +70 16 training.label_smoothing 0.03659216237761382 +70 17 model.output_channels 27.0 +70 17 model.input_dropout 0.21870921819401695 +70 17 model.output_dropout 0.38130060841242236 +70 17 model.feature_map_dropout 0.02363681683628538 +70 17 model.embedding_dim 1.0 +70 17 training.batch_size 2.0 +70 17 training.label_smoothing 0.001724251013131957 +70 18 model.output_channels 33.0 +70 18 model.input_dropout 0.12561450905249882 +70 18 model.output_dropout 0.35559923557121725 +70 18 model.feature_map_dropout 0.30394237854202777 +70 18 model.embedding_dim 2.0 +70 18 training.batch_size 2.0 +70 18 training.label_smoothing 0.011712465932121737 +70 19 model.output_channels 34.0 +70 19 model.input_dropout 0.3793919772118561 +70 19 
model.output_dropout 0.4645079189656499 +70 19 model.feature_map_dropout 0.37805793369566854 +70 19 model.embedding_dim 2.0 +70 19 training.batch_size 0.0 +70 19 training.label_smoothing 0.011808650418698111 +70 20 model.output_channels 59.0 +70 20 model.input_dropout 0.22536854553398833 +70 20 model.output_dropout 0.18026854258367464 +70 20 model.feature_map_dropout 0.23291570300152337 +70 20 model.embedding_dim 0.0 +70 20 training.batch_size 2.0 +70 20 training.label_smoothing 0.007098328257788571 +70 21 model.output_channels 41.0 +70 21 model.input_dropout 0.19079821654082296 +70 21 model.output_dropout 0.41095898093958183 +70 21 model.feature_map_dropout 0.20745058271333144 +70 21 model.embedding_dim 0.0 +70 21 training.batch_size 2.0 +70 21 training.label_smoothing 0.014343771681856056 +70 22 model.output_channels 30.0 +70 22 model.input_dropout 0.4453829837773076 +70 22 model.output_dropout 0.2401605800191634 +70 22 model.feature_map_dropout 0.27620352432024703 +70 22 model.embedding_dim 1.0 +70 22 training.batch_size 1.0 +70 22 training.label_smoothing 0.001855822823228516 +70 23 model.output_channels 37.0 +70 23 model.input_dropout 0.08590056675182511 +70 23 model.output_dropout 0.2718188548354374 +70 23 model.feature_map_dropout 0.3609776204110487 +70 23 model.embedding_dim 0.0 +70 23 training.batch_size 0.0 +70 23 training.label_smoothing 0.008862689436976202 +70 24 model.output_channels 42.0 +70 24 model.input_dropout 0.3726235214296112 +70 24 model.output_dropout 0.20573302268360627 +70 24 model.feature_map_dropout 0.03268228645616167 +70 24 model.embedding_dim 0.0 +70 24 training.batch_size 1.0 +70 24 training.label_smoothing 0.49152469314398617 +70 25 model.output_channels 60.0 +70 25 model.input_dropout 0.15037799731414092 +70 25 model.output_dropout 0.4029565469131577 +70 25 model.feature_map_dropout 0.29959456739336465 +70 25 model.embedding_dim 2.0 +70 25 training.batch_size 0.0 +70 25 training.label_smoothing 0.035410329625671005 +70 26 
model.output_channels 22.0 +70 26 model.input_dropout 0.331142133408464 +70 26 model.output_dropout 0.12462542480219152 +70 26 model.feature_map_dropout 0.4789420246344848 +70 26 model.embedding_dim 0.0 +70 26 training.batch_size 2.0 +70 26 training.label_smoothing 0.006187165537787974 +70 27 model.output_channels 33.0 +70 27 model.input_dropout 0.0817578615147172 +70 27 model.output_dropout 0.026754000620930307 +70 27 model.feature_map_dropout 0.3143698713171746 +70 27 model.embedding_dim 1.0 +70 27 training.batch_size 0.0 +70 27 training.label_smoothing 0.0011331676833542144 +70 28 model.output_channels 47.0 +70 28 model.input_dropout 0.4648073821973411 +70 28 model.output_dropout 0.3030257688939476 +70 28 model.feature_map_dropout 0.057544056545557054 +70 28 model.embedding_dim 2.0 +70 28 training.batch_size 0.0 +70 28 training.label_smoothing 0.01491026889786834 +70 29 model.output_channels 28.0 +70 29 model.input_dropout 0.09887015543158728 +70 29 model.output_dropout 0.18182620573825792 +70 29 model.feature_map_dropout 0.27254594209064703 +70 29 model.embedding_dim 0.0 +70 29 training.batch_size 0.0 +70 29 training.label_smoothing 0.004094489702271628 +70 30 model.output_channels 19.0 +70 30 model.input_dropout 0.30643765436213943 +70 30 model.output_dropout 0.21517195165657665 +70 30 model.feature_map_dropout 0.4562154721369217 +70 30 model.embedding_dim 2.0 +70 30 training.batch_size 2.0 +70 30 training.label_smoothing 0.07145278184552563 +70 31 model.output_channels 26.0 +70 31 model.input_dropout 0.09994717596258712 +70 31 model.output_dropout 0.20093293002743584 +70 31 model.feature_map_dropout 0.23924338780894178 +70 31 model.embedding_dim 2.0 +70 31 training.batch_size 2.0 +70 31 training.label_smoothing 0.15816699835833134 +70 32 model.output_channels 49.0 +70 32 model.input_dropout 0.15577605098918113 +70 32 model.output_dropout 0.19078313694809196 +70 32 model.feature_map_dropout 0.41826945699463586 +70 32 model.embedding_dim 1.0 +70 32 
training.batch_size 2.0 +70 32 training.label_smoothing 0.6310966477792563 +70 33 model.output_channels 33.0 +70 33 model.input_dropout 0.056981235785715845 +70 33 model.output_dropout 0.3836399387263945 +70 33 model.feature_map_dropout 0.414566531933572 +70 33 model.embedding_dim 0.0 +70 33 training.batch_size 1.0 +70 33 training.label_smoothing 0.8056258568584844 +70 34 model.output_channels 28.0 +70 34 model.input_dropout 0.4560373203123162 +70 34 model.output_dropout 0.35241089926452757 +70 34 model.feature_map_dropout 0.41522946239133873 +70 34 model.embedding_dim 1.0 +70 34 training.batch_size 1.0 +70 34 training.label_smoothing 0.02499673093689622 +70 35 model.output_channels 44.0 +70 35 model.input_dropout 0.11133715808241812 +70 35 model.output_dropout 0.3616500706609214 +70 35 model.feature_map_dropout 0.2618351055535864 +70 35 model.embedding_dim 1.0 +70 35 training.batch_size 0.0 +70 35 training.label_smoothing 0.0861612347595919 +70 36 model.output_channels 51.0 +70 36 model.input_dropout 0.4837888928253776 +70 36 model.output_dropout 0.13861994380519094 +70 36 model.feature_map_dropout 0.007823365395952964 +70 36 model.embedding_dim 2.0 +70 36 training.batch_size 2.0 +70 36 training.label_smoothing 0.005894821800510914 +70 37 model.output_channels 49.0 +70 37 model.input_dropout 0.08014311968649751 +70 37 model.output_dropout 0.3295185231982978 +70 37 model.feature_map_dropout 0.2118102074985117 +70 37 model.embedding_dim 1.0 +70 37 training.batch_size 0.0 +70 37 training.label_smoothing 0.1289771076452347 +70 38 model.output_channels 41.0 +70 38 model.input_dropout 0.44074655275455843 +70 38 model.output_dropout 0.42859096598286206 +70 38 model.feature_map_dropout 0.38604377609616497 +70 38 model.embedding_dim 0.0 +70 38 training.batch_size 0.0 +70 38 training.label_smoothing 0.0019791868631526934 +70 39 model.output_channels 28.0 +70 39 model.input_dropout 0.4853657847401577 +70 39 model.output_dropout 0.15765298049729998 +70 39 
model.feature_map_dropout 0.03531742748731875 +70 39 model.embedding_dim 1.0 +70 39 training.batch_size 2.0 +70 39 training.label_smoothing 0.52210057562175 +70 40 model.output_channels 58.0 +70 40 model.input_dropout 0.4839861069845043 +70 40 model.output_dropout 0.16291067880006455 +70 40 model.feature_map_dropout 0.21200076820409125 +70 40 model.embedding_dim 1.0 +70 40 training.batch_size 2.0 +70 40 training.label_smoothing 0.0013007741283628744 +70 41 model.output_channels 43.0 +70 41 model.input_dropout 0.2182390204753109 +70 41 model.output_dropout 0.4095317194848963 +70 41 model.feature_map_dropout 0.11873707471563216 +70 41 model.embedding_dim 0.0 +70 41 training.batch_size 1.0 +70 41 training.label_smoothing 0.13298616411514655 +70 42 model.output_channels 26.0 +70 42 model.input_dropout 0.45164799066557065 +70 42 model.output_dropout 0.019587353615590997 +70 42 model.feature_map_dropout 0.3689930395176146 +70 42 model.embedding_dim 1.0 +70 42 training.batch_size 2.0 +70 42 training.label_smoothing 0.0024295025561788847 +70 43 model.output_channels 18.0 +70 43 model.input_dropout 0.4448268263187799 +70 43 model.output_dropout 0.31152897671037844 +70 43 model.feature_map_dropout 0.13107634293922005 +70 43 model.embedding_dim 0.0 +70 43 training.batch_size 2.0 +70 43 training.label_smoothing 0.0018135099000866108 +70 44 model.output_channels 39.0 +70 44 model.input_dropout 0.08737575853828883 +70 44 model.output_dropout 0.05754781404040271 +70 44 model.feature_map_dropout 0.203000223248265 +70 44 model.embedding_dim 0.0 +70 44 training.batch_size 0.0 +70 44 training.label_smoothing 0.0010088912501077585 +70 45 model.output_channels 53.0 +70 45 model.input_dropout 0.2759637024811786 +70 45 model.output_dropout 0.21602232809329563 +70 45 model.feature_map_dropout 0.4601485107884562 +70 45 model.embedding_dim 0.0 +70 45 training.batch_size 0.0 +70 45 training.label_smoothing 0.6917208525423455 +70 46 model.output_channels 28.0 +70 46 model.input_dropout 
0.4806049308704458 +70 46 model.output_dropout 0.3104140106704251 +70 46 model.feature_map_dropout 0.36780278283783274 +70 46 model.embedding_dim 0.0 +70 46 training.batch_size 0.0 +70 46 training.label_smoothing 0.17306780021783275 +70 47 model.output_channels 46.0 +70 47 model.input_dropout 0.08605977687198141 +70 47 model.output_dropout 0.35162074621262784 +70 47 model.feature_map_dropout 0.16920140937051198 +70 47 model.embedding_dim 2.0 +70 47 training.batch_size 1.0 +70 47 training.label_smoothing 0.40162284656990016 +70 48 model.output_channels 19.0 +70 48 model.input_dropout 0.055286073816071035 +70 48 model.output_dropout 0.12031297499768301 +70 48 model.feature_map_dropout 0.004850179139836375 +70 48 model.embedding_dim 1.0 +70 48 training.batch_size 1.0 +70 48 training.label_smoothing 0.3406317244600626 +70 49 model.output_channels 53.0 +70 49 model.input_dropout 0.4075590030425318 +70 49 model.output_dropout 0.36787216499813524 +70 49 model.feature_map_dropout 0.3636915290200248 +70 49 model.embedding_dim 1.0 +70 49 training.batch_size 0.0 +70 49 training.label_smoothing 0.11222693847130827 +70 50 model.output_channels 27.0 +70 50 model.input_dropout 0.15945584448426703 +70 50 model.output_dropout 0.4354017607088266 +70 50 model.feature_map_dropout 0.1814686310782418 +70 50 model.embedding_dim 0.0 +70 50 training.batch_size 1.0 +70 50 training.label_smoothing 0.16555191870929636 +70 51 model.output_channels 27.0 +70 51 model.input_dropout 0.35887891501504654 +70 51 model.output_dropout 0.317444226543237 +70 51 model.feature_map_dropout 0.2078984831386046 +70 51 model.embedding_dim 0.0 +70 51 training.batch_size 1.0 +70 51 training.label_smoothing 0.035052024187366165 +70 52 model.output_channels 37.0 +70 52 model.input_dropout 0.2611457766711898 +70 52 model.output_dropout 0.25883879332928517 +70 52 model.feature_map_dropout 0.03730150159235235 +70 52 model.embedding_dim 1.0 +70 52 training.batch_size 2.0 +70 52 training.label_smoothing 
0.10149983435481992 +70 53 model.output_channels 64.0 +70 53 model.input_dropout 0.07414872496800856 +70 53 model.output_dropout 0.2039353885848742 +70 53 model.feature_map_dropout 0.09191416730926888 +70 53 model.embedding_dim 1.0 +70 53 training.batch_size 0.0 +70 53 training.label_smoothing 0.01708908416956204 +70 54 model.output_channels 20.0 +70 54 model.input_dropout 0.38280435310519056 +70 54 model.output_dropout 0.46127126767371457 +70 54 model.feature_map_dropout 0.013009532476314145 +70 54 model.embedding_dim 0.0 +70 54 training.batch_size 2.0 +70 54 training.label_smoothing 0.4251330379007916 +70 55 model.output_channels 32.0 +70 55 model.input_dropout 0.3903893773389514 +70 55 model.output_dropout 0.3861686472495712 +70 55 model.feature_map_dropout 0.17564854834487437 +70 55 model.embedding_dim 1.0 +70 55 training.batch_size 1.0 +70 55 training.label_smoothing 0.23904933995096367 +70 56 model.output_channels 30.0 +70 56 model.input_dropout 0.41514998454640745 +70 56 model.output_dropout 0.16734085239419172 +70 56 model.feature_map_dropout 0.18532732486549308 +70 56 model.embedding_dim 0.0 +70 56 training.batch_size 1.0 +70 56 training.label_smoothing 0.004526418989735349 +70 57 model.output_channels 31.0 +70 57 model.input_dropout 0.34900127093771294 +70 57 model.output_dropout 0.204359701674575 +70 57 model.feature_map_dropout 0.42395965325023344 +70 57 model.embedding_dim 2.0 +70 57 training.batch_size 1.0 +70 57 training.label_smoothing 0.0458367576344868 +70 58 model.output_channels 57.0 +70 58 model.input_dropout 0.3299328941930374 +70 58 model.output_dropout 0.37959274127961584 +70 58 model.feature_map_dropout 0.17519689206651684 +70 58 model.embedding_dim 0.0 +70 58 training.batch_size 1.0 +70 58 training.label_smoothing 0.2829908839675048 +70 59 model.output_channels 21.0 +70 59 model.input_dropout 0.42820018710229774 +70 59 model.output_dropout 0.23095793755101968 +70 59 model.feature_map_dropout 0.35506976471191826 +70 59 model.embedding_dim 
1.0 +70 59 training.batch_size 0.0 +70 59 training.label_smoothing 0.1481112128727185 +70 60 model.output_channels 26.0 +70 60 model.input_dropout 0.1787103728932929 +70 60 model.output_dropout 0.3466204906946239 +70 60 model.feature_map_dropout 0.13812565708708796 +70 60 model.embedding_dim 2.0 +70 60 training.batch_size 2.0 +70 60 training.label_smoothing 0.023503677208010543 +70 61 model.output_channels 54.0 +70 61 model.input_dropout 0.4240503754889846 +70 61 model.output_dropout 0.10435906958470847 +70 61 model.feature_map_dropout 0.12358616272730133 +70 61 model.embedding_dim 1.0 +70 61 training.batch_size 2.0 +70 61 training.label_smoothing 0.20823903687823347 +70 62 model.output_channels 35.0 +70 62 model.input_dropout 0.22748957241119278 +70 62 model.output_dropout 0.2703557764630103 +70 62 model.feature_map_dropout 0.21751192284682436 +70 62 model.embedding_dim 2.0 +70 62 training.batch_size 0.0 +70 62 training.label_smoothing 0.15755232046710174 +70 63 model.output_channels 22.0 +70 63 model.input_dropout 0.24899700982753664 +70 63 model.output_dropout 0.16007701451154194 +70 63 model.feature_map_dropout 0.3484616227277052 +70 63 model.embedding_dim 0.0 +70 63 training.batch_size 2.0 +70 63 training.label_smoothing 0.003972112672479229 +70 64 model.output_channels 26.0 +70 64 model.input_dropout 0.21222681298179014 +70 64 model.output_dropout 0.15795723111855003 +70 64 model.feature_map_dropout 0.05675227601024002 +70 64 model.embedding_dim 2.0 +70 64 training.batch_size 0.0 +70 64 training.label_smoothing 0.11267701258862124 +70 65 model.output_channels 60.0 +70 65 model.input_dropout 0.16069159249490084 +70 65 model.output_dropout 0.42558827260714294 +70 65 model.feature_map_dropout 0.18803213683937636 +70 65 model.embedding_dim 1.0 +70 65 training.batch_size 1.0 +70 65 training.label_smoothing 0.7554294309594087 +70 66 model.output_channels 23.0 +70 66 model.input_dropout 0.003348388462993679 +70 66 model.output_dropout 0.02552728327748488 +70 66 
model.feature_map_dropout 0.4043769243673383 +70 66 model.embedding_dim 2.0 +70 66 training.batch_size 1.0 +70 66 training.label_smoothing 0.030840585741505087 +70 67 model.output_channels 44.0 +70 67 model.input_dropout 0.4626934022547146 +70 67 model.output_dropout 0.29232854493631805 +70 67 model.feature_map_dropout 0.078625877598956 +70 67 model.embedding_dim 1.0 +70 67 training.batch_size 1.0 +70 67 training.label_smoothing 0.7859216978776231 +70 68 model.output_channels 38.0 +70 68 model.input_dropout 0.24332310609685975 +70 68 model.output_dropout 0.10997147104878369 +70 68 model.feature_map_dropout 0.4850839179456343 +70 68 model.embedding_dim 1.0 +70 68 training.batch_size 0.0 +70 68 training.label_smoothing 0.0024896516713883645 +70 69 model.output_channels 22.0 +70 69 model.input_dropout 0.028835168148799883 +70 69 model.output_dropout 0.25633779864161305 +70 69 model.feature_map_dropout 0.4028768340838959 +70 69 model.embedding_dim 0.0 +70 69 training.batch_size 1.0 +70 69 training.label_smoothing 0.1583277943264127 +70 70 model.output_channels 27.0 +70 70 model.input_dropout 0.44658264708304535 +70 70 model.output_dropout 0.42308803548164736 +70 70 model.feature_map_dropout 0.4277912093229068 +70 70 model.embedding_dim 2.0 +70 70 training.batch_size 2.0 +70 70 training.label_smoothing 0.0013150552591960456 +70 71 model.output_channels 45.0 +70 71 model.input_dropout 0.1975391209671175 +70 71 model.output_dropout 0.22068383907906458 +70 71 model.feature_map_dropout 0.180232783451722 +70 71 model.embedding_dim 2.0 +70 71 training.batch_size 1.0 +70 71 training.label_smoothing 0.6480485469463914 +70 72 model.output_channels 35.0 +70 72 model.input_dropout 0.35386375682750326 +70 72 model.output_dropout 0.12040889750794231 +70 72 model.feature_map_dropout 0.14443010367117853 +70 72 model.embedding_dim 2.0 +70 72 training.batch_size 2.0 +70 72 training.label_smoothing 0.06172046258610119 +70 73 model.output_channels 60.0 +70 73 model.input_dropout 
0.070617142057958 +70 73 model.output_dropout 0.46731009203833435 +70 73 model.feature_map_dropout 0.24886832234521417 +70 73 model.embedding_dim 1.0 +70 73 training.batch_size 2.0 +70 73 training.label_smoothing 0.2785014587032957 +70 74 model.output_channels 37.0 +70 74 model.input_dropout 0.4202617525997973 +70 74 model.output_dropout 0.3851657921571818 +70 74 model.feature_map_dropout 0.3085128938123284 +70 74 model.embedding_dim 1.0 +70 74 training.batch_size 1.0 +70 74 training.label_smoothing 0.0010278169492895439 +70 75 model.output_channels 48.0 +70 75 model.input_dropout 0.0638479700648581 +70 75 model.output_dropout 0.22696555350292102 +70 75 model.feature_map_dropout 0.06366393019878352 +70 75 model.embedding_dim 1.0 +70 75 training.batch_size 1.0 +70 75 training.label_smoothing 0.013132809815552328 +70 76 model.output_channels 38.0 +70 76 model.input_dropout 0.2959408045378457 +70 76 model.output_dropout 0.35305149724181073 +70 76 model.feature_map_dropout 0.3971176279861731 +70 76 model.embedding_dim 1.0 +70 76 training.batch_size 1.0 +70 76 training.label_smoothing 0.1498089039552948 +70 77 model.output_channels 63.0 +70 77 model.input_dropout 0.27615564315306307 +70 77 model.output_dropout 0.26371920760962675 +70 77 model.feature_map_dropout 0.34457477770725015 +70 77 model.embedding_dim 2.0 +70 77 training.batch_size 0.0 +70 77 training.label_smoothing 0.00957184736158323 +70 78 model.output_channels 44.0 +70 78 model.input_dropout 0.009052536478101891 +70 78 model.output_dropout 0.3147532524403875 +70 78 model.feature_map_dropout 0.18670611256750097 +70 78 model.embedding_dim 0.0 +70 78 training.batch_size 0.0 +70 78 training.label_smoothing 0.0068049443613874255 +70 79 model.output_channels 55.0 +70 79 model.input_dropout 0.4467538126747659 +70 79 model.output_dropout 0.3325505863126133 +70 79 model.feature_map_dropout 0.2147715774776479 +70 79 model.embedding_dim 1.0 +70 79 training.batch_size 1.0 +70 79 training.label_smoothing 
0.0239083619926735 +70 80 model.output_channels 34.0 +70 80 model.input_dropout 0.47059998793596264 +70 80 model.output_dropout 0.030320917423295624 +70 80 model.feature_map_dropout 0.36788927259896825 +70 80 model.embedding_dim 0.0 +70 80 training.batch_size 1.0 +70 80 training.label_smoothing 0.5511734425162913 +70 81 model.output_channels 41.0 +70 81 model.input_dropout 0.28344324121593156 +70 81 model.output_dropout 0.26267676183406025 +70 81 model.feature_map_dropout 0.385487246268842 +70 81 model.embedding_dim 1.0 +70 81 training.batch_size 0.0 +70 81 training.label_smoothing 0.026580168565752037 +70 82 model.output_channels 51.0 +70 82 model.input_dropout 0.19752071211589006 +70 82 model.output_dropout 0.13247376704206165 +70 82 model.feature_map_dropout 0.46837777723793095 +70 82 model.embedding_dim 0.0 +70 82 training.batch_size 1.0 +70 82 training.label_smoothing 0.012957425928683887 +70 83 model.output_channels 48.0 +70 83 model.input_dropout 0.26070344703543097 +70 83 model.output_dropout 0.09287538439504844 +70 83 model.feature_map_dropout 0.27866751707001497 +70 83 model.embedding_dim 0.0 +70 83 training.batch_size 0.0 +70 83 training.label_smoothing 0.016045257411081072 +70 84 model.output_channels 31.0 +70 84 model.input_dropout 0.26226084649401726 +70 84 model.output_dropout 0.4179760479428628 +70 84 model.feature_map_dropout 0.1419968529469841 +70 84 model.embedding_dim 0.0 +70 84 training.batch_size 0.0 +70 84 training.label_smoothing 0.04894272277344455 +70 85 model.output_channels 26.0 +70 85 model.input_dropout 0.18986514264807325 +70 85 model.output_dropout 0.31146173266618543 +70 85 model.feature_map_dropout 0.4372097512705446 +70 85 model.embedding_dim 0.0 +70 85 training.batch_size 1.0 +70 85 training.label_smoothing 0.002360075347114652 +70 86 model.output_channels 44.0 +70 86 model.input_dropout 0.0633262399216028 +70 86 model.output_dropout 0.04588765924004923 +70 86 model.feature_map_dropout 0.061610388346314715 +70 86 
model.embedding_dim 2.0 +70 86 training.batch_size 2.0 +70 86 training.label_smoothing 0.0016924767535364457 +70 87 model.output_channels 57.0 +70 87 model.input_dropout 0.4927276581098706 +70 87 model.output_dropout 0.223441857472446 +70 87 model.feature_map_dropout 0.2320223672841245 +70 87 model.embedding_dim 2.0 +70 87 training.batch_size 2.0 +70 87 training.label_smoothing 0.7089274477419988 +70 88 model.output_channels 25.0 +70 88 model.input_dropout 0.06429880865865834 +70 88 model.output_dropout 0.11882428758221092 +70 88 model.feature_map_dropout 0.2086972367039131 +70 88 model.embedding_dim 0.0 +70 88 training.batch_size 0.0 +70 88 training.label_smoothing 0.00977260879478734 +70 89 model.output_channels 41.0 +70 89 model.input_dropout 0.22476559536870772 +70 89 model.output_dropout 0.11519879648610354 +70 89 model.feature_map_dropout 0.48839585749215014 +70 89 model.embedding_dim 1.0 +70 89 training.batch_size 0.0 +70 89 training.label_smoothing 0.3040140207805776 +70 90 model.output_channels 50.0 +70 90 model.input_dropout 0.22390310483013925 +70 90 model.output_dropout 0.40537825180283815 +70 90 model.feature_map_dropout 0.15421429194536468 +70 90 model.embedding_dim 2.0 +70 90 training.batch_size 1.0 +70 90 training.label_smoothing 0.001227820233205044 +70 91 model.output_channels 35.0 +70 91 model.input_dropout 0.017643110744638868 +70 91 model.output_dropout 0.2580275112540383 +70 91 model.feature_map_dropout 0.3275312570732749 +70 91 model.embedding_dim 0.0 +70 91 training.batch_size 0.0 +70 91 training.label_smoothing 0.002179154299256739 +70 92 model.output_channels 43.0 +70 92 model.input_dropout 0.10481184293481233 +70 92 model.output_dropout 0.3142712328837764 +70 92 model.feature_map_dropout 0.11777044966065864 +70 92 model.embedding_dim 2.0 +70 92 training.batch_size 0.0 +70 92 training.label_smoothing 0.08093839793820562 +70 93 model.output_channels 20.0 +70 93 model.input_dropout 0.018290067782290087 +70 93 model.output_dropout 
0.09492876069337702 +70 93 model.feature_map_dropout 0.4184426711967607 +70 93 model.embedding_dim 1.0 +70 93 training.batch_size 1.0 +70 93 training.label_smoothing 0.004233444580589844 +70 94 model.output_channels 59.0 +70 94 model.input_dropout 0.04232767746509214 +70 94 model.output_dropout 0.4548495792971271 +70 94 model.feature_map_dropout 0.02617952176696775 +70 94 model.embedding_dim 1.0 +70 94 training.batch_size 1.0 +70 94 training.label_smoothing 0.2893265521989186 +70 95 model.output_channels 46.0 +70 95 model.input_dropout 0.3163762982485451 +70 95 model.output_dropout 0.3242724521544641 +70 95 model.feature_map_dropout 0.27801088052313616 +70 95 model.embedding_dim 1.0 +70 95 training.batch_size 0.0 +70 95 training.label_smoothing 0.008654845765816507 +70 96 model.output_channels 54.0 +70 96 model.input_dropout 0.10984749770611107 +70 96 model.output_dropout 0.22948929325410367 +70 96 model.feature_map_dropout 0.2921999166620634 +70 96 model.embedding_dim 0.0 +70 96 training.batch_size 0.0 +70 96 training.label_smoothing 0.9852987473932733 +70 97 model.output_channels 39.0 +70 97 model.input_dropout 0.3200920119872484 +70 97 model.output_dropout 0.41089603416063974 +70 97 model.feature_map_dropout 0.1601895474843243 +70 97 model.embedding_dim 2.0 +70 97 training.batch_size 0.0 +70 97 training.label_smoothing 0.10594851676136502 +70 98 model.output_channels 22.0 +70 98 model.input_dropout 0.2507140865162249 +70 98 model.output_dropout 0.1984279268949415 +70 98 model.feature_map_dropout 0.46928117903360517 +70 98 model.embedding_dim 1.0 +70 98 training.batch_size 1.0 +70 98 training.label_smoothing 0.01305897645372306 +70 99 model.output_channels 19.0 +70 99 model.input_dropout 0.17332078694040126 +70 99 model.output_dropout 0.480037529056181 +70 99 model.feature_map_dropout 0.10210356839152623 +70 99 model.embedding_dim 1.0 +70 99 training.batch_size 2.0 +70 99 training.label_smoothing 0.01249096735527538 +70 100 model.output_channels 46.0 +70 100 
model.input_dropout 0.2885158397481439 +70 100 model.output_dropout 0.1843338420555054 +70 100 model.feature_map_dropout 0.011524288428913954 +70 100 model.embedding_dim 1.0 +70 100 training.batch_size 0.0 +70 100 training.label_smoothing 0.8855647071557234 +70 1 dataset """kinships""" +70 1 model """conve""" +70 1 loss """bceaftersigmoid""" +70 1 regularizer """no""" +70 1 optimizer """adadelta""" +70 1 training_loop """lcwa""" +70 1 evaluator """rankbased""" +70 2 dataset """kinships""" +70 2 model """conve""" +70 2 loss """bceaftersigmoid""" +70 2 regularizer """no""" +70 2 optimizer """adadelta""" +70 2 training_loop """lcwa""" +70 2 evaluator """rankbased""" +70 3 dataset """kinships""" +70 3 model """conve""" +70 3 loss """bceaftersigmoid""" +70 3 regularizer """no""" +70 3 optimizer """adadelta""" +70 3 training_loop """lcwa""" +70 3 evaluator """rankbased""" +70 4 dataset """kinships""" +70 4 model """conve""" +70 4 loss """bceaftersigmoid""" +70 4 regularizer """no""" +70 4 optimizer """adadelta""" +70 4 training_loop """lcwa""" +70 4 evaluator """rankbased""" +70 5 dataset """kinships""" +70 5 model """conve""" +70 5 loss """bceaftersigmoid""" +70 5 regularizer """no""" +70 5 optimizer """adadelta""" +70 5 training_loop """lcwa""" +70 5 evaluator """rankbased""" +70 6 dataset """kinships""" +70 6 model """conve""" +70 6 loss """bceaftersigmoid""" +70 6 regularizer """no""" +70 6 optimizer """adadelta""" +70 6 training_loop """lcwa""" +70 6 evaluator """rankbased""" +70 7 dataset """kinships""" +70 7 model """conve""" +70 7 loss """bceaftersigmoid""" +70 7 regularizer """no""" +70 7 optimizer """adadelta""" +70 7 training_loop """lcwa""" +70 7 evaluator """rankbased""" +70 8 dataset """kinships""" +70 8 model """conve""" +70 8 loss """bceaftersigmoid""" +70 8 regularizer """no""" +70 8 optimizer """adadelta""" +70 8 training_loop """lcwa""" +70 8 evaluator """rankbased""" +70 9 dataset """kinships""" +70 9 model """conve""" +70 9 loss """bceaftersigmoid""" 
+70 9 regularizer """no""" +70 9 optimizer """adadelta""" +70 9 training_loop """lcwa""" +70 9 evaluator """rankbased""" +70 10 dataset """kinships""" +70 10 model """conve""" +70 10 loss """bceaftersigmoid""" +70 10 regularizer """no""" +70 10 optimizer """adadelta""" +70 10 training_loop """lcwa""" +70 10 evaluator """rankbased""" +70 11 dataset """kinships""" +70 11 model """conve""" +70 11 loss """bceaftersigmoid""" +70 11 regularizer """no""" +70 11 optimizer """adadelta""" +70 11 training_loop """lcwa""" +70 11 evaluator """rankbased""" +70 12 dataset """kinships""" +70 12 model """conve""" +70 12 loss """bceaftersigmoid""" +70 12 regularizer """no""" +70 12 optimizer """adadelta""" +70 12 training_loop """lcwa""" +70 12 evaluator """rankbased""" +70 13 dataset """kinships""" +70 13 model """conve""" +70 13 loss """bceaftersigmoid""" +70 13 regularizer """no""" +70 13 optimizer """adadelta""" +70 13 training_loop """lcwa""" +70 13 evaluator """rankbased""" +70 14 dataset """kinships""" +70 14 model """conve""" +70 14 loss """bceaftersigmoid""" +70 14 regularizer """no""" +70 14 optimizer """adadelta""" +70 14 training_loop """lcwa""" +70 14 evaluator """rankbased""" +70 15 dataset """kinships""" +70 15 model """conve""" +70 15 loss """bceaftersigmoid""" +70 15 regularizer """no""" +70 15 optimizer """adadelta""" +70 15 training_loop """lcwa""" +70 15 evaluator """rankbased""" +70 16 dataset """kinships""" +70 16 model """conve""" +70 16 loss """bceaftersigmoid""" +70 16 regularizer """no""" +70 16 optimizer """adadelta""" +70 16 training_loop """lcwa""" +70 16 evaluator """rankbased""" +70 17 dataset """kinships""" +70 17 model """conve""" +70 17 loss """bceaftersigmoid""" +70 17 regularizer """no""" +70 17 optimizer """adadelta""" +70 17 training_loop """lcwa""" +70 17 evaluator """rankbased""" +70 18 dataset """kinships""" +70 18 model """conve""" +70 18 loss """bceaftersigmoid""" +70 18 regularizer """no""" +70 18 optimizer """adadelta""" +70 18 
training_loop """lcwa""" +70 18 evaluator """rankbased""" +70 19 dataset """kinships""" +70 19 model """conve""" +70 19 loss """bceaftersigmoid""" +70 19 regularizer """no""" +70 19 optimizer """adadelta""" +70 19 training_loop """lcwa""" +70 19 evaluator """rankbased""" +70 20 dataset """kinships""" +70 20 model """conve""" +70 20 loss """bceaftersigmoid""" +70 20 regularizer """no""" +70 20 optimizer """adadelta""" +70 20 training_loop """lcwa""" +70 20 evaluator """rankbased""" +70 21 dataset """kinships""" +70 21 model """conve""" +70 21 loss """bceaftersigmoid""" +70 21 regularizer """no""" +70 21 optimizer """adadelta""" +70 21 training_loop """lcwa""" +70 21 evaluator """rankbased""" +70 22 dataset """kinships""" +70 22 model """conve""" +70 22 loss """bceaftersigmoid""" +70 22 regularizer """no""" +70 22 optimizer """adadelta""" +70 22 training_loop """lcwa""" +70 22 evaluator """rankbased""" +70 23 dataset """kinships""" +70 23 model """conve""" +70 23 loss """bceaftersigmoid""" +70 23 regularizer """no""" +70 23 optimizer """adadelta""" +70 23 training_loop """lcwa""" +70 23 evaluator """rankbased""" +70 24 dataset """kinships""" +70 24 model """conve""" +70 24 loss """bceaftersigmoid""" +70 24 regularizer """no""" +70 24 optimizer """adadelta""" +70 24 training_loop """lcwa""" +70 24 evaluator """rankbased""" +70 25 dataset """kinships""" +70 25 model """conve""" +70 25 loss """bceaftersigmoid""" +70 25 regularizer """no""" +70 25 optimizer """adadelta""" +70 25 training_loop """lcwa""" +70 25 evaluator """rankbased""" +70 26 dataset """kinships""" +70 26 model """conve""" +70 26 loss """bceaftersigmoid""" +70 26 regularizer """no""" +70 26 optimizer """adadelta""" +70 26 training_loop """lcwa""" +70 26 evaluator """rankbased""" +70 27 dataset """kinships""" +70 27 model """conve""" +70 27 loss """bceaftersigmoid""" +70 27 regularizer """no""" +70 27 optimizer """adadelta""" +70 27 training_loop """lcwa""" +70 27 evaluator """rankbased""" +70 28 dataset 
"""kinships""" +70 28 model """conve""" +70 28 loss """bceaftersigmoid""" +70 28 regularizer """no""" +70 28 optimizer """adadelta""" +70 28 training_loop """lcwa""" +70 28 evaluator """rankbased""" +70 29 dataset """kinships""" +70 29 model """conve""" +70 29 loss """bceaftersigmoid""" +70 29 regularizer """no""" +70 29 optimizer """adadelta""" +70 29 training_loop """lcwa""" +70 29 evaluator """rankbased""" +70 30 dataset """kinships""" +70 30 model """conve""" +70 30 loss """bceaftersigmoid""" +70 30 regularizer """no""" +70 30 optimizer """adadelta""" +70 30 training_loop """lcwa""" +70 30 evaluator """rankbased""" +70 31 dataset """kinships""" +70 31 model """conve""" +70 31 loss """bceaftersigmoid""" +70 31 regularizer """no""" +70 31 optimizer """adadelta""" +70 31 training_loop """lcwa""" +70 31 evaluator """rankbased""" +70 32 dataset """kinships""" +70 32 model """conve""" +70 32 loss """bceaftersigmoid""" +70 32 regularizer """no""" +70 32 optimizer """adadelta""" +70 32 training_loop """lcwa""" +70 32 evaluator """rankbased""" +70 33 dataset """kinships""" +70 33 model """conve""" +70 33 loss """bceaftersigmoid""" +70 33 regularizer """no""" +70 33 optimizer """adadelta""" +70 33 training_loop """lcwa""" +70 33 evaluator """rankbased""" +70 34 dataset """kinships""" +70 34 model """conve""" +70 34 loss """bceaftersigmoid""" +70 34 regularizer """no""" +70 34 optimizer """adadelta""" +70 34 training_loop """lcwa""" +70 34 evaluator """rankbased""" +70 35 dataset """kinships""" +70 35 model """conve""" +70 35 loss """bceaftersigmoid""" +70 35 regularizer """no""" +70 35 optimizer """adadelta""" +70 35 training_loop """lcwa""" +70 35 evaluator """rankbased""" +70 36 dataset """kinships""" +70 36 model """conve""" +70 36 loss """bceaftersigmoid""" +70 36 regularizer """no""" +70 36 optimizer """adadelta""" +70 36 training_loop """lcwa""" +70 36 evaluator """rankbased""" +70 37 dataset """kinships""" +70 37 model """conve""" +70 37 loss """bceaftersigmoid""" 
+70 37 regularizer """no""" +70 37 optimizer """adadelta""" +70 37 training_loop """lcwa""" +70 37 evaluator """rankbased""" +70 38 dataset """kinships""" +70 38 model """conve""" +70 38 loss """bceaftersigmoid""" +70 38 regularizer """no""" +70 38 optimizer """adadelta""" +70 38 training_loop """lcwa""" +70 38 evaluator """rankbased""" +70 39 dataset """kinships""" +70 39 model """conve""" +70 39 loss """bceaftersigmoid""" +70 39 regularizer """no""" +70 39 optimizer """adadelta""" +70 39 training_loop """lcwa""" +70 39 evaluator """rankbased""" +70 40 dataset """kinships""" +70 40 model """conve""" +70 40 loss """bceaftersigmoid""" +70 40 regularizer """no""" +70 40 optimizer """adadelta""" +70 40 training_loop """lcwa""" +70 40 evaluator """rankbased""" +70 41 dataset """kinships""" +70 41 model """conve""" +70 41 loss """bceaftersigmoid""" +70 41 regularizer """no""" +70 41 optimizer """adadelta""" +70 41 training_loop """lcwa""" +70 41 evaluator """rankbased""" +70 42 dataset """kinships""" +70 42 model """conve""" +70 42 loss """bceaftersigmoid""" +70 42 regularizer """no""" +70 42 optimizer """adadelta""" +70 42 training_loop """lcwa""" +70 42 evaluator """rankbased""" +70 43 dataset """kinships""" +70 43 model """conve""" +70 43 loss """bceaftersigmoid""" +70 43 regularizer """no""" +70 43 optimizer """adadelta""" +70 43 training_loop """lcwa""" +70 43 evaluator """rankbased""" +70 44 dataset """kinships""" +70 44 model """conve""" +70 44 loss """bceaftersigmoid""" +70 44 regularizer """no""" +70 44 optimizer """adadelta""" +70 44 training_loop """lcwa""" +70 44 evaluator """rankbased""" +70 45 dataset """kinships""" +70 45 model """conve""" +70 45 loss """bceaftersigmoid""" +70 45 regularizer """no""" +70 45 optimizer """adadelta""" +70 45 training_loop """lcwa""" +70 45 evaluator """rankbased""" +70 46 dataset """kinships""" +70 46 model """conve""" +70 46 loss """bceaftersigmoid""" +70 46 regularizer """no""" +70 46 optimizer """adadelta""" +70 46 
training_loop """lcwa""" +70 46 evaluator """rankbased""" +70 47 dataset """kinships""" +70 47 model """conve""" +70 47 loss """bceaftersigmoid""" +70 47 regularizer """no""" +70 47 optimizer """adadelta""" +70 47 training_loop """lcwa""" +70 47 evaluator """rankbased""" +70 48 dataset """kinships""" +70 48 model """conve""" +70 48 loss """bceaftersigmoid""" +70 48 regularizer """no""" +70 48 optimizer """adadelta""" +70 48 training_loop """lcwa""" +70 48 evaluator """rankbased""" +70 49 dataset """kinships""" +70 49 model """conve""" +70 49 loss """bceaftersigmoid""" +70 49 regularizer """no""" +70 49 optimizer """adadelta""" +70 49 training_loop """lcwa""" +70 49 evaluator """rankbased""" +70 50 dataset """kinships""" +70 50 model """conve""" +70 50 loss """bceaftersigmoid""" +70 50 regularizer """no""" +70 50 optimizer """adadelta""" +70 50 training_loop """lcwa""" +70 50 evaluator """rankbased""" +70 51 dataset """kinships""" +70 51 model """conve""" +70 51 loss """bceaftersigmoid""" +70 51 regularizer """no""" +70 51 optimizer """adadelta""" +70 51 training_loop """lcwa""" +70 51 evaluator """rankbased""" +70 52 dataset """kinships""" +70 52 model """conve""" +70 52 loss """bceaftersigmoid""" +70 52 regularizer """no""" +70 52 optimizer """adadelta""" +70 52 training_loop """lcwa""" +70 52 evaluator """rankbased""" +70 53 dataset """kinships""" +70 53 model """conve""" +70 53 loss """bceaftersigmoid""" +70 53 regularizer """no""" +70 53 optimizer """adadelta""" +70 53 training_loop """lcwa""" +70 53 evaluator """rankbased""" +70 54 dataset """kinships""" +70 54 model """conve""" +70 54 loss """bceaftersigmoid""" +70 54 regularizer """no""" +70 54 optimizer """adadelta""" +70 54 training_loop """lcwa""" +70 54 evaluator """rankbased""" +70 55 dataset """kinships""" +70 55 model """conve""" +70 55 loss """bceaftersigmoid""" +70 55 regularizer """no""" +70 55 optimizer """adadelta""" +70 55 training_loop """lcwa""" +70 55 evaluator """rankbased""" +70 56 dataset 
"""kinships""" +70 56 model """conve""" +70 56 loss """bceaftersigmoid""" +70 56 regularizer """no""" +70 56 optimizer """adadelta""" +70 56 training_loop """lcwa""" +70 56 evaluator """rankbased""" +70 57 dataset """kinships""" +70 57 model """conve""" +70 57 loss """bceaftersigmoid""" +70 57 regularizer """no""" +70 57 optimizer """adadelta""" +70 57 training_loop """lcwa""" +70 57 evaluator """rankbased""" +70 58 dataset """kinships""" +70 58 model """conve""" +70 58 loss """bceaftersigmoid""" +70 58 regularizer """no""" +70 58 optimizer """adadelta""" +70 58 training_loop """lcwa""" +70 58 evaluator """rankbased""" +70 59 dataset """kinships""" +70 59 model """conve""" +70 59 loss """bceaftersigmoid""" +70 59 regularizer """no""" +70 59 optimizer """adadelta""" +70 59 training_loop """lcwa""" +70 59 evaluator """rankbased""" +70 60 dataset """kinships""" +70 60 model """conve""" +70 60 loss """bceaftersigmoid""" +70 60 regularizer """no""" +70 60 optimizer """adadelta""" +70 60 training_loop """lcwa""" +70 60 evaluator """rankbased""" +70 61 dataset """kinships""" +70 61 model """conve""" +70 61 loss """bceaftersigmoid""" +70 61 regularizer """no""" +70 61 optimizer """adadelta""" +70 61 training_loop """lcwa""" +70 61 evaluator """rankbased""" +70 62 dataset """kinships""" +70 62 model """conve""" +70 62 loss """bceaftersigmoid""" +70 62 regularizer """no""" +70 62 optimizer """adadelta""" +70 62 training_loop """lcwa""" +70 62 evaluator """rankbased""" +70 63 dataset """kinships""" +70 63 model """conve""" +70 63 loss """bceaftersigmoid""" +70 63 regularizer """no""" +70 63 optimizer """adadelta""" +70 63 training_loop """lcwa""" +70 63 evaluator """rankbased""" +70 64 dataset """kinships""" +70 64 model """conve""" +70 64 loss """bceaftersigmoid""" +70 64 regularizer """no""" +70 64 optimizer """adadelta""" +70 64 training_loop """lcwa""" +70 64 evaluator """rankbased""" +70 65 dataset """kinships""" +70 65 model """conve""" +70 65 loss """bceaftersigmoid""" 
+70 65 regularizer """no""" +70 65 optimizer """adadelta""" +70 65 training_loop """lcwa""" +70 65 evaluator """rankbased""" +70 66 dataset """kinships""" +70 66 model """conve""" +70 66 loss """bceaftersigmoid""" +70 66 regularizer """no""" +70 66 optimizer """adadelta""" +70 66 training_loop """lcwa""" +70 66 evaluator """rankbased""" +70 67 dataset """kinships""" +70 67 model """conve""" +70 67 loss """bceaftersigmoid""" +70 67 regularizer """no""" +70 67 optimizer """adadelta""" +70 67 training_loop """lcwa""" +70 67 evaluator """rankbased""" +70 68 dataset """kinships""" +70 68 model """conve""" +70 68 loss """bceaftersigmoid""" +70 68 regularizer """no""" +70 68 optimizer """adadelta""" +70 68 training_loop """lcwa""" +70 68 evaluator """rankbased""" +70 69 dataset """kinships""" +70 69 model """conve""" +70 69 loss """bceaftersigmoid""" +70 69 regularizer """no""" +70 69 optimizer """adadelta""" +70 69 training_loop """lcwa""" +70 69 evaluator """rankbased""" +70 70 dataset """kinships""" +70 70 model """conve""" +70 70 loss """bceaftersigmoid""" +70 70 regularizer """no""" +70 70 optimizer """adadelta""" +70 70 training_loop """lcwa""" +70 70 evaluator """rankbased""" +70 71 dataset """kinships""" +70 71 model """conve""" +70 71 loss """bceaftersigmoid""" +70 71 regularizer """no""" +70 71 optimizer """adadelta""" +70 71 training_loop """lcwa""" +70 71 evaluator """rankbased""" +70 72 dataset """kinships""" +70 72 model """conve""" +70 72 loss """bceaftersigmoid""" +70 72 regularizer """no""" +70 72 optimizer """adadelta""" +70 72 training_loop """lcwa""" +70 72 evaluator """rankbased""" +70 73 dataset """kinships""" +70 73 model """conve""" +70 73 loss """bceaftersigmoid""" +70 73 regularizer """no""" +70 73 optimizer """adadelta""" +70 73 training_loop """lcwa""" +70 73 evaluator """rankbased""" +70 74 dataset """kinships""" +70 74 model """conve""" +70 74 loss """bceaftersigmoid""" +70 74 regularizer """no""" +70 74 optimizer """adadelta""" +70 74 
training_loop """lcwa""" +70 74 evaluator """rankbased""" +70 75 dataset """kinships""" +70 75 model """conve""" +70 75 loss """bceaftersigmoid""" +70 75 regularizer """no""" +70 75 optimizer """adadelta""" +70 75 training_loop """lcwa""" +70 75 evaluator """rankbased""" +70 76 dataset """kinships""" +70 76 model """conve""" +70 76 loss """bceaftersigmoid""" +70 76 regularizer """no""" +70 76 optimizer """adadelta""" +70 76 training_loop """lcwa""" +70 76 evaluator """rankbased""" +70 77 dataset """kinships""" +70 77 model """conve""" +70 77 loss """bceaftersigmoid""" +70 77 regularizer """no""" +70 77 optimizer """adadelta""" +70 77 training_loop """lcwa""" +70 77 evaluator """rankbased""" +70 78 dataset """kinships""" +70 78 model """conve""" +70 78 loss """bceaftersigmoid""" +70 78 regularizer """no""" +70 78 optimizer """adadelta""" +70 78 training_loop """lcwa""" +70 78 evaluator """rankbased""" +70 79 dataset """kinships""" +70 79 model """conve""" +70 79 loss """bceaftersigmoid""" +70 79 regularizer """no""" +70 79 optimizer """adadelta""" +70 79 training_loop """lcwa""" +70 79 evaluator """rankbased""" +70 80 dataset """kinships""" +70 80 model """conve""" +70 80 loss """bceaftersigmoid""" +70 80 regularizer """no""" +70 80 optimizer """adadelta""" +70 80 training_loop """lcwa""" +70 80 evaluator """rankbased""" +70 81 dataset """kinships""" +70 81 model """conve""" +70 81 loss """bceaftersigmoid""" +70 81 regularizer """no""" +70 81 optimizer """adadelta""" +70 81 training_loop """lcwa""" +70 81 evaluator """rankbased""" +70 82 dataset """kinships""" +70 82 model """conve""" +70 82 loss """bceaftersigmoid""" +70 82 regularizer """no""" +70 82 optimizer """adadelta""" +70 82 training_loop """lcwa""" +70 82 evaluator """rankbased""" +70 83 dataset """kinships""" +70 83 model """conve""" +70 83 loss """bceaftersigmoid""" +70 83 regularizer """no""" +70 83 optimizer """adadelta""" +70 83 training_loop """lcwa""" +70 83 evaluator """rankbased""" +70 84 dataset 
"""kinships""" +70 84 model """conve""" +70 84 loss """bceaftersigmoid""" +70 84 regularizer """no""" +70 84 optimizer """adadelta""" +70 84 training_loop """lcwa""" +70 84 evaluator """rankbased""" +70 85 dataset """kinships""" +70 85 model """conve""" +70 85 loss """bceaftersigmoid""" +70 85 regularizer """no""" +70 85 optimizer """adadelta""" +70 85 training_loop """lcwa""" +70 85 evaluator """rankbased""" +70 86 dataset """kinships""" +70 86 model """conve""" +70 86 loss """bceaftersigmoid""" +70 86 regularizer """no""" +70 86 optimizer """adadelta""" +70 86 training_loop """lcwa""" +70 86 evaluator """rankbased""" +70 87 dataset """kinships""" +70 87 model """conve""" +70 87 loss """bceaftersigmoid""" +70 87 regularizer """no""" +70 87 optimizer """adadelta""" +70 87 training_loop """lcwa""" +70 87 evaluator """rankbased""" +70 88 dataset """kinships""" +70 88 model """conve""" +70 88 loss """bceaftersigmoid""" +70 88 regularizer """no""" +70 88 optimizer """adadelta""" +70 88 training_loop """lcwa""" +70 88 evaluator """rankbased""" +70 89 dataset """kinships""" +70 89 model """conve""" +70 89 loss """bceaftersigmoid""" +70 89 regularizer """no""" +70 89 optimizer """adadelta""" +70 89 training_loop """lcwa""" +70 89 evaluator """rankbased""" +70 90 dataset """kinships""" +70 90 model """conve""" +70 90 loss """bceaftersigmoid""" +70 90 regularizer """no""" +70 90 optimizer """adadelta""" +70 90 training_loop """lcwa""" +70 90 evaluator """rankbased""" +70 91 dataset """kinships""" +70 91 model """conve""" +70 91 loss """bceaftersigmoid""" +70 91 regularizer """no""" +70 91 optimizer """adadelta""" +70 91 training_loop """lcwa""" +70 91 evaluator """rankbased""" +70 92 dataset """kinships""" +70 92 model """conve""" +70 92 loss """bceaftersigmoid""" +70 92 regularizer """no""" +70 92 optimizer """adadelta""" +70 92 training_loop """lcwa""" +70 92 evaluator """rankbased""" +70 93 dataset """kinships""" +70 93 model """conve""" +70 93 loss """bceaftersigmoid""" 
+70 93 regularizer """no""" +70 93 optimizer """adadelta""" +70 93 training_loop """lcwa""" +70 93 evaluator """rankbased""" +70 94 dataset """kinships""" +70 94 model """conve""" +70 94 loss """bceaftersigmoid""" +70 94 regularizer """no""" +70 94 optimizer """adadelta""" +70 94 training_loop """lcwa""" +70 94 evaluator """rankbased""" +70 95 dataset """kinships""" +70 95 model """conve""" +70 95 loss """bceaftersigmoid""" +70 95 regularizer """no""" +70 95 optimizer """adadelta""" +70 95 training_loop """lcwa""" +70 95 evaluator """rankbased""" +70 96 dataset """kinships""" +70 96 model """conve""" +70 96 loss """bceaftersigmoid""" +70 96 regularizer """no""" +70 96 optimizer """adadelta""" +70 96 training_loop """lcwa""" +70 96 evaluator """rankbased""" +70 97 dataset """kinships""" +70 97 model """conve""" +70 97 loss """bceaftersigmoid""" +70 97 regularizer """no""" +70 97 optimizer """adadelta""" +70 97 training_loop """lcwa""" +70 97 evaluator """rankbased""" +70 98 dataset """kinships""" +70 98 model """conve""" +70 98 loss """bceaftersigmoid""" +70 98 regularizer """no""" +70 98 optimizer """adadelta""" +70 98 training_loop """lcwa""" +70 98 evaluator """rankbased""" +70 99 dataset """kinships""" +70 99 model """conve""" +70 99 loss """bceaftersigmoid""" +70 99 regularizer """no""" +70 99 optimizer """adadelta""" +70 99 training_loop """lcwa""" +70 99 evaluator """rankbased""" +70 100 dataset """kinships""" +70 100 model """conve""" +70 100 loss """bceaftersigmoid""" +70 100 regularizer """no""" +70 100 optimizer """adadelta""" +70 100 training_loop """lcwa""" +70 100 evaluator """rankbased""" +71 1 model.output_channels 31.0 +71 1 model.input_dropout 0.26102790882970445 +71 1 model.output_dropout 0.35977687739245645 +71 1 model.feature_map_dropout 0.39185142857904953 +71 1 model.embedding_dim 2.0 +71 1 training.batch_size 2.0 +71 1 training.label_smoothing 0.01960434655586619 +71 2 model.output_channels 45.0 +71 2 model.input_dropout 0.3525901132417661 
+71 2 model.output_dropout 0.07701901881510892 +71 2 model.feature_map_dropout 0.07199868128739467 +71 2 model.embedding_dim 0.0 +71 2 training.batch_size 1.0 +71 2 training.label_smoothing 0.0012420320075074783 +71 3 model.output_channels 60.0 +71 3 model.input_dropout 0.1594775078317225 +71 3 model.output_dropout 0.35430298102907615 +71 3 model.feature_map_dropout 0.3206225008770375 +71 3 model.embedding_dim 2.0 +71 3 training.batch_size 2.0 +71 3 training.label_smoothing 0.2508366966799545 +71 4 model.output_channels 50.0 +71 4 model.input_dropout 0.3845340838708541 +71 4 model.output_dropout 0.12286864374354672 +71 4 model.feature_map_dropout 0.11172028599011796 +71 4 model.embedding_dim 0.0 +71 4 training.batch_size 0.0 +71 4 training.label_smoothing 0.0013891783286969705 +71 5 model.output_channels 26.0 +71 5 model.input_dropout 0.12710712518611855 +71 5 model.output_dropout 0.4412163952863155 +71 5 model.feature_map_dropout 0.158419835154479 +71 5 model.embedding_dim 0.0 +71 5 training.batch_size 2.0 +71 5 training.label_smoothing 0.004032945573789691 +71 6 model.output_channels 31.0 +71 6 model.input_dropout 0.18162764257796732 +71 6 model.output_dropout 0.4321836776916848 +71 6 model.feature_map_dropout 0.1493205728842475 +71 6 model.embedding_dim 2.0 +71 6 training.batch_size 1.0 +71 6 training.label_smoothing 0.25516266287850725 +71 7 model.output_channels 26.0 +71 7 model.input_dropout 0.1361056903146115 +71 7 model.output_dropout 0.1553191294641239 +71 7 model.feature_map_dropout 0.2934255033357598 +71 7 model.embedding_dim 0.0 +71 7 training.batch_size 0.0 +71 7 training.label_smoothing 0.01784485806147783 +71 8 model.output_channels 31.0 +71 8 model.input_dropout 0.11048890374411713 +71 8 model.output_dropout 0.2636414864270683 +71 8 model.feature_map_dropout 0.3366284101177314 +71 8 model.embedding_dim 1.0 +71 8 training.batch_size 2.0 +71 8 training.label_smoothing 0.004752812692618223 +71 9 model.output_channels 54.0 +71 9 model.input_dropout 
0.4730657274618271 +71 9 model.output_dropout 0.19676581654787845 +71 9 model.feature_map_dropout 0.4910611243305085 +71 9 model.embedding_dim 0.0 +71 9 training.batch_size 2.0 +71 9 training.label_smoothing 0.5471178039299623 +71 10 model.output_channels 29.0 +71 10 model.input_dropout 0.48380525046561385 +71 10 model.output_dropout 0.33770176245269723 +71 10 model.feature_map_dropout 0.4046983994404185 +71 10 model.embedding_dim 0.0 +71 10 training.batch_size 1.0 +71 10 training.label_smoothing 0.0011376393956626834 +71 11 model.output_channels 19.0 +71 11 model.input_dropout 0.3789612952835251 +71 11 model.output_dropout 0.48051232825719653 +71 11 model.feature_map_dropout 0.12497193716022709 +71 11 model.embedding_dim 0.0 +71 11 training.batch_size 1.0 +71 11 training.label_smoothing 0.07162184676884996 +71 12 model.output_channels 22.0 +71 12 model.input_dropout 0.22707105814221246 +71 12 model.output_dropout 0.21358908862909304 +71 12 model.feature_map_dropout 0.27942621554783925 +71 12 model.embedding_dim 0.0 +71 12 training.batch_size 0.0 +71 12 training.label_smoothing 0.05427500181683591 +71 13 model.output_channels 31.0 +71 13 model.input_dropout 0.3161159554040614 +71 13 model.output_dropout 0.44790160379979016 +71 13 model.feature_map_dropout 0.45309419846900784 +71 13 model.embedding_dim 1.0 +71 13 training.batch_size 0.0 +71 13 training.label_smoothing 0.0709706217762413 +71 14 model.output_channels 46.0 +71 14 model.input_dropout 0.49042033675291513 +71 14 model.output_dropout 0.11805649168999222 +71 14 model.feature_map_dropout 0.47431416595776826 +71 14 model.embedding_dim 1.0 +71 14 training.batch_size 2.0 +71 14 training.label_smoothing 0.0015804235667700047 +71 15 model.output_channels 35.0 +71 15 model.input_dropout 0.25424440464527137 +71 15 model.output_dropout 0.045880602881280985 +71 15 model.feature_map_dropout 0.0852143773704952 +71 15 model.embedding_dim 1.0 +71 15 training.batch_size 2.0 +71 15 training.label_smoothing 
0.3665606661220408 +71 16 model.output_channels 27.0 +71 16 model.input_dropout 0.188595183103405 +71 16 model.output_dropout 0.4166483468686828 +71 16 model.feature_map_dropout 0.12345303709571087 +71 16 model.embedding_dim 2.0 +71 16 training.batch_size 2.0 +71 16 training.label_smoothing 0.3686657503459579 +71 17 model.output_channels 21.0 +71 17 model.input_dropout 0.312340213135031 +71 17 model.output_dropout 0.10474761155695572 +71 17 model.feature_map_dropout 0.308139943373502 +71 17 model.embedding_dim 2.0 +71 17 training.batch_size 2.0 +71 17 training.label_smoothing 0.0028437668710293044 +71 18 model.output_channels 43.0 +71 18 model.input_dropout 0.20754471169800837 +71 18 model.output_dropout 0.4889497964590465 +71 18 model.feature_map_dropout 0.4500308965311517 +71 18 model.embedding_dim 1.0 +71 18 training.batch_size 1.0 +71 18 training.label_smoothing 0.3635114559932357 +71 19 model.output_channels 59.0 +71 19 model.input_dropout 0.1936491017784318 +71 19 model.output_dropout 0.15585221926061432 +71 19 model.feature_map_dropout 0.02943323996378605 +71 19 model.embedding_dim 0.0 +71 19 training.batch_size 1.0 +71 19 training.label_smoothing 0.0012518373352116386 +71 20 model.output_channels 49.0 +71 20 model.input_dropout 0.19519521905826753 +71 20 model.output_dropout 0.14315082154749276 +71 20 model.feature_map_dropout 0.30281843229621336 +71 20 model.embedding_dim 0.0 +71 20 training.batch_size 2.0 +71 20 training.label_smoothing 0.0011895638320539017 +71 21 model.output_channels 48.0 +71 21 model.input_dropout 0.41097323266593666 +71 21 model.output_dropout 0.1779333963583727 +71 21 model.feature_map_dropout 0.03059371891731094 +71 21 model.embedding_dim 0.0 +71 21 training.batch_size 2.0 +71 21 training.label_smoothing 0.0011307029709086405 +71 22 model.output_channels 20.0 +71 22 model.input_dropout 0.2737443005701412 +71 22 model.output_dropout 0.037148235030245835 +71 22 model.feature_map_dropout 0.10545758672335503 +71 22 model.embedding_dim 
1.0 +71 22 training.batch_size 1.0 +71 22 training.label_smoothing 0.5040476612648243 +71 23 model.output_channels 54.0 +71 23 model.input_dropout 0.42153732072684974 +71 23 model.output_dropout 0.36687313761487356 +71 23 model.feature_map_dropout 0.34767511433592846 +71 23 model.embedding_dim 2.0 +71 23 training.batch_size 1.0 +71 23 training.label_smoothing 0.3122814884866241 +71 24 model.output_channels 25.0 +71 24 model.input_dropout 0.1944469238806154 +71 24 model.output_dropout 0.23857355148243226 +71 24 model.feature_map_dropout 0.2202751021128868 +71 24 model.embedding_dim 0.0 +71 24 training.batch_size 0.0 +71 24 training.label_smoothing 0.15556973541081143 +71 25 model.output_channels 38.0 +71 25 model.input_dropout 0.036470461379353225 +71 25 model.output_dropout 0.4248250818559717 +71 25 model.feature_map_dropout 0.38571235635407614 +71 25 model.embedding_dim 2.0 +71 25 training.batch_size 1.0 +71 25 training.label_smoothing 0.0058861863580702105 +71 26 model.output_channels 21.0 +71 26 model.input_dropout 0.0372527953191385 +71 26 model.output_dropout 0.1352901946898718 +71 26 model.feature_map_dropout 0.22863621537212492 +71 26 model.embedding_dim 2.0 +71 26 training.batch_size 1.0 +71 26 training.label_smoothing 0.0012062344533536945 +71 27 model.output_channels 25.0 +71 27 model.input_dropout 0.15298182683901973 +71 27 model.output_dropout 0.3748143337088788 +71 27 model.feature_map_dropout 0.3002340016508104 +71 27 model.embedding_dim 1.0 +71 27 training.batch_size 0.0 +71 27 training.label_smoothing 0.013298501806669354 +71 28 model.output_channels 32.0 +71 28 model.input_dropout 0.44027979953101 +71 28 model.output_dropout 0.38172378530432954 +71 28 model.feature_map_dropout 0.026724142946117635 +71 28 model.embedding_dim 2.0 +71 28 training.batch_size 0.0 +71 28 training.label_smoothing 0.008391610131490437 +71 29 model.output_channels 63.0 +71 29 model.input_dropout 0.3111178455717242 +71 29 model.output_dropout 0.30252390945326385 +71 29 
model.feature_map_dropout 0.25854119315967317 +71 29 model.embedding_dim 1.0 +71 29 training.batch_size 2.0 +71 29 training.label_smoothing 0.0016867230184438918 +71 30 model.output_channels 36.0 +71 30 model.input_dropout 0.3841615532255838 +71 30 model.output_dropout 0.028034877592250707 +71 30 model.feature_map_dropout 0.41631731462437604 +71 30 model.embedding_dim 0.0 +71 30 training.batch_size 0.0 +71 30 training.label_smoothing 0.001613179800351734 +71 31 model.output_channels 33.0 +71 31 model.input_dropout 0.2679465359830022 +71 31 model.output_dropout 0.4657797187633998 +71 31 model.feature_map_dropout 0.0836204444872628 +71 31 model.embedding_dim 0.0 +71 31 training.batch_size 2.0 +71 31 training.label_smoothing 0.0018371111277858158 +71 32 model.output_channels 37.0 +71 32 model.input_dropout 0.24303291833340995 +71 32 model.output_dropout 0.33356896305553807 +71 32 model.feature_map_dropout 0.11739624339958882 +71 32 model.embedding_dim 1.0 +71 32 training.batch_size 0.0 +71 32 training.label_smoothing 0.47360841104554363 +71 33 model.output_channels 19.0 +71 33 model.input_dropout 0.3595341225006019 +71 33 model.output_dropout 0.4673760286954303 +71 33 model.feature_map_dropout 0.39484498403237805 +71 33 model.embedding_dim 2.0 +71 33 training.batch_size 1.0 +71 33 training.label_smoothing 0.9806701426494562 +71 34 model.output_channels 21.0 +71 34 model.input_dropout 0.28259124432143684 +71 34 model.output_dropout 0.39602328251326496 +71 34 model.feature_map_dropout 0.056911975154031236 +71 34 model.embedding_dim 0.0 +71 34 training.batch_size 2.0 +71 34 training.label_smoothing 0.9797041962723231 +71 35 model.output_channels 58.0 +71 35 model.input_dropout 0.2841150985212694 +71 35 model.output_dropout 0.1774976453903781 +71 35 model.feature_map_dropout 0.4868117974568035 +71 35 model.embedding_dim 1.0 +71 35 training.batch_size 1.0 +71 35 training.label_smoothing 0.010754468629896893 +71 36 model.output_channels 38.0 +71 36 model.input_dropout 
0.3036728001747891 +71 36 model.output_dropout 0.049999161438082806 +71 36 model.feature_map_dropout 0.2950742793966618 +71 36 model.embedding_dim 2.0 +71 36 training.batch_size 2.0 +71 36 training.label_smoothing 0.025462262879436905 +71 37 model.output_channels 57.0 +71 37 model.input_dropout 0.289269642199583 +71 37 model.output_dropout 0.17507799327215334 +71 37 model.feature_map_dropout 0.06908119444604521 +71 37 model.embedding_dim 2.0 +71 37 training.batch_size 0.0 +71 37 training.label_smoothing 0.07827692358203954 +71 38 model.output_channels 39.0 +71 38 model.input_dropout 0.018241595547289435 +71 38 model.output_dropout 0.04992738779023703 +71 38 model.feature_map_dropout 0.1156623282972582 +71 38 model.embedding_dim 2.0 +71 38 training.batch_size 2.0 +71 38 training.label_smoothing 0.30057792412598683 +71 39 model.output_channels 36.0 +71 39 model.input_dropout 0.10464000264097983 +71 39 model.output_dropout 0.13045958855284062 +71 39 model.feature_map_dropout 0.20678462426762456 +71 39 model.embedding_dim 0.0 +71 39 training.batch_size 2.0 +71 39 training.label_smoothing 0.07233404126405585 +71 40 model.output_channels 17.0 +71 40 model.input_dropout 0.14479743236991166 +71 40 model.output_dropout 0.47178276430554456 +71 40 model.feature_map_dropout 0.44895083804250663 +71 40 model.embedding_dim 0.0 +71 40 training.batch_size 0.0 +71 40 training.label_smoothing 0.006177535152196463 +71 41 model.output_channels 16.0 +71 41 model.input_dropout 0.49207768101792326 +71 41 model.output_dropout 0.12183809338342683 +71 41 model.feature_map_dropout 0.23314884278147346 +71 41 model.embedding_dim 1.0 +71 41 training.batch_size 1.0 +71 41 training.label_smoothing 0.39040745477778166 +71 42 model.output_channels 44.0 +71 42 model.input_dropout 0.2668057281842994 +71 42 model.output_dropout 0.48857362286767575 +71 42 model.feature_map_dropout 0.11430561071828266 +71 42 model.embedding_dim 2.0 +71 42 training.batch_size 1.0 +71 42 training.label_smoothing 
0.01567969295474877 +71 43 model.output_channels 62.0 +71 43 model.input_dropout 0.0585705274658585 +71 43 model.output_dropout 0.4443779265919176 +71 43 model.feature_map_dropout 0.18225711768697478 +71 43 model.embedding_dim 0.0 +71 43 training.batch_size 1.0 +71 43 training.label_smoothing 0.04902740137451737 +71 44 model.output_channels 50.0 +71 44 model.input_dropout 0.21565938149088082 +71 44 model.output_dropout 0.43118230476026903 +71 44 model.feature_map_dropout 0.09944623501328986 +71 44 model.embedding_dim 2.0 +71 44 training.batch_size 2.0 +71 44 training.label_smoothing 0.11530850061436232 +71 45 model.output_channels 38.0 +71 45 model.input_dropout 0.3985192098716054 +71 45 model.output_dropout 0.4010557040622274 +71 45 model.feature_map_dropout 0.4127504234975225 +71 45 model.embedding_dim 1.0 +71 45 training.batch_size 2.0 +71 45 training.label_smoothing 0.06714506130721395 +71 46 model.output_channels 23.0 +71 46 model.input_dropout 0.11090898238226654 +71 46 model.output_dropout 0.19200093057029027 +71 46 model.feature_map_dropout 0.11811852409913703 +71 46 model.embedding_dim 0.0 +71 46 training.batch_size 1.0 +71 46 training.label_smoothing 0.010556475268473545 +71 47 model.output_channels 47.0 +71 47 model.input_dropout 0.13594156614676722 +71 47 model.output_dropout 0.06261924650547063 +71 47 model.feature_map_dropout 0.42448361178152283 +71 47 model.embedding_dim 1.0 +71 47 training.batch_size 2.0 +71 47 training.label_smoothing 0.03556374677735904 +71 48 model.output_channels 58.0 +71 48 model.input_dropout 0.29820615445262955 +71 48 model.output_dropout 0.2936452558025153 +71 48 model.feature_map_dropout 0.10669138235470466 +71 48 model.embedding_dim 1.0 +71 48 training.batch_size 1.0 +71 48 training.label_smoothing 0.5399463842723964 +71 49 model.output_channels 36.0 +71 49 model.input_dropout 0.48661965433520005 +71 49 model.output_dropout 0.39461172040402254 +71 49 model.feature_map_dropout 0.2029341130763413 +71 49 model.embedding_dim 
2.0 +71 49 training.batch_size 2.0 +71 49 training.label_smoothing 0.15921995933766603 +71 50 model.output_channels 22.0 +71 50 model.input_dropout 0.05737228063867145 +71 50 model.output_dropout 0.03964602023247893 +71 50 model.feature_map_dropout 0.06918387677490007 +71 50 model.embedding_dim 1.0 +71 50 training.batch_size 1.0 +71 50 training.label_smoothing 0.9865443354584503 +71 51 model.output_channels 19.0 +71 51 model.input_dropout 0.22904274615192283 +71 51 model.output_dropout 0.04956188234199094 +71 51 model.feature_map_dropout 0.1555489408021825 +71 51 model.embedding_dim 1.0 +71 51 training.batch_size 2.0 +71 51 training.label_smoothing 0.013000209124540175 +71 52 model.output_channels 16.0 +71 52 model.input_dropout 0.2605890154284153 +71 52 model.output_dropout 0.04029705835749897 +71 52 model.feature_map_dropout 0.07335509958366382 +71 52 model.embedding_dim 0.0 +71 52 training.batch_size 0.0 +71 52 training.label_smoothing 0.01146784177201249 +71 53 model.output_channels 18.0 +71 53 model.input_dropout 0.01779347901020778 +71 53 model.output_dropout 0.06218773306180603 +71 53 model.feature_map_dropout 0.3854058792251244 +71 53 model.embedding_dim 2.0 +71 53 training.batch_size 1.0 +71 53 training.label_smoothing 0.4361842907628119 +71 54 model.output_channels 16.0 +71 54 model.input_dropout 0.44946260741183125 +71 54 model.output_dropout 0.06760986713637163 +71 54 model.feature_map_dropout 0.06989651176480954 +71 54 model.embedding_dim 0.0 +71 54 training.batch_size 1.0 +71 54 training.label_smoothing 0.6692369788539287 +71 55 model.output_channels 53.0 +71 55 model.input_dropout 0.4539408717956278 +71 55 model.output_dropout 0.08953384589601171 +71 55 model.feature_map_dropout 0.09810149961825843 +71 55 model.embedding_dim 2.0 +71 55 training.batch_size 0.0 +71 55 training.label_smoothing 0.20261591072080629 +71 56 model.output_channels 52.0 +71 56 model.input_dropout 0.03339899708838062 +71 56 model.output_dropout 0.445245877595471 +71 56 
model.feature_map_dropout 0.08130497270466308 +71 56 model.embedding_dim 1.0 +71 56 training.batch_size 2.0 +71 56 training.label_smoothing 0.01855273579824984 +71 57 model.output_channels 62.0 +71 57 model.input_dropout 0.05215166033152252 +71 57 model.output_dropout 0.2671351898110721 +71 57 model.feature_map_dropout 0.3256706114699662 +71 57 model.embedding_dim 2.0 +71 57 training.batch_size 0.0 +71 57 training.label_smoothing 0.07109443654611387 +71 58 model.output_channels 41.0 +71 58 model.input_dropout 0.48695316912362263 +71 58 model.output_dropout 0.2071324395353844 +71 58 model.feature_map_dropout 0.15288069468003934 +71 58 model.embedding_dim 1.0 +71 58 training.batch_size 1.0 +71 58 training.label_smoothing 0.06223154395296906 +71 59 model.output_channels 48.0 +71 59 model.input_dropout 0.020633643710245786 +71 59 model.output_dropout 0.23096354817242404 +71 59 model.feature_map_dropout 0.37336584534906836 +71 59 model.embedding_dim 0.0 +71 59 training.batch_size 2.0 +71 59 training.label_smoothing 0.00825246509856729 +71 60 model.output_channels 36.0 +71 60 model.input_dropout 0.4983693982607932 +71 60 model.output_dropout 0.0814742020456895 +71 60 model.feature_map_dropout 0.38751739809559355 +71 60 model.embedding_dim 2.0 +71 60 training.batch_size 0.0 +71 60 training.label_smoothing 0.001185516870562201 +71 61 model.output_channels 26.0 +71 61 model.input_dropout 0.09185767223854713 +71 61 model.output_dropout 0.21789227735538558 +71 61 model.feature_map_dropout 0.37935848610283074 +71 61 model.embedding_dim 2.0 +71 61 training.batch_size 0.0 +71 61 training.label_smoothing 0.006571731712464745 +71 62 model.output_channels 42.0 +71 62 model.input_dropout 0.45841494332626975 +71 62 model.output_dropout 0.2754593534363184 +71 62 model.feature_map_dropout 0.42192448493936985 +71 62 model.embedding_dim 2.0 +71 62 training.batch_size 0.0 +71 62 training.label_smoothing 0.01092325564436096 +71 63 model.output_channels 16.0 +71 63 model.input_dropout 
0.034958466477957084 +71 63 model.output_dropout 0.004390694252562555 +71 63 model.feature_map_dropout 0.08630768402486144 +71 63 model.embedding_dim 2.0 +71 63 training.batch_size 1.0 +71 63 training.label_smoothing 0.07242505090106831 +71 64 model.output_channels 26.0 +71 64 model.input_dropout 0.138575614942059 +71 64 model.output_dropout 0.183748183346175 +71 64 model.feature_map_dropout 0.464919964463591 +71 64 model.embedding_dim 0.0 +71 64 training.batch_size 0.0 +71 64 training.label_smoothing 0.03254827914618862 +71 65 model.output_channels 60.0 +71 65 model.input_dropout 0.4309429560198989 +71 65 model.output_dropout 0.3398897889481758 +71 65 model.feature_map_dropout 0.287324238336527 +71 65 model.embedding_dim 0.0 +71 65 training.batch_size 2.0 +71 65 training.label_smoothing 0.002652992842727638 +71 66 model.output_channels 29.0 +71 66 model.input_dropout 0.32058287096553084 +71 66 model.output_dropout 0.3100327066215251 +71 66 model.feature_map_dropout 0.15992560086101193 +71 66 model.embedding_dim 1.0 +71 66 training.batch_size 0.0 +71 66 training.label_smoothing 0.006212041645400948 +71 67 model.output_channels 40.0 +71 67 model.input_dropout 0.1205550521750805 +71 67 model.output_dropout 0.1600046042330352 +71 67 model.feature_map_dropout 0.03599018104284496 +71 67 model.embedding_dim 0.0 +71 67 training.batch_size 2.0 +71 67 training.label_smoothing 0.007857326310082706 +71 68 model.output_channels 34.0 +71 68 model.input_dropout 0.41382111466942245 +71 68 model.output_dropout 0.27879158367547546 +71 68 model.feature_map_dropout 0.422538990691797 +71 68 model.embedding_dim 1.0 +71 68 training.batch_size 0.0 +71 68 training.label_smoothing 0.00167963674933409 +71 69 model.output_channels 16.0 +71 69 model.input_dropout 0.09187345454452461 +71 69 model.output_dropout 0.07410727903340558 +71 69 model.feature_map_dropout 0.4848836316538113 +71 69 model.embedding_dim 2.0 +71 69 training.batch_size 0.0 +71 69 training.label_smoothing 0.02220380308144209 
+71 70 model.output_channels 64.0 +71 70 model.input_dropout 0.14332624839295016 +71 70 model.output_dropout 0.2059265489048494 +71 70 model.feature_map_dropout 0.4146977042046806 +71 70 model.embedding_dim 1.0 +71 70 training.batch_size 1.0 +71 70 training.label_smoothing 0.08933098826611124 +71 71 model.output_channels 49.0 +71 71 model.input_dropout 0.4073934704593716 +71 71 model.output_dropout 0.3101855873490812 +71 71 model.feature_map_dropout 0.25782933064593255 +71 71 model.embedding_dim 2.0 +71 71 training.batch_size 1.0 +71 71 training.label_smoothing 0.14124288367103477 +71 72 model.output_channels 42.0 +71 72 model.input_dropout 0.04877635011949599 +71 72 model.output_dropout 0.28609468946252226 +71 72 model.feature_map_dropout 0.17123367478323248 +71 72 model.embedding_dim 1.0 +71 72 training.batch_size 1.0 +71 72 training.label_smoothing 0.7055367199739485 +71 73 model.output_channels 26.0 +71 73 model.input_dropout 0.480434156494904 +71 73 model.output_dropout 0.1273129022854237 +71 73 model.feature_map_dropout 0.04540309850438479 +71 73 model.embedding_dim 0.0 +71 73 training.batch_size 1.0 +71 73 training.label_smoothing 0.030363521485819077 +71 74 model.output_channels 37.0 +71 74 model.input_dropout 0.34780132249174683 +71 74 model.output_dropout 0.1780883251605186 +71 74 model.feature_map_dropout 0.2729057932458003 +71 74 model.embedding_dim 0.0 +71 74 training.batch_size 1.0 +71 74 training.label_smoothing 0.003067816899380519 +71 75 model.output_channels 27.0 +71 75 model.input_dropout 0.16669844601869388 +71 75 model.output_dropout 0.2914239950849097 +71 75 model.feature_map_dropout 0.3448965180471333 +71 75 model.embedding_dim 0.0 +71 75 training.batch_size 0.0 +71 75 training.label_smoothing 0.0021056148038485437 +71 76 model.output_channels 22.0 +71 76 model.input_dropout 0.2116739685953991 +71 76 model.output_dropout 0.4305250626012288 +71 76 model.feature_map_dropout 0.05572890558885529 +71 76 model.embedding_dim 0.0 +71 76 
training.batch_size 2.0 +71 76 training.label_smoothing 0.7957110204894989 +71 77 model.output_channels 64.0 +71 77 model.input_dropout 0.3306442805143994 +71 77 model.output_dropout 0.41917920443597895 +71 77 model.feature_map_dropout 0.2860241710951512 +71 77 model.embedding_dim 1.0 +71 77 training.batch_size 0.0 +71 77 training.label_smoothing 0.0083938945457613 +71 78 model.output_channels 63.0 +71 78 model.input_dropout 0.16170701304140073 +71 78 model.output_dropout 0.27149447087414075 +71 78 model.feature_map_dropout 0.20807900041950245 +71 78 model.embedding_dim 1.0 +71 78 training.batch_size 2.0 +71 78 training.label_smoothing 0.022391542934105245 +71 79 model.output_channels 59.0 +71 79 model.input_dropout 0.456297943986065 +71 79 model.output_dropout 0.05713670133340609 +71 79 model.feature_map_dropout 0.43972831783598176 +71 79 model.embedding_dim 1.0 +71 79 training.batch_size 0.0 +71 79 training.label_smoothing 0.010705060035221568 +71 80 model.output_channels 46.0 +71 80 model.input_dropout 0.4715317297764781 +71 80 model.output_dropout 0.3114382464301207 +71 80 model.feature_map_dropout 0.2549614883274777 +71 80 model.embedding_dim 2.0 +71 80 training.batch_size 1.0 +71 80 training.label_smoothing 0.4864572178241295 +71 81 model.output_channels 41.0 +71 81 model.input_dropout 0.041891279052115815 +71 81 model.output_dropout 0.4092324233557679 +71 81 model.feature_map_dropout 0.047942753337739896 +71 81 model.embedding_dim 1.0 +71 81 training.batch_size 2.0 +71 81 training.label_smoothing 0.014458462503289155 +71 82 model.output_channels 55.0 +71 82 model.input_dropout 0.11277683574772013 +71 82 model.output_dropout 0.39602254666453135 +71 82 model.feature_map_dropout 0.16301591004802773 +71 82 model.embedding_dim 1.0 +71 82 training.batch_size 0.0 +71 82 training.label_smoothing 0.13474610481152638 +71 83 model.output_channels 42.0 +71 83 model.input_dropout 0.3586126926199212 +71 83 model.output_dropout 0.29993558735742915 +71 83 
model.feature_map_dropout 0.34615749821408637 +71 83 model.embedding_dim 0.0 +71 83 training.batch_size 2.0 +71 83 training.label_smoothing 0.6623764576859364 +71 84 model.output_channels 22.0 +71 84 model.input_dropout 0.4607056958590526 +71 84 model.output_dropout 0.31789547165562193 +71 84 model.feature_map_dropout 0.24991366378296637 +71 84 model.embedding_dim 1.0 +71 84 training.batch_size 0.0 +71 84 training.label_smoothing 0.023594601925564944 +71 85 model.output_channels 37.0 +71 85 model.input_dropout 0.08342387626130904 +71 85 model.output_dropout 0.39613303048673526 +71 85 model.feature_map_dropout 0.47197825737727056 +71 85 model.embedding_dim 1.0 +71 85 training.batch_size 2.0 +71 85 training.label_smoothing 0.06966761487930048 +71 86 model.output_channels 26.0 +71 86 model.input_dropout 0.37307625943497474 +71 86 model.output_dropout 0.15840422201865162 +71 86 model.feature_map_dropout 0.32493000375829945 +71 86 model.embedding_dim 0.0 +71 86 training.batch_size 0.0 +71 86 training.label_smoothing 0.005720043339383368 +71 87 model.output_channels 52.0 +71 87 model.input_dropout 0.22101905971348257 +71 87 model.output_dropout 0.0046524747966957825 +71 87 model.feature_map_dropout 0.3179987404736732 +71 87 model.embedding_dim 1.0 +71 87 training.batch_size 1.0 +71 87 training.label_smoothing 0.0017039499815699785 +71 88 model.output_channels 38.0 +71 88 model.input_dropout 0.3696165443998578 +71 88 model.output_dropout 0.48230125107856975 +71 88 model.feature_map_dropout 0.48694454760410294 +71 88 model.embedding_dim 0.0 +71 88 training.batch_size 0.0 +71 88 training.label_smoothing 0.0012321568647416603 +71 89 model.output_channels 17.0 +71 89 model.input_dropout 0.10959011691123782 +71 89 model.output_dropout 0.1634580053896843 +71 89 model.feature_map_dropout 0.09607753520363527 +71 89 model.embedding_dim 0.0 +71 89 training.batch_size 1.0 +71 89 training.label_smoothing 0.005270496350175984 +71 90 model.output_channels 48.0 +71 90 
model.input_dropout 0.12745055634640673 +71 90 model.output_dropout 0.19936924577574938 +71 90 model.feature_map_dropout 0.4426720569816699 +71 90 model.embedding_dim 1.0 +71 90 training.batch_size 0.0 +71 90 training.label_smoothing 0.010447247496184957 +71 91 model.output_channels 29.0 +71 91 model.input_dropout 0.20860892153216615 +71 91 model.output_dropout 0.24168899405270766 +71 91 model.feature_map_dropout 0.0002802705304564279 +71 91 model.embedding_dim 2.0 +71 91 training.batch_size 2.0 +71 91 training.label_smoothing 0.9527223428715751 +71 92 model.output_channels 40.0 +71 92 model.input_dropout 0.3519962481131955 +71 92 model.output_dropout 0.3475979990662338 +71 92 model.feature_map_dropout 0.3254259262686421 +71 92 model.embedding_dim 1.0 +71 92 training.batch_size 1.0 +71 92 training.label_smoothing 0.40020865014742457 +71 93 model.output_channels 37.0 +71 93 model.input_dropout 0.1453300641302958 +71 93 model.output_dropout 0.11088785993894884 +71 93 model.feature_map_dropout 0.13161555539593783 +71 93 model.embedding_dim 0.0 +71 93 training.batch_size 2.0 +71 93 training.label_smoothing 0.014995513885419424 +71 94 model.output_channels 44.0 +71 94 model.input_dropout 0.2489220370268045 +71 94 model.output_dropout 0.4033206294329333 +71 94 model.feature_map_dropout 0.016594620909058222 +71 94 model.embedding_dim 0.0 +71 94 training.batch_size 0.0 +71 94 training.label_smoothing 0.3253623338467878 +71 95 model.output_channels 23.0 +71 95 model.input_dropout 0.4050081217453443 +71 95 model.output_dropout 0.27466235808508804 +71 95 model.feature_map_dropout 0.10981123306886448 +71 95 model.embedding_dim 1.0 +71 95 training.batch_size 1.0 +71 95 training.label_smoothing 0.10846920837980494 +71 96 model.output_channels 37.0 +71 96 model.input_dropout 0.37496819587726193 +71 96 model.output_dropout 0.16444611397683123 +71 96 model.feature_map_dropout 0.05963447829597551 +71 96 model.embedding_dim 1.0 +71 96 training.batch_size 1.0 +71 96 
training.label_smoothing 0.005069009281526898 +71 97 model.output_channels 32.0 +71 97 model.input_dropout 0.4734029834455084 +71 97 model.output_dropout 0.1795641383198387 +71 97 model.feature_map_dropout 0.4378346731608079 +71 97 model.embedding_dim 2.0 +71 97 training.batch_size 1.0 +71 97 training.label_smoothing 0.0018596296992064694 +71 98 model.output_channels 24.0 +71 98 model.input_dropout 0.3573646697234587 +71 98 model.output_dropout 0.4692862033512787 +71 98 model.feature_map_dropout 0.2342664001315714 +71 98 model.embedding_dim 2.0 +71 98 training.batch_size 0.0 +71 98 training.label_smoothing 0.09964661634301442 +71 99 model.output_channels 56.0 +71 99 model.input_dropout 0.042632198539906296 +71 99 model.output_dropout 0.3841912903833576 +71 99 model.feature_map_dropout 0.10885030178272004 +71 99 model.embedding_dim 0.0 +71 99 training.batch_size 0.0 +71 99 training.label_smoothing 0.09302487630354768 +71 100 model.output_channels 20.0 +71 100 model.input_dropout 0.1274472100158871 +71 100 model.output_dropout 0.21952179922541248 +71 100 model.feature_map_dropout 0.13771378721221916 +71 100 model.embedding_dim 1.0 +71 100 training.batch_size 0.0 +71 100 training.label_smoothing 0.47640887974987667 +71 1 dataset """kinships""" +71 1 model """conve""" +71 1 loss """softplus""" +71 1 regularizer """no""" +71 1 optimizer """adadelta""" +71 1 training_loop """lcwa""" +71 1 evaluator """rankbased""" +71 2 dataset """kinships""" +71 2 model """conve""" +71 2 loss """softplus""" +71 2 regularizer """no""" +71 2 optimizer """adadelta""" +71 2 training_loop """lcwa""" +71 2 evaluator """rankbased""" +71 3 dataset """kinships""" +71 3 model """conve""" +71 3 loss """softplus""" +71 3 regularizer """no""" +71 3 optimizer """adadelta""" +71 3 training_loop """lcwa""" +71 3 evaluator """rankbased""" +71 4 dataset """kinships""" +71 4 model """conve""" +71 4 loss """softplus""" +71 4 regularizer """no""" +71 4 optimizer """adadelta""" +71 4 training_loop """lcwa""" 
+71 4 evaluator """rankbased""" +71 5 dataset """kinships""" +71 5 model """conve""" +71 5 loss """softplus""" +71 5 regularizer """no""" +71 5 optimizer """adadelta""" +71 5 training_loop """lcwa""" +71 5 evaluator """rankbased""" +71 6 dataset """kinships""" +71 6 model """conve""" +71 6 loss """softplus""" +71 6 regularizer """no""" +71 6 optimizer """adadelta""" +71 6 training_loop """lcwa""" +71 6 evaluator """rankbased""" +71 7 dataset """kinships""" +71 7 model """conve""" +71 7 loss """softplus""" +71 7 regularizer """no""" +71 7 optimizer """adadelta""" +71 7 training_loop """lcwa""" +71 7 evaluator """rankbased""" +71 8 dataset """kinships""" +71 8 model """conve""" +71 8 loss """softplus""" +71 8 regularizer """no""" +71 8 optimizer """adadelta""" +71 8 training_loop """lcwa""" +71 8 evaluator """rankbased""" +71 9 dataset """kinships""" +71 9 model """conve""" +71 9 loss """softplus""" +71 9 regularizer """no""" +71 9 optimizer """adadelta""" +71 9 training_loop """lcwa""" +71 9 evaluator """rankbased""" +71 10 dataset """kinships""" +71 10 model """conve""" +71 10 loss """softplus""" +71 10 regularizer """no""" +71 10 optimizer """adadelta""" +71 10 training_loop """lcwa""" +71 10 evaluator """rankbased""" +71 11 dataset """kinships""" +71 11 model """conve""" +71 11 loss """softplus""" +71 11 regularizer """no""" +71 11 optimizer """adadelta""" +71 11 training_loop """lcwa""" +71 11 evaluator """rankbased""" +71 12 dataset """kinships""" +71 12 model """conve""" +71 12 loss """softplus""" +71 12 regularizer """no""" +71 12 optimizer """adadelta""" +71 12 training_loop """lcwa""" +71 12 evaluator """rankbased""" +71 13 dataset """kinships""" +71 13 model """conve""" +71 13 loss """softplus""" +71 13 regularizer """no""" +71 13 optimizer """adadelta""" +71 13 training_loop """lcwa""" +71 13 evaluator """rankbased""" +71 14 dataset """kinships""" +71 14 model """conve""" +71 14 loss """softplus""" +71 14 regularizer """no""" +71 14 optimizer 
"""adadelta""" +71 14 training_loop """lcwa""" +71 14 evaluator """rankbased""" +71 15 dataset """kinships""" +71 15 model """conve""" +71 15 loss """softplus""" +71 15 regularizer """no""" +71 15 optimizer """adadelta""" +71 15 training_loop """lcwa""" +71 15 evaluator """rankbased""" +71 16 dataset """kinships""" +71 16 model """conve""" +71 16 loss """softplus""" +71 16 regularizer """no""" +71 16 optimizer """adadelta""" +71 16 training_loop """lcwa""" +71 16 evaluator """rankbased""" +71 17 dataset """kinships""" +71 17 model """conve""" +71 17 loss """softplus""" +71 17 regularizer """no""" +71 17 optimizer """adadelta""" +71 17 training_loop """lcwa""" +71 17 evaluator """rankbased""" +71 18 dataset """kinships""" +71 18 model """conve""" +71 18 loss """softplus""" +71 18 regularizer """no""" +71 18 optimizer """adadelta""" +71 18 training_loop """lcwa""" +71 18 evaluator """rankbased""" +71 19 dataset """kinships""" +71 19 model """conve""" +71 19 loss """softplus""" +71 19 regularizer """no""" +71 19 optimizer """adadelta""" +71 19 training_loop """lcwa""" +71 19 evaluator """rankbased""" +71 20 dataset """kinships""" +71 20 model """conve""" +71 20 loss """softplus""" +71 20 regularizer """no""" +71 20 optimizer """adadelta""" +71 20 training_loop """lcwa""" +71 20 evaluator """rankbased""" +71 21 dataset """kinships""" +71 21 model """conve""" +71 21 loss """softplus""" +71 21 regularizer """no""" +71 21 optimizer """adadelta""" +71 21 training_loop """lcwa""" +71 21 evaluator """rankbased""" +71 22 dataset """kinships""" +71 22 model """conve""" +71 22 loss """softplus""" +71 22 regularizer """no""" +71 22 optimizer """adadelta""" +71 22 training_loop """lcwa""" +71 22 evaluator """rankbased""" +71 23 dataset """kinships""" +71 23 model """conve""" +71 23 loss """softplus""" +71 23 regularizer """no""" +71 23 optimizer """adadelta""" +71 23 training_loop """lcwa""" +71 23 evaluator """rankbased""" +71 24 dataset """kinships""" +71 24 model """conve""" 
+71 24 loss """softplus""" +71 24 regularizer """no""" +71 24 optimizer """adadelta""" +71 24 training_loop """lcwa""" +71 24 evaluator """rankbased""" +71 25 dataset """kinships""" +71 25 model """conve""" +71 25 loss """softplus""" +71 25 regularizer """no""" +71 25 optimizer """adadelta""" +71 25 training_loop """lcwa""" +71 25 evaluator """rankbased""" +71 26 dataset """kinships""" +71 26 model """conve""" +71 26 loss """softplus""" +71 26 regularizer """no""" +71 26 optimizer """adadelta""" +71 26 training_loop """lcwa""" +71 26 evaluator """rankbased""" +71 27 dataset """kinships""" +71 27 model """conve""" +71 27 loss """softplus""" +71 27 regularizer """no""" +71 27 optimizer """adadelta""" +71 27 training_loop """lcwa""" +71 27 evaluator """rankbased""" +71 28 dataset """kinships""" +71 28 model """conve""" +71 28 loss """softplus""" +71 28 regularizer """no""" +71 28 optimizer """adadelta""" +71 28 training_loop """lcwa""" +71 28 evaluator """rankbased""" +71 29 dataset """kinships""" +71 29 model """conve""" +71 29 loss """softplus""" +71 29 regularizer """no""" +71 29 optimizer """adadelta""" +71 29 training_loop """lcwa""" +71 29 evaluator """rankbased""" +71 30 dataset """kinships""" +71 30 model """conve""" +71 30 loss """softplus""" +71 30 regularizer """no""" +71 30 optimizer """adadelta""" +71 30 training_loop """lcwa""" +71 30 evaluator """rankbased""" +71 31 dataset """kinships""" +71 31 model """conve""" +71 31 loss """softplus""" +71 31 regularizer """no""" +71 31 optimizer """adadelta""" +71 31 training_loop """lcwa""" +71 31 evaluator """rankbased""" +71 32 dataset """kinships""" +71 32 model """conve""" +71 32 loss """softplus""" +71 32 regularizer """no""" +71 32 optimizer """adadelta""" +71 32 training_loop """lcwa""" +71 32 evaluator """rankbased""" +71 33 dataset """kinships""" +71 33 model """conve""" +71 33 loss """softplus""" +71 33 regularizer """no""" +71 33 optimizer """adadelta""" +71 33 training_loop """lcwa""" +71 33 evaluator 
"""rankbased""" +71 34 dataset """kinships""" +71 34 model """conve""" +71 34 loss """softplus""" +71 34 regularizer """no""" +71 34 optimizer """adadelta""" +71 34 training_loop """lcwa""" +71 34 evaluator """rankbased""" +71 35 dataset """kinships""" +71 35 model """conve""" +71 35 loss """softplus""" +71 35 regularizer """no""" +71 35 optimizer """adadelta""" +71 35 training_loop """lcwa""" +71 35 evaluator """rankbased""" +71 36 dataset """kinships""" +71 36 model """conve""" +71 36 loss """softplus""" +71 36 regularizer """no""" +71 36 optimizer """adadelta""" +71 36 training_loop """lcwa""" +71 36 evaluator """rankbased""" +71 37 dataset """kinships""" +71 37 model """conve""" +71 37 loss """softplus""" +71 37 regularizer """no""" +71 37 optimizer """adadelta""" +71 37 training_loop """lcwa""" +71 37 evaluator """rankbased""" +71 38 dataset """kinships""" +71 38 model """conve""" +71 38 loss """softplus""" +71 38 regularizer """no""" +71 38 optimizer """adadelta""" +71 38 training_loop """lcwa""" +71 38 evaluator """rankbased""" +71 39 dataset """kinships""" +71 39 model """conve""" +71 39 loss """softplus""" +71 39 regularizer """no""" +71 39 optimizer """adadelta""" +71 39 training_loop """lcwa""" +71 39 evaluator """rankbased""" +71 40 dataset """kinships""" +71 40 model """conve""" +71 40 loss """softplus""" +71 40 regularizer """no""" +71 40 optimizer """adadelta""" +71 40 training_loop """lcwa""" +71 40 evaluator """rankbased""" +71 41 dataset """kinships""" +71 41 model """conve""" +71 41 loss """softplus""" +71 41 regularizer """no""" +71 41 optimizer """adadelta""" +71 41 training_loop """lcwa""" +71 41 evaluator """rankbased""" +71 42 dataset """kinships""" +71 42 model """conve""" +71 42 loss """softplus""" +71 42 regularizer """no""" +71 42 optimizer """adadelta""" +71 42 training_loop """lcwa""" +71 42 evaluator """rankbased""" +71 43 dataset """kinships""" +71 43 model """conve""" +71 43 loss """softplus""" +71 43 regularizer """no""" +71 43 
optimizer """adadelta""" +71 43 training_loop """lcwa""" +71 43 evaluator """rankbased""" +71 44 dataset """kinships""" +71 44 model """conve""" +71 44 loss """softplus""" +71 44 regularizer """no""" +71 44 optimizer """adadelta""" +71 44 training_loop """lcwa""" +71 44 evaluator """rankbased""" +71 45 dataset """kinships""" +71 45 model """conve""" +71 45 loss """softplus""" +71 45 regularizer """no""" +71 45 optimizer """adadelta""" +71 45 training_loop """lcwa""" +71 45 evaluator """rankbased""" +71 46 dataset """kinships""" +71 46 model """conve""" +71 46 loss """softplus""" +71 46 regularizer """no""" +71 46 optimizer """adadelta""" +71 46 training_loop """lcwa""" +71 46 evaluator """rankbased""" +71 47 dataset """kinships""" +71 47 model """conve""" +71 47 loss """softplus""" +71 47 regularizer """no""" +71 47 optimizer """adadelta""" +71 47 training_loop """lcwa""" +71 47 evaluator """rankbased""" +71 48 dataset """kinships""" +71 48 model """conve""" +71 48 loss """softplus""" +71 48 regularizer """no""" +71 48 optimizer """adadelta""" +71 48 training_loop """lcwa""" +71 48 evaluator """rankbased""" +71 49 dataset """kinships""" +71 49 model """conve""" +71 49 loss """softplus""" +71 49 regularizer """no""" +71 49 optimizer """adadelta""" +71 49 training_loop """lcwa""" +71 49 evaluator """rankbased""" +71 50 dataset """kinships""" +71 50 model """conve""" +71 50 loss """softplus""" +71 50 regularizer """no""" +71 50 optimizer """adadelta""" +71 50 training_loop """lcwa""" +71 50 evaluator """rankbased""" +71 51 dataset """kinships""" +71 51 model """conve""" +71 51 loss """softplus""" +71 51 regularizer """no""" +71 51 optimizer """adadelta""" +71 51 training_loop """lcwa""" +71 51 evaluator """rankbased""" +71 52 dataset """kinships""" +71 52 model """conve""" +71 52 loss """softplus""" +71 52 regularizer """no""" +71 52 optimizer """adadelta""" +71 52 training_loop """lcwa""" +71 52 evaluator """rankbased""" +71 53 dataset """kinships""" +71 53 model 
"""conve""" +71 53 loss """softplus""" +71 53 regularizer """no""" +71 53 optimizer """adadelta""" +71 53 training_loop """lcwa""" +71 53 evaluator """rankbased""" +71 54 dataset """kinships""" +71 54 model """conve""" +71 54 loss """softplus""" +71 54 regularizer """no""" +71 54 optimizer """adadelta""" +71 54 training_loop """lcwa""" +71 54 evaluator """rankbased""" +71 55 dataset """kinships""" +71 55 model """conve""" +71 55 loss """softplus""" +71 55 regularizer """no""" +71 55 optimizer """adadelta""" +71 55 training_loop """lcwa""" +71 55 evaluator """rankbased""" +71 56 dataset """kinships""" +71 56 model """conve""" +71 56 loss """softplus""" +71 56 regularizer """no""" +71 56 optimizer """adadelta""" +71 56 training_loop """lcwa""" +71 56 evaluator """rankbased""" +71 57 dataset """kinships""" +71 57 model """conve""" +71 57 loss """softplus""" +71 57 regularizer """no""" +71 57 optimizer """adadelta""" +71 57 training_loop """lcwa""" +71 57 evaluator """rankbased""" +71 58 dataset """kinships""" +71 58 model """conve""" +71 58 loss """softplus""" +71 58 regularizer """no""" +71 58 optimizer """adadelta""" +71 58 training_loop """lcwa""" +71 58 evaluator """rankbased""" +71 59 dataset """kinships""" +71 59 model """conve""" +71 59 loss """softplus""" +71 59 regularizer """no""" +71 59 optimizer """adadelta""" +71 59 training_loop """lcwa""" +71 59 evaluator """rankbased""" +71 60 dataset """kinships""" +71 60 model """conve""" +71 60 loss """softplus""" +71 60 regularizer """no""" +71 60 optimizer """adadelta""" +71 60 training_loop """lcwa""" +71 60 evaluator """rankbased""" +71 61 dataset """kinships""" +71 61 model """conve""" +71 61 loss """softplus""" +71 61 regularizer """no""" +71 61 optimizer """adadelta""" +71 61 training_loop """lcwa""" +71 61 evaluator """rankbased""" +71 62 dataset """kinships""" +71 62 model """conve""" +71 62 loss """softplus""" +71 62 regularizer """no""" +71 62 optimizer """adadelta""" +71 62 training_loop """lcwa""" +71 
62 evaluator """rankbased""" +71 63 dataset """kinships""" +71 63 model """conve""" +71 63 loss """softplus""" +71 63 regularizer """no""" +71 63 optimizer """adadelta""" +71 63 training_loop """lcwa""" +71 63 evaluator """rankbased""" +71 64 dataset """kinships""" +71 64 model """conve""" +71 64 loss """softplus""" +71 64 regularizer """no""" +71 64 optimizer """adadelta""" +71 64 training_loop """lcwa""" +71 64 evaluator """rankbased""" +71 65 dataset """kinships""" +71 65 model """conve""" +71 65 loss """softplus""" +71 65 regularizer """no""" +71 65 optimizer """adadelta""" +71 65 training_loop """lcwa""" +71 65 evaluator """rankbased""" +71 66 dataset """kinships""" +71 66 model """conve""" +71 66 loss """softplus""" +71 66 regularizer """no""" +71 66 optimizer """adadelta""" +71 66 training_loop """lcwa""" +71 66 evaluator """rankbased""" +71 67 dataset """kinships""" +71 67 model """conve""" +71 67 loss """softplus""" +71 67 regularizer """no""" +71 67 optimizer """adadelta""" +71 67 training_loop """lcwa""" +71 67 evaluator """rankbased""" +71 68 dataset """kinships""" +71 68 model """conve""" +71 68 loss """softplus""" +71 68 regularizer """no""" +71 68 optimizer """adadelta""" +71 68 training_loop """lcwa""" +71 68 evaluator """rankbased""" +71 69 dataset """kinships""" +71 69 model """conve""" +71 69 loss """softplus""" +71 69 regularizer """no""" +71 69 optimizer """adadelta""" +71 69 training_loop """lcwa""" +71 69 evaluator """rankbased""" +71 70 dataset """kinships""" +71 70 model """conve""" +71 70 loss """softplus""" +71 70 regularizer """no""" +71 70 optimizer """adadelta""" +71 70 training_loop """lcwa""" +71 70 evaluator """rankbased""" +71 71 dataset """kinships""" +71 71 model """conve""" +71 71 loss """softplus""" +71 71 regularizer """no""" +71 71 optimizer """adadelta""" +71 71 training_loop """lcwa""" +71 71 evaluator """rankbased""" +71 72 dataset """kinships""" +71 72 model """conve""" +71 72 loss """softplus""" +71 72 regularizer 
"""no""" +71 72 optimizer """adadelta""" +71 72 training_loop """lcwa""" +71 72 evaluator """rankbased""" +71 73 dataset """kinships""" +71 73 model """conve""" +71 73 loss """softplus""" +71 73 regularizer """no""" +71 73 optimizer """adadelta""" +71 73 training_loop """lcwa""" +71 73 evaluator """rankbased""" +71 74 dataset """kinships""" +71 74 model """conve""" +71 74 loss """softplus""" +71 74 regularizer """no""" +71 74 optimizer """adadelta""" +71 74 training_loop """lcwa""" +71 74 evaluator """rankbased""" +71 75 dataset """kinships""" +71 75 model """conve""" +71 75 loss """softplus""" +71 75 regularizer """no""" +71 75 optimizer """adadelta""" +71 75 training_loop """lcwa""" +71 75 evaluator """rankbased""" +71 76 dataset """kinships""" +71 76 model """conve""" +71 76 loss """softplus""" +71 76 regularizer """no""" +71 76 optimizer """adadelta""" +71 76 training_loop """lcwa""" +71 76 evaluator """rankbased""" +71 77 dataset """kinships""" +71 77 model """conve""" +71 77 loss """softplus""" +71 77 regularizer """no""" +71 77 optimizer """adadelta""" +71 77 training_loop """lcwa""" +71 77 evaluator """rankbased""" +71 78 dataset """kinships""" +71 78 model """conve""" +71 78 loss """softplus""" +71 78 regularizer """no""" +71 78 optimizer """adadelta""" +71 78 training_loop """lcwa""" +71 78 evaluator """rankbased""" +71 79 dataset """kinships""" +71 79 model """conve""" +71 79 loss """softplus""" +71 79 regularizer """no""" +71 79 optimizer """adadelta""" +71 79 training_loop """lcwa""" +71 79 evaluator """rankbased""" +71 80 dataset """kinships""" +71 80 model """conve""" +71 80 loss """softplus""" +71 80 regularizer """no""" +71 80 optimizer """adadelta""" +71 80 training_loop """lcwa""" +71 80 evaluator """rankbased""" +71 81 dataset """kinships""" +71 81 model """conve""" +71 81 loss """softplus""" +71 81 regularizer """no""" +71 81 optimizer """adadelta""" +71 81 training_loop """lcwa""" +71 81 evaluator """rankbased""" +71 82 dataset """kinships""" 
+71 82 model """conve""" +71 82 loss """softplus""" +71 82 regularizer """no""" +71 82 optimizer """adadelta""" +71 82 training_loop """lcwa""" +71 82 evaluator """rankbased""" +71 83 dataset """kinships""" +71 83 model """conve""" +71 83 loss """softplus""" +71 83 regularizer """no""" +71 83 optimizer """adadelta""" +71 83 training_loop """lcwa""" +71 83 evaluator """rankbased""" +71 84 dataset """kinships""" +71 84 model """conve""" +71 84 loss """softplus""" +71 84 regularizer """no""" +71 84 optimizer """adadelta""" +71 84 training_loop """lcwa""" +71 84 evaluator """rankbased""" +71 85 dataset """kinships""" +71 85 model """conve""" +71 85 loss """softplus""" +71 85 regularizer """no""" +71 85 optimizer """adadelta""" +71 85 training_loop """lcwa""" +71 85 evaluator """rankbased""" +71 86 dataset """kinships""" +71 86 model """conve""" +71 86 loss """softplus""" +71 86 regularizer """no""" +71 86 optimizer """adadelta""" +71 86 training_loop """lcwa""" +71 86 evaluator """rankbased""" +71 87 dataset """kinships""" +71 87 model """conve""" +71 87 loss """softplus""" +71 87 regularizer """no""" +71 87 optimizer """adadelta""" +71 87 training_loop """lcwa""" +71 87 evaluator """rankbased""" +71 88 dataset """kinships""" +71 88 model """conve""" +71 88 loss """softplus""" +71 88 regularizer """no""" +71 88 optimizer """adadelta""" +71 88 training_loop """lcwa""" +71 88 evaluator """rankbased""" +71 89 dataset """kinships""" +71 89 model """conve""" +71 89 loss """softplus""" +71 89 regularizer """no""" +71 89 optimizer """adadelta""" +71 89 training_loop """lcwa""" +71 89 evaluator """rankbased""" +71 90 dataset """kinships""" +71 90 model """conve""" +71 90 loss """softplus""" +71 90 regularizer """no""" +71 90 optimizer """adadelta""" +71 90 training_loop """lcwa""" +71 90 evaluator """rankbased""" +71 91 dataset """kinships""" +71 91 model """conve""" +71 91 loss """softplus""" +71 91 regularizer """no""" +71 91 optimizer """adadelta""" +71 91 training_loop 
"""lcwa""" +71 91 evaluator """rankbased""" +71 92 dataset """kinships""" +71 92 model """conve""" +71 92 loss """softplus""" +71 92 regularizer """no""" +71 92 optimizer """adadelta""" +71 92 training_loop """lcwa""" +71 92 evaluator """rankbased""" +71 93 dataset """kinships""" +71 93 model """conve""" +71 93 loss """softplus""" +71 93 regularizer """no""" +71 93 optimizer """adadelta""" +71 93 training_loop """lcwa""" +71 93 evaluator """rankbased""" +71 94 dataset """kinships""" +71 94 model """conve""" +71 94 loss """softplus""" +71 94 regularizer """no""" +71 94 optimizer """adadelta""" +71 94 training_loop """lcwa""" +71 94 evaluator """rankbased""" +71 95 dataset """kinships""" +71 95 model """conve""" +71 95 loss """softplus""" +71 95 regularizer """no""" +71 95 optimizer """adadelta""" +71 95 training_loop """lcwa""" +71 95 evaluator """rankbased""" +71 96 dataset """kinships""" +71 96 model """conve""" +71 96 loss """softplus""" +71 96 regularizer """no""" +71 96 optimizer """adadelta""" +71 96 training_loop """lcwa""" +71 96 evaluator """rankbased""" +71 97 dataset """kinships""" +71 97 model """conve""" +71 97 loss """softplus""" +71 97 regularizer """no""" +71 97 optimizer """adadelta""" +71 97 training_loop """lcwa""" +71 97 evaluator """rankbased""" +71 98 dataset """kinships""" +71 98 model """conve""" +71 98 loss """softplus""" +71 98 regularizer """no""" +71 98 optimizer """adadelta""" +71 98 training_loop """lcwa""" +71 98 evaluator """rankbased""" +71 99 dataset """kinships""" +71 99 model """conve""" +71 99 loss """softplus""" +71 99 regularizer """no""" +71 99 optimizer """adadelta""" +71 99 training_loop """lcwa""" +71 99 evaluator """rankbased""" +71 100 dataset """kinships""" +71 100 model """conve""" +71 100 loss """softplus""" +71 100 regularizer """no""" +71 100 optimizer """adadelta""" +71 100 training_loop """lcwa""" +71 100 evaluator """rankbased""" +72 1 model.output_channels 60.0 +72 1 model.input_dropout 0.24762830104134242 +72 1 
model.output_dropout 0.13158733475349366 +72 1 model.feature_map_dropout 0.28424759168878005 +72 1 model.embedding_dim 2.0 +72 1 training.batch_size 1.0 +72 1 training.label_smoothing 0.1143284395937063 +72 2 model.output_channels 42.0 +72 2 model.input_dropout 0.31437413601163045 +72 2 model.output_dropout 0.10271851358302386 +72 2 model.feature_map_dropout 0.23629656705751495 +72 2 model.embedding_dim 0.0 +72 2 training.batch_size 0.0 +72 2 training.label_smoothing 0.08948570741542945 +72 3 model.output_channels 33.0 +72 3 model.input_dropout 0.4876627748477546 +72 3 model.output_dropout 0.1042203231836607 +72 3 model.feature_map_dropout 0.011674174834855833 +72 3 model.embedding_dim 1.0 +72 3 training.batch_size 2.0 +72 3 training.label_smoothing 0.6069843093816373 +72 4 model.output_channels 61.0 +72 4 model.input_dropout 0.17427585777744392 +72 4 model.output_dropout 0.32749511001430687 +72 4 model.feature_map_dropout 0.3609310089953135 +72 4 model.embedding_dim 1.0 +72 4 training.batch_size 1.0 +72 4 training.label_smoothing 0.029954577862488934 +72 5 model.output_channels 31.0 +72 5 model.input_dropout 0.2748837245717051 +72 5 model.output_dropout 0.3369139456669403 +72 5 model.feature_map_dropout 0.03379862834516212 +72 5 model.embedding_dim 0.0 +72 5 training.batch_size 1.0 +72 5 training.label_smoothing 0.08303157540269195 +72 6 model.output_channels 41.0 +72 6 model.input_dropout 0.47506004818426917 +72 6 model.output_dropout 0.0006493438593556666 +72 6 model.feature_map_dropout 0.1707780803744809 +72 6 model.embedding_dim 2.0 +72 6 training.batch_size 0.0 +72 6 training.label_smoothing 0.10192657539413022 +72 7 model.output_channels 41.0 +72 7 model.input_dropout 0.31972553808869586 +72 7 model.output_dropout 0.010210046955758834 +72 7 model.feature_map_dropout 0.1033472122077373 +72 7 model.embedding_dim 0.0 +72 7 training.batch_size 0.0 +72 7 training.label_smoothing 0.034714328490900094 +72 8 model.output_channels 61.0 +72 8 model.input_dropout 
0.39037753350060683 +72 8 model.output_dropout 0.1705877103393732 +72 8 model.feature_map_dropout 0.3115214555911363 +72 8 model.embedding_dim 2.0 +72 8 training.batch_size 0.0 +72 8 training.label_smoothing 0.21820299145548644 +72 9 model.output_channels 39.0 +72 9 model.input_dropout 0.3534625643215745 +72 9 model.output_dropout 0.421002797844098 +72 9 model.feature_map_dropout 0.23995425763686618 +72 9 model.embedding_dim 1.0 +72 9 training.batch_size 2.0 +72 9 training.label_smoothing 0.15931120144086378 +72 10 model.output_channels 30.0 +72 10 model.input_dropout 0.15730669862426805 +72 10 model.output_dropout 0.07826352564065725 +72 10 model.feature_map_dropout 0.4735355094302863 +72 10 model.embedding_dim 1.0 +72 10 training.batch_size 1.0 +72 10 training.label_smoothing 0.2905426136494123 +72 11 model.output_channels 41.0 +72 11 model.input_dropout 0.262060842801769 +72 11 model.output_dropout 0.09158041797718647 +72 11 model.feature_map_dropout 0.05493048697529529 +72 11 model.embedding_dim 2.0 +72 11 training.batch_size 0.0 +72 11 training.label_smoothing 0.005903350157186056 +72 12 model.output_channels 29.0 +72 12 model.input_dropout 0.11929067990358949 +72 12 model.output_dropout 0.19236600909765278 +72 12 model.feature_map_dropout 0.1275915249130256 +72 12 model.embedding_dim 0.0 +72 12 training.batch_size 1.0 +72 12 training.label_smoothing 0.002205549660699455 +72 13 model.output_channels 58.0 +72 13 model.input_dropout 0.3096918629405931 +72 13 model.output_dropout 0.11281510023081903 +72 13 model.feature_map_dropout 0.21849242553283982 +72 13 model.embedding_dim 0.0 +72 13 training.batch_size 2.0 +72 13 training.label_smoothing 0.5325455505629848 +72 14 model.output_channels 63.0 +72 14 model.input_dropout 0.28086326140242707 +72 14 model.output_dropout 0.053788154959443335 +72 14 model.feature_map_dropout 0.2964197279130988 +72 14 model.embedding_dim 1.0 +72 14 training.batch_size 0.0 +72 14 training.label_smoothing 0.002571537335959417 +72 15 
model.output_channels 64.0 +72 15 model.input_dropout 0.2531941107794955 +72 15 model.output_dropout 0.3189112432944273 +72 15 model.feature_map_dropout 0.3408045115336037 +72 15 model.embedding_dim 2.0 +72 15 training.batch_size 2.0 +72 15 training.label_smoothing 0.03438471262853712 +72 16 model.output_channels 27.0 +72 16 model.input_dropout 0.30566427097293186 +72 16 model.output_dropout 0.04348572571548398 +72 16 model.feature_map_dropout 0.2204575348399847 +72 16 model.embedding_dim 2.0 +72 16 training.batch_size 1.0 +72 16 training.label_smoothing 0.04573113679352836 +72 17 model.output_channels 63.0 +72 17 model.input_dropout 0.31153699323571893 +72 17 model.output_dropout 0.37186957044305036 +72 17 model.feature_map_dropout 0.3999626975331203 +72 17 model.embedding_dim 2.0 +72 17 training.batch_size 2.0 +72 17 training.label_smoothing 0.05749155445084669 +72 18 model.output_channels 37.0 +72 18 model.input_dropout 0.0851165709686838 +72 18 model.output_dropout 0.3088168629808132 +72 18 model.feature_map_dropout 0.17459715239082052 +72 18 model.embedding_dim 2.0 +72 18 training.batch_size 0.0 +72 18 training.label_smoothing 0.00281593857372914 +72 19 model.output_channels 55.0 +72 19 model.input_dropout 0.2350023838266867 +72 19 model.output_dropout 0.08752270979107857 +72 19 model.feature_map_dropout 0.4630553244530373 +72 19 model.embedding_dim 1.0 +72 19 training.batch_size 1.0 +72 19 training.label_smoothing 0.045404337532030326 +72 20 model.output_channels 47.0 +72 20 model.input_dropout 0.31569539953806014 +72 20 model.output_dropout 0.4052558134496171 +72 20 model.feature_map_dropout 0.27667866340248826 +72 20 model.embedding_dim 1.0 +72 20 training.batch_size 2.0 +72 20 training.label_smoothing 0.24161494255187474 +72 21 model.output_channels 51.0 +72 21 model.input_dropout 0.13813680005925766 +72 21 model.output_dropout 0.21086035305540485 +72 21 model.feature_map_dropout 0.14093320245915147 +72 21 model.embedding_dim 2.0 +72 21 training.batch_size 
0.0 +72 21 training.label_smoothing 0.9720272120677375 +72 22 model.output_channels 45.0 +72 22 model.input_dropout 0.13680781342773507 +72 22 model.output_dropout 0.279924806164286 +72 22 model.feature_map_dropout 0.08340262614065647 +72 22 model.embedding_dim 0.0 +72 22 training.batch_size 1.0 +72 22 training.label_smoothing 0.19107468899395064 +72 23 model.output_channels 28.0 +72 23 model.input_dropout 0.39465438957048293 +72 23 model.output_dropout 0.33755772740850376 +72 23 model.feature_map_dropout 0.04592216124263998 +72 23 model.embedding_dim 0.0 +72 23 training.batch_size 1.0 +72 23 training.label_smoothing 0.5614063655643239 +72 24 model.output_channels 17.0 +72 24 model.input_dropout 0.04615614406638824 +72 24 model.output_dropout 0.010446317034828845 +72 24 model.feature_map_dropout 0.1583060462943831 +72 24 model.embedding_dim 0.0 +72 24 training.batch_size 0.0 +72 24 training.label_smoothing 0.09682835728975213 +72 25 model.output_channels 43.0 +72 25 model.input_dropout 0.16802842866411039 +72 25 model.output_dropout 0.3439214814553211 +72 25 model.feature_map_dropout 0.0045006459948597755 +72 25 model.embedding_dim 1.0 +72 25 training.batch_size 1.0 +72 25 training.label_smoothing 0.0013070637755205536 +72 26 model.output_channels 26.0 +72 26 model.input_dropout 0.34209791824139013 +72 26 model.output_dropout 0.20794239417781651 +72 26 model.feature_map_dropout 0.49805216977114136 +72 26 model.embedding_dim 1.0 +72 26 training.batch_size 2.0 +72 26 training.label_smoothing 0.003545422984653292 +72 27 model.output_channels 61.0 +72 27 model.input_dropout 0.32197675453148006 +72 27 model.output_dropout 0.44365627542958175 +72 27 model.feature_map_dropout 0.4136023090709836 +72 27 model.embedding_dim 0.0 +72 27 training.batch_size 2.0 +72 27 training.label_smoothing 0.058049305680618155 +72 28 model.output_channels 21.0 +72 28 model.input_dropout 0.1797491664568988 +72 28 model.output_dropout 0.20022911331417065 +72 28 model.feature_map_dropout 
0.29523451300137077 +72 28 model.embedding_dim 2.0 +72 28 training.batch_size 0.0 +72 28 training.label_smoothing 0.001936923364525454 +72 29 model.output_channels 42.0 +72 29 model.input_dropout 0.031600783683266576 +72 29 model.output_dropout 0.45081706460582555 +72 29 model.feature_map_dropout 0.475611242692851 +72 29 model.embedding_dim 0.0 +72 29 training.batch_size 1.0 +72 29 training.label_smoothing 0.45953827078259635 +72 30 model.output_channels 22.0 +72 30 model.input_dropout 0.2531332879550084 +72 30 model.output_dropout 0.2631185436660078 +72 30 model.feature_map_dropout 0.3854911507123895 +72 30 model.embedding_dim 1.0 +72 30 training.batch_size 0.0 +72 30 training.label_smoothing 0.007987424442831017 +72 31 model.output_channels 64.0 +72 31 model.input_dropout 0.20676061714954536 +72 31 model.output_dropout 0.13007259567684704 +72 31 model.feature_map_dropout 0.3456698937543504 +72 31 model.embedding_dim 2.0 +72 31 training.batch_size 2.0 +72 31 training.label_smoothing 0.138001296046591 +72 32 model.output_channels 18.0 +72 32 model.input_dropout 0.40290579234626084 +72 32 model.output_dropout 0.0822096465912493 +72 32 model.feature_map_dropout 0.40147445472712917 +72 32 model.embedding_dim 2.0 +72 32 training.batch_size 2.0 +72 32 training.label_smoothing 0.05609993679538131 +72 33 model.output_channels 46.0 +72 33 model.input_dropout 0.0577735159804143 +72 33 model.output_dropout 0.34108936095767944 +72 33 model.feature_map_dropout 0.06763712488022511 +72 33 model.embedding_dim 0.0 +72 33 training.batch_size 2.0 +72 33 training.label_smoothing 0.02378004008548364 +72 34 model.output_channels 35.0 +72 34 model.input_dropout 0.4828553776318171 +72 34 model.output_dropout 0.1514564683754055 +72 34 model.feature_map_dropout 0.0351393421089633 +72 34 model.embedding_dim 0.0 +72 34 training.batch_size 0.0 +72 34 training.label_smoothing 0.003412893684802747 +72 35 model.output_channels 40.0 +72 35 model.input_dropout 0.10354409861520603 +72 35 
model.output_dropout 0.21524243391364378 +72 35 model.feature_map_dropout 0.08111928638797372 +72 35 model.embedding_dim 0.0 +72 35 training.batch_size 2.0 +72 35 training.label_smoothing 0.3401354345381073 +72 36 model.output_channels 25.0 +72 36 model.input_dropout 0.4743411130259596 +72 36 model.output_dropout 0.019383512119764745 +72 36 model.feature_map_dropout 0.04678517224409323 +72 36 model.embedding_dim 1.0 +72 36 training.batch_size 2.0 +72 36 training.label_smoothing 0.005189770557229865 +72 37 model.output_channels 22.0 +72 37 model.input_dropout 0.35137125022813287 +72 37 model.output_dropout 0.2898503396888417 +72 37 model.feature_map_dropout 0.08417202031712478 +72 37 model.embedding_dim 2.0 +72 37 training.batch_size 0.0 +72 37 training.label_smoothing 0.0068817083502980885 +72 38 model.output_channels 53.0 +72 38 model.input_dropout 0.41589207398376243 +72 38 model.output_dropout 0.35048051423752696 +72 38 model.feature_map_dropout 0.25323786829516126 +72 38 model.embedding_dim 0.0 +72 38 training.batch_size 0.0 +72 38 training.label_smoothing 0.0015543284587625345 +72 39 model.output_channels 28.0 +72 39 model.input_dropout 0.25828230372902605 +72 39 model.output_dropout 0.47044228121235143 +72 39 model.feature_map_dropout 0.1834470776757487 +72 39 model.embedding_dim 1.0 +72 39 training.batch_size 1.0 +72 39 training.label_smoothing 0.03277058241375811 +72 40 model.output_channels 28.0 +72 40 model.input_dropout 0.3472228688452792 +72 40 model.output_dropout 0.09518079846280492 +72 40 model.feature_map_dropout 0.035947840633887285 +72 40 model.embedding_dim 2.0 +72 40 training.batch_size 0.0 +72 40 training.label_smoothing 0.4319781717441211 +72 41 model.output_channels 38.0 +72 41 model.input_dropout 0.4161928705832942 +72 41 model.output_dropout 0.016996905372786586 +72 41 model.feature_map_dropout 0.47059407834755107 +72 41 model.embedding_dim 0.0 +72 41 training.batch_size 2.0 +72 41 training.label_smoothing 0.1416406626732198 +72 42 
model.output_channels 48.0 +72 42 model.input_dropout 0.3810613783217821 +72 42 model.output_dropout 0.40384795076162755 +72 42 model.feature_map_dropout 0.47015277777974696 +72 42 model.embedding_dim 1.0 +72 42 training.batch_size 2.0 +72 42 training.label_smoothing 0.009969898470185062 +72 43 model.output_channels 45.0 +72 43 model.input_dropout 0.4178022974827072 +72 43 model.output_dropout 0.47171139439156345 +72 43 model.feature_map_dropout 0.07358884560972423 +72 43 model.embedding_dim 2.0 +72 43 training.batch_size 1.0 +72 43 training.label_smoothing 0.04009146570403488 +72 44 model.output_channels 37.0 +72 44 model.input_dropout 0.28226907156381753 +72 44 model.output_dropout 0.2998573338440612 +72 44 model.feature_map_dropout 0.3879633222663921 +72 44 model.embedding_dim 1.0 +72 44 training.batch_size 1.0 +72 44 training.label_smoothing 0.001938268030006357 +72 45 model.output_channels 41.0 +72 45 model.input_dropout 0.4378331324711744 +72 45 model.output_dropout 0.24047303686385962 +72 45 model.feature_map_dropout 0.3969648679581999 +72 45 model.embedding_dim 2.0 +72 45 training.batch_size 0.0 +72 45 training.label_smoothing 0.01330355071609358 +72 46 model.output_channels 30.0 +72 46 model.input_dropout 0.027806404018255404 +72 46 model.output_dropout 0.048949048937362294 +72 46 model.feature_map_dropout 0.10979319412614974 +72 46 model.embedding_dim 1.0 +72 46 training.batch_size 0.0 +72 46 training.label_smoothing 0.17788299376706795 +72 47 model.output_channels 43.0 +72 47 model.input_dropout 0.20302304447342617 +72 47 model.output_dropout 0.37273144505752726 +72 47 model.feature_map_dropout 0.29733868539216035 +72 47 model.embedding_dim 1.0 +72 47 training.batch_size 1.0 +72 47 training.label_smoothing 0.0012857843883811665 +72 48 model.output_channels 25.0 +72 48 model.input_dropout 0.31716368297723035 +72 48 model.output_dropout 0.48728701140933683 +72 48 model.feature_map_dropout 0.05855139728080733 +72 48 model.embedding_dim 2.0 +72 48 
training.batch_size 2.0 +72 48 training.label_smoothing 0.38770781722400466 +72 49 model.output_channels 24.0 +72 49 model.input_dropout 0.07870044982362412 +72 49 model.output_dropout 0.42630355584571694 +72 49 model.feature_map_dropout 0.3692168256929614 +72 49 model.embedding_dim 1.0 +72 49 training.batch_size 1.0 +72 49 training.label_smoothing 0.09221481469865588 +72 50 model.output_channels 47.0 +72 50 model.input_dropout 0.429339427409048 +72 50 model.output_dropout 0.38488741259930853 +72 50 model.feature_map_dropout 0.20486081854241184 +72 50 model.embedding_dim 2.0 +72 50 training.batch_size 2.0 +72 50 training.label_smoothing 0.21368039293662405 +72 51 model.output_channels 23.0 +72 51 model.input_dropout 0.2987273534301035 +72 51 model.output_dropout 0.38814391910655827 +72 51 model.feature_map_dropout 0.25176928475635135 +72 51 model.embedding_dim 2.0 +72 51 training.batch_size 0.0 +72 51 training.label_smoothing 0.005477383855686233 +72 52 model.output_channels 56.0 +72 52 model.input_dropout 0.23761613437214735 +72 52 model.output_dropout 0.05062093714967991 +72 52 model.feature_map_dropout 0.0013267631124228574 +72 52 model.embedding_dim 0.0 +72 52 training.batch_size 1.0 +72 52 training.label_smoothing 0.04476112403689625 +72 53 model.output_channels 48.0 +72 53 model.input_dropout 0.3971699142786119 +72 53 model.output_dropout 0.4756040627664161 +72 53 model.feature_map_dropout 0.021885033769318996 +72 53 model.embedding_dim 0.0 +72 53 training.batch_size 0.0 +72 53 training.label_smoothing 0.02678918272666824 +72 54 model.output_channels 43.0 +72 54 model.input_dropout 0.11621585653934108 +72 54 model.output_dropout 0.21890955278220603 +72 54 model.feature_map_dropout 0.4610050353017359 +72 54 model.embedding_dim 2.0 +72 54 training.batch_size 1.0 +72 54 training.label_smoothing 0.5687251722550525 +72 55 model.output_channels 45.0 +72 55 model.input_dropout 0.2534863032110035 +72 55 model.output_dropout 0.28975945997172525 +72 55 
model.feature_map_dropout 0.003670561898805602 +72 55 model.embedding_dim 1.0 +72 55 training.batch_size 2.0 +72 55 training.label_smoothing 0.25612911684957684 +72 56 model.output_channels 50.0 +72 56 model.input_dropout 0.3163278372394345 +72 56 model.output_dropout 0.2377355168229212 +72 56 model.feature_map_dropout 0.07682668573246859 +72 56 model.embedding_dim 2.0 +72 56 training.batch_size 0.0 +72 56 training.label_smoothing 0.5396421349776537 +72 57 model.output_channels 29.0 +72 57 model.input_dropout 0.2101527471188751 +72 57 model.output_dropout 0.2821790903934697 +72 57 model.feature_map_dropout 0.0006813472205690618 +72 57 model.embedding_dim 0.0 +72 57 training.batch_size 2.0 +72 57 training.label_smoothing 0.002574678897991262 +72 58 model.output_channels 25.0 +72 58 model.input_dropout 0.47417793608037345 +72 58 model.output_dropout 0.20151597077484174 +72 58 model.feature_map_dropout 0.39773881061772226 +72 58 model.embedding_dim 2.0 +72 58 training.batch_size 2.0 +72 58 training.label_smoothing 0.8107562764809213 +72 59 model.output_channels 40.0 +72 59 model.input_dropout 0.17438939268606157 +72 59 model.output_dropout 0.42279570183152826 +72 59 model.feature_map_dropout 0.3233092896027983 +72 59 model.embedding_dim 2.0 +72 59 training.batch_size 0.0 +72 59 training.label_smoothing 0.1396504383737993 +72 60 model.output_channels 47.0 +72 60 model.input_dropout 0.39878729927760725 +72 60 model.output_dropout 0.4686329597745901 +72 60 model.feature_map_dropout 0.030751533225493233 +72 60 model.embedding_dim 2.0 +72 60 training.batch_size 0.0 +72 60 training.label_smoothing 0.22389885615967597 +72 61 model.output_channels 41.0 +72 61 model.input_dropout 0.3134195288906331 +72 61 model.output_dropout 0.04872828658464806 +72 61 model.feature_map_dropout 0.10238992457460927 +72 61 model.embedding_dim 0.0 +72 61 training.batch_size 2.0 +72 61 training.label_smoothing 0.010660468627731208 +72 62 model.output_channels 64.0 +72 62 model.input_dropout 
0.061333573671723485 +72 62 model.output_dropout 0.239858346288722 +72 62 model.feature_map_dropout 0.1926640877373217 +72 62 model.embedding_dim 0.0 +72 62 training.batch_size 2.0 +72 62 training.label_smoothing 0.013177710128400775 +72 63 model.output_channels 58.0 +72 63 model.input_dropout 0.24282621780077035 +72 63 model.output_dropout 0.31898027567097503 +72 63 model.feature_map_dropout 0.2590457373849884 +72 63 model.embedding_dim 1.0 +72 63 training.batch_size 1.0 +72 63 training.label_smoothing 0.008468520312948702 +72 64 model.output_channels 40.0 +72 64 model.input_dropout 0.17572308729376518 +72 64 model.output_dropout 0.005656340050055331 +72 64 model.feature_map_dropout 0.3842563739095643 +72 64 model.embedding_dim 2.0 +72 64 training.batch_size 0.0 +72 64 training.label_smoothing 0.017991267149490734 +72 65 model.output_channels 63.0 +72 65 model.input_dropout 0.2373993701406535 +72 65 model.output_dropout 0.008066685838172494 +72 65 model.feature_map_dropout 0.4899357131713974 +72 65 model.embedding_dim 0.0 +72 65 training.batch_size 1.0 +72 65 training.label_smoothing 0.0024328425476288396 +72 66 model.output_channels 18.0 +72 66 model.input_dropout 0.34035559817714606 +72 66 model.output_dropout 0.46295142121838445 +72 66 model.feature_map_dropout 0.03101626392386425 +72 66 model.embedding_dim 1.0 +72 66 training.batch_size 0.0 +72 66 training.label_smoothing 0.0034212589761907592 +72 67 model.output_channels 32.0 +72 67 model.input_dropout 0.009348735683005427 +72 67 model.output_dropout 0.26721913838521477 +72 67 model.feature_map_dropout 0.23597895295232035 +72 67 model.embedding_dim 2.0 +72 67 training.batch_size 0.0 +72 67 training.label_smoothing 0.0015042367634686468 +72 68 model.output_channels 57.0 +72 68 model.input_dropout 0.3498830191677591 +72 68 model.output_dropout 0.42574392996209565 +72 68 model.feature_map_dropout 0.29393910363570697 +72 68 model.embedding_dim 2.0 +72 68 training.batch_size 1.0 +72 68 training.label_smoothing 
0.001698291040396151 +72 69 model.output_channels 21.0 +72 69 model.input_dropout 0.17209736532370412 +72 69 model.output_dropout 0.02643271580190465 +72 69 model.feature_map_dropout 0.06660271992961198 +72 69 model.embedding_dim 0.0 +72 69 training.batch_size 2.0 +72 69 training.label_smoothing 0.004151830910129421 +72 70 model.output_channels 51.0 +72 70 model.input_dropout 0.21870391427032326 +72 70 model.output_dropout 0.3493607010890556 +72 70 model.feature_map_dropout 0.35595322997175555 +72 70 model.embedding_dim 0.0 +72 70 training.batch_size 1.0 +72 70 training.label_smoothing 0.6276290990809726 +72 71 model.output_channels 30.0 +72 71 model.input_dropout 0.3578784904213155 +72 71 model.output_dropout 0.3515301892391468 +72 71 model.feature_map_dropout 0.09717519249185214 +72 71 model.embedding_dim 1.0 +72 71 training.batch_size 0.0 +72 71 training.label_smoothing 0.10781601188741756 +72 72 model.output_channels 59.0 +72 72 model.input_dropout 0.20840990345046345 +72 72 model.output_dropout 0.16558561978756603 +72 72 model.feature_map_dropout 0.13171533564926646 +72 72 model.embedding_dim 0.0 +72 72 training.batch_size 1.0 +72 72 training.label_smoothing 0.016424937714158757 +72 73 model.output_channels 47.0 +72 73 model.input_dropout 0.43188570464561943 +72 73 model.output_dropout 0.13043500614415132 +72 73 model.feature_map_dropout 0.4887218036802904 +72 73 model.embedding_dim 2.0 +72 73 training.batch_size 0.0 +72 73 training.label_smoothing 0.8274455272712001 +72 74 model.output_channels 61.0 +72 74 model.input_dropout 0.3469962976165721 +72 74 model.output_dropout 0.40892240931053503 +72 74 model.feature_map_dropout 0.42417513846704036 +72 74 model.embedding_dim 0.0 +72 74 training.batch_size 2.0 +72 74 training.label_smoothing 0.0014247662935073927 +72 75 model.output_channels 46.0 +72 75 model.input_dropout 0.15199494633098304 +72 75 model.output_dropout 0.4308483282629306 +72 75 model.feature_map_dropout 0.0808765101743501 +72 75 
model.embedding_dim 2.0 +72 75 training.batch_size 2.0 +72 75 training.label_smoothing 0.11013991998447893 +72 76 model.output_channels 44.0 +72 76 model.input_dropout 0.12332842719029652 +72 76 model.output_dropout 0.13312222498486043 +72 76 model.feature_map_dropout 0.25935249389102727 +72 76 model.embedding_dim 1.0 +72 76 training.batch_size 0.0 +72 76 training.label_smoothing 0.002120049428754663 +72 77 model.output_channels 59.0 +72 77 model.input_dropout 0.3558733583419428 +72 77 model.output_dropout 0.33805741262680344 +72 77 model.feature_map_dropout 0.3887839980602819 +72 77 model.embedding_dim 1.0 +72 77 training.batch_size 2.0 +72 77 training.label_smoothing 0.029946230976797408 +72 78 model.output_channels 31.0 +72 78 model.input_dropout 0.3602655838516747 +72 78 model.output_dropout 0.11836836326534383 +72 78 model.feature_map_dropout 0.3048371698471272 +72 78 model.embedding_dim 1.0 +72 78 training.batch_size 0.0 +72 78 training.label_smoothing 0.002075837863534892 +72 79 model.output_channels 53.0 +72 79 model.input_dropout 0.34442789783634464 +72 79 model.output_dropout 0.14840051503659873 +72 79 model.feature_map_dropout 0.43951414091659186 +72 79 model.embedding_dim 0.0 +72 79 training.batch_size 0.0 +72 79 training.label_smoothing 0.047571321136126034 +72 80 model.output_channels 28.0 +72 80 model.input_dropout 0.35664826257913584 +72 80 model.output_dropout 0.06843793789700742 +72 80 model.feature_map_dropout 0.25455040968427095 +72 80 model.embedding_dim 2.0 +72 80 training.batch_size 1.0 +72 80 training.label_smoothing 0.7318758856094558 +72 81 model.output_channels 29.0 +72 81 model.input_dropout 0.14293726353517805 +72 81 model.output_dropout 0.05797719321023881 +72 81 model.feature_map_dropout 0.19257933759065482 +72 81 model.embedding_dim 1.0 +72 81 training.batch_size 2.0 +72 81 training.label_smoothing 0.0051291809248020575 +72 82 model.output_channels 32.0 +72 82 model.input_dropout 0.2276646974627512 +72 82 model.output_dropout 
0.013375289563765103 +72 82 model.feature_map_dropout 0.48189672906585773 +72 82 model.embedding_dim 0.0 +72 82 training.batch_size 1.0 +72 82 training.label_smoothing 0.019165364029274615 +72 83 model.output_channels 18.0 +72 83 model.input_dropout 0.4717339704112442 +72 83 model.output_dropout 0.30205757049053683 +72 83 model.feature_map_dropout 0.15831288116002018 +72 83 model.embedding_dim 1.0 +72 83 training.batch_size 0.0 +72 83 training.label_smoothing 0.001220739723898895 +72 84 model.output_channels 38.0 +72 84 model.input_dropout 0.13768286831342835 +72 84 model.output_dropout 0.1601138537715413 +72 84 model.feature_map_dropout 0.00778627335343951 +72 84 model.embedding_dim 1.0 +72 84 training.batch_size 2.0 +72 84 training.label_smoothing 0.49272702585488976 +72 85 model.output_channels 36.0 +72 85 model.input_dropout 0.4270571276734811 +72 85 model.output_dropout 0.10441461908883343 +72 85 model.feature_map_dropout 0.4930638420222771 +72 85 model.embedding_dim 2.0 +72 85 training.batch_size 2.0 +72 85 training.label_smoothing 0.0034240799788530576 +72 86 model.output_channels 37.0 +72 86 model.input_dropout 0.01044806267000592 +72 86 model.output_dropout 0.4424720692469424 +72 86 model.feature_map_dropout 0.3198305544790442 +72 86 model.embedding_dim 2.0 +72 86 training.batch_size 2.0 +72 86 training.label_smoothing 0.01634314366148987 +72 87 model.output_channels 17.0 +72 87 model.input_dropout 0.432716771007563 +72 87 model.output_dropout 0.2691418880205957 +72 87 model.feature_map_dropout 0.4966115361461049 +72 87 model.embedding_dim 0.0 +72 87 training.batch_size 2.0 +72 87 training.label_smoothing 0.022057804376882945 +72 88 model.output_channels 24.0 +72 88 model.input_dropout 0.444518886234637 +72 88 model.output_dropout 0.09392096609702172 +72 88 model.feature_map_dropout 0.355531503774198 +72 88 model.embedding_dim 2.0 +72 88 training.batch_size 2.0 +72 88 training.label_smoothing 0.02160495477919267 +72 89 model.output_channels 31.0 +72 89 
model.input_dropout 0.4180601788429365 +72 89 model.output_dropout 0.00018950362786107622 +72 89 model.feature_map_dropout 0.4456834053565262 +72 89 model.embedding_dim 1.0 +72 89 training.batch_size 0.0 +72 89 training.label_smoothing 0.06950103955123149 +72 90 model.output_channels 30.0 +72 90 model.input_dropout 0.47505754987697546 +72 90 model.output_dropout 0.04436179589830325 +72 90 model.feature_map_dropout 0.4781256156903761 +72 90 model.embedding_dim 2.0 +72 90 training.batch_size 2.0 +72 90 training.label_smoothing 0.07318608344594238 +72 91 model.output_channels 64.0 +72 91 model.input_dropout 0.26051859314695264 +72 91 model.output_dropout 0.12491957387594493 +72 91 model.feature_map_dropout 0.2504445403450917 +72 91 model.embedding_dim 0.0 +72 91 training.batch_size 0.0 +72 91 training.label_smoothing 0.30698894294843687 +72 92 model.output_channels 19.0 +72 92 model.input_dropout 0.48365136262481784 +72 92 model.output_dropout 0.19243830128464728 +72 92 model.feature_map_dropout 0.04049017205588884 +72 92 model.embedding_dim 1.0 +72 92 training.batch_size 2.0 +72 92 training.label_smoothing 0.0038259788441929767 +72 93 model.output_channels 27.0 +72 93 model.input_dropout 0.20952309547411274 +72 93 model.output_dropout 0.08883029159339478 +72 93 model.feature_map_dropout 0.028552052790209836 +72 93 model.embedding_dim 2.0 +72 93 training.batch_size 1.0 +72 93 training.label_smoothing 0.004040936830435599 +72 94 model.output_channels 51.0 +72 94 model.input_dropout 0.13829324361141643 +72 94 model.output_dropout 0.058088206349466376 +72 94 model.feature_map_dropout 0.0912806417114222 +72 94 model.embedding_dim 2.0 +72 94 training.batch_size 1.0 +72 94 training.label_smoothing 0.004165910212375634 +72 95 model.output_channels 19.0 +72 95 model.input_dropout 0.0758224610484311 +72 95 model.output_dropout 0.4446778026645051 +72 95 model.feature_map_dropout 0.291141092557753 +72 95 model.embedding_dim 2.0 +72 95 training.batch_size 2.0 +72 95 
training.label_smoothing 0.007418754569740878 +72 96 model.output_channels 60.0 +72 96 model.input_dropout 0.43686108462976564 +72 96 model.output_dropout 0.47651786632574644 +72 96 model.feature_map_dropout 0.2172683952745439 +72 96 model.embedding_dim 1.0 +72 96 training.batch_size 1.0 +72 96 training.label_smoothing 0.0012049283666970942 +72 97 model.output_channels 52.0 +72 97 model.input_dropout 0.25086579614382676 +72 97 model.output_dropout 0.14319417669781148 +72 97 model.feature_map_dropout 0.4564155114348263 +72 97 model.embedding_dim 1.0 +72 97 training.batch_size 0.0 +72 97 training.label_smoothing 0.04806835642791511 +72 98 model.output_channels 59.0 +72 98 model.input_dropout 0.4180591021494527 +72 98 model.output_dropout 0.22759374670125726 +72 98 model.feature_map_dropout 0.0034664241776851124 +72 98 model.embedding_dim 2.0 +72 98 training.batch_size 1.0 +72 98 training.label_smoothing 0.0035218728403781964 +72 99 model.output_channels 49.0 +72 99 model.input_dropout 0.2715190500182867 +72 99 model.output_dropout 0.48707557396772166 +72 99 model.feature_map_dropout 0.05385205263709175 +72 99 model.embedding_dim 2.0 +72 99 training.batch_size 1.0 +72 99 training.label_smoothing 0.6599785137031018 +72 100 model.output_channels 48.0 +72 100 model.input_dropout 0.2553266296942259 +72 100 model.output_dropout 0.4756789024126295 +72 100 model.feature_map_dropout 0.08588475329843043 +72 100 model.embedding_dim 2.0 +72 100 training.batch_size 2.0 +72 100 training.label_smoothing 0.0011904342031248331 +72 1 dataset """kinships""" +72 1 model """conve""" +72 1 loss """bceaftersigmoid""" +72 1 regularizer """no""" +72 1 optimizer """adadelta""" +72 1 training_loop """lcwa""" +72 1 evaluator """rankbased""" +72 2 dataset """kinships""" +72 2 model """conve""" +72 2 loss """bceaftersigmoid""" +72 2 regularizer """no""" +72 2 optimizer """adadelta""" +72 2 training_loop """lcwa""" +72 2 evaluator """rankbased""" +72 3 dataset """kinships""" +72 3 model 
"""conve""" +72 3 loss """bceaftersigmoid""" +72 3 regularizer """no""" +72 3 optimizer """adadelta""" +72 3 training_loop """lcwa""" +72 3 evaluator """rankbased""" +72 4 dataset """kinships""" +72 4 model """conve""" +72 4 loss """bceaftersigmoid""" +72 4 regularizer """no""" +72 4 optimizer """adadelta""" +72 4 training_loop """lcwa""" +72 4 evaluator """rankbased""" +72 5 dataset """kinships""" +72 5 model """conve""" +72 5 loss """bceaftersigmoid""" +72 5 regularizer """no""" +72 5 optimizer """adadelta""" +72 5 training_loop """lcwa""" +72 5 evaluator """rankbased""" +72 6 dataset """kinships""" +72 6 model """conve""" +72 6 loss """bceaftersigmoid""" +72 6 regularizer """no""" +72 6 optimizer """adadelta""" +72 6 training_loop """lcwa""" +72 6 evaluator """rankbased""" +72 7 dataset """kinships""" +72 7 model """conve""" +72 7 loss """bceaftersigmoid""" +72 7 regularizer """no""" +72 7 optimizer """adadelta""" +72 7 training_loop """lcwa""" +72 7 evaluator """rankbased""" +72 8 dataset """kinships""" +72 8 model """conve""" +72 8 loss """bceaftersigmoid""" +72 8 regularizer """no""" +72 8 optimizer """adadelta""" +72 8 training_loop """lcwa""" +72 8 evaluator """rankbased""" +72 9 dataset """kinships""" +72 9 model """conve""" +72 9 loss """bceaftersigmoid""" +72 9 regularizer """no""" +72 9 optimizer """adadelta""" +72 9 training_loop """lcwa""" +72 9 evaluator """rankbased""" +72 10 dataset """kinships""" +72 10 model """conve""" +72 10 loss """bceaftersigmoid""" +72 10 regularizer """no""" +72 10 optimizer """adadelta""" +72 10 training_loop """lcwa""" +72 10 evaluator """rankbased""" +72 11 dataset """kinships""" +72 11 model """conve""" +72 11 loss """bceaftersigmoid""" +72 11 regularizer """no""" +72 11 optimizer """adadelta""" +72 11 training_loop """lcwa""" +72 11 evaluator """rankbased""" +72 12 dataset """kinships""" +72 12 model """conve""" +72 12 loss """bceaftersigmoid""" +72 12 regularizer """no""" +72 12 optimizer """adadelta""" +72 12 
training_loop """lcwa""" +72 12 evaluator """rankbased""" +72 13 dataset """kinships""" +72 13 model """conve""" +72 13 loss """bceaftersigmoid""" +72 13 regularizer """no""" +72 13 optimizer """adadelta""" +72 13 training_loop """lcwa""" +72 13 evaluator """rankbased""" +72 14 dataset """kinships""" +72 14 model """conve""" +72 14 loss """bceaftersigmoid""" +72 14 regularizer """no""" +72 14 optimizer """adadelta""" +72 14 training_loop """lcwa""" +72 14 evaluator """rankbased""" +72 15 dataset """kinships""" +72 15 model """conve""" +72 15 loss """bceaftersigmoid""" +72 15 regularizer """no""" +72 15 optimizer """adadelta""" +72 15 training_loop """lcwa""" +72 15 evaluator """rankbased""" +72 16 dataset """kinships""" +72 16 model """conve""" +72 16 loss """bceaftersigmoid""" +72 16 regularizer """no""" +72 16 optimizer """adadelta""" +72 16 training_loop """lcwa""" +72 16 evaluator """rankbased""" +72 17 dataset """kinships""" +72 17 model """conve""" +72 17 loss """bceaftersigmoid""" +72 17 regularizer """no""" +72 17 optimizer """adadelta""" +72 17 training_loop """lcwa""" +72 17 evaluator """rankbased""" +72 18 dataset """kinships""" +72 18 model """conve""" +72 18 loss """bceaftersigmoid""" +72 18 regularizer """no""" +72 18 optimizer """adadelta""" +72 18 training_loop """lcwa""" +72 18 evaluator """rankbased""" +72 19 dataset """kinships""" +72 19 model """conve""" +72 19 loss """bceaftersigmoid""" +72 19 regularizer """no""" +72 19 optimizer """adadelta""" +72 19 training_loop """lcwa""" +72 19 evaluator """rankbased""" +72 20 dataset """kinships""" +72 20 model """conve""" +72 20 loss """bceaftersigmoid""" +72 20 regularizer """no""" +72 20 optimizer """adadelta""" +72 20 training_loop """lcwa""" +72 20 evaluator """rankbased""" +72 21 dataset """kinships""" +72 21 model """conve""" +72 21 loss """bceaftersigmoid""" +72 21 regularizer """no""" +72 21 optimizer """adadelta""" +72 21 training_loop """lcwa""" +72 21 evaluator """rankbased""" +72 22 dataset 
"""kinships""" +72 22 model """conve""" +72 22 loss """bceaftersigmoid""" +72 22 regularizer """no""" +72 22 optimizer """adadelta""" +72 22 training_loop """lcwa""" +72 22 evaluator """rankbased""" +72 23 dataset """kinships""" +72 23 model """conve""" +72 23 loss """bceaftersigmoid""" +72 23 regularizer """no""" +72 23 optimizer """adadelta""" +72 23 training_loop """lcwa""" +72 23 evaluator """rankbased""" +72 24 dataset """kinships""" +72 24 model """conve""" +72 24 loss """bceaftersigmoid""" +72 24 regularizer """no""" +72 24 optimizer """adadelta""" +72 24 training_loop """lcwa""" +72 24 evaluator """rankbased""" +72 25 dataset """kinships""" +72 25 model """conve""" +72 25 loss """bceaftersigmoid""" +72 25 regularizer """no""" +72 25 optimizer """adadelta""" +72 25 training_loop """lcwa""" +72 25 evaluator """rankbased""" +72 26 dataset """kinships""" +72 26 model """conve""" +72 26 loss """bceaftersigmoid""" +72 26 regularizer """no""" +72 26 optimizer """adadelta""" +72 26 training_loop """lcwa""" +72 26 evaluator """rankbased""" +72 27 dataset """kinships""" +72 27 model """conve""" +72 27 loss """bceaftersigmoid""" +72 27 regularizer """no""" +72 27 optimizer """adadelta""" +72 27 training_loop """lcwa""" +72 27 evaluator """rankbased""" +72 28 dataset """kinships""" +72 28 model """conve""" +72 28 loss """bceaftersigmoid""" +72 28 regularizer """no""" +72 28 optimizer """adadelta""" +72 28 training_loop """lcwa""" +72 28 evaluator """rankbased""" +72 29 dataset """kinships""" +72 29 model """conve""" +72 29 loss """bceaftersigmoid""" +72 29 regularizer """no""" +72 29 optimizer """adadelta""" +72 29 training_loop """lcwa""" +72 29 evaluator """rankbased""" +72 30 dataset """kinships""" +72 30 model """conve""" +72 30 loss """bceaftersigmoid""" +72 30 regularizer """no""" +72 30 optimizer """adadelta""" +72 30 training_loop """lcwa""" +72 30 evaluator """rankbased""" +72 31 dataset """kinships""" +72 31 model """conve""" +72 31 loss """bceaftersigmoid""" 
+72 31 regularizer """no""" +72 31 optimizer """adadelta""" +72 31 training_loop """lcwa""" +72 31 evaluator """rankbased""" +72 32 dataset """kinships""" +72 32 model """conve""" +72 32 loss """bceaftersigmoid""" +72 32 regularizer """no""" +72 32 optimizer """adadelta""" +72 32 training_loop """lcwa""" +72 32 evaluator """rankbased""" +72 33 dataset """kinships""" +72 33 model """conve""" +72 33 loss """bceaftersigmoid""" +72 33 regularizer """no""" +72 33 optimizer """adadelta""" +72 33 training_loop """lcwa""" +72 33 evaluator """rankbased""" +72 34 dataset """kinships""" +72 34 model """conve""" +72 34 loss """bceaftersigmoid""" +72 34 regularizer """no""" +72 34 optimizer """adadelta""" +72 34 training_loop """lcwa""" +72 34 evaluator """rankbased""" +72 35 dataset """kinships""" +72 35 model """conve""" +72 35 loss """bceaftersigmoid""" +72 35 regularizer """no""" +72 35 optimizer """adadelta""" +72 35 training_loop """lcwa""" +72 35 evaluator """rankbased""" +72 36 dataset """kinships""" +72 36 model """conve""" +72 36 loss """bceaftersigmoid""" +72 36 regularizer """no""" +72 36 optimizer """adadelta""" +72 36 training_loop """lcwa""" +72 36 evaluator """rankbased""" +72 37 dataset """kinships""" +72 37 model """conve""" +72 37 loss """bceaftersigmoid""" +72 37 regularizer """no""" +72 37 optimizer """adadelta""" +72 37 training_loop """lcwa""" +72 37 evaluator """rankbased""" +72 38 dataset """kinships""" +72 38 model """conve""" +72 38 loss """bceaftersigmoid""" +72 38 regularizer """no""" +72 38 optimizer """adadelta""" +72 38 training_loop """lcwa""" +72 38 evaluator """rankbased""" +72 39 dataset """kinships""" +72 39 model """conve""" +72 39 loss """bceaftersigmoid""" +72 39 regularizer """no""" +72 39 optimizer """adadelta""" +72 39 training_loop """lcwa""" +72 39 evaluator """rankbased""" +72 40 dataset """kinships""" +72 40 model """conve""" +72 40 loss """bceaftersigmoid""" +72 40 regularizer """no""" +72 40 optimizer """adadelta""" +72 40 
training_loop """lcwa""" +72 40 evaluator """rankbased""" +72 41 dataset """kinships""" +72 41 model """conve""" +72 41 loss """bceaftersigmoid""" +72 41 regularizer """no""" +72 41 optimizer """adadelta""" +72 41 training_loop """lcwa""" +72 41 evaluator """rankbased""" +72 42 dataset """kinships""" +72 42 model """conve""" +72 42 loss """bceaftersigmoid""" +72 42 regularizer """no""" +72 42 optimizer """adadelta""" +72 42 training_loop """lcwa""" +72 42 evaluator """rankbased""" +72 43 dataset """kinships""" +72 43 model """conve""" +72 43 loss """bceaftersigmoid""" +72 43 regularizer """no""" +72 43 optimizer """adadelta""" +72 43 training_loop """lcwa""" +72 43 evaluator """rankbased""" +72 44 dataset """kinships""" +72 44 model """conve""" +72 44 loss """bceaftersigmoid""" +72 44 regularizer """no""" +72 44 optimizer """adadelta""" +72 44 training_loop """lcwa""" +72 44 evaluator """rankbased""" +72 45 dataset """kinships""" +72 45 model """conve""" +72 45 loss """bceaftersigmoid""" +72 45 regularizer """no""" +72 45 optimizer """adadelta""" +72 45 training_loop """lcwa""" +72 45 evaluator """rankbased""" +72 46 dataset """kinships""" +72 46 model """conve""" +72 46 loss """bceaftersigmoid""" +72 46 regularizer """no""" +72 46 optimizer """adadelta""" +72 46 training_loop """lcwa""" +72 46 evaluator """rankbased""" +72 47 dataset """kinships""" +72 47 model """conve""" +72 47 loss """bceaftersigmoid""" +72 47 regularizer """no""" +72 47 optimizer """adadelta""" +72 47 training_loop """lcwa""" +72 47 evaluator """rankbased""" +72 48 dataset """kinships""" +72 48 model """conve""" +72 48 loss """bceaftersigmoid""" +72 48 regularizer """no""" +72 48 optimizer """adadelta""" +72 48 training_loop """lcwa""" +72 48 evaluator """rankbased""" +72 49 dataset """kinships""" +72 49 model """conve""" +72 49 loss """bceaftersigmoid""" +72 49 regularizer """no""" +72 49 optimizer """adadelta""" +72 49 training_loop """lcwa""" +72 49 evaluator """rankbased""" +72 50 dataset 
"""kinships""" +72 50 model """conve""" +72 50 loss """bceaftersigmoid""" +72 50 regularizer """no""" +72 50 optimizer """adadelta""" +72 50 training_loop """lcwa""" +72 50 evaluator """rankbased""" +72 51 dataset """kinships""" +72 51 model """conve""" +72 51 loss """bceaftersigmoid""" +72 51 regularizer """no""" +72 51 optimizer """adadelta""" +72 51 training_loop """lcwa""" +72 51 evaluator """rankbased""" +72 52 dataset """kinships""" +72 52 model """conve""" +72 52 loss """bceaftersigmoid""" +72 52 regularizer """no""" +72 52 optimizer """adadelta""" +72 52 training_loop """lcwa""" +72 52 evaluator """rankbased""" +72 53 dataset """kinships""" +72 53 model """conve""" +72 53 loss """bceaftersigmoid""" +72 53 regularizer """no""" +72 53 optimizer """adadelta""" +72 53 training_loop """lcwa""" +72 53 evaluator """rankbased""" +72 54 dataset """kinships""" +72 54 model """conve""" +72 54 loss """bceaftersigmoid""" +72 54 regularizer """no""" +72 54 optimizer """adadelta""" +72 54 training_loop """lcwa""" +72 54 evaluator """rankbased""" +72 55 dataset """kinships""" +72 55 model """conve""" +72 55 loss """bceaftersigmoid""" +72 55 regularizer """no""" +72 55 optimizer """adadelta""" +72 55 training_loop """lcwa""" +72 55 evaluator """rankbased""" +72 56 dataset """kinships""" +72 56 model """conve""" +72 56 loss """bceaftersigmoid""" +72 56 regularizer """no""" +72 56 optimizer """adadelta""" +72 56 training_loop """lcwa""" +72 56 evaluator """rankbased""" +72 57 dataset """kinships""" +72 57 model """conve""" +72 57 loss """bceaftersigmoid""" +72 57 regularizer """no""" +72 57 optimizer """adadelta""" +72 57 training_loop """lcwa""" +72 57 evaluator """rankbased""" +72 58 dataset """kinships""" +72 58 model """conve""" +72 58 loss """bceaftersigmoid""" +72 58 regularizer """no""" +72 58 optimizer """adadelta""" +72 58 training_loop """lcwa""" +72 58 evaluator """rankbased""" +72 59 dataset """kinships""" +72 59 model """conve""" +72 59 loss """bceaftersigmoid""" 
+72 59 regularizer """no""" +72 59 optimizer """adadelta""" +72 59 training_loop """lcwa""" +72 59 evaluator """rankbased""" +72 60 dataset """kinships""" +72 60 model """conve""" +72 60 loss """bceaftersigmoid""" +72 60 regularizer """no""" +72 60 optimizer """adadelta""" +72 60 training_loop """lcwa""" +72 60 evaluator """rankbased""" +72 61 dataset """kinships""" +72 61 model """conve""" +72 61 loss """bceaftersigmoid""" +72 61 regularizer """no""" +72 61 optimizer """adadelta""" +72 61 training_loop """lcwa""" +72 61 evaluator """rankbased""" +72 62 dataset """kinships""" +72 62 model """conve""" +72 62 loss """bceaftersigmoid""" +72 62 regularizer """no""" +72 62 optimizer """adadelta""" +72 62 training_loop """lcwa""" +72 62 evaluator """rankbased""" +72 63 dataset """kinships""" +72 63 model """conve""" +72 63 loss """bceaftersigmoid""" +72 63 regularizer """no""" +72 63 optimizer """adadelta""" +72 63 training_loop """lcwa""" +72 63 evaluator """rankbased""" +72 64 dataset """kinships""" +72 64 model """conve""" +72 64 loss """bceaftersigmoid""" +72 64 regularizer """no""" +72 64 optimizer """adadelta""" +72 64 training_loop """lcwa""" +72 64 evaluator """rankbased""" +72 65 dataset """kinships""" +72 65 model """conve""" +72 65 loss """bceaftersigmoid""" +72 65 regularizer """no""" +72 65 optimizer """adadelta""" +72 65 training_loop """lcwa""" +72 65 evaluator """rankbased""" +72 66 dataset """kinships""" +72 66 model """conve""" +72 66 loss """bceaftersigmoid""" +72 66 regularizer """no""" +72 66 optimizer """adadelta""" +72 66 training_loop """lcwa""" +72 66 evaluator """rankbased""" +72 67 dataset """kinships""" +72 67 model """conve""" +72 67 loss """bceaftersigmoid""" +72 67 regularizer """no""" +72 67 optimizer """adadelta""" +72 67 training_loop """lcwa""" +72 67 evaluator """rankbased""" +72 68 dataset """kinships""" +72 68 model """conve""" +72 68 loss """bceaftersigmoid""" +72 68 regularizer """no""" +72 68 optimizer """adadelta""" +72 68 
training_loop """lcwa""" +72 68 evaluator """rankbased""" +72 69 dataset """kinships""" +72 69 model """conve""" +72 69 loss """bceaftersigmoid""" +72 69 regularizer """no""" +72 69 optimizer """adadelta""" +72 69 training_loop """lcwa""" +72 69 evaluator """rankbased""" +72 70 dataset """kinships""" +72 70 model """conve""" +72 70 loss """bceaftersigmoid""" +72 70 regularizer """no""" +72 70 optimizer """adadelta""" +72 70 training_loop """lcwa""" +72 70 evaluator """rankbased""" +72 71 dataset """kinships""" +72 71 model """conve""" +72 71 loss """bceaftersigmoid""" +72 71 regularizer """no""" +72 71 optimizer """adadelta""" +72 71 training_loop """lcwa""" +72 71 evaluator """rankbased""" +72 72 dataset """kinships""" +72 72 model """conve""" +72 72 loss """bceaftersigmoid""" +72 72 regularizer """no""" +72 72 optimizer """adadelta""" +72 72 training_loop """lcwa""" +72 72 evaluator """rankbased""" +72 73 dataset """kinships""" +72 73 model """conve""" +72 73 loss """bceaftersigmoid""" +72 73 regularizer """no""" +72 73 optimizer """adadelta""" +72 73 training_loop """lcwa""" +72 73 evaluator """rankbased""" +72 74 dataset """kinships""" +72 74 model """conve""" +72 74 loss """bceaftersigmoid""" +72 74 regularizer """no""" +72 74 optimizer """adadelta""" +72 74 training_loop """lcwa""" +72 74 evaluator """rankbased""" +72 75 dataset """kinships""" +72 75 model """conve""" +72 75 loss """bceaftersigmoid""" +72 75 regularizer """no""" +72 75 optimizer """adadelta""" +72 75 training_loop """lcwa""" +72 75 evaluator """rankbased""" +72 76 dataset """kinships""" +72 76 model """conve""" +72 76 loss """bceaftersigmoid""" +72 76 regularizer """no""" +72 76 optimizer """adadelta""" +72 76 training_loop """lcwa""" +72 76 evaluator """rankbased""" +72 77 dataset """kinships""" +72 77 model """conve""" +72 77 loss """bceaftersigmoid""" +72 77 regularizer """no""" +72 77 optimizer """adadelta""" +72 77 training_loop """lcwa""" +72 77 evaluator """rankbased""" +72 78 dataset 
"""kinships""" +72 78 model """conve""" +72 78 loss """bceaftersigmoid""" +72 78 regularizer """no""" +72 78 optimizer """adadelta""" +72 78 training_loop """lcwa""" +72 78 evaluator """rankbased""" +72 79 dataset """kinships""" +72 79 model """conve""" +72 79 loss """bceaftersigmoid""" +72 79 regularizer """no""" +72 79 optimizer """adadelta""" +72 79 training_loop """lcwa""" +72 79 evaluator """rankbased""" +72 80 dataset """kinships""" +72 80 model """conve""" +72 80 loss """bceaftersigmoid""" +72 80 regularizer """no""" +72 80 optimizer """adadelta""" +72 80 training_loop """lcwa""" +72 80 evaluator """rankbased""" +72 81 dataset """kinships""" +72 81 model """conve""" +72 81 loss """bceaftersigmoid""" +72 81 regularizer """no""" +72 81 optimizer """adadelta""" +72 81 training_loop """lcwa""" +72 81 evaluator """rankbased""" +72 82 dataset """kinships""" +72 82 model """conve""" +72 82 loss """bceaftersigmoid""" +72 82 regularizer """no""" +72 82 optimizer """adadelta""" +72 82 training_loop """lcwa""" +72 82 evaluator """rankbased""" +72 83 dataset """kinships""" +72 83 model """conve""" +72 83 loss """bceaftersigmoid""" +72 83 regularizer """no""" +72 83 optimizer """adadelta""" +72 83 training_loop """lcwa""" +72 83 evaluator """rankbased""" +72 84 dataset """kinships""" +72 84 model """conve""" +72 84 loss """bceaftersigmoid""" +72 84 regularizer """no""" +72 84 optimizer """adadelta""" +72 84 training_loop """lcwa""" +72 84 evaluator """rankbased""" +72 85 dataset """kinships""" +72 85 model """conve""" +72 85 loss """bceaftersigmoid""" +72 85 regularizer """no""" +72 85 optimizer """adadelta""" +72 85 training_loop """lcwa""" +72 85 evaluator """rankbased""" +72 86 dataset """kinships""" +72 86 model """conve""" +72 86 loss """bceaftersigmoid""" +72 86 regularizer """no""" +72 86 optimizer """adadelta""" +72 86 training_loop """lcwa""" +72 86 evaluator """rankbased""" +72 87 dataset """kinships""" +72 87 model """conve""" +72 87 loss """bceaftersigmoid""" 
+72 87 regularizer """no""" +72 87 optimizer """adadelta""" +72 87 training_loop """lcwa""" +72 87 evaluator """rankbased""" +72 88 dataset """kinships""" +72 88 model """conve""" +72 88 loss """bceaftersigmoid""" +72 88 regularizer """no""" +72 88 optimizer """adadelta""" +72 88 training_loop """lcwa""" +72 88 evaluator """rankbased""" +72 89 dataset """kinships""" +72 89 model """conve""" +72 89 loss """bceaftersigmoid""" +72 89 regularizer """no""" +72 89 optimizer """adadelta""" +72 89 training_loop """lcwa""" +72 89 evaluator """rankbased""" +72 90 dataset """kinships""" +72 90 model """conve""" +72 90 loss """bceaftersigmoid""" +72 90 regularizer """no""" +72 90 optimizer """adadelta""" +72 90 training_loop """lcwa""" +72 90 evaluator """rankbased""" +72 91 dataset """kinships""" +72 91 model """conve""" +72 91 loss """bceaftersigmoid""" +72 91 regularizer """no""" +72 91 optimizer """adadelta""" +72 91 training_loop """lcwa""" +72 91 evaluator """rankbased""" +72 92 dataset """kinships""" +72 92 model """conve""" +72 92 loss """bceaftersigmoid""" +72 92 regularizer """no""" +72 92 optimizer """adadelta""" +72 92 training_loop """lcwa""" +72 92 evaluator """rankbased""" +72 93 dataset """kinships""" +72 93 model """conve""" +72 93 loss """bceaftersigmoid""" +72 93 regularizer """no""" +72 93 optimizer """adadelta""" +72 93 training_loop """lcwa""" +72 93 evaluator """rankbased""" +72 94 dataset """kinships""" +72 94 model """conve""" +72 94 loss """bceaftersigmoid""" +72 94 regularizer """no""" +72 94 optimizer """adadelta""" +72 94 training_loop """lcwa""" +72 94 evaluator """rankbased""" +72 95 dataset """kinships""" +72 95 model """conve""" +72 95 loss """bceaftersigmoid""" +72 95 regularizer """no""" +72 95 optimizer """adadelta""" +72 95 training_loop """lcwa""" +72 95 evaluator """rankbased""" +72 96 dataset """kinships""" +72 96 model """conve""" +72 96 loss """bceaftersigmoid""" +72 96 regularizer """no""" +72 96 optimizer """adadelta""" +72 96 
training_loop """lcwa""" +72 96 evaluator """rankbased""" +72 97 dataset """kinships""" +72 97 model """conve""" +72 97 loss """bceaftersigmoid""" +72 97 regularizer """no""" +72 97 optimizer """adadelta""" +72 97 training_loop """lcwa""" +72 97 evaluator """rankbased""" +72 98 dataset """kinships""" +72 98 model """conve""" +72 98 loss """bceaftersigmoid""" +72 98 regularizer """no""" +72 98 optimizer """adadelta""" +72 98 training_loop """lcwa""" +72 98 evaluator """rankbased""" +72 99 dataset """kinships""" +72 99 model """conve""" +72 99 loss """bceaftersigmoid""" +72 99 regularizer """no""" +72 99 optimizer """adadelta""" +72 99 training_loop """lcwa""" +72 99 evaluator """rankbased""" +72 100 dataset """kinships""" +72 100 model """conve""" +72 100 loss """bceaftersigmoid""" +72 100 regularizer """no""" +72 100 optimizer """adadelta""" +72 100 training_loop """lcwa""" +72 100 evaluator """rankbased""" +73 1 model.output_channels 37.0 +73 1 model.input_dropout 0.1744192518847677 +73 1 model.output_dropout 0.2784957193049664 +73 1 model.feature_map_dropout 0.35984980081340867 +73 1 model.embedding_dim 2.0 +73 1 training.batch_size 1.0 +73 1 training.label_smoothing 0.004141539527568721 +73 2 model.output_channels 41.0 +73 2 model.input_dropout 0.45625967331074235 +73 2 model.output_dropout 0.08332137466980166 +73 2 model.feature_map_dropout 0.050974837281196284 +73 2 model.embedding_dim 0.0 +73 2 training.batch_size 2.0 +73 2 training.label_smoothing 0.8965510468087077 +73 3 model.output_channels 29.0 +73 3 model.input_dropout 0.3353559074458356 +73 3 model.output_dropout 0.2894757746968194 +73 3 model.feature_map_dropout 0.30782851677868295 +73 3 model.embedding_dim 0.0 +73 3 training.batch_size 1.0 +73 3 training.label_smoothing 0.016992592622915016 +73 4 model.output_channels 37.0 +73 4 model.input_dropout 0.3460164070332956 +73 4 model.output_dropout 0.010791950634929826 +73 4 model.feature_map_dropout 0.28326578826128745 +73 4 model.embedding_dim 1.0 +73 4 
training.batch_size 2.0 +73 4 training.label_smoothing 0.38408359703826406 +73 5 model.output_channels 51.0 +73 5 model.input_dropout 0.031200645733581056 +73 5 model.output_dropout 0.46420497322708953 +73 5 model.feature_map_dropout 0.2700650347334848 +73 5 model.embedding_dim 2.0 +73 5 training.batch_size 0.0 +73 5 training.label_smoothing 0.0032411683010980857 +73 6 model.output_channels 38.0 +73 6 model.input_dropout 0.2317261090660423 +73 6 model.output_dropout 0.05872464812660627 +73 6 model.feature_map_dropout 0.44767197703997846 +73 6 model.embedding_dim 0.0 +73 6 training.batch_size 2.0 +73 6 training.label_smoothing 0.9430459801474658 +73 7 model.output_channels 40.0 +73 7 model.input_dropout 0.3679818585169958 +73 7 model.output_dropout 0.06237932097305943 +73 7 model.feature_map_dropout 0.42293990964591743 +73 7 model.embedding_dim 1.0 +73 7 training.batch_size 1.0 +73 7 training.label_smoothing 0.003412612938293266 +73 8 model.output_channels 60.0 +73 8 model.input_dropout 0.059388665490702264 +73 8 model.output_dropout 0.25587525512128423 +73 8 model.feature_map_dropout 0.3154003127411627 +73 8 model.embedding_dim 1.0 +73 8 training.batch_size 1.0 +73 8 training.label_smoothing 0.8949076915012639 +73 9 model.output_channels 55.0 +73 9 model.input_dropout 0.07457547805112957 +73 9 model.output_dropout 0.4002746633246407 +73 9 model.feature_map_dropout 0.49349893338952444 +73 9 model.embedding_dim 1.0 +73 9 training.batch_size 0.0 +73 9 training.label_smoothing 0.002993274984178864 +73 10 model.output_channels 17.0 +73 10 model.input_dropout 0.3991812281227754 +73 10 model.output_dropout 0.10768867176087654 +73 10 model.feature_map_dropout 0.14644731827992652 +73 10 model.embedding_dim 0.0 +73 10 training.batch_size 1.0 +73 10 training.label_smoothing 0.006545320696609411 +73 11 model.output_channels 44.0 +73 11 model.input_dropout 0.052031636948419335 +73 11 model.output_dropout 0.4298580248244071 +73 11 model.feature_map_dropout 0.4598937797952355 +73 
11 model.embedding_dim 1.0 +73 11 training.batch_size 2.0 +73 11 training.label_smoothing 0.05406101433408575 +73 12 model.output_channels 33.0 +73 12 model.input_dropout 0.14934878088044268 +73 12 model.output_dropout 0.02242419370285248 +73 12 model.feature_map_dropout 0.40553622183733784 +73 12 model.embedding_dim 2.0 +73 12 training.batch_size 1.0 +73 12 training.label_smoothing 0.011146406581466763 +73 13 model.output_channels 57.0 +73 13 model.input_dropout 0.07553475391306208 +73 13 model.output_dropout 0.09821984135239015 +73 13 model.feature_map_dropout 0.3373622018380635 +73 13 model.embedding_dim 0.0 +73 13 training.batch_size 1.0 +73 13 training.label_smoothing 0.010885883658623257 +73 14 model.output_channels 43.0 +73 14 model.input_dropout 0.18721355086642 +73 14 model.output_dropout 0.14576841764881188 +73 14 model.feature_map_dropout 0.39248845843923497 +73 14 model.embedding_dim 1.0 +73 14 training.batch_size 0.0 +73 14 training.label_smoothing 0.0046171801196947145 +73 15 model.output_channels 19.0 +73 15 model.input_dropout 0.08754304835919219 +73 15 model.output_dropout 0.4388994011951987 +73 15 model.feature_map_dropout 0.3443738092998731 +73 15 model.embedding_dim 0.0 +73 15 training.batch_size 1.0 +73 15 training.label_smoothing 0.09715534019073345 +73 16 model.output_channels 58.0 +73 16 model.input_dropout 0.0755163611705893 +73 16 model.output_dropout 0.16942154386113123 +73 16 model.feature_map_dropout 0.33162774104933773 +73 16 model.embedding_dim 0.0 +73 16 training.batch_size 1.0 +73 16 training.label_smoothing 0.001333822779823842 +73 17 model.output_channels 60.0 +73 17 model.input_dropout 0.06626015744799152 +73 17 model.output_dropout 0.24678455494833063 +73 17 model.feature_map_dropout 0.009993921136502049 +73 17 model.embedding_dim 0.0 +73 17 training.batch_size 2.0 +73 17 training.label_smoothing 0.7434624076187993 +73 18 model.output_channels 56.0 +73 18 model.input_dropout 0.2208775385308832 +73 18 model.output_dropout 
0.2339537417354186 +73 18 model.feature_map_dropout 0.24699604629800465 +73 18 model.embedding_dim 0.0 +73 18 training.batch_size 0.0 +73 18 training.label_smoothing 0.0012958751928123494 +73 19 model.output_channels 62.0 +73 19 model.input_dropout 0.2680982158892869 +73 19 model.output_dropout 0.2159285179949691 +73 19 model.feature_map_dropout 0.3605665965125655 +73 19 model.embedding_dim 2.0 +73 19 training.batch_size 0.0 +73 19 training.label_smoothing 0.013015893932380437 +73 20 model.output_channels 53.0 +73 20 model.input_dropout 0.3698475631876855 +73 20 model.output_dropout 0.4800350803133827 +73 20 model.feature_map_dropout 0.4670678778768191 +73 20 model.embedding_dim 0.0 +73 20 training.batch_size 1.0 +73 20 training.label_smoothing 0.12228815229598511 +73 21 model.output_channels 17.0 +73 21 model.input_dropout 0.2367806479124811 +73 21 model.output_dropout 0.14667736997941988 +73 21 model.feature_map_dropout 0.030047253145280672 +73 21 model.embedding_dim 2.0 +73 21 training.batch_size 2.0 +73 21 training.label_smoothing 0.02566845337678194 +73 22 model.output_channels 47.0 +73 22 model.input_dropout 0.4483801467112704 +73 22 model.output_dropout 0.4377409261043226 +73 22 model.feature_map_dropout 0.12448409268990612 +73 22 model.embedding_dim 2.0 +73 22 training.batch_size 2.0 +73 22 training.label_smoothing 0.004049046497540195 +73 23 model.output_channels 60.0 +73 23 model.input_dropout 0.03293617304407742 +73 23 model.output_dropout 0.015448308771166375 +73 23 model.feature_map_dropout 0.4603122709266042 +73 23 model.embedding_dim 1.0 +73 23 training.batch_size 0.0 +73 23 training.label_smoothing 0.024347167993507116 +73 24 model.output_channels 64.0 +73 24 model.input_dropout 0.019540951185782085 +73 24 model.output_dropout 0.15411823345653092 +73 24 model.feature_map_dropout 0.24146805398559823 +73 24 model.embedding_dim 0.0 +73 24 training.batch_size 1.0 +73 24 training.label_smoothing 0.007969084171797118 +73 25 model.output_channels 51.0 +73 
25 model.input_dropout 0.2170699789164452 +73 25 model.output_dropout 0.34093890204090527 +73 25 model.feature_map_dropout 0.0035037127152485747 +73 25 model.embedding_dim 2.0 +73 25 training.batch_size 0.0 +73 25 training.label_smoothing 0.03267068223874892 +73 26 model.output_channels 24.0 +73 26 model.input_dropout 0.42642699084371744 +73 26 model.output_dropout 0.28535427089929505 +73 26 model.feature_map_dropout 0.4205323949890947 +73 26 model.embedding_dim 1.0 +73 26 training.batch_size 2.0 +73 26 training.label_smoothing 0.10541740185657471 +73 27 model.output_channels 34.0 +73 27 model.input_dropout 0.1335085239045954 +73 27 model.output_dropout 0.15127750361248682 +73 27 model.feature_map_dropout 0.4805293154665424 +73 27 model.embedding_dim 2.0 +73 27 training.batch_size 2.0 +73 27 training.label_smoothing 0.008012904366458441 +73 28 model.output_channels 34.0 +73 28 model.input_dropout 0.28960651735052545 +73 28 model.output_dropout 0.43316701263256085 +73 28 model.feature_map_dropout 0.21075664325199728 +73 28 model.embedding_dim 0.0 +73 28 training.batch_size 2.0 +73 28 training.label_smoothing 0.007273299968178434 +73 29 model.output_channels 47.0 +73 29 model.input_dropout 0.3470777561273165 +73 29 model.output_dropout 0.3749601990159183 +73 29 model.feature_map_dropout 0.2788885159080453 +73 29 model.embedding_dim 2.0 +73 29 training.batch_size 1.0 +73 29 training.label_smoothing 0.0663478719765071 +73 30 model.output_channels 56.0 +73 30 model.input_dropout 0.022875119903502217 +73 30 model.output_dropout 0.42275819967670625 +73 30 model.feature_map_dropout 0.27589301385588416 +73 30 model.embedding_dim 0.0 +73 30 training.batch_size 1.0 +73 30 training.label_smoothing 0.5246517413386547 +73 31 model.output_channels 41.0 +73 31 model.input_dropout 0.1044017661386965 +73 31 model.output_dropout 0.05594745228202508 +73 31 model.feature_map_dropout 0.3606391380812771 +73 31 model.embedding_dim 0.0 +73 31 training.batch_size 0.0 +73 31 
training.label_smoothing 0.1899023291225547 +73 32 model.output_channels 46.0 +73 32 model.input_dropout 0.19998871534058238 +73 32 model.output_dropout 0.26971454503430997 +73 32 model.feature_map_dropout 0.21202517486855577 +73 32 model.embedding_dim 1.0 +73 32 training.batch_size 2.0 +73 32 training.label_smoothing 0.13494790721423472 +73 33 model.output_channels 41.0 +73 33 model.input_dropout 0.43827048836500687 +73 33 model.output_dropout 0.27354611410116836 +73 33 model.feature_map_dropout 0.014483198263411101 +73 33 model.embedding_dim 2.0 +73 33 training.batch_size 1.0 +73 33 training.label_smoothing 0.7238414896991376 +73 34 model.output_channels 22.0 +73 34 model.input_dropout 0.056202562131797584 +73 34 model.output_dropout 0.42971570210347065 +73 34 model.feature_map_dropout 0.20589725553137522 +73 34 model.embedding_dim 2.0 +73 34 training.batch_size 1.0 +73 34 training.label_smoothing 0.046944668567707744 +73 35 model.output_channels 64.0 +73 35 model.input_dropout 0.033258549348430644 +73 35 model.output_dropout 0.3966316385274401 +73 35 model.feature_map_dropout 0.44314348018644756 +73 35 model.embedding_dim 0.0 +73 35 training.batch_size 1.0 +73 35 training.label_smoothing 0.15745172664965268 +73 36 model.output_channels 31.0 +73 36 model.input_dropout 0.32619506337168225 +73 36 model.output_dropout 0.07179825928753669 +73 36 model.feature_map_dropout 0.2583396193623698 +73 36 model.embedding_dim 2.0 +73 36 training.batch_size 2.0 +73 36 training.label_smoothing 0.0015239432688859992 +73 37 model.output_channels 53.0 +73 37 model.input_dropout 0.33000936171302936 +73 37 model.output_dropout 0.1681130868238317 +73 37 model.feature_map_dropout 0.10696163515821805 +73 37 model.embedding_dim 2.0 +73 37 training.batch_size 2.0 +73 37 training.label_smoothing 0.02425800087494053 +73 38 model.output_channels 57.0 +73 38 model.input_dropout 0.029823847535823123 +73 38 model.output_dropout 0.45866205969785184 +73 38 model.feature_map_dropout 
0.41117741185388496 +73 38 model.embedding_dim 2.0 +73 38 training.batch_size 0.0 +73 38 training.label_smoothing 0.026634014547231542 +73 39 model.output_channels 62.0 +73 39 model.input_dropout 0.3741219849134719 +73 39 model.output_dropout 0.3905558922510428 +73 39 model.feature_map_dropout 0.2508215259593302 +73 39 model.embedding_dim 0.0 +73 39 training.batch_size 1.0 +73 39 training.label_smoothing 0.07841786637370404 +73 40 model.output_channels 43.0 +73 40 model.input_dropout 0.2874006163258606 +73 40 model.output_dropout 0.48087518626809006 +73 40 model.feature_map_dropout 0.04104867265289791 +73 40 model.embedding_dim 0.0 +73 40 training.batch_size 0.0 +73 40 training.label_smoothing 0.0022605773238213106 +73 41 model.output_channels 40.0 +73 41 model.input_dropout 0.26424699971015414 +73 41 model.output_dropout 0.38987960198431015 +73 41 model.feature_map_dropout 0.3092847077727789 +73 41 model.embedding_dim 2.0 +73 41 training.batch_size 1.0 +73 41 training.label_smoothing 0.06372037698625489 +73 42 model.output_channels 60.0 +73 42 model.input_dropout 0.2952584548990865 +73 42 model.output_dropout 0.4662599156924919 +73 42 model.feature_map_dropout 0.3739808925452998 +73 42 model.embedding_dim 1.0 +73 42 training.batch_size 1.0 +73 42 training.label_smoothing 0.5689619096962996 +73 43 model.output_channels 36.0 +73 43 model.input_dropout 0.040039353623123775 +73 43 model.output_dropout 0.4305368324533096 +73 43 model.feature_map_dropout 0.3597870811049028 +73 43 model.embedding_dim 0.0 +73 43 training.batch_size 2.0 +73 43 training.label_smoothing 0.021602958341435684 +73 44 model.output_channels 51.0 +73 44 model.input_dropout 0.4063981557025836 +73 44 model.output_dropout 0.2177175609380041 +73 44 model.feature_map_dropout 0.4784430324215631 +73 44 model.embedding_dim 1.0 +73 44 training.batch_size 0.0 +73 44 training.label_smoothing 0.06126689038114246 +73 45 model.output_channels 27.0 +73 45 model.input_dropout 0.07807038276855555 +73 45 
model.output_dropout 0.3312456383246267 +73 45 model.feature_map_dropout 0.036852077262435 +73 45 model.embedding_dim 0.0 +73 45 training.batch_size 0.0 +73 45 training.label_smoothing 0.03888944393025961 +73 46 model.output_channels 38.0 +73 46 model.input_dropout 0.34368356602689426 +73 46 model.output_dropout 0.15232112471892717 +73 46 model.feature_map_dropout 0.25388814261254933 +73 46 model.embedding_dim 0.0 +73 46 training.batch_size 2.0 +73 46 training.label_smoothing 0.4797579030344963 +73 47 model.output_channels 21.0 +73 47 model.input_dropout 0.21640837241532845 +73 47 model.output_dropout 0.401183092894221 +73 47 model.feature_map_dropout 0.006458759966521732 +73 47 model.embedding_dim 2.0 +73 47 training.batch_size 0.0 +73 47 training.label_smoothing 0.009513150855706679 +73 48 model.output_channels 50.0 +73 48 model.input_dropout 0.49754009311097863 +73 48 model.output_dropout 0.459776122098557 +73 48 model.feature_map_dropout 0.026145415777775183 +73 48 model.embedding_dim 0.0 +73 48 training.batch_size 1.0 +73 48 training.label_smoothing 0.4822467652962947 +73 49 model.output_channels 55.0 +73 49 model.input_dropout 0.37704622056628695 +73 49 model.output_dropout 0.03829862974324094 +73 49 model.feature_map_dropout 0.49010711228128495 +73 49 model.embedding_dim 0.0 +73 49 training.batch_size 0.0 +73 49 training.label_smoothing 0.35163509661104936 +73 50 model.output_channels 33.0 +73 50 model.input_dropout 0.49028329358768585 +73 50 model.output_dropout 0.07782174633005573 +73 50 model.feature_map_dropout 0.123425212564159 +73 50 model.embedding_dim 0.0 +73 50 training.batch_size 2.0 +73 50 training.label_smoothing 0.019869128679773643 +73 51 model.output_channels 23.0 +73 51 model.input_dropout 0.2504210901146799 +73 51 model.output_dropout 0.011254811273212983 +73 51 model.feature_map_dropout 0.2748602957920267 +73 51 model.embedding_dim 1.0 +73 51 training.batch_size 1.0 +73 51 training.label_smoothing 0.020746917877186462 +73 52 
model.output_channels 43.0 +73 52 model.input_dropout 0.4010291038312595 +73 52 model.output_dropout 0.05616111244440147 +73 52 model.feature_map_dropout 0.21811582407853425 +73 52 model.embedding_dim 0.0 +73 52 training.batch_size 1.0 +73 52 training.label_smoothing 0.3758585899039926 +73 53 model.output_channels 31.0 +73 53 model.input_dropout 0.3209913334532487 +73 53 model.output_dropout 0.42801025894788125 +73 53 model.feature_map_dropout 0.3637975673479752 +73 53 model.embedding_dim 2.0 +73 53 training.batch_size 2.0 +73 53 training.label_smoothing 0.0018213674327461655 +73 54 model.output_channels 16.0 +73 54 model.input_dropout 0.11460259817533591 +73 54 model.output_dropout 0.030844776123503448 +73 54 model.feature_map_dropout 0.3814200519302969 +73 54 model.embedding_dim 1.0 +73 54 training.batch_size 0.0 +73 54 training.label_smoothing 0.056392106894476705 +73 55 model.output_channels 53.0 +73 55 model.input_dropout 0.33051980712561174 +73 55 model.output_dropout 0.12784255852842974 +73 55 model.feature_map_dropout 0.2714342390848218 +73 55 model.embedding_dim 2.0 +73 55 training.batch_size 0.0 +73 55 training.label_smoothing 0.006842487028411316 +73 56 model.output_channels 46.0 +73 56 model.input_dropout 0.21708888845682023 +73 56 model.output_dropout 0.003117149193270663 +73 56 model.feature_map_dropout 0.3498002182563062 +73 56 model.embedding_dim 0.0 +73 56 training.batch_size 2.0 +73 56 training.label_smoothing 0.010433991098989955 +73 57 model.output_channels 57.0 +73 57 model.input_dropout 0.27165831001999147 +73 57 model.output_dropout 0.35584261497973946 +73 57 model.feature_map_dropout 0.19767739861319494 +73 57 model.embedding_dim 2.0 +73 57 training.batch_size 1.0 +73 57 training.label_smoothing 0.3256475527843184 +73 58 model.output_channels 25.0 +73 58 model.input_dropout 0.3187134320261212 +73 58 model.output_dropout 0.17745263005326994 +73 58 model.feature_map_dropout 0.10368353106927514 +73 58 model.embedding_dim 0.0 +73 58 
training.batch_size 0.0 +73 58 training.label_smoothing 0.611611911616896 +73 59 model.output_channels 26.0 +73 59 model.input_dropout 0.07542422492067885 +73 59 model.output_dropout 0.3288241418506902 +73 59 model.feature_map_dropout 0.09913441474170637 +73 59 model.embedding_dim 0.0 +73 59 training.batch_size 2.0 +73 59 training.label_smoothing 0.046174813619450215 +73 60 model.output_channels 63.0 +73 60 model.input_dropout 0.10690366668359064 +73 60 model.output_dropout 0.30388676841164775 +73 60 model.feature_map_dropout 0.12238041635413038 +73 60 model.embedding_dim 1.0 +73 60 training.batch_size 1.0 +73 60 training.label_smoothing 0.01605416584568091 +73 61 model.output_channels 55.0 +73 61 model.input_dropout 0.2560282545727738 +73 61 model.output_dropout 0.22309044720074955 +73 61 model.feature_map_dropout 0.0335691014292504 +73 61 model.embedding_dim 2.0 +73 61 training.batch_size 0.0 +73 61 training.label_smoothing 0.4341332071960283 +73 62 model.output_channels 31.0 +73 62 model.input_dropout 0.028063782318955566 +73 62 model.output_dropout 0.22303076213200612 +73 62 model.feature_map_dropout 0.4383815475210101 +73 62 model.embedding_dim 1.0 +73 62 training.batch_size 2.0 +73 62 training.label_smoothing 0.08578073773077055 +73 63 model.output_channels 54.0 +73 63 model.input_dropout 0.015364993053953502 +73 63 model.output_dropout 0.0649593803239803 +73 63 model.feature_map_dropout 0.05300276908422319 +73 63 model.embedding_dim 0.0 +73 63 training.batch_size 2.0 +73 63 training.label_smoothing 0.003576557248124115 +73 64 model.output_channels 45.0 +73 64 model.input_dropout 0.34549792824782327 +73 64 model.output_dropout 0.4686581997768581 +73 64 model.feature_map_dropout 0.3274539489059712 +73 64 model.embedding_dim 2.0 +73 64 training.batch_size 0.0 +73 64 training.label_smoothing 0.10355823711520727 +73 65 model.output_channels 55.0 +73 65 model.input_dropout 0.21961901653504934 +73 65 model.output_dropout 0.36618308272865346 +73 65 
model.feature_map_dropout 0.20431853700167074 +73 65 model.embedding_dim 0.0 +73 65 training.batch_size 0.0 +73 65 training.label_smoothing 0.007658740423699203 +73 66 model.output_channels 22.0 +73 66 model.input_dropout 0.38292084142043514 +73 66 model.output_dropout 0.48195868075366877 +73 66 model.feature_map_dropout 0.37957751890431024 +73 66 model.embedding_dim 2.0 +73 66 training.batch_size 1.0 +73 66 training.label_smoothing 0.06546336687469606 +73 67 model.output_channels 27.0 +73 67 model.input_dropout 0.21029581355558818 +73 67 model.output_dropout 0.30852757344499915 +73 67 model.feature_map_dropout 0.4392819168712416 +73 67 model.embedding_dim 0.0 +73 67 training.batch_size 1.0 +73 67 training.label_smoothing 0.001629266937479508 +73 68 model.output_channels 16.0 +73 68 model.input_dropout 0.1221282424191496 +73 68 model.output_dropout 0.1388572199695952 +73 68 model.feature_map_dropout 0.16462047956845 +73 68 model.embedding_dim 1.0 +73 68 training.batch_size 1.0 +73 68 training.label_smoothing 0.030151957958179296 +73 69 model.output_channels 29.0 +73 69 model.input_dropout 0.31602452258021446 +73 69 model.output_dropout 0.4878002102945848 +73 69 model.feature_map_dropout 0.2651405712274316 +73 69 model.embedding_dim 2.0 +73 69 training.batch_size 2.0 +73 69 training.label_smoothing 0.24756659005632692 +73 70 model.output_channels 50.0 +73 70 model.input_dropout 0.21506766688759227 +73 70 model.output_dropout 0.4625442153698765 +73 70 model.feature_map_dropout 0.14271087165330448 +73 70 model.embedding_dim 0.0 +73 70 training.batch_size 2.0 +73 70 training.label_smoothing 0.016835160156084186 +73 71 model.output_channels 55.0 +73 71 model.input_dropout 0.08488792800290196 +73 71 model.output_dropout 0.2090150542518422 +73 71 model.feature_map_dropout 0.0325467183826913 +73 71 model.embedding_dim 1.0 +73 71 training.batch_size 2.0 +73 71 training.label_smoothing 0.011528842141916152 +73 72 model.output_channels 18.0 +73 72 model.input_dropout 
0.13014005654765592 +73 72 model.output_dropout 0.04944087801724739 +73 72 model.feature_map_dropout 0.13623559812222696 +73 72 model.embedding_dim 1.0 +73 72 training.batch_size 2.0 +73 72 training.label_smoothing 0.1744951247300896 +73 73 model.output_channels 56.0 +73 73 model.input_dropout 0.048996968827512255 +73 73 model.output_dropout 0.3264137412388796 +73 73 model.feature_map_dropout 0.3722139835802133 +73 73 model.embedding_dim 1.0 +73 73 training.batch_size 0.0 +73 73 training.label_smoothing 0.0012102400123158595 +73 74 model.output_channels 64.0 +73 74 model.input_dropout 0.259542493899807 +73 74 model.output_dropout 0.4181787369818771 +73 74 model.feature_map_dropout 0.38205150055567033 +73 74 model.embedding_dim 0.0 +73 74 training.batch_size 0.0 +73 74 training.label_smoothing 0.004247570374650164 +73 75 model.output_channels 40.0 +73 75 model.input_dropout 0.06796538470896057 +73 75 model.output_dropout 0.35151249855425976 +73 75 model.feature_map_dropout 0.17377780689402889 +73 75 model.embedding_dim 2.0 +73 75 training.batch_size 0.0 +73 75 training.label_smoothing 0.18433176618694763 +73 76 model.output_channels 29.0 +73 76 model.input_dropout 0.08848899419633494 +73 76 model.output_dropout 0.2508306867044712 +73 76 model.feature_map_dropout 0.1749164630756032 +73 76 model.embedding_dim 2.0 +73 76 training.batch_size 2.0 +73 76 training.label_smoothing 0.3562913781351104 +73 77 model.output_channels 40.0 +73 77 model.input_dropout 0.3762485186522587 +73 77 model.output_dropout 0.14286508377224288 +73 77 model.feature_map_dropout 0.47184895117484166 +73 77 model.embedding_dim 0.0 +73 77 training.batch_size 0.0 +73 77 training.label_smoothing 0.5788353874717063 +73 78 model.output_channels 16.0 +73 78 model.input_dropout 0.07338794191563436 +73 78 model.output_dropout 0.4432534106551639 +73 78 model.feature_map_dropout 0.007358739548857629 +73 78 model.embedding_dim 0.0 +73 78 training.batch_size 1.0 +73 78 training.label_smoothing 
0.6061049788464616 +73 79 model.output_channels 60.0 +73 79 model.input_dropout 0.15837786011343247 +73 79 model.output_dropout 0.17922981370552682 +73 79 model.feature_map_dropout 0.3269139240426274 +73 79 model.embedding_dim 1.0 +73 79 training.batch_size 1.0 +73 79 training.label_smoothing 0.9760071384307719 +73 80 model.output_channels 16.0 +73 80 model.input_dropout 0.01523374514399628 +73 80 model.output_dropout 0.24392557206762916 +73 80 model.feature_map_dropout 0.3864027748240042 +73 80 model.embedding_dim 1.0 +73 80 training.batch_size 1.0 +73 80 training.label_smoothing 0.05427113228120765 +73 81 model.output_channels 55.0 +73 81 model.input_dropout 0.08058708869173808 +73 81 model.output_dropout 0.33895583809836183 +73 81 model.feature_map_dropout 0.44998343574912014 +73 81 model.embedding_dim 2.0 +73 81 training.batch_size 0.0 +73 81 training.label_smoothing 0.06362138766158235 +73 82 model.output_channels 29.0 +73 82 model.input_dropout 0.3688618383653582 +73 82 model.output_dropout 0.05360292593736282 +73 82 model.feature_map_dropout 0.1634928342901198 +73 82 model.embedding_dim 2.0 +73 82 training.batch_size 0.0 +73 82 training.label_smoothing 0.035493082742518514 +73 83 model.output_channels 17.0 +73 83 model.input_dropout 0.08867837203803464 +73 83 model.output_dropout 0.259676583745684 +73 83 model.feature_map_dropout 0.13544970625572234 +73 83 model.embedding_dim 2.0 +73 83 training.batch_size 2.0 +73 83 training.label_smoothing 0.12454835904460983 +73 84 model.output_channels 54.0 +73 84 model.input_dropout 0.13357649948702194 +73 84 model.output_dropout 0.3364108343415745 +73 84 model.feature_map_dropout 0.06808800687082733 +73 84 model.embedding_dim 0.0 +73 84 training.batch_size 2.0 +73 84 training.label_smoothing 0.13440096392562503 +73 85 model.output_channels 34.0 +73 85 model.input_dropout 0.34676656292508334 +73 85 model.output_dropout 0.033675178102719194 +73 85 model.feature_map_dropout 0.056531954552173413 +73 85 model.embedding_dim 
1.0 +73 85 training.batch_size 0.0 +73 85 training.label_smoothing 0.44119499874891244 +73 86 model.output_channels 52.0 +73 86 model.input_dropout 0.21596485968350942 +73 86 model.output_dropout 0.3000435278432387 +73 86 model.feature_map_dropout 0.1632444810941746 +73 86 model.embedding_dim 1.0 +73 86 training.batch_size 1.0 +73 86 training.label_smoothing 0.01633077334711763 +73 87 model.output_channels 19.0 +73 87 model.input_dropout 0.21440903037097253 +73 87 model.output_dropout 0.20444491335017123 +73 87 model.feature_map_dropout 0.10033218643307529 +73 87 model.embedding_dim 0.0 +73 87 training.batch_size 2.0 +73 87 training.label_smoothing 0.1445619005276466 +73 88 model.output_channels 57.0 +73 88 model.input_dropout 0.42970559978816797 +73 88 model.output_dropout 0.20957005399579798 +73 88 model.feature_map_dropout 0.39766035471053995 +73 88 model.embedding_dim 1.0 +73 88 training.batch_size 2.0 +73 88 training.label_smoothing 0.013601107188761223 +73 89 model.output_channels 19.0 +73 89 model.input_dropout 0.1236167944517535 +73 89 model.output_dropout 0.4071579300928259 +73 89 model.feature_map_dropout 0.30522934941324703 +73 89 model.embedding_dim 0.0 +73 89 training.batch_size 2.0 +73 89 training.label_smoothing 0.001997739372604689 +73 90 model.output_channels 26.0 +73 90 model.input_dropout 0.3177928993627097 +73 90 model.output_dropout 0.44073672878364595 +73 90 model.feature_map_dropout 0.07171008999164924 +73 90 model.embedding_dim 2.0 +73 90 training.batch_size 0.0 +73 90 training.label_smoothing 0.0023643908277900796 +73 91 model.output_channels 62.0 +73 91 model.input_dropout 0.17029454071891348 +73 91 model.output_dropout 0.16822317854669594 +73 91 model.feature_map_dropout 0.22748068896610552 +73 91 model.embedding_dim 0.0 +73 91 training.batch_size 1.0 +73 91 training.label_smoothing 0.0020520662784293997 +73 92 model.output_channels 63.0 +73 92 model.input_dropout 0.42931879706468457 +73 92 model.output_dropout 0.2743583875614339 +73 92 
model.feature_map_dropout 0.2498872843657784 +73 92 model.embedding_dim 1.0 +73 92 training.batch_size 2.0 +73 92 training.label_smoothing 0.8875074599968491 +73 93 model.output_channels 33.0 +73 93 model.input_dropout 0.4407062089041042 +73 93 model.output_dropout 0.33650356111283747 +73 93 model.feature_map_dropout 0.49677952216703086 +73 93 model.embedding_dim 2.0 +73 93 training.batch_size 0.0 +73 93 training.label_smoothing 0.0205958611543489 +73 94 model.output_channels 64.0 +73 94 model.input_dropout 0.29720740007906477 +73 94 model.output_dropout 0.28346074250848136 +73 94 model.feature_map_dropout 0.4707404854651631 +73 94 model.embedding_dim 2.0 +73 94 training.batch_size 2.0 +73 94 training.label_smoothing 0.01630100900479208 +73 95 model.output_channels 33.0 +73 95 model.input_dropout 0.48856311697872395 +73 95 model.output_dropout 0.3988157738003192 +73 95 model.feature_map_dropout 0.34642762377979996 +73 95 model.embedding_dim 1.0 +73 95 training.batch_size 0.0 +73 95 training.label_smoothing 0.30686836292092934 +73 96 model.output_channels 52.0 +73 96 model.input_dropout 0.2911474347932345 +73 96 model.output_dropout 0.4184430163885017 +73 96 model.feature_map_dropout 0.11102323036641937 +73 96 model.embedding_dim 2.0 +73 96 training.batch_size 1.0 +73 96 training.label_smoothing 0.05638374510121719 +73 97 model.output_channels 45.0 +73 97 model.input_dropout 0.10632681356955603 +73 97 model.output_dropout 0.4324213069389037 +73 97 model.feature_map_dropout 0.07966478752521677 +73 97 model.embedding_dim 0.0 +73 97 training.batch_size 1.0 +73 97 training.label_smoothing 0.17555174845240865 +73 98 model.output_channels 57.0 +73 98 model.input_dropout 0.457036865262259 +73 98 model.output_dropout 0.08402046459286061 +73 98 model.feature_map_dropout 0.285760485879605 +73 98 model.embedding_dim 0.0 +73 98 training.batch_size 1.0 +73 98 training.label_smoothing 0.04241499791739491 +73 99 model.output_channels 23.0 +73 99 model.input_dropout 
0.48326784964806846 +73 99 model.output_dropout 0.35196099378226836 +73 99 model.feature_map_dropout 0.480648781522452 +73 99 model.embedding_dim 0.0 +73 99 training.batch_size 0.0 +73 99 training.label_smoothing 0.0015694957371615637 +73 100 model.output_channels 41.0 +73 100 model.input_dropout 0.2655958873128542 +73 100 model.output_dropout 0.16165439248301677 +73 100 model.feature_map_dropout 0.4996810199710891 +73 100 model.embedding_dim 1.0 +73 100 training.batch_size 0.0 +73 100 training.label_smoothing 0.0023691284901405803 +73 1 dataset """kinships""" +73 1 model """conve""" +73 1 loss """softplus""" +73 1 regularizer """no""" +73 1 optimizer """adadelta""" +73 1 training_loop """lcwa""" +73 1 evaluator """rankbased""" +73 2 dataset """kinships""" +73 2 model """conve""" +73 2 loss """softplus""" +73 2 regularizer """no""" +73 2 optimizer """adadelta""" +73 2 training_loop """lcwa""" +73 2 evaluator """rankbased""" +73 3 dataset """kinships""" +73 3 model """conve""" +73 3 loss """softplus""" +73 3 regularizer """no""" +73 3 optimizer """adadelta""" +73 3 training_loop """lcwa""" +73 3 evaluator """rankbased""" +73 4 dataset """kinships""" +73 4 model """conve""" +73 4 loss """softplus""" +73 4 regularizer """no""" +73 4 optimizer """adadelta""" +73 4 training_loop """lcwa""" +73 4 evaluator """rankbased""" +73 5 dataset """kinships""" +73 5 model """conve""" +73 5 loss """softplus""" +73 5 regularizer """no""" +73 5 optimizer """adadelta""" +73 5 training_loop """lcwa""" +73 5 evaluator """rankbased""" +73 6 dataset """kinships""" +73 6 model """conve""" +73 6 loss """softplus""" +73 6 regularizer """no""" +73 6 optimizer """adadelta""" +73 6 training_loop """lcwa""" +73 6 evaluator """rankbased""" +73 7 dataset """kinships""" +73 7 model """conve""" +73 7 loss """softplus""" +73 7 regularizer """no""" +73 7 optimizer """adadelta""" +73 7 training_loop """lcwa""" +73 7 evaluator """rankbased""" +73 8 dataset """kinships""" +73 8 model """conve""" +73 8 
loss """softplus""" +73 8 regularizer """no""" +73 8 optimizer """adadelta""" +73 8 training_loop """lcwa""" +73 8 evaluator """rankbased""" +73 9 dataset """kinships""" +73 9 model """conve""" +73 9 loss """softplus""" +73 9 regularizer """no""" +73 9 optimizer """adadelta""" +73 9 training_loop """lcwa""" +73 9 evaluator """rankbased""" +73 10 dataset """kinships""" +73 10 model """conve""" +73 10 loss """softplus""" +73 10 regularizer """no""" +73 10 optimizer """adadelta""" +73 10 training_loop """lcwa""" +73 10 evaluator """rankbased""" +73 11 dataset """kinships""" +73 11 model """conve""" +73 11 loss """softplus""" +73 11 regularizer """no""" +73 11 optimizer """adadelta""" +73 11 training_loop """lcwa""" +73 11 evaluator """rankbased""" +73 12 dataset """kinships""" +73 12 model """conve""" +73 12 loss """softplus""" +73 12 regularizer """no""" +73 12 optimizer """adadelta""" +73 12 training_loop """lcwa""" +73 12 evaluator """rankbased""" +73 13 dataset """kinships""" +73 13 model """conve""" +73 13 loss """softplus""" +73 13 regularizer """no""" +73 13 optimizer """adadelta""" +73 13 training_loop """lcwa""" +73 13 evaluator """rankbased""" +73 14 dataset """kinships""" +73 14 model """conve""" +73 14 loss """softplus""" +73 14 regularizer """no""" +73 14 optimizer """adadelta""" +73 14 training_loop """lcwa""" +73 14 evaluator """rankbased""" +73 15 dataset """kinships""" +73 15 model """conve""" +73 15 loss """softplus""" +73 15 regularizer """no""" +73 15 optimizer """adadelta""" +73 15 training_loop """lcwa""" +73 15 evaluator """rankbased""" +73 16 dataset """kinships""" +73 16 model """conve""" +73 16 loss """softplus""" +73 16 regularizer """no""" +73 16 optimizer """adadelta""" +73 16 training_loop """lcwa""" +73 16 evaluator """rankbased""" +73 17 dataset """kinships""" +73 17 model """conve""" +73 17 loss """softplus""" +73 17 regularizer """no""" +73 17 optimizer """adadelta""" +73 17 training_loop """lcwa""" +73 17 evaluator """rankbased""" 
+73 18 dataset """kinships""" +73 18 model """conve""" +73 18 loss """softplus""" +73 18 regularizer """no""" +73 18 optimizer """adadelta""" +73 18 training_loop """lcwa""" +73 18 evaluator """rankbased""" +73 19 dataset """kinships""" +73 19 model """conve""" +73 19 loss """softplus""" +73 19 regularizer """no""" +73 19 optimizer """adadelta""" +73 19 training_loop """lcwa""" +73 19 evaluator """rankbased""" +73 20 dataset """kinships""" +73 20 model """conve""" +73 20 loss """softplus""" +73 20 regularizer """no""" +73 20 optimizer """adadelta""" +73 20 training_loop """lcwa""" +73 20 evaluator """rankbased""" +73 21 dataset """kinships""" +73 21 model """conve""" +73 21 loss """softplus""" +73 21 regularizer """no""" +73 21 optimizer """adadelta""" +73 21 training_loop """lcwa""" +73 21 evaluator """rankbased""" +73 22 dataset """kinships""" +73 22 model """conve""" +73 22 loss """softplus""" +73 22 regularizer """no""" +73 22 optimizer """adadelta""" +73 22 training_loop """lcwa""" +73 22 evaluator """rankbased""" +73 23 dataset """kinships""" +73 23 model """conve""" +73 23 loss """softplus""" +73 23 regularizer """no""" +73 23 optimizer """adadelta""" +73 23 training_loop """lcwa""" +73 23 evaluator """rankbased""" +73 24 dataset """kinships""" +73 24 model """conve""" +73 24 loss """softplus""" +73 24 regularizer """no""" +73 24 optimizer """adadelta""" +73 24 training_loop """lcwa""" +73 24 evaluator """rankbased""" +73 25 dataset """kinships""" +73 25 model """conve""" +73 25 loss """softplus""" +73 25 regularizer """no""" +73 25 optimizer """adadelta""" +73 25 training_loop """lcwa""" +73 25 evaluator """rankbased""" +73 26 dataset """kinships""" +73 26 model """conve""" +73 26 loss """softplus""" +73 26 regularizer """no""" +73 26 optimizer """adadelta""" +73 26 training_loop """lcwa""" +73 26 evaluator """rankbased""" +73 27 dataset """kinships""" +73 27 model """conve""" +73 27 loss """softplus""" +73 27 regularizer """no""" +73 27 optimizer 
"""adadelta""" +73 27 training_loop """lcwa""" +73 27 evaluator """rankbased""" +73 28 dataset """kinships""" +73 28 model """conve""" +73 28 loss """softplus""" +73 28 regularizer """no""" +73 28 optimizer """adadelta""" +73 28 training_loop """lcwa""" +73 28 evaluator """rankbased""" +73 29 dataset """kinships""" +73 29 model """conve""" +73 29 loss """softplus""" +73 29 regularizer """no""" +73 29 optimizer """adadelta""" +73 29 training_loop """lcwa""" +73 29 evaluator """rankbased""" +73 30 dataset """kinships""" +73 30 model """conve""" +73 30 loss """softplus""" +73 30 regularizer """no""" +73 30 optimizer """adadelta""" +73 30 training_loop """lcwa""" +73 30 evaluator """rankbased""" +73 31 dataset """kinships""" +73 31 model """conve""" +73 31 loss """softplus""" +73 31 regularizer """no""" +73 31 optimizer """adadelta""" +73 31 training_loop """lcwa""" +73 31 evaluator """rankbased""" +73 32 dataset """kinships""" +73 32 model """conve""" +73 32 loss """softplus""" +73 32 regularizer """no""" +73 32 optimizer """adadelta""" +73 32 training_loop """lcwa""" +73 32 evaluator """rankbased""" +73 33 dataset """kinships""" +73 33 model """conve""" +73 33 loss """softplus""" +73 33 regularizer """no""" +73 33 optimizer """adadelta""" +73 33 training_loop """lcwa""" +73 33 evaluator """rankbased""" +73 34 dataset """kinships""" +73 34 model """conve""" +73 34 loss """softplus""" +73 34 regularizer """no""" +73 34 optimizer """adadelta""" +73 34 training_loop """lcwa""" +73 34 evaluator """rankbased""" +73 35 dataset """kinships""" +73 35 model """conve""" +73 35 loss """softplus""" +73 35 regularizer """no""" +73 35 optimizer """adadelta""" +73 35 training_loop """lcwa""" +73 35 evaluator """rankbased""" +73 36 dataset """kinships""" +73 36 model """conve""" +73 36 loss """softplus""" +73 36 regularizer """no""" +73 36 optimizer """adadelta""" +73 36 training_loop """lcwa""" +73 36 evaluator """rankbased""" +73 37 dataset """kinships""" +73 37 model """conve""" 
+73 37 loss """softplus""" +73 37 regularizer """no""" +73 37 optimizer """adadelta""" +73 37 training_loop """lcwa""" +73 37 evaluator """rankbased""" +73 38 dataset """kinships""" +73 38 model """conve""" +73 38 loss """softplus""" +73 38 regularizer """no""" +73 38 optimizer """adadelta""" +73 38 training_loop """lcwa""" +73 38 evaluator """rankbased""" +73 39 dataset """kinships""" +73 39 model """conve""" +73 39 loss """softplus""" +73 39 regularizer """no""" +73 39 optimizer """adadelta""" +73 39 training_loop """lcwa""" +73 39 evaluator """rankbased""" +73 40 dataset """kinships""" +73 40 model """conve""" +73 40 loss """softplus""" +73 40 regularizer """no""" +73 40 optimizer """adadelta""" +73 40 training_loop """lcwa""" +73 40 evaluator """rankbased""" +73 41 dataset """kinships""" +73 41 model """conve""" +73 41 loss """softplus""" +73 41 regularizer """no""" +73 41 optimizer """adadelta""" +73 41 training_loop """lcwa""" +73 41 evaluator """rankbased""" +73 42 dataset """kinships""" +73 42 model """conve""" +73 42 loss """softplus""" +73 42 regularizer """no""" +73 42 optimizer """adadelta""" +73 42 training_loop """lcwa""" +73 42 evaluator """rankbased""" +73 43 dataset """kinships""" +73 43 model """conve""" +73 43 loss """softplus""" +73 43 regularizer """no""" +73 43 optimizer """adadelta""" +73 43 training_loop """lcwa""" +73 43 evaluator """rankbased""" +73 44 dataset """kinships""" +73 44 model """conve""" +73 44 loss """softplus""" +73 44 regularizer """no""" +73 44 optimizer """adadelta""" +73 44 training_loop """lcwa""" +73 44 evaluator """rankbased""" +73 45 dataset """kinships""" +73 45 model """conve""" +73 45 loss """softplus""" +73 45 regularizer """no""" +73 45 optimizer """adadelta""" +73 45 training_loop """lcwa""" +73 45 evaluator """rankbased""" +73 46 dataset """kinships""" +73 46 model """conve""" +73 46 loss """softplus""" +73 46 regularizer """no""" +73 46 optimizer """adadelta""" +73 46 training_loop """lcwa""" +73 46 evaluator 
"""rankbased""" +73 47 dataset """kinships""" +73 47 model """conve""" +73 47 loss """softplus""" +73 47 regularizer """no""" +73 47 optimizer """adadelta""" +73 47 training_loop """lcwa""" +73 47 evaluator """rankbased""" +73 48 dataset """kinships""" +73 48 model """conve""" +73 48 loss """softplus""" +73 48 regularizer """no""" +73 48 optimizer """adadelta""" +73 48 training_loop """lcwa""" +73 48 evaluator """rankbased""" +73 49 dataset """kinships""" +73 49 model """conve""" +73 49 loss """softplus""" +73 49 regularizer """no""" +73 49 optimizer """adadelta""" +73 49 training_loop """lcwa""" +73 49 evaluator """rankbased""" +73 50 dataset """kinships""" +73 50 model """conve""" +73 50 loss """softplus""" +73 50 regularizer """no""" +73 50 optimizer """adadelta""" +73 50 training_loop """lcwa""" +73 50 evaluator """rankbased""" +73 51 dataset """kinships""" +73 51 model """conve""" +73 51 loss """softplus""" +73 51 regularizer """no""" +73 51 optimizer """adadelta""" +73 51 training_loop """lcwa""" +73 51 evaluator """rankbased""" +73 52 dataset """kinships""" +73 52 model """conve""" +73 52 loss """softplus""" +73 52 regularizer """no""" +73 52 optimizer """adadelta""" +73 52 training_loop """lcwa""" +73 52 evaluator """rankbased""" +73 53 dataset """kinships""" +73 53 model """conve""" +73 53 loss """softplus""" +73 53 regularizer """no""" +73 53 optimizer """adadelta""" +73 53 training_loop """lcwa""" +73 53 evaluator """rankbased""" +73 54 dataset """kinships""" +73 54 model """conve""" +73 54 loss """softplus""" +73 54 regularizer """no""" +73 54 optimizer """adadelta""" +73 54 training_loop """lcwa""" +73 54 evaluator """rankbased""" +73 55 dataset """kinships""" +73 55 model """conve""" +73 55 loss """softplus""" +73 55 regularizer """no""" +73 55 optimizer """adadelta""" +73 55 training_loop """lcwa""" +73 55 evaluator """rankbased""" +73 56 dataset """kinships""" +73 56 model """conve""" +73 56 loss """softplus""" +73 56 regularizer """no""" +73 56 
optimizer """adadelta""" +73 56 training_loop """lcwa""" +73 56 evaluator """rankbased""" +73 57 dataset """kinships""" +73 57 model """conve""" +73 57 loss """softplus""" +73 57 regularizer """no""" +73 57 optimizer """adadelta""" +73 57 training_loop """lcwa""" +73 57 evaluator """rankbased""" +73 58 dataset """kinships""" +73 58 model """conve""" +73 58 loss """softplus""" +73 58 regularizer """no""" +73 58 optimizer """adadelta""" +73 58 training_loop """lcwa""" +73 58 evaluator """rankbased""" +73 59 dataset """kinships""" +73 59 model """conve""" +73 59 loss """softplus""" +73 59 regularizer """no""" +73 59 optimizer """adadelta""" +73 59 training_loop """lcwa""" +73 59 evaluator """rankbased""" +73 60 dataset """kinships""" +73 60 model """conve""" +73 60 loss """softplus""" +73 60 regularizer """no""" +73 60 optimizer """adadelta""" +73 60 training_loop """lcwa""" +73 60 evaluator """rankbased""" +73 61 dataset """kinships""" +73 61 model """conve""" +73 61 loss """softplus""" +73 61 regularizer """no""" +73 61 optimizer """adadelta""" +73 61 training_loop """lcwa""" +73 61 evaluator """rankbased""" +73 62 dataset """kinships""" +73 62 model """conve""" +73 62 loss """softplus""" +73 62 regularizer """no""" +73 62 optimizer """adadelta""" +73 62 training_loop """lcwa""" +73 62 evaluator """rankbased""" +73 63 dataset """kinships""" +73 63 model """conve""" +73 63 loss """softplus""" +73 63 regularizer """no""" +73 63 optimizer """adadelta""" +73 63 training_loop """lcwa""" +73 63 evaluator """rankbased""" +73 64 dataset """kinships""" +73 64 model """conve""" +73 64 loss """softplus""" +73 64 regularizer """no""" +73 64 optimizer """adadelta""" +73 64 training_loop """lcwa""" +73 64 evaluator """rankbased""" +73 65 dataset """kinships""" +73 65 model """conve""" +73 65 loss """softplus""" +73 65 regularizer """no""" +73 65 optimizer """adadelta""" +73 65 training_loop """lcwa""" +73 65 evaluator """rankbased""" +73 66 dataset """kinships""" +73 66 model 
"""conve""" +73 66 loss """softplus""" +73 66 regularizer """no""" +73 66 optimizer """adadelta""" +73 66 training_loop """lcwa""" +73 66 evaluator """rankbased""" +73 67 dataset """kinships""" +73 67 model """conve""" +73 67 loss """softplus""" +73 67 regularizer """no""" +73 67 optimizer """adadelta""" +73 67 training_loop """lcwa""" +73 67 evaluator """rankbased""" +73 68 dataset """kinships""" +73 68 model """conve""" +73 68 loss """softplus""" +73 68 regularizer """no""" +73 68 optimizer """adadelta""" +73 68 training_loop """lcwa""" +73 68 evaluator """rankbased""" +73 69 dataset """kinships""" +73 69 model """conve""" +73 69 loss """softplus""" +73 69 regularizer """no""" +73 69 optimizer """adadelta""" +73 69 training_loop """lcwa""" +73 69 evaluator """rankbased""" +73 70 dataset """kinships""" +73 70 model """conve""" +73 70 loss """softplus""" +73 70 regularizer """no""" +73 70 optimizer """adadelta""" +73 70 training_loop """lcwa""" +73 70 evaluator """rankbased""" +73 71 dataset """kinships""" +73 71 model """conve""" +73 71 loss """softplus""" +73 71 regularizer """no""" +73 71 optimizer """adadelta""" +73 71 training_loop """lcwa""" +73 71 evaluator """rankbased""" +73 72 dataset """kinships""" +73 72 model """conve""" +73 72 loss """softplus""" +73 72 regularizer """no""" +73 72 optimizer """adadelta""" +73 72 training_loop """lcwa""" +73 72 evaluator """rankbased""" +73 73 dataset """kinships""" +73 73 model """conve""" +73 73 loss """softplus""" +73 73 regularizer """no""" +73 73 optimizer """adadelta""" +73 73 training_loop """lcwa""" +73 73 evaluator """rankbased""" +73 74 dataset """kinships""" +73 74 model """conve""" +73 74 loss """softplus""" +73 74 regularizer """no""" +73 74 optimizer """adadelta""" +73 74 training_loop """lcwa""" +73 74 evaluator """rankbased""" +73 75 dataset """kinships""" +73 75 model """conve""" +73 75 loss """softplus""" +73 75 regularizer """no""" +73 75 optimizer """adadelta""" +73 75 training_loop """lcwa""" +73 
75 evaluator """rankbased""" +73 76 dataset """kinships""" +73 76 model """conve""" +73 76 loss """softplus""" +73 76 regularizer """no""" +73 76 optimizer """adadelta""" +73 76 training_loop """lcwa""" +73 76 evaluator """rankbased""" +73 77 dataset """kinships""" +73 77 model """conve""" +73 77 loss """softplus""" +73 77 regularizer """no""" +73 77 optimizer """adadelta""" +73 77 training_loop """lcwa""" +73 77 evaluator """rankbased""" +73 78 dataset """kinships""" +73 78 model """conve""" +73 78 loss """softplus""" +73 78 regularizer """no""" +73 78 optimizer """adadelta""" +73 78 training_loop """lcwa""" +73 78 evaluator """rankbased""" +73 79 dataset """kinships""" +73 79 model """conve""" +73 79 loss """softplus""" +73 79 regularizer """no""" +73 79 optimizer """adadelta""" +73 79 training_loop """lcwa""" +73 79 evaluator """rankbased""" +73 80 dataset """kinships""" +73 80 model """conve""" +73 80 loss """softplus""" +73 80 regularizer """no""" +73 80 optimizer """adadelta""" +73 80 training_loop """lcwa""" +73 80 evaluator """rankbased""" +73 81 dataset """kinships""" +73 81 model """conve""" +73 81 loss """softplus""" +73 81 regularizer """no""" +73 81 optimizer """adadelta""" +73 81 training_loop """lcwa""" +73 81 evaluator """rankbased""" +73 82 dataset """kinships""" +73 82 model """conve""" +73 82 loss """softplus""" +73 82 regularizer """no""" +73 82 optimizer """adadelta""" +73 82 training_loop """lcwa""" +73 82 evaluator """rankbased""" +73 83 dataset """kinships""" +73 83 model """conve""" +73 83 loss """softplus""" +73 83 regularizer """no""" +73 83 optimizer """adadelta""" +73 83 training_loop """lcwa""" +73 83 evaluator """rankbased""" +73 84 dataset """kinships""" +73 84 model """conve""" +73 84 loss """softplus""" +73 84 regularizer """no""" +73 84 optimizer """adadelta""" +73 84 training_loop """lcwa""" +73 84 evaluator """rankbased""" +73 85 dataset """kinships""" +73 85 model """conve""" +73 85 loss """softplus""" +73 85 regularizer 
"""no""" +73 85 optimizer """adadelta""" +73 85 training_loop """lcwa""" +73 85 evaluator """rankbased""" +73 86 dataset """kinships""" +73 86 model """conve""" +73 86 loss """softplus""" +73 86 regularizer """no""" +73 86 optimizer """adadelta""" +73 86 training_loop """lcwa""" +73 86 evaluator """rankbased""" +73 87 dataset """kinships""" +73 87 model """conve""" +73 87 loss """softplus""" +73 87 regularizer """no""" +73 87 optimizer """adadelta""" +73 87 training_loop """lcwa""" +73 87 evaluator """rankbased""" +73 88 dataset """kinships""" +73 88 model """conve""" +73 88 loss """softplus""" +73 88 regularizer """no""" +73 88 optimizer """adadelta""" +73 88 training_loop """lcwa""" +73 88 evaluator """rankbased""" +73 89 dataset """kinships""" +73 89 model """conve""" +73 89 loss """softplus""" +73 89 regularizer """no""" +73 89 optimizer """adadelta""" +73 89 training_loop """lcwa""" +73 89 evaluator """rankbased""" +73 90 dataset """kinships""" +73 90 model """conve""" +73 90 loss """softplus""" +73 90 regularizer """no""" +73 90 optimizer """adadelta""" +73 90 training_loop """lcwa""" +73 90 evaluator """rankbased""" +73 91 dataset """kinships""" +73 91 model """conve""" +73 91 loss """softplus""" +73 91 regularizer """no""" +73 91 optimizer """adadelta""" +73 91 training_loop """lcwa""" +73 91 evaluator """rankbased""" +73 92 dataset """kinships""" +73 92 model """conve""" +73 92 loss """softplus""" +73 92 regularizer """no""" +73 92 optimizer """adadelta""" +73 92 training_loop """lcwa""" +73 92 evaluator """rankbased""" +73 93 dataset """kinships""" +73 93 model """conve""" +73 93 loss """softplus""" +73 93 regularizer """no""" +73 93 optimizer """adadelta""" +73 93 training_loop """lcwa""" +73 93 evaluator """rankbased""" +73 94 dataset """kinships""" +73 94 model """conve""" +73 94 loss """softplus""" +73 94 regularizer """no""" +73 94 optimizer """adadelta""" +73 94 training_loop """lcwa""" +73 94 evaluator """rankbased""" +73 95 dataset """kinships""" 
+73 95 model """conve""" +73 95 loss """softplus""" +73 95 regularizer """no""" +73 95 optimizer """adadelta""" +73 95 training_loop """lcwa""" +73 95 evaluator """rankbased""" +73 96 dataset """kinships""" +73 96 model """conve""" +73 96 loss """softplus""" +73 96 regularizer """no""" +73 96 optimizer """adadelta""" +73 96 training_loop """lcwa""" +73 96 evaluator """rankbased""" +73 97 dataset """kinships""" +73 97 model """conve""" +73 97 loss """softplus""" +73 97 regularizer """no""" +73 97 optimizer """adadelta""" +73 97 training_loop """lcwa""" +73 97 evaluator """rankbased""" +73 98 dataset """kinships""" +73 98 model """conve""" +73 98 loss """softplus""" +73 98 regularizer """no""" +73 98 optimizer """adadelta""" +73 98 training_loop """lcwa""" +73 98 evaluator """rankbased""" +73 99 dataset """kinships""" +73 99 model """conve""" +73 99 loss """softplus""" +73 99 regularizer """no""" +73 99 optimizer """adadelta""" +73 99 training_loop """lcwa""" +73 99 evaluator """rankbased""" +73 100 dataset """kinships""" +73 100 model """conve""" +73 100 loss """softplus""" +73 100 regularizer """no""" +73 100 optimizer """adadelta""" +73 100 training_loop """lcwa""" +73 100 evaluator """rankbased""" +74 1 model.output_channels 58.0 +74 1 model.input_dropout 0.12738620711073595 +74 1 model.output_dropout 0.2953038669643527 +74 1 model.feature_map_dropout 0.40161248698140606 +74 1 model.embedding_dim 0.0 +74 1 negative_sampler.num_negs_per_pos 92.0 +74 1 training.batch_size 1.0 +74 2 model.output_channels 62.0 +74 2 model.input_dropout 0.24651563041635977 +74 2 model.output_dropout 0.028611267686365627 +74 2 model.feature_map_dropout 0.28038521896313906 +74 2 model.embedding_dim 1.0 +74 2 negative_sampler.num_negs_per_pos 75.0 +74 2 training.batch_size 0.0 +74 3 model.output_channels 53.0 +74 3 model.input_dropout 0.3112677390599534 +74 3 model.output_dropout 0.2704985088055194 +74 3 model.feature_map_dropout 0.4200616883756908 +74 3 model.embedding_dim 2.0 +74 3 
negative_sampler.num_negs_per_pos 89.0 +74 3 training.batch_size 2.0 +74 4 model.output_channels 17.0 +74 4 model.input_dropout 0.016416014462812323 +74 4 model.output_dropout 0.28016005691124135 +74 4 model.feature_map_dropout 0.24590544854839919 +74 4 model.embedding_dim 0.0 +74 4 negative_sampler.num_negs_per_pos 44.0 +74 4 training.batch_size 0.0 +74 5 model.output_channels 28.0 +74 5 model.input_dropout 0.3779534956853442 +74 5 model.output_dropout 0.0626799934191199 +74 5 model.feature_map_dropout 0.07335916520892166 +74 5 model.embedding_dim 0.0 +74 5 negative_sampler.num_negs_per_pos 37.0 +74 5 training.batch_size 1.0 +74 6 model.output_channels 50.0 +74 6 model.input_dropout 0.18570409840742458 +74 6 model.output_dropout 0.471940078687106 +74 6 model.feature_map_dropout 0.27180842540821065 +74 6 model.embedding_dim 2.0 +74 6 negative_sampler.num_negs_per_pos 94.0 +74 6 training.batch_size 1.0 +74 7 model.output_channels 39.0 +74 7 model.input_dropout 0.4349767336758877 +74 7 model.output_dropout 0.2354002854960759 +74 7 model.feature_map_dropout 0.3432197924747677 +74 7 model.embedding_dim 2.0 +74 7 negative_sampler.num_negs_per_pos 16.0 +74 7 training.batch_size 0.0 +74 8 model.output_channels 46.0 +74 8 model.input_dropout 0.46037545807817026 +74 8 model.output_dropout 0.00215356130706712 +74 8 model.feature_map_dropout 0.1567977891536091 +74 8 model.embedding_dim 1.0 +74 8 negative_sampler.num_negs_per_pos 82.0 +74 8 training.batch_size 0.0 +74 9 model.output_channels 49.0 +74 9 model.input_dropout 0.28653892984932083 +74 9 model.output_dropout 0.043515393406039216 +74 9 model.feature_map_dropout 0.1404893751157476 +74 9 model.embedding_dim 2.0 +74 9 negative_sampler.num_negs_per_pos 89.0 +74 9 training.batch_size 2.0 +74 10 model.output_channels 38.0 +74 10 model.input_dropout 0.11080236618820777 +74 10 model.output_dropout 0.05017696394766907 +74 10 model.feature_map_dropout 0.4960210294177477 +74 10 model.embedding_dim 0.0 +74 10 
negative_sampler.num_negs_per_pos 0.0 +74 10 training.batch_size 2.0 +74 11 model.output_channels 61.0 +74 11 model.input_dropout 0.3789774494673379 +74 11 model.output_dropout 0.4261171110758442 +74 11 model.feature_map_dropout 0.23350799103926762 +74 11 model.embedding_dim 1.0 +74 11 negative_sampler.num_negs_per_pos 13.0 +74 11 training.batch_size 0.0 +74 12 model.output_channels 18.0 +74 12 model.input_dropout 0.05195531950881199 +74 12 model.output_dropout 0.10904335176723362 +74 12 model.feature_map_dropout 0.31841855700015614 +74 12 model.embedding_dim 0.0 +74 12 negative_sampler.num_negs_per_pos 67.0 +74 12 training.batch_size 1.0 +74 13 model.output_channels 56.0 +74 13 model.input_dropout 0.13398373617309833 +74 13 model.output_dropout 0.12939794303281427 +74 13 model.feature_map_dropout 0.0959967795138536 +74 13 model.embedding_dim 1.0 +74 13 negative_sampler.num_negs_per_pos 73.0 +74 13 training.batch_size 0.0 +74 14 model.output_channels 43.0 +74 14 model.input_dropout 0.024677975959067733 +74 14 model.output_dropout 0.3030841884000866 +74 14 model.feature_map_dropout 0.05641803385615629 +74 14 model.embedding_dim 0.0 +74 14 negative_sampler.num_negs_per_pos 27.0 +74 14 training.batch_size 1.0 +74 15 model.output_channels 48.0 +74 15 model.input_dropout 0.18386553118228666 +74 15 model.output_dropout 0.0749748333390996 +74 15 model.feature_map_dropout 0.09324120287356041 +74 15 model.embedding_dim 2.0 +74 15 negative_sampler.num_negs_per_pos 84.0 +74 15 training.batch_size 2.0 +74 16 model.output_channels 29.0 +74 16 model.input_dropout 0.03203291217689053 +74 16 model.output_dropout 0.4718150027728983 +74 16 model.feature_map_dropout 0.06673326673207103 +74 16 model.embedding_dim 0.0 +74 16 negative_sampler.num_negs_per_pos 32.0 +74 16 training.batch_size 0.0 +74 17 model.output_channels 42.0 +74 17 model.input_dropout 0.13893783416889283 +74 17 model.output_dropout 0.12476418540490958 +74 17 model.feature_map_dropout 0.2504148903845996 +74 17 
model.embedding_dim 0.0 +74 17 negative_sampler.num_negs_per_pos 11.0 +74 17 training.batch_size 0.0 +74 18 model.output_channels 24.0 +74 18 model.input_dropout 0.3567530692133036 +74 18 model.output_dropout 0.0873018686617042 +74 18 model.feature_map_dropout 0.4936655469132554 +74 18 model.embedding_dim 1.0 +74 18 negative_sampler.num_negs_per_pos 1.0 +74 18 training.batch_size 0.0 +74 19 model.output_channels 62.0 +74 19 model.input_dropout 0.34597435911432467 +74 19 model.output_dropout 0.28853102541756426 +74 19 model.feature_map_dropout 0.20778287819071362 +74 19 model.embedding_dim 1.0 +74 19 negative_sampler.num_negs_per_pos 74.0 +74 19 training.batch_size 0.0 +74 20 model.output_channels 43.0 +74 20 model.input_dropout 0.13772337001664237 +74 20 model.output_dropout 0.3372659326902419 +74 20 model.feature_map_dropout 0.30295497116781067 +74 20 model.embedding_dim 2.0 +74 20 negative_sampler.num_negs_per_pos 51.0 +74 20 training.batch_size 2.0 +74 21 model.output_channels 35.0 +74 21 model.input_dropout 0.07276838970938704 +74 21 model.output_dropout 0.4181828931383707 +74 21 model.feature_map_dropout 0.152389381881181 +74 21 model.embedding_dim 0.0 +74 21 negative_sampler.num_negs_per_pos 49.0 +74 21 training.batch_size 0.0 +74 22 model.output_channels 64.0 +74 22 model.input_dropout 0.11606405981594692 +74 22 model.output_dropout 0.48097236693113166 +74 22 model.feature_map_dropout 0.10779575054218443 +74 22 model.embedding_dim 2.0 +74 22 negative_sampler.num_negs_per_pos 56.0 +74 22 training.batch_size 0.0 +74 23 model.output_channels 43.0 +74 23 model.input_dropout 0.3915461701221697 +74 23 model.output_dropout 0.2530127820035174 +74 23 model.feature_map_dropout 0.08366491532723203 +74 23 model.embedding_dim 2.0 +74 23 negative_sampler.num_negs_per_pos 33.0 +74 23 training.batch_size 1.0 +74 24 model.output_channels 37.0 +74 24 model.input_dropout 0.3311389964245524 +74 24 model.output_dropout 0.45255423781984394 +74 24 model.feature_map_dropout 
0.3564652137598998 +74 24 model.embedding_dim 1.0 +74 24 negative_sampler.num_negs_per_pos 16.0 +74 24 training.batch_size 1.0 +74 25 model.output_channels 26.0 +74 25 model.input_dropout 0.49918936835011374 +74 25 model.output_dropout 0.07502180494257216 +74 25 model.feature_map_dropout 0.09409661424749305 +74 25 model.embedding_dim 1.0 +74 25 negative_sampler.num_negs_per_pos 81.0 +74 25 training.batch_size 2.0 +74 26 model.output_channels 48.0 +74 26 model.input_dropout 0.014682267486511302 +74 26 model.output_dropout 0.32494714581820755 +74 26 model.feature_map_dropout 0.0770192485427898 +74 26 model.embedding_dim 1.0 +74 26 negative_sampler.num_negs_per_pos 40.0 +74 26 training.batch_size 2.0 +74 27 model.output_channels 54.0 +74 27 model.input_dropout 0.30733697360747697 +74 27 model.output_dropout 0.40917293191387927 +74 27 model.feature_map_dropout 0.46640302429528085 +74 27 model.embedding_dim 1.0 +74 27 negative_sampler.num_negs_per_pos 1.0 +74 27 training.batch_size 0.0 +74 28 model.output_channels 58.0 +74 28 model.input_dropout 0.4670791709491397 +74 28 model.output_dropout 0.30394144660633227 +74 28 model.feature_map_dropout 0.08805839074230781 +74 28 model.embedding_dim 2.0 +74 28 negative_sampler.num_negs_per_pos 15.0 +74 28 training.batch_size 0.0 +74 29 model.output_channels 18.0 +74 29 model.input_dropout 0.38424101192573323 +74 29 model.output_dropout 0.07204504053459665 +74 29 model.feature_map_dropout 0.11382621853556629 +74 29 model.embedding_dim 1.0 +74 29 negative_sampler.num_negs_per_pos 12.0 +74 29 training.batch_size 1.0 +74 30 model.output_channels 27.0 +74 30 model.input_dropout 0.4884612457577072 +74 30 model.output_dropout 0.350319405450032 +74 30 model.feature_map_dropout 0.10874714923703466 +74 30 model.embedding_dim 1.0 +74 30 negative_sampler.num_negs_per_pos 80.0 +74 30 training.batch_size 0.0 +74 31 model.output_channels 54.0 +74 31 model.input_dropout 0.40087106231730646 +74 31 model.output_dropout 0.3971202595557637 +74 31 
model.feature_map_dropout 0.32026213422841165 +74 31 model.embedding_dim 0.0 +74 31 negative_sampler.num_negs_per_pos 22.0 +74 31 training.batch_size 1.0 +74 32 model.output_channels 50.0 +74 32 model.input_dropout 0.20891645822229837 +74 32 model.output_dropout 0.24854000428130818 +74 32 model.feature_map_dropout 0.24738873194127342 +74 32 model.embedding_dim 1.0 +74 32 negative_sampler.num_negs_per_pos 68.0 +74 32 training.batch_size 1.0 +74 33 model.output_channels 24.0 +74 33 model.input_dropout 0.4003619079430848 +74 33 model.output_dropout 0.04588988660922927 +74 33 model.feature_map_dropout 0.07764339414602489 +74 33 model.embedding_dim 2.0 +74 33 negative_sampler.num_negs_per_pos 82.0 +74 33 training.batch_size 1.0 +74 34 model.output_channels 18.0 +74 34 model.input_dropout 0.374692662892188 +74 34 model.output_dropout 0.24736212109336803 +74 34 model.feature_map_dropout 0.3210095985918677 +74 34 model.embedding_dim 0.0 +74 34 negative_sampler.num_negs_per_pos 3.0 +74 34 training.batch_size 2.0 +74 35 model.output_channels 25.0 +74 35 model.input_dropout 0.34062710044566197 +74 35 model.output_dropout 0.2858711324658833 +74 35 model.feature_map_dropout 0.11277411702888795 +74 35 model.embedding_dim 2.0 +74 35 negative_sampler.num_negs_per_pos 60.0 +74 35 training.batch_size 2.0 +74 36 model.output_channels 38.0 +74 36 model.input_dropout 0.08403951545087118 +74 36 model.output_dropout 0.16448622341534241 +74 36 model.feature_map_dropout 0.4671233321596922 +74 36 model.embedding_dim 2.0 +74 36 negative_sampler.num_negs_per_pos 22.0 +74 36 training.batch_size 0.0 +74 37 model.output_channels 17.0 +74 37 model.input_dropout 0.3857449230736628 +74 37 model.output_dropout 0.2838499537853068 +74 37 model.feature_map_dropout 0.19345129946207507 +74 37 model.embedding_dim 0.0 +74 37 negative_sampler.num_negs_per_pos 88.0 +74 37 training.batch_size 0.0 +74 38 model.output_channels 19.0 +74 38 model.input_dropout 0.3457281515070105 +74 38 model.output_dropout 
0.049051400795222355 +74 38 model.feature_map_dropout 0.185988170506237 +74 38 model.embedding_dim 1.0 +74 38 negative_sampler.num_negs_per_pos 51.0 +74 38 training.batch_size 2.0 +74 39 model.output_channels 52.0 +74 39 model.input_dropout 0.13304910548369853 +74 39 model.output_dropout 0.027296065844681694 +74 39 model.feature_map_dropout 0.08590775294019726 +74 39 model.embedding_dim 2.0 +74 39 negative_sampler.num_negs_per_pos 89.0 +74 39 training.batch_size 2.0 +74 40 model.output_channels 25.0 +74 40 model.input_dropout 0.33277730321004184 +74 40 model.output_dropout 0.1221059657014742 +74 40 model.feature_map_dropout 0.33416271988312884 +74 40 model.embedding_dim 1.0 +74 40 negative_sampler.num_negs_per_pos 77.0 +74 40 training.batch_size 1.0 +74 41 model.output_channels 56.0 +74 41 model.input_dropout 0.05952124052350988 +74 41 model.output_dropout 0.3238697331255443 +74 41 model.feature_map_dropout 0.017631697897440735 +74 41 model.embedding_dim 2.0 +74 41 negative_sampler.num_negs_per_pos 24.0 +74 41 training.batch_size 2.0 +74 42 model.output_channels 31.0 +74 42 model.input_dropout 0.18198974336612106 +74 42 model.output_dropout 0.12565781048113306 +74 42 model.feature_map_dropout 0.029698226197093724 +74 42 model.embedding_dim 2.0 +74 42 negative_sampler.num_negs_per_pos 47.0 +74 42 training.batch_size 0.0 +74 43 model.output_channels 54.0 +74 43 model.input_dropout 0.30170151591933675 +74 43 model.output_dropout 0.3374148473133776 +74 43 model.feature_map_dropout 0.17428309425862265 +74 43 model.embedding_dim 0.0 +74 43 negative_sampler.num_negs_per_pos 50.0 +74 43 training.batch_size 0.0 +74 44 model.output_channels 36.0 +74 44 model.input_dropout 0.4112695131335026 +74 44 model.output_dropout 0.04434755395438866 +74 44 model.feature_map_dropout 0.1470871371726568 +74 44 model.embedding_dim 0.0 +74 44 negative_sampler.num_negs_per_pos 14.0 +74 44 training.batch_size 1.0 +74 45 model.output_channels 63.0 +74 45 model.input_dropout 0.32946246221139075 
+74 45 model.output_dropout 0.3823345183334971 +74 45 model.feature_map_dropout 0.3185323997114688 +74 45 model.embedding_dim 1.0 +74 45 negative_sampler.num_negs_per_pos 40.0 +74 45 training.batch_size 0.0 +74 46 model.output_channels 50.0 +74 46 model.input_dropout 0.4896070419171827 +74 46 model.output_dropout 0.0018297000169643085 +74 46 model.feature_map_dropout 0.3481040779133627 +74 46 model.embedding_dim 2.0 +74 46 negative_sampler.num_negs_per_pos 33.0 +74 46 training.batch_size 0.0 +74 47 model.output_channels 63.0 +74 47 model.input_dropout 0.43228200184227533 +74 47 model.output_dropout 0.4400269467491394 +74 47 model.feature_map_dropout 0.14492263272018235 +74 47 model.embedding_dim 1.0 +74 47 negative_sampler.num_negs_per_pos 62.0 +74 47 training.batch_size 0.0 +74 48 model.output_channels 50.0 +74 48 model.input_dropout 0.2376930865992174 +74 48 model.output_dropout 0.024579469699373413 +74 48 model.feature_map_dropout 0.2906550713511014 +74 48 model.embedding_dim 1.0 +74 48 negative_sampler.num_negs_per_pos 81.0 +74 48 training.batch_size 0.0 +74 49 model.output_channels 39.0 +74 49 model.input_dropout 0.2977937564153963 +74 49 model.output_dropout 0.29719199056427953 +74 49 model.feature_map_dropout 0.30717332460431185 +74 49 model.embedding_dim 0.0 +74 49 negative_sampler.num_negs_per_pos 2.0 +74 49 training.batch_size 2.0 +74 50 model.output_channels 62.0 +74 50 model.input_dropout 0.3762374353012245 +74 50 model.output_dropout 0.0894656619689353 +74 50 model.feature_map_dropout 0.13729584472729028 +74 50 model.embedding_dim 2.0 +74 50 negative_sampler.num_negs_per_pos 91.0 +74 50 training.batch_size 0.0 +74 51 model.output_channels 25.0 +74 51 model.input_dropout 0.16394322955240304 +74 51 model.output_dropout 0.16006499915764244 +74 51 model.feature_map_dropout 0.3034312250951663 +74 51 model.embedding_dim 1.0 +74 51 negative_sampler.num_negs_per_pos 86.0 +74 51 training.batch_size 2.0 +74 52 model.output_channels 52.0 +74 52 
model.input_dropout 0.3211338033861287 +74 52 model.output_dropout 0.1106934934960318 +74 52 model.feature_map_dropout 0.21952004883405635 +74 52 model.embedding_dim 0.0 +74 52 negative_sampler.num_negs_per_pos 48.0 +74 52 training.batch_size 0.0 +74 53 model.output_channels 47.0 +74 53 model.input_dropout 0.31694523655085605 +74 53 model.output_dropout 0.18962373134692767 +74 53 model.feature_map_dropout 0.12003787670449467 +74 53 model.embedding_dim 0.0 +74 53 negative_sampler.num_negs_per_pos 91.0 +74 53 training.batch_size 1.0 +74 1 dataset """kinships""" +74 1 model """conve""" +74 1 loss """bceaftersigmoid""" +74 1 regularizer """no""" +74 1 optimizer """adadelta""" +74 1 training_loop """owa""" +74 1 negative_sampler """basic""" +74 1 evaluator """rankbased""" +74 2 dataset """kinships""" +74 2 model """conve""" +74 2 loss """bceaftersigmoid""" +74 2 regularizer """no""" +74 2 optimizer """adadelta""" +74 2 training_loop """owa""" +74 2 negative_sampler """basic""" +74 2 evaluator """rankbased""" +74 3 dataset """kinships""" +74 3 model """conve""" +74 3 loss """bceaftersigmoid""" +74 3 regularizer """no""" +74 3 optimizer """adadelta""" +74 3 training_loop """owa""" +74 3 negative_sampler """basic""" +74 3 evaluator """rankbased""" +74 4 dataset """kinships""" +74 4 model """conve""" +74 4 loss """bceaftersigmoid""" +74 4 regularizer """no""" +74 4 optimizer """adadelta""" +74 4 training_loop """owa""" +74 4 negative_sampler """basic""" +74 4 evaluator """rankbased""" +74 5 dataset """kinships""" +74 5 model """conve""" +74 5 loss """bceaftersigmoid""" +74 5 regularizer """no""" +74 5 optimizer """adadelta""" +74 5 training_loop """owa""" +74 5 negative_sampler """basic""" +74 5 evaluator """rankbased""" +74 6 dataset """kinships""" +74 6 model """conve""" +74 6 loss """bceaftersigmoid""" +74 6 regularizer """no""" +74 6 optimizer """adadelta""" +74 6 training_loop """owa""" +74 6 negative_sampler """basic""" +74 6 evaluator """rankbased""" +74 7 dataset 
"""kinships""" +74 7 model """conve""" +74 7 loss """bceaftersigmoid""" +74 7 regularizer """no""" +74 7 optimizer """adadelta""" +74 7 training_loop """owa""" +74 7 negative_sampler """basic""" +74 7 evaluator """rankbased""" +74 8 dataset """kinships""" +74 8 model """conve""" +74 8 loss """bceaftersigmoid""" +74 8 regularizer """no""" +74 8 optimizer """adadelta""" +74 8 training_loop """owa""" +74 8 negative_sampler """basic""" +74 8 evaluator """rankbased""" +74 9 dataset """kinships""" +74 9 model """conve""" +74 9 loss """bceaftersigmoid""" +74 9 regularizer """no""" +74 9 optimizer """adadelta""" +74 9 training_loop """owa""" +74 9 negative_sampler """basic""" +74 9 evaluator """rankbased""" +74 10 dataset """kinships""" +74 10 model """conve""" +74 10 loss """bceaftersigmoid""" +74 10 regularizer """no""" +74 10 optimizer """adadelta""" +74 10 training_loop """owa""" +74 10 negative_sampler """basic""" +74 10 evaluator """rankbased""" +74 11 dataset """kinships""" +74 11 model """conve""" +74 11 loss """bceaftersigmoid""" +74 11 regularizer """no""" +74 11 optimizer """adadelta""" +74 11 training_loop """owa""" +74 11 negative_sampler """basic""" +74 11 evaluator """rankbased""" +74 12 dataset """kinships""" +74 12 model """conve""" +74 12 loss """bceaftersigmoid""" +74 12 regularizer """no""" +74 12 optimizer """adadelta""" +74 12 training_loop """owa""" +74 12 negative_sampler """basic""" +74 12 evaluator """rankbased""" +74 13 dataset """kinships""" +74 13 model """conve""" +74 13 loss """bceaftersigmoid""" +74 13 regularizer """no""" +74 13 optimizer """adadelta""" +74 13 training_loop """owa""" +74 13 negative_sampler """basic""" +74 13 evaluator """rankbased""" +74 14 dataset """kinships""" +74 14 model """conve""" +74 14 loss """bceaftersigmoid""" +74 14 regularizer """no""" +74 14 optimizer """adadelta""" +74 14 training_loop """owa""" +74 14 negative_sampler """basic""" +74 14 evaluator """rankbased""" +74 15 dataset """kinships""" +74 15 model 
"""conve""" +74 15 loss """bceaftersigmoid""" +74 15 regularizer """no""" +74 15 optimizer """adadelta""" +74 15 training_loop """owa""" +74 15 negative_sampler """basic""" +74 15 evaluator """rankbased""" +74 16 dataset """kinships""" +74 16 model """conve""" +74 16 loss """bceaftersigmoid""" +74 16 regularizer """no""" +74 16 optimizer """adadelta""" +74 16 training_loop """owa""" +74 16 negative_sampler """basic""" +74 16 evaluator """rankbased""" +74 17 dataset """kinships""" +74 17 model """conve""" +74 17 loss """bceaftersigmoid""" +74 17 regularizer """no""" +74 17 optimizer """adadelta""" +74 17 training_loop """owa""" +74 17 negative_sampler """basic""" +74 17 evaluator """rankbased""" +74 18 dataset """kinships""" +74 18 model """conve""" +74 18 loss """bceaftersigmoid""" +74 18 regularizer """no""" +74 18 optimizer """adadelta""" +74 18 training_loop """owa""" +74 18 negative_sampler """basic""" +74 18 evaluator """rankbased""" +74 19 dataset """kinships""" +74 19 model """conve""" +74 19 loss """bceaftersigmoid""" +74 19 regularizer """no""" +74 19 optimizer """adadelta""" +74 19 training_loop """owa""" +74 19 negative_sampler """basic""" +74 19 evaluator """rankbased""" +74 20 dataset """kinships""" +74 20 model """conve""" +74 20 loss """bceaftersigmoid""" +74 20 regularizer """no""" +74 20 optimizer """adadelta""" +74 20 training_loop """owa""" +74 20 negative_sampler """basic""" +74 20 evaluator """rankbased""" +74 21 dataset """kinships""" +74 21 model """conve""" +74 21 loss """bceaftersigmoid""" +74 21 regularizer """no""" +74 21 optimizer """adadelta""" +74 21 training_loop """owa""" +74 21 negative_sampler """basic""" +74 21 evaluator """rankbased""" +74 22 dataset """kinships""" +74 22 model """conve""" +74 22 loss """bceaftersigmoid""" +74 22 regularizer """no""" +74 22 optimizer """adadelta""" +74 22 training_loop """owa""" +74 22 negative_sampler """basic""" +74 22 evaluator """rankbased""" +74 23 dataset """kinships""" +74 23 model 
"""conve""" +74 23 loss """bceaftersigmoid""" +74 23 regularizer """no""" +74 23 optimizer """adadelta""" +74 23 training_loop """owa""" +74 23 negative_sampler """basic""" +74 23 evaluator """rankbased""" +74 24 dataset """kinships""" +74 24 model """conve""" +74 24 loss """bceaftersigmoid""" +74 24 regularizer """no""" +74 24 optimizer """adadelta""" +74 24 training_loop """owa""" +74 24 negative_sampler """basic""" +74 24 evaluator """rankbased""" +74 25 dataset """kinships""" +74 25 model """conve""" +74 25 loss """bceaftersigmoid""" +74 25 regularizer """no""" +74 25 optimizer """adadelta""" +74 25 training_loop """owa""" +74 25 negative_sampler """basic""" +74 25 evaluator """rankbased""" +74 26 dataset """kinships""" +74 26 model """conve""" +74 26 loss """bceaftersigmoid""" +74 26 regularizer """no""" +74 26 optimizer """adadelta""" +74 26 training_loop """owa""" +74 26 negative_sampler """basic""" +74 26 evaluator """rankbased""" +74 27 dataset """kinships""" +74 27 model """conve""" +74 27 loss """bceaftersigmoid""" +74 27 regularizer """no""" +74 27 optimizer """adadelta""" +74 27 training_loop """owa""" +74 27 negative_sampler """basic""" +74 27 evaluator """rankbased""" +74 28 dataset """kinships""" +74 28 model """conve""" +74 28 loss """bceaftersigmoid""" +74 28 regularizer """no""" +74 28 optimizer """adadelta""" +74 28 training_loop """owa""" +74 28 negative_sampler """basic""" +74 28 evaluator """rankbased""" +74 29 dataset """kinships""" +74 29 model """conve""" +74 29 loss """bceaftersigmoid""" +74 29 regularizer """no""" +74 29 optimizer """adadelta""" +74 29 training_loop """owa""" +74 29 negative_sampler """basic""" +74 29 evaluator """rankbased""" +74 30 dataset """kinships""" +74 30 model """conve""" +74 30 loss """bceaftersigmoid""" +74 30 regularizer """no""" +74 30 optimizer """adadelta""" +74 30 training_loop """owa""" +74 30 negative_sampler """basic""" +74 30 evaluator """rankbased""" +74 31 dataset """kinships""" +74 31 model 
"""conve""" +74 31 loss """bceaftersigmoid""" +74 31 regularizer """no""" +74 31 optimizer """adadelta""" +74 31 training_loop """owa""" +74 31 negative_sampler """basic""" +74 31 evaluator """rankbased""" +74 32 dataset """kinships""" +74 32 model """conve""" +74 32 loss """bceaftersigmoid""" +74 32 regularizer """no""" +74 32 optimizer """adadelta""" +74 32 training_loop """owa""" +74 32 negative_sampler """basic""" +74 32 evaluator """rankbased""" +74 33 dataset """kinships""" +74 33 model """conve""" +74 33 loss """bceaftersigmoid""" +74 33 regularizer """no""" +74 33 optimizer """adadelta""" +74 33 training_loop """owa""" +74 33 negative_sampler """basic""" +74 33 evaluator """rankbased""" +74 34 dataset """kinships""" +74 34 model """conve""" +74 34 loss """bceaftersigmoid""" +74 34 regularizer """no""" +74 34 optimizer """adadelta""" +74 34 training_loop """owa""" +74 34 negative_sampler """basic""" +74 34 evaluator """rankbased""" +74 35 dataset """kinships""" +74 35 model """conve""" +74 35 loss """bceaftersigmoid""" +74 35 regularizer """no""" +74 35 optimizer """adadelta""" +74 35 training_loop """owa""" +74 35 negative_sampler """basic""" +74 35 evaluator """rankbased""" +74 36 dataset """kinships""" +74 36 model """conve""" +74 36 loss """bceaftersigmoid""" +74 36 regularizer """no""" +74 36 optimizer """adadelta""" +74 36 training_loop """owa""" +74 36 negative_sampler """basic""" +74 36 evaluator """rankbased""" +74 37 dataset """kinships""" +74 37 model """conve""" +74 37 loss """bceaftersigmoid""" +74 37 regularizer """no""" +74 37 optimizer """adadelta""" +74 37 training_loop """owa""" +74 37 negative_sampler """basic""" +74 37 evaluator """rankbased""" +74 38 dataset """kinships""" +74 38 model """conve""" +74 38 loss """bceaftersigmoid""" +74 38 regularizer """no""" +74 38 optimizer """adadelta""" +74 38 training_loop """owa""" +74 38 negative_sampler """basic""" +74 38 evaluator """rankbased""" +74 39 dataset """kinships""" +74 39 model 
"""conve""" +74 39 loss """bceaftersigmoid""" +74 39 regularizer """no""" +74 39 optimizer """adadelta""" +74 39 training_loop """owa""" +74 39 negative_sampler """basic""" +74 39 evaluator """rankbased""" +74 40 dataset """kinships""" +74 40 model """conve""" +74 40 loss """bceaftersigmoid""" +74 40 regularizer """no""" +74 40 optimizer """adadelta""" +74 40 training_loop """owa""" +74 40 negative_sampler """basic""" +74 40 evaluator """rankbased""" +74 41 dataset """kinships""" +74 41 model """conve""" +74 41 loss """bceaftersigmoid""" +74 41 regularizer """no""" +74 41 optimizer """adadelta""" +74 41 training_loop """owa""" +74 41 negative_sampler """basic""" +74 41 evaluator """rankbased""" +74 42 dataset """kinships""" +74 42 model """conve""" +74 42 loss """bceaftersigmoid""" +74 42 regularizer """no""" +74 42 optimizer """adadelta""" +74 42 training_loop """owa""" +74 42 negative_sampler """basic""" +74 42 evaluator """rankbased""" +74 43 dataset """kinships""" +74 43 model """conve""" +74 43 loss """bceaftersigmoid""" +74 43 regularizer """no""" +74 43 optimizer """adadelta""" +74 43 training_loop """owa""" +74 43 negative_sampler """basic""" +74 43 evaluator """rankbased""" +74 44 dataset """kinships""" +74 44 model """conve""" +74 44 loss """bceaftersigmoid""" +74 44 regularizer """no""" +74 44 optimizer """adadelta""" +74 44 training_loop """owa""" +74 44 negative_sampler """basic""" +74 44 evaluator """rankbased""" +74 45 dataset """kinships""" +74 45 model """conve""" +74 45 loss """bceaftersigmoid""" +74 45 regularizer """no""" +74 45 optimizer """adadelta""" +74 45 training_loop """owa""" +74 45 negative_sampler """basic""" +74 45 evaluator """rankbased""" +74 46 dataset """kinships""" +74 46 model """conve""" +74 46 loss """bceaftersigmoid""" +74 46 regularizer """no""" +74 46 optimizer """adadelta""" +74 46 training_loop """owa""" +74 46 negative_sampler """basic""" +74 46 evaluator """rankbased""" +74 47 dataset """kinships""" +74 47 model 
"""conve""" +74 47 loss """bceaftersigmoid""" +74 47 regularizer """no""" +74 47 optimizer """adadelta""" +74 47 training_loop """owa""" +74 47 negative_sampler """basic""" +74 47 evaluator """rankbased""" +74 48 dataset """kinships""" +74 48 model """conve""" +74 48 loss """bceaftersigmoid""" +74 48 regularizer """no""" +74 48 optimizer """adadelta""" +74 48 training_loop """owa""" +74 48 negative_sampler """basic""" +74 48 evaluator """rankbased""" +74 49 dataset """kinships""" +74 49 model """conve""" +74 49 loss """bceaftersigmoid""" +74 49 regularizer """no""" +74 49 optimizer """adadelta""" +74 49 training_loop """owa""" +74 49 negative_sampler """basic""" +74 49 evaluator """rankbased""" +74 50 dataset """kinships""" +74 50 model """conve""" +74 50 loss """bceaftersigmoid""" +74 50 regularizer """no""" +74 50 optimizer """adadelta""" +74 50 training_loop """owa""" +74 50 negative_sampler """basic""" +74 50 evaluator """rankbased""" +74 51 dataset """kinships""" +74 51 model """conve""" +74 51 loss """bceaftersigmoid""" +74 51 regularizer """no""" +74 51 optimizer """adadelta""" +74 51 training_loop """owa""" +74 51 negative_sampler """basic""" +74 51 evaluator """rankbased""" +74 52 dataset """kinships""" +74 52 model """conve""" +74 52 loss """bceaftersigmoid""" +74 52 regularizer """no""" +74 52 optimizer """adadelta""" +74 52 training_loop """owa""" +74 52 negative_sampler """basic""" +74 52 evaluator """rankbased""" +74 53 dataset """kinships""" +74 53 model """conve""" +74 53 loss """bceaftersigmoid""" +74 53 regularizer """no""" +74 53 optimizer """adadelta""" +74 53 training_loop """owa""" +74 53 negative_sampler """basic""" +74 53 evaluator """rankbased""" +75 1 model.output_channels 38.0 +75 1 model.input_dropout 0.274516908302297 +75 1 model.output_dropout 0.18063362598028965 +75 1 model.feature_map_dropout 0.11039324367071651 +75 1 model.embedding_dim 1.0 +75 1 negative_sampler.num_negs_per_pos 72.0 +75 1 training.batch_size 1.0 +75 2 
model.output_channels 58.0 +75 2 model.input_dropout 0.4577879417755018 +75 2 model.output_dropout 0.4253946412510977 +75 2 model.feature_map_dropout 0.2952954759006321 +75 2 model.embedding_dim 2.0 +75 2 negative_sampler.num_negs_per_pos 77.0 +75 2 training.batch_size 2.0 +75 3 model.output_channels 44.0 +75 3 model.input_dropout 0.13474233329508822 +75 3 model.output_dropout 0.2942618019062791 +75 3 model.feature_map_dropout 0.19613330223726633 +75 3 model.embedding_dim 2.0 +75 3 negative_sampler.num_negs_per_pos 63.0 +75 3 training.batch_size 2.0 +75 4 model.output_channels 40.0 +75 4 model.input_dropout 0.25985319753837394 +75 4 model.output_dropout 0.4683925959100405 +75 4 model.feature_map_dropout 0.0836992606189575 +75 4 model.embedding_dim 1.0 +75 4 negative_sampler.num_negs_per_pos 24.0 +75 4 training.batch_size 2.0 +75 5 model.output_channels 41.0 +75 5 model.input_dropout 0.19535714489919553 +75 5 model.output_dropout 0.3659535636504373 +75 5 model.feature_map_dropout 0.27961482940773896 +75 5 model.embedding_dim 0.0 +75 5 negative_sampler.num_negs_per_pos 32.0 +75 5 training.batch_size 2.0 +75 6 model.output_channels 51.0 +75 6 model.input_dropout 0.24097820575569995 +75 6 model.output_dropout 0.13467398982303652 +75 6 model.feature_map_dropout 0.11557756274821368 +75 6 model.embedding_dim 1.0 +75 6 negative_sampler.num_negs_per_pos 32.0 +75 6 training.batch_size 1.0 +75 7 model.output_channels 22.0 +75 7 model.input_dropout 0.24051314797671464 +75 7 model.output_dropout 0.30395013566921136 +75 7 model.feature_map_dropout 0.3301142272777999 +75 7 model.embedding_dim 0.0 +75 7 negative_sampler.num_negs_per_pos 88.0 +75 7 training.batch_size 1.0 +75 8 model.output_channels 60.0 +75 8 model.input_dropout 0.27704901857149267 +75 8 model.output_dropout 0.11497802415682679 +75 8 model.feature_map_dropout 0.1655766931693058 +75 8 model.embedding_dim 1.0 +75 8 negative_sampler.num_negs_per_pos 33.0 +75 8 training.batch_size 2.0 +75 9 model.output_channels 49.0 
+75 9 model.input_dropout 0.42161765212901053 +75 9 model.output_dropout 0.47119166775208393 +75 9 model.feature_map_dropout 0.002943685691642939 +75 9 model.embedding_dim 2.0 +75 9 negative_sampler.num_negs_per_pos 4.0 +75 9 training.batch_size 2.0 +75 10 model.output_channels 62.0 +75 10 model.input_dropout 0.011048470792698606 +75 10 model.output_dropout 0.34645620432931146 +75 10 model.feature_map_dropout 0.1757191690348291 +75 10 model.embedding_dim 0.0 +75 10 negative_sampler.num_negs_per_pos 13.0 +75 10 training.batch_size 2.0 +75 11 model.output_channels 36.0 +75 11 model.input_dropout 0.1663989168214664 +75 11 model.output_dropout 0.35623204470951886 +75 11 model.feature_map_dropout 0.09338219462779912 +75 11 model.embedding_dim 0.0 +75 11 negative_sampler.num_negs_per_pos 85.0 +75 11 training.batch_size 2.0 +75 12 model.output_channels 41.0 +75 12 model.input_dropout 0.1292574622343326 +75 12 model.output_dropout 0.25242463792432557 +75 12 model.feature_map_dropout 0.4240112185055209 +75 12 model.embedding_dim 2.0 +75 12 negative_sampler.num_negs_per_pos 72.0 +75 12 training.batch_size 0.0 +75 13 model.output_channels 30.0 +75 13 model.input_dropout 0.19783601502264242 +75 13 model.output_dropout 0.4861753054894486 +75 13 model.feature_map_dropout 0.4744791564774507 +75 13 model.embedding_dim 2.0 +75 13 negative_sampler.num_negs_per_pos 65.0 +75 13 training.batch_size 1.0 +75 14 model.output_channels 18.0 +75 14 model.input_dropout 0.27503225616882804 +75 14 model.output_dropout 0.2183370748936127 +75 14 model.feature_map_dropout 0.23984863679209273 +75 14 model.embedding_dim 2.0 +75 14 negative_sampler.num_negs_per_pos 55.0 +75 14 training.batch_size 1.0 +75 15 model.output_channels 56.0 +75 15 model.input_dropout 0.25777950329824784 +75 15 model.output_dropout 0.022915272825113908 +75 15 model.feature_map_dropout 0.06928933578644264 +75 15 model.embedding_dim 0.0 +75 15 negative_sampler.num_negs_per_pos 67.0 +75 15 training.batch_size 0.0 +75 16 
model.output_channels 62.0 +75 16 model.input_dropout 0.0466714801765199 +75 16 model.output_dropout 0.20328984092056784 +75 16 model.feature_map_dropout 0.4105648429018616 +75 16 model.embedding_dim 2.0 +75 16 negative_sampler.num_negs_per_pos 63.0 +75 16 training.batch_size 1.0 +75 17 model.output_channels 56.0 +75 17 model.input_dropout 0.11212985949015425 +75 17 model.output_dropout 0.489177299368081 +75 17 model.feature_map_dropout 0.4278377022601261 +75 17 model.embedding_dim 0.0 +75 17 negative_sampler.num_negs_per_pos 13.0 +75 17 training.batch_size 1.0 +75 18 model.output_channels 40.0 +75 18 model.input_dropout 0.02949290566881546 +75 18 model.output_dropout 0.4350305327156905 +75 18 model.feature_map_dropout 0.33645748929106956 +75 18 model.embedding_dim 1.0 +75 18 negative_sampler.num_negs_per_pos 43.0 +75 18 training.batch_size 2.0 +75 19 model.output_channels 40.0 +75 19 model.input_dropout 0.11153638612812528 +75 19 model.output_dropout 0.19977691019730726 +75 19 model.feature_map_dropout 0.2124441513518912 +75 19 model.embedding_dim 1.0 +75 19 negative_sampler.num_negs_per_pos 7.0 +75 19 training.batch_size 1.0 +75 20 model.output_channels 59.0 +75 20 model.input_dropout 0.29257721242555257 +75 20 model.output_dropout 0.34593873554559534 +75 20 model.feature_map_dropout 0.23620642566458538 +75 20 model.embedding_dim 2.0 +75 20 negative_sampler.num_negs_per_pos 77.0 +75 20 training.batch_size 1.0 +75 21 model.output_channels 56.0 +75 21 model.input_dropout 0.2653577344166213 +75 21 model.output_dropout 0.02512035302255866 +75 21 model.feature_map_dropout 0.34122325288342226 +75 21 model.embedding_dim 0.0 +75 21 negative_sampler.num_negs_per_pos 45.0 +75 21 training.batch_size 2.0 +75 22 model.output_channels 63.0 +75 22 model.input_dropout 0.011021879452644212 +75 22 model.output_dropout 0.3956756703511123 +75 22 model.feature_map_dropout 0.25518380323937767 +75 22 model.embedding_dim 1.0 +75 22 negative_sampler.num_negs_per_pos 12.0 +75 22 
training.batch_size 2.0 +75 23 model.output_channels 30.0 +75 23 model.input_dropout 0.034881375168280326 +75 23 model.output_dropout 0.36645480358643945 +75 23 model.feature_map_dropout 0.3723365448604672 +75 23 model.embedding_dim 2.0 +75 23 negative_sampler.num_negs_per_pos 79.0 +75 23 training.batch_size 0.0 +75 24 model.output_channels 55.0 +75 24 model.input_dropout 0.4526879449225269 +75 24 model.output_dropout 0.14499310098374513 +75 24 model.feature_map_dropout 0.005298329859884099 +75 24 model.embedding_dim 2.0 +75 24 negative_sampler.num_negs_per_pos 94.0 +75 24 training.batch_size 1.0 +75 25 model.output_channels 41.0 +75 25 model.input_dropout 0.3158641423899717 +75 25 model.output_dropout 0.07584258469588168 +75 25 model.feature_map_dropout 0.25487619185801813 +75 25 model.embedding_dim 1.0 +75 25 negative_sampler.num_negs_per_pos 62.0 +75 25 training.batch_size 2.0 +75 26 model.output_channels 34.0 +75 26 model.input_dropout 0.3684067789348932 +75 26 model.output_dropout 0.007935970139266668 +75 26 model.feature_map_dropout 0.20109343190838075 +75 26 model.embedding_dim 1.0 +75 26 negative_sampler.num_negs_per_pos 64.0 +75 26 training.batch_size 2.0 +75 27 model.output_channels 38.0 +75 27 model.input_dropout 0.26585828720640703 +75 27 model.output_dropout 0.31531125327232146 +75 27 model.feature_map_dropout 0.16265715617236348 +75 27 model.embedding_dim 0.0 +75 27 negative_sampler.num_negs_per_pos 31.0 +75 27 training.batch_size 0.0 +75 28 model.output_channels 33.0 +75 28 model.input_dropout 0.3792386699985528 +75 28 model.output_dropout 0.4739294581552849 +75 28 model.feature_map_dropout 0.3363317465456893 +75 28 model.embedding_dim 2.0 +75 28 negative_sampler.num_negs_per_pos 50.0 +75 28 training.batch_size 1.0 +75 29 model.output_channels 42.0 +75 29 model.input_dropout 0.15064623264061128 +75 29 model.output_dropout 0.4287490330647968 +75 29 model.feature_map_dropout 0.21772545926216014 +75 29 model.embedding_dim 0.0 +75 29 
negative_sampler.num_negs_per_pos 35.0 +75 29 training.batch_size 2.0 +75 30 model.output_channels 49.0 +75 30 model.input_dropout 0.3451058692858358 +75 30 model.output_dropout 0.026925413826392408 +75 30 model.feature_map_dropout 0.1018684435297858 +75 30 model.embedding_dim 1.0 +75 30 negative_sampler.num_negs_per_pos 59.0 +75 30 training.batch_size 0.0 +75 31 model.output_channels 29.0 +75 31 model.input_dropout 0.4513446718405021 +75 31 model.output_dropout 0.40254697549662144 +75 31 model.feature_map_dropout 0.0862575650253824 +75 31 model.embedding_dim 0.0 +75 31 negative_sampler.num_negs_per_pos 32.0 +75 31 training.batch_size 2.0 +75 32 model.output_channels 62.0 +75 32 model.input_dropout 0.030869212654995926 +75 32 model.output_dropout 0.033676992739467726 +75 32 model.feature_map_dropout 0.13868133728926413 +75 32 model.embedding_dim 2.0 +75 32 negative_sampler.num_negs_per_pos 15.0 +75 32 training.batch_size 0.0 +75 33 model.output_channels 45.0 +75 33 model.input_dropout 0.3307611409896378 +75 33 model.output_dropout 0.1971615028278988 +75 33 model.feature_map_dropout 0.2384344819782045 +75 33 model.embedding_dim 2.0 +75 33 negative_sampler.num_negs_per_pos 51.0 +75 33 training.batch_size 2.0 +75 34 model.output_channels 33.0 +75 34 model.input_dropout 0.3149126627762699 +75 34 model.output_dropout 0.4654130780656955 +75 34 model.feature_map_dropout 0.0724011452674847 +75 34 model.embedding_dim 2.0 +75 34 negative_sampler.num_negs_per_pos 19.0 +75 34 training.batch_size 0.0 +75 35 model.output_channels 63.0 +75 35 model.input_dropout 0.32796312464789096 +75 35 model.output_dropout 0.030130329213895468 +75 35 model.feature_map_dropout 0.03737396501686624 +75 35 model.embedding_dim 1.0 +75 35 negative_sampler.num_negs_per_pos 7.0 +75 35 training.batch_size 2.0 +75 36 model.output_channels 23.0 +75 36 model.input_dropout 0.46199752097362445 +75 36 model.output_dropout 0.16360162865448785 +75 36 model.feature_map_dropout 0.3847542545218979 +75 36 
model.embedding_dim 1.0 +75 36 negative_sampler.num_negs_per_pos 11.0 +75 36 training.batch_size 2.0 +75 37 model.output_channels 48.0 +75 37 model.input_dropout 0.17681403599567647 +75 37 model.output_dropout 0.34870907801542433 +75 37 model.feature_map_dropout 0.46760107919281924 +75 37 model.embedding_dim 1.0 +75 37 negative_sampler.num_negs_per_pos 67.0 +75 37 training.batch_size 1.0 +75 38 model.output_channels 34.0 +75 38 model.input_dropout 0.30639157165129305 +75 38 model.output_dropout 0.44538496184601967 +75 38 model.feature_map_dropout 0.32180354325976057 +75 38 model.embedding_dim 1.0 +75 38 negative_sampler.num_negs_per_pos 67.0 +75 38 training.batch_size 2.0 +75 39 model.output_channels 21.0 +75 39 model.input_dropout 0.06388775032368466 +75 39 model.output_dropout 0.14121630467775143 +75 39 model.feature_map_dropout 0.4978969758044246 +75 39 model.embedding_dim 0.0 +75 39 negative_sampler.num_negs_per_pos 79.0 +75 39 training.batch_size 0.0 +75 40 model.output_channels 20.0 +75 40 model.input_dropout 0.11423690326335673 +75 40 model.output_dropout 0.3908145062712931 +75 40 model.feature_map_dropout 0.28717920671475367 +75 40 model.embedding_dim 2.0 +75 40 negative_sampler.num_negs_per_pos 42.0 +75 40 training.batch_size 1.0 +75 41 model.output_channels 58.0 +75 41 model.input_dropout 0.4902657283103054 +75 41 model.output_dropout 0.020171482995767986 +75 41 model.feature_map_dropout 0.15837359639247767 +75 41 model.embedding_dim 1.0 +75 41 negative_sampler.num_negs_per_pos 10.0 +75 41 training.batch_size 2.0 +75 42 model.output_channels 61.0 +75 42 model.input_dropout 0.08647363669809732 +75 42 model.output_dropout 0.11661011315313463 +75 42 model.feature_map_dropout 0.14884331875917023 +75 42 model.embedding_dim 2.0 +75 42 negative_sampler.num_negs_per_pos 35.0 +75 42 training.batch_size 0.0 +75 43 model.output_channels 41.0 +75 43 model.input_dropout 0.34584688101599914 +75 43 model.output_dropout 0.13509725693214253 +75 43 
model.feature_map_dropout 0.315483495793823 +75 43 model.embedding_dim 0.0 +75 43 negative_sampler.num_negs_per_pos 85.0 +75 43 training.batch_size 0.0 +75 44 model.output_channels 34.0 +75 44 model.input_dropout 0.31077300088182613 +75 44 model.output_dropout 0.22921451150615013 +75 44 model.feature_map_dropout 0.37115542417296316 +75 44 model.embedding_dim 1.0 +75 44 negative_sampler.num_negs_per_pos 69.0 +75 44 training.batch_size 1.0 +75 45 model.output_channels 39.0 +75 45 model.input_dropout 0.33178387025807116 +75 45 model.output_dropout 0.3285883884833523 +75 45 model.feature_map_dropout 0.0013071281670705703 +75 45 model.embedding_dim 2.0 +75 45 negative_sampler.num_negs_per_pos 13.0 +75 45 training.batch_size 1.0 +75 46 model.output_channels 47.0 +75 46 model.input_dropout 0.20103719500455058 +75 46 model.output_dropout 0.43667087903925617 +75 46 model.feature_map_dropout 0.46546076590807084 +75 46 model.embedding_dim 2.0 +75 46 negative_sampler.num_negs_per_pos 84.0 +75 46 training.batch_size 2.0 +75 47 model.output_channels 38.0 +75 47 model.input_dropout 0.07413072089665174 +75 47 model.output_dropout 0.41370004508820113 +75 47 model.feature_map_dropout 0.1919410395504232 +75 47 model.embedding_dim 0.0 +75 47 negative_sampler.num_negs_per_pos 44.0 +75 47 training.batch_size 2.0 +75 48 model.output_channels 61.0 +75 48 model.input_dropout 0.24956255453502119 +75 48 model.output_dropout 0.001120882404359258 +75 48 model.feature_map_dropout 0.36700068123132645 +75 48 model.embedding_dim 1.0 +75 48 negative_sampler.num_negs_per_pos 94.0 +75 48 training.batch_size 2.0 +75 49 model.output_channels 44.0 +75 49 model.input_dropout 0.15496607040270094 +75 49 model.output_dropout 0.18833824123762333 +75 49 model.feature_map_dropout 0.18742840228738233 +75 49 model.embedding_dim 0.0 +75 49 negative_sampler.num_negs_per_pos 21.0 +75 49 training.batch_size 0.0 +75 50 model.output_channels 34.0 +75 50 model.input_dropout 0.3696337174130702 +75 50 
model.output_dropout 0.2561516192104748 +75 50 model.feature_map_dropout 0.4176786308588411 +75 50 model.embedding_dim 0.0 +75 50 negative_sampler.num_negs_per_pos 23.0 +75 50 training.batch_size 1.0 +75 51 model.output_channels 60.0 +75 51 model.input_dropout 0.4058266484374217 +75 51 model.output_dropout 0.47432222561861687 +75 51 model.feature_map_dropout 0.0331511696285009 +75 51 model.embedding_dim 2.0 +75 51 negative_sampler.num_negs_per_pos 39.0 +75 51 training.batch_size 0.0 +75 52 model.output_channels 56.0 +75 52 model.input_dropout 0.029757498340759025 +75 52 model.output_dropout 0.10358204908868635 +75 52 model.feature_map_dropout 0.44009602132583064 +75 52 model.embedding_dim 0.0 +75 52 negative_sampler.num_negs_per_pos 71.0 +75 52 training.batch_size 1.0 +75 53 model.output_channels 26.0 +75 53 model.input_dropout 0.0008384784909468146 +75 53 model.output_dropout 0.437375643055724 +75 53 model.feature_map_dropout 0.3648887165605453 +75 53 model.embedding_dim 2.0 +75 53 negative_sampler.num_negs_per_pos 70.0 +75 53 training.batch_size 0.0 +75 54 model.output_channels 24.0 +75 54 model.input_dropout 0.12494823981667164 +75 54 model.output_dropout 0.4604094562730238 +75 54 model.feature_map_dropout 0.36557764158163575 +75 54 model.embedding_dim 2.0 +75 54 negative_sampler.num_negs_per_pos 19.0 +75 54 training.batch_size 1.0 +75 55 model.output_channels 62.0 +75 55 model.input_dropout 0.36387304704843954 +75 55 model.output_dropout 0.40163667631527195 +75 55 model.feature_map_dropout 0.43926184032322707 +75 55 model.embedding_dim 2.0 +75 55 negative_sampler.num_negs_per_pos 89.0 +75 55 training.batch_size 0.0 +75 1 dataset """kinships""" +75 1 model """conve""" +75 1 loss """softplus""" +75 1 regularizer """no""" +75 1 optimizer """adadelta""" +75 1 training_loop """owa""" +75 1 negative_sampler """basic""" +75 1 evaluator """rankbased""" +75 2 dataset """kinships""" +75 2 model """conve""" +75 2 loss """softplus""" +75 2 regularizer """no""" +75 2 
optimizer """adadelta""" +75 2 training_loop """owa""" +75 2 negative_sampler """basic""" +75 2 evaluator """rankbased""" +75 3 dataset """kinships""" +75 3 model """conve""" +75 3 loss """softplus""" +75 3 regularizer """no""" +75 3 optimizer """adadelta""" +75 3 training_loop """owa""" +75 3 negative_sampler """basic""" +75 3 evaluator """rankbased""" +75 4 dataset """kinships""" +75 4 model """conve""" +75 4 loss """softplus""" +75 4 regularizer """no""" +75 4 optimizer """adadelta""" +75 4 training_loop """owa""" +75 4 negative_sampler """basic""" +75 4 evaluator """rankbased""" +75 5 dataset """kinships""" +75 5 model """conve""" +75 5 loss """softplus""" +75 5 regularizer """no""" +75 5 optimizer """adadelta""" +75 5 training_loop """owa""" +75 5 negative_sampler """basic""" +75 5 evaluator """rankbased""" +75 6 dataset """kinships""" +75 6 model """conve""" +75 6 loss """softplus""" +75 6 regularizer """no""" +75 6 optimizer """adadelta""" +75 6 training_loop """owa""" +75 6 negative_sampler """basic""" +75 6 evaluator """rankbased""" +75 7 dataset """kinships""" +75 7 model """conve""" +75 7 loss """softplus""" +75 7 regularizer """no""" +75 7 optimizer """adadelta""" +75 7 training_loop """owa""" +75 7 negative_sampler """basic""" +75 7 evaluator """rankbased""" +75 8 dataset """kinships""" +75 8 model """conve""" +75 8 loss """softplus""" +75 8 regularizer """no""" +75 8 optimizer """adadelta""" +75 8 training_loop """owa""" +75 8 negative_sampler """basic""" +75 8 evaluator """rankbased""" +75 9 dataset """kinships""" +75 9 model """conve""" +75 9 loss """softplus""" +75 9 regularizer """no""" +75 9 optimizer """adadelta""" +75 9 training_loop """owa""" +75 9 negative_sampler """basic""" +75 9 evaluator """rankbased""" +75 10 dataset """kinships""" +75 10 model """conve""" +75 10 loss """softplus""" +75 10 regularizer """no""" +75 10 optimizer """adadelta""" +75 10 training_loop """owa""" +75 10 negative_sampler """basic""" +75 10 evaluator 
"""rankbased""" +75 11 dataset """kinships""" +75 11 model """conve""" +75 11 loss """softplus""" +75 11 regularizer """no""" +75 11 optimizer """adadelta""" +75 11 training_loop """owa""" +75 11 negative_sampler """basic""" +75 11 evaluator """rankbased""" +75 12 dataset """kinships""" +75 12 model """conve""" +75 12 loss """softplus""" +75 12 regularizer """no""" +75 12 optimizer """adadelta""" +75 12 training_loop """owa""" +75 12 negative_sampler """basic""" +75 12 evaluator """rankbased""" +75 13 dataset """kinships""" +75 13 model """conve""" +75 13 loss """softplus""" +75 13 regularizer """no""" +75 13 optimizer """adadelta""" +75 13 training_loop """owa""" +75 13 negative_sampler """basic""" +75 13 evaluator """rankbased""" +75 14 dataset """kinships""" +75 14 model """conve""" +75 14 loss """softplus""" +75 14 regularizer """no""" +75 14 optimizer """adadelta""" +75 14 training_loop """owa""" +75 14 negative_sampler """basic""" +75 14 evaluator """rankbased""" +75 15 dataset """kinships""" +75 15 model """conve""" +75 15 loss """softplus""" +75 15 regularizer """no""" +75 15 optimizer """adadelta""" +75 15 training_loop """owa""" +75 15 negative_sampler """basic""" +75 15 evaluator """rankbased""" +75 16 dataset """kinships""" +75 16 model """conve""" +75 16 loss """softplus""" +75 16 regularizer """no""" +75 16 optimizer """adadelta""" +75 16 training_loop """owa""" +75 16 negative_sampler """basic""" +75 16 evaluator """rankbased""" +75 17 dataset """kinships""" +75 17 model """conve""" +75 17 loss """softplus""" +75 17 regularizer """no""" +75 17 optimizer """adadelta""" +75 17 training_loop """owa""" +75 17 negative_sampler """basic""" +75 17 evaluator """rankbased""" +75 18 dataset """kinships""" +75 18 model """conve""" +75 18 loss """softplus""" +75 18 regularizer """no""" +75 18 optimizer """adadelta""" +75 18 training_loop """owa""" +75 18 negative_sampler """basic""" +75 18 evaluator """rankbased""" +75 19 dataset """kinships""" +75 19 model 
"""conve""" +75 19 loss """softplus""" +75 19 regularizer """no""" +75 19 optimizer """adadelta""" +75 19 training_loop """owa""" +75 19 negative_sampler """basic""" +75 19 evaluator """rankbased""" +75 20 dataset """kinships""" +75 20 model """conve""" +75 20 loss """softplus""" +75 20 regularizer """no""" +75 20 optimizer """adadelta""" +75 20 training_loop """owa""" +75 20 negative_sampler """basic""" +75 20 evaluator """rankbased""" +75 21 dataset """kinships""" +75 21 model """conve""" +75 21 loss """softplus""" +75 21 regularizer """no""" +75 21 optimizer """adadelta""" +75 21 training_loop """owa""" +75 21 negative_sampler """basic""" +75 21 evaluator """rankbased""" +75 22 dataset """kinships""" +75 22 model """conve""" +75 22 loss """softplus""" +75 22 regularizer """no""" +75 22 optimizer """adadelta""" +75 22 training_loop """owa""" +75 22 negative_sampler """basic""" +75 22 evaluator """rankbased""" +75 23 dataset """kinships""" +75 23 model """conve""" +75 23 loss """softplus""" +75 23 regularizer """no""" +75 23 optimizer """adadelta""" +75 23 training_loop """owa""" +75 23 negative_sampler """basic""" +75 23 evaluator """rankbased""" +75 24 dataset """kinships""" +75 24 model """conve""" +75 24 loss """softplus""" +75 24 regularizer """no""" +75 24 optimizer """adadelta""" +75 24 training_loop """owa""" +75 24 negative_sampler """basic""" +75 24 evaluator """rankbased""" +75 25 dataset """kinships""" +75 25 model """conve""" +75 25 loss """softplus""" +75 25 regularizer """no""" +75 25 optimizer """adadelta""" +75 25 training_loop """owa""" +75 25 negative_sampler """basic""" +75 25 evaluator """rankbased""" +75 26 dataset """kinships""" +75 26 model """conve""" +75 26 loss """softplus""" +75 26 regularizer """no""" +75 26 optimizer """adadelta""" +75 26 training_loop """owa""" +75 26 negative_sampler """basic""" +75 26 evaluator """rankbased""" +75 27 dataset """kinships""" +75 27 model """conve""" +75 27 loss """softplus""" +75 27 regularizer 
"""no""" +75 27 optimizer """adadelta""" +75 27 training_loop """owa""" +75 27 negative_sampler """basic""" +75 27 evaluator """rankbased""" +75 28 dataset """kinships""" +75 28 model """conve""" +75 28 loss """softplus""" +75 28 regularizer """no""" +75 28 optimizer """adadelta""" +75 28 training_loop """owa""" +75 28 negative_sampler """basic""" +75 28 evaluator """rankbased""" +75 29 dataset """kinships""" +75 29 model """conve""" +75 29 loss """softplus""" +75 29 regularizer """no""" +75 29 optimizer """adadelta""" +75 29 training_loop """owa""" +75 29 negative_sampler """basic""" +75 29 evaluator """rankbased""" +75 30 dataset """kinships""" +75 30 model """conve""" +75 30 loss """softplus""" +75 30 regularizer """no""" +75 30 optimizer """adadelta""" +75 30 training_loop """owa""" +75 30 negative_sampler """basic""" +75 30 evaluator """rankbased""" +75 31 dataset """kinships""" +75 31 model """conve""" +75 31 loss """softplus""" +75 31 regularizer """no""" +75 31 optimizer """adadelta""" +75 31 training_loop """owa""" +75 31 negative_sampler """basic""" +75 31 evaluator """rankbased""" +75 32 dataset """kinships""" +75 32 model """conve""" +75 32 loss """softplus""" +75 32 regularizer """no""" +75 32 optimizer """adadelta""" +75 32 training_loop """owa""" +75 32 negative_sampler """basic""" +75 32 evaluator """rankbased""" +75 33 dataset """kinships""" +75 33 model """conve""" +75 33 loss """softplus""" +75 33 regularizer """no""" +75 33 optimizer """adadelta""" +75 33 training_loop """owa""" +75 33 negative_sampler """basic""" +75 33 evaluator """rankbased""" +75 34 dataset """kinships""" +75 34 model """conve""" +75 34 loss """softplus""" +75 34 regularizer """no""" +75 34 optimizer """adadelta""" +75 34 training_loop """owa""" +75 34 negative_sampler """basic""" +75 34 evaluator """rankbased""" +75 35 dataset """kinships""" +75 35 model """conve""" +75 35 loss """softplus""" +75 35 regularizer """no""" +75 35 optimizer """adadelta""" +75 35 training_loop 
"""owa""" +75 35 negative_sampler """basic""" +75 35 evaluator """rankbased""" +75 36 dataset """kinships""" +75 36 model """conve""" +75 36 loss """softplus""" +75 36 regularizer """no""" +75 36 optimizer """adadelta""" +75 36 training_loop """owa""" +75 36 negative_sampler """basic""" +75 36 evaluator """rankbased""" +75 37 dataset """kinships""" +75 37 model """conve""" +75 37 loss """softplus""" +75 37 regularizer """no""" +75 37 optimizer """adadelta""" +75 37 training_loop """owa""" +75 37 negative_sampler """basic""" +75 37 evaluator """rankbased""" +75 38 dataset """kinships""" +75 38 model """conve""" +75 38 loss """softplus""" +75 38 regularizer """no""" +75 38 optimizer """adadelta""" +75 38 training_loop """owa""" +75 38 negative_sampler """basic""" +75 38 evaluator """rankbased""" +75 39 dataset """kinships""" +75 39 model """conve""" +75 39 loss """softplus""" +75 39 regularizer """no""" +75 39 optimizer """adadelta""" +75 39 training_loop """owa""" +75 39 negative_sampler """basic""" +75 39 evaluator """rankbased""" +75 40 dataset """kinships""" +75 40 model """conve""" +75 40 loss """softplus""" +75 40 regularizer """no""" +75 40 optimizer """adadelta""" +75 40 training_loop """owa""" +75 40 negative_sampler """basic""" +75 40 evaluator """rankbased""" +75 41 dataset """kinships""" +75 41 model """conve""" +75 41 loss """softplus""" +75 41 regularizer """no""" +75 41 optimizer """adadelta""" +75 41 training_loop """owa""" +75 41 negative_sampler """basic""" +75 41 evaluator """rankbased""" +75 42 dataset """kinships""" +75 42 model """conve""" +75 42 loss """softplus""" +75 42 regularizer """no""" +75 42 optimizer """adadelta""" +75 42 training_loop """owa""" +75 42 negative_sampler """basic""" +75 42 evaluator """rankbased""" +75 43 dataset """kinships""" +75 43 model """conve""" +75 43 loss """softplus""" +75 43 regularizer """no""" +75 43 optimizer """adadelta""" +75 43 training_loop """owa""" +75 43 negative_sampler """basic""" +75 43 evaluator 
"""rankbased""" +75 44 dataset """kinships""" +75 44 model """conve""" +75 44 loss """softplus""" +75 44 regularizer """no""" +75 44 optimizer """adadelta""" +75 44 training_loop """owa""" +75 44 negative_sampler """basic""" +75 44 evaluator """rankbased""" +75 45 dataset """kinships""" +75 45 model """conve""" +75 45 loss """softplus""" +75 45 regularizer """no""" +75 45 optimizer """adadelta""" +75 45 training_loop """owa""" +75 45 negative_sampler """basic""" +75 45 evaluator """rankbased""" +75 46 dataset """kinships""" +75 46 model """conve""" +75 46 loss """softplus""" +75 46 regularizer """no""" +75 46 optimizer """adadelta""" +75 46 training_loop """owa""" +75 46 negative_sampler """basic""" +75 46 evaluator """rankbased""" +75 47 dataset """kinships""" +75 47 model """conve""" +75 47 loss """softplus""" +75 47 regularizer """no""" +75 47 optimizer """adadelta""" +75 47 training_loop """owa""" +75 47 negative_sampler """basic""" +75 47 evaluator """rankbased""" +75 48 dataset """kinships""" +75 48 model """conve""" +75 48 loss """softplus""" +75 48 regularizer """no""" +75 48 optimizer """adadelta""" +75 48 training_loop """owa""" +75 48 negative_sampler """basic""" +75 48 evaluator """rankbased""" +75 49 dataset """kinships""" +75 49 model """conve""" +75 49 loss """softplus""" +75 49 regularizer """no""" +75 49 optimizer """adadelta""" +75 49 training_loop """owa""" +75 49 negative_sampler """basic""" +75 49 evaluator """rankbased""" +75 50 dataset """kinships""" +75 50 model """conve""" +75 50 loss """softplus""" +75 50 regularizer """no""" +75 50 optimizer """adadelta""" +75 50 training_loop """owa""" +75 50 negative_sampler """basic""" +75 50 evaluator """rankbased""" +75 51 dataset """kinships""" +75 51 model """conve""" +75 51 loss """softplus""" +75 51 regularizer """no""" +75 51 optimizer """adadelta""" +75 51 training_loop """owa""" +75 51 negative_sampler """basic""" +75 51 evaluator """rankbased""" +75 52 dataset """kinships""" +75 52 model 
"""conve""" +75 52 loss """softplus""" +75 52 regularizer """no""" +75 52 optimizer """adadelta""" +75 52 training_loop """owa""" +75 52 negative_sampler """basic""" +75 52 evaluator """rankbased""" +75 53 dataset """kinships""" +75 53 model """conve""" +75 53 loss """softplus""" +75 53 regularizer """no""" +75 53 optimizer """adadelta""" +75 53 training_loop """owa""" +75 53 negative_sampler """basic""" +75 53 evaluator """rankbased""" +75 54 dataset """kinships""" +75 54 model """conve""" +75 54 loss """softplus""" +75 54 regularizer """no""" +75 54 optimizer """adadelta""" +75 54 training_loop """owa""" +75 54 negative_sampler """basic""" +75 54 evaluator """rankbased""" +75 55 dataset """kinships""" +75 55 model """conve""" +75 55 loss """softplus""" +75 55 regularizer """no""" +75 55 optimizer """adadelta""" +75 55 training_loop """owa""" +75 55 negative_sampler """basic""" +75 55 evaluator """rankbased""" +76 1 model.output_channels 58.0 +76 1 model.input_dropout 0.33041347182638436 +76 1 model.output_dropout 0.4599542702626834 +76 1 model.feature_map_dropout 0.0791710159934706 +76 1 model.embedding_dim 2.0 +76 1 negative_sampler.num_negs_per_pos 94.0 +76 1 training.batch_size 0.0 +76 2 model.output_channels 63.0 +76 2 model.input_dropout 0.3963428948833117 +76 2 model.output_dropout 0.06173499256524484 +76 2 model.feature_map_dropout 0.3056810655006535 +76 2 model.embedding_dim 2.0 +76 2 negative_sampler.num_negs_per_pos 58.0 +76 2 training.batch_size 1.0 +76 3 model.output_channels 18.0 +76 3 model.input_dropout 0.29438614065258295 +76 3 model.output_dropout 0.40185180985055324 +76 3 model.feature_map_dropout 0.2838667271745797 +76 3 model.embedding_dim 2.0 +76 3 negative_sampler.num_negs_per_pos 75.0 +76 3 training.batch_size 2.0 +76 4 model.output_channels 64.0 +76 4 model.input_dropout 0.17796275814891516 +76 4 model.output_dropout 0.4606622471597409 +76 4 model.feature_map_dropout 0.22478089194079698 +76 4 model.embedding_dim 2.0 +76 4 
negative_sampler.num_negs_per_pos 17.0 +76 4 training.batch_size 1.0 +76 5 model.output_channels 63.0 +76 5 model.input_dropout 0.16291351845090524 +76 5 model.output_dropout 0.3852469513211422 +76 5 model.feature_map_dropout 0.2445966864256215 +76 5 model.embedding_dim 1.0 +76 5 negative_sampler.num_negs_per_pos 17.0 +76 5 training.batch_size 2.0 +76 6 model.output_channels 59.0 +76 6 model.input_dropout 0.11443855625864813 +76 6 model.output_dropout 0.1019571591007602 +76 6 model.feature_map_dropout 0.4168688768003689 +76 6 model.embedding_dim 2.0 +76 6 negative_sampler.num_negs_per_pos 70.0 +76 6 training.batch_size 1.0 +76 7 model.output_channels 35.0 +76 7 model.input_dropout 0.16909632389283175 +76 7 model.output_dropout 0.2770417273331071 +76 7 model.feature_map_dropout 0.1875396395256163 +76 7 model.embedding_dim 2.0 +76 7 negative_sampler.num_negs_per_pos 8.0 +76 7 training.batch_size 0.0 +76 8 model.output_channels 23.0 +76 8 model.input_dropout 0.46983507766927685 +76 8 model.output_dropout 0.4835229179082478 +76 8 model.feature_map_dropout 0.3542980210079785 +76 8 model.embedding_dim 2.0 +76 8 negative_sampler.num_negs_per_pos 8.0 +76 8 training.batch_size 1.0 +76 9 model.output_channels 51.0 +76 9 model.input_dropout 0.0946258034758487 +76 9 model.output_dropout 0.2938965636232522 +76 9 model.feature_map_dropout 0.27209442662667627 +76 9 model.embedding_dim 0.0 +76 9 negative_sampler.num_negs_per_pos 46.0 +76 9 training.batch_size 2.0 +76 10 model.output_channels 28.0 +76 10 model.input_dropout 0.42901281429264404 +76 10 model.output_dropout 0.03324729118814951 +76 10 model.feature_map_dropout 0.03795267733914004 +76 10 model.embedding_dim 0.0 +76 10 negative_sampler.num_negs_per_pos 83.0 +76 10 training.batch_size 2.0 +76 11 model.output_channels 30.0 +76 11 model.input_dropout 0.13982341570800666 +76 11 model.output_dropout 0.09063100629519533 +76 11 model.feature_map_dropout 0.18291667499824393 +76 11 model.embedding_dim 0.0 +76 11 
negative_sampler.num_negs_per_pos 37.0 +76 11 training.batch_size 1.0 +76 12 model.output_channels 36.0 +76 12 model.input_dropout 0.14555800564441895 +76 12 model.output_dropout 0.26643568884665475 +76 12 model.feature_map_dropout 0.17053641182422075 +76 12 model.embedding_dim 2.0 +76 12 negative_sampler.num_negs_per_pos 56.0 +76 12 training.batch_size 2.0 +76 13 model.output_channels 52.0 +76 13 model.input_dropout 0.21281366156977138 +76 13 model.output_dropout 0.40081472080000186 +76 13 model.feature_map_dropout 0.19376024939614417 +76 13 model.embedding_dim 0.0 +76 13 negative_sampler.num_negs_per_pos 99.0 +76 13 training.batch_size 1.0 +76 14 model.output_channels 61.0 +76 14 model.input_dropout 0.38879331468055406 +76 14 model.output_dropout 0.3208893078637652 +76 14 model.feature_map_dropout 0.37597312158683177 +76 14 model.embedding_dim 1.0 +76 14 negative_sampler.num_negs_per_pos 13.0 +76 14 training.batch_size 0.0 +76 15 model.output_channels 39.0 +76 15 model.input_dropout 0.3435957090419295 +76 15 model.output_dropout 0.3954207807621609 +76 15 model.feature_map_dropout 0.39054935455544626 +76 15 model.embedding_dim 2.0 +76 15 negative_sampler.num_negs_per_pos 72.0 +76 15 training.batch_size 2.0 +76 16 model.output_channels 25.0 +76 16 model.input_dropout 0.2690832765909762 +76 16 model.output_dropout 0.1796771061467673 +76 16 model.feature_map_dropout 0.48638088962125675 +76 16 model.embedding_dim 2.0 +76 16 negative_sampler.num_negs_per_pos 3.0 +76 16 training.batch_size 1.0 +76 17 model.output_channels 16.0 +76 17 model.input_dropout 0.24987740953144838 +76 17 model.output_dropout 0.39151871830068335 +76 17 model.feature_map_dropout 0.36861201039186725 +76 17 model.embedding_dim 0.0 +76 17 negative_sampler.num_negs_per_pos 20.0 +76 17 training.batch_size 0.0 +76 18 model.output_channels 22.0 +76 18 model.input_dropout 0.41203049483610477 +76 18 model.output_dropout 0.3246204750011037 +76 18 model.feature_map_dropout 0.12555244161927298 +76 18 
model.embedding_dim 1.0 +76 18 negative_sampler.num_negs_per_pos 29.0 +76 18 training.batch_size 0.0 +76 19 model.output_channels 30.0 +76 19 model.input_dropout 0.41582263632922445 +76 19 model.output_dropout 0.2366546078896843 +76 19 model.feature_map_dropout 0.03258383420896288 +76 19 model.embedding_dim 2.0 +76 19 negative_sampler.num_negs_per_pos 6.0 +76 19 training.batch_size 2.0 +76 20 model.output_channels 29.0 +76 20 model.input_dropout 0.0511939233078228 +76 20 model.output_dropout 0.3325250996927205 +76 20 model.feature_map_dropout 0.44411095846572357 +76 20 model.embedding_dim 2.0 +76 20 negative_sampler.num_negs_per_pos 94.0 +76 20 training.batch_size 1.0 +76 21 model.output_channels 48.0 +76 21 model.input_dropout 0.31590762985190307 +76 21 model.output_dropout 0.48884666056467396 +76 21 model.feature_map_dropout 0.48875396102179686 +76 21 model.embedding_dim 2.0 +76 21 negative_sampler.num_negs_per_pos 21.0 +76 21 training.batch_size 2.0 +76 22 model.output_channels 54.0 +76 22 model.input_dropout 0.42361354762657066 +76 22 model.output_dropout 0.3798862704819048 +76 22 model.feature_map_dropout 0.26111496921102173 +76 22 model.embedding_dim 0.0 +76 22 negative_sampler.num_negs_per_pos 16.0 +76 22 training.batch_size 1.0 +76 23 model.output_channels 60.0 +76 23 model.input_dropout 0.10583514544737305 +76 23 model.output_dropout 0.22865506667740754 +76 23 model.feature_map_dropout 0.014718583784843087 +76 23 model.embedding_dim 0.0 +76 23 negative_sampler.num_negs_per_pos 65.0 +76 23 training.batch_size 1.0 +76 24 model.output_channels 42.0 +76 24 model.input_dropout 0.043726297321109275 +76 24 model.output_dropout 0.13863350379936262 +76 24 model.feature_map_dropout 0.03654344510053953 +76 24 model.embedding_dim 2.0 +76 24 negative_sampler.num_negs_per_pos 72.0 +76 24 training.batch_size 2.0 +76 25 model.output_channels 53.0 +76 25 model.input_dropout 0.3492438792293655 +76 25 model.output_dropout 0.4586748445549972 +76 25 model.feature_map_dropout 
0.17402695716740846 +76 25 model.embedding_dim 0.0 +76 25 negative_sampler.num_negs_per_pos 61.0 +76 25 training.batch_size 1.0 +76 26 model.output_channels 58.0 +76 26 model.input_dropout 0.39675269950104136 +76 26 model.output_dropout 0.1380671864632666 +76 26 model.feature_map_dropout 0.2992599823296568 +76 26 model.embedding_dim 2.0 +76 26 negative_sampler.num_negs_per_pos 80.0 +76 26 training.batch_size 0.0 +76 27 model.output_channels 62.0 +76 27 model.input_dropout 0.15065837078271987 +76 27 model.output_dropout 0.08199119244287134 +76 27 model.feature_map_dropout 0.42599942242989447 +76 27 model.embedding_dim 0.0 +76 27 negative_sampler.num_negs_per_pos 59.0 +76 27 training.batch_size 1.0 +76 28 model.output_channels 48.0 +76 28 model.input_dropout 0.12102067168301467 +76 28 model.output_dropout 0.05701617938658615 +76 28 model.feature_map_dropout 0.3844758194381853 +76 28 model.embedding_dim 0.0 +76 28 negative_sampler.num_negs_per_pos 58.0 +76 28 training.batch_size 2.0 +76 29 model.output_channels 59.0 +76 29 model.input_dropout 0.037456672857929973 +76 29 model.output_dropout 0.34884571088256294 +76 29 model.feature_map_dropout 0.21395897558053428 +76 29 model.embedding_dim 1.0 +76 29 negative_sampler.num_negs_per_pos 43.0 +76 29 training.batch_size 1.0 +76 30 model.output_channels 29.0 +76 30 model.input_dropout 0.46933585430379954 +76 30 model.output_dropout 0.16859562529904887 +76 30 model.feature_map_dropout 0.33087195899938227 +76 30 model.embedding_dim 0.0 +76 30 negative_sampler.num_negs_per_pos 25.0 +76 30 training.batch_size 1.0 +76 31 model.output_channels 44.0 +76 31 model.input_dropout 0.23264991992677558 +76 31 model.output_dropout 0.4209252343440033 +76 31 model.feature_map_dropout 0.07012378091828692 +76 31 model.embedding_dim 0.0 +76 31 negative_sampler.num_negs_per_pos 60.0 +76 31 training.batch_size 2.0 +76 32 model.output_channels 37.0 +76 32 model.input_dropout 0.017183519236050993 +76 32 model.output_dropout 0.47306620756819984 +76 
32 model.feature_map_dropout 0.3074205880522263 +76 32 model.embedding_dim 1.0 +76 32 negative_sampler.num_negs_per_pos 88.0 +76 32 training.batch_size 0.0 +76 33 model.output_channels 54.0 +76 33 model.input_dropout 0.25842981773143675 +76 33 model.output_dropout 0.0286668636183679 +76 33 model.feature_map_dropout 0.29583906950983785 +76 33 model.embedding_dim 1.0 +76 33 negative_sampler.num_negs_per_pos 68.0 +76 33 training.batch_size 0.0 +76 34 model.output_channels 17.0 +76 34 model.input_dropout 0.3832766854278333 +76 34 model.output_dropout 0.023739459588720146 +76 34 model.feature_map_dropout 0.25044380608248684 +76 34 model.embedding_dim 2.0 +76 34 negative_sampler.num_negs_per_pos 60.0 +76 34 training.batch_size 2.0 +76 35 model.output_channels 43.0 +76 35 model.input_dropout 0.48858116625255665 +76 35 model.output_dropout 0.37065963115552164 +76 35 model.feature_map_dropout 0.39218964514154925 +76 35 model.embedding_dim 0.0 +76 35 negative_sampler.num_negs_per_pos 0.0 +76 35 training.batch_size 1.0 +76 36 model.output_channels 28.0 +76 36 model.input_dropout 0.023065364364452723 +76 36 model.output_dropout 0.4613485570738617 +76 36 model.feature_map_dropout 0.10665893185664027 +76 36 model.embedding_dim 1.0 +76 36 negative_sampler.num_negs_per_pos 10.0 +76 36 training.batch_size 2.0 +76 37 model.output_channels 17.0 +76 37 model.input_dropout 0.33436080634304194 +76 37 model.output_dropout 0.4859644596095876 +76 37 model.feature_map_dropout 0.017300408091262764 +76 37 model.embedding_dim 1.0 +76 37 negative_sampler.num_negs_per_pos 90.0 +76 37 training.batch_size 2.0 +76 38 model.output_channels 19.0 +76 38 model.input_dropout 0.349902895297973 +76 38 model.output_dropout 0.017925308341259216 +76 38 model.feature_map_dropout 0.12949339600719129 +76 38 model.embedding_dim 0.0 +76 38 negative_sampler.num_negs_per_pos 97.0 +76 38 training.batch_size 2.0 +76 39 model.output_channels 32.0 +76 39 model.input_dropout 0.18839306626341484 +76 39 
model.output_dropout 0.15140089659726003 +76 39 model.feature_map_dropout 0.40673259566107695 +76 39 model.embedding_dim 2.0 +76 39 negative_sampler.num_negs_per_pos 10.0 +76 39 training.batch_size 2.0 +76 40 model.output_channels 34.0 +76 40 model.input_dropout 0.009588831352620009 +76 40 model.output_dropout 0.09017477402254381 +76 40 model.feature_map_dropout 0.4026221881982098 +76 40 model.embedding_dim 0.0 +76 40 negative_sampler.num_negs_per_pos 88.0 +76 40 training.batch_size 2.0 +76 41 model.output_channels 43.0 +76 41 model.input_dropout 0.05417789858326866 +76 41 model.output_dropout 0.4367953824229362 +76 41 model.feature_map_dropout 0.4870460314389562 +76 41 model.embedding_dim 0.0 +76 41 negative_sampler.num_negs_per_pos 67.0 +76 41 training.batch_size 2.0 +76 42 model.output_channels 50.0 +76 42 model.input_dropout 0.09703422799045747 +76 42 model.output_dropout 0.28064758381479854 +76 42 model.feature_map_dropout 0.4152531787362288 +76 42 model.embedding_dim 2.0 +76 42 negative_sampler.num_negs_per_pos 40.0 +76 42 training.batch_size 2.0 +76 43 model.output_channels 55.0 +76 43 model.input_dropout 0.46842490735523007 +76 43 model.output_dropout 0.286497683522705 +76 43 model.feature_map_dropout 0.0316239440139468 +76 43 model.embedding_dim 1.0 +76 43 negative_sampler.num_negs_per_pos 97.0 +76 43 training.batch_size 1.0 +76 44 model.output_channels 36.0 +76 44 model.input_dropout 0.46137021979923987 +76 44 model.output_dropout 0.36600524939079826 +76 44 model.feature_map_dropout 0.44310839951610115 +76 44 model.embedding_dim 0.0 +76 44 negative_sampler.num_negs_per_pos 11.0 +76 44 training.batch_size 2.0 +76 45 model.output_channels 43.0 +76 45 model.input_dropout 0.0168592355412312 +76 45 model.output_dropout 0.2899067936269213 +76 45 model.feature_map_dropout 0.3742605497667387 +76 45 model.embedding_dim 1.0 +76 45 negative_sampler.num_negs_per_pos 79.0 +76 45 training.batch_size 2.0 +76 46 model.output_channels 26.0 +76 46 model.input_dropout 
0.00047824141209013016 +76 46 model.output_dropout 0.016707162869922187 +76 46 model.feature_map_dropout 0.40314676935065186 +76 46 model.embedding_dim 1.0 +76 46 negative_sampler.num_negs_per_pos 54.0 +76 46 training.batch_size 0.0 +76 47 model.output_channels 48.0 +76 47 model.input_dropout 0.3355689677152885 +76 47 model.output_dropout 0.08145922203468697 +76 47 model.feature_map_dropout 0.17789716463344413 +76 47 model.embedding_dim 2.0 +76 47 negative_sampler.num_negs_per_pos 91.0 +76 47 training.batch_size 2.0 +76 48 model.output_channels 44.0 +76 48 model.input_dropout 0.4756062608395288 +76 48 model.output_dropout 0.4564863035380607 +76 48 model.feature_map_dropout 0.4841303630285943 +76 48 model.embedding_dim 0.0 +76 48 negative_sampler.num_negs_per_pos 22.0 +76 48 training.batch_size 1.0 +76 49 model.output_channels 49.0 +76 49 model.input_dropout 0.4721690274256737 +76 49 model.output_dropout 0.3439402745153221 +76 49 model.feature_map_dropout 0.3854658867549087 +76 49 model.embedding_dim 2.0 +76 49 negative_sampler.num_negs_per_pos 80.0 +76 49 training.batch_size 2.0 +76 50 model.output_channels 57.0 +76 50 model.input_dropout 0.4202607694507751 +76 50 model.output_dropout 0.00478348303822107 +76 50 model.feature_map_dropout 0.021075007173011195 +76 50 model.embedding_dim 1.0 +76 50 negative_sampler.num_negs_per_pos 90.0 +76 50 training.batch_size 1.0 +76 51 model.output_channels 64.0 +76 51 model.input_dropout 0.3093805903444972 +76 51 model.output_dropout 0.04764314159763544 +76 51 model.feature_map_dropout 0.29995885042110887 +76 51 model.embedding_dim 0.0 +76 51 negative_sampler.num_negs_per_pos 81.0 +76 51 training.batch_size 0.0 +76 52 model.output_channels 54.0 +76 52 model.input_dropout 0.48443827847901205 +76 52 model.output_dropout 0.22628780776154422 +76 52 model.feature_map_dropout 0.05879977084084481 +76 52 model.embedding_dim 0.0 +76 52 negative_sampler.num_negs_per_pos 23.0 +76 52 training.batch_size 0.0 +76 53 model.output_channels 37.0 
+76 53 model.input_dropout 0.49617426883111426 +76 53 model.output_dropout 0.4243348580385284 +76 53 model.feature_map_dropout 0.4438403936478009 +76 53 model.embedding_dim 1.0 +76 53 negative_sampler.num_negs_per_pos 69.0 +76 53 training.batch_size 0.0 +76 54 model.output_channels 31.0 +76 54 model.input_dropout 0.19770440242153936 +76 54 model.output_dropout 0.25131230483336964 +76 54 model.feature_map_dropout 0.3513434065177411 +76 54 model.embedding_dim 2.0 +76 54 negative_sampler.num_negs_per_pos 70.0 +76 54 training.batch_size 0.0 +76 55 model.output_channels 50.0 +76 55 model.input_dropout 0.005902061657174995 +76 55 model.output_dropout 0.08396474542396964 +76 55 model.feature_map_dropout 0.03975168354924824 +76 55 model.embedding_dim 0.0 +76 55 negative_sampler.num_negs_per_pos 28.0 +76 55 training.batch_size 0.0 +76 56 model.output_channels 28.0 +76 56 model.input_dropout 0.33750853500688893 +76 56 model.output_dropout 0.09013609926551203 +76 56 model.feature_map_dropout 0.4018383769939104 +76 56 model.embedding_dim 2.0 +76 56 negative_sampler.num_negs_per_pos 40.0 +76 56 training.batch_size 1.0 +76 57 model.output_channels 33.0 +76 57 model.input_dropout 0.3952040961810033 +76 57 model.output_dropout 0.4071795642395477 +76 57 model.feature_map_dropout 0.2788220018917051 +76 57 model.embedding_dim 2.0 +76 57 negative_sampler.num_negs_per_pos 80.0 +76 57 training.batch_size 2.0 +76 58 model.output_channels 27.0 +76 58 model.input_dropout 0.22733361818713044 +76 58 model.output_dropout 0.0024371927330762433 +76 58 model.feature_map_dropout 0.39822973241635845 +76 58 model.embedding_dim 0.0 +76 58 negative_sampler.num_negs_per_pos 46.0 +76 58 training.batch_size 0.0 +76 59 model.output_channels 47.0 +76 59 model.input_dropout 0.2588399009434698 +76 59 model.output_dropout 0.21475939563065355 +76 59 model.feature_map_dropout 0.397746295443261 +76 59 model.embedding_dim 1.0 +76 59 negative_sampler.num_negs_per_pos 85.0 +76 59 training.batch_size 1.0 +76 60 
model.output_channels 35.0 +76 60 model.input_dropout 0.01060800434884468 +76 60 model.output_dropout 0.2225520847179635 +76 60 model.feature_map_dropout 0.15730101141389358 +76 60 model.embedding_dim 2.0 +76 60 negative_sampler.num_negs_per_pos 13.0 +76 60 training.batch_size 2.0 +76 61 model.output_channels 64.0 +76 61 model.input_dropout 0.4011052906480393 +76 61 model.output_dropout 0.4323745001518393 +76 61 model.feature_map_dropout 0.17642767524424285 +76 61 model.embedding_dim 2.0 +76 61 negative_sampler.num_negs_per_pos 22.0 +76 61 training.batch_size 1.0 +76 62 model.output_channels 27.0 +76 62 model.input_dropout 0.40893883104994994 +76 62 model.output_dropout 0.4766089462735371 +76 62 model.feature_map_dropout 0.3175156638962431 +76 62 model.embedding_dim 0.0 +76 62 negative_sampler.num_negs_per_pos 71.0 +76 62 training.batch_size 2.0 +76 63 model.output_channels 38.0 +76 63 model.input_dropout 0.3821259873670987 +76 63 model.output_dropout 0.11251193538125481 +76 63 model.feature_map_dropout 0.3492179248918903 +76 63 model.embedding_dim 2.0 +76 63 negative_sampler.num_negs_per_pos 25.0 +76 63 training.batch_size 0.0 +76 64 model.output_channels 22.0 +76 64 model.input_dropout 0.0478790297208303 +76 64 model.output_dropout 0.22611417094307312 +76 64 model.feature_map_dropout 0.4269965765137838 +76 64 model.embedding_dim 2.0 +76 64 negative_sampler.num_negs_per_pos 26.0 +76 64 training.batch_size 1.0 +76 65 model.output_channels 47.0 +76 65 model.input_dropout 0.18794024705057188 +76 65 model.output_dropout 0.27070496619733503 +76 65 model.feature_map_dropout 0.4095698060624451 +76 65 model.embedding_dim 1.0 +76 65 negative_sampler.num_negs_per_pos 86.0 +76 65 training.batch_size 0.0 +76 66 model.output_channels 31.0 +76 66 model.input_dropout 0.16105123711439284 +76 66 model.output_dropout 0.030300511496945015 +76 66 model.feature_map_dropout 0.32883483425380855 +76 66 model.embedding_dim 2.0 +76 66 negative_sampler.num_negs_per_pos 73.0 +76 66 
training.batch_size 2.0 +76 67 model.output_channels 29.0 +76 67 model.input_dropout 0.4265132132825553 +76 67 model.output_dropout 0.42633851512484483 +76 67 model.feature_map_dropout 0.10631355770903778 +76 67 model.embedding_dim 1.0 +76 67 negative_sampler.num_negs_per_pos 72.0 +76 67 training.batch_size 0.0 +76 68 model.output_channels 44.0 +76 68 model.input_dropout 0.06745913114020335 +76 68 model.output_dropout 0.19026682997687527 +76 68 model.feature_map_dropout 0.35023962877361436 +76 68 model.embedding_dim 1.0 +76 68 negative_sampler.num_negs_per_pos 33.0 +76 68 training.batch_size 1.0 +76 69 model.output_channels 20.0 +76 69 model.input_dropout 0.36207384906109324 +76 69 model.output_dropout 0.0716162831541694 +76 69 model.feature_map_dropout 0.390813914429784 +76 69 model.embedding_dim 2.0 +76 69 negative_sampler.num_negs_per_pos 75.0 +76 69 training.batch_size 0.0 +76 1 dataset """kinships""" +76 1 model """conve""" +76 1 loss """bceaftersigmoid""" +76 1 regularizer """no""" +76 1 optimizer """adadelta""" +76 1 training_loop """owa""" +76 1 negative_sampler """basic""" +76 1 evaluator """rankbased""" +76 2 dataset """kinships""" +76 2 model """conve""" +76 2 loss """bceaftersigmoid""" +76 2 regularizer """no""" +76 2 optimizer """adadelta""" +76 2 training_loop """owa""" +76 2 negative_sampler """basic""" +76 2 evaluator """rankbased""" +76 3 dataset """kinships""" +76 3 model """conve""" +76 3 loss """bceaftersigmoid""" +76 3 regularizer """no""" +76 3 optimizer """adadelta""" +76 3 training_loop """owa""" +76 3 negative_sampler """basic""" +76 3 evaluator """rankbased""" +76 4 dataset """kinships""" +76 4 model """conve""" +76 4 loss """bceaftersigmoid""" +76 4 regularizer """no""" +76 4 optimizer """adadelta""" +76 4 training_loop """owa""" +76 4 negative_sampler """basic""" +76 4 evaluator """rankbased""" +76 5 dataset """kinships""" +76 5 model """conve""" +76 5 loss """bceaftersigmoid""" +76 5 regularizer """no""" +76 5 optimizer """adadelta""" 
+76 5 training_loop """owa""" +76 5 negative_sampler """basic""" +76 5 evaluator """rankbased""" +76 6 dataset """kinships""" +76 6 model """conve""" +76 6 loss """bceaftersigmoid""" +76 6 regularizer """no""" +76 6 optimizer """adadelta""" +76 6 training_loop """owa""" +76 6 negative_sampler """basic""" +76 6 evaluator """rankbased""" +76 7 dataset """kinships""" +76 7 model """conve""" +76 7 loss """bceaftersigmoid""" +76 7 regularizer """no""" +76 7 optimizer """adadelta""" +76 7 training_loop """owa""" +76 7 negative_sampler """basic""" +76 7 evaluator """rankbased""" +76 8 dataset """kinships""" +76 8 model """conve""" +76 8 loss """bceaftersigmoid""" +76 8 regularizer """no""" +76 8 optimizer """adadelta""" +76 8 training_loop """owa""" +76 8 negative_sampler """basic""" +76 8 evaluator """rankbased""" +76 9 dataset """kinships""" +76 9 model """conve""" +76 9 loss """bceaftersigmoid""" +76 9 regularizer """no""" +76 9 optimizer """adadelta""" +76 9 training_loop """owa""" +76 9 negative_sampler """basic""" +76 9 evaluator """rankbased""" +76 10 dataset """kinships""" +76 10 model """conve""" +76 10 loss """bceaftersigmoid""" +76 10 regularizer """no""" +76 10 optimizer """adadelta""" +76 10 training_loop """owa""" +76 10 negative_sampler """basic""" +76 10 evaluator """rankbased""" +76 11 dataset """kinships""" +76 11 model """conve""" +76 11 loss """bceaftersigmoid""" +76 11 regularizer """no""" +76 11 optimizer """adadelta""" +76 11 training_loop """owa""" +76 11 negative_sampler """basic""" +76 11 evaluator """rankbased""" +76 12 dataset """kinships""" +76 12 model """conve""" +76 12 loss """bceaftersigmoid""" +76 12 regularizer """no""" +76 12 optimizer """adadelta""" +76 12 training_loop """owa""" +76 12 negative_sampler """basic""" +76 12 evaluator """rankbased""" +76 13 dataset """kinships""" +76 13 model """conve""" +76 13 loss """bceaftersigmoid""" +76 13 regularizer """no""" +76 13 optimizer """adadelta""" +76 13 training_loop """owa""" +76 13 
negative_sampler """basic""" +76 13 evaluator """rankbased""" +76 14 dataset """kinships""" +76 14 model """conve""" +76 14 loss """bceaftersigmoid""" +76 14 regularizer """no""" +76 14 optimizer """adadelta""" +76 14 training_loop """owa""" +76 14 negative_sampler """basic""" +76 14 evaluator """rankbased""" +76 15 dataset """kinships""" +76 15 model """conve""" +76 15 loss """bceaftersigmoid""" +76 15 regularizer """no""" +76 15 optimizer """adadelta""" +76 15 training_loop """owa""" +76 15 negative_sampler """basic""" +76 15 evaluator """rankbased""" +76 16 dataset """kinships""" +76 16 model """conve""" +76 16 loss """bceaftersigmoid""" +76 16 regularizer """no""" +76 16 optimizer """adadelta""" +76 16 training_loop """owa""" +76 16 negative_sampler """basic""" +76 16 evaluator """rankbased""" +76 17 dataset """kinships""" +76 17 model """conve""" +76 17 loss """bceaftersigmoid""" +76 17 regularizer """no""" +76 17 optimizer """adadelta""" +76 17 training_loop """owa""" +76 17 negative_sampler """basic""" +76 17 evaluator """rankbased""" +76 18 dataset """kinships""" +76 18 model """conve""" +76 18 loss """bceaftersigmoid""" +76 18 regularizer """no""" +76 18 optimizer """adadelta""" +76 18 training_loop """owa""" +76 18 negative_sampler """basic""" +76 18 evaluator """rankbased""" +76 19 dataset """kinships""" +76 19 model """conve""" +76 19 loss """bceaftersigmoid""" +76 19 regularizer """no""" +76 19 optimizer """adadelta""" +76 19 training_loop """owa""" +76 19 negative_sampler """basic""" +76 19 evaluator """rankbased""" +76 20 dataset """kinships""" +76 20 model """conve""" +76 20 loss """bceaftersigmoid""" +76 20 regularizer """no""" +76 20 optimizer """adadelta""" +76 20 training_loop """owa""" +76 20 negative_sampler """basic""" +76 20 evaluator """rankbased""" +76 21 dataset """kinships""" +76 21 model """conve""" +76 21 loss """bceaftersigmoid""" +76 21 regularizer """no""" +76 21 optimizer """adadelta""" +76 21 training_loop """owa""" +76 21 
negative_sampler """basic""" +76 21 evaluator """rankbased""" +76 22 dataset """kinships""" +76 22 model """conve""" +76 22 loss """bceaftersigmoid""" +76 22 regularizer """no""" +76 22 optimizer """adadelta""" +76 22 training_loop """owa""" +76 22 negative_sampler """basic""" +76 22 evaluator """rankbased""" +76 23 dataset """kinships""" +76 23 model """conve""" +76 23 loss """bceaftersigmoid""" +76 23 regularizer """no""" +76 23 optimizer """adadelta""" +76 23 training_loop """owa""" +76 23 negative_sampler """basic""" +76 23 evaluator """rankbased""" +76 24 dataset """kinships""" +76 24 model """conve""" +76 24 loss """bceaftersigmoid""" +76 24 regularizer """no""" +76 24 optimizer """adadelta""" +76 24 training_loop """owa""" +76 24 negative_sampler """basic""" +76 24 evaluator """rankbased""" +76 25 dataset """kinships""" +76 25 model """conve""" +76 25 loss """bceaftersigmoid""" +76 25 regularizer """no""" +76 25 optimizer """adadelta""" +76 25 training_loop """owa""" +76 25 negative_sampler """basic""" +76 25 evaluator """rankbased""" +76 26 dataset """kinships""" +76 26 model """conve""" +76 26 loss """bceaftersigmoid""" +76 26 regularizer """no""" +76 26 optimizer """adadelta""" +76 26 training_loop """owa""" +76 26 negative_sampler """basic""" +76 26 evaluator """rankbased""" +76 27 dataset """kinships""" +76 27 model """conve""" +76 27 loss """bceaftersigmoid""" +76 27 regularizer """no""" +76 27 optimizer """adadelta""" +76 27 training_loop """owa""" +76 27 negative_sampler """basic""" +76 27 evaluator """rankbased""" +76 28 dataset """kinships""" +76 28 model """conve""" +76 28 loss """bceaftersigmoid""" +76 28 regularizer """no""" +76 28 optimizer """adadelta""" +76 28 training_loop """owa""" +76 28 negative_sampler """basic""" +76 28 evaluator """rankbased""" +76 29 dataset """kinships""" +76 29 model """conve""" +76 29 loss """bceaftersigmoid""" +76 29 regularizer """no""" +76 29 optimizer """adadelta""" +76 29 training_loop """owa""" +76 29 
negative_sampler """basic""" +76 29 evaluator """rankbased""" +76 30 dataset """kinships""" +76 30 model """conve""" +76 30 loss """bceaftersigmoid""" +76 30 regularizer """no""" +76 30 optimizer """adadelta""" +76 30 training_loop """owa""" +76 30 negative_sampler """basic""" +76 30 evaluator """rankbased""" +76 31 dataset """kinships""" +76 31 model """conve""" +76 31 loss """bceaftersigmoid""" +76 31 regularizer """no""" +76 31 optimizer """adadelta""" +76 31 training_loop """owa""" +76 31 negative_sampler """basic""" +76 31 evaluator """rankbased""" +76 32 dataset """kinships""" +76 32 model """conve""" +76 32 loss """bceaftersigmoid""" +76 32 regularizer """no""" +76 32 optimizer """adadelta""" +76 32 training_loop """owa""" +76 32 negative_sampler """basic""" +76 32 evaluator """rankbased""" +76 33 dataset """kinships""" +76 33 model """conve""" +76 33 loss """bceaftersigmoid""" +76 33 regularizer """no""" +76 33 optimizer """adadelta""" +76 33 training_loop """owa""" +76 33 negative_sampler """basic""" +76 33 evaluator """rankbased""" +76 34 dataset """kinships""" +76 34 model """conve""" +76 34 loss """bceaftersigmoid""" +76 34 regularizer """no""" +76 34 optimizer """adadelta""" +76 34 training_loop """owa""" +76 34 negative_sampler """basic""" +76 34 evaluator """rankbased""" +76 35 dataset """kinships""" +76 35 model """conve""" +76 35 loss """bceaftersigmoid""" +76 35 regularizer """no""" +76 35 optimizer """adadelta""" +76 35 training_loop """owa""" +76 35 negative_sampler """basic""" +76 35 evaluator """rankbased""" +76 36 dataset """kinships""" +76 36 model """conve""" +76 36 loss """bceaftersigmoid""" +76 36 regularizer """no""" +76 36 optimizer """adadelta""" +76 36 training_loop """owa""" +76 36 negative_sampler """basic""" +76 36 evaluator """rankbased""" +76 37 dataset """kinships""" +76 37 model """conve""" +76 37 loss """bceaftersigmoid""" +76 37 regularizer """no""" +76 37 optimizer """adadelta""" +76 37 training_loop """owa""" +76 37 
negative_sampler """basic""" +76 37 evaluator """rankbased""" +76 38 dataset """kinships""" +76 38 model """conve""" +76 38 loss """bceaftersigmoid""" +76 38 regularizer """no""" +76 38 optimizer """adadelta""" +76 38 training_loop """owa""" +76 38 negative_sampler """basic""" +76 38 evaluator """rankbased""" +76 39 dataset """kinships""" +76 39 model """conve""" +76 39 loss """bceaftersigmoid""" +76 39 regularizer """no""" +76 39 optimizer """adadelta""" +76 39 training_loop """owa""" +76 39 negative_sampler """basic""" +76 39 evaluator """rankbased""" +76 40 dataset """kinships""" +76 40 model """conve""" +76 40 loss """bceaftersigmoid""" +76 40 regularizer """no""" +76 40 optimizer """adadelta""" +76 40 training_loop """owa""" +76 40 negative_sampler """basic""" +76 40 evaluator """rankbased""" +76 41 dataset """kinships""" +76 41 model """conve""" +76 41 loss """bceaftersigmoid""" +76 41 regularizer """no""" +76 41 optimizer """adadelta""" +76 41 training_loop """owa""" +76 41 negative_sampler """basic""" +76 41 evaluator """rankbased""" +76 42 dataset """kinships""" +76 42 model """conve""" +76 42 loss """bceaftersigmoid""" +76 42 regularizer """no""" +76 42 optimizer """adadelta""" +76 42 training_loop """owa""" +76 42 negative_sampler """basic""" +76 42 evaluator """rankbased""" +76 43 dataset """kinships""" +76 43 model """conve""" +76 43 loss """bceaftersigmoid""" +76 43 regularizer """no""" +76 43 optimizer """adadelta""" +76 43 training_loop """owa""" +76 43 negative_sampler """basic""" +76 43 evaluator """rankbased""" +76 44 dataset """kinships""" +76 44 model """conve""" +76 44 loss """bceaftersigmoid""" +76 44 regularizer """no""" +76 44 optimizer """adadelta""" +76 44 training_loop """owa""" +76 44 negative_sampler """basic""" +76 44 evaluator """rankbased""" +76 45 dataset """kinships""" +76 45 model """conve""" +76 45 loss """bceaftersigmoid""" +76 45 regularizer """no""" +76 45 optimizer """adadelta""" +76 45 training_loop """owa""" +76 45 
negative_sampler """basic""" +76 45 evaluator """rankbased""" +76 46 dataset """kinships""" +76 46 model """conve""" +76 46 loss """bceaftersigmoid""" +76 46 regularizer """no""" +76 46 optimizer """adadelta""" +76 46 training_loop """owa""" +76 46 negative_sampler """basic""" +76 46 evaluator """rankbased""" +76 47 dataset """kinships""" +76 47 model """conve""" +76 47 loss """bceaftersigmoid""" +76 47 regularizer """no""" +76 47 optimizer """adadelta""" +76 47 training_loop """owa""" +76 47 negative_sampler """basic""" +76 47 evaluator """rankbased""" +76 48 dataset """kinships""" +76 48 model """conve""" +76 48 loss """bceaftersigmoid""" +76 48 regularizer """no""" +76 48 optimizer """adadelta""" +76 48 training_loop """owa""" +76 48 negative_sampler """basic""" +76 48 evaluator """rankbased""" +76 49 dataset """kinships""" +76 49 model """conve""" +76 49 loss """bceaftersigmoid""" +76 49 regularizer """no""" +76 49 optimizer """adadelta""" +76 49 training_loop """owa""" +76 49 negative_sampler """basic""" +76 49 evaluator """rankbased""" +76 50 dataset """kinships""" +76 50 model """conve""" +76 50 loss """bceaftersigmoid""" +76 50 regularizer """no""" +76 50 optimizer """adadelta""" +76 50 training_loop """owa""" +76 50 negative_sampler """basic""" +76 50 evaluator """rankbased""" +76 51 dataset """kinships""" +76 51 model """conve""" +76 51 loss """bceaftersigmoid""" +76 51 regularizer """no""" +76 51 optimizer """adadelta""" +76 51 training_loop """owa""" +76 51 negative_sampler """basic""" +76 51 evaluator """rankbased""" +76 52 dataset """kinships""" +76 52 model """conve""" +76 52 loss """bceaftersigmoid""" +76 52 regularizer """no""" +76 52 optimizer """adadelta""" +76 52 training_loop """owa""" +76 52 negative_sampler """basic""" +76 52 evaluator """rankbased""" +76 53 dataset """kinships""" +76 53 model """conve""" +76 53 loss """bceaftersigmoid""" +76 53 regularizer """no""" +76 53 optimizer """adadelta""" +76 53 training_loop """owa""" +76 53 
negative_sampler """basic""" +76 53 evaluator """rankbased""" +76 54 dataset """kinships""" +76 54 model """conve""" +76 54 loss """bceaftersigmoid""" +76 54 regularizer """no""" +76 54 optimizer """adadelta""" +76 54 training_loop """owa""" +76 54 negative_sampler """basic""" +76 54 evaluator """rankbased""" +76 55 dataset """kinships""" +76 55 model """conve""" +76 55 loss """bceaftersigmoid""" +76 55 regularizer """no""" +76 55 optimizer """adadelta""" +76 55 training_loop """owa""" +76 55 negative_sampler """basic""" +76 55 evaluator """rankbased""" +76 56 dataset """kinships""" +76 56 model """conve""" +76 56 loss """bceaftersigmoid""" +76 56 regularizer """no""" +76 56 optimizer """adadelta""" +76 56 training_loop """owa""" +76 56 negative_sampler """basic""" +76 56 evaluator """rankbased""" +76 57 dataset """kinships""" +76 57 model """conve""" +76 57 loss """bceaftersigmoid""" +76 57 regularizer """no""" +76 57 optimizer """adadelta""" +76 57 training_loop """owa""" +76 57 negative_sampler """basic""" +76 57 evaluator """rankbased""" +76 58 dataset """kinships""" +76 58 model """conve""" +76 58 loss """bceaftersigmoid""" +76 58 regularizer """no""" +76 58 optimizer """adadelta""" +76 58 training_loop """owa""" +76 58 negative_sampler """basic""" +76 58 evaluator """rankbased""" +76 59 dataset """kinships""" +76 59 model """conve""" +76 59 loss """bceaftersigmoid""" +76 59 regularizer """no""" +76 59 optimizer """adadelta""" +76 59 training_loop """owa""" +76 59 negative_sampler """basic""" +76 59 evaluator """rankbased""" +76 60 dataset """kinships""" +76 60 model """conve""" +76 60 loss """bceaftersigmoid""" +76 60 regularizer """no""" +76 60 optimizer """adadelta""" +76 60 training_loop """owa""" +76 60 negative_sampler """basic""" +76 60 evaluator """rankbased""" +76 61 dataset """kinships""" +76 61 model """conve""" +76 61 loss """bceaftersigmoid""" +76 61 regularizer """no""" +76 61 optimizer """adadelta""" +76 61 training_loop """owa""" +76 61 
negative_sampler """basic""" +76 61 evaluator """rankbased""" +76 62 dataset """kinships""" +76 62 model """conve""" +76 62 loss """bceaftersigmoid""" +76 62 regularizer """no""" +76 62 optimizer """adadelta""" +76 62 training_loop """owa""" +76 62 negative_sampler """basic""" +76 62 evaluator """rankbased""" +76 63 dataset """kinships""" +76 63 model """conve""" +76 63 loss """bceaftersigmoid""" +76 63 regularizer """no""" +76 63 optimizer """adadelta""" +76 63 training_loop """owa""" +76 63 negative_sampler """basic""" +76 63 evaluator """rankbased""" +76 64 dataset """kinships""" +76 64 model """conve""" +76 64 loss """bceaftersigmoid""" +76 64 regularizer """no""" +76 64 optimizer """adadelta""" +76 64 training_loop """owa""" +76 64 negative_sampler """basic""" +76 64 evaluator """rankbased""" +76 65 dataset """kinships""" +76 65 model """conve""" +76 65 loss """bceaftersigmoid""" +76 65 regularizer """no""" +76 65 optimizer """adadelta""" +76 65 training_loop """owa""" +76 65 negative_sampler """basic""" +76 65 evaluator """rankbased""" +76 66 dataset """kinships""" +76 66 model """conve""" +76 66 loss """bceaftersigmoid""" +76 66 regularizer """no""" +76 66 optimizer """adadelta""" +76 66 training_loop """owa""" +76 66 negative_sampler """basic""" +76 66 evaluator """rankbased""" +76 67 dataset """kinships""" +76 67 model """conve""" +76 67 loss """bceaftersigmoid""" +76 67 regularizer """no""" +76 67 optimizer """adadelta""" +76 67 training_loop """owa""" +76 67 negative_sampler """basic""" +76 67 evaluator """rankbased""" +76 68 dataset """kinships""" +76 68 model """conve""" +76 68 loss """bceaftersigmoid""" +76 68 regularizer """no""" +76 68 optimizer """adadelta""" +76 68 training_loop """owa""" +76 68 negative_sampler """basic""" +76 68 evaluator """rankbased""" +76 69 dataset """kinships""" +76 69 model """conve""" +76 69 loss """bceaftersigmoid""" +76 69 regularizer """no""" +76 69 optimizer """adadelta""" +76 69 training_loop """owa""" +76 69 
negative_sampler """basic""" +76 69 evaluator """rankbased""" +77 1 model.output_channels 28.0 +77 1 model.input_dropout 0.2774714396638874 +77 1 model.output_dropout 0.29847724698619005 +77 1 model.feature_map_dropout 0.3586425878994423 +77 1 model.embedding_dim 1.0 +77 1 negative_sampler.num_negs_per_pos 4.0 +77 1 training.batch_size 1.0 +77 2 model.output_channels 22.0 +77 2 model.input_dropout 0.2059795000529303 +77 2 model.output_dropout 0.4961987367311214 +77 2 model.feature_map_dropout 0.04196867331674048 +77 2 model.embedding_dim 0.0 +77 2 negative_sampler.num_negs_per_pos 5.0 +77 2 training.batch_size 2.0 +77 3 model.output_channels 32.0 +77 3 model.input_dropout 0.3124834700155238 +77 3 model.output_dropout 0.06159851200051619 +77 3 model.feature_map_dropout 0.47873221192306725 +77 3 model.embedding_dim 0.0 +77 3 negative_sampler.num_negs_per_pos 23.0 +77 3 training.batch_size 0.0 +77 4 model.output_channels 16.0 +77 4 model.input_dropout 0.3614241505397052 +77 4 model.output_dropout 0.0006692443566679862 +77 4 model.feature_map_dropout 0.36287061496710304 +77 4 model.embedding_dim 2.0 +77 4 negative_sampler.num_negs_per_pos 46.0 +77 4 training.batch_size 2.0 +77 5 model.output_channels 17.0 +77 5 model.input_dropout 0.44550172226707496 +77 5 model.output_dropout 0.37693124791997556 +77 5 model.feature_map_dropout 0.14131107451075275 +77 5 model.embedding_dim 1.0 +77 5 negative_sampler.num_negs_per_pos 59.0 +77 5 training.batch_size 1.0 +77 6 model.output_channels 37.0 +77 6 model.input_dropout 0.4984001434093634 +77 6 model.output_dropout 0.10805830105244696 +77 6 model.feature_map_dropout 0.4871798163434994 +77 6 model.embedding_dim 2.0 +77 6 negative_sampler.num_negs_per_pos 70.0 +77 6 training.batch_size 1.0 +77 7 model.output_channels 47.0 +77 7 model.input_dropout 0.24145597522317097 +77 7 model.output_dropout 0.2608279527413746 +77 7 model.feature_map_dropout 0.28934953772334404 +77 7 model.embedding_dim 0.0 +77 7 negative_sampler.num_negs_per_pos 
28.0 +77 7 training.batch_size 2.0 +77 8 model.output_channels 60.0 +77 8 model.input_dropout 0.481466854697501 +77 8 model.output_dropout 0.1605973418275472 +77 8 model.feature_map_dropout 0.3724290251882924 +77 8 model.embedding_dim 1.0 +77 8 negative_sampler.num_negs_per_pos 91.0 +77 8 training.batch_size 0.0 +77 9 model.output_channels 36.0 +77 9 model.input_dropout 0.30978642919193816 +77 9 model.output_dropout 0.04916409627304369 +77 9 model.feature_map_dropout 0.36054020266090947 +77 9 model.embedding_dim 1.0 +77 9 negative_sampler.num_negs_per_pos 59.0 +77 9 training.batch_size 2.0 +77 10 model.output_channels 58.0 +77 10 model.input_dropout 0.11494745463435235 +77 10 model.output_dropout 0.29822077811338943 +77 10 model.feature_map_dropout 0.4047692134148683 +77 10 model.embedding_dim 2.0 +77 10 negative_sampler.num_negs_per_pos 68.0 +77 10 training.batch_size 1.0 +77 11 model.output_channels 38.0 +77 11 model.input_dropout 0.14920530267848103 +77 11 model.output_dropout 0.47851014312581985 +77 11 model.feature_map_dropout 0.10583649618723678 +77 11 model.embedding_dim 2.0 +77 11 negative_sampler.num_negs_per_pos 11.0 +77 11 training.batch_size 1.0 +77 12 model.output_channels 59.0 +77 12 model.input_dropout 0.3650342872506134 +77 12 model.output_dropout 0.20023215693724222 +77 12 model.feature_map_dropout 0.2943318230996356 +77 12 model.embedding_dim 2.0 +77 12 negative_sampler.num_negs_per_pos 33.0 +77 12 training.batch_size 2.0 +77 13 model.output_channels 30.0 +77 13 model.input_dropout 0.20996216133792273 +77 13 model.output_dropout 0.40473222621338817 +77 13 model.feature_map_dropout 0.4960846279793545 +77 13 model.embedding_dim 2.0 +77 13 negative_sampler.num_negs_per_pos 74.0 +77 13 training.batch_size 2.0 +77 14 model.output_channels 57.0 +77 14 model.input_dropout 0.26905089459445747 +77 14 model.output_dropout 0.4380901588851988 +77 14 model.feature_map_dropout 0.012974143572829888 +77 14 model.embedding_dim 0.0 +77 14 
negative_sampler.num_negs_per_pos 74.0 +77 14 training.batch_size 2.0 +77 15 model.output_channels 29.0 +77 15 model.input_dropout 0.07721802454288912 +77 15 model.output_dropout 0.18447066781042948 +77 15 model.feature_map_dropout 0.2810133022656522 +77 15 model.embedding_dim 2.0 +77 15 negative_sampler.num_negs_per_pos 75.0 +77 15 training.batch_size 2.0 +77 16 model.output_channels 26.0 +77 16 model.input_dropout 0.39263478555052594 +77 16 model.output_dropout 0.4192608035465699 +77 16 model.feature_map_dropout 0.3653733700744054 +77 16 model.embedding_dim 1.0 +77 16 negative_sampler.num_negs_per_pos 82.0 +77 16 training.batch_size 0.0 +77 17 model.output_channels 16.0 +77 17 model.input_dropout 0.2301080367629656 +77 17 model.output_dropout 0.3010600598458597 +77 17 model.feature_map_dropout 0.32064488689132237 +77 17 model.embedding_dim 0.0 +77 17 negative_sampler.num_negs_per_pos 51.0 +77 17 training.batch_size 1.0 +77 18 model.output_channels 17.0 +77 18 model.input_dropout 0.3810308916957543 +77 18 model.output_dropout 0.015661748685661614 +77 18 model.feature_map_dropout 0.38379803640132615 +77 18 model.embedding_dim 1.0 +77 18 negative_sampler.num_negs_per_pos 11.0 +77 18 training.batch_size 1.0 +77 19 model.output_channels 22.0 +77 19 model.input_dropout 0.42051134580600746 +77 19 model.output_dropout 0.022610344151917317 +77 19 model.feature_map_dropout 0.005390917491822966 +77 19 model.embedding_dim 1.0 +77 19 negative_sampler.num_negs_per_pos 76.0 +77 19 training.batch_size 0.0 +77 20 model.output_channels 35.0 +77 20 model.input_dropout 0.054401355114783945 +77 20 model.output_dropout 0.07810520840946017 +77 20 model.feature_map_dropout 0.11173926135392526 +77 20 model.embedding_dim 2.0 +77 20 negative_sampler.num_negs_per_pos 35.0 +77 20 training.batch_size 2.0 +77 21 model.output_channels 34.0 +77 21 model.input_dropout 0.31421566197191886 +77 21 model.output_dropout 0.39112859281813983 +77 21 model.feature_map_dropout 0.2744650322011697 +77 21 
model.embedding_dim 1.0 +77 21 negative_sampler.num_negs_per_pos 17.0 +77 21 training.batch_size 2.0 +77 22 model.output_channels 17.0 +77 22 model.input_dropout 0.4160532724586452 +77 22 model.output_dropout 0.021304470312443424 +77 22 model.feature_map_dropout 0.3126789258279035 +77 22 model.embedding_dim 1.0 +77 22 negative_sampler.num_negs_per_pos 43.0 +77 22 training.batch_size 0.0 +77 23 model.output_channels 53.0 +77 23 model.input_dropout 0.10420146814846226 +77 23 model.output_dropout 0.13349552597558112 +77 23 model.feature_map_dropout 0.4263711200929097 +77 23 model.embedding_dim 0.0 +77 23 negative_sampler.num_negs_per_pos 71.0 +77 23 training.batch_size 0.0 +77 24 model.output_channels 27.0 +77 24 model.input_dropout 0.3202815010024517 +77 24 model.output_dropout 0.055728030104503445 +77 24 model.feature_map_dropout 0.16489354383013538 +77 24 model.embedding_dim 0.0 +77 24 negative_sampler.num_negs_per_pos 31.0 +77 24 training.batch_size 2.0 +77 25 model.output_channels 55.0 +77 25 model.input_dropout 0.28740903918887256 +77 25 model.output_dropout 0.3813386669922574 +77 25 model.feature_map_dropout 0.10785331815701094 +77 25 model.embedding_dim 0.0 +77 25 negative_sampler.num_negs_per_pos 86.0 +77 25 training.batch_size 0.0 +77 26 model.output_channels 40.0 +77 26 model.input_dropout 0.4645638047355642 +77 26 model.output_dropout 0.40816071420810984 +77 26 model.feature_map_dropout 0.014883803763959702 +77 26 model.embedding_dim 0.0 +77 26 negative_sampler.num_negs_per_pos 10.0 +77 26 training.batch_size 0.0 +77 27 model.output_channels 30.0 +77 27 model.input_dropout 0.3076982934175564 +77 27 model.output_dropout 0.49636556051590436 +77 27 model.feature_map_dropout 0.10300946920585297 +77 27 model.embedding_dim 2.0 +77 27 negative_sampler.num_negs_per_pos 54.0 +77 27 training.batch_size 2.0 +77 28 model.output_channels 48.0 +77 28 model.input_dropout 0.13088939330629557 +77 28 model.output_dropout 0.10786145925980667 +77 28 model.feature_map_dropout 
0.3682056521065353 +77 28 model.embedding_dim 0.0 +77 28 negative_sampler.num_negs_per_pos 80.0 +77 28 training.batch_size 0.0 +77 29 model.output_channels 19.0 +77 29 model.input_dropout 0.4282534404952861 +77 29 model.output_dropout 0.08650635669551987 +77 29 model.feature_map_dropout 0.19233942362964207 +77 29 model.embedding_dim 0.0 +77 29 negative_sampler.num_negs_per_pos 12.0 +77 29 training.batch_size 0.0 +77 30 model.output_channels 25.0 +77 30 model.input_dropout 0.2617452590990696 +77 30 model.output_dropout 0.06602429213759708 +77 30 model.feature_map_dropout 0.022393037946726646 +77 30 model.embedding_dim 1.0 +77 30 negative_sampler.num_negs_per_pos 8.0 +77 30 training.batch_size 2.0 +77 31 model.output_channels 36.0 +77 31 model.input_dropout 0.06271071023154845 +77 31 model.output_dropout 0.27622975310329834 +77 31 model.feature_map_dropout 0.21814852249051014 +77 31 model.embedding_dim 1.0 +77 31 negative_sampler.num_negs_per_pos 32.0 +77 31 training.batch_size 2.0 +77 32 model.output_channels 43.0 +77 32 model.input_dropout 0.3296542876086619 +77 32 model.output_dropout 0.059096776450153876 +77 32 model.feature_map_dropout 0.11363618547427479 +77 32 model.embedding_dim 0.0 +77 32 negative_sampler.num_negs_per_pos 82.0 +77 32 training.batch_size 0.0 +77 33 model.output_channels 47.0 +77 33 model.input_dropout 0.32834846099884835 +77 33 model.output_dropout 0.005932754740600454 +77 33 model.feature_map_dropout 0.17081007963932843 +77 33 model.embedding_dim 2.0 +77 33 negative_sampler.num_negs_per_pos 81.0 +77 33 training.batch_size 0.0 +77 34 model.output_channels 61.0 +77 34 model.input_dropout 0.29554910704080906 +77 34 model.output_dropout 0.45469716215311307 +77 34 model.feature_map_dropout 0.25977615373617813 +77 34 model.embedding_dim 0.0 +77 34 negative_sampler.num_negs_per_pos 15.0 +77 34 training.batch_size 0.0 +77 35 model.output_channels 54.0 +77 35 model.input_dropout 0.2802835494492051 +77 35 model.output_dropout 0.4402844256503495 +77 35 
model.feature_map_dropout 0.2808799241287258 +77 35 model.embedding_dim 2.0 +77 35 negative_sampler.num_negs_per_pos 32.0 +77 35 training.batch_size 0.0 +77 36 model.output_channels 58.0 +77 36 model.input_dropout 0.04858393865893895 +77 36 model.output_dropout 0.40526361736603206 +77 36 model.feature_map_dropout 0.34071482984860085 +77 36 model.embedding_dim 2.0 +77 36 negative_sampler.num_negs_per_pos 55.0 +77 36 training.batch_size 2.0 +77 37 model.output_channels 22.0 +77 37 model.input_dropout 0.3049589861398741 +77 37 model.output_dropout 0.07260396571184152 +77 37 model.feature_map_dropout 0.4403458960170581 +77 37 model.embedding_dim 2.0 +77 37 negative_sampler.num_negs_per_pos 76.0 +77 37 training.batch_size 1.0 +77 38 model.output_channels 24.0 +77 38 model.input_dropout 0.17714645944117136 +77 38 model.output_dropout 0.3140719507307203 +77 38 model.feature_map_dropout 0.46111647186662313 +77 38 model.embedding_dim 0.0 +77 38 negative_sampler.num_negs_per_pos 51.0 +77 38 training.batch_size 2.0 +77 39 model.output_channels 41.0 +77 39 model.input_dropout 0.27853933613412474 +77 39 model.output_dropout 0.4419083034922088 +77 39 model.feature_map_dropout 0.4731708846993474 +77 39 model.embedding_dim 0.0 +77 39 negative_sampler.num_negs_per_pos 35.0 +77 39 training.batch_size 2.0 +77 40 model.output_channels 49.0 +77 40 model.input_dropout 0.46085911405860563 +77 40 model.output_dropout 0.4839022690611782 +77 40 model.feature_map_dropout 0.3800808088815551 +77 40 model.embedding_dim 0.0 +77 40 negative_sampler.num_negs_per_pos 37.0 +77 40 training.batch_size 1.0 +77 41 model.output_channels 31.0 +77 41 model.input_dropout 0.2852374626226855 +77 41 model.output_dropout 0.23793352323561345 +77 41 model.feature_map_dropout 0.11094822314570574 +77 41 model.embedding_dim 0.0 +77 41 negative_sampler.num_negs_per_pos 62.0 +77 41 training.batch_size 0.0 +77 42 model.output_channels 32.0 +77 42 model.input_dropout 0.03057536147127915 +77 42 model.output_dropout 
0.15589543053531502 +77 42 model.feature_map_dropout 0.0027202406640606247 +77 42 model.embedding_dim 0.0 +77 42 negative_sampler.num_negs_per_pos 21.0 +77 42 training.batch_size 0.0 +77 43 model.output_channels 42.0 +77 43 model.input_dropout 0.43984363687410977 +77 43 model.output_dropout 0.39050467309701253 +77 43 model.feature_map_dropout 0.29832026918441257 +77 43 model.embedding_dim 0.0 +77 43 negative_sampler.num_negs_per_pos 29.0 +77 43 training.batch_size 0.0 +77 44 model.output_channels 19.0 +77 44 model.input_dropout 0.011089831510218973 +77 44 model.output_dropout 0.18634481843178646 +77 44 model.feature_map_dropout 0.33739672328990905 +77 44 model.embedding_dim 1.0 +77 44 negative_sampler.num_negs_per_pos 34.0 +77 44 training.batch_size 1.0 +77 45 model.output_channels 43.0 +77 45 model.input_dropout 0.04349573100013027 +77 45 model.output_dropout 0.47117987896260277 +77 45 model.feature_map_dropout 0.11769435718344107 +77 45 model.embedding_dim 0.0 +77 45 negative_sampler.num_negs_per_pos 36.0 +77 45 training.batch_size 1.0 +77 46 model.output_channels 41.0 +77 46 model.input_dropout 0.1998956067018794 +77 46 model.output_dropout 0.2565357426612437 +77 46 model.feature_map_dropout 0.14889724660731857 +77 46 model.embedding_dim 0.0 +77 46 negative_sampler.num_negs_per_pos 42.0 +77 46 training.batch_size 2.0 +77 47 model.output_channels 46.0 +77 47 model.input_dropout 0.39124078109421184 +77 47 model.output_dropout 0.27881598389066437 +77 47 model.feature_map_dropout 0.05534067042742813 +77 47 model.embedding_dim 2.0 +77 47 negative_sampler.num_negs_per_pos 63.0 +77 47 training.batch_size 1.0 +77 48 model.output_channels 35.0 +77 48 model.input_dropout 0.2353130328815511 +77 48 model.output_dropout 0.2429963170421242 +77 48 model.feature_map_dropout 0.3039435104507723 +77 48 model.embedding_dim 2.0 +77 48 negative_sampler.num_negs_per_pos 42.0 +77 48 training.batch_size 1.0 +77 49 model.output_channels 36.0 +77 49 model.input_dropout 0.2295128579415222 
+77 49 model.output_dropout 0.21512083656411563 +77 49 model.feature_map_dropout 0.06577529591564352 +77 49 model.embedding_dim 2.0 +77 49 negative_sampler.num_negs_per_pos 25.0 +77 49 training.batch_size 1.0 +77 50 model.output_channels 41.0 +77 50 model.input_dropout 0.14349621867210027 +77 50 model.output_dropout 0.45224992523215163 +77 50 model.feature_map_dropout 0.4432477600366796 +77 50 model.embedding_dim 0.0 +77 50 negative_sampler.num_negs_per_pos 86.0 +77 50 training.batch_size 2.0 +77 51 model.output_channels 22.0 +77 51 model.input_dropout 0.39789999528812553 +77 51 model.output_dropout 0.4235739652586832 +77 51 model.feature_map_dropout 0.22515182283974694 +77 51 model.embedding_dim 2.0 +77 51 negative_sampler.num_negs_per_pos 18.0 +77 51 training.batch_size 0.0 +77 52 model.output_channels 39.0 +77 52 model.input_dropout 0.2057123771640081 +77 52 model.output_dropout 0.4148048836247998 +77 52 model.feature_map_dropout 0.42713492086118526 +77 52 model.embedding_dim 1.0 +77 52 negative_sampler.num_negs_per_pos 92.0 +77 52 training.batch_size 1.0 +77 53 model.output_channels 37.0 +77 53 model.input_dropout 0.4021692959972609 +77 53 model.output_dropout 0.14396910418480063 +77 53 model.feature_map_dropout 0.45570856887893807 +77 53 model.embedding_dim 2.0 +77 53 negative_sampler.num_negs_per_pos 34.0 +77 53 training.batch_size 2.0 +77 54 model.output_channels 41.0 +77 54 model.input_dropout 0.36074987758521454 +77 54 model.output_dropout 0.2815451527410436 +77 54 model.feature_map_dropout 0.08863371958716076 +77 54 model.embedding_dim 1.0 +77 54 negative_sampler.num_negs_per_pos 41.0 +77 54 training.batch_size 2.0 +77 55 model.output_channels 37.0 +77 55 model.input_dropout 0.3349868817501122 +77 55 model.output_dropout 0.0003872796986090221 +77 55 model.feature_map_dropout 0.190921049926027 +77 55 model.embedding_dim 1.0 +77 55 negative_sampler.num_negs_per_pos 70.0 +77 55 training.batch_size 0.0 +77 56 model.output_channels 50.0 +77 56 
model.input_dropout 0.049134110101370754 +77 56 model.output_dropout 0.21021143798886 +77 56 model.feature_map_dropout 0.48067247172159105 +77 56 model.embedding_dim 2.0 +77 56 negative_sampler.num_negs_per_pos 23.0 +77 56 training.batch_size 1.0 +77 57 model.output_channels 29.0 +77 57 model.input_dropout 0.4351419940475042 +77 57 model.output_dropout 0.38043288771329314 +77 57 model.feature_map_dropout 0.039873736541999105 +77 57 model.embedding_dim 1.0 +77 57 negative_sampler.num_negs_per_pos 92.0 +77 57 training.batch_size 1.0 +77 58 model.output_channels 56.0 +77 58 model.input_dropout 0.3632430028683278 +77 58 model.output_dropout 0.387126448774205 +77 58 model.feature_map_dropout 0.004184177602593209 +77 58 model.embedding_dim 1.0 +77 58 negative_sampler.num_negs_per_pos 97.0 +77 58 training.batch_size 2.0 +77 59 model.output_channels 36.0 +77 59 model.input_dropout 0.02576200414149621 +77 59 model.output_dropout 0.0416499437779661 +77 59 model.feature_map_dropout 0.45884047269485206 +77 59 model.embedding_dim 2.0 +77 59 negative_sampler.num_negs_per_pos 73.0 +77 59 training.batch_size 0.0 +77 60 model.output_channels 61.0 +77 60 model.input_dropout 0.18247822412639436 +77 60 model.output_dropout 0.2599308419082212 +77 60 model.feature_map_dropout 0.27264662420851954 +77 60 model.embedding_dim 2.0 +77 60 negative_sampler.num_negs_per_pos 95.0 +77 60 training.batch_size 0.0 +77 61 model.output_channels 48.0 +77 61 model.input_dropout 0.11862688701941843 +77 61 model.output_dropout 0.06136644313947437 +77 61 model.feature_map_dropout 0.4188717872949677 +77 61 model.embedding_dim 0.0 +77 61 negative_sampler.num_negs_per_pos 75.0 +77 61 training.batch_size 2.0 +77 62 model.output_channels 51.0 +77 62 model.input_dropout 0.028957564907214572 +77 62 model.output_dropout 0.1154496043896232 +77 62 model.feature_map_dropout 0.1851477863198293 +77 62 model.embedding_dim 2.0 +77 62 negative_sampler.num_negs_per_pos 31.0 +77 62 training.batch_size 2.0 +77 63 
model.output_channels 45.0 +77 63 model.input_dropout 0.3735754556345114 +77 63 model.output_dropout 0.36275120776132885 +77 63 model.feature_map_dropout 0.16113826789330477 +77 63 model.embedding_dim 1.0 +77 63 negative_sampler.num_negs_per_pos 21.0 +77 63 training.batch_size 2.0 +77 64 model.output_channels 27.0 +77 64 model.input_dropout 0.06703704182267373 +77 64 model.output_dropout 0.21806895926845 +77 64 model.feature_map_dropout 0.2106756717590268 +77 64 model.embedding_dim 2.0 +77 64 negative_sampler.num_negs_per_pos 87.0 +77 64 training.batch_size 2.0 +77 65 model.output_channels 20.0 +77 65 model.input_dropout 0.47037781222547065 +77 65 model.output_dropout 0.3885501746463695 +77 65 model.feature_map_dropout 0.17292849162579432 +77 65 model.embedding_dim 2.0 +77 65 negative_sampler.num_negs_per_pos 2.0 +77 65 training.batch_size 2.0 +77 66 model.output_channels 37.0 +77 66 model.input_dropout 0.08919045736877668 +77 66 model.output_dropout 0.011920234620303172 +77 66 model.feature_map_dropout 0.2546354017216032 +77 66 model.embedding_dim 2.0 +77 66 negative_sampler.num_negs_per_pos 40.0 +77 66 training.batch_size 1.0 +77 67 model.output_channels 29.0 +77 67 model.input_dropout 0.4804547857009155 +77 67 model.output_dropout 0.19127049312959954 +77 67 model.feature_map_dropout 0.1918022025703791 +77 67 model.embedding_dim 2.0 +77 67 negative_sampler.num_negs_per_pos 38.0 +77 67 training.batch_size 2.0 +77 68 model.output_channels 48.0 +77 68 model.input_dropout 0.018309218378574854 +77 68 model.output_dropout 0.3386999341492947 +77 68 model.feature_map_dropout 0.35552206499900957 +77 68 model.embedding_dim 1.0 +77 68 negative_sampler.num_negs_per_pos 35.0 +77 68 training.batch_size 2.0 +77 69 model.output_channels 52.0 +77 69 model.input_dropout 0.28259890804868987 +77 69 model.output_dropout 0.27167238370590613 +77 69 model.feature_map_dropout 0.12396987879881866 +77 69 model.embedding_dim 1.0 +77 69 negative_sampler.num_negs_per_pos 82.0 +77 69 
training.batch_size 0.0 +77 70 model.output_channels 33.0 +77 70 model.input_dropout 0.409740323181425 +77 70 model.output_dropout 0.4151060948217763 +77 70 model.feature_map_dropout 0.2499058941399237 +77 70 model.embedding_dim 0.0 +77 70 negative_sampler.num_negs_per_pos 17.0 +77 70 training.batch_size 1.0 +77 71 model.output_channels 19.0 +77 71 model.input_dropout 0.39144983745402717 +77 71 model.output_dropout 0.0817952629787651 +77 71 model.feature_map_dropout 0.0792350214145704 +77 71 model.embedding_dim 0.0 +77 71 negative_sampler.num_negs_per_pos 8.0 +77 71 training.batch_size 1.0 +77 1 dataset """kinships""" +77 1 model """conve""" +77 1 loss """softplus""" +77 1 regularizer """no""" +77 1 optimizer """adadelta""" +77 1 training_loop """owa""" +77 1 negative_sampler """basic""" +77 1 evaluator """rankbased""" +77 2 dataset """kinships""" +77 2 model """conve""" +77 2 loss """softplus""" +77 2 regularizer """no""" +77 2 optimizer """adadelta""" +77 2 training_loop """owa""" +77 2 negative_sampler """basic""" +77 2 evaluator """rankbased""" +77 3 dataset """kinships""" +77 3 model """conve""" +77 3 loss """softplus""" +77 3 regularizer """no""" +77 3 optimizer """adadelta""" +77 3 training_loop """owa""" +77 3 negative_sampler """basic""" +77 3 evaluator """rankbased""" +77 4 dataset """kinships""" +77 4 model """conve""" +77 4 loss """softplus""" +77 4 regularizer """no""" +77 4 optimizer """adadelta""" +77 4 training_loop """owa""" +77 4 negative_sampler """basic""" +77 4 evaluator """rankbased""" +77 5 dataset """kinships""" +77 5 model """conve""" +77 5 loss """softplus""" +77 5 regularizer """no""" +77 5 optimizer """adadelta""" +77 5 training_loop """owa""" +77 5 negative_sampler """basic""" +77 5 evaluator """rankbased""" +77 6 dataset """kinships""" +77 6 model """conve""" +77 6 loss """softplus""" +77 6 regularizer """no""" +77 6 optimizer """adadelta""" +77 6 training_loop """owa""" +77 6 negative_sampler """basic""" +77 6 evaluator 
"""rankbased""" +77 7 dataset """kinships""" +77 7 model """conve""" +77 7 loss """softplus""" +77 7 regularizer """no""" +77 7 optimizer """adadelta""" +77 7 training_loop """owa""" +77 7 negative_sampler """basic""" +77 7 evaluator """rankbased""" +77 8 dataset """kinships""" +77 8 model """conve""" +77 8 loss """softplus""" +77 8 regularizer """no""" +77 8 optimizer """adadelta""" +77 8 training_loop """owa""" +77 8 negative_sampler """basic""" +77 8 evaluator """rankbased""" +77 9 dataset """kinships""" +77 9 model """conve""" +77 9 loss """softplus""" +77 9 regularizer """no""" +77 9 optimizer """adadelta""" +77 9 training_loop """owa""" +77 9 negative_sampler """basic""" +77 9 evaluator """rankbased""" +77 10 dataset """kinships""" +77 10 model """conve""" +77 10 loss """softplus""" +77 10 regularizer """no""" +77 10 optimizer """adadelta""" +77 10 training_loop """owa""" +77 10 negative_sampler """basic""" +77 10 evaluator """rankbased""" +77 11 dataset """kinships""" +77 11 model """conve""" +77 11 loss """softplus""" +77 11 regularizer """no""" +77 11 optimizer """adadelta""" +77 11 training_loop """owa""" +77 11 negative_sampler """basic""" +77 11 evaluator """rankbased""" +77 12 dataset """kinships""" +77 12 model """conve""" +77 12 loss """softplus""" +77 12 regularizer """no""" +77 12 optimizer """adadelta""" +77 12 training_loop """owa""" +77 12 negative_sampler """basic""" +77 12 evaluator """rankbased""" +77 13 dataset """kinships""" +77 13 model """conve""" +77 13 loss """softplus""" +77 13 regularizer """no""" +77 13 optimizer """adadelta""" +77 13 training_loop """owa""" +77 13 negative_sampler """basic""" +77 13 evaluator """rankbased""" +77 14 dataset """kinships""" +77 14 model """conve""" +77 14 loss """softplus""" +77 14 regularizer """no""" +77 14 optimizer """adadelta""" +77 14 training_loop """owa""" +77 14 negative_sampler """basic""" +77 14 evaluator """rankbased""" +77 15 dataset """kinships""" +77 15 model """conve""" +77 15 loss 
"""softplus""" +77 15 regularizer """no""" +77 15 optimizer """adadelta""" +77 15 training_loop """owa""" +77 15 negative_sampler """basic""" +77 15 evaluator """rankbased""" +77 16 dataset """kinships""" +77 16 model """conve""" +77 16 loss """softplus""" +77 16 regularizer """no""" +77 16 optimizer """adadelta""" +77 16 training_loop """owa""" +77 16 negative_sampler """basic""" +77 16 evaluator """rankbased""" +77 17 dataset """kinships""" +77 17 model """conve""" +77 17 loss """softplus""" +77 17 regularizer """no""" +77 17 optimizer """adadelta""" +77 17 training_loop """owa""" +77 17 negative_sampler """basic""" +77 17 evaluator """rankbased""" +77 18 dataset """kinships""" +77 18 model """conve""" +77 18 loss """softplus""" +77 18 regularizer """no""" +77 18 optimizer """adadelta""" +77 18 training_loop """owa""" +77 18 negative_sampler """basic""" +77 18 evaluator """rankbased""" +77 19 dataset """kinships""" +77 19 model """conve""" +77 19 loss """softplus""" +77 19 regularizer """no""" +77 19 optimizer """adadelta""" +77 19 training_loop """owa""" +77 19 negative_sampler """basic""" +77 19 evaluator """rankbased""" +77 20 dataset """kinships""" +77 20 model """conve""" +77 20 loss """softplus""" +77 20 regularizer """no""" +77 20 optimizer """adadelta""" +77 20 training_loop """owa""" +77 20 negative_sampler """basic""" +77 20 evaluator """rankbased""" +77 21 dataset """kinships""" +77 21 model """conve""" +77 21 loss """softplus""" +77 21 regularizer """no""" +77 21 optimizer """adadelta""" +77 21 training_loop """owa""" +77 21 negative_sampler """basic""" +77 21 evaluator """rankbased""" +77 22 dataset """kinships""" +77 22 model """conve""" +77 22 loss """softplus""" +77 22 regularizer """no""" +77 22 optimizer """adadelta""" +77 22 training_loop """owa""" +77 22 negative_sampler """basic""" +77 22 evaluator """rankbased""" +77 23 dataset """kinships""" +77 23 model """conve""" +77 23 loss """softplus""" +77 23 regularizer """no""" +77 23 optimizer 
"""adadelta""" +77 23 training_loop """owa""" +77 23 negative_sampler """basic""" +77 23 evaluator """rankbased""" +77 24 dataset """kinships""" +77 24 model """conve""" +77 24 loss """softplus""" +77 24 regularizer """no""" +77 24 optimizer """adadelta""" +77 24 training_loop """owa""" +77 24 negative_sampler """basic""" +77 24 evaluator """rankbased""" +77 25 dataset """kinships""" +77 25 model """conve""" +77 25 loss """softplus""" +77 25 regularizer """no""" +77 25 optimizer """adadelta""" +77 25 training_loop """owa""" +77 25 negative_sampler """basic""" +77 25 evaluator """rankbased""" +77 26 dataset """kinships""" +77 26 model """conve""" +77 26 loss """softplus""" +77 26 regularizer """no""" +77 26 optimizer """adadelta""" +77 26 training_loop """owa""" +77 26 negative_sampler """basic""" +77 26 evaluator """rankbased""" +77 27 dataset """kinships""" +77 27 model """conve""" +77 27 loss """softplus""" +77 27 regularizer """no""" +77 27 optimizer """adadelta""" +77 27 training_loop """owa""" +77 27 negative_sampler """basic""" +77 27 evaluator """rankbased""" +77 28 dataset """kinships""" +77 28 model """conve""" +77 28 loss """softplus""" +77 28 regularizer """no""" +77 28 optimizer """adadelta""" +77 28 training_loop """owa""" +77 28 negative_sampler """basic""" +77 28 evaluator """rankbased""" +77 29 dataset """kinships""" +77 29 model """conve""" +77 29 loss """softplus""" +77 29 regularizer """no""" +77 29 optimizer """adadelta""" +77 29 training_loop """owa""" +77 29 negative_sampler """basic""" +77 29 evaluator """rankbased""" +77 30 dataset """kinships""" +77 30 model """conve""" +77 30 loss """softplus""" +77 30 regularizer """no""" +77 30 optimizer """adadelta""" +77 30 training_loop """owa""" +77 30 negative_sampler """basic""" +77 30 evaluator """rankbased""" +77 31 dataset """kinships""" +77 31 model """conve""" +77 31 loss """softplus""" +77 31 regularizer """no""" +77 31 optimizer """adadelta""" +77 31 training_loop """owa""" +77 31 
negative_sampler """basic""" +77 31 evaluator """rankbased""" +77 32 dataset """kinships""" +77 32 model """conve""" +77 32 loss """softplus""" +77 32 regularizer """no""" +77 32 optimizer """adadelta""" +77 32 training_loop """owa""" +77 32 negative_sampler """basic""" +77 32 evaluator """rankbased""" +77 33 dataset """kinships""" +77 33 model """conve""" +77 33 loss """softplus""" +77 33 regularizer """no""" +77 33 optimizer """adadelta""" +77 33 training_loop """owa""" +77 33 negative_sampler """basic""" +77 33 evaluator """rankbased""" +77 34 dataset """kinships""" +77 34 model """conve""" +77 34 loss """softplus""" +77 34 regularizer """no""" +77 34 optimizer """adadelta""" +77 34 training_loop """owa""" +77 34 negative_sampler """basic""" +77 34 evaluator """rankbased""" +77 35 dataset """kinships""" +77 35 model """conve""" +77 35 loss """softplus""" +77 35 regularizer """no""" +77 35 optimizer """adadelta""" +77 35 training_loop """owa""" +77 35 negative_sampler """basic""" +77 35 evaluator """rankbased""" +77 36 dataset """kinships""" +77 36 model """conve""" +77 36 loss """softplus""" +77 36 regularizer """no""" +77 36 optimizer """adadelta""" +77 36 training_loop """owa""" +77 36 negative_sampler """basic""" +77 36 evaluator """rankbased""" +77 37 dataset """kinships""" +77 37 model """conve""" +77 37 loss """softplus""" +77 37 regularizer """no""" +77 37 optimizer """adadelta""" +77 37 training_loop """owa""" +77 37 negative_sampler """basic""" +77 37 evaluator """rankbased""" +77 38 dataset """kinships""" +77 38 model """conve""" +77 38 loss """softplus""" +77 38 regularizer """no""" +77 38 optimizer """adadelta""" +77 38 training_loop """owa""" +77 38 negative_sampler """basic""" +77 38 evaluator """rankbased""" +77 39 dataset """kinships""" +77 39 model """conve""" +77 39 loss """softplus""" +77 39 regularizer """no""" +77 39 optimizer """adadelta""" +77 39 training_loop """owa""" +77 39 negative_sampler """basic""" +77 39 evaluator """rankbased""" 
+77 40 dataset """kinships""" +77 40 model """conve""" +77 40 loss """softplus""" +77 40 regularizer """no""" +77 40 optimizer """adadelta""" +77 40 training_loop """owa""" +77 40 negative_sampler """basic""" +77 40 evaluator """rankbased""" +77 41 dataset """kinships""" +77 41 model """conve""" +77 41 loss """softplus""" +77 41 regularizer """no""" +77 41 optimizer """adadelta""" +77 41 training_loop """owa""" +77 41 negative_sampler """basic""" +77 41 evaluator """rankbased""" +77 42 dataset """kinships""" +77 42 model """conve""" +77 42 loss """softplus""" +77 42 regularizer """no""" +77 42 optimizer """adadelta""" +77 42 training_loop """owa""" +77 42 negative_sampler """basic""" +77 42 evaluator """rankbased""" +77 43 dataset """kinships""" +77 43 model """conve""" +77 43 loss """softplus""" +77 43 regularizer """no""" +77 43 optimizer """adadelta""" +77 43 training_loop """owa""" +77 43 negative_sampler """basic""" +77 43 evaluator """rankbased""" +77 44 dataset """kinships""" +77 44 model """conve""" +77 44 loss """softplus""" +77 44 regularizer """no""" +77 44 optimizer """adadelta""" +77 44 training_loop """owa""" +77 44 negative_sampler """basic""" +77 44 evaluator """rankbased""" +77 45 dataset """kinships""" +77 45 model """conve""" +77 45 loss """softplus""" +77 45 regularizer """no""" +77 45 optimizer """adadelta""" +77 45 training_loop """owa""" +77 45 negative_sampler """basic""" +77 45 evaluator """rankbased""" +77 46 dataset """kinships""" +77 46 model """conve""" +77 46 loss """softplus""" +77 46 regularizer """no""" +77 46 optimizer """adadelta""" +77 46 training_loop """owa""" +77 46 negative_sampler """basic""" +77 46 evaluator """rankbased""" +77 47 dataset """kinships""" +77 47 model """conve""" +77 47 loss """softplus""" +77 47 regularizer """no""" +77 47 optimizer """adadelta""" +77 47 training_loop """owa""" +77 47 negative_sampler """basic""" +77 47 evaluator """rankbased""" +77 48 dataset """kinships""" +77 48 model """conve""" +77 48 
loss """softplus""" +77 48 regularizer """no""" +77 48 optimizer """adadelta""" +77 48 training_loop """owa""" +77 48 negative_sampler """basic""" +77 48 evaluator """rankbased""" +77 49 dataset """kinships""" +77 49 model """conve""" +77 49 loss """softplus""" +77 49 regularizer """no""" +77 49 optimizer """adadelta""" +77 49 training_loop """owa""" +77 49 negative_sampler """basic""" +77 49 evaluator """rankbased""" +77 50 dataset """kinships""" +77 50 model """conve""" +77 50 loss """softplus""" +77 50 regularizer """no""" +77 50 optimizer """adadelta""" +77 50 training_loop """owa""" +77 50 negative_sampler """basic""" +77 50 evaluator """rankbased""" +77 51 dataset """kinships""" +77 51 model """conve""" +77 51 loss """softplus""" +77 51 regularizer """no""" +77 51 optimizer """adadelta""" +77 51 training_loop """owa""" +77 51 negative_sampler """basic""" +77 51 evaluator """rankbased""" +77 52 dataset """kinships""" +77 52 model """conve""" +77 52 loss """softplus""" +77 52 regularizer """no""" +77 52 optimizer """adadelta""" +77 52 training_loop """owa""" +77 52 negative_sampler """basic""" +77 52 evaluator """rankbased""" +77 53 dataset """kinships""" +77 53 model """conve""" +77 53 loss """softplus""" +77 53 regularizer """no""" +77 53 optimizer """adadelta""" +77 53 training_loop """owa""" +77 53 negative_sampler """basic""" +77 53 evaluator """rankbased""" +77 54 dataset """kinships""" +77 54 model """conve""" +77 54 loss """softplus""" +77 54 regularizer """no""" +77 54 optimizer """adadelta""" +77 54 training_loop """owa""" +77 54 negative_sampler """basic""" +77 54 evaluator """rankbased""" +77 55 dataset """kinships""" +77 55 model """conve""" +77 55 loss """softplus""" +77 55 regularizer """no""" +77 55 optimizer """adadelta""" +77 55 training_loop """owa""" +77 55 negative_sampler """basic""" +77 55 evaluator """rankbased""" +77 56 dataset """kinships""" +77 56 model """conve""" +77 56 loss """softplus""" +77 56 regularizer """no""" +77 56 
optimizer """adadelta""" +77 56 training_loop """owa""" +77 56 negative_sampler """basic""" +77 56 evaluator """rankbased""" +77 57 dataset """kinships""" +77 57 model """conve""" +77 57 loss """softplus""" +77 57 regularizer """no""" +77 57 optimizer """adadelta""" +77 57 training_loop """owa""" +77 57 negative_sampler """basic""" +77 57 evaluator """rankbased""" +77 58 dataset """kinships""" +77 58 model """conve""" +77 58 loss """softplus""" +77 58 regularizer """no""" +77 58 optimizer """adadelta""" +77 58 training_loop """owa""" +77 58 negative_sampler """basic""" +77 58 evaluator """rankbased""" +77 59 dataset """kinships""" +77 59 model """conve""" +77 59 loss """softplus""" +77 59 regularizer """no""" +77 59 optimizer """adadelta""" +77 59 training_loop """owa""" +77 59 negative_sampler """basic""" +77 59 evaluator """rankbased""" +77 60 dataset """kinships""" +77 60 model """conve""" +77 60 loss """softplus""" +77 60 regularizer """no""" +77 60 optimizer """adadelta""" +77 60 training_loop """owa""" +77 60 negative_sampler """basic""" +77 60 evaluator """rankbased""" +77 61 dataset """kinships""" +77 61 model """conve""" +77 61 loss """softplus""" +77 61 regularizer """no""" +77 61 optimizer """adadelta""" +77 61 training_loop """owa""" +77 61 negative_sampler """basic""" +77 61 evaluator """rankbased""" +77 62 dataset """kinships""" +77 62 model """conve""" +77 62 loss """softplus""" +77 62 regularizer """no""" +77 62 optimizer """adadelta""" +77 62 training_loop """owa""" +77 62 negative_sampler """basic""" +77 62 evaluator """rankbased""" +77 63 dataset """kinships""" +77 63 model """conve""" +77 63 loss """softplus""" +77 63 regularizer """no""" +77 63 optimizer """adadelta""" +77 63 training_loop """owa""" +77 63 negative_sampler """basic""" +77 63 evaluator """rankbased""" +77 64 dataset """kinships""" +77 64 model """conve""" +77 64 loss """softplus""" +77 64 regularizer """no""" +77 64 optimizer """adadelta""" +77 64 training_loop """owa""" +77 64 
negative_sampler """basic""" +77 64 evaluator """rankbased""" +77 65 dataset """kinships""" +77 65 model """conve""" +77 65 loss """softplus""" +77 65 regularizer """no""" +77 65 optimizer """adadelta""" +77 65 training_loop """owa""" +77 65 negative_sampler """basic""" +77 65 evaluator """rankbased""" +77 66 dataset """kinships""" +77 66 model """conve""" +77 66 loss """softplus""" +77 66 regularizer """no""" +77 66 optimizer """adadelta""" +77 66 training_loop """owa""" +77 66 negative_sampler """basic""" +77 66 evaluator """rankbased""" +77 67 dataset """kinships""" +77 67 model """conve""" +77 67 loss """softplus""" +77 67 regularizer """no""" +77 67 optimizer """adadelta""" +77 67 training_loop """owa""" +77 67 negative_sampler """basic""" +77 67 evaluator """rankbased""" +77 68 dataset """kinships""" +77 68 model """conve""" +77 68 loss """softplus""" +77 68 regularizer """no""" +77 68 optimizer """adadelta""" +77 68 training_loop """owa""" +77 68 negative_sampler """basic""" +77 68 evaluator """rankbased""" +77 69 dataset """kinships""" +77 69 model """conve""" +77 69 loss """softplus""" +77 69 regularizer """no""" +77 69 optimizer """adadelta""" +77 69 training_loop """owa""" +77 69 negative_sampler """basic""" +77 69 evaluator """rankbased""" +77 70 dataset """kinships""" +77 70 model """conve""" +77 70 loss """softplus""" +77 70 regularizer """no""" +77 70 optimizer """adadelta""" +77 70 training_loop """owa""" +77 70 negative_sampler """basic""" +77 70 evaluator """rankbased""" +77 71 dataset """kinships""" +77 71 model """conve""" +77 71 loss """softplus""" +77 71 regularizer """no""" +77 71 optimizer """adadelta""" +77 71 training_loop """owa""" +77 71 negative_sampler """basic""" +77 71 evaluator """rankbased""" +78 1 model.output_channels 29.0 +78 1 model.input_dropout 0.4802285504863964 +78 1 model.output_dropout 0.2902746037172472 +78 1 model.feature_map_dropout 0.2798855240025296 +78 1 model.embedding_dim 2.0 +78 1 optimizer.lr 
0.0013258035711026385 +78 1 training.batch_size 1.0 +78 1 training.label_smoothing 0.011703793473792428 +78 2 model.output_channels 63.0 +78 2 model.input_dropout 0.4189425829012818 +78 2 model.output_dropout 0.17431928707766886 +78 2 model.feature_map_dropout 0.24543505368520419 +78 2 model.embedding_dim 0.0 +78 2 optimizer.lr 0.0681817568630049 +78 2 training.batch_size 0.0 +78 2 training.label_smoothing 0.34571687414330354 +78 3 model.output_channels 38.0 +78 3 model.input_dropout 0.27353025378075435 +78 3 model.output_dropout 0.41448231878217273 +78 3 model.feature_map_dropout 0.4237879457322365 +78 3 model.embedding_dim 0.0 +78 3 optimizer.lr 0.0011655386070279039 +78 3 training.batch_size 1.0 +78 3 training.label_smoothing 0.06454462125703866 +78 4 model.output_channels 17.0 +78 4 model.input_dropout 0.2802807354594627 +78 4 model.output_dropout 0.14782948548759228 +78 4 model.feature_map_dropout 0.3487415582935995 +78 4 model.embedding_dim 2.0 +78 4 optimizer.lr 0.01290160110652494 +78 4 training.batch_size 2.0 +78 4 training.label_smoothing 0.09477496206188678 +78 5 model.output_channels 16.0 +78 5 model.input_dropout 0.3644064425130881 +78 5 model.output_dropout 0.22590199001519456 +78 5 model.feature_map_dropout 0.27118995323472983 +78 5 model.embedding_dim 1.0 +78 5 optimizer.lr 0.019694097947318467 +78 5 training.batch_size 1.0 +78 5 training.label_smoothing 0.0016697189460910245 +78 6 model.output_channels 64.0 +78 6 model.input_dropout 0.07074235289490116 +78 6 model.output_dropout 0.12901470657697706 +78 6 model.feature_map_dropout 0.15761093703309081 +78 6 model.embedding_dim 2.0 +78 6 optimizer.lr 0.0014783365766202093 +78 6 training.batch_size 1.0 +78 6 training.label_smoothing 0.0054224953586800425 +78 7 model.output_channels 35.0 +78 7 model.input_dropout 0.2743344391801525 +78 7 model.output_dropout 0.33610548210743313 +78 7 model.feature_map_dropout 0.17084518842314217 +78 7 model.embedding_dim 0.0 +78 7 optimizer.lr 0.0028625704105843326 +78 
7 training.batch_size 1.0 +78 7 training.label_smoothing 0.02035583035507361 +78 8 model.output_channels 50.0 +78 8 model.input_dropout 0.3713056221985674 +78 8 model.output_dropout 0.1117698943844948 +78 8 model.feature_map_dropout 0.4656702330268432 +78 8 model.embedding_dim 0.0 +78 8 optimizer.lr 0.002174742751297946 +78 8 training.batch_size 1.0 +78 8 training.label_smoothing 0.015224421192969928 +78 9 model.output_channels 40.0 +78 9 model.input_dropout 0.4910522171304233 +78 9 model.output_dropout 0.35208194645177354 +78 9 model.feature_map_dropout 0.36152470582079455 +78 9 model.embedding_dim 2.0 +78 9 optimizer.lr 0.09837250677812116 +78 9 training.batch_size 0.0 +78 9 training.label_smoothing 0.015004054589561396 +78 10 model.output_channels 36.0 +78 10 model.input_dropout 0.03386490461207947 +78 10 model.output_dropout 0.004343176542847993 +78 10 model.feature_map_dropout 0.20378089500185753 +78 10 model.embedding_dim 0.0 +78 10 optimizer.lr 0.0551752677089965 +78 10 training.batch_size 2.0 +78 10 training.label_smoothing 0.5295435297975349 +78 11 model.output_channels 50.0 +78 11 model.input_dropout 0.12190572049291332 +78 11 model.output_dropout 0.17295905918673665 +78 11 model.feature_map_dropout 0.03583632436810036 +78 11 model.embedding_dim 1.0 +78 11 optimizer.lr 0.0036572154469431757 +78 11 training.batch_size 1.0 +78 11 training.label_smoothing 0.001625606227350874 +78 12 model.output_channels 56.0 +78 12 model.input_dropout 0.2635673981027854 +78 12 model.output_dropout 0.07452263218632188 +78 12 model.feature_map_dropout 0.15467504081824424 +78 12 model.embedding_dim 1.0 +78 12 optimizer.lr 0.0021484840489042843 +78 12 training.batch_size 2.0 +78 12 training.label_smoothing 0.00476418065418196 +78 13 model.output_channels 38.0 +78 13 model.input_dropout 0.24035849099665746 +78 13 model.output_dropout 0.3939154063256358 +78 13 model.feature_map_dropout 0.22299008222068223 +78 13 model.embedding_dim 1.0 +78 13 optimizer.lr 0.0032030714833466366 
+78 13 training.batch_size 0.0 +78 13 training.label_smoothing 0.03670157216448789 +78 14 model.output_channels 25.0 +78 14 model.input_dropout 0.0006989383854549835 +78 14 model.output_dropout 0.17330397842225126 +78 14 model.feature_map_dropout 0.10391826561711542 +78 14 model.embedding_dim 2.0 +78 14 optimizer.lr 0.03345452428245668 +78 14 training.batch_size 1.0 +78 14 training.label_smoothing 0.19650793312853101 +78 15 model.output_channels 52.0 +78 15 model.input_dropout 0.4915057663030022 +78 15 model.output_dropout 0.17645256737606718 +78 15 model.feature_map_dropout 0.4393838931088536 +78 15 model.embedding_dim 1.0 +78 15 optimizer.lr 0.021118522300514905 +78 15 training.batch_size 2.0 +78 15 training.label_smoothing 0.3620535440512382 +78 16 model.output_channels 28.0 +78 16 model.input_dropout 0.43924513746239136 +78 16 model.output_dropout 0.06572209655727695 +78 16 model.feature_map_dropout 0.16495463019819184 +78 16 model.embedding_dim 2.0 +78 16 optimizer.lr 0.01915238062743164 +78 16 training.batch_size 0.0 +78 16 training.label_smoothing 0.028400082473721448 +78 17 model.output_channels 18.0 +78 17 model.input_dropout 0.04660020788680708 +78 17 model.output_dropout 0.44618045505431947 +78 17 model.feature_map_dropout 0.3151775685998464 +78 17 model.embedding_dim 2.0 +78 17 optimizer.lr 0.01408378768005671 +78 17 training.batch_size 0.0 +78 17 training.label_smoothing 0.04141226118384141 +78 18 model.output_channels 58.0 +78 18 model.input_dropout 0.25456886198320355 +78 18 model.output_dropout 0.11166709700415184 +78 18 model.feature_map_dropout 0.2558936011018831 +78 18 model.embedding_dim 2.0 +78 18 optimizer.lr 0.0069240278881833725 +78 18 training.batch_size 0.0 +78 18 training.label_smoothing 0.12652189637396216 +78 19 model.output_channels 56.0 +78 19 model.input_dropout 0.2746162510752513 +78 19 model.output_dropout 0.4654939388407822 +78 19 model.feature_map_dropout 0.3610080393395676 +78 19 model.embedding_dim 1.0 +78 19 optimizer.lr 
0.006063046051816546 +78 19 training.batch_size 1.0 +78 19 training.label_smoothing 0.023826445623532602 +78 20 model.output_channels 58.0 +78 20 model.input_dropout 0.4113044945989846 +78 20 model.output_dropout 0.07004191342134558 +78 20 model.feature_map_dropout 0.3973155043866568 +78 20 model.embedding_dim 1.0 +78 20 optimizer.lr 0.009160583438839636 +78 20 training.batch_size 2.0 +78 20 training.label_smoothing 0.030161600000978107 +78 21 model.output_channels 50.0 +78 21 model.input_dropout 0.43483560131745 +78 21 model.output_dropout 0.05617632678780843 +78 21 model.feature_map_dropout 0.11112178849733378 +78 21 model.embedding_dim 2.0 +78 21 optimizer.lr 0.00829022976970303 +78 21 training.batch_size 0.0 +78 21 training.label_smoothing 0.020609203986604607 +78 22 model.output_channels 45.0 +78 22 model.input_dropout 0.2086860472203289 +78 22 model.output_dropout 0.3050713632267003 +78 22 model.feature_map_dropout 0.4302508721404415 +78 22 model.embedding_dim 0.0 +78 22 optimizer.lr 0.05865777500481804 +78 22 training.batch_size 0.0 +78 22 training.label_smoothing 0.0017779180317226834 +78 23 model.output_channels 38.0 +78 23 model.input_dropout 0.15410490124261134 +78 23 model.output_dropout 0.22725405339780125 +78 23 model.feature_map_dropout 0.334143228692829 +78 23 model.embedding_dim 0.0 +78 23 optimizer.lr 0.04122925054610061 +78 23 training.batch_size 2.0 +78 23 training.label_smoothing 0.040302162877565204 +78 24 model.output_channels 26.0 +78 24 model.input_dropout 0.06286451946518623 +78 24 model.output_dropout 0.27653101642752437 +78 24 model.feature_map_dropout 0.3702726728688835 +78 24 model.embedding_dim 2.0 +78 24 optimizer.lr 0.02779846517012576 +78 24 training.batch_size 2.0 +78 24 training.label_smoothing 0.034576415080690474 +78 25 model.output_channels 44.0 +78 25 model.input_dropout 0.33732464904338505 +78 25 model.output_dropout 0.23635986617188381 +78 25 model.feature_map_dropout 0.17965087870575874 +78 25 model.embedding_dim 0.0 +78 
25 optimizer.lr 0.0034106174720647175 +78 25 training.batch_size 0.0 +78 25 training.label_smoothing 0.013100028653556741 +78 26 model.output_channels 33.0 +78 26 model.input_dropout 0.389599999552317 +78 26 model.output_dropout 0.13053523975457015 +78 26 model.feature_map_dropout 0.2729360029228443 +78 26 model.embedding_dim 1.0 +78 26 optimizer.lr 0.01963860366890056 +78 26 training.batch_size 0.0 +78 26 training.label_smoothing 0.19975790866739934 +78 27 model.output_channels 25.0 +78 27 model.input_dropout 0.07497758403920596 +78 27 model.output_dropout 0.2383842402717814 +78 27 model.feature_map_dropout 0.46445774509230653 +78 27 model.embedding_dim 0.0 +78 27 optimizer.lr 0.039903077769472925 +78 27 training.batch_size 0.0 +78 27 training.label_smoothing 0.10475007587103621 +78 28 model.output_channels 38.0 +78 28 model.input_dropout 0.03563348680154621 +78 28 model.output_dropout 0.2103551384597549 +78 28 model.feature_map_dropout 0.2968127839951306 +78 28 model.embedding_dim 1.0 +78 28 optimizer.lr 0.03082292473109263 +78 28 training.batch_size 2.0 +78 28 training.label_smoothing 0.13978674789979662 +78 29 model.output_channels 41.0 +78 29 model.input_dropout 0.4592856956645404 +78 29 model.output_dropout 0.226049187116305 +78 29 model.feature_map_dropout 0.4559957557418913 +78 29 model.embedding_dim 0.0 +78 29 optimizer.lr 0.08912640199052119 +78 29 training.batch_size 1.0 +78 29 training.label_smoothing 0.036652512608430575 +78 30 model.output_channels 39.0 +78 30 model.input_dropout 0.17097415687051032 +78 30 model.output_dropout 0.34268773966939275 +78 30 model.feature_map_dropout 0.23427148894155975 +78 30 model.embedding_dim 2.0 +78 30 optimizer.lr 0.02832894643544877 +78 30 training.batch_size 0.0 +78 30 training.label_smoothing 0.003908908358848907 +78 31 model.output_channels 46.0 +78 31 model.input_dropout 0.15265784082669764 +78 31 model.output_dropout 0.3979429712678747 +78 31 model.feature_map_dropout 0.09446887106567653 +78 31 
model.embedding_dim 1.0 +78 31 optimizer.lr 0.007777610783971942 +78 31 training.batch_size 0.0 +78 31 training.label_smoothing 0.05797238204021824 +78 32 model.output_channels 61.0 +78 32 model.input_dropout 0.040874837577484946 +78 32 model.output_dropout 0.07130276658414747 +78 32 model.feature_map_dropout 0.31136293188594977 +78 32 model.embedding_dim 2.0 +78 32 optimizer.lr 0.04614673260108866 +78 32 training.batch_size 1.0 +78 32 training.label_smoothing 0.05004845356642603 +78 33 model.output_channels 43.0 +78 33 model.input_dropout 0.19061405754968208 +78 33 model.output_dropout 0.18273053057961713 +78 33 model.feature_map_dropout 0.193703551450224 +78 33 model.embedding_dim 2.0 +78 33 optimizer.lr 0.0138214097460308 +78 33 training.batch_size 0.0 +78 33 training.label_smoothing 0.009738808865158403 +78 34 model.output_channels 39.0 +78 34 model.input_dropout 0.15424668639881012 +78 34 model.output_dropout 0.09529338080199345 +78 34 model.feature_map_dropout 0.2561174018286175 +78 34 model.embedding_dim 2.0 +78 34 optimizer.lr 0.002323695756979264 +78 34 training.batch_size 1.0 +78 34 training.label_smoothing 0.4858737350298565 +78 35 model.output_channels 64.0 +78 35 model.input_dropout 0.4495373115032078 +78 35 model.output_dropout 0.17499197106440484 +78 35 model.feature_map_dropout 0.47955945390016236 +78 35 model.embedding_dim 1.0 +78 35 optimizer.lr 0.006027538921831613 +78 35 training.batch_size 1.0 +78 35 training.label_smoothing 0.26553360504979406 +78 36 model.output_channels 20.0 +78 36 model.input_dropout 0.31786923963814334 +78 36 model.output_dropout 0.0008796340072421138 +78 36 model.feature_map_dropout 0.4410476302773226 +78 36 model.embedding_dim 2.0 +78 36 optimizer.lr 0.025055890144650886 +78 36 training.batch_size 1.0 +78 36 training.label_smoothing 0.01444687258502307 +78 37 model.output_channels 42.0 +78 37 model.input_dropout 0.15553887395309707 +78 37 model.output_dropout 0.3962707122522745 +78 37 model.feature_map_dropout 
0.32682475173099373 +78 37 model.embedding_dim 0.0 +78 37 optimizer.lr 0.09821327827557108 +78 37 training.batch_size 2.0 +78 37 training.label_smoothing 0.6563347351532614 +78 38 model.output_channels 52.0 +78 38 model.input_dropout 0.44075121254708866 +78 38 model.output_dropout 0.3405911177597112 +78 38 model.feature_map_dropout 0.21103285631220547 +78 38 model.embedding_dim 1.0 +78 38 optimizer.lr 0.0022295162007943013 +78 38 training.batch_size 2.0 +78 38 training.label_smoothing 0.021432376141968617 +78 39 model.output_channels 39.0 +78 39 model.input_dropout 0.4149141009309594 +78 39 model.output_dropout 0.17858399387579943 +78 39 model.feature_map_dropout 0.4089981320859199 +78 39 model.embedding_dim 0.0 +78 39 optimizer.lr 0.0017160149722312795 +78 39 training.batch_size 2.0 +78 39 training.label_smoothing 0.004954796821966043 +78 40 model.output_channels 30.0 +78 40 model.input_dropout 0.3644291703937168 +78 40 model.output_dropout 0.009037297942051714 +78 40 model.feature_map_dropout 0.03330718242051417 +78 40 model.embedding_dim 0.0 +78 40 optimizer.lr 0.09964722854118939 +78 40 training.batch_size 2.0 +78 40 training.label_smoothing 0.016756645290746722 +78 41 model.output_channels 55.0 +78 41 model.input_dropout 0.16266561877839097 +78 41 model.output_dropout 0.014241012571539424 +78 41 model.feature_map_dropout 0.4454602821680661 +78 41 model.embedding_dim 2.0 +78 41 optimizer.lr 0.007898222929670342 +78 41 training.batch_size 0.0 +78 41 training.label_smoothing 0.0010770336841503668 +78 42 model.output_channels 25.0 +78 42 model.input_dropout 0.2330947619827533 +78 42 model.output_dropout 0.04000947076319883 +78 42 model.feature_map_dropout 0.007727115304890231 +78 42 model.embedding_dim 2.0 +78 42 optimizer.lr 0.003481353952060334 +78 42 training.batch_size 1.0 +78 42 training.label_smoothing 0.002952966367120208 +78 43 model.output_channels 64.0 +78 43 model.input_dropout 0.007446259609108941 +78 43 model.output_dropout 0.4935925125988075 +78 43 
model.feature_map_dropout 0.4248065439888714 +78 43 model.embedding_dim 1.0 +78 43 optimizer.lr 0.00460283488290993 +78 43 training.batch_size 2.0 +78 43 training.label_smoothing 0.8975661886241828 +78 44 model.output_channels 45.0 +78 44 model.input_dropout 0.2736492148817651 +78 44 model.output_dropout 0.052216585259850734 +78 44 model.feature_map_dropout 0.34397073154115354 +78 44 model.embedding_dim 1.0 +78 44 optimizer.lr 0.0036584654748958124 +78 44 training.batch_size 0.0 +78 44 training.label_smoothing 0.033396848024121246 +78 45 model.output_channels 56.0 +78 45 model.input_dropout 0.2591639715079775 +78 45 model.output_dropout 0.14743913729284897 +78 45 model.feature_map_dropout 0.27470662542006313 +78 45 model.embedding_dim 1.0 +78 45 optimizer.lr 0.017591959732895804 +78 45 training.batch_size 2.0 +78 45 training.label_smoothing 0.01040075128986456 +78 46 model.output_channels 51.0 +78 46 model.input_dropout 0.09818929173065488 +78 46 model.output_dropout 0.4599435165267097 +78 46 model.feature_map_dropout 0.33457730055549556 +78 46 model.embedding_dim 1.0 +78 46 optimizer.lr 0.0063238482296791056 +78 46 training.batch_size 0.0 +78 46 training.label_smoothing 0.012763878505047874 +78 47 model.output_channels 56.0 +78 47 model.input_dropout 0.14435137329625003 +78 47 model.output_dropout 0.3996406635636675 +78 47 model.feature_map_dropout 0.024285841780585626 +78 47 model.embedding_dim 1.0 +78 47 optimizer.lr 0.00811331259294016 +78 47 training.batch_size 2.0 +78 47 training.label_smoothing 0.01925883320478545 +78 48 model.output_channels 40.0 +78 48 model.input_dropout 0.29675640269033854 +78 48 model.output_dropout 0.07363023782110545 +78 48 model.feature_map_dropout 0.09525870259441344 +78 48 model.embedding_dim 2.0 +78 48 optimizer.lr 0.011912416905710307 +78 48 training.batch_size 2.0 +78 48 training.label_smoothing 0.20593037069146436 +78 49 model.output_channels 33.0 +78 49 model.input_dropout 0.3015631955635973 +78 49 model.output_dropout 
0.1510639708988451 +78 49 model.feature_map_dropout 0.2573671881777711 +78 49 model.embedding_dim 2.0 +78 49 optimizer.lr 0.08773717450740685 +78 49 training.batch_size 0.0 +78 49 training.label_smoothing 0.02696287145221053 +78 50 model.output_channels 62.0 +78 50 model.input_dropout 0.2191707771526959 +78 50 model.output_dropout 0.3275866900531704 +78 50 model.feature_map_dropout 0.12088143052458472 +78 50 model.embedding_dim 2.0 +78 50 optimizer.lr 0.03009453353653111 +78 50 training.batch_size 0.0 +78 50 training.label_smoothing 0.10010791054153158 +78 51 model.output_channels 39.0 +78 51 model.input_dropout 0.02729886671465398 +78 51 model.output_dropout 0.09801852880551104 +78 51 model.feature_map_dropout 0.4754788539656701 +78 51 model.embedding_dim 2.0 +78 51 optimizer.lr 0.03319387082250967 +78 51 training.batch_size 2.0 +78 51 training.label_smoothing 0.021580007586319807 +78 52 model.output_channels 44.0 +78 52 model.input_dropout 0.2934447290872237 +78 52 model.output_dropout 0.14179918861542767 +78 52 model.feature_map_dropout 0.06160206256051276 +78 52 model.embedding_dim 0.0 +78 52 optimizer.lr 0.08617496942141424 +78 52 training.batch_size 0.0 +78 52 training.label_smoothing 0.0033816323982570314 +78 53 model.output_channels 18.0 +78 53 model.input_dropout 0.49137182762038306 +78 53 model.output_dropout 0.19444175248930506 +78 53 model.feature_map_dropout 0.13413994920626338 +78 53 model.embedding_dim 0.0 +78 53 optimizer.lr 0.007741864846574535 +78 53 training.batch_size 0.0 +78 53 training.label_smoothing 0.002580888384681857 +78 54 model.output_channels 17.0 +78 54 model.input_dropout 0.1327937755940689 +78 54 model.output_dropout 0.405072349845115 +78 54 model.feature_map_dropout 0.18558640888647687 +78 54 model.embedding_dim 0.0 +78 54 optimizer.lr 0.0018103240798810081 +78 54 training.batch_size 2.0 +78 54 training.label_smoothing 0.03630217680015641 +78 55 model.output_channels 17.0 +78 55 model.input_dropout 0.17526721073027463 +78 55 
model.output_dropout 0.18797730628195491 +78 55 model.feature_map_dropout 0.44728585064380033 +78 55 model.embedding_dim 2.0 +78 55 optimizer.lr 0.0013089711021519923 +78 55 training.batch_size 2.0 +78 55 training.label_smoothing 0.0035304378382664927 +78 56 model.output_channels 62.0 +78 56 model.input_dropout 0.09026962077708339 +78 56 model.output_dropout 0.14687081948183628 +78 56 model.feature_map_dropout 0.2978203304895261 +78 56 model.embedding_dim 2.0 +78 56 optimizer.lr 0.0020794539795641015 +78 56 training.batch_size 0.0 +78 56 training.label_smoothing 0.4543002256455136 +78 57 model.output_channels 57.0 +78 57 model.input_dropout 0.4441740139433662 +78 57 model.output_dropout 0.25563978619443556 +78 57 model.feature_map_dropout 0.38915239951608277 +78 57 model.embedding_dim 1.0 +78 57 optimizer.lr 0.0034265500853722626 +78 57 training.batch_size 0.0 +78 57 training.label_smoothing 0.002428269395904336 +78 58 model.output_channels 38.0 +78 58 model.input_dropout 0.3487731584050772 +78 58 model.output_dropout 0.0027413920594402397 +78 58 model.feature_map_dropout 0.40638580473286984 +78 58 model.embedding_dim 1.0 +78 58 optimizer.lr 0.036970897883213394 +78 58 training.batch_size 1.0 +78 58 training.label_smoothing 0.009734418455300818 +78 59 model.output_channels 44.0 +78 59 model.input_dropout 0.3033908654113141 +78 59 model.output_dropout 0.4794864727305418 +78 59 model.feature_map_dropout 0.20045606209203393 +78 59 model.embedding_dim 1.0 +78 59 optimizer.lr 0.07140991538566184 +78 59 training.batch_size 2.0 +78 59 training.label_smoothing 0.3713392526351075 +78 60 model.output_channels 26.0 +78 60 model.input_dropout 0.18019534803055054 +78 60 model.output_dropout 0.46332432257781403 +78 60 model.feature_map_dropout 0.3295163499712781 +78 60 model.embedding_dim 1.0 +78 60 optimizer.lr 0.0038303192025967614 +78 60 training.batch_size 0.0 +78 60 training.label_smoothing 0.9161267711882521 +78 61 model.output_channels 62.0 +78 61 model.input_dropout 
0.1936978666158457 +78 61 model.output_dropout 0.39441456735499275 +78 61 model.feature_map_dropout 0.3070098411569426 +78 61 model.embedding_dim 2.0 +78 61 optimizer.lr 0.024641086241058277 +78 61 training.batch_size 0.0 +78 61 training.label_smoothing 0.19191049608726318 +78 62 model.output_channels 40.0 +78 62 model.input_dropout 0.2038484038930169 +78 62 model.output_dropout 0.28013628587290107 +78 62 model.feature_map_dropout 0.2797876343605951 +78 62 model.embedding_dim 0.0 +78 62 optimizer.lr 0.009010374905528789 +78 62 training.batch_size 2.0 +78 62 training.label_smoothing 0.1749605199019582 +78 63 model.output_channels 45.0 +78 63 model.input_dropout 0.43211917393571875 +78 63 model.output_dropout 0.40828128734189334 +78 63 model.feature_map_dropout 0.15993647361155755 +78 63 model.embedding_dim 2.0 +78 63 optimizer.lr 0.031981088897329174 +78 63 training.batch_size 1.0 +78 63 training.label_smoothing 0.017641426851891455 +78 64 model.output_channels 63.0 +78 64 model.input_dropout 0.15015470779469225 +78 64 model.output_dropout 0.487034087812847 +78 64 model.feature_map_dropout 0.31856737574280275 +78 64 model.embedding_dim 2.0 +78 64 optimizer.lr 0.029598378235948967 +78 64 training.batch_size 0.0 +78 64 training.label_smoothing 0.02071534601473246 +78 65 model.output_channels 46.0 +78 65 model.input_dropout 0.13437206071393665 +78 65 model.output_dropout 0.4636665146152169 +78 65 model.feature_map_dropout 0.4919088776666393 +78 65 model.embedding_dim 1.0 +78 65 optimizer.lr 0.040749862136867265 +78 65 training.batch_size 0.0 +78 65 training.label_smoothing 0.7463251198524886 +78 66 model.output_channels 39.0 +78 66 model.input_dropout 0.4540316624657498 +78 66 model.output_dropout 0.1887009668472855 +78 66 model.feature_map_dropout 0.4035063936526871 +78 66 model.embedding_dim 2.0 +78 66 optimizer.lr 0.0010510641190020489 +78 66 training.batch_size 2.0 +78 66 training.label_smoothing 0.08835091824123208 +78 67 model.output_channels 41.0 +78 67 
model.input_dropout 0.08326745126118934 +78 67 model.output_dropout 0.22966736644434138 +78 67 model.feature_map_dropout 0.30989789437720117 +78 67 model.embedding_dim 1.0 +78 67 optimizer.lr 0.0014754162867080347 +78 67 training.batch_size 0.0 +78 67 training.label_smoothing 0.6170590163644015 +78 68 model.output_channels 57.0 +78 68 model.input_dropout 0.17130958557472709 +78 68 model.output_dropout 0.047643145961678846 +78 68 model.feature_map_dropout 0.48095499372866357 +78 68 model.embedding_dim 0.0 +78 68 optimizer.lr 0.07125634067662091 +78 68 training.batch_size 1.0 +78 68 training.label_smoothing 0.0010900156295397415 +78 69 model.output_channels 33.0 +78 69 model.input_dropout 0.12968424622267782 +78 69 model.output_dropout 0.19600942636991447 +78 69 model.feature_map_dropout 0.16416723505161346 +78 69 model.embedding_dim 2.0 +78 69 optimizer.lr 0.0011977794724531469 +78 69 training.batch_size 0.0 +78 69 training.label_smoothing 0.047243366356771045 +78 70 model.output_channels 54.0 +78 70 model.input_dropout 0.30959256487373765 +78 70 model.output_dropout 0.2150700779764131 +78 70 model.feature_map_dropout 0.03695347299174084 +78 70 model.embedding_dim 1.0 +78 70 optimizer.lr 0.007114368251588657 +78 70 training.batch_size 1.0 +78 70 training.label_smoothing 0.21216831245882364 +78 71 model.output_channels 54.0 +78 71 model.input_dropout 0.17562822995820576 +78 71 model.output_dropout 0.4502313116757241 +78 71 model.feature_map_dropout 0.2805778612629356 +78 71 model.embedding_dim 0.0 +78 71 optimizer.lr 0.031948048184403804 +78 71 training.batch_size 2.0 +78 71 training.label_smoothing 0.0014985056628195105 +78 72 model.output_channels 31.0 +78 72 model.input_dropout 0.193209489644582 +78 72 model.output_dropout 0.48494473389469367 +78 72 model.feature_map_dropout 0.36860828840884813 +78 72 model.embedding_dim 2.0 +78 72 optimizer.lr 0.02100843135254723 +78 72 training.batch_size 0.0 +78 72 training.label_smoothing 0.004463000942531158 +78 73 
model.output_channels 58.0 +78 73 model.input_dropout 0.03720675833919973 +78 73 model.output_dropout 0.03814668797821358 +78 73 model.feature_map_dropout 0.44742933261232715 +78 73 model.embedding_dim 1.0 +78 73 optimizer.lr 0.04693953139244328 +78 73 training.batch_size 2.0 +78 73 training.label_smoothing 0.00433082482366662 +78 74 model.output_channels 54.0 +78 74 model.input_dropout 0.30177245497240324 +78 74 model.output_dropout 0.36506165507003346 +78 74 model.feature_map_dropout 0.020123322548327383 +78 74 model.embedding_dim 1.0 +78 74 optimizer.lr 0.011939655896834182 +78 74 training.batch_size 0.0 +78 74 training.label_smoothing 0.004723145985128724 +78 75 model.output_channels 47.0 +78 75 model.input_dropout 0.262064869506703 +78 75 model.output_dropout 0.48473961255622283 +78 75 model.feature_map_dropout 0.11900272323456046 +78 75 model.embedding_dim 1.0 +78 75 optimizer.lr 0.05040536880327026 +78 75 training.batch_size 1.0 +78 75 training.label_smoothing 0.05880093403633646 +78 76 model.output_channels 41.0 +78 76 model.input_dropout 0.48322696155432854 +78 76 model.output_dropout 0.4696261764178969 +78 76 model.feature_map_dropout 0.22269037454982243 +78 76 model.embedding_dim 1.0 +78 76 optimizer.lr 0.005289178904064468 +78 76 training.batch_size 1.0 +78 76 training.label_smoothing 0.0044569628536692205 +78 77 model.output_channels 39.0 +78 77 model.input_dropout 0.1430713843584981 +78 77 model.output_dropout 0.13331100670704987 +78 77 model.feature_map_dropout 0.3795383043317083 +78 77 model.embedding_dim 2.0 +78 77 optimizer.lr 0.0016069499187066425 +78 77 training.batch_size 1.0 +78 77 training.label_smoothing 0.0038031583001946053 +78 78 model.output_channels 48.0 +78 78 model.input_dropout 0.2995571354554128 +78 78 model.output_dropout 0.09080052121945548 +78 78 model.feature_map_dropout 0.37012452928923584 +78 78 model.embedding_dim 2.0 +78 78 optimizer.lr 0.01023536848177458 +78 78 training.batch_size 1.0 +78 78 training.label_smoothing 
0.0793112170823181 +78 79 model.output_channels 35.0 +78 79 model.input_dropout 0.4148040684403315 +78 79 model.output_dropout 0.3870788056366322 +78 79 model.feature_map_dropout 0.22755561050054568 +78 79 model.embedding_dim 0.0 +78 79 optimizer.lr 0.02827337390063627 +78 79 training.batch_size 2.0 +78 79 training.label_smoothing 0.062396887867439764 +78 80 model.output_channels 45.0 +78 80 model.input_dropout 0.11194714239581988 +78 80 model.output_dropout 0.061960297869334435 +78 80 model.feature_map_dropout 0.23054149100788346 +78 80 model.embedding_dim 0.0 +78 80 optimizer.lr 0.0011310866308318888 +78 80 training.batch_size 0.0 +78 80 training.label_smoothing 0.004389763593916153 +78 81 model.output_channels 53.0 +78 81 model.input_dropout 0.33987038531737385 +78 81 model.output_dropout 0.3303635296040148 +78 81 model.feature_map_dropout 0.2944288279345909 +78 81 model.embedding_dim 2.0 +78 81 optimizer.lr 0.0014962677789420583 +78 81 training.batch_size 0.0 +78 81 training.label_smoothing 0.014357754775652322 +78 82 model.output_channels 33.0 +78 82 model.input_dropout 0.2171457505380734 +78 82 model.output_dropout 0.42761798071819745 +78 82 model.feature_map_dropout 0.1022196083456901 +78 82 model.embedding_dim 2.0 +78 82 optimizer.lr 0.0012089966842565843 +78 82 training.batch_size 0.0 +78 82 training.label_smoothing 0.09407471424638984 +78 83 model.output_channels 16.0 +78 83 model.input_dropout 0.4409405855960832 +78 83 model.output_dropout 0.13227478669456494 +78 83 model.feature_map_dropout 0.13009188553654039 +78 83 model.embedding_dim 2.0 +78 83 optimizer.lr 0.00596181657322495 +78 83 training.batch_size 0.0 +78 83 training.label_smoothing 0.00489084828967044 +78 84 model.output_channels 27.0 +78 84 model.input_dropout 0.042960516940945714 +78 84 model.output_dropout 0.40750964007731505 +78 84 model.feature_map_dropout 0.011535532350674782 +78 84 model.embedding_dim 2.0 +78 84 optimizer.lr 0.01432303914519248 +78 84 training.batch_size 2.0 +78 84 
training.label_smoothing 0.45938826493428037 +78 85 model.output_channels 51.0 +78 85 model.input_dropout 0.20196658439598852 +78 85 model.output_dropout 0.27287658946345883 +78 85 model.feature_map_dropout 0.13664580917550034 +78 85 model.embedding_dim 1.0 +78 85 optimizer.lr 0.014209438010386445 +78 85 training.batch_size 0.0 +78 85 training.label_smoothing 0.6409199301964368 +78 86 model.output_channels 58.0 +78 86 model.input_dropout 0.23480336046700062 +78 86 model.output_dropout 0.4422915349090423 +78 86 model.feature_map_dropout 0.2304580196991381 +78 86 model.embedding_dim 0.0 +78 86 optimizer.lr 0.00232762496850451 +78 86 training.batch_size 2.0 +78 86 training.label_smoothing 0.0030205759066013788 +78 87 model.output_channels 54.0 +78 87 model.input_dropout 0.21274080868547063 +78 87 model.output_dropout 0.3458681308682281 +78 87 model.feature_map_dropout 0.4261288359398752 +78 87 model.embedding_dim 2.0 +78 87 optimizer.lr 0.007340580011134119 +78 87 training.batch_size 2.0 +78 87 training.label_smoothing 0.3147557877153369 +78 88 model.output_channels 47.0 +78 88 model.input_dropout 0.049588719896554 +78 88 model.output_dropout 0.3258018187345059 +78 88 model.feature_map_dropout 0.41873421707413744 +78 88 model.embedding_dim 0.0 +78 88 optimizer.lr 0.023681301711550147 +78 88 training.batch_size 0.0 +78 88 training.label_smoothing 0.004352936518503779 +78 89 model.output_channels 32.0 +78 89 model.input_dropout 0.33747588799069567 +78 89 model.output_dropout 0.01431029240239201 +78 89 model.feature_map_dropout 0.08945140248457012 +78 89 model.embedding_dim 1.0 +78 89 optimizer.lr 0.06156372580227208 +78 89 training.batch_size 0.0 +78 89 training.label_smoothing 0.0025068853087486607 +78 90 model.output_channels 60.0 +78 90 model.input_dropout 0.42023234721396313 +78 90 model.output_dropout 0.01653874022564361 +78 90 model.feature_map_dropout 0.18540866705789139 +78 90 model.embedding_dim 1.0 +78 90 optimizer.lr 0.06232321792566169 +78 90 
training.batch_size 0.0 +78 90 training.label_smoothing 0.1290419158121033 +78 91 model.output_channels 16.0 +78 91 model.input_dropout 0.0055097078026460355 +78 91 model.output_dropout 0.48142526095070803 +78 91 model.feature_map_dropout 0.42540160282534517 +78 91 model.embedding_dim 0.0 +78 91 optimizer.lr 0.003998661468703448 +78 91 training.batch_size 1.0 +78 91 training.label_smoothing 0.10127609651323312 +78 92 model.output_channels 39.0 +78 92 model.input_dropout 0.10396837056117336 +78 92 model.output_dropout 0.21957240685368928 +78 92 model.feature_map_dropout 0.03302653615964135 +78 92 model.embedding_dim 1.0 +78 92 optimizer.lr 0.004545224608699516 +78 92 training.batch_size 2.0 +78 92 training.label_smoothing 0.5702972636921008 +78 93 model.output_channels 26.0 +78 93 model.input_dropout 0.040983274415994086 +78 93 model.output_dropout 0.28287971793949884 +78 93 model.feature_map_dropout 0.4750774755214211 +78 93 model.embedding_dim 0.0 +78 93 optimizer.lr 0.008126058564269144 +78 93 training.batch_size 0.0 +78 93 training.label_smoothing 0.020629322491019123 +78 94 model.output_channels 60.0 +78 94 model.input_dropout 0.11581424949547053 +78 94 model.output_dropout 0.4507413071806997 +78 94 model.feature_map_dropout 0.24706819636328559 +78 94 model.embedding_dim 1.0 +78 94 optimizer.lr 0.01825099527617392 +78 94 training.batch_size 0.0 +78 94 training.label_smoothing 0.030955582692030144 +78 95 model.output_channels 62.0 +78 95 model.input_dropout 0.25982460096560583 +78 95 model.output_dropout 0.22986943171836516 +78 95 model.feature_map_dropout 0.46410013186441823 +78 95 model.embedding_dim 2.0 +78 95 optimizer.lr 0.01067279996211332 +78 95 training.batch_size 0.0 +78 95 training.label_smoothing 0.0018352112511962114 +78 96 model.output_channels 39.0 +78 96 model.input_dropout 0.24446409559445054 +78 96 model.output_dropout 0.48866788525083665 +78 96 model.feature_map_dropout 0.3393731162931345 +78 96 model.embedding_dim 0.0 +78 96 optimizer.lr 
0.02460156386618434 +78 96 training.batch_size 1.0 +78 96 training.label_smoothing 0.001080521763048021 +78 97 model.output_channels 40.0 +78 97 model.input_dropout 0.4070041648939242 +78 97 model.output_dropout 0.47546274866876076 +78 97 model.feature_map_dropout 0.3335976745056121 +78 97 model.embedding_dim 0.0 +78 97 optimizer.lr 0.010793903652306609 +78 97 training.batch_size 2.0 +78 97 training.label_smoothing 0.017009467773929983 +78 98 model.output_channels 25.0 +78 98 model.input_dropout 0.3263655600217785 +78 98 model.output_dropout 0.07190590724317486 +78 98 model.feature_map_dropout 0.2842400625968969 +78 98 model.embedding_dim 1.0 +78 98 optimizer.lr 0.00137184403871377 +78 98 training.batch_size 0.0 +78 98 training.label_smoothing 0.007968351433634628 +78 99 model.output_channels 40.0 +78 99 model.input_dropout 0.3941263039058917 +78 99 model.output_dropout 0.13544521656037029 +78 99 model.feature_map_dropout 0.3436615901589404 +78 99 model.embedding_dim 0.0 +78 99 optimizer.lr 0.054517455053778745 +78 99 training.batch_size 2.0 +78 99 training.label_smoothing 0.022944540138335773 +78 100 model.output_channels 64.0 +78 100 model.input_dropout 0.22496648255276452 +78 100 model.output_dropout 0.4063570537630157 +78 100 model.feature_map_dropout 0.18440175663264546 +78 100 model.embedding_dim 1.0 +78 100 optimizer.lr 0.004947087725133102 +78 100 training.batch_size 1.0 +78 100 training.label_smoothing 0.003435623241935345 +78 1 dataset """kinships""" +78 1 model """conve""" +78 1 loss """bceaftersigmoid""" +78 1 regularizer """no""" +78 1 optimizer """adam""" +78 1 training_loop """lcwa""" +78 1 evaluator """rankbased""" +78 2 dataset """kinships""" +78 2 model """conve""" +78 2 loss """bceaftersigmoid""" +78 2 regularizer """no""" +78 2 optimizer """adam""" +78 2 training_loop """lcwa""" +78 2 evaluator """rankbased""" +78 3 dataset """kinships""" +78 3 model """conve""" +78 3 loss """bceaftersigmoid""" +78 3 regularizer """no""" +78 3 optimizer 
"""adam""" +78 3 training_loop """lcwa""" +78 3 evaluator """rankbased""" +78 4 dataset """kinships""" +78 4 model """conve""" +78 4 loss """bceaftersigmoid""" +78 4 regularizer """no""" +78 4 optimizer """adam""" +78 4 training_loop """lcwa""" +78 4 evaluator """rankbased""" +78 5 dataset """kinships""" +78 5 model """conve""" +78 5 loss """bceaftersigmoid""" +78 5 regularizer """no""" +78 5 optimizer """adam""" +78 5 training_loop """lcwa""" +78 5 evaluator """rankbased""" +78 6 dataset """kinships""" +78 6 model """conve""" +78 6 loss """bceaftersigmoid""" +78 6 regularizer """no""" +78 6 optimizer """adam""" +78 6 training_loop """lcwa""" +78 6 evaluator """rankbased""" +78 7 dataset """kinships""" +78 7 model """conve""" +78 7 loss """bceaftersigmoid""" +78 7 regularizer """no""" +78 7 optimizer """adam""" +78 7 training_loop """lcwa""" +78 7 evaluator """rankbased""" +78 8 dataset """kinships""" +78 8 model """conve""" +78 8 loss """bceaftersigmoid""" +78 8 regularizer """no""" +78 8 optimizer """adam""" +78 8 training_loop """lcwa""" +78 8 evaluator """rankbased""" +78 9 dataset """kinships""" +78 9 model """conve""" +78 9 loss """bceaftersigmoid""" +78 9 regularizer """no""" +78 9 optimizer """adam""" +78 9 training_loop """lcwa""" +78 9 evaluator """rankbased""" +78 10 dataset """kinships""" +78 10 model """conve""" +78 10 loss """bceaftersigmoid""" +78 10 regularizer """no""" +78 10 optimizer """adam""" +78 10 training_loop """lcwa""" +78 10 evaluator """rankbased""" +78 11 dataset """kinships""" +78 11 model """conve""" +78 11 loss """bceaftersigmoid""" +78 11 regularizer """no""" +78 11 optimizer """adam""" +78 11 training_loop """lcwa""" +78 11 evaluator """rankbased""" +78 12 dataset """kinships""" +78 12 model """conve""" +78 12 loss """bceaftersigmoid""" +78 12 regularizer """no""" +78 12 optimizer """adam""" +78 12 training_loop """lcwa""" +78 12 evaluator """rankbased""" +78 13 dataset """kinships""" +78 13 model """conve""" +78 13 loss 
"""bceaftersigmoid""" +78 13 regularizer """no""" +78 13 optimizer """adam""" +78 13 training_loop """lcwa""" +78 13 evaluator """rankbased""" +78 14 dataset """kinships""" +78 14 model """conve""" +78 14 loss """bceaftersigmoid""" +78 14 regularizer """no""" +78 14 optimizer """adam""" +78 14 training_loop """lcwa""" +78 14 evaluator """rankbased""" +78 15 dataset """kinships""" +78 15 model """conve""" +78 15 loss """bceaftersigmoid""" +78 15 regularizer """no""" +78 15 optimizer """adam""" +78 15 training_loop """lcwa""" +78 15 evaluator """rankbased""" +78 16 dataset """kinships""" +78 16 model """conve""" +78 16 loss """bceaftersigmoid""" +78 16 regularizer """no""" +78 16 optimizer """adam""" +78 16 training_loop """lcwa""" +78 16 evaluator """rankbased""" +78 17 dataset """kinships""" +78 17 model """conve""" +78 17 loss """bceaftersigmoid""" +78 17 regularizer """no""" +78 17 optimizer """adam""" +78 17 training_loop """lcwa""" +78 17 evaluator """rankbased""" +78 18 dataset """kinships""" +78 18 model """conve""" +78 18 loss """bceaftersigmoid""" +78 18 regularizer """no""" +78 18 optimizer """adam""" +78 18 training_loop """lcwa""" +78 18 evaluator """rankbased""" +78 19 dataset """kinships""" +78 19 model """conve""" +78 19 loss """bceaftersigmoid""" +78 19 regularizer """no""" +78 19 optimizer """adam""" +78 19 training_loop """lcwa""" +78 19 evaluator """rankbased""" +78 20 dataset """kinships""" +78 20 model """conve""" +78 20 loss """bceaftersigmoid""" +78 20 regularizer """no""" +78 20 optimizer """adam""" +78 20 training_loop """lcwa""" +78 20 evaluator """rankbased""" +78 21 dataset """kinships""" +78 21 model """conve""" +78 21 loss """bceaftersigmoid""" +78 21 regularizer """no""" +78 21 optimizer """adam""" +78 21 training_loop """lcwa""" +78 21 evaluator """rankbased""" +78 22 dataset """kinships""" +78 22 model """conve""" +78 22 loss """bceaftersigmoid""" +78 22 regularizer """no""" +78 22 optimizer """adam""" +78 22 training_loop """lcwa""" 
+78 22 evaluator """rankbased""" +78 23 dataset """kinships""" +78 23 model """conve""" +78 23 loss """bceaftersigmoid""" +78 23 regularizer """no""" +78 23 optimizer """adam""" +78 23 training_loop """lcwa""" +78 23 evaluator """rankbased""" +78 24 dataset """kinships""" +78 24 model """conve""" +78 24 loss """bceaftersigmoid""" +78 24 regularizer """no""" +78 24 optimizer """adam""" +78 24 training_loop """lcwa""" +78 24 evaluator """rankbased""" +78 25 dataset """kinships""" +78 25 model """conve""" +78 25 loss """bceaftersigmoid""" +78 25 regularizer """no""" +78 25 optimizer """adam""" +78 25 training_loop """lcwa""" +78 25 evaluator """rankbased""" +78 26 dataset """kinships""" +78 26 model """conve""" +78 26 loss """bceaftersigmoid""" +78 26 regularizer """no""" +78 26 optimizer """adam""" +78 26 training_loop """lcwa""" +78 26 evaluator """rankbased""" +78 27 dataset """kinships""" +78 27 model """conve""" +78 27 loss """bceaftersigmoid""" +78 27 regularizer """no""" +78 27 optimizer """adam""" +78 27 training_loop """lcwa""" +78 27 evaluator """rankbased""" +78 28 dataset """kinships""" +78 28 model """conve""" +78 28 loss """bceaftersigmoid""" +78 28 regularizer """no""" +78 28 optimizer """adam""" +78 28 training_loop """lcwa""" +78 28 evaluator """rankbased""" +78 29 dataset """kinships""" +78 29 model """conve""" +78 29 loss """bceaftersigmoid""" +78 29 regularizer """no""" +78 29 optimizer """adam""" +78 29 training_loop """lcwa""" +78 29 evaluator """rankbased""" +78 30 dataset """kinships""" +78 30 model """conve""" +78 30 loss """bceaftersigmoid""" +78 30 regularizer """no""" +78 30 optimizer """adam""" +78 30 training_loop """lcwa""" +78 30 evaluator """rankbased""" +78 31 dataset """kinships""" +78 31 model """conve""" +78 31 loss """bceaftersigmoid""" +78 31 regularizer """no""" +78 31 optimizer """adam""" +78 31 training_loop """lcwa""" +78 31 evaluator """rankbased""" +78 32 dataset """kinships""" +78 32 model """conve""" +78 32 loss 
"""bceaftersigmoid""" +78 32 regularizer """no""" +78 32 optimizer """adam""" +78 32 training_loop """lcwa""" +78 32 evaluator """rankbased""" +78 33 dataset """kinships""" +78 33 model """conve""" +78 33 loss """bceaftersigmoid""" +78 33 regularizer """no""" +78 33 optimizer """adam""" +78 33 training_loop """lcwa""" +78 33 evaluator """rankbased""" +78 34 dataset """kinships""" +78 34 model """conve""" +78 34 loss """bceaftersigmoid""" +78 34 regularizer """no""" +78 34 optimizer """adam""" +78 34 training_loop """lcwa""" +78 34 evaluator """rankbased""" +78 35 dataset """kinships""" +78 35 model """conve""" +78 35 loss """bceaftersigmoid""" +78 35 regularizer """no""" +78 35 optimizer """adam""" +78 35 training_loop """lcwa""" +78 35 evaluator """rankbased""" +78 36 dataset """kinships""" +78 36 model """conve""" +78 36 loss """bceaftersigmoid""" +78 36 regularizer """no""" +78 36 optimizer """adam""" +78 36 training_loop """lcwa""" +78 36 evaluator """rankbased""" +78 37 dataset """kinships""" +78 37 model """conve""" +78 37 loss """bceaftersigmoid""" +78 37 regularizer """no""" +78 37 optimizer """adam""" +78 37 training_loop """lcwa""" +78 37 evaluator """rankbased""" +78 38 dataset """kinships""" +78 38 model """conve""" +78 38 loss """bceaftersigmoid""" +78 38 regularizer """no""" +78 38 optimizer """adam""" +78 38 training_loop """lcwa""" +78 38 evaluator """rankbased""" +78 39 dataset """kinships""" +78 39 model """conve""" +78 39 loss """bceaftersigmoid""" +78 39 regularizer """no""" +78 39 optimizer """adam""" +78 39 training_loop """lcwa""" +78 39 evaluator """rankbased""" +78 40 dataset """kinships""" +78 40 model """conve""" +78 40 loss """bceaftersigmoid""" +78 40 regularizer """no""" +78 40 optimizer """adam""" +78 40 training_loop """lcwa""" +78 40 evaluator """rankbased""" +78 41 dataset """kinships""" +78 41 model """conve""" +78 41 loss """bceaftersigmoid""" +78 41 regularizer """no""" +78 41 optimizer """adam""" +78 41 training_loop """lcwa""" 
+78 41 evaluator """rankbased""" +78 42 dataset """kinships""" +78 42 model """conve""" +78 42 loss """bceaftersigmoid""" +78 42 regularizer """no""" +78 42 optimizer """adam""" +78 42 training_loop """lcwa""" +78 42 evaluator """rankbased""" +78 43 dataset """kinships""" +78 43 model """conve""" +78 43 loss """bceaftersigmoid""" +78 43 regularizer """no""" +78 43 optimizer """adam""" +78 43 training_loop """lcwa""" +78 43 evaluator """rankbased""" +78 44 dataset """kinships""" +78 44 model """conve""" +78 44 loss """bceaftersigmoid""" +78 44 regularizer """no""" +78 44 optimizer """adam""" +78 44 training_loop """lcwa""" +78 44 evaluator """rankbased""" +78 45 dataset """kinships""" +78 45 model """conve""" +78 45 loss """bceaftersigmoid""" +78 45 regularizer """no""" +78 45 optimizer """adam""" +78 45 training_loop """lcwa""" +78 45 evaluator """rankbased""" +78 46 dataset """kinships""" +78 46 model """conve""" +78 46 loss """bceaftersigmoid""" +78 46 regularizer """no""" +78 46 optimizer """adam""" +78 46 training_loop """lcwa""" +78 46 evaluator """rankbased""" +78 47 dataset """kinships""" +78 47 model """conve""" +78 47 loss """bceaftersigmoid""" +78 47 regularizer """no""" +78 47 optimizer """adam""" +78 47 training_loop """lcwa""" +78 47 evaluator """rankbased""" +78 48 dataset """kinships""" +78 48 model """conve""" +78 48 loss """bceaftersigmoid""" +78 48 regularizer """no""" +78 48 optimizer """adam""" +78 48 training_loop """lcwa""" +78 48 evaluator """rankbased""" +78 49 dataset """kinships""" +78 49 model """conve""" +78 49 loss """bceaftersigmoid""" +78 49 regularizer """no""" +78 49 optimizer """adam""" +78 49 training_loop """lcwa""" +78 49 evaluator """rankbased""" +78 50 dataset """kinships""" +78 50 model """conve""" +78 50 loss """bceaftersigmoid""" +78 50 regularizer """no""" +78 50 optimizer """adam""" +78 50 training_loop """lcwa""" +78 50 evaluator """rankbased""" +78 51 dataset """kinships""" +78 51 model """conve""" +78 51 loss 
"""bceaftersigmoid""" +78 51 regularizer """no""" +78 51 optimizer """adam""" +78 51 training_loop """lcwa""" +78 51 evaluator """rankbased""" +78 52 dataset """kinships""" +78 52 model """conve""" +78 52 loss """bceaftersigmoid""" +78 52 regularizer """no""" +78 52 optimizer """adam""" +78 52 training_loop """lcwa""" +78 52 evaluator """rankbased""" +78 53 dataset """kinships""" +78 53 model """conve""" +78 53 loss """bceaftersigmoid""" +78 53 regularizer """no""" +78 53 optimizer """adam""" +78 53 training_loop """lcwa""" +78 53 evaluator """rankbased""" +78 54 dataset """kinships""" +78 54 model """conve""" +78 54 loss """bceaftersigmoid""" +78 54 regularizer """no""" +78 54 optimizer """adam""" +78 54 training_loop """lcwa""" +78 54 evaluator """rankbased""" +78 55 dataset """kinships""" +78 55 model """conve""" +78 55 loss """bceaftersigmoid""" +78 55 regularizer """no""" +78 55 optimizer """adam""" +78 55 training_loop """lcwa""" +78 55 evaluator """rankbased""" +78 56 dataset """kinships""" +78 56 model """conve""" +78 56 loss """bceaftersigmoid""" +78 56 regularizer """no""" +78 56 optimizer """adam""" +78 56 training_loop """lcwa""" +78 56 evaluator """rankbased""" +78 57 dataset """kinships""" +78 57 model """conve""" +78 57 loss """bceaftersigmoid""" +78 57 regularizer """no""" +78 57 optimizer """adam""" +78 57 training_loop """lcwa""" +78 57 evaluator """rankbased""" +78 58 dataset """kinships""" +78 58 model """conve""" +78 58 loss """bceaftersigmoid""" +78 58 regularizer """no""" +78 58 optimizer """adam""" +78 58 training_loop """lcwa""" +78 58 evaluator """rankbased""" +78 59 dataset """kinships""" +78 59 model """conve""" +78 59 loss """bceaftersigmoid""" +78 59 regularizer """no""" +78 59 optimizer """adam""" +78 59 training_loop """lcwa""" +78 59 evaluator """rankbased""" +78 60 dataset """kinships""" +78 60 model """conve""" +78 60 loss """bceaftersigmoid""" +78 60 regularizer """no""" +78 60 optimizer """adam""" +78 60 training_loop """lcwa""" 
+78 60 evaluator """rankbased""" +78 61 dataset """kinships""" +78 61 model """conve""" +78 61 loss """bceaftersigmoid""" +78 61 regularizer """no""" +78 61 optimizer """adam""" +78 61 training_loop """lcwa""" +78 61 evaluator """rankbased""" +78 62 dataset """kinships""" +78 62 model """conve""" +78 62 loss """bceaftersigmoid""" +78 62 regularizer """no""" +78 62 optimizer """adam""" +78 62 training_loop """lcwa""" +78 62 evaluator """rankbased""" +78 63 dataset """kinships""" +78 63 model """conve""" +78 63 loss """bceaftersigmoid""" +78 63 regularizer """no""" +78 63 optimizer """adam""" +78 63 training_loop """lcwa""" +78 63 evaluator """rankbased""" +78 64 dataset """kinships""" +78 64 model """conve""" +78 64 loss """bceaftersigmoid""" +78 64 regularizer """no""" +78 64 optimizer """adam""" +78 64 training_loop """lcwa""" +78 64 evaluator """rankbased""" +78 65 dataset """kinships""" +78 65 model """conve""" +78 65 loss """bceaftersigmoid""" +78 65 regularizer """no""" +78 65 optimizer """adam""" +78 65 training_loop """lcwa""" +78 65 evaluator """rankbased""" +78 66 dataset """kinships""" +78 66 model """conve""" +78 66 loss """bceaftersigmoid""" +78 66 regularizer """no""" +78 66 optimizer """adam""" +78 66 training_loop """lcwa""" +78 66 evaluator """rankbased""" +78 67 dataset """kinships""" +78 67 model """conve""" +78 67 loss """bceaftersigmoid""" +78 67 regularizer """no""" +78 67 optimizer """adam""" +78 67 training_loop """lcwa""" +78 67 evaluator """rankbased""" +78 68 dataset """kinships""" +78 68 model """conve""" +78 68 loss """bceaftersigmoid""" +78 68 regularizer """no""" +78 68 optimizer """adam""" +78 68 training_loop """lcwa""" +78 68 evaluator """rankbased""" +78 69 dataset """kinships""" +78 69 model """conve""" +78 69 loss """bceaftersigmoid""" +78 69 regularizer """no""" +78 69 optimizer """adam""" +78 69 training_loop """lcwa""" +78 69 evaluator """rankbased""" +78 70 dataset """kinships""" +78 70 model """conve""" +78 70 loss 
"""bceaftersigmoid""" +78 70 regularizer """no""" +78 70 optimizer """adam""" +78 70 training_loop """lcwa""" +78 70 evaluator """rankbased""" +78 71 dataset """kinships""" +78 71 model """conve""" +78 71 loss """bceaftersigmoid""" +78 71 regularizer """no""" +78 71 optimizer """adam""" +78 71 training_loop """lcwa""" +78 71 evaluator """rankbased""" +78 72 dataset """kinships""" +78 72 model """conve""" +78 72 loss """bceaftersigmoid""" +78 72 regularizer """no""" +78 72 optimizer """adam""" +78 72 training_loop """lcwa""" +78 72 evaluator """rankbased""" +78 73 dataset """kinships""" +78 73 model """conve""" +78 73 loss """bceaftersigmoid""" +78 73 regularizer """no""" +78 73 optimizer """adam""" +78 73 training_loop """lcwa""" +78 73 evaluator """rankbased""" +78 74 dataset """kinships""" +78 74 model """conve""" +78 74 loss """bceaftersigmoid""" +78 74 regularizer """no""" +78 74 optimizer """adam""" +78 74 training_loop """lcwa""" +78 74 evaluator """rankbased""" +78 75 dataset """kinships""" +78 75 model """conve""" +78 75 loss """bceaftersigmoid""" +78 75 regularizer """no""" +78 75 optimizer """adam""" +78 75 training_loop """lcwa""" +78 75 evaluator """rankbased""" +78 76 dataset """kinships""" +78 76 model """conve""" +78 76 loss """bceaftersigmoid""" +78 76 regularizer """no""" +78 76 optimizer """adam""" +78 76 training_loop """lcwa""" +78 76 evaluator """rankbased""" +78 77 dataset """kinships""" +78 77 model """conve""" +78 77 loss """bceaftersigmoid""" +78 77 regularizer """no""" +78 77 optimizer """adam""" +78 77 training_loop """lcwa""" +78 77 evaluator """rankbased""" +78 78 dataset """kinships""" +78 78 model """conve""" +78 78 loss """bceaftersigmoid""" +78 78 regularizer """no""" +78 78 optimizer """adam""" +78 78 training_loop """lcwa""" +78 78 evaluator """rankbased""" +78 79 dataset """kinships""" +78 79 model """conve""" +78 79 loss """bceaftersigmoid""" +78 79 regularizer """no""" +78 79 optimizer """adam""" +78 79 training_loop """lcwa""" 
+78 79 evaluator """rankbased""" +78 80 dataset """kinships""" +78 80 model """conve""" +78 80 loss """bceaftersigmoid""" +78 80 regularizer """no""" +78 80 optimizer """adam""" +78 80 training_loop """lcwa""" +78 80 evaluator """rankbased""" +78 81 dataset """kinships""" +78 81 model """conve""" +78 81 loss """bceaftersigmoid""" +78 81 regularizer """no""" +78 81 optimizer """adam""" +78 81 training_loop """lcwa""" +78 81 evaluator """rankbased""" +78 82 dataset """kinships""" +78 82 model """conve""" +78 82 loss """bceaftersigmoid""" +78 82 regularizer """no""" +78 82 optimizer """adam""" +78 82 training_loop """lcwa""" +78 82 evaluator """rankbased""" +78 83 dataset """kinships""" +78 83 model """conve""" +78 83 loss """bceaftersigmoid""" +78 83 regularizer """no""" +78 83 optimizer """adam""" +78 83 training_loop """lcwa""" +78 83 evaluator """rankbased""" +78 84 dataset """kinships""" +78 84 model """conve""" +78 84 loss """bceaftersigmoid""" +78 84 regularizer """no""" +78 84 optimizer """adam""" +78 84 training_loop """lcwa""" +78 84 evaluator """rankbased""" +78 85 dataset """kinships""" +78 85 model """conve""" +78 85 loss """bceaftersigmoid""" +78 85 regularizer """no""" +78 85 optimizer """adam""" +78 85 training_loop """lcwa""" +78 85 evaluator """rankbased""" +78 86 dataset """kinships""" +78 86 model """conve""" +78 86 loss """bceaftersigmoid""" +78 86 regularizer """no""" +78 86 optimizer """adam""" +78 86 training_loop """lcwa""" +78 86 evaluator """rankbased""" +78 87 dataset """kinships""" +78 87 model """conve""" +78 87 loss """bceaftersigmoid""" +78 87 regularizer """no""" +78 87 optimizer """adam""" +78 87 training_loop """lcwa""" +78 87 evaluator """rankbased""" +78 88 dataset """kinships""" +78 88 model """conve""" +78 88 loss """bceaftersigmoid""" +78 88 regularizer """no""" +78 88 optimizer """adam""" +78 88 training_loop """lcwa""" +78 88 evaluator """rankbased""" +78 89 dataset """kinships""" +78 89 model """conve""" +78 89 loss 
"""bceaftersigmoid""" +78 89 regularizer """no""" +78 89 optimizer """adam""" +78 89 training_loop """lcwa""" +78 89 evaluator """rankbased""" +78 90 dataset """kinships""" +78 90 model """conve""" +78 90 loss """bceaftersigmoid""" +78 90 regularizer """no""" +78 90 optimizer """adam""" +78 90 training_loop """lcwa""" +78 90 evaluator """rankbased""" +78 91 dataset """kinships""" +78 91 model """conve""" +78 91 loss """bceaftersigmoid""" +78 91 regularizer """no""" +78 91 optimizer """adam""" +78 91 training_loop """lcwa""" +78 91 evaluator """rankbased""" +78 92 dataset """kinships""" +78 92 model """conve""" +78 92 loss """bceaftersigmoid""" +78 92 regularizer """no""" +78 92 optimizer """adam""" +78 92 training_loop """lcwa""" +78 92 evaluator """rankbased""" +78 93 dataset """kinships""" +78 93 model """conve""" +78 93 loss """bceaftersigmoid""" +78 93 regularizer """no""" +78 93 optimizer """adam""" +78 93 training_loop """lcwa""" +78 93 evaluator """rankbased""" +78 94 dataset """kinships""" +78 94 model """conve""" +78 94 loss """bceaftersigmoid""" +78 94 regularizer """no""" +78 94 optimizer """adam""" +78 94 training_loop """lcwa""" +78 94 evaluator """rankbased""" +78 95 dataset """kinships""" +78 95 model """conve""" +78 95 loss """bceaftersigmoid""" +78 95 regularizer """no""" +78 95 optimizer """adam""" +78 95 training_loop """lcwa""" +78 95 evaluator """rankbased""" +78 96 dataset """kinships""" +78 96 model """conve""" +78 96 loss """bceaftersigmoid""" +78 96 regularizer """no""" +78 96 optimizer """adam""" +78 96 training_loop """lcwa""" +78 96 evaluator """rankbased""" +78 97 dataset """kinships""" +78 97 model """conve""" +78 97 loss """bceaftersigmoid""" +78 97 regularizer """no""" +78 97 optimizer """adam""" +78 97 training_loop """lcwa""" +78 97 evaluator """rankbased""" +78 98 dataset """kinships""" +78 98 model """conve""" +78 98 loss """bceaftersigmoid""" +78 98 regularizer """no""" +78 98 optimizer """adam""" +78 98 training_loop """lcwa""" 
+78 98 evaluator """rankbased""" +78 99 dataset """kinships""" +78 99 model """conve""" +78 99 loss """bceaftersigmoid""" +78 99 regularizer """no""" +78 99 optimizer """adam""" +78 99 training_loop """lcwa""" +78 99 evaluator """rankbased""" +78 100 dataset """kinships""" +78 100 model """conve""" +78 100 loss """bceaftersigmoid""" +78 100 regularizer """no""" +78 100 optimizer """adam""" +78 100 training_loop """lcwa""" +78 100 evaluator """rankbased""" +79 1 model.output_channels 31.0 +79 1 model.input_dropout 0.2928590802284356 +79 1 model.output_dropout 0.3335469414897157 +79 1 model.feature_map_dropout 0.43334420482058483 +79 1 model.embedding_dim 2.0 +79 1 optimizer.lr 0.006668653017985162 +79 1 training.batch_size 1.0 +79 1 training.label_smoothing 0.9869335242990345 +79 2 model.output_channels 27.0 +79 2 model.input_dropout 0.07082122604532165 +79 2 model.output_dropout 0.32300305409557134 +79 2 model.feature_map_dropout 0.33052344991628757 +79 2 model.embedding_dim 1.0 +79 2 optimizer.lr 0.06137119552952664 +79 2 training.batch_size 0.0 +79 2 training.label_smoothing 0.0021143671283542385 +79 3 model.output_channels 32.0 +79 3 model.input_dropout 0.44196547851285267 +79 3 model.output_dropout 0.12246733558688394 +79 3 model.feature_map_dropout 0.07583255899838642 +79 3 model.embedding_dim 0.0 +79 3 optimizer.lr 0.009010233356326582 +79 3 training.batch_size 1.0 +79 3 training.label_smoothing 0.0016024625321567419 +79 4 model.output_channels 51.0 +79 4 model.input_dropout 0.12290807192995357 +79 4 model.output_dropout 0.2298173640867699 +79 4 model.feature_map_dropout 0.11736503532833414 +79 4 model.embedding_dim 0.0 +79 4 optimizer.lr 0.053285147839063775 +79 4 training.batch_size 1.0 +79 4 training.label_smoothing 0.004556428133532612 +79 5 model.output_channels 54.0 +79 5 model.input_dropout 0.2305152102059586 +79 5 model.output_dropout 0.3364111148198324 +79 5 model.feature_map_dropout 0.02261634375004734 +79 5 model.embedding_dim 2.0 +79 5 
optimizer.lr 0.004196037416086704 +79 5 training.batch_size 1.0 +79 5 training.label_smoothing 0.4944749205155916 +79 6 model.output_channels 43.0 +79 6 model.input_dropout 0.4198735908793007 +79 6 model.output_dropout 0.44555125948993723 +79 6 model.feature_map_dropout 0.2726068926388071 +79 6 model.embedding_dim 1.0 +79 6 optimizer.lr 0.0012108182360410155 +79 6 training.batch_size 1.0 +79 6 training.label_smoothing 0.09833587580968138 +79 7 model.output_channels 49.0 +79 7 model.input_dropout 0.0647053963282831 +79 7 model.output_dropout 0.25908036771279797 +79 7 model.feature_map_dropout 0.4410557094530567 +79 7 model.embedding_dim 1.0 +79 7 optimizer.lr 0.009565603143789174 +79 7 training.batch_size 2.0 +79 7 training.label_smoothing 0.0021315514224472436 +79 8 model.output_channels 36.0 +79 8 model.input_dropout 0.40128695379843665 +79 8 model.output_dropout 0.11400012953177951 +79 8 model.feature_map_dropout 0.04184094568005803 +79 8 model.embedding_dim 1.0 +79 8 optimizer.lr 0.005838254340607166 +79 8 training.batch_size 0.0 +79 8 training.label_smoothing 0.013673410538858977 +79 9 model.output_channels 33.0 +79 9 model.input_dropout 0.2480822571825126 +79 9 model.output_dropout 0.10701744737232138 +79 9 model.feature_map_dropout 0.2599505106566657 +79 9 model.embedding_dim 0.0 +79 9 optimizer.lr 0.010107815363658283 +79 9 training.batch_size 2.0 +79 9 training.label_smoothing 0.03529153136043697 +79 10 model.output_channels 37.0 +79 10 model.input_dropout 0.1981252489848075 +79 10 model.output_dropout 0.15771895024257165 +79 10 model.feature_map_dropout 0.23689211474855504 +79 10 model.embedding_dim 2.0 +79 10 optimizer.lr 0.0018367969827669005 +79 10 training.batch_size 2.0 +79 10 training.label_smoothing 0.2617037738512707 +79 11 model.output_channels 64.0 +79 11 model.input_dropout 0.04810633845852175 +79 11 model.output_dropout 0.08934530298304322 +79 11 model.feature_map_dropout 0.04324456160344159 +79 11 model.embedding_dim 1.0 +79 11 optimizer.lr 
0.0015870723538723432 +79 11 training.batch_size 0.0 +79 11 training.label_smoothing 0.3917149387177179 +79 12 model.output_channels 34.0 +79 12 model.input_dropout 0.2340226753511468 +79 12 model.output_dropout 0.454201910710384 +79 12 model.feature_map_dropout 0.01823531526277561 +79 12 model.embedding_dim 2.0 +79 12 optimizer.lr 0.07647896616673651 +79 12 training.batch_size 2.0 +79 12 training.label_smoothing 0.4101615798239709 +79 13 model.output_channels 36.0 +79 13 model.input_dropout 0.2544791088668961 +79 13 model.output_dropout 0.4245550235594892 +79 13 model.feature_map_dropout 0.1247863782300368 +79 13 model.embedding_dim 1.0 +79 13 optimizer.lr 0.0014193588165134425 +79 13 training.batch_size 2.0 +79 13 training.label_smoothing 0.8677291230185856 +79 14 model.output_channels 17.0 +79 14 model.input_dropout 0.475614646635198 +79 14 model.output_dropout 0.21146466625645816 +79 14 model.feature_map_dropout 0.05402627828623768 +79 14 model.embedding_dim 0.0 +79 14 optimizer.lr 0.006064899193010052 +79 14 training.batch_size 0.0 +79 14 training.label_smoothing 0.003690596070111555 +79 15 model.output_channels 32.0 +79 15 model.input_dropout 0.43165682504381964 +79 15 model.output_dropout 0.06884932830761548 +79 15 model.feature_map_dropout 0.4701085238868625 +79 15 model.embedding_dim 0.0 +79 15 optimizer.lr 0.046768270589720204 +79 15 training.batch_size 1.0 +79 15 training.label_smoothing 0.008302697689545242 +79 16 model.output_channels 64.0 +79 16 model.input_dropout 0.32459150935817166 +79 16 model.output_dropout 0.48222865044440155 +79 16 model.feature_map_dropout 0.08020831759810493 +79 16 model.embedding_dim 2.0 +79 16 optimizer.lr 0.05368138410729997 +79 16 training.batch_size 0.0 +79 16 training.label_smoothing 0.011700212426708457 +79 17 model.output_channels 38.0 +79 17 model.input_dropout 0.0359402997220965 +79 17 model.output_dropout 0.29454875207228565 +79 17 model.feature_map_dropout 0.08132954270256743 +79 17 model.embedding_dim 1.0 +79 17 
optimizer.lr 0.03178353336558144 +79 17 training.batch_size 2.0 +79 17 training.label_smoothing 0.03463082748473392 +79 18 model.output_channels 46.0 +79 18 model.input_dropout 0.36451462659635026 +79 18 model.output_dropout 0.48749922363622034 +79 18 model.feature_map_dropout 0.2806900603303781 +79 18 model.embedding_dim 1.0 +79 18 optimizer.lr 0.0016612494667581216 +79 18 training.batch_size 1.0 +79 18 training.label_smoothing 0.0043300359837002015 +79 19 model.output_channels 24.0 +79 19 model.input_dropout 0.29028615007239383 +79 19 model.output_dropout 0.23123791711795189 +79 19 model.feature_map_dropout 0.22809748580608985 +79 19 model.embedding_dim 1.0 +79 19 optimizer.lr 0.007755395077119515 +79 19 training.batch_size 1.0 +79 19 training.label_smoothing 0.02149388544149177 +79 20 model.output_channels 23.0 +79 20 model.input_dropout 0.2242381036691286 +79 20 model.output_dropout 0.35849666252825674 +79 20 model.feature_map_dropout 0.4419357208395876 +79 20 model.embedding_dim 2.0 +79 20 optimizer.lr 0.020250398581785095 +79 20 training.batch_size 1.0 +79 20 training.label_smoothing 0.05222178697120997 +79 21 model.output_channels 16.0 +79 21 model.input_dropout 0.018853857929386986 +79 21 model.output_dropout 0.44407031040699674 +79 21 model.feature_map_dropout 0.39240001601572244 +79 21 model.embedding_dim 0.0 +79 21 optimizer.lr 0.012305264285033674 +79 21 training.batch_size 0.0 +79 21 training.label_smoothing 0.2411505508221953 +79 22 model.output_channels 48.0 +79 22 model.input_dropout 0.48647684121940504 +79 22 model.output_dropout 0.3636605936491827 +79 22 model.feature_map_dropout 0.3827499779565486 +79 22 model.embedding_dim 2.0 +79 22 optimizer.lr 0.009810808171252107 +79 22 training.batch_size 0.0 +79 22 training.label_smoothing 0.0208921455020955 +79 23 model.output_channels 21.0 +79 23 model.input_dropout 0.44321346647494736 +79 23 model.output_dropout 0.11334843980471992 +79 23 model.feature_map_dropout 0.28686752723702125 +79 23 
model.embedding_dim 1.0 +79 23 optimizer.lr 0.015474980015231277 +79 23 training.batch_size 1.0 +79 23 training.label_smoothing 0.02915996049555207 +79 24 model.output_channels 51.0 +79 24 model.input_dropout 0.332895231454845 +79 24 model.output_dropout 0.3570199177965051 +79 24 model.feature_map_dropout 0.21513659748807268 +79 24 model.embedding_dim 1.0 +79 24 optimizer.lr 0.002680909422985316 +79 24 training.batch_size 0.0 +79 24 training.label_smoothing 0.005579107267160379 +79 25 model.output_channels 20.0 +79 25 model.input_dropout 0.38360249443117267 +79 25 model.output_dropout 0.2436599934469233 +79 25 model.feature_map_dropout 0.3241082644306382 +79 25 model.embedding_dim 1.0 +79 25 optimizer.lr 0.025448389821467778 +79 25 training.batch_size 1.0 +79 25 training.label_smoothing 0.0032833853324012647 +79 26 model.output_channels 39.0 +79 26 model.input_dropout 0.4486555586991632 +79 26 model.output_dropout 0.49608051965188044 +79 26 model.feature_map_dropout 0.43134026313682805 +79 26 model.embedding_dim 2.0 +79 26 optimizer.lr 0.07971594027134964 +79 26 training.batch_size 1.0 +79 26 training.label_smoothing 0.008526167208499153 +79 27 model.output_channels 26.0 +79 27 model.input_dropout 0.26186248770729575 +79 27 model.output_dropout 0.4158576750138519 +79 27 model.feature_map_dropout 0.07465723426503434 +79 27 model.embedding_dim 2.0 +79 27 optimizer.lr 0.03270106260999865 +79 27 training.batch_size 2.0 +79 27 training.label_smoothing 0.022926407172116057 +79 28 model.output_channels 38.0 +79 28 model.input_dropout 0.439363280637049 +79 28 model.output_dropout 0.14008752800180263 +79 28 model.feature_map_dropout 0.07279405787101734 +79 28 model.embedding_dim 2.0 +79 28 optimizer.lr 0.0018529222253374433 +79 28 training.batch_size 1.0 +79 28 training.label_smoothing 0.0011700048864810072 +79 29 model.output_channels 51.0 +79 29 model.input_dropout 0.32683573393775056 +79 29 model.output_dropout 0.1365612470706326 +79 29 model.feature_map_dropout 
0.48364102207354953 +79 29 model.embedding_dim 2.0 +79 29 optimizer.lr 0.008175797096280966 +79 29 training.batch_size 2.0 +79 29 training.label_smoothing 0.02297817171271515 +79 30 model.output_channels 20.0 +79 30 model.input_dropout 0.32279441912013823 +79 30 model.output_dropout 0.02901181877186454 +79 30 model.feature_map_dropout 0.08387958109673288 +79 30 model.embedding_dim 2.0 +79 30 optimizer.lr 0.0012513008793808473 +79 30 training.batch_size 2.0 +79 30 training.label_smoothing 0.07122645998702791 +79 31 model.output_channels 29.0 +79 31 model.input_dropout 0.20864773670538128 +79 31 model.output_dropout 0.19998191886533095 +79 31 model.feature_map_dropout 0.3307019486066766 +79 31 model.embedding_dim 1.0 +79 31 optimizer.lr 0.0011735075541711854 +79 31 training.batch_size 2.0 +79 31 training.label_smoothing 0.025340377890461334 +79 32 model.output_channels 48.0 +79 32 model.input_dropout 0.15661851809509153 +79 32 model.output_dropout 0.11138683987288678 +79 32 model.feature_map_dropout 0.41595945534643836 +79 32 model.embedding_dim 1.0 +79 32 optimizer.lr 0.04866705790752375 +79 32 training.batch_size 2.0 +79 32 training.label_smoothing 0.011050661300709348 +79 33 model.output_channels 47.0 +79 33 model.input_dropout 0.34140093133070226 +79 33 model.output_dropout 0.3247462401142532 +79 33 model.feature_map_dropout 0.4208717070482217 +79 33 model.embedding_dim 1.0 +79 33 optimizer.lr 0.0719384044633006 +79 33 training.batch_size 2.0 +79 33 training.label_smoothing 0.352737750192227 +79 34 model.output_channels 60.0 +79 34 model.input_dropout 0.4243299856716462 +79 34 model.output_dropout 0.4319372904502019 +79 34 model.feature_map_dropout 0.21862559203235626 +79 34 model.embedding_dim 2.0 +79 34 optimizer.lr 0.026208089592300865 +79 34 training.batch_size 1.0 +79 34 training.label_smoothing 0.07134627324826295 +79 35 model.output_channels 48.0 +79 35 model.input_dropout 0.36654445791490986 +79 35 model.output_dropout 0.13214842175702618 +79 35 
model.feature_map_dropout 0.23544201744544724 +79 35 model.embedding_dim 2.0 +79 35 optimizer.lr 0.01528898768377596 +79 35 training.batch_size 2.0 +79 35 training.label_smoothing 0.005338373443274995 +79 36 model.output_channels 30.0 +79 36 model.input_dropout 0.13767196286991012 +79 36 model.output_dropout 0.35751363438754524 +79 36 model.feature_map_dropout 0.2232037419198173 +79 36 model.embedding_dim 1.0 +79 36 optimizer.lr 0.0012281868737202518 +79 36 training.batch_size 2.0 +79 36 training.label_smoothing 0.03905301268452984 +79 37 model.output_channels 22.0 +79 37 model.input_dropout 0.49184483984525057 +79 37 model.output_dropout 0.13509974447381395 +79 37 model.feature_map_dropout 0.42760100844493903 +79 37 model.embedding_dim 0.0 +79 37 optimizer.lr 0.022936325427835046 +79 37 training.batch_size 0.0 +79 37 training.label_smoothing 0.005008453964443249 +79 38 model.output_channels 36.0 +79 38 model.input_dropout 0.4218752343375412 +79 38 model.output_dropout 0.0727362971179617 +79 38 model.feature_map_dropout 0.3597982977459757 +79 38 model.embedding_dim 2.0 +79 38 optimizer.lr 0.0031844876295060447 +79 38 training.batch_size 1.0 +79 38 training.label_smoothing 0.1386515084035325 +79 39 model.output_channels 45.0 +79 39 model.input_dropout 0.25806335098372035 +79 39 model.output_dropout 0.4427956024438319 +79 39 model.feature_map_dropout 0.05166035960822235 +79 39 model.embedding_dim 2.0 +79 39 optimizer.lr 0.003948830858250559 +79 39 training.batch_size 2.0 +79 39 training.label_smoothing 0.2655757185880547 +79 40 model.output_channels 41.0 +79 40 model.input_dropout 0.4717090784895424 +79 40 model.output_dropout 0.16499479508979115 +79 40 model.feature_map_dropout 0.11215422475149928 +79 40 model.embedding_dim 1.0 +79 40 optimizer.lr 0.03976077900811625 +79 40 training.batch_size 0.0 +79 40 training.label_smoothing 0.03739165591130821 +79 41 model.output_channels 33.0 +79 41 model.input_dropout 0.3609236577208223 +79 41 model.output_dropout 
0.17484390035804148 +79 41 model.feature_map_dropout 0.1643004228683942 +79 41 model.embedding_dim 0.0 +79 41 optimizer.lr 0.0043058597944086755 +79 41 training.batch_size 0.0 +79 41 training.label_smoothing 0.09528186044166385 +79 42 model.output_channels 47.0 +79 42 model.input_dropout 0.03209268141967758 +79 42 model.output_dropout 0.37362555350328985 +79 42 model.feature_map_dropout 0.40872491008049355 +79 42 model.embedding_dim 2.0 +79 42 optimizer.lr 0.07563749476016593 +79 42 training.batch_size 2.0 +79 42 training.label_smoothing 0.05706142034045171 +79 43 model.output_channels 38.0 +79 43 model.input_dropout 0.029864587497250128 +79 43 model.output_dropout 0.009487664897904091 +79 43 model.feature_map_dropout 0.30216796412017116 +79 43 model.embedding_dim 1.0 +79 43 optimizer.lr 0.002207513395102243 +79 43 training.batch_size 2.0 +79 43 training.label_smoothing 0.5337440512447261 +79 44 model.output_channels 63.0 +79 44 model.input_dropout 0.31121506130497856 +79 44 model.output_dropout 0.21660217156083855 +79 44 model.feature_map_dropout 0.08674388572923275 +79 44 model.embedding_dim 1.0 +79 44 optimizer.lr 0.0017941001008635964 +79 44 training.batch_size 1.0 +79 44 training.label_smoothing 0.07502766066902858 +79 45 model.output_channels 41.0 +79 45 model.input_dropout 0.2298456314125631 +79 45 model.output_dropout 0.4631905688601065 +79 45 model.feature_map_dropout 0.2820487059840463 +79 45 model.embedding_dim 1.0 +79 45 optimizer.lr 0.029292445918091957 +79 45 training.batch_size 1.0 +79 45 training.label_smoothing 0.5958704349097561 +79 46 model.output_channels 45.0 +79 46 model.input_dropout 0.31393936900662334 +79 46 model.output_dropout 0.016620830712651247 +79 46 model.feature_map_dropout 0.4391534781546018 +79 46 model.embedding_dim 2.0 +79 46 optimizer.lr 0.004824700133348324 +79 46 training.batch_size 1.0 +79 46 training.label_smoothing 0.4155991138836561 +79 47 model.output_channels 41.0 +79 47 model.input_dropout 0.38812995949522994 +79 47 
model.output_dropout 0.19081963779652156 +79 47 model.feature_map_dropout 0.18749187998164057 +79 47 model.embedding_dim 0.0 +79 47 optimizer.lr 0.00428905308637714 +79 47 training.batch_size 2.0 +79 47 training.label_smoothing 0.13406485947712227 +79 48 model.output_channels 35.0 +79 48 model.input_dropout 0.10871845460262658 +79 48 model.output_dropout 0.28338817356108825 +79 48 model.feature_map_dropout 0.4052536991095941 +79 48 model.embedding_dim 1.0 +79 48 optimizer.lr 0.04168794680085475 +79 48 training.batch_size 0.0 +79 48 training.label_smoothing 0.09760488703662232 +79 49 model.output_channels 61.0 +79 49 model.input_dropout 0.4643946717803854 +79 49 model.output_dropout 0.11157745096777233 +79 49 model.feature_map_dropout 0.30750654184206466 +79 49 model.embedding_dim 1.0 +79 49 optimizer.lr 0.001076142609170355 +79 49 training.batch_size 1.0 +79 49 training.label_smoothing 0.4988052960590396 +79 50 model.output_channels 35.0 +79 50 model.input_dropout 0.22866750980706352 +79 50 model.output_dropout 0.0724447503963846 +79 50 model.feature_map_dropout 0.24491335558268468 +79 50 model.embedding_dim 2.0 +79 50 optimizer.lr 0.0038703318955675115 +79 50 training.batch_size 0.0 +79 50 training.label_smoothing 0.010308448740266478 +79 51 model.output_channels 22.0 +79 51 model.input_dropout 0.19515821482791929 +79 51 model.output_dropout 0.323255953785023 +79 51 model.feature_map_dropout 0.4255852297430386 +79 51 model.embedding_dim 2.0 +79 51 optimizer.lr 0.001090420773617862 +79 51 training.batch_size 0.0 +79 51 training.label_smoothing 0.15809476045361778 +79 52 model.output_channels 18.0 +79 52 model.input_dropout 0.45687888623540673 +79 52 model.output_dropout 0.4315058744680474 +79 52 model.feature_map_dropout 0.44636553750046876 +79 52 model.embedding_dim 0.0 +79 52 optimizer.lr 0.08464762868468727 +79 52 training.batch_size 0.0 +79 52 training.label_smoothing 0.0014356361248836459 +79 53 model.output_channels 21.0 +79 53 model.input_dropout 
0.3921932344205618 +79 53 model.output_dropout 0.486354322362481 +79 53 model.feature_map_dropout 0.17296344443737577 +79 53 model.embedding_dim 0.0 +79 53 optimizer.lr 0.004622339754656402 +79 53 training.batch_size 2.0 +79 53 training.label_smoothing 0.007364070043602007 +79 54 model.output_channels 58.0 +79 54 model.input_dropout 0.3177383316628344 +79 54 model.output_dropout 0.3025671572268044 +79 54 model.feature_map_dropout 0.31243004375199224 +79 54 model.embedding_dim 2.0 +79 54 optimizer.lr 0.0025409939909028442 +79 54 training.batch_size 2.0 +79 54 training.label_smoothing 0.002193626900117424 +79 55 model.output_channels 25.0 +79 55 model.input_dropout 0.33299978835459604 +79 55 model.output_dropout 0.22699879851173638 +79 55 model.feature_map_dropout 0.36180598806870634 +79 55 model.embedding_dim 2.0 +79 55 optimizer.lr 0.003026294013352463 +79 55 training.batch_size 0.0 +79 55 training.label_smoothing 0.8497964430213748 +79 56 model.output_channels 34.0 +79 56 model.input_dropout 0.3329452407075978 +79 56 model.output_dropout 0.07095880937766957 +79 56 model.feature_map_dropout 0.1518890955545592 +79 56 model.embedding_dim 1.0 +79 56 optimizer.lr 0.009462905851447814 +79 56 training.batch_size 0.0 +79 56 training.label_smoothing 0.20567534838896692 +79 57 model.output_channels 60.0 +79 57 model.input_dropout 0.20719424096320987 +79 57 model.output_dropout 0.19521006693699755 +79 57 model.feature_map_dropout 0.08887983767909396 +79 57 model.embedding_dim 2.0 +79 57 optimizer.lr 0.029660969827180653 +79 57 training.batch_size 1.0 +79 57 training.label_smoothing 0.2282168721237148 +79 58 model.output_channels 64.0 +79 58 model.input_dropout 0.3370132903362826 +79 58 model.output_dropout 0.35534038311089355 +79 58 model.feature_map_dropout 0.34439227938109157 +79 58 model.embedding_dim 1.0 +79 58 optimizer.lr 0.06038735227829053 +79 58 training.batch_size 1.0 +79 58 training.label_smoothing 0.5874199021944495 +79 59 model.output_channels 40.0 +79 59 
model.input_dropout 0.28123172140475894 +79 59 model.output_dropout 0.3235235764967069 +79 59 model.feature_map_dropout 0.11042644459168166 +79 59 model.embedding_dim 2.0 +79 59 optimizer.lr 0.004842305564680915 +79 59 training.batch_size 0.0 +79 59 training.label_smoothing 0.08785377412568567 +79 60 model.output_channels 34.0 +79 60 model.input_dropout 0.03415594876509953 +79 60 model.output_dropout 0.3402023443233086 +79 60 model.feature_map_dropout 0.3862934749217699 +79 60 model.embedding_dim 2.0 +79 60 optimizer.lr 0.01792808331522182 +79 60 training.batch_size 0.0 +79 60 training.label_smoothing 0.2527230618244494 +79 61 model.output_channels 59.0 +79 61 model.input_dropout 0.07847982368618828 +79 61 model.output_dropout 0.4286386602207984 +79 61 model.feature_map_dropout 0.2612913340495336 +79 61 model.embedding_dim 0.0 +79 61 optimizer.lr 0.04204128652477912 +79 61 training.batch_size 0.0 +79 61 training.label_smoothing 0.0029434716235887397 +79 62 model.output_channels 61.0 +79 62 model.input_dropout 0.47784580823554607 +79 62 model.output_dropout 0.40604300054945014 +79 62 model.feature_map_dropout 0.18806922821342276 +79 62 model.embedding_dim 1.0 +79 62 optimizer.lr 0.006072850832029353 +79 62 training.batch_size 2.0 +79 62 training.label_smoothing 0.174109428225378 +79 63 model.output_channels 20.0 +79 63 model.input_dropout 0.4594674296847956 +79 63 model.output_dropout 0.11588616586840111 +79 63 model.feature_map_dropout 0.20049018530306528 +79 63 model.embedding_dim 0.0 +79 63 optimizer.lr 0.05758050628828372 +79 63 training.batch_size 1.0 +79 63 training.label_smoothing 0.008739481130794431 +79 64 model.output_channels 24.0 +79 64 model.input_dropout 0.2714492042123027 +79 64 model.output_dropout 0.3898610015953533 +79 64 model.feature_map_dropout 0.039904890613506494 +79 64 model.embedding_dim 1.0 +79 64 optimizer.lr 0.005954938611950197 +79 64 training.batch_size 2.0 +79 64 training.label_smoothing 0.0010185093590918033 +79 65 
model.output_channels 52.0 +79 65 model.input_dropout 0.06733094972006892 +79 65 model.output_dropout 0.44659111210403163 +79 65 model.feature_map_dropout 0.016398089517778602 +79 65 model.embedding_dim 1.0 +79 65 optimizer.lr 0.023028491766176697 +79 65 training.batch_size 0.0 +79 65 training.label_smoothing 0.009283502226778796 +79 66 model.output_channels 64.0 +79 66 model.input_dropout 0.26145297816278273 +79 66 model.output_dropout 0.18840083307711192 +79 66 model.feature_map_dropout 0.060791749197979195 +79 66 model.embedding_dim 2.0 +79 66 optimizer.lr 0.0030172361786402905 +79 66 training.batch_size 2.0 +79 66 training.label_smoothing 0.0020872018014537605 +79 67 model.output_channels 48.0 +79 67 model.input_dropout 0.41349771720583517 +79 67 model.output_dropout 0.34909991009973074 +79 67 model.feature_map_dropout 0.41173970959948036 +79 67 model.embedding_dim 2.0 +79 67 optimizer.lr 0.043135717522198726 +79 67 training.batch_size 0.0 +79 67 training.label_smoothing 0.07289978699976578 +79 68 model.output_channels 36.0 +79 68 model.input_dropout 0.20954571009925366 +79 68 model.output_dropout 0.24621901279056452 +79 68 model.feature_map_dropout 0.42775041734643077 +79 68 model.embedding_dim 2.0 +79 68 optimizer.lr 0.04270116786262273 +79 68 training.batch_size 1.0 +79 68 training.label_smoothing 0.3309418317762778 +79 69 model.output_channels 46.0 +79 69 model.input_dropout 0.38497112569750336 +79 69 model.output_dropout 0.11663607542721216 +79 69 model.feature_map_dropout 0.2479184314829294 +79 69 model.embedding_dim 1.0 +79 69 optimizer.lr 0.009785066341921668 +79 69 training.batch_size 0.0 +79 69 training.label_smoothing 0.3096939113333182 +79 70 model.output_channels 27.0 +79 70 model.input_dropout 0.3013675793727434 +79 70 model.output_dropout 0.36293190857617424 +79 70 model.feature_map_dropout 0.23076754802503108 +79 70 model.embedding_dim 2.0 +79 70 optimizer.lr 0.005302498633187379 +79 70 training.batch_size 1.0 +79 70 training.label_smoothing 
0.008339342407895616 +79 71 model.output_channels 50.0 +79 71 model.input_dropout 0.10481497278775093 +79 71 model.output_dropout 0.43168894711203726 +79 71 model.feature_map_dropout 0.2672248917040043 +79 71 model.embedding_dim 0.0 +79 71 optimizer.lr 0.08960248147484595 +79 71 training.batch_size 2.0 +79 71 training.label_smoothing 0.029964402574142582 +79 72 model.output_channels 36.0 +79 72 model.input_dropout 0.09100876379356532 +79 72 model.output_dropout 0.029404108566269238 +79 72 model.feature_map_dropout 0.21642230931200307 +79 72 model.embedding_dim 0.0 +79 72 optimizer.lr 0.005643873724915017 +79 72 training.batch_size 1.0 +79 72 training.label_smoothing 0.0028554682013057953 +79 73 model.output_channels 21.0 +79 73 model.input_dropout 0.4073259948567924 +79 73 model.output_dropout 0.18338017821733277 +79 73 model.feature_map_dropout 0.20166485457459443 +79 73 model.embedding_dim 1.0 +79 73 optimizer.lr 0.08820218041710365 +79 73 training.batch_size 1.0 +79 73 training.label_smoothing 0.2497897883875679 +79 74 model.output_channels 51.0 +79 74 model.input_dropout 0.09442001591564914 +79 74 model.output_dropout 0.18003723253917603 +79 74 model.feature_map_dropout 0.09987338157649045 +79 74 model.embedding_dim 0.0 +79 74 optimizer.lr 0.007088862926162279 +79 74 training.batch_size 1.0 +79 74 training.label_smoothing 0.0016051626224741966 +79 75 model.output_channels 38.0 +79 75 model.input_dropout 0.11260431086727923 +79 75 model.output_dropout 0.28148636377360736 +79 75 model.feature_map_dropout 0.0439749463885154 +79 75 model.embedding_dim 1.0 +79 75 optimizer.lr 0.09706356277884241 +79 75 training.batch_size 0.0 +79 75 training.label_smoothing 0.15643878410448772 +79 76 model.output_channels 40.0 +79 76 model.input_dropout 0.10255619312620085 +79 76 model.output_dropout 0.03346646030477063 +79 76 model.feature_map_dropout 0.29463493511615063 +79 76 model.embedding_dim 1.0 +79 76 optimizer.lr 0.0012026149304067819 +79 76 training.batch_size 0.0 +79 76 
training.label_smoothing 0.001350941690436994 +79 77 model.output_channels 20.0 +79 77 model.input_dropout 0.19711102840744055 +79 77 model.output_dropout 0.4595900988660764 +79 77 model.feature_map_dropout 0.3383089098979433 +79 77 model.embedding_dim 0.0 +79 77 optimizer.lr 0.009419402362437644 +79 77 training.batch_size 1.0 +79 77 training.label_smoothing 0.006859185904171372 +79 78 model.output_channels 18.0 +79 78 model.input_dropout 0.008650192181205274 +79 78 model.output_dropout 0.13509587788496374 +79 78 model.feature_map_dropout 0.07763061423145556 +79 78 model.embedding_dim 1.0 +79 78 optimizer.lr 0.01449487346805347 +79 78 training.batch_size 2.0 +79 78 training.label_smoothing 0.007493533182720334 +79 79 model.output_channels 47.0 +79 79 model.input_dropout 0.4038922755819084 +79 79 model.output_dropout 0.22335635044391589 +79 79 model.feature_map_dropout 4.189817806088403e-06 +79 79 model.embedding_dim 0.0 +79 79 optimizer.lr 0.03791004082958283 +79 79 training.batch_size 0.0 +79 79 training.label_smoothing 0.0017007982249400515 +79 80 model.output_channels 61.0 +79 80 model.input_dropout 0.42697082001332254 +79 80 model.output_dropout 0.48510072879083055 +79 80 model.feature_map_dropout 0.06543521799928381 +79 80 model.embedding_dim 0.0 +79 80 optimizer.lr 0.001982310067935484 +79 80 training.batch_size 2.0 +79 80 training.label_smoothing 0.0018150251424550183 +79 81 model.output_channels 31.0 +79 81 model.input_dropout 0.38362473749863013 +79 81 model.output_dropout 0.13857282349297884 +79 81 model.feature_map_dropout 0.19088992521240428 +79 81 model.embedding_dim 1.0 +79 81 optimizer.lr 0.007375360249315897 +79 81 training.batch_size 2.0 +79 81 training.label_smoothing 0.004774587653900034 +79 82 model.output_channels 30.0 +79 82 model.input_dropout 0.048707986238377576 +79 82 model.output_dropout 0.2775666884685111 +79 82 model.feature_map_dropout 0.17826150673852315 +79 82 model.embedding_dim 1.0 +79 82 optimizer.lr 0.08949433706466961 +79 82 
training.batch_size 2.0 +79 82 training.label_smoothing 0.17189685887249287 +79 83 model.output_channels 21.0 +79 83 model.input_dropout 0.13391643212796994 +79 83 model.output_dropout 0.3630432745661545 +79 83 model.feature_map_dropout 0.287239312496158 +79 83 model.embedding_dim 2.0 +79 83 optimizer.lr 0.0014168472681273998 +79 83 training.batch_size 2.0 +79 83 training.label_smoothing 0.3492147912185581 +79 84 model.output_channels 48.0 +79 84 model.input_dropout 0.389109446433758 +79 84 model.output_dropout 0.2360005953964584 +79 84 model.feature_map_dropout 0.32207017030667967 +79 84 model.embedding_dim 0.0 +79 84 optimizer.lr 0.02115292576528161 +79 84 training.batch_size 0.0 +79 84 training.label_smoothing 0.0010615870233814618 +79 85 model.output_channels 23.0 +79 85 model.input_dropout 0.05091485670725815 +79 85 model.output_dropout 0.28481048895192096 +79 85 model.feature_map_dropout 0.28481833075836743 +79 85 model.embedding_dim 0.0 +79 85 optimizer.lr 0.013897743945845628 +79 85 training.batch_size 2.0 +79 85 training.label_smoothing 0.014626795299222022 +79 86 model.output_channels 39.0 +79 86 model.input_dropout 0.47334990081734507 +79 86 model.output_dropout 0.3038662316577368 +79 86 model.feature_map_dropout 0.3765578450423219 +79 86 model.embedding_dim 2.0 +79 86 optimizer.lr 0.029230287892757403 +79 86 training.batch_size 1.0 +79 86 training.label_smoothing 0.5058783017652666 +79 87 model.output_channels 51.0 +79 87 model.input_dropout 0.21270842784614297 +79 87 model.output_dropout 0.28618856479687677 +79 87 model.feature_map_dropout 0.4067898061491086 +79 87 model.embedding_dim 2.0 +79 87 optimizer.lr 0.0025057637034402863 +79 87 training.batch_size 2.0 +79 87 training.label_smoothing 0.821256270274749 +79 88 model.output_channels 62.0 +79 88 model.input_dropout 0.2341198039891903 +79 88 model.output_dropout 0.048545009429453057 +79 88 model.feature_map_dropout 0.2911447504917512 +79 88 model.embedding_dim 0.0 +79 88 optimizer.lr 
0.007578454411161894 +79 88 training.batch_size 1.0 +79 88 training.label_smoothing 0.002752807326466562 +79 89 model.output_channels 48.0 +79 89 model.input_dropout 0.09411538817216503 +79 89 model.output_dropout 0.44279423271209645 +79 89 model.feature_map_dropout 0.43887423309779305 +79 89 model.embedding_dim 2.0 +79 89 optimizer.lr 0.02020595722681144 +79 89 training.batch_size 2.0 +79 89 training.label_smoothing 0.17445490048549014 +79 90 model.output_channels 47.0 +79 90 model.input_dropout 0.1424837293022092 +79 90 model.output_dropout 0.1535335314005098 +79 90 model.feature_map_dropout 0.16187291475599946 +79 90 model.embedding_dim 2.0 +79 90 optimizer.lr 0.03628029647682507 +79 90 training.batch_size 0.0 +79 90 training.label_smoothing 0.7468876081662016 +79 91 model.output_channels 53.0 +79 91 model.input_dropout 0.20130677452781598 +79 91 model.output_dropout 0.19067218243194256 +79 91 model.feature_map_dropout 0.17152681149565863 +79 91 model.embedding_dim 2.0 +79 91 optimizer.lr 0.013468481544819078 +79 91 training.batch_size 0.0 +79 91 training.label_smoothing 0.009201363549280347 +79 92 model.output_channels 62.0 +79 92 model.input_dropout 0.46441356404605505 +79 92 model.output_dropout 0.05420749899452493 +79 92 model.feature_map_dropout 0.4550164054486483 +79 92 model.embedding_dim 2.0 +79 92 optimizer.lr 0.0014177221173203398 +79 92 training.batch_size 2.0 +79 92 training.label_smoothing 0.002000148143442669 +79 93 model.output_channels 36.0 +79 93 model.input_dropout 0.00880388864841486 +79 93 model.output_dropout 0.38651450617804883 +79 93 model.feature_map_dropout 0.4817902885449428 +79 93 model.embedding_dim 0.0 +79 93 optimizer.lr 0.005656843839271855 +79 93 training.batch_size 2.0 +79 93 training.label_smoothing 0.0026835652428994685 +79 94 model.output_channels 18.0 +79 94 model.input_dropout 0.05111121671730051 +79 94 model.output_dropout 0.07065533750196334 +79 94 model.feature_map_dropout 0.46425987301135124 +79 94 model.embedding_dim 
1.0 +79 94 optimizer.lr 0.010022378527718444 +79 94 training.batch_size 0.0 +79 94 training.label_smoothing 0.012527498870722142 +79 95 model.output_channels 21.0 +79 95 model.input_dropout 0.4045659991517794 +79 95 model.output_dropout 0.20681855568852114 +79 95 model.feature_map_dropout 0.16906440691357205 +79 95 model.embedding_dim 1.0 +79 95 optimizer.lr 0.059521685008134685 +79 95 training.batch_size 1.0 +79 95 training.label_smoothing 0.7715172326878489 +79 96 model.output_channels 30.0 +79 96 model.input_dropout 0.06500424057251841 +79 96 model.output_dropout 0.14084970718387868 +79 96 model.feature_map_dropout 0.022284574405431246 +79 96 model.embedding_dim 2.0 +79 96 optimizer.lr 0.005447686765048293 +79 96 training.batch_size 0.0 +79 96 training.label_smoothing 0.016149173965574356 +79 97 model.output_channels 20.0 +79 97 model.input_dropout 0.15536378891355773 +79 97 model.output_dropout 0.4421551961075799 +79 97 model.feature_map_dropout 0.06807757821535837 +79 97 model.embedding_dim 2.0 +79 97 optimizer.lr 0.0037879025671183814 +79 97 training.batch_size 2.0 +79 97 training.label_smoothing 0.0017970152046587703 +79 98 model.output_channels 19.0 +79 98 model.input_dropout 0.19917253279332292 +79 98 model.output_dropout 0.0993987979032222 +79 98 model.feature_map_dropout 0.21102417053740197 +79 98 model.embedding_dim 2.0 +79 98 optimizer.lr 0.042719391304622635 +79 98 training.batch_size 2.0 +79 98 training.label_smoothing 0.0011391350163294286 +79 99 model.output_channels 58.0 +79 99 model.input_dropout 0.37323979111723404 +79 99 model.output_dropout 0.3894430897867993 +79 99 model.feature_map_dropout 0.20788037150669153 +79 99 model.embedding_dim 2.0 +79 99 optimizer.lr 0.0017058178381032113 +79 99 training.batch_size 2.0 +79 99 training.label_smoothing 0.19232992627912482 +79 100 model.output_channels 41.0 +79 100 model.input_dropout 0.3917738903587174 +79 100 model.output_dropout 0.22921863075242738 +79 100 model.feature_map_dropout 
0.004163485554973423 +79 100 model.embedding_dim 0.0 +79 100 optimizer.lr 0.02897746494093271 +79 100 training.batch_size 1.0 +79 100 training.label_smoothing 0.007424268088498045 +79 1 dataset """kinships""" +79 1 model """conve""" +79 1 loss """softplus""" +79 1 regularizer """no""" +79 1 optimizer """adam""" +79 1 training_loop """lcwa""" +79 1 evaluator """rankbased""" +79 2 dataset """kinships""" +79 2 model """conve""" +79 2 loss """softplus""" +79 2 regularizer """no""" +79 2 optimizer """adam""" +79 2 training_loop """lcwa""" +79 2 evaluator """rankbased""" +79 3 dataset """kinships""" +79 3 model """conve""" +79 3 loss """softplus""" +79 3 regularizer """no""" +79 3 optimizer """adam""" +79 3 training_loop """lcwa""" +79 3 evaluator """rankbased""" +79 4 dataset """kinships""" +79 4 model """conve""" +79 4 loss """softplus""" +79 4 regularizer """no""" +79 4 optimizer """adam""" +79 4 training_loop """lcwa""" +79 4 evaluator """rankbased""" +79 5 dataset """kinships""" +79 5 model """conve""" +79 5 loss """softplus""" +79 5 regularizer """no""" +79 5 optimizer """adam""" +79 5 training_loop """lcwa""" +79 5 evaluator """rankbased""" +79 6 dataset """kinships""" +79 6 model """conve""" +79 6 loss """softplus""" +79 6 regularizer """no""" +79 6 optimizer """adam""" +79 6 training_loop """lcwa""" +79 6 evaluator """rankbased""" +79 7 dataset """kinships""" +79 7 model """conve""" +79 7 loss """softplus""" +79 7 regularizer """no""" +79 7 optimizer """adam""" +79 7 training_loop """lcwa""" +79 7 evaluator """rankbased""" +79 8 dataset """kinships""" +79 8 model """conve""" +79 8 loss """softplus""" +79 8 regularizer """no""" +79 8 optimizer """adam""" +79 8 training_loop """lcwa""" +79 8 evaluator """rankbased""" +79 9 dataset """kinships""" +79 9 model """conve""" +79 9 loss """softplus""" +79 9 regularizer """no""" +79 9 optimizer """adam""" +79 9 training_loop """lcwa""" +79 9 evaluator """rankbased""" +79 10 dataset """kinships""" +79 10 model """conve""" 
+79 10 loss """softplus""" +79 10 regularizer """no""" +79 10 optimizer """adam""" +79 10 training_loop """lcwa""" +79 10 evaluator """rankbased""" +79 11 dataset """kinships""" +79 11 model """conve""" +79 11 loss """softplus""" +79 11 regularizer """no""" +79 11 optimizer """adam""" +79 11 training_loop """lcwa""" +79 11 evaluator """rankbased""" +79 12 dataset """kinships""" +79 12 model """conve""" +79 12 loss """softplus""" +79 12 regularizer """no""" +79 12 optimizer """adam""" +79 12 training_loop """lcwa""" +79 12 evaluator """rankbased""" +79 13 dataset """kinships""" +79 13 model """conve""" +79 13 loss """softplus""" +79 13 regularizer """no""" +79 13 optimizer """adam""" +79 13 training_loop """lcwa""" +79 13 evaluator """rankbased""" +79 14 dataset """kinships""" +79 14 model """conve""" +79 14 loss """softplus""" +79 14 regularizer """no""" +79 14 optimizer """adam""" +79 14 training_loop """lcwa""" +79 14 evaluator """rankbased""" +79 15 dataset """kinships""" +79 15 model """conve""" +79 15 loss """softplus""" +79 15 regularizer """no""" +79 15 optimizer """adam""" +79 15 training_loop """lcwa""" +79 15 evaluator """rankbased""" +79 16 dataset """kinships""" +79 16 model """conve""" +79 16 loss """softplus""" +79 16 regularizer """no""" +79 16 optimizer """adam""" +79 16 training_loop """lcwa""" +79 16 evaluator """rankbased""" +79 17 dataset """kinships""" +79 17 model """conve""" +79 17 loss """softplus""" +79 17 regularizer """no""" +79 17 optimizer """adam""" +79 17 training_loop """lcwa""" +79 17 evaluator """rankbased""" +79 18 dataset """kinships""" +79 18 model """conve""" +79 18 loss """softplus""" +79 18 regularizer """no""" +79 18 optimizer """adam""" +79 18 training_loop """lcwa""" +79 18 evaluator """rankbased""" +79 19 dataset """kinships""" +79 19 model """conve""" +79 19 loss """softplus""" +79 19 regularizer """no""" +79 19 optimizer """adam""" +79 19 training_loop """lcwa""" +79 19 evaluator """rankbased""" +79 20 dataset 
"""kinships""" +79 20 model """conve""" +79 20 loss """softplus""" +79 20 regularizer """no""" +79 20 optimizer """adam""" +79 20 training_loop """lcwa""" +79 20 evaluator """rankbased""" +79 21 dataset """kinships""" +79 21 model """conve""" +79 21 loss """softplus""" +79 21 regularizer """no""" +79 21 optimizer """adam""" +79 21 training_loop """lcwa""" +79 21 evaluator """rankbased""" +79 22 dataset """kinships""" +79 22 model """conve""" +79 22 loss """softplus""" +79 22 regularizer """no""" +79 22 optimizer """adam""" +79 22 training_loop """lcwa""" +79 22 evaluator """rankbased""" +79 23 dataset """kinships""" +79 23 model """conve""" +79 23 loss """softplus""" +79 23 regularizer """no""" +79 23 optimizer """adam""" +79 23 training_loop """lcwa""" +79 23 evaluator """rankbased""" +79 24 dataset """kinships""" +79 24 model """conve""" +79 24 loss """softplus""" +79 24 regularizer """no""" +79 24 optimizer """adam""" +79 24 training_loop """lcwa""" +79 24 evaluator """rankbased""" +79 25 dataset """kinships""" +79 25 model """conve""" +79 25 loss """softplus""" +79 25 regularizer """no""" +79 25 optimizer """adam""" +79 25 training_loop """lcwa""" +79 25 evaluator """rankbased""" +79 26 dataset """kinships""" +79 26 model """conve""" +79 26 loss """softplus""" +79 26 regularizer """no""" +79 26 optimizer """adam""" +79 26 training_loop """lcwa""" +79 26 evaluator """rankbased""" +79 27 dataset """kinships""" +79 27 model """conve""" +79 27 loss """softplus""" +79 27 regularizer """no""" +79 27 optimizer """adam""" +79 27 training_loop """lcwa""" +79 27 evaluator """rankbased""" +79 28 dataset """kinships""" +79 28 model """conve""" +79 28 loss """softplus""" +79 28 regularizer """no""" +79 28 optimizer """adam""" +79 28 training_loop """lcwa""" +79 28 evaluator """rankbased""" +79 29 dataset """kinships""" +79 29 model """conve""" +79 29 loss """softplus""" +79 29 regularizer """no""" +79 29 optimizer """adam""" +79 29 training_loop """lcwa""" +79 29 evaluator 
"""rankbased""" +79 30 dataset """kinships""" +79 30 model """conve""" +79 30 loss """softplus""" +79 30 regularizer """no""" +79 30 optimizer """adam""" +79 30 training_loop """lcwa""" +79 30 evaluator """rankbased""" +79 31 dataset """kinships""" +79 31 model """conve""" +79 31 loss """softplus""" +79 31 regularizer """no""" +79 31 optimizer """adam""" +79 31 training_loop """lcwa""" +79 31 evaluator """rankbased""" +79 32 dataset """kinships""" +79 32 model """conve""" +79 32 loss """softplus""" +79 32 regularizer """no""" +79 32 optimizer """adam""" +79 32 training_loop """lcwa""" +79 32 evaluator """rankbased""" +79 33 dataset """kinships""" +79 33 model """conve""" +79 33 loss """softplus""" +79 33 regularizer """no""" +79 33 optimizer """adam""" +79 33 training_loop """lcwa""" +79 33 evaluator """rankbased""" +79 34 dataset """kinships""" +79 34 model """conve""" +79 34 loss """softplus""" +79 34 regularizer """no""" +79 34 optimizer """adam""" +79 34 training_loop """lcwa""" +79 34 evaluator """rankbased""" +79 35 dataset """kinships""" +79 35 model """conve""" +79 35 loss """softplus""" +79 35 regularizer """no""" +79 35 optimizer """adam""" +79 35 training_loop """lcwa""" +79 35 evaluator """rankbased""" +79 36 dataset """kinships""" +79 36 model """conve""" +79 36 loss """softplus""" +79 36 regularizer """no""" +79 36 optimizer """adam""" +79 36 training_loop """lcwa""" +79 36 evaluator """rankbased""" +79 37 dataset """kinships""" +79 37 model """conve""" +79 37 loss """softplus""" +79 37 regularizer """no""" +79 37 optimizer """adam""" +79 37 training_loop """lcwa""" +79 37 evaluator """rankbased""" +79 38 dataset """kinships""" +79 38 model """conve""" +79 38 loss """softplus""" +79 38 regularizer """no""" +79 38 optimizer """adam""" +79 38 training_loop """lcwa""" +79 38 evaluator """rankbased""" +79 39 dataset """kinships""" +79 39 model """conve""" +79 39 loss """softplus""" +79 39 regularizer """no""" +79 39 optimizer """adam""" +79 39 
training_loop """lcwa""" +79 39 evaluator """rankbased""" +79 40 dataset """kinships""" +79 40 model """conve""" +79 40 loss """softplus""" +79 40 regularizer """no""" +79 40 optimizer """adam""" +79 40 training_loop """lcwa""" +79 40 evaluator """rankbased""" +79 41 dataset """kinships""" +79 41 model """conve""" +79 41 loss """softplus""" +79 41 regularizer """no""" +79 41 optimizer """adam""" +79 41 training_loop """lcwa""" +79 41 evaluator """rankbased""" +79 42 dataset """kinships""" +79 42 model """conve""" +79 42 loss """softplus""" +79 42 regularizer """no""" +79 42 optimizer """adam""" +79 42 training_loop """lcwa""" +79 42 evaluator """rankbased""" +79 43 dataset """kinships""" +79 43 model """conve""" +79 43 loss """softplus""" +79 43 regularizer """no""" +79 43 optimizer """adam""" +79 43 training_loop """lcwa""" +79 43 evaluator """rankbased""" +79 44 dataset """kinships""" +79 44 model """conve""" +79 44 loss """softplus""" +79 44 regularizer """no""" +79 44 optimizer """adam""" +79 44 training_loop """lcwa""" +79 44 evaluator """rankbased""" +79 45 dataset """kinships""" +79 45 model """conve""" +79 45 loss """softplus""" +79 45 regularizer """no""" +79 45 optimizer """adam""" +79 45 training_loop """lcwa""" +79 45 evaluator """rankbased""" +79 46 dataset """kinships""" +79 46 model """conve""" +79 46 loss """softplus""" +79 46 regularizer """no""" +79 46 optimizer """adam""" +79 46 training_loop """lcwa""" +79 46 evaluator """rankbased""" +79 47 dataset """kinships""" +79 47 model """conve""" +79 47 loss """softplus""" +79 47 regularizer """no""" +79 47 optimizer """adam""" +79 47 training_loop """lcwa""" +79 47 evaluator """rankbased""" +79 48 dataset """kinships""" +79 48 model """conve""" +79 48 loss """softplus""" +79 48 regularizer """no""" +79 48 optimizer """adam""" +79 48 training_loop """lcwa""" +79 48 evaluator """rankbased""" +79 49 dataset """kinships""" +79 49 model """conve""" +79 49 loss """softplus""" +79 49 regularizer """no""" +79 
49 optimizer """adam""" +79 49 training_loop """lcwa""" +79 49 evaluator """rankbased""" +79 50 dataset """kinships""" +79 50 model """conve""" +79 50 loss """softplus""" +79 50 regularizer """no""" +79 50 optimizer """adam""" +79 50 training_loop """lcwa""" +79 50 evaluator """rankbased""" +79 51 dataset """kinships""" +79 51 model """conve""" +79 51 loss """softplus""" +79 51 regularizer """no""" +79 51 optimizer """adam""" +79 51 training_loop """lcwa""" +79 51 evaluator """rankbased""" +79 52 dataset """kinships""" +79 52 model """conve""" +79 52 loss """softplus""" +79 52 regularizer """no""" +79 52 optimizer """adam""" +79 52 training_loop """lcwa""" +79 52 evaluator """rankbased""" +79 53 dataset """kinships""" +79 53 model """conve""" +79 53 loss """softplus""" +79 53 regularizer """no""" +79 53 optimizer """adam""" +79 53 training_loop """lcwa""" +79 53 evaluator """rankbased""" +79 54 dataset """kinships""" +79 54 model """conve""" +79 54 loss """softplus""" +79 54 regularizer """no""" +79 54 optimizer """adam""" +79 54 training_loop """lcwa""" +79 54 evaluator """rankbased""" +79 55 dataset """kinships""" +79 55 model """conve""" +79 55 loss """softplus""" +79 55 regularizer """no""" +79 55 optimizer """adam""" +79 55 training_loop """lcwa""" +79 55 evaluator """rankbased""" +79 56 dataset """kinships""" +79 56 model """conve""" +79 56 loss """softplus""" +79 56 regularizer """no""" +79 56 optimizer """adam""" +79 56 training_loop """lcwa""" +79 56 evaluator """rankbased""" +79 57 dataset """kinships""" +79 57 model """conve""" +79 57 loss """softplus""" +79 57 regularizer """no""" +79 57 optimizer """adam""" +79 57 training_loop """lcwa""" +79 57 evaluator """rankbased""" +79 58 dataset """kinships""" +79 58 model """conve""" +79 58 loss """softplus""" +79 58 regularizer """no""" +79 58 optimizer """adam""" +79 58 training_loop """lcwa""" +79 58 evaluator """rankbased""" +79 59 dataset """kinships""" +79 59 model """conve""" +79 59 loss """softplus""" 
+79 59 regularizer """no""" +79 59 optimizer """adam""" +79 59 training_loop """lcwa""" +79 59 evaluator """rankbased""" +79 60 dataset """kinships""" +79 60 model """conve""" +79 60 loss """softplus""" +79 60 regularizer """no""" +79 60 optimizer """adam""" +79 60 training_loop """lcwa""" +79 60 evaluator """rankbased""" +79 61 dataset """kinships""" +79 61 model """conve""" +79 61 loss """softplus""" +79 61 regularizer """no""" +79 61 optimizer """adam""" +79 61 training_loop """lcwa""" +79 61 evaluator """rankbased""" +79 62 dataset """kinships""" +79 62 model """conve""" +79 62 loss """softplus""" +79 62 regularizer """no""" +79 62 optimizer """adam""" +79 62 training_loop """lcwa""" +79 62 evaluator """rankbased""" +79 63 dataset """kinships""" +79 63 model """conve""" +79 63 loss """softplus""" +79 63 regularizer """no""" +79 63 optimizer """adam""" +79 63 training_loop """lcwa""" +79 63 evaluator """rankbased""" +79 64 dataset """kinships""" +79 64 model """conve""" +79 64 loss """softplus""" +79 64 regularizer """no""" +79 64 optimizer """adam""" +79 64 training_loop """lcwa""" +79 64 evaluator """rankbased""" +79 65 dataset """kinships""" +79 65 model """conve""" +79 65 loss """softplus""" +79 65 regularizer """no""" +79 65 optimizer """adam""" +79 65 training_loop """lcwa""" +79 65 evaluator """rankbased""" +79 66 dataset """kinships""" +79 66 model """conve""" +79 66 loss """softplus""" +79 66 regularizer """no""" +79 66 optimizer """adam""" +79 66 training_loop """lcwa""" +79 66 evaluator """rankbased""" +79 67 dataset """kinships""" +79 67 model """conve""" +79 67 loss """softplus""" +79 67 regularizer """no""" +79 67 optimizer """adam""" +79 67 training_loop """lcwa""" +79 67 evaluator """rankbased""" +79 68 dataset """kinships""" +79 68 model """conve""" +79 68 loss """softplus""" +79 68 regularizer """no""" +79 68 optimizer """adam""" +79 68 training_loop """lcwa""" +79 68 evaluator """rankbased""" +79 69 dataset """kinships""" +79 69 model 
"""conve""" +79 69 loss """softplus""" +79 69 regularizer """no""" +79 69 optimizer """adam""" +79 69 training_loop """lcwa""" +79 69 evaluator """rankbased""" +79 70 dataset """kinships""" +79 70 model """conve""" +79 70 loss """softplus""" +79 70 regularizer """no""" +79 70 optimizer """adam""" +79 70 training_loop """lcwa""" +79 70 evaluator """rankbased""" +79 71 dataset """kinships""" +79 71 model """conve""" +79 71 loss """softplus""" +79 71 regularizer """no""" +79 71 optimizer """adam""" +79 71 training_loop """lcwa""" +79 71 evaluator """rankbased""" +79 72 dataset """kinships""" +79 72 model """conve""" +79 72 loss """softplus""" +79 72 regularizer """no""" +79 72 optimizer """adam""" +79 72 training_loop """lcwa""" +79 72 evaluator """rankbased""" +79 73 dataset """kinships""" +79 73 model """conve""" +79 73 loss """softplus""" +79 73 regularizer """no""" +79 73 optimizer """adam""" +79 73 training_loop """lcwa""" +79 73 evaluator """rankbased""" +79 74 dataset """kinships""" +79 74 model """conve""" +79 74 loss """softplus""" +79 74 regularizer """no""" +79 74 optimizer """adam""" +79 74 training_loop """lcwa""" +79 74 evaluator """rankbased""" +79 75 dataset """kinships""" +79 75 model """conve""" +79 75 loss """softplus""" +79 75 regularizer """no""" +79 75 optimizer """adam""" +79 75 training_loop """lcwa""" +79 75 evaluator """rankbased""" +79 76 dataset """kinships""" +79 76 model """conve""" +79 76 loss """softplus""" +79 76 regularizer """no""" +79 76 optimizer """adam""" +79 76 training_loop """lcwa""" +79 76 evaluator """rankbased""" +79 77 dataset """kinships""" +79 77 model """conve""" +79 77 loss """softplus""" +79 77 regularizer """no""" +79 77 optimizer """adam""" +79 77 training_loop """lcwa""" +79 77 evaluator """rankbased""" +79 78 dataset """kinships""" +79 78 model """conve""" +79 78 loss """softplus""" +79 78 regularizer """no""" +79 78 optimizer """adam""" +79 78 training_loop """lcwa""" +79 78 evaluator """rankbased""" +79 79 
dataset """kinships""" +79 79 model """conve""" +79 79 loss """softplus""" +79 79 regularizer """no""" +79 79 optimizer """adam""" +79 79 training_loop """lcwa""" +79 79 evaluator """rankbased""" +79 80 dataset """kinships""" +79 80 model """conve""" +79 80 loss """softplus""" +79 80 regularizer """no""" +79 80 optimizer """adam""" +79 80 training_loop """lcwa""" +79 80 evaluator """rankbased""" +79 81 dataset """kinships""" +79 81 model """conve""" +79 81 loss """softplus""" +79 81 regularizer """no""" +79 81 optimizer """adam""" +79 81 training_loop """lcwa""" +79 81 evaluator """rankbased""" +79 82 dataset """kinships""" +79 82 model """conve""" +79 82 loss """softplus""" +79 82 regularizer """no""" +79 82 optimizer """adam""" +79 82 training_loop """lcwa""" +79 82 evaluator """rankbased""" +79 83 dataset """kinships""" +79 83 model """conve""" +79 83 loss """softplus""" +79 83 regularizer """no""" +79 83 optimizer """adam""" +79 83 training_loop """lcwa""" +79 83 evaluator """rankbased""" +79 84 dataset """kinships""" +79 84 model """conve""" +79 84 loss """softplus""" +79 84 regularizer """no""" +79 84 optimizer """adam""" +79 84 training_loop """lcwa""" +79 84 evaluator """rankbased""" +79 85 dataset """kinships""" +79 85 model """conve""" +79 85 loss """softplus""" +79 85 regularizer """no""" +79 85 optimizer """adam""" +79 85 training_loop """lcwa""" +79 85 evaluator """rankbased""" +79 86 dataset """kinships""" +79 86 model """conve""" +79 86 loss """softplus""" +79 86 regularizer """no""" +79 86 optimizer """adam""" +79 86 training_loop """lcwa""" +79 86 evaluator """rankbased""" +79 87 dataset """kinships""" +79 87 model """conve""" +79 87 loss """softplus""" +79 87 regularizer """no""" +79 87 optimizer """adam""" +79 87 training_loop """lcwa""" +79 87 evaluator """rankbased""" +79 88 dataset """kinships""" +79 88 model """conve""" +79 88 loss """softplus""" +79 88 regularizer """no""" +79 88 optimizer """adam""" +79 88 training_loop """lcwa""" +79 88 
evaluator """rankbased""" +79 89 dataset """kinships""" +79 89 model """conve""" +79 89 loss """softplus""" +79 89 regularizer """no""" +79 89 optimizer """adam""" +79 89 training_loop """lcwa""" +79 89 evaluator """rankbased""" +79 90 dataset """kinships""" +79 90 model """conve""" +79 90 loss """softplus""" +79 90 regularizer """no""" +79 90 optimizer """adam""" +79 90 training_loop """lcwa""" +79 90 evaluator """rankbased""" +79 91 dataset """kinships""" +79 91 model """conve""" +79 91 loss """softplus""" +79 91 regularizer """no""" +79 91 optimizer """adam""" +79 91 training_loop """lcwa""" +79 91 evaluator """rankbased""" +79 92 dataset """kinships""" +79 92 model """conve""" +79 92 loss """softplus""" +79 92 regularizer """no""" +79 92 optimizer """adam""" +79 92 training_loop """lcwa""" +79 92 evaluator """rankbased""" +79 93 dataset """kinships""" +79 93 model """conve""" +79 93 loss """softplus""" +79 93 regularizer """no""" +79 93 optimizer """adam""" +79 93 training_loop """lcwa""" +79 93 evaluator """rankbased""" +79 94 dataset """kinships""" +79 94 model """conve""" +79 94 loss """softplus""" +79 94 regularizer """no""" +79 94 optimizer """adam""" +79 94 training_loop """lcwa""" +79 94 evaluator """rankbased""" +79 95 dataset """kinships""" +79 95 model """conve""" +79 95 loss """softplus""" +79 95 regularizer """no""" +79 95 optimizer """adam""" +79 95 training_loop """lcwa""" +79 95 evaluator """rankbased""" +79 96 dataset """kinships""" +79 96 model """conve""" +79 96 loss """softplus""" +79 96 regularizer """no""" +79 96 optimizer """adam""" +79 96 training_loop """lcwa""" +79 96 evaluator """rankbased""" +79 97 dataset """kinships""" +79 97 model """conve""" +79 97 loss """softplus""" +79 97 regularizer """no""" +79 97 optimizer """adam""" +79 97 training_loop """lcwa""" +79 97 evaluator """rankbased""" +79 98 dataset """kinships""" +79 98 model """conve""" +79 98 loss """softplus""" +79 98 regularizer """no""" +79 98 optimizer """adam""" +79 98 
training_loop """lcwa""" +79 98 evaluator """rankbased""" +79 99 dataset """kinships""" +79 99 model """conve""" +79 99 loss """softplus""" +79 99 regularizer """no""" +79 99 optimizer """adam""" +79 99 training_loop """lcwa""" +79 99 evaluator """rankbased""" +79 100 dataset """kinships""" +79 100 model """conve""" +79 100 loss """softplus""" +79 100 regularizer """no""" +79 100 optimizer """adam""" +79 100 training_loop """lcwa""" +79 100 evaluator """rankbased""" +80 1 model.output_channels 40.0 +80 1 model.input_dropout 0.2890700901776826 +80 1 model.output_dropout 0.0855333979385437 +80 1 model.feature_map_dropout 0.48600218883185625 +80 1 model.embedding_dim 2.0 +80 1 optimizer.lr 0.007895834094148438 +80 1 training.batch_size 1.0 +80 1 training.label_smoothing 0.7757982915209918 +80 2 model.output_channels 49.0 +80 2 model.input_dropout 0.40892710083171835 +80 2 model.output_dropout 0.014688906945110158 +80 2 model.feature_map_dropout 0.3745835299667925 +80 2 model.embedding_dim 2.0 +80 2 optimizer.lr 0.0023460644183528607 +80 2 training.batch_size 1.0 +80 2 training.label_smoothing 0.0017793024186847435 +80 3 model.output_channels 60.0 +80 3 model.input_dropout 0.39422515988110896 +80 3 model.output_dropout 0.44272659701591455 +80 3 model.feature_map_dropout 0.2284933304103383 +80 3 model.embedding_dim 0.0 +80 3 optimizer.lr 0.00879547305118099 +80 3 training.batch_size 2.0 +80 3 training.label_smoothing 0.010574921120094003 +80 4 model.output_channels 49.0 +80 4 model.input_dropout 0.36871415654971124 +80 4 model.output_dropout 0.34401148026058853 +80 4 model.feature_map_dropout 0.2550605132986981 +80 4 model.embedding_dim 1.0 +80 4 optimizer.lr 0.08448810972308719 +80 4 training.batch_size 0.0 +80 4 training.label_smoothing 0.003590088003438948 +80 5 model.output_channels 54.0 +80 5 model.input_dropout 0.09518857863136015 +80 5 model.output_dropout 0.12888103953475688 +80 5 model.feature_map_dropout 0.4893502947580916 +80 5 model.embedding_dim 0.0 +80 5 
optimizer.lr 0.07315180850002027 +80 5 training.batch_size 2.0 +80 5 training.label_smoothing 0.13383073253785235 +80 6 model.output_channels 63.0 +80 6 model.input_dropout 0.049963759045163036 +80 6 model.output_dropout 0.1368948704126326 +80 6 model.feature_map_dropout 0.4474456249236925 +80 6 model.embedding_dim 0.0 +80 6 optimizer.lr 0.0025019161938298023 +80 6 training.batch_size 0.0 +80 6 training.label_smoothing 0.09211359925544085 +80 7 model.output_channels 43.0 +80 7 model.input_dropout 0.33577415027050594 +80 7 model.output_dropout 0.032318635333780765 +80 7 model.feature_map_dropout 0.39817132963588947 +80 7 model.embedding_dim 1.0 +80 7 optimizer.lr 0.01062878837854973 +80 7 training.batch_size 0.0 +80 7 training.label_smoothing 0.10670388409179601 +80 8 model.output_channels 18.0 +80 8 model.input_dropout 0.24330545972828233 +80 8 model.output_dropout 0.18876699748581188 +80 8 model.feature_map_dropout 0.07436103994063542 +80 8 model.embedding_dim 2.0 +80 8 optimizer.lr 0.0041039180891273425 +80 8 training.batch_size 0.0 +80 8 training.label_smoothing 0.31350017417391873 +80 9 model.output_channels 25.0 +80 9 model.input_dropout 0.3677396818582848 +80 9 model.output_dropout 0.006685996032839037 +80 9 model.feature_map_dropout 0.10386588662164997 +80 9 model.embedding_dim 1.0 +80 9 optimizer.lr 0.0025227979403507094 +80 9 training.batch_size 2.0 +80 9 training.label_smoothing 0.05650577613798305 +80 10 model.output_channels 43.0 +80 10 model.input_dropout 0.23577725979695746 +80 10 model.output_dropout 0.4393518740437818 +80 10 model.feature_map_dropout 0.03827996192282662 +80 10 model.embedding_dim 0.0 +80 10 optimizer.lr 0.01514254533768605 +80 10 training.batch_size 1.0 +80 10 training.label_smoothing 0.0017186278347714695 +80 11 model.output_channels 27.0 +80 11 model.input_dropout 0.4911604005235031 +80 11 model.output_dropout 0.3613933903693308 +80 11 model.feature_map_dropout 0.4640581519180056 +80 11 model.embedding_dim 2.0 +80 11 optimizer.lr 
0.0031390221911274043 +80 11 training.batch_size 0.0 +80 11 training.label_smoothing 0.03046885118607046 +80 12 model.output_channels 35.0 +80 12 model.input_dropout 0.37280607871560617 +80 12 model.output_dropout 0.4704072301461073 +80 12 model.feature_map_dropout 0.4121756728667613 +80 12 model.embedding_dim 1.0 +80 12 optimizer.lr 0.041401568617984344 +80 12 training.batch_size 2.0 +80 12 training.label_smoothing 0.005226461213506434 +80 13 model.output_channels 25.0 +80 13 model.input_dropout 0.16409198710372075 +80 13 model.output_dropout 0.14979314689665285 +80 13 model.feature_map_dropout 0.345657739394619 +80 13 model.embedding_dim 2.0 +80 13 optimizer.lr 0.00419243151875477 +80 13 training.batch_size 1.0 +80 13 training.label_smoothing 0.0016555743229074722 +80 14 model.output_channels 51.0 +80 14 model.input_dropout 0.1929153904042552 +80 14 model.output_dropout 0.3903784209789122 +80 14 model.feature_map_dropout 0.04267545915702903 +80 14 model.embedding_dim 1.0 +80 14 optimizer.lr 0.005336297525359867 +80 14 training.batch_size 0.0 +80 14 training.label_smoothing 0.03723238454548612 +80 15 model.output_channels 61.0 +80 15 model.input_dropout 0.06004624882014342 +80 15 model.output_dropout 0.027518617570890025 +80 15 model.feature_map_dropout 0.18441184843419928 +80 15 model.embedding_dim 1.0 +80 15 optimizer.lr 0.007516512997983284 +80 15 training.batch_size 1.0 +80 15 training.label_smoothing 0.0032834107439542134 +80 16 model.output_channels 55.0 +80 16 model.input_dropout 0.1258308442670933 +80 16 model.output_dropout 0.4842221559781106 +80 16 model.feature_map_dropout 0.3260183446265956 +80 16 model.embedding_dim 1.0 +80 16 optimizer.lr 0.003411257381345149 +80 16 training.batch_size 0.0 +80 16 training.label_smoothing 0.01837789265393001 +80 17 model.output_channels 50.0 +80 17 model.input_dropout 0.00921179743267836 +80 17 model.output_dropout 0.3084188462455458 +80 17 model.feature_map_dropout 0.07642347113160153 +80 17 model.embedding_dim 1.0 
+80 17 optimizer.lr 0.020766639926582543 +80 17 training.batch_size 2.0 +80 17 training.label_smoothing 0.008028193252940586 +80 18 model.output_channels 64.0 +80 18 model.input_dropout 0.15619993460066 +80 18 model.output_dropout 0.22094109170785725 +80 18 model.feature_map_dropout 0.1828294010786024 +80 18 model.embedding_dim 1.0 +80 18 optimizer.lr 0.01378074356286243 +80 18 training.batch_size 2.0 +80 18 training.label_smoothing 0.07783586233366388 +80 19 model.output_channels 44.0 +80 19 model.input_dropout 0.40997758716444427 +80 19 model.output_dropout 0.13284774404219624 +80 19 model.feature_map_dropout 0.3718833425844376 +80 19 model.embedding_dim 0.0 +80 19 optimizer.lr 0.006090252584709405 +80 19 training.batch_size 1.0 +80 19 training.label_smoothing 0.31513182712578447 +80 20 model.output_channels 60.0 +80 20 model.input_dropout 0.2845448223622372 +80 20 model.output_dropout 0.3059730688319127 +80 20 model.feature_map_dropout 0.4840676010235769 +80 20 model.embedding_dim 2.0 +80 20 optimizer.lr 0.008269748380339843 +80 20 training.batch_size 2.0 +80 20 training.label_smoothing 0.004304370407414892 +80 21 model.output_channels 46.0 +80 21 model.input_dropout 0.07335756229756296 +80 21 model.output_dropout 0.25439044516806153 +80 21 model.feature_map_dropout 0.2825295397790684 +80 21 model.embedding_dim 1.0 +80 21 optimizer.lr 0.004644573901452185 +80 21 training.batch_size 0.0 +80 21 training.label_smoothing 0.04174410799904745 +80 22 model.output_channels 16.0 +80 22 model.input_dropout 0.12724544587825365 +80 22 model.output_dropout 0.21271096447166332 +80 22 model.feature_map_dropout 0.38990080528439425 +80 22 model.embedding_dim 0.0 +80 22 optimizer.lr 0.0027687082683826563 +80 22 training.batch_size 2.0 +80 22 training.label_smoothing 0.0010671947470532602 +80 23 model.output_channels 33.0 +80 23 model.input_dropout 0.2712631584674336 +80 23 model.output_dropout 0.002404338212288859 +80 23 model.feature_map_dropout 0.4658127132432524 +80 23 
model.embedding_dim 1.0 +80 23 optimizer.lr 0.04947704086264835 +80 23 training.batch_size 2.0 +80 23 training.label_smoothing 0.0016507001306504691 +80 24 model.output_channels 32.0 +80 24 model.input_dropout 0.3205843253341603 +80 24 model.output_dropout 0.2940672944853851 +80 24 model.feature_map_dropout 0.09735743987888074 +80 24 model.embedding_dim 2.0 +80 24 optimizer.lr 0.03470702413012761 +80 24 training.batch_size 1.0 +80 24 training.label_smoothing 0.004317840739427728 +80 25 model.output_channels 26.0 +80 25 model.input_dropout 0.22312223435769607 +80 25 model.output_dropout 0.38506546678165227 +80 25 model.feature_map_dropout 0.4615201558961387 +80 25 model.embedding_dim 1.0 +80 25 optimizer.lr 0.020220322510757227 +80 25 training.batch_size 2.0 +80 25 training.label_smoothing 0.015782903399389724 +80 26 model.output_channels 44.0 +80 26 model.input_dropout 0.3983663436307318 +80 26 model.output_dropout 0.26344287145754014 +80 26 model.feature_map_dropout 0.4813892737535129 +80 26 model.embedding_dim 2.0 +80 26 optimizer.lr 0.0011395344269175857 +80 26 training.batch_size 2.0 +80 26 training.label_smoothing 0.054936627510525186 +80 27 model.output_channels 50.0 +80 27 model.input_dropout 0.012814876139172426 +80 27 model.output_dropout 0.271036049696476 +80 27 model.feature_map_dropout 0.014891983966118238 +80 27 model.embedding_dim 2.0 +80 27 optimizer.lr 0.09229540338219692 +80 27 training.batch_size 1.0 +80 27 training.label_smoothing 0.002258702262520652 +80 28 model.output_channels 37.0 +80 28 model.input_dropout 0.057516070322646495 +80 28 model.output_dropout 0.47469988502336075 +80 28 model.feature_map_dropout 0.39015333702512556 +80 28 model.embedding_dim 2.0 +80 28 optimizer.lr 0.010786814806316913 +80 28 training.batch_size 2.0 +80 28 training.label_smoothing 0.029780423922763685 +80 29 model.output_channels 44.0 +80 29 model.input_dropout 0.23112739115484765 +80 29 model.output_dropout 0.22789239444072074 +80 29 model.feature_map_dropout 
0.15269376772687332 +80 29 model.embedding_dim 1.0 +80 29 optimizer.lr 0.0012986625638545246 +80 29 training.batch_size 2.0 +80 29 training.label_smoothing 0.014878390232645268 +80 30 model.output_channels 29.0 +80 30 model.input_dropout 0.2853279265824316 +80 30 model.output_dropout 0.15833615888882735 +80 30 model.feature_map_dropout 0.10376020794127466 +80 30 model.embedding_dim 0.0 +80 30 optimizer.lr 0.028773871150604063 +80 30 training.batch_size 0.0 +80 30 training.label_smoothing 0.10464264284324554 +80 31 model.output_channels 30.0 +80 31 model.input_dropout 0.41756699733196573 +80 31 model.output_dropout 0.3853328324623613 +80 31 model.feature_map_dropout 0.4825129366026148 +80 31 model.embedding_dim 2.0 +80 31 optimizer.lr 0.03730124101372738 +80 31 training.batch_size 2.0 +80 31 training.label_smoothing 0.031956839063937245 +80 32 model.output_channels 33.0 +80 32 model.input_dropout 0.24739598701594961 +80 32 model.output_dropout 0.17564361669497525 +80 32 model.feature_map_dropout 0.0940899494148526 +80 32 model.embedding_dim 1.0 +80 32 optimizer.lr 0.01562750488922127 +80 32 training.batch_size 1.0 +80 32 training.label_smoothing 0.07613957757380678 +80 33 model.output_channels 63.0 +80 33 model.input_dropout 0.04196727068683087 +80 33 model.output_dropout 0.10301940914058583 +80 33 model.feature_map_dropout 0.34225275373174296 +80 33 model.embedding_dim 2.0 +80 33 optimizer.lr 0.0015758118048900841 +80 33 training.batch_size 1.0 +80 33 training.label_smoothing 0.001126962977142808 +80 34 model.output_channels 56.0 +80 34 model.input_dropout 0.10640458345547932 +80 34 model.output_dropout 0.3678972006691643 +80 34 model.feature_map_dropout 0.024289711224828148 +80 34 model.embedding_dim 0.0 +80 34 optimizer.lr 0.0011109861472440806 +80 34 training.batch_size 1.0 +80 34 training.label_smoothing 0.03395967048897649 +80 35 model.output_channels 60.0 +80 35 model.input_dropout 0.17300234224453237 +80 35 model.output_dropout 0.23267081457422684 +80 35 
model.feature_map_dropout 0.25688337036352965 +80 35 model.embedding_dim 1.0 +80 35 optimizer.lr 0.024463473413030012 +80 35 training.batch_size 2.0 +80 35 training.label_smoothing 0.10964118716996267 +80 36 model.output_channels 47.0 +80 36 model.input_dropout 0.3334493142646348 +80 36 model.output_dropout 0.11998587580521719 +80 36 model.feature_map_dropout 0.09306056509562871 +80 36 model.embedding_dim 1.0 +80 36 optimizer.lr 0.020694886091577887 +80 36 training.batch_size 2.0 +80 36 training.label_smoothing 0.589497843699203 +80 37 model.output_channels 53.0 +80 37 model.input_dropout 0.31939767357414345 +80 37 model.output_dropout 0.4399453222227824 +80 37 model.feature_map_dropout 0.3929590206077552 +80 37 model.embedding_dim 2.0 +80 37 optimizer.lr 0.003281422810757328 +80 37 training.batch_size 1.0 +80 37 training.label_smoothing 0.028425060801234914 +80 38 model.output_channels 39.0 +80 38 model.input_dropout 0.2200227374249522 +80 38 model.output_dropout 0.06927760626086682 +80 38 model.feature_map_dropout 0.3604167897104198 +80 38 model.embedding_dim 0.0 +80 38 optimizer.lr 0.005350043072298036 +80 38 training.batch_size 1.0 +80 38 training.label_smoothing 0.006065004476231477 +80 39 model.output_channels 63.0 +80 39 model.input_dropout 0.15493877614522977 +80 39 model.output_dropout 0.3199001023474387 +80 39 model.feature_map_dropout 0.44163573734780226 +80 39 model.embedding_dim 2.0 +80 39 optimizer.lr 0.0014731427972230879 +80 39 training.batch_size 1.0 +80 39 training.label_smoothing 0.04210213568345224 +80 40 model.output_channels 28.0 +80 40 model.input_dropout 0.2678350232622947 +80 40 model.output_dropout 0.07586696568071227 +80 40 model.feature_map_dropout 0.3327994614963055 +80 40 model.embedding_dim 0.0 +80 40 optimizer.lr 0.0017529542532877423 +80 40 training.batch_size 1.0 +80 40 training.label_smoothing 0.018648165885824464 +80 41 model.output_channels 28.0 +80 41 model.input_dropout 0.20587534708441252 +80 41 model.output_dropout 
0.05910361695109295 +80 41 model.feature_map_dropout 0.43659599023539075 +80 41 model.embedding_dim 0.0 +80 41 optimizer.lr 0.046749611275061734 +80 41 training.batch_size 1.0 +80 41 training.label_smoothing 0.00818278131457315 +80 42 model.output_channels 43.0 +80 42 model.input_dropout 0.20908726512925757 +80 42 model.output_dropout 0.07037338101563545 +80 42 model.feature_map_dropout 0.10227687590263801 +80 42 model.embedding_dim 2.0 +80 42 optimizer.lr 0.0017713199914254118 +80 42 training.batch_size 0.0 +80 42 training.label_smoothing 0.0024244610479978515 +80 43 model.output_channels 33.0 +80 43 model.input_dropout 0.39657443684148347 +80 43 model.output_dropout 0.4348253085411565 +80 43 model.feature_map_dropout 0.40248568817130703 +80 43 model.embedding_dim 0.0 +80 43 optimizer.lr 0.009059434036150855 +80 43 training.batch_size 0.0 +80 43 training.label_smoothing 0.010957974614356695 +80 44 model.output_channels 28.0 +80 44 model.input_dropout 0.32747984001615843 +80 44 model.output_dropout 0.15440885727592024 +80 44 model.feature_map_dropout 0.28916683565316537 +80 44 model.embedding_dim 0.0 +80 44 optimizer.lr 0.002118592936033141 +80 44 training.batch_size 1.0 +80 44 training.label_smoothing 0.0061079819713151075 +80 45 model.output_channels 50.0 +80 45 model.input_dropout 0.4415742614321074 +80 45 model.output_dropout 0.30671090625037534 +80 45 model.feature_map_dropout 0.17621379397530013 +80 45 model.embedding_dim 0.0 +80 45 optimizer.lr 0.0013326728695540406 +80 45 training.batch_size 2.0 +80 45 training.label_smoothing 0.0011306977924944494 +80 46 model.output_channels 22.0 +80 46 model.input_dropout 0.20156103135484232 +80 46 model.output_dropout 0.2361478173031752 +80 46 model.feature_map_dropout 0.20757996584424138 +80 46 model.embedding_dim 2.0 +80 46 optimizer.lr 0.003061927797289099 +80 46 training.batch_size 2.0 +80 46 training.label_smoothing 0.6240907580710887 +80 47 model.output_channels 38.0 +80 47 model.input_dropout 0.17296717263896416 
+80 47 model.output_dropout 0.003961230662889836 +80 47 model.feature_map_dropout 0.14723834390087176 +80 47 model.embedding_dim 2.0 +80 47 optimizer.lr 0.006843944589528695 +80 47 training.batch_size 2.0 +80 47 training.label_smoothing 0.3241496225129534 +80 48 model.output_channels 36.0 +80 48 model.input_dropout 0.4153499060245384 +80 48 model.output_dropout 0.010424415328113612 +80 48 model.feature_map_dropout 0.35129116105854113 +80 48 model.embedding_dim 2.0 +80 48 optimizer.lr 0.013871746923190204 +80 48 training.batch_size 1.0 +80 48 training.label_smoothing 0.39526884974493465 +80 49 model.output_channels 43.0 +80 49 model.input_dropout 0.028146729706667817 +80 49 model.output_dropout 0.33672593465509537 +80 49 model.feature_map_dropout 0.3004881761605018 +80 49 model.embedding_dim 0.0 +80 49 optimizer.lr 0.0056881642888450875 +80 49 training.batch_size 1.0 +80 49 training.label_smoothing 0.04896482173880731 +80 50 model.output_channels 19.0 +80 50 model.input_dropout 0.06799386466115603 +80 50 model.output_dropout 0.015105199482497222 +80 50 model.feature_map_dropout 0.046154074671581 +80 50 model.embedding_dim 1.0 +80 50 optimizer.lr 0.05793724790913861 +80 50 training.batch_size 2.0 +80 50 training.label_smoothing 0.003066026992797772 +80 51 model.output_channels 37.0 +80 51 model.input_dropout 0.06898873280309636 +80 51 model.output_dropout 0.37019166367787765 +80 51 model.feature_map_dropout 0.42569022503147264 +80 51 model.embedding_dim 0.0 +80 51 optimizer.lr 0.01062246842344691 +80 51 training.batch_size 0.0 +80 51 training.label_smoothing 0.004961594073400039 +80 52 model.output_channels 53.0 +80 52 model.input_dropout 0.3439858361590754 +80 52 model.output_dropout 0.2680658318199802 +80 52 model.feature_map_dropout 0.022880947900993265 +80 52 model.embedding_dim 0.0 +80 52 optimizer.lr 0.0015453244784523342 +80 52 training.batch_size 0.0 +80 52 training.label_smoothing 0.0021176865776383028 +80 53 model.output_channels 32.0 +80 53 
model.input_dropout 0.07331697643050689 +80 53 model.output_dropout 0.3013199307042967 +80 53 model.feature_map_dropout 0.18175001297373483 +80 53 model.embedding_dim 1.0 +80 53 optimizer.lr 0.0019572716022679395 +80 53 training.batch_size 1.0 +80 53 training.label_smoothing 0.009762713621493732 +80 54 model.output_channels 51.0 +80 54 model.input_dropout 0.07606438017906297 +80 54 model.output_dropout 0.48100520820601544 +80 54 model.feature_map_dropout 0.08885861191485245 +80 54 model.embedding_dim 0.0 +80 54 optimizer.lr 0.05168678986691868 +80 54 training.batch_size 1.0 +80 54 training.label_smoothing 0.015976261518842606 +80 55 model.output_channels 59.0 +80 55 model.input_dropout 0.08669473207854678 +80 55 model.output_dropout 0.16982209828670009 +80 55 model.feature_map_dropout 0.03929173597673791 +80 55 model.embedding_dim 2.0 +80 55 optimizer.lr 0.0051644883721791365 +80 55 training.batch_size 2.0 +80 55 training.label_smoothing 0.7608267521321692 +80 56 model.output_channels 31.0 +80 56 model.input_dropout 0.32564445254813473 +80 56 model.output_dropout 0.20147639018143115 +80 56 model.feature_map_dropout 0.47547488164161467 +80 56 model.embedding_dim 1.0 +80 56 optimizer.lr 0.01134215849779658 +80 56 training.batch_size 2.0 +80 56 training.label_smoothing 0.007088197704962836 +80 57 model.output_channels 24.0 +80 57 model.input_dropout 0.24159328974887795 +80 57 model.output_dropout 0.05959884529292403 +80 57 model.feature_map_dropout 0.06997820727888054 +80 57 model.embedding_dim 2.0 +80 57 optimizer.lr 0.002911334044288939 +80 57 training.batch_size 2.0 +80 57 training.label_smoothing 0.0018580555127911415 +80 58 model.output_channels 60.0 +80 58 model.input_dropout 0.010898004777422476 +80 58 model.output_dropout 0.07938929494901165 +80 58 model.feature_map_dropout 0.32520398671400363 +80 58 model.embedding_dim 2.0 +80 58 optimizer.lr 0.0022928514737562264 +80 58 training.batch_size 1.0 +80 58 training.label_smoothing 0.39323099755868446 +80 59 
model.output_channels 37.0 +80 59 model.input_dropout 0.41201879164926125 +80 59 model.output_dropout 0.24380212117630712 +80 59 model.feature_map_dropout 0.4920237357936122 +80 59 model.embedding_dim 0.0 +80 59 optimizer.lr 0.001703941850949728 +80 59 training.batch_size 2.0 +80 59 training.label_smoothing 0.002401642838029811 +80 60 model.output_channels 38.0 +80 60 model.input_dropout 0.38237827115992024 +80 60 model.output_dropout 0.3519996234791427 +80 60 model.feature_map_dropout 0.2682919073114522 +80 60 model.embedding_dim 1.0 +80 60 optimizer.lr 0.004266917797552576 +80 60 training.batch_size 0.0 +80 60 training.label_smoothing 0.0014324348267672584 +80 61 model.output_channels 60.0 +80 61 model.input_dropout 0.4286211828841785 +80 61 model.output_dropout 0.031179076451308685 +80 61 model.feature_map_dropout 0.12535015880810146 +80 61 model.embedding_dim 2.0 +80 61 optimizer.lr 0.0010573507538741839 +80 61 training.batch_size 2.0 +80 61 training.label_smoothing 0.1006207106175866 +80 62 model.output_channels 26.0 +80 62 model.input_dropout 0.07383933790766178 +80 62 model.output_dropout 0.05160702849080706 +80 62 model.feature_map_dropout 0.07621092661196233 +80 62 model.embedding_dim 2.0 +80 62 optimizer.lr 0.07580312208632015 +80 62 training.batch_size 1.0 +80 62 training.label_smoothing 0.024963900403005206 +80 63 model.output_channels 25.0 +80 63 model.input_dropout 0.12721661329700773 +80 63 model.output_dropout 0.08070212838255053 +80 63 model.feature_map_dropout 0.3776956580810826 +80 63 model.embedding_dim 1.0 +80 63 optimizer.lr 0.01815872903793504 +80 63 training.batch_size 2.0 +80 63 training.label_smoothing 0.002876940843640052 +80 64 model.output_channels 36.0 +80 64 model.input_dropout 0.3852211633341634 +80 64 model.output_dropout 0.3382003707343309 +80 64 model.feature_map_dropout 0.22630738799014088 +80 64 model.embedding_dim 2.0 +80 64 optimizer.lr 0.0011836497443782588 +80 64 training.batch_size 2.0 +80 64 training.label_smoothing 
0.004428610152291914 +80 65 model.output_channels 52.0 +80 65 model.input_dropout 0.26825150143485965 +80 65 model.output_dropout 0.30536831754879246 +80 65 model.feature_map_dropout 0.11696963729484794 +80 65 model.embedding_dim 2.0 +80 65 optimizer.lr 0.0020809157317828 +80 65 training.batch_size 2.0 +80 65 training.label_smoothing 0.012179057912628126 +80 66 model.output_channels 45.0 +80 66 model.input_dropout 0.3242110283308303 +80 66 model.output_dropout 0.14139350592297273 +80 66 model.feature_map_dropout 0.4188274110114348 +80 66 model.embedding_dim 0.0 +80 66 optimizer.lr 0.0012536237462544742 +80 66 training.batch_size 2.0 +80 66 training.label_smoothing 0.024131517032779826 +80 67 model.output_channels 47.0 +80 67 model.input_dropout 0.4947249366694143 +80 67 model.output_dropout 0.47014042375612897 +80 67 model.feature_map_dropout 0.16432851247071295 +80 67 model.embedding_dim 0.0 +80 67 optimizer.lr 0.019683303684765015 +80 67 training.batch_size 2.0 +80 67 training.label_smoothing 0.05937130772483235 +80 68 model.output_channels 16.0 +80 68 model.input_dropout 0.15772332998685107 +80 68 model.output_dropout 0.12051420793261547 +80 68 model.feature_map_dropout 0.1826236470203223 +80 68 model.embedding_dim 0.0 +80 68 optimizer.lr 0.05026978961079932 +80 68 training.batch_size 0.0 +80 68 training.label_smoothing 0.002392786817956158 +80 69 model.output_channels 49.0 +80 69 model.input_dropout 0.271000619243332 +80 69 model.output_dropout 0.22495149065390668 +80 69 model.feature_map_dropout 0.03544052180539348 +80 69 model.embedding_dim 1.0 +80 69 optimizer.lr 0.056181824437912885 +80 69 training.batch_size 0.0 +80 69 training.label_smoothing 0.13251568757017335 +80 70 model.output_channels 47.0 +80 70 model.input_dropout 0.20420193449070112 +80 70 model.output_dropout 0.367294745841537 +80 70 model.feature_map_dropout 0.07595649478585609 +80 70 model.embedding_dim 0.0 +80 70 optimizer.lr 0.0029621070129868796 +80 70 training.batch_size 2.0 +80 70 
training.label_smoothing 0.8966876544766617 +80 71 model.output_channels 36.0 +80 71 model.input_dropout 0.30428375265445196 +80 71 model.output_dropout 0.06236470286383983 +80 71 model.feature_map_dropout 0.3831365448266372 +80 71 model.embedding_dim 1.0 +80 71 optimizer.lr 0.0017005245295914995 +80 71 training.batch_size 1.0 +80 71 training.label_smoothing 0.003259751571221822 +80 72 model.output_channels 21.0 +80 72 model.input_dropout 0.04615603820963987 +80 72 model.output_dropout 0.45544310942900085 +80 72 model.feature_map_dropout 0.3518350950850425 +80 72 model.embedding_dim 0.0 +80 72 optimizer.lr 0.0018169396869674452 +80 72 training.batch_size 1.0 +80 72 training.label_smoothing 0.4519925350629206 +80 73 model.output_channels 60.0 +80 73 model.input_dropout 0.4575156787595868 +80 73 model.output_dropout 0.4535364428290851 +80 73 model.feature_map_dropout 0.020913352912837246 +80 73 model.embedding_dim 1.0 +80 73 optimizer.lr 0.028973929887544114 +80 73 training.batch_size 2.0 +80 73 training.label_smoothing 0.03023003093412092 +80 74 model.output_channels 38.0 +80 74 model.input_dropout 0.06428393410209848 +80 74 model.output_dropout 0.2405477666851643 +80 74 model.feature_map_dropout 0.3299816248480116 +80 74 model.embedding_dim 0.0 +80 74 optimizer.lr 0.02294209123772547 +80 74 training.batch_size 0.0 +80 74 training.label_smoothing 0.0030425308459088394 +80 75 model.output_channels 44.0 +80 75 model.input_dropout 0.0708507161807378 +80 75 model.output_dropout 0.3663424689325825 +80 75 model.feature_map_dropout 0.39944567789029056 +80 75 model.embedding_dim 1.0 +80 75 optimizer.lr 0.006716671364030029 +80 75 training.batch_size 2.0 +80 75 training.label_smoothing 0.20914857587258284 +80 76 model.output_channels 51.0 +80 76 model.input_dropout 0.4069469076782097 +80 76 model.output_dropout 0.04682809481713096 +80 76 model.feature_map_dropout 0.10029955706531091 +80 76 model.embedding_dim 1.0 +80 76 optimizer.lr 0.03698611543227146 +80 76 
training.batch_size 1.0 +80 76 training.label_smoothing 0.06073847012329744 +80 77 model.output_channels 23.0 +80 77 model.input_dropout 0.1721723704983385 +80 77 model.output_dropout 0.06625842579800634 +80 77 model.feature_map_dropout 0.16620327767952098 +80 77 model.embedding_dim 0.0 +80 77 optimizer.lr 0.0427554955499016 +80 77 training.batch_size 1.0 +80 77 training.label_smoothing 0.1168992601630594 +80 78 model.output_channels 26.0 +80 78 model.input_dropout 0.13980056487683185 +80 78 model.output_dropout 0.43478822584653815 +80 78 model.feature_map_dropout 0.24884942525322568 +80 78 model.embedding_dim 2.0 +80 78 optimizer.lr 0.039691781055297214 +80 78 training.batch_size 0.0 +80 78 training.label_smoothing 0.6647197190770247 +80 79 model.output_channels 38.0 +80 79 model.input_dropout 0.4252284210629007 +80 79 model.output_dropout 0.4259553490343057 +80 79 model.feature_map_dropout 0.24788252087431806 +80 79 model.embedding_dim 2.0 +80 79 optimizer.lr 0.0018675765136858897 +80 79 training.batch_size 0.0 +80 79 training.label_smoothing 0.18772934214993173 +80 80 model.output_channels 16.0 +80 80 model.input_dropout 0.26291095845600243 +80 80 model.output_dropout 0.2391252082149669 +80 80 model.feature_map_dropout 0.17579187427786286 +80 80 model.embedding_dim 2.0 +80 80 optimizer.lr 0.010033266228314141 +80 80 training.batch_size 2.0 +80 80 training.label_smoothing 0.002850240651574279 +80 81 model.output_channels 17.0 +80 81 model.input_dropout 0.23653266471505308 +80 81 model.output_dropout 0.4474690248870599 +80 81 model.feature_map_dropout 0.38499088480230426 +80 81 model.embedding_dim 1.0 +80 81 optimizer.lr 0.005320156516952819 +80 81 training.batch_size 2.0 +80 81 training.label_smoothing 0.13850764917554592 +80 82 model.output_channels 47.0 +80 82 model.input_dropout 0.3664376787630934 +80 82 model.output_dropout 0.17543789623300354 +80 82 model.feature_map_dropout 0.37862301045710806 +80 82 model.embedding_dim 1.0 +80 82 optimizer.lr 
0.0021015973542070016 +80 82 training.batch_size 2.0 +80 82 training.label_smoothing 0.00520739278369781 +80 83 model.output_channels 31.0 +80 83 model.input_dropout 0.2760127550391373 +80 83 model.output_dropout 0.12952225980541832 +80 83 model.feature_map_dropout 0.39224730630994775 +80 83 model.embedding_dim 1.0 +80 83 optimizer.lr 0.006708871498461502 +80 83 training.batch_size 1.0 +80 83 training.label_smoothing 0.00905819496511209 +80 84 model.output_channels 36.0 +80 84 model.input_dropout 0.015988780344936615 +80 84 model.output_dropout 0.4037526732876577 +80 84 model.feature_map_dropout 0.10762140651902469 +80 84 model.embedding_dim 2.0 +80 84 optimizer.lr 0.0030215464047072876 +80 84 training.batch_size 1.0 +80 84 training.label_smoothing 0.39273691487753765 +80 85 model.output_channels 34.0 +80 85 model.input_dropout 0.02937460908331957 +80 85 model.output_dropout 0.05944166507625803 +80 85 model.feature_map_dropout 0.4139656371327516 +80 85 model.embedding_dim 1.0 +80 85 optimizer.lr 0.02682856506503253 +80 85 training.batch_size 0.0 +80 85 training.label_smoothing 0.00436035411926257 +80 86 model.output_channels 64.0 +80 86 model.input_dropout 0.3332988755784621 +80 86 model.output_dropout 0.3179175786523768 +80 86 model.feature_map_dropout 0.26151327437612887 +80 86 model.embedding_dim 2.0 +80 86 optimizer.lr 0.007137561532722731 +80 86 training.batch_size 0.0 +80 86 training.label_smoothing 0.608424276671262 +80 87 model.output_channels 64.0 +80 87 model.input_dropout 0.2620235053628296 +80 87 model.output_dropout 0.4671313329603249 +80 87 model.feature_map_dropout 0.19783954324234104 +80 87 model.embedding_dim 0.0 +80 87 optimizer.lr 0.05508415770158087 +80 87 training.batch_size 2.0 +80 87 training.label_smoothing 0.48968228239704364 +80 88 model.output_channels 43.0 +80 88 model.input_dropout 0.044738826596163406 +80 88 model.output_dropout 0.3694802554660994 +80 88 model.feature_map_dropout 0.43521921608490544 +80 88 model.embedding_dim 1.0 +80 
88 optimizer.lr 0.0853392058103258 +80 88 training.batch_size 1.0 +80 88 training.label_smoothing 0.00614808567692716 +80 89 model.output_channels 40.0 +80 89 model.input_dropout 0.3628477690091758 +80 89 model.output_dropout 0.38877171444016745 +80 89 model.feature_map_dropout 0.1203443785296614 +80 89 model.embedding_dim 1.0 +80 89 optimizer.lr 0.04010297739410481 +80 89 training.batch_size 2.0 +80 89 training.label_smoothing 0.12774087016061803 +80 90 model.output_channels 28.0 +80 90 model.input_dropout 0.11497106417055059 +80 90 model.output_dropout 0.3228270982581544 +80 90 model.feature_map_dropout 0.33207150450858486 +80 90 model.embedding_dim 0.0 +80 90 optimizer.lr 0.002333290262809063 +80 90 training.batch_size 2.0 +80 90 training.label_smoothing 0.001515562601157377 +80 91 model.output_channels 48.0 +80 91 model.input_dropout 0.19515384286511134 +80 91 model.output_dropout 0.4929670128218493 +80 91 model.feature_map_dropout 0.23305262294002105 +80 91 model.embedding_dim 0.0 +80 91 optimizer.lr 0.002558934608482544 +80 91 training.batch_size 0.0 +80 91 training.label_smoothing 0.011073907954347694 +80 92 model.output_channels 56.0 +80 92 model.input_dropout 0.1523438865512557 +80 92 model.output_dropout 0.3857728961758527 +80 92 model.feature_map_dropout 0.3537390857926674 +80 92 model.embedding_dim 2.0 +80 92 optimizer.lr 0.03463882907936865 +80 92 training.batch_size 0.0 +80 92 training.label_smoothing 0.008552130030874369 +80 93 model.output_channels 19.0 +80 93 model.input_dropout 0.06566345644710025 +80 93 model.output_dropout 0.21448714395678592 +80 93 model.feature_map_dropout 0.10089945876989953 +80 93 model.embedding_dim 1.0 +80 93 optimizer.lr 0.002007860355412423 +80 93 training.batch_size 0.0 +80 93 training.label_smoothing 0.17703635801598974 +80 94 model.output_channels 57.0 +80 94 model.input_dropout 0.37925702614511936 +80 94 model.output_dropout 0.06430043043824285 +80 94 model.feature_map_dropout 0.11768741724405757 +80 94 
model.embedding_dim 2.0 +80 94 optimizer.lr 0.005368368407283403 +80 94 training.batch_size 1.0 +80 94 training.label_smoothing 0.19250742417476704 +80 95 model.output_channels 31.0 +80 95 model.input_dropout 0.10604629662978515 +80 95 model.output_dropout 0.3339776037108833 +80 95 model.feature_map_dropout 0.14857889670432822 +80 95 model.embedding_dim 1.0 +80 95 optimizer.lr 0.00911795606717299 +80 95 training.batch_size 2.0 +80 95 training.label_smoothing 0.32156599881484943 +80 96 model.output_channels 49.0 +80 96 model.input_dropout 0.3153900298115937 +80 96 model.output_dropout 0.22673772555617366 +80 96 model.feature_map_dropout 0.11065546924165526 +80 96 model.embedding_dim 1.0 +80 96 optimizer.lr 0.004465551096953423 +80 96 training.batch_size 1.0 +80 96 training.label_smoothing 0.18994771068398808 +80 97 model.output_channels 28.0 +80 97 model.input_dropout 0.25695383822356505 +80 97 model.output_dropout 0.12845943026283818 +80 97 model.feature_map_dropout 0.00028376633519583416 +80 97 model.embedding_dim 0.0 +80 97 optimizer.lr 0.006744760560420777 +80 97 training.batch_size 0.0 +80 97 training.label_smoothing 0.030017439314512194 +80 98 model.output_channels 62.0 +80 98 model.input_dropout 0.17010404821017738 +80 98 model.output_dropout 0.047627648486092244 +80 98 model.feature_map_dropout 0.1510294389418783 +80 98 model.embedding_dim 0.0 +80 98 optimizer.lr 0.003837441423488231 +80 98 training.batch_size 2.0 +80 98 training.label_smoothing 0.4708932434430586 +80 99 model.output_channels 24.0 +80 99 model.input_dropout 0.24595720128741733 +80 99 model.output_dropout 0.14746264435406398 +80 99 model.feature_map_dropout 0.4141900802339576 +80 99 model.embedding_dim 0.0 +80 99 optimizer.lr 0.008293297604237675 +80 99 training.batch_size 0.0 +80 99 training.label_smoothing 0.040813530333293116 +80 100 model.output_channels 45.0 +80 100 model.input_dropout 0.17261391661666037 +80 100 model.output_dropout 0.1446122920839109 +80 100 model.feature_map_dropout 
0.32582588278091257 +80 100 model.embedding_dim 2.0 +80 100 optimizer.lr 0.012235009601049918 +80 100 training.batch_size 2.0 +80 100 training.label_smoothing 0.49792359726219465 +80 1 dataset """kinships""" +80 1 model """conve""" +80 1 loss """bceaftersigmoid""" +80 1 regularizer """no""" +80 1 optimizer """adam""" +80 1 training_loop """lcwa""" +80 1 evaluator """rankbased""" +80 2 dataset """kinships""" +80 2 model """conve""" +80 2 loss """bceaftersigmoid""" +80 2 regularizer """no""" +80 2 optimizer """adam""" +80 2 training_loop """lcwa""" +80 2 evaluator """rankbased""" +80 3 dataset """kinships""" +80 3 model """conve""" +80 3 loss """bceaftersigmoid""" +80 3 regularizer """no""" +80 3 optimizer """adam""" +80 3 training_loop """lcwa""" +80 3 evaluator """rankbased""" +80 4 dataset """kinships""" +80 4 model """conve""" +80 4 loss """bceaftersigmoid""" +80 4 regularizer """no""" +80 4 optimizer """adam""" +80 4 training_loop """lcwa""" +80 4 evaluator """rankbased""" +80 5 dataset """kinships""" +80 5 model """conve""" +80 5 loss """bceaftersigmoid""" +80 5 regularizer """no""" +80 5 optimizer """adam""" +80 5 training_loop """lcwa""" +80 5 evaluator """rankbased""" +80 6 dataset """kinships""" +80 6 model """conve""" +80 6 loss """bceaftersigmoid""" +80 6 regularizer """no""" +80 6 optimizer """adam""" +80 6 training_loop """lcwa""" +80 6 evaluator """rankbased""" +80 7 dataset """kinships""" +80 7 model """conve""" +80 7 loss """bceaftersigmoid""" +80 7 regularizer """no""" +80 7 optimizer """adam""" +80 7 training_loop """lcwa""" +80 7 evaluator """rankbased""" +80 8 dataset """kinships""" +80 8 model """conve""" +80 8 loss """bceaftersigmoid""" +80 8 regularizer """no""" +80 8 optimizer """adam""" +80 8 training_loop """lcwa""" +80 8 evaluator """rankbased""" +80 9 dataset """kinships""" +80 9 model """conve""" +80 9 loss """bceaftersigmoid""" +80 9 regularizer """no""" +80 9 optimizer """adam""" +80 9 training_loop """lcwa""" +80 9 evaluator 
"""rankbased""" +80 10 dataset """kinships""" +80 10 model """conve""" +80 10 loss """bceaftersigmoid""" +80 10 regularizer """no""" +80 10 optimizer """adam""" +80 10 training_loop """lcwa""" +80 10 evaluator """rankbased""" +80 11 dataset """kinships""" +80 11 model """conve""" +80 11 loss """bceaftersigmoid""" +80 11 regularizer """no""" +80 11 optimizer """adam""" +80 11 training_loop """lcwa""" +80 11 evaluator """rankbased""" +80 12 dataset """kinships""" +80 12 model """conve""" +80 12 loss """bceaftersigmoid""" +80 12 regularizer """no""" +80 12 optimizer """adam""" +80 12 training_loop """lcwa""" +80 12 evaluator """rankbased""" +80 13 dataset """kinships""" +80 13 model """conve""" +80 13 loss """bceaftersigmoid""" +80 13 regularizer """no""" +80 13 optimizer """adam""" +80 13 training_loop """lcwa""" +80 13 evaluator """rankbased""" +80 14 dataset """kinships""" +80 14 model """conve""" +80 14 loss """bceaftersigmoid""" +80 14 regularizer """no""" +80 14 optimizer """adam""" +80 14 training_loop """lcwa""" +80 14 evaluator """rankbased""" +80 15 dataset """kinships""" +80 15 model """conve""" +80 15 loss """bceaftersigmoid""" +80 15 regularizer """no""" +80 15 optimizer """adam""" +80 15 training_loop """lcwa""" +80 15 evaluator """rankbased""" +80 16 dataset """kinships""" +80 16 model """conve""" +80 16 loss """bceaftersigmoid""" +80 16 regularizer """no""" +80 16 optimizer """adam""" +80 16 training_loop """lcwa""" +80 16 evaluator """rankbased""" +80 17 dataset """kinships""" +80 17 model """conve""" +80 17 loss """bceaftersigmoid""" +80 17 regularizer """no""" +80 17 optimizer """adam""" +80 17 training_loop """lcwa""" +80 17 evaluator """rankbased""" +80 18 dataset """kinships""" +80 18 model """conve""" +80 18 loss """bceaftersigmoid""" +80 18 regularizer """no""" +80 18 optimizer """adam""" +80 18 training_loop """lcwa""" +80 18 evaluator """rankbased""" +80 19 dataset """kinships""" +80 19 model """conve""" +80 19 loss """bceaftersigmoid""" +80 
19 regularizer """no""" +80 19 optimizer """adam""" +80 19 training_loop """lcwa""" +80 19 evaluator """rankbased""" +80 20 dataset """kinships""" +80 20 model """conve""" +80 20 loss """bceaftersigmoid""" +80 20 regularizer """no""" +80 20 optimizer """adam""" +80 20 training_loop """lcwa""" +80 20 evaluator """rankbased""" +80 21 dataset """kinships""" +80 21 model """conve""" +80 21 loss """bceaftersigmoid""" +80 21 regularizer """no""" +80 21 optimizer """adam""" +80 21 training_loop """lcwa""" +80 21 evaluator """rankbased""" +80 22 dataset """kinships""" +80 22 model """conve""" +80 22 loss """bceaftersigmoid""" +80 22 regularizer """no""" +80 22 optimizer """adam""" +80 22 training_loop """lcwa""" +80 22 evaluator """rankbased""" +80 23 dataset """kinships""" +80 23 model """conve""" +80 23 loss """bceaftersigmoid""" +80 23 regularizer """no""" +80 23 optimizer """adam""" +80 23 training_loop """lcwa""" +80 23 evaluator """rankbased""" +80 24 dataset """kinships""" +80 24 model """conve""" +80 24 loss """bceaftersigmoid""" +80 24 regularizer """no""" +80 24 optimizer """adam""" +80 24 training_loop """lcwa""" +80 24 evaluator """rankbased""" +80 25 dataset """kinships""" +80 25 model """conve""" +80 25 loss """bceaftersigmoid""" +80 25 regularizer """no""" +80 25 optimizer """adam""" +80 25 training_loop """lcwa""" +80 25 evaluator """rankbased""" +80 26 dataset """kinships""" +80 26 model """conve""" +80 26 loss """bceaftersigmoid""" +80 26 regularizer """no""" +80 26 optimizer """adam""" +80 26 training_loop """lcwa""" +80 26 evaluator """rankbased""" +80 27 dataset """kinships""" +80 27 model """conve""" +80 27 loss """bceaftersigmoid""" +80 27 regularizer """no""" +80 27 optimizer """adam""" +80 27 training_loop """lcwa""" +80 27 evaluator """rankbased""" +80 28 dataset """kinships""" +80 28 model """conve""" +80 28 loss """bceaftersigmoid""" +80 28 regularizer """no""" +80 28 optimizer """adam""" +80 28 training_loop """lcwa""" +80 28 evaluator 
"""rankbased""" +80 29 dataset """kinships""" +80 29 model """conve""" +80 29 loss """bceaftersigmoid""" +80 29 regularizer """no""" +80 29 optimizer """adam""" +80 29 training_loop """lcwa""" +80 29 evaluator """rankbased""" +80 30 dataset """kinships""" +80 30 model """conve""" +80 30 loss """bceaftersigmoid""" +80 30 regularizer """no""" +80 30 optimizer """adam""" +80 30 training_loop """lcwa""" +80 30 evaluator """rankbased""" +80 31 dataset """kinships""" +80 31 model """conve""" +80 31 loss """bceaftersigmoid""" +80 31 regularizer """no""" +80 31 optimizer """adam""" +80 31 training_loop """lcwa""" +80 31 evaluator """rankbased""" +80 32 dataset """kinships""" +80 32 model """conve""" +80 32 loss """bceaftersigmoid""" +80 32 regularizer """no""" +80 32 optimizer """adam""" +80 32 training_loop """lcwa""" +80 32 evaluator """rankbased""" +80 33 dataset """kinships""" +80 33 model """conve""" +80 33 loss """bceaftersigmoid""" +80 33 regularizer """no""" +80 33 optimizer """adam""" +80 33 training_loop """lcwa""" +80 33 evaluator """rankbased""" +80 34 dataset """kinships""" +80 34 model """conve""" +80 34 loss """bceaftersigmoid""" +80 34 regularizer """no""" +80 34 optimizer """adam""" +80 34 training_loop """lcwa""" +80 34 evaluator """rankbased""" +80 35 dataset """kinships""" +80 35 model """conve""" +80 35 loss """bceaftersigmoid""" +80 35 regularizer """no""" +80 35 optimizer """adam""" +80 35 training_loop """lcwa""" +80 35 evaluator """rankbased""" +80 36 dataset """kinships""" +80 36 model """conve""" +80 36 loss """bceaftersigmoid""" +80 36 regularizer """no""" +80 36 optimizer """adam""" +80 36 training_loop """lcwa""" +80 36 evaluator """rankbased""" +80 37 dataset """kinships""" +80 37 model """conve""" +80 37 loss """bceaftersigmoid""" +80 37 regularizer """no""" +80 37 optimizer """adam""" +80 37 training_loop """lcwa""" +80 37 evaluator """rankbased""" +80 38 dataset """kinships""" +80 38 model """conve""" +80 38 loss """bceaftersigmoid""" +80 
38 regularizer """no""" +80 38 optimizer """adam""" +80 38 training_loop """lcwa""" +80 38 evaluator """rankbased""" +80 39 dataset """kinships""" +80 39 model """conve""" +80 39 loss """bceaftersigmoid""" +80 39 regularizer """no""" +80 39 optimizer """adam""" +80 39 training_loop """lcwa""" +80 39 evaluator """rankbased""" +80 40 dataset """kinships""" +80 40 model """conve""" +80 40 loss """bceaftersigmoid""" +80 40 regularizer """no""" +80 40 optimizer """adam""" +80 40 training_loop """lcwa""" +80 40 evaluator """rankbased""" +80 41 dataset """kinships""" +80 41 model """conve""" +80 41 loss """bceaftersigmoid""" +80 41 regularizer """no""" +80 41 optimizer """adam""" +80 41 training_loop """lcwa""" +80 41 evaluator """rankbased""" +80 42 dataset """kinships""" +80 42 model """conve""" +80 42 loss """bceaftersigmoid""" +80 42 regularizer """no""" +80 42 optimizer """adam""" +80 42 training_loop """lcwa""" +80 42 evaluator """rankbased""" +80 43 dataset """kinships""" +80 43 model """conve""" +80 43 loss """bceaftersigmoid""" +80 43 regularizer """no""" +80 43 optimizer """adam""" +80 43 training_loop """lcwa""" +80 43 evaluator """rankbased""" +80 44 dataset """kinships""" +80 44 model """conve""" +80 44 loss """bceaftersigmoid""" +80 44 regularizer """no""" +80 44 optimizer """adam""" +80 44 training_loop """lcwa""" +80 44 evaluator """rankbased""" +80 45 dataset """kinships""" +80 45 model """conve""" +80 45 loss """bceaftersigmoid""" +80 45 regularizer """no""" +80 45 optimizer """adam""" +80 45 training_loop """lcwa""" +80 45 evaluator """rankbased""" +80 46 dataset """kinships""" +80 46 model """conve""" +80 46 loss """bceaftersigmoid""" +80 46 regularizer """no""" +80 46 optimizer """adam""" +80 46 training_loop """lcwa""" +80 46 evaluator """rankbased""" +80 47 dataset """kinships""" +80 47 model """conve""" +80 47 loss """bceaftersigmoid""" +80 47 regularizer """no""" +80 47 optimizer """adam""" +80 47 training_loop """lcwa""" +80 47 evaluator 
"""rankbased""" +80 48 dataset """kinships""" +80 48 model """conve""" +80 48 loss """bceaftersigmoid""" +80 48 regularizer """no""" +80 48 optimizer """adam""" +80 48 training_loop """lcwa""" +80 48 evaluator """rankbased""" +80 49 dataset """kinships""" +80 49 model """conve""" +80 49 loss """bceaftersigmoid""" +80 49 regularizer """no""" +80 49 optimizer """adam""" +80 49 training_loop """lcwa""" +80 49 evaluator """rankbased""" +80 50 dataset """kinships""" +80 50 model """conve""" +80 50 loss """bceaftersigmoid""" +80 50 regularizer """no""" +80 50 optimizer """adam""" +80 50 training_loop """lcwa""" +80 50 evaluator """rankbased""" +80 51 dataset """kinships""" +80 51 model """conve""" +80 51 loss """bceaftersigmoid""" +80 51 regularizer """no""" +80 51 optimizer """adam""" +80 51 training_loop """lcwa""" +80 51 evaluator """rankbased""" +80 52 dataset """kinships""" +80 52 model """conve""" +80 52 loss """bceaftersigmoid""" +80 52 regularizer """no""" +80 52 optimizer """adam""" +80 52 training_loop """lcwa""" +80 52 evaluator """rankbased""" +80 53 dataset """kinships""" +80 53 model """conve""" +80 53 loss """bceaftersigmoid""" +80 53 regularizer """no""" +80 53 optimizer """adam""" +80 53 training_loop """lcwa""" +80 53 evaluator """rankbased""" +80 54 dataset """kinships""" +80 54 model """conve""" +80 54 loss """bceaftersigmoid""" +80 54 regularizer """no""" +80 54 optimizer """adam""" +80 54 training_loop """lcwa""" +80 54 evaluator """rankbased""" +80 55 dataset """kinships""" +80 55 model """conve""" +80 55 loss """bceaftersigmoid""" +80 55 regularizer """no""" +80 55 optimizer """adam""" +80 55 training_loop """lcwa""" +80 55 evaluator """rankbased""" +80 56 dataset """kinships""" +80 56 model """conve""" +80 56 loss """bceaftersigmoid""" +80 56 regularizer """no""" +80 56 optimizer """adam""" +80 56 training_loop """lcwa""" +80 56 evaluator """rankbased""" +80 57 dataset """kinships""" +80 57 model """conve""" +80 57 loss """bceaftersigmoid""" +80 
57 regularizer """no""" +80 57 optimizer """adam""" +80 57 training_loop """lcwa""" +80 57 evaluator """rankbased""" +80 58 dataset """kinships""" +80 58 model """conve""" +80 58 loss """bceaftersigmoid""" +80 58 regularizer """no""" +80 58 optimizer """adam""" +80 58 training_loop """lcwa""" +80 58 evaluator """rankbased""" +80 59 dataset """kinships""" +80 59 model """conve""" +80 59 loss """bceaftersigmoid""" +80 59 regularizer """no""" +80 59 optimizer """adam""" +80 59 training_loop """lcwa""" +80 59 evaluator """rankbased""" +80 60 dataset """kinships""" +80 60 model """conve""" +80 60 loss """bceaftersigmoid""" +80 60 regularizer """no""" +80 60 optimizer """adam""" +80 60 training_loop """lcwa""" +80 60 evaluator """rankbased""" +80 61 dataset """kinships""" +80 61 model """conve""" +80 61 loss """bceaftersigmoid""" +80 61 regularizer """no""" +80 61 optimizer """adam""" +80 61 training_loop """lcwa""" +80 61 evaluator """rankbased""" +80 62 dataset """kinships""" +80 62 model """conve""" +80 62 loss """bceaftersigmoid""" +80 62 regularizer """no""" +80 62 optimizer """adam""" +80 62 training_loop """lcwa""" +80 62 evaluator """rankbased""" +80 63 dataset """kinships""" +80 63 model """conve""" +80 63 loss """bceaftersigmoid""" +80 63 regularizer """no""" +80 63 optimizer """adam""" +80 63 training_loop """lcwa""" +80 63 evaluator """rankbased""" +80 64 dataset """kinships""" +80 64 model """conve""" +80 64 loss """bceaftersigmoid""" +80 64 regularizer """no""" +80 64 optimizer """adam""" +80 64 training_loop """lcwa""" +80 64 evaluator """rankbased""" +80 65 dataset """kinships""" +80 65 model """conve""" +80 65 loss """bceaftersigmoid""" +80 65 regularizer """no""" +80 65 optimizer """adam""" +80 65 training_loop """lcwa""" +80 65 evaluator """rankbased""" +80 66 dataset """kinships""" +80 66 model """conve""" +80 66 loss """bceaftersigmoid""" +80 66 regularizer """no""" +80 66 optimizer """adam""" +80 66 training_loop """lcwa""" +80 66 evaluator 
"""rankbased""" +80 67 dataset """kinships""" +80 67 model """conve""" +80 67 loss """bceaftersigmoid""" +80 67 regularizer """no""" +80 67 optimizer """adam""" +80 67 training_loop """lcwa""" +80 67 evaluator """rankbased""" +80 68 dataset """kinships""" +80 68 model """conve""" +80 68 loss """bceaftersigmoid""" +80 68 regularizer """no""" +80 68 optimizer """adam""" +80 68 training_loop """lcwa""" +80 68 evaluator """rankbased""" +80 69 dataset """kinships""" +80 69 model """conve""" +80 69 loss """bceaftersigmoid""" +80 69 regularizer """no""" +80 69 optimizer """adam""" +80 69 training_loop """lcwa""" +80 69 evaluator """rankbased""" +80 70 dataset """kinships""" +80 70 model """conve""" +80 70 loss """bceaftersigmoid""" +80 70 regularizer """no""" +80 70 optimizer """adam""" +80 70 training_loop """lcwa""" +80 70 evaluator """rankbased""" +80 71 dataset """kinships""" +80 71 model """conve""" +80 71 loss """bceaftersigmoid""" +80 71 regularizer """no""" +80 71 optimizer """adam""" +80 71 training_loop """lcwa""" +80 71 evaluator """rankbased""" +80 72 dataset """kinships""" +80 72 model """conve""" +80 72 loss """bceaftersigmoid""" +80 72 regularizer """no""" +80 72 optimizer """adam""" +80 72 training_loop """lcwa""" +80 72 evaluator """rankbased""" +80 73 dataset """kinships""" +80 73 model """conve""" +80 73 loss """bceaftersigmoid""" +80 73 regularizer """no""" +80 73 optimizer """adam""" +80 73 training_loop """lcwa""" +80 73 evaluator """rankbased""" +80 74 dataset """kinships""" +80 74 model """conve""" +80 74 loss """bceaftersigmoid""" +80 74 regularizer """no""" +80 74 optimizer """adam""" +80 74 training_loop """lcwa""" +80 74 evaluator """rankbased""" +80 75 dataset """kinships""" +80 75 model """conve""" +80 75 loss """bceaftersigmoid""" +80 75 regularizer """no""" +80 75 optimizer """adam""" +80 75 training_loop """lcwa""" +80 75 evaluator """rankbased""" +80 76 dataset """kinships""" +80 76 model """conve""" +80 76 loss """bceaftersigmoid""" +80 
76 regularizer """no""" +80 76 optimizer """adam""" +80 76 training_loop """lcwa""" +80 76 evaluator """rankbased""" +80 77 dataset """kinships""" +80 77 model """conve""" +80 77 loss """bceaftersigmoid""" +80 77 regularizer """no""" +80 77 optimizer """adam""" +80 77 training_loop """lcwa""" +80 77 evaluator """rankbased""" +80 78 dataset """kinships""" +80 78 model """conve""" +80 78 loss """bceaftersigmoid""" +80 78 regularizer """no""" +80 78 optimizer """adam""" +80 78 training_loop """lcwa""" +80 78 evaluator """rankbased""" +80 79 dataset """kinships""" +80 79 model """conve""" +80 79 loss """bceaftersigmoid""" +80 79 regularizer """no""" +80 79 optimizer """adam""" +80 79 training_loop """lcwa""" +80 79 evaluator """rankbased""" +80 80 dataset """kinships""" +80 80 model """conve""" +80 80 loss """bceaftersigmoid""" +80 80 regularizer """no""" +80 80 optimizer """adam""" +80 80 training_loop """lcwa""" +80 80 evaluator """rankbased""" +80 81 dataset """kinships""" +80 81 model """conve""" +80 81 loss """bceaftersigmoid""" +80 81 regularizer """no""" +80 81 optimizer """adam""" +80 81 training_loop """lcwa""" +80 81 evaluator """rankbased""" +80 82 dataset """kinships""" +80 82 model """conve""" +80 82 loss """bceaftersigmoid""" +80 82 regularizer """no""" +80 82 optimizer """adam""" +80 82 training_loop """lcwa""" +80 82 evaluator """rankbased""" +80 83 dataset """kinships""" +80 83 model """conve""" +80 83 loss """bceaftersigmoid""" +80 83 regularizer """no""" +80 83 optimizer """adam""" +80 83 training_loop """lcwa""" +80 83 evaluator """rankbased""" +80 84 dataset """kinships""" +80 84 model """conve""" +80 84 loss """bceaftersigmoid""" +80 84 regularizer """no""" +80 84 optimizer """adam""" +80 84 training_loop """lcwa""" +80 84 evaluator """rankbased""" +80 85 dataset """kinships""" +80 85 model """conve""" +80 85 loss """bceaftersigmoid""" +80 85 regularizer """no""" +80 85 optimizer """adam""" +80 85 training_loop """lcwa""" +80 85 evaluator 
"""rankbased""" +80 86 dataset """kinships""" +80 86 model """conve""" +80 86 loss """bceaftersigmoid""" +80 86 regularizer """no""" +80 86 optimizer """adam""" +80 86 training_loop """lcwa""" +80 86 evaluator """rankbased""" +80 87 dataset """kinships""" +80 87 model """conve""" +80 87 loss """bceaftersigmoid""" +80 87 regularizer """no""" +80 87 optimizer """adam""" +80 87 training_loop """lcwa""" +80 87 evaluator """rankbased""" +80 88 dataset """kinships""" +80 88 model """conve""" +80 88 loss """bceaftersigmoid""" +80 88 regularizer """no""" +80 88 optimizer """adam""" +80 88 training_loop """lcwa""" +80 88 evaluator """rankbased""" +80 89 dataset """kinships""" +80 89 model """conve""" +80 89 loss """bceaftersigmoid""" +80 89 regularizer """no""" +80 89 optimizer """adam""" +80 89 training_loop """lcwa""" +80 89 evaluator """rankbased""" +80 90 dataset """kinships""" +80 90 model """conve""" +80 90 loss """bceaftersigmoid""" +80 90 regularizer """no""" +80 90 optimizer """adam""" +80 90 training_loop """lcwa""" +80 90 evaluator """rankbased""" +80 91 dataset """kinships""" +80 91 model """conve""" +80 91 loss """bceaftersigmoid""" +80 91 regularizer """no""" +80 91 optimizer """adam""" +80 91 training_loop """lcwa""" +80 91 evaluator """rankbased""" +80 92 dataset """kinships""" +80 92 model """conve""" +80 92 loss """bceaftersigmoid""" +80 92 regularizer """no""" +80 92 optimizer """adam""" +80 92 training_loop """lcwa""" +80 92 evaluator """rankbased""" +80 93 dataset """kinships""" +80 93 model """conve""" +80 93 loss """bceaftersigmoid""" +80 93 regularizer """no""" +80 93 optimizer """adam""" +80 93 training_loop """lcwa""" +80 93 evaluator """rankbased""" +80 94 dataset """kinships""" +80 94 model """conve""" +80 94 loss """bceaftersigmoid""" +80 94 regularizer """no""" +80 94 optimizer """adam""" +80 94 training_loop """lcwa""" +80 94 evaluator """rankbased""" +80 95 dataset """kinships""" +80 95 model """conve""" +80 95 loss """bceaftersigmoid""" +80 
95 regularizer """no""" +80 95 optimizer """adam""" +80 95 training_loop """lcwa""" +80 95 evaluator """rankbased""" +80 96 dataset """kinships""" +80 96 model """conve""" +80 96 loss """bceaftersigmoid""" +80 96 regularizer """no""" +80 96 optimizer """adam""" +80 96 training_loop """lcwa""" +80 96 evaluator """rankbased""" +80 97 dataset """kinships""" +80 97 model """conve""" +80 97 loss """bceaftersigmoid""" +80 97 regularizer """no""" +80 97 optimizer """adam""" +80 97 training_loop """lcwa""" +80 97 evaluator """rankbased""" +80 98 dataset """kinships""" +80 98 model """conve""" +80 98 loss """bceaftersigmoid""" +80 98 regularizer """no""" +80 98 optimizer """adam""" +80 98 training_loop """lcwa""" +80 98 evaluator """rankbased""" +80 99 dataset """kinships""" +80 99 model """conve""" +80 99 loss """bceaftersigmoid""" +80 99 regularizer """no""" +80 99 optimizer """adam""" +80 99 training_loop """lcwa""" +80 99 evaluator """rankbased""" +80 100 dataset """kinships""" +80 100 model """conve""" +80 100 loss """bceaftersigmoid""" +80 100 regularizer """no""" +80 100 optimizer """adam""" +80 100 training_loop """lcwa""" +80 100 evaluator """rankbased""" +81 1 model.output_channels 50.0 +81 1 model.input_dropout 0.23630707261265782 +81 1 model.output_dropout 0.40913344154398934 +81 1 model.feature_map_dropout 0.2815176884189487 +81 1 model.embedding_dim 2.0 +81 1 optimizer.lr 0.021551941388997832 +81 1 training.batch_size 0.0 +81 1 training.label_smoothing 0.29140199678907164 +81 2 model.output_channels 17.0 +81 2 model.input_dropout 0.18888956054586475 +81 2 model.output_dropout 0.27877912473192024 +81 2 model.feature_map_dropout 0.36850514448569643 +81 2 model.embedding_dim 1.0 +81 2 optimizer.lr 0.007504249121938152 +81 2 training.batch_size 1.0 +81 2 training.label_smoothing 0.09695364274416225 +81 3 model.output_channels 55.0 +81 3 model.input_dropout 0.42482682534023103 +81 3 model.output_dropout 0.006041857036729659 +81 3 model.feature_map_dropout 
0.2633045533064941 +81 3 model.embedding_dim 2.0 +81 3 optimizer.lr 0.006622410696482717 +81 3 training.batch_size 0.0 +81 3 training.label_smoothing 0.2769439560079191 +81 4 model.output_channels 50.0 +81 4 model.input_dropout 0.35300791629275013 +81 4 model.output_dropout 0.4430486298420963 +81 4 model.feature_map_dropout 0.22705983245657846 +81 4 model.embedding_dim 1.0 +81 4 optimizer.lr 0.022012338267920827 +81 4 training.batch_size 0.0 +81 4 training.label_smoothing 0.031149666087848 +81 5 model.output_channels 63.0 +81 5 model.input_dropout 0.1541163799138915 +81 5 model.output_dropout 0.31460539680109917 +81 5 model.feature_map_dropout 0.19825477467183544 +81 5 model.embedding_dim 1.0 +81 5 optimizer.lr 0.046191259835142606 +81 5 training.batch_size 0.0 +81 5 training.label_smoothing 0.9754338077666407 +81 6 model.output_channels 50.0 +81 6 model.input_dropout 0.035887925853985814 +81 6 model.output_dropout 0.018871484697521035 +81 6 model.feature_map_dropout 0.25378200253084765 +81 6 model.embedding_dim 1.0 +81 6 optimizer.lr 0.08745399394373513 +81 6 training.batch_size 2.0 +81 6 training.label_smoothing 0.011089710469487021 +81 7 model.output_channels 50.0 +81 7 model.input_dropout 0.22332039015885857 +81 7 model.output_dropout 0.034398871824008304 +81 7 model.feature_map_dropout 0.02960781469631607 +81 7 model.embedding_dim 2.0 +81 7 optimizer.lr 0.0182851920640078 +81 7 training.batch_size 0.0 +81 7 training.label_smoothing 0.06397159871038237 +81 8 model.output_channels 59.0 +81 8 model.input_dropout 0.4754091419699803 +81 8 model.output_dropout 0.25978015701688145 +81 8 model.feature_map_dropout 0.22225161139866023 +81 8 model.embedding_dim 0.0 +81 8 optimizer.lr 0.0024006658178457574 +81 8 training.batch_size 2.0 +81 8 training.label_smoothing 0.05952013807478667 +81 9 model.output_channels 25.0 +81 9 model.input_dropout 0.21810080973882068 +81 9 model.output_dropout 0.20783846474537299 +81 9 model.feature_map_dropout 0.3820760268428817 +81 9 
model.embedding_dim 0.0 +81 9 optimizer.lr 0.020842111030102334 +81 9 training.batch_size 0.0 +81 9 training.label_smoothing 0.021428062723292885 +81 10 model.output_channels 34.0 +81 10 model.input_dropout 0.14966015602195065 +81 10 model.output_dropout 0.13053009995672987 +81 10 model.feature_map_dropout 0.27946176039554205 +81 10 model.embedding_dim 2.0 +81 10 optimizer.lr 0.028131146829183905 +81 10 training.batch_size 2.0 +81 10 training.label_smoothing 0.3417706258900778 +81 11 model.output_channels 42.0 +81 11 model.input_dropout 0.24918007952903926 +81 11 model.output_dropout 0.13355159956831825 +81 11 model.feature_map_dropout 0.46471829004105075 +81 11 model.embedding_dim 0.0 +81 11 optimizer.lr 0.0039005739390322574 +81 11 training.batch_size 2.0 +81 11 training.label_smoothing 0.11686892248368377 +81 12 model.output_channels 49.0 +81 12 model.input_dropout 0.3896361898228119 +81 12 model.output_dropout 0.3718403955761608 +81 12 model.feature_map_dropout 0.16168774749683273 +81 12 model.embedding_dim 2.0 +81 12 optimizer.lr 0.002457797357566936 +81 12 training.batch_size 1.0 +81 12 training.label_smoothing 0.002376598195478367 +81 13 model.output_channels 51.0 +81 13 model.input_dropout 0.36762107381647424 +81 13 model.output_dropout 0.08675335038082999 +81 13 model.feature_map_dropout 0.4002416905890453 +81 13 model.embedding_dim 1.0 +81 13 optimizer.lr 0.004971788589460486 +81 13 training.batch_size 1.0 +81 13 training.label_smoothing 0.12550606030027692 +81 14 model.output_channels 33.0 +81 14 model.input_dropout 0.1036335547962286 +81 14 model.output_dropout 0.4438613897789764 +81 14 model.feature_map_dropout 0.22283405448728172 +81 14 model.embedding_dim 0.0 +81 14 optimizer.lr 0.00106228866161131 +81 14 training.batch_size 2.0 +81 14 training.label_smoothing 0.04103024960891947 +81 15 model.output_channels 39.0 +81 15 model.input_dropout 0.12826466733159791 +81 15 model.output_dropout 0.26285442854465446 +81 15 model.feature_map_dropout 
0.28451656045039037 +81 15 model.embedding_dim 0.0 +81 15 optimizer.lr 0.0011785270978258299 +81 15 training.batch_size 1.0 +81 15 training.label_smoothing 0.0064442580175300785 +81 16 model.output_channels 43.0 +81 16 model.input_dropout 0.4036541048429394 +81 16 model.output_dropout 0.3486189875130072 +81 16 model.feature_map_dropout 0.4382343581617003 +81 16 model.embedding_dim 0.0 +81 16 optimizer.lr 0.0014742479924910162 +81 16 training.batch_size 0.0 +81 16 training.label_smoothing 0.01934847703033168 +81 17 model.output_channels 36.0 +81 17 model.input_dropout 0.2457530426625547 +81 17 model.output_dropout 0.10254085162600513 +81 17 model.feature_map_dropout 0.18388415182516327 +81 17 model.embedding_dim 2.0 +81 17 optimizer.lr 0.002283625100636163 +81 17 training.batch_size 0.0 +81 17 training.label_smoothing 0.9845390530904912 +81 18 model.output_channels 25.0 +81 18 model.input_dropout 0.06715030707894065 +81 18 model.output_dropout 0.45606979540343895 +81 18 model.feature_map_dropout 0.396963532245534 +81 18 model.embedding_dim 2.0 +81 18 optimizer.lr 0.06893691611539814 +81 18 training.batch_size 0.0 +81 18 training.label_smoothing 0.002603410830224971 +81 19 model.output_channels 61.0 +81 19 model.input_dropout 0.48264757878243647 +81 19 model.output_dropout 0.0004247451185548923 +81 19 model.feature_map_dropout 0.3032870180362098 +81 19 model.embedding_dim 2.0 +81 19 optimizer.lr 0.001774756030335091 +81 19 training.batch_size 2.0 +81 19 training.label_smoothing 0.01098044721507352 +81 20 model.output_channels 34.0 +81 20 model.input_dropout 0.010463118748053224 +81 20 model.output_dropout 0.06642723301162701 +81 20 model.feature_map_dropout 0.4897125905713011 +81 20 model.embedding_dim 2.0 +81 20 optimizer.lr 0.039368941285535516 +81 20 training.batch_size 2.0 +81 20 training.label_smoothing 0.0013454693854882335 +81 21 model.output_channels 30.0 +81 21 model.input_dropout 0.4662236819316915 +81 21 model.output_dropout 0.4320129729396089 +81 21 
model.feature_map_dropout 0.3151273291900832 +81 21 model.embedding_dim 2.0 +81 21 optimizer.lr 0.0993871768067368 +81 21 training.batch_size 2.0 +81 21 training.label_smoothing 0.14108262124315724 +81 22 model.output_channels 22.0 +81 22 model.input_dropout 0.07850527904690091 +81 22 model.output_dropout 0.2480610016789872 +81 22 model.feature_map_dropout 0.06416019253615629 +81 22 model.embedding_dim 0.0 +81 22 optimizer.lr 0.010844764771681694 +81 22 training.batch_size 2.0 +81 22 training.label_smoothing 0.004737202397251839 +81 23 model.output_channels 37.0 +81 23 model.input_dropout 0.14594662610853237 +81 23 model.output_dropout 0.08904368874228796 +81 23 model.feature_map_dropout 0.17456843525347399 +81 23 model.embedding_dim 2.0 +81 23 optimizer.lr 0.003218546811611842 +81 23 training.batch_size 2.0 +81 23 training.label_smoothing 0.0012016499076256623 +81 24 model.output_channels 63.0 +81 24 model.input_dropout 0.25265082374756853 +81 24 model.output_dropout 0.08537583198853838 +81 24 model.feature_map_dropout 0.34630163024335897 +81 24 model.embedding_dim 0.0 +81 24 optimizer.lr 0.0013631764688202869 +81 24 training.batch_size 2.0 +81 24 training.label_smoothing 0.009446546708558856 +81 25 model.output_channels 18.0 +81 25 model.input_dropout 0.25544594051204134 +81 25 model.output_dropout 0.10277140548558195 +81 25 model.feature_map_dropout 0.4514242400615321 +81 25 model.embedding_dim 2.0 +81 25 optimizer.lr 0.007144463453243546 +81 25 training.batch_size 1.0 +81 25 training.label_smoothing 0.6652229311453423 +81 26 model.output_channels 37.0 +81 26 model.input_dropout 0.16781216393240111 +81 26 model.output_dropout 0.4632739686679537 +81 26 model.feature_map_dropout 0.23161409988717796 +81 26 model.embedding_dim 1.0 +81 26 optimizer.lr 0.01510885452844331 +81 26 training.batch_size 2.0 +81 26 training.label_smoothing 0.0020633743580327603 +81 27 model.output_channels 63.0 +81 27 model.input_dropout 0.36115367435795304 +81 27 model.output_dropout 
0.2769185634077074 +81 27 model.feature_map_dropout 0.12846060595475683 +81 27 model.embedding_dim 0.0 +81 27 optimizer.lr 0.026039802693780503 +81 27 training.batch_size 0.0 +81 27 training.label_smoothing 0.7817642858729039 +81 28 model.output_channels 54.0 +81 28 model.input_dropout 0.32786436305138683 +81 28 model.output_dropout 0.3510681311295986 +81 28 model.feature_map_dropout 0.3066054985679842 +81 28 model.embedding_dim 1.0 +81 28 optimizer.lr 0.0012503136989193355 +81 28 training.batch_size 0.0 +81 28 training.label_smoothing 0.34974676261649756 +81 29 model.output_channels 34.0 +81 29 model.input_dropout 0.17827765791922295 +81 29 model.output_dropout 0.31274482075812277 +81 29 model.feature_map_dropout 0.40906013593931795 +81 29 model.embedding_dim 2.0 +81 29 optimizer.lr 0.0035018478560897067 +81 29 training.batch_size 1.0 +81 29 training.label_smoothing 0.02341504628985294 +81 30 model.output_channels 31.0 +81 30 model.input_dropout 0.16209430760129007 +81 30 model.output_dropout 0.06143013110969986 +81 30 model.feature_map_dropout 0.42527607349885477 +81 30 model.embedding_dim 0.0 +81 30 optimizer.lr 0.008260199530683108 +81 30 training.batch_size 1.0 +81 30 training.label_smoothing 0.5828244721289578 +81 31 model.output_channels 47.0 +81 31 model.input_dropout 0.04241167076771857 +81 31 model.output_dropout 0.05142752648051829 +81 31 model.feature_map_dropout 0.2036096629613371 +81 31 model.embedding_dim 2.0 +81 31 optimizer.lr 0.0312948848124683 +81 31 training.batch_size 0.0 +81 31 training.label_smoothing 0.011218093765998853 +81 32 model.output_channels 49.0 +81 32 model.input_dropout 0.4871064400801464 +81 32 model.output_dropout 0.02498858701681922 +81 32 model.feature_map_dropout 0.4803584954664735 +81 32 model.embedding_dim 1.0 +81 32 optimizer.lr 0.06428609398698176 +81 32 training.batch_size 2.0 +81 32 training.label_smoothing 0.0537292961526568 +81 33 model.output_channels 55.0 +81 33 model.input_dropout 0.14378064866685913 +81 33 
model.output_dropout 0.0005225333056827486 +81 33 model.feature_map_dropout 0.26360184394353003 +81 33 model.embedding_dim 2.0 +81 33 optimizer.lr 0.00247381416555838 +81 33 training.batch_size 0.0 +81 33 training.label_smoothing 0.03694631982732322 +81 34 model.output_channels 28.0 +81 34 model.input_dropout 0.03369457560548722 +81 34 model.output_dropout 0.382786976744271 +81 34 model.feature_map_dropout 0.2899436983616302 +81 34 model.embedding_dim 1.0 +81 34 optimizer.lr 0.06268111444888147 +81 34 training.batch_size 1.0 +81 34 training.label_smoothing 0.0025672892432999357 +81 35 model.output_channels 62.0 +81 35 model.input_dropout 0.0406750035760598 +81 35 model.output_dropout 0.4916150772004704 +81 35 model.feature_map_dropout 0.2222520604822414 +81 35 model.embedding_dim 0.0 +81 35 optimizer.lr 0.004356892067616312 +81 35 training.batch_size 0.0 +81 35 training.label_smoothing 0.002202145754605675 +81 36 model.output_channels 42.0 +81 36 model.input_dropout 0.17048842736617725 +81 36 model.output_dropout 0.11596693465559177 +81 36 model.feature_map_dropout 0.372049467238535 +81 36 model.embedding_dim 0.0 +81 36 optimizer.lr 0.013352965834995798 +81 36 training.batch_size 2.0 +81 36 training.label_smoothing 0.08027161182442702 +81 37 model.output_channels 40.0 +81 37 model.input_dropout 0.2980189449292905 +81 37 model.output_dropout 0.4995667470410269 +81 37 model.feature_map_dropout 0.17567169265105587 +81 37 model.embedding_dim 1.0 +81 37 optimizer.lr 0.08242401096685228 +81 37 training.batch_size 1.0 +81 37 training.label_smoothing 0.0011663410576727093 +81 38 model.output_channels 62.0 +81 38 model.input_dropout 0.07454780281598844 +81 38 model.output_dropout 0.2831771573561711 +81 38 model.feature_map_dropout 0.12520793601225616 +81 38 model.embedding_dim 1.0 +81 38 optimizer.lr 0.058026227838070536 +81 38 training.batch_size 0.0 +81 38 training.label_smoothing 0.026227449527539914 +81 39 model.output_channels 24.0 +81 39 model.input_dropout 
0.06386612184058521 +81 39 model.output_dropout 0.013625612927109565 +81 39 model.feature_map_dropout 0.4848656322740125 +81 39 model.embedding_dim 1.0 +81 39 optimizer.lr 0.003810591108734862 +81 39 training.batch_size 0.0 +81 39 training.label_smoothing 0.7540889662596267 +81 40 model.output_channels 41.0 +81 40 model.input_dropout 0.22723008528304028 +81 40 model.output_dropout 0.4119336975525546 +81 40 model.feature_map_dropout 0.18564942211920338 +81 40 model.embedding_dim 1.0 +81 40 optimizer.lr 0.004344164463357431 +81 40 training.batch_size 0.0 +81 40 training.label_smoothing 0.0016616277230709848 +81 41 model.output_channels 55.0 +81 41 model.input_dropout 0.48263836783752356 +81 41 model.output_dropout 0.10860169213503718 +81 41 model.feature_map_dropout 0.35778481667006956 +81 41 model.embedding_dim 1.0 +81 41 optimizer.lr 0.0016957991744324197 +81 41 training.batch_size 0.0 +81 41 training.label_smoothing 0.1753098004398064 +81 42 model.output_channels 22.0 +81 42 model.input_dropout 0.02077542072620786 +81 42 model.output_dropout 0.06687629496052733 +81 42 model.feature_map_dropout 0.2783424514036441 +81 42 model.embedding_dim 2.0 +81 42 optimizer.lr 0.0015294050621938533 +81 42 training.batch_size 1.0 +81 42 training.label_smoothing 0.01607475520212548 +81 43 model.output_channels 54.0 +81 43 model.input_dropout 0.30757388384413237 +81 43 model.output_dropout 0.1849630565030126 +81 43 model.feature_map_dropout 0.08719985605432906 +81 43 model.embedding_dim 0.0 +81 43 optimizer.lr 0.044668459418579415 +81 43 training.batch_size 1.0 +81 43 training.label_smoothing 0.004322425207827128 +81 44 model.output_channels 42.0 +81 44 model.input_dropout 0.061653668088075986 +81 44 model.output_dropout 0.3851102626678168 +81 44 model.feature_map_dropout 0.161059516694725 +81 44 model.embedding_dim 2.0 +81 44 optimizer.lr 0.0010660385694406045 +81 44 training.batch_size 0.0 +81 44 training.label_smoothing 0.0010676376344327225 +81 45 model.output_channels 28.0 +81 
45 model.input_dropout 0.38903138954797023 +81 45 model.output_dropout 0.36059185299322205 +81 45 model.feature_map_dropout 0.2117399562115223 +81 45 model.embedding_dim 2.0 +81 45 optimizer.lr 0.008153679652727328 +81 45 training.batch_size 1.0 +81 45 training.label_smoothing 0.0029048116218178778 +81 46 model.output_channels 59.0 +81 46 model.input_dropout 0.13898978454864613 +81 46 model.output_dropout 0.0714907112029941 +81 46 model.feature_map_dropout 0.2634335629690067 +81 46 model.embedding_dim 2.0 +81 46 optimizer.lr 0.048445547543407616 +81 46 training.batch_size 0.0 +81 46 training.label_smoothing 0.009999602507559466 +81 47 model.output_channels 25.0 +81 47 model.input_dropout 0.021378444968515986 +81 47 model.output_dropout 0.30687549281422416 +81 47 model.feature_map_dropout 0.19041682305279972 +81 47 model.embedding_dim 1.0 +81 47 optimizer.lr 0.010903767567547901 +81 47 training.batch_size 2.0 +81 47 training.label_smoothing 0.005155018272017155 +81 48 model.output_channels 49.0 +81 48 model.input_dropout 0.2909689167234157 +81 48 model.output_dropout 0.27279497704161637 +81 48 model.feature_map_dropout 0.27351546351708067 +81 48 model.embedding_dim 2.0 +81 48 optimizer.lr 0.005086031744147589 +81 48 training.batch_size 1.0 +81 48 training.label_smoothing 0.014162416736217308 +81 49 model.output_channels 47.0 +81 49 model.input_dropout 0.4874294711033096 +81 49 model.output_dropout 0.4429963014192076 +81 49 model.feature_map_dropout 0.2949529809159165 +81 49 model.embedding_dim 1.0 +81 49 optimizer.lr 0.01369517576877454 +81 49 training.batch_size 0.0 +81 49 training.label_smoothing 0.49212474655265376 +81 50 model.output_channels 44.0 +81 50 model.input_dropout 0.17143169542952935 +81 50 model.output_dropout 0.41860890546907586 +81 50 model.feature_map_dropout 0.38079221205039343 +81 50 model.embedding_dim 2.0 +81 50 optimizer.lr 0.0013474068669156064 +81 50 training.batch_size 1.0 +81 50 training.label_smoothing 0.04492700630034711 +81 51 
model.output_channels 37.0 +81 51 model.input_dropout 0.23517855418553318 +81 51 model.output_dropout 0.49043772026217763 +81 51 model.feature_map_dropout 0.40039089547753826 +81 51 model.embedding_dim 1.0 +81 51 optimizer.lr 0.020598897560332726 +81 51 training.batch_size 1.0 +81 51 training.label_smoothing 0.7330115797246939 +81 52 model.output_channels 60.0 +81 52 model.input_dropout 0.09220570016518176 +81 52 model.output_dropout 0.35032717484841436 +81 52 model.feature_map_dropout 0.32750742325370197 +81 52 model.embedding_dim 1.0 +81 52 optimizer.lr 0.03265084739556479 +81 52 training.batch_size 0.0 +81 52 training.label_smoothing 0.016362109985783545 +81 53 model.output_channels 30.0 +81 53 model.input_dropout 0.25254583389638696 +81 53 model.output_dropout 0.48382376257643217 +81 53 model.feature_map_dropout 0.04542664843191219 +81 53 model.embedding_dim 2.0 +81 53 optimizer.lr 0.002670038598180745 +81 53 training.batch_size 2.0 +81 53 training.label_smoothing 0.00316835423313644 +81 54 model.output_channels 38.0 +81 54 model.input_dropout 0.10273807508992067 +81 54 model.output_dropout 0.18211107569897417 +81 54 model.feature_map_dropout 0.0814838149062655 +81 54 model.embedding_dim 2.0 +81 54 optimizer.lr 0.039626685234908136 +81 54 training.batch_size 1.0 +81 54 training.label_smoothing 0.3482444380372737 +81 55 model.output_channels 52.0 +81 55 model.input_dropout 0.1547236278249587 +81 55 model.output_dropout 0.4880073946839793 +81 55 model.feature_map_dropout 0.3423740841668008 +81 55 model.embedding_dim 0.0 +81 55 optimizer.lr 0.001123826209686234 +81 55 training.batch_size 0.0 +81 55 training.label_smoothing 0.003517461004438625 +81 56 model.output_channels 26.0 +81 56 model.input_dropout 0.270334376720893 +81 56 model.output_dropout 0.4758830552747077 +81 56 model.feature_map_dropout 0.38458041836278434 +81 56 model.embedding_dim 1.0 +81 56 optimizer.lr 0.008871543610622035 +81 56 training.batch_size 1.0 +81 56 training.label_smoothing 
0.1554844434365708 +81 57 model.output_channels 37.0 +81 57 model.input_dropout 0.24558763226690938 +81 57 model.output_dropout 0.4076250373314159 +81 57 model.feature_map_dropout 0.2753455267972894 +81 57 model.embedding_dim 1.0 +81 57 optimizer.lr 0.004404978370068534 +81 57 training.batch_size 0.0 +81 57 training.label_smoothing 0.061352267791314224 +81 58 model.output_channels 26.0 +81 58 model.input_dropout 0.23488728721314567 +81 58 model.output_dropout 0.10128334889202206 +81 58 model.feature_map_dropout 0.3052003829438204 +81 58 model.embedding_dim 2.0 +81 58 optimizer.lr 0.001488288454516063 +81 58 training.batch_size 0.0 +81 58 training.label_smoothing 0.003792744625820938 +81 59 model.output_channels 32.0 +81 59 model.input_dropout 0.11057975882970383 +81 59 model.output_dropout 0.49235344244166535 +81 59 model.feature_map_dropout 0.3315106437652862 +81 59 model.embedding_dim 0.0 +81 59 optimizer.lr 0.05684170615958503 +81 59 training.batch_size 0.0 +81 59 training.label_smoothing 0.40412813598014713 +81 60 model.output_channels 43.0 +81 60 model.input_dropout 0.4308879271242947 +81 60 model.output_dropout 0.3074393755029203 +81 60 model.feature_map_dropout 0.4024939848314962 +81 60 model.embedding_dim 1.0 +81 60 optimizer.lr 0.07539099947795484 +81 60 training.batch_size 0.0 +81 60 training.label_smoothing 0.14147679531800905 +81 61 model.output_channels 31.0 +81 61 model.input_dropout 0.095239734412713 +81 61 model.output_dropout 0.23608325350879467 +81 61 model.feature_map_dropout 0.08298135021343556 +81 61 model.embedding_dim 1.0 +81 61 optimizer.lr 0.0734223604942352 +81 61 training.batch_size 0.0 +81 61 training.label_smoothing 0.0024750216671260566 +81 62 model.output_channels 57.0 +81 62 model.input_dropout 0.1545867233433012 +81 62 model.output_dropout 0.05203447377686221 +81 62 model.feature_map_dropout 0.45071907428081265 +81 62 model.embedding_dim 2.0 +81 62 optimizer.lr 0.08473177267719997 +81 62 training.batch_size 0.0 +81 62 
training.label_smoothing 0.00444399777445737 +81 63 model.output_channels 56.0 +81 63 model.input_dropout 0.18996572826301789 +81 63 model.output_dropout 0.35224749011429674 +81 63 model.feature_map_dropout 0.09241702459913953 +81 63 model.embedding_dim 0.0 +81 63 optimizer.lr 0.0032028836025185347 +81 63 training.batch_size 0.0 +81 63 training.label_smoothing 0.018308488594089473 +81 64 model.output_channels 53.0 +81 64 model.input_dropout 0.3993906252508307 +81 64 model.output_dropout 0.15130346825984486 +81 64 model.feature_map_dropout 0.3529921180887268 +81 64 model.embedding_dim 0.0 +81 64 optimizer.lr 0.0024524387754266655 +81 64 training.batch_size 1.0 +81 64 training.label_smoothing 0.14042904727910285 +81 65 model.output_channels 43.0 +81 65 model.input_dropout 0.4399784283291807 +81 65 model.output_dropout 0.45393412232228053 +81 65 model.feature_map_dropout 0.30665162004436247 +81 65 model.embedding_dim 2.0 +81 65 optimizer.lr 0.04323969307774804 +81 65 training.batch_size 1.0 +81 65 training.label_smoothing 0.0055657926152106975 +81 66 model.output_channels 61.0 +81 66 model.input_dropout 0.17759059989589143 +81 66 model.output_dropout 0.06273672434455041 +81 66 model.feature_map_dropout 0.49298635188330286 +81 66 model.embedding_dim 1.0 +81 66 optimizer.lr 0.03904868993751287 +81 66 training.batch_size 2.0 +81 66 training.label_smoothing 0.038852039186882195 +81 67 model.output_channels 43.0 +81 67 model.input_dropout 0.1584110443483347 +81 67 model.output_dropout 0.12051707239128812 +81 67 model.feature_map_dropout 0.2405865903463294 +81 67 model.embedding_dim 2.0 +81 67 optimizer.lr 0.08474607188703934 +81 67 training.batch_size 1.0 +81 67 training.label_smoothing 0.8215599638398379 +81 68 model.output_channels 30.0 +81 68 model.input_dropout 0.27545880548945373 +81 68 model.output_dropout 0.28225928669460687 +81 68 model.feature_map_dropout 0.38828105071812563 +81 68 model.embedding_dim 0.0 +81 68 optimizer.lr 0.008350385042908335 +81 68 
training.batch_size 1.0 +81 68 training.label_smoothing 0.005527563498218685 +81 69 model.output_channels 28.0 +81 69 model.input_dropout 0.09220650569899014 +81 69 model.output_dropout 0.028960054868545193 +81 69 model.feature_map_dropout 0.36627821059680593 +81 69 model.embedding_dim 1.0 +81 69 optimizer.lr 0.0026309938552418932 +81 69 training.batch_size 1.0 +81 69 training.label_smoothing 0.0013325404521104315 +81 70 model.output_channels 41.0 +81 70 model.input_dropout 0.44781514230454406 +81 70 model.output_dropout 0.192956150532608 +81 70 model.feature_map_dropout 0.15069627939446362 +81 70 model.embedding_dim 2.0 +81 70 optimizer.lr 0.005795690508329671 +81 70 training.batch_size 1.0 +81 70 training.label_smoothing 0.16169221623081756 +81 71 model.output_channels 49.0 +81 71 model.input_dropout 0.3009763029805865 +81 71 model.output_dropout 0.4857463587472088 +81 71 model.feature_map_dropout 0.2951337139122379 +81 71 model.embedding_dim 2.0 +81 71 optimizer.lr 0.0425816030926658 +81 71 training.batch_size 1.0 +81 71 training.label_smoothing 0.001335817630970737 +81 72 model.output_channels 25.0 +81 72 model.input_dropout 0.3200906603805484 +81 72 model.output_dropout 0.04244850055437405 +81 72 model.feature_map_dropout 0.4300489071891941 +81 72 model.embedding_dim 0.0 +81 72 optimizer.lr 0.08955358287972962 +81 72 training.batch_size 2.0 +81 72 training.label_smoothing 0.030282579879205223 +81 73 model.output_channels 50.0 +81 73 model.input_dropout 0.10642193219142781 +81 73 model.output_dropout 0.48728086085891054 +81 73 model.feature_map_dropout 0.17647770699576104 +81 73 model.embedding_dim 1.0 +81 73 optimizer.lr 0.06900379508364944 +81 73 training.batch_size 1.0 +81 73 training.label_smoothing 0.040571655776145535 +81 74 model.output_channels 31.0 +81 74 model.input_dropout 0.40559092355673726 +81 74 model.output_dropout 0.09656385739905982 +81 74 model.feature_map_dropout 0.4315875737592548 +81 74 model.embedding_dim 2.0 +81 74 optimizer.lr 
0.023036723133106746 +81 74 training.batch_size 1.0 +81 74 training.label_smoothing 0.005398322017998178 +81 75 model.output_channels 26.0 +81 75 model.input_dropout 0.3521602534447479 +81 75 model.output_dropout 0.24289720961375832 +81 75 model.feature_map_dropout 0.15823810293913304 +81 75 model.embedding_dim 1.0 +81 75 optimizer.lr 0.004401270237347287 +81 75 training.batch_size 1.0 +81 75 training.label_smoothing 0.46502311942840313 +81 76 model.output_channels 42.0 +81 76 model.input_dropout 0.2747397540396624 +81 76 model.output_dropout 0.4456950965942359 +81 76 model.feature_map_dropout 0.3554518812679668 +81 76 model.embedding_dim 2.0 +81 76 optimizer.lr 0.0016639248291583997 +81 76 training.batch_size 0.0 +81 76 training.label_smoothing 0.0082487819982765 +81 77 model.output_channels 61.0 +81 77 model.input_dropout 0.002635855740816817 +81 77 model.output_dropout 0.05903467737987139 +81 77 model.feature_map_dropout 0.09707358019990558 +81 77 model.embedding_dim 2.0 +81 77 optimizer.lr 0.0013766224742470937 +81 77 training.batch_size 2.0 +81 77 training.label_smoothing 0.019880151615517407 +81 78 model.output_channels 42.0 +81 78 model.input_dropout 0.3074427389220478 +81 78 model.output_dropout 0.10227882427112267 +81 78 model.feature_map_dropout 0.38247287368898725 +81 78 model.embedding_dim 2.0 +81 78 optimizer.lr 0.0014933274957059095 +81 78 training.batch_size 0.0 +81 78 training.label_smoothing 0.011569308939324143 +81 79 model.output_channels 23.0 +81 79 model.input_dropout 0.25198052049253966 +81 79 model.output_dropout 0.4712514501856323 +81 79 model.feature_map_dropout 0.26854526185183347 +81 79 model.embedding_dim 2.0 +81 79 optimizer.lr 0.0029626029193945565 +81 79 training.batch_size 1.0 +81 79 training.label_smoothing 0.012154082272784668 +81 80 model.output_channels 63.0 +81 80 model.input_dropout 0.03310666080586333 +81 80 model.output_dropout 0.40783161358650116 +81 80 model.feature_map_dropout 0.05976305654364267 +81 80 model.embedding_dim 
1.0 +81 80 optimizer.lr 0.002310801255031421 +81 80 training.batch_size 0.0 +81 80 training.label_smoothing 0.028249290223168928 +81 81 model.output_channels 48.0 +81 81 model.input_dropout 0.03688485177805678 +81 81 model.output_dropout 0.46828235313618793 +81 81 model.feature_map_dropout 0.3592024305926607 +81 81 model.embedding_dim 0.0 +81 81 optimizer.lr 0.08228040375693137 +81 81 training.batch_size 1.0 +81 81 training.label_smoothing 0.0033796100868195263 +81 82 model.output_channels 56.0 +81 82 model.input_dropout 0.3535793272113197 +81 82 model.output_dropout 0.13139838440324064 +81 82 model.feature_map_dropout 0.37063015034144525 +81 82 model.embedding_dim 2.0 +81 82 optimizer.lr 0.001256617638217243 +81 82 training.batch_size 1.0 +81 82 training.label_smoothing 0.0024438187373827316 +81 83 model.output_channels 20.0 +81 83 model.input_dropout 0.1363665894591753 +81 83 model.output_dropout 0.03450229074655331 +81 83 model.feature_map_dropout 0.36810499578893463 +81 83 model.embedding_dim 1.0 +81 83 optimizer.lr 0.0026118150184647687 +81 83 training.batch_size 0.0 +81 83 training.label_smoothing 0.006701124932207066 +81 84 model.output_channels 62.0 +81 84 model.input_dropout 0.4051140548305853 +81 84 model.output_dropout 0.28271002683429164 +81 84 model.feature_map_dropout 0.11563310645036068 +81 84 model.embedding_dim 0.0 +81 84 optimizer.lr 0.006659389591770637 +81 84 training.batch_size 0.0 +81 84 training.label_smoothing 0.26562007378551344 +81 85 model.output_channels 50.0 +81 85 model.input_dropout 0.30448566163810864 +81 85 model.output_dropout 0.35443749817276843 +81 85 model.feature_map_dropout 0.05592868193461187 +81 85 model.embedding_dim 2.0 +81 85 optimizer.lr 0.0013712486827357877 +81 85 training.batch_size 2.0 +81 85 training.label_smoothing 0.0039141672118687115 +81 86 model.output_channels 64.0 +81 86 model.input_dropout 0.3311800039231269 +81 86 model.output_dropout 0.426172317255218 +81 86 model.feature_map_dropout 0.18301270164997657 
+81 86 model.embedding_dim 0.0 +81 86 optimizer.lr 0.004706652840525803 +81 86 training.batch_size 0.0 +81 86 training.label_smoothing 0.0016976065215303987 +81 87 model.output_channels 53.0 +81 87 model.input_dropout 0.03832435456411848 +81 87 model.output_dropout 0.2504906475094325 +81 87 model.feature_map_dropout 0.4045314602764609 +81 87 model.embedding_dim 0.0 +81 87 optimizer.lr 0.013691653659971072 +81 87 training.batch_size 2.0 +81 87 training.label_smoothing 0.16275845856297627 +81 88 model.output_channels 28.0 +81 88 model.input_dropout 0.45867873553001026 +81 88 model.output_dropout 0.36228912015920356 +81 88 model.feature_map_dropout 0.18857616400431904 +81 88 model.embedding_dim 2.0 +81 88 optimizer.lr 0.0018093927533394671 +81 88 training.batch_size 0.0 +81 88 training.label_smoothing 0.021191608665186244 +81 89 model.output_channels 16.0 +81 89 model.input_dropout 0.35083830512589653 +81 89 model.output_dropout 0.20298064108889213 +81 89 model.feature_map_dropout 0.07948237821188409 +81 89 model.embedding_dim 1.0 +81 89 optimizer.lr 0.009166158469088805 +81 89 training.batch_size 0.0 +81 89 training.label_smoothing 0.015462082478327942 +81 90 model.output_channels 22.0 +81 90 model.input_dropout 0.18006799480865154 +81 90 model.output_dropout 0.018579382872826622 +81 90 model.feature_map_dropout 0.2716775597676874 +81 90 model.embedding_dim 1.0 +81 90 optimizer.lr 0.001879904231360671 +81 90 training.batch_size 0.0 +81 90 training.label_smoothing 0.15606539336437664 +81 91 model.output_channels 27.0 +81 91 model.input_dropout 0.07225100218030273 +81 91 model.output_dropout 0.43398460581048104 +81 91 model.feature_map_dropout 0.10186809060945606 +81 91 model.embedding_dim 2.0 +81 91 optimizer.lr 0.015239364135634804 +81 91 training.batch_size 1.0 +81 91 training.label_smoothing 0.39085965681973506 +81 92 model.output_channels 49.0 +81 92 model.input_dropout 0.3296539750929366 +81 92 model.output_dropout 0.4194331914894004 +81 92 
model.feature_map_dropout 0.15673034826426235 +81 92 model.embedding_dim 2.0 +81 92 optimizer.lr 0.01607558726626039 +81 92 training.batch_size 1.0 +81 92 training.label_smoothing 0.022781832741610074 +81 93 model.output_channels 55.0 +81 93 model.input_dropout 0.09264628155749516 +81 93 model.output_dropout 0.379877286284575 +81 93 model.feature_map_dropout 0.1757453995613591 +81 93 model.embedding_dim 2.0 +81 93 optimizer.lr 0.02076955875976216 +81 93 training.batch_size 2.0 +81 93 training.label_smoothing 0.6100030369178631 +81 94 model.output_channels 20.0 +81 94 model.input_dropout 0.49709673178051017 +81 94 model.output_dropout 0.2620300974394401 +81 94 model.feature_map_dropout 0.3968006287319159 +81 94 model.embedding_dim 0.0 +81 94 optimizer.lr 0.014355195101095886 +81 94 training.batch_size 2.0 +81 94 training.label_smoothing 0.0973548283263762 +81 95 model.output_channels 25.0 +81 95 model.input_dropout 0.30912935813348225 +81 95 model.output_dropout 0.395544133450167 +81 95 model.feature_map_dropout 0.10538644702217098 +81 95 model.embedding_dim 0.0 +81 95 optimizer.lr 0.014902929551526594 +81 95 training.batch_size 0.0 +81 95 training.label_smoothing 0.0036770666254347445 +81 96 model.output_channels 50.0 +81 96 model.input_dropout 0.43318221565942905 +81 96 model.output_dropout 0.3456944755333845 +81 96 model.feature_map_dropout 0.4903454444044914 +81 96 model.embedding_dim 2.0 +81 96 optimizer.lr 0.00204032102158573 +81 96 training.batch_size 1.0 +81 96 training.label_smoothing 0.03914683550063683 +81 97 model.output_channels 52.0 +81 97 model.input_dropout 0.04481788496759814 +81 97 model.output_dropout 0.05156008764444159 +81 97 model.feature_map_dropout 0.3770024410291299 +81 97 model.embedding_dim 0.0 +81 97 optimizer.lr 0.05473550309674108 +81 97 training.batch_size 2.0 +81 97 training.label_smoothing 0.02019510848098325 +81 98 model.output_channels 16.0 +81 98 model.input_dropout 0.026299339231722008 +81 98 model.output_dropout 
0.10652539827459795 +81 98 model.feature_map_dropout 0.1492457336846294 +81 98 model.embedding_dim 1.0 +81 98 optimizer.lr 0.006566202810677481 +81 98 training.batch_size 2.0 +81 98 training.label_smoothing 0.010963675842202196 +81 99 model.output_channels 28.0 +81 99 model.input_dropout 0.35555155697314156 +81 99 model.output_dropout 0.15629714164389652 +81 99 model.feature_map_dropout 0.15475761764029045 +81 99 model.embedding_dim 2.0 +81 99 optimizer.lr 0.038425921015869405 +81 99 training.batch_size 2.0 +81 99 training.label_smoothing 0.0015193798833870282 +81 100 model.output_channels 38.0 +81 100 model.input_dropout 0.2518729065748061 +81 100 model.output_dropout 0.22284784835554688 +81 100 model.feature_map_dropout 0.437478874049616 +81 100 model.embedding_dim 1.0 +81 100 optimizer.lr 0.06457427931455134 +81 100 training.batch_size 0.0 +81 100 training.label_smoothing 0.0020251932152449744 +81 1 dataset """kinships""" +81 1 model """conve""" +81 1 loss """softplus""" +81 1 regularizer """no""" +81 1 optimizer """adam""" +81 1 training_loop """lcwa""" +81 1 evaluator """rankbased""" +81 2 dataset """kinships""" +81 2 model """conve""" +81 2 loss """softplus""" +81 2 regularizer """no""" +81 2 optimizer """adam""" +81 2 training_loop """lcwa""" +81 2 evaluator """rankbased""" +81 3 dataset """kinships""" +81 3 model """conve""" +81 3 loss """softplus""" +81 3 regularizer """no""" +81 3 optimizer """adam""" +81 3 training_loop """lcwa""" +81 3 evaluator """rankbased""" +81 4 dataset """kinships""" +81 4 model """conve""" +81 4 loss """softplus""" +81 4 regularizer """no""" +81 4 optimizer """adam""" +81 4 training_loop """lcwa""" +81 4 evaluator """rankbased""" +81 5 dataset """kinships""" +81 5 model """conve""" +81 5 loss """softplus""" +81 5 regularizer """no""" +81 5 optimizer """adam""" +81 5 training_loop """lcwa""" +81 5 evaluator """rankbased""" +81 6 dataset """kinships""" +81 6 model """conve""" +81 6 loss """softplus""" +81 6 regularizer """no""" +81 
6 optimizer """adam""" +81 6 training_loop """lcwa""" +81 6 evaluator """rankbased""" +81 7 dataset """kinships""" +81 7 model """conve""" +81 7 loss """softplus""" +81 7 regularizer """no""" +81 7 optimizer """adam""" +81 7 training_loop """lcwa""" +81 7 evaluator """rankbased""" +81 8 dataset """kinships""" +81 8 model """conve""" +81 8 loss """softplus""" +81 8 regularizer """no""" +81 8 optimizer """adam""" +81 8 training_loop """lcwa""" +81 8 evaluator """rankbased""" +81 9 dataset """kinships""" +81 9 model """conve""" +81 9 loss """softplus""" +81 9 regularizer """no""" +81 9 optimizer """adam""" +81 9 training_loop """lcwa""" +81 9 evaluator """rankbased""" +81 10 dataset """kinships""" +81 10 model """conve""" +81 10 loss """softplus""" +81 10 regularizer """no""" +81 10 optimizer """adam""" +81 10 training_loop """lcwa""" +81 10 evaluator """rankbased""" +81 11 dataset """kinships""" +81 11 model """conve""" +81 11 loss """softplus""" +81 11 regularizer """no""" +81 11 optimizer """adam""" +81 11 training_loop """lcwa""" +81 11 evaluator """rankbased""" +81 12 dataset """kinships""" +81 12 model """conve""" +81 12 loss """softplus""" +81 12 regularizer """no""" +81 12 optimizer """adam""" +81 12 training_loop """lcwa""" +81 12 evaluator """rankbased""" +81 13 dataset """kinships""" +81 13 model """conve""" +81 13 loss """softplus""" +81 13 regularizer """no""" +81 13 optimizer """adam""" +81 13 training_loop """lcwa""" +81 13 evaluator """rankbased""" +81 14 dataset """kinships""" +81 14 model """conve""" +81 14 loss """softplus""" +81 14 regularizer """no""" +81 14 optimizer """adam""" +81 14 training_loop """lcwa""" +81 14 evaluator """rankbased""" +81 15 dataset """kinships""" +81 15 model """conve""" +81 15 loss """softplus""" +81 15 regularizer """no""" +81 15 optimizer """adam""" +81 15 training_loop """lcwa""" +81 15 evaluator """rankbased""" +81 16 dataset """kinships""" +81 16 model """conve""" +81 16 loss """softplus""" +81 16 regularizer 
"""no""" +81 16 optimizer """adam""" +81 16 training_loop """lcwa""" +81 16 evaluator """rankbased""" +81 17 dataset """kinships""" +81 17 model """conve""" +81 17 loss """softplus""" +81 17 regularizer """no""" +81 17 optimizer """adam""" +81 17 training_loop """lcwa""" +81 17 evaluator """rankbased""" +81 18 dataset """kinships""" +81 18 model """conve""" +81 18 loss """softplus""" +81 18 regularizer """no""" +81 18 optimizer """adam""" +81 18 training_loop """lcwa""" +81 18 evaluator """rankbased""" +81 19 dataset """kinships""" +81 19 model """conve""" +81 19 loss """softplus""" +81 19 regularizer """no""" +81 19 optimizer """adam""" +81 19 training_loop """lcwa""" +81 19 evaluator """rankbased""" +81 20 dataset """kinships""" +81 20 model """conve""" +81 20 loss """softplus""" +81 20 regularizer """no""" +81 20 optimizer """adam""" +81 20 training_loop """lcwa""" +81 20 evaluator """rankbased""" +81 21 dataset """kinships""" +81 21 model """conve""" +81 21 loss """softplus""" +81 21 regularizer """no""" +81 21 optimizer """adam""" +81 21 training_loop """lcwa""" +81 21 evaluator """rankbased""" +81 22 dataset """kinships""" +81 22 model """conve""" +81 22 loss """softplus""" +81 22 regularizer """no""" +81 22 optimizer """adam""" +81 22 training_loop """lcwa""" +81 22 evaluator """rankbased""" +81 23 dataset """kinships""" +81 23 model """conve""" +81 23 loss """softplus""" +81 23 regularizer """no""" +81 23 optimizer """adam""" +81 23 training_loop """lcwa""" +81 23 evaluator """rankbased""" +81 24 dataset """kinships""" +81 24 model """conve""" +81 24 loss """softplus""" +81 24 regularizer """no""" +81 24 optimizer """adam""" +81 24 training_loop """lcwa""" +81 24 evaluator """rankbased""" +81 25 dataset """kinships""" +81 25 model """conve""" +81 25 loss """softplus""" +81 25 regularizer """no""" +81 25 optimizer """adam""" +81 25 training_loop """lcwa""" +81 25 evaluator """rankbased""" +81 26 dataset """kinships""" +81 26 model """conve""" +81 26 loss 
"""softplus""" +81 26 regularizer """no""" +81 26 optimizer """adam""" +81 26 training_loop """lcwa""" +81 26 evaluator """rankbased""" +81 27 dataset """kinships""" +81 27 model """conve""" +81 27 loss """softplus""" +81 27 regularizer """no""" +81 27 optimizer """adam""" +81 27 training_loop """lcwa""" +81 27 evaluator """rankbased""" +81 28 dataset """kinships""" +81 28 model """conve""" +81 28 loss """softplus""" +81 28 regularizer """no""" +81 28 optimizer """adam""" +81 28 training_loop """lcwa""" +81 28 evaluator """rankbased""" +81 29 dataset """kinships""" +81 29 model """conve""" +81 29 loss """softplus""" +81 29 regularizer """no""" +81 29 optimizer """adam""" +81 29 training_loop """lcwa""" +81 29 evaluator """rankbased""" +81 30 dataset """kinships""" +81 30 model """conve""" +81 30 loss """softplus""" +81 30 regularizer """no""" +81 30 optimizer """adam""" +81 30 training_loop """lcwa""" +81 30 evaluator """rankbased""" +81 31 dataset """kinships""" +81 31 model """conve""" +81 31 loss """softplus""" +81 31 regularizer """no""" +81 31 optimizer """adam""" +81 31 training_loop """lcwa""" +81 31 evaluator """rankbased""" +81 32 dataset """kinships""" +81 32 model """conve""" +81 32 loss """softplus""" +81 32 regularizer """no""" +81 32 optimizer """adam""" +81 32 training_loop """lcwa""" +81 32 evaluator """rankbased""" +81 33 dataset """kinships""" +81 33 model """conve""" +81 33 loss """softplus""" +81 33 regularizer """no""" +81 33 optimizer """adam""" +81 33 training_loop """lcwa""" +81 33 evaluator """rankbased""" +81 34 dataset """kinships""" +81 34 model """conve""" +81 34 loss """softplus""" +81 34 regularizer """no""" +81 34 optimizer """adam""" +81 34 training_loop """lcwa""" +81 34 evaluator """rankbased""" +81 35 dataset """kinships""" +81 35 model """conve""" +81 35 loss """softplus""" +81 35 regularizer """no""" +81 35 optimizer """adam""" +81 35 training_loop """lcwa""" +81 35 evaluator """rankbased""" +81 36 dataset """kinships""" +81 36 
model """conve""" +81 36 loss """softplus""" +81 36 regularizer """no""" +81 36 optimizer """adam""" +81 36 training_loop """lcwa""" +81 36 evaluator """rankbased""" +81 37 dataset """kinships""" +81 37 model """conve""" +81 37 loss """softplus""" +81 37 regularizer """no""" +81 37 optimizer """adam""" +81 37 training_loop """lcwa""" +81 37 evaluator """rankbased""" +81 38 dataset """kinships""" +81 38 model """conve""" +81 38 loss """softplus""" +81 38 regularizer """no""" +81 38 optimizer """adam""" +81 38 training_loop """lcwa""" +81 38 evaluator """rankbased""" +81 39 dataset """kinships""" +81 39 model """conve""" +81 39 loss """softplus""" +81 39 regularizer """no""" +81 39 optimizer """adam""" +81 39 training_loop """lcwa""" +81 39 evaluator """rankbased""" +81 40 dataset """kinships""" +81 40 model """conve""" +81 40 loss """softplus""" +81 40 regularizer """no""" +81 40 optimizer """adam""" +81 40 training_loop """lcwa""" +81 40 evaluator """rankbased""" +81 41 dataset """kinships""" +81 41 model """conve""" +81 41 loss """softplus""" +81 41 regularizer """no""" +81 41 optimizer """adam""" +81 41 training_loop """lcwa""" +81 41 evaluator """rankbased""" +81 42 dataset """kinships""" +81 42 model """conve""" +81 42 loss """softplus""" +81 42 regularizer """no""" +81 42 optimizer """adam""" +81 42 training_loop """lcwa""" +81 42 evaluator """rankbased""" +81 43 dataset """kinships""" +81 43 model """conve""" +81 43 loss """softplus""" +81 43 regularizer """no""" +81 43 optimizer """adam""" +81 43 training_loop """lcwa""" +81 43 evaluator """rankbased""" +81 44 dataset """kinships""" +81 44 model """conve""" +81 44 loss """softplus""" +81 44 regularizer """no""" +81 44 optimizer """adam""" +81 44 training_loop """lcwa""" +81 44 evaluator """rankbased""" +81 45 dataset """kinships""" +81 45 model """conve""" +81 45 loss """softplus""" +81 45 regularizer """no""" +81 45 optimizer """adam""" +81 45 training_loop """lcwa""" +81 45 evaluator """rankbased""" +81 46 
dataset """kinships""" +81 46 model """conve""" +81 46 loss """softplus""" +81 46 regularizer """no""" +81 46 optimizer """adam""" +81 46 training_loop """lcwa""" +81 46 evaluator """rankbased""" +81 47 dataset """kinships""" +81 47 model """conve""" +81 47 loss """softplus""" +81 47 regularizer """no""" +81 47 optimizer """adam""" +81 47 training_loop """lcwa""" +81 47 evaluator """rankbased""" +81 48 dataset """kinships""" +81 48 model """conve""" +81 48 loss """softplus""" +81 48 regularizer """no""" +81 48 optimizer """adam""" +81 48 training_loop """lcwa""" +81 48 evaluator """rankbased""" +81 49 dataset """kinships""" +81 49 model """conve""" +81 49 loss """softplus""" +81 49 regularizer """no""" +81 49 optimizer """adam""" +81 49 training_loop """lcwa""" +81 49 evaluator """rankbased""" +81 50 dataset """kinships""" +81 50 model """conve""" +81 50 loss """softplus""" +81 50 regularizer """no""" +81 50 optimizer """adam""" +81 50 training_loop """lcwa""" +81 50 evaluator """rankbased""" +81 51 dataset """kinships""" +81 51 model """conve""" +81 51 loss """softplus""" +81 51 regularizer """no""" +81 51 optimizer """adam""" +81 51 training_loop """lcwa""" +81 51 evaluator """rankbased""" +81 52 dataset """kinships""" +81 52 model """conve""" +81 52 loss """softplus""" +81 52 regularizer """no""" +81 52 optimizer """adam""" +81 52 training_loop """lcwa""" +81 52 evaluator """rankbased""" +81 53 dataset """kinships""" +81 53 model """conve""" +81 53 loss """softplus""" +81 53 regularizer """no""" +81 53 optimizer """adam""" +81 53 training_loop """lcwa""" +81 53 evaluator """rankbased""" +81 54 dataset """kinships""" +81 54 model """conve""" +81 54 loss """softplus""" +81 54 regularizer """no""" +81 54 optimizer """adam""" +81 54 training_loop """lcwa""" +81 54 evaluator """rankbased""" +81 55 dataset """kinships""" +81 55 model """conve""" +81 55 loss """softplus""" +81 55 regularizer """no""" +81 55 optimizer """adam""" +81 55 training_loop """lcwa""" +81 55 
evaluator """rankbased""" +81 56 dataset """kinships""" +81 56 model """conve""" +81 56 loss """softplus""" +81 56 regularizer """no""" +81 56 optimizer """adam""" +81 56 training_loop """lcwa""" +81 56 evaluator """rankbased""" +81 57 dataset """kinships""" +81 57 model """conve""" +81 57 loss """softplus""" +81 57 regularizer """no""" +81 57 optimizer """adam""" +81 57 training_loop """lcwa""" +81 57 evaluator """rankbased""" +81 58 dataset """kinships""" +81 58 model """conve""" +81 58 loss """softplus""" +81 58 regularizer """no""" +81 58 optimizer """adam""" +81 58 training_loop """lcwa""" +81 58 evaluator """rankbased""" +81 59 dataset """kinships""" +81 59 model """conve""" +81 59 loss """softplus""" +81 59 regularizer """no""" +81 59 optimizer """adam""" +81 59 training_loop """lcwa""" +81 59 evaluator """rankbased""" +81 60 dataset """kinships""" +81 60 model """conve""" +81 60 loss """softplus""" +81 60 regularizer """no""" +81 60 optimizer """adam""" +81 60 training_loop """lcwa""" +81 60 evaluator """rankbased""" +81 61 dataset """kinships""" +81 61 model """conve""" +81 61 loss """softplus""" +81 61 regularizer """no""" +81 61 optimizer """adam""" +81 61 training_loop """lcwa""" +81 61 evaluator """rankbased""" +81 62 dataset """kinships""" +81 62 model """conve""" +81 62 loss """softplus""" +81 62 regularizer """no""" +81 62 optimizer """adam""" +81 62 training_loop """lcwa""" +81 62 evaluator """rankbased""" +81 63 dataset """kinships""" +81 63 model """conve""" +81 63 loss """softplus""" +81 63 regularizer """no""" +81 63 optimizer """adam""" +81 63 training_loop """lcwa""" +81 63 evaluator """rankbased""" +81 64 dataset """kinships""" +81 64 model """conve""" +81 64 loss """softplus""" +81 64 regularizer """no""" +81 64 optimizer """adam""" +81 64 training_loop """lcwa""" +81 64 evaluator """rankbased""" +81 65 dataset """kinships""" +81 65 model """conve""" +81 65 loss """softplus""" +81 65 regularizer """no""" +81 65 optimizer """adam""" +81 65 
training_loop """lcwa""" +81 65 evaluator """rankbased""" +81 66 dataset """kinships""" +81 66 model """conve""" +81 66 loss """softplus""" +81 66 regularizer """no""" +81 66 optimizer """adam""" +81 66 training_loop """lcwa""" +81 66 evaluator """rankbased""" +81 67 dataset """kinships""" +81 67 model """conve""" +81 67 loss """softplus""" +81 67 regularizer """no""" +81 67 optimizer """adam""" +81 67 training_loop """lcwa""" +81 67 evaluator """rankbased""" +81 68 dataset """kinships""" +81 68 model """conve""" +81 68 loss """softplus""" +81 68 regularizer """no""" +81 68 optimizer """adam""" +81 68 training_loop """lcwa""" +81 68 evaluator """rankbased""" +81 69 dataset """kinships""" +81 69 model """conve""" +81 69 loss """softplus""" +81 69 regularizer """no""" +81 69 optimizer """adam""" +81 69 training_loop """lcwa""" +81 69 evaluator """rankbased""" +81 70 dataset """kinships""" +81 70 model """conve""" +81 70 loss """softplus""" +81 70 regularizer """no""" +81 70 optimizer """adam""" +81 70 training_loop """lcwa""" +81 70 evaluator """rankbased""" +81 71 dataset """kinships""" +81 71 model """conve""" +81 71 loss """softplus""" +81 71 regularizer """no""" +81 71 optimizer """adam""" +81 71 training_loop """lcwa""" +81 71 evaluator """rankbased""" +81 72 dataset """kinships""" +81 72 model """conve""" +81 72 loss """softplus""" +81 72 regularizer """no""" +81 72 optimizer """adam""" +81 72 training_loop """lcwa""" +81 72 evaluator """rankbased""" +81 73 dataset """kinships""" +81 73 model """conve""" +81 73 loss """softplus""" +81 73 regularizer """no""" +81 73 optimizer """adam""" +81 73 training_loop """lcwa""" +81 73 evaluator """rankbased""" +81 74 dataset """kinships""" +81 74 model """conve""" +81 74 loss """softplus""" +81 74 regularizer """no""" +81 74 optimizer """adam""" +81 74 training_loop """lcwa""" +81 74 evaluator """rankbased""" +81 75 dataset """kinships""" +81 75 model """conve""" +81 75 loss """softplus""" +81 75 regularizer """no""" +81 
75 optimizer """adam""" +81 75 training_loop """lcwa""" +81 75 evaluator """rankbased""" +81 76 dataset """kinships""" +81 76 model """conve""" +81 76 loss """softplus""" +81 76 regularizer """no""" +81 76 optimizer """adam""" +81 76 training_loop """lcwa""" +81 76 evaluator """rankbased""" +81 77 dataset """kinships""" +81 77 model """conve""" +81 77 loss """softplus""" +81 77 regularizer """no""" +81 77 optimizer """adam""" +81 77 training_loop """lcwa""" +81 77 evaluator """rankbased""" +81 78 dataset """kinships""" +81 78 model """conve""" +81 78 loss """softplus""" +81 78 regularizer """no""" +81 78 optimizer """adam""" +81 78 training_loop """lcwa""" +81 78 evaluator """rankbased""" +81 79 dataset """kinships""" +81 79 model """conve""" +81 79 loss """softplus""" +81 79 regularizer """no""" +81 79 optimizer """adam""" +81 79 training_loop """lcwa""" +81 79 evaluator """rankbased""" +81 80 dataset """kinships""" +81 80 model """conve""" +81 80 loss """softplus""" +81 80 regularizer """no""" +81 80 optimizer """adam""" +81 80 training_loop """lcwa""" +81 80 evaluator """rankbased""" +81 81 dataset """kinships""" +81 81 model """conve""" +81 81 loss """softplus""" +81 81 regularizer """no""" +81 81 optimizer """adam""" +81 81 training_loop """lcwa""" +81 81 evaluator """rankbased""" +81 82 dataset """kinships""" +81 82 model """conve""" +81 82 loss """softplus""" +81 82 regularizer """no""" +81 82 optimizer """adam""" +81 82 training_loop """lcwa""" +81 82 evaluator """rankbased""" +81 83 dataset """kinships""" +81 83 model """conve""" +81 83 loss """softplus""" +81 83 regularizer """no""" +81 83 optimizer """adam""" +81 83 training_loop """lcwa""" +81 83 evaluator """rankbased""" +81 84 dataset """kinships""" +81 84 model """conve""" +81 84 loss """softplus""" +81 84 regularizer """no""" +81 84 optimizer """adam""" +81 84 training_loop """lcwa""" +81 84 evaluator """rankbased""" +81 85 dataset """kinships""" +81 85 model """conve""" +81 85 loss """softplus""" 
+81 85 regularizer """no""" +81 85 optimizer """adam""" +81 85 training_loop """lcwa""" +81 85 evaluator """rankbased""" +81 86 dataset """kinships""" +81 86 model """conve""" +81 86 loss """softplus""" +81 86 regularizer """no""" +81 86 optimizer """adam""" +81 86 training_loop """lcwa""" +81 86 evaluator """rankbased""" +81 87 dataset """kinships""" +81 87 model """conve""" +81 87 loss """softplus""" +81 87 regularizer """no""" +81 87 optimizer """adam""" +81 87 training_loop """lcwa""" +81 87 evaluator """rankbased""" +81 88 dataset """kinships""" +81 88 model """conve""" +81 88 loss """softplus""" +81 88 regularizer """no""" +81 88 optimizer """adam""" +81 88 training_loop """lcwa""" +81 88 evaluator """rankbased""" +81 89 dataset """kinships""" +81 89 model """conve""" +81 89 loss """softplus""" +81 89 regularizer """no""" +81 89 optimizer """adam""" +81 89 training_loop """lcwa""" +81 89 evaluator """rankbased""" +81 90 dataset """kinships""" +81 90 model """conve""" +81 90 loss """softplus""" +81 90 regularizer """no""" +81 90 optimizer """adam""" +81 90 training_loop """lcwa""" +81 90 evaluator """rankbased""" +81 91 dataset """kinships""" +81 91 model """conve""" +81 91 loss """softplus""" +81 91 regularizer """no""" +81 91 optimizer """adam""" +81 91 training_loop """lcwa""" +81 91 evaluator """rankbased""" +81 92 dataset """kinships""" +81 92 model """conve""" +81 92 loss """softplus""" +81 92 regularizer """no""" +81 92 optimizer """adam""" +81 92 training_loop """lcwa""" +81 92 evaluator """rankbased""" +81 93 dataset """kinships""" +81 93 model """conve""" +81 93 loss """softplus""" +81 93 regularizer """no""" +81 93 optimizer """adam""" +81 93 training_loop """lcwa""" +81 93 evaluator """rankbased""" +81 94 dataset """kinships""" +81 94 model """conve""" +81 94 loss """softplus""" +81 94 regularizer """no""" +81 94 optimizer """adam""" +81 94 training_loop """lcwa""" +81 94 evaluator """rankbased""" +81 95 dataset """kinships""" +81 95 model 
"""conve""" +81 95 loss """softplus""" +81 95 regularizer """no""" +81 95 optimizer """adam""" +81 95 training_loop """lcwa""" +81 95 evaluator """rankbased""" +81 96 dataset """kinships""" +81 96 model """conve""" +81 96 loss """softplus""" +81 96 regularizer """no""" +81 96 optimizer """adam""" +81 96 training_loop """lcwa""" +81 96 evaluator """rankbased""" +81 97 dataset """kinships""" +81 97 model """conve""" +81 97 loss """softplus""" +81 97 regularizer """no""" +81 97 optimizer """adam""" +81 97 training_loop """lcwa""" +81 97 evaluator """rankbased""" +81 98 dataset """kinships""" +81 98 model """conve""" +81 98 loss """softplus""" +81 98 regularizer """no""" +81 98 optimizer """adam""" +81 98 training_loop """lcwa""" +81 98 evaluator """rankbased""" +81 99 dataset """kinships""" +81 99 model """conve""" +81 99 loss """softplus""" +81 99 regularizer """no""" +81 99 optimizer """adam""" +81 99 training_loop """lcwa""" +81 99 evaluator """rankbased""" +81 100 dataset """kinships""" +81 100 model """conve""" +81 100 loss """softplus""" +81 100 regularizer """no""" +81 100 optimizer """adam""" +81 100 training_loop """lcwa""" +81 100 evaluator """rankbased""" +82 1 model.output_channels 52.0 +82 1 model.input_dropout 0.48707826230854706 +82 1 model.output_dropout 0.41963200626408237 +82 1 model.feature_map_dropout 0.2170681206397817 +82 1 model.embedding_dim 2.0 +82 1 loss.margin 1.768013954733764 +82 1 optimizer.lr 0.015237058986472901 +82 1 negative_sampler.num_negs_per_pos 50.0 +82 1 training.batch_size 2.0 +82 2 model.output_channels 56.0 +82 2 model.input_dropout 0.4978409389623052 +82 2 model.output_dropout 0.15609743061989068 +82 2 model.feature_map_dropout 0.3454987471573917 +82 2 model.embedding_dim 0.0 +82 2 loss.margin 1.5129439039702621 +82 2 optimizer.lr 0.09220619202807667 +82 2 negative_sampler.num_negs_per_pos 53.0 +82 2 training.batch_size 0.0 +82 3 model.output_channels 53.0 +82 3 model.input_dropout 0.17773475253094656 +82 3 
model.output_dropout 0.1696149227421334 +82 3 model.feature_map_dropout 0.05256517696928725 +82 3 model.embedding_dim 1.0 +82 3 loss.margin 3.836980355355253 +82 3 optimizer.lr 0.006292784238109455 +82 3 negative_sampler.num_negs_per_pos 77.0 +82 3 training.batch_size 1.0 +82 4 model.output_channels 55.0 +82 4 model.input_dropout 0.16270323895527528 +82 4 model.output_dropout 0.13050585060826297 +82 4 model.feature_map_dropout 0.1471588040913877 +82 4 model.embedding_dim 0.0 +82 4 loss.margin 5.908366730729615 +82 4 optimizer.lr 0.03811480465909464 +82 4 negative_sampler.num_negs_per_pos 0.0 +82 4 training.batch_size 0.0 +82 5 model.output_channels 17.0 +82 5 model.input_dropout 0.45259663490417634 +82 5 model.output_dropout 0.047987109899024605 +82 5 model.feature_map_dropout 0.037377199427241437 +82 5 model.embedding_dim 2.0 +82 5 loss.margin 5.558105274689653 +82 5 optimizer.lr 0.022798930883415287 +82 5 negative_sampler.num_negs_per_pos 16.0 +82 5 training.batch_size 2.0 +82 6 model.output_channels 39.0 +82 6 model.input_dropout 0.3097820502741709 +82 6 model.output_dropout 0.4973329073194084 +82 6 model.feature_map_dropout 0.08645444634911609 +82 6 model.embedding_dim 2.0 +82 6 loss.margin 5.777704950378306 +82 6 optimizer.lr 0.0010806743298140723 +82 6 negative_sampler.num_negs_per_pos 31.0 +82 6 training.batch_size 1.0 +82 7 model.output_channels 64.0 +82 7 model.input_dropout 0.13690967978191682 +82 7 model.output_dropout 0.47925366358940874 +82 7 model.feature_map_dropout 0.42587365918201053 +82 7 model.embedding_dim 1.0 +82 7 loss.margin 1.4883625842271593 +82 7 optimizer.lr 0.0777524344670003 +82 7 negative_sampler.num_negs_per_pos 60.0 +82 7 training.batch_size 2.0 +82 8 model.output_channels 54.0 +82 8 model.input_dropout 0.047236830898847615 +82 8 model.output_dropout 0.0328534055914389 +82 8 model.feature_map_dropout 0.28070651046005785 +82 8 model.embedding_dim 0.0 +82 8 loss.margin 8.291152893645815 +82 8 optimizer.lr 0.023735881160980264 +82 8 
negative_sampler.num_negs_per_pos 58.0 +82 8 training.batch_size 2.0 +82 9 model.output_channels 36.0 +82 9 model.input_dropout 0.29519370905462755 +82 9 model.output_dropout 0.1386147655026419 +82 9 model.feature_map_dropout 0.3187142467598154 +82 9 model.embedding_dim 0.0 +82 9 loss.margin 7.515504528997507 +82 9 optimizer.lr 0.009120320933794546 +82 9 negative_sampler.num_negs_per_pos 20.0 +82 9 training.batch_size 2.0 +82 10 model.output_channels 46.0 +82 10 model.input_dropout 0.013143434033650703 +82 10 model.output_dropout 0.18635112122479724 +82 10 model.feature_map_dropout 0.37176508284928655 +82 10 model.embedding_dim 0.0 +82 10 loss.margin 2.6197350690270333 +82 10 optimizer.lr 0.006428256823488745 +82 10 negative_sampler.num_negs_per_pos 15.0 +82 10 training.batch_size 2.0 +82 11 model.output_channels 59.0 +82 11 model.input_dropout 0.2608022544939751 +82 11 model.output_dropout 0.1150621112364193 +82 11 model.feature_map_dropout 0.038640937952198195 +82 11 model.embedding_dim 1.0 +82 11 loss.margin 7.94845247866668 +82 11 optimizer.lr 0.004814502112906291 +82 11 negative_sampler.num_negs_per_pos 74.0 +82 11 training.batch_size 2.0 +82 12 model.output_channels 48.0 +82 12 model.input_dropout 0.08884360383130685 +82 12 model.output_dropout 0.3671179565450813 +82 12 model.feature_map_dropout 0.1313756331819676 +82 12 model.embedding_dim 1.0 +82 12 loss.margin 8.83295013277678 +82 12 optimizer.lr 0.004914409714301445 +82 12 negative_sampler.num_negs_per_pos 96.0 +82 12 training.batch_size 2.0 +82 13 model.output_channels 29.0 +82 13 model.input_dropout 0.016330872716383393 +82 13 model.output_dropout 0.43361507376356695 +82 13 model.feature_map_dropout 0.4733005958723279 +82 13 model.embedding_dim 2.0 +82 13 loss.margin 6.745037673244606 +82 13 optimizer.lr 0.010808406312205848 +82 13 negative_sampler.num_negs_per_pos 83.0 +82 13 training.batch_size 1.0 +82 14 model.output_channels 21.0 +82 14 model.input_dropout 0.4749523632265934 +82 14 
model.output_dropout 0.08003401280688222 +82 14 model.feature_map_dropout 0.26858593487504423 +82 14 model.embedding_dim 1.0 +82 14 loss.margin 7.559496624156772 +82 14 optimizer.lr 0.07601516544720346 +82 14 negative_sampler.num_negs_per_pos 26.0 +82 14 training.batch_size 0.0 +82 15 model.output_channels 21.0 +82 15 model.input_dropout 0.4819979097529139 +82 15 model.output_dropout 0.19422311979867873 +82 15 model.feature_map_dropout 0.19928063294957576 +82 15 model.embedding_dim 2.0 +82 15 loss.margin 1.3684800665613421 +82 15 optimizer.lr 0.0043875389547667456 +82 15 negative_sampler.num_negs_per_pos 17.0 +82 15 training.batch_size 1.0 +82 16 model.output_channels 17.0 +82 16 model.input_dropout 0.3160333075444797 +82 16 model.output_dropout 0.3248456423057158 +82 16 model.feature_map_dropout 0.24094729943429216 +82 16 model.embedding_dim 0.0 +82 16 loss.margin 6.607672020612716 +82 16 optimizer.lr 0.007353568608422964 +82 16 negative_sampler.num_negs_per_pos 36.0 +82 16 training.batch_size 2.0 +82 17 model.output_channels 43.0 +82 17 model.input_dropout 0.39754145140577224 +82 17 model.output_dropout 0.49931401541811543 +82 17 model.feature_map_dropout 0.04949304034186364 +82 17 model.embedding_dim 0.0 +82 17 loss.margin 4.643206740808215 +82 17 optimizer.lr 0.07199977559514407 +82 17 negative_sampler.num_negs_per_pos 71.0 +82 17 training.batch_size 2.0 +82 18 model.output_channels 42.0 +82 18 model.input_dropout 0.3118806287441409 +82 18 model.output_dropout 0.004493406641233599 +82 18 model.feature_map_dropout 0.054848768202553355 +82 18 model.embedding_dim 0.0 +82 18 loss.margin 5.2914749822773555 +82 18 optimizer.lr 0.01755211392069806 +82 18 negative_sampler.num_negs_per_pos 37.0 +82 18 training.batch_size 1.0 +82 19 model.output_channels 43.0 +82 19 model.input_dropout 0.4835967201416554 +82 19 model.output_dropout 0.4783106977871994 +82 19 model.feature_map_dropout 0.009116235798248218 +82 19 model.embedding_dim 2.0 +82 19 loss.margin 4.324840698076124 
+82 19 optimizer.lr 0.0019305213143208262 +82 19 negative_sampler.num_negs_per_pos 59.0 +82 19 training.batch_size 0.0 +82 20 model.output_channels 34.0 +82 20 model.input_dropout 0.45728231922868484 +82 20 model.output_dropout 0.016068477971075523 +82 20 model.feature_map_dropout 0.23477100195186718 +82 20 model.embedding_dim 2.0 +82 20 loss.margin 6.134927736615023 +82 20 optimizer.lr 0.04613102100295244 +82 20 negative_sampler.num_negs_per_pos 36.0 +82 20 training.batch_size 1.0 +82 21 model.output_channels 33.0 +82 21 model.input_dropout 0.14230554559611985 +82 21 model.output_dropout 0.2166401964005371 +82 21 model.feature_map_dropout 0.3087028336984186 +82 21 model.embedding_dim 0.0 +82 21 loss.margin 7.87456710903627 +82 21 optimizer.lr 0.005625219517918435 +82 21 negative_sampler.num_negs_per_pos 74.0 +82 21 training.batch_size 0.0 +82 22 model.output_channels 24.0 +82 22 model.input_dropout 0.21616841365649292 +82 22 model.output_dropout 0.47702923962651045 +82 22 model.feature_map_dropout 0.34949654718552436 +82 22 model.embedding_dim 1.0 +82 22 loss.margin 1.92679664154582 +82 22 optimizer.lr 0.09971202202736137 +82 22 negative_sampler.num_negs_per_pos 97.0 +82 22 training.batch_size 0.0 +82 23 model.output_channels 49.0 +82 23 model.input_dropout 0.12283001476474975 +82 23 model.output_dropout 0.28870659729176423 +82 23 model.feature_map_dropout 0.04656881743529723 +82 23 model.embedding_dim 1.0 +82 23 loss.margin 0.8926014591438969 +82 23 optimizer.lr 0.042594858284089945 +82 23 negative_sampler.num_negs_per_pos 53.0 +82 23 training.batch_size 2.0 +82 24 model.output_channels 63.0 +82 24 model.input_dropout 0.13568916591987906 +82 24 model.output_dropout 0.16421181904844484 +82 24 model.feature_map_dropout 0.4096420116774731 +82 24 model.embedding_dim 0.0 +82 24 loss.margin 9.800457067152477 +82 24 optimizer.lr 0.017181215503328212 +82 24 negative_sampler.num_negs_per_pos 91.0 +82 24 training.batch_size 2.0 +82 25 model.output_channels 21.0 +82 25 
model.input_dropout 0.04631106139285046 +82 25 model.output_dropout 0.002560282738361619 +82 25 model.feature_map_dropout 0.21585378116117604 +82 25 model.embedding_dim 2.0 +82 25 loss.margin 5.412052267425868 +82 25 optimizer.lr 0.01253976346805593 +82 25 negative_sampler.num_negs_per_pos 71.0 +82 25 training.batch_size 0.0 +82 26 model.output_channels 47.0 +82 26 model.input_dropout 0.4644615634815685 +82 26 model.output_dropout 0.287180366774998 +82 26 model.feature_map_dropout 0.11766530232437439 +82 26 model.embedding_dim 2.0 +82 26 loss.margin 1.9002983936790057 +82 26 optimizer.lr 0.09727777692356535 +82 26 negative_sampler.num_negs_per_pos 66.0 +82 26 training.batch_size 0.0 +82 27 model.output_channels 57.0 +82 27 model.input_dropout 0.19085961242563665 +82 27 model.output_dropout 0.1280579326643303 +82 27 model.feature_map_dropout 0.3643668959831524 +82 27 model.embedding_dim 0.0 +82 27 loss.margin 9.71196709138166 +82 27 optimizer.lr 0.0014457039817025387 +82 27 negative_sampler.num_negs_per_pos 14.0 +82 27 training.batch_size 0.0 +82 28 model.output_channels 42.0 +82 28 model.input_dropout 0.4478255312587009 +82 28 model.output_dropout 0.1633492322613524 +82 28 model.feature_map_dropout 0.4213780196652429 +82 28 model.embedding_dim 1.0 +82 28 loss.margin 5.226426866978243 +82 28 optimizer.lr 0.02013287109857652 +82 28 negative_sampler.num_negs_per_pos 32.0 +82 28 training.batch_size 1.0 +82 29 model.output_channels 47.0 +82 29 model.input_dropout 0.38610695161814446 +82 29 model.output_dropout 0.05792052108517587 +82 29 model.feature_map_dropout 0.14489096682141367 +82 29 model.embedding_dim 2.0 +82 29 loss.margin 1.8093464192149966 +82 29 optimizer.lr 0.02492995511866533 +82 29 negative_sampler.num_negs_per_pos 93.0 +82 29 training.batch_size 0.0 +82 30 model.output_channels 25.0 +82 30 model.input_dropout 0.13718490661489496 +82 30 model.output_dropout 0.19217776503771383 +82 30 model.feature_map_dropout 0.3580654519338394 +82 30 model.embedding_dim 
1.0 +82 30 loss.margin 1.9179285299403857 +82 30 optimizer.lr 0.006592533589320798 +82 30 negative_sampler.num_negs_per_pos 71.0 +82 30 training.batch_size 2.0 +82 31 model.output_channels 22.0 +82 31 model.input_dropout 0.1334244645771508 +82 31 model.output_dropout 0.022277195895645474 +82 31 model.feature_map_dropout 0.05701174053851055 +82 31 model.embedding_dim 0.0 +82 31 loss.margin 2.986732350264766 +82 31 optimizer.lr 0.08528833733062531 +82 31 negative_sampler.num_negs_per_pos 45.0 +82 31 training.batch_size 2.0 +82 32 model.output_channels 21.0 +82 32 model.input_dropout 0.40455890760020835 +82 32 model.output_dropout 0.1662532933562108 +82 32 model.feature_map_dropout 0.3938062274319211 +82 32 model.embedding_dim 0.0 +82 32 loss.margin 1.947715760700131 +82 32 optimizer.lr 0.00902069773252961 +82 32 negative_sampler.num_negs_per_pos 25.0 +82 32 training.batch_size 0.0 +82 33 model.output_channels 17.0 +82 33 model.input_dropout 0.07253040896648649 +82 33 model.output_dropout 0.3320016652160391 +82 33 model.feature_map_dropout 0.2720551235108359 +82 33 model.embedding_dim 0.0 +82 33 loss.margin 1.9389826866609534 +82 33 optimizer.lr 0.015214514070669895 +82 33 negative_sampler.num_negs_per_pos 65.0 +82 33 training.batch_size 1.0 +82 34 model.output_channels 35.0 +82 34 model.input_dropout 0.13178479878484717 +82 34 model.output_dropout 0.44796757550349137 +82 34 model.feature_map_dropout 0.4264607909536619 +82 34 model.embedding_dim 1.0 +82 34 loss.margin 1.019583595224016 +82 34 optimizer.lr 0.03388561188628809 +82 34 negative_sampler.num_negs_per_pos 46.0 +82 34 training.batch_size 1.0 +82 35 model.output_channels 57.0 +82 35 model.input_dropout 0.06615671069149365 +82 35 model.output_dropout 0.4266041844518923 +82 35 model.feature_map_dropout 0.43449267317395224 +82 35 model.embedding_dim 2.0 +82 35 loss.margin 1.158994123106735 +82 35 optimizer.lr 0.006640846532114071 +82 35 negative_sampler.num_negs_per_pos 89.0 +82 35 training.batch_size 1.0 +82 36 
model.output_channels 16.0 +82 36 model.input_dropout 0.2409317653707057 +82 36 model.output_dropout 0.2413594889061142 +82 36 model.feature_map_dropout 0.32716663318481715 +82 36 model.embedding_dim 2.0 +82 36 loss.margin 8.300630599859964 +82 36 optimizer.lr 0.024557058647831156 +82 36 negative_sampler.num_negs_per_pos 90.0 +82 36 training.batch_size 0.0 +82 37 model.output_channels 53.0 +82 37 model.input_dropout 0.25370723736936485 +82 37 model.output_dropout 0.43464007686280853 +82 37 model.feature_map_dropout 0.4052237494530169 +82 37 model.embedding_dim 1.0 +82 37 loss.margin 4.056513730714499 +82 37 optimizer.lr 0.045615078656890254 +82 37 negative_sampler.num_negs_per_pos 92.0 +82 37 training.batch_size 1.0 +82 38 model.output_channels 62.0 +82 38 model.input_dropout 0.12154489052752238 +82 38 model.output_dropout 0.4679323065012222 +82 38 model.feature_map_dropout 0.4941138357847678 +82 38 model.embedding_dim 2.0 +82 38 loss.margin 6.920746286456565 +82 38 optimizer.lr 0.011494709921331918 +82 38 negative_sampler.num_negs_per_pos 63.0 +82 38 training.batch_size 1.0 +82 39 model.output_channels 31.0 +82 39 model.input_dropout 0.49972078800868824 +82 39 model.output_dropout 0.1552742495147441 +82 39 model.feature_map_dropout 0.34492698434456015 +82 39 model.embedding_dim 1.0 +82 39 loss.margin 8.612186432940845 +82 39 optimizer.lr 0.00680872513843957 +82 39 negative_sampler.num_negs_per_pos 8.0 +82 39 training.batch_size 1.0 +82 40 model.output_channels 21.0 +82 40 model.input_dropout 0.38943087747333 +82 40 model.output_dropout 0.4849316727780137 +82 40 model.feature_map_dropout 0.08668852647085973 +82 40 model.embedding_dim 2.0 +82 40 loss.margin 7.577270839212372 +82 40 optimizer.lr 0.0036256146525023712 +82 40 negative_sampler.num_negs_per_pos 1.0 +82 40 training.batch_size 0.0 +82 41 model.output_channels 57.0 +82 41 model.input_dropout 0.4900848185875981 +82 41 model.output_dropout 0.05454131621204339 +82 41 model.feature_map_dropout 
0.15707505224604407 +82 41 model.embedding_dim 2.0 +82 41 loss.margin 4.61778373137819 +82 41 optimizer.lr 0.017113144874143175 +82 41 negative_sampler.num_negs_per_pos 85.0 +82 41 training.batch_size 0.0 +82 42 model.output_channels 29.0 +82 42 model.input_dropout 0.33291218005969847 +82 42 model.output_dropout 0.44224062163458133 +82 42 model.feature_map_dropout 0.2553704604273934 +82 42 model.embedding_dim 2.0 +82 42 loss.margin 0.5083499298147345 +82 42 optimizer.lr 0.0016757606838728183 +82 42 negative_sampler.num_negs_per_pos 8.0 +82 42 training.batch_size 1.0 +82 43 model.output_channels 43.0 +82 43 model.input_dropout 0.08606654243001544 +82 43 model.output_dropout 0.09644352772146259 +82 43 model.feature_map_dropout 0.4156667009698053 +82 43 model.embedding_dim 0.0 +82 43 loss.margin 9.064501009318258 +82 43 optimizer.lr 0.0011581463246408941 +82 43 negative_sampler.num_negs_per_pos 93.0 +82 43 training.batch_size 2.0 +82 44 model.output_channels 51.0 +82 44 model.input_dropout 0.2724468330631937 +82 44 model.output_dropout 0.10968096588664089 +82 44 model.feature_map_dropout 0.28530711435216793 +82 44 model.embedding_dim 2.0 +82 44 loss.margin 1.5980578819285187 +82 44 optimizer.lr 0.0034801479050824776 +82 44 negative_sampler.num_negs_per_pos 61.0 +82 44 training.batch_size 1.0 +82 45 model.output_channels 45.0 +82 45 model.input_dropout 0.49110987292449754 +82 45 model.output_dropout 0.36295622669458233 +82 45 model.feature_map_dropout 0.480385571429258 +82 45 model.embedding_dim 0.0 +82 45 loss.margin 5.256723960549923 +82 45 optimizer.lr 0.003034508177516688 +82 45 negative_sampler.num_negs_per_pos 34.0 +82 45 training.batch_size 2.0 +82 46 model.output_channels 48.0 +82 46 model.input_dropout 0.46381714952203273 +82 46 model.output_dropout 0.37712774772834934 +82 46 model.feature_map_dropout 0.1741168455031195 +82 46 model.embedding_dim 0.0 +82 46 loss.margin 4.958738485720887 +82 46 optimizer.lr 0.0024550286919057776 +82 46 
negative_sampler.num_negs_per_pos 83.0 +82 46 training.batch_size 0.0 +82 47 model.output_channels 47.0 +82 47 model.input_dropout 0.47380649458691215 +82 47 model.output_dropout 0.004804215020650815 +82 47 model.feature_map_dropout 0.3112134106595449 +82 47 model.embedding_dim 1.0 +82 47 loss.margin 8.894137591797222 +82 47 optimizer.lr 0.005908433023722038 +82 47 negative_sampler.num_negs_per_pos 25.0 +82 47 training.batch_size 2.0 +82 48 model.output_channels 20.0 +82 48 model.input_dropout 0.2895322361775511 +82 48 model.output_dropout 0.345272791502928 +82 48 model.feature_map_dropout 0.14344753411585198 +82 48 model.embedding_dim 2.0 +82 48 loss.margin 2.3155831230336554 +82 48 optimizer.lr 0.014741760181484948 +82 48 negative_sampler.num_negs_per_pos 47.0 +82 48 training.batch_size 1.0 +82 49 model.output_channels 29.0 +82 49 model.input_dropout 0.33650011191166024 +82 49 model.output_dropout 0.15557760704341494 +82 49 model.feature_map_dropout 0.03135230435091507 +82 49 model.embedding_dim 0.0 +82 49 loss.margin 1.4469582988335576 +82 49 optimizer.lr 0.008870435072826048 +82 49 negative_sampler.num_negs_per_pos 78.0 +82 49 training.batch_size 0.0 +82 50 model.output_channels 49.0 +82 50 model.input_dropout 0.23842080702174684 +82 50 model.output_dropout 0.3383418426186541 +82 50 model.feature_map_dropout 0.3778243804347352 +82 50 model.embedding_dim 1.0 +82 50 loss.margin 2.7819920345740083 +82 50 optimizer.lr 0.010220681064847725 +82 50 negative_sampler.num_negs_per_pos 8.0 +82 50 training.batch_size 0.0 +82 51 model.output_channels 22.0 +82 51 model.input_dropout 0.037314876374355876 +82 51 model.output_dropout 0.0729286348417269 +82 51 model.feature_map_dropout 0.33591000328570186 +82 51 model.embedding_dim 1.0 +82 51 loss.margin 8.857344079346916 +82 51 optimizer.lr 0.00358846649538585 +82 51 negative_sampler.num_negs_per_pos 34.0 +82 51 training.batch_size 0.0 +82 52 model.output_channels 32.0 +82 52 model.input_dropout 0.13529106875233088 +82 52 
model.output_dropout 0.1327359034183449 +82 52 model.feature_map_dropout 0.3579340915325007 +82 52 model.embedding_dim 0.0 +82 52 loss.margin 8.092106156566768 +82 52 optimizer.lr 0.004061127528821495 +82 52 negative_sampler.num_negs_per_pos 61.0 +82 52 training.batch_size 1.0 +82 53 model.output_channels 34.0 +82 53 model.input_dropout 0.42062227453761925 +82 53 model.output_dropout 0.30855171338396165 +82 53 model.feature_map_dropout 0.32226207665483525 +82 53 model.embedding_dim 1.0 +82 53 loss.margin 7.988406273034299 +82 53 optimizer.lr 0.09501473674121902 +82 53 negative_sampler.num_negs_per_pos 5.0 +82 53 training.batch_size 1.0 +82 54 model.output_channels 62.0 +82 54 model.input_dropout 0.27248462386098465 +82 54 model.output_dropout 0.12684288505690827 +82 54 model.feature_map_dropout 0.4803054816796182 +82 54 model.embedding_dim 0.0 +82 54 loss.margin 2.5111038481178998 +82 54 optimizer.lr 0.006658487767537933 +82 54 negative_sampler.num_negs_per_pos 95.0 +82 54 training.batch_size 2.0 +82 55 model.output_channels 31.0 +82 55 model.input_dropout 0.0594393450037452 +82 55 model.output_dropout 0.0946805988062393 +82 55 model.feature_map_dropout 0.49017297663873177 +82 55 model.embedding_dim 1.0 +82 55 loss.margin 7.2209435215463 +82 55 optimizer.lr 0.004939629623686238 +82 55 negative_sampler.num_negs_per_pos 1.0 +82 55 training.batch_size 0.0 +82 56 model.output_channels 18.0 +82 56 model.input_dropout 0.430978370169412 +82 56 model.output_dropout 0.3928019484929414 +82 56 model.feature_map_dropout 0.03912265283404187 +82 56 model.embedding_dim 2.0 +82 56 loss.margin 4.445763744284191 +82 56 optimizer.lr 0.0024177155150708137 +82 56 negative_sampler.num_negs_per_pos 7.0 +82 56 training.batch_size 1.0 +82 57 model.output_channels 63.0 +82 57 model.input_dropout 0.1496689090090868 +82 57 model.output_dropout 0.14504554113972612 +82 57 model.feature_map_dropout 0.17559558080692428 +82 57 model.embedding_dim 2.0 +82 57 loss.margin 8.803995554359291 +82 57 
optimizer.lr 0.01705742269003853 +82 57 negative_sampler.num_negs_per_pos 9.0 +82 57 training.batch_size 2.0 +82 58 model.output_channels 22.0 +82 58 model.input_dropout 0.04782054564897814 +82 58 model.output_dropout 0.30889676316285614 +82 58 model.feature_map_dropout 0.38685676667914737 +82 58 model.embedding_dim 0.0 +82 58 loss.margin 6.8420842338529235 +82 58 optimizer.lr 0.0022223731153310977 +82 58 negative_sampler.num_negs_per_pos 51.0 +82 58 training.batch_size 1.0 +82 59 model.output_channels 56.0 +82 59 model.input_dropout 0.18846801830974003 +82 59 model.output_dropout 0.11356622455369236 +82 59 model.feature_map_dropout 0.2858031051936432 +82 59 model.embedding_dim 1.0 +82 59 loss.margin 3.032087450319681 +82 59 optimizer.lr 0.013243877519849567 +82 59 negative_sampler.num_negs_per_pos 11.0 +82 59 training.batch_size 0.0 +82 60 model.output_channels 50.0 +82 60 model.input_dropout 0.43874239057920006 +82 60 model.output_dropout 0.0320992041098227 +82 60 model.feature_map_dropout 0.3085710618995376 +82 60 model.embedding_dim 0.0 +82 60 loss.margin 8.752962641005901 +82 60 optimizer.lr 0.003757359293685932 +82 60 negative_sampler.num_negs_per_pos 16.0 +82 60 training.batch_size 1.0 +82 61 model.output_channels 63.0 +82 61 model.input_dropout 0.3769584012163911 +82 61 model.output_dropout 0.06016955425942849 +82 61 model.feature_map_dropout 0.17472033446500418 +82 61 model.embedding_dim 1.0 +82 61 loss.margin 2.22210681080553 +82 61 optimizer.lr 0.0051991742232890435 +82 61 negative_sampler.num_negs_per_pos 2.0 +82 61 training.batch_size 1.0 +82 62 model.output_channels 22.0 +82 62 model.input_dropout 0.32365810842891424 +82 62 model.output_dropout 0.341569829449995 +82 62 model.feature_map_dropout 0.022532873711516133 +82 62 model.embedding_dim 0.0 +82 62 loss.margin 0.7068685925150928 +82 62 optimizer.lr 0.040842741833374725 +82 62 negative_sampler.num_negs_per_pos 58.0 +82 62 training.batch_size 0.0 +82 63 model.output_channels 18.0 +82 63 
model.input_dropout 0.42228248193820284 +82 63 model.output_dropout 0.3938966052728885 +82 63 model.feature_map_dropout 0.2593388134965906 +82 63 model.embedding_dim 2.0 +82 63 loss.margin 9.67677483816127 +82 63 optimizer.lr 0.027758804996278162 +82 63 negative_sampler.num_negs_per_pos 98.0 +82 63 training.batch_size 0.0 +82 64 model.output_channels 35.0 +82 64 model.input_dropout 0.23267765053391837 +82 64 model.output_dropout 0.1962362646538761 +82 64 model.feature_map_dropout 0.11567447326760288 +82 64 model.embedding_dim 0.0 +82 64 loss.margin 1.0599133061953085 +82 64 optimizer.lr 0.007083302778782854 +82 64 negative_sampler.num_negs_per_pos 93.0 +82 64 training.batch_size 1.0 +82 65 model.output_channels 38.0 +82 65 model.input_dropout 0.07363203840289217 +82 65 model.output_dropout 0.3078306558330247 +82 65 model.feature_map_dropout 0.17190603029054202 +82 65 model.embedding_dim 2.0 +82 65 loss.margin 7.377163085771911 +82 65 optimizer.lr 0.027268572365195455 +82 65 negative_sampler.num_negs_per_pos 5.0 +82 65 training.batch_size 1.0 +82 66 model.output_channels 18.0 +82 66 model.input_dropout 0.49510951929944524 +82 66 model.output_dropout 0.3281319825049065 +82 66 model.feature_map_dropout 0.40640782046147017 +82 66 model.embedding_dim 0.0 +82 66 loss.margin 1.3980246366708842 +82 66 optimizer.lr 0.008615788856570338 +82 66 negative_sampler.num_negs_per_pos 89.0 +82 66 training.batch_size 2.0 +82 67 model.output_channels 18.0 +82 67 model.input_dropout 0.4956181939051165 +82 67 model.output_dropout 0.36916986143004343 +82 67 model.feature_map_dropout 0.23947098716269377 +82 67 model.embedding_dim 1.0 +82 67 loss.margin 3.8091544054738944 +82 67 optimizer.lr 0.00944433078697181 +82 67 negative_sampler.num_negs_per_pos 74.0 +82 67 training.batch_size 0.0 +82 68 model.output_channels 41.0 +82 68 model.input_dropout 0.4177871240563727 +82 68 model.output_dropout 0.013624931035069732 +82 68 model.feature_map_dropout 0.29988049572982817 +82 68 
model.embedding_dim 2.0 +82 68 loss.margin 0.9361590550896064 +82 68 optimizer.lr 0.0456287941785402 +82 68 negative_sampler.num_negs_per_pos 9.0 +82 68 training.batch_size 1.0 +82 69 model.output_channels 60.0 +82 69 model.input_dropout 0.3769800313389875 +82 69 model.output_dropout 0.004944559497487899 +82 69 model.feature_map_dropout 0.3608122120467246 +82 69 model.embedding_dim 0.0 +82 69 loss.margin 9.712364450375643 +82 69 optimizer.lr 0.009047171489443501 +82 69 negative_sampler.num_negs_per_pos 38.0 +82 69 training.batch_size 1.0 +82 70 model.output_channels 34.0 +82 70 model.input_dropout 0.03138890361109897 +82 70 model.output_dropout 0.4117732206782796 +82 70 model.feature_map_dropout 0.40599645543930224 +82 70 model.embedding_dim 1.0 +82 70 loss.margin 6.268413058745903 +82 70 optimizer.lr 0.005222424301945152 +82 70 negative_sampler.num_negs_per_pos 35.0 +82 70 training.batch_size 2.0 +82 71 model.output_channels 54.0 +82 71 model.input_dropout 0.45502136574431906 +82 71 model.output_dropout 0.08033407904522871 +82 71 model.feature_map_dropout 0.218082862589892 +82 71 model.embedding_dim 2.0 +82 71 loss.margin 6.90935310484752 +82 71 optimizer.lr 0.009999530411838067 +82 71 negative_sampler.num_negs_per_pos 81.0 +82 71 training.batch_size 1.0 +82 72 model.output_channels 36.0 +82 72 model.input_dropout 0.0900121116962887 +82 72 model.output_dropout 0.3158416645814241 +82 72 model.feature_map_dropout 0.044074669651998466 +82 72 model.embedding_dim 1.0 +82 72 loss.margin 2.6797874572175533 +82 72 optimizer.lr 0.0033160429680652833 +82 72 negative_sampler.num_negs_per_pos 74.0 +82 72 training.batch_size 2.0 +82 73 model.output_channels 25.0 +82 73 model.input_dropout 0.1340462268825503 +82 73 model.output_dropout 0.2796123109283223 +82 73 model.feature_map_dropout 0.032773590852989976 +82 73 model.embedding_dim 1.0 +82 73 loss.margin 4.345238245416644 +82 73 optimizer.lr 0.015144136874089307 +82 73 negative_sampler.num_negs_per_pos 59.0 +82 73 
training.batch_size 2.0 +82 74 model.output_channels 59.0 +82 74 model.input_dropout 0.11200286347021621 +82 74 model.output_dropout 0.3058416482688199 +82 74 model.feature_map_dropout 0.4339177057710018 +82 74 model.embedding_dim 1.0 +82 74 loss.margin 6.733024098099634 +82 74 optimizer.lr 0.09131726612163571 +82 74 negative_sampler.num_negs_per_pos 1.0 +82 74 training.batch_size 0.0 +82 75 model.output_channels 50.0 +82 75 model.input_dropout 0.4471801942754522 +82 75 model.output_dropout 0.495739634446059 +82 75 model.feature_map_dropout 0.25957341535592093 +82 75 model.embedding_dim 0.0 +82 75 loss.margin 5.372908803017405 +82 75 optimizer.lr 0.01033681754883654 +82 75 negative_sampler.num_negs_per_pos 20.0 +82 75 training.batch_size 2.0 +82 76 model.output_channels 49.0 +82 76 model.input_dropout 0.046701637270626595 +82 76 model.output_dropout 0.056826395267482455 +82 76 model.feature_map_dropout 0.3520157727111951 +82 76 model.embedding_dim 1.0 +82 76 loss.margin 2.0551550589708416 +82 76 optimizer.lr 0.004729388561250789 +82 76 negative_sampler.num_negs_per_pos 27.0 +82 76 training.batch_size 2.0 +82 77 model.output_channels 24.0 +82 77 model.input_dropout 0.2429991954884826 +82 77 model.output_dropout 0.37937158056755227 +82 77 model.feature_map_dropout 0.2188557660064951 +82 77 model.embedding_dim 2.0 +82 77 loss.margin 5.892638988728993 +82 77 optimizer.lr 0.0072859421773875275 +82 77 negative_sampler.num_negs_per_pos 84.0 +82 77 training.batch_size 2.0 +82 78 model.output_channels 35.0 +82 78 model.input_dropout 0.440236360159276 +82 78 model.output_dropout 0.17662622874293427 +82 78 model.feature_map_dropout 0.08845937274704335 +82 78 model.embedding_dim 2.0 +82 78 loss.margin 4.2702138986764915 +82 78 optimizer.lr 0.06561379697940782 +82 78 negative_sampler.num_negs_per_pos 69.0 +82 78 training.batch_size 0.0 +82 79 model.output_channels 17.0 +82 79 model.input_dropout 0.2900731216408238 +82 79 model.output_dropout 0.1611515670611947 +82 79 
model.feature_map_dropout 0.25469486937624986 +82 79 model.embedding_dim 0.0 +82 79 loss.margin 6.062587862513214 +82 79 optimizer.lr 0.0036507358790753928 +82 79 negative_sampler.num_negs_per_pos 24.0 +82 79 training.batch_size 0.0 +82 80 model.output_channels 51.0 +82 80 model.input_dropout 0.2061630149116362 +82 80 model.output_dropout 0.15600065720798917 +82 80 model.feature_map_dropout 0.0364373752127623 +82 80 model.embedding_dim 2.0 +82 80 loss.margin 0.8216006172398894 +82 80 optimizer.lr 0.006236412870344789 +82 80 negative_sampler.num_negs_per_pos 8.0 +82 80 training.batch_size 2.0 +82 81 model.output_channels 58.0 +82 81 model.input_dropout 0.10387956018591676 +82 81 model.output_dropout 0.4948884072252236 +82 81 model.feature_map_dropout 0.20337372341032495 +82 81 model.embedding_dim 0.0 +82 81 loss.margin 6.016740040303535 +82 81 optimizer.lr 0.0070933140969309345 +82 81 negative_sampler.num_negs_per_pos 73.0 +82 81 training.batch_size 2.0 +82 82 model.output_channels 37.0 +82 82 model.input_dropout 0.1475001595145528 +82 82 model.output_dropout 0.47475350192015864 +82 82 model.feature_map_dropout 0.2659448396135539 +82 82 model.embedding_dim 1.0 +82 82 loss.margin 5.270067785056157 +82 82 optimizer.lr 0.0011260665066255063 +82 82 negative_sampler.num_negs_per_pos 21.0 +82 82 training.batch_size 2.0 +82 83 model.output_channels 52.0 +82 83 model.input_dropout 0.24519090805242272 +82 83 model.output_dropout 0.47081019033073873 +82 83 model.feature_map_dropout 0.3254715901746559 +82 83 model.embedding_dim 0.0 +82 83 loss.margin 8.655012771844811 +82 83 optimizer.lr 0.03395605103015553 +82 83 negative_sampler.num_negs_per_pos 8.0 +82 83 training.batch_size 0.0 +82 84 model.output_channels 28.0 +82 84 model.input_dropout 0.41970409848675005 +82 84 model.output_dropout 0.43536900776195064 +82 84 model.feature_map_dropout 0.08566381878006374 +82 84 model.embedding_dim 2.0 +82 84 loss.margin 0.8742702834537761 +82 84 optimizer.lr 0.012813081076853384 +82 84 
negative_sampler.num_negs_per_pos 80.0 +82 84 training.batch_size 1.0 +82 85 model.output_channels 60.0 +82 85 model.input_dropout 0.014758378327656141 +82 85 model.output_dropout 0.36016427803764167 +82 85 model.feature_map_dropout 0.27308149676444965 +82 85 model.embedding_dim 1.0 +82 85 loss.margin 3.728793715437251 +82 85 optimizer.lr 0.00359116616940507 +82 85 negative_sampler.num_negs_per_pos 77.0 +82 85 training.batch_size 2.0 +82 86 model.output_channels 36.0 +82 86 model.input_dropout 0.4430819548326437 +82 86 model.output_dropout 0.19642026192941947 +82 86 model.feature_map_dropout 0.3530935940123665 +82 86 model.embedding_dim 2.0 +82 86 loss.margin 9.909435034252526 +82 86 optimizer.lr 0.011344454531835524 +82 86 negative_sampler.num_negs_per_pos 64.0 +82 86 training.batch_size 2.0 +82 87 model.output_channels 23.0 +82 87 model.input_dropout 0.33248405944236503 +82 87 model.output_dropout 0.4880457056184513 +82 87 model.feature_map_dropout 0.4732218157719389 +82 87 model.embedding_dim 0.0 +82 87 loss.margin 8.750414241287054 +82 87 optimizer.lr 0.04345944218080035 +82 87 negative_sampler.num_negs_per_pos 87.0 +82 87 training.batch_size 0.0 +82 88 model.output_channels 26.0 +82 88 model.input_dropout 0.2032669618623305 +82 88 model.output_dropout 0.12445362838496915 +82 88 model.feature_map_dropout 0.3285478603317651 +82 88 model.embedding_dim 0.0 +82 88 loss.margin 5.286331255605272 +82 88 optimizer.lr 0.02947203917824405 +82 88 negative_sampler.num_negs_per_pos 37.0 +82 88 training.batch_size 0.0 +82 89 model.output_channels 61.0 +82 89 model.input_dropout 0.46018652825331985 +82 89 model.output_dropout 0.08538210157922027 +82 89 model.feature_map_dropout 0.10995764404325559 +82 89 model.embedding_dim 1.0 +82 89 loss.margin 3.799265009096616 +82 89 optimizer.lr 0.007164554422305517 +82 89 negative_sampler.num_negs_per_pos 70.0 +82 89 training.batch_size 0.0 +82 90 model.output_channels 53.0 +82 90 model.input_dropout 0.13608012878447334 +82 90 
model.output_dropout 0.15480706402444488 +82 90 model.feature_map_dropout 0.10016313370284802 +82 90 model.embedding_dim 0.0 +82 90 loss.margin 5.300625862453643 +82 90 optimizer.lr 0.0027825309474481027 +82 90 negative_sampler.num_negs_per_pos 18.0 +82 90 training.batch_size 1.0 +82 91 model.output_channels 20.0 +82 91 model.input_dropout 0.17985087969380636 +82 91 model.output_dropout 0.2328901737875505 +82 91 model.feature_map_dropout 0.34004662879232944 +82 91 model.embedding_dim 1.0 +82 91 loss.margin 8.665523311218665 +82 91 optimizer.lr 0.016145152367899412 +82 91 negative_sampler.num_negs_per_pos 62.0 +82 91 training.batch_size 0.0 +82 1 dataset """kinships""" +82 1 model """conve""" +82 1 loss """marginranking""" +82 1 regularizer """no""" +82 1 optimizer """adam""" +82 1 training_loop """owa""" +82 1 negative_sampler """basic""" +82 1 evaluator """rankbased""" +82 2 dataset """kinships""" +82 2 model """conve""" +82 2 loss """marginranking""" +82 2 regularizer """no""" +82 2 optimizer """adam""" +82 2 training_loop """owa""" +82 2 negative_sampler """basic""" +82 2 evaluator """rankbased""" +82 3 dataset """kinships""" +82 3 model """conve""" +82 3 loss """marginranking""" +82 3 regularizer """no""" +82 3 optimizer """adam""" +82 3 training_loop """owa""" +82 3 negative_sampler """basic""" +82 3 evaluator """rankbased""" +82 4 dataset """kinships""" +82 4 model """conve""" +82 4 loss """marginranking""" +82 4 regularizer """no""" +82 4 optimizer """adam""" +82 4 training_loop """owa""" +82 4 negative_sampler """basic""" +82 4 evaluator """rankbased""" +82 5 dataset """kinships""" +82 5 model """conve""" +82 5 loss """marginranking""" +82 5 regularizer """no""" +82 5 optimizer """adam""" +82 5 training_loop """owa""" +82 5 negative_sampler """basic""" +82 5 evaluator """rankbased""" +82 6 dataset """kinships""" +82 6 model """conve""" +82 6 loss """marginranking""" +82 6 regularizer """no""" +82 6 optimizer """adam""" +82 6 training_loop """owa""" +82 6 
negative_sampler """basic""" +82 6 evaluator """rankbased""" +82 7 dataset """kinships""" +82 7 model """conve""" +82 7 loss """marginranking""" +82 7 regularizer """no""" +82 7 optimizer """adam""" +82 7 training_loop """owa""" +82 7 negative_sampler """basic""" +82 7 evaluator """rankbased""" +82 8 dataset """kinships""" +82 8 model """conve""" +82 8 loss """marginranking""" +82 8 regularizer """no""" +82 8 optimizer """adam""" +82 8 training_loop """owa""" +82 8 negative_sampler """basic""" +82 8 evaluator """rankbased""" +82 9 dataset """kinships""" +82 9 model """conve""" +82 9 loss """marginranking""" +82 9 regularizer """no""" +82 9 optimizer """adam""" +82 9 training_loop """owa""" +82 9 negative_sampler """basic""" +82 9 evaluator """rankbased""" +82 10 dataset """kinships""" +82 10 model """conve""" +82 10 loss """marginranking""" +82 10 regularizer """no""" +82 10 optimizer """adam""" +82 10 training_loop """owa""" +82 10 negative_sampler """basic""" +82 10 evaluator """rankbased""" +82 11 dataset """kinships""" +82 11 model """conve""" +82 11 loss """marginranking""" +82 11 regularizer """no""" +82 11 optimizer """adam""" +82 11 training_loop """owa""" +82 11 negative_sampler """basic""" +82 11 evaluator """rankbased""" +82 12 dataset """kinships""" +82 12 model """conve""" +82 12 loss """marginranking""" +82 12 regularizer """no""" +82 12 optimizer """adam""" +82 12 training_loop """owa""" +82 12 negative_sampler """basic""" +82 12 evaluator """rankbased""" +82 13 dataset """kinships""" +82 13 model """conve""" +82 13 loss """marginranking""" +82 13 regularizer """no""" +82 13 optimizer """adam""" +82 13 training_loop """owa""" +82 13 negative_sampler """basic""" +82 13 evaluator """rankbased""" +82 14 dataset """kinships""" +82 14 model """conve""" +82 14 loss """marginranking""" +82 14 regularizer """no""" +82 14 optimizer """adam""" +82 14 training_loop """owa""" +82 14 negative_sampler """basic""" +82 14 evaluator """rankbased""" +82 15 dataset 
"""kinships""" +82 15 model """conve""" +82 15 loss """marginranking""" +82 15 regularizer """no""" +82 15 optimizer """adam""" +82 15 training_loop """owa""" +82 15 negative_sampler """basic""" +82 15 evaluator """rankbased""" +82 16 dataset """kinships""" +82 16 model """conve""" +82 16 loss """marginranking""" +82 16 regularizer """no""" +82 16 optimizer """adam""" +82 16 training_loop """owa""" +82 16 negative_sampler """basic""" +82 16 evaluator """rankbased""" +82 17 dataset """kinships""" +82 17 model """conve""" +82 17 loss """marginranking""" +82 17 regularizer """no""" +82 17 optimizer """adam""" +82 17 training_loop """owa""" +82 17 negative_sampler """basic""" +82 17 evaluator """rankbased""" +82 18 dataset """kinships""" +82 18 model """conve""" +82 18 loss """marginranking""" +82 18 regularizer """no""" +82 18 optimizer """adam""" +82 18 training_loop """owa""" +82 18 negative_sampler """basic""" +82 18 evaluator """rankbased""" +82 19 dataset """kinships""" +82 19 model """conve""" +82 19 loss """marginranking""" +82 19 regularizer """no""" +82 19 optimizer """adam""" +82 19 training_loop """owa""" +82 19 negative_sampler """basic""" +82 19 evaluator """rankbased""" +82 20 dataset """kinships""" +82 20 model """conve""" +82 20 loss """marginranking""" +82 20 regularizer """no""" +82 20 optimizer """adam""" +82 20 training_loop """owa""" +82 20 negative_sampler """basic""" +82 20 evaluator """rankbased""" +82 21 dataset """kinships""" +82 21 model """conve""" +82 21 loss """marginranking""" +82 21 regularizer """no""" +82 21 optimizer """adam""" +82 21 training_loop """owa""" +82 21 negative_sampler """basic""" +82 21 evaluator """rankbased""" +82 22 dataset """kinships""" +82 22 model """conve""" +82 22 loss """marginranking""" +82 22 regularizer """no""" +82 22 optimizer """adam""" +82 22 training_loop """owa""" +82 22 negative_sampler """basic""" +82 22 evaluator """rankbased""" +82 23 dataset """kinships""" +82 23 model """conve""" +82 23 loss 
"""marginranking""" +82 23 regularizer """no""" +82 23 optimizer """adam""" +82 23 training_loop """owa""" +82 23 negative_sampler """basic""" +82 23 evaluator """rankbased""" +82 24 dataset """kinships""" +82 24 model """conve""" +82 24 loss """marginranking""" +82 24 regularizer """no""" +82 24 optimizer """adam""" +82 24 training_loop """owa""" +82 24 negative_sampler """basic""" +82 24 evaluator """rankbased""" +82 25 dataset """kinships""" +82 25 model """conve""" +82 25 loss """marginranking""" +82 25 regularizer """no""" +82 25 optimizer """adam""" +82 25 training_loop """owa""" +82 25 negative_sampler """basic""" +82 25 evaluator """rankbased""" +82 26 dataset """kinships""" +82 26 model """conve""" +82 26 loss """marginranking""" +82 26 regularizer """no""" +82 26 optimizer """adam""" +82 26 training_loop """owa""" +82 26 negative_sampler """basic""" +82 26 evaluator """rankbased""" +82 27 dataset """kinships""" +82 27 model """conve""" +82 27 loss """marginranking""" +82 27 regularizer """no""" +82 27 optimizer """adam""" +82 27 training_loop """owa""" +82 27 negative_sampler """basic""" +82 27 evaluator """rankbased""" +82 28 dataset """kinships""" +82 28 model """conve""" +82 28 loss """marginranking""" +82 28 regularizer """no""" +82 28 optimizer """adam""" +82 28 training_loop """owa""" +82 28 negative_sampler """basic""" +82 28 evaluator """rankbased""" +82 29 dataset """kinships""" +82 29 model """conve""" +82 29 loss """marginranking""" +82 29 regularizer """no""" +82 29 optimizer """adam""" +82 29 training_loop """owa""" +82 29 negative_sampler """basic""" +82 29 evaluator """rankbased""" +82 30 dataset """kinships""" +82 30 model """conve""" +82 30 loss """marginranking""" +82 30 regularizer """no""" +82 30 optimizer """adam""" +82 30 training_loop """owa""" +82 30 negative_sampler """basic""" +82 30 evaluator """rankbased""" +82 31 dataset """kinships""" +82 31 model """conve""" +82 31 loss """marginranking""" +82 31 regularizer """no""" +82 31 
optimizer """adam""" +82 31 training_loop """owa""" +82 31 negative_sampler """basic""" +82 31 evaluator """rankbased""" +82 32 dataset """kinships""" +82 32 model """conve""" +82 32 loss """marginranking""" +82 32 regularizer """no""" +82 32 optimizer """adam""" +82 32 training_loop """owa""" +82 32 negative_sampler """basic""" +82 32 evaluator """rankbased""" +82 33 dataset """kinships""" +82 33 model """conve""" +82 33 loss """marginranking""" +82 33 regularizer """no""" +82 33 optimizer """adam""" +82 33 training_loop """owa""" +82 33 negative_sampler """basic""" +82 33 evaluator """rankbased""" +82 34 dataset """kinships""" +82 34 model """conve""" +82 34 loss """marginranking""" +82 34 regularizer """no""" +82 34 optimizer """adam""" +82 34 training_loop """owa""" +82 34 negative_sampler """basic""" +82 34 evaluator """rankbased""" +82 35 dataset """kinships""" +82 35 model """conve""" +82 35 loss """marginranking""" +82 35 regularizer """no""" +82 35 optimizer """adam""" +82 35 training_loop """owa""" +82 35 negative_sampler """basic""" +82 35 evaluator """rankbased""" +82 36 dataset """kinships""" +82 36 model """conve""" +82 36 loss """marginranking""" +82 36 regularizer """no""" +82 36 optimizer """adam""" +82 36 training_loop """owa""" +82 36 negative_sampler """basic""" +82 36 evaluator """rankbased""" +82 37 dataset """kinships""" +82 37 model """conve""" +82 37 loss """marginranking""" +82 37 regularizer """no""" +82 37 optimizer """adam""" +82 37 training_loop """owa""" +82 37 negative_sampler """basic""" +82 37 evaluator """rankbased""" +82 38 dataset """kinships""" +82 38 model """conve""" +82 38 loss """marginranking""" +82 38 regularizer """no""" +82 38 optimizer """adam""" +82 38 training_loop """owa""" +82 38 negative_sampler """basic""" +82 38 evaluator """rankbased""" +82 39 dataset """kinships""" +82 39 model """conve""" +82 39 loss """marginranking""" +82 39 regularizer """no""" +82 39 optimizer """adam""" +82 39 training_loop """owa""" +82 
39 negative_sampler """basic""" +82 39 evaluator """rankbased""" +82 40 dataset """kinships""" +82 40 model """conve""" +82 40 loss """marginranking""" +82 40 regularizer """no""" +82 40 optimizer """adam""" +82 40 training_loop """owa""" +82 40 negative_sampler """basic""" +82 40 evaluator """rankbased""" +82 41 dataset """kinships""" +82 41 model """conve""" +82 41 loss """marginranking""" +82 41 regularizer """no""" +82 41 optimizer """adam""" +82 41 training_loop """owa""" +82 41 negative_sampler """basic""" +82 41 evaluator """rankbased""" +82 42 dataset """kinships""" +82 42 model """conve""" +82 42 loss """marginranking""" +82 42 regularizer """no""" +82 42 optimizer """adam""" +82 42 training_loop """owa""" +82 42 negative_sampler """basic""" +82 42 evaluator """rankbased""" +82 43 dataset """kinships""" +82 43 model """conve""" +82 43 loss """marginranking""" +82 43 regularizer """no""" +82 43 optimizer """adam""" +82 43 training_loop """owa""" +82 43 negative_sampler """basic""" +82 43 evaluator """rankbased""" +82 44 dataset """kinships""" +82 44 model """conve""" +82 44 loss """marginranking""" +82 44 regularizer """no""" +82 44 optimizer """adam""" +82 44 training_loop """owa""" +82 44 negative_sampler """basic""" +82 44 evaluator """rankbased""" +82 45 dataset """kinships""" +82 45 model """conve""" +82 45 loss """marginranking""" +82 45 regularizer """no""" +82 45 optimizer """adam""" +82 45 training_loop """owa""" +82 45 negative_sampler """basic""" +82 45 evaluator """rankbased""" +82 46 dataset """kinships""" +82 46 model """conve""" +82 46 loss """marginranking""" +82 46 regularizer """no""" +82 46 optimizer """adam""" +82 46 training_loop """owa""" +82 46 negative_sampler """basic""" +82 46 evaluator """rankbased""" +82 47 dataset """kinships""" +82 47 model """conve""" +82 47 loss """marginranking""" +82 47 regularizer """no""" +82 47 optimizer """adam""" +82 47 training_loop """owa""" +82 47 negative_sampler """basic""" +82 47 evaluator 
"""rankbased""" +82 48 dataset """kinships""" +82 48 model """conve""" +82 48 loss """marginranking""" +82 48 regularizer """no""" +82 48 optimizer """adam""" +82 48 training_loop """owa""" +82 48 negative_sampler """basic""" +82 48 evaluator """rankbased""" +82 49 dataset """kinships""" +82 49 model """conve""" +82 49 loss """marginranking""" +82 49 regularizer """no""" +82 49 optimizer """adam""" +82 49 training_loop """owa""" +82 49 negative_sampler """basic""" +82 49 evaluator """rankbased""" +82 50 dataset """kinships""" +82 50 model """conve""" +82 50 loss """marginranking""" +82 50 regularizer """no""" +82 50 optimizer """adam""" +82 50 training_loop """owa""" +82 50 negative_sampler """basic""" +82 50 evaluator """rankbased""" +82 51 dataset """kinships""" +82 51 model """conve""" +82 51 loss """marginranking""" +82 51 regularizer """no""" +82 51 optimizer """adam""" +82 51 training_loop """owa""" +82 51 negative_sampler """basic""" +82 51 evaluator """rankbased""" +82 52 dataset """kinships""" +82 52 model """conve""" +82 52 loss """marginranking""" +82 52 regularizer """no""" +82 52 optimizer """adam""" +82 52 training_loop """owa""" +82 52 negative_sampler """basic""" +82 52 evaluator """rankbased""" +82 53 dataset """kinships""" +82 53 model """conve""" +82 53 loss """marginranking""" +82 53 regularizer """no""" +82 53 optimizer """adam""" +82 53 training_loop """owa""" +82 53 negative_sampler """basic""" +82 53 evaluator """rankbased""" +82 54 dataset """kinships""" +82 54 model """conve""" +82 54 loss """marginranking""" +82 54 regularizer """no""" +82 54 optimizer """adam""" +82 54 training_loop """owa""" +82 54 negative_sampler """basic""" +82 54 evaluator """rankbased""" +82 55 dataset """kinships""" +82 55 model """conve""" +82 55 loss """marginranking""" +82 55 regularizer """no""" +82 55 optimizer """adam""" +82 55 training_loop """owa""" +82 55 negative_sampler """basic""" +82 55 evaluator """rankbased""" +82 56 dataset """kinships""" +82 56 
model """conve""" +82 56 loss """marginranking""" +82 56 regularizer """no""" +82 56 optimizer """adam""" +82 56 training_loop """owa""" +82 56 negative_sampler """basic""" +82 56 evaluator """rankbased""" +82 57 dataset """kinships""" +82 57 model """conve""" +82 57 loss """marginranking""" +82 57 regularizer """no""" +82 57 optimizer """adam""" +82 57 training_loop """owa""" +82 57 negative_sampler """basic""" +82 57 evaluator """rankbased""" +82 58 dataset """kinships""" +82 58 model """conve""" +82 58 loss """marginranking""" +82 58 regularizer """no""" +82 58 optimizer """adam""" +82 58 training_loop """owa""" +82 58 negative_sampler """basic""" +82 58 evaluator """rankbased""" +82 59 dataset """kinships""" +82 59 model """conve""" +82 59 loss """marginranking""" +82 59 regularizer """no""" +82 59 optimizer """adam""" +82 59 training_loop """owa""" +82 59 negative_sampler """basic""" +82 59 evaluator """rankbased""" +82 60 dataset """kinships""" +82 60 model """conve""" +82 60 loss """marginranking""" +82 60 regularizer """no""" +82 60 optimizer """adam""" +82 60 training_loop """owa""" +82 60 negative_sampler """basic""" +82 60 evaluator """rankbased""" +82 61 dataset """kinships""" +82 61 model """conve""" +82 61 loss """marginranking""" +82 61 regularizer """no""" +82 61 optimizer """adam""" +82 61 training_loop """owa""" +82 61 negative_sampler """basic""" +82 61 evaluator """rankbased""" +82 62 dataset """kinships""" +82 62 model """conve""" +82 62 loss """marginranking""" +82 62 regularizer """no""" +82 62 optimizer """adam""" +82 62 training_loop """owa""" +82 62 negative_sampler """basic""" +82 62 evaluator """rankbased""" +82 63 dataset """kinships""" +82 63 model """conve""" +82 63 loss """marginranking""" +82 63 regularizer """no""" +82 63 optimizer """adam""" +82 63 training_loop """owa""" +82 63 negative_sampler """basic""" +82 63 evaluator """rankbased""" +82 64 dataset """kinships""" +82 64 model """conve""" +82 64 loss """marginranking""" +82 
64 regularizer """no""" +82 64 optimizer """adam""" +82 64 training_loop """owa""" +82 64 negative_sampler """basic""" +82 64 evaluator """rankbased""" +82 65 dataset """kinships""" +82 65 model """conve""" +82 65 loss """marginranking""" +82 65 regularizer """no""" +82 65 optimizer """adam""" +82 65 training_loop """owa""" +82 65 negative_sampler """basic""" +82 65 evaluator """rankbased""" +82 66 dataset """kinships""" +82 66 model """conve""" +82 66 loss """marginranking""" +82 66 regularizer """no""" +82 66 optimizer """adam""" +82 66 training_loop """owa""" +82 66 negative_sampler """basic""" +82 66 evaluator """rankbased""" +82 67 dataset """kinships""" +82 67 model """conve""" +82 67 loss """marginranking""" +82 67 regularizer """no""" +82 67 optimizer """adam""" +82 67 training_loop """owa""" +82 67 negative_sampler """basic""" +82 67 evaluator """rankbased""" +82 68 dataset """kinships""" +82 68 model """conve""" +82 68 loss """marginranking""" +82 68 regularizer """no""" +82 68 optimizer """adam""" +82 68 training_loop """owa""" +82 68 negative_sampler """basic""" +82 68 evaluator """rankbased""" +82 69 dataset """kinships""" +82 69 model """conve""" +82 69 loss """marginranking""" +82 69 regularizer """no""" +82 69 optimizer """adam""" +82 69 training_loop """owa""" +82 69 negative_sampler """basic""" +82 69 evaluator """rankbased""" +82 70 dataset """kinships""" +82 70 model """conve""" +82 70 loss """marginranking""" +82 70 regularizer """no""" +82 70 optimizer """adam""" +82 70 training_loop """owa""" +82 70 negative_sampler """basic""" +82 70 evaluator """rankbased""" +82 71 dataset """kinships""" +82 71 model """conve""" +82 71 loss """marginranking""" +82 71 regularizer """no""" +82 71 optimizer """adam""" +82 71 training_loop """owa""" +82 71 negative_sampler """basic""" +82 71 evaluator """rankbased""" +82 72 dataset """kinships""" +82 72 model """conve""" +82 72 loss """marginranking""" +82 72 regularizer """no""" +82 72 optimizer """adam""" +82 
72 training_loop """owa""" +82 72 negative_sampler """basic""" +82 72 evaluator """rankbased""" +82 73 dataset """kinships""" +82 73 model """conve""" +82 73 loss """marginranking""" +82 73 regularizer """no""" +82 73 optimizer """adam""" +82 73 training_loop """owa""" +82 73 negative_sampler """basic""" +82 73 evaluator """rankbased""" +82 74 dataset """kinships""" +82 74 model """conve""" +82 74 loss """marginranking""" +82 74 regularizer """no""" +82 74 optimizer """adam""" +82 74 training_loop """owa""" +82 74 negative_sampler """basic""" +82 74 evaluator """rankbased""" +82 75 dataset """kinships""" +82 75 model """conve""" +82 75 loss """marginranking""" +82 75 regularizer """no""" +82 75 optimizer """adam""" +82 75 training_loop """owa""" +82 75 negative_sampler """basic""" +82 75 evaluator """rankbased""" +82 76 dataset """kinships""" +82 76 model """conve""" +82 76 loss """marginranking""" +82 76 regularizer """no""" +82 76 optimizer """adam""" +82 76 training_loop """owa""" +82 76 negative_sampler """basic""" +82 76 evaluator """rankbased""" +82 77 dataset """kinships""" +82 77 model """conve""" +82 77 loss """marginranking""" +82 77 regularizer """no""" +82 77 optimizer """adam""" +82 77 training_loop """owa""" +82 77 negative_sampler """basic""" +82 77 evaluator """rankbased""" +82 78 dataset """kinships""" +82 78 model """conve""" +82 78 loss """marginranking""" +82 78 regularizer """no""" +82 78 optimizer """adam""" +82 78 training_loop """owa""" +82 78 negative_sampler """basic""" +82 78 evaluator """rankbased""" +82 79 dataset """kinships""" +82 79 model """conve""" +82 79 loss """marginranking""" +82 79 regularizer """no""" +82 79 optimizer """adam""" +82 79 training_loop """owa""" +82 79 negative_sampler """basic""" +82 79 evaluator """rankbased""" +82 80 dataset """kinships""" +82 80 model """conve""" +82 80 loss """marginranking""" +82 80 regularizer """no""" +82 80 optimizer """adam""" +82 80 training_loop """owa""" +82 80 negative_sampler 
"""basic""" +82 80 evaluator """rankbased""" +82 81 dataset """kinships""" +82 81 model """conve""" +82 81 loss """marginranking""" +82 81 regularizer """no""" +82 81 optimizer """adam""" +82 81 training_loop """owa""" +82 81 negative_sampler """basic""" +82 81 evaluator """rankbased""" +82 82 dataset """kinships""" +82 82 model """conve""" +82 82 loss """marginranking""" +82 82 regularizer """no""" +82 82 optimizer """adam""" +82 82 training_loop """owa""" +82 82 negative_sampler """basic""" +82 82 evaluator """rankbased""" +82 83 dataset """kinships""" +82 83 model """conve""" +82 83 loss """marginranking""" +82 83 regularizer """no""" +82 83 optimizer """adam""" +82 83 training_loop """owa""" +82 83 negative_sampler """basic""" +82 83 evaluator """rankbased""" +82 84 dataset """kinships""" +82 84 model """conve""" +82 84 loss """marginranking""" +82 84 regularizer """no""" +82 84 optimizer """adam""" +82 84 training_loop """owa""" +82 84 negative_sampler """basic""" +82 84 evaluator """rankbased""" +82 85 dataset """kinships""" +82 85 model """conve""" +82 85 loss """marginranking""" +82 85 regularizer """no""" +82 85 optimizer """adam""" +82 85 training_loop """owa""" +82 85 negative_sampler """basic""" +82 85 evaluator """rankbased""" +82 86 dataset """kinships""" +82 86 model """conve""" +82 86 loss """marginranking""" +82 86 regularizer """no""" +82 86 optimizer """adam""" +82 86 training_loop """owa""" +82 86 negative_sampler """basic""" +82 86 evaluator """rankbased""" +82 87 dataset """kinships""" +82 87 model """conve""" +82 87 loss """marginranking""" +82 87 regularizer """no""" +82 87 optimizer """adam""" +82 87 training_loop """owa""" +82 87 negative_sampler """basic""" +82 87 evaluator """rankbased""" +82 88 dataset """kinships""" +82 88 model """conve""" +82 88 loss """marginranking""" +82 88 regularizer """no""" +82 88 optimizer """adam""" +82 88 training_loop """owa""" +82 88 negative_sampler """basic""" +82 88 evaluator """rankbased""" +82 89 
dataset """kinships""" +82 89 model """conve""" +82 89 loss """marginranking""" +82 89 regularizer """no""" +82 89 optimizer """adam""" +82 89 training_loop """owa""" +82 89 negative_sampler """basic""" +82 89 evaluator """rankbased""" +82 90 dataset """kinships""" +82 90 model """conve""" +82 90 loss """marginranking""" +82 90 regularizer """no""" +82 90 optimizer """adam""" +82 90 training_loop """owa""" +82 90 negative_sampler """basic""" +82 90 evaluator """rankbased""" +82 91 dataset """kinships""" +82 91 model """conve""" +82 91 loss """marginranking""" +82 91 regularizer """no""" +82 91 optimizer """adam""" +82 91 training_loop """owa""" +82 91 negative_sampler """basic""" +82 91 evaluator """rankbased""" +83 1 model.output_channels 60.0 +83 1 model.input_dropout 0.08204321709537626 +83 1 model.output_dropout 0.13145464827408254 +83 1 model.feature_map_dropout 0.4874269297155934 +83 1 model.embedding_dim 0.0 +83 1 loss.margin 2.7512194123211167 +83 1 optimizer.lr 0.014998184025252494 +83 1 negative_sampler.num_negs_per_pos 62.0 +83 1 training.batch_size 0.0 +83 2 model.output_channels 32.0 +83 2 model.input_dropout 0.02004840947577685 +83 2 model.output_dropout 0.23422068571324373 +83 2 model.feature_map_dropout 0.447216661071742 +83 2 model.embedding_dim 1.0 +83 2 loss.margin 6.221362708577724 +83 2 optimizer.lr 0.0016303696262429245 +83 2 negative_sampler.num_negs_per_pos 82.0 +83 2 training.batch_size 2.0 +83 3 model.output_channels 23.0 +83 3 model.input_dropout 0.007584633725843393 +83 3 model.output_dropout 0.15098543662532576 +83 3 model.feature_map_dropout 0.15087884732770757 +83 3 model.embedding_dim 2.0 +83 3 loss.margin 6.524175954559114 +83 3 optimizer.lr 0.0018357326291173217 +83 3 negative_sampler.num_negs_per_pos 72.0 +83 3 training.batch_size 2.0 +83 4 model.output_channels 38.0 +83 4 model.input_dropout 0.3922990062283856 +83 4 model.output_dropout 0.4005617224255178 +83 4 model.feature_map_dropout 0.3732478287062534 +83 4 
model.embedding_dim 0.0 +83 4 loss.margin 5.1873543467587355 +83 4 optimizer.lr 0.039991194603497926 +83 4 negative_sampler.num_negs_per_pos 75.0 +83 4 training.batch_size 2.0 +83 5 model.output_channels 37.0 +83 5 model.input_dropout 0.3424292912355857 +83 5 model.output_dropout 0.46906745892994955 +83 5 model.feature_map_dropout 0.26380230973636615 +83 5 model.embedding_dim 1.0 +83 5 loss.margin 4.781722347548441 +83 5 optimizer.lr 0.035642034649716804 +83 5 negative_sampler.num_negs_per_pos 6.0 +83 5 training.batch_size 1.0 +83 6 model.output_channels 50.0 +83 6 model.input_dropout 0.2654658720194683 +83 6 model.output_dropout 0.4497295957088416 +83 6 model.feature_map_dropout 0.16710242676017173 +83 6 model.embedding_dim 0.0 +83 6 loss.margin 9.450521717218297 +83 6 optimizer.lr 0.015066785171635956 +83 6 negative_sampler.num_negs_per_pos 28.0 +83 6 training.batch_size 1.0 +83 7 model.output_channels 51.0 +83 7 model.input_dropout 0.20092905537300504 +83 7 model.output_dropout 0.015588680641234587 +83 7 model.feature_map_dropout 0.14365008620770897 +83 7 model.embedding_dim 2.0 +83 7 loss.margin 1.3459302987995785 +83 7 optimizer.lr 0.007800400198829454 +83 7 negative_sampler.num_negs_per_pos 37.0 +83 7 training.batch_size 2.0 +83 8 model.output_channels 59.0 +83 8 model.input_dropout 0.05891217548359767 +83 8 model.output_dropout 0.21227202863624967 +83 8 model.feature_map_dropout 0.2702453593213053 +83 8 model.embedding_dim 1.0 +83 8 loss.margin 9.34058314477442 +83 8 optimizer.lr 0.005895494586980272 +83 8 negative_sampler.num_negs_per_pos 49.0 +83 8 training.batch_size 1.0 +83 9 model.output_channels 50.0 +83 9 model.input_dropout 0.2717896447413492 +83 9 model.output_dropout 0.10090114402900058 +83 9 model.feature_map_dropout 0.11184403800255144 +83 9 model.embedding_dim 2.0 +83 9 loss.margin 0.5987684508564807 +83 9 optimizer.lr 0.07755003870215003 +83 9 negative_sampler.num_negs_per_pos 89.0 +83 9 training.batch_size 0.0 +83 10 model.output_channels 62.0 
+83 10 model.input_dropout 0.024138178241398967 +83 10 model.output_dropout 0.29195077333382546 +83 10 model.feature_map_dropout 0.32973348612420456 +83 10 model.embedding_dim 2.0 +83 10 loss.margin 3.4603800692776256 +83 10 optimizer.lr 0.0024692391359319157 +83 10 negative_sampler.num_negs_per_pos 21.0 +83 10 training.batch_size 2.0 +83 11 model.output_channels 43.0 +83 11 model.input_dropout 0.4685743105374199 +83 11 model.output_dropout 0.4251497433095267 +83 11 model.feature_map_dropout 0.10422581912741558 +83 11 model.embedding_dim 0.0 +83 11 loss.margin 2.4282267554535957 +83 11 optimizer.lr 0.003099901366539332 +83 11 negative_sampler.num_negs_per_pos 24.0 +83 11 training.batch_size 1.0 +83 12 model.output_channels 60.0 +83 12 model.input_dropout 0.2622604735310421 +83 12 model.output_dropout 0.31671991427041823 +83 12 model.feature_map_dropout 0.172461598383074 +83 12 model.embedding_dim 0.0 +83 12 loss.margin 9.973005786939039 +83 12 optimizer.lr 0.01476683574793553 +83 12 negative_sampler.num_negs_per_pos 36.0 +83 12 training.batch_size 2.0 +83 13 model.output_channels 41.0 +83 13 model.input_dropout 0.3153770025208758 +83 13 model.output_dropout 0.3277269817191311 +83 13 model.feature_map_dropout 0.17318895175253385 +83 13 model.embedding_dim 1.0 +83 13 loss.margin 0.7933918764836947 +83 13 optimizer.lr 0.012122537319689123 +83 13 negative_sampler.num_negs_per_pos 92.0 +83 13 training.batch_size 2.0 +83 14 model.output_channels 16.0 +83 14 model.input_dropout 0.2808993491102093 +83 14 model.output_dropout 0.11314560369364995 +83 14 model.feature_map_dropout 0.03703204905636259 +83 14 model.embedding_dim 2.0 +83 14 loss.margin 0.9812159758445539 +83 14 optimizer.lr 0.031063004649283438 +83 14 negative_sampler.num_negs_per_pos 70.0 +83 14 training.batch_size 1.0 +83 15 model.output_channels 58.0 +83 15 model.input_dropout 0.11233886633478346 +83 15 model.output_dropout 0.2825502285838064 +83 15 model.feature_map_dropout 0.37917406219677097 +83 15 
model.embedding_dim 1.0 +83 15 loss.margin 8.571305569389548 +83 15 optimizer.lr 0.008890542125008548 +83 15 negative_sampler.num_negs_per_pos 58.0 +83 15 training.batch_size 1.0 +83 16 model.output_channels 23.0 +83 16 model.input_dropout 0.15569942906348638 +83 16 model.output_dropout 0.1569025861322687 +83 16 model.feature_map_dropout 0.40001213961547905 +83 16 model.embedding_dim 1.0 +83 16 loss.margin 1.2563469867154387 +83 16 optimizer.lr 0.005322260321019629 +83 16 negative_sampler.num_negs_per_pos 32.0 +83 16 training.batch_size 0.0 +83 17 model.output_channels 61.0 +83 17 model.input_dropout 0.3445021632903255 +83 17 model.output_dropout 0.21540542705373417 +83 17 model.feature_map_dropout 0.4100715274090128 +83 17 model.embedding_dim 0.0 +83 17 loss.margin 2.2597400914154653 +83 17 optimizer.lr 0.001437165313394134 +83 17 negative_sampler.num_negs_per_pos 2.0 +83 17 training.batch_size 1.0 +83 18 model.output_channels 46.0 +83 18 model.input_dropout 0.41524901703305445 +83 18 model.output_dropout 0.3713758517357692 +83 18 model.feature_map_dropout 0.4162168736765953 +83 18 model.embedding_dim 0.0 +83 18 loss.margin 5.54918564473568 +83 18 optimizer.lr 0.00454728264892296 +83 18 negative_sampler.num_negs_per_pos 75.0 +83 18 training.batch_size 0.0 +83 19 model.output_channels 63.0 +83 19 model.input_dropout 0.4879479013775745 +83 19 model.output_dropout 0.2625571658621008 +83 19 model.feature_map_dropout 0.3667273906223717 +83 19 model.embedding_dim 1.0 +83 19 loss.margin 8.554079588763512 +83 19 optimizer.lr 0.08396633052258248 +83 19 negative_sampler.num_negs_per_pos 96.0 +83 19 training.batch_size 2.0 +83 20 model.output_channels 43.0 +83 20 model.input_dropout 0.08307950445159296 +83 20 model.output_dropout 0.2869087589839972 +83 20 model.feature_map_dropout 0.3028515020912795 +83 20 model.embedding_dim 1.0 +83 20 loss.margin 2.4904537074838915 +83 20 optimizer.lr 0.05271580102371824 +83 20 negative_sampler.num_negs_per_pos 29.0 +83 20 
training.batch_size 1.0 +83 21 model.output_channels 50.0 +83 21 model.input_dropout 0.1409538928192079 +83 21 model.output_dropout 0.1316404454246055 +83 21 model.feature_map_dropout 0.3535509630224517 +83 21 model.embedding_dim 0.0 +83 21 loss.margin 9.749109159266762 +83 21 optimizer.lr 0.010543347728843964 +83 21 negative_sampler.num_negs_per_pos 23.0 +83 21 training.batch_size 0.0 +83 22 model.output_channels 20.0 +83 22 model.input_dropout 0.47616091386599163 +83 22 model.output_dropout 0.4182083300994559 +83 22 model.feature_map_dropout 0.024742408179890263 +83 22 model.embedding_dim 1.0 +83 22 loss.margin 6.0226864287325315 +83 22 optimizer.lr 0.05109834758620144 +83 22 negative_sampler.num_negs_per_pos 48.0 +83 22 training.batch_size 2.0 +83 23 model.output_channels 25.0 +83 23 model.input_dropout 0.04736624066632972 +83 23 model.output_dropout 0.4305181951683088 +83 23 model.feature_map_dropout 0.03552182043110125 +83 23 model.embedding_dim 2.0 +83 23 loss.margin 9.953528852254898 +83 23 optimizer.lr 0.0779960638006888 +83 23 negative_sampler.num_negs_per_pos 83.0 +83 23 training.batch_size 2.0 +83 24 model.output_channels 33.0 +83 24 model.input_dropout 0.4885307999037151 +83 24 model.output_dropout 0.4123676328972236 +83 24 model.feature_map_dropout 0.1646484089267261 +83 24 model.embedding_dim 2.0 +83 24 loss.margin 1.9428861776046844 +83 24 optimizer.lr 0.008088396911216239 +83 24 negative_sampler.num_negs_per_pos 12.0 +83 24 training.batch_size 0.0 +83 25 model.output_channels 34.0 +83 25 model.input_dropout 0.31438961270369065 +83 25 model.output_dropout 0.3130443184782804 +83 25 model.feature_map_dropout 0.010521686199887303 +83 25 model.embedding_dim 0.0 +83 25 loss.margin 2.94023788130898 +83 25 optimizer.lr 0.04541556158807924 +83 25 negative_sampler.num_negs_per_pos 68.0 +83 25 training.batch_size 1.0 +83 26 model.output_channels 40.0 +83 26 model.input_dropout 0.29589499640201833 +83 26 model.output_dropout 0.004620838957048012 +83 26 
model.feature_map_dropout 0.015224354415105057 +83 26 model.embedding_dim 2.0 +83 26 loss.margin 7.264536353900288 +83 26 optimizer.lr 0.0021187729969215246 +83 26 negative_sampler.num_negs_per_pos 38.0 +83 26 training.batch_size 0.0 +83 27 model.output_channels 31.0 +83 27 model.input_dropout 0.3932542977919987 +83 27 model.output_dropout 0.039871926918003564 +83 27 model.feature_map_dropout 0.19932958819984664 +83 27 model.embedding_dim 2.0 +83 27 loss.margin 1.8461261699244629 +83 27 optimizer.lr 0.014389245825711255 +83 27 negative_sampler.num_negs_per_pos 23.0 +83 27 training.batch_size 2.0 +83 28 model.output_channels 51.0 +83 28 model.input_dropout 0.2950146234638968 +83 28 model.output_dropout 0.07065554514728356 +83 28 model.feature_map_dropout 0.06010856588829627 +83 28 model.embedding_dim 2.0 +83 28 loss.margin 3.029532961928962 +83 28 optimizer.lr 0.09384557414069093 +83 28 negative_sampler.num_negs_per_pos 60.0 +83 28 training.batch_size 2.0 +83 29 model.output_channels 39.0 +83 29 model.input_dropout 0.47964229010146703 +83 29 model.output_dropout 0.3457104127639498 +83 29 model.feature_map_dropout 0.057665894311953514 +83 29 model.embedding_dim 0.0 +83 29 loss.margin 9.92758350128094 +83 29 optimizer.lr 0.09686211381161108 +83 29 negative_sampler.num_negs_per_pos 14.0 +83 29 training.batch_size 1.0 +83 30 model.output_channels 25.0 +83 30 model.input_dropout 0.266954086345898 +83 30 model.output_dropout 0.3449849836471708 +83 30 model.feature_map_dropout 0.40368054338805137 +83 30 model.embedding_dim 1.0 +83 30 loss.margin 5.142473917222992 +83 30 optimizer.lr 0.0028374847100680104 +83 30 negative_sampler.num_negs_per_pos 28.0 +83 30 training.batch_size 1.0 +83 31 model.output_channels 26.0 +83 31 model.input_dropout 0.14508984946527437 +83 31 model.output_dropout 0.10715717234292976 +83 31 model.feature_map_dropout 0.07041542405172457 +83 31 model.embedding_dim 0.0 +83 31 loss.margin 6.976209562016087 +83 31 optimizer.lr 0.021884457530079213 +83 31 
negative_sampler.num_negs_per_pos 83.0 +83 31 training.batch_size 2.0 +83 32 model.output_channels 63.0 +83 32 model.input_dropout 0.007434254539387064 +83 32 model.output_dropout 0.26631451364814773 +83 32 model.feature_map_dropout 0.41920912171487473 +83 32 model.embedding_dim 2.0 +83 32 loss.margin 7.332633694911541 +83 32 optimizer.lr 0.0029889229910814523 +83 32 negative_sampler.num_negs_per_pos 49.0 +83 32 training.batch_size 2.0 +83 33 model.output_channels 28.0 +83 33 model.input_dropout 0.30390361771114816 +83 33 model.output_dropout 0.20642116798217874 +83 33 model.feature_map_dropout 0.01092619197537853 +83 33 model.embedding_dim 1.0 +83 33 loss.margin 1.645618946019631 +83 33 optimizer.lr 0.033488067275712845 +83 33 negative_sampler.num_negs_per_pos 65.0 +83 33 training.batch_size 1.0 +83 34 model.output_channels 18.0 +83 34 model.input_dropout 0.229798676109156 +83 34 model.output_dropout 0.31369767995577663 +83 34 model.feature_map_dropout 0.056278763293391265 +83 34 model.embedding_dim 2.0 +83 34 loss.margin 0.7567914967150896 +83 34 optimizer.lr 0.025265042713757027 +83 34 negative_sampler.num_negs_per_pos 29.0 +83 34 training.batch_size 0.0 +83 35 model.output_channels 63.0 +83 35 model.input_dropout 0.2722409521133492 +83 35 model.output_dropout 0.39579379098255113 +83 35 model.feature_map_dropout 0.16453817054975134 +83 35 model.embedding_dim 1.0 +83 35 loss.margin 5.686222705016371 +83 35 optimizer.lr 0.009291565545985515 +83 35 negative_sampler.num_negs_per_pos 41.0 +83 35 training.batch_size 2.0 +83 36 model.output_channels 63.0 +83 36 model.input_dropout 0.46041730534462966 +83 36 model.output_dropout 0.20911147338661906 +83 36 model.feature_map_dropout 0.40927146037194123 +83 36 model.embedding_dim 0.0 +83 36 loss.margin 2.759690813749287 +83 36 optimizer.lr 0.007054652623868443 +83 36 negative_sampler.num_negs_per_pos 69.0 +83 36 training.batch_size 2.0 +83 37 model.output_channels 26.0 +83 37 model.input_dropout 0.2392418441815179 +83 37 
model.output_dropout 0.10794893514240722 +83 37 model.feature_map_dropout 0.06514849879361334 +83 37 model.embedding_dim 0.0 +83 37 loss.margin 9.198793508673836 +83 37 optimizer.lr 0.009277127711982256 +83 37 negative_sampler.num_negs_per_pos 19.0 +83 37 training.batch_size 2.0 +83 38 model.output_channels 27.0 +83 38 model.input_dropout 0.2153013658234041 +83 38 model.output_dropout 0.1768910324586101 +83 38 model.feature_map_dropout 0.008768812079678057 +83 38 model.embedding_dim 1.0 +83 38 loss.margin 8.09157142174109 +83 38 optimizer.lr 0.01024353097135488 +83 38 negative_sampler.num_negs_per_pos 37.0 +83 38 training.batch_size 2.0 +83 39 model.output_channels 24.0 +83 39 model.input_dropout 0.13992443619979322 +83 39 model.output_dropout 0.3503666084455309 +83 39 model.feature_map_dropout 0.0784920709753012 +83 39 model.embedding_dim 1.0 +83 39 loss.margin 7.6497595444123485 +83 39 optimizer.lr 0.046113141109240106 +83 39 negative_sampler.num_negs_per_pos 12.0 +83 39 training.batch_size 2.0 +83 40 model.output_channels 24.0 +83 40 model.input_dropout 0.0005352637547366701 +83 40 model.output_dropout 0.2536878001223848 +83 40 model.feature_map_dropout 0.25132112450957284 +83 40 model.embedding_dim 0.0 +83 40 loss.margin 8.7665223307608 +83 40 optimizer.lr 0.0014683772254739898 +83 40 negative_sampler.num_negs_per_pos 65.0 +83 40 training.batch_size 2.0 +83 41 model.output_channels 34.0 +83 41 model.input_dropout 0.009824437463659907 +83 41 model.output_dropout 0.3807392419912529 +83 41 model.feature_map_dropout 0.28357314238874104 +83 41 model.embedding_dim 1.0 +83 41 loss.margin 9.16447621687066 +83 41 optimizer.lr 0.004165187746507396 +83 41 negative_sampler.num_negs_per_pos 1.0 +83 41 training.batch_size 1.0 +83 42 model.output_channels 26.0 +83 42 model.input_dropout 0.008212389141948495 +83 42 model.output_dropout 0.16499026363070018 +83 42 model.feature_map_dropout 0.2907516186697833 +83 42 model.embedding_dim 0.0 +83 42 loss.margin 3.7694920551705193 
+83 42 optimizer.lr 0.0024728910944283836 +83 42 negative_sampler.num_negs_per_pos 41.0 +83 42 training.batch_size 0.0 +83 43 model.output_channels 33.0 +83 43 model.input_dropout 0.49713414018489577 +83 43 model.output_dropout 0.3371141709889822 +83 43 model.feature_map_dropout 0.37688356637241965 +83 43 model.embedding_dim 1.0 +83 43 loss.margin 1.2387755160740177 +83 43 optimizer.lr 0.09599530557708204 +83 43 negative_sampler.num_negs_per_pos 54.0 +83 43 training.batch_size 2.0 +83 44 model.output_channels 62.0 +83 44 model.input_dropout 0.422712249572994 +83 44 model.output_dropout 0.1040299177604122 +83 44 model.feature_map_dropout 0.2947116017405768 +83 44 model.embedding_dim 2.0 +83 44 loss.margin 4.478266529045317 +83 44 optimizer.lr 0.006486910733822156 +83 44 negative_sampler.num_negs_per_pos 92.0 +83 44 training.batch_size 0.0 +83 45 model.output_channels 58.0 +83 45 model.input_dropout 0.14159269359440274 +83 45 model.output_dropout 0.4767187429237153 +83 45 model.feature_map_dropout 0.31924130915197685 +83 45 model.embedding_dim 1.0 +83 45 loss.margin 1.4273143734113942 +83 45 optimizer.lr 0.006185741953811626 +83 45 negative_sampler.num_negs_per_pos 1.0 +83 45 training.batch_size 2.0 +83 46 model.output_channels 17.0 +83 46 model.input_dropout 0.20724514379068043 +83 46 model.output_dropout 0.3809436141059099 +83 46 model.feature_map_dropout 0.15970339705323006 +83 46 model.embedding_dim 1.0 +83 46 loss.margin 7.879153649399041 +83 46 optimizer.lr 0.042309439619774396 +83 46 negative_sampler.num_negs_per_pos 1.0 +83 46 training.batch_size 0.0 +83 47 model.output_channels 32.0 +83 47 model.input_dropout 0.136667906290025 +83 47 model.output_dropout 0.32278088368856006 +83 47 model.feature_map_dropout 0.17635701115067093 +83 47 model.embedding_dim 2.0 +83 47 loss.margin 2.3107476495910424 +83 47 optimizer.lr 0.002966350647723414 +83 47 negative_sampler.num_negs_per_pos 90.0 +83 47 training.batch_size 0.0 +83 48 model.output_channels 52.0 +83 48 
model.input_dropout 0.11821529871769149 +83 48 model.output_dropout 0.36644999320479243 +83 48 model.feature_map_dropout 0.4209894024563693 +83 48 model.embedding_dim 2.0 +83 48 loss.margin 3.0533406603172977 +83 48 optimizer.lr 0.0019099384954452516 +83 48 negative_sampler.num_negs_per_pos 49.0 +83 48 training.batch_size 1.0 +83 49 model.output_channels 42.0 +83 49 model.input_dropout 0.4448390046720836 +83 49 model.output_dropout 0.2853841203448425 +83 49 model.feature_map_dropout 0.49221359864058656 +83 49 model.embedding_dim 0.0 +83 49 loss.margin 3.739845036703086 +83 49 optimizer.lr 0.003069158633089049 +83 49 negative_sampler.num_negs_per_pos 39.0 +83 49 training.batch_size 0.0 +83 50 model.output_channels 26.0 +83 50 model.input_dropout 0.4394061354373828 +83 50 model.output_dropout 0.4694824566725774 +83 50 model.feature_map_dropout 0.2226148500969965 +83 50 model.embedding_dim 0.0 +83 50 loss.margin 7.844384086596513 +83 50 optimizer.lr 0.001921205109296189 +83 50 negative_sampler.num_negs_per_pos 0.0 +83 50 training.batch_size 0.0 +83 51 model.output_channels 44.0 +83 51 model.input_dropout 0.47830557769323545 +83 51 model.output_dropout 0.4883170850834628 +83 51 model.feature_map_dropout 0.09492547273033297 +83 51 model.embedding_dim 2.0 +83 51 loss.margin 4.151086793674489 +83 51 optimizer.lr 0.0032760939907174606 +83 51 negative_sampler.num_negs_per_pos 24.0 +83 51 training.batch_size 2.0 +83 52 model.output_channels 18.0 +83 52 model.input_dropout 0.43197641815737536 +83 52 model.output_dropout 0.1449127641361746 +83 52 model.feature_map_dropout 0.0884192672636358 +83 52 model.embedding_dim 0.0 +83 52 loss.margin 7.447698311472238 +83 52 optimizer.lr 0.0012809020133551562 +83 52 negative_sampler.num_negs_per_pos 59.0 +83 52 training.batch_size 1.0 +83 53 model.output_channels 19.0 +83 53 model.input_dropout 0.4199683792044959 +83 53 model.output_dropout 0.48564600506616873 +83 53 model.feature_map_dropout 0.18797679252803395 +83 53 
model.embedding_dim 1.0 +83 53 loss.margin 5.320738613221836 +83 53 optimizer.lr 0.0635299629976656 +83 53 negative_sampler.num_negs_per_pos 74.0 +83 53 training.batch_size 1.0 +83 54 model.output_channels 34.0 +83 54 model.input_dropout 0.05940546085788162 +83 54 model.output_dropout 0.02435508158755173 +83 54 model.feature_map_dropout 0.37954908760073913 +83 54 model.embedding_dim 1.0 +83 54 loss.margin 8.818478724953444 +83 54 optimizer.lr 0.018794664304984974 +83 54 negative_sampler.num_negs_per_pos 18.0 +83 54 training.batch_size 0.0 +83 55 model.output_channels 51.0 +83 55 model.input_dropout 0.2344220009551069 +83 55 model.output_dropout 0.04756361947414345 +83 55 model.feature_map_dropout 0.044317711124327086 +83 55 model.embedding_dim 2.0 +83 55 loss.margin 6.0108671066820945 +83 55 optimizer.lr 0.08563342402655623 +83 55 negative_sampler.num_negs_per_pos 17.0 +83 55 training.batch_size 1.0 +83 56 model.output_channels 60.0 +83 56 model.input_dropout 0.34775013146984984 +83 56 model.output_dropout 0.08071067790828013 +83 56 model.feature_map_dropout 0.2906424797036174 +83 56 model.embedding_dim 1.0 +83 56 loss.margin 6.265044881162113 +83 56 optimizer.lr 0.01431809642774164 +83 56 negative_sampler.num_negs_per_pos 19.0 +83 56 training.batch_size 1.0 +83 57 model.output_channels 25.0 +83 57 model.input_dropout 0.17944618764826942 +83 57 model.output_dropout 0.2602468037356441 +83 57 model.feature_map_dropout 0.14841198737149652 +83 57 model.embedding_dim 1.0 +83 57 loss.margin 7.196212597822248 +83 57 optimizer.lr 0.012376332048218424 +83 57 negative_sampler.num_negs_per_pos 31.0 +83 57 training.batch_size 2.0 +83 58 model.output_channels 23.0 +83 58 model.input_dropout 0.4930076150738633 +83 58 model.output_dropout 0.499970802101764 +83 58 model.feature_map_dropout 0.028107632144609207 +83 58 model.embedding_dim 2.0 +83 58 loss.margin 2.6646183492181232 +83 58 optimizer.lr 0.0012266132345975568 +83 58 negative_sampler.num_negs_per_pos 33.0 +83 58 
training.batch_size 2.0 +83 59 model.output_channels 37.0 +83 59 model.input_dropout 0.21863626294509564 +83 59 model.output_dropout 0.23888793140580167 +83 59 model.feature_map_dropout 0.4252880089393107 +83 59 model.embedding_dim 1.0 +83 59 loss.margin 8.142620326693475 +83 59 optimizer.lr 0.013616930813606796 +83 59 negative_sampler.num_negs_per_pos 5.0 +83 59 training.batch_size 0.0 +83 60 model.output_channels 17.0 +83 60 model.input_dropout 0.1656759042136805 +83 60 model.output_dropout 0.13682533496993748 +83 60 model.feature_map_dropout 0.4355767585162253 +83 60 model.embedding_dim 2.0 +83 60 loss.margin 1.3367043748121954 +83 60 optimizer.lr 0.003700739951850812 +83 60 negative_sampler.num_negs_per_pos 89.0 +83 60 training.batch_size 0.0 +83 61 model.output_channels 45.0 +83 61 model.input_dropout 0.25479175913954244 +83 61 model.output_dropout 0.4283833104402713 +83 61 model.feature_map_dropout 0.1387511087557754 +83 61 model.embedding_dim 2.0 +83 61 loss.margin 1.6351044178702177 +83 61 optimizer.lr 0.03862559615964905 +83 61 negative_sampler.num_negs_per_pos 31.0 +83 61 training.batch_size 1.0 +83 62 model.output_channels 34.0 +83 62 model.input_dropout 0.18840073219188835 +83 62 model.output_dropout 0.23196469139369486 +83 62 model.feature_map_dropout 0.201152781030347 +83 62 model.embedding_dim 0.0 +83 62 loss.margin 2.1716270318074202 +83 62 optimizer.lr 0.005949071666771199 +83 62 negative_sampler.num_negs_per_pos 62.0 +83 62 training.batch_size 0.0 +83 63 model.output_channels 47.0 +83 63 model.input_dropout 0.31616254384591636 +83 63 model.output_dropout 0.3234644965747462 +83 63 model.feature_map_dropout 0.4078563470560895 +83 63 model.embedding_dim 1.0 +83 63 loss.margin 9.37689694322561 +83 63 optimizer.lr 0.0011949859521228747 +83 63 negative_sampler.num_negs_per_pos 99.0 +83 63 training.batch_size 0.0 +83 64 model.output_channels 21.0 +83 64 model.input_dropout 0.31239799795580275 +83 64 model.output_dropout 0.49835130921711956 +83 64 
model.feature_map_dropout 0.02535648189730988 +83 64 model.embedding_dim 0.0 +83 64 loss.margin 9.295358933787593 +83 64 optimizer.lr 0.011271266451333228 +83 64 negative_sampler.num_negs_per_pos 60.0 +83 64 training.batch_size 1.0 +83 65 model.output_channels 51.0 +83 65 model.input_dropout 0.434295491678214 +83 65 model.output_dropout 0.09893931615717044 +83 65 model.feature_map_dropout 0.05660177304607911 +83 65 model.embedding_dim 0.0 +83 65 loss.margin 7.876928138497917 +83 65 optimizer.lr 0.004143309612240058 +83 65 negative_sampler.num_negs_per_pos 53.0 +83 65 training.batch_size 2.0 +83 66 model.output_channels 58.0 +83 66 model.input_dropout 0.23660272868628712 +83 66 model.output_dropout 0.05762628785812307 +83 66 model.feature_map_dropout 0.1757049021460842 +83 66 model.embedding_dim 1.0 +83 66 loss.margin 5.118743327460326 +83 66 optimizer.lr 0.0018969450646565626 +83 66 negative_sampler.num_negs_per_pos 10.0 +83 66 training.batch_size 2.0 +83 67 model.output_channels 57.0 +83 67 model.input_dropout 0.36567409606802104 +83 67 model.output_dropout 0.45707968921671616 +83 67 model.feature_map_dropout 0.11051056319787544 +83 67 model.embedding_dim 0.0 +83 67 loss.margin 1.6666846131947293 +83 67 optimizer.lr 0.002508207091276366 +83 67 negative_sampler.num_negs_per_pos 20.0 +83 67 training.batch_size 1.0 +83 68 model.output_channels 26.0 +83 68 model.input_dropout 0.3564136445440799 +83 68 model.output_dropout 0.10817961334915482 +83 68 model.feature_map_dropout 0.21322553650084908 +83 68 model.embedding_dim 2.0 +83 68 loss.margin 6.848814877141637 +83 68 optimizer.lr 0.011786386331233075 +83 68 negative_sampler.num_negs_per_pos 5.0 +83 68 training.batch_size 1.0 +83 69 model.output_channels 44.0 +83 69 model.input_dropout 0.2318486281325427 +83 69 model.output_dropout 0.2789482914286935 +83 69 model.feature_map_dropout 0.28289058283425295 +83 69 model.embedding_dim 1.0 +83 69 loss.margin 9.106660780833352 +83 69 optimizer.lr 0.011151207004922429 +83 69 
negative_sampler.num_negs_per_pos 73.0 +83 69 training.batch_size 0.0 +83 70 model.output_channels 58.0 +83 70 model.input_dropout 0.2538153635827539 +83 70 model.output_dropout 0.12158445994941691 +83 70 model.feature_map_dropout 0.16763531342854748 +83 70 model.embedding_dim 0.0 +83 70 loss.margin 0.7259194589447808 +83 70 optimizer.lr 0.0029855028353357777 +83 70 negative_sampler.num_negs_per_pos 58.0 +83 70 training.batch_size 0.0 +83 71 model.output_channels 55.0 +83 71 model.input_dropout 0.11060586789023835 +83 71 model.output_dropout 0.49818672997997615 +83 71 model.feature_map_dropout 0.4114565846971244 +83 71 model.embedding_dim 0.0 +83 71 loss.margin 8.907783278371054 +83 71 optimizer.lr 0.011978373510440135 +83 71 negative_sampler.num_negs_per_pos 57.0 +83 71 training.batch_size 0.0 +83 72 model.output_channels 30.0 +83 72 model.input_dropout 0.22384179399976445 +83 72 model.output_dropout 0.20255473646886224 +83 72 model.feature_map_dropout 0.17545025086964106 +83 72 model.embedding_dim 0.0 +83 72 loss.margin 6.2529107275768645 +83 72 optimizer.lr 0.014228993500271656 +83 72 negative_sampler.num_negs_per_pos 80.0 +83 72 training.batch_size 1.0 +83 73 model.output_channels 37.0 +83 73 model.input_dropout 0.27345198170387514 +83 73 model.output_dropout 0.12453740224059467 +83 73 model.feature_map_dropout 0.339912283216464 +83 73 model.embedding_dim 2.0 +83 73 loss.margin 4.603181426497437 +83 73 optimizer.lr 0.018719057085562985 +83 73 negative_sampler.num_negs_per_pos 54.0 +83 73 training.batch_size 2.0 +83 74 model.output_channels 59.0 +83 74 model.input_dropout 0.2318021939765748 +83 74 model.output_dropout 0.22419658499952183 +83 74 model.feature_map_dropout 0.09623243206408288 +83 74 model.embedding_dim 0.0 +83 74 loss.margin 3.597639054589055 +83 74 optimizer.lr 0.03089300694458799 +83 74 negative_sampler.num_negs_per_pos 47.0 +83 74 training.batch_size 2.0 +83 75 model.output_channels 54.0 +83 75 model.input_dropout 0.2949419103048374 +83 75 
model.output_dropout 0.2364888223061627 +83 75 model.feature_map_dropout 0.00016970331384835724 +83 75 model.embedding_dim 2.0 +83 75 loss.margin 7.62522476989629 +83 75 optimizer.lr 0.0012204471835546846 +83 75 negative_sampler.num_negs_per_pos 13.0 +83 75 training.batch_size 1.0 +83 76 model.output_channels 24.0 +83 76 model.input_dropout 0.15142602065857896 +83 76 model.output_dropout 0.21010975935392773 +83 76 model.feature_map_dropout 0.12109633137148956 +83 76 model.embedding_dim 1.0 +83 76 loss.margin 4.102106619254794 +83 76 optimizer.lr 0.005368063103623659 +83 76 negative_sampler.num_negs_per_pos 5.0 +83 76 training.batch_size 2.0 +83 77 model.output_channels 37.0 +83 77 model.input_dropout 0.2750357656637046 +83 77 model.output_dropout 0.31984488717187987 +83 77 model.feature_map_dropout 0.10302163105504042 +83 77 model.embedding_dim 0.0 +83 77 loss.margin 7.7853697464992315 +83 77 optimizer.lr 0.0018025742436727705 +83 77 negative_sampler.num_negs_per_pos 18.0 +83 77 training.batch_size 2.0 +83 78 model.output_channels 17.0 +83 78 model.input_dropout 0.08888709989597993 +83 78 model.output_dropout 0.1935906056780381 +83 78 model.feature_map_dropout 0.44514279302662746 +83 78 model.embedding_dim 0.0 +83 78 loss.margin 4.45431433704594 +83 78 optimizer.lr 0.006999914773523249 +83 78 negative_sampler.num_negs_per_pos 53.0 +83 78 training.batch_size 2.0 +83 79 model.output_channels 60.0 +83 79 model.input_dropout 0.23494821936960453 +83 79 model.output_dropout 0.24424057107023744 +83 79 model.feature_map_dropout 0.07950538055185674 +83 79 model.embedding_dim 1.0 +83 79 loss.margin 3.401959124156497 +83 79 optimizer.lr 0.07026891101339044 +83 79 negative_sampler.num_negs_per_pos 50.0 +83 79 training.batch_size 2.0 +83 80 model.output_channels 20.0 +83 80 model.input_dropout 0.26540124847648033 +83 80 model.output_dropout 0.443235862169143 +83 80 model.feature_map_dropout 0.4957166156328852 +83 80 model.embedding_dim 1.0 +83 80 loss.margin 9.28597924913244 
+83 80 optimizer.lr 0.0086138686250399 +83 80 negative_sampler.num_negs_per_pos 88.0 +83 80 training.batch_size 0.0 +83 81 model.output_channels 35.0 +83 81 model.input_dropout 0.164571415946267 +83 81 model.output_dropout 0.1262526426552495 +83 81 model.feature_map_dropout 0.4571791771189537 +83 81 model.embedding_dim 1.0 +83 81 loss.margin 3.7811266163313455 +83 81 optimizer.lr 0.015647683977922627 +83 81 negative_sampler.num_negs_per_pos 99.0 +83 81 training.batch_size 2.0 +83 82 model.output_channels 49.0 +83 82 model.input_dropout 0.13473426652154435 +83 82 model.output_dropout 0.15616509630150283 +83 82 model.feature_map_dropout 0.09541778767007425 +83 82 model.embedding_dim 2.0 +83 82 loss.margin 8.441454177247026 +83 82 optimizer.lr 0.02627835284812642 +83 82 negative_sampler.num_negs_per_pos 37.0 +83 82 training.batch_size 0.0 +83 83 model.output_channels 40.0 +83 83 model.input_dropout 0.269598306915936 +83 83 model.output_dropout 0.025917546322944518 +83 83 model.feature_map_dropout 0.06855015701370704 +83 83 model.embedding_dim 2.0 +83 83 loss.margin 5.77276249162456 +83 83 optimizer.lr 0.029643109330248957 +83 83 negative_sampler.num_negs_per_pos 83.0 +83 83 training.batch_size 1.0 +83 84 model.output_channels 42.0 +83 84 model.input_dropout 0.1300983263023252 +83 84 model.output_dropout 0.3457516445482368 +83 84 model.feature_map_dropout 0.2700295215813126 +83 84 model.embedding_dim 1.0 +83 84 loss.margin 7.164743963325046 +83 84 optimizer.lr 0.006655960917000728 +83 84 negative_sampler.num_negs_per_pos 57.0 +83 84 training.batch_size 1.0 +83 85 model.output_channels 29.0 +83 85 model.input_dropout 0.07681936354603924 +83 85 model.output_dropout 0.4139215873790082 +83 85 model.feature_map_dropout 0.24029535889995352 +83 85 model.embedding_dim 2.0 +83 85 loss.margin 7.571738552346087 +83 85 optimizer.lr 0.025419678833187333 +83 85 negative_sampler.num_negs_per_pos 29.0 +83 85 training.batch_size 1.0 +83 86 model.output_channels 36.0 +83 86 
model.input_dropout 0.019336839586908128 +83 86 model.output_dropout 0.22798963993842725 +83 86 model.feature_map_dropout 0.36761247962097926 +83 86 model.embedding_dim 1.0 +83 86 loss.margin 3.012970263582492 +83 86 optimizer.lr 0.00937541056122108 +83 86 negative_sampler.num_negs_per_pos 26.0 +83 86 training.batch_size 0.0 +83 87 model.output_channels 45.0 +83 87 model.input_dropout 0.1846182116489875 +83 87 model.output_dropout 0.18457548905247212 +83 87 model.feature_map_dropout 0.07019069358533159 +83 87 model.embedding_dim 0.0 +83 87 loss.margin 3.186279096570287 +83 87 optimizer.lr 0.0037649236255889915 +83 87 negative_sampler.num_negs_per_pos 29.0 +83 87 training.batch_size 0.0 +83 88 model.output_channels 16.0 +83 88 model.input_dropout 0.16900579898974638 +83 88 model.output_dropout 0.28376139534861716 +83 88 model.feature_map_dropout 0.3502282318343605 +83 88 model.embedding_dim 1.0 +83 88 loss.margin 7.770415802748656 +83 88 optimizer.lr 0.010098378619434552 +83 88 negative_sampler.num_negs_per_pos 26.0 +83 88 training.batch_size 2.0 +83 89 model.output_channels 42.0 +83 89 model.input_dropout 0.24997916940186465 +83 89 model.output_dropout 0.39123512120510434 +83 89 model.feature_map_dropout 0.4934145216026298 +83 89 model.embedding_dim 1.0 +83 89 loss.margin 7.798308395508424 +83 89 optimizer.lr 0.09523770934826466 +83 89 negative_sampler.num_negs_per_pos 88.0 +83 89 training.batch_size 0.0 +83 90 model.output_channels 43.0 +83 90 model.input_dropout 0.27865210609862834 +83 90 model.output_dropout 0.28435494362785463 +83 90 model.feature_map_dropout 0.18131358750490228 +83 90 model.embedding_dim 1.0 +83 90 loss.margin 4.505056163240451 +83 90 optimizer.lr 0.04302791072279426 +83 90 negative_sampler.num_negs_per_pos 35.0 +83 90 training.batch_size 0.0 +83 91 model.output_channels 64.0 +83 91 model.input_dropout 0.13402515006802895 +83 91 model.output_dropout 0.3530023851510686 +83 91 model.feature_map_dropout 0.38781549409296373 +83 91 
model.embedding_dim 1.0 +83 91 loss.margin 3.7775823591733193 +83 91 optimizer.lr 0.009394409503006431 +83 91 negative_sampler.num_negs_per_pos 48.0 +83 91 training.batch_size 2.0 +83 92 model.output_channels 18.0 +83 92 model.input_dropout 0.4918366546647444 +83 92 model.output_dropout 0.05980503461047676 +83 92 model.feature_map_dropout 0.3356533569282996 +83 92 model.embedding_dim 2.0 +83 92 loss.margin 4.162952381345097 +83 92 optimizer.lr 0.012781913782110799 +83 92 negative_sampler.num_negs_per_pos 60.0 +83 92 training.batch_size 1.0 +83 93 model.output_channels 47.0 +83 93 model.input_dropout 0.43287817655700794 +83 93 model.output_dropout 0.3911090796143754 +83 93 model.feature_map_dropout 0.06063952920058202 +83 93 model.embedding_dim 1.0 +83 93 loss.margin 1.1406560002034916 +83 93 optimizer.lr 0.014056325751623239 +83 93 negative_sampler.num_negs_per_pos 6.0 +83 93 training.batch_size 2.0 +83 94 model.output_channels 52.0 +83 94 model.input_dropout 0.2379932333738657 +83 94 model.output_dropout 0.058474888826349714 +83 94 model.feature_map_dropout 0.21478765094806973 +83 94 model.embedding_dim 1.0 +83 94 loss.margin 4.612734334185744 +83 94 optimizer.lr 0.02706354781266922 +83 94 negative_sampler.num_negs_per_pos 77.0 +83 94 training.batch_size 0.0 +83 95 model.output_channels 33.0 +83 95 model.input_dropout 0.4194604250291468 +83 95 model.output_dropout 0.10317793230619682 +83 95 model.feature_map_dropout 0.18033415557663013 +83 95 model.embedding_dim 2.0 +83 95 loss.margin 7.623648381725418 +83 95 optimizer.lr 0.01058822129789529 +83 95 negative_sampler.num_negs_per_pos 84.0 +83 95 training.batch_size 2.0 +83 96 model.output_channels 20.0 +83 96 model.input_dropout 0.31598299092435495 +83 96 model.output_dropout 0.17565465299086014 +83 96 model.feature_map_dropout 0.47743306969385496 +83 96 model.embedding_dim 1.0 +83 96 loss.margin 8.607215223187936 +83 96 optimizer.lr 0.005272188168282127 +83 96 negative_sampler.num_negs_per_pos 73.0 +83 96 
training.batch_size 1.0 +83 97 model.output_channels 33.0 +83 97 model.input_dropout 0.2565604564875775 +83 97 model.output_dropout 0.25191770774210426 +83 97 model.feature_map_dropout 0.4432439526825137 +83 97 model.embedding_dim 1.0 +83 97 loss.margin 3.9503056481609313 +83 97 optimizer.lr 0.08817767088127858 +83 97 negative_sampler.num_negs_per_pos 57.0 +83 97 training.batch_size 1.0 +83 98 model.output_channels 30.0 +83 98 model.input_dropout 0.08826547795549106 +83 98 model.output_dropout 0.350396619569797 +83 98 model.feature_map_dropout 0.43835079295847873 +83 98 model.embedding_dim 0.0 +83 98 loss.margin 8.698075722040596 +83 98 optimizer.lr 0.0779721143439301 +83 98 negative_sampler.num_negs_per_pos 66.0 +83 98 training.batch_size 1.0 +83 99 model.output_channels 34.0 +83 99 model.input_dropout 0.018145342653325014 +83 99 model.output_dropout 0.38809540050393815 +83 99 model.feature_map_dropout 0.37389410653580374 +83 99 model.embedding_dim 1.0 +83 99 loss.margin 1.5436988277891532 +83 99 optimizer.lr 0.0032856989374618678 +83 99 negative_sampler.num_negs_per_pos 13.0 +83 99 training.batch_size 1.0 +83 100 model.output_channels 40.0 +83 100 model.input_dropout 0.32277749938062855 +83 100 model.output_dropout 0.17203845041530125 +83 100 model.feature_map_dropout 0.4204315595209095 +83 100 model.embedding_dim 2.0 +83 100 loss.margin 9.915546463976645 +83 100 optimizer.lr 0.0075523744661353046 +83 100 negative_sampler.num_negs_per_pos 92.0 +83 100 training.batch_size 0.0 +83 1 dataset """kinships""" +83 1 model """conve""" +83 1 loss """marginranking""" +83 1 regularizer """no""" +83 1 optimizer """adam""" +83 1 training_loop """owa""" +83 1 negative_sampler """basic""" +83 1 evaluator """rankbased""" +83 2 dataset """kinships""" +83 2 model """conve""" +83 2 loss """marginranking""" +83 2 regularizer """no""" +83 2 optimizer """adam""" +83 2 training_loop """owa""" +83 2 negative_sampler """basic""" +83 2 evaluator """rankbased""" +83 3 dataset 
"""kinships""" +83 3 model """conve""" +83 3 loss """marginranking""" +83 3 regularizer """no""" +83 3 optimizer """adam""" +83 3 training_loop """owa""" +83 3 negative_sampler """basic""" +83 3 evaluator """rankbased""" +83 4 dataset """kinships""" +83 4 model """conve""" +83 4 loss """marginranking""" +83 4 regularizer """no""" +83 4 optimizer """adam""" +83 4 training_loop """owa""" +83 4 negative_sampler """basic""" +83 4 evaluator """rankbased""" +83 5 dataset """kinships""" +83 5 model """conve""" +83 5 loss """marginranking""" +83 5 regularizer """no""" +83 5 optimizer """adam""" +83 5 training_loop """owa""" +83 5 negative_sampler """basic""" +83 5 evaluator """rankbased""" +83 6 dataset """kinships""" +83 6 model """conve""" +83 6 loss """marginranking""" +83 6 regularizer """no""" +83 6 optimizer """adam""" +83 6 training_loop """owa""" +83 6 negative_sampler """basic""" +83 6 evaluator """rankbased""" +83 7 dataset """kinships""" +83 7 model """conve""" +83 7 loss """marginranking""" +83 7 regularizer """no""" +83 7 optimizer """adam""" +83 7 training_loop """owa""" +83 7 negative_sampler """basic""" +83 7 evaluator """rankbased""" +83 8 dataset """kinships""" +83 8 model """conve""" +83 8 loss """marginranking""" +83 8 regularizer """no""" +83 8 optimizer """adam""" +83 8 training_loop """owa""" +83 8 negative_sampler """basic""" +83 8 evaluator """rankbased""" +83 9 dataset """kinships""" +83 9 model """conve""" +83 9 loss """marginranking""" +83 9 regularizer """no""" +83 9 optimizer """adam""" +83 9 training_loop """owa""" +83 9 negative_sampler """basic""" +83 9 evaluator """rankbased""" +83 10 dataset """kinships""" +83 10 model """conve""" +83 10 loss """marginranking""" +83 10 regularizer """no""" +83 10 optimizer """adam""" +83 10 training_loop """owa""" +83 10 negative_sampler """basic""" +83 10 evaluator """rankbased""" +83 11 dataset """kinships""" +83 11 model """conve""" +83 11 loss """marginranking""" +83 11 regularizer """no""" +83 11 
optimizer """adam""" +83 11 training_loop """owa""" +83 11 negative_sampler """basic""" +83 11 evaluator """rankbased""" +83 12 dataset """kinships""" +83 12 model """conve""" +83 12 loss """marginranking""" +83 12 regularizer """no""" +83 12 optimizer """adam""" +83 12 training_loop """owa""" +83 12 negative_sampler """basic""" +83 12 evaluator """rankbased""" +83 13 dataset """kinships""" +83 13 model """conve""" +83 13 loss """marginranking""" +83 13 regularizer """no""" +83 13 optimizer """adam""" +83 13 training_loop """owa""" +83 13 negative_sampler """basic""" +83 13 evaluator """rankbased""" +83 14 dataset """kinships""" +83 14 model """conve""" +83 14 loss """marginranking""" +83 14 regularizer """no""" +83 14 optimizer """adam""" +83 14 training_loop """owa""" +83 14 negative_sampler """basic""" +83 14 evaluator """rankbased""" +83 15 dataset """kinships""" +83 15 model """conve""" +83 15 loss """marginranking""" +83 15 regularizer """no""" +83 15 optimizer """adam""" +83 15 training_loop """owa""" +83 15 negative_sampler """basic""" +83 15 evaluator """rankbased""" +83 16 dataset """kinships""" +83 16 model """conve""" +83 16 loss """marginranking""" +83 16 regularizer """no""" +83 16 optimizer """adam""" +83 16 training_loop """owa""" +83 16 negative_sampler """basic""" +83 16 evaluator """rankbased""" +83 17 dataset """kinships""" +83 17 model """conve""" +83 17 loss """marginranking""" +83 17 regularizer """no""" +83 17 optimizer """adam""" +83 17 training_loop """owa""" +83 17 negative_sampler """basic""" +83 17 evaluator """rankbased""" +83 18 dataset """kinships""" +83 18 model """conve""" +83 18 loss """marginranking""" +83 18 regularizer """no""" +83 18 optimizer """adam""" +83 18 training_loop """owa""" +83 18 negative_sampler """basic""" +83 18 evaluator """rankbased""" +83 19 dataset """kinships""" +83 19 model """conve""" +83 19 loss """marginranking""" +83 19 regularizer """no""" +83 19 optimizer """adam""" +83 19 training_loop """owa""" +83 
19 negative_sampler """basic""" +83 19 evaluator """rankbased""" +83 20 dataset """kinships""" +83 20 model """conve""" +83 20 loss """marginranking""" +83 20 regularizer """no""" +83 20 optimizer """adam""" +83 20 training_loop """owa""" +83 20 negative_sampler """basic""" +83 20 evaluator """rankbased""" +83 21 dataset """kinships""" +83 21 model """conve""" +83 21 loss """marginranking""" +83 21 regularizer """no""" +83 21 optimizer """adam""" +83 21 training_loop """owa""" +83 21 negative_sampler """basic""" +83 21 evaluator """rankbased""" +83 22 dataset """kinships""" +83 22 model """conve""" +83 22 loss """marginranking""" +83 22 regularizer """no""" +83 22 optimizer """adam""" +83 22 training_loop """owa""" +83 22 negative_sampler """basic""" +83 22 evaluator """rankbased""" +83 23 dataset """kinships""" +83 23 model """conve""" +83 23 loss """marginranking""" +83 23 regularizer """no""" +83 23 optimizer """adam""" +83 23 training_loop """owa""" +83 23 negative_sampler """basic""" +83 23 evaluator """rankbased""" +83 24 dataset """kinships""" +83 24 model """conve""" +83 24 loss """marginranking""" +83 24 regularizer """no""" +83 24 optimizer """adam""" +83 24 training_loop """owa""" +83 24 negative_sampler """basic""" +83 24 evaluator """rankbased""" +83 25 dataset """kinships""" +83 25 model """conve""" +83 25 loss """marginranking""" +83 25 regularizer """no""" +83 25 optimizer """adam""" +83 25 training_loop """owa""" +83 25 negative_sampler """basic""" +83 25 evaluator """rankbased""" +83 26 dataset """kinships""" +83 26 model """conve""" +83 26 loss """marginranking""" +83 26 regularizer """no""" +83 26 optimizer """adam""" +83 26 training_loop """owa""" +83 26 negative_sampler """basic""" +83 26 evaluator """rankbased""" +83 27 dataset """kinships""" +83 27 model """conve""" +83 27 loss """marginranking""" +83 27 regularizer """no""" +83 27 optimizer """adam""" +83 27 training_loop """owa""" +83 27 negative_sampler """basic""" +83 27 evaluator 
"""rankbased""" +83 28 dataset """kinships""" +83 28 model """conve""" +83 28 loss """marginranking""" +83 28 regularizer """no""" +83 28 optimizer """adam""" +83 28 training_loop """owa""" +83 28 negative_sampler """basic""" +83 28 evaluator """rankbased""" +83 29 dataset """kinships""" +83 29 model """conve""" +83 29 loss """marginranking""" +83 29 regularizer """no""" +83 29 optimizer """adam""" +83 29 training_loop """owa""" +83 29 negative_sampler """basic""" +83 29 evaluator """rankbased""" +83 30 dataset """kinships""" +83 30 model """conve""" +83 30 loss """marginranking""" +83 30 regularizer """no""" +83 30 optimizer """adam""" +83 30 training_loop """owa""" +83 30 negative_sampler """basic""" +83 30 evaluator """rankbased""" +83 31 dataset """kinships""" +83 31 model """conve""" +83 31 loss """marginranking""" +83 31 regularizer """no""" +83 31 optimizer """adam""" +83 31 training_loop """owa""" +83 31 negative_sampler """basic""" +83 31 evaluator """rankbased""" +83 32 dataset """kinships""" +83 32 model """conve""" +83 32 loss """marginranking""" +83 32 regularizer """no""" +83 32 optimizer """adam""" +83 32 training_loop """owa""" +83 32 negative_sampler """basic""" +83 32 evaluator """rankbased""" +83 33 dataset """kinships""" +83 33 model """conve""" +83 33 loss """marginranking""" +83 33 regularizer """no""" +83 33 optimizer """adam""" +83 33 training_loop """owa""" +83 33 negative_sampler """basic""" +83 33 evaluator """rankbased""" +83 34 dataset """kinships""" +83 34 model """conve""" +83 34 loss """marginranking""" +83 34 regularizer """no""" +83 34 optimizer """adam""" +83 34 training_loop """owa""" +83 34 negative_sampler """basic""" +83 34 evaluator """rankbased""" +83 35 dataset """kinships""" +83 35 model """conve""" +83 35 loss """marginranking""" +83 35 regularizer """no""" +83 35 optimizer """adam""" +83 35 training_loop """owa""" +83 35 negative_sampler """basic""" +83 35 evaluator """rankbased""" +83 36 dataset """kinships""" +83 36 
model """conve""" +83 36 loss """marginranking""" +83 36 regularizer """no""" +83 36 optimizer """adam""" +83 36 training_loop """owa""" +83 36 negative_sampler """basic""" +83 36 evaluator """rankbased""" +83 37 dataset """kinships""" +83 37 model """conve""" +83 37 loss """marginranking""" +83 37 regularizer """no""" +83 37 optimizer """adam""" +83 37 training_loop """owa""" +83 37 negative_sampler """basic""" +83 37 evaluator """rankbased""" +83 38 dataset """kinships""" +83 38 model """conve""" +83 38 loss """marginranking""" +83 38 regularizer """no""" +83 38 optimizer """adam""" +83 38 training_loop """owa""" +83 38 negative_sampler """basic""" +83 38 evaluator """rankbased""" +83 39 dataset """kinships""" +83 39 model """conve""" +83 39 loss """marginranking""" +83 39 regularizer """no""" +83 39 optimizer """adam""" +83 39 training_loop """owa""" +83 39 negative_sampler """basic""" +83 39 evaluator """rankbased""" +83 40 dataset """kinships""" +83 40 model """conve""" +83 40 loss """marginranking""" +83 40 regularizer """no""" +83 40 optimizer """adam""" +83 40 training_loop """owa""" +83 40 negative_sampler """basic""" +83 40 evaluator """rankbased""" +83 41 dataset """kinships""" +83 41 model """conve""" +83 41 loss """marginranking""" +83 41 regularizer """no""" +83 41 optimizer """adam""" +83 41 training_loop """owa""" +83 41 negative_sampler """basic""" +83 41 evaluator """rankbased""" +83 42 dataset """kinships""" +83 42 model """conve""" +83 42 loss """marginranking""" +83 42 regularizer """no""" +83 42 optimizer """adam""" +83 42 training_loop """owa""" +83 42 negative_sampler """basic""" +83 42 evaluator """rankbased""" +83 43 dataset """kinships""" +83 43 model """conve""" +83 43 loss """marginranking""" +83 43 regularizer """no""" +83 43 optimizer """adam""" +83 43 training_loop """owa""" +83 43 negative_sampler """basic""" +83 43 evaluator """rankbased""" +83 44 dataset """kinships""" +83 44 model """conve""" +83 44 loss """marginranking""" +83 
44 regularizer """no""" +83 44 optimizer """adam""" +83 44 training_loop """owa""" +83 44 negative_sampler """basic""" +83 44 evaluator """rankbased""" +83 45 dataset """kinships""" +83 45 model """conve""" +83 45 loss """marginranking""" +83 45 regularizer """no""" +83 45 optimizer """adam""" +83 45 training_loop """owa""" +83 45 negative_sampler """basic""" +83 45 evaluator """rankbased""" +83 46 dataset """kinships""" +83 46 model """conve""" +83 46 loss """marginranking""" +83 46 regularizer """no""" +83 46 optimizer """adam""" +83 46 training_loop """owa""" +83 46 negative_sampler """basic""" +83 46 evaluator """rankbased""" +83 47 dataset """kinships""" +83 47 model """conve""" +83 47 loss """marginranking""" +83 47 regularizer """no""" +83 47 optimizer """adam""" +83 47 training_loop """owa""" +83 47 negative_sampler """basic""" +83 47 evaluator """rankbased""" +83 48 dataset """kinships""" +83 48 model """conve""" +83 48 loss """marginranking""" +83 48 regularizer """no""" +83 48 optimizer """adam""" +83 48 training_loop """owa""" +83 48 negative_sampler """basic""" +83 48 evaluator """rankbased""" +83 49 dataset """kinships""" +83 49 model """conve""" +83 49 loss """marginranking""" +83 49 regularizer """no""" +83 49 optimizer """adam""" +83 49 training_loop """owa""" +83 49 negative_sampler """basic""" +83 49 evaluator """rankbased""" +83 50 dataset """kinships""" +83 50 model """conve""" +83 50 loss """marginranking""" +83 50 regularizer """no""" +83 50 optimizer """adam""" +83 50 training_loop """owa""" +83 50 negative_sampler """basic""" +83 50 evaluator """rankbased""" +83 51 dataset """kinships""" +83 51 model """conve""" +83 51 loss """marginranking""" +83 51 regularizer """no""" +83 51 optimizer """adam""" +83 51 training_loop """owa""" +83 51 negative_sampler """basic""" +83 51 evaluator """rankbased""" +83 52 dataset """kinships""" +83 52 model """conve""" +83 52 loss """marginranking""" +83 52 regularizer """no""" +83 52 optimizer """adam""" +83 
52 training_loop """owa""" +83 52 negative_sampler """basic""" +83 52 evaluator """rankbased""" +83 53 dataset """kinships""" +83 53 model """conve""" +83 53 loss """marginranking""" +83 53 regularizer """no""" +83 53 optimizer """adam""" +83 53 training_loop """owa""" +83 53 negative_sampler """basic""" +83 53 evaluator """rankbased""" +83 54 dataset """kinships""" +83 54 model """conve""" +83 54 loss """marginranking""" +83 54 regularizer """no""" +83 54 optimizer """adam""" +83 54 training_loop """owa""" +83 54 negative_sampler """basic""" +83 54 evaluator """rankbased""" +83 55 dataset """kinships""" +83 55 model """conve""" +83 55 loss """marginranking""" +83 55 regularizer """no""" +83 55 optimizer """adam""" +83 55 training_loop """owa""" +83 55 negative_sampler """basic""" +83 55 evaluator """rankbased""" +83 56 dataset """kinships""" +83 56 model """conve""" +83 56 loss """marginranking""" +83 56 regularizer """no""" +83 56 optimizer """adam""" +83 56 training_loop """owa""" +83 56 negative_sampler """basic""" +83 56 evaluator """rankbased""" +83 57 dataset """kinships""" +83 57 model """conve""" +83 57 loss """marginranking""" +83 57 regularizer """no""" +83 57 optimizer """adam""" +83 57 training_loop """owa""" +83 57 negative_sampler """basic""" +83 57 evaluator """rankbased""" +83 58 dataset """kinships""" +83 58 model """conve""" +83 58 loss """marginranking""" +83 58 regularizer """no""" +83 58 optimizer """adam""" +83 58 training_loop """owa""" +83 58 negative_sampler """basic""" +83 58 evaluator """rankbased""" +83 59 dataset """kinships""" +83 59 model """conve""" +83 59 loss """marginranking""" +83 59 regularizer """no""" +83 59 optimizer """adam""" +83 59 training_loop """owa""" +83 59 negative_sampler """basic""" +83 59 evaluator """rankbased""" +83 60 dataset """kinships""" +83 60 model """conve""" +83 60 loss """marginranking""" +83 60 regularizer """no""" +83 60 optimizer """adam""" +83 60 training_loop """owa""" +83 60 negative_sampler 
"""basic""" +83 60 evaluator """rankbased""" +83 61 dataset """kinships""" +83 61 model """conve""" +83 61 loss """marginranking""" +83 61 regularizer """no""" +83 61 optimizer """adam""" +83 61 training_loop """owa""" +83 61 negative_sampler """basic""" +83 61 evaluator """rankbased""" +83 62 dataset """kinships""" +83 62 model """conve""" +83 62 loss """marginranking""" +83 62 regularizer """no""" +83 62 optimizer """adam""" +83 62 training_loop """owa""" +83 62 negative_sampler """basic""" +83 62 evaluator """rankbased""" +83 63 dataset """kinships""" +83 63 model """conve""" +83 63 loss """marginranking""" +83 63 regularizer """no""" +83 63 optimizer """adam""" +83 63 training_loop """owa""" +83 63 negative_sampler """basic""" +83 63 evaluator """rankbased""" +83 64 dataset """kinships""" +83 64 model """conve""" +83 64 loss """marginranking""" +83 64 regularizer """no""" +83 64 optimizer """adam""" +83 64 training_loop """owa""" +83 64 negative_sampler """basic""" +83 64 evaluator """rankbased""" +83 65 dataset """kinships""" +83 65 model """conve""" +83 65 loss """marginranking""" +83 65 regularizer """no""" +83 65 optimizer """adam""" +83 65 training_loop """owa""" +83 65 negative_sampler """basic""" +83 65 evaluator """rankbased""" +83 66 dataset """kinships""" +83 66 model """conve""" +83 66 loss """marginranking""" +83 66 regularizer """no""" +83 66 optimizer """adam""" +83 66 training_loop """owa""" +83 66 negative_sampler """basic""" +83 66 evaluator """rankbased""" +83 67 dataset """kinships""" +83 67 model """conve""" +83 67 loss """marginranking""" +83 67 regularizer """no""" +83 67 optimizer """adam""" +83 67 training_loop """owa""" +83 67 negative_sampler """basic""" +83 67 evaluator """rankbased""" +83 68 dataset """kinships""" +83 68 model """conve""" +83 68 loss """marginranking""" +83 68 regularizer """no""" +83 68 optimizer """adam""" +83 68 training_loop """owa""" +83 68 negative_sampler """basic""" +83 68 evaluator """rankbased""" +83 69 
dataset """kinships""" +83 69 model """conve""" +83 69 loss """marginranking""" +83 69 regularizer """no""" +83 69 optimizer """adam""" +83 69 training_loop """owa""" +83 69 negative_sampler """basic""" +83 69 evaluator """rankbased""" +83 70 dataset """kinships""" +83 70 model """conve""" +83 70 loss """marginranking""" +83 70 regularizer """no""" +83 70 optimizer """adam""" +83 70 training_loop """owa""" +83 70 negative_sampler """basic""" +83 70 evaluator """rankbased""" +83 71 dataset """kinships""" +83 71 model """conve""" +83 71 loss """marginranking""" +83 71 regularizer """no""" +83 71 optimizer """adam""" +83 71 training_loop """owa""" +83 71 negative_sampler """basic""" +83 71 evaluator """rankbased""" +83 72 dataset """kinships""" +83 72 model """conve""" +83 72 loss """marginranking""" +83 72 regularizer """no""" +83 72 optimizer """adam""" +83 72 training_loop """owa""" +83 72 negative_sampler """basic""" +83 72 evaluator """rankbased""" +83 73 dataset """kinships""" +83 73 model """conve""" +83 73 loss """marginranking""" +83 73 regularizer """no""" +83 73 optimizer """adam""" +83 73 training_loop """owa""" +83 73 negative_sampler """basic""" +83 73 evaluator """rankbased""" +83 74 dataset """kinships""" +83 74 model """conve""" +83 74 loss """marginranking""" +83 74 regularizer """no""" +83 74 optimizer """adam""" +83 74 training_loop """owa""" +83 74 negative_sampler """basic""" +83 74 evaluator """rankbased""" +83 75 dataset """kinships""" +83 75 model """conve""" +83 75 loss """marginranking""" +83 75 regularizer """no""" +83 75 optimizer """adam""" +83 75 training_loop """owa""" +83 75 negative_sampler """basic""" +83 75 evaluator """rankbased""" +83 76 dataset """kinships""" +83 76 model """conve""" +83 76 loss """marginranking""" +83 76 regularizer """no""" +83 76 optimizer """adam""" +83 76 training_loop """owa""" +83 76 negative_sampler """basic""" +83 76 evaluator """rankbased""" +83 77 dataset """kinships""" +83 77 model """conve""" +83 77 
loss """marginranking""" +83 77 regularizer """no""" +83 77 optimizer """adam""" +83 77 training_loop """owa""" +83 77 negative_sampler """basic""" +83 77 evaluator """rankbased""" +83 78 dataset """kinships""" +83 78 model """conve""" +83 78 loss """marginranking""" +83 78 regularizer """no""" +83 78 optimizer """adam""" +83 78 training_loop """owa""" +83 78 negative_sampler """basic""" +83 78 evaluator """rankbased""" +83 79 dataset """kinships""" +83 79 model """conve""" +83 79 loss """marginranking""" +83 79 regularizer """no""" +83 79 optimizer """adam""" +83 79 training_loop """owa""" +83 79 negative_sampler """basic""" +83 79 evaluator """rankbased""" +83 80 dataset """kinships""" +83 80 model """conve""" +83 80 loss """marginranking""" +83 80 regularizer """no""" +83 80 optimizer """adam""" +83 80 training_loop """owa""" +83 80 negative_sampler """basic""" +83 80 evaluator """rankbased""" +83 81 dataset """kinships""" +83 81 model """conve""" +83 81 loss """marginranking""" +83 81 regularizer """no""" +83 81 optimizer """adam""" +83 81 training_loop """owa""" +83 81 negative_sampler """basic""" +83 81 evaluator """rankbased""" +83 82 dataset """kinships""" +83 82 model """conve""" +83 82 loss """marginranking""" +83 82 regularizer """no""" +83 82 optimizer """adam""" +83 82 training_loop """owa""" +83 82 negative_sampler """basic""" +83 82 evaluator """rankbased""" +83 83 dataset """kinships""" +83 83 model """conve""" +83 83 loss """marginranking""" +83 83 regularizer """no""" +83 83 optimizer """adam""" +83 83 training_loop """owa""" +83 83 negative_sampler """basic""" +83 83 evaluator """rankbased""" +83 84 dataset """kinships""" +83 84 model """conve""" +83 84 loss """marginranking""" +83 84 regularizer """no""" +83 84 optimizer """adam""" +83 84 training_loop """owa""" +83 84 negative_sampler """basic""" +83 84 evaluator """rankbased""" +83 85 dataset """kinships""" +83 85 model """conve""" +83 85 loss """marginranking""" +83 85 regularizer """no""" 
+83 85 optimizer """adam""" +83 85 training_loop """owa""" +83 85 negative_sampler """basic""" +83 85 evaluator """rankbased""" +83 86 dataset """kinships""" +83 86 model """conve""" +83 86 loss """marginranking""" +83 86 regularizer """no""" +83 86 optimizer """adam""" +83 86 training_loop """owa""" +83 86 negative_sampler """basic""" +83 86 evaluator """rankbased""" +83 87 dataset """kinships""" +83 87 model """conve""" +83 87 loss """marginranking""" +83 87 regularizer """no""" +83 87 optimizer """adam""" +83 87 training_loop """owa""" +83 87 negative_sampler """basic""" +83 87 evaluator """rankbased""" +83 88 dataset """kinships""" +83 88 model """conve""" +83 88 loss """marginranking""" +83 88 regularizer """no""" +83 88 optimizer """adam""" +83 88 training_loop """owa""" +83 88 negative_sampler """basic""" +83 88 evaluator """rankbased""" +83 89 dataset """kinships""" +83 89 model """conve""" +83 89 loss """marginranking""" +83 89 regularizer """no""" +83 89 optimizer """adam""" +83 89 training_loop """owa""" +83 89 negative_sampler """basic""" +83 89 evaluator """rankbased""" +83 90 dataset """kinships""" +83 90 model """conve""" +83 90 loss """marginranking""" +83 90 regularizer """no""" +83 90 optimizer """adam""" +83 90 training_loop """owa""" +83 90 negative_sampler """basic""" +83 90 evaluator """rankbased""" +83 91 dataset """kinships""" +83 91 model """conve""" +83 91 loss """marginranking""" +83 91 regularizer """no""" +83 91 optimizer """adam""" +83 91 training_loop """owa""" +83 91 negative_sampler """basic""" +83 91 evaluator """rankbased""" +83 92 dataset """kinships""" +83 92 model """conve""" +83 92 loss """marginranking""" +83 92 regularizer """no""" +83 92 optimizer """adam""" +83 92 training_loop """owa""" +83 92 negative_sampler """basic""" +83 92 evaluator """rankbased""" +83 93 dataset """kinships""" +83 93 model """conve""" +83 93 loss """marginranking""" +83 93 regularizer """no""" +83 93 optimizer """adam""" +83 93 training_loop 
"""owa""" +83 93 negative_sampler """basic""" +83 93 evaluator """rankbased""" +83 94 dataset """kinships""" +83 94 model """conve""" +83 94 loss """marginranking""" +83 94 regularizer """no""" +83 94 optimizer """adam""" +83 94 training_loop """owa""" +83 94 negative_sampler """basic""" +83 94 evaluator """rankbased""" +83 95 dataset """kinships""" +83 95 model """conve""" +83 95 loss """marginranking""" +83 95 regularizer """no""" +83 95 optimizer """adam""" +83 95 training_loop """owa""" +83 95 negative_sampler """basic""" +83 95 evaluator """rankbased""" +83 96 dataset """kinships""" +83 96 model """conve""" +83 96 loss """marginranking""" +83 96 regularizer """no""" +83 96 optimizer """adam""" +83 96 training_loop """owa""" +83 96 negative_sampler """basic""" +83 96 evaluator """rankbased""" +83 97 dataset """kinships""" +83 97 model """conve""" +83 97 loss """marginranking""" +83 97 regularizer """no""" +83 97 optimizer """adam""" +83 97 training_loop """owa""" +83 97 negative_sampler """basic""" +83 97 evaluator """rankbased""" +83 98 dataset """kinships""" +83 98 model """conve""" +83 98 loss """marginranking""" +83 98 regularizer """no""" +83 98 optimizer """adam""" +83 98 training_loop """owa""" +83 98 negative_sampler """basic""" +83 98 evaluator """rankbased""" +83 99 dataset """kinships""" +83 99 model """conve""" +83 99 loss """marginranking""" +83 99 regularizer """no""" +83 99 optimizer """adam""" +83 99 training_loop """owa""" +83 99 negative_sampler """basic""" +83 99 evaluator """rankbased""" +83 100 dataset """kinships""" +83 100 model """conve""" +83 100 loss """marginranking""" +83 100 regularizer """no""" +83 100 optimizer """adam""" +83 100 training_loop """owa""" +83 100 negative_sampler """basic""" +83 100 evaluator """rankbased""" +84 1 model.output_channels 23.0 +84 1 model.input_dropout 0.38254538612405975 +84 1 model.output_dropout 0.4576895386251895 +84 1 model.feature_map_dropout 0.20562999385906133 +84 1 model.embedding_dim 2.0 +84 
1 loss.margin 26.60837254549021 +84 1 loss.adversarial_temperature 0.1369652946268166 +84 1 optimizer.lr 0.0752150921364792 +84 1 negative_sampler.num_negs_per_pos 73.0 +84 1 training.batch_size 1.0 +84 2 model.output_channels 34.0 +84 2 model.input_dropout 0.34040182079893 +84 2 model.output_dropout 0.03757062726038918 +84 2 model.feature_map_dropout 0.2836718699056206 +84 2 model.embedding_dim 2.0 +84 2 loss.margin 27.654154444359413 +84 2 loss.adversarial_temperature 0.31497699751543823 +84 2 optimizer.lr 0.0011364784009824343 +84 2 negative_sampler.num_negs_per_pos 23.0 +84 2 training.batch_size 2.0 +84 3 model.output_channels 24.0 +84 3 model.input_dropout 0.14953040183418764 +84 3 model.output_dropout 0.3150598252170849 +84 3 model.feature_map_dropout 0.48620351005433843 +84 3 model.embedding_dim 2.0 +84 3 loss.margin 23.310888789218765 +84 3 loss.adversarial_temperature 0.27089300675850625 +84 3 optimizer.lr 0.05401574806889998 +84 3 negative_sampler.num_negs_per_pos 86.0 +84 3 training.batch_size 2.0 +84 4 model.output_channels 30.0 +84 4 model.input_dropout 0.295924264962232 +84 4 model.output_dropout 0.10052381605610089 +84 4 model.feature_map_dropout 0.17062661051984018 +84 4 model.embedding_dim 2.0 +84 4 loss.margin 15.482079061311989 +84 4 loss.adversarial_temperature 0.3147191049003115 +84 4 optimizer.lr 0.001668498912565667 +84 4 negative_sampler.num_negs_per_pos 71.0 +84 4 training.batch_size 0.0 +84 5 model.output_channels 43.0 +84 5 model.input_dropout 0.4039463921452938 +84 5 model.output_dropout 0.3206996588357911 +84 5 model.feature_map_dropout 0.041461359969392875 +84 5 model.embedding_dim 0.0 +84 5 loss.margin 15.844068163472928 +84 5 loss.adversarial_temperature 0.2844315758077532 +84 5 optimizer.lr 0.005580322455670139 +84 5 negative_sampler.num_negs_per_pos 69.0 +84 5 training.batch_size 2.0 +84 6 model.output_channels 20.0 +84 6 model.input_dropout 0.04560942607220714 +84 6 model.output_dropout 0.44336694485004025 +84 6 
model.feature_map_dropout 0.4978527452632107 +84 6 model.embedding_dim 0.0 +84 6 loss.margin 15.256584476651934 +84 6 loss.adversarial_temperature 0.48860576662867927 +84 6 optimizer.lr 0.05304497803396217 +84 6 negative_sampler.num_negs_per_pos 21.0 +84 6 training.batch_size 2.0 +84 7 model.output_channels 55.0 +84 7 model.input_dropout 0.2467219460954742 +84 7 model.output_dropout 0.2452967352965142 +84 7 model.feature_map_dropout 0.28273583854635576 +84 7 model.embedding_dim 2.0 +84 7 loss.margin 12.634172541434156 +84 7 loss.adversarial_temperature 0.7869733200663689 +84 7 optimizer.lr 0.017987192071056316 +84 7 negative_sampler.num_negs_per_pos 39.0 +84 7 training.batch_size 1.0 +84 8 model.output_channels 43.0 +84 8 model.input_dropout 0.34776638077019845 +84 8 model.output_dropout 0.339187816968634 +84 8 model.feature_map_dropout 0.3987331486492541 +84 8 model.embedding_dim 2.0 +84 8 loss.margin 22.711961712509076 +84 8 loss.adversarial_temperature 0.9550391027828062 +84 8 optimizer.lr 0.0978348933653787 +84 8 negative_sampler.num_negs_per_pos 19.0 +84 8 training.batch_size 0.0 +84 9 model.output_channels 58.0 +84 9 model.input_dropout 0.3091882295931128 +84 9 model.output_dropout 0.1146367946542231 +84 9 model.feature_map_dropout 0.44977746703000415 +84 9 model.embedding_dim 2.0 +84 9 loss.margin 5.4252323425517375 +84 9 loss.adversarial_temperature 0.4732822541100531 +84 9 optimizer.lr 0.030723669211967525 +84 9 negative_sampler.num_negs_per_pos 60.0 +84 9 training.batch_size 1.0 +84 10 model.output_channels 34.0 +84 10 model.input_dropout 0.34112016677978263 +84 10 model.output_dropout 0.0990584852916383 +84 10 model.feature_map_dropout 0.4067842282361876 +84 10 model.embedding_dim 2.0 +84 10 loss.margin 26.049882563983886 +84 10 loss.adversarial_temperature 0.44372971633923697 +84 10 optimizer.lr 0.0315518120924952 +84 10 negative_sampler.num_negs_per_pos 39.0 +84 10 training.batch_size 2.0 +84 11 model.output_channels 46.0 +84 11 model.input_dropout 
0.37843796909440025 +84 11 model.output_dropout 0.20719985573827437 +84 11 model.feature_map_dropout 0.295916415378658 +84 11 model.embedding_dim 1.0 +84 11 loss.margin 27.54462587916519 +84 11 loss.adversarial_temperature 0.39461435767371245 +84 11 optimizer.lr 0.0716142302576373 +84 11 negative_sampler.num_negs_per_pos 62.0 +84 11 training.batch_size 2.0 +84 12 model.output_channels 25.0 +84 12 model.input_dropout 0.06239620754393127 +84 12 model.output_dropout 0.299066697881963 +84 12 model.feature_map_dropout 0.22677068752951302 +84 12 model.embedding_dim 0.0 +84 12 loss.margin 6.493670177605359 +84 12 loss.adversarial_temperature 0.25835370593833906 +84 12 optimizer.lr 0.07576070689242254 +84 12 negative_sampler.num_negs_per_pos 12.0 +84 12 training.batch_size 2.0 +84 13 model.output_channels 21.0 +84 13 model.input_dropout 0.47920708946076135 +84 13 model.output_dropout 0.3123308924913481 +84 13 model.feature_map_dropout 0.12328438042930967 +84 13 model.embedding_dim 1.0 +84 13 loss.margin 13.103270674082506 +84 13 loss.adversarial_temperature 0.15399764088214069 +84 13 optimizer.lr 0.03800794634392012 +84 13 negative_sampler.num_negs_per_pos 50.0 +84 13 training.batch_size 2.0 +84 14 model.output_channels 57.0 +84 14 model.input_dropout 0.4979102933436213 +84 14 model.output_dropout 0.058715704155181125 +84 14 model.feature_map_dropout 0.3113419473386313 +84 14 model.embedding_dim 0.0 +84 14 loss.margin 20.12615567006385 +84 14 loss.adversarial_temperature 0.7875276353440482 +84 14 optimizer.lr 0.0013811513007008218 +84 14 negative_sampler.num_negs_per_pos 8.0 +84 14 training.batch_size 2.0 +84 15 model.output_channels 64.0 +84 15 model.input_dropout 0.02028656816369312 +84 15 model.output_dropout 0.490393994929285 +84 15 model.feature_map_dropout 0.30659757196654 +84 15 model.embedding_dim 0.0 +84 15 loss.margin 20.34564320318437 +84 15 loss.adversarial_temperature 0.8502709778492633 +84 15 optimizer.lr 0.0014260592737769064 +84 15 
negative_sampler.num_negs_per_pos 92.0 +84 15 training.batch_size 2.0 +84 16 model.output_channels 16.0 +84 16 model.input_dropout 0.3977139139451744 +84 16 model.output_dropout 0.4384144090554013 +84 16 model.feature_map_dropout 0.38481164441954757 +84 16 model.embedding_dim 2.0 +84 16 loss.margin 7.761180494208314 +84 16 loss.adversarial_temperature 0.10234245607116249 +84 16 optimizer.lr 0.0026697864231942123 +84 16 negative_sampler.num_negs_per_pos 14.0 +84 16 training.batch_size 1.0 +84 17 model.output_channels 36.0 +84 17 model.input_dropout 0.32076452906104685 +84 17 model.output_dropout 0.18077018240002946 +84 17 model.feature_map_dropout 0.46668557067911487 +84 17 model.embedding_dim 0.0 +84 17 loss.margin 26.443067146651348 +84 17 loss.adversarial_temperature 0.7935407751374292 +84 17 optimizer.lr 0.08840194989375635 +84 17 negative_sampler.num_negs_per_pos 30.0 +84 17 training.batch_size 2.0 +84 18 model.output_channels 62.0 +84 18 model.input_dropout 0.35415962684625885 +84 18 model.output_dropout 0.4346707995119266 +84 18 model.feature_map_dropout 0.42090253317512405 +84 18 model.embedding_dim 1.0 +84 18 loss.margin 17.79393338324657 +84 18 loss.adversarial_temperature 0.8936565986590791 +84 18 optimizer.lr 0.021539356299528335 +84 18 negative_sampler.num_negs_per_pos 67.0 +84 18 training.batch_size 0.0 +84 19 model.output_channels 58.0 +84 19 model.input_dropout 0.1539479097271259 +84 19 model.output_dropout 0.4883435522838664 +84 19 model.feature_map_dropout 0.38517374879173527 +84 19 model.embedding_dim 0.0 +84 19 loss.margin 23.88414924587418 +84 19 loss.adversarial_temperature 0.12772189734163925 +84 19 optimizer.lr 0.023589618662035 +84 19 negative_sampler.num_negs_per_pos 33.0 +84 19 training.batch_size 0.0 +84 20 model.output_channels 48.0 +84 20 model.input_dropout 0.3184600531131835 +84 20 model.output_dropout 0.4747664162221894 +84 20 model.feature_map_dropout 0.006085558174875938 +84 20 model.embedding_dim 1.0 +84 20 loss.margin 
27.184957651070157 +84 20 loss.adversarial_temperature 0.16566267739170393 +84 20 optimizer.lr 0.024385162793636856 +84 20 negative_sampler.num_negs_per_pos 88.0 +84 20 training.batch_size 2.0 +84 21 model.output_channels 40.0 +84 21 model.input_dropout 0.13807007571682622 +84 21 model.output_dropout 0.3916179554279775 +84 21 model.feature_map_dropout 0.4801967689389182 +84 21 model.embedding_dim 1.0 +84 21 loss.margin 28.963695546738606 +84 21 loss.adversarial_temperature 0.2337461819535978 +84 21 optimizer.lr 0.052557190418567365 +84 21 negative_sampler.num_negs_per_pos 34.0 +84 21 training.batch_size 2.0 +84 22 model.output_channels 42.0 +84 22 model.input_dropout 0.14204622736007727 +84 22 model.output_dropout 0.029541631756764608 +84 22 model.feature_map_dropout 0.16211754261551498 +84 22 model.embedding_dim 1.0 +84 22 loss.margin 13.10763882948212 +84 22 loss.adversarial_temperature 0.584586894883913 +84 22 optimizer.lr 0.02212297755253109 +84 22 negative_sampler.num_negs_per_pos 28.0 +84 22 training.batch_size 0.0 +84 23 model.output_channels 51.0 +84 23 model.input_dropout 0.16521845548275405 +84 23 model.output_dropout 0.03258551849647029 +84 23 model.feature_map_dropout 0.4429240614246482 +84 23 model.embedding_dim 0.0 +84 23 loss.margin 24.184386795959224 +84 23 loss.adversarial_temperature 0.8606121634095589 +84 23 optimizer.lr 0.027703259420686577 +84 23 negative_sampler.num_negs_per_pos 24.0 +84 23 training.batch_size 2.0 +84 24 model.output_channels 39.0 +84 24 model.input_dropout 0.49711060704466037 +84 24 model.output_dropout 0.30280232682849334 +84 24 model.feature_map_dropout 0.3125131183821544 +84 24 model.embedding_dim 2.0 +84 24 loss.margin 26.425181805360815 +84 24 loss.adversarial_temperature 0.8417389224259046 +84 24 optimizer.lr 0.0018928801410350434 +84 24 negative_sampler.num_negs_per_pos 91.0 +84 24 training.batch_size 1.0 +84 25 model.output_channels 32.0 +84 25 model.input_dropout 0.3671946839505254 +84 25 model.output_dropout 
0.19747981410956544 +84 25 model.feature_map_dropout 0.43634769787901645 +84 25 model.embedding_dim 1.0 +84 25 loss.margin 16.888859570046684 +84 25 loss.adversarial_temperature 0.37818234910049786 +84 25 optimizer.lr 0.007733689607815831 +84 25 negative_sampler.num_negs_per_pos 82.0 +84 25 training.batch_size 1.0 +84 26 model.output_channels 33.0 +84 26 model.input_dropout 0.29595498649240964 +84 26 model.output_dropout 0.21696417866572154 +84 26 model.feature_map_dropout 0.2038423761242002 +84 26 model.embedding_dim 0.0 +84 26 loss.margin 24.729256997991925 +84 26 loss.adversarial_temperature 0.8421809339104813 +84 26 optimizer.lr 0.0060698509167525175 +84 26 negative_sampler.num_negs_per_pos 61.0 +84 26 training.batch_size 1.0 +84 27 model.output_channels 54.0 +84 27 model.input_dropout 0.4615098107753618 +84 27 model.output_dropout 0.27405161071371864 +84 27 model.feature_map_dropout 0.3206450929799716 +84 27 model.embedding_dim 0.0 +84 27 loss.margin 27.956746401494332 +84 27 loss.adversarial_temperature 0.5078566892458176 +84 27 optimizer.lr 0.002018811238184266 +84 27 negative_sampler.num_negs_per_pos 79.0 +84 27 training.batch_size 0.0 +84 28 model.output_channels 35.0 +84 28 model.input_dropout 0.03751693218484137 +84 28 model.output_dropout 0.03285597251260142 +84 28 model.feature_map_dropout 0.13381707050398067 +84 28 model.embedding_dim 1.0 +84 28 loss.margin 13.975305499952706 +84 28 loss.adversarial_temperature 0.6576476235962079 +84 28 optimizer.lr 0.023041840809141734 +84 28 negative_sampler.num_negs_per_pos 19.0 +84 28 training.batch_size 1.0 +84 29 model.output_channels 27.0 +84 29 model.input_dropout 0.24603631375615054 +84 29 model.output_dropout 0.2484747117509798 +84 29 model.feature_map_dropout 0.19000450220378345 +84 29 model.embedding_dim 0.0 +84 29 loss.margin 22.904504393712976 +84 29 loss.adversarial_temperature 0.3016888086095219 +84 29 optimizer.lr 0.0046710250375599435 +84 29 negative_sampler.num_negs_per_pos 24.0 +84 29 
training.batch_size 1.0 +84 30 model.output_channels 32.0 +84 30 model.input_dropout 0.3510923838111809 +84 30 model.output_dropout 0.06482831609125578 +84 30 model.feature_map_dropout 0.3147861955295709 +84 30 model.embedding_dim 0.0 +84 30 loss.margin 6.742059960010492 +84 30 loss.adversarial_temperature 0.751080763824101 +84 30 optimizer.lr 0.0013991578037848825 +84 30 negative_sampler.num_negs_per_pos 86.0 +84 30 training.batch_size 0.0 +84 31 model.output_channels 33.0 +84 31 model.input_dropout 0.0141164412877266 +84 31 model.output_dropout 0.3519325830786899 +84 31 model.feature_map_dropout 0.36897602993072937 +84 31 model.embedding_dim 0.0 +84 31 loss.margin 21.130233210528406 +84 31 loss.adversarial_temperature 0.742789009162259 +84 31 optimizer.lr 0.011365211549931406 +84 31 negative_sampler.num_negs_per_pos 40.0 +84 31 training.batch_size 2.0 +84 32 model.output_channels 20.0 +84 32 model.input_dropout 0.24624937274853048 +84 32 model.output_dropout 0.26862670481594564 +84 32 model.feature_map_dropout 0.22632124220662764 +84 32 model.embedding_dim 1.0 +84 32 loss.margin 11.192979731823756 +84 32 loss.adversarial_temperature 0.4912202722097796 +84 32 optimizer.lr 0.0011613744779829373 +84 32 negative_sampler.num_negs_per_pos 0.0 +84 32 training.batch_size 1.0 +84 33 model.output_channels 39.0 +84 33 model.input_dropout 0.2187330442820235 +84 33 model.output_dropout 0.30812709614385075 +84 33 model.feature_map_dropout 0.008135685796623049 +84 33 model.embedding_dim 2.0 +84 33 loss.margin 21.367823480972493 +84 33 loss.adversarial_temperature 0.6330661658038781 +84 33 optimizer.lr 0.00545079330209466 +84 33 negative_sampler.num_negs_per_pos 15.0 +84 33 training.batch_size 1.0 +84 34 model.output_channels 56.0 +84 34 model.input_dropout 0.17828436159536337 +84 34 model.output_dropout 0.4009746301718072 +84 34 model.feature_map_dropout 0.44480821439139445 +84 34 model.embedding_dim 1.0 +84 34 loss.margin 15.142276380771776 +84 34 loss.adversarial_temperature 
0.6307734588658327 +84 34 optimizer.lr 0.008962837254412206 +84 34 negative_sampler.num_negs_per_pos 42.0 +84 34 training.batch_size 2.0 +84 35 model.output_channels 58.0 +84 35 model.input_dropout 0.42239785766512766 +84 35 model.output_dropout 0.3083549705065357 +84 35 model.feature_map_dropout 0.14905808462099784 +84 35 model.embedding_dim 0.0 +84 35 loss.margin 16.950728825102882 +84 35 loss.adversarial_temperature 0.2520820017247308 +84 35 optimizer.lr 0.004163435071946062 +84 35 negative_sampler.num_negs_per_pos 80.0 +84 35 training.batch_size 0.0 +84 36 model.output_channels 22.0 +84 36 model.input_dropout 0.20281605678582038 +84 36 model.output_dropout 0.33643145856927825 +84 36 model.feature_map_dropout 0.3264400487661498 +84 36 model.embedding_dim 0.0 +84 36 loss.margin 23.10336574793351 +84 36 loss.adversarial_temperature 0.9577387678110311 +84 36 optimizer.lr 0.0725091647789788 +84 36 negative_sampler.num_negs_per_pos 98.0 +84 36 training.batch_size 0.0 +84 37 model.output_channels 58.0 +84 37 model.input_dropout 0.0616861279533657 +84 37 model.output_dropout 0.1394446878030876 +84 37 model.feature_map_dropout 0.47409012757336794 +84 37 model.embedding_dim 1.0 +84 37 loss.margin 1.068704228295997 +84 37 loss.adversarial_temperature 0.12001739344472714 +84 37 optimizer.lr 0.09418596929889782 +84 37 negative_sampler.num_negs_per_pos 77.0 +84 37 training.batch_size 2.0 +84 38 model.output_channels 24.0 +84 38 model.input_dropout 0.32470102049278193 +84 38 model.output_dropout 0.4331786558368861 +84 38 model.feature_map_dropout 0.09300115998218939 +84 38 model.embedding_dim 1.0 +84 38 loss.margin 1.4411367645113011 +84 38 loss.adversarial_temperature 0.1474635330387996 +84 38 optimizer.lr 0.004578389975776997 +84 38 negative_sampler.num_negs_per_pos 30.0 +84 38 training.batch_size 0.0 +84 39 model.output_channels 49.0 +84 39 model.input_dropout 0.32876243924870674 +84 39 model.output_dropout 0.17706548193471489 +84 39 model.feature_map_dropout 
0.26902686036412143 +84 39 model.embedding_dim 0.0 +84 39 loss.margin 8.243520356605755 +84 39 loss.adversarial_temperature 0.6430001415950151 +84 39 optimizer.lr 0.06113346014109547 +84 39 negative_sampler.num_negs_per_pos 70.0 +84 39 training.batch_size 2.0 +84 40 model.output_channels 21.0 +84 40 model.input_dropout 0.07824396073964496 +84 40 model.output_dropout 0.07410855622531676 +84 40 model.feature_map_dropout 0.4569959253310792 +84 40 model.embedding_dim 0.0 +84 40 loss.margin 18.75780708993134 +84 40 loss.adversarial_temperature 0.4265811078327325 +84 40 optimizer.lr 0.04004308089316344 +84 40 negative_sampler.num_negs_per_pos 40.0 +84 40 training.batch_size 1.0 +84 41 model.output_channels 17.0 +84 41 model.input_dropout 0.12314984852187072 +84 41 model.output_dropout 0.49430636771885866 +84 41 model.feature_map_dropout 0.018001191339309552 +84 41 model.embedding_dim 0.0 +84 41 loss.margin 25.72304083953039 +84 41 loss.adversarial_temperature 0.8819703150447669 +84 41 optimizer.lr 0.004098096731968965 +84 41 negative_sampler.num_negs_per_pos 32.0 +84 41 training.batch_size 0.0 +84 42 model.output_channels 39.0 +84 42 model.input_dropout 0.3272235000231375 +84 42 model.output_dropout 0.3522517552061998 +84 42 model.feature_map_dropout 0.36746043783092197 +84 42 model.embedding_dim 2.0 +84 42 loss.margin 20.387034151020345 +84 42 loss.adversarial_temperature 0.9485358580284646 +84 42 optimizer.lr 0.014179042332520174 +84 42 negative_sampler.num_negs_per_pos 92.0 +84 42 training.batch_size 1.0 +84 43 model.output_channels 27.0 +84 43 model.input_dropout 0.44920399683078943 +84 43 model.output_dropout 0.20866743970507923 +84 43 model.feature_map_dropout 0.04429274296691654 +84 43 model.embedding_dim 0.0 +84 43 loss.margin 22.026026054972206 +84 43 loss.adversarial_temperature 0.7558775478387809 +84 43 optimizer.lr 0.028405623288432363 +84 43 negative_sampler.num_negs_per_pos 77.0 +84 43 training.batch_size 2.0 +84 44 model.output_channels 31.0 +84 44 
model.input_dropout 0.21573194574107124 +84 44 model.output_dropout 0.3427763179569042 +84 44 model.feature_map_dropout 0.22823266521696928 +84 44 model.embedding_dim 2.0 +84 44 loss.margin 28.61432722554744 +84 44 loss.adversarial_temperature 0.5384755100455321 +84 44 optimizer.lr 0.038215227271786195 +84 44 negative_sampler.num_negs_per_pos 39.0 +84 44 training.batch_size 0.0 +84 45 model.output_channels 43.0 +84 45 model.input_dropout 0.23227936468506055 +84 45 model.output_dropout 0.16533341078875596 +84 45 model.feature_map_dropout 0.15119674029937535 +84 45 model.embedding_dim 0.0 +84 45 loss.margin 26.90412362319348 +84 45 loss.adversarial_temperature 0.7529035478245674 +84 45 optimizer.lr 0.056921438910017996 +84 45 negative_sampler.num_negs_per_pos 64.0 +84 45 training.batch_size 1.0 +84 46 model.output_channels 49.0 +84 46 model.input_dropout 0.04213495650850968 +84 46 model.output_dropout 0.4177084001895439 +84 46 model.feature_map_dropout 0.3006705250713052 +84 46 model.embedding_dim 1.0 +84 46 loss.margin 29.809021694620156 +84 46 loss.adversarial_temperature 0.17402954517949693 +84 46 optimizer.lr 0.006026306248709029 +84 46 negative_sampler.num_negs_per_pos 27.0 +84 46 training.batch_size 0.0 +84 47 model.output_channels 47.0 +84 47 model.input_dropout 0.04491105476317209 +84 47 model.output_dropout 0.3346715155865532 +84 47 model.feature_map_dropout 0.44046635561157893 +84 47 model.embedding_dim 0.0 +84 47 loss.margin 2.224955581808801 +84 47 loss.adversarial_temperature 0.33559571063545557 +84 47 optimizer.lr 0.013931058820175366 +84 47 negative_sampler.num_negs_per_pos 39.0 +84 47 training.batch_size 2.0 +84 48 model.output_channels 18.0 +84 48 model.input_dropout 0.30311877848733415 +84 48 model.output_dropout 0.14752102186396154 +84 48 model.feature_map_dropout 0.3071323841267636 +84 48 model.embedding_dim 1.0 +84 48 loss.margin 18.286984974097233 +84 48 loss.adversarial_temperature 0.39353148490195666 +84 48 optimizer.lr 0.002874736767822953 
+84 48 negative_sampler.num_negs_per_pos 34.0 +84 48 training.batch_size 0.0 +84 49 model.output_channels 39.0 +84 49 model.input_dropout 0.12906550873461242 +84 49 model.output_dropout 0.39655084268984647 +84 49 model.feature_map_dropout 0.10020455301978926 +84 49 model.embedding_dim 0.0 +84 49 loss.margin 5.464834511724452 +84 49 loss.adversarial_temperature 0.7256321561896262 +84 49 optimizer.lr 0.0678563645587275 +84 49 negative_sampler.num_negs_per_pos 91.0 +84 49 training.batch_size 2.0 +84 50 model.output_channels 37.0 +84 50 model.input_dropout 0.06020556755648704 +84 50 model.output_dropout 0.3603129428034074 +84 50 model.feature_map_dropout 0.1886393621558431 +84 50 model.embedding_dim 2.0 +84 50 loss.margin 15.596462813369532 +84 50 loss.adversarial_temperature 0.9993049101454974 +84 50 optimizer.lr 0.0037932994092169624 +84 50 negative_sampler.num_negs_per_pos 86.0 +84 50 training.batch_size 0.0 +84 51 model.output_channels 43.0 +84 51 model.input_dropout 0.14272213046524523 +84 51 model.output_dropout 0.26958165308712 +84 51 model.feature_map_dropout 0.09382283998982588 +84 51 model.embedding_dim 2.0 +84 51 loss.margin 16.621160926332415 +84 51 loss.adversarial_temperature 0.8275510904554398 +84 51 optimizer.lr 0.0014675756189624824 +84 51 negative_sampler.num_negs_per_pos 78.0 +84 51 training.batch_size 1.0 +84 52 model.output_channels 34.0 +84 52 model.input_dropout 0.06982241651367654 +84 52 model.output_dropout 0.41441816921468205 +84 52 model.feature_map_dropout 0.14162672374008656 +84 52 model.embedding_dim 0.0 +84 52 loss.margin 21.772794537332246 +84 52 loss.adversarial_temperature 0.8924992255430001 +84 52 optimizer.lr 0.0018281894041286134 +84 52 negative_sampler.num_negs_per_pos 37.0 +84 52 training.batch_size 1.0 +84 53 model.output_channels 57.0 +84 53 model.input_dropout 0.056789519928010734 +84 53 model.output_dropout 0.020379587480425843 +84 53 model.feature_map_dropout 0.2704341564094179 +84 53 model.embedding_dim 2.0 +84 53 
loss.margin 27.267668802465728 +84 53 loss.adversarial_temperature 0.552046249458889 +84 53 optimizer.lr 0.02591438246842144 +84 53 negative_sampler.num_negs_per_pos 48.0 +84 53 training.batch_size 0.0 +84 54 model.output_channels 29.0 +84 54 model.input_dropout 0.3377261619609481 +84 54 model.output_dropout 0.10068563176193618 +84 54 model.feature_map_dropout 0.042487613434133265 +84 54 model.embedding_dim 0.0 +84 54 loss.margin 13.985885686688128 +84 54 loss.adversarial_temperature 0.48229432632238467 +84 54 optimizer.lr 0.001628479665012882 +84 54 negative_sampler.num_negs_per_pos 52.0 +84 54 training.batch_size 1.0 +84 55 model.output_channels 17.0 +84 55 model.input_dropout 0.0640568694850428 +84 55 model.output_dropout 0.3974723625592458 +84 55 model.feature_map_dropout 0.3257242968289809 +84 55 model.embedding_dim 2.0 +84 55 loss.margin 11.650949521934159 +84 55 loss.adversarial_temperature 0.38927482993082185 +84 55 optimizer.lr 0.006761156387348313 +84 55 negative_sampler.num_negs_per_pos 63.0 +84 55 training.batch_size 2.0 +84 56 model.output_channels 28.0 +84 56 model.input_dropout 0.16471535543508015 +84 56 model.output_dropout 0.06250227588350987 +84 56 model.feature_map_dropout 0.3842185282691766 +84 56 model.embedding_dim 1.0 +84 56 loss.margin 25.94334439361634 +84 56 loss.adversarial_temperature 0.1124834450600199 +84 56 optimizer.lr 0.04261732899442534 +84 56 negative_sampler.num_negs_per_pos 94.0 +84 56 training.batch_size 0.0 +84 57 model.output_channels 23.0 +84 57 model.input_dropout 0.2898939129261594 +84 57 model.output_dropout 0.12471885146389067 +84 57 model.feature_map_dropout 0.29814229893367666 +84 57 model.embedding_dim 1.0 +84 57 loss.margin 7.501582938436707 +84 57 loss.adversarial_temperature 0.6929397228599369 +84 57 optimizer.lr 0.02000910498607603 +84 57 negative_sampler.num_negs_per_pos 15.0 +84 57 training.batch_size 0.0 +84 58 model.output_channels 43.0 +84 58 model.input_dropout 0.369600602008516 +84 58 model.output_dropout 
0.15107264749257227 +84 58 model.feature_map_dropout 0.029427904042804987 +84 58 model.embedding_dim 2.0 +84 58 loss.margin 10.850895916868893 +84 58 loss.adversarial_temperature 0.6204638634079693 +84 58 optimizer.lr 0.0031337159739875105 +84 58 negative_sampler.num_negs_per_pos 81.0 +84 58 training.batch_size 1.0 +84 59 model.output_channels 17.0 +84 59 model.input_dropout 0.13480024810616026 +84 59 model.output_dropout 0.39967346692781336 +84 59 model.feature_map_dropout 0.46476903345473936 +84 59 model.embedding_dim 2.0 +84 59 loss.margin 25.502997038153655 +84 59 loss.adversarial_temperature 0.4954951445083451 +84 59 optimizer.lr 0.0030296011485642237 +84 59 negative_sampler.num_negs_per_pos 5.0 +84 59 training.batch_size 1.0 +84 60 model.output_channels 17.0 +84 60 model.input_dropout 0.4345246542364402 +84 60 model.output_dropout 0.4280829866618057 +84 60 model.feature_map_dropout 0.36364438129534443 +84 60 model.embedding_dim 2.0 +84 60 loss.margin 10.423720605254685 +84 60 loss.adversarial_temperature 0.3229917545135684 +84 60 optimizer.lr 0.00173207372319227 +84 60 negative_sampler.num_negs_per_pos 29.0 +84 60 training.batch_size 2.0 +84 61 model.output_channels 17.0 +84 61 model.input_dropout 0.20171990965612763 +84 61 model.output_dropout 0.28088270114073105 +84 61 model.feature_map_dropout 0.16321886053743784 +84 61 model.embedding_dim 2.0 +84 61 loss.margin 15.905289844651286 +84 61 loss.adversarial_temperature 0.28245107932752 +84 61 optimizer.lr 0.023241125723828273 +84 61 negative_sampler.num_negs_per_pos 17.0 +84 61 training.batch_size 2.0 +84 62 model.output_channels 58.0 +84 62 model.input_dropout 0.49269127767174303 +84 62 model.output_dropout 0.05288004564449533 +84 62 model.feature_map_dropout 0.013417881806396581 +84 62 model.embedding_dim 1.0 +84 62 loss.margin 11.152372966621117 +84 62 loss.adversarial_temperature 0.7390860138183709 +84 62 optimizer.lr 0.0026397305291720173 +84 62 negative_sampler.num_negs_per_pos 44.0 +84 62 
training.batch_size 0.0 +84 63 model.output_channels 62.0 +84 63 model.input_dropout 0.43840318899969255 +84 63 model.output_dropout 0.4852610552798628 +84 63 model.feature_map_dropout 0.04175529646169446 +84 63 model.embedding_dim 1.0 +84 63 loss.margin 9.417816103393804 +84 63 loss.adversarial_temperature 0.9188678975936361 +84 63 optimizer.lr 0.060490262022521626 +84 63 negative_sampler.num_negs_per_pos 86.0 +84 63 training.batch_size 0.0 +84 64 model.output_channels 21.0 +84 64 model.input_dropout 0.19924508350039916 +84 64 model.output_dropout 0.4126279937379589 +84 64 model.feature_map_dropout 0.45569826677375525 +84 64 model.embedding_dim 1.0 +84 64 loss.margin 3.6808881743778583 +84 64 loss.adversarial_temperature 0.9222900114770789 +84 64 optimizer.lr 0.001694942771358187 +84 64 negative_sampler.num_negs_per_pos 95.0 +84 64 training.batch_size 1.0 +84 65 model.output_channels 47.0 +84 65 model.input_dropout 0.2339320337509066 +84 65 model.output_dropout 0.0680398449320484 +84 65 model.feature_map_dropout 0.27127915300023037 +84 65 model.embedding_dim 1.0 +84 65 loss.margin 2.2431274058888815 +84 65 loss.adversarial_temperature 0.17405249826280744 +84 65 optimizer.lr 0.006514236538094985 +84 65 negative_sampler.num_negs_per_pos 78.0 +84 65 training.batch_size 1.0 +84 66 model.output_channels 36.0 +84 66 model.input_dropout 0.19046924363091194 +84 66 model.output_dropout 0.40417329850830697 +84 66 model.feature_map_dropout 0.31545334615028636 +84 66 model.embedding_dim 1.0 +84 66 loss.margin 13.620865335767611 +84 66 loss.adversarial_temperature 0.7726135915754624 +84 66 optimizer.lr 0.003349206098559976 +84 66 negative_sampler.num_negs_per_pos 28.0 +84 66 training.batch_size 2.0 +84 67 model.output_channels 63.0 +84 67 model.input_dropout 0.03866911590559696 +84 67 model.output_dropout 0.266388976275571 +84 67 model.feature_map_dropout 0.3718696778840839 +84 67 model.embedding_dim 2.0 +84 67 loss.margin 3.4811365075684906 +84 67 loss.adversarial_temperature 
0.6337538567351785 +84 67 optimizer.lr 0.0025070320905229318 +84 67 negative_sampler.num_negs_per_pos 89.0 +84 67 training.batch_size 0.0 +84 68 model.output_channels 42.0 +84 68 model.input_dropout 0.10744870919309102 +84 68 model.output_dropout 0.4453745639516397 +84 68 model.feature_map_dropout 0.07555388692859516 +84 68 model.embedding_dim 1.0 +84 68 loss.margin 22.00511872284957 +84 68 loss.adversarial_temperature 0.8842707000783953 +84 68 optimizer.lr 0.0013172726364516897 +84 68 negative_sampler.num_negs_per_pos 53.0 +84 68 training.batch_size 1.0 +84 69 model.output_channels 53.0 +84 69 model.input_dropout 0.34593853353407 +84 69 model.output_dropout 0.41083801296643024 +84 69 model.feature_map_dropout 0.14270139532182413 +84 69 model.embedding_dim 1.0 +84 69 loss.margin 4.228281465095536 +84 69 loss.adversarial_temperature 0.11219887041447597 +84 69 optimizer.lr 0.02513022178189786 +84 69 negative_sampler.num_negs_per_pos 61.0 +84 69 training.batch_size 0.0 +84 70 model.output_channels 50.0 +84 70 model.input_dropout 0.2959286932212281 +84 70 model.output_dropout 0.24483727394090488 +84 70 model.feature_map_dropout 0.06078384412593707 +84 70 model.embedding_dim 1.0 +84 70 loss.margin 4.358651969257622 +84 70 loss.adversarial_temperature 0.11666575533680817 +84 70 optimizer.lr 0.01305205146271996 +84 70 negative_sampler.num_negs_per_pos 22.0 +84 70 training.batch_size 1.0 +84 71 model.output_channels 39.0 +84 71 model.input_dropout 0.19952110538961254 +84 71 model.output_dropout 0.4710452887822822 +84 71 model.feature_map_dropout 0.19088423428576384 +84 71 model.embedding_dim 2.0 +84 71 loss.margin 28.65428447398695 +84 71 loss.adversarial_temperature 0.5559865923540399 +84 71 optimizer.lr 0.004498615617660716 +84 71 negative_sampler.num_negs_per_pos 50.0 +84 71 training.batch_size 0.0 +84 72 model.output_channels 17.0 +84 72 model.input_dropout 0.3899438984291764 +84 72 model.output_dropout 0.3738117352330778 +84 72 model.feature_map_dropout 
0.17529869424139277 +84 72 model.embedding_dim 2.0 +84 72 loss.margin 19.635773711578413 +84 72 loss.adversarial_temperature 0.2559687279287172 +84 72 optimizer.lr 0.01705416478594253 +84 72 negative_sampler.num_negs_per_pos 86.0 +84 72 training.batch_size 0.0 +84 1 dataset """kinships""" +84 1 model """conve""" +84 1 loss """nssa""" +84 1 regularizer """no""" +84 1 optimizer """adam""" +84 1 training_loop """owa""" +84 1 negative_sampler """basic""" +84 1 evaluator """rankbased""" +84 2 dataset """kinships""" +84 2 model """conve""" +84 2 loss """nssa""" +84 2 regularizer """no""" +84 2 optimizer """adam""" +84 2 training_loop """owa""" +84 2 negative_sampler """basic""" +84 2 evaluator """rankbased""" +84 3 dataset """kinships""" +84 3 model """conve""" +84 3 loss """nssa""" +84 3 regularizer """no""" +84 3 optimizer """adam""" +84 3 training_loop """owa""" +84 3 negative_sampler """basic""" +84 3 evaluator """rankbased""" +84 4 dataset """kinships""" +84 4 model """conve""" +84 4 loss """nssa""" +84 4 regularizer """no""" +84 4 optimizer """adam""" +84 4 training_loop """owa""" +84 4 negative_sampler """basic""" +84 4 evaluator """rankbased""" +84 5 dataset """kinships""" +84 5 model """conve""" +84 5 loss """nssa""" +84 5 regularizer """no""" +84 5 optimizer """adam""" +84 5 training_loop """owa""" +84 5 negative_sampler """basic""" +84 5 evaluator """rankbased""" +84 6 dataset """kinships""" +84 6 model """conve""" +84 6 loss """nssa""" +84 6 regularizer """no""" +84 6 optimizer """adam""" +84 6 training_loop """owa""" +84 6 negative_sampler """basic""" +84 6 evaluator """rankbased""" +84 7 dataset """kinships""" +84 7 model """conve""" +84 7 loss """nssa""" +84 7 regularizer """no""" +84 7 optimizer """adam""" +84 7 training_loop """owa""" +84 7 negative_sampler """basic""" +84 7 evaluator """rankbased""" +84 8 dataset """kinships""" +84 8 model """conve""" +84 8 loss """nssa""" +84 8 regularizer """no""" +84 8 optimizer """adam""" +84 8 training_loop 
"""owa""" +84 8 negative_sampler """basic""" +84 8 evaluator """rankbased""" +84 9 dataset """kinships""" +84 9 model """conve""" +84 9 loss """nssa""" +84 9 regularizer """no""" +84 9 optimizer """adam""" +84 9 training_loop """owa""" +84 9 negative_sampler """basic""" +84 9 evaluator """rankbased""" +84 10 dataset """kinships""" +84 10 model """conve""" +84 10 loss """nssa""" +84 10 regularizer """no""" +84 10 optimizer """adam""" +84 10 training_loop """owa""" +84 10 negative_sampler """basic""" +84 10 evaluator """rankbased""" +84 11 dataset """kinships""" +84 11 model """conve""" +84 11 loss """nssa""" +84 11 regularizer """no""" +84 11 optimizer """adam""" +84 11 training_loop """owa""" +84 11 negative_sampler """basic""" +84 11 evaluator """rankbased""" +84 12 dataset """kinships""" +84 12 model """conve""" +84 12 loss """nssa""" +84 12 regularizer """no""" +84 12 optimizer """adam""" +84 12 training_loop """owa""" +84 12 negative_sampler """basic""" +84 12 evaluator """rankbased""" +84 13 dataset """kinships""" +84 13 model """conve""" +84 13 loss """nssa""" +84 13 regularizer """no""" +84 13 optimizer """adam""" +84 13 training_loop """owa""" +84 13 negative_sampler """basic""" +84 13 evaluator """rankbased""" +84 14 dataset """kinships""" +84 14 model """conve""" +84 14 loss """nssa""" +84 14 regularizer """no""" +84 14 optimizer """adam""" +84 14 training_loop """owa""" +84 14 negative_sampler """basic""" +84 14 evaluator """rankbased""" +84 15 dataset """kinships""" +84 15 model """conve""" +84 15 loss """nssa""" +84 15 regularizer """no""" +84 15 optimizer """adam""" +84 15 training_loop """owa""" +84 15 negative_sampler """basic""" +84 15 evaluator """rankbased""" +84 16 dataset """kinships""" +84 16 model """conve""" +84 16 loss """nssa""" +84 16 regularizer """no""" +84 16 optimizer """adam""" +84 16 training_loop """owa""" +84 16 negative_sampler """basic""" +84 16 evaluator """rankbased""" +84 17 dataset """kinships""" +84 17 model """conve""" +84 
17 loss """nssa""" +84 17 regularizer """no""" +84 17 optimizer """adam""" +84 17 training_loop """owa""" +84 17 negative_sampler """basic""" +84 17 evaluator """rankbased""" +84 18 dataset """kinships""" +84 18 model """conve""" +84 18 loss """nssa""" +84 18 regularizer """no""" +84 18 optimizer """adam""" +84 18 training_loop """owa""" +84 18 negative_sampler """basic""" +84 18 evaluator """rankbased""" +84 19 dataset """kinships""" +84 19 model """conve""" +84 19 loss """nssa""" +84 19 regularizer """no""" +84 19 optimizer """adam""" +84 19 training_loop """owa""" +84 19 negative_sampler """basic""" +84 19 evaluator """rankbased""" +84 20 dataset """kinships""" +84 20 model """conve""" +84 20 loss """nssa""" +84 20 regularizer """no""" +84 20 optimizer """adam""" +84 20 training_loop """owa""" +84 20 negative_sampler """basic""" +84 20 evaluator """rankbased""" +84 21 dataset """kinships""" +84 21 model """conve""" +84 21 loss """nssa""" +84 21 regularizer """no""" +84 21 optimizer """adam""" +84 21 training_loop """owa""" +84 21 negative_sampler """basic""" +84 21 evaluator """rankbased""" +84 22 dataset """kinships""" +84 22 model """conve""" +84 22 loss """nssa""" +84 22 regularizer """no""" +84 22 optimizer """adam""" +84 22 training_loop """owa""" +84 22 negative_sampler """basic""" +84 22 evaluator """rankbased""" +84 23 dataset """kinships""" +84 23 model """conve""" +84 23 loss """nssa""" +84 23 regularizer """no""" +84 23 optimizer """adam""" +84 23 training_loop """owa""" +84 23 negative_sampler """basic""" +84 23 evaluator """rankbased""" +84 24 dataset """kinships""" +84 24 model """conve""" +84 24 loss """nssa""" +84 24 regularizer """no""" +84 24 optimizer """adam""" +84 24 training_loop """owa""" +84 24 negative_sampler """basic""" +84 24 evaluator """rankbased""" +84 25 dataset """kinships""" +84 25 model """conve""" +84 25 loss """nssa""" +84 25 regularizer """no""" +84 25 optimizer """adam""" +84 25 training_loop """owa""" +84 25 
negative_sampler """basic""" +84 25 evaluator """rankbased""" +84 26 dataset """kinships""" +84 26 model """conve""" +84 26 loss """nssa""" +84 26 regularizer """no""" +84 26 optimizer """adam""" +84 26 training_loop """owa""" +84 26 negative_sampler """basic""" +84 26 evaluator """rankbased""" +84 27 dataset """kinships""" +84 27 model """conve""" +84 27 loss """nssa""" +84 27 regularizer """no""" +84 27 optimizer """adam""" +84 27 training_loop """owa""" +84 27 negative_sampler """basic""" +84 27 evaluator """rankbased""" +84 28 dataset """kinships""" +84 28 model """conve""" +84 28 loss """nssa""" +84 28 regularizer """no""" +84 28 optimizer """adam""" +84 28 training_loop """owa""" +84 28 negative_sampler """basic""" +84 28 evaluator """rankbased""" +84 29 dataset """kinships""" +84 29 model """conve""" +84 29 loss """nssa""" +84 29 regularizer """no""" +84 29 optimizer """adam""" +84 29 training_loop """owa""" +84 29 negative_sampler """basic""" +84 29 evaluator """rankbased""" +84 30 dataset """kinships""" +84 30 model """conve""" +84 30 loss """nssa""" +84 30 regularizer """no""" +84 30 optimizer """adam""" +84 30 training_loop """owa""" +84 30 negative_sampler """basic""" +84 30 evaluator """rankbased""" +84 31 dataset """kinships""" +84 31 model """conve""" +84 31 loss """nssa""" +84 31 regularizer """no""" +84 31 optimizer """adam""" +84 31 training_loop """owa""" +84 31 negative_sampler """basic""" +84 31 evaluator """rankbased""" +84 32 dataset """kinships""" +84 32 model """conve""" +84 32 loss """nssa""" +84 32 regularizer """no""" +84 32 optimizer """adam""" +84 32 training_loop """owa""" +84 32 negative_sampler """basic""" +84 32 evaluator """rankbased""" +84 33 dataset """kinships""" +84 33 model """conve""" +84 33 loss """nssa""" +84 33 regularizer """no""" +84 33 optimizer """adam""" +84 33 training_loop """owa""" +84 33 negative_sampler """basic""" +84 33 evaluator """rankbased""" +84 34 dataset """kinships""" +84 34 model """conve""" +84 34 
loss """nssa""" +84 34 regularizer """no""" +84 34 optimizer """adam""" +84 34 training_loop """owa""" +84 34 negative_sampler """basic""" +84 34 evaluator """rankbased""" +84 35 dataset """kinships""" +84 35 model """conve""" +84 35 loss """nssa""" +84 35 regularizer """no""" +84 35 optimizer """adam""" +84 35 training_loop """owa""" +84 35 negative_sampler """basic""" +84 35 evaluator """rankbased""" +84 36 dataset """kinships""" +84 36 model """conve""" +84 36 loss """nssa""" +84 36 regularizer """no""" +84 36 optimizer """adam""" +84 36 training_loop """owa""" +84 36 negative_sampler """basic""" +84 36 evaluator """rankbased""" +84 37 dataset """kinships""" +84 37 model """conve""" +84 37 loss """nssa""" +84 37 regularizer """no""" +84 37 optimizer """adam""" +84 37 training_loop """owa""" +84 37 negative_sampler """basic""" +84 37 evaluator """rankbased""" +84 38 dataset """kinships""" +84 38 model """conve""" +84 38 loss """nssa""" +84 38 regularizer """no""" +84 38 optimizer """adam""" +84 38 training_loop """owa""" +84 38 negative_sampler """basic""" +84 38 evaluator """rankbased""" +84 39 dataset """kinships""" +84 39 model """conve""" +84 39 loss """nssa""" +84 39 regularizer """no""" +84 39 optimizer """adam""" +84 39 training_loop """owa""" +84 39 negative_sampler """basic""" +84 39 evaluator """rankbased""" +84 40 dataset """kinships""" +84 40 model """conve""" +84 40 loss """nssa""" +84 40 regularizer """no""" +84 40 optimizer """adam""" +84 40 training_loop """owa""" +84 40 negative_sampler """basic""" +84 40 evaluator """rankbased""" +84 41 dataset """kinships""" +84 41 model """conve""" +84 41 loss """nssa""" +84 41 regularizer """no""" +84 41 optimizer """adam""" +84 41 training_loop """owa""" +84 41 negative_sampler """basic""" +84 41 evaluator """rankbased""" +84 42 dataset """kinships""" +84 42 model """conve""" +84 42 loss """nssa""" +84 42 regularizer """no""" +84 42 optimizer """adam""" +84 42 training_loop """owa""" +84 42 negative_sampler 
"""basic""" +84 42 evaluator """rankbased""" +84 43 dataset """kinships""" +84 43 model """conve""" +84 43 loss """nssa""" +84 43 regularizer """no""" +84 43 optimizer """adam""" +84 43 training_loop """owa""" +84 43 negative_sampler """basic""" +84 43 evaluator """rankbased""" +84 44 dataset """kinships""" +84 44 model """conve""" +84 44 loss """nssa""" +84 44 regularizer """no""" +84 44 optimizer """adam""" +84 44 training_loop """owa""" +84 44 negative_sampler """basic""" +84 44 evaluator """rankbased""" +84 45 dataset """kinships""" +84 45 model """conve""" +84 45 loss """nssa""" +84 45 regularizer """no""" +84 45 optimizer """adam""" +84 45 training_loop """owa""" +84 45 negative_sampler """basic""" +84 45 evaluator """rankbased""" +84 46 dataset """kinships""" +84 46 model """conve""" +84 46 loss """nssa""" +84 46 regularizer """no""" +84 46 optimizer """adam""" +84 46 training_loop """owa""" +84 46 negative_sampler """basic""" +84 46 evaluator """rankbased""" +84 47 dataset """kinships""" +84 47 model """conve""" +84 47 loss """nssa""" +84 47 regularizer """no""" +84 47 optimizer """adam""" +84 47 training_loop """owa""" +84 47 negative_sampler """basic""" +84 47 evaluator """rankbased""" +84 48 dataset """kinships""" +84 48 model """conve""" +84 48 loss """nssa""" +84 48 regularizer """no""" +84 48 optimizer """adam""" +84 48 training_loop """owa""" +84 48 negative_sampler """basic""" +84 48 evaluator """rankbased""" +84 49 dataset """kinships""" +84 49 model """conve""" +84 49 loss """nssa""" +84 49 regularizer """no""" +84 49 optimizer """adam""" +84 49 training_loop """owa""" +84 49 negative_sampler """basic""" +84 49 evaluator """rankbased""" +84 50 dataset """kinships""" +84 50 model """conve""" +84 50 loss """nssa""" +84 50 regularizer """no""" +84 50 optimizer """adam""" +84 50 training_loop """owa""" +84 50 negative_sampler """basic""" +84 50 evaluator """rankbased""" +84 51 dataset """kinships""" +84 51 model """conve""" +84 51 loss """nssa""" +84 
51 regularizer """no""" +84 51 optimizer """adam""" +84 51 training_loop """owa""" +84 51 negative_sampler """basic""" +84 51 evaluator """rankbased""" +84 52 dataset """kinships""" +84 52 model """conve""" +84 52 loss """nssa""" +84 52 regularizer """no""" +84 52 optimizer """adam""" +84 52 training_loop """owa""" +84 52 negative_sampler """basic""" +84 52 evaluator """rankbased""" +84 53 dataset """kinships""" +84 53 model """conve""" +84 53 loss """nssa""" +84 53 regularizer """no""" +84 53 optimizer """adam""" +84 53 training_loop """owa""" +84 53 negative_sampler """basic""" +84 53 evaluator """rankbased""" +84 54 dataset """kinships""" +84 54 model """conve""" +84 54 loss """nssa""" +84 54 regularizer """no""" +84 54 optimizer """adam""" +84 54 training_loop """owa""" +84 54 negative_sampler """basic""" +84 54 evaluator """rankbased""" +84 55 dataset """kinships""" +84 55 model """conve""" +84 55 loss """nssa""" +84 55 regularizer """no""" +84 55 optimizer """adam""" +84 55 training_loop """owa""" +84 55 negative_sampler """basic""" +84 55 evaluator """rankbased""" +84 56 dataset """kinships""" +84 56 model """conve""" +84 56 loss """nssa""" +84 56 regularizer """no""" +84 56 optimizer """adam""" +84 56 training_loop """owa""" +84 56 negative_sampler """basic""" +84 56 evaluator """rankbased""" +84 57 dataset """kinships""" +84 57 model """conve""" +84 57 loss """nssa""" +84 57 regularizer """no""" +84 57 optimizer """adam""" +84 57 training_loop """owa""" +84 57 negative_sampler """basic""" +84 57 evaluator """rankbased""" +84 58 dataset """kinships""" +84 58 model """conve""" +84 58 loss """nssa""" +84 58 regularizer """no""" +84 58 optimizer """adam""" +84 58 training_loop """owa""" +84 58 negative_sampler """basic""" +84 58 evaluator """rankbased""" +84 59 dataset """kinships""" +84 59 model """conve""" +84 59 loss """nssa""" +84 59 regularizer """no""" +84 59 optimizer """adam""" +84 59 training_loop """owa""" +84 59 negative_sampler """basic""" +84 59 
evaluator """rankbased""" +84 60 dataset """kinships""" +84 60 model """conve""" +84 60 loss """nssa""" +84 60 regularizer """no""" +84 60 optimizer """adam""" +84 60 training_loop """owa""" +84 60 negative_sampler """basic""" +84 60 evaluator """rankbased""" +84 61 dataset """kinships""" +84 61 model """conve""" +84 61 loss """nssa""" +84 61 regularizer """no""" +84 61 optimizer """adam""" +84 61 training_loop """owa""" +84 61 negative_sampler """basic""" +84 61 evaluator """rankbased""" +84 62 dataset """kinships""" +84 62 model """conve""" +84 62 loss """nssa""" +84 62 regularizer """no""" +84 62 optimizer """adam""" +84 62 training_loop """owa""" +84 62 negative_sampler """basic""" +84 62 evaluator """rankbased""" +84 63 dataset """kinships""" +84 63 model """conve""" +84 63 loss """nssa""" +84 63 regularizer """no""" +84 63 optimizer """adam""" +84 63 training_loop """owa""" +84 63 negative_sampler """basic""" +84 63 evaluator """rankbased""" +84 64 dataset """kinships""" +84 64 model """conve""" +84 64 loss """nssa""" +84 64 regularizer """no""" +84 64 optimizer """adam""" +84 64 training_loop """owa""" +84 64 negative_sampler """basic""" +84 64 evaluator """rankbased""" +84 65 dataset """kinships""" +84 65 model """conve""" +84 65 loss """nssa""" +84 65 regularizer """no""" +84 65 optimizer """adam""" +84 65 training_loop """owa""" +84 65 negative_sampler """basic""" +84 65 evaluator """rankbased""" +84 66 dataset """kinships""" +84 66 model """conve""" +84 66 loss """nssa""" +84 66 regularizer """no""" +84 66 optimizer """adam""" +84 66 training_loop """owa""" +84 66 negative_sampler """basic""" +84 66 evaluator """rankbased""" +84 67 dataset """kinships""" +84 67 model """conve""" +84 67 loss """nssa""" +84 67 regularizer """no""" +84 67 optimizer """adam""" +84 67 training_loop """owa""" +84 67 negative_sampler """basic""" +84 67 evaluator """rankbased""" +84 68 dataset """kinships""" +84 68 model """conve""" +84 68 loss """nssa""" +84 68 regularizer 
"""no""" +84 68 optimizer """adam""" +84 68 training_loop """owa""" +84 68 negative_sampler """basic""" +84 68 evaluator """rankbased""" +84 69 dataset """kinships""" +84 69 model """conve""" +84 69 loss """nssa""" +84 69 regularizer """no""" +84 69 optimizer """adam""" +84 69 training_loop """owa""" +84 69 negative_sampler """basic""" +84 69 evaluator """rankbased""" +84 70 dataset """kinships""" +84 70 model """conve""" +84 70 loss """nssa""" +84 70 regularizer """no""" +84 70 optimizer """adam""" +84 70 training_loop """owa""" +84 70 negative_sampler """basic""" +84 70 evaluator """rankbased""" +84 71 dataset """kinships""" +84 71 model """conve""" +84 71 loss """nssa""" +84 71 regularizer """no""" +84 71 optimizer """adam""" +84 71 training_loop """owa""" +84 71 negative_sampler """basic""" +84 71 evaluator """rankbased""" +84 72 dataset """kinships""" +84 72 model """conve""" +84 72 loss """nssa""" +84 72 regularizer """no""" +84 72 optimizer """adam""" +84 72 training_loop """owa""" +84 72 negative_sampler """basic""" +84 72 evaluator """rankbased""" +85 1 model.output_channels 35.0 +85 1 model.input_dropout 0.4292949934659044 +85 1 model.output_dropout 0.3023493860283794 +85 1 model.feature_map_dropout 0.26859269493638743 +85 1 model.embedding_dim 0.0 +85 1 loss.margin 15.594882746838229 +85 1 loss.adversarial_temperature 0.7908853143612029 +85 1 optimizer.lr 0.0018938463691316875 +85 1 negative_sampler.num_negs_per_pos 87.0 +85 1 training.batch_size 0.0 +85 2 model.output_channels 25.0 +85 2 model.input_dropout 0.227778605086987 +85 2 model.output_dropout 0.3091246540897716 +85 2 model.feature_map_dropout 0.49463037319366043 +85 2 model.embedding_dim 2.0 +85 2 loss.margin 1.7615170096526325 +85 2 loss.adversarial_temperature 0.880010528510002 +85 2 optimizer.lr 0.004447672055364934 +85 2 negative_sampler.num_negs_per_pos 49.0 +85 2 training.batch_size 1.0 +85 3 model.output_channels 31.0 +85 3 model.input_dropout 0.2576925979580651 +85 3 
model.output_dropout 0.2860967476326186 +85 3 model.feature_map_dropout 0.13662058105612895 +85 3 model.embedding_dim 0.0 +85 3 loss.margin 25.145163230397266 +85 3 loss.adversarial_temperature 0.6414647694459795 +85 3 optimizer.lr 0.028729964353997654 +85 3 negative_sampler.num_negs_per_pos 39.0 +85 3 training.batch_size 1.0 +85 4 model.output_channels 31.0 +85 4 model.input_dropout 0.13345224095887565 +85 4 model.output_dropout 0.0271860111328725 +85 4 model.feature_map_dropout 0.0868631335624494 +85 4 model.embedding_dim 1.0 +85 4 loss.margin 5.994646043659025 +85 4 loss.adversarial_temperature 0.9381531896420281 +85 4 optimizer.lr 0.002436264884962052 +85 4 negative_sampler.num_negs_per_pos 80.0 +85 4 training.batch_size 0.0 +85 5 model.output_channels 61.0 +85 5 model.input_dropout 0.08705621594997814 +85 5 model.output_dropout 0.15044293344095422 +85 5 model.feature_map_dropout 0.11033327871635112 +85 5 model.embedding_dim 1.0 +85 5 loss.margin 15.975842049844557 +85 5 loss.adversarial_temperature 0.4699392905418345 +85 5 optimizer.lr 0.032371409734430694 +85 5 negative_sampler.num_negs_per_pos 10.0 +85 5 training.batch_size 0.0 +85 6 model.output_channels 44.0 +85 6 model.input_dropout 0.40232181909129233 +85 6 model.output_dropout 0.1155284736343749 +85 6 model.feature_map_dropout 0.4545614775228245 +85 6 model.embedding_dim 0.0 +85 6 loss.margin 15.7233832543898 +85 6 loss.adversarial_temperature 0.5518080424275513 +85 6 optimizer.lr 0.02762467416456095 +85 6 negative_sampler.num_negs_per_pos 96.0 +85 6 training.batch_size 0.0 +85 7 model.output_channels 47.0 +85 7 model.input_dropout 0.4047310232402231 +85 7 model.output_dropout 0.45187238288758413 +85 7 model.feature_map_dropout 0.05452055841534115 +85 7 model.embedding_dim 2.0 +85 7 loss.margin 8.229758503125304 +85 7 loss.adversarial_temperature 0.4738601850233227 +85 7 optimizer.lr 0.03532486617756329 +85 7 negative_sampler.num_negs_per_pos 56.0 +85 7 training.batch_size 1.0 +85 8 
model.output_channels 51.0 +85 8 model.input_dropout 0.325700833879527 +85 8 model.output_dropout 0.14069032597041775 +85 8 model.feature_map_dropout 0.07747899283910165 +85 8 model.embedding_dim 2.0 +85 8 loss.margin 24.12745030441717 +85 8 loss.adversarial_temperature 0.20384145463802295 +85 8 optimizer.lr 0.015318640278579276 +85 8 negative_sampler.num_negs_per_pos 79.0 +85 8 training.batch_size 1.0 +85 9 model.output_channels 29.0 +85 9 model.input_dropout 0.1684622326048943 +85 9 model.output_dropout 0.13192017585484467 +85 9 model.feature_map_dropout 0.48886113919941027 +85 9 model.embedding_dim 0.0 +85 9 loss.margin 9.33219341947717 +85 9 loss.adversarial_temperature 0.7986801136741486 +85 9 optimizer.lr 0.08855276793941175 +85 9 negative_sampler.num_negs_per_pos 83.0 +85 9 training.batch_size 1.0 +85 10 model.output_channels 48.0 +85 10 model.input_dropout 0.41714322764222617 +85 10 model.output_dropout 0.4559186344154535 +85 10 model.feature_map_dropout 0.3381008043390648 +85 10 model.embedding_dim 2.0 +85 10 loss.margin 11.639541925141906 +85 10 loss.adversarial_temperature 0.10083159679452255 +85 10 optimizer.lr 0.005338753716637416 +85 10 negative_sampler.num_negs_per_pos 21.0 +85 10 training.batch_size 1.0 +85 11 model.output_channels 54.0 +85 11 model.input_dropout 0.4681169535958994 +85 11 model.output_dropout 0.3093975865781916 +85 11 model.feature_map_dropout 0.07253307829494182 +85 11 model.embedding_dim 2.0 +85 11 loss.margin 29.265135519572308 +85 11 loss.adversarial_temperature 0.2792287523602768 +85 11 optimizer.lr 0.002152101041633843 +85 11 negative_sampler.num_negs_per_pos 97.0 +85 11 training.batch_size 2.0 +85 12 model.output_channels 53.0 +85 12 model.input_dropout 0.2721865740130636 +85 12 model.output_dropout 0.09517438834932485 +85 12 model.feature_map_dropout 0.2365913683573171 +85 12 model.embedding_dim 1.0 +85 12 loss.margin 26.71833114583495 +85 12 loss.adversarial_temperature 0.19197832935774484 +85 12 optimizer.lr 
0.05424644767020664 +85 12 negative_sampler.num_negs_per_pos 3.0 +85 12 training.batch_size 2.0 +85 13 model.output_channels 37.0 +85 13 model.input_dropout 0.2573159272407284 +85 13 model.output_dropout 0.43991756148678063 +85 13 model.feature_map_dropout 0.34166807981410197 +85 13 model.embedding_dim 1.0 +85 13 loss.margin 18.59192214612623 +85 13 loss.adversarial_temperature 0.22633739355772076 +85 13 optimizer.lr 0.01176683439214364 +85 13 negative_sampler.num_negs_per_pos 1.0 +85 13 training.batch_size 2.0 +85 14 model.output_channels 54.0 +85 14 model.input_dropout 0.4760253324921274 +85 14 model.output_dropout 0.030674079748493843 +85 14 model.feature_map_dropout 0.4184279946218459 +85 14 model.embedding_dim 2.0 +85 14 loss.margin 17.95082107911169 +85 14 loss.adversarial_temperature 0.9613549358694197 +85 14 optimizer.lr 0.009734430622805048 +85 14 negative_sampler.num_negs_per_pos 88.0 +85 14 training.batch_size 2.0 +85 15 model.output_channels 47.0 +85 15 model.input_dropout 0.08716499816514323 +85 15 model.output_dropout 0.4700871391258461 +85 15 model.feature_map_dropout 0.16723519097299083 +85 15 model.embedding_dim 2.0 +85 15 loss.margin 12.870966132843849 +85 15 loss.adversarial_temperature 0.98085704228683 +85 15 optimizer.lr 0.0016132391465623539 +85 15 negative_sampler.num_negs_per_pos 39.0 +85 15 training.batch_size 2.0 +85 16 model.output_channels 50.0 +85 16 model.input_dropout 0.1152956291772228 +85 16 model.output_dropout 0.3181708632091454 +85 16 model.feature_map_dropout 0.46398069315532886 +85 16 model.embedding_dim 0.0 +85 16 loss.margin 23.796910805228563 +85 16 loss.adversarial_temperature 0.7643985807166102 +85 16 optimizer.lr 0.010161903123162185 +85 16 negative_sampler.num_negs_per_pos 6.0 +85 16 training.batch_size 0.0 +85 17 model.output_channels 16.0 +85 17 model.input_dropout 0.3412885284667282 +85 17 model.output_dropout 0.29864685805063207 +85 17 model.feature_map_dropout 0.4250268904948293 +85 17 model.embedding_dim 1.0 +85 17 
loss.margin 16.453341176121143 +85 17 loss.adversarial_temperature 0.5157259029613296 +85 17 optimizer.lr 0.0035219035194425192 +85 17 negative_sampler.num_negs_per_pos 96.0 +85 17 training.batch_size 0.0 +85 18 model.output_channels 35.0 +85 18 model.input_dropout 0.2985535472172301 +85 18 model.output_dropout 0.22267985411324193 +85 18 model.feature_map_dropout 0.4562269097587271 +85 18 model.embedding_dim 0.0 +85 18 loss.margin 19.75360256886196 +85 18 loss.adversarial_temperature 0.7726330465076402 +85 18 optimizer.lr 0.0030043796107542893 +85 18 negative_sampler.num_negs_per_pos 30.0 +85 18 training.batch_size 2.0 +85 19 model.output_channels 50.0 +85 19 model.input_dropout 0.11080161865457239 +85 19 model.output_dropout 0.13449144425020737 +85 19 model.feature_map_dropout 0.004410238206970896 +85 19 model.embedding_dim 1.0 +85 19 loss.margin 10.434075893302314 +85 19 loss.adversarial_temperature 0.6376471359199355 +85 19 optimizer.lr 0.03407320775872933 +85 19 negative_sampler.num_negs_per_pos 83.0 +85 19 training.batch_size 1.0 +85 20 model.output_channels 42.0 +85 20 model.input_dropout 0.09064824728691856 +85 20 model.output_dropout 0.31597066473638596 +85 20 model.feature_map_dropout 0.31653910932569584 +85 20 model.embedding_dim 1.0 +85 20 loss.margin 26.461917162438233 +85 20 loss.adversarial_temperature 0.2392683271452485 +85 20 optimizer.lr 0.0997564180229602 +85 20 negative_sampler.num_negs_per_pos 77.0 +85 20 training.batch_size 2.0 +85 21 model.output_channels 49.0 +85 21 model.input_dropout 0.18864542540888501 +85 21 model.output_dropout 0.38015346961462243 +85 21 model.feature_map_dropout 0.4966488947671122 +85 21 model.embedding_dim 1.0 +85 21 loss.margin 20.14610381006304 +85 21 loss.adversarial_temperature 0.7898988210058068 +85 21 optimizer.lr 0.0012564940288649843 +85 21 negative_sampler.num_negs_per_pos 83.0 +85 21 training.batch_size 2.0 +85 22 model.output_channels 21.0 +85 22 model.input_dropout 0.38081593330918634 +85 22 
model.output_dropout 0.06027627479381292 +85 22 model.feature_map_dropout 0.03983094791655728 +85 22 model.embedding_dim 0.0 +85 22 loss.margin 1.7823452961700785 +85 22 loss.adversarial_temperature 0.6819148799167589 +85 22 optimizer.lr 0.07544652291427228 +85 22 negative_sampler.num_negs_per_pos 77.0 +85 22 training.batch_size 1.0 +85 23 model.output_channels 56.0 +85 23 model.input_dropout 0.0015704401872869145 +85 23 model.output_dropout 0.20093537056522975 +85 23 model.feature_map_dropout 0.4591763712124265 +85 23 model.embedding_dim 0.0 +85 23 loss.margin 20.985721838632237 +85 23 loss.adversarial_temperature 0.31418920180271787 +85 23 optimizer.lr 0.004273378874090643 +85 23 negative_sampler.num_negs_per_pos 67.0 +85 23 training.batch_size 0.0 +85 24 model.output_channels 28.0 +85 24 model.input_dropout 0.10783761347446419 +85 24 model.output_dropout 0.49133116762585566 +85 24 model.feature_map_dropout 0.21189720081257446 +85 24 model.embedding_dim 2.0 +85 24 loss.margin 3.5468922276332 +85 24 loss.adversarial_temperature 0.11687365737761528 +85 24 optimizer.lr 0.025838466544373054 +85 24 negative_sampler.num_negs_per_pos 52.0 +85 24 training.batch_size 2.0 +85 25 model.output_channels 62.0 +85 25 model.input_dropout 0.008154041474016493 +85 25 model.output_dropout 0.32128894942060754 +85 25 model.feature_map_dropout 0.27872185290002016 +85 25 model.embedding_dim 1.0 +85 25 loss.margin 10.739571661495683 +85 25 loss.adversarial_temperature 0.5374381643128905 +85 25 optimizer.lr 0.0029180814490458416 +85 25 negative_sampler.num_negs_per_pos 57.0 +85 25 training.batch_size 1.0 +85 26 model.output_channels 34.0 +85 26 model.input_dropout 0.0858017973806049 +85 26 model.output_dropout 0.2086878089254005 +85 26 model.feature_map_dropout 0.45601491613411566 +85 26 model.embedding_dim 2.0 +85 26 loss.margin 6.23543557167145 +85 26 loss.adversarial_temperature 0.4664475127725428 +85 26 optimizer.lr 0.0018191282487962667 +85 26 negative_sampler.num_negs_per_pos 36.0 
+85 26 training.batch_size 0.0 +85 27 model.output_channels 39.0 +85 27 model.input_dropout 0.352206168872858 +85 27 model.output_dropout 0.1214245443375006 +85 27 model.feature_map_dropout 0.058180751235767825 +85 27 model.embedding_dim 0.0 +85 27 loss.margin 5.302686309560817 +85 27 loss.adversarial_temperature 0.2928043588206799 +85 27 optimizer.lr 0.0661964690546559 +85 27 negative_sampler.num_negs_per_pos 10.0 +85 27 training.batch_size 0.0 +85 28 model.output_channels 30.0 +85 28 model.input_dropout 0.2956327082063792 +85 28 model.output_dropout 0.26699750857634774 +85 28 model.feature_map_dropout 0.24541122536784138 +85 28 model.embedding_dim 2.0 +85 28 loss.margin 6.163406177615772 +85 28 loss.adversarial_temperature 0.21121634152666635 +85 28 optimizer.lr 0.00808390053307633 +85 28 negative_sampler.num_negs_per_pos 78.0 +85 28 training.batch_size 2.0 +85 29 model.output_channels 44.0 +85 29 model.input_dropout 0.2956411237131204 +85 29 model.output_dropout 0.3367219800242818 +85 29 model.feature_map_dropout 0.20743350059265292 +85 29 model.embedding_dim 2.0 +85 29 loss.margin 18.18954366570077 +85 29 loss.adversarial_temperature 0.9086404823350974 +85 29 optimizer.lr 0.0029340960412003714 +85 29 negative_sampler.num_negs_per_pos 45.0 +85 29 training.batch_size 0.0 +85 30 model.output_channels 19.0 +85 30 model.input_dropout 0.3382031777541296 +85 30 model.output_dropout 0.16942312301431328 +85 30 model.feature_map_dropout 0.4217267708410573 +85 30 model.embedding_dim 0.0 +85 30 loss.margin 29.150563650503198 +85 30 loss.adversarial_temperature 0.3479860941476452 +85 30 optimizer.lr 0.007587051657885528 +85 30 negative_sampler.num_negs_per_pos 16.0 +85 30 training.batch_size 0.0 +85 31 model.output_channels 59.0 +85 31 model.input_dropout 0.4837575761700023 +85 31 model.output_dropout 0.1294898168674044 +85 31 model.feature_map_dropout 0.1060928392900834 +85 31 model.embedding_dim 2.0 +85 31 loss.margin 29.84575281811866 +85 31 loss.adversarial_temperature 
0.8884862027977187 +85 31 optimizer.lr 0.09083647701257544 +85 31 negative_sampler.num_negs_per_pos 7.0 +85 31 training.batch_size 2.0 +85 32 model.output_channels 32.0 +85 32 model.input_dropout 0.31993493192440636 +85 32 model.output_dropout 0.19856159627732906 +85 32 model.feature_map_dropout 0.22501799575411624 +85 32 model.embedding_dim 2.0 +85 32 loss.margin 23.180535117307492 +85 32 loss.adversarial_temperature 0.38430818015825163 +85 32 optimizer.lr 0.0016804673664548516 +85 32 negative_sampler.num_negs_per_pos 71.0 +85 32 training.batch_size 2.0 +85 33 model.output_channels 44.0 +85 33 model.input_dropout 0.08254576091356342 +85 33 model.output_dropout 0.441686110148533 +85 33 model.feature_map_dropout 0.4175285824013822 +85 33 model.embedding_dim 0.0 +85 33 loss.margin 17.13770087911686 +85 33 loss.adversarial_temperature 0.8084840118044374 +85 33 optimizer.lr 0.0017166965255024883 +85 33 negative_sampler.num_negs_per_pos 23.0 +85 33 training.batch_size 2.0 +85 34 model.output_channels 52.0 +85 34 model.input_dropout 0.18189883899369635 +85 34 model.output_dropout 0.45626748311810617 +85 34 model.feature_map_dropout 0.05894724243533017 +85 34 model.embedding_dim 2.0 +85 34 loss.margin 20.034745812164648 +85 34 loss.adversarial_temperature 0.41308998953443543 +85 34 optimizer.lr 0.00631053470301162 +85 34 negative_sampler.num_negs_per_pos 87.0 +85 34 training.batch_size 1.0 +85 35 model.output_channels 39.0 +85 35 model.input_dropout 0.4478551614407765 +85 35 model.output_dropout 0.35989793600444003 +85 35 model.feature_map_dropout 0.10358953823139955 +85 35 model.embedding_dim 1.0 +85 35 loss.margin 19.094372065339464 +85 35 loss.adversarial_temperature 0.2027747072092661 +85 35 optimizer.lr 0.025942503084342827 +85 35 negative_sampler.num_negs_per_pos 26.0 +85 35 training.batch_size 0.0 +85 36 model.output_channels 49.0 +85 36 model.input_dropout 0.2739965914645573 +85 36 model.output_dropout 0.28962872074245816 +85 36 model.feature_map_dropout 
0.28952227334767655 +85 36 model.embedding_dim 1.0 +85 36 loss.margin 5.918963957719305 +85 36 loss.adversarial_temperature 0.21147132255915305 +85 36 optimizer.lr 0.0026092453530356975 +85 36 negative_sampler.num_negs_per_pos 87.0 +85 36 training.batch_size 1.0 +85 37 model.output_channels 61.0 +85 37 model.input_dropout 0.11863642538936009 +85 37 model.output_dropout 0.4239396082663933 +85 37 model.feature_map_dropout 0.48668530139944427 +85 37 model.embedding_dim 2.0 +85 37 loss.margin 14.082334379078954 +85 37 loss.adversarial_temperature 0.7628755238445216 +85 37 optimizer.lr 0.0010395520154547742 +85 37 negative_sampler.num_negs_per_pos 36.0 +85 37 training.batch_size 0.0 +85 38 model.output_channels 43.0 +85 38 model.input_dropout 0.45959778850415384 +85 38 model.output_dropout 0.48972134147713536 +85 38 model.feature_map_dropout 0.1157954011160372 +85 38 model.embedding_dim 1.0 +85 38 loss.margin 23.472928068472378 +85 38 loss.adversarial_temperature 0.32682954636969885 +85 38 optimizer.lr 0.02150230670682995 +85 38 negative_sampler.num_negs_per_pos 38.0 +85 38 training.batch_size 1.0 +85 39 model.output_channels 57.0 +85 39 model.input_dropout 0.14392547603554062 +85 39 model.output_dropout 0.1778104998717136 +85 39 model.feature_map_dropout 0.21351034528928714 +85 39 model.embedding_dim 0.0 +85 39 loss.margin 29.07541175654928 +85 39 loss.adversarial_temperature 0.7145461350618898 +85 39 optimizer.lr 0.0025358572020218193 +85 39 negative_sampler.num_negs_per_pos 67.0 +85 39 training.batch_size 1.0 +85 40 model.output_channels 44.0 +85 40 model.input_dropout 0.000863772921934125 +85 40 model.output_dropout 0.342265592053201 +85 40 model.feature_map_dropout 0.36310957286871937 +85 40 model.embedding_dim 0.0 +85 40 loss.margin 11.133282907755392 +85 40 loss.adversarial_temperature 0.44567353498490003 +85 40 optimizer.lr 0.014222243264671744 +85 40 negative_sampler.num_negs_per_pos 71.0 +85 40 training.batch_size 0.0 +85 41 model.output_channels 17.0 +85 41 
model.input_dropout 0.30091041613121955 +85 41 model.output_dropout 0.21773302588190574 +85 41 model.feature_map_dropout 0.3045752803732632 +85 41 model.embedding_dim 2.0 +85 41 loss.margin 6.003490603888049 +85 41 loss.adversarial_temperature 0.5487029903434988 +85 41 optimizer.lr 0.00652768316436784 +85 41 negative_sampler.num_negs_per_pos 24.0 +85 41 training.batch_size 1.0 +85 42 model.output_channels 22.0 +85 42 model.input_dropout 0.04896820849224959 +85 42 model.output_dropout 0.20973066410606195 +85 42 model.feature_map_dropout 0.19240711934792626 +85 42 model.embedding_dim 2.0 +85 42 loss.margin 17.230500893208774 +85 42 loss.adversarial_temperature 0.11277609687845293 +85 42 optimizer.lr 0.05693958014134784 +85 42 negative_sampler.num_negs_per_pos 93.0 +85 42 training.batch_size 2.0 +85 43 model.output_channels 35.0 +85 43 model.input_dropout 0.018312723105444795 +85 43 model.output_dropout 0.28211509465704476 +85 43 model.feature_map_dropout 0.3249532325559623 +85 43 model.embedding_dim 1.0 +85 43 loss.margin 19.45105455584476 +85 43 loss.adversarial_temperature 0.1866947511132866 +85 43 optimizer.lr 0.011751463220148209 +85 43 negative_sampler.num_negs_per_pos 54.0 +85 43 training.batch_size 2.0 +85 44 model.output_channels 57.0 +85 44 model.input_dropout 0.37132855733591125 +85 44 model.output_dropout 0.3749967194451001 +85 44 model.feature_map_dropout 0.3106985985667104 +85 44 model.embedding_dim 1.0 +85 44 loss.margin 2.4939394699251762 +85 44 loss.adversarial_temperature 0.9254999000123494 +85 44 optimizer.lr 0.027518274881879724 +85 44 negative_sampler.num_negs_per_pos 94.0 +85 44 training.batch_size 2.0 +85 45 model.output_channels 45.0 +85 45 model.input_dropout 0.11563152452735664 +85 45 model.output_dropout 0.24931863926743453 +85 45 model.feature_map_dropout 0.021934011932192743 +85 45 model.embedding_dim 0.0 +85 45 loss.margin 21.664234613369175 +85 45 loss.adversarial_temperature 0.6849071421718962 +85 45 optimizer.lr 0.002855171694168727 
+85 45 negative_sampler.num_negs_per_pos 78.0 +85 45 training.batch_size 0.0 +85 46 model.output_channels 42.0 +85 46 model.input_dropout 0.3343767968787569 +85 46 model.output_dropout 0.08533210952431447 +85 46 model.feature_map_dropout 0.0643573762870629 +85 46 model.embedding_dim 2.0 +85 46 loss.margin 16.018302175008685 +85 46 loss.adversarial_temperature 0.603056139721308 +85 46 optimizer.lr 0.001789254781242665 +85 46 negative_sampler.num_negs_per_pos 15.0 +85 46 training.batch_size 2.0 +85 47 model.output_channels 16.0 +85 47 model.input_dropout 0.39727455226238445 +85 47 model.output_dropout 0.036957703028634936 +85 47 model.feature_map_dropout 0.4299964029784771 +85 47 model.embedding_dim 1.0 +85 47 loss.margin 3.428967024651709 +85 47 loss.adversarial_temperature 0.47774075255161996 +85 47 optimizer.lr 0.029778889449389598 +85 47 negative_sampler.num_negs_per_pos 39.0 +85 47 training.batch_size 1.0 +85 48 model.output_channels 45.0 +85 48 model.input_dropout 0.04955215407514374 +85 48 model.output_dropout 0.3899978768574536 +85 48 model.feature_map_dropout 0.3989948050223676 +85 48 model.embedding_dim 2.0 +85 48 loss.margin 18.47267862733143 +85 48 loss.adversarial_temperature 0.4712919021371512 +85 48 optimizer.lr 0.004389750681317657 +85 48 negative_sampler.num_negs_per_pos 89.0 +85 48 training.batch_size 1.0 +85 49 model.output_channels 39.0 +85 49 model.input_dropout 0.4810279133052709 +85 49 model.output_dropout 0.4926194332223777 +85 49 model.feature_map_dropout 0.47357324920002647 +85 49 model.embedding_dim 1.0 +85 49 loss.margin 23.095218166931485 +85 49 loss.adversarial_temperature 0.5645750622913176 +85 49 optimizer.lr 0.009928654830945433 +85 49 negative_sampler.num_negs_per_pos 96.0 +85 49 training.batch_size 0.0 +85 50 model.output_channels 55.0 +85 50 model.input_dropout 0.4663607272494706 +85 50 model.output_dropout 0.4181897925509201 +85 50 model.feature_map_dropout 0.15352405342260228 +85 50 model.embedding_dim 1.0 +85 50 loss.margin 
29.775284292845438 +85 50 loss.adversarial_temperature 0.5764341961219593 +85 50 optimizer.lr 0.0029232383021076604 +85 50 negative_sampler.num_negs_per_pos 21.0 +85 50 training.batch_size 1.0 +85 51 model.output_channels 18.0 +85 51 model.input_dropout 0.3408011917841946 +85 51 model.output_dropout 0.38119085098799543 +85 51 model.feature_map_dropout 0.25586941712480293 +85 51 model.embedding_dim 0.0 +85 51 loss.margin 5.371863724160427 +85 51 loss.adversarial_temperature 0.5064116178715292 +85 51 optimizer.lr 0.00244319472111076 +85 51 negative_sampler.num_negs_per_pos 13.0 +85 51 training.batch_size 0.0 +85 52 model.output_channels 37.0 +85 52 model.input_dropout 0.23816213560162336 +85 52 model.output_dropout 0.27514104855637955 +85 52 model.feature_map_dropout 0.4735434276291993 +85 52 model.embedding_dim 2.0 +85 52 loss.margin 9.684262495184162 +85 52 loss.adversarial_temperature 0.9572298242735018 +85 52 optimizer.lr 0.025346784005227 +85 52 negative_sampler.num_negs_per_pos 15.0 +85 52 training.batch_size 2.0 +85 53 model.output_channels 40.0 +85 53 model.input_dropout 0.34791849433187033 +85 53 model.output_dropout 0.012386698395497553 +85 53 model.feature_map_dropout 0.24774143876551125 +85 53 model.embedding_dim 1.0 +85 53 loss.margin 1.9092002158722432 +85 53 loss.adversarial_temperature 0.9801063373567462 +85 53 optimizer.lr 0.00264324364855602 +85 53 negative_sampler.num_negs_per_pos 23.0 +85 53 training.batch_size 0.0 +85 54 model.output_channels 28.0 +85 54 model.input_dropout 0.22370344360024902 +85 54 model.output_dropout 0.303140155700619 +85 54 model.feature_map_dropout 0.46680275880849725 +85 54 model.embedding_dim 2.0 +85 54 loss.margin 20.376103830248688 +85 54 loss.adversarial_temperature 0.7597606083096596 +85 54 optimizer.lr 0.015834340801951126 +85 54 negative_sampler.num_negs_per_pos 19.0 +85 54 training.batch_size 1.0 +85 55 model.output_channels 43.0 +85 55 model.input_dropout 0.1552541824586413 +85 55 model.output_dropout 
0.1533798970626331 +85 55 model.feature_map_dropout 0.3708169614233268 +85 55 model.embedding_dim 2.0 +85 55 loss.margin 23.996517786013655 +85 55 loss.adversarial_temperature 0.4054617810400446 +85 55 optimizer.lr 0.0013353902010397364 +85 55 negative_sampler.num_negs_per_pos 41.0 +85 55 training.batch_size 2.0 +85 56 model.output_channels 57.0 +85 56 model.input_dropout 0.10705799160495982 +85 56 model.output_dropout 0.2262361458358721 +85 56 model.feature_map_dropout 0.2574419919303572 +85 56 model.embedding_dim 2.0 +85 56 loss.margin 5.440913671787689 +85 56 loss.adversarial_temperature 0.9025228194125908 +85 56 optimizer.lr 0.013457751140890357 +85 56 negative_sampler.num_negs_per_pos 0.0 +85 56 training.batch_size 0.0 +85 57 model.output_channels 35.0 +85 57 model.input_dropout 0.44647255095402605 +85 57 model.output_dropout 0.2806669809054243 +85 57 model.feature_map_dropout 0.4172142423948499 +85 57 model.embedding_dim 1.0 +85 57 loss.margin 20.536098123527626 +85 57 loss.adversarial_temperature 0.7224434719734952 +85 57 optimizer.lr 0.021125834621665143 +85 57 negative_sampler.num_negs_per_pos 20.0 +85 57 training.batch_size 1.0 +85 58 model.output_channels 50.0 +85 58 model.input_dropout 0.33130028887967805 +85 58 model.output_dropout 0.38286269614231166 +85 58 model.feature_map_dropout 0.2018279100306859 +85 58 model.embedding_dim 0.0 +85 58 loss.margin 29.626146579987736 +85 58 loss.adversarial_temperature 0.7549264631516635 +85 58 optimizer.lr 0.008848635017930784 +85 58 negative_sampler.num_negs_per_pos 30.0 +85 58 training.batch_size 1.0 +85 59 model.output_channels 52.0 +85 59 model.input_dropout 0.08847532865977026 +85 59 model.output_dropout 0.4676452115686831 +85 59 model.feature_map_dropout 0.37177338309554764 +85 59 model.embedding_dim 0.0 +85 59 loss.margin 23.62724120483368 +85 59 loss.adversarial_temperature 0.1088942041892861 +85 59 optimizer.lr 0.00564027851981763 +85 59 negative_sampler.num_negs_per_pos 12.0 +85 59 training.batch_size 0.0 
+85 60 model.output_channels 60.0 +85 60 model.input_dropout 0.13564284955576794 +85 60 model.output_dropout 0.30245531639790435 +85 60 model.feature_map_dropout 0.1409752267608146 +85 60 model.embedding_dim 2.0 +85 60 loss.margin 9.50617915597476 +85 60 loss.adversarial_temperature 0.18232172072025252 +85 60 optimizer.lr 0.07983836630707994 +85 60 negative_sampler.num_negs_per_pos 42.0 +85 60 training.batch_size 1.0 +85 61 model.output_channels 31.0 +85 61 model.input_dropout 0.22441627556479143 +85 61 model.output_dropout 0.021110985257805137 +85 61 model.feature_map_dropout 0.06506475872500633 +85 61 model.embedding_dim 2.0 +85 61 loss.margin 13.525528241534806 +85 61 loss.adversarial_temperature 0.7368194057177048 +85 61 optimizer.lr 0.001556198541284996 +85 61 negative_sampler.num_negs_per_pos 37.0 +85 61 training.batch_size 1.0 +85 62 model.output_channels 51.0 +85 62 model.input_dropout 0.358133242219274 +85 62 model.output_dropout 0.060084400089730094 +85 62 model.feature_map_dropout 0.3640049125971549 +85 62 model.embedding_dim 0.0 +85 62 loss.margin 2.1622283092748065 +85 62 loss.adversarial_temperature 0.9718511271575191 +85 62 optimizer.lr 0.004270635910428186 +85 62 negative_sampler.num_negs_per_pos 48.0 +85 62 training.batch_size 1.0 +85 63 model.output_channels 16.0 +85 63 model.input_dropout 0.09228693100019919 +85 63 model.output_dropout 0.07328408502077588 +85 63 model.feature_map_dropout 0.17008112786798513 +85 63 model.embedding_dim 2.0 +85 63 loss.margin 24.22475469826946 +85 63 loss.adversarial_temperature 0.9863565485384831 +85 63 optimizer.lr 0.05153241131411964 +85 63 negative_sampler.num_negs_per_pos 24.0 +85 63 training.batch_size 1.0 +85 64 model.output_channels 39.0 +85 64 model.input_dropout 0.09962433426974632 +85 64 model.output_dropout 0.3520304420148076 +85 64 model.feature_map_dropout 0.26203431320806225 +85 64 model.embedding_dim 2.0 +85 64 loss.margin 6.6938735664883104 +85 64 loss.adversarial_temperature 0.8257250103850039 +85 
64 optimizer.lr 0.0014261284878152986 +85 64 negative_sampler.num_negs_per_pos 64.0 +85 64 training.batch_size 2.0 +85 65 model.output_channels 43.0 +85 65 model.input_dropout 0.13930448255153327 +85 65 model.output_dropout 0.1749461213714411 +85 65 model.feature_map_dropout 0.20865097283012807 +85 65 model.embedding_dim 1.0 +85 65 loss.margin 20.805525252836738 +85 65 loss.adversarial_temperature 0.26336763480311187 +85 65 optimizer.lr 0.002607439558408148 +85 65 negative_sampler.num_negs_per_pos 97.0 +85 65 training.batch_size 2.0 +85 66 model.output_channels 50.0 +85 66 model.input_dropout 0.15501979113359093 +85 66 model.output_dropout 0.07325602823755006 +85 66 model.feature_map_dropout 0.45798171751812744 +85 66 model.embedding_dim 1.0 +85 66 loss.margin 22.948396072000957 +85 66 loss.adversarial_temperature 0.39057195297598746 +85 66 optimizer.lr 0.0015750821070548257 +85 66 negative_sampler.num_negs_per_pos 41.0 +85 66 training.batch_size 0.0 +85 67 model.output_channels 17.0 +85 67 model.input_dropout 0.10624281122082974 +85 67 model.output_dropout 0.22583818304321457 +85 67 model.feature_map_dropout 0.3713202464135455 +85 67 model.embedding_dim 2.0 +85 67 loss.margin 11.080083280019688 +85 67 loss.adversarial_temperature 0.2555668509174003 +85 67 optimizer.lr 0.00311591938318739 +85 67 negative_sampler.num_negs_per_pos 14.0 +85 67 training.batch_size 0.0 +85 68 model.output_channels 52.0 +85 68 model.input_dropout 0.42662437631854383 +85 68 model.output_dropout 0.16108810084991088 +85 68 model.feature_map_dropout 0.04146321605732006 +85 68 model.embedding_dim 2.0 +85 68 loss.margin 22.73120586600945 +85 68 loss.adversarial_temperature 0.5443394883262624 +85 68 optimizer.lr 0.05154429108263617 +85 68 negative_sampler.num_negs_per_pos 45.0 +85 68 training.batch_size 1.0 +85 69 model.output_channels 20.0 +85 69 model.input_dropout 0.2860794363123973 +85 69 model.output_dropout 0.24872428513451733 +85 69 model.feature_map_dropout 0.18834410947093144 +85 69 
model.embedding_dim 0.0 +85 69 loss.margin 7.614366845546549 +85 69 loss.adversarial_temperature 0.72008076762558 +85 69 optimizer.lr 0.09719245650806746 +85 69 negative_sampler.num_negs_per_pos 44.0 +85 69 training.batch_size 0.0 +85 70 model.output_channels 22.0 +85 70 model.input_dropout 0.24787947897060297 +85 70 model.output_dropout 0.3067860669036227 +85 70 model.feature_map_dropout 0.2295377935371457 +85 70 model.embedding_dim 1.0 +85 70 loss.margin 21.41928936725871 +85 70 loss.adversarial_temperature 0.30723626217050976 +85 70 optimizer.lr 0.0033595908650023007 +85 70 negative_sampler.num_negs_per_pos 95.0 +85 70 training.batch_size 1.0 +85 71 model.output_channels 48.0 +85 71 model.input_dropout 0.044234861050530705 +85 71 model.output_dropout 0.09060824028165088 +85 71 model.feature_map_dropout 0.4493787426467524 +85 71 model.embedding_dim 0.0 +85 71 loss.margin 20.489642793197945 +85 71 loss.adversarial_temperature 0.8856455399227983 +85 71 optimizer.lr 0.002384564241362298 +85 71 negative_sampler.num_negs_per_pos 94.0 +85 71 training.batch_size 0.0 +85 72 model.output_channels 37.0 +85 72 model.input_dropout 0.402690155782304 +85 72 model.output_dropout 0.16836938663729256 +85 72 model.feature_map_dropout 0.12188373077823822 +85 72 model.embedding_dim 0.0 +85 72 loss.margin 5.835909624555456 +85 72 loss.adversarial_temperature 0.34173206262358946 +85 72 optimizer.lr 0.006706885470800821 +85 72 negative_sampler.num_negs_per_pos 91.0 +85 72 training.batch_size 1.0 +85 73 model.output_channels 38.0 +85 73 model.input_dropout 0.07223692086200362 +85 73 model.output_dropout 0.06359975409790208 +85 73 model.feature_map_dropout 0.30402644943220525 +85 73 model.embedding_dim 2.0 +85 73 loss.margin 10.806115028276743 +85 73 loss.adversarial_temperature 0.6260270814416925 +85 73 optimizer.lr 0.0036073559293319808 +85 73 negative_sampler.num_negs_per_pos 64.0 +85 73 training.batch_size 0.0 +85 74 model.output_channels 40.0 +85 74 model.input_dropout 
0.10075078953433969 +85 74 model.output_dropout 0.3078574730066544 +85 74 model.feature_map_dropout 0.13997592949437765 +85 74 model.embedding_dim 0.0 +85 74 loss.margin 18.336637548643864 +85 74 loss.adversarial_temperature 0.2585198097462027 +85 74 optimizer.lr 0.09293820508198467 +85 74 negative_sampler.num_negs_per_pos 50.0 +85 74 training.batch_size 0.0 +85 75 model.output_channels 24.0 +85 75 model.input_dropout 0.16868155146897312 +85 75 model.output_dropout 0.35330130751578803 +85 75 model.feature_map_dropout 0.13744404290693457 +85 75 model.embedding_dim 1.0 +85 75 loss.margin 10.435223691172341 +85 75 loss.adversarial_temperature 0.3514965598682882 +85 75 optimizer.lr 0.004449803926252457 +85 75 negative_sampler.num_negs_per_pos 12.0 +85 75 training.batch_size 2.0 +85 76 model.output_channels 62.0 +85 76 model.input_dropout 0.2551932990780046 +85 76 model.output_dropout 0.429928361275966 +85 76 model.feature_map_dropout 0.35791514686598386 +85 76 model.embedding_dim 1.0 +85 76 loss.margin 14.564264566559231 +85 76 loss.adversarial_temperature 0.27349946007974624 +85 76 optimizer.lr 0.0039469074210484265 +85 76 negative_sampler.num_negs_per_pos 44.0 +85 76 training.batch_size 0.0 +85 77 model.output_channels 20.0 +85 77 model.input_dropout 0.028034655942865416 +85 77 model.output_dropout 0.27952770370277114 +85 77 model.feature_map_dropout 0.14823448620032198 +85 77 model.embedding_dim 2.0 +85 77 loss.margin 27.235815273426518 +85 77 loss.adversarial_temperature 0.19269461194510695 +85 77 optimizer.lr 0.0028714793841375713 +85 77 negative_sampler.num_negs_per_pos 25.0 +85 77 training.batch_size 2.0 +85 78 model.output_channels 37.0 +85 78 model.input_dropout 0.38683340224552637 +85 78 model.output_dropout 0.14722373817290024 +85 78 model.feature_map_dropout 0.39682479923899994 +85 78 model.embedding_dim 0.0 +85 78 loss.margin 27.822611998393796 +85 78 loss.adversarial_temperature 0.9204007620759307 +85 78 optimizer.lr 0.01324375070471573 +85 78 
negative_sampler.num_negs_per_pos 45.0 +85 78 training.batch_size 1.0 +85 79 model.output_channels 22.0 +85 79 model.input_dropout 0.40578688721311784 +85 79 model.output_dropout 0.3938724357729211 +85 79 model.feature_map_dropout 0.32856972808667273 +85 79 model.embedding_dim 1.0 +85 79 loss.margin 19.60816470530305 +85 79 loss.adversarial_temperature 0.8190752209320491 +85 79 optimizer.lr 0.0016709425795795388 +85 79 negative_sampler.num_negs_per_pos 64.0 +85 79 training.batch_size 2.0 +85 80 model.output_channels 50.0 +85 80 model.input_dropout 0.23273097112187535 +85 80 model.output_dropout 0.016187821537668068 +85 80 model.feature_map_dropout 0.11259226725131716 +85 80 model.embedding_dim 0.0 +85 80 loss.margin 23.13852009367224 +85 80 loss.adversarial_temperature 0.8988895810009535 +85 80 optimizer.lr 0.005825429943836882 +85 80 negative_sampler.num_negs_per_pos 19.0 +85 80 training.batch_size 2.0 +85 81 model.output_channels 21.0 +85 81 model.input_dropout 0.49862083907045873 +85 81 model.output_dropout 0.41959301589597764 +85 81 model.feature_map_dropout 0.2546388767046067 +85 81 model.embedding_dim 2.0 +85 81 loss.margin 27.98256648547782 +85 81 loss.adversarial_temperature 0.9708818255289066 +85 81 optimizer.lr 0.03759502421063883 +85 81 negative_sampler.num_negs_per_pos 30.0 +85 81 training.batch_size 0.0 +85 82 model.output_channels 44.0 +85 82 model.input_dropout 0.31212871907339196 +85 82 model.output_dropout 0.2606809371931875 +85 82 model.feature_map_dropout 0.14887660701222755 +85 82 model.embedding_dim 2.0 +85 82 loss.margin 17.099655989203697 +85 82 loss.adversarial_temperature 0.994913057155759 +85 82 optimizer.lr 0.03621194315022987 +85 82 negative_sampler.num_negs_per_pos 67.0 +85 82 training.batch_size 1.0 +85 83 model.output_channels 46.0 +85 83 model.input_dropout 0.49166356515764253 +85 83 model.output_dropout 0.16468372841618667 +85 83 model.feature_map_dropout 0.41988489496300296 +85 83 model.embedding_dim 0.0 +85 83 loss.margin 
5.1263270169270045 +85 83 loss.adversarial_temperature 0.2915690378180893 +85 83 optimizer.lr 0.007077597673579844 +85 83 negative_sampler.num_negs_per_pos 51.0 +85 83 training.batch_size 1.0 +85 84 model.output_channels 37.0 +85 84 model.input_dropout 0.30935847124027266 +85 84 model.output_dropout 0.3005848471536776 +85 84 model.feature_map_dropout 0.17416451499966362 +85 84 model.embedding_dim 2.0 +85 84 loss.margin 7.934270598411159 +85 84 loss.adversarial_temperature 0.1934892835244349 +85 84 optimizer.lr 0.0537025020976019 +85 84 negative_sampler.num_negs_per_pos 84.0 +85 84 training.batch_size 2.0 +85 85 model.output_channels 56.0 +85 85 model.input_dropout 0.31741993238243443 +85 85 model.output_dropout 0.1861848969862226 +85 85 model.feature_map_dropout 0.07180191129061542 +85 85 model.embedding_dim 2.0 +85 85 loss.margin 14.875996729925202 +85 85 loss.adversarial_temperature 0.6796933874259182 +85 85 optimizer.lr 0.001331799198225886 +85 85 negative_sampler.num_negs_per_pos 78.0 +85 85 training.batch_size 1.0 +85 86 model.output_channels 51.0 +85 86 model.input_dropout 0.14992859621382398 +85 86 model.output_dropout 0.05498810267160642 +85 86 model.feature_map_dropout 0.15452294889685797 +85 86 model.embedding_dim 1.0 +85 86 loss.margin 5.326929966845538 +85 86 loss.adversarial_temperature 0.3784368784657183 +85 86 optimizer.lr 0.0017056347434655876 +85 86 negative_sampler.num_negs_per_pos 69.0 +85 86 training.batch_size 0.0 +85 87 model.output_channels 38.0 +85 87 model.input_dropout 0.031848591341088195 +85 87 model.output_dropout 0.16653708256250288 +85 87 model.feature_map_dropout 0.17445094300788216 +85 87 model.embedding_dim 2.0 +85 87 loss.margin 19.023361145279104 +85 87 loss.adversarial_temperature 0.49018403655767673 +85 87 optimizer.lr 0.0010989226116833756 +85 87 negative_sampler.num_negs_per_pos 67.0 +85 87 training.batch_size 1.0 +85 88 model.output_channels 24.0 +85 88 model.input_dropout 0.1725411148520794 +85 88 model.output_dropout 
0.11756322748897485 +85 88 model.feature_map_dropout 0.030655397611363988 +85 88 model.embedding_dim 2.0 +85 88 loss.margin 3.8075534060961607 +85 88 loss.adversarial_temperature 0.7174586542404268 +85 88 optimizer.lr 0.0021214794336180884 +85 88 negative_sampler.num_negs_per_pos 13.0 +85 88 training.batch_size 1.0 +85 89 model.output_channels 53.0 +85 89 model.input_dropout 0.24738778128580946 +85 89 model.output_dropout 0.21680148927101772 +85 89 model.feature_map_dropout 0.3996726055683188 +85 89 model.embedding_dim 0.0 +85 89 loss.margin 17.469569434266457 +85 89 loss.adversarial_temperature 0.41334932589224394 +85 89 optimizer.lr 0.03817156352126047 +85 89 negative_sampler.num_negs_per_pos 88.0 +85 89 training.batch_size 0.0 +85 90 model.output_channels 62.0 +85 90 model.input_dropout 0.2850509668819131 +85 90 model.output_dropout 0.12230805450628107 +85 90 model.feature_map_dropout 0.435436142432538 +85 90 model.embedding_dim 2.0 +85 90 loss.margin 26.36403486352988 +85 90 loss.adversarial_temperature 0.13306140176488374 +85 90 optimizer.lr 0.014613788401276608 +85 90 negative_sampler.num_negs_per_pos 94.0 +85 90 training.batch_size 1.0 +85 91 model.output_channels 40.0 +85 91 model.input_dropout 0.3577554057133215 +85 91 model.output_dropout 0.23656212419134132 +85 91 model.feature_map_dropout 0.2392762886153149 +85 91 model.embedding_dim 1.0 +85 91 loss.margin 24.151909059915862 +85 91 loss.adversarial_temperature 0.4459372736436147 +85 91 optimizer.lr 0.012764577166521733 +85 91 negative_sampler.num_negs_per_pos 14.0 +85 91 training.batch_size 1.0 +85 92 model.output_channels 21.0 +85 92 model.input_dropout 0.03708626331464254 +85 92 model.output_dropout 0.24788219303989606 +85 92 model.feature_map_dropout 0.2874479664269769 +85 92 model.embedding_dim 2.0 +85 92 loss.margin 21.783025211042307 +85 92 loss.adversarial_temperature 0.9377649594900752 +85 92 optimizer.lr 0.0011141655506199685 +85 92 negative_sampler.num_negs_per_pos 86.0 +85 92 
training.batch_size 1.0 +85 93 model.output_channels 63.0 +85 93 model.input_dropout 0.3587572368126851 +85 93 model.output_dropout 0.2424495355677162 +85 93 model.feature_map_dropout 0.4094271725171885 +85 93 model.embedding_dim 2.0 +85 93 loss.margin 12.990940879440414 +85 93 loss.adversarial_temperature 0.831088228184341 +85 93 optimizer.lr 0.001557176604312899 +85 93 negative_sampler.num_negs_per_pos 51.0 +85 93 training.batch_size 2.0 +85 94 model.output_channels 52.0 +85 94 model.input_dropout 0.24824445314422033 +85 94 model.output_dropout 0.4476988367202222 +85 94 model.feature_map_dropout 0.24324968677869907 +85 94 model.embedding_dim 0.0 +85 94 loss.margin 21.6136742632646 +85 94 loss.adversarial_temperature 0.8631887277311089 +85 94 optimizer.lr 0.0010716815771296222 +85 94 negative_sampler.num_negs_per_pos 57.0 +85 94 training.batch_size 1.0 +85 95 model.output_channels 46.0 +85 95 model.input_dropout 0.48721274289176814 +85 95 model.output_dropout 0.012295255937388117 +85 95 model.feature_map_dropout 0.29154447867767946 +85 95 model.embedding_dim 2.0 +85 95 loss.margin 10.826292948329009 +85 95 loss.adversarial_temperature 0.37825934779448067 +85 95 optimizer.lr 0.007962936682294565 +85 95 negative_sampler.num_negs_per_pos 54.0 +85 95 training.batch_size 2.0 +85 96 model.output_channels 63.0 +85 96 model.input_dropout 0.19761166013156578 +85 96 model.output_dropout 0.02432427028678963 +85 96 model.feature_map_dropout 0.36283064514656554 +85 96 model.embedding_dim 1.0 +85 96 loss.margin 28.994374913239977 +85 96 loss.adversarial_temperature 0.6123696522157074 +85 96 optimizer.lr 0.019709872362630984 +85 96 negative_sampler.num_negs_per_pos 13.0 +85 96 training.batch_size 0.0 +85 97 model.output_channels 57.0 +85 97 model.input_dropout 0.39821587278352416 +85 97 model.output_dropout 0.27798763823307 +85 97 model.feature_map_dropout 0.08903576259841628 +85 97 model.embedding_dim 1.0 +85 97 loss.margin 20.514624609599146 +85 97 loss.adversarial_temperature 
0.9971588026782164 +85 97 optimizer.lr 0.0711561368473911 +85 97 negative_sampler.num_negs_per_pos 62.0 +85 97 training.batch_size 0.0 +85 98 model.output_channels 25.0 +85 98 model.input_dropout 0.29497596986316016 +85 98 model.output_dropout 0.4472748663984191 +85 98 model.feature_map_dropout 0.031967344680316534 +85 98 model.embedding_dim 2.0 +85 98 loss.margin 23.671461354968322 +85 98 loss.adversarial_temperature 0.10843450704142965 +85 98 optimizer.lr 0.0036768880651896437 +85 98 negative_sampler.num_negs_per_pos 2.0 +85 98 training.batch_size 0.0 +85 99 model.output_channels 61.0 +85 99 model.input_dropout 0.11753080249494036 +85 99 model.output_dropout 0.005111639873053575 +85 99 model.feature_map_dropout 0.3954328252577649 +85 99 model.embedding_dim 2.0 +85 99 loss.margin 4.987629071718288 +85 99 loss.adversarial_temperature 0.6831754185353677 +85 99 optimizer.lr 0.005721233712407807 +85 99 negative_sampler.num_negs_per_pos 74.0 +85 99 training.batch_size 0.0 +85 100 model.output_channels 55.0 +85 100 model.input_dropout 0.4018945408661435 +85 100 model.output_dropout 0.05164222727554574 +85 100 model.feature_map_dropout 0.24534489919967178 +85 100 model.embedding_dim 0.0 +85 100 loss.margin 1.0035687603202228 +85 100 loss.adversarial_temperature 0.7556944189634441 +85 100 optimizer.lr 0.0036574142431523545 +85 100 negative_sampler.num_negs_per_pos 61.0 +85 100 training.batch_size 2.0 +85 1 dataset """kinships""" +85 1 model """conve""" +85 1 loss """nssa""" +85 1 regularizer """no""" +85 1 optimizer """adam""" +85 1 training_loop """owa""" +85 1 negative_sampler """basic""" +85 1 evaluator """rankbased""" +85 2 dataset """kinships""" +85 2 model """conve""" +85 2 loss """nssa""" +85 2 regularizer """no""" +85 2 optimizer """adam""" +85 2 training_loop """owa""" +85 2 negative_sampler """basic""" +85 2 evaluator """rankbased""" +85 3 dataset """kinships""" +85 3 model """conve""" +85 3 loss """nssa""" +85 3 regularizer """no""" +85 3 optimizer """adam""" 
+85 3 training_loop """owa""" +85 3 negative_sampler """basic""" +85 3 evaluator """rankbased""" +85 4 dataset """kinships""" +85 4 model """conve""" +85 4 loss """nssa""" +85 4 regularizer """no""" +85 4 optimizer """adam""" +85 4 training_loop """owa""" +85 4 negative_sampler """basic""" +85 4 evaluator """rankbased""" +85 5 dataset """kinships""" +85 5 model """conve""" +85 5 loss """nssa""" +85 5 regularizer """no""" +85 5 optimizer """adam""" +85 5 training_loop """owa""" +85 5 negative_sampler """basic""" +85 5 evaluator """rankbased""" +85 6 dataset """kinships""" +85 6 model """conve""" +85 6 loss """nssa""" +85 6 regularizer """no""" +85 6 optimizer """adam""" +85 6 training_loop """owa""" +85 6 negative_sampler """basic""" +85 6 evaluator """rankbased""" +85 7 dataset """kinships""" +85 7 model """conve""" +85 7 loss """nssa""" +85 7 regularizer """no""" +85 7 optimizer """adam""" +85 7 training_loop """owa""" +85 7 negative_sampler """basic""" +85 7 evaluator """rankbased""" +85 8 dataset """kinships""" +85 8 model """conve""" +85 8 loss """nssa""" +85 8 regularizer """no""" +85 8 optimizer """adam""" +85 8 training_loop """owa""" +85 8 negative_sampler """basic""" +85 8 evaluator """rankbased""" +85 9 dataset """kinships""" +85 9 model """conve""" +85 9 loss """nssa""" +85 9 regularizer """no""" +85 9 optimizer """adam""" +85 9 training_loop """owa""" +85 9 negative_sampler """basic""" +85 9 evaluator """rankbased""" +85 10 dataset """kinships""" +85 10 model """conve""" +85 10 loss """nssa""" +85 10 regularizer """no""" +85 10 optimizer """adam""" +85 10 training_loop """owa""" +85 10 negative_sampler """basic""" +85 10 evaluator """rankbased""" +85 11 dataset """kinships""" +85 11 model """conve""" +85 11 loss """nssa""" +85 11 regularizer """no""" +85 11 optimizer """adam""" +85 11 training_loop """owa""" +85 11 negative_sampler """basic""" +85 11 evaluator """rankbased""" +85 12 dataset """kinships""" +85 12 model """conve""" +85 12 loss """nssa""" 
+85 12 regularizer """no""" +85 12 optimizer """adam""" +85 12 training_loop """owa""" +85 12 negative_sampler """basic""" +85 12 evaluator """rankbased""" +85 13 dataset """kinships""" +85 13 model """conve""" +85 13 loss """nssa""" +85 13 regularizer """no""" +85 13 optimizer """adam""" +85 13 training_loop """owa""" +85 13 negative_sampler """basic""" +85 13 evaluator """rankbased""" +85 14 dataset """kinships""" +85 14 model """conve""" +85 14 loss """nssa""" +85 14 regularizer """no""" +85 14 optimizer """adam""" +85 14 training_loop """owa""" +85 14 negative_sampler """basic""" +85 14 evaluator """rankbased""" +85 15 dataset """kinships""" +85 15 model """conve""" +85 15 loss """nssa""" +85 15 regularizer """no""" +85 15 optimizer """adam""" +85 15 training_loop """owa""" +85 15 negative_sampler """basic""" +85 15 evaluator """rankbased""" +85 16 dataset """kinships""" +85 16 model """conve""" +85 16 loss """nssa""" +85 16 regularizer """no""" +85 16 optimizer """adam""" +85 16 training_loop """owa""" +85 16 negative_sampler """basic""" +85 16 evaluator """rankbased""" +85 17 dataset """kinships""" +85 17 model """conve""" +85 17 loss """nssa""" +85 17 regularizer """no""" +85 17 optimizer """adam""" +85 17 training_loop """owa""" +85 17 negative_sampler """basic""" +85 17 evaluator """rankbased""" +85 18 dataset """kinships""" +85 18 model """conve""" +85 18 loss """nssa""" +85 18 regularizer """no""" +85 18 optimizer """adam""" +85 18 training_loop """owa""" +85 18 negative_sampler """basic""" +85 18 evaluator """rankbased""" +85 19 dataset """kinships""" +85 19 model """conve""" +85 19 loss """nssa""" +85 19 regularizer """no""" +85 19 optimizer """adam""" +85 19 training_loop """owa""" +85 19 negative_sampler """basic""" +85 19 evaluator """rankbased""" +85 20 dataset """kinships""" +85 20 model """conve""" +85 20 loss """nssa""" +85 20 regularizer """no""" +85 20 optimizer """adam""" +85 20 training_loop """owa""" +85 20 negative_sampler """basic""" +85 
20 evaluator """rankbased""" +85 21 dataset """kinships""" +85 21 model """conve""" +85 21 loss """nssa""" +85 21 regularizer """no""" +85 21 optimizer """adam""" +85 21 training_loop """owa""" +85 21 negative_sampler """basic""" +85 21 evaluator """rankbased""" +85 22 dataset """kinships""" +85 22 model """conve""" +85 22 loss """nssa""" +85 22 regularizer """no""" +85 22 optimizer """adam""" +85 22 training_loop """owa""" +85 22 negative_sampler """basic""" +85 22 evaluator """rankbased""" +85 23 dataset """kinships""" +85 23 model """conve""" +85 23 loss """nssa""" +85 23 regularizer """no""" +85 23 optimizer """adam""" +85 23 training_loop """owa""" +85 23 negative_sampler """basic""" +85 23 evaluator """rankbased""" +85 24 dataset """kinships""" +85 24 model """conve""" +85 24 loss """nssa""" +85 24 regularizer """no""" +85 24 optimizer """adam""" +85 24 training_loop """owa""" +85 24 negative_sampler """basic""" +85 24 evaluator """rankbased""" +85 25 dataset """kinships""" +85 25 model """conve""" +85 25 loss """nssa""" +85 25 regularizer """no""" +85 25 optimizer """adam""" +85 25 training_loop """owa""" +85 25 negative_sampler """basic""" +85 25 evaluator """rankbased""" +85 26 dataset """kinships""" +85 26 model """conve""" +85 26 loss """nssa""" +85 26 regularizer """no""" +85 26 optimizer """adam""" +85 26 training_loop """owa""" +85 26 negative_sampler """basic""" +85 26 evaluator """rankbased""" +85 27 dataset """kinships""" +85 27 model """conve""" +85 27 loss """nssa""" +85 27 regularizer """no""" +85 27 optimizer """adam""" +85 27 training_loop """owa""" +85 27 negative_sampler """basic""" +85 27 evaluator """rankbased""" +85 28 dataset """kinships""" +85 28 model """conve""" +85 28 loss """nssa""" +85 28 regularizer """no""" +85 28 optimizer """adam""" +85 28 training_loop """owa""" +85 28 negative_sampler """basic""" +85 28 evaluator """rankbased""" +85 29 dataset """kinships""" +85 29 model """conve""" +85 29 loss """nssa""" +85 29 regularizer 
"""no""" +85 29 optimizer """adam""" +85 29 training_loop """owa""" +85 29 negative_sampler """basic""" +85 29 evaluator """rankbased""" +85 30 dataset """kinships""" +85 30 model """conve""" +85 30 loss """nssa""" +85 30 regularizer """no""" +85 30 optimizer """adam""" +85 30 training_loop """owa""" +85 30 negative_sampler """basic""" +85 30 evaluator """rankbased""" +85 31 dataset """kinships""" +85 31 model """conve""" +85 31 loss """nssa""" +85 31 regularizer """no""" +85 31 optimizer """adam""" +85 31 training_loop """owa""" +85 31 negative_sampler """basic""" +85 31 evaluator """rankbased""" +85 32 dataset """kinships""" +85 32 model """conve""" +85 32 loss """nssa""" +85 32 regularizer """no""" +85 32 optimizer """adam""" +85 32 training_loop """owa""" +85 32 negative_sampler """basic""" +85 32 evaluator """rankbased""" +85 33 dataset """kinships""" +85 33 model """conve""" +85 33 loss """nssa""" +85 33 regularizer """no""" +85 33 optimizer """adam""" +85 33 training_loop """owa""" +85 33 negative_sampler """basic""" +85 33 evaluator """rankbased""" +85 34 dataset """kinships""" +85 34 model """conve""" +85 34 loss """nssa""" +85 34 regularizer """no""" +85 34 optimizer """adam""" +85 34 training_loop """owa""" +85 34 negative_sampler """basic""" +85 34 evaluator """rankbased""" +85 35 dataset """kinships""" +85 35 model """conve""" +85 35 loss """nssa""" +85 35 regularizer """no""" +85 35 optimizer """adam""" +85 35 training_loop """owa""" +85 35 negative_sampler """basic""" +85 35 evaluator """rankbased""" +85 36 dataset """kinships""" +85 36 model """conve""" +85 36 loss """nssa""" +85 36 regularizer """no""" +85 36 optimizer """adam""" +85 36 training_loop """owa""" +85 36 negative_sampler """basic""" +85 36 evaluator """rankbased""" +85 37 dataset """kinships""" +85 37 model """conve""" +85 37 loss """nssa""" +85 37 regularizer """no""" +85 37 optimizer """adam""" +85 37 training_loop """owa""" +85 37 negative_sampler """basic""" +85 37 evaluator 
"""rankbased""" +85 38 dataset """kinships""" +85 38 model """conve""" +85 38 loss """nssa""" +85 38 regularizer """no""" +85 38 optimizer """adam""" +85 38 training_loop """owa""" +85 38 negative_sampler """basic""" +85 38 evaluator """rankbased""" +85 39 dataset """kinships""" +85 39 model """conve""" +85 39 loss """nssa""" +85 39 regularizer """no""" +85 39 optimizer """adam""" +85 39 training_loop """owa""" +85 39 negative_sampler """basic""" +85 39 evaluator """rankbased""" +85 40 dataset """kinships""" +85 40 model """conve""" +85 40 loss """nssa""" +85 40 regularizer """no""" +85 40 optimizer """adam""" +85 40 training_loop """owa""" +85 40 negative_sampler """basic""" +85 40 evaluator """rankbased""" +85 41 dataset """kinships""" +85 41 model """conve""" +85 41 loss """nssa""" +85 41 regularizer """no""" +85 41 optimizer """adam""" +85 41 training_loop """owa""" +85 41 negative_sampler """basic""" +85 41 evaluator """rankbased""" +85 42 dataset """kinships""" +85 42 model """conve""" +85 42 loss """nssa""" +85 42 regularizer """no""" +85 42 optimizer """adam""" +85 42 training_loop """owa""" +85 42 negative_sampler """basic""" +85 42 evaluator """rankbased""" +85 43 dataset """kinships""" +85 43 model """conve""" +85 43 loss """nssa""" +85 43 regularizer """no""" +85 43 optimizer """adam""" +85 43 training_loop """owa""" +85 43 negative_sampler """basic""" +85 43 evaluator """rankbased""" +85 44 dataset """kinships""" +85 44 model """conve""" +85 44 loss """nssa""" +85 44 regularizer """no""" +85 44 optimizer """adam""" +85 44 training_loop """owa""" +85 44 negative_sampler """basic""" +85 44 evaluator """rankbased""" +85 45 dataset """kinships""" +85 45 model """conve""" +85 45 loss """nssa""" +85 45 regularizer """no""" +85 45 optimizer """adam""" +85 45 training_loop """owa""" +85 45 negative_sampler """basic""" +85 45 evaluator """rankbased""" +85 46 dataset """kinships""" +85 46 model """conve""" +85 46 loss """nssa""" +85 46 regularizer """no""" +85 
46 optimizer """adam""" +85 46 training_loop """owa""" +85 46 negative_sampler """basic""" +85 46 evaluator """rankbased""" +85 47 dataset """kinships""" +85 47 model """conve""" +85 47 loss """nssa""" +85 47 regularizer """no""" +85 47 optimizer """adam""" +85 47 training_loop """owa""" +85 47 negative_sampler """basic""" +85 47 evaluator """rankbased""" +85 48 dataset """kinships""" +85 48 model """conve""" +85 48 loss """nssa""" +85 48 regularizer """no""" +85 48 optimizer """adam""" +85 48 training_loop """owa""" +85 48 negative_sampler """basic""" +85 48 evaluator """rankbased""" +85 49 dataset """kinships""" +85 49 model """conve""" +85 49 loss """nssa""" +85 49 regularizer """no""" +85 49 optimizer """adam""" +85 49 training_loop """owa""" +85 49 negative_sampler """basic""" +85 49 evaluator """rankbased""" +85 50 dataset """kinships""" +85 50 model """conve""" +85 50 loss """nssa""" +85 50 regularizer """no""" +85 50 optimizer """adam""" +85 50 training_loop """owa""" +85 50 negative_sampler """basic""" +85 50 evaluator """rankbased""" +85 51 dataset """kinships""" +85 51 model """conve""" +85 51 loss """nssa""" +85 51 regularizer """no""" +85 51 optimizer """adam""" +85 51 training_loop """owa""" +85 51 negative_sampler """basic""" +85 51 evaluator """rankbased""" +85 52 dataset """kinships""" +85 52 model """conve""" +85 52 loss """nssa""" +85 52 regularizer """no""" +85 52 optimizer """adam""" +85 52 training_loop """owa""" +85 52 negative_sampler """basic""" +85 52 evaluator """rankbased""" +85 53 dataset """kinships""" +85 53 model """conve""" +85 53 loss """nssa""" +85 53 regularizer """no""" +85 53 optimizer """adam""" +85 53 training_loop """owa""" +85 53 negative_sampler """basic""" +85 53 evaluator """rankbased""" +85 54 dataset """kinships""" +85 54 model """conve""" +85 54 loss """nssa""" +85 54 regularizer """no""" +85 54 optimizer """adam""" +85 54 training_loop """owa""" +85 54 negative_sampler """basic""" +85 54 evaluator """rankbased""" +85 
55 dataset """kinships""" +85 55 model """conve""" +85 55 loss """nssa""" +85 55 regularizer """no""" +85 55 optimizer """adam""" +85 55 training_loop """owa""" +85 55 negative_sampler """basic""" +85 55 evaluator """rankbased""" +85 56 dataset """kinships""" +85 56 model """conve""" +85 56 loss """nssa""" +85 56 regularizer """no""" +85 56 optimizer """adam""" +85 56 training_loop """owa""" +85 56 negative_sampler """basic""" +85 56 evaluator """rankbased""" +85 57 dataset """kinships""" +85 57 model """conve""" +85 57 loss """nssa""" +85 57 regularizer """no""" +85 57 optimizer """adam""" +85 57 training_loop """owa""" +85 57 negative_sampler """basic""" +85 57 evaluator """rankbased""" +85 58 dataset """kinships""" +85 58 model """conve""" +85 58 loss """nssa""" +85 58 regularizer """no""" +85 58 optimizer """adam""" +85 58 training_loop """owa""" +85 58 negative_sampler """basic""" +85 58 evaluator """rankbased""" +85 59 dataset """kinships""" +85 59 model """conve""" +85 59 loss """nssa""" +85 59 regularizer """no""" +85 59 optimizer """adam""" +85 59 training_loop """owa""" +85 59 negative_sampler """basic""" +85 59 evaluator """rankbased""" +85 60 dataset """kinships""" +85 60 model """conve""" +85 60 loss """nssa""" +85 60 regularizer """no""" +85 60 optimizer """adam""" +85 60 training_loop """owa""" +85 60 negative_sampler """basic""" +85 60 evaluator """rankbased""" +85 61 dataset """kinships""" +85 61 model """conve""" +85 61 loss """nssa""" +85 61 regularizer """no""" +85 61 optimizer """adam""" +85 61 training_loop """owa""" +85 61 negative_sampler """basic""" +85 61 evaluator """rankbased""" +85 62 dataset """kinships""" +85 62 model """conve""" +85 62 loss """nssa""" +85 62 regularizer """no""" +85 62 optimizer """adam""" +85 62 training_loop """owa""" +85 62 negative_sampler """basic""" +85 62 evaluator """rankbased""" +85 63 dataset """kinships""" +85 63 model """conve""" +85 63 loss """nssa""" +85 63 regularizer """no""" +85 63 optimizer 
"""adam""" +85 63 training_loop """owa""" +85 63 negative_sampler """basic""" +85 63 evaluator """rankbased""" +85 64 dataset """kinships""" +85 64 model """conve""" +85 64 loss """nssa""" +85 64 regularizer """no""" +85 64 optimizer """adam""" +85 64 training_loop """owa""" +85 64 negative_sampler """basic""" +85 64 evaluator """rankbased""" +85 65 dataset """kinships""" +85 65 model """conve""" +85 65 loss """nssa""" +85 65 regularizer """no""" +85 65 optimizer """adam""" +85 65 training_loop """owa""" +85 65 negative_sampler """basic""" +85 65 evaluator """rankbased""" +85 66 dataset """kinships""" +85 66 model """conve""" +85 66 loss """nssa""" +85 66 regularizer """no""" +85 66 optimizer """adam""" +85 66 training_loop """owa""" +85 66 negative_sampler """basic""" +85 66 evaluator """rankbased""" +85 67 dataset """kinships""" +85 67 model """conve""" +85 67 loss """nssa""" +85 67 regularizer """no""" +85 67 optimizer """adam""" +85 67 training_loop """owa""" +85 67 negative_sampler """basic""" +85 67 evaluator """rankbased""" +85 68 dataset """kinships""" +85 68 model """conve""" +85 68 loss """nssa""" +85 68 regularizer """no""" +85 68 optimizer """adam""" +85 68 training_loop """owa""" +85 68 negative_sampler """basic""" +85 68 evaluator """rankbased""" +85 69 dataset """kinships""" +85 69 model """conve""" +85 69 loss """nssa""" +85 69 regularizer """no""" +85 69 optimizer """adam""" +85 69 training_loop """owa""" +85 69 negative_sampler """basic""" +85 69 evaluator """rankbased""" +85 70 dataset """kinships""" +85 70 model """conve""" +85 70 loss """nssa""" +85 70 regularizer """no""" +85 70 optimizer """adam""" +85 70 training_loop """owa""" +85 70 negative_sampler """basic""" +85 70 evaluator """rankbased""" +85 71 dataset """kinships""" +85 71 model """conve""" +85 71 loss """nssa""" +85 71 regularizer """no""" +85 71 optimizer """adam""" +85 71 training_loop """owa""" +85 71 negative_sampler """basic""" +85 71 evaluator """rankbased""" +85 72 dataset 
"""kinships""" +85 72 model """conve""" +85 72 loss """nssa""" +85 72 regularizer """no""" +85 72 optimizer """adam""" +85 72 training_loop """owa""" +85 72 negative_sampler """basic""" +85 72 evaluator """rankbased""" +85 73 dataset """kinships""" +85 73 model """conve""" +85 73 loss """nssa""" +85 73 regularizer """no""" +85 73 optimizer """adam""" +85 73 training_loop """owa""" +85 73 negative_sampler """basic""" +85 73 evaluator """rankbased""" +85 74 dataset """kinships""" +85 74 model """conve""" +85 74 loss """nssa""" +85 74 regularizer """no""" +85 74 optimizer """adam""" +85 74 training_loop """owa""" +85 74 negative_sampler """basic""" +85 74 evaluator """rankbased""" +85 75 dataset """kinships""" +85 75 model """conve""" +85 75 loss """nssa""" +85 75 regularizer """no""" +85 75 optimizer """adam""" +85 75 training_loop """owa""" +85 75 negative_sampler """basic""" +85 75 evaluator """rankbased""" +85 76 dataset """kinships""" +85 76 model """conve""" +85 76 loss """nssa""" +85 76 regularizer """no""" +85 76 optimizer """adam""" +85 76 training_loop """owa""" +85 76 negative_sampler """basic""" +85 76 evaluator """rankbased""" +85 77 dataset """kinships""" +85 77 model """conve""" +85 77 loss """nssa""" +85 77 regularizer """no""" +85 77 optimizer """adam""" +85 77 training_loop """owa""" +85 77 negative_sampler """basic""" +85 77 evaluator """rankbased""" +85 78 dataset """kinships""" +85 78 model """conve""" +85 78 loss """nssa""" +85 78 regularizer """no""" +85 78 optimizer """adam""" +85 78 training_loop """owa""" +85 78 negative_sampler """basic""" +85 78 evaluator """rankbased""" +85 79 dataset """kinships""" +85 79 model """conve""" +85 79 loss """nssa""" +85 79 regularizer """no""" +85 79 optimizer """adam""" +85 79 training_loop """owa""" +85 79 negative_sampler """basic""" +85 79 evaluator """rankbased""" +85 80 dataset """kinships""" +85 80 model """conve""" +85 80 loss """nssa""" +85 80 regularizer """no""" +85 80 optimizer """adam""" +85 80 
training_loop """owa""" +85 80 negative_sampler """basic""" +85 80 evaluator """rankbased""" +85 81 dataset """kinships""" +85 81 model """conve""" +85 81 loss """nssa""" +85 81 regularizer """no""" +85 81 optimizer """adam""" +85 81 training_loop """owa""" +85 81 negative_sampler """basic""" +85 81 evaluator """rankbased""" +85 82 dataset """kinships""" +85 82 model """conve""" +85 82 loss """nssa""" +85 82 regularizer """no""" +85 82 optimizer """adam""" +85 82 training_loop """owa""" +85 82 negative_sampler """basic""" +85 82 evaluator """rankbased""" +85 83 dataset """kinships""" +85 83 model """conve""" +85 83 loss """nssa""" +85 83 regularizer """no""" +85 83 optimizer """adam""" +85 83 training_loop """owa""" +85 83 negative_sampler """basic""" +85 83 evaluator """rankbased""" +85 84 dataset """kinships""" +85 84 model """conve""" +85 84 loss """nssa""" +85 84 regularizer """no""" +85 84 optimizer """adam""" +85 84 training_loop """owa""" +85 84 negative_sampler """basic""" +85 84 evaluator """rankbased""" +85 85 dataset """kinships""" +85 85 model """conve""" +85 85 loss """nssa""" +85 85 regularizer """no""" +85 85 optimizer """adam""" +85 85 training_loop """owa""" +85 85 negative_sampler """basic""" +85 85 evaluator """rankbased""" +85 86 dataset """kinships""" +85 86 model """conve""" +85 86 loss """nssa""" +85 86 regularizer """no""" +85 86 optimizer """adam""" +85 86 training_loop """owa""" +85 86 negative_sampler """basic""" +85 86 evaluator """rankbased""" +85 87 dataset """kinships""" +85 87 model """conve""" +85 87 loss """nssa""" +85 87 regularizer """no""" +85 87 optimizer """adam""" +85 87 training_loop """owa""" +85 87 negative_sampler """basic""" +85 87 evaluator """rankbased""" +85 88 dataset """kinships""" +85 88 model """conve""" +85 88 loss """nssa""" +85 88 regularizer """no""" +85 88 optimizer """adam""" +85 88 training_loop """owa""" +85 88 negative_sampler """basic""" +85 88 evaluator """rankbased""" +85 89 dataset """kinships""" +85 
89 model """conve""" +85 89 loss """nssa""" +85 89 regularizer """no""" +85 89 optimizer """adam""" +85 89 training_loop """owa""" +85 89 negative_sampler """basic""" +85 89 evaluator """rankbased""" +85 90 dataset """kinships""" +85 90 model """conve""" +85 90 loss """nssa""" +85 90 regularizer """no""" +85 90 optimizer """adam""" +85 90 training_loop """owa""" +85 90 negative_sampler """basic""" +85 90 evaluator """rankbased""" +85 91 dataset """kinships""" +85 91 model """conve""" +85 91 loss """nssa""" +85 91 regularizer """no""" +85 91 optimizer """adam""" +85 91 training_loop """owa""" +85 91 negative_sampler """basic""" +85 91 evaluator """rankbased""" +85 92 dataset """kinships""" +85 92 model """conve""" +85 92 loss """nssa""" +85 92 regularizer """no""" +85 92 optimizer """adam""" +85 92 training_loop """owa""" +85 92 negative_sampler """basic""" +85 92 evaluator """rankbased""" +85 93 dataset """kinships""" +85 93 model """conve""" +85 93 loss """nssa""" +85 93 regularizer """no""" +85 93 optimizer """adam""" +85 93 training_loop """owa""" +85 93 negative_sampler """basic""" +85 93 evaluator """rankbased""" +85 94 dataset """kinships""" +85 94 model """conve""" +85 94 loss """nssa""" +85 94 regularizer """no""" +85 94 optimizer """adam""" +85 94 training_loop """owa""" +85 94 negative_sampler """basic""" +85 94 evaluator """rankbased""" +85 95 dataset """kinships""" +85 95 model """conve""" +85 95 loss """nssa""" +85 95 regularizer """no""" +85 95 optimizer """adam""" +85 95 training_loop """owa""" +85 95 negative_sampler """basic""" +85 95 evaluator """rankbased""" +85 96 dataset """kinships""" +85 96 model """conve""" +85 96 loss """nssa""" +85 96 regularizer """no""" +85 96 optimizer """adam""" +85 96 training_loop """owa""" +85 96 negative_sampler """basic""" +85 96 evaluator """rankbased""" +85 97 dataset """kinships""" +85 97 model """conve""" +85 97 loss """nssa""" +85 97 regularizer """no""" +85 97 optimizer """adam""" +85 97 training_loop 
"""owa""" +85 97 negative_sampler """basic""" +85 97 evaluator """rankbased""" +85 98 dataset """kinships""" +85 98 model """conve""" +85 98 loss """nssa""" +85 98 regularizer """no""" +85 98 optimizer """adam""" +85 98 training_loop """owa""" +85 98 negative_sampler """basic""" +85 98 evaluator """rankbased""" +85 99 dataset """kinships""" +85 99 model """conve""" +85 99 loss """nssa""" +85 99 regularizer """no""" +85 99 optimizer """adam""" +85 99 training_loop """owa""" +85 99 negative_sampler """basic""" +85 99 evaluator """rankbased""" +85 100 dataset """kinships""" +85 100 model """conve""" +85 100 loss """nssa""" +85 100 regularizer """no""" +85 100 optimizer """adam""" +85 100 training_loop """owa""" +85 100 negative_sampler """basic""" +85 100 evaluator """rankbased""" +86 1 model.output_channels 44.0 +86 1 model.input_dropout 0.4602075905447389 +86 1 model.output_dropout 0.10763861689486942 +86 1 model.feature_map_dropout 0.4093344445671078 +86 1 model.embedding_dim 2.0 +86 1 optimizer.lr 0.05955157249859537 +86 1 training.batch_size 0.0 +86 1 training.label_smoothing 0.009308815610656107 +86 2 model.output_channels 38.0 +86 2 model.input_dropout 0.22349904610199706 +86 2 model.output_dropout 0.4028489366098387 +86 2 model.feature_map_dropout 0.33403386014170205 +86 2 model.embedding_dim 0.0 +86 2 optimizer.lr 0.0015813053047316298 +86 2 training.batch_size 2.0 +86 2 training.label_smoothing 0.05654371100788662 +86 3 model.output_channels 63.0 +86 3 model.input_dropout 0.18653270784944087 +86 3 model.output_dropout 0.08572894667221354 +86 3 model.feature_map_dropout 0.44457642162006183 +86 3 model.embedding_dim 0.0 +86 3 optimizer.lr 0.003538592555384162 +86 3 training.batch_size 1.0 +86 3 training.label_smoothing 0.013643637986611485 +86 4 model.output_channels 44.0 +86 4 model.input_dropout 0.04625071049245344 +86 4 model.output_dropout 0.4307446210928032 +86 4 model.feature_map_dropout 0.1693647016472155 +86 4 model.embedding_dim 1.0 +86 4 optimizer.lr 
0.001735886675037761 +86 4 training.batch_size 2.0 +86 4 training.label_smoothing 0.004021266455201176 +86 5 model.output_channels 57.0 +86 5 model.input_dropout 0.1991688415051065 +86 5 model.output_dropout 0.4516724915297226 +86 5 model.feature_map_dropout 0.4213910013891136 +86 5 model.embedding_dim 1.0 +86 5 optimizer.lr 0.03925012448468083 +86 5 training.batch_size 2.0 +86 5 training.label_smoothing 0.001404044195207203 +86 6 model.output_channels 45.0 +86 6 model.input_dropout 0.19061649163408667 +86 6 model.output_dropout 0.14241142805990004 +86 6 model.feature_map_dropout 0.35066283946956345 +86 6 model.embedding_dim 2.0 +86 6 optimizer.lr 0.0013689934908472858 +86 6 training.batch_size 0.0 +86 6 training.label_smoothing 0.006246443528907151 +86 7 model.output_channels 62.0 +86 7 model.input_dropout 0.44914970457660713 +86 7 model.output_dropout 0.4535752810935867 +86 7 model.feature_map_dropout 0.49008796908671803 +86 7 model.embedding_dim 0.0 +86 7 optimizer.lr 0.013686257761796387 +86 7 training.batch_size 2.0 +86 7 training.label_smoothing 0.053653435323129925 +86 8 model.output_channels 51.0 +86 8 model.input_dropout 0.486069241328477 +86 8 model.output_dropout 0.05580484693601234 +86 8 model.feature_map_dropout 0.26142121704538984 +86 8 model.embedding_dim 0.0 +86 8 optimizer.lr 0.00925071747131323 +86 8 training.batch_size 0.0 +86 8 training.label_smoothing 0.013771363097747263 +86 9 model.output_channels 54.0 +86 9 model.input_dropout 0.04067036457592804 +86 9 model.output_dropout 0.18375913667146565 +86 9 model.feature_map_dropout 0.23720749763893495 +86 9 model.embedding_dim 2.0 +86 9 optimizer.lr 0.0166979126914668 +86 9 training.batch_size 0.0 +86 9 training.label_smoothing 0.0019572407547028586 +86 10 model.output_channels 35.0 +86 10 model.input_dropout 0.21780931175171236 +86 10 model.output_dropout 0.24469012308710986 +86 10 model.feature_map_dropout 0.18775119008286656 +86 10 model.embedding_dim 2.0 +86 10 optimizer.lr 0.07333411724347497 
+86 10 training.batch_size 1.0 +86 10 training.label_smoothing 0.7842717239129438 +86 11 model.output_channels 35.0 +86 11 model.input_dropout 0.32239980349287584 +86 11 model.output_dropout 0.2580937227620904 +86 11 model.feature_map_dropout 0.4529751347225133 +86 11 model.embedding_dim 1.0 +86 11 optimizer.lr 0.0046175060058025776 +86 11 training.batch_size 2.0 +86 11 training.label_smoothing 0.002287629513697274 +86 12 model.output_channels 55.0 +86 12 model.input_dropout 0.39870303465610313 +86 12 model.output_dropout 0.007512928287886189 +86 12 model.feature_map_dropout 0.3320385660924961 +86 12 model.embedding_dim 0.0 +86 12 optimizer.lr 0.0013165130300324308 +86 12 training.batch_size 0.0 +86 12 training.label_smoothing 0.0072200812668944 +86 13 model.output_channels 42.0 +86 13 model.input_dropout 0.3465867408179156 +86 13 model.output_dropout 0.08779836670484414 +86 13 model.feature_map_dropout 0.10664597033362921 +86 13 model.embedding_dim 0.0 +86 13 optimizer.lr 0.018662235784385788 +86 13 training.batch_size 0.0 +86 13 training.label_smoothing 0.6213869866349746 +86 14 model.output_channels 36.0 +86 14 model.input_dropout 0.27521376979476125 +86 14 model.output_dropout 0.4230478789846858 +86 14 model.feature_map_dropout 0.35698608551326283 +86 14 model.embedding_dim 0.0 +86 14 optimizer.lr 0.0024516917667754975 +86 14 training.batch_size 1.0 +86 14 training.label_smoothing 0.001760295683855104 +86 15 model.output_channels 19.0 +86 15 model.input_dropout 0.07577137463765043 +86 15 model.output_dropout 0.2630933803673002 +86 15 model.feature_map_dropout 0.3255330442303517 +86 15 model.embedding_dim 2.0 +86 15 optimizer.lr 0.0058077249510777055 +86 15 training.batch_size 2.0 +86 15 training.label_smoothing 0.36505408887362867 +86 16 model.output_channels 22.0 +86 16 model.input_dropout 0.44703328183888397 +86 16 model.output_dropout 0.3892185735904909 +86 16 model.feature_map_dropout 0.45487089884264686 +86 16 model.embedding_dim 2.0 +86 16 optimizer.lr 
0.0010900184504778507 +86 16 training.batch_size 0.0 +86 16 training.label_smoothing 0.029838039732292945 +86 17 model.output_channels 17.0 +86 17 model.input_dropout 0.09554263800855833 +86 17 model.output_dropout 0.44987203753577487 +86 17 model.feature_map_dropout 0.37027140769738176 +86 17 model.embedding_dim 1.0 +86 17 optimizer.lr 0.011943902546331792 +86 17 training.batch_size 2.0 +86 17 training.label_smoothing 0.0324733095912865 +86 18 model.output_channels 55.0 +86 18 model.input_dropout 0.11143499067729346 +86 18 model.output_dropout 0.29487140500897757 +86 18 model.feature_map_dropout 0.0625491481275301 +86 18 model.embedding_dim 2.0 +86 18 optimizer.lr 0.0015860065205686726 +86 18 training.batch_size 2.0 +86 18 training.label_smoothing 0.007316870546763746 +86 19 model.output_channels 63.0 +86 19 model.input_dropout 0.033442069329629065 +86 19 model.output_dropout 0.4455372237347874 +86 19 model.feature_map_dropout 0.33057899266604535 +86 19 model.embedding_dim 2.0 +86 19 optimizer.lr 0.0019426916583264171 +86 19 training.batch_size 2.0 +86 19 training.label_smoothing 0.11108688892528368 +86 20 model.output_channels 59.0 +86 20 model.input_dropout 0.05130435927247978 +86 20 model.output_dropout 0.21363095470807159 +86 20 model.feature_map_dropout 0.4289563271991774 +86 20 model.embedding_dim 1.0 +86 20 optimizer.lr 0.0015765003697377742 +86 20 training.batch_size 1.0 +86 20 training.label_smoothing 0.0018946915502916779 +86 21 model.output_channels 53.0 +86 21 model.input_dropout 0.4261324478782484 +86 21 model.output_dropout 0.2594058741162565 +86 21 model.feature_map_dropout 0.4573451084906868 +86 21 model.embedding_dim 1.0 +86 21 optimizer.lr 0.0019338007923382597 +86 21 training.batch_size 0.0 +86 21 training.label_smoothing 0.5643545923713335 +86 22 model.output_channels 41.0 +86 22 model.input_dropout 0.17726400986836327 +86 22 model.output_dropout 0.02390695964449685 +86 22 model.feature_map_dropout 0.36800510349632387 +86 22 model.embedding_dim 
1.0 +86 22 optimizer.lr 0.007484251259632769 +86 22 training.batch_size 0.0 +86 22 training.label_smoothing 0.0035338137991463045 +86 23 model.output_channels 24.0 +86 23 model.input_dropout 0.3767250734265747 +86 23 model.output_dropout 0.4279640632053837 +86 23 model.feature_map_dropout 0.053123628518603805 +86 23 model.embedding_dim 0.0 +86 23 optimizer.lr 0.00226538609525176 +86 23 training.batch_size 1.0 +86 23 training.label_smoothing 0.003187083729353266 +86 24 model.output_channels 55.0 +86 24 model.input_dropout 0.0005285821759047904 +86 24 model.output_dropout 0.46381836779221297 +86 24 model.feature_map_dropout 0.009203710879507832 +86 24 model.embedding_dim 2.0 +86 24 optimizer.lr 0.003490777358928527 +86 24 training.batch_size 1.0 +86 24 training.label_smoothing 0.0011148354724850936 +86 25 model.output_channels 45.0 +86 25 model.input_dropout 0.049343957720863585 +86 25 model.output_dropout 0.24244495508619524 +86 25 model.feature_map_dropout 0.20074337598699304 +86 25 model.embedding_dim 2.0 +86 25 optimizer.lr 0.0016762475690027352 +86 25 training.batch_size 1.0 +86 25 training.label_smoothing 0.003258221520048872 +86 26 model.output_channels 33.0 +86 26 model.input_dropout 0.3904056801594293 +86 26 model.output_dropout 0.09547842005970764 +86 26 model.feature_map_dropout 0.18752794806346695 +86 26 model.embedding_dim 1.0 +86 26 optimizer.lr 0.02037748906449342 +86 26 training.batch_size 2.0 +86 26 training.label_smoothing 0.001127248990780876 +86 27 model.output_channels 50.0 +86 27 model.input_dropout 0.07713133073305456 +86 27 model.output_dropout 0.07699946312959766 +86 27 model.feature_map_dropout 0.2084737280451323 +86 27 model.embedding_dim 0.0 +86 27 optimizer.lr 0.009173958817484323 +86 27 training.batch_size 2.0 +86 27 training.label_smoothing 0.0026076857020867913 +86 28 model.output_channels 22.0 +86 28 model.input_dropout 0.25516959619554247 +86 28 model.output_dropout 0.3037113794333618 +86 28 model.feature_map_dropout 
0.12181826263138473 +86 28 model.embedding_dim 1.0 +86 28 optimizer.lr 0.031011156704257987 +86 28 training.batch_size 2.0 +86 28 training.label_smoothing 0.02576016086170195 +86 29 model.output_channels 59.0 +86 29 model.input_dropout 0.08895788511305464 +86 29 model.output_dropout 0.26213455254530116 +86 29 model.feature_map_dropout 0.3264868209971695 +86 29 model.embedding_dim 1.0 +86 29 optimizer.lr 0.03796643529621513 +86 29 training.batch_size 0.0 +86 29 training.label_smoothing 0.0015938136152620682 +86 30 model.output_channels 20.0 +86 30 model.input_dropout 0.08099884099307769 +86 30 model.output_dropout 0.08589475702153088 +86 30 model.feature_map_dropout 0.07914182111490731 +86 30 model.embedding_dim 1.0 +86 30 optimizer.lr 0.0017604071543030407 +86 30 training.batch_size 0.0 +86 30 training.label_smoothing 0.04468440417622225 +86 31 model.output_channels 29.0 +86 31 model.input_dropout 0.4803878885899457 +86 31 model.output_dropout 0.032485936698545825 +86 31 model.feature_map_dropout 0.16512920371901302 +86 31 model.embedding_dim 1.0 +86 31 optimizer.lr 0.0014308900445247107 +86 31 training.batch_size 2.0 +86 31 training.label_smoothing 0.019014288646094772 +86 32 model.output_channels 30.0 +86 32 model.input_dropout 0.31223072816034514 +86 32 model.output_dropout 0.008394608755093957 +86 32 model.feature_map_dropout 0.06624751742927826 +86 32 model.embedding_dim 2.0 +86 32 optimizer.lr 0.001283613579750626 +86 32 training.batch_size 1.0 +86 32 training.label_smoothing 0.005323625660163716 +86 33 model.output_channels 55.0 +86 33 model.input_dropout 0.4582668832087081 +86 33 model.output_dropout 0.16568333532112284 +86 33 model.feature_map_dropout 0.06139547586964689 +86 33 model.embedding_dim 0.0 +86 33 optimizer.lr 0.0015041322260527293 +86 33 training.batch_size 1.0 +86 33 training.label_smoothing 0.02662280871673111 +86 34 model.output_channels 44.0 +86 34 model.input_dropout 0.21771533493093465 +86 34 model.output_dropout 0.39899638053994835 +86 
34 model.feature_map_dropout 0.42985275845756277 +86 34 model.embedding_dim 2.0 +86 34 optimizer.lr 0.0036118491850194016 +86 34 training.batch_size 1.0 +86 34 training.label_smoothing 0.004867668772616885 +86 35 model.output_channels 43.0 +86 35 model.input_dropout 0.053109066250563886 +86 35 model.output_dropout 0.29769363961406126 +86 35 model.feature_map_dropout 0.33975810290143227 +86 35 model.embedding_dim 0.0 +86 35 optimizer.lr 0.0012760091024276556 +86 35 training.batch_size 1.0 +86 35 training.label_smoothing 0.4993255596237086 +86 36 model.output_channels 39.0 +86 36 model.input_dropout 0.33903921577343915 +86 36 model.output_dropout 0.017128894872461553 +86 36 model.feature_map_dropout 0.23332761895998178 +86 36 model.embedding_dim 1.0 +86 36 optimizer.lr 0.03606484394532118 +86 36 training.batch_size 0.0 +86 36 training.label_smoothing 0.05648842309203251 +86 37 model.output_channels 32.0 +86 37 model.input_dropout 0.1547658368424531 +86 37 model.output_dropout 0.10104836509111254 +86 37 model.feature_map_dropout 0.17223505372437437 +86 37 model.embedding_dim 0.0 +86 37 optimizer.lr 0.0028603718895842084 +86 37 training.batch_size 1.0 +86 37 training.label_smoothing 0.01925446787166296 +86 38 model.output_channels 53.0 +86 38 model.input_dropout 0.336095602855612 +86 38 model.output_dropout 0.11376914452917997 +86 38 model.feature_map_dropout 0.053116720637845616 +86 38 model.embedding_dim 0.0 +86 38 optimizer.lr 0.018976930072462834 +86 38 training.batch_size 1.0 +86 38 training.label_smoothing 0.07266727554016056 +86 39 model.output_channels 28.0 +86 39 model.input_dropout 0.025770402328184172 +86 39 model.output_dropout 0.0856473537527942 +86 39 model.feature_map_dropout 0.48796488982426867 +86 39 model.embedding_dim 2.0 +86 39 optimizer.lr 0.002068175320497204 +86 39 training.batch_size 2.0 +86 39 training.label_smoothing 0.8375301465047462 +86 40 model.output_channels 31.0 +86 40 model.input_dropout 0.00951188934358993 +86 40 model.output_dropout 
0.06850080141801757 +86 40 model.feature_map_dropout 0.057278976299719175 +86 40 model.embedding_dim 1.0 +86 40 optimizer.lr 0.04302486608691382 +86 40 training.batch_size 1.0 +86 40 training.label_smoothing 0.5360340697965469 +86 41 model.output_channels 39.0 +86 41 model.input_dropout 0.2501175733266977 +86 41 model.output_dropout 0.3366039556012212 +86 41 model.feature_map_dropout 0.2943367263432114 +86 41 model.embedding_dim 1.0 +86 41 optimizer.lr 0.056654801931713475 +86 41 training.batch_size 2.0 +86 41 training.label_smoothing 0.005515004945780222 +86 42 model.output_channels 21.0 +86 42 model.input_dropout 0.4188168959300683 +86 42 model.output_dropout 0.2691132301132444 +86 42 model.feature_map_dropout 0.02624173896218146 +86 42 model.embedding_dim 1.0 +86 42 optimizer.lr 0.0022510453855767083 +86 42 training.batch_size 0.0 +86 42 training.label_smoothing 0.0059988619772813065 +86 43 model.output_channels 20.0 +86 43 model.input_dropout 0.23739441049743804 +86 43 model.output_dropout 0.11962625702032775 +86 43 model.feature_map_dropout 0.28369174268959774 +86 43 model.embedding_dim 2.0 +86 43 optimizer.lr 0.04696624744591812 +86 43 training.batch_size 0.0 +86 43 training.label_smoothing 0.7258166103401054 +86 44 model.output_channels 18.0 +86 44 model.input_dropout 0.3918055880707215 +86 44 model.output_dropout 0.3990539215946891 +86 44 model.feature_map_dropout 0.13105102549051573 +86 44 model.embedding_dim 0.0 +86 44 optimizer.lr 0.03993219407051418 +86 44 training.batch_size 1.0 +86 44 training.label_smoothing 0.006943367534640379 +86 45 model.output_channels 27.0 +86 45 model.input_dropout 0.2088860822263658 +86 45 model.output_dropout 0.21272139311211258 +86 45 model.feature_map_dropout 0.0030325847405101025 +86 45 model.embedding_dim 2.0 +86 45 optimizer.lr 0.002229133233361668 +86 45 training.batch_size 2.0 +86 45 training.label_smoothing 0.33089261954980675 +86 46 model.output_channels 21.0 +86 46 model.input_dropout 0.1651214780220841 +86 46 
model.output_dropout 0.41216266846965754 +86 46 model.feature_map_dropout 0.3543043295925015 +86 46 model.embedding_dim 0.0 +86 46 optimizer.lr 0.031638794529304626 +86 46 training.batch_size 0.0 +86 46 training.label_smoothing 0.024094064468678268 +86 47 model.output_channels 57.0 +86 47 model.input_dropout 0.33245305565679356 +86 47 model.output_dropout 0.040838050656075375 +86 47 model.feature_map_dropout 0.1380876117513971 +86 47 model.embedding_dim 0.0 +86 47 optimizer.lr 0.006516905811947604 +86 47 training.batch_size 2.0 +86 47 training.label_smoothing 0.7173908975713214 +86 48 model.output_channels 34.0 +86 48 model.input_dropout 0.4174471560247666 +86 48 model.output_dropout 0.16870264498428922 +86 48 model.feature_map_dropout 0.3516326536999438 +86 48 model.embedding_dim 1.0 +86 48 optimizer.lr 0.0461725597056303 +86 48 training.batch_size 0.0 +86 48 training.label_smoothing 0.06008081050410572 +86 49 model.output_channels 30.0 +86 49 model.input_dropout 0.08736212691790435 +86 49 model.output_dropout 0.19232276892631067 +86 49 model.feature_map_dropout 0.4240047806108999 +86 49 model.embedding_dim 2.0 +86 49 optimizer.lr 0.004864505866488686 +86 49 training.batch_size 1.0 +86 49 training.label_smoothing 0.005285581706420904 +86 50 model.output_channels 33.0 +86 50 model.input_dropout 0.28403694370413163 +86 50 model.output_dropout 0.07878424468229328 +86 50 model.feature_map_dropout 0.3077301257201695 +86 50 model.embedding_dim 1.0 +86 50 optimizer.lr 0.0136121980080444 +86 50 training.batch_size 0.0 +86 50 training.label_smoothing 0.12081976300751768 +86 51 model.output_channels 61.0 +86 51 model.input_dropout 0.2777951574350745 +86 51 model.output_dropout 0.050145477367486035 +86 51 model.feature_map_dropout 0.4566755164937594 +86 51 model.embedding_dim 1.0 +86 51 optimizer.lr 0.0027671434668795343 +86 51 training.batch_size 0.0 +86 51 training.label_smoothing 0.5479386707414999 +86 52 model.output_channels 25.0 +86 52 model.input_dropout 
0.4944600582954693 +86 52 model.output_dropout 0.4656451196243244 +86 52 model.feature_map_dropout 0.083300744295288 +86 52 model.embedding_dim 1.0 +86 52 optimizer.lr 0.03503069360779071 +86 52 training.batch_size 0.0 +86 52 training.label_smoothing 0.25643848487505455 +86 53 model.output_channels 28.0 +86 53 model.input_dropout 0.19428781480442459 +86 53 model.output_dropout 0.4390969602427047 +86 53 model.feature_map_dropout 0.18794915100588344 +86 53 model.embedding_dim 2.0 +86 53 optimizer.lr 0.05765923459490289 +86 53 training.batch_size 1.0 +86 53 training.label_smoothing 0.028044196483832228 +86 54 model.output_channels 57.0 +86 54 model.input_dropout 0.45481191040161584 +86 54 model.output_dropout 0.3541259933557456 +86 54 model.feature_map_dropout 0.46394551266294365 +86 54 model.embedding_dim 0.0 +86 54 optimizer.lr 0.0018760989985232982 +86 54 training.batch_size 0.0 +86 54 training.label_smoothing 0.4901750585843958 +86 55 model.output_channels 51.0 +86 55 model.input_dropout 0.48049317062422997 +86 55 model.output_dropout 0.1615568855240035 +86 55 model.feature_map_dropout 0.22413905297530012 +86 55 model.embedding_dim 1.0 +86 55 optimizer.lr 0.05025390419826973 +86 55 training.batch_size 1.0 +86 55 training.label_smoothing 0.003434580938689743 +86 56 model.output_channels 61.0 +86 56 model.input_dropout 0.11096897013189094 +86 56 model.output_dropout 0.22320840321224839 +86 56 model.feature_map_dropout 0.33183689861340726 +86 56 model.embedding_dim 1.0 +86 56 optimizer.lr 0.03769845661602933 +86 56 training.batch_size 0.0 +86 56 training.label_smoothing 0.001664374252850394 +86 57 model.output_channels 21.0 +86 57 model.input_dropout 0.39911923368806895 +86 57 model.output_dropout 0.2688898703956661 +86 57 model.feature_map_dropout 0.22966662105025404 +86 57 model.embedding_dim 0.0 +86 57 optimizer.lr 0.019958852664717606 +86 57 training.batch_size 1.0 +86 57 training.label_smoothing 0.015058433800189494 +86 58 model.output_channels 17.0 +86 58 
model.input_dropout 0.3655479476764546 +86 58 model.output_dropout 0.38699358652998395 +86 58 model.feature_map_dropout 0.0700161817891557 +86 58 model.embedding_dim 0.0 +86 58 optimizer.lr 0.008653865945130788 +86 58 training.batch_size 0.0 +86 58 training.label_smoothing 0.0025875742729943993 +86 59 model.output_channels 57.0 +86 59 model.input_dropout 0.11341128186786059 +86 59 model.output_dropout 0.3164804844938265 +86 59 model.feature_map_dropout 0.029676238366005858 +86 59 model.embedding_dim 0.0 +86 59 optimizer.lr 0.05659561491224358 +86 59 training.batch_size 2.0 +86 59 training.label_smoothing 0.029669624535666055 +86 60 model.output_channels 25.0 +86 60 model.input_dropout 0.40863890017947163 +86 60 model.output_dropout 0.1488656314238483 +86 60 model.feature_map_dropout 0.26078546341894554 +86 60 model.embedding_dim 1.0 +86 60 optimizer.lr 0.0026331625606358638 +86 60 training.batch_size 1.0 +86 60 training.label_smoothing 0.0045083358982627095 +86 61 model.output_channels 38.0 +86 61 model.input_dropout 0.3328167084430863 +86 61 model.output_dropout 0.37176892271105927 +86 61 model.feature_map_dropout 0.09192884132624479 +86 61 model.embedding_dim 2.0 +86 61 optimizer.lr 0.001467882561381058 +86 61 training.batch_size 2.0 +86 61 training.label_smoothing 0.18167337105508805 +86 62 model.output_channels 62.0 +86 62 model.input_dropout 0.4581141562491906 +86 62 model.output_dropout 0.45870307681438693 +86 62 model.feature_map_dropout 0.36627812302428736 +86 62 model.embedding_dim 0.0 +86 62 optimizer.lr 0.09204808709849914 +86 62 training.batch_size 1.0 +86 62 training.label_smoothing 0.004273757109080678 +86 63 model.output_channels 39.0 +86 63 model.input_dropout 0.0191961095986809 +86 63 model.output_dropout 0.3933888822044956 +86 63 model.feature_map_dropout 0.4804052058953471 +86 63 model.embedding_dim 1.0 +86 63 optimizer.lr 0.001892736845247904 +86 63 training.batch_size 2.0 +86 63 training.label_smoothing 0.002534002126267338 +86 64 
model.output_channels 60.0 +86 64 model.input_dropout 0.002168107433182631 +86 64 model.output_dropout 0.20463374866527712 +86 64 model.feature_map_dropout 0.21202344055012778 +86 64 model.embedding_dim 2.0 +86 64 optimizer.lr 0.01849337849288255 +86 64 training.batch_size 2.0 +86 64 training.label_smoothing 0.017314051877898046 +86 65 model.output_channels 33.0 +86 65 model.input_dropout 0.033434829538513955 +86 65 model.output_dropout 0.21348938769136222 +86 65 model.feature_map_dropout 0.49900845196023796 +86 65 model.embedding_dim 2.0 +86 65 optimizer.lr 0.0021323155908467063 +86 65 training.batch_size 2.0 +86 65 training.label_smoothing 0.00869152391691117 +86 66 model.output_channels 33.0 +86 66 model.input_dropout 0.1690311856964779 +86 66 model.output_dropout 0.3889963372037227 +86 66 model.feature_map_dropout 0.3886430098018231 +86 66 model.embedding_dim 0.0 +86 66 optimizer.lr 0.008422998958762392 +86 66 training.batch_size 2.0 +86 66 training.label_smoothing 0.0012707007915137248 +86 67 model.output_channels 25.0 +86 67 model.input_dropout 0.3663828344603356 +86 67 model.output_dropout 0.08571793823306773 +86 67 model.feature_map_dropout 0.46487462901587573 +86 67 model.embedding_dim 1.0 +86 67 optimizer.lr 0.008666833172553842 +86 67 training.batch_size 2.0 +86 67 training.label_smoothing 0.21239888607198795 +86 68 model.output_channels 59.0 +86 68 model.input_dropout 0.31402285098737864 +86 68 model.output_dropout 0.3549188968614364 +86 68 model.feature_map_dropout 0.25439311891611904 +86 68 model.embedding_dim 1.0 +86 68 optimizer.lr 0.0592931014169723 +86 68 training.batch_size 1.0 +86 68 training.label_smoothing 0.06191596723493483 +86 69 model.output_channels 41.0 +86 69 model.input_dropout 0.361316778838227 +86 69 model.output_dropout 0.23049325351377764 +86 69 model.feature_map_dropout 0.13385983542098928 +86 69 model.embedding_dim 2.0 +86 69 optimizer.lr 0.06660219256616377 +86 69 training.batch_size 0.0 +86 69 training.label_smoothing 
0.03917855653550182 +86 70 model.output_channels 52.0 +86 70 model.input_dropout 0.32136017973943204 +86 70 model.output_dropout 0.40727476007939417 +86 70 model.feature_map_dropout 0.18709193323332846 +86 70 model.embedding_dim 1.0 +86 70 optimizer.lr 0.006385381583255099 +86 70 training.batch_size 0.0 +86 70 training.label_smoothing 0.00599034433619205 +86 71 model.output_channels 60.0 +86 71 model.input_dropout 0.014156736083679344 +86 71 model.output_dropout 0.2359826789169629 +86 71 model.feature_map_dropout 0.47785137859082183 +86 71 model.embedding_dim 2.0 +86 71 optimizer.lr 0.012310298687453473 +86 71 training.batch_size 0.0 +86 71 training.label_smoothing 0.032198000008463845 +86 72 model.output_channels 51.0 +86 72 model.input_dropout 0.09434773405432506 +86 72 model.output_dropout 0.039359224029596795 +86 72 model.feature_map_dropout 0.08162888712694666 +86 72 model.embedding_dim 2.0 +86 72 optimizer.lr 0.00518630179212113 +86 72 training.batch_size 2.0 +86 72 training.label_smoothing 0.021989588177328497 +86 73 model.output_channels 53.0 +86 73 model.input_dropout 0.2232045995170963 +86 73 model.output_dropout 0.004125159950373325 +86 73 model.feature_map_dropout 0.17387419494593115 +86 73 model.embedding_dim 2.0 +86 73 optimizer.lr 0.09002193661204941 +86 73 training.batch_size 2.0 +86 73 training.label_smoothing 0.38375483588920895 +86 74 model.output_channels 55.0 +86 74 model.input_dropout 0.22023941102955386 +86 74 model.output_dropout 0.25845885002198354 +86 74 model.feature_map_dropout 0.20267208893727356 +86 74 model.embedding_dim 0.0 +86 74 optimizer.lr 0.011554179866103274 +86 74 training.batch_size 2.0 +86 74 training.label_smoothing 0.0030524434613238088 +86 75 model.output_channels 41.0 +86 75 model.input_dropout 0.2547161889182879 +86 75 model.output_dropout 0.23495036068380176 +86 75 model.feature_map_dropout 0.06177345974868764 +86 75 model.embedding_dim 2.0 +86 75 optimizer.lr 0.006781054321226117 +86 75 training.batch_size 2.0 +86 75 
training.label_smoothing 0.4459477000300443 +86 76 model.output_channels 41.0 +86 76 model.input_dropout 0.07550603753381624 +86 76 model.output_dropout 0.01650069329414311 +86 76 model.feature_map_dropout 0.40791556510883514 +86 76 model.embedding_dim 0.0 +86 76 optimizer.lr 0.08448204395224082 +86 76 training.batch_size 1.0 +86 76 training.label_smoothing 0.7864493158195069 +86 77 model.output_channels 20.0 +86 77 model.input_dropout 0.10675074054146838 +86 77 model.output_dropout 0.4494657592579916 +86 77 model.feature_map_dropout 0.03947499169042812 +86 77 model.embedding_dim 2.0 +86 77 optimizer.lr 0.0862599134775147 +86 77 training.batch_size 0.0 +86 77 training.label_smoothing 0.38832001196493815 +86 78 model.output_channels 58.0 +86 78 model.input_dropout 0.4838228315807722 +86 78 model.output_dropout 0.3854675132347769 +86 78 model.feature_map_dropout 0.06184636284000394 +86 78 model.embedding_dim 2.0 +86 78 optimizer.lr 0.007377238191980906 +86 78 training.batch_size 1.0 +86 78 training.label_smoothing 0.002947460703369453 +86 79 model.output_channels 24.0 +86 79 model.input_dropout 0.44845167480084097 +86 79 model.output_dropout 0.06882736992957583 +86 79 model.feature_map_dropout 0.32542697438412793 +86 79 model.embedding_dim 1.0 +86 79 optimizer.lr 0.08842719431760357 +86 79 training.batch_size 0.0 +86 79 training.label_smoothing 0.4389094028011428 +86 80 model.output_channels 21.0 +86 80 model.input_dropout 0.22145834335781073 +86 80 model.output_dropout 0.14922157189363933 +86 80 model.feature_map_dropout 0.07980043307154328 +86 80 model.embedding_dim 2.0 +86 80 optimizer.lr 0.010457212272426764 +86 80 training.batch_size 1.0 +86 80 training.label_smoothing 0.003318321577949201 +86 81 model.output_channels 26.0 +86 81 model.input_dropout 0.03775770011469731 +86 81 model.output_dropout 0.05512180992590687 +86 81 model.feature_map_dropout 0.305021887603569 +86 81 model.embedding_dim 0.0 +86 81 optimizer.lr 0.03315086171624552 +86 81 training.batch_size 
1.0 +86 81 training.label_smoothing 0.10857456018036511 +86 82 model.output_channels 56.0 +86 82 model.input_dropout 0.3740569119493006 +86 82 model.output_dropout 0.1867929110691826 +86 82 model.feature_map_dropout 0.294434820657301 +86 82 model.embedding_dim 2.0 +86 82 optimizer.lr 0.009025938426840407 +86 82 training.batch_size 1.0 +86 82 training.label_smoothing 0.06793532293066079 +86 83 model.output_channels 47.0 +86 83 model.input_dropout 0.2661416177062021 +86 83 model.output_dropout 0.13052702659812376 +86 83 model.feature_map_dropout 0.48766959351272965 +86 83 model.embedding_dim 1.0 +86 83 optimizer.lr 0.0015984922085741148 +86 83 training.batch_size 1.0 +86 83 training.label_smoothing 0.04229500879295503 +86 84 model.output_channels 16.0 +86 84 model.input_dropout 0.4051342939686811 +86 84 model.output_dropout 0.32406003743253947 +86 84 model.feature_map_dropout 0.14310553277520915 +86 84 model.embedding_dim 0.0 +86 84 optimizer.lr 0.0611568444059766 +86 84 training.batch_size 2.0 +86 84 training.label_smoothing 0.1764094300980648 +86 85 model.output_channels 24.0 +86 85 model.input_dropout 0.48955983987020507 +86 85 model.output_dropout 0.4030872208137456 +86 85 model.feature_map_dropout 0.36123514740870294 +86 85 model.embedding_dim 0.0 +86 85 optimizer.lr 0.0027811680040722747 +86 85 training.batch_size 0.0 +86 85 training.label_smoothing 0.0015064580525787818 +86 86 model.output_channels 59.0 +86 86 model.input_dropout 0.48967910480899185 +86 86 model.output_dropout 0.06698587130662992 +86 86 model.feature_map_dropout 0.10641535390835255 +86 86 model.embedding_dim 1.0 +86 86 optimizer.lr 0.011057964809640991 +86 86 training.batch_size 1.0 +86 86 training.label_smoothing 0.002017161540132916 +86 87 model.output_channels 59.0 +86 87 model.input_dropout 0.10606216766637722 +86 87 model.output_dropout 0.1848448361305745 +86 87 model.feature_map_dropout 0.4039812533706556 +86 87 model.embedding_dim 2.0 +86 87 optimizer.lr 0.005552998436043546 +86 87 
training.batch_size 0.0 +86 87 training.label_smoothing 0.8119715079821664 +86 88 model.output_channels 27.0 +86 88 model.input_dropout 0.040285243693909345 +86 88 model.output_dropout 0.31400007925403767 +86 88 model.feature_map_dropout 0.11249589684303923 +86 88 model.embedding_dim 2.0 +86 88 optimizer.lr 0.06789850086436301 +86 88 training.batch_size 2.0 +86 88 training.label_smoothing 0.2253582807586149 +86 89 model.output_channels 37.0 +86 89 model.input_dropout 0.08010450658711099 +86 89 model.output_dropout 0.18207310330826876 +86 89 model.feature_map_dropout 0.36672471673363055 +86 89 model.embedding_dim 2.0 +86 89 optimizer.lr 0.002313846713613809 +86 89 training.batch_size 1.0 +86 89 training.label_smoothing 0.24780214035244275 +86 90 model.output_channels 21.0 +86 90 model.input_dropout 0.4774993228820077 +86 90 model.output_dropout 0.19970992981844615 +86 90 model.feature_map_dropout 0.2725113630547711 +86 90 model.embedding_dim 1.0 +86 90 optimizer.lr 0.03201232608629606 +86 90 training.batch_size 1.0 +86 90 training.label_smoothing 0.36950078863069163 +86 91 model.output_channels 36.0 +86 91 model.input_dropout 0.3420438886269655 +86 91 model.output_dropout 0.43735685419849024 +86 91 model.feature_map_dropout 0.47891455465253496 +86 91 model.embedding_dim 2.0 +86 91 optimizer.lr 0.007709629586427068 +86 91 training.batch_size 2.0 +86 91 training.label_smoothing 0.005190930688868103 +86 92 model.output_channels 20.0 +86 92 model.input_dropout 0.293366560321832 +86 92 model.output_dropout 0.15302651951390783 +86 92 model.feature_map_dropout 0.11648896559373068 +86 92 model.embedding_dim 0.0 +86 92 optimizer.lr 0.024387156787113163 +86 92 training.batch_size 1.0 +86 92 training.label_smoothing 0.09167811138631433 +86 93 model.output_channels 34.0 +86 93 model.input_dropout 0.2807273573466139 +86 93 model.output_dropout 0.269300228258368 +86 93 model.feature_map_dropout 0.4888459522762852 +86 93 model.embedding_dim 2.0 +86 93 optimizer.lr 
0.03012411110855072 +86 93 training.batch_size 2.0 +86 93 training.label_smoothing 0.022448559876691997 +86 94 model.output_channels 48.0 +86 94 model.input_dropout 0.06740199074458991 +86 94 model.output_dropout 0.49840334962646304 +86 94 model.feature_map_dropout 0.00737697916324942 +86 94 model.embedding_dim 2.0 +86 94 optimizer.lr 0.003168877762820185 +86 94 training.batch_size 2.0 +86 94 training.label_smoothing 0.0017175133997184891 +86 95 model.output_channels 52.0 +86 95 model.input_dropout 0.3033063009389759 +86 95 model.output_dropout 0.050716886828379326 +86 95 model.feature_map_dropout 0.21497130801354458 +86 95 model.embedding_dim 1.0 +86 95 optimizer.lr 0.0017965602219652487 +86 95 training.batch_size 1.0 +86 95 training.label_smoothing 0.047287447203151825 +86 96 model.output_channels 53.0 +86 96 model.input_dropout 0.2809657969929947 +86 96 model.output_dropout 0.11039668134671443 +86 96 model.feature_map_dropout 0.4345665642897576 +86 96 model.embedding_dim 2.0 +86 96 optimizer.lr 0.08177978957142587 +86 96 training.batch_size 0.0 +86 96 training.label_smoothing 0.002305443720457977 +86 97 model.output_channels 44.0 +86 97 model.input_dropout 0.41581581278393054 +86 97 model.output_dropout 0.4187382177107506 +86 97 model.feature_map_dropout 0.09281120118929481 +86 97 model.embedding_dim 0.0 +86 97 optimizer.lr 0.012294236786226 +86 97 training.batch_size 1.0 +86 97 training.label_smoothing 0.0012374255399407851 +86 98 model.output_channels 46.0 +86 98 model.input_dropout 0.2837068716037552 +86 98 model.output_dropout 0.4208312026059782 +86 98 model.feature_map_dropout 0.4110182241731367 +86 98 model.embedding_dim 2.0 +86 98 optimizer.lr 0.017412984238141114 +86 98 training.batch_size 0.0 +86 98 training.label_smoothing 0.5039409688666798 +86 99 model.output_channels 17.0 +86 99 model.input_dropout 0.1422530342332679 +86 99 model.output_dropout 0.12048546492770451 +86 99 model.feature_map_dropout 0.13590380430402949 +86 99 model.embedding_dim 1.0 
+86 99 optimizer.lr 0.009516392771852034 +86 99 training.batch_size 2.0 +86 99 training.label_smoothing 0.001024425456063692 +86 100 model.output_channels 20.0 +86 100 model.input_dropout 0.2294132742938465 +86 100 model.output_dropout 0.4684316049685323 +86 100 model.feature_map_dropout 0.22400280761706753 +86 100 model.embedding_dim 0.0 +86 100 optimizer.lr 0.023118142284782716 +86 100 training.batch_size 0.0 +86 100 training.label_smoothing 0.018214413342156247 +86 1 dataset """kinships""" +86 1 model """conve""" +86 1 loss """crossentropy""" +86 1 regularizer """no""" +86 1 optimizer """adam""" +86 1 training_loop """lcwa""" +86 1 evaluator """rankbased""" +86 2 dataset """kinships""" +86 2 model """conve""" +86 2 loss """crossentropy""" +86 2 regularizer """no""" +86 2 optimizer """adam""" +86 2 training_loop """lcwa""" +86 2 evaluator """rankbased""" +86 3 dataset """kinships""" +86 3 model """conve""" +86 3 loss """crossentropy""" +86 3 regularizer """no""" +86 3 optimizer """adam""" +86 3 training_loop """lcwa""" +86 3 evaluator """rankbased""" +86 4 dataset """kinships""" +86 4 model """conve""" +86 4 loss """crossentropy""" +86 4 regularizer """no""" +86 4 optimizer """adam""" +86 4 training_loop """lcwa""" +86 4 evaluator """rankbased""" +86 5 dataset """kinships""" +86 5 model """conve""" +86 5 loss """crossentropy""" +86 5 regularizer """no""" +86 5 optimizer """adam""" +86 5 training_loop """lcwa""" +86 5 evaluator """rankbased""" +86 6 dataset """kinships""" +86 6 model """conve""" +86 6 loss """crossentropy""" +86 6 regularizer """no""" +86 6 optimizer """adam""" +86 6 training_loop """lcwa""" +86 6 evaluator """rankbased""" +86 7 dataset """kinships""" +86 7 model """conve""" +86 7 loss """crossentropy""" +86 7 regularizer """no""" +86 7 optimizer """adam""" +86 7 training_loop """lcwa""" +86 7 evaluator """rankbased""" +86 8 dataset """kinships""" +86 8 model """conve""" +86 8 loss """crossentropy""" +86 8 regularizer """no""" +86 8 optimizer 
"""adam""" +86 8 training_loop """lcwa""" +86 8 evaluator """rankbased""" +86 9 dataset """kinships""" +86 9 model """conve""" +86 9 loss """crossentropy""" +86 9 regularizer """no""" +86 9 optimizer """adam""" +86 9 training_loop """lcwa""" +86 9 evaluator """rankbased""" +86 10 dataset """kinships""" +86 10 model """conve""" +86 10 loss """crossentropy""" +86 10 regularizer """no""" +86 10 optimizer """adam""" +86 10 training_loop """lcwa""" +86 10 evaluator """rankbased""" +86 11 dataset """kinships""" +86 11 model """conve""" +86 11 loss """crossentropy""" +86 11 regularizer """no""" +86 11 optimizer """adam""" +86 11 training_loop """lcwa""" +86 11 evaluator """rankbased""" +86 12 dataset """kinships""" +86 12 model """conve""" +86 12 loss """crossentropy""" +86 12 regularizer """no""" +86 12 optimizer """adam""" +86 12 training_loop """lcwa""" +86 12 evaluator """rankbased""" +86 13 dataset """kinships""" +86 13 model """conve""" +86 13 loss """crossentropy""" +86 13 regularizer """no""" +86 13 optimizer """adam""" +86 13 training_loop """lcwa""" +86 13 evaluator """rankbased""" +86 14 dataset """kinships""" +86 14 model """conve""" +86 14 loss """crossentropy""" +86 14 regularizer """no""" +86 14 optimizer """adam""" +86 14 training_loop """lcwa""" +86 14 evaluator """rankbased""" +86 15 dataset """kinships""" +86 15 model """conve""" +86 15 loss """crossentropy""" +86 15 regularizer """no""" +86 15 optimizer """adam""" +86 15 training_loop """lcwa""" +86 15 evaluator """rankbased""" +86 16 dataset """kinships""" +86 16 model """conve""" +86 16 loss """crossentropy""" +86 16 regularizer """no""" +86 16 optimizer """adam""" +86 16 training_loop """lcwa""" +86 16 evaluator """rankbased""" +86 17 dataset """kinships""" +86 17 model """conve""" +86 17 loss """crossentropy""" +86 17 regularizer """no""" +86 17 optimizer """adam""" +86 17 training_loop """lcwa""" +86 17 evaluator """rankbased""" +86 18 dataset """kinships""" +86 18 model """conve""" +86 18 loss 
"""crossentropy""" +86 18 regularizer """no""" +86 18 optimizer """adam""" +86 18 training_loop """lcwa""" +86 18 evaluator """rankbased""" +86 19 dataset """kinships""" +86 19 model """conve""" +86 19 loss """crossentropy""" +86 19 regularizer """no""" +86 19 optimizer """adam""" +86 19 training_loop """lcwa""" +86 19 evaluator """rankbased""" +86 20 dataset """kinships""" +86 20 model """conve""" +86 20 loss """crossentropy""" +86 20 regularizer """no""" +86 20 optimizer """adam""" +86 20 training_loop """lcwa""" +86 20 evaluator """rankbased""" +86 21 dataset """kinships""" +86 21 model """conve""" +86 21 loss """crossentropy""" +86 21 regularizer """no""" +86 21 optimizer """adam""" +86 21 training_loop """lcwa""" +86 21 evaluator """rankbased""" +86 22 dataset """kinships""" +86 22 model """conve""" +86 22 loss """crossentropy""" +86 22 regularizer """no""" +86 22 optimizer """adam""" +86 22 training_loop """lcwa""" +86 22 evaluator """rankbased""" +86 23 dataset """kinships""" +86 23 model """conve""" +86 23 loss """crossentropy""" +86 23 regularizer """no""" +86 23 optimizer """adam""" +86 23 training_loop """lcwa""" +86 23 evaluator """rankbased""" +86 24 dataset """kinships""" +86 24 model """conve""" +86 24 loss """crossentropy""" +86 24 regularizer """no""" +86 24 optimizer """adam""" +86 24 training_loop """lcwa""" +86 24 evaluator """rankbased""" +86 25 dataset """kinships""" +86 25 model """conve""" +86 25 loss """crossentropy""" +86 25 regularizer """no""" +86 25 optimizer """adam""" +86 25 training_loop """lcwa""" +86 25 evaluator """rankbased""" +86 26 dataset """kinships""" +86 26 model """conve""" +86 26 loss """crossentropy""" +86 26 regularizer """no""" +86 26 optimizer """adam""" +86 26 training_loop """lcwa""" +86 26 evaluator """rankbased""" +86 27 dataset """kinships""" +86 27 model """conve""" +86 27 loss """crossentropy""" +86 27 regularizer """no""" +86 27 optimizer """adam""" +86 27 training_loop """lcwa""" +86 27 evaluator 
"""rankbased""" +86 28 dataset """kinships""" +86 28 model """conve""" +86 28 loss """crossentropy""" +86 28 regularizer """no""" +86 28 optimizer """adam""" +86 28 training_loop """lcwa""" +86 28 evaluator """rankbased""" +86 29 dataset """kinships""" +86 29 model """conve""" +86 29 loss """crossentropy""" +86 29 regularizer """no""" +86 29 optimizer """adam""" +86 29 training_loop """lcwa""" +86 29 evaluator """rankbased""" +86 30 dataset """kinships""" +86 30 model """conve""" +86 30 loss """crossentropy""" +86 30 regularizer """no""" +86 30 optimizer """adam""" +86 30 training_loop """lcwa""" +86 30 evaluator """rankbased""" +86 31 dataset """kinships""" +86 31 model """conve""" +86 31 loss """crossentropy""" +86 31 regularizer """no""" +86 31 optimizer """adam""" +86 31 training_loop """lcwa""" +86 31 evaluator """rankbased""" +86 32 dataset """kinships""" +86 32 model """conve""" +86 32 loss """crossentropy""" +86 32 regularizer """no""" +86 32 optimizer """adam""" +86 32 training_loop """lcwa""" +86 32 evaluator """rankbased""" +86 33 dataset """kinships""" +86 33 model """conve""" +86 33 loss """crossentropy""" +86 33 regularizer """no""" +86 33 optimizer """adam""" +86 33 training_loop """lcwa""" +86 33 evaluator """rankbased""" +86 34 dataset """kinships""" +86 34 model """conve""" +86 34 loss """crossentropy""" +86 34 regularizer """no""" +86 34 optimizer """adam""" +86 34 training_loop """lcwa""" +86 34 evaluator """rankbased""" +86 35 dataset """kinships""" +86 35 model """conve""" +86 35 loss """crossentropy""" +86 35 regularizer """no""" +86 35 optimizer """adam""" +86 35 training_loop """lcwa""" +86 35 evaluator """rankbased""" +86 36 dataset """kinships""" +86 36 model """conve""" +86 36 loss """crossentropy""" +86 36 regularizer """no""" +86 36 optimizer """adam""" +86 36 training_loop """lcwa""" +86 36 evaluator """rankbased""" +86 37 dataset """kinships""" +86 37 model """conve""" +86 37 loss """crossentropy""" +86 37 regularizer """no""" +86 37 
optimizer """adam""" +86 37 training_loop """lcwa""" +86 37 evaluator """rankbased""" +86 38 dataset """kinships""" +86 38 model """conve""" +86 38 loss """crossentropy""" +86 38 regularizer """no""" +86 38 optimizer """adam""" +86 38 training_loop """lcwa""" +86 38 evaluator """rankbased""" +86 39 dataset """kinships""" +86 39 model """conve""" +86 39 loss """crossentropy""" +86 39 regularizer """no""" +86 39 optimizer """adam""" +86 39 training_loop """lcwa""" +86 39 evaluator """rankbased""" +86 40 dataset """kinships""" +86 40 model """conve""" +86 40 loss """crossentropy""" +86 40 regularizer """no""" +86 40 optimizer """adam""" +86 40 training_loop """lcwa""" +86 40 evaluator """rankbased""" +86 41 dataset """kinships""" +86 41 model """conve""" +86 41 loss """crossentropy""" +86 41 regularizer """no""" +86 41 optimizer """adam""" +86 41 training_loop """lcwa""" +86 41 evaluator """rankbased""" +86 42 dataset """kinships""" +86 42 model """conve""" +86 42 loss """crossentropy""" +86 42 regularizer """no""" +86 42 optimizer """adam""" +86 42 training_loop """lcwa""" +86 42 evaluator """rankbased""" +86 43 dataset """kinships""" +86 43 model """conve""" +86 43 loss """crossentropy""" +86 43 regularizer """no""" +86 43 optimizer """adam""" +86 43 training_loop """lcwa""" +86 43 evaluator """rankbased""" +86 44 dataset """kinships""" +86 44 model """conve""" +86 44 loss """crossentropy""" +86 44 regularizer """no""" +86 44 optimizer """adam""" +86 44 training_loop """lcwa""" +86 44 evaluator """rankbased""" +86 45 dataset """kinships""" +86 45 model """conve""" +86 45 loss """crossentropy""" +86 45 regularizer """no""" +86 45 optimizer """adam""" +86 45 training_loop """lcwa""" +86 45 evaluator """rankbased""" +86 46 dataset """kinships""" +86 46 model """conve""" +86 46 loss """crossentropy""" +86 46 regularizer """no""" +86 46 optimizer """adam""" +86 46 training_loop """lcwa""" +86 46 evaluator """rankbased""" +86 47 dataset """kinships""" +86 47 model 
"""conve""" +86 47 loss """crossentropy""" +86 47 regularizer """no""" +86 47 optimizer """adam""" +86 47 training_loop """lcwa""" +86 47 evaluator """rankbased""" +86 48 dataset """kinships""" +86 48 model """conve""" +86 48 loss """crossentropy""" +86 48 regularizer """no""" +86 48 optimizer """adam""" +86 48 training_loop """lcwa""" +86 48 evaluator """rankbased""" +86 49 dataset """kinships""" +86 49 model """conve""" +86 49 loss """crossentropy""" +86 49 regularizer """no""" +86 49 optimizer """adam""" +86 49 training_loop """lcwa""" +86 49 evaluator """rankbased""" +86 50 dataset """kinships""" +86 50 model """conve""" +86 50 loss """crossentropy""" +86 50 regularizer """no""" +86 50 optimizer """adam""" +86 50 training_loop """lcwa""" +86 50 evaluator """rankbased""" +86 51 dataset """kinships""" +86 51 model """conve""" +86 51 loss """crossentropy""" +86 51 regularizer """no""" +86 51 optimizer """adam""" +86 51 training_loop """lcwa""" +86 51 evaluator """rankbased""" +86 52 dataset """kinships""" +86 52 model """conve""" +86 52 loss """crossentropy""" +86 52 regularizer """no""" +86 52 optimizer """adam""" +86 52 training_loop """lcwa""" +86 52 evaluator """rankbased""" +86 53 dataset """kinships""" +86 53 model """conve""" +86 53 loss """crossentropy""" +86 53 regularizer """no""" +86 53 optimizer """adam""" +86 53 training_loop """lcwa""" +86 53 evaluator """rankbased""" +86 54 dataset """kinships""" +86 54 model """conve""" +86 54 loss """crossentropy""" +86 54 regularizer """no""" +86 54 optimizer """adam""" +86 54 training_loop """lcwa""" +86 54 evaluator """rankbased""" +86 55 dataset """kinships""" +86 55 model """conve""" +86 55 loss """crossentropy""" +86 55 regularizer """no""" +86 55 optimizer """adam""" +86 55 training_loop """lcwa""" +86 55 evaluator """rankbased""" +86 56 dataset """kinships""" +86 56 model """conve""" +86 56 loss """crossentropy""" +86 56 regularizer """no""" +86 56 optimizer """adam""" +86 56 training_loop """lcwa""" +86 
56 evaluator """rankbased""" +86 57 dataset """kinships""" +86 57 model """conve""" +86 57 loss """crossentropy""" +86 57 regularizer """no""" +86 57 optimizer """adam""" +86 57 training_loop """lcwa""" +86 57 evaluator """rankbased""" +86 58 dataset """kinships""" +86 58 model """conve""" +86 58 loss """crossentropy""" +86 58 regularizer """no""" +86 58 optimizer """adam""" +86 58 training_loop """lcwa""" +86 58 evaluator """rankbased""" +86 59 dataset """kinships""" +86 59 model """conve""" +86 59 loss """crossentropy""" +86 59 regularizer """no""" +86 59 optimizer """adam""" +86 59 training_loop """lcwa""" +86 59 evaluator """rankbased""" +86 60 dataset """kinships""" +86 60 model """conve""" +86 60 loss """crossentropy""" +86 60 regularizer """no""" +86 60 optimizer """adam""" +86 60 training_loop """lcwa""" +86 60 evaluator """rankbased""" +86 61 dataset """kinships""" +86 61 model """conve""" +86 61 loss """crossentropy""" +86 61 regularizer """no""" +86 61 optimizer """adam""" +86 61 training_loop """lcwa""" +86 61 evaluator """rankbased""" +86 62 dataset """kinships""" +86 62 model """conve""" +86 62 loss """crossentropy""" +86 62 regularizer """no""" +86 62 optimizer """adam""" +86 62 training_loop """lcwa""" +86 62 evaluator """rankbased""" +86 63 dataset """kinships""" +86 63 model """conve""" +86 63 loss """crossentropy""" +86 63 regularizer """no""" +86 63 optimizer """adam""" +86 63 training_loop """lcwa""" +86 63 evaluator """rankbased""" +86 64 dataset """kinships""" +86 64 model """conve""" +86 64 loss """crossentropy""" +86 64 regularizer """no""" +86 64 optimizer """adam""" +86 64 training_loop """lcwa""" +86 64 evaluator """rankbased""" +86 65 dataset """kinships""" +86 65 model """conve""" +86 65 loss """crossentropy""" +86 65 regularizer """no""" +86 65 optimizer """adam""" +86 65 training_loop """lcwa""" +86 65 evaluator """rankbased""" +86 66 dataset """kinships""" +86 66 model """conve""" +86 66 loss """crossentropy""" +86 66 regularizer 
"""no""" +86 66 optimizer """adam""" +86 66 training_loop """lcwa""" +86 66 evaluator """rankbased""" +86 67 dataset """kinships""" +86 67 model """conve""" +86 67 loss """crossentropy""" +86 67 regularizer """no""" +86 67 optimizer """adam""" +86 67 training_loop """lcwa""" +86 67 evaluator """rankbased""" +86 68 dataset """kinships""" +86 68 model """conve""" +86 68 loss """crossentropy""" +86 68 regularizer """no""" +86 68 optimizer """adam""" +86 68 training_loop """lcwa""" +86 68 evaluator """rankbased""" +86 69 dataset """kinships""" +86 69 model """conve""" +86 69 loss """crossentropy""" +86 69 regularizer """no""" +86 69 optimizer """adam""" +86 69 training_loop """lcwa""" +86 69 evaluator """rankbased""" +86 70 dataset """kinships""" +86 70 model """conve""" +86 70 loss """crossentropy""" +86 70 regularizer """no""" +86 70 optimizer """adam""" +86 70 training_loop """lcwa""" +86 70 evaluator """rankbased""" +86 71 dataset """kinships""" +86 71 model """conve""" +86 71 loss """crossentropy""" +86 71 regularizer """no""" +86 71 optimizer """adam""" +86 71 training_loop """lcwa""" +86 71 evaluator """rankbased""" +86 72 dataset """kinships""" +86 72 model """conve""" +86 72 loss """crossentropy""" +86 72 regularizer """no""" +86 72 optimizer """adam""" +86 72 training_loop """lcwa""" +86 72 evaluator """rankbased""" +86 73 dataset """kinships""" +86 73 model """conve""" +86 73 loss """crossentropy""" +86 73 regularizer """no""" +86 73 optimizer """adam""" +86 73 training_loop """lcwa""" +86 73 evaluator """rankbased""" +86 74 dataset """kinships""" +86 74 model """conve""" +86 74 loss """crossentropy""" +86 74 regularizer """no""" +86 74 optimizer """adam""" +86 74 training_loop """lcwa""" +86 74 evaluator """rankbased""" +86 75 dataset """kinships""" +86 75 model """conve""" +86 75 loss """crossentropy""" +86 75 regularizer """no""" +86 75 optimizer """adam""" +86 75 training_loop """lcwa""" +86 75 evaluator """rankbased""" +86 76 dataset """kinships""" +86 
76 model """conve""" +86 76 loss """crossentropy""" +86 76 regularizer """no""" +86 76 optimizer """adam""" +86 76 training_loop """lcwa""" +86 76 evaluator """rankbased""" +86 77 dataset """kinships""" +86 77 model """conve""" +86 77 loss """crossentropy""" +86 77 regularizer """no""" +86 77 optimizer """adam""" +86 77 training_loop """lcwa""" +86 77 evaluator """rankbased""" +86 78 dataset """kinships""" +86 78 model """conve""" +86 78 loss """crossentropy""" +86 78 regularizer """no""" +86 78 optimizer """adam""" +86 78 training_loop """lcwa""" +86 78 evaluator """rankbased""" +86 79 dataset """kinships""" +86 79 model """conve""" +86 79 loss """crossentropy""" +86 79 regularizer """no""" +86 79 optimizer """adam""" +86 79 training_loop """lcwa""" +86 79 evaluator """rankbased""" +86 80 dataset """kinships""" +86 80 model """conve""" +86 80 loss """crossentropy""" +86 80 regularizer """no""" +86 80 optimizer """adam""" +86 80 training_loop """lcwa""" +86 80 evaluator """rankbased""" +86 81 dataset """kinships""" +86 81 model """conve""" +86 81 loss """crossentropy""" +86 81 regularizer """no""" +86 81 optimizer """adam""" +86 81 training_loop """lcwa""" +86 81 evaluator """rankbased""" +86 82 dataset """kinships""" +86 82 model """conve""" +86 82 loss """crossentropy""" +86 82 regularizer """no""" +86 82 optimizer """adam""" +86 82 training_loop """lcwa""" +86 82 evaluator """rankbased""" +86 83 dataset """kinships""" +86 83 model """conve""" +86 83 loss """crossentropy""" +86 83 regularizer """no""" +86 83 optimizer """adam""" +86 83 training_loop """lcwa""" +86 83 evaluator """rankbased""" +86 84 dataset """kinships""" +86 84 model """conve""" +86 84 loss """crossentropy""" +86 84 regularizer """no""" +86 84 optimizer """adam""" +86 84 training_loop """lcwa""" +86 84 evaluator """rankbased""" +86 85 dataset """kinships""" +86 85 model """conve""" +86 85 loss """crossentropy""" +86 85 regularizer """no""" +86 85 optimizer """adam""" +86 85 training_loop 
"""lcwa""" +86 85 evaluator """rankbased""" +86 86 dataset """kinships""" +86 86 model """conve""" +86 86 loss """crossentropy""" +86 86 regularizer """no""" +86 86 optimizer """adam""" +86 86 training_loop """lcwa""" +86 86 evaluator """rankbased""" +86 87 dataset """kinships""" +86 87 model """conve""" +86 87 loss """crossentropy""" +86 87 regularizer """no""" +86 87 optimizer """adam""" +86 87 training_loop """lcwa""" +86 87 evaluator """rankbased""" +86 88 dataset """kinships""" +86 88 model """conve""" +86 88 loss """crossentropy""" +86 88 regularizer """no""" +86 88 optimizer """adam""" +86 88 training_loop """lcwa""" +86 88 evaluator """rankbased""" +86 89 dataset """kinships""" +86 89 model """conve""" +86 89 loss """crossentropy""" +86 89 regularizer """no""" +86 89 optimizer """adam""" +86 89 training_loop """lcwa""" +86 89 evaluator """rankbased""" +86 90 dataset """kinships""" +86 90 model """conve""" +86 90 loss """crossentropy""" +86 90 regularizer """no""" +86 90 optimizer """adam""" +86 90 training_loop """lcwa""" +86 90 evaluator """rankbased""" +86 91 dataset """kinships""" +86 91 model """conve""" +86 91 loss """crossentropy""" +86 91 regularizer """no""" +86 91 optimizer """adam""" +86 91 training_loop """lcwa""" +86 91 evaluator """rankbased""" +86 92 dataset """kinships""" +86 92 model """conve""" +86 92 loss """crossentropy""" +86 92 regularizer """no""" +86 92 optimizer """adam""" +86 92 training_loop """lcwa""" +86 92 evaluator """rankbased""" +86 93 dataset """kinships""" +86 93 model """conve""" +86 93 loss """crossentropy""" +86 93 regularizer """no""" +86 93 optimizer """adam""" +86 93 training_loop """lcwa""" +86 93 evaluator """rankbased""" +86 94 dataset """kinships""" +86 94 model """conve""" +86 94 loss """crossentropy""" +86 94 regularizer """no""" +86 94 optimizer """adam""" +86 94 training_loop """lcwa""" +86 94 evaluator """rankbased""" +86 95 dataset """kinships""" +86 95 model """conve""" +86 95 loss """crossentropy""" +86 95 
regularizer """no""" +86 95 optimizer """adam""" +86 95 training_loop """lcwa""" +86 95 evaluator """rankbased""" +86 96 dataset """kinships""" +86 96 model """conve""" +86 96 loss """crossentropy""" +86 96 regularizer """no""" +86 96 optimizer """adam""" +86 96 training_loop """lcwa""" +86 96 evaluator """rankbased""" +86 97 dataset """kinships""" +86 97 model """conve""" +86 97 loss """crossentropy""" +86 97 regularizer """no""" +86 97 optimizer """adam""" +86 97 training_loop """lcwa""" +86 97 evaluator """rankbased""" +86 98 dataset """kinships""" +86 98 model """conve""" +86 98 loss """crossentropy""" +86 98 regularizer """no""" +86 98 optimizer """adam""" +86 98 training_loop """lcwa""" +86 98 evaluator """rankbased""" +86 99 dataset """kinships""" +86 99 model """conve""" +86 99 loss """crossentropy""" +86 99 regularizer """no""" +86 99 optimizer """adam""" +86 99 training_loop """lcwa""" +86 99 evaluator """rankbased""" +86 100 dataset """kinships""" +86 100 model """conve""" +86 100 loss """crossentropy""" +86 100 regularizer """no""" +86 100 optimizer """adam""" +86 100 training_loop """lcwa""" +86 100 evaluator """rankbased""" +87 1 model.output_channels 51.0 +87 1 model.input_dropout 0.37231314825635226 +87 1 model.output_dropout 0.2531462731985167 +87 1 model.feature_map_dropout 0.024157939373247006 +87 1 model.embedding_dim 0.0 +87 1 optimizer.lr 0.013063971996986896 +87 1 training.batch_size 1.0 +87 1 training.label_smoothing 0.0030865098768742823 +87 2 model.output_channels 36.0 +87 2 model.input_dropout 0.23864752456971544 +87 2 model.output_dropout 0.30626220751143374 +87 2 model.feature_map_dropout 0.04068519944822446 +87 2 model.embedding_dim 0.0 +87 2 optimizer.lr 0.05442635074442477 +87 2 training.batch_size 1.0 +87 2 training.label_smoothing 0.05403581033349323 +87 3 model.output_channels 16.0 +87 3 model.input_dropout 0.3099851620126757 +87 3 model.output_dropout 0.4395469973174085 +87 3 model.feature_map_dropout 0.20800821100970818 +87 3 
model.embedding_dim 1.0 +87 3 optimizer.lr 0.03120707742808491 +87 3 training.batch_size 0.0 +87 3 training.label_smoothing 0.01597324427883034 +87 4 model.output_channels 64.0 +87 4 model.input_dropout 0.38269050093696566 +87 4 model.output_dropout 0.49797515326585123 +87 4 model.feature_map_dropout 0.4083541802190436 +87 4 model.embedding_dim 0.0 +87 4 optimizer.lr 0.001443255937181915 +87 4 training.batch_size 2.0 +87 4 training.label_smoothing 0.026657967276515798 +87 5 model.output_channels 46.0 +87 5 model.input_dropout 0.24533835478276228 +87 5 model.output_dropout 0.38751282518241376 +87 5 model.feature_map_dropout 0.15885635861974146 +87 5 model.embedding_dim 2.0 +87 5 optimizer.lr 0.02923331906818995 +87 5 training.batch_size 1.0 +87 5 training.label_smoothing 0.1142124062564346 +87 6 model.output_channels 26.0 +87 6 model.input_dropout 0.436568796505372 +87 6 model.output_dropout 0.12293875970935991 +87 6 model.feature_map_dropout 0.3337525194204895 +87 6 model.embedding_dim 0.0 +87 6 optimizer.lr 0.007999768159624561 +87 6 training.batch_size 0.0 +87 6 training.label_smoothing 0.005156343945444042 +87 7 model.output_channels 31.0 +87 7 model.input_dropout 0.3902317841065326 +87 7 model.output_dropout 0.011322591739440169 +87 7 model.feature_map_dropout 0.11916317197016185 +87 7 model.embedding_dim 2.0 +87 7 optimizer.lr 0.0010014416408036652 +87 7 training.batch_size 1.0 +87 7 training.label_smoothing 0.05230603904129453 +87 8 model.output_channels 19.0 +87 8 model.input_dropout 0.2310031760210421 +87 8 model.output_dropout 0.29370943419594353 +87 8 model.feature_map_dropout 0.3528796746583891 +87 8 model.embedding_dim 1.0 +87 8 optimizer.lr 0.007910922493407507 +87 8 training.batch_size 2.0 +87 8 training.label_smoothing 0.0496463509178699 +87 9 model.output_channels 40.0 +87 9 model.input_dropout 0.2814874303506837 +87 9 model.output_dropout 0.06854544093876902 +87 9 model.feature_map_dropout 0.4372669254903044 +87 9 model.embedding_dim 0.0 +87 9 
optimizer.lr 0.0027281451200973504 +87 9 training.batch_size 2.0 +87 9 training.label_smoothing 0.01782139877758519 +87 10 model.output_channels 32.0 +87 10 model.input_dropout 0.3076533639895 +87 10 model.output_dropout 0.3190943681370299 +87 10 model.feature_map_dropout 0.379730002207314 +87 10 model.embedding_dim 0.0 +87 10 optimizer.lr 0.06719738981635827 +87 10 training.batch_size 2.0 +87 10 training.label_smoothing 0.004546742758649445 +87 11 model.output_channels 19.0 +87 11 model.input_dropout 0.12958614472094676 +87 11 model.output_dropout 0.32866368658283623 +87 11 model.feature_map_dropout 0.45702288059568436 +87 11 model.embedding_dim 1.0 +87 11 optimizer.lr 0.004474026136682429 +87 11 training.batch_size 0.0 +87 11 training.label_smoothing 0.14212579491273158 +87 12 model.output_channels 25.0 +87 12 model.input_dropout 0.44840504205291154 +87 12 model.output_dropout 0.4270809707043511 +87 12 model.feature_map_dropout 0.3998543583075134 +87 12 model.embedding_dim 1.0 +87 12 optimizer.lr 0.019283653762242205 +87 12 training.batch_size 0.0 +87 12 training.label_smoothing 0.13650321586643033 +87 13 model.output_channels 28.0 +87 13 model.input_dropout 0.3085054411556683 +87 13 model.output_dropout 0.4205895604016648 +87 13 model.feature_map_dropout 0.02477648537190874 +87 13 model.embedding_dim 1.0 +87 13 optimizer.lr 0.014526062635998729 +87 13 training.batch_size 0.0 +87 13 training.label_smoothing 0.025055633843714377 +87 14 model.output_channels 42.0 +87 14 model.input_dropout 0.4054570434384067 +87 14 model.output_dropout 0.022580816400782522 +87 14 model.feature_map_dropout 0.41091355411746217 +87 14 model.embedding_dim 0.0 +87 14 optimizer.lr 0.006781427783230149 +87 14 training.batch_size 1.0 +87 14 training.label_smoothing 0.0013860147264205106 +87 15 model.output_channels 32.0 +87 15 model.input_dropout 0.11505621392883403 +87 15 model.output_dropout 0.024438401228742246 +87 15 model.feature_map_dropout 0.3972595164070679 +87 15 
model.embedding_dim 0.0 +87 15 optimizer.lr 0.015991861056903835 +87 15 training.batch_size 2.0 +87 15 training.label_smoothing 0.013840634856713316 +87 16 model.output_channels 24.0 +87 16 model.input_dropout 0.23055428142011675 +87 16 model.output_dropout 0.0035004683125113845 +87 16 model.feature_map_dropout 0.04731712780842501 +87 16 model.embedding_dim 2.0 +87 16 optimizer.lr 0.003401204222655735 +87 16 training.batch_size 1.0 +87 16 training.label_smoothing 0.006469958712824287 +87 17 model.output_channels 25.0 +87 17 model.input_dropout 0.28586629003818503 +87 17 model.output_dropout 0.08301287817344621 +87 17 model.feature_map_dropout 0.12737661002472428 +87 17 model.embedding_dim 2.0 +87 17 optimizer.lr 0.06058262827445557 +87 17 training.batch_size 0.0 +87 17 training.label_smoothing 0.0027716004122940955 +87 18 model.output_channels 54.0 +87 18 model.input_dropout 0.15749435878107454 +87 18 model.output_dropout 0.004017221125750259 +87 18 model.feature_map_dropout 0.4130515495570875 +87 18 model.embedding_dim 2.0 +87 18 optimizer.lr 0.013043054091403307 +87 18 training.batch_size 0.0 +87 18 training.label_smoothing 0.21028462646572593 +87 19 model.output_channels 44.0 +87 19 model.input_dropout 0.31290628611456217 +87 19 model.output_dropout 0.25926816674508757 +87 19 model.feature_map_dropout 0.4543174970964501 +87 19 model.embedding_dim 2.0 +87 19 optimizer.lr 0.0013119077723646995 +87 19 training.batch_size 2.0 +87 19 training.label_smoothing 0.0012339957339474162 +87 20 model.output_channels 46.0 +87 20 model.input_dropout 0.3630163926883195 +87 20 model.output_dropout 0.14008331874670998 +87 20 model.feature_map_dropout 0.0926766311768617 +87 20 model.embedding_dim 0.0 +87 20 optimizer.lr 0.0740937831355702 +87 20 training.batch_size 0.0 +87 20 training.label_smoothing 0.01362246056977053 +87 21 model.output_channels 20.0 +87 21 model.input_dropout 0.022429207450076094 +87 21 model.output_dropout 0.35768700296649103 +87 21 model.feature_map_dropout 
0.30037803171642696 +87 21 model.embedding_dim 0.0 +87 21 optimizer.lr 0.0018600144674608588 +87 21 training.batch_size 1.0 +87 21 training.label_smoothing 0.5963435383561336 +87 22 model.output_channels 19.0 +87 22 model.input_dropout 0.447803487092398 +87 22 model.output_dropout 0.30418060199340663 +87 22 model.feature_map_dropout 0.29033221807394954 +87 22 model.embedding_dim 1.0 +87 22 optimizer.lr 0.001942299303244051 +87 22 training.batch_size 1.0 +87 22 training.label_smoothing 0.32136024883730585 +87 23 model.output_channels 22.0 +87 23 model.input_dropout 0.47133196796756904 +87 23 model.output_dropout 0.45584452606129094 +87 23 model.feature_map_dropout 0.1377974477446619 +87 23 model.embedding_dim 0.0 +87 23 optimizer.lr 0.051623483722569904 +87 23 training.batch_size 1.0 +87 23 training.label_smoothing 0.2673205894519746 +87 24 model.output_channels 22.0 +87 24 model.input_dropout 0.1290878492199204 +87 24 model.output_dropout 0.18922960436387115 +87 24 model.feature_map_dropout 0.033310971680163504 +87 24 model.embedding_dim 0.0 +87 24 optimizer.lr 0.007590465204581209 +87 24 training.batch_size 2.0 +87 24 training.label_smoothing 0.011057491449149463 +87 25 model.output_channels 43.0 +87 25 model.input_dropout 0.06124249115234576 +87 25 model.output_dropout 0.4466415236938021 +87 25 model.feature_map_dropout 0.13006772072105804 +87 25 model.embedding_dim 2.0 +87 25 optimizer.lr 0.0026688721345672394 +87 25 training.batch_size 1.0 +87 25 training.label_smoothing 0.013916878962126476 +87 26 model.output_channels 25.0 +87 26 model.input_dropout 0.4258695168037893 +87 26 model.output_dropout 0.0938370232592809 +87 26 model.feature_map_dropout 0.16038018506697088 +87 26 model.embedding_dim 1.0 +87 26 optimizer.lr 0.014529162010449098 +87 26 training.batch_size 2.0 +87 26 training.label_smoothing 0.009095952110711038 +87 27 model.output_channels 54.0 +87 27 model.input_dropout 0.20419654170951335 +87 27 model.output_dropout 0.4057530920401861 +87 27 
model.feature_map_dropout 0.46886122624901244 +87 27 model.embedding_dim 2.0 +87 27 optimizer.lr 0.009216882666267918 +87 27 training.batch_size 1.0 +87 27 training.label_smoothing 0.003294406526135026 +87 28 model.output_channels 49.0 +87 28 model.input_dropout 0.47947485476325186 +87 28 model.output_dropout 0.47754367848047974 +87 28 model.feature_map_dropout 0.4603610400538304 +87 28 model.embedding_dim 1.0 +87 28 optimizer.lr 0.030696778823814188 +87 28 training.batch_size 2.0 +87 28 training.label_smoothing 0.08588423997032497 +87 29 model.output_channels 31.0 +87 29 model.input_dropout 0.17779311845393847 +87 29 model.output_dropout 0.2744680727947274 +87 29 model.feature_map_dropout 0.2037139617739417 +87 29 model.embedding_dim 0.0 +87 29 optimizer.lr 0.06439534880471362 +87 29 training.batch_size 0.0 +87 29 training.label_smoothing 0.005394991731759914 +87 30 model.output_channels 26.0 +87 30 model.input_dropout 0.12920213620248694 +87 30 model.output_dropout 0.2286787458267232 +87 30 model.feature_map_dropout 0.14306734396525622 +87 30 model.embedding_dim 2.0 +87 30 optimizer.lr 0.004005845036855307 +87 30 training.batch_size 0.0 +87 30 training.label_smoothing 0.03762214857485177 +87 31 model.output_channels 46.0 +87 31 model.input_dropout 0.4928230565263643 +87 31 model.output_dropout 0.4131213937805488 +87 31 model.feature_map_dropout 0.1559547666583771 +87 31 model.embedding_dim 2.0 +87 31 optimizer.lr 0.015596410020683784 +87 31 training.batch_size 2.0 +87 31 training.label_smoothing 0.03360224267286723 +87 32 model.output_channels 43.0 +87 32 model.input_dropout 0.2874238564332586 +87 32 model.output_dropout 0.40080735507238235 +87 32 model.feature_map_dropout 0.13465730786505364 +87 32 model.embedding_dim 0.0 +87 32 optimizer.lr 0.016714468299528943 +87 32 training.batch_size 2.0 +87 32 training.label_smoothing 0.8132186751585029 +87 33 model.output_channels 54.0 +87 33 model.input_dropout 0.06955170418527745 +87 33 model.output_dropout 
0.28717783527804763 +87 33 model.feature_map_dropout 0.24014159491471626 +87 33 model.embedding_dim 1.0 +87 33 optimizer.lr 0.01656726516174252 +87 33 training.batch_size 2.0 +87 33 training.label_smoothing 0.9786826957623158 +87 34 model.output_channels 46.0 +87 34 model.input_dropout 0.48896287929440496 +87 34 model.output_dropout 0.26473456922213146 +87 34 model.feature_map_dropout 0.2760077073739646 +87 34 model.embedding_dim 2.0 +87 34 optimizer.lr 0.007994924524426489 +87 34 training.batch_size 1.0 +87 34 training.label_smoothing 0.017960447380495296 +87 35 model.output_channels 46.0 +87 35 model.input_dropout 0.4146267132047711 +87 35 model.output_dropout 0.004320745850090513 +87 35 model.feature_map_dropout 0.09124035853544865 +87 35 model.embedding_dim 0.0 +87 35 optimizer.lr 0.006042802431482127 +87 35 training.batch_size 1.0 +87 35 training.label_smoothing 0.0014056823344742453 +87 36 model.output_channels 44.0 +87 36 model.input_dropout 0.08848824209466699 +87 36 model.output_dropout 0.3018919519334829 +87 36 model.feature_map_dropout 0.22688761276693575 +87 36 model.embedding_dim 1.0 +87 36 optimizer.lr 0.06648806972886469 +87 36 training.batch_size 0.0 +87 36 training.label_smoothing 0.025373447199171652 +87 37 model.output_channels 60.0 +87 37 model.input_dropout 0.1921055982123263 +87 37 model.output_dropout 0.12904510010572828 +87 37 model.feature_map_dropout 0.06758970265278968 +87 37 model.embedding_dim 0.0 +87 37 optimizer.lr 0.047436450008128746 +87 37 training.batch_size 1.0 +87 37 training.label_smoothing 0.5857410733699067 +87 38 model.output_channels 62.0 +87 38 model.input_dropout 0.23647909016809232 +87 38 model.output_dropout 0.49957380895485903 +87 38 model.feature_map_dropout 0.19194298233453627 +87 38 model.embedding_dim 2.0 +87 38 optimizer.lr 0.013996623464090788 +87 38 training.batch_size 0.0 +87 38 training.label_smoothing 0.0013461094465873698 +87 39 model.output_channels 55.0 +87 39 model.input_dropout 0.12255025235712741 +87 39 
model.output_dropout 0.2725518358314852 +87 39 model.feature_map_dropout 0.09204438616635202 +87 39 model.embedding_dim 2.0 +87 39 optimizer.lr 0.005451900076388654 +87 39 training.batch_size 0.0 +87 39 training.label_smoothing 0.10200577764690645 +87 40 model.output_channels 48.0 +87 40 model.input_dropout 0.40088732157567886 +87 40 model.output_dropout 0.4325099849235557 +87 40 model.feature_map_dropout 0.4196981747594017 +87 40 model.embedding_dim 0.0 +87 40 optimizer.lr 0.009555327569985837 +87 40 training.batch_size 1.0 +87 40 training.label_smoothing 0.001093300922791572 +87 41 model.output_channels 37.0 +87 41 model.input_dropout 0.1861932424440269 +87 41 model.output_dropout 0.20898726394291306 +87 41 model.feature_map_dropout 0.0580094583327338 +87 41 model.embedding_dim 1.0 +87 41 optimizer.lr 0.0022468434414546173 +87 41 training.batch_size 2.0 +87 41 training.label_smoothing 0.6388835647650534 +87 42 model.output_channels 49.0 +87 42 model.input_dropout 0.20448247219201565 +87 42 model.output_dropout 0.32998909801222903 +87 42 model.feature_map_dropout 0.0834842250701649 +87 42 model.embedding_dim 0.0 +87 42 optimizer.lr 0.001565523294997616 +87 42 training.batch_size 1.0 +87 42 training.label_smoothing 0.019954302438357305 +87 43 model.output_channels 59.0 +87 43 model.input_dropout 0.18710882748489316 +87 43 model.output_dropout 0.2658728159056964 +87 43 model.feature_map_dropout 0.4337612924193119 +87 43 model.embedding_dim 0.0 +87 43 optimizer.lr 0.018265577389805436 +87 43 training.batch_size 1.0 +87 43 training.label_smoothing 0.001866731220245633 +87 44 model.output_channels 39.0 +87 44 model.input_dropout 0.4580407300062907 +87 44 model.output_dropout 0.46338074605189333 +87 44 model.feature_map_dropout 0.17354185688587576 +87 44 model.embedding_dim 0.0 +87 44 optimizer.lr 0.07571034300118323 +87 44 training.batch_size 2.0 +87 44 training.label_smoothing 0.0012852046296458778 +87 45 model.output_channels 26.0 +87 45 model.input_dropout 
0.2990374192105911 +87 45 model.output_dropout 0.21730781554871098 +87 45 model.feature_map_dropout 0.11983297651188979 +87 45 model.embedding_dim 0.0 +87 45 optimizer.lr 0.0068300431176096955 +87 45 training.batch_size 0.0 +87 45 training.label_smoothing 0.04546744590414586 +87 46 model.output_channels 36.0 +87 46 model.input_dropout 0.16438841561019307 +87 46 model.output_dropout 0.3294267099565674 +87 46 model.feature_map_dropout 0.03227427588205051 +87 46 model.embedding_dim 1.0 +87 46 optimizer.lr 0.004274508317751163 +87 46 training.batch_size 1.0 +87 46 training.label_smoothing 0.01018884689966729 +87 47 model.output_channels 27.0 +87 47 model.input_dropout 0.41947184663842324 +87 47 model.output_dropout 0.3373626221017932 +87 47 model.feature_map_dropout 0.1751194840259837 +87 47 model.embedding_dim 2.0 +87 47 optimizer.lr 0.01502095008342522 +87 47 training.batch_size 1.0 +87 47 training.label_smoothing 0.2131351075663477 +87 48 model.output_channels 61.0 +87 48 model.input_dropout 0.3018016466490978 +87 48 model.output_dropout 0.24272853715477777 +87 48 model.feature_map_dropout 0.4603312317049967 +87 48 model.embedding_dim 1.0 +87 48 optimizer.lr 0.007679420795467504 +87 48 training.batch_size 2.0 +87 48 training.label_smoothing 0.0024409421571510498 +87 49 model.output_channels 37.0 +87 49 model.input_dropout 0.36763729055901706 +87 49 model.output_dropout 0.005133657225594057 +87 49 model.feature_map_dropout 0.37461099352710286 +87 49 model.embedding_dim 1.0 +87 49 optimizer.lr 0.03287661398644378 +87 49 training.batch_size 2.0 +87 49 training.label_smoothing 0.06544271985612424 +87 50 model.output_channels 26.0 +87 50 model.input_dropout 0.26413781880736065 +87 50 model.output_dropout 0.15371797959041933 +87 50 model.feature_map_dropout 0.49973521839149615 +87 50 model.embedding_dim 2.0 +87 50 optimizer.lr 0.039887241242584456 +87 50 training.batch_size 1.0 +87 50 training.label_smoothing 0.003019095811597021 +87 51 model.output_channels 18.0 +87 51 
model.input_dropout 0.49831804514014466 +87 51 model.output_dropout 0.3606727057004767 +87 51 model.feature_map_dropout 0.35182598631872697 +87 51 model.embedding_dim 2.0 +87 51 optimizer.lr 0.05815048229242245 +87 51 training.batch_size 2.0 +87 51 training.label_smoothing 0.3384868047602768 +87 52 model.output_channels 47.0 +87 52 model.input_dropout 0.23688989790698312 +87 52 model.output_dropout 0.027687161476736888 +87 52 model.feature_map_dropout 0.48801040013024644 +87 52 model.embedding_dim 2.0 +87 52 optimizer.lr 0.01892272999171927 +87 52 training.batch_size 1.0 +87 52 training.label_smoothing 0.018362868545096257 +87 53 model.output_channels 60.0 +87 53 model.input_dropout 0.41117017526715205 +87 53 model.output_dropout 0.49818986214001804 +87 53 model.feature_map_dropout 0.23610545935018024 +87 53 model.embedding_dim 0.0 +87 53 optimizer.lr 0.00402075432045149 +87 53 training.batch_size 0.0 +87 53 training.label_smoothing 0.06548343811239042 +87 54 model.output_channels 58.0 +87 54 model.input_dropout 0.4068858923167005 +87 54 model.output_dropout 0.16340901081588521 +87 54 model.feature_map_dropout 0.17093239613560934 +87 54 model.embedding_dim 2.0 +87 54 optimizer.lr 0.02511422014427574 +87 54 training.batch_size 0.0 +87 54 training.label_smoothing 0.15757151729102714 +87 55 model.output_channels 40.0 +87 55 model.input_dropout 0.4823320481219773 +87 55 model.output_dropout 0.36387963941666435 +87 55 model.feature_map_dropout 0.4963645481283068 +87 55 model.embedding_dim 0.0 +87 55 optimizer.lr 0.008105687996866371 +87 55 training.batch_size 2.0 +87 55 training.label_smoothing 0.0839796528005086 +87 56 model.output_channels 43.0 +87 56 model.input_dropout 0.06960772357888256 +87 56 model.output_dropout 0.4256784317410299 +87 56 model.feature_map_dropout 0.29207799533706097 +87 56 model.embedding_dim 0.0 +87 56 optimizer.lr 0.048975265168940095 +87 56 training.batch_size 0.0 +87 56 training.label_smoothing 0.8536670419148 +87 57 model.output_channels 
35.0 +87 57 model.input_dropout 0.2726017256742611 +87 57 model.output_dropout 0.48659072180086826 +87 57 model.feature_map_dropout 0.2138349493223724 +87 57 model.embedding_dim 1.0 +87 57 optimizer.lr 0.04446782533750675 +87 57 training.batch_size 0.0 +87 57 training.label_smoothing 0.006101254599964265 +87 58 model.output_channels 43.0 +87 58 model.input_dropout 0.031652582845916544 +87 58 model.output_dropout 0.44363162572980275 +87 58 model.feature_map_dropout 0.09583624884030617 +87 58 model.embedding_dim 2.0 +87 58 optimizer.lr 0.0011569274447832396 +87 58 training.batch_size 0.0 +87 58 training.label_smoothing 0.02867203291888649 +87 59 model.output_channels 63.0 +87 59 model.input_dropout 0.13596500032523706 +87 59 model.output_dropout 0.16744814858344603 +87 59 model.feature_map_dropout 0.019003660907397912 +87 59 model.embedding_dim 0.0 +87 59 optimizer.lr 0.03665348002608364 +87 59 training.batch_size 2.0 +87 59 training.label_smoothing 0.004425054777066032 +87 60 model.output_channels 41.0 +87 60 model.input_dropout 0.1273419688091279 +87 60 model.output_dropout 0.2596305447778091 +87 60 model.feature_map_dropout 0.19959193987019047 +87 60 model.embedding_dim 2.0 +87 60 optimizer.lr 0.002835833565332476 +87 60 training.batch_size 2.0 +87 60 training.label_smoothing 0.004467847738095279 +87 61 model.output_channels 50.0 +87 61 model.input_dropout 0.17132329473407004 +87 61 model.output_dropout 0.3687366466501417 +87 61 model.feature_map_dropout 0.17692332756252316 +87 61 model.embedding_dim 0.0 +87 61 optimizer.lr 0.06602015964356082 +87 61 training.batch_size 0.0 +87 61 training.label_smoothing 0.2412989440864193 +87 62 model.output_channels 40.0 +87 62 model.input_dropout 0.46012239286734913 +87 62 model.output_dropout 0.15426467361748114 +87 62 model.feature_map_dropout 0.3587396276720083 +87 62 model.embedding_dim 0.0 +87 62 optimizer.lr 0.011778328454818916 +87 62 training.batch_size 2.0 +87 62 training.label_smoothing 0.9052404675803877 +87 63 
model.output_channels 64.0 +87 63 model.input_dropout 0.3748232149597954 +87 63 model.output_dropout 0.4752190519505458 +87 63 model.feature_map_dropout 0.41413619674148894 +87 63 model.embedding_dim 1.0 +87 63 optimizer.lr 0.030747619413413092 +87 63 training.batch_size 1.0 +87 63 training.label_smoothing 0.01588190264949586 +87 64 model.output_channels 21.0 +87 64 model.input_dropout 0.09820191029111597 +87 64 model.output_dropout 0.255761726394952 +87 64 model.feature_map_dropout 0.3125278112513674 +87 64 model.embedding_dim 2.0 +87 64 optimizer.lr 0.003039276854128988 +87 64 training.batch_size 2.0 +87 64 training.label_smoothing 0.30984125367718734 +87 65 model.output_channels 23.0 +87 65 model.input_dropout 0.33827168008947034 +87 65 model.output_dropout 0.021849828687658945 +87 65 model.feature_map_dropout 0.0016836940394407418 +87 65 model.embedding_dim 2.0 +87 65 optimizer.lr 0.07711579370092635 +87 65 training.batch_size 1.0 +87 65 training.label_smoothing 0.651153054117696 +87 66 model.output_channels 22.0 +87 66 model.input_dropout 0.10057736765160558 +87 66 model.output_dropout 0.1975434036478026 +87 66 model.feature_map_dropout 0.2998398168821266 +87 66 model.embedding_dim 2.0 +87 66 optimizer.lr 0.024178595328015973 +87 66 training.batch_size 1.0 +87 66 training.label_smoothing 0.0018975200767523625 +87 67 model.output_channels 28.0 +87 67 model.input_dropout 0.14961304014931753 +87 67 model.output_dropout 0.13959895154844432 +87 67 model.feature_map_dropout 0.43824789445771906 +87 67 model.embedding_dim 1.0 +87 67 optimizer.lr 0.001508605575651274 +87 67 training.batch_size 2.0 +87 67 training.label_smoothing 0.002931801348914993 +87 68 model.output_channels 59.0 +87 68 model.input_dropout 0.29236938409143765 +87 68 model.output_dropout 0.08392040410669638 +87 68 model.feature_map_dropout 0.2691657545621421 +87 68 model.embedding_dim 0.0 +87 68 optimizer.lr 0.0068746661596940555 +87 68 training.batch_size 2.0 +87 68 training.label_smoothing 
0.0024566562544148644 +87 69 model.output_channels 36.0 +87 69 model.input_dropout 0.04669030235620608 +87 69 model.output_dropout 0.459341161591131 +87 69 model.feature_map_dropout 0.35836979245760314 +87 69 model.embedding_dim 0.0 +87 69 optimizer.lr 0.003072120740114053 +87 69 training.batch_size 2.0 +87 69 training.label_smoothing 0.001244855901306316 +87 70 model.output_channels 34.0 +87 70 model.input_dropout 0.3251526702944888 +87 70 model.output_dropout 0.49560151035610844 +87 70 model.feature_map_dropout 0.362165657766903 +87 70 model.embedding_dim 2.0 +87 70 optimizer.lr 0.0016085081142593544 +87 70 training.batch_size 0.0 +87 70 training.label_smoothing 0.0594722251504799 +87 71 model.output_channels 35.0 +87 71 model.input_dropout 0.1790332158700902 +87 71 model.output_dropout 0.11935112186621516 +87 71 model.feature_map_dropout 0.07942570066645166 +87 71 model.embedding_dim 2.0 +87 71 optimizer.lr 0.05228732729443458 +87 71 training.batch_size 0.0 +87 71 training.label_smoothing 0.0027487825399186514 +87 72 model.output_channels 51.0 +87 72 model.input_dropout 0.42183657552584525 +87 72 model.output_dropout 0.11006054531193515 +87 72 model.feature_map_dropout 0.0841971204291817 +87 72 model.embedding_dim 0.0 +87 72 optimizer.lr 0.005532013413542317 +87 72 training.batch_size 1.0 +87 72 training.label_smoothing 0.022680025327127786 +87 73 model.output_channels 62.0 +87 73 model.input_dropout 0.22166543377638825 +87 73 model.output_dropout 0.43546100691627704 +87 73 model.feature_map_dropout 0.3127503288650713 +87 73 model.embedding_dim 0.0 +87 73 optimizer.lr 0.06691897640421011 +87 73 training.batch_size 2.0 +87 73 training.label_smoothing 0.0024688560016123263 +87 74 model.output_channels 47.0 +87 74 model.input_dropout 0.23626882664443216 +87 74 model.output_dropout 0.12784317631101477 +87 74 model.feature_map_dropout 0.07127322471274128 +87 74 model.embedding_dim 1.0 +87 74 optimizer.lr 0.05351536660892237 +87 74 training.batch_size 0.0 +87 74 
training.label_smoothing 0.003482703330401268 +87 75 model.output_channels 61.0 +87 75 model.input_dropout 0.1709828426859007 +87 75 model.output_dropout 0.33783591426034976 +87 75 model.feature_map_dropout 0.4232391172354255 +87 75 model.embedding_dim 0.0 +87 75 optimizer.lr 0.004565353558736622 +87 75 training.batch_size 0.0 +87 75 training.label_smoothing 0.12868710486961032 +87 76 model.output_channels 57.0 +87 76 model.input_dropout 0.3492726357032801 +87 76 model.output_dropout 0.4839795219940878 +87 76 model.feature_map_dropout 0.15177369180971656 +87 76 model.embedding_dim 2.0 +87 76 optimizer.lr 0.07189013792846154 +87 76 training.batch_size 1.0 +87 76 training.label_smoothing 0.9419460193995702 +87 77 model.output_channels 46.0 +87 77 model.input_dropout 0.47376359537196544 +87 77 model.output_dropout 0.20902223584911767 +87 77 model.feature_map_dropout 0.40259352682680155 +87 77 model.embedding_dim 1.0 +87 77 optimizer.lr 0.001732014260952522 +87 77 training.batch_size 2.0 +87 77 training.label_smoothing 0.8669870733001693 +87 78 model.output_channels 61.0 +87 78 model.input_dropout 0.057106418207725496 +87 78 model.output_dropout 0.1734573094707113 +87 78 model.feature_map_dropout 0.28756570544164706 +87 78 model.embedding_dim 0.0 +87 78 optimizer.lr 0.024755710197870075 +87 78 training.batch_size 1.0 +87 78 training.label_smoothing 0.0020884856626661283 +87 79 model.output_channels 44.0 +87 79 model.input_dropout 0.25619285266019065 +87 79 model.output_dropout 0.08552755654281602 +87 79 model.feature_map_dropout 0.0015300183004963164 +87 79 model.embedding_dim 1.0 +87 79 optimizer.lr 0.03875340197143375 +87 79 training.batch_size 1.0 +87 79 training.label_smoothing 0.42509639449944087 +87 80 model.output_channels 35.0 +87 80 model.input_dropout 0.2934112942376206 +87 80 model.output_dropout 0.14254776202974984 +87 80 model.feature_map_dropout 0.01078350924531929 +87 80 model.embedding_dim 2.0 +87 80 optimizer.lr 0.001436342710079704 +87 80 
training.batch_size 1.0 +87 80 training.label_smoothing 0.2884638630923356 +87 81 model.output_channels 48.0 +87 81 model.input_dropout 0.4754880629841997 +87 81 model.output_dropout 0.27077665463297756 +87 81 model.feature_map_dropout 0.32936044782963747 +87 81 model.embedding_dim 2.0 +87 81 optimizer.lr 0.009115971971176801 +87 81 training.batch_size 1.0 +87 81 training.label_smoothing 0.01751816028152469 +87 82 model.output_channels 43.0 +87 82 model.input_dropout 0.3094639820345862 +87 82 model.output_dropout 0.06574370094855125 +87 82 model.feature_map_dropout 0.3025469230834869 +87 82 model.embedding_dim 2.0 +87 82 optimizer.lr 0.005031799219303724 +87 82 training.batch_size 2.0 +87 82 training.label_smoothing 0.21823781551110424 +87 83 model.output_channels 24.0 +87 83 model.input_dropout 0.4036608272459908 +87 83 model.output_dropout 0.2925153384663853 +87 83 model.feature_map_dropout 0.48907394533812243 +87 83 model.embedding_dim 2.0 +87 83 optimizer.lr 0.03569258275411313 +87 83 training.batch_size 2.0 +87 83 training.label_smoothing 0.08368424133182215 +87 84 model.output_channels 35.0 +87 84 model.input_dropout 0.4154987115289495 +87 84 model.output_dropout 0.31388605352312726 +87 84 model.feature_map_dropout 0.27578674749913373 +87 84 model.embedding_dim 0.0 +87 84 optimizer.lr 0.04444935895497963 +87 84 training.batch_size 2.0 +87 84 training.label_smoothing 0.9460911204752774 +87 85 model.output_channels 20.0 +87 85 model.input_dropout 0.1982769230232142 +87 85 model.output_dropout 0.32762979956763627 +87 85 model.feature_map_dropout 0.44984401969952154 +87 85 model.embedding_dim 2.0 +87 85 optimizer.lr 0.003194090674132636 +87 85 training.batch_size 0.0 +87 85 training.label_smoothing 0.002451162482946588 +87 86 model.output_channels 39.0 +87 86 model.input_dropout 0.0778423330989152 +87 86 model.output_dropout 0.47854415073972684 +87 86 model.feature_map_dropout 0.08467010785677292 +87 86 model.embedding_dim 2.0 +87 86 optimizer.lr 
0.005112902316884787 +87 86 training.batch_size 0.0 +87 86 training.label_smoothing 0.0019496657937935972 +87 87 model.output_channels 45.0 +87 87 model.input_dropout 0.3181742371887175 +87 87 model.output_dropout 0.1749135188599092 +87 87 model.feature_map_dropout 0.1854587865801649 +87 87 model.embedding_dim 1.0 +87 87 optimizer.lr 0.0023734926677113095 +87 87 training.batch_size 2.0 +87 87 training.label_smoothing 0.004115090035045967 +87 88 model.output_channels 52.0 +87 88 model.input_dropout 0.28803416363101947 +87 88 model.output_dropout 0.17761946883818497 +87 88 model.feature_map_dropout 0.4798390519475611 +87 88 model.embedding_dim 0.0 +87 88 optimizer.lr 0.05727361544835719 +87 88 training.batch_size 2.0 +87 88 training.label_smoothing 0.021866377869782933 +87 89 model.output_channels 23.0 +87 89 model.input_dropout 0.11740139036855918 +87 89 model.output_dropout 0.45661648351700185 +87 89 model.feature_map_dropout 0.2140589535733466 +87 89 model.embedding_dim 1.0 +87 89 optimizer.lr 0.0013387763305090972 +87 89 training.batch_size 2.0 +87 89 training.label_smoothing 0.4677370950226307 +87 90 model.output_channels 36.0 +87 90 model.input_dropout 0.16921193153064812 +87 90 model.output_dropout 0.0902202897581969 +87 90 model.feature_map_dropout 0.434532084964024 +87 90 model.embedding_dim 1.0 +87 90 optimizer.lr 0.003907061250772312 +87 90 training.batch_size 0.0 +87 90 training.label_smoothing 0.005615706006272127 +87 91 model.output_channels 55.0 +87 91 model.input_dropout 0.40781420155698445 +87 91 model.output_dropout 0.35116333469547883 +87 91 model.feature_map_dropout 0.31684533388881364 +87 91 model.embedding_dim 1.0 +87 91 optimizer.lr 0.0020369871204793002 +87 91 training.batch_size 2.0 +87 91 training.label_smoothing 0.8363486112778211 +87 92 model.output_channels 64.0 +87 92 model.input_dropout 0.0032281912892925813 +87 92 model.output_dropout 0.28086385056355995 +87 92 model.feature_map_dropout 0.36216491247744004 +87 92 model.embedding_dim 
0.0 +87 92 optimizer.lr 0.07639872177591635 +87 92 training.batch_size 2.0 +87 92 training.label_smoothing 0.07433690809820236 +87 93 model.output_channels 22.0 +87 93 model.input_dropout 0.2251342082132336 +87 93 model.output_dropout 0.44811635929124044 +87 93 model.feature_map_dropout 0.1512557387720409 +87 93 model.embedding_dim 2.0 +87 93 optimizer.lr 0.005870036868725039 +87 93 training.batch_size 2.0 +87 93 training.label_smoothing 0.027174375543704946 +87 94 model.output_channels 50.0 +87 94 model.input_dropout 0.11713863756985687 +87 94 model.output_dropout 0.3814899950312205 +87 94 model.feature_map_dropout 0.07061788028185972 +87 94 model.embedding_dim 2.0 +87 94 optimizer.lr 0.012900164654921383 +87 94 training.batch_size 1.0 +87 94 training.label_smoothing 0.9225354807823795 +87 95 model.output_channels 16.0 +87 95 model.input_dropout 0.18241121254513826 +87 95 model.output_dropout 0.13890608540257166 +87 95 model.feature_map_dropout 0.38635550690758125 +87 95 model.embedding_dim 0.0 +87 95 optimizer.lr 0.04896417838498948 +87 95 training.batch_size 2.0 +87 95 training.label_smoothing 0.002672866330523611 +87 96 model.output_channels 53.0 +87 96 model.input_dropout 0.24762278005351368 +87 96 model.output_dropout 0.49487703021556967 +87 96 model.feature_map_dropout 0.29150747433819396 +87 96 model.embedding_dim 2.0 +87 96 optimizer.lr 0.005235590121206253 +87 96 training.batch_size 2.0 +87 96 training.label_smoothing 0.05441189140114058 +87 97 model.output_channels 55.0 +87 97 model.input_dropout 0.3122493834104689 +87 97 model.output_dropout 0.1381356129977051 +87 97 model.feature_map_dropout 0.1543276219034111 +87 97 model.embedding_dim 0.0 +87 97 optimizer.lr 0.09975721786663753 +87 97 training.batch_size 2.0 +87 97 training.label_smoothing 0.03263697950785532 +87 98 model.output_channels 40.0 +87 98 model.input_dropout 0.2975014993256152 +87 98 model.output_dropout 0.058196832480180094 +87 98 model.feature_map_dropout 0.005539777828465153 +87 98 
model.embedding_dim 0.0 +87 98 optimizer.lr 0.001881701623516915 +87 98 training.batch_size 2.0 +87 98 training.label_smoothing 0.03314745715720545 +87 99 model.output_channels 32.0 +87 99 model.input_dropout 0.4120274916760347 +87 99 model.output_dropout 0.4358540144680693 +87 99 model.feature_map_dropout 0.24918228150031158 +87 99 model.embedding_dim 1.0 +87 99 optimizer.lr 0.01909567099963611 +87 99 training.batch_size 1.0 +87 99 training.label_smoothing 0.2629680402629734 +87 100 model.output_channels 52.0 +87 100 model.input_dropout 0.3238585805693474 +87 100 model.output_dropout 0.20863064090012884 +87 100 model.feature_map_dropout 0.07710857282436429 +87 100 model.embedding_dim 0.0 +87 100 optimizer.lr 0.0015808908040392763 +87 100 training.batch_size 0.0 +87 100 training.label_smoothing 0.05374573841954608 +87 1 dataset """kinships""" +87 1 model """conve""" +87 1 loss """crossentropy""" +87 1 regularizer """no""" +87 1 optimizer """adam""" +87 1 training_loop """lcwa""" +87 1 evaluator """rankbased""" +87 2 dataset """kinships""" +87 2 model """conve""" +87 2 loss """crossentropy""" +87 2 regularizer """no""" +87 2 optimizer """adam""" +87 2 training_loop """lcwa""" +87 2 evaluator """rankbased""" +87 3 dataset """kinships""" +87 3 model """conve""" +87 3 loss """crossentropy""" +87 3 regularizer """no""" +87 3 optimizer """adam""" +87 3 training_loop """lcwa""" +87 3 evaluator """rankbased""" +87 4 dataset """kinships""" +87 4 model """conve""" +87 4 loss """crossentropy""" +87 4 regularizer """no""" +87 4 optimizer """adam""" +87 4 training_loop """lcwa""" +87 4 evaluator """rankbased""" +87 5 dataset """kinships""" +87 5 model """conve""" +87 5 loss """crossentropy""" +87 5 regularizer """no""" +87 5 optimizer """adam""" +87 5 training_loop """lcwa""" +87 5 evaluator """rankbased""" +87 6 dataset """kinships""" +87 6 model """conve""" +87 6 loss """crossentropy""" +87 6 regularizer """no""" +87 6 optimizer """adam""" +87 6 training_loop """lcwa""" +87 6 
evaluator """rankbased""" +87 7 dataset """kinships""" +87 7 model """conve""" +87 7 loss """crossentropy""" +87 7 regularizer """no""" +87 7 optimizer """adam""" +87 7 training_loop """lcwa""" +87 7 evaluator """rankbased""" +87 8 dataset """kinships""" +87 8 model """conve""" +87 8 loss """crossentropy""" +87 8 regularizer """no""" +87 8 optimizer """adam""" +87 8 training_loop """lcwa""" +87 8 evaluator """rankbased""" +87 9 dataset """kinships""" +87 9 model """conve""" +87 9 loss """crossentropy""" +87 9 regularizer """no""" +87 9 optimizer """adam""" +87 9 training_loop """lcwa""" +87 9 evaluator """rankbased""" +87 10 dataset """kinships""" +87 10 model """conve""" +87 10 loss """crossentropy""" +87 10 regularizer """no""" +87 10 optimizer """adam""" +87 10 training_loop """lcwa""" +87 10 evaluator """rankbased""" +87 11 dataset """kinships""" +87 11 model """conve""" +87 11 loss """crossentropy""" +87 11 regularizer """no""" +87 11 optimizer """adam""" +87 11 training_loop """lcwa""" +87 11 evaluator """rankbased""" +87 12 dataset """kinships""" +87 12 model """conve""" +87 12 loss """crossentropy""" +87 12 regularizer """no""" +87 12 optimizer """adam""" +87 12 training_loop """lcwa""" +87 12 evaluator """rankbased""" +87 13 dataset """kinships""" +87 13 model """conve""" +87 13 loss """crossentropy""" +87 13 regularizer """no""" +87 13 optimizer """adam""" +87 13 training_loop """lcwa""" +87 13 evaluator """rankbased""" +87 14 dataset """kinships""" +87 14 model """conve""" +87 14 loss """crossentropy""" +87 14 regularizer """no""" +87 14 optimizer """adam""" +87 14 training_loop """lcwa""" +87 14 evaluator """rankbased""" +87 15 dataset """kinships""" +87 15 model """conve""" +87 15 loss """crossentropy""" +87 15 regularizer """no""" +87 15 optimizer """adam""" +87 15 training_loop """lcwa""" +87 15 evaluator """rankbased""" +87 16 dataset """kinships""" +87 16 model """conve""" +87 16 loss """crossentropy""" +87 16 regularizer """no""" +87 16 optimizer 
"""adam""" +87 16 training_loop """lcwa""" +87 16 evaluator """rankbased""" +87 17 dataset """kinships""" +87 17 model """conve""" +87 17 loss """crossentropy""" +87 17 regularizer """no""" +87 17 optimizer """adam""" +87 17 training_loop """lcwa""" +87 17 evaluator """rankbased""" +87 18 dataset """kinships""" +87 18 model """conve""" +87 18 loss """crossentropy""" +87 18 regularizer """no""" +87 18 optimizer """adam""" +87 18 training_loop """lcwa""" +87 18 evaluator """rankbased""" +87 19 dataset """kinships""" +87 19 model """conve""" +87 19 loss """crossentropy""" +87 19 regularizer """no""" +87 19 optimizer """adam""" +87 19 training_loop """lcwa""" +87 19 evaluator """rankbased""" +87 20 dataset """kinships""" +87 20 model """conve""" +87 20 loss """crossentropy""" +87 20 regularizer """no""" +87 20 optimizer """adam""" +87 20 training_loop """lcwa""" +87 20 evaluator """rankbased""" +87 21 dataset """kinships""" +87 21 model """conve""" +87 21 loss """crossentropy""" +87 21 regularizer """no""" +87 21 optimizer """adam""" +87 21 training_loop """lcwa""" +87 21 evaluator """rankbased""" +87 22 dataset """kinships""" +87 22 model """conve""" +87 22 loss """crossentropy""" +87 22 regularizer """no""" +87 22 optimizer """adam""" +87 22 training_loop """lcwa""" +87 22 evaluator """rankbased""" +87 23 dataset """kinships""" +87 23 model """conve""" +87 23 loss """crossentropy""" +87 23 regularizer """no""" +87 23 optimizer """adam""" +87 23 training_loop """lcwa""" +87 23 evaluator """rankbased""" +87 24 dataset """kinships""" +87 24 model """conve""" +87 24 loss """crossentropy""" +87 24 regularizer """no""" +87 24 optimizer """adam""" +87 24 training_loop """lcwa""" +87 24 evaluator """rankbased""" +87 25 dataset """kinships""" +87 25 model """conve""" +87 25 loss """crossentropy""" +87 25 regularizer """no""" +87 25 optimizer """adam""" +87 25 training_loop """lcwa""" +87 25 evaluator """rankbased""" +87 26 dataset """kinships""" +87 26 model """conve""" +87 
26 loss """crossentropy""" +87 26 regularizer """no""" +87 26 optimizer """adam""" +87 26 training_loop """lcwa""" +87 26 evaluator """rankbased""" +87 27 dataset """kinships""" +87 27 model """conve""" +87 27 loss """crossentropy""" +87 27 regularizer """no""" +87 27 optimizer """adam""" +87 27 training_loop """lcwa""" +87 27 evaluator """rankbased""" +87 28 dataset """kinships""" +87 28 model """conve""" +87 28 loss """crossentropy""" +87 28 regularizer """no""" +87 28 optimizer """adam""" +87 28 training_loop """lcwa""" +87 28 evaluator """rankbased""" +87 29 dataset """kinships""" +87 29 model """conve""" +87 29 loss """crossentropy""" +87 29 regularizer """no""" +87 29 optimizer """adam""" +87 29 training_loop """lcwa""" +87 29 evaluator """rankbased""" +87 30 dataset """kinships""" +87 30 model """conve""" +87 30 loss """crossentropy""" +87 30 regularizer """no""" +87 30 optimizer """adam""" +87 30 training_loop """lcwa""" +87 30 evaluator """rankbased""" +87 31 dataset """kinships""" +87 31 model """conve""" +87 31 loss """crossentropy""" +87 31 regularizer """no""" +87 31 optimizer """adam""" +87 31 training_loop """lcwa""" +87 31 evaluator """rankbased""" +87 32 dataset """kinships""" +87 32 model """conve""" +87 32 loss """crossentropy""" +87 32 regularizer """no""" +87 32 optimizer """adam""" +87 32 training_loop """lcwa""" +87 32 evaluator """rankbased""" +87 33 dataset """kinships""" +87 33 model """conve""" +87 33 loss """crossentropy""" +87 33 regularizer """no""" +87 33 optimizer """adam""" +87 33 training_loop """lcwa""" +87 33 evaluator """rankbased""" +87 34 dataset """kinships""" +87 34 model """conve""" +87 34 loss """crossentropy""" +87 34 regularizer """no""" +87 34 optimizer """adam""" +87 34 training_loop """lcwa""" +87 34 evaluator """rankbased""" +87 35 dataset """kinships""" +87 35 model """conve""" +87 35 loss """crossentropy""" +87 35 regularizer """no""" +87 35 optimizer """adam""" +87 35 training_loop """lcwa""" +87 35 evaluator 
"""rankbased""" +87 36 dataset """kinships""" +87 36 model """conve""" +87 36 loss """crossentropy""" +87 36 regularizer """no""" +87 36 optimizer """adam""" +87 36 training_loop """lcwa""" +87 36 evaluator """rankbased""" +87 37 dataset """kinships""" +87 37 model """conve""" +87 37 loss """crossentropy""" +87 37 regularizer """no""" +87 37 optimizer """adam""" +87 37 training_loop """lcwa""" +87 37 evaluator """rankbased""" +87 38 dataset """kinships""" +87 38 model """conve""" +87 38 loss """crossentropy""" +87 38 regularizer """no""" +87 38 optimizer """adam""" +87 38 training_loop """lcwa""" +87 38 evaluator """rankbased""" +87 39 dataset """kinships""" +87 39 model """conve""" +87 39 loss """crossentropy""" +87 39 regularizer """no""" +87 39 optimizer """adam""" +87 39 training_loop """lcwa""" +87 39 evaluator """rankbased""" +87 40 dataset """kinships""" +87 40 model """conve""" +87 40 loss """crossentropy""" +87 40 regularizer """no""" +87 40 optimizer """adam""" +87 40 training_loop """lcwa""" +87 40 evaluator """rankbased""" +87 41 dataset """kinships""" +87 41 model """conve""" +87 41 loss """crossentropy""" +87 41 regularizer """no""" +87 41 optimizer """adam""" +87 41 training_loop """lcwa""" +87 41 evaluator """rankbased""" +87 42 dataset """kinships""" +87 42 model """conve""" +87 42 loss """crossentropy""" +87 42 regularizer """no""" +87 42 optimizer """adam""" +87 42 training_loop """lcwa""" +87 42 evaluator """rankbased""" +87 43 dataset """kinships""" +87 43 model """conve""" +87 43 loss """crossentropy""" +87 43 regularizer """no""" +87 43 optimizer """adam""" +87 43 training_loop """lcwa""" +87 43 evaluator """rankbased""" +87 44 dataset """kinships""" +87 44 model """conve""" +87 44 loss """crossentropy""" +87 44 regularizer """no""" +87 44 optimizer """adam""" +87 44 training_loop """lcwa""" +87 44 evaluator """rankbased""" +87 45 dataset """kinships""" +87 45 model """conve""" +87 45 loss """crossentropy""" +87 45 regularizer """no""" +87 45 
optimizer """adam""" +87 45 training_loop """lcwa""" +87 45 evaluator """rankbased""" +87 46 dataset """kinships""" +87 46 model """conve""" +87 46 loss """crossentropy""" +87 46 regularizer """no""" +87 46 optimizer """adam""" +87 46 training_loop """lcwa""" +87 46 evaluator """rankbased""" +87 47 dataset """kinships""" +87 47 model """conve""" +87 47 loss """crossentropy""" +87 47 regularizer """no""" +87 47 optimizer """adam""" +87 47 training_loop """lcwa""" +87 47 evaluator """rankbased""" +87 48 dataset """kinships""" +87 48 model """conve""" +87 48 loss """crossentropy""" +87 48 regularizer """no""" +87 48 optimizer """adam""" +87 48 training_loop """lcwa""" +87 48 evaluator """rankbased""" +87 49 dataset """kinships""" +87 49 model """conve""" +87 49 loss """crossentropy""" +87 49 regularizer """no""" +87 49 optimizer """adam""" +87 49 training_loop """lcwa""" +87 49 evaluator """rankbased""" +87 50 dataset """kinships""" +87 50 model """conve""" +87 50 loss """crossentropy""" +87 50 regularizer """no""" +87 50 optimizer """adam""" +87 50 training_loop """lcwa""" +87 50 evaluator """rankbased""" +87 51 dataset """kinships""" +87 51 model """conve""" +87 51 loss """crossentropy""" +87 51 regularizer """no""" +87 51 optimizer """adam""" +87 51 training_loop """lcwa""" +87 51 evaluator """rankbased""" +87 52 dataset """kinships""" +87 52 model """conve""" +87 52 loss """crossentropy""" +87 52 regularizer """no""" +87 52 optimizer """adam""" +87 52 training_loop """lcwa""" +87 52 evaluator """rankbased""" +87 53 dataset """kinships""" +87 53 model """conve""" +87 53 loss """crossentropy""" +87 53 regularizer """no""" +87 53 optimizer """adam""" +87 53 training_loop """lcwa""" +87 53 evaluator """rankbased""" +87 54 dataset """kinships""" +87 54 model """conve""" +87 54 loss """crossentropy""" +87 54 regularizer """no""" +87 54 optimizer """adam""" +87 54 training_loop """lcwa""" +87 54 evaluator """rankbased""" +87 55 dataset """kinships""" +87 55 model 
"""conve""" +87 55 loss """crossentropy""" +87 55 regularizer """no""" +87 55 optimizer """adam""" +87 55 training_loop """lcwa""" +87 55 evaluator """rankbased""" +87 56 dataset """kinships""" +87 56 model """conve""" +87 56 loss """crossentropy""" +87 56 regularizer """no""" +87 56 optimizer """adam""" +87 56 training_loop """lcwa""" +87 56 evaluator """rankbased""" +87 57 dataset """kinships""" +87 57 model """conve""" +87 57 loss """crossentropy""" +87 57 regularizer """no""" +87 57 optimizer """adam""" +87 57 training_loop """lcwa""" +87 57 evaluator """rankbased""" +87 58 dataset """kinships""" +87 58 model """conve""" +87 58 loss """crossentropy""" +87 58 regularizer """no""" +87 58 optimizer """adam""" +87 58 training_loop """lcwa""" +87 58 evaluator """rankbased""" +87 59 dataset """kinships""" +87 59 model """conve""" +87 59 loss """crossentropy""" +87 59 regularizer """no""" +87 59 optimizer """adam""" +87 59 training_loop """lcwa""" +87 59 evaluator """rankbased""" +87 60 dataset """kinships""" +87 60 model """conve""" +87 60 loss """crossentropy""" +87 60 regularizer """no""" +87 60 optimizer """adam""" +87 60 training_loop """lcwa""" +87 60 evaluator """rankbased""" +87 61 dataset """kinships""" +87 61 model """conve""" +87 61 loss """crossentropy""" +87 61 regularizer """no""" +87 61 optimizer """adam""" +87 61 training_loop """lcwa""" +87 61 evaluator """rankbased""" +87 62 dataset """kinships""" +87 62 model """conve""" +87 62 loss """crossentropy""" +87 62 regularizer """no""" +87 62 optimizer """adam""" +87 62 training_loop """lcwa""" +87 62 evaluator """rankbased""" +87 63 dataset """kinships""" +87 63 model """conve""" +87 63 loss """crossentropy""" +87 63 regularizer """no""" +87 63 optimizer """adam""" +87 63 training_loop """lcwa""" +87 63 evaluator """rankbased""" +87 64 dataset """kinships""" +87 64 model """conve""" +87 64 loss """crossentropy""" +87 64 regularizer """no""" +87 64 optimizer """adam""" +87 64 training_loop """lcwa""" +87 
64 evaluator """rankbased""" +87 65 dataset """kinships""" +87 65 model """conve""" +87 65 loss """crossentropy""" +87 65 regularizer """no""" +87 65 optimizer """adam""" +87 65 training_loop """lcwa""" +87 65 evaluator """rankbased""" +87 66 dataset """kinships""" +87 66 model """conve""" +87 66 loss """crossentropy""" +87 66 regularizer """no""" +87 66 optimizer """adam""" +87 66 training_loop """lcwa""" +87 66 evaluator """rankbased""" +87 67 dataset """kinships""" +87 67 model """conve""" +87 67 loss """crossentropy""" +87 67 regularizer """no""" +87 67 optimizer """adam""" +87 67 training_loop """lcwa""" +87 67 evaluator """rankbased""" +87 68 dataset """kinships""" +87 68 model """conve""" +87 68 loss """crossentropy""" +87 68 regularizer """no""" +87 68 optimizer """adam""" +87 68 training_loop """lcwa""" +87 68 evaluator """rankbased""" +87 69 dataset """kinships""" +87 69 model """conve""" +87 69 loss """crossentropy""" +87 69 regularizer """no""" +87 69 optimizer """adam""" +87 69 training_loop """lcwa""" +87 69 evaluator """rankbased""" +87 70 dataset """kinships""" +87 70 model """conve""" +87 70 loss """crossentropy""" +87 70 regularizer """no""" +87 70 optimizer """adam""" +87 70 training_loop """lcwa""" +87 70 evaluator """rankbased""" +87 71 dataset """kinships""" +87 71 model """conve""" +87 71 loss """crossentropy""" +87 71 regularizer """no""" +87 71 optimizer """adam""" +87 71 training_loop """lcwa""" +87 71 evaluator """rankbased""" +87 72 dataset """kinships""" +87 72 model """conve""" +87 72 loss """crossentropy""" +87 72 regularizer """no""" +87 72 optimizer """adam""" +87 72 training_loop """lcwa""" +87 72 evaluator """rankbased""" +87 73 dataset """kinships""" +87 73 model """conve""" +87 73 loss """crossentropy""" +87 73 regularizer """no""" +87 73 optimizer """adam""" +87 73 training_loop """lcwa""" +87 73 evaluator """rankbased""" +87 74 dataset """kinships""" +87 74 model """conve""" +87 74 loss """crossentropy""" +87 74 regularizer 
"""no""" +87 74 optimizer """adam""" +87 74 training_loop """lcwa""" +87 74 evaluator """rankbased""" +87 75 dataset """kinships""" +87 75 model """conve""" +87 75 loss """crossentropy""" +87 75 regularizer """no""" +87 75 optimizer """adam""" +87 75 training_loop """lcwa""" +87 75 evaluator """rankbased""" +87 76 dataset """kinships""" +87 76 model """conve""" +87 76 loss """crossentropy""" +87 76 regularizer """no""" +87 76 optimizer """adam""" +87 76 training_loop """lcwa""" +87 76 evaluator """rankbased""" +87 77 dataset """kinships""" +87 77 model """conve""" +87 77 loss """crossentropy""" +87 77 regularizer """no""" +87 77 optimizer """adam""" +87 77 training_loop """lcwa""" +87 77 evaluator """rankbased""" +87 78 dataset """kinships""" +87 78 model """conve""" +87 78 loss """crossentropy""" +87 78 regularizer """no""" +87 78 optimizer """adam""" +87 78 training_loop """lcwa""" +87 78 evaluator """rankbased""" +87 79 dataset """kinships""" +87 79 model """conve""" +87 79 loss """crossentropy""" +87 79 regularizer """no""" +87 79 optimizer """adam""" +87 79 training_loop """lcwa""" +87 79 evaluator """rankbased""" +87 80 dataset """kinships""" +87 80 model """conve""" +87 80 loss """crossentropy""" +87 80 regularizer """no""" +87 80 optimizer """adam""" +87 80 training_loop """lcwa""" +87 80 evaluator """rankbased""" +87 81 dataset """kinships""" +87 81 model """conve""" +87 81 loss """crossentropy""" +87 81 regularizer """no""" +87 81 optimizer """adam""" +87 81 training_loop """lcwa""" +87 81 evaluator """rankbased""" +87 82 dataset """kinships""" +87 82 model """conve""" +87 82 loss """crossentropy""" +87 82 regularizer """no""" +87 82 optimizer """adam""" +87 82 training_loop """lcwa""" +87 82 evaluator """rankbased""" +87 83 dataset """kinships""" +87 83 model """conve""" +87 83 loss """crossentropy""" +87 83 regularizer """no""" +87 83 optimizer """adam""" +87 83 training_loop """lcwa""" +87 83 evaluator """rankbased""" +87 84 dataset """kinships""" +87 
84 model """conve""" +87 84 loss """crossentropy""" +87 84 regularizer """no""" +87 84 optimizer """adam""" +87 84 training_loop """lcwa""" +87 84 evaluator """rankbased""" +87 85 dataset """kinships""" +87 85 model """conve""" +87 85 loss """crossentropy""" +87 85 regularizer """no""" +87 85 optimizer """adam""" +87 85 training_loop """lcwa""" +87 85 evaluator """rankbased""" +87 86 dataset """kinships""" +87 86 model """conve""" +87 86 loss """crossentropy""" +87 86 regularizer """no""" +87 86 optimizer """adam""" +87 86 training_loop """lcwa""" +87 86 evaluator """rankbased""" +87 87 dataset """kinships""" +87 87 model """conve""" +87 87 loss """crossentropy""" +87 87 regularizer """no""" +87 87 optimizer """adam""" +87 87 training_loop """lcwa""" +87 87 evaluator """rankbased""" +87 88 dataset """kinships""" +87 88 model """conve""" +87 88 loss """crossentropy""" +87 88 regularizer """no""" +87 88 optimizer """adam""" +87 88 training_loop """lcwa""" +87 88 evaluator """rankbased""" +87 89 dataset """kinships""" +87 89 model """conve""" +87 89 loss """crossentropy""" +87 89 regularizer """no""" +87 89 optimizer """adam""" +87 89 training_loop """lcwa""" +87 89 evaluator """rankbased""" +87 90 dataset """kinships""" +87 90 model """conve""" +87 90 loss """crossentropy""" +87 90 regularizer """no""" +87 90 optimizer """adam""" +87 90 training_loop """lcwa""" +87 90 evaluator """rankbased""" +87 91 dataset """kinships""" +87 91 model """conve""" +87 91 loss """crossentropy""" +87 91 regularizer """no""" +87 91 optimizer """adam""" +87 91 training_loop """lcwa""" +87 91 evaluator """rankbased""" +87 92 dataset """kinships""" +87 92 model """conve""" +87 92 loss """crossentropy""" +87 92 regularizer """no""" +87 92 optimizer """adam""" +87 92 training_loop """lcwa""" +87 92 evaluator """rankbased""" +87 93 dataset """kinships""" +87 93 model """conve""" +87 93 loss """crossentropy""" +87 93 regularizer """no""" +87 93 optimizer """adam""" +87 93 training_loop 
"""lcwa""" +87 93 evaluator """rankbased""" +87 94 dataset """kinships""" +87 94 model """conve""" +87 94 loss """crossentropy""" +87 94 regularizer """no""" +87 94 optimizer """adam""" +87 94 training_loop """lcwa""" +87 94 evaluator """rankbased""" +87 95 dataset """kinships""" +87 95 model """conve""" +87 95 loss """crossentropy""" +87 95 regularizer """no""" +87 95 optimizer """adam""" +87 95 training_loop """lcwa""" +87 95 evaluator """rankbased""" +87 96 dataset """kinships""" +87 96 model """conve""" +87 96 loss """crossentropy""" +87 96 regularizer """no""" +87 96 optimizer """adam""" +87 96 training_loop """lcwa""" +87 96 evaluator """rankbased""" +87 97 dataset """kinships""" +87 97 model """conve""" +87 97 loss """crossentropy""" +87 97 regularizer """no""" +87 97 optimizer """adam""" +87 97 training_loop """lcwa""" +87 97 evaluator """rankbased""" +87 98 dataset """kinships""" +87 98 model """conve""" +87 98 loss """crossentropy""" +87 98 regularizer """no""" +87 98 optimizer """adam""" +87 98 training_loop """lcwa""" +87 98 evaluator """rankbased""" +87 99 dataset """kinships""" +87 99 model """conve""" +87 99 loss """crossentropy""" +87 99 regularizer """no""" +87 99 optimizer """adam""" +87 99 training_loop """lcwa""" +87 99 evaluator """rankbased""" +87 100 dataset """kinships""" +87 100 model """conve""" +87 100 loss """crossentropy""" +87 100 regularizer """no""" +87 100 optimizer """adam""" +87 100 training_loop """lcwa""" +87 100 evaluator """rankbased""" +88 1 model.output_channels 57.0 +88 1 model.input_dropout 0.22629164916823452 +88 1 model.output_dropout 0.19043808231612708 +88 1 model.feature_map_dropout 0.21249635606525358 +88 1 model.embedding_dim 2.0 +88 1 optimizer.lr 0.012084309026955918 +88 1 negative_sampler.num_negs_per_pos 42.0 +88 1 training.batch_size 0.0 +88 2 model.output_channels 61.0 +88 2 model.input_dropout 0.2689259829529001 +88 2 model.output_dropout 0.0065899681344986916 +88 2 model.feature_map_dropout 
0.3876878303425053 +88 2 model.embedding_dim 2.0 +88 2 optimizer.lr 0.026177574504858935 +88 2 negative_sampler.num_negs_per_pos 59.0 +88 2 training.batch_size 2.0 +88 3 model.output_channels 51.0 +88 3 model.input_dropout 0.16960633556043309 +88 3 model.output_dropout 0.05840974585279829 +88 3 model.feature_map_dropout 0.05065227694636659 +88 3 model.embedding_dim 2.0 +88 3 optimizer.lr 0.06607126591411594 +88 3 negative_sampler.num_negs_per_pos 94.0 +88 3 training.batch_size 0.0 +88 4 model.output_channels 30.0 +88 4 model.input_dropout 0.3785504911905975 +88 4 model.output_dropout 0.2942882853118313 +88 4 model.feature_map_dropout 0.07036038468556904 +88 4 model.embedding_dim 1.0 +88 4 optimizer.lr 0.011878280508494453 +88 4 negative_sampler.num_negs_per_pos 78.0 +88 4 training.batch_size 1.0 +88 5 model.output_channels 58.0 +88 5 model.input_dropout 0.46606910812326147 +88 5 model.output_dropout 0.16461536635894897 +88 5 model.feature_map_dropout 0.09474233699682039 +88 5 model.embedding_dim 1.0 +88 5 optimizer.lr 0.050567233215573694 +88 5 negative_sampler.num_negs_per_pos 11.0 +88 5 training.batch_size 2.0 +88 6 model.output_channels 39.0 +88 6 model.input_dropout 0.17270697410994484 +88 6 model.output_dropout 0.27476643585456867 +88 6 model.feature_map_dropout 0.2589370769294479 +88 6 model.embedding_dim 1.0 +88 6 optimizer.lr 0.021271242519082165 +88 6 negative_sampler.num_negs_per_pos 75.0 +88 6 training.batch_size 0.0 +88 7 model.output_channels 45.0 +88 7 model.input_dropout 0.343544775305759 +88 7 model.output_dropout 0.03108005719524387 +88 7 model.feature_map_dropout 0.0014165333843101924 +88 7 model.embedding_dim 2.0 +88 7 optimizer.lr 0.017463140693198556 +88 7 negative_sampler.num_negs_per_pos 87.0 +88 7 training.batch_size 2.0 +88 8 model.output_channels 26.0 +88 8 model.input_dropout 0.13043112852320715 +88 8 model.output_dropout 0.08364621700084479 +88 8 model.feature_map_dropout 0.04989232608188948 +88 8 model.embedding_dim 0.0 +88 8 
optimizer.lr 0.004761703466560188 +88 8 negative_sampler.num_negs_per_pos 93.0 +88 8 training.batch_size 0.0 +88 9 model.output_channels 63.0 +88 9 model.input_dropout 0.10311413156091759 +88 9 model.output_dropout 0.44018223973874343 +88 9 model.feature_map_dropout 0.4785469237586747 +88 9 model.embedding_dim 1.0 +88 9 optimizer.lr 0.02826265344463202 +88 9 negative_sampler.num_negs_per_pos 0.0 +88 9 training.batch_size 2.0 +88 10 model.output_channels 45.0 +88 10 model.input_dropout 0.010531087242382609 +88 10 model.output_dropout 0.006736446911265459 +88 10 model.feature_map_dropout 0.2558912966829018 +88 10 model.embedding_dim 2.0 +88 10 optimizer.lr 0.0012055261024944482 +88 10 negative_sampler.num_negs_per_pos 94.0 +88 10 training.batch_size 1.0 +88 11 model.output_channels 27.0 +88 11 model.input_dropout 0.20600735001216502 +88 11 model.output_dropout 0.4345141242107184 +88 11 model.feature_map_dropout 0.18825586060681576 +88 11 model.embedding_dim 0.0 +88 11 optimizer.lr 0.0032399936254294856 +88 11 negative_sampler.num_negs_per_pos 98.0 +88 11 training.batch_size 2.0 +88 12 model.output_channels 43.0 +88 12 model.input_dropout 0.21014345871339285 +88 12 model.output_dropout 0.2977590337967038 +88 12 model.feature_map_dropout 0.44634609596051295 +88 12 model.embedding_dim 1.0 +88 12 optimizer.lr 0.008165946070872563 +88 12 negative_sampler.num_negs_per_pos 56.0 +88 12 training.batch_size 2.0 +88 13 model.output_channels 18.0 +88 13 model.input_dropout 0.30495164583675616 +88 13 model.output_dropout 0.026659053811229183 +88 13 model.feature_map_dropout 0.21193315533751317 +88 13 model.embedding_dim 1.0 +88 13 optimizer.lr 0.008835162652645527 +88 13 negative_sampler.num_negs_per_pos 19.0 +88 13 training.batch_size 2.0 +88 14 model.output_channels 22.0 +88 14 model.input_dropout 0.17455037244086397 +88 14 model.output_dropout 0.29711078168396826 +88 14 model.feature_map_dropout 0.3953817268006748 +88 14 model.embedding_dim 0.0 +88 14 optimizer.lr 
0.0017338271898185936 +88 14 negative_sampler.num_negs_per_pos 34.0 +88 14 training.batch_size 2.0 +88 15 model.output_channels 53.0 +88 15 model.input_dropout 0.17664231721881263 +88 15 model.output_dropout 0.07102163838769432 +88 15 model.feature_map_dropout 0.37241829673686466 +88 15 model.embedding_dim 1.0 +88 15 optimizer.lr 0.0028248704531352534 +88 15 negative_sampler.num_negs_per_pos 12.0 +88 15 training.batch_size 0.0 +88 16 model.output_channels 39.0 +88 16 model.input_dropout 0.0029788110848333327 +88 16 model.output_dropout 0.38256975722788106 +88 16 model.feature_map_dropout 0.29264816066026395 +88 16 model.embedding_dim 0.0 +88 16 optimizer.lr 0.0025213698934111895 +88 16 negative_sampler.num_negs_per_pos 24.0 +88 16 training.batch_size 2.0 +88 17 model.output_channels 40.0 +88 17 model.input_dropout 0.4308855285045792 +88 17 model.output_dropout 0.3128570047051398 +88 17 model.feature_map_dropout 0.4853064844987286 +88 17 model.embedding_dim 0.0 +88 17 optimizer.lr 0.0027291364093482034 +88 17 negative_sampler.num_negs_per_pos 14.0 +88 17 training.batch_size 2.0 +88 18 model.output_channels 28.0 +88 18 model.input_dropout 0.14853424789267322 +88 18 model.output_dropout 0.46248403735929317 +88 18 model.feature_map_dropout 0.08647836370077422 +88 18 model.embedding_dim 2.0 +88 18 optimizer.lr 0.0686299299672285 +88 18 negative_sampler.num_negs_per_pos 62.0 +88 18 training.batch_size 2.0 +88 19 model.output_channels 63.0 +88 19 model.input_dropout 0.32371642086834995 +88 19 model.output_dropout 0.10596310838745282 +88 19 model.feature_map_dropout 0.22290179645761865 +88 19 model.embedding_dim 2.0 +88 19 optimizer.lr 0.024860934437779234 +88 19 negative_sampler.num_negs_per_pos 33.0 +88 19 training.batch_size 2.0 +88 20 model.output_channels 18.0 +88 20 model.input_dropout 0.16834471593069117 +88 20 model.output_dropout 0.274048997083211 +88 20 model.feature_map_dropout 0.1348283984011216 +88 20 model.embedding_dim 2.0 +88 20 optimizer.lr 
0.011531753245025577 +88 20 negative_sampler.num_negs_per_pos 69.0 +88 20 training.batch_size 2.0 +88 21 model.output_channels 51.0 +88 21 model.input_dropout 0.3227192635286626 +88 21 model.output_dropout 0.49067880962111093 +88 21 model.feature_map_dropout 0.1346047161710875 +88 21 model.embedding_dim 0.0 +88 21 optimizer.lr 0.00790565831132777 +88 21 negative_sampler.num_negs_per_pos 46.0 +88 21 training.batch_size 1.0 +88 22 model.output_channels 35.0 +88 22 model.input_dropout 0.1911281011760897 +88 22 model.output_dropout 0.156070541321516 +88 22 model.feature_map_dropout 0.16354547137316272 +88 22 model.embedding_dim 0.0 +88 22 optimizer.lr 0.001309158257467318 +88 22 negative_sampler.num_negs_per_pos 82.0 +88 22 training.batch_size 0.0 +88 23 model.output_channels 26.0 +88 23 model.input_dropout 0.2841697589502578 +88 23 model.output_dropout 0.37618295949996083 +88 23 model.feature_map_dropout 0.09280303073737944 +88 23 model.embedding_dim 1.0 +88 23 optimizer.lr 0.004074644049899739 +88 23 negative_sampler.num_negs_per_pos 39.0 +88 23 training.batch_size 0.0 +88 24 model.output_channels 64.0 +88 24 model.input_dropout 0.2874475135242592 +88 24 model.output_dropout 0.028804547628464794 +88 24 model.feature_map_dropout 0.3087871920154486 +88 24 model.embedding_dim 0.0 +88 24 optimizer.lr 0.008065387740699809 +88 24 negative_sampler.num_negs_per_pos 3.0 +88 24 training.batch_size 0.0 +88 25 model.output_channels 19.0 +88 25 model.input_dropout 0.12594308464188325 +88 25 model.output_dropout 0.291972434004725 +88 25 model.feature_map_dropout 0.2861226433076725 +88 25 model.embedding_dim 2.0 +88 25 optimizer.lr 0.003954550232364635 +88 25 negative_sampler.num_negs_per_pos 69.0 +88 25 training.batch_size 0.0 +88 26 model.output_channels 48.0 +88 26 model.input_dropout 0.15831459206815673 +88 26 model.output_dropout 0.4141294985740272 +88 26 model.feature_map_dropout 0.16035820037345028 +88 26 model.embedding_dim 2.0 +88 26 optimizer.lr 0.0029028489143743297 +88 
26 negative_sampler.num_negs_per_pos 35.0 +88 26 training.batch_size 1.0 +88 27 model.output_channels 52.0 +88 27 model.input_dropout 0.16749439340241518 +88 27 model.output_dropout 0.3569914260862752 +88 27 model.feature_map_dropout 0.2744938333981764 +88 27 model.embedding_dim 2.0 +88 27 optimizer.lr 0.0019029182721357338 +88 27 negative_sampler.num_negs_per_pos 48.0 +88 27 training.batch_size 0.0 +88 28 model.output_channels 60.0 +88 28 model.input_dropout 0.26425672514284176 +88 28 model.output_dropout 0.47495034009463244 +88 28 model.feature_map_dropout 0.15874987053278344 +88 28 model.embedding_dim 0.0 +88 28 optimizer.lr 0.03383474657002429 +88 28 negative_sampler.num_negs_per_pos 0.0 +88 28 training.batch_size 2.0 +88 29 model.output_channels 60.0 +88 29 model.input_dropout 0.07099798050373085 +88 29 model.output_dropout 0.12931220403073035 +88 29 model.feature_map_dropout 0.4738411015460879 +88 29 model.embedding_dim 2.0 +88 29 optimizer.lr 0.014895166983452605 +88 29 negative_sampler.num_negs_per_pos 43.0 +88 29 training.batch_size 1.0 +88 30 model.output_channels 28.0 +88 30 model.input_dropout 0.18294539377393287 +88 30 model.output_dropout 0.42853748231219474 +88 30 model.feature_map_dropout 0.4811636706597248 +88 30 model.embedding_dim 0.0 +88 30 optimizer.lr 0.002974447616550263 +88 30 negative_sampler.num_negs_per_pos 25.0 +88 30 training.batch_size 0.0 +88 31 model.output_channels 49.0 +88 31 model.input_dropout 0.37114495271870973 +88 31 model.output_dropout 0.47133429527701437 +88 31 model.feature_map_dropout 0.4858249438220956 +88 31 model.embedding_dim 0.0 +88 31 optimizer.lr 0.002699468050092167 +88 31 negative_sampler.num_negs_per_pos 35.0 +88 31 training.batch_size 1.0 +88 32 model.output_channels 16.0 +88 32 model.input_dropout 0.3233019074684756 +88 32 model.output_dropout 0.03938302243925634 +88 32 model.feature_map_dropout 0.3051476638222167 +88 32 model.embedding_dim 1.0 +88 32 optimizer.lr 0.022833494181529575 +88 32 
negative_sampler.num_negs_per_pos 1.0 +88 32 training.batch_size 0.0 +88 33 model.output_channels 44.0 +88 33 model.input_dropout 0.3958721950050622 +88 33 model.output_dropout 0.2718297248687919 +88 33 model.feature_map_dropout 0.3415100589183811 +88 33 model.embedding_dim 2.0 +88 33 optimizer.lr 0.029690413288366968 +88 33 negative_sampler.num_negs_per_pos 60.0 +88 33 training.batch_size 0.0 +88 34 model.output_channels 34.0 +88 34 model.input_dropout 0.3599183049853477 +88 34 model.output_dropout 0.3090384986877384 +88 34 model.feature_map_dropout 0.4941331782926175 +88 34 model.embedding_dim 1.0 +88 34 optimizer.lr 0.09172775848172517 +88 34 negative_sampler.num_negs_per_pos 97.0 +88 34 training.batch_size 1.0 +88 35 model.output_channels 51.0 +88 35 model.input_dropout 0.07876346502206355 +88 35 model.output_dropout 0.4081328934069527 +88 35 model.feature_map_dropout 0.38587343414072633 +88 35 model.embedding_dim 1.0 +88 35 optimizer.lr 0.020148885846702835 +88 35 negative_sampler.num_negs_per_pos 69.0 +88 35 training.batch_size 1.0 +88 36 model.output_channels 49.0 +88 36 model.input_dropout 0.2200537263574055 +88 36 model.output_dropout 0.04231518816114138 +88 36 model.feature_map_dropout 0.22510415475762746 +88 36 model.embedding_dim 1.0 +88 36 optimizer.lr 0.005228662040496955 +88 36 negative_sampler.num_negs_per_pos 92.0 +88 36 training.batch_size 0.0 +88 37 model.output_channels 48.0 +88 37 model.input_dropout 0.3708061405490209 +88 37 model.output_dropout 0.24315621840030283 +88 37 model.feature_map_dropout 0.3186208686326312 +88 37 model.embedding_dim 1.0 +88 37 optimizer.lr 0.01022286584110078 +88 37 negative_sampler.num_negs_per_pos 76.0 +88 37 training.batch_size 2.0 +88 38 model.output_channels 60.0 +88 38 model.input_dropout 0.4635006188358091 +88 38 model.output_dropout 0.04731826752958679 +88 38 model.feature_map_dropout 0.2964594015801307 +88 38 model.embedding_dim 2.0 +88 38 optimizer.lr 0.007857934882561405 +88 38 
negative_sampler.num_negs_per_pos 27.0 +88 38 training.batch_size 1.0 +88 39 model.output_channels 18.0 +88 39 model.input_dropout 0.24543295955942163 +88 39 model.output_dropout 0.02125995099007011 +88 39 model.feature_map_dropout 0.040361116927527474 +88 39 model.embedding_dim 1.0 +88 39 optimizer.lr 0.004903613116357328 +88 39 negative_sampler.num_negs_per_pos 26.0 +88 39 training.batch_size 0.0 +88 40 model.output_channels 36.0 +88 40 model.input_dropout 0.13899266004627775 +88 40 model.output_dropout 0.23140221523405047 +88 40 model.feature_map_dropout 0.4681695180377409 +88 40 model.embedding_dim 0.0 +88 40 optimizer.lr 0.005181410225402178 +88 40 negative_sampler.num_negs_per_pos 66.0 +88 40 training.batch_size 1.0 +88 41 model.output_channels 26.0 +88 41 model.input_dropout 0.14407469393894434 +88 41 model.output_dropout 0.1078210843567422 +88 41 model.feature_map_dropout 0.43480143715725744 +88 41 model.embedding_dim 2.0 +88 41 optimizer.lr 0.0015957429026495417 +88 41 negative_sampler.num_negs_per_pos 82.0 +88 41 training.batch_size 2.0 +88 42 model.output_channels 42.0 +88 42 model.input_dropout 0.16310488455083066 +88 42 model.output_dropout 0.20604033313249115 +88 42 model.feature_map_dropout 0.38998350992067826 +88 42 model.embedding_dim 0.0 +88 42 optimizer.lr 0.04086012661432949 +88 42 negative_sampler.num_negs_per_pos 87.0 +88 42 training.batch_size 2.0 +88 43 model.output_channels 20.0 +88 43 model.input_dropout 0.4191707787352306 +88 43 model.output_dropout 0.32434853466842006 +88 43 model.feature_map_dropout 0.15657625694378285 +88 43 model.embedding_dim 0.0 +88 43 optimizer.lr 0.0018694593429619962 +88 43 negative_sampler.num_negs_per_pos 93.0 +88 43 training.batch_size 1.0 +88 44 model.output_channels 38.0 +88 44 model.input_dropout 0.251304440941024 +88 44 model.output_dropout 0.2595581621971405 +88 44 model.feature_map_dropout 0.4718175571880311 +88 44 model.embedding_dim 0.0 +88 44 optimizer.lr 0.0063130508421623144 +88 44 
negative_sampler.num_negs_per_pos 32.0 +88 44 training.batch_size 0.0 +88 45 model.output_channels 28.0 +88 45 model.input_dropout 0.44326893620830765 +88 45 model.output_dropout 0.09191287927201358 +88 45 model.feature_map_dropout 0.2518151293994819 +88 45 model.embedding_dim 1.0 +88 45 optimizer.lr 0.029521159220981783 +88 45 negative_sampler.num_negs_per_pos 23.0 +88 45 training.batch_size 1.0 +88 46 model.output_channels 26.0 +88 46 model.input_dropout 0.38665339611568417 +88 46 model.output_dropout 0.2186909911810731 +88 46 model.feature_map_dropout 0.05409038602067723 +88 46 model.embedding_dim 1.0 +88 46 optimizer.lr 0.02753635998313545 +88 46 negative_sampler.num_negs_per_pos 38.0 +88 46 training.batch_size 1.0 +88 47 model.output_channels 35.0 +88 47 model.input_dropout 0.019329738581562506 +88 47 model.output_dropout 0.44860583378511565 +88 47 model.feature_map_dropout 0.17563324188255874 +88 47 model.embedding_dim 2.0 +88 47 optimizer.lr 0.00399281732787433 +88 47 negative_sampler.num_negs_per_pos 7.0 +88 47 training.batch_size 1.0 +88 48 model.output_channels 49.0 +88 48 model.input_dropout 0.37066543979713495 +88 48 model.output_dropout 0.32233313297787136 +88 48 model.feature_map_dropout 0.2973668181416792 +88 48 model.embedding_dim 2.0 +88 48 optimizer.lr 0.0029883985832394283 +88 48 negative_sampler.num_negs_per_pos 91.0 +88 48 training.batch_size 2.0 +88 49 model.output_channels 34.0 +88 49 model.input_dropout 0.10292484791252315 +88 49 model.output_dropout 0.3374204916686551 +88 49 model.feature_map_dropout 0.19737142694411708 +88 49 model.embedding_dim 2.0 +88 49 optimizer.lr 0.0554306108044731 +88 49 negative_sampler.num_negs_per_pos 53.0 +88 49 training.batch_size 0.0 +88 50 model.output_channels 16.0 +88 50 model.input_dropout 0.1335343130290797 +88 50 model.output_dropout 0.24093830775486724 +88 50 model.feature_map_dropout 0.03582532528408178 +88 50 model.embedding_dim 0.0 +88 50 optimizer.lr 0.0012465011198809167 +88 50 
negative_sampler.num_negs_per_pos 14.0 +88 50 training.batch_size 2.0 +88 51 model.output_channels 36.0 +88 51 model.input_dropout 0.4421021639184146 +88 51 model.output_dropout 0.47835368157299346 +88 51 model.feature_map_dropout 0.16096637863854119 +88 51 model.embedding_dim 1.0 +88 51 optimizer.lr 0.04973582300064773 +88 51 negative_sampler.num_negs_per_pos 64.0 +88 51 training.batch_size 2.0 +88 52 model.output_channels 51.0 +88 52 model.input_dropout 0.062373012368700353 +88 52 model.output_dropout 0.36606490304142236 +88 52 model.feature_map_dropout 0.4990473901678899 +88 52 model.embedding_dim 0.0 +88 52 optimizer.lr 0.03745817602186214 +88 52 negative_sampler.num_negs_per_pos 73.0 +88 52 training.batch_size 0.0 +88 53 model.output_channels 62.0 +88 53 model.input_dropout 0.13490933513321868 +88 53 model.output_dropout 0.4702522869343962 +88 53 model.feature_map_dropout 0.30038490152645136 +88 53 model.embedding_dim 1.0 +88 53 optimizer.lr 0.012644585924790966 +88 53 negative_sampler.num_negs_per_pos 82.0 +88 53 training.batch_size 0.0 +88 54 model.output_channels 36.0 +88 54 model.input_dropout 0.19003873969025925 +88 54 model.output_dropout 0.01901626578479776 +88 54 model.feature_map_dropout 0.24568393069995148 +88 54 model.embedding_dim 2.0 +88 54 optimizer.lr 0.04959410845074256 +88 54 negative_sampler.num_negs_per_pos 22.0 +88 54 training.batch_size 0.0 +88 55 model.output_channels 48.0 +88 55 model.input_dropout 0.11056503098469145 +88 55 model.output_dropout 0.3703232721370568 +88 55 model.feature_map_dropout 0.3153819595905194 +88 55 model.embedding_dim 2.0 +88 55 optimizer.lr 0.007396893954672269 +88 55 negative_sampler.num_negs_per_pos 9.0 +88 55 training.batch_size 0.0 +88 56 model.output_channels 49.0 +88 56 model.input_dropout 0.1299030806820482 +88 56 model.output_dropout 0.4994103696504919 +88 56 model.feature_map_dropout 0.10180299436112256 +88 56 model.embedding_dim 2.0 +88 56 optimizer.lr 0.00298095928593716 +88 56 
negative_sampler.num_negs_per_pos 46.0 +88 56 training.batch_size 0.0 +88 57 model.output_channels 56.0 +88 57 model.input_dropout 0.2736031503908936 +88 57 model.output_dropout 0.4156657536320457 +88 57 model.feature_map_dropout 0.31617635011459466 +88 57 model.embedding_dim 1.0 +88 57 optimizer.lr 0.035109414298594 +88 57 negative_sampler.num_negs_per_pos 75.0 +88 57 training.batch_size 1.0 +88 58 model.output_channels 54.0 +88 58 model.input_dropout 0.27714011030963426 +88 58 model.output_dropout 0.48710190938288644 +88 58 model.feature_map_dropout 0.3309644774877161 +88 58 model.embedding_dim 1.0 +88 58 optimizer.lr 0.0020321649686175776 +88 58 negative_sampler.num_negs_per_pos 86.0 +88 58 training.batch_size 1.0 +88 59 model.output_channels 30.0 +88 59 model.input_dropout 0.29764259424488637 +88 59 model.output_dropout 0.08411336951330445 +88 59 model.feature_map_dropout 0.4013226595497622 +88 59 model.embedding_dim 2.0 +88 59 optimizer.lr 0.002710045521388437 +88 59 negative_sampler.num_negs_per_pos 79.0 +88 59 training.batch_size 0.0 +88 60 model.output_channels 17.0 +88 60 model.input_dropout 0.31554705291218293 +88 60 model.output_dropout 0.1260934982973667 +88 60 model.feature_map_dropout 0.06392558706440266 +88 60 model.embedding_dim 2.0 +88 60 optimizer.lr 0.08476421784190104 +88 60 negative_sampler.num_negs_per_pos 75.0 +88 60 training.batch_size 1.0 +88 61 model.output_channels 46.0 +88 61 model.input_dropout 0.01856099809078654 +88 61 model.output_dropout 0.09805883456414316 +88 61 model.feature_map_dropout 0.3402762817384895 +88 61 model.embedding_dim 0.0 +88 61 optimizer.lr 0.07054668769803044 +88 61 negative_sampler.num_negs_per_pos 40.0 +88 61 training.batch_size 0.0 +88 62 model.output_channels 48.0 +88 62 model.input_dropout 0.03163613758318701 +88 62 model.output_dropout 0.22270376804693942 +88 62 model.feature_map_dropout 0.1824983828300457 +88 62 model.embedding_dim 2.0 +88 62 optimizer.lr 0.0017719652872146374 +88 62 
negative_sampler.num_negs_per_pos 43.0 +88 62 training.batch_size 2.0 +88 63 model.output_channels 50.0 +88 63 model.input_dropout 0.11772352653465451 +88 63 model.output_dropout 0.48960676492318334 +88 63 model.feature_map_dropout 0.07875366473741513 +88 63 model.embedding_dim 1.0 +88 63 optimizer.lr 0.001946104817866961 +88 63 negative_sampler.num_negs_per_pos 68.0 +88 63 training.batch_size 2.0 +88 64 model.output_channels 55.0 +88 64 model.input_dropout 0.40853068742415677 +88 64 model.output_dropout 0.21593386581063534 +88 64 model.feature_map_dropout 0.38343032169986513 +88 64 model.embedding_dim 2.0 +88 64 optimizer.lr 0.02258167724973567 +88 64 negative_sampler.num_negs_per_pos 69.0 +88 64 training.batch_size 2.0 +88 65 model.output_channels 43.0 +88 65 model.input_dropout 0.38036281023116303 +88 65 model.output_dropout 0.39009066686581845 +88 65 model.feature_map_dropout 0.07459140322702018 +88 65 model.embedding_dim 2.0 +88 65 optimizer.lr 0.009708412432485217 +88 65 negative_sampler.num_negs_per_pos 95.0 +88 65 training.batch_size 0.0 +88 66 model.output_channels 44.0 +88 66 model.input_dropout 0.08418945802694927 +88 66 model.output_dropout 0.43451699406775834 +88 66 model.feature_map_dropout 0.1284965886513758 +88 66 model.embedding_dim 1.0 +88 66 optimizer.lr 0.0015417354903844846 +88 66 negative_sampler.num_negs_per_pos 98.0 +88 66 training.batch_size 0.0 +88 67 model.output_channels 52.0 +88 67 model.input_dropout 0.0170605270942491 +88 67 model.output_dropout 0.3792019292435355 +88 67 model.feature_map_dropout 0.17297145369872796 +88 67 model.embedding_dim 0.0 +88 67 optimizer.lr 0.06243725145531881 +88 67 negative_sampler.num_negs_per_pos 68.0 +88 67 training.batch_size 1.0 +88 68 model.output_channels 21.0 +88 68 model.input_dropout 0.14342605353369275 +88 68 model.output_dropout 0.41366689227043907 +88 68 model.feature_map_dropout 0.16209792402716172 +88 68 model.embedding_dim 2.0 +88 68 optimizer.lr 0.0018664619993677377 +88 68 
negative_sampler.num_negs_per_pos 78.0 +88 68 training.batch_size 2.0 +88 69 model.output_channels 50.0 +88 69 model.input_dropout 0.4994103377044195 +88 69 model.output_dropout 0.45028788019635874 +88 69 model.feature_map_dropout 0.2236063571117725 +88 69 model.embedding_dim 2.0 +88 69 optimizer.lr 0.0010764376091841246 +88 69 negative_sampler.num_negs_per_pos 25.0 +88 69 training.batch_size 1.0 +88 70 model.output_channels 35.0 +88 70 model.input_dropout 0.3214530105760041 +88 70 model.output_dropout 0.4591993734658406 +88 70 model.feature_map_dropout 0.07649310507316509 +88 70 model.embedding_dim 2.0 +88 70 optimizer.lr 0.0016785735787403428 +88 70 negative_sampler.num_negs_per_pos 0.0 +88 70 training.batch_size 2.0 +88 71 model.output_channels 25.0 +88 71 model.input_dropout 0.08137203910819679 +88 71 model.output_dropout 0.23213849275677728 +88 71 model.feature_map_dropout 0.36928049242327093 +88 71 model.embedding_dim 1.0 +88 71 optimizer.lr 0.007439242009964623 +88 71 negative_sampler.num_negs_per_pos 14.0 +88 71 training.batch_size 0.0 +88 72 model.output_channels 63.0 +88 72 model.input_dropout 0.3443799701126252 +88 72 model.output_dropout 0.2590248850256364 +88 72 model.feature_map_dropout 0.2946483852946696 +88 72 model.embedding_dim 0.0 +88 72 optimizer.lr 0.005931660930020407 +88 72 negative_sampler.num_negs_per_pos 93.0 +88 72 training.batch_size 2.0 +88 73 model.output_channels 28.0 +88 73 model.input_dropout 0.4140815248128865 +88 73 model.output_dropout 0.13425412572768258 +88 73 model.feature_map_dropout 0.08653643171961817 +88 73 model.embedding_dim 2.0 +88 73 optimizer.lr 0.0017375694357296375 +88 73 negative_sampler.num_negs_per_pos 54.0 +88 73 training.batch_size 2.0 +88 74 model.output_channels 39.0 +88 74 model.input_dropout 0.15562886928630604 +88 74 model.output_dropout 0.114262433486648 +88 74 model.feature_map_dropout 0.35000121166132675 +88 74 model.embedding_dim 1.0 +88 74 optimizer.lr 0.01240406500920375 +88 74 
negative_sampler.num_negs_per_pos 6.0 +88 74 training.batch_size 2.0 +88 75 model.output_channels 51.0 +88 75 model.input_dropout 0.16097502429105304 +88 75 model.output_dropout 0.15317131641041937 +88 75 model.feature_map_dropout 0.35979912292999516 +88 75 model.embedding_dim 2.0 +88 75 optimizer.lr 0.006156374469645272 +88 75 negative_sampler.num_negs_per_pos 21.0 +88 75 training.batch_size 2.0 +88 76 model.output_channels 56.0 +88 76 model.input_dropout 0.4784665674663774 +88 76 model.output_dropout 0.27101950347812 +88 76 model.feature_map_dropout 0.4020397204362468 +88 76 model.embedding_dim 1.0 +88 76 optimizer.lr 0.0021927513410246323 +88 76 negative_sampler.num_negs_per_pos 39.0 +88 76 training.batch_size 0.0 +88 77 model.output_channels 37.0 +88 77 model.input_dropout 0.22027941818289998 +88 77 model.output_dropout 0.4176989963096297 +88 77 model.feature_map_dropout 0.23448197550330485 +88 77 model.embedding_dim 2.0 +88 77 optimizer.lr 0.0014774763331255733 +88 77 negative_sampler.num_negs_per_pos 96.0 +88 77 training.batch_size 1.0 +88 1 dataset """kinships""" +88 1 model """conve""" +88 1 loss """bceaftersigmoid""" +88 1 regularizer """no""" +88 1 optimizer """adam""" +88 1 training_loop """owa""" +88 1 negative_sampler """basic""" +88 1 evaluator """rankbased""" +88 2 dataset """kinships""" +88 2 model """conve""" +88 2 loss """bceaftersigmoid""" +88 2 regularizer """no""" +88 2 optimizer """adam""" +88 2 training_loop """owa""" +88 2 negative_sampler """basic""" +88 2 evaluator """rankbased""" +88 3 dataset """kinships""" +88 3 model """conve""" +88 3 loss """bceaftersigmoid""" +88 3 regularizer """no""" +88 3 optimizer """adam""" +88 3 training_loop """owa""" +88 3 negative_sampler """basic""" +88 3 evaluator """rankbased""" +88 4 dataset """kinships""" +88 4 model """conve""" +88 4 loss """bceaftersigmoid""" +88 4 regularizer """no""" +88 4 optimizer """adam""" +88 4 training_loop """owa""" +88 4 negative_sampler """basic""" +88 4 evaluator 
"""rankbased""" +88 5 dataset """kinships""" +88 5 model """conve""" +88 5 loss """bceaftersigmoid""" +88 5 regularizer """no""" +88 5 optimizer """adam""" +88 5 training_loop """owa""" +88 5 negative_sampler """basic""" +88 5 evaluator """rankbased""" +88 6 dataset """kinships""" +88 6 model """conve""" +88 6 loss """bceaftersigmoid""" +88 6 regularizer """no""" +88 6 optimizer """adam""" +88 6 training_loop """owa""" +88 6 negative_sampler """basic""" +88 6 evaluator """rankbased""" +88 7 dataset """kinships""" +88 7 model """conve""" +88 7 loss """bceaftersigmoid""" +88 7 regularizer """no""" +88 7 optimizer """adam""" +88 7 training_loop """owa""" +88 7 negative_sampler """basic""" +88 7 evaluator """rankbased""" +88 8 dataset """kinships""" +88 8 model """conve""" +88 8 loss """bceaftersigmoid""" +88 8 regularizer """no""" +88 8 optimizer """adam""" +88 8 training_loop """owa""" +88 8 negative_sampler """basic""" +88 8 evaluator """rankbased""" +88 9 dataset """kinships""" +88 9 model """conve""" +88 9 loss """bceaftersigmoid""" +88 9 regularizer """no""" +88 9 optimizer """adam""" +88 9 training_loop """owa""" +88 9 negative_sampler """basic""" +88 9 evaluator """rankbased""" +88 10 dataset """kinships""" +88 10 model """conve""" +88 10 loss """bceaftersigmoid""" +88 10 regularizer """no""" +88 10 optimizer """adam""" +88 10 training_loop """owa""" +88 10 negative_sampler """basic""" +88 10 evaluator """rankbased""" +88 11 dataset """kinships""" +88 11 model """conve""" +88 11 loss """bceaftersigmoid""" +88 11 regularizer """no""" +88 11 optimizer """adam""" +88 11 training_loop """owa""" +88 11 negative_sampler """basic""" +88 11 evaluator """rankbased""" +88 12 dataset """kinships""" +88 12 model """conve""" +88 12 loss """bceaftersigmoid""" +88 12 regularizer """no""" +88 12 optimizer """adam""" +88 12 training_loop """owa""" +88 12 negative_sampler """basic""" +88 12 evaluator """rankbased""" +88 13 dataset """kinships""" +88 13 model """conve""" +88 13 
loss """bceaftersigmoid""" +88 13 regularizer """no""" +88 13 optimizer """adam""" +88 13 training_loop """owa""" +88 13 negative_sampler """basic""" +88 13 evaluator """rankbased""" +88 14 dataset """kinships""" +88 14 model """conve""" +88 14 loss """bceaftersigmoid""" +88 14 regularizer """no""" +88 14 optimizer """adam""" +88 14 training_loop """owa""" +88 14 negative_sampler """basic""" +88 14 evaluator """rankbased""" +88 15 dataset """kinships""" +88 15 model """conve""" +88 15 loss """bceaftersigmoid""" +88 15 regularizer """no""" +88 15 optimizer """adam""" +88 15 training_loop """owa""" +88 15 negative_sampler """basic""" +88 15 evaluator """rankbased""" +88 16 dataset """kinships""" +88 16 model """conve""" +88 16 loss """bceaftersigmoid""" +88 16 regularizer """no""" +88 16 optimizer """adam""" +88 16 training_loop """owa""" +88 16 negative_sampler """basic""" +88 16 evaluator """rankbased""" +88 17 dataset """kinships""" +88 17 model """conve""" +88 17 loss """bceaftersigmoid""" +88 17 regularizer """no""" +88 17 optimizer """adam""" +88 17 training_loop """owa""" +88 17 negative_sampler """basic""" +88 17 evaluator """rankbased""" +88 18 dataset """kinships""" +88 18 model """conve""" +88 18 loss """bceaftersigmoid""" +88 18 regularizer """no""" +88 18 optimizer """adam""" +88 18 training_loop """owa""" +88 18 negative_sampler """basic""" +88 18 evaluator """rankbased""" +88 19 dataset """kinships""" +88 19 model """conve""" +88 19 loss """bceaftersigmoid""" +88 19 regularizer """no""" +88 19 optimizer """adam""" +88 19 training_loop """owa""" +88 19 negative_sampler """basic""" +88 19 evaluator """rankbased""" +88 20 dataset """kinships""" +88 20 model """conve""" +88 20 loss """bceaftersigmoid""" +88 20 regularizer """no""" +88 20 optimizer """adam""" +88 20 training_loop """owa""" +88 20 negative_sampler """basic""" +88 20 evaluator """rankbased""" +88 21 dataset """kinships""" +88 21 model """conve""" +88 21 loss """bceaftersigmoid""" +88 21 
regularizer """no""" +88 21 optimizer """adam""" +88 21 training_loop """owa""" +88 21 negative_sampler """basic""" +88 21 evaluator """rankbased""" +88 22 dataset """kinships""" +88 22 model """conve""" +88 22 loss """bceaftersigmoid""" +88 22 regularizer """no""" +88 22 optimizer """adam""" +88 22 training_loop """owa""" +88 22 negative_sampler """basic""" +88 22 evaluator """rankbased""" +88 23 dataset """kinships""" +88 23 model """conve""" +88 23 loss """bceaftersigmoid""" +88 23 regularizer """no""" +88 23 optimizer """adam""" +88 23 training_loop """owa""" +88 23 negative_sampler """basic""" +88 23 evaluator """rankbased""" +88 24 dataset """kinships""" +88 24 model """conve""" +88 24 loss """bceaftersigmoid""" +88 24 regularizer """no""" +88 24 optimizer """adam""" +88 24 training_loop """owa""" +88 24 negative_sampler """basic""" +88 24 evaluator """rankbased""" +88 25 dataset """kinships""" +88 25 model """conve""" +88 25 loss """bceaftersigmoid""" +88 25 regularizer """no""" +88 25 optimizer """adam""" +88 25 training_loop """owa""" +88 25 negative_sampler """basic""" +88 25 evaluator """rankbased""" +88 26 dataset """kinships""" +88 26 model """conve""" +88 26 loss """bceaftersigmoid""" +88 26 regularizer """no""" +88 26 optimizer """adam""" +88 26 training_loop """owa""" +88 26 negative_sampler """basic""" +88 26 evaluator """rankbased""" +88 27 dataset """kinships""" +88 27 model """conve""" +88 27 loss """bceaftersigmoid""" +88 27 regularizer """no""" +88 27 optimizer """adam""" +88 27 training_loop """owa""" +88 27 negative_sampler """basic""" +88 27 evaluator """rankbased""" +88 28 dataset """kinships""" +88 28 model """conve""" +88 28 loss """bceaftersigmoid""" +88 28 regularizer """no""" +88 28 optimizer """adam""" +88 28 training_loop """owa""" +88 28 negative_sampler """basic""" +88 28 evaluator """rankbased""" +88 29 dataset """kinships""" +88 29 model """conve""" +88 29 loss """bceaftersigmoid""" +88 29 regularizer """no""" +88 29 optimizer 
"""adam""" +88 29 training_loop """owa""" +88 29 negative_sampler """basic""" +88 29 evaluator """rankbased""" +88 30 dataset """kinships""" +88 30 model """conve""" +88 30 loss """bceaftersigmoid""" +88 30 regularizer """no""" +88 30 optimizer """adam""" +88 30 training_loop """owa""" +88 30 negative_sampler """basic""" +88 30 evaluator """rankbased""" +88 31 dataset """kinships""" +88 31 model """conve""" +88 31 loss """bceaftersigmoid""" +88 31 regularizer """no""" +88 31 optimizer """adam""" +88 31 training_loop """owa""" +88 31 negative_sampler """basic""" +88 31 evaluator """rankbased""" +88 32 dataset """kinships""" +88 32 model """conve""" +88 32 loss """bceaftersigmoid""" +88 32 regularizer """no""" +88 32 optimizer """adam""" +88 32 training_loop """owa""" +88 32 negative_sampler """basic""" +88 32 evaluator """rankbased""" +88 33 dataset """kinships""" +88 33 model """conve""" +88 33 loss """bceaftersigmoid""" +88 33 regularizer """no""" +88 33 optimizer """adam""" +88 33 training_loop """owa""" +88 33 negative_sampler """basic""" +88 33 evaluator """rankbased""" +88 34 dataset """kinships""" +88 34 model """conve""" +88 34 loss """bceaftersigmoid""" +88 34 regularizer """no""" +88 34 optimizer """adam""" +88 34 training_loop """owa""" +88 34 negative_sampler """basic""" +88 34 evaluator """rankbased""" +88 35 dataset """kinships""" +88 35 model """conve""" +88 35 loss """bceaftersigmoid""" +88 35 regularizer """no""" +88 35 optimizer """adam""" +88 35 training_loop """owa""" +88 35 negative_sampler """basic""" +88 35 evaluator """rankbased""" +88 36 dataset """kinships""" +88 36 model """conve""" +88 36 loss """bceaftersigmoid""" +88 36 regularizer """no""" +88 36 optimizer """adam""" +88 36 training_loop """owa""" +88 36 negative_sampler """basic""" +88 36 evaluator """rankbased""" +88 37 dataset """kinships""" +88 37 model """conve""" +88 37 loss """bceaftersigmoid""" +88 37 regularizer """no""" +88 37 optimizer """adam""" +88 37 training_loop 
"""owa""" +88 37 negative_sampler """basic""" +88 37 evaluator """rankbased""" +88 38 dataset """kinships""" +88 38 model """conve""" +88 38 loss """bceaftersigmoid""" +88 38 regularizer """no""" +88 38 optimizer """adam""" +88 38 training_loop """owa""" +88 38 negative_sampler """basic""" +88 38 evaluator """rankbased""" +88 39 dataset """kinships""" +88 39 model """conve""" +88 39 loss """bceaftersigmoid""" +88 39 regularizer """no""" +88 39 optimizer """adam""" +88 39 training_loop """owa""" +88 39 negative_sampler """basic""" +88 39 evaluator """rankbased""" +88 40 dataset """kinships""" +88 40 model """conve""" +88 40 loss """bceaftersigmoid""" +88 40 regularizer """no""" +88 40 optimizer """adam""" +88 40 training_loop """owa""" +88 40 negative_sampler """basic""" +88 40 evaluator """rankbased""" +88 41 dataset """kinships""" +88 41 model """conve""" +88 41 loss """bceaftersigmoid""" +88 41 regularizer """no""" +88 41 optimizer """adam""" +88 41 training_loop """owa""" +88 41 negative_sampler """basic""" +88 41 evaluator """rankbased""" +88 42 dataset """kinships""" +88 42 model """conve""" +88 42 loss """bceaftersigmoid""" +88 42 regularizer """no""" +88 42 optimizer """adam""" +88 42 training_loop """owa""" +88 42 negative_sampler """basic""" +88 42 evaluator """rankbased""" +88 43 dataset """kinships""" +88 43 model """conve""" +88 43 loss """bceaftersigmoid""" +88 43 regularizer """no""" +88 43 optimizer """adam""" +88 43 training_loop """owa""" +88 43 negative_sampler """basic""" +88 43 evaluator """rankbased""" +88 44 dataset """kinships""" +88 44 model """conve""" +88 44 loss """bceaftersigmoid""" +88 44 regularizer """no""" +88 44 optimizer """adam""" +88 44 training_loop """owa""" +88 44 negative_sampler """basic""" +88 44 evaluator """rankbased""" +88 45 dataset """kinships""" +88 45 model """conve""" +88 45 loss """bceaftersigmoid""" +88 45 regularizer """no""" +88 45 optimizer """adam""" +88 45 training_loop """owa""" +88 45 negative_sampler 
"""basic""" +88 45 evaluator """rankbased""" +88 46 dataset """kinships""" +88 46 model """conve""" +88 46 loss """bceaftersigmoid""" +88 46 regularizer """no""" +88 46 optimizer """adam""" +88 46 training_loop """owa""" +88 46 negative_sampler """basic""" +88 46 evaluator """rankbased""" +88 47 dataset """kinships""" +88 47 model """conve""" +88 47 loss """bceaftersigmoid""" +88 47 regularizer """no""" +88 47 optimizer """adam""" +88 47 training_loop """owa""" +88 47 negative_sampler """basic""" +88 47 evaluator """rankbased""" +88 48 dataset """kinships""" +88 48 model """conve""" +88 48 loss """bceaftersigmoid""" +88 48 regularizer """no""" +88 48 optimizer """adam""" +88 48 training_loop """owa""" +88 48 negative_sampler """basic""" +88 48 evaluator """rankbased""" +88 49 dataset """kinships""" +88 49 model """conve""" +88 49 loss """bceaftersigmoid""" +88 49 regularizer """no""" +88 49 optimizer """adam""" +88 49 training_loop """owa""" +88 49 negative_sampler """basic""" +88 49 evaluator """rankbased""" +88 50 dataset """kinships""" +88 50 model """conve""" +88 50 loss """bceaftersigmoid""" +88 50 regularizer """no""" +88 50 optimizer """adam""" +88 50 training_loop """owa""" +88 50 negative_sampler """basic""" +88 50 evaluator """rankbased""" +88 51 dataset """kinships""" +88 51 model """conve""" +88 51 loss """bceaftersigmoid""" +88 51 regularizer """no""" +88 51 optimizer """adam""" +88 51 training_loop """owa""" +88 51 negative_sampler """basic""" +88 51 evaluator """rankbased""" +88 52 dataset """kinships""" +88 52 model """conve""" +88 52 loss """bceaftersigmoid""" +88 52 regularizer """no""" +88 52 optimizer """adam""" +88 52 training_loop """owa""" +88 52 negative_sampler """basic""" +88 52 evaluator """rankbased""" +88 53 dataset """kinships""" +88 53 model """conve""" +88 53 loss """bceaftersigmoid""" +88 53 regularizer """no""" +88 53 optimizer """adam""" +88 53 training_loop """owa""" +88 53 negative_sampler """basic""" +88 53 evaluator 
"""rankbased""" +88 54 dataset """kinships""" +88 54 model """conve""" +88 54 loss """bceaftersigmoid""" +88 54 regularizer """no""" +88 54 optimizer """adam""" +88 54 training_loop """owa""" +88 54 negative_sampler """basic""" +88 54 evaluator """rankbased""" +88 55 dataset """kinships""" +88 55 model """conve""" +88 55 loss """bceaftersigmoid""" +88 55 regularizer """no""" +88 55 optimizer """adam""" +88 55 training_loop """owa""" +88 55 negative_sampler """basic""" +88 55 evaluator """rankbased""" +88 56 dataset """kinships""" +88 56 model """conve""" +88 56 loss """bceaftersigmoid""" +88 56 regularizer """no""" +88 56 optimizer """adam""" +88 56 training_loop """owa""" +88 56 negative_sampler """basic""" +88 56 evaluator """rankbased""" +88 57 dataset """kinships""" +88 57 model """conve""" +88 57 loss """bceaftersigmoid""" +88 57 regularizer """no""" +88 57 optimizer """adam""" +88 57 training_loop """owa""" +88 57 negative_sampler """basic""" +88 57 evaluator """rankbased""" +88 58 dataset """kinships""" +88 58 model """conve""" +88 58 loss """bceaftersigmoid""" +88 58 regularizer """no""" +88 58 optimizer """adam""" +88 58 training_loop """owa""" +88 58 negative_sampler """basic""" +88 58 evaluator """rankbased""" +88 59 dataset """kinships""" +88 59 model """conve""" +88 59 loss """bceaftersigmoid""" +88 59 regularizer """no""" +88 59 optimizer """adam""" +88 59 training_loop """owa""" +88 59 negative_sampler """basic""" +88 59 evaluator """rankbased""" +88 60 dataset """kinships""" +88 60 model """conve""" +88 60 loss """bceaftersigmoid""" +88 60 regularizer """no""" +88 60 optimizer """adam""" +88 60 training_loop """owa""" +88 60 negative_sampler """basic""" +88 60 evaluator """rankbased""" +88 61 dataset """kinships""" +88 61 model """conve""" +88 61 loss """bceaftersigmoid""" +88 61 regularizer """no""" +88 61 optimizer """adam""" +88 61 training_loop """owa""" +88 61 negative_sampler """basic""" +88 61 evaluator """rankbased""" +88 62 dataset 
"""kinships""" +88 62 model """conve""" +88 62 loss """bceaftersigmoid""" +88 62 regularizer """no""" +88 62 optimizer """adam""" +88 62 training_loop """owa""" +88 62 negative_sampler """basic""" +88 62 evaluator """rankbased""" +88 63 dataset """kinships""" +88 63 model """conve""" +88 63 loss """bceaftersigmoid""" +88 63 regularizer """no""" +88 63 optimizer """adam""" +88 63 training_loop """owa""" +88 63 negative_sampler """basic""" +88 63 evaluator """rankbased""" +88 64 dataset """kinships""" +88 64 model """conve""" +88 64 loss """bceaftersigmoid""" +88 64 regularizer """no""" +88 64 optimizer """adam""" +88 64 training_loop """owa""" +88 64 negative_sampler """basic""" +88 64 evaluator """rankbased""" +88 65 dataset """kinships""" +88 65 model """conve""" +88 65 loss """bceaftersigmoid""" +88 65 regularizer """no""" +88 65 optimizer """adam""" +88 65 training_loop """owa""" +88 65 negative_sampler """basic""" +88 65 evaluator """rankbased""" +88 66 dataset """kinships""" +88 66 model """conve""" +88 66 loss """bceaftersigmoid""" +88 66 regularizer """no""" +88 66 optimizer """adam""" +88 66 training_loop """owa""" +88 66 negative_sampler """basic""" +88 66 evaluator """rankbased""" +88 67 dataset """kinships""" +88 67 model """conve""" +88 67 loss """bceaftersigmoid""" +88 67 regularizer """no""" +88 67 optimizer """adam""" +88 67 training_loop """owa""" +88 67 negative_sampler """basic""" +88 67 evaluator """rankbased""" +88 68 dataset """kinships""" +88 68 model """conve""" +88 68 loss """bceaftersigmoid""" +88 68 regularizer """no""" +88 68 optimizer """adam""" +88 68 training_loop """owa""" +88 68 negative_sampler """basic""" +88 68 evaluator """rankbased""" +88 69 dataset """kinships""" +88 69 model """conve""" +88 69 loss """bceaftersigmoid""" +88 69 regularizer """no""" +88 69 optimizer """adam""" +88 69 training_loop """owa""" +88 69 negative_sampler """basic""" +88 69 evaluator """rankbased""" +88 70 dataset """kinships""" +88 70 model """conve""" 
+88 70 loss """bceaftersigmoid""" +88 70 regularizer """no""" +88 70 optimizer """adam""" +88 70 training_loop """owa""" +88 70 negative_sampler """basic""" +88 70 evaluator """rankbased""" +88 71 dataset """kinships""" +88 71 model """conve""" +88 71 loss """bceaftersigmoid""" +88 71 regularizer """no""" +88 71 optimizer """adam""" +88 71 training_loop """owa""" +88 71 negative_sampler """basic""" +88 71 evaluator """rankbased""" +88 72 dataset """kinships""" +88 72 model """conve""" +88 72 loss """bceaftersigmoid""" +88 72 regularizer """no""" +88 72 optimizer """adam""" +88 72 training_loop """owa""" +88 72 negative_sampler """basic""" +88 72 evaluator """rankbased""" +88 73 dataset """kinships""" +88 73 model """conve""" +88 73 loss """bceaftersigmoid""" +88 73 regularizer """no""" +88 73 optimizer """adam""" +88 73 training_loop """owa""" +88 73 negative_sampler """basic""" +88 73 evaluator """rankbased""" +88 74 dataset """kinships""" +88 74 model """conve""" +88 74 loss """bceaftersigmoid""" +88 74 regularizer """no""" +88 74 optimizer """adam""" +88 74 training_loop """owa""" +88 74 negative_sampler """basic""" +88 74 evaluator """rankbased""" +88 75 dataset """kinships""" +88 75 model """conve""" +88 75 loss """bceaftersigmoid""" +88 75 regularizer """no""" +88 75 optimizer """adam""" +88 75 training_loop """owa""" +88 75 negative_sampler """basic""" +88 75 evaluator """rankbased""" +88 76 dataset """kinships""" +88 76 model """conve""" +88 76 loss """bceaftersigmoid""" +88 76 regularizer """no""" +88 76 optimizer """adam""" +88 76 training_loop """owa""" +88 76 negative_sampler """basic""" +88 76 evaluator """rankbased""" +88 77 dataset """kinships""" +88 77 model """conve""" +88 77 loss """bceaftersigmoid""" +88 77 regularizer """no""" +88 77 optimizer """adam""" +88 77 training_loop """owa""" +88 77 negative_sampler """basic""" +88 77 evaluator """rankbased""" +89 1 model.output_channels 55.0 +89 1 model.input_dropout 0.009350857370956367 +89 1 
model.output_dropout 0.3532261871926672 +89 1 model.feature_map_dropout 0.24221487912272682 +89 1 model.embedding_dim 0.0 +89 1 optimizer.lr 0.04233525364685179 +89 1 negative_sampler.num_negs_per_pos 29.0 +89 1 training.batch_size 2.0 +89 2 model.output_channels 28.0 +89 2 model.input_dropout 0.335134128431041 +89 2 model.output_dropout 0.4889524440270346 +89 2 model.feature_map_dropout 0.10678692759527847 +89 2 model.embedding_dim 1.0 +89 2 optimizer.lr 0.025253104844642462 +89 2 negative_sampler.num_negs_per_pos 93.0 +89 2 training.batch_size 1.0 +89 3 model.output_channels 45.0 +89 3 model.input_dropout 0.17920350237481592 +89 3 model.output_dropout 0.3602679939456754 +89 3 model.feature_map_dropout 0.36622162251481827 +89 3 model.embedding_dim 1.0 +89 3 optimizer.lr 0.09519144124977115 +89 3 negative_sampler.num_negs_per_pos 95.0 +89 3 training.batch_size 1.0 +89 4 model.output_channels 27.0 +89 4 model.input_dropout 0.03704585654604042 +89 4 model.output_dropout 0.05888024942092607 +89 4 model.feature_map_dropout 0.2982534618491599 +89 4 model.embedding_dim 2.0 +89 4 optimizer.lr 0.008205123765768459 +89 4 negative_sampler.num_negs_per_pos 60.0 +89 4 training.batch_size 1.0 +89 5 model.output_channels 33.0 +89 5 model.input_dropout 0.3306471386028818 +89 5 model.output_dropout 0.005501224946430128 +89 5 model.feature_map_dropout 0.09531810991060186 +89 5 model.embedding_dim 1.0 +89 5 optimizer.lr 0.007444695076481891 +89 5 negative_sampler.num_negs_per_pos 91.0 +89 5 training.batch_size 2.0 +89 6 model.output_channels 16.0 +89 6 model.input_dropout 0.3201547852146572 +89 6 model.output_dropout 0.45808135374894926 +89 6 model.feature_map_dropout 0.2905558611659152 +89 6 model.embedding_dim 2.0 +89 6 optimizer.lr 0.0011410345525512385 +89 6 negative_sampler.num_negs_per_pos 94.0 +89 6 training.batch_size 0.0 +89 7 model.output_channels 34.0 +89 7 model.input_dropout 0.4701247754058517 +89 7 model.output_dropout 0.3623253427244113 +89 7 model.feature_map_dropout 
0.47481742816242606 +89 7 model.embedding_dim 0.0 +89 7 optimizer.lr 0.010880401224034235 +89 7 negative_sampler.num_negs_per_pos 94.0 +89 7 training.batch_size 0.0 +89 8 model.output_channels 61.0 +89 8 model.input_dropout 0.21381197489691472 +89 8 model.output_dropout 0.1708933091474465 +89 8 model.feature_map_dropout 0.18860035193641422 +89 8 model.embedding_dim 1.0 +89 8 optimizer.lr 0.07958176786638674 +89 8 negative_sampler.num_negs_per_pos 10.0 +89 8 training.batch_size 2.0 +89 9 model.output_channels 54.0 +89 9 model.input_dropout 0.465309292845321 +89 9 model.output_dropout 0.4944766270685007 +89 9 model.feature_map_dropout 0.28274273203518296 +89 9 model.embedding_dim 1.0 +89 9 optimizer.lr 0.0015217827449341313 +89 9 negative_sampler.num_negs_per_pos 2.0 +89 9 training.batch_size 2.0 +89 10 model.output_channels 64.0 +89 10 model.input_dropout 0.1902545401630526 +89 10 model.output_dropout 0.2870205986224712 +89 10 model.feature_map_dropout 0.40809806302654167 +89 10 model.embedding_dim 1.0 +89 10 optimizer.lr 0.09080579043981779 +89 10 negative_sampler.num_negs_per_pos 46.0 +89 10 training.batch_size 1.0 +89 11 model.output_channels 26.0 +89 11 model.input_dropout 0.10475572189896215 +89 11 model.output_dropout 0.2437537105365929 +89 11 model.feature_map_dropout 0.008841640732522804 +89 11 model.embedding_dim 2.0 +89 11 optimizer.lr 0.002657514755575835 +89 11 negative_sampler.num_negs_per_pos 42.0 +89 11 training.batch_size 2.0 +89 12 model.output_channels 26.0 +89 12 model.input_dropout 0.40135794101961886 +89 12 model.output_dropout 0.24956843332343231 +89 12 model.feature_map_dropout 0.14636018881613388 +89 12 model.embedding_dim 1.0 +89 12 optimizer.lr 0.0016392029091048582 +89 12 negative_sampler.num_negs_per_pos 18.0 +89 12 training.batch_size 1.0 +89 13 model.output_channels 62.0 +89 13 model.input_dropout 0.1294342175998353 +89 13 model.output_dropout 0.33997196142534836 +89 13 model.feature_map_dropout 0.11902242147307507 +89 13 
model.embedding_dim 1.0 +89 13 optimizer.lr 0.0013678624555935029 +89 13 negative_sampler.num_negs_per_pos 47.0 +89 13 training.batch_size 2.0 +89 14 model.output_channels 41.0 +89 14 model.input_dropout 0.10266257578786847 +89 14 model.output_dropout 0.04060660164040858 +89 14 model.feature_map_dropout 0.3208014377263987 +89 14 model.embedding_dim 2.0 +89 14 optimizer.lr 0.006253363901922977 +89 14 negative_sampler.num_negs_per_pos 87.0 +89 14 training.batch_size 2.0 +89 15 model.output_channels 62.0 +89 15 model.input_dropout 0.47717132319454186 +89 15 model.output_dropout 0.4808514187680685 +89 15 model.feature_map_dropout 0.1402424505419277 +89 15 model.embedding_dim 1.0 +89 15 optimizer.lr 0.006955077957963263 +89 15 negative_sampler.num_negs_per_pos 60.0 +89 15 training.batch_size 1.0 +89 16 model.output_channels 29.0 +89 16 model.input_dropout 0.17687723786829074 +89 16 model.output_dropout 0.27057554198152745 +89 16 model.feature_map_dropout 0.4409604441067008 +89 16 model.embedding_dim 1.0 +89 16 optimizer.lr 0.0055317599553841084 +89 16 negative_sampler.num_negs_per_pos 49.0 +89 16 training.batch_size 1.0 +89 17 model.output_channels 54.0 +89 17 model.input_dropout 0.4270045120534846 +89 17 model.output_dropout 0.24343728398662695 +89 17 model.feature_map_dropout 0.4300801792055298 +89 17 model.embedding_dim 1.0 +89 17 optimizer.lr 0.0047482311725362825 +89 17 negative_sampler.num_negs_per_pos 25.0 +89 17 training.batch_size 0.0 +89 18 model.output_channels 53.0 +89 18 model.input_dropout 0.2048416529574284 +89 18 model.output_dropout 0.24731250354234946 +89 18 model.feature_map_dropout 0.23566886957595912 +89 18 model.embedding_dim 1.0 +89 18 optimizer.lr 0.0027553434304541094 +89 18 negative_sampler.num_negs_per_pos 83.0 +89 18 training.batch_size 1.0 +89 19 model.output_channels 28.0 +89 19 model.input_dropout 0.2913729353879259 +89 19 model.output_dropout 0.14417297421055858 +89 19 model.feature_map_dropout 0.1714598181540859 +89 19 
model.embedding_dim 2.0 +89 19 optimizer.lr 0.0019003932944514227 +89 19 negative_sampler.num_negs_per_pos 39.0 +89 19 training.batch_size 1.0 +89 20 model.output_channels 38.0 +89 20 model.input_dropout 0.4233269363972759 +89 20 model.output_dropout 0.28056244439240813 +89 20 model.feature_map_dropout 0.4754851962108645 +89 20 model.embedding_dim 0.0 +89 20 optimizer.lr 0.0023721379023412263 +89 20 negative_sampler.num_negs_per_pos 10.0 +89 20 training.batch_size 1.0 +89 21 model.output_channels 51.0 +89 21 model.input_dropout 0.22054542956059925 +89 21 model.output_dropout 0.18605789031982362 +89 21 model.feature_map_dropout 0.32795478551682916 +89 21 model.embedding_dim 0.0 +89 21 optimizer.lr 0.0928535808234753 +89 21 negative_sampler.num_negs_per_pos 64.0 +89 21 training.batch_size 2.0 +89 22 model.output_channels 53.0 +89 22 model.input_dropout 0.24656886703402736 +89 22 model.output_dropout 0.43672168864532646 +89 22 model.feature_map_dropout 0.13580188709599855 +89 22 model.embedding_dim 2.0 +89 22 optimizer.lr 0.010163397886610158 +89 22 negative_sampler.num_negs_per_pos 59.0 +89 22 training.batch_size 2.0 +89 23 model.output_channels 41.0 +89 23 model.input_dropout 0.06615714485224433 +89 23 model.output_dropout 0.12359967676900074 +89 23 model.feature_map_dropout 0.1039709957502219 +89 23 model.embedding_dim 2.0 +89 23 optimizer.lr 0.006603750127607786 +89 23 negative_sampler.num_negs_per_pos 93.0 +89 23 training.batch_size 2.0 +89 24 model.output_channels 53.0 +89 24 model.input_dropout 0.0449680739601247 +89 24 model.output_dropout 0.3572643382677786 +89 24 model.feature_map_dropout 0.14838365979945073 +89 24 model.embedding_dim 2.0 +89 24 optimizer.lr 0.0027491885036223574 +89 24 negative_sampler.num_negs_per_pos 36.0 +89 24 training.batch_size 2.0 +89 25 model.output_channels 44.0 +89 25 model.input_dropout 0.30597550624520775 +89 25 model.output_dropout 0.4739474041029596 +89 25 model.feature_map_dropout 0.18866450466831886 +89 25 
model.embedding_dim 2.0 +89 25 optimizer.lr 0.07915078967268636 +89 25 negative_sampler.num_negs_per_pos 84.0 +89 25 training.batch_size 0.0 +89 26 model.output_channels 42.0 +89 26 model.input_dropout 0.47105538495198285 +89 26 model.output_dropout 0.07798861440617982 +89 26 model.feature_map_dropout 0.09570101535026809 +89 26 model.embedding_dim 1.0 +89 26 optimizer.lr 0.0033683345278811084 +89 26 negative_sampler.num_negs_per_pos 46.0 +89 26 training.batch_size 2.0 +89 27 model.output_channels 17.0 +89 27 model.input_dropout 0.039971885602704316 +89 27 model.output_dropout 0.31701784937415667 +89 27 model.feature_map_dropout 0.15500453254580304 +89 27 model.embedding_dim 1.0 +89 27 optimizer.lr 0.031164039371662327 +89 27 negative_sampler.num_negs_per_pos 64.0 +89 27 training.batch_size 1.0 +89 28 model.output_channels 26.0 +89 28 model.input_dropout 0.24124554422199307 +89 28 model.output_dropout 0.08182200596335437 +89 28 model.feature_map_dropout 0.21027413426690827 +89 28 model.embedding_dim 1.0 +89 28 optimizer.lr 0.001233320934915719 +89 28 negative_sampler.num_negs_per_pos 51.0 +89 28 training.batch_size 0.0 +89 29 model.output_channels 17.0 +89 29 model.input_dropout 0.0138792788365662 +89 29 model.output_dropout 0.47724655901011187 +89 29 model.feature_map_dropout 0.2777548769082996 +89 29 model.embedding_dim 1.0 +89 29 optimizer.lr 0.004582122626717547 +89 29 negative_sampler.num_negs_per_pos 56.0 +89 29 training.batch_size 0.0 +89 30 model.output_channels 35.0 +89 30 model.input_dropout 0.23153493721988022 +89 30 model.output_dropout 0.35889268792513107 +89 30 model.feature_map_dropout 0.019355573844823615 +89 30 model.embedding_dim 1.0 +89 30 optimizer.lr 0.00179751111460919 +89 30 negative_sampler.num_negs_per_pos 0.0 +89 30 training.batch_size 2.0 +89 31 model.output_channels 57.0 +89 31 model.input_dropout 0.2565737881026387 +89 31 model.output_dropout 0.41679347076882023 +89 31 model.feature_map_dropout 0.23011781562331302 +89 31 
model.embedding_dim 0.0 +89 31 optimizer.lr 0.03975247675540951 +89 31 negative_sampler.num_negs_per_pos 77.0 +89 31 training.batch_size 1.0 +89 32 model.output_channels 38.0 +89 32 model.input_dropout 0.41850829798512396 +89 32 model.output_dropout 0.13342006991918048 +89 32 model.feature_map_dropout 0.024589128353342038 +89 32 model.embedding_dim 1.0 +89 32 optimizer.lr 0.0023377120245080163 +89 32 negative_sampler.num_negs_per_pos 69.0 +89 32 training.batch_size 1.0 +89 33 model.output_channels 27.0 +89 33 model.input_dropout 0.3129131700641735 +89 33 model.output_dropout 0.07835950803659564 +89 33 model.feature_map_dropout 0.26933369023660947 +89 33 model.embedding_dim 2.0 +89 33 optimizer.lr 0.0081646656626673 +89 33 negative_sampler.num_negs_per_pos 47.0 +89 33 training.batch_size 1.0 +89 34 model.output_channels 25.0 +89 34 model.input_dropout 0.4781203833724107 +89 34 model.output_dropout 0.1258634358506645 +89 34 model.feature_map_dropout 0.3777349305380576 +89 34 model.embedding_dim 0.0 +89 34 optimizer.lr 0.06541771779531831 +89 34 negative_sampler.num_negs_per_pos 99.0 +89 34 training.batch_size 2.0 +89 35 model.output_channels 49.0 +89 35 model.input_dropout 0.005830539487731157 +89 35 model.output_dropout 0.4595018911129789 +89 35 model.feature_map_dropout 0.03868575346235548 +89 35 model.embedding_dim 0.0 +89 35 optimizer.lr 0.028972931393307835 +89 35 negative_sampler.num_negs_per_pos 58.0 +89 35 training.batch_size 0.0 +89 36 model.output_channels 25.0 +89 36 model.input_dropout 0.1342558555967971 +89 36 model.output_dropout 0.38441235618322056 +89 36 model.feature_map_dropout 0.272921374056915 +89 36 model.embedding_dim 1.0 +89 36 optimizer.lr 0.0028545471747613168 +89 36 negative_sampler.num_negs_per_pos 22.0 +89 36 training.batch_size 2.0 +89 37 model.output_channels 42.0 +89 37 model.input_dropout 0.42888645423776267 +89 37 model.output_dropout 0.4594924099380772 +89 37 model.feature_map_dropout 0.3892737536727084 +89 37 model.embedding_dim 2.0 
+89 37 optimizer.lr 0.010312232833562507 +89 37 negative_sampler.num_negs_per_pos 23.0 +89 37 training.batch_size 2.0 +89 38 model.output_channels 45.0 +89 38 model.input_dropout 0.3178503402524104 +89 38 model.output_dropout 0.15177397029492967 +89 38 model.feature_map_dropout 0.15027657527860477 +89 38 model.embedding_dim 1.0 +89 38 optimizer.lr 0.041220987211001 +89 38 negative_sampler.num_negs_per_pos 85.0 +89 38 training.batch_size 0.0 +89 39 model.output_channels 42.0 +89 39 model.input_dropout 0.3243800180138297 +89 39 model.output_dropout 0.25458444511098416 +89 39 model.feature_map_dropout 0.36712650871758395 +89 39 model.embedding_dim 2.0 +89 39 optimizer.lr 0.0018808539911313816 +89 39 negative_sampler.num_negs_per_pos 94.0 +89 39 training.batch_size 2.0 +89 40 model.output_channels 31.0 +89 40 model.input_dropout 0.10421973682697822 +89 40 model.output_dropout 0.49185688287290086 +89 40 model.feature_map_dropout 0.3163919450855043 +89 40 model.embedding_dim 0.0 +89 40 optimizer.lr 0.023477538018597772 +89 40 negative_sampler.num_negs_per_pos 30.0 +89 40 training.batch_size 2.0 +89 41 model.output_channels 46.0 +89 41 model.input_dropout 0.35824574505772805 +89 41 model.output_dropout 0.42063582018757567 +89 41 model.feature_map_dropout 0.4452141428513205 +89 41 model.embedding_dim 1.0 +89 41 optimizer.lr 0.0874301337720409 +89 41 negative_sampler.num_negs_per_pos 99.0 +89 41 training.batch_size 0.0 +89 42 model.output_channels 29.0 +89 42 model.input_dropout 0.040151630012136896 +89 42 model.output_dropout 0.043025840346628375 +89 42 model.feature_map_dropout 0.27997579430714065 +89 42 model.embedding_dim 2.0 +89 42 optimizer.lr 0.07007632232784256 +89 42 negative_sampler.num_negs_per_pos 59.0 +89 42 training.batch_size 2.0 +89 43 model.output_channels 33.0 +89 43 model.input_dropout 0.051058894825412704 +89 43 model.output_dropout 0.03686714401392838 +89 43 model.feature_map_dropout 0.026894373594052168 +89 43 model.embedding_dim 2.0 +89 43 
optimizer.lr 0.06581789348225665 +89 43 negative_sampler.num_negs_per_pos 49.0 +89 43 training.batch_size 0.0 +89 44 model.output_channels 42.0 +89 44 model.input_dropout 0.3844973647336425 +89 44 model.output_dropout 0.2106198932820862 +89 44 model.feature_map_dropout 0.381702652249373 +89 44 model.embedding_dim 1.0 +89 44 optimizer.lr 0.022746187771109146 +89 44 negative_sampler.num_negs_per_pos 97.0 +89 44 training.batch_size 0.0 +89 45 model.output_channels 42.0 +89 45 model.input_dropout 0.04318750899982726 +89 45 model.output_dropout 0.4461647290701898 +89 45 model.feature_map_dropout 0.09846211423649409 +89 45 model.embedding_dim 0.0 +89 45 optimizer.lr 0.007629431310670742 +89 45 negative_sampler.num_negs_per_pos 48.0 +89 45 training.batch_size 2.0 +89 46 model.output_channels 33.0 +89 46 model.input_dropout 0.37014474568753486 +89 46 model.output_dropout 0.3076057248263447 +89 46 model.feature_map_dropout 0.21748482699962146 +89 46 model.embedding_dim 0.0 +89 46 optimizer.lr 0.003742507322579753 +89 46 negative_sampler.num_negs_per_pos 89.0 +89 46 training.batch_size 1.0 +89 47 model.output_channels 36.0 +89 47 model.input_dropout 0.3701869459726507 +89 47 model.output_dropout 0.2091985284583487 +89 47 model.feature_map_dropout 0.09259002009555278 +89 47 model.embedding_dim 2.0 +89 47 optimizer.lr 0.0013490871341872593 +89 47 negative_sampler.num_negs_per_pos 54.0 +89 47 training.batch_size 2.0 +89 48 model.output_channels 23.0 +89 48 model.input_dropout 0.49925875155693095 +89 48 model.output_dropout 0.218381557285524 +89 48 model.feature_map_dropout 0.14489216551660622 +89 48 model.embedding_dim 2.0 +89 48 optimizer.lr 0.01142992954437358 +89 48 negative_sampler.num_negs_per_pos 94.0 +89 48 training.batch_size 2.0 +89 49 model.output_channels 44.0 +89 49 model.input_dropout 0.1272282861501508 +89 49 model.output_dropout 0.41994589834648427 +89 49 model.feature_map_dropout 0.06335170379813654 +89 49 model.embedding_dim 0.0 +89 49 optimizer.lr 
0.014288306023696764 +89 49 negative_sampler.num_negs_per_pos 8.0 +89 49 training.batch_size 2.0 +89 50 model.output_channels 24.0 +89 50 model.input_dropout 0.11032504525962922 +89 50 model.output_dropout 0.022382707249874678 +89 50 model.feature_map_dropout 0.1934153565705643 +89 50 model.embedding_dim 0.0 +89 50 optimizer.lr 0.014391420200412204 +89 50 negative_sampler.num_negs_per_pos 81.0 +89 50 training.batch_size 2.0 +89 51 model.output_channels 40.0 +89 51 model.input_dropout 0.21318753247289868 +89 51 model.output_dropout 0.3854792867938581 +89 51 model.feature_map_dropout 0.30551102024971055 +89 51 model.embedding_dim 2.0 +89 51 optimizer.lr 0.0012570256971152353 +89 51 negative_sampler.num_negs_per_pos 57.0 +89 51 training.batch_size 1.0 +89 52 model.output_channels 41.0 +89 52 model.input_dropout 0.4554488265740507 +89 52 model.output_dropout 0.33233503092481814 +89 52 model.feature_map_dropout 0.43985110239439396 +89 52 model.embedding_dim 0.0 +89 52 optimizer.lr 0.02211486722549904 +89 52 negative_sampler.num_negs_per_pos 4.0 +89 52 training.batch_size 2.0 +89 53 model.output_channels 28.0 +89 53 model.input_dropout 0.08591211204010851 +89 53 model.output_dropout 0.3663883720930819 +89 53 model.feature_map_dropout 0.3780622980479313 +89 53 model.embedding_dim 1.0 +89 53 optimizer.lr 0.002902549763020936 +89 53 negative_sampler.num_negs_per_pos 21.0 +89 53 training.batch_size 0.0 +89 54 model.output_channels 56.0 +89 54 model.input_dropout 0.03206593599468416 +89 54 model.output_dropout 0.2746865536947017 +89 54 model.feature_map_dropout 0.1908552936491577 +89 54 model.embedding_dim 1.0 +89 54 optimizer.lr 0.0011279765364712263 +89 54 negative_sampler.num_negs_per_pos 21.0 +89 54 training.batch_size 2.0 +89 55 model.output_channels 55.0 +89 55 model.input_dropout 0.39106098813042384 +89 55 model.output_dropout 0.010083187525133652 +89 55 model.feature_map_dropout 0.4846929964489957 +89 55 model.embedding_dim 0.0 +89 55 optimizer.lr 0.02168881827777765 
+89 55 negative_sampler.num_negs_per_pos 5.0 +89 55 training.batch_size 2.0 +89 56 model.output_channels 62.0 +89 56 model.input_dropout 0.4169218937993216 +89 56 model.output_dropout 0.07257909498749204 +89 56 model.feature_map_dropout 0.40714042410657936 +89 56 model.embedding_dim 0.0 +89 56 optimizer.lr 0.0717979951823613 +89 56 negative_sampler.num_negs_per_pos 65.0 +89 56 training.batch_size 0.0 +89 57 model.output_channels 28.0 +89 57 model.input_dropout 0.17503140913143156 +89 57 model.output_dropout 0.4080360287524276 +89 57 model.feature_map_dropout 0.43722419929970546 +89 57 model.embedding_dim 0.0 +89 57 optimizer.lr 0.001779019677491359 +89 57 negative_sampler.num_negs_per_pos 14.0 +89 57 training.batch_size 0.0 +89 58 model.output_channels 51.0 +89 58 model.input_dropout 0.42094492450786714 +89 58 model.output_dropout 0.1206338749671308 +89 58 model.feature_map_dropout 0.04136201976919829 +89 58 model.embedding_dim 0.0 +89 58 optimizer.lr 0.009982352040668078 +89 58 negative_sampler.num_negs_per_pos 47.0 +89 58 training.batch_size 1.0 +89 59 model.output_channels 46.0 +89 59 model.input_dropout 0.057520267656380186 +89 59 model.output_dropout 0.15744535491769784 +89 59 model.feature_map_dropout 0.32568358293065774 +89 59 model.embedding_dim 1.0 +89 59 optimizer.lr 0.0318741132603328 +89 59 negative_sampler.num_negs_per_pos 73.0 +89 59 training.batch_size 2.0 +89 60 model.output_channels 41.0 +89 60 model.input_dropout 0.49456669873092557 +89 60 model.output_dropout 0.09891056899781409 +89 60 model.feature_map_dropout 0.07075751501661326 +89 60 model.embedding_dim 1.0 +89 60 optimizer.lr 0.032483026422965355 +89 60 negative_sampler.num_negs_per_pos 93.0 +89 60 training.batch_size 2.0 +89 61 model.output_channels 34.0 +89 61 model.input_dropout 0.017769347686633796 +89 61 model.output_dropout 0.4599696417528906 +89 61 model.feature_map_dropout 0.2755275542448275 +89 61 model.embedding_dim 0.0 +89 61 optimizer.lr 0.0045562902805864275 +89 61 
negative_sampler.num_negs_per_pos 78.0 +89 61 training.batch_size 0.0 +89 62 model.output_channels 49.0 +89 62 model.input_dropout 0.030144528728632936 +89 62 model.output_dropout 0.33627773952840756 +89 62 model.feature_map_dropout 0.1057446957618502 +89 62 model.embedding_dim 0.0 +89 62 optimizer.lr 0.004584367132236179 +89 62 negative_sampler.num_negs_per_pos 45.0 +89 62 training.batch_size 2.0 +89 63 model.output_channels 48.0 +89 63 model.input_dropout 0.3046162502035748 +89 63 model.output_dropout 0.4381379510554628 +89 63 model.feature_map_dropout 0.21287320694442496 +89 63 model.embedding_dim 2.0 +89 63 optimizer.lr 0.043562224255728796 +89 63 negative_sampler.num_negs_per_pos 91.0 +89 63 training.batch_size 0.0 +89 64 model.output_channels 19.0 +89 64 model.input_dropout 0.49684781740513234 +89 64 model.output_dropout 0.1135853916213016 +89 64 model.feature_map_dropout 0.33515737626009123 +89 64 model.embedding_dim 0.0 +89 64 optimizer.lr 0.009672679917584266 +89 64 negative_sampler.num_negs_per_pos 27.0 +89 64 training.batch_size 0.0 +89 65 model.output_channels 60.0 +89 65 model.input_dropout 0.2733536684268982 +89 65 model.output_dropout 0.2672767233934037 +89 65 model.feature_map_dropout 0.163512102824447 +89 65 model.embedding_dim 2.0 +89 65 optimizer.lr 0.028894281632997534 +89 65 negative_sampler.num_negs_per_pos 19.0 +89 65 training.batch_size 0.0 +89 66 model.output_channels 25.0 +89 66 model.input_dropout 0.2632789786006784 +89 66 model.output_dropout 0.10768471933070634 +89 66 model.feature_map_dropout 0.03999887921247297 +89 66 model.embedding_dim 1.0 +89 66 optimizer.lr 0.036338770589182466 +89 66 negative_sampler.num_negs_per_pos 74.0 +89 66 training.batch_size 2.0 +89 67 model.output_channels 25.0 +89 67 model.input_dropout 0.16760721674783302 +89 67 model.output_dropout 0.28010128639995513 +89 67 model.feature_map_dropout 0.3157639501977394 +89 67 model.embedding_dim 0.0 +89 67 optimizer.lr 0.018053815389873808 +89 67 
negative_sampler.num_negs_per_pos 53.0 +89 67 training.batch_size 1.0 +89 68 model.output_channels 33.0 +89 68 model.input_dropout 0.2512243775057634 +89 68 model.output_dropout 0.21611798530907822 +89 68 model.feature_map_dropout 0.30126711591127336 +89 68 model.embedding_dim 2.0 +89 68 optimizer.lr 0.0265032249193347 +89 68 negative_sampler.num_negs_per_pos 62.0 +89 68 training.batch_size 1.0 +89 69 model.output_channels 49.0 +89 69 model.input_dropout 0.3835498627318226 +89 69 model.output_dropout 0.2213828284753971 +89 69 model.feature_map_dropout 0.4907958315828995 +89 69 model.embedding_dim 0.0 +89 69 optimizer.lr 0.006224826310837567 +89 69 negative_sampler.num_negs_per_pos 0.0 +89 69 training.batch_size 1.0 +89 70 model.output_channels 37.0 +89 70 model.input_dropout 0.25236558004870513 +89 70 model.output_dropout 0.4777643218477287 +89 70 model.feature_map_dropout 0.3940243856685761 +89 70 model.embedding_dim 1.0 +89 70 optimizer.lr 0.052887998061186575 +89 70 negative_sampler.num_negs_per_pos 44.0 +89 70 training.batch_size 0.0 +89 71 model.output_channels 52.0 +89 71 model.input_dropout 0.023199342913064414 +89 71 model.output_dropout 0.06440944778198171 +89 71 model.feature_map_dropout 0.07886316558170797 +89 71 model.embedding_dim 2.0 +89 71 optimizer.lr 0.0010200796423368522 +89 71 negative_sampler.num_negs_per_pos 91.0 +89 71 training.batch_size 1.0 +89 72 model.output_channels 20.0 +89 72 model.input_dropout 0.26472732710761476 +89 72 model.output_dropout 0.13073385862646175 +89 72 model.feature_map_dropout 0.09992893419920496 +89 72 model.embedding_dim 2.0 +89 72 optimizer.lr 0.03472379864654394 +89 72 negative_sampler.num_negs_per_pos 96.0 +89 72 training.batch_size 1.0 +89 73 model.output_channels 26.0 +89 73 model.input_dropout 0.2275865003720009 +89 73 model.output_dropout 0.40888517390707285 +89 73 model.feature_map_dropout 0.08785167060513654 +89 73 model.embedding_dim 1.0 +89 73 optimizer.lr 0.08891870777689496 +89 73 
negative_sampler.num_negs_per_pos 75.0 +89 73 training.batch_size 0.0 +89 74 model.output_channels 40.0 +89 74 model.input_dropout 0.1486848468361292 +89 74 model.output_dropout 0.02939512736120814 +89 74 model.feature_map_dropout 0.15783872407185517 +89 74 model.embedding_dim 0.0 +89 74 optimizer.lr 0.023186897339622477 +89 74 negative_sampler.num_negs_per_pos 10.0 +89 74 training.batch_size 1.0 +89 75 model.output_channels 29.0 +89 75 model.input_dropout 0.09321851440366041 +89 75 model.output_dropout 0.16326720981518272 +89 75 model.feature_map_dropout 0.05684321731816816 +89 75 model.embedding_dim 1.0 +89 75 optimizer.lr 0.07172668375490737 +89 75 negative_sampler.num_negs_per_pos 56.0 +89 75 training.batch_size 1.0 +89 76 model.output_channels 34.0 +89 76 model.input_dropout 0.14783942229236002 +89 76 model.output_dropout 0.39488661683812604 +89 76 model.feature_map_dropout 0.4096937484944602 +89 76 model.embedding_dim 0.0 +89 76 optimizer.lr 0.031125355304805587 +89 76 negative_sampler.num_negs_per_pos 39.0 +89 76 training.batch_size 0.0 +89 77 model.output_channels 43.0 +89 77 model.input_dropout 0.2725291304140698 +89 77 model.output_dropout 0.028226165397484182 +89 77 model.feature_map_dropout 0.2766865034366877 +89 77 model.embedding_dim 2.0 +89 77 optimizer.lr 0.001927081443236143 +89 77 negative_sampler.num_negs_per_pos 72.0 +89 77 training.batch_size 2.0 +89 78 model.output_channels 27.0 +89 78 model.input_dropout 0.24329138016200558 +89 78 model.output_dropout 0.32331551653507684 +89 78 model.feature_map_dropout 0.3776747642614243 +89 78 model.embedding_dim 2.0 +89 78 optimizer.lr 0.02998460000903844 +89 78 negative_sampler.num_negs_per_pos 75.0 +89 78 training.batch_size 0.0 +89 79 model.output_channels 25.0 +89 79 model.input_dropout 0.2869117703767488 +89 79 model.output_dropout 0.2553216212146508 +89 79 model.feature_map_dropout 0.17384282289571534 +89 79 model.embedding_dim 0.0 +89 79 optimizer.lr 0.06999905823874941 +89 79 
negative_sampler.num_negs_per_pos 48.0 +89 79 training.batch_size 0.0 +89 80 model.output_channels 23.0 +89 80 model.input_dropout 0.06332218497481029 +89 80 model.output_dropout 0.47246782790334363 +89 80 model.feature_map_dropout 0.3711530315428658 +89 80 model.embedding_dim 2.0 +89 80 optimizer.lr 0.0366727169030388 +89 80 negative_sampler.num_negs_per_pos 36.0 +89 80 training.batch_size 2.0 +89 81 model.output_channels 43.0 +89 81 model.input_dropout 0.07921786461943364 +89 81 model.output_dropout 0.057273486540723784 +89 81 model.feature_map_dropout 0.2214456829174548 +89 81 model.embedding_dim 2.0 +89 81 optimizer.lr 0.08134197482939642 +89 81 negative_sampler.num_negs_per_pos 0.0 +89 81 training.batch_size 0.0 +89 82 model.output_channels 52.0 +89 82 model.input_dropout 0.061904460539822403 +89 82 model.output_dropout 0.00012838645346169297 +89 82 model.feature_map_dropout 0.10309566787148394 +89 82 model.embedding_dim 2.0 +89 82 optimizer.lr 0.0673775132569629 +89 82 negative_sampler.num_negs_per_pos 87.0 +89 82 training.batch_size 2.0 +89 83 model.output_channels 34.0 +89 83 model.input_dropout 0.36218513999981955 +89 83 model.output_dropout 0.2822242818325564 +89 83 model.feature_map_dropout 0.16769848306944274 +89 83 model.embedding_dim 2.0 +89 83 optimizer.lr 0.03521620368361548 +89 83 negative_sampler.num_negs_per_pos 3.0 +89 83 training.batch_size 0.0 +89 84 model.output_channels 40.0 +89 84 model.input_dropout 0.46987219020664006 +89 84 model.output_dropout 0.3139928410584405 +89 84 model.feature_map_dropout 0.19801432370838506 +89 84 model.embedding_dim 2.0 +89 84 optimizer.lr 0.07778906166556146 +89 84 negative_sampler.num_negs_per_pos 25.0 +89 84 training.batch_size 2.0 +89 85 model.output_channels 33.0 +89 85 model.input_dropout 0.49489271945751767 +89 85 model.output_dropout 0.0843829110587831 +89 85 model.feature_map_dropout 0.1750691525823615 +89 85 model.embedding_dim 2.0 +89 85 optimizer.lr 0.01438093034879384 +89 85 
negative_sampler.num_negs_per_pos 67.0 +89 85 training.batch_size 1.0 +89 1 dataset """kinships""" +89 1 model """conve""" +89 1 loss """softplus""" +89 1 regularizer """no""" +89 1 optimizer """adam""" +89 1 training_loop """owa""" +89 1 negative_sampler """basic""" +89 1 evaluator """rankbased""" +89 2 dataset """kinships""" +89 2 model """conve""" +89 2 loss """softplus""" +89 2 regularizer """no""" +89 2 optimizer """adam""" +89 2 training_loop """owa""" +89 2 negative_sampler """basic""" +89 2 evaluator """rankbased""" +89 3 dataset """kinships""" +89 3 model """conve""" +89 3 loss """softplus""" +89 3 regularizer """no""" +89 3 optimizer """adam""" +89 3 training_loop """owa""" +89 3 negative_sampler """basic""" +89 3 evaluator """rankbased""" +89 4 dataset """kinships""" +89 4 model """conve""" +89 4 loss """softplus""" +89 4 regularizer """no""" +89 4 optimizer """adam""" +89 4 training_loop """owa""" +89 4 negative_sampler """basic""" +89 4 evaluator """rankbased""" +89 5 dataset """kinships""" +89 5 model """conve""" +89 5 loss """softplus""" +89 5 regularizer """no""" +89 5 optimizer """adam""" +89 5 training_loop """owa""" +89 5 negative_sampler """basic""" +89 5 evaluator """rankbased""" +89 6 dataset """kinships""" +89 6 model """conve""" +89 6 loss """softplus""" +89 6 regularizer """no""" +89 6 optimizer """adam""" +89 6 training_loop """owa""" +89 6 negative_sampler """basic""" +89 6 evaluator """rankbased""" +89 7 dataset """kinships""" +89 7 model """conve""" +89 7 loss """softplus""" +89 7 regularizer """no""" +89 7 optimizer """adam""" +89 7 training_loop """owa""" +89 7 negative_sampler """basic""" +89 7 evaluator """rankbased""" +89 8 dataset """kinships""" +89 8 model """conve""" +89 8 loss """softplus""" +89 8 regularizer """no""" +89 8 optimizer """adam""" +89 8 training_loop """owa""" +89 8 negative_sampler """basic""" +89 8 evaluator """rankbased""" +89 9 dataset """kinships""" +89 9 model """conve""" +89 9 loss """softplus""" +89 9 
regularizer """no""" +89 9 optimizer """adam""" +89 9 training_loop """owa""" +89 9 negative_sampler """basic""" +89 9 evaluator """rankbased""" +89 10 dataset """kinships""" +89 10 model """conve""" +89 10 loss """softplus""" +89 10 regularizer """no""" +89 10 optimizer """adam""" +89 10 training_loop """owa""" +89 10 negative_sampler """basic""" +89 10 evaluator """rankbased""" +89 11 dataset """kinships""" +89 11 model """conve""" +89 11 loss """softplus""" +89 11 regularizer """no""" +89 11 optimizer """adam""" +89 11 training_loop """owa""" +89 11 negative_sampler """basic""" +89 11 evaluator """rankbased""" +89 12 dataset """kinships""" +89 12 model """conve""" +89 12 loss """softplus""" +89 12 regularizer """no""" +89 12 optimizer """adam""" +89 12 training_loop """owa""" +89 12 negative_sampler """basic""" +89 12 evaluator """rankbased""" +89 13 dataset """kinships""" +89 13 model """conve""" +89 13 loss """softplus""" +89 13 regularizer """no""" +89 13 optimizer """adam""" +89 13 training_loop """owa""" +89 13 negative_sampler """basic""" +89 13 evaluator """rankbased""" +89 14 dataset """kinships""" +89 14 model """conve""" +89 14 loss """softplus""" +89 14 regularizer """no""" +89 14 optimizer """adam""" +89 14 training_loop """owa""" +89 14 negative_sampler """basic""" +89 14 evaluator """rankbased""" +89 15 dataset """kinships""" +89 15 model """conve""" +89 15 loss """softplus""" +89 15 regularizer """no""" +89 15 optimizer """adam""" +89 15 training_loop """owa""" +89 15 negative_sampler """basic""" +89 15 evaluator """rankbased""" +89 16 dataset """kinships""" +89 16 model """conve""" +89 16 loss """softplus""" +89 16 regularizer """no""" +89 16 optimizer """adam""" +89 16 training_loop """owa""" +89 16 negative_sampler """basic""" +89 16 evaluator """rankbased""" +89 17 dataset """kinships""" +89 17 model """conve""" +89 17 loss """softplus""" +89 17 regularizer """no""" +89 17 optimizer """adam""" +89 17 training_loop """owa""" +89 17 
negative_sampler """basic""" +89 17 evaluator """rankbased""" +89 18 dataset """kinships""" +89 18 model """conve""" +89 18 loss """softplus""" +89 18 regularizer """no""" +89 18 optimizer """adam""" +89 18 training_loop """owa""" +89 18 negative_sampler """basic""" +89 18 evaluator """rankbased""" +89 19 dataset """kinships""" +89 19 model """conve""" +89 19 loss """softplus""" +89 19 regularizer """no""" +89 19 optimizer """adam""" +89 19 training_loop """owa""" +89 19 negative_sampler """basic""" +89 19 evaluator """rankbased""" +89 20 dataset """kinships""" +89 20 model """conve""" +89 20 loss """softplus""" +89 20 regularizer """no""" +89 20 optimizer """adam""" +89 20 training_loop """owa""" +89 20 negative_sampler """basic""" +89 20 evaluator """rankbased""" +89 21 dataset """kinships""" +89 21 model """conve""" +89 21 loss """softplus""" +89 21 regularizer """no""" +89 21 optimizer """adam""" +89 21 training_loop """owa""" +89 21 negative_sampler """basic""" +89 21 evaluator """rankbased""" +89 22 dataset """kinships""" +89 22 model """conve""" +89 22 loss """softplus""" +89 22 regularizer """no""" +89 22 optimizer """adam""" +89 22 training_loop """owa""" +89 22 negative_sampler """basic""" +89 22 evaluator """rankbased""" +89 23 dataset """kinships""" +89 23 model """conve""" +89 23 loss """softplus""" +89 23 regularizer """no""" +89 23 optimizer """adam""" +89 23 training_loop """owa""" +89 23 negative_sampler """basic""" +89 23 evaluator """rankbased""" +89 24 dataset """kinships""" +89 24 model """conve""" +89 24 loss """softplus""" +89 24 regularizer """no""" +89 24 optimizer """adam""" +89 24 training_loop """owa""" +89 24 negative_sampler """basic""" +89 24 evaluator """rankbased""" +89 25 dataset """kinships""" +89 25 model """conve""" +89 25 loss """softplus""" +89 25 regularizer """no""" +89 25 optimizer """adam""" +89 25 training_loop """owa""" +89 25 negative_sampler """basic""" +89 25 evaluator """rankbased""" +89 26 dataset """kinships""" +89 
26 model """conve""" +89 26 loss """softplus""" +89 26 regularizer """no""" +89 26 optimizer """adam""" +89 26 training_loop """owa""" +89 26 negative_sampler """basic""" +89 26 evaluator """rankbased""" +89 27 dataset """kinships""" +89 27 model """conve""" +89 27 loss """softplus""" +89 27 regularizer """no""" +89 27 optimizer """adam""" +89 27 training_loop """owa""" +89 27 negative_sampler """basic""" +89 27 evaluator """rankbased""" +89 28 dataset """kinships""" +89 28 model """conve""" +89 28 loss """softplus""" +89 28 regularizer """no""" +89 28 optimizer """adam""" +89 28 training_loop """owa""" +89 28 negative_sampler """basic""" +89 28 evaluator """rankbased""" +89 29 dataset """kinships""" +89 29 model """conve""" +89 29 loss """softplus""" +89 29 regularizer """no""" +89 29 optimizer """adam""" +89 29 training_loop """owa""" +89 29 negative_sampler """basic""" +89 29 evaluator """rankbased""" +89 30 dataset """kinships""" +89 30 model """conve""" +89 30 loss """softplus""" +89 30 regularizer """no""" +89 30 optimizer """adam""" +89 30 training_loop """owa""" +89 30 negative_sampler """basic""" +89 30 evaluator """rankbased""" +89 31 dataset """kinships""" +89 31 model """conve""" +89 31 loss """softplus""" +89 31 regularizer """no""" +89 31 optimizer """adam""" +89 31 training_loop """owa""" +89 31 negative_sampler """basic""" +89 31 evaluator """rankbased""" +89 32 dataset """kinships""" +89 32 model """conve""" +89 32 loss """softplus""" +89 32 regularizer """no""" +89 32 optimizer """adam""" +89 32 training_loop """owa""" +89 32 negative_sampler """basic""" +89 32 evaluator """rankbased""" +89 33 dataset """kinships""" +89 33 model """conve""" +89 33 loss """softplus""" +89 33 regularizer """no""" +89 33 optimizer """adam""" +89 33 training_loop """owa""" +89 33 negative_sampler """basic""" +89 33 evaluator """rankbased""" +89 34 dataset """kinships""" +89 34 model """conve""" +89 34 loss """softplus""" +89 34 regularizer """no""" +89 34 optimizer 
"""adam""" +89 34 training_loop """owa""" +89 34 negative_sampler """basic""" +89 34 evaluator """rankbased""" +89 35 dataset """kinships""" +89 35 model """conve""" +89 35 loss """softplus""" +89 35 regularizer """no""" +89 35 optimizer """adam""" +89 35 training_loop """owa""" +89 35 negative_sampler """basic""" +89 35 evaluator """rankbased""" +89 36 dataset """kinships""" +89 36 model """conve""" +89 36 loss """softplus""" +89 36 regularizer """no""" +89 36 optimizer """adam""" +89 36 training_loop """owa""" +89 36 negative_sampler """basic""" +89 36 evaluator """rankbased""" +89 37 dataset """kinships""" +89 37 model """conve""" +89 37 loss """softplus""" +89 37 regularizer """no""" +89 37 optimizer """adam""" +89 37 training_loop """owa""" +89 37 negative_sampler """basic""" +89 37 evaluator """rankbased""" +89 38 dataset """kinships""" +89 38 model """conve""" +89 38 loss """softplus""" +89 38 regularizer """no""" +89 38 optimizer """adam""" +89 38 training_loop """owa""" +89 38 negative_sampler """basic""" +89 38 evaluator """rankbased""" +89 39 dataset """kinships""" +89 39 model """conve""" +89 39 loss """softplus""" +89 39 regularizer """no""" +89 39 optimizer """adam""" +89 39 training_loop """owa""" +89 39 negative_sampler """basic""" +89 39 evaluator """rankbased""" +89 40 dataset """kinships""" +89 40 model """conve""" +89 40 loss """softplus""" +89 40 regularizer """no""" +89 40 optimizer """adam""" +89 40 training_loop """owa""" +89 40 negative_sampler """basic""" +89 40 evaluator """rankbased""" +89 41 dataset """kinships""" +89 41 model """conve""" +89 41 loss """softplus""" +89 41 regularizer """no""" +89 41 optimizer """adam""" +89 41 training_loop """owa""" +89 41 negative_sampler """basic""" +89 41 evaluator """rankbased""" +89 42 dataset """kinships""" +89 42 model """conve""" +89 42 loss """softplus""" +89 42 regularizer """no""" +89 42 optimizer """adam""" +89 42 training_loop """owa""" +89 42 negative_sampler """basic""" +89 42 evaluator 
"""rankbased""" +89 43 dataset """kinships""" +89 43 model """conve""" +89 43 loss """softplus""" +89 43 regularizer """no""" +89 43 optimizer """adam""" +89 43 training_loop """owa""" +89 43 negative_sampler """basic""" +89 43 evaluator """rankbased""" +89 44 dataset """kinships""" +89 44 model """conve""" +89 44 loss """softplus""" +89 44 regularizer """no""" +89 44 optimizer """adam""" +89 44 training_loop """owa""" +89 44 negative_sampler """basic""" +89 44 evaluator """rankbased""" +89 45 dataset """kinships""" +89 45 model """conve""" +89 45 loss """softplus""" +89 45 regularizer """no""" +89 45 optimizer """adam""" +89 45 training_loop """owa""" +89 45 negative_sampler """basic""" +89 45 evaluator """rankbased""" +89 46 dataset """kinships""" +89 46 model """conve""" +89 46 loss """softplus""" +89 46 regularizer """no""" +89 46 optimizer """adam""" +89 46 training_loop """owa""" +89 46 negative_sampler """basic""" +89 46 evaluator """rankbased""" +89 47 dataset """kinships""" +89 47 model """conve""" +89 47 loss """softplus""" +89 47 regularizer """no""" +89 47 optimizer """adam""" +89 47 training_loop """owa""" +89 47 negative_sampler """basic""" +89 47 evaluator """rankbased""" +89 48 dataset """kinships""" +89 48 model """conve""" +89 48 loss """softplus""" +89 48 regularizer """no""" +89 48 optimizer """adam""" +89 48 training_loop """owa""" +89 48 negative_sampler """basic""" +89 48 evaluator """rankbased""" +89 49 dataset """kinships""" +89 49 model """conve""" +89 49 loss """softplus""" +89 49 regularizer """no""" +89 49 optimizer """adam""" +89 49 training_loop """owa""" +89 49 negative_sampler """basic""" +89 49 evaluator """rankbased""" +89 50 dataset """kinships""" +89 50 model """conve""" +89 50 loss """softplus""" +89 50 regularizer """no""" +89 50 optimizer """adam""" +89 50 training_loop """owa""" +89 50 negative_sampler """basic""" +89 50 evaluator """rankbased""" +89 51 dataset """kinships""" +89 51 model """conve""" +89 51 loss 
"""softplus""" +89 51 regularizer """no""" +89 51 optimizer """adam""" +89 51 training_loop """owa""" +89 51 negative_sampler """basic""" +89 51 evaluator """rankbased""" +89 52 dataset """kinships""" +89 52 model """conve""" +89 52 loss """softplus""" +89 52 regularizer """no""" +89 52 optimizer """adam""" +89 52 training_loop """owa""" +89 52 negative_sampler """basic""" +89 52 evaluator """rankbased""" +89 53 dataset """kinships""" +89 53 model """conve""" +89 53 loss """softplus""" +89 53 regularizer """no""" +89 53 optimizer """adam""" +89 53 training_loop """owa""" +89 53 negative_sampler """basic""" +89 53 evaluator """rankbased""" +89 54 dataset """kinships""" +89 54 model """conve""" +89 54 loss """softplus""" +89 54 regularizer """no""" +89 54 optimizer """adam""" +89 54 training_loop """owa""" +89 54 negative_sampler """basic""" +89 54 evaluator """rankbased""" +89 55 dataset """kinships""" +89 55 model """conve""" +89 55 loss """softplus""" +89 55 regularizer """no""" +89 55 optimizer """adam""" +89 55 training_loop """owa""" +89 55 negative_sampler """basic""" +89 55 evaluator """rankbased""" +89 56 dataset """kinships""" +89 56 model """conve""" +89 56 loss """softplus""" +89 56 regularizer """no""" +89 56 optimizer """adam""" +89 56 training_loop """owa""" +89 56 negative_sampler """basic""" +89 56 evaluator """rankbased""" +89 57 dataset """kinships""" +89 57 model """conve""" +89 57 loss """softplus""" +89 57 regularizer """no""" +89 57 optimizer """adam""" +89 57 training_loop """owa""" +89 57 negative_sampler """basic""" +89 57 evaluator """rankbased""" +89 58 dataset """kinships""" +89 58 model """conve""" +89 58 loss """softplus""" +89 58 regularizer """no""" +89 58 optimizer """adam""" +89 58 training_loop """owa""" +89 58 negative_sampler """basic""" +89 58 evaluator """rankbased""" +89 59 dataset """kinships""" +89 59 model """conve""" +89 59 loss """softplus""" +89 59 regularizer """no""" +89 59 optimizer """adam""" +89 59 training_loop 
"""owa""" +89 59 negative_sampler """basic""" +89 59 evaluator """rankbased""" +89 60 dataset """kinships""" +89 60 model """conve""" +89 60 loss """softplus""" +89 60 regularizer """no""" +89 60 optimizer """adam""" +89 60 training_loop """owa""" +89 60 negative_sampler """basic""" +89 60 evaluator """rankbased""" +89 61 dataset """kinships""" +89 61 model """conve""" +89 61 loss """softplus""" +89 61 regularizer """no""" +89 61 optimizer """adam""" +89 61 training_loop """owa""" +89 61 negative_sampler """basic""" +89 61 evaluator """rankbased""" +89 62 dataset """kinships""" +89 62 model """conve""" +89 62 loss """softplus""" +89 62 regularizer """no""" +89 62 optimizer """adam""" +89 62 training_loop """owa""" +89 62 negative_sampler """basic""" +89 62 evaluator """rankbased""" +89 63 dataset """kinships""" +89 63 model """conve""" +89 63 loss """softplus""" +89 63 regularizer """no""" +89 63 optimizer """adam""" +89 63 training_loop """owa""" +89 63 negative_sampler """basic""" +89 63 evaluator """rankbased""" +89 64 dataset """kinships""" +89 64 model """conve""" +89 64 loss """softplus""" +89 64 regularizer """no""" +89 64 optimizer """adam""" +89 64 training_loop """owa""" +89 64 negative_sampler """basic""" +89 64 evaluator """rankbased""" +89 65 dataset """kinships""" +89 65 model """conve""" +89 65 loss """softplus""" +89 65 regularizer """no""" +89 65 optimizer """adam""" +89 65 training_loop """owa""" +89 65 negative_sampler """basic""" +89 65 evaluator """rankbased""" +89 66 dataset """kinships""" +89 66 model """conve""" +89 66 loss """softplus""" +89 66 regularizer """no""" +89 66 optimizer """adam""" +89 66 training_loop """owa""" +89 66 negative_sampler """basic""" +89 66 evaluator """rankbased""" +89 67 dataset """kinships""" +89 67 model """conve""" +89 67 loss """softplus""" +89 67 regularizer """no""" +89 67 optimizer """adam""" +89 67 training_loop """owa""" +89 67 negative_sampler """basic""" +89 67 evaluator """rankbased""" +89 68 dataset 
"""kinships""" +89 68 model """conve""" +89 68 loss """softplus""" +89 68 regularizer """no""" +89 68 optimizer """adam""" +89 68 training_loop """owa""" +89 68 negative_sampler """basic""" +89 68 evaluator """rankbased""" +89 69 dataset """kinships""" +89 69 model """conve""" +89 69 loss """softplus""" +89 69 regularizer """no""" +89 69 optimizer """adam""" +89 69 training_loop """owa""" +89 69 negative_sampler """basic""" +89 69 evaluator """rankbased""" +89 70 dataset """kinships""" +89 70 model """conve""" +89 70 loss """softplus""" +89 70 regularizer """no""" +89 70 optimizer """adam""" +89 70 training_loop """owa""" +89 70 negative_sampler """basic""" +89 70 evaluator """rankbased""" +89 71 dataset """kinships""" +89 71 model """conve""" +89 71 loss """softplus""" +89 71 regularizer """no""" +89 71 optimizer """adam""" +89 71 training_loop """owa""" +89 71 negative_sampler """basic""" +89 71 evaluator """rankbased""" +89 72 dataset """kinships""" +89 72 model """conve""" +89 72 loss """softplus""" +89 72 regularizer """no""" +89 72 optimizer """adam""" +89 72 training_loop """owa""" +89 72 negative_sampler """basic""" +89 72 evaluator """rankbased""" +89 73 dataset """kinships""" +89 73 model """conve""" +89 73 loss """softplus""" +89 73 regularizer """no""" +89 73 optimizer """adam""" +89 73 training_loop """owa""" +89 73 negative_sampler """basic""" +89 73 evaluator """rankbased""" +89 74 dataset """kinships""" +89 74 model """conve""" +89 74 loss """softplus""" +89 74 regularizer """no""" +89 74 optimizer """adam""" +89 74 training_loop """owa""" +89 74 negative_sampler """basic""" +89 74 evaluator """rankbased""" +89 75 dataset """kinships""" +89 75 model """conve""" +89 75 loss """softplus""" +89 75 regularizer """no""" +89 75 optimizer """adam""" +89 75 training_loop """owa""" +89 75 negative_sampler """basic""" +89 75 evaluator """rankbased""" +89 76 dataset """kinships""" +89 76 model """conve""" +89 76 loss """softplus""" +89 76 regularizer """no""" 
+89 76 optimizer """adam""" +89 76 training_loop """owa""" +89 76 negative_sampler """basic""" +89 76 evaluator """rankbased""" +89 77 dataset """kinships""" +89 77 model """conve""" +89 77 loss """softplus""" +89 77 regularizer """no""" +89 77 optimizer """adam""" +89 77 training_loop """owa""" +89 77 negative_sampler """basic""" +89 77 evaluator """rankbased""" +89 78 dataset """kinships""" +89 78 model """conve""" +89 78 loss """softplus""" +89 78 regularizer """no""" +89 78 optimizer """adam""" +89 78 training_loop """owa""" +89 78 negative_sampler """basic""" +89 78 evaluator """rankbased""" +89 79 dataset """kinships""" +89 79 model """conve""" +89 79 loss """softplus""" +89 79 regularizer """no""" +89 79 optimizer """adam""" +89 79 training_loop """owa""" +89 79 negative_sampler """basic""" +89 79 evaluator """rankbased""" +89 80 dataset """kinships""" +89 80 model """conve""" +89 80 loss """softplus""" +89 80 regularizer """no""" +89 80 optimizer """adam""" +89 80 training_loop """owa""" +89 80 negative_sampler """basic""" +89 80 evaluator """rankbased""" +89 81 dataset """kinships""" +89 81 model """conve""" +89 81 loss """softplus""" +89 81 regularizer """no""" +89 81 optimizer """adam""" +89 81 training_loop """owa""" +89 81 negative_sampler """basic""" +89 81 evaluator """rankbased""" +89 82 dataset """kinships""" +89 82 model """conve""" +89 82 loss """softplus""" +89 82 regularizer """no""" +89 82 optimizer """adam""" +89 82 training_loop """owa""" +89 82 negative_sampler """basic""" +89 82 evaluator """rankbased""" +89 83 dataset """kinships""" +89 83 model """conve""" +89 83 loss """softplus""" +89 83 regularizer """no""" +89 83 optimizer """adam""" +89 83 training_loop """owa""" +89 83 negative_sampler """basic""" +89 83 evaluator """rankbased""" +89 84 dataset """kinships""" +89 84 model """conve""" +89 84 loss """softplus""" +89 84 regularizer """no""" +89 84 optimizer """adam""" +89 84 training_loop """owa""" +89 84 negative_sampler """basic""" 
+89 84 evaluator """rankbased""" +89 85 dataset """kinships""" +89 85 model """conve""" +89 85 loss """softplus""" +89 85 regularizer """no""" +89 85 optimizer """adam""" +89 85 training_loop """owa""" +89 85 negative_sampler """basic""" +89 85 evaluator """rankbased""" +90 1 model.output_channels 63.0 +90 1 model.input_dropout 0.2523582286077725 +90 1 model.output_dropout 0.30518890265510784 +90 1 model.feature_map_dropout 0.40230647501836714 +90 1 model.embedding_dim 1.0 +90 1 optimizer.lr 0.004321652299547963 +90 1 negative_sampler.num_negs_per_pos 87.0 +90 1 training.batch_size 2.0 +90 2 model.output_channels 22.0 +90 2 model.input_dropout 0.47650458454897404 +90 2 model.output_dropout 0.46286122529063506 +90 2 model.feature_map_dropout 0.40037951616560247 +90 2 model.embedding_dim 1.0 +90 2 optimizer.lr 0.053582453397504004 +90 2 negative_sampler.num_negs_per_pos 14.0 +90 2 training.batch_size 2.0 +90 3 model.output_channels 54.0 +90 3 model.input_dropout 0.07929061619577504 +90 3 model.output_dropout 0.18340149236654946 +90 3 model.feature_map_dropout 0.045993603741620426 +90 3 model.embedding_dim 1.0 +90 3 optimizer.lr 0.013006268803253887 +90 3 negative_sampler.num_negs_per_pos 12.0 +90 3 training.batch_size 0.0 +90 4 model.output_channels 28.0 +90 4 model.input_dropout 0.20681153954073528 +90 4 model.output_dropout 0.16631113724972008 +90 4 model.feature_map_dropout 0.094248964003321 +90 4 model.embedding_dim 2.0 +90 4 optimizer.lr 0.00792400248633229 +90 4 negative_sampler.num_negs_per_pos 48.0 +90 4 training.batch_size 0.0 +90 5 model.output_channels 37.0 +90 5 model.input_dropout 0.3948427603428791 +90 5 model.output_dropout 0.17614582024218894 +90 5 model.feature_map_dropout 0.030450462118481958 +90 5 model.embedding_dim 1.0 +90 5 optimizer.lr 0.06324121390922853 +90 5 negative_sampler.num_negs_per_pos 37.0 +90 5 training.batch_size 0.0 +90 6 model.output_channels 39.0 +90 6 model.input_dropout 0.4316694735072104 +90 6 model.output_dropout 
0.26385605350664865 +90 6 model.feature_map_dropout 0.46434451879886246 +90 6 model.embedding_dim 2.0 +90 6 optimizer.lr 0.07306830805964637 +90 6 negative_sampler.num_negs_per_pos 90.0 +90 6 training.batch_size 1.0 +90 7 model.output_channels 32.0 +90 7 model.input_dropout 0.37627925860492434 +90 7 model.output_dropout 0.09098968803172564 +90 7 model.feature_map_dropout 0.4068213377934671 +90 7 model.embedding_dim 1.0 +90 7 optimizer.lr 0.04540162637090997 +90 7 negative_sampler.num_negs_per_pos 89.0 +90 7 training.batch_size 0.0 +90 8 model.output_channels 64.0 +90 8 model.input_dropout 0.033608489415793474 +90 8 model.output_dropout 0.3026718104851576 +90 8 model.feature_map_dropout 0.10247578984030004 +90 8 model.embedding_dim 0.0 +90 8 optimizer.lr 0.0013051982866304696 +90 8 negative_sampler.num_negs_per_pos 45.0 +90 8 training.batch_size 0.0 +90 9 model.output_channels 21.0 +90 9 model.input_dropout 0.037351444984002946 +90 9 model.output_dropout 0.06282220653461484 +90 9 model.feature_map_dropout 0.11462388791286682 +90 9 model.embedding_dim 0.0 +90 9 optimizer.lr 0.016442793844952834 +90 9 negative_sampler.num_negs_per_pos 99.0 +90 9 training.batch_size 0.0 +90 10 model.output_channels 48.0 +90 10 model.input_dropout 0.40363644314602626 +90 10 model.output_dropout 0.14200750592289685 +90 10 model.feature_map_dropout 0.3570655408811891 +90 10 model.embedding_dim 1.0 +90 10 optimizer.lr 0.006530930203117478 +90 10 negative_sampler.num_negs_per_pos 52.0 +90 10 training.batch_size 1.0 +90 11 model.output_channels 44.0 +90 11 model.input_dropout 0.18966549843029107 +90 11 model.output_dropout 0.1289969602964427 +90 11 model.feature_map_dropout 0.19652199211212085 +90 11 model.embedding_dim 2.0 +90 11 optimizer.lr 0.005589296437342303 +90 11 negative_sampler.num_negs_per_pos 54.0 +90 11 training.batch_size 0.0 +90 12 model.output_channels 52.0 +90 12 model.input_dropout 0.3509617057697803 +90 12 model.output_dropout 0.424208829318281 +90 12 
model.feature_map_dropout 0.09281371792823256 +90 12 model.embedding_dim 1.0 +90 12 optimizer.lr 0.001106865413777826 +90 12 negative_sampler.num_negs_per_pos 4.0 +90 12 training.batch_size 0.0 +90 13 model.output_channels 62.0 +90 13 model.input_dropout 0.3252258259560743 +90 13 model.output_dropout 0.477265861125131 +90 13 model.feature_map_dropout 0.18196340771653752 +90 13 model.embedding_dim 1.0 +90 13 optimizer.lr 0.050451749197440755 +90 13 negative_sampler.num_negs_per_pos 44.0 +90 13 training.batch_size 2.0 +90 14 model.output_channels 16.0 +90 14 model.input_dropout 0.4438937421680005 +90 14 model.output_dropout 0.2301896543436745 +90 14 model.feature_map_dropout 0.1318170115729912 +90 14 model.embedding_dim 2.0 +90 14 optimizer.lr 0.0033842733229283647 +90 14 negative_sampler.num_negs_per_pos 35.0 +90 14 training.batch_size 0.0 +90 15 model.output_channels 41.0 +90 15 model.input_dropout 0.18450495789008287 +90 15 model.output_dropout 0.1700859627881371 +90 15 model.feature_map_dropout 0.2705087205978032 +90 15 model.embedding_dim 1.0 +90 15 optimizer.lr 0.002749676991206866 +90 15 negative_sampler.num_negs_per_pos 77.0 +90 15 training.batch_size 0.0 +90 16 model.output_channels 41.0 +90 16 model.input_dropout 0.44640534944503785 +90 16 model.output_dropout 0.49079174971308115 +90 16 model.feature_map_dropout 0.38633986013763516 +90 16 model.embedding_dim 0.0 +90 16 optimizer.lr 0.03474180788283017 +90 16 negative_sampler.num_negs_per_pos 45.0 +90 16 training.batch_size 1.0 +90 17 model.output_channels 26.0 +90 17 model.input_dropout 0.43009051585945995 +90 17 model.output_dropout 0.4338029178275247 +90 17 model.feature_map_dropout 0.2118706577316275 +90 17 model.embedding_dim 0.0 +90 17 optimizer.lr 0.008568174672307619 +90 17 negative_sampler.num_negs_per_pos 60.0 +90 17 training.batch_size 0.0 +90 18 model.output_channels 29.0 +90 18 model.input_dropout 0.13497947147594824 +90 18 model.output_dropout 0.4797705666849412 +90 18 model.feature_map_dropout 
0.10856032136115085 +90 18 model.embedding_dim 2.0 +90 18 optimizer.lr 0.0985045290433752 +90 18 negative_sampler.num_negs_per_pos 12.0 +90 18 training.batch_size 0.0 +90 19 model.output_channels 36.0 +90 19 model.input_dropout 0.040362344672077055 +90 19 model.output_dropout 0.38294830109335937 +90 19 model.feature_map_dropout 0.291948593115203 +90 19 model.embedding_dim 1.0 +90 19 optimizer.lr 0.0046066702954028985 +90 19 negative_sampler.num_negs_per_pos 50.0 +90 19 training.batch_size 0.0 +90 20 model.output_channels 36.0 +90 20 model.input_dropout 0.3619184290060994 +90 20 model.output_dropout 0.14785052169386514 +90 20 model.feature_map_dropout 0.3039133809608987 +90 20 model.embedding_dim 0.0 +90 20 optimizer.lr 0.04087894663107615 +90 20 negative_sampler.num_negs_per_pos 9.0 +90 20 training.batch_size 1.0 +90 21 model.output_channels 52.0 +90 21 model.input_dropout 0.016995608133448692 +90 21 model.output_dropout 0.1504689314890214 +90 21 model.feature_map_dropout 0.44989301360018297 +90 21 model.embedding_dim 1.0 +90 21 optimizer.lr 0.012992861745384911 +90 21 negative_sampler.num_negs_per_pos 62.0 +90 21 training.batch_size 1.0 +90 22 model.output_channels 20.0 +90 22 model.input_dropout 0.40826238074110505 +90 22 model.output_dropout 0.0017195224822663668 +90 22 model.feature_map_dropout 0.19339405961243383 +90 22 model.embedding_dim 2.0 +90 22 optimizer.lr 0.001212832433547664 +90 22 negative_sampler.num_negs_per_pos 79.0 +90 22 training.batch_size 0.0 +90 23 model.output_channels 35.0 +90 23 model.input_dropout 0.4045481479134924 +90 23 model.output_dropout 0.498251493385496 +90 23 model.feature_map_dropout 0.28529333474299934 +90 23 model.embedding_dim 0.0 +90 23 optimizer.lr 0.032481372940391495 +90 23 negative_sampler.num_negs_per_pos 73.0 +90 23 training.batch_size 1.0 +90 24 model.output_channels 39.0 +90 24 model.input_dropout 0.0656593805632073 +90 24 model.output_dropout 0.2822165006754383 +90 24 model.feature_map_dropout 0.43152032122717726 
+90 24 model.embedding_dim 1.0 +90 24 optimizer.lr 0.019391591924230917 +90 24 negative_sampler.num_negs_per_pos 65.0 +90 24 training.batch_size 2.0 +90 25 model.output_channels 64.0 +90 25 model.input_dropout 0.33369083161624674 +90 25 model.output_dropout 0.12156675344703088 +90 25 model.feature_map_dropout 0.07435136728711383 +90 25 model.embedding_dim 2.0 +90 25 optimizer.lr 0.04993331622436343 +90 25 negative_sampler.num_negs_per_pos 48.0 +90 25 training.batch_size 2.0 +90 26 model.output_channels 39.0 +90 26 model.input_dropout 0.33334699375832894 +90 26 model.output_dropout 0.27421406780699414 +90 26 model.feature_map_dropout 0.2365265174162518 +90 26 model.embedding_dim 2.0 +90 26 optimizer.lr 0.001549280650042095 +90 26 negative_sampler.num_negs_per_pos 15.0 +90 26 training.batch_size 1.0 +90 27 model.output_channels 33.0 +90 27 model.input_dropout 0.3079123683662467 +90 27 model.output_dropout 0.3174926864392651 +90 27 model.feature_map_dropout 0.37827249051493445 +90 27 model.embedding_dim 1.0 +90 27 optimizer.lr 0.002945157038606728 +90 27 negative_sampler.num_negs_per_pos 67.0 +90 27 training.batch_size 2.0 +90 28 model.output_channels 39.0 +90 28 model.input_dropout 0.07219604940242247 +90 28 model.output_dropout 0.32873502744318944 +90 28 model.feature_map_dropout 0.4124714369039705 +90 28 model.embedding_dim 1.0 +90 28 optimizer.lr 0.003360389697573334 +90 28 negative_sampler.num_negs_per_pos 88.0 +90 28 training.batch_size 1.0 +90 29 model.output_channels 16.0 +90 29 model.input_dropout 0.30866532248624706 +90 29 model.output_dropout 0.4154007047141078 +90 29 model.feature_map_dropout 0.23441666866997446 +90 29 model.embedding_dim 1.0 +90 29 optimizer.lr 0.00616357206563267 +90 29 negative_sampler.num_negs_per_pos 66.0 +90 29 training.batch_size 0.0 +90 30 model.output_channels 23.0 +90 30 model.input_dropout 0.23383741064738073 +90 30 model.output_dropout 0.44267457793937187 +90 30 model.feature_map_dropout 0.359189580297579 +90 30 
model.embedding_dim 1.0 +90 30 optimizer.lr 0.005407089782547811 +90 30 negative_sampler.num_negs_per_pos 57.0 +90 30 training.batch_size 1.0 +90 31 model.output_channels 52.0 +90 31 model.input_dropout 0.14000121516040814 +90 31 model.output_dropout 0.48610101568953323 +90 31 model.feature_map_dropout 0.3293084023234241 +90 31 model.embedding_dim 1.0 +90 31 optimizer.lr 0.07352998102467789 +90 31 negative_sampler.num_negs_per_pos 45.0 +90 31 training.batch_size 0.0 +90 32 model.output_channels 18.0 +90 32 model.input_dropout 0.4138587441187831 +90 32 model.output_dropout 0.2278999662340891 +90 32 model.feature_map_dropout 0.2602915285887962 +90 32 model.embedding_dim 2.0 +90 32 optimizer.lr 0.002078793079332455 +90 32 negative_sampler.num_negs_per_pos 80.0 +90 32 training.batch_size 0.0 +90 33 model.output_channels 48.0 +90 33 model.input_dropout 0.37310461702499687 +90 33 model.output_dropout 0.06143821894857604 +90 33 model.feature_map_dropout 0.33679837232144383 +90 33 model.embedding_dim 0.0 +90 33 optimizer.lr 0.0022830359364575423 +90 33 negative_sampler.num_negs_per_pos 96.0 +90 33 training.batch_size 2.0 +90 34 model.output_channels 27.0 +90 34 model.input_dropout 0.11723443308595366 +90 34 model.output_dropout 0.39871829489046196 +90 34 model.feature_map_dropout 0.46162448164126374 +90 34 model.embedding_dim 2.0 +90 34 optimizer.lr 0.026937615619011524 +90 34 negative_sampler.num_negs_per_pos 84.0 +90 34 training.batch_size 0.0 +90 35 model.output_channels 33.0 +90 35 model.input_dropout 0.3001431097727504 +90 35 model.output_dropout 0.17957463301805598 +90 35 model.feature_map_dropout 0.06069639446999503 +90 35 model.embedding_dim 0.0 +90 35 optimizer.lr 0.003520701103030355 +90 35 negative_sampler.num_negs_per_pos 80.0 +90 35 training.batch_size 0.0 +90 36 model.output_channels 51.0 +90 36 model.input_dropout 0.3568772686963546 +90 36 model.output_dropout 0.4585638855308502 +90 36 model.feature_map_dropout 0.1064593410240397 +90 36 model.embedding_dim 
1.0 +90 36 optimizer.lr 0.0031921644033634997 +90 36 negative_sampler.num_negs_per_pos 89.0 +90 36 training.batch_size 2.0 +90 37 model.output_channels 38.0 +90 37 model.input_dropout 0.10434151181290602 +90 37 model.output_dropout 0.48133704492060164 +90 37 model.feature_map_dropout 0.04348666387252126 +90 37 model.embedding_dim 1.0 +90 37 optimizer.lr 0.001084328083253175 +90 37 negative_sampler.num_negs_per_pos 68.0 +90 37 training.batch_size 1.0 +90 38 model.output_channels 26.0 +90 38 model.input_dropout 0.48720740871194823 +90 38 model.output_dropout 0.10723483702166353 +90 38 model.feature_map_dropout 0.22893838104340652 +90 38 model.embedding_dim 2.0 +90 38 optimizer.lr 0.0012941433915955088 +90 38 negative_sampler.num_negs_per_pos 14.0 +90 38 training.batch_size 1.0 +90 39 model.output_channels 52.0 +90 39 model.input_dropout 0.41280531276153953 +90 39 model.output_dropout 0.006159393475110964 +90 39 model.feature_map_dropout 0.028108629155715825 +90 39 model.embedding_dim 2.0 +90 39 optimizer.lr 0.0020277176992025965 +90 39 negative_sampler.num_negs_per_pos 35.0 +90 39 training.batch_size 2.0 +90 40 model.output_channels 23.0 +90 40 model.input_dropout 0.12164061930177666 +90 40 model.output_dropout 0.3131988518737275 +90 40 model.feature_map_dropout 0.354905815863313 +90 40 model.embedding_dim 1.0 +90 40 optimizer.lr 0.06561644149155262 +90 40 negative_sampler.num_negs_per_pos 16.0 +90 40 training.batch_size 0.0 +90 41 model.output_channels 60.0 +90 41 model.input_dropout 0.3083196074305044 +90 41 model.output_dropout 0.13684409103348766 +90 41 model.feature_map_dropout 0.2934172732854042 +90 41 model.embedding_dim 1.0 +90 41 optimizer.lr 0.001881493379747937 +90 41 negative_sampler.num_negs_per_pos 83.0 +90 41 training.batch_size 0.0 +90 42 model.output_channels 30.0 +90 42 model.input_dropout 0.01739747268653974 +90 42 model.output_dropout 0.21141236756636267 +90 42 model.feature_map_dropout 0.34510708350980673 +90 42 model.embedding_dim 2.0 +90 42 
optimizer.lr 0.01578012093415971 +90 42 negative_sampler.num_negs_per_pos 61.0 +90 42 training.batch_size 2.0 +90 43 model.output_channels 64.0 +90 43 model.input_dropout 0.47010391465515394 +90 43 model.output_dropout 0.367269783613442 +90 43 model.feature_map_dropout 6.935787556233697e-05 +90 43 model.embedding_dim 1.0 +90 43 optimizer.lr 0.004205509492849303 +90 43 negative_sampler.num_negs_per_pos 21.0 +90 43 training.batch_size 1.0 +90 44 model.output_channels 20.0 +90 44 model.input_dropout 0.1330112169237032 +90 44 model.output_dropout 0.27843659623291395 +90 44 model.feature_map_dropout 0.02340015937915213 +90 44 model.embedding_dim 0.0 +90 44 optimizer.lr 0.0017824729633908614 +90 44 negative_sampler.num_negs_per_pos 58.0 +90 44 training.batch_size 2.0 +90 45 model.output_channels 36.0 +90 45 model.input_dropout 0.14768658453394057 +90 45 model.output_dropout 0.25112298378250814 +90 45 model.feature_map_dropout 0.14457585703126347 +90 45 model.embedding_dim 0.0 +90 45 optimizer.lr 0.04282580688123255 +90 45 negative_sampler.num_negs_per_pos 5.0 +90 45 training.batch_size 0.0 +90 46 model.output_channels 40.0 +90 46 model.input_dropout 0.05929808956459959 +90 46 model.output_dropout 0.3727556082780214 +90 46 model.feature_map_dropout 0.4976088262824228 +90 46 model.embedding_dim 0.0 +90 46 optimizer.lr 0.0027034249852447676 +90 46 negative_sampler.num_negs_per_pos 77.0 +90 46 training.batch_size 1.0 +90 47 model.output_channels 18.0 +90 47 model.input_dropout 0.36585399628636545 +90 47 model.output_dropout 0.48904192359699133 +90 47 model.feature_map_dropout 0.13078610625541565 +90 47 model.embedding_dim 0.0 +90 47 optimizer.lr 0.0013202000196241453 +90 47 negative_sampler.num_negs_per_pos 99.0 +90 47 training.batch_size 0.0 +90 48 model.output_channels 63.0 +90 48 model.input_dropout 0.4674811726691211 +90 48 model.output_dropout 0.0817372285011182 +90 48 model.feature_map_dropout 0.45820561944937055 +90 48 model.embedding_dim 0.0 +90 48 optimizer.lr 
0.0010535210501214833 +90 48 negative_sampler.num_negs_per_pos 93.0 +90 48 training.batch_size 0.0 +90 49 model.output_channels 34.0 +90 49 model.input_dropout 0.06414567919212799 +90 49 model.output_dropout 0.3647142639059179 +90 49 model.feature_map_dropout 0.42484062836304676 +90 49 model.embedding_dim 2.0 +90 49 optimizer.lr 0.0010467159940015464 +90 49 negative_sampler.num_negs_per_pos 58.0 +90 49 training.batch_size 1.0 +90 50 model.output_channels 24.0 +90 50 model.input_dropout 0.12098482999618904 +90 50 model.output_dropout 0.45482571757161194 +90 50 model.feature_map_dropout 0.37750293089143955 +90 50 model.embedding_dim 2.0 +90 50 optimizer.lr 0.017550369934610864 +90 50 negative_sampler.num_negs_per_pos 25.0 +90 50 training.batch_size 0.0 +90 51 model.output_channels 55.0 +90 51 model.input_dropout 0.034530235550627786 +90 51 model.output_dropout 0.010109430761315819 +90 51 model.feature_map_dropout 0.4522436249023324 +90 51 model.embedding_dim 0.0 +90 51 optimizer.lr 0.015281531155255297 +90 51 negative_sampler.num_negs_per_pos 79.0 +90 51 training.batch_size 2.0 +90 52 model.output_channels 62.0 +90 52 model.input_dropout 0.14793409660250378 +90 52 model.output_dropout 0.06608084874295256 +90 52 model.feature_map_dropout 0.4544559599714554 +90 52 model.embedding_dim 2.0 +90 52 optimizer.lr 0.045533625870823595 +90 52 negative_sampler.num_negs_per_pos 45.0 +90 52 training.batch_size 2.0 +90 53 model.output_channels 63.0 +90 53 model.input_dropout 0.4074401426687496 +90 53 model.output_dropout 0.3956613284173754 +90 53 model.feature_map_dropout 0.030132618995990734 +90 53 model.embedding_dim 1.0 +90 53 optimizer.lr 0.0013035013372594008 +90 53 negative_sampler.num_negs_per_pos 26.0 +90 53 training.batch_size 2.0 +90 54 model.output_channels 36.0 +90 54 model.input_dropout 0.00044818425938125683 +90 54 model.output_dropout 0.3351821093861559 +90 54 model.feature_map_dropout 0.47574105591952237 +90 54 model.embedding_dim 1.0 +90 54 optimizer.lr 
0.0647726489925762 +90 54 negative_sampler.num_negs_per_pos 99.0 +90 54 training.batch_size 1.0 +90 55 model.output_channels 30.0 +90 55 model.input_dropout 0.45390192948739966 +90 55 model.output_dropout 0.21466944822282674 +90 55 model.feature_map_dropout 0.2523204317695933 +90 55 model.embedding_dim 0.0 +90 55 optimizer.lr 0.025043878581882215 +90 55 negative_sampler.num_negs_per_pos 46.0 +90 55 training.batch_size 2.0 +90 56 model.output_channels 50.0 +90 56 model.input_dropout 0.4787333894768075 +90 56 model.output_dropout 0.33356691775033626 +90 56 model.feature_map_dropout 0.22465472075331422 +90 56 model.embedding_dim 1.0 +90 56 optimizer.lr 0.0014279289863363908 +90 56 negative_sampler.num_negs_per_pos 41.0 +90 56 training.batch_size 2.0 +90 57 model.output_channels 19.0 +90 57 model.input_dropout 0.25439493938436614 +90 57 model.output_dropout 0.1887538102250219 +90 57 model.feature_map_dropout 0.09436394178334406 +90 57 model.embedding_dim 0.0 +90 57 optimizer.lr 0.0010642873506252002 +90 57 negative_sampler.num_negs_per_pos 40.0 +90 57 training.batch_size 1.0 +90 58 model.output_channels 23.0 +90 58 model.input_dropout 0.24882290336959273 +90 58 model.output_dropout 0.4891362658237651 +90 58 model.feature_map_dropout 0.42889314250248595 +90 58 model.embedding_dim 2.0 +90 58 optimizer.lr 0.013498744698449026 +90 58 negative_sampler.num_negs_per_pos 18.0 +90 58 training.batch_size 0.0 +90 59 model.output_channels 41.0 +90 59 model.input_dropout 0.17057303114385208 +90 59 model.output_dropout 0.23301818965568027 +90 59 model.feature_map_dropout 0.36454181067688274 +90 59 model.embedding_dim 0.0 +90 59 optimizer.lr 0.004200254018841901 +90 59 negative_sampler.num_negs_per_pos 25.0 +90 59 training.batch_size 2.0 +90 60 model.output_channels 18.0 +90 60 model.input_dropout 0.2452633048472706 +90 60 model.output_dropout 0.3031183790484381 +90 60 model.feature_map_dropout 0.06504881500052007 +90 60 model.embedding_dim 0.0 +90 60 optimizer.lr 
0.040613052444903375 +90 60 negative_sampler.num_negs_per_pos 63.0 +90 60 training.batch_size 2.0 +90 61 model.output_channels 22.0 +90 61 model.input_dropout 0.11341866946952284 +90 61 model.output_dropout 0.047897431306753335 +90 61 model.feature_map_dropout 0.14245074321097045 +90 61 model.embedding_dim 2.0 +90 61 optimizer.lr 0.0010840083345360263 +90 61 negative_sampler.num_negs_per_pos 80.0 +90 61 training.batch_size 0.0 +90 62 model.output_channels 51.0 +90 62 model.input_dropout 0.1481583059916326 +90 62 model.output_dropout 0.3768077413185608 +90 62 model.feature_map_dropout 0.11304972191612461 +90 62 model.embedding_dim 0.0 +90 62 optimizer.lr 0.059461520219705015 +90 62 negative_sampler.num_negs_per_pos 29.0 +90 62 training.batch_size 0.0 +90 63 model.output_channels 47.0 +90 63 model.input_dropout 0.48831826797258715 +90 63 model.output_dropout 0.3008150112955384 +90 63 model.feature_map_dropout 0.49065338969077177 +90 63 model.embedding_dim 0.0 +90 63 optimizer.lr 0.008424477131545661 +90 63 negative_sampler.num_negs_per_pos 60.0 +90 63 training.batch_size 1.0 +90 64 model.output_channels 51.0 +90 64 model.input_dropout 0.15508902944029435 +90 64 model.output_dropout 0.41777889356983394 +90 64 model.feature_map_dropout 0.1729264600458365 +90 64 model.embedding_dim 1.0 +90 64 optimizer.lr 0.055339571260909756 +90 64 negative_sampler.num_negs_per_pos 16.0 +90 64 training.batch_size 0.0 +90 65 model.output_channels 48.0 +90 65 model.input_dropout 0.46863066663331454 +90 65 model.output_dropout 0.25456629035447365 +90 65 model.feature_map_dropout 0.14268265344682146 +90 65 model.embedding_dim 1.0 +90 65 optimizer.lr 0.008034377901492086 +90 65 negative_sampler.num_negs_per_pos 11.0 +90 65 training.batch_size 1.0 +90 66 model.output_channels 54.0 +90 66 model.input_dropout 0.12125413399497276 +90 66 model.output_dropout 0.32205771910834535 +90 66 model.feature_map_dropout 0.1053831290592771 +90 66 model.embedding_dim 0.0 +90 66 optimizer.lr 
0.002018165656654229 +90 66 negative_sampler.num_negs_per_pos 58.0 +90 66 training.batch_size 2.0 +90 67 model.output_channels 28.0 +90 67 model.input_dropout 0.23663951089953084 +90 67 model.output_dropout 0.49895070770810895 +90 67 model.feature_map_dropout 0.24296670137310616 +90 67 model.embedding_dim 0.0 +90 67 optimizer.lr 0.001048814263056457 +90 67 negative_sampler.num_negs_per_pos 47.0 +90 67 training.batch_size 0.0 +90 68 model.output_channels 42.0 +90 68 model.input_dropout 0.42139046112401624 +90 68 model.output_dropout 0.36551799163295834 +90 68 model.feature_map_dropout 0.3220564530684162 +90 68 model.embedding_dim 2.0 +90 68 optimizer.lr 0.04892572603425938 +90 68 negative_sampler.num_negs_per_pos 42.0 +90 68 training.batch_size 1.0 +90 69 model.output_channels 40.0 +90 69 model.input_dropout 0.20569665094238065 +90 69 model.output_dropout 0.15714142846847412 +90 69 model.feature_map_dropout 0.3026302691684797 +90 69 model.embedding_dim 0.0 +90 69 optimizer.lr 0.052513177094779144 +90 69 negative_sampler.num_negs_per_pos 73.0 +90 69 training.batch_size 1.0 +90 70 model.output_channels 33.0 +90 70 model.input_dropout 0.1973396603784257 +90 70 model.output_dropout 0.12339957029841303 +90 70 model.feature_map_dropout 0.21202542754898668 +90 70 model.embedding_dim 0.0 +90 70 optimizer.lr 0.005395742988268586 +90 70 negative_sampler.num_negs_per_pos 1.0 +90 70 training.batch_size 0.0 +90 71 model.output_channels 44.0 +90 71 model.input_dropout 0.23157395603621822 +90 71 model.output_dropout 0.2878213699351863 +90 71 model.feature_map_dropout 0.4596025170742962 +90 71 model.embedding_dim 0.0 +90 71 optimizer.lr 0.016247365296233888 +90 71 negative_sampler.num_negs_per_pos 9.0 +90 71 training.batch_size 0.0 +90 72 model.output_channels 25.0 +90 72 model.input_dropout 0.16971223158534832 +90 72 model.output_dropout 0.03773443771662033 +90 72 model.feature_map_dropout 0.006970731271424058 +90 72 model.embedding_dim 1.0 +90 72 optimizer.lr 0.003337447742982309 
+90 72 negative_sampler.num_negs_per_pos 61.0 +90 72 training.batch_size 1.0 +90 73 model.output_channels 59.0 +90 73 model.input_dropout 0.4997333944573024 +90 73 model.output_dropout 0.27136191102371826 +90 73 model.feature_map_dropout 0.4648359053961538 +90 73 model.embedding_dim 1.0 +90 73 optimizer.lr 0.002176256333574455 +90 73 negative_sampler.num_negs_per_pos 2.0 +90 73 training.batch_size 1.0 +90 74 model.output_channels 56.0 +90 74 model.input_dropout 0.38371279623474785 +90 74 model.output_dropout 0.4726453669510954 +90 74 model.feature_map_dropout 0.13355559975093884 +90 74 model.embedding_dim 1.0 +90 74 optimizer.lr 0.00297785079644732 +90 74 negative_sampler.num_negs_per_pos 55.0 +90 74 training.batch_size 1.0 +90 75 model.output_channels 64.0 +90 75 model.input_dropout 0.01705533767061357 +90 75 model.output_dropout 0.3145975695948033 +90 75 model.feature_map_dropout 0.40634005377564647 +90 75 model.embedding_dim 0.0 +90 75 optimizer.lr 0.02230945056976787 +90 75 negative_sampler.num_negs_per_pos 43.0 +90 75 training.batch_size 2.0 +90 76 model.output_channels 45.0 +90 76 model.input_dropout 0.04307384238841255 +90 76 model.output_dropout 0.013214719253431373 +90 76 model.feature_map_dropout 0.23321966555275625 +90 76 model.embedding_dim 2.0 +90 76 optimizer.lr 0.015561452286032952 +90 76 negative_sampler.num_negs_per_pos 22.0 +90 76 training.batch_size 0.0 +90 77 model.output_channels 48.0 +90 77 model.input_dropout 0.1883644142742858 +90 77 model.output_dropout 0.29348402937498397 +90 77 model.feature_map_dropout 0.0991792650423613 +90 77 model.embedding_dim 0.0 +90 77 optimizer.lr 0.027665858346800718 +90 77 negative_sampler.num_negs_per_pos 65.0 +90 77 training.batch_size 0.0 +90 78 model.output_channels 23.0 +90 78 model.input_dropout 0.31343068428321 +90 78 model.output_dropout 0.047756713168586096 +90 78 model.feature_map_dropout 0.23830715742498698 +90 78 model.embedding_dim 2.0 +90 78 optimizer.lr 0.001029447786978896 +90 78 
negative_sampler.num_negs_per_pos 97.0 +90 78 training.batch_size 2.0 +90 79 model.output_channels 49.0 +90 79 model.input_dropout 0.41509609392599595 +90 79 model.output_dropout 0.049497965264428734 +90 79 model.feature_map_dropout 0.33778078933592665 +90 79 model.embedding_dim 2.0 +90 79 optimizer.lr 0.003515228897388779 +90 79 negative_sampler.num_negs_per_pos 85.0 +90 79 training.batch_size 2.0 +90 80 model.output_channels 33.0 +90 80 model.input_dropout 0.16858788073843728 +90 80 model.output_dropout 0.4866197168160556 +90 80 model.feature_map_dropout 0.06063297418637509 +90 80 model.embedding_dim 1.0 +90 80 optimizer.lr 0.052725909722099173 +90 80 negative_sampler.num_negs_per_pos 23.0 +90 80 training.batch_size 1.0 +90 81 model.output_channels 58.0 +90 81 model.input_dropout 0.03261768089631811 +90 81 model.output_dropout 0.41022996214486634 +90 81 model.feature_map_dropout 0.25688532247936746 +90 81 model.embedding_dim 1.0 +90 81 optimizer.lr 0.07201208595228457 +90 81 negative_sampler.num_negs_per_pos 80.0 +90 81 training.batch_size 1.0 +90 82 model.output_channels 24.0 +90 82 model.input_dropout 0.4996525238876248 +90 82 model.output_dropout 0.18436445171722798 +90 82 model.feature_map_dropout 0.0474294885103565 +90 82 model.embedding_dim 0.0 +90 82 optimizer.lr 0.001547550088418484 +90 82 negative_sampler.num_negs_per_pos 68.0 +90 82 training.batch_size 0.0 +90 83 model.output_channels 30.0 +90 83 model.input_dropout 0.3205588531317295 +90 83 model.output_dropout 0.4530024166320496 +90 83 model.feature_map_dropout 0.3043244329334405 +90 83 model.embedding_dim 2.0 +90 83 optimizer.lr 0.025274707600455377 +90 83 negative_sampler.num_negs_per_pos 76.0 +90 83 training.batch_size 1.0 +90 84 model.output_channels 61.0 +90 84 model.input_dropout 0.1804866482793177 +90 84 model.output_dropout 0.2932044995441707 +90 84 model.feature_map_dropout 0.07456662556084509 +90 84 model.embedding_dim 1.0 +90 84 optimizer.lr 0.001029867363754327 +90 84 
negative_sampler.num_negs_per_pos 56.0 +90 84 training.batch_size 0.0 +90 85 model.output_channels 26.0 +90 85 model.input_dropout 0.4512799178326657 +90 85 model.output_dropout 0.21847697269802363 +90 85 model.feature_map_dropout 0.44830223210099496 +90 85 model.embedding_dim 1.0 +90 85 optimizer.lr 0.0076546584500297495 +90 85 negative_sampler.num_negs_per_pos 65.0 +90 85 training.batch_size 2.0 +90 86 model.output_channels 52.0 +90 86 model.input_dropout 0.48274787114372525 +90 86 model.output_dropout 0.23544340273521475 +90 86 model.feature_map_dropout 0.3085819633625612 +90 86 model.embedding_dim 0.0 +90 86 optimizer.lr 0.07955506919828918 +90 86 negative_sampler.num_negs_per_pos 41.0 +90 86 training.batch_size 1.0 +90 87 model.output_channels 43.0 +90 87 model.input_dropout 0.3705859146379232 +90 87 model.output_dropout 0.01603656802270892 +90 87 model.feature_map_dropout 0.4470601408282047 +90 87 model.embedding_dim 1.0 +90 87 optimizer.lr 0.004776818945289665 +90 87 negative_sampler.num_negs_per_pos 60.0 +90 87 training.batch_size 2.0 +90 88 model.output_channels 61.0 +90 88 model.input_dropout 0.36605287063692893 +90 88 model.output_dropout 0.003737626972015351 +90 88 model.feature_map_dropout 0.13954869839652329 +90 88 model.embedding_dim 1.0 +90 88 optimizer.lr 0.0029059033156893315 +90 88 negative_sampler.num_negs_per_pos 7.0 +90 88 training.batch_size 0.0 +90 89 model.output_channels 29.0 +90 89 model.input_dropout 0.3821318316767535 +90 89 model.output_dropout 0.4108459138989309 +90 89 model.feature_map_dropout 0.3949550276647503 +90 89 model.embedding_dim 1.0 +90 89 optimizer.lr 0.003074734374269313 +90 89 negative_sampler.num_negs_per_pos 73.0 +90 89 training.batch_size 2.0 +90 90 model.output_channels 38.0 +90 90 model.input_dropout 0.44522995246114305 +90 90 model.output_dropout 0.31609707387047525 +90 90 model.feature_map_dropout 0.3967865912293262 +90 90 model.embedding_dim 0.0 +90 90 optimizer.lr 0.006653519858687085 +90 90 
negative_sampler.num_negs_per_pos 50.0 +90 90 training.batch_size 2.0 +90 91 model.output_channels 22.0 +90 91 model.input_dropout 0.010005454487194088 +90 91 model.output_dropout 0.10922905031146457 +90 91 model.feature_map_dropout 0.41788295820360905 +90 91 model.embedding_dim 2.0 +90 91 optimizer.lr 0.05571290375038021 +90 91 negative_sampler.num_negs_per_pos 70.0 +90 91 training.batch_size 2.0 +90 92 model.output_channels 41.0 +90 92 model.input_dropout 0.3506424532582015 +90 92 model.output_dropout 0.08492556928994571 +90 92 model.feature_map_dropout 0.49331481238419306 +90 92 model.embedding_dim 1.0 +90 92 optimizer.lr 0.00418070199600239 +90 92 negative_sampler.num_negs_per_pos 71.0 +90 92 training.batch_size 2.0 +90 93 model.output_channels 57.0 +90 93 model.input_dropout 0.2757374654222152 +90 93 model.output_dropout 0.058042343671527 +90 93 model.feature_map_dropout 0.12472682291134923 +90 93 model.embedding_dim 0.0 +90 93 optimizer.lr 0.031985716916738634 +90 93 negative_sampler.num_negs_per_pos 76.0 +90 93 training.batch_size 0.0 +90 94 model.output_channels 31.0 +90 94 model.input_dropout 0.17161472304561043 +90 94 model.output_dropout 0.22007524880163132 +90 94 model.feature_map_dropout 0.07515132259669094 +90 94 model.embedding_dim 0.0 +90 94 optimizer.lr 0.005551531594527267 +90 94 negative_sampler.num_negs_per_pos 30.0 +90 94 training.batch_size 0.0 +90 95 model.output_channels 59.0 +90 95 model.input_dropout 0.2621233444556638 +90 95 model.output_dropout 0.4572860508978607 +90 95 model.feature_map_dropout 0.42105196266837347 +90 95 model.embedding_dim 0.0 +90 95 optimizer.lr 0.0018556383995809288 +90 95 negative_sampler.num_negs_per_pos 46.0 +90 95 training.batch_size 1.0 +90 96 model.output_channels 47.0 +90 96 model.input_dropout 0.14990633995961888 +90 96 model.output_dropout 0.2099266817698729 +90 96 model.feature_map_dropout 0.38088922499308353 +90 96 model.embedding_dim 0.0 +90 96 optimizer.lr 0.01821648615626555 +90 96 
negative_sampler.num_negs_per_pos 21.0 +90 96 training.batch_size 2.0 +90 97 model.output_channels 64.0 +90 97 model.input_dropout 0.33804064208535833 +90 97 model.output_dropout 0.1988731672916224 +90 97 model.feature_map_dropout 0.48708734785394386 +90 97 model.embedding_dim 2.0 +90 97 optimizer.lr 0.025142089160776343 +90 97 negative_sampler.num_negs_per_pos 28.0 +90 97 training.batch_size 2.0 +90 98 model.output_channels 18.0 +90 98 model.input_dropout 0.05543701336206064 +90 98 model.output_dropout 0.49650518715202596 +90 98 model.feature_map_dropout 0.11851865362335556 +90 98 model.embedding_dim 1.0 +90 98 optimizer.lr 0.002626725290350773 +90 98 negative_sampler.num_negs_per_pos 57.0 +90 98 training.batch_size 0.0 +90 99 model.output_channels 25.0 +90 99 model.input_dropout 0.020648884164485515 +90 99 model.output_dropout 0.476955902933921 +90 99 model.feature_map_dropout 0.44952389331689935 +90 99 model.embedding_dim 1.0 +90 99 optimizer.lr 0.007941113920863175 +90 99 negative_sampler.num_negs_per_pos 85.0 +90 99 training.batch_size 2.0 +90 100 model.output_channels 35.0 +90 100 model.input_dropout 0.163014234052839 +90 100 model.output_dropout 0.4263367903146567 +90 100 model.feature_map_dropout 0.06120836292336357 +90 100 model.embedding_dim 2.0 +90 100 optimizer.lr 0.014578324161777977 +90 100 negative_sampler.num_negs_per_pos 81.0 +90 100 training.batch_size 0.0 +90 1 dataset """kinships""" +90 1 model """conve""" +90 1 loss """bceaftersigmoid""" +90 1 regularizer """no""" +90 1 optimizer """adam""" +90 1 training_loop """owa""" +90 1 negative_sampler """basic""" +90 1 evaluator """rankbased""" +90 2 dataset """kinships""" +90 2 model """conve""" +90 2 loss """bceaftersigmoid""" +90 2 regularizer """no""" +90 2 optimizer """adam""" +90 2 training_loop """owa""" +90 2 negative_sampler """basic""" +90 2 evaluator """rankbased""" +90 3 dataset """kinships""" +90 3 model """conve""" +90 3 loss """bceaftersigmoid""" +90 3 regularizer """no""" +90 3 optimizer 
"""adam""" +90 3 training_loop """owa""" +90 3 negative_sampler """basic""" +90 3 evaluator """rankbased""" +90 4 dataset """kinships""" +90 4 model """conve""" +90 4 loss """bceaftersigmoid""" +90 4 regularizer """no""" +90 4 optimizer """adam""" +90 4 training_loop """owa""" +90 4 negative_sampler """basic""" +90 4 evaluator """rankbased""" +90 5 dataset """kinships""" +90 5 model """conve""" +90 5 loss """bceaftersigmoid""" +90 5 regularizer """no""" +90 5 optimizer """adam""" +90 5 training_loop """owa""" +90 5 negative_sampler """basic""" +90 5 evaluator """rankbased""" +90 6 dataset """kinships""" +90 6 model """conve""" +90 6 loss """bceaftersigmoid""" +90 6 regularizer """no""" +90 6 optimizer """adam""" +90 6 training_loop """owa""" +90 6 negative_sampler """basic""" +90 6 evaluator """rankbased""" +90 7 dataset """kinships""" +90 7 model """conve""" +90 7 loss """bceaftersigmoid""" +90 7 regularizer """no""" +90 7 optimizer """adam""" +90 7 training_loop """owa""" +90 7 negative_sampler """basic""" +90 7 evaluator """rankbased""" +90 8 dataset """kinships""" +90 8 model """conve""" +90 8 loss """bceaftersigmoid""" +90 8 regularizer """no""" +90 8 optimizer """adam""" +90 8 training_loop """owa""" +90 8 negative_sampler """basic""" +90 8 evaluator """rankbased""" +90 9 dataset """kinships""" +90 9 model """conve""" +90 9 loss """bceaftersigmoid""" +90 9 regularizer """no""" +90 9 optimizer """adam""" +90 9 training_loop """owa""" +90 9 negative_sampler """basic""" +90 9 evaluator """rankbased""" +90 10 dataset """kinships""" +90 10 model """conve""" +90 10 loss """bceaftersigmoid""" +90 10 regularizer """no""" +90 10 optimizer """adam""" +90 10 training_loop """owa""" +90 10 negative_sampler """basic""" +90 10 evaluator """rankbased""" +90 11 dataset """kinships""" +90 11 model """conve""" +90 11 loss """bceaftersigmoid""" +90 11 regularizer """no""" +90 11 optimizer """adam""" +90 11 training_loop """owa""" +90 11 negative_sampler """basic""" +90 11 
evaluator """rankbased""" +90 12 dataset """kinships""" +90 12 model """conve""" +90 12 loss """bceaftersigmoid""" +90 12 regularizer """no""" +90 12 optimizer """adam""" +90 12 training_loop """owa""" +90 12 negative_sampler """basic""" +90 12 evaluator """rankbased""" +90 13 dataset """kinships""" +90 13 model """conve""" +90 13 loss """bceaftersigmoid""" +90 13 regularizer """no""" +90 13 optimizer """adam""" +90 13 training_loop """owa""" +90 13 negative_sampler """basic""" +90 13 evaluator """rankbased""" +90 14 dataset """kinships""" +90 14 model """conve""" +90 14 loss """bceaftersigmoid""" +90 14 regularizer """no""" +90 14 optimizer """adam""" +90 14 training_loop """owa""" +90 14 negative_sampler """basic""" +90 14 evaluator """rankbased""" +90 15 dataset """kinships""" +90 15 model """conve""" +90 15 loss """bceaftersigmoid""" +90 15 regularizer """no""" +90 15 optimizer """adam""" +90 15 training_loop """owa""" +90 15 negative_sampler """basic""" +90 15 evaluator """rankbased""" +90 16 dataset """kinships""" +90 16 model """conve""" +90 16 loss """bceaftersigmoid""" +90 16 regularizer """no""" +90 16 optimizer """adam""" +90 16 training_loop """owa""" +90 16 negative_sampler """basic""" +90 16 evaluator """rankbased""" +90 17 dataset """kinships""" +90 17 model """conve""" +90 17 loss """bceaftersigmoid""" +90 17 regularizer """no""" +90 17 optimizer """adam""" +90 17 training_loop """owa""" +90 17 negative_sampler """basic""" +90 17 evaluator """rankbased""" +90 18 dataset """kinships""" +90 18 model """conve""" +90 18 loss """bceaftersigmoid""" +90 18 regularizer """no""" +90 18 optimizer """adam""" +90 18 training_loop """owa""" +90 18 negative_sampler """basic""" +90 18 evaluator """rankbased""" +90 19 dataset """kinships""" +90 19 model """conve""" +90 19 loss """bceaftersigmoid""" +90 19 regularizer """no""" +90 19 optimizer """adam""" +90 19 training_loop """owa""" +90 19 negative_sampler """basic""" +90 19 evaluator """rankbased""" +90 20 
dataset """kinships""" +90 20 model """conve""" +90 20 loss """bceaftersigmoid""" +90 20 regularizer """no""" +90 20 optimizer """adam""" +90 20 training_loop """owa""" +90 20 negative_sampler """basic""" +90 20 evaluator """rankbased""" +90 21 dataset """kinships""" +90 21 model """conve""" +90 21 loss """bceaftersigmoid""" +90 21 regularizer """no""" +90 21 optimizer """adam""" +90 21 training_loop """owa""" +90 21 negative_sampler """basic""" +90 21 evaluator """rankbased""" +90 22 dataset """kinships""" +90 22 model """conve""" +90 22 loss """bceaftersigmoid""" +90 22 regularizer """no""" +90 22 optimizer """adam""" +90 22 training_loop """owa""" +90 22 negative_sampler """basic""" +90 22 evaluator """rankbased""" +90 23 dataset """kinships""" +90 23 model """conve""" +90 23 loss """bceaftersigmoid""" +90 23 regularizer """no""" +90 23 optimizer """adam""" +90 23 training_loop """owa""" +90 23 negative_sampler """basic""" +90 23 evaluator """rankbased""" +90 24 dataset """kinships""" +90 24 model """conve""" +90 24 loss """bceaftersigmoid""" +90 24 regularizer """no""" +90 24 optimizer """adam""" +90 24 training_loop """owa""" +90 24 negative_sampler """basic""" +90 24 evaluator """rankbased""" +90 25 dataset """kinships""" +90 25 model """conve""" +90 25 loss """bceaftersigmoid""" +90 25 regularizer """no""" +90 25 optimizer """adam""" +90 25 training_loop """owa""" +90 25 negative_sampler """basic""" +90 25 evaluator """rankbased""" +90 26 dataset """kinships""" +90 26 model """conve""" +90 26 loss """bceaftersigmoid""" +90 26 regularizer """no""" +90 26 optimizer """adam""" +90 26 training_loop """owa""" +90 26 negative_sampler """basic""" +90 26 evaluator """rankbased""" +90 27 dataset """kinships""" +90 27 model """conve""" +90 27 loss """bceaftersigmoid""" +90 27 regularizer """no""" +90 27 optimizer """adam""" +90 27 training_loop """owa""" +90 27 negative_sampler """basic""" +90 27 evaluator """rankbased""" +90 28 dataset """kinships""" +90 28 model 
"""conve""" +90 28 loss """bceaftersigmoid""" +90 28 regularizer """no""" +90 28 optimizer """adam""" +90 28 training_loop """owa""" +90 28 negative_sampler """basic""" +90 28 evaluator """rankbased""" +90 29 dataset """kinships""" +90 29 model """conve""" +90 29 loss """bceaftersigmoid""" +90 29 regularizer """no""" +90 29 optimizer """adam""" +90 29 training_loop """owa""" +90 29 negative_sampler """basic""" +90 29 evaluator """rankbased""" +90 30 dataset """kinships""" +90 30 model """conve""" +90 30 loss """bceaftersigmoid""" +90 30 regularizer """no""" +90 30 optimizer """adam""" +90 30 training_loop """owa""" +90 30 negative_sampler """basic""" +90 30 evaluator """rankbased""" +90 31 dataset """kinships""" +90 31 model """conve""" +90 31 loss """bceaftersigmoid""" +90 31 regularizer """no""" +90 31 optimizer """adam""" +90 31 training_loop """owa""" +90 31 negative_sampler """basic""" +90 31 evaluator """rankbased""" +90 32 dataset """kinships""" +90 32 model """conve""" +90 32 loss """bceaftersigmoid""" +90 32 regularizer """no""" +90 32 optimizer """adam""" +90 32 training_loop """owa""" +90 32 negative_sampler """basic""" +90 32 evaluator """rankbased""" +90 33 dataset """kinships""" +90 33 model """conve""" +90 33 loss """bceaftersigmoid""" +90 33 regularizer """no""" +90 33 optimizer """adam""" +90 33 training_loop """owa""" +90 33 negative_sampler """basic""" +90 33 evaluator """rankbased""" +90 34 dataset """kinships""" +90 34 model """conve""" +90 34 loss """bceaftersigmoid""" +90 34 regularizer """no""" +90 34 optimizer """adam""" +90 34 training_loop """owa""" +90 34 negative_sampler """basic""" +90 34 evaluator """rankbased""" +90 35 dataset """kinships""" +90 35 model """conve""" +90 35 loss """bceaftersigmoid""" +90 35 regularizer """no""" +90 35 optimizer """adam""" +90 35 training_loop """owa""" +90 35 negative_sampler """basic""" +90 35 evaluator """rankbased""" +90 36 dataset """kinships""" +90 36 model """conve""" +90 36 loss 
"""bceaftersigmoid""" +90 36 regularizer """no""" +90 36 optimizer """adam""" +90 36 training_loop """owa""" +90 36 negative_sampler """basic""" +90 36 evaluator """rankbased""" +90 37 dataset """kinships""" +90 37 model """conve""" +90 37 loss """bceaftersigmoid""" +90 37 regularizer """no""" +90 37 optimizer """adam""" +90 37 training_loop """owa""" +90 37 negative_sampler """basic""" +90 37 evaluator """rankbased""" +90 38 dataset """kinships""" +90 38 model """conve""" +90 38 loss """bceaftersigmoid""" +90 38 regularizer """no""" +90 38 optimizer """adam""" +90 38 training_loop """owa""" +90 38 negative_sampler """basic""" +90 38 evaluator """rankbased""" +90 39 dataset """kinships""" +90 39 model """conve""" +90 39 loss """bceaftersigmoid""" +90 39 regularizer """no""" +90 39 optimizer """adam""" +90 39 training_loop """owa""" +90 39 negative_sampler """basic""" +90 39 evaluator """rankbased""" +90 40 dataset """kinships""" +90 40 model """conve""" +90 40 loss """bceaftersigmoid""" +90 40 regularizer """no""" +90 40 optimizer """adam""" +90 40 training_loop """owa""" +90 40 negative_sampler """basic""" +90 40 evaluator """rankbased""" +90 41 dataset """kinships""" +90 41 model """conve""" +90 41 loss """bceaftersigmoid""" +90 41 regularizer """no""" +90 41 optimizer """adam""" +90 41 training_loop """owa""" +90 41 negative_sampler """basic""" +90 41 evaluator """rankbased""" +90 42 dataset """kinships""" +90 42 model """conve""" +90 42 loss """bceaftersigmoid""" +90 42 regularizer """no""" +90 42 optimizer """adam""" +90 42 training_loop """owa""" +90 42 negative_sampler """basic""" +90 42 evaluator """rankbased""" +90 43 dataset """kinships""" +90 43 model """conve""" +90 43 loss """bceaftersigmoid""" +90 43 regularizer """no""" +90 43 optimizer """adam""" +90 43 training_loop """owa""" +90 43 negative_sampler """basic""" +90 43 evaluator """rankbased""" +90 44 dataset """kinships""" +90 44 model """conve""" +90 44 loss """bceaftersigmoid""" +90 44 
regularizer """no""" +90 44 optimizer """adam""" +90 44 training_loop """owa""" +90 44 negative_sampler """basic""" +90 44 evaluator """rankbased""" +90 45 dataset """kinships""" +90 45 model """conve""" +90 45 loss """bceaftersigmoid""" +90 45 regularizer """no""" +90 45 optimizer """adam""" +90 45 training_loop """owa""" +90 45 negative_sampler """basic""" +90 45 evaluator """rankbased""" +90 46 dataset """kinships""" +90 46 model """conve""" +90 46 loss """bceaftersigmoid""" +90 46 regularizer """no""" +90 46 optimizer """adam""" +90 46 training_loop """owa""" +90 46 negative_sampler """basic""" +90 46 evaluator """rankbased""" +90 47 dataset """kinships""" +90 47 model """conve""" +90 47 loss """bceaftersigmoid""" +90 47 regularizer """no""" +90 47 optimizer """adam""" +90 47 training_loop """owa""" +90 47 negative_sampler """basic""" +90 47 evaluator """rankbased""" +90 48 dataset """kinships""" +90 48 model """conve""" +90 48 loss """bceaftersigmoid""" +90 48 regularizer """no""" +90 48 optimizer """adam""" +90 48 training_loop """owa""" +90 48 negative_sampler """basic""" +90 48 evaluator """rankbased""" +90 49 dataset """kinships""" +90 49 model """conve""" +90 49 loss """bceaftersigmoid""" +90 49 regularizer """no""" +90 49 optimizer """adam""" +90 49 training_loop """owa""" +90 49 negative_sampler """basic""" +90 49 evaluator """rankbased""" +90 50 dataset """kinships""" +90 50 model """conve""" +90 50 loss """bceaftersigmoid""" +90 50 regularizer """no""" +90 50 optimizer """adam""" +90 50 training_loop """owa""" +90 50 negative_sampler """basic""" +90 50 evaluator """rankbased""" +90 51 dataset """kinships""" +90 51 model """conve""" +90 51 loss """bceaftersigmoid""" +90 51 regularizer """no""" +90 51 optimizer """adam""" +90 51 training_loop """owa""" +90 51 negative_sampler """basic""" +90 51 evaluator """rankbased""" +90 52 dataset """kinships""" +90 52 model """conve""" +90 52 loss """bceaftersigmoid""" +90 52 regularizer """no""" +90 52 optimizer 
"""adam""" +90 52 training_loop """owa""" +90 52 negative_sampler """basic""" +90 52 evaluator """rankbased""" +90 53 dataset """kinships""" +90 53 model """conve""" +90 53 loss """bceaftersigmoid""" +90 53 regularizer """no""" +90 53 optimizer """adam""" +90 53 training_loop """owa""" +90 53 negative_sampler """basic""" +90 53 evaluator """rankbased""" +90 54 dataset """kinships""" +90 54 model """conve""" +90 54 loss """bceaftersigmoid""" +90 54 regularizer """no""" +90 54 optimizer """adam""" +90 54 training_loop """owa""" +90 54 negative_sampler """basic""" +90 54 evaluator """rankbased""" +90 55 dataset """kinships""" +90 55 model """conve""" +90 55 loss """bceaftersigmoid""" +90 55 regularizer """no""" +90 55 optimizer """adam""" +90 55 training_loop """owa""" +90 55 negative_sampler """basic""" +90 55 evaluator """rankbased""" +90 56 dataset """kinships""" +90 56 model """conve""" +90 56 loss """bceaftersigmoid""" +90 56 regularizer """no""" +90 56 optimizer """adam""" +90 56 training_loop """owa""" +90 56 negative_sampler """basic""" +90 56 evaluator """rankbased""" +90 57 dataset """kinships""" +90 57 model """conve""" +90 57 loss """bceaftersigmoid""" +90 57 regularizer """no""" +90 57 optimizer """adam""" +90 57 training_loop """owa""" +90 57 negative_sampler """basic""" +90 57 evaluator """rankbased""" +90 58 dataset """kinships""" +90 58 model """conve""" +90 58 loss """bceaftersigmoid""" +90 58 regularizer """no""" +90 58 optimizer """adam""" +90 58 training_loop """owa""" +90 58 negative_sampler """basic""" +90 58 evaluator """rankbased""" +90 59 dataset """kinships""" +90 59 model """conve""" +90 59 loss """bceaftersigmoid""" +90 59 regularizer """no""" +90 59 optimizer """adam""" +90 59 training_loop """owa""" +90 59 negative_sampler """basic""" +90 59 evaluator """rankbased""" +90 60 dataset """kinships""" +90 60 model """conve""" +90 60 loss """bceaftersigmoid""" +90 60 regularizer """no""" +90 60 optimizer """adam""" +90 60 training_loop 
"""owa""" +90 60 negative_sampler """basic""" +90 60 evaluator """rankbased""" +90 61 dataset """kinships""" +90 61 model """conve""" +90 61 loss """bceaftersigmoid""" +90 61 regularizer """no""" +90 61 optimizer """adam""" +90 61 training_loop """owa""" +90 61 negative_sampler """basic""" +90 61 evaluator """rankbased""" +90 62 dataset """kinships""" +90 62 model """conve""" +90 62 loss """bceaftersigmoid""" +90 62 regularizer """no""" +90 62 optimizer """adam""" +90 62 training_loop """owa""" +90 62 negative_sampler """basic""" +90 62 evaluator """rankbased""" +90 63 dataset """kinships""" +90 63 model """conve""" +90 63 loss """bceaftersigmoid""" +90 63 regularizer """no""" +90 63 optimizer """adam""" +90 63 training_loop """owa""" +90 63 negative_sampler """basic""" +90 63 evaluator """rankbased""" +90 64 dataset """kinships""" +90 64 model """conve""" +90 64 loss """bceaftersigmoid""" +90 64 regularizer """no""" +90 64 optimizer """adam""" +90 64 training_loop """owa""" +90 64 negative_sampler """basic""" +90 64 evaluator """rankbased""" +90 65 dataset """kinships""" +90 65 model """conve""" +90 65 loss """bceaftersigmoid""" +90 65 regularizer """no""" +90 65 optimizer """adam""" +90 65 training_loop """owa""" +90 65 negative_sampler """basic""" +90 65 evaluator """rankbased""" +90 66 dataset """kinships""" +90 66 model """conve""" +90 66 loss """bceaftersigmoid""" +90 66 regularizer """no""" +90 66 optimizer """adam""" +90 66 training_loop """owa""" +90 66 negative_sampler """basic""" +90 66 evaluator """rankbased""" +90 67 dataset """kinships""" +90 67 model """conve""" +90 67 loss """bceaftersigmoid""" +90 67 regularizer """no""" +90 67 optimizer """adam""" +90 67 training_loop """owa""" +90 67 negative_sampler """basic""" +90 67 evaluator """rankbased""" +90 68 dataset """kinships""" +90 68 model """conve""" +90 68 loss """bceaftersigmoid""" +90 68 regularizer """no""" +90 68 optimizer """adam""" +90 68 training_loop """owa""" +90 68 negative_sampler 
"""basic""" +90 68 evaluator """rankbased""" +90 69 dataset """kinships""" +90 69 model """conve""" +90 69 loss """bceaftersigmoid""" +90 69 regularizer """no""" +90 69 optimizer """adam""" +90 69 training_loop """owa""" +90 69 negative_sampler """basic""" +90 69 evaluator """rankbased""" +90 70 dataset """kinships""" +90 70 model """conve""" +90 70 loss """bceaftersigmoid""" +90 70 regularizer """no""" +90 70 optimizer """adam""" +90 70 training_loop """owa""" +90 70 negative_sampler """basic""" +90 70 evaluator """rankbased""" +90 71 dataset """kinships""" +90 71 model """conve""" +90 71 loss """bceaftersigmoid""" +90 71 regularizer """no""" +90 71 optimizer """adam""" +90 71 training_loop """owa""" +90 71 negative_sampler """basic""" +90 71 evaluator """rankbased""" +90 72 dataset """kinships""" +90 72 model """conve""" +90 72 loss """bceaftersigmoid""" +90 72 regularizer """no""" +90 72 optimizer """adam""" +90 72 training_loop """owa""" +90 72 negative_sampler """basic""" +90 72 evaluator """rankbased""" +90 73 dataset """kinships""" +90 73 model """conve""" +90 73 loss """bceaftersigmoid""" +90 73 regularizer """no""" +90 73 optimizer """adam""" +90 73 training_loop """owa""" +90 73 negative_sampler """basic""" +90 73 evaluator """rankbased""" +90 74 dataset """kinships""" +90 74 model """conve""" +90 74 loss """bceaftersigmoid""" +90 74 regularizer """no""" +90 74 optimizer """adam""" +90 74 training_loop """owa""" +90 74 negative_sampler """basic""" +90 74 evaluator """rankbased""" +90 75 dataset """kinships""" +90 75 model """conve""" +90 75 loss """bceaftersigmoid""" +90 75 regularizer """no""" +90 75 optimizer """adam""" +90 75 training_loop """owa""" +90 75 negative_sampler """basic""" +90 75 evaluator """rankbased""" +90 76 dataset """kinships""" +90 76 model """conve""" +90 76 loss """bceaftersigmoid""" +90 76 regularizer """no""" +90 76 optimizer """adam""" +90 76 training_loop """owa""" +90 76 negative_sampler """basic""" +90 76 evaluator 
"""rankbased""" +90 77 dataset """kinships""" +90 77 model """conve""" +90 77 loss """bceaftersigmoid""" +90 77 regularizer """no""" +90 77 optimizer """adam""" +90 77 training_loop """owa""" +90 77 negative_sampler """basic""" +90 77 evaluator """rankbased""" +90 78 dataset """kinships""" +90 78 model """conve""" +90 78 loss """bceaftersigmoid""" +90 78 regularizer """no""" +90 78 optimizer """adam""" +90 78 training_loop """owa""" +90 78 negative_sampler """basic""" +90 78 evaluator """rankbased""" +90 79 dataset """kinships""" +90 79 model """conve""" +90 79 loss """bceaftersigmoid""" +90 79 regularizer """no""" +90 79 optimizer """adam""" +90 79 training_loop """owa""" +90 79 negative_sampler """basic""" +90 79 evaluator """rankbased""" +90 80 dataset """kinships""" +90 80 model """conve""" +90 80 loss """bceaftersigmoid""" +90 80 regularizer """no""" +90 80 optimizer """adam""" +90 80 training_loop """owa""" +90 80 negative_sampler """basic""" +90 80 evaluator """rankbased""" +90 81 dataset """kinships""" +90 81 model """conve""" +90 81 loss """bceaftersigmoid""" +90 81 regularizer """no""" +90 81 optimizer """adam""" +90 81 training_loop """owa""" +90 81 negative_sampler """basic""" +90 81 evaluator """rankbased""" +90 82 dataset """kinships""" +90 82 model """conve""" +90 82 loss """bceaftersigmoid""" +90 82 regularizer """no""" +90 82 optimizer """adam""" +90 82 training_loop """owa""" +90 82 negative_sampler """basic""" +90 82 evaluator """rankbased""" +90 83 dataset """kinships""" +90 83 model """conve""" +90 83 loss """bceaftersigmoid""" +90 83 regularizer """no""" +90 83 optimizer """adam""" +90 83 training_loop """owa""" +90 83 negative_sampler """basic""" +90 83 evaluator """rankbased""" +90 84 dataset """kinships""" +90 84 model """conve""" +90 84 loss """bceaftersigmoid""" +90 84 regularizer """no""" +90 84 optimizer """adam""" +90 84 training_loop """owa""" +90 84 negative_sampler """basic""" +90 84 evaluator """rankbased""" +90 85 dataset 
"""kinships""" +90 85 model """conve""" +90 85 loss """bceaftersigmoid""" +90 85 regularizer """no""" +90 85 optimizer """adam""" +90 85 training_loop """owa""" +90 85 negative_sampler """basic""" +90 85 evaluator """rankbased""" +90 86 dataset """kinships""" +90 86 model """conve""" +90 86 loss """bceaftersigmoid""" +90 86 regularizer """no""" +90 86 optimizer """adam""" +90 86 training_loop """owa""" +90 86 negative_sampler """basic""" +90 86 evaluator """rankbased""" +90 87 dataset """kinships""" +90 87 model """conve""" +90 87 loss """bceaftersigmoid""" +90 87 regularizer """no""" +90 87 optimizer """adam""" +90 87 training_loop """owa""" +90 87 negative_sampler """basic""" +90 87 evaluator """rankbased""" +90 88 dataset """kinships""" +90 88 model """conve""" +90 88 loss """bceaftersigmoid""" +90 88 regularizer """no""" +90 88 optimizer """adam""" +90 88 training_loop """owa""" +90 88 negative_sampler """basic""" +90 88 evaluator """rankbased""" +90 89 dataset """kinships""" +90 89 model """conve""" +90 89 loss """bceaftersigmoid""" +90 89 regularizer """no""" +90 89 optimizer """adam""" +90 89 training_loop """owa""" +90 89 negative_sampler """basic""" +90 89 evaluator """rankbased""" +90 90 dataset """kinships""" +90 90 model """conve""" +90 90 loss """bceaftersigmoid""" +90 90 regularizer """no""" +90 90 optimizer """adam""" +90 90 training_loop """owa""" +90 90 negative_sampler """basic""" +90 90 evaluator """rankbased""" +90 91 dataset """kinships""" +90 91 model """conve""" +90 91 loss """bceaftersigmoid""" +90 91 regularizer """no""" +90 91 optimizer """adam""" +90 91 training_loop """owa""" +90 91 negative_sampler """basic""" +90 91 evaluator """rankbased""" +90 92 dataset """kinships""" +90 92 model """conve""" +90 92 loss """bceaftersigmoid""" +90 92 regularizer """no""" +90 92 optimizer """adam""" +90 92 training_loop """owa""" +90 92 negative_sampler """basic""" +90 92 evaluator """rankbased""" +90 93 dataset """kinships""" +90 93 model """conve""" 
+90 93 loss """bceaftersigmoid""" +90 93 regularizer """no""" +90 93 optimizer """adam""" +90 93 training_loop """owa""" +90 93 negative_sampler """basic""" +90 93 evaluator """rankbased""" +90 94 dataset """kinships""" +90 94 model """conve""" +90 94 loss """bceaftersigmoid""" +90 94 regularizer """no""" +90 94 optimizer """adam""" +90 94 training_loop """owa""" +90 94 negative_sampler """basic""" +90 94 evaluator """rankbased""" +90 95 dataset """kinships""" +90 95 model """conve""" +90 95 loss """bceaftersigmoid""" +90 95 regularizer """no""" +90 95 optimizer """adam""" +90 95 training_loop """owa""" +90 95 negative_sampler """basic""" +90 95 evaluator """rankbased""" +90 96 dataset """kinships""" +90 96 model """conve""" +90 96 loss """bceaftersigmoid""" +90 96 regularizer """no""" +90 96 optimizer """adam""" +90 96 training_loop """owa""" +90 96 negative_sampler """basic""" +90 96 evaluator """rankbased""" +90 97 dataset """kinships""" +90 97 model """conve""" +90 97 loss """bceaftersigmoid""" +90 97 regularizer """no""" +90 97 optimizer """adam""" +90 97 training_loop """owa""" +90 97 negative_sampler """basic""" +90 97 evaluator """rankbased""" +90 98 dataset """kinships""" +90 98 model """conve""" +90 98 loss """bceaftersigmoid""" +90 98 regularizer """no""" +90 98 optimizer """adam""" +90 98 training_loop """owa""" +90 98 negative_sampler """basic""" +90 98 evaluator """rankbased""" +90 99 dataset """kinships""" +90 99 model """conve""" +90 99 loss """bceaftersigmoid""" +90 99 regularizer """no""" +90 99 optimizer """adam""" +90 99 training_loop """owa""" +90 99 negative_sampler """basic""" +90 99 evaluator """rankbased""" +90 100 dataset """kinships""" +90 100 model """conve""" +90 100 loss """bceaftersigmoid""" +90 100 regularizer """no""" +90 100 optimizer """adam""" +90 100 training_loop """owa""" +90 100 negative_sampler """basic""" +90 100 evaluator """rankbased""" +91 1 model.output_channels 49.0 +91 1 model.input_dropout 0.28974952835792517 +91 1 
model.output_dropout 0.40733193825703823 +91 1 model.feature_map_dropout 0.4577473780980889 +91 1 model.embedding_dim 0.0 +91 1 optimizer.lr 0.0505207785878461 +91 1 negative_sampler.num_negs_per_pos 34.0 +91 1 training.batch_size 1.0 +91 2 model.output_channels 41.0 +91 2 model.input_dropout 0.46768391326111564 +91 2 model.output_dropout 0.4599360225524994 +91 2 model.feature_map_dropout 0.05062164276905373 +91 2 model.embedding_dim 1.0 +91 2 optimizer.lr 0.08319455932137483 +91 2 negative_sampler.num_negs_per_pos 50.0 +91 2 training.batch_size 1.0 +91 3 model.output_channels 35.0 +91 3 model.input_dropout 0.26452535439787445 +91 3 model.output_dropout 0.3772009191227998 +91 3 model.feature_map_dropout 0.17395848902520644 +91 3 model.embedding_dim 0.0 +91 3 optimizer.lr 0.013394386373278645 +91 3 negative_sampler.num_negs_per_pos 39.0 +91 3 training.batch_size 0.0 +91 4 model.output_channels 60.0 +91 4 model.input_dropout 0.06292897522301044 +91 4 model.output_dropout 0.3980653818005752 +91 4 model.feature_map_dropout 0.4847555974032816 +91 4 model.embedding_dim 1.0 +91 4 optimizer.lr 0.012510131974558318 +91 4 negative_sampler.num_negs_per_pos 61.0 +91 4 training.batch_size 0.0 +91 5 model.output_channels 57.0 +91 5 model.input_dropout 0.3703988478014706 +91 5 model.output_dropout 0.3271535462001669 +91 5 model.feature_map_dropout 0.17813942617212886 +91 5 model.embedding_dim 0.0 +91 5 optimizer.lr 0.0016899547228734363 +91 5 negative_sampler.num_negs_per_pos 92.0 +91 5 training.batch_size 1.0 +91 6 model.output_channels 44.0 +91 6 model.input_dropout 0.21636229042434918 +91 6 model.output_dropout 0.1135968668768787 +91 6 model.feature_map_dropout 0.4290108111736091 +91 6 model.embedding_dim 2.0 +91 6 optimizer.lr 0.023411300371425272 +91 6 negative_sampler.num_negs_per_pos 44.0 +91 6 training.batch_size 1.0 +91 7 model.output_channels 49.0 +91 7 model.input_dropout 0.12755561308078817 +91 7 model.output_dropout 0.25431701986779 +91 7 model.feature_map_dropout 
0.42671957243864667 +91 7 model.embedding_dim 0.0 +91 7 optimizer.lr 0.005359339400142008 +91 7 negative_sampler.num_negs_per_pos 38.0 +91 7 training.batch_size 0.0 +91 8 model.output_channels 64.0 +91 8 model.input_dropout 0.35939154652374256 +91 8 model.output_dropout 0.23651801882080187 +91 8 model.feature_map_dropout 0.08309298200885601 +91 8 model.embedding_dim 1.0 +91 8 optimizer.lr 0.041590572416833865 +91 8 negative_sampler.num_negs_per_pos 61.0 +91 8 training.batch_size 0.0 +91 9 model.output_channels 47.0 +91 9 model.input_dropout 0.44605324714596745 +91 9 model.output_dropout 0.004869290652251612 +91 9 model.feature_map_dropout 0.2152703468150045 +91 9 model.embedding_dim 0.0 +91 9 optimizer.lr 0.027431745691451466 +91 9 negative_sampler.num_negs_per_pos 53.0 +91 9 training.batch_size 2.0 +91 10 model.output_channels 51.0 +91 10 model.input_dropout 0.05182570907141276 +91 10 model.output_dropout 0.4049623665072224 +91 10 model.feature_map_dropout 0.32453684589512966 +91 10 model.embedding_dim 1.0 +91 10 optimizer.lr 0.0011681882615682422 +91 10 negative_sampler.num_negs_per_pos 24.0 +91 10 training.batch_size 1.0 +91 11 model.output_channels 22.0 +91 11 model.input_dropout 0.2230914090374675 +91 11 model.output_dropout 0.2509462762946177 +91 11 model.feature_map_dropout 0.23281452670830544 +91 11 model.embedding_dim 2.0 +91 11 optimizer.lr 0.010196353296882833 +91 11 negative_sampler.num_negs_per_pos 19.0 +91 11 training.batch_size 2.0 +91 12 model.output_channels 50.0 +91 12 model.input_dropout 0.4307956802702974 +91 12 model.output_dropout 0.2740009368656798 +91 12 model.feature_map_dropout 0.1344201888252904 +91 12 model.embedding_dim 0.0 +91 12 optimizer.lr 0.00368091339756929 +91 12 negative_sampler.num_negs_per_pos 25.0 +91 12 training.batch_size 0.0 +91 13 model.output_channels 54.0 +91 13 model.input_dropout 0.18078937184888316 +91 13 model.output_dropout 0.4167310124356705 +91 13 model.feature_map_dropout 0.2848200492996454 +91 13 
model.embedding_dim 2.0 +91 13 optimizer.lr 0.0019078728914422207 +91 13 negative_sampler.num_negs_per_pos 31.0 +91 13 training.batch_size 0.0 +91 14 model.output_channels 17.0 +91 14 model.input_dropout 0.22061053317147683 +91 14 model.output_dropout 0.13871761421518036 +91 14 model.feature_map_dropout 0.11998268617885022 +91 14 model.embedding_dim 1.0 +91 14 optimizer.lr 0.006734521721432735 +91 14 negative_sampler.num_negs_per_pos 57.0 +91 14 training.batch_size 1.0 +91 15 model.output_channels 48.0 +91 15 model.input_dropout 0.3055925976571996 +91 15 model.output_dropout 0.3887214693415743 +91 15 model.feature_map_dropout 0.19525829882215884 +91 15 model.embedding_dim 1.0 +91 15 optimizer.lr 0.0022059747139364285 +91 15 negative_sampler.num_negs_per_pos 25.0 +91 15 training.batch_size 0.0 +91 16 model.output_channels 61.0 +91 16 model.input_dropout 0.2785595446490172 +91 16 model.output_dropout 0.3381132793672875 +91 16 model.feature_map_dropout 0.4744035058589701 +91 16 model.embedding_dim 0.0 +91 16 optimizer.lr 0.023203401188795755 +91 16 negative_sampler.num_negs_per_pos 12.0 +91 16 training.batch_size 2.0 +91 17 model.output_channels 62.0 +91 17 model.input_dropout 0.06138143162403087 +91 17 model.output_dropout 0.3017232939247008 +91 17 model.feature_map_dropout 0.14052055363515437 +91 17 model.embedding_dim 2.0 +91 17 optimizer.lr 0.001038430328916751 +91 17 negative_sampler.num_negs_per_pos 61.0 +91 17 training.batch_size 2.0 +91 18 model.output_channels 38.0 +91 18 model.input_dropout 0.14459521732077624 +91 18 model.output_dropout 0.2238175047521161 +91 18 model.feature_map_dropout 0.18961394639826967 +91 18 model.embedding_dim 0.0 +91 18 optimizer.lr 0.0036340706138131757 +91 18 negative_sampler.num_negs_per_pos 87.0 +91 18 training.batch_size 0.0 +91 19 model.output_channels 40.0 +91 19 model.input_dropout 0.3399030896466125 +91 19 model.output_dropout 0.4434286319674572 +91 19 model.feature_map_dropout 0.4131217517130623 +91 19 model.embedding_dim 
0.0 +91 19 optimizer.lr 0.005067174683542423 +91 19 negative_sampler.num_negs_per_pos 26.0 +91 19 training.batch_size 0.0 +91 20 model.output_channels 36.0 +91 20 model.input_dropout 0.07908190732190196 +91 20 model.output_dropout 0.04015015881126799 +91 20 model.feature_map_dropout 0.3110259450799489 +91 20 model.embedding_dim 2.0 +91 20 optimizer.lr 0.004526516456999747 +91 20 negative_sampler.num_negs_per_pos 70.0 +91 20 training.batch_size 2.0 +91 21 model.output_channels 48.0 +91 21 model.input_dropout 0.37513549454996914 +91 21 model.output_dropout 0.36236056650740006 +91 21 model.feature_map_dropout 0.4558778258250689 +91 21 model.embedding_dim 1.0 +91 21 optimizer.lr 0.021957882629363237 +91 21 negative_sampler.num_negs_per_pos 4.0 +91 21 training.batch_size 2.0 +91 22 model.output_channels 30.0 +91 22 model.input_dropout 0.39528726561123106 +91 22 model.output_dropout 0.08678871429656643 +91 22 model.feature_map_dropout 0.08195528946038788 +91 22 model.embedding_dim 1.0 +91 22 optimizer.lr 0.0548570459424596 +91 22 negative_sampler.num_negs_per_pos 19.0 +91 22 training.batch_size 1.0 +91 23 model.output_channels 25.0 +91 23 model.input_dropout 0.47087025768576773 +91 23 model.output_dropout 0.4557104408936566 +91 23 model.feature_map_dropout 0.3774205581181998 +91 23 model.embedding_dim 2.0 +91 23 optimizer.lr 0.04846923377046196 +91 23 negative_sampler.num_negs_per_pos 39.0 +91 23 training.batch_size 1.0 +91 24 model.output_channels 17.0 +91 24 model.input_dropout 0.19289839840547152 +91 24 model.output_dropout 0.29758353607788207 +91 24 model.feature_map_dropout 0.23930258387649334 +91 24 model.embedding_dim 2.0 +91 24 optimizer.lr 0.0014285681298495995 +91 24 negative_sampler.num_negs_per_pos 15.0 +91 24 training.batch_size 1.0 +91 25 model.output_channels 24.0 +91 25 model.input_dropout 0.43149725326524824 +91 25 model.output_dropout 0.25714128694936417 +91 25 model.feature_map_dropout 0.22932762531301176 +91 25 model.embedding_dim 0.0 +91 25 
optimizer.lr 0.014928212826372352 +91 25 negative_sampler.num_negs_per_pos 21.0 +91 25 training.batch_size 2.0 +91 26 model.output_channels 18.0 +91 26 model.input_dropout 0.03463301244723066 +91 26 model.output_dropout 0.42544509954701876 +91 26 model.feature_map_dropout 0.49433577778262966 +91 26 model.embedding_dim 1.0 +91 26 optimizer.lr 0.09658577799265113 +91 26 negative_sampler.num_negs_per_pos 21.0 +91 26 training.batch_size 1.0 +91 27 model.output_channels 36.0 +91 27 model.input_dropout 0.2120220230964675 +91 27 model.output_dropout 0.24153072207501552 +91 27 model.feature_map_dropout 0.4566720689686689 +91 27 model.embedding_dim 0.0 +91 27 optimizer.lr 0.036479822768313845 +91 27 negative_sampler.num_negs_per_pos 78.0 +91 27 training.batch_size 1.0 +91 28 model.output_channels 18.0 +91 28 model.input_dropout 0.22178083432094792 +91 28 model.output_dropout 0.4059383917893277 +91 28 model.feature_map_dropout 0.15234317270225817 +91 28 model.embedding_dim 2.0 +91 28 optimizer.lr 0.00896845646974379 +91 28 negative_sampler.num_negs_per_pos 94.0 +91 28 training.batch_size 1.0 +91 29 model.output_channels 27.0 +91 29 model.input_dropout 0.006521390658581094 +91 29 model.output_dropout 0.2676864804404317 +91 29 model.feature_map_dropout 0.4918377454741498 +91 29 model.embedding_dim 2.0 +91 29 optimizer.lr 0.044652480761255144 +91 29 negative_sampler.num_negs_per_pos 49.0 +91 29 training.batch_size 0.0 +91 30 model.output_channels 60.0 +91 30 model.input_dropout 0.4226870783449093 +91 30 model.output_dropout 0.27147924819012625 +91 30 model.feature_map_dropout 0.025995921228303798 +91 30 model.embedding_dim 2.0 +91 30 optimizer.lr 0.0031867863936185146 +91 30 negative_sampler.num_negs_per_pos 46.0 +91 30 training.batch_size 2.0 +91 31 model.output_channels 58.0 +91 31 model.input_dropout 0.3202094044884769 +91 31 model.output_dropout 0.2838871774180774 +91 31 model.feature_map_dropout 0.35365790649466505 +91 31 model.embedding_dim 0.0 +91 31 optimizer.lr 
0.0022680702909925695 +91 31 negative_sampler.num_negs_per_pos 22.0 +91 31 training.batch_size 1.0 +91 32 model.output_channels 52.0 +91 32 model.input_dropout 0.454124038516189 +91 32 model.output_dropout 0.448483366439006 +91 32 model.feature_map_dropout 0.41579068690842347 +91 32 model.embedding_dim 2.0 +91 32 optimizer.lr 0.0010756992482750078 +91 32 negative_sampler.num_negs_per_pos 21.0 +91 32 training.batch_size 0.0 +91 33 model.output_channels 22.0 +91 33 model.input_dropout 0.22322734337106215 +91 33 model.output_dropout 0.4988548054180404 +91 33 model.feature_map_dropout 0.04475506639851362 +91 33 model.embedding_dim 2.0 +91 33 optimizer.lr 0.030723045485613675 +91 33 negative_sampler.num_negs_per_pos 18.0 +91 33 training.batch_size 1.0 +91 34 model.output_channels 51.0 +91 34 model.input_dropout 0.44186657153771397 +91 34 model.output_dropout 0.02882559515133487 +91 34 model.feature_map_dropout 0.4706746503727479 +91 34 model.embedding_dim 0.0 +91 34 optimizer.lr 0.00215866614109893 +91 34 negative_sampler.num_negs_per_pos 6.0 +91 34 training.batch_size 2.0 +91 35 model.output_channels 52.0 +91 35 model.input_dropout 0.030662817678547005 +91 35 model.output_dropout 0.4794126464069065 +91 35 model.feature_map_dropout 0.26002323504945357 +91 35 model.embedding_dim 0.0 +91 35 optimizer.lr 0.0015774271392082187 +91 35 negative_sampler.num_negs_per_pos 34.0 +91 35 training.batch_size 0.0 +91 36 model.output_channels 18.0 +91 36 model.input_dropout 0.3134640164661998 +91 36 model.output_dropout 0.14640280763925417 +91 36 model.feature_map_dropout 0.2808951617955936 +91 36 model.embedding_dim 2.0 +91 36 optimizer.lr 0.03915300001612194 +91 36 negative_sampler.num_negs_per_pos 34.0 +91 36 training.batch_size 1.0 +91 37 model.output_channels 48.0 +91 37 model.input_dropout 0.19398770456653652 +91 37 model.output_dropout 0.174038680241419 +91 37 model.feature_map_dropout 0.05828023912499847 +91 37 model.embedding_dim 2.0 +91 37 optimizer.lr 0.0010757070655004815 
+91 37 negative_sampler.num_negs_per_pos 89.0 +91 37 training.batch_size 2.0 +91 38 model.output_channels 49.0 +91 38 model.input_dropout 0.43493276555784793 +91 38 model.output_dropout 0.38456285577630944 +91 38 model.feature_map_dropout 0.2893509564446317 +91 38 model.embedding_dim 0.0 +91 38 optimizer.lr 0.010578117481415105 +91 38 negative_sampler.num_negs_per_pos 67.0 +91 38 training.batch_size 2.0 +91 39 model.output_channels 26.0 +91 39 model.input_dropout 0.07901505405831977 +91 39 model.output_dropout 0.16499231670876574 +91 39 model.feature_map_dropout 0.4906082074837782 +91 39 model.embedding_dim 1.0 +91 39 optimizer.lr 0.002654291484032562 +91 39 negative_sampler.num_negs_per_pos 52.0 +91 39 training.batch_size 2.0 +91 40 model.output_channels 62.0 +91 40 model.input_dropout 0.403722640291352 +91 40 model.output_dropout 0.11773026719479984 +91 40 model.feature_map_dropout 0.10371732285418178 +91 40 model.embedding_dim 1.0 +91 40 optimizer.lr 0.09965592130458874 +91 40 negative_sampler.num_negs_per_pos 37.0 +91 40 training.batch_size 1.0 +91 41 model.output_channels 19.0 +91 41 model.input_dropout 0.18195251153229985 +91 41 model.output_dropout 0.31999875999891625 +91 41 model.feature_map_dropout 0.098240379209044 +91 41 model.embedding_dim 2.0 +91 41 optimizer.lr 0.002538536510330266 +91 41 negative_sampler.num_negs_per_pos 10.0 +91 41 training.batch_size 1.0 +91 42 model.output_channels 48.0 +91 42 model.input_dropout 0.2737843085663492 +91 42 model.output_dropout 0.20833593196788353 +91 42 model.feature_map_dropout 0.45818092087345896 +91 42 model.embedding_dim 2.0 +91 42 optimizer.lr 0.0017995574702267057 +91 42 negative_sampler.num_negs_per_pos 76.0 +91 42 training.batch_size 1.0 +91 43 model.output_channels 47.0 +91 43 model.input_dropout 0.3214003489717164 +91 43 model.output_dropout 0.3094756585477771 +91 43 model.feature_map_dropout 0.2169362087109331 +91 43 model.embedding_dim 2.0 +91 43 optimizer.lr 0.03992817601788315 +91 43 
negative_sampler.num_negs_per_pos 47.0 +91 43 training.batch_size 0.0 +91 44 model.output_channels 49.0 +91 44 model.input_dropout 0.40107451416062534 +91 44 model.output_dropout 0.27280462413545364 +91 44 model.feature_map_dropout 0.2368787263282504 +91 44 model.embedding_dim 2.0 +91 44 optimizer.lr 0.08328847002935771 +91 44 negative_sampler.num_negs_per_pos 7.0 +91 44 training.batch_size 1.0 +91 45 model.output_channels 42.0 +91 45 model.input_dropout 0.4328715703285311 +91 45 model.output_dropout 0.14174758972083634 +91 45 model.feature_map_dropout 0.12090174404826509 +91 45 model.embedding_dim 0.0 +91 45 optimizer.lr 0.0077529899401715015 +91 45 negative_sampler.num_negs_per_pos 16.0 +91 45 training.batch_size 1.0 +91 46 model.output_channels 20.0 +91 46 model.input_dropout 0.3288402917875043 +91 46 model.output_dropout 0.03845152012320435 +91 46 model.feature_map_dropout 0.14329452207794763 +91 46 model.embedding_dim 2.0 +91 46 optimizer.lr 0.026066747278355798 +91 46 negative_sampler.num_negs_per_pos 71.0 +91 46 training.batch_size 0.0 +91 47 model.output_channels 26.0 +91 47 model.input_dropout 0.4703743371409431 +91 47 model.output_dropout 0.37447180064458463 +91 47 model.feature_map_dropout 0.15848954863901493 +91 47 model.embedding_dim 2.0 +91 47 optimizer.lr 0.006361014527662698 +91 47 negative_sampler.num_negs_per_pos 44.0 +91 47 training.batch_size 2.0 +91 48 model.output_channels 39.0 +91 48 model.input_dropout 0.39780046969495547 +91 48 model.output_dropout 0.3998310860859072 +91 48 model.feature_map_dropout 0.06291955985964298 +91 48 model.embedding_dim 0.0 +91 48 optimizer.lr 0.001918643112284935 +91 48 negative_sampler.num_negs_per_pos 50.0 +91 48 training.batch_size 2.0 +91 49 model.output_channels 50.0 +91 49 model.input_dropout 0.2794047766299161 +91 49 model.output_dropout 0.1077921255179421 +91 49 model.feature_map_dropout 0.014245801278744197 +91 49 model.embedding_dim 1.0 +91 49 optimizer.lr 0.002442268709541936 +91 49 
negative_sampler.num_negs_per_pos 82.0 +91 49 training.batch_size 0.0 +91 50 model.output_channels 33.0 +91 50 model.input_dropout 0.2788183431337979 +91 50 model.output_dropout 0.16955893392246046 +91 50 model.feature_map_dropout 0.48226609988552993 +91 50 model.embedding_dim 0.0 +91 50 optimizer.lr 0.0011046005564539375 +91 50 negative_sampler.num_negs_per_pos 51.0 +91 50 training.batch_size 1.0 +91 51 model.output_channels 62.0 +91 51 model.input_dropout 0.447032203112556 +91 51 model.output_dropout 0.45117707374288185 +91 51 model.feature_map_dropout 0.31605525956600605 +91 51 model.embedding_dim 2.0 +91 51 optimizer.lr 0.06270717007966911 +91 51 negative_sampler.num_negs_per_pos 10.0 +91 51 training.batch_size 1.0 +91 52 model.output_channels 48.0 +91 52 model.input_dropout 0.13905019841396182 +91 52 model.output_dropout 0.057640942849044974 +91 52 model.feature_map_dropout 0.05886412482570347 +91 52 model.embedding_dim 1.0 +91 52 optimizer.lr 0.039820067164815565 +91 52 negative_sampler.num_negs_per_pos 23.0 +91 52 training.batch_size 1.0 +91 53 model.output_channels 29.0 +91 53 model.input_dropout 0.2895331820104012 +91 53 model.output_dropout 0.49712340554619133 +91 53 model.feature_map_dropout 0.2297058508487142 +91 53 model.embedding_dim 2.0 +91 53 optimizer.lr 0.0012849605185947979 +91 53 negative_sampler.num_negs_per_pos 34.0 +91 53 training.batch_size 0.0 +91 54 model.output_channels 51.0 +91 54 model.input_dropout 0.10091664564484903 +91 54 model.output_dropout 0.40622442394805003 +91 54 model.feature_map_dropout 0.03173413755309584 +91 54 model.embedding_dim 0.0 +91 54 optimizer.lr 0.0013865973233417926 +91 54 negative_sampler.num_negs_per_pos 17.0 +91 54 training.batch_size 1.0 +91 55 model.output_channels 42.0 +91 55 model.input_dropout 0.11567469233007488 +91 55 model.output_dropout 0.2467154273360977 +91 55 model.feature_map_dropout 0.03883010437718648 +91 55 model.embedding_dim 2.0 +91 55 optimizer.lr 0.001982956440248754 +91 55 
negative_sampler.num_negs_per_pos 40.0 +91 55 training.batch_size 1.0 +91 56 model.output_channels 45.0 +91 56 model.input_dropout 0.04393450800076826 +91 56 model.output_dropout 0.19379973760332336 +91 56 model.feature_map_dropout 0.41290225730724195 +91 56 model.embedding_dim 0.0 +91 56 optimizer.lr 0.06882483805240895 +91 56 negative_sampler.num_negs_per_pos 53.0 +91 56 training.batch_size 1.0 +91 57 model.output_channels 41.0 +91 57 model.input_dropout 0.35874926816104064 +91 57 model.output_dropout 0.2967181917439368 +91 57 model.feature_map_dropout 0.13736241371920266 +91 57 model.embedding_dim 1.0 +91 57 optimizer.lr 0.00641372015222385 +91 57 negative_sampler.num_negs_per_pos 0.0 +91 57 training.batch_size 2.0 +91 58 model.output_channels 21.0 +91 58 model.input_dropout 0.16060479145151407 +91 58 model.output_dropout 0.2584889862871506 +91 58 model.feature_map_dropout 0.46039998482080474 +91 58 model.embedding_dim 0.0 +91 58 optimizer.lr 0.05653012218459204 +91 58 negative_sampler.num_negs_per_pos 80.0 +91 58 training.batch_size 1.0 +91 59 model.output_channels 59.0 +91 59 model.input_dropout 0.27642855324273347 +91 59 model.output_dropout 0.46384614580674954 +91 59 model.feature_map_dropout 0.2168150778618746 +91 59 model.embedding_dim 0.0 +91 59 optimizer.lr 0.03194808698509609 +91 59 negative_sampler.num_negs_per_pos 91.0 +91 59 training.batch_size 2.0 +91 60 model.output_channels 39.0 +91 60 model.input_dropout 0.4749586639608253 +91 60 model.output_dropout 0.00027643279203476423 +91 60 model.feature_map_dropout 0.43467553309702295 +91 60 model.embedding_dim 2.0 +91 60 optimizer.lr 0.024581604638863046 +91 60 negative_sampler.num_negs_per_pos 97.0 +91 60 training.batch_size 2.0 +91 61 model.output_channels 34.0 +91 61 model.input_dropout 0.11070039350999972 +91 61 model.output_dropout 0.03104351102771058 +91 61 model.feature_map_dropout 0.10689188266221394 +91 61 model.embedding_dim 2.0 +91 61 optimizer.lr 0.0018051814371707155 +91 61 
negative_sampler.num_negs_per_pos 40.0 +91 61 training.batch_size 0.0 +91 62 model.output_channels 64.0 +91 62 model.input_dropout 0.01175104917284231 +91 62 model.output_dropout 0.28007742919209794 +91 62 model.feature_map_dropout 0.3481188180715292 +91 62 model.embedding_dim 1.0 +91 62 optimizer.lr 0.03777637410678758 +91 62 negative_sampler.num_negs_per_pos 23.0 +91 62 training.batch_size 1.0 +91 63 model.output_channels 49.0 +91 63 model.input_dropout 0.3378001731152759 +91 63 model.output_dropout 0.4841116498234307 +91 63 model.feature_map_dropout 0.010198774360501373 +91 63 model.embedding_dim 0.0 +91 63 optimizer.lr 0.0026819440293578026 +91 63 negative_sampler.num_negs_per_pos 14.0 +91 63 training.batch_size 0.0 +91 64 model.output_channels 16.0 +91 64 model.input_dropout 0.21071428512179097 +91 64 model.output_dropout 0.01706542732684002 +91 64 model.feature_map_dropout 0.049898645880413484 +91 64 model.embedding_dim 0.0 +91 64 optimizer.lr 0.004626084911024034 +91 64 negative_sampler.num_negs_per_pos 33.0 +91 64 training.batch_size 2.0 +91 65 model.output_channels 57.0 +91 65 model.input_dropout 0.28766453291395927 +91 65 model.output_dropout 0.15524311477935182 +91 65 model.feature_map_dropout 0.1812517320342551 +91 65 model.embedding_dim 1.0 +91 65 optimizer.lr 0.028701633293978755 +91 65 negative_sampler.num_negs_per_pos 71.0 +91 65 training.batch_size 1.0 +91 66 model.output_channels 53.0 +91 66 model.input_dropout 0.2780789914294439 +91 66 model.output_dropout 0.04221332047337589 +91 66 model.feature_map_dropout 0.10515265170649879 +91 66 model.embedding_dim 1.0 +91 66 optimizer.lr 0.032754257763144404 +91 66 negative_sampler.num_negs_per_pos 81.0 +91 66 training.batch_size 1.0 +91 67 model.output_channels 37.0 +91 67 model.input_dropout 0.4463072945663591 +91 67 model.output_dropout 0.2552793705335345 +91 67 model.feature_map_dropout 0.37052406895182144 +91 67 model.embedding_dim 2.0 +91 67 optimizer.lr 0.019493418961599545 +91 67 
negative_sampler.num_negs_per_pos 99.0 +91 67 training.batch_size 2.0 +91 68 model.output_channels 27.0 +91 68 model.input_dropout 0.3338191289277459 +91 68 model.output_dropout 0.3595270159941577 +91 68 model.feature_map_dropout 0.3931347651412791 +91 68 model.embedding_dim 1.0 +91 68 optimizer.lr 0.0859516047469832 +91 68 negative_sampler.num_negs_per_pos 16.0 +91 68 training.batch_size 0.0 +91 69 model.output_channels 60.0 +91 69 model.input_dropout 0.23399720656405026 +91 69 model.output_dropout 0.021148249163515764 +91 69 model.feature_map_dropout 0.09036808169380789 +91 69 model.embedding_dim 2.0 +91 69 optimizer.lr 0.06885315503902559 +91 69 negative_sampler.num_negs_per_pos 24.0 +91 69 training.batch_size 0.0 +91 70 model.output_channels 49.0 +91 70 model.input_dropout 0.11095499890030441 +91 70 model.output_dropout 0.19051058635064622 +91 70 model.feature_map_dropout 0.3288719563185023 +91 70 model.embedding_dim 2.0 +91 70 optimizer.lr 0.009530273179151127 +91 70 negative_sampler.num_negs_per_pos 3.0 +91 70 training.batch_size 2.0 +91 71 model.output_channels 43.0 +91 71 model.input_dropout 0.42731220273039777 +91 71 model.output_dropout 0.15452852055421712 +91 71 model.feature_map_dropout 0.19441847560509018 +91 71 model.embedding_dim 0.0 +91 71 optimizer.lr 0.019975061580502947 +91 71 negative_sampler.num_negs_per_pos 87.0 +91 71 training.batch_size 0.0 +91 72 model.output_channels 46.0 +91 72 model.input_dropout 0.40026466902432684 +91 72 model.output_dropout 0.3864552479308243 +91 72 model.feature_map_dropout 0.22891194307734208 +91 72 model.embedding_dim 2.0 +91 72 optimizer.lr 0.051497425786614766 +91 72 negative_sampler.num_negs_per_pos 68.0 +91 72 training.batch_size 0.0 +91 73 model.output_channels 49.0 +91 73 model.input_dropout 0.18341383765272684 +91 73 model.output_dropout 0.12339436937172066 +91 73 model.feature_map_dropout 0.32328391020595626 +91 73 model.embedding_dim 2.0 +91 73 optimizer.lr 0.016317260468675574 +91 73 
negative_sampler.num_negs_per_pos 73.0 +91 73 training.batch_size 2.0 +91 74 model.output_channels 20.0 +91 74 model.input_dropout 0.43945988744393677 +91 74 model.output_dropout 0.4093166936800442 +91 74 model.feature_map_dropout 0.022563835052553105 +91 74 model.embedding_dim 0.0 +91 74 optimizer.lr 0.02438604178410073 +91 74 negative_sampler.num_negs_per_pos 95.0 +91 74 training.batch_size 0.0 +91 75 model.output_channels 22.0 +91 75 model.input_dropout 0.10835226155732264 +91 75 model.output_dropout 0.07777692657110163 +91 75 model.feature_map_dropout 0.27990190155660066 +91 75 model.embedding_dim 2.0 +91 75 optimizer.lr 0.0012591387661111018 +91 75 negative_sampler.num_negs_per_pos 15.0 +91 75 training.batch_size 2.0 +91 76 model.output_channels 62.0 +91 76 model.input_dropout 0.1343475677354513 +91 76 model.output_dropout 0.049330124281976695 +91 76 model.feature_map_dropout 0.44697207215894547 +91 76 model.embedding_dim 2.0 +91 76 optimizer.lr 0.023392757914654897 +91 76 negative_sampler.num_negs_per_pos 27.0 +91 76 training.batch_size 0.0 +91 77 model.output_channels 64.0 +91 77 model.input_dropout 0.14012108716433563 +91 77 model.output_dropout 0.18232024140290154 +91 77 model.feature_map_dropout 0.3728493688235644 +91 77 model.embedding_dim 0.0 +91 77 optimizer.lr 0.009494391641711397 +91 77 negative_sampler.num_negs_per_pos 86.0 +91 77 training.batch_size 2.0 +91 78 model.output_channels 42.0 +91 78 model.input_dropout 0.12901785640993657 +91 78 model.output_dropout 0.21349938220050435 +91 78 model.feature_map_dropout 0.06723805601781563 +91 78 model.embedding_dim 0.0 +91 78 optimizer.lr 0.007442292266416992 +91 78 negative_sampler.num_negs_per_pos 57.0 +91 78 training.batch_size 0.0 +91 79 model.output_channels 59.0 +91 79 model.input_dropout 0.21823149083945687 +91 79 model.output_dropout 0.25647940599674124 +91 79 model.feature_map_dropout 0.05698949687753191 +91 79 model.embedding_dim 0.0 +91 79 optimizer.lr 0.07046706668846137 +91 79 
negative_sampler.num_negs_per_pos 95.0 +91 79 training.batch_size 2.0 +91 80 model.output_channels 56.0 +91 80 model.input_dropout 0.4737025190604505 +91 80 model.output_dropout 0.1391175028722496 +91 80 model.feature_map_dropout 0.17736685640527772 +91 80 model.embedding_dim 0.0 +91 80 optimizer.lr 0.0014299399246241963 +91 80 negative_sampler.num_negs_per_pos 26.0 +91 80 training.batch_size 2.0 +91 81 model.output_channels 47.0 +91 81 model.input_dropout 0.10015040607588599 +91 81 model.output_dropout 0.04722360184886992 +91 81 model.feature_map_dropout 0.17232732859957 +91 81 model.embedding_dim 1.0 +91 81 optimizer.lr 0.019828860069535728 +91 81 negative_sampler.num_negs_per_pos 92.0 +91 81 training.batch_size 1.0 +91 82 model.output_channels 46.0 +91 82 model.input_dropout 0.07810648962789163 +91 82 model.output_dropout 0.41758239744473324 +91 82 model.feature_map_dropout 0.01178996774026736 +91 82 model.embedding_dim 0.0 +91 82 optimizer.lr 0.042503897648007216 +91 82 negative_sampler.num_negs_per_pos 75.0 +91 82 training.batch_size 0.0 +91 83 model.output_channels 23.0 +91 83 model.input_dropout 0.19913225368359155 +91 83 model.output_dropout 0.3733767024134744 +91 83 model.feature_map_dropout 0.18003492256473996 +91 83 model.embedding_dim 0.0 +91 83 optimizer.lr 0.06693963383773632 +91 83 negative_sampler.num_negs_per_pos 75.0 +91 83 training.batch_size 2.0 +91 84 model.output_channels 45.0 +91 84 model.input_dropout 0.007030841584558578 +91 84 model.output_dropout 0.22170354594344527 +91 84 model.feature_map_dropout 0.17328415319914348 +91 84 model.embedding_dim 0.0 +91 84 optimizer.lr 0.00782520416467484 +91 84 negative_sampler.num_negs_per_pos 69.0 +91 84 training.batch_size 2.0 +91 85 model.output_channels 31.0 +91 85 model.input_dropout 0.4925830995102525 +91 85 model.output_dropout 0.1431164026899377 +91 85 model.feature_map_dropout 0.09581005161112022 +91 85 model.embedding_dim 2.0 +91 85 optimizer.lr 0.014540349680286741 +91 85 
negative_sampler.num_negs_per_pos 99.0 +91 85 training.batch_size 1.0 +91 86 model.output_channels 31.0 +91 86 model.input_dropout 0.08844001105547233 +91 86 model.output_dropout 0.04372596954826341 +91 86 model.feature_map_dropout 0.4634042308705319 +91 86 model.embedding_dim 2.0 +91 86 optimizer.lr 0.06382642911064898 +91 86 negative_sampler.num_negs_per_pos 43.0 +91 86 training.batch_size 2.0 +91 87 model.output_channels 22.0 +91 87 model.input_dropout 0.3582131516434101 +91 87 model.output_dropout 0.397270849028888 +91 87 model.feature_map_dropout 0.15940983525850316 +91 87 model.embedding_dim 0.0 +91 87 optimizer.lr 0.002841839404989448 +91 87 negative_sampler.num_negs_per_pos 96.0 +91 87 training.batch_size 0.0 +91 88 model.output_channels 30.0 +91 88 model.input_dropout 0.24382104875053096 +91 88 model.output_dropout 0.176796900956327 +91 88 model.feature_map_dropout 0.395392844752812 +91 88 model.embedding_dim 2.0 +91 88 optimizer.lr 0.0015502331409748256 +91 88 negative_sampler.num_negs_per_pos 61.0 +91 88 training.batch_size 0.0 +91 89 model.output_channels 56.0 +91 89 model.input_dropout 0.2029530614743591 +91 89 model.output_dropout 0.3497647462275133 +91 89 model.feature_map_dropout 0.3254070694390984 +91 89 model.embedding_dim 2.0 +91 89 optimizer.lr 0.0042219977344835685 +91 89 negative_sampler.num_negs_per_pos 91.0 +91 89 training.batch_size 1.0 +91 90 model.output_channels 39.0 +91 90 model.input_dropout 0.33639692421699596 +91 90 model.output_dropout 0.3088017806380687 +91 90 model.feature_map_dropout 0.3260705758681772 +91 90 model.embedding_dim 2.0 +91 90 optimizer.lr 0.001687017502382262 +91 90 negative_sampler.num_negs_per_pos 3.0 +91 90 training.batch_size 0.0 +91 91 model.output_channels 48.0 +91 91 model.input_dropout 0.08840031566744777 +91 91 model.output_dropout 0.22901108195531295 +91 91 model.feature_map_dropout 0.49746605266370303 +91 91 model.embedding_dim 0.0 +91 91 optimizer.lr 0.004471859413229224 +91 91 
negative_sampler.num_negs_per_pos 67.0 +91 91 training.batch_size 1.0 +91 92 model.output_channels 56.0 +91 92 model.input_dropout 0.11184279411674308 +91 92 model.output_dropout 0.2724321506464072 +91 92 model.feature_map_dropout 0.49626773135681485 +91 92 model.embedding_dim 2.0 +91 92 optimizer.lr 0.00835493705722265 +91 92 negative_sampler.num_negs_per_pos 67.0 +91 92 training.batch_size 2.0 +91 93 model.output_channels 64.0 +91 93 model.input_dropout 0.44752389416575783 +91 93 model.output_dropout 0.46460966372210555 +91 93 model.feature_map_dropout 0.11610414568199884 +91 93 model.embedding_dim 1.0 +91 93 optimizer.lr 0.005944849666613967 +91 93 negative_sampler.num_negs_per_pos 91.0 +91 93 training.batch_size 1.0 +91 94 model.output_channels 28.0 +91 94 model.input_dropout 0.12535705174212358 +91 94 model.output_dropout 0.06256118528894472 +91 94 model.feature_map_dropout 0.23619828840505774 +91 94 model.embedding_dim 2.0 +91 94 optimizer.lr 0.0011877772832903997 +91 94 negative_sampler.num_negs_per_pos 11.0 +91 94 training.batch_size 2.0 +91 95 model.output_channels 54.0 +91 95 model.input_dropout 0.10127873078657856 +91 95 model.output_dropout 0.29566118719354184 +91 95 model.feature_map_dropout 0.3669265318739469 +91 95 model.embedding_dim 1.0 +91 95 optimizer.lr 0.008270505660975775 +91 95 negative_sampler.num_negs_per_pos 46.0 +91 95 training.batch_size 1.0 +91 96 model.output_channels 63.0 +91 96 model.input_dropout 0.16591656979717623 +91 96 model.output_dropout 0.2520676916388593 +91 96 model.feature_map_dropout 0.3866347266598441 +91 96 model.embedding_dim 0.0 +91 96 optimizer.lr 0.0037893814656324693 +91 96 negative_sampler.num_negs_per_pos 25.0 +91 96 training.batch_size 0.0 +91 97 model.output_channels 50.0 +91 97 model.input_dropout 0.0021413827457158474 +91 97 model.output_dropout 0.09353241961866265 +91 97 model.feature_map_dropout 0.27145663535487446 +91 97 model.embedding_dim 1.0 +91 97 optimizer.lr 0.0025092112597646563 +91 97 
negative_sampler.num_negs_per_pos 3.0 +91 97 training.batch_size 1.0 +91 98 model.output_channels 29.0 +91 98 model.input_dropout 0.04502340891644496 +91 98 model.output_dropout 0.3187135872014647 +91 98 model.feature_map_dropout 0.415769278220145 +91 98 model.embedding_dim 2.0 +91 98 optimizer.lr 0.0016472520260229627 +91 98 negative_sampler.num_negs_per_pos 58.0 +91 98 training.batch_size 1.0 +91 99 model.output_channels 27.0 +91 99 model.input_dropout 0.08026225832303718 +91 99 model.output_dropout 0.20683003803825661 +91 99 model.feature_map_dropout 0.39527706903532883 +91 99 model.embedding_dim 2.0 +91 99 optimizer.lr 0.004708904953787453 +91 99 negative_sampler.num_negs_per_pos 46.0 +91 99 training.batch_size 1.0 +91 100 model.output_channels 61.0 +91 100 model.input_dropout 0.3750383902724858 +91 100 model.output_dropout 0.37867543174132207 +91 100 model.feature_map_dropout 0.21671207877261756 +91 100 model.embedding_dim 2.0 +91 100 optimizer.lr 0.03738053932795214 +91 100 negative_sampler.num_negs_per_pos 91.0 +91 100 training.batch_size 2.0 +91 1 dataset """kinships""" +91 1 model """conve""" +91 1 loss """softplus""" +91 1 regularizer """no""" +91 1 optimizer """adam""" +91 1 training_loop """owa""" +91 1 negative_sampler """basic""" +91 1 evaluator """rankbased""" +91 2 dataset """kinships""" +91 2 model """conve""" +91 2 loss """softplus""" +91 2 regularizer """no""" +91 2 optimizer """adam""" +91 2 training_loop """owa""" +91 2 negative_sampler """basic""" +91 2 evaluator """rankbased""" +91 3 dataset """kinships""" +91 3 model """conve""" +91 3 loss """softplus""" +91 3 regularizer """no""" +91 3 optimizer """adam""" +91 3 training_loop """owa""" +91 3 negative_sampler """basic""" +91 3 evaluator """rankbased""" +91 4 dataset """kinships""" +91 4 model """conve""" +91 4 loss """softplus""" +91 4 regularizer """no""" +91 4 optimizer """adam""" +91 4 training_loop """owa""" +91 4 negative_sampler """basic""" +91 4 evaluator """rankbased""" +91 5 dataset 
"""kinships""" +91 5 model """conve""" +91 5 loss """softplus""" +91 5 regularizer """no""" +91 5 optimizer """adam""" +91 5 training_loop """owa""" +91 5 negative_sampler """basic""" +91 5 evaluator """rankbased""" +91 6 dataset """kinships""" +91 6 model """conve""" +91 6 loss """softplus""" +91 6 regularizer """no""" +91 6 optimizer """adam""" +91 6 training_loop """owa""" +91 6 negative_sampler """basic""" +91 6 evaluator """rankbased""" +91 7 dataset """kinships""" +91 7 model """conve""" +91 7 loss """softplus""" +91 7 regularizer """no""" +91 7 optimizer """adam""" +91 7 training_loop """owa""" +91 7 negative_sampler """basic""" +91 7 evaluator """rankbased""" +91 8 dataset """kinships""" +91 8 model """conve""" +91 8 loss """softplus""" +91 8 regularizer """no""" +91 8 optimizer """adam""" +91 8 training_loop """owa""" +91 8 negative_sampler """basic""" +91 8 evaluator """rankbased""" +91 9 dataset """kinships""" +91 9 model """conve""" +91 9 loss """softplus""" +91 9 regularizer """no""" +91 9 optimizer """adam""" +91 9 training_loop """owa""" +91 9 negative_sampler """basic""" +91 9 evaluator """rankbased""" +91 10 dataset """kinships""" +91 10 model """conve""" +91 10 loss """softplus""" +91 10 regularizer """no""" +91 10 optimizer """adam""" +91 10 training_loop """owa""" +91 10 negative_sampler """basic""" +91 10 evaluator """rankbased""" +91 11 dataset """kinships""" +91 11 model """conve""" +91 11 loss """softplus""" +91 11 regularizer """no""" +91 11 optimizer """adam""" +91 11 training_loop """owa""" +91 11 negative_sampler """basic""" +91 11 evaluator """rankbased""" +91 12 dataset """kinships""" +91 12 model """conve""" +91 12 loss """softplus""" +91 12 regularizer """no""" +91 12 optimizer """adam""" +91 12 training_loop """owa""" +91 12 negative_sampler """basic""" +91 12 evaluator """rankbased""" +91 13 dataset """kinships""" +91 13 model """conve""" +91 13 loss """softplus""" +91 13 regularizer """no""" +91 13 optimizer """adam""" +91 13 
training_loop """owa""" +91 13 negative_sampler """basic""" +91 13 evaluator """rankbased""" +91 14 dataset """kinships""" +91 14 model """conve""" +91 14 loss """softplus""" +91 14 regularizer """no""" +91 14 optimizer """adam""" +91 14 training_loop """owa""" +91 14 negative_sampler """basic""" +91 14 evaluator """rankbased""" +91 15 dataset """kinships""" +91 15 model """conve""" +91 15 loss """softplus""" +91 15 regularizer """no""" +91 15 optimizer """adam""" +91 15 training_loop """owa""" +91 15 negative_sampler """basic""" +91 15 evaluator """rankbased""" +91 16 dataset """kinships""" +91 16 model """conve""" +91 16 loss """softplus""" +91 16 regularizer """no""" +91 16 optimizer """adam""" +91 16 training_loop """owa""" +91 16 negative_sampler """basic""" +91 16 evaluator """rankbased""" +91 17 dataset """kinships""" +91 17 model """conve""" +91 17 loss """softplus""" +91 17 regularizer """no""" +91 17 optimizer """adam""" +91 17 training_loop """owa""" +91 17 negative_sampler """basic""" +91 17 evaluator """rankbased""" +91 18 dataset """kinships""" +91 18 model """conve""" +91 18 loss """softplus""" +91 18 regularizer """no""" +91 18 optimizer """adam""" +91 18 training_loop """owa""" +91 18 negative_sampler """basic""" +91 18 evaluator """rankbased""" +91 19 dataset """kinships""" +91 19 model """conve""" +91 19 loss """softplus""" +91 19 regularizer """no""" +91 19 optimizer """adam""" +91 19 training_loop """owa""" +91 19 negative_sampler """basic""" +91 19 evaluator """rankbased""" +91 20 dataset """kinships""" +91 20 model """conve""" +91 20 loss """softplus""" +91 20 regularizer """no""" +91 20 optimizer """adam""" +91 20 training_loop """owa""" +91 20 negative_sampler """basic""" +91 20 evaluator """rankbased""" +91 21 dataset """kinships""" +91 21 model """conve""" +91 21 loss """softplus""" +91 21 regularizer """no""" +91 21 optimizer """adam""" +91 21 training_loop """owa""" +91 21 negative_sampler """basic""" +91 21 evaluator """rankbased""" 
+91 22 dataset """kinships""" +91 22 model """conve""" +91 22 loss """softplus""" +91 22 regularizer """no""" +91 22 optimizer """adam""" +91 22 training_loop """owa""" +91 22 negative_sampler """basic""" +91 22 evaluator """rankbased""" +91 23 dataset """kinships""" +91 23 model """conve""" +91 23 loss """softplus""" +91 23 regularizer """no""" +91 23 optimizer """adam""" +91 23 training_loop """owa""" +91 23 negative_sampler """basic""" +91 23 evaluator """rankbased""" +91 24 dataset """kinships""" +91 24 model """conve""" +91 24 loss """softplus""" +91 24 regularizer """no""" +91 24 optimizer """adam""" +91 24 training_loop """owa""" +91 24 negative_sampler """basic""" +91 24 evaluator """rankbased""" +91 25 dataset """kinships""" +91 25 model """conve""" +91 25 loss """softplus""" +91 25 regularizer """no""" +91 25 optimizer """adam""" +91 25 training_loop """owa""" +91 25 negative_sampler """basic""" +91 25 evaluator """rankbased""" +91 26 dataset """kinships""" +91 26 model """conve""" +91 26 loss """softplus""" +91 26 regularizer """no""" +91 26 optimizer """adam""" +91 26 training_loop """owa""" +91 26 negative_sampler """basic""" +91 26 evaluator """rankbased""" +91 27 dataset """kinships""" +91 27 model """conve""" +91 27 loss """softplus""" +91 27 regularizer """no""" +91 27 optimizer """adam""" +91 27 training_loop """owa""" +91 27 negative_sampler """basic""" +91 27 evaluator """rankbased""" +91 28 dataset """kinships""" +91 28 model """conve""" +91 28 loss """softplus""" +91 28 regularizer """no""" +91 28 optimizer """adam""" +91 28 training_loop """owa""" +91 28 negative_sampler """basic""" +91 28 evaluator """rankbased""" +91 29 dataset """kinships""" +91 29 model """conve""" +91 29 loss """softplus""" +91 29 regularizer """no""" +91 29 optimizer """adam""" +91 29 training_loop """owa""" +91 29 negative_sampler """basic""" +91 29 evaluator """rankbased""" +91 30 dataset """kinships""" +91 30 model """conve""" +91 30 loss """softplus""" +91 30 
regularizer """no""" +91 30 optimizer """adam""" +91 30 training_loop """owa""" +91 30 negative_sampler """basic""" +91 30 evaluator """rankbased""" +91 31 dataset """kinships""" +91 31 model """conve""" +91 31 loss """softplus""" +91 31 regularizer """no""" +91 31 optimizer """adam""" +91 31 training_loop """owa""" +91 31 negative_sampler """basic""" +91 31 evaluator """rankbased""" +91 32 dataset """kinships""" +91 32 model """conve""" +91 32 loss """softplus""" +91 32 regularizer """no""" +91 32 optimizer """adam""" +91 32 training_loop """owa""" +91 32 negative_sampler """basic""" +91 32 evaluator """rankbased""" +91 33 dataset """kinships""" +91 33 model """conve""" +91 33 loss """softplus""" +91 33 regularizer """no""" +91 33 optimizer """adam""" +91 33 training_loop """owa""" +91 33 negative_sampler """basic""" +91 33 evaluator """rankbased""" +91 34 dataset """kinships""" +91 34 model """conve""" +91 34 loss """softplus""" +91 34 regularizer """no""" +91 34 optimizer """adam""" +91 34 training_loop """owa""" +91 34 negative_sampler """basic""" +91 34 evaluator """rankbased""" +91 35 dataset """kinships""" +91 35 model """conve""" +91 35 loss """softplus""" +91 35 regularizer """no""" +91 35 optimizer """adam""" +91 35 training_loop """owa""" +91 35 negative_sampler """basic""" +91 35 evaluator """rankbased""" +91 36 dataset """kinships""" +91 36 model """conve""" +91 36 loss """softplus""" +91 36 regularizer """no""" +91 36 optimizer """adam""" +91 36 training_loop """owa""" +91 36 negative_sampler """basic""" +91 36 evaluator """rankbased""" +91 37 dataset """kinships""" +91 37 model """conve""" +91 37 loss """softplus""" +91 37 regularizer """no""" +91 37 optimizer """adam""" +91 37 training_loop """owa""" +91 37 negative_sampler """basic""" +91 37 evaluator """rankbased""" +91 38 dataset """kinships""" +91 38 model """conve""" +91 38 loss """softplus""" +91 38 regularizer """no""" +91 38 optimizer """adam""" +91 38 training_loop """owa""" +91 38 
negative_sampler """basic""" +91 38 evaluator """rankbased""" +91 39 dataset """kinships""" +91 39 model """conve""" +91 39 loss """softplus""" +91 39 regularizer """no""" +91 39 optimizer """adam""" +91 39 training_loop """owa""" +91 39 negative_sampler """basic""" +91 39 evaluator """rankbased""" +91 40 dataset """kinships""" +91 40 model """conve""" +91 40 loss """softplus""" +91 40 regularizer """no""" +91 40 optimizer """adam""" +91 40 training_loop """owa""" +91 40 negative_sampler """basic""" +91 40 evaluator """rankbased""" +91 41 dataset """kinships""" +91 41 model """conve""" +91 41 loss """softplus""" +91 41 regularizer """no""" +91 41 optimizer """adam""" +91 41 training_loop """owa""" +91 41 negative_sampler """basic""" +91 41 evaluator """rankbased""" +91 42 dataset """kinships""" +91 42 model """conve""" +91 42 loss """softplus""" +91 42 regularizer """no""" +91 42 optimizer """adam""" +91 42 training_loop """owa""" +91 42 negative_sampler """basic""" +91 42 evaluator """rankbased""" +91 43 dataset """kinships""" +91 43 model """conve""" +91 43 loss """softplus""" +91 43 regularizer """no""" +91 43 optimizer """adam""" +91 43 training_loop """owa""" +91 43 negative_sampler """basic""" +91 43 evaluator """rankbased""" +91 44 dataset """kinships""" +91 44 model """conve""" +91 44 loss """softplus""" +91 44 regularizer """no""" +91 44 optimizer """adam""" +91 44 training_loop """owa""" +91 44 negative_sampler """basic""" +91 44 evaluator """rankbased""" +91 45 dataset """kinships""" +91 45 model """conve""" +91 45 loss """softplus""" +91 45 regularizer """no""" +91 45 optimizer """adam""" +91 45 training_loop """owa""" +91 45 negative_sampler """basic""" +91 45 evaluator """rankbased""" +91 46 dataset """kinships""" +91 46 model """conve""" +91 46 loss """softplus""" +91 46 regularizer """no""" +91 46 optimizer """adam""" +91 46 training_loop """owa""" +91 46 negative_sampler """basic""" +91 46 evaluator """rankbased""" +91 47 dataset """kinships""" +91 
47 model """conve""" +91 47 loss """softplus""" +91 47 regularizer """no""" +91 47 optimizer """adam""" +91 47 training_loop """owa""" +91 47 negative_sampler """basic""" +91 47 evaluator """rankbased""" +91 48 dataset """kinships""" +91 48 model """conve""" +91 48 loss """softplus""" +91 48 regularizer """no""" +91 48 optimizer """adam""" +91 48 training_loop """owa""" +91 48 negative_sampler """basic""" +91 48 evaluator """rankbased""" +91 49 dataset """kinships""" +91 49 model """conve""" +91 49 loss """softplus""" +91 49 regularizer """no""" +91 49 optimizer """adam""" +91 49 training_loop """owa""" +91 49 negative_sampler """basic""" +91 49 evaluator """rankbased""" +91 50 dataset """kinships""" +91 50 model """conve""" +91 50 loss """softplus""" +91 50 regularizer """no""" +91 50 optimizer """adam""" +91 50 training_loop """owa""" +91 50 negative_sampler """basic""" +91 50 evaluator """rankbased""" +91 51 dataset """kinships""" +91 51 model """conve""" +91 51 loss """softplus""" +91 51 regularizer """no""" +91 51 optimizer """adam""" +91 51 training_loop """owa""" +91 51 negative_sampler """basic""" +91 51 evaluator """rankbased""" +91 52 dataset """kinships""" +91 52 model """conve""" +91 52 loss """softplus""" +91 52 regularizer """no""" +91 52 optimizer """adam""" +91 52 training_loop """owa""" +91 52 negative_sampler """basic""" +91 52 evaluator """rankbased""" +91 53 dataset """kinships""" +91 53 model """conve""" +91 53 loss """softplus""" +91 53 regularizer """no""" +91 53 optimizer """adam""" +91 53 training_loop """owa""" +91 53 negative_sampler """basic""" +91 53 evaluator """rankbased""" +91 54 dataset """kinships""" +91 54 model """conve""" +91 54 loss """softplus""" +91 54 regularizer """no""" +91 54 optimizer """adam""" +91 54 training_loop """owa""" +91 54 negative_sampler """basic""" +91 54 evaluator """rankbased""" +91 55 dataset """kinships""" +91 55 model """conve""" +91 55 loss """softplus""" +91 55 regularizer """no""" +91 55 optimizer 
"""adam""" +91 55 training_loop """owa""" +91 55 negative_sampler """basic""" +91 55 evaluator """rankbased""" +91 56 dataset """kinships""" +91 56 model """conve""" +91 56 loss """softplus""" +91 56 regularizer """no""" +91 56 optimizer """adam""" +91 56 training_loop """owa""" +91 56 negative_sampler """basic""" +91 56 evaluator """rankbased""" +91 57 dataset """kinships""" +91 57 model """conve""" +91 57 loss """softplus""" +91 57 regularizer """no""" +91 57 optimizer """adam""" +91 57 training_loop """owa""" +91 57 negative_sampler """basic""" +91 57 evaluator """rankbased""" +91 58 dataset """kinships""" +91 58 model """conve""" +91 58 loss """softplus""" +91 58 regularizer """no""" +91 58 optimizer """adam""" +91 58 training_loop """owa""" +91 58 negative_sampler """basic""" +91 58 evaluator """rankbased""" +91 59 dataset """kinships""" +91 59 model """conve""" +91 59 loss """softplus""" +91 59 regularizer """no""" +91 59 optimizer """adam""" +91 59 training_loop """owa""" +91 59 negative_sampler """basic""" +91 59 evaluator """rankbased""" +91 60 dataset """kinships""" +91 60 model """conve""" +91 60 loss """softplus""" +91 60 regularizer """no""" +91 60 optimizer """adam""" +91 60 training_loop """owa""" +91 60 negative_sampler """basic""" +91 60 evaluator """rankbased""" +91 61 dataset """kinships""" +91 61 model """conve""" +91 61 loss """softplus""" +91 61 regularizer """no""" +91 61 optimizer """adam""" +91 61 training_loop """owa""" +91 61 negative_sampler """basic""" +91 61 evaluator """rankbased""" +91 62 dataset """kinships""" +91 62 model """conve""" +91 62 loss """softplus""" +91 62 regularizer """no""" +91 62 optimizer """adam""" +91 62 training_loop """owa""" +91 62 negative_sampler """basic""" +91 62 evaluator """rankbased""" +91 63 dataset """kinships""" +91 63 model """conve""" +91 63 loss """softplus""" +91 63 regularizer """no""" +91 63 optimizer """adam""" +91 63 training_loop """owa""" +91 63 negative_sampler """basic""" +91 63 evaluator 
"""rankbased""" +91 64 dataset """kinships""" +91 64 model """conve""" +91 64 loss """softplus""" +91 64 regularizer """no""" +91 64 optimizer """adam""" +91 64 training_loop """owa""" +91 64 negative_sampler """basic""" +91 64 evaluator """rankbased""" +91 65 dataset """kinships""" +91 65 model """conve""" +91 65 loss """softplus""" +91 65 regularizer """no""" +91 65 optimizer """adam""" +91 65 training_loop """owa""" +91 65 negative_sampler """basic""" +91 65 evaluator """rankbased""" +91 66 dataset """kinships""" +91 66 model """conve""" +91 66 loss """softplus""" +91 66 regularizer """no""" +91 66 optimizer """adam""" +91 66 training_loop """owa""" +91 66 negative_sampler """basic""" +91 66 evaluator """rankbased""" +91 67 dataset """kinships""" +91 67 model """conve""" +91 67 loss """softplus""" +91 67 regularizer """no""" +91 67 optimizer """adam""" +91 67 training_loop """owa""" +91 67 negative_sampler """basic""" +91 67 evaluator """rankbased""" +91 68 dataset """kinships""" +91 68 model """conve""" +91 68 loss """softplus""" +91 68 regularizer """no""" +91 68 optimizer """adam""" +91 68 training_loop """owa""" +91 68 negative_sampler """basic""" +91 68 evaluator """rankbased""" +91 69 dataset """kinships""" +91 69 model """conve""" +91 69 loss """softplus""" +91 69 regularizer """no""" +91 69 optimizer """adam""" +91 69 training_loop """owa""" +91 69 negative_sampler """basic""" +91 69 evaluator """rankbased""" +91 70 dataset """kinships""" +91 70 model """conve""" +91 70 loss """softplus""" +91 70 regularizer """no""" +91 70 optimizer """adam""" +91 70 training_loop """owa""" +91 70 negative_sampler """basic""" +91 70 evaluator """rankbased""" +91 71 dataset """kinships""" +91 71 model """conve""" +91 71 loss """softplus""" +91 71 regularizer """no""" +91 71 optimizer """adam""" +91 71 training_loop """owa""" +91 71 negative_sampler """basic""" +91 71 evaluator """rankbased""" +91 72 dataset """kinships""" +91 72 model """conve""" +91 72 loss 
"""softplus""" +91 72 regularizer """no""" +91 72 optimizer """adam""" +91 72 training_loop """owa""" +91 72 negative_sampler """basic""" +91 72 evaluator """rankbased""" +91 73 dataset """kinships""" +91 73 model """conve""" +91 73 loss """softplus""" +91 73 regularizer """no""" +91 73 optimizer """adam""" +91 73 training_loop """owa""" +91 73 negative_sampler """basic""" +91 73 evaluator """rankbased""" +91 74 dataset """kinships""" +91 74 model """conve""" +91 74 loss """softplus""" +91 74 regularizer """no""" +91 74 optimizer """adam""" +91 74 training_loop """owa""" +91 74 negative_sampler """basic""" +91 74 evaluator """rankbased""" +91 75 dataset """kinships""" +91 75 model """conve""" +91 75 loss """softplus""" +91 75 regularizer """no""" +91 75 optimizer """adam""" +91 75 training_loop """owa""" +91 75 negative_sampler """basic""" +91 75 evaluator """rankbased""" +91 76 dataset """kinships""" +91 76 model """conve""" +91 76 loss """softplus""" +91 76 regularizer """no""" +91 76 optimizer """adam""" +91 76 training_loop """owa""" +91 76 negative_sampler """basic""" +91 76 evaluator """rankbased""" +91 77 dataset """kinships""" +91 77 model """conve""" +91 77 loss """softplus""" +91 77 regularizer """no""" +91 77 optimizer """adam""" +91 77 training_loop """owa""" +91 77 negative_sampler """basic""" +91 77 evaluator """rankbased""" +91 78 dataset """kinships""" +91 78 model """conve""" +91 78 loss """softplus""" +91 78 regularizer """no""" +91 78 optimizer """adam""" +91 78 training_loop """owa""" +91 78 negative_sampler """basic""" +91 78 evaluator """rankbased""" +91 79 dataset """kinships""" +91 79 model """conve""" +91 79 loss """softplus""" +91 79 regularizer """no""" +91 79 optimizer """adam""" +91 79 training_loop """owa""" +91 79 negative_sampler """basic""" +91 79 evaluator """rankbased""" +91 80 dataset """kinships""" +91 80 model """conve""" +91 80 loss """softplus""" +91 80 regularizer """no""" +91 80 optimizer """adam""" +91 80 training_loop 
"""owa""" +91 80 negative_sampler """basic""" +91 80 evaluator """rankbased""" +91 81 dataset """kinships""" +91 81 model """conve""" +91 81 loss """softplus""" +91 81 regularizer """no""" +91 81 optimizer """adam""" +91 81 training_loop """owa""" +91 81 negative_sampler """basic""" +91 81 evaluator """rankbased""" +91 82 dataset """kinships""" +91 82 model """conve""" +91 82 loss """softplus""" +91 82 regularizer """no""" +91 82 optimizer """adam""" +91 82 training_loop """owa""" +91 82 negative_sampler """basic""" +91 82 evaluator """rankbased""" +91 83 dataset """kinships""" +91 83 model """conve""" +91 83 loss """softplus""" +91 83 regularizer """no""" +91 83 optimizer """adam""" +91 83 training_loop """owa""" +91 83 negative_sampler """basic""" +91 83 evaluator """rankbased""" +91 84 dataset """kinships""" +91 84 model """conve""" +91 84 loss """softplus""" +91 84 regularizer """no""" +91 84 optimizer """adam""" +91 84 training_loop """owa""" +91 84 negative_sampler """basic""" +91 84 evaluator """rankbased""" +91 85 dataset """kinships""" +91 85 model """conve""" +91 85 loss """softplus""" +91 85 regularizer """no""" +91 85 optimizer """adam""" +91 85 training_loop """owa""" +91 85 negative_sampler """basic""" +91 85 evaluator """rankbased""" +91 86 dataset """kinships""" +91 86 model """conve""" +91 86 loss """softplus""" +91 86 regularizer """no""" +91 86 optimizer """adam""" +91 86 training_loop """owa""" +91 86 negative_sampler """basic""" +91 86 evaluator """rankbased""" +91 87 dataset """kinships""" +91 87 model """conve""" +91 87 loss """softplus""" +91 87 regularizer """no""" +91 87 optimizer """adam""" +91 87 training_loop """owa""" +91 87 negative_sampler """basic""" +91 87 evaluator """rankbased""" +91 88 dataset """kinships""" +91 88 model """conve""" +91 88 loss """softplus""" +91 88 regularizer """no""" +91 88 optimizer """adam""" +91 88 training_loop """owa""" +91 88 negative_sampler """basic""" +91 88 evaluator """rankbased""" +91 89 dataset 
"""kinships""" +91 89 model """conve""" +91 89 loss """softplus""" +91 89 regularizer """no""" +91 89 optimizer """adam""" +91 89 training_loop """owa""" +91 89 negative_sampler """basic""" +91 89 evaluator """rankbased""" +91 90 dataset """kinships""" +91 90 model """conve""" +91 90 loss """softplus""" +91 90 regularizer """no""" +91 90 optimizer """adam""" +91 90 training_loop """owa""" +91 90 negative_sampler """basic""" +91 90 evaluator """rankbased""" +91 91 dataset """kinships""" +91 91 model """conve""" +91 91 loss """softplus""" +91 91 regularizer """no""" +91 91 optimizer """adam""" +91 91 training_loop """owa""" +91 91 negative_sampler """basic""" +91 91 evaluator """rankbased""" +91 92 dataset """kinships""" +91 92 model """conve""" +91 92 loss """softplus""" +91 92 regularizer """no""" +91 92 optimizer """adam""" +91 92 training_loop """owa""" +91 92 negative_sampler """basic""" +91 92 evaluator """rankbased""" +91 93 dataset """kinships""" +91 93 model """conve""" +91 93 loss """softplus""" +91 93 regularizer """no""" +91 93 optimizer """adam""" +91 93 training_loop """owa""" +91 93 negative_sampler """basic""" +91 93 evaluator """rankbased""" +91 94 dataset """kinships""" +91 94 model """conve""" +91 94 loss """softplus""" +91 94 regularizer """no""" +91 94 optimizer """adam""" +91 94 training_loop """owa""" +91 94 negative_sampler """basic""" +91 94 evaluator """rankbased""" +91 95 dataset """kinships""" +91 95 model """conve""" +91 95 loss """softplus""" +91 95 regularizer """no""" +91 95 optimizer """adam""" +91 95 training_loop """owa""" +91 95 negative_sampler """basic""" +91 95 evaluator """rankbased""" +91 96 dataset """kinships""" +91 96 model """conve""" +91 96 loss """softplus""" +91 96 regularizer """no""" +91 96 optimizer """adam""" +91 96 training_loop """owa""" +91 96 negative_sampler """basic""" +91 96 evaluator """rankbased""" +91 97 dataset """kinships""" +91 97 model """conve""" +91 97 loss """softplus""" +91 97 regularizer """no""" 
+91 97 optimizer """adam""" +91 97 training_loop """owa""" +91 97 negative_sampler """basic""" +91 97 evaluator """rankbased""" +91 98 dataset """kinships""" +91 98 model """conve""" +91 98 loss """softplus""" +91 98 regularizer """no""" +91 98 optimizer """adam""" +91 98 training_loop """owa""" +91 98 negative_sampler """basic""" +91 98 evaluator """rankbased""" +91 99 dataset """kinships""" +91 99 model """conve""" +91 99 loss """softplus""" +91 99 regularizer """no""" +91 99 optimizer """adam""" +91 99 training_loop """owa""" +91 99 negative_sampler """basic""" +91 99 evaluator """rankbased""" +91 100 dataset """kinships""" +91 100 model """conve""" +91 100 loss """softplus""" +91 100 regularizer """no""" +91 100 optimizer """adam""" +91 100 training_loop """owa""" +91 100 negative_sampler """basic""" +91 100 evaluator """rankbased""" +92 1 model.output_channels 36.0 +92 1 model.input_dropout 0.4399202331400132 +92 1 model.output_dropout 0.4322904260345477 +92 1 model.feature_map_dropout 0.12556662600685792 +92 1 model.embedding_dim 0.0 +92 1 loss.margin 27.87212857079916 +92 1 loss.adversarial_temperature 0.6694377440234093 +92 1 optimizer.lr 0.061866876603482265 +92 1 negative_sampler.num_negs_per_pos 40.0 +92 1 training.batch_size 2.0 +92 2 model.output_channels 41.0 +92 2 model.input_dropout 0.4335471457941601 +92 2 model.output_dropout 0.23724145299625982 +92 2 model.feature_map_dropout 0.4094195157849325 +92 2 model.embedding_dim 2.0 +92 2 loss.margin 8.819581045099115 +92 2 loss.adversarial_temperature 0.7830968483113031 +92 2 optimizer.lr 0.057020062473508625 +92 2 negative_sampler.num_negs_per_pos 61.0 +92 2 training.batch_size 0.0 +92 3 model.output_channels 30.0 +92 3 model.input_dropout 0.05422846446448931 +92 3 model.output_dropout 0.3187643041366175 +92 3 model.feature_map_dropout 0.37500360216769857 +92 3 model.embedding_dim 1.0 +92 3 loss.margin 27.081024155786174 +92 3 loss.adversarial_temperature 0.3217381549139633 +92 3 optimizer.lr 
0.0023160272374697036 +92 3 negative_sampler.num_negs_per_pos 22.0 +92 3 training.batch_size 1.0 +92 4 model.output_channels 19.0 +92 4 model.input_dropout 0.04468338600233901 +92 4 model.output_dropout 0.3964135950327991 +92 4 model.feature_map_dropout 0.12078595109082813 +92 4 model.embedding_dim 2.0 +92 4 loss.margin 18.65690403798453 +92 4 loss.adversarial_temperature 0.3225213560661873 +92 4 optimizer.lr 0.03925872325491271 +92 4 negative_sampler.num_negs_per_pos 72.0 +92 4 training.batch_size 0.0 +92 5 model.output_channels 25.0 +92 5 model.input_dropout 0.12166301634193072 +92 5 model.output_dropout 0.20241995934193413 +92 5 model.feature_map_dropout 0.4853641627431759 +92 5 model.embedding_dim 1.0 +92 5 loss.margin 25.543041567347906 +92 5 loss.adversarial_temperature 0.4360377518467309 +92 5 optimizer.lr 0.00623529731059432 +92 5 negative_sampler.num_negs_per_pos 14.0 +92 5 training.batch_size 0.0 +92 6 model.output_channels 37.0 +92 6 model.input_dropout 0.3860352526481179 +92 6 model.output_dropout 0.11251670579093426 +92 6 model.feature_map_dropout 0.28202398370705206 +92 6 model.embedding_dim 1.0 +92 6 loss.margin 17.652266049076243 +92 6 loss.adversarial_temperature 0.28344247785726767 +92 6 optimizer.lr 0.002156077005372345 +92 6 negative_sampler.num_negs_per_pos 16.0 +92 6 training.batch_size 2.0 +92 7 model.output_channels 60.0 +92 7 model.input_dropout 0.4857646910365295 +92 7 model.output_dropout 0.44095696513065913 +92 7 model.feature_map_dropout 0.2484728542802807 +92 7 model.embedding_dim 1.0 +92 7 loss.margin 17.211036474359467 +92 7 loss.adversarial_temperature 0.1350094334741651 +92 7 optimizer.lr 0.0025422923458328856 +92 7 negative_sampler.num_negs_per_pos 60.0 +92 7 training.batch_size 1.0 +92 8 model.output_channels 21.0 +92 8 model.input_dropout 0.4820211575390626 +92 8 model.output_dropout 0.27638222948196334 +92 8 model.feature_map_dropout 0.08789334914444513 +92 8 model.embedding_dim 2.0 +92 8 loss.margin 8.183809622524503 +92 8 
loss.adversarial_temperature 0.6841450520412321 +92 8 optimizer.lr 0.0056438739006368695 +92 8 negative_sampler.num_negs_per_pos 21.0 +92 8 training.batch_size 1.0 +92 9 model.output_channels 58.0 +92 9 model.input_dropout 0.04691590266940476 +92 9 model.output_dropout 0.10566427608140866 +92 9 model.feature_map_dropout 0.1536798134132485 +92 9 model.embedding_dim 2.0 +92 9 loss.margin 20.801026114505685 +92 9 loss.adversarial_temperature 0.5359020814661947 +92 9 optimizer.lr 0.01600217945316706 +92 9 negative_sampler.num_negs_per_pos 17.0 +92 9 training.batch_size 0.0 +92 10 model.output_channels 41.0 +92 10 model.input_dropout 0.06895313021183175 +92 10 model.output_dropout 0.2674381081321401 +92 10 model.feature_map_dropout 0.017216699503036392 +92 10 model.embedding_dim 2.0 +92 10 loss.margin 6.098769331794349 +92 10 loss.adversarial_temperature 0.9564803959466115 +92 10 optimizer.lr 0.06829906703373229 +92 10 negative_sampler.num_negs_per_pos 54.0 +92 10 training.batch_size 1.0 +92 11 model.output_channels 61.0 +92 11 model.input_dropout 0.24123499672633847 +92 11 model.output_dropout 0.29203612836006126 +92 11 model.feature_map_dropout 0.4070033394240806 +92 11 model.embedding_dim 1.0 +92 11 loss.margin 3.431043562013462 +92 11 loss.adversarial_temperature 0.9921222280101204 +92 11 optimizer.lr 0.07576744473025306 +92 11 negative_sampler.num_negs_per_pos 6.0 +92 11 training.batch_size 2.0 +92 12 model.output_channels 35.0 +92 12 model.input_dropout 0.4506674614128408 +92 12 model.output_dropout 0.02648169384419935 +92 12 model.feature_map_dropout 0.02910000532001783 +92 12 model.embedding_dim 2.0 +92 12 loss.margin 6.1848559501332305 +92 12 loss.adversarial_temperature 0.4492613279892285 +92 12 optimizer.lr 0.012731583436629335 +92 12 negative_sampler.num_negs_per_pos 0.0 +92 12 training.batch_size 1.0 +92 13 model.output_channels 62.0 +92 13 model.input_dropout 0.464534299441787 +92 13 model.output_dropout 0.07067786877335447 +92 13 model.feature_map_dropout 
0.4549517514378171 +92 13 model.embedding_dim 1.0 +92 13 loss.margin 5.674847362776971 +92 13 loss.adversarial_temperature 0.6130055958258682 +92 13 optimizer.lr 0.002942463767180437 +92 13 negative_sampler.num_negs_per_pos 66.0 +92 13 training.batch_size 0.0 +92 14 model.output_channels 54.0 +92 14 model.input_dropout 0.2876073910620034 +92 14 model.output_dropout 0.12420361879235231 +92 14 model.feature_map_dropout 0.03207873008747736 +92 14 model.embedding_dim 1.0 +92 14 loss.margin 20.54126408894885 +92 14 loss.adversarial_temperature 0.14842748290596752 +92 14 optimizer.lr 0.007944178580913744 +92 14 negative_sampler.num_negs_per_pos 78.0 +92 14 training.batch_size 0.0 +92 15 model.output_channels 17.0 +92 15 model.input_dropout 0.3747200919417245 +92 15 model.output_dropout 0.2762037326259139 +92 15 model.feature_map_dropout 0.24731562963399067 +92 15 model.embedding_dim 2.0 +92 15 loss.margin 27.991006426345706 +92 15 loss.adversarial_temperature 0.6128553152335007 +92 15 optimizer.lr 0.001860126453820436 +92 15 negative_sampler.num_negs_per_pos 42.0 +92 15 training.batch_size 2.0 +92 16 model.output_channels 49.0 +92 16 model.input_dropout 0.4538306048146124 +92 16 model.output_dropout 0.3105636283360032 +92 16 model.feature_map_dropout 0.1139246220190292 +92 16 model.embedding_dim 0.0 +92 16 loss.margin 28.094227242851854 +92 16 loss.adversarial_temperature 0.6228588585902752 +92 16 optimizer.lr 0.0015304405122674217 +92 16 negative_sampler.num_negs_per_pos 74.0 +92 16 training.batch_size 1.0 +92 17 model.output_channels 35.0 +92 17 model.input_dropout 0.03641330090073308 +92 17 model.output_dropout 0.025145560478796236 +92 17 model.feature_map_dropout 0.38074946295092515 +92 17 model.embedding_dim 2.0 +92 17 loss.margin 23.641067044265522 +92 17 loss.adversarial_temperature 0.3258065180329535 +92 17 optimizer.lr 0.018255959822559457 +92 17 negative_sampler.num_negs_per_pos 8.0 +92 17 training.batch_size 0.0 +92 18 model.output_channels 18.0 +92 18 
model.input_dropout 0.28253974199537807 +92 18 model.output_dropout 0.2559924539800053 +92 18 model.feature_map_dropout 0.23327937491348677 +92 18 model.embedding_dim 2.0 +92 18 loss.margin 4.8885225816797835 +92 18 loss.adversarial_temperature 0.2939762539537867 +92 18 optimizer.lr 0.03687930865552855 +92 18 negative_sampler.num_negs_per_pos 72.0 +92 18 training.batch_size 1.0 +92 1 dataset """wn18rr""" +92 1 model """conve""" +92 1 loss """nssa""" +92 1 regularizer """no""" +92 1 optimizer """adam""" +92 1 training_loop """owa""" +92 1 negative_sampler """basic""" +92 1 evaluator """rankbased""" +92 2 dataset """wn18rr""" +92 2 model """conve""" +92 2 loss """nssa""" +92 2 regularizer """no""" +92 2 optimizer """adam""" +92 2 training_loop """owa""" +92 2 negative_sampler """basic""" +92 2 evaluator """rankbased""" +92 3 dataset """wn18rr""" +92 3 model """conve""" +92 3 loss """nssa""" +92 3 regularizer """no""" +92 3 optimizer """adam""" +92 3 training_loop """owa""" +92 3 negative_sampler """basic""" +92 3 evaluator """rankbased""" +92 4 dataset """wn18rr""" +92 4 model """conve""" +92 4 loss """nssa""" +92 4 regularizer """no""" +92 4 optimizer """adam""" +92 4 training_loop """owa""" +92 4 negative_sampler """basic""" +92 4 evaluator """rankbased""" +92 5 dataset """wn18rr""" +92 5 model """conve""" +92 5 loss """nssa""" +92 5 regularizer """no""" +92 5 optimizer """adam""" +92 5 training_loop """owa""" +92 5 negative_sampler """basic""" +92 5 evaluator """rankbased""" +92 6 dataset """wn18rr""" +92 6 model """conve""" +92 6 loss """nssa""" +92 6 regularizer """no""" +92 6 optimizer """adam""" +92 6 training_loop """owa""" +92 6 negative_sampler """basic""" +92 6 evaluator """rankbased""" +92 7 dataset """wn18rr""" +92 7 model """conve""" +92 7 loss """nssa""" +92 7 regularizer """no""" +92 7 optimizer """adam""" +92 7 training_loop """owa""" +92 7 negative_sampler """basic""" +92 7 evaluator """rankbased""" +92 8 dataset """wn18rr""" +92 8 model """conve""" 
+92 8 loss """nssa""" +92 8 regularizer """no""" +92 8 optimizer """adam""" +92 8 training_loop """owa""" +92 8 negative_sampler """basic""" +92 8 evaluator """rankbased""" +92 9 dataset """wn18rr""" +92 9 model """conve""" +92 9 loss """nssa""" +92 9 regularizer """no""" +92 9 optimizer """adam""" +92 9 training_loop """owa""" +92 9 negative_sampler """basic""" +92 9 evaluator """rankbased""" +92 10 dataset """wn18rr""" +92 10 model """conve""" +92 10 loss """nssa""" +92 10 regularizer """no""" +92 10 optimizer """adam""" +92 10 training_loop """owa""" +92 10 negative_sampler """basic""" +92 10 evaluator """rankbased""" +92 11 dataset """wn18rr""" +92 11 model """conve""" +92 11 loss """nssa""" +92 11 regularizer """no""" +92 11 optimizer """adam""" +92 11 training_loop """owa""" +92 11 negative_sampler """basic""" +92 11 evaluator """rankbased""" +92 12 dataset """wn18rr""" +92 12 model """conve""" +92 12 loss """nssa""" +92 12 regularizer """no""" +92 12 optimizer """adam""" +92 12 training_loop """owa""" +92 12 negative_sampler """basic""" +92 12 evaluator """rankbased""" +92 13 dataset """wn18rr""" +92 13 model """conve""" +92 13 loss """nssa""" +92 13 regularizer """no""" +92 13 optimizer """adam""" +92 13 training_loop """owa""" +92 13 negative_sampler """basic""" +92 13 evaluator """rankbased""" +92 14 dataset """wn18rr""" +92 14 model """conve""" +92 14 loss """nssa""" +92 14 regularizer """no""" +92 14 optimizer """adam""" +92 14 training_loop """owa""" +92 14 negative_sampler """basic""" +92 14 evaluator """rankbased""" +92 15 dataset """wn18rr""" +92 15 model """conve""" +92 15 loss """nssa""" +92 15 regularizer """no""" +92 15 optimizer """adam""" +92 15 training_loop """owa""" +92 15 negative_sampler """basic""" +92 15 evaluator """rankbased""" +92 16 dataset """wn18rr""" +92 16 model """conve""" +92 16 loss """nssa""" +92 16 regularizer """no""" +92 16 optimizer """adam""" +92 16 training_loop """owa""" +92 16 negative_sampler """basic""" +92 16 
evaluator """rankbased""" +92 17 dataset """wn18rr""" +92 17 model """conve""" +92 17 loss """nssa""" +92 17 regularizer """no""" +92 17 optimizer """adam""" +92 17 training_loop """owa""" +92 17 negative_sampler """basic""" +92 17 evaluator """rankbased""" +92 18 dataset """wn18rr""" +92 18 model """conve""" +92 18 loss """nssa""" +92 18 regularizer """no""" +92 18 optimizer """adam""" +92 18 training_loop """owa""" +92 18 negative_sampler """basic""" +92 18 evaluator """rankbased""" +93 1 model.output_channels 57.0 +93 1 model.input_dropout 0.2687376062082373 +93 1 model.output_dropout 0.13648288983555767 +93 1 model.feature_map_dropout 0.45032191478561046 +93 1 model.embedding_dim 0.0 +93 1 loss.margin 8.465963220589607 +93 1 loss.adversarial_temperature 0.2329801823633577 +93 1 optimizer.lr 0.0010348438248689233 +93 1 negative_sampler.num_negs_per_pos 21.0 +93 1 training.batch_size 2.0 +93 2 model.output_channels 51.0 +93 2 model.input_dropout 0.14946254902560185 +93 2 model.output_dropout 0.20242874223432245 +93 2 model.feature_map_dropout 0.29000394039117205 +93 2 model.embedding_dim 1.0 +93 2 loss.margin 17.073771616356716 +93 2 loss.adversarial_temperature 0.16180642442151547 +93 2 optimizer.lr 0.00967702209432577 +93 2 negative_sampler.num_negs_per_pos 76.0 +93 2 training.batch_size 1.0 +93 3 model.output_channels 36.0 +93 3 model.input_dropout 0.3193585883402403 +93 3 model.output_dropout 0.3790544943679649 +93 3 model.feature_map_dropout 0.1147880651365698 +93 3 model.embedding_dim 2.0 +93 3 loss.margin 6.967666738432565 +93 3 loss.adversarial_temperature 0.5594583425518855 +93 3 optimizer.lr 0.015321743573259732 +93 3 negative_sampler.num_negs_per_pos 78.0 +93 3 training.batch_size 2.0 +93 4 model.output_channels 17.0 +93 4 model.input_dropout 0.048498660418899775 +93 4 model.output_dropout 0.17157469379334733 +93 4 model.feature_map_dropout 0.13271711006267534 +93 4 model.embedding_dim 2.0 +93 4 loss.margin 16.09100989534341 +93 4 
loss.adversarial_temperature 0.39861453831539373 +93 4 optimizer.lr 0.010960993998567417 +93 4 negative_sampler.num_negs_per_pos 88.0 +93 4 training.batch_size 0.0 +93 5 model.output_channels 61.0 +93 5 model.input_dropout 0.3011326947317549 +93 5 model.output_dropout 0.45653808613085306 +93 5 model.feature_map_dropout 0.012557884688824517 +93 5 model.embedding_dim 2.0 +93 5 loss.margin 7.2529003308676385 +93 5 loss.adversarial_temperature 0.970053462328024 +93 5 optimizer.lr 0.002500019569532971 +93 5 negative_sampler.num_negs_per_pos 97.0 +93 5 training.batch_size 2.0 +93 6 model.output_channels 57.0 +93 6 model.input_dropout 0.48922345920258686 +93 6 model.output_dropout 0.12322491645887212 +93 6 model.feature_map_dropout 0.24340909961321638 +93 6 model.embedding_dim 1.0 +93 6 loss.margin 5.464282137838166 +93 6 loss.adversarial_temperature 0.8734042419675961 +93 6 optimizer.lr 0.030877067713409753 +93 6 negative_sampler.num_negs_per_pos 96.0 +93 6 training.batch_size 2.0 +93 7 model.output_channels 18.0 +93 7 model.input_dropout 0.46715911920869246 +93 7 model.output_dropout 0.4162882081305308 +93 7 model.feature_map_dropout 0.29425383277953315 +93 7 model.embedding_dim 1.0 +93 7 loss.margin 23.129163869162287 +93 7 loss.adversarial_temperature 0.5426632638471411 +93 7 optimizer.lr 0.020508139687769506 +93 7 negative_sampler.num_negs_per_pos 70.0 +93 7 training.batch_size 2.0 +93 8 model.output_channels 28.0 +93 8 model.input_dropout 0.07562226657186344 +93 8 model.output_dropout 0.2277989492090255 +93 8 model.feature_map_dropout 0.3401626002653153 +93 8 model.embedding_dim 2.0 +93 8 loss.margin 16.926691316880298 +93 8 loss.adversarial_temperature 0.706701662216066 +93 8 optimizer.lr 0.004110290419749088 +93 8 negative_sampler.num_negs_per_pos 39.0 +93 8 training.batch_size 1.0 +93 9 model.output_channels 49.0 +93 9 model.input_dropout 0.06403416233301529 +93 9 model.output_dropout 0.2599217881631669 +93 9 model.feature_map_dropout 0.40338249299494006 +93 9 
model.embedding_dim 2.0 +93 9 loss.margin 7.089791427714308 +93 9 loss.adversarial_temperature 0.7307309725296909 +93 9 optimizer.lr 0.0028332690958576636 +93 9 negative_sampler.num_negs_per_pos 74.0 +93 9 training.batch_size 1.0 +93 10 model.output_channels 55.0 +93 10 model.input_dropout 0.35668489646819973 +93 10 model.output_dropout 0.4683847739087055 +93 10 model.feature_map_dropout 0.29406090080046343 +93 10 model.embedding_dim 2.0 +93 10 loss.margin 13.365280125163375 +93 10 loss.adversarial_temperature 0.7001318501674346 +93 10 optimizer.lr 0.002136241683045153 +93 10 negative_sampler.num_negs_per_pos 54.0 +93 10 training.batch_size 0.0 +93 11 model.output_channels 32.0 +93 11 model.input_dropout 0.3694478212847131 +93 11 model.output_dropout 0.1322752777335704 +93 11 model.feature_map_dropout 0.04888145510676917 +93 11 model.embedding_dim 1.0 +93 11 loss.margin 13.423056471962646 +93 11 loss.adversarial_temperature 0.6513529737500894 +93 11 optimizer.lr 0.011916518747677476 +93 11 negative_sampler.num_negs_per_pos 97.0 +93 11 training.batch_size 0.0 +93 12 model.output_channels 36.0 +93 12 model.input_dropout 0.28666342535124667 +93 12 model.output_dropout 0.14887003291965878 +93 12 model.feature_map_dropout 0.3548251848076326 +93 12 model.embedding_dim 0.0 +93 12 loss.margin 26.840012075269108 +93 12 loss.adversarial_temperature 0.4694019570051255 +93 12 optimizer.lr 0.004275040161895094 +93 12 negative_sampler.num_negs_per_pos 81.0 +93 12 training.batch_size 2.0 +93 13 model.output_channels 53.0 +93 13 model.input_dropout 0.24916033448985037 +93 13 model.output_dropout 0.3415834067131241 +93 13 model.feature_map_dropout 0.30403211956389886 +93 13 model.embedding_dim 2.0 +93 13 loss.margin 28.569792680869124 +93 13 loss.adversarial_temperature 0.6420355826439509 +93 13 optimizer.lr 0.0035750270773009943 +93 13 negative_sampler.num_negs_per_pos 47.0 +93 13 training.batch_size 0.0 +93 1 dataset """wn18rr""" +93 1 model """conve""" +93 1 loss """nssa""" +93 
1 regularizer """no""" +93 1 optimizer """adam""" +93 1 training_loop """owa""" +93 1 negative_sampler """basic""" +93 1 evaluator """rankbased""" +93 2 dataset """wn18rr""" +93 2 model """conve""" +93 2 loss """nssa""" +93 2 regularizer """no""" +93 2 optimizer """adam""" +93 2 training_loop """owa""" +93 2 negative_sampler """basic""" +93 2 evaluator """rankbased""" +93 3 dataset """wn18rr""" +93 3 model """conve""" +93 3 loss """nssa""" +93 3 regularizer """no""" +93 3 optimizer """adam""" +93 3 training_loop """owa""" +93 3 negative_sampler """basic""" +93 3 evaluator """rankbased""" +93 4 dataset """wn18rr""" +93 4 model """conve""" +93 4 loss """nssa""" +93 4 regularizer """no""" +93 4 optimizer """adam""" +93 4 training_loop """owa""" +93 4 negative_sampler """basic""" +93 4 evaluator """rankbased""" +93 5 dataset """wn18rr""" +93 5 model """conve""" +93 5 loss """nssa""" +93 5 regularizer """no""" +93 5 optimizer """adam""" +93 5 training_loop """owa""" +93 5 negative_sampler """basic""" +93 5 evaluator """rankbased""" +93 6 dataset """wn18rr""" +93 6 model """conve""" +93 6 loss """nssa""" +93 6 regularizer """no""" +93 6 optimizer """adam""" +93 6 training_loop """owa""" +93 6 negative_sampler """basic""" +93 6 evaluator """rankbased""" +93 7 dataset """wn18rr""" +93 7 model """conve""" +93 7 loss """nssa""" +93 7 regularizer """no""" +93 7 optimizer """adam""" +93 7 training_loop """owa""" +93 7 negative_sampler """basic""" +93 7 evaluator """rankbased""" +93 8 dataset """wn18rr""" +93 8 model """conve""" +93 8 loss """nssa""" +93 8 regularizer """no""" +93 8 optimizer """adam""" +93 8 training_loop """owa""" +93 8 negative_sampler """basic""" +93 8 evaluator """rankbased""" +93 9 dataset """wn18rr""" +93 9 model """conve""" +93 9 loss """nssa""" +93 9 regularizer """no""" +93 9 optimizer """adam""" +93 9 training_loop """owa""" +93 9 negative_sampler """basic""" +93 9 evaluator """rankbased""" +93 10 dataset """wn18rr""" +93 10 model """conve""" +93 10 
loss """nssa""" +93 10 regularizer """no""" +93 10 optimizer """adam""" +93 10 training_loop """owa""" +93 10 negative_sampler """basic""" +93 10 evaluator """rankbased""" +93 11 dataset """wn18rr""" +93 11 model """conve""" +93 11 loss """nssa""" +93 11 regularizer """no""" +93 11 optimizer """adam""" +93 11 training_loop """owa""" +93 11 negative_sampler """basic""" +93 11 evaluator """rankbased""" +93 12 dataset """wn18rr""" +93 12 model """conve""" +93 12 loss """nssa""" +93 12 regularizer """no""" +93 12 optimizer """adam""" +93 12 training_loop """owa""" +93 12 negative_sampler """basic""" +93 12 evaluator """rankbased""" +93 13 dataset """wn18rr""" +93 13 model """conve""" +93 13 loss """nssa""" +93 13 regularizer """no""" +93 13 optimizer """adam""" +93 13 training_loop """owa""" +93 13 negative_sampler """basic""" +93 13 evaluator """rankbased""" +94 1 model.output_channels 38.0 +94 1 model.input_dropout 0.17244091174308174 +94 1 model.output_dropout 0.3571554387997359 +94 1 model.feature_map_dropout 0.25080139575212623 +94 1 model.embedding_dim 1.0 +94 1 optimizer.lr 0.0078040231347127195 +94 1 training.batch_size 1.0 +94 1 training.label_smoothing 0.010264404400241097 +94 2 model.output_channels 41.0 +94 2 model.input_dropout 0.4167188813942929 +94 2 model.output_dropout 0.48563428565116373 +94 2 model.feature_map_dropout 0.3793974314019544 +94 2 model.embedding_dim 1.0 +94 2 optimizer.lr 0.0014588671004445452 +94 2 training.batch_size 2.0 +94 2 training.label_smoothing 0.08986575884683062 +94 3 model.output_channels 31.0 +94 3 model.input_dropout 0.19402274357365534 +94 3 model.output_dropout 0.3281095613175454 +94 3 model.feature_map_dropout 0.45773983774611504 +94 3 model.embedding_dim 1.0 +94 3 optimizer.lr 0.011699278911271413 +94 3 training.batch_size 1.0 +94 3 training.label_smoothing 0.009648032342531237 +94 4 model.output_channels 33.0 +94 4 model.input_dropout 0.3059949417362009 +94 4 model.output_dropout 0.444490534437821 +94 4 
model.feature_map_dropout 0.14361294882902087 +94 4 model.embedding_dim 1.0 +94 4 optimizer.lr 0.025697293294804423 +94 4 training.batch_size 1.0 +94 4 training.label_smoothing 0.4668839769895158 +94 5 model.output_channels 50.0 +94 5 model.input_dropout 0.04101582469806436 +94 5 model.output_dropout 0.00814712529092182 +94 5 model.feature_map_dropout 0.4676886724150779 +94 5 model.embedding_dim 2.0 +94 5 optimizer.lr 0.016977379006558237 +94 5 training.batch_size 0.0 +94 5 training.label_smoothing 0.004014826737765339 +94 6 model.output_channels 19.0 +94 6 model.input_dropout 0.3130292978787166 +94 6 model.output_dropout 0.3261247215392965 +94 6 model.feature_map_dropout 0.4349398802399229 +94 6 model.embedding_dim 1.0 +94 6 optimizer.lr 0.01776881579483067 +94 6 training.batch_size 0.0 +94 6 training.label_smoothing 0.06090861546713818 +94 7 model.output_channels 23.0 +94 7 model.input_dropout 0.16905673057849574 +94 7 model.output_dropout 0.43843218490646757 +94 7 model.feature_map_dropout 0.2807854511389014 +94 7 model.embedding_dim 1.0 +94 7 optimizer.lr 0.002528704215021349 +94 7 training.batch_size 2.0 +94 7 training.label_smoothing 0.03609643132967214 +94 8 model.output_channels 30.0 +94 8 model.input_dropout 0.29399473484279687 +94 8 model.output_dropout 0.13976889243577623 +94 8 model.feature_map_dropout 0.48743376953987255 +94 8 model.embedding_dim 2.0 +94 8 optimizer.lr 0.004257894864962261 +94 8 training.batch_size 1.0 +94 8 training.label_smoothing 0.1533155129164967 +94 9 model.output_channels 28.0 +94 9 model.input_dropout 0.11540003807438537 +94 9 model.output_dropout 0.06279107117415816 +94 9 model.feature_map_dropout 0.375289105649648 +94 9 model.embedding_dim 2.0 +94 9 optimizer.lr 0.005905132183396187 +94 9 training.batch_size 0.0 +94 9 training.label_smoothing 0.006655694794879317 +94 10 model.output_channels 47.0 +94 10 model.input_dropout 0.3850527696049521 +94 10 model.output_dropout 0.4464301466772007 +94 10 model.feature_map_dropout 
0.1973495624228438 +94 10 model.embedding_dim 0.0 +94 10 optimizer.lr 0.009092048033621387 +94 10 training.batch_size 0.0 +94 10 training.label_smoothing 0.008707365766051536 +94 11 model.output_channels 29.0 +94 11 model.input_dropout 0.04544316671163273 +94 11 model.output_dropout 0.13848612811740413 +94 11 model.feature_map_dropout 0.4190192301516454 +94 11 model.embedding_dim 1.0 +94 11 optimizer.lr 0.0011053288755667153 +94 11 training.batch_size 2.0 +94 11 training.label_smoothing 0.03930108674664984 +94 12 model.output_channels 43.0 +94 12 model.input_dropout 0.08132852783998995 +94 12 model.output_dropout 0.16584981530035675 +94 12 model.feature_map_dropout 0.021046988207997164 +94 12 model.embedding_dim 0.0 +94 12 optimizer.lr 0.028133226632497555 +94 12 training.batch_size 0.0 +94 12 training.label_smoothing 0.0014977468009514303 +94 13 model.output_channels 30.0 +94 13 model.input_dropout 0.37460382927822666 +94 13 model.output_dropout 0.4685980854644519 +94 13 model.feature_map_dropout 0.0683711042491193 +94 13 model.embedding_dim 0.0 +94 13 optimizer.lr 0.0030829937990670213 +94 13 training.batch_size 2.0 +94 13 training.label_smoothing 0.01627868649870513 +94 14 model.output_channels 30.0 +94 14 model.input_dropout 0.41069966773043426 +94 14 model.output_dropout 0.33256774728259747 +94 14 model.feature_map_dropout 0.39044130173477104 +94 14 model.embedding_dim 2.0 +94 14 optimizer.lr 0.027291865354216384 +94 14 training.batch_size 2.0 +94 14 training.label_smoothing 0.019945656790123716 +94 15 model.output_channels 19.0 +94 15 model.input_dropout 0.18267127670324373 +94 15 model.output_dropout 0.4409160724310929 +94 15 model.feature_map_dropout 0.3988456536852118 +94 15 model.embedding_dim 1.0 +94 15 optimizer.lr 0.001266538017013644 +94 15 training.batch_size 2.0 +94 15 training.label_smoothing 0.008685522068296205 +94 16 model.output_channels 56.0 +94 16 model.input_dropout 0.007159010882967443 +94 16 model.output_dropout 0.0882190944577837 +94 16 
model.feature_map_dropout 0.32142747918432446 +94 16 model.embedding_dim 2.0 +94 16 optimizer.lr 0.001020652291488897 +94 16 training.batch_size 0.0 +94 16 training.label_smoothing 0.0031445888264331895 +94 17 model.output_channels 40.0 +94 17 model.input_dropout 0.3978176197061078 +94 17 model.output_dropout 0.4188942529910187 +94 17 model.feature_map_dropout 0.3733292553003216 +94 17 model.embedding_dim 1.0 +94 17 optimizer.lr 0.009301744944574936 +94 17 training.batch_size 0.0 +94 17 training.label_smoothing 0.4012362967412284 +94 1 dataset """wn18RR""" +94 1 model """conve""" +94 1 loss """bceaftersigmoid""" +94 1 regularizer """no""" +94 1 optimizer """adam""" +94 1 training_loop """lcwa""" +94 1 evaluator """rankbased""" +94 2 dataset """wn18RR""" +94 2 model """conve""" +94 2 loss """bceaftersigmoid""" +94 2 regularizer """no""" +94 2 optimizer """adam""" +94 2 training_loop """lcwa""" +94 2 evaluator """rankbased""" +94 3 dataset """wn18RR""" +94 3 model """conve""" +94 3 loss """bceaftersigmoid""" +94 3 regularizer """no""" +94 3 optimizer """adam""" +94 3 training_loop """lcwa""" +94 3 evaluator """rankbased""" +94 4 dataset """wn18RR""" +94 4 model """conve""" +94 4 loss """bceaftersigmoid""" +94 4 regularizer """no""" +94 4 optimizer """adam""" +94 4 training_loop """lcwa""" +94 4 evaluator """rankbased""" +94 5 dataset """wn18RR""" +94 5 model """conve""" +94 5 loss """bceaftersigmoid""" +94 5 regularizer """no""" +94 5 optimizer """adam""" +94 5 training_loop """lcwa""" +94 5 evaluator """rankbased""" +94 6 dataset """wn18RR""" +94 6 model """conve""" +94 6 loss """bceaftersigmoid""" +94 6 regularizer """no""" +94 6 optimizer """adam""" +94 6 training_loop """lcwa""" +94 6 evaluator """rankbased""" +94 7 dataset """wn18RR""" +94 7 model """conve""" +94 7 loss """bceaftersigmoid""" +94 7 regularizer """no""" +94 7 optimizer """adam""" +94 7 training_loop """lcwa""" +94 7 evaluator """rankbased""" +94 8 dataset """wn18RR""" +94 8 model """conve""" +94 8 
loss """bceaftersigmoid""" +94 8 regularizer """no""" +94 8 optimizer """adam""" +94 8 training_loop """lcwa""" +94 8 evaluator """rankbased""" +94 9 dataset """wn18RR""" +94 9 model """conve""" +94 9 loss """bceaftersigmoid""" +94 9 regularizer """no""" +94 9 optimizer """adam""" +94 9 training_loop """lcwa""" +94 9 evaluator """rankbased""" +94 10 dataset """wn18RR""" +94 10 model """conve""" +94 10 loss """bceaftersigmoid""" +94 10 regularizer """no""" +94 10 optimizer """adam""" +94 10 training_loop """lcwa""" +94 10 evaluator """rankbased""" +94 11 dataset """wn18RR""" +94 11 model """conve""" +94 11 loss """bceaftersigmoid""" +94 11 regularizer """no""" +94 11 optimizer """adam""" +94 11 training_loop """lcwa""" +94 11 evaluator """rankbased""" +94 12 dataset """wn18RR""" +94 12 model """conve""" +94 12 loss """bceaftersigmoid""" +94 12 regularizer """no""" +94 12 optimizer """adam""" +94 12 training_loop """lcwa""" +94 12 evaluator """rankbased""" +94 13 dataset """wn18RR""" +94 13 model """conve""" +94 13 loss """bceaftersigmoid""" +94 13 regularizer """no""" +94 13 optimizer """adam""" +94 13 training_loop """lcwa""" +94 13 evaluator """rankbased""" +94 14 dataset """wn18RR""" +94 14 model """conve""" +94 14 loss """bceaftersigmoid""" +94 14 regularizer """no""" +94 14 optimizer """adam""" +94 14 training_loop """lcwa""" +94 14 evaluator """rankbased""" +94 15 dataset """wn18RR""" +94 15 model """conve""" +94 15 loss """bceaftersigmoid""" +94 15 regularizer """no""" +94 15 optimizer """adam""" +94 15 training_loop """lcwa""" +94 15 evaluator """rankbased""" +94 16 dataset """wn18RR""" +94 16 model """conve""" +94 16 loss """bceaftersigmoid""" +94 16 regularizer """no""" +94 16 optimizer """adam""" +94 16 training_loop """lcwa""" +94 16 evaluator """rankbased""" +94 17 dataset """wn18RR""" +94 17 model """conve""" +94 17 loss """bceaftersigmoid""" +94 17 regularizer """no""" +94 17 optimizer """adam""" +94 17 training_loop """lcwa""" +94 17 evaluator 
"""rankbased""" +95 1 model.output_channels 20.0 +95 1 model.input_dropout 0.18488377609308154 +95 1 model.output_dropout 0.21354343099188572 +95 1 model.feature_map_dropout 0.25667250776461825 +95 1 model.embedding_dim 1.0 +95 1 optimizer.lr 0.004999288936962538 +95 1 training.batch_size 2.0 +95 1 training.label_smoothing 0.1302788618508229 +95 2 model.output_channels 48.0 +95 2 model.input_dropout 0.05605101155225034 +95 2 model.output_dropout 0.20198189475932993 +95 2 model.feature_map_dropout 0.39942766758801457 +95 2 model.embedding_dim 2.0 +95 2 optimizer.lr 0.0010825440201128497 +95 2 training.batch_size 1.0 +95 2 training.label_smoothing 0.49994533179325235 +95 3 model.output_channels 24.0 +95 3 model.input_dropout 0.2943007447460347 +95 3 model.output_dropout 0.360170450868421 +95 3 model.feature_map_dropout 0.009912562200876274 +95 3 model.embedding_dim 1.0 +95 3 optimizer.lr 0.006814116503416831 +95 3 training.batch_size 2.0 +95 3 training.label_smoothing 0.011501649431368534 +95 4 model.output_channels 44.0 +95 4 model.input_dropout 0.447860466187622 +95 4 model.output_dropout 0.44531569993223025 +95 4 model.feature_map_dropout 0.39319810258939064 +95 4 model.embedding_dim 0.0 +95 4 optimizer.lr 0.02879943996650034 +95 4 training.batch_size 0.0 +95 4 training.label_smoothing 0.03351565871993869 +95 5 model.output_channels 24.0 +95 5 model.input_dropout 0.13112161939291844 +95 5 model.output_dropout 0.28303668461933185 +95 5 model.feature_map_dropout 0.3110912230692171 +95 5 model.embedding_dim 2.0 +95 5 optimizer.lr 0.007062425145914537 +95 5 training.batch_size 0.0 +95 5 training.label_smoothing 0.0021068441392815946 +95 6 model.output_channels 24.0 +95 6 model.input_dropout 0.11342429660310827 +95 6 model.output_dropout 0.2843641065123316 +95 6 model.feature_map_dropout 0.10865393200628365 +95 6 model.embedding_dim 2.0 +95 6 optimizer.lr 0.0064455379379594275 +95 6 training.batch_size 2.0 +95 6 training.label_smoothing 0.030660393789205202 +95 7 
model.output_channels 50.0 +95 7 model.input_dropout 0.25075449820518575 +95 7 model.output_dropout 0.33491822025796536 +95 7 model.feature_map_dropout 0.34852468204195064 +95 7 model.embedding_dim 0.0 +95 7 optimizer.lr 0.0016598942956648685 +95 7 training.batch_size 2.0 +95 7 training.label_smoothing 0.010292682700875978 +95 8 model.output_channels 41.0 +95 8 model.input_dropout 0.1157875267685523 +95 8 model.output_dropout 0.19325403336549174 +95 8 model.feature_map_dropout 0.2694946921802096 +95 8 model.embedding_dim 2.0 +95 8 optimizer.lr 0.015352916291248632 +95 8 training.batch_size 0.0 +95 8 training.label_smoothing 0.4167548842221143 +95 9 model.output_channels 64.0 +95 9 model.input_dropout 0.15388014652644588 +95 9 model.output_dropout 0.23306345920783372 +95 9 model.feature_map_dropout 0.47125250427957793 +95 9 model.embedding_dim 1.0 +95 9 optimizer.lr 0.007565710701819598 +95 9 training.batch_size 0.0 +95 9 training.label_smoothing 0.013633389110015404 +95 10 model.output_channels 16.0 +95 10 model.input_dropout 0.3065104577144833 +95 10 model.output_dropout 0.47240181006649173 +95 10 model.feature_map_dropout 0.26707647210401825 +95 10 model.embedding_dim 1.0 +95 10 optimizer.lr 0.003022155677099354 +95 10 training.batch_size 0.0 +95 10 training.label_smoothing 0.10399263235423231 +95 11 model.output_channels 29.0 +95 11 model.input_dropout 0.28444524311458047 +95 11 model.output_dropout 0.40892345037802214 +95 11 model.feature_map_dropout 0.22686827132165682 +95 11 model.embedding_dim 1.0 +95 11 optimizer.lr 0.007997132563580383 +95 11 training.batch_size 0.0 +95 11 training.label_smoothing 0.01801196963349191 +95 12 model.output_channels 46.0 +95 12 model.input_dropout 0.3392003050725052 +95 12 model.output_dropout 0.18416509676461035 +95 12 model.feature_map_dropout 0.26664697185922065 +95 12 model.embedding_dim 1.0 +95 12 optimizer.lr 0.0014330645869452987 +95 12 training.batch_size 0.0 +95 12 training.label_smoothing 0.25113322331904697 +95 13 
model.output_channels 53.0 +95 13 model.input_dropout 0.14654295513809845 +95 13 model.output_dropout 0.14481099399472092 +95 13 model.feature_map_dropout 0.14822920146256963 +95 13 model.embedding_dim 2.0 +95 13 optimizer.lr 0.010046709791469526 +95 13 training.batch_size 1.0 +95 13 training.label_smoothing 0.007925257913747636 +95 14 model.output_channels 55.0 +95 14 model.input_dropout 0.348796757033841 +95 14 model.output_dropout 0.1280431091243358 +95 14 model.feature_map_dropout 0.4390504099223882 +95 14 model.embedding_dim 1.0 +95 14 optimizer.lr 0.0628506188563525 +95 14 training.batch_size 1.0 +95 14 training.label_smoothing 0.19819152816625402 +95 15 model.output_channels 61.0 +95 15 model.input_dropout 0.09283672591144876 +95 15 model.output_dropout 0.4978169260953651 +95 15 model.feature_map_dropout 0.4536068510201219 +95 15 model.embedding_dim 1.0 +95 15 optimizer.lr 0.07036944984926859 +95 15 training.batch_size 2.0 +95 15 training.label_smoothing 0.5065965877747238 +95 1 dataset """wn18RR""" +95 1 model """conve""" +95 1 loss """bceaftersigmoid""" +95 1 regularizer """no""" +95 1 optimizer """adam""" +95 1 training_loop """lcwa""" +95 1 evaluator """rankbased""" +95 2 dataset """wn18RR""" +95 2 model """conve""" +95 2 loss """bceaftersigmoid""" +95 2 regularizer """no""" +95 2 optimizer """adam""" +95 2 training_loop """lcwa""" +95 2 evaluator """rankbased""" +95 3 dataset """wn18RR""" +95 3 model """conve""" +95 3 loss """bceaftersigmoid""" +95 3 regularizer """no""" +95 3 optimizer """adam""" +95 3 training_loop """lcwa""" +95 3 evaluator """rankbased""" +95 4 dataset """wn18RR""" +95 4 model """conve""" +95 4 loss """bceaftersigmoid""" +95 4 regularizer """no""" +95 4 optimizer """adam""" +95 4 training_loop """lcwa""" +95 4 evaluator """rankbased""" +95 5 dataset """wn18RR""" +95 5 model """conve""" +95 5 loss """bceaftersigmoid""" +95 5 regularizer """no""" +95 5 optimizer """adam""" +95 5 training_loop """lcwa""" +95 5 evaluator """rankbased""" 
+95 6 dataset """wn18RR""" +95 6 model """conve""" +95 6 loss """bceaftersigmoid""" +95 6 regularizer """no""" +95 6 optimizer """adam""" +95 6 training_loop """lcwa""" +95 6 evaluator """rankbased""" +95 7 dataset """wn18RR""" +95 7 model """conve""" +95 7 loss """bceaftersigmoid""" +95 7 regularizer """no""" +95 7 optimizer """adam""" +95 7 training_loop """lcwa""" +95 7 evaluator """rankbased""" +95 8 dataset """wn18RR""" +95 8 model """conve""" +95 8 loss """bceaftersigmoid""" +95 8 regularizer """no""" +95 8 optimizer """adam""" +95 8 training_loop """lcwa""" +95 8 evaluator """rankbased""" +95 9 dataset """wn18RR""" +95 9 model """conve""" +95 9 loss """bceaftersigmoid""" +95 9 regularizer """no""" +95 9 optimizer """adam""" +95 9 training_loop """lcwa""" +95 9 evaluator """rankbased""" +95 10 dataset """wn18RR""" +95 10 model """conve""" +95 10 loss """bceaftersigmoid""" +95 10 regularizer """no""" +95 10 optimizer """adam""" +95 10 training_loop """lcwa""" +95 10 evaluator """rankbased""" +95 11 dataset """wn18RR""" +95 11 model """conve""" +95 11 loss """bceaftersigmoid""" +95 11 regularizer """no""" +95 11 optimizer """adam""" +95 11 training_loop """lcwa""" +95 11 evaluator """rankbased""" +95 12 dataset """wn18RR""" +95 12 model """conve""" +95 12 loss """bceaftersigmoid""" +95 12 regularizer """no""" +95 12 optimizer """adam""" +95 12 training_loop """lcwa""" +95 12 evaluator """rankbased""" +95 13 dataset """wn18RR""" +95 13 model """conve""" +95 13 loss """bceaftersigmoid""" +95 13 regularizer """no""" +95 13 optimizer """adam""" +95 13 training_loop """lcwa""" +95 13 evaluator """rankbased""" +95 14 dataset """wn18RR""" +95 14 model """conve""" +95 14 loss """bceaftersigmoid""" +95 14 regularizer """no""" +95 14 optimizer """adam""" +95 14 training_loop """lcwa""" +95 14 evaluator """rankbased""" +95 15 dataset """wn18RR""" +95 15 model """conve""" +95 15 loss """bceaftersigmoid""" +95 15 regularizer """no""" +95 15 optimizer """adam""" +95 15 
training_loop """lcwa""" +95 15 evaluator """rankbased""" +96 1 model.output_channels 33.0 +96 1 model.input_dropout 0.32076232378365616 +96 1 model.output_dropout 0.4263208764331753 +96 1 model.feature_map_dropout 0.14897037270466207 +96 1 model.embedding_dim 2.0 +96 1 optimizer.lr 0.03392297031405268 +96 1 negative_sampler.num_negs_per_pos 23.0 +96 1 training.batch_size 2.0 +96 2 model.output_channels 34.0 +96 2 model.input_dropout 0.22845816212132736 +96 2 model.output_dropout 0.3677014993053566 +96 2 model.feature_map_dropout 0.33276560875350203 +96 2 model.embedding_dim 1.0 +96 2 optimizer.lr 0.00688250153560519 +96 2 negative_sampler.num_negs_per_pos 18.0 +96 2 training.batch_size 2.0 +96 3 model.output_channels 18.0 +96 3 model.input_dropout 0.11562778303770804 +96 3 model.output_dropout 0.3428642201298995 +96 3 model.feature_map_dropout 0.06454224714471152 +96 3 model.embedding_dim 2.0 +96 3 optimizer.lr 0.0010514974741021523 +96 3 negative_sampler.num_negs_per_pos 7.0 +96 3 training.batch_size 0.0 +96 4 model.output_channels 33.0 +96 4 model.input_dropout 0.40547620904117576 +96 4 model.output_dropout 0.14432830474263175 +96 4 model.feature_map_dropout 0.4355375059323167 +96 4 model.embedding_dim 2.0 +96 4 optimizer.lr 0.0024909539050104327 +96 4 negative_sampler.num_negs_per_pos 43.0 +96 4 training.batch_size 0.0 +96 5 model.output_channels 16.0 +96 5 model.input_dropout 0.07838893448629697 +96 5 model.output_dropout 0.059992914104078476 +96 5 model.feature_map_dropout 0.48054746839490087 +96 5 model.embedding_dim 0.0 +96 5 optimizer.lr 0.011449147394905863 +96 5 negative_sampler.num_negs_per_pos 7.0 +96 5 training.batch_size 0.0 +96 6 model.output_channels 29.0 +96 6 model.input_dropout 0.4502580071504818 +96 6 model.output_dropout 0.20012015816473527 +96 6 model.feature_map_dropout 0.056094869492653576 +96 6 model.embedding_dim 0.0 +96 6 optimizer.lr 0.04066552846784279 +96 6 negative_sampler.num_negs_per_pos 13.0 +96 6 training.batch_size 0.0 +96 7 
model.output_channels 22.0 +96 7 model.input_dropout 0.048936258671337995 +96 7 model.output_dropout 0.1668356854090136 +96 7 model.feature_map_dropout 0.10050486524838748 +96 7 model.embedding_dim 0.0 +96 7 optimizer.lr 0.03496032587581917 +96 7 negative_sampler.num_negs_per_pos 52.0 +96 7 training.batch_size 0.0 +96 8 model.output_channels 62.0 +96 8 model.input_dropout 0.269154674228295 +96 8 model.output_dropout 0.44991108237711863 +96 8 model.feature_map_dropout 0.12309092940827909 +96 8 model.embedding_dim 1.0 +96 8 optimizer.lr 0.005004066453384125 +96 8 negative_sampler.num_negs_per_pos 59.0 +96 8 training.batch_size 2.0 +96 9 model.output_channels 45.0 +96 9 model.input_dropout 0.15891891235534317 +96 9 model.output_dropout 0.20101228965935292 +96 9 model.feature_map_dropout 0.08397932433406613 +96 9 model.embedding_dim 2.0 +96 9 optimizer.lr 0.02114302562102047 +96 9 negative_sampler.num_negs_per_pos 56.0 +96 9 training.batch_size 1.0 +96 10 model.output_channels 18.0 +96 10 model.input_dropout 0.24389466940401366 +96 10 model.output_dropout 0.22936653649693683 +96 10 model.feature_map_dropout 0.44066806384875545 +96 10 model.embedding_dim 2.0 +96 10 optimizer.lr 0.002622163170628495 +96 10 negative_sampler.num_negs_per_pos 84.0 +96 10 training.batch_size 0.0 +96 11 model.output_channels 33.0 +96 11 model.input_dropout 0.057467312113373525 +96 11 model.output_dropout 0.4475165468175918 +96 11 model.feature_map_dropout 0.49246103179890793 +96 11 model.embedding_dim 1.0 +96 11 optimizer.lr 0.033059846299367646 +96 11 negative_sampler.num_negs_per_pos 42.0 +96 11 training.batch_size 2.0 +96 12 model.output_channels 43.0 +96 12 model.input_dropout 0.09720637473861615 +96 12 model.output_dropout 0.1877629103840504 +96 12 model.feature_map_dropout 0.015411803407013924 +96 12 model.embedding_dim 0.0 +96 12 optimizer.lr 0.012479299888784632 +96 12 negative_sampler.num_negs_per_pos 0.0 +96 12 training.batch_size 0.0 +96 13 model.output_channels 51.0 +96 13 
model.input_dropout 0.2085727619542505 +96 13 model.output_dropout 0.3571609684985909 +96 13 model.feature_map_dropout 0.1069424898714415 +96 13 model.embedding_dim 1.0 +96 13 optimizer.lr 0.0019408091665623498 +96 13 negative_sampler.num_negs_per_pos 93.0 +96 13 training.batch_size 1.0 +96 14 model.output_channels 19.0 +96 14 model.input_dropout 0.36709789634290274 +96 14 model.output_dropout 0.22219125360918102 +96 14 model.feature_map_dropout 0.24718911448463027 +96 14 model.embedding_dim 2.0 +96 14 optimizer.lr 0.0021552167174265055 +96 14 negative_sampler.num_negs_per_pos 76.0 +96 14 training.batch_size 1.0 +96 1 dataset """wn18rr""" +96 1 model """conve""" +96 1 loss """bceaftersigmoid""" +96 1 regularizer """no""" +96 1 optimizer """adam""" +96 1 training_loop """owa""" +96 1 negative_sampler """basic""" +96 1 evaluator """rankbased""" +96 2 dataset """wn18rr""" +96 2 model """conve""" +96 2 loss """bceaftersigmoid""" +96 2 regularizer """no""" +96 2 optimizer """adam""" +96 2 training_loop """owa""" +96 2 negative_sampler """basic""" +96 2 evaluator """rankbased""" +96 3 dataset """wn18rr""" +96 3 model """conve""" +96 3 loss """bceaftersigmoid""" +96 3 regularizer """no""" +96 3 optimizer """adam""" +96 3 training_loop """owa""" +96 3 negative_sampler """basic""" +96 3 evaluator """rankbased""" +96 4 dataset """wn18rr""" +96 4 model """conve""" +96 4 loss """bceaftersigmoid""" +96 4 regularizer """no""" +96 4 optimizer """adam""" +96 4 training_loop """owa""" +96 4 negative_sampler """basic""" +96 4 evaluator """rankbased""" +96 5 dataset """wn18rr""" +96 5 model """conve""" +96 5 loss """bceaftersigmoid""" +96 5 regularizer """no""" +96 5 optimizer """adam""" +96 5 training_loop """owa""" +96 5 negative_sampler """basic""" +96 5 evaluator """rankbased""" +96 6 dataset """wn18rr""" +96 6 model """conve""" +96 6 loss """bceaftersigmoid""" +96 6 regularizer """no""" +96 6 optimizer """adam""" +96 6 training_loop """owa""" +96 6 negative_sampler """basic""" 
+96 6 evaluator """rankbased""" +96 7 dataset """wn18rr""" +96 7 model """conve""" +96 7 loss """bceaftersigmoid""" +96 7 regularizer """no""" +96 7 optimizer """adam""" +96 7 training_loop """owa""" +96 7 negative_sampler """basic""" +96 7 evaluator """rankbased""" +96 8 dataset """wn18rr""" +96 8 model """conve""" +96 8 loss """bceaftersigmoid""" +96 8 regularizer """no""" +96 8 optimizer """adam""" +96 8 training_loop """owa""" +96 8 negative_sampler """basic""" +96 8 evaluator """rankbased""" +96 9 dataset """wn18rr""" +96 9 model """conve""" +96 9 loss """bceaftersigmoid""" +96 9 regularizer """no""" +96 9 optimizer """adam""" +96 9 training_loop """owa""" +96 9 negative_sampler """basic""" +96 9 evaluator """rankbased""" +96 10 dataset """wn18rr""" +96 10 model """conve""" +96 10 loss """bceaftersigmoid""" +96 10 regularizer """no""" +96 10 optimizer """adam""" +96 10 training_loop """owa""" +96 10 negative_sampler """basic""" +96 10 evaluator """rankbased""" +96 11 dataset """wn18rr""" +96 11 model """conve""" +96 11 loss """bceaftersigmoid""" +96 11 regularizer """no""" +96 11 optimizer """adam""" +96 11 training_loop """owa""" +96 11 negative_sampler """basic""" +96 11 evaluator """rankbased""" +96 12 dataset """wn18rr""" +96 12 model """conve""" +96 12 loss """bceaftersigmoid""" +96 12 regularizer """no""" +96 12 optimizer """adam""" +96 12 training_loop """owa""" +96 12 negative_sampler """basic""" +96 12 evaluator """rankbased""" +96 13 dataset """wn18rr""" +96 13 model """conve""" +96 13 loss """bceaftersigmoid""" +96 13 regularizer """no""" +96 13 optimizer """adam""" +96 13 training_loop """owa""" +96 13 negative_sampler """basic""" +96 13 evaluator """rankbased""" +96 14 dataset """wn18rr""" +96 14 model """conve""" +96 14 loss """bceaftersigmoid""" +96 14 regularizer """no""" +96 14 optimizer """adam""" +96 14 training_loop """owa""" +96 14 negative_sampler """basic""" +96 14 evaluator """rankbased""" +97 1 model.output_channels 51.0 +97 1 
model.input_dropout 0.016067225039857025 +97 1 model.output_dropout 0.3423712943734783 +97 1 model.feature_map_dropout 0.1203677827892603 +97 1 model.embedding_dim 1.0 +97 1 optimizer.lr 0.0038044084958663306 +97 1 negative_sampler.num_negs_per_pos 76.0 +97 1 training.batch_size 2.0 +97 2 model.output_channels 37.0 +97 2 model.input_dropout 0.06450586043021012 +97 2 model.output_dropout 0.25260011292306295 +97 2 model.feature_map_dropout 0.27307157041048435 +97 2 model.embedding_dim 1.0 +97 2 optimizer.lr 0.0034367952212861225 +97 2 negative_sampler.num_negs_per_pos 82.0 +97 2 training.batch_size 0.0 +97 3 model.output_channels 32.0 +97 3 model.input_dropout 0.4147911380909954 +97 3 model.output_dropout 0.17806566825049425 +97 3 model.feature_map_dropout 0.26314816845318106 +97 3 model.embedding_dim 2.0 +97 3 optimizer.lr 0.026515670640245504 +97 3 negative_sampler.num_negs_per_pos 97.0 +97 3 training.batch_size 0.0 +97 4 model.output_channels 40.0 +97 4 model.input_dropout 0.04000000892294925 +97 4 model.output_dropout 0.194388774161467 +97 4 model.feature_map_dropout 0.27072038620148126 +97 4 model.embedding_dim 2.0 +97 4 optimizer.lr 0.029408017982547347 +97 4 negative_sampler.num_negs_per_pos 9.0 +97 4 training.batch_size 2.0 +97 5 model.output_channels 23.0 +97 5 model.input_dropout 0.38338310362889383 +97 5 model.output_dropout 0.11654326671710125 +97 5 model.feature_map_dropout 0.048150920649463025 +97 5 model.embedding_dim 2.0 +97 5 optimizer.lr 0.08294152509062787 +97 5 negative_sampler.num_negs_per_pos 68.0 +97 5 training.batch_size 0.0 +97 6 model.output_channels 36.0 +97 6 model.input_dropout 0.40464665750447526 +97 6 model.output_dropout 0.3529496457896798 +97 6 model.feature_map_dropout 0.21323471219363094 +97 6 model.embedding_dim 1.0 +97 6 optimizer.lr 0.021720908919189846 +97 6 negative_sampler.num_negs_per_pos 75.0 +97 6 training.batch_size 1.0 +97 7 model.output_channels 44.0 +97 7 model.input_dropout 0.04220587190077091 +97 7 
model.output_dropout 0.4274329317565097 +97 7 model.feature_map_dropout 0.42952111000560705 +97 7 model.embedding_dim 0.0 +97 7 optimizer.lr 0.0014075559012057231 +97 7 negative_sampler.num_negs_per_pos 99.0 +97 7 training.batch_size 0.0 +97 8 model.output_channels 19.0 +97 8 model.input_dropout 0.3607998672961114 +97 8 model.output_dropout 0.3111991809870813 +97 8 model.feature_map_dropout 0.4776998414885018 +97 8 model.embedding_dim 0.0 +97 8 optimizer.lr 0.002484171165173687 +97 8 negative_sampler.num_negs_per_pos 96.0 +97 8 training.batch_size 1.0 +97 9 model.output_channels 27.0 +97 9 model.input_dropout 0.0765213703934059 +97 9 model.output_dropout 0.3220839457363048 +97 9 model.feature_map_dropout 0.3107572399019997 +97 9 model.embedding_dim 2.0 +97 9 optimizer.lr 0.0024962872132476698 +97 9 negative_sampler.num_negs_per_pos 20.0 +97 9 training.batch_size 0.0 +97 10 model.output_channels 52.0 +97 10 model.input_dropout 0.36201486507705866 +97 10 model.output_dropout 0.13125315205171673 +97 10 model.feature_map_dropout 0.33222028888981353 +97 10 model.embedding_dim 2.0 +97 10 optimizer.lr 0.086218383478463 +97 10 negative_sampler.num_negs_per_pos 74.0 +97 10 training.batch_size 0.0 +97 11 model.output_channels 28.0 +97 11 model.input_dropout 0.12678863278882352 +97 11 model.output_dropout 0.4751258900433762 +97 11 model.feature_map_dropout 0.41215962682183294 +97 11 model.embedding_dim 2.0 +97 11 optimizer.lr 0.0026358056915873275 +97 11 negative_sampler.num_negs_per_pos 43.0 +97 11 training.batch_size 1.0 +97 12 model.output_channels 48.0 +97 12 model.input_dropout 0.3255950028651814 +97 12 model.output_dropout 0.282946940153455 +97 12 model.feature_map_dropout 0.3003915094091448 +97 12 model.embedding_dim 1.0 +97 12 optimizer.lr 0.009971056890467579 +97 12 negative_sampler.num_negs_per_pos 16.0 +97 12 training.batch_size 1.0 +97 13 model.output_channels 44.0 +97 13 model.input_dropout 0.16269627685361776 +97 13 model.output_dropout 0.36112226031096684 +97 
13 model.feature_map_dropout 0.48666137454669617 +97 13 model.embedding_dim 2.0 +97 13 optimizer.lr 0.0030837604321150634 +97 13 negative_sampler.num_negs_per_pos 43.0 +97 13 training.batch_size 2.0 +97 14 model.output_channels 24.0 +97 14 model.input_dropout 0.15370786134261505 +97 14 model.output_dropout 0.2752050807575139 +97 14 model.feature_map_dropout 0.12149231395783927 +97 14 model.embedding_dim 0.0 +97 14 optimizer.lr 0.0013666650472382857 +97 14 negative_sampler.num_negs_per_pos 45.0 +97 14 training.batch_size 2.0 +97 15 model.output_channels 46.0 +97 15 model.input_dropout 0.2309543246138248 +97 15 model.output_dropout 0.44753655300447404 +97 15 model.feature_map_dropout 0.4075731430099228 +97 15 model.embedding_dim 2.0 +97 15 optimizer.lr 0.0010580414690631464 +97 15 negative_sampler.num_negs_per_pos 80.0 +97 15 training.batch_size 0.0 +97 1 dataset """wn18rr""" +97 1 model """conve""" +97 1 loss """bceaftersigmoid""" +97 1 regularizer """no""" +97 1 optimizer """adam""" +97 1 training_loop """owa""" +97 1 negative_sampler """basic""" +97 1 evaluator """rankbased""" +97 2 dataset """wn18rr""" +97 2 model """conve""" +97 2 loss """bceaftersigmoid""" +97 2 regularizer """no""" +97 2 optimizer """adam""" +97 2 training_loop """owa""" +97 2 negative_sampler """basic""" +97 2 evaluator """rankbased""" +97 3 dataset """wn18rr""" +97 3 model """conve""" +97 3 loss """bceaftersigmoid""" +97 3 regularizer """no""" +97 3 optimizer """adam""" +97 3 training_loop """owa""" +97 3 negative_sampler """basic""" +97 3 evaluator """rankbased""" +97 4 dataset """wn18rr""" +97 4 model """conve""" +97 4 loss """bceaftersigmoid""" +97 4 regularizer """no""" +97 4 optimizer """adam""" +97 4 training_loop """owa""" +97 4 negative_sampler """basic""" +97 4 evaluator """rankbased""" +97 5 dataset """wn18rr""" +97 5 model """conve""" +97 5 loss """bceaftersigmoid""" +97 5 regularizer """no""" +97 5 optimizer """adam""" +97 5 training_loop """owa""" +97 5 negative_sampler 
"""basic""" +97 5 evaluator """rankbased""" +97 6 dataset """wn18rr""" +97 6 model """conve""" +97 6 loss """bceaftersigmoid""" +97 6 regularizer """no""" +97 6 optimizer """adam""" +97 6 training_loop """owa""" +97 6 negative_sampler """basic""" +97 6 evaluator """rankbased""" +97 7 dataset """wn18rr""" +97 7 model """conve""" +97 7 loss """bceaftersigmoid""" +97 7 regularizer """no""" +97 7 optimizer """adam""" +97 7 training_loop """owa""" +97 7 negative_sampler """basic""" +97 7 evaluator """rankbased""" +97 8 dataset """wn18rr""" +97 8 model """conve""" +97 8 loss """bceaftersigmoid""" +97 8 regularizer """no""" +97 8 optimizer """adam""" +97 8 training_loop """owa""" +97 8 negative_sampler """basic""" +97 8 evaluator """rankbased""" +97 9 dataset """wn18rr""" +97 9 model """conve""" +97 9 loss """bceaftersigmoid""" +97 9 regularizer """no""" +97 9 optimizer """adam""" +97 9 training_loop """owa""" +97 9 negative_sampler """basic""" +97 9 evaluator """rankbased""" +97 10 dataset """wn18rr""" +97 10 model """conve""" +97 10 loss """bceaftersigmoid""" +97 10 regularizer """no""" +97 10 optimizer """adam""" +97 10 training_loop """owa""" +97 10 negative_sampler """basic""" +97 10 evaluator """rankbased""" +97 11 dataset """wn18rr""" +97 11 model """conve""" +97 11 loss """bceaftersigmoid""" +97 11 regularizer """no""" +97 11 optimizer """adam""" +97 11 training_loop """owa""" +97 11 negative_sampler """basic""" +97 11 evaluator """rankbased""" +97 12 dataset """wn18rr""" +97 12 model """conve""" +97 12 loss """bceaftersigmoid""" +97 12 regularizer """no""" +97 12 optimizer """adam""" +97 12 training_loop """owa""" +97 12 negative_sampler """basic""" +97 12 evaluator """rankbased""" +97 13 dataset """wn18rr""" +97 13 model """conve""" +97 13 loss """bceaftersigmoid""" +97 13 regularizer """no""" +97 13 optimizer """adam""" +97 13 training_loop """owa""" +97 13 negative_sampler """basic""" +97 13 evaluator """rankbased""" +97 14 dataset """wn18rr""" +97 14 model 
"""conve""" +97 14 loss """bceaftersigmoid""" +97 14 regularizer """no""" +97 14 optimizer """adam""" +97 14 training_loop """owa""" +97 14 negative_sampler """basic""" +97 14 evaluator """rankbased""" +97 15 dataset """wn18rr""" +97 15 model """conve""" +97 15 loss """bceaftersigmoid""" +97 15 regularizer """no""" +97 15 optimizer """adam""" +97 15 training_loop """owa""" +97 15 negative_sampler """basic""" +97 15 evaluator """rankbased""" +98 1 model.output_channels 48.0 +98 1 model.input_dropout 0.12277097191698305 +98 1 model.output_dropout 0.12762037693995498 +98 1 model.feature_map_dropout 0.4072776410125495 +98 1 model.embedding_dim 2.0 +98 1 optimizer.lr 0.0010363585744306682 +98 1 training.batch_size 1.0 +98 1 training.label_smoothing 0.14287234360779352 +98 2 model.output_channels 53.0 +98 2 model.input_dropout 0.4642054999193863 +98 2 model.output_dropout 0.061483341532827696 +98 2 model.feature_map_dropout 0.49886205032130376 +98 2 model.embedding_dim 2.0 +98 2 optimizer.lr 0.006567524171177799 +98 2 training.batch_size 0.0 +98 2 training.label_smoothing 0.23935388012907208 +98 3 model.output_channels 63.0 +98 3 model.input_dropout 0.1105738214917873 +98 3 model.output_dropout 0.47116277737840573 +98 3 model.feature_map_dropout 0.3071820926193031 +98 3 model.embedding_dim 0.0 +98 3 optimizer.lr 0.08225942151140701 +98 3 training.batch_size 1.0 +98 3 training.label_smoothing 0.001299718174837043 +98 4 model.output_channels 51.0 +98 4 model.input_dropout 0.009734800046857173 +98 4 model.output_dropout 0.058894795061174676 +98 4 model.feature_map_dropout 0.18959906636922275 +98 4 model.embedding_dim 2.0 +98 4 optimizer.lr 0.08562957449402253 +98 4 training.batch_size 1.0 +98 4 training.label_smoothing 0.4114936718072195 +98 5 model.output_channels 38.0 +98 5 model.input_dropout 0.059808546688633835 +98 5 model.output_dropout 0.003989999763556462 +98 5 model.feature_map_dropout 0.36011687854181795 +98 5 model.embedding_dim 2.0 +98 5 optimizer.lr 
0.007448771676801395 +98 5 training.batch_size 1.0 +98 5 training.label_smoothing 0.0012685468004512831 +98 6 model.output_channels 42.0 +98 6 model.input_dropout 0.37883516097009784 +98 6 model.output_dropout 0.2519443605255578 +98 6 model.feature_map_dropout 0.4501021656719673 +98 6 model.embedding_dim 0.0 +98 6 optimizer.lr 0.0023613858363340603 +98 6 training.batch_size 0.0 +98 6 training.label_smoothing 0.0011663236515392053 +98 7 model.output_channels 54.0 +98 7 model.input_dropout 0.18244782490542777 +98 7 model.output_dropout 0.27275245082971106 +98 7 model.feature_map_dropout 0.3592077097901028 +98 7 model.embedding_dim 0.0 +98 7 optimizer.lr 0.006304775853664379 +98 7 training.batch_size 0.0 +98 7 training.label_smoothing 0.0429085042987945 +98 8 model.output_channels 47.0 +98 8 model.input_dropout 0.4671122782181629 +98 8 model.output_dropout 0.32454304412908874 +98 8 model.feature_map_dropout 0.28589935298713803 +98 8 model.embedding_dim 2.0 +98 8 optimizer.lr 0.0013527416109107738 +98 8 training.batch_size 1.0 +98 8 training.label_smoothing 0.0350576406721521 +98 9 model.output_channels 47.0 +98 9 model.input_dropout 0.3816637056174807 +98 9 model.output_dropout 0.14639529164005782 +98 9 model.feature_map_dropout 0.39680093020619533 +98 9 model.embedding_dim 1.0 +98 9 optimizer.lr 0.019355319089215374 +98 9 training.batch_size 2.0 +98 9 training.label_smoothing 0.0010857754049008266 +98 10 model.output_channels 28.0 +98 10 model.input_dropout 0.4715429956913999 +98 10 model.output_dropout 0.2907776720258107 +98 10 model.feature_map_dropout 0.23374129496335228 +98 10 model.embedding_dim 2.0 +98 10 optimizer.lr 0.0012640246375960125 +98 10 training.batch_size 0.0 +98 10 training.label_smoothing 0.06542027570252223 +98 11 model.output_channels 60.0 +98 11 model.input_dropout 0.01265312301051108 +98 11 model.output_dropout 0.07377393200029614 +98 11 model.feature_map_dropout 0.4012656402771272 +98 11 model.embedding_dim 0.0 +98 11 optimizer.lr 
0.011783137657056112 +98 11 training.batch_size 0.0 +98 11 training.label_smoothing 0.4567060649657873 +98 12 model.output_channels 64.0 +98 12 model.input_dropout 0.16450030427951345 +98 12 model.output_dropout 0.11645823800439148 +98 12 model.feature_map_dropout 0.005261081777061027 +98 12 model.embedding_dim 2.0 +98 12 optimizer.lr 0.04685529291062936 +98 12 training.batch_size 0.0 +98 12 training.label_smoothing 0.0019015354211507996 +98 13 model.output_channels 30.0 +98 13 model.input_dropout 0.2921313635238533 +98 13 model.output_dropout 0.25270175492802616 +98 13 model.feature_map_dropout 0.11790148779372422 +98 13 model.embedding_dim 2.0 +98 13 optimizer.lr 0.015009309981271637 +98 13 training.batch_size 2.0 +98 13 training.label_smoothing 0.005082228398766022 +98 14 model.output_channels 24.0 +98 14 model.input_dropout 0.2462317273670166 +98 14 model.output_dropout 0.03205416510413989 +98 14 model.feature_map_dropout 0.16862450988833544 +98 14 model.embedding_dim 0.0 +98 14 optimizer.lr 0.0022365959328519915 +98 14 training.batch_size 2.0 +98 14 training.label_smoothing 0.001306939026671064 +98 15 model.output_channels 41.0 +98 15 model.input_dropout 0.24427497442998164 +98 15 model.output_dropout 0.4419746820050455 +98 15 model.feature_map_dropout 0.4114452414683634 +98 15 model.embedding_dim 1.0 +98 15 optimizer.lr 0.0046579590556040835 +98 15 training.batch_size 0.0 +98 15 training.label_smoothing 0.16762030870819242 +98 16 model.output_channels 60.0 +98 16 model.input_dropout 0.3716124169989048 +98 16 model.output_dropout 0.37397680404406786 +98 16 model.feature_map_dropout 0.3912814046377215 +98 16 model.embedding_dim 0.0 +98 16 optimizer.lr 0.051982626169060644 +98 16 training.batch_size 0.0 +98 16 training.label_smoothing 0.003243214069799629 +98 17 model.output_channels 48.0 +98 17 model.input_dropout 0.41683770748481125 +98 17 model.output_dropout 0.09064006448638906 +98 17 model.feature_map_dropout 0.27250246865418837 +98 17 model.embedding_dim 
2.0 +98 17 optimizer.lr 0.025859117850387452 +98 17 training.batch_size 1.0 +98 17 training.label_smoothing 0.0048397542006021225 +98 18 model.output_channels 34.0 +98 18 model.input_dropout 0.014025933773667398 +98 18 model.output_dropout 0.24497422088567644 +98 18 model.feature_map_dropout 0.49783829889832515 +98 18 model.embedding_dim 1.0 +98 18 optimizer.lr 0.010109511471856637 +98 18 training.batch_size 0.0 +98 18 training.label_smoothing 0.011057130943412338 +98 19 model.output_channels 32.0 +98 19 model.input_dropout 0.3527057587481529 +98 19 model.output_dropout 0.034655342126778343 +98 19 model.feature_map_dropout 0.13507050164029621 +98 19 model.embedding_dim 0.0 +98 19 optimizer.lr 0.0022145710167827395 +98 19 training.batch_size 2.0 +98 19 training.label_smoothing 0.023526929744852473 +98 20 model.output_channels 46.0 +98 20 model.input_dropout 0.3755751086347973 +98 20 model.output_dropout 0.10071459282667711 +98 20 model.feature_map_dropout 0.3276262716911349 +98 20 model.embedding_dim 2.0 +98 20 optimizer.lr 0.004044310092293596 +98 20 training.batch_size 1.0 +98 20 training.label_smoothing 0.038137013971275904 +98 21 model.output_channels 22.0 +98 21 model.input_dropout 0.32280460087522966 +98 21 model.output_dropout 0.08385405170669868 +98 21 model.feature_map_dropout 0.13684872815300841 +98 21 model.embedding_dim 2.0 +98 21 optimizer.lr 0.044737565539164435 +98 21 training.batch_size 1.0 +98 21 training.label_smoothing 0.005939160872034947 +98 22 model.output_channels 28.0 +98 22 model.input_dropout 0.39778803533672413 +98 22 model.output_dropout 0.30144479105820243 +98 22 model.feature_map_dropout 0.485281134167566 +98 22 model.embedding_dim 1.0 +98 22 optimizer.lr 0.0016744682382074517 +98 22 training.batch_size 0.0 +98 22 training.label_smoothing 0.01287658772740829 +98 23 model.output_channels 18.0 +98 23 model.input_dropout 0.04905570155644934 +98 23 model.output_dropout 0.4243367612941149 +98 23 model.feature_map_dropout 0.1551572023871322 
+98 23 model.embedding_dim 0.0 +98 23 optimizer.lr 0.07148058710550027 +98 23 training.batch_size 1.0 +98 23 training.label_smoothing 0.11891195559089979 +98 24 model.output_channels 47.0 +98 24 model.input_dropout 0.13060848904474331 +98 24 model.output_dropout 0.2061666649358319 +98 24 model.feature_map_dropout 0.47076480431695444 +98 24 model.embedding_dim 2.0 +98 24 optimizer.lr 0.004799675205102816 +98 24 training.batch_size 1.0 +98 24 training.label_smoothing 0.11561796645258453 +98 25 model.output_channels 26.0 +98 25 model.input_dropout 0.3504324521115593 +98 25 model.output_dropout 0.4374207433373699 +98 25 model.feature_map_dropout 0.3198317214443454 +98 25 model.embedding_dim 0.0 +98 25 optimizer.lr 0.0011770636560888518 +98 25 training.batch_size 0.0 +98 25 training.label_smoothing 0.014510526823588523 +98 26 model.output_channels 36.0 +98 26 model.input_dropout 0.42909631274617704 +98 26 model.output_dropout 0.3167975796083792 +98 26 model.feature_map_dropout 0.4517493588226937 +98 26 model.embedding_dim 1.0 +98 26 optimizer.lr 0.029140501525019895 +98 26 training.batch_size 2.0 +98 26 training.label_smoothing 0.4444871249493944 +98 1 dataset """wn18RR""" +98 1 model """conve""" +98 1 loss """softplus""" +98 1 regularizer """no""" +98 1 optimizer """adam""" +98 1 training_loop """lcwa""" +98 1 evaluator """rankbased""" +98 2 dataset """wn18RR""" +98 2 model """conve""" +98 2 loss """softplus""" +98 2 regularizer """no""" +98 2 optimizer """adam""" +98 2 training_loop """lcwa""" +98 2 evaluator """rankbased""" +98 3 dataset """wn18RR""" +98 3 model """conve""" +98 3 loss """softplus""" +98 3 regularizer """no""" +98 3 optimizer """adam""" +98 3 training_loop """lcwa""" +98 3 evaluator """rankbased""" +98 4 dataset """wn18RR""" +98 4 model """conve""" +98 4 loss """softplus""" +98 4 regularizer """no""" +98 4 optimizer """adam""" +98 4 training_loop """lcwa""" +98 4 evaluator """rankbased""" +98 5 dataset """wn18RR""" +98 5 model """conve""" +98 5 loss 
"""softplus""" +98 5 regularizer """no""" +98 5 optimizer """adam""" +98 5 training_loop """lcwa""" +98 5 evaluator """rankbased""" +98 6 dataset """wn18RR""" +98 6 model """conve""" +98 6 loss """softplus""" +98 6 regularizer """no""" +98 6 optimizer """adam""" +98 6 training_loop """lcwa""" +98 6 evaluator """rankbased""" +98 7 dataset """wn18RR""" +98 7 model """conve""" +98 7 loss """softplus""" +98 7 regularizer """no""" +98 7 optimizer """adam""" +98 7 training_loop """lcwa""" +98 7 evaluator """rankbased""" +98 8 dataset """wn18RR""" +98 8 model """conve""" +98 8 loss """softplus""" +98 8 regularizer """no""" +98 8 optimizer """adam""" +98 8 training_loop """lcwa""" +98 8 evaluator """rankbased""" +98 9 dataset """wn18RR""" +98 9 model """conve""" +98 9 loss """softplus""" +98 9 regularizer """no""" +98 9 optimizer """adam""" +98 9 training_loop """lcwa""" +98 9 evaluator """rankbased""" +98 10 dataset """wn18RR""" +98 10 model """conve""" +98 10 loss """softplus""" +98 10 regularizer """no""" +98 10 optimizer """adam""" +98 10 training_loop """lcwa""" +98 10 evaluator """rankbased""" +98 11 dataset """wn18RR""" +98 11 model """conve""" +98 11 loss """softplus""" +98 11 regularizer """no""" +98 11 optimizer """adam""" +98 11 training_loop """lcwa""" +98 11 evaluator """rankbased""" +98 12 dataset """wn18RR""" +98 12 model """conve""" +98 12 loss """softplus""" +98 12 regularizer """no""" +98 12 optimizer """adam""" +98 12 training_loop """lcwa""" +98 12 evaluator """rankbased""" +98 13 dataset """wn18RR""" +98 13 model """conve""" +98 13 loss """softplus""" +98 13 regularizer """no""" +98 13 optimizer """adam""" +98 13 training_loop """lcwa""" +98 13 evaluator """rankbased""" +98 14 dataset """wn18RR""" +98 14 model """conve""" +98 14 loss """softplus""" +98 14 regularizer """no""" +98 14 optimizer """adam""" +98 14 training_loop """lcwa""" +98 14 evaluator """rankbased""" +98 15 dataset """wn18RR""" +98 15 model """conve""" +98 15 loss """softplus""" +98 15 
regularizer """no""" +98 15 optimizer """adam""" +98 15 training_loop """lcwa""" +98 15 evaluator """rankbased""" +98 16 dataset """wn18RR""" +98 16 model """conve""" +98 16 loss """softplus""" +98 16 regularizer """no""" +98 16 optimizer """adam""" +98 16 training_loop """lcwa""" +98 16 evaluator """rankbased""" +98 17 dataset """wn18RR""" +98 17 model """conve""" +98 17 loss """softplus""" +98 17 regularizer """no""" +98 17 optimizer """adam""" +98 17 training_loop """lcwa""" +98 17 evaluator """rankbased""" +98 18 dataset """wn18RR""" +98 18 model """conve""" +98 18 loss """softplus""" +98 18 regularizer """no""" +98 18 optimizer """adam""" +98 18 training_loop """lcwa""" +98 18 evaluator """rankbased""" +98 19 dataset """wn18RR""" +98 19 model """conve""" +98 19 loss """softplus""" +98 19 regularizer """no""" +98 19 optimizer """adam""" +98 19 training_loop """lcwa""" +98 19 evaluator """rankbased""" +98 20 dataset """wn18RR""" +98 20 model """conve""" +98 20 loss """softplus""" +98 20 regularizer """no""" +98 20 optimizer """adam""" +98 20 training_loop """lcwa""" +98 20 evaluator """rankbased""" +98 21 dataset """wn18RR""" +98 21 model """conve""" +98 21 loss """softplus""" +98 21 regularizer """no""" +98 21 optimizer """adam""" +98 21 training_loop """lcwa""" +98 21 evaluator """rankbased""" +98 22 dataset """wn18RR""" +98 22 model """conve""" +98 22 loss """softplus""" +98 22 regularizer """no""" +98 22 optimizer """adam""" +98 22 training_loop """lcwa""" +98 22 evaluator """rankbased""" +98 23 dataset """wn18RR""" +98 23 model """conve""" +98 23 loss """softplus""" +98 23 regularizer """no""" +98 23 optimizer """adam""" +98 23 training_loop """lcwa""" +98 23 evaluator """rankbased""" +98 24 dataset """wn18RR""" +98 24 model """conve""" +98 24 loss """softplus""" +98 24 regularizer """no""" +98 24 optimizer """adam""" +98 24 training_loop """lcwa""" +98 24 evaluator """rankbased""" +98 25 dataset """wn18RR""" +98 25 model """conve""" +98 25 loss 
"""softplus""" +98 25 regularizer """no""" +98 25 optimizer """adam""" +98 25 training_loop """lcwa""" +98 25 evaluator """rankbased""" +98 26 dataset """wn18RR""" +98 26 model """conve""" +98 26 loss """softplus""" +98 26 regularizer """no""" +98 26 optimizer """adam""" +98 26 training_loop """lcwa""" +98 26 evaluator """rankbased""" +99 1 model.output_channels 58.0 +99 1 model.input_dropout 0.17437632403364312 +99 1 model.output_dropout 0.3723999970811062 +99 1 model.feature_map_dropout 0.07593506273179557 +99 1 model.embedding_dim 1.0 +99 1 optimizer.lr 0.0788731118976246 +99 1 training.batch_size 0.0 +99 1 training.label_smoothing 0.1030726020202757 +99 2 model.output_channels 44.0 +99 2 model.input_dropout 0.4843308335328361 +99 2 model.output_dropout 0.0829456019486342 +99 2 model.feature_map_dropout 0.41344982549095904 +99 2 model.embedding_dim 1.0 +99 2 optimizer.lr 0.012569902301043049 +99 2 training.batch_size 2.0 +99 2 training.label_smoothing 0.11114031000239424 +99 3 model.output_channels 29.0 +99 3 model.input_dropout 0.003889309278010955 +99 3 model.output_dropout 0.2989381052307023 +99 3 model.feature_map_dropout 0.02596669598675011 +99 3 model.embedding_dim 0.0 +99 3 optimizer.lr 0.012670243140775176 +99 3 training.batch_size 1.0 +99 3 training.label_smoothing 0.011671409027314634 +99 4 model.output_channels 32.0 +99 4 model.input_dropout 0.4170133391551912 +99 4 model.output_dropout 0.0634756991444484 +99 4 model.feature_map_dropout 0.15216955628119366 +99 4 model.embedding_dim 1.0 +99 4 optimizer.lr 0.008550990249185163 +99 4 training.batch_size 2.0 +99 4 training.label_smoothing 0.10466839492648618 +99 5 model.output_channels 17.0 +99 5 model.input_dropout 0.3384536877552397 +99 5 model.output_dropout 0.4766335510568177 +99 5 model.feature_map_dropout 0.19269536047417513 +99 5 model.embedding_dim 2.0 +99 5 optimizer.lr 0.007763545024988028 +99 5 training.batch_size 1.0 +99 5 training.label_smoothing 0.002952785301245672 +99 6 
model.output_channels 43.0 +99 6 model.input_dropout 0.46165913648256296 +99 6 model.output_dropout 0.1984866840231052 +99 6 model.feature_map_dropout 0.3779705276158576 +99 6 model.embedding_dim 2.0 +99 6 optimizer.lr 0.016876969084256215 +99 6 training.batch_size 2.0 +99 6 training.label_smoothing 0.43086011802282703 +99 7 model.output_channels 30.0 +99 7 model.input_dropout 0.1279879652439856 +99 7 model.output_dropout 0.29174481813616054 +99 7 model.feature_map_dropout 0.2275063889774151 +99 7 model.embedding_dim 2.0 +99 7 optimizer.lr 0.03676464735518334 +99 7 training.batch_size 2.0 +99 7 training.label_smoothing 0.39067238150168787 +99 8 model.output_channels 50.0 +99 8 model.input_dropout 0.045657022229674105 +99 8 model.output_dropout 0.2670253440700118 +99 8 model.feature_map_dropout 0.4506614340480792 +99 8 model.embedding_dim 2.0 +99 8 optimizer.lr 0.08328216120598497 +99 8 training.batch_size 2.0 +99 8 training.label_smoothing 0.0014507163312169463 +99 9 model.output_channels 64.0 +99 9 model.input_dropout 0.011697729029779558 +99 9 model.output_dropout 0.19445004222711154 +99 9 model.feature_map_dropout 0.41903382716878146 +99 9 model.embedding_dim 1.0 +99 9 optimizer.lr 0.0018013506048654559 +99 9 training.batch_size 0.0 +99 9 training.label_smoothing 0.13926478627430863 +99 10 model.output_channels 50.0 +99 10 model.input_dropout 0.2720445784211876 +99 10 model.output_dropout 0.05808238572977714 +99 10 model.feature_map_dropout 0.36146791118078714 +99 10 model.embedding_dim 0.0 +99 10 optimizer.lr 0.0027497615724663576 +99 10 training.batch_size 0.0 +99 10 training.label_smoothing 0.003602591168834107 +99 11 model.output_channels 29.0 +99 11 model.input_dropout 0.2981873847394389 +99 11 model.output_dropout 0.46852493821370544 +99 11 model.feature_map_dropout 0.4694674575922877 +99 11 model.embedding_dim 1.0 +99 11 optimizer.lr 0.057877233123109904 +99 11 training.batch_size 2.0 +99 11 training.label_smoothing 0.0026353279038030266 +99 12 
model.output_channels 47.0 +99 12 model.input_dropout 0.305431579343337 +99 12 model.output_dropout 0.12415647712237493 +99 12 model.feature_map_dropout 0.13892766217652475 +99 12 model.embedding_dim 2.0 +99 12 optimizer.lr 0.006861388198766408 +99 12 training.batch_size 1.0 +99 12 training.label_smoothing 0.02903794854505372 +99 13 model.output_channels 21.0 +99 13 model.input_dropout 0.01895004810067269 +99 13 model.output_dropout 0.07690286694496323 +99 13 model.feature_map_dropout 0.08180054975872014 +99 13 model.embedding_dim 2.0 +99 13 optimizer.lr 0.025825908855895252 +99 13 training.batch_size 1.0 +99 13 training.label_smoothing 0.1937754323677875 +99 14 model.output_channels 27.0 +99 14 model.input_dropout 0.29019007830385835 +99 14 model.output_dropout 0.2848393705270445 +99 14 model.feature_map_dropout 0.17079767958716752 +99 14 model.embedding_dim 1.0 +99 14 optimizer.lr 0.030436233484654306 +99 14 training.batch_size 2.0 +99 14 training.label_smoothing 0.11000647746220882 +99 15 model.output_channels 62.0 +99 15 model.input_dropout 0.034648412754727353 +99 15 model.output_dropout 0.42809307119810247 +99 15 model.feature_map_dropout 0.4509705016181554 +99 15 model.embedding_dim 1.0 +99 15 optimizer.lr 0.013469041117472763 +99 15 training.batch_size 1.0 +99 15 training.label_smoothing 0.014678920222371139 +99 16 model.output_channels 54.0 +99 16 model.input_dropout 0.40390896255616715 +99 16 model.output_dropout 0.23697340121774974 +99 16 model.feature_map_dropout 0.48197034877456074 +99 16 model.embedding_dim 0.0 +99 16 optimizer.lr 0.019580841845396546 +99 16 training.batch_size 2.0 +99 16 training.label_smoothing 0.0051100033069083825 +99 17 model.output_channels 50.0 +99 17 model.input_dropout 0.0024374895218404746 +99 17 model.output_dropout 0.030296348364483583 +99 17 model.feature_map_dropout 0.2945578446005578 +99 17 model.embedding_dim 0.0 +99 17 optimizer.lr 0.00257749591267369 +99 17 training.batch_size 0.0 +99 17 training.label_smoothing 
0.255528057558933 +99 18 model.output_channels 38.0 +99 18 model.input_dropout 0.330696499676265 +99 18 model.output_dropout 0.38029133480234445 +99 18 model.feature_map_dropout 0.0420480589750728 +99 18 model.embedding_dim 1.0 +99 18 optimizer.lr 0.0017461240231553964 +99 18 training.batch_size 0.0 +99 18 training.label_smoothing 0.2858317508812973 +99 1 dataset """wn18RR""" +99 1 model """conve""" +99 1 loss """softplus""" +99 1 regularizer """no""" +99 1 optimizer """adam""" +99 1 training_loop """lcwa""" +99 1 evaluator """rankbased""" +99 2 dataset """wn18RR""" +99 2 model """conve""" +99 2 loss """softplus""" +99 2 regularizer """no""" +99 2 optimizer """adam""" +99 2 training_loop """lcwa""" +99 2 evaluator """rankbased""" +99 3 dataset """wn18RR""" +99 3 model """conve""" +99 3 loss """softplus""" +99 3 regularizer """no""" +99 3 optimizer """adam""" +99 3 training_loop """lcwa""" +99 3 evaluator """rankbased""" +99 4 dataset """wn18RR""" +99 4 model """conve""" +99 4 loss """softplus""" +99 4 regularizer """no""" +99 4 optimizer """adam""" +99 4 training_loop """lcwa""" +99 4 evaluator """rankbased""" +99 5 dataset """wn18RR""" +99 5 model """conve""" +99 5 loss """softplus""" +99 5 regularizer """no""" +99 5 optimizer """adam""" +99 5 training_loop """lcwa""" +99 5 evaluator """rankbased""" +99 6 dataset """wn18RR""" +99 6 model """conve""" +99 6 loss """softplus""" +99 6 regularizer """no""" +99 6 optimizer """adam""" +99 6 training_loop """lcwa""" +99 6 evaluator """rankbased""" +99 7 dataset """wn18RR""" +99 7 model """conve""" +99 7 loss """softplus""" +99 7 regularizer """no""" +99 7 optimizer """adam""" +99 7 training_loop """lcwa""" +99 7 evaluator """rankbased""" +99 8 dataset """wn18RR""" +99 8 model """conve""" +99 8 loss """softplus""" +99 8 regularizer """no""" +99 8 optimizer """adam""" +99 8 training_loop """lcwa""" +99 8 evaluator """rankbased""" +99 9 dataset """wn18RR""" +99 9 model """conve""" +99 9 loss """softplus""" +99 9 regularizer 
"""no""" +99 9 optimizer """adam""" +99 9 training_loop """lcwa""" +99 9 evaluator """rankbased""" +99 10 dataset """wn18RR""" +99 10 model """conve""" +99 10 loss """softplus""" +99 10 regularizer """no""" +99 10 optimizer """adam""" +99 10 training_loop """lcwa""" +99 10 evaluator """rankbased""" +99 11 dataset """wn18RR""" +99 11 model """conve""" +99 11 loss """softplus""" +99 11 regularizer """no""" +99 11 optimizer """adam""" +99 11 training_loop """lcwa""" +99 11 evaluator """rankbased""" +99 12 dataset """wn18RR""" +99 12 model """conve""" +99 12 loss """softplus""" +99 12 regularizer """no""" +99 12 optimizer """adam""" +99 12 training_loop """lcwa""" +99 12 evaluator """rankbased""" +99 13 dataset """wn18RR""" +99 13 model """conve""" +99 13 loss """softplus""" +99 13 regularizer """no""" +99 13 optimizer """adam""" +99 13 training_loop """lcwa""" +99 13 evaluator """rankbased""" +99 14 dataset """wn18RR""" +99 14 model """conve""" +99 14 loss """softplus""" +99 14 regularizer """no""" +99 14 optimizer """adam""" +99 14 training_loop """lcwa""" +99 14 evaluator """rankbased""" +99 15 dataset """wn18RR""" +99 15 model """conve""" +99 15 loss """softplus""" +99 15 regularizer """no""" +99 15 optimizer """adam""" +99 15 training_loop """lcwa""" +99 15 evaluator """rankbased""" +99 16 dataset """wn18RR""" +99 16 model """conve""" +99 16 loss """softplus""" +99 16 regularizer """no""" +99 16 optimizer """adam""" +99 16 training_loop """lcwa""" +99 16 evaluator """rankbased""" +99 17 dataset """wn18RR""" +99 17 model """conve""" +99 17 loss """softplus""" +99 17 regularizer """no""" +99 17 optimizer """adam""" +99 17 training_loop """lcwa""" +99 17 evaluator """rankbased""" +99 18 dataset """wn18RR""" +99 18 model """conve""" +99 18 loss """softplus""" +99 18 regularizer """no""" +99 18 optimizer """adam""" +99 18 training_loop """lcwa""" +99 18 evaluator """rankbased""" +100 1 model.output_channels 44.0 +100 1 model.input_dropout 0.14496095766781864 +100 1 
model.output_dropout 0.1717759735979072 +100 1 model.feature_map_dropout 0.05953739512319722 +100 1 model.embedding_dim 1.0 +100 1 optimizer.lr 0.014758061813119804 +100 1 training.batch_size 0.0 +100 1 training.label_smoothing 0.7077853993532142 +100 2 model.output_channels 36.0 +100 2 model.input_dropout 0.29943298775355937 +100 2 model.output_dropout 0.2975475911195599 +100 2 model.feature_map_dropout 0.17584150220654454 +100 2 model.embedding_dim 1.0 +100 2 optimizer.lr 0.05097048340638071 +100 2 training.batch_size 2.0 +100 2 training.label_smoothing 0.640611499104883 +100 3 model.output_channels 46.0 +100 3 model.input_dropout 0.0312348645382729 +100 3 model.output_dropout 0.4268391628772117 +100 3 model.feature_map_dropout 0.20174122309974957 +100 3 model.embedding_dim 2.0 +100 3 optimizer.lr 0.036236974917701205 +100 3 training.batch_size 1.0 +100 3 training.label_smoothing 0.002750815650835284 +100 4 model.output_channels 38.0 +100 4 model.input_dropout 0.05967942684040217 +100 4 model.output_dropout 0.3188375869131904 +100 4 model.feature_map_dropout 0.2755376521871075 +100 4 model.embedding_dim 2.0 +100 4 optimizer.lr 0.0369507311030291 +100 4 training.batch_size 1.0 +100 4 training.label_smoothing 0.001863817313756555 +100 5 model.output_channels 34.0 +100 5 model.input_dropout 0.06276993831432848 +100 5 model.output_dropout 0.061885898894038116 +100 5 model.feature_map_dropout 0.1997054683559945 +100 5 model.embedding_dim 2.0 +100 5 optimizer.lr 0.0016798352374970717 +100 5 training.batch_size 2.0 +100 5 training.label_smoothing 0.2195049627915705 +100 6 model.output_channels 54.0 +100 6 model.input_dropout 0.42917406101194505 +100 6 model.output_dropout 0.1346017837954112 +100 6 model.feature_map_dropout 0.2943809925073133 +100 6 model.embedding_dim 1.0 +100 6 optimizer.lr 0.007962201901925455 +100 6 training.batch_size 2.0 +100 6 training.label_smoothing 0.07932439238706519 +100 7 model.output_channels 19.0 +100 7 model.input_dropout 
0.32567753256116144 +100 7 model.output_dropout 0.20210557591751932 +100 7 model.feature_map_dropout 0.013590817997921223 +100 7 model.embedding_dim 1.0 +100 7 optimizer.lr 0.011195831544199234 +100 7 training.batch_size 0.0 +100 7 training.label_smoothing 0.8426608557761404 +100 8 model.output_channels 57.0 +100 8 model.input_dropout 0.08857394402804691 +100 8 model.output_dropout 0.4759415120435636 +100 8 model.feature_map_dropout 0.26218580026186883 +100 8 model.embedding_dim 2.0 +100 8 optimizer.lr 0.005842585006853365 +100 8 training.batch_size 0.0 +100 8 training.label_smoothing 0.021786354688054257 +100 9 model.output_channels 44.0 +100 9 model.input_dropout 0.3355596711202009 +100 9 model.output_dropout 0.09580574932824565 +100 9 model.feature_map_dropout 0.3792930429918183 +100 9 model.embedding_dim 2.0 +100 9 optimizer.lr 0.0682198308039555 +100 9 training.batch_size 0.0 +100 9 training.label_smoothing 0.004068107674798227 +100 10 model.output_channels 24.0 +100 10 model.input_dropout 0.474219923546745 +100 10 model.output_dropout 0.3898950430618742 +100 10 model.feature_map_dropout 0.4925545250659876 +100 10 model.embedding_dim 2.0 +100 10 optimizer.lr 0.0019305389790931005 +100 10 training.batch_size 1.0 +100 10 training.label_smoothing 0.05942711125706541 +100 11 model.output_channels 23.0 +100 11 model.input_dropout 0.37795439653200047 +100 11 model.output_dropout 0.2617969433284882 +100 11 model.feature_map_dropout 0.060573045815536464 +100 11 model.embedding_dim 1.0 +100 11 optimizer.lr 0.03138411702483113 +100 11 training.batch_size 1.0 +100 11 training.label_smoothing 0.014465541599965553 +100 12 model.output_channels 46.0 +100 12 model.input_dropout 0.4784938919242887 +100 12 model.output_dropout 0.2960184718772011 +100 12 model.feature_map_dropout 0.07245468309179937 +100 12 model.embedding_dim 1.0 +100 12 optimizer.lr 0.012993327910968732 +100 12 training.batch_size 1.0 +100 12 training.label_smoothing 0.056955216711466056 +100 13 
model.output_channels 37.0 +100 13 model.input_dropout 0.27968782016456295 +100 13 model.output_dropout 0.37412414316622006 +100 13 model.feature_map_dropout 0.07402367161287687 +100 13 model.embedding_dim 0.0 +100 13 optimizer.lr 0.08798622728870414 +100 13 training.batch_size 1.0 +100 13 training.label_smoothing 0.07713129834565681 +100 14 model.output_channels 57.0 +100 14 model.input_dropout 0.09420926146945519 +100 14 model.output_dropout 0.027382675483945407 +100 14 model.feature_map_dropout 0.3358101014235557 +100 14 model.embedding_dim 1.0 +100 14 optimizer.lr 0.0019624402990904236 +100 14 training.batch_size 0.0 +100 14 training.label_smoothing 0.11873096796209513 +100 15 model.output_channels 28.0 +100 15 model.input_dropout 0.13662045258451827 +100 15 model.output_dropout 0.3947578282049681 +100 15 model.feature_map_dropout 0.06388377215207763 +100 15 model.embedding_dim 1.0 +100 15 optimizer.lr 0.004507553565963743 +100 15 training.batch_size 0.0 +100 15 training.label_smoothing 0.001802785684028589 +100 16 model.output_channels 27.0 +100 16 model.input_dropout 0.3738110367324488 +100 16 model.output_dropout 0.4598078311847786 +100 16 model.feature_map_dropout 0.21969167540833145 +100 16 model.embedding_dim 1.0 +100 16 optimizer.lr 0.0015640153246253687 +100 16 training.batch_size 1.0 +100 16 training.label_smoothing 0.003261077338126352 +100 17 model.output_channels 51.0 +100 17 model.input_dropout 0.10548805364055758 +100 17 model.output_dropout 0.3211472266645875 +100 17 model.feature_map_dropout 0.25944699906054447 +100 17 model.embedding_dim 0.0 +100 17 optimizer.lr 0.004556116871544186 +100 17 training.batch_size 0.0 +100 17 training.label_smoothing 0.019267955039989633 +100 18 model.output_channels 59.0 +100 18 model.input_dropout 0.4112448725168669 +100 18 model.output_dropout 0.011374093795101237 +100 18 model.feature_map_dropout 0.18637513963989083 +100 18 model.embedding_dim 0.0 +100 18 optimizer.lr 0.002099181324690061 +100 18 
training.batch_size 2.0 +100 18 training.label_smoothing 0.12416747394817877 +100 1 dataset """wn18RR""" +100 1 model """conve""" +100 1 loss """crossentropy""" +100 1 regularizer """no""" +100 1 optimizer """adam""" +100 1 training_loop """lcwa""" +100 1 evaluator """rankbased""" +100 2 dataset """wn18RR""" +100 2 model """conve""" +100 2 loss """crossentropy""" +100 2 regularizer """no""" +100 2 optimizer """adam""" +100 2 training_loop """lcwa""" +100 2 evaluator """rankbased""" +100 3 dataset """wn18RR""" +100 3 model """conve""" +100 3 loss """crossentropy""" +100 3 regularizer """no""" +100 3 optimizer """adam""" +100 3 training_loop """lcwa""" +100 3 evaluator """rankbased""" +100 4 dataset """wn18RR""" +100 4 model """conve""" +100 4 loss """crossentropy""" +100 4 regularizer """no""" +100 4 optimizer """adam""" +100 4 training_loop """lcwa""" +100 4 evaluator """rankbased""" +100 5 dataset """wn18RR""" +100 5 model """conve""" +100 5 loss """crossentropy""" +100 5 regularizer """no""" +100 5 optimizer """adam""" +100 5 training_loop """lcwa""" +100 5 evaluator """rankbased""" +100 6 dataset """wn18RR""" +100 6 model """conve""" +100 6 loss """crossentropy""" +100 6 regularizer """no""" +100 6 optimizer """adam""" +100 6 training_loop """lcwa""" +100 6 evaluator """rankbased""" +100 7 dataset """wn18RR""" +100 7 model """conve""" +100 7 loss """crossentropy""" +100 7 regularizer """no""" +100 7 optimizer """adam""" +100 7 training_loop """lcwa""" +100 7 evaluator """rankbased""" +100 8 dataset """wn18RR""" +100 8 model """conve""" +100 8 loss """crossentropy""" +100 8 regularizer """no""" +100 8 optimizer """adam""" +100 8 training_loop """lcwa""" +100 8 evaluator """rankbased""" +100 9 dataset """wn18RR""" +100 9 model """conve""" +100 9 loss """crossentropy""" +100 9 regularizer """no""" +100 9 optimizer """adam""" +100 9 training_loop """lcwa""" +100 9 evaluator """rankbased""" +100 10 dataset """wn18RR""" +100 10 model """conve""" +100 10 loss 
"""crossentropy""" +100 10 regularizer """no""" +100 10 optimizer """adam""" +100 10 training_loop """lcwa""" +100 10 evaluator """rankbased""" +100 11 dataset """wn18RR""" +100 11 model """conve""" +100 11 loss """crossentropy""" +100 11 regularizer """no""" +100 11 optimizer """adam""" +100 11 training_loop """lcwa""" +100 11 evaluator """rankbased""" +100 12 dataset """wn18RR""" +100 12 model """conve""" +100 12 loss """crossentropy""" +100 12 regularizer """no""" +100 12 optimizer """adam""" +100 12 training_loop """lcwa""" +100 12 evaluator """rankbased""" +100 13 dataset """wn18RR""" +100 13 model """conve""" +100 13 loss """crossentropy""" +100 13 regularizer """no""" +100 13 optimizer """adam""" +100 13 training_loop """lcwa""" +100 13 evaluator """rankbased""" +100 14 dataset """wn18RR""" +100 14 model """conve""" +100 14 loss """crossentropy""" +100 14 regularizer """no""" +100 14 optimizer """adam""" +100 14 training_loop """lcwa""" +100 14 evaluator """rankbased""" +100 15 dataset """wn18RR""" +100 15 model """conve""" +100 15 loss """crossentropy""" +100 15 regularizer """no""" +100 15 optimizer """adam""" +100 15 training_loop """lcwa""" +100 15 evaluator """rankbased""" +100 16 dataset """wn18RR""" +100 16 model """conve""" +100 16 loss """crossentropy""" +100 16 regularizer """no""" +100 16 optimizer """adam""" +100 16 training_loop """lcwa""" +100 16 evaluator """rankbased""" +100 17 dataset """wn18RR""" +100 17 model """conve""" +100 17 loss """crossentropy""" +100 17 regularizer """no""" +100 17 optimizer """adam""" +100 17 training_loop """lcwa""" +100 17 evaluator """rankbased""" +100 18 dataset """wn18RR""" +100 18 model """conve""" +100 18 loss """crossentropy""" +100 18 regularizer """no""" +100 18 optimizer """adam""" +100 18 training_loop """lcwa""" +100 18 evaluator """rankbased""" +101 1 model.output_channels 35.0 +101 1 model.input_dropout 0.12306579350604202 +101 1 model.output_dropout 0.321623464203097 +101 1 model.feature_map_dropout 
0.20405932395619164 +101 1 model.embedding_dim 1.0 +101 1 optimizer.lr 0.06495075044085406 +101 1 training.batch_size 0.0 +101 1 training.label_smoothing 0.006296720475905462 +101 2 model.output_channels 25.0 +101 2 model.input_dropout 0.44606090622719 +101 2 model.output_dropout 0.3080385471406709 +101 2 model.feature_map_dropout 0.03211307094899152 +101 2 model.embedding_dim 1.0 +101 2 optimizer.lr 0.005214660757105356 +101 2 training.batch_size 1.0 +101 2 training.label_smoothing 0.0025409056544795526 +101 3 model.output_channels 27.0 +101 3 model.input_dropout 0.22875463010480496 +101 3 model.output_dropout 0.20760870422258398 +101 3 model.feature_map_dropout 0.03559839405214177 +101 3 model.embedding_dim 1.0 +101 3 optimizer.lr 0.0021437804051349855 +101 3 training.batch_size 2.0 +101 3 training.label_smoothing 0.01986560280418848 +101 4 model.output_channels 17.0 +101 4 model.input_dropout 0.00484286236173237 +101 4 model.output_dropout 0.19447149966052252 +101 4 model.feature_map_dropout 0.02104647360048645 +101 4 model.embedding_dim 0.0 +101 4 optimizer.lr 0.038751696770089196 +101 4 training.batch_size 0.0 +101 4 training.label_smoothing 0.08629671310956072 +101 5 model.output_channels 21.0 +101 5 model.input_dropout 0.1977600198675577 +101 5 model.output_dropout 0.2983503859114761 +101 5 model.feature_map_dropout 0.020869423626629302 +101 5 model.embedding_dim 0.0 +101 5 optimizer.lr 0.018971390400499912 +101 5 training.batch_size 0.0 +101 5 training.label_smoothing 0.0013520953369300327 +101 6 model.output_channels 19.0 +101 6 model.input_dropout 0.028898675989250944 +101 6 model.output_dropout 0.37343406310525157 +101 6 model.feature_map_dropout 0.05864875325175589 +101 6 model.embedding_dim 2.0 +101 6 optimizer.lr 0.08835873440196225 +101 6 training.batch_size 1.0 +101 6 training.label_smoothing 0.1770409985861556 +101 7 model.output_channels 61.0 +101 7 model.input_dropout 0.1239481522940441 +101 7 model.output_dropout 0.4354868113006373 +101 7 
model.feature_map_dropout 0.2532351865052726 +101 7 model.embedding_dim 2.0 +101 7 optimizer.lr 0.015081719086897689 +101 7 training.batch_size 0.0 +101 7 training.label_smoothing 0.8952966834788929 +101 8 model.output_channels 55.0 +101 8 model.input_dropout 0.20381257923040103 +101 8 model.output_dropout 0.21689511648337667 +101 8 model.feature_map_dropout 0.39332729075375245 +101 8 model.embedding_dim 2.0 +101 8 optimizer.lr 0.005985258459542496 +101 8 training.batch_size 2.0 +101 8 training.label_smoothing 0.0014969147984311055 +101 9 model.output_channels 56.0 +101 9 model.input_dropout 0.11985943661588405 +101 9 model.output_dropout 0.41709721412734235 +101 9 model.feature_map_dropout 0.4831359043708978 +101 9 model.embedding_dim 1.0 +101 9 optimizer.lr 0.04813840978263673 +101 9 training.batch_size 1.0 +101 9 training.label_smoothing 0.4050377883869765 +101 10 model.output_channels 49.0 +101 10 model.input_dropout 0.26474296374475004 +101 10 model.output_dropout 0.33345326019399385 +101 10 model.feature_map_dropout 0.1162435948739659 +101 10 model.embedding_dim 2.0 +101 10 optimizer.lr 0.008194528185893288 +101 10 training.batch_size 2.0 +101 10 training.label_smoothing 0.1024170931861409 +101 11 model.output_channels 51.0 +101 11 model.input_dropout 0.49661838323442087 +101 11 model.output_dropout 0.41318451980766663 +101 11 model.feature_map_dropout 0.04778994138325138 +101 11 model.embedding_dim 2.0 +101 11 optimizer.lr 0.061764244119984414 +101 11 training.batch_size 1.0 +101 11 training.label_smoothing 0.02715459120066979 +101 12 model.output_channels 36.0 +101 12 model.input_dropout 0.4884785524321659 +101 12 model.output_dropout 0.30987539543471343 +101 12 model.feature_map_dropout 0.03545059522802674 +101 12 model.embedding_dim 2.0 +101 12 optimizer.lr 0.005724341604743095 +101 12 training.batch_size 0.0 +101 12 training.label_smoothing 0.8344419040344084 +101 13 model.output_channels 47.0 +101 13 model.input_dropout 0.20812508870365892 +101 13 
model.output_dropout 0.3570881341101922 +101 13 model.feature_map_dropout 0.039782482786380635 +101 13 model.embedding_dim 1.0 +101 13 optimizer.lr 0.00919882600947233 +101 13 training.batch_size 0.0 +101 13 training.label_smoothing 0.006383730454269552 +101 14 model.output_channels 45.0 +101 14 model.input_dropout 0.42795793559077455 +101 14 model.output_dropout 0.058033734135163495 +101 14 model.feature_map_dropout 0.42854129039462496 +101 14 model.embedding_dim 1.0 +101 14 optimizer.lr 0.0072306604064202185 +101 14 training.batch_size 1.0 +101 14 training.label_smoothing 0.06721349486641494 +101 15 model.output_channels 27.0 +101 15 model.input_dropout 0.19321967571061027 +101 15 model.output_dropout 0.02454678593154852 +101 15 model.feature_map_dropout 0.46511748595530233 +101 15 model.embedding_dim 1.0 +101 15 optimizer.lr 0.03259620456898523 +101 15 training.batch_size 1.0 +101 15 training.label_smoothing 0.0013473685412981019 +101 16 model.output_channels 62.0 +101 16 model.input_dropout 0.004804970324845004 +101 16 model.output_dropout 0.43451634917232074 +101 16 model.feature_map_dropout 0.12369160377223004 +101 16 model.embedding_dim 2.0 +101 16 optimizer.lr 0.041953181368036115 +101 16 training.batch_size 1.0 +101 16 training.label_smoothing 0.005726817738497428 +101 17 model.output_channels 35.0 +101 17 model.input_dropout 0.07355511946443405 +101 17 model.output_dropout 0.4050137996568263 +101 17 model.feature_map_dropout 0.3335050260990026 +101 17 model.embedding_dim 0.0 +101 17 optimizer.lr 0.03557290921597388 +101 17 training.batch_size 0.0 +101 17 training.label_smoothing 0.02962038659171112 +101 18 model.output_channels 53.0 +101 18 model.input_dropout 0.44162650270596004 +101 18 model.output_dropout 0.18528025520074276 +101 18 model.feature_map_dropout 0.11547112058117676 +101 18 model.embedding_dim 2.0 +101 18 optimizer.lr 0.01858349430075717 +101 18 training.batch_size 0.0 +101 18 training.label_smoothing 0.001967728558520671 +101 19 
model.output_channels 50.0 +101 19 model.input_dropout 0.49171005227469383 +101 19 model.output_dropout 0.4295649227527115 +101 19 model.feature_map_dropout 0.29190850134522933 +101 19 model.embedding_dim 0.0 +101 19 optimizer.lr 0.0011987077526318334 +101 19 training.batch_size 0.0 +101 19 training.label_smoothing 0.011784948217630592 +101 20 model.output_channels 53.0 +101 20 model.input_dropout 0.46205134632032996 +101 20 model.output_dropout 0.11613491643311696 +101 20 model.feature_map_dropout 0.1728695614359675 +101 20 model.embedding_dim 2.0 +101 20 optimizer.lr 0.07974387869142285 +101 20 training.batch_size 0.0 +101 20 training.label_smoothing 0.009955524630898672 +101 21 model.output_channels 24.0 +101 21 model.input_dropout 0.010629987281329467 +101 21 model.output_dropout 0.02417394141629342 +101 21 model.feature_map_dropout 0.19853078903122678 +101 21 model.embedding_dim 0.0 +101 21 optimizer.lr 0.0010466882547643629 +101 21 training.batch_size 0.0 +101 21 training.label_smoothing 0.0017676483522546305 +101 1 dataset """wn18RR""" +101 1 model """conve""" +101 1 loss """crossentropy""" +101 1 regularizer """no""" +101 1 optimizer """adam""" +101 1 training_loop """lcwa""" +101 1 evaluator """rankbased""" +101 2 dataset """wn18RR""" +101 2 model """conve""" +101 2 loss """crossentropy""" +101 2 regularizer """no""" +101 2 optimizer """adam""" +101 2 training_loop """lcwa""" +101 2 evaluator """rankbased""" +101 3 dataset """wn18RR""" +101 3 model """conve""" +101 3 loss """crossentropy""" +101 3 regularizer """no""" +101 3 optimizer """adam""" +101 3 training_loop """lcwa""" +101 3 evaluator """rankbased""" +101 4 dataset """wn18RR""" +101 4 model """conve""" +101 4 loss """crossentropy""" +101 4 regularizer """no""" +101 4 optimizer """adam""" +101 4 training_loop """lcwa""" +101 4 evaluator """rankbased""" +101 5 dataset """wn18RR""" +101 5 model """conve""" +101 5 loss """crossentropy""" +101 5 regularizer """no""" +101 5 optimizer """adam""" +101 5 
training_loop """lcwa""" +101 5 evaluator """rankbased""" +101 6 dataset """wn18RR""" +101 6 model """conve""" +101 6 loss """crossentropy""" +101 6 regularizer """no""" +101 6 optimizer """adam""" +101 6 training_loop """lcwa""" +101 6 evaluator """rankbased""" +101 7 dataset """wn18RR""" +101 7 model """conve""" +101 7 loss """crossentropy""" +101 7 regularizer """no""" +101 7 optimizer """adam""" +101 7 training_loop """lcwa""" +101 7 evaluator """rankbased""" +101 8 dataset """wn18RR""" +101 8 model """conve""" +101 8 loss """crossentropy""" +101 8 regularizer """no""" +101 8 optimizer """adam""" +101 8 training_loop """lcwa""" +101 8 evaluator """rankbased""" +101 9 dataset """wn18RR""" +101 9 model """conve""" +101 9 loss """crossentropy""" +101 9 regularizer """no""" +101 9 optimizer """adam""" +101 9 training_loop """lcwa""" +101 9 evaluator """rankbased""" +101 10 dataset """wn18RR""" +101 10 model """conve""" +101 10 loss """crossentropy""" +101 10 regularizer """no""" +101 10 optimizer """adam""" +101 10 training_loop """lcwa""" +101 10 evaluator """rankbased""" +101 11 dataset """wn18RR""" +101 11 model """conve""" +101 11 loss """crossentropy""" +101 11 regularizer """no""" +101 11 optimizer """adam""" +101 11 training_loop """lcwa""" +101 11 evaluator """rankbased""" +101 12 dataset """wn18RR""" +101 12 model """conve""" +101 12 loss """crossentropy""" +101 12 regularizer """no""" +101 12 optimizer """adam""" +101 12 training_loop """lcwa""" +101 12 evaluator """rankbased""" +101 13 dataset """wn18RR""" +101 13 model """conve""" +101 13 loss """crossentropy""" +101 13 regularizer """no""" +101 13 optimizer """adam""" +101 13 training_loop """lcwa""" +101 13 evaluator """rankbased""" +101 14 dataset """wn18RR""" +101 14 model """conve""" +101 14 loss """crossentropy""" +101 14 regularizer """no""" +101 14 optimizer """adam""" +101 14 training_loop """lcwa""" +101 14 evaluator """rankbased""" +101 15 dataset """wn18RR""" +101 15 model """conve""" +101 
15 loss """crossentropy""" +101 15 regularizer """no""" +101 15 optimizer """adam""" +101 15 training_loop """lcwa""" +101 15 evaluator """rankbased""" +101 16 dataset """wn18RR""" +101 16 model """conve""" +101 16 loss """crossentropy""" +101 16 regularizer """no""" +101 16 optimizer """adam""" +101 16 training_loop """lcwa""" +101 16 evaluator """rankbased""" +101 17 dataset """wn18RR""" +101 17 model """conve""" +101 17 loss """crossentropy""" +101 17 regularizer """no""" +101 17 optimizer """adam""" +101 17 training_loop """lcwa""" +101 17 evaluator """rankbased""" +101 18 dataset """wn18RR""" +101 18 model """conve""" +101 18 loss """crossentropy""" +101 18 regularizer """no""" +101 18 optimizer """adam""" +101 18 training_loop """lcwa""" +101 18 evaluator """rankbased""" +101 19 dataset """wn18RR""" +101 19 model """conve""" +101 19 loss """crossentropy""" +101 19 regularizer """no""" +101 19 optimizer """adam""" +101 19 training_loop """lcwa""" +101 19 evaluator """rankbased""" +101 20 dataset """wn18RR""" +101 20 model """conve""" +101 20 loss """crossentropy""" +101 20 regularizer """no""" +101 20 optimizer """adam""" +101 20 training_loop """lcwa""" +101 20 evaluator """rankbased""" +101 21 dataset """wn18RR""" +101 21 model """conve""" +101 21 loss """crossentropy""" +101 21 regularizer """no""" +101 21 optimizer """adam""" +101 21 training_loop """lcwa""" +101 21 evaluator """rankbased""" +102 1 model.output_channels 43.0 +102 1 model.input_dropout 0.04767863349827611 +102 1 model.output_dropout 0.2581938634992961 +102 1 model.feature_map_dropout 0.4685482390884817 +102 1 model.embedding_dim 0.0 +102 1 optimizer.lr 0.052026157849991234 +102 1 negative_sampler.num_negs_per_pos 18.0 +102 1 training.batch_size 2.0 +102 2 model.output_channels 22.0 +102 2 model.input_dropout 0.15248687085306578 +102 2 model.output_dropout 0.2726401283608511 +102 2 model.feature_map_dropout 0.4720838307666712 +102 2 model.embedding_dim 0.0 +102 2 optimizer.lr 
0.07689156165245599 +102 2 negative_sampler.num_negs_per_pos 88.0 +102 2 training.batch_size 0.0 +102 3 model.output_channels 60.0 +102 3 model.input_dropout 0.21424121015695052 +102 3 model.output_dropout 0.059032605784341885 +102 3 model.feature_map_dropout 0.3086856370841591 +102 3 model.embedding_dim 2.0 +102 3 optimizer.lr 0.09283194355845577 +102 3 negative_sampler.num_negs_per_pos 92.0 +102 3 training.batch_size 1.0 +102 4 model.output_channels 19.0 +102 4 model.input_dropout 0.19026185864380196 +102 4 model.output_dropout 0.07245180194141504 +102 4 model.feature_map_dropout 0.09031931610225019 +102 4 model.embedding_dim 1.0 +102 4 optimizer.lr 0.008602164676936771 +102 4 negative_sampler.num_negs_per_pos 85.0 +102 4 training.batch_size 0.0 +102 5 model.output_channels 21.0 +102 5 model.input_dropout 0.07167763978769431 +102 5 model.output_dropout 0.12466734212317754 +102 5 model.feature_map_dropout 0.13798944369064464 +102 5 model.embedding_dim 2.0 +102 5 optimizer.lr 0.08114302062952117 +102 5 negative_sampler.num_negs_per_pos 90.0 +102 5 training.batch_size 2.0 +102 6 model.output_channels 56.0 +102 6 model.input_dropout 0.14011765251983194 +102 6 model.output_dropout 0.02533522317385256 +102 6 model.feature_map_dropout 0.3781499731159234 +102 6 model.embedding_dim 1.0 +102 6 optimizer.lr 0.006716405346665058 +102 6 negative_sampler.num_negs_per_pos 4.0 +102 6 training.batch_size 1.0 +102 7 model.output_channels 17.0 +102 7 model.input_dropout 0.2293065159844051 +102 7 model.output_dropout 0.24796730179282456 +102 7 model.feature_map_dropout 0.2803151286384795 +102 7 model.embedding_dim 2.0 +102 7 optimizer.lr 0.02178457745723149 +102 7 negative_sampler.num_negs_per_pos 62.0 +102 7 training.batch_size 2.0 +102 8 model.output_channels 27.0 +102 8 model.input_dropout 0.04754241364105716 +102 8 model.output_dropout 0.006987204103167055 +102 8 model.feature_map_dropout 0.03851869942765207 +102 8 model.embedding_dim 1.0 +102 8 optimizer.lr 0.004917928467950543 
+102 8 negative_sampler.num_negs_per_pos 80.0 +102 8 training.batch_size 2.0 +102 9 model.output_channels 24.0 +102 9 model.input_dropout 0.22981036121000425 +102 9 model.output_dropout 0.06936077943533081 +102 9 model.feature_map_dropout 0.13610114398916984 +102 9 model.embedding_dim 1.0 +102 9 optimizer.lr 0.0026576133081563785 +102 9 negative_sampler.num_negs_per_pos 36.0 +102 9 training.batch_size 2.0 +102 10 model.output_channels 31.0 +102 10 model.input_dropout 0.2931475118538936 +102 10 model.output_dropout 0.31045292285655424 +102 10 model.feature_map_dropout 0.23400857112552093 +102 10 model.embedding_dim 1.0 +102 10 optimizer.lr 0.013956313448288364 +102 10 negative_sampler.num_negs_per_pos 30.0 +102 10 training.batch_size 2.0 +102 11 model.output_channels 55.0 +102 11 model.input_dropout 0.4091945298856053 +102 11 model.output_dropout 0.47604618972515667 +102 11 model.feature_map_dropout 0.3026377220459283 +102 11 model.embedding_dim 0.0 +102 11 optimizer.lr 0.0038859684220959503 +102 11 negative_sampler.num_negs_per_pos 6.0 +102 11 training.batch_size 0.0 +102 12 model.output_channels 60.0 +102 12 model.input_dropout 0.457969104874472 +102 12 model.output_dropout 0.10481572669017819 +102 12 model.feature_map_dropout 0.06017150689005746 +102 12 model.embedding_dim 2.0 +102 12 optimizer.lr 0.06767336804458138 +102 12 negative_sampler.num_negs_per_pos 45.0 +102 12 training.batch_size 2.0 +102 13 model.output_channels 31.0 +102 13 model.input_dropout 0.08870521564449801 +102 13 model.output_dropout 0.21981199532153284 +102 13 model.feature_map_dropout 0.09690840685889102 +102 13 model.embedding_dim 0.0 +102 13 optimizer.lr 0.009062784928540572 +102 13 negative_sampler.num_negs_per_pos 85.0 +102 13 training.batch_size 0.0 +102 14 model.output_channels 20.0 +102 14 model.input_dropout 0.09145046285377079 +102 14 model.output_dropout 0.005754529401535258 +102 14 model.feature_map_dropout 0.3848051086363739 +102 14 model.embedding_dim 2.0 +102 14 optimizer.lr 
0.05798622321636611 +102 14 negative_sampler.num_negs_per_pos 24.0 +102 14 training.batch_size 2.0 +102 1 dataset """wn18rr""" +102 1 model """conve""" +102 1 loss """softplus""" +102 1 regularizer """no""" +102 1 optimizer """adam""" +102 1 training_loop """owa""" +102 1 negative_sampler """basic""" +102 1 evaluator """rankbased""" +102 2 dataset """wn18rr""" +102 2 model """conve""" +102 2 loss """softplus""" +102 2 regularizer """no""" +102 2 optimizer """adam""" +102 2 training_loop """owa""" +102 2 negative_sampler """basic""" +102 2 evaluator """rankbased""" +102 3 dataset """wn18rr""" +102 3 model """conve""" +102 3 loss """softplus""" +102 3 regularizer """no""" +102 3 optimizer """adam""" +102 3 training_loop """owa""" +102 3 negative_sampler """basic""" +102 3 evaluator """rankbased""" +102 4 dataset """wn18rr""" +102 4 model """conve""" +102 4 loss """softplus""" +102 4 regularizer """no""" +102 4 optimizer """adam""" +102 4 training_loop """owa""" +102 4 negative_sampler """basic""" +102 4 evaluator """rankbased""" +102 5 dataset """wn18rr""" +102 5 model """conve""" +102 5 loss """softplus""" +102 5 regularizer """no""" +102 5 optimizer """adam""" +102 5 training_loop """owa""" +102 5 negative_sampler """basic""" +102 5 evaluator """rankbased""" +102 6 dataset """wn18rr""" +102 6 model """conve""" +102 6 loss """softplus""" +102 6 regularizer """no""" +102 6 optimizer """adam""" +102 6 training_loop """owa""" +102 6 negative_sampler """basic""" +102 6 evaluator """rankbased""" +102 7 dataset """wn18rr""" +102 7 model """conve""" +102 7 loss """softplus""" +102 7 regularizer """no""" +102 7 optimizer """adam""" +102 7 training_loop """owa""" +102 7 negative_sampler """basic""" +102 7 evaluator """rankbased""" +102 8 dataset """wn18rr""" +102 8 model """conve""" +102 8 loss """softplus""" +102 8 regularizer """no""" +102 8 optimizer """adam""" +102 8 training_loop """owa""" +102 8 negative_sampler """basic""" +102 8 evaluator """rankbased""" +102 9 
dataset """wn18rr""" +102 9 model """conve""" +102 9 loss """softplus""" +102 9 regularizer """no""" +102 9 optimizer """adam""" +102 9 training_loop """owa""" +102 9 negative_sampler """basic""" +102 9 evaluator """rankbased""" +102 10 dataset """wn18rr""" +102 10 model """conve""" +102 10 loss """softplus""" +102 10 regularizer """no""" +102 10 optimizer """adam""" +102 10 training_loop """owa""" +102 10 negative_sampler """basic""" +102 10 evaluator """rankbased""" +102 11 dataset """wn18rr""" +102 11 model """conve""" +102 11 loss """softplus""" +102 11 regularizer """no""" +102 11 optimizer """adam""" +102 11 training_loop """owa""" +102 11 negative_sampler """basic""" +102 11 evaluator """rankbased""" +102 12 dataset """wn18rr""" +102 12 model """conve""" +102 12 loss """softplus""" +102 12 regularizer """no""" +102 12 optimizer """adam""" +102 12 training_loop """owa""" +102 12 negative_sampler """basic""" +102 12 evaluator """rankbased""" +102 13 dataset """wn18rr""" +102 13 model """conve""" +102 13 loss """softplus""" +102 13 regularizer """no""" +102 13 optimizer """adam""" +102 13 training_loop """owa""" +102 13 negative_sampler """basic""" +102 13 evaluator """rankbased""" +102 14 dataset """wn18rr""" +102 14 model """conve""" +102 14 loss """softplus""" +102 14 regularizer """no""" +102 14 optimizer """adam""" +102 14 training_loop """owa""" +102 14 negative_sampler """basic""" +102 14 evaluator """rankbased""" +103 1 model.output_channels 40.0 +103 1 model.input_dropout 0.25210734988389233 +103 1 model.output_dropout 0.3714095871081095 +103 1 model.feature_map_dropout 0.42743465560212834 +103 1 model.embedding_dim 0.0 +103 1 optimizer.lr 0.013561075387422126 +103 1 negative_sampler.num_negs_per_pos 38.0 +103 1 training.batch_size 2.0 +103 2 model.output_channels 54.0 +103 2 model.input_dropout 0.1153191184229212 +103 2 model.output_dropout 0.4855504762891499 +103 2 model.feature_map_dropout 0.09928668713709893 +103 2 model.embedding_dim 2.0 +103 2 
optimizer.lr 0.002072256476270106 +103 2 negative_sampler.num_negs_per_pos 26.0 +103 2 training.batch_size 2.0 +103 3 model.output_channels 44.0 +103 3 model.input_dropout 0.2590169083548074 +103 3 model.output_dropout 0.4864004837085372 +103 3 model.feature_map_dropout 0.1824414561623301 +103 3 model.embedding_dim 2.0 +103 3 optimizer.lr 0.0197132801951562 +103 3 negative_sampler.num_negs_per_pos 18.0 +103 3 training.batch_size 2.0 +103 4 model.output_channels 42.0 +103 4 model.input_dropout 0.254624750440295 +103 4 model.output_dropout 0.401842099959109 +103 4 model.feature_map_dropout 0.2623401052205813 +103 4 model.embedding_dim 0.0 +103 4 optimizer.lr 0.004922448991213755 +103 4 negative_sampler.num_negs_per_pos 23.0 +103 4 training.batch_size 2.0 +103 5 model.output_channels 42.0 +103 5 model.input_dropout 0.49208221044765155 +103 5 model.output_dropout 0.2869345599654395 +103 5 model.feature_map_dropout 0.26458339188346963 +103 5 model.embedding_dim 1.0 +103 5 optimizer.lr 0.002352752805427414 +103 5 negative_sampler.num_negs_per_pos 87.0 +103 5 training.batch_size 1.0 +103 6 model.output_channels 33.0 +103 6 model.input_dropout 0.12948582081072385 +103 6 model.output_dropout 0.18491799794803832 +103 6 model.feature_map_dropout 0.3998372843065869 +103 6 model.embedding_dim 0.0 +103 6 optimizer.lr 0.08770550238957106 +103 6 negative_sampler.num_negs_per_pos 51.0 +103 6 training.batch_size 2.0 +103 7 model.output_channels 63.0 +103 7 model.input_dropout 0.27477593414053125 +103 7 model.output_dropout 0.4411773993806358 +103 7 model.feature_map_dropout 0.3575730870094042 +103 7 model.embedding_dim 1.0 +103 7 optimizer.lr 0.016611067604566532 +103 7 negative_sampler.num_negs_per_pos 61.0 +103 7 training.batch_size 0.0 +103 8 model.output_channels 46.0 +103 8 model.input_dropout 0.39729999711716435 +103 8 model.output_dropout 0.15561190344728254 +103 8 model.feature_map_dropout 0.3535153512450715 +103 8 model.embedding_dim 2.0 +103 8 optimizer.lr 
0.06669136417208084 +103 8 negative_sampler.num_negs_per_pos 77.0 +103 8 training.batch_size 2.0 +103 9 model.output_channels 45.0 +103 9 model.input_dropout 0.3271066623115839 +103 9 model.output_dropout 0.34655372251380606 +103 9 model.feature_map_dropout 0.05689598056546241 +103 9 model.embedding_dim 1.0 +103 9 optimizer.lr 0.07323731284409744 +103 9 negative_sampler.num_negs_per_pos 31.0 +103 9 training.batch_size 1.0 +103 10 model.output_channels 40.0 +103 10 model.input_dropout 0.28213452285986135 +103 10 model.output_dropout 0.21323160231780836 +103 10 model.feature_map_dropout 0.4311668011141816 +103 10 model.embedding_dim 2.0 +103 10 optimizer.lr 0.005762457437885017 +103 10 negative_sampler.num_negs_per_pos 67.0 +103 10 training.batch_size 1.0 +103 11 model.output_channels 60.0 +103 11 model.input_dropout 0.4057938324459121 +103 11 model.output_dropout 0.10814304032610628 +103 11 model.feature_map_dropout 0.0007912528562742938 +103 11 model.embedding_dim 2.0 +103 11 optimizer.lr 0.013073685634291786 +103 11 negative_sampler.num_negs_per_pos 62.0 +103 11 training.batch_size 2.0 +103 12 model.output_channels 44.0 +103 12 model.input_dropout 0.07921649777864959 +103 12 model.output_dropout 0.12862105356201337 +103 12 model.feature_map_dropout 0.3624407265714243 +103 12 model.embedding_dim 1.0 +103 12 optimizer.lr 0.0018452200951448053 +103 12 negative_sampler.num_negs_per_pos 5.0 +103 12 training.batch_size 0.0 +103 13 model.output_channels 37.0 +103 13 model.input_dropout 0.40422168574189693 +103 13 model.output_dropout 0.3212443720523608 +103 13 model.feature_map_dropout 0.40655561502220566 +103 13 model.embedding_dim 0.0 +103 13 optimizer.lr 0.020216456088624105 +103 13 negative_sampler.num_negs_per_pos 36.0 +103 13 training.batch_size 1.0 +103 14 model.output_channels 63.0 +103 14 model.input_dropout 0.0951496848537195 +103 14 model.output_dropout 0.41046255977860274 +103 14 model.feature_map_dropout 0.2831086101190743 +103 14 model.embedding_dim 2.0 
+103 14 optimizer.lr 0.00139381005382948 +103 14 negative_sampler.num_negs_per_pos 57.0 +103 14 training.batch_size 1.0 +103 15 model.output_channels 59.0 +103 15 model.input_dropout 0.17858969721870943 +103 15 model.output_dropout 0.15677283111550366 +103 15 model.feature_map_dropout 0.13965499123042296 +103 15 model.embedding_dim 1.0 +103 15 optimizer.lr 0.006553185658723272 +103 15 negative_sampler.num_negs_per_pos 11.0 +103 15 training.batch_size 0.0 +103 16 model.output_channels 54.0 +103 16 model.input_dropout 0.4019814846809551 +103 16 model.output_dropout 0.22024622162169954 +103 16 model.feature_map_dropout 0.247925033524578 +103 16 model.embedding_dim 2.0 +103 16 optimizer.lr 0.0108031419311787 +103 16 negative_sampler.num_negs_per_pos 71.0 +103 16 training.batch_size 2.0 +103 1 dataset """wn18rr""" +103 1 model """conve""" +103 1 loss """softplus""" +103 1 regularizer """no""" +103 1 optimizer """adam""" +103 1 training_loop """owa""" +103 1 negative_sampler """basic""" +103 1 evaluator """rankbased""" +103 2 dataset """wn18rr""" +103 2 model """conve""" +103 2 loss """softplus""" +103 2 regularizer """no""" +103 2 optimizer """adam""" +103 2 training_loop """owa""" +103 2 negative_sampler """basic""" +103 2 evaluator """rankbased""" +103 3 dataset """wn18rr""" +103 3 model """conve""" +103 3 loss """softplus""" +103 3 regularizer """no""" +103 3 optimizer """adam""" +103 3 training_loop """owa""" +103 3 negative_sampler """basic""" +103 3 evaluator """rankbased""" +103 4 dataset """wn18rr""" +103 4 model """conve""" +103 4 loss """softplus""" +103 4 regularizer """no""" +103 4 optimizer """adam""" +103 4 training_loop """owa""" +103 4 negative_sampler """basic""" +103 4 evaluator """rankbased""" +103 5 dataset """wn18rr""" +103 5 model """conve""" +103 5 loss """softplus""" +103 5 regularizer """no""" +103 5 optimizer """adam""" +103 5 training_loop """owa""" +103 5 negative_sampler """basic""" +103 5 evaluator """rankbased""" +103 6 dataset 
"""wn18rr""" +103 6 model """conve""" +103 6 loss """softplus""" +103 6 regularizer """no""" +103 6 optimizer """adam""" +103 6 training_loop """owa""" +103 6 negative_sampler """basic""" +103 6 evaluator """rankbased""" +103 7 dataset """wn18rr""" +103 7 model """conve""" +103 7 loss """softplus""" +103 7 regularizer """no""" +103 7 optimizer """adam""" +103 7 training_loop """owa""" +103 7 negative_sampler """basic""" +103 7 evaluator """rankbased""" +103 8 dataset """wn18rr""" +103 8 model """conve""" +103 8 loss """softplus""" +103 8 regularizer """no""" +103 8 optimizer """adam""" +103 8 training_loop """owa""" +103 8 negative_sampler """basic""" +103 8 evaluator """rankbased""" +103 9 dataset """wn18rr""" +103 9 model """conve""" +103 9 loss """softplus""" +103 9 regularizer """no""" +103 9 optimizer """adam""" +103 9 training_loop """owa""" +103 9 negative_sampler """basic""" +103 9 evaluator """rankbased""" +103 10 dataset """wn18rr""" +103 10 model """conve""" +103 10 loss """softplus""" +103 10 regularizer """no""" +103 10 optimizer """adam""" +103 10 training_loop """owa""" +103 10 negative_sampler """basic""" +103 10 evaluator """rankbased""" +103 11 dataset """wn18rr""" +103 11 model """conve""" +103 11 loss """softplus""" +103 11 regularizer """no""" +103 11 optimizer """adam""" +103 11 training_loop """owa""" +103 11 negative_sampler """basic""" +103 11 evaluator """rankbased""" +103 12 dataset """wn18rr""" +103 12 model """conve""" +103 12 loss """softplus""" +103 12 regularizer """no""" +103 12 optimizer """adam""" +103 12 training_loop """owa""" +103 12 negative_sampler """basic""" +103 12 evaluator """rankbased""" +103 13 dataset """wn18rr""" +103 13 model """conve""" +103 13 loss """softplus""" +103 13 regularizer """no""" +103 13 optimizer """adam""" +103 13 training_loop """owa""" +103 13 negative_sampler """basic""" +103 13 evaluator """rankbased""" +103 14 dataset """wn18rr""" +103 14 model """conve""" +103 14 loss """softplus""" +103 14 
regularizer """no""" +103 14 optimizer """adam""" +103 14 training_loop """owa""" +103 14 negative_sampler """basic""" +103 14 evaluator """rankbased""" +103 15 dataset """wn18rr""" +103 15 model """conve""" +103 15 loss """softplus""" +103 15 regularizer """no""" +103 15 optimizer """adam""" +103 15 training_loop """owa""" +103 15 negative_sampler """basic""" +103 15 evaluator """rankbased""" +103 16 dataset """wn18rr""" +103 16 model """conve""" +103 16 loss """softplus""" +103 16 regularizer """no""" +103 16 optimizer """adam""" +103 16 training_loop """owa""" +103 16 negative_sampler """basic""" +103 16 evaluator """rankbased""" +104 1 model.output_channels 62.0 +104 1 model.input_dropout 0.22039804463501717 +104 1 model.output_dropout 0.1298883918904435 +104 1 model.feature_map_dropout 0.38901355645811064 +104 1 model.embedding_dim 1.0 +104 1 loss.margin 4.672361616526906 +104 1 optimizer.lr 0.0010097353788880124 +104 1 negative_sampler.num_negs_per_pos 48.0 +104 1 training.batch_size 0.0 +104 2 model.output_channels 37.0 +104 2 model.input_dropout 0.34725910561977924 +104 2 model.output_dropout 0.2775606430474868 +104 2 model.feature_map_dropout 0.012549745091219755 +104 2 model.embedding_dim 1.0 +104 2 loss.margin 2.6221155139289722 +104 2 optimizer.lr 0.001438539546878506 +104 2 negative_sampler.num_negs_per_pos 53.0 +104 2 training.batch_size 1.0 +104 3 model.output_channels 54.0 +104 3 model.input_dropout 0.07528411691955772 +104 3 model.output_dropout 0.2604051695675493 +104 3 model.feature_map_dropout 0.162558145537271 +104 3 model.embedding_dim 1.0 +104 3 loss.margin 1.6805086313534567 +104 3 optimizer.lr 0.006962720814896974 +104 3 negative_sampler.num_negs_per_pos 20.0 +104 3 training.batch_size 2.0 +104 4 model.output_channels 33.0 +104 4 model.input_dropout 0.46419000001011645 +104 4 model.output_dropout 0.12735093228523875 +104 4 model.feature_map_dropout 0.17732060459247 +104 4 model.embedding_dim 0.0 +104 4 loss.margin 9.563734624702315 +104 4 
optimizer.lr 0.023968280050880236 +104 4 negative_sampler.num_negs_per_pos 23.0 +104 4 training.batch_size 2.0 +104 5 model.output_channels 47.0 +104 5 model.input_dropout 0.4903477160930119 +104 5 model.output_dropout 0.11497079497154594 +104 5 model.feature_map_dropout 0.0878762581844083 +104 5 model.embedding_dim 0.0 +104 5 loss.margin 9.047104920387484 +104 5 optimizer.lr 0.006149375849275567 +104 5 negative_sampler.num_negs_per_pos 4.0 +104 5 training.batch_size 1.0 +104 6 model.output_channels 23.0 +104 6 model.input_dropout 0.02967813419614096 +104 6 model.output_dropout 0.4227597570723068 +104 6 model.feature_map_dropout 0.24465927426764827 +104 6 model.embedding_dim 1.0 +104 6 loss.margin 5.248448860872717 +104 6 optimizer.lr 0.015861658329967228 +104 6 negative_sampler.num_negs_per_pos 94.0 +104 6 training.batch_size 2.0 +104 7 model.output_channels 29.0 +104 7 model.input_dropout 0.3290889145278057 +104 7 model.output_dropout 0.4595035187929994 +104 7 model.feature_map_dropout 0.47244331908346554 +104 7 model.embedding_dim 2.0 +104 7 loss.margin 9.528531083961285 +104 7 optimizer.lr 0.0010724310813393122 +104 7 negative_sampler.num_negs_per_pos 70.0 +104 7 training.batch_size 2.0 +104 8 model.output_channels 46.0 +104 8 model.input_dropout 0.21488618559043177 +104 8 model.output_dropout 0.06415000177633506 +104 8 model.feature_map_dropout 0.33306668338096296 +104 8 model.embedding_dim 2.0 +104 8 loss.margin 1.4973545653580973 +104 8 optimizer.lr 0.004203588875236176 +104 8 negative_sampler.num_negs_per_pos 85.0 +104 8 training.batch_size 2.0 +104 9 model.output_channels 20.0 +104 9 model.input_dropout 0.3851867487304846 +104 9 model.output_dropout 0.20041342864864203 +104 9 model.feature_map_dropout 0.4982899661107287 +104 9 model.embedding_dim 0.0 +104 9 loss.margin 1.7893275721424886 +104 9 optimizer.lr 0.03391390082118434 +104 9 negative_sampler.num_negs_per_pos 17.0 +104 9 training.batch_size 1.0 +104 10 model.output_channels 39.0 +104 10 
model.input_dropout 0.07872786821700622 +104 10 model.output_dropout 0.41437154803034576 +104 10 model.feature_map_dropout 0.2550256869699939 +104 10 model.embedding_dim 0.0 +104 10 loss.margin 0.885629124841109 +104 10 optimizer.lr 0.03500405737187496 +104 10 negative_sampler.num_negs_per_pos 16.0 +104 10 training.batch_size 1.0 +104 11 model.output_channels 22.0 +104 11 model.input_dropout 0.4373744883026286 +104 11 model.output_dropout 0.341994630041764 +104 11 model.feature_map_dropout 0.2040896583089215 +104 11 model.embedding_dim 2.0 +104 11 loss.margin 3.2326915618768326 +104 11 optimizer.lr 0.011292082232260732 +104 11 negative_sampler.num_negs_per_pos 14.0 +104 11 training.batch_size 2.0 +104 12 model.output_channels 39.0 +104 12 model.input_dropout 0.1569263074935986 +104 12 model.output_dropout 0.2416550750452069 +104 12 model.feature_map_dropout 0.2636911742617514 +104 12 model.embedding_dim 2.0 +104 12 loss.margin 1.7584201060511133 +104 12 optimizer.lr 0.0022550929084272516 +104 12 negative_sampler.num_negs_per_pos 13.0 +104 12 training.batch_size 1.0 +104 13 model.output_channels 52.0 +104 13 model.input_dropout 0.15787918015607533 +104 13 model.output_dropout 0.2563241892020382 +104 13 model.feature_map_dropout 0.30998082149289574 +104 13 model.embedding_dim 1.0 +104 13 loss.margin 9.814044832795295 +104 13 optimizer.lr 0.08977648881666087 +104 13 negative_sampler.num_negs_per_pos 67.0 +104 13 training.batch_size 1.0 +104 14 model.output_channels 59.0 +104 14 model.input_dropout 0.2824944207526674 +104 14 model.output_dropout 0.35818502929064994 +104 14 model.feature_map_dropout 0.3423613909855687 +104 14 model.embedding_dim 0.0 +104 14 loss.margin 2.9268066758623807 +104 14 optimizer.lr 0.011811433772006861 +104 14 negative_sampler.num_negs_per_pos 86.0 +104 14 training.batch_size 0.0 +104 15 model.output_channels 43.0 +104 15 model.input_dropout 0.20612363764611358 +104 15 model.output_dropout 0.2394999571331411 +104 15 model.feature_map_dropout 
0.03567768010747313 +104 15 model.embedding_dim 2.0 +104 15 loss.margin 5.93755905768527 +104 15 optimizer.lr 0.04667400447870364 +104 15 negative_sampler.num_negs_per_pos 88.0 +104 15 training.batch_size 2.0 +104 1 dataset """wn18rr""" +104 1 model """conve""" +104 1 loss """marginranking""" +104 1 regularizer """no""" +104 1 optimizer """adam""" +104 1 training_loop """owa""" +104 1 negative_sampler """basic""" +104 1 evaluator """rankbased""" +104 2 dataset """wn18rr""" +104 2 model """conve""" +104 2 loss """marginranking""" +104 2 regularizer """no""" +104 2 optimizer """adam""" +104 2 training_loop """owa""" +104 2 negative_sampler """basic""" +104 2 evaluator """rankbased""" +104 3 dataset """wn18rr""" +104 3 model """conve""" +104 3 loss """marginranking""" +104 3 regularizer """no""" +104 3 optimizer """adam""" +104 3 training_loop """owa""" +104 3 negative_sampler """basic""" +104 3 evaluator """rankbased""" +104 4 dataset """wn18rr""" +104 4 model """conve""" +104 4 loss """marginranking""" +104 4 regularizer """no""" +104 4 optimizer """adam""" +104 4 training_loop """owa""" +104 4 negative_sampler """basic""" +104 4 evaluator """rankbased""" +104 5 dataset """wn18rr""" +104 5 model """conve""" +104 5 loss """marginranking""" +104 5 regularizer """no""" +104 5 optimizer """adam""" +104 5 training_loop """owa""" +104 5 negative_sampler """basic""" +104 5 evaluator """rankbased""" +104 6 dataset """wn18rr""" +104 6 model """conve""" +104 6 loss """marginranking""" +104 6 regularizer """no""" +104 6 optimizer """adam""" +104 6 training_loop """owa""" +104 6 negative_sampler """basic""" +104 6 evaluator """rankbased""" +104 7 dataset """wn18rr""" +104 7 model """conve""" +104 7 loss """marginranking""" +104 7 regularizer """no""" +104 7 optimizer """adam""" +104 7 training_loop """owa""" +104 7 negative_sampler """basic""" +104 7 evaluator """rankbased""" +104 8 dataset """wn18rr""" +104 8 model """conve""" +104 8 loss """marginranking""" +104 8 regularizer 
"""no""" +104 8 optimizer """adam""" +104 8 training_loop """owa""" +104 8 negative_sampler """basic""" +104 8 evaluator """rankbased""" +104 9 dataset """wn18rr""" +104 9 model """conve""" +104 9 loss """marginranking""" +104 9 regularizer """no""" +104 9 optimizer """adam""" +104 9 training_loop """owa""" +104 9 negative_sampler """basic""" +104 9 evaluator """rankbased""" +104 10 dataset """wn18rr""" +104 10 model """conve""" +104 10 loss """marginranking""" +104 10 regularizer """no""" +104 10 optimizer """adam""" +104 10 training_loop """owa""" +104 10 negative_sampler """basic""" +104 10 evaluator """rankbased""" +104 11 dataset """wn18rr""" +104 11 model """conve""" +104 11 loss """marginranking""" +104 11 regularizer """no""" +104 11 optimizer """adam""" +104 11 training_loop """owa""" +104 11 negative_sampler """basic""" +104 11 evaluator """rankbased""" +104 12 dataset """wn18rr""" +104 12 model """conve""" +104 12 loss """marginranking""" +104 12 regularizer """no""" +104 12 optimizer """adam""" +104 12 training_loop """owa""" +104 12 negative_sampler """basic""" +104 12 evaluator """rankbased""" +104 13 dataset """wn18rr""" +104 13 model """conve""" +104 13 loss """marginranking""" +104 13 regularizer """no""" +104 13 optimizer """adam""" +104 13 training_loop """owa""" +104 13 negative_sampler """basic""" +104 13 evaluator """rankbased""" +104 14 dataset """wn18rr""" +104 14 model """conve""" +104 14 loss """marginranking""" +104 14 regularizer """no""" +104 14 optimizer """adam""" +104 14 training_loop """owa""" +104 14 negative_sampler """basic""" +104 14 evaluator """rankbased""" +104 15 dataset """wn18rr""" +104 15 model """conve""" +104 15 loss """marginranking""" +104 15 regularizer """no""" +104 15 optimizer """adam""" +104 15 training_loop """owa""" +104 15 negative_sampler """basic""" +104 15 evaluator """rankbased""" +105 1 model.output_channels 62.0 +105 1 model.input_dropout 0.08772947343486864 +105 1 model.output_dropout 
0.08093731928658493 +105 1 model.feature_map_dropout 0.3780973947178808 +105 1 model.embedding_dim 1.0 +105 1 loss.margin 1.888622812470643 +105 1 optimizer.lr 0.004340487013313915 +105 1 negative_sampler.num_negs_per_pos 38.0 +105 1 training.batch_size 0.0 +105 2 model.output_channels 18.0 +105 2 model.input_dropout 0.08762533355357366 +105 2 model.output_dropout 0.07495851559286448 +105 2 model.feature_map_dropout 0.39827117688158004 +105 2 model.embedding_dim 0.0 +105 2 loss.margin 6.8215520560415435 +105 2 optimizer.lr 0.002320577835134573 +105 2 negative_sampler.num_negs_per_pos 56.0 +105 2 training.batch_size 0.0 +105 3 model.output_channels 25.0 +105 3 model.input_dropout 0.43016779781689934 +105 3 model.output_dropout 0.3836143002703357 +105 3 model.feature_map_dropout 0.4263665815799674 +105 3 model.embedding_dim 2.0 +105 3 loss.margin 6.054841654467832 +105 3 optimizer.lr 0.002126239409492182 +105 3 negative_sampler.num_negs_per_pos 21.0 +105 3 training.batch_size 0.0 +105 4 model.output_channels 31.0 +105 4 model.input_dropout 0.37519041463524166 +105 4 model.output_dropout 0.08538402811539847 +105 4 model.feature_map_dropout 0.005210026247180255 +105 4 model.embedding_dim 0.0 +105 4 loss.margin 7.690902408540735 +105 4 optimizer.lr 0.0013029734208517471 +105 4 negative_sampler.num_negs_per_pos 16.0 +105 4 training.batch_size 1.0 +105 5 model.output_channels 32.0 +105 5 model.input_dropout 0.2502512000598394 +105 5 model.output_dropout 0.0956132242007744 +105 5 model.feature_map_dropout 0.21120820962894682 +105 5 model.embedding_dim 1.0 +105 5 loss.margin 4.792055464830452 +105 5 optimizer.lr 0.036434293234602705 +105 5 negative_sampler.num_negs_per_pos 56.0 +105 5 training.batch_size 0.0 +105 6 model.output_channels 27.0 +105 6 model.input_dropout 0.4653186471298528 +105 6 model.output_dropout 0.1949373307194195 +105 6 model.feature_map_dropout 0.2665518178780724 +105 6 model.embedding_dim 2.0 +105 6 loss.margin 9.323286975606813 +105 6 optimizer.lr 
0.0014833331806143103 +105 6 negative_sampler.num_negs_per_pos 67.0 +105 6 training.batch_size 1.0 +105 7 model.output_channels 56.0 +105 7 model.input_dropout 0.05805258627109777 +105 7 model.output_dropout 0.06415375766673426 +105 7 model.feature_map_dropout 0.09293802878413582 +105 7 model.embedding_dim 2.0 +105 7 loss.margin 6.727228716639847 +105 7 optimizer.lr 0.008037824634725637 +105 7 negative_sampler.num_negs_per_pos 67.0 +105 7 training.batch_size 0.0 +105 8 model.output_channels 59.0 +105 8 model.input_dropout 0.11752174212182986 +105 8 model.output_dropout 0.19283803472320066 +105 8 model.feature_map_dropout 0.2635254044609513 +105 8 model.embedding_dim 1.0 +105 8 loss.margin 3.8908318636564023 +105 8 optimizer.lr 0.002603995996471688 +105 8 negative_sampler.num_negs_per_pos 32.0 +105 8 training.batch_size 1.0 +105 9 model.output_channels 56.0 +105 9 model.input_dropout 0.38950570872449425 +105 9 model.output_dropout 0.31367057278328614 +105 9 model.feature_map_dropout 0.33898988401202534 +105 9 model.embedding_dim 0.0 +105 9 loss.margin 2.7633599986298814 +105 9 optimizer.lr 0.02867888623742434 +105 9 negative_sampler.num_negs_per_pos 19.0 +105 9 training.batch_size 2.0 +105 10 model.output_channels 25.0 +105 10 model.input_dropout 0.15778919269272407 +105 10 model.output_dropout 0.1694219880489276 +105 10 model.feature_map_dropout 0.015302805876878656 +105 10 model.embedding_dim 2.0 +105 10 loss.margin 0.6084729353523498 +105 10 optimizer.lr 0.06998239358147083 +105 10 negative_sampler.num_negs_per_pos 55.0 +105 10 training.batch_size 1.0 +105 11 model.output_channels 19.0 +105 11 model.input_dropout 0.32183607801929576 +105 11 model.output_dropout 0.14236384614184922 +105 11 model.feature_map_dropout 0.46534338010780135 +105 11 model.embedding_dim 1.0 +105 11 loss.margin 0.9791042050896986 +105 11 optimizer.lr 0.06889522626625201 +105 11 negative_sampler.num_negs_per_pos 18.0 +105 11 training.batch_size 0.0 +105 12 model.output_channels 24.0 +105 12 
model.input_dropout 0.13266393204536459 +105 12 model.output_dropout 0.23709619020947786 +105 12 model.feature_map_dropout 0.2680469039786679 +105 12 model.embedding_dim 1.0 +105 12 loss.margin 8.112321581234843 +105 12 optimizer.lr 0.021413756198809634 +105 12 negative_sampler.num_negs_per_pos 70.0 +105 12 training.batch_size 0.0 +105 13 model.output_channels 22.0 +105 13 model.input_dropout 0.4379381346850463 +105 13 model.output_dropout 0.47525346179527495 +105 13 model.feature_map_dropout 0.48297296287212127 +105 13 model.embedding_dim 1.0 +105 13 loss.margin 6.901959790726333 +105 13 optimizer.lr 0.004459234938545423 +105 13 negative_sampler.num_negs_per_pos 10.0 +105 13 training.batch_size 1.0 +105 14 model.output_channels 29.0 +105 14 model.input_dropout 0.05251635805145938 +105 14 model.output_dropout 0.06520187884715634 +105 14 model.feature_map_dropout 0.043562552453153824 +105 14 model.embedding_dim 2.0 +105 14 loss.margin 1.3121139621326043 +105 14 optimizer.lr 0.025890013141062568 +105 14 negative_sampler.num_negs_per_pos 3.0 +105 14 training.batch_size 2.0 +105 15 model.output_channels 34.0 +105 15 model.input_dropout 0.22151527245463604 +105 15 model.output_dropout 0.2668275317731304 +105 15 model.feature_map_dropout 0.07058788766392754 +105 15 model.embedding_dim 1.0 +105 15 loss.margin 3.0328579631006907 +105 15 optimizer.lr 0.005811419444218514 +105 15 negative_sampler.num_negs_per_pos 69.0 +105 15 training.batch_size 2.0 +105 16 model.output_channels 23.0 +105 16 model.input_dropout 0.36606788464804635 +105 16 model.output_dropout 0.30675984950859075 +105 16 model.feature_map_dropout 0.22925494706009836 +105 16 model.embedding_dim 0.0 +105 16 loss.margin 0.849057014014297 +105 16 optimizer.lr 0.0049681072903454025 +105 16 negative_sampler.num_negs_per_pos 20.0 +105 16 training.batch_size 1.0 +105 17 model.output_channels 25.0 +105 17 model.input_dropout 0.013494312840185818 +105 17 model.output_dropout 0.4579158761904929 +105 17 
model.feature_map_dropout 0.07628222343745705 +105 17 model.embedding_dim 2.0 +105 17 loss.margin 9.425756848962578 +105 17 optimizer.lr 0.0026160553472154846 +105 17 negative_sampler.num_negs_per_pos 61.0 +105 17 training.batch_size 0.0 +105 18 model.output_channels 55.0 +105 18 model.input_dropout 0.0871708999300696 +105 18 model.output_dropout 0.4393079017544185 +105 18 model.feature_map_dropout 0.12113097209156709 +105 18 model.embedding_dim 1.0 +105 18 loss.margin 2.9769019545144593 +105 18 optimizer.lr 0.030115465767040293 +105 18 negative_sampler.num_negs_per_pos 52.0 +105 18 training.batch_size 1.0 +105 19 model.output_channels 51.0 +105 19 model.input_dropout 0.4182625035666573 +105 19 model.output_dropout 0.3763612961835336 +105 19 model.feature_map_dropout 0.4323010905044073 +105 19 model.embedding_dim 1.0 +105 19 loss.margin 7.768154361485382 +105 19 optimizer.lr 0.014356287751200124 +105 19 negative_sampler.num_negs_per_pos 43.0 +105 19 training.batch_size 2.0 +105 20 model.output_channels 39.0 +105 20 model.input_dropout 0.2039028595728673 +105 20 model.output_dropout 0.24109604046823907 +105 20 model.feature_map_dropout 0.3901257311663624 +105 20 model.embedding_dim 2.0 +105 20 loss.margin 1.9983177775983667 +105 20 optimizer.lr 0.0039372286184529495 +105 20 negative_sampler.num_negs_per_pos 11.0 +105 20 training.batch_size 0.0 +105 21 model.output_channels 30.0 +105 21 model.input_dropout 0.36631890648296644 +105 21 model.output_dropout 0.01585490286161817 +105 21 model.feature_map_dropout 0.05979738718528127 +105 21 model.embedding_dim 1.0 +105 21 loss.margin 8.188428125519353 +105 21 optimizer.lr 0.0028428790099912786 +105 21 negative_sampler.num_negs_per_pos 26.0 +105 21 training.batch_size 2.0 +105 22 model.output_channels 51.0 +105 22 model.input_dropout 0.38086578856219694 +105 22 model.output_dropout 0.12406110267245835 +105 22 model.feature_map_dropout 0.18338797723129596 +105 22 model.embedding_dim 1.0 +105 22 loss.margin 4.874001641782022 
+105 22 optimizer.lr 0.022988645564632124 +105 22 negative_sampler.num_negs_per_pos 46.0 +105 22 training.batch_size 0.0 +105 1 dataset """wn18rr""" +105 1 model """conve""" +105 1 loss """marginranking""" +105 1 regularizer """no""" +105 1 optimizer """adam""" +105 1 training_loop """owa""" +105 1 negative_sampler """basic""" +105 1 evaluator """rankbased""" +105 2 dataset """wn18rr""" +105 2 model """conve""" +105 2 loss """marginranking""" +105 2 regularizer """no""" +105 2 optimizer """adam""" +105 2 training_loop """owa""" +105 2 negative_sampler """basic""" +105 2 evaluator """rankbased""" +105 3 dataset """wn18rr""" +105 3 model """conve""" +105 3 loss """marginranking""" +105 3 regularizer """no""" +105 3 optimizer """adam""" +105 3 training_loop """owa""" +105 3 negative_sampler """basic""" +105 3 evaluator """rankbased""" +105 4 dataset """wn18rr""" +105 4 model """conve""" +105 4 loss """marginranking""" +105 4 regularizer """no""" +105 4 optimizer """adam""" +105 4 training_loop """owa""" +105 4 negative_sampler """basic""" +105 4 evaluator """rankbased""" +105 5 dataset """wn18rr""" +105 5 model """conve""" +105 5 loss """marginranking""" +105 5 regularizer """no""" +105 5 optimizer """adam""" +105 5 training_loop """owa""" +105 5 negative_sampler """basic""" +105 5 evaluator """rankbased""" +105 6 dataset """wn18rr""" +105 6 model """conve""" +105 6 loss """marginranking""" +105 6 regularizer """no""" +105 6 optimizer """adam""" +105 6 training_loop """owa""" +105 6 negative_sampler """basic""" +105 6 evaluator """rankbased""" +105 7 dataset """wn18rr""" +105 7 model """conve""" +105 7 loss """marginranking""" +105 7 regularizer """no""" +105 7 optimizer """adam""" +105 7 training_loop """owa""" +105 7 negative_sampler """basic""" +105 7 evaluator """rankbased""" +105 8 dataset """wn18rr""" +105 8 model """conve""" +105 8 loss """marginranking""" +105 8 regularizer """no""" +105 8 optimizer """adam""" +105 8 training_loop """owa""" +105 8 
negative_sampler """basic""" +105 8 evaluator """rankbased""" +105 9 dataset """wn18rr""" +105 9 model """conve""" +105 9 loss """marginranking""" +105 9 regularizer """no""" +105 9 optimizer """adam""" +105 9 training_loop """owa""" +105 9 negative_sampler """basic""" +105 9 evaluator """rankbased""" +105 10 dataset """wn18rr""" +105 10 model """conve""" +105 10 loss """marginranking""" +105 10 regularizer """no""" +105 10 optimizer """adam""" +105 10 training_loop """owa""" +105 10 negative_sampler """basic""" +105 10 evaluator """rankbased""" +105 11 dataset """wn18rr""" +105 11 model """conve""" +105 11 loss """marginranking""" +105 11 regularizer """no""" +105 11 optimizer """adam""" +105 11 training_loop """owa""" +105 11 negative_sampler """basic""" +105 11 evaluator """rankbased""" +105 12 dataset """wn18rr""" +105 12 model """conve""" +105 12 loss """marginranking""" +105 12 regularizer """no""" +105 12 optimizer """adam""" +105 12 training_loop """owa""" +105 12 negative_sampler """basic""" +105 12 evaluator """rankbased""" +105 13 dataset """wn18rr""" +105 13 model """conve""" +105 13 loss """marginranking""" +105 13 regularizer """no""" +105 13 optimizer """adam""" +105 13 training_loop """owa""" +105 13 negative_sampler """basic""" +105 13 evaluator """rankbased""" +105 14 dataset """wn18rr""" +105 14 model """conve""" +105 14 loss """marginranking""" +105 14 regularizer """no""" +105 14 optimizer """adam""" +105 14 training_loop """owa""" +105 14 negative_sampler """basic""" +105 14 evaluator """rankbased""" +105 15 dataset """wn18rr""" +105 15 model """conve""" +105 15 loss """marginranking""" +105 15 regularizer """no""" +105 15 optimizer """adam""" +105 15 training_loop """owa""" +105 15 negative_sampler """basic""" +105 15 evaluator """rankbased""" +105 16 dataset """wn18rr""" +105 16 model """conve""" +105 16 loss """marginranking""" +105 16 regularizer """no""" +105 16 optimizer """adam""" +105 16 training_loop """owa""" +105 16 negative_sampler 
"""basic""" +105 16 evaluator """rankbased""" +105 17 dataset """wn18rr""" +105 17 model """conve""" +105 17 loss """marginranking""" +105 17 regularizer """no""" +105 17 optimizer """adam""" +105 17 training_loop """owa""" +105 17 negative_sampler """basic""" +105 17 evaluator """rankbased""" +105 18 dataset """wn18rr""" +105 18 model """conve""" +105 18 loss """marginranking""" +105 18 regularizer """no""" +105 18 optimizer """adam""" +105 18 training_loop """owa""" +105 18 negative_sampler """basic""" +105 18 evaluator """rankbased""" +105 19 dataset """wn18rr""" +105 19 model """conve""" +105 19 loss """marginranking""" +105 19 regularizer """no""" +105 19 optimizer """adam""" +105 19 training_loop """owa""" +105 19 negative_sampler """basic""" +105 19 evaluator """rankbased""" +105 20 dataset """wn18rr""" +105 20 model """conve""" +105 20 loss """marginranking""" +105 20 regularizer """no""" +105 20 optimizer """adam""" +105 20 training_loop """owa""" +105 20 negative_sampler """basic""" +105 20 evaluator """rankbased""" +105 21 dataset """wn18rr""" +105 21 model """conve""" +105 21 loss """marginranking""" +105 21 regularizer """no""" +105 21 optimizer """adam""" +105 21 training_loop """owa""" +105 21 negative_sampler """basic""" +105 21 evaluator """rankbased""" +105 22 dataset """wn18rr""" +105 22 model """conve""" +105 22 loss """marginranking""" +105 22 regularizer """no""" +105 22 optimizer """adam""" +105 22 training_loop """owa""" +105 22 negative_sampler """basic""" +105 22 evaluator """rankbased""" +106 1 model.embedding_dim 1.0 +106 1 model.hidden_dropout_rate 0.2659062039817537 +106 1 model.num_filters 9.0 +106 1 training.batch_size 2.0 +106 1 training.label_smoothing 0.0975351495597724 +106 2 model.embedding_dim 2.0 +106 2 model.hidden_dropout_rate 0.31075209708138285 +106 2 model.num_filters 3.0 +106 2 training.batch_size 2.0 +106 2 training.label_smoothing 0.0027675796257303187 +106 3 model.embedding_dim 1.0 +106 3 model.hidden_dropout_rate 
0.13603923411381316 +106 3 model.num_filters 0.0 +106 3 training.batch_size 1.0 +106 3 training.label_smoothing 0.060214046150956345 +106 4 model.embedding_dim 0.0 +106 4 model.hidden_dropout_rate 0.33944585708192876 +106 4 model.num_filters 0.0 +106 4 training.batch_size 1.0 +106 4 training.label_smoothing 0.33287743685927607 +106 5 model.embedding_dim 0.0 +106 5 model.hidden_dropout_rate 0.13632692276635733 +106 5 model.num_filters 4.0 +106 5 training.batch_size 1.0 +106 5 training.label_smoothing 0.08810777416670106 +106 6 model.embedding_dim 1.0 +106 6 model.hidden_dropout_rate 0.3097318101971751 +106 6 model.num_filters 1.0 +106 6 training.batch_size 1.0 +106 6 training.label_smoothing 0.389460396547229 +106 7 model.embedding_dim 1.0 +106 7 model.hidden_dropout_rate 0.3096710800611414 +106 7 model.num_filters 8.0 +106 7 training.batch_size 2.0 +106 7 training.label_smoothing 0.028821412452420953 +106 8 model.embedding_dim 2.0 +106 8 model.hidden_dropout_rate 0.27377491228616824 +106 8 model.num_filters 0.0 +106 8 training.batch_size 0.0 +106 8 training.label_smoothing 0.8975760874239616 +106 9 model.embedding_dim 0.0 +106 9 model.hidden_dropout_rate 0.35384907579352853 +106 9 model.num_filters 7.0 +106 9 training.batch_size 1.0 +106 9 training.label_smoothing 0.41303363865286613 +106 10 model.embedding_dim 2.0 +106 10 model.hidden_dropout_rate 0.4023941888764502 +106 10 model.num_filters 1.0 +106 10 training.batch_size 1.0 +106 10 training.label_smoothing 0.08966246033311479 +106 11 model.embedding_dim 2.0 +106 11 model.hidden_dropout_rate 0.1999984427065618 +106 11 model.num_filters 2.0 +106 11 training.batch_size 0.0 +106 11 training.label_smoothing 0.0020137738441754886 +106 12 model.embedding_dim 1.0 +106 12 model.hidden_dropout_rate 0.21306582131590385 +106 12 model.num_filters 9.0 +106 12 training.batch_size 2.0 +106 12 training.label_smoothing 0.24970243364989705 +106 13 model.embedding_dim 2.0 +106 13 model.hidden_dropout_rate 0.24239940765562978 +106 
13 model.num_filters 3.0 +106 13 training.batch_size 1.0 +106 13 training.label_smoothing 0.003969190734955562 +106 14 model.embedding_dim 2.0 +106 14 model.hidden_dropout_rate 0.16763566386637718 +106 14 model.num_filters 6.0 +106 14 training.batch_size 1.0 +106 14 training.label_smoothing 0.0016431238659956682 +106 15 model.embedding_dim 0.0 +106 15 model.hidden_dropout_rate 0.28398767454211027 +106 15 model.num_filters 7.0 +106 15 training.batch_size 0.0 +106 15 training.label_smoothing 0.003030312552095049 +106 16 model.embedding_dim 0.0 +106 16 model.hidden_dropout_rate 0.2710390521596906 +106 16 model.num_filters 8.0 +106 16 training.batch_size 0.0 +106 16 training.label_smoothing 0.013888104992511141 +106 17 model.embedding_dim 1.0 +106 17 model.hidden_dropout_rate 0.31213186087551037 +106 17 model.num_filters 2.0 +106 17 training.batch_size 1.0 +106 17 training.label_smoothing 0.0014154242492180138 +106 18 model.embedding_dim 2.0 +106 18 model.hidden_dropout_rate 0.12854374726192033 +106 18 model.num_filters 2.0 +106 18 training.batch_size 0.0 +106 18 training.label_smoothing 0.004219170262032434 +106 19 model.embedding_dim 1.0 +106 19 model.hidden_dropout_rate 0.1051151590565917 +106 19 model.num_filters 6.0 +106 19 training.batch_size 1.0 +106 19 training.label_smoothing 0.013857174755917405 +106 20 model.embedding_dim 1.0 +106 20 model.hidden_dropout_rate 0.35642048259343706 +106 20 model.num_filters 0.0 +106 20 training.batch_size 1.0 +106 20 training.label_smoothing 0.001993593014917883 +106 21 model.embedding_dim 1.0 +106 21 model.hidden_dropout_rate 0.20880257024186355 +106 21 model.num_filters 3.0 +106 21 training.batch_size 1.0 +106 21 training.label_smoothing 0.849779437194131 +106 22 model.embedding_dim 2.0 +106 22 model.hidden_dropout_rate 0.3142070903606772 +106 22 model.num_filters 6.0 +106 22 training.batch_size 0.0 +106 22 training.label_smoothing 0.24192824890319467 +106 23 model.embedding_dim 0.0 +106 23 model.hidden_dropout_rate 
0.3811857688124385 +106 23 model.num_filters 7.0 +106 23 training.batch_size 1.0 +106 23 training.label_smoothing 0.014367051862036604 +106 24 model.embedding_dim 1.0 +106 24 model.hidden_dropout_rate 0.23504922835330647 +106 24 model.num_filters 3.0 +106 24 training.batch_size 2.0 +106 24 training.label_smoothing 0.15655516306315453 +106 25 model.embedding_dim 1.0 +106 25 model.hidden_dropout_rate 0.4098760825100688 +106 25 model.num_filters 7.0 +106 25 training.batch_size 1.0 +106 25 training.label_smoothing 0.020443003453875862 +106 26 model.embedding_dim 2.0 +106 26 model.hidden_dropout_rate 0.44730136690750055 +106 26 model.num_filters 1.0 +106 26 training.batch_size 0.0 +106 26 training.label_smoothing 0.0042762867533526665 +106 27 model.embedding_dim 1.0 +106 27 model.hidden_dropout_rate 0.25009611572020607 +106 27 model.num_filters 2.0 +106 27 training.batch_size 2.0 +106 27 training.label_smoothing 0.004757339289033212 +106 28 model.embedding_dim 0.0 +106 28 model.hidden_dropout_rate 0.2100026193207631 +106 28 model.num_filters 5.0 +106 28 training.batch_size 2.0 +106 28 training.label_smoothing 0.16093392378250884 +106 29 model.embedding_dim 0.0 +106 29 model.hidden_dropout_rate 0.19360806353385637 +106 29 model.num_filters 7.0 +106 29 training.batch_size 1.0 +106 29 training.label_smoothing 0.008005254779241947 +106 30 model.embedding_dim 0.0 +106 30 model.hidden_dropout_rate 0.4073652355062978 +106 30 model.num_filters 7.0 +106 30 training.batch_size 2.0 +106 30 training.label_smoothing 0.008888414406395023 +106 31 model.embedding_dim 0.0 +106 31 model.hidden_dropout_rate 0.24433185705433003 +106 31 model.num_filters 4.0 +106 31 training.batch_size 2.0 +106 31 training.label_smoothing 0.08783506876505211 +106 32 model.embedding_dim 1.0 +106 32 model.hidden_dropout_rate 0.13370038840919818 +106 32 model.num_filters 7.0 +106 32 training.batch_size 2.0 +106 32 training.label_smoothing 0.0024389319675011344 +106 33 model.embedding_dim 0.0 +106 33 
model.hidden_dropout_rate 0.4972026767876655 +106 33 model.num_filters 5.0 +106 33 training.batch_size 1.0 +106 33 training.label_smoothing 0.010739108442673125 +106 34 model.embedding_dim 2.0 +106 34 model.hidden_dropout_rate 0.4293291788490542 +106 34 model.num_filters 2.0 +106 34 training.batch_size 0.0 +106 34 training.label_smoothing 0.7419580106467851 +106 35 model.embedding_dim 0.0 +106 35 model.hidden_dropout_rate 0.17777533004615684 +106 35 model.num_filters 6.0 +106 35 training.batch_size 1.0 +106 35 training.label_smoothing 0.3286102566439559 +106 36 model.embedding_dim 0.0 +106 36 model.hidden_dropout_rate 0.47361366068990296 +106 36 model.num_filters 6.0 +106 36 training.batch_size 0.0 +106 36 training.label_smoothing 0.017800910738635184 +106 37 model.embedding_dim 0.0 +106 37 model.hidden_dropout_rate 0.43568919697492026 +106 37 model.num_filters 3.0 +106 37 training.batch_size 2.0 +106 37 training.label_smoothing 0.0011425899466289164 +106 38 model.embedding_dim 0.0 +106 38 model.hidden_dropout_rate 0.2872190272740007 +106 38 model.num_filters 6.0 +106 38 training.batch_size 1.0 +106 38 training.label_smoothing 0.037039136781101475 +106 39 model.embedding_dim 1.0 +106 39 model.hidden_dropout_rate 0.3929231969581133 +106 39 model.num_filters 2.0 +106 39 training.batch_size 1.0 +106 39 training.label_smoothing 0.06819772604243439 +106 40 model.embedding_dim 1.0 +106 40 model.hidden_dropout_rate 0.3457585131740353 +106 40 model.num_filters 6.0 +106 40 training.batch_size 0.0 +106 40 training.label_smoothing 0.20172627300202395 +106 41 model.embedding_dim 2.0 +106 41 model.hidden_dropout_rate 0.23228437734251478 +106 41 model.num_filters 8.0 +106 41 training.batch_size 0.0 +106 41 training.label_smoothing 0.017499797301843315 +106 42 model.embedding_dim 2.0 +106 42 model.hidden_dropout_rate 0.23511652779278785 +106 42 model.num_filters 2.0 +106 42 training.batch_size 0.0 +106 42 training.label_smoothing 0.022237146778273903 +106 43 model.embedding_dim 
1.0 +106 43 model.hidden_dropout_rate 0.40946889547254894 +106 43 model.num_filters 1.0 +106 43 training.batch_size 2.0 +106 43 training.label_smoothing 0.02428132902095519 +106 44 model.embedding_dim 2.0 +106 44 model.hidden_dropout_rate 0.4578831642666277 +106 44 model.num_filters 8.0 +106 44 training.batch_size 2.0 +106 44 training.label_smoothing 0.7804165590817297 +106 45 model.embedding_dim 0.0 +106 45 model.hidden_dropout_rate 0.23609667957624092 +106 45 model.num_filters 7.0 +106 45 training.batch_size 2.0 +106 45 training.label_smoothing 0.0021364509894045264 +106 46 model.embedding_dim 2.0 +106 46 model.hidden_dropout_rate 0.39742088923911845 +106 46 model.num_filters 7.0 +106 46 training.batch_size 1.0 +106 46 training.label_smoothing 0.009215511660033676 +106 47 model.embedding_dim 1.0 +106 47 model.hidden_dropout_rate 0.15069134395180928 +106 47 model.num_filters 3.0 +106 47 training.batch_size 2.0 +106 47 training.label_smoothing 0.02286136054025561 +106 48 model.embedding_dim 0.0 +106 48 model.hidden_dropout_rate 0.44548791791854214 +106 48 model.num_filters 6.0 +106 48 training.batch_size 2.0 +106 48 training.label_smoothing 0.013777232970451655 +106 49 model.embedding_dim 1.0 +106 49 model.hidden_dropout_rate 0.33870891067523107 +106 49 model.num_filters 7.0 +106 49 training.batch_size 2.0 +106 49 training.label_smoothing 0.4183160633632865 +106 50 model.embedding_dim 2.0 +106 50 model.hidden_dropout_rate 0.1553389924772021 +106 50 model.num_filters 9.0 +106 50 training.batch_size 2.0 +106 50 training.label_smoothing 0.09598458901567641 +106 1 dataset """kinships""" +106 1 model """convkb""" +106 1 loss """bceaftersigmoid""" +106 1 regularizer """no""" +106 1 optimizer """adadelta""" +106 1 training_loop """lcwa""" +106 1 evaluator """rankbased""" +106 2 dataset """kinships""" +106 2 model """convkb""" +106 2 loss """bceaftersigmoid""" +106 2 regularizer """no""" +106 2 optimizer """adadelta""" +106 2 training_loop """lcwa""" +106 2 evaluator 
"""rankbased""" +106 3 dataset """kinships""" +106 3 model """convkb""" +106 3 loss """bceaftersigmoid""" +106 3 regularizer """no""" +106 3 optimizer """adadelta""" +106 3 training_loop """lcwa""" +106 3 evaluator """rankbased""" +106 4 dataset """kinships""" +106 4 model """convkb""" +106 4 loss """bceaftersigmoid""" +106 4 regularizer """no""" +106 4 optimizer """adadelta""" +106 4 training_loop """lcwa""" +106 4 evaluator """rankbased""" +106 5 dataset """kinships""" +106 5 model """convkb""" +106 5 loss """bceaftersigmoid""" +106 5 regularizer """no""" +106 5 optimizer """adadelta""" +106 5 training_loop """lcwa""" +106 5 evaluator """rankbased""" +106 6 dataset """kinships""" +106 6 model """convkb""" +106 6 loss """bceaftersigmoid""" +106 6 regularizer """no""" +106 6 optimizer """adadelta""" +106 6 training_loop """lcwa""" +106 6 evaluator """rankbased""" +106 7 dataset """kinships""" +106 7 model """convkb""" +106 7 loss """bceaftersigmoid""" +106 7 regularizer """no""" +106 7 optimizer """adadelta""" +106 7 training_loop """lcwa""" +106 7 evaluator """rankbased""" +106 8 dataset """kinships""" +106 8 model """convkb""" +106 8 loss """bceaftersigmoid""" +106 8 regularizer """no""" +106 8 optimizer """adadelta""" +106 8 training_loop """lcwa""" +106 8 evaluator """rankbased""" +106 9 dataset """kinships""" +106 9 model """convkb""" +106 9 loss """bceaftersigmoid""" +106 9 regularizer """no""" +106 9 optimizer """adadelta""" +106 9 training_loop """lcwa""" +106 9 evaluator """rankbased""" +106 10 dataset """kinships""" +106 10 model """convkb""" +106 10 loss """bceaftersigmoid""" +106 10 regularizer """no""" +106 10 optimizer """adadelta""" +106 10 training_loop """lcwa""" +106 10 evaluator """rankbased""" +106 11 dataset """kinships""" +106 11 model """convkb""" +106 11 loss """bceaftersigmoid""" +106 11 regularizer """no""" +106 11 optimizer """adadelta""" +106 11 training_loop """lcwa""" +106 11 evaluator """rankbased""" +106 12 dataset """kinships""" 
+106 12 model """convkb""" +106 12 loss """bceaftersigmoid""" +106 12 regularizer """no""" +106 12 optimizer """adadelta""" +106 12 training_loop """lcwa""" +106 12 evaluator """rankbased""" +106 13 dataset """kinships""" +106 13 model """convkb""" +106 13 loss """bceaftersigmoid""" +106 13 regularizer """no""" +106 13 optimizer """adadelta""" +106 13 training_loop """lcwa""" +106 13 evaluator """rankbased""" +106 14 dataset """kinships""" +106 14 model """convkb""" +106 14 loss """bceaftersigmoid""" +106 14 regularizer """no""" +106 14 optimizer """adadelta""" +106 14 training_loop """lcwa""" +106 14 evaluator """rankbased""" +106 15 dataset """kinships""" +106 15 model """convkb""" +106 15 loss """bceaftersigmoid""" +106 15 regularizer """no""" +106 15 optimizer """adadelta""" +106 15 training_loop """lcwa""" +106 15 evaluator """rankbased""" +106 16 dataset """kinships""" +106 16 model """convkb""" +106 16 loss """bceaftersigmoid""" +106 16 regularizer """no""" +106 16 optimizer """adadelta""" +106 16 training_loop """lcwa""" +106 16 evaluator """rankbased""" +106 17 dataset """kinships""" +106 17 model """convkb""" +106 17 loss """bceaftersigmoid""" +106 17 regularizer """no""" +106 17 optimizer """adadelta""" +106 17 training_loop """lcwa""" +106 17 evaluator """rankbased""" +106 18 dataset """kinships""" +106 18 model """convkb""" +106 18 loss """bceaftersigmoid""" +106 18 regularizer """no""" +106 18 optimizer """adadelta""" +106 18 training_loop """lcwa""" +106 18 evaluator """rankbased""" +106 19 dataset """kinships""" +106 19 model """convkb""" +106 19 loss """bceaftersigmoid""" +106 19 regularizer """no""" +106 19 optimizer """adadelta""" +106 19 training_loop """lcwa""" +106 19 evaluator """rankbased""" +106 20 dataset """kinships""" +106 20 model """convkb""" +106 20 loss """bceaftersigmoid""" +106 20 regularizer """no""" +106 20 optimizer """adadelta""" +106 20 training_loop """lcwa""" +106 20 evaluator """rankbased""" +106 21 dataset """kinships""" 
+106 21 model """convkb""" +106 21 loss """bceaftersigmoid""" +106 21 regularizer """no""" +106 21 optimizer """adadelta""" +106 21 training_loop """lcwa""" +106 21 evaluator """rankbased""" +106 22 dataset """kinships""" +106 22 model """convkb""" +106 22 loss """bceaftersigmoid""" +106 22 regularizer """no""" +106 22 optimizer """adadelta""" +106 22 training_loop """lcwa""" +106 22 evaluator """rankbased""" +106 23 dataset """kinships""" +106 23 model """convkb""" +106 23 loss """bceaftersigmoid""" +106 23 regularizer """no""" +106 23 optimizer """adadelta""" +106 23 training_loop """lcwa""" +106 23 evaluator """rankbased""" +106 24 dataset """kinships""" +106 24 model """convkb""" +106 24 loss """bceaftersigmoid""" +106 24 regularizer """no""" +106 24 optimizer """adadelta""" +106 24 training_loop """lcwa""" +106 24 evaluator """rankbased""" +106 25 dataset """kinships""" +106 25 model """convkb""" +106 25 loss """bceaftersigmoid""" +106 25 regularizer """no""" +106 25 optimizer """adadelta""" +106 25 training_loop """lcwa""" +106 25 evaluator """rankbased""" +106 26 dataset """kinships""" +106 26 model """convkb""" +106 26 loss """bceaftersigmoid""" +106 26 regularizer """no""" +106 26 optimizer """adadelta""" +106 26 training_loop """lcwa""" +106 26 evaluator """rankbased""" +106 27 dataset """kinships""" +106 27 model """convkb""" +106 27 loss """bceaftersigmoid""" +106 27 regularizer """no""" +106 27 optimizer """adadelta""" +106 27 training_loop """lcwa""" +106 27 evaluator """rankbased""" +106 28 dataset """kinships""" +106 28 model """convkb""" +106 28 loss """bceaftersigmoid""" +106 28 regularizer """no""" +106 28 optimizer """adadelta""" +106 28 training_loop """lcwa""" +106 28 evaluator """rankbased""" +106 29 dataset """kinships""" +106 29 model """convkb""" +106 29 loss """bceaftersigmoid""" +106 29 regularizer """no""" +106 29 optimizer """adadelta""" +106 29 training_loop """lcwa""" +106 29 evaluator """rankbased""" +106 30 dataset """kinships""" 
+106 30 model """convkb""" +106 30 loss """bceaftersigmoid""" +106 30 regularizer """no""" +106 30 optimizer """adadelta""" +106 30 training_loop """lcwa""" +106 30 evaluator """rankbased""" +106 31 dataset """kinships""" +106 31 model """convkb""" +106 31 loss """bceaftersigmoid""" +106 31 regularizer """no""" +106 31 optimizer """adadelta""" +106 31 training_loop """lcwa""" +106 31 evaluator """rankbased""" +106 32 dataset """kinships""" +106 32 model """convkb""" +106 32 loss """bceaftersigmoid""" +106 32 regularizer """no""" +106 32 optimizer """adadelta""" +106 32 training_loop """lcwa""" +106 32 evaluator """rankbased""" +106 33 dataset """kinships""" +106 33 model """convkb""" +106 33 loss """bceaftersigmoid""" +106 33 regularizer """no""" +106 33 optimizer """adadelta""" +106 33 training_loop """lcwa""" +106 33 evaluator """rankbased""" +106 34 dataset """kinships""" +106 34 model """convkb""" +106 34 loss """bceaftersigmoid""" +106 34 regularizer """no""" +106 34 optimizer """adadelta""" +106 34 training_loop """lcwa""" +106 34 evaluator """rankbased""" +106 35 dataset """kinships""" +106 35 model """convkb""" +106 35 loss """bceaftersigmoid""" +106 35 regularizer """no""" +106 35 optimizer """adadelta""" +106 35 training_loop """lcwa""" +106 35 evaluator """rankbased""" +106 36 dataset """kinships""" +106 36 model """convkb""" +106 36 loss """bceaftersigmoid""" +106 36 regularizer """no""" +106 36 optimizer """adadelta""" +106 36 training_loop """lcwa""" +106 36 evaluator """rankbased""" +106 37 dataset """kinships""" +106 37 model """convkb""" +106 37 loss """bceaftersigmoid""" +106 37 regularizer """no""" +106 37 optimizer """adadelta""" +106 37 training_loop """lcwa""" +106 37 evaluator """rankbased""" +106 38 dataset """kinships""" +106 38 model """convkb""" +106 38 loss """bceaftersigmoid""" +106 38 regularizer """no""" +106 38 optimizer """adadelta""" +106 38 training_loop """lcwa""" +106 38 evaluator """rankbased""" +106 39 dataset """kinships""" 
+106 39 model """convkb""" +106 39 loss """bceaftersigmoid""" +106 39 regularizer """no""" +106 39 optimizer """adadelta""" +106 39 training_loop """lcwa""" +106 39 evaluator """rankbased""" +106 40 dataset """kinships""" +106 40 model """convkb""" +106 40 loss """bceaftersigmoid""" +106 40 regularizer """no""" +106 40 optimizer """adadelta""" +106 40 training_loop """lcwa""" +106 40 evaluator """rankbased""" +106 41 dataset """kinships""" +106 41 model """convkb""" +106 41 loss """bceaftersigmoid""" +106 41 regularizer """no""" +106 41 optimizer """adadelta""" +106 41 training_loop """lcwa""" +106 41 evaluator """rankbased""" +106 42 dataset """kinships""" +106 42 model """convkb""" +106 42 loss """bceaftersigmoid""" +106 42 regularizer """no""" +106 42 optimizer """adadelta""" +106 42 training_loop """lcwa""" +106 42 evaluator """rankbased""" +106 43 dataset """kinships""" +106 43 model """convkb""" +106 43 loss """bceaftersigmoid""" +106 43 regularizer """no""" +106 43 optimizer """adadelta""" +106 43 training_loop """lcwa""" +106 43 evaluator """rankbased""" +106 44 dataset """kinships""" +106 44 model """convkb""" +106 44 loss """bceaftersigmoid""" +106 44 regularizer """no""" +106 44 optimizer """adadelta""" +106 44 training_loop """lcwa""" +106 44 evaluator """rankbased""" +106 45 dataset """kinships""" +106 45 model """convkb""" +106 45 loss """bceaftersigmoid""" +106 45 regularizer """no""" +106 45 optimizer """adadelta""" +106 45 training_loop """lcwa""" +106 45 evaluator """rankbased""" +106 46 dataset """kinships""" +106 46 model """convkb""" +106 46 loss """bceaftersigmoid""" +106 46 regularizer """no""" +106 46 optimizer """adadelta""" +106 46 training_loop """lcwa""" +106 46 evaluator """rankbased""" +106 47 dataset """kinships""" +106 47 model """convkb""" +106 47 loss """bceaftersigmoid""" +106 47 regularizer """no""" +106 47 optimizer """adadelta""" +106 47 training_loop """lcwa""" +106 47 evaluator """rankbased""" +106 48 dataset """kinships""" 
+106 48 model """convkb""" +106 48 loss """bceaftersigmoid""" +106 48 regularizer """no""" +106 48 optimizer """adadelta""" +106 48 training_loop """lcwa""" +106 48 evaluator """rankbased""" +106 49 dataset """kinships""" +106 49 model """convkb""" +106 49 loss """bceaftersigmoid""" +106 49 regularizer """no""" +106 49 optimizer """adadelta""" +106 49 training_loop """lcwa""" +106 49 evaluator """rankbased""" +106 50 dataset """kinships""" +106 50 model """convkb""" +106 50 loss """bceaftersigmoid""" +106 50 regularizer """no""" +106 50 optimizer """adadelta""" +106 50 training_loop """lcwa""" +106 50 evaluator """rankbased""" +107 1 model.embedding_dim 0.0 +107 1 model.hidden_dropout_rate 0.41572520473587327 +107 1 model.num_filters 2.0 +107 1 training.batch_size 1.0 +107 1 training.label_smoothing 0.19621721212104237 +107 2 model.embedding_dim 0.0 +107 2 model.hidden_dropout_rate 0.2760019001080529 +107 2 model.num_filters 3.0 +107 2 training.batch_size 2.0 +107 2 training.label_smoothing 0.01369437277717486 +107 3 model.embedding_dim 2.0 +107 3 model.hidden_dropout_rate 0.45349175966287936 +107 3 model.num_filters 5.0 +107 3 training.batch_size 1.0 +107 3 training.label_smoothing 0.03221852611352687 +107 4 model.embedding_dim 0.0 +107 4 model.hidden_dropout_rate 0.418980939188749 +107 4 model.num_filters 7.0 +107 4 training.batch_size 1.0 +107 4 training.label_smoothing 0.11083834055824089 +107 5 model.embedding_dim 1.0 +107 5 model.hidden_dropout_rate 0.34394492310654534 +107 5 model.num_filters 8.0 +107 5 training.batch_size 1.0 +107 5 training.label_smoothing 0.004620294136542326 +107 6 model.embedding_dim 2.0 +107 6 model.hidden_dropout_rate 0.4622734943907445 +107 6 model.num_filters 3.0 +107 6 training.batch_size 1.0 +107 6 training.label_smoothing 0.8577912582663519 +107 7 model.embedding_dim 2.0 +107 7 model.hidden_dropout_rate 0.4238952575250675 +107 7 model.num_filters 9.0 +107 7 training.batch_size 2.0 +107 7 training.label_smoothing 
0.002974971671110185 +107 8 model.embedding_dim 0.0 +107 8 model.hidden_dropout_rate 0.19463206093537205 +107 8 model.num_filters 7.0 +107 8 training.batch_size 0.0 +107 8 training.label_smoothing 0.4199013271983872 +107 9 model.embedding_dim 0.0 +107 9 model.hidden_dropout_rate 0.3218787292913513 +107 9 model.num_filters 4.0 +107 9 training.batch_size 2.0 +107 9 training.label_smoothing 0.23907835213717332 +107 10 model.embedding_dim 0.0 +107 10 model.hidden_dropout_rate 0.22110984009921708 +107 10 model.num_filters 8.0 +107 10 training.batch_size 2.0 +107 10 training.label_smoothing 0.011908783123087237 +107 11 model.embedding_dim 0.0 +107 11 model.hidden_dropout_rate 0.11985934551714901 +107 11 model.num_filters 1.0 +107 11 training.batch_size 1.0 +107 11 training.label_smoothing 0.8582598542058066 +107 12 model.embedding_dim 0.0 +107 12 model.hidden_dropout_rate 0.15789593878873498 +107 12 model.num_filters 9.0 +107 12 training.batch_size 0.0 +107 12 training.label_smoothing 0.006140801044481805 +107 13 model.embedding_dim 1.0 +107 13 model.hidden_dropout_rate 0.4158220779709151 +107 13 model.num_filters 9.0 +107 13 training.batch_size 0.0 +107 13 training.label_smoothing 0.005630525888848078 +107 14 model.embedding_dim 1.0 +107 14 model.hidden_dropout_rate 0.13097313428284046 +107 14 model.num_filters 3.0 +107 14 training.batch_size 2.0 +107 14 training.label_smoothing 0.053245795049805374 +107 15 model.embedding_dim 0.0 +107 15 model.hidden_dropout_rate 0.13788635459219686 +107 15 model.num_filters 0.0 +107 15 training.batch_size 0.0 +107 15 training.label_smoothing 0.11584790725521465 +107 16 model.embedding_dim 2.0 +107 16 model.hidden_dropout_rate 0.274777590131992 +107 16 model.num_filters 8.0 +107 16 training.batch_size 2.0 +107 16 training.label_smoothing 0.0013138225892947682 +107 17 model.embedding_dim 0.0 +107 17 model.hidden_dropout_rate 0.4924637731408573 +107 17 model.num_filters 4.0 +107 17 training.batch_size 2.0 +107 17 training.label_smoothing 
0.011213314629451193 +107 18 model.embedding_dim 2.0 +107 18 model.hidden_dropout_rate 0.48254229571306506 +107 18 model.num_filters 7.0 +107 18 training.batch_size 1.0 +107 18 training.label_smoothing 0.0030255472154812058 +107 19 model.embedding_dim 2.0 +107 19 model.hidden_dropout_rate 0.20515123566360233 +107 19 model.num_filters 3.0 +107 19 training.batch_size 0.0 +107 19 training.label_smoothing 0.005205024032929266 +107 20 model.embedding_dim 1.0 +107 20 model.hidden_dropout_rate 0.3199848504599194 +107 20 model.num_filters 7.0 +107 20 training.batch_size 1.0 +107 20 training.label_smoothing 0.0019268619737964098 +107 21 model.embedding_dim 2.0 +107 21 model.hidden_dropout_rate 0.42716012646885715 +107 21 model.num_filters 3.0 +107 21 training.batch_size 2.0 +107 21 training.label_smoothing 0.002926527358573601 +107 22 model.embedding_dim 1.0 +107 22 model.hidden_dropout_rate 0.37730557037945867 +107 22 model.num_filters 4.0 +107 22 training.batch_size 1.0 +107 22 training.label_smoothing 0.0366435808252694 +107 23 model.embedding_dim 0.0 +107 23 model.hidden_dropout_rate 0.4854717842447229 +107 23 model.num_filters 0.0 +107 23 training.batch_size 1.0 +107 23 training.label_smoothing 0.040006973218808216 +107 24 model.embedding_dim 2.0 +107 24 model.hidden_dropout_rate 0.22425083453962205 +107 24 model.num_filters 6.0 +107 24 training.batch_size 1.0 +107 24 training.label_smoothing 0.06312259988192916 +107 25 model.embedding_dim 0.0 +107 25 model.hidden_dropout_rate 0.26975483161430225 +107 25 model.num_filters 3.0 +107 25 training.batch_size 0.0 +107 25 training.label_smoothing 0.023781048242248238 +107 26 model.embedding_dim 0.0 +107 26 model.hidden_dropout_rate 0.39524805129056595 +107 26 model.num_filters 1.0 +107 26 training.batch_size 2.0 +107 26 training.label_smoothing 0.00804567942500812 +107 27 model.embedding_dim 1.0 +107 27 model.hidden_dropout_rate 0.3583532593400064 +107 27 model.num_filters 6.0 +107 27 training.batch_size 0.0 +107 27 
training.label_smoothing 0.7561603179912871 +107 28 model.embedding_dim 1.0 +107 28 model.hidden_dropout_rate 0.3057433258878719 +107 28 model.num_filters 9.0 +107 28 training.batch_size 2.0 +107 28 training.label_smoothing 0.009241720669081296 +107 29 model.embedding_dim 2.0 +107 29 model.hidden_dropout_rate 0.31532854913350106 +107 29 model.num_filters 9.0 +107 29 training.batch_size 0.0 +107 29 training.label_smoothing 0.30192037437422237 +107 30 model.embedding_dim 2.0 +107 30 model.hidden_dropout_rate 0.19063087855038938 +107 30 model.num_filters 9.0 +107 30 training.batch_size 1.0 +107 30 training.label_smoothing 0.6681559857860729 +107 31 model.embedding_dim 0.0 +107 31 model.hidden_dropout_rate 0.43459363561330966 +107 31 model.num_filters 6.0 +107 31 training.batch_size 0.0 +107 31 training.label_smoothing 0.7230003621613811 +107 32 model.embedding_dim 1.0 +107 32 model.hidden_dropout_rate 0.1444461049510836 +107 32 model.num_filters 5.0 +107 32 training.batch_size 2.0 +107 32 training.label_smoothing 0.006892671305032487 +107 33 model.embedding_dim 2.0 +107 33 model.hidden_dropout_rate 0.2855258894974477 +107 33 model.num_filters 8.0 +107 33 training.batch_size 2.0 +107 33 training.label_smoothing 0.41702664733819717 +107 34 model.embedding_dim 2.0 +107 34 model.hidden_dropout_rate 0.2959810791699678 +107 34 model.num_filters 8.0 +107 34 training.batch_size 0.0 +107 34 training.label_smoothing 0.006926089655093508 +107 35 model.embedding_dim 2.0 +107 35 model.hidden_dropout_rate 0.355595993466167 +107 35 model.num_filters 7.0 +107 35 training.batch_size 1.0 +107 35 training.label_smoothing 0.04421871396050186 +107 36 model.embedding_dim 1.0 +107 36 model.hidden_dropout_rate 0.146412306174976 +107 36 model.num_filters 9.0 +107 36 training.batch_size 1.0 +107 36 training.label_smoothing 0.05507813345219219 +107 37 model.embedding_dim 1.0 +107 37 model.hidden_dropout_rate 0.28159433711475435 +107 37 model.num_filters 8.0 +107 37 training.batch_size 1.0 +107 
37 training.label_smoothing 0.778676751463684 +107 38 model.embedding_dim 1.0 +107 38 model.hidden_dropout_rate 0.3392154323319497 +107 38 model.num_filters 1.0 +107 38 training.batch_size 2.0 +107 38 training.label_smoothing 0.03058895392579096 +107 39 model.embedding_dim 0.0 +107 39 model.hidden_dropout_rate 0.2014577642393831 +107 39 model.num_filters 3.0 +107 39 training.batch_size 1.0 +107 39 training.label_smoothing 0.2348637522484855 +107 40 model.embedding_dim 0.0 +107 40 model.hidden_dropout_rate 0.4140962827927741 +107 40 model.num_filters 8.0 +107 40 training.batch_size 2.0 +107 40 training.label_smoothing 0.0017795539954476233 +107 41 model.embedding_dim 0.0 +107 41 model.hidden_dropout_rate 0.39142305944427347 +107 41 model.num_filters 7.0 +107 41 training.batch_size 2.0 +107 41 training.label_smoothing 0.13987326040345605 +107 42 model.embedding_dim 2.0 +107 42 model.hidden_dropout_rate 0.13263029414219185 +107 42 model.num_filters 3.0 +107 42 training.batch_size 0.0 +107 42 training.label_smoothing 0.039908681078405346 +107 43 model.embedding_dim 1.0 +107 43 model.hidden_dropout_rate 0.31317774094040773 +107 43 model.num_filters 7.0 +107 43 training.batch_size 1.0 +107 43 training.label_smoothing 0.023894050539261674 +107 44 model.embedding_dim 0.0 +107 44 model.hidden_dropout_rate 0.30074713604454784 +107 44 model.num_filters 8.0 +107 44 training.batch_size 0.0 +107 44 training.label_smoothing 0.006306502915495996 +107 45 model.embedding_dim 2.0 +107 45 model.hidden_dropout_rate 0.41017636784142175 +107 45 model.num_filters 5.0 +107 45 training.batch_size 1.0 +107 45 training.label_smoothing 0.03151699484966857 +107 1 dataset """kinships""" +107 1 model """convkb""" +107 1 loss """softplus""" +107 1 regularizer """no""" +107 1 optimizer """adadelta""" +107 1 training_loop """lcwa""" +107 1 evaluator """rankbased""" +107 2 dataset """kinships""" +107 2 model """convkb""" +107 2 loss """softplus""" +107 2 regularizer """no""" +107 2 optimizer 
"""adadelta""" +107 2 training_loop """lcwa""" +107 2 evaluator """rankbased""" +107 3 dataset """kinships""" +107 3 model """convkb""" +107 3 loss """softplus""" +107 3 regularizer """no""" +107 3 optimizer """adadelta""" +107 3 training_loop """lcwa""" +107 3 evaluator """rankbased""" +107 4 dataset """kinships""" +107 4 model """convkb""" +107 4 loss """softplus""" +107 4 regularizer """no""" +107 4 optimizer """adadelta""" +107 4 training_loop """lcwa""" +107 4 evaluator """rankbased""" +107 5 dataset """kinships""" +107 5 model """convkb""" +107 5 loss """softplus""" +107 5 regularizer """no""" +107 5 optimizer """adadelta""" +107 5 training_loop """lcwa""" +107 5 evaluator """rankbased""" +107 6 dataset """kinships""" +107 6 model """convkb""" +107 6 loss """softplus""" +107 6 regularizer """no""" +107 6 optimizer """adadelta""" +107 6 training_loop """lcwa""" +107 6 evaluator """rankbased""" +107 7 dataset """kinships""" +107 7 model """convkb""" +107 7 loss """softplus""" +107 7 regularizer """no""" +107 7 optimizer """adadelta""" +107 7 training_loop """lcwa""" +107 7 evaluator """rankbased""" +107 8 dataset """kinships""" +107 8 model """convkb""" +107 8 loss """softplus""" +107 8 regularizer """no""" +107 8 optimizer """adadelta""" +107 8 training_loop """lcwa""" +107 8 evaluator """rankbased""" +107 9 dataset """kinships""" +107 9 model """convkb""" +107 9 loss """softplus""" +107 9 regularizer """no""" +107 9 optimizer """adadelta""" +107 9 training_loop """lcwa""" +107 9 evaluator """rankbased""" +107 10 dataset """kinships""" +107 10 model """convkb""" +107 10 loss """softplus""" +107 10 regularizer """no""" +107 10 optimizer """adadelta""" +107 10 training_loop """lcwa""" +107 10 evaluator """rankbased""" +107 11 dataset """kinships""" +107 11 model """convkb""" +107 11 loss """softplus""" +107 11 regularizer """no""" +107 11 optimizer """adadelta""" +107 11 training_loop """lcwa""" +107 11 evaluator """rankbased""" +107 12 dataset """kinships""" 
+107 12 model """convkb""" +107 12 loss """softplus""" +107 12 regularizer """no""" +107 12 optimizer """adadelta""" +107 12 training_loop """lcwa""" +107 12 evaluator """rankbased""" +107 13 dataset """kinships""" +107 13 model """convkb""" +107 13 loss """softplus""" +107 13 regularizer """no""" +107 13 optimizer """adadelta""" +107 13 training_loop """lcwa""" +107 13 evaluator """rankbased""" +107 14 dataset """kinships""" +107 14 model """convkb""" +107 14 loss """softplus""" +107 14 regularizer """no""" +107 14 optimizer """adadelta""" +107 14 training_loop """lcwa""" +107 14 evaluator """rankbased""" +107 15 dataset """kinships""" +107 15 model """convkb""" +107 15 loss """softplus""" +107 15 regularizer """no""" +107 15 optimizer """adadelta""" +107 15 training_loop """lcwa""" +107 15 evaluator """rankbased""" +107 16 dataset """kinships""" +107 16 model """convkb""" +107 16 loss """softplus""" +107 16 regularizer """no""" +107 16 optimizer """adadelta""" +107 16 training_loop """lcwa""" +107 16 evaluator """rankbased""" +107 17 dataset """kinships""" +107 17 model """convkb""" +107 17 loss """softplus""" +107 17 regularizer """no""" +107 17 optimizer """adadelta""" +107 17 training_loop """lcwa""" +107 17 evaluator """rankbased""" +107 18 dataset """kinships""" +107 18 model """convkb""" +107 18 loss """softplus""" +107 18 regularizer """no""" +107 18 optimizer """adadelta""" +107 18 training_loop """lcwa""" +107 18 evaluator """rankbased""" +107 19 dataset """kinships""" +107 19 model """convkb""" +107 19 loss """softplus""" +107 19 regularizer """no""" +107 19 optimizer """adadelta""" +107 19 training_loop """lcwa""" +107 19 evaluator """rankbased""" +107 20 dataset """kinships""" +107 20 model """convkb""" +107 20 loss """softplus""" +107 20 regularizer """no""" +107 20 optimizer """adadelta""" +107 20 training_loop """lcwa""" +107 20 evaluator """rankbased""" +107 21 dataset """kinships""" +107 21 model """convkb""" +107 21 loss """softplus""" +107 21 
regularizer """no""" +107 21 optimizer """adadelta""" +107 21 training_loop """lcwa""" +107 21 evaluator """rankbased""" +107 22 dataset """kinships""" +107 22 model """convkb""" +107 22 loss """softplus""" +107 22 regularizer """no""" +107 22 optimizer """adadelta""" +107 22 training_loop """lcwa""" +107 22 evaluator """rankbased""" +107 23 dataset """kinships""" +107 23 model """convkb""" +107 23 loss """softplus""" +107 23 regularizer """no""" +107 23 optimizer """adadelta""" +107 23 training_loop """lcwa""" +107 23 evaluator """rankbased""" +107 24 dataset """kinships""" +107 24 model """convkb""" +107 24 loss """softplus""" +107 24 regularizer """no""" +107 24 optimizer """adadelta""" +107 24 training_loop """lcwa""" +107 24 evaluator """rankbased""" +107 25 dataset """kinships""" +107 25 model """convkb""" +107 25 loss """softplus""" +107 25 regularizer """no""" +107 25 optimizer """adadelta""" +107 25 training_loop """lcwa""" +107 25 evaluator """rankbased""" +107 26 dataset """kinships""" +107 26 model """convkb""" +107 26 loss """softplus""" +107 26 regularizer """no""" +107 26 optimizer """adadelta""" +107 26 training_loop """lcwa""" +107 26 evaluator """rankbased""" +107 27 dataset """kinships""" +107 27 model """convkb""" +107 27 loss """softplus""" +107 27 regularizer """no""" +107 27 optimizer """adadelta""" +107 27 training_loop """lcwa""" +107 27 evaluator """rankbased""" +107 28 dataset """kinships""" +107 28 model """convkb""" +107 28 loss """softplus""" +107 28 regularizer """no""" +107 28 optimizer """adadelta""" +107 28 training_loop """lcwa""" +107 28 evaluator """rankbased""" +107 29 dataset """kinships""" +107 29 model """convkb""" +107 29 loss """softplus""" +107 29 regularizer """no""" +107 29 optimizer """adadelta""" +107 29 training_loop """lcwa""" +107 29 evaluator """rankbased""" +107 30 dataset """kinships""" +107 30 model """convkb""" +107 30 loss """softplus""" +107 30 regularizer """no""" +107 30 optimizer """adadelta""" +107 30 
training_loop """lcwa""" +107 30 evaluator """rankbased""" +107 31 dataset """kinships""" +107 31 model """convkb""" +107 31 loss """softplus""" +107 31 regularizer """no""" +107 31 optimizer """adadelta""" +107 31 training_loop """lcwa""" +107 31 evaluator """rankbased""" +107 32 dataset """kinships""" +107 32 model """convkb""" +107 32 loss """softplus""" +107 32 regularizer """no""" +107 32 optimizer """adadelta""" +107 32 training_loop """lcwa""" +107 32 evaluator """rankbased""" +107 33 dataset """kinships""" +107 33 model """convkb""" +107 33 loss """softplus""" +107 33 regularizer """no""" +107 33 optimizer """adadelta""" +107 33 training_loop """lcwa""" +107 33 evaluator """rankbased""" +107 34 dataset """kinships""" +107 34 model """convkb""" +107 34 loss """softplus""" +107 34 regularizer """no""" +107 34 optimizer """adadelta""" +107 34 training_loop """lcwa""" +107 34 evaluator """rankbased""" +107 35 dataset """kinships""" +107 35 model """convkb""" +107 35 loss """softplus""" +107 35 regularizer """no""" +107 35 optimizer """adadelta""" +107 35 training_loop """lcwa""" +107 35 evaluator """rankbased""" +107 36 dataset """kinships""" +107 36 model """convkb""" +107 36 loss """softplus""" +107 36 regularizer """no""" +107 36 optimizer """adadelta""" +107 36 training_loop """lcwa""" +107 36 evaluator """rankbased""" +107 37 dataset """kinships""" +107 37 model """convkb""" +107 37 loss """softplus""" +107 37 regularizer """no""" +107 37 optimizer """adadelta""" +107 37 training_loop """lcwa""" +107 37 evaluator """rankbased""" +107 38 dataset """kinships""" +107 38 model """convkb""" +107 38 loss """softplus""" +107 38 regularizer """no""" +107 38 optimizer """adadelta""" +107 38 training_loop """lcwa""" +107 38 evaluator """rankbased""" +107 39 dataset """kinships""" +107 39 model """convkb""" +107 39 loss """softplus""" +107 39 regularizer """no""" +107 39 optimizer """adadelta""" +107 39 training_loop """lcwa""" +107 39 evaluator """rankbased""" +107 
40 dataset """kinships""" +107 40 model """convkb""" +107 40 loss """softplus""" +107 40 regularizer """no""" +107 40 optimizer """adadelta""" +107 40 training_loop """lcwa""" +107 40 evaluator """rankbased""" +107 41 dataset """kinships""" +107 41 model """convkb""" +107 41 loss """softplus""" +107 41 regularizer """no""" +107 41 optimizer """adadelta""" +107 41 training_loop """lcwa""" +107 41 evaluator """rankbased""" +107 42 dataset """kinships""" +107 42 model """convkb""" +107 42 loss """softplus""" +107 42 regularizer """no""" +107 42 optimizer """adadelta""" +107 42 training_loop """lcwa""" +107 42 evaluator """rankbased""" +107 43 dataset """kinships""" +107 43 model """convkb""" +107 43 loss """softplus""" +107 43 regularizer """no""" +107 43 optimizer """adadelta""" +107 43 training_loop """lcwa""" +107 43 evaluator """rankbased""" +107 44 dataset """kinships""" +107 44 model """convkb""" +107 44 loss """softplus""" +107 44 regularizer """no""" +107 44 optimizer """adadelta""" +107 44 training_loop """lcwa""" +107 44 evaluator """rankbased""" +107 45 dataset """kinships""" +107 45 model """convkb""" +107 45 loss """softplus""" +107 45 regularizer """no""" +107 45 optimizer """adadelta""" +107 45 training_loop """lcwa""" +107 45 evaluator """rankbased""" +108 1 model.embedding_dim 0.0 +108 1 model.hidden_dropout_rate 0.20128508673701073 +108 1 model.num_filters 9.0 +108 1 training.batch_size 0.0 +108 1 training.label_smoothing 0.3398579745344787 +108 2 model.embedding_dim 1.0 +108 2 model.hidden_dropout_rate 0.3326140801971782 +108 2 model.num_filters 3.0 +108 2 training.batch_size 0.0 +108 2 training.label_smoothing 0.2717438297548743 +108 3 model.embedding_dim 0.0 +108 3 model.hidden_dropout_rate 0.3316812371045691 +108 3 model.num_filters 6.0 +108 3 training.batch_size 1.0 +108 3 training.label_smoothing 0.024602809414490723 +108 4 model.embedding_dim 1.0 +108 4 model.hidden_dropout_rate 0.39714783905798245 +108 4 model.num_filters 9.0 +108 4 
training.batch_size 2.0 +108 4 training.label_smoothing 0.39034730593150985 +108 5 model.embedding_dim 2.0 +108 5 model.hidden_dropout_rate 0.4385073814495515 +108 5 model.num_filters 8.0 +108 5 training.batch_size 1.0 +108 5 training.label_smoothing 0.0010058271379758794 +108 6 model.embedding_dim 0.0 +108 6 model.hidden_dropout_rate 0.4200843474017678 +108 6 model.num_filters 1.0 +108 6 training.batch_size 2.0 +108 6 training.label_smoothing 0.23535345328191246 +108 7 model.embedding_dim 2.0 +108 7 model.hidden_dropout_rate 0.3753232087899553 +108 7 model.num_filters 8.0 +108 7 training.batch_size 0.0 +108 7 training.label_smoothing 0.025168698512371086 +108 8 model.embedding_dim 2.0 +108 8 model.hidden_dropout_rate 0.14870586641329442 +108 8 model.num_filters 0.0 +108 8 training.batch_size 2.0 +108 8 training.label_smoothing 0.3866357076436703 +108 9 model.embedding_dim 2.0 +108 9 model.hidden_dropout_rate 0.21922501875247358 +108 9 model.num_filters 3.0 +108 9 training.batch_size 2.0 +108 9 training.label_smoothing 0.0033882571858351233 +108 10 model.embedding_dim 0.0 +108 10 model.hidden_dropout_rate 0.4489365424936106 +108 10 model.num_filters 3.0 +108 10 training.batch_size 1.0 +108 10 training.label_smoothing 0.015503881863477184 +108 11 model.embedding_dim 1.0 +108 11 model.hidden_dropout_rate 0.46789304693224343 +108 11 model.num_filters 7.0 +108 11 training.batch_size 2.0 +108 11 training.label_smoothing 0.012931851200153239 +108 12 model.embedding_dim 1.0 +108 12 model.hidden_dropout_rate 0.10788276350775959 +108 12 model.num_filters 7.0 +108 12 training.batch_size 2.0 +108 12 training.label_smoothing 0.6083122353223146 +108 13 model.embedding_dim 2.0 +108 13 model.hidden_dropout_rate 0.3945817075081465 +108 13 model.num_filters 9.0 +108 13 training.batch_size 1.0 +108 13 training.label_smoothing 0.0040584912193300645 +108 14 model.embedding_dim 2.0 +108 14 model.hidden_dropout_rate 0.43132638076755214 +108 14 model.num_filters 9.0 +108 14 
training.batch_size 2.0 +108 14 training.label_smoothing 0.0027940645018996765 +108 15 model.embedding_dim 0.0 +108 15 model.hidden_dropout_rate 0.20569133454794702 +108 15 model.num_filters 2.0 +108 15 training.batch_size 2.0 +108 15 training.label_smoothing 0.04251466383309923 +108 16 model.embedding_dim 0.0 +108 16 model.hidden_dropout_rate 0.31318544151392136 +108 16 model.num_filters 9.0 +108 16 training.batch_size 0.0 +108 16 training.label_smoothing 0.012886754711615033 +108 17 model.embedding_dim 2.0 +108 17 model.hidden_dropout_rate 0.47839685510434815 +108 17 model.num_filters 1.0 +108 17 training.batch_size 1.0 +108 17 training.label_smoothing 0.0011910146812686695 +108 18 model.embedding_dim 1.0 +108 18 model.hidden_dropout_rate 0.3117285158762566 +108 18 model.num_filters 8.0 +108 18 training.batch_size 0.0 +108 18 training.label_smoothing 0.15447777349649225 +108 19 model.embedding_dim 1.0 +108 19 model.hidden_dropout_rate 0.25807134740626836 +108 19 model.num_filters 4.0 +108 19 training.batch_size 2.0 +108 19 training.label_smoothing 0.0016828748279638273 +108 20 model.embedding_dim 2.0 +108 20 model.hidden_dropout_rate 0.19540299291654326 +108 20 model.num_filters 9.0 +108 20 training.batch_size 0.0 +108 20 training.label_smoothing 0.0504300000769703 +108 21 model.embedding_dim 2.0 +108 21 model.hidden_dropout_rate 0.321352523370575 +108 21 model.num_filters 3.0 +108 21 training.batch_size 0.0 +108 21 training.label_smoothing 0.05172175397050018 +108 22 model.embedding_dim 0.0 +108 22 model.hidden_dropout_rate 0.49376880849487714 +108 22 model.num_filters 8.0 +108 22 training.batch_size 0.0 +108 22 training.label_smoothing 0.0018768729619353713 +108 23 model.embedding_dim 0.0 +108 23 model.hidden_dropout_rate 0.20952814629364594 +108 23 model.num_filters 6.0 +108 23 training.batch_size 2.0 +108 23 training.label_smoothing 0.5699501651142641 +108 24 model.embedding_dim 1.0 +108 24 model.hidden_dropout_rate 0.30684736494704146 +108 24 
model.num_filters 0.0 +108 24 training.batch_size 0.0 +108 24 training.label_smoothing 0.45237474756444745 +108 25 model.embedding_dim 0.0 +108 25 model.hidden_dropout_rate 0.20803400307627395 +108 25 model.num_filters 2.0 +108 25 training.batch_size 1.0 +108 25 training.label_smoothing 0.5330488818493226 +108 26 model.embedding_dim 1.0 +108 26 model.hidden_dropout_rate 0.40161554320926907 +108 26 model.num_filters 6.0 +108 26 training.batch_size 0.0 +108 26 training.label_smoothing 0.0016361209427764886 +108 27 model.embedding_dim 2.0 +108 27 model.hidden_dropout_rate 0.4273585839628615 +108 27 model.num_filters 6.0 +108 27 training.batch_size 0.0 +108 27 training.label_smoothing 0.5580123379345981 +108 28 model.embedding_dim 2.0 +108 28 model.hidden_dropout_rate 0.26462863148534865 +108 28 model.num_filters 3.0 +108 28 training.batch_size 0.0 +108 28 training.label_smoothing 0.014894898298288942 +108 29 model.embedding_dim 2.0 +108 29 model.hidden_dropout_rate 0.2097448674279886 +108 29 model.num_filters 1.0 +108 29 training.batch_size 2.0 +108 29 training.label_smoothing 0.1690268842036478 +108 30 model.embedding_dim 0.0 +108 30 model.hidden_dropout_rate 0.3395152814687801 +108 30 model.num_filters 2.0 +108 30 training.batch_size 1.0 +108 30 training.label_smoothing 0.38589296628780984 +108 31 model.embedding_dim 1.0 +108 31 model.hidden_dropout_rate 0.28780147832307024 +108 31 model.num_filters 6.0 +108 31 training.batch_size 0.0 +108 31 training.label_smoothing 0.001462822620450268 +108 32 model.embedding_dim 2.0 +108 32 model.hidden_dropout_rate 0.285148303958525 +108 32 model.num_filters 9.0 +108 32 training.batch_size 0.0 +108 32 training.label_smoothing 0.0011747515189178047 +108 33 model.embedding_dim 0.0 +108 33 model.hidden_dropout_rate 0.3429329452090645 +108 33 model.num_filters 2.0 +108 33 training.batch_size 1.0 +108 33 training.label_smoothing 0.11444096321112923 +108 34 model.embedding_dim 1.0 +108 34 model.hidden_dropout_rate 0.180239507703735 
+108 34 model.num_filters 2.0 +108 34 training.batch_size 2.0 +108 34 training.label_smoothing 0.6161064200378841 +108 35 model.embedding_dim 2.0 +108 35 model.hidden_dropout_rate 0.15561294669542916 +108 35 model.num_filters 6.0 +108 35 training.batch_size 0.0 +108 35 training.label_smoothing 0.02070064489523789 +108 36 model.embedding_dim 2.0 +108 36 model.hidden_dropout_rate 0.3459808032770828 +108 36 model.num_filters 6.0 +108 36 training.batch_size 0.0 +108 36 training.label_smoothing 0.6940712395772691 +108 37 model.embedding_dim 2.0 +108 37 model.hidden_dropout_rate 0.2502543053681906 +108 37 model.num_filters 9.0 +108 37 training.batch_size 1.0 +108 37 training.label_smoothing 0.01926522111407966 +108 38 model.embedding_dim 2.0 +108 38 model.hidden_dropout_rate 0.3844655964001672 +108 38 model.num_filters 1.0 +108 38 training.batch_size 0.0 +108 38 training.label_smoothing 0.027735338974275592 +108 39 model.embedding_dim 1.0 +108 39 model.hidden_dropout_rate 0.3227131525245638 +108 39 model.num_filters 3.0 +108 39 training.batch_size 2.0 +108 39 training.label_smoothing 0.05648272279078052 +108 40 model.embedding_dim 2.0 +108 40 model.hidden_dropout_rate 0.2676428751585398 +108 40 model.num_filters 7.0 +108 40 training.batch_size 0.0 +108 40 training.label_smoothing 0.05403485179856742 +108 41 model.embedding_dim 1.0 +108 41 model.hidden_dropout_rate 0.15030747343589881 +108 41 model.num_filters 7.0 +108 41 training.batch_size 1.0 +108 41 training.label_smoothing 0.03464415135066034 +108 42 model.embedding_dim 1.0 +108 42 model.hidden_dropout_rate 0.2560475140823082 +108 42 model.num_filters 2.0 +108 42 training.batch_size 0.0 +108 42 training.label_smoothing 0.10855271170511668 +108 43 model.embedding_dim 2.0 +108 43 model.hidden_dropout_rate 0.16993553745300166 +108 43 model.num_filters 9.0 +108 43 training.batch_size 1.0 +108 43 training.label_smoothing 0.1163073988086855 +108 44 model.embedding_dim 1.0 +108 44 model.hidden_dropout_rate 
0.3503205676998131 +108 44 model.num_filters 3.0 +108 44 training.batch_size 0.0 +108 44 training.label_smoothing 0.0036965256250639 +108 45 model.embedding_dim 0.0 +108 45 model.hidden_dropout_rate 0.3898572355426476 +108 45 model.num_filters 7.0 +108 45 training.batch_size 0.0 +108 45 training.label_smoothing 0.0027935611916521377 +108 46 model.embedding_dim 1.0 +108 46 model.hidden_dropout_rate 0.37787297882474435 +108 46 model.num_filters 8.0 +108 46 training.batch_size 0.0 +108 46 training.label_smoothing 0.005629036196168275 +108 47 model.embedding_dim 2.0 +108 47 model.hidden_dropout_rate 0.351456017412196 +108 47 model.num_filters 3.0 +108 47 training.batch_size 2.0 +108 47 training.label_smoothing 0.24572687892777012 +108 48 model.embedding_dim 1.0 +108 48 model.hidden_dropout_rate 0.43035424064486016 +108 48 model.num_filters 0.0 +108 48 training.batch_size 1.0 +108 48 training.label_smoothing 0.5076900675205765 +108 49 model.embedding_dim 2.0 +108 49 model.hidden_dropout_rate 0.11438682705832459 +108 49 model.num_filters 5.0 +108 49 training.batch_size 2.0 +108 49 training.label_smoothing 0.03758602435585713 +108 50 model.embedding_dim 0.0 +108 50 model.hidden_dropout_rate 0.26660182098084234 +108 50 model.num_filters 9.0 +108 50 training.batch_size 1.0 +108 50 training.label_smoothing 0.12897398226454415 +108 51 model.embedding_dim 1.0 +108 51 model.hidden_dropout_rate 0.2525955287401246 +108 51 model.num_filters 5.0 +108 51 training.batch_size 2.0 +108 51 training.label_smoothing 0.13093586396860296 +108 52 model.embedding_dim 1.0 +108 52 model.hidden_dropout_rate 0.2239955450373768 +108 52 model.num_filters 0.0 +108 52 training.batch_size 2.0 +108 52 training.label_smoothing 0.7174393028442192 +108 53 model.embedding_dim 1.0 +108 53 model.hidden_dropout_rate 0.29724586429411703 +108 53 model.num_filters 1.0 +108 53 training.batch_size 1.0 +108 53 training.label_smoothing 0.05706073975789145 +108 54 model.embedding_dim 1.0 +108 54 
model.hidden_dropout_rate 0.3822428828847292 +108 54 model.num_filters 4.0 +108 54 training.batch_size 2.0 +108 54 training.label_smoothing 0.009581489899887101 +108 55 model.embedding_dim 2.0 +108 55 model.hidden_dropout_rate 0.3415016588686116 +108 55 model.num_filters 8.0 +108 55 training.batch_size 2.0 +108 55 training.label_smoothing 0.0015160506458607832 +108 56 model.embedding_dim 2.0 +108 56 model.hidden_dropout_rate 0.4279262696535975 +108 56 model.num_filters 8.0 +108 56 training.batch_size 0.0 +108 56 training.label_smoothing 0.004734391266777376 +108 57 model.embedding_dim 0.0 +108 57 model.hidden_dropout_rate 0.11828407683150571 +108 57 model.num_filters 7.0 +108 57 training.batch_size 0.0 +108 57 training.label_smoothing 0.005209953813940065 +108 58 model.embedding_dim 0.0 +108 58 model.hidden_dropout_rate 0.2714840632437696 +108 58 model.num_filters 0.0 +108 58 training.batch_size 0.0 +108 58 training.label_smoothing 0.021544843479843763 +108 59 model.embedding_dim 0.0 +108 59 model.hidden_dropout_rate 0.4079209519357495 +108 59 model.num_filters 5.0 +108 59 training.batch_size 0.0 +108 59 training.label_smoothing 0.6942195602651086 +108 60 model.embedding_dim 2.0 +108 60 model.hidden_dropout_rate 0.4729012040588576 +108 60 model.num_filters 5.0 +108 60 training.batch_size 0.0 +108 60 training.label_smoothing 0.005595437947653012 +108 61 model.embedding_dim 0.0 +108 61 model.hidden_dropout_rate 0.1175124446570056 +108 61 model.num_filters 7.0 +108 61 training.batch_size 2.0 +108 61 training.label_smoothing 0.06123686383535955 +108 62 model.embedding_dim 2.0 +108 62 model.hidden_dropout_rate 0.2637168742269845 +108 62 model.num_filters 4.0 +108 62 training.batch_size 0.0 +108 62 training.label_smoothing 0.032971357653868855 +108 63 model.embedding_dim 1.0 +108 63 model.hidden_dropout_rate 0.1783611296404605 +108 63 model.num_filters 2.0 +108 63 training.batch_size 2.0 +108 63 training.label_smoothing 0.03629630214843603 +108 64 model.embedding_dim 1.0 
+108 64 model.hidden_dropout_rate 0.16301631166147473 +108 64 model.num_filters 0.0 +108 64 training.batch_size 2.0 +108 64 training.label_smoothing 0.02091768792089245 +108 65 model.embedding_dim 2.0 +108 65 model.hidden_dropout_rate 0.2776292141645348 +108 65 model.num_filters 7.0 +108 65 training.batch_size 1.0 +108 65 training.label_smoothing 0.003173370312804064 +108 66 model.embedding_dim 1.0 +108 66 model.hidden_dropout_rate 0.27200919442809934 +108 66 model.num_filters 7.0 +108 66 training.batch_size 2.0 +108 66 training.label_smoothing 0.5422832639230265 +108 67 model.embedding_dim 2.0 +108 67 model.hidden_dropout_rate 0.4652855539681431 +108 67 model.num_filters 3.0 +108 67 training.batch_size 0.0 +108 67 training.label_smoothing 0.002665446847676952 +108 68 model.embedding_dim 0.0 +108 68 model.hidden_dropout_rate 0.1830195592162082 +108 68 model.num_filters 2.0 +108 68 training.batch_size 0.0 +108 68 training.label_smoothing 0.005628178345645097 +108 69 model.embedding_dim 1.0 +108 69 model.hidden_dropout_rate 0.11514358139546901 +108 69 model.num_filters 3.0 +108 69 training.batch_size 2.0 +108 69 training.label_smoothing 0.026373467688688097 +108 70 model.embedding_dim 2.0 +108 70 model.hidden_dropout_rate 0.2940877678577672 +108 70 model.num_filters 2.0 +108 70 training.batch_size 1.0 +108 70 training.label_smoothing 0.002114658492693641 +108 71 model.embedding_dim 2.0 +108 71 model.hidden_dropout_rate 0.2189378887161583 +108 71 model.num_filters 7.0 +108 71 training.batch_size 0.0 +108 71 training.label_smoothing 0.0018158914568874274 +108 1 dataset """kinships""" +108 1 model """convkb""" +108 1 loss """bceaftersigmoid""" +108 1 regularizer """no""" +108 1 optimizer """adadelta""" +108 1 training_loop """lcwa""" +108 1 evaluator """rankbased""" +108 2 dataset """kinships""" +108 2 model """convkb""" +108 2 loss """bceaftersigmoid""" +108 2 regularizer """no""" +108 2 optimizer """adadelta""" +108 2 training_loop """lcwa""" +108 2 evaluator 
"""rankbased""" +108 3 dataset """kinships""" +108 3 model """convkb""" +108 3 loss """bceaftersigmoid""" +108 3 regularizer """no""" +108 3 optimizer """adadelta""" +108 3 training_loop """lcwa""" +108 3 evaluator """rankbased""" +108 4 dataset """kinships""" +108 4 model """convkb""" +108 4 loss """bceaftersigmoid""" +108 4 regularizer """no""" +108 4 optimizer """adadelta""" +108 4 training_loop """lcwa""" +108 4 evaluator """rankbased""" +108 5 dataset """kinships""" +108 5 model """convkb""" +108 5 loss """bceaftersigmoid""" +108 5 regularizer """no""" +108 5 optimizer """adadelta""" +108 5 training_loop """lcwa""" +108 5 evaluator """rankbased""" +108 6 dataset """kinships""" +108 6 model """convkb""" +108 6 loss """bceaftersigmoid""" +108 6 regularizer """no""" +108 6 optimizer """adadelta""" +108 6 training_loop """lcwa""" +108 6 evaluator """rankbased""" +108 7 dataset """kinships""" +108 7 model """convkb""" +108 7 loss """bceaftersigmoid""" +108 7 regularizer """no""" +108 7 optimizer """adadelta""" +108 7 training_loop """lcwa""" +108 7 evaluator """rankbased""" +108 8 dataset """kinships""" +108 8 model """convkb""" +108 8 loss """bceaftersigmoid""" +108 8 regularizer """no""" +108 8 optimizer """adadelta""" +108 8 training_loop """lcwa""" +108 8 evaluator """rankbased""" +108 9 dataset """kinships""" +108 9 model """convkb""" +108 9 loss """bceaftersigmoid""" +108 9 regularizer """no""" +108 9 optimizer """adadelta""" +108 9 training_loop """lcwa""" +108 9 evaluator """rankbased""" +108 10 dataset """kinships""" +108 10 model """convkb""" +108 10 loss """bceaftersigmoid""" +108 10 regularizer """no""" +108 10 optimizer """adadelta""" +108 10 training_loop """lcwa""" +108 10 evaluator """rankbased""" +108 11 dataset """kinships""" +108 11 model """convkb""" +108 11 loss """bceaftersigmoid""" +108 11 regularizer """no""" +108 11 optimizer """adadelta""" +108 11 training_loop """lcwa""" +108 11 evaluator """rankbased""" +108 12 dataset """kinships""" 
+108 12 model """convkb""" +108 12 loss """bceaftersigmoid""" +108 12 regularizer """no""" +108 12 optimizer """adadelta""" +108 12 training_loop """lcwa""" +108 12 evaluator """rankbased""" +108 13 dataset """kinships""" +108 13 model """convkb""" +108 13 loss """bceaftersigmoid""" +108 13 regularizer """no""" +108 13 optimizer """adadelta""" +108 13 training_loop """lcwa""" +108 13 evaluator """rankbased""" +108 14 dataset """kinships""" +108 14 model """convkb""" +108 14 loss """bceaftersigmoid""" +108 14 regularizer """no""" +108 14 optimizer """adadelta""" +108 14 training_loop """lcwa""" +108 14 evaluator """rankbased""" +108 15 dataset """kinships""" +108 15 model """convkb""" +108 15 loss """bceaftersigmoid""" +108 15 regularizer """no""" +108 15 optimizer """adadelta""" +108 15 training_loop """lcwa""" +108 15 evaluator """rankbased""" +108 16 dataset """kinships""" +108 16 model """convkb""" +108 16 loss """bceaftersigmoid""" +108 16 regularizer """no""" +108 16 optimizer """adadelta""" +108 16 training_loop """lcwa""" +108 16 evaluator """rankbased""" +108 17 dataset """kinships""" +108 17 model """convkb""" +108 17 loss """bceaftersigmoid""" +108 17 regularizer """no""" +108 17 optimizer """adadelta""" +108 17 training_loop """lcwa""" +108 17 evaluator """rankbased""" +108 18 dataset """kinships""" +108 18 model """convkb""" +108 18 loss """bceaftersigmoid""" +108 18 regularizer """no""" +108 18 optimizer """adadelta""" +108 18 training_loop """lcwa""" +108 18 evaluator """rankbased""" +108 19 dataset """kinships""" +108 19 model """convkb""" +108 19 loss """bceaftersigmoid""" +108 19 regularizer """no""" +108 19 optimizer """adadelta""" +108 19 training_loop """lcwa""" +108 19 evaluator """rankbased""" +108 20 dataset """kinships""" +108 20 model """convkb""" +108 20 loss """bceaftersigmoid""" +108 20 regularizer """no""" +108 20 optimizer """adadelta""" +108 20 training_loop """lcwa""" +108 20 evaluator """rankbased""" +108 21 dataset """kinships""" 
+108 21 model """convkb""" +108 21 loss """bceaftersigmoid""" +108 21 regularizer """no""" +108 21 optimizer """adadelta""" +108 21 training_loop """lcwa""" +108 21 evaluator """rankbased""" +108 22 dataset """kinships""" +108 22 model """convkb""" +108 22 loss """bceaftersigmoid""" +108 22 regularizer """no""" +108 22 optimizer """adadelta""" +108 22 training_loop """lcwa""" +108 22 evaluator """rankbased""" +108 23 dataset """kinships""" +108 23 model """convkb""" +108 23 loss """bceaftersigmoid""" +108 23 regularizer """no""" +108 23 optimizer """adadelta""" +108 23 training_loop """lcwa""" +108 23 evaluator """rankbased""" +108 24 dataset """kinships""" +108 24 model """convkb""" +108 24 loss """bceaftersigmoid""" +108 24 regularizer """no""" +108 24 optimizer """adadelta""" +108 24 training_loop """lcwa""" +108 24 evaluator """rankbased""" +108 25 dataset """kinships""" +108 25 model """convkb""" +108 25 loss """bceaftersigmoid""" +108 25 regularizer """no""" +108 25 optimizer """adadelta""" +108 25 training_loop """lcwa""" +108 25 evaluator """rankbased""" +108 26 dataset """kinships""" +108 26 model """convkb""" +108 26 loss """bceaftersigmoid""" +108 26 regularizer """no""" +108 26 optimizer """adadelta""" +108 26 training_loop """lcwa""" +108 26 evaluator """rankbased""" +108 27 dataset """kinships""" +108 27 model """convkb""" +108 27 loss """bceaftersigmoid""" +108 27 regularizer """no""" +108 27 optimizer """adadelta""" +108 27 training_loop """lcwa""" +108 27 evaluator """rankbased""" +108 28 dataset """kinships""" +108 28 model """convkb""" +108 28 loss """bceaftersigmoid""" +108 28 regularizer """no""" +108 28 optimizer """adadelta""" +108 28 training_loop """lcwa""" +108 28 evaluator """rankbased""" +108 29 dataset """kinships""" +108 29 model """convkb""" +108 29 loss """bceaftersigmoid""" +108 29 regularizer """no""" +108 29 optimizer """adadelta""" +108 29 training_loop """lcwa""" +108 29 evaluator """rankbased""" +108 30 dataset """kinships""" 
+108 30 model """convkb""" +108 30 loss """bceaftersigmoid""" +108 30 regularizer """no""" +108 30 optimizer """adadelta""" +108 30 training_loop """lcwa""" +108 30 evaluator """rankbased""" +108 31 dataset """kinships""" +108 31 model """convkb""" +108 31 loss """bceaftersigmoid""" +108 31 regularizer """no""" +108 31 optimizer """adadelta""" +108 31 training_loop """lcwa""" +108 31 evaluator """rankbased""" +108 32 dataset """kinships""" +108 32 model """convkb""" +108 32 loss """bceaftersigmoid""" +108 32 regularizer """no""" +108 32 optimizer """adadelta""" +108 32 training_loop """lcwa""" +108 32 evaluator """rankbased""" +108 33 dataset """kinships""" +108 33 model """convkb""" +108 33 loss """bceaftersigmoid""" +108 33 regularizer """no""" +108 33 optimizer """adadelta""" +108 33 training_loop """lcwa""" +108 33 evaluator """rankbased""" +108 34 dataset """kinships""" +108 34 model """convkb""" +108 34 loss """bceaftersigmoid""" +108 34 regularizer """no""" +108 34 optimizer """adadelta""" +108 34 training_loop """lcwa""" +108 34 evaluator """rankbased""" +108 35 dataset """kinships""" +108 35 model """convkb""" +108 35 loss """bceaftersigmoid""" +108 35 regularizer """no""" +108 35 optimizer """adadelta""" +108 35 training_loop """lcwa""" +108 35 evaluator """rankbased""" +108 36 dataset """kinships""" +108 36 model """convkb""" +108 36 loss """bceaftersigmoid""" +108 36 regularizer """no""" +108 36 optimizer """adadelta""" +108 36 training_loop """lcwa""" +108 36 evaluator """rankbased""" +108 37 dataset """kinships""" +108 37 model """convkb""" +108 37 loss """bceaftersigmoid""" +108 37 regularizer """no""" +108 37 optimizer """adadelta""" +108 37 training_loop """lcwa""" +108 37 evaluator """rankbased""" +108 38 dataset """kinships""" +108 38 model """convkb""" +108 38 loss """bceaftersigmoid""" +108 38 regularizer """no""" +108 38 optimizer """adadelta""" +108 38 training_loop """lcwa""" +108 38 evaluator """rankbased""" +108 39 dataset """kinships""" 
+108 39 model """convkb""" +108 39 loss """bceaftersigmoid""" +108 39 regularizer """no""" +108 39 optimizer """adadelta""" +108 39 training_loop """lcwa""" +108 39 evaluator """rankbased""" +108 40 dataset """kinships""" +108 40 model """convkb""" +108 40 loss """bceaftersigmoid""" +108 40 regularizer """no""" +108 40 optimizer """adadelta""" +108 40 training_loop """lcwa""" +108 40 evaluator """rankbased""" +108 41 dataset """kinships""" +108 41 model """convkb""" +108 41 loss """bceaftersigmoid""" +108 41 regularizer """no""" +108 41 optimizer """adadelta""" +108 41 training_loop """lcwa""" +108 41 evaluator """rankbased""" +108 42 dataset """kinships""" +108 42 model """convkb""" +108 42 loss """bceaftersigmoid""" +108 42 regularizer """no""" +108 42 optimizer """adadelta""" +108 42 training_loop """lcwa""" +108 42 evaluator """rankbased""" +108 43 dataset """kinships""" +108 43 model """convkb""" +108 43 loss """bceaftersigmoid""" +108 43 regularizer """no""" +108 43 optimizer """adadelta""" +108 43 training_loop """lcwa""" +108 43 evaluator """rankbased""" +108 44 dataset """kinships""" +108 44 model """convkb""" +108 44 loss """bceaftersigmoid""" +108 44 regularizer """no""" +108 44 optimizer """adadelta""" +108 44 training_loop """lcwa""" +108 44 evaluator """rankbased""" +108 45 dataset """kinships""" +108 45 model """convkb""" +108 45 loss """bceaftersigmoid""" +108 45 regularizer """no""" +108 45 optimizer """adadelta""" +108 45 training_loop """lcwa""" +108 45 evaluator """rankbased""" +108 46 dataset """kinships""" +108 46 model """convkb""" +108 46 loss """bceaftersigmoid""" +108 46 regularizer """no""" +108 46 optimizer """adadelta""" +108 46 training_loop """lcwa""" +108 46 evaluator """rankbased""" +108 47 dataset """kinships""" +108 47 model """convkb""" +108 47 loss """bceaftersigmoid""" +108 47 regularizer """no""" +108 47 optimizer """adadelta""" +108 47 training_loop """lcwa""" +108 47 evaluator """rankbased""" +108 48 dataset """kinships""" 
+108 48 model """convkb""" +108 48 loss """bceaftersigmoid""" +108 48 regularizer """no""" +108 48 optimizer """adadelta""" +108 48 training_loop """lcwa""" +108 48 evaluator """rankbased""" +108 49 dataset """kinships""" +108 49 model """convkb""" +108 49 loss """bceaftersigmoid""" +108 49 regularizer """no""" +108 49 optimizer """adadelta""" +108 49 training_loop """lcwa""" +108 49 evaluator """rankbased""" +108 50 dataset """kinships""" +108 50 model """convkb""" +108 50 loss """bceaftersigmoid""" +108 50 regularizer """no""" +108 50 optimizer """adadelta""" +108 50 training_loop """lcwa""" +108 50 evaluator """rankbased""" +108 51 dataset """kinships""" +108 51 model """convkb""" +108 51 loss """bceaftersigmoid""" +108 51 regularizer """no""" +108 51 optimizer """adadelta""" +108 51 training_loop """lcwa""" +108 51 evaluator """rankbased""" +108 52 dataset """kinships""" +108 52 model """convkb""" +108 52 loss """bceaftersigmoid""" +108 52 regularizer """no""" +108 52 optimizer """adadelta""" +108 52 training_loop """lcwa""" +108 52 evaluator """rankbased""" +108 53 dataset """kinships""" +108 53 model """convkb""" +108 53 loss """bceaftersigmoid""" +108 53 regularizer """no""" +108 53 optimizer """adadelta""" +108 53 training_loop """lcwa""" +108 53 evaluator """rankbased""" +108 54 dataset """kinships""" +108 54 model """convkb""" +108 54 loss """bceaftersigmoid""" +108 54 regularizer """no""" +108 54 optimizer """adadelta""" +108 54 training_loop """lcwa""" +108 54 evaluator """rankbased""" +108 55 dataset """kinships""" +108 55 model """convkb""" +108 55 loss """bceaftersigmoid""" +108 55 regularizer """no""" +108 55 optimizer """adadelta""" +108 55 training_loop """lcwa""" +108 55 evaluator """rankbased""" +108 56 dataset """kinships""" +108 56 model """convkb""" +108 56 loss """bceaftersigmoid""" +108 56 regularizer """no""" +108 56 optimizer """adadelta""" +108 56 training_loop """lcwa""" +108 56 evaluator """rankbased""" +108 57 dataset """kinships""" 
+108 57 model """convkb""" +108 57 loss """bceaftersigmoid""" +108 57 regularizer """no""" +108 57 optimizer """adadelta""" +108 57 training_loop """lcwa""" +108 57 evaluator """rankbased""" +108 58 dataset """kinships""" +108 58 model """convkb""" +108 58 loss """bceaftersigmoid""" +108 58 regularizer """no""" +108 58 optimizer """adadelta""" +108 58 training_loop """lcwa""" +108 58 evaluator """rankbased""" +108 59 dataset """kinships""" +108 59 model """convkb""" +108 59 loss """bceaftersigmoid""" +108 59 regularizer """no""" +108 59 optimizer """adadelta""" +108 59 training_loop """lcwa""" +108 59 evaluator """rankbased""" +108 60 dataset """kinships""" +108 60 model """convkb""" +108 60 loss """bceaftersigmoid""" +108 60 regularizer """no""" +108 60 optimizer """adadelta""" +108 60 training_loop """lcwa""" +108 60 evaluator """rankbased""" +108 61 dataset """kinships""" +108 61 model """convkb""" +108 61 loss """bceaftersigmoid""" +108 61 regularizer """no""" +108 61 optimizer """adadelta""" +108 61 training_loop """lcwa""" +108 61 evaluator """rankbased""" +108 62 dataset """kinships""" +108 62 model """convkb""" +108 62 loss """bceaftersigmoid""" +108 62 regularizer """no""" +108 62 optimizer """adadelta""" +108 62 training_loop """lcwa""" +108 62 evaluator """rankbased""" +108 63 dataset """kinships""" +108 63 model """convkb""" +108 63 loss """bceaftersigmoid""" +108 63 regularizer """no""" +108 63 optimizer """adadelta""" +108 63 training_loop """lcwa""" +108 63 evaluator """rankbased""" +108 64 dataset """kinships""" +108 64 model """convkb""" +108 64 loss """bceaftersigmoid""" +108 64 regularizer """no""" +108 64 optimizer """adadelta""" +108 64 training_loop """lcwa""" +108 64 evaluator """rankbased""" +108 65 dataset """kinships""" +108 65 model """convkb""" +108 65 loss """bceaftersigmoid""" +108 65 regularizer """no""" +108 65 optimizer """adadelta""" +108 65 training_loop """lcwa""" +108 65 evaluator """rankbased""" +108 66 dataset """kinships""" 
+108 66 model """convkb""" +108 66 loss """bceaftersigmoid""" +108 66 regularizer """no""" +108 66 optimizer """adadelta""" +108 66 training_loop """lcwa""" +108 66 evaluator """rankbased""" +108 67 dataset """kinships""" +108 67 model """convkb""" +108 67 loss """bceaftersigmoid""" +108 67 regularizer """no""" +108 67 optimizer """adadelta""" +108 67 training_loop """lcwa""" +108 67 evaluator """rankbased""" +108 68 dataset """kinships""" +108 68 model """convkb""" +108 68 loss """bceaftersigmoid""" +108 68 regularizer """no""" +108 68 optimizer """adadelta""" +108 68 training_loop """lcwa""" +108 68 evaluator """rankbased""" +108 69 dataset """kinships""" +108 69 model """convkb""" +108 69 loss """bceaftersigmoid""" +108 69 regularizer """no""" +108 69 optimizer """adadelta""" +108 69 training_loop """lcwa""" +108 69 evaluator """rankbased""" +108 70 dataset """kinships""" +108 70 model """convkb""" +108 70 loss """bceaftersigmoid""" +108 70 regularizer """no""" +108 70 optimizer """adadelta""" +108 70 training_loop """lcwa""" +108 70 evaluator """rankbased""" +108 71 dataset """kinships""" +108 71 model """convkb""" +108 71 loss """bceaftersigmoid""" +108 71 regularizer """no""" +108 71 optimizer """adadelta""" +108 71 training_loop """lcwa""" +108 71 evaluator """rankbased""" +109 1 model.embedding_dim 1.0 +109 1 model.hidden_dropout_rate 0.36342895412460263 +109 1 model.num_filters 3.0 +109 1 training.batch_size 1.0 +109 1 training.label_smoothing 0.0018573533338453296 +109 2 model.embedding_dim 1.0 +109 2 model.hidden_dropout_rate 0.498368952189701 +109 2 model.num_filters 6.0 +109 2 training.batch_size 0.0 +109 2 training.label_smoothing 0.001871810874471004 +109 3 model.embedding_dim 1.0 +109 3 model.hidden_dropout_rate 0.19623944069557037 +109 3 model.num_filters 7.0 +109 3 training.batch_size 1.0 +109 3 training.label_smoothing 0.06113352972725579 +109 4 model.embedding_dim 1.0 +109 4 model.hidden_dropout_rate 0.1632954268504615 +109 4 model.num_filters 
1.0 +109 4 training.batch_size 0.0 +109 4 training.label_smoothing 0.14854038801536928 +109 5 model.embedding_dim 0.0 +109 5 model.hidden_dropout_rate 0.18406672333782403 +109 5 model.num_filters 1.0 +109 5 training.batch_size 0.0 +109 5 training.label_smoothing 0.023233028349418396 +109 6 model.embedding_dim 2.0 +109 6 model.hidden_dropout_rate 0.46082626123971293 +109 6 model.num_filters 8.0 +109 6 training.batch_size 1.0 +109 6 training.label_smoothing 0.008184073709945854 +109 7 model.embedding_dim 1.0 +109 7 model.hidden_dropout_rate 0.13716190062781153 +109 7 model.num_filters 9.0 +109 7 training.batch_size 0.0 +109 7 training.label_smoothing 0.04826195062388659 +109 8 model.embedding_dim 0.0 +109 8 model.hidden_dropout_rate 0.2958047536113303 +109 8 model.num_filters 8.0 +109 8 training.batch_size 1.0 +109 8 training.label_smoothing 0.0010490826692318392 +109 9 model.embedding_dim 2.0 +109 9 model.hidden_dropout_rate 0.4839290971831478 +109 9 model.num_filters 8.0 +109 9 training.batch_size 1.0 +109 9 training.label_smoothing 0.0868939110305519 +109 10 model.embedding_dim 2.0 +109 10 model.hidden_dropout_rate 0.38487043787743336 +109 10 model.num_filters 6.0 +109 10 training.batch_size 2.0 +109 10 training.label_smoothing 0.557791917795145 +109 11 model.embedding_dim 0.0 +109 11 model.hidden_dropout_rate 0.14383020485210737 +109 11 model.num_filters 7.0 +109 11 training.batch_size 0.0 +109 11 training.label_smoothing 0.050240524192028575 +109 12 model.embedding_dim 2.0 +109 12 model.hidden_dropout_rate 0.37120547959542327 +109 12 model.num_filters 1.0 +109 12 training.batch_size 0.0 +109 12 training.label_smoothing 0.3781503698366373 +109 13 model.embedding_dim 1.0 +109 13 model.hidden_dropout_rate 0.4529843201212539 +109 13 model.num_filters 7.0 +109 13 training.batch_size 1.0 +109 13 training.label_smoothing 0.0023558892518096087 +109 14 model.embedding_dim 0.0 +109 14 model.hidden_dropout_rate 0.40388342732793037 +109 14 model.num_filters 0.0 +109 14 
training.batch_size 2.0 +109 14 training.label_smoothing 0.6304326481394521 +109 15 model.embedding_dim 1.0 +109 15 model.hidden_dropout_rate 0.21114637408521794 +109 15 model.num_filters 1.0 +109 15 training.batch_size 2.0 +109 15 training.label_smoothing 0.00258517434720863 +109 16 model.embedding_dim 0.0 +109 16 model.hidden_dropout_rate 0.1396220008805827 +109 16 model.num_filters 6.0 +109 16 training.batch_size 1.0 +109 16 training.label_smoothing 0.0312850632968398 +109 17 model.embedding_dim 2.0 +109 17 model.hidden_dropout_rate 0.3946106300002735 +109 17 model.num_filters 9.0 +109 17 training.batch_size 1.0 +109 17 training.label_smoothing 0.4387028856076606 +109 18 model.embedding_dim 0.0 +109 18 model.hidden_dropout_rate 0.32426532825955157 +109 18 model.num_filters 1.0 +109 18 training.batch_size 1.0 +109 18 training.label_smoothing 0.6122835751187345 +109 19 model.embedding_dim 2.0 +109 19 model.hidden_dropout_rate 0.4571950376864141 +109 19 model.num_filters 0.0 +109 19 training.batch_size 2.0 +109 19 training.label_smoothing 0.0018029581242081083 +109 20 model.embedding_dim 2.0 +109 20 model.hidden_dropout_rate 0.43517933590249924 +109 20 model.num_filters 5.0 +109 20 training.batch_size 0.0 +109 20 training.label_smoothing 0.001229977270340693 +109 21 model.embedding_dim 2.0 +109 21 model.hidden_dropout_rate 0.3986957327789239 +109 21 model.num_filters 4.0 +109 21 training.batch_size 0.0 +109 21 training.label_smoothing 0.010959363294530355 +109 22 model.embedding_dim 1.0 +109 22 model.hidden_dropout_rate 0.3272713346600773 +109 22 model.num_filters 2.0 +109 22 training.batch_size 1.0 +109 22 training.label_smoothing 0.09885776098479276 +109 23 model.embedding_dim 0.0 +109 23 model.hidden_dropout_rate 0.4338286868040505 +109 23 model.num_filters 9.0 +109 23 training.batch_size 2.0 +109 23 training.label_smoothing 0.0015919442296375137 +109 24 model.embedding_dim 2.0 +109 24 model.hidden_dropout_rate 0.44883095007223517 +109 24 model.num_filters 3.0 
+109 24 training.batch_size 1.0 +109 24 training.label_smoothing 0.0017263862445794271 +109 25 model.embedding_dim 2.0 +109 25 model.hidden_dropout_rate 0.3423644620747113 +109 25 model.num_filters 9.0 +109 25 training.batch_size 0.0 +109 25 training.label_smoothing 0.5394158211884422 +109 26 model.embedding_dim 0.0 +109 26 model.hidden_dropout_rate 0.4388315854775172 +109 26 model.num_filters 3.0 +109 26 training.batch_size 1.0 +109 26 training.label_smoothing 0.009436787381000839 +109 27 model.embedding_dim 1.0 +109 27 model.hidden_dropout_rate 0.1796717645724496 +109 27 model.num_filters 0.0 +109 27 training.batch_size 0.0 +109 27 training.label_smoothing 0.08656410683767982 +109 28 model.embedding_dim 1.0 +109 28 model.hidden_dropout_rate 0.295768055206711 +109 28 model.num_filters 3.0 +109 28 training.batch_size 2.0 +109 28 training.label_smoothing 0.013638297006462404 +109 29 model.embedding_dim 1.0 +109 29 model.hidden_dropout_rate 0.49223990131221174 +109 29 model.num_filters 0.0 +109 29 training.batch_size 2.0 +109 29 training.label_smoothing 0.5049452406623887 +109 30 model.embedding_dim 1.0 +109 30 model.hidden_dropout_rate 0.41450472348434886 +109 30 model.num_filters 6.0 +109 30 training.batch_size 1.0 +109 30 training.label_smoothing 0.2792085957681204 +109 31 model.embedding_dim 0.0 +109 31 model.hidden_dropout_rate 0.3776172913671427 +109 31 model.num_filters 3.0 +109 31 training.batch_size 1.0 +109 31 training.label_smoothing 0.01451282197542367 +109 32 model.embedding_dim 1.0 +109 32 model.hidden_dropout_rate 0.4277091087888216 +109 32 model.num_filters 9.0 +109 32 training.batch_size 0.0 +109 32 training.label_smoothing 0.020793761226562103 +109 33 model.embedding_dim 1.0 +109 33 model.hidden_dropout_rate 0.1717205726923221 +109 33 model.num_filters 3.0 +109 33 training.batch_size 1.0 +109 33 training.label_smoothing 0.4716214468412138 +109 34 model.embedding_dim 2.0 +109 34 model.hidden_dropout_rate 0.30194086488580907 +109 34 model.num_filters 
7.0 +109 34 training.batch_size 1.0 +109 34 training.label_smoothing 0.019905362767645002 +109 35 model.embedding_dim 1.0 +109 35 model.hidden_dropout_rate 0.3411037728616364 +109 35 model.num_filters 5.0 +109 35 training.batch_size 1.0 +109 35 training.label_smoothing 0.017478606725346933 +109 36 model.embedding_dim 1.0 +109 36 model.hidden_dropout_rate 0.490939456305193 +109 36 model.num_filters 8.0 +109 36 training.batch_size 1.0 +109 36 training.label_smoothing 0.0025558826291826155 +109 37 model.embedding_dim 0.0 +109 37 model.hidden_dropout_rate 0.49632059350496205 +109 37 model.num_filters 4.0 +109 37 training.batch_size 1.0 +109 37 training.label_smoothing 0.08911602643578109 +109 38 model.embedding_dim 2.0 +109 38 model.hidden_dropout_rate 0.30586101000896704 +109 38 model.num_filters 6.0 +109 38 training.batch_size 2.0 +109 38 training.label_smoothing 0.025813201108940892 +109 39 model.embedding_dim 0.0 +109 39 model.hidden_dropout_rate 0.2080163275010637 +109 39 model.num_filters 7.0 +109 39 training.batch_size 1.0 +109 39 training.label_smoothing 0.7309617491857604 +109 40 model.embedding_dim 0.0 +109 40 model.hidden_dropout_rate 0.13985796603573974 +109 40 model.num_filters 0.0 +109 40 training.batch_size 1.0 +109 40 training.label_smoothing 0.05174930100822914 +109 41 model.embedding_dim 2.0 +109 41 model.hidden_dropout_rate 0.2102423762522949 +109 41 model.num_filters 1.0 +109 41 training.batch_size 0.0 +109 41 training.label_smoothing 0.17641344020727323 +109 42 model.embedding_dim 1.0 +109 42 model.hidden_dropout_rate 0.1992973840531899 +109 42 model.num_filters 1.0 +109 42 training.batch_size 0.0 +109 42 training.label_smoothing 0.011493823473590029 +109 43 model.embedding_dim 0.0 +109 43 model.hidden_dropout_rate 0.2908999280342577 +109 43 model.num_filters 4.0 +109 43 training.batch_size 1.0 +109 43 training.label_smoothing 0.50632733949134 +109 44 model.embedding_dim 1.0 +109 44 model.hidden_dropout_rate 0.38887361741062143 +109 44 
model.num_filters 6.0 +109 44 training.batch_size 2.0 +109 44 training.label_smoothing 0.037105316290482165 +109 45 model.embedding_dim 0.0 +109 45 model.hidden_dropout_rate 0.34460449262763193 +109 45 model.num_filters 1.0 +109 45 training.batch_size 1.0 +109 45 training.label_smoothing 0.02284691400600458 +109 46 model.embedding_dim 2.0 +109 46 model.hidden_dropout_rate 0.29722948669117855 +109 46 model.num_filters 1.0 +109 46 training.batch_size 1.0 +109 46 training.label_smoothing 0.0034611670942821093 +109 47 model.embedding_dim 0.0 +109 47 model.hidden_dropout_rate 0.3100424523748838 +109 47 model.num_filters 7.0 +109 47 training.batch_size 0.0 +109 47 training.label_smoothing 0.06846874012047031 +109 48 model.embedding_dim 2.0 +109 48 model.hidden_dropout_rate 0.4531502649064777 +109 48 model.num_filters 9.0 +109 48 training.batch_size 2.0 +109 48 training.label_smoothing 0.11141894961065284 +109 49 model.embedding_dim 0.0 +109 49 model.hidden_dropout_rate 0.47297617668058933 +109 49 model.num_filters 9.0 +109 49 training.batch_size 1.0 +109 49 training.label_smoothing 0.15238065326067798 +109 50 model.embedding_dim 1.0 +109 50 model.hidden_dropout_rate 0.1917478602320395 +109 50 model.num_filters 5.0 +109 50 training.batch_size 0.0 +109 50 training.label_smoothing 0.021070422200419534 +109 51 model.embedding_dim 2.0 +109 51 model.hidden_dropout_rate 0.4577432462098415 +109 51 model.num_filters 0.0 +109 51 training.batch_size 1.0 +109 51 training.label_smoothing 0.22973926400824862 +109 52 model.embedding_dim 1.0 +109 52 model.hidden_dropout_rate 0.4720770483984168 +109 52 model.num_filters 2.0 +109 52 training.batch_size 0.0 +109 52 training.label_smoothing 0.0015027407980253878 +109 53 model.embedding_dim 1.0 +109 53 model.hidden_dropout_rate 0.10055692695492856 +109 53 model.num_filters 3.0 +109 53 training.batch_size 1.0 +109 53 training.label_smoothing 0.0072305225762262235 +109 54 model.embedding_dim 0.0 +109 54 model.hidden_dropout_rate 
0.34140487302269973 +109 54 model.num_filters 1.0 +109 54 training.batch_size 2.0 +109 54 training.label_smoothing 0.02392046377683442 +109 55 model.embedding_dim 0.0 +109 55 model.hidden_dropout_rate 0.10935275145990704 +109 55 model.num_filters 1.0 +109 55 training.batch_size 2.0 +109 55 training.label_smoothing 0.003266613005300135 +109 56 model.embedding_dim 0.0 +109 56 model.hidden_dropout_rate 0.3621573757111618 +109 56 model.num_filters 3.0 +109 56 training.batch_size 1.0 +109 56 training.label_smoothing 0.049694720308722136 +109 57 model.embedding_dim 1.0 +109 57 model.hidden_dropout_rate 0.4029729568757926 +109 57 model.num_filters 8.0 +109 57 training.batch_size 0.0 +109 57 training.label_smoothing 0.5018448984878129 +109 58 model.embedding_dim 1.0 +109 58 model.hidden_dropout_rate 0.11226114837581527 +109 58 model.num_filters 3.0 +109 58 training.batch_size 1.0 +109 58 training.label_smoothing 0.04637686553372199 +109 59 model.embedding_dim 2.0 +109 59 model.hidden_dropout_rate 0.16130433631428823 +109 59 model.num_filters 4.0 +109 59 training.batch_size 1.0 +109 59 training.label_smoothing 0.1421469741216059 +109 60 model.embedding_dim 1.0 +109 60 model.hidden_dropout_rate 0.17529476914873282 +109 60 model.num_filters 8.0 +109 60 training.batch_size 1.0 +109 60 training.label_smoothing 0.005531278600397073 +109 61 model.embedding_dim 1.0 +109 61 model.hidden_dropout_rate 0.26941149957593424 +109 61 model.num_filters 0.0 +109 61 training.batch_size 1.0 +109 61 training.label_smoothing 0.027266835043613047 +109 62 model.embedding_dim 0.0 +109 62 model.hidden_dropout_rate 0.1349420611761996 +109 62 model.num_filters 2.0 +109 62 training.batch_size 2.0 +109 62 training.label_smoothing 0.003050641835155957 +109 63 model.embedding_dim 0.0 +109 63 model.hidden_dropout_rate 0.36008897347797797 +109 63 model.num_filters 9.0 +109 63 training.batch_size 1.0 +109 63 training.label_smoothing 0.0019016972988329225 +109 64 model.embedding_dim 2.0 +109 64 
model.hidden_dropout_rate 0.15955500523496702 +109 64 model.num_filters 2.0 +109 64 training.batch_size 2.0 +109 64 training.label_smoothing 0.0016622235291123173 +109 65 model.embedding_dim 2.0 +109 65 model.hidden_dropout_rate 0.41604093092514194 +109 65 model.num_filters 8.0 +109 65 training.batch_size 1.0 +109 65 training.label_smoothing 0.08765408380434778 +109 66 model.embedding_dim 1.0 +109 66 model.hidden_dropout_rate 0.1735110445112849 +109 66 model.num_filters 2.0 +109 66 training.batch_size 2.0 +109 66 training.label_smoothing 0.11386363256443859 +109 67 model.embedding_dim 2.0 +109 67 model.hidden_dropout_rate 0.123397671493816 +109 67 model.num_filters 0.0 +109 67 training.batch_size 1.0 +109 67 training.label_smoothing 0.0024334042007851846 +109 68 model.embedding_dim 2.0 +109 68 model.hidden_dropout_rate 0.43783879034361395 +109 68 model.num_filters 6.0 +109 68 training.batch_size 2.0 +109 68 training.label_smoothing 0.0062318513041351 +109 69 model.embedding_dim 0.0 +109 69 model.hidden_dropout_rate 0.18021463383708 +109 69 model.num_filters 2.0 +109 69 training.batch_size 1.0 +109 69 training.label_smoothing 0.0026571319893182632 +109 70 model.embedding_dim 0.0 +109 70 model.hidden_dropout_rate 0.18379261358251697 +109 70 model.num_filters 7.0 +109 70 training.batch_size 1.0 +109 70 training.label_smoothing 0.012304050858999948 +109 71 model.embedding_dim 1.0 +109 71 model.hidden_dropout_rate 0.41969431383398315 +109 71 model.num_filters 9.0 +109 71 training.batch_size 2.0 +109 71 training.label_smoothing 0.010556408917807033 +109 72 model.embedding_dim 1.0 +109 72 model.hidden_dropout_rate 0.3614975133503431 +109 72 model.num_filters 3.0 +109 72 training.batch_size 0.0 +109 72 training.label_smoothing 0.6817893385437412 +109 73 model.embedding_dim 1.0 +109 73 model.hidden_dropout_rate 0.41105229896612316 +109 73 model.num_filters 2.0 +109 73 training.batch_size 1.0 +109 73 training.label_smoothing 0.006072831561875349 +109 74 model.embedding_dim 
0.0 +109 74 model.hidden_dropout_rate 0.10069218660978181 +109 74 model.num_filters 0.0 +109 74 training.batch_size 2.0 +109 74 training.label_smoothing 0.003395753215847178 +109 75 model.embedding_dim 2.0 +109 75 model.hidden_dropout_rate 0.2757681274627613 +109 75 model.num_filters 7.0 +109 75 training.batch_size 2.0 +109 75 training.label_smoothing 0.014466074656255936 +109 76 model.embedding_dim 1.0 +109 76 model.hidden_dropout_rate 0.22714778995867652 +109 76 model.num_filters 5.0 +109 76 training.batch_size 1.0 +109 76 training.label_smoothing 0.0287357539124704 +109 77 model.embedding_dim 2.0 +109 77 model.hidden_dropout_rate 0.20547005342969432 +109 77 model.num_filters 0.0 +109 77 training.batch_size 0.0 +109 77 training.label_smoothing 0.01854730749857115 +109 78 model.embedding_dim 1.0 +109 78 model.hidden_dropout_rate 0.3631767250884199 +109 78 model.num_filters 7.0 +109 78 training.batch_size 0.0 +109 78 training.label_smoothing 0.06722635705352417 +109 79 model.embedding_dim 0.0 +109 79 model.hidden_dropout_rate 0.16149482429367806 +109 79 model.num_filters 8.0 +109 79 training.batch_size 2.0 +109 79 training.label_smoothing 0.0021212984827624973 +109 80 model.embedding_dim 1.0 +109 80 model.hidden_dropout_rate 0.2403895143481316 +109 80 model.num_filters 5.0 +109 80 training.batch_size 2.0 +109 80 training.label_smoothing 0.4035079268251125 +109 81 model.embedding_dim 2.0 +109 81 model.hidden_dropout_rate 0.34403583664405557 +109 81 model.num_filters 4.0 +109 81 training.batch_size 2.0 +109 81 training.label_smoothing 0.198357784812065 +109 82 model.embedding_dim 0.0 +109 82 model.hidden_dropout_rate 0.14051705360497044 +109 82 model.num_filters 4.0 +109 82 training.batch_size 2.0 +109 82 training.label_smoothing 0.0016432948231165693 +109 83 model.embedding_dim 1.0 +109 83 model.hidden_dropout_rate 0.3495530690274512 +109 83 model.num_filters 5.0 +109 83 training.batch_size 1.0 +109 83 training.label_smoothing 0.3013717513414317 +109 84 
model.embedding_dim 0.0 +109 84 model.hidden_dropout_rate 0.2090042833973471 +109 84 model.num_filters 2.0 +109 84 training.batch_size 1.0 +109 84 training.label_smoothing 0.017860746565079497 +109 85 model.embedding_dim 1.0 +109 85 model.hidden_dropout_rate 0.14162143387291468 +109 85 model.num_filters 9.0 +109 85 training.batch_size 2.0 +109 85 training.label_smoothing 0.22160785928841542 +109 86 model.embedding_dim 2.0 +109 86 model.hidden_dropout_rate 0.43482331108918304 +109 86 model.num_filters 5.0 +109 86 training.batch_size 0.0 +109 86 training.label_smoothing 0.002092587942632744 +109 87 model.embedding_dim 1.0 +109 87 model.hidden_dropout_rate 0.36921982645667284 +109 87 model.num_filters 0.0 +109 87 training.batch_size 0.0 +109 87 training.label_smoothing 0.020535809072075265 +109 88 model.embedding_dim 0.0 +109 88 model.hidden_dropout_rate 0.26594493672316055 +109 88 model.num_filters 5.0 +109 88 training.batch_size 1.0 +109 88 training.label_smoothing 0.33148840697472703 +109 89 model.embedding_dim 1.0 +109 89 model.hidden_dropout_rate 0.40542601989474414 +109 89 model.num_filters 4.0 +109 89 training.batch_size 0.0 +109 89 training.label_smoothing 0.01806223332641573 +109 90 model.embedding_dim 2.0 +109 90 model.hidden_dropout_rate 0.4393549382088967 +109 90 model.num_filters 4.0 +109 90 training.batch_size 1.0 +109 90 training.label_smoothing 0.09319235938360888 +109 91 model.embedding_dim 2.0 +109 91 model.hidden_dropout_rate 0.294311975392232 +109 91 model.num_filters 3.0 +109 91 training.batch_size 0.0 +109 91 training.label_smoothing 0.027676702255392812 +109 92 model.embedding_dim 0.0 +109 92 model.hidden_dropout_rate 0.13447797840060424 +109 92 model.num_filters 2.0 +109 92 training.batch_size 0.0 +109 92 training.label_smoothing 0.1486769671219232 +109 93 model.embedding_dim 0.0 +109 93 model.hidden_dropout_rate 0.49109323994531184 +109 93 model.num_filters 0.0 +109 93 training.batch_size 0.0 +109 93 training.label_smoothing 0.8024320270600254 
+109 94 model.embedding_dim 1.0 +109 94 model.hidden_dropout_rate 0.2725482375738346 +109 94 model.num_filters 0.0 +109 94 training.batch_size 2.0 +109 94 training.label_smoothing 0.6670478268870685 +109 95 model.embedding_dim 2.0 +109 95 model.hidden_dropout_rate 0.2392921622905914 +109 95 model.num_filters 2.0 +109 95 training.batch_size 2.0 +109 95 training.label_smoothing 0.007785264684886524 +109 96 model.embedding_dim 2.0 +109 96 model.hidden_dropout_rate 0.30397611050116613 +109 96 model.num_filters 8.0 +109 96 training.batch_size 2.0 +109 96 training.label_smoothing 0.0024405831943621467 +109 97 model.embedding_dim 2.0 +109 97 model.hidden_dropout_rate 0.431186308583549 +109 97 model.num_filters 9.0 +109 97 training.batch_size 0.0 +109 97 training.label_smoothing 0.0684161305298021 +109 1 dataset """kinships""" +109 1 model """convkb""" +109 1 loss """softplus""" +109 1 regularizer """no""" +109 1 optimizer """adadelta""" +109 1 training_loop """lcwa""" +109 1 evaluator """rankbased""" +109 2 dataset """kinships""" +109 2 model """convkb""" +109 2 loss """softplus""" +109 2 regularizer """no""" +109 2 optimizer """adadelta""" +109 2 training_loop """lcwa""" +109 2 evaluator """rankbased""" +109 3 dataset """kinships""" +109 3 model """convkb""" +109 3 loss """softplus""" +109 3 regularizer """no""" +109 3 optimizer """adadelta""" +109 3 training_loop """lcwa""" +109 3 evaluator """rankbased""" +109 4 dataset """kinships""" +109 4 model """convkb""" +109 4 loss """softplus""" +109 4 regularizer """no""" +109 4 optimizer """adadelta""" +109 4 training_loop """lcwa""" +109 4 evaluator """rankbased""" +109 5 dataset """kinships""" +109 5 model """convkb""" +109 5 loss """softplus""" +109 5 regularizer """no""" +109 5 optimizer """adadelta""" +109 5 training_loop """lcwa""" +109 5 evaluator """rankbased""" +109 6 dataset """kinships""" +109 6 model """convkb""" +109 6 loss """softplus""" +109 6 regularizer """no""" +109 6 optimizer """adadelta""" +109 6 
training_loop """lcwa""" +109 6 evaluator """rankbased""" +109 7 dataset """kinships""" +109 7 model """convkb""" +109 7 loss """softplus""" +109 7 regularizer """no""" +109 7 optimizer """adadelta""" +109 7 training_loop """lcwa""" +109 7 evaluator """rankbased""" +109 8 dataset """kinships""" +109 8 model """convkb""" +109 8 loss """softplus""" +109 8 regularizer """no""" +109 8 optimizer """adadelta""" +109 8 training_loop """lcwa""" +109 8 evaluator """rankbased""" +109 9 dataset """kinships""" +109 9 model """convkb""" +109 9 loss """softplus""" +109 9 regularizer """no""" +109 9 optimizer """adadelta""" +109 9 training_loop """lcwa""" +109 9 evaluator """rankbased""" +109 10 dataset """kinships""" +109 10 model """convkb""" +109 10 loss """softplus""" +109 10 regularizer """no""" +109 10 optimizer """adadelta""" +109 10 training_loop """lcwa""" +109 10 evaluator """rankbased""" +109 11 dataset """kinships""" +109 11 model """convkb""" +109 11 loss """softplus""" +109 11 regularizer """no""" +109 11 optimizer """adadelta""" +109 11 training_loop """lcwa""" +109 11 evaluator """rankbased""" +109 12 dataset """kinships""" +109 12 model """convkb""" +109 12 loss """softplus""" +109 12 regularizer """no""" +109 12 optimizer """adadelta""" +109 12 training_loop """lcwa""" +109 12 evaluator """rankbased""" +109 13 dataset """kinships""" +109 13 model """convkb""" +109 13 loss """softplus""" +109 13 regularizer """no""" +109 13 optimizer """adadelta""" +109 13 training_loop """lcwa""" +109 13 evaluator """rankbased""" +109 14 dataset """kinships""" +109 14 model """convkb""" +109 14 loss """softplus""" +109 14 regularizer """no""" +109 14 optimizer """adadelta""" +109 14 training_loop """lcwa""" +109 14 evaluator """rankbased""" +109 15 dataset """kinships""" +109 15 model """convkb""" +109 15 loss """softplus""" +109 15 regularizer """no""" +109 15 optimizer """adadelta""" +109 15 training_loop """lcwa""" +109 15 evaluator """rankbased""" +109 16 dataset 
"""kinships""" +109 16 model """convkb""" +109 16 loss """softplus""" +109 16 regularizer """no""" +109 16 optimizer """adadelta""" +109 16 training_loop """lcwa""" +109 16 evaluator """rankbased""" +109 17 dataset """kinships""" +109 17 model """convkb""" +109 17 loss """softplus""" +109 17 regularizer """no""" +109 17 optimizer """adadelta""" +109 17 training_loop """lcwa""" +109 17 evaluator """rankbased""" +109 18 dataset """kinships""" +109 18 model """convkb""" +109 18 loss """softplus""" +109 18 regularizer """no""" +109 18 optimizer """adadelta""" +109 18 training_loop """lcwa""" +109 18 evaluator """rankbased""" +109 19 dataset """kinships""" +109 19 model """convkb""" +109 19 loss """softplus""" +109 19 regularizer """no""" +109 19 optimizer """adadelta""" +109 19 training_loop """lcwa""" +109 19 evaluator """rankbased""" +109 20 dataset """kinships""" +109 20 model """convkb""" +109 20 loss """softplus""" +109 20 regularizer """no""" +109 20 optimizer """adadelta""" +109 20 training_loop """lcwa""" +109 20 evaluator """rankbased""" +109 21 dataset """kinships""" +109 21 model """convkb""" +109 21 loss """softplus""" +109 21 regularizer """no""" +109 21 optimizer """adadelta""" +109 21 training_loop """lcwa""" +109 21 evaluator """rankbased""" +109 22 dataset """kinships""" +109 22 model """convkb""" +109 22 loss """softplus""" +109 22 regularizer """no""" +109 22 optimizer """adadelta""" +109 22 training_loop """lcwa""" +109 22 evaluator """rankbased""" +109 23 dataset """kinships""" +109 23 model """convkb""" +109 23 loss """softplus""" +109 23 regularizer """no""" +109 23 optimizer """adadelta""" +109 23 training_loop """lcwa""" +109 23 evaluator """rankbased""" +109 24 dataset """kinships""" +109 24 model """convkb""" +109 24 loss """softplus""" +109 24 regularizer """no""" +109 24 optimizer """adadelta""" +109 24 training_loop """lcwa""" +109 24 evaluator """rankbased""" +109 25 dataset """kinships""" +109 25 model """convkb""" +109 25 loss 
"""softplus""" +109 25 regularizer """no""" +109 25 optimizer """adadelta""" +109 25 training_loop """lcwa""" +109 25 evaluator """rankbased""" +109 26 dataset """kinships""" +109 26 model """convkb""" +109 26 loss """softplus""" +109 26 regularizer """no""" +109 26 optimizer """adadelta""" +109 26 training_loop """lcwa""" +109 26 evaluator """rankbased""" +109 27 dataset """kinships""" +109 27 model """convkb""" +109 27 loss """softplus""" +109 27 regularizer """no""" +109 27 optimizer """adadelta""" +109 27 training_loop """lcwa""" +109 27 evaluator """rankbased""" +109 28 dataset """kinships""" +109 28 model """convkb""" +109 28 loss """softplus""" +109 28 regularizer """no""" +109 28 optimizer """adadelta""" +109 28 training_loop """lcwa""" +109 28 evaluator """rankbased""" +109 29 dataset """kinships""" +109 29 model """convkb""" +109 29 loss """softplus""" +109 29 regularizer """no""" +109 29 optimizer """adadelta""" +109 29 training_loop """lcwa""" +109 29 evaluator """rankbased""" +109 30 dataset """kinships""" +109 30 model """convkb""" +109 30 loss """softplus""" +109 30 regularizer """no""" +109 30 optimizer """adadelta""" +109 30 training_loop """lcwa""" +109 30 evaluator """rankbased""" +109 31 dataset """kinships""" +109 31 model """convkb""" +109 31 loss """softplus""" +109 31 regularizer """no""" +109 31 optimizer """adadelta""" +109 31 training_loop """lcwa""" +109 31 evaluator """rankbased""" +109 32 dataset """kinships""" +109 32 model """convkb""" +109 32 loss """softplus""" +109 32 regularizer """no""" +109 32 optimizer """adadelta""" +109 32 training_loop """lcwa""" +109 32 evaluator """rankbased""" +109 33 dataset """kinships""" +109 33 model """convkb""" +109 33 loss """softplus""" +109 33 regularizer """no""" +109 33 optimizer """adadelta""" +109 33 training_loop """lcwa""" +109 33 evaluator """rankbased""" +109 34 dataset """kinships""" +109 34 model """convkb""" +109 34 loss """softplus""" +109 34 regularizer """no""" +109 34 optimizer 
"""adadelta""" +109 34 training_loop """lcwa""" +109 34 evaluator """rankbased""" +109 35 dataset """kinships""" +109 35 model """convkb""" +109 35 loss """softplus""" +109 35 regularizer """no""" +109 35 optimizer """adadelta""" +109 35 training_loop """lcwa""" +109 35 evaluator """rankbased""" +109 36 dataset """kinships""" +109 36 model """convkb""" +109 36 loss """softplus""" +109 36 regularizer """no""" +109 36 optimizer """adadelta""" +109 36 training_loop """lcwa""" +109 36 evaluator """rankbased""" +109 37 dataset """kinships""" +109 37 model """convkb""" +109 37 loss """softplus""" +109 37 regularizer """no""" +109 37 optimizer """adadelta""" +109 37 training_loop """lcwa""" +109 37 evaluator """rankbased""" +109 38 dataset """kinships""" +109 38 model """convkb""" +109 38 loss """softplus""" +109 38 regularizer """no""" +109 38 optimizer """adadelta""" +109 38 training_loop """lcwa""" +109 38 evaluator """rankbased""" +109 39 dataset """kinships""" +109 39 model """convkb""" +109 39 loss """softplus""" +109 39 regularizer """no""" +109 39 optimizer """adadelta""" +109 39 training_loop """lcwa""" +109 39 evaluator """rankbased""" +109 40 dataset """kinships""" +109 40 model """convkb""" +109 40 loss """softplus""" +109 40 regularizer """no""" +109 40 optimizer """adadelta""" +109 40 training_loop """lcwa""" +109 40 evaluator """rankbased""" +109 41 dataset """kinships""" +109 41 model """convkb""" +109 41 loss """softplus""" +109 41 regularizer """no""" +109 41 optimizer """adadelta""" +109 41 training_loop """lcwa""" +109 41 evaluator """rankbased""" +109 42 dataset """kinships""" +109 42 model """convkb""" +109 42 loss """softplus""" +109 42 regularizer """no""" +109 42 optimizer """adadelta""" +109 42 training_loop """lcwa""" +109 42 evaluator """rankbased""" +109 43 dataset """kinships""" +109 43 model """convkb""" +109 43 loss """softplus""" +109 43 regularizer """no""" +109 43 optimizer """adadelta""" +109 43 training_loop """lcwa""" +109 43 
evaluator """rankbased""" +109 44 dataset """kinships""" +109 44 model """convkb""" +109 44 loss """softplus""" +109 44 regularizer """no""" +109 44 optimizer """adadelta""" +109 44 training_loop """lcwa""" +109 44 evaluator """rankbased""" +109 45 dataset """kinships""" +109 45 model """convkb""" +109 45 loss """softplus""" +109 45 regularizer """no""" +109 45 optimizer """adadelta""" +109 45 training_loop """lcwa""" +109 45 evaluator """rankbased""" +109 46 dataset """kinships""" +109 46 model """convkb""" +109 46 loss """softplus""" +109 46 regularizer """no""" +109 46 optimizer """adadelta""" +109 46 training_loop """lcwa""" +109 46 evaluator """rankbased""" +109 47 dataset """kinships""" +109 47 model """convkb""" +109 47 loss """softplus""" +109 47 regularizer """no""" +109 47 optimizer """adadelta""" +109 47 training_loop """lcwa""" +109 47 evaluator """rankbased""" +109 48 dataset """kinships""" +109 48 model """convkb""" +109 48 loss """softplus""" +109 48 regularizer """no""" +109 48 optimizer """adadelta""" +109 48 training_loop """lcwa""" +109 48 evaluator """rankbased""" +109 49 dataset """kinships""" +109 49 model """convkb""" +109 49 loss """softplus""" +109 49 regularizer """no""" +109 49 optimizer """adadelta""" +109 49 training_loop """lcwa""" +109 49 evaluator """rankbased""" +109 50 dataset """kinships""" +109 50 model """convkb""" +109 50 loss """softplus""" +109 50 regularizer """no""" +109 50 optimizer """adadelta""" +109 50 training_loop """lcwa""" +109 50 evaluator """rankbased""" +109 51 dataset """kinships""" +109 51 model """convkb""" +109 51 loss """softplus""" +109 51 regularizer """no""" +109 51 optimizer """adadelta""" +109 51 training_loop """lcwa""" +109 51 evaluator """rankbased""" +109 52 dataset """kinships""" +109 52 model """convkb""" +109 52 loss """softplus""" +109 52 regularizer """no""" +109 52 optimizer """adadelta""" +109 52 training_loop """lcwa""" +109 52 evaluator """rankbased""" +109 53 dataset """kinships""" +109 53 
model """convkb""" +109 53 loss """softplus""" +109 53 regularizer """no""" +109 53 optimizer """adadelta""" +109 53 training_loop """lcwa""" +109 53 evaluator """rankbased""" +109 54 dataset """kinships""" +109 54 model """convkb""" +109 54 loss """softplus""" +109 54 regularizer """no""" +109 54 optimizer """adadelta""" +109 54 training_loop """lcwa""" +109 54 evaluator """rankbased""" +109 55 dataset """kinships""" +109 55 model """convkb""" +109 55 loss """softplus""" +109 55 regularizer """no""" +109 55 optimizer """adadelta""" +109 55 training_loop """lcwa""" +109 55 evaluator """rankbased""" +109 56 dataset """kinships""" +109 56 model """convkb""" +109 56 loss """softplus""" +109 56 regularizer """no""" +109 56 optimizer """adadelta""" +109 56 training_loop """lcwa""" +109 56 evaluator """rankbased""" +109 57 dataset """kinships""" +109 57 model """convkb""" +109 57 loss """softplus""" +109 57 regularizer """no""" +109 57 optimizer """adadelta""" +109 57 training_loop """lcwa""" +109 57 evaluator """rankbased""" +109 58 dataset """kinships""" +109 58 model """convkb""" +109 58 loss """softplus""" +109 58 regularizer """no""" +109 58 optimizer """adadelta""" +109 58 training_loop """lcwa""" +109 58 evaluator """rankbased""" +109 59 dataset """kinships""" +109 59 model """convkb""" +109 59 loss """softplus""" +109 59 regularizer """no""" +109 59 optimizer """adadelta""" +109 59 training_loop """lcwa""" +109 59 evaluator """rankbased""" +109 60 dataset """kinships""" +109 60 model """convkb""" +109 60 loss """softplus""" +109 60 regularizer """no""" +109 60 optimizer """adadelta""" +109 60 training_loop """lcwa""" +109 60 evaluator """rankbased""" +109 61 dataset """kinships""" +109 61 model """convkb""" +109 61 loss """softplus""" +109 61 regularizer """no""" +109 61 optimizer """adadelta""" +109 61 training_loop """lcwa""" +109 61 evaluator """rankbased""" +109 62 dataset """kinships""" +109 62 model """convkb""" +109 62 loss """softplus""" +109 62 
regularizer """no""" +109 62 optimizer """adadelta""" +109 62 training_loop """lcwa""" +109 62 evaluator """rankbased""" +109 63 dataset """kinships""" +109 63 model """convkb""" +109 63 loss """softplus""" +109 63 regularizer """no""" +109 63 optimizer """adadelta""" +109 63 training_loop """lcwa""" +109 63 evaluator """rankbased""" +109 64 dataset """kinships""" +109 64 model """convkb""" +109 64 loss """softplus""" +109 64 regularizer """no""" +109 64 optimizer """adadelta""" +109 64 training_loop """lcwa""" +109 64 evaluator """rankbased""" +109 65 dataset """kinships""" +109 65 model """convkb""" +109 65 loss """softplus""" +109 65 regularizer """no""" +109 65 optimizer """adadelta""" +109 65 training_loop """lcwa""" +109 65 evaluator """rankbased""" +109 66 dataset """kinships""" +109 66 model """convkb""" +109 66 loss """softplus""" +109 66 regularizer """no""" +109 66 optimizer """adadelta""" +109 66 training_loop """lcwa""" +109 66 evaluator """rankbased""" +109 67 dataset """kinships""" +109 67 model """convkb""" +109 67 loss """softplus""" +109 67 regularizer """no""" +109 67 optimizer """adadelta""" +109 67 training_loop """lcwa""" +109 67 evaluator """rankbased""" +109 68 dataset """kinships""" +109 68 model """convkb""" +109 68 loss """softplus""" +109 68 regularizer """no""" +109 68 optimizer """adadelta""" +109 68 training_loop """lcwa""" +109 68 evaluator """rankbased""" +109 69 dataset """kinships""" +109 69 model """convkb""" +109 69 loss """softplus""" +109 69 regularizer """no""" +109 69 optimizer """adadelta""" +109 69 training_loop """lcwa""" +109 69 evaluator """rankbased""" +109 70 dataset """kinships""" +109 70 model """convkb""" +109 70 loss """softplus""" +109 70 regularizer """no""" +109 70 optimizer """adadelta""" +109 70 training_loop """lcwa""" +109 70 evaluator """rankbased""" +109 71 dataset """kinships""" +109 71 model """convkb""" +109 71 loss """softplus""" +109 71 regularizer """no""" +109 71 optimizer """adadelta""" +109 71 
training_loop """lcwa""" +109 71 evaluator """rankbased""" +109 72 dataset """kinships""" +109 72 model """convkb""" +109 72 loss """softplus""" +109 72 regularizer """no""" +109 72 optimizer """adadelta""" +109 72 training_loop """lcwa""" +109 72 evaluator """rankbased""" +109 73 dataset """kinships""" +109 73 model """convkb""" +109 73 loss """softplus""" +109 73 regularizer """no""" +109 73 optimizer """adadelta""" +109 73 training_loop """lcwa""" +109 73 evaluator """rankbased""" +109 74 dataset """kinships""" +109 74 model """convkb""" +109 74 loss """softplus""" +109 74 regularizer """no""" +109 74 optimizer """adadelta""" +109 74 training_loop """lcwa""" +109 74 evaluator """rankbased""" +109 75 dataset """kinships""" +109 75 model """convkb""" +109 75 loss """softplus""" +109 75 regularizer """no""" +109 75 optimizer """adadelta""" +109 75 training_loop """lcwa""" +109 75 evaluator """rankbased""" +109 76 dataset """kinships""" +109 76 model """convkb""" +109 76 loss """softplus""" +109 76 regularizer """no""" +109 76 optimizer """adadelta""" +109 76 training_loop """lcwa""" +109 76 evaluator """rankbased""" +109 77 dataset """kinships""" +109 77 model """convkb""" +109 77 loss """softplus""" +109 77 regularizer """no""" +109 77 optimizer """adadelta""" +109 77 training_loop """lcwa""" +109 77 evaluator """rankbased""" +109 78 dataset """kinships""" +109 78 model """convkb""" +109 78 loss """softplus""" +109 78 regularizer """no""" +109 78 optimizer """adadelta""" +109 78 training_loop """lcwa""" +109 78 evaluator """rankbased""" +109 79 dataset """kinships""" +109 79 model """convkb""" +109 79 loss """softplus""" +109 79 regularizer """no""" +109 79 optimizer """adadelta""" +109 79 training_loop """lcwa""" +109 79 evaluator """rankbased""" +109 80 dataset """kinships""" +109 80 model """convkb""" +109 80 loss """softplus""" +109 80 regularizer """no""" +109 80 optimizer """adadelta""" +109 80 training_loop """lcwa""" +109 80 evaluator """rankbased""" +109 
81 dataset """kinships""" +109 81 model """convkb""" +109 81 loss """softplus""" +109 81 regularizer """no""" +109 81 optimizer """adadelta""" +109 81 training_loop """lcwa""" +109 81 evaluator """rankbased""" +109 82 dataset """kinships""" +109 82 model """convkb""" +109 82 loss """softplus""" +109 82 regularizer """no""" +109 82 optimizer """adadelta""" +109 82 training_loop """lcwa""" +109 82 evaluator """rankbased""" +109 83 dataset """kinships""" +109 83 model """convkb""" +109 83 loss """softplus""" +109 83 regularizer """no""" +109 83 optimizer """adadelta""" +109 83 training_loop """lcwa""" +109 83 evaluator """rankbased""" +109 84 dataset """kinships""" +109 84 model """convkb""" +109 84 loss """softplus""" +109 84 regularizer """no""" +109 84 optimizer """adadelta""" +109 84 training_loop """lcwa""" +109 84 evaluator """rankbased""" +109 85 dataset """kinships""" +109 85 model """convkb""" +109 85 loss """softplus""" +109 85 regularizer """no""" +109 85 optimizer """adadelta""" +109 85 training_loop """lcwa""" +109 85 evaluator """rankbased""" +109 86 dataset """kinships""" +109 86 model """convkb""" +109 86 loss """softplus""" +109 86 regularizer """no""" +109 86 optimizer """adadelta""" +109 86 training_loop """lcwa""" +109 86 evaluator """rankbased""" +109 87 dataset """kinships""" +109 87 model """convkb""" +109 87 loss """softplus""" +109 87 regularizer """no""" +109 87 optimizer """adadelta""" +109 87 training_loop """lcwa""" +109 87 evaluator """rankbased""" +109 88 dataset """kinships""" +109 88 model """convkb""" +109 88 loss """softplus""" +109 88 regularizer """no""" +109 88 optimizer """adadelta""" +109 88 training_loop """lcwa""" +109 88 evaluator """rankbased""" +109 89 dataset """kinships""" +109 89 model """convkb""" +109 89 loss """softplus""" +109 89 regularizer """no""" +109 89 optimizer """adadelta""" +109 89 training_loop """lcwa""" +109 89 evaluator """rankbased""" +109 90 dataset """kinships""" +109 90 model """convkb""" +109 90 
loss """softplus""" +109 90 regularizer """no""" +109 90 optimizer """adadelta""" +109 90 training_loop """lcwa""" +109 90 evaluator """rankbased""" +109 91 dataset """kinships""" +109 91 model """convkb""" +109 91 loss """softplus""" +109 91 regularizer """no""" +109 91 optimizer """adadelta""" +109 91 training_loop """lcwa""" +109 91 evaluator """rankbased""" +109 92 dataset """kinships""" +109 92 model """convkb""" +109 92 loss """softplus""" +109 92 regularizer """no""" +109 92 optimizer """adadelta""" +109 92 training_loop """lcwa""" +109 92 evaluator """rankbased""" +109 93 dataset """kinships""" +109 93 model """convkb""" +109 93 loss """softplus""" +109 93 regularizer """no""" +109 93 optimizer """adadelta""" +109 93 training_loop """lcwa""" +109 93 evaluator """rankbased""" +109 94 dataset """kinships""" +109 94 model """convkb""" +109 94 loss """softplus""" +109 94 regularizer """no""" +109 94 optimizer """adadelta""" +109 94 training_loop """lcwa""" +109 94 evaluator """rankbased""" +109 95 dataset """kinships""" +109 95 model """convkb""" +109 95 loss """softplus""" +109 95 regularizer """no""" +109 95 optimizer """adadelta""" +109 95 training_loop """lcwa""" +109 95 evaluator """rankbased""" +109 96 dataset """kinships""" +109 96 model """convkb""" +109 96 loss """softplus""" +109 96 regularizer """no""" +109 96 optimizer """adadelta""" +109 96 training_loop """lcwa""" +109 96 evaluator """rankbased""" +109 97 dataset """kinships""" +109 97 model """convkb""" +109 97 loss """softplus""" +109 97 regularizer """no""" +109 97 optimizer """adadelta""" +109 97 training_loop """lcwa""" +109 97 evaluator """rankbased""" +110 1 model.embedding_dim 2.0 +110 1 model.hidden_dropout_rate 0.24981343014225318 +110 1 model.num_filters 2.0 +110 1 training.batch_size 2.0 +110 1 training.label_smoothing 0.0030857210353219416 +110 2 model.embedding_dim 0.0 +110 2 model.hidden_dropout_rate 0.15352148368546314 +110 2 model.num_filters 1.0 +110 2 training.batch_size 0.0 
+110 2 training.label_smoothing 0.0448695684672325 +110 3 model.embedding_dim 1.0 +110 3 model.hidden_dropout_rate 0.19373808991683547 +110 3 model.num_filters 1.0 +110 3 training.batch_size 1.0 +110 3 training.label_smoothing 0.024994584294399005 +110 4 model.embedding_dim 0.0 +110 4 model.hidden_dropout_rate 0.10997312485996807 +110 4 model.num_filters 4.0 +110 4 training.batch_size 0.0 +110 4 training.label_smoothing 0.030262694236223627 +110 5 model.embedding_dim 0.0 +110 5 model.hidden_dropout_rate 0.16945733640978078 +110 5 model.num_filters 4.0 +110 5 training.batch_size 2.0 +110 5 training.label_smoothing 0.17724919253349397 +110 6 model.embedding_dim 1.0 +110 6 model.hidden_dropout_rate 0.239210521877956 +110 6 model.num_filters 4.0 +110 6 training.batch_size 0.0 +110 6 training.label_smoothing 0.8510952439221109 +110 7 model.embedding_dim 1.0 +110 7 model.hidden_dropout_rate 0.4643434398873886 +110 7 model.num_filters 7.0 +110 7 training.batch_size 1.0 +110 7 training.label_smoothing 0.031241875747404394 +110 8 model.embedding_dim 2.0 +110 8 model.hidden_dropout_rate 0.4674078568580827 +110 8 model.num_filters 0.0 +110 8 training.batch_size 2.0 +110 8 training.label_smoothing 0.576484822233968 +110 9 model.embedding_dim 2.0 +110 9 model.hidden_dropout_rate 0.36195372463793296 +110 9 model.num_filters 8.0 +110 9 training.batch_size 0.0 +110 9 training.label_smoothing 0.014346176278125405 +110 10 model.embedding_dim 2.0 +110 10 model.hidden_dropout_rate 0.21702245702227901 +110 10 model.num_filters 8.0 +110 10 training.batch_size 0.0 +110 10 training.label_smoothing 0.008694338526874628 +110 11 model.embedding_dim 0.0 +110 11 model.hidden_dropout_rate 0.4088892181678162 +110 11 model.num_filters 8.0 +110 11 training.batch_size 2.0 +110 11 training.label_smoothing 0.08696630476147671 +110 12 model.embedding_dim 1.0 +110 12 model.hidden_dropout_rate 0.16251952391784333 +110 12 model.num_filters 2.0 +110 12 training.batch_size 2.0 +110 12 
training.label_smoothing 0.0010398795262181618 +110 13 model.embedding_dim 2.0 +110 13 model.hidden_dropout_rate 0.41401235256212365 +110 13 model.num_filters 5.0 +110 13 training.batch_size 0.0 +110 13 training.label_smoothing 0.23765667293225495 +110 14 model.embedding_dim 0.0 +110 14 model.hidden_dropout_rate 0.4815606799157489 +110 14 model.num_filters 4.0 +110 14 training.batch_size 0.0 +110 14 training.label_smoothing 0.0010961252542942834 +110 15 model.embedding_dim 2.0 +110 15 model.hidden_dropout_rate 0.3868722429175585 +110 15 model.num_filters 6.0 +110 15 training.batch_size 1.0 +110 15 training.label_smoothing 0.1617913941177001 +110 16 model.embedding_dim 2.0 +110 16 model.hidden_dropout_rate 0.23845655024309573 +110 16 model.num_filters 1.0 +110 16 training.batch_size 1.0 +110 16 training.label_smoothing 0.1602913775892961 +110 17 model.embedding_dim 2.0 +110 17 model.hidden_dropout_rate 0.27800368444839585 +110 17 model.num_filters 5.0 +110 17 training.batch_size 2.0 +110 17 training.label_smoothing 0.10599759908544365 +110 18 model.embedding_dim 2.0 +110 18 model.hidden_dropout_rate 0.18686657825608913 +110 18 model.num_filters 8.0 +110 18 training.batch_size 0.0 +110 18 training.label_smoothing 0.012526428750754886 +110 19 model.embedding_dim 0.0 +110 19 model.hidden_dropout_rate 0.43797608549604006 +110 19 model.num_filters 8.0 +110 19 training.batch_size 1.0 +110 19 training.label_smoothing 0.45598869936287706 +110 20 model.embedding_dim 2.0 +110 20 model.hidden_dropout_rate 0.4276434434869302 +110 20 model.num_filters 1.0 +110 20 training.batch_size 1.0 +110 20 training.label_smoothing 0.017710743370467227 +110 21 model.embedding_dim 1.0 +110 21 model.hidden_dropout_rate 0.4639773059848631 +110 21 model.num_filters 8.0 +110 21 training.batch_size 1.0 +110 21 training.label_smoothing 0.10556107740702073 +110 22 model.embedding_dim 2.0 +110 22 model.hidden_dropout_rate 0.4454261194811787 +110 22 model.num_filters 8.0 +110 22 training.batch_size 
2.0 +110 22 training.label_smoothing 0.0654414484711327 +110 23 model.embedding_dim 0.0 +110 23 model.hidden_dropout_rate 0.27460724383935997 +110 23 model.num_filters 0.0 +110 23 training.batch_size 2.0 +110 23 training.label_smoothing 0.0030092183686849998 +110 24 model.embedding_dim 0.0 +110 24 model.hidden_dropout_rate 0.14094924349832982 +110 24 model.num_filters 0.0 +110 24 training.batch_size 0.0 +110 24 training.label_smoothing 0.5694683580141547 +110 25 model.embedding_dim 0.0 +110 25 model.hidden_dropout_rate 0.42430001800526973 +110 25 model.num_filters 3.0 +110 25 training.batch_size 0.0 +110 25 training.label_smoothing 0.028408338452135605 +110 26 model.embedding_dim 2.0 +110 26 model.hidden_dropout_rate 0.3326616232787463 +110 26 model.num_filters 4.0 +110 26 training.batch_size 2.0 +110 26 training.label_smoothing 0.18498067811907137 +110 27 model.embedding_dim 1.0 +110 27 model.hidden_dropout_rate 0.2419637295403322 +110 27 model.num_filters 1.0 +110 27 training.batch_size 0.0 +110 27 training.label_smoothing 0.004109218639068167 +110 28 model.embedding_dim 2.0 +110 28 model.hidden_dropout_rate 0.44374981537241287 +110 28 model.num_filters 2.0 +110 28 training.batch_size 1.0 +110 28 training.label_smoothing 0.7317581953386052 +110 29 model.embedding_dim 1.0 +110 29 model.hidden_dropout_rate 0.10817582607473751 +110 29 model.num_filters 7.0 +110 29 training.batch_size 0.0 +110 29 training.label_smoothing 0.4480066425677869 +110 1 dataset """kinships""" +110 1 model """convkb""" +110 1 loss """crossentropy""" +110 1 regularizer """no""" +110 1 optimizer """adadelta""" +110 1 training_loop """lcwa""" +110 1 evaluator """rankbased""" +110 2 dataset """kinships""" +110 2 model """convkb""" +110 2 loss """crossentropy""" +110 2 regularizer """no""" +110 2 optimizer """adadelta""" +110 2 training_loop """lcwa""" +110 2 evaluator """rankbased""" +110 3 dataset """kinships""" +110 3 model """convkb""" +110 3 loss """crossentropy""" +110 3 regularizer 
"""no""" +110 3 optimizer """adadelta""" +110 3 training_loop """lcwa""" +110 3 evaluator """rankbased""" +110 4 dataset """kinships""" +110 4 model """convkb""" +110 4 loss """crossentropy""" +110 4 regularizer """no""" +110 4 optimizer """adadelta""" +110 4 training_loop """lcwa""" +110 4 evaluator """rankbased""" +110 5 dataset """kinships""" +110 5 model """convkb""" +110 5 loss """crossentropy""" +110 5 regularizer """no""" +110 5 optimizer """adadelta""" +110 5 training_loop """lcwa""" +110 5 evaluator """rankbased""" +110 6 dataset """kinships""" +110 6 model """convkb""" +110 6 loss """crossentropy""" +110 6 regularizer """no""" +110 6 optimizer """adadelta""" +110 6 training_loop """lcwa""" +110 6 evaluator """rankbased""" +110 7 dataset """kinships""" +110 7 model """convkb""" +110 7 loss """crossentropy""" +110 7 regularizer """no""" +110 7 optimizer """adadelta""" +110 7 training_loop """lcwa""" +110 7 evaluator """rankbased""" +110 8 dataset """kinships""" +110 8 model """convkb""" +110 8 loss """crossentropy""" +110 8 regularizer """no""" +110 8 optimizer """adadelta""" +110 8 training_loop """lcwa""" +110 8 evaluator """rankbased""" +110 9 dataset """kinships""" +110 9 model """convkb""" +110 9 loss """crossentropy""" +110 9 regularizer """no""" +110 9 optimizer """adadelta""" +110 9 training_loop """lcwa""" +110 9 evaluator """rankbased""" +110 10 dataset """kinships""" +110 10 model """convkb""" +110 10 loss """crossentropy""" +110 10 regularizer """no""" +110 10 optimizer """adadelta""" +110 10 training_loop """lcwa""" +110 10 evaluator """rankbased""" +110 11 dataset """kinships""" +110 11 model """convkb""" +110 11 loss """crossentropy""" +110 11 regularizer """no""" +110 11 optimizer """adadelta""" +110 11 training_loop """lcwa""" +110 11 evaluator """rankbased""" +110 12 dataset """kinships""" +110 12 model """convkb""" +110 12 loss """crossentropy""" +110 12 regularizer """no""" +110 12 optimizer """adadelta""" +110 12 training_loop 
"""lcwa""" +110 12 evaluator """rankbased""" +110 13 dataset """kinships""" +110 13 model """convkb""" +110 13 loss """crossentropy""" +110 13 regularizer """no""" +110 13 optimizer """adadelta""" +110 13 training_loop """lcwa""" +110 13 evaluator """rankbased""" +110 14 dataset """kinships""" +110 14 model """convkb""" +110 14 loss """crossentropy""" +110 14 regularizer """no""" +110 14 optimizer """adadelta""" +110 14 training_loop """lcwa""" +110 14 evaluator """rankbased""" +110 15 dataset """kinships""" +110 15 model """convkb""" +110 15 loss """crossentropy""" +110 15 regularizer """no""" +110 15 optimizer """adadelta""" +110 15 training_loop """lcwa""" +110 15 evaluator """rankbased""" +110 16 dataset """kinships""" +110 16 model """convkb""" +110 16 loss """crossentropy""" +110 16 regularizer """no""" +110 16 optimizer """adadelta""" +110 16 training_loop """lcwa""" +110 16 evaluator """rankbased""" +110 17 dataset """kinships""" +110 17 model """convkb""" +110 17 loss """crossentropy""" +110 17 regularizer """no""" +110 17 optimizer """adadelta""" +110 17 training_loop """lcwa""" +110 17 evaluator """rankbased""" +110 18 dataset """kinships""" +110 18 model """convkb""" +110 18 loss """crossentropy""" +110 18 regularizer """no""" +110 18 optimizer """adadelta""" +110 18 training_loop """lcwa""" +110 18 evaluator """rankbased""" +110 19 dataset """kinships""" +110 19 model """convkb""" +110 19 loss """crossentropy""" +110 19 regularizer """no""" +110 19 optimizer """adadelta""" +110 19 training_loop """lcwa""" +110 19 evaluator """rankbased""" +110 20 dataset """kinships""" +110 20 model """convkb""" +110 20 loss """crossentropy""" +110 20 regularizer """no""" +110 20 optimizer """adadelta""" +110 20 training_loop """lcwa""" +110 20 evaluator """rankbased""" +110 21 dataset """kinships""" +110 21 model """convkb""" +110 21 loss """crossentropy""" +110 21 regularizer """no""" +110 21 optimizer """adadelta""" +110 21 training_loop """lcwa""" +110 21 evaluator 
"""rankbased""" +110 22 dataset """kinships""" +110 22 model """convkb""" +110 22 loss """crossentropy""" +110 22 regularizer """no""" +110 22 optimizer """adadelta""" +110 22 training_loop """lcwa""" +110 22 evaluator """rankbased""" +110 23 dataset """kinships""" +110 23 model """convkb""" +110 23 loss """crossentropy""" +110 23 regularizer """no""" +110 23 optimizer """adadelta""" +110 23 training_loop """lcwa""" +110 23 evaluator """rankbased""" +110 24 dataset """kinships""" +110 24 model """convkb""" +110 24 loss """crossentropy""" +110 24 regularizer """no""" +110 24 optimizer """adadelta""" +110 24 training_loop """lcwa""" +110 24 evaluator """rankbased""" +110 25 dataset """kinships""" +110 25 model """convkb""" +110 25 loss """crossentropy""" +110 25 regularizer """no""" +110 25 optimizer """adadelta""" +110 25 training_loop """lcwa""" +110 25 evaluator """rankbased""" +110 26 dataset """kinships""" +110 26 model """convkb""" +110 26 loss """crossentropy""" +110 26 regularizer """no""" +110 26 optimizer """adadelta""" +110 26 training_loop """lcwa""" +110 26 evaluator """rankbased""" +110 27 dataset """kinships""" +110 27 model """convkb""" +110 27 loss """crossentropy""" +110 27 regularizer """no""" +110 27 optimizer """adadelta""" +110 27 training_loop """lcwa""" +110 27 evaluator """rankbased""" +110 28 dataset """kinships""" +110 28 model """convkb""" +110 28 loss """crossentropy""" +110 28 regularizer """no""" +110 28 optimizer """adadelta""" +110 28 training_loop """lcwa""" +110 28 evaluator """rankbased""" +110 29 dataset """kinships""" +110 29 model """convkb""" +110 29 loss """crossentropy""" +110 29 regularizer """no""" +110 29 optimizer """adadelta""" +110 29 training_loop """lcwa""" +110 29 evaluator """rankbased""" +111 1 model.embedding_dim 0.0 +111 1 model.hidden_dropout_rate 0.17003292144603044 +111 1 model.num_filters 6.0 +111 1 training.batch_size 1.0 +111 1 training.label_smoothing 0.23350637923864706 +111 2 model.embedding_dim 1.0 +111 
2 model.hidden_dropout_rate 0.49480685531301216 +111 2 model.num_filters 2.0 +111 2 training.batch_size 2.0 +111 2 training.label_smoothing 0.00826318322004104 +111 3 model.embedding_dim 2.0 +111 3 model.hidden_dropout_rate 0.2340679626299397 +111 3 model.num_filters 1.0 +111 3 training.batch_size 1.0 +111 3 training.label_smoothing 0.003255364855366796 +111 4 model.embedding_dim 2.0 +111 4 model.hidden_dropout_rate 0.3702185268581345 +111 4 model.num_filters 5.0 +111 4 training.batch_size 1.0 +111 4 training.label_smoothing 0.7237267157477691 +111 5 model.embedding_dim 0.0 +111 5 model.hidden_dropout_rate 0.1303977494915614 +111 5 model.num_filters 9.0 +111 5 training.batch_size 1.0 +111 5 training.label_smoothing 0.04262615206627891 +111 6 model.embedding_dim 2.0 +111 6 model.hidden_dropout_rate 0.11221721517116956 +111 6 model.num_filters 8.0 +111 6 training.batch_size 0.0 +111 6 training.label_smoothing 0.029380402588222835 +111 7 model.embedding_dim 0.0 +111 7 model.hidden_dropout_rate 0.28840631793011773 +111 7 model.num_filters 8.0 +111 7 training.batch_size 1.0 +111 7 training.label_smoothing 0.009840003847137862 +111 8 model.embedding_dim 1.0 +111 8 model.hidden_dropout_rate 0.3191167335149886 +111 8 model.num_filters 0.0 +111 8 training.batch_size 2.0 +111 8 training.label_smoothing 0.002186969415194187 +111 9 model.embedding_dim 0.0 +111 9 model.hidden_dropout_rate 0.40685802163662677 +111 9 model.num_filters 9.0 +111 9 training.batch_size 0.0 +111 9 training.label_smoothing 0.021413844434714384 +111 10 model.embedding_dim 2.0 +111 10 model.hidden_dropout_rate 0.45263685283209765 +111 10 model.num_filters 6.0 +111 10 training.batch_size 2.0 +111 10 training.label_smoothing 0.033700633780006664 +111 11 model.embedding_dim 2.0 +111 11 model.hidden_dropout_rate 0.24359419508047783 +111 11 model.num_filters 0.0 +111 11 training.batch_size 2.0 +111 11 training.label_smoothing 0.19077965067432878 +111 12 model.embedding_dim 2.0 +111 12 
model.hidden_dropout_rate 0.46022184284943324 +111 12 model.num_filters 3.0 +111 12 training.batch_size 0.0 +111 12 training.label_smoothing 0.024296425652673745 +111 13 model.embedding_dim 2.0 +111 13 model.hidden_dropout_rate 0.3450451637693589 +111 13 model.num_filters 8.0 +111 13 training.batch_size 0.0 +111 13 training.label_smoothing 0.14424414803467667 +111 14 model.embedding_dim 1.0 +111 14 model.hidden_dropout_rate 0.2277681605760126 +111 14 model.num_filters 6.0 +111 14 training.batch_size 0.0 +111 14 training.label_smoothing 0.7986694583930365 +111 15 model.embedding_dim 2.0 +111 15 model.hidden_dropout_rate 0.4589229828737952 +111 15 model.num_filters 0.0 +111 15 training.batch_size 2.0 +111 15 training.label_smoothing 0.03007744533515116 +111 16 model.embedding_dim 2.0 +111 16 model.hidden_dropout_rate 0.10310611141623341 +111 16 model.num_filters 2.0 +111 16 training.batch_size 2.0 +111 16 training.label_smoothing 0.06856651127216394 +111 17 model.embedding_dim 2.0 +111 17 model.hidden_dropout_rate 0.44646011650226913 +111 17 model.num_filters 8.0 +111 17 training.batch_size 0.0 +111 17 training.label_smoothing 0.01672601726764483 +111 18 model.embedding_dim 1.0 +111 18 model.hidden_dropout_rate 0.4408396691361023 +111 18 model.num_filters 7.0 +111 18 training.batch_size 0.0 +111 18 training.label_smoothing 0.0016855449814440428 +111 19 model.embedding_dim 1.0 +111 19 model.hidden_dropout_rate 0.22892214111947085 +111 19 model.num_filters 5.0 +111 19 training.batch_size 0.0 +111 19 training.label_smoothing 0.9954696159917361 +111 20 model.embedding_dim 1.0 +111 20 model.hidden_dropout_rate 0.2934123437962497 +111 20 model.num_filters 7.0 +111 20 training.batch_size 2.0 +111 20 training.label_smoothing 0.04037273057700672 +111 21 model.embedding_dim 0.0 +111 21 model.hidden_dropout_rate 0.31278950614348516 +111 21 model.num_filters 8.0 +111 21 training.batch_size 1.0 +111 21 training.label_smoothing 0.011746476094120082 +111 22 model.embedding_dim 2.0 
+111 22 model.hidden_dropout_rate 0.19932578553070954 +111 22 model.num_filters 2.0 +111 22 training.batch_size 1.0 +111 22 training.label_smoothing 0.004008707146008714 +111 23 model.embedding_dim 1.0 +111 23 model.hidden_dropout_rate 0.19424552158001906 +111 23 model.num_filters 0.0 +111 23 training.batch_size 1.0 +111 23 training.label_smoothing 0.0024642970488108398 +111 24 model.embedding_dim 2.0 +111 24 model.hidden_dropout_rate 0.21421773428917845 +111 24 model.num_filters 4.0 +111 24 training.batch_size 1.0 +111 24 training.label_smoothing 0.8550554429540019 +111 25 model.embedding_dim 1.0 +111 25 model.hidden_dropout_rate 0.25237064160062883 +111 25 model.num_filters 7.0 +111 25 training.batch_size 2.0 +111 25 training.label_smoothing 0.007925470976415092 +111 26 model.embedding_dim 0.0 +111 26 model.hidden_dropout_rate 0.41427657348835745 +111 26 model.num_filters 4.0 +111 26 training.batch_size 2.0 +111 26 training.label_smoothing 0.015322519532161917 +111 27 model.embedding_dim 1.0 +111 27 model.hidden_dropout_rate 0.45263494410811367 +111 27 model.num_filters 9.0 +111 27 training.batch_size 0.0 +111 27 training.label_smoothing 0.0726230649698299 +111 28 model.embedding_dim 0.0 +111 28 model.hidden_dropout_rate 0.26643827451785557 +111 28 model.num_filters 1.0 +111 28 training.batch_size 0.0 +111 28 training.label_smoothing 0.05930932076782329 +111 29 model.embedding_dim 1.0 +111 29 model.hidden_dropout_rate 0.25854529363558687 +111 29 model.num_filters 8.0 +111 29 training.batch_size 0.0 +111 29 training.label_smoothing 0.027903499365508552 +111 30 model.embedding_dim 0.0 +111 30 model.hidden_dropout_rate 0.321377740281363 +111 30 model.num_filters 1.0 +111 30 training.batch_size 1.0 +111 30 training.label_smoothing 0.0011070614345237194 +111 31 model.embedding_dim 0.0 +111 31 model.hidden_dropout_rate 0.2099841614732347 +111 31 model.num_filters 1.0 +111 31 training.batch_size 2.0 +111 31 training.label_smoothing 0.07946424465891776 +111 32 
model.embedding_dim 1.0 +111 32 model.hidden_dropout_rate 0.4696390342808331 +111 32 model.num_filters 5.0 +111 32 training.batch_size 1.0 +111 32 training.label_smoothing 0.007873488432918498 +111 33 model.embedding_dim 2.0 +111 33 model.hidden_dropout_rate 0.2728770239328526 +111 33 model.num_filters 8.0 +111 33 training.batch_size 2.0 +111 33 training.label_smoothing 0.004022574552546431 +111 34 model.embedding_dim 1.0 +111 34 model.hidden_dropout_rate 0.2758932158483025 +111 34 model.num_filters 8.0 +111 34 training.batch_size 0.0 +111 34 training.label_smoothing 0.04950132187588892 +111 35 model.embedding_dim 1.0 +111 35 model.hidden_dropout_rate 0.49342888158812725 +111 35 model.num_filters 6.0 +111 35 training.batch_size 2.0 +111 35 training.label_smoothing 0.0014172782266981927 +111 36 model.embedding_dim 0.0 +111 36 model.hidden_dropout_rate 0.21283113902989853 +111 36 model.num_filters 3.0 +111 36 training.batch_size 2.0 +111 36 training.label_smoothing 0.309990038056013 +111 37 model.embedding_dim 0.0 +111 37 model.hidden_dropout_rate 0.10264771873021866 +111 37 model.num_filters 6.0 +111 37 training.batch_size 1.0 +111 37 training.label_smoothing 0.05221372364642567 +111 38 model.embedding_dim 2.0 +111 38 model.hidden_dropout_rate 0.24226290008717247 +111 38 model.num_filters 9.0 +111 38 training.batch_size 1.0 +111 38 training.label_smoothing 0.005356058388408601 +111 39 model.embedding_dim 0.0 +111 39 model.hidden_dropout_rate 0.19825602982832038 +111 39 model.num_filters 4.0 +111 39 training.batch_size 1.0 +111 39 training.label_smoothing 0.014923050088848694 +111 40 model.embedding_dim 0.0 +111 40 model.hidden_dropout_rate 0.20335187699847684 +111 40 model.num_filters 2.0 +111 40 training.batch_size 2.0 +111 40 training.label_smoothing 0.010125831747459652 +111 41 model.embedding_dim 1.0 +111 41 model.hidden_dropout_rate 0.13573061402778933 +111 41 model.num_filters 7.0 +111 41 training.batch_size 0.0 +111 41 training.label_smoothing 
0.0011458945382116805 +111 42 model.embedding_dim 0.0 +111 42 model.hidden_dropout_rate 0.24233176692560796 +111 42 model.num_filters 0.0 +111 42 training.batch_size 0.0 +111 42 training.label_smoothing 0.23946062647715174 +111 43 model.embedding_dim 0.0 +111 43 model.hidden_dropout_rate 0.13917997153508424 +111 43 model.num_filters 9.0 +111 43 training.batch_size 2.0 +111 43 training.label_smoothing 0.1352047756979959 +111 44 model.embedding_dim 1.0 +111 44 model.hidden_dropout_rate 0.30687928855517443 +111 44 model.num_filters 2.0 +111 44 training.batch_size 1.0 +111 44 training.label_smoothing 0.0026966056502983897 +111 45 model.embedding_dim 2.0 +111 45 model.hidden_dropout_rate 0.14830360684281124 +111 45 model.num_filters 2.0 +111 45 training.batch_size 0.0 +111 45 training.label_smoothing 0.00142842700113223 +111 46 model.embedding_dim 1.0 +111 46 model.hidden_dropout_rate 0.3445205299611798 +111 46 model.num_filters 7.0 +111 46 training.batch_size 2.0 +111 46 training.label_smoothing 0.5231853419021311 +111 47 model.embedding_dim 1.0 +111 47 model.hidden_dropout_rate 0.47775868973957103 +111 47 model.num_filters 1.0 +111 47 training.batch_size 0.0 +111 47 training.label_smoothing 0.0479298947993045 +111 48 model.embedding_dim 1.0 +111 48 model.hidden_dropout_rate 0.32385650732043314 +111 48 model.num_filters 9.0 +111 48 training.batch_size 0.0 +111 48 training.label_smoothing 0.3006431413970913 +111 49 model.embedding_dim 1.0 +111 49 model.hidden_dropout_rate 0.1384555238544306 +111 49 model.num_filters 7.0 +111 49 training.batch_size 2.0 +111 49 training.label_smoothing 0.0011453198441137755 +111 50 model.embedding_dim 0.0 +111 50 model.hidden_dropout_rate 0.32277666878565975 +111 50 model.num_filters 7.0 +111 50 training.batch_size 1.0 +111 50 training.label_smoothing 0.004922962216230476 +111 51 model.embedding_dim 0.0 +111 51 model.hidden_dropout_rate 0.3693488588596844 +111 51 model.num_filters 7.0 +111 51 training.batch_size 2.0 +111 51 
training.label_smoothing 0.03819509989164238 +111 52 model.embedding_dim 0.0 +111 52 model.hidden_dropout_rate 0.44807527969048555 +111 52 model.num_filters 2.0 +111 52 training.batch_size 1.0 +111 52 training.label_smoothing 0.013100646181284974 +111 53 model.embedding_dim 1.0 +111 53 model.hidden_dropout_rate 0.15808702462510682 +111 53 model.num_filters 0.0 +111 53 training.batch_size 2.0 +111 53 training.label_smoothing 0.08776320627180934 +111 54 model.embedding_dim 0.0 +111 54 model.hidden_dropout_rate 0.4516110172216592 +111 54 model.num_filters 3.0 +111 54 training.batch_size 1.0 +111 54 training.label_smoothing 0.010584282168381483 +111 55 model.embedding_dim 0.0 +111 55 model.hidden_dropout_rate 0.12413742264487504 +111 55 model.num_filters 0.0 +111 55 training.batch_size 2.0 +111 55 training.label_smoothing 0.014551600367689644 +111 56 model.embedding_dim 1.0 +111 56 model.hidden_dropout_rate 0.3678933184240163 +111 56 model.num_filters 1.0 +111 56 training.batch_size 1.0 +111 56 training.label_smoothing 0.04945522423861751 +111 57 model.embedding_dim 0.0 +111 57 model.hidden_dropout_rate 0.19513960701230693 +111 57 model.num_filters 2.0 +111 57 training.batch_size 2.0 +111 57 training.label_smoothing 0.003968973195972136 +111 58 model.embedding_dim 1.0 +111 58 model.hidden_dropout_rate 0.13840139351923555 +111 58 model.num_filters 0.0 +111 58 training.batch_size 1.0 +111 58 training.label_smoothing 0.009377102059038892 +111 59 model.embedding_dim 2.0 +111 59 model.hidden_dropout_rate 0.2771730997253839 +111 59 model.num_filters 8.0 +111 59 training.batch_size 2.0 +111 59 training.label_smoothing 0.008811483212674436 +111 60 model.embedding_dim 0.0 +111 60 model.hidden_dropout_rate 0.21353652618409558 +111 60 model.num_filters 3.0 +111 60 training.batch_size 1.0 +111 60 training.label_smoothing 0.13059124799925415 +111 61 model.embedding_dim 0.0 +111 61 model.hidden_dropout_rate 0.32669997614536184 +111 61 model.num_filters 1.0 +111 61 
training.batch_size 0.0 +111 61 training.label_smoothing 0.9979604580401202 +111 62 model.embedding_dim 0.0 +111 62 model.hidden_dropout_rate 0.34915917979174627 +111 62 model.num_filters 7.0 +111 62 training.batch_size 1.0 +111 62 training.label_smoothing 0.03537722000825721 +111 63 model.embedding_dim 2.0 +111 63 model.hidden_dropout_rate 0.43252210261012336 +111 63 model.num_filters 8.0 +111 63 training.batch_size 0.0 +111 63 training.label_smoothing 0.07340883673886875 +111 64 model.embedding_dim 1.0 +111 64 model.hidden_dropout_rate 0.13039304305529886 +111 64 model.num_filters 1.0 +111 64 training.batch_size 2.0 +111 64 training.label_smoothing 0.00196850907792515 +111 65 model.embedding_dim 2.0 +111 65 model.hidden_dropout_rate 0.22487138848053548 +111 65 model.num_filters 5.0 +111 65 training.batch_size 2.0 +111 65 training.label_smoothing 0.00481371268340712 +111 66 model.embedding_dim 2.0 +111 66 model.hidden_dropout_rate 0.3090556379154068 +111 66 model.num_filters 7.0 +111 66 training.batch_size 2.0 +111 66 training.label_smoothing 0.36283919233425554 +111 67 model.embedding_dim 1.0 +111 67 model.hidden_dropout_rate 0.24241288621633902 +111 67 model.num_filters 7.0 +111 67 training.batch_size 0.0 +111 67 training.label_smoothing 0.0024203311381304307 +111 68 model.embedding_dim 1.0 +111 68 model.hidden_dropout_rate 0.45248134686158237 +111 68 model.num_filters 6.0 +111 68 training.batch_size 2.0 +111 68 training.label_smoothing 0.013364643799548943 +111 69 model.embedding_dim 2.0 +111 69 model.hidden_dropout_rate 0.14778135043827823 +111 69 model.num_filters 6.0 +111 69 training.batch_size 1.0 +111 69 training.label_smoothing 0.004772092927002812 +111 70 model.embedding_dim 1.0 +111 70 model.hidden_dropout_rate 0.23485092241421826 +111 70 model.num_filters 7.0 +111 70 training.batch_size 0.0 +111 70 training.label_smoothing 0.035152914610677356 +111 1 dataset """kinships""" +111 1 model """convkb""" +111 1 loss """crossentropy""" +111 1 regularizer 
"""no""" +111 1 optimizer """adadelta""" +111 1 training_loop """lcwa""" +111 1 evaluator """rankbased""" +111 2 dataset """kinships""" +111 2 model """convkb""" +111 2 loss """crossentropy""" +111 2 regularizer """no""" +111 2 optimizer """adadelta""" +111 2 training_loop """lcwa""" +111 2 evaluator """rankbased""" +111 3 dataset """kinships""" +111 3 model """convkb""" +111 3 loss """crossentropy""" +111 3 regularizer """no""" +111 3 optimizer """adadelta""" +111 3 training_loop """lcwa""" +111 3 evaluator """rankbased""" +111 4 dataset """kinships""" +111 4 model """convkb""" +111 4 loss """crossentropy""" +111 4 regularizer """no""" +111 4 optimizer """adadelta""" +111 4 training_loop """lcwa""" +111 4 evaluator """rankbased""" +111 5 dataset """kinships""" +111 5 model """convkb""" +111 5 loss """crossentropy""" +111 5 regularizer """no""" +111 5 optimizer """adadelta""" +111 5 training_loop """lcwa""" +111 5 evaluator """rankbased""" +111 6 dataset """kinships""" +111 6 model """convkb""" +111 6 loss """crossentropy""" +111 6 regularizer """no""" +111 6 optimizer """adadelta""" +111 6 training_loop """lcwa""" +111 6 evaluator """rankbased""" +111 7 dataset """kinships""" +111 7 model """convkb""" +111 7 loss """crossentropy""" +111 7 regularizer """no""" +111 7 optimizer """adadelta""" +111 7 training_loop """lcwa""" +111 7 evaluator """rankbased""" +111 8 dataset """kinships""" +111 8 model """convkb""" +111 8 loss """crossentropy""" +111 8 regularizer """no""" +111 8 optimizer """adadelta""" +111 8 training_loop """lcwa""" +111 8 evaluator """rankbased""" +111 9 dataset """kinships""" +111 9 model """convkb""" +111 9 loss """crossentropy""" +111 9 regularizer """no""" +111 9 optimizer """adadelta""" +111 9 training_loop """lcwa""" +111 9 evaluator """rankbased""" +111 10 dataset """kinships""" +111 10 model """convkb""" +111 10 loss """crossentropy""" +111 10 regularizer """no""" +111 10 optimizer """adadelta""" +111 10 training_loop """lcwa""" +111 10 
evaluator """rankbased""" +111 11 dataset """kinships""" +111 11 model """convkb""" +111 11 loss """crossentropy""" +111 11 regularizer """no""" +111 11 optimizer """adadelta""" +111 11 training_loop """lcwa""" +111 11 evaluator """rankbased""" +111 12 dataset """kinships""" +111 12 model """convkb""" +111 12 loss """crossentropy""" +111 12 regularizer """no""" +111 12 optimizer """adadelta""" +111 12 training_loop """lcwa""" +111 12 evaluator """rankbased""" +111 13 dataset """kinships""" +111 13 model """convkb""" +111 13 loss """crossentropy""" +111 13 regularizer """no""" +111 13 optimizer """adadelta""" +111 13 training_loop """lcwa""" +111 13 evaluator """rankbased""" +111 14 dataset """kinships""" +111 14 model """convkb""" +111 14 loss """crossentropy""" +111 14 regularizer """no""" +111 14 optimizer """adadelta""" +111 14 training_loop """lcwa""" +111 14 evaluator """rankbased""" +111 15 dataset """kinships""" +111 15 model """convkb""" +111 15 loss """crossentropy""" +111 15 regularizer """no""" +111 15 optimizer """adadelta""" +111 15 training_loop """lcwa""" +111 15 evaluator """rankbased""" +111 16 dataset """kinships""" +111 16 model """convkb""" +111 16 loss """crossentropy""" +111 16 regularizer """no""" +111 16 optimizer """adadelta""" +111 16 training_loop """lcwa""" +111 16 evaluator """rankbased""" +111 17 dataset """kinships""" +111 17 model """convkb""" +111 17 loss """crossentropy""" +111 17 regularizer """no""" +111 17 optimizer """adadelta""" +111 17 training_loop """lcwa""" +111 17 evaluator """rankbased""" +111 18 dataset """kinships""" +111 18 model """convkb""" +111 18 loss """crossentropy""" +111 18 regularizer """no""" +111 18 optimizer """adadelta""" +111 18 training_loop """lcwa""" +111 18 evaluator """rankbased""" +111 19 dataset """kinships""" +111 19 model """convkb""" +111 19 loss """crossentropy""" +111 19 regularizer """no""" +111 19 optimizer """adadelta""" +111 19 training_loop """lcwa""" +111 19 evaluator """rankbased""" 
+111 20 dataset """kinships""" +111 20 model """convkb""" +111 20 loss """crossentropy""" +111 20 regularizer """no""" +111 20 optimizer """adadelta""" +111 20 training_loop """lcwa""" +111 20 evaluator """rankbased""" +111 21 dataset """kinships""" +111 21 model """convkb""" +111 21 loss """crossentropy""" +111 21 regularizer """no""" +111 21 optimizer """adadelta""" +111 21 training_loop """lcwa""" +111 21 evaluator """rankbased""" +111 22 dataset """kinships""" +111 22 model """convkb""" +111 22 loss """crossentropy""" +111 22 regularizer """no""" +111 22 optimizer """adadelta""" +111 22 training_loop """lcwa""" +111 22 evaluator """rankbased""" +111 23 dataset """kinships""" +111 23 model """convkb""" +111 23 loss """crossentropy""" +111 23 regularizer """no""" +111 23 optimizer """adadelta""" +111 23 training_loop """lcwa""" +111 23 evaluator """rankbased""" +111 24 dataset """kinships""" +111 24 model """convkb""" +111 24 loss """crossentropy""" +111 24 regularizer """no""" +111 24 optimizer """adadelta""" +111 24 training_loop """lcwa""" +111 24 evaluator """rankbased""" +111 25 dataset """kinships""" +111 25 model """convkb""" +111 25 loss """crossentropy""" +111 25 regularizer """no""" +111 25 optimizer """adadelta""" +111 25 training_loop """lcwa""" +111 25 evaluator """rankbased""" +111 26 dataset """kinships""" +111 26 model """convkb""" +111 26 loss """crossentropy""" +111 26 regularizer """no""" +111 26 optimizer """adadelta""" +111 26 training_loop """lcwa""" +111 26 evaluator """rankbased""" +111 27 dataset """kinships""" +111 27 model """convkb""" +111 27 loss """crossentropy""" +111 27 regularizer """no""" +111 27 optimizer """adadelta""" +111 27 training_loop """lcwa""" +111 27 evaluator """rankbased""" +111 28 dataset """kinships""" +111 28 model """convkb""" +111 28 loss """crossentropy""" +111 28 regularizer """no""" +111 28 optimizer """adadelta""" +111 28 training_loop """lcwa""" +111 28 evaluator """rankbased""" +111 29 dataset 
"""kinships""" +111 29 model """convkb""" +111 29 loss """crossentropy""" +111 29 regularizer """no""" +111 29 optimizer """adadelta""" +111 29 training_loop """lcwa""" +111 29 evaluator """rankbased""" +111 30 dataset """kinships""" +111 30 model """convkb""" +111 30 loss """crossentropy""" +111 30 regularizer """no""" +111 30 optimizer """adadelta""" +111 30 training_loop """lcwa""" +111 30 evaluator """rankbased""" +111 31 dataset """kinships""" +111 31 model """convkb""" +111 31 loss """crossentropy""" +111 31 regularizer """no""" +111 31 optimizer """adadelta""" +111 31 training_loop """lcwa""" +111 31 evaluator """rankbased""" +111 32 dataset """kinships""" +111 32 model """convkb""" +111 32 loss """crossentropy""" +111 32 regularizer """no""" +111 32 optimizer """adadelta""" +111 32 training_loop """lcwa""" +111 32 evaluator """rankbased""" +111 33 dataset """kinships""" +111 33 model """convkb""" +111 33 loss """crossentropy""" +111 33 regularizer """no""" +111 33 optimizer """adadelta""" +111 33 training_loop """lcwa""" +111 33 evaluator """rankbased""" +111 34 dataset """kinships""" +111 34 model """convkb""" +111 34 loss """crossentropy""" +111 34 regularizer """no""" +111 34 optimizer """adadelta""" +111 34 training_loop """lcwa""" +111 34 evaluator """rankbased""" +111 35 dataset """kinships""" +111 35 model """convkb""" +111 35 loss """crossentropy""" +111 35 regularizer """no""" +111 35 optimizer """adadelta""" +111 35 training_loop """lcwa""" +111 35 evaluator """rankbased""" +111 36 dataset """kinships""" +111 36 model """convkb""" +111 36 loss """crossentropy""" +111 36 regularizer """no""" +111 36 optimizer """adadelta""" +111 36 training_loop """lcwa""" +111 36 evaluator """rankbased""" +111 37 dataset """kinships""" +111 37 model """convkb""" +111 37 loss """crossentropy""" +111 37 regularizer """no""" +111 37 optimizer """adadelta""" +111 37 training_loop """lcwa""" +111 37 evaluator """rankbased""" +111 38 dataset """kinships""" +111 38 model 
"""convkb""" +111 38 loss """crossentropy""" +111 38 regularizer """no""" +111 38 optimizer """adadelta""" +111 38 training_loop """lcwa""" +111 38 evaluator """rankbased""" +111 39 dataset """kinships""" +111 39 model """convkb""" +111 39 loss """crossentropy""" +111 39 regularizer """no""" +111 39 optimizer """adadelta""" +111 39 training_loop """lcwa""" +111 39 evaluator """rankbased""" +111 40 dataset """kinships""" +111 40 model """convkb""" +111 40 loss """crossentropy""" +111 40 regularizer """no""" +111 40 optimizer """adadelta""" +111 40 training_loop """lcwa""" +111 40 evaluator """rankbased""" +111 41 dataset """kinships""" +111 41 model """convkb""" +111 41 loss """crossentropy""" +111 41 regularizer """no""" +111 41 optimizer """adadelta""" +111 41 training_loop """lcwa""" +111 41 evaluator """rankbased""" +111 42 dataset """kinships""" +111 42 model """convkb""" +111 42 loss """crossentropy""" +111 42 regularizer """no""" +111 42 optimizer """adadelta""" +111 42 training_loop """lcwa""" +111 42 evaluator """rankbased""" +111 43 dataset """kinships""" +111 43 model """convkb""" +111 43 loss """crossentropy""" +111 43 regularizer """no""" +111 43 optimizer """adadelta""" +111 43 training_loop """lcwa""" +111 43 evaluator """rankbased""" +111 44 dataset """kinships""" +111 44 model """convkb""" +111 44 loss """crossentropy""" +111 44 regularizer """no""" +111 44 optimizer """adadelta""" +111 44 training_loop """lcwa""" +111 44 evaluator """rankbased""" +111 45 dataset """kinships""" +111 45 model """convkb""" +111 45 loss """crossentropy""" +111 45 regularizer """no""" +111 45 optimizer """adadelta""" +111 45 training_loop """lcwa""" +111 45 evaluator """rankbased""" +111 46 dataset """kinships""" +111 46 model """convkb""" +111 46 loss """crossentropy""" +111 46 regularizer """no""" +111 46 optimizer """adadelta""" +111 46 training_loop """lcwa""" +111 46 evaluator """rankbased""" +111 47 dataset """kinships""" +111 47 model """convkb""" +111 47 loss 
"""crossentropy""" +111 47 regularizer """no""" +111 47 optimizer """adadelta""" +111 47 training_loop """lcwa""" +111 47 evaluator """rankbased""" +111 48 dataset """kinships""" +111 48 model """convkb""" +111 48 loss """crossentropy""" +111 48 regularizer """no""" +111 48 optimizer """adadelta""" +111 48 training_loop """lcwa""" +111 48 evaluator """rankbased""" +111 49 dataset """kinships""" +111 49 model """convkb""" +111 49 loss """crossentropy""" +111 49 regularizer """no""" +111 49 optimizer """adadelta""" +111 49 training_loop """lcwa""" +111 49 evaluator """rankbased""" +111 50 dataset """kinships""" +111 50 model """convkb""" +111 50 loss """crossentropy""" +111 50 regularizer """no""" +111 50 optimizer """adadelta""" +111 50 training_loop """lcwa""" +111 50 evaluator """rankbased""" +111 51 dataset """kinships""" +111 51 model """convkb""" +111 51 loss """crossentropy""" +111 51 regularizer """no""" +111 51 optimizer """adadelta""" +111 51 training_loop """lcwa""" +111 51 evaluator """rankbased""" +111 52 dataset """kinships""" +111 52 model """convkb""" +111 52 loss """crossentropy""" +111 52 regularizer """no""" +111 52 optimizer """adadelta""" +111 52 training_loop """lcwa""" +111 52 evaluator """rankbased""" +111 53 dataset """kinships""" +111 53 model """convkb""" +111 53 loss """crossentropy""" +111 53 regularizer """no""" +111 53 optimizer """adadelta""" +111 53 training_loop """lcwa""" +111 53 evaluator """rankbased""" +111 54 dataset """kinships""" +111 54 model """convkb""" +111 54 loss """crossentropy""" +111 54 regularizer """no""" +111 54 optimizer """adadelta""" +111 54 training_loop """lcwa""" +111 54 evaluator """rankbased""" +111 55 dataset """kinships""" +111 55 model """convkb""" +111 55 loss """crossentropy""" +111 55 regularizer """no""" +111 55 optimizer """adadelta""" +111 55 training_loop """lcwa""" +111 55 evaluator """rankbased""" +111 56 dataset """kinships""" +111 56 model """convkb""" +111 56 loss """crossentropy""" +111 56 
regularizer """no""" +111 56 optimizer """adadelta""" +111 56 training_loop """lcwa""" +111 56 evaluator """rankbased""" +111 57 dataset """kinships""" +111 57 model """convkb""" +111 57 loss """crossentropy""" +111 57 regularizer """no""" +111 57 optimizer """adadelta""" +111 57 training_loop """lcwa""" +111 57 evaluator """rankbased""" +111 58 dataset """kinships""" +111 58 model """convkb""" +111 58 loss """crossentropy""" +111 58 regularizer """no""" +111 58 optimizer """adadelta""" +111 58 training_loop """lcwa""" +111 58 evaluator """rankbased""" +111 59 dataset """kinships""" +111 59 model """convkb""" +111 59 loss """crossentropy""" +111 59 regularizer """no""" +111 59 optimizer """adadelta""" +111 59 training_loop """lcwa""" +111 59 evaluator """rankbased""" +111 60 dataset """kinships""" +111 60 model """convkb""" +111 60 loss """crossentropy""" +111 60 regularizer """no""" +111 60 optimizer """adadelta""" +111 60 training_loop """lcwa""" +111 60 evaluator """rankbased""" +111 61 dataset """kinships""" +111 61 model """convkb""" +111 61 loss """crossentropy""" +111 61 regularizer """no""" +111 61 optimizer """adadelta""" +111 61 training_loop """lcwa""" +111 61 evaluator """rankbased""" +111 62 dataset """kinships""" +111 62 model """convkb""" +111 62 loss """crossentropy""" +111 62 regularizer """no""" +111 62 optimizer """adadelta""" +111 62 training_loop """lcwa""" +111 62 evaluator """rankbased""" +111 63 dataset """kinships""" +111 63 model """convkb""" +111 63 loss """crossentropy""" +111 63 regularizer """no""" +111 63 optimizer """adadelta""" +111 63 training_loop """lcwa""" +111 63 evaluator """rankbased""" +111 64 dataset """kinships""" +111 64 model """convkb""" +111 64 loss """crossentropy""" +111 64 regularizer """no""" +111 64 optimizer """adadelta""" +111 64 training_loop """lcwa""" +111 64 evaluator """rankbased""" +111 65 dataset """kinships""" +111 65 model """convkb""" +111 65 loss """crossentropy""" +111 65 regularizer """no""" +111 65 
optimizer """adadelta""" +111 65 training_loop """lcwa""" +111 65 evaluator """rankbased""" +111 66 dataset """kinships""" +111 66 model """convkb""" +111 66 loss """crossentropy""" +111 66 regularizer """no""" +111 66 optimizer """adadelta""" +111 66 training_loop """lcwa""" +111 66 evaluator """rankbased""" +111 67 dataset """kinships""" +111 67 model """convkb""" +111 67 loss """crossentropy""" +111 67 regularizer """no""" +111 67 optimizer """adadelta""" +111 67 training_loop """lcwa""" +111 67 evaluator """rankbased""" +111 68 dataset """kinships""" +111 68 model """convkb""" +111 68 loss """crossentropy""" +111 68 regularizer """no""" +111 68 optimizer """adadelta""" +111 68 training_loop """lcwa""" +111 68 evaluator """rankbased""" +111 69 dataset """kinships""" +111 69 model """convkb""" +111 69 loss """crossentropy""" +111 69 regularizer """no""" +111 69 optimizer """adadelta""" +111 69 training_loop """lcwa""" +111 69 evaluator """rankbased""" +111 70 dataset """kinships""" +111 70 model """convkb""" +111 70 loss """crossentropy""" +111 70 regularizer """no""" +111 70 optimizer """adadelta""" +111 70 training_loop """lcwa""" +111 70 evaluator """rankbased""" +112 1 model.embedding_dim 0.0 +112 1 model.hidden_dropout_rate 0.1652611129981783 +112 1 model.num_filters 2.0 +112 1 negative_sampler.num_negs_per_pos 29.0 +112 1 training.batch_size 2.0 +112 2 model.embedding_dim 0.0 +112 2 model.hidden_dropout_rate 0.22703636428228116 +112 2 model.num_filters 0.0 +112 2 negative_sampler.num_negs_per_pos 23.0 +112 2 training.batch_size 0.0 +112 3 model.embedding_dim 2.0 +112 3 model.hidden_dropout_rate 0.20598767956541222 +112 3 model.num_filters 8.0 +112 3 negative_sampler.num_negs_per_pos 7.0 +112 3 training.batch_size 2.0 +112 4 model.embedding_dim 2.0 +112 4 model.hidden_dropout_rate 0.4081294783362813 +112 4 model.num_filters 6.0 +112 4 negative_sampler.num_negs_per_pos 2.0 +112 4 training.batch_size 1.0 +112 5 model.embedding_dim 1.0 +112 5 
model.hidden_dropout_rate 0.25517641627449617 +112 5 model.num_filters 3.0 +112 5 negative_sampler.num_negs_per_pos 6.0 +112 5 training.batch_size 1.0 +112 6 model.embedding_dim 2.0 +112 6 model.hidden_dropout_rate 0.21388536663837382 +112 6 model.num_filters 6.0 +112 6 negative_sampler.num_negs_per_pos 53.0 +112 6 training.batch_size 2.0 +112 7 model.embedding_dim 0.0 +112 7 model.hidden_dropout_rate 0.19763677556649983 +112 7 model.num_filters 8.0 +112 7 negative_sampler.num_negs_per_pos 96.0 +112 7 training.batch_size 0.0 +112 8 model.embedding_dim 1.0 +112 8 model.hidden_dropout_rate 0.23271711480395285 +112 8 model.num_filters 2.0 +112 8 negative_sampler.num_negs_per_pos 11.0 +112 8 training.batch_size 0.0 +112 9 model.embedding_dim 1.0 +112 9 model.hidden_dropout_rate 0.43718671458735225 +112 9 model.num_filters 2.0 +112 9 negative_sampler.num_negs_per_pos 42.0 +112 9 training.batch_size 2.0 +112 10 model.embedding_dim 2.0 +112 10 model.hidden_dropout_rate 0.17323833603894384 +112 10 model.num_filters 1.0 +112 10 negative_sampler.num_negs_per_pos 69.0 +112 10 training.batch_size 0.0 +112 11 model.embedding_dim 1.0 +112 11 model.hidden_dropout_rate 0.4570358495598913 +112 11 model.num_filters 3.0 +112 11 negative_sampler.num_negs_per_pos 63.0 +112 11 training.batch_size 0.0 +112 12 model.embedding_dim 1.0 +112 12 model.hidden_dropout_rate 0.180639422947519 +112 12 model.num_filters 9.0 +112 12 negative_sampler.num_negs_per_pos 90.0 +112 12 training.batch_size 0.0 +112 13 model.embedding_dim 2.0 +112 13 model.hidden_dropout_rate 0.24062253802396652 +112 13 model.num_filters 9.0 +112 13 negative_sampler.num_negs_per_pos 56.0 +112 13 training.batch_size 1.0 +112 14 model.embedding_dim 0.0 +112 14 model.hidden_dropout_rate 0.18816484687662777 +112 14 model.num_filters 5.0 +112 14 negative_sampler.num_negs_per_pos 61.0 +112 14 training.batch_size 1.0 +112 15 model.embedding_dim 2.0 +112 15 model.hidden_dropout_rate 0.3833290133495345 +112 15 model.num_filters 7.0 
+112 15 negative_sampler.num_negs_per_pos 45.0 +112 15 training.batch_size 2.0 +112 16 model.embedding_dim 0.0 +112 16 model.hidden_dropout_rate 0.34225864982166876 +112 16 model.num_filters 1.0 +112 16 negative_sampler.num_negs_per_pos 80.0 +112 16 training.batch_size 2.0 +112 17 model.embedding_dim 1.0 +112 17 model.hidden_dropout_rate 0.10027145453673475 +112 17 model.num_filters 8.0 +112 17 negative_sampler.num_negs_per_pos 3.0 +112 17 training.batch_size 1.0 +112 18 model.embedding_dim 1.0 +112 18 model.hidden_dropout_rate 0.144432630213312 +112 18 model.num_filters 1.0 +112 18 negative_sampler.num_negs_per_pos 30.0 +112 18 training.batch_size 2.0 +112 19 model.embedding_dim 2.0 +112 19 model.hidden_dropout_rate 0.44640243482840025 +112 19 model.num_filters 4.0 +112 19 negative_sampler.num_negs_per_pos 7.0 +112 19 training.batch_size 2.0 +112 20 model.embedding_dim 1.0 +112 20 model.hidden_dropout_rate 0.317936569195843 +112 20 model.num_filters 8.0 +112 20 negative_sampler.num_negs_per_pos 56.0 +112 20 training.batch_size 2.0 +112 21 model.embedding_dim 0.0 +112 21 model.hidden_dropout_rate 0.13618163554259977 +112 21 model.num_filters 5.0 +112 21 negative_sampler.num_negs_per_pos 81.0 +112 21 training.batch_size 1.0 +112 22 model.embedding_dim 2.0 +112 22 model.hidden_dropout_rate 0.4633786256241915 +112 22 model.num_filters 1.0 +112 22 negative_sampler.num_negs_per_pos 80.0 +112 22 training.batch_size 2.0 +112 23 model.embedding_dim 1.0 +112 23 model.hidden_dropout_rate 0.1753664394779688 +112 23 model.num_filters 7.0 +112 23 negative_sampler.num_negs_per_pos 10.0 +112 23 training.batch_size 0.0 +112 24 model.embedding_dim 2.0 +112 24 model.hidden_dropout_rate 0.4345864664701349 +112 24 model.num_filters 3.0 +112 24 negative_sampler.num_negs_per_pos 36.0 +112 24 training.batch_size 0.0 +112 25 model.embedding_dim 0.0 +112 25 model.hidden_dropout_rate 0.2804404608924318 +112 25 model.num_filters 8.0 +112 25 negative_sampler.num_negs_per_pos 77.0 +112 25 
training.batch_size 1.0 +112 26 model.embedding_dim 1.0 +112 26 model.hidden_dropout_rate 0.24646928905990204 +112 26 model.num_filters 6.0 +112 26 negative_sampler.num_negs_per_pos 34.0 +112 26 training.batch_size 0.0 +112 27 model.embedding_dim 0.0 +112 27 model.hidden_dropout_rate 0.1742486008529375 +112 27 model.num_filters 0.0 +112 27 negative_sampler.num_negs_per_pos 70.0 +112 27 training.batch_size 2.0 +112 28 model.embedding_dim 1.0 +112 28 model.hidden_dropout_rate 0.44936262581841757 +112 28 model.num_filters 1.0 +112 28 negative_sampler.num_negs_per_pos 91.0 +112 28 training.batch_size 1.0 +112 29 model.embedding_dim 0.0 +112 29 model.hidden_dropout_rate 0.12839116488555147 +112 29 model.num_filters 8.0 +112 29 negative_sampler.num_negs_per_pos 86.0 +112 29 training.batch_size 0.0 +112 30 model.embedding_dim 2.0 +112 30 model.hidden_dropout_rate 0.2849309087915143 +112 30 model.num_filters 7.0 +112 30 negative_sampler.num_negs_per_pos 84.0 +112 30 training.batch_size 1.0 +112 1 dataset """kinships""" +112 1 model """convkb""" +112 1 loss """bceaftersigmoid""" +112 1 regularizer """no""" +112 1 optimizer """adadelta""" +112 1 training_loop """owa""" +112 1 negative_sampler """basic""" +112 1 evaluator """rankbased""" +112 2 dataset """kinships""" +112 2 model """convkb""" +112 2 loss """bceaftersigmoid""" +112 2 regularizer """no""" +112 2 optimizer """adadelta""" +112 2 training_loop """owa""" +112 2 negative_sampler """basic""" +112 2 evaluator """rankbased""" +112 3 dataset """kinships""" +112 3 model """convkb""" +112 3 loss """bceaftersigmoid""" +112 3 regularizer """no""" +112 3 optimizer """adadelta""" +112 3 training_loop """owa""" +112 3 negative_sampler """basic""" +112 3 evaluator """rankbased""" +112 4 dataset """kinships""" +112 4 model """convkb""" +112 4 loss """bceaftersigmoid""" +112 4 regularizer """no""" +112 4 optimizer """adadelta""" +112 4 training_loop """owa""" +112 4 negative_sampler """basic""" +112 4 evaluator """rankbased""" 
+112 5 dataset """kinships""" +112 5 model """convkb""" +112 5 loss """bceaftersigmoid""" +112 5 regularizer """no""" +112 5 optimizer """adadelta""" +112 5 training_loop """owa""" +112 5 negative_sampler """basic""" +112 5 evaluator """rankbased""" +112 6 dataset """kinships""" +112 6 model """convkb""" +112 6 loss """bceaftersigmoid""" +112 6 regularizer """no""" +112 6 optimizer """adadelta""" +112 6 training_loop """owa""" +112 6 negative_sampler """basic""" +112 6 evaluator """rankbased""" +112 7 dataset """kinships""" +112 7 model """convkb""" +112 7 loss """bceaftersigmoid""" +112 7 regularizer """no""" +112 7 optimizer """adadelta""" +112 7 training_loop """owa""" +112 7 negative_sampler """basic""" +112 7 evaluator """rankbased""" +112 8 dataset """kinships""" +112 8 model """convkb""" +112 8 loss """bceaftersigmoid""" +112 8 regularizer """no""" +112 8 optimizer """adadelta""" +112 8 training_loop """owa""" +112 8 negative_sampler """basic""" +112 8 evaluator """rankbased""" +112 9 dataset """kinships""" +112 9 model """convkb""" +112 9 loss """bceaftersigmoid""" +112 9 regularizer """no""" +112 9 optimizer """adadelta""" +112 9 training_loop """owa""" +112 9 negative_sampler """basic""" +112 9 evaluator """rankbased""" +112 10 dataset """kinships""" +112 10 model """convkb""" +112 10 loss """bceaftersigmoid""" +112 10 regularizer """no""" +112 10 optimizer """adadelta""" +112 10 training_loop """owa""" +112 10 negative_sampler """basic""" +112 10 evaluator """rankbased""" +112 11 dataset """kinships""" +112 11 model """convkb""" +112 11 loss """bceaftersigmoid""" +112 11 regularizer """no""" +112 11 optimizer """adadelta""" +112 11 training_loop """owa""" +112 11 negative_sampler """basic""" +112 11 evaluator """rankbased""" +112 12 dataset """kinships""" +112 12 model """convkb""" +112 12 loss """bceaftersigmoid""" +112 12 regularizer """no""" +112 12 optimizer """adadelta""" +112 12 training_loop """owa""" +112 12 negative_sampler """basic""" +112 12 
evaluator """rankbased""" +112 13 dataset """kinships""" +112 13 model """convkb""" +112 13 loss """bceaftersigmoid""" +112 13 regularizer """no""" +112 13 optimizer """adadelta""" +112 13 training_loop """owa""" +112 13 negative_sampler """basic""" +112 13 evaluator """rankbased""" +112 14 dataset """kinships""" +112 14 model """convkb""" +112 14 loss """bceaftersigmoid""" +112 14 regularizer """no""" +112 14 optimizer """adadelta""" +112 14 training_loop """owa""" +112 14 negative_sampler """basic""" +112 14 evaluator """rankbased""" +112 15 dataset """kinships""" +112 15 model """convkb""" +112 15 loss """bceaftersigmoid""" +112 15 regularizer """no""" +112 15 optimizer """adadelta""" +112 15 training_loop """owa""" +112 15 negative_sampler """basic""" +112 15 evaluator """rankbased""" +112 16 dataset """kinships""" +112 16 model """convkb""" +112 16 loss """bceaftersigmoid""" +112 16 regularizer """no""" +112 16 optimizer """adadelta""" +112 16 training_loop """owa""" +112 16 negative_sampler """basic""" +112 16 evaluator """rankbased""" +112 17 dataset """kinships""" +112 17 model """convkb""" +112 17 loss """bceaftersigmoid""" +112 17 regularizer """no""" +112 17 optimizer """adadelta""" +112 17 training_loop """owa""" +112 17 negative_sampler """basic""" +112 17 evaluator """rankbased""" +112 18 dataset """kinships""" +112 18 model """convkb""" +112 18 loss """bceaftersigmoid""" +112 18 regularizer """no""" +112 18 optimizer """adadelta""" +112 18 training_loop """owa""" +112 18 negative_sampler """basic""" +112 18 evaluator """rankbased""" +112 19 dataset """kinships""" +112 19 model """convkb""" +112 19 loss """bceaftersigmoid""" +112 19 regularizer """no""" +112 19 optimizer """adadelta""" +112 19 training_loop """owa""" +112 19 negative_sampler """basic""" +112 19 evaluator """rankbased""" +112 20 dataset """kinships""" +112 20 model """convkb""" +112 20 loss """bceaftersigmoid""" +112 20 regularizer """no""" +112 20 optimizer """adadelta""" +112 20 
training_loop """owa""" +112 20 negative_sampler """basic""" +112 20 evaluator """rankbased""" +112 21 dataset """kinships""" +112 21 model """convkb""" +112 21 loss """bceaftersigmoid""" +112 21 regularizer """no""" +112 21 optimizer """adadelta""" +112 21 training_loop """owa""" +112 21 negative_sampler """basic""" +112 21 evaluator """rankbased""" +112 22 dataset """kinships""" +112 22 model """convkb""" +112 22 loss """bceaftersigmoid""" +112 22 regularizer """no""" +112 22 optimizer """adadelta""" +112 22 training_loop """owa""" +112 22 negative_sampler """basic""" +112 22 evaluator """rankbased""" +112 23 dataset """kinships""" +112 23 model """convkb""" +112 23 loss """bceaftersigmoid""" +112 23 regularizer """no""" +112 23 optimizer """adadelta""" +112 23 training_loop """owa""" +112 23 negative_sampler """basic""" +112 23 evaluator """rankbased""" +112 24 dataset """kinships""" +112 24 model """convkb""" +112 24 loss """bceaftersigmoid""" +112 24 regularizer """no""" +112 24 optimizer """adadelta""" +112 24 training_loop """owa""" +112 24 negative_sampler """basic""" +112 24 evaluator """rankbased""" +112 25 dataset """kinships""" +112 25 model """convkb""" +112 25 loss """bceaftersigmoid""" +112 25 regularizer """no""" +112 25 optimizer """adadelta""" +112 25 training_loop """owa""" +112 25 negative_sampler """basic""" +112 25 evaluator """rankbased""" +112 26 dataset """kinships""" +112 26 model """convkb""" +112 26 loss """bceaftersigmoid""" +112 26 regularizer """no""" +112 26 optimizer """adadelta""" +112 26 training_loop """owa""" +112 26 negative_sampler """basic""" +112 26 evaluator """rankbased""" +112 27 dataset """kinships""" +112 27 model """convkb""" +112 27 loss """bceaftersigmoid""" +112 27 regularizer """no""" +112 27 optimizer """adadelta""" +112 27 training_loop """owa""" +112 27 negative_sampler """basic""" +112 27 evaluator """rankbased""" +112 28 dataset """kinships""" +112 28 model """convkb""" +112 28 loss """bceaftersigmoid""" +112 
28 regularizer """no""" +112 28 optimizer """adadelta""" +112 28 training_loop """owa""" +112 28 negative_sampler """basic""" +112 28 evaluator """rankbased""" +112 29 dataset """kinships""" +112 29 model """convkb""" +112 29 loss """bceaftersigmoid""" +112 29 regularizer """no""" +112 29 optimizer """adadelta""" +112 29 training_loop """owa""" +112 29 negative_sampler """basic""" +112 29 evaluator """rankbased""" +112 30 dataset """kinships""" +112 30 model """convkb""" +112 30 loss """bceaftersigmoid""" +112 30 regularizer """no""" +112 30 optimizer """adadelta""" +112 30 training_loop """owa""" +112 30 negative_sampler """basic""" +112 30 evaluator """rankbased""" +113 1 model.embedding_dim 2.0 +113 1 model.hidden_dropout_rate 0.16063109128658312 +113 1 model.num_filters 9.0 +113 1 negative_sampler.num_negs_per_pos 83.0 +113 1 training.batch_size 2.0 +113 2 model.embedding_dim 2.0 +113 2 model.hidden_dropout_rate 0.15328734626604823 +113 2 model.num_filters 7.0 +113 2 negative_sampler.num_negs_per_pos 58.0 +113 2 training.batch_size 1.0 +113 3 model.embedding_dim 1.0 +113 3 model.hidden_dropout_rate 0.4693018011307607 +113 3 model.num_filters 4.0 +113 3 negative_sampler.num_negs_per_pos 57.0 +113 3 training.batch_size 1.0 +113 4 model.embedding_dim 0.0 +113 4 model.hidden_dropout_rate 0.28231584635743273 +113 4 model.num_filters 2.0 +113 4 negative_sampler.num_negs_per_pos 92.0 +113 4 training.batch_size 1.0 +113 5 model.embedding_dim 2.0 +113 5 model.hidden_dropout_rate 0.3736674073638486 +113 5 model.num_filters 8.0 +113 5 negative_sampler.num_negs_per_pos 57.0 +113 5 training.batch_size 0.0 +113 6 model.embedding_dim 1.0 +113 6 model.hidden_dropout_rate 0.18421860792436276 +113 6 model.num_filters 8.0 +113 6 negative_sampler.num_negs_per_pos 63.0 +113 6 training.batch_size 1.0 +113 7 model.embedding_dim 2.0 +113 7 model.hidden_dropout_rate 0.484005355595048 +113 7 model.num_filters 9.0 +113 7 negative_sampler.num_negs_per_pos 59.0 +113 7 training.batch_size 
1.0 +113 8 model.embedding_dim 0.0 +113 8 model.hidden_dropout_rate 0.46644978627062994 +113 8 model.num_filters 8.0 +113 8 negative_sampler.num_negs_per_pos 39.0 +113 8 training.batch_size 2.0 +113 9 model.embedding_dim 1.0 +113 9 model.hidden_dropout_rate 0.4965534909369943 +113 9 model.num_filters 8.0 +113 9 negative_sampler.num_negs_per_pos 35.0 +113 9 training.batch_size 1.0 +113 10 model.embedding_dim 0.0 +113 10 model.hidden_dropout_rate 0.4654199902099603 +113 10 model.num_filters 4.0 +113 10 negative_sampler.num_negs_per_pos 19.0 +113 10 training.batch_size 2.0 +113 11 model.embedding_dim 1.0 +113 11 model.hidden_dropout_rate 0.3802221751093443 +113 11 model.num_filters 0.0 +113 11 negative_sampler.num_negs_per_pos 44.0 +113 11 training.batch_size 1.0 +113 12 model.embedding_dim 0.0 +113 12 model.hidden_dropout_rate 0.14372940503963694 +113 12 model.num_filters 2.0 +113 12 negative_sampler.num_negs_per_pos 50.0 +113 12 training.batch_size 2.0 +113 13 model.embedding_dim 2.0 +113 13 model.hidden_dropout_rate 0.16674715711313706 +113 13 model.num_filters 7.0 +113 13 negative_sampler.num_negs_per_pos 9.0 +113 13 training.batch_size 1.0 +113 14 model.embedding_dim 1.0 +113 14 model.hidden_dropout_rate 0.1087637264170867 +113 14 model.num_filters 7.0 +113 14 negative_sampler.num_negs_per_pos 66.0 +113 14 training.batch_size 1.0 +113 15 model.embedding_dim 2.0 +113 15 model.hidden_dropout_rate 0.2373322176424323 +113 15 model.num_filters 1.0 +113 15 negative_sampler.num_negs_per_pos 81.0 +113 15 training.batch_size 0.0 +113 16 model.embedding_dim 0.0 +113 16 model.hidden_dropout_rate 0.31180065038446736 +113 16 model.num_filters 3.0 +113 16 negative_sampler.num_negs_per_pos 73.0 +113 16 training.batch_size 1.0 +113 17 model.embedding_dim 0.0 +113 17 model.hidden_dropout_rate 0.1052525519475359 +113 17 model.num_filters 5.0 +113 17 negative_sampler.num_negs_per_pos 26.0 +113 17 training.batch_size 2.0 +113 18 model.embedding_dim 1.0 +113 18 
model.hidden_dropout_rate 0.38607552924325705 +113 18 model.num_filters 9.0 +113 18 negative_sampler.num_negs_per_pos 39.0 +113 18 training.batch_size 0.0 +113 19 model.embedding_dim 0.0 +113 19 model.hidden_dropout_rate 0.3614173833293237 +113 19 model.num_filters 1.0 +113 19 negative_sampler.num_negs_per_pos 71.0 +113 19 training.batch_size 0.0 +113 20 model.embedding_dim 0.0 +113 20 model.hidden_dropout_rate 0.24790471748716453 +113 20 model.num_filters 6.0 +113 20 negative_sampler.num_negs_per_pos 65.0 +113 20 training.batch_size 1.0 +113 1 dataset """kinships""" +113 1 model """convkb""" +113 1 loss """softplus""" +113 1 regularizer """no""" +113 1 optimizer """adadelta""" +113 1 training_loop """owa""" +113 1 negative_sampler """basic""" +113 1 evaluator """rankbased""" +113 2 dataset """kinships""" +113 2 model """convkb""" +113 2 loss """softplus""" +113 2 regularizer """no""" +113 2 optimizer """adadelta""" +113 2 training_loop """owa""" +113 2 negative_sampler """basic""" +113 2 evaluator """rankbased""" +113 3 dataset """kinships""" +113 3 model """convkb""" +113 3 loss """softplus""" +113 3 regularizer """no""" +113 3 optimizer """adadelta""" +113 3 training_loop """owa""" +113 3 negative_sampler """basic""" +113 3 evaluator """rankbased""" +113 4 dataset """kinships""" +113 4 model """convkb""" +113 4 loss """softplus""" +113 4 regularizer """no""" +113 4 optimizer """adadelta""" +113 4 training_loop """owa""" +113 4 negative_sampler """basic""" +113 4 evaluator """rankbased""" +113 5 dataset """kinships""" +113 5 model """convkb""" +113 5 loss """softplus""" +113 5 regularizer """no""" +113 5 optimizer """adadelta""" +113 5 training_loop """owa""" +113 5 negative_sampler """basic""" +113 5 evaluator """rankbased""" +113 6 dataset """kinships""" +113 6 model """convkb""" +113 6 loss """softplus""" +113 6 regularizer """no""" +113 6 optimizer """adadelta""" +113 6 training_loop """owa""" +113 6 negative_sampler """basic""" +113 6 evaluator 
"""rankbased""" +113 7 dataset """kinships""" +113 7 model """convkb""" +113 7 loss """softplus""" +113 7 regularizer """no""" +113 7 optimizer """adadelta""" +113 7 training_loop """owa""" +113 7 negative_sampler """basic""" +113 7 evaluator """rankbased""" +113 8 dataset """kinships""" +113 8 model """convkb""" +113 8 loss """softplus""" +113 8 regularizer """no""" +113 8 optimizer """adadelta""" +113 8 training_loop """owa""" +113 8 negative_sampler """basic""" +113 8 evaluator """rankbased""" +113 9 dataset """kinships""" +113 9 model """convkb""" +113 9 loss """softplus""" +113 9 regularizer """no""" +113 9 optimizer """adadelta""" +113 9 training_loop """owa""" +113 9 negative_sampler """basic""" +113 9 evaluator """rankbased""" +113 10 dataset """kinships""" +113 10 model """convkb""" +113 10 loss """softplus""" +113 10 regularizer """no""" +113 10 optimizer """adadelta""" +113 10 training_loop """owa""" +113 10 negative_sampler """basic""" +113 10 evaluator """rankbased""" +113 11 dataset """kinships""" +113 11 model """convkb""" +113 11 loss """softplus""" +113 11 regularizer """no""" +113 11 optimizer """adadelta""" +113 11 training_loop """owa""" +113 11 negative_sampler """basic""" +113 11 evaluator """rankbased""" +113 12 dataset """kinships""" +113 12 model """convkb""" +113 12 loss """softplus""" +113 12 regularizer """no""" +113 12 optimizer """adadelta""" +113 12 training_loop """owa""" +113 12 negative_sampler """basic""" +113 12 evaluator """rankbased""" +113 13 dataset """kinships""" +113 13 model """convkb""" +113 13 loss """softplus""" +113 13 regularizer """no""" +113 13 optimizer """adadelta""" +113 13 training_loop """owa""" +113 13 negative_sampler """basic""" +113 13 evaluator """rankbased""" +113 14 dataset """kinships""" +113 14 model """convkb""" +113 14 loss """softplus""" +113 14 regularizer """no""" +113 14 optimizer """adadelta""" +113 14 training_loop """owa""" +113 14 negative_sampler """basic""" +113 14 evaluator """rankbased""" 
+113 15 dataset """kinships""" +113 15 model """convkb""" +113 15 loss """softplus""" +113 15 regularizer """no""" +113 15 optimizer """adadelta""" +113 15 training_loop """owa""" +113 15 negative_sampler """basic""" +113 15 evaluator """rankbased""" +113 16 dataset """kinships""" +113 16 model """convkb""" +113 16 loss """softplus""" +113 16 regularizer """no""" +113 16 optimizer """adadelta""" +113 16 training_loop """owa""" +113 16 negative_sampler """basic""" +113 16 evaluator """rankbased""" +113 17 dataset """kinships""" +113 17 model """convkb""" +113 17 loss """softplus""" +113 17 regularizer """no""" +113 17 optimizer """adadelta""" +113 17 training_loop """owa""" +113 17 negative_sampler """basic""" +113 17 evaluator """rankbased""" +113 18 dataset """kinships""" +113 18 model """convkb""" +113 18 loss """softplus""" +113 18 regularizer """no""" +113 18 optimizer """adadelta""" +113 18 training_loop """owa""" +113 18 negative_sampler """basic""" +113 18 evaluator """rankbased""" +113 19 dataset """kinships""" +113 19 model """convkb""" +113 19 loss """softplus""" +113 19 regularizer """no""" +113 19 optimizer """adadelta""" +113 19 training_loop """owa""" +113 19 negative_sampler """basic""" +113 19 evaluator """rankbased""" +113 20 dataset """kinships""" +113 20 model """convkb""" +113 20 loss """softplus""" +113 20 regularizer """no""" +113 20 optimizer """adadelta""" +113 20 training_loop """owa""" +113 20 negative_sampler """basic""" +113 20 evaluator """rankbased""" +114 1 model.embedding_dim 2.0 +114 1 model.hidden_dropout_rate 0.33892665189165655 +114 1 model.num_filters 8.0 +114 1 negative_sampler.num_negs_per_pos 55.0 +114 1 training.batch_size 0.0 +114 2 model.embedding_dim 0.0 +114 2 model.hidden_dropout_rate 0.33416405025313456 +114 2 model.num_filters 1.0 +114 2 negative_sampler.num_negs_per_pos 61.0 +114 2 training.batch_size 0.0 +114 3 model.embedding_dim 0.0 +114 3 model.hidden_dropout_rate 0.3465157144443487 +114 3 model.num_filters 9.0 
+114 3 negative_sampler.num_negs_per_pos 21.0 +114 3 training.batch_size 0.0 +114 4 model.embedding_dim 0.0 +114 4 model.hidden_dropout_rate 0.13162958993545537 +114 4 model.num_filters 9.0 +114 4 negative_sampler.num_negs_per_pos 54.0 +114 4 training.batch_size 1.0 +114 5 model.embedding_dim 2.0 +114 5 model.hidden_dropout_rate 0.13284403745493445 +114 5 model.num_filters 6.0 +114 5 negative_sampler.num_negs_per_pos 4.0 +114 5 training.batch_size 1.0 +114 6 model.embedding_dim 1.0 +114 6 model.hidden_dropout_rate 0.36434141949087306 +114 6 model.num_filters 7.0 +114 6 negative_sampler.num_negs_per_pos 77.0 +114 6 training.batch_size 2.0 +114 7 model.embedding_dim 1.0 +114 7 model.hidden_dropout_rate 0.17425567825351998 +114 7 model.num_filters 1.0 +114 7 negative_sampler.num_negs_per_pos 67.0 +114 7 training.batch_size 1.0 +114 8 model.embedding_dim 0.0 +114 8 model.hidden_dropout_rate 0.34539406195514993 +114 8 model.num_filters 1.0 +114 8 negative_sampler.num_negs_per_pos 45.0 +114 8 training.batch_size 2.0 +114 9 model.embedding_dim 1.0 +114 9 model.hidden_dropout_rate 0.42744054066503423 +114 9 model.num_filters 2.0 +114 9 negative_sampler.num_negs_per_pos 31.0 +114 9 training.batch_size 0.0 +114 10 model.embedding_dim 0.0 +114 10 model.hidden_dropout_rate 0.26793098192952225 +114 10 model.num_filters 0.0 +114 10 negative_sampler.num_negs_per_pos 86.0 +114 10 training.batch_size 0.0 +114 11 model.embedding_dim 0.0 +114 11 model.hidden_dropout_rate 0.23385902474695208 +114 11 model.num_filters 8.0 +114 11 negative_sampler.num_negs_per_pos 83.0 +114 11 training.batch_size 2.0 +114 12 model.embedding_dim 1.0 +114 12 model.hidden_dropout_rate 0.10896644109956055 +114 12 model.num_filters 4.0 +114 12 negative_sampler.num_negs_per_pos 45.0 +114 12 training.batch_size 1.0 +114 13 model.embedding_dim 1.0 +114 13 model.hidden_dropout_rate 0.4106429283801628 +114 13 model.num_filters 9.0 +114 13 negative_sampler.num_negs_per_pos 99.0 +114 13 training.batch_size 1.0 +114 
14 model.embedding_dim 1.0 +114 14 model.hidden_dropout_rate 0.2370340999903439 +114 14 model.num_filters 3.0 +114 14 negative_sampler.num_negs_per_pos 91.0 +114 14 training.batch_size 1.0 +114 15 model.embedding_dim 2.0 +114 15 model.hidden_dropout_rate 0.20986829156636616 +114 15 model.num_filters 2.0 +114 15 negative_sampler.num_negs_per_pos 56.0 +114 15 training.batch_size 0.0 +114 16 model.embedding_dim 0.0 +114 16 model.hidden_dropout_rate 0.16336634163898855 +114 16 model.num_filters 0.0 +114 16 negative_sampler.num_negs_per_pos 6.0 +114 16 training.batch_size 1.0 +114 17 model.embedding_dim 1.0 +114 17 model.hidden_dropout_rate 0.2846006875062688 +114 17 model.num_filters 5.0 +114 17 negative_sampler.num_negs_per_pos 84.0 +114 17 training.batch_size 0.0 +114 18 model.embedding_dim 1.0 +114 18 model.hidden_dropout_rate 0.43598755224650454 +114 18 model.num_filters 1.0 +114 18 negative_sampler.num_negs_per_pos 98.0 +114 18 training.batch_size 1.0 +114 19 model.embedding_dim 0.0 +114 19 model.hidden_dropout_rate 0.32540001089763515 +114 19 model.num_filters 0.0 +114 19 negative_sampler.num_negs_per_pos 40.0 +114 19 training.batch_size 1.0 +114 20 model.embedding_dim 0.0 +114 20 model.hidden_dropout_rate 0.3428772376081146 +114 20 model.num_filters 1.0 +114 20 negative_sampler.num_negs_per_pos 81.0 +114 20 training.batch_size 2.0 +114 21 model.embedding_dim 1.0 +114 21 model.hidden_dropout_rate 0.21557522193544312 +114 21 model.num_filters 0.0 +114 21 negative_sampler.num_negs_per_pos 94.0 +114 21 training.batch_size 0.0 +114 22 model.embedding_dim 0.0 +114 22 model.hidden_dropout_rate 0.20764852928596264 +114 22 model.num_filters 5.0 +114 22 negative_sampler.num_negs_per_pos 85.0 +114 22 training.batch_size 0.0 +114 23 model.embedding_dim 0.0 +114 23 model.hidden_dropout_rate 0.30363984449890613 +114 23 model.num_filters 0.0 +114 23 negative_sampler.num_negs_per_pos 59.0 +114 23 training.batch_size 2.0 +114 24 model.embedding_dim 1.0 +114 24 
model.hidden_dropout_rate 0.17338666254340135 +114 24 model.num_filters 4.0 +114 24 negative_sampler.num_negs_per_pos 22.0 +114 24 training.batch_size 1.0 +114 25 model.embedding_dim 2.0 +114 25 model.hidden_dropout_rate 0.2673591068284079 +114 25 model.num_filters 4.0 +114 25 negative_sampler.num_negs_per_pos 62.0 +114 25 training.batch_size 1.0 +114 26 model.embedding_dim 0.0 +114 26 model.hidden_dropout_rate 0.16562667524612346 +114 26 model.num_filters 5.0 +114 26 negative_sampler.num_negs_per_pos 65.0 +114 26 training.batch_size 0.0 +114 27 model.embedding_dim 1.0 +114 27 model.hidden_dropout_rate 0.3709479391582067 +114 27 model.num_filters 7.0 +114 27 negative_sampler.num_negs_per_pos 53.0 +114 27 training.batch_size 0.0 +114 28 model.embedding_dim 0.0 +114 28 model.hidden_dropout_rate 0.3162624876999381 +114 28 model.num_filters 6.0 +114 28 negative_sampler.num_negs_per_pos 95.0 +114 28 training.batch_size 0.0 +114 29 model.embedding_dim 1.0 +114 29 model.hidden_dropout_rate 0.2461217766514584 +114 29 model.num_filters 0.0 +114 29 negative_sampler.num_negs_per_pos 46.0 +114 29 training.batch_size 1.0 +114 30 model.embedding_dim 2.0 +114 30 model.hidden_dropout_rate 0.4041610573361397 +114 30 model.num_filters 0.0 +114 30 negative_sampler.num_negs_per_pos 16.0 +114 30 training.batch_size 1.0 +114 31 model.embedding_dim 1.0 +114 31 model.hidden_dropout_rate 0.3606355739482715 +114 31 model.num_filters 6.0 +114 31 negative_sampler.num_negs_per_pos 32.0 +114 31 training.batch_size 2.0 +114 32 model.embedding_dim 1.0 +114 32 model.hidden_dropout_rate 0.4442507521537472 +114 32 model.num_filters 8.0 +114 32 negative_sampler.num_negs_per_pos 48.0 +114 32 training.batch_size 1.0 +114 33 model.embedding_dim 2.0 +114 33 model.hidden_dropout_rate 0.37384787830453503 +114 33 model.num_filters 7.0 +114 33 negative_sampler.num_negs_per_pos 18.0 +114 33 training.batch_size 1.0 +114 34 model.embedding_dim 1.0 +114 34 model.hidden_dropout_rate 0.1272141529268155 +114 34 
model.num_filters 4.0 +114 34 negative_sampler.num_negs_per_pos 3.0 +114 34 training.batch_size 1.0 +114 35 model.embedding_dim 0.0 +114 35 model.hidden_dropout_rate 0.41247264735507716 +114 35 model.num_filters 8.0 +114 35 negative_sampler.num_negs_per_pos 86.0 +114 35 training.batch_size 2.0 +114 36 model.embedding_dim 1.0 +114 36 model.hidden_dropout_rate 0.40403923772765826 +114 36 model.num_filters 7.0 +114 36 negative_sampler.num_negs_per_pos 83.0 +114 36 training.batch_size 2.0 +114 37 model.embedding_dim 1.0 +114 37 model.hidden_dropout_rate 0.4516333953283247 +114 37 model.num_filters 8.0 +114 37 negative_sampler.num_negs_per_pos 89.0 +114 37 training.batch_size 0.0 +114 38 model.embedding_dim 1.0 +114 38 model.hidden_dropout_rate 0.15402657200338876 +114 38 model.num_filters 2.0 +114 38 negative_sampler.num_negs_per_pos 52.0 +114 38 training.batch_size 2.0 +114 39 model.embedding_dim 2.0 +114 39 model.hidden_dropout_rate 0.13693257494472297 +114 39 model.num_filters 7.0 +114 39 negative_sampler.num_negs_per_pos 28.0 +114 39 training.batch_size 1.0 +114 40 model.embedding_dim 1.0 +114 40 model.hidden_dropout_rate 0.16821017772826147 +114 40 model.num_filters 5.0 +114 40 negative_sampler.num_negs_per_pos 69.0 +114 40 training.batch_size 2.0 +114 41 model.embedding_dim 0.0 +114 41 model.hidden_dropout_rate 0.15678331448146623 +114 41 model.num_filters 4.0 +114 41 negative_sampler.num_negs_per_pos 92.0 +114 41 training.batch_size 0.0 +114 42 model.embedding_dim 2.0 +114 42 model.hidden_dropout_rate 0.40192205842962225 +114 42 model.num_filters 9.0 +114 42 negative_sampler.num_negs_per_pos 32.0 +114 42 training.batch_size 1.0 +114 43 model.embedding_dim 1.0 +114 43 model.hidden_dropout_rate 0.3973064374221814 +114 43 model.num_filters 5.0 +114 43 negative_sampler.num_negs_per_pos 26.0 +114 43 training.batch_size 1.0 +114 44 model.embedding_dim 0.0 +114 44 model.hidden_dropout_rate 0.21839789936850426 +114 44 model.num_filters 2.0 +114 44 
negative_sampler.num_negs_per_pos 48.0 +114 44 training.batch_size 0.0 +114 45 model.embedding_dim 0.0 +114 45 model.hidden_dropout_rate 0.3716371454648506 +114 45 model.num_filters 4.0 +114 45 negative_sampler.num_negs_per_pos 65.0 +114 45 training.batch_size 2.0 +114 46 model.embedding_dim 2.0 +114 46 model.hidden_dropout_rate 0.14838103191486543 +114 46 model.num_filters 1.0 +114 46 negative_sampler.num_negs_per_pos 88.0 +114 46 training.batch_size 2.0 +114 47 model.embedding_dim 1.0 +114 47 model.hidden_dropout_rate 0.34648584771241564 +114 47 model.num_filters 6.0 +114 47 negative_sampler.num_negs_per_pos 41.0 +114 47 training.batch_size 0.0 +114 48 model.embedding_dim 0.0 +114 48 model.hidden_dropout_rate 0.3842714634809171 +114 48 model.num_filters 8.0 +114 48 negative_sampler.num_negs_per_pos 23.0 +114 48 training.batch_size 2.0 +114 49 model.embedding_dim 1.0 +114 49 model.hidden_dropout_rate 0.20231738128280907 +114 49 model.num_filters 1.0 +114 49 negative_sampler.num_negs_per_pos 66.0 +114 49 training.batch_size 2.0 +114 50 model.embedding_dim 2.0 +114 50 model.hidden_dropout_rate 0.19195023645647835 +114 50 model.num_filters 9.0 +114 50 negative_sampler.num_negs_per_pos 48.0 +114 50 training.batch_size 0.0 +114 51 model.embedding_dim 1.0 +114 51 model.hidden_dropout_rate 0.4410839044881467 +114 51 model.num_filters 6.0 +114 51 negative_sampler.num_negs_per_pos 75.0 +114 51 training.batch_size 0.0 +114 52 model.embedding_dim 2.0 +114 52 model.hidden_dropout_rate 0.4152652008831218 +114 52 model.num_filters 8.0 +114 52 negative_sampler.num_negs_per_pos 1.0 +114 52 training.batch_size 0.0 +114 53 model.embedding_dim 2.0 +114 53 model.hidden_dropout_rate 0.404062903724998 +114 53 model.num_filters 1.0 +114 53 negative_sampler.num_negs_per_pos 11.0 +114 53 training.batch_size 0.0 +114 54 model.embedding_dim 1.0 +114 54 model.hidden_dropout_rate 0.40724820465462896 +114 54 model.num_filters 4.0 +114 54 negative_sampler.num_negs_per_pos 40.0 +114 54 
training.batch_size 2.0 +114 55 model.embedding_dim 1.0 +114 55 model.hidden_dropout_rate 0.20257329120678383 +114 55 model.num_filters 9.0 +114 55 negative_sampler.num_negs_per_pos 30.0 +114 55 training.batch_size 2.0 +114 56 model.embedding_dim 1.0 +114 56 model.hidden_dropout_rate 0.3187022307640135 +114 56 model.num_filters 6.0 +114 56 negative_sampler.num_negs_per_pos 84.0 +114 56 training.batch_size 2.0 +114 57 model.embedding_dim 0.0 +114 57 model.hidden_dropout_rate 0.19096232415580322 +114 57 model.num_filters 5.0 +114 57 negative_sampler.num_negs_per_pos 52.0 +114 57 training.batch_size 1.0 +114 58 model.embedding_dim 0.0 +114 58 model.hidden_dropout_rate 0.4559718590620194 +114 58 model.num_filters 9.0 +114 58 negative_sampler.num_negs_per_pos 55.0 +114 58 training.batch_size 1.0 +114 59 model.embedding_dim 1.0 +114 59 model.hidden_dropout_rate 0.3561027410660832 +114 59 model.num_filters 4.0 +114 59 negative_sampler.num_negs_per_pos 3.0 +114 59 training.batch_size 2.0 +114 60 model.embedding_dim 0.0 +114 60 model.hidden_dropout_rate 0.40441187204668727 +114 60 model.num_filters 1.0 +114 60 negative_sampler.num_negs_per_pos 38.0 +114 60 training.batch_size 0.0 +114 61 model.embedding_dim 1.0 +114 61 model.hidden_dropout_rate 0.17078186746489504 +114 61 model.num_filters 4.0 +114 61 negative_sampler.num_negs_per_pos 89.0 +114 61 training.batch_size 2.0 +114 62 model.embedding_dim 2.0 +114 62 model.hidden_dropout_rate 0.4414533334144962 +114 62 model.num_filters 5.0 +114 62 negative_sampler.num_negs_per_pos 43.0 +114 62 training.batch_size 1.0 +114 63 model.embedding_dim 1.0 +114 63 model.hidden_dropout_rate 0.2528327526753072 +114 63 model.num_filters 5.0 +114 63 negative_sampler.num_negs_per_pos 5.0 +114 63 training.batch_size 1.0 +114 64 model.embedding_dim 0.0 +114 64 model.hidden_dropout_rate 0.48785681983204987 +114 64 model.num_filters 3.0 +114 64 negative_sampler.num_negs_per_pos 71.0 +114 64 training.batch_size 1.0 +114 65 model.embedding_dim 1.0 
+114 65 model.hidden_dropout_rate 0.195800123740115 +114 65 model.num_filters 8.0 +114 65 negative_sampler.num_negs_per_pos 91.0 +114 65 training.batch_size 1.0 +114 1 dataset """kinships""" +114 1 model """convkb""" +114 1 loss """bceaftersigmoid""" +114 1 regularizer """no""" +114 1 optimizer """adadelta""" +114 1 training_loop """owa""" +114 1 negative_sampler """basic""" +114 1 evaluator """rankbased""" +114 2 dataset """kinships""" +114 2 model """convkb""" +114 2 loss """bceaftersigmoid""" +114 2 regularizer """no""" +114 2 optimizer """adadelta""" +114 2 training_loop """owa""" +114 2 negative_sampler """basic""" +114 2 evaluator """rankbased""" +114 3 dataset """kinships""" +114 3 model """convkb""" +114 3 loss """bceaftersigmoid""" +114 3 regularizer """no""" +114 3 optimizer """adadelta""" +114 3 training_loop """owa""" +114 3 negative_sampler """basic""" +114 3 evaluator """rankbased""" +114 4 dataset """kinships""" +114 4 model """convkb""" +114 4 loss """bceaftersigmoid""" +114 4 regularizer """no""" +114 4 optimizer """adadelta""" +114 4 training_loop """owa""" +114 4 negative_sampler """basic""" +114 4 evaluator """rankbased""" +114 5 dataset """kinships""" +114 5 model """convkb""" +114 5 loss """bceaftersigmoid""" +114 5 regularizer """no""" +114 5 optimizer """adadelta""" +114 5 training_loop """owa""" +114 5 negative_sampler """basic""" +114 5 evaluator """rankbased""" +114 6 dataset """kinships""" +114 6 model """convkb""" +114 6 loss """bceaftersigmoid""" +114 6 regularizer """no""" +114 6 optimizer """adadelta""" +114 6 training_loop """owa""" +114 6 negative_sampler """basic""" +114 6 evaluator """rankbased""" +114 7 dataset """kinships""" +114 7 model """convkb""" +114 7 loss """bceaftersigmoid""" +114 7 regularizer """no""" +114 7 optimizer """adadelta""" +114 7 training_loop """owa""" +114 7 negative_sampler """basic""" +114 7 evaluator """rankbased""" +114 8 dataset """kinships""" +114 8 model """convkb""" +114 8 loss 
"""bceaftersigmoid""" +114 8 regularizer """no""" +114 8 optimizer """adadelta""" +114 8 training_loop """owa""" +114 8 negative_sampler """basic""" +114 8 evaluator """rankbased""" +114 9 dataset """kinships""" +114 9 model """convkb""" +114 9 loss """bceaftersigmoid""" +114 9 regularizer """no""" +114 9 optimizer """adadelta""" +114 9 training_loop """owa""" +114 9 negative_sampler """basic""" +114 9 evaluator """rankbased""" +114 10 dataset """kinships""" +114 10 model """convkb""" +114 10 loss """bceaftersigmoid""" +114 10 regularizer """no""" +114 10 optimizer """adadelta""" +114 10 training_loop """owa""" +114 10 negative_sampler """basic""" +114 10 evaluator """rankbased""" +114 11 dataset """kinships""" +114 11 model """convkb""" +114 11 loss """bceaftersigmoid""" +114 11 regularizer """no""" +114 11 optimizer """adadelta""" +114 11 training_loop """owa""" +114 11 negative_sampler """basic""" +114 11 evaluator """rankbased""" +114 12 dataset """kinships""" +114 12 model """convkb""" +114 12 loss """bceaftersigmoid""" +114 12 regularizer """no""" +114 12 optimizer """adadelta""" +114 12 training_loop """owa""" +114 12 negative_sampler """basic""" +114 12 evaluator """rankbased""" +114 13 dataset """kinships""" +114 13 model """convkb""" +114 13 loss """bceaftersigmoid""" +114 13 regularizer """no""" +114 13 optimizer """adadelta""" +114 13 training_loop """owa""" +114 13 negative_sampler """basic""" +114 13 evaluator """rankbased""" +114 14 dataset """kinships""" +114 14 model """convkb""" +114 14 loss """bceaftersigmoid""" +114 14 regularizer """no""" +114 14 optimizer """adadelta""" +114 14 training_loop """owa""" +114 14 negative_sampler """basic""" +114 14 evaluator """rankbased""" +114 15 dataset """kinships""" +114 15 model """convkb""" +114 15 loss """bceaftersigmoid""" +114 15 regularizer """no""" +114 15 optimizer """adadelta""" +114 15 training_loop """owa""" +114 15 negative_sampler """basic""" +114 15 evaluator """rankbased""" +114 16 dataset 
"""kinships""" +114 16 model """convkb""" +114 16 loss """bceaftersigmoid""" +114 16 regularizer """no""" +114 16 optimizer """adadelta""" +114 16 training_loop """owa""" +114 16 negative_sampler """basic""" +114 16 evaluator """rankbased""" +114 17 dataset """kinships""" +114 17 model """convkb""" +114 17 loss """bceaftersigmoid""" +114 17 regularizer """no""" +114 17 optimizer """adadelta""" +114 17 training_loop """owa""" +114 17 negative_sampler """basic""" +114 17 evaluator """rankbased""" +114 18 dataset """kinships""" +114 18 model """convkb""" +114 18 loss """bceaftersigmoid""" +114 18 regularizer """no""" +114 18 optimizer """adadelta""" +114 18 training_loop """owa""" +114 18 negative_sampler """basic""" +114 18 evaluator """rankbased""" +114 19 dataset """kinships""" +114 19 model """convkb""" +114 19 loss """bceaftersigmoid""" +114 19 regularizer """no""" +114 19 optimizer """adadelta""" +114 19 training_loop """owa""" +114 19 negative_sampler """basic""" +114 19 evaluator """rankbased""" +114 20 dataset """kinships""" +114 20 model """convkb""" +114 20 loss """bceaftersigmoid""" +114 20 regularizer """no""" +114 20 optimizer """adadelta""" +114 20 training_loop """owa""" +114 20 negative_sampler """basic""" +114 20 evaluator """rankbased""" +114 21 dataset """kinships""" +114 21 model """convkb""" +114 21 loss """bceaftersigmoid""" +114 21 regularizer """no""" +114 21 optimizer """adadelta""" +114 21 training_loop """owa""" +114 21 negative_sampler """basic""" +114 21 evaluator """rankbased""" +114 22 dataset """kinships""" +114 22 model """convkb""" +114 22 loss """bceaftersigmoid""" +114 22 regularizer """no""" +114 22 optimizer """adadelta""" +114 22 training_loop """owa""" +114 22 negative_sampler """basic""" +114 22 evaluator """rankbased""" +114 23 dataset """kinships""" +114 23 model """convkb""" +114 23 loss """bceaftersigmoid""" +114 23 regularizer """no""" +114 23 optimizer """adadelta""" +114 23 training_loop """owa""" +114 23 
negative_sampler """basic""" +114 23 evaluator """rankbased""" +114 24 dataset """kinships""" +114 24 model """convkb""" +114 24 loss """bceaftersigmoid""" +114 24 regularizer """no""" +114 24 optimizer """adadelta""" +114 24 training_loop """owa""" +114 24 negative_sampler """basic""" +114 24 evaluator """rankbased""" +114 25 dataset """kinships""" +114 25 model """convkb""" +114 25 loss """bceaftersigmoid""" +114 25 regularizer """no""" +114 25 optimizer """adadelta""" +114 25 training_loop """owa""" +114 25 negative_sampler """basic""" +114 25 evaluator """rankbased""" +114 26 dataset """kinships""" +114 26 model """convkb""" +114 26 loss """bceaftersigmoid""" +114 26 regularizer """no""" +114 26 optimizer """adadelta""" +114 26 training_loop """owa""" +114 26 negative_sampler """basic""" +114 26 evaluator """rankbased""" +114 27 dataset """kinships""" +114 27 model """convkb""" +114 27 loss """bceaftersigmoid""" +114 27 regularizer """no""" +114 27 optimizer """adadelta""" +114 27 training_loop """owa""" +114 27 negative_sampler """basic""" +114 27 evaluator """rankbased""" +114 28 dataset """kinships""" +114 28 model """convkb""" +114 28 loss """bceaftersigmoid""" +114 28 regularizer """no""" +114 28 optimizer """adadelta""" +114 28 training_loop """owa""" +114 28 negative_sampler """basic""" +114 28 evaluator """rankbased""" +114 29 dataset """kinships""" +114 29 model """convkb""" +114 29 loss """bceaftersigmoid""" +114 29 regularizer """no""" +114 29 optimizer """adadelta""" +114 29 training_loop """owa""" +114 29 negative_sampler """basic""" +114 29 evaluator """rankbased""" +114 30 dataset """kinships""" +114 30 model """convkb""" +114 30 loss """bceaftersigmoid""" +114 30 regularizer """no""" +114 30 optimizer """adadelta""" +114 30 training_loop """owa""" +114 30 negative_sampler """basic""" +114 30 evaluator """rankbased""" +114 31 dataset """kinships""" +114 31 model """convkb""" +114 31 loss """bceaftersigmoid""" +114 31 regularizer """no""" +114 31 
optimizer """adadelta""" +114 31 training_loop """owa""" +114 31 negative_sampler """basic""" +114 31 evaluator """rankbased""" +114 32 dataset """kinships""" +114 32 model """convkb""" +114 32 loss """bceaftersigmoid""" +114 32 regularizer """no""" +114 32 optimizer """adadelta""" +114 32 training_loop """owa""" +114 32 negative_sampler """basic""" +114 32 evaluator """rankbased""" +114 33 dataset """kinships""" +114 33 model """convkb""" +114 33 loss """bceaftersigmoid""" +114 33 regularizer """no""" +114 33 optimizer """adadelta""" +114 33 training_loop """owa""" +114 33 negative_sampler """basic""" +114 33 evaluator """rankbased""" +114 34 dataset """kinships""" +114 34 model """convkb""" +114 34 loss """bceaftersigmoid""" +114 34 regularizer """no""" +114 34 optimizer """adadelta""" +114 34 training_loop """owa""" +114 34 negative_sampler """basic""" +114 34 evaluator """rankbased""" +114 35 dataset """kinships""" +114 35 model """convkb""" +114 35 loss """bceaftersigmoid""" +114 35 regularizer """no""" +114 35 optimizer """adadelta""" +114 35 training_loop """owa""" +114 35 negative_sampler """basic""" +114 35 evaluator """rankbased""" +114 36 dataset """kinships""" +114 36 model """convkb""" +114 36 loss """bceaftersigmoid""" +114 36 regularizer """no""" +114 36 optimizer """adadelta""" +114 36 training_loop """owa""" +114 36 negative_sampler """basic""" +114 36 evaluator """rankbased""" +114 37 dataset """kinships""" +114 37 model """convkb""" +114 37 loss """bceaftersigmoid""" +114 37 regularizer """no""" +114 37 optimizer """adadelta""" +114 37 training_loop """owa""" +114 37 negative_sampler """basic""" +114 37 evaluator """rankbased""" +114 38 dataset """kinships""" +114 38 model """convkb""" +114 38 loss """bceaftersigmoid""" +114 38 regularizer """no""" +114 38 optimizer """adadelta""" +114 38 training_loop """owa""" +114 38 negative_sampler """basic""" +114 38 evaluator """rankbased""" +114 39 dataset """kinships""" +114 39 model """convkb""" +114 39 
loss """bceaftersigmoid""" +114 39 regularizer """no""" +114 39 optimizer """adadelta""" +114 39 training_loop """owa""" +114 39 negative_sampler """basic""" +114 39 evaluator """rankbased""" +114 40 dataset """kinships""" +114 40 model """convkb""" +114 40 loss """bceaftersigmoid""" +114 40 regularizer """no""" +114 40 optimizer """adadelta""" +114 40 training_loop """owa""" +114 40 negative_sampler """basic""" +114 40 evaluator """rankbased""" +114 41 dataset """kinships""" +114 41 model """convkb""" +114 41 loss """bceaftersigmoid""" +114 41 regularizer """no""" +114 41 optimizer """adadelta""" +114 41 training_loop """owa""" +114 41 negative_sampler """basic""" +114 41 evaluator """rankbased""" +114 42 dataset """kinships""" +114 42 model """convkb""" +114 42 loss """bceaftersigmoid""" +114 42 regularizer """no""" +114 42 optimizer """adadelta""" +114 42 training_loop """owa""" +114 42 negative_sampler """basic""" +114 42 evaluator """rankbased""" +114 43 dataset """kinships""" +114 43 model """convkb""" +114 43 loss """bceaftersigmoid""" +114 43 regularizer """no""" +114 43 optimizer """adadelta""" +114 43 training_loop """owa""" +114 43 negative_sampler """basic""" +114 43 evaluator """rankbased""" +114 44 dataset """kinships""" +114 44 model """convkb""" +114 44 loss """bceaftersigmoid""" +114 44 regularizer """no""" +114 44 optimizer """adadelta""" +114 44 training_loop """owa""" +114 44 negative_sampler """basic""" +114 44 evaluator """rankbased""" +114 45 dataset """kinships""" +114 45 model """convkb""" +114 45 loss """bceaftersigmoid""" +114 45 regularizer """no""" +114 45 optimizer """adadelta""" +114 45 training_loop """owa""" +114 45 negative_sampler """basic""" +114 45 evaluator """rankbased""" +114 46 dataset """kinships""" +114 46 model """convkb""" +114 46 loss """bceaftersigmoid""" +114 46 regularizer """no""" +114 46 optimizer """adadelta""" +114 46 training_loop """owa""" +114 46 negative_sampler """basic""" +114 46 evaluator """rankbased""" 
+114 47 dataset """kinships""" +114 47 model """convkb""" +114 47 loss """bceaftersigmoid""" +114 47 regularizer """no""" +114 47 optimizer """adadelta""" +114 47 training_loop """owa""" +114 47 negative_sampler """basic""" +114 47 evaluator """rankbased""" +114 48 dataset """kinships""" +114 48 model """convkb""" +114 48 loss """bceaftersigmoid""" +114 48 regularizer """no""" +114 48 optimizer """adadelta""" +114 48 training_loop """owa""" +114 48 negative_sampler """basic""" +114 48 evaluator """rankbased""" +114 49 dataset """kinships""" +114 49 model """convkb""" +114 49 loss """bceaftersigmoid""" +114 49 regularizer """no""" +114 49 optimizer """adadelta""" +114 49 training_loop """owa""" +114 49 negative_sampler """basic""" +114 49 evaluator """rankbased""" +114 50 dataset """kinships""" +114 50 model """convkb""" +114 50 loss """bceaftersigmoid""" +114 50 regularizer """no""" +114 50 optimizer """adadelta""" +114 50 training_loop """owa""" +114 50 negative_sampler """basic""" +114 50 evaluator """rankbased""" +114 51 dataset """kinships""" +114 51 model """convkb""" +114 51 loss """bceaftersigmoid""" +114 51 regularizer """no""" +114 51 optimizer """adadelta""" +114 51 training_loop """owa""" +114 51 negative_sampler """basic""" +114 51 evaluator """rankbased""" +114 52 dataset """kinships""" +114 52 model """convkb""" +114 52 loss """bceaftersigmoid""" +114 52 regularizer """no""" +114 52 optimizer """adadelta""" +114 52 training_loop """owa""" +114 52 negative_sampler """basic""" +114 52 evaluator """rankbased""" +114 53 dataset """kinships""" +114 53 model """convkb""" +114 53 loss """bceaftersigmoid""" +114 53 regularizer """no""" +114 53 optimizer """adadelta""" +114 53 training_loop """owa""" +114 53 negative_sampler """basic""" +114 53 evaluator """rankbased""" +114 54 dataset """kinships""" +114 54 model """convkb""" +114 54 loss """bceaftersigmoid""" +114 54 regularizer """no""" +114 54 optimizer """adadelta""" +114 54 training_loop """owa""" +114 
54 negative_sampler """basic""" +114 54 evaluator """rankbased""" +114 55 dataset """kinships""" +114 55 model """convkb""" +114 55 loss """bceaftersigmoid""" +114 55 regularizer """no""" +114 55 optimizer """adadelta""" +114 55 training_loop """owa""" +114 55 negative_sampler """basic""" +114 55 evaluator """rankbased""" +114 56 dataset """kinships""" +114 56 model """convkb""" +114 56 loss """bceaftersigmoid""" +114 56 regularizer """no""" +114 56 optimizer """adadelta""" +114 56 training_loop """owa""" +114 56 negative_sampler """basic""" +114 56 evaluator """rankbased""" +114 57 dataset """kinships""" +114 57 model """convkb""" +114 57 loss """bceaftersigmoid""" +114 57 regularizer """no""" +114 57 optimizer """adadelta""" +114 57 training_loop """owa""" +114 57 negative_sampler """basic""" +114 57 evaluator """rankbased""" +114 58 dataset """kinships""" +114 58 model """convkb""" +114 58 loss """bceaftersigmoid""" +114 58 regularizer """no""" +114 58 optimizer """adadelta""" +114 58 training_loop """owa""" +114 58 negative_sampler """basic""" +114 58 evaluator """rankbased""" +114 59 dataset """kinships""" +114 59 model """convkb""" +114 59 loss """bceaftersigmoid""" +114 59 regularizer """no""" +114 59 optimizer """adadelta""" +114 59 training_loop """owa""" +114 59 negative_sampler """basic""" +114 59 evaluator """rankbased""" +114 60 dataset """kinships""" +114 60 model """convkb""" +114 60 loss """bceaftersigmoid""" +114 60 regularizer """no""" +114 60 optimizer """adadelta""" +114 60 training_loop """owa""" +114 60 negative_sampler """basic""" +114 60 evaluator """rankbased""" +114 61 dataset """kinships""" +114 61 model """convkb""" +114 61 loss """bceaftersigmoid""" +114 61 regularizer """no""" +114 61 optimizer """adadelta""" +114 61 training_loop """owa""" +114 61 negative_sampler """basic""" +114 61 evaluator """rankbased""" +114 62 dataset """kinships""" +114 62 model """convkb""" +114 62 loss """bceaftersigmoid""" +114 62 regularizer """no""" +114 
62 optimizer """adadelta""" +114 62 training_loop """owa""" +114 62 negative_sampler """basic""" +114 62 evaluator """rankbased""" +114 63 dataset """kinships""" +114 63 model """convkb""" +114 63 loss """bceaftersigmoid""" +114 63 regularizer """no""" +114 63 optimizer """adadelta""" +114 63 training_loop """owa""" +114 63 negative_sampler """basic""" +114 63 evaluator """rankbased""" +114 64 dataset """kinships""" +114 64 model """convkb""" +114 64 loss """bceaftersigmoid""" +114 64 regularizer """no""" +114 64 optimizer """adadelta""" +114 64 training_loop """owa""" +114 64 negative_sampler """basic""" +114 64 evaluator """rankbased""" +114 65 dataset """kinships""" +114 65 model """convkb""" +114 65 loss """bceaftersigmoid""" +114 65 regularizer """no""" +114 65 optimizer """adadelta""" +114 65 training_loop """owa""" +114 65 negative_sampler """basic""" +114 65 evaluator """rankbased""" +115 1 model.embedding_dim 2.0 +115 1 model.hidden_dropout_rate 0.4172275742946835 +115 1 model.num_filters 7.0 +115 1 negative_sampler.num_negs_per_pos 32.0 +115 1 training.batch_size 2.0 +115 2 model.embedding_dim 0.0 +115 2 model.hidden_dropout_rate 0.4763720606655115 +115 2 model.num_filters 2.0 +115 2 negative_sampler.num_negs_per_pos 11.0 +115 2 training.batch_size 2.0 +115 3 model.embedding_dim 2.0 +115 3 model.hidden_dropout_rate 0.28798283509777545 +115 3 model.num_filters 6.0 +115 3 negative_sampler.num_negs_per_pos 62.0 +115 3 training.batch_size 2.0 +115 4 model.embedding_dim 1.0 +115 4 model.hidden_dropout_rate 0.43887279507447485 +115 4 model.num_filters 2.0 +115 4 negative_sampler.num_negs_per_pos 10.0 +115 4 training.batch_size 1.0 +115 5 model.embedding_dim 1.0 +115 5 model.hidden_dropout_rate 0.2559411669120828 +115 5 model.num_filters 8.0 +115 5 negative_sampler.num_negs_per_pos 1.0 +115 5 training.batch_size 1.0 +115 6 model.embedding_dim 0.0 +115 6 model.hidden_dropout_rate 0.44637592928794145 +115 6 model.num_filters 7.0 +115 6 
negative_sampler.num_negs_per_pos 29.0 +115 6 training.batch_size 1.0 +115 7 model.embedding_dim 2.0 +115 7 model.hidden_dropout_rate 0.4740311633154406 +115 7 model.num_filters 6.0 +115 7 negative_sampler.num_negs_per_pos 71.0 +115 7 training.batch_size 1.0 +115 8 model.embedding_dim 0.0 +115 8 model.hidden_dropout_rate 0.4286519192139475 +115 8 model.num_filters 6.0 +115 8 negative_sampler.num_negs_per_pos 59.0 +115 8 training.batch_size 1.0 +115 9 model.embedding_dim 1.0 +115 9 model.hidden_dropout_rate 0.45086839961984126 +115 9 model.num_filters 2.0 +115 9 negative_sampler.num_negs_per_pos 48.0 +115 9 training.batch_size 2.0 +115 10 model.embedding_dim 0.0 +115 10 model.hidden_dropout_rate 0.4584522267936776 +115 10 model.num_filters 5.0 +115 10 negative_sampler.num_negs_per_pos 91.0 +115 10 training.batch_size 0.0 +115 11 model.embedding_dim 2.0 +115 11 model.hidden_dropout_rate 0.44377210282582624 +115 11 model.num_filters 9.0 +115 11 negative_sampler.num_negs_per_pos 41.0 +115 11 training.batch_size 2.0 +115 12 model.embedding_dim 0.0 +115 12 model.hidden_dropout_rate 0.11089152401080385 +115 12 model.num_filters 1.0 +115 12 negative_sampler.num_negs_per_pos 85.0 +115 12 training.batch_size 0.0 +115 13 model.embedding_dim 0.0 +115 13 model.hidden_dropout_rate 0.1387787793009992 +115 13 model.num_filters 0.0 +115 13 negative_sampler.num_negs_per_pos 89.0 +115 13 training.batch_size 2.0 +115 14 model.embedding_dim 2.0 +115 14 model.hidden_dropout_rate 0.25498930786531077 +115 14 model.num_filters 8.0 +115 14 negative_sampler.num_negs_per_pos 74.0 +115 14 training.batch_size 0.0 +115 15 model.embedding_dim 1.0 +115 15 model.hidden_dropout_rate 0.30004646608869845 +115 15 model.num_filters 0.0 +115 15 negative_sampler.num_negs_per_pos 17.0 +115 15 training.batch_size 0.0 +115 16 model.embedding_dim 1.0 +115 16 model.hidden_dropout_rate 0.2781001358384264 +115 16 model.num_filters 9.0 +115 16 negative_sampler.num_negs_per_pos 23.0 +115 16 training.batch_size 1.0 
+115 17 model.embedding_dim 1.0 +115 17 model.hidden_dropout_rate 0.1603092952815243 +115 17 model.num_filters 9.0 +115 17 negative_sampler.num_negs_per_pos 96.0 +115 17 training.batch_size 2.0 +115 18 model.embedding_dim 0.0 +115 18 model.hidden_dropout_rate 0.3732717715637581 +115 18 model.num_filters 9.0 +115 18 negative_sampler.num_negs_per_pos 60.0 +115 18 training.batch_size 0.0 +115 19 model.embedding_dim 2.0 +115 19 model.hidden_dropout_rate 0.22646317550261694 +115 19 model.num_filters 5.0 +115 19 negative_sampler.num_negs_per_pos 49.0 +115 19 training.batch_size 2.0 +115 20 model.embedding_dim 0.0 +115 20 model.hidden_dropout_rate 0.41109544030065837 +115 20 model.num_filters 2.0 +115 20 negative_sampler.num_negs_per_pos 72.0 +115 20 training.batch_size 1.0 +115 21 model.embedding_dim 1.0 +115 21 model.hidden_dropout_rate 0.3239302804605425 +115 21 model.num_filters 8.0 +115 21 negative_sampler.num_negs_per_pos 9.0 +115 21 training.batch_size 1.0 +115 22 model.embedding_dim 2.0 +115 22 model.hidden_dropout_rate 0.43841540526254064 +115 22 model.num_filters 6.0 +115 22 negative_sampler.num_negs_per_pos 69.0 +115 22 training.batch_size 2.0 +115 23 model.embedding_dim 1.0 +115 23 model.hidden_dropout_rate 0.4567639122312134 +115 23 model.num_filters 5.0 +115 23 negative_sampler.num_negs_per_pos 90.0 +115 23 training.batch_size 0.0 +115 24 model.embedding_dim 1.0 +115 24 model.hidden_dropout_rate 0.41947119974982405 +115 24 model.num_filters 8.0 +115 24 negative_sampler.num_negs_per_pos 4.0 +115 24 training.batch_size 0.0 +115 25 model.embedding_dim 1.0 +115 25 model.hidden_dropout_rate 0.19614204064145496 +115 25 model.num_filters 1.0 +115 25 negative_sampler.num_negs_per_pos 83.0 +115 25 training.batch_size 2.0 +115 26 model.embedding_dim 0.0 +115 26 model.hidden_dropout_rate 0.31142215892227554 +115 26 model.num_filters 9.0 +115 26 negative_sampler.num_negs_per_pos 8.0 +115 26 training.batch_size 0.0 +115 27 model.embedding_dim 1.0 +115 27 
model.hidden_dropout_rate 0.29569472900874705 +115 27 model.num_filters 1.0 +115 27 negative_sampler.num_negs_per_pos 29.0 +115 27 training.batch_size 2.0 +115 28 model.embedding_dim 1.0 +115 28 model.hidden_dropout_rate 0.10696559128673876 +115 28 model.num_filters 1.0 +115 28 negative_sampler.num_negs_per_pos 89.0 +115 28 training.batch_size 0.0 +115 29 model.embedding_dim 2.0 +115 29 model.hidden_dropout_rate 0.14138336298851417 +115 29 model.num_filters 6.0 +115 29 negative_sampler.num_negs_per_pos 54.0 +115 29 training.batch_size 1.0 +115 30 model.embedding_dim 2.0 +115 30 model.hidden_dropout_rate 0.39300464781141475 +115 30 model.num_filters 3.0 +115 30 negative_sampler.num_negs_per_pos 45.0 +115 30 training.batch_size 2.0 +115 31 model.embedding_dim 2.0 +115 31 model.hidden_dropout_rate 0.4505204059841178 +115 31 model.num_filters 9.0 +115 31 negative_sampler.num_negs_per_pos 83.0 +115 31 training.batch_size 2.0 +115 32 model.embedding_dim 1.0 +115 32 model.hidden_dropout_rate 0.1729775463222714 +115 32 model.num_filters 8.0 +115 32 negative_sampler.num_negs_per_pos 59.0 +115 32 training.batch_size 2.0 +115 33 model.embedding_dim 0.0 +115 33 model.hidden_dropout_rate 0.38479933178885783 +115 33 model.num_filters 5.0 +115 33 negative_sampler.num_negs_per_pos 13.0 +115 33 training.batch_size 0.0 +115 34 model.embedding_dim 0.0 +115 34 model.hidden_dropout_rate 0.13672884707952396 +115 34 model.num_filters 6.0 +115 34 negative_sampler.num_negs_per_pos 80.0 +115 34 training.batch_size 1.0 +115 35 model.embedding_dim 0.0 +115 35 model.hidden_dropout_rate 0.1020304723730705 +115 35 model.num_filters 0.0 +115 35 negative_sampler.num_negs_per_pos 28.0 +115 35 training.batch_size 0.0 +115 36 model.embedding_dim 2.0 +115 36 model.hidden_dropout_rate 0.40598507165767783 +115 36 model.num_filters 4.0 +115 36 negative_sampler.num_negs_per_pos 32.0 +115 36 training.batch_size 1.0 +115 37 model.embedding_dim 1.0 +115 37 model.hidden_dropout_rate 0.36707138374170334 +115 
37 model.num_filters 1.0 +115 37 negative_sampler.num_negs_per_pos 21.0 +115 37 training.batch_size 1.0 +115 38 model.embedding_dim 1.0 +115 38 model.hidden_dropout_rate 0.1781391321801039 +115 38 model.num_filters 3.0 +115 38 negative_sampler.num_negs_per_pos 78.0 +115 38 training.batch_size 0.0 +115 39 model.embedding_dim 0.0 +115 39 model.hidden_dropout_rate 0.3365003172302479 +115 39 model.num_filters 7.0 +115 39 negative_sampler.num_negs_per_pos 37.0 +115 39 training.batch_size 2.0 +115 40 model.embedding_dim 2.0 +115 40 model.hidden_dropout_rate 0.15783678404209672 +115 40 model.num_filters 6.0 +115 40 negative_sampler.num_negs_per_pos 16.0 +115 40 training.batch_size 1.0 +115 41 model.embedding_dim 0.0 +115 41 model.hidden_dropout_rate 0.24531256211295582 +115 41 model.num_filters 2.0 +115 41 negative_sampler.num_negs_per_pos 63.0 +115 41 training.batch_size 2.0 +115 42 model.embedding_dim 2.0 +115 42 model.hidden_dropout_rate 0.1900992533038619 +115 42 model.num_filters 3.0 +115 42 negative_sampler.num_negs_per_pos 30.0 +115 42 training.batch_size 2.0 +115 43 model.embedding_dim 2.0 +115 43 model.hidden_dropout_rate 0.16882833558981786 +115 43 model.num_filters 8.0 +115 43 negative_sampler.num_negs_per_pos 82.0 +115 43 training.batch_size 2.0 +115 44 model.embedding_dim 2.0 +115 44 model.hidden_dropout_rate 0.35208367299030974 +115 44 model.num_filters 4.0 +115 44 negative_sampler.num_negs_per_pos 36.0 +115 44 training.batch_size 0.0 +115 45 model.embedding_dim 2.0 +115 45 model.hidden_dropout_rate 0.3219456834057631 +115 45 model.num_filters 5.0 +115 45 negative_sampler.num_negs_per_pos 4.0 +115 45 training.batch_size 0.0 +115 46 model.embedding_dim 0.0 +115 46 model.hidden_dropout_rate 0.15561115248342713 +115 46 model.num_filters 7.0 +115 46 negative_sampler.num_negs_per_pos 17.0 +115 46 training.batch_size 2.0 +115 47 model.embedding_dim 2.0 +115 47 model.hidden_dropout_rate 0.4981952001696923 +115 47 model.num_filters 6.0 +115 47 
negative_sampler.num_negs_per_pos 7.0 +115 47 training.batch_size 1.0 +115 48 model.embedding_dim 0.0 +115 48 model.hidden_dropout_rate 0.4898431920804359 +115 48 model.num_filters 8.0 +115 48 negative_sampler.num_negs_per_pos 17.0 +115 48 training.batch_size 2.0 +115 49 model.embedding_dim 1.0 +115 49 model.hidden_dropout_rate 0.23777024040173594 +115 49 model.num_filters 2.0 +115 49 negative_sampler.num_negs_per_pos 8.0 +115 49 training.batch_size 1.0 +115 50 model.embedding_dim 2.0 +115 50 model.hidden_dropout_rate 0.3766844436950779 +115 50 model.num_filters 6.0 +115 50 negative_sampler.num_negs_per_pos 97.0 +115 50 training.batch_size 1.0 +115 1 dataset """kinships""" +115 1 model """convkb""" +115 1 loss """softplus""" +115 1 regularizer """no""" +115 1 optimizer """adadelta""" +115 1 training_loop """owa""" +115 1 negative_sampler """basic""" +115 1 evaluator """rankbased""" +115 2 dataset """kinships""" +115 2 model """convkb""" +115 2 loss """softplus""" +115 2 regularizer """no""" +115 2 optimizer """adadelta""" +115 2 training_loop """owa""" +115 2 negative_sampler """basic""" +115 2 evaluator """rankbased""" +115 3 dataset """kinships""" +115 3 model """convkb""" +115 3 loss """softplus""" +115 3 regularizer """no""" +115 3 optimizer """adadelta""" +115 3 training_loop """owa""" +115 3 negative_sampler """basic""" +115 3 evaluator """rankbased""" +115 4 dataset """kinships""" +115 4 model """convkb""" +115 4 loss """softplus""" +115 4 regularizer """no""" +115 4 optimizer """adadelta""" +115 4 training_loop """owa""" +115 4 negative_sampler """basic""" +115 4 evaluator """rankbased""" +115 5 dataset """kinships""" +115 5 model """convkb""" +115 5 loss """softplus""" +115 5 regularizer """no""" +115 5 optimizer """adadelta""" +115 5 training_loop """owa""" +115 5 negative_sampler """basic""" +115 5 evaluator """rankbased""" +115 6 dataset """kinships""" +115 6 model """convkb""" +115 6 loss """softplus""" +115 6 regularizer """no""" +115 6 optimizer 
"""adadelta""" +115 6 training_loop """owa""" +115 6 negative_sampler """basic""" +115 6 evaluator """rankbased""" +115 7 dataset """kinships""" +115 7 model """convkb""" +115 7 loss """softplus""" +115 7 regularizer """no""" +115 7 optimizer """adadelta""" +115 7 training_loop """owa""" +115 7 negative_sampler """basic""" +115 7 evaluator """rankbased""" +115 8 dataset """kinships""" +115 8 model """convkb""" +115 8 loss """softplus""" +115 8 regularizer """no""" +115 8 optimizer """adadelta""" +115 8 training_loop """owa""" +115 8 negative_sampler """basic""" +115 8 evaluator """rankbased""" +115 9 dataset """kinships""" +115 9 model """convkb""" +115 9 loss """softplus""" +115 9 regularizer """no""" +115 9 optimizer """adadelta""" +115 9 training_loop """owa""" +115 9 negative_sampler """basic""" +115 9 evaluator """rankbased""" +115 10 dataset """kinships""" +115 10 model """convkb""" +115 10 loss """softplus""" +115 10 regularizer """no""" +115 10 optimizer """adadelta""" +115 10 training_loop """owa""" +115 10 negative_sampler """basic""" +115 10 evaluator """rankbased""" +115 11 dataset """kinships""" +115 11 model """convkb""" +115 11 loss """softplus""" +115 11 regularizer """no""" +115 11 optimizer """adadelta""" +115 11 training_loop """owa""" +115 11 negative_sampler """basic""" +115 11 evaluator """rankbased""" +115 12 dataset """kinships""" +115 12 model """convkb""" +115 12 loss """softplus""" +115 12 regularizer """no""" +115 12 optimizer """adadelta""" +115 12 training_loop """owa""" +115 12 negative_sampler """basic""" +115 12 evaluator """rankbased""" +115 13 dataset """kinships""" +115 13 model """convkb""" +115 13 loss """softplus""" +115 13 regularizer """no""" +115 13 optimizer """adadelta""" +115 13 training_loop """owa""" +115 13 negative_sampler """basic""" +115 13 evaluator """rankbased""" +115 14 dataset """kinships""" +115 14 model """convkb""" +115 14 loss """softplus""" +115 14 regularizer """no""" +115 14 optimizer """adadelta""" 
+115 14 training_loop """owa""" +115 14 negative_sampler """basic""" +115 14 evaluator """rankbased""" +115 15 dataset """kinships""" +115 15 model """convkb""" +115 15 loss """softplus""" +115 15 regularizer """no""" +115 15 optimizer """adadelta""" +115 15 training_loop """owa""" +115 15 negative_sampler """basic""" +115 15 evaluator """rankbased""" +115 16 dataset """kinships""" +115 16 model """convkb""" +115 16 loss """softplus""" +115 16 regularizer """no""" +115 16 optimizer """adadelta""" +115 16 training_loop """owa""" +115 16 negative_sampler """basic""" +115 16 evaluator """rankbased""" +115 17 dataset """kinships""" +115 17 model """convkb""" +115 17 loss """softplus""" +115 17 regularizer """no""" +115 17 optimizer """adadelta""" +115 17 training_loop """owa""" +115 17 negative_sampler """basic""" +115 17 evaluator """rankbased""" +115 18 dataset """kinships""" +115 18 model """convkb""" +115 18 loss """softplus""" +115 18 regularizer """no""" +115 18 optimizer """adadelta""" +115 18 training_loop """owa""" +115 18 negative_sampler """basic""" +115 18 evaluator """rankbased""" +115 19 dataset """kinships""" +115 19 model """convkb""" +115 19 loss """softplus""" +115 19 regularizer """no""" +115 19 optimizer """adadelta""" +115 19 training_loop """owa""" +115 19 negative_sampler """basic""" +115 19 evaluator """rankbased""" +115 20 dataset """kinships""" +115 20 model """convkb""" +115 20 loss """softplus""" +115 20 regularizer """no""" +115 20 optimizer """adadelta""" +115 20 training_loop """owa""" +115 20 negative_sampler """basic""" +115 20 evaluator """rankbased""" +115 21 dataset """kinships""" +115 21 model """convkb""" +115 21 loss """softplus""" +115 21 regularizer """no""" +115 21 optimizer """adadelta""" +115 21 training_loop """owa""" +115 21 negative_sampler """basic""" +115 21 evaluator """rankbased""" +115 22 dataset """kinships""" +115 22 model """convkb""" +115 22 loss """softplus""" +115 22 regularizer """no""" +115 22 optimizer 
"""adadelta""" +115 22 training_loop """owa""" +115 22 negative_sampler """basic""" +115 22 evaluator """rankbased""" +115 23 dataset """kinships""" +115 23 model """convkb""" +115 23 loss """softplus""" +115 23 regularizer """no""" +115 23 optimizer """adadelta""" +115 23 training_loop """owa""" +115 23 negative_sampler """basic""" +115 23 evaluator """rankbased""" +115 24 dataset """kinships""" +115 24 model """convkb""" +115 24 loss """softplus""" +115 24 regularizer """no""" +115 24 optimizer """adadelta""" +115 24 training_loop """owa""" +115 24 negative_sampler """basic""" +115 24 evaluator """rankbased""" +115 25 dataset """kinships""" +115 25 model """convkb""" +115 25 loss """softplus""" +115 25 regularizer """no""" +115 25 optimizer """adadelta""" +115 25 training_loop """owa""" +115 25 negative_sampler """basic""" +115 25 evaluator """rankbased""" +115 26 dataset """kinships""" +115 26 model """convkb""" +115 26 loss """softplus""" +115 26 regularizer """no""" +115 26 optimizer """adadelta""" +115 26 training_loop """owa""" +115 26 negative_sampler """basic""" +115 26 evaluator """rankbased""" +115 27 dataset """kinships""" +115 27 model """convkb""" +115 27 loss """softplus""" +115 27 regularizer """no""" +115 27 optimizer """adadelta""" +115 27 training_loop """owa""" +115 27 negative_sampler """basic""" +115 27 evaluator """rankbased""" +115 28 dataset """kinships""" +115 28 model """convkb""" +115 28 loss """softplus""" +115 28 regularizer """no""" +115 28 optimizer """adadelta""" +115 28 training_loop """owa""" +115 28 negative_sampler """basic""" +115 28 evaluator """rankbased""" +115 29 dataset """kinships""" +115 29 model """convkb""" +115 29 loss """softplus""" +115 29 regularizer """no""" +115 29 optimizer """adadelta""" +115 29 training_loop """owa""" +115 29 negative_sampler """basic""" +115 29 evaluator """rankbased""" +115 30 dataset """kinships""" +115 30 model """convkb""" +115 30 loss """softplus""" +115 30 regularizer """no""" +115 30 
optimizer """adadelta""" +115 30 training_loop """owa""" +115 30 negative_sampler """basic""" +115 30 evaluator """rankbased""" +115 31 dataset """kinships""" +115 31 model """convkb""" +115 31 loss """softplus""" +115 31 regularizer """no""" +115 31 optimizer """adadelta""" +115 31 training_loop """owa""" +115 31 negative_sampler """basic""" +115 31 evaluator """rankbased""" +115 32 dataset """kinships""" +115 32 model """convkb""" +115 32 loss """softplus""" +115 32 regularizer """no""" +115 32 optimizer """adadelta""" +115 32 training_loop """owa""" +115 32 negative_sampler """basic""" +115 32 evaluator """rankbased""" +115 33 dataset """kinships""" +115 33 model """convkb""" +115 33 loss """softplus""" +115 33 regularizer """no""" +115 33 optimizer """adadelta""" +115 33 training_loop """owa""" +115 33 negative_sampler """basic""" +115 33 evaluator """rankbased""" +115 34 dataset """kinships""" +115 34 model """convkb""" +115 34 loss """softplus""" +115 34 regularizer """no""" +115 34 optimizer """adadelta""" +115 34 training_loop """owa""" +115 34 negative_sampler """basic""" +115 34 evaluator """rankbased""" +115 35 dataset """kinships""" +115 35 model """convkb""" +115 35 loss """softplus""" +115 35 regularizer """no""" +115 35 optimizer """adadelta""" +115 35 training_loop """owa""" +115 35 negative_sampler """basic""" +115 35 evaluator """rankbased""" +115 36 dataset """kinships""" +115 36 model """convkb""" +115 36 loss """softplus""" +115 36 regularizer """no""" +115 36 optimizer """adadelta""" +115 36 training_loop """owa""" +115 36 negative_sampler """basic""" +115 36 evaluator """rankbased""" +115 37 dataset """kinships""" +115 37 model """convkb""" +115 37 loss """softplus""" +115 37 regularizer """no""" +115 37 optimizer """adadelta""" +115 37 training_loop """owa""" +115 37 negative_sampler """basic""" +115 37 evaluator """rankbased""" +115 38 dataset """kinships""" +115 38 model """convkb""" +115 38 loss """softplus""" +115 38 regularizer """no""" 
+115 38 optimizer """adadelta""" +115 38 training_loop """owa""" +115 38 negative_sampler """basic""" +115 38 evaluator """rankbased""" +115 39 dataset """kinships""" +115 39 model """convkb""" +115 39 loss """softplus""" +115 39 regularizer """no""" +115 39 optimizer """adadelta""" +115 39 training_loop """owa""" +115 39 negative_sampler """basic""" +115 39 evaluator """rankbased""" +115 40 dataset """kinships""" +115 40 model """convkb""" +115 40 loss """softplus""" +115 40 regularizer """no""" +115 40 optimizer """adadelta""" +115 40 training_loop """owa""" +115 40 negative_sampler """basic""" +115 40 evaluator """rankbased""" +115 41 dataset """kinships""" +115 41 model """convkb""" +115 41 loss """softplus""" +115 41 regularizer """no""" +115 41 optimizer """adadelta""" +115 41 training_loop """owa""" +115 41 negative_sampler """basic""" +115 41 evaluator """rankbased""" +115 42 dataset """kinships""" +115 42 model """convkb""" +115 42 loss """softplus""" +115 42 regularizer """no""" +115 42 optimizer """adadelta""" +115 42 training_loop """owa""" +115 42 negative_sampler """basic""" +115 42 evaluator """rankbased""" +115 43 dataset """kinships""" +115 43 model """convkb""" +115 43 loss """softplus""" +115 43 regularizer """no""" +115 43 optimizer """adadelta""" +115 43 training_loop """owa""" +115 43 negative_sampler """basic""" +115 43 evaluator """rankbased""" +115 44 dataset """kinships""" +115 44 model """convkb""" +115 44 loss """softplus""" +115 44 regularizer """no""" +115 44 optimizer """adadelta""" +115 44 training_loop """owa""" +115 44 negative_sampler """basic""" +115 44 evaluator """rankbased""" +115 45 dataset """kinships""" +115 45 model """convkb""" +115 45 loss """softplus""" +115 45 regularizer """no""" +115 45 optimizer """adadelta""" +115 45 training_loop """owa""" +115 45 negative_sampler """basic""" +115 45 evaluator """rankbased""" +115 46 dataset """kinships""" +115 46 model """convkb""" +115 46 loss """softplus""" +115 46 regularizer 
"""no""" +115 46 optimizer """adadelta""" +115 46 training_loop """owa""" +115 46 negative_sampler """basic""" +115 46 evaluator """rankbased""" +115 47 dataset """kinships""" +115 47 model """convkb""" +115 47 loss """softplus""" +115 47 regularizer """no""" +115 47 optimizer """adadelta""" +115 47 training_loop """owa""" +115 47 negative_sampler """basic""" +115 47 evaluator """rankbased""" +115 48 dataset """kinships""" +115 48 model """convkb""" +115 48 loss """softplus""" +115 48 regularizer """no""" +115 48 optimizer """adadelta""" +115 48 training_loop """owa""" +115 48 negative_sampler """basic""" +115 48 evaluator """rankbased""" +115 49 dataset """kinships""" +115 49 model """convkb""" +115 49 loss """softplus""" +115 49 regularizer """no""" +115 49 optimizer """adadelta""" +115 49 training_loop """owa""" +115 49 negative_sampler """basic""" +115 49 evaluator """rankbased""" +115 50 dataset """kinships""" +115 50 model """convkb""" +115 50 loss """softplus""" +115 50 regularizer """no""" +115 50 optimizer """adadelta""" +115 50 training_loop """owa""" +115 50 negative_sampler """basic""" +115 50 evaluator """rankbased""" +116 1 model.embedding_dim 0.0 +116 1 model.hidden_dropout_rate 0.1325661186211797 +116 1 model.num_filters 5.0 +116 1 loss.margin 9.566909392316322 +116 1 negative_sampler.num_negs_per_pos 76.0 +116 1 training.batch_size 2.0 +116 2 model.embedding_dim 2.0 +116 2 model.hidden_dropout_rate 0.46723011381759316 +116 2 model.num_filters 6.0 +116 2 loss.margin 7.37251529224922 +116 2 negative_sampler.num_negs_per_pos 68.0 +116 2 training.batch_size 2.0 +116 3 model.embedding_dim 1.0 +116 3 model.hidden_dropout_rate 0.1902550370393255 +116 3 model.num_filters 3.0 +116 3 loss.margin 9.185224043547 +116 3 negative_sampler.num_negs_per_pos 10.0 +116 3 training.batch_size 1.0 +116 4 model.embedding_dim 1.0 +116 4 model.hidden_dropout_rate 0.4730190419326259 +116 4 model.num_filters 8.0 +116 4 loss.margin 5.01448683959539 +116 4 
negative_sampler.num_negs_per_pos 48.0 +116 4 training.batch_size 1.0 +116 5 model.embedding_dim 1.0 +116 5 model.hidden_dropout_rate 0.35765810324277747 +116 5 model.num_filters 6.0 +116 5 loss.margin 0.7731046986178576 +116 5 negative_sampler.num_negs_per_pos 92.0 +116 5 training.batch_size 2.0 +116 6 model.embedding_dim 2.0 +116 6 model.hidden_dropout_rate 0.378912183897799 +116 6 model.num_filters 2.0 +116 6 loss.margin 6.147964381648611 +116 6 negative_sampler.num_negs_per_pos 34.0 +116 6 training.batch_size 0.0 +116 7 model.embedding_dim 2.0 +116 7 model.hidden_dropout_rate 0.1979794480199665 +116 7 model.num_filters 3.0 +116 7 loss.margin 2.2539118249846286 +116 7 negative_sampler.num_negs_per_pos 19.0 +116 7 training.batch_size 0.0 +116 8 model.embedding_dim 2.0 +116 8 model.hidden_dropout_rate 0.1562867855574128 +116 8 model.num_filters 5.0 +116 8 loss.margin 0.8142085664740935 +116 8 negative_sampler.num_negs_per_pos 46.0 +116 8 training.batch_size 2.0 +116 9 model.embedding_dim 0.0 +116 9 model.hidden_dropout_rate 0.3560572446820739 +116 9 model.num_filters 5.0 +116 9 loss.margin 6.157450975528436 +116 9 negative_sampler.num_negs_per_pos 89.0 +116 9 training.batch_size 1.0 +116 10 model.embedding_dim 1.0 +116 10 model.hidden_dropout_rate 0.23674439228050567 +116 10 model.num_filters 3.0 +116 10 loss.margin 3.004111531362305 +116 10 negative_sampler.num_negs_per_pos 22.0 +116 10 training.batch_size 2.0 +116 11 model.embedding_dim 0.0 +116 11 model.hidden_dropout_rate 0.32782242411463347 +116 11 model.num_filters 3.0 +116 11 loss.margin 2.988849154978369 +116 11 negative_sampler.num_negs_per_pos 78.0 +116 11 training.batch_size 0.0 +116 12 model.embedding_dim 0.0 +116 12 model.hidden_dropout_rate 0.30411864607397077 +116 12 model.num_filters 1.0 +116 12 loss.margin 9.217221170290284 +116 12 negative_sampler.num_negs_per_pos 27.0 +116 12 training.batch_size 1.0 +116 13 model.embedding_dim 2.0 +116 13 model.hidden_dropout_rate 0.28957935439366833 +116 13 
model.num_filters 8.0 +116 13 loss.margin 9.749872072832448 +116 13 negative_sampler.num_negs_per_pos 67.0 +116 13 training.batch_size 0.0 +116 14 model.embedding_dim 2.0 +116 14 model.hidden_dropout_rate 0.4827764089883885 +116 14 model.num_filters 0.0 +116 14 loss.margin 6.271953735905457 +116 14 negative_sampler.num_negs_per_pos 74.0 +116 14 training.batch_size 2.0 +116 15 model.embedding_dim 2.0 +116 15 model.hidden_dropout_rate 0.365637258331737 +116 15 model.num_filters 0.0 +116 15 loss.margin 7.576562573448883 +116 15 negative_sampler.num_negs_per_pos 42.0 +116 15 training.batch_size 0.0 +116 16 model.embedding_dim 0.0 +116 16 model.hidden_dropout_rate 0.34449556332648634 +116 16 model.num_filters 7.0 +116 16 loss.margin 5.49185109733927 +116 16 negative_sampler.num_negs_per_pos 60.0 +116 16 training.batch_size 1.0 +116 17 model.embedding_dim 1.0 +116 17 model.hidden_dropout_rate 0.35906547318898374 +116 17 model.num_filters 7.0 +116 17 loss.margin 5.320930009802797 +116 17 negative_sampler.num_negs_per_pos 75.0 +116 17 training.batch_size 0.0 +116 18 model.embedding_dim 2.0 +116 18 model.hidden_dropout_rate 0.3506654629859107 +116 18 model.num_filters 3.0 +116 18 loss.margin 1.6228673066865187 +116 18 negative_sampler.num_negs_per_pos 15.0 +116 18 training.batch_size 2.0 +116 19 model.embedding_dim 1.0 +116 19 model.hidden_dropout_rate 0.18371414317108148 +116 19 model.num_filters 3.0 +116 19 loss.margin 1.367678405579647 +116 19 negative_sampler.num_negs_per_pos 32.0 +116 19 training.batch_size 1.0 +116 1 dataset """kinships""" +116 1 model """convkb""" +116 1 loss """marginranking""" +116 1 regularizer """no""" +116 1 optimizer """adadelta""" +116 1 training_loop """owa""" +116 1 negative_sampler """basic""" +116 1 evaluator """rankbased""" +116 2 dataset """kinships""" +116 2 model """convkb""" +116 2 loss """marginranking""" +116 2 regularizer """no""" +116 2 optimizer """adadelta""" +116 2 training_loop """owa""" +116 2 negative_sampler """basic""" 
+116 2 evaluator """rankbased""" +116 3 dataset """kinships""" +116 3 model """convkb""" +116 3 loss """marginranking""" +116 3 regularizer """no""" +116 3 optimizer """adadelta""" +116 3 training_loop """owa""" +116 3 negative_sampler """basic""" +116 3 evaluator """rankbased""" +116 4 dataset """kinships""" +116 4 model """convkb""" +116 4 loss """marginranking""" +116 4 regularizer """no""" +116 4 optimizer """adadelta""" +116 4 training_loop """owa""" +116 4 negative_sampler """basic""" +116 4 evaluator """rankbased""" +116 5 dataset """kinships""" +116 5 model """convkb""" +116 5 loss """marginranking""" +116 5 regularizer """no""" +116 5 optimizer """adadelta""" +116 5 training_loop """owa""" +116 5 negative_sampler """basic""" +116 5 evaluator """rankbased""" +116 6 dataset """kinships""" +116 6 model """convkb""" +116 6 loss """marginranking""" +116 6 regularizer """no""" +116 6 optimizer """adadelta""" +116 6 training_loop """owa""" +116 6 negative_sampler """basic""" +116 6 evaluator """rankbased""" +116 7 dataset """kinships""" +116 7 model """convkb""" +116 7 loss """marginranking""" +116 7 regularizer """no""" +116 7 optimizer """adadelta""" +116 7 training_loop """owa""" +116 7 negative_sampler """basic""" +116 7 evaluator """rankbased""" +116 8 dataset """kinships""" +116 8 model """convkb""" +116 8 loss """marginranking""" +116 8 regularizer """no""" +116 8 optimizer """adadelta""" +116 8 training_loop """owa""" +116 8 negative_sampler """basic""" +116 8 evaluator """rankbased""" +116 9 dataset """kinships""" +116 9 model """convkb""" +116 9 loss """marginranking""" +116 9 regularizer """no""" +116 9 optimizer """adadelta""" +116 9 training_loop """owa""" +116 9 negative_sampler """basic""" +116 9 evaluator """rankbased""" +116 10 dataset """kinships""" +116 10 model """convkb""" +116 10 loss """marginranking""" +116 10 regularizer """no""" +116 10 optimizer """adadelta""" +116 10 training_loop """owa""" +116 10 negative_sampler """basic""" +116 10 
evaluator """rankbased""" +116 11 dataset """kinships""" +116 11 model """convkb""" +116 11 loss """marginranking""" +116 11 regularizer """no""" +116 11 optimizer """adadelta""" +116 11 training_loop """owa""" +116 11 negative_sampler """basic""" +116 11 evaluator """rankbased""" +116 12 dataset """kinships""" +116 12 model """convkb""" +116 12 loss """marginranking""" +116 12 regularizer """no""" +116 12 optimizer """adadelta""" +116 12 training_loop """owa""" +116 12 negative_sampler """basic""" +116 12 evaluator """rankbased""" +116 13 dataset """kinships""" +116 13 model """convkb""" +116 13 loss """marginranking""" +116 13 regularizer """no""" +116 13 optimizer """adadelta""" +116 13 training_loop """owa""" +116 13 negative_sampler """basic""" +116 13 evaluator """rankbased""" +116 14 dataset """kinships""" +116 14 model """convkb""" +116 14 loss """marginranking""" +116 14 regularizer """no""" +116 14 optimizer """adadelta""" +116 14 training_loop """owa""" +116 14 negative_sampler """basic""" +116 14 evaluator """rankbased""" +116 15 dataset """kinships""" +116 15 model """convkb""" +116 15 loss """marginranking""" +116 15 regularizer """no""" +116 15 optimizer """adadelta""" +116 15 training_loop """owa""" +116 15 negative_sampler """basic""" +116 15 evaluator """rankbased""" +116 16 dataset """kinships""" +116 16 model """convkb""" +116 16 loss """marginranking""" +116 16 regularizer """no""" +116 16 optimizer """adadelta""" +116 16 training_loop """owa""" +116 16 negative_sampler """basic""" +116 16 evaluator """rankbased""" +116 17 dataset """kinships""" +116 17 model """convkb""" +116 17 loss """marginranking""" +116 17 regularizer """no""" +116 17 optimizer """adadelta""" +116 17 training_loop """owa""" +116 17 negative_sampler """basic""" +116 17 evaluator """rankbased""" +116 18 dataset """kinships""" +116 18 model """convkb""" +116 18 loss """marginranking""" +116 18 regularizer """no""" +116 18 optimizer """adadelta""" +116 18 training_loop 
"""owa""" +116 18 negative_sampler """basic""" +116 18 evaluator """rankbased""" +116 19 dataset """kinships""" +116 19 model """convkb""" +116 19 loss """marginranking""" +116 19 regularizer """no""" +116 19 optimizer """adadelta""" +116 19 training_loop """owa""" +116 19 negative_sampler """basic""" +116 19 evaluator """rankbased""" +117 1 model.embedding_dim 0.0 +117 1 model.hidden_dropout_rate 0.2451220444319434 +117 1 model.num_filters 8.0 +117 1 loss.margin 3.30525339063314 +117 1 negative_sampler.num_negs_per_pos 20.0 +117 1 training.batch_size 1.0 +117 2 model.embedding_dim 2.0 +117 2 model.hidden_dropout_rate 0.3168162148982244 +117 2 model.num_filters 1.0 +117 2 loss.margin 3.52542370157728 +117 2 negative_sampler.num_negs_per_pos 70.0 +117 2 training.batch_size 2.0 +117 3 model.embedding_dim 2.0 +117 3 model.hidden_dropout_rate 0.1976963348675525 +117 3 model.num_filters 1.0 +117 3 loss.margin 1.0892716554726656 +117 3 negative_sampler.num_negs_per_pos 26.0 +117 3 training.batch_size 1.0 +117 4 model.embedding_dim 2.0 +117 4 model.hidden_dropout_rate 0.47355360901228505 +117 4 model.num_filters 8.0 +117 4 loss.margin 2.5255627053112137 +117 4 negative_sampler.num_negs_per_pos 94.0 +117 4 training.batch_size 2.0 +117 5 model.embedding_dim 2.0 +117 5 model.hidden_dropout_rate 0.4464338719131429 +117 5 model.num_filters 7.0 +117 5 loss.margin 8.836850556434193 +117 5 negative_sampler.num_negs_per_pos 32.0 +117 5 training.batch_size 1.0 +117 6 model.embedding_dim 1.0 +117 6 model.hidden_dropout_rate 0.13810991607427328 +117 6 model.num_filters 4.0 +117 6 loss.margin 9.79345651960707 +117 6 negative_sampler.num_negs_per_pos 83.0 +117 6 training.batch_size 0.0 +117 7 model.embedding_dim 1.0 +117 7 model.hidden_dropout_rate 0.13973260012338495 +117 7 model.num_filters 3.0 +117 7 loss.margin 7.423828044707866 +117 7 negative_sampler.num_negs_per_pos 48.0 +117 7 training.batch_size 0.0 +117 8 model.embedding_dim 2.0 +117 8 model.hidden_dropout_rate 
0.205367270090961 +117 8 model.num_filters 5.0 +117 8 loss.margin 1.8657878947854523 +117 8 negative_sampler.num_negs_per_pos 55.0 +117 8 training.batch_size 1.0 +117 9 model.embedding_dim 2.0 +117 9 model.hidden_dropout_rate 0.24107963320440537 +117 9 model.num_filters 6.0 +117 9 loss.margin 4.16835675820753 +117 9 negative_sampler.num_negs_per_pos 13.0 +117 9 training.batch_size 1.0 +117 10 model.embedding_dim 0.0 +117 10 model.hidden_dropout_rate 0.39509072075142515 +117 10 model.num_filters 4.0 +117 10 loss.margin 6.481329060649958 +117 10 negative_sampler.num_negs_per_pos 33.0 +117 10 training.batch_size 2.0 +117 11 model.embedding_dim 1.0 +117 11 model.hidden_dropout_rate 0.21709458052942857 +117 11 model.num_filters 4.0 +117 11 loss.margin 4.527724119256166 +117 11 negative_sampler.num_negs_per_pos 88.0 +117 11 training.batch_size 1.0 +117 12 model.embedding_dim 0.0 +117 12 model.hidden_dropout_rate 0.255616256542903 +117 12 model.num_filters 6.0 +117 12 loss.margin 3.7677698001830238 +117 12 negative_sampler.num_negs_per_pos 96.0 +117 12 training.batch_size 2.0 +117 13 model.embedding_dim 2.0 +117 13 model.hidden_dropout_rate 0.3755245736004017 +117 13 model.num_filters 4.0 +117 13 loss.margin 4.406021261616788 +117 13 negative_sampler.num_negs_per_pos 43.0 +117 13 training.batch_size 2.0 +117 14 model.embedding_dim 0.0 +117 14 model.hidden_dropout_rate 0.4424917052060996 +117 14 model.num_filters 8.0 +117 14 loss.margin 6.010836529824592 +117 14 negative_sampler.num_negs_per_pos 76.0 +117 14 training.batch_size 0.0 +117 15 model.embedding_dim 0.0 +117 15 model.hidden_dropout_rate 0.38862362433958575 +117 15 model.num_filters 8.0 +117 15 loss.margin 5.292518952706904 +117 15 negative_sampler.num_negs_per_pos 91.0 +117 15 training.batch_size 0.0 +117 16 model.embedding_dim 1.0 +117 16 model.hidden_dropout_rate 0.3094311059339936 +117 16 model.num_filters 7.0 +117 16 loss.margin 3.404918228683864 +117 16 negative_sampler.num_negs_per_pos 26.0 +117 16 
training.batch_size 1.0 +117 17 model.embedding_dim 1.0 +117 17 model.hidden_dropout_rate 0.31966625493226647 +117 17 model.num_filters 0.0 +117 17 loss.margin 2.527495387812491 +117 17 negative_sampler.num_negs_per_pos 23.0 +117 17 training.batch_size 2.0 +117 18 model.embedding_dim 1.0 +117 18 model.hidden_dropout_rate 0.46390855283288246 +117 18 model.num_filters 2.0 +117 18 loss.margin 6.283041030672062 +117 18 negative_sampler.num_negs_per_pos 29.0 +117 18 training.batch_size 1.0 +117 19 model.embedding_dim 0.0 +117 19 model.hidden_dropout_rate 0.4852224606146207 +117 19 model.num_filters 5.0 +117 19 loss.margin 6.2765483983776855 +117 19 negative_sampler.num_negs_per_pos 7.0 +117 19 training.batch_size 1.0 +117 20 model.embedding_dim 0.0 +117 20 model.hidden_dropout_rate 0.2535510122065886 +117 20 model.num_filters 8.0 +117 20 loss.margin 3.3767201361832164 +117 20 negative_sampler.num_negs_per_pos 24.0 +117 20 training.batch_size 1.0 +117 21 model.embedding_dim 2.0 +117 21 model.hidden_dropout_rate 0.38743216300787864 +117 21 model.num_filters 6.0 +117 21 loss.margin 3.411324016985535 +117 21 negative_sampler.num_negs_per_pos 27.0 +117 21 training.batch_size 2.0 +117 22 model.embedding_dim 1.0 +117 22 model.hidden_dropout_rate 0.18180436896265106 +117 22 model.num_filters 0.0 +117 22 loss.margin 1.2255845814856232 +117 22 negative_sampler.num_negs_per_pos 41.0 +117 22 training.batch_size 1.0 +117 23 model.embedding_dim 0.0 +117 23 model.hidden_dropout_rate 0.49074794846308467 +117 23 model.num_filters 4.0 +117 23 loss.margin 1.1860393249574686 +117 23 negative_sampler.num_negs_per_pos 38.0 +117 23 training.batch_size 1.0 +117 24 model.embedding_dim 1.0 +117 24 model.hidden_dropout_rate 0.2049664572665195 +117 24 model.num_filters 3.0 +117 24 loss.margin 2.4025502451504828 +117 24 negative_sampler.num_negs_per_pos 11.0 +117 24 training.batch_size 2.0 +117 25 model.embedding_dim 1.0 +117 25 model.hidden_dropout_rate 0.34023887131147257 +117 25 
model.num_filters 2.0 +117 25 loss.margin 8.104289426871048 +117 25 negative_sampler.num_negs_per_pos 99.0 +117 25 training.batch_size 0.0 +117 26 model.embedding_dim 0.0 +117 26 model.hidden_dropout_rate 0.4782117996084553 +117 26 model.num_filters 6.0 +117 26 loss.margin 8.97237722655583 +117 26 negative_sampler.num_negs_per_pos 59.0 +117 26 training.batch_size 1.0 +117 27 model.embedding_dim 2.0 +117 27 model.hidden_dropout_rate 0.32738757778437944 +117 27 model.num_filters 7.0 +117 27 loss.margin 1.1839636241167788 +117 27 negative_sampler.num_negs_per_pos 56.0 +117 27 training.batch_size 2.0 +117 28 model.embedding_dim 1.0 +117 28 model.hidden_dropout_rate 0.37390609595378566 +117 28 model.num_filters 7.0 +117 28 loss.margin 2.567734493902849 +117 28 negative_sampler.num_negs_per_pos 94.0 +117 28 training.batch_size 0.0 +117 29 model.embedding_dim 0.0 +117 29 model.hidden_dropout_rate 0.2717863254057321 +117 29 model.num_filters 2.0 +117 29 loss.margin 6.073881139776103 +117 29 negative_sampler.num_negs_per_pos 69.0 +117 29 training.batch_size 0.0 +117 30 model.embedding_dim 0.0 +117 30 model.hidden_dropout_rate 0.19195164493355119 +117 30 model.num_filters 3.0 +117 30 loss.margin 7.428669686559998 +117 30 negative_sampler.num_negs_per_pos 2.0 +117 30 training.batch_size 0.0 +117 31 model.embedding_dim 0.0 +117 31 model.hidden_dropout_rate 0.27491945742732804 +117 31 model.num_filters 3.0 +117 31 loss.margin 4.577049173384928 +117 31 negative_sampler.num_negs_per_pos 73.0 +117 31 training.batch_size 1.0 +117 32 model.embedding_dim 1.0 +117 32 model.hidden_dropout_rate 0.36124614779574493 +117 32 model.num_filters 3.0 +117 32 loss.margin 3.0246432363144278 +117 32 negative_sampler.num_negs_per_pos 36.0 +117 32 training.batch_size 2.0 +117 33 model.embedding_dim 1.0 +117 33 model.hidden_dropout_rate 0.1882992411492942 +117 33 model.num_filters 8.0 +117 33 loss.margin 2.7412109492091496 +117 33 negative_sampler.num_negs_per_pos 52.0 +117 33 training.batch_size 
1.0 +117 34 model.embedding_dim 2.0 +117 34 model.hidden_dropout_rate 0.3271511118223285 +117 34 model.num_filters 6.0 +117 34 loss.margin 7.792691999294678 +117 34 negative_sampler.num_negs_per_pos 82.0 +117 34 training.batch_size 0.0 +117 35 model.embedding_dim 2.0 +117 35 model.hidden_dropout_rate 0.3649545659657625 +117 35 model.num_filters 0.0 +117 35 loss.margin 8.457176787991484 +117 35 negative_sampler.num_negs_per_pos 91.0 +117 35 training.batch_size 1.0 +117 36 model.embedding_dim 2.0 +117 36 model.hidden_dropout_rate 0.377795942829538 +117 36 model.num_filters 4.0 +117 36 loss.margin 5.268911774264447 +117 36 negative_sampler.num_negs_per_pos 4.0 +117 36 training.batch_size 0.0 +117 37 model.embedding_dim 0.0 +117 37 model.hidden_dropout_rate 0.4996558279580675 +117 37 model.num_filters 7.0 +117 37 loss.margin 4.768626148536293 +117 37 negative_sampler.num_negs_per_pos 12.0 +117 37 training.batch_size 2.0 +117 38 model.embedding_dim 1.0 +117 38 model.hidden_dropout_rate 0.273840815890958 +117 38 model.num_filters 3.0 +117 38 loss.margin 1.1028843826650963 +117 38 negative_sampler.num_negs_per_pos 34.0 +117 38 training.batch_size 0.0 +117 39 model.embedding_dim 0.0 +117 39 model.hidden_dropout_rate 0.21363266889188504 +117 39 model.num_filters 7.0 +117 39 loss.margin 6.3947617885213965 +117 39 negative_sampler.num_negs_per_pos 7.0 +117 39 training.batch_size 0.0 +117 40 model.embedding_dim 2.0 +117 40 model.hidden_dropout_rate 0.17266711132757773 +117 40 model.num_filters 5.0 +117 40 loss.margin 4.967103468369948 +117 40 negative_sampler.num_negs_per_pos 47.0 +117 40 training.batch_size 2.0 +117 41 model.embedding_dim 2.0 +117 41 model.hidden_dropout_rate 0.13252931894303238 +117 41 model.num_filters 8.0 +117 41 loss.margin 3.0685769070351965 +117 41 negative_sampler.num_negs_per_pos 90.0 +117 41 training.batch_size 0.0 +117 1 dataset """kinships""" +117 1 model """convkb""" +117 1 loss """marginranking""" +117 1 regularizer """no""" +117 1 optimizer 
"""adadelta""" +117 1 training_loop """owa""" +117 1 negative_sampler """basic""" +117 1 evaluator """rankbased""" +117 2 dataset """kinships""" +117 2 model """convkb""" +117 2 loss """marginranking""" +117 2 regularizer """no""" +117 2 optimizer """adadelta""" +117 2 training_loop """owa""" +117 2 negative_sampler """basic""" +117 2 evaluator """rankbased""" +117 3 dataset """kinships""" +117 3 model """convkb""" +117 3 loss """marginranking""" +117 3 regularizer """no""" +117 3 optimizer """adadelta""" +117 3 training_loop """owa""" +117 3 negative_sampler """basic""" +117 3 evaluator """rankbased""" +117 4 dataset """kinships""" +117 4 model """convkb""" +117 4 loss """marginranking""" +117 4 regularizer """no""" +117 4 optimizer """adadelta""" +117 4 training_loop """owa""" +117 4 negative_sampler """basic""" +117 4 evaluator """rankbased""" +117 5 dataset """kinships""" +117 5 model """convkb""" +117 5 loss """marginranking""" +117 5 regularizer """no""" +117 5 optimizer """adadelta""" +117 5 training_loop """owa""" +117 5 negative_sampler """basic""" +117 5 evaluator """rankbased""" +117 6 dataset """kinships""" +117 6 model """convkb""" +117 6 loss """marginranking""" +117 6 regularizer """no""" +117 6 optimizer """adadelta""" +117 6 training_loop """owa""" +117 6 negative_sampler """basic""" +117 6 evaluator """rankbased""" +117 7 dataset """kinships""" +117 7 model """convkb""" +117 7 loss """marginranking""" +117 7 regularizer """no""" +117 7 optimizer """adadelta""" +117 7 training_loop """owa""" +117 7 negative_sampler """basic""" +117 7 evaluator """rankbased""" +117 8 dataset """kinships""" +117 8 model """convkb""" +117 8 loss """marginranking""" +117 8 regularizer """no""" +117 8 optimizer """adadelta""" +117 8 training_loop """owa""" +117 8 negative_sampler """basic""" +117 8 evaluator """rankbased""" +117 9 dataset """kinships""" +117 9 model """convkb""" +117 9 loss """marginranking""" +117 9 regularizer """no""" +117 9 optimizer """adadelta""" 
+117 9 training_loop """owa""" +117 9 negative_sampler """basic""" +117 9 evaluator """rankbased""" +117 10 dataset """kinships""" +117 10 model """convkb""" +117 10 loss """marginranking""" +117 10 regularizer """no""" +117 10 optimizer """adadelta""" +117 10 training_loop """owa""" +117 10 negative_sampler """basic""" +117 10 evaluator """rankbased""" +117 11 dataset """kinships""" +117 11 model """convkb""" +117 11 loss """marginranking""" +117 11 regularizer """no""" +117 11 optimizer """adadelta""" +117 11 training_loop """owa""" +117 11 negative_sampler """basic""" +117 11 evaluator """rankbased""" +117 12 dataset """kinships""" +117 12 model """convkb""" +117 12 loss """marginranking""" +117 12 regularizer """no""" +117 12 optimizer """adadelta""" +117 12 training_loop """owa""" +117 12 negative_sampler """basic""" +117 12 evaluator """rankbased""" +117 13 dataset """kinships""" +117 13 model """convkb""" +117 13 loss """marginranking""" +117 13 regularizer """no""" +117 13 optimizer """adadelta""" +117 13 training_loop """owa""" +117 13 negative_sampler """basic""" +117 13 evaluator """rankbased""" +117 14 dataset """kinships""" +117 14 model """convkb""" +117 14 loss """marginranking""" +117 14 regularizer """no""" +117 14 optimizer """adadelta""" +117 14 training_loop """owa""" +117 14 negative_sampler """basic""" +117 14 evaluator """rankbased""" +117 15 dataset """kinships""" +117 15 model """convkb""" +117 15 loss """marginranking""" +117 15 regularizer """no""" +117 15 optimizer """adadelta""" +117 15 training_loop """owa""" +117 15 negative_sampler """basic""" +117 15 evaluator """rankbased""" +117 16 dataset """kinships""" +117 16 model """convkb""" +117 16 loss """marginranking""" +117 16 regularizer """no""" +117 16 optimizer """adadelta""" +117 16 training_loop """owa""" +117 16 negative_sampler """basic""" +117 16 evaluator """rankbased""" +117 17 dataset """kinships""" +117 17 model """convkb""" +117 17 loss """marginranking""" +117 17 
regularizer """no""" +117 17 optimizer """adadelta""" +117 17 training_loop """owa""" +117 17 negative_sampler """basic""" +117 17 evaluator """rankbased""" +117 18 dataset """kinships""" +117 18 model """convkb""" +117 18 loss """marginranking""" +117 18 regularizer """no""" +117 18 optimizer """adadelta""" +117 18 training_loop """owa""" +117 18 negative_sampler """basic""" +117 18 evaluator """rankbased""" +117 19 dataset """kinships""" +117 19 model """convkb""" +117 19 loss """marginranking""" +117 19 regularizer """no""" +117 19 optimizer """adadelta""" +117 19 training_loop """owa""" +117 19 negative_sampler """basic""" +117 19 evaluator """rankbased""" +117 20 dataset """kinships""" +117 20 model """convkb""" +117 20 loss """marginranking""" +117 20 regularizer """no""" +117 20 optimizer """adadelta""" +117 20 training_loop """owa""" +117 20 negative_sampler """basic""" +117 20 evaluator """rankbased""" +117 21 dataset """kinships""" +117 21 model """convkb""" +117 21 loss """marginranking""" +117 21 regularizer """no""" +117 21 optimizer """adadelta""" +117 21 training_loop """owa""" +117 21 negative_sampler """basic""" +117 21 evaluator """rankbased""" +117 22 dataset """kinships""" +117 22 model """convkb""" +117 22 loss """marginranking""" +117 22 regularizer """no""" +117 22 optimizer """adadelta""" +117 22 training_loop """owa""" +117 22 negative_sampler """basic""" +117 22 evaluator """rankbased""" +117 23 dataset """kinships""" +117 23 model """convkb""" +117 23 loss """marginranking""" +117 23 regularizer """no""" +117 23 optimizer """adadelta""" +117 23 training_loop """owa""" +117 23 negative_sampler """basic""" +117 23 evaluator """rankbased""" +117 24 dataset """kinships""" +117 24 model """convkb""" +117 24 loss """marginranking""" +117 24 regularizer """no""" +117 24 optimizer """adadelta""" +117 24 training_loop """owa""" +117 24 negative_sampler """basic""" +117 24 evaluator """rankbased""" +117 25 dataset """kinships""" +117 25 model 
"""convkb""" +117 25 loss """marginranking""" +117 25 regularizer """no""" +117 25 optimizer """adadelta""" +117 25 training_loop """owa""" +117 25 negative_sampler """basic""" +117 25 evaluator """rankbased""" +117 26 dataset """kinships""" +117 26 model """convkb""" +117 26 loss """marginranking""" +117 26 regularizer """no""" +117 26 optimizer """adadelta""" +117 26 training_loop """owa""" +117 26 negative_sampler """basic""" +117 26 evaluator """rankbased""" +117 27 dataset """kinships""" +117 27 model """convkb""" +117 27 loss """marginranking""" +117 27 regularizer """no""" +117 27 optimizer """adadelta""" +117 27 training_loop """owa""" +117 27 negative_sampler """basic""" +117 27 evaluator """rankbased""" +117 28 dataset """kinships""" +117 28 model """convkb""" +117 28 loss """marginranking""" +117 28 regularizer """no""" +117 28 optimizer """adadelta""" +117 28 training_loop """owa""" +117 28 negative_sampler """basic""" +117 28 evaluator """rankbased""" +117 29 dataset """kinships""" +117 29 model """convkb""" +117 29 loss """marginranking""" +117 29 regularizer """no""" +117 29 optimizer """adadelta""" +117 29 training_loop """owa""" +117 29 negative_sampler """basic""" +117 29 evaluator """rankbased""" +117 30 dataset """kinships""" +117 30 model """convkb""" +117 30 loss """marginranking""" +117 30 regularizer """no""" +117 30 optimizer """adadelta""" +117 30 training_loop """owa""" +117 30 negative_sampler """basic""" +117 30 evaluator """rankbased""" +117 31 dataset """kinships""" +117 31 model """convkb""" +117 31 loss """marginranking""" +117 31 regularizer """no""" +117 31 optimizer """adadelta""" +117 31 training_loop """owa""" +117 31 negative_sampler """basic""" +117 31 evaluator """rankbased""" +117 32 dataset """kinships""" +117 32 model """convkb""" +117 32 loss """marginranking""" +117 32 regularizer """no""" +117 32 optimizer """adadelta""" +117 32 training_loop """owa""" +117 32 negative_sampler """basic""" +117 32 evaluator 
"""rankbased""" +117 33 dataset """kinships""" +117 33 model """convkb""" +117 33 loss """marginranking""" +117 33 regularizer """no""" +117 33 optimizer """adadelta""" +117 33 training_loop """owa""" +117 33 negative_sampler """basic""" +117 33 evaluator """rankbased""" +117 34 dataset """kinships""" +117 34 model """convkb""" +117 34 loss """marginranking""" +117 34 regularizer """no""" +117 34 optimizer """adadelta""" +117 34 training_loop """owa""" +117 34 negative_sampler """basic""" +117 34 evaluator """rankbased""" +117 35 dataset """kinships""" +117 35 model """convkb""" +117 35 loss """marginranking""" +117 35 regularizer """no""" +117 35 optimizer """adadelta""" +117 35 training_loop """owa""" +117 35 negative_sampler """basic""" +117 35 evaluator """rankbased""" +117 36 dataset """kinships""" +117 36 model """convkb""" +117 36 loss """marginranking""" +117 36 regularizer """no""" +117 36 optimizer """adadelta""" +117 36 training_loop """owa""" +117 36 negative_sampler """basic""" +117 36 evaluator """rankbased""" +117 37 dataset """kinships""" +117 37 model """convkb""" +117 37 loss """marginranking""" +117 37 regularizer """no""" +117 37 optimizer """adadelta""" +117 37 training_loop """owa""" +117 37 negative_sampler """basic""" +117 37 evaluator """rankbased""" +117 38 dataset """kinships""" +117 38 model """convkb""" +117 38 loss """marginranking""" +117 38 regularizer """no""" +117 38 optimizer """adadelta""" +117 38 training_loop """owa""" +117 38 negative_sampler """basic""" +117 38 evaluator """rankbased""" +117 39 dataset """kinships""" +117 39 model """convkb""" +117 39 loss """marginranking""" +117 39 regularizer """no""" +117 39 optimizer """adadelta""" +117 39 training_loop """owa""" +117 39 negative_sampler """basic""" +117 39 evaluator """rankbased""" +117 40 dataset """kinships""" +117 40 model """convkb""" +117 40 loss """marginranking""" +117 40 regularizer """no""" +117 40 optimizer """adadelta""" +117 40 training_loop """owa""" +117 
40 negative_sampler """basic""" +117 40 evaluator """rankbased""" +117 41 dataset """kinships""" +117 41 model """convkb""" +117 41 loss """marginranking""" +117 41 regularizer """no""" +117 41 optimizer """adadelta""" +117 41 training_loop """owa""" +117 41 negative_sampler """basic""" +117 41 evaluator """rankbased""" +118 1 model.embedding_dim 0.0 +118 1 model.hidden_dropout_rate 0.4176200563541085 +118 1 model.num_filters 3.0 +118 1 loss.margin 1.5709102096743537 +118 1 loss.adversarial_temperature 0.11920295184157376 +118 1 negative_sampler.num_negs_per_pos 2.0 +118 1 training.batch_size 1.0 +118 2 model.embedding_dim 1.0 +118 2 model.hidden_dropout_rate 0.3283812481566366 +118 2 model.num_filters 4.0 +118 2 loss.margin 24.28327499650009 +118 2 loss.adversarial_temperature 0.6497025441644607 +118 2 negative_sampler.num_negs_per_pos 16.0 +118 2 training.batch_size 0.0 +118 3 model.embedding_dim 1.0 +118 3 model.hidden_dropout_rate 0.32979788427073875 +118 3 model.num_filters 8.0 +118 3 loss.margin 5.136747129846882 +118 3 loss.adversarial_temperature 0.29140686450280817 +118 3 negative_sampler.num_negs_per_pos 78.0 +118 3 training.batch_size 2.0 +118 4 model.embedding_dim 0.0 +118 4 model.hidden_dropout_rate 0.1684000936847609 +118 4 model.num_filters 5.0 +118 4 loss.margin 28.464404751276255 +118 4 loss.adversarial_temperature 0.8530877452700282 +118 4 negative_sampler.num_negs_per_pos 55.0 +118 4 training.batch_size 2.0 +118 5 model.embedding_dim 1.0 +118 5 model.hidden_dropout_rate 0.39260079516538493 +118 5 model.num_filters 0.0 +118 5 loss.margin 7.6625262991207235 +118 5 loss.adversarial_temperature 0.5846418787252446 +118 5 negative_sampler.num_negs_per_pos 70.0 +118 5 training.batch_size 2.0 +118 6 model.embedding_dim 2.0 +118 6 model.hidden_dropout_rate 0.3359516887401752 +118 6 model.num_filters 4.0 +118 6 loss.margin 22.74292521259088 +118 6 loss.adversarial_temperature 0.3478272808965538 +118 6 negative_sampler.num_negs_per_pos 54.0 +118 6 
training.batch_size 2.0 +118 7 model.embedding_dim 2.0 +118 7 model.hidden_dropout_rate 0.31351239633189054 +118 7 model.num_filters 9.0 +118 7 loss.margin 8.605407659426863 +118 7 loss.adversarial_temperature 0.6139846808327087 +118 7 negative_sampler.num_negs_per_pos 64.0 +118 7 training.batch_size 0.0 +118 8 model.embedding_dim 0.0 +118 8 model.hidden_dropout_rate 0.14607635396790172 +118 8 model.num_filters 6.0 +118 8 loss.margin 23.406663479106662 +118 8 loss.adversarial_temperature 0.6543896366434949 +118 8 negative_sampler.num_negs_per_pos 85.0 +118 8 training.batch_size 1.0 +118 9 model.embedding_dim 0.0 +118 9 model.hidden_dropout_rate 0.2866497526912589 +118 9 model.num_filters 5.0 +118 9 loss.margin 8.492626662542412 +118 9 loss.adversarial_temperature 0.6007760528699182 +118 9 negative_sampler.num_negs_per_pos 48.0 +118 9 training.batch_size 1.0 +118 10 model.embedding_dim 1.0 +118 10 model.hidden_dropout_rate 0.4185295083345999 +118 10 model.num_filters 0.0 +118 10 loss.margin 17.130226269928162 +118 10 loss.adversarial_temperature 0.9860470053411013 +118 10 negative_sampler.num_negs_per_pos 81.0 +118 10 training.batch_size 1.0 +118 11 model.embedding_dim 2.0 +118 11 model.hidden_dropout_rate 0.11826029262620806 +118 11 model.num_filters 8.0 +118 11 loss.margin 4.519305736387391 +118 11 loss.adversarial_temperature 0.43337376382690684 +118 11 negative_sampler.num_negs_per_pos 29.0 +118 11 training.batch_size 0.0 +118 12 model.embedding_dim 2.0 +118 12 model.hidden_dropout_rate 0.15159952897975743 +118 12 model.num_filters 2.0 +118 12 loss.margin 18.666503342779393 +118 12 loss.adversarial_temperature 0.9296744471831052 +118 12 negative_sampler.num_negs_per_pos 77.0 +118 12 training.batch_size 1.0 +118 13 model.embedding_dim 2.0 +118 13 model.hidden_dropout_rate 0.35296417364565624 +118 13 model.num_filters 6.0 +118 13 loss.margin 3.476106958751125 +118 13 loss.adversarial_temperature 0.3762212630650049 +118 13 negative_sampler.num_negs_per_pos 53.0 
+118 13 training.batch_size 2.0 +118 1 dataset """kinships""" +118 1 model """convkb""" +118 1 loss """nssa""" +118 1 regularizer """no""" +118 1 optimizer """adadelta""" +118 1 training_loop """owa""" +118 1 negative_sampler """basic""" +118 1 evaluator """rankbased""" +118 2 dataset """kinships""" +118 2 model """convkb""" +118 2 loss """nssa""" +118 2 regularizer """no""" +118 2 optimizer """adadelta""" +118 2 training_loop """owa""" +118 2 negative_sampler """basic""" +118 2 evaluator """rankbased""" +118 3 dataset """kinships""" +118 3 model """convkb""" +118 3 loss """nssa""" +118 3 regularizer """no""" +118 3 optimizer """adadelta""" +118 3 training_loop """owa""" +118 3 negative_sampler """basic""" +118 3 evaluator """rankbased""" +118 4 dataset """kinships""" +118 4 model """convkb""" +118 4 loss """nssa""" +118 4 regularizer """no""" +118 4 optimizer """adadelta""" +118 4 training_loop """owa""" +118 4 negative_sampler """basic""" +118 4 evaluator """rankbased""" +118 5 dataset """kinships""" +118 5 model """convkb""" +118 5 loss """nssa""" +118 5 regularizer """no""" +118 5 optimizer """adadelta""" +118 5 training_loop """owa""" +118 5 negative_sampler """basic""" +118 5 evaluator """rankbased""" +118 6 dataset """kinships""" +118 6 model """convkb""" +118 6 loss """nssa""" +118 6 regularizer """no""" +118 6 optimizer """adadelta""" +118 6 training_loop """owa""" +118 6 negative_sampler """basic""" +118 6 evaluator """rankbased""" +118 7 dataset """kinships""" +118 7 model """convkb""" +118 7 loss """nssa""" +118 7 regularizer """no""" +118 7 optimizer """adadelta""" +118 7 training_loop """owa""" +118 7 negative_sampler """basic""" +118 7 evaluator """rankbased""" +118 8 dataset """kinships""" +118 8 model """convkb""" +118 8 loss """nssa""" +118 8 regularizer """no""" +118 8 optimizer """adadelta""" +118 8 training_loop """owa""" +118 8 negative_sampler """basic""" +118 8 evaluator """rankbased""" +118 9 dataset """kinships""" +118 9 model """convkb""" 
+118 9 loss """nssa""" +118 9 regularizer """no""" +118 9 optimizer """adadelta""" +118 9 training_loop """owa""" +118 9 negative_sampler """basic""" +118 9 evaluator """rankbased""" +118 10 dataset """kinships""" +118 10 model """convkb""" +118 10 loss """nssa""" +118 10 regularizer """no""" +118 10 optimizer """adadelta""" +118 10 training_loop """owa""" +118 10 negative_sampler """basic""" +118 10 evaluator """rankbased""" +118 11 dataset """kinships""" +118 11 model """convkb""" +118 11 loss """nssa""" +118 11 regularizer """no""" +118 11 optimizer """adadelta""" +118 11 training_loop """owa""" +118 11 negative_sampler """basic""" +118 11 evaluator """rankbased""" +118 12 dataset """kinships""" +118 12 model """convkb""" +118 12 loss """nssa""" +118 12 regularizer """no""" +118 12 optimizer """adadelta""" +118 12 training_loop """owa""" +118 12 negative_sampler """basic""" +118 12 evaluator """rankbased""" +118 13 dataset """kinships""" +118 13 model """convkb""" +118 13 loss """nssa""" +118 13 regularizer """no""" +118 13 optimizer """adadelta""" +118 13 training_loop """owa""" +118 13 negative_sampler """basic""" +118 13 evaluator """rankbased""" +119 1 model.embedding_dim 0.0 +119 1 model.hidden_dropout_rate 0.47270543464610737 +119 1 model.num_filters 2.0 +119 1 loss.margin 20.08959847280728 +119 1 loss.adversarial_temperature 0.3406017045769447 +119 1 negative_sampler.num_negs_per_pos 26.0 +119 1 training.batch_size 2.0 +119 2 model.embedding_dim 1.0 +119 2 model.hidden_dropout_rate 0.2972174357665033 +119 2 model.num_filters 9.0 +119 2 loss.margin 29.858513658335713 +119 2 loss.adversarial_temperature 0.451169355874789 +119 2 negative_sampler.num_negs_per_pos 43.0 +119 2 training.batch_size 1.0 +119 3 model.embedding_dim 0.0 +119 3 model.hidden_dropout_rate 0.2773810544432497 +119 3 model.num_filters 4.0 +119 3 loss.margin 28.55943939636855 +119 3 loss.adversarial_temperature 0.5191117994705643 +119 3 negative_sampler.num_negs_per_pos 97.0 +119 3 
training.batch_size 1.0 +119 4 model.embedding_dim 1.0 +119 4 model.hidden_dropout_rate 0.3918204514534054 +119 4 model.num_filters 1.0 +119 4 loss.margin 3.916481235417765 +119 4 loss.adversarial_temperature 0.34876772890582797 +119 4 negative_sampler.num_negs_per_pos 53.0 +119 4 training.batch_size 0.0 +119 5 model.embedding_dim 2.0 +119 5 model.hidden_dropout_rate 0.4142304743805326 +119 5 model.num_filters 4.0 +119 5 loss.margin 6.986889910546066 +119 5 loss.adversarial_temperature 0.5061832861861375 +119 5 negative_sampler.num_negs_per_pos 92.0 +119 5 training.batch_size 0.0 +119 6 model.embedding_dim 0.0 +119 6 model.hidden_dropout_rate 0.45233309652079334 +119 6 model.num_filters 1.0 +119 6 loss.margin 3.5935754224957406 +119 6 loss.adversarial_temperature 0.2624970781621472 +119 6 negative_sampler.num_negs_per_pos 91.0 +119 6 training.batch_size 0.0 +119 7 model.embedding_dim 2.0 +119 7 model.hidden_dropout_rate 0.14832477194134674 +119 7 model.num_filters 5.0 +119 7 loss.margin 29.907246170335803 +119 7 loss.adversarial_temperature 0.9181910214706154 +119 7 negative_sampler.num_negs_per_pos 78.0 +119 7 training.batch_size 0.0 +119 8 model.embedding_dim 1.0 +119 8 model.hidden_dropout_rate 0.3581695407606392 +119 8 model.num_filters 5.0 +119 8 loss.margin 13.722043525641336 +119 8 loss.adversarial_temperature 0.2152680494451943 +119 8 negative_sampler.num_negs_per_pos 70.0 +119 8 training.batch_size 0.0 +119 9 model.embedding_dim 2.0 +119 9 model.hidden_dropout_rate 0.11752966820075561 +119 9 model.num_filters 0.0 +119 9 loss.margin 8.520292102942552 +119 9 loss.adversarial_temperature 0.22207277243605028 +119 9 negative_sampler.num_negs_per_pos 37.0 +119 9 training.batch_size 2.0 +119 10 model.embedding_dim 0.0 +119 10 model.hidden_dropout_rate 0.37298025152759773 +119 10 model.num_filters 6.0 +119 10 loss.margin 20.302609515199684 +119 10 loss.adversarial_temperature 0.29086807811183474 +119 10 negative_sampler.num_negs_per_pos 46.0 +119 10 
training.batch_size 0.0 +119 11 model.embedding_dim 2.0 +119 11 model.hidden_dropout_rate 0.4239291942259671 +119 11 model.num_filters 4.0 +119 11 loss.margin 3.0161394351911124 +119 11 loss.adversarial_temperature 0.871815421463592 +119 11 negative_sampler.num_negs_per_pos 89.0 +119 11 training.batch_size 2.0 +119 12 model.embedding_dim 2.0 +119 12 model.hidden_dropout_rate 0.1341947141504366 +119 12 model.num_filters 2.0 +119 12 loss.margin 22.447495535467045 +119 12 loss.adversarial_temperature 0.21872747649083302 +119 12 negative_sampler.num_negs_per_pos 77.0 +119 12 training.batch_size 0.0 +119 13 model.embedding_dim 2.0 +119 13 model.hidden_dropout_rate 0.10641695352700298 +119 13 model.num_filters 3.0 +119 13 loss.margin 10.00922658100849 +119 13 loss.adversarial_temperature 0.7872646769878797 +119 13 negative_sampler.num_negs_per_pos 42.0 +119 13 training.batch_size 2.0 +119 14 model.embedding_dim 2.0 +119 14 model.hidden_dropout_rate 0.37048922910596416 +119 14 model.num_filters 9.0 +119 14 loss.margin 14.508868773041227 +119 14 loss.adversarial_temperature 0.20329177382682584 +119 14 negative_sampler.num_negs_per_pos 31.0 +119 14 training.batch_size 1.0 +119 15 model.embedding_dim 0.0 +119 15 model.hidden_dropout_rate 0.12768967120418828 +119 15 model.num_filters 3.0 +119 15 loss.margin 29.63682470115342 +119 15 loss.adversarial_temperature 0.988395264938795 +119 15 negative_sampler.num_negs_per_pos 81.0 +119 15 training.batch_size 0.0 +119 16 model.embedding_dim 0.0 +119 16 model.hidden_dropout_rate 0.17786277411492857 +119 16 model.num_filters 7.0 +119 16 loss.margin 25.721182931488233 +119 16 loss.adversarial_temperature 0.7458577992111484 +119 16 negative_sampler.num_negs_per_pos 63.0 +119 16 training.batch_size 1.0 +119 17 model.embedding_dim 1.0 +119 17 model.hidden_dropout_rate 0.1236139050561545 +119 17 model.num_filters 7.0 +119 17 loss.margin 17.849671488476126 +119 17 loss.adversarial_temperature 0.8241800815519635 +119 17 
negative_sampler.num_negs_per_pos 33.0 +119 17 training.batch_size 1.0 +119 18 model.embedding_dim 1.0 +119 18 model.hidden_dropout_rate 0.2829871319279603 +119 18 model.num_filters 6.0 +119 18 loss.margin 10.295772070303062 +119 18 loss.adversarial_temperature 0.3153252975467026 +119 18 negative_sampler.num_negs_per_pos 95.0 +119 18 training.batch_size 2.0 +119 19 model.embedding_dim 2.0 +119 19 model.hidden_dropout_rate 0.23102133965376034 +119 19 model.num_filters 2.0 +119 19 loss.margin 18.449956931758095 +119 19 loss.adversarial_temperature 0.2959165601387611 +119 19 negative_sampler.num_negs_per_pos 68.0 +119 19 training.batch_size 0.0 +119 20 model.embedding_dim 0.0 +119 20 model.hidden_dropout_rate 0.334829071525917 +119 20 model.num_filters 7.0 +119 20 loss.margin 21.883134830648174 +119 20 loss.adversarial_temperature 0.5159041758687701 +119 20 negative_sampler.num_negs_per_pos 44.0 +119 20 training.batch_size 1.0 +119 21 model.embedding_dim 0.0 +119 21 model.hidden_dropout_rate 0.12446841687031768 +119 21 model.num_filters 2.0 +119 21 loss.margin 8.310960177522414 +119 21 loss.adversarial_temperature 0.6054755230358443 +119 21 negative_sampler.num_negs_per_pos 32.0 +119 21 training.batch_size 1.0 +119 22 model.embedding_dim 2.0 +119 22 model.hidden_dropout_rate 0.112758858243728 +119 22 model.num_filters 1.0 +119 22 loss.margin 15.45988187004843 +119 22 loss.adversarial_temperature 0.5223924841419211 +119 22 negative_sampler.num_negs_per_pos 90.0 +119 22 training.batch_size 0.0 +119 1 dataset """kinships""" +119 1 model """convkb""" +119 1 loss """nssa""" +119 1 regularizer """no""" +119 1 optimizer """adadelta""" +119 1 training_loop """owa""" +119 1 negative_sampler """basic""" +119 1 evaluator """rankbased""" +119 2 dataset """kinships""" +119 2 model """convkb""" +119 2 loss """nssa""" +119 2 regularizer """no""" +119 2 optimizer """adadelta""" +119 2 training_loop """owa""" +119 2 negative_sampler """basic""" +119 2 evaluator """rankbased""" +119 3 
dataset """kinships""" +119 3 model """convkb""" +119 3 loss """nssa""" +119 3 regularizer """no""" +119 3 optimizer """adadelta""" +119 3 training_loop """owa""" +119 3 negative_sampler """basic""" +119 3 evaluator """rankbased""" +119 4 dataset """kinships""" +119 4 model """convkb""" +119 4 loss """nssa""" +119 4 regularizer """no""" +119 4 optimizer """adadelta""" +119 4 training_loop """owa""" +119 4 negative_sampler """basic""" +119 4 evaluator """rankbased""" +119 5 dataset """kinships""" +119 5 model """convkb""" +119 5 loss """nssa""" +119 5 regularizer """no""" +119 5 optimizer """adadelta""" +119 5 training_loop """owa""" +119 5 negative_sampler """basic""" +119 5 evaluator """rankbased""" +119 6 dataset """kinships""" +119 6 model """convkb""" +119 6 loss """nssa""" +119 6 regularizer """no""" +119 6 optimizer """adadelta""" +119 6 training_loop """owa""" +119 6 negative_sampler """basic""" +119 6 evaluator """rankbased""" +119 7 dataset """kinships""" +119 7 model """convkb""" +119 7 loss """nssa""" +119 7 regularizer """no""" +119 7 optimizer """adadelta""" +119 7 training_loop """owa""" +119 7 negative_sampler """basic""" +119 7 evaluator """rankbased""" +119 8 dataset """kinships""" +119 8 model """convkb""" +119 8 loss """nssa""" +119 8 regularizer """no""" +119 8 optimizer """adadelta""" +119 8 training_loop """owa""" +119 8 negative_sampler """basic""" +119 8 evaluator """rankbased""" +119 9 dataset """kinships""" +119 9 model """convkb""" +119 9 loss """nssa""" +119 9 regularizer """no""" +119 9 optimizer """adadelta""" +119 9 training_loop """owa""" +119 9 negative_sampler """basic""" +119 9 evaluator """rankbased""" +119 10 dataset """kinships""" +119 10 model """convkb""" +119 10 loss """nssa""" +119 10 regularizer """no""" +119 10 optimizer """adadelta""" +119 10 training_loop """owa""" +119 10 negative_sampler """basic""" +119 10 evaluator """rankbased""" +119 11 dataset """kinships""" +119 11 model """convkb""" +119 11 loss """nssa""" +119 
11 regularizer """no""" +119 11 optimizer """adadelta""" +119 11 training_loop """owa""" +119 11 negative_sampler """basic""" +119 11 evaluator """rankbased""" +119 12 dataset """kinships""" +119 12 model """convkb""" +119 12 loss """nssa""" +119 12 regularizer """no""" +119 12 optimizer """adadelta""" +119 12 training_loop """owa""" +119 12 negative_sampler """basic""" +119 12 evaluator """rankbased""" +119 13 dataset """kinships""" +119 13 model """convkb""" +119 13 loss """nssa""" +119 13 regularizer """no""" +119 13 optimizer """adadelta""" +119 13 training_loop """owa""" +119 13 negative_sampler """basic""" +119 13 evaluator """rankbased""" +119 14 dataset """kinships""" +119 14 model """convkb""" +119 14 loss """nssa""" +119 14 regularizer """no""" +119 14 optimizer """adadelta""" +119 14 training_loop """owa""" +119 14 negative_sampler """basic""" +119 14 evaluator """rankbased""" +119 15 dataset """kinships""" +119 15 model """convkb""" +119 15 loss """nssa""" +119 15 regularizer """no""" +119 15 optimizer """adadelta""" +119 15 training_loop """owa""" +119 15 negative_sampler """basic""" +119 15 evaluator """rankbased""" +119 16 dataset """kinships""" +119 16 model """convkb""" +119 16 loss """nssa""" +119 16 regularizer """no""" +119 16 optimizer """adadelta""" +119 16 training_loop """owa""" +119 16 negative_sampler """basic""" +119 16 evaluator """rankbased""" +119 17 dataset """kinships""" +119 17 model """convkb""" +119 17 loss """nssa""" +119 17 regularizer """no""" +119 17 optimizer """adadelta""" +119 17 training_loop """owa""" +119 17 negative_sampler """basic""" +119 17 evaluator """rankbased""" +119 18 dataset """kinships""" +119 18 model """convkb""" +119 18 loss """nssa""" +119 18 regularizer """no""" +119 18 optimizer """adadelta""" +119 18 training_loop """owa""" +119 18 negative_sampler """basic""" +119 18 evaluator """rankbased""" +119 19 dataset """kinships""" +119 19 model """convkb""" +119 19 loss """nssa""" +119 19 regularizer """no""" 
+119 19 optimizer """adadelta""" +119 19 training_loop """owa""" +119 19 negative_sampler """basic""" +119 19 evaluator """rankbased""" +119 20 dataset """kinships""" +119 20 model """convkb""" +119 20 loss """nssa""" +119 20 regularizer """no""" +119 20 optimizer """adadelta""" +119 20 training_loop """owa""" +119 20 negative_sampler """basic""" +119 20 evaluator """rankbased""" +119 21 dataset """kinships""" +119 21 model """convkb""" +119 21 loss """nssa""" +119 21 regularizer """no""" +119 21 optimizer """adadelta""" +119 21 training_loop """owa""" +119 21 negative_sampler """basic""" +119 21 evaluator """rankbased""" +119 22 dataset """kinships""" +119 22 model """convkb""" +119 22 loss """nssa""" +119 22 regularizer """no""" +119 22 optimizer """adadelta""" +119 22 training_loop """owa""" +119 22 negative_sampler """basic""" +119 22 evaluator """rankbased""" +120 1 model.embedding_dim 0.0 +120 1 model.hidden_dropout_rate 0.1182384928199697 +120 1 model.num_filters 2.0 +120 1 optimizer.lr 0.07164450280512624 +120 1 negative_sampler.num_negs_per_pos 62.0 +120 1 training.batch_size 2.0 +120 2 model.embedding_dim 1.0 +120 2 model.hidden_dropout_rate 0.23045708756633682 +120 2 model.num_filters 1.0 +120 2 optimizer.lr 0.012200258150879452 +120 2 negative_sampler.num_negs_per_pos 50.0 +120 2 training.batch_size 1.0 +120 3 model.embedding_dim 0.0 +120 3 model.hidden_dropout_rate 0.441783863832047 +120 3 model.num_filters 1.0 +120 3 optimizer.lr 0.023172074444718048 +120 3 negative_sampler.num_negs_per_pos 35.0 +120 3 training.batch_size 1.0 +120 4 model.embedding_dim 1.0 +120 4 model.hidden_dropout_rate 0.36399128767310185 +120 4 model.num_filters 7.0 +120 4 optimizer.lr 0.0027327389369742943 +120 4 negative_sampler.num_negs_per_pos 19.0 +120 4 training.batch_size 2.0 +120 5 model.embedding_dim 0.0 +120 5 model.hidden_dropout_rate 0.2663312451282635 +120 5 model.num_filters 3.0 +120 5 optimizer.lr 0.03278127322864651 +120 5 negative_sampler.num_negs_per_pos 69.0 
+120 5 training.batch_size 0.0 +120 6 model.embedding_dim 1.0 +120 6 model.hidden_dropout_rate 0.36068854056756705 +120 6 model.num_filters 7.0 +120 6 optimizer.lr 0.014398540433011657 +120 6 negative_sampler.num_negs_per_pos 30.0 +120 6 training.batch_size 0.0 +120 7 model.embedding_dim 0.0 +120 7 model.hidden_dropout_rate 0.2496129424287387 +120 7 model.num_filters 8.0 +120 7 optimizer.lr 0.017050835456991154 +120 7 negative_sampler.num_negs_per_pos 20.0 +120 7 training.batch_size 0.0 +120 8 model.embedding_dim 0.0 +120 8 model.hidden_dropout_rate 0.27240325241103 +120 8 model.num_filters 6.0 +120 8 optimizer.lr 0.0016926642846437328 +120 8 negative_sampler.num_negs_per_pos 80.0 +120 8 training.batch_size 2.0 +120 9 model.embedding_dim 2.0 +120 9 model.hidden_dropout_rate 0.45126093516231114 +120 9 model.num_filters 1.0 +120 9 optimizer.lr 0.0018443786389304505 +120 9 negative_sampler.num_negs_per_pos 53.0 +120 9 training.batch_size 1.0 +120 10 model.embedding_dim 0.0 +120 10 model.hidden_dropout_rate 0.24263155948975018 +120 10 model.num_filters 4.0 +120 10 optimizer.lr 0.002981972917459561 +120 10 negative_sampler.num_negs_per_pos 34.0 +120 10 training.batch_size 0.0 +120 11 model.embedding_dim 1.0 +120 11 model.hidden_dropout_rate 0.10473533404821263 +120 11 model.num_filters 0.0 +120 11 optimizer.lr 0.00771798258608782 +120 11 negative_sampler.num_negs_per_pos 99.0 +120 11 training.batch_size 0.0 +120 12 model.embedding_dim 1.0 +120 12 model.hidden_dropout_rate 0.40655567145851346 +120 12 model.num_filters 6.0 +120 12 optimizer.lr 0.003134844085697107 +120 12 negative_sampler.num_negs_per_pos 5.0 +120 12 training.batch_size 1.0 +120 13 model.embedding_dim 0.0 +120 13 model.hidden_dropout_rate 0.45525954463255347 +120 13 model.num_filters 1.0 +120 13 optimizer.lr 0.006431682691930527 +120 13 negative_sampler.num_negs_per_pos 20.0 +120 13 training.batch_size 2.0 +120 14 model.embedding_dim 2.0 +120 14 model.hidden_dropout_rate 0.47580781362344005 +120 14 
model.num_filters 4.0 +120 14 optimizer.lr 0.0018373708271124268 +120 14 negative_sampler.num_negs_per_pos 38.0 +120 14 training.batch_size 2.0 +120 15 model.embedding_dim 1.0 +120 15 model.hidden_dropout_rate 0.16575816748681677 +120 15 model.num_filters 5.0 +120 15 optimizer.lr 0.022179794642165476 +120 15 negative_sampler.num_negs_per_pos 44.0 +120 15 training.batch_size 0.0 +120 16 model.embedding_dim 1.0 +120 16 model.hidden_dropout_rate 0.20111511854049313 +120 16 model.num_filters 7.0 +120 16 optimizer.lr 0.0019173955480836292 +120 16 negative_sampler.num_negs_per_pos 62.0 +120 16 training.batch_size 0.0 +120 17 model.embedding_dim 2.0 +120 17 model.hidden_dropout_rate 0.12782024593049815 +120 17 model.num_filters 7.0 +120 17 optimizer.lr 0.05288996246576378 +120 17 negative_sampler.num_negs_per_pos 64.0 +120 17 training.batch_size 1.0 +120 18 model.embedding_dim 1.0 +120 18 model.hidden_dropout_rate 0.24991820728178216 +120 18 model.num_filters 7.0 +120 18 optimizer.lr 0.027402067992618424 +120 18 negative_sampler.num_negs_per_pos 86.0 +120 18 training.batch_size 2.0 +120 19 model.embedding_dim 0.0 +120 19 model.hidden_dropout_rate 0.28533628815837553 +120 19 model.num_filters 2.0 +120 19 optimizer.lr 0.020641992174895384 +120 19 negative_sampler.num_negs_per_pos 45.0 +120 19 training.batch_size 1.0 +120 20 model.embedding_dim 1.0 +120 20 model.hidden_dropout_rate 0.14629422582359966 +120 20 model.num_filters 6.0 +120 20 optimizer.lr 0.002216447023128812 +120 20 negative_sampler.num_negs_per_pos 58.0 +120 20 training.batch_size 2.0 +120 21 model.embedding_dim 2.0 +120 21 model.hidden_dropout_rate 0.25649513981827415 +120 21 model.num_filters 0.0 +120 21 optimizer.lr 0.05617554971362212 +120 21 negative_sampler.num_negs_per_pos 73.0 +120 21 training.batch_size 0.0 +120 22 model.embedding_dim 1.0 +120 22 model.hidden_dropout_rate 0.40292807045783524 +120 22 model.num_filters 7.0 +120 22 optimizer.lr 0.001801510957798398 +120 22 
negative_sampler.num_negs_per_pos 60.0 +120 22 training.batch_size 2.0 +120 23 model.embedding_dim 1.0 +120 23 model.hidden_dropout_rate 0.4844898966096955 +120 23 model.num_filters 1.0 +120 23 optimizer.lr 0.002149385607158463 +120 23 negative_sampler.num_negs_per_pos 5.0 +120 23 training.batch_size 1.0 +120 24 model.embedding_dim 0.0 +120 24 model.hidden_dropout_rate 0.4308114444773452 +120 24 model.num_filters 4.0 +120 24 optimizer.lr 0.08289137357129933 +120 24 negative_sampler.num_negs_per_pos 36.0 +120 24 training.batch_size 2.0 +120 25 model.embedding_dim 2.0 +120 25 model.hidden_dropout_rate 0.17918076239439348 +120 25 model.num_filters 1.0 +120 25 optimizer.lr 0.04295120199411552 +120 25 negative_sampler.num_negs_per_pos 65.0 +120 25 training.batch_size 0.0 +120 26 model.embedding_dim 0.0 +120 26 model.hidden_dropout_rate 0.4080828582727183 +120 26 model.num_filters 2.0 +120 26 optimizer.lr 0.022628950901515347 +120 26 negative_sampler.num_negs_per_pos 93.0 +120 26 training.batch_size 0.0 +120 27 model.embedding_dim 2.0 +120 27 model.hidden_dropout_rate 0.43995037941695 +120 27 model.num_filters 9.0 +120 27 optimizer.lr 0.028794273802531925 +120 27 negative_sampler.num_negs_per_pos 50.0 +120 27 training.batch_size 2.0 +120 28 model.embedding_dim 2.0 +120 28 model.hidden_dropout_rate 0.24880671604883908 +120 28 model.num_filters 3.0 +120 28 optimizer.lr 0.02756200312564828 +120 28 negative_sampler.num_negs_per_pos 48.0 +120 28 training.batch_size 2.0 +120 29 model.embedding_dim 1.0 +120 29 model.hidden_dropout_rate 0.12899208648747215 +120 29 model.num_filters 1.0 +120 29 optimizer.lr 0.08111533819167119 +120 29 negative_sampler.num_negs_per_pos 15.0 +120 29 training.batch_size 1.0 +120 30 model.embedding_dim 2.0 +120 30 model.hidden_dropout_rate 0.2788793977990149 +120 30 model.num_filters 5.0 +120 30 optimizer.lr 0.01009412935217017 +120 30 negative_sampler.num_negs_per_pos 40.0 +120 30 training.batch_size 1.0 +120 31 model.embedding_dim 1.0 +120 31 
model.hidden_dropout_rate 0.33160828290609967 +120 31 model.num_filters 4.0 +120 31 optimizer.lr 0.0010381905854588553 +120 31 negative_sampler.num_negs_per_pos 28.0 +120 31 training.batch_size 1.0 +120 32 model.embedding_dim 1.0 +120 32 model.hidden_dropout_rate 0.1647835340545894 +120 32 model.num_filters 0.0 +120 32 optimizer.lr 0.004963895009145773 +120 32 negative_sampler.num_negs_per_pos 27.0 +120 32 training.batch_size 2.0 +120 33 model.embedding_dim 2.0 +120 33 model.hidden_dropout_rate 0.3565557700958723 +120 33 model.num_filters 5.0 +120 33 optimizer.lr 0.08147770000798157 +120 33 negative_sampler.num_negs_per_pos 61.0 +120 33 training.batch_size 0.0 +120 34 model.embedding_dim 1.0 +120 34 model.hidden_dropout_rate 0.44956861701155626 +120 34 model.num_filters 1.0 +120 34 optimizer.lr 0.042875868581161346 +120 34 negative_sampler.num_negs_per_pos 69.0 +120 34 training.batch_size 0.0 +120 35 model.embedding_dim 2.0 +120 35 model.hidden_dropout_rate 0.4962514703884898 +120 35 model.num_filters 1.0 +120 35 optimizer.lr 0.005039509348588348 +120 35 negative_sampler.num_negs_per_pos 79.0 +120 35 training.batch_size 2.0 +120 36 model.embedding_dim 0.0 +120 36 model.hidden_dropout_rate 0.4292532958372378 +120 36 model.num_filters 8.0 +120 36 optimizer.lr 0.005004063872525864 +120 36 negative_sampler.num_negs_per_pos 97.0 +120 36 training.batch_size 1.0 +120 37 model.embedding_dim 2.0 +120 37 model.hidden_dropout_rate 0.28502563058555075 +120 37 model.num_filters 6.0 +120 37 optimizer.lr 0.07021490277725022 +120 37 negative_sampler.num_negs_per_pos 50.0 +120 37 training.batch_size 0.0 +120 38 model.embedding_dim 0.0 +120 38 model.hidden_dropout_rate 0.34643353436682517 +120 38 model.num_filters 1.0 +120 38 optimizer.lr 0.0036117575891518316 +120 38 negative_sampler.num_negs_per_pos 27.0 +120 38 training.batch_size 2.0 +120 39 model.embedding_dim 2.0 +120 39 model.hidden_dropout_rate 0.15587244357383656 +120 39 model.num_filters 7.0 +120 39 optimizer.lr 
0.0016638168932302903 +120 39 negative_sampler.num_negs_per_pos 99.0 +120 39 training.batch_size 0.0 +120 1 dataset """kinships""" +120 1 model """convkb""" +120 1 loss """bceaftersigmoid""" +120 1 regularizer """no""" +120 1 optimizer """adam""" +120 1 training_loop """owa""" +120 1 negative_sampler """basic""" +120 1 evaluator """rankbased""" +120 2 dataset """kinships""" +120 2 model """convkb""" +120 2 loss """bceaftersigmoid""" +120 2 regularizer """no""" +120 2 optimizer """adam""" +120 2 training_loop """owa""" +120 2 negative_sampler """basic""" +120 2 evaluator """rankbased""" +120 3 dataset """kinships""" +120 3 model """convkb""" +120 3 loss """bceaftersigmoid""" +120 3 regularizer """no""" +120 3 optimizer """adam""" +120 3 training_loop """owa""" +120 3 negative_sampler """basic""" +120 3 evaluator """rankbased""" +120 4 dataset """kinships""" +120 4 model """convkb""" +120 4 loss """bceaftersigmoid""" +120 4 regularizer """no""" +120 4 optimizer """adam""" +120 4 training_loop """owa""" +120 4 negative_sampler """basic""" +120 4 evaluator """rankbased""" +120 5 dataset """kinships""" +120 5 model """convkb""" +120 5 loss """bceaftersigmoid""" +120 5 regularizer """no""" +120 5 optimizer """adam""" +120 5 training_loop """owa""" +120 5 negative_sampler """basic""" +120 5 evaluator """rankbased""" +120 6 dataset """kinships""" +120 6 model """convkb""" +120 6 loss """bceaftersigmoid""" +120 6 regularizer """no""" +120 6 optimizer """adam""" +120 6 training_loop """owa""" +120 6 negative_sampler """basic""" +120 6 evaluator """rankbased""" +120 7 dataset """kinships""" +120 7 model """convkb""" +120 7 loss """bceaftersigmoid""" +120 7 regularizer """no""" +120 7 optimizer """adam""" +120 7 training_loop """owa""" +120 7 negative_sampler """basic""" +120 7 evaluator """rankbased""" +120 8 dataset """kinships""" +120 8 model """convkb""" +120 8 loss """bceaftersigmoid""" +120 8 regularizer """no""" +120 8 optimizer """adam""" +120 8 training_loop """owa""" 
+120 8 negative_sampler """basic""" +120 8 evaluator """rankbased""" +120 9 dataset """kinships""" +120 9 model """convkb""" +120 9 loss """bceaftersigmoid""" +120 9 regularizer """no""" +120 9 optimizer """adam""" +120 9 training_loop """owa""" +120 9 negative_sampler """basic""" +120 9 evaluator """rankbased""" +120 10 dataset """kinships""" +120 10 model """convkb""" +120 10 loss """bceaftersigmoid""" +120 10 regularizer """no""" +120 10 optimizer """adam""" +120 10 training_loop """owa""" +120 10 negative_sampler """basic""" +120 10 evaluator """rankbased""" +120 11 dataset """kinships""" +120 11 model """convkb""" +120 11 loss """bceaftersigmoid""" +120 11 regularizer """no""" +120 11 optimizer """adam""" +120 11 training_loop """owa""" +120 11 negative_sampler """basic""" +120 11 evaluator """rankbased""" +120 12 dataset """kinships""" +120 12 model """convkb""" +120 12 loss """bceaftersigmoid""" +120 12 regularizer """no""" +120 12 optimizer """adam""" +120 12 training_loop """owa""" +120 12 negative_sampler """basic""" +120 12 evaluator """rankbased""" +120 13 dataset """kinships""" +120 13 model """convkb""" +120 13 loss """bceaftersigmoid""" +120 13 regularizer """no""" +120 13 optimizer """adam""" +120 13 training_loop """owa""" +120 13 negative_sampler """basic""" +120 13 evaluator """rankbased""" +120 14 dataset """kinships""" +120 14 model """convkb""" +120 14 loss """bceaftersigmoid""" +120 14 regularizer """no""" +120 14 optimizer """adam""" +120 14 training_loop """owa""" +120 14 negative_sampler """basic""" +120 14 evaluator """rankbased""" +120 15 dataset """kinships""" +120 15 model """convkb""" +120 15 loss """bceaftersigmoid""" +120 15 regularizer """no""" +120 15 optimizer """adam""" +120 15 training_loop """owa""" +120 15 negative_sampler """basic""" +120 15 evaluator """rankbased""" +120 16 dataset """kinships""" +120 16 model """convkb""" +120 16 loss """bceaftersigmoid""" +120 16 regularizer """no""" +120 16 optimizer """adam""" +120 16 
training_loop """owa""" +120 16 negative_sampler """basic""" +120 16 evaluator """rankbased""" +120 17 dataset """kinships""" +120 17 model """convkb""" +120 17 loss """bceaftersigmoid""" +120 17 regularizer """no""" +120 17 optimizer """adam""" +120 17 training_loop """owa""" +120 17 negative_sampler """basic""" +120 17 evaluator """rankbased""" +120 18 dataset """kinships""" +120 18 model """convkb""" +120 18 loss """bceaftersigmoid""" +120 18 regularizer """no""" +120 18 optimizer """adam""" +120 18 training_loop """owa""" +120 18 negative_sampler """basic""" +120 18 evaluator """rankbased""" +120 19 dataset """kinships""" +120 19 model """convkb""" +120 19 loss """bceaftersigmoid""" +120 19 regularizer """no""" +120 19 optimizer """adam""" +120 19 training_loop """owa""" +120 19 negative_sampler """basic""" +120 19 evaluator """rankbased""" +120 20 dataset """kinships""" +120 20 model """convkb""" +120 20 loss """bceaftersigmoid""" +120 20 regularizer """no""" +120 20 optimizer """adam""" +120 20 training_loop """owa""" +120 20 negative_sampler """basic""" +120 20 evaluator """rankbased""" +120 21 dataset """kinships""" +120 21 model """convkb""" +120 21 loss """bceaftersigmoid""" +120 21 regularizer """no""" +120 21 optimizer """adam""" +120 21 training_loop """owa""" +120 21 negative_sampler """basic""" +120 21 evaluator """rankbased""" +120 22 dataset """kinships""" +120 22 model """convkb""" +120 22 loss """bceaftersigmoid""" +120 22 regularizer """no""" +120 22 optimizer """adam""" +120 22 training_loop """owa""" +120 22 negative_sampler """basic""" +120 22 evaluator """rankbased""" +120 23 dataset """kinships""" +120 23 model """convkb""" +120 23 loss """bceaftersigmoid""" +120 23 regularizer """no""" +120 23 optimizer """adam""" +120 23 training_loop """owa""" +120 23 negative_sampler """basic""" +120 23 evaluator """rankbased""" +120 24 dataset """kinships""" +120 24 model """convkb""" +120 24 loss """bceaftersigmoid""" +120 24 regularizer """no""" +120 
24 optimizer """adam""" +120 24 training_loop """owa""" +120 24 negative_sampler """basic""" +120 24 evaluator """rankbased""" +120 25 dataset """kinships""" +120 25 model """convkb""" +120 25 loss """bceaftersigmoid""" +120 25 regularizer """no""" +120 25 optimizer """adam""" +120 25 training_loop """owa""" +120 25 negative_sampler """basic""" +120 25 evaluator """rankbased""" +120 26 dataset """kinships""" +120 26 model """convkb""" +120 26 loss """bceaftersigmoid""" +120 26 regularizer """no""" +120 26 optimizer """adam""" +120 26 training_loop """owa""" +120 26 negative_sampler """basic""" +120 26 evaluator """rankbased""" +120 27 dataset """kinships""" +120 27 model """convkb""" +120 27 loss """bceaftersigmoid""" +120 27 regularizer """no""" +120 27 optimizer """adam""" +120 27 training_loop """owa""" +120 27 negative_sampler """basic""" +120 27 evaluator """rankbased""" +120 28 dataset """kinships""" +120 28 model """convkb""" +120 28 loss """bceaftersigmoid""" +120 28 regularizer """no""" +120 28 optimizer """adam""" +120 28 training_loop """owa""" +120 28 negative_sampler """basic""" +120 28 evaluator """rankbased""" +120 29 dataset """kinships""" +120 29 model """convkb""" +120 29 loss """bceaftersigmoid""" +120 29 regularizer """no""" +120 29 optimizer """adam""" +120 29 training_loop """owa""" +120 29 negative_sampler """basic""" +120 29 evaluator """rankbased""" +120 30 dataset """kinships""" +120 30 model """convkb""" +120 30 loss """bceaftersigmoid""" +120 30 regularizer """no""" +120 30 optimizer """adam""" +120 30 training_loop """owa""" +120 30 negative_sampler """basic""" +120 30 evaluator """rankbased""" +120 31 dataset """kinships""" +120 31 model """convkb""" +120 31 loss """bceaftersigmoid""" +120 31 regularizer """no""" +120 31 optimizer """adam""" +120 31 training_loop """owa""" +120 31 negative_sampler """basic""" +120 31 evaluator """rankbased""" +120 32 dataset """kinships""" +120 32 model """convkb""" +120 32 loss """bceaftersigmoid""" 
+120 32 regularizer """no""" +120 32 optimizer """adam""" +120 32 training_loop """owa""" +120 32 negative_sampler """basic""" +120 32 evaluator """rankbased""" +120 33 dataset """kinships""" +120 33 model """convkb""" +120 33 loss """bceaftersigmoid""" +120 33 regularizer """no""" +120 33 optimizer """adam""" +120 33 training_loop """owa""" +120 33 negative_sampler """basic""" +120 33 evaluator """rankbased""" +120 34 dataset """kinships""" +120 34 model """convkb""" +120 34 loss """bceaftersigmoid""" +120 34 regularizer """no""" +120 34 optimizer """adam""" +120 34 training_loop """owa""" +120 34 negative_sampler """basic""" +120 34 evaluator """rankbased""" +120 35 dataset """kinships""" +120 35 model """convkb""" +120 35 loss """bceaftersigmoid""" +120 35 regularizer """no""" +120 35 optimizer """adam""" +120 35 training_loop """owa""" +120 35 negative_sampler """basic""" +120 35 evaluator """rankbased""" +120 36 dataset """kinships""" +120 36 model """convkb""" +120 36 loss """bceaftersigmoid""" +120 36 regularizer """no""" +120 36 optimizer """adam""" +120 36 training_loop """owa""" +120 36 negative_sampler """basic""" +120 36 evaluator """rankbased""" +120 37 dataset """kinships""" +120 37 model """convkb""" +120 37 loss """bceaftersigmoid""" +120 37 regularizer """no""" +120 37 optimizer """adam""" +120 37 training_loop """owa""" +120 37 negative_sampler """basic""" +120 37 evaluator """rankbased""" +120 38 dataset """kinships""" +120 38 model """convkb""" +120 38 loss """bceaftersigmoid""" +120 38 regularizer """no""" +120 38 optimizer """adam""" +120 38 training_loop """owa""" +120 38 negative_sampler """basic""" +120 38 evaluator """rankbased""" +120 39 dataset """kinships""" +120 39 model """convkb""" +120 39 loss """bceaftersigmoid""" +120 39 regularizer """no""" +120 39 optimizer """adam""" +120 39 training_loop """owa""" +120 39 negative_sampler """basic""" +120 39 evaluator """rankbased""" +121 1 model.embedding_dim 0.0 +121 1 
model.hidden_dropout_rate 0.4412211988291227 +121 1 model.num_filters 9.0 +121 1 optimizer.lr 0.03137779135126005 +121 1 negative_sampler.num_negs_per_pos 46.0 +121 1 training.batch_size 2.0 +121 2 model.embedding_dim 0.0 +121 2 model.hidden_dropout_rate 0.21940743386899209 +121 2 model.num_filters 1.0 +121 2 optimizer.lr 0.0832077187054337 +121 2 negative_sampler.num_negs_per_pos 47.0 +121 2 training.batch_size 0.0 +121 3 model.embedding_dim 2.0 +121 3 model.hidden_dropout_rate 0.28982200100311 +121 3 model.num_filters 3.0 +121 3 optimizer.lr 0.00125938768591103 +121 3 negative_sampler.num_negs_per_pos 91.0 +121 3 training.batch_size 2.0 +121 4 model.embedding_dim 2.0 +121 4 model.hidden_dropout_rate 0.18550450819352046 +121 4 model.num_filters 3.0 +121 4 optimizer.lr 0.019029604568289997 +121 4 negative_sampler.num_negs_per_pos 34.0 +121 4 training.batch_size 2.0 +121 5 model.embedding_dim 2.0 +121 5 model.hidden_dropout_rate 0.282024986516111 +121 5 model.num_filters 1.0 +121 5 optimizer.lr 0.001959464765249007 +121 5 negative_sampler.num_negs_per_pos 13.0 +121 5 training.batch_size 1.0 +121 6 model.embedding_dim 1.0 +121 6 model.hidden_dropout_rate 0.36100112490000086 +121 6 model.num_filters 8.0 +121 6 optimizer.lr 0.053819385947568674 +121 6 negative_sampler.num_negs_per_pos 75.0 +121 6 training.batch_size 1.0 +121 7 model.embedding_dim 1.0 +121 7 model.hidden_dropout_rate 0.17873524321006046 +121 7 model.num_filters 5.0 +121 7 optimizer.lr 0.0017581766176561953 +121 7 negative_sampler.num_negs_per_pos 28.0 +121 7 training.batch_size 1.0 +121 8 model.embedding_dim 2.0 +121 8 model.hidden_dropout_rate 0.34948746280260423 +121 8 model.num_filters 5.0 +121 8 optimizer.lr 0.004825802357060178 +121 8 negative_sampler.num_negs_per_pos 52.0 +121 8 training.batch_size 1.0 +121 9 model.embedding_dim 0.0 +121 9 model.hidden_dropout_rate 0.16181454120756 +121 9 model.num_filters 6.0 +121 9 optimizer.lr 0.07284805347135655 +121 9 negative_sampler.num_negs_per_pos 26.0 
+121 9 training.batch_size 1.0 +121 10 model.embedding_dim 2.0 +121 10 model.hidden_dropout_rate 0.10559454589110927 +121 10 model.num_filters 9.0 +121 10 optimizer.lr 0.0354784108067814 +121 10 negative_sampler.num_negs_per_pos 90.0 +121 10 training.batch_size 0.0 +121 11 model.embedding_dim 1.0 +121 11 model.hidden_dropout_rate 0.38745074649382005 +121 11 model.num_filters 2.0 +121 11 optimizer.lr 0.05637987055659637 +121 11 negative_sampler.num_negs_per_pos 72.0 +121 11 training.batch_size 0.0 +121 12 model.embedding_dim 0.0 +121 12 model.hidden_dropout_rate 0.14495941660051723 +121 12 model.num_filters 7.0 +121 12 optimizer.lr 0.0024491510875898153 +121 12 negative_sampler.num_negs_per_pos 14.0 +121 12 training.batch_size 2.0 +121 13 model.embedding_dim 0.0 +121 13 model.hidden_dropout_rate 0.23108905570158195 +121 13 model.num_filters 9.0 +121 13 optimizer.lr 0.003984964902872177 +121 13 negative_sampler.num_negs_per_pos 61.0 +121 13 training.batch_size 2.0 +121 14 model.embedding_dim 1.0 +121 14 model.hidden_dropout_rate 0.329785133360218 +121 14 model.num_filters 0.0 +121 14 optimizer.lr 0.017922381006842257 +121 14 negative_sampler.num_negs_per_pos 13.0 +121 14 training.batch_size 2.0 +121 15 model.embedding_dim 1.0 +121 15 model.hidden_dropout_rate 0.229185547080305 +121 15 model.num_filters 1.0 +121 15 optimizer.lr 0.049440250344110294 +121 15 negative_sampler.num_negs_per_pos 83.0 +121 15 training.batch_size 2.0 +121 16 model.embedding_dim 2.0 +121 16 model.hidden_dropout_rate 0.24706004070822085 +121 16 model.num_filters 9.0 +121 16 optimizer.lr 0.043276763004475215 +121 16 negative_sampler.num_negs_per_pos 70.0 +121 16 training.batch_size 0.0 +121 17 model.embedding_dim 1.0 +121 17 model.hidden_dropout_rate 0.1299954107036135 +121 17 model.num_filters 2.0 +121 17 optimizer.lr 0.03120309733376143 +121 17 negative_sampler.num_negs_per_pos 74.0 +121 17 training.batch_size 1.0 +121 18 model.embedding_dim 2.0 +121 18 model.hidden_dropout_rate 
0.1648170359022971 +121 18 model.num_filters 7.0 +121 18 optimizer.lr 0.0976544224014088 +121 18 negative_sampler.num_negs_per_pos 4.0 +121 18 training.batch_size 1.0 +121 19 model.embedding_dim 0.0 +121 19 model.hidden_dropout_rate 0.17107149450837117 +121 19 model.num_filters 6.0 +121 19 optimizer.lr 0.005388226401531017 +121 19 negative_sampler.num_negs_per_pos 17.0 +121 19 training.batch_size 2.0 +121 20 model.embedding_dim 1.0 +121 20 model.hidden_dropout_rate 0.35250061767928126 +121 20 model.num_filters 7.0 +121 20 optimizer.lr 0.00859104106272352 +121 20 negative_sampler.num_negs_per_pos 88.0 +121 20 training.batch_size 2.0 +121 21 model.embedding_dim 2.0 +121 21 model.hidden_dropout_rate 0.17516775521720024 +121 21 model.num_filters 9.0 +121 21 optimizer.lr 0.0021354153030787096 +121 21 negative_sampler.num_negs_per_pos 15.0 +121 21 training.batch_size 1.0 +121 22 model.embedding_dim 0.0 +121 22 model.hidden_dropout_rate 0.2948776186264473 +121 22 model.num_filters 1.0 +121 22 optimizer.lr 0.007647332643600397 +121 22 negative_sampler.num_negs_per_pos 69.0 +121 22 training.batch_size 1.0 +121 23 model.embedding_dim 0.0 +121 23 model.hidden_dropout_rate 0.24993513348913324 +121 23 model.num_filters 1.0 +121 23 optimizer.lr 0.008531466982359882 +121 23 negative_sampler.num_negs_per_pos 3.0 +121 23 training.batch_size 1.0 +121 24 model.embedding_dim 2.0 +121 24 model.hidden_dropout_rate 0.34269006210179564 +121 24 model.num_filters 1.0 +121 24 optimizer.lr 0.016207951312194244 +121 24 negative_sampler.num_negs_per_pos 80.0 +121 24 training.batch_size 1.0 +121 25 model.embedding_dim 0.0 +121 25 model.hidden_dropout_rate 0.1678836946148881 +121 25 model.num_filters 6.0 +121 25 optimizer.lr 0.00475845718123829 +121 25 negative_sampler.num_negs_per_pos 27.0 +121 25 training.batch_size 0.0 +121 26 model.embedding_dim 0.0 +121 26 model.hidden_dropout_rate 0.2790084991363871 +121 26 model.num_filters 6.0 +121 26 optimizer.lr 0.0021990936771698094 +121 26 
negative_sampler.num_negs_per_pos 34.0 +121 26 training.batch_size 0.0 +121 27 model.embedding_dim 1.0 +121 27 model.hidden_dropout_rate 0.29509675994702156 +121 27 model.num_filters 6.0 +121 27 optimizer.lr 0.00453155466624195 +121 27 negative_sampler.num_negs_per_pos 89.0 +121 27 training.batch_size 1.0 +121 28 model.embedding_dim 0.0 +121 28 model.hidden_dropout_rate 0.3142743431243768 +121 28 model.num_filters 9.0 +121 28 optimizer.lr 0.06653092628273655 +121 28 negative_sampler.num_negs_per_pos 80.0 +121 28 training.batch_size 0.0 +121 29 model.embedding_dim 0.0 +121 29 model.hidden_dropout_rate 0.36624545514066287 +121 29 model.num_filters 6.0 +121 29 optimizer.lr 0.0014598801757685525 +121 29 negative_sampler.num_negs_per_pos 48.0 +121 29 training.batch_size 1.0 +121 1 dataset """kinships""" +121 1 model """convkb""" +121 1 loss """softplus""" +121 1 regularizer """no""" +121 1 optimizer """adam""" +121 1 training_loop """owa""" +121 1 negative_sampler """basic""" +121 1 evaluator """rankbased""" +121 2 dataset """kinships""" +121 2 model """convkb""" +121 2 loss """softplus""" +121 2 regularizer """no""" +121 2 optimizer """adam""" +121 2 training_loop """owa""" +121 2 negative_sampler """basic""" +121 2 evaluator """rankbased""" +121 3 dataset """kinships""" +121 3 model """convkb""" +121 3 loss """softplus""" +121 3 regularizer """no""" +121 3 optimizer """adam""" +121 3 training_loop """owa""" +121 3 negative_sampler """basic""" +121 3 evaluator """rankbased""" +121 4 dataset """kinships""" +121 4 model """convkb""" +121 4 loss """softplus""" +121 4 regularizer """no""" +121 4 optimizer """adam""" +121 4 training_loop """owa""" +121 4 negative_sampler """basic""" +121 4 evaluator """rankbased""" +121 5 dataset """kinships""" +121 5 model """convkb""" +121 5 loss """softplus""" +121 5 regularizer """no""" +121 5 optimizer """adam""" +121 5 training_loop """owa""" +121 5 negative_sampler """basic""" +121 5 evaluator """rankbased""" +121 6 dataset 
"""kinships""" +121 6 model """convkb""" +121 6 loss """softplus""" +121 6 regularizer """no""" +121 6 optimizer """adam""" +121 6 training_loop """owa""" +121 6 negative_sampler """basic""" +121 6 evaluator """rankbased""" +121 7 dataset """kinships""" +121 7 model """convkb""" +121 7 loss """softplus""" +121 7 regularizer """no""" +121 7 optimizer """adam""" +121 7 training_loop """owa""" +121 7 negative_sampler """basic""" +121 7 evaluator """rankbased""" +121 8 dataset """kinships""" +121 8 model """convkb""" +121 8 loss """softplus""" +121 8 regularizer """no""" +121 8 optimizer """adam""" +121 8 training_loop """owa""" +121 8 negative_sampler """basic""" +121 8 evaluator """rankbased""" +121 9 dataset """kinships""" +121 9 model """convkb""" +121 9 loss """softplus""" +121 9 regularizer """no""" +121 9 optimizer """adam""" +121 9 training_loop """owa""" +121 9 negative_sampler """basic""" +121 9 evaluator """rankbased""" +121 10 dataset """kinships""" +121 10 model """convkb""" +121 10 loss """softplus""" +121 10 regularizer """no""" +121 10 optimizer """adam""" +121 10 training_loop """owa""" +121 10 negative_sampler """basic""" +121 10 evaluator """rankbased""" +121 11 dataset """kinships""" +121 11 model """convkb""" +121 11 loss """softplus""" +121 11 regularizer """no""" +121 11 optimizer """adam""" +121 11 training_loop """owa""" +121 11 negative_sampler """basic""" +121 11 evaluator """rankbased""" +121 12 dataset """kinships""" +121 12 model """convkb""" +121 12 loss """softplus""" +121 12 regularizer """no""" +121 12 optimizer """adam""" +121 12 training_loop """owa""" +121 12 negative_sampler """basic""" +121 12 evaluator """rankbased""" +121 13 dataset """kinships""" +121 13 model """convkb""" +121 13 loss """softplus""" +121 13 regularizer """no""" +121 13 optimizer """adam""" +121 13 training_loop """owa""" +121 13 negative_sampler """basic""" +121 13 evaluator """rankbased""" +121 14 dataset """kinships""" +121 14 model """convkb""" +121 14 loss 
"""softplus""" +121 14 regularizer """no""" +121 14 optimizer """adam""" +121 14 training_loop """owa""" +121 14 negative_sampler """basic""" +121 14 evaluator """rankbased""" +121 15 dataset """kinships""" +121 15 model """convkb""" +121 15 loss """softplus""" +121 15 regularizer """no""" +121 15 optimizer """adam""" +121 15 training_loop """owa""" +121 15 negative_sampler """basic""" +121 15 evaluator """rankbased""" +121 16 dataset """kinships""" +121 16 model """convkb""" +121 16 loss """softplus""" +121 16 regularizer """no""" +121 16 optimizer """adam""" +121 16 training_loop """owa""" +121 16 negative_sampler """basic""" +121 16 evaluator """rankbased""" +121 17 dataset """kinships""" +121 17 model """convkb""" +121 17 loss """softplus""" +121 17 regularizer """no""" +121 17 optimizer """adam""" +121 17 training_loop """owa""" +121 17 negative_sampler """basic""" +121 17 evaluator """rankbased""" +121 18 dataset """kinships""" +121 18 model """convkb""" +121 18 loss """softplus""" +121 18 regularizer """no""" +121 18 optimizer """adam""" +121 18 training_loop """owa""" +121 18 negative_sampler """basic""" +121 18 evaluator """rankbased""" +121 19 dataset """kinships""" +121 19 model """convkb""" +121 19 loss """softplus""" +121 19 regularizer """no""" +121 19 optimizer """adam""" +121 19 training_loop """owa""" +121 19 negative_sampler """basic""" +121 19 evaluator """rankbased""" +121 20 dataset """kinships""" +121 20 model """convkb""" +121 20 loss """softplus""" +121 20 regularizer """no""" +121 20 optimizer """adam""" +121 20 training_loop """owa""" +121 20 negative_sampler """basic""" +121 20 evaluator """rankbased""" +121 21 dataset """kinships""" +121 21 model """convkb""" +121 21 loss """softplus""" +121 21 regularizer """no""" +121 21 optimizer """adam""" +121 21 training_loop """owa""" +121 21 negative_sampler """basic""" +121 21 evaluator """rankbased""" +121 22 dataset """kinships""" +121 22 model """convkb""" +121 22 loss """softplus""" +121 22 
regularizer """no""" +121 22 optimizer """adam""" +121 22 training_loop """owa""" +121 22 negative_sampler """basic""" +121 22 evaluator """rankbased""" +121 23 dataset """kinships""" +121 23 model """convkb""" +121 23 loss """softplus""" +121 23 regularizer """no""" +121 23 optimizer """adam""" +121 23 training_loop """owa""" +121 23 negative_sampler """basic""" +121 23 evaluator """rankbased""" +121 24 dataset """kinships""" +121 24 model """convkb""" +121 24 loss """softplus""" +121 24 regularizer """no""" +121 24 optimizer """adam""" +121 24 training_loop """owa""" +121 24 negative_sampler """basic""" +121 24 evaluator """rankbased""" +121 25 dataset """kinships""" +121 25 model """convkb""" +121 25 loss """softplus""" +121 25 regularizer """no""" +121 25 optimizer """adam""" +121 25 training_loop """owa""" +121 25 negative_sampler """basic""" +121 25 evaluator """rankbased""" +121 26 dataset """kinships""" +121 26 model """convkb""" +121 26 loss """softplus""" +121 26 regularizer """no""" +121 26 optimizer """adam""" +121 26 training_loop """owa""" +121 26 negative_sampler """basic""" +121 26 evaluator """rankbased""" +121 27 dataset """kinships""" +121 27 model """convkb""" +121 27 loss """softplus""" +121 27 regularizer """no""" +121 27 optimizer """adam""" +121 27 training_loop """owa""" +121 27 negative_sampler """basic""" +121 27 evaluator """rankbased""" +121 28 dataset """kinships""" +121 28 model """convkb""" +121 28 loss """softplus""" +121 28 regularizer """no""" +121 28 optimizer """adam""" +121 28 training_loop """owa""" +121 28 negative_sampler """basic""" +121 28 evaluator """rankbased""" +121 29 dataset """kinships""" +121 29 model """convkb""" +121 29 loss """softplus""" +121 29 regularizer """no""" +121 29 optimizer """adam""" +121 29 training_loop """owa""" +121 29 negative_sampler """basic""" +121 29 evaluator """rankbased""" +122 1 model.embedding_dim 0.0 +122 1 model.hidden_dropout_rate 0.146715514435366 +122 1 model.num_filters 9.0 +122 1 
optimizer.lr 0.044713072593174884 +122 1 negative_sampler.num_negs_per_pos 69.0 +122 1 training.batch_size 2.0 +122 2 model.embedding_dim 2.0 +122 2 model.hidden_dropout_rate 0.2360848698415249 +122 2 model.num_filters 2.0 +122 2 optimizer.lr 0.005017411243505941 +122 2 negative_sampler.num_negs_per_pos 46.0 +122 2 training.batch_size 2.0 +122 3 model.embedding_dim 0.0 +122 3 model.hidden_dropout_rate 0.13419195740914827 +122 3 model.num_filters 8.0 +122 3 optimizer.lr 0.0013481088478687541 +122 3 negative_sampler.num_negs_per_pos 36.0 +122 3 training.batch_size 0.0 +122 4 model.embedding_dim 0.0 +122 4 model.hidden_dropout_rate 0.12938519722584482 +122 4 model.num_filters 2.0 +122 4 optimizer.lr 0.011432321966216038 +122 4 negative_sampler.num_negs_per_pos 35.0 +122 4 training.batch_size 0.0 +122 5 model.embedding_dim 0.0 +122 5 model.hidden_dropout_rate 0.10585454366269556 +122 5 model.num_filters 0.0 +122 5 optimizer.lr 0.033522789511956745 +122 5 negative_sampler.num_negs_per_pos 76.0 +122 5 training.batch_size 0.0 +122 6 model.embedding_dim 0.0 +122 6 model.hidden_dropout_rate 0.3764233565929469 +122 6 model.num_filters 4.0 +122 6 optimizer.lr 0.0060024113034048455 +122 6 negative_sampler.num_negs_per_pos 47.0 +122 6 training.batch_size 0.0 +122 7 model.embedding_dim 2.0 +122 7 model.hidden_dropout_rate 0.3254222683793748 +122 7 model.num_filters 1.0 +122 7 optimizer.lr 0.015619241459026946 +122 7 negative_sampler.num_negs_per_pos 20.0 +122 7 training.batch_size 1.0 +122 8 model.embedding_dim 1.0 +122 8 model.hidden_dropout_rate 0.4687945119505182 +122 8 model.num_filters 9.0 +122 8 optimizer.lr 0.005965174325786937 +122 8 negative_sampler.num_negs_per_pos 48.0 +122 8 training.batch_size 2.0 +122 9 model.embedding_dim 2.0 +122 9 model.hidden_dropout_rate 0.4924585168362608 +122 9 model.num_filters 7.0 +122 9 optimizer.lr 0.04123845201211262 +122 9 negative_sampler.num_negs_per_pos 60.0 +122 9 training.batch_size 0.0 +122 10 model.embedding_dim 1.0 +122 10 
model.hidden_dropout_rate 0.23528628811290384 +122 10 model.num_filters 7.0 +122 10 optimizer.lr 0.01193788564371591 +122 10 negative_sampler.num_negs_per_pos 1.0 +122 10 training.batch_size 2.0 +122 11 model.embedding_dim 2.0 +122 11 model.hidden_dropout_rate 0.33863131506166894 +122 11 model.num_filters 4.0 +122 11 optimizer.lr 0.09471601750452972 +122 11 negative_sampler.num_negs_per_pos 4.0 +122 11 training.batch_size 1.0 +122 12 model.embedding_dim 2.0 +122 12 model.hidden_dropout_rate 0.339299485213067 +122 12 model.num_filters 4.0 +122 12 optimizer.lr 0.09618930744203918 +122 12 negative_sampler.num_negs_per_pos 31.0 +122 12 training.batch_size 2.0 +122 13 model.embedding_dim 2.0 +122 13 model.hidden_dropout_rate 0.2744609939940781 +122 13 model.num_filters 4.0 +122 13 optimizer.lr 0.0018286322694908825 +122 13 negative_sampler.num_negs_per_pos 74.0 +122 13 training.batch_size 0.0 +122 14 model.embedding_dim 2.0 +122 14 model.hidden_dropout_rate 0.24005640156204341 +122 14 model.num_filters 3.0 +122 14 optimizer.lr 0.00798442319474697 +122 14 negative_sampler.num_negs_per_pos 22.0 +122 14 training.batch_size 2.0 +122 15 model.embedding_dim 1.0 +122 15 model.hidden_dropout_rate 0.46458866377525304 +122 15 model.num_filters 3.0 +122 15 optimizer.lr 0.016975264080199295 +122 15 negative_sampler.num_negs_per_pos 98.0 +122 15 training.batch_size 1.0 +122 16 model.embedding_dim 1.0 +122 16 model.hidden_dropout_rate 0.1545268990403429 +122 16 model.num_filters 8.0 +122 16 optimizer.lr 0.07823765971536793 +122 16 negative_sampler.num_negs_per_pos 34.0 +122 16 training.batch_size 0.0 +122 17 model.embedding_dim 0.0 +122 17 model.hidden_dropout_rate 0.4004490883703724 +122 17 model.num_filters 0.0 +122 17 optimizer.lr 0.00901073987591104 +122 17 negative_sampler.num_negs_per_pos 80.0 +122 17 training.batch_size 1.0 +122 18 model.embedding_dim 1.0 +122 18 model.hidden_dropout_rate 0.37223845210000905 +122 18 model.num_filters 1.0 +122 18 optimizer.lr 
0.013493503178328254 +122 18 negative_sampler.num_negs_per_pos 77.0 +122 18 training.batch_size 2.0 +122 19 model.embedding_dim 2.0 +122 19 model.hidden_dropout_rate 0.23743788751023942 +122 19 model.num_filters 5.0 +122 19 optimizer.lr 0.023200651370179043 +122 19 negative_sampler.num_negs_per_pos 42.0 +122 19 training.batch_size 1.0 +122 20 model.embedding_dim 2.0 +122 20 model.hidden_dropout_rate 0.1327445068527791 +122 20 model.num_filters 3.0 +122 20 optimizer.lr 0.08837233550393131 +122 20 negative_sampler.num_negs_per_pos 80.0 +122 20 training.batch_size 2.0 +122 21 model.embedding_dim 0.0 +122 21 model.hidden_dropout_rate 0.18566094204998407 +122 21 model.num_filters 1.0 +122 21 optimizer.lr 0.022037150907623534 +122 21 negative_sampler.num_negs_per_pos 29.0 +122 21 training.batch_size 0.0 +122 22 model.embedding_dim 0.0 +122 22 model.hidden_dropout_rate 0.4810793243240088 +122 22 model.num_filters 5.0 +122 22 optimizer.lr 0.010820078504321123 +122 22 negative_sampler.num_negs_per_pos 77.0 +122 22 training.batch_size 0.0 +122 23 model.embedding_dim 1.0 +122 23 model.hidden_dropout_rate 0.13567517784150285 +122 23 model.num_filters 9.0 +122 23 optimizer.lr 0.0187704777671923 +122 23 negative_sampler.num_negs_per_pos 70.0 +122 23 training.batch_size 2.0 +122 24 model.embedding_dim 1.0 +122 24 model.hidden_dropout_rate 0.3578294297571167 +122 24 model.num_filters 7.0 +122 24 optimizer.lr 0.04249597139495302 +122 24 negative_sampler.num_negs_per_pos 24.0 +122 24 training.batch_size 1.0 +122 25 model.embedding_dim 2.0 +122 25 model.hidden_dropout_rate 0.34922554967446673 +122 25 model.num_filters 0.0 +122 25 optimizer.lr 0.012552179463555295 +122 25 negative_sampler.num_negs_per_pos 21.0 +122 25 training.batch_size 2.0 +122 26 model.embedding_dim 0.0 +122 26 model.hidden_dropout_rate 0.2274113613297303 +122 26 model.num_filters 8.0 +122 26 optimizer.lr 0.026912826572818342 +122 26 negative_sampler.num_negs_per_pos 0.0 +122 26 training.batch_size 2.0 +122 27 
model.embedding_dim 2.0 +122 27 model.hidden_dropout_rate 0.32603679399662094 +122 27 model.num_filters 0.0 +122 27 optimizer.lr 0.038425120829281574 +122 27 negative_sampler.num_negs_per_pos 66.0 +122 27 training.batch_size 0.0 +122 28 model.embedding_dim 0.0 +122 28 model.hidden_dropout_rate 0.23367493669111258 +122 28 model.num_filters 6.0 +122 28 optimizer.lr 0.019635022102828813 +122 28 negative_sampler.num_negs_per_pos 0.0 +122 28 training.batch_size 2.0 +122 29 model.embedding_dim 1.0 +122 29 model.hidden_dropout_rate 0.1819119986603891 +122 29 model.num_filters 3.0 +122 29 optimizer.lr 0.006093564645921804 +122 29 negative_sampler.num_negs_per_pos 51.0 +122 29 training.batch_size 2.0 +122 30 model.embedding_dim 1.0 +122 30 model.hidden_dropout_rate 0.49308396709360575 +122 30 model.num_filters 0.0 +122 30 optimizer.lr 0.009880950426552435 +122 30 negative_sampler.num_negs_per_pos 64.0 +122 30 training.batch_size 1.0 +122 31 model.embedding_dim 0.0 +122 31 model.hidden_dropout_rate 0.3183981624900585 +122 31 model.num_filters 8.0 +122 31 optimizer.lr 0.001116660385362771 +122 31 negative_sampler.num_negs_per_pos 89.0 +122 31 training.batch_size 2.0 +122 32 model.embedding_dim 1.0 +122 32 model.hidden_dropout_rate 0.21680811607734768 +122 32 model.num_filters 2.0 +122 32 optimizer.lr 0.03110617715649428 +122 32 negative_sampler.num_negs_per_pos 55.0 +122 32 training.batch_size 1.0 +122 33 model.embedding_dim 1.0 +122 33 model.hidden_dropout_rate 0.47824220853490806 +122 33 model.num_filters 6.0 +122 33 optimizer.lr 0.08492848538118881 +122 33 negative_sampler.num_negs_per_pos 93.0 +122 33 training.batch_size 2.0 +122 34 model.embedding_dim 0.0 +122 34 model.hidden_dropout_rate 0.384235292212437 +122 34 model.num_filters 5.0 +122 34 optimizer.lr 0.0271872007133186 +122 34 negative_sampler.num_negs_per_pos 22.0 +122 34 training.batch_size 0.0 +122 35 model.embedding_dim 1.0 +122 35 model.hidden_dropout_rate 0.3913814266484675 +122 35 model.num_filters 8.0 +122 
35 optimizer.lr 0.03756120826356939 +122 35 negative_sampler.num_negs_per_pos 3.0 +122 35 training.batch_size 2.0 +122 36 model.embedding_dim 1.0 +122 36 model.hidden_dropout_rate 0.44935347303448814 +122 36 model.num_filters 6.0 +122 36 optimizer.lr 0.011435920921085245 +122 36 negative_sampler.num_negs_per_pos 9.0 +122 36 training.batch_size 0.0 +122 37 model.embedding_dim 0.0 +122 37 model.hidden_dropout_rate 0.23986472602989528 +122 37 model.num_filters 2.0 +122 37 optimizer.lr 0.0818608191581962 +122 37 negative_sampler.num_negs_per_pos 78.0 +122 37 training.batch_size 1.0 +122 38 model.embedding_dim 1.0 +122 38 model.hidden_dropout_rate 0.31782188317660953 +122 38 model.num_filters 5.0 +122 38 optimizer.lr 0.09058167546840273 +122 38 negative_sampler.num_negs_per_pos 17.0 +122 38 training.batch_size 2.0 +122 39 model.embedding_dim 1.0 +122 39 model.hidden_dropout_rate 0.3443378119009366 +122 39 model.num_filters 6.0 +122 39 optimizer.lr 0.005139000963206507 +122 39 negative_sampler.num_negs_per_pos 15.0 +122 39 training.batch_size 1.0 +122 40 model.embedding_dim 0.0 +122 40 model.hidden_dropout_rate 0.18677274986570258 +122 40 model.num_filters 7.0 +122 40 optimizer.lr 0.017672245810497835 +122 40 negative_sampler.num_negs_per_pos 67.0 +122 40 training.batch_size 1.0 +122 41 model.embedding_dim 0.0 +122 41 model.hidden_dropout_rate 0.4809727231482954 +122 41 model.num_filters 8.0 +122 41 optimizer.lr 0.0040675145254007875 +122 41 negative_sampler.num_negs_per_pos 13.0 +122 41 training.batch_size 0.0 +122 42 model.embedding_dim 2.0 +122 42 model.hidden_dropout_rate 0.3624863711659536 +122 42 model.num_filters 8.0 +122 42 optimizer.lr 0.030267833231633658 +122 42 negative_sampler.num_negs_per_pos 12.0 +122 42 training.batch_size 0.0 +122 43 model.embedding_dim 2.0 +122 43 model.hidden_dropout_rate 0.12523240787275766 +122 43 model.num_filters 8.0 +122 43 optimizer.lr 0.071927274644988 +122 43 negative_sampler.num_negs_per_pos 61.0 +122 43 training.batch_size 
1.0 +122 44 model.embedding_dim 1.0 +122 44 model.hidden_dropout_rate 0.331402058523051 +122 44 model.num_filters 0.0 +122 44 optimizer.lr 0.05692489402045558 +122 44 negative_sampler.num_negs_per_pos 67.0 +122 44 training.batch_size 1.0 +122 45 model.embedding_dim 2.0 +122 45 model.hidden_dropout_rate 0.300635723119167 +122 45 model.num_filters 3.0 +122 45 optimizer.lr 0.0017781231129756772 +122 45 negative_sampler.num_negs_per_pos 64.0 +122 45 training.batch_size 1.0 +122 46 model.embedding_dim 2.0 +122 46 model.hidden_dropout_rate 0.48431786708907376 +122 46 model.num_filters 1.0 +122 46 optimizer.lr 0.0037477001227965746 +122 46 negative_sampler.num_negs_per_pos 23.0 +122 46 training.batch_size 0.0 +122 47 model.embedding_dim 2.0 +122 47 model.hidden_dropout_rate 0.41881772143714097 +122 47 model.num_filters 8.0 +122 47 optimizer.lr 0.010695764741966785 +122 47 negative_sampler.num_negs_per_pos 42.0 +122 47 training.batch_size 2.0 +122 48 model.embedding_dim 0.0 +122 48 model.hidden_dropout_rate 0.4504926099822825 +122 48 model.num_filters 9.0 +122 48 optimizer.lr 0.021746861760992393 +122 48 negative_sampler.num_negs_per_pos 85.0 +122 48 training.batch_size 2.0 +122 49 model.embedding_dim 0.0 +122 49 model.hidden_dropout_rate 0.2951137190930586 +122 49 model.num_filters 7.0 +122 49 optimizer.lr 0.048726665514249506 +122 49 negative_sampler.num_negs_per_pos 60.0 +122 49 training.batch_size 1.0 +122 50 model.embedding_dim 2.0 +122 50 model.hidden_dropout_rate 0.1014909023933559 +122 50 model.num_filters 3.0 +122 50 optimizer.lr 0.0294275032578123 +122 50 negative_sampler.num_negs_per_pos 98.0 +122 50 training.batch_size 2.0 +122 51 model.embedding_dim 1.0 +122 51 model.hidden_dropout_rate 0.3208446971791699 +122 51 model.num_filters 7.0 +122 51 optimizer.lr 0.015217451768738689 +122 51 negative_sampler.num_negs_per_pos 31.0 +122 51 training.batch_size 1.0 +122 52 model.embedding_dim 2.0 +122 52 model.hidden_dropout_rate 0.23254662969515805 +122 52 
model.num_filters 6.0 +122 52 optimizer.lr 0.001527062626966909 +122 52 negative_sampler.num_negs_per_pos 52.0 +122 52 training.batch_size 2.0 +122 53 model.embedding_dim 2.0 +122 53 model.hidden_dropout_rate 0.2692647139397796 +122 53 model.num_filters 4.0 +122 53 optimizer.lr 0.05713781701103257 +122 53 negative_sampler.num_negs_per_pos 27.0 +122 53 training.batch_size 0.0 +122 54 model.embedding_dim 1.0 +122 54 model.hidden_dropout_rate 0.46626856829177205 +122 54 model.num_filters 3.0 +122 54 optimizer.lr 0.005856059815193769 +122 54 negative_sampler.num_negs_per_pos 3.0 +122 54 training.batch_size 0.0 +122 55 model.embedding_dim 1.0 +122 55 model.hidden_dropout_rate 0.2589082676547965 +122 55 model.num_filters 5.0 +122 55 optimizer.lr 0.003618082858731261 +122 55 negative_sampler.num_negs_per_pos 3.0 +122 55 training.batch_size 0.0 +122 56 model.embedding_dim 1.0 +122 56 model.hidden_dropout_rate 0.2681314784305573 +122 56 model.num_filters 4.0 +122 56 optimizer.lr 0.0017175868742384727 +122 56 negative_sampler.num_negs_per_pos 55.0 +122 56 training.batch_size 1.0 +122 57 model.embedding_dim 1.0 +122 57 model.hidden_dropout_rate 0.28759755934022785 +122 57 model.num_filters 6.0 +122 57 optimizer.lr 0.0029048113526084665 +122 57 negative_sampler.num_negs_per_pos 43.0 +122 57 training.batch_size 2.0 +122 58 model.embedding_dim 2.0 +122 58 model.hidden_dropout_rate 0.20144726841976812 +122 58 model.num_filters 3.0 +122 58 optimizer.lr 0.0015595971135242237 +122 58 negative_sampler.num_negs_per_pos 21.0 +122 58 training.batch_size 2.0 +122 59 model.embedding_dim 0.0 +122 59 model.hidden_dropout_rate 0.2038659139643676 +122 59 model.num_filters 6.0 +122 59 optimizer.lr 0.006588241914791131 +122 59 negative_sampler.num_negs_per_pos 70.0 +122 59 training.batch_size 1.0 +122 60 model.embedding_dim 2.0 +122 60 model.hidden_dropout_rate 0.31348551869432223 +122 60 model.num_filters 5.0 +122 60 optimizer.lr 0.005866449847010728 +122 60 negative_sampler.num_negs_per_pos 
57.0 +122 60 training.batch_size 1.0 +122 61 model.embedding_dim 0.0 +122 61 model.hidden_dropout_rate 0.4335125275456288 +122 61 model.num_filters 1.0 +122 61 optimizer.lr 0.0655003603498244 +122 61 negative_sampler.num_negs_per_pos 89.0 +122 61 training.batch_size 2.0 +122 62 model.embedding_dim 2.0 +122 62 model.hidden_dropout_rate 0.23437323565884616 +122 62 model.num_filters 5.0 +122 62 optimizer.lr 0.0017640026517218306 +122 62 negative_sampler.num_negs_per_pos 60.0 +122 62 training.batch_size 1.0 +122 63 model.embedding_dim 0.0 +122 63 model.hidden_dropout_rate 0.32942590495390023 +122 63 model.num_filters 5.0 +122 63 optimizer.lr 0.08715684601204172 +122 63 negative_sampler.num_negs_per_pos 10.0 +122 63 training.batch_size 1.0 +122 64 model.embedding_dim 0.0 +122 64 model.hidden_dropout_rate 0.24728175357464555 +122 64 model.num_filters 7.0 +122 64 optimizer.lr 0.026846359567409037 +122 64 negative_sampler.num_negs_per_pos 34.0 +122 64 training.batch_size 2.0 +122 65 model.embedding_dim 2.0 +122 65 model.hidden_dropout_rate 0.2691026601612831 +122 65 model.num_filters 3.0 +122 65 optimizer.lr 0.008642353209893011 +122 65 negative_sampler.num_negs_per_pos 42.0 +122 65 training.batch_size 0.0 +122 66 model.embedding_dim 2.0 +122 66 model.hidden_dropout_rate 0.11372085638624237 +122 66 model.num_filters 4.0 +122 66 optimizer.lr 0.0015205179893444587 +122 66 negative_sampler.num_negs_per_pos 59.0 +122 66 training.batch_size 1.0 +122 67 model.embedding_dim 2.0 +122 67 model.hidden_dropout_rate 0.3449930096617965 +122 67 model.num_filters 6.0 +122 67 optimizer.lr 0.07024195131572641 +122 67 negative_sampler.num_negs_per_pos 84.0 +122 67 training.batch_size 0.0 +122 68 model.embedding_dim 2.0 +122 68 model.hidden_dropout_rate 0.24708344105013935 +122 68 model.num_filters 5.0 +122 68 optimizer.lr 0.018485818551390667 +122 68 negative_sampler.num_negs_per_pos 35.0 +122 68 training.batch_size 0.0 +122 69 model.embedding_dim 0.0 +122 69 model.hidden_dropout_rate 
0.2015333607163226 +122 69 model.num_filters 2.0 +122 69 optimizer.lr 0.027652636172624822 +122 69 negative_sampler.num_negs_per_pos 48.0 +122 69 training.batch_size 2.0 +122 70 model.embedding_dim 0.0 +122 70 model.hidden_dropout_rate 0.48000903024447905 +122 70 model.num_filters 2.0 +122 70 optimizer.lr 0.004704071993595728 +122 70 negative_sampler.num_negs_per_pos 59.0 +122 70 training.batch_size 1.0 +122 71 model.embedding_dim 2.0 +122 71 model.hidden_dropout_rate 0.21835233963254352 +122 71 model.num_filters 3.0 +122 71 optimizer.lr 0.011938198088737553 +122 71 negative_sampler.num_negs_per_pos 13.0 +122 71 training.batch_size 0.0 +122 72 model.embedding_dim 1.0 +122 72 model.hidden_dropout_rate 0.26685564846988585 +122 72 model.num_filters 9.0 +122 72 optimizer.lr 0.04983301930032921 +122 72 negative_sampler.num_negs_per_pos 63.0 +122 72 training.batch_size 0.0 +122 73 model.embedding_dim 1.0 +122 73 model.hidden_dropout_rate 0.24784530772541427 +122 73 model.num_filters 4.0 +122 73 optimizer.lr 0.003954162877302518 +122 73 negative_sampler.num_negs_per_pos 95.0 +122 73 training.batch_size 2.0 +122 1 dataset """kinships""" +122 1 model """convkb""" +122 1 loss """bceaftersigmoid""" +122 1 regularizer """no""" +122 1 optimizer """adam""" +122 1 training_loop """owa""" +122 1 negative_sampler """basic""" +122 1 evaluator """rankbased""" +122 2 dataset """kinships""" +122 2 model """convkb""" +122 2 loss """bceaftersigmoid""" +122 2 regularizer """no""" +122 2 optimizer """adam""" +122 2 training_loop """owa""" +122 2 negative_sampler """basic""" +122 2 evaluator """rankbased""" +122 3 dataset """kinships""" +122 3 model """convkb""" +122 3 loss """bceaftersigmoid""" +122 3 regularizer """no""" +122 3 optimizer """adam""" +122 3 training_loop """owa""" +122 3 negative_sampler """basic""" +122 3 evaluator """rankbased""" +122 4 dataset """kinships""" +122 4 model """convkb""" +122 4 loss """bceaftersigmoid""" +122 4 regularizer """no""" +122 4 optimizer 
"""adam""" +122 4 training_loop """owa""" +122 4 negative_sampler """basic""" +122 4 evaluator """rankbased""" +122 5 dataset """kinships""" +122 5 model """convkb""" +122 5 loss """bceaftersigmoid""" +122 5 regularizer """no""" +122 5 optimizer """adam""" +122 5 training_loop """owa""" +122 5 negative_sampler """basic""" +122 5 evaluator """rankbased""" +122 6 dataset """kinships""" +122 6 model """convkb""" +122 6 loss """bceaftersigmoid""" +122 6 regularizer """no""" +122 6 optimizer """adam""" +122 6 training_loop """owa""" +122 6 negative_sampler """basic""" +122 6 evaluator """rankbased""" +122 7 dataset """kinships""" +122 7 model """convkb""" +122 7 loss """bceaftersigmoid""" +122 7 regularizer """no""" +122 7 optimizer """adam""" +122 7 training_loop """owa""" +122 7 negative_sampler """basic""" +122 7 evaluator """rankbased""" +122 8 dataset """kinships""" +122 8 model """convkb""" +122 8 loss """bceaftersigmoid""" +122 8 regularizer """no""" +122 8 optimizer """adam""" +122 8 training_loop """owa""" +122 8 negative_sampler """basic""" +122 8 evaluator """rankbased""" +122 9 dataset """kinships""" +122 9 model """convkb""" +122 9 loss """bceaftersigmoid""" +122 9 regularizer """no""" +122 9 optimizer """adam""" +122 9 training_loop """owa""" +122 9 negative_sampler """basic""" +122 9 evaluator """rankbased""" +122 10 dataset """kinships""" +122 10 model """convkb""" +122 10 loss """bceaftersigmoid""" +122 10 regularizer """no""" +122 10 optimizer """adam""" +122 10 training_loop """owa""" +122 10 negative_sampler """basic""" +122 10 evaluator """rankbased""" +122 11 dataset """kinships""" +122 11 model """convkb""" +122 11 loss """bceaftersigmoid""" +122 11 regularizer """no""" +122 11 optimizer """adam""" +122 11 training_loop """owa""" +122 11 negative_sampler """basic""" +122 11 evaluator """rankbased""" +122 12 dataset """kinships""" +122 12 model """convkb""" +122 12 loss """bceaftersigmoid""" +122 12 regularizer """no""" +122 12 optimizer """adam""" 
+122 12 training_loop """owa""" +122 12 negative_sampler """basic""" +122 12 evaluator """rankbased""" +122 13 dataset """kinships""" +122 13 model """convkb""" +122 13 loss """bceaftersigmoid""" +122 13 regularizer """no""" +122 13 optimizer """adam""" +122 13 training_loop """owa""" +122 13 negative_sampler """basic""" +122 13 evaluator """rankbased""" +122 14 dataset """kinships""" +122 14 model """convkb""" +122 14 loss """bceaftersigmoid""" +122 14 regularizer """no""" +122 14 optimizer """adam""" +122 14 training_loop """owa""" +122 14 negative_sampler """basic""" +122 14 evaluator """rankbased""" +122 15 dataset """kinships""" +122 15 model """convkb""" +122 15 loss """bceaftersigmoid""" +122 15 regularizer """no""" +122 15 optimizer """adam""" +122 15 training_loop """owa""" +122 15 negative_sampler """basic""" +122 15 evaluator """rankbased""" +122 16 dataset """kinships""" +122 16 model """convkb""" +122 16 loss """bceaftersigmoid""" +122 16 regularizer """no""" +122 16 optimizer """adam""" +122 16 training_loop """owa""" +122 16 negative_sampler """basic""" +122 16 evaluator """rankbased""" +122 17 dataset """kinships""" +122 17 model """convkb""" +122 17 loss """bceaftersigmoid""" +122 17 regularizer """no""" +122 17 optimizer """adam""" +122 17 training_loop """owa""" +122 17 negative_sampler """basic""" +122 17 evaluator """rankbased""" +122 18 dataset """kinships""" +122 18 model """convkb""" +122 18 loss """bceaftersigmoid""" +122 18 regularizer """no""" +122 18 optimizer """adam""" +122 18 training_loop """owa""" +122 18 negative_sampler """basic""" +122 18 evaluator """rankbased""" +122 19 dataset """kinships""" +122 19 model """convkb""" +122 19 loss """bceaftersigmoid""" +122 19 regularizer """no""" +122 19 optimizer """adam""" +122 19 training_loop """owa""" +122 19 negative_sampler """basic""" +122 19 evaluator """rankbased""" +122 20 dataset """kinships""" +122 20 model """convkb""" +122 20 loss """bceaftersigmoid""" +122 20 regularizer 
"""no""" +122 20 optimizer """adam""" +122 20 training_loop """owa""" +122 20 negative_sampler """basic""" +122 20 evaluator """rankbased""" +122 21 dataset """kinships""" +122 21 model """convkb""" +122 21 loss """bceaftersigmoid""" +122 21 regularizer """no""" +122 21 optimizer """adam""" +122 21 training_loop """owa""" +122 21 negative_sampler """basic""" +122 21 evaluator """rankbased""" +122 22 dataset """kinships""" +122 22 model """convkb""" +122 22 loss """bceaftersigmoid""" +122 22 regularizer """no""" +122 22 optimizer """adam""" +122 22 training_loop """owa""" +122 22 negative_sampler """basic""" +122 22 evaluator """rankbased""" +122 23 dataset """kinships""" +122 23 model """convkb""" +122 23 loss """bceaftersigmoid""" +122 23 regularizer """no""" +122 23 optimizer """adam""" +122 23 training_loop """owa""" +122 23 negative_sampler """basic""" +122 23 evaluator """rankbased""" +122 24 dataset """kinships""" +122 24 model """convkb""" +122 24 loss """bceaftersigmoid""" +122 24 regularizer """no""" +122 24 optimizer """adam""" +122 24 training_loop """owa""" +122 24 negative_sampler """basic""" +122 24 evaluator """rankbased""" +122 25 dataset """kinships""" +122 25 model """convkb""" +122 25 loss """bceaftersigmoid""" +122 25 regularizer """no""" +122 25 optimizer """adam""" +122 25 training_loop """owa""" +122 25 negative_sampler """basic""" +122 25 evaluator """rankbased""" +122 26 dataset """kinships""" +122 26 model """convkb""" +122 26 loss """bceaftersigmoid""" +122 26 regularizer """no""" +122 26 optimizer """adam""" +122 26 training_loop """owa""" +122 26 negative_sampler """basic""" +122 26 evaluator """rankbased""" +122 27 dataset """kinships""" +122 27 model """convkb""" +122 27 loss """bceaftersigmoid""" +122 27 regularizer """no""" +122 27 optimizer """adam""" +122 27 training_loop """owa""" +122 27 negative_sampler """basic""" +122 27 evaluator """rankbased""" +122 28 dataset """kinships""" +122 28 model """convkb""" +122 28 loss 
"""bceaftersigmoid""" +122 28 regularizer """no""" +122 28 optimizer """adam""" +122 28 training_loop """owa""" +122 28 negative_sampler """basic""" +122 28 evaluator """rankbased""" +122 29 dataset """kinships""" +122 29 model """convkb""" +122 29 loss """bceaftersigmoid""" +122 29 regularizer """no""" +122 29 optimizer """adam""" +122 29 training_loop """owa""" +122 29 negative_sampler """basic""" +122 29 evaluator """rankbased""" +122 30 dataset """kinships""" +122 30 model """convkb""" +122 30 loss """bceaftersigmoid""" +122 30 regularizer """no""" +122 30 optimizer """adam""" +122 30 training_loop """owa""" +122 30 negative_sampler """basic""" +122 30 evaluator """rankbased""" +122 31 dataset """kinships""" +122 31 model """convkb""" +122 31 loss """bceaftersigmoid""" +122 31 regularizer """no""" +122 31 optimizer """adam""" +122 31 training_loop """owa""" +122 31 negative_sampler """basic""" +122 31 evaluator """rankbased""" +122 32 dataset """kinships""" +122 32 model """convkb""" +122 32 loss """bceaftersigmoid""" +122 32 regularizer """no""" +122 32 optimizer """adam""" +122 32 training_loop """owa""" +122 32 negative_sampler """basic""" +122 32 evaluator """rankbased""" +122 33 dataset """kinships""" +122 33 model """convkb""" +122 33 loss """bceaftersigmoid""" +122 33 regularizer """no""" +122 33 optimizer """adam""" +122 33 training_loop """owa""" +122 33 negative_sampler """basic""" +122 33 evaluator """rankbased""" +122 34 dataset """kinships""" +122 34 model """convkb""" +122 34 loss """bceaftersigmoid""" +122 34 regularizer """no""" +122 34 optimizer """adam""" +122 34 training_loop """owa""" +122 34 negative_sampler """basic""" +122 34 evaluator """rankbased""" +122 35 dataset """kinships""" +122 35 model """convkb""" +122 35 loss """bceaftersigmoid""" +122 35 regularizer """no""" +122 35 optimizer """adam""" +122 35 training_loop """owa""" +122 35 negative_sampler """basic""" +122 35 evaluator """rankbased""" +122 36 dataset """kinships""" +122 36 
model """convkb""" +122 36 loss """bceaftersigmoid""" +122 36 regularizer """no""" +122 36 optimizer """adam""" +122 36 training_loop """owa""" +122 36 negative_sampler """basic""" +122 36 evaluator """rankbased""" +122 37 dataset """kinships""" +122 37 model """convkb""" +122 37 loss """bceaftersigmoid""" +122 37 regularizer """no""" +122 37 optimizer """adam""" +122 37 training_loop """owa""" +122 37 negative_sampler """basic""" +122 37 evaluator """rankbased""" +122 38 dataset """kinships""" +122 38 model """convkb""" +122 38 loss """bceaftersigmoid""" +122 38 regularizer """no""" +122 38 optimizer """adam""" +122 38 training_loop """owa""" +122 38 negative_sampler """basic""" +122 38 evaluator """rankbased""" +122 39 dataset """kinships""" +122 39 model """convkb""" +122 39 loss """bceaftersigmoid""" +122 39 regularizer """no""" +122 39 optimizer """adam""" +122 39 training_loop """owa""" +122 39 negative_sampler """basic""" +122 39 evaluator """rankbased""" +122 40 dataset """kinships""" +122 40 model """convkb""" +122 40 loss """bceaftersigmoid""" +122 40 regularizer """no""" +122 40 optimizer """adam""" +122 40 training_loop """owa""" +122 40 negative_sampler """basic""" +122 40 evaluator """rankbased""" +122 41 dataset """kinships""" +122 41 model """convkb""" +122 41 loss """bceaftersigmoid""" +122 41 regularizer """no""" +122 41 optimizer """adam""" +122 41 training_loop """owa""" +122 41 negative_sampler """basic""" +122 41 evaluator """rankbased""" +122 42 dataset """kinships""" +122 42 model """convkb""" +122 42 loss """bceaftersigmoid""" +122 42 regularizer """no""" +122 42 optimizer """adam""" +122 42 training_loop """owa""" +122 42 negative_sampler """basic""" +122 42 evaluator """rankbased""" +122 43 dataset """kinships""" +122 43 model """convkb""" +122 43 loss """bceaftersigmoid""" +122 43 regularizer """no""" +122 43 optimizer """adam""" +122 43 training_loop """owa""" +122 43 negative_sampler """basic""" +122 43 evaluator """rankbased""" +122 
44 dataset """kinships""" +122 44 model """convkb""" +122 44 loss """bceaftersigmoid""" +122 44 regularizer """no""" +122 44 optimizer """adam""" +122 44 training_loop """owa""" +122 44 negative_sampler """basic""" +122 44 evaluator """rankbased""" +122 45 dataset """kinships""" +122 45 model """convkb""" +122 45 loss """bceaftersigmoid""" +122 45 regularizer """no""" +122 45 optimizer """adam""" +122 45 training_loop """owa""" +122 45 negative_sampler """basic""" +122 45 evaluator """rankbased""" +122 46 dataset """kinships""" +122 46 model """convkb""" +122 46 loss """bceaftersigmoid""" +122 46 regularizer """no""" +122 46 optimizer """adam""" +122 46 training_loop """owa""" +122 46 negative_sampler """basic""" +122 46 evaluator """rankbased""" +122 47 dataset """kinships""" +122 47 model """convkb""" +122 47 loss """bceaftersigmoid""" +122 47 regularizer """no""" +122 47 optimizer """adam""" +122 47 training_loop """owa""" +122 47 negative_sampler """basic""" +122 47 evaluator """rankbased""" +122 48 dataset """kinships""" +122 48 model """convkb""" +122 48 loss """bceaftersigmoid""" +122 48 regularizer """no""" +122 48 optimizer """adam""" +122 48 training_loop """owa""" +122 48 negative_sampler """basic""" +122 48 evaluator """rankbased""" +122 49 dataset """kinships""" +122 49 model """convkb""" +122 49 loss """bceaftersigmoid""" +122 49 regularizer """no""" +122 49 optimizer """adam""" +122 49 training_loop """owa""" +122 49 negative_sampler """basic""" +122 49 evaluator """rankbased""" +122 50 dataset """kinships""" +122 50 model """convkb""" +122 50 loss """bceaftersigmoid""" +122 50 regularizer """no""" +122 50 optimizer """adam""" +122 50 training_loop """owa""" +122 50 negative_sampler """basic""" +122 50 evaluator """rankbased""" +122 51 dataset """kinships""" +122 51 model """convkb""" +122 51 loss """bceaftersigmoid""" +122 51 regularizer """no""" +122 51 optimizer """adam""" +122 51 training_loop """owa""" +122 51 negative_sampler """basic""" +122 
51 evaluator """rankbased""" +122 52 dataset """kinships""" +122 52 model """convkb""" +122 52 loss """bceaftersigmoid""" +122 52 regularizer """no""" +122 52 optimizer """adam""" +122 52 training_loop """owa""" +122 52 negative_sampler """basic""" +122 52 evaluator """rankbased""" +122 53 dataset """kinships""" +122 53 model """convkb""" +122 53 loss """bceaftersigmoid""" +122 53 regularizer """no""" +122 53 optimizer """adam""" +122 53 training_loop """owa""" +122 53 negative_sampler """basic""" +122 53 evaluator """rankbased""" +122 54 dataset """kinships""" +122 54 model """convkb""" +122 54 loss """bceaftersigmoid""" +122 54 regularizer """no""" +122 54 optimizer """adam""" +122 54 training_loop """owa""" +122 54 negative_sampler """basic""" +122 54 evaluator """rankbased""" +122 55 dataset """kinships""" +122 55 model """convkb""" +122 55 loss """bceaftersigmoid""" +122 55 regularizer """no""" +122 55 optimizer """adam""" +122 55 training_loop """owa""" +122 55 negative_sampler """basic""" +122 55 evaluator """rankbased""" +122 56 dataset """kinships""" +122 56 model """convkb""" +122 56 loss """bceaftersigmoid""" +122 56 regularizer """no""" +122 56 optimizer """adam""" +122 56 training_loop """owa""" +122 56 negative_sampler """basic""" +122 56 evaluator """rankbased""" +122 57 dataset """kinships""" +122 57 model """convkb""" +122 57 loss """bceaftersigmoid""" +122 57 regularizer """no""" +122 57 optimizer """adam""" +122 57 training_loop """owa""" +122 57 negative_sampler """basic""" +122 57 evaluator """rankbased""" +122 58 dataset """kinships""" +122 58 model """convkb""" +122 58 loss """bceaftersigmoid""" +122 58 regularizer """no""" +122 58 optimizer """adam""" +122 58 training_loop """owa""" +122 58 negative_sampler """basic""" +122 58 evaluator """rankbased""" +122 59 dataset """kinships""" +122 59 model """convkb""" +122 59 loss """bceaftersigmoid""" +122 59 regularizer """no""" +122 59 optimizer """adam""" +122 59 training_loop """owa""" +122 59 
negative_sampler """basic""" +122 59 evaluator """rankbased""" +122 60 dataset """kinships""" +122 60 model """convkb""" +122 60 loss """bceaftersigmoid""" +122 60 regularizer """no""" +122 60 optimizer """adam""" +122 60 training_loop """owa""" +122 60 negative_sampler """basic""" +122 60 evaluator """rankbased""" +122 61 dataset """kinships""" +122 61 model """convkb""" +122 61 loss """bceaftersigmoid""" +122 61 regularizer """no""" +122 61 optimizer """adam""" +122 61 training_loop """owa""" +122 61 negative_sampler """basic""" +122 61 evaluator """rankbased""" +122 62 dataset """kinships""" +122 62 model """convkb""" +122 62 loss """bceaftersigmoid""" +122 62 regularizer """no""" +122 62 optimizer """adam""" +122 62 training_loop """owa""" +122 62 negative_sampler """basic""" +122 62 evaluator """rankbased""" +122 63 dataset """kinships""" +122 63 model """convkb""" +122 63 loss """bceaftersigmoid""" +122 63 regularizer """no""" +122 63 optimizer """adam""" +122 63 training_loop """owa""" +122 63 negative_sampler """basic""" +122 63 evaluator """rankbased""" +122 64 dataset """kinships""" +122 64 model """convkb""" +122 64 loss """bceaftersigmoid""" +122 64 regularizer """no""" +122 64 optimizer """adam""" +122 64 training_loop """owa""" +122 64 negative_sampler """basic""" +122 64 evaluator """rankbased""" +122 65 dataset """kinships""" +122 65 model """convkb""" +122 65 loss """bceaftersigmoid""" +122 65 regularizer """no""" +122 65 optimizer """adam""" +122 65 training_loop """owa""" +122 65 negative_sampler """basic""" +122 65 evaluator """rankbased""" +122 66 dataset """kinships""" +122 66 model """convkb""" +122 66 loss """bceaftersigmoid""" +122 66 regularizer """no""" +122 66 optimizer """adam""" +122 66 training_loop """owa""" +122 66 negative_sampler """basic""" +122 66 evaluator """rankbased""" +122 67 dataset """kinships""" +122 67 model """convkb""" +122 67 loss """bceaftersigmoid""" +122 67 regularizer """no""" +122 67 optimizer """adam""" +122 67 
training_loop """owa""" +122 67 negative_sampler """basic""" +122 67 evaluator """rankbased""" +122 68 dataset """kinships""" +122 68 model """convkb""" +122 68 loss """bceaftersigmoid""" +122 68 regularizer """no""" +122 68 optimizer """adam""" +122 68 training_loop """owa""" +122 68 negative_sampler """basic""" +122 68 evaluator """rankbased""" +122 69 dataset """kinships""" +122 69 model """convkb""" +122 69 loss """bceaftersigmoid""" +122 69 regularizer """no""" +122 69 optimizer """adam""" +122 69 training_loop """owa""" +122 69 negative_sampler """basic""" +122 69 evaluator """rankbased""" +122 70 dataset """kinships""" +122 70 model """convkb""" +122 70 loss """bceaftersigmoid""" +122 70 regularizer """no""" +122 70 optimizer """adam""" +122 70 training_loop """owa""" +122 70 negative_sampler """basic""" +122 70 evaluator """rankbased""" +122 71 dataset """kinships""" +122 71 model """convkb""" +122 71 loss """bceaftersigmoid""" +122 71 regularizer """no""" +122 71 optimizer """adam""" +122 71 training_loop """owa""" +122 71 negative_sampler """basic""" +122 71 evaluator """rankbased""" +122 72 dataset """kinships""" +122 72 model """convkb""" +122 72 loss """bceaftersigmoid""" +122 72 regularizer """no""" +122 72 optimizer """adam""" +122 72 training_loop """owa""" +122 72 negative_sampler """basic""" +122 72 evaluator """rankbased""" +122 73 dataset """kinships""" +122 73 model """convkb""" +122 73 loss """bceaftersigmoid""" +122 73 regularizer """no""" +122 73 optimizer """adam""" +122 73 training_loop """owa""" +122 73 negative_sampler """basic""" +122 73 evaluator """rankbased""" +123 1 model.embedding_dim 0.0 +123 1 model.hidden_dropout_rate 0.3249409636424586 +123 1 model.num_filters 1.0 +123 1 optimizer.lr 0.0016198338711300225 +123 1 negative_sampler.num_negs_per_pos 75.0 +123 1 training.batch_size 2.0 +123 2 model.embedding_dim 1.0 +123 2 model.hidden_dropout_rate 0.19829192207959162 +123 2 model.num_filters 8.0 +123 2 optimizer.lr 
0.04663859721753439 +123 2 negative_sampler.num_negs_per_pos 90.0 +123 2 training.batch_size 1.0 +123 3 model.embedding_dim 1.0 +123 3 model.hidden_dropout_rate 0.16998470464037263 +123 3 model.num_filters 0.0 +123 3 optimizer.lr 0.02603639846395416 +123 3 negative_sampler.num_negs_per_pos 15.0 +123 3 training.batch_size 0.0 +123 4 model.embedding_dim 1.0 +123 4 model.hidden_dropout_rate 0.3576826232640339 +123 4 model.num_filters 3.0 +123 4 optimizer.lr 0.08397845525471578 +123 4 negative_sampler.num_negs_per_pos 51.0 +123 4 training.batch_size 2.0 +123 5 model.embedding_dim 0.0 +123 5 model.hidden_dropout_rate 0.1222581380978845 +123 5 model.num_filters 3.0 +123 5 optimizer.lr 0.019832093850033573 +123 5 negative_sampler.num_negs_per_pos 86.0 +123 5 training.batch_size 0.0 +123 6 model.embedding_dim 1.0 +123 6 model.hidden_dropout_rate 0.44348606206229535 +123 6 model.num_filters 1.0 +123 6 optimizer.lr 0.0010734016313238466 +123 6 negative_sampler.num_negs_per_pos 75.0 +123 6 training.batch_size 1.0 +123 7 model.embedding_dim 0.0 +123 7 model.hidden_dropout_rate 0.30376654676956555 +123 7 model.num_filters 7.0 +123 7 optimizer.lr 0.002171874800175634 +123 7 negative_sampler.num_negs_per_pos 52.0 +123 7 training.batch_size 1.0 +123 8 model.embedding_dim 0.0 +123 8 model.hidden_dropout_rate 0.3422092044170681 +123 8 model.num_filters 0.0 +123 8 optimizer.lr 0.022944488467657537 +123 8 negative_sampler.num_negs_per_pos 95.0 +123 8 training.batch_size 2.0 +123 9 model.embedding_dim 0.0 +123 9 model.hidden_dropout_rate 0.42830665702142534 +123 9 model.num_filters 0.0 +123 9 optimizer.lr 0.0012277582702427238 +123 9 negative_sampler.num_negs_per_pos 83.0 +123 9 training.batch_size 0.0 +123 10 model.embedding_dim 1.0 +123 10 model.hidden_dropout_rate 0.3249942469747508 +123 10 model.num_filters 9.0 +123 10 optimizer.lr 0.0026460507479113245 +123 10 negative_sampler.num_negs_per_pos 48.0 +123 10 training.batch_size 1.0 +123 11 model.embedding_dim 1.0 +123 11 
model.hidden_dropout_rate 0.13748716206537873 +123 11 model.num_filters 6.0 +123 11 optimizer.lr 0.007354301436759236 +123 11 negative_sampler.num_negs_per_pos 68.0 +123 11 training.batch_size 2.0 +123 12 model.embedding_dim 0.0 +123 12 model.hidden_dropout_rate 0.2594329756296975 +123 12 model.num_filters 0.0 +123 12 optimizer.lr 0.0818402643635389 +123 12 negative_sampler.num_negs_per_pos 64.0 +123 12 training.batch_size 0.0 +123 13 model.embedding_dim 1.0 +123 13 model.hidden_dropout_rate 0.3913437539775731 +123 13 model.num_filters 0.0 +123 13 optimizer.lr 0.0633148204847143 +123 13 negative_sampler.num_negs_per_pos 71.0 +123 13 training.batch_size 2.0 +123 14 model.embedding_dim 2.0 +123 14 model.hidden_dropout_rate 0.17892289548848084 +123 14 model.num_filters 0.0 +123 14 optimizer.lr 0.049269215804759355 +123 14 negative_sampler.num_negs_per_pos 80.0 +123 14 training.batch_size 2.0 +123 15 model.embedding_dim 0.0 +123 15 model.hidden_dropout_rate 0.2519736353539594 +123 15 model.num_filters 4.0 +123 15 optimizer.lr 0.0020274385687945587 +123 15 negative_sampler.num_negs_per_pos 76.0 +123 15 training.batch_size 0.0 +123 16 model.embedding_dim 2.0 +123 16 model.hidden_dropout_rate 0.43169515961385135 +123 16 model.num_filters 9.0 +123 16 optimizer.lr 0.00581084900392776 +123 16 negative_sampler.num_negs_per_pos 97.0 +123 16 training.batch_size 0.0 +123 17 model.embedding_dim 0.0 +123 17 model.hidden_dropout_rate 0.19044562032562956 +123 17 model.num_filters 8.0 +123 17 optimizer.lr 0.002910996788552598 +123 17 negative_sampler.num_negs_per_pos 70.0 +123 17 training.batch_size 2.0 +123 18 model.embedding_dim 2.0 +123 18 model.hidden_dropout_rate 0.2599421533511192 +123 18 model.num_filters 5.0 +123 18 optimizer.lr 0.004250692369611018 +123 18 negative_sampler.num_negs_per_pos 58.0 +123 18 training.batch_size 0.0 +123 19 model.embedding_dim 0.0 +123 19 model.hidden_dropout_rate 0.3573471667535886 +123 19 model.num_filters 5.0 +123 19 optimizer.lr 
0.0015016867256218416 +123 19 negative_sampler.num_negs_per_pos 17.0 +123 19 training.batch_size 1.0 +123 20 model.embedding_dim 0.0 +123 20 model.hidden_dropout_rate 0.4085127767339934 +123 20 model.num_filters 5.0 +123 20 optimizer.lr 0.05915976477104604 +123 20 negative_sampler.num_negs_per_pos 31.0 +123 20 training.batch_size 2.0 +123 21 model.embedding_dim 2.0 +123 21 model.hidden_dropout_rate 0.277938585910632 +123 21 model.num_filters 3.0 +123 21 optimizer.lr 0.012786972467860893 +123 21 negative_sampler.num_negs_per_pos 63.0 +123 21 training.batch_size 1.0 +123 22 model.embedding_dim 2.0 +123 22 model.hidden_dropout_rate 0.18997128706568944 +123 22 model.num_filters 3.0 +123 22 optimizer.lr 0.0010908737389480304 +123 22 negative_sampler.num_negs_per_pos 78.0 +123 22 training.batch_size 0.0 +123 23 model.embedding_dim 2.0 +123 23 model.hidden_dropout_rate 0.14624239379708875 +123 23 model.num_filters 2.0 +123 23 optimizer.lr 0.014131703682923331 +123 23 negative_sampler.num_negs_per_pos 12.0 +123 23 training.batch_size 2.0 +123 24 model.embedding_dim 0.0 +123 24 model.hidden_dropout_rate 0.183835380458325 +123 24 model.num_filters 5.0 +123 24 optimizer.lr 0.012526334214615138 +123 24 negative_sampler.num_negs_per_pos 31.0 +123 24 training.batch_size 2.0 +123 25 model.embedding_dim 1.0 +123 25 model.hidden_dropout_rate 0.29658015030152207 +123 25 model.num_filters 1.0 +123 25 optimizer.lr 0.07648684943566125 +123 25 negative_sampler.num_negs_per_pos 82.0 +123 25 training.batch_size 1.0 +123 26 model.embedding_dim 2.0 +123 26 model.hidden_dropout_rate 0.42229427111786033 +123 26 model.num_filters 5.0 +123 26 optimizer.lr 0.029536257196014402 +123 26 negative_sampler.num_negs_per_pos 72.0 +123 26 training.batch_size 2.0 +123 27 model.embedding_dim 1.0 +123 27 model.hidden_dropout_rate 0.19700326626651937 +123 27 model.num_filters 8.0 +123 27 optimizer.lr 0.05374331857160021 +123 27 negative_sampler.num_negs_per_pos 49.0 +123 27 training.batch_size 1.0 +123 28 
model.embedding_dim 2.0 +123 28 model.hidden_dropout_rate 0.20182913628329868 +123 28 model.num_filters 3.0 +123 28 optimizer.lr 0.003676190764209847 +123 28 negative_sampler.num_negs_per_pos 92.0 +123 28 training.batch_size 2.0 +123 29 model.embedding_dim 1.0 +123 29 model.hidden_dropout_rate 0.21912988171333622 +123 29 model.num_filters 4.0 +123 29 optimizer.lr 0.005620004697723571 +123 29 negative_sampler.num_negs_per_pos 85.0 +123 29 training.batch_size 0.0 +123 30 model.embedding_dim 1.0 +123 30 model.hidden_dropout_rate 0.3935849153325055 +123 30 model.num_filters 5.0 +123 30 optimizer.lr 0.00899200060425413 +123 30 negative_sampler.num_negs_per_pos 83.0 +123 30 training.batch_size 1.0 +123 31 model.embedding_dim 1.0 +123 31 model.hidden_dropout_rate 0.11364388381679041 +123 31 model.num_filters 1.0 +123 31 optimizer.lr 0.008669506231677325 +123 31 negative_sampler.num_negs_per_pos 2.0 +123 31 training.batch_size 2.0 +123 32 model.embedding_dim 2.0 +123 32 model.hidden_dropout_rate 0.11708346192823621 +123 32 model.num_filters 0.0 +123 32 optimizer.lr 0.0011815525998507207 +123 32 negative_sampler.num_negs_per_pos 14.0 +123 32 training.batch_size 0.0 +123 33 model.embedding_dim 0.0 +123 33 model.hidden_dropout_rate 0.11139034492085882 +123 33 model.num_filters 8.0 +123 33 optimizer.lr 0.0027276913374554637 +123 33 negative_sampler.num_negs_per_pos 71.0 +123 33 training.batch_size 2.0 +123 34 model.embedding_dim 1.0 +123 34 model.hidden_dropout_rate 0.48735522066673 +123 34 model.num_filters 4.0 +123 34 optimizer.lr 0.08513941522574163 +123 34 negative_sampler.num_negs_per_pos 58.0 +123 34 training.batch_size 2.0 +123 35 model.embedding_dim 0.0 +123 35 model.hidden_dropout_rate 0.2854252124940542 +123 35 model.num_filters 1.0 +123 35 optimizer.lr 0.004796284920027923 +123 35 negative_sampler.num_negs_per_pos 40.0 +123 35 training.batch_size 1.0 +123 36 model.embedding_dim 1.0 +123 36 model.hidden_dropout_rate 0.4831300449078638 +123 36 model.num_filters 9.0 
+123 36 optimizer.lr 0.0029988567713580315 +123 36 negative_sampler.num_negs_per_pos 97.0 +123 36 training.batch_size 1.0 +123 37 model.embedding_dim 1.0 +123 37 model.hidden_dropout_rate 0.21609967486645554 +123 37 model.num_filters 3.0 +123 37 optimizer.lr 0.001725296681164707 +123 37 negative_sampler.num_negs_per_pos 82.0 +123 37 training.batch_size 2.0 +123 38 model.embedding_dim 2.0 +123 38 model.hidden_dropout_rate 0.27736819435512766 +123 38 model.num_filters 1.0 +123 38 optimizer.lr 0.0056339540435980185 +123 38 negative_sampler.num_negs_per_pos 23.0 +123 38 training.batch_size 2.0 +123 39 model.embedding_dim 1.0 +123 39 model.hidden_dropout_rate 0.2992740175961924 +123 39 model.num_filters 1.0 +123 39 optimizer.lr 0.009983877606862564 +123 39 negative_sampler.num_negs_per_pos 20.0 +123 39 training.batch_size 2.0 +123 40 model.embedding_dim 0.0 +123 40 model.hidden_dropout_rate 0.10476227281943795 +123 40 model.num_filters 6.0 +123 40 optimizer.lr 0.04377238732198205 +123 40 negative_sampler.num_negs_per_pos 52.0 +123 40 training.batch_size 0.0 +123 41 model.embedding_dim 2.0 +123 41 model.hidden_dropout_rate 0.23235580487221658 +123 41 model.num_filters 0.0 +123 41 optimizer.lr 0.002056061233480169 +123 41 negative_sampler.num_negs_per_pos 57.0 +123 41 training.batch_size 2.0 +123 42 model.embedding_dim 0.0 +123 42 model.hidden_dropout_rate 0.2885891476160733 +123 42 model.num_filters 8.0 +123 42 optimizer.lr 0.0010301691983782952 +123 42 negative_sampler.num_negs_per_pos 4.0 +123 42 training.batch_size 1.0 +123 43 model.embedding_dim 1.0 +123 43 model.hidden_dropout_rate 0.10474452370052303 +123 43 model.num_filters 1.0 +123 43 optimizer.lr 0.007595893261225413 +123 43 negative_sampler.num_negs_per_pos 77.0 +123 43 training.batch_size 2.0 +123 44 model.embedding_dim 2.0 +123 44 model.hidden_dropout_rate 0.47125948425698205 +123 44 model.num_filters 0.0 +123 44 optimizer.lr 0.0038174789930316023 +123 44 negative_sampler.num_negs_per_pos 13.0 +123 44 
training.batch_size 2.0 +123 45 model.embedding_dim 0.0 +123 45 model.hidden_dropout_rate 0.2771524597748307 +123 45 model.num_filters 0.0 +123 45 optimizer.lr 0.029915537877411618 +123 45 negative_sampler.num_negs_per_pos 9.0 +123 45 training.batch_size 2.0 +123 46 model.embedding_dim 2.0 +123 46 model.hidden_dropout_rate 0.3999582364129084 +123 46 model.num_filters 7.0 +123 46 optimizer.lr 0.002478257779232872 +123 46 negative_sampler.num_negs_per_pos 22.0 +123 46 training.batch_size 0.0 +123 47 model.embedding_dim 2.0 +123 47 model.hidden_dropout_rate 0.3826455859295937 +123 47 model.num_filters 4.0 +123 47 optimizer.lr 0.005119960492329582 +123 47 negative_sampler.num_negs_per_pos 62.0 +123 47 training.batch_size 0.0 +123 48 model.embedding_dim 0.0 +123 48 model.hidden_dropout_rate 0.2950450228561853 +123 48 model.num_filters 6.0 +123 48 optimizer.lr 0.030345107432043657 +123 48 negative_sampler.num_negs_per_pos 79.0 +123 48 training.batch_size 2.0 +123 49 model.embedding_dim 1.0 +123 49 model.hidden_dropout_rate 0.19592461011659612 +123 49 model.num_filters 6.0 +123 49 optimizer.lr 0.028578506998657095 +123 49 negative_sampler.num_negs_per_pos 15.0 +123 49 training.batch_size 2.0 +123 50 model.embedding_dim 0.0 +123 50 model.hidden_dropout_rate 0.18273078437440626 +123 50 model.num_filters 4.0 +123 50 optimizer.lr 0.0021358491714634116 +123 50 negative_sampler.num_negs_per_pos 3.0 +123 50 training.batch_size 1.0 +123 51 model.embedding_dim 0.0 +123 51 model.hidden_dropout_rate 0.4358656740762854 +123 51 model.num_filters 8.0 +123 51 optimizer.lr 0.02627005450376228 +123 51 negative_sampler.num_negs_per_pos 93.0 +123 51 training.batch_size 0.0 +123 52 model.embedding_dim 2.0 +123 52 model.hidden_dropout_rate 0.21490032474389764 +123 52 model.num_filters 1.0 +123 52 optimizer.lr 0.047638743008877785 +123 52 negative_sampler.num_negs_per_pos 74.0 +123 52 training.batch_size 2.0 +123 53 model.embedding_dim 1.0 +123 53 model.hidden_dropout_rate 0.321710997212096 
+123 53 model.num_filters 4.0 +123 53 optimizer.lr 0.046944729020906205 +123 53 negative_sampler.num_negs_per_pos 78.0 +123 53 training.batch_size 1.0 +123 54 model.embedding_dim 2.0 +123 54 model.hidden_dropout_rate 0.485896105301932 +123 54 model.num_filters 7.0 +123 54 optimizer.lr 0.005233074124217345 +123 54 negative_sampler.num_negs_per_pos 82.0 +123 54 training.batch_size 0.0 +123 55 model.embedding_dim 2.0 +123 55 model.hidden_dropout_rate 0.37348976490211916 +123 55 model.num_filters 3.0 +123 55 optimizer.lr 0.04518000977022549 +123 55 negative_sampler.num_negs_per_pos 89.0 +123 55 training.batch_size 2.0 +123 56 model.embedding_dim 1.0 +123 56 model.hidden_dropout_rate 0.12738322313548975 +123 56 model.num_filters 1.0 +123 56 optimizer.lr 0.0019006388360512765 +123 56 negative_sampler.num_negs_per_pos 5.0 +123 56 training.batch_size 0.0 +123 57 model.embedding_dim 1.0 +123 57 model.hidden_dropout_rate 0.21781470050818053 +123 57 model.num_filters 6.0 +123 57 optimizer.lr 0.006997201547269772 +123 57 negative_sampler.num_negs_per_pos 75.0 +123 57 training.batch_size 1.0 +123 58 model.embedding_dim 1.0 +123 58 model.hidden_dropout_rate 0.22640780593747387 +123 58 model.num_filters 4.0 +123 58 optimizer.lr 0.08487041183621478 +123 58 negative_sampler.num_negs_per_pos 12.0 +123 58 training.batch_size 1.0 +123 59 model.embedding_dim 1.0 +123 59 model.hidden_dropout_rate 0.44112621293346455 +123 59 model.num_filters 6.0 +123 59 optimizer.lr 0.06291668525899206 +123 59 negative_sampler.num_negs_per_pos 77.0 +123 59 training.batch_size 2.0 +123 60 model.embedding_dim 0.0 +123 60 model.hidden_dropout_rate 0.2924549645018962 +123 60 model.num_filters 1.0 +123 60 optimizer.lr 0.011326556901631523 +123 60 negative_sampler.num_negs_per_pos 60.0 +123 60 training.batch_size 0.0 +123 61 model.embedding_dim 0.0 +123 61 model.hidden_dropout_rate 0.4353823331146849 +123 61 model.num_filters 6.0 +123 61 optimizer.lr 0.0010423183537142857 +123 61 
negative_sampler.num_negs_per_pos 3.0 +123 61 training.batch_size 0.0 +123 62 model.embedding_dim 0.0 +123 62 model.hidden_dropout_rate 0.4996953569598529 +123 62 model.num_filters 7.0 +123 62 optimizer.lr 0.0031181550207283444 +123 62 negative_sampler.num_negs_per_pos 34.0 +123 62 training.batch_size 1.0 +123 1 dataset """kinships""" +123 1 model """convkb""" +123 1 loss """softplus""" +123 1 regularizer """no""" +123 1 optimizer """adam""" +123 1 training_loop """owa""" +123 1 negative_sampler """basic""" +123 1 evaluator """rankbased""" +123 2 dataset """kinships""" +123 2 model """convkb""" +123 2 loss """softplus""" +123 2 regularizer """no""" +123 2 optimizer """adam""" +123 2 training_loop """owa""" +123 2 negative_sampler """basic""" +123 2 evaluator """rankbased""" +123 3 dataset """kinships""" +123 3 model """convkb""" +123 3 loss """softplus""" +123 3 regularizer """no""" +123 3 optimizer """adam""" +123 3 training_loop """owa""" +123 3 negative_sampler """basic""" +123 3 evaluator """rankbased""" +123 4 dataset """kinships""" +123 4 model """convkb""" +123 4 loss """softplus""" +123 4 regularizer """no""" +123 4 optimizer """adam""" +123 4 training_loop """owa""" +123 4 negative_sampler """basic""" +123 4 evaluator """rankbased""" +123 5 dataset """kinships""" +123 5 model """convkb""" +123 5 loss """softplus""" +123 5 regularizer """no""" +123 5 optimizer """adam""" +123 5 training_loop """owa""" +123 5 negative_sampler """basic""" +123 5 evaluator """rankbased""" +123 6 dataset """kinships""" +123 6 model """convkb""" +123 6 loss """softplus""" +123 6 regularizer """no""" +123 6 optimizer """adam""" +123 6 training_loop """owa""" +123 6 negative_sampler """basic""" +123 6 evaluator """rankbased""" +123 7 dataset """kinships""" +123 7 model """convkb""" +123 7 loss """softplus""" +123 7 regularizer """no""" +123 7 optimizer """adam""" +123 7 training_loop """owa""" +123 7 negative_sampler """basic""" +123 7 evaluator """rankbased""" +123 8 dataset 
"""kinships""" +123 8 model """convkb""" +123 8 loss """softplus""" +123 8 regularizer """no""" +123 8 optimizer """adam""" +123 8 training_loop """owa""" +123 8 negative_sampler """basic""" +123 8 evaluator """rankbased""" +123 9 dataset """kinships""" +123 9 model """convkb""" +123 9 loss """softplus""" +123 9 regularizer """no""" +123 9 optimizer """adam""" +123 9 training_loop """owa""" +123 9 negative_sampler """basic""" +123 9 evaluator """rankbased""" +123 10 dataset """kinships""" +123 10 model """convkb""" +123 10 loss """softplus""" +123 10 regularizer """no""" +123 10 optimizer """adam""" +123 10 training_loop """owa""" +123 10 negative_sampler """basic""" +123 10 evaluator """rankbased""" +123 11 dataset """kinships""" +123 11 model """convkb""" +123 11 loss """softplus""" +123 11 regularizer """no""" +123 11 optimizer """adam""" +123 11 training_loop """owa""" +123 11 negative_sampler """basic""" +123 11 evaluator """rankbased""" +123 12 dataset """kinships""" +123 12 model """convkb""" +123 12 loss """softplus""" +123 12 regularizer """no""" +123 12 optimizer """adam""" +123 12 training_loop """owa""" +123 12 negative_sampler """basic""" +123 12 evaluator """rankbased""" +123 13 dataset """kinships""" +123 13 model """convkb""" +123 13 loss """softplus""" +123 13 regularizer """no""" +123 13 optimizer """adam""" +123 13 training_loop """owa""" +123 13 negative_sampler """basic""" +123 13 evaluator """rankbased""" +123 14 dataset """kinships""" +123 14 model """convkb""" +123 14 loss """softplus""" +123 14 regularizer """no""" +123 14 optimizer """adam""" +123 14 training_loop """owa""" +123 14 negative_sampler """basic""" +123 14 evaluator """rankbased""" +123 15 dataset """kinships""" +123 15 model """convkb""" +123 15 loss """softplus""" +123 15 regularizer """no""" +123 15 optimizer """adam""" +123 15 training_loop """owa""" +123 15 negative_sampler """basic""" +123 15 evaluator """rankbased""" +123 16 dataset """kinships""" +123 16 model 
"""convkb""" +123 16 loss """softplus""" +123 16 regularizer """no""" +123 16 optimizer """adam""" +123 16 training_loop """owa""" +123 16 negative_sampler """basic""" +123 16 evaluator """rankbased""" +123 17 dataset """kinships""" +123 17 model """convkb""" +123 17 loss """softplus""" +123 17 regularizer """no""" +123 17 optimizer """adam""" +123 17 training_loop """owa""" +123 17 negative_sampler """basic""" +123 17 evaluator """rankbased""" +123 18 dataset """kinships""" +123 18 model """convkb""" +123 18 loss """softplus""" +123 18 regularizer """no""" +123 18 optimizer """adam""" +123 18 training_loop """owa""" +123 18 negative_sampler """basic""" +123 18 evaluator """rankbased""" +123 19 dataset """kinships""" +123 19 model """convkb""" +123 19 loss """softplus""" +123 19 regularizer """no""" +123 19 optimizer """adam""" +123 19 training_loop """owa""" +123 19 negative_sampler """basic""" +123 19 evaluator """rankbased""" +123 20 dataset """kinships""" +123 20 model """convkb""" +123 20 loss """softplus""" +123 20 regularizer """no""" +123 20 optimizer """adam""" +123 20 training_loop """owa""" +123 20 negative_sampler """basic""" +123 20 evaluator """rankbased""" +123 21 dataset """kinships""" +123 21 model """convkb""" +123 21 loss """softplus""" +123 21 regularizer """no""" +123 21 optimizer """adam""" +123 21 training_loop """owa""" +123 21 negative_sampler """basic""" +123 21 evaluator """rankbased""" +123 22 dataset """kinships""" +123 22 model """convkb""" +123 22 loss """softplus""" +123 22 regularizer """no""" +123 22 optimizer """adam""" +123 22 training_loop """owa""" +123 22 negative_sampler """basic""" +123 22 evaluator """rankbased""" +123 23 dataset """kinships""" +123 23 model """convkb""" +123 23 loss """softplus""" +123 23 regularizer """no""" +123 23 optimizer """adam""" +123 23 training_loop """owa""" +123 23 negative_sampler """basic""" +123 23 evaluator """rankbased""" +123 24 dataset """kinships""" +123 24 model """convkb""" +123 24 
loss """softplus""" +123 24 regularizer """no""" +123 24 optimizer """adam""" +123 24 training_loop """owa""" +123 24 negative_sampler """basic""" +123 24 evaluator """rankbased""" +123 25 dataset """kinships""" +123 25 model """convkb""" +123 25 loss """softplus""" +123 25 regularizer """no""" +123 25 optimizer """adam""" +123 25 training_loop """owa""" +123 25 negative_sampler """basic""" +123 25 evaluator """rankbased""" +123 26 dataset """kinships""" +123 26 model """convkb""" +123 26 loss """softplus""" +123 26 regularizer """no""" +123 26 optimizer """adam""" +123 26 training_loop """owa""" +123 26 negative_sampler """basic""" +123 26 evaluator """rankbased""" +123 27 dataset """kinships""" +123 27 model """convkb""" +123 27 loss """softplus""" +123 27 regularizer """no""" +123 27 optimizer """adam""" +123 27 training_loop """owa""" +123 27 negative_sampler """basic""" +123 27 evaluator """rankbased""" +123 28 dataset """kinships""" +123 28 model """convkb""" +123 28 loss """softplus""" +123 28 regularizer """no""" +123 28 optimizer """adam""" +123 28 training_loop """owa""" +123 28 negative_sampler """basic""" +123 28 evaluator """rankbased""" +123 29 dataset """kinships""" +123 29 model """convkb""" +123 29 loss """softplus""" +123 29 regularizer """no""" +123 29 optimizer """adam""" +123 29 training_loop """owa""" +123 29 negative_sampler """basic""" +123 29 evaluator """rankbased""" +123 30 dataset """kinships""" +123 30 model """convkb""" +123 30 loss """softplus""" +123 30 regularizer """no""" +123 30 optimizer """adam""" +123 30 training_loop """owa""" +123 30 negative_sampler """basic""" +123 30 evaluator """rankbased""" +123 31 dataset """kinships""" +123 31 model """convkb""" +123 31 loss """softplus""" +123 31 regularizer """no""" +123 31 optimizer """adam""" +123 31 training_loop """owa""" +123 31 negative_sampler """basic""" +123 31 evaluator """rankbased""" +123 32 dataset """kinships""" +123 32 model """convkb""" +123 32 loss """softplus""" 
+123 32 regularizer """no""" +123 32 optimizer """adam""" +123 32 training_loop """owa""" +123 32 negative_sampler """basic""" +123 32 evaluator """rankbased""" +123 33 dataset """kinships""" +123 33 model """convkb""" +123 33 loss """softplus""" +123 33 regularizer """no""" +123 33 optimizer """adam""" +123 33 training_loop """owa""" +123 33 negative_sampler """basic""" +123 33 evaluator """rankbased""" +123 34 dataset """kinships""" +123 34 model """convkb""" +123 34 loss """softplus""" +123 34 regularizer """no""" +123 34 optimizer """adam""" +123 34 training_loop """owa""" +123 34 negative_sampler """basic""" +123 34 evaluator """rankbased""" +123 35 dataset """kinships""" +123 35 model """convkb""" +123 35 loss """softplus""" +123 35 regularizer """no""" +123 35 optimizer """adam""" +123 35 training_loop """owa""" +123 35 negative_sampler """basic""" +123 35 evaluator """rankbased""" +123 36 dataset """kinships""" +123 36 model """convkb""" +123 36 loss """softplus""" +123 36 regularizer """no""" +123 36 optimizer """adam""" +123 36 training_loop """owa""" +123 36 negative_sampler """basic""" +123 36 evaluator """rankbased""" +123 37 dataset """kinships""" +123 37 model """convkb""" +123 37 loss """softplus""" +123 37 regularizer """no""" +123 37 optimizer """adam""" +123 37 training_loop """owa""" +123 37 negative_sampler """basic""" +123 37 evaluator """rankbased""" +123 38 dataset """kinships""" +123 38 model """convkb""" +123 38 loss """softplus""" +123 38 regularizer """no""" +123 38 optimizer """adam""" +123 38 training_loop """owa""" +123 38 negative_sampler """basic""" +123 38 evaluator """rankbased""" +123 39 dataset """kinships""" +123 39 model """convkb""" +123 39 loss """softplus""" +123 39 regularizer """no""" +123 39 optimizer """adam""" +123 39 training_loop """owa""" +123 39 negative_sampler """basic""" +123 39 evaluator """rankbased""" +123 40 dataset """kinships""" +123 40 model """convkb""" +123 40 loss """softplus""" +123 40 regularizer 
"""no""" +123 40 optimizer """adam""" +123 40 training_loop """owa""" +123 40 negative_sampler """basic""" +123 40 evaluator """rankbased""" +123 41 dataset """kinships""" +123 41 model """convkb""" +123 41 loss """softplus""" +123 41 regularizer """no""" +123 41 optimizer """adam""" +123 41 training_loop """owa""" +123 41 negative_sampler """basic""" +123 41 evaluator """rankbased""" +123 42 dataset """kinships""" +123 42 model """convkb""" +123 42 loss """softplus""" +123 42 regularizer """no""" +123 42 optimizer """adam""" +123 42 training_loop """owa""" +123 42 negative_sampler """basic""" +123 42 evaluator """rankbased""" +123 43 dataset """kinships""" +123 43 model """convkb""" +123 43 loss """softplus""" +123 43 regularizer """no""" +123 43 optimizer """adam""" +123 43 training_loop """owa""" +123 43 negative_sampler """basic""" +123 43 evaluator """rankbased""" +123 44 dataset """kinships""" +123 44 model """convkb""" +123 44 loss """softplus""" +123 44 regularizer """no""" +123 44 optimizer """adam""" +123 44 training_loop """owa""" +123 44 negative_sampler """basic""" +123 44 evaluator """rankbased""" +123 45 dataset """kinships""" +123 45 model """convkb""" +123 45 loss """softplus""" +123 45 regularizer """no""" +123 45 optimizer """adam""" +123 45 training_loop """owa""" +123 45 negative_sampler """basic""" +123 45 evaluator """rankbased""" +123 46 dataset """kinships""" +123 46 model """convkb""" +123 46 loss """softplus""" +123 46 regularizer """no""" +123 46 optimizer """adam""" +123 46 training_loop """owa""" +123 46 negative_sampler """basic""" +123 46 evaluator """rankbased""" +123 47 dataset """kinships""" +123 47 model """convkb""" +123 47 loss """softplus""" +123 47 regularizer """no""" +123 47 optimizer """adam""" +123 47 training_loop """owa""" +123 47 negative_sampler """basic""" +123 47 evaluator """rankbased""" +123 48 dataset """kinships""" +123 48 model """convkb""" +123 48 loss """softplus""" +123 48 regularizer """no""" +123 48 
optimizer """adam""" +123 48 training_loop """owa""" +123 48 negative_sampler """basic""" +123 48 evaluator """rankbased""" +123 49 dataset """kinships""" +123 49 model """convkb""" +123 49 loss """softplus""" +123 49 regularizer """no""" +123 49 optimizer """adam""" +123 49 training_loop """owa""" +123 49 negative_sampler """basic""" +123 49 evaluator """rankbased""" +123 50 dataset """kinships""" +123 50 model """convkb""" +123 50 loss """softplus""" +123 50 regularizer """no""" +123 50 optimizer """adam""" +123 50 training_loop """owa""" +123 50 negative_sampler """basic""" +123 50 evaluator """rankbased""" +123 51 dataset """kinships""" +123 51 model """convkb""" +123 51 loss """softplus""" +123 51 regularizer """no""" +123 51 optimizer """adam""" +123 51 training_loop """owa""" +123 51 negative_sampler """basic""" +123 51 evaluator """rankbased""" +123 52 dataset """kinships""" +123 52 model """convkb""" +123 52 loss """softplus""" +123 52 regularizer """no""" +123 52 optimizer """adam""" +123 52 training_loop """owa""" +123 52 negative_sampler """basic""" +123 52 evaluator """rankbased""" +123 53 dataset """kinships""" +123 53 model """convkb""" +123 53 loss """softplus""" +123 53 regularizer """no""" +123 53 optimizer """adam""" +123 53 training_loop """owa""" +123 53 negative_sampler """basic""" +123 53 evaluator """rankbased""" +123 54 dataset """kinships""" +123 54 model """convkb""" +123 54 loss """softplus""" +123 54 regularizer """no""" +123 54 optimizer """adam""" +123 54 training_loop """owa""" +123 54 negative_sampler """basic""" +123 54 evaluator """rankbased""" +123 55 dataset """kinships""" +123 55 model """convkb""" +123 55 loss """softplus""" +123 55 regularizer """no""" +123 55 optimizer """adam""" +123 55 training_loop """owa""" +123 55 negative_sampler """basic""" +123 55 evaluator """rankbased""" +123 56 dataset """kinships""" +123 56 model """convkb""" +123 56 loss """softplus""" +123 56 regularizer """no""" +123 56 optimizer """adam""" 
+123 56 training_loop """owa""" +123 56 negative_sampler """basic""" +123 56 evaluator """rankbased""" +123 57 dataset """kinships""" +123 57 model """convkb""" +123 57 loss """softplus""" +123 57 regularizer """no""" +123 57 optimizer """adam""" +123 57 training_loop """owa""" +123 57 negative_sampler """basic""" +123 57 evaluator """rankbased""" +123 58 dataset """kinships""" +123 58 model """convkb""" +123 58 loss """softplus""" +123 58 regularizer """no""" +123 58 optimizer """adam""" +123 58 training_loop """owa""" +123 58 negative_sampler """basic""" +123 58 evaluator """rankbased""" +123 59 dataset """kinships""" +123 59 model """convkb""" +123 59 loss """softplus""" +123 59 regularizer """no""" +123 59 optimizer """adam""" +123 59 training_loop """owa""" +123 59 negative_sampler """basic""" +123 59 evaluator """rankbased""" +123 60 dataset """kinships""" +123 60 model """convkb""" +123 60 loss """softplus""" +123 60 regularizer """no""" +123 60 optimizer """adam""" +123 60 training_loop """owa""" +123 60 negative_sampler """basic""" +123 60 evaluator """rankbased""" +123 61 dataset """kinships""" +123 61 model """convkb""" +123 61 loss """softplus""" +123 61 regularizer """no""" +123 61 optimizer """adam""" +123 61 training_loop """owa""" +123 61 negative_sampler """basic""" +123 61 evaluator """rankbased""" +123 62 dataset """kinships""" +123 62 model """convkb""" +123 62 loss """softplus""" +123 62 regularizer """no""" +123 62 optimizer """adam""" +123 62 training_loop """owa""" +123 62 negative_sampler """basic""" +123 62 evaluator """rankbased""" +124 1 model.embedding_dim 0.0 +124 1 model.hidden_dropout_rate 0.43915215384173617 +124 1 model.num_filters 5.0 +124 1 loss.margin 6.34618046334551 +124 1 optimizer.lr 0.06528551548795715 +124 1 negative_sampler.num_negs_per_pos 91.0 +124 1 training.batch_size 2.0 +124 2 model.embedding_dim 0.0 +124 2 model.hidden_dropout_rate 0.2278852717141011 +124 2 model.num_filters 7.0 +124 2 loss.margin 
3.9286326051186977 +124 2 optimizer.lr 0.01220020046033396 +124 2 negative_sampler.num_negs_per_pos 54.0 +124 2 training.batch_size 1.0 +124 3 model.embedding_dim 0.0 +124 3 model.hidden_dropout_rate 0.15732000193637702 +124 3 model.num_filters 8.0 +124 3 loss.margin 1.0347584555182565 +124 3 optimizer.lr 0.002305258200070545 +124 3 negative_sampler.num_negs_per_pos 52.0 +124 3 training.batch_size 0.0 +124 4 model.embedding_dim 1.0 +124 4 model.hidden_dropout_rate 0.3339788078478786 +124 4 model.num_filters 4.0 +124 4 loss.margin 2.230489875963262 +124 4 optimizer.lr 0.05170268228824632 +124 4 negative_sampler.num_negs_per_pos 76.0 +124 4 training.batch_size 1.0 +124 5 model.embedding_dim 1.0 +124 5 model.hidden_dropout_rate 0.16470617840210738 +124 5 model.num_filters 9.0 +124 5 loss.margin 7.689962595213423 +124 5 optimizer.lr 0.002356213059414661 +124 5 negative_sampler.num_negs_per_pos 79.0 +124 5 training.batch_size 2.0 +124 6 model.embedding_dim 2.0 +124 6 model.hidden_dropout_rate 0.47149717452762857 +124 6 model.num_filters 0.0 +124 6 loss.margin 6.048962985816623 +124 6 optimizer.lr 0.012033938199696581 +124 6 negative_sampler.num_negs_per_pos 92.0 +124 6 training.batch_size 0.0 +124 7 model.embedding_dim 0.0 +124 7 model.hidden_dropout_rate 0.19278375287404292 +124 7 model.num_filters 2.0 +124 7 loss.margin 6.823534871776767 +124 7 optimizer.lr 0.0012496740712896904 +124 7 negative_sampler.num_negs_per_pos 51.0 +124 7 training.batch_size 0.0 +124 8 model.embedding_dim 2.0 +124 8 model.hidden_dropout_rate 0.15407272358627278 +124 8 model.num_filters 2.0 +124 8 loss.margin 8.187085050190099 +124 8 optimizer.lr 0.0017530630992054967 +124 8 negative_sampler.num_negs_per_pos 4.0 +124 8 training.batch_size 0.0 +124 9 model.embedding_dim 2.0 +124 9 model.hidden_dropout_rate 0.4125868673048869 +124 9 model.num_filters 9.0 +124 9 loss.margin 6.2031468745874685 +124 9 optimizer.lr 0.03590225059734754 +124 9 negative_sampler.num_negs_per_pos 65.0 +124 9 
training.batch_size 2.0 +124 10 model.embedding_dim 1.0 +124 10 model.hidden_dropout_rate 0.376903621337558 +124 10 model.num_filters 0.0 +124 10 loss.margin 3.3381719439708677 +124 10 optimizer.lr 0.0013189258431646645 +124 10 negative_sampler.num_negs_per_pos 99.0 +124 10 training.batch_size 0.0 +124 11 model.embedding_dim 1.0 +124 11 model.hidden_dropout_rate 0.25373467920438 +124 11 model.num_filters 3.0 +124 11 loss.margin 7.6153303622201385 +124 11 optimizer.lr 0.010114801432154461 +124 11 negative_sampler.num_negs_per_pos 1.0 +124 11 training.batch_size 2.0 +124 12 model.embedding_dim 2.0 +124 12 model.hidden_dropout_rate 0.34796105289047174 +124 12 model.num_filters 6.0 +124 12 loss.margin 8.979768859527919 +124 12 optimizer.lr 0.005425394664841892 +124 12 negative_sampler.num_negs_per_pos 17.0 +124 12 training.batch_size 2.0 +124 13 model.embedding_dim 1.0 +124 13 model.hidden_dropout_rate 0.46071844639867804 +124 13 model.num_filters 2.0 +124 13 loss.margin 8.755011831084731 +124 13 optimizer.lr 0.011147989621130045 +124 13 negative_sampler.num_negs_per_pos 82.0 +124 13 training.batch_size 2.0 +124 14 model.embedding_dim 0.0 +124 14 model.hidden_dropout_rate 0.318351217431378 +124 14 model.num_filters 7.0 +124 14 loss.margin 3.563213404392719 +124 14 optimizer.lr 0.010480866883315973 +124 14 negative_sampler.num_negs_per_pos 64.0 +124 14 training.batch_size 0.0 +124 15 model.embedding_dim 1.0 +124 15 model.hidden_dropout_rate 0.3498676324932535 +124 15 model.num_filters 6.0 +124 15 loss.margin 5.034245193785463 +124 15 optimizer.lr 0.005175391798721567 +124 15 negative_sampler.num_negs_per_pos 23.0 +124 15 training.batch_size 0.0 +124 16 model.embedding_dim 2.0 +124 16 model.hidden_dropout_rate 0.24103576748471733 +124 16 model.num_filters 2.0 +124 16 loss.margin 2.082076151966428 +124 16 optimizer.lr 0.0017949718341946023 +124 16 negative_sampler.num_negs_per_pos 41.0 +124 16 training.batch_size 0.0 +124 17 model.embedding_dim 0.0 +124 17 
model.hidden_dropout_rate 0.14183174108199567 +124 17 model.num_filters 7.0 +124 17 loss.margin 4.231172260729291 +124 17 optimizer.lr 0.012865128281815628 +124 17 negative_sampler.num_negs_per_pos 47.0 +124 17 training.batch_size 2.0 +124 18 model.embedding_dim 2.0 +124 18 model.hidden_dropout_rate 0.1460981912252329 +124 18 model.num_filters 4.0 +124 18 loss.margin 9.76423930919724 +124 18 optimizer.lr 0.001033743117770613 +124 18 negative_sampler.num_negs_per_pos 57.0 +124 18 training.batch_size 2.0 +124 19 model.embedding_dim 1.0 +124 19 model.hidden_dropout_rate 0.445382748507331 +124 19 model.num_filters 7.0 +124 19 loss.margin 9.998449745035497 +124 19 optimizer.lr 0.009485622588286535 +124 19 negative_sampler.num_negs_per_pos 60.0 +124 19 training.batch_size 2.0 +124 20 model.embedding_dim 1.0 +124 20 model.hidden_dropout_rate 0.44073490953272204 +124 20 model.num_filters 3.0 +124 20 loss.margin 2.7927230175807924 +124 20 optimizer.lr 0.002937037179967342 +124 20 negative_sampler.num_negs_per_pos 72.0 +124 20 training.batch_size 1.0 +124 21 model.embedding_dim 2.0 +124 21 model.hidden_dropout_rate 0.12277460685176816 +124 21 model.num_filters 3.0 +124 21 loss.margin 8.404206459523841 +124 21 optimizer.lr 0.011961115684881014 +124 21 negative_sampler.num_negs_per_pos 14.0 +124 21 training.batch_size 1.0 +124 22 model.embedding_dim 2.0 +124 22 model.hidden_dropout_rate 0.1596269042688969 +124 22 model.num_filters 0.0 +124 22 loss.margin 0.9193289813470513 +124 22 optimizer.lr 0.02151170399118083 +124 22 negative_sampler.num_negs_per_pos 82.0 +124 22 training.batch_size 2.0 +124 23 model.embedding_dim 1.0 +124 23 model.hidden_dropout_rate 0.35395705824588075 +124 23 model.num_filters 6.0 +124 23 loss.margin 1.8519124112560883 +124 23 optimizer.lr 0.02091154096786571 +124 23 negative_sampler.num_negs_per_pos 28.0 +124 23 training.batch_size 2.0 +124 24 model.embedding_dim 2.0 +124 24 model.hidden_dropout_rate 0.12039356162499498 +124 24 model.num_filters 0.0 
+124 24 loss.margin 2.6717389289146576 +124 24 optimizer.lr 0.06444421990343824 +124 24 negative_sampler.num_negs_per_pos 15.0 +124 24 training.batch_size 1.0 +124 25 model.embedding_dim 0.0 +124 25 model.hidden_dropout_rate 0.11219128229134517 +124 25 model.num_filters 3.0 +124 25 loss.margin 5.016515140801812 +124 25 optimizer.lr 0.060359325499915045 +124 25 negative_sampler.num_negs_per_pos 18.0 +124 25 training.batch_size 1.0 +124 26 model.embedding_dim 2.0 +124 26 model.hidden_dropout_rate 0.1967761376096161 +124 26 model.num_filters 8.0 +124 26 loss.margin 3.2150961598211523 +124 26 optimizer.lr 0.09798553825354994 +124 26 negative_sampler.num_negs_per_pos 8.0 +124 26 training.batch_size 1.0 +124 27 model.embedding_dim 1.0 +124 27 model.hidden_dropout_rate 0.22927058184320828 +124 27 model.num_filters 4.0 +124 27 loss.margin 4.627504539521961 +124 27 optimizer.lr 0.04203793411536732 +124 27 negative_sampler.num_negs_per_pos 57.0 +124 27 training.batch_size 0.0 +124 28 model.embedding_dim 1.0 +124 28 model.hidden_dropout_rate 0.41666521329160533 +124 28 model.num_filters 8.0 +124 28 loss.margin 1.7350080400662389 +124 28 optimizer.lr 0.0012885430687670305 +124 28 negative_sampler.num_negs_per_pos 42.0 +124 28 training.batch_size 0.0 +124 29 model.embedding_dim 1.0 +124 29 model.hidden_dropout_rate 0.2902680124099753 +124 29 model.num_filters 0.0 +124 29 loss.margin 8.658292054066697 +124 29 optimizer.lr 0.004199303506567509 +124 29 negative_sampler.num_negs_per_pos 94.0 +124 29 training.batch_size 2.0 +124 30 model.embedding_dim 0.0 +124 30 model.hidden_dropout_rate 0.3388350699798088 +124 30 model.num_filters 8.0 +124 30 loss.margin 5.714297288409601 +124 30 optimizer.lr 0.0021619190436608536 +124 30 negative_sampler.num_negs_per_pos 54.0 +124 30 training.batch_size 0.0 +124 31 model.embedding_dim 0.0 +124 31 model.hidden_dropout_rate 0.16138166546486624 +124 31 model.num_filters 5.0 +124 31 loss.margin 5.862216257481996 +124 31 optimizer.lr 
0.06430400425754501 +124 31 negative_sampler.num_negs_per_pos 5.0 +124 31 training.batch_size 1.0 +124 32 model.embedding_dim 0.0 +124 32 model.hidden_dropout_rate 0.24726438791458968 +124 32 model.num_filters 8.0 +124 32 loss.margin 7.671098127925542 +124 32 optimizer.lr 0.011201437193386754 +124 32 negative_sampler.num_negs_per_pos 70.0 +124 32 training.batch_size 1.0 +124 33 model.embedding_dim 1.0 +124 33 model.hidden_dropout_rate 0.4488680986899904 +124 33 model.num_filters 1.0 +124 33 loss.margin 0.5279601513070408 +124 33 optimizer.lr 0.014147848435507322 +124 33 negative_sampler.num_negs_per_pos 52.0 +124 33 training.batch_size 1.0 +124 34 model.embedding_dim 2.0 +124 34 model.hidden_dropout_rate 0.2395090877998284 +124 34 model.num_filters 6.0 +124 34 loss.margin 9.06766974313081 +124 34 optimizer.lr 0.026578331214228923 +124 34 negative_sampler.num_negs_per_pos 44.0 +124 34 training.batch_size 1.0 +124 1 dataset """kinships""" +124 1 model """convkb""" +124 1 loss """marginranking""" +124 1 regularizer """no""" +124 1 optimizer """adam""" +124 1 training_loop """owa""" +124 1 negative_sampler """basic""" +124 1 evaluator """rankbased""" +124 2 dataset """kinships""" +124 2 model """convkb""" +124 2 loss """marginranking""" +124 2 regularizer """no""" +124 2 optimizer """adam""" +124 2 training_loop """owa""" +124 2 negative_sampler """basic""" +124 2 evaluator """rankbased""" +124 3 dataset """kinships""" +124 3 model """convkb""" +124 3 loss """marginranking""" +124 3 regularizer """no""" +124 3 optimizer """adam""" +124 3 training_loop """owa""" +124 3 negative_sampler """basic""" +124 3 evaluator """rankbased""" +124 4 dataset """kinships""" +124 4 model """convkb""" +124 4 loss """marginranking""" +124 4 regularizer """no""" +124 4 optimizer """adam""" +124 4 training_loop """owa""" +124 4 negative_sampler """basic""" +124 4 evaluator """rankbased""" +124 5 dataset """kinships""" +124 5 model """convkb""" +124 5 loss """marginranking""" +124 5 
regularizer """no""" +124 5 optimizer """adam""" +124 5 training_loop """owa""" +124 5 negative_sampler """basic""" +124 5 evaluator """rankbased""" +124 6 dataset """kinships""" +124 6 model """convkb""" +124 6 loss """marginranking""" +124 6 regularizer """no""" +124 6 optimizer """adam""" +124 6 training_loop """owa""" +124 6 negative_sampler """basic""" +124 6 evaluator """rankbased""" +124 7 dataset """kinships""" +124 7 model """convkb""" +124 7 loss """marginranking""" +124 7 regularizer """no""" +124 7 optimizer """adam""" +124 7 training_loop """owa""" +124 7 negative_sampler """basic""" +124 7 evaluator """rankbased""" +124 8 dataset """kinships""" +124 8 model """convkb""" +124 8 loss """marginranking""" +124 8 regularizer """no""" +124 8 optimizer """adam""" +124 8 training_loop """owa""" +124 8 negative_sampler """basic""" +124 8 evaluator """rankbased""" +124 9 dataset """kinships""" +124 9 model """convkb""" +124 9 loss """marginranking""" +124 9 regularizer """no""" +124 9 optimizer """adam""" +124 9 training_loop """owa""" +124 9 negative_sampler """basic""" +124 9 evaluator """rankbased""" +124 10 dataset """kinships""" +124 10 model """convkb""" +124 10 loss """marginranking""" +124 10 regularizer """no""" +124 10 optimizer """adam""" +124 10 training_loop """owa""" +124 10 negative_sampler """basic""" +124 10 evaluator """rankbased""" +124 11 dataset """kinships""" +124 11 model """convkb""" +124 11 loss """marginranking""" +124 11 regularizer """no""" +124 11 optimizer """adam""" +124 11 training_loop """owa""" +124 11 negative_sampler """basic""" +124 11 evaluator """rankbased""" +124 12 dataset """kinships""" +124 12 model """convkb""" +124 12 loss """marginranking""" +124 12 regularizer """no""" +124 12 optimizer """adam""" +124 12 training_loop """owa""" +124 12 negative_sampler """basic""" +124 12 evaluator """rankbased""" +124 13 dataset """kinships""" +124 13 model """convkb""" +124 13 loss """marginranking""" +124 13 regularizer 
"""no""" +124 13 optimizer """adam""" +124 13 training_loop """owa""" +124 13 negative_sampler """basic""" +124 13 evaluator """rankbased""" +124 14 dataset """kinships""" +124 14 model """convkb""" +124 14 loss """marginranking""" +124 14 regularizer """no""" +124 14 optimizer """adam""" +124 14 training_loop """owa""" +124 14 negative_sampler """basic""" +124 14 evaluator """rankbased""" +124 15 dataset """kinships""" +124 15 model """convkb""" +124 15 loss """marginranking""" +124 15 regularizer """no""" +124 15 optimizer """adam""" +124 15 training_loop """owa""" +124 15 negative_sampler """basic""" +124 15 evaluator """rankbased""" +124 16 dataset """kinships""" +124 16 model """convkb""" +124 16 loss """marginranking""" +124 16 regularizer """no""" +124 16 optimizer """adam""" +124 16 training_loop """owa""" +124 16 negative_sampler """basic""" +124 16 evaluator """rankbased""" +124 17 dataset """kinships""" +124 17 model """convkb""" +124 17 loss """marginranking""" +124 17 regularizer """no""" +124 17 optimizer """adam""" +124 17 training_loop """owa""" +124 17 negative_sampler """basic""" +124 17 evaluator """rankbased""" +124 18 dataset """kinships""" +124 18 model """convkb""" +124 18 loss """marginranking""" +124 18 regularizer """no""" +124 18 optimizer """adam""" +124 18 training_loop """owa""" +124 18 negative_sampler """basic""" +124 18 evaluator """rankbased""" +124 19 dataset """kinships""" +124 19 model """convkb""" +124 19 loss """marginranking""" +124 19 regularizer """no""" +124 19 optimizer """adam""" +124 19 training_loop """owa""" +124 19 negative_sampler """basic""" +124 19 evaluator """rankbased""" +124 20 dataset """kinships""" +124 20 model """convkb""" +124 20 loss """marginranking""" +124 20 regularizer """no""" +124 20 optimizer """adam""" +124 20 training_loop """owa""" +124 20 negative_sampler """basic""" +124 20 evaluator """rankbased""" +124 21 dataset """kinships""" +124 21 model """convkb""" +124 21 loss """marginranking""" 
+124 21 regularizer """no""" +124 21 optimizer """adam""" +124 21 training_loop """owa""" +124 21 negative_sampler """basic""" +124 21 evaluator """rankbased""" +124 22 dataset """kinships""" +124 22 model """convkb""" +124 22 loss """marginranking""" +124 22 regularizer """no""" +124 22 optimizer """adam""" +124 22 training_loop """owa""" +124 22 negative_sampler """basic""" +124 22 evaluator """rankbased""" +124 23 dataset """kinships""" +124 23 model """convkb""" +124 23 loss """marginranking""" +124 23 regularizer """no""" +124 23 optimizer """adam""" +124 23 training_loop """owa""" +124 23 negative_sampler """basic""" +124 23 evaluator """rankbased""" +124 24 dataset """kinships""" +124 24 model """convkb""" +124 24 loss """marginranking""" +124 24 regularizer """no""" +124 24 optimizer """adam""" +124 24 training_loop """owa""" +124 24 negative_sampler """basic""" +124 24 evaluator """rankbased""" +124 25 dataset """kinships""" +124 25 model """convkb""" +124 25 loss """marginranking""" +124 25 regularizer """no""" +124 25 optimizer """adam""" +124 25 training_loop """owa""" +124 25 negative_sampler """basic""" +124 25 evaluator """rankbased""" +124 26 dataset """kinships""" +124 26 model """convkb""" +124 26 loss """marginranking""" +124 26 regularizer """no""" +124 26 optimizer """adam""" +124 26 training_loop """owa""" +124 26 negative_sampler """basic""" +124 26 evaluator """rankbased""" +124 27 dataset """kinships""" +124 27 model """convkb""" +124 27 loss """marginranking""" +124 27 regularizer """no""" +124 27 optimizer """adam""" +124 27 training_loop """owa""" +124 27 negative_sampler """basic""" +124 27 evaluator """rankbased""" +124 28 dataset """kinships""" +124 28 model """convkb""" +124 28 loss """marginranking""" +124 28 regularizer """no""" +124 28 optimizer """adam""" +124 28 training_loop """owa""" +124 28 negative_sampler """basic""" +124 28 evaluator """rankbased""" +124 29 dataset """kinships""" +124 29 model """convkb""" +124 29 loss 
"""marginranking""" +124 29 regularizer """no""" +124 29 optimizer """adam""" +124 29 training_loop """owa""" +124 29 negative_sampler """basic""" +124 29 evaluator """rankbased""" +124 30 dataset """kinships""" +124 30 model """convkb""" +124 30 loss """marginranking""" +124 30 regularizer """no""" +124 30 optimizer """adam""" +124 30 training_loop """owa""" +124 30 negative_sampler """basic""" +124 30 evaluator """rankbased""" +124 31 dataset """kinships""" +124 31 model """convkb""" +124 31 loss """marginranking""" +124 31 regularizer """no""" +124 31 optimizer """adam""" +124 31 training_loop """owa""" +124 31 negative_sampler """basic""" +124 31 evaluator """rankbased""" +124 32 dataset """kinships""" +124 32 model """convkb""" +124 32 loss """marginranking""" +124 32 regularizer """no""" +124 32 optimizer """adam""" +124 32 training_loop """owa""" +124 32 negative_sampler """basic""" +124 32 evaluator """rankbased""" +124 33 dataset """kinships""" +124 33 model """convkb""" +124 33 loss """marginranking""" +124 33 regularizer """no""" +124 33 optimizer """adam""" +124 33 training_loop """owa""" +124 33 negative_sampler """basic""" +124 33 evaluator """rankbased""" +124 34 dataset """kinships""" +124 34 model """convkb""" +124 34 loss """marginranking""" +124 34 regularizer """no""" +124 34 optimizer """adam""" +124 34 training_loop """owa""" +124 34 negative_sampler """basic""" +124 34 evaluator """rankbased""" +125 1 model.embedding_dim 1.0 +125 1 model.hidden_dropout_rate 0.2732528077435701 +125 1 model.num_filters 5.0 +125 1 loss.margin 6.624728884584682 +125 1 optimizer.lr 0.003228903600863779 +125 1 negative_sampler.num_negs_per_pos 68.0 +125 1 training.batch_size 0.0 +125 2 model.embedding_dim 2.0 +125 2 model.hidden_dropout_rate 0.20623941861620582 +125 2 model.num_filters 4.0 +125 2 loss.margin 6.636917142603933 +125 2 optimizer.lr 0.029709847255273415 +125 2 negative_sampler.num_negs_per_pos 80.0 +125 2 training.batch_size 1.0 +125 3 
model.embedding_dim 2.0 +125 3 model.hidden_dropout_rate 0.22581081072576215 +125 3 model.num_filters 6.0 +125 3 loss.margin 1.6397284727499641 +125 3 optimizer.lr 0.038304323500432044 +125 3 negative_sampler.num_negs_per_pos 92.0 +125 3 training.batch_size 1.0 +125 4 model.embedding_dim 0.0 +125 4 model.hidden_dropout_rate 0.43320635874696806 +125 4 model.num_filters 9.0 +125 4 loss.margin 3.800331420607652 +125 4 optimizer.lr 0.0795941065286011 +125 4 negative_sampler.num_negs_per_pos 67.0 +125 4 training.batch_size 0.0 +125 5 model.embedding_dim 0.0 +125 5 model.hidden_dropout_rate 0.14422643225344017 +125 5 model.num_filters 3.0 +125 5 loss.margin 2.35689978631355 +125 5 optimizer.lr 0.023385531003338865 +125 5 negative_sampler.num_negs_per_pos 27.0 +125 5 training.batch_size 0.0 +125 6 model.embedding_dim 2.0 +125 6 model.hidden_dropout_rate 0.12677491900194204 +125 6 model.num_filters 7.0 +125 6 loss.margin 6.368324776590277 +125 6 optimizer.lr 0.0018578913333644473 +125 6 negative_sampler.num_negs_per_pos 45.0 +125 6 training.batch_size 2.0 +125 7 model.embedding_dim 1.0 +125 7 model.hidden_dropout_rate 0.44341265357764137 +125 7 model.num_filters 2.0 +125 7 loss.margin 7.058716628991355 +125 7 optimizer.lr 0.002373983244110501 +125 7 negative_sampler.num_negs_per_pos 29.0 +125 7 training.batch_size 2.0 +125 8 model.embedding_dim 0.0 +125 8 model.hidden_dropout_rate 0.19262345220031368 +125 8 model.num_filters 3.0 +125 8 loss.margin 8.436382661280831 +125 8 optimizer.lr 0.03724767212369283 +125 8 negative_sampler.num_negs_per_pos 13.0 +125 8 training.batch_size 2.0 +125 9 model.embedding_dim 2.0 +125 9 model.hidden_dropout_rate 0.4430190279412029 +125 9 model.num_filters 6.0 +125 9 loss.margin 0.8021394135757631 +125 9 optimizer.lr 0.0011085889699111457 +125 9 negative_sampler.num_negs_per_pos 77.0 +125 9 training.batch_size 1.0 +125 10 model.embedding_dim 1.0 +125 10 model.hidden_dropout_rate 0.16843138064793367 +125 10 model.num_filters 3.0 +125 10 
loss.margin 8.929148345103252 +125 10 optimizer.lr 0.0018605030387237004 +125 10 negative_sampler.num_negs_per_pos 82.0 +125 10 training.batch_size 1.0 +125 11 model.embedding_dim 1.0 +125 11 model.hidden_dropout_rate 0.29987566531476073 +125 11 model.num_filters 1.0 +125 11 loss.margin 1.4509687431806788 +125 11 optimizer.lr 0.0045979951425151044 +125 11 negative_sampler.num_negs_per_pos 12.0 +125 11 training.batch_size 2.0 +125 12 model.embedding_dim 1.0 +125 12 model.hidden_dropout_rate 0.2702524514682736 +125 12 model.num_filters 9.0 +125 12 loss.margin 3.658552913600973 +125 12 optimizer.lr 0.021472058032819416 +125 12 negative_sampler.num_negs_per_pos 43.0 +125 12 training.batch_size 2.0 +125 13 model.embedding_dim 2.0 +125 13 model.hidden_dropout_rate 0.4178090672959985 +125 13 model.num_filters 8.0 +125 13 loss.margin 8.078755669465048 +125 13 optimizer.lr 0.003062069372018422 +125 13 negative_sampler.num_negs_per_pos 2.0 +125 13 training.batch_size 1.0 +125 14 model.embedding_dim 1.0 +125 14 model.hidden_dropout_rate 0.2834363556387044 +125 14 model.num_filters 3.0 +125 14 loss.margin 6.030137602155585 +125 14 optimizer.lr 0.01683721004447861 +125 14 negative_sampler.num_negs_per_pos 5.0 +125 14 training.batch_size 0.0 +125 15 model.embedding_dim 0.0 +125 15 model.hidden_dropout_rate 0.38787896135579836 +125 15 model.num_filters 4.0 +125 15 loss.margin 4.776497232638694 +125 15 optimizer.lr 0.027692780359479828 +125 15 negative_sampler.num_negs_per_pos 13.0 +125 15 training.batch_size 1.0 +125 16 model.embedding_dim 0.0 +125 16 model.hidden_dropout_rate 0.10778586469324046 +125 16 model.num_filters 1.0 +125 16 loss.margin 5.3685564130049634 +125 16 optimizer.lr 0.007785357487980829 +125 16 negative_sampler.num_negs_per_pos 24.0 +125 16 training.batch_size 1.0 +125 17 model.embedding_dim 1.0 +125 17 model.hidden_dropout_rate 0.4850164881856316 +125 17 model.num_filters 6.0 +125 17 loss.margin 1.9720289430926412 +125 17 optimizer.lr 0.005051708024488231 +125 
17 negative_sampler.num_negs_per_pos 7.0 +125 17 training.batch_size 0.0 +125 18 model.embedding_dim 0.0 +125 18 model.hidden_dropout_rate 0.2857618488458403 +125 18 model.num_filters 6.0 +125 18 loss.margin 0.6420559398180589 +125 18 optimizer.lr 0.003485732630736433 +125 18 negative_sampler.num_negs_per_pos 6.0 +125 18 training.batch_size 0.0 +125 19 model.embedding_dim 2.0 +125 19 model.hidden_dropout_rate 0.45535553789918504 +125 19 model.num_filters 5.0 +125 19 loss.margin 4.907765500374501 +125 19 optimizer.lr 0.014419505905253496 +125 19 negative_sampler.num_negs_per_pos 22.0 +125 19 training.batch_size 2.0 +125 20 model.embedding_dim 2.0 +125 20 model.hidden_dropout_rate 0.3893051686020669 +125 20 model.num_filters 4.0 +125 20 loss.margin 9.900292956197518 +125 20 optimizer.lr 0.013784378280972448 +125 20 negative_sampler.num_negs_per_pos 2.0 +125 20 training.batch_size 2.0 +125 21 model.embedding_dim 2.0 +125 21 model.hidden_dropout_rate 0.32555871966886213 +125 21 model.num_filters 7.0 +125 21 loss.margin 3.3417896826404334 +125 21 optimizer.lr 0.011433906976246955 +125 21 negative_sampler.num_negs_per_pos 24.0 +125 21 training.batch_size 1.0 +125 22 model.embedding_dim 2.0 +125 22 model.hidden_dropout_rate 0.401515785418492 +125 22 model.num_filters 7.0 +125 22 loss.margin 8.92654224383711 +125 22 optimizer.lr 0.0010696557757918725 +125 22 negative_sampler.num_negs_per_pos 16.0 +125 22 training.batch_size 1.0 +125 23 model.embedding_dim 1.0 +125 23 model.hidden_dropout_rate 0.42806861806401 +125 23 model.num_filters 0.0 +125 23 loss.margin 9.995065821554972 +125 23 optimizer.lr 0.01898468887091084 +125 23 negative_sampler.num_negs_per_pos 47.0 +125 23 training.batch_size 1.0 +125 24 model.embedding_dim 1.0 +125 24 model.hidden_dropout_rate 0.1290072433674758 +125 24 model.num_filters 2.0 +125 24 loss.margin 9.843828177545184 +125 24 optimizer.lr 0.005931238220417997 +125 24 negative_sampler.num_negs_per_pos 36.0 +125 24 training.batch_size 0.0 +125 25 
model.embedding_dim 2.0 +125 25 model.hidden_dropout_rate 0.14715369486539626 +125 25 model.num_filters 6.0 +125 25 loss.margin 6.670912145570817 +125 25 optimizer.lr 0.0579119624340759 +125 25 negative_sampler.num_negs_per_pos 88.0 +125 25 training.batch_size 2.0 +125 26 model.embedding_dim 0.0 +125 26 model.hidden_dropout_rate 0.35236698327148974 +125 26 model.num_filters 5.0 +125 26 loss.margin 5.517550828601485 +125 26 optimizer.lr 0.0016204877758509634 +125 26 negative_sampler.num_negs_per_pos 42.0 +125 26 training.batch_size 1.0 +125 27 model.embedding_dim 1.0 +125 27 model.hidden_dropout_rate 0.3746768378571555 +125 27 model.num_filters 0.0 +125 27 loss.margin 5.771176904234114 +125 27 optimizer.lr 0.01590974577256397 +125 27 negative_sampler.num_negs_per_pos 51.0 +125 27 training.batch_size 0.0 +125 28 model.embedding_dim 0.0 +125 28 model.hidden_dropout_rate 0.1678145316555272 +125 28 model.num_filters 1.0 +125 28 loss.margin 1.0744606870921996 +125 28 optimizer.lr 0.0051438983809583925 +125 28 negative_sampler.num_negs_per_pos 96.0 +125 28 training.batch_size 1.0 +125 29 model.embedding_dim 0.0 +125 29 model.hidden_dropout_rate 0.4484838117131385 +125 29 model.num_filters 7.0 +125 29 loss.margin 4.111291450830278 +125 29 optimizer.lr 0.0052191324412702205 +125 29 negative_sampler.num_negs_per_pos 52.0 +125 29 training.batch_size 0.0 +125 30 model.embedding_dim 1.0 +125 30 model.hidden_dropout_rate 0.14845348229822128 +125 30 model.num_filters 7.0 +125 30 loss.margin 4.6946488486935145 +125 30 optimizer.lr 0.005446154968593681 +125 30 negative_sampler.num_negs_per_pos 59.0 +125 30 training.batch_size 1.0 +125 31 model.embedding_dim 2.0 +125 31 model.hidden_dropout_rate 0.35378951417398924 +125 31 model.num_filters 8.0 +125 31 loss.margin 9.764735146490128 +125 31 optimizer.lr 0.004060535608169171 +125 31 negative_sampler.num_negs_per_pos 46.0 +125 31 training.batch_size 2.0 +125 32 model.embedding_dim 0.0 +125 32 model.hidden_dropout_rate 
0.22772575018579563 +125 32 model.num_filters 4.0 +125 32 loss.margin 6.776482529873003 +125 32 optimizer.lr 0.006798610389542326 +125 32 negative_sampler.num_negs_per_pos 26.0 +125 32 training.batch_size 2.0 +125 33 model.embedding_dim 1.0 +125 33 model.hidden_dropout_rate 0.23552800395087956 +125 33 model.num_filters 3.0 +125 33 loss.margin 7.812808275871584 +125 33 optimizer.lr 0.004451629422206363 +125 33 negative_sampler.num_negs_per_pos 83.0 +125 33 training.batch_size 0.0 +125 34 model.embedding_dim 2.0 +125 34 model.hidden_dropout_rate 0.28670244678678763 +125 34 model.num_filters 8.0 +125 34 loss.margin 1.6873178490210656 +125 34 optimizer.lr 0.023402306530449404 +125 34 negative_sampler.num_negs_per_pos 35.0 +125 34 training.batch_size 1.0 +125 35 model.embedding_dim 1.0 +125 35 model.hidden_dropout_rate 0.31508783360664694 +125 35 model.num_filters 8.0 +125 35 loss.margin 8.078608631049775 +125 35 optimizer.lr 0.01294641824411186 +125 35 negative_sampler.num_negs_per_pos 79.0 +125 35 training.batch_size 0.0 +125 36 model.embedding_dim 2.0 +125 36 model.hidden_dropout_rate 0.24437352279108066 +125 36 model.num_filters 0.0 +125 36 loss.margin 5.288743580179989 +125 36 optimizer.lr 0.0011897444302094598 +125 36 negative_sampler.num_negs_per_pos 38.0 +125 36 training.batch_size 2.0 +125 37 model.embedding_dim 2.0 +125 37 model.hidden_dropout_rate 0.24403022879522449 +125 37 model.num_filters 0.0 +125 37 loss.margin 8.668681694497199 +125 37 optimizer.lr 0.013776884858623873 +125 37 negative_sampler.num_negs_per_pos 90.0 +125 37 training.batch_size 0.0 +125 38 model.embedding_dim 1.0 +125 38 model.hidden_dropout_rate 0.143218242124384 +125 38 model.num_filters 1.0 +125 38 loss.margin 6.306490342205053 +125 38 optimizer.lr 0.056312344946176505 +125 38 negative_sampler.num_negs_per_pos 15.0 +125 38 training.batch_size 0.0 +125 39 model.embedding_dim 2.0 +125 39 model.hidden_dropout_rate 0.383012661755243 +125 39 model.num_filters 5.0 +125 39 loss.margin 
7.929222437912367 +125 39 optimizer.lr 0.0046083903410058386 +125 39 negative_sampler.num_negs_per_pos 95.0 +125 39 training.batch_size 2.0 +125 40 model.embedding_dim 0.0 +125 40 model.hidden_dropout_rate 0.3179891111119375 +125 40 model.num_filters 3.0 +125 40 loss.margin 7.696912232488896 +125 40 optimizer.lr 0.008359793281778705 +125 40 negative_sampler.num_negs_per_pos 52.0 +125 40 training.batch_size 0.0 +125 41 model.embedding_dim 2.0 +125 41 model.hidden_dropout_rate 0.3996837132545082 +125 41 model.num_filters 8.0 +125 41 loss.margin 4.493365902792089 +125 41 optimizer.lr 0.025147293094925232 +125 41 negative_sampler.num_negs_per_pos 34.0 +125 41 training.batch_size 1.0 +125 42 model.embedding_dim 1.0 +125 42 model.hidden_dropout_rate 0.28584387901465685 +125 42 model.num_filters 2.0 +125 42 loss.margin 9.427714383455251 +125 42 optimizer.lr 0.0016021607011235867 +125 42 negative_sampler.num_negs_per_pos 58.0 +125 42 training.batch_size 2.0 +125 43 model.embedding_dim 0.0 +125 43 model.hidden_dropout_rate 0.19235832479380904 +125 43 model.num_filters 7.0 +125 43 loss.margin 3.1728485723833195 +125 43 optimizer.lr 0.006035290026733904 +125 43 negative_sampler.num_negs_per_pos 74.0 +125 43 training.batch_size 0.0 +125 44 model.embedding_dim 0.0 +125 44 model.hidden_dropout_rate 0.44257165376581964 +125 44 model.num_filters 0.0 +125 44 loss.margin 2.373432605513077 +125 44 optimizer.lr 0.0018572382003794814 +125 44 negative_sampler.num_negs_per_pos 52.0 +125 44 training.batch_size 0.0 +125 45 model.embedding_dim 2.0 +125 45 model.hidden_dropout_rate 0.4823236424435382 +125 45 model.num_filters 4.0 +125 45 loss.margin 2.3484314884733557 +125 45 optimizer.lr 0.010351167492077028 +125 45 negative_sampler.num_negs_per_pos 35.0 +125 45 training.batch_size 1.0 +125 46 model.embedding_dim 1.0 +125 46 model.hidden_dropout_rate 0.3167705713103307 +125 46 model.num_filters 1.0 +125 46 loss.margin 6.771630876488793 +125 46 optimizer.lr 0.009521839767738034 +125 46 
negative_sampler.num_negs_per_pos 92.0 +125 46 training.batch_size 2.0 +125 47 model.embedding_dim 0.0 +125 47 model.hidden_dropout_rate 0.10816282070597127 +125 47 model.num_filters 3.0 +125 47 loss.margin 5.6496734773634785 +125 47 optimizer.lr 0.003094675302763751 +125 47 negative_sampler.num_negs_per_pos 65.0 +125 47 training.batch_size 2.0 +125 48 model.embedding_dim 2.0 +125 48 model.hidden_dropout_rate 0.11854105696216194 +125 48 model.num_filters 8.0 +125 48 loss.margin 6.7412200354119305 +125 48 optimizer.lr 0.020439623839088778 +125 48 negative_sampler.num_negs_per_pos 90.0 +125 48 training.batch_size 1.0 +125 1 dataset """kinships""" +125 1 model """convkb""" +125 1 loss """marginranking""" +125 1 regularizer """no""" +125 1 optimizer """adam""" +125 1 training_loop """owa""" +125 1 negative_sampler """basic""" +125 1 evaluator """rankbased""" +125 2 dataset """kinships""" +125 2 model """convkb""" +125 2 loss """marginranking""" +125 2 regularizer """no""" +125 2 optimizer """adam""" +125 2 training_loop """owa""" +125 2 negative_sampler """basic""" +125 2 evaluator """rankbased""" +125 3 dataset """kinships""" +125 3 model """convkb""" +125 3 loss """marginranking""" +125 3 regularizer """no""" +125 3 optimizer """adam""" +125 3 training_loop """owa""" +125 3 negative_sampler """basic""" +125 3 evaluator """rankbased""" +125 4 dataset """kinships""" +125 4 model """convkb""" +125 4 loss """marginranking""" +125 4 regularizer """no""" +125 4 optimizer """adam""" +125 4 training_loop """owa""" +125 4 negative_sampler """basic""" +125 4 evaluator """rankbased""" +125 5 dataset """kinships""" +125 5 model """convkb""" +125 5 loss """marginranking""" +125 5 regularizer """no""" +125 5 optimizer """adam""" +125 5 training_loop """owa""" +125 5 negative_sampler """basic""" +125 5 evaluator """rankbased""" +125 6 dataset """kinships""" +125 6 model """convkb""" +125 6 loss """marginranking""" +125 6 regularizer """no""" +125 6 optimizer """adam""" +125 6 
training_loop """owa""" +125 6 negative_sampler """basic""" +125 6 evaluator """rankbased""" +125 7 dataset """kinships""" +125 7 model """convkb""" +125 7 loss """marginranking""" +125 7 regularizer """no""" +125 7 optimizer """adam""" +125 7 training_loop """owa""" +125 7 negative_sampler """basic""" +125 7 evaluator """rankbased""" +125 8 dataset """kinships""" +125 8 model """convkb""" +125 8 loss """marginranking""" +125 8 regularizer """no""" +125 8 optimizer """adam""" +125 8 training_loop """owa""" +125 8 negative_sampler """basic""" +125 8 evaluator """rankbased""" +125 9 dataset """kinships""" +125 9 model """convkb""" +125 9 loss """marginranking""" +125 9 regularizer """no""" +125 9 optimizer """adam""" +125 9 training_loop """owa""" +125 9 negative_sampler """basic""" +125 9 evaluator """rankbased""" +125 10 dataset """kinships""" +125 10 model """convkb""" +125 10 loss """marginranking""" +125 10 regularizer """no""" +125 10 optimizer """adam""" +125 10 training_loop """owa""" +125 10 negative_sampler """basic""" +125 10 evaluator """rankbased""" +125 11 dataset """kinships""" +125 11 model """convkb""" +125 11 loss """marginranking""" +125 11 regularizer """no""" +125 11 optimizer """adam""" +125 11 training_loop """owa""" +125 11 negative_sampler """basic""" +125 11 evaluator """rankbased""" +125 12 dataset """kinships""" +125 12 model """convkb""" +125 12 loss """marginranking""" +125 12 regularizer """no""" +125 12 optimizer """adam""" +125 12 training_loop """owa""" +125 12 negative_sampler """basic""" +125 12 evaluator """rankbased""" +125 13 dataset """kinships""" +125 13 model """convkb""" +125 13 loss """marginranking""" +125 13 regularizer """no""" +125 13 optimizer """adam""" +125 13 training_loop """owa""" +125 13 negative_sampler """basic""" +125 13 evaluator """rankbased""" +125 14 dataset """kinships""" +125 14 model """convkb""" +125 14 loss """marginranking""" +125 14 regularizer """no""" +125 14 optimizer """adam""" +125 14 
training_loop """owa""" +125 14 negative_sampler """basic""" +125 14 evaluator """rankbased""" +125 15 dataset """kinships""" +125 15 model """convkb""" +125 15 loss """marginranking""" +125 15 regularizer """no""" +125 15 optimizer """adam""" +125 15 training_loop """owa""" +125 15 negative_sampler """basic""" +125 15 evaluator """rankbased""" +125 16 dataset """kinships""" +125 16 model """convkb""" +125 16 loss """marginranking""" +125 16 regularizer """no""" +125 16 optimizer """adam""" +125 16 training_loop """owa""" +125 16 negative_sampler """basic""" +125 16 evaluator """rankbased""" +125 17 dataset """kinships""" +125 17 model """convkb""" +125 17 loss """marginranking""" +125 17 regularizer """no""" +125 17 optimizer """adam""" +125 17 training_loop """owa""" +125 17 negative_sampler """basic""" +125 17 evaluator """rankbased""" +125 18 dataset """kinships""" +125 18 model """convkb""" +125 18 loss """marginranking""" +125 18 regularizer """no""" +125 18 optimizer """adam""" +125 18 training_loop """owa""" +125 18 negative_sampler """basic""" +125 18 evaluator """rankbased""" +125 19 dataset """kinships""" +125 19 model """convkb""" +125 19 loss """marginranking""" +125 19 regularizer """no""" +125 19 optimizer """adam""" +125 19 training_loop """owa""" +125 19 negative_sampler """basic""" +125 19 evaluator """rankbased""" +125 20 dataset """kinships""" +125 20 model """convkb""" +125 20 loss """marginranking""" +125 20 regularizer """no""" +125 20 optimizer """adam""" +125 20 training_loop """owa""" +125 20 negative_sampler """basic""" +125 20 evaluator """rankbased""" +125 21 dataset """kinships""" +125 21 model """convkb""" +125 21 loss """marginranking""" +125 21 regularizer """no""" +125 21 optimizer """adam""" +125 21 training_loop """owa""" +125 21 negative_sampler """basic""" +125 21 evaluator """rankbased""" +125 22 dataset """kinships""" +125 22 model """convkb""" +125 22 loss """marginranking""" +125 22 regularizer """no""" +125 22 optimizer 
"""adam""" +125 22 training_loop """owa""" +125 22 negative_sampler """basic""" +125 22 evaluator """rankbased""" +125 23 dataset """kinships""" +125 23 model """convkb""" +125 23 loss """marginranking""" +125 23 regularizer """no""" +125 23 optimizer """adam""" +125 23 training_loop """owa""" +125 23 negative_sampler """basic""" +125 23 evaluator """rankbased""" +125 24 dataset """kinships""" +125 24 model """convkb""" +125 24 loss """marginranking""" +125 24 regularizer """no""" +125 24 optimizer """adam""" +125 24 training_loop """owa""" +125 24 negative_sampler """basic""" +125 24 evaluator """rankbased""" +125 25 dataset """kinships""" +125 25 model """convkb""" +125 25 loss """marginranking""" +125 25 regularizer """no""" +125 25 optimizer """adam""" +125 25 training_loop """owa""" +125 25 negative_sampler """basic""" +125 25 evaluator """rankbased""" +125 26 dataset """kinships""" +125 26 model """convkb""" +125 26 loss """marginranking""" +125 26 regularizer """no""" +125 26 optimizer """adam""" +125 26 training_loop """owa""" +125 26 negative_sampler """basic""" +125 26 evaluator """rankbased""" +125 27 dataset """kinships""" +125 27 model """convkb""" +125 27 loss """marginranking""" +125 27 regularizer """no""" +125 27 optimizer """adam""" +125 27 training_loop """owa""" +125 27 negative_sampler """basic""" +125 27 evaluator """rankbased""" +125 28 dataset """kinships""" +125 28 model """convkb""" +125 28 loss """marginranking""" +125 28 regularizer """no""" +125 28 optimizer """adam""" +125 28 training_loop """owa""" +125 28 negative_sampler """basic""" +125 28 evaluator """rankbased""" +125 29 dataset """kinships""" +125 29 model """convkb""" +125 29 loss """marginranking""" +125 29 regularizer """no""" +125 29 optimizer """adam""" +125 29 training_loop """owa""" +125 29 negative_sampler """basic""" +125 29 evaluator """rankbased""" +125 30 dataset """kinships""" +125 30 model """convkb""" +125 30 loss """marginranking""" +125 30 regularizer """no""" 
+125 30 optimizer """adam""" +125 30 training_loop """owa""" +125 30 negative_sampler """basic""" +125 30 evaluator """rankbased""" +125 31 dataset """kinships""" +125 31 model """convkb""" +125 31 loss """marginranking""" +125 31 regularizer """no""" +125 31 optimizer """adam""" +125 31 training_loop """owa""" +125 31 negative_sampler """basic""" +125 31 evaluator """rankbased""" +125 32 dataset """kinships""" +125 32 model """convkb""" +125 32 loss """marginranking""" +125 32 regularizer """no""" +125 32 optimizer """adam""" +125 32 training_loop """owa""" +125 32 negative_sampler """basic""" +125 32 evaluator """rankbased""" +125 33 dataset """kinships""" +125 33 model """convkb""" +125 33 loss """marginranking""" +125 33 regularizer """no""" +125 33 optimizer """adam""" +125 33 training_loop """owa""" +125 33 negative_sampler """basic""" +125 33 evaluator """rankbased""" +125 34 dataset """kinships""" +125 34 model """convkb""" +125 34 loss """marginranking""" +125 34 regularizer """no""" +125 34 optimizer """adam""" +125 34 training_loop """owa""" +125 34 negative_sampler """basic""" +125 34 evaluator """rankbased""" +125 35 dataset """kinships""" +125 35 model """convkb""" +125 35 loss """marginranking""" +125 35 regularizer """no""" +125 35 optimizer """adam""" +125 35 training_loop """owa""" +125 35 negative_sampler """basic""" +125 35 evaluator """rankbased""" +125 36 dataset """kinships""" +125 36 model """convkb""" +125 36 loss """marginranking""" +125 36 regularizer """no""" +125 36 optimizer """adam""" +125 36 training_loop """owa""" +125 36 negative_sampler """basic""" +125 36 evaluator """rankbased""" +125 37 dataset """kinships""" +125 37 model """convkb""" +125 37 loss """marginranking""" +125 37 regularizer """no""" +125 37 optimizer """adam""" +125 37 training_loop """owa""" +125 37 negative_sampler """basic""" +125 37 evaluator """rankbased""" +125 38 dataset """kinships""" +125 38 model """convkb""" +125 38 loss """marginranking""" +125 38 
regularizer """no""" +125 38 optimizer """adam""" +125 38 training_loop """owa""" +125 38 negative_sampler """basic""" +125 38 evaluator """rankbased""" +125 39 dataset """kinships""" +125 39 model """convkb""" +125 39 loss """marginranking""" +125 39 regularizer """no""" +125 39 optimizer """adam""" +125 39 training_loop """owa""" +125 39 negative_sampler """basic""" +125 39 evaluator """rankbased""" +125 40 dataset """kinships""" +125 40 model """convkb""" +125 40 loss """marginranking""" +125 40 regularizer """no""" +125 40 optimizer """adam""" +125 40 training_loop """owa""" +125 40 negative_sampler """basic""" +125 40 evaluator """rankbased""" +125 41 dataset """kinships""" +125 41 model """convkb""" +125 41 loss """marginranking""" +125 41 regularizer """no""" +125 41 optimizer """adam""" +125 41 training_loop """owa""" +125 41 negative_sampler """basic""" +125 41 evaluator """rankbased""" +125 42 dataset """kinships""" +125 42 model """convkb""" +125 42 loss """marginranking""" +125 42 regularizer """no""" +125 42 optimizer """adam""" +125 42 training_loop """owa""" +125 42 negative_sampler """basic""" +125 42 evaluator """rankbased""" +125 43 dataset """kinships""" +125 43 model """convkb""" +125 43 loss """marginranking""" +125 43 regularizer """no""" +125 43 optimizer """adam""" +125 43 training_loop """owa""" +125 43 negative_sampler """basic""" +125 43 evaluator """rankbased""" +125 44 dataset """kinships""" +125 44 model """convkb""" +125 44 loss """marginranking""" +125 44 regularizer """no""" +125 44 optimizer """adam""" +125 44 training_loop """owa""" +125 44 negative_sampler """basic""" +125 44 evaluator """rankbased""" +125 45 dataset """kinships""" +125 45 model """convkb""" +125 45 loss """marginranking""" +125 45 regularizer """no""" +125 45 optimizer """adam""" +125 45 training_loop """owa""" +125 45 negative_sampler """basic""" +125 45 evaluator """rankbased""" +125 46 dataset """kinships""" +125 46 model """convkb""" +125 46 loss 
"""marginranking""" +125 46 regularizer """no""" +125 46 optimizer """adam""" +125 46 training_loop """owa""" +125 46 negative_sampler """basic""" +125 46 evaluator """rankbased""" +125 47 dataset """kinships""" +125 47 model """convkb""" +125 47 loss """marginranking""" +125 47 regularizer """no""" +125 47 optimizer """adam""" +125 47 training_loop """owa""" +125 47 negative_sampler """basic""" +125 47 evaluator """rankbased""" +125 48 dataset """kinships""" +125 48 model """convkb""" +125 48 loss """marginranking""" +125 48 regularizer """no""" +125 48 optimizer """adam""" +125 48 training_loop """owa""" +125 48 negative_sampler """basic""" +125 48 evaluator """rankbased""" +126 1 model.embedding_dim 2.0 +126 1 model.hidden_dropout_rate 0.2689335377422944 +126 1 model.num_filters 3.0 +126 1 loss.margin 23.739988024182004 +126 1 loss.adversarial_temperature 0.49350479915231316 +126 1 optimizer.lr 0.02967890919774347 +126 1 negative_sampler.num_negs_per_pos 17.0 +126 1 training.batch_size 0.0 +126 2 model.embedding_dim 0.0 +126 2 model.hidden_dropout_rate 0.47153302461174473 +126 2 model.num_filters 1.0 +126 2 loss.margin 15.099961185874207 +126 2 loss.adversarial_temperature 0.7754623486309226 +126 2 optimizer.lr 0.0014665204302031665 +126 2 negative_sampler.num_negs_per_pos 94.0 +126 2 training.batch_size 2.0 +126 3 model.embedding_dim 2.0 +126 3 model.hidden_dropout_rate 0.24811335943377802 +126 3 model.num_filters 2.0 +126 3 loss.margin 19.403402519828 +126 3 loss.adversarial_temperature 0.22390177386377302 +126 3 optimizer.lr 0.001377582749858562 +126 3 negative_sampler.num_negs_per_pos 39.0 +126 3 training.batch_size 1.0 +126 4 model.embedding_dim 2.0 +126 4 model.hidden_dropout_rate 0.3612580953932719 +126 4 model.num_filters 7.0 +126 4 loss.margin 27.602532757574544 +126 4 loss.adversarial_temperature 0.3603638491325015 +126 4 optimizer.lr 0.054179495992317424 +126 4 negative_sampler.num_negs_per_pos 13.0 +126 4 training.batch_size 0.0 +126 5 
model.embedding_dim 0.0 +126 5 model.hidden_dropout_rate 0.46881944017992183 +126 5 model.num_filters 4.0 +126 5 loss.margin 18.866826735789477 +126 5 loss.adversarial_temperature 0.1991878493339148 +126 5 optimizer.lr 0.009620110396287325 +126 5 negative_sampler.num_negs_per_pos 77.0 +126 5 training.batch_size 2.0 +126 6 model.embedding_dim 1.0 +126 6 model.hidden_dropout_rate 0.3617349995958892 +126 6 model.num_filters 3.0 +126 6 loss.margin 27.704793007699596 +126 6 loss.adversarial_temperature 0.5669894526593694 +126 6 optimizer.lr 0.018648494713437217 +126 6 negative_sampler.num_negs_per_pos 8.0 +126 6 training.batch_size 1.0 +126 7 model.embedding_dim 0.0 +126 7 model.hidden_dropout_rate 0.2995860032872387 +126 7 model.num_filters 7.0 +126 7 loss.margin 28.148908555744654 +126 7 loss.adversarial_temperature 0.9506514978947016 +126 7 optimizer.lr 0.01567527829169109 +126 7 negative_sampler.num_negs_per_pos 60.0 +126 7 training.batch_size 2.0 +126 8 model.embedding_dim 2.0 +126 8 model.hidden_dropout_rate 0.24026687017949178 +126 8 model.num_filters 5.0 +126 8 loss.margin 18.898354087835678 +126 8 loss.adversarial_temperature 0.21536779980989101 +126 8 optimizer.lr 0.009744537930320783 +126 8 negative_sampler.num_negs_per_pos 20.0 +126 8 training.batch_size 0.0 +126 9 model.embedding_dim 0.0 +126 9 model.hidden_dropout_rate 0.48108338101132 +126 9 model.num_filters 9.0 +126 9 loss.margin 12.35177782307491 +126 9 loss.adversarial_temperature 0.9427653401915768 +126 9 optimizer.lr 0.09073830109451932 +126 9 negative_sampler.num_negs_per_pos 6.0 +126 9 training.batch_size 0.0 +126 10 model.embedding_dim 0.0 +126 10 model.hidden_dropout_rate 0.44220530810308467 +126 10 model.num_filters 0.0 +126 10 loss.margin 21.077541819697643 +126 10 loss.adversarial_temperature 0.615827804297765 +126 10 optimizer.lr 0.0017739819732832452 +126 10 negative_sampler.num_negs_per_pos 56.0 +126 10 training.batch_size 2.0 +126 11 model.embedding_dim 0.0 +126 11 
model.hidden_dropout_rate 0.27416835714133037 +126 11 model.num_filters 6.0 +126 11 loss.margin 11.113607604126587 +126 11 loss.adversarial_temperature 0.34901279668219687 +126 11 optimizer.lr 0.05476621596467696 +126 11 negative_sampler.num_negs_per_pos 22.0 +126 11 training.batch_size 0.0 +126 12 model.embedding_dim 0.0 +126 12 model.hidden_dropout_rate 0.30126272057195314 +126 12 model.num_filters 9.0 +126 12 loss.margin 9.050760005125861 +126 12 loss.adversarial_temperature 0.3366381138596696 +126 12 optimizer.lr 0.0029280536559678828 +126 12 negative_sampler.num_negs_per_pos 70.0 +126 12 training.batch_size 0.0 +126 13 model.embedding_dim 1.0 +126 13 model.hidden_dropout_rate 0.2911565406307669 +126 13 model.num_filters 3.0 +126 13 loss.margin 8.245343185424636 +126 13 loss.adversarial_temperature 0.15425025800823658 +126 13 optimizer.lr 0.0064800592684558355 +126 13 negative_sampler.num_negs_per_pos 60.0 +126 13 training.batch_size 1.0 +126 14 model.embedding_dim 0.0 +126 14 model.hidden_dropout_rate 0.26300580493055736 +126 14 model.num_filters 0.0 +126 14 loss.margin 15.057151279488012 +126 14 loss.adversarial_temperature 0.8004402977553376 +126 14 optimizer.lr 0.0820021753392346 +126 14 negative_sampler.num_negs_per_pos 52.0 +126 14 training.batch_size 1.0 +126 15 model.embedding_dim 0.0 +126 15 model.hidden_dropout_rate 0.13915476587902262 +126 15 model.num_filters 2.0 +126 15 loss.margin 3.481285513925008 +126 15 loss.adversarial_temperature 0.5065873037842149 +126 15 optimizer.lr 0.009227723283329149 +126 15 negative_sampler.num_negs_per_pos 29.0 +126 15 training.batch_size 1.0 +126 16 model.embedding_dim 2.0 +126 16 model.hidden_dropout_rate 0.10964263581607958 +126 16 model.num_filters 3.0 +126 16 loss.margin 28.185269449839222 +126 16 loss.adversarial_temperature 0.9795014390500311 +126 16 optimizer.lr 0.0011264091869098828 +126 16 negative_sampler.num_negs_per_pos 62.0 +126 16 training.batch_size 1.0 +126 17 model.embedding_dim 0.0 +126 17 
model.hidden_dropout_rate 0.441167026368678 +126 17 model.num_filters 0.0 +126 17 loss.margin 16.457155868949783 +126 17 loss.adversarial_temperature 0.5920982685405825 +126 17 optimizer.lr 0.0033934665461769656 +126 17 negative_sampler.num_negs_per_pos 72.0 +126 17 training.batch_size 1.0 +126 18 model.embedding_dim 2.0 +126 18 model.hidden_dropout_rate 0.31613864557610105 +126 18 model.num_filters 2.0 +126 18 loss.margin 21.823468692369477 +126 18 loss.adversarial_temperature 0.3570784381495846 +126 18 optimizer.lr 0.048105049602349216 +126 18 negative_sampler.num_negs_per_pos 58.0 +126 18 training.batch_size 1.0 +126 19 model.embedding_dim 1.0 +126 19 model.hidden_dropout_rate 0.17660943960240552 +126 19 model.num_filters 2.0 +126 19 loss.margin 3.049539516556491 +126 19 loss.adversarial_temperature 0.6633524796146829 +126 19 optimizer.lr 0.009795705653872042 +126 19 negative_sampler.num_negs_per_pos 41.0 +126 19 training.batch_size 1.0 +126 20 model.embedding_dim 2.0 +126 20 model.hidden_dropout_rate 0.24870089402220744 +126 20 model.num_filters 6.0 +126 20 loss.margin 8.558575165707778 +126 20 loss.adversarial_temperature 0.7572492607420253 +126 20 optimizer.lr 0.001198726925722017 +126 20 negative_sampler.num_negs_per_pos 2.0 +126 20 training.batch_size 0.0 +126 21 model.embedding_dim 1.0 +126 21 model.hidden_dropout_rate 0.29083409132841964 +126 21 model.num_filters 8.0 +126 21 loss.margin 29.04382942901568 +126 21 loss.adversarial_temperature 0.3330467995098306 +126 21 optimizer.lr 0.0015424859654991326 +126 21 negative_sampler.num_negs_per_pos 20.0 +126 21 training.batch_size 0.0 +126 22 model.embedding_dim 2.0 +126 22 model.hidden_dropout_rate 0.28273466764813704 +126 22 model.num_filters 3.0 +126 22 loss.margin 2.3579795535382013 +126 22 loss.adversarial_temperature 0.16330546897955883 +126 22 optimizer.lr 0.05078914417120298 +126 22 negative_sampler.num_negs_per_pos 33.0 +126 22 training.batch_size 2.0 +126 23 model.embedding_dim 1.0 +126 23 
model.hidden_dropout_rate 0.3519375940129553 +126 23 model.num_filters 4.0 +126 23 loss.margin 17.535424648278653 +126 23 loss.adversarial_temperature 0.3574978472997298 +126 23 optimizer.lr 0.01251841228536513 +126 23 negative_sampler.num_negs_per_pos 92.0 +126 23 training.batch_size 1.0 +126 24 model.embedding_dim 2.0 +126 24 model.hidden_dropout_rate 0.4674969444691368 +126 24 model.num_filters 1.0 +126 24 loss.margin 17.77783687450624 +126 24 loss.adversarial_temperature 0.37338301478368396 +126 24 optimizer.lr 0.007077463688071681 +126 24 negative_sampler.num_negs_per_pos 79.0 +126 24 training.batch_size 1.0 +126 25 model.embedding_dim 0.0 +126 25 model.hidden_dropout_rate 0.34813441698708547 +126 25 model.num_filters 6.0 +126 25 loss.margin 16.272939221418746 +126 25 loss.adversarial_temperature 0.49566569830410356 +126 25 optimizer.lr 0.044838100986675115 +126 25 negative_sampler.num_negs_per_pos 76.0 +126 25 training.batch_size 1.0 +126 26 model.embedding_dim 2.0 +126 26 model.hidden_dropout_rate 0.27605359776881194 +126 26 model.num_filters 3.0 +126 26 loss.margin 21.828425934581045 +126 26 loss.adversarial_temperature 0.16430203081612565 +126 26 optimizer.lr 0.034131353438233585 +126 26 negative_sampler.num_negs_per_pos 92.0 +126 26 training.batch_size 0.0 +126 27 model.embedding_dim 0.0 +126 27 model.hidden_dropout_rate 0.22532376692024392 +126 27 model.num_filters 7.0 +126 27 loss.margin 9.818879666397184 +126 27 loss.adversarial_temperature 0.5470322426002803 +126 27 optimizer.lr 0.007349404942778091 +126 27 negative_sampler.num_negs_per_pos 41.0 +126 27 training.batch_size 1.0 +126 28 model.embedding_dim 0.0 +126 28 model.hidden_dropout_rate 0.15141313423137698 +126 28 model.num_filters 9.0 +126 28 loss.margin 28.69422620878512 +126 28 loss.adversarial_temperature 0.8166233829573808 +126 28 optimizer.lr 0.008922408832483165 +126 28 negative_sampler.num_negs_per_pos 11.0 +126 28 training.batch_size 1.0 +126 29 model.embedding_dim 2.0 +126 29 
model.hidden_dropout_rate 0.12461713678883801 +126 29 model.num_filters 5.0 +126 29 loss.margin 25.251347798343723 +126 29 loss.adversarial_temperature 0.7704031605230657 +126 29 optimizer.lr 0.001900516294690531 +126 29 negative_sampler.num_negs_per_pos 37.0 +126 29 training.batch_size 2.0 +126 30 model.embedding_dim 1.0 +126 30 model.hidden_dropout_rate 0.209744429170189 +126 30 model.num_filters 7.0 +126 30 loss.margin 9.96475446878008 +126 30 loss.adversarial_temperature 0.9715434317578772 +126 30 optimizer.lr 0.0018787313934758758 +126 30 negative_sampler.num_negs_per_pos 99.0 +126 30 training.batch_size 0.0 +126 1 dataset """kinships""" +126 1 model """convkb""" +126 1 loss """nssa""" +126 1 regularizer """no""" +126 1 optimizer """adam""" +126 1 training_loop """owa""" +126 1 negative_sampler """basic""" +126 1 evaluator """rankbased""" +126 2 dataset """kinships""" +126 2 model """convkb""" +126 2 loss """nssa""" +126 2 regularizer """no""" +126 2 optimizer """adam""" +126 2 training_loop """owa""" +126 2 negative_sampler """basic""" +126 2 evaluator """rankbased""" +126 3 dataset """kinships""" +126 3 model """convkb""" +126 3 loss """nssa""" +126 3 regularizer """no""" +126 3 optimizer """adam""" +126 3 training_loop """owa""" +126 3 negative_sampler """basic""" +126 3 evaluator """rankbased""" +126 4 dataset """kinships""" +126 4 model """convkb""" +126 4 loss """nssa""" +126 4 regularizer """no""" +126 4 optimizer """adam""" +126 4 training_loop """owa""" +126 4 negative_sampler """basic""" +126 4 evaluator """rankbased""" +126 5 dataset """kinships""" +126 5 model """convkb""" +126 5 loss """nssa""" +126 5 regularizer """no""" +126 5 optimizer """adam""" +126 5 training_loop """owa""" +126 5 negative_sampler """basic""" +126 5 evaluator """rankbased""" +126 6 dataset """kinships""" +126 6 model """convkb""" +126 6 loss """nssa""" +126 6 regularizer """no""" +126 6 optimizer """adam""" +126 6 training_loop """owa""" +126 6 negative_sampler """basic""" 
+126 6 evaluator """rankbased""" +126 7 dataset """kinships""" +126 7 model """convkb""" +126 7 loss """nssa""" +126 7 regularizer """no""" +126 7 optimizer """adam""" +126 7 training_loop """owa""" +126 7 negative_sampler """basic""" +126 7 evaluator """rankbased""" +126 8 dataset """kinships""" +126 8 model """convkb""" +126 8 loss """nssa""" +126 8 regularizer """no""" +126 8 optimizer """adam""" +126 8 training_loop """owa""" +126 8 negative_sampler """basic""" +126 8 evaluator """rankbased""" +126 9 dataset """kinships""" +126 9 model """convkb""" +126 9 loss """nssa""" +126 9 regularizer """no""" +126 9 optimizer """adam""" +126 9 training_loop """owa""" +126 9 negative_sampler """basic""" +126 9 evaluator """rankbased""" +126 10 dataset """kinships""" +126 10 model """convkb""" +126 10 loss """nssa""" +126 10 regularizer """no""" +126 10 optimizer """adam""" +126 10 training_loop """owa""" +126 10 negative_sampler """basic""" +126 10 evaluator """rankbased""" +126 11 dataset """kinships""" +126 11 model """convkb""" +126 11 loss """nssa""" +126 11 regularizer """no""" +126 11 optimizer """adam""" +126 11 training_loop """owa""" +126 11 negative_sampler """basic""" +126 11 evaluator """rankbased""" +126 12 dataset """kinships""" +126 12 model """convkb""" +126 12 loss """nssa""" +126 12 regularizer """no""" +126 12 optimizer """adam""" +126 12 training_loop """owa""" +126 12 negative_sampler """basic""" +126 12 evaluator """rankbased""" +126 13 dataset """kinships""" +126 13 model """convkb""" +126 13 loss """nssa""" +126 13 regularizer """no""" +126 13 optimizer """adam""" +126 13 training_loop """owa""" +126 13 negative_sampler """basic""" +126 13 evaluator """rankbased""" +126 14 dataset """kinships""" +126 14 model """convkb""" +126 14 loss """nssa""" +126 14 regularizer """no""" +126 14 optimizer """adam""" +126 14 training_loop """owa""" +126 14 negative_sampler """basic""" +126 14 evaluator """rankbased""" +126 15 dataset """kinships""" +126 15 model 
"""convkb""" +126 15 loss """nssa""" +126 15 regularizer """no""" +126 15 optimizer """adam""" +126 15 training_loop """owa""" +126 15 negative_sampler """basic""" +126 15 evaluator """rankbased""" +126 16 dataset """kinships""" +126 16 model """convkb""" +126 16 loss """nssa""" +126 16 regularizer """no""" +126 16 optimizer """adam""" +126 16 training_loop """owa""" +126 16 negative_sampler """basic""" +126 16 evaluator """rankbased""" +126 17 dataset """kinships""" +126 17 model """convkb""" +126 17 loss """nssa""" +126 17 regularizer """no""" +126 17 optimizer """adam""" +126 17 training_loop """owa""" +126 17 negative_sampler """basic""" +126 17 evaluator """rankbased""" +126 18 dataset """kinships""" +126 18 model """convkb""" +126 18 loss """nssa""" +126 18 regularizer """no""" +126 18 optimizer """adam""" +126 18 training_loop """owa""" +126 18 negative_sampler """basic""" +126 18 evaluator """rankbased""" +126 19 dataset """kinships""" +126 19 model """convkb""" +126 19 loss """nssa""" +126 19 regularizer """no""" +126 19 optimizer """adam""" +126 19 training_loop """owa""" +126 19 negative_sampler """basic""" +126 19 evaluator """rankbased""" +126 20 dataset """kinships""" +126 20 model """convkb""" +126 20 loss """nssa""" +126 20 regularizer """no""" +126 20 optimizer """adam""" +126 20 training_loop """owa""" +126 20 negative_sampler """basic""" +126 20 evaluator """rankbased""" +126 21 dataset """kinships""" +126 21 model """convkb""" +126 21 loss """nssa""" +126 21 regularizer """no""" +126 21 optimizer """adam""" +126 21 training_loop """owa""" +126 21 negative_sampler """basic""" +126 21 evaluator """rankbased""" +126 22 dataset """kinships""" +126 22 model """convkb""" +126 22 loss """nssa""" +126 22 regularizer """no""" +126 22 optimizer """adam""" +126 22 training_loop """owa""" +126 22 negative_sampler """basic""" +126 22 evaluator """rankbased""" +126 23 dataset """kinships""" +126 23 model """convkb""" +126 23 loss """nssa""" +126 23 
regularizer """no""" +126 23 optimizer """adam""" +126 23 training_loop """owa""" +126 23 negative_sampler """basic""" +126 23 evaluator """rankbased""" +126 24 dataset """kinships""" +126 24 model """convkb""" +126 24 loss """nssa""" +126 24 regularizer """no""" +126 24 optimizer """adam""" +126 24 training_loop """owa""" +126 24 negative_sampler """basic""" +126 24 evaluator """rankbased""" +126 25 dataset """kinships""" +126 25 model """convkb""" +126 25 loss """nssa""" +126 25 regularizer """no""" +126 25 optimizer """adam""" +126 25 training_loop """owa""" +126 25 negative_sampler """basic""" +126 25 evaluator """rankbased""" +126 26 dataset """kinships""" +126 26 model """convkb""" +126 26 loss """nssa""" +126 26 regularizer """no""" +126 26 optimizer """adam""" +126 26 training_loop """owa""" +126 26 negative_sampler """basic""" +126 26 evaluator """rankbased""" +126 27 dataset """kinships""" +126 27 model """convkb""" +126 27 loss """nssa""" +126 27 regularizer """no""" +126 27 optimizer """adam""" +126 27 training_loop """owa""" +126 27 negative_sampler """basic""" +126 27 evaluator """rankbased""" +126 28 dataset """kinships""" +126 28 model """convkb""" +126 28 loss """nssa""" +126 28 regularizer """no""" +126 28 optimizer """adam""" +126 28 training_loop """owa""" +126 28 negative_sampler """basic""" +126 28 evaluator """rankbased""" +126 29 dataset """kinships""" +126 29 model """convkb""" +126 29 loss """nssa""" +126 29 regularizer """no""" +126 29 optimizer """adam""" +126 29 training_loop """owa""" +126 29 negative_sampler """basic""" +126 29 evaluator """rankbased""" +126 30 dataset """kinships""" +126 30 model """convkb""" +126 30 loss """nssa""" +126 30 regularizer """no""" +126 30 optimizer """adam""" +126 30 training_loop """owa""" +126 30 negative_sampler """basic""" +126 30 evaluator """rankbased""" +127 1 model.embedding_dim 2.0 +127 1 model.hidden_dropout_rate 0.48992176182674696 +127 1 model.num_filters 1.0 +127 1 loss.margin 
24.73895927349559 +127 1 loss.adversarial_temperature 0.2872583003166447 +127 1 optimizer.lr 0.0015905898219252695 +127 1 negative_sampler.num_negs_per_pos 75.0 +127 1 training.batch_size 1.0 +127 2 model.embedding_dim 0.0 +127 2 model.hidden_dropout_rate 0.2542628632524414 +127 2 model.num_filters 6.0 +127 2 loss.margin 6.1415757288998725 +127 2 loss.adversarial_temperature 0.6383237325146185 +127 2 optimizer.lr 0.02812644603635348 +127 2 negative_sampler.num_negs_per_pos 51.0 +127 2 training.batch_size 2.0 +127 3 model.embedding_dim 1.0 +127 3 model.hidden_dropout_rate 0.2740639197940289 +127 3 model.num_filters 6.0 +127 3 loss.margin 8.64537499450942 +127 3 loss.adversarial_temperature 0.4104833832199821 +127 3 optimizer.lr 0.00513655048808358 +127 3 negative_sampler.num_negs_per_pos 97.0 +127 3 training.batch_size 2.0 +127 4 model.embedding_dim 2.0 +127 4 model.hidden_dropout_rate 0.21566608376225124 +127 4 model.num_filters 6.0 +127 4 loss.margin 29.93612399711471 +127 4 loss.adversarial_temperature 0.3791524936404943 +127 4 optimizer.lr 0.014846952208586346 +127 4 negative_sampler.num_negs_per_pos 8.0 +127 4 training.batch_size 1.0 +127 5 model.embedding_dim 2.0 +127 5 model.hidden_dropout_rate 0.29628538874911 +127 5 model.num_filters 0.0 +127 5 loss.margin 9.334396924737923 +127 5 loss.adversarial_temperature 0.14548004370697804 +127 5 optimizer.lr 0.015331332872301815 +127 5 negative_sampler.num_negs_per_pos 77.0 +127 5 training.batch_size 1.0 +127 6 model.embedding_dim 2.0 +127 6 model.hidden_dropout_rate 0.17301105650241466 +127 6 model.num_filters 3.0 +127 6 loss.margin 15.177064268354808 +127 6 loss.adversarial_temperature 0.6822697509990172 +127 6 optimizer.lr 0.03466543337701906 +127 6 negative_sampler.num_negs_per_pos 1.0 +127 6 training.batch_size 2.0 +127 7 model.embedding_dim 1.0 +127 7 model.hidden_dropout_rate 0.23799048389235053 +127 7 model.num_filters 8.0 +127 7 loss.margin 18.15117626322403 +127 7 loss.adversarial_temperature 
0.6273817708587083 +127 7 optimizer.lr 0.00106654064110687 +127 7 negative_sampler.num_negs_per_pos 80.0 +127 7 training.batch_size 2.0 +127 8 model.embedding_dim 1.0 +127 8 model.hidden_dropout_rate 0.32097590311867086 +127 8 model.num_filters 9.0 +127 8 loss.margin 3.5396663066356955 +127 8 loss.adversarial_temperature 0.5643312988598825 +127 8 optimizer.lr 0.013289297396787202 +127 8 negative_sampler.num_negs_per_pos 51.0 +127 8 training.batch_size 0.0 +127 9 model.embedding_dim 0.0 +127 9 model.hidden_dropout_rate 0.328943841304398 +127 9 model.num_filters 7.0 +127 9 loss.margin 4.448016722127669 +127 9 loss.adversarial_temperature 0.8059903113486401 +127 9 optimizer.lr 0.019973864142476778 +127 9 negative_sampler.num_negs_per_pos 79.0 +127 9 training.batch_size 1.0 +127 10 model.embedding_dim 0.0 +127 10 model.hidden_dropout_rate 0.19321621520162893 +127 10 model.num_filters 6.0 +127 10 loss.margin 7.0887313799961955 +127 10 loss.adversarial_temperature 0.29781974054702687 +127 10 optimizer.lr 0.021024146607647516 +127 10 negative_sampler.num_negs_per_pos 12.0 +127 10 training.batch_size 2.0 +127 11 model.embedding_dim 0.0 +127 11 model.hidden_dropout_rate 0.10473459386208064 +127 11 model.num_filters 9.0 +127 11 loss.margin 15.62789572317013 +127 11 loss.adversarial_temperature 0.17382079936199402 +127 11 optimizer.lr 0.0875644772190355 +127 11 negative_sampler.num_negs_per_pos 59.0 +127 11 training.batch_size 2.0 +127 12 model.embedding_dim 2.0 +127 12 model.hidden_dropout_rate 0.2504328460380865 +127 12 model.num_filters 1.0 +127 12 loss.margin 23.55229150588932 +127 12 loss.adversarial_temperature 0.552993254542449 +127 12 optimizer.lr 0.0012106858154087932 +127 12 negative_sampler.num_negs_per_pos 31.0 +127 12 training.batch_size 0.0 +127 13 model.embedding_dim 2.0 +127 13 model.hidden_dropout_rate 0.190738338298025 +127 13 model.num_filters 9.0 +127 13 loss.margin 7.862190465225014 +127 13 loss.adversarial_temperature 0.2030053972518377 +127 13 
optimizer.lr 0.004028725379729588 +127 13 negative_sampler.num_negs_per_pos 90.0 +127 13 training.batch_size 1.0 +127 14 model.embedding_dim 2.0 +127 14 model.hidden_dropout_rate 0.11668840460119045 +127 14 model.num_filters 3.0 +127 14 loss.margin 8.73078266274665 +127 14 loss.adversarial_temperature 0.6443264300575898 +127 14 optimizer.lr 0.014481322239133433 +127 14 negative_sampler.num_negs_per_pos 6.0 +127 14 training.batch_size 2.0 +127 15 model.embedding_dim 2.0 +127 15 model.hidden_dropout_rate 0.14288399527107387 +127 15 model.num_filters 2.0 +127 15 loss.margin 5.022826236445965 +127 15 loss.adversarial_temperature 0.9390810446146273 +127 15 optimizer.lr 0.011495458032243966 +127 15 negative_sampler.num_negs_per_pos 32.0 +127 15 training.batch_size 0.0 +127 16 model.embedding_dim 1.0 +127 16 model.hidden_dropout_rate 0.3116934177750629 +127 16 model.num_filters 9.0 +127 16 loss.margin 16.30007123380981 +127 16 loss.adversarial_temperature 0.8089380247958525 +127 16 optimizer.lr 0.03156354031621187 +127 16 negative_sampler.num_negs_per_pos 73.0 +127 16 training.batch_size 2.0 +127 17 model.embedding_dim 0.0 +127 17 model.hidden_dropout_rate 0.2724233106082461 +127 17 model.num_filters 7.0 +127 17 loss.margin 29.359686460610117 +127 17 loss.adversarial_temperature 0.21456555902197005 +127 17 optimizer.lr 0.009784288328377033 +127 17 negative_sampler.num_negs_per_pos 80.0 +127 17 training.batch_size 0.0 +127 18 model.embedding_dim 2.0 +127 18 model.hidden_dropout_rate 0.11598952480288492 +127 18 model.num_filters 2.0 +127 18 loss.margin 7.7049317721550885 +127 18 loss.adversarial_temperature 0.44996116698392263 +127 18 optimizer.lr 0.009266516483671093 +127 18 negative_sampler.num_negs_per_pos 79.0 +127 18 training.batch_size 1.0 +127 19 model.embedding_dim 2.0 +127 19 model.hidden_dropout_rate 0.126609978408142 +127 19 model.num_filters 7.0 +127 19 loss.margin 3.149217755736646 +127 19 loss.adversarial_temperature 0.6870307348846225 +127 19 optimizer.lr 
0.006729660124543258 +127 19 negative_sampler.num_negs_per_pos 35.0 +127 19 training.batch_size 2.0 +127 20 model.embedding_dim 0.0 +127 20 model.hidden_dropout_rate 0.3886018511815079 +127 20 model.num_filters 2.0 +127 20 loss.margin 26.093225208569958 +127 20 loss.adversarial_temperature 0.616700182824501 +127 20 optimizer.lr 0.09614235895193653 +127 20 negative_sampler.num_negs_per_pos 90.0 +127 20 training.batch_size 1.0 +127 21 model.embedding_dim 2.0 +127 21 model.hidden_dropout_rate 0.16160933075136957 +127 21 model.num_filters 6.0 +127 21 loss.margin 6.015832417548446 +127 21 loss.adversarial_temperature 0.607168734038786 +127 21 optimizer.lr 0.001206123966368476 +127 21 negative_sampler.num_negs_per_pos 39.0 +127 21 training.batch_size 1.0 +127 22 model.embedding_dim 0.0 +127 22 model.hidden_dropout_rate 0.30240287870772176 +127 22 model.num_filters 9.0 +127 22 loss.margin 5.5160838072270355 +127 22 loss.adversarial_temperature 0.1759358336367362 +127 22 optimizer.lr 0.018451415154023234 +127 22 negative_sampler.num_negs_per_pos 51.0 +127 22 training.batch_size 1.0 +127 23 model.embedding_dim 2.0 +127 23 model.hidden_dropout_rate 0.1304286105227922 +127 23 model.num_filters 3.0 +127 23 loss.margin 3.9078828038863183 +127 23 loss.adversarial_temperature 0.44532879820625926 +127 23 optimizer.lr 0.0013676435972992161 +127 23 negative_sampler.num_negs_per_pos 91.0 +127 23 training.batch_size 0.0 +127 24 model.embedding_dim 1.0 +127 24 model.hidden_dropout_rate 0.26490557598094333 +127 24 model.num_filters 8.0 +127 24 loss.margin 22.401605503128458 +127 24 loss.adversarial_temperature 0.9144387145776529 +127 24 optimizer.lr 0.08278070534910215 +127 24 negative_sampler.num_negs_per_pos 83.0 +127 24 training.batch_size 1.0 +127 1 dataset """kinships""" +127 1 model """convkb""" +127 1 loss """nssa""" +127 1 regularizer """no""" +127 1 optimizer """adam""" +127 1 training_loop """owa""" +127 1 negative_sampler """basic""" +127 1 evaluator """rankbased""" +127 2 
dataset """kinships""" +127 2 model """convkb""" +127 2 loss """nssa""" +127 2 regularizer """no""" +127 2 optimizer """adam""" +127 2 training_loop """owa""" +127 2 negative_sampler """basic""" +127 2 evaluator """rankbased""" +127 3 dataset """kinships""" +127 3 model """convkb""" +127 3 loss """nssa""" +127 3 regularizer """no""" +127 3 optimizer """adam""" +127 3 training_loop """owa""" +127 3 negative_sampler """basic""" +127 3 evaluator """rankbased""" +127 4 dataset """kinships""" +127 4 model """convkb""" +127 4 loss """nssa""" +127 4 regularizer """no""" +127 4 optimizer """adam""" +127 4 training_loop """owa""" +127 4 negative_sampler """basic""" +127 4 evaluator """rankbased""" +127 5 dataset """kinships""" +127 5 model """convkb""" +127 5 loss """nssa""" +127 5 regularizer """no""" +127 5 optimizer """adam""" +127 5 training_loop """owa""" +127 5 negative_sampler """basic""" +127 5 evaluator """rankbased""" +127 6 dataset """kinships""" +127 6 model """convkb""" +127 6 loss """nssa""" +127 6 regularizer """no""" +127 6 optimizer """adam""" +127 6 training_loop """owa""" +127 6 negative_sampler """basic""" +127 6 evaluator """rankbased""" +127 7 dataset """kinships""" +127 7 model """convkb""" +127 7 loss """nssa""" +127 7 regularizer """no""" +127 7 optimizer """adam""" +127 7 training_loop """owa""" +127 7 negative_sampler """basic""" +127 7 evaluator """rankbased""" +127 8 dataset """kinships""" +127 8 model """convkb""" +127 8 loss """nssa""" +127 8 regularizer """no""" +127 8 optimizer """adam""" +127 8 training_loop """owa""" +127 8 negative_sampler """basic""" +127 8 evaluator """rankbased""" +127 9 dataset """kinships""" +127 9 model """convkb""" +127 9 loss """nssa""" +127 9 regularizer """no""" +127 9 optimizer """adam""" +127 9 training_loop """owa""" +127 9 negative_sampler """basic""" +127 9 evaluator """rankbased""" +127 10 dataset """kinships""" +127 10 model """convkb""" +127 10 loss """nssa""" +127 10 regularizer """no""" +127 10 
optimizer """adam""" +127 10 training_loop """owa""" +127 10 negative_sampler """basic""" +127 10 evaluator """rankbased""" +127 11 dataset """kinships""" +127 11 model """convkb""" +127 11 loss """nssa""" +127 11 regularizer """no""" +127 11 optimizer """adam""" +127 11 training_loop """owa""" +127 11 negative_sampler """basic""" +127 11 evaluator """rankbased""" +127 12 dataset """kinships""" +127 12 model """convkb""" +127 12 loss """nssa""" +127 12 regularizer """no""" +127 12 optimizer """adam""" +127 12 training_loop """owa""" +127 12 negative_sampler """basic""" +127 12 evaluator """rankbased""" +127 13 dataset """kinships""" +127 13 model """convkb""" +127 13 loss """nssa""" +127 13 regularizer """no""" +127 13 optimizer """adam""" +127 13 training_loop """owa""" +127 13 negative_sampler """basic""" +127 13 evaluator """rankbased""" +127 14 dataset """kinships""" +127 14 model """convkb""" +127 14 loss """nssa""" +127 14 regularizer """no""" +127 14 optimizer """adam""" +127 14 training_loop """owa""" +127 14 negative_sampler """basic""" +127 14 evaluator """rankbased""" +127 15 dataset """kinships""" +127 15 model """convkb""" +127 15 loss """nssa""" +127 15 regularizer """no""" +127 15 optimizer """adam""" +127 15 training_loop """owa""" +127 15 negative_sampler """basic""" +127 15 evaluator """rankbased""" +127 16 dataset """kinships""" +127 16 model """convkb""" +127 16 loss """nssa""" +127 16 regularizer """no""" +127 16 optimizer """adam""" +127 16 training_loop """owa""" +127 16 negative_sampler """basic""" +127 16 evaluator """rankbased""" +127 17 dataset """kinships""" +127 17 model """convkb""" +127 17 loss """nssa""" +127 17 regularizer """no""" +127 17 optimizer """adam""" +127 17 training_loop """owa""" +127 17 negative_sampler """basic""" +127 17 evaluator """rankbased""" +127 18 dataset """kinships""" +127 18 model """convkb""" +127 18 loss """nssa""" +127 18 regularizer """no""" +127 18 optimizer """adam""" +127 18 training_loop """owa""" 
+127 18 negative_sampler """basic""" +127 18 evaluator """rankbased""" +127 19 dataset """kinships""" +127 19 model """convkb""" +127 19 loss """nssa""" +127 19 regularizer """no""" +127 19 optimizer """adam""" +127 19 training_loop """owa""" +127 19 negative_sampler """basic""" +127 19 evaluator """rankbased""" +127 20 dataset """kinships""" +127 20 model """convkb""" +127 20 loss """nssa""" +127 20 regularizer """no""" +127 20 optimizer """adam""" +127 20 training_loop """owa""" +127 20 negative_sampler """basic""" +127 20 evaluator """rankbased""" +127 21 dataset """kinships""" +127 21 model """convkb""" +127 21 loss """nssa""" +127 21 regularizer """no""" +127 21 optimizer """adam""" +127 21 training_loop """owa""" +127 21 negative_sampler """basic""" +127 21 evaluator """rankbased""" +127 22 dataset """kinships""" +127 22 model """convkb""" +127 22 loss """nssa""" +127 22 regularizer """no""" +127 22 optimizer """adam""" +127 22 training_loop """owa""" +127 22 negative_sampler """basic""" +127 22 evaluator """rankbased""" +127 23 dataset """kinships""" +127 23 model """convkb""" +127 23 loss """nssa""" +127 23 regularizer """no""" +127 23 optimizer """adam""" +127 23 training_loop """owa""" +127 23 negative_sampler """basic""" +127 23 evaluator """rankbased""" +127 24 dataset """kinships""" +127 24 model """convkb""" +127 24 loss """nssa""" +127 24 regularizer """no""" +127 24 optimizer """adam""" +127 24 training_loop """owa""" +127 24 negative_sampler """basic""" +127 24 evaluator """rankbased""" +128 1 model.embedding_dim 2.0 +128 1 model.hidden_dropout_rate 0.41710437393043776 +128 1 model.num_filters 4.0 +128 1 optimizer.lr 0.00704857356000007 +128 1 training.batch_size 0.0 +128 1 training.label_smoothing 0.007945173171080238 +128 2 model.embedding_dim 0.0 +128 2 model.hidden_dropout_rate 0.21535752204794997 +128 2 model.num_filters 6.0 +128 2 optimizer.lr 0.031033857444423132 +128 2 training.batch_size 2.0 +128 2 training.label_smoothing 
0.0028253191398261036 +128 3 model.embedding_dim 0.0 +128 3 model.hidden_dropout_rate 0.39870579392897554 +128 3 model.num_filters 2.0 +128 3 optimizer.lr 0.007518327708844102 +128 3 training.batch_size 2.0 +128 3 training.label_smoothing 0.012374713426645683 +128 4 model.embedding_dim 2.0 +128 4 model.hidden_dropout_rate 0.11227233654152041 +128 4 model.num_filters 8.0 +128 4 optimizer.lr 0.003227873653773943 +128 4 training.batch_size 2.0 +128 4 training.label_smoothing 0.0010636368519558487 +128 5 model.embedding_dim 1.0 +128 5 model.hidden_dropout_rate 0.11484660666690477 +128 5 model.num_filters 1.0 +128 5 optimizer.lr 0.09226876406403915 +128 5 training.batch_size 1.0 +128 5 training.label_smoothing 0.31160435237373296 +128 6 model.embedding_dim 0.0 +128 6 model.hidden_dropout_rate 0.31016380740347677 +128 6 model.num_filters 9.0 +128 6 optimizer.lr 0.0020059164303440893 +128 6 training.batch_size 1.0 +128 6 training.label_smoothing 0.10290361606284872 +128 7 model.embedding_dim 0.0 +128 7 model.hidden_dropout_rate 0.336319998126723 +128 7 model.num_filters 0.0 +128 7 optimizer.lr 0.02041889439524617 +128 7 training.batch_size 2.0 +128 7 training.label_smoothing 0.08450994466892006 +128 8 model.embedding_dim 0.0 +128 8 model.hidden_dropout_rate 0.1413479099860621 +128 8 model.num_filters 3.0 +128 8 optimizer.lr 0.007425911769263633 +128 8 training.batch_size 2.0 +128 8 training.label_smoothing 0.0017000088205336903 +128 9 model.embedding_dim 2.0 +128 9 model.hidden_dropout_rate 0.49891221182664647 +128 9 model.num_filters 9.0 +128 9 optimizer.lr 0.0017825733829707922 +128 9 training.batch_size 0.0 +128 9 training.label_smoothing 0.5086287773438788 +128 10 model.embedding_dim 0.0 +128 10 model.hidden_dropout_rate 0.20348993984177344 +128 10 model.num_filters 7.0 +128 10 optimizer.lr 0.06536152477471374 +128 10 training.batch_size 2.0 +128 10 training.label_smoothing 0.0021769115204463767 +128 11 model.embedding_dim 0.0 +128 11 model.hidden_dropout_rate 
0.3666824134239378 +128 11 model.num_filters 6.0 +128 11 optimizer.lr 0.03307177222116162 +128 11 training.batch_size 0.0 +128 11 training.label_smoothing 0.07146045942439516 +128 12 model.embedding_dim 2.0 +128 12 model.hidden_dropout_rate 0.40696749802887366 +128 12 model.num_filters 0.0 +128 12 optimizer.lr 0.0037652547612668785 +128 12 training.batch_size 2.0 +128 12 training.label_smoothing 0.06674025909021489 +128 13 model.embedding_dim 0.0 +128 13 model.hidden_dropout_rate 0.47169656329971377 +128 13 model.num_filters 9.0 +128 13 optimizer.lr 0.0022615366212820867 +128 13 training.batch_size 2.0 +128 13 training.label_smoothing 0.4252593639743286 +128 14 model.embedding_dim 1.0 +128 14 model.hidden_dropout_rate 0.4077497025292055 +128 14 model.num_filters 7.0 +128 14 optimizer.lr 0.015090864260278423 +128 14 training.batch_size 2.0 +128 14 training.label_smoothing 0.022022546340051626 +128 15 model.embedding_dim 1.0 +128 15 model.hidden_dropout_rate 0.17206329753435756 +128 15 model.num_filters 2.0 +128 15 optimizer.lr 0.01877022910022299 +128 15 training.batch_size 0.0 +128 15 training.label_smoothing 0.039126548306828816 +128 16 model.embedding_dim 1.0 +128 16 model.hidden_dropout_rate 0.42409852050096297 +128 16 model.num_filters 1.0 +128 16 optimizer.lr 0.005021050534803191 +128 16 training.batch_size 0.0 +128 16 training.label_smoothing 0.15865487139762396 +128 17 model.embedding_dim 1.0 +128 17 model.hidden_dropout_rate 0.32115391236649093 +128 17 model.num_filters 7.0 +128 17 optimizer.lr 0.008401795558190444 +128 17 training.batch_size 1.0 +128 17 training.label_smoothing 0.025516757204253126 +128 18 model.embedding_dim 1.0 +128 18 model.hidden_dropout_rate 0.4748125334138885 +128 18 model.num_filters 6.0 +128 18 optimizer.lr 0.0033708197830266116 +128 18 training.batch_size 1.0 +128 18 training.label_smoothing 0.23134286216154543 +128 19 model.embedding_dim 0.0 +128 19 model.hidden_dropout_rate 0.30939515440630144 +128 19 model.num_filters 2.0 +128 
19 optimizer.lr 0.006194361605897524 +128 19 training.batch_size 0.0 +128 19 training.label_smoothing 0.4941927317672302 +128 20 model.embedding_dim 0.0 +128 20 model.hidden_dropout_rate 0.2906103456938024 +128 20 model.num_filters 9.0 +128 20 optimizer.lr 0.006783826390819957 +128 20 training.batch_size 1.0 +128 20 training.label_smoothing 0.05441618525891011 +128 21 model.embedding_dim 2.0 +128 21 model.hidden_dropout_rate 0.30064157802787606 +128 21 model.num_filters 6.0 +128 21 optimizer.lr 0.01941768890689609 +128 21 training.batch_size 1.0 +128 21 training.label_smoothing 0.06315474509287647 +128 22 model.embedding_dim 1.0 +128 22 model.hidden_dropout_rate 0.20529896488466212 +128 22 model.num_filters 2.0 +128 22 optimizer.lr 0.013091070697118238 +128 22 training.batch_size 0.0 +128 22 training.label_smoothing 0.0016900870510773872 +128 23 model.embedding_dim 0.0 +128 23 model.hidden_dropout_rate 0.35075790219221203 +128 23 model.num_filters 5.0 +128 23 optimizer.lr 0.015181724170895167 +128 23 training.batch_size 2.0 +128 23 training.label_smoothing 0.01128035252622947 +128 24 model.embedding_dim 0.0 +128 24 model.hidden_dropout_rate 0.19373604633206354 +128 24 model.num_filters 4.0 +128 24 optimizer.lr 0.07628729100061206 +128 24 training.batch_size 0.0 +128 24 training.label_smoothing 0.04535333129986377 +128 25 model.embedding_dim 2.0 +128 25 model.hidden_dropout_rate 0.4380836293860383 +128 25 model.num_filters 4.0 +128 25 optimizer.lr 0.04569293842535083 +128 25 training.batch_size 1.0 +128 25 training.label_smoothing 0.013790337195715183 +128 26 model.embedding_dim 1.0 +128 26 model.hidden_dropout_rate 0.1023216501056786 +128 26 model.num_filters 0.0 +128 26 optimizer.lr 0.0021869690359300653 +128 26 training.batch_size 0.0 +128 26 training.label_smoothing 0.011756276373940734 +128 27 model.embedding_dim 0.0 +128 27 model.hidden_dropout_rate 0.17887088216810168 +128 27 model.num_filters 4.0 +128 27 optimizer.lr 0.0015030445384144804 +128 27 
training.batch_size 2.0 +128 27 training.label_smoothing 0.013022902295377633 +128 28 model.embedding_dim 2.0 +128 28 model.hidden_dropout_rate 0.4576336162053748 +128 28 model.num_filters 7.0 +128 28 optimizer.lr 0.01157083038352691 +128 28 training.batch_size 2.0 +128 28 training.label_smoothing 0.05236836465691629 +128 29 model.embedding_dim 1.0 +128 29 model.hidden_dropout_rate 0.4347641974573534 +128 29 model.num_filters 6.0 +128 29 optimizer.lr 0.00364188799533724 +128 29 training.batch_size 1.0 +128 29 training.label_smoothing 0.5637655018789612 +128 30 model.embedding_dim 0.0 +128 30 model.hidden_dropout_rate 0.3035005898998181 +128 30 model.num_filters 1.0 +128 30 optimizer.lr 0.037914808482803715 +128 30 training.batch_size 1.0 +128 30 training.label_smoothing 0.00404068952652689 +128 31 model.embedding_dim 2.0 +128 31 model.hidden_dropout_rate 0.15399680241388358 +128 31 model.num_filters 4.0 +128 31 optimizer.lr 0.016995930799423017 +128 31 training.batch_size 0.0 +128 31 training.label_smoothing 0.09966656051906321 +128 32 model.embedding_dim 2.0 +128 32 model.hidden_dropout_rate 0.2032963952350189 +128 32 model.num_filters 8.0 +128 32 optimizer.lr 0.011262783440463565 +128 32 training.batch_size 0.0 +128 32 training.label_smoothing 0.05134688880172063 +128 33 model.embedding_dim 2.0 +128 33 model.hidden_dropout_rate 0.28989788749680323 +128 33 model.num_filters 6.0 +128 33 optimizer.lr 0.01002718386310754 +128 33 training.batch_size 1.0 +128 33 training.label_smoothing 0.002534194315064897 +128 34 model.embedding_dim 0.0 +128 34 model.hidden_dropout_rate 0.2267723274115805 +128 34 model.num_filters 8.0 +128 34 optimizer.lr 0.08929731620880238 +128 34 training.batch_size 1.0 +128 34 training.label_smoothing 0.1578196060493553 +128 35 model.embedding_dim 2.0 +128 35 model.hidden_dropout_rate 0.2482387503174926 +128 35 model.num_filters 3.0 +128 35 optimizer.lr 0.03738769964950507 +128 35 training.batch_size 2.0 +128 35 training.label_smoothing 
0.001333914663524036 +128 36 model.embedding_dim 2.0 +128 36 model.hidden_dropout_rate 0.2273873201146384 +128 36 model.num_filters 3.0 +128 36 optimizer.lr 0.002446689873590754 +128 36 training.batch_size 0.0 +128 36 training.label_smoothing 0.003756566515083499 +128 37 model.embedding_dim 2.0 +128 37 model.hidden_dropout_rate 0.20480772379593334 +128 37 model.num_filters 0.0 +128 37 optimizer.lr 0.008060941541844113 +128 37 training.batch_size 0.0 +128 37 training.label_smoothing 0.00201692699391648 +128 38 model.embedding_dim 0.0 +128 38 model.hidden_dropout_rate 0.4246594652717738 +128 38 model.num_filters 5.0 +128 38 optimizer.lr 0.07557790890803244 +128 38 training.batch_size 0.0 +128 38 training.label_smoothing 0.992266175728427 +128 39 model.embedding_dim 1.0 +128 39 model.hidden_dropout_rate 0.30005608918800286 +128 39 model.num_filters 5.0 +128 39 optimizer.lr 0.0023723645654535647 +128 39 training.batch_size 0.0 +128 39 training.label_smoothing 0.04965199123091231 +128 40 model.embedding_dim 0.0 +128 40 model.hidden_dropout_rate 0.2729970548435561 +128 40 model.num_filters 3.0 +128 40 optimizer.lr 0.005211092139568392 +128 40 training.batch_size 2.0 +128 40 training.label_smoothing 0.014635922979163777 +128 41 model.embedding_dim 1.0 +128 41 model.hidden_dropout_rate 0.4643514203815634 +128 41 model.num_filters 2.0 +128 41 optimizer.lr 0.03455971477150061 +128 41 training.batch_size 2.0 +128 41 training.label_smoothing 0.7085037663598229 +128 1 dataset """kinships""" +128 1 model """convkb""" +128 1 loss """crossentropy""" +128 1 regularizer """no""" +128 1 optimizer """adam""" +128 1 training_loop """lcwa""" +128 1 evaluator """rankbased""" +128 2 dataset """kinships""" +128 2 model """convkb""" +128 2 loss """crossentropy""" +128 2 regularizer """no""" +128 2 optimizer """adam""" +128 2 training_loop """lcwa""" +128 2 evaluator """rankbased""" +128 3 dataset """kinships""" +128 3 model """convkb""" +128 3 loss """crossentropy""" +128 3 regularizer 
"""no""" +128 3 optimizer """adam""" +128 3 training_loop """lcwa""" +128 3 evaluator """rankbased""" +128 4 dataset """kinships""" +128 4 model """convkb""" +128 4 loss """crossentropy""" +128 4 regularizer """no""" +128 4 optimizer """adam""" +128 4 training_loop """lcwa""" +128 4 evaluator """rankbased""" +128 5 dataset """kinships""" +128 5 model """convkb""" +128 5 loss """crossentropy""" +128 5 regularizer """no""" +128 5 optimizer """adam""" +128 5 training_loop """lcwa""" +128 5 evaluator """rankbased""" +128 6 dataset """kinships""" +128 6 model """convkb""" +128 6 loss """crossentropy""" +128 6 regularizer """no""" +128 6 optimizer """adam""" +128 6 training_loop """lcwa""" +128 6 evaluator """rankbased""" +128 7 dataset """kinships""" +128 7 model """convkb""" +128 7 loss """crossentropy""" +128 7 regularizer """no""" +128 7 optimizer """adam""" +128 7 training_loop """lcwa""" +128 7 evaluator """rankbased""" +128 8 dataset """kinships""" +128 8 model """convkb""" +128 8 loss """crossentropy""" +128 8 regularizer """no""" +128 8 optimizer """adam""" +128 8 training_loop """lcwa""" +128 8 evaluator """rankbased""" +128 9 dataset """kinships""" +128 9 model """convkb""" +128 9 loss """crossentropy""" +128 9 regularizer """no""" +128 9 optimizer """adam""" +128 9 training_loop """lcwa""" +128 9 evaluator """rankbased""" +128 10 dataset """kinships""" +128 10 model """convkb""" +128 10 loss """crossentropy""" +128 10 regularizer """no""" +128 10 optimizer """adam""" +128 10 training_loop """lcwa""" +128 10 evaluator """rankbased""" +128 11 dataset """kinships""" +128 11 model """convkb""" +128 11 loss """crossentropy""" +128 11 regularizer """no""" +128 11 optimizer """adam""" +128 11 training_loop """lcwa""" +128 11 evaluator """rankbased""" +128 12 dataset """kinships""" +128 12 model """convkb""" +128 12 loss """crossentropy""" +128 12 regularizer """no""" +128 12 optimizer """adam""" +128 12 training_loop """lcwa""" +128 12 evaluator """rankbased""" +128 
13 dataset """kinships""" +128 13 model """convkb""" +128 13 loss """crossentropy""" +128 13 regularizer """no""" +128 13 optimizer """adam""" +128 13 training_loop """lcwa""" +128 13 evaluator """rankbased""" +128 14 dataset """kinships""" +128 14 model """convkb""" +128 14 loss """crossentropy""" +128 14 regularizer """no""" +128 14 optimizer """adam""" +128 14 training_loop """lcwa""" +128 14 evaluator """rankbased""" +128 15 dataset """kinships""" +128 15 model """convkb""" +128 15 loss """crossentropy""" +128 15 regularizer """no""" +128 15 optimizer """adam""" +128 15 training_loop """lcwa""" +128 15 evaluator """rankbased""" +128 16 dataset """kinships""" +128 16 model """convkb""" +128 16 loss """crossentropy""" +128 16 regularizer """no""" +128 16 optimizer """adam""" +128 16 training_loop """lcwa""" +128 16 evaluator """rankbased""" +128 17 dataset """kinships""" +128 17 model """convkb""" +128 17 loss """crossentropy""" +128 17 regularizer """no""" +128 17 optimizer """adam""" +128 17 training_loop """lcwa""" +128 17 evaluator """rankbased""" +128 18 dataset """kinships""" +128 18 model """convkb""" +128 18 loss """crossentropy""" +128 18 regularizer """no""" +128 18 optimizer """adam""" +128 18 training_loop """lcwa""" +128 18 evaluator """rankbased""" +128 19 dataset """kinships""" +128 19 model """convkb""" +128 19 loss """crossentropy""" +128 19 regularizer """no""" +128 19 optimizer """adam""" +128 19 training_loop """lcwa""" +128 19 evaluator """rankbased""" +128 20 dataset """kinships""" +128 20 model """convkb""" +128 20 loss """crossentropy""" +128 20 regularizer """no""" +128 20 optimizer """adam""" +128 20 training_loop """lcwa""" +128 20 evaluator """rankbased""" +128 21 dataset """kinships""" +128 21 model """convkb""" +128 21 loss """crossentropy""" +128 21 regularizer """no""" +128 21 optimizer """adam""" +128 21 training_loop """lcwa""" +128 21 evaluator """rankbased""" +128 22 dataset """kinships""" +128 22 model """convkb""" +128 22 
loss """crossentropy""" +128 22 regularizer """no""" +128 22 optimizer """adam""" +128 22 training_loop """lcwa""" +128 22 evaluator """rankbased""" +128 23 dataset """kinships""" +128 23 model """convkb""" +128 23 loss """crossentropy""" +128 23 regularizer """no""" +128 23 optimizer """adam""" +128 23 training_loop """lcwa""" +128 23 evaluator """rankbased""" +128 24 dataset """kinships""" +128 24 model """convkb""" +128 24 loss """crossentropy""" +128 24 regularizer """no""" +128 24 optimizer """adam""" +128 24 training_loop """lcwa""" +128 24 evaluator """rankbased""" +128 25 dataset """kinships""" +128 25 model """convkb""" +128 25 loss """crossentropy""" +128 25 regularizer """no""" +128 25 optimizer """adam""" +128 25 training_loop """lcwa""" +128 25 evaluator """rankbased""" +128 26 dataset """kinships""" +128 26 model """convkb""" +128 26 loss """crossentropy""" +128 26 regularizer """no""" +128 26 optimizer """adam""" +128 26 training_loop """lcwa""" +128 26 evaluator """rankbased""" +128 27 dataset """kinships""" +128 27 model """convkb""" +128 27 loss """crossentropy""" +128 27 regularizer """no""" +128 27 optimizer """adam""" +128 27 training_loop """lcwa""" +128 27 evaluator """rankbased""" +128 28 dataset """kinships""" +128 28 model """convkb""" +128 28 loss """crossentropy""" +128 28 regularizer """no""" +128 28 optimizer """adam""" +128 28 training_loop """lcwa""" +128 28 evaluator """rankbased""" +128 29 dataset """kinships""" +128 29 model """convkb""" +128 29 loss """crossentropy""" +128 29 regularizer """no""" +128 29 optimizer """adam""" +128 29 training_loop """lcwa""" +128 29 evaluator """rankbased""" +128 30 dataset """kinships""" +128 30 model """convkb""" +128 30 loss """crossentropy""" +128 30 regularizer """no""" +128 30 optimizer """adam""" +128 30 training_loop """lcwa""" +128 30 evaluator """rankbased""" +128 31 dataset """kinships""" +128 31 model """convkb""" +128 31 loss """crossentropy""" +128 31 regularizer """no""" +128 31 
optimizer """adam""" +128 31 training_loop """lcwa""" +128 31 evaluator """rankbased""" +128 32 dataset """kinships""" +128 32 model """convkb""" +128 32 loss """crossentropy""" +128 32 regularizer """no""" +128 32 optimizer """adam""" +128 32 training_loop """lcwa""" +128 32 evaluator """rankbased""" +128 33 dataset """kinships""" +128 33 model """convkb""" +128 33 loss """crossentropy""" +128 33 regularizer """no""" +128 33 optimizer """adam""" +128 33 training_loop """lcwa""" +128 33 evaluator """rankbased""" +128 34 dataset """kinships""" +128 34 model """convkb""" +128 34 loss """crossentropy""" +128 34 regularizer """no""" +128 34 optimizer """adam""" +128 34 training_loop """lcwa""" +128 34 evaluator """rankbased""" +128 35 dataset """kinships""" +128 35 model """convkb""" +128 35 loss """crossentropy""" +128 35 regularizer """no""" +128 35 optimizer """adam""" +128 35 training_loop """lcwa""" +128 35 evaluator """rankbased""" +128 36 dataset """kinships""" +128 36 model """convkb""" +128 36 loss """crossentropy""" +128 36 regularizer """no""" +128 36 optimizer """adam""" +128 36 training_loop """lcwa""" +128 36 evaluator """rankbased""" +128 37 dataset """kinships""" +128 37 model """convkb""" +128 37 loss """crossentropy""" +128 37 regularizer """no""" +128 37 optimizer """adam""" +128 37 training_loop """lcwa""" +128 37 evaluator """rankbased""" +128 38 dataset """kinships""" +128 38 model """convkb""" +128 38 loss """crossentropy""" +128 38 regularizer """no""" +128 38 optimizer """adam""" +128 38 training_loop """lcwa""" +128 38 evaluator """rankbased""" +128 39 dataset """kinships""" +128 39 model """convkb""" +128 39 loss """crossentropy""" +128 39 regularizer """no""" +128 39 optimizer """adam""" +128 39 training_loop """lcwa""" +128 39 evaluator """rankbased""" +128 40 dataset """kinships""" +128 40 model """convkb""" +128 40 loss """crossentropy""" +128 40 regularizer """no""" +128 40 optimizer """adam""" +128 40 training_loop """lcwa""" +128 40 
evaluator """rankbased""" +128 41 dataset """kinships""" +128 41 model """convkb""" +128 41 loss """crossentropy""" +128 41 regularizer """no""" +128 41 optimizer """adam""" +128 41 training_loop """lcwa""" +128 41 evaluator """rankbased""" +129 1 model.embedding_dim 1.0 +129 1 model.hidden_dropout_rate 0.1711660108987445 +129 1 model.num_filters 9.0 +129 1 optimizer.lr 0.0015167479443813521 +129 1 training.batch_size 0.0 +129 1 training.label_smoothing 0.00999521467285175 +129 2 model.embedding_dim 1.0 +129 2 model.hidden_dropout_rate 0.3417089349013642 +129 2 model.num_filters 6.0 +129 2 optimizer.lr 0.012884113370365033 +129 2 training.batch_size 0.0 +129 2 training.label_smoothing 0.004229762618071372 +129 3 model.embedding_dim 2.0 +129 3 model.hidden_dropout_rate 0.3068206622639459 +129 3 model.num_filters 8.0 +129 3 optimizer.lr 0.018928987965951855 +129 3 training.batch_size 2.0 +129 3 training.label_smoothing 0.6445105521834171 +129 4 model.embedding_dim 1.0 +129 4 model.hidden_dropout_rate 0.1971679952337742 +129 4 model.num_filters 2.0 +129 4 optimizer.lr 0.045550328672353535 +129 4 training.batch_size 2.0 +129 4 training.label_smoothing 0.03605479059427176 +129 5 model.embedding_dim 0.0 +129 5 model.hidden_dropout_rate 0.20929986698128983 +129 5 model.num_filters 0.0 +129 5 optimizer.lr 0.025852168469085373 +129 5 training.batch_size 0.0 +129 5 training.label_smoothing 0.017085819368908604 +129 6 model.embedding_dim 1.0 +129 6 model.hidden_dropout_rate 0.11018140349286028 +129 6 model.num_filters 6.0 +129 6 optimizer.lr 0.013666304250724183 +129 6 training.batch_size 2.0 +129 6 training.label_smoothing 0.15113900397048324 +129 7 model.embedding_dim 1.0 +129 7 model.hidden_dropout_rate 0.2660605976839794 +129 7 model.num_filters 1.0 +129 7 optimizer.lr 0.004328526684335822 +129 7 training.batch_size 2.0 +129 7 training.label_smoothing 0.017748819515434187 +129 8 model.embedding_dim 1.0 +129 8 model.hidden_dropout_rate 0.17553796951419365 +129 8 
model.num_filters 2.0 +129 8 optimizer.lr 0.001085041384465145 +129 8 training.batch_size 0.0 +129 8 training.label_smoothing 0.4759472173262428 +129 9 model.embedding_dim 2.0 +129 9 model.hidden_dropout_rate 0.30442304946787446 +129 9 model.num_filters 7.0 +129 9 optimizer.lr 0.0364583484854928 +129 9 training.batch_size 1.0 +129 9 training.label_smoothing 0.566581938552528 +129 10 model.embedding_dim 1.0 +129 10 model.hidden_dropout_rate 0.4883891332212825 +129 10 model.num_filters 8.0 +129 10 optimizer.lr 0.002929401147072493 +129 10 training.batch_size 2.0 +129 10 training.label_smoothing 0.004112426191717446 +129 11 model.embedding_dim 2.0 +129 11 model.hidden_dropout_rate 0.47793708303643945 +129 11 model.num_filters 1.0 +129 11 optimizer.lr 0.014453245642918666 +129 11 training.batch_size 1.0 +129 11 training.label_smoothing 0.5998748898247049 +129 12 model.embedding_dim 1.0 +129 12 model.hidden_dropout_rate 0.30073513547417585 +129 12 model.num_filters 8.0 +129 12 optimizer.lr 0.004301503930866238 +129 12 training.batch_size 1.0 +129 12 training.label_smoothing 0.0015021212691243677 +129 13 model.embedding_dim 2.0 +129 13 model.hidden_dropout_rate 0.3289831233315688 +129 13 model.num_filters 9.0 +129 13 optimizer.lr 0.04585273859763939 +129 13 training.batch_size 2.0 +129 13 training.label_smoothing 0.07331716852441988 +129 14 model.embedding_dim 1.0 +129 14 model.hidden_dropout_rate 0.46062346439556445 +129 14 model.num_filters 2.0 +129 14 optimizer.lr 0.04729735539788695 +129 14 training.batch_size 1.0 +129 14 training.label_smoothing 0.7291472725925889 +129 15 model.embedding_dim 0.0 +129 15 model.hidden_dropout_rate 0.47499867970945375 +129 15 model.num_filters 0.0 +129 15 optimizer.lr 0.001058549055498343 +129 15 training.batch_size 0.0 +129 15 training.label_smoothing 0.00859061983682421 +129 16 model.embedding_dim 0.0 +129 16 model.hidden_dropout_rate 0.23847069667821152 +129 16 model.num_filters 6.0 +129 16 optimizer.lr 0.022879903707423203 +129 16 
training.batch_size 1.0 +129 16 training.label_smoothing 0.08398577282954867 +129 17 model.embedding_dim 2.0 +129 17 model.hidden_dropout_rate 0.34936025689488087 +129 17 model.num_filters 5.0 +129 17 optimizer.lr 0.013839329377255972 +129 17 training.batch_size 1.0 +129 17 training.label_smoothing 0.015651861205824597 +129 18 model.embedding_dim 1.0 +129 18 model.hidden_dropout_rate 0.16595479871597868 +129 18 model.num_filters 5.0 +129 18 optimizer.lr 0.007689791151692933 +129 18 training.batch_size 0.0 +129 18 training.label_smoothing 0.008002306404685489 +129 19 model.embedding_dim 2.0 +129 19 model.hidden_dropout_rate 0.15398382856950624 +129 19 model.num_filters 7.0 +129 19 optimizer.lr 0.023352556416504345 +129 19 training.batch_size 2.0 +129 19 training.label_smoothing 0.013776779905313763 +129 20 model.embedding_dim 2.0 +129 20 model.hidden_dropout_rate 0.4814516168560623 +129 20 model.num_filters 3.0 +129 20 optimizer.lr 0.001686245483384576 +129 20 training.batch_size 0.0 +129 20 training.label_smoothing 0.001256402207428319 +129 21 model.embedding_dim 2.0 +129 21 model.hidden_dropout_rate 0.14464923699689833 +129 21 model.num_filters 7.0 +129 21 optimizer.lr 0.026195588024011 +129 21 training.batch_size 0.0 +129 21 training.label_smoothing 0.04207338882149393 +129 22 model.embedding_dim 0.0 +129 22 model.hidden_dropout_rate 0.4508052138417017 +129 22 model.num_filters 9.0 +129 22 optimizer.lr 0.01921114736251509 +129 22 training.batch_size 2.0 +129 22 training.label_smoothing 0.20444685979740523 +129 23 model.embedding_dim 2.0 +129 23 model.hidden_dropout_rate 0.4517120573771378 +129 23 model.num_filters 2.0 +129 23 optimizer.lr 0.0021187436346938432 +129 23 training.batch_size 0.0 +129 23 training.label_smoothing 0.8456908630401333 +129 24 model.embedding_dim 1.0 +129 24 model.hidden_dropout_rate 0.16345155881508255 +129 24 model.num_filters 0.0 +129 24 optimizer.lr 0.09717963732457986 +129 24 training.batch_size 2.0 +129 24 training.label_smoothing 
0.0012015125718976148 +129 25 model.embedding_dim 1.0 +129 25 model.hidden_dropout_rate 0.2173481073143123 +129 25 model.num_filters 2.0 +129 25 optimizer.lr 0.021561700154545452 +129 25 training.batch_size 0.0 +129 25 training.label_smoothing 0.0023551100769159832 +129 26 model.embedding_dim 0.0 +129 26 model.hidden_dropout_rate 0.3338327150906756 +129 26 model.num_filters 2.0 +129 26 optimizer.lr 0.015876469316583177 +129 26 training.batch_size 0.0 +129 26 training.label_smoothing 0.006709342962624742 +129 27 model.embedding_dim 0.0 +129 27 model.hidden_dropout_rate 0.3844422089641616 +129 27 model.num_filters 8.0 +129 27 optimizer.lr 0.0025769598977380638 +129 27 training.batch_size 1.0 +129 27 training.label_smoothing 0.04511427083153255 +129 28 model.embedding_dim 1.0 +129 28 model.hidden_dropout_rate 0.3709212854438113 +129 28 model.num_filters 9.0 +129 28 optimizer.lr 0.014448887017609586 +129 28 training.batch_size 1.0 +129 28 training.label_smoothing 0.17912873823776104 +129 29 model.embedding_dim 0.0 +129 29 model.hidden_dropout_rate 0.17143918706822409 +129 29 model.num_filters 7.0 +129 29 optimizer.lr 0.014200358549208949 +129 29 training.batch_size 1.0 +129 29 training.label_smoothing 0.20892732383893683 +129 30 model.embedding_dim 0.0 +129 30 model.hidden_dropout_rate 0.13194653664491707 +129 30 model.num_filters 5.0 +129 30 optimizer.lr 0.03832606937636041 +129 30 training.batch_size 1.0 +129 30 training.label_smoothing 0.37646090832958085 +129 31 model.embedding_dim 2.0 +129 31 model.hidden_dropout_rate 0.2799382777699756 +129 31 model.num_filters 6.0 +129 31 optimizer.lr 0.04066313838691631 +129 31 training.batch_size 2.0 +129 31 training.label_smoothing 0.034352736124181556 +129 32 model.embedding_dim 0.0 +129 32 model.hidden_dropout_rate 0.2508728459854175 +129 32 model.num_filters 7.0 +129 32 optimizer.lr 0.010367932587526935 +129 32 training.batch_size 2.0 +129 32 training.label_smoothing 0.013931394058141009 +129 33 model.embedding_dim 2.0 
+129 33 model.hidden_dropout_rate 0.4165097854302724 +129 33 model.num_filters 9.0 +129 33 optimizer.lr 0.0011426050363140866 +129 33 training.batch_size 0.0 +129 33 training.label_smoothing 0.10272333239404909 +129 34 model.embedding_dim 2.0 +129 34 model.hidden_dropout_rate 0.4222795357226685 +129 34 model.num_filters 1.0 +129 34 optimizer.lr 0.007928328625212753 +129 34 training.batch_size 2.0 +129 34 training.label_smoothing 0.0811939653557405 +129 35 model.embedding_dim 0.0 +129 35 model.hidden_dropout_rate 0.1183537655131946 +129 35 model.num_filters 1.0 +129 35 optimizer.lr 0.09573365272445584 +129 35 training.batch_size 2.0 +129 35 training.label_smoothing 0.0010149479452835279 +129 36 model.embedding_dim 2.0 +129 36 model.hidden_dropout_rate 0.1550606864797945 +129 36 model.num_filters 2.0 +129 36 optimizer.lr 0.015643197310261748 +129 36 training.batch_size 2.0 +129 36 training.label_smoothing 0.9458664422873981 +129 37 model.embedding_dim 1.0 +129 37 model.hidden_dropout_rate 0.2619717021794845 +129 37 model.num_filters 9.0 +129 37 optimizer.lr 0.06739062666497049 +129 37 training.batch_size 0.0 +129 37 training.label_smoothing 0.007260888205322454 +129 38 model.embedding_dim 2.0 +129 38 model.hidden_dropout_rate 0.4342564291679035 +129 38 model.num_filters 6.0 +129 38 optimizer.lr 0.04087925178485424 +129 38 training.batch_size 2.0 +129 38 training.label_smoothing 0.0015296780200017262 +129 39 model.embedding_dim 0.0 +129 39 model.hidden_dropout_rate 0.3620327402099097 +129 39 model.num_filters 1.0 +129 39 optimizer.lr 0.0049808447722303325 +129 39 training.batch_size 2.0 +129 39 training.label_smoothing 0.41689799561242896 +129 40 model.embedding_dim 0.0 +129 40 model.hidden_dropout_rate 0.22831797222200484 +129 40 model.num_filters 7.0 +129 40 optimizer.lr 0.007337121743989323 +129 40 training.batch_size 0.0 +129 40 training.label_smoothing 0.0034456174061750352 +129 41 model.embedding_dim 1.0 +129 41 model.hidden_dropout_rate 0.22322990150685984 +129 
41 model.num_filters 4.0 +129 41 optimizer.lr 0.004185422577227561 +129 41 training.batch_size 1.0 +129 41 training.label_smoothing 0.302168105623587 +129 42 model.embedding_dim 0.0 +129 42 model.hidden_dropout_rate 0.19564787334171224 +129 42 model.num_filters 9.0 +129 42 optimizer.lr 0.00438748057389219 +129 42 training.batch_size 1.0 +129 42 training.label_smoothing 0.0015385167416331865 +129 43 model.embedding_dim 2.0 +129 43 model.hidden_dropout_rate 0.24348334732016969 +129 43 model.num_filters 8.0 +129 43 optimizer.lr 0.005827928823055237 +129 43 training.batch_size 0.0 +129 43 training.label_smoothing 0.0017262353101698648 +129 44 model.embedding_dim 0.0 +129 44 model.hidden_dropout_rate 0.43594750665087945 +129 44 model.num_filters 8.0 +129 44 optimizer.lr 0.0044705261140111 +129 44 training.batch_size 1.0 +129 44 training.label_smoothing 0.002655783698392217 +129 45 model.embedding_dim 1.0 +129 45 model.hidden_dropout_rate 0.3702522914939572 +129 45 model.num_filters 0.0 +129 45 optimizer.lr 0.05325765722668091 +129 45 training.batch_size 1.0 +129 45 training.label_smoothing 0.006482352613904275 +129 46 model.embedding_dim 2.0 +129 46 model.hidden_dropout_rate 0.41027118746102875 +129 46 model.num_filters 5.0 +129 46 optimizer.lr 0.01827694332054177 +129 46 training.batch_size 0.0 +129 46 training.label_smoothing 0.0028092734088078796 +129 47 model.embedding_dim 1.0 +129 47 model.hidden_dropout_rate 0.4540371140043411 +129 47 model.num_filters 1.0 +129 47 optimizer.lr 0.002552276875248662 +129 47 training.batch_size 1.0 +129 47 training.label_smoothing 0.41955215251069516 +129 48 model.embedding_dim 0.0 +129 48 model.hidden_dropout_rate 0.11477187197253708 +129 48 model.num_filters 5.0 +129 48 optimizer.lr 0.03852445318656952 +129 48 training.batch_size 2.0 +129 48 training.label_smoothing 0.007734933959774214 +129 49 model.embedding_dim 0.0 +129 49 model.hidden_dropout_rate 0.25179704858590235 +129 49 model.num_filters 5.0 +129 49 optimizer.lr 
0.0451323138590498 +129 49 training.batch_size 2.0 +129 49 training.label_smoothing 0.0017767535452163856 +129 50 model.embedding_dim 2.0 +129 50 model.hidden_dropout_rate 0.3332968568885043 +129 50 model.num_filters 8.0 +129 50 optimizer.lr 0.00417272916840445 +129 50 training.batch_size 0.0 +129 50 training.label_smoothing 0.02284547962550461 +129 51 model.embedding_dim 1.0 +129 51 model.hidden_dropout_rate 0.19093237497197435 +129 51 model.num_filters 4.0 +129 51 optimizer.lr 0.029954006876172005 +129 51 training.batch_size 1.0 +129 51 training.label_smoothing 0.07697459324786068 +129 52 model.embedding_dim 2.0 +129 52 model.hidden_dropout_rate 0.17613375546174118 +129 52 model.num_filters 3.0 +129 52 optimizer.lr 0.0017359787797301513 +129 52 training.batch_size 2.0 +129 52 training.label_smoothing 0.0022071494854135516 +129 53 model.embedding_dim 2.0 +129 53 model.hidden_dropout_rate 0.45190509120664635 +129 53 model.num_filters 0.0 +129 53 optimizer.lr 0.0012990002025323926 +129 53 training.batch_size 1.0 +129 53 training.label_smoothing 0.014819979841812127 +129 54 model.embedding_dim 1.0 +129 54 model.hidden_dropout_rate 0.1215295016573048 +129 54 model.num_filters 0.0 +129 54 optimizer.lr 0.057176664097011574 +129 54 training.batch_size 0.0 +129 54 training.label_smoothing 0.01142876581135671 +129 55 model.embedding_dim 2.0 +129 55 model.hidden_dropout_rate 0.3143808842474689 +129 55 model.num_filters 5.0 +129 55 optimizer.lr 0.007658764151350332 +129 55 training.batch_size 0.0 +129 55 training.label_smoothing 0.029466764665644456 +129 1 dataset """kinships""" +129 1 model """convkb""" +129 1 loss """crossentropy""" +129 1 regularizer """no""" +129 1 optimizer """adam""" +129 1 training_loop """lcwa""" +129 1 evaluator """rankbased""" +129 2 dataset """kinships""" +129 2 model """convkb""" +129 2 loss """crossentropy""" +129 2 regularizer """no""" +129 2 optimizer """adam""" +129 2 training_loop """lcwa""" +129 2 evaluator """rankbased""" +129 3 dataset 
"""kinships""" +129 3 model """convkb""" +129 3 loss """crossentropy""" +129 3 regularizer """no""" +129 3 optimizer """adam""" +129 3 training_loop """lcwa""" +129 3 evaluator """rankbased""" +129 4 dataset """kinships""" +129 4 model """convkb""" +129 4 loss """crossentropy""" +129 4 regularizer """no""" +129 4 optimizer """adam""" +129 4 training_loop """lcwa""" +129 4 evaluator """rankbased""" +129 5 dataset """kinships""" +129 5 model """convkb""" +129 5 loss """crossentropy""" +129 5 regularizer """no""" +129 5 optimizer """adam""" +129 5 training_loop """lcwa""" +129 5 evaluator """rankbased""" +129 6 dataset """kinships""" +129 6 model """convkb""" +129 6 loss """crossentropy""" +129 6 regularizer """no""" +129 6 optimizer """adam""" +129 6 training_loop """lcwa""" +129 6 evaluator """rankbased""" +129 7 dataset """kinships""" +129 7 model """convkb""" +129 7 loss """crossentropy""" +129 7 regularizer """no""" +129 7 optimizer """adam""" +129 7 training_loop """lcwa""" +129 7 evaluator """rankbased""" +129 8 dataset """kinships""" +129 8 model """convkb""" +129 8 loss """crossentropy""" +129 8 regularizer """no""" +129 8 optimizer """adam""" +129 8 training_loop """lcwa""" +129 8 evaluator """rankbased""" +129 9 dataset """kinships""" +129 9 model """convkb""" +129 9 loss """crossentropy""" +129 9 regularizer """no""" +129 9 optimizer """adam""" +129 9 training_loop """lcwa""" +129 9 evaluator """rankbased""" +129 10 dataset """kinships""" +129 10 model """convkb""" +129 10 loss """crossentropy""" +129 10 regularizer """no""" +129 10 optimizer """adam""" +129 10 training_loop """lcwa""" +129 10 evaluator """rankbased""" +129 11 dataset """kinships""" +129 11 model """convkb""" +129 11 loss """crossentropy""" +129 11 regularizer """no""" +129 11 optimizer """adam""" +129 11 training_loop """lcwa""" +129 11 evaluator """rankbased""" +129 12 dataset """kinships""" +129 12 model """convkb""" +129 12 loss """crossentropy""" +129 12 regularizer """no""" +129 12 
optimizer """adam""" +129 12 training_loop """lcwa""" +129 12 evaluator """rankbased""" +129 13 dataset """kinships""" +129 13 model """convkb""" +129 13 loss """crossentropy""" +129 13 regularizer """no""" +129 13 optimizer """adam""" +129 13 training_loop """lcwa""" +129 13 evaluator """rankbased""" +129 14 dataset """kinships""" +129 14 model """convkb""" +129 14 loss """crossentropy""" +129 14 regularizer """no""" +129 14 optimizer """adam""" +129 14 training_loop """lcwa""" +129 14 evaluator """rankbased""" +129 15 dataset """kinships""" +129 15 model """convkb""" +129 15 loss """crossentropy""" +129 15 regularizer """no""" +129 15 optimizer """adam""" +129 15 training_loop """lcwa""" +129 15 evaluator """rankbased""" +129 16 dataset """kinships""" +129 16 model """convkb""" +129 16 loss """crossentropy""" +129 16 regularizer """no""" +129 16 optimizer """adam""" +129 16 training_loop """lcwa""" +129 16 evaluator """rankbased""" +129 17 dataset """kinships""" +129 17 model """convkb""" +129 17 loss """crossentropy""" +129 17 regularizer """no""" +129 17 optimizer """adam""" +129 17 training_loop """lcwa""" +129 17 evaluator """rankbased""" +129 18 dataset """kinships""" +129 18 model """convkb""" +129 18 loss """crossentropy""" +129 18 regularizer """no""" +129 18 optimizer """adam""" +129 18 training_loop """lcwa""" +129 18 evaluator """rankbased""" +129 19 dataset """kinships""" +129 19 model """convkb""" +129 19 loss """crossentropy""" +129 19 regularizer """no""" +129 19 optimizer """adam""" +129 19 training_loop """lcwa""" +129 19 evaluator """rankbased""" +129 20 dataset """kinships""" +129 20 model """convkb""" +129 20 loss """crossentropy""" +129 20 regularizer """no""" +129 20 optimizer """adam""" +129 20 training_loop """lcwa""" +129 20 evaluator """rankbased""" +129 21 dataset """kinships""" +129 21 model """convkb""" +129 21 loss """crossentropy""" +129 21 regularizer """no""" +129 21 optimizer """adam""" +129 21 training_loop """lcwa""" +129 21 
evaluator """rankbased""" +129 22 dataset """kinships""" +129 22 model """convkb""" +129 22 loss """crossentropy""" +129 22 regularizer """no""" +129 22 optimizer """adam""" +129 22 training_loop """lcwa""" +129 22 evaluator """rankbased""" +129 23 dataset """kinships""" +129 23 model """convkb""" +129 23 loss """crossentropy""" +129 23 regularizer """no""" +129 23 optimizer """adam""" +129 23 training_loop """lcwa""" +129 23 evaluator """rankbased""" +129 24 dataset """kinships""" +129 24 model """convkb""" +129 24 loss """crossentropy""" +129 24 regularizer """no""" +129 24 optimizer """adam""" +129 24 training_loop """lcwa""" +129 24 evaluator """rankbased""" +129 25 dataset """kinships""" +129 25 model """convkb""" +129 25 loss """crossentropy""" +129 25 regularizer """no""" +129 25 optimizer """adam""" +129 25 training_loop """lcwa""" +129 25 evaluator """rankbased""" +129 26 dataset """kinships""" +129 26 model """convkb""" +129 26 loss """crossentropy""" +129 26 regularizer """no""" +129 26 optimizer """adam""" +129 26 training_loop """lcwa""" +129 26 evaluator """rankbased""" +129 27 dataset """kinships""" +129 27 model """convkb""" +129 27 loss """crossentropy""" +129 27 regularizer """no""" +129 27 optimizer """adam""" +129 27 training_loop """lcwa""" +129 27 evaluator """rankbased""" +129 28 dataset """kinships""" +129 28 model """convkb""" +129 28 loss """crossentropy""" +129 28 regularizer """no""" +129 28 optimizer """adam""" +129 28 training_loop """lcwa""" +129 28 evaluator """rankbased""" +129 29 dataset """kinships""" +129 29 model """convkb""" +129 29 loss """crossentropy""" +129 29 regularizer """no""" +129 29 optimizer """adam""" +129 29 training_loop """lcwa""" +129 29 evaluator """rankbased""" +129 30 dataset """kinships""" +129 30 model """convkb""" +129 30 loss """crossentropy""" +129 30 regularizer """no""" +129 30 optimizer """adam""" +129 30 training_loop """lcwa""" +129 30 evaluator """rankbased""" +129 31 dataset """kinships""" +129 31 
model """convkb""" +129 31 loss """crossentropy""" +129 31 regularizer """no""" +129 31 optimizer """adam""" +129 31 training_loop """lcwa""" +129 31 evaluator """rankbased""" +129 32 dataset """kinships""" +129 32 model """convkb""" +129 32 loss """crossentropy""" +129 32 regularizer """no""" +129 32 optimizer """adam""" +129 32 training_loop """lcwa""" +129 32 evaluator """rankbased""" +129 33 dataset """kinships""" +129 33 model """convkb""" +129 33 loss """crossentropy""" +129 33 regularizer """no""" +129 33 optimizer """adam""" +129 33 training_loop """lcwa""" +129 33 evaluator """rankbased""" +129 34 dataset """kinships""" +129 34 model """convkb""" +129 34 loss """crossentropy""" +129 34 regularizer """no""" +129 34 optimizer """adam""" +129 34 training_loop """lcwa""" +129 34 evaluator """rankbased""" +129 35 dataset """kinships""" +129 35 model """convkb""" +129 35 loss """crossentropy""" +129 35 regularizer """no""" +129 35 optimizer """adam""" +129 35 training_loop """lcwa""" +129 35 evaluator """rankbased""" +129 36 dataset """kinships""" +129 36 model """convkb""" +129 36 loss """crossentropy""" +129 36 regularizer """no""" +129 36 optimizer """adam""" +129 36 training_loop """lcwa""" +129 36 evaluator """rankbased""" +129 37 dataset """kinships""" +129 37 model """convkb""" +129 37 loss """crossentropy""" +129 37 regularizer """no""" +129 37 optimizer """adam""" +129 37 training_loop """lcwa""" +129 37 evaluator """rankbased""" +129 38 dataset """kinships""" +129 38 model """convkb""" +129 38 loss """crossentropy""" +129 38 regularizer """no""" +129 38 optimizer """adam""" +129 38 training_loop """lcwa""" +129 38 evaluator """rankbased""" +129 39 dataset """kinships""" +129 39 model """convkb""" +129 39 loss """crossentropy""" +129 39 regularizer """no""" +129 39 optimizer """adam""" +129 39 training_loop """lcwa""" +129 39 evaluator """rankbased""" +129 40 dataset """kinships""" +129 40 model """convkb""" +129 40 loss """crossentropy""" +129 40 
regularizer """no""" +129 40 optimizer """adam""" +129 40 training_loop """lcwa""" +129 40 evaluator """rankbased""" +129 41 dataset """kinships""" +129 41 model """convkb""" +129 41 loss """crossentropy""" +129 41 regularizer """no""" +129 41 optimizer """adam""" +129 41 training_loop """lcwa""" +129 41 evaluator """rankbased""" +129 42 dataset """kinships""" +129 42 model """convkb""" +129 42 loss """crossentropy""" +129 42 regularizer """no""" +129 42 optimizer """adam""" +129 42 training_loop """lcwa""" +129 42 evaluator """rankbased""" +129 43 dataset """kinships""" +129 43 model """convkb""" +129 43 loss """crossentropy""" +129 43 regularizer """no""" +129 43 optimizer """adam""" +129 43 training_loop """lcwa""" +129 43 evaluator """rankbased""" +129 44 dataset """kinships""" +129 44 model """convkb""" +129 44 loss """crossentropy""" +129 44 regularizer """no""" +129 44 optimizer """adam""" +129 44 training_loop """lcwa""" +129 44 evaluator """rankbased""" +129 45 dataset """kinships""" +129 45 model """convkb""" +129 45 loss """crossentropy""" +129 45 regularizer """no""" +129 45 optimizer """adam""" +129 45 training_loop """lcwa""" +129 45 evaluator """rankbased""" +129 46 dataset """kinships""" +129 46 model """convkb""" +129 46 loss """crossentropy""" +129 46 regularizer """no""" +129 46 optimizer """adam""" +129 46 training_loop """lcwa""" +129 46 evaluator """rankbased""" +129 47 dataset """kinships""" +129 47 model """convkb""" +129 47 loss """crossentropy""" +129 47 regularizer """no""" +129 47 optimizer """adam""" +129 47 training_loop """lcwa""" +129 47 evaluator """rankbased""" +129 48 dataset """kinships""" +129 48 model """convkb""" +129 48 loss """crossentropy""" +129 48 regularizer """no""" +129 48 optimizer """adam""" +129 48 training_loop """lcwa""" +129 48 evaluator """rankbased""" +129 49 dataset """kinships""" +129 49 model """convkb""" +129 49 loss """crossentropy""" +129 49 regularizer """no""" +129 49 optimizer """adam""" +129 49 
training_loop """lcwa""" +129 49 evaluator """rankbased""" +129 50 dataset """kinships""" +129 50 model """convkb""" +129 50 loss """crossentropy""" +129 50 regularizer """no""" +129 50 optimizer """adam""" +129 50 training_loop """lcwa""" +129 50 evaluator """rankbased""" +129 51 dataset """kinships""" +129 51 model """convkb""" +129 51 loss """crossentropy""" +129 51 regularizer """no""" +129 51 optimizer """adam""" +129 51 training_loop """lcwa""" +129 51 evaluator """rankbased""" +129 52 dataset """kinships""" +129 52 model """convkb""" +129 52 loss """crossentropy""" +129 52 regularizer """no""" +129 52 optimizer """adam""" +129 52 training_loop """lcwa""" +129 52 evaluator """rankbased""" +129 53 dataset """kinships""" +129 53 model """convkb""" +129 53 loss """crossentropy""" +129 53 regularizer """no""" +129 53 optimizer """adam""" +129 53 training_loop """lcwa""" +129 53 evaluator """rankbased""" +129 54 dataset """kinships""" +129 54 model """convkb""" +129 54 loss """crossentropy""" +129 54 regularizer """no""" +129 54 optimizer """adam""" +129 54 training_loop """lcwa""" +129 54 evaluator """rankbased""" +129 55 dataset """kinships""" +129 55 model """convkb""" +129 55 loss """crossentropy""" +129 55 regularizer """no""" +129 55 optimizer """adam""" +129 55 training_loop """lcwa""" +129 55 evaluator """rankbased""" +130 1 model.embedding_dim 1.0 +130 1 model.hidden_dropout_rate 0.44615960916597064 +130 1 model.num_filters 2.0 +130 1 optimizer.lr 0.03840324990534639 +130 1 training.batch_size 2.0 +130 1 training.label_smoothing 0.24237997503925346 +130 2 model.embedding_dim 0.0 +130 2 model.hidden_dropout_rate 0.41171670883620537 +130 2 model.num_filters 1.0 +130 2 optimizer.lr 0.0029056720902163186 +130 2 training.batch_size 1.0 +130 2 training.label_smoothing 0.7558393341402118 +130 3 model.embedding_dim 0.0 +130 3 model.hidden_dropout_rate 0.27039983858548855 +130 3 model.num_filters 1.0 +130 3 optimizer.lr 0.005824654637760303 +130 3 
training.batch_size 1.0 +130 3 training.label_smoothing 0.5010491586603482 +130 4 model.embedding_dim 1.0 +130 4 model.hidden_dropout_rate 0.4544590740291351 +130 4 model.num_filters 8.0 +130 4 optimizer.lr 0.005325530094886234 +130 4 training.batch_size 2.0 +130 4 training.label_smoothing 0.26606159132214685 +130 5 model.embedding_dim 0.0 +130 5 model.hidden_dropout_rate 0.2365717414389486 +130 5 model.num_filters 5.0 +130 5 optimizer.lr 0.0031349861519648675 +130 5 training.batch_size 2.0 +130 5 training.label_smoothing 0.005811000640573722 +130 6 model.embedding_dim 2.0 +130 6 model.hidden_dropout_rate 0.39630324084832125 +130 6 model.num_filters 5.0 +130 6 optimizer.lr 0.005193909922173835 +130 6 training.batch_size 1.0 +130 6 training.label_smoothing 0.42281493831401024 +130 7 model.embedding_dim 0.0 +130 7 model.hidden_dropout_rate 0.3603874698191746 +130 7 model.num_filters 6.0 +130 7 optimizer.lr 0.08111975686540433 +130 7 training.batch_size 2.0 +130 7 training.label_smoothing 0.001619152043128877 +130 8 model.embedding_dim 0.0 +130 8 model.hidden_dropout_rate 0.1352406497470284 +130 8 model.num_filters 5.0 +130 8 optimizer.lr 0.008906457043064118 +130 8 training.batch_size 2.0 +130 8 training.label_smoothing 0.020584478354802625 +130 9 model.embedding_dim 1.0 +130 9 model.hidden_dropout_rate 0.21507516714714423 +130 9 model.num_filters 9.0 +130 9 optimizer.lr 0.016293470019222826 +130 9 training.batch_size 2.0 +130 9 training.label_smoothing 0.5438246375928067 +130 10 model.embedding_dim 2.0 +130 10 model.hidden_dropout_rate 0.2705916743374759 +130 10 model.num_filters 8.0 +130 10 optimizer.lr 0.031107226222941535 +130 10 training.batch_size 2.0 +130 10 training.label_smoothing 0.058353588949379434 +130 11 model.embedding_dim 1.0 +130 11 model.hidden_dropout_rate 0.14778756956279945 +130 11 model.num_filters 2.0 +130 11 optimizer.lr 0.00568381339292527 +130 11 training.batch_size 2.0 +130 11 training.label_smoothing 0.01881068182997191 +130 12 
model.embedding_dim 1.0 +130 12 model.hidden_dropout_rate 0.24681443028279598 +130 12 model.num_filters 9.0 +130 12 optimizer.lr 0.02728835521608734 +130 12 training.batch_size 1.0 +130 12 training.label_smoothing 0.07520495560034363 +130 13 model.embedding_dim 0.0 +130 13 model.hidden_dropout_rate 0.22091565782318537 +130 13 model.num_filters 5.0 +130 13 optimizer.lr 0.0016859116311535134 +130 13 training.batch_size 1.0 +130 13 training.label_smoothing 0.011621546931150238 +130 14 model.embedding_dim 2.0 +130 14 model.hidden_dropout_rate 0.29767173817862924 +130 14 model.num_filters 5.0 +130 14 optimizer.lr 0.0036005491337295532 +130 14 training.batch_size 0.0 +130 14 training.label_smoothing 0.5348607502689396 +130 15 model.embedding_dim 2.0 +130 15 model.hidden_dropout_rate 0.38885060266289573 +130 15 model.num_filters 1.0 +130 15 optimizer.lr 0.0012277015260381346 +130 15 training.batch_size 0.0 +130 15 training.label_smoothing 0.003690571505423473 +130 16 model.embedding_dim 2.0 +130 16 model.hidden_dropout_rate 0.19410330496234238 +130 16 model.num_filters 5.0 +130 16 optimizer.lr 0.016915339634592422 +130 16 training.batch_size 0.0 +130 16 training.label_smoothing 0.02431854771706756 +130 17 model.embedding_dim 2.0 +130 17 model.hidden_dropout_rate 0.32515502987617834 +130 17 model.num_filters 2.0 +130 17 optimizer.lr 0.0662151626945612 +130 17 training.batch_size 2.0 +130 17 training.label_smoothing 0.29170483687364357 +130 18 model.embedding_dim 1.0 +130 18 model.hidden_dropout_rate 0.2050235354459209 +130 18 model.num_filters 3.0 +130 18 optimizer.lr 0.008471488345433929 +130 18 training.batch_size 2.0 +130 18 training.label_smoothing 0.003095518327525668 +130 19 model.embedding_dim 2.0 +130 19 model.hidden_dropout_rate 0.18799218959549469 +130 19 model.num_filters 0.0 +130 19 optimizer.lr 0.0013709841645963786 +130 19 training.batch_size 0.0 +130 19 training.label_smoothing 0.0038523377949364833 +130 20 model.embedding_dim 1.0 +130 20 
model.hidden_dropout_rate 0.3783744370057628 +130 20 model.num_filters 9.0 +130 20 optimizer.lr 0.031588393227730924 +130 20 training.batch_size 1.0 +130 20 training.label_smoothing 0.09078503549143283 +130 21 model.embedding_dim 2.0 +130 21 model.hidden_dropout_rate 0.4097888057827185 +130 21 model.num_filters 7.0 +130 21 optimizer.lr 0.05612006535794924 +130 21 training.batch_size 0.0 +130 21 training.label_smoothing 0.30578068030970484 +130 22 model.embedding_dim 1.0 +130 22 model.hidden_dropout_rate 0.38888878956407913 +130 22 model.num_filters 3.0 +130 22 optimizer.lr 0.0710381283955943 +130 22 training.batch_size 2.0 +130 22 training.label_smoothing 0.037221107025063455 +130 23 model.embedding_dim 1.0 +130 23 model.hidden_dropout_rate 0.32683303442419726 +130 23 model.num_filters 9.0 +130 23 optimizer.lr 0.021643074402452955 +130 23 training.batch_size 1.0 +130 23 training.label_smoothing 0.07815601410447064 +130 24 model.embedding_dim 0.0 +130 24 model.hidden_dropout_rate 0.32488900170418766 +130 24 model.num_filters 0.0 +130 24 optimizer.lr 0.0028667532681469314 +130 24 training.batch_size 0.0 +130 24 training.label_smoothing 0.04154018515858614 +130 25 model.embedding_dim 2.0 +130 25 model.hidden_dropout_rate 0.4852098053154503 +130 25 model.num_filters 3.0 +130 25 optimizer.lr 0.08185894495187235 +130 25 training.batch_size 0.0 +130 25 training.label_smoothing 0.039557629887791296 +130 26 model.embedding_dim 0.0 +130 26 model.hidden_dropout_rate 0.3737123844604092 +130 26 model.num_filters 0.0 +130 26 optimizer.lr 0.012458487777109607 +130 26 training.batch_size 2.0 +130 26 training.label_smoothing 0.015457808893170564 +130 27 model.embedding_dim 1.0 +130 27 model.hidden_dropout_rate 0.13735949836428551 +130 27 model.num_filters 1.0 +130 27 optimizer.lr 0.04385733048972513 +130 27 training.batch_size 2.0 +130 27 training.label_smoothing 0.06111147036615909 +130 28 model.embedding_dim 0.0 +130 28 model.hidden_dropout_rate 0.25385628774310875 +130 28 
model.num_filters 7.0 +130 28 optimizer.lr 0.0046170498768597105 +130 28 training.batch_size 0.0 +130 28 training.label_smoothing 0.052282660604829334 +130 29 model.embedding_dim 1.0 +130 29 model.hidden_dropout_rate 0.2459745747070349 +130 29 model.num_filters 8.0 +130 29 optimizer.lr 0.014448959224102807 +130 29 training.batch_size 1.0 +130 29 training.label_smoothing 0.008078034267690852 +130 30 model.embedding_dim 0.0 +130 30 model.hidden_dropout_rate 0.23024185781650366 +130 30 model.num_filters 8.0 +130 30 optimizer.lr 0.013389358834450965 +130 30 training.batch_size 0.0 +130 30 training.label_smoothing 0.002671943546050214 +130 31 model.embedding_dim 0.0 +130 31 model.hidden_dropout_rate 0.2948737886409356 +130 31 model.num_filters 9.0 +130 31 optimizer.lr 0.0010596588097684556 +130 31 training.batch_size 2.0 +130 31 training.label_smoothing 0.014043926062260743 +130 32 model.embedding_dim 0.0 +130 32 model.hidden_dropout_rate 0.1417061378585209 +130 32 model.num_filters 6.0 +130 32 optimizer.lr 0.028954500201748914 +130 32 training.batch_size 1.0 +130 32 training.label_smoothing 0.006633396060821057 +130 33 model.embedding_dim 0.0 +130 33 model.hidden_dropout_rate 0.48637680243269454 +130 33 model.num_filters 0.0 +130 33 optimizer.lr 0.002375710802294945 +130 33 training.batch_size 0.0 +130 33 training.label_smoothing 0.0011416635162570879 +130 34 model.embedding_dim 2.0 +130 34 model.hidden_dropout_rate 0.1565410656921753 +130 34 model.num_filters 1.0 +130 34 optimizer.lr 0.0029974727305875277 +130 34 training.batch_size 1.0 +130 34 training.label_smoothing 0.0016037844821158904 +130 35 model.embedding_dim 0.0 +130 35 model.hidden_dropout_rate 0.38930468249167594 +130 35 model.num_filters 8.0 +130 35 optimizer.lr 0.027041670961463305 +130 35 training.batch_size 1.0 +130 35 training.label_smoothing 0.27861302160601487 +130 36 model.embedding_dim 1.0 +130 36 model.hidden_dropout_rate 0.37371788681762563 +130 36 model.num_filters 3.0 +130 36 optimizer.lr 
0.0023832182436547113 +130 36 training.batch_size 0.0 +130 36 training.label_smoothing 0.36905257379503154 +130 37 model.embedding_dim 1.0 +130 37 model.hidden_dropout_rate 0.28894453539004517 +130 37 model.num_filters 3.0 +130 37 optimizer.lr 0.04615298944029046 +130 37 training.batch_size 0.0 +130 37 training.label_smoothing 0.03507954884812195 +130 38 model.embedding_dim 0.0 +130 38 model.hidden_dropout_rate 0.3011725150063175 +130 38 model.num_filters 3.0 +130 38 optimizer.lr 0.03461631604780427 +130 38 training.batch_size 2.0 +130 38 training.label_smoothing 0.00552626321030768 +130 39 model.embedding_dim 1.0 +130 39 model.hidden_dropout_rate 0.3863291864476963 +130 39 model.num_filters 1.0 +130 39 optimizer.lr 0.013092823740619772 +130 39 training.batch_size 0.0 +130 39 training.label_smoothing 0.0010232882693576746 +130 40 model.embedding_dim 1.0 +130 40 model.hidden_dropout_rate 0.38796073605355597 +130 40 model.num_filters 1.0 +130 40 optimizer.lr 0.010899196905668658 +130 40 training.batch_size 1.0 +130 40 training.label_smoothing 0.22298887306424892 +130 41 model.embedding_dim 2.0 +130 41 model.hidden_dropout_rate 0.4779276099510791 +130 41 model.num_filters 6.0 +130 41 optimizer.lr 0.0030478519516784655 +130 41 training.batch_size 0.0 +130 41 training.label_smoothing 0.23829389009925908 +130 42 model.embedding_dim 2.0 +130 42 model.hidden_dropout_rate 0.24104081428842672 +130 42 model.num_filters 7.0 +130 42 optimizer.lr 0.049339539368589716 +130 42 training.batch_size 0.0 +130 42 training.label_smoothing 0.00913158998760373 +130 43 model.embedding_dim 1.0 +130 43 model.hidden_dropout_rate 0.44969717493385597 +130 43 model.num_filters 4.0 +130 43 optimizer.lr 0.08215108785781072 +130 43 training.batch_size 0.0 +130 43 training.label_smoothing 0.09531559357107149 +130 44 model.embedding_dim 1.0 +130 44 model.hidden_dropout_rate 0.33229076618090836 +130 44 model.num_filters 8.0 +130 44 optimizer.lr 0.018047804365440772 +130 44 training.batch_size 2.0 +130 
44 training.label_smoothing 0.5742004754562177 +130 45 model.embedding_dim 1.0 +130 45 model.hidden_dropout_rate 0.47620198084710263 +130 45 model.num_filters 9.0 +130 45 optimizer.lr 0.0614521681596038 +130 45 training.batch_size 1.0 +130 45 training.label_smoothing 0.0037295517509877336 +130 46 model.embedding_dim 0.0 +130 46 model.hidden_dropout_rate 0.475410459936464 +130 46 model.num_filters 1.0 +130 46 optimizer.lr 0.026935005624712172 +130 46 training.batch_size 1.0 +130 46 training.label_smoothing 0.0017772733196731014 +130 47 model.embedding_dim 1.0 +130 47 model.hidden_dropout_rate 0.31249630322453203 +130 47 model.num_filters 8.0 +130 47 optimizer.lr 0.007542153178542622 +130 47 training.batch_size 2.0 +130 47 training.label_smoothing 0.009103129523342452 +130 48 model.embedding_dim 0.0 +130 48 model.hidden_dropout_rate 0.4110375328082112 +130 48 model.num_filters 6.0 +130 48 optimizer.lr 0.009008586222054788 +130 48 training.batch_size 1.0 +130 48 training.label_smoothing 0.9376640739878565 +130 49 model.embedding_dim 0.0 +130 49 model.hidden_dropout_rate 0.2028556869411 +130 49 model.num_filters 7.0 +130 49 optimizer.lr 0.012433460894186978 +130 49 training.batch_size 1.0 +130 49 training.label_smoothing 0.02968847610138762 +130 50 model.embedding_dim 1.0 +130 50 model.hidden_dropout_rate 0.4657427641094718 +130 50 model.num_filters 5.0 +130 50 optimizer.lr 0.0208027871226924 +130 50 training.batch_size 0.0 +130 50 training.label_smoothing 0.9489315121703337 +130 51 model.embedding_dim 1.0 +130 51 model.hidden_dropout_rate 0.12879397808837725 +130 51 model.num_filters 2.0 +130 51 optimizer.lr 0.07091270587544876 +130 51 training.batch_size 2.0 +130 51 training.label_smoothing 0.0027042996980770916 +130 52 model.embedding_dim 2.0 +130 52 model.hidden_dropout_rate 0.24846023838868422 +130 52 model.num_filters 4.0 +130 52 optimizer.lr 0.005581693748583862 +130 52 training.batch_size 1.0 +130 52 training.label_smoothing 0.0789148976532988 +130 53 
model.embedding_dim 2.0 +130 53 model.hidden_dropout_rate 0.4425214038034535 +130 53 model.num_filters 5.0 +130 53 optimizer.lr 0.0012627398371098375 +130 53 training.batch_size 2.0 +130 53 training.label_smoothing 0.15991113683780875 +130 54 model.embedding_dim 0.0 +130 54 model.hidden_dropout_rate 0.2164286809740862 +130 54 model.num_filters 1.0 +130 54 optimizer.lr 0.06792507152733819 +130 54 training.batch_size 2.0 +130 54 training.label_smoothing 0.3346222905349846 +130 55 model.embedding_dim 0.0 +130 55 model.hidden_dropout_rate 0.4100206770804393 +130 55 model.num_filters 0.0 +130 55 optimizer.lr 0.03277783478204172 +130 55 training.batch_size 1.0 +130 55 training.label_smoothing 0.10940704369306234 +130 56 model.embedding_dim 1.0 +130 56 model.hidden_dropout_rate 0.334458908483543 +130 56 model.num_filters 3.0 +130 56 optimizer.lr 0.003454806981452737 +130 56 training.batch_size 1.0 +130 56 training.label_smoothing 0.04405282958481936 +130 57 model.embedding_dim 1.0 +130 57 model.hidden_dropout_rate 0.1781120224514001 +130 57 model.num_filters 5.0 +130 57 optimizer.lr 0.04601291591667838 +130 57 training.batch_size 0.0 +130 57 training.label_smoothing 0.026993435837792172 +130 58 model.embedding_dim 1.0 +130 58 model.hidden_dropout_rate 0.4274517834505366 +130 58 model.num_filters 3.0 +130 58 optimizer.lr 0.006056153580742726 +130 58 training.batch_size 0.0 +130 58 training.label_smoothing 0.005380019892268377 +130 59 model.embedding_dim 0.0 +130 59 model.hidden_dropout_rate 0.24821796526450457 +130 59 model.num_filters 9.0 +130 59 optimizer.lr 0.0032209316876366093 +130 59 training.batch_size 1.0 +130 59 training.label_smoothing 0.013962003302868415 +130 60 model.embedding_dim 1.0 +130 60 model.hidden_dropout_rate 0.4710997335833077 +130 60 model.num_filters 0.0 +130 60 optimizer.lr 0.001312767945630298 +130 60 training.batch_size 1.0 +130 60 training.label_smoothing 0.18133000200248406 +130 61 model.embedding_dim 1.0 +130 61 model.hidden_dropout_rate 
0.46189235574039256 +130 61 model.num_filters 7.0 +130 61 optimizer.lr 0.0031683457719278757 +130 61 training.batch_size 0.0 +130 61 training.label_smoothing 0.02176238954473499 +130 62 model.embedding_dim 1.0 +130 62 model.hidden_dropout_rate 0.10444603267959135 +130 62 model.num_filters 8.0 +130 62 optimizer.lr 0.014374790207332285 +130 62 training.batch_size 2.0 +130 62 training.label_smoothing 0.024452361424021327 +130 63 model.embedding_dim 1.0 +130 63 model.hidden_dropout_rate 0.1655032928979387 +130 63 model.num_filters 9.0 +130 63 optimizer.lr 0.0023166705791719264 +130 63 training.batch_size 0.0 +130 63 training.label_smoothing 0.021890377375675767 +130 64 model.embedding_dim 1.0 +130 64 model.hidden_dropout_rate 0.41951785235780814 +130 64 model.num_filters 8.0 +130 64 optimizer.lr 0.005644117155929267 +130 64 training.batch_size 1.0 +130 64 training.label_smoothing 0.00734698858886238 +130 65 model.embedding_dim 1.0 +130 65 model.hidden_dropout_rate 0.47051166170947367 +130 65 model.num_filters 8.0 +130 65 optimizer.lr 0.004612625934339523 +130 65 training.batch_size 0.0 +130 65 training.label_smoothing 0.014827061430068079 +130 66 model.embedding_dim 0.0 +130 66 model.hidden_dropout_rate 0.31792336532277754 +130 66 model.num_filters 2.0 +130 66 optimizer.lr 0.03291270474024132 +130 66 training.batch_size 1.0 +130 66 training.label_smoothing 0.3376526667591875 +130 67 model.embedding_dim 0.0 +130 67 model.hidden_dropout_rate 0.2487839661911787 +130 67 model.num_filters 3.0 +130 67 optimizer.lr 0.0014308064396169459 +130 67 training.batch_size 1.0 +130 67 training.label_smoothing 0.9435860799455079 +130 68 model.embedding_dim 1.0 +130 68 model.hidden_dropout_rate 0.119137287090176 +130 68 model.num_filters 5.0 +130 68 optimizer.lr 0.00815155956441232 +130 68 training.batch_size 2.0 +130 68 training.label_smoothing 0.10100790926429823 +130 69 model.embedding_dim 0.0 +130 69 model.hidden_dropout_rate 0.4228381465953849 +130 69 model.num_filters 2.0 +130 69 
optimizer.lr 0.027785392426235712 +130 69 training.batch_size 2.0 +130 69 training.label_smoothing 0.05426876139550616 +130 70 model.embedding_dim 2.0 +130 70 model.hidden_dropout_rate 0.3802988236456667 +130 70 model.num_filters 7.0 +130 70 optimizer.lr 0.014166350858774749 +130 70 training.batch_size 0.0 +130 70 training.label_smoothing 0.03783692110940017 +130 71 model.embedding_dim 0.0 +130 71 model.hidden_dropout_rate 0.12336864859668024 +130 71 model.num_filters 2.0 +130 71 optimizer.lr 0.0028201099525891346 +130 71 training.batch_size 0.0 +130 71 training.label_smoothing 0.001213455098173214 +130 72 model.embedding_dim 1.0 +130 72 model.hidden_dropout_rate 0.4234453384562614 +130 72 model.num_filters 0.0 +130 72 optimizer.lr 0.006711134156360611 +130 72 training.batch_size 0.0 +130 72 training.label_smoothing 0.003702934153208708 +130 73 model.embedding_dim 0.0 +130 73 model.hidden_dropout_rate 0.25300195453473884 +130 73 model.num_filters 7.0 +130 73 optimizer.lr 0.02411820189824925 +130 73 training.batch_size 0.0 +130 73 training.label_smoothing 0.22333039694318668 +130 74 model.embedding_dim 2.0 +130 74 model.hidden_dropout_rate 0.17170746311127977 +130 74 model.num_filters 0.0 +130 74 optimizer.lr 0.0011106382886404463 +130 74 training.batch_size 1.0 +130 74 training.label_smoothing 0.8475095337195111 +130 75 model.embedding_dim 2.0 +130 75 model.hidden_dropout_rate 0.2573759438716753 +130 75 model.num_filters 6.0 +130 75 optimizer.lr 0.006120721099511984 +130 75 training.batch_size 1.0 +130 75 training.label_smoothing 0.17347545971774045 +130 76 model.embedding_dim 0.0 +130 76 model.hidden_dropout_rate 0.25257288190655736 +130 76 model.num_filters 1.0 +130 76 optimizer.lr 0.00377943327397326 +130 76 training.batch_size 2.0 +130 76 training.label_smoothing 0.6462673877360812 +130 77 model.embedding_dim 0.0 +130 77 model.hidden_dropout_rate 0.24945631865686485 +130 77 model.num_filters 4.0 +130 77 optimizer.lr 0.017633552922741838 +130 77 
training.batch_size 1.0 +130 77 training.label_smoothing 0.05194268003057068 +130 78 model.embedding_dim 0.0 +130 78 model.hidden_dropout_rate 0.38350238074642995 +130 78 model.num_filters 6.0 +130 78 optimizer.lr 0.002519779967000155 +130 78 training.batch_size 2.0 +130 78 training.label_smoothing 0.0014626443384258836 +130 1 dataset """kinships""" +130 1 model """convkb""" +130 1 loss """bceaftersigmoid""" +130 1 regularizer """no""" +130 1 optimizer """adam""" +130 1 training_loop """lcwa""" +130 1 evaluator """rankbased""" +130 2 dataset """kinships""" +130 2 model """convkb""" +130 2 loss """bceaftersigmoid""" +130 2 regularizer """no""" +130 2 optimizer """adam""" +130 2 training_loop """lcwa""" +130 2 evaluator """rankbased""" +130 3 dataset """kinships""" +130 3 model """convkb""" +130 3 loss """bceaftersigmoid""" +130 3 regularizer """no""" +130 3 optimizer """adam""" +130 3 training_loop """lcwa""" +130 3 evaluator """rankbased""" +130 4 dataset """kinships""" +130 4 model """convkb""" +130 4 loss """bceaftersigmoid""" +130 4 regularizer """no""" +130 4 optimizer """adam""" +130 4 training_loop """lcwa""" +130 4 evaluator """rankbased""" +130 5 dataset """kinships""" +130 5 model """convkb""" +130 5 loss """bceaftersigmoid""" +130 5 regularizer """no""" +130 5 optimizer """adam""" +130 5 training_loop """lcwa""" +130 5 evaluator """rankbased""" +130 6 dataset """kinships""" +130 6 model """convkb""" +130 6 loss """bceaftersigmoid""" +130 6 regularizer """no""" +130 6 optimizer """adam""" +130 6 training_loop """lcwa""" +130 6 evaluator """rankbased""" +130 7 dataset """kinships""" +130 7 model """convkb""" +130 7 loss """bceaftersigmoid""" +130 7 regularizer """no""" +130 7 optimizer """adam""" +130 7 training_loop """lcwa""" +130 7 evaluator """rankbased""" +130 8 dataset """kinships""" +130 8 model """convkb""" +130 8 loss """bceaftersigmoid""" +130 8 regularizer """no""" +130 8 optimizer """adam""" +130 8 training_loop """lcwa""" +130 8 evaluator 
"""rankbased""" +130 9 dataset """kinships""" +130 9 model """convkb""" +130 9 loss """bceaftersigmoid""" +130 9 regularizer """no""" +130 9 optimizer """adam""" +130 9 training_loop """lcwa""" +130 9 evaluator """rankbased""" +130 10 dataset """kinships""" +130 10 model """convkb""" +130 10 loss """bceaftersigmoid""" +130 10 regularizer """no""" +130 10 optimizer """adam""" +130 10 training_loop """lcwa""" +130 10 evaluator """rankbased""" +130 11 dataset """kinships""" +130 11 model """convkb""" +130 11 loss """bceaftersigmoid""" +130 11 regularizer """no""" +130 11 optimizer """adam""" +130 11 training_loop """lcwa""" +130 11 evaluator """rankbased""" +130 12 dataset """kinships""" +130 12 model """convkb""" +130 12 loss """bceaftersigmoid""" +130 12 regularizer """no""" +130 12 optimizer """adam""" +130 12 training_loop """lcwa""" +130 12 evaluator """rankbased""" +130 13 dataset """kinships""" +130 13 model """convkb""" +130 13 loss """bceaftersigmoid""" +130 13 regularizer """no""" +130 13 optimizer """adam""" +130 13 training_loop """lcwa""" +130 13 evaluator """rankbased""" +130 14 dataset """kinships""" +130 14 model """convkb""" +130 14 loss """bceaftersigmoid""" +130 14 regularizer """no""" +130 14 optimizer """adam""" +130 14 training_loop """lcwa""" +130 14 evaluator """rankbased""" +130 15 dataset """kinships""" +130 15 model """convkb""" +130 15 loss """bceaftersigmoid""" +130 15 regularizer """no""" +130 15 optimizer """adam""" +130 15 training_loop """lcwa""" +130 15 evaluator """rankbased""" +130 16 dataset """kinships""" +130 16 model """convkb""" +130 16 loss """bceaftersigmoid""" +130 16 regularizer """no""" +130 16 optimizer """adam""" +130 16 training_loop """lcwa""" +130 16 evaluator """rankbased""" +130 17 dataset """kinships""" +130 17 model """convkb""" +130 17 loss """bceaftersigmoid""" +130 17 regularizer """no""" +130 17 optimizer """adam""" +130 17 training_loop """lcwa""" +130 17 evaluator """rankbased""" +130 18 dataset 
"""kinships""" +130 18 model """convkb""" +130 18 loss """bceaftersigmoid""" +130 18 regularizer """no""" +130 18 optimizer """adam""" +130 18 training_loop """lcwa""" +130 18 evaluator """rankbased""" +130 19 dataset """kinships""" +130 19 model """convkb""" +130 19 loss """bceaftersigmoid""" +130 19 regularizer """no""" +130 19 optimizer """adam""" +130 19 training_loop """lcwa""" +130 19 evaluator """rankbased""" +130 20 dataset """kinships""" +130 20 model """convkb""" +130 20 loss """bceaftersigmoid""" +130 20 regularizer """no""" +130 20 optimizer """adam""" +130 20 training_loop """lcwa""" +130 20 evaluator """rankbased""" +130 21 dataset """kinships""" +130 21 model """convkb""" +130 21 loss """bceaftersigmoid""" +130 21 regularizer """no""" +130 21 optimizer """adam""" +130 21 training_loop """lcwa""" +130 21 evaluator """rankbased""" +130 22 dataset """kinships""" +130 22 model """convkb""" +130 22 loss """bceaftersigmoid""" +130 22 regularizer """no""" +130 22 optimizer """adam""" +130 22 training_loop """lcwa""" +130 22 evaluator """rankbased""" +130 23 dataset """kinships""" +130 23 model """convkb""" +130 23 loss """bceaftersigmoid""" +130 23 regularizer """no""" +130 23 optimizer """adam""" +130 23 training_loop """lcwa""" +130 23 evaluator """rankbased""" +130 24 dataset """kinships""" +130 24 model """convkb""" +130 24 loss """bceaftersigmoid""" +130 24 regularizer """no""" +130 24 optimizer """adam""" +130 24 training_loop """lcwa""" +130 24 evaluator """rankbased""" +130 25 dataset """kinships""" +130 25 model """convkb""" +130 25 loss """bceaftersigmoid""" +130 25 regularizer """no""" +130 25 optimizer """adam""" +130 25 training_loop """lcwa""" +130 25 evaluator """rankbased""" +130 26 dataset """kinships""" +130 26 model """convkb""" +130 26 loss """bceaftersigmoid""" +130 26 regularizer """no""" +130 26 optimizer """adam""" +130 26 training_loop """lcwa""" +130 26 evaluator """rankbased""" +130 27 dataset """kinships""" +130 27 model 
"""convkb""" +130 27 loss """bceaftersigmoid""" +130 27 regularizer """no""" +130 27 optimizer """adam""" +130 27 training_loop """lcwa""" +130 27 evaluator """rankbased""" +130 28 dataset """kinships""" +130 28 model """convkb""" +130 28 loss """bceaftersigmoid""" +130 28 regularizer """no""" +130 28 optimizer """adam""" +130 28 training_loop """lcwa""" +130 28 evaluator """rankbased""" +130 29 dataset """kinships""" +130 29 model """convkb""" +130 29 loss """bceaftersigmoid""" +130 29 regularizer """no""" +130 29 optimizer """adam""" +130 29 training_loop """lcwa""" +130 29 evaluator """rankbased""" +130 30 dataset """kinships""" +130 30 model """convkb""" +130 30 loss """bceaftersigmoid""" +130 30 regularizer """no""" +130 30 optimizer """adam""" +130 30 training_loop """lcwa""" +130 30 evaluator """rankbased""" +130 31 dataset """kinships""" +130 31 model """convkb""" +130 31 loss """bceaftersigmoid""" +130 31 regularizer """no""" +130 31 optimizer """adam""" +130 31 training_loop """lcwa""" +130 31 evaluator """rankbased""" +130 32 dataset """kinships""" +130 32 model """convkb""" +130 32 loss """bceaftersigmoid""" +130 32 regularizer """no""" +130 32 optimizer """adam""" +130 32 training_loop """lcwa""" +130 32 evaluator """rankbased""" +130 33 dataset """kinships""" +130 33 model """convkb""" +130 33 loss """bceaftersigmoid""" +130 33 regularizer """no""" +130 33 optimizer """adam""" +130 33 training_loop """lcwa""" +130 33 evaluator """rankbased""" +130 34 dataset """kinships""" +130 34 model """convkb""" +130 34 loss """bceaftersigmoid""" +130 34 regularizer """no""" +130 34 optimizer """adam""" +130 34 training_loop """lcwa""" +130 34 evaluator """rankbased""" +130 35 dataset """kinships""" +130 35 model """convkb""" +130 35 loss """bceaftersigmoid""" +130 35 regularizer """no""" +130 35 optimizer """adam""" +130 35 training_loop """lcwa""" +130 35 evaluator """rankbased""" +130 36 dataset """kinships""" +130 36 model """convkb""" +130 36 loss 
"""bceaftersigmoid""" +130 36 regularizer """no""" +130 36 optimizer """adam""" +130 36 training_loop """lcwa""" +130 36 evaluator """rankbased""" +130 37 dataset """kinships""" +130 37 model """convkb""" +130 37 loss """bceaftersigmoid""" +130 37 regularizer """no""" +130 37 optimizer """adam""" +130 37 training_loop """lcwa""" +130 37 evaluator """rankbased""" +130 38 dataset """kinships""" +130 38 model """convkb""" +130 38 loss """bceaftersigmoid""" +130 38 regularizer """no""" +130 38 optimizer """adam""" +130 38 training_loop """lcwa""" +130 38 evaluator """rankbased""" +130 39 dataset """kinships""" +130 39 model """convkb""" +130 39 loss """bceaftersigmoid""" +130 39 regularizer """no""" +130 39 optimizer """adam""" +130 39 training_loop """lcwa""" +130 39 evaluator """rankbased""" +130 40 dataset """kinships""" +130 40 model """convkb""" +130 40 loss """bceaftersigmoid""" +130 40 regularizer """no""" +130 40 optimizer """adam""" +130 40 training_loop """lcwa""" +130 40 evaluator """rankbased""" +130 41 dataset """kinships""" +130 41 model """convkb""" +130 41 loss """bceaftersigmoid""" +130 41 regularizer """no""" +130 41 optimizer """adam""" +130 41 training_loop """lcwa""" +130 41 evaluator """rankbased""" +130 42 dataset """kinships""" +130 42 model """convkb""" +130 42 loss """bceaftersigmoid""" +130 42 regularizer """no""" +130 42 optimizer """adam""" +130 42 training_loop """lcwa""" +130 42 evaluator """rankbased""" +130 43 dataset """kinships""" +130 43 model """convkb""" +130 43 loss """bceaftersigmoid""" +130 43 regularizer """no""" +130 43 optimizer """adam""" +130 43 training_loop """lcwa""" +130 43 evaluator """rankbased""" +130 44 dataset """kinships""" +130 44 model """convkb""" +130 44 loss """bceaftersigmoid""" +130 44 regularizer """no""" +130 44 optimizer """adam""" +130 44 training_loop """lcwa""" +130 44 evaluator """rankbased""" +130 45 dataset """kinships""" +130 45 model """convkb""" +130 45 loss """bceaftersigmoid""" +130 45 
regularizer """no""" +130 45 optimizer """adam""" +130 45 training_loop """lcwa""" +130 45 evaluator """rankbased""" +130 46 dataset """kinships""" +130 46 model """convkb""" +130 46 loss """bceaftersigmoid""" +130 46 regularizer """no""" +130 46 optimizer """adam""" +130 46 training_loop """lcwa""" +130 46 evaluator """rankbased""" +130 47 dataset """kinships""" +130 47 model """convkb""" +130 47 loss """bceaftersigmoid""" +130 47 regularizer """no""" +130 47 optimizer """adam""" +130 47 training_loop """lcwa""" +130 47 evaluator """rankbased""" +130 48 dataset """kinships""" +130 48 model """convkb""" +130 48 loss """bceaftersigmoid""" +130 48 regularizer """no""" +130 48 optimizer """adam""" +130 48 training_loop """lcwa""" +130 48 evaluator """rankbased""" +130 49 dataset """kinships""" +130 49 model """convkb""" +130 49 loss """bceaftersigmoid""" +130 49 regularizer """no""" +130 49 optimizer """adam""" +130 49 training_loop """lcwa""" +130 49 evaluator """rankbased""" +130 50 dataset """kinships""" +130 50 model """convkb""" +130 50 loss """bceaftersigmoid""" +130 50 regularizer """no""" +130 50 optimizer """adam""" +130 50 training_loop """lcwa""" +130 50 evaluator """rankbased""" +130 51 dataset """kinships""" +130 51 model """convkb""" +130 51 loss """bceaftersigmoid""" +130 51 regularizer """no""" +130 51 optimizer """adam""" +130 51 training_loop """lcwa""" +130 51 evaluator """rankbased""" +130 52 dataset """kinships""" +130 52 model """convkb""" +130 52 loss """bceaftersigmoid""" +130 52 regularizer """no""" +130 52 optimizer """adam""" +130 52 training_loop """lcwa""" +130 52 evaluator """rankbased""" +130 53 dataset """kinships""" +130 53 model """convkb""" +130 53 loss """bceaftersigmoid""" +130 53 regularizer """no""" +130 53 optimizer """adam""" +130 53 training_loop """lcwa""" +130 53 evaluator """rankbased""" +130 54 dataset """kinships""" +130 54 model """convkb""" +130 54 loss """bceaftersigmoid""" +130 54 regularizer """no""" +130 54 
optimizer """adam""" +130 54 training_loop """lcwa""" +130 54 evaluator """rankbased""" +130 55 dataset """kinships""" +130 55 model """convkb""" +130 55 loss """bceaftersigmoid""" +130 55 regularizer """no""" +130 55 optimizer """adam""" +130 55 training_loop """lcwa""" +130 55 evaluator """rankbased""" +130 56 dataset """kinships""" +130 56 model """convkb""" +130 56 loss """bceaftersigmoid""" +130 56 regularizer """no""" +130 56 optimizer """adam""" +130 56 training_loop """lcwa""" +130 56 evaluator """rankbased""" +130 57 dataset """kinships""" +130 57 model """convkb""" +130 57 loss """bceaftersigmoid""" +130 57 regularizer """no""" +130 57 optimizer """adam""" +130 57 training_loop """lcwa""" +130 57 evaluator """rankbased""" +130 58 dataset """kinships""" +130 58 model """convkb""" +130 58 loss """bceaftersigmoid""" +130 58 regularizer """no""" +130 58 optimizer """adam""" +130 58 training_loop """lcwa""" +130 58 evaluator """rankbased""" +130 59 dataset """kinships""" +130 59 model """convkb""" +130 59 loss """bceaftersigmoid""" +130 59 regularizer """no""" +130 59 optimizer """adam""" +130 59 training_loop """lcwa""" +130 59 evaluator """rankbased""" +130 60 dataset """kinships""" +130 60 model """convkb""" +130 60 loss """bceaftersigmoid""" +130 60 regularizer """no""" +130 60 optimizer """adam""" +130 60 training_loop """lcwa""" +130 60 evaluator """rankbased""" +130 61 dataset """kinships""" +130 61 model """convkb""" +130 61 loss """bceaftersigmoid""" +130 61 regularizer """no""" +130 61 optimizer """adam""" +130 61 training_loop """lcwa""" +130 61 evaluator """rankbased""" +130 62 dataset """kinships""" +130 62 model """convkb""" +130 62 loss """bceaftersigmoid""" +130 62 regularizer """no""" +130 62 optimizer """adam""" +130 62 training_loop """lcwa""" +130 62 evaluator """rankbased""" +130 63 dataset """kinships""" +130 63 model """convkb""" +130 63 loss """bceaftersigmoid""" +130 63 regularizer """no""" +130 63 optimizer """adam""" +130 63 
training_loop """lcwa""" +130 63 evaluator """rankbased""" +130 64 dataset """kinships""" +130 64 model """convkb""" +130 64 loss """bceaftersigmoid""" +130 64 regularizer """no""" +130 64 optimizer """adam""" +130 64 training_loop """lcwa""" +130 64 evaluator """rankbased""" +130 65 dataset """kinships""" +130 65 model """convkb""" +130 65 loss """bceaftersigmoid""" +130 65 regularizer """no""" +130 65 optimizer """adam""" +130 65 training_loop """lcwa""" +130 65 evaluator """rankbased""" +130 66 dataset """kinships""" +130 66 model """convkb""" +130 66 loss """bceaftersigmoid""" +130 66 regularizer """no""" +130 66 optimizer """adam""" +130 66 training_loop """lcwa""" +130 66 evaluator """rankbased""" +130 67 dataset """kinships""" +130 67 model """convkb""" +130 67 loss """bceaftersigmoid""" +130 67 regularizer """no""" +130 67 optimizer """adam""" +130 67 training_loop """lcwa""" +130 67 evaluator """rankbased""" +130 68 dataset """kinships""" +130 68 model """convkb""" +130 68 loss """bceaftersigmoid""" +130 68 regularizer """no""" +130 68 optimizer """adam""" +130 68 training_loop """lcwa""" +130 68 evaluator """rankbased""" +130 69 dataset """kinships""" +130 69 model """convkb""" +130 69 loss """bceaftersigmoid""" +130 69 regularizer """no""" +130 69 optimizer """adam""" +130 69 training_loop """lcwa""" +130 69 evaluator """rankbased""" +130 70 dataset """kinships""" +130 70 model """convkb""" +130 70 loss """bceaftersigmoid""" +130 70 regularizer """no""" +130 70 optimizer """adam""" +130 70 training_loop """lcwa""" +130 70 evaluator """rankbased""" +130 71 dataset """kinships""" +130 71 model """convkb""" +130 71 loss """bceaftersigmoid""" +130 71 regularizer """no""" +130 71 optimizer """adam""" +130 71 training_loop """lcwa""" +130 71 evaluator """rankbased""" +130 72 dataset """kinships""" +130 72 model """convkb""" +130 72 loss """bceaftersigmoid""" +130 72 regularizer """no""" +130 72 optimizer """adam""" +130 72 training_loop """lcwa""" +130 72 
evaluator """rankbased""" +130 73 dataset """kinships""" +130 73 model """convkb""" +130 73 loss """bceaftersigmoid""" +130 73 regularizer """no""" +130 73 optimizer """adam""" +130 73 training_loop """lcwa""" +130 73 evaluator """rankbased""" +130 74 dataset """kinships""" +130 74 model """convkb""" +130 74 loss """bceaftersigmoid""" +130 74 regularizer """no""" +130 74 optimizer """adam""" +130 74 training_loop """lcwa""" +130 74 evaluator """rankbased""" +130 75 dataset """kinships""" +130 75 model """convkb""" +130 75 loss """bceaftersigmoid""" +130 75 regularizer """no""" +130 75 optimizer """adam""" +130 75 training_loop """lcwa""" +130 75 evaluator """rankbased""" +130 76 dataset """kinships""" +130 76 model """convkb""" +130 76 loss """bceaftersigmoid""" +130 76 regularizer """no""" +130 76 optimizer """adam""" +130 76 training_loop """lcwa""" +130 76 evaluator """rankbased""" +130 77 dataset """kinships""" +130 77 model """convkb""" +130 77 loss """bceaftersigmoid""" +130 77 regularizer """no""" +130 77 optimizer """adam""" +130 77 training_loop """lcwa""" +130 77 evaluator """rankbased""" +130 78 dataset """kinships""" +130 78 model """convkb""" +130 78 loss """bceaftersigmoid""" +130 78 regularizer """no""" +130 78 optimizer """adam""" +130 78 training_loop """lcwa""" +130 78 evaluator """rankbased""" +131 1 model.embedding_dim 0.0 +131 1 model.hidden_dropout_rate 0.4225692567305587 +131 1 model.num_filters 7.0 +131 1 optimizer.lr 0.04831499482521154 +131 1 training.batch_size 0.0 +131 1 training.label_smoothing 0.012581191160330041 +131 2 model.embedding_dim 2.0 +131 2 model.hidden_dropout_rate 0.46567361405714214 +131 2 model.num_filters 4.0 +131 2 optimizer.lr 0.008457909612192999 +131 2 training.batch_size 1.0 +131 2 training.label_smoothing 0.022329954012682934 +131 3 model.embedding_dim 1.0 +131 3 model.hidden_dropout_rate 0.37303085952953463 +131 3 model.num_filters 1.0 +131 3 optimizer.lr 0.01542434891291263 +131 3 training.batch_size 1.0 +131 3 
training.label_smoothing 0.0025009113777631773 +131 4 model.embedding_dim 2.0 +131 4 model.hidden_dropout_rate 0.4339051659895956 +131 4 model.num_filters 7.0 +131 4 optimizer.lr 0.031503236530078915 +131 4 training.batch_size 2.0 +131 4 training.label_smoothing 0.026119171138287527 +131 5 model.embedding_dim 2.0 +131 5 model.hidden_dropout_rate 0.17495123012493866 +131 5 model.num_filters 0.0 +131 5 optimizer.lr 0.005720021114071863 +131 5 training.batch_size 1.0 +131 5 training.label_smoothing 0.0026556055158199593 +131 6 model.embedding_dim 0.0 +131 6 model.hidden_dropout_rate 0.4277041597910307 +131 6 model.num_filters 1.0 +131 6 optimizer.lr 0.022522235036748096 +131 6 training.batch_size 1.0 +131 6 training.label_smoothing 0.009067244444010093 +131 7 model.embedding_dim 1.0 +131 7 model.hidden_dropout_rate 0.39151941181079997 +131 7 model.num_filters 5.0 +131 7 optimizer.lr 0.0027480544071152068 +131 7 training.batch_size 1.0 +131 7 training.label_smoothing 0.19896964706395373 +131 8 model.embedding_dim 0.0 +131 8 model.hidden_dropout_rate 0.3642909470730783 +131 8 model.num_filters 8.0 +131 8 optimizer.lr 0.0036159719545729883 +131 8 training.batch_size 1.0 +131 8 training.label_smoothing 0.03272431367096035 +131 9 model.embedding_dim 1.0 +131 9 model.hidden_dropout_rate 0.39765870981169404 +131 9 model.num_filters 1.0 +131 9 optimizer.lr 0.008578561998897364 +131 9 training.batch_size 2.0 +131 9 training.label_smoothing 0.009691582204773768 +131 10 model.embedding_dim 2.0 +131 10 model.hidden_dropout_rate 0.40177130069311207 +131 10 model.num_filters 4.0 +131 10 optimizer.lr 0.052287491405345665 +131 10 training.batch_size 0.0 +131 10 training.label_smoothing 0.47868998012730635 +131 11 model.embedding_dim 1.0 +131 11 model.hidden_dropout_rate 0.26747785840789984 +131 11 model.num_filters 6.0 +131 11 optimizer.lr 0.049714684717649435 +131 11 training.batch_size 1.0 +131 11 training.label_smoothing 0.018012481345146922 +131 12 model.embedding_dim 1.0 +131 12 
model.hidden_dropout_rate 0.4069950895384359 +131 12 model.num_filters 9.0 +131 12 optimizer.lr 0.007630361469807731 +131 12 training.batch_size 0.0 +131 12 training.label_smoothing 0.04471591369950539 +131 13 model.embedding_dim 0.0 +131 13 model.hidden_dropout_rate 0.29019481395432145 +131 13 model.num_filters 4.0 +131 13 optimizer.lr 0.014991495415215059 +131 13 training.batch_size 0.0 +131 13 training.label_smoothing 0.208479554025314 +131 14 model.embedding_dim 0.0 +131 14 model.hidden_dropout_rate 0.14352670992354155 +131 14 model.num_filters 9.0 +131 14 optimizer.lr 0.011765566216951141 +131 14 training.batch_size 1.0 +131 14 training.label_smoothing 0.2528331131838901 +131 15 model.embedding_dim 1.0 +131 15 model.hidden_dropout_rate 0.10606773390349788 +131 15 model.num_filters 5.0 +131 15 optimizer.lr 0.0709969218532699 +131 15 training.batch_size 1.0 +131 15 training.label_smoothing 0.002823090476286926 +131 16 model.embedding_dim 1.0 +131 16 model.hidden_dropout_rate 0.18638350742665144 +131 16 model.num_filters 9.0 +131 16 optimizer.lr 0.003742066025194958 +131 16 training.batch_size 2.0 +131 16 training.label_smoothing 0.49002009373896166 +131 17 model.embedding_dim 2.0 +131 17 model.hidden_dropout_rate 0.25031787968244357 +131 17 model.num_filters 2.0 +131 17 optimizer.lr 0.0047286659859269485 +131 17 training.batch_size 0.0 +131 17 training.label_smoothing 0.42228687596464354 +131 18 model.embedding_dim 0.0 +131 18 model.hidden_dropout_rate 0.3002212343067545 +131 18 model.num_filters 4.0 +131 18 optimizer.lr 0.0017834887020636017 +131 18 training.batch_size 1.0 +131 18 training.label_smoothing 0.0061326986384014695 +131 19 model.embedding_dim 2.0 +131 19 model.hidden_dropout_rate 0.45402597991504057 +131 19 model.num_filters 8.0 +131 19 optimizer.lr 0.004263852086141031 +131 19 training.batch_size 0.0 +131 19 training.label_smoothing 0.07729900093555503 +131 20 model.embedding_dim 1.0 +131 20 model.hidden_dropout_rate 0.364827265825025 +131 20 
model.num_filters 8.0 +131 20 optimizer.lr 0.004481277195418237 +131 20 training.batch_size 0.0 +131 20 training.label_smoothing 0.002027113495354146 +131 21 model.embedding_dim 1.0 +131 21 model.hidden_dropout_rate 0.24190048931332186 +131 21 model.num_filters 9.0 +131 21 optimizer.lr 0.0014918851026018776 +131 21 training.batch_size 2.0 +131 21 training.label_smoothing 0.04345372651871797 +131 22 model.embedding_dim 2.0 +131 22 model.hidden_dropout_rate 0.4856435390816798 +131 22 model.num_filters 3.0 +131 22 optimizer.lr 0.005148772387834775 +131 22 training.batch_size 2.0 +131 22 training.label_smoothing 0.016628548508876573 +131 23 model.embedding_dim 1.0 +131 23 model.hidden_dropout_rate 0.16970245945561127 +131 23 model.num_filters 0.0 +131 23 optimizer.lr 0.046510251699013794 +131 23 training.batch_size 0.0 +131 23 training.label_smoothing 0.02466271036461952 +131 24 model.embedding_dim 2.0 +131 24 model.hidden_dropout_rate 0.29617282155043034 +131 24 model.num_filters 1.0 +131 24 optimizer.lr 0.007289900384163822 +131 24 training.batch_size 1.0 +131 24 training.label_smoothing 0.034336675995103545 +131 25 model.embedding_dim 0.0 +131 25 model.hidden_dropout_rate 0.26304305606905043 +131 25 model.num_filters 3.0 +131 25 optimizer.lr 0.007921880374361303 +131 25 training.batch_size 0.0 +131 25 training.label_smoothing 0.17353021871552274 +131 26 model.embedding_dim 0.0 +131 26 model.hidden_dropout_rate 0.21935955836173138 +131 26 model.num_filters 4.0 +131 26 optimizer.lr 0.011946384084301502 +131 26 training.batch_size 1.0 +131 26 training.label_smoothing 0.012851848564906686 +131 27 model.embedding_dim 2.0 +131 27 model.hidden_dropout_rate 0.2669911655338061 +131 27 model.num_filters 4.0 +131 27 optimizer.lr 0.058632368059890354 +131 27 training.batch_size 2.0 +131 27 training.label_smoothing 0.005155253699842299 +131 28 model.embedding_dim 1.0 +131 28 model.hidden_dropout_rate 0.2788414256837213 +131 28 model.num_filters 6.0 +131 28 optimizer.lr 
0.010579020605608367 +131 28 training.batch_size 1.0 +131 28 training.label_smoothing 0.012048659079017643 +131 29 model.embedding_dim 2.0 +131 29 model.hidden_dropout_rate 0.18037340723408435 +131 29 model.num_filters 5.0 +131 29 optimizer.lr 0.03752179761354119 +131 29 training.batch_size 0.0 +131 29 training.label_smoothing 0.0378413242409794 +131 30 model.embedding_dim 2.0 +131 30 model.hidden_dropout_rate 0.39864186791984 +131 30 model.num_filters 3.0 +131 30 optimizer.lr 0.028611465656675845 +131 30 training.batch_size 1.0 +131 30 training.label_smoothing 0.04242842304895787 +131 31 model.embedding_dim 2.0 +131 31 model.hidden_dropout_rate 0.45757371917079637 +131 31 model.num_filters 6.0 +131 31 optimizer.lr 0.013145671642087966 +131 31 training.batch_size 1.0 +131 31 training.label_smoothing 0.2587438219425594 +131 32 model.embedding_dim 2.0 +131 32 model.hidden_dropout_rate 0.4587204344804131 +131 32 model.num_filters 3.0 +131 32 optimizer.lr 0.012242368740056249 +131 32 training.batch_size 0.0 +131 32 training.label_smoothing 0.012722183196583114 +131 33 model.embedding_dim 1.0 +131 33 model.hidden_dropout_rate 0.29625853539828484 +131 33 model.num_filters 7.0 +131 33 optimizer.lr 0.01407370354792825 +131 33 training.batch_size 2.0 +131 33 training.label_smoothing 0.18498518116963808 +131 34 model.embedding_dim 0.0 +131 34 model.hidden_dropout_rate 0.18044616959661952 +131 34 model.num_filters 1.0 +131 34 optimizer.lr 0.008156032652126558 +131 34 training.batch_size 2.0 +131 34 training.label_smoothing 0.6064221916586389 +131 35 model.embedding_dim 0.0 +131 35 model.hidden_dropout_rate 0.22092556732905588 +131 35 model.num_filters 0.0 +131 35 optimizer.lr 0.0028744464379365843 +131 35 training.batch_size 1.0 +131 35 training.label_smoothing 0.026534330669943935 +131 36 model.embedding_dim 1.0 +131 36 model.hidden_dropout_rate 0.3768175611764176 +131 36 model.num_filters 3.0 +131 36 optimizer.lr 0.0024737367109634086 +131 36 training.batch_size 2.0 +131 36 
training.label_smoothing 0.01125954458692714 +131 37 model.embedding_dim 0.0 +131 37 model.hidden_dropout_rate 0.21006725287810613 +131 37 model.num_filters 3.0 +131 37 optimizer.lr 0.0032025574506985266 +131 37 training.batch_size 1.0 +131 37 training.label_smoothing 0.15886239164883578 +131 38 model.embedding_dim 2.0 +131 38 model.hidden_dropout_rate 0.13312512813480595 +131 38 model.num_filters 1.0 +131 38 optimizer.lr 0.07840969897524123 +131 38 training.batch_size 1.0 +131 38 training.label_smoothing 0.4425546756851594 +131 39 model.embedding_dim 1.0 +131 39 model.hidden_dropout_rate 0.40511800904492357 +131 39 model.num_filters 8.0 +131 39 optimizer.lr 0.047544028248834254 +131 39 training.batch_size 1.0 +131 39 training.label_smoothing 0.29931566016028965 +131 40 model.embedding_dim 1.0 +131 40 model.hidden_dropout_rate 0.24810967495496528 +131 40 model.num_filters 0.0 +131 40 optimizer.lr 0.03134726928502081 +131 40 training.batch_size 2.0 +131 40 training.label_smoothing 0.0019900150764500715 +131 41 model.embedding_dim 1.0 +131 41 model.hidden_dropout_rate 0.33820377137446267 +131 41 model.num_filters 4.0 +131 41 optimizer.lr 0.017259528406792957 +131 41 training.batch_size 0.0 +131 41 training.label_smoothing 0.2159044045607239 +131 42 model.embedding_dim 0.0 +131 42 model.hidden_dropout_rate 0.23603184441992006 +131 42 model.num_filters 6.0 +131 42 optimizer.lr 0.008708014431802595 +131 42 training.batch_size 1.0 +131 42 training.label_smoothing 0.0025874854861101847 +131 43 model.embedding_dim 2.0 +131 43 model.hidden_dropout_rate 0.469141701467677 +131 43 model.num_filters 5.0 +131 43 optimizer.lr 0.0012053403666791966 +131 43 training.batch_size 1.0 +131 43 training.label_smoothing 0.0010457515563338574 +131 44 model.embedding_dim 0.0 +131 44 model.hidden_dropout_rate 0.2944896546167932 +131 44 model.num_filters 0.0 +131 44 optimizer.lr 0.017957334362302502 +131 44 training.batch_size 0.0 +131 44 training.label_smoothing 0.011426619506533597 +131 45 
model.embedding_dim 2.0 +131 45 model.hidden_dropout_rate 0.28987962035983955 +131 45 model.num_filters 3.0 +131 45 optimizer.lr 0.053230855065078006 +131 45 training.batch_size 2.0 +131 45 training.label_smoothing 0.0021863239681402173 +131 46 model.embedding_dim 1.0 +131 46 model.hidden_dropout_rate 0.12047283094351516 +131 46 model.num_filters 1.0 +131 46 optimizer.lr 0.0012048733924218823 +131 46 training.batch_size 1.0 +131 46 training.label_smoothing 0.0025357938099405744 +131 47 model.embedding_dim 2.0 +131 47 model.hidden_dropout_rate 0.17928373632900974 +131 47 model.num_filters 7.0 +131 47 optimizer.lr 0.008706149078236351 +131 47 training.batch_size 2.0 +131 47 training.label_smoothing 0.003565944987474559 +131 48 model.embedding_dim 0.0 +131 48 model.hidden_dropout_rate 0.37314580511655915 +131 48 model.num_filters 7.0 +131 48 optimizer.lr 0.002947550472514459 +131 48 training.batch_size 1.0 +131 48 training.label_smoothing 0.2770550341813728 +131 49 model.embedding_dim 1.0 +131 49 model.hidden_dropout_rate 0.14835085911172988 +131 49 model.num_filters 8.0 +131 49 optimizer.lr 0.008029773188544746 +131 49 training.batch_size 0.0 +131 49 training.label_smoothing 0.010797781167915861 +131 50 model.embedding_dim 0.0 +131 50 model.hidden_dropout_rate 0.2295191505014038 +131 50 model.num_filters 5.0 +131 50 optimizer.lr 0.005377633362006978 +131 50 training.batch_size 1.0 +131 50 training.label_smoothing 0.6198154807109872 +131 51 model.embedding_dim 0.0 +131 51 model.hidden_dropout_rate 0.23142809176299006 +131 51 model.num_filters 1.0 +131 51 optimizer.lr 0.009525768922134427 +131 51 training.batch_size 2.0 +131 51 training.label_smoothing 0.007982005203755969 +131 52 model.embedding_dim 2.0 +131 52 model.hidden_dropout_rate 0.18300253068012215 +131 52 model.num_filters 6.0 +131 52 optimizer.lr 0.002324931386110108 +131 52 training.batch_size 2.0 +131 52 training.label_smoothing 0.0015031867701090407 +131 53 model.embedding_dim 1.0 +131 53 
model.hidden_dropout_rate 0.12976651178692095 +131 53 model.num_filters 7.0 +131 53 optimizer.lr 0.009161836155042402 +131 53 training.batch_size 0.0 +131 53 training.label_smoothing 0.7402466619816425 +131 54 model.embedding_dim 2.0 +131 54 model.hidden_dropout_rate 0.49008790361071963 +131 54 model.num_filters 1.0 +131 54 optimizer.lr 0.004467191966906005 +131 54 training.batch_size 1.0 +131 54 training.label_smoothing 0.0021925335561166588 +131 55 model.embedding_dim 2.0 +131 55 model.hidden_dropout_rate 0.1969180466340925 +131 55 model.num_filters 3.0 +131 55 optimizer.lr 0.03767313938200749 +131 55 training.batch_size 2.0 +131 55 training.label_smoothing 0.005770362565564888 +131 56 model.embedding_dim 2.0 +131 56 model.hidden_dropout_rate 0.41308905677303465 +131 56 model.num_filters 0.0 +131 56 optimizer.lr 0.023684473931585653 +131 56 training.batch_size 1.0 +131 56 training.label_smoothing 0.6605300506341204 +131 1 dataset """kinships""" +131 1 model """convkb""" +131 1 loss """softplus""" +131 1 regularizer """no""" +131 1 optimizer """adam""" +131 1 training_loop """lcwa""" +131 1 evaluator """rankbased""" +131 2 dataset """kinships""" +131 2 model """convkb""" +131 2 loss """softplus""" +131 2 regularizer """no""" +131 2 optimizer """adam""" +131 2 training_loop """lcwa""" +131 2 evaluator """rankbased""" +131 3 dataset """kinships""" +131 3 model """convkb""" +131 3 loss """softplus""" +131 3 regularizer """no""" +131 3 optimizer """adam""" +131 3 training_loop """lcwa""" +131 3 evaluator """rankbased""" +131 4 dataset """kinships""" +131 4 model """convkb""" +131 4 loss """softplus""" +131 4 regularizer """no""" +131 4 optimizer """adam""" +131 4 training_loop """lcwa""" +131 4 evaluator """rankbased""" +131 5 dataset """kinships""" +131 5 model """convkb""" +131 5 loss """softplus""" +131 5 regularizer """no""" +131 5 optimizer """adam""" +131 5 training_loop """lcwa""" +131 5 evaluator """rankbased""" +131 6 dataset """kinships""" +131 6 model 
"""convkb""" +131 6 loss """softplus""" +131 6 regularizer """no""" +131 6 optimizer """adam""" +131 6 training_loop """lcwa""" +131 6 evaluator """rankbased""" +131 7 dataset """kinships""" +131 7 model """convkb""" +131 7 loss """softplus""" +131 7 regularizer """no""" +131 7 optimizer """adam""" +131 7 training_loop """lcwa""" +131 7 evaluator """rankbased""" +131 8 dataset """kinships""" +131 8 model """convkb""" +131 8 loss """softplus""" +131 8 regularizer """no""" +131 8 optimizer """adam""" +131 8 training_loop """lcwa""" +131 8 evaluator """rankbased""" +131 9 dataset """kinships""" +131 9 model """convkb""" +131 9 loss """softplus""" +131 9 regularizer """no""" +131 9 optimizer """adam""" +131 9 training_loop """lcwa""" +131 9 evaluator """rankbased""" +131 10 dataset """kinships""" +131 10 model """convkb""" +131 10 loss """softplus""" +131 10 regularizer """no""" +131 10 optimizer """adam""" +131 10 training_loop """lcwa""" +131 10 evaluator """rankbased""" +131 11 dataset """kinships""" +131 11 model """convkb""" +131 11 loss """softplus""" +131 11 regularizer """no""" +131 11 optimizer """adam""" +131 11 training_loop """lcwa""" +131 11 evaluator """rankbased""" +131 12 dataset """kinships""" +131 12 model """convkb""" +131 12 loss """softplus""" +131 12 regularizer """no""" +131 12 optimizer """adam""" +131 12 training_loop """lcwa""" +131 12 evaluator """rankbased""" +131 13 dataset """kinships""" +131 13 model """convkb""" +131 13 loss """softplus""" +131 13 regularizer """no""" +131 13 optimizer """adam""" +131 13 training_loop """lcwa""" +131 13 evaluator """rankbased""" +131 14 dataset """kinships""" +131 14 model """convkb""" +131 14 loss """softplus""" +131 14 regularizer """no""" +131 14 optimizer """adam""" +131 14 training_loop """lcwa""" +131 14 evaluator """rankbased""" +131 15 dataset """kinships""" +131 15 model """convkb""" +131 15 loss """softplus""" +131 15 regularizer """no""" +131 15 optimizer """adam""" +131 15 training_loop 
"""lcwa""" +131 15 evaluator """rankbased""" +131 16 dataset """kinships""" +131 16 model """convkb""" +131 16 loss """softplus""" +131 16 regularizer """no""" +131 16 optimizer """adam""" +131 16 training_loop """lcwa""" +131 16 evaluator """rankbased""" +131 17 dataset """kinships""" +131 17 model """convkb""" +131 17 loss """softplus""" +131 17 regularizer """no""" +131 17 optimizer """adam""" +131 17 training_loop """lcwa""" +131 17 evaluator """rankbased""" +131 18 dataset """kinships""" +131 18 model """convkb""" +131 18 loss """softplus""" +131 18 regularizer """no""" +131 18 optimizer """adam""" +131 18 training_loop """lcwa""" +131 18 evaluator """rankbased""" +131 19 dataset """kinships""" +131 19 model """convkb""" +131 19 loss """softplus""" +131 19 regularizer """no""" +131 19 optimizer """adam""" +131 19 training_loop """lcwa""" +131 19 evaluator """rankbased""" +131 20 dataset """kinships""" +131 20 model """convkb""" +131 20 loss """softplus""" +131 20 regularizer """no""" +131 20 optimizer """adam""" +131 20 training_loop """lcwa""" +131 20 evaluator """rankbased""" +131 21 dataset """kinships""" +131 21 model """convkb""" +131 21 loss """softplus""" +131 21 regularizer """no""" +131 21 optimizer """adam""" +131 21 training_loop """lcwa""" +131 21 evaluator """rankbased""" +131 22 dataset """kinships""" +131 22 model """convkb""" +131 22 loss """softplus""" +131 22 regularizer """no""" +131 22 optimizer """adam""" +131 22 training_loop """lcwa""" +131 22 evaluator """rankbased""" +131 23 dataset """kinships""" +131 23 model """convkb""" +131 23 loss """softplus""" +131 23 regularizer """no""" +131 23 optimizer """adam""" +131 23 training_loop """lcwa""" +131 23 evaluator """rankbased""" +131 24 dataset """kinships""" +131 24 model """convkb""" +131 24 loss """softplus""" +131 24 regularizer """no""" +131 24 optimizer """adam""" +131 24 training_loop """lcwa""" +131 24 evaluator """rankbased""" +131 25 dataset """kinships""" +131 25 model 
"""convkb""" +131 25 loss """softplus""" +131 25 regularizer """no""" +131 25 optimizer """adam""" +131 25 training_loop """lcwa""" +131 25 evaluator """rankbased""" +131 26 dataset """kinships""" +131 26 model """convkb""" +131 26 loss """softplus""" +131 26 regularizer """no""" +131 26 optimizer """adam""" +131 26 training_loop """lcwa""" +131 26 evaluator """rankbased""" +131 27 dataset """kinships""" +131 27 model """convkb""" +131 27 loss """softplus""" +131 27 regularizer """no""" +131 27 optimizer """adam""" +131 27 training_loop """lcwa""" +131 27 evaluator """rankbased""" +131 28 dataset """kinships""" +131 28 model """convkb""" +131 28 loss """softplus""" +131 28 regularizer """no""" +131 28 optimizer """adam""" +131 28 training_loop """lcwa""" +131 28 evaluator """rankbased""" +131 29 dataset """kinships""" +131 29 model """convkb""" +131 29 loss """softplus""" +131 29 regularizer """no""" +131 29 optimizer """adam""" +131 29 training_loop """lcwa""" +131 29 evaluator """rankbased""" +131 30 dataset """kinships""" +131 30 model """convkb""" +131 30 loss """softplus""" +131 30 regularizer """no""" +131 30 optimizer """adam""" +131 30 training_loop """lcwa""" +131 30 evaluator """rankbased""" +131 31 dataset """kinships""" +131 31 model """convkb""" +131 31 loss """softplus""" +131 31 regularizer """no""" +131 31 optimizer """adam""" +131 31 training_loop """lcwa""" +131 31 evaluator """rankbased""" +131 32 dataset """kinships""" +131 32 model """convkb""" +131 32 loss """softplus""" +131 32 regularizer """no""" +131 32 optimizer """adam""" +131 32 training_loop """lcwa""" +131 32 evaluator """rankbased""" +131 33 dataset """kinships""" +131 33 model """convkb""" +131 33 loss """softplus""" +131 33 regularizer """no""" +131 33 optimizer """adam""" +131 33 training_loop """lcwa""" +131 33 evaluator """rankbased""" +131 34 dataset """kinships""" +131 34 model """convkb""" +131 34 loss """softplus""" +131 34 regularizer """no""" +131 34 optimizer """adam""" 
+131 34 training_loop """lcwa""" +131 34 evaluator """rankbased""" +131 35 dataset """kinships""" +131 35 model """convkb""" +131 35 loss """softplus""" +131 35 regularizer """no""" +131 35 optimizer """adam""" +131 35 training_loop """lcwa""" +131 35 evaluator """rankbased""" +131 36 dataset """kinships""" +131 36 model """convkb""" +131 36 loss """softplus""" +131 36 regularizer """no""" +131 36 optimizer """adam""" +131 36 training_loop """lcwa""" +131 36 evaluator """rankbased""" +131 37 dataset """kinships""" +131 37 model """convkb""" +131 37 loss """softplus""" +131 37 regularizer """no""" +131 37 optimizer """adam""" +131 37 training_loop """lcwa""" +131 37 evaluator """rankbased""" +131 38 dataset """kinships""" +131 38 model """convkb""" +131 38 loss """softplus""" +131 38 regularizer """no""" +131 38 optimizer """adam""" +131 38 training_loop """lcwa""" +131 38 evaluator """rankbased""" +131 39 dataset """kinships""" +131 39 model """convkb""" +131 39 loss """softplus""" +131 39 regularizer """no""" +131 39 optimizer """adam""" +131 39 training_loop """lcwa""" +131 39 evaluator """rankbased""" +131 40 dataset """kinships""" +131 40 model """convkb""" +131 40 loss """softplus""" +131 40 regularizer """no""" +131 40 optimizer """adam""" +131 40 training_loop """lcwa""" +131 40 evaluator """rankbased""" +131 41 dataset """kinships""" +131 41 model """convkb""" +131 41 loss """softplus""" +131 41 regularizer """no""" +131 41 optimizer """adam""" +131 41 training_loop """lcwa""" +131 41 evaluator """rankbased""" +131 42 dataset """kinships""" +131 42 model """convkb""" +131 42 loss """softplus""" +131 42 regularizer """no""" +131 42 optimizer """adam""" +131 42 training_loop """lcwa""" +131 42 evaluator """rankbased""" +131 43 dataset """kinships""" +131 43 model """convkb""" +131 43 loss """softplus""" +131 43 regularizer """no""" +131 43 optimizer """adam""" +131 43 training_loop """lcwa""" +131 43 evaluator """rankbased""" +131 44 dataset """kinships""" 
+131 44 model """convkb""" +131 44 loss """softplus""" +131 44 regularizer """no""" +131 44 optimizer """adam""" +131 44 training_loop """lcwa""" +131 44 evaluator """rankbased""" +131 45 dataset """kinships""" +131 45 model """convkb""" +131 45 loss """softplus""" +131 45 regularizer """no""" +131 45 optimizer """adam""" +131 45 training_loop """lcwa""" +131 45 evaluator """rankbased""" +131 46 dataset """kinships""" +131 46 model """convkb""" +131 46 loss """softplus""" +131 46 regularizer """no""" +131 46 optimizer """adam""" +131 46 training_loop """lcwa""" +131 46 evaluator """rankbased""" +131 47 dataset """kinships""" +131 47 model """convkb""" +131 47 loss """softplus""" +131 47 regularizer """no""" +131 47 optimizer """adam""" +131 47 training_loop """lcwa""" +131 47 evaluator """rankbased""" +131 48 dataset """kinships""" +131 48 model """convkb""" +131 48 loss """softplus""" +131 48 regularizer """no""" +131 48 optimizer """adam""" +131 48 training_loop """lcwa""" +131 48 evaluator """rankbased""" +131 49 dataset """kinships""" +131 49 model """convkb""" +131 49 loss """softplus""" +131 49 regularizer """no""" +131 49 optimizer """adam""" +131 49 training_loop """lcwa""" +131 49 evaluator """rankbased""" +131 50 dataset """kinships""" +131 50 model """convkb""" +131 50 loss """softplus""" +131 50 regularizer """no""" +131 50 optimizer """adam""" +131 50 training_loop """lcwa""" +131 50 evaluator """rankbased""" +131 51 dataset """kinships""" +131 51 model """convkb""" +131 51 loss """softplus""" +131 51 regularizer """no""" +131 51 optimizer """adam""" +131 51 training_loop """lcwa""" +131 51 evaluator """rankbased""" +131 52 dataset """kinships""" +131 52 model """convkb""" +131 52 loss """softplus""" +131 52 regularizer """no""" +131 52 optimizer """adam""" +131 52 training_loop """lcwa""" +131 52 evaluator """rankbased""" +131 53 dataset """kinships""" +131 53 model """convkb""" +131 53 loss """softplus""" +131 53 regularizer """no""" +131 53 
optimizer """adam""" +131 53 training_loop """lcwa""" +131 53 evaluator """rankbased""" +131 54 dataset """kinships""" +131 54 model """convkb""" +131 54 loss """softplus""" +131 54 regularizer """no""" +131 54 optimizer """adam""" +131 54 training_loop """lcwa""" +131 54 evaluator """rankbased""" +131 55 dataset """kinships""" +131 55 model """convkb""" +131 55 loss """softplus""" +131 55 regularizer """no""" +131 55 optimizer """adam""" +131 55 training_loop """lcwa""" +131 55 evaluator """rankbased""" +131 56 dataset """kinships""" +131 56 model """convkb""" +131 56 loss """softplus""" +131 56 regularizer """no""" +131 56 optimizer """adam""" +131 56 training_loop """lcwa""" +131 56 evaluator """rankbased""" +132 1 model.embedding_dim 2.0 +132 1 model.hidden_dropout_rate 0.3316052488497317 +132 1 model.num_filters 7.0 +132 1 optimizer.lr 0.009638431536825509 +132 1 training.batch_size 1.0 +132 1 training.label_smoothing 0.036282316485898664 +132 2 model.embedding_dim 0.0 +132 2 model.hidden_dropout_rate 0.37515183308352795 +132 2 model.num_filters 7.0 +132 2 optimizer.lr 0.04163505942813297 +132 2 training.batch_size 2.0 +132 2 training.label_smoothing 0.025185351761823686 +132 3 model.embedding_dim 1.0 +132 3 model.hidden_dropout_rate 0.45544181530476147 +132 3 model.num_filters 0.0 +132 3 optimizer.lr 0.026394277347672978 +132 3 training.batch_size 1.0 +132 3 training.label_smoothing 0.07374616096756058 +132 4 model.embedding_dim 2.0 +132 4 model.hidden_dropout_rate 0.2335434614996259 +132 4 model.num_filters 8.0 +132 4 optimizer.lr 0.01612227000804735 +132 4 training.batch_size 1.0 +132 4 training.label_smoothing 0.002009500720761716 +132 5 model.embedding_dim 2.0 +132 5 model.hidden_dropout_rate 0.14648199280934487 +132 5 model.num_filters 7.0 +132 5 optimizer.lr 0.0028037134200287984 +132 5 training.batch_size 1.0 +132 5 training.label_smoothing 0.04807123565379233 +132 6 model.embedding_dim 2.0 +132 6 model.hidden_dropout_rate 0.2148880012504146 +132 6 
model.num_filters 5.0 +132 6 optimizer.lr 0.005660498681223088 +132 6 training.batch_size 0.0 +132 6 training.label_smoothing 0.04303933152895446 +132 7 model.embedding_dim 0.0 +132 7 model.hidden_dropout_rate 0.4753693221137629 +132 7 model.num_filters 9.0 +132 7 optimizer.lr 0.005947911170661775 +132 7 training.batch_size 0.0 +132 7 training.label_smoothing 0.15493029938582406 +132 8 model.embedding_dim 0.0 +132 8 model.hidden_dropout_rate 0.2703975388614176 +132 8 model.num_filters 2.0 +132 8 optimizer.lr 0.026218599362749728 +132 8 training.batch_size 2.0 +132 8 training.label_smoothing 0.006075533353575266 +132 9 model.embedding_dim 0.0 +132 9 model.hidden_dropout_rate 0.17834457368737566 +132 9 model.num_filters 6.0 +132 9 optimizer.lr 0.004588244420856479 +132 9 training.batch_size 2.0 +132 9 training.label_smoothing 0.05668280092807182 +132 10 model.embedding_dim 0.0 +132 10 model.hidden_dropout_rate 0.22516258245803566 +132 10 model.num_filters 2.0 +132 10 optimizer.lr 0.0262159254838578 +132 10 training.batch_size 0.0 +132 10 training.label_smoothing 0.008952939343681063 +132 11 model.embedding_dim 2.0 +132 11 model.hidden_dropout_rate 0.14993117009167867 +132 11 model.num_filters 1.0 +132 11 optimizer.lr 0.003126404695628346 +132 11 training.batch_size 2.0 +132 11 training.label_smoothing 0.09362249455522487 +132 12 model.embedding_dim 1.0 +132 12 model.hidden_dropout_rate 0.11747955814398243 +132 12 model.num_filters 1.0 +132 12 optimizer.lr 0.002766168777418694 +132 12 training.batch_size 2.0 +132 12 training.label_smoothing 0.21511931825342215 +132 13 model.embedding_dim 0.0 +132 13 model.hidden_dropout_rate 0.35648993296979503 +132 13 model.num_filters 4.0 +132 13 optimizer.lr 0.0663197187420695 +132 13 training.batch_size 0.0 +132 13 training.label_smoothing 0.11964473873953395 +132 14 model.embedding_dim 2.0 +132 14 model.hidden_dropout_rate 0.2851798549387299 +132 14 model.num_filters 9.0 +132 14 optimizer.lr 0.07621443388388345 +132 14 
training.batch_size 0.0 +132 14 training.label_smoothing 0.002839833211592506 +132 15 model.embedding_dim 1.0 +132 15 model.hidden_dropout_rate 0.3242053552872801 +132 15 model.num_filters 4.0 +132 15 optimizer.lr 0.016145689630529456 +132 15 training.batch_size 1.0 +132 15 training.label_smoothing 0.00309617603979226 +132 16 model.embedding_dim 1.0 +132 16 model.hidden_dropout_rate 0.41480730395913945 +132 16 model.num_filters 3.0 +132 16 optimizer.lr 0.015550993854433124 +132 16 training.batch_size 2.0 +132 16 training.label_smoothing 0.001525757677954074 +132 17 model.embedding_dim 2.0 +132 17 model.hidden_dropout_rate 0.4352174193599344 +132 17 model.num_filters 6.0 +132 17 optimizer.lr 0.0015408758677172881 +132 17 training.batch_size 1.0 +132 17 training.label_smoothing 0.4672683337007287 +132 18 model.embedding_dim 1.0 +132 18 model.hidden_dropout_rate 0.42998997228817637 +132 18 model.num_filters 5.0 +132 18 optimizer.lr 0.0014495620251012084 +132 18 training.batch_size 2.0 +132 18 training.label_smoothing 0.011630151975370324 +132 19 model.embedding_dim 2.0 +132 19 model.hidden_dropout_rate 0.26645165971552176 +132 19 model.num_filters 2.0 +132 19 optimizer.lr 0.01619895567062282 +132 19 training.batch_size 2.0 +132 19 training.label_smoothing 0.9628962960661344 +132 20 model.embedding_dim 1.0 +132 20 model.hidden_dropout_rate 0.23067208923737886 +132 20 model.num_filters 1.0 +132 20 optimizer.lr 0.004030011424477036 +132 20 training.batch_size 1.0 +132 20 training.label_smoothing 0.15142312152827808 +132 21 model.embedding_dim 2.0 +132 21 model.hidden_dropout_rate 0.4513678799466151 +132 21 model.num_filters 2.0 +132 21 optimizer.lr 0.002811290166286752 +132 21 training.batch_size 2.0 +132 21 training.label_smoothing 0.034689262647328585 +132 22 model.embedding_dim 2.0 +132 22 model.hidden_dropout_rate 0.1423761619828141 +132 22 model.num_filters 7.0 +132 22 optimizer.lr 0.05314804323434734 +132 22 training.batch_size 2.0 +132 22 training.label_smoothing 
0.00712713187121465 +132 23 model.embedding_dim 0.0 +132 23 model.hidden_dropout_rate 0.13428235147449905 +132 23 model.num_filters 8.0 +132 23 optimizer.lr 0.055836637654453104 +132 23 training.batch_size 0.0 +132 23 training.label_smoothing 0.0015671010528717476 +132 24 model.embedding_dim 0.0 +132 24 model.hidden_dropout_rate 0.17738659354602104 +132 24 model.num_filters 9.0 +132 24 optimizer.lr 0.0012249417240405658 +132 24 training.batch_size 0.0 +132 24 training.label_smoothing 0.09922915786381777 +132 25 model.embedding_dim 0.0 +132 25 model.hidden_dropout_rate 0.12334316842228797 +132 25 model.num_filters 5.0 +132 25 optimizer.lr 0.032020846767442566 +132 25 training.batch_size 2.0 +132 25 training.label_smoothing 0.043507252771849254 +132 26 model.embedding_dim 0.0 +132 26 model.hidden_dropout_rate 0.29134966766730197 +132 26 model.num_filters 0.0 +132 26 optimizer.lr 0.006391878917154308 +132 26 training.batch_size 1.0 +132 26 training.label_smoothing 0.0011293579203725632 +132 27 model.embedding_dim 1.0 +132 27 model.hidden_dropout_rate 0.34641712340978414 +132 27 model.num_filters 6.0 +132 27 optimizer.lr 0.03146518952091869 +132 27 training.batch_size 2.0 +132 27 training.label_smoothing 0.02907655157716399 +132 28 model.embedding_dim 0.0 +132 28 model.hidden_dropout_rate 0.2724992969319774 +132 28 model.num_filters 0.0 +132 28 optimizer.lr 0.0018166001640837922 +132 28 training.batch_size 1.0 +132 28 training.label_smoothing 0.003994090729715608 +132 29 model.embedding_dim 2.0 +132 29 model.hidden_dropout_rate 0.3455588048680378 +132 29 model.num_filters 2.0 +132 29 optimizer.lr 0.006990412160985244 +132 29 training.batch_size 0.0 +132 29 training.label_smoothing 0.011382306675178068 +132 30 model.embedding_dim 1.0 +132 30 model.hidden_dropout_rate 0.4228543324644859 +132 30 model.num_filters 3.0 +132 30 optimizer.lr 0.009235506755856172 +132 30 training.batch_size 2.0 +132 30 training.label_smoothing 0.41458621839273274 +132 31 model.embedding_dim 
1.0 +132 31 model.hidden_dropout_rate 0.1955340586157032 +132 31 model.num_filters 8.0 +132 31 optimizer.lr 0.063084844043486 +132 31 training.batch_size 2.0 +132 31 training.label_smoothing 0.01825911945905897 +132 32 model.embedding_dim 1.0 +132 32 model.hidden_dropout_rate 0.3845225612545343 +132 32 model.num_filters 2.0 +132 32 optimizer.lr 0.0012270715130473537 +132 32 training.batch_size 1.0 +132 32 training.label_smoothing 0.08047220645617484 +132 33 model.embedding_dim 0.0 +132 33 model.hidden_dropout_rate 0.3119526136383033 +132 33 model.num_filters 2.0 +132 33 optimizer.lr 0.032821305315890704 +132 33 training.batch_size 2.0 +132 33 training.label_smoothing 0.011238979719425108 +132 34 model.embedding_dim 2.0 +132 34 model.hidden_dropout_rate 0.42394276588174307 +132 34 model.num_filters 1.0 +132 34 optimizer.lr 0.006172787104085221 +132 34 training.batch_size 2.0 +132 34 training.label_smoothing 0.0038415021897928057 +132 35 model.embedding_dim 2.0 +132 35 model.hidden_dropout_rate 0.22894386939944 +132 35 model.num_filters 5.0 +132 35 optimizer.lr 0.09473932241336758 +132 35 training.batch_size 1.0 +132 35 training.label_smoothing 0.10596954739139842 +132 36 model.embedding_dim 2.0 +132 36 model.hidden_dropout_rate 0.21229601097831857 +132 36 model.num_filters 1.0 +132 36 optimizer.lr 0.013796011349642342 +132 36 training.batch_size 1.0 +132 36 training.label_smoothing 0.0014956529606798576 +132 37 model.embedding_dim 0.0 +132 37 model.hidden_dropout_rate 0.40490545835716707 +132 37 model.num_filters 8.0 +132 37 optimizer.lr 0.04287580015600547 +132 37 training.batch_size 2.0 +132 37 training.label_smoothing 0.03629468567981398 +132 38 model.embedding_dim 1.0 +132 38 model.hidden_dropout_rate 0.14770241070213783 +132 38 model.num_filters 8.0 +132 38 optimizer.lr 0.0032000775186869744 +132 38 training.batch_size 0.0 +132 38 training.label_smoothing 0.0010680666857417322 +132 39 model.embedding_dim 2.0 +132 39 model.hidden_dropout_rate 0.48639864175700587 
+132 39 model.num_filters 9.0 +132 39 optimizer.lr 0.03660188863770883 +132 39 training.batch_size 2.0 +132 39 training.label_smoothing 0.01260523264853541 +132 40 model.embedding_dim 2.0 +132 40 model.hidden_dropout_rate 0.18170739009801787 +132 40 model.num_filters 8.0 +132 40 optimizer.lr 0.0034634679175172844 +132 40 training.batch_size 0.0 +132 40 training.label_smoothing 0.12581867290482812 +132 41 model.embedding_dim 1.0 +132 41 model.hidden_dropout_rate 0.14893216651063346 +132 41 model.num_filters 9.0 +132 41 optimizer.lr 0.010589242024182493 +132 41 training.batch_size 1.0 +132 41 training.label_smoothing 0.0030656156908749174 +132 42 model.embedding_dim 1.0 +132 42 model.hidden_dropout_rate 0.42903898842408406 +132 42 model.num_filters 7.0 +132 42 optimizer.lr 0.012004673403431485 +132 42 training.batch_size 1.0 +132 42 training.label_smoothing 0.049661427619857396 +132 43 model.embedding_dim 2.0 +132 43 model.hidden_dropout_rate 0.3410888720638804 +132 43 model.num_filters 4.0 +132 43 optimizer.lr 0.0022598833635181226 +132 43 training.batch_size 2.0 +132 43 training.label_smoothing 0.46822941729217304 +132 44 model.embedding_dim 1.0 +132 44 model.hidden_dropout_rate 0.16069175606048228 +132 44 model.num_filters 0.0 +132 44 optimizer.lr 0.0014269533713249984 +132 44 training.batch_size 0.0 +132 44 training.label_smoothing 0.28039795852484956 +132 45 model.embedding_dim 2.0 +132 45 model.hidden_dropout_rate 0.25535695444418327 +132 45 model.num_filters 4.0 +132 45 optimizer.lr 0.0013590653454396535 +132 45 training.batch_size 1.0 +132 45 training.label_smoothing 0.03300318613926247 +132 46 model.embedding_dim 0.0 +132 46 model.hidden_dropout_rate 0.22676086643632043 +132 46 model.num_filters 1.0 +132 46 optimizer.lr 0.0064856269435048615 +132 46 training.batch_size 2.0 +132 46 training.label_smoothing 0.08268963068861598 +132 47 model.embedding_dim 2.0 +132 47 model.hidden_dropout_rate 0.30950934107337785 +132 47 model.num_filters 4.0 +132 47 
optimizer.lr 0.0027320799188243292 +132 47 training.batch_size 2.0 +132 47 training.label_smoothing 0.03934329991289621 +132 48 model.embedding_dim 2.0 +132 48 model.hidden_dropout_rate 0.3453533290818952 +132 48 model.num_filters 7.0 +132 48 optimizer.lr 0.05596311029130835 +132 48 training.batch_size 1.0 +132 48 training.label_smoothing 0.23650023402675702 +132 49 model.embedding_dim 1.0 +132 49 model.hidden_dropout_rate 0.484799827427826 +132 49 model.num_filters 4.0 +132 49 optimizer.lr 0.007275977941332422 +132 49 training.batch_size 2.0 +132 49 training.label_smoothing 0.07832022704322003 +132 50 model.embedding_dim 1.0 +132 50 model.hidden_dropout_rate 0.3260726388376609 +132 50 model.num_filters 8.0 +132 50 optimizer.lr 0.004304998231933231 +132 50 training.batch_size 2.0 +132 50 training.label_smoothing 0.056486601720992444 +132 51 model.embedding_dim 0.0 +132 51 model.hidden_dropout_rate 0.4315490696798425 +132 51 model.num_filters 2.0 +132 51 optimizer.lr 0.02308215148060564 +132 51 training.batch_size 0.0 +132 51 training.label_smoothing 0.0534070928924414 +132 52 model.embedding_dim 0.0 +132 52 model.hidden_dropout_rate 0.4130882043580367 +132 52 model.num_filters 6.0 +132 52 optimizer.lr 0.039679167272741345 +132 52 training.batch_size 0.0 +132 52 training.label_smoothing 0.07814605576936005 +132 53 model.embedding_dim 1.0 +132 53 model.hidden_dropout_rate 0.18474824735342976 +132 53 model.num_filters 0.0 +132 53 optimizer.lr 0.007109758307591201 +132 53 training.batch_size 2.0 +132 53 training.label_smoothing 0.01492493528475155 +132 54 model.embedding_dim 0.0 +132 54 model.hidden_dropout_rate 0.37057283479909486 +132 54 model.num_filters 8.0 +132 54 optimizer.lr 0.0147797190346112 +132 54 training.batch_size 1.0 +132 54 training.label_smoothing 0.0011781864669136327 +132 55 model.embedding_dim 1.0 +132 55 model.hidden_dropout_rate 0.3178166453684872 +132 55 model.num_filters 4.0 +132 55 optimizer.lr 0.0025485434816352693 +132 55 training.batch_size 
0.0 +132 55 training.label_smoothing 0.0031103067653041622 +132 56 model.embedding_dim 2.0 +132 56 model.hidden_dropout_rate 0.16134471073680065 +132 56 model.num_filters 1.0 +132 56 optimizer.lr 0.028030180919551256 +132 56 training.batch_size 0.0 +132 56 training.label_smoothing 0.21626081061320473 +132 57 model.embedding_dim 0.0 +132 57 model.hidden_dropout_rate 0.10702598712012851 +132 57 model.num_filters 6.0 +132 57 optimizer.lr 0.0869177931783083 +132 57 training.batch_size 0.0 +132 57 training.label_smoothing 0.04907056972351349 +132 58 model.embedding_dim 0.0 +132 58 model.hidden_dropout_rate 0.479455082409587 +132 58 model.num_filters 8.0 +132 58 optimizer.lr 0.012230400054140713 +132 58 training.batch_size 1.0 +132 58 training.label_smoothing 0.05655259168445968 +132 59 model.embedding_dim 2.0 +132 59 model.hidden_dropout_rate 0.16238773017364228 +132 59 model.num_filters 9.0 +132 59 optimizer.lr 0.0012674989589143719 +132 59 training.batch_size 2.0 +132 59 training.label_smoothing 0.0031805899404807655 +132 60 model.embedding_dim 2.0 +132 60 model.hidden_dropout_rate 0.4294403579707453 +132 60 model.num_filters 1.0 +132 60 optimizer.lr 0.0010400127042421315 +132 60 training.batch_size 0.0 +132 60 training.label_smoothing 0.7591756510685775 +132 61 model.embedding_dim 1.0 +132 61 model.hidden_dropout_rate 0.2864612651323155 +132 61 model.num_filters 6.0 +132 61 optimizer.lr 0.01720688159474754 +132 61 training.batch_size 0.0 +132 61 training.label_smoothing 0.10982461067533815 +132 62 model.embedding_dim 2.0 +132 62 model.hidden_dropout_rate 0.201734490328841 +132 62 model.num_filters 2.0 +132 62 optimizer.lr 0.0013547222252789882 +132 62 training.batch_size 1.0 +132 62 training.label_smoothing 0.00820290715320647 +132 63 model.embedding_dim 1.0 +132 63 model.hidden_dropout_rate 0.385354558143129 +132 63 model.num_filters 7.0 +132 63 optimizer.lr 0.07570883171471487 +132 63 training.batch_size 2.0 +132 63 training.label_smoothing 0.0032115463803881602 
+132 64 model.embedding_dim 0.0 +132 64 model.hidden_dropout_rate 0.1751765550460848 +132 64 model.num_filters 3.0 +132 64 optimizer.lr 0.007236068158397556 +132 64 training.batch_size 1.0 +132 64 training.label_smoothing 0.25595781089764535 +132 65 model.embedding_dim 2.0 +132 65 model.hidden_dropout_rate 0.48689994692127747 +132 65 model.num_filters 2.0 +132 65 optimizer.lr 0.04134026549460542 +132 65 training.batch_size 0.0 +132 65 training.label_smoothing 0.032878996804286346 +132 66 model.embedding_dim 0.0 +132 66 model.hidden_dropout_rate 0.28079116475524135 +132 66 model.num_filters 9.0 +132 66 optimizer.lr 0.01136964483273579 +132 66 training.batch_size 0.0 +132 66 training.label_smoothing 0.0071207278421933215 +132 67 model.embedding_dim 1.0 +132 67 model.hidden_dropout_rate 0.220860731449078 +132 67 model.num_filters 1.0 +132 67 optimizer.lr 0.015254476237673308 +132 67 training.batch_size 2.0 +132 67 training.label_smoothing 0.921094071435774 +132 68 model.embedding_dim 2.0 +132 68 model.hidden_dropout_rate 0.21766480694777812 +132 68 model.num_filters 5.0 +132 68 optimizer.lr 0.017204686823882334 +132 68 training.batch_size 1.0 +132 68 training.label_smoothing 0.10788780822940391 +132 69 model.embedding_dim 0.0 +132 69 model.hidden_dropout_rate 0.29432314162970574 +132 69 model.num_filters 4.0 +132 69 optimizer.lr 0.015245683874056802 +132 69 training.batch_size 1.0 +132 69 training.label_smoothing 0.002150721530660896 +132 70 model.embedding_dim 2.0 +132 70 model.hidden_dropout_rate 0.24644961580946464 +132 70 model.num_filters 0.0 +132 70 optimizer.lr 0.030171967787913805 +132 70 training.batch_size 0.0 +132 70 training.label_smoothing 0.003953132497562499 +132 71 model.embedding_dim 0.0 +132 71 model.hidden_dropout_rate 0.2437065218803085 +132 71 model.num_filters 1.0 +132 71 optimizer.lr 0.07296652800084916 +132 71 training.batch_size 0.0 +132 71 training.label_smoothing 0.003324167318426217 +132 72 model.embedding_dim 2.0 +132 72 
model.hidden_dropout_rate 0.3410920416163763 +132 72 model.num_filters 1.0 +132 72 optimizer.lr 0.002471415919840807 +132 72 training.batch_size 1.0 +132 72 training.label_smoothing 0.0010773965834783917 +132 73 model.embedding_dim 1.0 +132 73 model.hidden_dropout_rate 0.13483328643542813 +132 73 model.num_filters 2.0 +132 73 optimizer.lr 0.021960503055591038 +132 73 training.batch_size 0.0 +132 73 training.label_smoothing 0.013445323280814823 +132 74 model.embedding_dim 0.0 +132 74 model.hidden_dropout_rate 0.4909593078891663 +132 74 model.num_filters 9.0 +132 74 optimizer.lr 0.010709142403085346 +132 74 training.batch_size 2.0 +132 74 training.label_smoothing 0.055619298016021 +132 75 model.embedding_dim 2.0 +132 75 model.hidden_dropout_rate 0.47883498006804226 +132 75 model.num_filters 4.0 +132 75 optimizer.lr 0.09996159322463105 +132 75 training.batch_size 0.0 +132 75 training.label_smoothing 0.3444609912907229 +132 76 model.embedding_dim 1.0 +132 76 model.hidden_dropout_rate 0.3096458865450682 +132 76 model.num_filters 9.0 +132 76 optimizer.lr 0.001808191805946451 +132 76 training.batch_size 1.0 +132 76 training.label_smoothing 0.0011204952725758464 +132 77 model.embedding_dim 2.0 +132 77 model.hidden_dropout_rate 0.21016054337102075 +132 77 model.num_filters 0.0 +132 77 optimizer.lr 0.01622992558499055 +132 77 training.batch_size 0.0 +132 77 training.label_smoothing 0.4174740032858647 +132 78 model.embedding_dim 2.0 +132 78 model.hidden_dropout_rate 0.3977813599692581 +132 78 model.num_filters 0.0 +132 78 optimizer.lr 0.0031132295579164404 +132 78 training.batch_size 2.0 +132 78 training.label_smoothing 0.01638468737479937 +132 79 model.embedding_dim 2.0 +132 79 model.hidden_dropout_rate 0.21518857179842543 +132 79 model.num_filters 8.0 +132 79 optimizer.lr 0.011055926640405321 +132 79 training.batch_size 2.0 +132 79 training.label_smoothing 0.002660532964448016 +132 80 model.embedding_dim 0.0 +132 80 model.hidden_dropout_rate 0.26037721409416814 +132 80 
model.num_filters 5.0 +132 80 optimizer.lr 0.028431739492904864 +132 80 training.batch_size 2.0 +132 80 training.label_smoothing 0.002496560561904917 +132 81 model.embedding_dim 1.0 +132 81 model.hidden_dropout_rate 0.23003492671588638 +132 81 model.num_filters 8.0 +132 81 optimizer.lr 0.0037510076656243813 +132 81 training.batch_size 0.0 +132 81 training.label_smoothing 0.40770924000729536 +132 82 model.embedding_dim 2.0 +132 82 model.hidden_dropout_rate 0.12303731569822146 +132 82 model.num_filters 0.0 +132 82 optimizer.lr 0.00676681753090656 +132 82 training.batch_size 2.0 +132 82 training.label_smoothing 0.1940357698951107 +132 83 model.embedding_dim 2.0 +132 83 model.hidden_dropout_rate 0.45816368221995496 +132 83 model.num_filters 0.0 +132 83 optimizer.lr 0.001126706125263654 +132 83 training.batch_size 2.0 +132 83 training.label_smoothing 0.01620833615508489 +132 84 model.embedding_dim 2.0 +132 84 model.hidden_dropout_rate 0.4487238693547956 +132 84 model.num_filters 2.0 +132 84 optimizer.lr 0.00427519091110039 +132 84 training.batch_size 2.0 +132 84 training.label_smoothing 0.047827383945032866 +132 85 model.embedding_dim 2.0 +132 85 model.hidden_dropout_rate 0.3965881678662291 +132 85 model.num_filters 3.0 +132 85 optimizer.lr 0.055832030612144395 +132 85 training.batch_size 0.0 +132 85 training.label_smoothing 0.005563494784547737 +132 86 model.embedding_dim 0.0 +132 86 model.hidden_dropout_rate 0.13636679834886795 +132 86 model.num_filters 6.0 +132 86 optimizer.lr 0.04948079829261991 +132 86 training.batch_size 0.0 +132 86 training.label_smoothing 0.172996423561923 +132 87 model.embedding_dim 1.0 +132 87 model.hidden_dropout_rate 0.4005782993189343 +132 87 model.num_filters 8.0 +132 87 optimizer.lr 0.001889681491681666 +132 87 training.batch_size 1.0 +132 87 training.label_smoothing 0.0013498949824978022 +132 88 model.embedding_dim 0.0 +132 88 model.hidden_dropout_rate 0.1253693266288984 +132 88 model.num_filters 7.0 +132 88 optimizer.lr 
0.0015866638501159146 +132 88 training.batch_size 2.0 +132 88 training.label_smoothing 0.02047202152984387 +132 89 model.embedding_dim 1.0 +132 89 model.hidden_dropout_rate 0.4916524223992803 +132 89 model.num_filters 0.0 +132 89 optimizer.lr 0.0285171150840992 +132 89 training.batch_size 1.0 +132 89 training.label_smoothing 0.013467521198986934 +132 90 model.embedding_dim 0.0 +132 90 model.hidden_dropout_rate 0.2800387453251506 +132 90 model.num_filters 3.0 +132 90 optimizer.lr 0.05168143822591123 +132 90 training.batch_size 1.0 +132 90 training.label_smoothing 0.0013880227278288725 +132 91 model.embedding_dim 2.0 +132 91 model.hidden_dropout_rate 0.22232437033503852 +132 91 model.num_filters 0.0 +132 91 optimizer.lr 0.08915450858072221 +132 91 training.batch_size 1.0 +132 91 training.label_smoothing 0.029973112703140486 +132 92 model.embedding_dim 1.0 +132 92 model.hidden_dropout_rate 0.4090414837270071 +132 92 model.num_filters 6.0 +132 92 optimizer.lr 0.009643649083208765 +132 92 training.batch_size 2.0 +132 92 training.label_smoothing 0.567957347934545 +132 93 model.embedding_dim 0.0 +132 93 model.hidden_dropout_rate 0.37980945202574756 +132 93 model.num_filters 0.0 +132 93 optimizer.lr 0.0021758170868064705 +132 93 training.batch_size 0.0 +132 93 training.label_smoothing 0.003111304910902611 +132 94 model.embedding_dim 2.0 +132 94 model.hidden_dropout_rate 0.4575749763864724 +132 94 model.num_filters 8.0 +132 94 optimizer.lr 0.0014076270532530028 +132 94 training.batch_size 1.0 +132 94 training.label_smoothing 0.02775826655580489 +132 95 model.embedding_dim 2.0 +132 95 model.hidden_dropout_rate 0.10589065534153207 +132 95 model.num_filters 7.0 +132 95 optimizer.lr 0.04832184579423552 +132 95 training.batch_size 0.0 +132 95 training.label_smoothing 0.031233688822725824 +132 96 model.embedding_dim 1.0 +132 96 model.hidden_dropout_rate 0.4629928981946767 +132 96 model.num_filters 6.0 +132 96 optimizer.lr 0.08036656321851582 +132 96 training.batch_size 1.0 +132 
96 training.label_smoothing 0.018637469554620093 +132 97 model.embedding_dim 2.0 +132 97 model.hidden_dropout_rate 0.3212403131444733 +132 97 model.num_filters 2.0 +132 97 optimizer.lr 0.001388608723132931 +132 97 training.batch_size 2.0 +132 97 training.label_smoothing 0.1270976330827368 +132 1 dataset """kinships""" +132 1 model """convkb""" +132 1 loss """bceaftersigmoid""" +132 1 regularizer """no""" +132 1 optimizer """adam""" +132 1 training_loop """lcwa""" +132 1 evaluator """rankbased""" +132 2 dataset """kinships""" +132 2 model """convkb""" +132 2 loss """bceaftersigmoid""" +132 2 regularizer """no""" +132 2 optimizer """adam""" +132 2 training_loop """lcwa""" +132 2 evaluator """rankbased""" +132 3 dataset """kinships""" +132 3 model """convkb""" +132 3 loss """bceaftersigmoid""" +132 3 regularizer """no""" +132 3 optimizer """adam""" +132 3 training_loop """lcwa""" +132 3 evaluator """rankbased""" +132 4 dataset """kinships""" +132 4 model """convkb""" +132 4 loss """bceaftersigmoid""" +132 4 regularizer """no""" +132 4 optimizer """adam""" +132 4 training_loop """lcwa""" +132 4 evaluator """rankbased""" +132 5 dataset """kinships""" +132 5 model """convkb""" +132 5 loss """bceaftersigmoid""" +132 5 regularizer """no""" +132 5 optimizer """adam""" +132 5 training_loop """lcwa""" +132 5 evaluator """rankbased""" +132 6 dataset """kinships""" +132 6 model """convkb""" +132 6 loss """bceaftersigmoid""" +132 6 regularizer """no""" +132 6 optimizer """adam""" +132 6 training_loop """lcwa""" +132 6 evaluator """rankbased""" +132 7 dataset """kinships""" +132 7 model """convkb""" +132 7 loss """bceaftersigmoid""" +132 7 regularizer """no""" +132 7 optimizer """adam""" +132 7 training_loop """lcwa""" +132 7 evaluator """rankbased""" +132 8 dataset """kinships""" +132 8 model """convkb""" +132 8 loss """bceaftersigmoid""" +132 8 regularizer """no""" +132 8 optimizer """adam""" +132 8 training_loop """lcwa""" +132 8 evaluator """rankbased""" +132 9 dataset 
"""kinships""" +132 9 model """convkb""" +132 9 loss """bceaftersigmoid""" +132 9 regularizer """no""" +132 9 optimizer """adam""" +132 9 training_loop """lcwa""" +132 9 evaluator """rankbased""" +132 10 dataset """kinships""" +132 10 model """convkb""" +132 10 loss """bceaftersigmoid""" +132 10 regularizer """no""" +132 10 optimizer """adam""" +132 10 training_loop """lcwa""" +132 10 evaluator """rankbased""" +132 11 dataset """kinships""" +132 11 model """convkb""" +132 11 loss """bceaftersigmoid""" +132 11 regularizer """no""" +132 11 optimizer """adam""" +132 11 training_loop """lcwa""" +132 11 evaluator """rankbased""" +132 12 dataset """kinships""" +132 12 model """convkb""" +132 12 loss """bceaftersigmoid""" +132 12 regularizer """no""" +132 12 optimizer """adam""" +132 12 training_loop """lcwa""" +132 12 evaluator """rankbased""" +132 13 dataset """kinships""" +132 13 model """convkb""" +132 13 loss """bceaftersigmoid""" +132 13 regularizer """no""" +132 13 optimizer """adam""" +132 13 training_loop """lcwa""" +132 13 evaluator """rankbased""" +132 14 dataset """kinships""" +132 14 model """convkb""" +132 14 loss """bceaftersigmoid""" +132 14 regularizer """no""" +132 14 optimizer """adam""" +132 14 training_loop """lcwa""" +132 14 evaluator """rankbased""" +132 15 dataset """kinships""" +132 15 model """convkb""" +132 15 loss """bceaftersigmoid""" +132 15 regularizer """no""" +132 15 optimizer """adam""" +132 15 training_loop """lcwa""" +132 15 evaluator """rankbased""" +132 16 dataset """kinships""" +132 16 model """convkb""" +132 16 loss """bceaftersigmoid""" +132 16 regularizer """no""" +132 16 optimizer """adam""" +132 16 training_loop """lcwa""" +132 16 evaluator """rankbased""" +132 17 dataset """kinships""" +132 17 model """convkb""" +132 17 loss """bceaftersigmoid""" +132 17 regularizer """no""" +132 17 optimizer """adam""" +132 17 training_loop """lcwa""" +132 17 evaluator """rankbased""" +132 18 dataset """kinships""" +132 18 model """convkb""" 
+132 18 loss """bceaftersigmoid""" +132 18 regularizer """no""" +132 18 optimizer """adam""" +132 18 training_loop """lcwa""" +132 18 evaluator """rankbased""" +132 19 dataset """kinships""" +132 19 model """convkb""" +132 19 loss """bceaftersigmoid""" +132 19 regularizer """no""" +132 19 optimizer """adam""" +132 19 training_loop """lcwa""" +132 19 evaluator """rankbased""" +132 20 dataset """kinships""" +132 20 model """convkb""" +132 20 loss """bceaftersigmoid""" +132 20 regularizer """no""" +132 20 optimizer """adam""" +132 20 training_loop """lcwa""" +132 20 evaluator """rankbased""" +132 21 dataset """kinships""" +132 21 model """convkb""" +132 21 loss """bceaftersigmoid""" +132 21 regularizer """no""" +132 21 optimizer """adam""" +132 21 training_loop """lcwa""" +132 21 evaluator """rankbased""" +132 22 dataset """kinships""" +132 22 model """convkb""" +132 22 loss """bceaftersigmoid""" +132 22 regularizer """no""" +132 22 optimizer """adam""" +132 22 training_loop """lcwa""" +132 22 evaluator """rankbased""" +132 23 dataset """kinships""" +132 23 model """convkb""" +132 23 loss """bceaftersigmoid""" +132 23 regularizer """no""" +132 23 optimizer """adam""" +132 23 training_loop """lcwa""" +132 23 evaluator """rankbased""" +132 24 dataset """kinships""" +132 24 model """convkb""" +132 24 loss """bceaftersigmoid""" +132 24 regularizer """no""" +132 24 optimizer """adam""" +132 24 training_loop """lcwa""" +132 24 evaluator """rankbased""" +132 25 dataset """kinships""" +132 25 model """convkb""" +132 25 loss """bceaftersigmoid""" +132 25 regularizer """no""" +132 25 optimizer """adam""" +132 25 training_loop """lcwa""" +132 25 evaluator """rankbased""" +132 26 dataset """kinships""" +132 26 model """convkb""" +132 26 loss """bceaftersigmoid""" +132 26 regularizer """no""" +132 26 optimizer """adam""" +132 26 training_loop """lcwa""" +132 26 evaluator """rankbased""" +132 27 dataset """kinships""" +132 27 model """convkb""" +132 27 loss """bceaftersigmoid""" 
+132 27 regularizer """no""" +132 27 optimizer """adam""" +132 27 training_loop """lcwa""" +132 27 evaluator """rankbased""" +132 28 dataset """kinships""" +132 28 model """convkb""" +132 28 loss """bceaftersigmoid""" +132 28 regularizer """no""" +132 28 optimizer """adam""" +132 28 training_loop """lcwa""" +132 28 evaluator """rankbased""" +132 29 dataset """kinships""" +132 29 model """convkb""" +132 29 loss """bceaftersigmoid""" +132 29 regularizer """no""" +132 29 optimizer """adam""" +132 29 training_loop """lcwa""" +132 29 evaluator """rankbased""" +132 30 dataset """kinships""" +132 30 model """convkb""" +132 30 loss """bceaftersigmoid""" +132 30 regularizer """no""" +132 30 optimizer """adam""" +132 30 training_loop """lcwa""" +132 30 evaluator """rankbased""" +132 31 dataset """kinships""" +132 31 model """convkb""" +132 31 loss """bceaftersigmoid""" +132 31 regularizer """no""" +132 31 optimizer """adam""" +132 31 training_loop """lcwa""" +132 31 evaluator """rankbased""" +132 32 dataset """kinships""" +132 32 model """convkb""" +132 32 loss """bceaftersigmoid""" +132 32 regularizer """no""" +132 32 optimizer """adam""" +132 32 training_loop """lcwa""" +132 32 evaluator """rankbased""" +132 33 dataset """kinships""" +132 33 model """convkb""" +132 33 loss """bceaftersigmoid""" +132 33 regularizer """no""" +132 33 optimizer """adam""" +132 33 training_loop """lcwa""" +132 33 evaluator """rankbased""" +132 34 dataset """kinships""" +132 34 model """convkb""" +132 34 loss """bceaftersigmoid""" +132 34 regularizer """no""" +132 34 optimizer """adam""" +132 34 training_loop """lcwa""" +132 34 evaluator """rankbased""" +132 35 dataset """kinships""" +132 35 model """convkb""" +132 35 loss """bceaftersigmoid""" +132 35 regularizer """no""" +132 35 optimizer """adam""" +132 35 training_loop """lcwa""" +132 35 evaluator """rankbased""" +132 36 dataset """kinships""" +132 36 model """convkb""" +132 36 loss """bceaftersigmoid""" +132 36 regularizer """no""" +132 36 
optimizer """adam""" +132 36 training_loop """lcwa""" +132 36 evaluator """rankbased""" +132 37 dataset """kinships""" +132 37 model """convkb""" +132 37 loss """bceaftersigmoid""" +132 37 regularizer """no""" +132 37 optimizer """adam""" +132 37 training_loop """lcwa""" +132 37 evaluator """rankbased""" +132 38 dataset """kinships""" +132 38 model """convkb""" +132 38 loss """bceaftersigmoid""" +132 38 regularizer """no""" +132 38 optimizer """adam""" +132 38 training_loop """lcwa""" +132 38 evaluator """rankbased""" +132 39 dataset """kinships""" +132 39 model """convkb""" +132 39 loss """bceaftersigmoid""" +132 39 regularizer """no""" +132 39 optimizer """adam""" +132 39 training_loop """lcwa""" +132 39 evaluator """rankbased""" +132 40 dataset """kinships""" +132 40 model """convkb""" +132 40 loss """bceaftersigmoid""" +132 40 regularizer """no""" +132 40 optimizer """adam""" +132 40 training_loop """lcwa""" +132 40 evaluator """rankbased""" +132 41 dataset """kinships""" +132 41 model """convkb""" +132 41 loss """bceaftersigmoid""" +132 41 regularizer """no""" +132 41 optimizer """adam""" +132 41 training_loop """lcwa""" +132 41 evaluator """rankbased""" +132 42 dataset """kinships""" +132 42 model """convkb""" +132 42 loss """bceaftersigmoid""" +132 42 regularizer """no""" +132 42 optimizer """adam""" +132 42 training_loop """lcwa""" +132 42 evaluator """rankbased""" +132 43 dataset """kinships""" +132 43 model """convkb""" +132 43 loss """bceaftersigmoid""" +132 43 regularizer """no""" +132 43 optimizer """adam""" +132 43 training_loop """lcwa""" +132 43 evaluator """rankbased""" +132 44 dataset """kinships""" +132 44 model """convkb""" +132 44 loss """bceaftersigmoid""" +132 44 regularizer """no""" +132 44 optimizer """adam""" +132 44 training_loop """lcwa""" +132 44 evaluator """rankbased""" +132 45 dataset """kinships""" +132 45 model """convkb""" +132 45 loss """bceaftersigmoid""" +132 45 regularizer """no""" +132 45 optimizer """adam""" +132 45 
training_loop """lcwa""" +132 45 evaluator """rankbased""" +132 46 dataset """kinships""" +132 46 model """convkb""" +132 46 loss """bceaftersigmoid""" +132 46 regularizer """no""" +132 46 optimizer """adam""" +132 46 training_loop """lcwa""" +132 46 evaluator """rankbased""" +132 47 dataset """kinships""" +132 47 model """convkb""" +132 47 loss """bceaftersigmoid""" +132 47 regularizer """no""" +132 47 optimizer """adam""" +132 47 training_loop """lcwa""" +132 47 evaluator """rankbased""" +132 48 dataset """kinships""" +132 48 model """convkb""" +132 48 loss """bceaftersigmoid""" +132 48 regularizer """no""" +132 48 optimizer """adam""" +132 48 training_loop """lcwa""" +132 48 evaluator """rankbased""" +132 49 dataset """kinships""" +132 49 model """convkb""" +132 49 loss """bceaftersigmoid""" +132 49 regularizer """no""" +132 49 optimizer """adam""" +132 49 training_loop """lcwa""" +132 49 evaluator """rankbased""" +132 50 dataset """kinships""" +132 50 model """convkb""" +132 50 loss """bceaftersigmoid""" +132 50 regularizer """no""" +132 50 optimizer """adam""" +132 50 training_loop """lcwa""" +132 50 evaluator """rankbased""" +132 51 dataset """kinships""" +132 51 model """convkb""" +132 51 loss """bceaftersigmoid""" +132 51 regularizer """no""" +132 51 optimizer """adam""" +132 51 training_loop """lcwa""" +132 51 evaluator """rankbased""" +132 52 dataset """kinships""" +132 52 model """convkb""" +132 52 loss """bceaftersigmoid""" +132 52 regularizer """no""" +132 52 optimizer """adam""" +132 52 training_loop """lcwa""" +132 52 evaluator """rankbased""" +132 53 dataset """kinships""" +132 53 model """convkb""" +132 53 loss """bceaftersigmoid""" +132 53 regularizer """no""" +132 53 optimizer """adam""" +132 53 training_loop """lcwa""" +132 53 evaluator """rankbased""" +132 54 dataset """kinships""" +132 54 model """convkb""" +132 54 loss """bceaftersigmoid""" +132 54 regularizer """no""" +132 54 optimizer """adam""" +132 54 training_loop """lcwa""" +132 54 
evaluator """rankbased""" +132 55 dataset """kinships""" +132 55 model """convkb""" +132 55 loss """bceaftersigmoid""" +132 55 regularizer """no""" +132 55 optimizer """adam""" +132 55 training_loop """lcwa""" +132 55 evaluator """rankbased""" +132 56 dataset """kinships""" +132 56 model """convkb""" +132 56 loss """bceaftersigmoid""" +132 56 regularizer """no""" +132 56 optimizer """adam""" +132 56 training_loop """lcwa""" +132 56 evaluator """rankbased""" +132 57 dataset """kinships""" +132 57 model """convkb""" +132 57 loss """bceaftersigmoid""" +132 57 regularizer """no""" +132 57 optimizer """adam""" +132 57 training_loop """lcwa""" +132 57 evaluator """rankbased""" +132 58 dataset """kinships""" +132 58 model """convkb""" +132 58 loss """bceaftersigmoid""" +132 58 regularizer """no""" +132 58 optimizer """adam""" +132 58 training_loop """lcwa""" +132 58 evaluator """rankbased""" +132 59 dataset """kinships""" +132 59 model """convkb""" +132 59 loss """bceaftersigmoid""" +132 59 regularizer """no""" +132 59 optimizer """adam""" +132 59 training_loop """lcwa""" +132 59 evaluator """rankbased""" +132 60 dataset """kinships""" +132 60 model """convkb""" +132 60 loss """bceaftersigmoid""" +132 60 regularizer """no""" +132 60 optimizer """adam""" +132 60 training_loop """lcwa""" +132 60 evaluator """rankbased""" +132 61 dataset """kinships""" +132 61 model """convkb""" +132 61 loss """bceaftersigmoid""" +132 61 regularizer """no""" +132 61 optimizer """adam""" +132 61 training_loop """lcwa""" +132 61 evaluator """rankbased""" +132 62 dataset """kinships""" +132 62 model """convkb""" +132 62 loss """bceaftersigmoid""" +132 62 regularizer """no""" +132 62 optimizer """adam""" +132 62 training_loop """lcwa""" +132 62 evaluator """rankbased""" +132 63 dataset """kinships""" +132 63 model """convkb""" +132 63 loss """bceaftersigmoid""" +132 63 regularizer """no""" +132 63 optimizer """adam""" +132 63 training_loop """lcwa""" +132 63 evaluator """rankbased""" +132 64 
dataset """kinships""" +132 64 model """convkb""" +132 64 loss """bceaftersigmoid""" +132 64 regularizer """no""" +132 64 optimizer """adam""" +132 64 training_loop """lcwa""" +132 64 evaluator """rankbased""" +132 65 dataset """kinships""" +132 65 model """convkb""" +132 65 loss """bceaftersigmoid""" +132 65 regularizer """no""" +132 65 optimizer """adam""" +132 65 training_loop """lcwa""" +132 65 evaluator """rankbased""" +132 66 dataset """kinships""" +132 66 model """convkb""" +132 66 loss """bceaftersigmoid""" +132 66 regularizer """no""" +132 66 optimizer """adam""" +132 66 training_loop """lcwa""" +132 66 evaluator """rankbased""" +132 67 dataset """kinships""" +132 67 model """convkb""" +132 67 loss """bceaftersigmoid""" +132 67 regularizer """no""" +132 67 optimizer """adam""" +132 67 training_loop """lcwa""" +132 67 evaluator """rankbased""" +132 68 dataset """kinships""" +132 68 model """convkb""" +132 68 loss """bceaftersigmoid""" +132 68 regularizer """no""" +132 68 optimizer """adam""" +132 68 training_loop """lcwa""" +132 68 evaluator """rankbased""" +132 69 dataset """kinships""" +132 69 model """convkb""" +132 69 loss """bceaftersigmoid""" +132 69 regularizer """no""" +132 69 optimizer """adam""" +132 69 training_loop """lcwa""" +132 69 evaluator """rankbased""" +132 70 dataset """kinships""" +132 70 model """convkb""" +132 70 loss """bceaftersigmoid""" +132 70 regularizer """no""" +132 70 optimizer """adam""" +132 70 training_loop """lcwa""" +132 70 evaluator """rankbased""" +132 71 dataset """kinships""" +132 71 model """convkb""" +132 71 loss """bceaftersigmoid""" +132 71 regularizer """no""" +132 71 optimizer """adam""" +132 71 training_loop """lcwa""" +132 71 evaluator """rankbased""" +132 72 dataset """kinships""" +132 72 model """convkb""" +132 72 loss """bceaftersigmoid""" +132 72 regularizer """no""" +132 72 optimizer """adam""" +132 72 training_loop """lcwa""" +132 72 evaluator """rankbased""" +132 73 dataset """kinships""" +132 73 model 
"""convkb""" +132 73 loss """bceaftersigmoid""" +132 73 regularizer """no""" +132 73 optimizer """adam""" +132 73 training_loop """lcwa""" +132 73 evaluator """rankbased""" +132 74 dataset """kinships""" +132 74 model """convkb""" +132 74 loss """bceaftersigmoid""" +132 74 regularizer """no""" +132 74 optimizer """adam""" +132 74 training_loop """lcwa""" +132 74 evaluator """rankbased""" +132 75 dataset """kinships""" +132 75 model """convkb""" +132 75 loss """bceaftersigmoid""" +132 75 regularizer """no""" +132 75 optimizer """adam""" +132 75 training_loop """lcwa""" +132 75 evaluator """rankbased""" +132 76 dataset """kinships""" +132 76 model """convkb""" +132 76 loss """bceaftersigmoid""" +132 76 regularizer """no""" +132 76 optimizer """adam""" +132 76 training_loop """lcwa""" +132 76 evaluator """rankbased""" +132 77 dataset """kinships""" +132 77 model """convkb""" +132 77 loss """bceaftersigmoid""" +132 77 regularizer """no""" +132 77 optimizer """adam""" +132 77 training_loop """lcwa""" +132 77 evaluator """rankbased""" +132 78 dataset """kinships""" +132 78 model """convkb""" +132 78 loss """bceaftersigmoid""" +132 78 regularizer """no""" +132 78 optimizer """adam""" +132 78 training_loop """lcwa""" +132 78 evaluator """rankbased""" +132 79 dataset """kinships""" +132 79 model """convkb""" +132 79 loss """bceaftersigmoid""" +132 79 regularizer """no""" +132 79 optimizer """adam""" +132 79 training_loop """lcwa""" +132 79 evaluator """rankbased""" +132 80 dataset """kinships""" +132 80 model """convkb""" +132 80 loss """bceaftersigmoid""" +132 80 regularizer """no""" +132 80 optimizer """adam""" +132 80 training_loop """lcwa""" +132 80 evaluator """rankbased""" +132 81 dataset """kinships""" +132 81 model """convkb""" +132 81 loss """bceaftersigmoid""" +132 81 regularizer """no""" +132 81 optimizer """adam""" +132 81 training_loop """lcwa""" +132 81 evaluator """rankbased""" +132 82 dataset """kinships""" +132 82 model """convkb""" +132 82 loss 
"""bceaftersigmoid""" +132 82 regularizer """no""" +132 82 optimizer """adam""" +132 82 training_loop """lcwa""" +132 82 evaluator """rankbased""" +132 83 dataset """kinships""" +132 83 model """convkb""" +132 83 loss """bceaftersigmoid""" +132 83 regularizer """no""" +132 83 optimizer """adam""" +132 83 training_loop """lcwa""" +132 83 evaluator """rankbased""" +132 84 dataset """kinships""" +132 84 model """convkb""" +132 84 loss """bceaftersigmoid""" +132 84 regularizer """no""" +132 84 optimizer """adam""" +132 84 training_loop """lcwa""" +132 84 evaluator """rankbased""" +132 85 dataset """kinships""" +132 85 model """convkb""" +132 85 loss """bceaftersigmoid""" +132 85 regularizer """no""" +132 85 optimizer """adam""" +132 85 training_loop """lcwa""" +132 85 evaluator """rankbased""" +132 86 dataset """kinships""" +132 86 model """convkb""" +132 86 loss """bceaftersigmoid""" +132 86 regularizer """no""" +132 86 optimizer """adam""" +132 86 training_loop """lcwa""" +132 86 evaluator """rankbased""" +132 87 dataset """kinships""" +132 87 model """convkb""" +132 87 loss """bceaftersigmoid""" +132 87 regularizer """no""" +132 87 optimizer """adam""" +132 87 training_loop """lcwa""" +132 87 evaluator """rankbased""" +132 88 dataset """kinships""" +132 88 model """convkb""" +132 88 loss """bceaftersigmoid""" +132 88 regularizer """no""" +132 88 optimizer """adam""" +132 88 training_loop """lcwa""" +132 88 evaluator """rankbased""" +132 89 dataset """kinships""" +132 89 model """convkb""" +132 89 loss """bceaftersigmoid""" +132 89 regularizer """no""" +132 89 optimizer """adam""" +132 89 training_loop """lcwa""" +132 89 evaluator """rankbased""" +132 90 dataset """kinships""" +132 90 model """convkb""" +132 90 loss """bceaftersigmoid""" +132 90 regularizer """no""" +132 90 optimizer """adam""" +132 90 training_loop """lcwa""" +132 90 evaluator """rankbased""" +132 91 dataset """kinships""" +132 91 model """convkb""" +132 91 loss """bceaftersigmoid""" +132 91 
regularizer """no""" +132 91 optimizer """adam""" +132 91 training_loop """lcwa""" +132 91 evaluator """rankbased""" +132 92 dataset """kinships""" +132 92 model """convkb""" +132 92 loss """bceaftersigmoid""" +132 92 regularizer """no""" +132 92 optimizer """adam""" +132 92 training_loop """lcwa""" +132 92 evaluator """rankbased""" +132 93 dataset """kinships""" +132 93 model """convkb""" +132 93 loss """bceaftersigmoid""" +132 93 regularizer """no""" +132 93 optimizer """adam""" +132 93 training_loop """lcwa""" +132 93 evaluator """rankbased""" +132 94 dataset """kinships""" +132 94 model """convkb""" +132 94 loss """bceaftersigmoid""" +132 94 regularizer """no""" +132 94 optimizer """adam""" +132 94 training_loop """lcwa""" +132 94 evaluator """rankbased""" +132 95 dataset """kinships""" +132 95 model """convkb""" +132 95 loss """bceaftersigmoid""" +132 95 regularizer """no""" +132 95 optimizer """adam""" +132 95 training_loop """lcwa""" +132 95 evaluator """rankbased""" +132 96 dataset """kinships""" +132 96 model """convkb""" +132 96 loss """bceaftersigmoid""" +132 96 regularizer """no""" +132 96 optimizer """adam""" +132 96 training_loop """lcwa""" +132 96 evaluator """rankbased""" +132 97 dataset """kinships""" +132 97 model """convkb""" +132 97 loss """bceaftersigmoid""" +132 97 regularizer """no""" +132 97 optimizer """adam""" +132 97 training_loop """lcwa""" +132 97 evaluator """rankbased""" +133 1 model.embedding_dim 2.0 +133 1 model.hidden_dropout_rate 0.25840731863000543 +133 1 model.num_filters 5.0 +133 1 optimizer.lr 0.0020493062498812458 +133 1 training.batch_size 1.0 +133 1 training.label_smoothing 0.32337798367155235 +133 2 model.embedding_dim 0.0 +133 2 model.hidden_dropout_rate 0.36370745529693616 +133 2 model.num_filters 1.0 +133 2 optimizer.lr 0.0018361395740945222 +133 2 training.batch_size 1.0 +133 2 training.label_smoothing 0.010083946496252051 +133 3 model.embedding_dim 1.0 +133 3 model.hidden_dropout_rate 0.4045192856003601 +133 3 
model.num_filters 0.0 +133 3 optimizer.lr 0.0019183521526559921 +133 3 training.batch_size 0.0 +133 3 training.label_smoothing 0.6119622561529003 +133 4 model.embedding_dim 1.0 +133 4 model.hidden_dropout_rate 0.13551569951913495 +133 4 model.num_filters 0.0 +133 4 optimizer.lr 0.0026957723898418827 +133 4 training.batch_size 2.0 +133 4 training.label_smoothing 0.08285922989964777 +133 5 model.embedding_dim 1.0 +133 5 model.hidden_dropout_rate 0.24431153260431585 +133 5 model.num_filters 9.0 +133 5 optimizer.lr 0.006471243876091931 +133 5 training.batch_size 1.0 +133 5 training.label_smoothing 0.13639868056968343 +133 6 model.embedding_dim 0.0 +133 6 model.hidden_dropout_rate 0.3893158591640874 +133 6 model.num_filters 3.0 +133 6 optimizer.lr 0.018402937358548095 +133 6 training.batch_size 1.0 +133 6 training.label_smoothing 0.6325499494555586 +133 7 model.embedding_dim 2.0 +133 7 model.hidden_dropout_rate 0.1984519851477871 +133 7 model.num_filters 2.0 +133 7 optimizer.lr 0.024319933522766197 +133 7 training.batch_size 0.0 +133 7 training.label_smoothing 0.00999924264586548 +133 8 model.embedding_dim 1.0 +133 8 model.hidden_dropout_rate 0.2489768058942918 +133 8 model.num_filters 1.0 +133 8 optimizer.lr 0.0037742115590160244 +133 8 training.batch_size 1.0 +133 8 training.label_smoothing 0.023948668455604276 +133 9 model.embedding_dim 2.0 +133 9 model.hidden_dropout_rate 0.15692707947638748 +133 9 model.num_filters 1.0 +133 9 optimizer.lr 0.014357901079468499 +133 9 training.batch_size 0.0 +133 9 training.label_smoothing 0.5796901823242399 +133 10 model.embedding_dim 0.0 +133 10 model.hidden_dropout_rate 0.40558969378964294 +133 10 model.num_filters 4.0 +133 10 optimizer.lr 0.00118145135330833 +133 10 training.batch_size 1.0 +133 10 training.label_smoothing 0.07654050089575462 +133 11 model.embedding_dim 0.0 +133 11 model.hidden_dropout_rate 0.34765925698903155 +133 11 model.num_filters 4.0 +133 11 optimizer.lr 0.0032592661639774885 +133 11 training.batch_size 1.0 
+133 11 training.label_smoothing 0.1692539983184139 +133 12 model.embedding_dim 2.0 +133 12 model.hidden_dropout_rate 0.3447586914445486 +133 12 model.num_filters 1.0 +133 12 optimizer.lr 0.007253567691237082 +133 12 training.batch_size 2.0 +133 12 training.label_smoothing 0.7957592794916352 +133 13 model.embedding_dim 0.0 +133 13 model.hidden_dropout_rate 0.4086381116959582 +133 13 model.num_filters 0.0 +133 13 optimizer.lr 0.06240033891145503 +133 13 training.batch_size 0.0 +133 13 training.label_smoothing 0.12367006408442359 +133 14 model.embedding_dim 1.0 +133 14 model.hidden_dropout_rate 0.4532395156813034 +133 14 model.num_filters 0.0 +133 14 optimizer.lr 0.03410056821234621 +133 14 training.batch_size 2.0 +133 14 training.label_smoothing 0.0033936111270287863 +133 15 model.embedding_dim 2.0 +133 15 model.hidden_dropout_rate 0.2424151923729229 +133 15 model.num_filters 0.0 +133 15 optimizer.lr 0.02241712604808112 +133 15 training.batch_size 0.0 +133 15 training.label_smoothing 0.6062162288327386 +133 16 model.embedding_dim 2.0 +133 16 model.hidden_dropout_rate 0.16179012174549295 +133 16 model.num_filters 9.0 +133 16 optimizer.lr 0.007856978389098119 +133 16 training.batch_size 2.0 +133 16 training.label_smoothing 0.004353785930312426 +133 17 model.embedding_dim 2.0 +133 17 model.hidden_dropout_rate 0.13342994226401889 +133 17 model.num_filters 3.0 +133 17 optimizer.lr 0.009883683733008616 +133 17 training.batch_size 2.0 +133 17 training.label_smoothing 0.00221057083430832 +133 18 model.embedding_dim 2.0 +133 18 model.hidden_dropout_rate 0.18647217254388987 +133 18 model.num_filters 5.0 +133 18 optimizer.lr 0.016132185850874527 +133 18 training.batch_size 2.0 +133 18 training.label_smoothing 0.41598158099030536 +133 19 model.embedding_dim 1.0 +133 19 model.hidden_dropout_rate 0.30022335213713436 +133 19 model.num_filters 5.0 +133 19 optimizer.lr 0.011995349771002313 +133 19 training.batch_size 1.0 +133 19 training.label_smoothing 0.04923638827163482 +133 20 
model.embedding_dim 1.0 +133 20 model.hidden_dropout_rate 0.14986376537333218 +133 20 model.num_filters 5.0 +133 20 optimizer.lr 0.009138269025447428 +133 20 training.batch_size 2.0 +133 20 training.label_smoothing 0.0010807811439289615 +133 21 model.embedding_dim 1.0 +133 21 model.hidden_dropout_rate 0.2347181728196267 +133 21 model.num_filters 8.0 +133 21 optimizer.lr 0.003749688586222975 +133 21 training.batch_size 2.0 +133 21 training.label_smoothing 0.11926293143043963 +133 22 model.embedding_dim 1.0 +133 22 model.hidden_dropout_rate 0.22945395144892458 +133 22 model.num_filters 5.0 +133 22 optimizer.lr 0.00494593266417337 +133 22 training.batch_size 1.0 +133 22 training.label_smoothing 0.025599719476820503 +133 23 model.embedding_dim 2.0 +133 23 model.hidden_dropout_rate 0.3587292016687703 +133 23 model.num_filters 1.0 +133 23 optimizer.lr 0.04225437323817314 +133 23 training.batch_size 0.0 +133 23 training.label_smoothing 0.01730335006364768 +133 24 model.embedding_dim 1.0 +133 24 model.hidden_dropout_rate 0.4539664322982284 +133 24 model.num_filters 1.0 +133 24 optimizer.lr 0.0053278661286255856 +133 24 training.batch_size 2.0 +133 24 training.label_smoothing 0.013549087287124228 +133 25 model.embedding_dim 2.0 +133 25 model.hidden_dropout_rate 0.19354671900707615 +133 25 model.num_filters 9.0 +133 25 optimizer.lr 0.056892616324408056 +133 25 training.batch_size 2.0 +133 25 training.label_smoothing 0.01955265195645993 +133 26 model.embedding_dim 0.0 +133 26 model.hidden_dropout_rate 0.10224786736552588 +133 26 model.num_filters 9.0 +133 26 optimizer.lr 0.001393148888017781 +133 26 training.batch_size 2.0 +133 26 training.label_smoothing 0.2731763293544887 +133 27 model.embedding_dim 0.0 +133 27 model.hidden_dropout_rate 0.30004258376808035 +133 27 model.num_filters 0.0 +133 27 optimizer.lr 0.03761578781572347 +133 27 training.batch_size 1.0 +133 27 training.label_smoothing 0.08875371462475957 +133 28 model.embedding_dim 1.0 +133 28 model.hidden_dropout_rate 
0.2413073996061985 +133 28 model.num_filters 0.0 +133 28 optimizer.lr 0.020304645271664583 +133 28 training.batch_size 2.0 +133 28 training.label_smoothing 0.0545497121669244 +133 29 model.embedding_dim 0.0 +133 29 model.hidden_dropout_rate 0.4531601063409423 +133 29 model.num_filters 8.0 +133 29 optimizer.lr 0.002026653359861274 +133 29 training.batch_size 1.0 +133 29 training.label_smoothing 0.8285070775928098 +133 30 model.embedding_dim 1.0 +133 30 model.hidden_dropout_rate 0.1292097326427141 +133 30 model.num_filters 7.0 +133 30 optimizer.lr 0.03976661872312168 +133 30 training.batch_size 1.0 +133 30 training.label_smoothing 0.013776495450927073 +133 31 model.embedding_dim 0.0 +133 31 model.hidden_dropout_rate 0.36202489264769977 +133 31 model.num_filters 2.0 +133 31 optimizer.lr 0.003937437525377361 +133 31 training.batch_size 2.0 +133 31 training.label_smoothing 0.021532509028509613 +133 32 model.embedding_dim 2.0 +133 32 model.hidden_dropout_rate 0.30942349477518094 +133 32 model.num_filters 8.0 +133 32 optimizer.lr 0.08787515433461206 +133 32 training.batch_size 1.0 +133 32 training.label_smoothing 0.07044134688835077 +133 33 model.embedding_dim 0.0 +133 33 model.hidden_dropout_rate 0.132527835724903 +133 33 model.num_filters 5.0 +133 33 optimizer.lr 0.0603486351939826 +133 33 training.batch_size 0.0 +133 33 training.label_smoothing 0.2634525051223771 +133 34 model.embedding_dim 0.0 +133 34 model.hidden_dropout_rate 0.20389456286228788 +133 34 model.num_filters 3.0 +133 34 optimizer.lr 0.03564488999705891 +133 34 training.batch_size 2.0 +133 34 training.label_smoothing 0.003243877234881083 +133 35 model.embedding_dim 0.0 +133 35 model.hidden_dropout_rate 0.1462486876589391 +133 35 model.num_filters 8.0 +133 35 optimizer.lr 0.009544106958686146 +133 35 training.batch_size 1.0 +133 35 training.label_smoothing 0.8115375224855169 +133 36 model.embedding_dim 1.0 +133 36 model.hidden_dropout_rate 0.11278221513103222 +133 36 model.num_filters 5.0 +133 36 
optimizer.lr 0.01184764990796341 +133 36 training.batch_size 0.0 +133 36 training.label_smoothing 0.0012993387651229857 +133 37 model.embedding_dim 0.0 +133 37 model.hidden_dropout_rate 0.196203269568161 +133 37 model.num_filters 6.0 +133 37 optimizer.lr 0.08075787679769839 +133 37 training.batch_size 2.0 +133 37 training.label_smoothing 0.40066909742493856 +133 38 model.embedding_dim 0.0 +133 38 model.hidden_dropout_rate 0.420186981831125 +133 38 model.num_filters 6.0 +133 38 optimizer.lr 0.0037142411573111876 +133 38 training.batch_size 0.0 +133 38 training.label_smoothing 0.08681133450396356 +133 39 model.embedding_dim 1.0 +133 39 model.hidden_dropout_rate 0.32143229002454876 +133 39 model.num_filters 8.0 +133 39 optimizer.lr 0.02471425539941041 +133 39 training.batch_size 1.0 +133 39 training.label_smoothing 0.00768046992983588 +133 40 model.embedding_dim 2.0 +133 40 model.hidden_dropout_rate 0.3423916554424804 +133 40 model.num_filters 1.0 +133 40 optimizer.lr 0.04273004677926275 +133 40 training.batch_size 2.0 +133 40 training.label_smoothing 0.5408971474018441 +133 41 model.embedding_dim 2.0 +133 41 model.hidden_dropout_rate 0.40035798473614614 +133 41 model.num_filters 4.0 +133 41 optimizer.lr 0.052714240416464 +133 41 training.batch_size 2.0 +133 41 training.label_smoothing 0.47257442602115013 +133 42 model.embedding_dim 1.0 +133 42 model.hidden_dropout_rate 0.10182633767706735 +133 42 model.num_filters 2.0 +133 42 optimizer.lr 0.0016191211697412595 +133 42 training.batch_size 0.0 +133 42 training.label_smoothing 0.0011392033669168347 +133 43 model.embedding_dim 2.0 +133 43 model.hidden_dropout_rate 0.4729639361882161 +133 43 model.num_filters 1.0 +133 43 optimizer.lr 0.0036523809475221282 +133 43 training.batch_size 1.0 +133 43 training.label_smoothing 0.005695426107199815 +133 44 model.embedding_dim 1.0 +133 44 model.hidden_dropout_rate 0.352369902777362 +133 44 model.num_filters 5.0 +133 44 optimizer.lr 0.0035474750459258127 +133 44 training.batch_size 
0.0 +133 44 training.label_smoothing 0.31480242198762287 +133 45 model.embedding_dim 0.0 +133 45 model.hidden_dropout_rate 0.4219518467112986 +133 45 model.num_filters 2.0 +133 45 optimizer.lr 0.0022233256026510592 +133 45 training.batch_size 1.0 +133 45 training.label_smoothing 0.005637181021319254 +133 46 model.embedding_dim 0.0 +133 46 model.hidden_dropout_rate 0.3759719264669895 +133 46 model.num_filters 4.0 +133 46 optimizer.lr 0.008173789972976353 +133 46 training.batch_size 0.0 +133 46 training.label_smoothing 0.0034062922172572833 +133 47 model.embedding_dim 1.0 +133 47 model.hidden_dropout_rate 0.1154196077616808 +133 47 model.num_filters 6.0 +133 47 optimizer.lr 0.0014697422500269586 +133 47 training.batch_size 0.0 +133 47 training.label_smoothing 0.11369615589332643 +133 48 model.embedding_dim 1.0 +133 48 model.hidden_dropout_rate 0.22704284652109452 +133 48 model.num_filters 5.0 +133 48 optimizer.lr 0.01558143224184729 +133 48 training.batch_size 0.0 +133 48 training.label_smoothing 0.16330169006560938 +133 49 model.embedding_dim 0.0 +133 49 model.hidden_dropout_rate 0.23259424793338312 +133 49 model.num_filters 7.0 +133 49 optimizer.lr 0.002467782977528416 +133 49 training.batch_size 2.0 +133 49 training.label_smoothing 0.00655704409981149 +133 50 model.embedding_dim 1.0 +133 50 model.hidden_dropout_rate 0.19484137381406988 +133 50 model.num_filters 8.0 +133 50 optimizer.lr 0.0015180343490000924 +133 50 training.batch_size 2.0 +133 50 training.label_smoothing 0.5286118500948221 +133 51 model.embedding_dim 1.0 +133 51 model.hidden_dropout_rate 0.37498785964608683 +133 51 model.num_filters 9.0 +133 51 optimizer.lr 0.03072056483238895 +133 51 training.batch_size 2.0 +133 51 training.label_smoothing 0.04985517616381577 +133 52 model.embedding_dim 0.0 +133 52 model.hidden_dropout_rate 0.38477152406065684 +133 52 model.num_filters 8.0 +133 52 optimizer.lr 0.0029917786402654857 +133 52 training.batch_size 2.0 +133 52 training.label_smoothing 
0.010827610120731848 +133 53 model.embedding_dim 0.0 +133 53 model.hidden_dropout_rate 0.21537113122886278 +133 53 model.num_filters 7.0 +133 53 optimizer.lr 0.0014833011007461207 +133 53 training.batch_size 1.0 +133 53 training.label_smoothing 0.002461716895607691 +133 54 model.embedding_dim 0.0 +133 54 model.hidden_dropout_rate 0.4566523035613752 +133 54 model.num_filters 3.0 +133 54 optimizer.lr 0.021915452584402115 +133 54 training.batch_size 1.0 +133 54 training.label_smoothing 0.515031331682553 +133 55 model.embedding_dim 1.0 +133 55 model.hidden_dropout_rate 0.3726169569816552 +133 55 model.num_filters 5.0 +133 55 optimizer.lr 0.003809734343961382 +133 55 training.batch_size 0.0 +133 55 training.label_smoothing 0.017015361115863072 +133 56 model.embedding_dim 0.0 +133 56 model.hidden_dropout_rate 0.3274401158780019 +133 56 model.num_filters 7.0 +133 56 optimizer.lr 0.0022364492559189845 +133 56 training.batch_size 2.0 +133 56 training.label_smoothing 0.027846732851735025 +133 57 model.embedding_dim 2.0 +133 57 model.hidden_dropout_rate 0.4711864175545083 +133 57 model.num_filters 5.0 +133 57 optimizer.lr 0.007158709554851202 +133 57 training.batch_size 2.0 +133 57 training.label_smoothing 0.03769950364932089 +133 58 model.embedding_dim 2.0 +133 58 model.hidden_dropout_rate 0.22120220796874426 +133 58 model.num_filters 2.0 +133 58 optimizer.lr 0.003015019987950593 +133 58 training.batch_size 2.0 +133 58 training.label_smoothing 0.010921952972999667 +133 59 model.embedding_dim 0.0 +133 59 model.hidden_dropout_rate 0.47586800448812094 +133 59 model.num_filters 4.0 +133 59 optimizer.lr 0.009066284528257295 +133 59 training.batch_size 0.0 +133 59 training.label_smoothing 0.008206950861613157 +133 60 model.embedding_dim 2.0 +133 60 model.hidden_dropout_rate 0.2632369578870426 +133 60 model.num_filters 0.0 +133 60 optimizer.lr 0.05488790327775265 +133 60 training.batch_size 0.0 +133 60 training.label_smoothing 0.0015453775296942296 +133 61 model.embedding_dim 0.0 
+133 61 model.hidden_dropout_rate 0.26695066751639074 +133 61 model.num_filters 5.0 +133 61 optimizer.lr 0.0014300520750547914 +133 61 training.batch_size 1.0 +133 61 training.label_smoothing 0.0035313639338610463 +133 62 model.embedding_dim 1.0 +133 62 model.hidden_dropout_rate 0.21170761747304534 +133 62 model.num_filters 2.0 +133 62 optimizer.lr 0.05005416932771066 +133 62 training.batch_size 2.0 +133 62 training.label_smoothing 0.0014607932439298149 +133 63 model.embedding_dim 2.0 +133 63 model.hidden_dropout_rate 0.44261978896605747 +133 63 model.num_filters 8.0 +133 63 optimizer.lr 0.0010165354137350933 +133 63 training.batch_size 0.0 +133 63 training.label_smoothing 0.023187361462737553 +133 64 model.embedding_dim 2.0 +133 64 model.hidden_dropout_rate 0.12188471598244402 +133 64 model.num_filters 2.0 +133 64 optimizer.lr 0.0031395239412529306 +133 64 training.batch_size 2.0 +133 64 training.label_smoothing 0.09683479931748525 +133 65 model.embedding_dim 1.0 +133 65 model.hidden_dropout_rate 0.2434786802880607 +133 65 model.num_filters 8.0 +133 65 optimizer.lr 0.01054576282646672 +133 65 training.batch_size 1.0 +133 65 training.label_smoothing 0.042557263238677645 +133 66 model.embedding_dim 0.0 +133 66 model.hidden_dropout_rate 0.15249790615908984 +133 66 model.num_filters 0.0 +133 66 optimizer.lr 0.007984118406305168 +133 66 training.batch_size 0.0 +133 66 training.label_smoothing 0.00144998863445634 +133 67 model.embedding_dim 1.0 +133 67 model.hidden_dropout_rate 0.17152836487771544 +133 67 model.num_filters 9.0 +133 67 optimizer.lr 0.005110202382602253 +133 67 training.batch_size 0.0 +133 67 training.label_smoothing 0.0017102946769029749 +133 68 model.embedding_dim 2.0 +133 68 model.hidden_dropout_rate 0.36709074300488564 +133 68 model.num_filters 8.0 +133 68 optimizer.lr 0.041290565535958626 +133 68 training.batch_size 1.0 +133 68 training.label_smoothing 0.27870467201457816 +133 69 model.embedding_dim 0.0 +133 69 model.hidden_dropout_rate 
0.33294522290400264 +133 69 model.num_filters 3.0 +133 69 optimizer.lr 0.0056818636684470136 +133 69 training.batch_size 2.0 +133 69 training.label_smoothing 0.017831897295184775 +133 70 model.embedding_dim 1.0 +133 70 model.hidden_dropout_rate 0.44781464519845005 +133 70 model.num_filters 9.0 +133 70 optimizer.lr 0.004549022473104307 +133 70 training.batch_size 2.0 +133 70 training.label_smoothing 0.7777308796862221 +133 71 model.embedding_dim 0.0 +133 71 model.hidden_dropout_rate 0.26101127660063617 +133 71 model.num_filters 2.0 +133 71 optimizer.lr 0.022394422939607018 +133 71 training.batch_size 1.0 +133 71 training.label_smoothing 0.0011818982646600983 +133 72 model.embedding_dim 1.0 +133 72 model.hidden_dropout_rate 0.42933048961247844 +133 72 model.num_filters 3.0 +133 72 optimizer.lr 0.0032807732685879755 +133 72 training.batch_size 0.0 +133 72 training.label_smoothing 0.0031922064886998652 +133 73 model.embedding_dim 2.0 +133 73 model.hidden_dropout_rate 0.3623607861254321 +133 73 model.num_filters 8.0 +133 73 optimizer.lr 0.0020641156015300272 +133 73 training.batch_size 1.0 +133 73 training.label_smoothing 0.001094650726434611 +133 74 model.embedding_dim 2.0 +133 74 model.hidden_dropout_rate 0.11517233366923207 +133 74 model.num_filters 1.0 +133 74 optimizer.lr 0.05768539952780633 +133 74 training.batch_size 0.0 +133 74 training.label_smoothing 0.0014756197280061032 +133 75 model.embedding_dim 2.0 +133 75 model.hidden_dropout_rate 0.3412054470172132 +133 75 model.num_filters 6.0 +133 75 optimizer.lr 0.09555212510381599 +133 75 training.batch_size 0.0 +133 75 training.label_smoothing 0.01010157220817743 +133 76 model.embedding_dim 2.0 +133 76 model.hidden_dropout_rate 0.45169298855250195 +133 76 model.num_filters 9.0 +133 76 optimizer.lr 0.005122260789725503 +133 76 training.batch_size 0.0 +133 76 training.label_smoothing 0.13751697376554597 +133 77 model.embedding_dim 1.0 +133 77 model.hidden_dropout_rate 0.2992658787172636 +133 77 model.num_filters 2.0 
+133 77 optimizer.lr 0.0018014908139529367 +133 77 training.batch_size 0.0 +133 77 training.label_smoothing 0.0033893821865824208 +133 78 model.embedding_dim 2.0 +133 78 model.hidden_dropout_rate 0.3844157445626154 +133 78 model.num_filters 1.0 +133 78 optimizer.lr 0.021498009018624035 +133 78 training.batch_size 0.0 +133 78 training.label_smoothing 0.8913915347841825 +133 79 model.embedding_dim 2.0 +133 79 model.hidden_dropout_rate 0.33803196882991277 +133 79 model.num_filters 8.0 +133 79 optimizer.lr 0.006854804981388122 +133 79 training.batch_size 1.0 +133 79 training.label_smoothing 0.659496999260134 +133 80 model.embedding_dim 2.0 +133 80 model.hidden_dropout_rate 0.40642070393078167 +133 80 model.num_filters 9.0 +133 80 optimizer.lr 0.09505673176932218 +133 80 training.batch_size 2.0 +133 80 training.label_smoothing 0.007905789158710609 +133 81 model.embedding_dim 1.0 +133 81 model.hidden_dropout_rate 0.4021852027120294 +133 81 model.num_filters 6.0 +133 81 optimizer.lr 0.0016837823337922414 +133 81 training.batch_size 1.0 +133 81 training.label_smoothing 0.0013964223257050873 +133 82 model.embedding_dim 0.0 +133 82 model.hidden_dropout_rate 0.21668354262314532 +133 82 model.num_filters 8.0 +133 82 optimizer.lr 0.06923915277325683 +133 82 training.batch_size 0.0 +133 82 training.label_smoothing 0.4377031036835964 +133 83 model.embedding_dim 0.0 +133 83 model.hidden_dropout_rate 0.225552201075202 +133 83 model.num_filters 6.0 +133 83 optimizer.lr 0.0044810345831577565 +133 83 training.batch_size 0.0 +133 83 training.label_smoothing 0.01885007136245436 +133 84 model.embedding_dim 0.0 +133 84 model.hidden_dropout_rate 0.3438183813464487 +133 84 model.num_filters 6.0 +133 84 optimizer.lr 0.003078308277272269 +133 84 training.batch_size 0.0 +133 84 training.label_smoothing 0.8661534015993613 +133 85 model.embedding_dim 0.0 +133 85 model.hidden_dropout_rate 0.16124011246115613 +133 85 model.num_filters 1.0 +133 85 optimizer.lr 0.025789724077862983 +133 85 
training.batch_size 2.0 +133 85 training.label_smoothing 0.00817549776643872 +133 86 model.embedding_dim 2.0 +133 86 model.hidden_dropout_rate 0.17567993599542994 +133 86 model.num_filters 8.0 +133 86 optimizer.lr 0.02095614640924271 +133 86 training.batch_size 0.0 +133 86 training.label_smoothing 0.09254671880731782 +133 87 model.embedding_dim 1.0 +133 87 model.hidden_dropout_rate 0.22391401275575806 +133 87 model.num_filters 2.0 +133 87 optimizer.lr 0.03333684136755151 +133 87 training.batch_size 0.0 +133 87 training.label_smoothing 0.19444825103998217 +133 88 model.embedding_dim 0.0 +133 88 model.hidden_dropout_rate 0.24182422813824656 +133 88 model.num_filters 5.0 +133 88 optimizer.lr 0.0406759043453602 +133 88 training.batch_size 0.0 +133 88 training.label_smoothing 0.009410103073264639 +133 89 model.embedding_dim 2.0 +133 89 model.hidden_dropout_rate 0.2743861435623871 +133 89 model.num_filters 7.0 +133 89 optimizer.lr 0.016834499984072215 +133 89 training.batch_size 2.0 +133 89 training.label_smoothing 0.0017700058888531578 +133 90 model.embedding_dim 1.0 +133 90 model.hidden_dropout_rate 0.2704801410232574 +133 90 model.num_filters 6.0 +133 90 optimizer.lr 0.0022661342309862756 +133 90 training.batch_size 2.0 +133 90 training.label_smoothing 0.12279376414317976 +133 91 model.embedding_dim 1.0 +133 91 model.hidden_dropout_rate 0.3551696580080015 +133 91 model.num_filters 0.0 +133 91 optimizer.lr 0.0015405195612145175 +133 91 training.batch_size 0.0 +133 91 training.label_smoothing 0.0015862731391585686 +133 92 model.embedding_dim 0.0 +133 92 model.hidden_dropout_rate 0.25898412323221576 +133 92 model.num_filters 7.0 +133 92 optimizer.lr 0.0014441703970016573 +133 92 training.batch_size 1.0 +133 92 training.label_smoothing 0.004396604753780082 +133 93 model.embedding_dim 1.0 +133 93 model.hidden_dropout_rate 0.265254084452867 +133 93 model.num_filters 3.0 +133 93 optimizer.lr 0.024744755576826382 +133 93 training.batch_size 1.0 +133 93 
training.label_smoothing 0.036302987104991864 +133 94 model.embedding_dim 0.0 +133 94 model.hidden_dropout_rate 0.30715238242979553 +133 94 model.num_filters 1.0 +133 94 optimizer.lr 0.04003767288360675 +133 94 training.batch_size 1.0 +133 94 training.label_smoothing 0.014402300501744979 +133 95 model.embedding_dim 2.0 +133 95 model.hidden_dropout_rate 0.3287831211678678 +133 95 model.num_filters 6.0 +133 95 optimizer.lr 0.0128399083869723 +133 95 training.batch_size 1.0 +133 95 training.label_smoothing 0.002699113293695055 +133 1 dataset """kinships""" +133 1 model """convkb""" +133 1 loss """softplus""" +133 1 regularizer """no""" +133 1 optimizer """adam""" +133 1 training_loop """lcwa""" +133 1 evaluator """rankbased""" +133 2 dataset """kinships""" +133 2 model """convkb""" +133 2 loss """softplus""" +133 2 regularizer """no""" +133 2 optimizer """adam""" +133 2 training_loop """lcwa""" +133 2 evaluator """rankbased""" +133 3 dataset """kinships""" +133 3 model """convkb""" +133 3 loss """softplus""" +133 3 regularizer """no""" +133 3 optimizer """adam""" +133 3 training_loop """lcwa""" +133 3 evaluator """rankbased""" +133 4 dataset """kinships""" +133 4 model """convkb""" +133 4 loss """softplus""" +133 4 regularizer """no""" +133 4 optimizer """adam""" +133 4 training_loop """lcwa""" +133 4 evaluator """rankbased""" +133 5 dataset """kinships""" +133 5 model """convkb""" +133 5 loss """softplus""" +133 5 regularizer """no""" +133 5 optimizer """adam""" +133 5 training_loop """lcwa""" +133 5 evaluator """rankbased""" +133 6 dataset """kinships""" +133 6 model """convkb""" +133 6 loss """softplus""" +133 6 regularizer """no""" +133 6 optimizer """adam""" +133 6 training_loop """lcwa""" +133 6 evaluator """rankbased""" +133 7 dataset """kinships""" +133 7 model """convkb""" +133 7 loss """softplus""" +133 7 regularizer """no""" +133 7 optimizer """adam""" +133 7 training_loop """lcwa""" +133 7 evaluator """rankbased""" +133 8 dataset """kinships""" +133 8 
model """convkb""" +133 8 loss """softplus""" +133 8 regularizer """no""" +133 8 optimizer """adam""" +133 8 training_loop """lcwa""" +133 8 evaluator """rankbased""" +133 9 dataset """kinships""" +133 9 model """convkb""" +133 9 loss """softplus""" +133 9 regularizer """no""" +133 9 optimizer """adam""" +133 9 training_loop """lcwa""" +133 9 evaluator """rankbased""" +133 10 dataset """kinships""" +133 10 model """convkb""" +133 10 loss """softplus""" +133 10 regularizer """no""" +133 10 optimizer """adam""" +133 10 training_loop """lcwa""" +133 10 evaluator """rankbased""" +133 11 dataset """kinships""" +133 11 model """convkb""" +133 11 loss """softplus""" +133 11 regularizer """no""" +133 11 optimizer """adam""" +133 11 training_loop """lcwa""" +133 11 evaluator """rankbased""" +133 12 dataset """kinships""" +133 12 model """convkb""" +133 12 loss """softplus""" +133 12 regularizer """no""" +133 12 optimizer """adam""" +133 12 training_loop """lcwa""" +133 12 evaluator """rankbased""" +133 13 dataset """kinships""" +133 13 model """convkb""" +133 13 loss """softplus""" +133 13 regularizer """no""" +133 13 optimizer """adam""" +133 13 training_loop """lcwa""" +133 13 evaluator """rankbased""" +133 14 dataset """kinships""" +133 14 model """convkb""" +133 14 loss """softplus""" +133 14 regularizer """no""" +133 14 optimizer """adam""" +133 14 training_loop """lcwa""" +133 14 evaluator """rankbased""" +133 15 dataset """kinships""" +133 15 model """convkb""" +133 15 loss """softplus""" +133 15 regularizer """no""" +133 15 optimizer """adam""" +133 15 training_loop """lcwa""" +133 15 evaluator """rankbased""" +133 16 dataset """kinships""" +133 16 model """convkb""" +133 16 loss """softplus""" +133 16 regularizer """no""" +133 16 optimizer """adam""" +133 16 training_loop """lcwa""" +133 16 evaluator """rankbased""" +133 17 dataset """kinships""" +133 17 model """convkb""" +133 17 loss """softplus""" +133 17 regularizer """no""" +133 17 optimizer """adam""" +133 17 
training_loop """lcwa""" +133 17 evaluator """rankbased""" +133 18 dataset """kinships""" +133 18 model """convkb""" +133 18 loss """softplus""" +133 18 regularizer """no""" +133 18 optimizer """adam""" +133 18 training_loop """lcwa""" +133 18 evaluator """rankbased""" +133 19 dataset """kinships""" +133 19 model """convkb""" +133 19 loss """softplus""" +133 19 regularizer """no""" +133 19 optimizer """adam""" +133 19 training_loop """lcwa""" +133 19 evaluator """rankbased""" +133 20 dataset """kinships""" +133 20 model """convkb""" +133 20 loss """softplus""" +133 20 regularizer """no""" +133 20 optimizer """adam""" +133 20 training_loop """lcwa""" +133 20 evaluator """rankbased""" +133 21 dataset """kinships""" +133 21 model """convkb""" +133 21 loss """softplus""" +133 21 regularizer """no""" +133 21 optimizer """adam""" +133 21 training_loop """lcwa""" +133 21 evaluator """rankbased""" +133 22 dataset """kinships""" +133 22 model """convkb""" +133 22 loss """softplus""" +133 22 regularizer """no""" +133 22 optimizer """adam""" +133 22 training_loop """lcwa""" +133 22 evaluator """rankbased""" +133 23 dataset """kinships""" +133 23 model """convkb""" +133 23 loss """softplus""" +133 23 regularizer """no""" +133 23 optimizer """adam""" +133 23 training_loop """lcwa""" +133 23 evaluator """rankbased""" +133 24 dataset """kinships""" +133 24 model """convkb""" +133 24 loss """softplus""" +133 24 regularizer """no""" +133 24 optimizer """adam""" +133 24 training_loop """lcwa""" +133 24 evaluator """rankbased""" +133 25 dataset """kinships""" +133 25 model """convkb""" +133 25 loss """softplus""" +133 25 regularizer """no""" +133 25 optimizer """adam""" +133 25 training_loop """lcwa""" +133 25 evaluator """rankbased""" +133 26 dataset """kinships""" +133 26 model """convkb""" +133 26 loss """softplus""" +133 26 regularizer """no""" +133 26 optimizer """adam""" +133 26 training_loop """lcwa""" +133 26 evaluator """rankbased""" +133 27 dataset """kinships""" +133 27 
model """convkb""" +133 27 loss """softplus""" +133 27 regularizer """no""" +133 27 optimizer """adam""" +133 27 training_loop """lcwa""" +133 27 evaluator """rankbased""" +133 28 dataset """kinships""" +133 28 model """convkb""" +133 28 loss """softplus""" +133 28 regularizer """no""" +133 28 optimizer """adam""" +133 28 training_loop """lcwa""" +133 28 evaluator """rankbased""" +133 29 dataset """kinships""" +133 29 model """convkb""" +133 29 loss """softplus""" +133 29 regularizer """no""" +133 29 optimizer """adam""" +133 29 training_loop """lcwa""" +133 29 evaluator """rankbased""" +133 30 dataset """kinships""" +133 30 model """convkb""" +133 30 loss """softplus""" +133 30 regularizer """no""" +133 30 optimizer """adam""" +133 30 training_loop """lcwa""" +133 30 evaluator """rankbased""" +133 31 dataset """kinships""" +133 31 model """convkb""" +133 31 loss """softplus""" +133 31 regularizer """no""" +133 31 optimizer """adam""" +133 31 training_loop """lcwa""" +133 31 evaluator """rankbased""" +133 32 dataset """kinships""" +133 32 model """convkb""" +133 32 loss """softplus""" +133 32 regularizer """no""" +133 32 optimizer """adam""" +133 32 training_loop """lcwa""" +133 32 evaluator """rankbased""" +133 33 dataset """kinships""" +133 33 model """convkb""" +133 33 loss """softplus""" +133 33 regularizer """no""" +133 33 optimizer """adam""" +133 33 training_loop """lcwa""" +133 33 evaluator """rankbased""" +133 34 dataset """kinships""" +133 34 model """convkb""" +133 34 loss """softplus""" +133 34 regularizer """no""" +133 34 optimizer """adam""" +133 34 training_loop """lcwa""" +133 34 evaluator """rankbased""" +133 35 dataset """kinships""" +133 35 model """convkb""" +133 35 loss """softplus""" +133 35 regularizer """no""" +133 35 optimizer """adam""" +133 35 training_loop """lcwa""" +133 35 evaluator """rankbased""" +133 36 dataset """kinships""" +133 36 model """convkb""" +133 36 loss """softplus""" +133 36 regularizer """no""" +133 36 optimizer 
"""adam""" +133 36 training_loop """lcwa""" +133 36 evaluator """rankbased""" +133 37 dataset """kinships""" +133 37 model """convkb""" +133 37 loss """softplus""" +133 37 regularizer """no""" +133 37 optimizer """adam""" +133 37 training_loop """lcwa""" +133 37 evaluator """rankbased""" +133 38 dataset """kinships""" +133 38 model """convkb""" +133 38 loss """softplus""" +133 38 regularizer """no""" +133 38 optimizer """adam""" +133 38 training_loop """lcwa""" +133 38 evaluator """rankbased""" +133 39 dataset """kinships""" +133 39 model """convkb""" +133 39 loss """softplus""" +133 39 regularizer """no""" +133 39 optimizer """adam""" +133 39 training_loop """lcwa""" +133 39 evaluator """rankbased""" +133 40 dataset """kinships""" +133 40 model """convkb""" +133 40 loss """softplus""" +133 40 regularizer """no""" +133 40 optimizer """adam""" +133 40 training_loop """lcwa""" +133 40 evaluator """rankbased""" +133 41 dataset """kinships""" +133 41 model """convkb""" +133 41 loss """softplus""" +133 41 regularizer """no""" +133 41 optimizer """adam""" +133 41 training_loop """lcwa""" +133 41 evaluator """rankbased""" +133 42 dataset """kinships""" +133 42 model """convkb""" +133 42 loss """softplus""" +133 42 regularizer """no""" +133 42 optimizer """adam""" +133 42 training_loop """lcwa""" +133 42 evaluator """rankbased""" +133 43 dataset """kinships""" +133 43 model """convkb""" +133 43 loss """softplus""" +133 43 regularizer """no""" +133 43 optimizer """adam""" +133 43 training_loop """lcwa""" +133 43 evaluator """rankbased""" +133 44 dataset """kinships""" +133 44 model """convkb""" +133 44 loss """softplus""" +133 44 regularizer """no""" +133 44 optimizer """adam""" +133 44 training_loop """lcwa""" +133 44 evaluator """rankbased""" +133 45 dataset """kinships""" +133 45 model """convkb""" +133 45 loss """softplus""" +133 45 regularizer """no""" +133 45 optimizer """adam""" +133 45 training_loop """lcwa""" +133 45 evaluator """rankbased""" +133 46 dataset 
"""kinships""" +133 46 model """convkb""" +133 46 loss """softplus""" +133 46 regularizer """no""" +133 46 optimizer """adam""" +133 46 training_loop """lcwa""" +133 46 evaluator """rankbased""" +133 47 dataset """kinships""" +133 47 model """convkb""" +133 47 loss """softplus""" +133 47 regularizer """no""" +133 47 optimizer """adam""" +133 47 training_loop """lcwa""" +133 47 evaluator """rankbased""" +133 48 dataset """kinships""" +133 48 model """convkb""" +133 48 loss """softplus""" +133 48 regularizer """no""" +133 48 optimizer """adam""" +133 48 training_loop """lcwa""" +133 48 evaluator """rankbased""" +133 49 dataset """kinships""" +133 49 model """convkb""" +133 49 loss """softplus""" +133 49 regularizer """no""" +133 49 optimizer """adam""" +133 49 training_loop """lcwa""" +133 49 evaluator """rankbased""" +133 50 dataset """kinships""" +133 50 model """convkb""" +133 50 loss """softplus""" +133 50 regularizer """no""" +133 50 optimizer """adam""" +133 50 training_loop """lcwa""" +133 50 evaluator """rankbased""" +133 51 dataset """kinships""" +133 51 model """convkb""" +133 51 loss """softplus""" +133 51 regularizer """no""" +133 51 optimizer """adam""" +133 51 training_loop """lcwa""" +133 51 evaluator """rankbased""" +133 52 dataset """kinships""" +133 52 model """convkb""" +133 52 loss """softplus""" +133 52 regularizer """no""" +133 52 optimizer """adam""" +133 52 training_loop """lcwa""" +133 52 evaluator """rankbased""" +133 53 dataset """kinships""" +133 53 model """convkb""" +133 53 loss """softplus""" +133 53 regularizer """no""" +133 53 optimizer """adam""" +133 53 training_loop """lcwa""" +133 53 evaluator """rankbased""" +133 54 dataset """kinships""" +133 54 model """convkb""" +133 54 loss """softplus""" +133 54 regularizer """no""" +133 54 optimizer """adam""" +133 54 training_loop """lcwa""" +133 54 evaluator """rankbased""" +133 55 dataset """kinships""" +133 55 model """convkb""" +133 55 loss """softplus""" +133 55 regularizer """no""" 
+133 55 optimizer """adam""" +133 55 training_loop """lcwa""" +133 55 evaluator """rankbased""" +133 56 dataset """kinships""" +133 56 model """convkb""" +133 56 loss """softplus""" +133 56 regularizer """no""" +133 56 optimizer """adam""" +133 56 training_loop """lcwa""" +133 56 evaluator """rankbased""" +133 57 dataset """kinships""" +133 57 model """convkb""" +133 57 loss """softplus""" +133 57 regularizer """no""" +133 57 optimizer """adam""" +133 57 training_loop """lcwa""" +133 57 evaluator """rankbased""" +133 58 dataset """kinships""" +133 58 model """convkb""" +133 58 loss """softplus""" +133 58 regularizer """no""" +133 58 optimizer """adam""" +133 58 training_loop """lcwa""" +133 58 evaluator """rankbased""" +133 59 dataset """kinships""" +133 59 model """convkb""" +133 59 loss """softplus""" +133 59 regularizer """no""" +133 59 optimizer """adam""" +133 59 training_loop """lcwa""" +133 59 evaluator """rankbased""" +133 60 dataset """kinships""" +133 60 model """convkb""" +133 60 loss """softplus""" +133 60 regularizer """no""" +133 60 optimizer """adam""" +133 60 training_loop """lcwa""" +133 60 evaluator """rankbased""" +133 61 dataset """kinships""" +133 61 model """convkb""" +133 61 loss """softplus""" +133 61 regularizer """no""" +133 61 optimizer """adam""" +133 61 training_loop """lcwa""" +133 61 evaluator """rankbased""" +133 62 dataset """kinships""" +133 62 model """convkb""" +133 62 loss """softplus""" +133 62 regularizer """no""" +133 62 optimizer """adam""" +133 62 training_loop """lcwa""" +133 62 evaluator """rankbased""" +133 63 dataset """kinships""" +133 63 model """convkb""" +133 63 loss """softplus""" +133 63 regularizer """no""" +133 63 optimizer """adam""" +133 63 training_loop """lcwa""" +133 63 evaluator """rankbased""" +133 64 dataset """kinships""" +133 64 model """convkb""" +133 64 loss """softplus""" +133 64 regularizer """no""" +133 64 optimizer """adam""" +133 64 training_loop """lcwa""" +133 64 evaluator """rankbased""" +133 
65 dataset """kinships""" +133 65 model """convkb""" +133 65 loss """softplus""" +133 65 regularizer """no""" +133 65 optimizer """adam""" +133 65 training_loop """lcwa""" +133 65 evaluator """rankbased""" +133 66 dataset """kinships""" +133 66 model """convkb""" +133 66 loss """softplus""" +133 66 regularizer """no""" +133 66 optimizer """adam""" +133 66 training_loop """lcwa""" +133 66 evaluator """rankbased""" +133 67 dataset """kinships""" +133 67 model """convkb""" +133 67 loss """softplus""" +133 67 regularizer """no""" +133 67 optimizer """adam""" +133 67 training_loop """lcwa""" +133 67 evaluator """rankbased""" +133 68 dataset """kinships""" +133 68 model """convkb""" +133 68 loss """softplus""" +133 68 regularizer """no""" +133 68 optimizer """adam""" +133 68 training_loop """lcwa""" +133 68 evaluator """rankbased""" +133 69 dataset """kinships""" +133 69 model """convkb""" +133 69 loss """softplus""" +133 69 regularizer """no""" +133 69 optimizer """adam""" +133 69 training_loop """lcwa""" +133 69 evaluator """rankbased""" +133 70 dataset """kinships""" +133 70 model """convkb""" +133 70 loss """softplus""" +133 70 regularizer """no""" +133 70 optimizer """adam""" +133 70 training_loop """lcwa""" +133 70 evaluator """rankbased""" +133 71 dataset """kinships""" +133 71 model """convkb""" +133 71 loss """softplus""" +133 71 regularizer """no""" +133 71 optimizer """adam""" +133 71 training_loop """lcwa""" +133 71 evaluator """rankbased""" +133 72 dataset """kinships""" +133 72 model """convkb""" +133 72 loss """softplus""" +133 72 regularizer """no""" +133 72 optimizer """adam""" +133 72 training_loop """lcwa""" +133 72 evaluator """rankbased""" +133 73 dataset """kinships""" +133 73 model """convkb""" +133 73 loss """softplus""" +133 73 regularizer """no""" +133 73 optimizer """adam""" +133 73 training_loop """lcwa""" +133 73 evaluator """rankbased""" +133 74 dataset """kinships""" +133 74 model """convkb""" +133 74 loss """softplus""" +133 74 regularizer 
"""no""" +133 74 optimizer """adam""" +133 74 training_loop """lcwa""" +133 74 evaluator """rankbased""" +133 75 dataset """kinships""" +133 75 model """convkb""" +133 75 loss """softplus""" +133 75 regularizer """no""" +133 75 optimizer """adam""" +133 75 training_loop """lcwa""" +133 75 evaluator """rankbased""" +133 76 dataset """kinships""" +133 76 model """convkb""" +133 76 loss """softplus""" +133 76 regularizer """no""" +133 76 optimizer """adam""" +133 76 training_loop """lcwa""" +133 76 evaluator """rankbased""" +133 77 dataset """kinships""" +133 77 model """convkb""" +133 77 loss """softplus""" +133 77 regularizer """no""" +133 77 optimizer """adam""" +133 77 training_loop """lcwa""" +133 77 evaluator """rankbased""" +133 78 dataset """kinships""" +133 78 model """convkb""" +133 78 loss """softplus""" +133 78 regularizer """no""" +133 78 optimizer """adam""" +133 78 training_loop """lcwa""" +133 78 evaluator """rankbased""" +133 79 dataset """kinships""" +133 79 model """convkb""" +133 79 loss """softplus""" +133 79 regularizer """no""" +133 79 optimizer """adam""" +133 79 training_loop """lcwa""" +133 79 evaluator """rankbased""" +133 80 dataset """kinships""" +133 80 model """convkb""" +133 80 loss """softplus""" +133 80 regularizer """no""" +133 80 optimizer """adam""" +133 80 training_loop """lcwa""" +133 80 evaluator """rankbased""" +133 81 dataset """kinships""" +133 81 model """convkb""" +133 81 loss """softplus""" +133 81 regularizer """no""" +133 81 optimizer """adam""" +133 81 training_loop """lcwa""" +133 81 evaluator """rankbased""" +133 82 dataset """kinships""" +133 82 model """convkb""" +133 82 loss """softplus""" +133 82 regularizer """no""" +133 82 optimizer """adam""" +133 82 training_loop """lcwa""" +133 82 evaluator """rankbased""" +133 83 dataset """kinships""" +133 83 model """convkb""" +133 83 loss """softplus""" +133 83 regularizer """no""" +133 83 optimizer """adam""" +133 83 training_loop """lcwa""" +133 83 evaluator 
"""rankbased""" +133 84 dataset """kinships""" +133 84 model """convkb""" +133 84 loss """softplus""" +133 84 regularizer """no""" +133 84 optimizer """adam""" +133 84 training_loop """lcwa""" +133 84 evaluator """rankbased""" +133 85 dataset """kinships""" +133 85 model """convkb""" +133 85 loss """softplus""" +133 85 regularizer """no""" +133 85 optimizer """adam""" +133 85 training_loop """lcwa""" +133 85 evaluator """rankbased""" +133 86 dataset """kinships""" +133 86 model """convkb""" +133 86 loss """softplus""" +133 86 regularizer """no""" +133 86 optimizer """adam""" +133 86 training_loop """lcwa""" +133 86 evaluator """rankbased""" +133 87 dataset """kinships""" +133 87 model """convkb""" +133 87 loss """softplus""" +133 87 regularizer """no""" +133 87 optimizer """adam""" +133 87 training_loop """lcwa""" +133 87 evaluator """rankbased""" +133 88 dataset """kinships""" +133 88 model """convkb""" +133 88 loss """softplus""" +133 88 regularizer """no""" +133 88 optimizer """adam""" +133 88 training_loop """lcwa""" +133 88 evaluator """rankbased""" +133 89 dataset """kinships""" +133 89 model """convkb""" +133 89 loss """softplus""" +133 89 regularizer """no""" +133 89 optimizer """adam""" +133 89 training_loop """lcwa""" +133 89 evaluator """rankbased""" +133 90 dataset """kinships""" +133 90 model """convkb""" +133 90 loss """softplus""" +133 90 regularizer """no""" +133 90 optimizer """adam""" +133 90 training_loop """lcwa""" +133 90 evaluator """rankbased""" +133 91 dataset """kinships""" +133 91 model """convkb""" +133 91 loss """softplus""" +133 91 regularizer """no""" +133 91 optimizer """adam""" +133 91 training_loop """lcwa""" +133 91 evaluator """rankbased""" +133 92 dataset """kinships""" +133 92 model """convkb""" +133 92 loss """softplus""" +133 92 regularizer """no""" +133 92 optimizer """adam""" +133 92 training_loop """lcwa""" +133 92 evaluator """rankbased""" +133 93 dataset """kinships""" +133 93 model """convkb""" +133 93 loss 
"""softplus""" +133 93 regularizer """no""" +133 93 optimizer """adam""" +133 93 training_loop """lcwa""" +133 93 evaluator """rankbased""" +133 94 dataset """kinships""" +133 94 model """convkb""" +133 94 loss """softplus""" +133 94 regularizer """no""" +133 94 optimizer """adam""" +133 94 training_loop """lcwa""" +133 94 evaluator """rankbased""" +133 95 dataset """kinships""" +133 95 model """convkb""" +133 95 loss """softplus""" +133 95 regularizer """no""" +133 95 optimizer """adam""" +133 95 training_loop """lcwa""" +133 95 evaluator """rankbased""" +134 1 model.embedding_dim 0.0 +134 1 model.hidden_dropout_rate 0.400566398854536 +134 1 model.num_filters 0.0 +134 1 loss.margin 25.805137994995857 +134 1 loss.adversarial_temperature 0.7114536173335655 +134 1 optimizer.lr 0.0012733213817093158 +134 1 negative_sampler.num_negs_per_pos 48.0 +134 1 training.batch_size 2.0 +134 2 model.embedding_dim 0.0 +134 2 model.hidden_dropout_rate 0.4183857678568185 +134 2 model.num_filters 3.0 +134 2 loss.margin 14.67568390013301 +134 2 loss.adversarial_temperature 0.4033636935401454 +134 2 optimizer.lr 0.01026484055321093 +134 2 negative_sampler.num_negs_per_pos 25.0 +134 2 training.batch_size 1.0 +134 3 model.embedding_dim 0.0 +134 3 model.hidden_dropout_rate 0.3464328467801234 +134 3 model.num_filters 1.0 +134 3 loss.margin 25.631198814515724 +134 3 loss.adversarial_temperature 0.7688129363349382 +134 3 optimizer.lr 0.008234901336464047 +134 3 negative_sampler.num_negs_per_pos 18.0 +134 3 training.batch_size 0.0 +134 4 model.embedding_dim 0.0 +134 4 model.hidden_dropout_rate 0.1960491160135144 +134 4 model.num_filters 3.0 +134 4 loss.margin 18.650963390352626 +134 4 loss.adversarial_temperature 0.34477290184481707 +134 4 optimizer.lr 0.006984144830488324 +134 4 negative_sampler.num_negs_per_pos 24.0 +134 4 training.batch_size 1.0 +134 5 model.embedding_dim 2.0 +134 5 model.hidden_dropout_rate 0.1430786316314765 +134 5 model.num_filters 3.0 +134 5 loss.margin 
24.39554686060271 +134 5 loss.adversarial_temperature 0.38931689702969874 +134 5 optimizer.lr 0.0832462133724981 +134 5 negative_sampler.num_negs_per_pos 1.0 +134 5 training.batch_size 0.0 +134 6 model.embedding_dim 3.0 +134 6 model.hidden_dropout_rate 0.10619246824063944 +134 6 model.num_filters 3.0 +134 6 loss.margin 7.166438448967488 +134 6 loss.adversarial_temperature 0.9560394930467272 +134 6 optimizer.lr 0.002091851266751155 +134 6 negative_sampler.num_negs_per_pos 30.0 +134 6 training.batch_size 2.0 +134 7 model.embedding_dim 2.0 +134 7 model.hidden_dropout_rate 0.472205248576012 +134 7 model.num_filters 1.0 +134 7 loss.margin 23.88057860857579 +134 7 loss.adversarial_temperature 0.4344973960481222 +134 7 optimizer.lr 0.07260113575418879 +134 7 negative_sampler.num_negs_per_pos 11.0 +134 7 training.batch_size 2.0 +134 8 model.embedding_dim 3.0 +134 8 model.hidden_dropout_rate 0.27249493044663287 +134 8 model.num_filters 3.0 +134 8 loss.margin 25.646706090111742 +134 8 loss.adversarial_temperature 0.12246199859655552 +134 8 optimizer.lr 0.04608433076707033 +134 8 negative_sampler.num_negs_per_pos 23.0 +134 8 training.batch_size 0.0 +134 9 model.embedding_dim 3.0 +134 9 model.hidden_dropout_rate 0.33838713563827794 +134 9 model.num_filters 2.0 +134 9 loss.margin 25.385515995158045 +134 9 loss.adversarial_temperature 0.999041004529101 +134 9 optimizer.lr 0.007433134945562998 +134 9 negative_sampler.num_negs_per_pos 3.0 +134 9 training.batch_size 0.0 +134 10 model.embedding_dim 1.0 +134 10 model.hidden_dropout_rate 0.42405127135918225 +134 10 model.num_filters 3.0 +134 10 loss.margin 3.7644990708845563 +134 10 loss.adversarial_temperature 0.46922307762749244 +134 10 optimizer.lr 0.0017424568945549896 +134 10 negative_sampler.num_negs_per_pos 40.0 +134 10 training.batch_size 1.0 +134 11 model.embedding_dim 0.0 +134 11 model.hidden_dropout_rate 0.422015796308483 +134 11 model.num_filters 1.0 +134 11 loss.margin 10.748257178553912 +134 11 
loss.adversarial_temperature 0.1796259077905704 +134 11 optimizer.lr 0.04256457821555856 +134 11 negative_sampler.num_negs_per_pos 21.0 +134 11 training.batch_size 2.0 +134 12 model.embedding_dim 2.0 +134 12 model.hidden_dropout_rate 0.2053445132904634 +134 12 model.num_filters 4.0 +134 12 loss.margin 16.445526145956944 +134 12 loss.adversarial_temperature 0.8724929387020425 +134 12 optimizer.lr 0.07769539293896346 +134 12 negative_sampler.num_negs_per_pos 20.0 +134 12 training.batch_size 2.0 +134 13 model.embedding_dim 3.0 +134 13 model.hidden_dropout_rate 0.3329034301155962 +134 13 model.num_filters 0.0 +134 13 loss.margin 10.179642964855292 +134 13 loss.adversarial_temperature 0.5358002853458601 +134 13 optimizer.lr 0.002557067865195212 +134 13 negative_sampler.num_negs_per_pos 35.0 +134 13 training.batch_size 2.0 +134 14 model.embedding_dim 2.0 +134 14 model.hidden_dropout_rate 0.25898044093976025 +134 14 model.num_filters 5.0 +134 14 loss.margin 12.026136926870512 +134 14 loss.adversarial_temperature 0.3266395836585899 +134 14 optimizer.lr 0.001887730359131224 +134 14 negative_sampler.num_negs_per_pos 13.0 +134 14 training.batch_size 1.0 +134 1 dataset """wn18rr""" +134 1 model """convkb""" +134 1 loss """nssa""" +134 1 regularizer """no""" +134 1 optimizer """adam""" +134 1 training_loop """owa""" +134 1 negative_sampler """basic""" +134 1 evaluator """rankbased""" +134 2 dataset """wn18rr""" +134 2 model """convkb""" +134 2 loss """nssa""" +134 2 regularizer """no""" +134 2 optimizer """adam""" +134 2 training_loop """owa""" +134 2 negative_sampler """basic""" +134 2 evaluator """rankbased""" +134 3 dataset """wn18rr""" +134 3 model """convkb""" +134 3 loss """nssa""" +134 3 regularizer """no""" +134 3 optimizer """adam""" +134 3 training_loop """owa""" +134 3 negative_sampler """basic""" +134 3 evaluator """rankbased""" +134 4 dataset """wn18rr""" +134 4 model """convkb""" +134 4 loss """nssa""" +134 4 regularizer """no""" +134 4 optimizer """adam""" +134 4 
training_loop """owa""" +134 4 negative_sampler """basic""" +134 4 evaluator """rankbased""" +134 5 dataset """wn18rr""" +134 5 model """convkb""" +134 5 loss """nssa""" +134 5 regularizer """no""" +134 5 optimizer """adam""" +134 5 training_loop """owa""" +134 5 negative_sampler """basic""" +134 5 evaluator """rankbased""" +134 6 dataset """wn18rr""" +134 6 model """convkb""" +134 6 loss """nssa""" +134 6 regularizer """no""" +134 6 optimizer """adam""" +134 6 training_loop """owa""" +134 6 negative_sampler """basic""" +134 6 evaluator """rankbased""" +134 7 dataset """wn18rr""" +134 7 model """convkb""" +134 7 loss """nssa""" +134 7 regularizer """no""" +134 7 optimizer """adam""" +134 7 training_loop """owa""" +134 7 negative_sampler """basic""" +134 7 evaluator """rankbased""" +134 8 dataset """wn18rr""" +134 8 model """convkb""" +134 8 loss """nssa""" +134 8 regularizer """no""" +134 8 optimizer """adam""" +134 8 training_loop """owa""" +134 8 negative_sampler """basic""" +134 8 evaluator """rankbased""" +134 9 dataset """wn18rr""" +134 9 model """convkb""" +134 9 loss """nssa""" +134 9 regularizer """no""" +134 9 optimizer """adam""" +134 9 training_loop """owa""" +134 9 negative_sampler """basic""" +134 9 evaluator """rankbased""" +134 10 dataset """wn18rr""" +134 10 model """convkb""" +134 10 loss """nssa""" +134 10 regularizer """no""" +134 10 optimizer """adam""" +134 10 training_loop """owa""" +134 10 negative_sampler """basic""" +134 10 evaluator """rankbased""" +134 11 dataset """wn18rr""" +134 11 model """convkb""" +134 11 loss """nssa""" +134 11 regularizer """no""" +134 11 optimizer """adam""" +134 11 training_loop """owa""" +134 11 negative_sampler """basic""" +134 11 evaluator """rankbased""" +134 12 dataset """wn18rr""" +134 12 model """convkb""" +134 12 loss """nssa""" +134 12 regularizer """no""" +134 12 optimizer """adam""" +134 12 training_loop """owa""" +134 12 negative_sampler """basic""" +134 12 evaluator """rankbased""" +134 13 dataset 
"""wn18rr""" +134 13 model """convkb""" +134 13 loss """nssa""" +134 13 regularizer """no""" +134 13 optimizer """adam""" +134 13 training_loop """owa""" +134 13 negative_sampler """basic""" +134 13 evaluator """rankbased""" +134 14 dataset """wn18rr""" +134 14 model """convkb""" +134 14 loss """nssa""" +134 14 regularizer """no""" +134 14 optimizer """adam""" +134 14 training_loop """owa""" +134 14 negative_sampler """basic""" +134 14 evaluator """rankbased""" +135 1 model.embedding_dim 0.0 +135 1 model.hidden_dropout_rate 0.21789372664461945 +135 1 model.num_filters 5.0 +135 1 loss.margin 29.53640457199414 +135 1 loss.adversarial_temperature 0.6348491746299496 +135 1 optimizer.lr 0.0010366285166665435 +135 1 negative_sampler.num_negs_per_pos 31.0 +135 1 training.batch_size 0.0 +135 2 model.embedding_dim 3.0 +135 2 model.hidden_dropout_rate 0.22815246894447974 +135 2 model.num_filters 5.0 +135 2 loss.margin 6.3252107983705255 +135 2 loss.adversarial_temperature 0.42322379666026055 +135 2 optimizer.lr 0.01219336170949789 +135 2 negative_sampler.num_negs_per_pos 6.0 +135 2 training.batch_size 0.0 +135 3 model.embedding_dim 1.0 +135 3 model.hidden_dropout_rate 0.20517753123472568 +135 3 model.num_filters 2.0 +135 3 loss.margin 13.457713517819194 +135 3 loss.adversarial_temperature 0.5814057630164208 +135 3 optimizer.lr 0.03655655952497116 +135 3 negative_sampler.num_negs_per_pos 18.0 +135 3 training.batch_size 2.0 +135 4 model.embedding_dim 3.0 +135 4 model.hidden_dropout_rate 0.23418725328876075 +135 4 model.num_filters 1.0 +135 4 loss.margin 23.935996839231482 +135 4 loss.adversarial_temperature 0.3400427005699249 +135 4 optimizer.lr 0.0033303549090513518 +135 4 negative_sampler.num_negs_per_pos 16.0 +135 4 training.batch_size 2.0 +135 5 model.embedding_dim 0.0 +135 5 model.hidden_dropout_rate 0.40990943029211796 +135 5 model.num_filters 5.0 +135 5 loss.margin 1.6953220812938674 +135 5 loss.adversarial_temperature 0.5130595133429744 +135 5 optimizer.lr 
0.011025692288933997 +135 5 negative_sampler.num_negs_per_pos 44.0 +135 5 training.batch_size 2.0 +135 6 model.embedding_dim 2.0 +135 6 model.hidden_dropout_rate 0.3585000405568035 +135 6 model.num_filters 2.0 +135 6 loss.margin 16.44570947691882 +135 6 loss.adversarial_temperature 0.7293799100286866 +135 6 optimizer.lr 0.021155906713571 +135 6 negative_sampler.num_negs_per_pos 45.0 +135 6 training.batch_size 0.0 +135 7 model.embedding_dim 0.0 +135 7 model.hidden_dropout_rate 0.178494961275806 +135 7 model.num_filters 3.0 +135 7 loss.margin 7.639043636255668 +135 7 loss.adversarial_temperature 0.34445800116829633 +135 7 optimizer.lr 0.004008838515250232 +135 7 negative_sampler.num_negs_per_pos 44.0 +135 7 training.batch_size 1.0 +135 8 model.embedding_dim 1.0 +135 8 model.hidden_dropout_rate 0.35557951023574336 +135 8 model.num_filters 1.0 +135 8 loss.margin 1.4563247243796493 +135 8 loss.adversarial_temperature 0.7120068309222868 +135 8 optimizer.lr 0.009327976028524386 +135 8 negative_sampler.num_negs_per_pos 25.0 +135 8 training.batch_size 2.0 +135 9 model.embedding_dim 3.0 +135 9 model.hidden_dropout_rate 0.2692035659800453 +135 9 model.num_filters 2.0 +135 9 loss.margin 1.2204456529069616 +135 9 loss.adversarial_temperature 0.28283156892172545 +135 9 optimizer.lr 0.05435779856336618 +135 9 negative_sampler.num_negs_per_pos 20.0 +135 9 training.batch_size 1.0 +135 10 model.embedding_dim 0.0 +135 10 model.hidden_dropout_rate 0.23355807745674606 +135 10 model.num_filters 1.0 +135 10 loss.margin 26.291834088815957 +135 10 loss.adversarial_temperature 0.7756266119758256 +135 10 optimizer.lr 0.004306313258770693 +135 10 negative_sampler.num_negs_per_pos 21.0 +135 10 training.batch_size 0.0 +135 11 model.embedding_dim 2.0 +135 11 model.hidden_dropout_rate 0.10076981111337675 +135 11 model.num_filters 1.0 +135 11 loss.margin 3.17960928628679 +135 11 loss.adversarial_temperature 0.23597364961004735 +135 11 optimizer.lr 0.04655312330544453 +135 11 
negative_sampler.num_negs_per_pos 28.0 +135 11 training.batch_size 2.0 +135 12 model.embedding_dim 3.0 +135 12 model.hidden_dropout_rate 0.36205992946787613 +135 12 model.num_filters 1.0 +135 12 loss.margin 5.567503412241073 +135 12 loss.adversarial_temperature 0.9955141894888262 +135 12 optimizer.lr 0.024764615116119777 +135 12 negative_sampler.num_negs_per_pos 20.0 +135 12 training.batch_size 2.0 +135 13 model.embedding_dim 0.0 +135 13 model.hidden_dropout_rate 0.20888942070477576 +135 13 model.num_filters 4.0 +135 13 loss.margin 12.8696722407759 +135 13 loss.adversarial_temperature 0.7248467501472978 +135 13 optimizer.lr 0.04204412772055907 +135 13 negative_sampler.num_negs_per_pos 24.0 +135 13 training.batch_size 0.0 +135 14 model.embedding_dim 0.0 +135 14 model.hidden_dropout_rate 0.43740790158751275 +135 14 model.num_filters 0.0 +135 14 loss.margin 18.074440456882098 +135 14 loss.adversarial_temperature 0.7693651074860595 +135 14 optimizer.lr 0.01197522999258292 +135 14 negative_sampler.num_negs_per_pos 19.0 +135 14 training.batch_size 0.0 +135 15 model.embedding_dim 0.0 +135 15 model.hidden_dropout_rate 0.18462843288975278 +135 15 model.num_filters 0.0 +135 15 loss.margin 1.9256435429516219 +135 15 loss.adversarial_temperature 0.10469459277215054 +135 15 optimizer.lr 0.009914008635844547 +135 15 negative_sampler.num_negs_per_pos 17.0 +135 15 training.batch_size 0.0 +135 16 model.embedding_dim 3.0 +135 16 model.hidden_dropout_rate 0.4991651112696742 +135 16 model.num_filters 3.0 +135 16 loss.margin 5.7942834295869465 +135 16 loss.adversarial_temperature 0.34440279625339265 +135 16 optimizer.lr 0.019146143781547827 +135 16 negative_sampler.num_negs_per_pos 34.0 +135 16 training.batch_size 2.0 +135 17 model.embedding_dim 2.0 +135 17 model.hidden_dropout_rate 0.48052553063592474 +135 17 model.num_filters 4.0 +135 17 loss.margin 1.2144015550828922 +135 17 loss.adversarial_temperature 0.8990377236308732 +135 17 optimizer.lr 0.013515845540232047 +135 17 
negative_sampler.num_negs_per_pos 15.0 +135 17 training.batch_size 2.0 +135 18 model.embedding_dim 2.0 +135 18 model.hidden_dropout_rate 0.25023733315607943 +135 18 model.num_filters 0.0 +135 18 loss.margin 9.253829154376874 +135 18 loss.adversarial_temperature 0.7792661543494088 +135 18 optimizer.lr 0.027878397654004516 +135 18 negative_sampler.num_negs_per_pos 28.0 +135 18 training.batch_size 2.0 +135 19 model.embedding_dim 0.0 +135 19 model.hidden_dropout_rate 0.13067582345148507 +135 19 model.num_filters 1.0 +135 19 loss.margin 29.24102958471734 +135 19 loss.adversarial_temperature 0.6091926642104638 +135 19 optimizer.lr 0.09715008892686273 +135 19 negative_sampler.num_negs_per_pos 30.0 +135 19 training.batch_size 1.0 +135 20 model.embedding_dim 3.0 +135 20 model.hidden_dropout_rate 0.34331910570965596 +135 20 model.num_filters 4.0 +135 20 loss.margin 23.809039519886998 +135 20 loss.adversarial_temperature 0.9468524186659631 +135 20 optimizer.lr 0.004459824362273368 +135 20 negative_sampler.num_negs_per_pos 43.0 +135 20 training.batch_size 2.0 +135 1 dataset """wn18rr""" +135 1 model """convkb""" +135 1 loss """nssa""" +135 1 regularizer """no""" +135 1 optimizer """adam""" +135 1 training_loop """owa""" +135 1 negative_sampler """basic""" +135 1 evaluator """rankbased""" +135 2 dataset """wn18rr""" +135 2 model """convkb""" +135 2 loss """nssa""" +135 2 regularizer """no""" +135 2 optimizer """adam""" +135 2 training_loop """owa""" +135 2 negative_sampler """basic""" +135 2 evaluator """rankbased""" +135 3 dataset """wn18rr""" +135 3 model """convkb""" +135 3 loss """nssa""" +135 3 regularizer """no""" +135 3 optimizer """adam""" +135 3 training_loop """owa""" +135 3 negative_sampler """basic""" +135 3 evaluator """rankbased""" +135 4 dataset """wn18rr""" +135 4 model """convkb""" +135 4 loss """nssa""" +135 4 regularizer """no""" +135 4 optimizer """adam""" +135 4 training_loop """owa""" +135 4 negative_sampler """basic""" +135 4 evaluator """rankbased""" 
+135 5 dataset """wn18rr""" +135 5 model """convkb""" +135 5 loss """nssa""" +135 5 regularizer """no""" +135 5 optimizer """adam""" +135 5 training_loop """owa""" +135 5 negative_sampler """basic""" +135 5 evaluator """rankbased""" +135 6 dataset """wn18rr""" +135 6 model """convkb""" +135 6 loss """nssa""" +135 6 regularizer """no""" +135 6 optimizer """adam""" +135 6 training_loop """owa""" +135 6 negative_sampler """basic""" +135 6 evaluator """rankbased""" +135 7 dataset """wn18rr""" +135 7 model """convkb""" +135 7 loss """nssa""" +135 7 regularizer """no""" +135 7 optimizer """adam""" +135 7 training_loop """owa""" +135 7 negative_sampler """basic""" +135 7 evaluator """rankbased""" +135 8 dataset """wn18rr""" +135 8 model """convkb""" +135 8 loss """nssa""" +135 8 regularizer """no""" +135 8 optimizer """adam""" +135 8 training_loop """owa""" +135 8 negative_sampler """basic""" +135 8 evaluator """rankbased""" +135 9 dataset """wn18rr""" +135 9 model """convkb""" +135 9 loss """nssa""" +135 9 regularizer """no""" +135 9 optimizer """adam""" +135 9 training_loop """owa""" +135 9 negative_sampler """basic""" +135 9 evaluator """rankbased""" +135 10 dataset """wn18rr""" +135 10 model """convkb""" +135 10 loss """nssa""" +135 10 regularizer """no""" +135 10 optimizer """adam""" +135 10 training_loop """owa""" +135 10 negative_sampler """basic""" +135 10 evaluator """rankbased""" +135 11 dataset """wn18rr""" +135 11 model """convkb""" +135 11 loss """nssa""" +135 11 regularizer """no""" +135 11 optimizer """adam""" +135 11 training_loop """owa""" +135 11 negative_sampler """basic""" +135 11 evaluator """rankbased""" +135 12 dataset """wn18rr""" +135 12 model """convkb""" +135 12 loss """nssa""" +135 12 regularizer """no""" +135 12 optimizer """adam""" +135 12 training_loop """owa""" +135 12 negative_sampler """basic""" +135 12 evaluator """rankbased""" +135 13 dataset """wn18rr""" +135 13 model """convkb""" +135 13 loss """nssa""" +135 13 regularizer """no""" 
+135 13 optimizer """adam""" +135 13 training_loop """owa""" +135 13 negative_sampler """basic""" +135 13 evaluator """rankbased""" +135 14 dataset """wn18rr""" +135 14 model """convkb""" +135 14 loss """nssa""" +135 14 regularizer """no""" +135 14 optimizer """adam""" +135 14 training_loop """owa""" +135 14 negative_sampler """basic""" +135 14 evaluator """rankbased""" +135 15 dataset """wn18rr""" +135 15 model """convkb""" +135 15 loss """nssa""" +135 15 regularizer """no""" +135 15 optimizer """adam""" +135 15 training_loop """owa""" +135 15 negative_sampler """basic""" +135 15 evaluator """rankbased""" +135 16 dataset """wn18rr""" +135 16 model """convkb""" +135 16 loss """nssa""" +135 16 regularizer """no""" +135 16 optimizer """adam""" +135 16 training_loop """owa""" +135 16 negative_sampler """basic""" +135 16 evaluator """rankbased""" +135 17 dataset """wn18rr""" +135 17 model """convkb""" +135 17 loss """nssa""" +135 17 regularizer """no""" +135 17 optimizer """adam""" +135 17 training_loop """owa""" +135 17 negative_sampler """basic""" +135 17 evaluator """rankbased""" +135 18 dataset """wn18rr""" +135 18 model """convkb""" +135 18 loss """nssa""" +135 18 regularizer """no""" +135 18 optimizer """adam""" +135 18 training_loop """owa""" +135 18 negative_sampler """basic""" +135 18 evaluator """rankbased""" +135 19 dataset """wn18rr""" +135 19 model """convkb""" +135 19 loss """nssa""" +135 19 regularizer """no""" +135 19 optimizer """adam""" +135 19 training_loop """owa""" +135 19 negative_sampler """basic""" +135 19 evaluator """rankbased""" +135 20 dataset """wn18rr""" +135 20 model """convkb""" +135 20 loss """nssa""" +135 20 regularizer """no""" +135 20 optimizer """adam""" +135 20 training_loop """owa""" +135 20 negative_sampler """basic""" +135 20 evaluator """rankbased""" +136 1 model.embedding_dim 1.0 +136 1 model.hidden_dropout_rate 0.20343882553748763 +136 1 model.num_filters 4.0 +136 1 optimizer.lr 0.001844037285779324 +136 1 
negative_sampler.num_negs_per_pos 48.0 +136 1 training.batch_size 0.0 +136 2 model.embedding_dim 1.0 +136 2 model.hidden_dropout_rate 0.15327156785407517 +136 2 model.num_filters 3.0 +136 2 optimizer.lr 0.011161167123003584 +136 2 negative_sampler.num_negs_per_pos 38.0 +136 2 training.batch_size 2.0 +136 3 model.embedding_dim 2.0 +136 3 model.hidden_dropout_rate 0.3979500558272047 +136 3 model.num_filters 5.0 +136 3 optimizer.lr 0.004630845071229511 +136 3 negative_sampler.num_negs_per_pos 8.0 +136 3 training.batch_size 2.0 +136 4 model.embedding_dim 3.0 +136 4 model.hidden_dropout_rate 0.31690911126529014 +136 4 model.num_filters 2.0 +136 4 optimizer.lr 0.0065901053581288 +136 4 negative_sampler.num_negs_per_pos 23.0 +136 4 training.batch_size 2.0 +136 5 model.embedding_dim 0.0 +136 5 model.hidden_dropout_rate 0.397807567348872 +136 5 model.num_filters 3.0 +136 5 optimizer.lr 0.05872225049505192 +136 5 negative_sampler.num_negs_per_pos 42.0 +136 5 training.batch_size 2.0 +136 6 model.embedding_dim 2.0 +136 6 model.hidden_dropout_rate 0.1758129594424509 +136 6 model.num_filters 0.0 +136 6 optimizer.lr 0.0011630992022161563 +136 6 negative_sampler.num_negs_per_pos 10.0 +136 6 training.batch_size 2.0 +136 7 model.embedding_dim 2.0 +136 7 model.hidden_dropout_rate 0.16296112383721995 +136 7 model.num_filters 2.0 +136 7 optimizer.lr 0.0026435601411511763 +136 7 negative_sampler.num_negs_per_pos 22.0 +136 7 training.batch_size 0.0 +136 8 model.embedding_dim 3.0 +136 8 model.hidden_dropout_rate 0.48678857043883444 +136 8 model.num_filters 0.0 +136 8 optimizer.lr 0.0037562068235654825 +136 8 negative_sampler.num_negs_per_pos 21.0 +136 8 training.batch_size 1.0 +136 9 model.embedding_dim 0.0 +136 9 model.hidden_dropout_rate 0.4470577061714101 +136 9 model.num_filters 3.0 +136 9 optimizer.lr 0.015348595924223319 +136 9 negative_sampler.num_negs_per_pos 6.0 +136 9 training.batch_size 2.0 +136 10 model.embedding_dim 0.0 +136 10 model.hidden_dropout_rate 0.3471185416630425 
+136 10 model.num_filters 5.0 +136 10 optimizer.lr 0.01977484068848156 +136 10 negative_sampler.num_negs_per_pos 30.0 +136 10 training.batch_size 0.0 +136 11 model.embedding_dim 3.0 +136 11 model.hidden_dropout_rate 0.44598293432399383 +136 11 model.num_filters 1.0 +136 11 optimizer.lr 0.0010496310503717813 +136 11 negative_sampler.num_negs_per_pos 42.0 +136 11 training.batch_size 2.0 +136 12 model.embedding_dim 1.0 +136 12 model.hidden_dropout_rate 0.463911890056952 +136 12 model.num_filters 0.0 +136 12 optimizer.lr 0.07604946846176358 +136 12 negative_sampler.num_negs_per_pos 1.0 +136 12 training.batch_size 2.0 +136 13 model.embedding_dim 2.0 +136 13 model.hidden_dropout_rate 0.3651484064676436 +136 13 model.num_filters 2.0 +136 13 optimizer.lr 0.0022923738922046317 +136 13 negative_sampler.num_negs_per_pos 15.0 +136 13 training.batch_size 0.0 +136 1 dataset """wn18rr""" +136 1 model """convkb""" +136 1 loss """bceaftersigmoid""" +136 1 regularizer """no""" +136 1 optimizer """adam""" +136 1 training_loop """owa""" +136 1 negative_sampler """basic""" +136 1 evaluator """rankbased""" +136 2 dataset """wn18rr""" +136 2 model """convkb""" +136 2 loss """bceaftersigmoid""" +136 2 regularizer """no""" +136 2 optimizer """adam""" +136 2 training_loop """owa""" +136 2 negative_sampler """basic""" +136 2 evaluator """rankbased""" +136 3 dataset """wn18rr""" +136 3 model """convkb""" +136 3 loss """bceaftersigmoid""" +136 3 regularizer """no""" +136 3 optimizer """adam""" +136 3 training_loop """owa""" +136 3 negative_sampler """basic""" +136 3 evaluator """rankbased""" +136 4 dataset """wn18rr""" +136 4 model """convkb""" +136 4 loss """bceaftersigmoid""" +136 4 regularizer """no""" +136 4 optimizer """adam""" +136 4 training_loop """owa""" +136 4 negative_sampler """basic""" +136 4 evaluator """rankbased""" +136 5 dataset """wn18rr""" +136 5 model """convkb""" +136 5 loss """bceaftersigmoid""" +136 5 regularizer """no""" +136 5 optimizer """adam""" +136 5 training_loop 
"""owa""" +136 5 negative_sampler """basic""" +136 5 evaluator """rankbased""" +136 6 dataset """wn18rr""" +136 6 model """convkb""" +136 6 loss """bceaftersigmoid""" +136 6 regularizer """no""" +136 6 optimizer """adam""" +136 6 training_loop """owa""" +136 6 negative_sampler """basic""" +136 6 evaluator """rankbased""" +136 7 dataset """wn18rr""" +136 7 model """convkb""" +136 7 loss """bceaftersigmoid""" +136 7 regularizer """no""" +136 7 optimizer """adam""" +136 7 training_loop """owa""" +136 7 negative_sampler """basic""" +136 7 evaluator """rankbased""" +136 8 dataset """wn18rr""" +136 8 model """convkb""" +136 8 loss """bceaftersigmoid""" +136 8 regularizer """no""" +136 8 optimizer """adam""" +136 8 training_loop """owa""" +136 8 negative_sampler """basic""" +136 8 evaluator """rankbased""" +136 9 dataset """wn18rr""" +136 9 model """convkb""" +136 9 loss """bceaftersigmoid""" +136 9 regularizer """no""" +136 9 optimizer """adam""" +136 9 training_loop """owa""" +136 9 negative_sampler """basic""" +136 9 evaluator """rankbased""" +136 10 dataset """wn18rr""" +136 10 model """convkb""" +136 10 loss """bceaftersigmoid""" +136 10 regularizer """no""" +136 10 optimizer """adam""" +136 10 training_loop """owa""" +136 10 negative_sampler """basic""" +136 10 evaluator """rankbased""" +136 11 dataset """wn18rr""" +136 11 model """convkb""" +136 11 loss """bceaftersigmoid""" +136 11 regularizer """no""" +136 11 optimizer """adam""" +136 11 training_loop """owa""" +136 11 negative_sampler """basic""" +136 11 evaluator """rankbased""" +136 12 dataset """wn18rr""" +136 12 model """convkb""" +136 12 loss """bceaftersigmoid""" +136 12 regularizer """no""" +136 12 optimizer """adam""" +136 12 training_loop """owa""" +136 12 negative_sampler """basic""" +136 12 evaluator """rankbased""" +136 13 dataset """wn18rr""" +136 13 model """convkb""" +136 13 loss """bceaftersigmoid""" +136 13 regularizer """no""" +136 13 optimizer """adam""" +136 13 training_loop """owa""" +136 13 
negative_sampler """basic""" +136 13 evaluator """rankbased""" +137 1 model.embedding_dim 0.0 +137 1 model.hidden_dropout_rate 0.22123792880666643 +137 1 model.num_filters 5.0 +137 1 optimizer.lr 0.08738982774895951 +137 1 negative_sampler.num_negs_per_pos 26.0 +137 1 training.batch_size 2.0 +137 2 model.embedding_dim 2.0 +137 2 model.hidden_dropout_rate 0.19502158960729998 +137 2 model.num_filters 2.0 +137 2 optimizer.lr 0.006573066127479868 +137 2 negative_sampler.num_negs_per_pos 28.0 +137 2 training.batch_size 1.0 +137 3 model.embedding_dim 3.0 +137 3 model.hidden_dropout_rate 0.33427186009022253 +137 3 model.num_filters 1.0 +137 3 optimizer.lr 0.031804910018235016 +137 3 negative_sampler.num_negs_per_pos 44.0 +137 3 training.batch_size 0.0 +137 4 model.embedding_dim 3.0 +137 4 model.hidden_dropout_rate 0.1441404202133215 +137 4 model.num_filters 3.0 +137 4 optimizer.lr 0.0769704778668888 +137 4 negative_sampler.num_negs_per_pos 24.0 +137 4 training.batch_size 0.0 +137 5 model.embedding_dim 3.0 +137 5 model.hidden_dropout_rate 0.200312066658331 +137 5 model.num_filters 2.0 +137 5 optimizer.lr 0.0188152490576239 +137 5 negative_sampler.num_negs_per_pos 40.0 +137 5 training.batch_size 2.0 +137 6 model.embedding_dim 1.0 +137 6 model.hidden_dropout_rate 0.2955477191165606 +137 6 model.num_filters 1.0 +137 6 optimizer.lr 0.003047441112233229 +137 6 negative_sampler.num_negs_per_pos 24.0 +137 6 training.batch_size 0.0 +137 7 model.embedding_dim 2.0 +137 7 model.hidden_dropout_rate 0.49558931165972386 +137 7 model.num_filters 0.0 +137 7 optimizer.lr 0.015580270492779338 +137 7 negative_sampler.num_negs_per_pos 8.0 +137 7 training.batch_size 2.0 +137 8 model.embedding_dim 3.0 +137 8 model.hidden_dropout_rate 0.421994725464612 +137 8 model.num_filters 4.0 +137 8 optimizer.lr 0.04103725487029204 +137 8 negative_sampler.num_negs_per_pos 14.0 +137 8 training.batch_size 2.0 +137 9 model.embedding_dim 0.0 +137 9 model.hidden_dropout_rate 0.380890982750117 +137 9 
model.num_filters 2.0 +137 9 optimizer.lr 0.008975157839815949 +137 9 negative_sampler.num_negs_per_pos 44.0 +137 9 training.batch_size 2.0 +137 10 model.embedding_dim 1.0 +137 10 model.hidden_dropout_rate 0.2730753364217677 +137 10 model.num_filters 5.0 +137 10 optimizer.lr 0.06764917000046741 +137 10 negative_sampler.num_negs_per_pos 1.0 +137 10 training.batch_size 1.0 +137 11 model.embedding_dim 0.0 +137 11 model.hidden_dropout_rate 0.46976436651144443 +137 11 model.num_filters 2.0 +137 11 optimizer.lr 0.020216172169915857 +137 11 negative_sampler.num_negs_per_pos 39.0 +137 11 training.batch_size 1.0 +137 12 model.embedding_dim 1.0 +137 12 model.hidden_dropout_rate 0.41194335148682015 +137 12 model.num_filters 1.0 +137 12 optimizer.lr 0.003171195892083793 +137 12 negative_sampler.num_negs_per_pos 23.0 +137 12 training.batch_size 0.0 +137 13 model.embedding_dim 3.0 +137 13 model.hidden_dropout_rate 0.35676432792668766 +137 13 model.num_filters 1.0 +137 13 optimizer.lr 0.008981529235954983 +137 13 negative_sampler.num_negs_per_pos 28.0 +137 13 training.batch_size 1.0 +137 14 model.embedding_dim 3.0 +137 14 model.hidden_dropout_rate 0.3416995825811361 +137 14 model.num_filters 0.0 +137 14 optimizer.lr 0.05410577439686769 +137 14 negative_sampler.num_negs_per_pos 24.0 +137 14 training.batch_size 2.0 +137 15 model.embedding_dim 3.0 +137 15 model.hidden_dropout_rate 0.19121667506695147 +137 15 model.num_filters 2.0 +137 15 optimizer.lr 0.08823434230153002 +137 15 negative_sampler.num_negs_per_pos 34.0 +137 15 training.batch_size 2.0 +137 16 model.embedding_dim 1.0 +137 16 model.hidden_dropout_rate 0.37704923661567025 +137 16 model.num_filters 4.0 +137 16 optimizer.lr 0.006951416228906426 +137 16 negative_sampler.num_negs_per_pos 22.0 +137 16 training.batch_size 0.0 +137 17 model.embedding_dim 3.0 +137 17 model.hidden_dropout_rate 0.28268445704874173 +137 17 model.num_filters 1.0 +137 17 optimizer.lr 0.05230452546111231 +137 17 negative_sampler.num_negs_per_pos 37.0 
+137 17 training.batch_size 1.0 +137 18 model.embedding_dim 3.0 +137 18 model.hidden_dropout_rate 0.16623068627059265 +137 18 model.num_filters 4.0 +137 18 optimizer.lr 0.011175153907985779 +137 18 negative_sampler.num_negs_per_pos 23.0 +137 18 training.batch_size 1.0 +137 19 model.embedding_dim 2.0 +137 19 model.hidden_dropout_rate 0.4576932150549099 +137 19 model.num_filters 2.0 +137 19 optimizer.lr 0.0066495412738257305 +137 19 negative_sampler.num_negs_per_pos 3.0 +137 19 training.batch_size 2.0 +137 20 model.embedding_dim 0.0 +137 20 model.hidden_dropout_rate 0.18418565128910666 +137 20 model.num_filters 5.0 +137 20 optimizer.lr 0.0019123575487323045 +137 20 negative_sampler.num_negs_per_pos 40.0 +137 20 training.batch_size 0.0 +137 21 model.embedding_dim 3.0 +137 21 model.hidden_dropout_rate 0.11918187887702408 +137 21 model.num_filters 5.0 +137 21 optimizer.lr 0.016619926905660148 +137 21 negative_sampler.num_negs_per_pos 34.0 +137 21 training.batch_size 2.0 +137 22 model.embedding_dim 0.0 +137 22 model.hidden_dropout_rate 0.11144351564534052 +137 22 model.num_filters 0.0 +137 22 optimizer.lr 0.004127354015184951 +137 22 negative_sampler.num_negs_per_pos 11.0 +137 22 training.batch_size 2.0 +137 23 model.embedding_dim 0.0 +137 23 model.hidden_dropout_rate 0.4825571602891812 +137 23 model.num_filters 3.0 +137 23 optimizer.lr 0.09947788772195465 +137 23 negative_sampler.num_negs_per_pos 14.0 +137 23 training.batch_size 2.0 +137 24 model.embedding_dim 3.0 +137 24 model.hidden_dropout_rate 0.3147046362900988 +137 24 model.num_filters 5.0 +137 24 optimizer.lr 0.00105333345097455 +137 24 negative_sampler.num_negs_per_pos 13.0 +137 24 training.batch_size 0.0 +137 25 model.embedding_dim 0.0 +137 25 model.hidden_dropout_rate 0.45573507110802114 +137 25 model.num_filters 1.0 +137 25 optimizer.lr 0.056206671979987785 +137 25 negative_sampler.num_negs_per_pos 19.0 +137 25 training.batch_size 2.0 +137 26 model.embedding_dim 0.0 +137 26 model.hidden_dropout_rate 
0.24140389536225482 +137 26 model.num_filters 4.0 +137 26 optimizer.lr 0.04026000246759484 +137 26 negative_sampler.num_negs_per_pos 41.0 +137 26 training.batch_size 1.0 +137 27 model.embedding_dim 3.0 +137 27 model.hidden_dropout_rate 0.1521431011033114 +137 27 model.num_filters 5.0 +137 27 optimizer.lr 0.01289188810492373 +137 27 negative_sampler.num_negs_per_pos 10.0 +137 27 training.batch_size 2.0 +137 28 model.embedding_dim 2.0 +137 28 model.hidden_dropout_rate 0.349927291162467 +137 28 model.num_filters 0.0 +137 28 optimizer.lr 0.010572740042071432 +137 28 negative_sampler.num_negs_per_pos 1.0 +137 28 training.batch_size 0.0 +137 29 model.embedding_dim 1.0 +137 29 model.hidden_dropout_rate 0.45064656309783296 +137 29 model.num_filters 3.0 +137 29 optimizer.lr 0.021594624463514636 +137 29 negative_sampler.num_negs_per_pos 8.0 +137 29 training.batch_size 1.0 +137 1 dataset """wn18rr""" +137 1 model """convkb""" +137 1 loss """bceaftersigmoid""" +137 1 regularizer """no""" +137 1 optimizer """adam""" +137 1 training_loop """owa""" +137 1 negative_sampler """basic""" +137 1 evaluator """rankbased""" +137 2 dataset """wn18rr""" +137 2 model """convkb""" +137 2 loss """bceaftersigmoid""" +137 2 regularizer """no""" +137 2 optimizer """adam""" +137 2 training_loop """owa""" +137 2 negative_sampler """basic""" +137 2 evaluator """rankbased""" +137 3 dataset """wn18rr""" +137 3 model """convkb""" +137 3 loss """bceaftersigmoid""" +137 3 regularizer """no""" +137 3 optimizer """adam""" +137 3 training_loop """owa""" +137 3 negative_sampler """basic""" +137 3 evaluator """rankbased""" +137 4 dataset """wn18rr""" +137 4 model """convkb""" +137 4 loss """bceaftersigmoid""" +137 4 regularizer """no""" +137 4 optimizer """adam""" +137 4 training_loop """owa""" +137 4 negative_sampler """basic""" +137 4 evaluator """rankbased""" +137 5 dataset """wn18rr""" +137 5 model """convkb""" +137 5 loss """bceaftersigmoid""" +137 5 regularizer """no""" +137 5 optimizer """adam""" +137 
5 training_loop """owa""" +137 5 negative_sampler """basic""" +137 5 evaluator """rankbased""" +137 6 dataset """wn18rr""" +137 6 model """convkb""" +137 6 loss """bceaftersigmoid""" +137 6 regularizer """no""" +137 6 optimizer """adam""" +137 6 training_loop """owa""" +137 6 negative_sampler """basic""" +137 6 evaluator """rankbased""" +137 7 dataset """wn18rr""" +137 7 model """convkb""" +137 7 loss """bceaftersigmoid""" +137 7 regularizer """no""" +137 7 optimizer """adam""" +137 7 training_loop """owa""" +137 7 negative_sampler """basic""" +137 7 evaluator """rankbased""" +137 8 dataset """wn18rr""" +137 8 model """convkb""" +137 8 loss """bceaftersigmoid""" +137 8 regularizer """no""" +137 8 optimizer """adam""" +137 8 training_loop """owa""" +137 8 negative_sampler """basic""" +137 8 evaluator """rankbased""" +137 9 dataset """wn18rr""" +137 9 model """convkb""" +137 9 loss """bceaftersigmoid""" +137 9 regularizer """no""" +137 9 optimizer """adam""" +137 9 training_loop """owa""" +137 9 negative_sampler """basic""" +137 9 evaluator """rankbased""" +137 10 dataset """wn18rr""" +137 10 model """convkb""" +137 10 loss """bceaftersigmoid""" +137 10 regularizer """no""" +137 10 optimizer """adam""" +137 10 training_loop """owa""" +137 10 negative_sampler """basic""" +137 10 evaluator """rankbased""" +137 11 dataset """wn18rr""" +137 11 model """convkb""" +137 11 loss """bceaftersigmoid""" +137 11 regularizer """no""" +137 11 optimizer """adam""" +137 11 training_loop """owa""" +137 11 negative_sampler """basic""" +137 11 evaluator """rankbased""" +137 12 dataset """wn18rr""" +137 12 model """convkb""" +137 12 loss """bceaftersigmoid""" +137 12 regularizer """no""" +137 12 optimizer """adam""" +137 12 training_loop """owa""" +137 12 negative_sampler """basic""" +137 12 evaluator """rankbased""" +137 13 dataset """wn18rr""" +137 13 model """convkb""" +137 13 loss """bceaftersigmoid""" +137 13 regularizer """no""" +137 13 optimizer """adam""" +137 13 training_loop 
"""owa""" +137 13 negative_sampler """basic""" +137 13 evaluator """rankbased""" +137 14 dataset """wn18rr""" +137 14 model """convkb""" +137 14 loss """bceaftersigmoid""" +137 14 regularizer """no""" +137 14 optimizer """adam""" +137 14 training_loop """owa""" +137 14 negative_sampler """basic""" +137 14 evaluator """rankbased""" +137 15 dataset """wn18rr""" +137 15 model """convkb""" +137 15 loss """bceaftersigmoid""" +137 15 regularizer """no""" +137 15 optimizer """adam""" +137 15 training_loop """owa""" +137 15 negative_sampler """basic""" +137 15 evaluator """rankbased""" +137 16 dataset """wn18rr""" +137 16 model """convkb""" +137 16 loss """bceaftersigmoid""" +137 16 regularizer """no""" +137 16 optimizer """adam""" +137 16 training_loop """owa""" +137 16 negative_sampler """basic""" +137 16 evaluator """rankbased""" +137 17 dataset """wn18rr""" +137 17 model """convkb""" +137 17 loss """bceaftersigmoid""" +137 17 regularizer """no""" +137 17 optimizer """adam""" +137 17 training_loop """owa""" +137 17 negative_sampler """basic""" +137 17 evaluator """rankbased""" +137 18 dataset """wn18rr""" +137 18 model """convkb""" +137 18 loss """bceaftersigmoid""" +137 18 regularizer """no""" +137 18 optimizer """adam""" +137 18 training_loop """owa""" +137 18 negative_sampler """basic""" +137 18 evaluator """rankbased""" +137 19 dataset """wn18rr""" +137 19 model """convkb""" +137 19 loss """bceaftersigmoid""" +137 19 regularizer """no""" +137 19 optimizer """adam""" +137 19 training_loop """owa""" +137 19 negative_sampler """basic""" +137 19 evaluator """rankbased""" +137 20 dataset """wn18rr""" +137 20 model """convkb""" +137 20 loss """bceaftersigmoid""" +137 20 regularizer """no""" +137 20 optimizer """adam""" +137 20 training_loop """owa""" +137 20 negative_sampler """basic""" +137 20 evaluator """rankbased""" +137 21 dataset """wn18rr""" +137 21 model """convkb""" +137 21 loss """bceaftersigmoid""" +137 21 regularizer """no""" +137 21 optimizer """adam""" +137 
21 training_loop """owa""" +137 21 negative_sampler """basic""" +137 21 evaluator """rankbased""" +137 22 dataset """wn18rr""" +137 22 model """convkb""" +137 22 loss """bceaftersigmoid""" +137 22 regularizer """no""" +137 22 optimizer """adam""" +137 22 training_loop """owa""" +137 22 negative_sampler """basic""" +137 22 evaluator """rankbased""" +137 23 dataset """wn18rr""" +137 23 model """convkb""" +137 23 loss """bceaftersigmoid""" +137 23 regularizer """no""" +137 23 optimizer """adam""" +137 23 training_loop """owa""" +137 23 negative_sampler """basic""" +137 23 evaluator """rankbased""" +137 24 dataset """wn18rr""" +137 24 model """convkb""" +137 24 loss """bceaftersigmoid""" +137 24 regularizer """no""" +137 24 optimizer """adam""" +137 24 training_loop """owa""" +137 24 negative_sampler """basic""" +137 24 evaluator """rankbased""" +137 25 dataset """wn18rr""" +137 25 model """convkb""" +137 25 loss """bceaftersigmoid""" +137 25 regularizer """no""" +137 25 optimizer """adam""" +137 25 training_loop """owa""" +137 25 negative_sampler """basic""" +137 25 evaluator """rankbased""" +137 26 dataset """wn18rr""" +137 26 model """convkb""" +137 26 loss """bceaftersigmoid""" +137 26 regularizer """no""" +137 26 optimizer """adam""" +137 26 training_loop """owa""" +137 26 negative_sampler """basic""" +137 26 evaluator """rankbased""" +137 27 dataset """wn18rr""" +137 27 model """convkb""" +137 27 loss """bceaftersigmoid""" +137 27 regularizer """no""" +137 27 optimizer """adam""" +137 27 training_loop """owa""" +137 27 negative_sampler """basic""" +137 27 evaluator """rankbased""" +137 28 dataset """wn18rr""" +137 28 model """convkb""" +137 28 loss """bceaftersigmoid""" +137 28 regularizer """no""" +137 28 optimizer """adam""" +137 28 training_loop """owa""" +137 28 negative_sampler """basic""" +137 28 evaluator """rankbased""" +137 29 dataset """wn18rr""" +137 29 model """convkb""" +137 29 loss """bceaftersigmoid""" +137 29 regularizer """no""" +137 29 optimizer 
"""adam""" +137 29 training_loop """owa""" +137 29 negative_sampler """basic""" +137 29 evaluator """rankbased""" +138 1 model.embedding_dim 0.0 +138 1 model.hidden_dropout_rate 0.3098720421603543 +138 1 model.num_filters 5.0 +138 1 optimizer.lr 0.0012205082982589324 +138 1 negative_sampler.num_negs_per_pos 12.0 +138 1 training.batch_size 1.0 +138 2 model.embedding_dim 1.0 +138 2 model.hidden_dropout_rate 0.11434891894813984 +138 2 model.num_filters 3.0 +138 2 optimizer.lr 0.008408331409398971 +138 2 negative_sampler.num_negs_per_pos 44.0 +138 2 training.batch_size 0.0 +138 3 model.embedding_dim 3.0 +138 3 model.hidden_dropout_rate 0.28391230697457953 +138 3 model.num_filters 1.0 +138 3 optimizer.lr 0.005960160633935983 +138 3 negative_sampler.num_negs_per_pos 32.0 +138 3 training.batch_size 1.0 +138 4 model.embedding_dim 2.0 +138 4 model.hidden_dropout_rate 0.14334527861356397 +138 4 model.num_filters 3.0 +138 4 optimizer.lr 0.0038605801395422165 +138 4 negative_sampler.num_negs_per_pos 7.0 +138 4 training.batch_size 0.0 +138 5 model.embedding_dim 1.0 +138 5 model.hidden_dropout_rate 0.48256193184372526 +138 5 model.num_filters 3.0 +138 5 optimizer.lr 0.007453750774524019 +138 5 negative_sampler.num_negs_per_pos 11.0 +138 5 training.batch_size 1.0 +138 6 model.embedding_dim 2.0 +138 6 model.hidden_dropout_rate 0.2617061882749933 +138 6 model.num_filters 3.0 +138 6 optimizer.lr 0.09909658609634141 +138 6 negative_sampler.num_negs_per_pos 49.0 +138 6 training.batch_size 1.0 +138 7 model.embedding_dim 0.0 +138 7 model.hidden_dropout_rate 0.18246182837988234 +138 7 model.num_filters 5.0 +138 7 optimizer.lr 0.052486430979472136 +138 7 negative_sampler.num_negs_per_pos 10.0 +138 7 training.batch_size 2.0 +138 8 model.embedding_dim 0.0 +138 8 model.hidden_dropout_rate 0.38607532717420817 +138 8 model.num_filters 2.0 +138 8 optimizer.lr 0.04918506629200737 +138 8 negative_sampler.num_negs_per_pos 17.0 +138 8 training.batch_size 1.0 +138 9 model.embedding_dim 1.0 +138 9 
model.hidden_dropout_rate 0.4590285337817651 +138 9 model.num_filters 0.0 +138 9 optimizer.lr 0.002062609556688845 +138 9 negative_sampler.num_negs_per_pos 16.0 +138 9 training.batch_size 0.0 +138 10 model.embedding_dim 3.0 +138 10 model.hidden_dropout_rate 0.2837374876996211 +138 10 model.num_filters 0.0 +138 10 optimizer.lr 0.027979620412183638 +138 10 negative_sampler.num_negs_per_pos 23.0 +138 10 training.batch_size 0.0 +138 11 model.embedding_dim 2.0 +138 11 model.hidden_dropout_rate 0.31203283776213775 +138 11 model.num_filters 5.0 +138 11 optimizer.lr 0.008780024069179097 +138 11 negative_sampler.num_negs_per_pos 21.0 +138 11 training.batch_size 2.0 +138 12 model.embedding_dim 3.0 +138 12 model.hidden_dropout_rate 0.24071606277173255 +138 12 model.num_filters 0.0 +138 12 optimizer.lr 0.003162392294975397 +138 12 negative_sampler.num_negs_per_pos 15.0 +138 12 training.batch_size 1.0 +138 13 model.embedding_dim 3.0 +138 13 model.hidden_dropout_rate 0.35417410412307054 +138 13 model.num_filters 5.0 +138 13 optimizer.lr 0.046844341452151474 +138 13 negative_sampler.num_negs_per_pos 10.0 +138 13 training.batch_size 0.0 +138 14 model.embedding_dim 3.0 +138 14 model.hidden_dropout_rate 0.39610999354763643 +138 14 model.num_filters 0.0 +138 14 optimizer.lr 0.08820061991002531 +138 14 negative_sampler.num_negs_per_pos 35.0 +138 14 training.batch_size 1.0 +138 15 model.embedding_dim 3.0 +138 15 model.hidden_dropout_rate 0.31005579410545 +138 15 model.num_filters 2.0 +138 15 optimizer.lr 0.06046316152676722 +138 15 negative_sampler.num_negs_per_pos 9.0 +138 15 training.batch_size 1.0 +138 16 model.embedding_dim 1.0 +138 16 model.hidden_dropout_rate 0.23680521186412246 +138 16 model.num_filters 3.0 +138 16 optimizer.lr 0.0021213224703440295 +138 16 negative_sampler.num_negs_per_pos 25.0 +138 16 training.batch_size 0.0 +138 17 model.embedding_dim 2.0 +138 17 model.hidden_dropout_rate 0.340877039879943 +138 17 model.num_filters 3.0 +138 17 optimizer.lr 
0.006230563343054289 +138 17 negative_sampler.num_negs_per_pos 26.0 +138 17 training.batch_size 0.0 +138 1 dataset """wn18rr""" +138 1 model """convkb""" +138 1 loss """softplus""" +138 1 regularizer """no""" +138 1 optimizer """adam""" +138 1 training_loop """owa""" +138 1 negative_sampler """basic""" +138 1 evaluator """rankbased""" +138 2 dataset """wn18rr""" +138 2 model """convkb""" +138 2 loss """softplus""" +138 2 regularizer """no""" +138 2 optimizer """adam""" +138 2 training_loop """owa""" +138 2 negative_sampler """basic""" +138 2 evaluator """rankbased""" +138 3 dataset """wn18rr""" +138 3 model """convkb""" +138 3 loss """softplus""" +138 3 regularizer """no""" +138 3 optimizer """adam""" +138 3 training_loop """owa""" +138 3 negative_sampler """basic""" +138 3 evaluator """rankbased""" +138 4 dataset """wn18rr""" +138 4 model """convkb""" +138 4 loss """softplus""" +138 4 regularizer """no""" +138 4 optimizer """adam""" +138 4 training_loop """owa""" +138 4 negative_sampler """basic""" +138 4 evaluator """rankbased""" +138 5 dataset """wn18rr""" +138 5 model """convkb""" +138 5 loss """softplus""" +138 5 regularizer """no""" +138 5 optimizer """adam""" +138 5 training_loop """owa""" +138 5 negative_sampler """basic""" +138 5 evaluator """rankbased""" +138 6 dataset """wn18rr""" +138 6 model """convkb""" +138 6 loss """softplus""" +138 6 regularizer """no""" +138 6 optimizer """adam""" +138 6 training_loop """owa""" +138 6 negative_sampler """basic""" +138 6 evaluator """rankbased""" +138 7 dataset """wn18rr""" +138 7 model """convkb""" +138 7 loss """softplus""" +138 7 regularizer """no""" +138 7 optimizer """adam""" +138 7 training_loop """owa""" +138 7 negative_sampler """basic""" +138 7 evaluator """rankbased""" +138 8 dataset """wn18rr""" +138 8 model """convkb""" +138 8 loss """softplus""" +138 8 regularizer """no""" +138 8 optimizer """adam""" +138 8 training_loop """owa""" +138 8 negative_sampler """basic""" +138 8 evaluator """rankbased""" 
+138 9 dataset """wn18rr""" +138 9 model """convkb""" +138 9 loss """softplus""" +138 9 regularizer """no""" +138 9 optimizer """adam""" +138 9 training_loop """owa""" +138 9 negative_sampler """basic""" +138 9 evaluator """rankbased""" +138 10 dataset """wn18rr""" +138 10 model """convkb""" +138 10 loss """softplus""" +138 10 regularizer """no""" +138 10 optimizer """adam""" +138 10 training_loop """owa""" +138 10 negative_sampler """basic""" +138 10 evaluator """rankbased""" +138 11 dataset """wn18rr""" +138 11 model """convkb""" +138 11 loss """softplus""" +138 11 regularizer """no""" +138 11 optimizer """adam""" +138 11 training_loop """owa""" +138 11 negative_sampler """basic""" +138 11 evaluator """rankbased""" +138 12 dataset """wn18rr""" +138 12 model """convkb""" +138 12 loss """softplus""" +138 12 regularizer """no""" +138 12 optimizer """adam""" +138 12 training_loop """owa""" +138 12 negative_sampler """basic""" +138 12 evaluator """rankbased""" +138 13 dataset """wn18rr""" +138 13 model """convkb""" +138 13 loss """softplus""" +138 13 regularizer """no""" +138 13 optimizer """adam""" +138 13 training_loop """owa""" +138 13 negative_sampler """basic""" +138 13 evaluator """rankbased""" +138 14 dataset """wn18rr""" +138 14 model """convkb""" +138 14 loss """softplus""" +138 14 regularizer """no""" +138 14 optimizer """adam""" +138 14 training_loop """owa""" +138 14 negative_sampler """basic""" +138 14 evaluator """rankbased""" +138 15 dataset """wn18rr""" +138 15 model """convkb""" +138 15 loss """softplus""" +138 15 regularizer """no""" +138 15 optimizer """adam""" +138 15 training_loop """owa""" +138 15 negative_sampler """basic""" +138 15 evaluator """rankbased""" +138 16 dataset """wn18rr""" +138 16 model """convkb""" +138 16 loss """softplus""" +138 16 regularizer """no""" +138 16 optimizer """adam""" +138 16 training_loop """owa""" +138 16 negative_sampler """basic""" +138 16 evaluator """rankbased""" +138 17 dataset """wn18rr""" +138 17 model 
"""convkb""" +138 17 loss """softplus""" +138 17 regularizer """no""" +138 17 optimizer """adam""" +138 17 training_loop """owa""" +138 17 negative_sampler """basic""" +138 17 evaluator """rankbased""" +139 1 model.embedding_dim 2.0 +139 1 model.hidden_dropout_rate 0.393131883615303 +139 1 model.num_filters 2.0 +139 1 optimizer.lr 0.004632425983405459 +139 1 negative_sampler.num_negs_per_pos 11.0 +139 1 training.batch_size 0.0 +139 2 model.embedding_dim 2.0 +139 2 model.hidden_dropout_rate 0.13145736342791406 +139 2 model.num_filters 0.0 +139 2 optimizer.lr 0.001058228950796373 +139 2 negative_sampler.num_negs_per_pos 1.0 +139 2 training.batch_size 1.0 +139 3 model.embedding_dim 2.0 +139 3 model.hidden_dropout_rate 0.11254853642981835 +139 3 model.num_filters 0.0 +139 3 optimizer.lr 0.018366257404557492 +139 3 negative_sampler.num_negs_per_pos 18.0 +139 3 training.batch_size 0.0 +139 4 model.embedding_dim 2.0 +139 4 model.hidden_dropout_rate 0.32027228160334076 +139 4 model.num_filters 3.0 +139 4 optimizer.lr 0.002643790482010458 +139 4 negative_sampler.num_negs_per_pos 11.0 +139 4 training.batch_size 1.0 +139 5 model.embedding_dim 1.0 +139 5 model.hidden_dropout_rate 0.4146980095966672 +139 5 model.num_filters 1.0 +139 5 optimizer.lr 0.061795081058353865 +139 5 negative_sampler.num_negs_per_pos 18.0 +139 5 training.batch_size 2.0 +139 6 model.embedding_dim 0.0 +139 6 model.hidden_dropout_rate 0.2565655221183322 +139 6 model.num_filters 4.0 +139 6 optimizer.lr 0.04133536282533015 +139 6 negative_sampler.num_negs_per_pos 21.0 +139 6 training.batch_size 0.0 +139 7 model.embedding_dim 1.0 +139 7 model.hidden_dropout_rate 0.3714997480499511 +139 7 model.num_filters 0.0 +139 7 optimizer.lr 0.0010114867739078946 +139 7 negative_sampler.num_negs_per_pos 37.0 +139 7 training.batch_size 1.0 +139 8 model.embedding_dim 0.0 +139 8 model.hidden_dropout_rate 0.2561066134391122 +139 8 model.num_filters 4.0 +139 8 optimizer.lr 0.002486126210124274 +139 8 
negative_sampler.num_negs_per_pos 48.0 +139 8 training.batch_size 1.0 +139 9 model.embedding_dim 0.0 +139 9 model.hidden_dropout_rate 0.2439662318382445 +139 9 model.num_filters 4.0 +139 9 optimizer.lr 0.00413183873377061 +139 9 negative_sampler.num_negs_per_pos 21.0 +139 9 training.batch_size 2.0 +139 10 model.embedding_dim 2.0 +139 10 model.hidden_dropout_rate 0.22823910728210822 +139 10 model.num_filters 5.0 +139 10 optimizer.lr 0.01179937729801907 +139 10 negative_sampler.num_negs_per_pos 23.0 +139 10 training.batch_size 0.0 +139 11 model.embedding_dim 0.0 +139 11 model.hidden_dropout_rate 0.27585329407255055 +139 11 model.num_filters 1.0 +139 11 optimizer.lr 0.009757798523848964 +139 11 negative_sampler.num_negs_per_pos 23.0 +139 11 training.batch_size 0.0 +139 12 model.embedding_dim 0.0 +139 12 model.hidden_dropout_rate 0.22655004973273335 +139 12 model.num_filters 5.0 +139 12 optimizer.lr 0.06448651053158858 +139 12 negative_sampler.num_negs_per_pos 17.0 +139 12 training.batch_size 1.0 +139 13 model.embedding_dim 0.0 +139 13 model.hidden_dropout_rate 0.1932767485527379 +139 13 model.num_filters 4.0 +139 13 optimizer.lr 0.011561927546409119 +139 13 negative_sampler.num_negs_per_pos 27.0 +139 13 training.batch_size 0.0 +139 14 model.embedding_dim 0.0 +139 14 model.hidden_dropout_rate 0.21018161729070872 +139 14 model.num_filters 3.0 +139 14 optimizer.lr 0.002401650386957612 +139 14 negative_sampler.num_negs_per_pos 5.0 +139 14 training.batch_size 1.0 +139 15 model.embedding_dim 2.0 +139 15 model.hidden_dropout_rate 0.4099803966146156 +139 15 model.num_filters 4.0 +139 15 optimizer.lr 0.01558648104641203 +139 15 negative_sampler.num_negs_per_pos 32.0 +139 15 training.batch_size 2.0 +139 16 model.embedding_dim 2.0 +139 16 model.hidden_dropout_rate 0.23965147448400495 +139 16 model.num_filters 3.0 +139 16 optimizer.lr 0.012074071879384729 +139 16 negative_sampler.num_negs_per_pos 12.0 +139 16 training.batch_size 1.0 +139 17 model.embedding_dim 1.0 +139 17 
model.hidden_dropout_rate 0.12466140318957014 +139 17 model.num_filters 0.0 +139 17 optimizer.lr 0.08453417356523367 +139 17 negative_sampler.num_negs_per_pos 44.0 +139 17 training.batch_size 2.0 +139 18 model.embedding_dim 1.0 +139 18 model.hidden_dropout_rate 0.296619554323747 +139 18 model.num_filters 1.0 +139 18 optimizer.lr 0.01637673416234052 +139 18 negative_sampler.num_negs_per_pos 10.0 +139 18 training.batch_size 0.0 +139 19 model.embedding_dim 0.0 +139 19 model.hidden_dropout_rate 0.13827909237873368 +139 19 model.num_filters 2.0 +139 19 optimizer.lr 0.004699244951988597 +139 19 negative_sampler.num_negs_per_pos 33.0 +139 19 training.batch_size 0.0 +139 20 model.embedding_dim 0.0 +139 20 model.hidden_dropout_rate 0.4388642453229137 +139 20 model.num_filters 5.0 +139 20 optimizer.lr 0.015581132589587442 +139 20 negative_sampler.num_negs_per_pos 29.0 +139 20 training.batch_size 1.0 +139 21 model.embedding_dim 3.0 +139 21 model.hidden_dropout_rate 0.28663973605455845 +139 21 model.num_filters 4.0 +139 21 optimizer.lr 0.047281312985562554 +139 21 negative_sampler.num_negs_per_pos 15.0 +139 21 training.batch_size 1.0 +139 22 model.embedding_dim 0.0 +139 22 model.hidden_dropout_rate 0.13176056209765583 +139 22 model.num_filters 4.0 +139 22 optimizer.lr 0.04806913824288121 +139 22 negative_sampler.num_negs_per_pos 29.0 +139 22 training.batch_size 0.0 +139 23 model.embedding_dim 0.0 +139 23 model.hidden_dropout_rate 0.46596211791242353 +139 23 model.num_filters 5.0 +139 23 optimizer.lr 0.07359880554533449 +139 23 negative_sampler.num_negs_per_pos 22.0 +139 23 training.batch_size 0.0 +139 24 model.embedding_dim 1.0 +139 24 model.hidden_dropout_rate 0.33329131423039615 +139 24 model.num_filters 3.0 +139 24 optimizer.lr 0.002382198255758609 +139 24 negative_sampler.num_negs_per_pos 45.0 +139 24 training.batch_size 0.0 +139 1 dataset """wn18rr""" +139 1 model """convkb""" +139 1 loss """softplus""" +139 1 regularizer """no""" +139 1 optimizer """adam""" +139 1 
training_loop """owa""" +139 1 negative_sampler """basic""" +139 1 evaluator """rankbased""" +139 2 dataset """wn18rr""" +139 2 model """convkb""" +139 2 loss """softplus""" +139 2 regularizer """no""" +139 2 optimizer """adam""" +139 2 training_loop """owa""" +139 2 negative_sampler """basic""" +139 2 evaluator """rankbased""" +139 3 dataset """wn18rr""" +139 3 model """convkb""" +139 3 loss """softplus""" +139 3 regularizer """no""" +139 3 optimizer """adam""" +139 3 training_loop """owa""" +139 3 negative_sampler """basic""" +139 3 evaluator """rankbased""" +139 4 dataset """wn18rr""" +139 4 model """convkb""" +139 4 loss """softplus""" +139 4 regularizer """no""" +139 4 optimizer """adam""" +139 4 training_loop """owa""" +139 4 negative_sampler """basic""" +139 4 evaluator """rankbased""" +139 5 dataset """wn18rr""" +139 5 model """convkb""" +139 5 loss """softplus""" +139 5 regularizer """no""" +139 5 optimizer """adam""" +139 5 training_loop """owa""" +139 5 negative_sampler """basic""" +139 5 evaluator """rankbased""" +139 6 dataset """wn18rr""" +139 6 model """convkb""" +139 6 loss """softplus""" +139 6 regularizer """no""" +139 6 optimizer """adam""" +139 6 training_loop """owa""" +139 6 negative_sampler """basic""" +139 6 evaluator """rankbased""" +139 7 dataset """wn18rr""" +139 7 model """convkb""" +139 7 loss """softplus""" +139 7 regularizer """no""" +139 7 optimizer """adam""" +139 7 training_loop """owa""" +139 7 negative_sampler """basic""" +139 7 evaluator """rankbased""" +139 8 dataset """wn18rr""" +139 8 model """convkb""" +139 8 loss """softplus""" +139 8 regularizer """no""" +139 8 optimizer """adam""" +139 8 training_loop """owa""" +139 8 negative_sampler """basic""" +139 8 evaluator """rankbased""" +139 9 dataset """wn18rr""" +139 9 model """convkb""" +139 9 loss """softplus""" +139 9 regularizer """no""" +139 9 optimizer """adam""" +139 9 training_loop """owa""" +139 9 negative_sampler """basic""" +139 9 evaluator """rankbased""" +139 10 
dataset """wn18rr""" +139 10 model """convkb""" +139 10 loss """softplus""" +139 10 regularizer """no""" +139 10 optimizer """adam""" +139 10 training_loop """owa""" +139 10 negative_sampler """basic""" +139 10 evaluator """rankbased""" +139 11 dataset """wn18rr""" +139 11 model """convkb""" +139 11 loss """softplus""" +139 11 regularizer """no""" +139 11 optimizer """adam""" +139 11 training_loop """owa""" +139 11 negative_sampler """basic""" +139 11 evaluator """rankbased""" +139 12 dataset """wn18rr""" +139 12 model """convkb""" +139 12 loss """softplus""" +139 12 regularizer """no""" +139 12 optimizer """adam""" +139 12 training_loop """owa""" +139 12 negative_sampler """basic""" +139 12 evaluator """rankbased""" +139 13 dataset """wn18rr""" +139 13 model """convkb""" +139 13 loss """softplus""" +139 13 regularizer """no""" +139 13 optimizer """adam""" +139 13 training_loop """owa""" +139 13 negative_sampler """basic""" +139 13 evaluator """rankbased""" +139 14 dataset """wn18rr""" +139 14 model """convkb""" +139 14 loss """softplus""" +139 14 regularizer """no""" +139 14 optimizer """adam""" +139 14 training_loop """owa""" +139 14 negative_sampler """basic""" +139 14 evaluator """rankbased""" +139 15 dataset """wn18rr""" +139 15 model """convkb""" +139 15 loss """softplus""" +139 15 regularizer """no""" +139 15 optimizer """adam""" +139 15 training_loop """owa""" +139 15 negative_sampler """basic""" +139 15 evaluator """rankbased""" +139 16 dataset """wn18rr""" +139 16 model """convkb""" +139 16 loss """softplus""" +139 16 regularizer """no""" +139 16 optimizer """adam""" +139 16 training_loop """owa""" +139 16 negative_sampler """basic""" +139 16 evaluator """rankbased""" +139 17 dataset """wn18rr""" +139 17 model """convkb""" +139 17 loss """softplus""" +139 17 regularizer """no""" +139 17 optimizer """adam""" +139 17 training_loop """owa""" +139 17 negative_sampler """basic""" +139 17 evaluator """rankbased""" +139 18 dataset """wn18rr""" +139 18 model 
"""convkb""" +139 18 loss """softplus""" +139 18 regularizer """no""" +139 18 optimizer """adam""" +139 18 training_loop """owa""" +139 18 negative_sampler """basic""" +139 18 evaluator """rankbased""" +139 19 dataset """wn18rr""" +139 19 model """convkb""" +139 19 loss """softplus""" +139 19 regularizer """no""" +139 19 optimizer """adam""" +139 19 training_loop """owa""" +139 19 negative_sampler """basic""" +139 19 evaluator """rankbased""" +139 20 dataset """wn18rr""" +139 20 model """convkb""" +139 20 loss """softplus""" +139 20 regularizer """no""" +139 20 optimizer """adam""" +139 20 training_loop """owa""" +139 20 negative_sampler """basic""" +139 20 evaluator """rankbased""" +139 21 dataset """wn18rr""" +139 21 model """convkb""" +139 21 loss """softplus""" +139 21 regularizer """no""" +139 21 optimizer """adam""" +139 21 training_loop """owa""" +139 21 negative_sampler """basic""" +139 21 evaluator """rankbased""" +139 22 dataset """wn18rr""" +139 22 model """convkb""" +139 22 loss """softplus""" +139 22 regularizer """no""" +139 22 optimizer """adam""" +139 22 training_loop """owa""" +139 22 negative_sampler """basic""" +139 22 evaluator """rankbased""" +139 23 dataset """wn18rr""" +139 23 model """convkb""" +139 23 loss """softplus""" +139 23 regularizer """no""" +139 23 optimizer """adam""" +139 23 training_loop """owa""" +139 23 negative_sampler """basic""" +139 23 evaluator """rankbased""" +139 24 dataset """wn18rr""" +139 24 model """convkb""" +139 24 loss """softplus""" +139 24 regularizer """no""" +139 24 optimizer """adam""" +139 24 training_loop """owa""" +139 24 negative_sampler """basic""" +139 24 evaluator """rankbased""" +140 1 model.embedding_dim 2.0 +140 1 model.hidden_dropout_rate 0.42591633676471263 +140 1 model.num_filters 0.0 +140 1 loss.margin 8.403220210897553 +140 1 optimizer.lr 0.05704501892692497 +140 1 negative_sampler.num_negs_per_pos 11.0 +140 1 training.batch_size 0.0 +140 2 model.embedding_dim 1.0 +140 2 
model.hidden_dropout_rate 0.2161987831775318 +140 2 model.num_filters 4.0 +140 2 loss.margin 2.1113652977615756 +140 2 optimizer.lr 0.0010431276929026923 +140 2 negative_sampler.num_negs_per_pos 27.0 +140 2 training.batch_size 2.0 +140 3 model.embedding_dim 0.0 +140 3 model.hidden_dropout_rate 0.265930565126932 +140 3 model.num_filters 5.0 +140 3 loss.margin 5.059475934759293 +140 3 optimizer.lr 0.08190886815765483 +140 3 negative_sampler.num_negs_per_pos 13.0 +140 3 training.batch_size 1.0 +140 4 model.embedding_dim 0.0 +140 4 model.hidden_dropout_rate 0.21848663590680317 +140 4 model.num_filters 5.0 +140 4 loss.margin 2.6927412727359803 +140 4 optimizer.lr 0.00518517359805719 +140 4 negative_sampler.num_negs_per_pos 36.0 +140 4 training.batch_size 0.0 +140 5 model.embedding_dim 3.0 +140 5 model.hidden_dropout_rate 0.33755393420224644 +140 5 model.num_filters 2.0 +140 5 loss.margin 6.118243503848916 +140 5 optimizer.lr 0.016467561245904688 +140 5 negative_sampler.num_negs_per_pos 36.0 +140 5 training.batch_size 1.0 +140 6 model.embedding_dim 0.0 +140 6 model.hidden_dropout_rate 0.1938775086475355 +140 6 model.num_filters 4.0 +140 6 loss.margin 7.599869040936717 +140 6 optimizer.lr 0.023089086540654984 +140 6 negative_sampler.num_negs_per_pos 47.0 +140 6 training.batch_size 2.0 +140 7 model.embedding_dim 0.0 +140 7 model.hidden_dropout_rate 0.1753641706506044 +140 7 model.num_filters 2.0 +140 7 loss.margin 2.9956510495290587 +140 7 optimizer.lr 0.022701684792519074 +140 7 negative_sampler.num_negs_per_pos 31.0 +140 7 training.batch_size 1.0 +140 8 model.embedding_dim 2.0 +140 8 model.hidden_dropout_rate 0.34413837473464304 +140 8 model.num_filters 5.0 +140 8 loss.margin 8.98657459009642 +140 8 optimizer.lr 0.013699107756617559 +140 8 negative_sampler.num_negs_per_pos 7.0 +140 8 training.batch_size 1.0 +140 1 dataset """wn18rr""" +140 1 model """convkb""" +140 1 loss """marginranking""" +140 1 regularizer """no""" +140 1 optimizer """adam""" +140 1 training_loop 
"""owa""" +140 1 negative_sampler """basic""" +140 1 evaluator """rankbased""" +140 2 dataset """wn18rr""" +140 2 model """convkb""" +140 2 loss """marginranking""" +140 2 regularizer """no""" +140 2 optimizer """adam""" +140 2 training_loop """owa""" +140 2 negative_sampler """basic""" +140 2 evaluator """rankbased""" +140 3 dataset """wn18rr""" +140 3 model """convkb""" +140 3 loss """marginranking""" +140 3 regularizer """no""" +140 3 optimizer """adam""" +140 3 training_loop """owa""" +140 3 negative_sampler """basic""" +140 3 evaluator """rankbased""" +140 4 dataset """wn18rr""" +140 4 model """convkb""" +140 4 loss """marginranking""" +140 4 regularizer """no""" +140 4 optimizer """adam""" +140 4 training_loop """owa""" +140 4 negative_sampler """basic""" +140 4 evaluator """rankbased""" +140 5 dataset """wn18rr""" +140 5 model """convkb""" +140 5 loss """marginranking""" +140 5 regularizer """no""" +140 5 optimizer """adam""" +140 5 training_loop """owa""" +140 5 negative_sampler """basic""" +140 5 evaluator """rankbased""" +140 6 dataset """wn18rr""" +140 6 model """convkb""" +140 6 loss """marginranking""" +140 6 regularizer """no""" +140 6 optimizer """adam""" +140 6 training_loop """owa""" +140 6 negative_sampler """basic""" +140 6 evaluator """rankbased""" +140 7 dataset """wn18rr""" +140 7 model """convkb""" +140 7 loss """marginranking""" +140 7 regularizer """no""" +140 7 optimizer """adam""" +140 7 training_loop """owa""" +140 7 negative_sampler """basic""" +140 7 evaluator """rankbased""" +140 8 dataset """wn18rr""" +140 8 model """convkb""" +140 8 loss """marginranking""" +140 8 regularizer """no""" +140 8 optimizer """adam""" +140 8 training_loop """owa""" +140 8 negative_sampler """basic""" +140 8 evaluator """rankbased""" +141 1 model.embedding_dim 2.0 +141 1 model.hidden_dropout_rate 0.4732290554192993 +141 1 model.num_filters 5.0 +141 1 loss.margin 7.922949796338877 +141 1 optimizer.lr 0.00653600221084436 +141 1 
negative_sampler.num_negs_per_pos 49.0 +141 1 training.batch_size 1.0 +141 2 model.embedding_dim 1.0 +141 2 model.hidden_dropout_rate 0.23464703160343522 +141 2 model.num_filters 3.0 +141 2 loss.margin 2.3411645981371696 +141 2 optimizer.lr 0.004683594548734053 +141 2 negative_sampler.num_negs_per_pos 27.0 +141 2 training.batch_size 1.0 +141 3 model.embedding_dim 3.0 +141 3 model.hidden_dropout_rate 0.1269940350985365 +141 3 model.num_filters 4.0 +141 3 loss.margin 5.654901774265204 +141 3 optimizer.lr 0.004200094517241722 +141 3 negative_sampler.num_negs_per_pos 14.0 +141 3 training.batch_size 0.0 +141 4 model.embedding_dim 2.0 +141 4 model.hidden_dropout_rate 0.49900609835125187 +141 4 model.num_filters 1.0 +141 4 loss.margin 2.4025325010231886 +141 4 optimizer.lr 0.0011155909377009094 +141 4 negative_sampler.num_negs_per_pos 19.0 +141 4 training.batch_size 1.0 +141 5 model.embedding_dim 3.0 +141 5 model.hidden_dropout_rate 0.25512661823177923 +141 5 model.num_filters 2.0 +141 5 loss.margin 1.094864099970485 +141 5 optimizer.lr 0.012736928856249958 +141 5 negative_sampler.num_negs_per_pos 46.0 +141 5 training.batch_size 2.0 +141 6 model.embedding_dim 0.0 +141 6 model.hidden_dropout_rate 0.44956576465584286 +141 6 model.num_filters 0.0 +141 6 loss.margin 4.932290744083902 +141 6 optimizer.lr 0.07068022331110924 +141 6 negative_sampler.num_negs_per_pos 12.0 +141 6 training.batch_size 2.0 +141 7 model.embedding_dim 0.0 +141 7 model.hidden_dropout_rate 0.17030270223882193 +141 7 model.num_filters 3.0 +141 7 loss.margin 3.088924873849266 +141 7 optimizer.lr 0.002822269906667614 +141 7 negative_sampler.num_negs_per_pos 18.0 +141 7 training.batch_size 2.0 +141 8 model.embedding_dim 2.0 +141 8 model.hidden_dropout_rate 0.4091975426319035 +141 8 model.num_filters 1.0 +141 8 loss.margin 7.168231553974584 +141 8 optimizer.lr 0.010469528922253068 +141 8 negative_sampler.num_negs_per_pos 20.0 +141 8 training.batch_size 0.0 +141 9 model.embedding_dim 3.0 +141 9 
model.hidden_dropout_rate 0.3245078109701899 +141 9 model.num_filters 3.0 +141 9 loss.margin 3.3249229164628704 +141 9 optimizer.lr 0.0039654557768788404 +141 9 negative_sampler.num_negs_per_pos 4.0 +141 9 training.batch_size 2.0 +141 1 dataset """wn18rr""" +141 1 model """convkb""" +141 1 loss """marginranking""" +141 1 regularizer """no""" +141 1 optimizer """adam""" +141 1 training_loop """owa""" +141 1 negative_sampler """basic""" +141 1 evaluator """rankbased""" +141 2 dataset """wn18rr""" +141 2 model """convkb""" +141 2 loss """marginranking""" +141 2 regularizer """no""" +141 2 optimizer """adam""" +141 2 training_loop """owa""" +141 2 negative_sampler """basic""" +141 2 evaluator """rankbased""" +141 3 dataset """wn18rr""" +141 3 model """convkb""" +141 3 loss """marginranking""" +141 3 regularizer """no""" +141 3 optimizer """adam""" +141 3 training_loop """owa""" +141 3 negative_sampler """basic""" +141 3 evaluator """rankbased""" +141 4 dataset """wn18rr""" +141 4 model """convkb""" +141 4 loss """marginranking""" +141 4 regularizer """no""" +141 4 optimizer """adam""" +141 4 training_loop """owa""" +141 4 negative_sampler """basic""" +141 4 evaluator """rankbased""" +141 5 dataset """wn18rr""" +141 5 model """convkb""" +141 5 loss """marginranking""" +141 5 regularizer """no""" +141 5 optimizer """adam""" +141 5 training_loop """owa""" +141 5 negative_sampler """basic""" +141 5 evaluator """rankbased""" +141 6 dataset """wn18rr""" +141 6 model """convkb""" +141 6 loss """marginranking""" +141 6 regularizer """no""" +141 6 optimizer """adam""" +141 6 training_loop """owa""" +141 6 negative_sampler """basic""" +141 6 evaluator """rankbased""" +141 7 dataset """wn18rr""" +141 7 model """convkb""" +141 7 loss """marginranking""" +141 7 regularizer """no""" +141 7 optimizer """adam""" +141 7 training_loop """owa""" +141 7 negative_sampler """basic""" +141 7 evaluator """rankbased""" +141 8 dataset """wn18rr""" +141 8 model """convkb""" +141 8 loss 
"""marginranking""" +141 8 regularizer """no""" +141 8 optimizer """adam""" +141 8 training_loop """owa""" +141 8 negative_sampler """basic""" +141 8 evaluator """rankbased""" +141 9 dataset """wn18rr""" +141 9 model """convkb""" +141 9 loss """marginranking""" +141 9 regularizer """no""" +141 9 optimizer """adam""" +141 9 training_loop """owa""" +141 9 negative_sampler """basic""" +141 9 evaluator """rankbased""" +142 1 model.embedding_dim 2.0 +142 1 model.hidden_dropout_rate 0.31074741971420317 +142 1 model.num_filters 1.0 +142 1 optimizer.lr 0.016338253377723464 +142 1 negative_sampler.num_negs_per_pos 25.0 +142 1 training.batch_size 0.0 +142 2 model.embedding_dim 0.0 +142 2 model.hidden_dropout_rate 0.21114338056585016 +142 2 model.num_filters 0.0 +142 2 optimizer.lr 0.05479617038420803 +142 2 negative_sampler.num_negs_per_pos 49.0 +142 2 training.batch_size 3.0 +142 3 model.embedding_dim 2.0 +142 3 model.hidden_dropout_rate 0.4156952672234565 +142 3 model.num_filters 7.0 +142 3 optimizer.lr 0.003870057748134389 +142 3 negative_sampler.num_negs_per_pos 36.0 +142 3 training.batch_size 1.0 +142 4 model.embedding_dim 2.0 +142 4 model.hidden_dropout_rate 0.14823911782514296 +142 4 model.num_filters 9.0 +142 4 optimizer.lr 0.005553074186611944 +142 4 negative_sampler.num_negs_per_pos 8.0 +142 4 training.batch_size 3.0 +142 5 model.embedding_dim 2.0 +142 5 model.hidden_dropout_rate 0.4138431148935062 +142 5 model.num_filters 6.0 +142 5 optimizer.lr 0.02639107456742501 +142 5 negative_sampler.num_negs_per_pos 28.0 +142 5 training.batch_size 3.0 +142 6 model.embedding_dim 1.0 +142 6 model.hidden_dropout_rate 0.37003656012679265 +142 6 model.num_filters 9.0 +142 6 optimizer.lr 0.0145961601461672 +142 6 negative_sampler.num_negs_per_pos 15.0 +142 6 training.batch_size 2.0 +142 7 model.embedding_dim 2.0 +142 7 model.hidden_dropout_rate 0.34946350929491415 +142 7 model.num_filters 0.0 +142 7 optimizer.lr 0.0024564614087706944 +142 7 negative_sampler.num_negs_per_pos 4.0 
+142 7 training.batch_size 0.0 +142 8 model.embedding_dim 2.0 +142 8 model.hidden_dropout_rate 0.4578436872080722 +142 8 model.num_filters 9.0 +142 8 optimizer.lr 0.09546595876627327 +142 8 negative_sampler.num_negs_per_pos 23.0 +142 8 training.batch_size 0.0 +142 9 model.embedding_dim 1.0 +142 9 model.hidden_dropout_rate 0.4790174086860999 +142 9 model.num_filters 4.0 +142 9 optimizer.lr 0.020916111174012715 +142 9 negative_sampler.num_negs_per_pos 40.0 +142 9 training.batch_size 1.0 +142 10 model.embedding_dim 0.0 +142 10 model.hidden_dropout_rate 0.4331517635746748 +142 10 model.num_filters 9.0 +142 10 optimizer.lr 0.002043213367751268 +142 10 negative_sampler.num_negs_per_pos 37.0 +142 10 training.batch_size 1.0 +142 11 model.embedding_dim 1.0 +142 11 model.hidden_dropout_rate 0.2820198200761873 +142 11 model.num_filters 4.0 +142 11 optimizer.lr 0.00157906587727954 +142 11 negative_sampler.num_negs_per_pos 11.0 +142 11 training.batch_size 2.0 +142 12 model.embedding_dim 2.0 +142 12 model.hidden_dropout_rate 0.30608242506880523 +142 12 model.num_filters 7.0 +142 12 optimizer.lr 0.0013187235120609255 +142 12 negative_sampler.num_negs_per_pos 43.0 +142 12 training.batch_size 0.0 +142 13 model.embedding_dim 1.0 +142 13 model.hidden_dropout_rate 0.2500404317299592 +142 13 model.num_filters 7.0 +142 13 optimizer.lr 0.002923430600779473 +142 13 negative_sampler.num_negs_per_pos 24.0 +142 13 training.batch_size 1.0 +142 14 model.embedding_dim 1.0 +142 14 model.hidden_dropout_rate 0.11591514869326916 +142 14 model.num_filters 6.0 +142 14 optimizer.lr 0.015220775524101855 +142 14 negative_sampler.num_negs_per_pos 11.0 +142 14 training.batch_size 1.0 +142 15 model.embedding_dim 1.0 +142 15 model.hidden_dropout_rate 0.46449851732127956 +142 15 model.num_filters 0.0 +142 15 optimizer.lr 0.028028774646213766 +142 15 negative_sampler.num_negs_per_pos 24.0 +142 15 training.batch_size 2.0 +142 1 dataset """yago310""" +142 1 model """convkb""" +142 1 loss """bceaftersigmoid""" 
+142 1 regularizer """no""" +142 1 optimizer """adam""" +142 1 training_loop """owa""" +142 1 negative_sampler """basic""" +142 1 evaluator """rankbased""" +142 2 dataset """yago310""" +142 2 model """convkb""" +142 2 loss """bceaftersigmoid""" +142 2 regularizer """no""" +142 2 optimizer """adam""" +142 2 training_loop """owa""" +142 2 negative_sampler """basic""" +142 2 evaluator """rankbased""" +142 3 dataset """yago310""" +142 3 model """convkb""" +142 3 loss """bceaftersigmoid""" +142 3 regularizer """no""" +142 3 optimizer """adam""" +142 3 training_loop """owa""" +142 3 negative_sampler """basic""" +142 3 evaluator """rankbased""" +142 4 dataset """yago310""" +142 4 model """convkb""" +142 4 loss """bceaftersigmoid""" +142 4 regularizer """no""" +142 4 optimizer """adam""" +142 4 training_loop """owa""" +142 4 negative_sampler """basic""" +142 4 evaluator """rankbased""" +142 5 dataset """yago310""" +142 5 model """convkb""" +142 5 loss """bceaftersigmoid""" +142 5 regularizer """no""" +142 5 optimizer """adam""" +142 5 training_loop """owa""" +142 5 negative_sampler """basic""" +142 5 evaluator """rankbased""" +142 6 dataset """yago310""" +142 6 model """convkb""" +142 6 loss """bceaftersigmoid""" +142 6 regularizer """no""" +142 6 optimizer """adam""" +142 6 training_loop """owa""" +142 6 negative_sampler """basic""" +142 6 evaluator """rankbased""" +142 7 dataset """yago310""" +142 7 model """convkb""" +142 7 loss """bceaftersigmoid""" +142 7 regularizer """no""" +142 7 optimizer """adam""" +142 7 training_loop """owa""" +142 7 negative_sampler """basic""" +142 7 evaluator """rankbased""" +142 8 dataset """yago310""" +142 8 model """convkb""" +142 8 loss """bceaftersigmoid""" +142 8 regularizer """no""" +142 8 optimizer """adam""" +142 8 training_loop """owa""" +142 8 negative_sampler """basic""" +142 8 evaluator """rankbased""" +142 9 dataset """yago310""" +142 9 model """convkb""" +142 9 loss """bceaftersigmoid""" +142 9 regularizer """no""" +142 9 
optimizer """adam""" +142 9 training_loop """owa""" +142 9 negative_sampler """basic""" +142 9 evaluator """rankbased""" +142 10 dataset """yago310""" +142 10 model """convkb""" +142 10 loss """bceaftersigmoid""" +142 10 regularizer """no""" +142 10 optimizer """adam""" +142 10 training_loop """owa""" +142 10 negative_sampler """basic""" +142 10 evaluator """rankbased""" +142 11 dataset """yago310""" +142 11 model """convkb""" +142 11 loss """bceaftersigmoid""" +142 11 regularizer """no""" +142 11 optimizer """adam""" +142 11 training_loop """owa""" +142 11 negative_sampler """basic""" +142 11 evaluator """rankbased""" +142 12 dataset """yago310""" +142 12 model """convkb""" +142 12 loss """bceaftersigmoid""" +142 12 regularizer """no""" +142 12 optimizer """adam""" +142 12 training_loop """owa""" +142 12 negative_sampler """basic""" +142 12 evaluator """rankbased""" +142 13 dataset """yago310""" +142 13 model """convkb""" +142 13 loss """bceaftersigmoid""" +142 13 regularizer """no""" +142 13 optimizer """adam""" +142 13 training_loop """owa""" +142 13 negative_sampler """basic""" +142 13 evaluator """rankbased""" +142 14 dataset """yago310""" +142 14 model """convkb""" +142 14 loss """bceaftersigmoid""" +142 14 regularizer """no""" +142 14 optimizer """adam""" +142 14 training_loop """owa""" +142 14 negative_sampler """basic""" +142 14 evaluator """rankbased""" +142 15 dataset """yago310""" +142 15 model """convkb""" +142 15 loss """bceaftersigmoid""" +142 15 regularizer """no""" +142 15 optimizer """adam""" +142 15 training_loop """owa""" +142 15 negative_sampler """basic""" +142 15 evaluator """rankbased""" +143 1 model.embedding_dim 1.0 +143 1 model.hidden_dropout_rate 0.10374019636539478 +143 1 model.num_filters 0.0 +143 1 optimizer.lr 0.010823406330349132 +143 1 negative_sampler.num_negs_per_pos 12.0 +143 1 training.batch_size 1.0 +143 2 model.embedding_dim 0.0 +143 2 model.hidden_dropout_rate 0.21613945854244832 +143 2 model.num_filters 4.0 +143 2 
optimizer.lr 0.0011969068489170096 +143 2 negative_sampler.num_negs_per_pos 16.0 +143 2 training.batch_size 1.0 +143 3 model.embedding_dim 2.0 +143 3 model.hidden_dropout_rate 0.2025759978303451 +143 3 model.num_filters 8.0 +143 3 optimizer.lr 0.0471649981880322 +143 3 negative_sampler.num_negs_per_pos 2.0 +143 3 training.batch_size 1.0 +143 4 model.embedding_dim 2.0 +143 4 model.hidden_dropout_rate 0.14342118007227064 +143 4 model.num_filters 8.0 +143 4 optimizer.lr 0.01359660462692896 +143 4 negative_sampler.num_negs_per_pos 30.0 +143 4 training.batch_size 3.0 +143 5 model.embedding_dim 1.0 +143 5 model.hidden_dropout_rate 0.280995994836972 +143 5 model.num_filters 2.0 +143 5 optimizer.lr 0.02240902906020124 +143 5 negative_sampler.num_negs_per_pos 39.0 +143 5 training.batch_size 2.0 +143 6 model.embedding_dim 2.0 +143 6 model.hidden_dropout_rate 0.10755415109774398 +143 6 model.num_filters 5.0 +143 6 optimizer.lr 0.03558403237589352 +143 6 negative_sampler.num_negs_per_pos 22.0 +143 6 training.batch_size 0.0 +143 7 model.embedding_dim 0.0 +143 7 model.hidden_dropout_rate 0.4069320095866361 +143 7 model.num_filters 6.0 +143 7 optimizer.lr 0.009085511346751562 +143 7 negative_sampler.num_negs_per_pos 16.0 +143 7 training.batch_size 0.0 +143 8 model.embedding_dim 2.0 +143 8 model.hidden_dropout_rate 0.28518120110215806 +143 8 model.num_filters 9.0 +143 8 optimizer.lr 0.012895838231797657 +143 8 negative_sampler.num_negs_per_pos 10.0 +143 8 training.batch_size 0.0 +143 9 model.embedding_dim 2.0 +143 9 model.hidden_dropout_rate 0.34952146832951875 +143 9 model.num_filters 6.0 +143 9 optimizer.lr 0.012458807639679891 +143 9 negative_sampler.num_negs_per_pos 1.0 +143 9 training.batch_size 2.0 +143 10 model.embedding_dim 2.0 +143 10 model.hidden_dropout_rate 0.31782730483060184 +143 10 model.num_filters 3.0 +143 10 optimizer.lr 0.0027436639633017677 +143 10 negative_sampler.num_negs_per_pos 46.0 +143 10 training.batch_size 2.0 +143 11 model.embedding_dim 2.0 +143 11 
model.hidden_dropout_rate 0.12699747782166868 +143 11 model.num_filters 4.0 +143 11 optimizer.lr 0.007972681652419138 +143 11 negative_sampler.num_negs_per_pos 12.0 +143 11 training.batch_size 0.0 +143 12 model.embedding_dim 2.0 +143 12 model.hidden_dropout_rate 0.10765862205640137 +143 12 model.num_filters 0.0 +143 12 optimizer.lr 0.029197099829166063 +143 12 negative_sampler.num_negs_per_pos 38.0 +143 12 training.batch_size 2.0 +143 13 model.embedding_dim 1.0 +143 13 model.hidden_dropout_rate 0.4691128820425857 +143 13 model.num_filters 8.0 +143 13 optimizer.lr 0.05236749982179663 +143 13 negative_sampler.num_negs_per_pos 11.0 +143 13 training.batch_size 3.0 +143 14 model.embedding_dim 1.0 +143 14 model.hidden_dropout_rate 0.39115358793804733 +143 14 model.num_filters 3.0 +143 14 optimizer.lr 0.027665950805697073 +143 14 negative_sampler.num_negs_per_pos 11.0 +143 14 training.batch_size 2.0 +143 15 model.embedding_dim 0.0 +143 15 model.hidden_dropout_rate 0.30870320504132853 +143 15 model.num_filters 3.0 +143 15 optimizer.lr 0.015352937770001488 +143 15 negative_sampler.num_negs_per_pos 41.0 +143 15 training.batch_size 3.0 +143 16 model.embedding_dim 2.0 +143 16 model.hidden_dropout_rate 0.29018746964389164 +143 16 model.num_filters 3.0 +143 16 optimizer.lr 0.004662767916680009 +143 16 negative_sampler.num_negs_per_pos 49.0 +143 16 training.batch_size 3.0 +143 17 model.embedding_dim 0.0 +143 17 model.hidden_dropout_rate 0.42239331545547376 +143 17 model.num_filters 7.0 +143 17 optimizer.lr 0.0016295150942314388 +143 17 negative_sampler.num_negs_per_pos 17.0 +143 17 training.batch_size 3.0 +143 18 model.embedding_dim 2.0 +143 18 model.hidden_dropout_rate 0.44996019695092704 +143 18 model.num_filters 0.0 +143 18 optimizer.lr 0.011504181348717295 +143 18 negative_sampler.num_negs_per_pos 46.0 +143 18 training.batch_size 2.0 +143 19 model.embedding_dim 1.0 +143 19 model.hidden_dropout_rate 0.3752643551780759 +143 19 model.num_filters 7.0 +143 19 optimizer.lr 
0.025262449243699137 +143 19 negative_sampler.num_negs_per_pos 22.0 +143 19 training.batch_size 2.0 +143 20 model.embedding_dim 2.0 +143 20 model.hidden_dropout_rate 0.4422965603447552 +143 20 model.num_filters 9.0 +143 20 optimizer.lr 0.04647763789018766 +143 20 negative_sampler.num_negs_per_pos 23.0 +143 20 training.batch_size 0.0 +143 21 model.embedding_dim 1.0 +143 21 model.hidden_dropout_rate 0.49928795114061764 +143 21 model.num_filters 9.0 +143 21 optimizer.lr 0.018296213033907396 +143 21 negative_sampler.num_negs_per_pos 26.0 +143 21 training.batch_size 1.0 +143 22 model.embedding_dim 1.0 +143 22 model.hidden_dropout_rate 0.19059914218433602 +143 22 model.num_filters 3.0 +143 22 optimizer.lr 0.006122486900465374 +143 22 negative_sampler.num_negs_per_pos 1.0 +143 22 training.batch_size 0.0 +143 23 model.embedding_dim 2.0 +143 23 model.hidden_dropout_rate 0.3762573024469845 +143 23 model.num_filters 2.0 +143 23 optimizer.lr 0.0689938229335265 +143 23 negative_sampler.num_negs_per_pos 45.0 +143 23 training.batch_size 3.0 +143 24 model.embedding_dim 0.0 +143 24 model.hidden_dropout_rate 0.43807927148499604 +143 24 model.num_filters 9.0 +143 24 optimizer.lr 0.004557524328227321 +143 24 negative_sampler.num_negs_per_pos 9.0 +143 24 training.batch_size 3.0 +143 25 model.embedding_dim 2.0 +143 25 model.hidden_dropout_rate 0.3027897705136172 +143 25 model.num_filters 9.0 +143 25 optimizer.lr 0.001437610003977139 +143 25 negative_sampler.num_negs_per_pos 41.0 +143 25 training.batch_size 3.0 +143 26 model.embedding_dim 1.0 +143 26 model.hidden_dropout_rate 0.4913270122138981 +143 26 model.num_filters 2.0 +143 26 optimizer.lr 0.001796459436276782 +143 26 negative_sampler.num_negs_per_pos 35.0 +143 26 training.batch_size 3.0 +143 27 model.embedding_dim 0.0 +143 27 model.hidden_dropout_rate 0.13016720697006715 +143 27 model.num_filters 1.0 +143 27 optimizer.lr 0.05253593830637327 +143 27 negative_sampler.num_negs_per_pos 2.0 +143 27 training.batch_size 3.0 +143 1 dataset 
"""yago310""" +143 1 model """convkb""" +143 1 loss """bceaftersigmoid""" +143 1 regularizer """no""" +143 1 optimizer """adam""" +143 1 training_loop """owa""" +143 1 negative_sampler """basic""" +143 1 evaluator """rankbased""" +143 2 dataset """yago310""" +143 2 model """convkb""" +143 2 loss """bceaftersigmoid""" +143 2 regularizer """no""" +143 2 optimizer """adam""" +143 2 training_loop """owa""" +143 2 negative_sampler """basic""" +143 2 evaluator """rankbased""" +143 3 dataset """yago310""" +143 3 model """convkb""" +143 3 loss """bceaftersigmoid""" +143 3 regularizer """no""" +143 3 optimizer """adam""" +143 3 training_loop """owa""" +143 3 negative_sampler """basic""" +143 3 evaluator """rankbased""" +143 4 dataset """yago310""" +143 4 model """convkb""" +143 4 loss """bceaftersigmoid""" +143 4 regularizer """no""" +143 4 optimizer """adam""" +143 4 training_loop """owa""" +143 4 negative_sampler """basic""" +143 4 evaluator """rankbased""" +143 5 dataset """yago310""" +143 5 model """convkb""" +143 5 loss """bceaftersigmoid""" +143 5 regularizer """no""" +143 5 optimizer """adam""" +143 5 training_loop """owa""" +143 5 negative_sampler """basic""" +143 5 evaluator """rankbased""" +143 6 dataset """yago310""" +143 6 model """convkb""" +143 6 loss """bceaftersigmoid""" +143 6 regularizer """no""" +143 6 optimizer """adam""" +143 6 training_loop """owa""" +143 6 negative_sampler """basic""" +143 6 evaluator """rankbased""" +143 7 dataset """yago310""" +143 7 model """convkb""" +143 7 loss """bceaftersigmoid""" +143 7 regularizer """no""" +143 7 optimizer """adam""" +143 7 training_loop """owa""" +143 7 negative_sampler """basic""" +143 7 evaluator """rankbased""" +143 8 dataset """yago310""" +143 8 model """convkb""" +143 8 loss """bceaftersigmoid""" +143 8 regularizer """no""" +143 8 optimizer """adam""" +143 8 training_loop """owa""" +143 8 negative_sampler """basic""" +143 8 evaluator """rankbased""" +143 9 dataset """yago310""" +143 9 model """convkb""" 
+143 9 loss """bceaftersigmoid""" +143 9 regularizer """no""" +143 9 optimizer """adam""" +143 9 training_loop """owa""" +143 9 negative_sampler """basic""" +143 9 evaluator """rankbased""" +143 10 dataset """yago310""" +143 10 model """convkb""" +143 10 loss """bceaftersigmoid""" +143 10 regularizer """no""" +143 10 optimizer """adam""" +143 10 training_loop """owa""" +143 10 negative_sampler """basic""" +143 10 evaluator """rankbased""" +143 11 dataset """yago310""" +143 11 model """convkb""" +143 11 loss """bceaftersigmoid""" +143 11 regularizer """no""" +143 11 optimizer """adam""" +143 11 training_loop """owa""" +143 11 negative_sampler """basic""" +143 11 evaluator """rankbased""" +143 12 dataset """yago310""" +143 12 model """convkb""" +143 12 loss """bceaftersigmoid""" +143 12 regularizer """no""" +143 12 optimizer """adam""" +143 12 training_loop """owa""" +143 12 negative_sampler """basic""" +143 12 evaluator """rankbased""" +143 13 dataset """yago310""" +143 13 model """convkb""" +143 13 loss """bceaftersigmoid""" +143 13 regularizer """no""" +143 13 optimizer """adam""" +143 13 training_loop """owa""" +143 13 negative_sampler """basic""" +143 13 evaluator """rankbased""" +143 14 dataset """yago310""" +143 14 model """convkb""" +143 14 loss """bceaftersigmoid""" +143 14 regularizer """no""" +143 14 optimizer """adam""" +143 14 training_loop """owa""" +143 14 negative_sampler """basic""" +143 14 evaluator """rankbased""" +143 15 dataset """yago310""" +143 15 model """convkb""" +143 15 loss """bceaftersigmoid""" +143 15 regularizer """no""" +143 15 optimizer """adam""" +143 15 training_loop """owa""" +143 15 negative_sampler """basic""" +143 15 evaluator """rankbased""" +143 16 dataset """yago310""" +143 16 model """convkb""" +143 16 loss """bceaftersigmoid""" +143 16 regularizer """no""" +143 16 optimizer """adam""" +143 16 training_loop """owa""" +143 16 negative_sampler """basic""" +143 16 evaluator """rankbased""" +143 17 dataset """yago310""" +143 17 
model """convkb""" +143 17 loss """bceaftersigmoid""" +143 17 regularizer """no""" +143 17 optimizer """adam""" +143 17 training_loop """owa""" +143 17 negative_sampler """basic""" +143 17 evaluator """rankbased""" +143 18 dataset """yago310""" +143 18 model """convkb""" +143 18 loss """bceaftersigmoid""" +143 18 regularizer """no""" +143 18 optimizer """adam""" +143 18 training_loop """owa""" +143 18 negative_sampler """basic""" +143 18 evaluator """rankbased""" +143 19 dataset """yago310""" +143 19 model """convkb""" +143 19 loss """bceaftersigmoid""" +143 19 regularizer """no""" +143 19 optimizer """adam""" +143 19 training_loop """owa""" +143 19 negative_sampler """basic""" +143 19 evaluator """rankbased""" +143 20 dataset """yago310""" +143 20 model """convkb""" +143 20 loss """bceaftersigmoid""" +143 20 regularizer """no""" +143 20 optimizer """adam""" +143 20 training_loop """owa""" +143 20 negative_sampler """basic""" +143 20 evaluator """rankbased""" +143 21 dataset """yago310""" +143 21 model """convkb""" +143 21 loss """bceaftersigmoid""" +143 21 regularizer """no""" +143 21 optimizer """adam""" +143 21 training_loop """owa""" +143 21 negative_sampler """basic""" +143 21 evaluator """rankbased""" +143 22 dataset """yago310""" +143 22 model """convkb""" +143 22 loss """bceaftersigmoid""" +143 22 regularizer """no""" +143 22 optimizer """adam""" +143 22 training_loop """owa""" +143 22 negative_sampler """basic""" +143 22 evaluator """rankbased""" +143 23 dataset """yago310""" +143 23 model """convkb""" +143 23 loss """bceaftersigmoid""" +143 23 regularizer """no""" +143 23 optimizer """adam""" +143 23 training_loop """owa""" +143 23 negative_sampler """basic""" +143 23 evaluator """rankbased""" +143 24 dataset """yago310""" +143 24 model """convkb""" +143 24 loss """bceaftersigmoid""" +143 24 regularizer """no""" +143 24 optimizer """adam""" +143 24 training_loop """owa""" +143 24 negative_sampler """basic""" +143 24 evaluator """rankbased""" +143 25 
dataset """yago310""" +143 25 model """convkb""" +143 25 loss """bceaftersigmoid""" +143 25 regularizer """no""" +143 25 optimizer """adam""" +143 25 training_loop """owa""" +143 25 negative_sampler """basic""" +143 25 evaluator """rankbased""" +143 26 dataset """yago310""" +143 26 model """convkb""" +143 26 loss """bceaftersigmoid""" +143 26 regularizer """no""" +143 26 optimizer """adam""" +143 26 training_loop """owa""" +143 26 negative_sampler """basic""" +143 26 evaluator """rankbased""" +143 27 dataset """yago310""" +143 27 model """convkb""" +143 27 loss """bceaftersigmoid""" +143 27 regularizer """no""" +143 27 optimizer """adam""" +143 27 training_loop """owa""" +143 27 negative_sampler """basic""" +143 27 evaluator """rankbased""" +144 1 model.embedding_dim 0.0 +144 1 model.hidden_dropout_rate 0.16476926520905638 +144 1 model.num_filters 9.0 +144 1 optimizer.lr 0.005120020440742698 +144 1 negative_sampler.num_negs_per_pos 27.0 +144 1 training.batch_size 3.0 +144 2 model.embedding_dim 1.0 +144 2 model.hidden_dropout_rate 0.31887053182337 +144 2 model.num_filters 9.0 +144 2 optimizer.lr 0.006903126241921778 +144 2 negative_sampler.num_negs_per_pos 5.0 +144 2 training.batch_size 1.0 +144 3 model.embedding_dim 0.0 +144 3 model.hidden_dropout_rate 0.4403315062535217 +144 3 model.num_filters 9.0 +144 3 optimizer.lr 0.014223692554857713 +144 3 negative_sampler.num_negs_per_pos 3.0 +144 3 training.batch_size 1.0 +144 4 model.embedding_dim 1.0 +144 4 model.hidden_dropout_rate 0.24458041833633454 +144 4 model.num_filters 6.0 +144 4 optimizer.lr 0.0022427834869231584 +144 4 negative_sampler.num_negs_per_pos 31.0 +144 4 training.batch_size 1.0 +144 5 model.embedding_dim 0.0 +144 5 model.hidden_dropout_rate 0.3804793146307941 +144 5 model.num_filters 6.0 +144 5 optimizer.lr 0.06097995231897807 +144 5 negative_sampler.num_negs_per_pos 42.0 +144 5 training.batch_size 2.0 +144 6 model.embedding_dim 2.0 +144 6 model.hidden_dropout_rate 0.4433942082942094 +144 6 
model.num_filters 4.0 +144 6 optimizer.lr 0.001086502928756409 +144 6 negative_sampler.num_negs_per_pos 38.0 +144 6 training.batch_size 1.0 +144 7 model.embedding_dim 2.0 +144 7 model.hidden_dropout_rate 0.43963788444199625 +144 7 model.num_filters 5.0 +144 7 optimizer.lr 0.004282055434856084 +144 7 negative_sampler.num_negs_per_pos 23.0 +144 7 training.batch_size 0.0 +144 8 model.embedding_dim 0.0 +144 8 model.hidden_dropout_rate 0.47029691448137045 +144 8 model.num_filters 4.0 +144 8 optimizer.lr 0.008330343150487687 +144 8 negative_sampler.num_negs_per_pos 43.0 +144 8 training.batch_size 1.0 +144 9 model.embedding_dim 1.0 +144 9 model.hidden_dropout_rate 0.46863702891995274 +144 9 model.num_filters 0.0 +144 9 optimizer.lr 0.0014878134556527717 +144 9 negative_sampler.num_negs_per_pos 12.0 +144 9 training.batch_size 1.0 +144 10 model.embedding_dim 0.0 +144 10 model.hidden_dropout_rate 0.27478070097134016 +144 10 model.num_filters 6.0 +144 10 optimizer.lr 0.0014045639985726843 +144 10 negative_sampler.num_negs_per_pos 2.0 +144 10 training.batch_size 3.0 +144 11 model.embedding_dim 1.0 +144 11 model.hidden_dropout_rate 0.14393361683778136 +144 11 model.num_filters 6.0 +144 11 optimizer.lr 0.028653593107837376 +144 11 negative_sampler.num_negs_per_pos 20.0 +144 11 training.batch_size 3.0 +144 12 model.embedding_dim 2.0 +144 12 model.hidden_dropout_rate 0.1505979989899094 +144 12 model.num_filters 4.0 +144 12 optimizer.lr 0.002494664571040204 +144 12 negative_sampler.num_negs_per_pos 39.0 +144 12 training.batch_size 3.0 +144 13 model.embedding_dim 1.0 +144 13 model.hidden_dropout_rate 0.36072788596755356 +144 13 model.num_filters 7.0 +144 13 optimizer.lr 0.06738830782595054 +144 13 negative_sampler.num_negs_per_pos 20.0 +144 13 training.batch_size 1.0 +144 14 model.embedding_dim 2.0 +144 14 model.hidden_dropout_rate 0.19568855086390766 +144 14 model.num_filters 8.0 +144 14 optimizer.lr 0.016766546673414612 +144 14 negative_sampler.num_negs_per_pos 40.0 +144 14 
training.batch_size 2.0 +144 15 model.embedding_dim 1.0 +144 15 model.hidden_dropout_rate 0.4672347291399297 +144 15 model.num_filters 2.0 +144 15 optimizer.lr 0.04686755373581272 +144 15 negative_sampler.num_negs_per_pos 17.0 +144 15 training.batch_size 1.0 +144 16 model.embedding_dim 2.0 +144 16 model.hidden_dropout_rate 0.17429639708103112 +144 16 model.num_filters 4.0 +144 16 optimizer.lr 0.006421940460898603 +144 16 negative_sampler.num_negs_per_pos 25.0 +144 16 training.batch_size 1.0 +144 17 model.embedding_dim 1.0 +144 17 model.hidden_dropout_rate 0.4457382980041036 +144 17 model.num_filters 7.0 +144 17 optimizer.lr 0.020383874129579097 +144 17 negative_sampler.num_negs_per_pos 20.0 +144 17 training.batch_size 1.0 +144 18 model.embedding_dim 0.0 +144 18 model.hidden_dropout_rate 0.3335025022926715 +144 18 model.num_filters 3.0 +144 18 optimizer.lr 0.002210233152696977 +144 18 negative_sampler.num_negs_per_pos 47.0 +144 18 training.batch_size 0.0 +144 19 model.embedding_dim 2.0 +144 19 model.hidden_dropout_rate 0.16390744424324372 +144 19 model.num_filters 7.0 +144 19 optimizer.lr 0.024724424601541412 +144 19 negative_sampler.num_negs_per_pos 1.0 +144 19 training.batch_size 3.0 +144 20 model.embedding_dim 1.0 +144 20 model.hidden_dropout_rate 0.3391835637348348 +144 20 model.num_filters 7.0 +144 20 optimizer.lr 0.03417200540308141 +144 20 negative_sampler.num_negs_per_pos 37.0 +144 20 training.batch_size 1.0 +144 21 model.embedding_dim 0.0 +144 21 model.hidden_dropout_rate 0.3205135877960973 +144 21 model.num_filters 3.0 +144 21 optimizer.lr 0.0022565295720708644 +144 21 negative_sampler.num_negs_per_pos 40.0 +144 21 training.batch_size 0.0 +144 22 model.embedding_dim 2.0 +144 22 model.hidden_dropout_rate 0.30218430038609695 +144 22 model.num_filters 5.0 +144 22 optimizer.lr 0.002019644307734652 +144 22 negative_sampler.num_negs_per_pos 31.0 +144 22 training.batch_size 3.0 +144 23 model.embedding_dim 0.0 +144 23 model.hidden_dropout_rate 0.2732803612823585 
+144 23 model.num_filters 8.0 +144 23 optimizer.lr 0.0027413321695531063 +144 23 negative_sampler.num_negs_per_pos 9.0 +144 23 training.batch_size 3.0 +144 24 model.embedding_dim 2.0 +144 24 model.hidden_dropout_rate 0.4176144607140194 +144 24 model.num_filters 0.0 +144 24 optimizer.lr 0.003785432169813271 +144 24 negative_sampler.num_negs_per_pos 27.0 +144 24 training.batch_size 2.0 +144 25 model.embedding_dim 0.0 +144 25 model.hidden_dropout_rate 0.2780481313732871 +144 25 model.num_filters 9.0 +144 25 optimizer.lr 0.0331223246744143 +144 25 negative_sampler.num_negs_per_pos 5.0 +144 25 training.batch_size 0.0 +144 26 model.embedding_dim 0.0 +144 26 model.hidden_dropout_rate 0.3767236859243135 +144 26 model.num_filters 3.0 +144 26 optimizer.lr 0.0013783336834596969 +144 26 negative_sampler.num_negs_per_pos 23.0 +144 26 training.batch_size 1.0 +144 27 model.embedding_dim 2.0 +144 27 model.hidden_dropout_rate 0.1304010302256955 +144 27 model.num_filters 3.0 +144 27 optimizer.lr 0.004768545477264598 +144 27 negative_sampler.num_negs_per_pos 49.0 +144 27 training.batch_size 3.0 +144 28 model.embedding_dim 1.0 +144 28 model.hidden_dropout_rate 0.2525083992680855 +144 28 model.num_filters 8.0 +144 28 optimizer.lr 0.056835201922383005 +144 28 negative_sampler.num_negs_per_pos 14.0 +144 28 training.batch_size 2.0 +144 29 model.embedding_dim 1.0 +144 29 model.hidden_dropout_rate 0.2857607450750391 +144 29 model.num_filters 1.0 +144 29 optimizer.lr 0.002537106782588949 +144 29 negative_sampler.num_negs_per_pos 40.0 +144 29 training.batch_size 0.0 +144 30 model.embedding_dim 1.0 +144 30 model.hidden_dropout_rate 0.3144179485225216 +144 30 model.num_filters 8.0 +144 30 optimizer.lr 0.0021344491222666673 +144 30 negative_sampler.num_negs_per_pos 33.0 +144 30 training.batch_size 0.0 +144 31 model.embedding_dim 0.0 +144 31 model.hidden_dropout_rate 0.2687270410941138 +144 31 model.num_filters 3.0 +144 31 optimizer.lr 0.05299204273884923 +144 31 negative_sampler.num_negs_per_pos 
31.0 +144 31 training.batch_size 0.0 +144 32 model.embedding_dim 2.0 +144 32 model.hidden_dropout_rate 0.22533554592622626 +144 32 model.num_filters 2.0 +144 32 optimizer.lr 0.04021695337292201 +144 32 negative_sampler.num_negs_per_pos 44.0 +144 32 training.batch_size 3.0 +144 33 model.embedding_dim 0.0 +144 33 model.hidden_dropout_rate 0.4779415749692798 +144 33 model.num_filters 5.0 +144 33 optimizer.lr 0.02570783527094666 +144 33 negative_sampler.num_negs_per_pos 30.0 +144 33 training.batch_size 2.0 +144 34 model.embedding_dim 0.0 +144 34 model.hidden_dropout_rate 0.25828549023272995 +144 34 model.num_filters 8.0 +144 34 optimizer.lr 0.011296412838051547 +144 34 negative_sampler.num_negs_per_pos 20.0 +144 34 training.batch_size 1.0 +144 35 model.embedding_dim 1.0 +144 35 model.hidden_dropout_rate 0.4083502449675631 +144 35 model.num_filters 8.0 +144 35 optimizer.lr 0.003124447804639924 +144 35 negative_sampler.num_negs_per_pos 25.0 +144 35 training.batch_size 1.0 +144 36 model.embedding_dim 0.0 +144 36 model.hidden_dropout_rate 0.4683935057997939 +144 36 model.num_filters 2.0 +144 36 optimizer.lr 0.0015481786735960941 +144 36 negative_sampler.num_negs_per_pos 7.0 +144 36 training.batch_size 1.0 +144 37 model.embedding_dim 0.0 +144 37 model.hidden_dropout_rate 0.18551618437008488 +144 37 model.num_filters 4.0 +144 37 optimizer.lr 0.08087009703120651 +144 37 negative_sampler.num_negs_per_pos 32.0 +144 37 training.batch_size 0.0 +144 38 model.embedding_dim 2.0 +144 38 model.hidden_dropout_rate 0.47823767833888076 +144 38 model.num_filters 5.0 +144 38 optimizer.lr 0.025842122152004387 +144 38 negative_sampler.num_negs_per_pos 16.0 +144 38 training.batch_size 0.0 +144 39 model.embedding_dim 1.0 +144 39 model.hidden_dropout_rate 0.4592024225932233 +144 39 model.num_filters 0.0 +144 39 optimizer.lr 0.007327464240426292 +144 39 negative_sampler.num_negs_per_pos 3.0 +144 39 training.batch_size 1.0 +144 1 dataset """yago310""" +144 1 model """convkb""" +144 1 loss 
"""softplus""" +144 1 regularizer """no""" +144 1 optimizer """adam""" +144 1 training_loop """owa""" +144 1 negative_sampler """basic""" +144 1 evaluator """rankbased""" +144 2 dataset """yago310""" +144 2 model """convkb""" +144 2 loss """softplus""" +144 2 regularizer """no""" +144 2 optimizer """adam""" +144 2 training_loop """owa""" +144 2 negative_sampler """basic""" +144 2 evaluator """rankbased""" +144 3 dataset """yago310""" +144 3 model """convkb""" +144 3 loss """softplus""" +144 3 regularizer """no""" +144 3 optimizer """adam""" +144 3 training_loop """owa""" +144 3 negative_sampler """basic""" +144 3 evaluator """rankbased""" +144 4 dataset """yago310""" +144 4 model """convkb""" +144 4 loss """softplus""" +144 4 regularizer """no""" +144 4 optimizer """adam""" +144 4 training_loop """owa""" +144 4 negative_sampler """basic""" +144 4 evaluator """rankbased""" +144 5 dataset """yago310""" +144 5 model """convkb""" +144 5 loss """softplus""" +144 5 regularizer """no""" +144 5 optimizer """adam""" +144 5 training_loop """owa""" +144 5 negative_sampler """basic""" +144 5 evaluator """rankbased""" +144 6 dataset """yago310""" +144 6 model """convkb""" +144 6 loss """softplus""" +144 6 regularizer """no""" +144 6 optimizer """adam""" +144 6 training_loop """owa""" +144 6 negative_sampler """basic""" +144 6 evaluator """rankbased""" +144 7 dataset """yago310""" +144 7 model """convkb""" +144 7 loss """softplus""" +144 7 regularizer """no""" +144 7 optimizer """adam""" +144 7 training_loop """owa""" +144 7 negative_sampler """basic""" +144 7 evaluator """rankbased""" +144 8 dataset """yago310""" +144 8 model """convkb""" +144 8 loss """softplus""" +144 8 regularizer """no""" +144 8 optimizer """adam""" +144 8 training_loop """owa""" +144 8 negative_sampler """basic""" +144 8 evaluator """rankbased""" +144 9 dataset """yago310""" +144 9 model """convkb""" +144 9 loss """softplus""" +144 9 regularizer """no""" +144 9 optimizer """adam""" +144 9 training_loop 
"""owa""" +144 9 negative_sampler """basic""" +144 9 evaluator """rankbased""" +144 10 dataset """yago310""" +144 10 model """convkb""" +144 10 loss """softplus""" +144 10 regularizer """no""" +144 10 optimizer """adam""" +144 10 training_loop """owa""" +144 10 negative_sampler """basic""" +144 10 evaluator """rankbased""" +144 11 dataset """yago310""" +144 11 model """convkb""" +144 11 loss """softplus""" +144 11 regularizer """no""" +144 11 optimizer """adam""" +144 11 training_loop """owa""" +144 11 negative_sampler """basic""" +144 11 evaluator """rankbased""" +144 12 dataset """yago310""" +144 12 model """convkb""" +144 12 loss """softplus""" +144 12 regularizer """no""" +144 12 optimizer """adam""" +144 12 training_loop """owa""" +144 12 negative_sampler """basic""" +144 12 evaluator """rankbased""" +144 13 dataset """yago310""" +144 13 model """convkb""" +144 13 loss """softplus""" +144 13 regularizer """no""" +144 13 optimizer """adam""" +144 13 training_loop """owa""" +144 13 negative_sampler """basic""" +144 13 evaluator """rankbased""" +144 14 dataset """yago310""" +144 14 model """convkb""" +144 14 loss """softplus""" +144 14 regularizer """no""" +144 14 optimizer """adam""" +144 14 training_loop """owa""" +144 14 negative_sampler """basic""" +144 14 evaluator """rankbased""" +144 15 dataset """yago310""" +144 15 model """convkb""" +144 15 loss """softplus""" +144 15 regularizer """no""" +144 15 optimizer """adam""" +144 15 training_loop """owa""" +144 15 negative_sampler """basic""" +144 15 evaluator """rankbased""" +144 16 dataset """yago310""" +144 16 model """convkb""" +144 16 loss """softplus""" +144 16 regularizer """no""" +144 16 optimizer """adam""" +144 16 training_loop """owa""" +144 16 negative_sampler """basic""" +144 16 evaluator """rankbased""" +144 17 dataset """yago310""" +144 17 model """convkb""" +144 17 loss """softplus""" +144 17 regularizer """no""" +144 17 optimizer """adam""" +144 17 training_loop """owa""" +144 17 
negative_sampler """basic""" +144 17 evaluator """rankbased""" +144 18 dataset """yago310""" +144 18 model """convkb""" +144 18 loss """softplus""" +144 18 regularizer """no""" +144 18 optimizer """adam""" +144 18 training_loop """owa""" +144 18 negative_sampler """basic""" +144 18 evaluator """rankbased""" +144 19 dataset """yago310""" +144 19 model """convkb""" +144 19 loss """softplus""" +144 19 regularizer """no""" +144 19 optimizer """adam""" +144 19 training_loop """owa""" +144 19 negative_sampler """basic""" +144 19 evaluator """rankbased""" +144 20 dataset """yago310""" +144 20 model """convkb""" +144 20 loss """softplus""" +144 20 regularizer """no""" +144 20 optimizer """adam""" +144 20 training_loop """owa""" +144 20 negative_sampler """basic""" +144 20 evaluator """rankbased""" +144 21 dataset """yago310""" +144 21 model """convkb""" +144 21 loss """softplus""" +144 21 regularizer """no""" +144 21 optimizer """adam""" +144 21 training_loop """owa""" +144 21 negative_sampler """basic""" +144 21 evaluator """rankbased""" +144 22 dataset """yago310""" +144 22 model """convkb""" +144 22 loss """softplus""" +144 22 regularizer """no""" +144 22 optimizer """adam""" +144 22 training_loop """owa""" +144 22 negative_sampler """basic""" +144 22 evaluator """rankbased""" +144 23 dataset """yago310""" +144 23 model """convkb""" +144 23 loss """softplus""" +144 23 regularizer """no""" +144 23 optimizer """adam""" +144 23 training_loop """owa""" +144 23 negative_sampler """basic""" +144 23 evaluator """rankbased""" +144 24 dataset """yago310""" +144 24 model """convkb""" +144 24 loss """softplus""" +144 24 regularizer """no""" +144 24 optimizer """adam""" +144 24 training_loop """owa""" +144 24 negative_sampler """basic""" +144 24 evaluator """rankbased""" +144 25 dataset """yago310""" +144 25 model """convkb""" +144 25 loss """softplus""" +144 25 regularizer """no""" +144 25 optimizer """adam""" +144 25 training_loop """owa""" +144 25 negative_sampler """basic""" 
+144 25 evaluator """rankbased""" +144 26 dataset """yago310""" +144 26 model """convkb""" +144 26 loss """softplus""" +144 26 regularizer """no""" +144 26 optimizer """adam""" +144 26 training_loop """owa""" +144 26 negative_sampler """basic""" +144 26 evaluator """rankbased""" +144 27 dataset """yago310""" +144 27 model """convkb""" +144 27 loss """softplus""" +144 27 regularizer """no""" +144 27 optimizer """adam""" +144 27 training_loop """owa""" +144 27 negative_sampler """basic""" +144 27 evaluator """rankbased""" +144 28 dataset """yago310""" +144 28 model """convkb""" +144 28 loss """softplus""" +144 28 regularizer """no""" +144 28 optimizer """adam""" +144 28 training_loop """owa""" +144 28 negative_sampler """basic""" +144 28 evaluator """rankbased""" +144 29 dataset """yago310""" +144 29 model """convkb""" +144 29 loss """softplus""" +144 29 regularizer """no""" +144 29 optimizer """adam""" +144 29 training_loop """owa""" +144 29 negative_sampler """basic""" +144 29 evaluator """rankbased""" +144 30 dataset """yago310""" +144 30 model """convkb""" +144 30 loss """softplus""" +144 30 regularizer """no""" +144 30 optimizer """adam""" +144 30 training_loop """owa""" +144 30 negative_sampler """basic""" +144 30 evaluator """rankbased""" +144 31 dataset """yago310""" +144 31 model """convkb""" +144 31 loss """softplus""" +144 31 regularizer """no""" +144 31 optimizer """adam""" +144 31 training_loop """owa""" +144 31 negative_sampler """basic""" +144 31 evaluator """rankbased""" +144 32 dataset """yago310""" +144 32 model """convkb""" +144 32 loss """softplus""" +144 32 regularizer """no""" +144 32 optimizer """adam""" +144 32 training_loop """owa""" +144 32 negative_sampler """basic""" +144 32 evaluator """rankbased""" +144 33 dataset """yago310""" +144 33 model """convkb""" +144 33 loss """softplus""" +144 33 regularizer """no""" +144 33 optimizer """adam""" +144 33 training_loop """owa""" +144 33 negative_sampler """basic""" +144 33 evaluator 
"""rankbased""" +144 34 dataset """yago310""" +144 34 model """convkb""" +144 34 loss """softplus""" +144 34 regularizer """no""" +144 34 optimizer """adam""" +144 34 training_loop """owa""" +144 34 negative_sampler """basic""" +144 34 evaluator """rankbased""" +144 35 dataset """yago310""" +144 35 model """convkb""" +144 35 loss """softplus""" +144 35 regularizer """no""" +144 35 optimizer """adam""" +144 35 training_loop """owa""" +144 35 negative_sampler """basic""" +144 35 evaluator """rankbased""" +144 36 dataset """yago310""" +144 36 model """convkb""" +144 36 loss """softplus""" +144 36 regularizer """no""" +144 36 optimizer """adam""" +144 36 training_loop """owa""" +144 36 negative_sampler """basic""" +144 36 evaluator """rankbased""" +144 37 dataset """yago310""" +144 37 model """convkb""" +144 37 loss """softplus""" +144 37 regularizer """no""" +144 37 optimizer """adam""" +144 37 training_loop """owa""" +144 37 negative_sampler """basic""" +144 37 evaluator """rankbased""" +144 38 dataset """yago310""" +144 38 model """convkb""" +144 38 loss """softplus""" +144 38 regularizer """no""" +144 38 optimizer """adam""" +144 38 training_loop """owa""" +144 38 negative_sampler """basic""" +144 38 evaluator """rankbased""" +144 39 dataset """yago310""" +144 39 model """convkb""" +144 39 loss """softplus""" +144 39 regularizer """no""" +144 39 optimizer """adam""" +144 39 training_loop """owa""" +144 39 negative_sampler """basic""" +144 39 evaluator """rankbased""" +145 1 model.embedding_dim 1.0 +145 1 model.hidden_dropout_rate 0.20520219167975984 +145 1 model.num_filters 2.0 +145 1 optimizer.lr 0.031970549757396355 +145 1 negative_sampler.num_negs_per_pos 16.0 +145 1 training.batch_size 2.0 +145 2 model.embedding_dim 0.0 +145 2 model.hidden_dropout_rate 0.37831760147387206 +145 2 model.num_filters 0.0 +145 2 optimizer.lr 0.08623877164475273 +145 2 negative_sampler.num_negs_per_pos 31.0 +145 2 training.batch_size 0.0 +145 3 model.embedding_dim 0.0 +145 3 
model.hidden_dropout_rate 0.34024874395913784 +145 3 model.num_filters 1.0 +145 3 optimizer.lr 0.005251461661229927 +145 3 negative_sampler.num_negs_per_pos 25.0 +145 3 training.batch_size 2.0 +145 1 dataset """yago310""" +145 1 model """convkb""" +145 1 loss """softplus""" +145 1 regularizer """no""" +145 1 optimizer """adam""" +145 1 training_loop """owa""" +145 1 negative_sampler """basic""" +145 1 evaluator """rankbased""" +145 2 dataset """yago310""" +145 2 model """convkb""" +145 2 loss """softplus""" +145 2 regularizer """no""" +145 2 optimizer """adam""" +145 2 training_loop """owa""" +145 2 negative_sampler """basic""" +145 2 evaluator """rankbased""" +145 3 dataset """yago310""" +145 3 model """convkb""" +145 3 loss """softplus""" +145 3 regularizer """no""" +145 3 optimizer """adam""" +145 3 training_loop """owa""" +145 3 negative_sampler """basic""" +145 3 evaluator """rankbased""" +146 1 model.embedding_dim 2.0 +146 1 optimizer.lr 0.00167361149004918 +146 1 negative_sampler.num_negs_per_pos 49.0 +146 1 training.batch_size 2.0 +146 2 model.embedding_dim 0.0 +146 2 optimizer.lr 0.0010157331410579008 +146 2 negative_sampler.num_negs_per_pos 26.0 +146 2 training.batch_size 3.0 +146 3 model.embedding_dim 2.0 +146 3 optimizer.lr 0.005686524553479544 +146 3 negative_sampler.num_negs_per_pos 34.0 +146 3 training.batch_size 2.0 +146 4 model.embedding_dim 2.0 +146 4 optimizer.lr 0.0010168002171307632 +146 4 negative_sampler.num_negs_per_pos 21.0 +146 4 training.batch_size 3.0 +146 5 model.embedding_dim 1.0 +146 5 optimizer.lr 0.001094076012729922 +146 5 negative_sampler.num_negs_per_pos 5.0 +146 5 training.batch_size 2.0 +146 6 model.embedding_dim 1.0 +146 6 optimizer.lr 0.08775037065775543 +146 6 negative_sampler.num_negs_per_pos 40.0 +146 6 training.batch_size 1.0 +146 7 model.embedding_dim 1.0 +146 7 optimizer.lr 0.0035071125490104983 +146 7 negative_sampler.num_negs_per_pos 36.0 +146 7 training.batch_size 0.0 +146 1 dataset """yago310""" +146 1 model 
"""ermlp""" +146 1 loss """bceaftersigmoid""" +146 1 regularizer """no""" +146 1 optimizer """adam""" +146 1 training_loop """owa""" +146 1 negative_sampler """basic""" +146 1 evaluator """rankbased""" +146 2 dataset """yago310""" +146 2 model """ermlp""" +146 2 loss """bceaftersigmoid""" +146 2 regularizer """no""" +146 2 optimizer """adam""" +146 2 training_loop """owa""" +146 2 negative_sampler """basic""" +146 2 evaluator """rankbased""" +146 3 dataset """yago310""" +146 3 model """ermlp""" +146 3 loss """bceaftersigmoid""" +146 3 regularizer """no""" +146 3 optimizer """adam""" +146 3 training_loop """owa""" +146 3 negative_sampler """basic""" +146 3 evaluator """rankbased""" +146 4 dataset """yago310""" +146 4 model """ermlp""" +146 4 loss """bceaftersigmoid""" +146 4 regularizer """no""" +146 4 optimizer """adam""" +146 4 training_loop """owa""" +146 4 negative_sampler """basic""" +146 4 evaluator """rankbased""" +146 5 dataset """yago310""" +146 5 model """ermlp""" +146 5 loss """bceaftersigmoid""" +146 5 regularizer """no""" +146 5 optimizer """adam""" +146 5 training_loop """owa""" +146 5 negative_sampler """basic""" +146 5 evaluator """rankbased""" +146 6 dataset """yago310""" +146 6 model """ermlp""" +146 6 loss """bceaftersigmoid""" +146 6 regularizer """no""" +146 6 optimizer """adam""" +146 6 training_loop """owa""" +146 6 negative_sampler """basic""" +146 6 evaluator """rankbased""" +146 7 dataset """yago310""" +146 7 model """ermlp""" +146 7 loss """bceaftersigmoid""" +146 7 regularizer """no""" +146 7 optimizer """adam""" +146 7 training_loop """owa""" +146 7 negative_sampler """basic""" +146 7 evaluator """rankbased""" +147 1 model.embedding_dim 1.0 +147 1 optimizer.lr 0.00947772247714081 +147 1 negative_sampler.num_negs_per_pos 35.0 +147 1 training.batch_size 3.0 +147 2 model.embedding_dim 2.0 +147 2 optimizer.lr 0.006968326563651091 +147 2 negative_sampler.num_negs_per_pos 2.0 +147 2 training.batch_size 0.0 +147 3 model.embedding_dim 0.0 +147 3 
optimizer.lr 0.004273358571560813 +147 3 negative_sampler.num_negs_per_pos 15.0 +147 3 training.batch_size 2.0 +147 4 model.embedding_dim 2.0 +147 4 optimizer.lr 0.0190670684585565 +147 4 negative_sampler.num_negs_per_pos 27.0 +147 4 training.batch_size 0.0 +147 5 model.embedding_dim 2.0 +147 5 optimizer.lr 0.002103289148881089 +147 5 negative_sampler.num_negs_per_pos 6.0 +147 5 training.batch_size 1.0 +147 6 model.embedding_dim 1.0 +147 6 optimizer.lr 0.0024233423421468423 +147 6 negative_sampler.num_negs_per_pos 4.0 +147 6 training.batch_size 3.0 +147 7 model.embedding_dim 2.0 +147 7 optimizer.lr 0.029384044702970857 +147 7 negative_sampler.num_negs_per_pos 27.0 +147 7 training.batch_size 0.0 +147 8 model.embedding_dim 2.0 +147 8 optimizer.lr 0.0010918468004160373 +147 8 negative_sampler.num_negs_per_pos 47.0 +147 8 training.batch_size 2.0 +147 9 model.embedding_dim 1.0 +147 9 optimizer.lr 0.0395488793119134 +147 9 negative_sampler.num_negs_per_pos 29.0 +147 9 training.batch_size 1.0 +147 10 model.embedding_dim 2.0 +147 10 optimizer.lr 0.041480398839306244 +147 10 negative_sampler.num_negs_per_pos 27.0 +147 10 training.batch_size 0.0 +147 11 model.embedding_dim 0.0 +147 11 optimizer.lr 0.03377497991919168 +147 11 negative_sampler.num_negs_per_pos 10.0 +147 11 training.batch_size 0.0 +147 12 model.embedding_dim 2.0 +147 12 optimizer.lr 0.00311333014209865 +147 12 negative_sampler.num_negs_per_pos 47.0 +147 12 training.batch_size 3.0 +147 13 model.embedding_dim 0.0 +147 13 optimizer.lr 0.0029164399424618966 +147 13 negative_sampler.num_negs_per_pos 42.0 +147 13 training.batch_size 0.0 +147 14 model.embedding_dim 1.0 +147 14 optimizer.lr 0.09928277292021961 +147 14 negative_sampler.num_negs_per_pos 16.0 +147 14 training.batch_size 2.0 +147 15 model.embedding_dim 2.0 +147 15 optimizer.lr 0.0035887901985711236 +147 15 negative_sampler.num_negs_per_pos 2.0 +147 15 training.batch_size 2.0 +147 16 model.embedding_dim 2.0 +147 16 optimizer.lr 0.03142298970653618 +147 16 
negative_sampler.num_negs_per_pos 17.0 +147 16 training.batch_size 1.0 +147 17 model.embedding_dim 2.0 +147 17 optimizer.lr 0.016669000523708448 +147 17 negative_sampler.num_negs_per_pos 26.0 +147 17 training.batch_size 3.0 +147 18 model.embedding_dim 1.0 +147 18 optimizer.lr 0.0020597506982639536 +147 18 negative_sampler.num_negs_per_pos 48.0 +147 18 training.batch_size 1.0 +147 19 model.embedding_dim 1.0 +147 19 optimizer.lr 0.0012540903936732448 +147 19 negative_sampler.num_negs_per_pos 22.0 +147 19 training.batch_size 0.0 +147 20 model.embedding_dim 0.0 +147 20 optimizer.lr 0.004619192001023339 +147 20 negative_sampler.num_negs_per_pos 14.0 +147 20 training.batch_size 2.0 +147 21 model.embedding_dim 0.0 +147 21 optimizer.lr 0.0881917860107427 +147 21 negative_sampler.num_negs_per_pos 32.0 +147 21 training.batch_size 2.0 +147 22 model.embedding_dim 0.0 +147 22 optimizer.lr 0.0024817041823741418 +147 22 negative_sampler.num_negs_per_pos 21.0 +147 22 training.batch_size 3.0 +147 1 dataset """yago310""" +147 1 model """ermlp""" +147 1 loss """bceaftersigmoid""" +147 1 regularizer """no""" +147 1 optimizer """adam""" +147 1 training_loop """owa""" +147 1 negative_sampler """basic""" +147 1 evaluator """rankbased""" +147 2 dataset """yago310""" +147 2 model """ermlp""" +147 2 loss """bceaftersigmoid""" +147 2 regularizer """no""" +147 2 optimizer """adam""" +147 2 training_loop """owa""" +147 2 negative_sampler """basic""" +147 2 evaluator """rankbased""" +147 3 dataset """yago310""" +147 3 model """ermlp""" +147 3 loss """bceaftersigmoid""" +147 3 regularizer """no""" +147 3 optimizer """adam""" +147 3 training_loop """owa""" +147 3 negative_sampler """basic""" +147 3 evaluator """rankbased""" +147 4 dataset """yago310""" +147 4 model """ermlp""" +147 4 loss """bceaftersigmoid""" +147 4 regularizer """no""" +147 4 optimizer """adam""" +147 4 training_loop """owa""" +147 4 negative_sampler """basic""" +147 4 evaluator """rankbased""" +147 5 dataset """yago310""" +147 
5 model """ermlp""" +147 5 loss """bceaftersigmoid""" +147 5 regularizer """no""" +147 5 optimizer """adam""" +147 5 training_loop """owa""" +147 5 negative_sampler """basic""" +147 5 evaluator """rankbased""" +147 6 dataset """yago310""" +147 6 model """ermlp""" +147 6 loss """bceaftersigmoid""" +147 6 regularizer """no""" +147 6 optimizer """adam""" +147 6 training_loop """owa""" +147 6 negative_sampler """basic""" +147 6 evaluator """rankbased""" +147 7 dataset """yago310""" +147 7 model """ermlp""" +147 7 loss """bceaftersigmoid""" +147 7 regularizer """no""" +147 7 optimizer """adam""" +147 7 training_loop """owa""" +147 7 negative_sampler """basic""" +147 7 evaluator """rankbased""" +147 8 dataset """yago310""" +147 8 model """ermlp""" +147 8 loss """bceaftersigmoid""" +147 8 regularizer """no""" +147 8 optimizer """adam""" +147 8 training_loop """owa""" +147 8 negative_sampler """basic""" +147 8 evaluator """rankbased""" +147 9 dataset """yago310""" +147 9 model """ermlp""" +147 9 loss """bceaftersigmoid""" +147 9 regularizer """no""" +147 9 optimizer """adam""" +147 9 training_loop """owa""" +147 9 negative_sampler """basic""" +147 9 evaluator """rankbased""" +147 10 dataset """yago310""" +147 10 model """ermlp""" +147 10 loss """bceaftersigmoid""" +147 10 regularizer """no""" +147 10 optimizer """adam""" +147 10 training_loop """owa""" +147 10 negative_sampler """basic""" +147 10 evaluator """rankbased""" +147 11 dataset """yago310""" +147 11 model """ermlp""" +147 11 loss """bceaftersigmoid""" +147 11 regularizer """no""" +147 11 optimizer """adam""" +147 11 training_loop """owa""" +147 11 negative_sampler """basic""" +147 11 evaluator """rankbased""" +147 12 dataset """yago310""" +147 12 model """ermlp""" +147 12 loss """bceaftersigmoid""" +147 12 regularizer """no""" +147 12 optimizer """adam""" +147 12 training_loop """owa""" +147 12 negative_sampler """basic""" +147 12 evaluator """rankbased""" +147 13 dataset """yago310""" +147 13 model """ermlp""" 
+147 13 loss """bceaftersigmoid""" +147 13 regularizer """no""" +147 13 optimizer """adam""" +147 13 training_loop """owa""" +147 13 negative_sampler """basic""" +147 13 evaluator """rankbased""" +147 14 dataset """yago310""" +147 14 model """ermlp""" +147 14 loss """bceaftersigmoid""" +147 14 regularizer """no""" +147 14 optimizer """adam""" +147 14 training_loop """owa""" +147 14 negative_sampler """basic""" +147 14 evaluator """rankbased""" +147 15 dataset """yago310""" +147 15 model """ermlp""" +147 15 loss """bceaftersigmoid""" +147 15 regularizer """no""" +147 15 optimizer """adam""" +147 15 training_loop """owa""" +147 15 negative_sampler """basic""" +147 15 evaluator """rankbased""" +147 16 dataset """yago310""" +147 16 model """ermlp""" +147 16 loss """bceaftersigmoid""" +147 16 regularizer """no""" +147 16 optimizer """adam""" +147 16 training_loop """owa""" +147 16 negative_sampler """basic""" +147 16 evaluator """rankbased""" +147 17 dataset """yago310""" +147 17 model """ermlp""" +147 17 loss """bceaftersigmoid""" +147 17 regularizer """no""" +147 17 optimizer """adam""" +147 17 training_loop """owa""" +147 17 negative_sampler """basic""" +147 17 evaluator """rankbased""" +147 18 dataset """yago310""" +147 18 model """ermlp""" +147 18 loss """bceaftersigmoid""" +147 18 regularizer """no""" +147 18 optimizer """adam""" +147 18 training_loop """owa""" +147 18 negative_sampler """basic""" +147 18 evaluator """rankbased""" +147 19 dataset """yago310""" +147 19 model """ermlp""" +147 19 loss """bceaftersigmoid""" +147 19 regularizer """no""" +147 19 optimizer """adam""" +147 19 training_loop """owa""" +147 19 negative_sampler """basic""" +147 19 evaluator """rankbased""" +147 20 dataset """yago310""" +147 20 model """ermlp""" +147 20 loss """bceaftersigmoid""" +147 20 regularizer """no""" +147 20 optimizer """adam""" +147 20 training_loop """owa""" +147 20 negative_sampler """basic""" +147 20 evaluator """rankbased""" +147 21 dataset """yago310""" +147 21 
model """ermlp""" +147 21 loss """bceaftersigmoid""" +147 21 regularizer """no""" +147 21 optimizer """adam""" +147 21 training_loop """owa""" +147 21 negative_sampler """basic""" +147 21 evaluator """rankbased""" +147 22 dataset """yago310""" +147 22 model """ermlp""" +147 22 loss """bceaftersigmoid""" +147 22 regularizer """no""" +147 22 optimizer """adam""" +147 22 training_loop """owa""" +147 22 negative_sampler """basic""" +147 22 evaluator """rankbased""" +148 1 model.embedding_dim 2.0 +148 1 optimizer.lr 0.0010532292679831057 +148 1 negative_sampler.num_negs_per_pos 16.0 +148 1 training.batch_size 3.0 +148 2 model.embedding_dim 2.0 +148 2 optimizer.lr 0.007327453864057703 +148 2 negative_sampler.num_negs_per_pos 48.0 +148 2 training.batch_size 1.0 +148 3 model.embedding_dim 1.0 +148 3 optimizer.lr 0.0597220897422602 +148 3 negative_sampler.num_negs_per_pos 34.0 +148 3 training.batch_size 0.0 +148 4 model.embedding_dim 2.0 +148 4 optimizer.lr 0.013023375990576241 +148 4 negative_sampler.num_negs_per_pos 21.0 +148 4 training.batch_size 3.0 +148 5 model.embedding_dim 0.0 +148 5 optimizer.lr 0.05940283008488553 +148 5 negative_sampler.num_negs_per_pos 36.0 +148 5 training.batch_size 1.0 +148 6 model.embedding_dim 1.0 +148 6 optimizer.lr 0.01848287493876666 +148 6 negative_sampler.num_negs_per_pos 4.0 +148 6 training.batch_size 2.0 +148 7 model.embedding_dim 2.0 +148 7 optimizer.lr 0.08329233786513117 +148 7 negative_sampler.num_negs_per_pos 27.0 +148 7 training.batch_size 3.0 +148 8 model.embedding_dim 1.0 +148 8 optimizer.lr 0.021054407039118577 +148 8 negative_sampler.num_negs_per_pos 28.0 +148 8 training.batch_size 3.0 +148 9 model.embedding_dim 2.0 +148 9 optimizer.lr 0.09259904657081762 +148 9 negative_sampler.num_negs_per_pos 45.0 +148 9 training.batch_size 1.0 +148 10 model.embedding_dim 1.0 +148 10 optimizer.lr 0.033957303514470114 +148 10 negative_sampler.num_negs_per_pos 44.0 +148 10 training.batch_size 3.0 +148 11 model.embedding_dim 0.0 +148 11 
optimizer.lr 0.0848335805502582 +148 11 negative_sampler.num_negs_per_pos 34.0 +148 11 training.batch_size 3.0 +148 12 model.embedding_dim 2.0 +148 12 optimizer.lr 0.007195836798287633 +148 12 negative_sampler.num_negs_per_pos 1.0 +148 12 training.batch_size 1.0 +148 13 model.embedding_dim 2.0 +148 13 optimizer.lr 0.029671168570669107 +148 13 negative_sampler.num_negs_per_pos 38.0 +148 13 training.batch_size 2.0 +148 14 model.embedding_dim 2.0 +148 14 optimizer.lr 0.008030895517618802 +148 14 negative_sampler.num_negs_per_pos 10.0 +148 14 training.batch_size 1.0 +148 15 model.embedding_dim 0.0 +148 15 optimizer.lr 0.01880437705981929 +148 15 negative_sampler.num_negs_per_pos 35.0 +148 15 training.batch_size 2.0 +148 16 model.embedding_dim 0.0 +148 16 optimizer.lr 0.00527282069233928 +148 16 negative_sampler.num_negs_per_pos 27.0 +148 16 training.batch_size 0.0 +148 17 model.embedding_dim 0.0 +148 17 optimizer.lr 0.08731810405183534 +148 17 negative_sampler.num_negs_per_pos 15.0 +148 17 training.batch_size 3.0 +148 18 model.embedding_dim 0.0 +148 18 optimizer.lr 0.02264911853757037 +148 18 negative_sampler.num_negs_per_pos 14.0 +148 18 training.batch_size 3.0 +148 19 model.embedding_dim 0.0 +148 19 optimizer.lr 0.024548953325134817 +148 19 negative_sampler.num_negs_per_pos 45.0 +148 19 training.batch_size 2.0 +148 20 model.embedding_dim 2.0 +148 20 optimizer.lr 0.005278500100344044 +148 20 negative_sampler.num_negs_per_pos 37.0 +148 20 training.batch_size 2.0 +148 21 model.embedding_dim 2.0 +148 21 optimizer.lr 0.00734488162485897 +148 21 negative_sampler.num_negs_per_pos 44.0 +148 21 training.batch_size 0.0 +148 22 model.embedding_dim 2.0 +148 22 optimizer.lr 0.04546070393197521 +148 22 negative_sampler.num_negs_per_pos 21.0 +148 22 training.batch_size 1.0 +148 23 model.embedding_dim 2.0 +148 23 optimizer.lr 0.004917083902875451 +148 23 negative_sampler.num_negs_per_pos 49.0 +148 23 training.batch_size 0.0 +148 24 model.embedding_dim 0.0 +148 24 optimizer.lr 
0.0013046375418026106 +148 24 negative_sampler.num_negs_per_pos 24.0 +148 24 training.batch_size 0.0 +148 25 model.embedding_dim 1.0 +148 25 optimizer.lr 0.005537093522743038 +148 25 negative_sampler.num_negs_per_pos 10.0 +148 25 training.batch_size 0.0 +148 1 dataset """yago310""" +148 1 model """ermlp""" +148 1 loss """softplus""" +148 1 regularizer """no""" +148 1 optimizer """adam""" +148 1 training_loop """owa""" +148 1 negative_sampler """basic""" +148 1 evaluator """rankbased""" +148 2 dataset """yago310""" +148 2 model """ermlp""" +148 2 loss """softplus""" +148 2 regularizer """no""" +148 2 optimizer """adam""" +148 2 training_loop """owa""" +148 2 negative_sampler """basic""" +148 2 evaluator """rankbased""" +148 3 dataset """yago310""" +148 3 model """ermlp""" +148 3 loss """softplus""" +148 3 regularizer """no""" +148 3 optimizer """adam""" +148 3 training_loop """owa""" +148 3 negative_sampler """basic""" +148 3 evaluator """rankbased""" +148 4 dataset """yago310""" +148 4 model """ermlp""" +148 4 loss """softplus""" +148 4 regularizer """no""" +148 4 optimizer """adam""" +148 4 training_loop """owa""" +148 4 negative_sampler """basic""" +148 4 evaluator """rankbased""" +148 5 dataset """yago310""" +148 5 model """ermlp""" +148 5 loss """softplus""" +148 5 regularizer """no""" +148 5 optimizer """adam""" +148 5 training_loop """owa""" +148 5 negative_sampler """basic""" +148 5 evaluator """rankbased""" +148 6 dataset """yago310""" +148 6 model """ermlp""" +148 6 loss """softplus""" +148 6 regularizer """no""" +148 6 optimizer """adam""" +148 6 training_loop """owa""" +148 6 negative_sampler """basic""" +148 6 evaluator """rankbased""" +148 7 dataset """yago310""" +148 7 model """ermlp""" +148 7 loss """softplus""" +148 7 regularizer """no""" +148 7 optimizer """adam""" +148 7 training_loop """owa""" +148 7 negative_sampler """basic""" +148 7 evaluator """rankbased""" +148 8 dataset """yago310""" +148 8 model """ermlp""" +148 8 loss """softplus""" +148 
8 regularizer """no""" +148 8 optimizer """adam""" +148 8 training_loop """owa""" +148 8 negative_sampler """basic""" +148 8 evaluator """rankbased""" +148 9 dataset """yago310""" +148 9 model """ermlp""" +148 9 loss """softplus""" +148 9 regularizer """no""" +148 9 optimizer """adam""" +148 9 training_loop """owa""" +148 9 negative_sampler """basic""" +148 9 evaluator """rankbased""" +148 10 dataset """yago310""" +148 10 model """ermlp""" +148 10 loss """softplus""" +148 10 regularizer """no""" +148 10 optimizer """adam""" +148 10 training_loop """owa""" +148 10 negative_sampler """basic""" +148 10 evaluator """rankbased""" +148 11 dataset """yago310""" +148 11 model """ermlp""" +148 11 loss """softplus""" +148 11 regularizer """no""" +148 11 optimizer """adam""" +148 11 training_loop """owa""" +148 11 negative_sampler """basic""" +148 11 evaluator """rankbased""" +148 12 dataset """yago310""" +148 12 model """ermlp""" +148 12 loss """softplus""" +148 12 regularizer """no""" +148 12 optimizer """adam""" +148 12 training_loop """owa""" +148 12 negative_sampler """basic""" +148 12 evaluator """rankbased""" +148 13 dataset """yago310""" +148 13 model """ermlp""" +148 13 loss """softplus""" +148 13 regularizer """no""" +148 13 optimizer """adam""" +148 13 training_loop """owa""" +148 13 negative_sampler """basic""" +148 13 evaluator """rankbased""" +148 14 dataset """yago310""" +148 14 model """ermlp""" +148 14 loss """softplus""" +148 14 regularizer """no""" +148 14 optimizer """adam""" +148 14 training_loop """owa""" +148 14 negative_sampler """basic""" +148 14 evaluator """rankbased""" +148 15 dataset """yago310""" +148 15 model """ermlp""" +148 15 loss """softplus""" +148 15 regularizer """no""" +148 15 optimizer """adam""" +148 15 training_loop """owa""" +148 15 negative_sampler """basic""" +148 15 evaluator """rankbased""" +148 16 dataset """yago310""" +148 16 model """ermlp""" +148 16 loss """softplus""" +148 16 regularizer """no""" +148 16 optimizer """adam""" 
+148 16 training_loop """owa""" +148 16 negative_sampler """basic""" +148 16 evaluator """rankbased""" +148 17 dataset """yago310""" +148 17 model """ermlp""" +148 17 loss """softplus""" +148 17 regularizer """no""" +148 17 optimizer """adam""" +148 17 training_loop """owa""" +148 17 negative_sampler """basic""" +148 17 evaluator """rankbased""" +148 18 dataset """yago310""" +148 18 model """ermlp""" +148 18 loss """softplus""" +148 18 regularizer """no""" +148 18 optimizer """adam""" +148 18 training_loop """owa""" +148 18 negative_sampler """basic""" +148 18 evaluator """rankbased""" +148 19 dataset """yago310""" +148 19 model """ermlp""" +148 19 loss """softplus""" +148 19 regularizer """no""" +148 19 optimizer """adam""" +148 19 training_loop """owa""" +148 19 negative_sampler """basic""" +148 19 evaluator """rankbased""" +148 20 dataset """yago310""" +148 20 model """ermlp""" +148 20 loss """softplus""" +148 20 regularizer """no""" +148 20 optimizer """adam""" +148 20 training_loop """owa""" +148 20 negative_sampler """basic""" +148 20 evaluator """rankbased""" +148 21 dataset """yago310""" +148 21 model """ermlp""" +148 21 loss """softplus""" +148 21 regularizer """no""" +148 21 optimizer """adam""" +148 21 training_loop """owa""" +148 21 negative_sampler """basic""" +148 21 evaluator """rankbased""" +148 22 dataset """yago310""" +148 22 model """ermlp""" +148 22 loss """softplus""" +148 22 regularizer """no""" +148 22 optimizer """adam""" +148 22 training_loop """owa""" +148 22 negative_sampler """basic""" +148 22 evaluator """rankbased""" +148 23 dataset """yago310""" +148 23 model """ermlp""" +148 23 loss """softplus""" +148 23 regularizer """no""" +148 23 optimizer """adam""" +148 23 training_loop """owa""" +148 23 negative_sampler """basic""" +148 23 evaluator """rankbased""" +148 24 dataset """yago310""" +148 24 model """ermlp""" +148 24 loss """softplus""" +148 24 regularizer """no""" +148 24 optimizer """adam""" +148 24 training_loop """owa""" +148 24 
negative_sampler """basic""" +148 24 evaluator """rankbased""" +148 25 dataset """yago310""" +148 25 model """ermlp""" +148 25 loss """softplus""" +148 25 regularizer """no""" +148 25 optimizer """adam""" +148 25 training_loop """owa""" +148 25 negative_sampler """basic""" +148 25 evaluator """rankbased""" +149 1 model.embedding_dim 1.0 +149 1 optimizer.lr 0.002578716491264209 +149 1 negative_sampler.num_negs_per_pos 36.0 +149 1 training.batch_size 0.0 +149 2 model.embedding_dim 0.0 +149 2 optimizer.lr 0.09429390897171185 +149 2 negative_sampler.num_negs_per_pos 13.0 +149 2 training.batch_size 2.0 +149 3 model.embedding_dim 2.0 +149 3 optimizer.lr 0.013429834716377919 +149 3 negative_sampler.num_negs_per_pos 3.0 +149 3 training.batch_size 3.0 +149 4 model.embedding_dim 0.0 +149 4 optimizer.lr 0.0014464071208566422 +149 4 negative_sampler.num_negs_per_pos 30.0 +149 4 training.batch_size 1.0 +149 5 model.embedding_dim 2.0 +149 5 optimizer.lr 0.03436550410847844 +149 5 negative_sampler.num_negs_per_pos 31.0 +149 5 training.batch_size 1.0 +149 6 model.embedding_dim 1.0 +149 6 optimizer.lr 0.002265740884169822 +149 6 negative_sampler.num_negs_per_pos 35.0 +149 6 training.batch_size 0.0 +149 7 model.embedding_dim 1.0 +149 7 optimizer.lr 0.009183110705675308 +149 7 negative_sampler.num_negs_per_pos 23.0 +149 7 training.batch_size 3.0 +149 8 model.embedding_dim 1.0 +149 8 optimizer.lr 0.004186071542160027 +149 8 negative_sampler.num_negs_per_pos 49.0 +149 8 training.batch_size 0.0 +149 9 model.embedding_dim 2.0 +149 9 optimizer.lr 0.037352199796538955 +149 9 negative_sampler.num_negs_per_pos 12.0 +149 9 training.batch_size 3.0 +149 10 model.embedding_dim 0.0 +149 10 optimizer.lr 0.0026092696793957736 +149 10 negative_sampler.num_negs_per_pos 7.0 +149 10 training.batch_size 1.0 +149 11 model.embedding_dim 0.0 +149 11 optimizer.lr 0.0028757042399666873 +149 11 negative_sampler.num_negs_per_pos 2.0 +149 11 training.batch_size 1.0 +149 12 model.embedding_dim 1.0 +149 12 
optimizer.lr 0.09268057345482775 +149 12 negative_sampler.num_negs_per_pos 33.0 +149 12 training.batch_size 3.0 +149 13 model.embedding_dim 0.0 +149 13 optimizer.lr 0.007860402319974516 +149 13 negative_sampler.num_negs_per_pos 34.0 +149 13 training.batch_size 3.0 +149 14 model.embedding_dim 2.0 +149 14 optimizer.lr 0.0016489189017354397 +149 14 negative_sampler.num_negs_per_pos 42.0 +149 14 training.batch_size 0.0 +149 15 model.embedding_dim 2.0 +149 15 optimizer.lr 0.002726730009083287 +149 15 negative_sampler.num_negs_per_pos 46.0 +149 15 training.batch_size 1.0 +149 16 model.embedding_dim 2.0 +149 16 optimizer.lr 0.07407304532542919 +149 16 negative_sampler.num_negs_per_pos 46.0 +149 16 training.batch_size 0.0 +149 17 model.embedding_dim 0.0 +149 17 optimizer.lr 0.001736108927551852 +149 17 negative_sampler.num_negs_per_pos 48.0 +149 17 training.batch_size 3.0 +149 1 dataset """yago310""" +149 1 model """ermlp""" +149 1 loss """softplus""" +149 1 regularizer """no""" +149 1 optimizer """adam""" +149 1 training_loop """owa""" +149 1 negative_sampler """basic""" +149 1 evaluator """rankbased""" +149 2 dataset """yago310""" +149 2 model """ermlp""" +149 2 loss """softplus""" +149 2 regularizer """no""" +149 2 optimizer """adam""" +149 2 training_loop """owa""" +149 2 negative_sampler """basic""" +149 2 evaluator """rankbased""" +149 3 dataset """yago310""" +149 3 model """ermlp""" +149 3 loss """softplus""" +149 3 regularizer """no""" +149 3 optimizer """adam""" +149 3 training_loop """owa""" +149 3 negative_sampler """basic""" +149 3 evaluator """rankbased""" +149 4 dataset """yago310""" +149 4 model """ermlp""" +149 4 loss """softplus""" +149 4 regularizer """no""" +149 4 optimizer """adam""" +149 4 training_loop """owa""" +149 4 negative_sampler """basic""" +149 4 evaluator """rankbased""" +149 5 dataset """yago310""" +149 5 model """ermlp""" +149 5 loss """softplus""" +149 5 regularizer """no""" +149 5 optimizer """adam""" +149 5 training_loop """owa""" +149 5 
negative_sampler """basic""" +149 5 evaluator """rankbased""" +149 6 dataset """yago310""" +149 6 model """ermlp""" +149 6 loss """softplus""" +149 6 regularizer """no""" +149 6 optimizer """adam""" +149 6 training_loop """owa""" +149 6 negative_sampler """basic""" +149 6 evaluator """rankbased""" +149 7 dataset """yago310""" +149 7 model """ermlp""" +149 7 loss """softplus""" +149 7 regularizer """no""" +149 7 optimizer """adam""" +149 7 training_loop """owa""" +149 7 negative_sampler """basic""" +149 7 evaluator """rankbased""" +149 8 dataset """yago310""" +149 8 model """ermlp""" +149 8 loss """softplus""" +149 8 regularizer """no""" +149 8 optimizer """adam""" +149 8 training_loop """owa""" +149 8 negative_sampler """basic""" +149 8 evaluator """rankbased""" +149 9 dataset """yago310""" +149 9 model """ermlp""" +149 9 loss """softplus""" +149 9 regularizer """no""" +149 9 optimizer """adam""" +149 9 training_loop """owa""" +149 9 negative_sampler """basic""" +149 9 evaluator """rankbased""" +149 10 dataset """yago310""" +149 10 model """ermlp""" +149 10 loss """softplus""" +149 10 regularizer """no""" +149 10 optimizer """adam""" +149 10 training_loop """owa""" +149 10 negative_sampler """basic""" +149 10 evaluator """rankbased""" +149 11 dataset """yago310""" +149 11 model """ermlp""" +149 11 loss """softplus""" +149 11 regularizer """no""" +149 11 optimizer """adam""" +149 11 training_loop """owa""" +149 11 negative_sampler """basic""" +149 11 evaluator """rankbased""" +149 12 dataset """yago310""" +149 12 model """ermlp""" +149 12 loss """softplus""" +149 12 regularizer """no""" +149 12 optimizer """adam""" +149 12 training_loop """owa""" +149 12 negative_sampler """basic""" +149 12 evaluator """rankbased""" +149 13 dataset """yago310""" +149 13 model """ermlp""" +149 13 loss """softplus""" +149 13 regularizer """no""" +149 13 optimizer """adam""" +149 13 training_loop """owa""" +149 13 negative_sampler """basic""" +149 13 evaluator """rankbased""" +149 14 
dataset """yago310""" +149 14 model """ermlp""" +149 14 loss """softplus""" +149 14 regularizer """no""" +149 14 optimizer """adam""" +149 14 training_loop """owa""" +149 14 negative_sampler """basic""" +149 14 evaluator """rankbased""" +149 15 dataset """yago310""" +149 15 model """ermlp""" +149 15 loss """softplus""" +149 15 regularizer """no""" +149 15 optimizer """adam""" +149 15 training_loop """owa""" +149 15 negative_sampler """basic""" +149 15 evaluator """rankbased""" +149 16 dataset """yago310""" +149 16 model """ermlp""" +149 16 loss """softplus""" +149 16 regularizer """no""" +149 16 optimizer """adam""" +149 16 training_loop """owa""" +149 16 negative_sampler """basic""" +149 16 evaluator """rankbased""" +149 17 dataset """yago310""" +149 17 model """ermlp""" +149 17 loss """softplus""" +149 17 regularizer """no""" +149 17 optimizer """adam""" +149 17 training_loop """owa""" +149 17 negative_sampler """basic""" +149 17 evaluator """rankbased""" +150 1 model.embedding_dim 2.0 +150 1 model.hidden_dropout_rate 0.26663075627292376 +150 1 model.num_filters 6.0 +150 1 loss.margin 7.592244529632684 +150 1 optimizer.lr 0.00980979267361138 +150 1 negative_sampler.num_negs_per_pos 34.0 +150 1 training.batch_size 0.0 +150 2 model.embedding_dim 2.0 +150 2 model.hidden_dropout_rate 0.48726091077282996 +150 2 model.num_filters 4.0 +150 2 loss.margin 2.7708563139890385 +150 2 optimizer.lr 0.0027373044492329736 +150 2 negative_sampler.num_negs_per_pos 9.0 +150 2 training.batch_size 0.0 +150 3 model.embedding_dim 0.0 +150 3 model.hidden_dropout_rate 0.36746294508572563 +150 3 model.num_filters 6.0 +150 3 loss.margin 4.560327914424123 +150 3 optimizer.lr 0.03933659361204037 +150 3 negative_sampler.num_negs_per_pos 14.0 +150 3 training.batch_size 1.0 +150 4 model.embedding_dim 1.0 +150 4 model.hidden_dropout_rate 0.36370449598247656 +150 4 model.num_filters 9.0 +150 4 loss.margin 1.9140792225400476 +150 4 optimizer.lr 0.008480257498933224 +150 4 
negative_sampler.num_negs_per_pos 6.0 +150 4 training.batch_size 0.0 +150 5 model.embedding_dim 0.0 +150 5 model.hidden_dropout_rate 0.44471333841479954 +150 5 model.num_filters 0.0 +150 5 loss.margin 9.483096926901815 +150 5 optimizer.lr 0.05013676891491243 +150 5 negative_sampler.num_negs_per_pos 27.0 +150 5 training.batch_size 1.0 +150 6 model.embedding_dim 0.0 +150 6 model.hidden_dropout_rate 0.11239729019406425 +150 6 model.num_filters 5.0 +150 6 loss.margin 9.147922221121595 +150 6 optimizer.lr 0.026574732368950443 +150 6 negative_sampler.num_negs_per_pos 10.0 +150 6 training.batch_size 2.0 +150 7 model.embedding_dim 1.0 +150 7 model.hidden_dropout_rate 0.11108572934633183 +150 7 model.num_filters 4.0 +150 7 loss.margin 2.269362807925334 +150 7 optimizer.lr 0.004305917452356513 +150 7 negative_sampler.num_negs_per_pos 3.0 +150 7 training.batch_size 1.0 +150 8 model.embedding_dim 0.0 +150 8 model.hidden_dropout_rate 0.3587847925556611 +150 8 model.num_filters 0.0 +150 8 loss.margin 5.4109096989329295 +150 8 optimizer.lr 0.003982577915495002 +150 8 negative_sampler.num_negs_per_pos 34.0 +150 8 training.batch_size 1.0 +150 1 dataset """yago310""" +150 1 model """convkb""" +150 1 loss """marginranking""" +150 1 regularizer """no""" +150 1 optimizer """adam""" +150 1 training_loop """owa""" +150 1 negative_sampler """basic""" +150 1 evaluator """rankbased""" +150 2 dataset """yago310""" +150 2 model """convkb""" +150 2 loss """marginranking""" +150 2 regularizer """no""" +150 2 optimizer """adam""" +150 2 training_loop """owa""" +150 2 negative_sampler """basic""" +150 2 evaluator """rankbased""" +150 3 dataset """yago310""" +150 3 model """convkb""" +150 3 loss """marginranking""" +150 3 regularizer """no""" +150 3 optimizer """adam""" +150 3 training_loop """owa""" +150 3 negative_sampler """basic""" +150 3 evaluator """rankbased""" +150 4 dataset """yago310""" +150 4 model """convkb""" +150 4 loss """marginranking""" +150 4 regularizer """no""" +150 4 optimizer 
"""adam""" +150 4 training_loop """owa""" +150 4 negative_sampler """basic""" +150 4 evaluator """rankbased""" +150 5 dataset """yago310""" +150 5 model """convkb""" +150 5 loss """marginranking""" +150 5 regularizer """no""" +150 5 optimizer """adam""" +150 5 training_loop """owa""" +150 5 negative_sampler """basic""" +150 5 evaluator """rankbased""" +150 6 dataset """yago310""" +150 6 model """convkb""" +150 6 loss """marginranking""" +150 6 regularizer """no""" +150 6 optimizer """adam""" +150 6 training_loop """owa""" +150 6 negative_sampler """basic""" +150 6 evaluator """rankbased""" +150 7 dataset """yago310""" +150 7 model """convkb""" +150 7 loss """marginranking""" +150 7 regularizer """no""" +150 7 optimizer """adam""" +150 7 training_loop """owa""" +150 7 negative_sampler """basic""" +150 7 evaluator """rankbased""" +150 8 dataset """yago310""" +150 8 model """convkb""" +150 8 loss """marginranking""" +150 8 regularizer """no""" +150 8 optimizer """adam""" +150 8 training_loop """owa""" +150 8 negative_sampler """basic""" +150 8 evaluator """rankbased""" +151 1 model.embedding_dim 2.0 +151 1 model.hidden_dropout_rate 0.2841831134413911 +151 1 model.num_filters 9.0 +151 1 loss.margin 1.834584172614714 +151 1 optimizer.lr 0.001995449750197921 +151 1 negative_sampler.num_negs_per_pos 19.0 +151 1 training.batch_size 2.0 +151 2 model.embedding_dim 1.0 +151 2 model.hidden_dropout_rate 0.19495582275129786 +151 2 model.num_filters 2.0 +151 2 loss.margin 6.8196030286970135 +151 2 optimizer.lr 0.012430040313443212 +151 2 negative_sampler.num_negs_per_pos 5.0 +151 2 training.batch_size 2.0 +151 3 model.embedding_dim 2.0 +151 3 model.hidden_dropout_rate 0.41131589915314637 +151 3 model.num_filters 5.0 +151 3 loss.margin 3.9182155444467694 +151 3 optimizer.lr 0.004260366814527787 +151 3 negative_sampler.num_negs_per_pos 24.0 +151 3 training.batch_size 0.0 +151 4 model.embedding_dim 1.0 +151 4 model.hidden_dropout_rate 0.1392797377680137 +151 4 model.num_filters 3.0 
+151 4 loss.margin 8.070459415626349 +151 4 optimizer.lr 0.024614159719679347 +151 4 negative_sampler.num_negs_per_pos 34.0 +151 4 training.batch_size 2.0 +151 5 model.embedding_dim 1.0 +151 5 model.hidden_dropout_rate 0.33424725659681487 +151 5 model.num_filters 3.0 +151 5 loss.margin 4.795470759189262 +151 5 optimizer.lr 0.07539798435588102 +151 5 negative_sampler.num_negs_per_pos 0.0 +151 5 training.batch_size 0.0 +151 6 model.embedding_dim 1.0 +151 6 model.hidden_dropout_rate 0.3085248208224677 +151 6 model.num_filters 4.0 +151 6 loss.margin 8.680662428880083 +151 6 optimizer.lr 0.018860330519857825 +151 6 negative_sampler.num_negs_per_pos 42.0 +151 6 training.batch_size 2.0 +151 7 model.embedding_dim 0.0 +151 7 model.hidden_dropout_rate 0.47699379685709475 +151 7 model.num_filters 1.0 +151 7 loss.margin 7.486904525670331 +151 7 optimizer.lr 0.03401451651790095 +151 7 negative_sampler.num_negs_per_pos 7.0 +151 7 training.batch_size 2.0 +151 8 model.embedding_dim 0.0 +151 8 model.hidden_dropout_rate 0.45828212531130674 +151 8 model.num_filters 8.0 +151 8 loss.margin 0.7024535803525249 +151 8 optimizer.lr 0.001697127048152794 +151 8 negative_sampler.num_negs_per_pos 26.0 +151 8 training.batch_size 1.0 +151 9 model.embedding_dim 2.0 +151 9 model.hidden_dropout_rate 0.29974643783506993 +151 9 model.num_filters 3.0 +151 9 loss.margin 5.905168310602315 +151 9 optimizer.lr 0.003138411402023506 +151 9 negative_sampler.num_negs_per_pos 36.0 +151 9 training.batch_size 1.0 +151 10 model.embedding_dim 2.0 +151 10 model.hidden_dropout_rate 0.36668430931213447 +151 10 model.num_filters 1.0 +151 10 loss.margin 1.0131923883026377 +151 10 optimizer.lr 0.001180301213407368 +151 10 negative_sampler.num_negs_per_pos 41.0 +151 10 training.batch_size 3.0 +151 11 model.embedding_dim 1.0 +151 11 model.hidden_dropout_rate 0.48392453310955486 +151 11 model.num_filters 4.0 +151 11 loss.margin 3.049651935287237 +151 11 optimizer.lr 0.001164942472654004 +151 11 
negative_sampler.num_negs_per_pos 24.0 +151 11 training.batch_size 1.0 +151 12 model.embedding_dim 1.0 +151 12 model.hidden_dropout_rate 0.421862830664016 +151 12 model.num_filters 3.0 +151 12 loss.margin 2.8352273878262917 +151 12 optimizer.lr 0.009353613750355365 +151 12 negative_sampler.num_negs_per_pos 43.0 +151 12 training.batch_size 2.0 +151 13 model.embedding_dim 2.0 +151 13 model.hidden_dropout_rate 0.2825479518586802 +151 13 model.num_filters 0.0 +151 13 loss.margin 6.276371372740568 +151 13 optimizer.lr 0.0012379548216144552 +151 13 negative_sampler.num_negs_per_pos 23.0 +151 13 training.batch_size 0.0 +151 14 model.embedding_dim 2.0 +151 14 model.hidden_dropout_rate 0.153848728625793 +151 14 model.num_filters 4.0 +151 14 loss.margin 5.716745981706081 +151 14 optimizer.lr 0.012647685131365653 +151 14 negative_sampler.num_negs_per_pos 34.0 +151 14 training.batch_size 0.0 +151 15 model.embedding_dim 2.0 +151 15 model.hidden_dropout_rate 0.3156379842608357 +151 15 model.num_filters 0.0 +151 15 loss.margin 0.515311089677811 +151 15 optimizer.lr 0.0029659956572789686 +151 15 negative_sampler.num_negs_per_pos 8.0 +151 15 training.batch_size 3.0 +151 16 model.embedding_dim 0.0 +151 16 model.hidden_dropout_rate 0.19148190325000636 +151 16 model.num_filters 3.0 +151 16 loss.margin 8.806162616919718 +151 16 optimizer.lr 0.029839977187466175 +151 16 negative_sampler.num_negs_per_pos 24.0 +151 16 training.batch_size 1.0 +151 17 model.embedding_dim 1.0 +151 17 model.hidden_dropout_rate 0.21943907626371925 +151 17 model.num_filters 5.0 +151 17 loss.margin 8.562664387076808 +151 17 optimizer.lr 0.04855865350112364 +151 17 negative_sampler.num_negs_per_pos 38.0 +151 17 training.batch_size 0.0 +151 18 model.embedding_dim 2.0 +151 18 model.hidden_dropout_rate 0.1220145490565948 +151 18 model.num_filters 7.0 +151 18 loss.margin 4.8848699129891555 +151 18 optimizer.lr 0.006974058894031022 +151 18 negative_sampler.num_negs_per_pos 29.0 +151 18 training.batch_size 0.0 +151 19 
model.embedding_dim 1.0 +151 19 model.hidden_dropout_rate 0.4637152931149937 +151 19 model.num_filters 2.0 +151 19 loss.margin 4.114832068075487 +151 19 optimizer.lr 0.036118176348532725 +151 19 negative_sampler.num_negs_per_pos 28.0 +151 19 training.batch_size 1.0 +151 20 model.embedding_dim 1.0 +151 20 model.hidden_dropout_rate 0.48942280689981343 +151 20 model.num_filters 2.0 +151 20 loss.margin 9.572280372053411 +151 20 optimizer.lr 0.027069496147023038 +151 20 negative_sampler.num_negs_per_pos 43.0 +151 20 training.batch_size 3.0 +151 21 model.embedding_dim 1.0 +151 21 model.hidden_dropout_rate 0.3115204895109949 +151 21 model.num_filters 9.0 +151 21 loss.margin 2.0325382141268324 +151 21 optimizer.lr 0.032744220660748856 +151 21 negative_sampler.num_negs_per_pos 1.0 +151 21 training.batch_size 2.0 +151 22 model.embedding_dim 0.0 +151 22 model.hidden_dropout_rate 0.3999280356738898 +151 22 model.num_filters 3.0 +151 22 loss.margin 9.019953477988006 +151 22 optimizer.lr 0.00145807671437903 +151 22 negative_sampler.num_negs_per_pos 0.0 +151 22 training.batch_size 3.0 +151 23 model.embedding_dim 0.0 +151 23 model.hidden_dropout_rate 0.32297043885895327 +151 23 model.num_filters 9.0 +151 23 loss.margin 8.667506446655409 +151 23 optimizer.lr 0.0029824305688235797 +151 23 negative_sampler.num_negs_per_pos 21.0 +151 23 training.batch_size 3.0 +151 24 model.embedding_dim 2.0 +151 24 model.hidden_dropout_rate 0.48999592707932316 +151 24 model.num_filters 4.0 +151 24 loss.margin 8.351738824638634 +151 24 optimizer.lr 0.08948745446665503 +151 24 negative_sampler.num_negs_per_pos 19.0 +151 24 training.batch_size 2.0 +151 25 model.embedding_dim 1.0 +151 25 model.hidden_dropout_rate 0.49817073758244346 +151 25 model.num_filters 6.0 +151 25 loss.margin 3.9318125833951125 +151 25 optimizer.lr 0.016637357251829927 +151 25 negative_sampler.num_negs_per_pos 10.0 +151 25 training.batch_size 0.0 +151 26 model.embedding_dim 2.0 +151 26 model.hidden_dropout_rate 0.3275689956088162 
+151 26 model.num_filters 0.0 +151 26 loss.margin 5.907832845842476 +151 26 optimizer.lr 0.014718827792930777 +151 26 negative_sampler.num_negs_per_pos 29.0 +151 26 training.batch_size 3.0 +151 27 model.embedding_dim 2.0 +151 27 model.hidden_dropout_rate 0.27578209983382307 +151 27 model.num_filters 6.0 +151 27 loss.margin 9.43930987563449 +151 27 optimizer.lr 0.013309869332425991 +151 27 negative_sampler.num_negs_per_pos 28.0 +151 27 training.batch_size 2.0 +151 28 model.embedding_dim 2.0 +151 28 model.hidden_dropout_rate 0.14152736118238357 +151 28 model.num_filters 9.0 +151 28 loss.margin 4.515714126623591 +151 28 optimizer.lr 0.004437645455833751 +151 28 negative_sampler.num_negs_per_pos 44.0 +151 28 training.batch_size 1.0 +151 29 model.embedding_dim 2.0 +151 29 model.hidden_dropout_rate 0.2605644955784423 +151 29 model.num_filters 7.0 +151 29 loss.margin 5.884930642940162 +151 29 optimizer.lr 0.002362712061306689 +151 29 negative_sampler.num_negs_per_pos 3.0 +151 29 training.batch_size 3.0 +151 30 model.embedding_dim 0.0 +151 30 model.hidden_dropout_rate 0.34001383198876156 +151 30 model.num_filters 5.0 +151 30 loss.margin 0.7272661111840496 +151 30 optimizer.lr 0.02161674734597272 +151 30 negative_sampler.num_negs_per_pos 36.0 +151 30 training.batch_size 1.0 +151 31 model.embedding_dim 1.0 +151 31 model.hidden_dropout_rate 0.3805105023388795 +151 31 model.num_filters 6.0 +151 31 loss.margin 8.341451460763276 +151 31 optimizer.lr 0.001319093217458769 +151 31 negative_sampler.num_negs_per_pos 34.0 +151 31 training.batch_size 0.0 +151 32 model.embedding_dim 2.0 +151 32 model.hidden_dropout_rate 0.22292582372846237 +151 32 model.num_filters 8.0 +151 32 loss.margin 7.802330392703553 +151 32 optimizer.lr 0.031063646476658664 +151 32 negative_sampler.num_negs_per_pos 4.0 +151 32 training.batch_size 0.0 +151 33 model.embedding_dim 0.0 +151 33 model.hidden_dropout_rate 0.4336117311285187 +151 33 model.num_filters 4.0 +151 33 loss.margin 5.881226654296884 +151 33 
optimizer.lr 0.03894929007499617 +151 33 negative_sampler.num_negs_per_pos 11.0 +151 33 training.batch_size 2.0 +151 34 model.embedding_dim 1.0 +151 34 model.hidden_dropout_rate 0.15592765496597255 +151 34 model.num_filters 3.0 +151 34 loss.margin 9.45975173131505 +151 34 optimizer.lr 0.0010459991818148485 +151 34 negative_sampler.num_negs_per_pos 2.0 +151 34 training.batch_size 2.0 +151 35 model.embedding_dim 1.0 +151 35 model.hidden_dropout_rate 0.19748838377318673 +151 35 model.num_filters 1.0 +151 35 loss.margin 1.4134801472633816 +151 35 optimizer.lr 0.02976032292396414 +151 35 negative_sampler.num_negs_per_pos 8.0 +151 35 training.batch_size 1.0 +151 36 model.embedding_dim 1.0 +151 36 model.hidden_dropout_rate 0.14689442250319307 +151 36 model.num_filters 0.0 +151 36 loss.margin 6.800213568991932 +151 36 optimizer.lr 0.0266021200842123 +151 36 negative_sampler.num_negs_per_pos 48.0 +151 36 training.batch_size 2.0 +151 37 model.embedding_dim 1.0 +151 37 model.hidden_dropout_rate 0.12288428839675883 +151 37 model.num_filters 8.0 +151 37 loss.margin 6.832971965628154 +151 37 optimizer.lr 0.01399357518396304 +151 37 negative_sampler.num_negs_per_pos 10.0 +151 37 training.batch_size 1.0 +151 38 model.embedding_dim 2.0 +151 38 model.hidden_dropout_rate 0.3030096137439354 +151 38 model.num_filters 4.0 +151 38 loss.margin 5.23858474580138 +151 38 optimizer.lr 0.0029433570247686222 +151 38 negative_sampler.num_negs_per_pos 11.0 +151 38 training.batch_size 1.0 +151 39 model.embedding_dim 1.0 +151 39 model.hidden_dropout_rate 0.3755833396943874 +151 39 model.num_filters 3.0 +151 39 loss.margin 9.301374311761881 +151 39 optimizer.lr 0.006436368816833195 +151 39 negative_sampler.num_negs_per_pos 28.0 +151 39 training.batch_size 0.0 +151 40 model.embedding_dim 1.0 +151 40 model.hidden_dropout_rate 0.30937696504998247 +151 40 model.num_filters 0.0 +151 40 loss.margin 0.7900018346275179 +151 40 optimizer.lr 0.0032682917123388884 +151 40 negative_sampler.num_negs_per_pos 0.0 
+151 40 training.batch_size 0.0 +151 1 dataset """yago310""" +151 1 model """convkb""" +151 1 loss """marginranking""" +151 1 regularizer """no""" +151 1 optimizer """adam""" +151 1 training_loop """owa""" +151 1 negative_sampler """basic""" +151 1 evaluator """rankbased""" +151 2 dataset """yago310""" +151 2 model """convkb""" +151 2 loss """marginranking""" +151 2 regularizer """no""" +151 2 optimizer """adam""" +151 2 training_loop """owa""" +151 2 negative_sampler """basic""" +151 2 evaluator """rankbased""" +151 3 dataset """yago310""" +151 3 model """convkb""" +151 3 loss """marginranking""" +151 3 regularizer """no""" +151 3 optimizer """adam""" +151 3 training_loop """owa""" +151 3 negative_sampler """basic""" +151 3 evaluator """rankbased""" +151 4 dataset """yago310""" +151 4 model """convkb""" +151 4 loss """marginranking""" +151 4 regularizer """no""" +151 4 optimizer """adam""" +151 4 training_loop """owa""" +151 4 negative_sampler """basic""" +151 4 evaluator """rankbased""" +151 5 dataset """yago310""" +151 5 model """convkb""" +151 5 loss """marginranking""" +151 5 regularizer """no""" +151 5 optimizer """adam""" +151 5 training_loop """owa""" +151 5 negative_sampler """basic""" +151 5 evaluator """rankbased""" +151 6 dataset """yago310""" +151 6 model """convkb""" +151 6 loss """marginranking""" +151 6 regularizer """no""" +151 6 optimizer """adam""" +151 6 training_loop """owa""" +151 6 negative_sampler """basic""" +151 6 evaluator """rankbased""" +151 7 dataset """yago310""" +151 7 model """convkb""" +151 7 loss """marginranking""" +151 7 regularizer """no""" +151 7 optimizer """adam""" +151 7 training_loop """owa""" +151 7 negative_sampler """basic""" +151 7 evaluator """rankbased""" +151 8 dataset """yago310""" +151 8 model """convkb""" +151 8 loss """marginranking""" +151 8 regularizer """no""" +151 8 optimizer """adam""" +151 8 training_loop """owa""" +151 8 negative_sampler """basic""" +151 8 evaluator """rankbased""" +151 9 dataset 
"""yago310""" +151 9 model """convkb""" +151 9 loss """marginranking""" +151 9 regularizer """no""" +151 9 optimizer """adam""" +151 9 training_loop """owa""" +151 9 negative_sampler """basic""" +151 9 evaluator """rankbased""" +151 10 dataset """yago310""" +151 10 model """convkb""" +151 10 loss """marginranking""" +151 10 regularizer """no""" +151 10 optimizer """adam""" +151 10 training_loop """owa""" +151 10 negative_sampler """basic""" +151 10 evaluator """rankbased""" +151 11 dataset """yago310""" +151 11 model """convkb""" +151 11 loss """marginranking""" +151 11 regularizer """no""" +151 11 optimizer """adam""" +151 11 training_loop """owa""" +151 11 negative_sampler """basic""" +151 11 evaluator """rankbased""" +151 12 dataset """yago310""" +151 12 model """convkb""" +151 12 loss """marginranking""" +151 12 regularizer """no""" +151 12 optimizer """adam""" +151 12 training_loop """owa""" +151 12 negative_sampler """basic""" +151 12 evaluator """rankbased""" +151 13 dataset """yago310""" +151 13 model """convkb""" +151 13 loss """marginranking""" +151 13 regularizer """no""" +151 13 optimizer """adam""" +151 13 training_loop """owa""" +151 13 negative_sampler """basic""" +151 13 evaluator """rankbased""" +151 14 dataset """yago310""" +151 14 model """convkb""" +151 14 loss """marginranking""" +151 14 regularizer """no""" +151 14 optimizer """adam""" +151 14 training_loop """owa""" +151 14 negative_sampler """basic""" +151 14 evaluator """rankbased""" +151 15 dataset """yago310""" +151 15 model """convkb""" +151 15 loss """marginranking""" +151 15 regularizer """no""" +151 15 optimizer """adam""" +151 15 training_loop """owa""" +151 15 negative_sampler """basic""" +151 15 evaluator """rankbased""" +151 16 dataset """yago310""" +151 16 model """convkb""" +151 16 loss """marginranking""" +151 16 regularizer """no""" +151 16 optimizer """adam""" +151 16 training_loop """owa""" +151 16 negative_sampler """basic""" +151 16 evaluator """rankbased""" +151 17 
dataset """yago310""" +151 17 model """convkb""" +151 17 loss """marginranking""" +151 17 regularizer """no""" +151 17 optimizer """adam""" +151 17 training_loop """owa""" +151 17 negative_sampler """basic""" +151 17 evaluator """rankbased""" +151 18 dataset """yago310""" +151 18 model """convkb""" +151 18 loss """marginranking""" +151 18 regularizer """no""" +151 18 optimizer """adam""" +151 18 training_loop """owa""" +151 18 negative_sampler """basic""" +151 18 evaluator """rankbased""" +151 19 dataset """yago310""" +151 19 model """convkb""" +151 19 loss """marginranking""" +151 19 regularizer """no""" +151 19 optimizer """adam""" +151 19 training_loop """owa""" +151 19 negative_sampler """basic""" +151 19 evaluator """rankbased""" +151 20 dataset """yago310""" +151 20 model """convkb""" +151 20 loss """marginranking""" +151 20 regularizer """no""" +151 20 optimizer """adam""" +151 20 training_loop """owa""" +151 20 negative_sampler """basic""" +151 20 evaluator """rankbased""" +151 21 dataset """yago310""" +151 21 model """convkb""" +151 21 loss """marginranking""" +151 21 regularizer """no""" +151 21 optimizer """adam""" +151 21 training_loop """owa""" +151 21 negative_sampler """basic""" +151 21 evaluator """rankbased""" +151 22 dataset """yago310""" +151 22 model """convkb""" +151 22 loss """marginranking""" +151 22 regularizer """no""" +151 22 optimizer """adam""" +151 22 training_loop """owa""" +151 22 negative_sampler """basic""" +151 22 evaluator """rankbased""" +151 23 dataset """yago310""" +151 23 model """convkb""" +151 23 loss """marginranking""" +151 23 regularizer """no""" +151 23 optimizer """adam""" +151 23 training_loop """owa""" +151 23 negative_sampler """basic""" +151 23 evaluator """rankbased""" +151 24 dataset """yago310""" +151 24 model """convkb""" +151 24 loss """marginranking""" +151 24 regularizer """no""" +151 24 optimizer """adam""" +151 24 training_loop """owa""" +151 24 negative_sampler """basic""" +151 24 evaluator """rankbased""" 
+151 25 dataset """yago310""" +151 25 model """convkb""" +151 25 loss """marginranking""" +151 25 regularizer """no""" +151 25 optimizer """adam""" +151 25 training_loop """owa""" +151 25 negative_sampler """basic""" +151 25 evaluator """rankbased""" +151 26 dataset """yago310""" +151 26 model """convkb""" +151 26 loss """marginranking""" +151 26 regularizer """no""" +151 26 optimizer """adam""" +151 26 training_loop """owa""" +151 26 negative_sampler """basic""" +151 26 evaluator """rankbased""" +151 27 dataset """yago310""" +151 27 model """convkb""" +151 27 loss """marginranking""" +151 27 regularizer """no""" +151 27 optimizer """adam""" +151 27 training_loop """owa""" +151 27 negative_sampler """basic""" +151 27 evaluator """rankbased""" +151 28 dataset """yago310""" +151 28 model """convkb""" +151 28 loss """marginranking""" +151 28 regularizer """no""" +151 28 optimizer """adam""" +151 28 training_loop """owa""" +151 28 negative_sampler """basic""" +151 28 evaluator """rankbased""" +151 29 dataset """yago310""" +151 29 model """convkb""" +151 29 loss """marginranking""" +151 29 regularizer """no""" +151 29 optimizer """adam""" +151 29 training_loop """owa""" +151 29 negative_sampler """basic""" +151 29 evaluator """rankbased""" +151 30 dataset """yago310""" +151 30 model """convkb""" +151 30 loss """marginranking""" +151 30 regularizer """no""" +151 30 optimizer """adam""" +151 30 training_loop """owa""" +151 30 negative_sampler """basic""" +151 30 evaluator """rankbased""" +151 31 dataset """yago310""" +151 31 model """convkb""" +151 31 loss """marginranking""" +151 31 regularizer """no""" +151 31 optimizer """adam""" +151 31 training_loop """owa""" +151 31 negative_sampler """basic""" +151 31 evaluator """rankbased""" +151 32 dataset """yago310""" +151 32 model """convkb""" +151 32 loss """marginranking""" +151 32 regularizer """no""" +151 32 optimizer """adam""" +151 32 training_loop """owa""" +151 32 negative_sampler """basic""" +151 32 evaluator 
"""rankbased""" +151 33 dataset """yago310""" +151 33 model """convkb""" +151 33 loss """marginranking""" +151 33 regularizer """no""" +151 33 optimizer """adam""" +151 33 training_loop """owa""" +151 33 negative_sampler """basic""" +151 33 evaluator """rankbased""" +151 34 dataset """yago310""" +151 34 model """convkb""" +151 34 loss """marginranking""" +151 34 regularizer """no""" +151 34 optimizer """adam""" +151 34 training_loop """owa""" +151 34 negative_sampler """basic""" +151 34 evaluator """rankbased""" +151 35 dataset """yago310""" +151 35 model """convkb""" +151 35 loss """marginranking""" +151 35 regularizer """no""" +151 35 optimizer """adam""" +151 35 training_loop """owa""" +151 35 negative_sampler """basic""" +151 35 evaluator """rankbased""" +151 36 dataset """yago310""" +151 36 model """convkb""" +151 36 loss """marginranking""" +151 36 regularizer """no""" +151 36 optimizer """adam""" +151 36 training_loop """owa""" +151 36 negative_sampler """basic""" +151 36 evaluator """rankbased""" +151 37 dataset """yago310""" +151 37 model """convkb""" +151 37 loss """marginranking""" +151 37 regularizer """no""" +151 37 optimizer """adam""" +151 37 training_loop """owa""" +151 37 negative_sampler """basic""" +151 37 evaluator """rankbased""" +151 38 dataset """yago310""" +151 38 model """convkb""" +151 38 loss """marginranking""" +151 38 regularizer """no""" +151 38 optimizer """adam""" +151 38 training_loop """owa""" +151 38 negative_sampler """basic""" +151 38 evaluator """rankbased""" +151 39 dataset """yago310""" +151 39 model """convkb""" +151 39 loss """marginranking""" +151 39 regularizer """no""" +151 39 optimizer """adam""" +151 39 training_loop """owa""" +151 39 negative_sampler """basic""" +151 39 evaluator """rankbased""" +151 40 dataset """yago310""" +151 40 model """convkb""" +151 40 loss """marginranking""" +151 40 regularizer """no""" +151 40 optimizer """adam""" +151 40 training_loop """owa""" +151 40 negative_sampler """basic""" +151 40 
evaluator """rankbased""" +152 1 model.embedding_dim 1.0 +152 1 model.hidden_dropout_rate 0.15197731987580357 +152 1 model.num_filters 1.0 +152 1 loss.margin 5.4255228526409605 +152 1 loss.adversarial_temperature 0.39115579680549895 +152 1 optimizer.lr 0.04895932184562075 +152 1 negative_sampler.num_negs_per_pos 43.0 +152 1 training.batch_size 0.0 +152 2 model.embedding_dim 1.0 +152 2 model.hidden_dropout_rate 0.392185316941977 +152 2 model.num_filters 3.0 +152 2 loss.margin 9.983280009497689 +152 2 loss.adversarial_temperature 0.6619447570047092 +152 2 optimizer.lr 0.066056172001132 +152 2 negative_sampler.num_negs_per_pos 39.0 +152 2 training.batch_size 2.0 +152 3 model.embedding_dim 1.0 +152 3 model.hidden_dropout_rate 0.1244661042489736 +152 3 model.num_filters 0.0 +152 3 loss.margin 8.837582855215187 +152 3 loss.adversarial_temperature 0.6093822169940744 +152 3 optimizer.lr 0.031279086813762305 +152 3 negative_sampler.num_negs_per_pos 30.0 +152 3 training.batch_size 0.0 +152 4 model.embedding_dim 0.0 +152 4 model.hidden_dropout_rate 0.30075029249329055 +152 4 model.num_filters 9.0 +152 4 loss.margin 3.237982931033759 +152 4 loss.adversarial_temperature 0.46507933667555734 +152 4 optimizer.lr 0.011852695084315018 +152 4 negative_sampler.num_negs_per_pos 35.0 +152 4 training.batch_size 3.0 +152 5 model.embedding_dim 2.0 +152 5 model.hidden_dropout_rate 0.4394688073835996 +152 5 model.num_filters 8.0 +152 5 loss.margin 29.674191156123438 +152 5 loss.adversarial_temperature 0.31842054757490446 +152 5 optimizer.lr 0.0770111971630048 +152 5 negative_sampler.num_negs_per_pos 30.0 +152 5 training.batch_size 1.0 +152 6 model.embedding_dim 2.0 +152 6 model.hidden_dropout_rate 0.1229803259941774 +152 6 model.num_filters 8.0 +152 6 loss.margin 8.858614737288896 +152 6 loss.adversarial_temperature 0.6599708213020861 +152 6 optimizer.lr 0.018450161935376123 +152 6 negative_sampler.num_negs_per_pos 9.0 +152 6 training.batch_size 2.0 +152 7 model.embedding_dim 1.0 +152 7 
model.hidden_dropout_rate 0.25142655650453427 +152 7 model.num_filters 0.0 +152 7 loss.margin 22.909589219851775 +152 7 loss.adversarial_temperature 0.31290807017535455 +152 7 optimizer.lr 0.08093946642765638 +152 7 negative_sampler.num_negs_per_pos 42.0 +152 7 training.batch_size 0.0 +152 1 dataset """yago310""" +152 1 model """convkb""" +152 1 loss """nssa""" +152 1 regularizer """no""" +152 1 optimizer """adam""" +152 1 training_loop """owa""" +152 1 negative_sampler """basic""" +152 1 evaluator """rankbased""" +152 2 dataset """yago310""" +152 2 model """convkb""" +152 2 loss """nssa""" +152 2 regularizer """no""" +152 2 optimizer """adam""" +152 2 training_loop """owa""" +152 2 negative_sampler """basic""" +152 2 evaluator """rankbased""" +152 3 dataset """yago310""" +152 3 model """convkb""" +152 3 loss """nssa""" +152 3 regularizer """no""" +152 3 optimizer """adam""" +152 3 training_loop """owa""" +152 3 negative_sampler """basic""" +152 3 evaluator """rankbased""" +152 4 dataset """yago310""" +152 4 model """convkb""" +152 4 loss """nssa""" +152 4 regularizer """no""" +152 4 optimizer """adam""" +152 4 training_loop """owa""" +152 4 negative_sampler """basic""" +152 4 evaluator """rankbased""" +152 5 dataset """yago310""" +152 5 model """convkb""" +152 5 loss """nssa""" +152 5 regularizer """no""" +152 5 optimizer """adam""" +152 5 training_loop """owa""" +152 5 negative_sampler """basic""" +152 5 evaluator """rankbased""" +152 6 dataset """yago310""" +152 6 model """convkb""" +152 6 loss """nssa""" +152 6 regularizer """no""" +152 6 optimizer """adam""" +152 6 training_loop """owa""" +152 6 negative_sampler """basic""" +152 6 evaluator """rankbased""" +152 7 dataset """yago310""" +152 7 model """convkb""" +152 7 loss """nssa""" +152 7 regularizer """no""" +152 7 optimizer """adam""" +152 7 training_loop """owa""" +152 7 negative_sampler """basic""" +152 7 evaluator """rankbased""" +153 1 model.embedding_dim 0.0 +153 1 model.hidden_dropout_rate 
0.39168263759956345 +153 1 model.num_filters 7.0 +153 1 loss.margin 11.280342653690711 +153 1 loss.adversarial_temperature 0.1588486410241387 +153 1 optimizer.lr 0.010796483355768896 +153 1 negative_sampler.num_negs_per_pos 31.0 +153 1 training.batch_size 1.0 +153 2 model.embedding_dim 2.0 +153 2 model.hidden_dropout_rate 0.46805023478933183 +153 2 model.num_filters 9.0 +153 2 loss.margin 21.82937922306978 +153 2 loss.adversarial_temperature 0.973085190124024 +153 2 optimizer.lr 0.001332418515173236 +153 2 negative_sampler.num_negs_per_pos 41.0 +153 2 training.batch_size 2.0 +153 3 model.embedding_dim 0.0 +153 3 model.hidden_dropout_rate 0.3488396373802383 +153 3 model.num_filters 0.0 +153 3 loss.margin 27.72597243186004 +153 3 loss.adversarial_temperature 0.36045395990772255 +153 3 optimizer.lr 0.003017354192896165 +153 3 negative_sampler.num_negs_per_pos 16.0 +153 3 training.batch_size 0.0 +153 4 model.embedding_dim 0.0 +153 4 model.hidden_dropout_rate 0.2628867789662073 +153 4 model.num_filters 7.0 +153 4 loss.margin 2.8525732595962836 +153 4 loss.adversarial_temperature 0.7955974646843083 +153 4 optimizer.lr 0.08036525498247002 +153 4 negative_sampler.num_negs_per_pos 1.0 +153 4 training.batch_size 1.0 +153 5 model.embedding_dim 2.0 +153 5 model.hidden_dropout_rate 0.39835799156887286 +153 5 model.num_filters 9.0 +153 5 loss.margin 16.275035601890878 +153 5 loss.adversarial_temperature 0.44423351414832135 +153 5 optimizer.lr 0.07221403564370986 +153 5 negative_sampler.num_negs_per_pos 9.0 +153 5 training.batch_size 3.0 +153 6 model.embedding_dim 1.0 +153 6 model.hidden_dropout_rate 0.4370921945491013 +153 6 model.num_filters 3.0 +153 6 loss.margin 21.75469551870521 +153 6 loss.adversarial_temperature 0.5497803845188352 +153 6 optimizer.lr 0.08045583648629362 +153 6 negative_sampler.num_negs_per_pos 40.0 +153 6 training.batch_size 2.0 +153 7 model.embedding_dim 2.0 +153 7 model.hidden_dropout_rate 0.15858853972965573 +153 7 model.num_filters 1.0 +153 7 
loss.margin 24.985632331997774 +153 7 loss.adversarial_temperature 0.7217745364342412 +153 7 optimizer.lr 0.0023013509813474154 +153 7 negative_sampler.num_negs_per_pos 14.0 +153 7 training.batch_size 2.0 +153 8 model.embedding_dim 0.0 +153 8 model.hidden_dropout_rate 0.28108229566276166 +153 8 model.num_filters 0.0 +153 8 loss.margin 22.99134011879876 +153 8 loss.adversarial_temperature 0.49484405902027706 +153 8 optimizer.lr 0.0679450052414416 +153 8 negative_sampler.num_negs_per_pos 31.0 +153 8 training.batch_size 0.0 +153 9 model.embedding_dim 2.0 +153 9 model.hidden_dropout_rate 0.4862347280265581 +153 9 model.num_filters 7.0 +153 9 loss.margin 18.63393464482576 +153 9 loss.adversarial_temperature 0.8805618596240874 +153 9 optimizer.lr 0.04335619468355919 +153 9 negative_sampler.num_negs_per_pos 32.0 +153 9 training.batch_size 0.0 +153 10 model.embedding_dim 0.0 +153 10 model.hidden_dropout_rate 0.1816134000284019 +153 10 model.num_filters 2.0 +153 10 loss.margin 1.713221989528086 +153 10 loss.adversarial_temperature 0.23864051891538046 +153 10 optimizer.lr 0.0012237011087325473 +153 10 negative_sampler.num_negs_per_pos 6.0 +153 10 training.batch_size 0.0 +153 11 model.embedding_dim 0.0 +153 11 model.hidden_dropout_rate 0.455012984044491 +153 11 model.num_filters 2.0 +153 11 loss.margin 10.679277716303629 +153 11 loss.adversarial_temperature 0.8953365223391094 +153 11 optimizer.lr 0.007818056012185846 +153 11 negative_sampler.num_negs_per_pos 7.0 +153 11 training.batch_size 0.0 +153 12 model.embedding_dim 0.0 +153 12 model.hidden_dropout_rate 0.40529855801388603 +153 12 model.num_filters 9.0 +153 12 loss.margin 10.320827115284894 +153 12 loss.adversarial_temperature 0.8388452679561063 +153 12 optimizer.lr 0.050408576599431244 +153 12 negative_sampler.num_negs_per_pos 0.0 +153 12 training.batch_size 3.0 +153 13 model.embedding_dim 2.0 +153 13 model.hidden_dropout_rate 0.15135824409382886 +153 13 model.num_filters 9.0 +153 13 loss.margin 29.20016869445179 +153 
13 loss.adversarial_temperature 0.6413010129364992 +153 13 optimizer.lr 0.037970905197992885 +153 13 negative_sampler.num_negs_per_pos 10.0 +153 13 training.batch_size 0.0 +153 14 model.embedding_dim 2.0 +153 14 model.hidden_dropout_rate 0.2591200188281471 +153 14 model.num_filters 0.0 +153 14 loss.margin 10.842713697964495 +153 14 loss.adversarial_temperature 0.11102750085582871 +153 14 optimizer.lr 0.0010804287980463309 +153 14 negative_sampler.num_negs_per_pos 40.0 +153 14 training.batch_size 3.0 +153 15 model.embedding_dim 1.0 +153 15 model.hidden_dropout_rate 0.21566683065480677 +153 15 model.num_filters 2.0 +153 15 loss.margin 18.657556746979807 +153 15 loss.adversarial_temperature 0.8937183876837868 +153 15 optimizer.lr 0.007782220270842093 +153 15 negative_sampler.num_negs_per_pos 2.0 +153 15 training.batch_size 1.0 +153 16 model.embedding_dim 2.0 +153 16 model.hidden_dropout_rate 0.1847769805448353 +153 16 model.num_filters 9.0 +153 16 loss.margin 17.462299900817214 +153 16 loss.adversarial_temperature 0.5780346623378958 +153 16 optimizer.lr 0.007714198896803158 +153 16 negative_sampler.num_negs_per_pos 48.0 +153 16 training.batch_size 0.0 +153 17 model.embedding_dim 0.0 +153 17 model.hidden_dropout_rate 0.30477184629141896 +153 17 model.num_filters 5.0 +153 17 loss.margin 13.675525078988192 +153 17 loss.adversarial_temperature 0.781697302737915 +153 17 optimizer.lr 0.007449374005998672 +153 17 negative_sampler.num_negs_per_pos 19.0 +153 17 training.batch_size 3.0 +153 18 model.embedding_dim 2.0 +153 18 model.hidden_dropout_rate 0.28187819114839996 +153 18 model.num_filters 7.0 +153 18 loss.margin 29.79021475887264 +153 18 loss.adversarial_temperature 0.17853974834910533 +153 18 optimizer.lr 0.017539417152246713 +153 18 negative_sampler.num_negs_per_pos 14.0 +153 18 training.batch_size 0.0 +153 19 model.embedding_dim 0.0 +153 19 model.hidden_dropout_rate 0.3354143458850779 +153 19 model.num_filters 4.0 +153 19 loss.margin 7.904507545560213 +153 19 
loss.adversarial_temperature 0.9593149170862477 +153 19 optimizer.lr 0.0015139604292599936 +153 19 negative_sampler.num_negs_per_pos 14.0 +153 19 training.batch_size 1.0 +153 20 model.embedding_dim 1.0 +153 20 model.hidden_dropout_rate 0.3586494147030741 +153 20 model.num_filters 9.0 +153 20 loss.margin 12.319800781130217 +153 20 loss.adversarial_temperature 0.9966178839577138 +153 20 optimizer.lr 0.00799016194139504 +153 20 negative_sampler.num_negs_per_pos 27.0 +153 20 training.batch_size 3.0 +153 21 model.embedding_dim 1.0 +153 21 model.hidden_dropout_rate 0.13226792803957263 +153 21 model.num_filters 3.0 +153 21 loss.margin 2.710038493733823 +153 21 loss.adversarial_temperature 0.41114781582065285 +153 21 optimizer.lr 0.009038847447825566 +153 21 negative_sampler.num_negs_per_pos 24.0 +153 21 training.batch_size 1.0 +153 22 model.embedding_dim 2.0 +153 22 model.hidden_dropout_rate 0.4560480769637485 +153 22 model.num_filters 4.0 +153 22 loss.margin 12.526794154735406 +153 22 loss.adversarial_temperature 0.49371065807297254 +153 22 optimizer.lr 0.0021077414591277556 +153 22 negative_sampler.num_negs_per_pos 9.0 +153 22 training.batch_size 3.0 +153 23 model.embedding_dim 1.0 +153 23 model.hidden_dropout_rate 0.3882460348615051 +153 23 model.num_filters 6.0 +153 23 loss.margin 17.391078448220682 +153 23 loss.adversarial_temperature 0.3624068456402274 +153 23 optimizer.lr 0.05408386998591028 +153 23 negative_sampler.num_negs_per_pos 49.0 +153 23 training.batch_size 3.0 +153 24 model.embedding_dim 0.0 +153 24 model.hidden_dropout_rate 0.464095909133884 +153 24 model.num_filters 6.0 +153 24 loss.margin 19.219479502407133 +153 24 loss.adversarial_temperature 0.37182994993118457 +153 24 optimizer.lr 0.003660381487153157 +153 24 negative_sampler.num_negs_per_pos 8.0 +153 24 training.batch_size 1.0 +153 25 model.embedding_dim 0.0 +153 25 model.hidden_dropout_rate 0.1070991018310735 +153 25 model.num_filters 6.0 +153 25 loss.margin 12.977126362054998 +153 25 
loss.adversarial_temperature 0.2956118763957988 +153 25 optimizer.lr 0.0054843610176931986 +153 25 negative_sampler.num_negs_per_pos 11.0 +153 25 training.batch_size 0.0 +153 26 model.embedding_dim 1.0 +153 26 model.hidden_dropout_rate 0.17929072219605813 +153 26 model.num_filters 8.0 +153 26 loss.margin 27.479432625187403 +153 26 loss.adversarial_temperature 0.14907874006631092 +153 26 optimizer.lr 0.0013224200546726873 +153 26 negative_sampler.num_negs_per_pos 16.0 +153 26 training.batch_size 2.0 +153 27 model.embedding_dim 0.0 +153 27 model.hidden_dropout_rate 0.3307733144647238 +153 27 model.num_filters 3.0 +153 27 loss.margin 3.011477660053851 +153 27 loss.adversarial_temperature 0.7176438868575291 +153 27 optimizer.lr 0.044228677587670844 +153 27 negative_sampler.num_negs_per_pos 22.0 +153 27 training.batch_size 0.0 +153 28 model.embedding_dim 0.0 +153 28 model.hidden_dropout_rate 0.10748055367367293 +153 28 model.num_filters 0.0 +153 28 loss.margin 11.90239803032641 +153 28 loss.adversarial_temperature 0.195617518338784 +153 28 optimizer.lr 0.008764094444805491 +153 28 negative_sampler.num_negs_per_pos 41.0 +153 28 training.batch_size 2.0 +153 1 dataset """yago310""" +153 1 model """convkb""" +153 1 loss """nssa""" +153 1 regularizer """no""" +153 1 optimizer """adam""" +153 1 training_loop """owa""" +153 1 negative_sampler """basic""" +153 1 evaluator """rankbased""" +153 2 dataset """yago310""" +153 2 model """convkb""" +153 2 loss """nssa""" +153 2 regularizer """no""" +153 2 optimizer """adam""" +153 2 training_loop """owa""" +153 2 negative_sampler """basic""" +153 2 evaluator """rankbased""" +153 3 dataset """yago310""" +153 3 model """convkb""" +153 3 loss """nssa""" +153 3 regularizer """no""" +153 3 optimizer """adam""" +153 3 training_loop """owa""" +153 3 negative_sampler """basic""" +153 3 evaluator """rankbased""" +153 4 dataset """yago310""" +153 4 model """convkb""" +153 4 loss """nssa""" +153 4 regularizer """no""" +153 4 optimizer """adam""" 
+153 4 training_loop """owa""" +153 4 negative_sampler """basic""" +153 4 evaluator """rankbased""" +153 5 dataset """yago310""" +153 5 model """convkb""" +153 5 loss """nssa""" +153 5 regularizer """no""" +153 5 optimizer """adam""" +153 5 training_loop """owa""" +153 5 negative_sampler """basic""" +153 5 evaluator """rankbased""" +153 6 dataset """yago310""" +153 6 model """convkb""" +153 6 loss """nssa""" +153 6 regularizer """no""" +153 6 optimizer """adam""" +153 6 training_loop """owa""" +153 6 negative_sampler """basic""" +153 6 evaluator """rankbased""" +153 7 dataset """yago310""" +153 7 model """convkb""" +153 7 loss """nssa""" +153 7 regularizer """no""" +153 7 optimizer """adam""" +153 7 training_loop """owa""" +153 7 negative_sampler """basic""" +153 7 evaluator """rankbased""" +153 8 dataset """yago310""" +153 8 model """convkb""" +153 8 loss """nssa""" +153 8 regularizer """no""" +153 8 optimizer """adam""" +153 8 training_loop """owa""" +153 8 negative_sampler """basic""" +153 8 evaluator """rankbased""" +153 9 dataset """yago310""" +153 9 model """convkb""" +153 9 loss """nssa""" +153 9 regularizer """no""" +153 9 optimizer """adam""" +153 9 training_loop """owa""" +153 9 negative_sampler """basic""" +153 9 evaluator """rankbased""" +153 10 dataset """yago310""" +153 10 model """convkb""" +153 10 loss """nssa""" +153 10 regularizer """no""" +153 10 optimizer """adam""" +153 10 training_loop """owa""" +153 10 negative_sampler """basic""" +153 10 evaluator """rankbased""" +153 11 dataset """yago310""" +153 11 model """convkb""" +153 11 loss """nssa""" +153 11 regularizer """no""" +153 11 optimizer """adam""" +153 11 training_loop """owa""" +153 11 negative_sampler """basic""" +153 11 evaluator """rankbased""" +153 12 dataset """yago310""" +153 12 model """convkb""" +153 12 loss """nssa""" +153 12 regularizer """no""" +153 12 optimizer """adam""" +153 12 training_loop """owa""" +153 12 negative_sampler """basic""" +153 12 evaluator """rankbased""" 
+153 13 dataset """yago310""" +153 13 model """convkb""" +153 13 loss """nssa""" +153 13 regularizer """no""" +153 13 optimizer """adam""" +153 13 training_loop """owa""" +153 13 negative_sampler """basic""" +153 13 evaluator """rankbased""" +153 14 dataset """yago310""" +153 14 model """convkb""" +153 14 loss """nssa""" +153 14 regularizer """no""" +153 14 optimizer """adam""" +153 14 training_loop """owa""" +153 14 negative_sampler """basic""" +153 14 evaluator """rankbased""" +153 15 dataset """yago310""" +153 15 model """convkb""" +153 15 loss """nssa""" +153 15 regularizer """no""" +153 15 optimizer """adam""" +153 15 training_loop """owa""" +153 15 negative_sampler """basic""" +153 15 evaluator """rankbased""" +153 16 dataset """yago310""" +153 16 model """convkb""" +153 16 loss """nssa""" +153 16 regularizer """no""" +153 16 optimizer """adam""" +153 16 training_loop """owa""" +153 16 negative_sampler """basic""" +153 16 evaluator """rankbased""" +153 17 dataset """yago310""" +153 17 model """convkb""" +153 17 loss """nssa""" +153 17 regularizer """no""" +153 17 optimizer """adam""" +153 17 training_loop """owa""" +153 17 negative_sampler """basic""" +153 17 evaluator """rankbased""" +153 18 dataset """yago310""" +153 18 model """convkb""" +153 18 loss """nssa""" +153 18 regularizer """no""" +153 18 optimizer """adam""" +153 18 training_loop """owa""" +153 18 negative_sampler """basic""" +153 18 evaluator """rankbased""" +153 19 dataset """yago310""" +153 19 model """convkb""" +153 19 loss """nssa""" +153 19 regularizer """no""" +153 19 optimizer """adam""" +153 19 training_loop """owa""" +153 19 negative_sampler """basic""" +153 19 evaluator """rankbased""" +153 20 dataset """yago310""" +153 20 model """convkb""" +153 20 loss """nssa""" +153 20 regularizer """no""" +153 20 optimizer """adam""" +153 20 training_loop """owa""" +153 20 negative_sampler """basic""" +153 20 evaluator """rankbased""" +153 21 dataset """yago310""" +153 21 model """convkb""" +153 
21 loss """nssa""" +153 21 regularizer """no""" +153 21 optimizer """adam""" +153 21 training_loop """owa""" +153 21 negative_sampler """basic""" +153 21 evaluator """rankbased""" +153 22 dataset """yago310""" +153 22 model """convkb""" +153 22 loss """nssa""" +153 22 regularizer """no""" +153 22 optimizer """adam""" +153 22 training_loop """owa""" +153 22 negative_sampler """basic""" +153 22 evaluator """rankbased""" +153 23 dataset """yago310""" +153 23 model """convkb""" +153 23 loss """nssa""" +153 23 regularizer """no""" +153 23 optimizer """adam""" +153 23 training_loop """owa""" +153 23 negative_sampler """basic""" +153 23 evaluator """rankbased""" +153 24 dataset """yago310""" +153 24 model """convkb""" +153 24 loss """nssa""" +153 24 regularizer """no""" +153 24 optimizer """adam""" +153 24 training_loop """owa""" +153 24 negative_sampler """basic""" +153 24 evaluator """rankbased""" +153 25 dataset """yago310""" +153 25 model """convkb""" +153 25 loss """nssa""" +153 25 regularizer """no""" +153 25 optimizer """adam""" +153 25 training_loop """owa""" +153 25 negative_sampler """basic""" +153 25 evaluator """rankbased""" +153 26 dataset """yago310""" +153 26 model """convkb""" +153 26 loss """nssa""" +153 26 regularizer """no""" +153 26 optimizer """adam""" +153 26 training_loop """owa""" +153 26 negative_sampler """basic""" +153 26 evaluator """rankbased""" +153 27 dataset """yago310""" +153 27 model """convkb""" +153 27 loss """nssa""" +153 27 regularizer """no""" +153 27 optimizer """adam""" +153 27 training_loop """owa""" +153 27 negative_sampler """basic""" +153 27 evaluator """rankbased""" +153 28 dataset """yago310""" +153 28 model """convkb""" +153 28 loss """nssa""" +153 28 regularizer """no""" +153 28 optimizer """adam""" +153 28 training_loop """owa""" +153 28 negative_sampler """basic""" +153 28 evaluator """rankbased""" +154 1 model.embedding_dim 2.0 +154 1 optimizer.lr 0.012293299608450603 +154 1 negative_sampler.num_negs_per_pos 45.0 +154 1 
training.batch_size 2.0 +154 2 model.embedding_dim 0.0 +154 2 optimizer.lr 0.06555122188338298 +154 2 negative_sampler.num_negs_per_pos 70.0 +154 2 training.batch_size 0.0 +154 3 model.embedding_dim 2.0 +154 3 optimizer.lr 0.009075277026110315 +154 3 negative_sampler.num_negs_per_pos 36.0 +154 3 training.batch_size 2.0 +154 4 model.embedding_dim 2.0 +154 4 optimizer.lr 0.0847899603908822 +154 4 negative_sampler.num_negs_per_pos 27.0 +154 4 training.batch_size 1.0 +154 5 model.embedding_dim 1.0 +154 5 optimizer.lr 0.07644415887547844 +154 5 negative_sampler.num_negs_per_pos 4.0 +154 5 training.batch_size 2.0 +154 6 model.embedding_dim 2.0 +154 6 optimizer.lr 0.00241533937162227 +154 6 negative_sampler.num_negs_per_pos 78.0 +154 6 training.batch_size 1.0 +154 7 model.embedding_dim 2.0 +154 7 optimizer.lr 0.02710243078020779 +154 7 negative_sampler.num_negs_per_pos 15.0 +154 7 training.batch_size 2.0 +154 8 model.embedding_dim 0.0 +154 8 optimizer.lr 0.0012010367182682454 +154 8 negative_sampler.num_negs_per_pos 28.0 +154 8 training.batch_size 0.0 +154 9 model.embedding_dim 0.0 +154 9 optimizer.lr 0.005823264895235011 +154 9 negative_sampler.num_negs_per_pos 76.0 +154 9 training.batch_size 2.0 +154 10 model.embedding_dim 0.0 +154 10 optimizer.lr 0.01918981471605716 +154 10 negative_sampler.num_negs_per_pos 66.0 +154 10 training.batch_size 2.0 +154 11 model.embedding_dim 1.0 +154 11 optimizer.lr 0.003080962417211297 +154 11 negative_sampler.num_negs_per_pos 53.0 +154 11 training.batch_size 2.0 +154 12 model.embedding_dim 0.0 +154 12 optimizer.lr 0.03466559972438212 +154 12 negative_sampler.num_negs_per_pos 47.0 +154 12 training.batch_size 1.0 +154 13 model.embedding_dim 2.0 +154 13 optimizer.lr 0.011259424713424612 +154 13 negative_sampler.num_negs_per_pos 27.0 +154 13 training.batch_size 2.0 +154 14 model.embedding_dim 1.0 +154 14 optimizer.lr 0.032617518279493114 +154 14 negative_sampler.num_negs_per_pos 91.0 +154 14 training.batch_size 1.0 +154 15 
model.embedding_dim 0.0 +154 15 optimizer.lr 0.006078009837476232 +154 15 negative_sampler.num_negs_per_pos 61.0 +154 15 training.batch_size 2.0 +154 16 model.embedding_dim 0.0 +154 16 optimizer.lr 0.058376176973937995 +154 16 negative_sampler.num_negs_per_pos 99.0 +154 16 training.batch_size 0.0 +154 17 model.embedding_dim 2.0 +154 17 optimizer.lr 0.015879776333559397 +154 17 negative_sampler.num_negs_per_pos 44.0 +154 17 training.batch_size 1.0 +154 18 model.embedding_dim 2.0 +154 18 optimizer.lr 0.01268729097227086 +154 18 negative_sampler.num_negs_per_pos 7.0 +154 18 training.batch_size 1.0 +154 19 model.embedding_dim 1.0 +154 19 optimizer.lr 0.006283440105331105 +154 19 negative_sampler.num_negs_per_pos 28.0 +154 19 training.batch_size 2.0 +154 20 model.embedding_dim 0.0 +154 20 optimizer.lr 0.04814404534340069 +154 20 negative_sampler.num_negs_per_pos 0.0 +154 20 training.batch_size 2.0 +154 21 model.embedding_dim 2.0 +154 21 optimizer.lr 0.018628267609274475 +154 21 negative_sampler.num_negs_per_pos 66.0 +154 21 training.batch_size 2.0 +154 22 model.embedding_dim 1.0 +154 22 optimizer.lr 0.0050213884355883144 +154 22 negative_sampler.num_negs_per_pos 96.0 +154 22 training.batch_size 2.0 +154 1 dataset """fb15k237""" +154 1 model """distmult""" +154 1 loss """bceaftersigmoid""" +154 1 regularizer """no""" +154 1 optimizer """adam""" +154 1 training_loop """owa""" +154 1 negative_sampler """basic""" +154 1 evaluator """rankbased""" +154 2 dataset """fb15k237""" +154 2 model """distmult""" +154 2 loss """bceaftersigmoid""" +154 2 regularizer """no""" +154 2 optimizer """adam""" +154 2 training_loop """owa""" +154 2 negative_sampler """basic""" +154 2 evaluator """rankbased""" +154 3 dataset """fb15k237""" +154 3 model """distmult""" +154 3 loss """bceaftersigmoid""" +154 3 regularizer """no""" +154 3 optimizer """adam""" +154 3 training_loop """owa""" +154 3 negative_sampler """basic""" +154 3 evaluator """rankbased""" +154 4 dataset """fb15k237""" +154 4 model 
"""distmult""" +154 4 loss """bceaftersigmoid""" +154 4 regularizer """no""" +154 4 optimizer """adam""" +154 4 training_loop """owa""" +154 4 negative_sampler """basic""" +154 4 evaluator """rankbased""" +154 5 dataset """fb15k237""" +154 5 model """distmult""" +154 5 loss """bceaftersigmoid""" +154 5 regularizer """no""" +154 5 optimizer """adam""" +154 5 training_loop """owa""" +154 5 negative_sampler """basic""" +154 5 evaluator """rankbased""" +154 6 dataset """fb15k237""" +154 6 model """distmult""" +154 6 loss """bceaftersigmoid""" +154 6 regularizer """no""" +154 6 optimizer """adam""" +154 6 training_loop """owa""" +154 6 negative_sampler """basic""" +154 6 evaluator """rankbased""" +154 7 dataset """fb15k237""" +154 7 model """distmult""" +154 7 loss """bceaftersigmoid""" +154 7 regularizer """no""" +154 7 optimizer """adam""" +154 7 training_loop """owa""" +154 7 negative_sampler """basic""" +154 7 evaluator """rankbased""" +154 8 dataset """fb15k237""" +154 8 model """distmult""" +154 8 loss """bceaftersigmoid""" +154 8 regularizer """no""" +154 8 optimizer """adam""" +154 8 training_loop """owa""" +154 8 negative_sampler """basic""" +154 8 evaluator """rankbased""" +154 9 dataset """fb15k237""" +154 9 model """distmult""" +154 9 loss """bceaftersigmoid""" +154 9 regularizer """no""" +154 9 optimizer """adam""" +154 9 training_loop """owa""" +154 9 negative_sampler """basic""" +154 9 evaluator """rankbased""" +154 10 dataset """fb15k237""" +154 10 model """distmult""" +154 10 loss """bceaftersigmoid""" +154 10 regularizer """no""" +154 10 optimizer """adam""" +154 10 training_loop """owa""" +154 10 negative_sampler """basic""" +154 10 evaluator """rankbased""" +154 11 dataset """fb15k237""" +154 11 model """distmult""" +154 11 loss """bceaftersigmoid""" +154 11 regularizer """no""" +154 11 optimizer """adam""" +154 11 training_loop """owa""" +154 11 negative_sampler """basic""" +154 11 evaluator """rankbased""" +154 12 dataset """fb15k237""" +154 12 
model """distmult""" +154 12 loss """bceaftersigmoid""" +154 12 regularizer """no""" +154 12 optimizer """adam""" +154 12 training_loop """owa""" +154 12 negative_sampler """basic""" +154 12 evaluator """rankbased""" +154 13 dataset """fb15k237""" +154 13 model """distmult""" +154 13 loss """bceaftersigmoid""" +154 13 regularizer """no""" +154 13 optimizer """adam""" +154 13 training_loop """owa""" +154 13 negative_sampler """basic""" +154 13 evaluator """rankbased""" +154 14 dataset """fb15k237""" +154 14 model """distmult""" +154 14 loss """bceaftersigmoid""" +154 14 regularizer """no""" +154 14 optimizer """adam""" +154 14 training_loop """owa""" +154 14 negative_sampler """basic""" +154 14 evaluator """rankbased""" +154 15 dataset """fb15k237""" +154 15 model """distmult""" +154 15 loss """bceaftersigmoid""" +154 15 regularizer """no""" +154 15 optimizer """adam""" +154 15 training_loop """owa""" +154 15 negative_sampler """basic""" +154 15 evaluator """rankbased""" +154 16 dataset """fb15k237""" +154 16 model """distmult""" +154 16 loss """bceaftersigmoid""" +154 16 regularizer """no""" +154 16 optimizer """adam""" +154 16 training_loop """owa""" +154 16 negative_sampler """basic""" +154 16 evaluator """rankbased""" +154 17 dataset """fb15k237""" +154 17 model """distmult""" +154 17 loss """bceaftersigmoid""" +154 17 regularizer """no""" +154 17 optimizer """adam""" +154 17 training_loop """owa""" +154 17 negative_sampler """basic""" +154 17 evaluator """rankbased""" +154 18 dataset """fb15k237""" +154 18 model """distmult""" +154 18 loss """bceaftersigmoid""" +154 18 regularizer """no""" +154 18 optimizer """adam""" +154 18 training_loop """owa""" +154 18 negative_sampler """basic""" +154 18 evaluator """rankbased""" +154 19 dataset """fb15k237""" +154 19 model """distmult""" +154 19 loss """bceaftersigmoid""" +154 19 regularizer """no""" +154 19 optimizer """adam""" +154 19 training_loop """owa""" +154 19 negative_sampler """basic""" +154 19 evaluator 
"""rankbased""" +154 20 dataset """fb15k237""" +154 20 model """distmult""" +154 20 loss """bceaftersigmoid""" +154 20 regularizer """no""" +154 20 optimizer """adam""" +154 20 training_loop """owa""" +154 20 negative_sampler """basic""" +154 20 evaluator """rankbased""" +154 21 dataset """fb15k237""" +154 21 model """distmult""" +154 21 loss """bceaftersigmoid""" +154 21 regularizer """no""" +154 21 optimizer """adam""" +154 21 training_loop """owa""" +154 21 negative_sampler """basic""" +154 21 evaluator """rankbased""" +154 22 dataset """fb15k237""" +154 22 model """distmult""" +154 22 loss """bceaftersigmoid""" +154 22 regularizer """no""" +154 22 optimizer """adam""" +154 22 training_loop """owa""" +154 22 negative_sampler """basic""" +154 22 evaluator """rankbased""" +155 1 model.embedding_dim 0.0 +155 1 optimizer.lr 0.003669746137636231 +155 1 negative_sampler.num_negs_per_pos 11.0 +155 1 training.batch_size 0.0 +155 2 model.embedding_dim 1.0 +155 2 optimizer.lr 0.0025279622268933673 +155 2 negative_sampler.num_negs_per_pos 24.0 +155 2 training.batch_size 2.0 +155 3 model.embedding_dim 0.0 +155 3 optimizer.lr 0.03189743532408613 +155 3 negative_sampler.num_negs_per_pos 62.0 +155 3 training.batch_size 0.0 +155 4 model.embedding_dim 2.0 +155 4 optimizer.lr 0.0238842655275899 +155 4 negative_sampler.num_negs_per_pos 50.0 +155 4 training.batch_size 0.0 +155 5 model.embedding_dim 1.0 +155 5 optimizer.lr 0.005669201686030133 +155 5 negative_sampler.num_negs_per_pos 38.0 +155 5 training.batch_size 2.0 +155 6 model.embedding_dim 2.0 +155 6 optimizer.lr 0.003981203744781827 +155 6 negative_sampler.num_negs_per_pos 98.0 +155 6 training.batch_size 2.0 +155 7 model.embedding_dim 2.0 +155 7 optimizer.lr 0.0020829062289508464 +155 7 negative_sampler.num_negs_per_pos 48.0 +155 7 training.batch_size 1.0 +155 8 model.embedding_dim 1.0 +155 8 optimizer.lr 0.016505537541165228 +155 8 negative_sampler.num_negs_per_pos 57.0 +155 8 training.batch_size 2.0 +155 9 
model.embedding_dim 2.0 +155 9 optimizer.lr 0.07229102279791509 +155 9 negative_sampler.num_negs_per_pos 49.0 +155 9 training.batch_size 2.0 +155 10 model.embedding_dim 2.0 +155 10 optimizer.lr 0.08668461961033454 +155 10 negative_sampler.num_negs_per_pos 9.0 +155 10 training.batch_size 0.0 +155 11 model.embedding_dim 0.0 +155 11 optimizer.lr 0.0019787154345326193 +155 11 negative_sampler.num_negs_per_pos 86.0 +155 11 training.batch_size 2.0 +155 12 model.embedding_dim 0.0 +155 12 optimizer.lr 0.0033333104851179774 +155 12 negative_sampler.num_negs_per_pos 43.0 +155 12 training.batch_size 1.0 +155 13 model.embedding_dim 1.0 +155 13 optimizer.lr 0.0050534707819256475 +155 13 negative_sampler.num_negs_per_pos 96.0 +155 13 training.batch_size 2.0 +155 14 model.embedding_dim 2.0 +155 14 optimizer.lr 0.07467215884241904 +155 14 negative_sampler.num_negs_per_pos 43.0 +155 14 training.batch_size 2.0 +155 15 model.embedding_dim 1.0 +155 15 optimizer.lr 0.029871492141386232 +155 15 negative_sampler.num_negs_per_pos 96.0 +155 15 training.batch_size 0.0 +155 16 model.embedding_dim 1.0 +155 16 optimizer.lr 0.003924437047021443 +155 16 negative_sampler.num_negs_per_pos 14.0 +155 16 training.batch_size 1.0 +155 17 model.embedding_dim 0.0 +155 17 optimizer.lr 0.022381315386898696 +155 17 negative_sampler.num_negs_per_pos 35.0 +155 17 training.batch_size 1.0 +155 18 model.embedding_dim 1.0 +155 18 optimizer.lr 0.0466363045377713 +155 18 negative_sampler.num_negs_per_pos 26.0 +155 18 training.batch_size 0.0 +155 19 model.embedding_dim 0.0 +155 19 optimizer.lr 0.0031336451362697264 +155 19 negative_sampler.num_negs_per_pos 36.0 +155 19 training.batch_size 2.0 +155 20 model.embedding_dim 2.0 +155 20 optimizer.lr 0.04733127355457067 +155 20 negative_sampler.num_negs_per_pos 54.0 +155 20 training.batch_size 1.0 +155 21 model.embedding_dim 1.0 +155 21 optimizer.lr 0.00127986072965658 +155 21 negative_sampler.num_negs_per_pos 73.0 +155 21 training.batch_size 2.0 +155 22 
model.embedding_dim 2.0 +155 22 optimizer.lr 0.012120512679947433 +155 22 negative_sampler.num_negs_per_pos 36.0 +155 22 training.batch_size 2.0 +155 23 model.embedding_dim 1.0 +155 23 optimizer.lr 0.01633395830067992 +155 23 negative_sampler.num_negs_per_pos 70.0 +155 23 training.batch_size 1.0 +155 24 model.embedding_dim 0.0 +155 24 optimizer.lr 0.0019639534517052865 +155 24 negative_sampler.num_negs_per_pos 4.0 +155 24 training.batch_size 1.0 +155 25 model.embedding_dim 2.0 +155 25 optimizer.lr 0.006094137593093781 +155 25 negative_sampler.num_negs_per_pos 45.0 +155 25 training.batch_size 1.0 +155 26 model.embedding_dim 1.0 +155 26 optimizer.lr 0.00118446434311934 +155 26 negative_sampler.num_negs_per_pos 42.0 +155 26 training.batch_size 1.0 +155 27 model.embedding_dim 1.0 +155 27 optimizer.lr 0.0016834760791348262 +155 27 negative_sampler.num_negs_per_pos 56.0 +155 27 training.batch_size 1.0 +155 28 model.embedding_dim 2.0 +155 28 optimizer.lr 0.003788847406794182 +155 28 negative_sampler.num_negs_per_pos 97.0 +155 28 training.batch_size 0.0 +155 29 model.embedding_dim 0.0 +155 29 optimizer.lr 0.012804450220273312 +155 29 negative_sampler.num_negs_per_pos 25.0 +155 29 training.batch_size 1.0 +155 30 model.embedding_dim 1.0 +155 30 optimizer.lr 0.01801021604578291 +155 30 negative_sampler.num_negs_per_pos 91.0 +155 30 training.batch_size 1.0 +155 1 dataset """fb15k237""" +155 1 model """distmult""" +155 1 loss """bceaftersigmoid""" +155 1 regularizer """no""" +155 1 optimizer """adam""" +155 1 training_loop """owa""" +155 1 negative_sampler """basic""" +155 1 evaluator """rankbased""" +155 2 dataset """fb15k237""" +155 2 model """distmult""" +155 2 loss """bceaftersigmoid""" +155 2 regularizer """no""" +155 2 optimizer """adam""" +155 2 training_loop """owa""" +155 2 negative_sampler """basic""" +155 2 evaluator """rankbased""" +155 3 dataset """fb15k237""" +155 3 model """distmult""" +155 3 loss """bceaftersigmoid""" +155 3 regularizer """no""" +155 3 optimizer 
"""adam""" +155 3 training_loop """owa""" +155 3 negative_sampler """basic""" +155 3 evaluator """rankbased""" +155 4 dataset """fb15k237""" +155 4 model """distmult""" +155 4 loss """bceaftersigmoid""" +155 4 regularizer """no""" +155 4 optimizer """adam""" +155 4 training_loop """owa""" +155 4 negative_sampler """basic""" +155 4 evaluator """rankbased""" +155 5 dataset """fb15k237""" +155 5 model """distmult""" +155 5 loss """bceaftersigmoid""" +155 5 regularizer """no""" +155 5 optimizer """adam""" +155 5 training_loop """owa""" +155 5 negative_sampler """basic""" +155 5 evaluator """rankbased""" +155 6 dataset """fb15k237""" +155 6 model """distmult""" +155 6 loss """bceaftersigmoid""" +155 6 regularizer """no""" +155 6 optimizer """adam""" +155 6 training_loop """owa""" +155 6 negative_sampler """basic""" +155 6 evaluator """rankbased""" +155 7 dataset """fb15k237""" +155 7 model """distmult""" +155 7 loss """bceaftersigmoid""" +155 7 regularizer """no""" +155 7 optimizer """adam""" +155 7 training_loop """owa""" +155 7 negative_sampler """basic""" +155 7 evaluator """rankbased""" +155 8 dataset """fb15k237""" +155 8 model """distmult""" +155 8 loss """bceaftersigmoid""" +155 8 regularizer """no""" +155 8 optimizer """adam""" +155 8 training_loop """owa""" +155 8 negative_sampler """basic""" +155 8 evaluator """rankbased""" +155 9 dataset """fb15k237""" +155 9 model """distmult""" +155 9 loss """bceaftersigmoid""" +155 9 regularizer """no""" +155 9 optimizer """adam""" +155 9 training_loop """owa""" +155 9 negative_sampler """basic""" +155 9 evaluator """rankbased""" +155 10 dataset """fb15k237""" +155 10 model """distmult""" +155 10 loss """bceaftersigmoid""" +155 10 regularizer """no""" +155 10 optimizer """adam""" +155 10 training_loop """owa""" +155 10 negative_sampler """basic""" +155 10 evaluator """rankbased""" +155 11 dataset """fb15k237""" +155 11 model """distmult""" +155 11 loss """bceaftersigmoid""" +155 11 regularizer """no""" +155 11 optimizer 
"""adam""" +155 11 training_loop """owa""" +155 11 negative_sampler """basic""" +155 11 evaluator """rankbased""" +155 12 dataset """fb15k237""" +155 12 model """distmult""" +155 12 loss """bceaftersigmoid""" +155 12 regularizer """no""" +155 12 optimizer """adam""" +155 12 training_loop """owa""" +155 12 negative_sampler """basic""" +155 12 evaluator """rankbased""" +155 13 dataset """fb15k237""" +155 13 model """distmult""" +155 13 loss """bceaftersigmoid""" +155 13 regularizer """no""" +155 13 optimizer """adam""" +155 13 training_loop """owa""" +155 13 negative_sampler """basic""" +155 13 evaluator """rankbased""" +155 14 dataset """fb15k237""" +155 14 model """distmult""" +155 14 loss """bceaftersigmoid""" +155 14 regularizer """no""" +155 14 optimizer """adam""" +155 14 training_loop """owa""" +155 14 negative_sampler """basic""" +155 14 evaluator """rankbased""" +155 15 dataset """fb15k237""" +155 15 model """distmult""" +155 15 loss """bceaftersigmoid""" +155 15 regularizer """no""" +155 15 optimizer """adam""" +155 15 training_loop """owa""" +155 15 negative_sampler """basic""" +155 15 evaluator """rankbased""" +155 16 dataset """fb15k237""" +155 16 model """distmult""" +155 16 loss """bceaftersigmoid""" +155 16 regularizer """no""" +155 16 optimizer """adam""" +155 16 training_loop """owa""" +155 16 negative_sampler """basic""" +155 16 evaluator """rankbased""" +155 17 dataset """fb15k237""" +155 17 model """distmult""" +155 17 loss """bceaftersigmoid""" +155 17 regularizer """no""" +155 17 optimizer """adam""" +155 17 training_loop """owa""" +155 17 negative_sampler """basic""" +155 17 evaluator """rankbased""" +155 18 dataset """fb15k237""" +155 18 model """distmult""" +155 18 loss """bceaftersigmoid""" +155 18 regularizer """no""" +155 18 optimizer """adam""" +155 18 training_loop """owa""" +155 18 negative_sampler """basic""" +155 18 evaluator """rankbased""" +155 19 dataset """fb15k237""" +155 19 model """distmult""" +155 19 loss 
"""bceaftersigmoid""" +155 19 regularizer """no""" +155 19 optimizer """adam""" +155 19 training_loop """owa""" +155 19 negative_sampler """basic""" +155 19 evaluator """rankbased""" +155 20 dataset """fb15k237""" +155 20 model """distmult""" +155 20 loss """bceaftersigmoid""" +155 20 regularizer """no""" +155 20 optimizer """adam""" +155 20 training_loop """owa""" +155 20 negative_sampler """basic""" +155 20 evaluator """rankbased""" +155 21 dataset """fb15k237""" +155 21 model """distmult""" +155 21 loss """bceaftersigmoid""" +155 21 regularizer """no""" +155 21 optimizer """adam""" +155 21 training_loop """owa""" +155 21 negative_sampler """basic""" +155 21 evaluator """rankbased""" +155 22 dataset """fb15k237""" +155 22 model """distmult""" +155 22 loss """bceaftersigmoid""" +155 22 regularizer """no""" +155 22 optimizer """adam""" +155 22 training_loop """owa""" +155 22 negative_sampler """basic""" +155 22 evaluator """rankbased""" +155 23 dataset """fb15k237""" +155 23 model """distmult""" +155 23 loss """bceaftersigmoid""" +155 23 regularizer """no""" +155 23 optimizer """adam""" +155 23 training_loop """owa""" +155 23 negative_sampler """basic""" +155 23 evaluator """rankbased""" +155 24 dataset """fb15k237""" +155 24 model """distmult""" +155 24 loss """bceaftersigmoid""" +155 24 regularizer """no""" +155 24 optimizer """adam""" +155 24 training_loop """owa""" +155 24 negative_sampler """basic""" +155 24 evaluator """rankbased""" +155 25 dataset """fb15k237""" +155 25 model """distmult""" +155 25 loss """bceaftersigmoid""" +155 25 regularizer """no""" +155 25 optimizer """adam""" +155 25 training_loop """owa""" +155 25 negative_sampler """basic""" +155 25 evaluator """rankbased""" +155 26 dataset """fb15k237""" +155 26 model """distmult""" +155 26 loss """bceaftersigmoid""" +155 26 regularizer """no""" +155 26 optimizer """adam""" +155 26 training_loop """owa""" +155 26 negative_sampler """basic""" +155 26 evaluator """rankbased""" +155 27 dataset 
"""fb15k237""" +155 27 model """distmult""" +155 27 loss """bceaftersigmoid""" +155 27 regularizer """no""" +155 27 optimizer """adam""" +155 27 training_loop """owa""" +155 27 negative_sampler """basic""" +155 27 evaluator """rankbased""" +155 28 dataset """fb15k237""" +155 28 model """distmult""" +155 28 loss """bceaftersigmoid""" +155 28 regularizer """no""" +155 28 optimizer """adam""" +155 28 training_loop """owa""" +155 28 negative_sampler """basic""" +155 28 evaluator """rankbased""" +155 29 dataset """fb15k237""" +155 29 model """distmult""" +155 29 loss """bceaftersigmoid""" +155 29 regularizer """no""" +155 29 optimizer """adam""" +155 29 training_loop """owa""" +155 29 negative_sampler """basic""" +155 29 evaluator """rankbased""" +155 30 dataset """fb15k237""" +155 30 model """distmult""" +155 30 loss """bceaftersigmoid""" +155 30 regularizer """no""" +155 30 optimizer """adam""" +155 30 training_loop """owa""" +155 30 negative_sampler """basic""" +155 30 evaluator """rankbased""" +156 1 model.embedding_dim 1.0 +156 1 optimizer.lr 0.0021698857181447592 +156 1 training.batch_size 0.0 +156 1 training.label_smoothing 0.15708936355735148 +156 2 model.embedding_dim 2.0 +156 2 optimizer.lr 0.07941299646504948 +156 2 training.batch_size 0.0 +156 2 training.label_smoothing 0.005992717009117137 +156 3 model.embedding_dim 0.0 +156 3 optimizer.lr 0.015005792207068646 +156 3 training.batch_size 0.0 +156 3 training.label_smoothing 0.033242374089720875 +156 4 model.embedding_dim 2.0 +156 4 optimizer.lr 0.025627023911437365 +156 4 training.batch_size 2.0 +156 4 training.label_smoothing 0.0011045375471216581 +156 5 model.embedding_dim 1.0 +156 5 optimizer.lr 0.002920283181881394 +156 5 training.batch_size 1.0 +156 5 training.label_smoothing 0.15318825738331518 +156 6 model.embedding_dim 2.0 +156 6 optimizer.lr 0.005137358500197247 +156 6 training.batch_size 2.0 +156 6 training.label_smoothing 0.2332423446112342 +156 7 model.embedding_dim 2.0 +156 7 optimizer.lr 
0.002577441836484684 +156 7 training.batch_size 1.0 +156 7 training.label_smoothing 0.005170136750711433 +156 8 model.embedding_dim 1.0 +156 8 optimizer.lr 0.004337727224571805 +156 8 training.batch_size 2.0 +156 8 training.label_smoothing 0.005626785887292989 +156 9 model.embedding_dim 0.0 +156 9 optimizer.lr 0.03426518001313578 +156 9 training.batch_size 0.0 +156 9 training.label_smoothing 0.008758064949190958 +156 10 model.embedding_dim 2.0 +156 10 optimizer.lr 0.003712161112175182 +156 10 training.batch_size 2.0 +156 10 training.label_smoothing 0.2397645795111641 +156 11 model.embedding_dim 2.0 +156 11 optimizer.lr 0.03695901437645606 +156 11 training.batch_size 2.0 +156 11 training.label_smoothing 0.38535117117279233 +156 12 model.embedding_dim 2.0 +156 12 optimizer.lr 0.06226330911330597 +156 12 training.batch_size 1.0 +156 12 training.label_smoothing 0.2883633289132751 +156 13 model.embedding_dim 0.0 +156 13 optimizer.lr 0.007872451930251248 +156 13 training.batch_size 2.0 +156 13 training.label_smoothing 0.002507150205080035 +156 14 model.embedding_dim 0.0 +156 14 optimizer.lr 0.053608772829953896 +156 14 training.batch_size 1.0 +156 14 training.label_smoothing 0.037468632914925824 +156 15 model.embedding_dim 0.0 +156 15 optimizer.lr 0.019092073258974794 +156 15 training.batch_size 2.0 +156 15 training.label_smoothing 0.07969078970217044 +156 16 model.embedding_dim 0.0 +156 16 optimizer.lr 0.001849662035249092 +156 16 training.batch_size 1.0 +156 16 training.label_smoothing 0.9169391394613792 +156 17 model.embedding_dim 0.0 +156 17 optimizer.lr 0.08338834710807087 +156 17 training.batch_size 2.0 +156 17 training.label_smoothing 0.33422829816725885 +156 18 model.embedding_dim 0.0 +156 18 optimizer.lr 0.005873106108445707 +156 18 training.batch_size 1.0 +156 18 training.label_smoothing 0.12237695863120898 +156 19 model.embedding_dim 0.0 +156 19 optimizer.lr 0.04628329188828555 +156 19 training.batch_size 2.0 +156 19 training.label_smoothing 
0.009396847960627234 +156 20 model.embedding_dim 1.0 +156 20 optimizer.lr 0.0022073784453363787 +156 20 training.batch_size 0.0 +156 20 training.label_smoothing 0.008529097304856405 +156 1 dataset """fb15k237""" +156 1 model """distmult""" +156 1 loss """crossentropy""" +156 1 regularizer """no""" +156 1 optimizer """adam""" +156 1 training_loop """lcwa""" +156 1 evaluator """rankbased""" +156 2 dataset """fb15k237""" +156 2 model """distmult""" +156 2 loss """crossentropy""" +156 2 regularizer """no""" +156 2 optimizer """adam""" +156 2 training_loop """lcwa""" +156 2 evaluator """rankbased""" +156 3 dataset """fb15k237""" +156 3 model """distmult""" +156 3 loss """crossentropy""" +156 3 regularizer """no""" +156 3 optimizer """adam""" +156 3 training_loop """lcwa""" +156 3 evaluator """rankbased""" +156 4 dataset """fb15k237""" +156 4 model """distmult""" +156 4 loss """crossentropy""" +156 4 regularizer """no""" +156 4 optimizer """adam""" +156 4 training_loop """lcwa""" +156 4 evaluator """rankbased""" +156 5 dataset """fb15k237""" +156 5 model """distmult""" +156 5 loss """crossentropy""" +156 5 regularizer """no""" +156 5 optimizer """adam""" +156 5 training_loop """lcwa""" +156 5 evaluator """rankbased""" +156 6 dataset """fb15k237""" +156 6 model """distmult""" +156 6 loss """crossentropy""" +156 6 regularizer """no""" +156 6 optimizer """adam""" +156 6 training_loop """lcwa""" +156 6 evaluator """rankbased""" +156 7 dataset """fb15k237""" +156 7 model """distmult""" +156 7 loss """crossentropy""" +156 7 regularizer """no""" +156 7 optimizer """adam""" +156 7 training_loop """lcwa""" +156 7 evaluator """rankbased""" +156 8 dataset """fb15k237""" +156 8 model """distmult""" +156 8 loss """crossentropy""" +156 8 regularizer """no""" +156 8 optimizer """adam""" +156 8 training_loop """lcwa""" +156 8 evaluator """rankbased""" +156 9 dataset """fb15k237""" +156 9 model """distmult""" +156 9 loss """crossentropy""" +156 9 regularizer """no""" +156 9 optimizer 
"""adam""" +156 9 training_loop """lcwa""" +156 9 evaluator """rankbased""" +156 10 dataset """fb15k237""" +156 10 model """distmult""" +156 10 loss """crossentropy""" +156 10 regularizer """no""" +156 10 optimizer """adam""" +156 10 training_loop """lcwa""" +156 10 evaluator """rankbased""" +156 11 dataset """fb15k237""" +156 11 model """distmult""" +156 11 loss """crossentropy""" +156 11 regularizer """no""" +156 11 optimizer """adam""" +156 11 training_loop """lcwa""" +156 11 evaluator """rankbased""" +156 12 dataset """fb15k237""" +156 12 model """distmult""" +156 12 loss """crossentropy""" +156 12 regularizer """no""" +156 12 optimizer """adam""" +156 12 training_loop """lcwa""" +156 12 evaluator """rankbased""" +156 13 dataset """fb15k237""" +156 13 model """distmult""" +156 13 loss """crossentropy""" +156 13 regularizer """no""" +156 13 optimizer """adam""" +156 13 training_loop """lcwa""" +156 13 evaluator """rankbased""" +156 14 dataset """fb15k237""" +156 14 model """distmult""" +156 14 loss """crossentropy""" +156 14 regularizer """no""" +156 14 optimizer """adam""" +156 14 training_loop """lcwa""" +156 14 evaluator """rankbased""" +156 15 dataset """fb15k237""" +156 15 model """distmult""" +156 15 loss """crossentropy""" +156 15 regularizer """no""" +156 15 optimizer """adam""" +156 15 training_loop """lcwa""" +156 15 evaluator """rankbased""" +156 16 dataset """fb15k237""" +156 16 model """distmult""" +156 16 loss """crossentropy""" +156 16 regularizer """no""" +156 16 optimizer """adam""" +156 16 training_loop """lcwa""" +156 16 evaluator """rankbased""" +156 17 dataset """fb15k237""" +156 17 model """distmult""" +156 17 loss """crossentropy""" +156 17 regularizer """no""" +156 17 optimizer """adam""" +156 17 training_loop """lcwa""" +156 17 evaluator """rankbased""" +156 18 dataset """fb15k237""" +156 18 model """distmult""" +156 18 loss """crossentropy""" +156 18 regularizer """no""" +156 18 optimizer """adam""" +156 18 training_loop """lcwa""" +156 
18 evaluator """rankbased""" +156 19 dataset """fb15k237""" +156 19 model """distmult""" +156 19 loss """crossentropy""" +156 19 regularizer """no""" +156 19 optimizer """adam""" +156 19 training_loop """lcwa""" +156 19 evaluator """rankbased""" +156 20 dataset """fb15k237""" +156 20 model """distmult""" +156 20 loss """crossentropy""" +156 20 regularizer """no""" +156 20 optimizer """adam""" +156 20 training_loop """lcwa""" +156 20 evaluator """rankbased""" +157 1 model.embedding_dim 2.0 +157 1 optimizer.lr 0.026949637996511972 +157 1 training.batch_size 1.0 +157 1 training.label_smoothing 0.07403127844797931 +157 2 model.embedding_dim 1.0 +157 2 optimizer.lr 0.004984426465896501 +157 2 training.batch_size 0.0 +157 2 training.label_smoothing 0.37306050967823934 +157 3 model.embedding_dim 0.0 +157 3 optimizer.lr 0.00576228654315374 +157 3 training.batch_size 1.0 +157 3 training.label_smoothing 0.35630141188611225 +157 4 model.embedding_dim 0.0 +157 4 optimizer.lr 0.06431770780726656 +157 4 training.batch_size 2.0 +157 4 training.label_smoothing 0.835033178852683 +157 5 model.embedding_dim 0.0 +157 5 optimizer.lr 0.03471808581107142 +157 5 training.batch_size 2.0 +157 5 training.label_smoothing 0.1615767584038809 +157 6 model.embedding_dim 1.0 +157 6 optimizer.lr 0.0012155370831626398 +157 6 training.batch_size 0.0 +157 6 training.label_smoothing 0.04456790985420898 +157 7 model.embedding_dim 0.0 +157 7 optimizer.lr 0.08731439756442445 +157 7 training.batch_size 2.0 +157 7 training.label_smoothing 0.12063235013437454 +157 8 model.embedding_dim 0.0 +157 8 optimizer.lr 0.07930880507624856 +157 8 training.batch_size 1.0 +157 8 training.label_smoothing 0.0064977455430452 +157 9 model.embedding_dim 0.0 +157 9 optimizer.lr 0.028278443668890752 +157 9 training.batch_size 1.0 +157 9 training.label_smoothing 0.04276721965548084 +157 10 model.embedding_dim 2.0 +157 10 optimizer.lr 0.018357395510990877 +157 10 training.batch_size 2.0 +157 10 training.label_smoothing 
0.6651895115251897 +157 11 model.embedding_dim 2.0 +157 11 optimizer.lr 0.036869732210100724 +157 11 training.batch_size 1.0 +157 11 training.label_smoothing 0.007285402050215947 +157 12 model.embedding_dim 1.0 +157 12 optimizer.lr 0.001118943378441982 +157 12 training.batch_size 1.0 +157 12 training.label_smoothing 0.060111896353131494 +157 13 model.embedding_dim 2.0 +157 13 optimizer.lr 0.0758542352591699 +157 13 training.batch_size 2.0 +157 13 training.label_smoothing 0.0017156717231941831 +157 14 model.embedding_dim 0.0 +157 14 optimizer.lr 0.025047897944569613 +157 14 training.batch_size 2.0 +157 14 training.label_smoothing 0.0015927921106321612 +157 15 model.embedding_dim 2.0 +157 15 optimizer.lr 0.0035666877566718283 +157 15 training.batch_size 1.0 +157 15 training.label_smoothing 0.03726044699715989 +157 16 model.embedding_dim 1.0 +157 16 optimizer.lr 0.0774963591275538 +157 16 training.batch_size 1.0 +157 16 training.label_smoothing 0.9271567559838105 +157 17 model.embedding_dim 1.0 +157 17 optimizer.lr 0.042739784825630046 +157 17 training.batch_size 0.0 +157 17 training.label_smoothing 0.10952819176739638 +157 18 model.embedding_dim 0.0 +157 18 optimizer.lr 0.0017899129031901189 +157 18 training.batch_size 2.0 +157 18 training.label_smoothing 0.010697359897992607 +157 19 model.embedding_dim 0.0 +157 19 optimizer.lr 0.0011904392958915627 +157 19 training.batch_size 1.0 +157 19 training.label_smoothing 0.23225565504498139 +157 20 model.embedding_dim 2.0 +157 20 optimizer.lr 0.005996087669758142 +157 20 training.batch_size 2.0 +157 20 training.label_smoothing 0.5596625230127544 +157 21 model.embedding_dim 2.0 +157 21 optimizer.lr 0.0017009199167284576 +157 21 training.batch_size 0.0 +157 21 training.label_smoothing 0.3890142570990486 +157 22 model.embedding_dim 0.0 +157 22 optimizer.lr 0.09197343069462363 +157 22 training.batch_size 2.0 +157 22 training.label_smoothing 0.3781012739021137 +157 23 model.embedding_dim 1.0 +157 23 optimizer.lr 
0.07191592549726587 +157 23 training.batch_size 2.0 +157 23 training.label_smoothing 0.0016220095552540374 +157 24 model.embedding_dim 1.0 +157 24 optimizer.lr 0.02566268537379148 +157 24 training.batch_size 2.0 +157 24 training.label_smoothing 0.0010797259348389666 +157 25 model.embedding_dim 2.0 +157 25 optimizer.lr 0.007904618953588216 +157 25 training.batch_size 0.0 +157 25 training.label_smoothing 0.039971309118305653 +157 26 model.embedding_dim 2.0 +157 26 optimizer.lr 0.001428350881016231 +157 26 training.batch_size 2.0 +157 26 training.label_smoothing 0.0018456327333703966 +157 27 model.embedding_dim 2.0 +157 27 optimizer.lr 0.023765442472221145 +157 27 training.batch_size 0.0 +157 27 training.label_smoothing 0.022895725774683157 +157 28 model.embedding_dim 0.0 +157 28 optimizer.lr 0.006955399721994482 +157 28 training.batch_size 2.0 +157 28 training.label_smoothing 0.04486151979400027 +157 29 model.embedding_dim 2.0 +157 29 optimizer.lr 0.00120039194528149 +157 29 training.batch_size 1.0 +157 29 training.label_smoothing 0.2437767296146506 +157 30 model.embedding_dim 2.0 +157 30 optimizer.lr 0.005982188918521937 +157 30 training.batch_size 0.0 +157 30 training.label_smoothing 0.0014912959576425943 +157 31 model.embedding_dim 2.0 +157 31 optimizer.lr 0.012659818095851554 +157 31 training.batch_size 2.0 +157 31 training.label_smoothing 0.4728835069082307 +157 1 dataset """fb15k237""" +157 1 model """distmult""" +157 1 loss """crossentropy""" +157 1 regularizer """no""" +157 1 optimizer """adam""" +157 1 training_loop """lcwa""" +157 1 evaluator """rankbased""" +157 2 dataset """fb15k237""" +157 2 model """distmult""" +157 2 loss """crossentropy""" +157 2 regularizer """no""" +157 2 optimizer """adam""" +157 2 training_loop """lcwa""" +157 2 evaluator """rankbased""" +157 3 dataset """fb15k237""" +157 3 model """distmult""" +157 3 loss """crossentropy""" +157 3 regularizer """no""" +157 3 optimizer """adam""" +157 3 training_loop """lcwa""" +157 3 evaluator 
"""rankbased""" +157 4 dataset """fb15k237""" +157 4 model """distmult""" +157 4 loss """crossentropy""" +157 4 regularizer """no""" +157 4 optimizer """adam""" +157 4 training_loop """lcwa""" +157 4 evaluator """rankbased""" +157 5 dataset """fb15k237""" +157 5 model """distmult""" +157 5 loss """crossentropy""" +157 5 regularizer """no""" +157 5 optimizer """adam""" +157 5 training_loop """lcwa""" +157 5 evaluator """rankbased""" +157 6 dataset """fb15k237""" +157 6 model """distmult""" +157 6 loss """crossentropy""" +157 6 regularizer """no""" +157 6 optimizer """adam""" +157 6 training_loop """lcwa""" +157 6 evaluator """rankbased""" +157 7 dataset """fb15k237""" +157 7 model """distmult""" +157 7 loss """crossentropy""" +157 7 regularizer """no""" +157 7 optimizer """adam""" +157 7 training_loop """lcwa""" +157 7 evaluator """rankbased""" +157 8 dataset """fb15k237""" +157 8 model """distmult""" +157 8 loss """crossentropy""" +157 8 regularizer """no""" +157 8 optimizer """adam""" +157 8 training_loop """lcwa""" +157 8 evaluator """rankbased""" +157 9 dataset """fb15k237""" +157 9 model """distmult""" +157 9 loss """crossentropy""" +157 9 regularizer """no""" +157 9 optimizer """adam""" +157 9 training_loop """lcwa""" +157 9 evaluator """rankbased""" +157 10 dataset """fb15k237""" +157 10 model """distmult""" +157 10 loss """crossentropy""" +157 10 regularizer """no""" +157 10 optimizer """adam""" +157 10 training_loop """lcwa""" +157 10 evaluator """rankbased""" +157 11 dataset """fb15k237""" +157 11 model """distmult""" +157 11 loss """crossentropy""" +157 11 regularizer """no""" +157 11 optimizer """adam""" +157 11 training_loop """lcwa""" +157 11 evaluator """rankbased""" +157 12 dataset """fb15k237""" +157 12 model """distmult""" +157 12 loss """crossentropy""" +157 12 regularizer """no""" +157 12 optimizer """adam""" +157 12 training_loop """lcwa""" +157 12 evaluator """rankbased""" +157 13 dataset """fb15k237""" +157 13 model """distmult""" +157 13 loss 
"""crossentropy""" +157 13 regularizer """no""" +157 13 optimizer """adam""" +157 13 training_loop """lcwa""" +157 13 evaluator """rankbased""" +157 14 dataset """fb15k237""" +157 14 model """distmult""" +157 14 loss """crossentropy""" +157 14 regularizer """no""" +157 14 optimizer """adam""" +157 14 training_loop """lcwa""" +157 14 evaluator """rankbased""" +157 15 dataset """fb15k237""" +157 15 model """distmult""" +157 15 loss """crossentropy""" +157 15 regularizer """no""" +157 15 optimizer """adam""" +157 15 training_loop """lcwa""" +157 15 evaluator """rankbased""" +157 16 dataset """fb15k237""" +157 16 model """distmult""" +157 16 loss """crossentropy""" +157 16 regularizer """no""" +157 16 optimizer """adam""" +157 16 training_loop """lcwa""" +157 16 evaluator """rankbased""" +157 17 dataset """fb15k237""" +157 17 model """distmult""" +157 17 loss """crossentropy""" +157 17 regularizer """no""" +157 17 optimizer """adam""" +157 17 training_loop """lcwa""" +157 17 evaluator """rankbased""" +157 18 dataset """fb15k237""" +157 18 model """distmult""" +157 18 loss """crossentropy""" +157 18 regularizer """no""" +157 18 optimizer """adam""" +157 18 training_loop """lcwa""" +157 18 evaluator """rankbased""" +157 19 dataset """fb15k237""" +157 19 model """distmult""" +157 19 loss """crossentropy""" +157 19 regularizer """no""" +157 19 optimizer """adam""" +157 19 training_loop """lcwa""" +157 19 evaluator """rankbased""" +157 20 dataset """fb15k237""" +157 20 model """distmult""" +157 20 loss """crossentropy""" +157 20 regularizer """no""" +157 20 optimizer """adam""" +157 20 training_loop """lcwa""" +157 20 evaluator """rankbased""" +157 21 dataset """fb15k237""" +157 21 model """distmult""" +157 21 loss """crossentropy""" +157 21 regularizer """no""" +157 21 optimizer """adam""" +157 21 training_loop """lcwa""" +157 21 evaluator """rankbased""" +157 22 dataset """fb15k237""" +157 22 model """distmult""" +157 22 loss """crossentropy""" +157 22 regularizer 
"""no""" +157 22 optimizer """adam""" +157 22 training_loop """lcwa""" +157 22 evaluator """rankbased""" +157 23 dataset """fb15k237""" +157 23 model """distmult""" +157 23 loss """crossentropy""" +157 23 regularizer """no""" +157 23 optimizer """adam""" +157 23 training_loop """lcwa""" +157 23 evaluator """rankbased""" +157 24 dataset """fb15k237""" +157 24 model """distmult""" +157 24 loss """crossentropy""" +157 24 regularizer """no""" +157 24 optimizer """adam""" +157 24 training_loop """lcwa""" +157 24 evaluator """rankbased""" +157 25 dataset """fb15k237""" +157 25 model """distmult""" +157 25 loss """crossentropy""" +157 25 regularizer """no""" +157 25 optimizer """adam""" +157 25 training_loop """lcwa""" +157 25 evaluator """rankbased""" +157 26 dataset """fb15k237""" +157 26 model """distmult""" +157 26 loss """crossentropy""" +157 26 regularizer """no""" +157 26 optimizer """adam""" +157 26 training_loop """lcwa""" +157 26 evaluator """rankbased""" +157 27 dataset """fb15k237""" +157 27 model """distmult""" +157 27 loss """crossentropy""" +157 27 regularizer """no""" +157 27 optimizer """adam""" +157 27 training_loop """lcwa""" +157 27 evaluator """rankbased""" +157 28 dataset """fb15k237""" +157 28 model """distmult""" +157 28 loss """crossentropy""" +157 28 regularizer """no""" +157 28 optimizer """adam""" +157 28 training_loop """lcwa""" +157 28 evaluator """rankbased""" +157 29 dataset """fb15k237""" +157 29 model """distmult""" +157 29 loss """crossentropy""" +157 29 regularizer """no""" +157 29 optimizer """adam""" +157 29 training_loop """lcwa""" +157 29 evaluator """rankbased""" +157 30 dataset """fb15k237""" +157 30 model """distmult""" +157 30 loss """crossentropy""" +157 30 regularizer """no""" +157 30 optimizer """adam""" +157 30 training_loop """lcwa""" +157 30 evaluator """rankbased""" +157 31 dataset """fb15k237""" +157 31 model """distmult""" +157 31 loss """crossentropy""" +157 31 regularizer """no""" +157 31 optimizer """adam""" +157 31 
training_loop """lcwa""" +157 31 evaluator """rankbased""" +158 1 model.embedding_dim 2.0 +158 1 optimizer.lr 0.020765600930123102 +158 1 training.batch_size 0.0 +158 1 training.label_smoothing 0.36686650962024286 +158 2 model.embedding_dim 0.0 +158 2 optimizer.lr 0.009547107385632395 +158 2 training.batch_size 1.0 +158 2 training.label_smoothing 0.014632808089569427 +158 3 model.embedding_dim 2.0 +158 3 optimizer.lr 0.03673813295584985 +158 3 training.batch_size 1.0 +158 3 training.label_smoothing 0.22666378395161998 +158 4 model.embedding_dim 1.0 +158 4 optimizer.lr 0.002147336937042549 +158 4 training.batch_size 2.0 +158 4 training.label_smoothing 0.48689663334806 +158 5 model.embedding_dim 1.0 +158 5 optimizer.lr 0.00369662972779471 +158 5 training.batch_size 2.0 +158 5 training.label_smoothing 0.48815187459575443 +158 6 model.embedding_dim 1.0 +158 6 optimizer.lr 0.019215804011475386 +158 6 training.batch_size 1.0 +158 6 training.label_smoothing 0.029947938499844895 +158 7 model.embedding_dim 2.0 +158 7 optimizer.lr 0.001162910321132441 +158 7 training.batch_size 0.0 +158 7 training.label_smoothing 0.00281706030572385 +158 1 dataset """fb15k237""" +158 1 model """distmult""" +158 1 loss """softplus""" +158 1 regularizer """no""" +158 1 optimizer """adam""" +158 1 training_loop """lcwa""" +158 1 evaluator """rankbased""" +158 2 dataset """fb15k237""" +158 2 model """distmult""" +158 2 loss """softplus""" +158 2 regularizer """no""" +158 2 optimizer """adam""" +158 2 training_loop """lcwa""" +158 2 evaluator """rankbased""" +158 3 dataset """fb15k237""" +158 3 model """distmult""" +158 3 loss """softplus""" +158 3 regularizer """no""" +158 3 optimizer """adam""" +158 3 training_loop """lcwa""" +158 3 evaluator """rankbased""" +158 4 dataset """fb15k237""" +158 4 model """distmult""" +158 4 loss """softplus""" +158 4 regularizer """no""" +158 4 optimizer """adam""" +158 4 training_loop """lcwa""" +158 4 evaluator """rankbased""" +158 5 dataset """fb15k237""" +158 
5 model """distmult""" +158 5 loss """softplus""" +158 5 regularizer """no""" +158 5 optimizer """adam""" +158 5 training_loop """lcwa""" +158 5 evaluator """rankbased""" +158 6 dataset """fb15k237""" +158 6 model """distmult""" +158 6 loss """softplus""" +158 6 regularizer """no""" +158 6 optimizer """adam""" +158 6 training_loop """lcwa""" +158 6 evaluator """rankbased""" +158 7 dataset """fb15k237""" +158 7 model """distmult""" +158 7 loss """softplus""" +158 7 regularizer """no""" +158 7 optimizer """adam""" +158 7 training_loop """lcwa""" +158 7 evaluator """rankbased""" +159 1 model.embedding_dim 1.0 +159 1 optimizer.lr 0.09218890457365415 +159 1 training.batch_size 0.0 +159 1 training.label_smoothing 0.9420444197365406 +159 2 model.embedding_dim 0.0 +159 2 optimizer.lr 0.031410962208053304 +159 2 training.batch_size 0.0 +159 2 training.label_smoothing 0.0010259184939099264 +159 3 model.embedding_dim 1.0 +159 3 optimizer.lr 0.0825382611399144 +159 3 training.batch_size 1.0 +159 3 training.label_smoothing 0.01073102636425934 +159 4 model.embedding_dim 0.0 +159 4 optimizer.lr 0.017637293370014486 +159 4 training.batch_size 1.0 +159 4 training.label_smoothing 0.04500141341707999 +159 5 model.embedding_dim 2.0 +159 5 optimizer.lr 0.009319799422825068 +159 5 training.batch_size 2.0 +159 5 training.label_smoothing 0.0017025403844580166 +159 6 model.embedding_dim 1.0 +159 6 optimizer.lr 0.05652913225900903 +159 6 training.batch_size 2.0 +159 6 training.label_smoothing 0.003597582742292763 +159 7 model.embedding_dim 2.0 +159 7 optimizer.lr 0.0034160134618196176 +159 7 training.batch_size 1.0 +159 7 training.label_smoothing 0.003373626514315481 +159 8 model.embedding_dim 1.0 +159 8 optimizer.lr 0.07151542969343395 +159 8 training.batch_size 0.0 +159 8 training.label_smoothing 0.1854229279529542 +159 9 model.embedding_dim 1.0 +159 9 optimizer.lr 0.016566894213482966 +159 9 training.batch_size 1.0 +159 9 training.label_smoothing 0.4709482943490199 +159 10 
model.embedding_dim 2.0 +159 10 optimizer.lr 0.0024492374663100946 +159 10 training.batch_size 2.0 +159 10 training.label_smoothing 0.04915317834270302 +159 11 model.embedding_dim 0.0 +159 11 optimizer.lr 0.00515001129420384 +159 11 training.batch_size 2.0 +159 11 training.label_smoothing 0.24672141770591252 +159 12 model.embedding_dim 2.0 +159 12 optimizer.lr 0.006538836244801979 +159 12 training.batch_size 1.0 +159 12 training.label_smoothing 0.0029129742749826174 +159 13 model.embedding_dim 1.0 +159 13 optimizer.lr 0.004062765201323249 +159 13 training.batch_size 1.0 +159 13 training.label_smoothing 0.7336569890645873 +159 14 model.embedding_dim 1.0 +159 14 optimizer.lr 0.013974027439973608 +159 14 training.batch_size 1.0 +159 14 training.label_smoothing 0.04605079552519423 +159 15 model.embedding_dim 2.0 +159 15 optimizer.lr 0.0685378780920382 +159 15 training.batch_size 1.0 +159 15 training.label_smoothing 0.1623038340612113 +159 16 model.embedding_dim 0.0 +159 16 optimizer.lr 0.0011752135602110547 +159 16 training.batch_size 0.0 +159 16 training.label_smoothing 0.6999861772403909 +159 17 model.embedding_dim 0.0 +159 17 optimizer.lr 0.06392250583782134 +159 17 training.batch_size 0.0 +159 17 training.label_smoothing 0.06508876687079818 +159 18 model.embedding_dim 1.0 +159 18 optimizer.lr 0.05285941561322425 +159 18 training.batch_size 2.0 +159 18 training.label_smoothing 0.022318794014927076 +159 19 model.embedding_dim 0.0 +159 19 optimizer.lr 0.0027901287988583087 +159 19 training.batch_size 1.0 +159 19 training.label_smoothing 0.022898018084159734 +159 20 model.embedding_dim 2.0 +159 20 optimizer.lr 0.07330699495471135 +159 20 training.batch_size 1.0 +159 20 training.label_smoothing 0.06476136584522409 +159 21 model.embedding_dim 2.0 +159 21 optimizer.lr 0.012023888164263506 +159 21 training.batch_size 2.0 +159 21 training.label_smoothing 0.4306365170885904 +159 1 dataset """fb15k237""" +159 1 model """distmult""" +159 1 loss """softplus""" +159 1 
regularizer """no""" +159 1 optimizer """adam""" +159 1 training_loop """lcwa""" +159 1 evaluator """rankbased""" +159 2 dataset """fb15k237""" +159 2 model """distmult""" +159 2 loss """softplus""" +159 2 regularizer """no""" +159 2 optimizer """adam""" +159 2 training_loop """lcwa""" +159 2 evaluator """rankbased""" +159 3 dataset """fb15k237""" +159 3 model """distmult""" +159 3 loss """softplus""" +159 3 regularizer """no""" +159 3 optimizer """adam""" +159 3 training_loop """lcwa""" +159 3 evaluator """rankbased""" +159 4 dataset """fb15k237""" +159 4 model """distmult""" +159 4 loss """softplus""" +159 4 regularizer """no""" +159 4 optimizer """adam""" +159 4 training_loop """lcwa""" +159 4 evaluator """rankbased""" +159 5 dataset """fb15k237""" +159 5 model """distmult""" +159 5 loss """softplus""" +159 5 regularizer """no""" +159 5 optimizer """adam""" +159 5 training_loop """lcwa""" +159 5 evaluator """rankbased""" +159 6 dataset """fb15k237""" +159 6 model """distmult""" +159 6 loss """softplus""" +159 6 regularizer """no""" +159 6 optimizer """adam""" +159 6 training_loop """lcwa""" +159 6 evaluator """rankbased""" +159 7 dataset """fb15k237""" +159 7 model """distmult""" +159 7 loss """softplus""" +159 7 regularizer """no""" +159 7 optimizer """adam""" +159 7 training_loop """lcwa""" +159 7 evaluator """rankbased""" +159 8 dataset """fb15k237""" +159 8 model """distmult""" +159 8 loss """softplus""" +159 8 regularizer """no""" +159 8 optimizer """adam""" +159 8 training_loop """lcwa""" +159 8 evaluator """rankbased""" +159 9 dataset """fb15k237""" +159 9 model """distmult""" +159 9 loss """softplus""" +159 9 regularizer """no""" +159 9 optimizer """adam""" +159 9 training_loop """lcwa""" +159 9 evaluator """rankbased""" +159 10 dataset """fb15k237""" +159 10 model """distmult""" +159 10 loss """softplus""" +159 10 regularizer """no""" +159 10 optimizer """adam""" +159 10 training_loop """lcwa""" +159 10 evaluator """rankbased""" +159 11 dataset 
"""fb15k237""" +159 11 model """distmult""" +159 11 loss """softplus""" +159 11 regularizer """no""" +159 11 optimizer """adam""" +159 11 training_loop """lcwa""" +159 11 evaluator """rankbased""" +159 12 dataset """fb15k237""" +159 12 model """distmult""" +159 12 loss """softplus""" +159 12 regularizer """no""" +159 12 optimizer """adam""" +159 12 training_loop """lcwa""" +159 12 evaluator """rankbased""" +159 13 dataset """fb15k237""" +159 13 model """distmult""" +159 13 loss """softplus""" +159 13 regularizer """no""" +159 13 optimizer """adam""" +159 13 training_loop """lcwa""" +159 13 evaluator """rankbased""" +159 14 dataset """fb15k237""" +159 14 model """distmult""" +159 14 loss """softplus""" +159 14 regularizer """no""" +159 14 optimizer """adam""" +159 14 training_loop """lcwa""" +159 14 evaluator """rankbased""" +159 15 dataset """fb15k237""" +159 15 model """distmult""" +159 15 loss """softplus""" +159 15 regularizer """no""" +159 15 optimizer """adam""" +159 15 training_loop """lcwa""" +159 15 evaluator """rankbased""" +159 16 dataset """fb15k237""" +159 16 model """distmult""" +159 16 loss """softplus""" +159 16 regularizer """no""" +159 16 optimizer """adam""" +159 16 training_loop """lcwa""" +159 16 evaluator """rankbased""" +159 17 dataset """fb15k237""" +159 17 model """distmult""" +159 17 loss """softplus""" +159 17 regularizer """no""" +159 17 optimizer """adam""" +159 17 training_loop """lcwa""" +159 17 evaluator """rankbased""" +159 18 dataset """fb15k237""" +159 18 model """distmult""" +159 18 loss """softplus""" +159 18 regularizer """no""" +159 18 optimizer """adam""" +159 18 training_loop """lcwa""" +159 18 evaluator """rankbased""" +159 19 dataset """fb15k237""" +159 19 model """distmult""" +159 19 loss """softplus""" +159 19 regularizer """no""" +159 19 optimizer """adam""" +159 19 training_loop """lcwa""" +159 19 evaluator """rankbased""" +159 20 dataset """fb15k237""" +159 20 model """distmult""" +159 20 loss """softplus""" +159 20 
regularizer """no""" +159 20 optimizer """adam""" +159 20 training_loop """lcwa""" +159 20 evaluator """rankbased""" +159 21 dataset """fb15k237""" +159 21 model """distmult""" +159 21 loss """softplus""" +159 21 regularizer """no""" +159 21 optimizer """adam""" +159 21 training_loop """lcwa""" +159 21 evaluator """rankbased""" +160 1 model.embedding_dim 0.0 +160 1 optimizer.lr 0.0034032895084570077 +160 1 negative_sampler.num_negs_per_pos 12.0 +160 1 training.batch_size 0.0 +160 2 model.embedding_dim 2.0 +160 2 optimizer.lr 0.09685950022026005 +160 2 negative_sampler.num_negs_per_pos 91.0 +160 2 training.batch_size 1.0 +160 3 model.embedding_dim 2.0 +160 3 optimizer.lr 0.015228536107897025 +160 3 negative_sampler.num_negs_per_pos 72.0 +160 3 training.batch_size 1.0 +160 4 model.embedding_dim 1.0 +160 4 optimizer.lr 0.002507680289882894 +160 4 negative_sampler.num_negs_per_pos 84.0 +160 4 training.batch_size 1.0 +160 5 model.embedding_dim 2.0 +160 5 optimizer.lr 0.0033099088467113584 +160 5 negative_sampler.num_negs_per_pos 61.0 +160 5 training.batch_size 0.0 +160 6 model.embedding_dim 2.0 +160 6 optimizer.lr 0.004104108787374334 +160 6 negative_sampler.num_negs_per_pos 98.0 +160 6 training.batch_size 0.0 +160 7 model.embedding_dim 1.0 +160 7 optimizer.lr 0.0011385444698621796 +160 7 negative_sampler.num_negs_per_pos 23.0 +160 7 training.batch_size 0.0 +160 8 model.embedding_dim 2.0 +160 8 optimizer.lr 0.06446906574144862 +160 8 negative_sampler.num_negs_per_pos 86.0 +160 8 training.batch_size 0.0 +160 9 model.embedding_dim 1.0 +160 9 optimizer.lr 0.0069668976905383805 +160 9 negative_sampler.num_negs_per_pos 37.0 +160 9 training.batch_size 1.0 +160 10 model.embedding_dim 2.0 +160 10 optimizer.lr 0.002065755245800902 +160 10 negative_sampler.num_negs_per_pos 64.0 +160 10 training.batch_size 1.0 +160 11 model.embedding_dim 2.0 +160 11 optimizer.lr 0.04548937299384272 +160 11 negative_sampler.num_negs_per_pos 93.0 +160 11 training.batch_size 0.0 +160 12 
model.embedding_dim 2.0 +160 12 optimizer.lr 0.05313511054746943 +160 12 negative_sampler.num_negs_per_pos 77.0 +160 12 training.batch_size 0.0 +160 13 model.embedding_dim 0.0 +160 13 optimizer.lr 0.001414258965189016 +160 13 negative_sampler.num_negs_per_pos 70.0 +160 13 training.batch_size 1.0 +160 1 dataset """fb15k237""" +160 1 model """distmult""" +160 1 loss """softplus""" +160 1 regularizer """no""" +160 1 optimizer """adam""" +160 1 training_loop """owa""" +160 1 negative_sampler """basic""" +160 1 evaluator """rankbased""" +160 2 dataset """fb15k237""" +160 2 model """distmult""" +160 2 loss """softplus""" +160 2 regularizer """no""" +160 2 optimizer """adam""" +160 2 training_loop """owa""" +160 2 negative_sampler """basic""" +160 2 evaluator """rankbased""" +160 3 dataset """fb15k237""" +160 3 model """distmult""" +160 3 loss """softplus""" +160 3 regularizer """no""" +160 3 optimizer """adam""" +160 3 training_loop """owa""" +160 3 negative_sampler """basic""" +160 3 evaluator """rankbased""" +160 4 dataset """fb15k237""" +160 4 model """distmult""" +160 4 loss """softplus""" +160 4 regularizer """no""" +160 4 optimizer """adam""" +160 4 training_loop """owa""" +160 4 negative_sampler """basic""" +160 4 evaluator """rankbased""" +160 5 dataset """fb15k237""" +160 5 model """distmult""" +160 5 loss """softplus""" +160 5 regularizer """no""" +160 5 optimizer """adam""" +160 5 training_loop """owa""" +160 5 negative_sampler """basic""" +160 5 evaluator """rankbased""" +160 6 dataset """fb15k237""" +160 6 model """distmult""" +160 6 loss """softplus""" +160 6 regularizer """no""" +160 6 optimizer """adam""" +160 6 training_loop """owa""" +160 6 negative_sampler """basic""" +160 6 evaluator """rankbased""" +160 7 dataset """fb15k237""" +160 7 model """distmult""" +160 7 loss """softplus""" +160 7 regularizer """no""" +160 7 optimizer """adam""" +160 7 training_loop """owa""" +160 7 negative_sampler """basic""" +160 7 evaluator """rankbased""" +160 8 dataset 
"""fb15k237""" +160 8 model """distmult""" +160 8 loss """softplus""" +160 8 regularizer """no""" +160 8 optimizer """adam""" +160 8 training_loop """owa""" +160 8 negative_sampler """basic""" +160 8 evaluator """rankbased""" +160 9 dataset """fb15k237""" +160 9 model """distmult""" +160 9 loss """softplus""" +160 9 regularizer """no""" +160 9 optimizer """adam""" +160 9 training_loop """owa""" +160 9 negative_sampler """basic""" +160 9 evaluator """rankbased""" +160 10 dataset """fb15k237""" +160 10 model """distmult""" +160 10 loss """softplus""" +160 10 regularizer """no""" +160 10 optimizer """adam""" +160 10 training_loop """owa""" +160 10 negative_sampler """basic""" +160 10 evaluator """rankbased""" +160 11 dataset """fb15k237""" +160 11 model """distmult""" +160 11 loss """softplus""" +160 11 regularizer """no""" +160 11 optimizer """adam""" +160 11 training_loop """owa""" +160 11 negative_sampler """basic""" +160 11 evaluator """rankbased""" +160 12 dataset """fb15k237""" +160 12 model """distmult""" +160 12 loss """softplus""" +160 12 regularizer """no""" +160 12 optimizer """adam""" +160 12 training_loop """owa""" +160 12 negative_sampler """basic""" +160 12 evaluator """rankbased""" +160 13 dataset """fb15k237""" +160 13 model """distmult""" +160 13 loss """softplus""" +160 13 regularizer """no""" +160 13 optimizer """adam""" +160 13 training_loop """owa""" +160 13 negative_sampler """basic""" +160 13 evaluator """rankbased""" +161 1 model.embedding_dim 0.0 +161 1 optimizer.lr 0.004191578266397646 +161 1 negative_sampler.num_negs_per_pos 17.0 +161 1 training.batch_size 1.0 +161 2 model.embedding_dim 1.0 +161 2 optimizer.lr 0.029172202768443813 +161 2 negative_sampler.num_negs_per_pos 13.0 +161 2 training.batch_size 1.0 +161 3 model.embedding_dim 2.0 +161 3 optimizer.lr 0.004328477481791771 +161 3 negative_sampler.num_negs_per_pos 99.0 +161 3 training.batch_size 0.0 +161 4 model.embedding_dim 1.0 +161 4 optimizer.lr 0.019410554402852293 +161 4 
negative_sampler.num_negs_per_pos 71.0 +161 4 training.batch_size 2.0 +161 5 model.embedding_dim 2.0 +161 5 optimizer.lr 0.0010133522755957003 +161 5 negative_sampler.num_negs_per_pos 85.0 +161 5 training.batch_size 1.0 +161 6 model.embedding_dim 2.0 +161 6 optimizer.lr 0.0010489940737632149 +161 6 negative_sampler.num_negs_per_pos 57.0 +161 6 training.batch_size 0.0 +161 7 model.embedding_dim 1.0 +161 7 optimizer.lr 0.007848373888716474 +161 7 negative_sampler.num_negs_per_pos 86.0 +161 7 training.batch_size 0.0 +161 8 model.embedding_dim 1.0 +161 8 optimizer.lr 0.0018906786422924276 +161 8 negative_sampler.num_negs_per_pos 9.0 +161 8 training.batch_size 1.0 +161 9 model.embedding_dim 2.0 +161 9 optimizer.lr 0.003962928725331017 +161 9 negative_sampler.num_negs_per_pos 27.0 +161 9 training.batch_size 0.0 +161 10 model.embedding_dim 1.0 +161 10 optimizer.lr 0.0018315880068693526 +161 10 negative_sampler.num_negs_per_pos 88.0 +161 10 training.batch_size 1.0 +161 11 model.embedding_dim 2.0 +161 11 optimizer.lr 0.0047873978072453275 +161 11 negative_sampler.num_negs_per_pos 31.0 +161 11 training.batch_size 2.0 +161 12 model.embedding_dim 1.0 +161 12 optimizer.lr 0.001989114097826665 +161 12 negative_sampler.num_negs_per_pos 46.0 +161 12 training.batch_size 2.0 +161 13 model.embedding_dim 0.0 +161 13 optimizer.lr 0.005459908500216026 +161 13 negative_sampler.num_negs_per_pos 28.0 +161 13 training.batch_size 2.0 +161 14 model.embedding_dim 1.0 +161 14 optimizer.lr 0.008000602533849429 +161 14 negative_sampler.num_negs_per_pos 72.0 +161 14 training.batch_size 2.0 +161 15 model.embedding_dim 2.0 +161 15 optimizer.lr 0.0011463929646847385 +161 15 negative_sampler.num_negs_per_pos 72.0 +161 15 training.batch_size 0.0 +161 16 model.embedding_dim 2.0 +161 16 optimizer.lr 0.007117178042224545 +161 16 negative_sampler.num_negs_per_pos 40.0 +161 16 training.batch_size 2.0 +161 17 model.embedding_dim 1.0 +161 17 optimizer.lr 0.036447096802519265 +161 17 
negative_sampler.num_negs_per_pos 38.0 +161 17 training.batch_size 0.0 +161 18 model.embedding_dim 0.0 +161 18 optimizer.lr 0.014904421976454425 +161 18 negative_sampler.num_negs_per_pos 31.0 +161 18 training.batch_size 1.0 +161 19 model.embedding_dim 1.0 +161 19 optimizer.lr 0.001039575452866517 +161 19 negative_sampler.num_negs_per_pos 43.0 +161 19 training.batch_size 0.0 +161 20 model.embedding_dim 0.0 +161 20 optimizer.lr 0.07185589090784882 +161 20 negative_sampler.num_negs_per_pos 54.0 +161 20 training.batch_size 0.0 +161 21 model.embedding_dim 2.0 +161 21 optimizer.lr 0.047366982325576075 +161 21 negative_sampler.num_negs_per_pos 66.0 +161 21 training.batch_size 0.0 +161 22 model.embedding_dim 1.0 +161 22 optimizer.lr 0.0394738815144923 +161 22 negative_sampler.num_negs_per_pos 19.0 +161 22 training.batch_size 1.0 +161 23 model.embedding_dim 1.0 +161 23 optimizer.lr 0.05372021011213137 +161 23 negative_sampler.num_negs_per_pos 42.0 +161 23 training.batch_size 1.0 +161 24 model.embedding_dim 0.0 +161 24 optimizer.lr 0.045242387249144894 +161 24 negative_sampler.num_negs_per_pos 96.0 +161 24 training.batch_size 0.0 +161 25 model.embedding_dim 0.0 +161 25 optimizer.lr 0.06820187672158128 +161 25 negative_sampler.num_negs_per_pos 15.0 +161 25 training.batch_size 2.0 +161 26 model.embedding_dim 0.0 +161 26 optimizer.lr 0.004538271208333608 +161 26 negative_sampler.num_negs_per_pos 1.0 +161 26 training.batch_size 0.0 +161 27 model.embedding_dim 0.0 +161 27 optimizer.lr 0.006784144759374765 +161 27 negative_sampler.num_negs_per_pos 22.0 +161 27 training.batch_size 1.0 +161 28 model.embedding_dim 2.0 +161 28 optimizer.lr 0.0020807528097904733 +161 28 negative_sampler.num_negs_per_pos 17.0 +161 28 training.batch_size 0.0 +161 1 dataset """fb15k237""" +161 1 model """distmult""" +161 1 loss """softplus""" +161 1 regularizer """no""" +161 1 optimizer """adam""" +161 1 training_loop """owa""" +161 1 negative_sampler """basic""" +161 1 evaluator """rankbased""" +161 2 
dataset """fb15k237""" +161 2 model """distmult""" +161 2 loss """softplus""" +161 2 regularizer """no""" +161 2 optimizer """adam""" +161 2 training_loop """owa""" +161 2 negative_sampler """basic""" +161 2 evaluator """rankbased""" +161 3 dataset """fb15k237""" +161 3 model """distmult""" +161 3 loss """softplus""" +161 3 regularizer """no""" +161 3 optimizer """adam""" +161 3 training_loop """owa""" +161 3 negative_sampler """basic""" +161 3 evaluator """rankbased""" +161 4 dataset """fb15k237""" +161 4 model """distmult""" +161 4 loss """softplus""" +161 4 regularizer """no""" +161 4 optimizer """adam""" +161 4 training_loop """owa""" +161 4 negative_sampler """basic""" +161 4 evaluator """rankbased""" +161 5 dataset """fb15k237""" +161 5 model """distmult""" +161 5 loss """softplus""" +161 5 regularizer """no""" +161 5 optimizer """adam""" +161 5 training_loop """owa""" +161 5 negative_sampler """basic""" +161 5 evaluator """rankbased""" +161 6 dataset """fb15k237""" +161 6 model """distmult""" +161 6 loss """softplus""" +161 6 regularizer """no""" +161 6 optimizer """adam""" +161 6 training_loop """owa""" +161 6 negative_sampler """basic""" +161 6 evaluator """rankbased""" +161 7 dataset """fb15k237""" +161 7 model """distmult""" +161 7 loss """softplus""" +161 7 regularizer """no""" +161 7 optimizer """adam""" +161 7 training_loop """owa""" +161 7 negative_sampler """basic""" +161 7 evaluator """rankbased""" +161 8 dataset """fb15k237""" +161 8 model """distmult""" +161 8 loss """softplus""" +161 8 regularizer """no""" +161 8 optimizer """adam""" +161 8 training_loop """owa""" +161 8 negative_sampler """basic""" +161 8 evaluator """rankbased""" +161 9 dataset """fb15k237""" +161 9 model """distmult""" +161 9 loss """softplus""" +161 9 regularizer """no""" +161 9 optimizer """adam""" +161 9 training_loop """owa""" +161 9 negative_sampler """basic""" +161 9 evaluator """rankbased""" +161 10 dataset """fb15k237""" +161 10 model """distmult""" +161 10 loss 
"""softplus""" +161 10 regularizer """no""" +161 10 optimizer """adam""" +161 10 training_loop """owa""" +161 10 negative_sampler """basic""" +161 10 evaluator """rankbased""" +161 11 dataset """fb15k237""" +161 11 model """distmult""" +161 11 loss """softplus""" +161 11 regularizer """no""" +161 11 optimizer """adam""" +161 11 training_loop """owa""" +161 11 negative_sampler """basic""" +161 11 evaluator """rankbased""" +161 12 dataset """fb15k237""" +161 12 model """distmult""" +161 12 loss """softplus""" +161 12 regularizer """no""" +161 12 optimizer """adam""" +161 12 training_loop """owa""" +161 12 negative_sampler """basic""" +161 12 evaluator """rankbased""" +161 13 dataset """fb15k237""" +161 13 model """distmult""" +161 13 loss """softplus""" +161 13 regularizer """no""" +161 13 optimizer """adam""" +161 13 training_loop """owa""" +161 13 negative_sampler """basic""" +161 13 evaluator """rankbased""" +161 14 dataset """fb15k237""" +161 14 model """distmult""" +161 14 loss """softplus""" +161 14 regularizer """no""" +161 14 optimizer """adam""" +161 14 training_loop """owa""" +161 14 negative_sampler """basic""" +161 14 evaluator """rankbased""" +161 15 dataset """fb15k237""" +161 15 model """distmult""" +161 15 loss """softplus""" +161 15 regularizer """no""" +161 15 optimizer """adam""" +161 15 training_loop """owa""" +161 15 negative_sampler """basic""" +161 15 evaluator """rankbased""" +161 16 dataset """fb15k237""" +161 16 model """distmult""" +161 16 loss """softplus""" +161 16 regularizer """no""" +161 16 optimizer """adam""" +161 16 training_loop """owa""" +161 16 negative_sampler """basic""" +161 16 evaluator """rankbased""" +161 17 dataset """fb15k237""" +161 17 model """distmult""" +161 17 loss """softplus""" +161 17 regularizer """no""" +161 17 optimizer """adam""" +161 17 training_loop """owa""" +161 17 negative_sampler """basic""" +161 17 evaluator """rankbased""" +161 18 dataset """fb15k237""" +161 18 model """distmult""" +161 18 loss 
"""softplus""" +161 18 regularizer """no""" +161 18 optimizer """adam""" +161 18 training_loop """owa""" +161 18 negative_sampler """basic""" +161 18 evaluator """rankbased""" +161 19 dataset """fb15k237""" +161 19 model """distmult""" +161 19 loss """softplus""" +161 19 regularizer """no""" +161 19 optimizer """adam""" +161 19 training_loop """owa""" +161 19 negative_sampler """basic""" +161 19 evaluator """rankbased""" +161 20 dataset """fb15k237""" +161 20 model """distmult""" +161 20 loss """softplus""" +161 20 regularizer """no""" +161 20 optimizer """adam""" +161 20 training_loop """owa""" +161 20 negative_sampler """basic""" +161 20 evaluator """rankbased""" +161 21 dataset """fb15k237""" +161 21 model """distmult""" +161 21 loss """softplus""" +161 21 regularizer """no""" +161 21 optimizer """adam""" +161 21 training_loop """owa""" +161 21 negative_sampler """basic""" +161 21 evaluator """rankbased""" +161 22 dataset """fb15k237""" +161 22 model """distmult""" +161 22 loss """softplus""" +161 22 regularizer """no""" +161 22 optimizer """adam""" +161 22 training_loop """owa""" +161 22 negative_sampler """basic""" +161 22 evaluator """rankbased""" +161 23 dataset """fb15k237""" +161 23 model """distmult""" +161 23 loss """softplus""" +161 23 regularizer """no""" +161 23 optimizer """adam""" +161 23 training_loop """owa""" +161 23 negative_sampler """basic""" +161 23 evaluator """rankbased""" +161 24 dataset """fb15k237""" +161 24 model """distmult""" +161 24 loss """softplus""" +161 24 regularizer """no""" +161 24 optimizer """adam""" +161 24 training_loop """owa""" +161 24 negative_sampler """basic""" +161 24 evaluator """rankbased""" +161 25 dataset """fb15k237""" +161 25 model """distmult""" +161 25 loss """softplus""" +161 25 regularizer """no""" +161 25 optimizer """adam""" +161 25 training_loop """owa""" +161 25 negative_sampler """basic""" +161 25 evaluator """rankbased""" +161 26 dataset """fb15k237""" +161 26 model """distmult""" +161 26 loss 
"""softplus""" +161 26 regularizer """no""" +161 26 optimizer """adam""" +161 26 training_loop """owa""" +161 26 negative_sampler """basic""" +161 26 evaluator """rankbased""" +161 27 dataset """fb15k237""" +161 27 model """distmult""" +161 27 loss """softplus""" +161 27 regularizer """no""" +161 27 optimizer """adam""" +161 27 training_loop """owa""" +161 27 negative_sampler """basic""" +161 27 evaluator """rankbased""" +161 28 dataset """fb15k237""" +161 28 model """distmult""" +161 28 loss """softplus""" +161 28 regularizer """no""" +161 28 optimizer """adam""" +161 28 training_loop """owa""" +161 28 negative_sampler """basic""" +161 28 evaluator """rankbased""" +162 1 model.embedding_dim 0.0 +162 1 loss.margin 4.7016015489387994 +162 1 optimizer.lr 0.003874925733680744 +162 1 negative_sampler.num_negs_per_pos 10.0 +162 1 training.batch_size 0.0 +162 2 model.embedding_dim 2.0 +162 2 loss.margin 2.0268448540308825 +162 2 optimizer.lr 0.008960448676119392 +162 2 negative_sampler.num_negs_per_pos 21.0 +162 2 training.batch_size 0.0 +162 3 model.embedding_dim 0.0 +162 3 loss.margin 3.701607205627065 +162 3 optimizer.lr 0.0064749521385702715 +162 3 negative_sampler.num_negs_per_pos 52.0 +162 3 training.batch_size 1.0 +162 4 model.embedding_dim 1.0 +162 4 loss.margin 2.8461035984397034 +162 4 optimizer.lr 0.03692812156847677 +162 4 negative_sampler.num_negs_per_pos 23.0 +162 4 training.batch_size 2.0 +162 5 model.embedding_dim 0.0 +162 5 loss.margin 7.5737823986924155 +162 5 optimizer.lr 0.013818737237558219 +162 5 negative_sampler.num_negs_per_pos 72.0 +162 5 training.batch_size 0.0 +162 6 model.embedding_dim 0.0 +162 6 loss.margin 6.3817874117958 +162 6 optimizer.lr 0.03366423053778451 +162 6 negative_sampler.num_negs_per_pos 44.0 +162 6 training.batch_size 1.0 +162 7 model.embedding_dim 1.0 +162 7 loss.margin 1.3627715544511236 +162 7 optimizer.lr 0.06252952313279171 +162 7 negative_sampler.num_negs_per_pos 13.0 +162 7 training.batch_size 0.0 +162 8 
model.embedding_dim 0.0 +162 8 loss.margin 7.426112620545705 +162 8 optimizer.lr 0.08164361513998564 +162 8 negative_sampler.num_negs_per_pos 10.0 +162 8 training.batch_size 1.0 +162 9 model.embedding_dim 1.0 +162 9 loss.margin 0.7516171999377379 +162 9 optimizer.lr 0.05333794946393714 +162 9 negative_sampler.num_negs_per_pos 23.0 +162 9 training.batch_size 2.0 +162 10 model.embedding_dim 0.0 +162 10 loss.margin 1.241496051841191 +162 10 optimizer.lr 0.0012245258596975108 +162 10 negative_sampler.num_negs_per_pos 95.0 +162 10 training.batch_size 0.0 +162 11 model.embedding_dim 1.0 +162 11 loss.margin 3.0018000677371752 +162 11 optimizer.lr 0.0016525732709651998 +162 11 negative_sampler.num_negs_per_pos 15.0 +162 11 training.batch_size 0.0 +162 12 model.embedding_dim 0.0 +162 12 loss.margin 1.7140168317073305 +162 12 optimizer.lr 0.003662204595267274 +162 12 negative_sampler.num_negs_per_pos 32.0 +162 12 training.batch_size 2.0 +162 13 model.embedding_dim 2.0 +162 13 loss.margin 7.521202764653378 +162 13 optimizer.lr 0.00625082158716711 +162 13 negative_sampler.num_negs_per_pos 47.0 +162 13 training.batch_size 0.0 +162 14 model.embedding_dim 0.0 +162 14 loss.margin 4.493838267007879 +162 14 optimizer.lr 0.09255045601054664 +162 14 negative_sampler.num_negs_per_pos 8.0 +162 14 training.batch_size 1.0 +162 15 model.embedding_dim 2.0 +162 15 loss.margin 7.1128219028879665 +162 15 optimizer.lr 0.013201993830821055 +162 15 negative_sampler.num_negs_per_pos 8.0 +162 15 training.batch_size 2.0 +162 16 model.embedding_dim 1.0 +162 16 loss.margin 3.630353930948666 +162 16 optimizer.lr 0.020159834684334167 +162 16 negative_sampler.num_negs_per_pos 96.0 +162 16 training.batch_size 0.0 +162 17 model.embedding_dim 0.0 +162 17 loss.margin 1.5957863079945205 +162 17 optimizer.lr 0.03182631352762735 +162 17 negative_sampler.num_negs_per_pos 96.0 +162 17 training.batch_size 1.0 +162 18 model.embedding_dim 1.0 +162 18 loss.margin 9.93252455951757 +162 18 optimizer.lr 
0.022406946013612837 +162 18 negative_sampler.num_negs_per_pos 4.0 +162 18 training.batch_size 0.0 +162 19 model.embedding_dim 0.0 +162 19 loss.margin 3.6980651639087165 +162 19 optimizer.lr 0.0072979229923041435 +162 19 negative_sampler.num_negs_per_pos 79.0 +162 19 training.batch_size 2.0 +162 20 model.embedding_dim 0.0 +162 20 loss.margin 3.3074618178467197 +162 20 optimizer.lr 0.0045924253786149335 +162 20 negative_sampler.num_negs_per_pos 55.0 +162 20 training.batch_size 1.0 +162 21 model.embedding_dim 0.0 +162 21 loss.margin 1.7993392806107265 +162 21 optimizer.lr 0.009189462284341055 +162 21 negative_sampler.num_negs_per_pos 48.0 +162 21 training.batch_size 0.0 +162 22 model.embedding_dim 0.0 +162 22 loss.margin 9.845415385681001 +162 22 optimizer.lr 0.01046309426067675 +162 22 negative_sampler.num_negs_per_pos 64.0 +162 22 training.batch_size 0.0 +162 23 model.embedding_dim 2.0 +162 23 loss.margin 2.9790265054199736 +162 23 optimizer.lr 0.006695758670469291 +162 23 negative_sampler.num_negs_per_pos 46.0 +162 23 training.batch_size 1.0 +162 24 model.embedding_dim 1.0 +162 24 loss.margin 5.488892094519016 +162 24 optimizer.lr 0.001981638907457316 +162 24 negative_sampler.num_negs_per_pos 5.0 +162 24 training.batch_size 1.0 +162 25 model.embedding_dim 1.0 +162 25 loss.margin 1.427081156412602 +162 25 optimizer.lr 0.018821073885768008 +162 25 negative_sampler.num_negs_per_pos 63.0 +162 25 training.batch_size 2.0 +162 26 model.embedding_dim 1.0 +162 26 loss.margin 8.658080922325539 +162 26 optimizer.lr 0.03651825045436513 +162 26 negative_sampler.num_negs_per_pos 76.0 +162 26 training.batch_size 2.0 +162 27 model.embedding_dim 0.0 +162 27 loss.margin 6.346529423584632 +162 27 optimizer.lr 0.003678635933398212 +162 27 negative_sampler.num_negs_per_pos 77.0 +162 27 training.batch_size 2.0 +162 28 model.embedding_dim 2.0 +162 28 loss.margin 4.909957445997762 +162 28 optimizer.lr 0.0234528987847664 +162 28 negative_sampler.num_negs_per_pos 46.0 +162 28 
training.batch_size 2.0 +162 29 model.embedding_dim 2.0 +162 29 loss.margin 7.104344744080424 +162 29 optimizer.lr 0.0010686030819296768 +162 29 negative_sampler.num_negs_per_pos 66.0 +162 29 training.batch_size 2.0 +162 1 dataset """fb15k237""" +162 1 model """distmult""" +162 1 loss """marginranking""" +162 1 regularizer """no""" +162 1 optimizer """adam""" +162 1 training_loop """owa""" +162 1 negative_sampler """basic""" +162 1 evaluator """rankbased""" +162 2 dataset """fb15k237""" +162 2 model """distmult""" +162 2 loss """marginranking""" +162 2 regularizer """no""" +162 2 optimizer """adam""" +162 2 training_loop """owa""" +162 2 negative_sampler """basic""" +162 2 evaluator """rankbased""" +162 3 dataset """fb15k237""" +162 3 model """distmult""" +162 3 loss """marginranking""" +162 3 regularizer """no""" +162 3 optimizer """adam""" +162 3 training_loop """owa""" +162 3 negative_sampler """basic""" +162 3 evaluator """rankbased""" +162 4 dataset """fb15k237""" +162 4 model """distmult""" +162 4 loss """marginranking""" +162 4 regularizer """no""" +162 4 optimizer """adam""" +162 4 training_loop """owa""" +162 4 negative_sampler """basic""" +162 4 evaluator """rankbased""" +162 5 dataset """fb15k237""" +162 5 model """distmult""" +162 5 loss """marginranking""" +162 5 regularizer """no""" +162 5 optimizer """adam""" +162 5 training_loop """owa""" +162 5 negative_sampler """basic""" +162 5 evaluator """rankbased""" +162 6 dataset """fb15k237""" +162 6 model """distmult""" +162 6 loss """marginranking""" +162 6 regularizer """no""" +162 6 optimizer """adam""" +162 6 training_loop """owa""" +162 6 negative_sampler """basic""" +162 6 evaluator """rankbased""" +162 7 dataset """fb15k237""" +162 7 model """distmult""" +162 7 loss """marginranking""" +162 7 regularizer """no""" +162 7 optimizer """adam""" +162 7 training_loop """owa""" +162 7 negative_sampler """basic""" +162 7 evaluator """rankbased""" +162 8 dataset """fb15k237""" +162 8 model """distmult""" 
+162 8 loss """marginranking""" +162 8 regularizer """no""" +162 8 optimizer """adam""" +162 8 training_loop """owa""" +162 8 negative_sampler """basic""" +162 8 evaluator """rankbased""" +162 9 dataset """fb15k237""" +162 9 model """distmult""" +162 9 loss """marginranking""" +162 9 regularizer """no""" +162 9 optimizer """adam""" +162 9 training_loop """owa""" +162 9 negative_sampler """basic""" +162 9 evaluator """rankbased""" +162 10 dataset """fb15k237""" +162 10 model """distmult""" +162 10 loss """marginranking""" +162 10 regularizer """no""" +162 10 optimizer """adam""" +162 10 training_loop """owa""" +162 10 negative_sampler """basic""" +162 10 evaluator """rankbased""" +162 11 dataset """fb15k237""" +162 11 model """distmult""" +162 11 loss """marginranking""" +162 11 regularizer """no""" +162 11 optimizer """adam""" +162 11 training_loop """owa""" +162 11 negative_sampler """basic""" +162 11 evaluator """rankbased""" +162 12 dataset """fb15k237""" +162 12 model """distmult""" +162 12 loss """marginranking""" +162 12 regularizer """no""" +162 12 optimizer """adam""" +162 12 training_loop """owa""" +162 12 negative_sampler """basic""" +162 12 evaluator """rankbased""" +162 13 dataset """fb15k237""" +162 13 model """distmult""" +162 13 loss """marginranking""" +162 13 regularizer """no""" +162 13 optimizer """adam""" +162 13 training_loop """owa""" +162 13 negative_sampler """basic""" +162 13 evaluator """rankbased""" +162 14 dataset """fb15k237""" +162 14 model """distmult""" +162 14 loss """marginranking""" +162 14 regularizer """no""" +162 14 optimizer """adam""" +162 14 training_loop """owa""" +162 14 negative_sampler """basic""" +162 14 evaluator """rankbased""" +162 15 dataset """fb15k237""" +162 15 model """distmult""" +162 15 loss """marginranking""" +162 15 regularizer """no""" +162 15 optimizer """adam""" +162 15 training_loop """owa""" +162 15 negative_sampler """basic""" +162 15 evaluator """rankbased""" +162 16 dataset """fb15k237""" +162 16 
model """distmult""" +162 16 loss """marginranking""" +162 16 regularizer """no""" +162 16 optimizer """adam""" +162 16 training_loop """owa""" +162 16 negative_sampler """basic""" +162 16 evaluator """rankbased""" +162 17 dataset """fb15k237""" +162 17 model """distmult""" +162 17 loss """marginranking""" +162 17 regularizer """no""" +162 17 optimizer """adam""" +162 17 training_loop """owa""" +162 17 negative_sampler """basic""" +162 17 evaluator """rankbased""" +162 18 dataset """fb15k237""" +162 18 model """distmult""" +162 18 loss """marginranking""" +162 18 regularizer """no""" +162 18 optimizer """adam""" +162 18 training_loop """owa""" +162 18 negative_sampler """basic""" +162 18 evaluator """rankbased""" +162 19 dataset """fb15k237""" +162 19 model """distmult""" +162 19 loss """marginranking""" +162 19 regularizer """no""" +162 19 optimizer """adam""" +162 19 training_loop """owa""" +162 19 negative_sampler """basic""" +162 19 evaluator """rankbased""" +162 20 dataset """fb15k237""" +162 20 model """distmult""" +162 20 loss """marginranking""" +162 20 regularizer """no""" +162 20 optimizer """adam""" +162 20 training_loop """owa""" +162 20 negative_sampler """basic""" +162 20 evaluator """rankbased""" +162 21 dataset """fb15k237""" +162 21 model """distmult""" +162 21 loss """marginranking""" +162 21 regularizer """no""" +162 21 optimizer """adam""" +162 21 training_loop """owa""" +162 21 negative_sampler """basic""" +162 21 evaluator """rankbased""" +162 22 dataset """fb15k237""" +162 22 model """distmult""" +162 22 loss """marginranking""" +162 22 regularizer """no""" +162 22 optimizer """adam""" +162 22 training_loop """owa""" +162 22 negative_sampler """basic""" +162 22 evaluator """rankbased""" +162 23 dataset """fb15k237""" +162 23 model """distmult""" +162 23 loss """marginranking""" +162 23 regularizer """no""" +162 23 optimizer """adam""" +162 23 training_loop """owa""" +162 23 negative_sampler """basic""" +162 23 evaluator """rankbased""" +162 
24 dataset """fb15k237""" +162 24 model """distmult""" +162 24 loss """marginranking""" +162 24 regularizer """no""" +162 24 optimizer """adam""" +162 24 training_loop """owa""" +162 24 negative_sampler """basic""" +162 24 evaluator """rankbased""" +162 25 dataset """fb15k237""" +162 25 model """distmult""" +162 25 loss """marginranking""" +162 25 regularizer """no""" +162 25 optimizer """adam""" +162 25 training_loop """owa""" +162 25 negative_sampler """basic""" +162 25 evaluator """rankbased""" +162 26 dataset """fb15k237""" +162 26 model """distmult""" +162 26 loss """marginranking""" +162 26 regularizer """no""" +162 26 optimizer """adam""" +162 26 training_loop """owa""" +162 26 negative_sampler """basic""" +162 26 evaluator """rankbased""" +162 27 dataset """fb15k237""" +162 27 model """distmult""" +162 27 loss """marginranking""" +162 27 regularizer """no""" +162 27 optimizer """adam""" +162 27 training_loop """owa""" +162 27 negative_sampler """basic""" +162 27 evaluator """rankbased""" +162 28 dataset """fb15k237""" +162 28 model """distmult""" +162 28 loss """marginranking""" +162 28 regularizer """no""" +162 28 optimizer """adam""" +162 28 training_loop """owa""" +162 28 negative_sampler """basic""" +162 28 evaluator """rankbased""" +162 29 dataset """fb15k237""" +162 29 model """distmult""" +162 29 loss """marginranking""" +162 29 regularizer """no""" +162 29 optimizer """adam""" +162 29 training_loop """owa""" +162 29 negative_sampler """basic""" +162 29 evaluator """rankbased""" +163 1 model.embedding_dim 0.0 +163 1 loss.margin 6.591667791997211 +163 1 optimizer.lr 0.02100095743341296 +163 1 negative_sampler.num_negs_per_pos 51.0 +163 1 training.batch_size 0.0 +163 2 model.embedding_dim 2.0 +163 2 loss.margin 0.991361282133425 +163 2 optimizer.lr 0.04043223233525612 +163 2 negative_sampler.num_negs_per_pos 69.0 +163 2 training.batch_size 0.0 +163 3 model.embedding_dim 0.0 +163 3 loss.margin 1.194387673847361 +163 3 optimizer.lr 0.0035673733619937635 
+163 3 negative_sampler.num_negs_per_pos 39.0 +163 3 training.batch_size 0.0 +163 4 model.embedding_dim 1.0 +163 4 loss.margin 7.1178530249891425 +163 4 optimizer.lr 0.003296544423616242 +163 4 negative_sampler.num_negs_per_pos 34.0 +163 4 training.batch_size 1.0 +163 5 model.embedding_dim 0.0 +163 5 loss.margin 5.06954552493258 +163 5 optimizer.lr 0.04725213970414682 +163 5 negative_sampler.num_negs_per_pos 43.0 +163 5 training.batch_size 2.0 +163 6 model.embedding_dim 2.0 +163 6 loss.margin 1.1434333559228318 +163 6 optimizer.lr 0.00733247546393515 +163 6 negative_sampler.num_negs_per_pos 72.0 +163 6 training.batch_size 0.0 +163 7 model.embedding_dim 2.0 +163 7 loss.margin 7.93076200461611 +163 7 optimizer.lr 0.0013276970343232366 +163 7 negative_sampler.num_negs_per_pos 84.0 +163 7 training.batch_size 1.0 +163 8 model.embedding_dim 0.0 +163 8 loss.margin 6.336500742672094 +163 8 optimizer.lr 0.0012863420195453402 +163 8 negative_sampler.num_negs_per_pos 86.0 +163 8 training.batch_size 0.0 +163 9 model.embedding_dim 0.0 +163 9 loss.margin 8.395350297756325 +163 9 optimizer.lr 0.009494370812264978 +163 9 negative_sampler.num_negs_per_pos 93.0 +163 9 training.batch_size 1.0 +163 10 model.embedding_dim 0.0 +163 10 loss.margin 4.8592209696781925 +163 10 optimizer.lr 0.021369619452105207 +163 10 negative_sampler.num_negs_per_pos 18.0 +163 10 training.batch_size 1.0 +163 11 model.embedding_dim 0.0 +163 11 loss.margin 5.4365287272873255 +163 11 optimizer.lr 0.0011087715522545996 +163 11 negative_sampler.num_negs_per_pos 98.0 +163 11 training.batch_size 2.0 +163 12 model.embedding_dim 0.0 +163 12 loss.margin 2.68318053385085 +163 12 optimizer.lr 0.0722705699764009 +163 12 negative_sampler.num_negs_per_pos 78.0 +163 12 training.batch_size 1.0 +163 13 model.embedding_dim 2.0 +163 13 loss.margin 6.6588991661469334 +163 13 optimizer.lr 0.0032626399743661805 +163 13 negative_sampler.num_negs_per_pos 71.0 +163 13 training.batch_size 0.0 +163 14 model.embedding_dim 0.0 +163 14 
loss.margin 2.4425696742499605 +163 14 optimizer.lr 0.0025731317955150204 +163 14 negative_sampler.num_negs_per_pos 14.0 +163 14 training.batch_size 0.0 +163 15 model.embedding_dim 1.0 +163 15 loss.margin 0.544958164394336 +163 15 optimizer.lr 0.006147754115442014 +163 15 negative_sampler.num_negs_per_pos 19.0 +163 15 training.batch_size 0.0 +163 16 model.embedding_dim 2.0 +163 16 loss.margin 8.31229962570029 +163 16 optimizer.lr 0.0026643664374866455 +163 16 negative_sampler.num_negs_per_pos 95.0 +163 16 training.batch_size 2.0 +163 17 model.embedding_dim 1.0 +163 17 loss.margin 6.503646734196494 +163 17 optimizer.lr 0.011987403299750874 +163 17 negative_sampler.num_negs_per_pos 38.0 +163 17 training.batch_size 2.0 +163 18 model.embedding_dim 0.0 +163 18 loss.margin 1.0209464815291693 +163 18 optimizer.lr 0.04516385062637997 +163 18 negative_sampler.num_negs_per_pos 74.0 +163 18 training.batch_size 0.0 +163 19 model.embedding_dim 1.0 +163 19 loss.margin 3.8092572304281562 +163 19 optimizer.lr 0.04289544773772927 +163 19 negative_sampler.num_negs_per_pos 13.0 +163 19 training.batch_size 2.0 +163 20 model.embedding_dim 0.0 +163 20 loss.margin 3.445234989826761 +163 20 optimizer.lr 0.02374353068102855 +163 20 negative_sampler.num_negs_per_pos 29.0 +163 20 training.batch_size 0.0 +163 21 model.embedding_dim 2.0 +163 21 loss.margin 5.165263439235338 +163 21 optimizer.lr 0.04058459991696223 +163 21 negative_sampler.num_negs_per_pos 85.0 +163 21 training.batch_size 2.0 +163 22 model.embedding_dim 2.0 +163 22 loss.margin 9.616556060948945 +163 22 optimizer.lr 0.04549029009930484 +163 22 negative_sampler.num_negs_per_pos 70.0 +163 22 training.batch_size 2.0 +163 23 model.embedding_dim 1.0 +163 23 loss.margin 4.171278581965675 +163 23 optimizer.lr 0.05722200313090686 +163 23 negative_sampler.num_negs_per_pos 15.0 +163 23 training.batch_size 1.0 +163 24 model.embedding_dim 0.0 +163 24 loss.margin 7.34092871290114 +163 24 optimizer.lr 0.03078975962046194 +163 24 
negative_sampler.num_negs_per_pos 44.0 +163 24 training.batch_size 0.0 +163 25 model.embedding_dim 1.0 +163 25 loss.margin 2.7155687604476557 +163 25 optimizer.lr 0.005285687995834069 +163 25 negative_sampler.num_negs_per_pos 89.0 +163 25 training.batch_size 0.0 +163 26 model.embedding_dim 1.0 +163 26 loss.margin 4.83835309261526 +163 26 optimizer.lr 0.0031354029981751426 +163 26 negative_sampler.num_negs_per_pos 30.0 +163 26 training.batch_size 0.0 +163 27 model.embedding_dim 2.0 +163 27 loss.margin 4.096439937599544 +163 27 optimizer.lr 0.0019061119759159785 +163 27 negative_sampler.num_negs_per_pos 7.0 +163 27 training.batch_size 1.0 +163 28 model.embedding_dim 1.0 +163 28 loss.margin 7.4850301263687165 +163 28 optimizer.lr 0.008250796127823128 +163 28 negative_sampler.num_negs_per_pos 6.0 +163 28 training.batch_size 0.0 +163 29 model.embedding_dim 1.0 +163 29 loss.margin 8.937029315619288 +163 29 optimizer.lr 0.05179605234540108 +163 29 negative_sampler.num_negs_per_pos 31.0 +163 29 training.batch_size 1.0 +163 30 model.embedding_dim 1.0 +163 30 loss.margin 9.858201191877075 +163 30 optimizer.lr 0.004388757018886682 +163 30 negative_sampler.num_negs_per_pos 78.0 +163 30 training.batch_size 0.0 +163 31 model.embedding_dim 0.0 +163 31 loss.margin 3.2242327478186934 +163 31 optimizer.lr 0.013240729514086573 +163 31 negative_sampler.num_negs_per_pos 99.0 +163 31 training.batch_size 1.0 +163 32 model.embedding_dim 0.0 +163 32 loss.margin 8.88282188781583 +163 32 optimizer.lr 0.010228235114495287 +163 32 negative_sampler.num_negs_per_pos 96.0 +163 32 training.batch_size 2.0 +163 33 model.embedding_dim 0.0 +163 33 loss.margin 5.551346351083138 +163 33 optimizer.lr 0.00287167709475728 +163 33 negative_sampler.num_negs_per_pos 83.0 +163 33 training.batch_size 1.0 +163 34 model.embedding_dim 2.0 +163 34 loss.margin 7.925027147650721 +163 34 optimizer.lr 0.008831536790720167 +163 34 negative_sampler.num_negs_per_pos 7.0 +163 34 training.batch_size 0.0 +163 35 
model.embedding_dim 1.0 +163 35 loss.margin 7.904799383026774 +163 35 optimizer.lr 0.019036967552004434 +163 35 negative_sampler.num_negs_per_pos 71.0 +163 35 training.batch_size 0.0 +163 36 model.embedding_dim 1.0 +163 36 loss.margin 5.117120609941127 +163 36 optimizer.lr 0.0022518958272236575 +163 36 negative_sampler.num_negs_per_pos 66.0 +163 36 training.batch_size 1.0 +163 37 model.embedding_dim 0.0 +163 37 loss.margin 4.560475581417901 +163 37 optimizer.lr 0.008943892014851046 +163 37 negative_sampler.num_negs_per_pos 20.0 +163 37 training.batch_size 2.0 +163 38 model.embedding_dim 0.0 +163 38 loss.margin 9.018863542976371 +163 38 optimizer.lr 0.03537768415331274 +163 38 negative_sampler.num_negs_per_pos 24.0 +163 38 training.batch_size 2.0 +163 39 model.embedding_dim 1.0 +163 39 loss.margin 6.370921088997107 +163 39 optimizer.lr 0.01793413449742773 +163 39 negative_sampler.num_negs_per_pos 43.0 +163 39 training.batch_size 0.0 +163 40 model.embedding_dim 1.0 +163 40 loss.margin 1.0117173435705586 +163 40 optimizer.lr 0.0031627410013209327 +163 40 negative_sampler.num_negs_per_pos 22.0 +163 40 training.batch_size 1.0 +163 41 model.embedding_dim 0.0 +163 41 loss.margin 0.7351843604633179 +163 41 optimizer.lr 0.0053756751145281395 +163 41 negative_sampler.num_negs_per_pos 57.0 +163 41 training.batch_size 2.0 +163 42 model.embedding_dim 0.0 +163 42 loss.margin 7.022612629313322 +163 42 optimizer.lr 0.0019170436315363868 +163 42 negative_sampler.num_negs_per_pos 71.0 +163 42 training.batch_size 0.0 +163 43 model.embedding_dim 0.0 +163 43 loss.margin 2.1001525683657984 +163 43 optimizer.lr 0.05768650319285454 +163 43 negative_sampler.num_negs_per_pos 43.0 +163 43 training.batch_size 2.0 +163 44 model.embedding_dim 1.0 +163 44 loss.margin 6.963688489290127 +163 44 optimizer.lr 0.0010949658895832513 +163 44 negative_sampler.num_negs_per_pos 87.0 +163 44 training.batch_size 1.0 +163 1 dataset """fb15k237""" +163 1 model """distmult""" +163 1 loss """marginranking""" 
+163 1 regularizer """no""" +163 1 optimizer """adam""" +163 1 training_loop """owa""" +163 1 negative_sampler """basic""" +163 1 evaluator """rankbased""" +163 2 dataset """fb15k237""" +163 2 model """distmult""" +163 2 loss """marginranking""" +163 2 regularizer """no""" +163 2 optimizer """adam""" +163 2 training_loop """owa""" +163 2 negative_sampler """basic""" +163 2 evaluator """rankbased""" +163 3 dataset """fb15k237""" +163 3 model """distmult""" +163 3 loss """marginranking""" +163 3 regularizer """no""" +163 3 optimizer """adam""" +163 3 training_loop """owa""" +163 3 negative_sampler """basic""" +163 3 evaluator """rankbased""" +163 4 dataset """fb15k237""" +163 4 model """distmult""" +163 4 loss """marginranking""" +163 4 regularizer """no""" +163 4 optimizer """adam""" +163 4 training_loop """owa""" +163 4 negative_sampler """basic""" +163 4 evaluator """rankbased""" +163 5 dataset """fb15k237""" +163 5 model """distmult""" +163 5 loss """marginranking""" +163 5 regularizer """no""" +163 5 optimizer """adam""" +163 5 training_loop """owa""" +163 5 negative_sampler """basic""" +163 5 evaluator """rankbased""" +163 6 dataset """fb15k237""" +163 6 model """distmult""" +163 6 loss """marginranking""" +163 6 regularizer """no""" +163 6 optimizer """adam""" +163 6 training_loop """owa""" +163 6 negative_sampler """basic""" +163 6 evaluator """rankbased""" +163 7 dataset """fb15k237""" +163 7 model """distmult""" +163 7 loss """marginranking""" +163 7 regularizer """no""" +163 7 optimizer """adam""" +163 7 training_loop """owa""" +163 7 negative_sampler """basic""" +163 7 evaluator """rankbased""" +163 8 dataset """fb15k237""" +163 8 model """distmult""" +163 8 loss """marginranking""" +163 8 regularizer """no""" +163 8 optimizer """adam""" +163 8 training_loop """owa""" +163 8 negative_sampler """basic""" +163 8 evaluator """rankbased""" +163 9 dataset """fb15k237""" +163 9 model """distmult""" +163 9 loss """marginranking""" +163 9 regularizer """no""" 
+163 9 optimizer """adam""" +163 9 training_loop """owa""" +163 9 negative_sampler """basic""" +163 9 evaluator """rankbased""" +163 10 dataset """fb15k237""" +163 10 model """distmult""" +163 10 loss """marginranking""" +163 10 regularizer """no""" +163 10 optimizer """adam""" +163 10 training_loop """owa""" +163 10 negative_sampler """basic""" +163 10 evaluator """rankbased""" +163 11 dataset """fb15k237""" +163 11 model """distmult""" +163 11 loss """marginranking""" +163 11 regularizer """no""" +163 11 optimizer """adam""" +163 11 training_loop """owa""" +163 11 negative_sampler """basic""" +163 11 evaluator """rankbased""" +163 12 dataset """fb15k237""" +163 12 model """distmult""" +163 12 loss """marginranking""" +163 12 regularizer """no""" +163 12 optimizer """adam""" +163 12 training_loop """owa""" +163 12 negative_sampler """basic""" +163 12 evaluator """rankbased""" +163 13 dataset """fb15k237""" +163 13 model """distmult""" +163 13 loss """marginranking""" +163 13 regularizer """no""" +163 13 optimizer """adam""" +163 13 training_loop """owa""" +163 13 negative_sampler """basic""" +163 13 evaluator """rankbased""" +163 14 dataset """fb15k237""" +163 14 model """distmult""" +163 14 loss """marginranking""" +163 14 regularizer """no""" +163 14 optimizer """adam""" +163 14 training_loop """owa""" +163 14 negative_sampler """basic""" +163 14 evaluator """rankbased""" +163 15 dataset """fb15k237""" +163 15 model """distmult""" +163 15 loss """marginranking""" +163 15 regularizer """no""" +163 15 optimizer """adam""" +163 15 training_loop """owa""" +163 15 negative_sampler """basic""" +163 15 evaluator """rankbased""" +163 16 dataset """fb15k237""" +163 16 model """distmult""" +163 16 loss """marginranking""" +163 16 regularizer """no""" +163 16 optimizer """adam""" +163 16 training_loop """owa""" +163 16 negative_sampler """basic""" +163 16 evaluator """rankbased""" +163 17 dataset """fb15k237""" +163 17 model """distmult""" +163 17 loss """marginranking""" 
+163 17 regularizer """no""" +163 17 optimizer """adam""" +163 17 training_loop """owa""" +163 17 negative_sampler """basic""" +163 17 evaluator """rankbased""" +163 18 dataset """fb15k237""" +163 18 model """distmult""" +163 18 loss """marginranking""" +163 18 regularizer """no""" +163 18 optimizer """adam""" +163 18 training_loop """owa""" +163 18 negative_sampler """basic""" +163 18 evaluator """rankbased""" +163 19 dataset """fb15k237""" +163 19 model """distmult""" +163 19 loss """marginranking""" +163 19 regularizer """no""" +163 19 optimizer """adam""" +163 19 training_loop """owa""" +163 19 negative_sampler """basic""" +163 19 evaluator """rankbased""" +163 20 dataset """fb15k237""" +163 20 model """distmult""" +163 20 loss """marginranking""" +163 20 regularizer """no""" +163 20 optimizer """adam""" +163 20 training_loop """owa""" +163 20 negative_sampler """basic""" +163 20 evaluator """rankbased""" +163 21 dataset """fb15k237""" +163 21 model """distmult""" +163 21 loss """marginranking""" +163 21 regularizer """no""" +163 21 optimizer """adam""" +163 21 training_loop """owa""" +163 21 negative_sampler """basic""" +163 21 evaluator """rankbased""" +163 22 dataset """fb15k237""" +163 22 model """distmult""" +163 22 loss """marginranking""" +163 22 regularizer """no""" +163 22 optimizer """adam""" +163 22 training_loop """owa""" +163 22 negative_sampler """basic""" +163 22 evaluator """rankbased""" +163 23 dataset """fb15k237""" +163 23 model """distmult""" +163 23 loss """marginranking""" +163 23 regularizer """no""" +163 23 optimizer """adam""" +163 23 training_loop """owa""" +163 23 negative_sampler """basic""" +163 23 evaluator """rankbased""" +163 24 dataset """fb15k237""" +163 24 model """distmult""" +163 24 loss """marginranking""" +163 24 regularizer """no""" +163 24 optimizer """adam""" +163 24 training_loop """owa""" +163 24 negative_sampler """basic""" +163 24 evaluator """rankbased""" +163 25 dataset """fb15k237""" +163 25 model """distmult""" 
+163 25 loss """marginranking""" +163 25 regularizer """no""" +163 25 optimizer """adam""" +163 25 training_loop """owa""" +163 25 negative_sampler """basic""" +163 25 evaluator """rankbased""" +163 26 dataset """fb15k237""" +163 26 model """distmult""" +163 26 loss """marginranking""" +163 26 regularizer """no""" +163 26 optimizer """adam""" +163 26 training_loop """owa""" +163 26 negative_sampler """basic""" +163 26 evaluator """rankbased""" +163 27 dataset """fb15k237""" +163 27 model """distmult""" +163 27 loss """marginranking""" +163 27 regularizer """no""" +163 27 optimizer """adam""" +163 27 training_loop """owa""" +163 27 negative_sampler """basic""" +163 27 evaluator """rankbased""" +163 28 dataset """fb15k237""" +163 28 model """distmult""" +163 28 loss """marginranking""" +163 28 regularizer """no""" +163 28 optimizer """adam""" +163 28 training_loop """owa""" +163 28 negative_sampler """basic""" +163 28 evaluator """rankbased""" +163 29 dataset """fb15k237""" +163 29 model """distmult""" +163 29 loss """marginranking""" +163 29 regularizer """no""" +163 29 optimizer """adam""" +163 29 training_loop """owa""" +163 29 negative_sampler """basic""" +163 29 evaluator """rankbased""" +163 30 dataset """fb15k237""" +163 30 model """distmult""" +163 30 loss """marginranking""" +163 30 regularizer """no""" +163 30 optimizer """adam""" +163 30 training_loop """owa""" +163 30 negative_sampler """basic""" +163 30 evaluator """rankbased""" +163 31 dataset """fb15k237""" +163 31 model """distmult""" +163 31 loss """marginranking""" +163 31 regularizer """no""" +163 31 optimizer """adam""" +163 31 training_loop """owa""" +163 31 negative_sampler """basic""" +163 31 evaluator """rankbased""" +163 32 dataset """fb15k237""" +163 32 model """distmult""" +163 32 loss """marginranking""" +163 32 regularizer """no""" +163 32 optimizer """adam""" +163 32 training_loop """owa""" +163 32 negative_sampler """basic""" +163 32 evaluator """rankbased""" +163 33 dataset 
"""fb15k237""" +163 33 model """distmult""" +163 33 loss """marginranking""" +163 33 regularizer """no""" +163 33 optimizer """adam""" +163 33 training_loop """owa""" +163 33 negative_sampler """basic""" +163 33 evaluator """rankbased""" +163 34 dataset """fb15k237""" +163 34 model """distmult""" +163 34 loss """marginranking""" +163 34 regularizer """no""" +163 34 optimizer """adam""" +163 34 training_loop """owa""" +163 34 negative_sampler """basic""" +163 34 evaluator """rankbased""" +163 35 dataset """fb15k237""" +163 35 model """distmult""" +163 35 loss """marginranking""" +163 35 regularizer """no""" +163 35 optimizer """adam""" +163 35 training_loop """owa""" +163 35 negative_sampler """basic""" +163 35 evaluator """rankbased""" +163 36 dataset """fb15k237""" +163 36 model """distmult""" +163 36 loss """marginranking""" +163 36 regularizer """no""" +163 36 optimizer """adam""" +163 36 training_loop """owa""" +163 36 negative_sampler """basic""" +163 36 evaluator """rankbased""" +163 37 dataset """fb15k237""" +163 37 model """distmult""" +163 37 loss """marginranking""" +163 37 regularizer """no""" +163 37 optimizer """adam""" +163 37 training_loop """owa""" +163 37 negative_sampler """basic""" +163 37 evaluator """rankbased""" +163 38 dataset """fb15k237""" +163 38 model """distmult""" +163 38 loss """marginranking""" +163 38 regularizer """no""" +163 38 optimizer """adam""" +163 38 training_loop """owa""" +163 38 negative_sampler """basic""" +163 38 evaluator """rankbased""" +163 39 dataset """fb15k237""" +163 39 model """distmult""" +163 39 loss """marginranking""" +163 39 regularizer """no""" +163 39 optimizer """adam""" +163 39 training_loop """owa""" +163 39 negative_sampler """basic""" +163 39 evaluator """rankbased""" +163 40 dataset """fb15k237""" +163 40 model """distmult""" +163 40 loss """marginranking""" +163 40 regularizer """no""" +163 40 optimizer """adam""" +163 40 training_loop """owa""" +163 40 negative_sampler """basic""" +163 40 evaluator 
"""rankbased""" +163 41 dataset """fb15k237""" +163 41 model """distmult""" +163 41 loss """marginranking""" +163 41 regularizer """no""" +163 41 optimizer """adam""" +163 41 training_loop """owa""" +163 41 negative_sampler """basic""" +163 41 evaluator """rankbased""" +163 42 dataset """fb15k237""" +163 42 model """distmult""" +163 42 loss """marginranking""" +163 42 regularizer """no""" +163 42 optimizer """adam""" +163 42 training_loop """owa""" +163 42 negative_sampler """basic""" +163 42 evaluator """rankbased""" +163 43 dataset """fb15k237""" +163 43 model """distmult""" +163 43 loss """marginranking""" +163 43 regularizer """no""" +163 43 optimizer """adam""" +163 43 training_loop """owa""" +163 43 negative_sampler """basic""" +163 43 evaluator """rankbased""" +163 44 dataset """fb15k237""" +163 44 model """distmult""" +163 44 loss """marginranking""" +163 44 regularizer """no""" +163 44 optimizer """adam""" +163 44 training_loop """owa""" +163 44 negative_sampler """basic""" +163 44 evaluator """rankbased""" +164 1 model.embedding_dim 1.0 +164 1 loss.margin 15.734198829390115 +164 1 loss.adversarial_temperature 0.6101808092884242 +164 1 optimizer.lr 0.003721755252561407 +164 1 negative_sampler.num_negs_per_pos 31.0 +164 1 training.batch_size 2.0 +164 2 model.embedding_dim 2.0 +164 2 loss.margin 12.658035981554042 +164 2 loss.adversarial_temperature 0.6510760105247411 +164 2 optimizer.lr 0.012184883593167654 +164 2 negative_sampler.num_negs_per_pos 77.0 +164 2 training.batch_size 2.0 +164 3 model.embedding_dim 2.0 +164 3 loss.margin 7.942667592251867 +164 3 loss.adversarial_temperature 0.5549870086598898 +164 3 optimizer.lr 0.013805120915343663 +164 3 negative_sampler.num_negs_per_pos 80.0 +164 3 training.batch_size 0.0 +164 4 model.embedding_dim 2.0 +164 4 loss.margin 5.9718435523116655 +164 4 loss.adversarial_temperature 0.4044290815229851 +164 4 optimizer.lr 0.07961798548702621 +164 4 negative_sampler.num_negs_per_pos 58.0 +164 4 training.batch_size 0.0 
+164 5 model.embedding_dim 0.0 +164 5 loss.margin 26.94035601142109 +164 5 loss.adversarial_temperature 0.6722841787494561 +164 5 optimizer.lr 0.001969478375155383 +164 5 negative_sampler.num_negs_per_pos 33.0 +164 5 training.batch_size 0.0 +164 6 model.embedding_dim 1.0 +164 6 loss.margin 13.134179831779464 +164 6 loss.adversarial_temperature 0.8817776364482427 +164 6 optimizer.lr 0.001954097964980237 +164 6 negative_sampler.num_negs_per_pos 28.0 +164 6 training.batch_size 0.0 +164 7 model.embedding_dim 0.0 +164 7 loss.margin 9.861281508511441 +164 7 loss.adversarial_temperature 0.3715721732081835 +164 7 optimizer.lr 0.001781789930163737 +164 7 negative_sampler.num_negs_per_pos 27.0 +164 7 training.batch_size 1.0 +164 8 model.embedding_dim 1.0 +164 8 loss.margin 21.341241682122206 +164 8 loss.adversarial_temperature 0.19890928216587017 +164 8 optimizer.lr 0.020275993531884483 +164 8 negative_sampler.num_negs_per_pos 95.0 +164 8 training.batch_size 2.0 +164 9 model.embedding_dim 1.0 +164 9 loss.margin 3.7356774013895606 +164 9 loss.adversarial_temperature 0.8504729209482019 +164 9 optimizer.lr 0.001131018019142957 +164 9 negative_sampler.num_negs_per_pos 89.0 +164 9 training.batch_size 1.0 +164 10 model.embedding_dim 0.0 +164 10 loss.margin 17.677597239291767 +164 10 loss.adversarial_temperature 0.9357630949245063 +164 10 optimizer.lr 0.08567126309860747 +164 10 negative_sampler.num_negs_per_pos 15.0 +164 10 training.batch_size 0.0 +164 11 model.embedding_dim 0.0 +164 11 loss.margin 11.963800296906298 +164 11 loss.adversarial_temperature 0.6819346453960993 +164 11 optimizer.lr 0.006879078037066252 +164 11 negative_sampler.num_negs_per_pos 94.0 +164 11 training.batch_size 1.0 +164 1 dataset """fb15k237""" +164 1 model """distmult""" +164 1 loss """nssa""" +164 1 regularizer """no""" +164 1 optimizer """adam""" +164 1 training_loop """owa""" +164 1 negative_sampler """basic""" +164 1 evaluator """rankbased""" +164 2 dataset """fb15k237""" +164 2 model """distmult""" 
+164 2 loss """nssa""" +164 2 regularizer """no""" +164 2 optimizer """adam""" +164 2 training_loop """owa""" +164 2 negative_sampler """basic""" +164 2 evaluator """rankbased""" +164 3 dataset """fb15k237""" +164 3 model """distmult""" +164 3 loss """nssa""" +164 3 regularizer """no""" +164 3 optimizer """adam""" +164 3 training_loop """owa""" +164 3 negative_sampler """basic""" +164 3 evaluator """rankbased""" +164 4 dataset """fb15k237""" +164 4 model """distmult""" +164 4 loss """nssa""" +164 4 regularizer """no""" +164 4 optimizer """adam""" +164 4 training_loop """owa""" +164 4 negative_sampler """basic""" +164 4 evaluator """rankbased""" +164 5 dataset """fb15k237""" +164 5 model """distmult""" +164 5 loss """nssa""" +164 5 regularizer """no""" +164 5 optimizer """adam""" +164 5 training_loop """owa""" +164 5 negative_sampler """basic""" +164 5 evaluator """rankbased""" +164 6 dataset """fb15k237""" +164 6 model """distmult""" +164 6 loss """nssa""" +164 6 regularizer """no""" +164 6 optimizer """adam""" +164 6 training_loop """owa""" +164 6 negative_sampler """basic""" +164 6 evaluator """rankbased""" +164 7 dataset """fb15k237""" +164 7 model """distmult""" +164 7 loss """nssa""" +164 7 regularizer """no""" +164 7 optimizer """adam""" +164 7 training_loop """owa""" +164 7 negative_sampler """basic""" +164 7 evaluator """rankbased""" +164 8 dataset """fb15k237""" +164 8 model """distmult""" +164 8 loss """nssa""" +164 8 regularizer """no""" +164 8 optimizer """adam""" +164 8 training_loop """owa""" +164 8 negative_sampler """basic""" +164 8 evaluator """rankbased""" +164 9 dataset """fb15k237""" +164 9 model """distmult""" +164 9 loss """nssa""" +164 9 regularizer """no""" +164 9 optimizer """adam""" +164 9 training_loop """owa""" +164 9 negative_sampler """basic""" +164 9 evaluator """rankbased""" +164 10 dataset """fb15k237""" +164 10 model """distmult""" +164 10 loss """nssa""" +164 10 regularizer """no""" +164 10 optimizer """adam""" +164 10 
training_loop """owa""" +164 10 negative_sampler """basic""" +164 10 evaluator """rankbased""" +164 11 dataset """fb15k237""" +164 11 model """distmult""" +164 11 loss """nssa""" +164 11 regularizer """no""" +164 11 optimizer """adam""" +164 11 training_loop """owa""" +164 11 negative_sampler """basic""" +164 11 evaluator """rankbased""" +165 1 model.embedding_dim 0.0 +165 1 loss.margin 4.31915449037566 +165 1 loss.adversarial_temperature 0.1588337921189162 +165 1 optimizer.lr 0.001049155288073695 +165 1 negative_sampler.num_negs_per_pos 92.0 +165 1 training.batch_size 0.0 +165 2 model.embedding_dim 2.0 +165 2 loss.margin 13.731811274558964 +165 2 loss.adversarial_temperature 0.12720705455578488 +165 2 optimizer.lr 0.001541455766151704 +165 2 negative_sampler.num_negs_per_pos 84.0 +165 2 training.batch_size 0.0 +165 3 model.embedding_dim 1.0 +165 3 loss.margin 10.990953733794997 +165 3 loss.adversarial_temperature 0.7459902972149661 +165 3 optimizer.lr 0.0013427576091971966 +165 3 negative_sampler.num_negs_per_pos 90.0 +165 3 training.batch_size 1.0 +165 4 model.embedding_dim 1.0 +165 4 loss.margin 15.258087505125928 +165 4 loss.adversarial_temperature 0.5928493188451857 +165 4 optimizer.lr 0.014445573683282247 +165 4 negative_sampler.num_negs_per_pos 50.0 +165 4 training.batch_size 1.0 +165 5 model.embedding_dim 2.0 +165 5 loss.margin 4.620818128072372 +165 5 loss.adversarial_temperature 0.31972860840023093 +165 5 optimizer.lr 0.004710789865283489 +165 5 negative_sampler.num_negs_per_pos 0.0 +165 5 training.batch_size 0.0 +165 6 model.embedding_dim 2.0 +165 6 loss.margin 28.811176898639534 +165 6 loss.adversarial_temperature 0.3214417731150091 +165 6 optimizer.lr 0.003589144763346008 +165 6 negative_sampler.num_negs_per_pos 95.0 +165 6 training.batch_size 1.0 +165 7 model.embedding_dim 0.0 +165 7 loss.margin 25.055720702288028 +165 7 loss.adversarial_temperature 0.6274707232146842 +165 7 optimizer.lr 0.007218560187209265 +165 7 negative_sampler.num_negs_per_pos 
57.0 +165 7 training.batch_size 1.0 +165 8 model.embedding_dim 0.0 +165 8 loss.margin 1.1949031266065187 +165 8 loss.adversarial_temperature 0.4508632557065998 +165 8 optimizer.lr 0.0015124871119810664 +165 8 negative_sampler.num_negs_per_pos 60.0 +165 8 training.batch_size 0.0 +165 9 model.embedding_dim 0.0 +165 9 loss.margin 8.305116359105025 +165 9 loss.adversarial_temperature 0.4407132272955472 +165 9 optimizer.lr 0.04629546970848846 +165 9 negative_sampler.num_negs_per_pos 74.0 +165 9 training.batch_size 0.0 +165 10 model.embedding_dim 0.0 +165 10 loss.margin 4.044803624709549 +165 10 loss.adversarial_temperature 0.6047705360413157 +165 10 optimizer.lr 0.04616594894490919 +165 10 negative_sampler.num_negs_per_pos 23.0 +165 10 training.batch_size 1.0 +165 11 model.embedding_dim 1.0 +165 11 loss.margin 4.331491524821164 +165 11 loss.adversarial_temperature 0.8463902548166485 +165 11 optimizer.lr 0.01169982891724346 +165 11 negative_sampler.num_negs_per_pos 74.0 +165 11 training.batch_size 1.0 +165 12 model.embedding_dim 0.0 +165 12 loss.margin 29.849749593126784 +165 12 loss.adversarial_temperature 0.5563958482005466 +165 12 optimizer.lr 0.010627843364736048 +165 12 negative_sampler.num_negs_per_pos 70.0 +165 12 training.batch_size 2.0 +165 13 model.embedding_dim 0.0 +165 13 loss.margin 19.0907616951126 +165 13 loss.adversarial_temperature 0.913049484975128 +165 13 optimizer.lr 0.06484042360385725 +165 13 negative_sampler.num_negs_per_pos 71.0 +165 13 training.batch_size 0.0 +165 14 model.embedding_dim 2.0 +165 14 loss.margin 29.00969180976425 +165 14 loss.adversarial_temperature 0.4446994405595606 +165 14 optimizer.lr 0.08185146050266126 +165 14 negative_sampler.num_negs_per_pos 79.0 +165 14 training.batch_size 0.0 +165 15 model.embedding_dim 0.0 +165 15 loss.margin 5.733848077401176 +165 15 loss.adversarial_temperature 0.16889744466540912 +165 15 optimizer.lr 0.008032140215574388 +165 15 negative_sampler.num_negs_per_pos 54.0 +165 15 training.batch_size 2.0 
+165 16 model.embedding_dim 2.0 +165 16 loss.margin 21.4408676217884 +165 16 loss.adversarial_temperature 0.8872787909018747 +165 16 optimizer.lr 0.007335032453771449 +165 16 negative_sampler.num_negs_per_pos 8.0 +165 16 training.batch_size 2.0 +165 17 model.embedding_dim 2.0 +165 17 loss.margin 26.798889626971892 +165 17 loss.adversarial_temperature 0.625368768036364 +165 17 optimizer.lr 0.01244946672136942 +165 17 negative_sampler.num_negs_per_pos 54.0 +165 17 training.batch_size 2.0 +165 18 model.embedding_dim 0.0 +165 18 loss.margin 23.46819186807551 +165 18 loss.adversarial_temperature 0.5662209774768053 +165 18 optimizer.lr 0.020810760148273152 +165 18 negative_sampler.num_negs_per_pos 67.0 +165 18 training.batch_size 2.0 +165 19 model.embedding_dim 2.0 +165 19 loss.margin 28.731268727064805 +165 19 loss.adversarial_temperature 0.9212827295426588 +165 19 optimizer.lr 0.0062669626267636775 +165 19 negative_sampler.num_negs_per_pos 90.0 +165 19 training.batch_size 0.0 +165 20 model.embedding_dim 2.0 +165 20 loss.margin 3.284140482549362 +165 20 loss.adversarial_temperature 0.9852719276126658 +165 20 optimizer.lr 0.015204490073833165 +165 20 negative_sampler.num_negs_per_pos 67.0 +165 20 training.batch_size 0.0 +165 21 model.embedding_dim 0.0 +165 21 loss.margin 24.08153402372489 +165 21 loss.adversarial_temperature 0.10031956342551944 +165 21 optimizer.lr 0.0011355767628328628 +165 21 negative_sampler.num_negs_per_pos 88.0 +165 21 training.batch_size 0.0 +165 22 model.embedding_dim 2.0 +165 22 loss.margin 20.379087295911987 +165 22 loss.adversarial_temperature 0.6652252953948219 +165 22 optimizer.lr 0.019667821933303416 +165 22 negative_sampler.num_negs_per_pos 20.0 +165 22 training.batch_size 2.0 +165 23 model.embedding_dim 2.0 +165 23 loss.margin 19.985088316078635 +165 23 loss.adversarial_temperature 0.978504065153041 +165 23 optimizer.lr 0.0017915589100235736 +165 23 negative_sampler.num_negs_per_pos 86.0 +165 23 training.batch_size 2.0 +165 24 
model.embedding_dim 2.0 +165 24 loss.margin 15.281102365823966 +165 24 loss.adversarial_temperature 0.956549487363654 +165 24 optimizer.lr 0.004507466915766671 +165 24 negative_sampler.num_negs_per_pos 11.0 +165 24 training.batch_size 2.0 +165 1 dataset """fb15k237""" +165 1 model """distmult""" +165 1 loss """nssa""" +165 1 regularizer """no""" +165 1 optimizer """adam""" +165 1 training_loop """owa""" +165 1 negative_sampler """basic""" +165 1 evaluator """rankbased""" +165 2 dataset """fb15k237""" +165 2 model """distmult""" +165 2 loss """nssa""" +165 2 regularizer """no""" +165 2 optimizer """adam""" +165 2 training_loop """owa""" +165 2 negative_sampler """basic""" +165 2 evaluator """rankbased""" +165 3 dataset """fb15k237""" +165 3 model """distmult""" +165 3 loss """nssa""" +165 3 regularizer """no""" +165 3 optimizer """adam""" +165 3 training_loop """owa""" +165 3 negative_sampler """basic""" +165 3 evaluator """rankbased""" +165 4 dataset """fb15k237""" +165 4 model """distmult""" +165 4 loss """nssa""" +165 4 regularizer """no""" +165 4 optimizer """adam""" +165 4 training_loop """owa""" +165 4 negative_sampler """basic""" +165 4 evaluator """rankbased""" +165 5 dataset """fb15k237""" +165 5 model """distmult""" +165 5 loss """nssa""" +165 5 regularizer """no""" +165 5 optimizer """adam""" +165 5 training_loop """owa""" +165 5 negative_sampler """basic""" +165 5 evaluator """rankbased""" +165 6 dataset """fb15k237""" +165 6 model """distmult""" +165 6 loss """nssa""" +165 6 regularizer """no""" +165 6 optimizer """adam""" +165 6 training_loop """owa""" +165 6 negative_sampler """basic""" +165 6 evaluator """rankbased""" +165 7 dataset """fb15k237""" +165 7 model """distmult""" +165 7 loss """nssa""" +165 7 regularizer """no""" +165 7 optimizer """adam""" +165 7 training_loop """owa""" +165 7 negative_sampler """basic""" +165 7 evaluator """rankbased""" +165 8 dataset """fb15k237""" +165 8 model """distmult""" +165 8 loss """nssa""" +165 8 regularizer 
"""no""" +165 8 optimizer """adam""" +165 8 training_loop """owa""" +165 8 negative_sampler """basic""" +165 8 evaluator """rankbased""" +165 9 dataset """fb15k237""" +165 9 model """distmult""" +165 9 loss """nssa""" +165 9 regularizer """no""" +165 9 optimizer """adam""" +165 9 training_loop """owa""" +165 9 negative_sampler """basic""" +165 9 evaluator """rankbased""" +165 10 dataset """fb15k237""" +165 10 model """distmult""" +165 10 loss """nssa""" +165 10 regularizer """no""" +165 10 optimizer """adam""" +165 10 training_loop """owa""" +165 10 negative_sampler """basic""" +165 10 evaluator """rankbased""" +165 11 dataset """fb15k237""" +165 11 model """distmult""" +165 11 loss """nssa""" +165 11 regularizer """no""" +165 11 optimizer """adam""" +165 11 training_loop """owa""" +165 11 negative_sampler """basic""" +165 11 evaluator """rankbased""" +165 12 dataset """fb15k237""" +165 12 model """distmult""" +165 12 loss """nssa""" +165 12 regularizer """no""" +165 12 optimizer """adam""" +165 12 training_loop """owa""" +165 12 negative_sampler """basic""" +165 12 evaluator """rankbased""" +165 13 dataset """fb15k237""" +165 13 model """distmult""" +165 13 loss """nssa""" +165 13 regularizer """no""" +165 13 optimizer """adam""" +165 13 training_loop """owa""" +165 13 negative_sampler """basic""" +165 13 evaluator """rankbased""" +165 14 dataset """fb15k237""" +165 14 model """distmult""" +165 14 loss """nssa""" +165 14 regularizer """no""" +165 14 optimizer """adam""" +165 14 training_loop """owa""" +165 14 negative_sampler """basic""" +165 14 evaluator """rankbased""" +165 15 dataset """fb15k237""" +165 15 model """distmult""" +165 15 loss """nssa""" +165 15 regularizer """no""" +165 15 optimizer """adam""" +165 15 training_loop """owa""" +165 15 negative_sampler """basic""" +165 15 evaluator """rankbased""" +165 16 dataset """fb15k237""" +165 16 model """distmult""" +165 16 loss """nssa""" +165 16 regularizer """no""" +165 16 optimizer """adam""" +165 16 
training_loop """owa""" +165 16 negative_sampler """basic""" +165 16 evaluator """rankbased""" +165 17 dataset """fb15k237""" +165 17 model """distmult""" +165 17 loss """nssa""" +165 17 regularizer """no""" +165 17 optimizer """adam""" +165 17 training_loop """owa""" +165 17 negative_sampler """basic""" +165 17 evaluator """rankbased""" +165 18 dataset """fb15k237""" +165 18 model """distmult""" +165 18 loss """nssa""" +165 18 regularizer """no""" +165 18 optimizer """adam""" +165 18 training_loop """owa""" +165 18 negative_sampler """basic""" +165 18 evaluator """rankbased""" +165 19 dataset """fb15k237""" +165 19 model """distmult""" +165 19 loss """nssa""" +165 19 regularizer """no""" +165 19 optimizer """adam""" +165 19 training_loop """owa""" +165 19 negative_sampler """basic""" +165 19 evaluator """rankbased""" +165 20 dataset """fb15k237""" +165 20 model """distmult""" +165 20 loss """nssa""" +165 20 regularizer """no""" +165 20 optimizer """adam""" +165 20 training_loop """owa""" +165 20 negative_sampler """basic""" +165 20 evaluator """rankbased""" +165 21 dataset """fb15k237""" +165 21 model """distmult""" +165 21 loss """nssa""" +165 21 regularizer """no""" +165 21 optimizer """adam""" +165 21 training_loop """owa""" +165 21 negative_sampler """basic""" +165 21 evaluator """rankbased""" +165 22 dataset """fb15k237""" +165 22 model """distmult""" +165 22 loss """nssa""" +165 22 regularizer """no""" +165 22 optimizer """adam""" +165 22 training_loop """owa""" +165 22 negative_sampler """basic""" +165 22 evaluator """rankbased""" +165 23 dataset """fb15k237""" +165 23 model """distmult""" +165 23 loss """nssa""" +165 23 regularizer """no""" +165 23 optimizer """adam""" +165 23 training_loop """owa""" +165 23 negative_sampler """basic""" +165 23 evaluator """rankbased""" +165 24 dataset """fb15k237""" +165 24 model """distmult""" +165 24 loss """nssa""" +165 24 regularizer """no""" +165 24 optimizer """adam""" +165 24 training_loop """owa""" +165 24 
negative_sampler """basic""" +165 24 evaluator """rankbased""" +166 1 model.embedding_dim 2.0 +166 1 optimizer.lr 0.09372921812230658 +166 1 training.batch_size 2.0 +166 1 training.label_smoothing 0.00894238576204626 +166 2 model.embedding_dim 2.0 +166 2 optimizer.lr 0.05663844475613614 +166 2 training.batch_size 0.0 +166 2 training.label_smoothing 0.6760016273246339 +166 3 model.embedding_dim 1.0 +166 3 optimizer.lr 0.0029069965021745942 +166 3 training.batch_size 1.0 +166 3 training.label_smoothing 0.5580829877515427 +166 4 model.embedding_dim 0.0 +166 4 optimizer.lr 0.0031916096565122 +166 4 training.batch_size 0.0 +166 4 training.label_smoothing 0.04045012987465943 +166 5 model.embedding_dim 1.0 +166 5 optimizer.lr 0.0011064548582154054 +166 5 training.batch_size 2.0 +166 5 training.label_smoothing 0.005150147305280085 +166 6 model.embedding_dim 2.0 +166 6 optimizer.lr 0.04152363664739229 +166 6 training.batch_size 0.0 +166 6 training.label_smoothing 0.001279609615218405 +166 7 model.embedding_dim 1.0 +166 7 optimizer.lr 0.003023891563665858 +166 7 training.batch_size 0.0 +166 7 training.label_smoothing 0.09484143135958994 +166 8 model.embedding_dim 1.0 +166 8 optimizer.lr 0.09921212815211365 +166 8 training.batch_size 1.0 +166 8 training.label_smoothing 0.7074308976385142 +166 9 model.embedding_dim 1.0 +166 9 optimizer.lr 0.021189229735503676 +166 9 training.batch_size 1.0 +166 9 training.label_smoothing 0.07500652957553602 +166 10 model.embedding_dim 0.0 +166 10 optimizer.lr 0.0022431951644686847 +166 10 training.batch_size 0.0 +166 10 training.label_smoothing 0.06261585189354013 +166 11 model.embedding_dim 1.0 +166 11 optimizer.lr 0.06546073955445336 +166 11 training.batch_size 0.0 +166 11 training.label_smoothing 0.30130617180056407 +166 12 model.embedding_dim 2.0 +166 12 optimizer.lr 0.0037868699018735043 +166 12 training.batch_size 2.0 +166 12 training.label_smoothing 0.020490245222611204 +166 1 dataset """fb15k237""" +166 1 model """distmult""" +166 1 
loss """bceaftersigmoid""" +166 1 regularizer """no""" +166 1 optimizer """adam""" +166 1 training_loop """lcwa""" +166 1 evaluator """rankbased""" +166 2 dataset """fb15k237""" +166 2 model """distmult""" +166 2 loss """bceaftersigmoid""" +166 2 regularizer """no""" +166 2 optimizer """adam""" +166 2 training_loop """lcwa""" +166 2 evaluator """rankbased""" +166 3 dataset """fb15k237""" +166 3 model """distmult""" +166 3 loss """bceaftersigmoid""" +166 3 regularizer """no""" +166 3 optimizer """adam""" +166 3 training_loop """lcwa""" +166 3 evaluator """rankbased""" +166 4 dataset """fb15k237""" +166 4 model """distmult""" +166 4 loss """bceaftersigmoid""" +166 4 regularizer """no""" +166 4 optimizer """adam""" +166 4 training_loop """lcwa""" +166 4 evaluator """rankbased""" +166 5 dataset """fb15k237""" +166 5 model """distmult""" +166 5 loss """bceaftersigmoid""" +166 5 regularizer """no""" +166 5 optimizer """adam""" +166 5 training_loop """lcwa""" +166 5 evaluator """rankbased""" +166 6 dataset """fb15k237""" +166 6 model """distmult""" +166 6 loss """bceaftersigmoid""" +166 6 regularizer """no""" +166 6 optimizer """adam""" +166 6 training_loop """lcwa""" +166 6 evaluator """rankbased""" +166 7 dataset """fb15k237""" +166 7 model """distmult""" +166 7 loss """bceaftersigmoid""" +166 7 regularizer """no""" +166 7 optimizer """adam""" +166 7 training_loop """lcwa""" +166 7 evaluator """rankbased""" +166 8 dataset """fb15k237""" +166 8 model """distmult""" +166 8 loss """bceaftersigmoid""" +166 8 regularizer """no""" +166 8 optimizer """adam""" +166 8 training_loop """lcwa""" +166 8 evaluator """rankbased""" +166 9 dataset """fb15k237""" +166 9 model """distmult""" +166 9 loss """bceaftersigmoid""" +166 9 regularizer """no""" +166 9 optimizer """adam""" +166 9 training_loop """lcwa""" +166 9 evaluator """rankbased""" +166 10 dataset """fb15k237""" +166 10 model """distmult""" +166 10 loss """bceaftersigmoid""" +166 10 regularizer """no""" +166 10 optimizer 
"""adam""" +166 10 training_loop """lcwa""" +166 10 evaluator """rankbased""" +166 11 dataset """fb15k237""" +166 11 model """distmult""" +166 11 loss """bceaftersigmoid""" +166 11 regularizer """no""" +166 11 optimizer """adam""" +166 11 training_loop """lcwa""" +166 11 evaluator """rankbased""" +166 12 dataset """fb15k237""" +166 12 model """distmult""" +166 12 loss """bceaftersigmoid""" +166 12 regularizer """no""" +166 12 optimizer """adam""" +166 12 training_loop """lcwa""" +166 12 evaluator """rankbased""" +167 1 model.embedding_dim 1.0 +167 1 optimizer.lr 0.019843676498872004 +167 1 training.batch_size 0.0 +167 1 training.label_smoothing 0.0010056677048537182 +167 2 model.embedding_dim 2.0 +167 2 optimizer.lr 0.012337147659652836 +167 2 training.batch_size 1.0 +167 2 training.label_smoothing 0.8929488017623954 +167 3 model.embedding_dim 0.0 +167 3 optimizer.lr 0.009277184382519439 +167 3 training.batch_size 2.0 +167 3 training.label_smoothing 0.0016041259842892281 +167 4 model.embedding_dim 1.0 +167 4 optimizer.lr 0.015598371759896017 +167 4 training.batch_size 1.0 +167 4 training.label_smoothing 0.019736134498264432 +167 5 model.embedding_dim 2.0 +167 5 optimizer.lr 0.01653843164900965 +167 5 training.batch_size 2.0 +167 5 training.label_smoothing 0.020820145737899684 +167 6 model.embedding_dim 1.0 +167 6 optimizer.lr 0.003982070070379064 +167 6 training.batch_size 1.0 +167 6 training.label_smoothing 0.4393177851435236 +167 7 model.embedding_dim 0.0 +167 7 optimizer.lr 0.010565490324561309 +167 7 training.batch_size 2.0 +167 7 training.label_smoothing 0.15636313316062572 +167 8 model.embedding_dim 2.0 +167 8 optimizer.lr 0.01492974297078345 +167 8 training.batch_size 0.0 +167 8 training.label_smoothing 0.029592991588158216 +167 9 model.embedding_dim 0.0 +167 9 optimizer.lr 0.0016213063152220322 +167 9 training.batch_size 0.0 +167 9 training.label_smoothing 0.5009632658934696 +167 10 model.embedding_dim 1.0 +167 10 optimizer.lr 0.026768423149334496 +167 10 
training.batch_size 2.0 +167 10 training.label_smoothing 0.005257227723710143 +167 11 model.embedding_dim 1.0 +167 11 optimizer.lr 0.070855611206323 +167 11 training.batch_size 1.0 +167 11 training.label_smoothing 0.5242421244603495 +167 12 model.embedding_dim 1.0 +167 12 optimizer.lr 0.004472449302058343 +167 12 training.batch_size 1.0 +167 12 training.label_smoothing 0.12426124370866234 +167 13 model.embedding_dim 2.0 +167 13 optimizer.lr 0.0015108282152031126 +167 13 training.batch_size 2.0 +167 13 training.label_smoothing 0.004274364036958012 +167 14 model.embedding_dim 0.0 +167 14 optimizer.lr 0.006019880273886061 +167 14 training.batch_size 1.0 +167 14 training.label_smoothing 0.0031410597950166075 +167 15 model.embedding_dim 2.0 +167 15 optimizer.lr 0.02328931378256286 +167 15 training.batch_size 1.0 +167 15 training.label_smoothing 0.00846643778996209 +167 16 model.embedding_dim 0.0 +167 16 optimizer.lr 0.005493244706803322 +167 16 training.batch_size 0.0 +167 16 training.label_smoothing 0.01385658524907398 +167 17 model.embedding_dim 0.0 +167 17 optimizer.lr 0.042033508515895025 +167 17 training.batch_size 1.0 +167 17 training.label_smoothing 0.027730233482714632 +167 18 model.embedding_dim 0.0 +167 18 optimizer.lr 0.006707901401327441 +167 18 training.batch_size 1.0 +167 18 training.label_smoothing 0.0014650396811017913 +167 19 model.embedding_dim 2.0 +167 19 optimizer.lr 0.0013448712315053974 +167 19 training.batch_size 2.0 +167 19 training.label_smoothing 0.17826023859290285 +167 1 dataset """fb15k237""" +167 1 model """distmult""" +167 1 loss """bceaftersigmoid""" +167 1 regularizer """no""" +167 1 optimizer """adam""" +167 1 training_loop """lcwa""" +167 1 evaluator """rankbased""" +167 2 dataset """fb15k237""" +167 2 model """distmult""" +167 2 loss """bceaftersigmoid""" +167 2 regularizer """no""" +167 2 optimizer """adam""" +167 2 training_loop """lcwa""" +167 2 evaluator """rankbased""" +167 3 dataset """fb15k237""" +167 3 model """distmult""" 
+167 3 loss """bceaftersigmoid""" +167 3 regularizer """no""" +167 3 optimizer """adam""" +167 3 training_loop """lcwa""" +167 3 evaluator """rankbased""" +167 4 dataset """fb15k237""" +167 4 model """distmult""" +167 4 loss """bceaftersigmoid""" +167 4 regularizer """no""" +167 4 optimizer """adam""" +167 4 training_loop """lcwa""" +167 4 evaluator """rankbased""" +167 5 dataset """fb15k237""" +167 5 model """distmult""" +167 5 loss """bceaftersigmoid""" +167 5 regularizer """no""" +167 5 optimizer """adam""" +167 5 training_loop """lcwa""" +167 5 evaluator """rankbased""" +167 6 dataset """fb15k237""" +167 6 model """distmult""" +167 6 loss """bceaftersigmoid""" +167 6 regularizer """no""" +167 6 optimizer """adam""" +167 6 training_loop """lcwa""" +167 6 evaluator """rankbased""" +167 7 dataset """fb15k237""" +167 7 model """distmult""" +167 7 loss """bceaftersigmoid""" +167 7 regularizer """no""" +167 7 optimizer """adam""" +167 7 training_loop """lcwa""" +167 7 evaluator """rankbased""" +167 8 dataset """fb15k237""" +167 8 model """distmult""" +167 8 loss """bceaftersigmoid""" +167 8 regularizer """no""" +167 8 optimizer """adam""" +167 8 training_loop """lcwa""" +167 8 evaluator """rankbased""" +167 9 dataset """fb15k237""" +167 9 model """distmult""" +167 9 loss """bceaftersigmoid""" +167 9 regularizer """no""" +167 9 optimizer """adam""" +167 9 training_loop """lcwa""" +167 9 evaluator """rankbased""" +167 10 dataset """fb15k237""" +167 10 model """distmult""" +167 10 loss """bceaftersigmoid""" +167 10 regularizer """no""" +167 10 optimizer """adam""" +167 10 training_loop """lcwa""" +167 10 evaluator """rankbased""" +167 11 dataset """fb15k237""" +167 11 model """distmult""" +167 11 loss """bceaftersigmoid""" +167 11 regularizer """no""" +167 11 optimizer """adam""" +167 11 training_loop """lcwa""" +167 11 evaluator """rankbased""" +167 12 dataset """fb15k237""" +167 12 model """distmult""" +167 12 loss """bceaftersigmoid""" +167 12 regularizer """no""" 
+167 12 optimizer """adam""" +167 12 training_loop """lcwa""" +167 12 evaluator """rankbased""" +167 13 dataset """fb15k237""" +167 13 model """distmult""" +167 13 loss """bceaftersigmoid""" +167 13 regularizer """no""" +167 13 optimizer """adam""" +167 13 training_loop """lcwa""" +167 13 evaluator """rankbased""" +167 14 dataset """fb15k237""" +167 14 model """distmult""" +167 14 loss """bceaftersigmoid""" +167 14 regularizer """no""" +167 14 optimizer """adam""" +167 14 training_loop """lcwa""" +167 14 evaluator """rankbased""" +167 15 dataset """fb15k237""" +167 15 model """distmult""" +167 15 loss """bceaftersigmoid""" +167 15 regularizer """no""" +167 15 optimizer """adam""" +167 15 training_loop """lcwa""" +167 15 evaluator """rankbased""" +167 16 dataset """fb15k237""" +167 16 model """distmult""" +167 16 loss """bceaftersigmoid""" +167 16 regularizer """no""" +167 16 optimizer """adam""" +167 16 training_loop """lcwa""" +167 16 evaluator """rankbased""" +167 17 dataset """fb15k237""" +167 17 model """distmult""" +167 17 loss """bceaftersigmoid""" +167 17 regularizer """no""" +167 17 optimizer """adam""" +167 17 training_loop """lcwa""" +167 17 evaluator """rankbased""" +167 18 dataset """fb15k237""" +167 18 model """distmult""" +167 18 loss """bceaftersigmoid""" +167 18 regularizer """no""" +167 18 optimizer """adam""" +167 18 training_loop """lcwa""" +167 18 evaluator """rankbased""" +167 19 dataset """fb15k237""" +167 19 model """distmult""" +167 19 loss """bceaftersigmoid""" +167 19 regularizer """no""" +167 19 optimizer """adam""" +167 19 training_loop """lcwa""" +167 19 evaluator """rankbased""" +168 1 model.embedding_dim 0.0 +168 1 training.batch_size 2.0 +168 1 training.label_smoothing 0.08519015839183733 +168 2 model.embedding_dim 2.0 +168 2 training.batch_size 2.0 +168 2 training.label_smoothing 0.07529043519682009 +168 3 model.embedding_dim 2.0 +168 3 training.batch_size 1.0 +168 3 training.label_smoothing 0.023793383822717975 +168 4 
model.embedding_dim 0.0 +168 4 training.batch_size 0.0 +168 4 training.label_smoothing 0.15005799859971347 +168 5 model.embedding_dim 0.0 +168 5 training.batch_size 0.0 +168 5 training.label_smoothing 0.038240969814166255 +168 6 model.embedding_dim 0.0 +168 6 training.batch_size 1.0 +168 6 training.label_smoothing 0.0024792605150134 +168 7 model.embedding_dim 2.0 +168 7 training.batch_size 1.0 +168 7 training.label_smoothing 0.0010372063629125693 +168 8 model.embedding_dim 2.0 +168 8 training.batch_size 2.0 +168 8 training.label_smoothing 0.008571968781487109 +168 9 model.embedding_dim 2.0 +168 9 training.batch_size 2.0 +168 9 training.label_smoothing 0.003193963636254266 +168 10 model.embedding_dim 2.0 +168 10 training.batch_size 0.0 +168 10 training.label_smoothing 0.6666521666141971 +168 11 model.embedding_dim 1.0 +168 11 training.batch_size 2.0 +168 11 training.label_smoothing 0.03237460704036755 +168 12 model.embedding_dim 2.0 +168 12 training.batch_size 2.0 +168 12 training.label_smoothing 0.04947868525730713 +168 13 model.embedding_dim 0.0 +168 13 training.batch_size 0.0 +168 13 training.label_smoothing 0.044243780339719575 +168 14 model.embedding_dim 0.0 +168 14 training.batch_size 1.0 +168 14 training.label_smoothing 0.8019396959156173 +168 15 model.embedding_dim 0.0 +168 15 training.batch_size 2.0 +168 15 training.label_smoothing 0.02646584375902487 +168 16 model.embedding_dim 1.0 +168 16 training.batch_size 2.0 +168 16 training.label_smoothing 0.011363279762435709 +168 17 model.embedding_dim 2.0 +168 17 training.batch_size 0.0 +168 17 training.label_smoothing 0.041156446906184106 +168 18 model.embedding_dim 2.0 +168 18 training.batch_size 0.0 +168 18 training.label_smoothing 0.009521059962923385 +168 19 model.embedding_dim 0.0 +168 19 training.batch_size 0.0 +168 19 training.label_smoothing 0.023763527400556185 +168 20 model.embedding_dim 1.0 +168 20 training.batch_size 1.0 +168 20 training.label_smoothing 0.306859549132925 +168 21 model.embedding_dim 
2.0 +168 21 training.batch_size 2.0 +168 21 training.label_smoothing 0.0036307378912895354 +168 22 model.embedding_dim 2.0 +168 22 training.batch_size 2.0 +168 22 training.label_smoothing 0.018717585253352646 +168 23 model.embedding_dim 2.0 +168 23 training.batch_size 2.0 +168 23 training.label_smoothing 0.0479659420917322 +168 24 model.embedding_dim 2.0 +168 24 training.batch_size 2.0 +168 24 training.label_smoothing 0.42181208586356234 +168 25 model.embedding_dim 2.0 +168 25 training.batch_size 1.0 +168 25 training.label_smoothing 0.9577751843722055 +168 26 model.embedding_dim 0.0 +168 26 training.batch_size 0.0 +168 26 training.label_smoothing 0.010509666195863038 +168 27 model.embedding_dim 2.0 +168 27 training.batch_size 0.0 +168 27 training.label_smoothing 0.09012391099840344 +168 28 model.embedding_dim 2.0 +168 28 training.batch_size 1.0 +168 28 training.label_smoothing 0.010180029949753476 +168 29 model.embedding_dim 1.0 +168 29 training.batch_size 0.0 +168 29 training.label_smoothing 0.06606457048869868 +168 30 model.embedding_dim 0.0 +168 30 training.batch_size 0.0 +168 30 training.label_smoothing 0.6779089448757296 +168 31 model.embedding_dim 0.0 +168 31 training.batch_size 0.0 +168 31 training.label_smoothing 0.9172197621774942 +168 32 model.embedding_dim 0.0 +168 32 training.batch_size 0.0 +168 32 training.label_smoothing 0.013724569158690903 +168 33 model.embedding_dim 2.0 +168 33 training.batch_size 0.0 +168 33 training.label_smoothing 0.00400199813772254 +168 34 model.embedding_dim 1.0 +168 34 training.batch_size 2.0 +168 34 training.label_smoothing 0.0022590307845189047 +168 35 model.embedding_dim 1.0 +168 35 training.batch_size 0.0 +168 35 training.label_smoothing 0.040776582065130317 +168 36 model.embedding_dim 2.0 +168 36 training.batch_size 2.0 +168 36 training.label_smoothing 0.012190460736025532 +168 37 model.embedding_dim 0.0 +168 37 training.batch_size 0.0 +168 37 training.label_smoothing 0.19382670297955432 +168 38 model.embedding_dim 0.0 
+168 38 training.batch_size 1.0 +168 38 training.label_smoothing 0.12364815792712955 +168 39 model.embedding_dim 2.0 +168 39 training.batch_size 2.0 +168 39 training.label_smoothing 0.19949779504884965 +168 40 model.embedding_dim 1.0 +168 40 training.batch_size 2.0 +168 40 training.label_smoothing 0.01020288527780854 +168 41 model.embedding_dim 0.0 +168 41 training.batch_size 2.0 +168 41 training.label_smoothing 0.014580658669809692 +168 42 model.embedding_dim 1.0 +168 42 training.batch_size 0.0 +168 42 training.label_smoothing 0.004950845455701394 +168 43 model.embedding_dim 2.0 +168 43 training.batch_size 0.0 +168 43 training.label_smoothing 0.0016371700950114434 +168 44 model.embedding_dim 0.0 +168 44 training.batch_size 2.0 +168 44 training.label_smoothing 0.029642113137830828 +168 45 model.embedding_dim 2.0 +168 45 training.batch_size 0.0 +168 45 training.label_smoothing 0.8116295117452582 +168 46 model.embedding_dim 2.0 +168 46 training.batch_size 1.0 +168 46 training.label_smoothing 0.19413252596715366 +168 47 model.embedding_dim 0.0 +168 47 training.batch_size 2.0 +168 47 training.label_smoothing 0.007545714352828586 +168 48 model.embedding_dim 2.0 +168 48 training.batch_size 1.0 +168 48 training.label_smoothing 0.018196250073490825 +168 49 model.embedding_dim 0.0 +168 49 training.batch_size 1.0 +168 49 training.label_smoothing 0.23332603977829966 +168 50 model.embedding_dim 0.0 +168 50 training.batch_size 2.0 +168 50 training.label_smoothing 0.06876469050983261 +168 51 model.embedding_dim 2.0 +168 51 training.batch_size 2.0 +168 51 training.label_smoothing 0.5071473985084222 +168 52 model.embedding_dim 2.0 +168 52 training.batch_size 2.0 +168 52 training.label_smoothing 0.004853940271201458 +168 53 model.embedding_dim 0.0 +168 53 training.batch_size 0.0 +168 53 training.label_smoothing 0.03040079868029004 +168 54 model.embedding_dim 2.0 +168 54 training.batch_size 0.0 +168 54 training.label_smoothing 0.002061944116539452 +168 55 model.embedding_dim 0.0 
+168 55 training.batch_size 0.0 +168 55 training.label_smoothing 0.9559546170845752 +168 56 model.embedding_dim 0.0 +168 56 training.batch_size 2.0 +168 56 training.label_smoothing 0.002931153330398871 +168 57 model.embedding_dim 1.0 +168 57 training.batch_size 0.0 +168 57 training.label_smoothing 0.017799673042812096 +168 58 model.embedding_dim 1.0 +168 58 training.batch_size 1.0 +168 58 training.label_smoothing 0.0054413064117502935 +168 59 model.embedding_dim 2.0 +168 59 training.batch_size 2.0 +168 59 training.label_smoothing 0.12865493711238854 +168 60 model.embedding_dim 1.0 +168 60 training.batch_size 2.0 +168 60 training.label_smoothing 0.07996529703785268 +168 61 model.embedding_dim 1.0 +168 61 training.batch_size 2.0 +168 61 training.label_smoothing 0.03090261565574179 +168 62 model.embedding_dim 1.0 +168 62 training.batch_size 1.0 +168 62 training.label_smoothing 0.3839784609150672 +168 63 model.embedding_dim 1.0 +168 63 training.batch_size 0.0 +168 63 training.label_smoothing 0.021678100573786867 +168 64 model.embedding_dim 1.0 +168 64 training.batch_size 0.0 +168 64 training.label_smoothing 0.464259939928908 +168 65 model.embedding_dim 2.0 +168 65 training.batch_size 2.0 +168 65 training.label_smoothing 0.030176514620028162 +168 66 model.embedding_dim 1.0 +168 66 training.batch_size 2.0 +168 66 training.label_smoothing 0.0023557890583654983 +168 67 model.embedding_dim 1.0 +168 67 training.batch_size 1.0 +168 67 training.label_smoothing 0.0011638096023598326 +168 68 model.embedding_dim 0.0 +168 68 training.batch_size 0.0 +168 68 training.label_smoothing 0.0014622260307647784 +168 69 model.embedding_dim 0.0 +168 69 training.batch_size 2.0 +168 69 training.label_smoothing 0.916615920391873 +168 70 model.embedding_dim 2.0 +168 70 training.batch_size 1.0 +168 70 training.label_smoothing 0.00501128883251304 +168 71 model.embedding_dim 2.0 +168 71 training.batch_size 1.0 +168 71 training.label_smoothing 0.7106037887194739 +168 72 model.embedding_dim 0.0 +168 
72 training.batch_size 2.0 +168 72 training.label_smoothing 0.009123099335646432 +168 73 model.embedding_dim 1.0 +168 73 training.batch_size 2.0 +168 73 training.label_smoothing 0.05431474336910678 +168 74 model.embedding_dim 0.0 +168 74 training.batch_size 2.0 +168 74 training.label_smoothing 0.013328974746078434 +168 75 model.embedding_dim 1.0 +168 75 training.batch_size 1.0 +168 75 training.label_smoothing 0.35998073183459983 +168 76 model.embedding_dim 0.0 +168 76 training.batch_size 0.0 +168 76 training.label_smoothing 0.0011133632318855269 +168 77 model.embedding_dim 2.0 +168 77 training.batch_size 2.0 +168 77 training.label_smoothing 0.2295281768758879 +168 78 model.embedding_dim 1.0 +168 78 training.batch_size 0.0 +168 78 training.label_smoothing 0.004480891463819871 +168 79 model.embedding_dim 1.0 +168 79 training.batch_size 0.0 +168 79 training.label_smoothing 0.027047340899940504 +168 80 model.embedding_dim 1.0 +168 80 training.batch_size 1.0 +168 80 training.label_smoothing 0.0021329013384514053 +168 81 model.embedding_dim 0.0 +168 81 training.batch_size 2.0 +168 81 training.label_smoothing 0.49599242234004554 +168 82 model.embedding_dim 0.0 +168 82 training.batch_size 0.0 +168 82 training.label_smoothing 0.6570836201117859 +168 83 model.embedding_dim 2.0 +168 83 training.batch_size 0.0 +168 83 training.label_smoothing 0.11139284618387096 +168 84 model.embedding_dim 1.0 +168 84 training.batch_size 2.0 +168 84 training.label_smoothing 0.1555949931232288 +168 85 model.embedding_dim 0.0 +168 85 training.batch_size 1.0 +168 85 training.label_smoothing 0.40567535787559245 +168 86 model.embedding_dim 1.0 +168 86 training.batch_size 1.0 +168 86 training.label_smoothing 0.00467843613425712 +168 87 model.embedding_dim 2.0 +168 87 training.batch_size 0.0 +168 87 training.label_smoothing 0.5504843703710935 +168 88 model.embedding_dim 2.0 +168 88 training.batch_size 1.0 +168 88 training.label_smoothing 0.199163128047539 +168 89 model.embedding_dim 2.0 +168 89 
training.batch_size 1.0 +168 89 training.label_smoothing 0.0013888503645118283 +168 90 model.embedding_dim 2.0 +168 90 training.batch_size 0.0 +168 90 training.label_smoothing 0.002171378415442529 +168 91 model.embedding_dim 0.0 +168 91 training.batch_size 1.0 +168 91 training.label_smoothing 0.0019795077370946498 +168 92 model.embedding_dim 1.0 +168 92 training.batch_size 1.0 +168 92 training.label_smoothing 0.7514488038300529 +168 93 model.embedding_dim 2.0 +168 93 training.batch_size 2.0 +168 93 training.label_smoothing 0.0013102829906348338 +168 94 model.embedding_dim 2.0 +168 94 training.batch_size 2.0 +168 94 training.label_smoothing 0.0994345355586382 +168 95 model.embedding_dim 0.0 +168 95 training.batch_size 2.0 +168 95 training.label_smoothing 0.0017967358338120275 +168 96 model.embedding_dim 2.0 +168 96 training.batch_size 0.0 +168 96 training.label_smoothing 0.08645886373717684 +168 97 model.embedding_dim 1.0 +168 97 training.batch_size 0.0 +168 97 training.label_smoothing 0.4931468222952137 +168 98 model.embedding_dim 2.0 +168 98 training.batch_size 0.0 +168 98 training.label_smoothing 0.01690726659504098 +168 99 model.embedding_dim 2.0 +168 99 training.batch_size 0.0 +168 99 training.label_smoothing 0.016090737759652144 +168 100 model.embedding_dim 2.0 +168 100 training.batch_size 0.0 +168 100 training.label_smoothing 0.09478806196731179 +168 1 dataset """kinships""" +168 1 model """distmult""" +168 1 loss """bceaftersigmoid""" +168 1 regularizer """no""" +168 1 optimizer """adadelta""" +168 1 training_loop """lcwa""" +168 1 evaluator """rankbased""" +168 2 dataset """kinships""" +168 2 model """distmult""" +168 2 loss """bceaftersigmoid""" +168 2 regularizer """no""" +168 2 optimizer """adadelta""" +168 2 training_loop """lcwa""" +168 2 evaluator """rankbased""" +168 3 dataset """kinships""" +168 3 model """distmult""" +168 3 loss """bceaftersigmoid""" +168 3 regularizer """no""" +168 3 optimizer """adadelta""" +168 3 training_loop """lcwa""" +168 3 
evaluator """rankbased""" +168 4 dataset """kinships""" +168 4 model """distmult""" +168 4 loss """bceaftersigmoid""" +168 4 regularizer """no""" +168 4 optimizer """adadelta""" +168 4 training_loop """lcwa""" +168 4 evaluator """rankbased""" +168 5 dataset """kinships""" +168 5 model """distmult""" +168 5 loss """bceaftersigmoid""" +168 5 regularizer """no""" +168 5 optimizer """adadelta""" +168 5 training_loop """lcwa""" +168 5 evaluator """rankbased""" +168 6 dataset """kinships""" +168 6 model """distmult""" +168 6 loss """bceaftersigmoid""" +168 6 regularizer """no""" +168 6 optimizer """adadelta""" +168 6 training_loop """lcwa""" +168 6 evaluator """rankbased""" +168 7 dataset """kinships""" +168 7 model """distmult""" +168 7 loss """bceaftersigmoid""" +168 7 regularizer """no""" +168 7 optimizer """adadelta""" +168 7 training_loop """lcwa""" +168 7 evaluator """rankbased""" +168 8 dataset """kinships""" +168 8 model """distmult""" +168 8 loss """bceaftersigmoid""" +168 8 regularizer """no""" +168 8 optimizer """adadelta""" +168 8 training_loop """lcwa""" +168 8 evaluator """rankbased""" +168 9 dataset """kinships""" +168 9 model """distmult""" +168 9 loss """bceaftersigmoid""" +168 9 regularizer """no""" +168 9 optimizer """adadelta""" +168 9 training_loop """lcwa""" +168 9 evaluator """rankbased""" +168 10 dataset """kinships""" +168 10 model """distmult""" +168 10 loss """bceaftersigmoid""" +168 10 regularizer """no""" +168 10 optimizer """adadelta""" +168 10 training_loop """lcwa""" +168 10 evaluator """rankbased""" +168 11 dataset """kinships""" +168 11 model """distmult""" +168 11 loss """bceaftersigmoid""" +168 11 regularizer """no""" +168 11 optimizer """adadelta""" +168 11 training_loop """lcwa""" +168 11 evaluator """rankbased""" +168 12 dataset """kinships""" +168 12 model """distmult""" +168 12 loss """bceaftersigmoid""" +168 12 regularizer """no""" +168 12 optimizer """adadelta""" +168 12 training_loop """lcwa""" +168 12 evaluator """rankbased""" 
+168 13 dataset """kinships""" +168 13 model """distmult""" +168 13 loss """bceaftersigmoid""" +168 13 regularizer """no""" +168 13 optimizer """adadelta""" +168 13 training_loop """lcwa""" +168 13 evaluator """rankbased""" +168 14 dataset """kinships""" +168 14 model """distmult""" +168 14 loss """bceaftersigmoid""" +168 14 regularizer """no""" +168 14 optimizer """adadelta""" +168 14 training_loop """lcwa""" +168 14 evaluator """rankbased""" +168 15 dataset """kinships""" +168 15 model """distmult""" +168 15 loss """bceaftersigmoid""" +168 15 regularizer """no""" +168 15 optimizer """adadelta""" +168 15 training_loop """lcwa""" +168 15 evaluator """rankbased""" +168 16 dataset """kinships""" +168 16 model """distmult""" +168 16 loss """bceaftersigmoid""" +168 16 regularizer """no""" +168 16 optimizer """adadelta""" +168 16 training_loop """lcwa""" +168 16 evaluator """rankbased""" +168 17 dataset """kinships""" +168 17 model """distmult""" +168 17 loss """bceaftersigmoid""" +168 17 regularizer """no""" +168 17 optimizer """adadelta""" +168 17 training_loop """lcwa""" +168 17 evaluator """rankbased""" +168 18 dataset """kinships""" +168 18 model """distmult""" +168 18 loss """bceaftersigmoid""" +168 18 regularizer """no""" +168 18 optimizer """adadelta""" +168 18 training_loop """lcwa""" +168 18 evaluator """rankbased""" +168 19 dataset """kinships""" +168 19 model """distmult""" +168 19 loss """bceaftersigmoid""" +168 19 regularizer """no""" +168 19 optimizer """adadelta""" +168 19 training_loop """lcwa""" +168 19 evaluator """rankbased""" +168 20 dataset """kinships""" +168 20 model """distmult""" +168 20 loss """bceaftersigmoid""" +168 20 regularizer """no""" +168 20 optimizer """adadelta""" +168 20 training_loop """lcwa""" +168 20 evaluator """rankbased""" +168 21 dataset """kinships""" +168 21 model """distmult""" +168 21 loss """bceaftersigmoid""" +168 21 regularizer """no""" +168 21 optimizer """adadelta""" +168 21 training_loop """lcwa""" +168 21 evaluator 
"""rankbased""" +168 22 dataset """kinships""" +168 22 model """distmult""" +168 22 loss """bceaftersigmoid""" +168 22 regularizer """no""" +168 22 optimizer """adadelta""" +168 22 training_loop """lcwa""" +168 22 evaluator """rankbased""" +168 23 dataset """kinships""" +168 23 model """distmult""" +168 23 loss """bceaftersigmoid""" +168 23 regularizer """no""" +168 23 optimizer """adadelta""" +168 23 training_loop """lcwa""" +168 23 evaluator """rankbased""" +168 24 dataset """kinships""" +168 24 model """distmult""" +168 24 loss """bceaftersigmoid""" +168 24 regularizer """no""" +168 24 optimizer """adadelta""" +168 24 training_loop """lcwa""" +168 24 evaluator """rankbased""" +168 25 dataset """kinships""" +168 25 model """distmult""" +168 25 loss """bceaftersigmoid""" +168 25 regularizer """no""" +168 25 optimizer """adadelta""" +168 25 training_loop """lcwa""" +168 25 evaluator """rankbased""" +168 26 dataset """kinships""" +168 26 model """distmult""" +168 26 loss """bceaftersigmoid""" +168 26 regularizer """no""" +168 26 optimizer """adadelta""" +168 26 training_loop """lcwa""" +168 26 evaluator """rankbased""" +168 27 dataset """kinships""" +168 27 model """distmult""" +168 27 loss """bceaftersigmoid""" +168 27 regularizer """no""" +168 27 optimizer """adadelta""" +168 27 training_loop """lcwa""" +168 27 evaluator """rankbased""" +168 28 dataset """kinships""" +168 28 model """distmult""" +168 28 loss """bceaftersigmoid""" +168 28 regularizer """no""" +168 28 optimizer """adadelta""" +168 28 training_loop """lcwa""" +168 28 evaluator """rankbased""" +168 29 dataset """kinships""" +168 29 model """distmult""" +168 29 loss """bceaftersigmoid""" +168 29 regularizer """no""" +168 29 optimizer """adadelta""" +168 29 training_loop """lcwa""" +168 29 evaluator """rankbased""" +168 30 dataset """kinships""" +168 30 model """distmult""" +168 30 loss """bceaftersigmoid""" +168 30 regularizer """no""" +168 30 optimizer """adadelta""" +168 30 training_loop """lcwa""" 
+168 30 evaluator """rankbased""" +168 31 dataset """kinships""" +168 31 model """distmult""" +168 31 loss """bceaftersigmoid""" +168 31 regularizer """no""" +168 31 optimizer """adadelta""" +168 31 training_loop """lcwa""" +168 31 evaluator """rankbased""" +168 32 dataset """kinships""" +168 32 model """distmult""" +168 32 loss """bceaftersigmoid""" +168 32 regularizer """no""" +168 32 optimizer """adadelta""" +168 32 training_loop """lcwa""" +168 32 evaluator """rankbased""" +168 33 dataset """kinships""" +168 33 model """distmult""" +168 33 loss """bceaftersigmoid""" +168 33 regularizer """no""" +168 33 optimizer """adadelta""" +168 33 training_loop """lcwa""" +168 33 evaluator """rankbased""" +168 34 dataset """kinships""" +168 34 model """distmult""" +168 34 loss """bceaftersigmoid""" +168 34 regularizer """no""" +168 34 optimizer """adadelta""" +168 34 training_loop """lcwa""" +168 34 evaluator """rankbased""" +168 35 dataset """kinships""" +168 35 model """distmult""" +168 35 loss """bceaftersigmoid""" +168 35 regularizer """no""" +168 35 optimizer """adadelta""" +168 35 training_loop """lcwa""" +168 35 evaluator """rankbased""" +168 36 dataset """kinships""" +168 36 model """distmult""" +168 36 loss """bceaftersigmoid""" +168 36 regularizer """no""" +168 36 optimizer """adadelta""" +168 36 training_loop """lcwa""" +168 36 evaluator """rankbased""" +168 37 dataset """kinships""" +168 37 model """distmult""" +168 37 loss """bceaftersigmoid""" +168 37 regularizer """no""" +168 37 optimizer """adadelta""" +168 37 training_loop """lcwa""" +168 37 evaluator """rankbased""" +168 38 dataset """kinships""" +168 38 model """distmult""" +168 38 loss """bceaftersigmoid""" +168 38 regularizer """no""" +168 38 optimizer """adadelta""" +168 38 training_loop """lcwa""" +168 38 evaluator """rankbased""" +168 39 dataset """kinships""" +168 39 model """distmult""" +168 39 loss """bceaftersigmoid""" +168 39 regularizer """no""" +168 39 optimizer """adadelta""" +168 39 
training_loop """lcwa""" +168 39 evaluator """rankbased""" +168 40 dataset """kinships""" +168 40 model """distmult""" +168 40 loss """bceaftersigmoid""" +168 40 regularizer """no""" +168 40 optimizer """adadelta""" +168 40 training_loop """lcwa""" +168 40 evaluator """rankbased""" +168 41 dataset """kinships""" +168 41 model """distmult""" +168 41 loss """bceaftersigmoid""" +168 41 regularizer """no""" +168 41 optimizer """adadelta""" +168 41 training_loop """lcwa""" +168 41 evaluator """rankbased""" +168 42 dataset """kinships""" +168 42 model """distmult""" +168 42 loss """bceaftersigmoid""" +168 42 regularizer """no""" +168 42 optimizer """adadelta""" +168 42 training_loop """lcwa""" +168 42 evaluator """rankbased""" +168 43 dataset """kinships""" +168 43 model """distmult""" +168 43 loss """bceaftersigmoid""" +168 43 regularizer """no""" +168 43 optimizer """adadelta""" +168 43 training_loop """lcwa""" +168 43 evaluator """rankbased""" +168 44 dataset """kinships""" +168 44 model """distmult""" +168 44 loss """bceaftersigmoid""" +168 44 regularizer """no""" +168 44 optimizer """adadelta""" +168 44 training_loop """lcwa""" +168 44 evaluator """rankbased""" +168 45 dataset """kinships""" +168 45 model """distmult""" +168 45 loss """bceaftersigmoid""" +168 45 regularizer """no""" +168 45 optimizer """adadelta""" +168 45 training_loop """lcwa""" +168 45 evaluator """rankbased""" +168 46 dataset """kinships""" +168 46 model """distmult""" +168 46 loss """bceaftersigmoid""" +168 46 regularizer """no""" +168 46 optimizer """adadelta""" +168 46 training_loop """lcwa""" +168 46 evaluator """rankbased""" +168 47 dataset """kinships""" +168 47 model """distmult""" +168 47 loss """bceaftersigmoid""" +168 47 regularizer """no""" +168 47 optimizer """adadelta""" +168 47 training_loop """lcwa""" +168 47 evaluator """rankbased""" +168 48 dataset """kinships""" +168 48 model """distmult""" +168 48 loss """bceaftersigmoid""" +168 48 regularizer """no""" +168 48 optimizer 
"""adadelta""" +168 48 training_loop """lcwa""" +168 48 evaluator """rankbased""" +168 49 dataset """kinships""" +168 49 model """distmult""" +168 49 loss """bceaftersigmoid""" +168 49 regularizer """no""" +168 49 optimizer """adadelta""" +168 49 training_loop """lcwa""" +168 49 evaluator """rankbased""" +168 50 dataset """kinships""" +168 50 model """distmult""" +168 50 loss """bceaftersigmoid""" +168 50 regularizer """no""" +168 50 optimizer """adadelta""" +168 50 training_loop """lcwa""" +168 50 evaluator """rankbased""" +168 51 dataset """kinships""" +168 51 model """distmult""" +168 51 loss """bceaftersigmoid""" +168 51 regularizer """no""" +168 51 optimizer """adadelta""" +168 51 training_loop """lcwa""" +168 51 evaluator """rankbased""" +168 52 dataset """kinships""" +168 52 model """distmult""" +168 52 loss """bceaftersigmoid""" +168 52 regularizer """no""" +168 52 optimizer """adadelta""" +168 52 training_loop """lcwa""" +168 52 evaluator """rankbased""" +168 53 dataset """kinships""" +168 53 model """distmult""" +168 53 loss """bceaftersigmoid""" +168 53 regularizer """no""" +168 53 optimizer """adadelta""" +168 53 training_loop """lcwa""" +168 53 evaluator """rankbased""" +168 54 dataset """kinships""" +168 54 model """distmult""" +168 54 loss """bceaftersigmoid""" +168 54 regularizer """no""" +168 54 optimizer """adadelta""" +168 54 training_loop """lcwa""" +168 54 evaluator """rankbased""" +168 55 dataset """kinships""" +168 55 model """distmult""" +168 55 loss """bceaftersigmoid""" +168 55 regularizer """no""" +168 55 optimizer """adadelta""" +168 55 training_loop """lcwa""" +168 55 evaluator """rankbased""" +168 56 dataset """kinships""" +168 56 model """distmult""" +168 56 loss """bceaftersigmoid""" +168 56 regularizer """no""" +168 56 optimizer """adadelta""" +168 56 training_loop """lcwa""" +168 56 evaluator """rankbased""" +168 57 dataset """kinships""" +168 57 model """distmult""" +168 57 loss """bceaftersigmoid""" +168 57 regularizer """no""" 
+168 57 optimizer """adadelta""" +168 57 training_loop """lcwa""" +168 57 evaluator """rankbased""" +168 58 dataset """kinships""" +168 58 model """distmult""" +168 58 loss """bceaftersigmoid""" +168 58 regularizer """no""" +168 58 optimizer """adadelta""" +168 58 training_loop """lcwa""" +168 58 evaluator """rankbased""" +168 59 dataset """kinships""" +168 59 model """distmult""" +168 59 loss """bceaftersigmoid""" +168 59 regularizer """no""" +168 59 optimizer """adadelta""" +168 59 training_loop """lcwa""" +168 59 evaluator """rankbased""" +168 60 dataset """kinships""" +168 60 model """distmult""" +168 60 loss """bceaftersigmoid""" +168 60 regularizer """no""" +168 60 optimizer """adadelta""" +168 60 training_loop """lcwa""" +168 60 evaluator """rankbased""" +168 61 dataset """kinships""" +168 61 model """distmult""" +168 61 loss """bceaftersigmoid""" +168 61 regularizer """no""" +168 61 optimizer """adadelta""" +168 61 training_loop """lcwa""" +168 61 evaluator """rankbased""" +168 62 dataset """kinships""" +168 62 model """distmult""" +168 62 loss """bceaftersigmoid""" +168 62 regularizer """no""" +168 62 optimizer """adadelta""" +168 62 training_loop """lcwa""" +168 62 evaluator """rankbased""" +168 63 dataset """kinships""" +168 63 model """distmult""" +168 63 loss """bceaftersigmoid""" +168 63 regularizer """no""" +168 63 optimizer """adadelta""" +168 63 training_loop """lcwa""" +168 63 evaluator """rankbased""" +168 64 dataset """kinships""" +168 64 model """distmult""" +168 64 loss """bceaftersigmoid""" +168 64 regularizer """no""" +168 64 optimizer """adadelta""" +168 64 training_loop """lcwa""" +168 64 evaluator """rankbased""" +168 65 dataset """kinships""" +168 65 model """distmult""" +168 65 loss """bceaftersigmoid""" +168 65 regularizer """no""" +168 65 optimizer """adadelta""" +168 65 training_loop """lcwa""" +168 65 evaluator """rankbased""" +168 66 dataset """kinships""" +168 66 model """distmult""" +168 66 loss """bceaftersigmoid""" +168 66 
regularizer """no""" +168 66 optimizer """adadelta""" +168 66 training_loop """lcwa""" +168 66 evaluator """rankbased""" +168 67 dataset """kinships""" +168 67 model """distmult""" +168 67 loss """bceaftersigmoid""" +168 67 regularizer """no""" +168 67 optimizer """adadelta""" +168 67 training_loop """lcwa""" +168 67 evaluator """rankbased""" +168 68 dataset """kinships""" +168 68 model """distmult""" +168 68 loss """bceaftersigmoid""" +168 68 regularizer """no""" +168 68 optimizer """adadelta""" +168 68 training_loop """lcwa""" +168 68 evaluator """rankbased""" +168 69 dataset """kinships""" +168 69 model """distmult""" +168 69 loss """bceaftersigmoid""" +168 69 regularizer """no""" +168 69 optimizer """adadelta""" +168 69 training_loop """lcwa""" +168 69 evaluator """rankbased""" +168 70 dataset """kinships""" +168 70 model """distmult""" +168 70 loss """bceaftersigmoid""" +168 70 regularizer """no""" +168 70 optimizer """adadelta""" +168 70 training_loop """lcwa""" +168 70 evaluator """rankbased""" +168 71 dataset """kinships""" +168 71 model """distmult""" +168 71 loss """bceaftersigmoid""" +168 71 regularizer """no""" +168 71 optimizer """adadelta""" +168 71 training_loop """lcwa""" +168 71 evaluator """rankbased""" +168 72 dataset """kinships""" +168 72 model """distmult""" +168 72 loss """bceaftersigmoid""" +168 72 regularizer """no""" +168 72 optimizer """adadelta""" +168 72 training_loop """lcwa""" +168 72 evaluator """rankbased""" +168 73 dataset """kinships""" +168 73 model """distmult""" +168 73 loss """bceaftersigmoid""" +168 73 regularizer """no""" +168 73 optimizer """adadelta""" +168 73 training_loop """lcwa""" +168 73 evaluator """rankbased""" +168 74 dataset """kinships""" +168 74 model """distmult""" +168 74 loss """bceaftersigmoid""" +168 74 regularizer """no""" +168 74 optimizer """adadelta""" +168 74 training_loop """lcwa""" +168 74 evaluator """rankbased""" +168 75 dataset """kinships""" +168 75 model """distmult""" +168 75 loss 
"""bceaftersigmoid""" +168 75 regularizer """no""" +168 75 optimizer """adadelta""" +168 75 training_loop """lcwa""" +168 75 evaluator """rankbased""" +168 76 dataset """kinships""" +168 76 model """distmult""" +168 76 loss """bceaftersigmoid""" +168 76 regularizer """no""" +168 76 optimizer """adadelta""" +168 76 training_loop """lcwa""" +168 76 evaluator """rankbased""" +168 77 dataset """kinships""" +168 77 model """distmult""" +168 77 loss """bceaftersigmoid""" +168 77 regularizer """no""" +168 77 optimizer """adadelta""" +168 77 training_loop """lcwa""" +168 77 evaluator """rankbased""" +168 78 dataset """kinships""" +168 78 model """distmult""" +168 78 loss """bceaftersigmoid""" +168 78 regularizer """no""" +168 78 optimizer """adadelta""" +168 78 training_loop """lcwa""" +168 78 evaluator """rankbased""" +168 79 dataset """kinships""" +168 79 model """distmult""" +168 79 loss """bceaftersigmoid""" +168 79 regularizer """no""" +168 79 optimizer """adadelta""" +168 79 training_loop """lcwa""" +168 79 evaluator """rankbased""" +168 80 dataset """kinships""" +168 80 model """distmult""" +168 80 loss """bceaftersigmoid""" +168 80 regularizer """no""" +168 80 optimizer """adadelta""" +168 80 training_loop """lcwa""" +168 80 evaluator """rankbased""" +168 81 dataset """kinships""" +168 81 model """distmult""" +168 81 loss """bceaftersigmoid""" +168 81 regularizer """no""" +168 81 optimizer """adadelta""" +168 81 training_loop """lcwa""" +168 81 evaluator """rankbased""" +168 82 dataset """kinships""" +168 82 model """distmult""" +168 82 loss """bceaftersigmoid""" +168 82 regularizer """no""" +168 82 optimizer """adadelta""" +168 82 training_loop """lcwa""" +168 82 evaluator """rankbased""" +168 83 dataset """kinships""" +168 83 model """distmult""" +168 83 loss """bceaftersigmoid""" +168 83 regularizer """no""" +168 83 optimizer """adadelta""" +168 83 training_loop """lcwa""" +168 83 evaluator """rankbased""" +168 84 dataset """kinships""" +168 84 model 
"""distmult""" +168 84 loss """bceaftersigmoid""" +168 84 regularizer """no""" +168 84 optimizer """adadelta""" +168 84 training_loop """lcwa""" +168 84 evaluator """rankbased""" +168 85 dataset """kinships""" +168 85 model """distmult""" +168 85 loss """bceaftersigmoid""" +168 85 regularizer """no""" +168 85 optimizer """adadelta""" +168 85 training_loop """lcwa""" +168 85 evaluator """rankbased""" +168 86 dataset """kinships""" +168 86 model """distmult""" +168 86 loss """bceaftersigmoid""" +168 86 regularizer """no""" +168 86 optimizer """adadelta""" +168 86 training_loop """lcwa""" +168 86 evaluator """rankbased""" +168 87 dataset """kinships""" +168 87 model """distmult""" +168 87 loss """bceaftersigmoid""" +168 87 regularizer """no""" +168 87 optimizer """adadelta""" +168 87 training_loop """lcwa""" +168 87 evaluator """rankbased""" +168 88 dataset """kinships""" +168 88 model """distmult""" +168 88 loss """bceaftersigmoid""" +168 88 regularizer """no""" +168 88 optimizer """adadelta""" +168 88 training_loop """lcwa""" +168 88 evaluator """rankbased""" +168 89 dataset """kinships""" +168 89 model """distmult""" +168 89 loss """bceaftersigmoid""" +168 89 regularizer """no""" +168 89 optimizer """adadelta""" +168 89 training_loop """lcwa""" +168 89 evaluator """rankbased""" +168 90 dataset """kinships""" +168 90 model """distmult""" +168 90 loss """bceaftersigmoid""" +168 90 regularizer """no""" +168 90 optimizer """adadelta""" +168 90 training_loop """lcwa""" +168 90 evaluator """rankbased""" +168 91 dataset """kinships""" +168 91 model """distmult""" +168 91 loss """bceaftersigmoid""" +168 91 regularizer """no""" +168 91 optimizer """adadelta""" +168 91 training_loop """lcwa""" +168 91 evaluator """rankbased""" +168 92 dataset """kinships""" +168 92 model """distmult""" +168 92 loss """bceaftersigmoid""" +168 92 regularizer """no""" +168 92 optimizer """adadelta""" +168 92 training_loop """lcwa""" +168 92 evaluator """rankbased""" +168 93 dataset 
"""kinships""" +168 93 model """distmult""" +168 93 loss """bceaftersigmoid""" +168 93 regularizer """no""" +168 93 optimizer """adadelta""" +168 93 training_loop """lcwa""" +168 93 evaluator """rankbased""" +168 94 dataset """kinships""" +168 94 model """distmult""" +168 94 loss """bceaftersigmoid""" +168 94 regularizer """no""" +168 94 optimizer """adadelta""" +168 94 training_loop """lcwa""" +168 94 evaluator """rankbased""" +168 95 dataset """kinships""" +168 95 model """distmult""" +168 95 loss """bceaftersigmoid""" +168 95 regularizer """no""" +168 95 optimizer """adadelta""" +168 95 training_loop """lcwa""" +168 95 evaluator """rankbased""" +168 96 dataset """kinships""" +168 96 model """distmult""" +168 96 loss """bceaftersigmoid""" +168 96 regularizer """no""" +168 96 optimizer """adadelta""" +168 96 training_loop """lcwa""" +168 96 evaluator """rankbased""" +168 97 dataset """kinships""" +168 97 model """distmult""" +168 97 loss """bceaftersigmoid""" +168 97 regularizer """no""" +168 97 optimizer """adadelta""" +168 97 training_loop """lcwa""" +168 97 evaluator """rankbased""" +168 98 dataset """kinships""" +168 98 model """distmult""" +168 98 loss """bceaftersigmoid""" +168 98 regularizer """no""" +168 98 optimizer """adadelta""" +168 98 training_loop """lcwa""" +168 98 evaluator """rankbased""" +168 99 dataset """kinships""" +168 99 model """distmult""" +168 99 loss """bceaftersigmoid""" +168 99 regularizer """no""" +168 99 optimizer """adadelta""" +168 99 training_loop """lcwa""" +168 99 evaluator """rankbased""" +168 100 dataset """kinships""" +168 100 model """distmult""" +168 100 loss """bceaftersigmoid""" +168 100 regularizer """no""" +168 100 optimizer """adadelta""" +168 100 training_loop """lcwa""" +168 100 evaluator """rankbased""" +169 1 model.embedding_dim 2.0 +169 1 training.batch_size 0.0 +169 1 training.label_smoothing 0.04439680876506374 +169 2 model.embedding_dim 1.0 +169 2 training.batch_size 2.0 +169 2 training.label_smoothing 
0.16412255450450136 +169 3 model.embedding_dim 2.0 +169 3 training.batch_size 1.0 +169 3 training.label_smoothing 0.04678685387621773 +169 4 model.embedding_dim 2.0 +169 4 training.batch_size 0.0 +169 4 training.label_smoothing 0.004677673351347015 +169 5 model.embedding_dim 1.0 +169 5 training.batch_size 2.0 +169 5 training.label_smoothing 0.11655292437452201 +169 6 model.embedding_dim 2.0 +169 6 training.batch_size 1.0 +169 6 training.label_smoothing 0.00139613100845807 +169 7 model.embedding_dim 0.0 +169 7 training.batch_size 0.0 +169 7 training.label_smoothing 0.17201214645535723 +169 8 model.embedding_dim 0.0 +169 8 training.batch_size 0.0 +169 8 training.label_smoothing 0.013468180590380482 +169 9 model.embedding_dim 0.0 +169 9 training.batch_size 2.0 +169 9 training.label_smoothing 0.001075391925401144 +169 10 model.embedding_dim 1.0 +169 10 training.batch_size 0.0 +169 10 training.label_smoothing 0.088115076439584 +169 11 model.embedding_dim 2.0 +169 11 training.batch_size 0.0 +169 11 training.label_smoothing 0.09661885412477966 +169 12 model.embedding_dim 1.0 +169 12 training.batch_size 0.0 +169 12 training.label_smoothing 0.039026147896867605 +169 13 model.embedding_dim 2.0 +169 13 training.batch_size 2.0 +169 13 training.label_smoothing 0.0014105429198366648 +169 14 model.embedding_dim 2.0 +169 14 training.batch_size 1.0 +169 14 training.label_smoothing 0.05930621035044778 +169 15 model.embedding_dim 0.0 +169 15 training.batch_size 1.0 +169 15 training.label_smoothing 0.0016458215408538604 +169 16 model.embedding_dim 1.0 +169 16 training.batch_size 1.0 +169 16 training.label_smoothing 0.05568253949438053 +169 17 model.embedding_dim 0.0 +169 17 training.batch_size 0.0 +169 17 training.label_smoothing 0.18469105942538996 +169 18 model.embedding_dim 0.0 +169 18 training.batch_size 1.0 +169 18 training.label_smoothing 0.7188306933948005 +169 19 model.embedding_dim 1.0 +169 19 training.batch_size 2.0 +169 19 training.label_smoothing 0.5647045920927111 +169 20 
model.embedding_dim 0.0 +169 20 training.batch_size 1.0 +169 20 training.label_smoothing 0.016175458898817878 +169 21 model.embedding_dim 0.0 +169 21 training.batch_size 0.0 +169 21 training.label_smoothing 0.006180431275926523 +169 22 model.embedding_dim 0.0 +169 22 training.batch_size 0.0 +169 22 training.label_smoothing 0.07972459685166068 +169 23 model.embedding_dim 1.0 +169 23 training.batch_size 2.0 +169 23 training.label_smoothing 0.008851224123772837 +169 24 model.embedding_dim 1.0 +169 24 training.batch_size 1.0 +169 24 training.label_smoothing 0.038462025576248816 +169 25 model.embedding_dim 2.0 +169 25 training.batch_size 1.0 +169 25 training.label_smoothing 0.6699645066501538 +169 26 model.embedding_dim 1.0 +169 26 training.batch_size 1.0 +169 26 training.label_smoothing 0.11225673608523971 +169 27 model.embedding_dim 1.0 +169 27 training.batch_size 0.0 +169 27 training.label_smoothing 0.002344416585182735 +169 28 model.embedding_dim 2.0 +169 28 training.batch_size 0.0 +169 28 training.label_smoothing 0.8724068605067152 +169 29 model.embedding_dim 1.0 +169 29 training.batch_size 2.0 +169 29 training.label_smoothing 0.0050811676912662935 +169 30 model.embedding_dim 0.0 +169 30 training.batch_size 0.0 +169 30 training.label_smoothing 0.001616664682446361 +169 31 model.embedding_dim 1.0 +169 31 training.batch_size 0.0 +169 31 training.label_smoothing 0.013797985611385698 +169 32 model.embedding_dim 1.0 +169 32 training.batch_size 1.0 +169 32 training.label_smoothing 0.21331463014256757 +169 33 model.embedding_dim 0.0 +169 33 training.batch_size 2.0 +169 33 training.label_smoothing 0.3100431906457446 +169 34 model.embedding_dim 1.0 +169 34 training.batch_size 0.0 +169 34 training.label_smoothing 0.052672091491729335 +169 35 model.embedding_dim 2.0 +169 35 training.batch_size 0.0 +169 35 training.label_smoothing 0.14330432039271385 +169 36 model.embedding_dim 0.0 +169 36 training.batch_size 0.0 +169 36 training.label_smoothing 0.0021669536252681663 +169 37 
model.embedding_dim 0.0 +169 37 training.batch_size 0.0 +169 37 training.label_smoothing 0.342631626017944 +169 38 model.embedding_dim 0.0 +169 38 training.batch_size 0.0 +169 38 training.label_smoothing 0.9258355566626097 +169 39 model.embedding_dim 0.0 +169 39 training.batch_size 0.0 +169 39 training.label_smoothing 0.013841735777593856 +169 40 model.embedding_dim 2.0 +169 40 training.batch_size 0.0 +169 40 training.label_smoothing 0.002115745596108813 +169 41 model.embedding_dim 0.0 +169 41 training.batch_size 1.0 +169 41 training.label_smoothing 0.002539321758740166 +169 42 model.embedding_dim 1.0 +169 42 training.batch_size 1.0 +169 42 training.label_smoothing 0.01026295875560741 +169 43 model.embedding_dim 2.0 +169 43 training.batch_size 1.0 +169 43 training.label_smoothing 0.5052661745656689 +169 44 model.embedding_dim 2.0 +169 44 training.batch_size 0.0 +169 44 training.label_smoothing 0.02196457791210676 +169 45 model.embedding_dim 0.0 +169 45 training.batch_size 1.0 +169 45 training.label_smoothing 0.8419122937543598 +169 46 model.embedding_dim 2.0 +169 46 training.batch_size 1.0 +169 46 training.label_smoothing 0.023026105327065286 +169 47 model.embedding_dim 2.0 +169 47 training.batch_size 2.0 +169 47 training.label_smoothing 0.8816137924174284 +169 48 model.embedding_dim 2.0 +169 48 training.batch_size 2.0 +169 48 training.label_smoothing 0.03680575062826633 +169 49 model.embedding_dim 2.0 +169 49 training.batch_size 1.0 +169 49 training.label_smoothing 0.04309595108273573 +169 50 model.embedding_dim 2.0 +169 50 training.batch_size 1.0 +169 50 training.label_smoothing 0.22856157635402152 +169 51 model.embedding_dim 0.0 +169 51 training.batch_size 2.0 +169 51 training.label_smoothing 0.4058103471849078 +169 52 model.embedding_dim 0.0 +169 52 training.batch_size 1.0 +169 52 training.label_smoothing 0.038973799035231106 +169 53 model.embedding_dim 0.0 +169 53 training.batch_size 0.0 +169 53 training.label_smoothing 0.202511679218751 +169 54 
model.embedding_dim 0.0 +169 54 training.batch_size 2.0 +169 54 training.label_smoothing 0.0013662912168577886 +169 55 model.embedding_dim 1.0 +169 55 training.batch_size 2.0 +169 55 training.label_smoothing 0.02256928492075048 +169 56 model.embedding_dim 2.0 +169 56 training.batch_size 2.0 +169 56 training.label_smoothing 0.7191750672373433 +169 57 model.embedding_dim 1.0 +169 57 training.batch_size 0.0 +169 57 training.label_smoothing 0.0027602398115970763 +169 58 model.embedding_dim 0.0 +169 58 training.batch_size 2.0 +169 58 training.label_smoothing 0.15781928704727047 +169 59 model.embedding_dim 2.0 +169 59 training.batch_size 2.0 +169 59 training.label_smoothing 0.0027514215568165985 +169 60 model.embedding_dim 0.0 +169 60 training.batch_size 0.0 +169 60 training.label_smoothing 0.03254312439850867 +169 61 model.embedding_dim 0.0 +169 61 training.batch_size 1.0 +169 61 training.label_smoothing 0.6213889144313028 +169 62 model.embedding_dim 0.0 +169 62 training.batch_size 2.0 +169 62 training.label_smoothing 0.01216937932185795 +169 63 model.embedding_dim 0.0 +169 63 training.batch_size 2.0 +169 63 training.label_smoothing 0.023006630985967294 +169 64 model.embedding_dim 0.0 +169 64 training.batch_size 0.0 +169 64 training.label_smoothing 0.7021083317028896 +169 65 model.embedding_dim 2.0 +169 65 training.batch_size 0.0 +169 65 training.label_smoothing 0.005970305901402708 +169 66 model.embedding_dim 2.0 +169 66 training.batch_size 2.0 +169 66 training.label_smoothing 0.17727841918586887 +169 67 model.embedding_dim 0.0 +169 67 training.batch_size 0.0 +169 67 training.label_smoothing 0.015233573917536426 +169 68 model.embedding_dim 2.0 +169 68 training.batch_size 1.0 +169 68 training.label_smoothing 0.007187757167103776 +169 69 model.embedding_dim 0.0 +169 69 training.batch_size 0.0 +169 69 training.label_smoothing 0.006967097332320459 +169 70 model.embedding_dim 0.0 +169 70 training.batch_size 2.0 +169 70 training.label_smoothing 0.6812725847696334 +169 71 
model.embedding_dim 2.0 +169 71 training.batch_size 0.0 +169 71 training.label_smoothing 0.008360771185144554 +169 72 model.embedding_dim 1.0 +169 72 training.batch_size 1.0 +169 72 training.label_smoothing 0.547053357466717 +169 73 model.embedding_dim 2.0 +169 73 training.batch_size 0.0 +169 73 training.label_smoothing 0.18198160775939537 +169 74 model.embedding_dim 0.0 +169 74 training.batch_size 1.0 +169 74 training.label_smoothing 0.7639967986604274 +169 75 model.embedding_dim 0.0 +169 75 training.batch_size 2.0 +169 75 training.label_smoothing 0.013229463211979396 +169 76 model.embedding_dim 2.0 +169 76 training.batch_size 0.0 +169 76 training.label_smoothing 0.9970013143787401 +169 77 model.embedding_dim 2.0 +169 77 training.batch_size 0.0 +169 77 training.label_smoothing 0.1285174873154065 +169 78 model.embedding_dim 1.0 +169 78 training.batch_size 1.0 +169 78 training.label_smoothing 0.002585996171290145 +169 79 model.embedding_dim 0.0 +169 79 training.batch_size 1.0 +169 79 training.label_smoothing 0.0017583258918151658 +169 80 model.embedding_dim 1.0 +169 80 training.batch_size 1.0 +169 80 training.label_smoothing 0.001497803976281152 +169 81 model.embedding_dim 1.0 +169 81 training.batch_size 2.0 +169 81 training.label_smoothing 0.0059479858931052254 +169 82 model.embedding_dim 1.0 +169 82 training.batch_size 1.0 +169 82 training.label_smoothing 0.1349461745289772 +169 83 model.embedding_dim 2.0 +169 83 training.batch_size 1.0 +169 83 training.label_smoothing 0.3751012347954685 +169 84 model.embedding_dim 0.0 +169 84 training.batch_size 2.0 +169 84 training.label_smoothing 0.13653556093965752 +169 85 model.embedding_dim 2.0 +169 85 training.batch_size 1.0 +169 85 training.label_smoothing 0.19277643820039125 +169 86 model.embedding_dim 2.0 +169 86 training.batch_size 0.0 +169 86 training.label_smoothing 0.06535064858421316 +169 87 model.embedding_dim 2.0 +169 87 training.batch_size 2.0 +169 87 training.label_smoothing 0.02782558379153808 +169 88 
model.embedding_dim 2.0 +169 88 training.batch_size 0.0 +169 88 training.label_smoothing 0.018057156604492332 +169 89 model.embedding_dim 1.0 +169 89 training.batch_size 2.0 +169 89 training.label_smoothing 0.0027778943643951047 +169 90 model.embedding_dim 2.0 +169 90 training.batch_size 2.0 +169 90 training.label_smoothing 0.0010230507123507669 +169 91 model.embedding_dim 0.0 +169 91 training.batch_size 1.0 +169 91 training.label_smoothing 0.0028621979251344347 +169 92 model.embedding_dim 2.0 +169 92 training.batch_size 1.0 +169 92 training.label_smoothing 0.001820797777123628 +169 93 model.embedding_dim 0.0 +169 93 training.batch_size 0.0 +169 93 training.label_smoothing 0.04622780225140046 +169 94 model.embedding_dim 2.0 +169 94 training.batch_size 0.0 +169 94 training.label_smoothing 0.22700117774881856 +169 95 model.embedding_dim 1.0 +169 95 training.batch_size 2.0 +169 95 training.label_smoothing 0.0012959905181830515 +169 96 model.embedding_dim 2.0 +169 96 training.batch_size 2.0 +169 96 training.label_smoothing 0.0060468599016471536 +169 97 model.embedding_dim 1.0 +169 97 training.batch_size 1.0 +169 97 training.label_smoothing 0.014936643399519473 +169 98 model.embedding_dim 1.0 +169 98 training.batch_size 1.0 +169 98 training.label_smoothing 0.0040715936475460065 +169 99 model.embedding_dim 1.0 +169 99 training.batch_size 1.0 +169 99 training.label_smoothing 0.0017366579186383839 +169 100 model.embedding_dim 0.0 +169 100 training.batch_size 1.0 +169 100 training.label_smoothing 0.06637224382810977 +169 1 dataset """kinships""" +169 1 model """distmult""" +169 1 loss """softplus""" +169 1 regularizer """no""" +169 1 optimizer """adadelta""" +169 1 training_loop """lcwa""" +169 1 evaluator """rankbased""" +169 2 dataset """kinships""" +169 2 model """distmult""" +169 2 loss """softplus""" +169 2 regularizer """no""" +169 2 optimizer """adadelta""" +169 2 training_loop """lcwa""" +169 2 evaluator """rankbased""" +169 3 dataset """kinships""" +169 3 model 
"""distmult""" +169 3 loss """softplus""" +169 3 regularizer """no""" +169 3 optimizer """adadelta""" +169 3 training_loop """lcwa""" +169 3 evaluator """rankbased""" +169 4 dataset """kinships""" +169 4 model """distmult""" +169 4 loss """softplus""" +169 4 regularizer """no""" +169 4 optimizer """adadelta""" +169 4 training_loop """lcwa""" +169 4 evaluator """rankbased""" +169 5 dataset """kinships""" +169 5 model """distmult""" +169 5 loss """softplus""" +169 5 regularizer """no""" +169 5 optimizer """adadelta""" +169 5 training_loop """lcwa""" +169 5 evaluator """rankbased""" +169 6 dataset """kinships""" +169 6 model """distmult""" +169 6 loss """softplus""" +169 6 regularizer """no""" +169 6 optimizer """adadelta""" +169 6 training_loop """lcwa""" +169 6 evaluator """rankbased""" +169 7 dataset """kinships""" +169 7 model """distmult""" +169 7 loss """softplus""" +169 7 regularizer """no""" +169 7 optimizer """adadelta""" +169 7 training_loop """lcwa""" +169 7 evaluator """rankbased""" +169 8 dataset """kinships""" +169 8 model """distmult""" +169 8 loss """softplus""" +169 8 regularizer """no""" +169 8 optimizer """adadelta""" +169 8 training_loop """lcwa""" +169 8 evaluator """rankbased""" +169 9 dataset """kinships""" +169 9 model """distmult""" +169 9 loss """softplus""" +169 9 regularizer """no""" +169 9 optimizer """adadelta""" +169 9 training_loop """lcwa""" +169 9 evaluator """rankbased""" +169 10 dataset """kinships""" +169 10 model """distmult""" +169 10 loss """softplus""" +169 10 regularizer """no""" +169 10 optimizer """adadelta""" +169 10 training_loop """lcwa""" +169 10 evaluator """rankbased""" +169 11 dataset """kinships""" +169 11 model """distmult""" +169 11 loss """softplus""" +169 11 regularizer """no""" +169 11 optimizer """adadelta""" +169 11 training_loop """lcwa""" +169 11 evaluator """rankbased""" +169 12 dataset """kinships""" +169 12 model """distmult""" +169 12 loss """softplus""" +169 12 regularizer """no""" +169 12 optimizer 
"""adadelta""" +169 12 training_loop """lcwa""" +169 12 evaluator """rankbased""" +169 13 dataset """kinships""" +169 13 model """distmult""" +169 13 loss """softplus""" +169 13 regularizer """no""" +169 13 optimizer """adadelta""" +169 13 training_loop """lcwa""" +169 13 evaluator """rankbased""" +169 14 dataset """kinships""" +169 14 model """distmult""" +169 14 loss """softplus""" +169 14 regularizer """no""" +169 14 optimizer """adadelta""" +169 14 training_loop """lcwa""" +169 14 evaluator """rankbased""" +169 15 dataset """kinships""" +169 15 model """distmult""" +169 15 loss """softplus""" +169 15 regularizer """no""" +169 15 optimizer """adadelta""" +169 15 training_loop """lcwa""" +169 15 evaluator """rankbased""" +169 16 dataset """kinships""" +169 16 model """distmult""" +169 16 loss """softplus""" +169 16 regularizer """no""" +169 16 optimizer """adadelta""" +169 16 training_loop """lcwa""" +169 16 evaluator """rankbased""" +169 17 dataset """kinships""" +169 17 model """distmult""" +169 17 loss """softplus""" +169 17 regularizer """no""" +169 17 optimizer """adadelta""" +169 17 training_loop """lcwa""" +169 17 evaluator """rankbased""" +169 18 dataset """kinships""" +169 18 model """distmult""" +169 18 loss """softplus""" +169 18 regularizer """no""" +169 18 optimizer """adadelta""" +169 18 training_loop """lcwa""" +169 18 evaluator """rankbased""" +169 19 dataset """kinships""" +169 19 model """distmult""" +169 19 loss """softplus""" +169 19 regularizer """no""" +169 19 optimizer """adadelta""" +169 19 training_loop """lcwa""" +169 19 evaluator """rankbased""" +169 20 dataset """kinships""" +169 20 model """distmult""" +169 20 loss """softplus""" +169 20 regularizer """no""" +169 20 optimizer """adadelta""" +169 20 training_loop """lcwa""" +169 20 evaluator """rankbased""" +169 21 dataset """kinships""" +169 21 model """distmult""" +169 21 loss """softplus""" +169 21 regularizer """no""" +169 21 optimizer """adadelta""" +169 21 training_loop 
"""lcwa""" +169 21 evaluator """rankbased""" +169 22 dataset """kinships""" +169 22 model """distmult""" +169 22 loss """softplus""" +169 22 regularizer """no""" +169 22 optimizer """adadelta""" +169 22 training_loop """lcwa""" +169 22 evaluator """rankbased""" +169 23 dataset """kinships""" +169 23 model """distmult""" +169 23 loss """softplus""" +169 23 regularizer """no""" +169 23 optimizer """adadelta""" +169 23 training_loop """lcwa""" +169 23 evaluator """rankbased""" +169 24 dataset """kinships""" +169 24 model """distmult""" +169 24 loss """softplus""" +169 24 regularizer """no""" +169 24 optimizer """adadelta""" +169 24 training_loop """lcwa""" +169 24 evaluator """rankbased""" +169 25 dataset """kinships""" +169 25 model """distmult""" +169 25 loss """softplus""" +169 25 regularizer """no""" +169 25 optimizer """adadelta""" +169 25 training_loop """lcwa""" +169 25 evaluator """rankbased""" +169 26 dataset """kinships""" +169 26 model """distmult""" +169 26 loss """softplus""" +169 26 regularizer """no""" +169 26 optimizer """adadelta""" +169 26 training_loop """lcwa""" +169 26 evaluator """rankbased""" +169 27 dataset """kinships""" +169 27 model """distmult""" +169 27 loss """softplus""" +169 27 regularizer """no""" +169 27 optimizer """adadelta""" +169 27 training_loop """lcwa""" +169 27 evaluator """rankbased""" +169 28 dataset """kinships""" +169 28 model """distmult""" +169 28 loss """softplus""" +169 28 regularizer """no""" +169 28 optimizer """adadelta""" +169 28 training_loop """lcwa""" +169 28 evaluator """rankbased""" +169 29 dataset """kinships""" +169 29 model """distmult""" +169 29 loss """softplus""" +169 29 regularizer """no""" +169 29 optimizer """adadelta""" +169 29 training_loop """lcwa""" +169 29 evaluator """rankbased""" +169 30 dataset """kinships""" +169 30 model """distmult""" +169 30 loss """softplus""" +169 30 regularizer """no""" +169 30 optimizer """adadelta""" +169 30 training_loop """lcwa""" +169 30 evaluator """rankbased""" 
+169 31 dataset """kinships""" +169 31 model """distmult""" +169 31 loss """softplus""" +169 31 regularizer """no""" +169 31 optimizer """adadelta""" +169 31 training_loop """lcwa""" +169 31 evaluator """rankbased""" +169 32 dataset """kinships""" +169 32 model """distmult""" +169 32 loss """softplus""" +169 32 regularizer """no""" +169 32 optimizer """adadelta""" +169 32 training_loop """lcwa""" +169 32 evaluator """rankbased""" +169 33 dataset """kinships""" +169 33 model """distmult""" +169 33 loss """softplus""" +169 33 regularizer """no""" +169 33 optimizer """adadelta""" +169 33 training_loop """lcwa""" +169 33 evaluator """rankbased""" +169 34 dataset """kinships""" +169 34 model """distmult""" +169 34 loss """softplus""" +169 34 regularizer """no""" +169 34 optimizer """adadelta""" +169 34 training_loop """lcwa""" +169 34 evaluator """rankbased""" +169 35 dataset """kinships""" +169 35 model """distmult""" +169 35 loss """softplus""" +169 35 regularizer """no""" +169 35 optimizer """adadelta""" +169 35 training_loop """lcwa""" +169 35 evaluator """rankbased""" +169 36 dataset """kinships""" +169 36 model """distmult""" +169 36 loss """softplus""" +169 36 regularizer """no""" +169 36 optimizer """adadelta""" +169 36 training_loop """lcwa""" +169 36 evaluator """rankbased""" +169 37 dataset """kinships""" +169 37 model """distmult""" +169 37 loss """softplus""" +169 37 regularizer """no""" +169 37 optimizer """adadelta""" +169 37 training_loop """lcwa""" +169 37 evaluator """rankbased""" +169 38 dataset """kinships""" +169 38 model """distmult""" +169 38 loss """softplus""" +169 38 regularizer """no""" +169 38 optimizer """adadelta""" +169 38 training_loop """lcwa""" +169 38 evaluator """rankbased""" +169 39 dataset """kinships""" +169 39 model """distmult""" +169 39 loss """softplus""" +169 39 regularizer """no""" +169 39 optimizer """adadelta""" +169 39 training_loop """lcwa""" +169 39 evaluator """rankbased""" +169 40 dataset """kinships""" +169 40 model 
"""distmult""" +169 40 loss """softplus""" +169 40 regularizer """no""" +169 40 optimizer """adadelta""" +169 40 training_loop """lcwa""" +169 40 evaluator """rankbased""" +169 41 dataset """kinships""" +169 41 model """distmult""" +169 41 loss """softplus""" +169 41 regularizer """no""" +169 41 optimizer """adadelta""" +169 41 training_loop """lcwa""" +169 41 evaluator """rankbased""" +169 42 dataset """kinships""" +169 42 model """distmult""" +169 42 loss """softplus""" +169 42 regularizer """no""" +169 42 optimizer """adadelta""" +169 42 training_loop """lcwa""" +169 42 evaluator """rankbased""" +169 43 dataset """kinships""" +169 43 model """distmult""" +169 43 loss """softplus""" +169 43 regularizer """no""" +169 43 optimizer """adadelta""" +169 43 training_loop """lcwa""" +169 43 evaluator """rankbased""" +169 44 dataset """kinships""" +169 44 model """distmult""" +169 44 loss """softplus""" +169 44 regularizer """no""" +169 44 optimizer """adadelta""" +169 44 training_loop """lcwa""" +169 44 evaluator """rankbased""" +169 45 dataset """kinships""" +169 45 model """distmult""" +169 45 loss """softplus""" +169 45 regularizer """no""" +169 45 optimizer """adadelta""" +169 45 training_loop """lcwa""" +169 45 evaluator """rankbased""" +169 46 dataset """kinships""" +169 46 model """distmult""" +169 46 loss """softplus""" +169 46 regularizer """no""" +169 46 optimizer """adadelta""" +169 46 training_loop """lcwa""" +169 46 evaluator """rankbased""" +169 47 dataset """kinships""" +169 47 model """distmult""" +169 47 loss """softplus""" +169 47 regularizer """no""" +169 47 optimizer """adadelta""" +169 47 training_loop """lcwa""" +169 47 evaluator """rankbased""" +169 48 dataset """kinships""" +169 48 model """distmult""" +169 48 loss """softplus""" +169 48 regularizer """no""" +169 48 optimizer """adadelta""" +169 48 training_loop """lcwa""" +169 48 evaluator """rankbased""" +169 49 dataset """kinships""" +169 49 model """distmult""" +169 49 loss """softplus""" 
+169 49 regularizer """no""" +169 49 optimizer """adadelta""" +169 49 training_loop """lcwa""" +169 49 evaluator """rankbased""" +169 50 dataset """kinships""" +169 50 model """distmult""" +169 50 loss """softplus""" +169 50 regularizer """no""" +169 50 optimizer """adadelta""" +169 50 training_loop """lcwa""" +169 50 evaluator """rankbased""" +169 51 dataset """kinships""" +169 51 model """distmult""" +169 51 loss """softplus""" +169 51 regularizer """no""" +169 51 optimizer """adadelta""" +169 51 training_loop """lcwa""" +169 51 evaluator """rankbased""" +169 52 dataset """kinships""" +169 52 model """distmult""" +169 52 loss """softplus""" +169 52 regularizer """no""" +169 52 optimizer """adadelta""" +169 52 training_loop """lcwa""" +169 52 evaluator """rankbased""" +169 53 dataset """kinships""" +169 53 model """distmult""" +169 53 loss """softplus""" +169 53 regularizer """no""" +169 53 optimizer """adadelta""" +169 53 training_loop """lcwa""" +169 53 evaluator """rankbased""" +169 54 dataset """kinships""" +169 54 model """distmult""" +169 54 loss """softplus""" +169 54 regularizer """no""" +169 54 optimizer """adadelta""" +169 54 training_loop """lcwa""" +169 54 evaluator """rankbased""" +169 55 dataset """kinships""" +169 55 model """distmult""" +169 55 loss """softplus""" +169 55 regularizer """no""" +169 55 optimizer """adadelta""" +169 55 training_loop """lcwa""" +169 55 evaluator """rankbased""" +169 56 dataset """kinships""" +169 56 model """distmult""" +169 56 loss """softplus""" +169 56 regularizer """no""" +169 56 optimizer """adadelta""" +169 56 training_loop """lcwa""" +169 56 evaluator """rankbased""" +169 57 dataset """kinships""" +169 57 model """distmult""" +169 57 loss """softplus""" +169 57 regularizer """no""" +169 57 optimizer """adadelta""" +169 57 training_loop """lcwa""" +169 57 evaluator """rankbased""" +169 58 dataset """kinships""" +169 58 model """distmult""" +169 58 loss """softplus""" +169 58 regularizer """no""" +169 58 optimizer 
"""adadelta""" +169 58 training_loop """lcwa""" +169 58 evaluator """rankbased""" +169 59 dataset """kinships""" +169 59 model """distmult""" +169 59 loss """softplus""" +169 59 regularizer """no""" +169 59 optimizer """adadelta""" +169 59 training_loop """lcwa""" +169 59 evaluator """rankbased""" +169 60 dataset """kinships""" +169 60 model """distmult""" +169 60 loss """softplus""" +169 60 regularizer """no""" +169 60 optimizer """adadelta""" +169 60 training_loop """lcwa""" +169 60 evaluator """rankbased""" +169 61 dataset """kinships""" +169 61 model """distmult""" +169 61 loss """softplus""" +169 61 regularizer """no""" +169 61 optimizer """adadelta""" +169 61 training_loop """lcwa""" +169 61 evaluator """rankbased""" +169 62 dataset """kinships""" +169 62 model """distmult""" +169 62 loss """softplus""" +169 62 regularizer """no""" +169 62 optimizer """adadelta""" +169 62 training_loop """lcwa""" +169 62 evaluator """rankbased""" +169 63 dataset """kinships""" +169 63 model """distmult""" +169 63 loss """softplus""" +169 63 regularizer """no""" +169 63 optimizer """adadelta""" +169 63 training_loop """lcwa""" +169 63 evaluator """rankbased""" +169 64 dataset """kinships""" +169 64 model """distmult""" +169 64 loss """softplus""" +169 64 regularizer """no""" +169 64 optimizer """adadelta""" +169 64 training_loop """lcwa""" +169 64 evaluator """rankbased""" +169 65 dataset """kinships""" +169 65 model """distmult""" +169 65 loss """softplus""" +169 65 regularizer """no""" +169 65 optimizer """adadelta""" +169 65 training_loop """lcwa""" +169 65 evaluator """rankbased""" +169 66 dataset """kinships""" +169 66 model """distmult""" +169 66 loss """softplus""" +169 66 regularizer """no""" +169 66 optimizer """adadelta""" +169 66 training_loop """lcwa""" +169 66 evaluator """rankbased""" +169 67 dataset """kinships""" +169 67 model """distmult""" +169 67 loss """softplus""" +169 67 regularizer """no""" +169 67 optimizer """adadelta""" +169 67 training_loop 
"""lcwa""" +169 67 evaluator """rankbased""" +169 68 dataset """kinships""" +169 68 model """distmult""" +169 68 loss """softplus""" +169 68 regularizer """no""" +169 68 optimizer """adadelta""" +169 68 training_loop """lcwa""" +169 68 evaluator """rankbased""" +169 69 dataset """kinships""" +169 69 model """distmult""" +169 69 loss """softplus""" +169 69 regularizer """no""" +169 69 optimizer """adadelta""" +169 69 training_loop """lcwa""" +169 69 evaluator """rankbased""" +169 70 dataset """kinships""" +169 70 model """distmult""" +169 70 loss """softplus""" +169 70 regularizer """no""" +169 70 optimizer """adadelta""" +169 70 training_loop """lcwa""" +169 70 evaluator """rankbased""" +169 71 dataset """kinships""" +169 71 model """distmult""" +169 71 loss """softplus""" +169 71 regularizer """no""" +169 71 optimizer """adadelta""" +169 71 training_loop """lcwa""" +169 71 evaluator """rankbased""" +169 72 dataset """kinships""" +169 72 model """distmult""" +169 72 loss """softplus""" +169 72 regularizer """no""" +169 72 optimizer """adadelta""" +169 72 training_loop """lcwa""" +169 72 evaluator """rankbased""" +169 73 dataset """kinships""" +169 73 model """distmult""" +169 73 loss """softplus""" +169 73 regularizer """no""" +169 73 optimizer """adadelta""" +169 73 training_loop """lcwa""" +169 73 evaluator """rankbased""" +169 74 dataset """kinships""" +169 74 model """distmult""" +169 74 loss """softplus""" +169 74 regularizer """no""" +169 74 optimizer """adadelta""" +169 74 training_loop """lcwa""" +169 74 evaluator """rankbased""" +169 75 dataset """kinships""" +169 75 model """distmult""" +169 75 loss """softplus""" +169 75 regularizer """no""" +169 75 optimizer """adadelta""" +169 75 training_loop """lcwa""" +169 75 evaluator """rankbased""" +169 76 dataset """kinships""" +169 76 model """distmult""" +169 76 loss """softplus""" +169 76 regularizer """no""" +169 76 optimizer """adadelta""" +169 76 training_loop """lcwa""" +169 76 evaluator """rankbased""" 
+169 77 dataset """kinships""" +169 77 model """distmult""" +169 77 loss """softplus""" +169 77 regularizer """no""" +169 77 optimizer """adadelta""" +169 77 training_loop """lcwa""" +169 77 evaluator """rankbased""" +169 78 dataset """kinships""" +169 78 model """distmult""" +169 78 loss """softplus""" +169 78 regularizer """no""" +169 78 optimizer """adadelta""" +169 78 training_loop """lcwa""" +169 78 evaluator """rankbased""" +169 79 dataset """kinships""" +169 79 model """distmult""" +169 79 loss """softplus""" +169 79 regularizer """no""" +169 79 optimizer """adadelta""" +169 79 training_loop """lcwa""" +169 79 evaluator """rankbased""" +169 80 dataset """kinships""" +169 80 model """distmult""" +169 80 loss """softplus""" +169 80 regularizer """no""" +169 80 optimizer """adadelta""" +169 80 training_loop """lcwa""" +169 80 evaluator """rankbased""" +169 81 dataset """kinships""" +169 81 model """distmult""" +169 81 loss """softplus""" +169 81 regularizer """no""" +169 81 optimizer """adadelta""" +169 81 training_loop """lcwa""" +169 81 evaluator """rankbased""" +169 82 dataset """kinships""" +169 82 model """distmult""" +169 82 loss """softplus""" +169 82 regularizer """no""" +169 82 optimizer """adadelta""" +169 82 training_loop """lcwa""" +169 82 evaluator """rankbased""" +169 83 dataset """kinships""" +169 83 model """distmult""" +169 83 loss """softplus""" +169 83 regularizer """no""" +169 83 optimizer """adadelta""" +169 83 training_loop """lcwa""" +169 83 evaluator """rankbased""" +169 84 dataset """kinships""" +169 84 model """distmult""" +169 84 loss """softplus""" +169 84 regularizer """no""" +169 84 optimizer """adadelta""" +169 84 training_loop """lcwa""" +169 84 evaluator """rankbased""" +169 85 dataset """kinships""" +169 85 model """distmult""" +169 85 loss """softplus""" +169 85 regularizer """no""" +169 85 optimizer """adadelta""" +169 85 training_loop """lcwa""" +169 85 evaluator """rankbased""" +169 86 dataset """kinships""" +169 86 model 
"""distmult""" +169 86 loss """softplus""" +169 86 regularizer """no""" +169 86 optimizer """adadelta""" +169 86 training_loop """lcwa""" +169 86 evaluator """rankbased""" +169 87 dataset """kinships""" +169 87 model """distmult""" +169 87 loss """softplus""" +169 87 regularizer """no""" +169 87 optimizer """adadelta""" +169 87 training_loop """lcwa""" +169 87 evaluator """rankbased""" +169 88 dataset """kinships""" +169 88 model """distmult""" +169 88 loss """softplus""" +169 88 regularizer """no""" +169 88 optimizer """adadelta""" +169 88 training_loop """lcwa""" +169 88 evaluator """rankbased""" +169 89 dataset """kinships""" +169 89 model """distmult""" +169 89 loss """softplus""" +169 89 regularizer """no""" +169 89 optimizer """adadelta""" +169 89 training_loop """lcwa""" +169 89 evaluator """rankbased""" +169 90 dataset """kinships""" +169 90 model """distmult""" +169 90 loss """softplus""" +169 90 regularizer """no""" +169 90 optimizer """adadelta""" +169 90 training_loop """lcwa""" +169 90 evaluator """rankbased""" +169 91 dataset """kinships""" +169 91 model """distmult""" +169 91 loss """softplus""" +169 91 regularizer """no""" +169 91 optimizer """adadelta""" +169 91 training_loop """lcwa""" +169 91 evaluator """rankbased""" +169 92 dataset """kinships""" +169 92 model """distmult""" +169 92 loss """softplus""" +169 92 regularizer """no""" +169 92 optimizer """adadelta""" +169 92 training_loop """lcwa""" +169 92 evaluator """rankbased""" +169 93 dataset """kinships""" +169 93 model """distmult""" +169 93 loss """softplus""" +169 93 regularizer """no""" +169 93 optimizer """adadelta""" +169 93 training_loop """lcwa""" +169 93 evaluator """rankbased""" +169 94 dataset """kinships""" +169 94 model """distmult""" +169 94 loss """softplus""" +169 94 regularizer """no""" +169 94 optimizer """adadelta""" +169 94 training_loop """lcwa""" +169 94 evaluator """rankbased""" +169 95 dataset """kinships""" +169 95 model """distmult""" +169 95 loss """softplus""" 
+169 95 regularizer """no""" +169 95 optimizer """adadelta""" +169 95 training_loop """lcwa""" +169 95 evaluator """rankbased""" +169 96 dataset """kinships""" +169 96 model """distmult""" +169 96 loss """softplus""" +169 96 regularizer """no""" +169 96 optimizer """adadelta""" +169 96 training_loop """lcwa""" +169 96 evaluator """rankbased""" +169 97 dataset """kinships""" +169 97 model """distmult""" +169 97 loss """softplus""" +169 97 regularizer """no""" +169 97 optimizer """adadelta""" +169 97 training_loop """lcwa""" +169 97 evaluator """rankbased""" +169 98 dataset """kinships""" +169 98 model """distmult""" +169 98 loss """softplus""" +169 98 regularizer """no""" +169 98 optimizer """adadelta""" +169 98 training_loop """lcwa""" +169 98 evaluator """rankbased""" +169 99 dataset """kinships""" +169 99 model """distmult""" +169 99 loss """softplus""" +169 99 regularizer """no""" +169 99 optimizer """adadelta""" +169 99 training_loop """lcwa""" +169 99 evaluator """rankbased""" +169 100 dataset """kinships""" +169 100 model """distmult""" +169 100 loss """softplus""" +169 100 regularizer """no""" +169 100 optimizer """adadelta""" +169 100 training_loop """lcwa""" +169 100 evaluator """rankbased""" +170 1 model.embedding_dim 1.0 +170 1 training.batch_size 2.0 +170 1 training.label_smoothing 0.04887406391419205 +170 2 model.embedding_dim 0.0 +170 2 training.batch_size 0.0 +170 2 training.label_smoothing 0.004705996183515444 +170 3 model.embedding_dim 2.0 +170 3 training.batch_size 0.0 +170 3 training.label_smoothing 0.008428730265301748 +170 4 model.embedding_dim 2.0 +170 4 training.batch_size 0.0 +170 4 training.label_smoothing 0.05342950551414689 +170 5 model.embedding_dim 2.0 +170 5 training.batch_size 2.0 +170 5 training.label_smoothing 0.0012769564586503964 +170 6 model.embedding_dim 0.0 +170 6 training.batch_size 2.0 +170 6 training.label_smoothing 0.5845014635414443 +170 7 model.embedding_dim 1.0 +170 7 training.batch_size 2.0 +170 7 
training.label_smoothing 0.04889430521960502 +170 8 model.embedding_dim 1.0 +170 8 training.batch_size 2.0 +170 8 training.label_smoothing 0.46442422631721825 +170 9 model.embedding_dim 0.0 +170 9 training.batch_size 2.0 +170 9 training.label_smoothing 0.9224920864165712 +170 10 model.embedding_dim 0.0 +170 10 training.batch_size 2.0 +170 10 training.label_smoothing 0.03160511726182901 +170 11 model.embedding_dim 0.0 +170 11 training.batch_size 0.0 +170 11 training.label_smoothing 0.0031043405411267846 +170 12 model.embedding_dim 2.0 +170 12 training.batch_size 1.0 +170 12 training.label_smoothing 0.054394250040166174 +170 13 model.embedding_dim 2.0 +170 13 training.batch_size 2.0 +170 13 training.label_smoothing 0.0021091754139442222 +170 14 model.embedding_dim 2.0 +170 14 training.batch_size 0.0 +170 14 training.label_smoothing 0.07090941877988514 +170 15 model.embedding_dim 2.0 +170 15 training.batch_size 2.0 +170 15 training.label_smoothing 0.011062037560604702 +170 16 model.embedding_dim 2.0 +170 16 training.batch_size 0.0 +170 16 training.label_smoothing 0.02835121147286618 +170 17 model.embedding_dim 0.0 +170 17 training.batch_size 0.0 +170 17 training.label_smoothing 0.002336115670563543 +170 18 model.embedding_dim 2.0 +170 18 training.batch_size 2.0 +170 18 training.label_smoothing 0.18994588658899442 +170 19 model.embedding_dim 1.0 +170 19 training.batch_size 1.0 +170 19 training.label_smoothing 0.005077248512966371 +170 20 model.embedding_dim 2.0 +170 20 training.batch_size 2.0 +170 20 training.label_smoothing 0.003706188714036446 +170 21 model.embedding_dim 2.0 +170 21 training.batch_size 2.0 +170 21 training.label_smoothing 0.022922701445373308 +170 22 model.embedding_dim 0.0 +170 22 training.batch_size 1.0 +170 22 training.label_smoothing 0.13147761667383495 +170 23 model.embedding_dim 2.0 +170 23 training.batch_size 1.0 +170 23 training.label_smoothing 0.2396265203522649 +170 24 model.embedding_dim 1.0 +170 24 training.batch_size 2.0 +170 24 
training.label_smoothing 0.01837579211780593 +170 25 model.embedding_dim 0.0 +170 25 training.batch_size 2.0 +170 25 training.label_smoothing 0.2254058911729821 +170 26 model.embedding_dim 0.0 +170 26 training.batch_size 0.0 +170 26 training.label_smoothing 0.01174187715426265 +170 27 model.embedding_dim 2.0 +170 27 training.batch_size 0.0 +170 27 training.label_smoothing 0.018240179266381964 +170 28 model.embedding_dim 0.0 +170 28 training.batch_size 1.0 +170 28 training.label_smoothing 0.006519040430437901 +170 29 model.embedding_dim 0.0 +170 29 training.batch_size 0.0 +170 29 training.label_smoothing 0.037916259832551626 +170 30 model.embedding_dim 1.0 +170 30 training.batch_size 1.0 +170 30 training.label_smoothing 0.4224012790194171 +170 31 model.embedding_dim 0.0 +170 31 training.batch_size 2.0 +170 31 training.label_smoothing 0.003822438056127545 +170 32 model.embedding_dim 0.0 +170 32 training.batch_size 0.0 +170 32 training.label_smoothing 0.028173416345745366 +170 33 model.embedding_dim 0.0 +170 33 training.batch_size 0.0 +170 33 training.label_smoothing 0.1414975841262001 +170 34 model.embedding_dim 2.0 +170 34 training.batch_size 1.0 +170 34 training.label_smoothing 0.04439734844685627 +170 35 model.embedding_dim 0.0 +170 35 training.batch_size 1.0 +170 35 training.label_smoothing 0.011901741632992398 +170 36 model.embedding_dim 2.0 +170 36 training.batch_size 1.0 +170 36 training.label_smoothing 0.002032684126127578 +170 37 model.embedding_dim 1.0 +170 37 training.batch_size 2.0 +170 37 training.label_smoothing 0.407092021055883 +170 38 model.embedding_dim 1.0 +170 38 training.batch_size 2.0 +170 38 training.label_smoothing 0.0010333403314891352 +170 39 model.embedding_dim 1.0 +170 39 training.batch_size 2.0 +170 39 training.label_smoothing 0.021510199428313417 +170 40 model.embedding_dim 1.0 +170 40 training.batch_size 0.0 +170 40 training.label_smoothing 0.014618611420963531 +170 41 model.embedding_dim 2.0 +170 41 training.batch_size 2.0 +170 41 
training.label_smoothing 0.005581962286688614 +170 42 model.embedding_dim 0.0 +170 42 training.batch_size 0.0 +170 42 training.label_smoothing 0.7140570593292386 +170 43 model.embedding_dim 0.0 +170 43 training.batch_size 0.0 +170 43 training.label_smoothing 0.0013207852803999679 +170 44 model.embedding_dim 2.0 +170 44 training.batch_size 0.0 +170 44 training.label_smoothing 0.005889120683116323 +170 45 model.embedding_dim 2.0 +170 45 training.batch_size 0.0 +170 45 training.label_smoothing 0.04979660141290751 +170 46 model.embedding_dim 2.0 +170 46 training.batch_size 0.0 +170 46 training.label_smoothing 0.05037032751721646 +170 47 model.embedding_dim 2.0 +170 47 training.batch_size 2.0 +170 47 training.label_smoothing 0.003835783046363002 +170 48 model.embedding_dim 1.0 +170 48 training.batch_size 0.0 +170 48 training.label_smoothing 0.0021957189540926544 +170 49 model.embedding_dim 1.0 +170 49 training.batch_size 0.0 +170 49 training.label_smoothing 0.20387198626046807 +170 50 model.embedding_dim 2.0 +170 50 training.batch_size 0.0 +170 50 training.label_smoothing 0.015084794311018387 +170 51 model.embedding_dim 2.0 +170 51 training.batch_size 2.0 +170 51 training.label_smoothing 0.9168271949616723 +170 52 model.embedding_dim 1.0 +170 52 training.batch_size 1.0 +170 52 training.label_smoothing 0.023364647138368066 +170 53 model.embedding_dim 0.0 +170 53 training.batch_size 0.0 +170 53 training.label_smoothing 0.00198332747685844 +170 54 model.embedding_dim 2.0 +170 54 training.batch_size 2.0 +170 54 training.label_smoothing 0.40492800139267227 +170 55 model.embedding_dim 0.0 +170 55 training.batch_size 1.0 +170 55 training.label_smoothing 0.002218697973956964 +170 56 model.embedding_dim 0.0 +170 56 training.batch_size 1.0 +170 56 training.label_smoothing 0.018261012253968572 +170 57 model.embedding_dim 1.0 +170 57 training.batch_size 1.0 +170 57 training.label_smoothing 0.006312527165921738 +170 58 model.embedding_dim 1.0 +170 58 training.batch_size 0.0 +170 58 
training.label_smoothing 0.002059290428075944 +170 59 model.embedding_dim 0.0 +170 59 training.batch_size 1.0 +170 59 training.label_smoothing 0.0030467198808392082 +170 60 model.embedding_dim 0.0 +170 60 training.batch_size 0.0 +170 60 training.label_smoothing 0.7210992531334723 +170 61 model.embedding_dim 0.0 +170 61 training.batch_size 2.0 +170 61 training.label_smoothing 0.2525704317147518 +170 62 model.embedding_dim 2.0 +170 62 training.batch_size 0.0 +170 62 training.label_smoothing 0.0721844184268986 +170 63 model.embedding_dim 1.0 +170 63 training.batch_size 2.0 +170 63 training.label_smoothing 0.02200286960043966 +170 64 model.embedding_dim 0.0 +170 64 training.batch_size 0.0 +170 64 training.label_smoothing 0.003165611359636183 +170 65 model.embedding_dim 1.0 +170 65 training.batch_size 1.0 +170 65 training.label_smoothing 0.2122462709733448 +170 66 model.embedding_dim 1.0 +170 66 training.batch_size 2.0 +170 66 training.label_smoothing 0.15512940608967152 +170 67 model.embedding_dim 1.0 +170 67 training.batch_size 1.0 +170 67 training.label_smoothing 0.011639668909048724 +170 68 model.embedding_dim 2.0 +170 68 training.batch_size 0.0 +170 68 training.label_smoothing 0.01969108440011917 +170 69 model.embedding_dim 1.0 +170 69 training.batch_size 0.0 +170 69 training.label_smoothing 0.0828960641261862 +170 70 model.embedding_dim 1.0 +170 70 training.batch_size 0.0 +170 70 training.label_smoothing 0.006404945607494583 +170 71 model.embedding_dim 1.0 +170 71 training.batch_size 1.0 +170 71 training.label_smoothing 0.3382854957831387 +170 72 model.embedding_dim 1.0 +170 72 training.batch_size 1.0 +170 72 training.label_smoothing 0.03536671786611278 +170 73 model.embedding_dim 2.0 +170 73 training.batch_size 0.0 +170 73 training.label_smoothing 0.034850441737798644 +170 74 model.embedding_dim 1.0 +170 74 training.batch_size 1.0 +170 74 training.label_smoothing 0.10695129514236724 +170 75 model.embedding_dim 2.0 +170 75 training.batch_size 2.0 +170 75 
training.label_smoothing 0.06858275690664432 +170 76 model.embedding_dim 2.0 +170 76 training.batch_size 1.0 +170 76 training.label_smoothing 0.6783571724686044 +170 77 model.embedding_dim 2.0 +170 77 training.batch_size 0.0 +170 77 training.label_smoothing 0.8855312070391109 +170 78 model.embedding_dim 2.0 +170 78 training.batch_size 1.0 +170 78 training.label_smoothing 0.19177768758976144 +170 79 model.embedding_dim 0.0 +170 79 training.batch_size 2.0 +170 79 training.label_smoothing 0.07123441877906068 +170 80 model.embedding_dim 0.0 +170 80 training.batch_size 1.0 +170 80 training.label_smoothing 0.00546981734191985 +170 81 model.embedding_dim 2.0 +170 81 training.batch_size 0.0 +170 81 training.label_smoothing 0.006441899459572282 +170 82 model.embedding_dim 2.0 +170 82 training.batch_size 2.0 +170 82 training.label_smoothing 0.0023803174221114195 +170 83 model.embedding_dim 0.0 +170 83 training.batch_size 0.0 +170 83 training.label_smoothing 0.27608721949818804 +170 84 model.embedding_dim 2.0 +170 84 training.batch_size 2.0 +170 84 training.label_smoothing 0.0030109748263762283 +170 85 model.embedding_dim 1.0 +170 85 training.batch_size 2.0 +170 85 training.label_smoothing 0.0017068256735286705 +170 86 model.embedding_dim 1.0 +170 86 training.batch_size 1.0 +170 86 training.label_smoothing 0.4075533193834827 +170 87 model.embedding_dim 1.0 +170 87 training.batch_size 1.0 +170 87 training.label_smoothing 0.2845438092489412 +170 88 model.embedding_dim 2.0 +170 88 training.batch_size 1.0 +170 88 training.label_smoothing 0.006985826480150931 +170 89 model.embedding_dim 1.0 +170 89 training.batch_size 0.0 +170 89 training.label_smoothing 0.021615043112642313 +170 90 model.embedding_dim 2.0 +170 90 training.batch_size 2.0 +170 90 training.label_smoothing 0.0040390329055448515 +170 91 model.embedding_dim 1.0 +170 91 training.batch_size 1.0 +170 91 training.label_smoothing 0.1478401690469272 +170 92 model.embedding_dim 0.0 +170 92 training.batch_size 0.0 +170 92 
training.label_smoothing 0.39587801484165575 +170 93 model.embedding_dim 1.0 +170 93 training.batch_size 1.0 +170 93 training.label_smoothing 0.5675793738664647 +170 94 model.embedding_dim 2.0 +170 94 training.batch_size 2.0 +170 94 training.label_smoothing 0.0022386392701218543 +170 95 model.embedding_dim 0.0 +170 95 training.batch_size 1.0 +170 95 training.label_smoothing 0.011733806492068963 +170 96 model.embedding_dim 0.0 +170 96 training.batch_size 2.0 +170 96 training.label_smoothing 0.0018488167687642427 +170 97 model.embedding_dim 1.0 +170 97 training.batch_size 0.0 +170 97 training.label_smoothing 0.7175080488510455 +170 98 model.embedding_dim 2.0 +170 98 training.batch_size 0.0 +170 98 training.label_smoothing 0.09907784891836194 +170 99 model.embedding_dim 1.0 +170 99 training.batch_size 2.0 +170 99 training.label_smoothing 0.5670379664675638 +170 100 model.embedding_dim 0.0 +170 100 training.batch_size 2.0 +170 100 training.label_smoothing 0.0010561344581074307 +170 1 dataset """kinships""" +170 1 model """distmult""" +170 1 loss """bceaftersigmoid""" +170 1 regularizer """no""" +170 1 optimizer """adadelta""" +170 1 training_loop """lcwa""" +170 1 evaluator """rankbased""" +170 2 dataset """kinships""" +170 2 model """distmult""" +170 2 loss """bceaftersigmoid""" +170 2 regularizer """no""" +170 2 optimizer """adadelta""" +170 2 training_loop """lcwa""" +170 2 evaluator """rankbased""" +170 3 dataset """kinships""" +170 3 model """distmult""" +170 3 loss """bceaftersigmoid""" +170 3 regularizer """no""" +170 3 optimizer """adadelta""" +170 3 training_loop """lcwa""" +170 3 evaluator """rankbased""" +170 4 dataset """kinships""" +170 4 model """distmult""" +170 4 loss """bceaftersigmoid""" +170 4 regularizer """no""" +170 4 optimizer """adadelta""" +170 4 training_loop """lcwa""" +170 4 evaluator """rankbased""" +170 5 dataset """kinships""" +170 5 model """distmult""" +170 5 loss """bceaftersigmoid""" +170 5 regularizer """no""" +170 5 optimizer 
"""adadelta""" +170 5 training_loop """lcwa""" +170 5 evaluator """rankbased""" +170 6 dataset """kinships""" +170 6 model """distmult""" +170 6 loss """bceaftersigmoid""" +170 6 regularizer """no""" +170 6 optimizer """adadelta""" +170 6 training_loop """lcwa""" +170 6 evaluator """rankbased""" +170 7 dataset """kinships""" +170 7 model """distmult""" +170 7 loss """bceaftersigmoid""" +170 7 regularizer """no""" +170 7 optimizer """adadelta""" +170 7 training_loop """lcwa""" +170 7 evaluator """rankbased""" +170 8 dataset """kinships""" +170 8 model """distmult""" +170 8 loss """bceaftersigmoid""" +170 8 regularizer """no""" +170 8 optimizer """adadelta""" +170 8 training_loop """lcwa""" +170 8 evaluator """rankbased""" +170 9 dataset """kinships""" +170 9 model """distmult""" +170 9 loss """bceaftersigmoid""" +170 9 regularizer """no""" +170 9 optimizer """adadelta""" +170 9 training_loop """lcwa""" +170 9 evaluator """rankbased""" +170 10 dataset """kinships""" +170 10 model """distmult""" +170 10 loss """bceaftersigmoid""" +170 10 regularizer """no""" +170 10 optimizer """adadelta""" +170 10 training_loop """lcwa""" +170 10 evaluator """rankbased""" +170 11 dataset """kinships""" +170 11 model """distmult""" +170 11 loss """bceaftersigmoid""" +170 11 regularizer """no""" +170 11 optimizer """adadelta""" +170 11 training_loop """lcwa""" +170 11 evaluator """rankbased""" +170 12 dataset """kinships""" +170 12 model """distmult""" +170 12 loss """bceaftersigmoid""" +170 12 regularizer """no""" +170 12 optimizer """adadelta""" +170 12 training_loop """lcwa""" +170 12 evaluator """rankbased""" +170 13 dataset """kinships""" +170 13 model """distmult""" +170 13 loss """bceaftersigmoid""" +170 13 regularizer """no""" +170 13 optimizer """adadelta""" +170 13 training_loop """lcwa""" +170 13 evaluator """rankbased""" +170 14 dataset """kinships""" +170 14 model """distmult""" +170 14 loss """bceaftersigmoid""" +170 14 regularizer """no""" +170 14 optimizer 
"""adadelta""" +170 14 training_loop """lcwa""" +170 14 evaluator """rankbased""" +170 15 dataset """kinships""" +170 15 model """distmult""" +170 15 loss """bceaftersigmoid""" +170 15 regularizer """no""" +170 15 optimizer """adadelta""" +170 15 training_loop """lcwa""" +170 15 evaluator """rankbased""" +170 16 dataset """kinships""" +170 16 model """distmult""" +170 16 loss """bceaftersigmoid""" +170 16 regularizer """no""" +170 16 optimizer """adadelta""" +170 16 training_loop """lcwa""" +170 16 evaluator """rankbased""" +170 17 dataset """kinships""" +170 17 model """distmult""" +170 17 loss """bceaftersigmoid""" +170 17 regularizer """no""" +170 17 optimizer """adadelta""" +170 17 training_loop """lcwa""" +170 17 evaluator """rankbased""" +170 18 dataset """kinships""" +170 18 model """distmult""" +170 18 loss """bceaftersigmoid""" +170 18 regularizer """no""" +170 18 optimizer """adadelta""" +170 18 training_loop """lcwa""" +170 18 evaluator """rankbased""" +170 19 dataset """kinships""" +170 19 model """distmult""" +170 19 loss """bceaftersigmoid""" +170 19 regularizer """no""" +170 19 optimizer """adadelta""" +170 19 training_loop """lcwa""" +170 19 evaluator """rankbased""" +170 20 dataset """kinships""" +170 20 model """distmult""" +170 20 loss """bceaftersigmoid""" +170 20 regularizer """no""" +170 20 optimizer """adadelta""" +170 20 training_loop """lcwa""" +170 20 evaluator """rankbased""" +170 21 dataset """kinships""" +170 21 model """distmult""" +170 21 loss """bceaftersigmoid""" +170 21 regularizer """no""" +170 21 optimizer """adadelta""" +170 21 training_loop """lcwa""" +170 21 evaluator """rankbased""" +170 22 dataset """kinships""" +170 22 model """distmult""" +170 22 loss """bceaftersigmoid""" +170 22 regularizer """no""" +170 22 optimizer """adadelta""" +170 22 training_loop """lcwa""" +170 22 evaluator """rankbased""" +170 23 dataset """kinships""" +170 23 model """distmult""" +170 23 loss """bceaftersigmoid""" +170 23 regularizer """no""" 
+170 23 optimizer """adadelta""" +170 23 training_loop """lcwa""" +170 23 evaluator """rankbased""" +170 24 dataset """kinships""" +170 24 model """distmult""" +170 24 loss """bceaftersigmoid""" +170 24 regularizer """no""" +170 24 optimizer """adadelta""" +170 24 training_loop """lcwa""" +170 24 evaluator """rankbased""" +170 25 dataset """kinships""" +170 25 model """distmult""" +170 25 loss """bceaftersigmoid""" +170 25 regularizer """no""" +170 25 optimizer """adadelta""" +170 25 training_loop """lcwa""" +170 25 evaluator """rankbased""" +170 26 dataset """kinships""" +170 26 model """distmult""" +170 26 loss """bceaftersigmoid""" +170 26 regularizer """no""" +170 26 optimizer """adadelta""" +170 26 training_loop """lcwa""" +170 26 evaluator """rankbased""" +170 27 dataset """kinships""" +170 27 model """distmult""" +170 27 loss """bceaftersigmoid""" +170 27 regularizer """no""" +170 27 optimizer """adadelta""" +170 27 training_loop """lcwa""" +170 27 evaluator """rankbased""" +170 28 dataset """kinships""" +170 28 model """distmult""" +170 28 loss """bceaftersigmoid""" +170 28 regularizer """no""" +170 28 optimizer """adadelta""" +170 28 training_loop """lcwa""" +170 28 evaluator """rankbased""" +170 29 dataset """kinships""" +170 29 model """distmult""" +170 29 loss """bceaftersigmoid""" +170 29 regularizer """no""" +170 29 optimizer """adadelta""" +170 29 training_loop """lcwa""" +170 29 evaluator """rankbased""" +170 30 dataset """kinships""" +170 30 model """distmult""" +170 30 loss """bceaftersigmoid""" +170 30 regularizer """no""" +170 30 optimizer """adadelta""" +170 30 training_loop """lcwa""" +170 30 evaluator """rankbased""" +170 31 dataset """kinships""" +170 31 model """distmult""" +170 31 loss """bceaftersigmoid""" +170 31 regularizer """no""" +170 31 optimizer """adadelta""" +170 31 training_loop """lcwa""" +170 31 evaluator """rankbased""" +170 32 dataset """kinships""" +170 32 model """distmult""" +170 32 loss """bceaftersigmoid""" +170 32 
regularizer """no""" +170 32 optimizer """adadelta""" +170 32 training_loop """lcwa""" +170 32 evaluator """rankbased""" +170 33 dataset """kinships""" +170 33 model """distmult""" +170 33 loss """bceaftersigmoid""" +170 33 regularizer """no""" +170 33 optimizer """adadelta""" +170 33 training_loop """lcwa""" +170 33 evaluator """rankbased""" +170 34 dataset """kinships""" +170 34 model """distmult""" +170 34 loss """bceaftersigmoid""" +170 34 regularizer """no""" +170 34 optimizer """adadelta""" +170 34 training_loop """lcwa""" +170 34 evaluator """rankbased""" +170 35 dataset """kinships""" +170 35 model """distmult""" +170 35 loss """bceaftersigmoid""" +170 35 regularizer """no""" +170 35 optimizer """adadelta""" +170 35 training_loop """lcwa""" +170 35 evaluator """rankbased""" +170 36 dataset """kinships""" +170 36 model """distmult""" +170 36 loss """bceaftersigmoid""" +170 36 regularizer """no""" +170 36 optimizer """adadelta""" +170 36 training_loop """lcwa""" +170 36 evaluator """rankbased""" +170 37 dataset """kinships""" +170 37 model """distmult""" +170 37 loss """bceaftersigmoid""" +170 37 regularizer """no""" +170 37 optimizer """adadelta""" +170 37 training_loop """lcwa""" +170 37 evaluator """rankbased""" +170 38 dataset """kinships""" +170 38 model """distmult""" +170 38 loss """bceaftersigmoid""" +170 38 regularizer """no""" +170 38 optimizer """adadelta""" +170 38 training_loop """lcwa""" +170 38 evaluator """rankbased""" +170 39 dataset """kinships""" +170 39 model """distmult""" +170 39 loss """bceaftersigmoid""" +170 39 regularizer """no""" +170 39 optimizer """adadelta""" +170 39 training_loop """lcwa""" +170 39 evaluator """rankbased""" +170 40 dataset """kinships""" +170 40 model """distmult""" +170 40 loss """bceaftersigmoid""" +170 40 regularizer """no""" +170 40 optimizer """adadelta""" +170 40 training_loop """lcwa""" +170 40 evaluator """rankbased""" +170 41 dataset """kinships""" +170 41 model """distmult""" +170 41 loss 
"""bceaftersigmoid""" +170 41 regularizer """no""" +170 41 optimizer """adadelta""" +170 41 training_loop """lcwa""" +170 41 evaluator """rankbased""" +170 42 dataset """kinships""" +170 42 model """distmult""" +170 42 loss """bceaftersigmoid""" +170 42 regularizer """no""" +170 42 optimizer """adadelta""" +170 42 training_loop """lcwa""" +170 42 evaluator """rankbased""" +170 43 dataset """kinships""" +170 43 model """distmult""" +170 43 loss """bceaftersigmoid""" +170 43 regularizer """no""" +170 43 optimizer """adadelta""" +170 43 training_loop """lcwa""" +170 43 evaluator """rankbased""" +170 44 dataset """kinships""" +170 44 model """distmult""" +170 44 loss """bceaftersigmoid""" +170 44 regularizer """no""" +170 44 optimizer """adadelta""" +170 44 training_loop """lcwa""" +170 44 evaluator """rankbased""" +170 45 dataset """kinships""" +170 45 model """distmult""" +170 45 loss """bceaftersigmoid""" +170 45 regularizer """no""" +170 45 optimizer """adadelta""" +170 45 training_loop """lcwa""" +170 45 evaluator """rankbased""" +170 46 dataset """kinships""" +170 46 model """distmult""" +170 46 loss """bceaftersigmoid""" +170 46 regularizer """no""" +170 46 optimizer """adadelta""" +170 46 training_loop """lcwa""" +170 46 evaluator """rankbased""" +170 47 dataset """kinships""" +170 47 model """distmult""" +170 47 loss """bceaftersigmoid""" +170 47 regularizer """no""" +170 47 optimizer """adadelta""" +170 47 training_loop """lcwa""" +170 47 evaluator """rankbased""" +170 48 dataset """kinships""" +170 48 model """distmult""" +170 48 loss """bceaftersigmoid""" +170 48 regularizer """no""" +170 48 optimizer """adadelta""" +170 48 training_loop """lcwa""" +170 48 evaluator """rankbased""" +170 49 dataset """kinships""" +170 49 model """distmult""" +170 49 loss """bceaftersigmoid""" +170 49 regularizer """no""" +170 49 optimizer """adadelta""" +170 49 training_loop """lcwa""" +170 49 evaluator """rankbased""" +170 50 dataset """kinships""" +170 50 model 
"""distmult""" +170 50 loss """bceaftersigmoid""" +170 50 regularizer """no""" +170 50 optimizer """adadelta""" +170 50 training_loop """lcwa""" +170 50 evaluator """rankbased""" +170 51 dataset """kinships""" +170 51 model """distmult""" +170 51 loss """bceaftersigmoid""" +170 51 regularizer """no""" +170 51 optimizer """adadelta""" +170 51 training_loop """lcwa""" +170 51 evaluator """rankbased""" +170 52 dataset """kinships""" +170 52 model """distmult""" +170 52 loss """bceaftersigmoid""" +170 52 regularizer """no""" +170 52 optimizer """adadelta""" +170 52 training_loop """lcwa""" +170 52 evaluator """rankbased""" +170 53 dataset """kinships""" +170 53 model """distmult""" +170 53 loss """bceaftersigmoid""" +170 53 regularizer """no""" +170 53 optimizer """adadelta""" +170 53 training_loop """lcwa""" +170 53 evaluator """rankbased""" +170 54 dataset """kinships""" +170 54 model """distmult""" +170 54 loss """bceaftersigmoid""" +170 54 regularizer """no""" +170 54 optimizer """adadelta""" +170 54 training_loop """lcwa""" +170 54 evaluator """rankbased""" +170 55 dataset """kinships""" +170 55 model """distmult""" +170 55 loss """bceaftersigmoid""" +170 55 regularizer """no""" +170 55 optimizer """adadelta""" +170 55 training_loop """lcwa""" +170 55 evaluator """rankbased""" +170 56 dataset """kinships""" +170 56 model """distmult""" +170 56 loss """bceaftersigmoid""" +170 56 regularizer """no""" +170 56 optimizer """adadelta""" +170 56 training_loop """lcwa""" +170 56 evaluator """rankbased""" +170 57 dataset """kinships""" +170 57 model """distmult""" +170 57 loss """bceaftersigmoid""" +170 57 regularizer """no""" +170 57 optimizer """adadelta""" +170 57 training_loop """lcwa""" +170 57 evaluator """rankbased""" +170 58 dataset """kinships""" +170 58 model """distmult""" +170 58 loss """bceaftersigmoid""" +170 58 regularizer """no""" +170 58 optimizer """adadelta""" +170 58 training_loop """lcwa""" +170 58 evaluator """rankbased""" +170 59 dataset 
"""kinships""" +170 59 model """distmult""" +170 59 loss """bceaftersigmoid""" +170 59 regularizer """no""" +170 59 optimizer """adadelta""" +170 59 training_loop """lcwa""" +170 59 evaluator """rankbased""" +170 60 dataset """kinships""" +170 60 model """distmult""" +170 60 loss """bceaftersigmoid""" +170 60 regularizer """no""" +170 60 optimizer """adadelta""" +170 60 training_loop """lcwa""" +170 60 evaluator """rankbased""" +170 61 dataset """kinships""" +170 61 model """distmult""" +170 61 loss """bceaftersigmoid""" +170 61 regularizer """no""" +170 61 optimizer """adadelta""" +170 61 training_loop """lcwa""" +170 61 evaluator """rankbased""" +170 62 dataset """kinships""" +170 62 model """distmult""" +170 62 loss """bceaftersigmoid""" +170 62 regularizer """no""" +170 62 optimizer """adadelta""" +170 62 training_loop """lcwa""" +170 62 evaluator """rankbased""" +170 63 dataset """kinships""" +170 63 model """distmult""" +170 63 loss """bceaftersigmoid""" +170 63 regularizer """no""" +170 63 optimizer """adadelta""" +170 63 training_loop """lcwa""" +170 63 evaluator """rankbased""" +170 64 dataset """kinships""" +170 64 model """distmult""" +170 64 loss """bceaftersigmoid""" +170 64 regularizer """no""" +170 64 optimizer """adadelta""" +170 64 training_loop """lcwa""" +170 64 evaluator """rankbased""" +170 65 dataset """kinships""" +170 65 model """distmult""" +170 65 loss """bceaftersigmoid""" +170 65 regularizer """no""" +170 65 optimizer """adadelta""" +170 65 training_loop """lcwa""" +170 65 evaluator """rankbased""" +170 66 dataset """kinships""" +170 66 model """distmult""" +170 66 loss """bceaftersigmoid""" +170 66 regularizer """no""" +170 66 optimizer """adadelta""" +170 66 training_loop """lcwa""" +170 66 evaluator """rankbased""" +170 67 dataset """kinships""" +170 67 model """distmult""" +170 67 loss """bceaftersigmoid""" +170 67 regularizer """no""" +170 67 optimizer """adadelta""" +170 67 training_loop """lcwa""" +170 67 evaluator """rankbased""" 
+170 68 dataset """kinships""" +170 68 model """distmult""" +170 68 loss """bceaftersigmoid""" +170 68 regularizer """no""" +170 68 optimizer """adadelta""" +170 68 training_loop """lcwa""" +170 68 evaluator """rankbased""" +170 69 dataset """kinships""" +170 69 model """distmult""" +170 69 loss """bceaftersigmoid""" +170 69 regularizer """no""" +170 69 optimizer """adadelta""" +170 69 training_loop """lcwa""" +170 69 evaluator """rankbased""" +170 70 dataset """kinships""" +170 70 model """distmult""" +170 70 loss """bceaftersigmoid""" +170 70 regularizer """no""" +170 70 optimizer """adadelta""" +170 70 training_loop """lcwa""" +170 70 evaluator """rankbased""" +170 71 dataset """kinships""" +170 71 model """distmult""" +170 71 loss """bceaftersigmoid""" +170 71 regularizer """no""" +170 71 optimizer """adadelta""" +170 71 training_loop """lcwa""" +170 71 evaluator """rankbased""" +170 72 dataset """kinships""" +170 72 model """distmult""" +170 72 loss """bceaftersigmoid""" +170 72 regularizer """no""" +170 72 optimizer """adadelta""" +170 72 training_loop """lcwa""" +170 72 evaluator """rankbased""" +170 73 dataset """kinships""" +170 73 model """distmult""" +170 73 loss """bceaftersigmoid""" +170 73 regularizer """no""" +170 73 optimizer """adadelta""" +170 73 training_loop """lcwa""" +170 73 evaluator """rankbased""" +170 74 dataset """kinships""" +170 74 model """distmult""" +170 74 loss """bceaftersigmoid""" +170 74 regularizer """no""" +170 74 optimizer """adadelta""" +170 74 training_loop """lcwa""" +170 74 evaluator """rankbased""" +170 75 dataset """kinships""" +170 75 model """distmult""" +170 75 loss """bceaftersigmoid""" +170 75 regularizer """no""" +170 75 optimizer """adadelta""" +170 75 training_loop """lcwa""" +170 75 evaluator """rankbased""" +170 76 dataset """kinships""" +170 76 model """distmult""" +170 76 loss """bceaftersigmoid""" +170 76 regularizer """no""" +170 76 optimizer """adadelta""" +170 76 training_loop """lcwa""" +170 76 evaluator 
"""rankbased""" +170 77 dataset """kinships""" +170 77 model """distmult""" +170 77 loss """bceaftersigmoid""" +170 77 regularizer """no""" +170 77 optimizer """adadelta""" +170 77 training_loop """lcwa""" +170 77 evaluator """rankbased""" +170 78 dataset """kinships""" +170 78 model """distmult""" +170 78 loss """bceaftersigmoid""" +170 78 regularizer """no""" +170 78 optimizer """adadelta""" +170 78 training_loop """lcwa""" +170 78 evaluator """rankbased""" +170 79 dataset """kinships""" +170 79 model """distmult""" +170 79 loss """bceaftersigmoid""" +170 79 regularizer """no""" +170 79 optimizer """adadelta""" +170 79 training_loop """lcwa""" +170 79 evaluator """rankbased""" +170 80 dataset """kinships""" +170 80 model """distmult""" +170 80 loss """bceaftersigmoid""" +170 80 regularizer """no""" +170 80 optimizer """adadelta""" +170 80 training_loop """lcwa""" +170 80 evaluator """rankbased""" +170 81 dataset """kinships""" +170 81 model """distmult""" +170 81 loss """bceaftersigmoid""" +170 81 regularizer """no""" +170 81 optimizer """adadelta""" +170 81 training_loop """lcwa""" +170 81 evaluator """rankbased""" +170 82 dataset """kinships""" +170 82 model """distmult""" +170 82 loss """bceaftersigmoid""" +170 82 regularizer """no""" +170 82 optimizer """adadelta""" +170 82 training_loop """lcwa""" +170 82 evaluator """rankbased""" +170 83 dataset """kinships""" +170 83 model """distmult""" +170 83 loss """bceaftersigmoid""" +170 83 regularizer """no""" +170 83 optimizer """adadelta""" +170 83 training_loop """lcwa""" +170 83 evaluator """rankbased""" +170 84 dataset """kinships""" +170 84 model """distmult""" +170 84 loss """bceaftersigmoid""" +170 84 regularizer """no""" +170 84 optimizer """adadelta""" +170 84 training_loop """lcwa""" +170 84 evaluator """rankbased""" +170 85 dataset """kinships""" +170 85 model """distmult""" +170 85 loss """bceaftersigmoid""" +170 85 regularizer """no""" +170 85 optimizer """adadelta""" +170 85 training_loop """lcwa""" 
+170 85 evaluator """rankbased""" +170 86 dataset """kinships""" +170 86 model """distmult""" +170 86 loss """bceaftersigmoid""" +170 86 regularizer """no""" +170 86 optimizer """adadelta""" +170 86 training_loop """lcwa""" +170 86 evaluator """rankbased""" +170 87 dataset """kinships""" +170 87 model """distmult""" +170 87 loss """bceaftersigmoid""" +170 87 regularizer """no""" +170 87 optimizer """adadelta""" +170 87 training_loop """lcwa""" +170 87 evaluator """rankbased""" +170 88 dataset """kinships""" +170 88 model """distmult""" +170 88 loss """bceaftersigmoid""" +170 88 regularizer """no""" +170 88 optimizer """adadelta""" +170 88 training_loop """lcwa""" +170 88 evaluator """rankbased""" +170 89 dataset """kinships""" +170 89 model """distmult""" +170 89 loss """bceaftersigmoid""" +170 89 regularizer """no""" +170 89 optimizer """adadelta""" +170 89 training_loop """lcwa""" +170 89 evaluator """rankbased""" +170 90 dataset """kinships""" +170 90 model """distmult""" +170 90 loss """bceaftersigmoid""" +170 90 regularizer """no""" +170 90 optimizer """adadelta""" +170 90 training_loop """lcwa""" +170 90 evaluator """rankbased""" +170 91 dataset """kinships""" +170 91 model """distmult""" +170 91 loss """bceaftersigmoid""" +170 91 regularizer """no""" +170 91 optimizer """adadelta""" +170 91 training_loop """lcwa""" +170 91 evaluator """rankbased""" +170 92 dataset """kinships""" +170 92 model """distmult""" +170 92 loss """bceaftersigmoid""" +170 92 regularizer """no""" +170 92 optimizer """adadelta""" +170 92 training_loop """lcwa""" +170 92 evaluator """rankbased""" +170 93 dataset """kinships""" +170 93 model """distmult""" +170 93 loss """bceaftersigmoid""" +170 93 regularizer """no""" +170 93 optimizer """adadelta""" +170 93 training_loop """lcwa""" +170 93 evaluator """rankbased""" +170 94 dataset """kinships""" +170 94 model """distmult""" +170 94 loss """bceaftersigmoid""" +170 94 regularizer """no""" +170 94 optimizer """adadelta""" +170 94 
training_loop """lcwa""" +170 94 evaluator """rankbased""" +170 95 dataset """kinships""" +170 95 model """distmult""" +170 95 loss """bceaftersigmoid""" +170 95 regularizer """no""" +170 95 optimizer """adadelta""" +170 95 training_loop """lcwa""" +170 95 evaluator """rankbased""" +170 96 dataset """kinships""" +170 96 model """distmult""" +170 96 loss """bceaftersigmoid""" +170 96 regularizer """no""" +170 96 optimizer """adadelta""" +170 96 training_loop """lcwa""" +170 96 evaluator """rankbased""" +170 97 dataset """kinships""" +170 97 model """distmult""" +170 97 loss """bceaftersigmoid""" +170 97 regularizer """no""" +170 97 optimizer """adadelta""" +170 97 training_loop """lcwa""" +170 97 evaluator """rankbased""" +170 98 dataset """kinships""" +170 98 model """distmult""" +170 98 loss """bceaftersigmoid""" +170 98 regularizer """no""" +170 98 optimizer """adadelta""" +170 98 training_loop """lcwa""" +170 98 evaluator """rankbased""" +170 99 dataset """kinships""" +170 99 model """distmult""" +170 99 loss """bceaftersigmoid""" +170 99 regularizer """no""" +170 99 optimizer """adadelta""" +170 99 training_loop """lcwa""" +170 99 evaluator """rankbased""" +170 100 dataset """kinships""" +170 100 model """distmult""" +170 100 loss """bceaftersigmoid""" +170 100 regularizer """no""" +170 100 optimizer """adadelta""" +170 100 training_loop """lcwa""" +170 100 evaluator """rankbased""" +171 1 model.embedding_dim 1.0 +171 1 training.batch_size 1.0 +171 1 training.label_smoothing 0.10492481481542631 +171 2 model.embedding_dim 0.0 +171 2 training.batch_size 1.0 +171 2 training.label_smoothing 0.012321568947377754 +171 3 model.embedding_dim 2.0 +171 3 training.batch_size 1.0 +171 3 training.label_smoothing 0.12587673485725612 +171 4 model.embedding_dim 0.0 +171 4 training.batch_size 1.0 +171 4 training.label_smoothing 0.737572010304201 +171 5 model.embedding_dim 1.0 +171 5 training.batch_size 1.0 +171 5 training.label_smoothing 0.09337362099778378 +171 6 
model.embedding_dim 0.0 +171 6 training.batch_size 2.0 +171 6 training.label_smoothing 0.0424162591280755 +171 7 model.embedding_dim 2.0 +171 7 training.batch_size 0.0 +171 7 training.label_smoothing 0.02366359153138464 +171 8 model.embedding_dim 1.0 +171 8 training.batch_size 1.0 +171 8 training.label_smoothing 0.07591742969287607 +171 9 model.embedding_dim 1.0 +171 9 training.batch_size 2.0 +171 9 training.label_smoothing 0.5102053308674354 +171 10 model.embedding_dim 0.0 +171 10 training.batch_size 2.0 +171 10 training.label_smoothing 0.001522114905854612 +171 11 model.embedding_dim 1.0 +171 11 training.batch_size 1.0 +171 11 training.label_smoothing 0.5263557655250631 +171 12 model.embedding_dim 2.0 +171 12 training.batch_size 1.0 +171 12 training.label_smoothing 0.13748060886629637 +171 13 model.embedding_dim 1.0 +171 13 training.batch_size 0.0 +171 13 training.label_smoothing 0.0012339964929523387 +171 14 model.embedding_dim 0.0 +171 14 training.batch_size 2.0 +171 14 training.label_smoothing 0.31184730905851094 +171 15 model.embedding_dim 0.0 +171 15 training.batch_size 0.0 +171 15 training.label_smoothing 0.00594838397433665 +171 16 model.embedding_dim 0.0 +171 16 training.batch_size 0.0 +171 16 training.label_smoothing 0.007942595209663787 +171 17 model.embedding_dim 1.0 +171 17 training.batch_size 2.0 +171 17 training.label_smoothing 0.12966214087652034 +171 18 model.embedding_dim 1.0 +171 18 training.batch_size 1.0 +171 18 training.label_smoothing 0.0012464678566252707 +171 19 model.embedding_dim 1.0 +171 19 training.batch_size 2.0 +171 19 training.label_smoothing 0.1663634583887682 +171 20 model.embedding_dim 1.0 +171 20 training.batch_size 1.0 +171 20 training.label_smoothing 0.0037561729523192277 +171 21 model.embedding_dim 1.0 +171 21 training.batch_size 2.0 +171 21 training.label_smoothing 0.003349244685972697 +171 22 model.embedding_dim 2.0 +171 22 training.batch_size 2.0 +171 22 training.label_smoothing 0.0023446137953695664 +171 23 
model.embedding_dim 0.0 +171 23 training.batch_size 2.0 +171 23 training.label_smoothing 0.0016602234133932575 +171 24 model.embedding_dim 0.0 +171 24 training.batch_size 2.0 +171 24 training.label_smoothing 0.006838024003736054 +171 25 model.embedding_dim 1.0 +171 25 training.batch_size 0.0 +171 25 training.label_smoothing 0.5434720647575483 +171 26 model.embedding_dim 2.0 +171 26 training.batch_size 1.0 +171 26 training.label_smoothing 0.14249264191940264 +171 27 model.embedding_dim 1.0 +171 27 training.batch_size 2.0 +171 27 training.label_smoothing 0.16248336556643278 +171 28 model.embedding_dim 1.0 +171 28 training.batch_size 0.0 +171 28 training.label_smoothing 0.35563835696767643 +171 29 model.embedding_dim 1.0 +171 29 training.batch_size 1.0 +171 29 training.label_smoothing 0.2283885612078983 +171 30 model.embedding_dim 0.0 +171 30 training.batch_size 0.0 +171 30 training.label_smoothing 0.10506310531352465 +171 31 model.embedding_dim 1.0 +171 31 training.batch_size 0.0 +171 31 training.label_smoothing 0.022342585010340248 +171 32 model.embedding_dim 2.0 +171 32 training.batch_size 0.0 +171 32 training.label_smoothing 0.12620662494504906 +171 33 model.embedding_dim 2.0 +171 33 training.batch_size 1.0 +171 33 training.label_smoothing 0.002610617334561567 +171 34 model.embedding_dim 1.0 +171 34 training.batch_size 2.0 +171 34 training.label_smoothing 0.03272657548324914 +171 35 model.embedding_dim 1.0 +171 35 training.batch_size 1.0 +171 35 training.label_smoothing 0.0069750425015922485 +171 36 model.embedding_dim 0.0 +171 36 training.batch_size 2.0 +171 36 training.label_smoothing 0.21077594609164496 +171 37 model.embedding_dim 2.0 +171 37 training.batch_size 2.0 +171 37 training.label_smoothing 0.007176738967218142 +171 38 model.embedding_dim 1.0 +171 38 training.batch_size 2.0 +171 38 training.label_smoothing 0.006044373131300468 +171 39 model.embedding_dim 0.0 +171 39 training.batch_size 1.0 +171 39 training.label_smoothing 0.0067392228804284745 +171 40 
model.embedding_dim 0.0 +171 40 training.batch_size 1.0 +171 40 training.label_smoothing 0.07175285809381786 +171 41 model.embedding_dim 2.0 +171 41 training.batch_size 1.0 +171 41 training.label_smoothing 0.07090648121769122 +171 42 model.embedding_dim 0.0 +171 42 training.batch_size 2.0 +171 42 training.label_smoothing 0.016665360597319812 +171 43 model.embedding_dim 2.0 +171 43 training.batch_size 2.0 +171 43 training.label_smoothing 0.12549644339975374 +171 44 model.embedding_dim 0.0 +171 44 training.batch_size 0.0 +171 44 training.label_smoothing 0.004533828423841429 +171 45 model.embedding_dim 1.0 +171 45 training.batch_size 2.0 +171 45 training.label_smoothing 0.01046938827968554 +171 46 model.embedding_dim 1.0 +171 46 training.batch_size 2.0 +171 46 training.label_smoothing 0.07677560422773377 +171 47 model.embedding_dim 0.0 +171 47 training.batch_size 2.0 +171 47 training.label_smoothing 0.02298137286022606 +171 48 model.embedding_dim 1.0 +171 48 training.batch_size 2.0 +171 48 training.label_smoothing 0.029912404503707458 +171 49 model.embedding_dim 0.0 +171 49 training.batch_size 0.0 +171 49 training.label_smoothing 0.3097126598485996 +171 50 model.embedding_dim 2.0 +171 50 training.batch_size 2.0 +171 50 training.label_smoothing 0.0025232737876719337 +171 51 model.embedding_dim 2.0 +171 51 training.batch_size 0.0 +171 51 training.label_smoothing 0.09503471692267151 +171 52 model.embedding_dim 1.0 +171 52 training.batch_size 2.0 +171 52 training.label_smoothing 0.2573253585944426 +171 53 model.embedding_dim 2.0 +171 53 training.batch_size 1.0 +171 53 training.label_smoothing 0.019039386214641 +171 54 model.embedding_dim 2.0 +171 54 training.batch_size 1.0 +171 54 training.label_smoothing 0.22451347248961626 +171 55 model.embedding_dim 0.0 +171 55 training.batch_size 2.0 +171 55 training.label_smoothing 0.005480724525928018 +171 56 model.embedding_dim 0.0 +171 56 training.batch_size 1.0 +171 56 training.label_smoothing 0.21714131632186673 +171 57 
model.embedding_dim 1.0 +171 57 training.batch_size 2.0 +171 57 training.label_smoothing 0.08776034741086339 +171 58 model.embedding_dim 1.0 +171 58 training.batch_size 2.0 +171 58 training.label_smoothing 0.15819999872462767 +171 59 model.embedding_dim 0.0 +171 59 training.batch_size 1.0 +171 59 training.label_smoothing 0.04501062228196613 +171 60 model.embedding_dim 0.0 +171 60 training.batch_size 2.0 +171 60 training.label_smoothing 0.013367902466648292 +171 61 model.embedding_dim 0.0 +171 61 training.batch_size 2.0 +171 61 training.label_smoothing 0.08763418046032713 +171 62 model.embedding_dim 2.0 +171 62 training.batch_size 0.0 +171 62 training.label_smoothing 0.10218643162082806 +171 63 model.embedding_dim 0.0 +171 63 training.batch_size 2.0 +171 63 training.label_smoothing 0.01729023971254246 +171 64 model.embedding_dim 0.0 +171 64 training.batch_size 1.0 +171 64 training.label_smoothing 0.8368502308054213 +171 65 model.embedding_dim 0.0 +171 65 training.batch_size 2.0 +171 65 training.label_smoothing 0.41467446822139087 +171 66 model.embedding_dim 0.0 +171 66 training.batch_size 1.0 +171 66 training.label_smoothing 0.0012658950883722487 +171 67 model.embedding_dim 2.0 +171 67 training.batch_size 0.0 +171 67 training.label_smoothing 0.0026824926514620426 +171 68 model.embedding_dim 2.0 +171 68 training.batch_size 0.0 +171 68 training.label_smoothing 0.023165984743707416 +171 69 model.embedding_dim 1.0 +171 69 training.batch_size 2.0 +171 69 training.label_smoothing 0.07973403966057617 +171 70 model.embedding_dim 1.0 +171 70 training.batch_size 1.0 +171 70 training.label_smoothing 0.0012995875086226625 +171 71 model.embedding_dim 0.0 +171 71 training.batch_size 1.0 +171 71 training.label_smoothing 0.0025896537324634357 +171 72 model.embedding_dim 2.0 +171 72 training.batch_size 1.0 +171 72 training.label_smoothing 0.06487290629431475 +171 73 model.embedding_dim 2.0 +171 73 training.batch_size 1.0 +171 73 training.label_smoothing 0.0029515159936156564 +171 74 
model.embedding_dim 1.0 +171 74 training.batch_size 2.0 +171 74 training.label_smoothing 0.0011237161580233486 +171 75 model.embedding_dim 2.0 +171 75 training.batch_size 2.0 +171 75 training.label_smoothing 0.001448400112343286 +171 76 model.embedding_dim 2.0 +171 76 training.batch_size 1.0 +171 76 training.label_smoothing 0.002274687783627006 +171 77 model.embedding_dim 1.0 +171 77 training.batch_size 2.0 +171 77 training.label_smoothing 0.0018718107573710851 +171 78 model.embedding_dim 1.0 +171 78 training.batch_size 2.0 +171 78 training.label_smoothing 0.07726439617573283 +171 79 model.embedding_dim 2.0 +171 79 training.batch_size 2.0 +171 79 training.label_smoothing 0.04628098409101177 +171 80 model.embedding_dim 2.0 +171 80 training.batch_size 2.0 +171 80 training.label_smoothing 0.01222595837681814 +171 81 model.embedding_dim 1.0 +171 81 training.batch_size 2.0 +171 81 training.label_smoothing 0.008768814647922517 +171 82 model.embedding_dim 2.0 +171 82 training.batch_size 1.0 +171 82 training.label_smoothing 0.0010209547042363965 +171 83 model.embedding_dim 1.0 +171 83 training.batch_size 1.0 +171 83 training.label_smoothing 0.2695098383050931 +171 84 model.embedding_dim 0.0 +171 84 training.batch_size 0.0 +171 84 training.label_smoothing 0.0041424116808634675 +171 85 model.embedding_dim 0.0 +171 85 training.batch_size 1.0 +171 85 training.label_smoothing 0.7105889578148501 +171 86 model.embedding_dim 2.0 +171 86 training.batch_size 1.0 +171 86 training.label_smoothing 0.29426474628539956 +171 87 model.embedding_dim 0.0 +171 87 training.batch_size 1.0 +171 87 training.label_smoothing 0.004099040692235039 +171 88 model.embedding_dim 0.0 +171 88 training.batch_size 1.0 +171 88 training.label_smoothing 0.9505546956423074 +171 89 model.embedding_dim 0.0 +171 89 training.batch_size 2.0 +171 89 training.label_smoothing 0.2122694629127647 +171 90 model.embedding_dim 0.0 +171 90 training.batch_size 0.0 +171 90 training.label_smoothing 0.004970240265428051 +171 91 
model.embedding_dim 2.0 +171 91 training.batch_size 2.0 +171 91 training.label_smoothing 0.10580915494481154 +171 92 model.embedding_dim 2.0 +171 92 training.batch_size 2.0 +171 92 training.label_smoothing 0.6349298159931831 +171 93 model.embedding_dim 1.0 +171 93 training.batch_size 1.0 +171 93 training.label_smoothing 0.09766975663641778 +171 94 model.embedding_dim 2.0 +171 94 training.batch_size 1.0 +171 94 training.label_smoothing 0.1792099347350589 +171 95 model.embedding_dim 0.0 +171 95 training.batch_size 1.0 +171 95 training.label_smoothing 0.009315522603226043 +171 96 model.embedding_dim 1.0 +171 96 training.batch_size 0.0 +171 96 training.label_smoothing 0.5874107754923805 +171 97 model.embedding_dim 0.0 +171 97 training.batch_size 0.0 +171 97 training.label_smoothing 0.4885110671663249 +171 98 model.embedding_dim 0.0 +171 98 training.batch_size 1.0 +171 98 training.label_smoothing 0.019707804021089546 +171 99 model.embedding_dim 0.0 +171 99 training.batch_size 0.0 +171 99 training.label_smoothing 0.002551150635671917 +171 100 model.embedding_dim 0.0 +171 100 training.batch_size 0.0 +171 100 training.label_smoothing 0.005489301035713944 +171 1 dataset """kinships""" +171 1 model """distmult""" +171 1 loss """softplus""" +171 1 regularizer """no""" +171 1 optimizer """adadelta""" +171 1 training_loop """lcwa""" +171 1 evaluator """rankbased""" +171 2 dataset """kinships""" +171 2 model """distmult""" +171 2 loss """softplus""" +171 2 regularizer """no""" +171 2 optimizer """adadelta""" +171 2 training_loop """lcwa""" +171 2 evaluator """rankbased""" +171 3 dataset """kinships""" +171 3 model """distmult""" +171 3 loss """softplus""" +171 3 regularizer """no""" +171 3 optimizer """adadelta""" +171 3 training_loop """lcwa""" +171 3 evaluator """rankbased""" +171 4 dataset """kinships""" +171 4 model """distmult""" +171 4 loss """softplus""" +171 4 regularizer """no""" +171 4 optimizer """adadelta""" +171 4 training_loop """lcwa""" +171 4 evaluator 
"""rankbased""" +171 5 dataset """kinships""" +171 5 model """distmult""" +171 5 loss """softplus""" +171 5 regularizer """no""" +171 5 optimizer """adadelta""" +171 5 training_loop """lcwa""" +171 5 evaluator """rankbased""" +171 6 dataset """kinships""" +171 6 model """distmult""" +171 6 loss """softplus""" +171 6 regularizer """no""" +171 6 optimizer """adadelta""" +171 6 training_loop """lcwa""" +171 6 evaluator """rankbased""" +171 7 dataset """kinships""" +171 7 model """distmult""" +171 7 loss """softplus""" +171 7 regularizer """no""" +171 7 optimizer """adadelta""" +171 7 training_loop """lcwa""" +171 7 evaluator """rankbased""" +171 8 dataset """kinships""" +171 8 model """distmult""" +171 8 loss """softplus""" +171 8 regularizer """no""" +171 8 optimizer """adadelta""" +171 8 training_loop """lcwa""" +171 8 evaluator """rankbased""" +171 9 dataset """kinships""" +171 9 model """distmult""" +171 9 loss """softplus""" +171 9 regularizer """no""" +171 9 optimizer """adadelta""" +171 9 training_loop """lcwa""" +171 9 evaluator """rankbased""" +171 10 dataset """kinships""" +171 10 model """distmult""" +171 10 loss """softplus""" +171 10 regularizer """no""" +171 10 optimizer """adadelta""" +171 10 training_loop """lcwa""" +171 10 evaluator """rankbased""" +171 11 dataset """kinships""" +171 11 model """distmult""" +171 11 loss """softplus""" +171 11 regularizer """no""" +171 11 optimizer """adadelta""" +171 11 training_loop """lcwa""" +171 11 evaluator """rankbased""" +171 12 dataset """kinships""" +171 12 model """distmult""" +171 12 loss """softplus""" +171 12 regularizer """no""" +171 12 optimizer """adadelta""" +171 12 training_loop """lcwa""" +171 12 evaluator """rankbased""" +171 13 dataset """kinships""" +171 13 model """distmult""" +171 13 loss """softplus""" +171 13 regularizer """no""" +171 13 optimizer """adadelta""" +171 13 training_loop """lcwa""" +171 13 evaluator """rankbased""" +171 14 dataset """kinships""" +171 14 model """distmult""" +171 
14 loss """softplus""" +171 14 regularizer """no""" +171 14 optimizer """adadelta""" +171 14 training_loop """lcwa""" +171 14 evaluator """rankbased""" +171 15 dataset """kinships""" +171 15 model """distmult""" +171 15 loss """softplus""" +171 15 regularizer """no""" +171 15 optimizer """adadelta""" +171 15 training_loop """lcwa""" +171 15 evaluator """rankbased""" +171 16 dataset """kinships""" +171 16 model """distmult""" +171 16 loss """softplus""" +171 16 regularizer """no""" +171 16 optimizer """adadelta""" +171 16 training_loop """lcwa""" +171 16 evaluator """rankbased""" +171 17 dataset """kinships""" +171 17 model """distmult""" +171 17 loss """softplus""" +171 17 regularizer """no""" +171 17 optimizer """adadelta""" +171 17 training_loop """lcwa""" +171 17 evaluator """rankbased""" +171 18 dataset """kinships""" +171 18 model """distmult""" +171 18 loss """softplus""" +171 18 regularizer """no""" +171 18 optimizer """adadelta""" +171 18 training_loop """lcwa""" +171 18 evaluator """rankbased""" +171 19 dataset """kinships""" +171 19 model """distmult""" +171 19 loss """softplus""" +171 19 regularizer """no""" +171 19 optimizer """adadelta""" +171 19 training_loop """lcwa""" +171 19 evaluator """rankbased""" +171 20 dataset """kinships""" +171 20 model """distmult""" +171 20 loss """softplus""" +171 20 regularizer """no""" +171 20 optimizer """adadelta""" +171 20 training_loop """lcwa""" +171 20 evaluator """rankbased""" +171 21 dataset """kinships""" +171 21 model """distmult""" +171 21 loss """softplus""" +171 21 regularizer """no""" +171 21 optimizer """adadelta""" +171 21 training_loop """lcwa""" +171 21 evaluator """rankbased""" +171 22 dataset """kinships""" +171 22 model """distmult""" +171 22 loss """softplus""" +171 22 regularizer """no""" +171 22 optimizer """adadelta""" +171 22 training_loop """lcwa""" +171 22 evaluator """rankbased""" +171 23 dataset """kinships""" +171 23 model """distmult""" +171 23 loss """softplus""" +171 23 regularizer 
"""no""" +171 23 optimizer """adadelta""" +171 23 training_loop """lcwa""" +171 23 evaluator """rankbased""" +171 24 dataset """kinships""" +171 24 model """distmult""" +171 24 loss """softplus""" +171 24 regularizer """no""" +171 24 optimizer """adadelta""" +171 24 training_loop """lcwa""" +171 24 evaluator """rankbased""" +171 25 dataset """kinships""" +171 25 model """distmult""" +171 25 loss """softplus""" +171 25 regularizer """no""" +171 25 optimizer """adadelta""" +171 25 training_loop """lcwa""" +171 25 evaluator """rankbased""" +171 26 dataset """kinships""" +171 26 model """distmult""" +171 26 loss """softplus""" +171 26 regularizer """no""" +171 26 optimizer """adadelta""" +171 26 training_loop """lcwa""" +171 26 evaluator """rankbased""" +171 27 dataset """kinships""" +171 27 model """distmult""" +171 27 loss """softplus""" +171 27 regularizer """no""" +171 27 optimizer """adadelta""" +171 27 training_loop """lcwa""" +171 27 evaluator """rankbased""" +171 28 dataset """kinships""" +171 28 model """distmult""" +171 28 loss """softplus""" +171 28 regularizer """no""" +171 28 optimizer """adadelta""" +171 28 training_loop """lcwa""" +171 28 evaluator """rankbased""" +171 29 dataset """kinships""" +171 29 model """distmult""" +171 29 loss """softplus""" +171 29 regularizer """no""" +171 29 optimizer """adadelta""" +171 29 training_loop """lcwa""" +171 29 evaluator """rankbased""" +171 30 dataset """kinships""" +171 30 model """distmult""" +171 30 loss """softplus""" +171 30 regularizer """no""" +171 30 optimizer """adadelta""" +171 30 training_loop """lcwa""" +171 30 evaluator """rankbased""" +171 31 dataset """kinships""" +171 31 model """distmult""" +171 31 loss """softplus""" +171 31 regularizer """no""" +171 31 optimizer """adadelta""" +171 31 training_loop """lcwa""" +171 31 evaluator """rankbased""" +171 32 dataset """kinships""" +171 32 model """distmult""" +171 32 loss """softplus""" +171 32 regularizer """no""" +171 32 optimizer """adadelta""" +171 
32 training_loop """lcwa""" +171 32 evaluator """rankbased""" +171 33 dataset """kinships""" +171 33 model """distmult""" +171 33 loss """softplus""" +171 33 regularizer """no""" +171 33 optimizer """adadelta""" +171 33 training_loop """lcwa""" +171 33 evaluator """rankbased""" +171 34 dataset """kinships""" +171 34 model """distmult""" +171 34 loss """softplus""" +171 34 regularizer """no""" +171 34 optimizer """adadelta""" +171 34 training_loop """lcwa""" +171 34 evaluator """rankbased""" +171 35 dataset """kinships""" +171 35 model """distmult""" +171 35 loss """softplus""" +171 35 regularizer """no""" +171 35 optimizer """adadelta""" +171 35 training_loop """lcwa""" +171 35 evaluator """rankbased""" +171 36 dataset """kinships""" +171 36 model """distmult""" +171 36 loss """softplus""" +171 36 regularizer """no""" +171 36 optimizer """adadelta""" +171 36 training_loop """lcwa""" +171 36 evaluator """rankbased""" +171 37 dataset """kinships""" +171 37 model """distmult""" +171 37 loss """softplus""" +171 37 regularizer """no""" +171 37 optimizer """adadelta""" +171 37 training_loop """lcwa""" +171 37 evaluator """rankbased""" +171 38 dataset """kinships""" +171 38 model """distmult""" +171 38 loss """softplus""" +171 38 regularizer """no""" +171 38 optimizer """adadelta""" +171 38 training_loop """lcwa""" +171 38 evaluator """rankbased""" +171 39 dataset """kinships""" +171 39 model """distmult""" +171 39 loss """softplus""" +171 39 regularizer """no""" +171 39 optimizer """adadelta""" +171 39 training_loop """lcwa""" +171 39 evaluator """rankbased""" +171 40 dataset """kinships""" +171 40 model """distmult""" +171 40 loss """softplus""" +171 40 regularizer """no""" +171 40 optimizer """adadelta""" +171 40 training_loop """lcwa""" +171 40 evaluator """rankbased""" +171 41 dataset """kinships""" +171 41 model """distmult""" +171 41 loss """softplus""" +171 41 regularizer """no""" +171 41 optimizer """adadelta""" +171 41 training_loop """lcwa""" +171 41 evaluator 
"""rankbased""" +171 42 dataset """kinships""" +171 42 model """distmult""" +171 42 loss """softplus""" +171 42 regularizer """no""" +171 42 optimizer """adadelta""" +171 42 training_loop """lcwa""" +171 42 evaluator """rankbased""" +171 43 dataset """kinships""" +171 43 model """distmult""" +171 43 loss """softplus""" +171 43 regularizer """no""" +171 43 optimizer """adadelta""" +171 43 training_loop """lcwa""" +171 43 evaluator """rankbased""" +171 44 dataset """kinships""" +171 44 model """distmult""" +171 44 loss """softplus""" +171 44 regularizer """no""" +171 44 optimizer """adadelta""" +171 44 training_loop """lcwa""" +171 44 evaluator """rankbased""" +171 45 dataset """kinships""" +171 45 model """distmult""" +171 45 loss """softplus""" +171 45 regularizer """no""" +171 45 optimizer """adadelta""" +171 45 training_loop """lcwa""" +171 45 evaluator """rankbased""" +171 46 dataset """kinships""" +171 46 model """distmult""" +171 46 loss """softplus""" +171 46 regularizer """no""" +171 46 optimizer """adadelta""" +171 46 training_loop """lcwa""" +171 46 evaluator """rankbased""" +171 47 dataset """kinships""" +171 47 model """distmult""" +171 47 loss """softplus""" +171 47 regularizer """no""" +171 47 optimizer """adadelta""" +171 47 training_loop """lcwa""" +171 47 evaluator """rankbased""" +171 48 dataset """kinships""" +171 48 model """distmult""" +171 48 loss """softplus""" +171 48 regularizer """no""" +171 48 optimizer """adadelta""" +171 48 training_loop """lcwa""" +171 48 evaluator """rankbased""" +171 49 dataset """kinships""" +171 49 model """distmult""" +171 49 loss """softplus""" +171 49 regularizer """no""" +171 49 optimizer """adadelta""" +171 49 training_loop """lcwa""" +171 49 evaluator """rankbased""" +171 50 dataset """kinships""" +171 50 model """distmult""" +171 50 loss """softplus""" +171 50 regularizer """no""" +171 50 optimizer """adadelta""" +171 50 training_loop """lcwa""" +171 50 evaluator """rankbased""" +171 51 dataset """kinships""" 
+171 51 model """distmult""" +171 51 loss """softplus""" +171 51 regularizer """no""" +171 51 optimizer """adadelta""" +171 51 training_loop """lcwa""" +171 51 evaluator """rankbased""" +171 52 dataset """kinships""" +171 52 model """distmult""" +171 52 loss """softplus""" +171 52 regularizer """no""" +171 52 optimizer """adadelta""" +171 52 training_loop """lcwa""" +171 52 evaluator """rankbased""" +171 53 dataset """kinships""" +171 53 model """distmult""" +171 53 loss """softplus""" +171 53 regularizer """no""" +171 53 optimizer """adadelta""" +171 53 training_loop """lcwa""" +171 53 evaluator """rankbased""" +171 54 dataset """kinships""" +171 54 model """distmult""" +171 54 loss """softplus""" +171 54 regularizer """no""" +171 54 optimizer """adadelta""" +171 54 training_loop """lcwa""" +171 54 evaluator """rankbased""" +171 55 dataset """kinships""" +171 55 model """distmult""" +171 55 loss """softplus""" +171 55 regularizer """no""" +171 55 optimizer """adadelta""" +171 55 training_loop """lcwa""" +171 55 evaluator """rankbased""" +171 56 dataset """kinships""" +171 56 model """distmult""" +171 56 loss """softplus""" +171 56 regularizer """no""" +171 56 optimizer """adadelta""" +171 56 training_loop """lcwa""" +171 56 evaluator """rankbased""" +171 57 dataset """kinships""" +171 57 model """distmult""" +171 57 loss """softplus""" +171 57 regularizer """no""" +171 57 optimizer """adadelta""" +171 57 training_loop """lcwa""" +171 57 evaluator """rankbased""" +171 58 dataset """kinships""" +171 58 model """distmult""" +171 58 loss """softplus""" +171 58 regularizer """no""" +171 58 optimizer """adadelta""" +171 58 training_loop """lcwa""" +171 58 evaluator """rankbased""" +171 59 dataset """kinships""" +171 59 model """distmult""" +171 59 loss """softplus""" +171 59 regularizer """no""" +171 59 optimizer """adadelta""" +171 59 training_loop """lcwa""" +171 59 evaluator """rankbased""" +171 60 dataset """kinships""" +171 60 model """distmult""" +171 60 loss 
"""softplus""" +171 60 regularizer """no""" +171 60 optimizer """adadelta""" +171 60 training_loop """lcwa""" +171 60 evaluator """rankbased""" +171 61 dataset """kinships""" +171 61 model """distmult""" +171 61 loss """softplus""" +171 61 regularizer """no""" +171 61 optimizer """adadelta""" +171 61 training_loop """lcwa""" +171 61 evaluator """rankbased""" +171 62 dataset """kinships""" +171 62 model """distmult""" +171 62 loss """softplus""" +171 62 regularizer """no""" +171 62 optimizer """adadelta""" +171 62 training_loop """lcwa""" +171 62 evaluator """rankbased""" +171 63 dataset """kinships""" +171 63 model """distmult""" +171 63 loss """softplus""" +171 63 regularizer """no""" +171 63 optimizer """adadelta""" +171 63 training_loop """lcwa""" +171 63 evaluator """rankbased""" +171 64 dataset """kinships""" +171 64 model """distmult""" +171 64 loss """softplus""" +171 64 regularizer """no""" +171 64 optimizer """adadelta""" +171 64 training_loop """lcwa""" +171 64 evaluator """rankbased""" +171 65 dataset """kinships""" +171 65 model """distmult""" +171 65 loss """softplus""" +171 65 regularizer """no""" +171 65 optimizer """adadelta""" +171 65 training_loop """lcwa""" +171 65 evaluator """rankbased""" +171 66 dataset """kinships""" +171 66 model """distmult""" +171 66 loss """softplus""" +171 66 regularizer """no""" +171 66 optimizer """adadelta""" +171 66 training_loop """lcwa""" +171 66 evaluator """rankbased""" +171 67 dataset """kinships""" +171 67 model """distmult""" +171 67 loss """softplus""" +171 67 regularizer """no""" +171 67 optimizer """adadelta""" +171 67 training_loop """lcwa""" +171 67 evaluator """rankbased""" +171 68 dataset """kinships""" +171 68 model """distmult""" +171 68 loss """softplus""" +171 68 regularizer """no""" +171 68 optimizer """adadelta""" +171 68 training_loop """lcwa""" +171 68 evaluator """rankbased""" +171 69 dataset """kinships""" +171 69 model """distmult""" +171 69 loss """softplus""" +171 69 regularizer """no""" 
+171 69 optimizer """adadelta""" +171 69 training_loop """lcwa""" +171 69 evaluator """rankbased""" +171 70 dataset """kinships""" +171 70 model """distmult""" +171 70 loss """softplus""" +171 70 regularizer """no""" +171 70 optimizer """adadelta""" +171 70 training_loop """lcwa""" +171 70 evaluator """rankbased""" +171 71 dataset """kinships""" +171 71 model """distmult""" +171 71 loss """softplus""" +171 71 regularizer """no""" +171 71 optimizer """adadelta""" +171 71 training_loop """lcwa""" +171 71 evaluator """rankbased""" +171 72 dataset """kinships""" +171 72 model """distmult""" +171 72 loss """softplus""" +171 72 regularizer """no""" +171 72 optimizer """adadelta""" +171 72 training_loop """lcwa""" +171 72 evaluator """rankbased""" +171 73 dataset """kinships""" +171 73 model """distmult""" +171 73 loss """softplus""" +171 73 regularizer """no""" +171 73 optimizer """adadelta""" +171 73 training_loop """lcwa""" +171 73 evaluator """rankbased""" +171 74 dataset """kinships""" +171 74 model """distmult""" +171 74 loss """softplus""" +171 74 regularizer """no""" +171 74 optimizer """adadelta""" +171 74 training_loop """lcwa""" +171 74 evaluator """rankbased""" +171 75 dataset """kinships""" +171 75 model """distmult""" +171 75 loss """softplus""" +171 75 regularizer """no""" +171 75 optimizer """adadelta""" +171 75 training_loop """lcwa""" +171 75 evaluator """rankbased""" +171 76 dataset """kinships""" +171 76 model """distmult""" +171 76 loss """softplus""" +171 76 regularizer """no""" +171 76 optimizer """adadelta""" +171 76 training_loop """lcwa""" +171 76 evaluator """rankbased""" +171 77 dataset """kinships""" +171 77 model """distmult""" +171 77 loss """softplus""" +171 77 regularizer """no""" +171 77 optimizer """adadelta""" +171 77 training_loop """lcwa""" +171 77 evaluator """rankbased""" +171 78 dataset """kinships""" +171 78 model """distmult""" +171 78 loss """softplus""" +171 78 regularizer """no""" +171 78 optimizer """adadelta""" +171 78 
training_loop """lcwa""" +171 78 evaluator """rankbased""" +171 79 dataset """kinships""" +171 79 model """distmult""" +171 79 loss """softplus""" +171 79 regularizer """no""" +171 79 optimizer """adadelta""" +171 79 training_loop """lcwa""" +171 79 evaluator """rankbased""" +171 80 dataset """kinships""" +171 80 model """distmult""" +171 80 loss """softplus""" +171 80 regularizer """no""" +171 80 optimizer """adadelta""" +171 80 training_loop """lcwa""" +171 80 evaluator """rankbased""" +171 81 dataset """kinships""" +171 81 model """distmult""" +171 81 loss """softplus""" +171 81 regularizer """no""" +171 81 optimizer """adadelta""" +171 81 training_loop """lcwa""" +171 81 evaluator """rankbased""" +171 82 dataset """kinships""" +171 82 model """distmult""" +171 82 loss """softplus""" +171 82 regularizer """no""" +171 82 optimizer """adadelta""" +171 82 training_loop """lcwa""" +171 82 evaluator """rankbased""" +171 83 dataset """kinships""" +171 83 model """distmult""" +171 83 loss """softplus""" +171 83 regularizer """no""" +171 83 optimizer """adadelta""" +171 83 training_loop """lcwa""" +171 83 evaluator """rankbased""" +171 84 dataset """kinships""" +171 84 model """distmult""" +171 84 loss """softplus""" +171 84 regularizer """no""" +171 84 optimizer """adadelta""" +171 84 training_loop """lcwa""" +171 84 evaluator """rankbased""" +171 85 dataset """kinships""" +171 85 model """distmult""" +171 85 loss """softplus""" +171 85 regularizer """no""" +171 85 optimizer """adadelta""" +171 85 training_loop """lcwa""" +171 85 evaluator """rankbased""" +171 86 dataset """kinships""" +171 86 model """distmult""" +171 86 loss """softplus""" +171 86 regularizer """no""" +171 86 optimizer """adadelta""" +171 86 training_loop """lcwa""" +171 86 evaluator """rankbased""" +171 87 dataset """kinships""" +171 87 model """distmult""" +171 87 loss """softplus""" +171 87 regularizer """no""" +171 87 optimizer """adadelta""" +171 87 training_loop """lcwa""" +171 87 evaluator 
"""rankbased""" +171 88 dataset """kinships""" +171 88 model """distmult""" +171 88 loss """softplus""" +171 88 regularizer """no""" +171 88 optimizer """adadelta""" +171 88 training_loop """lcwa""" +171 88 evaluator """rankbased""" +171 89 dataset """kinships""" +171 89 model """distmult""" +171 89 loss """softplus""" +171 89 regularizer """no""" +171 89 optimizer """adadelta""" +171 89 training_loop """lcwa""" +171 89 evaluator """rankbased""" +171 90 dataset """kinships""" +171 90 model """distmult""" +171 90 loss """softplus""" +171 90 regularizer """no""" +171 90 optimizer """adadelta""" +171 90 training_loop """lcwa""" +171 90 evaluator """rankbased""" +171 91 dataset """kinships""" +171 91 model """distmult""" +171 91 loss """softplus""" +171 91 regularizer """no""" +171 91 optimizer """adadelta""" +171 91 training_loop """lcwa""" +171 91 evaluator """rankbased""" +171 92 dataset """kinships""" +171 92 model """distmult""" +171 92 loss """softplus""" +171 92 regularizer """no""" +171 92 optimizer """adadelta""" +171 92 training_loop """lcwa""" +171 92 evaluator """rankbased""" +171 93 dataset """kinships""" +171 93 model """distmult""" +171 93 loss """softplus""" +171 93 regularizer """no""" +171 93 optimizer """adadelta""" +171 93 training_loop """lcwa""" +171 93 evaluator """rankbased""" +171 94 dataset """kinships""" +171 94 model """distmult""" +171 94 loss """softplus""" +171 94 regularizer """no""" +171 94 optimizer """adadelta""" +171 94 training_loop """lcwa""" +171 94 evaluator """rankbased""" +171 95 dataset """kinships""" +171 95 model """distmult""" +171 95 loss """softplus""" +171 95 regularizer """no""" +171 95 optimizer """adadelta""" +171 95 training_loop """lcwa""" +171 95 evaluator """rankbased""" +171 96 dataset """kinships""" +171 96 model """distmult""" +171 96 loss """softplus""" +171 96 regularizer """no""" +171 96 optimizer """adadelta""" +171 96 training_loop """lcwa""" +171 96 evaluator """rankbased""" +171 97 dataset """kinships""" 
+171 97 model """distmult""" +171 97 loss """softplus""" +171 97 regularizer """no""" +171 97 optimizer """adadelta""" +171 97 training_loop """lcwa""" +171 97 evaluator """rankbased""" +171 98 dataset """kinships""" +171 98 model """distmult""" +171 98 loss """softplus""" +171 98 regularizer """no""" +171 98 optimizer """adadelta""" +171 98 training_loop """lcwa""" +171 98 evaluator """rankbased""" +171 99 dataset """kinships""" +171 99 model """distmult""" +171 99 loss """softplus""" +171 99 regularizer """no""" +171 99 optimizer """adadelta""" +171 99 training_loop """lcwa""" +171 99 evaluator """rankbased""" +171 100 dataset """kinships""" +171 100 model """distmult""" +171 100 loss """softplus""" +171 100 regularizer """no""" +171 100 optimizer """adadelta""" +171 100 training_loop """lcwa""" +171 100 evaluator """rankbased""" +172 1 model.embedding_dim 0.0 +172 1 training.batch_size 2.0 +172 1 training.label_smoothing 0.324969163966886 +172 2 model.embedding_dim 1.0 +172 2 training.batch_size 2.0 +172 2 training.label_smoothing 0.020180739457089588 +172 3 model.embedding_dim 2.0 +172 3 training.batch_size 2.0 +172 3 training.label_smoothing 0.3302300714302593 +172 4 model.embedding_dim 0.0 +172 4 training.batch_size 0.0 +172 4 training.label_smoothing 0.004726509243513931 +172 5 model.embedding_dim 1.0 +172 5 training.batch_size 2.0 +172 5 training.label_smoothing 0.10375125609770912 +172 6 model.embedding_dim 2.0 +172 6 training.batch_size 2.0 +172 6 training.label_smoothing 0.003831566873248463 +172 7 model.embedding_dim 1.0 +172 7 training.batch_size 1.0 +172 7 training.label_smoothing 0.0014722927957093394 +172 8 model.embedding_dim 0.0 +172 8 training.batch_size 2.0 +172 8 training.label_smoothing 0.34886657543215427 +172 9 model.embedding_dim 0.0 +172 9 training.batch_size 1.0 +172 9 training.label_smoothing 0.13470825253996424 +172 10 model.embedding_dim 2.0 +172 10 training.batch_size 0.0 +172 10 training.label_smoothing 0.005730648399435944 +172 11 
model.embedding_dim 1.0 +172 11 training.batch_size 1.0 +172 11 training.label_smoothing 0.9217434940214163 +172 12 model.embedding_dim 2.0 +172 12 training.batch_size 0.0 +172 12 training.label_smoothing 0.003285152682070912 +172 13 model.embedding_dim 2.0 +172 13 training.batch_size 2.0 +172 13 training.label_smoothing 0.22745625892687688 +172 14 model.embedding_dim 2.0 +172 14 training.batch_size 2.0 +172 14 training.label_smoothing 0.03544075342601496 +172 15 model.embedding_dim 2.0 +172 15 training.batch_size 1.0 +172 15 training.label_smoothing 0.03728674303629274 +172 16 model.embedding_dim 0.0 +172 16 training.batch_size 2.0 +172 16 training.label_smoothing 0.4992625500526568 +172 17 model.embedding_dim 0.0 +172 17 training.batch_size 2.0 +172 17 training.label_smoothing 0.015945781601968887 +172 18 model.embedding_dim 0.0 +172 18 training.batch_size 0.0 +172 18 training.label_smoothing 0.08260682830886296 +172 19 model.embedding_dim 0.0 +172 19 training.batch_size 0.0 +172 19 training.label_smoothing 0.4943307175397017 +172 20 model.embedding_dim 1.0 +172 20 training.batch_size 0.0 +172 20 training.label_smoothing 0.012084127663273285 +172 21 model.embedding_dim 2.0 +172 21 training.batch_size 2.0 +172 21 training.label_smoothing 0.09569973559179785 +172 22 model.embedding_dim 1.0 +172 22 training.batch_size 2.0 +172 22 training.label_smoothing 0.0034898223814632985 +172 23 model.embedding_dim 2.0 +172 23 training.batch_size 2.0 +172 23 training.label_smoothing 0.778931004559498 +172 24 model.embedding_dim 2.0 +172 24 training.batch_size 0.0 +172 24 training.label_smoothing 0.22341542209443832 +172 25 model.embedding_dim 0.0 +172 25 training.batch_size 0.0 +172 25 training.label_smoothing 0.006504317047768822 +172 26 model.embedding_dim 2.0 +172 26 training.batch_size 0.0 +172 26 training.label_smoothing 0.11815858589625014 +172 27 model.embedding_dim 0.0 +172 27 training.batch_size 2.0 +172 27 training.label_smoothing 0.08053541088054315 +172 28 
model.embedding_dim 2.0 +172 28 training.batch_size 1.0 +172 28 training.label_smoothing 0.020172006737900556 +172 29 model.embedding_dim 1.0 +172 29 training.batch_size 2.0 +172 29 training.label_smoothing 0.43636464293023963 +172 30 model.embedding_dim 1.0 +172 30 training.batch_size 2.0 +172 30 training.label_smoothing 0.011025887843288317 +172 31 model.embedding_dim 2.0 +172 31 training.batch_size 0.0 +172 31 training.label_smoothing 0.2170697478075808 +172 32 model.embedding_dim 2.0 +172 32 training.batch_size 0.0 +172 32 training.label_smoothing 0.019722381139054026 +172 33 model.embedding_dim 2.0 +172 33 training.batch_size 0.0 +172 33 training.label_smoothing 0.1648614398823731 +172 34 model.embedding_dim 2.0 +172 34 training.batch_size 1.0 +172 34 training.label_smoothing 0.11728389578381068 +172 35 model.embedding_dim 1.0 +172 35 training.batch_size 0.0 +172 35 training.label_smoothing 0.18226820225351742 +172 36 model.embedding_dim 0.0 +172 36 training.batch_size 2.0 +172 36 training.label_smoothing 0.007496664365410604 +172 37 model.embedding_dim 2.0 +172 37 training.batch_size 1.0 +172 37 training.label_smoothing 0.03279170022571493 +172 38 model.embedding_dim 2.0 +172 38 training.batch_size 0.0 +172 38 training.label_smoothing 0.0014339732940458474 +172 39 model.embedding_dim 2.0 +172 39 training.batch_size 2.0 +172 39 training.label_smoothing 0.029955673516423864 +172 40 model.embedding_dim 1.0 +172 40 training.batch_size 2.0 +172 40 training.label_smoothing 0.7133609284848048 +172 41 model.embedding_dim 2.0 +172 41 training.batch_size 1.0 +172 41 training.label_smoothing 0.04633201341756457 +172 42 model.embedding_dim 0.0 +172 42 training.batch_size 1.0 +172 42 training.label_smoothing 0.008333928750794944 +172 43 model.embedding_dim 0.0 +172 43 training.batch_size 2.0 +172 43 training.label_smoothing 0.06707262139241646 +172 44 model.embedding_dim 2.0 +172 44 training.batch_size 1.0 +172 44 training.label_smoothing 0.5767731844277725 +172 45 
model.embedding_dim 1.0 +172 45 training.batch_size 2.0 +172 45 training.label_smoothing 0.19765021039236785 +172 46 model.embedding_dim 0.0 +172 46 training.batch_size 2.0 +172 46 training.label_smoothing 0.2618335229163135 +172 47 model.embedding_dim 1.0 +172 47 training.batch_size 0.0 +172 47 training.label_smoothing 0.3086022550631776 +172 48 model.embedding_dim 2.0 +172 48 training.batch_size 0.0 +172 48 training.label_smoothing 0.007595274039488638 +172 49 model.embedding_dim 1.0 +172 49 training.batch_size 2.0 +172 49 training.label_smoothing 0.4254811959290896 +172 50 model.embedding_dim 2.0 +172 50 training.batch_size 1.0 +172 50 training.label_smoothing 0.14039456846482065 +172 51 model.embedding_dim 1.0 +172 51 training.batch_size 2.0 +172 51 training.label_smoothing 0.001112712706971358 +172 52 model.embedding_dim 0.0 +172 52 training.batch_size 0.0 +172 52 training.label_smoothing 0.017864052883811543 +172 53 model.embedding_dim 2.0 +172 53 training.batch_size 0.0 +172 53 training.label_smoothing 0.18707908362483586 +172 54 model.embedding_dim 2.0 +172 54 training.batch_size 1.0 +172 54 training.label_smoothing 0.003259516658279633 +172 55 model.embedding_dim 2.0 +172 55 training.batch_size 0.0 +172 55 training.label_smoothing 0.10448076139618366 +172 56 model.embedding_dim 1.0 +172 56 training.batch_size 1.0 +172 56 training.label_smoothing 0.15569320102021078 +172 57 model.embedding_dim 2.0 +172 57 training.batch_size 1.0 +172 57 training.label_smoothing 0.062394551982818354 +172 58 model.embedding_dim 0.0 +172 58 training.batch_size 0.0 +172 58 training.label_smoothing 0.017820407893071027 +172 59 model.embedding_dim 0.0 +172 59 training.batch_size 0.0 +172 59 training.label_smoothing 0.0014790971892552336 +172 60 model.embedding_dim 0.0 +172 60 training.batch_size 1.0 +172 60 training.label_smoothing 0.06193687454952519 +172 61 model.embedding_dim 1.0 +172 61 training.batch_size 0.0 +172 61 training.label_smoothing 0.9383715394862296 +172 62 
model.embedding_dim 2.0 +172 62 training.batch_size 0.0 +172 62 training.label_smoothing 0.007606151175013693 +172 63 model.embedding_dim 0.0 +172 63 training.batch_size 1.0 +172 63 training.label_smoothing 0.05418954921480295 +172 64 model.embedding_dim 0.0 +172 64 training.batch_size 2.0 +172 64 training.label_smoothing 0.03461394369353608 +172 65 model.embedding_dim 0.0 +172 65 training.batch_size 2.0 +172 65 training.label_smoothing 0.012492115228951113 +172 66 model.embedding_dim 0.0 +172 66 training.batch_size 2.0 +172 66 training.label_smoothing 0.005092471825724798 +172 67 model.embedding_dim 2.0 +172 67 training.batch_size 1.0 +172 67 training.label_smoothing 0.31551601437627275 +172 68 model.embedding_dim 1.0 +172 68 training.batch_size 1.0 +172 68 training.label_smoothing 0.06113284658406275 +172 69 model.embedding_dim 0.0 +172 69 training.batch_size 2.0 +172 69 training.label_smoothing 0.004465303350115748 +172 70 model.embedding_dim 2.0 +172 70 training.batch_size 1.0 +172 70 training.label_smoothing 0.06163438885495277 +172 71 model.embedding_dim 2.0 +172 71 training.batch_size 2.0 +172 71 training.label_smoothing 0.9051051516878458 +172 72 model.embedding_dim 1.0 +172 72 training.batch_size 0.0 +172 72 training.label_smoothing 0.001453377453165757 +172 73 model.embedding_dim 1.0 +172 73 training.batch_size 0.0 +172 73 training.label_smoothing 0.06044482414086973 +172 74 model.embedding_dim 1.0 +172 74 training.batch_size 1.0 +172 74 training.label_smoothing 0.5368682073587943 +172 75 model.embedding_dim 2.0 +172 75 training.batch_size 1.0 +172 75 training.label_smoothing 0.15908107191509993 +172 76 model.embedding_dim 1.0 +172 76 training.batch_size 2.0 +172 76 training.label_smoothing 0.002315109888858729 +172 77 model.embedding_dim 1.0 +172 77 training.batch_size 1.0 +172 77 training.label_smoothing 0.0025038024589887505 +172 78 model.embedding_dim 0.0 +172 78 training.batch_size 1.0 +172 78 training.label_smoothing 0.07632914310705999 +172 79 
model.embedding_dim 0.0 +172 79 training.batch_size 1.0 +172 79 training.label_smoothing 0.6028428388225555 +172 80 model.embedding_dim 2.0 +172 80 training.batch_size 2.0 +172 80 training.label_smoothing 0.07897200008333663 +172 81 model.embedding_dim 1.0 +172 81 training.batch_size 0.0 +172 81 training.label_smoothing 0.011465508716515431 +172 82 model.embedding_dim 2.0 +172 82 training.batch_size 0.0 +172 82 training.label_smoothing 0.6804624415183659 +172 83 model.embedding_dim 2.0 +172 83 training.batch_size 1.0 +172 83 training.label_smoothing 0.0018832533266208375 +172 84 model.embedding_dim 1.0 +172 84 training.batch_size 2.0 +172 84 training.label_smoothing 0.0863883055442001 +172 85 model.embedding_dim 0.0 +172 85 training.batch_size 1.0 +172 85 training.label_smoothing 0.0072426629920683185 +172 86 model.embedding_dim 2.0 +172 86 training.batch_size 1.0 +172 86 training.label_smoothing 0.016768400726493368 +172 87 model.embedding_dim 0.0 +172 87 training.batch_size 1.0 +172 87 training.label_smoothing 0.006755372509703311 +172 88 model.embedding_dim 1.0 +172 88 training.batch_size 2.0 +172 88 training.label_smoothing 0.012542995376409452 +172 89 model.embedding_dim 1.0 +172 89 training.batch_size 0.0 +172 89 training.label_smoothing 0.024224808179336754 +172 90 model.embedding_dim 0.0 +172 90 training.batch_size 0.0 +172 90 training.label_smoothing 0.018523957055859075 +172 91 model.embedding_dim 2.0 +172 91 training.batch_size 0.0 +172 91 training.label_smoothing 0.8467732296135775 +172 92 model.embedding_dim 1.0 +172 92 training.batch_size 1.0 +172 92 training.label_smoothing 0.008102241289413597 +172 93 model.embedding_dim 1.0 +172 93 training.batch_size 0.0 +172 93 training.label_smoothing 0.11580110360046698 +172 94 model.embedding_dim 1.0 +172 94 training.batch_size 0.0 +172 94 training.label_smoothing 0.0013124568217389228 +172 95 model.embedding_dim 2.0 +172 95 training.batch_size 0.0 +172 95 training.label_smoothing 0.0364958560823579 +172 96 
model.embedding_dim 1.0 +172 96 training.batch_size 1.0 +172 96 training.label_smoothing 0.00966942387376406 +172 97 model.embedding_dim 0.0 +172 97 training.batch_size 1.0 +172 97 training.label_smoothing 0.025691383024479226 +172 98 model.embedding_dim 0.0 +172 98 training.batch_size 1.0 +172 98 training.label_smoothing 0.17981404503852536 +172 99 model.embedding_dim 2.0 +172 99 training.batch_size 1.0 +172 99 training.label_smoothing 0.07130421689793023 +172 100 model.embedding_dim 0.0 +172 100 training.batch_size 1.0 +172 100 training.label_smoothing 0.42490838825013155 +172 1 dataset """kinships""" +172 1 model """distmult""" +172 1 loss """crossentropy""" +172 1 regularizer """no""" +172 1 optimizer """adadelta""" +172 1 training_loop """lcwa""" +172 1 evaluator """rankbased""" +172 2 dataset """kinships""" +172 2 model """distmult""" +172 2 loss """crossentropy""" +172 2 regularizer """no""" +172 2 optimizer """adadelta""" +172 2 training_loop """lcwa""" +172 2 evaluator """rankbased""" +172 3 dataset """kinships""" +172 3 model """distmult""" +172 3 loss """crossentropy""" +172 3 regularizer """no""" +172 3 optimizer """adadelta""" +172 3 training_loop """lcwa""" +172 3 evaluator """rankbased""" +172 4 dataset """kinships""" +172 4 model """distmult""" +172 4 loss """crossentropy""" +172 4 regularizer """no""" +172 4 optimizer """adadelta""" +172 4 training_loop """lcwa""" +172 4 evaluator """rankbased""" +172 5 dataset """kinships""" +172 5 model """distmult""" +172 5 loss """crossentropy""" +172 5 regularizer """no""" +172 5 optimizer """adadelta""" +172 5 training_loop """lcwa""" +172 5 evaluator """rankbased""" +172 6 dataset """kinships""" +172 6 model """distmult""" +172 6 loss """crossentropy""" +172 6 regularizer """no""" +172 6 optimizer """adadelta""" +172 6 training_loop """lcwa""" +172 6 evaluator """rankbased""" +172 7 dataset """kinships""" +172 7 model """distmult""" +172 7 loss """crossentropy""" +172 7 regularizer """no""" +172 7 optimizer 
"""adadelta""" +172 7 training_loop """lcwa""" +172 7 evaluator """rankbased""" +172 8 dataset """kinships""" +172 8 model """distmult""" +172 8 loss """crossentropy""" +172 8 regularizer """no""" +172 8 optimizer """adadelta""" +172 8 training_loop """lcwa""" +172 8 evaluator """rankbased""" +172 9 dataset """kinships""" +172 9 model """distmult""" +172 9 loss """crossentropy""" +172 9 regularizer """no""" +172 9 optimizer """adadelta""" +172 9 training_loop """lcwa""" +172 9 evaluator """rankbased""" +172 10 dataset """kinships""" +172 10 model """distmult""" +172 10 loss """crossentropy""" +172 10 regularizer """no""" +172 10 optimizer """adadelta""" +172 10 training_loop """lcwa""" +172 10 evaluator """rankbased""" +172 11 dataset """kinships""" +172 11 model """distmult""" +172 11 loss """crossentropy""" +172 11 regularizer """no""" +172 11 optimizer """adadelta""" +172 11 training_loop """lcwa""" +172 11 evaluator """rankbased""" +172 12 dataset """kinships""" +172 12 model """distmult""" +172 12 loss """crossentropy""" +172 12 regularizer """no""" +172 12 optimizer """adadelta""" +172 12 training_loop """lcwa""" +172 12 evaluator """rankbased""" +172 13 dataset """kinships""" +172 13 model """distmult""" +172 13 loss """crossentropy""" +172 13 regularizer """no""" +172 13 optimizer """adadelta""" +172 13 training_loop """lcwa""" +172 13 evaluator """rankbased""" +172 14 dataset """kinships""" +172 14 model """distmult""" +172 14 loss """crossentropy""" +172 14 regularizer """no""" +172 14 optimizer """adadelta""" +172 14 training_loop """lcwa""" +172 14 evaluator """rankbased""" +172 15 dataset """kinships""" +172 15 model """distmult""" +172 15 loss """crossentropy""" +172 15 regularizer """no""" +172 15 optimizer """adadelta""" +172 15 training_loop """lcwa""" +172 15 evaluator """rankbased""" +172 16 dataset """kinships""" +172 16 model """distmult""" +172 16 loss """crossentropy""" +172 16 regularizer """no""" +172 16 optimizer """adadelta""" +172 16 
training_loop """lcwa""" +172 16 evaluator """rankbased""" +172 17 dataset """kinships""" +172 17 model """distmult""" +172 17 loss """crossentropy""" +172 17 regularizer """no""" +172 17 optimizer """adadelta""" +172 17 training_loop """lcwa""" +172 17 evaluator """rankbased""" +172 18 dataset """kinships""" +172 18 model """distmult""" +172 18 loss """crossentropy""" +172 18 regularizer """no""" +172 18 optimizer """adadelta""" +172 18 training_loop """lcwa""" +172 18 evaluator """rankbased""" +172 19 dataset """kinships""" +172 19 model """distmult""" +172 19 loss """crossentropy""" +172 19 regularizer """no""" +172 19 optimizer """adadelta""" +172 19 training_loop """lcwa""" +172 19 evaluator """rankbased""" +172 20 dataset """kinships""" +172 20 model """distmult""" +172 20 loss """crossentropy""" +172 20 regularizer """no""" +172 20 optimizer """adadelta""" +172 20 training_loop """lcwa""" +172 20 evaluator """rankbased""" +172 21 dataset """kinships""" +172 21 model """distmult""" +172 21 loss """crossentropy""" +172 21 regularizer """no""" +172 21 optimizer """adadelta""" +172 21 training_loop """lcwa""" +172 21 evaluator """rankbased""" +172 22 dataset """kinships""" +172 22 model """distmult""" +172 22 loss """crossentropy""" +172 22 regularizer """no""" +172 22 optimizer """adadelta""" +172 22 training_loop """lcwa""" +172 22 evaluator """rankbased""" +172 23 dataset """kinships""" +172 23 model """distmult""" +172 23 loss """crossentropy""" +172 23 regularizer """no""" +172 23 optimizer """adadelta""" +172 23 training_loop """lcwa""" +172 23 evaluator """rankbased""" +172 24 dataset """kinships""" +172 24 model """distmult""" +172 24 loss """crossentropy""" +172 24 regularizer """no""" +172 24 optimizer """adadelta""" +172 24 training_loop """lcwa""" +172 24 evaluator """rankbased""" +172 25 dataset """kinships""" +172 25 model """distmult""" +172 25 loss """crossentropy""" +172 25 regularizer """no""" +172 25 optimizer """adadelta""" +172 25 
training_loop """lcwa""" +172 25 evaluator """rankbased""" +172 26 dataset """kinships""" +172 26 model """distmult""" +172 26 loss """crossentropy""" +172 26 regularizer """no""" +172 26 optimizer """adadelta""" +172 26 training_loop """lcwa""" +172 26 evaluator """rankbased""" +172 27 dataset """kinships""" +172 27 model """distmult""" +172 27 loss """crossentropy""" +172 27 regularizer """no""" +172 27 optimizer """adadelta""" +172 27 training_loop """lcwa""" +172 27 evaluator """rankbased""" +172 28 dataset """kinships""" +172 28 model """distmult""" +172 28 loss """crossentropy""" +172 28 regularizer """no""" +172 28 optimizer """adadelta""" +172 28 training_loop """lcwa""" +172 28 evaluator """rankbased""" +172 29 dataset """kinships""" +172 29 model """distmult""" +172 29 loss """crossentropy""" +172 29 regularizer """no""" +172 29 optimizer """adadelta""" +172 29 training_loop """lcwa""" +172 29 evaluator """rankbased""" +172 30 dataset """kinships""" +172 30 model """distmult""" +172 30 loss """crossentropy""" +172 30 regularizer """no""" +172 30 optimizer """adadelta""" +172 30 training_loop """lcwa""" +172 30 evaluator """rankbased""" +172 31 dataset """kinships""" +172 31 model """distmult""" +172 31 loss """crossentropy""" +172 31 regularizer """no""" +172 31 optimizer """adadelta""" +172 31 training_loop """lcwa""" +172 31 evaluator """rankbased""" +172 32 dataset """kinships""" +172 32 model """distmult""" +172 32 loss """crossentropy""" +172 32 regularizer """no""" +172 32 optimizer """adadelta""" +172 32 training_loop """lcwa""" +172 32 evaluator """rankbased""" +172 33 dataset """kinships""" +172 33 model """distmult""" +172 33 loss """crossentropy""" +172 33 regularizer """no""" +172 33 optimizer """adadelta""" +172 33 training_loop """lcwa""" +172 33 evaluator """rankbased""" +172 34 dataset """kinships""" +172 34 model """distmult""" +172 34 loss """crossentropy""" +172 34 regularizer """no""" +172 34 optimizer """adadelta""" +172 34 
training_loop """lcwa""" +172 34 evaluator """rankbased""" +172 35 dataset """kinships""" +172 35 model """distmult""" +172 35 loss """crossentropy""" +172 35 regularizer """no""" +172 35 optimizer """adadelta""" +172 35 training_loop """lcwa""" +172 35 evaluator """rankbased""" +172 36 dataset """kinships""" +172 36 model """distmult""" +172 36 loss """crossentropy""" +172 36 regularizer """no""" +172 36 optimizer """adadelta""" +172 36 training_loop """lcwa""" +172 36 evaluator """rankbased""" +172 37 dataset """kinships""" +172 37 model """distmult""" +172 37 loss """crossentropy""" +172 37 regularizer """no""" +172 37 optimizer """adadelta""" +172 37 training_loop """lcwa""" +172 37 evaluator """rankbased""" +172 38 dataset """kinships""" +172 38 model """distmult""" +172 38 loss """crossentropy""" +172 38 regularizer """no""" +172 38 optimizer """adadelta""" +172 38 training_loop """lcwa""" +172 38 evaluator """rankbased""" +172 39 dataset """kinships""" +172 39 model """distmult""" +172 39 loss """crossentropy""" +172 39 regularizer """no""" +172 39 optimizer """adadelta""" +172 39 training_loop """lcwa""" +172 39 evaluator """rankbased""" +172 40 dataset """kinships""" +172 40 model """distmult""" +172 40 loss """crossentropy""" +172 40 regularizer """no""" +172 40 optimizer """adadelta""" +172 40 training_loop """lcwa""" +172 40 evaluator """rankbased""" +172 41 dataset """kinships""" +172 41 model """distmult""" +172 41 loss """crossentropy""" +172 41 regularizer """no""" +172 41 optimizer """adadelta""" +172 41 training_loop """lcwa""" +172 41 evaluator """rankbased""" +172 42 dataset """kinships""" +172 42 model """distmult""" +172 42 loss """crossentropy""" +172 42 regularizer """no""" +172 42 optimizer """adadelta""" +172 42 training_loop """lcwa""" +172 42 evaluator """rankbased""" +172 43 dataset """kinships""" +172 43 model """distmult""" +172 43 loss """crossentropy""" +172 43 regularizer """no""" +172 43 optimizer """adadelta""" +172 43 
training_loop """lcwa""" +172 43 evaluator """rankbased""" +172 44 dataset """kinships""" +172 44 model """distmult""" +172 44 loss """crossentropy""" +172 44 regularizer """no""" +172 44 optimizer """adadelta""" +172 44 training_loop """lcwa""" +172 44 evaluator """rankbased""" +172 45 dataset """kinships""" +172 45 model """distmult""" +172 45 loss """crossentropy""" +172 45 regularizer """no""" +172 45 optimizer """adadelta""" +172 45 training_loop """lcwa""" +172 45 evaluator """rankbased""" +172 46 dataset """kinships""" +172 46 model """distmult""" +172 46 loss """crossentropy""" +172 46 regularizer """no""" +172 46 optimizer """adadelta""" +172 46 training_loop """lcwa""" +172 46 evaluator """rankbased""" +172 47 dataset """kinships""" +172 47 model """distmult""" +172 47 loss """crossentropy""" +172 47 regularizer """no""" +172 47 optimizer """adadelta""" +172 47 training_loop """lcwa""" +172 47 evaluator """rankbased""" +172 48 dataset """kinships""" +172 48 model """distmult""" +172 48 loss """crossentropy""" +172 48 regularizer """no""" +172 48 optimizer """adadelta""" +172 48 training_loop """lcwa""" +172 48 evaluator """rankbased""" +172 49 dataset """kinships""" +172 49 model """distmult""" +172 49 loss """crossentropy""" +172 49 regularizer """no""" +172 49 optimizer """adadelta""" +172 49 training_loop """lcwa""" +172 49 evaluator """rankbased""" +172 50 dataset """kinships""" +172 50 model """distmult""" +172 50 loss """crossentropy""" +172 50 regularizer """no""" +172 50 optimizer """adadelta""" +172 50 training_loop """lcwa""" +172 50 evaluator """rankbased""" +172 51 dataset """kinships""" +172 51 model """distmult""" +172 51 loss """crossentropy""" +172 51 regularizer """no""" +172 51 optimizer """adadelta""" +172 51 training_loop """lcwa""" +172 51 evaluator """rankbased""" +172 52 dataset """kinships""" +172 52 model """distmult""" +172 52 loss """crossentropy""" +172 52 regularizer """no""" +172 52 optimizer """adadelta""" +172 52 
training_loop """lcwa""" +172 52 evaluator """rankbased""" +172 53 dataset """kinships""" +172 53 model """distmult""" +172 53 loss """crossentropy""" +172 53 regularizer """no""" +172 53 optimizer """adadelta""" +172 53 training_loop """lcwa""" +172 53 evaluator """rankbased""" +172 54 dataset """kinships""" +172 54 model """distmult""" +172 54 loss """crossentropy""" +172 54 regularizer """no""" +172 54 optimizer """adadelta""" +172 54 training_loop """lcwa""" +172 54 evaluator """rankbased""" +172 55 dataset """kinships""" +172 55 model """distmult""" +172 55 loss """crossentropy""" +172 55 regularizer """no""" +172 55 optimizer """adadelta""" +172 55 training_loop """lcwa""" +172 55 evaluator """rankbased""" +172 56 dataset """kinships""" +172 56 model """distmult""" +172 56 loss """crossentropy""" +172 56 regularizer """no""" +172 56 optimizer """adadelta""" +172 56 training_loop """lcwa""" +172 56 evaluator """rankbased""" +172 57 dataset """kinships""" +172 57 model """distmult""" +172 57 loss """crossentropy""" +172 57 regularizer """no""" +172 57 optimizer """adadelta""" +172 57 training_loop """lcwa""" +172 57 evaluator """rankbased""" +172 58 dataset """kinships""" +172 58 model """distmult""" +172 58 loss """crossentropy""" +172 58 regularizer """no""" +172 58 optimizer """adadelta""" +172 58 training_loop """lcwa""" +172 58 evaluator """rankbased""" +172 59 dataset """kinships""" +172 59 model """distmult""" +172 59 loss """crossentropy""" +172 59 regularizer """no""" +172 59 optimizer """adadelta""" +172 59 training_loop """lcwa""" +172 59 evaluator """rankbased""" +172 60 dataset """kinships""" +172 60 model """distmult""" +172 60 loss """crossentropy""" +172 60 regularizer """no""" +172 60 optimizer """adadelta""" +172 60 training_loop """lcwa""" +172 60 evaluator """rankbased""" +172 61 dataset """kinships""" +172 61 model """distmult""" +172 61 loss """crossentropy""" +172 61 regularizer """no""" +172 61 optimizer """adadelta""" +172 61 
training_loop """lcwa""" +172 61 evaluator """rankbased""" +172 62 dataset """kinships""" +172 62 model """distmult""" +172 62 loss """crossentropy""" +172 62 regularizer """no""" +172 62 optimizer """adadelta""" +172 62 training_loop """lcwa""" +172 62 evaluator """rankbased""" +172 63 dataset """kinships""" +172 63 model """distmult""" +172 63 loss """crossentropy""" +172 63 regularizer """no""" +172 63 optimizer """adadelta""" +172 63 training_loop """lcwa""" +172 63 evaluator """rankbased""" +172 64 dataset """kinships""" +172 64 model """distmult""" +172 64 loss """crossentropy""" +172 64 regularizer """no""" +172 64 optimizer """adadelta""" +172 64 training_loop """lcwa""" +172 64 evaluator """rankbased""" +172 65 dataset """kinships""" +172 65 model """distmult""" +172 65 loss """crossentropy""" +172 65 regularizer """no""" +172 65 optimizer """adadelta""" +172 65 training_loop """lcwa""" +172 65 evaluator """rankbased""" +172 66 dataset """kinships""" +172 66 model """distmult""" +172 66 loss """crossentropy""" +172 66 regularizer """no""" +172 66 optimizer """adadelta""" +172 66 training_loop """lcwa""" +172 66 evaluator """rankbased""" +172 67 dataset """kinships""" +172 67 model """distmult""" +172 67 loss """crossentropy""" +172 67 regularizer """no""" +172 67 optimizer """adadelta""" +172 67 training_loop """lcwa""" +172 67 evaluator """rankbased""" +172 68 dataset """kinships""" +172 68 model """distmult""" +172 68 loss """crossentropy""" +172 68 regularizer """no""" +172 68 optimizer """adadelta""" +172 68 training_loop """lcwa""" +172 68 evaluator """rankbased""" +172 69 dataset """kinships""" +172 69 model """distmult""" +172 69 loss """crossentropy""" +172 69 regularizer """no""" +172 69 optimizer """adadelta""" +172 69 training_loop """lcwa""" +172 69 evaluator """rankbased""" +172 70 dataset """kinships""" +172 70 model """distmult""" +172 70 loss """crossentropy""" +172 70 regularizer """no""" +172 70 optimizer """adadelta""" +172 70 
training_loop """lcwa""" +172 70 evaluator """rankbased""" +172 71 dataset """kinships""" +172 71 model """distmult""" +172 71 loss """crossentropy""" +172 71 regularizer """no""" +172 71 optimizer """adadelta""" +172 71 training_loop """lcwa""" +172 71 evaluator """rankbased""" +172 72 dataset """kinships""" +172 72 model """distmult""" +172 72 loss """crossentropy""" +172 72 regularizer """no""" +172 72 optimizer """adadelta""" +172 72 training_loop """lcwa""" +172 72 evaluator """rankbased""" +172 73 dataset """kinships""" +172 73 model """distmult""" +172 73 loss """crossentropy""" +172 73 regularizer """no""" +172 73 optimizer """adadelta""" +172 73 training_loop """lcwa""" +172 73 evaluator """rankbased""" +172 74 dataset """kinships""" +172 74 model """distmult""" +172 74 loss """crossentropy""" +172 74 regularizer """no""" +172 74 optimizer """adadelta""" +172 74 training_loop """lcwa""" +172 74 evaluator """rankbased""" +172 75 dataset """kinships""" +172 75 model """distmult""" +172 75 loss """crossentropy""" +172 75 regularizer """no""" +172 75 optimizer """adadelta""" +172 75 training_loop """lcwa""" +172 75 evaluator """rankbased""" +172 76 dataset """kinships""" +172 76 model """distmult""" +172 76 loss """crossentropy""" +172 76 regularizer """no""" +172 76 optimizer """adadelta""" +172 76 training_loop """lcwa""" +172 76 evaluator """rankbased""" +172 77 dataset """kinships""" +172 77 model """distmult""" +172 77 loss """crossentropy""" +172 77 regularizer """no""" +172 77 optimizer """adadelta""" +172 77 training_loop """lcwa""" +172 77 evaluator """rankbased""" +172 78 dataset """kinships""" +172 78 model """distmult""" +172 78 loss """crossentropy""" +172 78 regularizer """no""" +172 78 optimizer """adadelta""" +172 78 training_loop """lcwa""" +172 78 evaluator """rankbased""" +172 79 dataset """kinships""" +172 79 model """distmult""" +172 79 loss """crossentropy""" +172 79 regularizer """no""" +172 79 optimizer """adadelta""" +172 79 
training_loop """lcwa""" +172 79 evaluator """rankbased""" +172 80 dataset """kinships""" +172 80 model """distmult""" +172 80 loss """crossentropy""" +172 80 regularizer """no""" +172 80 optimizer """adadelta""" +172 80 training_loop """lcwa""" +172 80 evaluator """rankbased""" +172 81 dataset """kinships""" +172 81 model """distmult""" +172 81 loss """crossentropy""" +172 81 regularizer """no""" +172 81 optimizer """adadelta""" +172 81 training_loop """lcwa""" +172 81 evaluator """rankbased""" +172 82 dataset """kinships""" +172 82 model """distmult""" +172 82 loss """crossentropy""" +172 82 regularizer """no""" +172 82 optimizer """adadelta""" +172 82 training_loop """lcwa""" +172 82 evaluator """rankbased""" +172 83 dataset """kinships""" +172 83 model """distmult""" +172 83 loss """crossentropy""" +172 83 regularizer """no""" +172 83 optimizer """adadelta""" +172 83 training_loop """lcwa""" +172 83 evaluator """rankbased""" +172 84 dataset """kinships""" +172 84 model """distmult""" +172 84 loss """crossentropy""" +172 84 regularizer """no""" +172 84 optimizer """adadelta""" +172 84 training_loop """lcwa""" +172 84 evaluator """rankbased""" +172 85 dataset """kinships""" +172 85 model """distmult""" +172 85 loss """crossentropy""" +172 85 regularizer """no""" +172 85 optimizer """adadelta""" +172 85 training_loop """lcwa""" +172 85 evaluator """rankbased""" +172 86 dataset """kinships""" +172 86 model """distmult""" +172 86 loss """crossentropy""" +172 86 regularizer """no""" +172 86 optimizer """adadelta""" +172 86 training_loop """lcwa""" +172 86 evaluator """rankbased""" +172 87 dataset """kinships""" +172 87 model """distmult""" +172 87 loss """crossentropy""" +172 87 regularizer """no""" +172 87 optimizer """adadelta""" +172 87 training_loop """lcwa""" +172 87 evaluator """rankbased""" +172 88 dataset """kinships""" +172 88 model """distmult""" +172 88 loss """crossentropy""" +172 88 regularizer """no""" +172 88 optimizer """adadelta""" +172 88 
training_loop """lcwa""" +172 88 evaluator """rankbased""" +172 89 dataset """kinships""" +172 89 model """distmult""" +172 89 loss """crossentropy""" +172 89 regularizer """no""" +172 89 optimizer """adadelta""" +172 89 training_loop """lcwa""" +172 89 evaluator """rankbased""" +172 90 dataset """kinships""" +172 90 model """distmult""" +172 90 loss """crossentropy""" +172 90 regularizer """no""" +172 90 optimizer """adadelta""" +172 90 training_loop """lcwa""" +172 90 evaluator """rankbased""" +172 91 dataset """kinships""" +172 91 model """distmult""" +172 91 loss """crossentropy""" +172 91 regularizer """no""" +172 91 optimizer """adadelta""" +172 91 training_loop """lcwa""" +172 91 evaluator """rankbased""" +172 92 dataset """kinships""" +172 92 model """distmult""" +172 92 loss """crossentropy""" +172 92 regularizer """no""" +172 92 optimizer """adadelta""" +172 92 training_loop """lcwa""" +172 92 evaluator """rankbased""" +172 93 dataset """kinships""" +172 93 model """distmult""" +172 93 loss """crossentropy""" +172 93 regularizer """no""" +172 93 optimizer """adadelta""" +172 93 training_loop """lcwa""" +172 93 evaluator """rankbased""" +172 94 dataset """kinships""" +172 94 model """distmult""" +172 94 loss """crossentropy""" +172 94 regularizer """no""" +172 94 optimizer """adadelta""" +172 94 training_loop """lcwa""" +172 94 evaluator """rankbased""" +172 95 dataset """kinships""" +172 95 model """distmult""" +172 95 loss """crossentropy""" +172 95 regularizer """no""" +172 95 optimizer """adadelta""" +172 95 training_loop """lcwa""" +172 95 evaluator """rankbased""" +172 96 dataset """kinships""" +172 96 model """distmult""" +172 96 loss """crossentropy""" +172 96 regularizer """no""" +172 96 optimizer """adadelta""" +172 96 training_loop """lcwa""" +172 96 evaluator """rankbased""" +172 97 dataset """kinships""" +172 97 model """distmult""" +172 97 loss """crossentropy""" +172 97 regularizer """no""" +172 97 optimizer """adadelta""" +172 97 
training_loop """lcwa""" +172 97 evaluator """rankbased""" +172 98 dataset """kinships""" +172 98 model """distmult""" +172 98 loss """crossentropy""" +172 98 regularizer """no""" +172 98 optimizer """adadelta""" +172 98 training_loop """lcwa""" +172 98 evaluator """rankbased""" +172 99 dataset """kinships""" +172 99 model """distmult""" +172 99 loss """crossentropy""" +172 99 regularizer """no""" +172 99 optimizer """adadelta""" +172 99 training_loop """lcwa""" +172 99 evaluator """rankbased""" +172 100 dataset """kinships""" +172 100 model """distmult""" +172 100 loss """crossentropy""" +172 100 regularizer """no""" +172 100 optimizer """adadelta""" +172 100 training_loop """lcwa""" +172 100 evaluator """rankbased""" +173 1 model.embedding_dim 2.0 +173 1 training.batch_size 1.0 +173 1 training.label_smoothing 0.13030372393375833 +173 2 model.embedding_dim 1.0 +173 2 training.batch_size 0.0 +173 2 training.label_smoothing 0.07624038346161639 +173 3 model.embedding_dim 2.0 +173 3 training.batch_size 0.0 +173 3 training.label_smoothing 0.10914589101868317 +173 4 model.embedding_dim 0.0 +173 4 training.batch_size 1.0 +173 4 training.label_smoothing 0.0015093307167153502 +173 5 model.embedding_dim 1.0 +173 5 training.batch_size 0.0 +173 5 training.label_smoothing 0.02124483228889231 +173 6 model.embedding_dim 1.0 +173 6 training.batch_size 1.0 +173 6 training.label_smoothing 0.002615310576224586 +173 7 model.embedding_dim 2.0 +173 7 training.batch_size 0.0 +173 7 training.label_smoothing 0.00516627179419956 +173 8 model.embedding_dim 1.0 +173 8 training.batch_size 0.0 +173 8 training.label_smoothing 0.3465841802595444 +173 9 model.embedding_dim 1.0 +173 9 training.batch_size 1.0 +173 9 training.label_smoothing 0.18609014114433833 +173 10 model.embedding_dim 1.0 +173 10 training.batch_size 0.0 +173 10 training.label_smoothing 0.0016164582453947211 +173 11 model.embedding_dim 2.0 +173 11 training.batch_size 1.0 +173 11 training.label_smoothing 0.029320860513553964 +173 
12 model.embedding_dim 0.0 +173 12 training.batch_size 0.0 +173 12 training.label_smoothing 0.09072519995052003 +173 13 model.embedding_dim 1.0 +173 13 training.batch_size 1.0 +173 13 training.label_smoothing 0.02134111219327262 +173 14 model.embedding_dim 0.0 +173 14 training.batch_size 1.0 +173 14 training.label_smoothing 0.024295379255152537 +173 15 model.embedding_dim 0.0 +173 15 training.batch_size 1.0 +173 15 training.label_smoothing 0.028726819933321303 +173 16 model.embedding_dim 1.0 +173 16 training.batch_size 1.0 +173 16 training.label_smoothing 0.009469920248983691 +173 17 model.embedding_dim 1.0 +173 17 training.batch_size 0.0 +173 17 training.label_smoothing 0.01599007804216072 +173 18 model.embedding_dim 1.0 +173 18 training.batch_size 2.0 +173 18 training.label_smoothing 0.5060636296598992 +173 19 model.embedding_dim 2.0 +173 19 training.batch_size 2.0 +173 19 training.label_smoothing 0.001022144144903237 +173 20 model.embedding_dim 2.0 +173 20 training.batch_size 2.0 +173 20 training.label_smoothing 0.030945134652111655 +173 21 model.embedding_dim 0.0 +173 21 training.batch_size 2.0 +173 21 training.label_smoothing 0.0010814980468944883 +173 22 model.embedding_dim 2.0 +173 22 training.batch_size 1.0 +173 22 training.label_smoothing 0.026365228801981392 +173 23 model.embedding_dim 0.0 +173 23 training.batch_size 2.0 +173 23 training.label_smoothing 0.004486384776207925 +173 24 model.embedding_dim 2.0 +173 24 training.batch_size 1.0 +173 24 training.label_smoothing 0.8507516027280513 +173 25 model.embedding_dim 1.0 +173 25 training.batch_size 2.0 +173 25 training.label_smoothing 0.004900518908824963 +173 26 model.embedding_dim 2.0 +173 26 training.batch_size 0.0 +173 26 training.label_smoothing 0.08883565252153129 +173 27 model.embedding_dim 0.0 +173 27 training.batch_size 1.0 +173 27 training.label_smoothing 0.5740822753425313 +173 28 model.embedding_dim 0.0 +173 28 training.batch_size 2.0 +173 28 training.label_smoothing 0.015064014381909626 +173 29 
model.embedding_dim 2.0 +173 29 training.batch_size 2.0 +173 29 training.label_smoothing 0.5135188072367434 +173 30 model.embedding_dim 2.0 +173 30 training.batch_size 1.0 +173 30 training.label_smoothing 0.044777124819980155 +173 31 model.embedding_dim 0.0 +173 31 training.batch_size 0.0 +173 31 training.label_smoothing 0.06831840568606362 +173 32 model.embedding_dim 0.0 +173 32 training.batch_size 1.0 +173 32 training.label_smoothing 0.044512523883073266 +173 33 model.embedding_dim 2.0 +173 33 training.batch_size 1.0 +173 33 training.label_smoothing 0.16643030656962327 +173 34 model.embedding_dim 0.0 +173 34 training.batch_size 1.0 +173 34 training.label_smoothing 0.0034009170503506224 +173 35 model.embedding_dim 2.0 +173 35 training.batch_size 2.0 +173 35 training.label_smoothing 0.0022871698076941727 +173 36 model.embedding_dim 0.0 +173 36 training.batch_size 2.0 +173 36 training.label_smoothing 0.00418883915984088 +173 37 model.embedding_dim 2.0 +173 37 training.batch_size 1.0 +173 37 training.label_smoothing 0.006770474369433607 +173 38 model.embedding_dim 0.0 +173 38 training.batch_size 2.0 +173 38 training.label_smoothing 0.0030671588479311207 +173 39 model.embedding_dim 2.0 +173 39 training.batch_size 1.0 +173 39 training.label_smoothing 0.12145037417788661 +173 40 model.embedding_dim 1.0 +173 40 training.batch_size 1.0 +173 40 training.label_smoothing 0.0064914825945015945 +173 41 model.embedding_dim 1.0 +173 41 training.batch_size 1.0 +173 41 training.label_smoothing 0.002634853941660101 +173 42 model.embedding_dim 0.0 +173 42 training.batch_size 2.0 +173 42 training.label_smoothing 0.001322690125453984 +173 43 model.embedding_dim 0.0 +173 43 training.batch_size 2.0 +173 43 training.label_smoothing 0.0012562803536226626 +173 44 model.embedding_dim 1.0 +173 44 training.batch_size 2.0 +173 44 training.label_smoothing 0.009127666441792285 +173 45 model.embedding_dim 1.0 +173 45 training.batch_size 1.0 +173 45 training.label_smoothing 0.0018955901502614183 
+173 46 model.embedding_dim 0.0 +173 46 training.batch_size 0.0 +173 46 training.label_smoothing 0.005974402911871453 +173 47 model.embedding_dim 0.0 +173 47 training.batch_size 0.0 +173 47 training.label_smoothing 0.08248683765628693 +173 48 model.embedding_dim 0.0 +173 48 training.batch_size 2.0 +173 48 training.label_smoothing 0.022331110472941694 +173 49 model.embedding_dim 2.0 +173 49 training.batch_size 2.0 +173 49 training.label_smoothing 0.7148785580000424 +173 50 model.embedding_dim 1.0 +173 50 training.batch_size 1.0 +173 50 training.label_smoothing 0.5976450312813378 +173 51 model.embedding_dim 0.0 +173 51 training.batch_size 1.0 +173 51 training.label_smoothing 0.32000742978621444 +173 52 model.embedding_dim 1.0 +173 52 training.batch_size 1.0 +173 52 training.label_smoothing 0.01783703077763044 +173 53 model.embedding_dim 1.0 +173 53 training.batch_size 0.0 +173 53 training.label_smoothing 0.2521075457809526 +173 54 model.embedding_dim 1.0 +173 54 training.batch_size 2.0 +173 54 training.label_smoothing 0.11814096646657178 +173 55 model.embedding_dim 2.0 +173 55 training.batch_size 2.0 +173 55 training.label_smoothing 0.002903080124622559 +173 56 model.embedding_dim 1.0 +173 56 training.batch_size 1.0 +173 56 training.label_smoothing 0.013155291125441922 +173 57 model.embedding_dim 2.0 +173 57 training.batch_size 1.0 +173 57 training.label_smoothing 0.0012445918836645363 +173 58 model.embedding_dim 1.0 +173 58 training.batch_size 1.0 +173 58 training.label_smoothing 0.047232794240078764 +173 59 model.embedding_dim 2.0 +173 59 training.batch_size 2.0 +173 59 training.label_smoothing 0.0720536345546811 +173 60 model.embedding_dim 1.0 +173 60 training.batch_size 0.0 +173 60 training.label_smoothing 0.0014974436657238532 +173 61 model.embedding_dim 1.0 +173 61 training.batch_size 2.0 +173 61 training.label_smoothing 0.10923210436441219 +173 62 model.embedding_dim 2.0 +173 62 training.batch_size 2.0 +173 62 training.label_smoothing 0.0012770925350711981 
+173 63 model.embedding_dim 0.0 +173 63 training.batch_size 2.0 +173 63 training.label_smoothing 0.002419142783687355 +173 64 model.embedding_dim 0.0 +173 64 training.batch_size 1.0 +173 64 training.label_smoothing 0.0021767866695882815 +173 65 model.embedding_dim 1.0 +173 65 training.batch_size 0.0 +173 65 training.label_smoothing 0.0014500062978560731 +173 66 model.embedding_dim 0.0 +173 66 training.batch_size 1.0 +173 66 training.label_smoothing 0.001275530115877466 +173 67 model.embedding_dim 2.0 +173 67 training.batch_size 1.0 +173 67 training.label_smoothing 0.0026895742066885926 +173 68 model.embedding_dim 1.0 +173 68 training.batch_size 2.0 +173 68 training.label_smoothing 0.07780817280891353 +173 69 model.embedding_dim 2.0 +173 69 training.batch_size 1.0 +173 69 training.label_smoothing 0.3724071472915104 +173 70 model.embedding_dim 2.0 +173 70 training.batch_size 2.0 +173 70 training.label_smoothing 0.2186841519865335 +173 71 model.embedding_dim 1.0 +173 71 training.batch_size 0.0 +173 71 training.label_smoothing 0.002138755814318046 +173 72 model.embedding_dim 1.0 +173 72 training.batch_size 0.0 +173 72 training.label_smoothing 0.9761156402557236 +173 73 model.embedding_dim 1.0 +173 73 training.batch_size 1.0 +173 73 training.label_smoothing 0.13987769441113676 +173 74 model.embedding_dim 0.0 +173 74 training.batch_size 2.0 +173 74 training.label_smoothing 0.04453495130591911 +173 75 model.embedding_dim 1.0 +173 75 training.batch_size 1.0 +173 75 training.label_smoothing 0.3573895539416146 +173 76 model.embedding_dim 1.0 +173 76 training.batch_size 0.0 +173 76 training.label_smoothing 0.03138593064725193 +173 77 model.embedding_dim 2.0 +173 77 training.batch_size 1.0 +173 77 training.label_smoothing 0.01629861945572059 +173 78 model.embedding_dim 0.0 +173 78 training.batch_size 0.0 +173 78 training.label_smoothing 0.10075052654903668 +173 79 model.embedding_dim 0.0 +173 79 training.batch_size 1.0 +173 79 training.label_smoothing 0.02746003663632141 +173 
80 model.embedding_dim 0.0 +173 80 training.batch_size 2.0 +173 80 training.label_smoothing 0.2624899425517518 +173 81 model.embedding_dim 2.0 +173 81 training.batch_size 2.0 +173 81 training.label_smoothing 0.5318392239673821 +173 82 model.embedding_dim 2.0 +173 82 training.batch_size 0.0 +173 82 training.label_smoothing 0.6455236493513123 +173 83 model.embedding_dim 0.0 +173 83 training.batch_size 2.0 +173 83 training.label_smoothing 0.021665921717907845 +173 84 model.embedding_dim 0.0 +173 84 training.batch_size 2.0 +173 84 training.label_smoothing 0.11479507766286053 +173 85 model.embedding_dim 1.0 +173 85 training.batch_size 0.0 +173 85 training.label_smoothing 0.032199996532633265 +173 86 model.embedding_dim 0.0 +173 86 training.batch_size 2.0 +173 86 training.label_smoothing 0.009533648415289904 +173 87 model.embedding_dim 0.0 +173 87 training.batch_size 0.0 +173 87 training.label_smoothing 0.17685468948920197 +173 88 model.embedding_dim 2.0 +173 88 training.batch_size 0.0 +173 88 training.label_smoothing 0.21528453371530626 +173 89 model.embedding_dim 2.0 +173 89 training.batch_size 1.0 +173 89 training.label_smoothing 0.6440356906979893 +173 90 model.embedding_dim 2.0 +173 90 training.batch_size 1.0 +173 90 training.label_smoothing 0.5597250403368768 +173 91 model.embedding_dim 0.0 +173 91 training.batch_size 2.0 +173 91 training.label_smoothing 0.9651338985689115 +173 92 model.embedding_dim 2.0 +173 92 training.batch_size 1.0 +173 92 training.label_smoothing 0.06500685749098703 +173 93 model.embedding_dim 0.0 +173 93 training.batch_size 0.0 +173 93 training.label_smoothing 0.013488007845205058 +173 94 model.embedding_dim 0.0 +173 94 training.batch_size 1.0 +173 94 training.label_smoothing 0.0017776527433085102 +173 95 model.embedding_dim 1.0 +173 95 training.batch_size 1.0 +173 95 training.label_smoothing 0.0933620187597033 +173 96 model.embedding_dim 2.0 +173 96 training.batch_size 0.0 +173 96 training.label_smoothing 0.1052128204789199 +173 97 
model.embedding_dim 1.0 +173 97 training.batch_size 2.0 +173 97 training.label_smoothing 0.7954602466680927 +173 98 model.embedding_dim 2.0 +173 98 training.batch_size 2.0 +173 98 training.label_smoothing 0.004774047865974264 +173 99 model.embedding_dim 2.0 +173 99 training.batch_size 2.0 +173 99 training.label_smoothing 0.35343924611879113 +173 100 model.embedding_dim 2.0 +173 100 training.batch_size 2.0 +173 100 training.label_smoothing 0.01914408911737782 +173 1 dataset """kinships""" +173 1 model """distmult""" +173 1 loss """crossentropy""" +173 1 regularizer """no""" +173 1 optimizer """adadelta""" +173 1 training_loop """lcwa""" +173 1 evaluator """rankbased""" +173 2 dataset """kinships""" +173 2 model """distmult""" +173 2 loss """crossentropy""" +173 2 regularizer """no""" +173 2 optimizer """adadelta""" +173 2 training_loop """lcwa""" +173 2 evaluator """rankbased""" +173 3 dataset """kinships""" +173 3 model """distmult""" +173 3 loss """crossentropy""" +173 3 regularizer """no""" +173 3 optimizer """adadelta""" +173 3 training_loop """lcwa""" +173 3 evaluator """rankbased""" +173 4 dataset """kinships""" +173 4 model """distmult""" +173 4 loss """crossentropy""" +173 4 regularizer """no""" +173 4 optimizer """adadelta""" +173 4 training_loop """lcwa""" +173 4 evaluator """rankbased""" +173 5 dataset """kinships""" +173 5 model """distmult""" +173 5 loss """crossentropy""" +173 5 regularizer """no""" +173 5 optimizer """adadelta""" +173 5 training_loop """lcwa""" +173 5 evaluator """rankbased""" +173 6 dataset """kinships""" +173 6 model """distmult""" +173 6 loss """crossentropy""" +173 6 regularizer """no""" +173 6 optimizer """adadelta""" +173 6 training_loop """lcwa""" +173 6 evaluator """rankbased""" +173 7 dataset """kinships""" +173 7 model """distmult""" +173 7 loss """crossentropy""" +173 7 regularizer """no""" +173 7 optimizer """adadelta""" +173 7 training_loop """lcwa""" +173 7 evaluator """rankbased""" +173 8 dataset """kinships""" +173 8 
model """distmult""" +173 8 loss """crossentropy""" +173 8 regularizer """no""" +173 8 optimizer """adadelta""" +173 8 training_loop """lcwa""" +173 8 evaluator """rankbased""" +173 9 dataset """kinships""" +173 9 model """distmult""" +173 9 loss """crossentropy""" +173 9 regularizer """no""" +173 9 optimizer """adadelta""" +173 9 training_loop """lcwa""" +173 9 evaluator """rankbased""" +173 10 dataset """kinships""" +173 10 model """distmult""" +173 10 loss """crossentropy""" +173 10 regularizer """no""" +173 10 optimizer """adadelta""" +173 10 training_loop """lcwa""" +173 10 evaluator """rankbased""" +173 11 dataset """kinships""" +173 11 model """distmult""" +173 11 loss """crossentropy""" +173 11 regularizer """no""" +173 11 optimizer """adadelta""" +173 11 training_loop """lcwa""" +173 11 evaluator """rankbased""" +173 12 dataset """kinships""" +173 12 model """distmult""" +173 12 loss """crossentropy""" +173 12 regularizer """no""" +173 12 optimizer """adadelta""" +173 12 training_loop """lcwa""" +173 12 evaluator """rankbased""" +173 13 dataset """kinships""" +173 13 model """distmult""" +173 13 loss """crossentropy""" +173 13 regularizer """no""" +173 13 optimizer """adadelta""" +173 13 training_loop """lcwa""" +173 13 evaluator """rankbased""" +173 14 dataset """kinships""" +173 14 model """distmult""" +173 14 loss """crossentropy""" +173 14 regularizer """no""" +173 14 optimizer """adadelta""" +173 14 training_loop """lcwa""" +173 14 evaluator """rankbased""" +173 15 dataset """kinships""" +173 15 model """distmult""" +173 15 loss """crossentropy""" +173 15 regularizer """no""" +173 15 optimizer """adadelta""" +173 15 training_loop """lcwa""" +173 15 evaluator """rankbased""" +173 16 dataset """kinships""" +173 16 model """distmult""" +173 16 loss """crossentropy""" +173 16 regularizer """no""" +173 16 optimizer """adadelta""" +173 16 training_loop """lcwa""" +173 16 evaluator """rankbased""" +173 17 dataset """kinships""" +173 17 model """distmult""" 
+173 17 loss """crossentropy""" +173 17 regularizer """no""" +173 17 optimizer """adadelta""" +173 17 training_loop """lcwa""" +173 17 evaluator """rankbased""" +173 18 dataset """kinships""" +173 18 model """distmult""" +173 18 loss """crossentropy""" +173 18 regularizer """no""" +173 18 optimizer """adadelta""" +173 18 training_loop """lcwa""" +173 18 evaluator """rankbased""" +173 19 dataset """kinships""" +173 19 model """distmult""" +173 19 loss """crossentropy""" +173 19 regularizer """no""" +173 19 optimizer """adadelta""" +173 19 training_loop """lcwa""" +173 19 evaluator """rankbased""" +173 20 dataset """kinships""" +173 20 model """distmult""" +173 20 loss """crossentropy""" +173 20 regularizer """no""" +173 20 optimizer """adadelta""" +173 20 training_loop """lcwa""" +173 20 evaluator """rankbased""" +173 21 dataset """kinships""" +173 21 model """distmult""" +173 21 loss """crossentropy""" +173 21 regularizer """no""" +173 21 optimizer """adadelta""" +173 21 training_loop """lcwa""" +173 21 evaluator """rankbased""" +173 22 dataset """kinships""" +173 22 model """distmult""" +173 22 loss """crossentropy""" +173 22 regularizer """no""" +173 22 optimizer """adadelta""" +173 22 training_loop """lcwa""" +173 22 evaluator """rankbased""" +173 23 dataset """kinships""" +173 23 model """distmult""" +173 23 loss """crossentropy""" +173 23 regularizer """no""" +173 23 optimizer """adadelta""" +173 23 training_loop """lcwa""" +173 23 evaluator """rankbased""" +173 24 dataset """kinships""" +173 24 model """distmult""" +173 24 loss """crossentropy""" +173 24 regularizer """no""" +173 24 optimizer """adadelta""" +173 24 training_loop """lcwa""" +173 24 evaluator """rankbased""" +173 25 dataset """kinships""" +173 25 model """distmult""" +173 25 loss """crossentropy""" +173 25 regularizer """no""" +173 25 optimizer """adadelta""" +173 25 training_loop """lcwa""" +173 25 evaluator """rankbased""" +173 26 dataset """kinships""" +173 26 model """distmult""" +173 26 
loss """crossentropy""" +173 26 regularizer """no""" +173 26 optimizer """adadelta""" +173 26 training_loop """lcwa""" +173 26 evaluator """rankbased""" +173 27 dataset """kinships""" +173 27 model """distmult""" +173 27 loss """crossentropy""" +173 27 regularizer """no""" +173 27 optimizer """adadelta""" +173 27 training_loop """lcwa""" +173 27 evaluator """rankbased""" +173 28 dataset """kinships""" +173 28 model """distmult""" +173 28 loss """crossentropy""" +173 28 regularizer """no""" +173 28 optimizer """adadelta""" +173 28 training_loop """lcwa""" +173 28 evaluator """rankbased""" +173 29 dataset """kinships""" +173 29 model """distmult""" +173 29 loss """crossentropy""" +173 29 regularizer """no""" +173 29 optimizer """adadelta""" +173 29 training_loop """lcwa""" +173 29 evaluator """rankbased""" +173 30 dataset """kinships""" +173 30 model """distmult""" +173 30 loss """crossentropy""" +173 30 regularizer """no""" +173 30 optimizer """adadelta""" +173 30 training_loop """lcwa""" +173 30 evaluator """rankbased""" +173 31 dataset """kinships""" +173 31 model """distmult""" +173 31 loss """crossentropy""" +173 31 regularizer """no""" +173 31 optimizer """adadelta""" +173 31 training_loop """lcwa""" +173 31 evaluator """rankbased""" +173 32 dataset """kinships""" +173 32 model """distmult""" +173 32 loss """crossentropy""" +173 32 regularizer """no""" +173 32 optimizer """adadelta""" +173 32 training_loop """lcwa""" +173 32 evaluator """rankbased""" +173 33 dataset """kinships""" +173 33 model """distmult""" +173 33 loss """crossentropy""" +173 33 regularizer """no""" +173 33 optimizer """adadelta""" +173 33 training_loop """lcwa""" +173 33 evaluator """rankbased""" +173 34 dataset """kinships""" +173 34 model """distmult""" +173 34 loss """crossentropy""" +173 34 regularizer """no""" +173 34 optimizer """adadelta""" +173 34 training_loop """lcwa""" +173 34 evaluator """rankbased""" +173 35 dataset """kinships""" +173 35 model """distmult""" +173 35 loss 
"""crossentropy""" +173 35 regularizer """no""" +173 35 optimizer """adadelta""" +173 35 training_loop """lcwa""" +173 35 evaluator """rankbased""" +173 36 dataset """kinships""" +173 36 model """distmult""" +173 36 loss """crossentropy""" +173 36 regularizer """no""" +173 36 optimizer """adadelta""" +173 36 training_loop """lcwa""" +173 36 evaluator """rankbased""" +173 37 dataset """kinships""" +173 37 model """distmult""" +173 37 loss """crossentropy""" +173 37 regularizer """no""" +173 37 optimizer """adadelta""" +173 37 training_loop """lcwa""" +173 37 evaluator """rankbased""" +173 38 dataset """kinships""" +173 38 model """distmult""" +173 38 loss """crossentropy""" +173 38 regularizer """no""" +173 38 optimizer """adadelta""" +173 38 training_loop """lcwa""" +173 38 evaluator """rankbased""" +173 39 dataset """kinships""" +173 39 model """distmult""" +173 39 loss """crossentropy""" +173 39 regularizer """no""" +173 39 optimizer """adadelta""" +173 39 training_loop """lcwa""" +173 39 evaluator """rankbased""" +173 40 dataset """kinships""" +173 40 model """distmult""" +173 40 loss """crossentropy""" +173 40 regularizer """no""" +173 40 optimizer """adadelta""" +173 40 training_loop """lcwa""" +173 40 evaluator """rankbased""" +173 41 dataset """kinships""" +173 41 model """distmult""" +173 41 loss """crossentropy""" +173 41 regularizer """no""" +173 41 optimizer """adadelta""" +173 41 training_loop """lcwa""" +173 41 evaluator """rankbased""" +173 42 dataset """kinships""" +173 42 model """distmult""" +173 42 loss """crossentropy""" +173 42 regularizer """no""" +173 42 optimizer """adadelta""" +173 42 training_loop """lcwa""" +173 42 evaluator """rankbased""" +173 43 dataset """kinships""" +173 43 model """distmult""" +173 43 loss """crossentropy""" +173 43 regularizer """no""" +173 43 optimizer """adadelta""" +173 43 training_loop """lcwa""" +173 43 evaluator """rankbased""" +173 44 dataset """kinships""" +173 44 model """distmult""" +173 44 loss 
"""crossentropy""" +173 44 regularizer """no""" +173 44 optimizer """adadelta""" +173 44 training_loop """lcwa""" +173 44 evaluator """rankbased""" +173 45 dataset """kinships""" +173 45 model """distmult""" +173 45 loss """crossentropy""" +173 45 regularizer """no""" +173 45 optimizer """adadelta""" +173 45 training_loop """lcwa""" +173 45 evaluator """rankbased""" +173 46 dataset """kinships""" +173 46 model """distmult""" +173 46 loss """crossentropy""" +173 46 regularizer """no""" +173 46 optimizer """adadelta""" +173 46 training_loop """lcwa""" +173 46 evaluator """rankbased""" +173 47 dataset """kinships""" +173 47 model """distmult""" +173 47 loss """crossentropy""" +173 47 regularizer """no""" +173 47 optimizer """adadelta""" +173 47 training_loop """lcwa""" +173 47 evaluator """rankbased""" +173 48 dataset """kinships""" +173 48 model """distmult""" +173 48 loss """crossentropy""" +173 48 regularizer """no""" +173 48 optimizer """adadelta""" +173 48 training_loop """lcwa""" +173 48 evaluator """rankbased""" +173 49 dataset """kinships""" +173 49 model """distmult""" +173 49 loss """crossentropy""" +173 49 regularizer """no""" +173 49 optimizer """adadelta""" +173 49 training_loop """lcwa""" +173 49 evaluator """rankbased""" +173 50 dataset """kinships""" +173 50 model """distmult""" +173 50 loss """crossentropy""" +173 50 regularizer """no""" +173 50 optimizer """adadelta""" +173 50 training_loop """lcwa""" +173 50 evaluator """rankbased""" +173 51 dataset """kinships""" +173 51 model """distmult""" +173 51 loss """crossentropy""" +173 51 regularizer """no""" +173 51 optimizer """adadelta""" +173 51 training_loop """lcwa""" +173 51 evaluator """rankbased""" +173 52 dataset """kinships""" +173 52 model """distmult""" +173 52 loss """crossentropy""" +173 52 regularizer """no""" +173 52 optimizer """adadelta""" +173 52 training_loop """lcwa""" +173 52 evaluator """rankbased""" +173 53 dataset """kinships""" +173 53 model """distmult""" +173 53 loss 
"""crossentropy""" +173 53 regularizer """no""" +173 53 optimizer """adadelta""" +173 53 training_loop """lcwa""" +173 53 evaluator """rankbased""" +173 54 dataset """kinships""" +173 54 model """distmult""" +173 54 loss """crossentropy""" +173 54 regularizer """no""" +173 54 optimizer """adadelta""" +173 54 training_loop """lcwa""" +173 54 evaluator """rankbased""" +173 55 dataset """kinships""" +173 55 model """distmult""" +173 55 loss """crossentropy""" +173 55 regularizer """no""" +173 55 optimizer """adadelta""" +173 55 training_loop """lcwa""" +173 55 evaluator """rankbased""" +173 56 dataset """kinships""" +173 56 model """distmult""" +173 56 loss """crossentropy""" +173 56 regularizer """no""" +173 56 optimizer """adadelta""" +173 56 training_loop """lcwa""" +173 56 evaluator """rankbased""" +173 57 dataset """kinships""" +173 57 model """distmult""" +173 57 loss """crossentropy""" +173 57 regularizer """no""" +173 57 optimizer """adadelta""" +173 57 training_loop """lcwa""" +173 57 evaluator """rankbased""" +173 58 dataset """kinships""" +173 58 model """distmult""" +173 58 loss """crossentropy""" +173 58 regularizer """no""" +173 58 optimizer """adadelta""" +173 58 training_loop """lcwa""" +173 58 evaluator """rankbased""" +173 59 dataset """kinships""" +173 59 model """distmult""" +173 59 loss """crossentropy""" +173 59 regularizer """no""" +173 59 optimizer """adadelta""" +173 59 training_loop """lcwa""" +173 59 evaluator """rankbased""" +173 60 dataset """kinships""" +173 60 model """distmult""" +173 60 loss """crossentropy""" +173 60 regularizer """no""" +173 60 optimizer """adadelta""" +173 60 training_loop """lcwa""" +173 60 evaluator """rankbased""" +173 61 dataset """kinships""" +173 61 model """distmult""" +173 61 loss """crossentropy""" +173 61 regularizer """no""" +173 61 optimizer """adadelta""" +173 61 training_loop """lcwa""" +173 61 evaluator """rankbased""" +173 62 dataset """kinships""" +173 62 model """distmult""" +173 62 loss 
"""crossentropy""" +173 62 regularizer """no""" +173 62 optimizer """adadelta""" +173 62 training_loop """lcwa""" +173 62 evaluator """rankbased""" +173 63 dataset """kinships""" +173 63 model """distmult""" +173 63 loss """crossentropy""" +173 63 regularizer """no""" +173 63 optimizer """adadelta""" +173 63 training_loop """lcwa""" +173 63 evaluator """rankbased""" +173 64 dataset """kinships""" +173 64 model """distmult""" +173 64 loss """crossentropy""" +173 64 regularizer """no""" +173 64 optimizer """adadelta""" +173 64 training_loop """lcwa""" +173 64 evaluator """rankbased""" +173 65 dataset """kinships""" +173 65 model """distmult""" +173 65 loss """crossentropy""" +173 65 regularizer """no""" +173 65 optimizer """adadelta""" +173 65 training_loop """lcwa""" +173 65 evaluator """rankbased""" +173 66 dataset """kinships""" +173 66 model """distmult""" +173 66 loss """crossentropy""" +173 66 regularizer """no""" +173 66 optimizer """adadelta""" +173 66 training_loop """lcwa""" +173 66 evaluator """rankbased""" +173 67 dataset """kinships""" +173 67 model """distmult""" +173 67 loss """crossentropy""" +173 67 regularizer """no""" +173 67 optimizer """adadelta""" +173 67 training_loop """lcwa""" +173 67 evaluator """rankbased""" +173 68 dataset """kinships""" +173 68 model """distmult""" +173 68 loss """crossentropy""" +173 68 regularizer """no""" +173 68 optimizer """adadelta""" +173 68 training_loop """lcwa""" +173 68 evaluator """rankbased""" +173 69 dataset """kinships""" +173 69 model """distmult""" +173 69 loss """crossentropy""" +173 69 regularizer """no""" +173 69 optimizer """adadelta""" +173 69 training_loop """lcwa""" +173 69 evaluator """rankbased""" +173 70 dataset """kinships""" +173 70 model """distmult""" +173 70 loss """crossentropy""" +173 70 regularizer """no""" +173 70 optimizer """adadelta""" +173 70 training_loop """lcwa""" +173 70 evaluator """rankbased""" +173 71 dataset """kinships""" +173 71 model """distmult""" +173 71 loss 
"""crossentropy""" +173 71 regularizer """no""" +173 71 optimizer """adadelta""" +173 71 training_loop """lcwa""" +173 71 evaluator """rankbased""" +173 72 dataset """kinships""" +173 72 model """distmult""" +173 72 loss """crossentropy""" +173 72 regularizer """no""" +173 72 optimizer """adadelta""" +173 72 training_loop """lcwa""" +173 72 evaluator """rankbased""" +173 73 dataset """kinships""" +173 73 model """distmult""" +173 73 loss """crossentropy""" +173 73 regularizer """no""" +173 73 optimizer """adadelta""" +173 73 training_loop """lcwa""" +173 73 evaluator """rankbased""" +173 74 dataset """kinships""" +173 74 model """distmult""" +173 74 loss """crossentropy""" +173 74 regularizer """no""" +173 74 optimizer """adadelta""" +173 74 training_loop """lcwa""" +173 74 evaluator """rankbased""" +173 75 dataset """kinships""" +173 75 model """distmult""" +173 75 loss """crossentropy""" +173 75 regularizer """no""" +173 75 optimizer """adadelta""" +173 75 training_loop """lcwa""" +173 75 evaluator """rankbased""" +173 76 dataset """kinships""" +173 76 model """distmult""" +173 76 loss """crossentropy""" +173 76 regularizer """no""" +173 76 optimizer """adadelta""" +173 76 training_loop """lcwa""" +173 76 evaluator """rankbased""" +173 77 dataset """kinships""" +173 77 model """distmult""" +173 77 loss """crossentropy""" +173 77 regularizer """no""" +173 77 optimizer """adadelta""" +173 77 training_loop """lcwa""" +173 77 evaluator """rankbased""" +173 78 dataset """kinships""" +173 78 model """distmult""" +173 78 loss """crossentropy""" +173 78 regularizer """no""" +173 78 optimizer """adadelta""" +173 78 training_loop """lcwa""" +173 78 evaluator """rankbased""" +173 79 dataset """kinships""" +173 79 model """distmult""" +173 79 loss """crossentropy""" +173 79 regularizer """no""" +173 79 optimizer """adadelta""" +173 79 training_loop """lcwa""" +173 79 evaluator """rankbased""" +173 80 dataset """kinships""" +173 80 model """distmult""" +173 80 loss 
"""crossentropy""" +173 80 regularizer """no""" +173 80 optimizer """adadelta""" +173 80 training_loop """lcwa""" +173 80 evaluator """rankbased""" +173 81 dataset """kinships""" +173 81 model """distmult""" +173 81 loss """crossentropy""" +173 81 regularizer """no""" +173 81 optimizer """adadelta""" +173 81 training_loop """lcwa""" +173 81 evaluator """rankbased""" +173 82 dataset """kinships""" +173 82 model """distmult""" +173 82 loss """crossentropy""" +173 82 regularizer """no""" +173 82 optimizer """adadelta""" +173 82 training_loop """lcwa""" +173 82 evaluator """rankbased""" +173 83 dataset """kinships""" +173 83 model """distmult""" +173 83 loss """crossentropy""" +173 83 regularizer """no""" +173 83 optimizer """adadelta""" +173 83 training_loop """lcwa""" +173 83 evaluator """rankbased""" +173 84 dataset """kinships""" +173 84 model """distmult""" +173 84 loss """crossentropy""" +173 84 regularizer """no""" +173 84 optimizer """adadelta""" +173 84 training_loop """lcwa""" +173 84 evaluator """rankbased""" +173 85 dataset """kinships""" +173 85 model """distmult""" +173 85 loss """crossentropy""" +173 85 regularizer """no""" +173 85 optimizer """adadelta""" +173 85 training_loop """lcwa""" +173 85 evaluator """rankbased""" +173 86 dataset """kinships""" +173 86 model """distmult""" +173 86 loss """crossentropy""" +173 86 regularizer """no""" +173 86 optimizer """adadelta""" +173 86 training_loop """lcwa""" +173 86 evaluator """rankbased""" +173 87 dataset """kinships""" +173 87 model """distmult""" +173 87 loss """crossentropy""" +173 87 regularizer """no""" +173 87 optimizer """adadelta""" +173 87 training_loop """lcwa""" +173 87 evaluator """rankbased""" +173 88 dataset """kinships""" +173 88 model """distmult""" +173 88 loss """crossentropy""" +173 88 regularizer """no""" +173 88 optimizer """adadelta""" +173 88 training_loop """lcwa""" +173 88 evaluator """rankbased""" +173 89 dataset """kinships""" +173 89 model """distmult""" +173 89 loss 
"""crossentropy""" +173 89 regularizer """no""" +173 89 optimizer """adadelta""" +173 89 training_loop """lcwa""" +173 89 evaluator """rankbased""" +173 90 dataset """kinships""" +173 90 model """distmult""" +173 90 loss """crossentropy""" +173 90 regularizer """no""" +173 90 optimizer """adadelta""" +173 90 training_loop """lcwa""" +173 90 evaluator """rankbased""" +173 91 dataset """kinships""" +173 91 model """distmult""" +173 91 loss """crossentropy""" +173 91 regularizer """no""" +173 91 optimizer """adadelta""" +173 91 training_loop """lcwa""" +173 91 evaluator """rankbased""" +173 92 dataset """kinships""" +173 92 model """distmult""" +173 92 loss """crossentropy""" +173 92 regularizer """no""" +173 92 optimizer """adadelta""" +173 92 training_loop """lcwa""" +173 92 evaluator """rankbased""" +173 93 dataset """kinships""" +173 93 model """distmult""" +173 93 loss """crossentropy""" +173 93 regularizer """no""" +173 93 optimizer """adadelta""" +173 93 training_loop """lcwa""" +173 93 evaluator """rankbased""" +173 94 dataset """kinships""" +173 94 model """distmult""" +173 94 loss """crossentropy""" +173 94 regularizer """no""" +173 94 optimizer """adadelta""" +173 94 training_loop """lcwa""" +173 94 evaluator """rankbased""" +173 95 dataset """kinships""" +173 95 model """distmult""" +173 95 loss """crossentropy""" +173 95 regularizer """no""" +173 95 optimizer """adadelta""" +173 95 training_loop """lcwa""" +173 95 evaluator """rankbased""" +173 96 dataset """kinships""" +173 96 model """distmult""" +173 96 loss """crossentropy""" +173 96 regularizer """no""" +173 96 optimizer """adadelta""" +173 96 training_loop """lcwa""" +173 96 evaluator """rankbased""" +173 97 dataset """kinships""" +173 97 model """distmult""" +173 97 loss """crossentropy""" +173 97 regularizer """no""" +173 97 optimizer """adadelta""" +173 97 training_loop """lcwa""" +173 97 evaluator """rankbased""" +173 98 dataset """kinships""" +173 98 model """distmult""" +173 98 loss 
"""crossentropy""" +173 98 regularizer """no""" +173 98 optimizer """adadelta""" +173 98 training_loop """lcwa""" +173 98 evaluator """rankbased""" +173 99 dataset """kinships""" +173 99 model """distmult""" +173 99 loss """crossentropy""" +173 99 regularizer """no""" +173 99 optimizer """adadelta""" +173 99 training_loop """lcwa""" +173 99 evaluator """rankbased""" +173 100 dataset """kinships""" +173 100 model """distmult""" +173 100 loss """crossentropy""" +173 100 regularizer """no""" +173 100 optimizer """adadelta""" +173 100 training_loop """lcwa""" +173 100 evaluator """rankbased""" +174 1 model.embedding_dim 1.0 +174 1 negative_sampler.num_negs_per_pos 83.0 +174 1 training.batch_size 0.0 +174 2 model.embedding_dim 1.0 +174 2 negative_sampler.num_negs_per_pos 19.0 +174 2 training.batch_size 1.0 +174 3 model.embedding_dim 2.0 +174 3 negative_sampler.num_negs_per_pos 94.0 +174 3 training.batch_size 2.0 +174 4 model.embedding_dim 1.0 +174 4 negative_sampler.num_negs_per_pos 97.0 +174 4 training.batch_size 2.0 +174 5 model.embedding_dim 2.0 +174 5 negative_sampler.num_negs_per_pos 9.0 +174 5 training.batch_size 0.0 +174 6 model.embedding_dim 2.0 +174 6 negative_sampler.num_negs_per_pos 18.0 +174 6 training.batch_size 0.0 +174 7 model.embedding_dim 1.0 +174 7 negative_sampler.num_negs_per_pos 42.0 +174 7 training.batch_size 2.0 +174 8 model.embedding_dim 2.0 +174 8 negative_sampler.num_negs_per_pos 18.0 +174 8 training.batch_size 0.0 +174 9 model.embedding_dim 1.0 +174 9 negative_sampler.num_negs_per_pos 59.0 +174 9 training.batch_size 1.0 +174 10 model.embedding_dim 1.0 +174 10 negative_sampler.num_negs_per_pos 45.0 +174 10 training.batch_size 2.0 +174 11 model.embedding_dim 2.0 +174 11 negative_sampler.num_negs_per_pos 4.0 +174 11 training.batch_size 1.0 +174 12 model.embedding_dim 2.0 +174 12 negative_sampler.num_negs_per_pos 28.0 +174 12 training.batch_size 2.0 +174 13 model.embedding_dim 2.0 +174 13 negative_sampler.num_negs_per_pos 23.0 +174 13 
training.batch_size 0.0 +174 14 model.embedding_dim 0.0 +174 14 negative_sampler.num_negs_per_pos 17.0 +174 14 training.batch_size 0.0 +174 15 model.embedding_dim 0.0 +174 15 negative_sampler.num_negs_per_pos 35.0 +174 15 training.batch_size 0.0 +174 16 model.embedding_dim 1.0 +174 16 negative_sampler.num_negs_per_pos 86.0 +174 16 training.batch_size 2.0 +174 17 model.embedding_dim 1.0 +174 17 negative_sampler.num_negs_per_pos 7.0 +174 17 training.batch_size 0.0 +174 18 model.embedding_dim 0.0 +174 18 negative_sampler.num_negs_per_pos 81.0 +174 18 training.batch_size 0.0 +174 19 model.embedding_dim 1.0 +174 19 negative_sampler.num_negs_per_pos 82.0 +174 19 training.batch_size 2.0 +174 20 model.embedding_dim 2.0 +174 20 negative_sampler.num_negs_per_pos 62.0 +174 20 training.batch_size 1.0 +174 21 model.embedding_dim 2.0 +174 21 negative_sampler.num_negs_per_pos 51.0 +174 21 training.batch_size 2.0 +174 22 model.embedding_dim 1.0 +174 22 negative_sampler.num_negs_per_pos 19.0 +174 22 training.batch_size 2.0 +174 23 model.embedding_dim 0.0 +174 23 negative_sampler.num_negs_per_pos 4.0 +174 23 training.batch_size 0.0 +174 24 model.embedding_dim 2.0 +174 24 negative_sampler.num_negs_per_pos 25.0 +174 24 training.batch_size 1.0 +174 25 model.embedding_dim 1.0 +174 25 negative_sampler.num_negs_per_pos 80.0 +174 25 training.batch_size 2.0 +174 26 model.embedding_dim 0.0 +174 26 negative_sampler.num_negs_per_pos 91.0 +174 26 training.batch_size 2.0 +174 27 model.embedding_dim 1.0 +174 27 negative_sampler.num_negs_per_pos 79.0 +174 27 training.batch_size 1.0 +174 28 model.embedding_dim 0.0 +174 28 negative_sampler.num_negs_per_pos 39.0 +174 28 training.batch_size 1.0 +174 29 model.embedding_dim 1.0 +174 29 negative_sampler.num_negs_per_pos 16.0 +174 29 training.batch_size 2.0 +174 30 model.embedding_dim 2.0 +174 30 negative_sampler.num_negs_per_pos 95.0 +174 30 training.batch_size 0.0 +174 31 model.embedding_dim 2.0 +174 31 negative_sampler.num_negs_per_pos 24.0 +174 31 
training.batch_size 2.0 +174 32 model.embedding_dim 0.0 +174 32 negative_sampler.num_negs_per_pos 97.0 +174 32 training.batch_size 2.0 +174 33 model.embedding_dim 2.0 +174 33 negative_sampler.num_negs_per_pos 80.0 +174 33 training.batch_size 0.0 +174 34 model.embedding_dim 2.0 +174 34 negative_sampler.num_negs_per_pos 39.0 +174 34 training.batch_size 0.0 +174 35 model.embedding_dim 2.0 +174 35 negative_sampler.num_negs_per_pos 31.0 +174 35 training.batch_size 0.0 +174 36 model.embedding_dim 2.0 +174 36 negative_sampler.num_negs_per_pos 95.0 +174 36 training.batch_size 0.0 +174 37 model.embedding_dim 1.0 +174 37 negative_sampler.num_negs_per_pos 45.0 +174 37 training.batch_size 2.0 +174 38 model.embedding_dim 2.0 +174 38 negative_sampler.num_negs_per_pos 52.0 +174 38 training.batch_size 2.0 +174 39 model.embedding_dim 1.0 +174 39 negative_sampler.num_negs_per_pos 60.0 +174 39 training.batch_size 1.0 +174 40 model.embedding_dim 2.0 +174 40 negative_sampler.num_negs_per_pos 25.0 +174 40 training.batch_size 2.0 +174 41 model.embedding_dim 2.0 +174 41 negative_sampler.num_negs_per_pos 20.0 +174 41 training.batch_size 1.0 +174 42 model.embedding_dim 0.0 +174 42 negative_sampler.num_negs_per_pos 54.0 +174 42 training.batch_size 2.0 +174 43 model.embedding_dim 0.0 +174 43 negative_sampler.num_negs_per_pos 66.0 +174 43 training.batch_size 2.0 +174 44 model.embedding_dim 0.0 +174 44 negative_sampler.num_negs_per_pos 2.0 +174 44 training.batch_size 1.0 +174 45 model.embedding_dim 2.0 +174 45 negative_sampler.num_negs_per_pos 64.0 +174 45 training.batch_size 1.0 +174 46 model.embedding_dim 2.0 +174 46 negative_sampler.num_negs_per_pos 27.0 +174 46 training.batch_size 0.0 +174 47 model.embedding_dim 2.0 +174 47 negative_sampler.num_negs_per_pos 65.0 +174 47 training.batch_size 0.0 +174 48 model.embedding_dim 1.0 +174 48 negative_sampler.num_negs_per_pos 34.0 +174 48 training.batch_size 1.0 +174 49 model.embedding_dim 2.0 +174 49 negative_sampler.num_negs_per_pos 37.0 +174 49 
training.batch_size 2.0 +174 50 model.embedding_dim 2.0 +174 50 negative_sampler.num_negs_per_pos 46.0 +174 50 training.batch_size 1.0 +174 51 model.embedding_dim 2.0 +174 51 negative_sampler.num_negs_per_pos 92.0 +174 51 training.batch_size 1.0 +174 52 model.embedding_dim 0.0 +174 52 negative_sampler.num_negs_per_pos 10.0 +174 52 training.batch_size 0.0 +174 53 model.embedding_dim 0.0 +174 53 negative_sampler.num_negs_per_pos 80.0 +174 53 training.batch_size 1.0 +174 54 model.embedding_dim 1.0 +174 54 negative_sampler.num_negs_per_pos 87.0 +174 54 training.batch_size 2.0 +174 55 model.embedding_dim 1.0 +174 55 negative_sampler.num_negs_per_pos 86.0 +174 55 training.batch_size 2.0 +174 56 model.embedding_dim 1.0 +174 56 negative_sampler.num_negs_per_pos 7.0 +174 56 training.batch_size 2.0 +174 57 model.embedding_dim 2.0 +174 57 negative_sampler.num_negs_per_pos 37.0 +174 57 training.batch_size 2.0 +174 58 model.embedding_dim 2.0 +174 58 negative_sampler.num_negs_per_pos 30.0 +174 58 training.batch_size 0.0 +174 59 model.embedding_dim 1.0 +174 59 negative_sampler.num_negs_per_pos 1.0 +174 59 training.batch_size 0.0 +174 60 model.embedding_dim 0.0 +174 60 negative_sampler.num_negs_per_pos 39.0 +174 60 training.batch_size 1.0 +174 61 model.embedding_dim 2.0 +174 61 negative_sampler.num_negs_per_pos 44.0 +174 61 training.batch_size 0.0 +174 62 model.embedding_dim 0.0 +174 62 negative_sampler.num_negs_per_pos 0.0 +174 62 training.batch_size 1.0 +174 63 model.embedding_dim 2.0 +174 63 negative_sampler.num_negs_per_pos 74.0 +174 63 training.batch_size 0.0 +174 64 model.embedding_dim 0.0 +174 64 negative_sampler.num_negs_per_pos 59.0 +174 64 training.batch_size 0.0 +174 65 model.embedding_dim 0.0 +174 65 negative_sampler.num_negs_per_pos 64.0 +174 65 training.batch_size 0.0 +174 66 model.embedding_dim 2.0 +174 66 negative_sampler.num_negs_per_pos 3.0 +174 66 training.batch_size 1.0 +174 67 model.embedding_dim 2.0 +174 67 negative_sampler.num_negs_per_pos 38.0 +174 67 
training.batch_size 0.0 +174 68 model.embedding_dim 1.0 +174 68 negative_sampler.num_negs_per_pos 51.0 +174 68 training.batch_size 1.0 +174 69 model.embedding_dim 0.0 +174 69 negative_sampler.num_negs_per_pos 33.0 +174 69 training.batch_size 1.0 +174 70 model.embedding_dim 0.0 +174 70 negative_sampler.num_negs_per_pos 40.0 +174 70 training.batch_size 2.0 +174 71 model.embedding_dim 1.0 +174 71 negative_sampler.num_negs_per_pos 32.0 +174 71 training.batch_size 2.0 +174 72 model.embedding_dim 0.0 +174 72 negative_sampler.num_negs_per_pos 87.0 +174 72 training.batch_size 2.0 +174 73 model.embedding_dim 2.0 +174 73 negative_sampler.num_negs_per_pos 11.0 +174 73 training.batch_size 1.0 +174 74 model.embedding_dim 2.0 +174 74 negative_sampler.num_negs_per_pos 52.0 +174 74 training.batch_size 1.0 +174 75 model.embedding_dim 0.0 +174 75 negative_sampler.num_negs_per_pos 78.0 +174 75 training.batch_size 1.0 +174 76 model.embedding_dim 1.0 +174 76 negative_sampler.num_negs_per_pos 80.0 +174 76 training.batch_size 1.0 +174 77 model.embedding_dim 2.0 +174 77 negative_sampler.num_negs_per_pos 75.0 +174 77 training.batch_size 0.0 +174 78 model.embedding_dim 0.0 +174 78 negative_sampler.num_negs_per_pos 31.0 +174 78 training.batch_size 2.0 +174 79 model.embedding_dim 1.0 +174 79 negative_sampler.num_negs_per_pos 42.0 +174 79 training.batch_size 0.0 +174 80 model.embedding_dim 1.0 +174 80 negative_sampler.num_negs_per_pos 99.0 +174 80 training.batch_size 2.0 +174 81 model.embedding_dim 0.0 +174 81 negative_sampler.num_negs_per_pos 49.0 +174 81 training.batch_size 1.0 +174 82 model.embedding_dim 2.0 +174 82 negative_sampler.num_negs_per_pos 92.0 +174 82 training.batch_size 1.0 +174 83 model.embedding_dim 1.0 +174 83 negative_sampler.num_negs_per_pos 81.0 +174 83 training.batch_size 2.0 +174 84 model.embedding_dim 1.0 +174 84 negative_sampler.num_negs_per_pos 36.0 +174 84 training.batch_size 1.0 +174 85 model.embedding_dim 1.0 +174 85 negative_sampler.num_negs_per_pos 47.0 +174 85 
training.batch_size 2.0 +174 86 model.embedding_dim 2.0 +174 86 negative_sampler.num_negs_per_pos 65.0 +174 86 training.batch_size 0.0 +174 87 model.embedding_dim 0.0 +174 87 negative_sampler.num_negs_per_pos 44.0 +174 87 training.batch_size 0.0 +174 88 model.embedding_dim 0.0 +174 88 negative_sampler.num_negs_per_pos 43.0 +174 88 training.batch_size 1.0 +174 89 model.embedding_dim 2.0 +174 89 negative_sampler.num_negs_per_pos 83.0 +174 89 training.batch_size 1.0 +174 90 model.embedding_dim 0.0 +174 90 negative_sampler.num_negs_per_pos 10.0 +174 90 training.batch_size 0.0 +174 91 model.embedding_dim 1.0 +174 91 negative_sampler.num_negs_per_pos 28.0 +174 91 training.batch_size 2.0 +174 92 model.embedding_dim 2.0 +174 92 negative_sampler.num_negs_per_pos 71.0 +174 92 training.batch_size 1.0 +174 93 model.embedding_dim 1.0 +174 93 negative_sampler.num_negs_per_pos 43.0 +174 93 training.batch_size 2.0 +174 94 model.embedding_dim 2.0 +174 94 negative_sampler.num_negs_per_pos 50.0 +174 94 training.batch_size 0.0 +174 95 model.embedding_dim 2.0 +174 95 negative_sampler.num_negs_per_pos 78.0 +174 95 training.batch_size 0.0 +174 96 model.embedding_dim 2.0 +174 96 negative_sampler.num_negs_per_pos 48.0 +174 96 training.batch_size 2.0 +174 97 model.embedding_dim 2.0 +174 97 negative_sampler.num_negs_per_pos 46.0 +174 97 training.batch_size 2.0 +174 98 model.embedding_dim 0.0 +174 98 negative_sampler.num_negs_per_pos 24.0 +174 98 training.batch_size 2.0 +174 99 model.embedding_dim 0.0 +174 99 negative_sampler.num_negs_per_pos 9.0 +174 99 training.batch_size 2.0 +174 100 model.embedding_dim 1.0 +174 100 negative_sampler.num_negs_per_pos 73.0 +174 100 training.batch_size 0.0 +174 1 dataset """kinships""" +174 1 model """distmult""" +174 1 loss """bceaftersigmoid""" +174 1 regularizer """no""" +174 1 optimizer """adadelta""" +174 1 training_loop """owa""" +174 1 negative_sampler """basic""" +174 1 evaluator """rankbased""" +174 2 dataset """kinships""" +174 2 model 
"""distmult""" +174 2 loss """bceaftersigmoid""" +174 2 regularizer """no""" +174 2 optimizer """adadelta""" +174 2 training_loop """owa""" +174 2 negative_sampler """basic""" +174 2 evaluator """rankbased""" +174 3 dataset """kinships""" +174 3 model """distmult""" +174 3 loss """bceaftersigmoid""" +174 3 regularizer """no""" +174 3 optimizer """adadelta""" +174 3 training_loop """owa""" +174 3 negative_sampler """basic""" +174 3 evaluator """rankbased""" +174 4 dataset """kinships""" +174 4 model """distmult""" +174 4 loss """bceaftersigmoid""" +174 4 regularizer """no""" +174 4 optimizer """adadelta""" +174 4 training_loop """owa""" +174 4 negative_sampler """basic""" +174 4 evaluator """rankbased""" +174 5 dataset """kinships""" +174 5 model """distmult""" +174 5 loss """bceaftersigmoid""" +174 5 regularizer """no""" +174 5 optimizer """adadelta""" +174 5 training_loop """owa""" +174 5 negative_sampler """basic""" +174 5 evaluator """rankbased""" +174 6 dataset """kinships""" +174 6 model """distmult""" +174 6 loss """bceaftersigmoid""" +174 6 regularizer """no""" +174 6 optimizer """adadelta""" +174 6 training_loop """owa""" +174 6 negative_sampler """basic""" +174 6 evaluator """rankbased""" +174 7 dataset """kinships""" +174 7 model """distmult""" +174 7 loss """bceaftersigmoid""" +174 7 regularizer """no""" +174 7 optimizer """adadelta""" +174 7 training_loop """owa""" +174 7 negative_sampler """basic""" +174 7 evaluator """rankbased""" +174 8 dataset """kinships""" +174 8 model """distmult""" +174 8 loss """bceaftersigmoid""" +174 8 regularizer """no""" +174 8 optimizer """adadelta""" +174 8 training_loop """owa""" +174 8 negative_sampler """basic""" +174 8 evaluator """rankbased""" +174 9 dataset """kinships""" +174 9 model """distmult""" +174 9 loss """bceaftersigmoid""" +174 9 regularizer """no""" +174 9 optimizer """adadelta""" +174 9 training_loop """owa""" +174 9 negative_sampler """basic""" +174 9 evaluator """rankbased""" +174 10 dataset 
"""kinships""" +174 10 model """distmult""" +174 10 loss """bceaftersigmoid""" +174 10 regularizer """no""" +174 10 optimizer """adadelta""" +174 10 training_loop """owa""" +174 10 negative_sampler """basic""" +174 10 evaluator """rankbased""" +174 11 dataset """kinships""" +174 11 model """distmult""" +174 11 loss """bceaftersigmoid""" +174 11 regularizer """no""" +174 11 optimizer """adadelta""" +174 11 training_loop """owa""" +174 11 negative_sampler """basic""" +174 11 evaluator """rankbased""" +174 12 dataset """kinships""" +174 12 model """distmult""" +174 12 loss """bceaftersigmoid""" +174 12 regularizer """no""" +174 12 optimizer """adadelta""" +174 12 training_loop """owa""" +174 12 negative_sampler """basic""" +174 12 evaluator """rankbased""" +174 13 dataset """kinships""" +174 13 model """distmult""" +174 13 loss """bceaftersigmoid""" +174 13 regularizer """no""" +174 13 optimizer """adadelta""" +174 13 training_loop """owa""" +174 13 negative_sampler """basic""" +174 13 evaluator """rankbased""" +174 14 dataset """kinships""" +174 14 model """distmult""" +174 14 loss """bceaftersigmoid""" +174 14 regularizer """no""" +174 14 optimizer """adadelta""" +174 14 training_loop """owa""" +174 14 negative_sampler """basic""" +174 14 evaluator """rankbased""" +174 15 dataset """kinships""" +174 15 model """distmult""" +174 15 loss """bceaftersigmoid""" +174 15 regularizer """no""" +174 15 optimizer """adadelta""" +174 15 training_loop """owa""" +174 15 negative_sampler """basic""" +174 15 evaluator """rankbased""" +174 16 dataset """kinships""" +174 16 model """distmult""" +174 16 loss """bceaftersigmoid""" +174 16 regularizer """no""" +174 16 optimizer """adadelta""" +174 16 training_loop """owa""" +174 16 negative_sampler """basic""" +174 16 evaluator """rankbased""" +174 17 dataset """kinships""" +174 17 model """distmult""" +174 17 loss """bceaftersigmoid""" +174 17 regularizer """no""" +174 17 optimizer """adadelta""" +174 17 training_loop """owa""" +174 
17 negative_sampler """basic""" +174 17 evaluator """rankbased""" +174 18 dataset """kinships""" +174 18 model """distmult""" +174 18 loss """bceaftersigmoid""" +174 18 regularizer """no""" +174 18 optimizer """adadelta""" +174 18 training_loop """owa""" +174 18 negative_sampler """basic""" +174 18 evaluator """rankbased""" +174 19 dataset """kinships""" +174 19 model """distmult""" +174 19 loss """bceaftersigmoid""" +174 19 regularizer """no""" +174 19 optimizer """adadelta""" +174 19 training_loop """owa""" +174 19 negative_sampler """basic""" +174 19 evaluator """rankbased""" +174 20 dataset """kinships""" +174 20 model """distmult""" +174 20 loss """bceaftersigmoid""" +174 20 regularizer """no""" +174 20 optimizer """adadelta""" +174 20 training_loop """owa""" +174 20 negative_sampler """basic""" +174 20 evaluator """rankbased""" +174 21 dataset """kinships""" +174 21 model """distmult""" +174 21 loss """bceaftersigmoid""" +174 21 regularizer """no""" +174 21 optimizer """adadelta""" +174 21 training_loop """owa""" +174 21 negative_sampler """basic""" +174 21 evaluator """rankbased""" +174 22 dataset """kinships""" +174 22 model """distmult""" +174 22 loss """bceaftersigmoid""" +174 22 regularizer """no""" +174 22 optimizer """adadelta""" +174 22 training_loop """owa""" +174 22 negative_sampler """basic""" +174 22 evaluator """rankbased""" +174 23 dataset """kinships""" +174 23 model """distmult""" +174 23 loss """bceaftersigmoid""" +174 23 regularizer """no""" +174 23 optimizer """adadelta""" +174 23 training_loop """owa""" +174 23 negative_sampler """basic""" +174 23 evaluator """rankbased""" +174 24 dataset """kinships""" +174 24 model """distmult""" +174 24 loss """bceaftersigmoid""" +174 24 regularizer """no""" +174 24 optimizer """adadelta""" +174 24 training_loop """owa""" +174 24 negative_sampler """basic""" +174 24 evaluator """rankbased""" +174 25 dataset """kinships""" +174 25 model """distmult""" +174 25 loss """bceaftersigmoid""" +174 25 
regularizer """no""" +174 25 optimizer """adadelta""" +174 25 training_loop """owa""" +174 25 negative_sampler """basic""" +174 25 evaluator """rankbased""" +174 26 dataset """kinships""" +174 26 model """distmult""" +174 26 loss """bceaftersigmoid""" +174 26 regularizer """no""" +174 26 optimizer """adadelta""" +174 26 training_loop """owa""" +174 26 negative_sampler """basic""" +174 26 evaluator """rankbased""" +174 27 dataset """kinships""" +174 27 model """distmult""" +174 27 loss """bceaftersigmoid""" +174 27 regularizer """no""" +174 27 optimizer """adadelta""" +174 27 training_loop """owa""" +174 27 negative_sampler """basic""" +174 27 evaluator """rankbased""" +174 28 dataset """kinships""" +174 28 model """distmult""" +174 28 loss """bceaftersigmoid""" +174 28 regularizer """no""" +174 28 optimizer """adadelta""" +174 28 training_loop """owa""" +174 28 negative_sampler """basic""" +174 28 evaluator """rankbased""" +174 29 dataset """kinships""" +174 29 model """distmult""" +174 29 loss """bceaftersigmoid""" +174 29 regularizer """no""" +174 29 optimizer """adadelta""" +174 29 training_loop """owa""" +174 29 negative_sampler """basic""" +174 29 evaluator """rankbased""" +174 30 dataset """kinships""" +174 30 model """distmult""" +174 30 loss """bceaftersigmoid""" +174 30 regularizer """no""" +174 30 optimizer """adadelta""" +174 30 training_loop """owa""" +174 30 negative_sampler """basic""" +174 30 evaluator """rankbased""" +174 31 dataset """kinships""" +174 31 model """distmult""" +174 31 loss """bceaftersigmoid""" +174 31 regularizer """no""" +174 31 optimizer """adadelta""" +174 31 training_loop """owa""" +174 31 negative_sampler """basic""" +174 31 evaluator """rankbased""" +174 32 dataset """kinships""" +174 32 model """distmult""" +174 32 loss """bceaftersigmoid""" +174 32 regularizer """no""" +174 32 optimizer """adadelta""" +174 32 training_loop """owa""" +174 32 negative_sampler """basic""" +174 32 evaluator """rankbased""" +174 33 dataset 
"""kinships""" +174 33 model """distmult""" +174 33 loss """bceaftersigmoid""" +174 33 regularizer """no""" +174 33 optimizer """adadelta""" +174 33 training_loop """owa""" +174 33 negative_sampler """basic""" +174 33 evaluator """rankbased""" +174 34 dataset """kinships""" +174 34 model """distmult""" +174 34 loss """bceaftersigmoid""" +174 34 regularizer """no""" +174 34 optimizer """adadelta""" +174 34 training_loop """owa""" +174 34 negative_sampler """basic""" +174 34 evaluator """rankbased""" +174 35 dataset """kinships""" +174 35 model """distmult""" +174 35 loss """bceaftersigmoid""" +174 35 regularizer """no""" +174 35 optimizer """adadelta""" +174 35 training_loop """owa""" +174 35 negative_sampler """basic""" +174 35 evaluator """rankbased""" +174 36 dataset """kinships""" +174 36 model """distmult""" +174 36 loss """bceaftersigmoid""" +174 36 regularizer """no""" +174 36 optimizer """adadelta""" +174 36 training_loop """owa""" +174 36 negative_sampler """basic""" +174 36 evaluator """rankbased""" +174 37 dataset """kinships""" +174 37 model """distmult""" +174 37 loss """bceaftersigmoid""" +174 37 regularizer """no""" +174 37 optimizer """adadelta""" +174 37 training_loop """owa""" +174 37 negative_sampler """basic""" +174 37 evaluator """rankbased""" +174 38 dataset """kinships""" +174 38 model """distmult""" +174 38 loss """bceaftersigmoid""" +174 38 regularizer """no""" +174 38 optimizer """adadelta""" +174 38 training_loop """owa""" +174 38 negative_sampler """basic""" +174 38 evaluator """rankbased""" +174 39 dataset """kinships""" +174 39 model """distmult""" +174 39 loss """bceaftersigmoid""" +174 39 regularizer """no""" +174 39 optimizer """adadelta""" +174 39 training_loop """owa""" +174 39 negative_sampler """basic""" +174 39 evaluator """rankbased""" +174 40 dataset """kinships""" +174 40 model """distmult""" +174 40 loss """bceaftersigmoid""" +174 40 regularizer """no""" +174 40 optimizer """adadelta""" +174 40 training_loop """owa""" +174 
40 negative_sampler """basic""" +174 40 evaluator """rankbased""" +174 41 dataset """kinships""" +174 41 model """distmult""" +174 41 loss """bceaftersigmoid""" +174 41 regularizer """no""" +174 41 optimizer """adadelta""" +174 41 training_loop """owa""" +174 41 negative_sampler """basic""" +174 41 evaluator """rankbased""" +174 42 dataset """kinships""" +174 42 model """distmult""" +174 42 loss """bceaftersigmoid""" +174 42 regularizer """no""" +174 42 optimizer """adadelta""" +174 42 training_loop """owa""" +174 42 negative_sampler """basic""" +174 42 evaluator """rankbased""" +174 43 dataset """kinships""" +174 43 model """distmult""" +174 43 loss """bceaftersigmoid""" +174 43 regularizer """no""" +174 43 optimizer """adadelta""" +174 43 training_loop """owa""" +174 43 negative_sampler """basic""" +174 43 evaluator """rankbased""" +174 44 dataset """kinships""" +174 44 model """distmult""" +174 44 loss """bceaftersigmoid""" +174 44 regularizer """no""" +174 44 optimizer """adadelta""" +174 44 training_loop """owa""" +174 44 negative_sampler """basic""" +174 44 evaluator """rankbased""" +174 45 dataset """kinships""" +174 45 model """distmult""" +174 45 loss """bceaftersigmoid""" +174 45 regularizer """no""" +174 45 optimizer """adadelta""" +174 45 training_loop """owa""" +174 45 negative_sampler """basic""" +174 45 evaluator """rankbased""" +174 46 dataset """kinships""" +174 46 model """distmult""" +174 46 loss """bceaftersigmoid""" +174 46 regularizer """no""" +174 46 optimizer """adadelta""" +174 46 training_loop """owa""" +174 46 negative_sampler """basic""" +174 46 evaluator """rankbased""" +174 47 dataset """kinships""" +174 47 model """distmult""" +174 47 loss """bceaftersigmoid""" +174 47 regularizer """no""" +174 47 optimizer """adadelta""" +174 47 training_loop """owa""" +174 47 negative_sampler """basic""" +174 47 evaluator """rankbased""" +174 48 dataset """kinships""" +174 48 model """distmult""" +174 48 loss """bceaftersigmoid""" +174 48 
regularizer """no""" +174 48 optimizer """adadelta""" +174 48 training_loop """owa""" +174 48 negative_sampler """basic""" +174 48 evaluator """rankbased""" +174 49 dataset """kinships""" +174 49 model """distmult""" +174 49 loss """bceaftersigmoid""" +174 49 regularizer """no""" +174 49 optimizer """adadelta""" +174 49 training_loop """owa""" +174 49 negative_sampler """basic""" +174 49 evaluator """rankbased""" +174 50 dataset """kinships""" +174 50 model """distmult""" +174 50 loss """bceaftersigmoid""" +174 50 regularizer """no""" +174 50 optimizer """adadelta""" +174 50 training_loop """owa""" +174 50 negative_sampler """basic""" +174 50 evaluator """rankbased""" +174 51 dataset """kinships""" +174 51 model """distmult""" +174 51 loss """bceaftersigmoid""" +174 51 regularizer """no""" +174 51 optimizer """adadelta""" +174 51 training_loop """owa""" +174 51 negative_sampler """basic""" +174 51 evaluator """rankbased""" +174 52 dataset """kinships""" +174 52 model """distmult""" +174 52 loss """bceaftersigmoid""" +174 52 regularizer """no""" +174 52 optimizer """adadelta""" +174 52 training_loop """owa""" +174 52 negative_sampler """basic""" +174 52 evaluator """rankbased""" +174 53 dataset """kinships""" +174 53 model """distmult""" +174 53 loss """bceaftersigmoid""" +174 53 regularizer """no""" +174 53 optimizer """adadelta""" +174 53 training_loop """owa""" +174 53 negative_sampler """basic""" +174 53 evaluator """rankbased""" +174 54 dataset """kinships""" +174 54 model """distmult""" +174 54 loss """bceaftersigmoid""" +174 54 regularizer """no""" +174 54 optimizer """adadelta""" +174 54 training_loop """owa""" +174 54 negative_sampler """basic""" +174 54 evaluator """rankbased""" +174 55 dataset """kinships""" +174 55 model """distmult""" +174 55 loss """bceaftersigmoid""" +174 55 regularizer """no""" +174 55 optimizer """adadelta""" +174 55 training_loop """owa""" +174 55 negative_sampler """basic""" +174 55 evaluator """rankbased""" +174 56 dataset 
"""kinships""" +174 56 model """distmult""" +174 56 loss """bceaftersigmoid""" +174 56 regularizer """no""" +174 56 optimizer """adadelta""" +174 56 training_loop """owa""" +174 56 negative_sampler """basic""" +174 56 evaluator """rankbased""" +174 57 dataset """kinships""" +174 57 model """distmult""" +174 57 loss """bceaftersigmoid""" +174 57 regularizer """no""" +174 57 optimizer """adadelta""" +174 57 training_loop """owa""" +174 57 negative_sampler """basic""" +174 57 evaluator """rankbased""" +174 58 dataset """kinships""" +174 58 model """distmult""" +174 58 loss """bceaftersigmoid""" +174 58 regularizer """no""" +174 58 optimizer """adadelta""" +174 58 training_loop """owa""" +174 58 negative_sampler """basic""" +174 58 evaluator """rankbased""" +174 59 dataset """kinships""" +174 59 model """distmult""" +174 59 loss """bceaftersigmoid""" +174 59 regularizer """no""" +174 59 optimizer """adadelta""" +174 59 training_loop """owa""" +174 59 negative_sampler """basic""" +174 59 evaluator """rankbased""" +174 60 dataset """kinships""" +174 60 model """distmult""" +174 60 loss """bceaftersigmoid""" +174 60 regularizer """no""" +174 60 optimizer """adadelta""" +174 60 training_loop """owa""" +174 60 negative_sampler """basic""" +174 60 evaluator """rankbased""" +174 61 dataset """kinships""" +174 61 model """distmult""" +174 61 loss """bceaftersigmoid""" +174 61 regularizer """no""" +174 61 optimizer """adadelta""" +174 61 training_loop """owa""" +174 61 negative_sampler """basic""" +174 61 evaluator """rankbased""" +174 62 dataset """kinships""" +174 62 model """distmult""" +174 62 loss """bceaftersigmoid""" +174 62 regularizer """no""" +174 62 optimizer """adadelta""" +174 62 training_loop """owa""" +174 62 negative_sampler """basic""" +174 62 evaluator """rankbased""" +174 63 dataset """kinships""" +174 63 model """distmult""" +174 63 loss """bceaftersigmoid""" +174 63 regularizer """no""" +174 63 optimizer """adadelta""" +174 63 training_loop """owa""" +174 
63 negative_sampler """basic""" +174 63 evaluator """rankbased""" +174 64 dataset """kinships""" +174 64 model """distmult""" +174 64 loss """bceaftersigmoid""" +174 64 regularizer """no""" +174 64 optimizer """adadelta""" +174 64 training_loop """owa""" +174 64 negative_sampler """basic""" +174 64 evaluator """rankbased""" +174 65 dataset """kinships""" +174 65 model """distmult""" +174 65 loss """bceaftersigmoid""" +174 65 regularizer """no""" +174 65 optimizer """adadelta""" +174 65 training_loop """owa""" +174 65 negative_sampler """basic""" +174 65 evaluator """rankbased""" +174 66 dataset """kinships""" +174 66 model """distmult""" +174 66 loss """bceaftersigmoid""" +174 66 regularizer """no""" +174 66 optimizer """adadelta""" +174 66 training_loop """owa""" +174 66 negative_sampler """basic""" +174 66 evaluator """rankbased""" +174 67 dataset """kinships""" +174 67 model """distmult""" +174 67 loss """bceaftersigmoid""" +174 67 regularizer """no""" +174 67 optimizer """adadelta""" +174 67 training_loop """owa""" +174 67 negative_sampler """basic""" +174 67 evaluator """rankbased""" +174 68 dataset """kinships""" +174 68 model """distmult""" +174 68 loss """bceaftersigmoid""" +174 68 regularizer """no""" +174 68 optimizer """adadelta""" +174 68 training_loop """owa""" +174 68 negative_sampler """basic""" +174 68 evaluator """rankbased""" +174 69 dataset """kinships""" +174 69 model """distmult""" +174 69 loss """bceaftersigmoid""" +174 69 regularizer """no""" +174 69 optimizer """adadelta""" +174 69 training_loop """owa""" +174 69 negative_sampler """basic""" +174 69 evaluator """rankbased""" +174 70 dataset """kinships""" +174 70 model """distmult""" +174 70 loss """bceaftersigmoid""" +174 70 regularizer """no""" +174 70 optimizer """adadelta""" +174 70 training_loop """owa""" +174 70 negative_sampler """basic""" +174 70 evaluator """rankbased""" +174 71 dataset """kinships""" +174 71 model """distmult""" +174 71 loss """bceaftersigmoid""" +174 71 
regularizer """no""" +174 71 optimizer """adadelta""" +174 71 training_loop """owa""" +174 71 negative_sampler """basic""" +174 71 evaluator """rankbased""" +174 72 dataset """kinships""" +174 72 model """distmult""" +174 72 loss """bceaftersigmoid""" +174 72 regularizer """no""" +174 72 optimizer """adadelta""" +174 72 training_loop """owa""" +174 72 negative_sampler """basic""" +174 72 evaluator """rankbased""" +174 73 dataset """kinships""" +174 73 model """distmult""" +174 73 loss """bceaftersigmoid""" +174 73 regularizer """no""" +174 73 optimizer """adadelta""" +174 73 training_loop """owa""" +174 73 negative_sampler """basic""" +174 73 evaluator """rankbased""" +174 74 dataset """kinships""" +174 74 model """distmult""" +174 74 loss """bceaftersigmoid""" +174 74 regularizer """no""" +174 74 optimizer """adadelta""" +174 74 training_loop """owa""" +174 74 negative_sampler """basic""" +174 74 evaluator """rankbased""" +174 75 dataset """kinships""" +174 75 model """distmult""" +174 75 loss """bceaftersigmoid""" +174 75 regularizer """no""" +174 75 optimizer """adadelta""" +174 75 training_loop """owa""" +174 75 negative_sampler """basic""" +174 75 evaluator """rankbased""" +174 76 dataset """kinships""" +174 76 model """distmult""" +174 76 loss """bceaftersigmoid""" +174 76 regularizer """no""" +174 76 optimizer """adadelta""" +174 76 training_loop """owa""" +174 76 negative_sampler """basic""" +174 76 evaluator """rankbased""" +174 77 dataset """kinships""" +174 77 model """distmult""" +174 77 loss """bceaftersigmoid""" +174 77 regularizer """no""" +174 77 optimizer """adadelta""" +174 77 training_loop """owa""" +174 77 negative_sampler """basic""" +174 77 evaluator """rankbased""" +174 78 dataset """kinships""" +174 78 model """distmult""" +174 78 loss """bceaftersigmoid""" +174 78 regularizer """no""" +174 78 optimizer """adadelta""" +174 78 training_loop """owa""" +174 78 negative_sampler """basic""" +174 78 evaluator """rankbased""" +174 79 dataset 
"""kinships""" +174 79 model """distmult""" +174 79 loss """bceaftersigmoid""" +174 79 regularizer """no""" +174 79 optimizer """adadelta""" +174 79 training_loop """owa""" +174 79 negative_sampler """basic""" +174 79 evaluator """rankbased""" +174 80 dataset """kinships""" +174 80 model """distmult""" +174 80 loss """bceaftersigmoid""" +174 80 regularizer """no""" +174 80 optimizer """adadelta""" +174 80 training_loop """owa""" +174 80 negative_sampler """basic""" +174 80 evaluator """rankbased""" +174 81 dataset """kinships""" +174 81 model """distmult""" +174 81 loss """bceaftersigmoid""" +174 81 regularizer """no""" +174 81 optimizer """adadelta""" +174 81 training_loop """owa""" +174 81 negative_sampler """basic""" +174 81 evaluator """rankbased""" +174 82 dataset """kinships""" +174 82 model """distmult""" +174 82 loss """bceaftersigmoid""" +174 82 regularizer """no""" +174 82 optimizer """adadelta""" +174 82 training_loop """owa""" +174 82 negative_sampler """basic""" +174 82 evaluator """rankbased""" +174 83 dataset """kinships""" +174 83 model """distmult""" +174 83 loss """bceaftersigmoid""" +174 83 regularizer """no""" +174 83 optimizer """adadelta""" +174 83 training_loop """owa""" +174 83 negative_sampler """basic""" +174 83 evaluator """rankbased""" +174 84 dataset """kinships""" +174 84 model """distmult""" +174 84 loss """bceaftersigmoid""" +174 84 regularizer """no""" +174 84 optimizer """adadelta""" +174 84 training_loop """owa""" +174 84 negative_sampler """basic""" +174 84 evaluator """rankbased""" +174 85 dataset """kinships""" +174 85 model """distmult""" +174 85 loss """bceaftersigmoid""" +174 85 regularizer """no""" +174 85 optimizer """adadelta""" +174 85 training_loop """owa""" +174 85 negative_sampler """basic""" +174 85 evaluator """rankbased""" +174 86 dataset """kinships""" +174 86 model """distmult""" +174 86 loss """bceaftersigmoid""" +174 86 regularizer """no""" +174 86 optimizer """adadelta""" +174 86 training_loop """owa""" +174 
86 negative_sampler """basic""" +174 86 evaluator """rankbased""" +174 87 dataset """kinships""" +174 87 model """distmult""" +174 87 loss """bceaftersigmoid""" +174 87 regularizer """no""" +174 87 optimizer """adadelta""" +174 87 training_loop """owa""" +174 87 negative_sampler """basic""" +174 87 evaluator """rankbased""" +174 88 dataset """kinships""" +174 88 model """distmult""" +174 88 loss """bceaftersigmoid""" +174 88 regularizer """no""" +174 88 optimizer """adadelta""" +174 88 training_loop """owa""" +174 88 negative_sampler """basic""" +174 88 evaluator """rankbased""" +174 89 dataset """kinships""" +174 89 model """distmult""" +174 89 loss """bceaftersigmoid""" +174 89 regularizer """no""" +174 89 optimizer """adadelta""" +174 89 training_loop """owa""" +174 89 negative_sampler """basic""" +174 89 evaluator """rankbased""" +174 90 dataset """kinships""" +174 90 model """distmult""" +174 90 loss """bceaftersigmoid""" +174 90 regularizer """no""" +174 90 optimizer """adadelta""" +174 90 training_loop """owa""" +174 90 negative_sampler """basic""" +174 90 evaluator """rankbased""" +174 91 dataset """kinships""" +174 91 model """distmult""" +174 91 loss """bceaftersigmoid""" +174 91 regularizer """no""" +174 91 optimizer """adadelta""" +174 91 training_loop """owa""" +174 91 negative_sampler """basic""" +174 91 evaluator """rankbased""" +174 92 dataset """kinships""" +174 92 model """distmult""" +174 92 loss """bceaftersigmoid""" +174 92 regularizer """no""" +174 92 optimizer """adadelta""" +174 92 training_loop """owa""" +174 92 negative_sampler """basic""" +174 92 evaluator """rankbased""" +174 93 dataset """kinships""" +174 93 model """distmult""" +174 93 loss """bceaftersigmoid""" +174 93 regularizer """no""" +174 93 optimizer """adadelta""" +174 93 training_loop """owa""" +174 93 negative_sampler """basic""" +174 93 evaluator """rankbased""" +174 94 dataset """kinships""" +174 94 model """distmult""" +174 94 loss """bceaftersigmoid""" +174 94 
regularizer """no""" +174 94 optimizer """adadelta""" +174 94 training_loop """owa""" +174 94 negative_sampler """basic""" +174 94 evaluator """rankbased""" +174 95 dataset """kinships""" +174 95 model """distmult""" +174 95 loss """bceaftersigmoid""" +174 95 regularizer """no""" +174 95 optimizer """adadelta""" +174 95 training_loop """owa""" +174 95 negative_sampler """basic""" +174 95 evaluator """rankbased""" +174 96 dataset """kinships""" +174 96 model """distmult""" +174 96 loss """bceaftersigmoid""" +174 96 regularizer """no""" +174 96 optimizer """adadelta""" +174 96 training_loop """owa""" +174 96 negative_sampler """basic""" +174 96 evaluator """rankbased""" +174 97 dataset """kinships""" +174 97 model """distmult""" +174 97 loss """bceaftersigmoid""" +174 97 regularizer """no""" +174 97 optimizer """adadelta""" +174 97 training_loop """owa""" +174 97 negative_sampler """basic""" +174 97 evaluator """rankbased""" +174 98 dataset """kinships""" +174 98 model """distmult""" +174 98 loss """bceaftersigmoid""" +174 98 regularizer """no""" +174 98 optimizer """adadelta""" +174 98 training_loop """owa""" +174 98 negative_sampler """basic""" +174 98 evaluator """rankbased""" +174 99 dataset """kinships""" +174 99 model """distmult""" +174 99 loss """bceaftersigmoid""" +174 99 regularizer """no""" +174 99 optimizer """adadelta""" +174 99 training_loop """owa""" +174 99 negative_sampler """basic""" +174 99 evaluator """rankbased""" +174 100 dataset """kinships""" +174 100 model """distmult""" +174 100 loss """bceaftersigmoid""" +174 100 regularizer """no""" +174 100 optimizer """adadelta""" +174 100 training_loop """owa""" +174 100 negative_sampler """basic""" +174 100 evaluator """rankbased""" +175 1 model.embedding_dim 0.0 +175 1 negative_sampler.num_negs_per_pos 92.0 +175 1 training.batch_size 2.0 +175 2 model.embedding_dim 0.0 +175 2 negative_sampler.num_negs_per_pos 99.0 +175 2 training.batch_size 2.0 +175 3 model.embedding_dim 0.0 +175 3 
negative_sampler.num_negs_per_pos 82.0 +175 3 training.batch_size 1.0 +175 4 model.embedding_dim 0.0 +175 4 negative_sampler.num_negs_per_pos 13.0 +175 4 training.batch_size 2.0 +175 5 model.embedding_dim 2.0 +175 5 negative_sampler.num_negs_per_pos 34.0 +175 5 training.batch_size 0.0 +175 6 model.embedding_dim 2.0 +175 6 negative_sampler.num_negs_per_pos 32.0 +175 6 training.batch_size 1.0 +175 7 model.embedding_dim 1.0 +175 7 negative_sampler.num_negs_per_pos 62.0 +175 7 training.batch_size 2.0 +175 8 model.embedding_dim 1.0 +175 8 negative_sampler.num_negs_per_pos 88.0 +175 8 training.batch_size 2.0 +175 9 model.embedding_dim 2.0 +175 9 negative_sampler.num_negs_per_pos 24.0 +175 9 training.batch_size 1.0 +175 10 model.embedding_dim 0.0 +175 10 negative_sampler.num_negs_per_pos 35.0 +175 10 training.batch_size 0.0 +175 11 model.embedding_dim 2.0 +175 11 negative_sampler.num_negs_per_pos 33.0 +175 11 training.batch_size 0.0 +175 12 model.embedding_dim 1.0 +175 12 negative_sampler.num_negs_per_pos 78.0 +175 12 training.batch_size 0.0 +175 13 model.embedding_dim 1.0 +175 13 negative_sampler.num_negs_per_pos 5.0 +175 13 training.batch_size 2.0 +175 14 model.embedding_dim 2.0 +175 14 negative_sampler.num_negs_per_pos 70.0 +175 14 training.batch_size 0.0 +175 15 model.embedding_dim 0.0 +175 15 negative_sampler.num_negs_per_pos 84.0 +175 15 training.batch_size 2.0 +175 16 model.embedding_dim 2.0 +175 16 negative_sampler.num_negs_per_pos 41.0 +175 16 training.batch_size 2.0 +175 17 model.embedding_dim 2.0 +175 17 negative_sampler.num_negs_per_pos 12.0 +175 17 training.batch_size 0.0 +175 18 model.embedding_dim 1.0 +175 18 negative_sampler.num_negs_per_pos 16.0 +175 18 training.batch_size 2.0 +175 19 model.embedding_dim 1.0 +175 19 negative_sampler.num_negs_per_pos 98.0 +175 19 training.batch_size 1.0 +175 20 model.embedding_dim 0.0 +175 20 negative_sampler.num_negs_per_pos 64.0 +175 20 training.batch_size 1.0 +175 21 model.embedding_dim 2.0 +175 21 
negative_sampler.num_negs_per_pos 35.0 +175 21 training.batch_size 1.0 +175 22 model.embedding_dim 0.0 +175 22 negative_sampler.num_negs_per_pos 74.0 +175 22 training.batch_size 2.0 +175 23 model.embedding_dim 1.0 +175 23 negative_sampler.num_negs_per_pos 92.0 +175 23 training.batch_size 2.0 +175 24 model.embedding_dim 1.0 +175 24 negative_sampler.num_negs_per_pos 55.0 +175 24 training.batch_size 2.0 +175 25 model.embedding_dim 1.0 +175 25 negative_sampler.num_negs_per_pos 72.0 +175 25 training.batch_size 2.0 +175 26 model.embedding_dim 2.0 +175 26 negative_sampler.num_negs_per_pos 36.0 +175 26 training.batch_size 0.0 +175 27 model.embedding_dim 1.0 +175 27 negative_sampler.num_negs_per_pos 48.0 +175 27 training.batch_size 1.0 +175 28 model.embedding_dim 1.0 +175 28 negative_sampler.num_negs_per_pos 78.0 +175 28 training.batch_size 0.0 +175 29 model.embedding_dim 0.0 +175 29 negative_sampler.num_negs_per_pos 1.0 +175 29 training.batch_size 0.0 +175 30 model.embedding_dim 1.0 +175 30 negative_sampler.num_negs_per_pos 54.0 +175 30 training.batch_size 1.0 +175 31 model.embedding_dim 1.0 +175 31 negative_sampler.num_negs_per_pos 48.0 +175 31 training.batch_size 0.0 +175 32 model.embedding_dim 1.0 +175 32 negative_sampler.num_negs_per_pos 60.0 +175 32 training.batch_size 2.0 +175 33 model.embedding_dim 0.0 +175 33 negative_sampler.num_negs_per_pos 78.0 +175 33 training.batch_size 2.0 +175 34 model.embedding_dim 0.0 +175 34 negative_sampler.num_negs_per_pos 43.0 +175 34 training.batch_size 1.0 +175 35 model.embedding_dim 2.0 +175 35 negative_sampler.num_negs_per_pos 13.0 +175 35 training.batch_size 0.0 +175 36 model.embedding_dim 2.0 +175 36 negative_sampler.num_negs_per_pos 90.0 +175 36 training.batch_size 2.0 +175 37 model.embedding_dim 0.0 +175 37 negative_sampler.num_negs_per_pos 45.0 +175 37 training.batch_size 2.0 +175 38 model.embedding_dim 1.0 +175 38 negative_sampler.num_negs_per_pos 56.0 +175 38 training.batch_size 2.0 +175 39 model.embedding_dim 1.0 +175 39 
negative_sampler.num_negs_per_pos 57.0 +175 39 training.batch_size 0.0 +175 40 model.embedding_dim 1.0 +175 40 negative_sampler.num_negs_per_pos 2.0 +175 40 training.batch_size 1.0 +175 41 model.embedding_dim 0.0 +175 41 negative_sampler.num_negs_per_pos 75.0 +175 41 training.batch_size 2.0 +175 42 model.embedding_dim 1.0 +175 42 negative_sampler.num_negs_per_pos 79.0 +175 42 training.batch_size 1.0 +175 43 model.embedding_dim 1.0 +175 43 negative_sampler.num_negs_per_pos 12.0 +175 43 training.batch_size 1.0 +175 44 model.embedding_dim 0.0 +175 44 negative_sampler.num_negs_per_pos 46.0 +175 44 training.batch_size 2.0 +175 45 model.embedding_dim 0.0 +175 45 negative_sampler.num_negs_per_pos 99.0 +175 45 training.batch_size 2.0 +175 46 model.embedding_dim 2.0 +175 46 negative_sampler.num_negs_per_pos 58.0 +175 46 training.batch_size 2.0 +175 47 model.embedding_dim 0.0 +175 47 negative_sampler.num_negs_per_pos 33.0 +175 47 training.batch_size 1.0 +175 48 model.embedding_dim 2.0 +175 48 negative_sampler.num_negs_per_pos 51.0 +175 48 training.batch_size 0.0 +175 49 model.embedding_dim 2.0 +175 49 negative_sampler.num_negs_per_pos 58.0 +175 49 training.batch_size 0.0 +175 50 model.embedding_dim 0.0 +175 50 negative_sampler.num_negs_per_pos 16.0 +175 50 training.batch_size 2.0 +175 51 model.embedding_dim 0.0 +175 51 negative_sampler.num_negs_per_pos 38.0 +175 51 training.batch_size 1.0 +175 52 model.embedding_dim 0.0 +175 52 negative_sampler.num_negs_per_pos 88.0 +175 52 training.batch_size 0.0 +175 53 model.embedding_dim 2.0 +175 53 negative_sampler.num_negs_per_pos 29.0 +175 53 training.batch_size 0.0 +175 54 model.embedding_dim 0.0 +175 54 negative_sampler.num_negs_per_pos 28.0 +175 54 training.batch_size 1.0 +175 55 model.embedding_dim 1.0 +175 55 negative_sampler.num_negs_per_pos 11.0 +175 55 training.batch_size 0.0 +175 56 model.embedding_dim 0.0 +175 56 negative_sampler.num_negs_per_pos 57.0 +175 56 training.batch_size 0.0 +175 57 model.embedding_dim 1.0 +175 57 
negative_sampler.num_negs_per_pos 38.0 +175 57 training.batch_size 1.0 +175 58 model.embedding_dim 0.0 +175 58 negative_sampler.num_negs_per_pos 48.0 +175 58 training.batch_size 0.0 +175 59 model.embedding_dim 2.0 +175 59 negative_sampler.num_negs_per_pos 20.0 +175 59 training.batch_size 1.0 +175 60 model.embedding_dim 2.0 +175 60 negative_sampler.num_negs_per_pos 46.0 +175 60 training.batch_size 2.0 +175 61 model.embedding_dim 2.0 +175 61 negative_sampler.num_negs_per_pos 28.0 +175 61 training.batch_size 1.0 +175 62 model.embedding_dim 0.0 +175 62 negative_sampler.num_negs_per_pos 23.0 +175 62 training.batch_size 2.0 +175 63 model.embedding_dim 1.0 +175 63 negative_sampler.num_negs_per_pos 52.0 +175 63 training.batch_size 1.0 +175 64 model.embedding_dim 1.0 +175 64 negative_sampler.num_negs_per_pos 27.0 +175 64 training.batch_size 0.0 +175 65 model.embedding_dim 2.0 +175 65 negative_sampler.num_negs_per_pos 68.0 +175 65 training.batch_size 1.0 +175 66 model.embedding_dim 1.0 +175 66 negative_sampler.num_negs_per_pos 42.0 +175 66 training.batch_size 2.0 +175 67 model.embedding_dim 1.0 +175 67 negative_sampler.num_negs_per_pos 62.0 +175 67 training.batch_size 2.0 +175 68 model.embedding_dim 1.0 +175 68 negative_sampler.num_negs_per_pos 26.0 +175 68 training.batch_size 0.0 +175 69 model.embedding_dim 0.0 +175 69 negative_sampler.num_negs_per_pos 16.0 +175 69 training.batch_size 1.0 +175 70 model.embedding_dim 0.0 +175 70 negative_sampler.num_negs_per_pos 17.0 +175 70 training.batch_size 1.0 +175 71 model.embedding_dim 1.0 +175 71 negative_sampler.num_negs_per_pos 95.0 +175 71 training.batch_size 1.0 +175 72 model.embedding_dim 2.0 +175 72 negative_sampler.num_negs_per_pos 78.0 +175 72 training.batch_size 0.0 +175 73 model.embedding_dim 2.0 +175 73 negative_sampler.num_negs_per_pos 53.0 +175 73 training.batch_size 0.0 +175 74 model.embedding_dim 0.0 +175 74 negative_sampler.num_negs_per_pos 40.0 +175 74 training.batch_size 0.0 +175 75 model.embedding_dim 0.0 +175 75 
negative_sampler.num_negs_per_pos 83.0 +175 75 training.batch_size 1.0 +175 76 model.embedding_dim 0.0 +175 76 negative_sampler.num_negs_per_pos 52.0 +175 76 training.batch_size 0.0 +175 77 model.embedding_dim 1.0 +175 77 negative_sampler.num_negs_per_pos 9.0 +175 77 training.batch_size 1.0 +175 78 model.embedding_dim 2.0 +175 78 negative_sampler.num_negs_per_pos 43.0 +175 78 training.batch_size 2.0 +175 79 model.embedding_dim 2.0 +175 79 negative_sampler.num_negs_per_pos 90.0 +175 79 training.batch_size 0.0 +175 80 model.embedding_dim 2.0 +175 80 negative_sampler.num_negs_per_pos 65.0 +175 80 training.batch_size 0.0 +175 81 model.embedding_dim 2.0 +175 81 negative_sampler.num_negs_per_pos 24.0 +175 81 training.batch_size 0.0 +175 82 model.embedding_dim 2.0 +175 82 negative_sampler.num_negs_per_pos 50.0 +175 82 training.batch_size 2.0 +175 83 model.embedding_dim 1.0 +175 83 negative_sampler.num_negs_per_pos 16.0 +175 83 training.batch_size 1.0 +175 84 model.embedding_dim 0.0 +175 84 negative_sampler.num_negs_per_pos 85.0 +175 84 training.batch_size 0.0 +175 85 model.embedding_dim 1.0 +175 85 negative_sampler.num_negs_per_pos 63.0 +175 85 training.batch_size 1.0 +175 86 model.embedding_dim 0.0 +175 86 negative_sampler.num_negs_per_pos 48.0 +175 86 training.batch_size 0.0 +175 87 model.embedding_dim 1.0 +175 87 negative_sampler.num_negs_per_pos 40.0 +175 87 training.batch_size 1.0 +175 88 model.embedding_dim 2.0 +175 88 negative_sampler.num_negs_per_pos 8.0 +175 88 training.batch_size 0.0 +175 89 model.embedding_dim 2.0 +175 89 negative_sampler.num_negs_per_pos 13.0 +175 89 training.batch_size 0.0 +175 90 model.embedding_dim 0.0 +175 90 negative_sampler.num_negs_per_pos 58.0 +175 90 training.batch_size 2.0 +175 91 model.embedding_dim 1.0 +175 91 negative_sampler.num_negs_per_pos 79.0 +175 91 training.batch_size 1.0 +175 92 model.embedding_dim 1.0 +175 92 negative_sampler.num_negs_per_pos 54.0 +175 92 training.batch_size 1.0 +175 93 model.embedding_dim 2.0 +175 93 
negative_sampler.num_negs_per_pos 70.0 +175 93 training.batch_size 1.0 +175 94 model.embedding_dim 1.0 +175 94 negative_sampler.num_negs_per_pos 36.0 +175 94 training.batch_size 2.0 +175 95 model.embedding_dim 0.0 +175 95 negative_sampler.num_negs_per_pos 51.0 +175 95 training.batch_size 0.0 +175 96 model.embedding_dim 0.0 +175 96 negative_sampler.num_negs_per_pos 45.0 +175 96 training.batch_size 0.0 +175 97 model.embedding_dim 2.0 +175 97 negative_sampler.num_negs_per_pos 42.0 +175 97 training.batch_size 2.0 +175 98 model.embedding_dim 2.0 +175 98 negative_sampler.num_negs_per_pos 12.0 +175 98 training.batch_size 2.0 +175 99 model.embedding_dim 0.0 +175 99 negative_sampler.num_negs_per_pos 18.0 +175 99 training.batch_size 1.0 +175 100 model.embedding_dim 1.0 +175 100 negative_sampler.num_negs_per_pos 36.0 +175 100 training.batch_size 2.0 +175 1 dataset """kinships""" +175 1 model """distmult""" +175 1 loss """softplus""" +175 1 regularizer """no""" +175 1 optimizer """adadelta""" +175 1 training_loop """owa""" +175 1 negative_sampler """basic""" +175 1 evaluator """rankbased""" +175 2 dataset """kinships""" +175 2 model """distmult""" +175 2 loss """softplus""" +175 2 regularizer """no""" +175 2 optimizer """adadelta""" +175 2 training_loop """owa""" +175 2 negative_sampler """basic""" +175 2 evaluator """rankbased""" +175 3 dataset """kinships""" +175 3 model """distmult""" +175 3 loss """softplus""" +175 3 regularizer """no""" +175 3 optimizer """adadelta""" +175 3 training_loop """owa""" +175 3 negative_sampler """basic""" +175 3 evaluator """rankbased""" +175 4 dataset """kinships""" +175 4 model """distmult""" +175 4 loss """softplus""" +175 4 regularizer """no""" +175 4 optimizer """adadelta""" +175 4 training_loop """owa""" +175 4 negative_sampler """basic""" +175 4 evaluator """rankbased""" +175 5 dataset """kinships""" +175 5 model """distmult""" +175 5 loss """softplus""" +175 5 regularizer """no""" +175 5 optimizer """adadelta""" +175 5 training_loop 
"""owa""" +175 5 negative_sampler """basic""" +175 5 evaluator """rankbased""" +175 6 dataset """kinships""" +175 6 model """distmult""" +175 6 loss """softplus""" +175 6 regularizer """no""" +175 6 optimizer """adadelta""" +175 6 training_loop """owa""" +175 6 negative_sampler """basic""" +175 6 evaluator """rankbased""" +175 7 dataset """kinships""" +175 7 model """distmult""" +175 7 loss """softplus""" +175 7 regularizer """no""" +175 7 optimizer """adadelta""" +175 7 training_loop """owa""" +175 7 negative_sampler """basic""" +175 7 evaluator """rankbased""" +175 8 dataset """kinships""" +175 8 model """distmult""" +175 8 loss """softplus""" +175 8 regularizer """no""" +175 8 optimizer """adadelta""" +175 8 training_loop """owa""" +175 8 negative_sampler """basic""" +175 8 evaluator """rankbased""" +175 9 dataset """kinships""" +175 9 model """distmult""" +175 9 loss """softplus""" +175 9 regularizer """no""" +175 9 optimizer """adadelta""" +175 9 training_loop """owa""" +175 9 negative_sampler """basic""" +175 9 evaluator """rankbased""" +175 10 dataset """kinships""" +175 10 model """distmult""" +175 10 loss """softplus""" +175 10 regularizer """no""" +175 10 optimizer """adadelta""" +175 10 training_loop """owa""" +175 10 negative_sampler """basic""" +175 10 evaluator """rankbased""" +175 11 dataset """kinships""" +175 11 model """distmult""" +175 11 loss """softplus""" +175 11 regularizer """no""" +175 11 optimizer """adadelta""" +175 11 training_loop """owa""" +175 11 negative_sampler """basic""" +175 11 evaluator """rankbased""" +175 12 dataset """kinships""" +175 12 model """distmult""" +175 12 loss """softplus""" +175 12 regularizer """no""" +175 12 optimizer """adadelta""" +175 12 training_loop """owa""" +175 12 negative_sampler """basic""" +175 12 evaluator """rankbased""" +175 13 dataset """kinships""" +175 13 model """distmult""" +175 13 loss """softplus""" +175 13 regularizer """no""" +175 13 optimizer """adadelta""" +175 13 training_loop """owa""" 
+175 13 negative_sampler """basic""" +175 13 evaluator """rankbased""" +175 14 dataset """kinships""" +175 14 model """distmult""" +175 14 loss """softplus""" +175 14 regularizer """no""" +175 14 optimizer """adadelta""" +175 14 training_loop """owa""" +175 14 negative_sampler """basic""" +175 14 evaluator """rankbased""" +175 15 dataset """kinships""" +175 15 model """distmult""" +175 15 loss """softplus""" +175 15 regularizer """no""" +175 15 optimizer """adadelta""" +175 15 training_loop """owa""" +175 15 negative_sampler """basic""" +175 15 evaluator """rankbased""" +175 16 dataset """kinships""" +175 16 model """distmult""" +175 16 loss """softplus""" +175 16 regularizer """no""" +175 16 optimizer """adadelta""" +175 16 training_loop """owa""" +175 16 negative_sampler """basic""" +175 16 evaluator """rankbased""" +175 17 dataset """kinships""" +175 17 model """distmult""" +175 17 loss """softplus""" +175 17 regularizer """no""" +175 17 optimizer """adadelta""" +175 17 training_loop """owa""" +175 17 negative_sampler """basic""" +175 17 evaluator """rankbased""" +175 18 dataset """kinships""" +175 18 model """distmult""" +175 18 loss """softplus""" +175 18 regularizer """no""" +175 18 optimizer """adadelta""" +175 18 training_loop """owa""" +175 18 negative_sampler """basic""" +175 18 evaluator """rankbased""" +175 19 dataset """kinships""" +175 19 model """distmult""" +175 19 loss """softplus""" +175 19 regularizer """no""" +175 19 optimizer """adadelta""" +175 19 training_loop """owa""" +175 19 negative_sampler """basic""" +175 19 evaluator """rankbased""" +175 20 dataset """kinships""" +175 20 model """distmult""" +175 20 loss """softplus""" +175 20 regularizer """no""" +175 20 optimizer """adadelta""" +175 20 training_loop """owa""" +175 20 negative_sampler """basic""" +175 20 evaluator """rankbased""" +175 21 dataset """kinships""" +175 21 model """distmult""" +175 21 loss """softplus""" +175 21 regularizer """no""" +175 21 optimizer """adadelta""" +175 21 
training_loop """owa""" +175 21 negative_sampler """basic""" +175 21 evaluator """rankbased""" +175 22 dataset """kinships""" +175 22 model """distmult""" +175 22 loss """softplus""" +175 22 regularizer """no""" +175 22 optimizer """adadelta""" +175 22 training_loop """owa""" +175 22 negative_sampler """basic""" +175 22 evaluator """rankbased""" +175 23 dataset """kinships""" +175 23 model """distmult""" +175 23 loss """softplus""" +175 23 regularizer """no""" +175 23 optimizer """adadelta""" +175 23 training_loop """owa""" +175 23 negative_sampler """basic""" +175 23 evaluator """rankbased""" +175 24 dataset """kinships""" +175 24 model """distmult""" +175 24 loss """softplus""" +175 24 regularizer """no""" +175 24 optimizer """adadelta""" +175 24 training_loop """owa""" +175 24 negative_sampler """basic""" +175 24 evaluator """rankbased""" +175 25 dataset """kinships""" +175 25 model """distmult""" +175 25 loss """softplus""" +175 25 regularizer """no""" +175 25 optimizer """adadelta""" +175 25 training_loop """owa""" +175 25 negative_sampler """basic""" +175 25 evaluator """rankbased""" +175 26 dataset """kinships""" +175 26 model """distmult""" +175 26 loss """softplus""" +175 26 regularizer """no""" +175 26 optimizer """adadelta""" +175 26 training_loop """owa""" +175 26 negative_sampler """basic""" +175 26 evaluator """rankbased""" +175 27 dataset """kinships""" +175 27 model """distmult""" +175 27 loss """softplus""" +175 27 regularizer """no""" +175 27 optimizer """adadelta""" +175 27 training_loop """owa""" +175 27 negative_sampler """basic""" +175 27 evaluator """rankbased""" +175 28 dataset """kinships""" +175 28 model """distmult""" +175 28 loss """softplus""" +175 28 regularizer """no""" +175 28 optimizer """adadelta""" +175 28 training_loop """owa""" +175 28 negative_sampler """basic""" +175 28 evaluator """rankbased""" +175 29 dataset """kinships""" +175 29 model """distmult""" +175 29 loss """softplus""" +175 29 regularizer """no""" +175 29 
optimizer """adadelta""" +175 29 training_loop """owa""" +175 29 negative_sampler """basic""" +175 29 evaluator """rankbased""" +175 30 dataset """kinships""" +175 30 model """distmult""" +175 30 loss """softplus""" +175 30 regularizer """no""" +175 30 optimizer """adadelta""" +175 30 training_loop """owa""" +175 30 negative_sampler """basic""" +175 30 evaluator """rankbased""" +175 31 dataset """kinships""" +175 31 model """distmult""" +175 31 loss """softplus""" +175 31 regularizer """no""" +175 31 optimizer """adadelta""" +175 31 training_loop """owa""" +175 31 negative_sampler """basic""" +175 31 evaluator """rankbased""" +175 32 dataset """kinships""" +175 32 model """distmult""" +175 32 loss """softplus""" +175 32 regularizer """no""" +175 32 optimizer """adadelta""" +175 32 training_loop """owa""" +175 32 negative_sampler """basic""" +175 32 evaluator """rankbased""" +175 33 dataset """kinships""" +175 33 model """distmult""" +175 33 loss """softplus""" +175 33 regularizer """no""" +175 33 optimizer """adadelta""" +175 33 training_loop """owa""" +175 33 negative_sampler """basic""" +175 33 evaluator """rankbased""" +175 34 dataset """kinships""" +175 34 model """distmult""" +175 34 loss """softplus""" +175 34 regularizer """no""" +175 34 optimizer """adadelta""" +175 34 training_loop """owa""" +175 34 negative_sampler """basic""" +175 34 evaluator """rankbased""" +175 35 dataset """kinships""" +175 35 model """distmult""" +175 35 loss """softplus""" +175 35 regularizer """no""" +175 35 optimizer """adadelta""" +175 35 training_loop """owa""" +175 35 negative_sampler """basic""" +175 35 evaluator """rankbased""" +175 36 dataset """kinships""" +175 36 model """distmult""" +175 36 loss """softplus""" +175 36 regularizer """no""" +175 36 optimizer """adadelta""" +175 36 training_loop """owa""" +175 36 negative_sampler """basic""" +175 36 evaluator """rankbased""" +175 37 dataset """kinships""" +175 37 model """distmult""" +175 37 loss """softplus""" +175 37 
regularizer """no""" +175 37 optimizer """adadelta""" +175 37 training_loop """owa""" +175 37 negative_sampler """basic""" +175 37 evaluator """rankbased""" +175 38 dataset """kinships""" +175 38 model """distmult""" +175 38 loss """softplus""" +175 38 regularizer """no""" +175 38 optimizer """adadelta""" +175 38 training_loop """owa""" +175 38 negative_sampler """basic""" +175 38 evaluator """rankbased""" +175 39 dataset """kinships""" +175 39 model """distmult""" +175 39 loss """softplus""" +175 39 regularizer """no""" +175 39 optimizer """adadelta""" +175 39 training_loop """owa""" +175 39 negative_sampler """basic""" +175 39 evaluator """rankbased""" +175 40 dataset """kinships""" +175 40 model """distmult""" +175 40 loss """softplus""" +175 40 regularizer """no""" +175 40 optimizer """adadelta""" +175 40 training_loop """owa""" +175 40 negative_sampler """basic""" +175 40 evaluator """rankbased""" +175 41 dataset """kinships""" +175 41 model """distmult""" +175 41 loss """softplus""" +175 41 regularizer """no""" +175 41 optimizer """adadelta""" +175 41 training_loop """owa""" +175 41 negative_sampler """basic""" +175 41 evaluator """rankbased""" +175 42 dataset """kinships""" +175 42 model """distmult""" +175 42 loss """softplus""" +175 42 regularizer """no""" +175 42 optimizer """adadelta""" +175 42 training_loop """owa""" +175 42 negative_sampler """basic""" +175 42 evaluator """rankbased""" +175 43 dataset """kinships""" +175 43 model """distmult""" +175 43 loss """softplus""" +175 43 regularizer """no""" +175 43 optimizer """adadelta""" +175 43 training_loop """owa""" +175 43 negative_sampler """basic""" +175 43 evaluator """rankbased""" +175 44 dataset """kinships""" +175 44 model """distmult""" +175 44 loss """softplus""" +175 44 regularizer """no""" +175 44 optimizer """adadelta""" +175 44 training_loop """owa""" +175 44 negative_sampler """basic""" +175 44 evaluator """rankbased""" +175 45 dataset """kinships""" +175 45 model """distmult""" +175 45 
loss """softplus""" +175 45 regularizer """no""" +175 45 optimizer """adadelta""" +175 45 training_loop """owa""" +175 45 negative_sampler """basic""" +175 45 evaluator """rankbased""" +175 46 dataset """kinships""" +175 46 model """distmult""" +175 46 loss """softplus""" +175 46 regularizer """no""" +175 46 optimizer """adadelta""" +175 46 training_loop """owa""" +175 46 negative_sampler """basic""" +175 46 evaluator """rankbased""" +175 47 dataset """kinships""" +175 47 model """distmult""" +175 47 loss """softplus""" +175 47 regularizer """no""" +175 47 optimizer """adadelta""" +175 47 training_loop """owa""" +175 47 negative_sampler """basic""" +175 47 evaluator """rankbased""" +175 48 dataset """kinships""" +175 48 model """distmult""" +175 48 loss """softplus""" +175 48 regularizer """no""" +175 48 optimizer """adadelta""" +175 48 training_loop """owa""" +175 48 negative_sampler """basic""" +175 48 evaluator """rankbased""" +175 49 dataset """kinships""" +175 49 model """distmult""" +175 49 loss """softplus""" +175 49 regularizer """no""" +175 49 optimizer """adadelta""" +175 49 training_loop """owa""" +175 49 negative_sampler """basic""" +175 49 evaluator """rankbased""" +175 50 dataset """kinships""" +175 50 model """distmult""" +175 50 loss """softplus""" +175 50 regularizer """no""" +175 50 optimizer """adadelta""" +175 50 training_loop """owa""" +175 50 negative_sampler """basic""" +175 50 evaluator """rankbased""" +175 51 dataset """kinships""" +175 51 model """distmult""" +175 51 loss """softplus""" +175 51 regularizer """no""" +175 51 optimizer """adadelta""" +175 51 training_loop """owa""" +175 51 negative_sampler """basic""" +175 51 evaluator """rankbased""" +175 52 dataset """kinships""" +175 52 model """distmult""" +175 52 loss """softplus""" +175 52 regularizer """no""" +175 52 optimizer """adadelta""" +175 52 training_loop """owa""" +175 52 negative_sampler """basic""" +175 52 evaluator """rankbased""" +175 53 dataset """kinships""" +175 53 
model """distmult""" +175 53 loss """softplus""" +175 53 regularizer """no""" +175 53 optimizer """adadelta""" +175 53 training_loop """owa""" +175 53 negative_sampler """basic""" +175 53 evaluator """rankbased""" +175 54 dataset """kinships""" +175 54 model """distmult""" +175 54 loss """softplus""" +175 54 regularizer """no""" +175 54 optimizer """adadelta""" +175 54 training_loop """owa""" +175 54 negative_sampler """basic""" +175 54 evaluator """rankbased""" +175 55 dataset """kinships""" +175 55 model """distmult""" +175 55 loss """softplus""" +175 55 regularizer """no""" +175 55 optimizer """adadelta""" +175 55 training_loop """owa""" +175 55 negative_sampler """basic""" +175 55 evaluator """rankbased""" +175 56 dataset """kinships""" +175 56 model """distmult""" +175 56 loss """softplus""" +175 56 regularizer """no""" +175 56 optimizer """adadelta""" +175 56 training_loop """owa""" +175 56 negative_sampler """basic""" +175 56 evaluator """rankbased""" +175 57 dataset """kinships""" +175 57 model """distmult""" +175 57 loss """softplus""" +175 57 regularizer """no""" +175 57 optimizer """adadelta""" +175 57 training_loop """owa""" +175 57 negative_sampler """basic""" +175 57 evaluator """rankbased""" +175 58 dataset """kinships""" +175 58 model """distmult""" +175 58 loss """softplus""" +175 58 regularizer """no""" +175 58 optimizer """adadelta""" +175 58 training_loop """owa""" +175 58 negative_sampler """basic""" +175 58 evaluator """rankbased""" +175 59 dataset """kinships""" +175 59 model """distmult""" +175 59 loss """softplus""" +175 59 regularizer """no""" +175 59 optimizer """adadelta""" +175 59 training_loop """owa""" +175 59 negative_sampler """basic""" +175 59 evaluator """rankbased""" +175 60 dataset """kinships""" +175 60 model """distmult""" +175 60 loss """softplus""" +175 60 regularizer """no""" +175 60 optimizer """adadelta""" +175 60 training_loop """owa""" +175 60 negative_sampler """basic""" +175 60 evaluator """rankbased""" +175 61 
dataset """kinships""" +175 61 model """distmult""" +175 61 loss """softplus""" +175 61 regularizer """no""" +175 61 optimizer """adadelta""" +175 61 training_loop """owa""" +175 61 negative_sampler """basic""" +175 61 evaluator """rankbased""" +175 62 dataset """kinships""" +175 62 model """distmult""" +175 62 loss """softplus""" +175 62 regularizer """no""" +175 62 optimizer """adadelta""" +175 62 training_loop """owa""" +175 62 negative_sampler """basic""" +175 62 evaluator """rankbased""" +175 63 dataset """kinships""" +175 63 model """distmult""" +175 63 loss """softplus""" +175 63 regularizer """no""" +175 63 optimizer """adadelta""" +175 63 training_loop """owa""" +175 63 negative_sampler """basic""" +175 63 evaluator """rankbased""" +175 64 dataset """kinships""" +175 64 model """distmult""" +175 64 loss """softplus""" +175 64 regularizer """no""" +175 64 optimizer """adadelta""" +175 64 training_loop """owa""" +175 64 negative_sampler """basic""" +175 64 evaluator """rankbased""" +175 65 dataset """kinships""" +175 65 model """distmult""" +175 65 loss """softplus""" +175 65 regularizer """no""" +175 65 optimizer """adadelta""" +175 65 training_loop """owa""" +175 65 negative_sampler """basic""" +175 65 evaluator """rankbased""" +175 66 dataset """kinships""" +175 66 model """distmult""" +175 66 loss """softplus""" +175 66 regularizer """no""" +175 66 optimizer """adadelta""" +175 66 training_loop """owa""" +175 66 negative_sampler """basic""" +175 66 evaluator """rankbased""" +175 67 dataset """kinships""" +175 67 model """distmult""" +175 67 loss """softplus""" +175 67 regularizer """no""" +175 67 optimizer """adadelta""" +175 67 training_loop """owa""" +175 67 negative_sampler """basic""" +175 67 evaluator """rankbased""" +175 68 dataset """kinships""" +175 68 model """distmult""" +175 68 loss """softplus""" +175 68 regularizer """no""" +175 68 optimizer """adadelta""" +175 68 training_loop """owa""" +175 68 negative_sampler """basic""" +175 68 evaluator 
"""rankbased""" +175 69 dataset """kinships""" +175 69 model """distmult""" +175 69 loss """softplus""" +175 69 regularizer """no""" +175 69 optimizer """adadelta""" +175 69 training_loop """owa""" +175 69 negative_sampler """basic""" +175 69 evaluator """rankbased""" +175 70 dataset """kinships""" +175 70 model """distmult""" +175 70 loss """softplus""" +175 70 regularizer """no""" +175 70 optimizer """adadelta""" +175 70 training_loop """owa""" +175 70 negative_sampler """basic""" +175 70 evaluator """rankbased""" +175 71 dataset """kinships""" +175 71 model """distmult""" +175 71 loss """softplus""" +175 71 regularizer """no""" +175 71 optimizer """adadelta""" +175 71 training_loop """owa""" +175 71 negative_sampler """basic""" +175 71 evaluator """rankbased""" +175 72 dataset """kinships""" +175 72 model """distmult""" +175 72 loss """softplus""" +175 72 regularizer """no""" +175 72 optimizer """adadelta""" +175 72 training_loop """owa""" +175 72 negative_sampler """basic""" +175 72 evaluator """rankbased""" +175 73 dataset """kinships""" +175 73 model """distmult""" +175 73 loss """softplus""" +175 73 regularizer """no""" +175 73 optimizer """adadelta""" +175 73 training_loop """owa""" +175 73 negative_sampler """basic""" +175 73 evaluator """rankbased""" +175 74 dataset """kinships""" +175 74 model """distmult""" +175 74 loss """softplus""" +175 74 regularizer """no""" +175 74 optimizer """adadelta""" +175 74 training_loop """owa""" +175 74 negative_sampler """basic""" +175 74 evaluator """rankbased""" +175 75 dataset """kinships""" +175 75 model """distmult""" +175 75 loss """softplus""" +175 75 regularizer """no""" +175 75 optimizer """adadelta""" +175 75 training_loop """owa""" +175 75 negative_sampler """basic""" +175 75 evaluator """rankbased""" +175 76 dataset """kinships""" +175 76 model """distmult""" +175 76 loss """softplus""" +175 76 regularizer """no""" +175 76 optimizer """adadelta""" +175 76 training_loop """owa""" +175 76 negative_sampler 
"""basic""" +175 76 evaluator """rankbased""" +175 77 dataset """kinships""" +175 77 model """distmult""" +175 77 loss """softplus""" +175 77 regularizer """no""" +175 77 optimizer """adadelta""" +175 77 training_loop """owa""" +175 77 negative_sampler """basic""" +175 77 evaluator """rankbased""" +175 78 dataset """kinships""" +175 78 model """distmult""" +175 78 loss """softplus""" +175 78 regularizer """no""" +175 78 optimizer """adadelta""" +175 78 training_loop """owa""" +175 78 negative_sampler """basic""" +175 78 evaluator """rankbased""" +175 79 dataset """kinships""" +175 79 model """distmult""" +175 79 loss """softplus""" +175 79 regularizer """no""" +175 79 optimizer """adadelta""" +175 79 training_loop """owa""" +175 79 negative_sampler """basic""" +175 79 evaluator """rankbased""" +175 80 dataset """kinships""" +175 80 model """distmult""" +175 80 loss """softplus""" +175 80 regularizer """no""" +175 80 optimizer """adadelta""" +175 80 training_loop """owa""" +175 80 negative_sampler """basic""" +175 80 evaluator """rankbased""" +175 81 dataset """kinships""" +175 81 model """distmult""" +175 81 loss """softplus""" +175 81 regularizer """no""" +175 81 optimizer """adadelta""" +175 81 training_loop """owa""" +175 81 negative_sampler """basic""" +175 81 evaluator """rankbased""" +175 82 dataset """kinships""" +175 82 model """distmult""" +175 82 loss """softplus""" +175 82 regularizer """no""" +175 82 optimizer """adadelta""" +175 82 training_loop """owa""" +175 82 negative_sampler """basic""" +175 82 evaluator """rankbased""" +175 83 dataset """kinships""" +175 83 model """distmult""" +175 83 loss """softplus""" +175 83 regularizer """no""" +175 83 optimizer """adadelta""" +175 83 training_loop """owa""" +175 83 negative_sampler """basic""" +175 83 evaluator """rankbased""" +175 84 dataset """kinships""" +175 84 model """distmult""" +175 84 loss """softplus""" +175 84 regularizer """no""" +175 84 optimizer """adadelta""" +175 84 training_loop """owa""" 
+175 84 negative_sampler """basic""" +175 84 evaluator """rankbased""" +175 85 dataset """kinships""" +175 85 model """distmult""" +175 85 loss """softplus""" +175 85 regularizer """no""" +175 85 optimizer """adadelta""" +175 85 training_loop """owa""" +175 85 negative_sampler """basic""" +175 85 evaluator """rankbased""" +175 86 dataset """kinships""" +175 86 model """distmult""" +175 86 loss """softplus""" +175 86 regularizer """no""" +175 86 optimizer """adadelta""" +175 86 training_loop """owa""" +175 86 negative_sampler """basic""" +175 86 evaluator """rankbased""" +175 87 dataset """kinships""" +175 87 model """distmult""" +175 87 loss """softplus""" +175 87 regularizer """no""" +175 87 optimizer """adadelta""" +175 87 training_loop """owa""" +175 87 negative_sampler """basic""" +175 87 evaluator """rankbased""" +175 88 dataset """kinships""" +175 88 model """distmult""" +175 88 loss """softplus""" +175 88 regularizer """no""" +175 88 optimizer """adadelta""" +175 88 training_loop """owa""" +175 88 negative_sampler """basic""" +175 88 evaluator """rankbased""" +175 89 dataset """kinships""" +175 89 model """distmult""" +175 89 loss """softplus""" +175 89 regularizer """no""" +175 89 optimizer """adadelta""" +175 89 training_loop """owa""" +175 89 negative_sampler """basic""" +175 89 evaluator """rankbased""" +175 90 dataset """kinships""" +175 90 model """distmult""" +175 90 loss """softplus""" +175 90 regularizer """no""" +175 90 optimizer """adadelta""" +175 90 training_loop """owa""" +175 90 negative_sampler """basic""" +175 90 evaluator """rankbased""" +175 91 dataset """kinships""" +175 91 model """distmult""" +175 91 loss """softplus""" +175 91 regularizer """no""" +175 91 optimizer """adadelta""" +175 91 training_loop """owa""" +175 91 negative_sampler """basic""" +175 91 evaluator """rankbased""" +175 92 dataset """kinships""" +175 92 model """distmult""" +175 92 loss """softplus""" +175 92 regularizer """no""" +175 92 optimizer """adadelta""" +175 92 
training_loop """owa""" +175 92 negative_sampler """basic""" +175 92 evaluator """rankbased""" +175 93 dataset """kinships""" +175 93 model """distmult""" +175 93 loss """softplus""" +175 93 regularizer """no""" +175 93 optimizer """adadelta""" +175 93 training_loop """owa""" +175 93 negative_sampler """basic""" +175 93 evaluator """rankbased""" +175 94 dataset """kinships""" +175 94 model """distmult""" +175 94 loss """softplus""" +175 94 regularizer """no""" +175 94 optimizer """adadelta""" +175 94 training_loop """owa""" +175 94 negative_sampler """basic""" +175 94 evaluator """rankbased""" +175 95 dataset """kinships""" +175 95 model """distmult""" +175 95 loss """softplus""" +175 95 regularizer """no""" +175 95 optimizer """adadelta""" +175 95 training_loop """owa""" +175 95 negative_sampler """basic""" +175 95 evaluator """rankbased""" +175 96 dataset """kinships""" +175 96 model """distmult""" +175 96 loss """softplus""" +175 96 regularizer """no""" +175 96 optimizer """adadelta""" +175 96 training_loop """owa""" +175 96 negative_sampler """basic""" +175 96 evaluator """rankbased""" +175 97 dataset """kinships""" +175 97 model """distmult""" +175 97 loss """softplus""" +175 97 regularizer """no""" +175 97 optimizer """adadelta""" +175 97 training_loop """owa""" +175 97 negative_sampler """basic""" +175 97 evaluator """rankbased""" +175 98 dataset """kinships""" +175 98 model """distmult""" +175 98 loss """softplus""" +175 98 regularizer """no""" +175 98 optimizer """adadelta""" +175 98 training_loop """owa""" +175 98 negative_sampler """basic""" +175 98 evaluator """rankbased""" +175 99 dataset """kinships""" +175 99 model """distmult""" +175 99 loss """softplus""" +175 99 regularizer """no""" +175 99 optimizer """adadelta""" +175 99 training_loop """owa""" +175 99 negative_sampler """basic""" +175 99 evaluator """rankbased""" +175 100 dataset """kinships""" +175 100 model """distmult""" +175 100 loss """softplus""" +175 100 regularizer """no""" +175 100 
optimizer """adadelta""" +175 100 training_loop """owa""" +175 100 negative_sampler """basic""" +175 100 evaluator """rankbased""" +176 1 model.embedding_dim 0.0 +176 1 negative_sampler.num_negs_per_pos 65.0 +176 1 training.batch_size 2.0 +176 2 model.embedding_dim 1.0 +176 2 negative_sampler.num_negs_per_pos 78.0 +176 2 training.batch_size 0.0 +176 3 model.embedding_dim 0.0 +176 3 negative_sampler.num_negs_per_pos 42.0 +176 3 training.batch_size 2.0 +176 4 model.embedding_dim 1.0 +176 4 negative_sampler.num_negs_per_pos 2.0 +176 4 training.batch_size 1.0 +176 5 model.embedding_dim 2.0 +176 5 negative_sampler.num_negs_per_pos 79.0 +176 5 training.batch_size 0.0 +176 6 model.embedding_dim 0.0 +176 6 negative_sampler.num_negs_per_pos 86.0 +176 6 training.batch_size 2.0 +176 7 model.embedding_dim 2.0 +176 7 negative_sampler.num_negs_per_pos 20.0 +176 7 training.batch_size 1.0 +176 8 model.embedding_dim 2.0 +176 8 negative_sampler.num_negs_per_pos 73.0 +176 8 training.batch_size 1.0 +176 9 model.embedding_dim 0.0 +176 9 negative_sampler.num_negs_per_pos 77.0 +176 9 training.batch_size 0.0 +176 10 model.embedding_dim 1.0 +176 10 negative_sampler.num_negs_per_pos 73.0 +176 10 training.batch_size 0.0 +176 11 model.embedding_dim 1.0 +176 11 negative_sampler.num_negs_per_pos 15.0 +176 11 training.batch_size 1.0 +176 12 model.embedding_dim 1.0 +176 12 negative_sampler.num_negs_per_pos 83.0 +176 12 training.batch_size 2.0 +176 13 model.embedding_dim 2.0 +176 13 negative_sampler.num_negs_per_pos 4.0 +176 13 training.batch_size 1.0 +176 14 model.embedding_dim 1.0 +176 14 negative_sampler.num_negs_per_pos 80.0 +176 14 training.batch_size 1.0 +176 15 model.embedding_dim 0.0 +176 15 negative_sampler.num_negs_per_pos 36.0 +176 15 training.batch_size 2.0 +176 16 model.embedding_dim 2.0 +176 16 negative_sampler.num_negs_per_pos 95.0 +176 16 training.batch_size 2.0 +176 17 model.embedding_dim 1.0 +176 17 negative_sampler.num_negs_per_pos 50.0 +176 17 training.batch_size 0.0 +176 18 
model.embedding_dim 0.0 +176 18 negative_sampler.num_negs_per_pos 35.0 +176 18 training.batch_size 1.0 +176 19 model.embedding_dim 0.0 +176 19 negative_sampler.num_negs_per_pos 90.0 +176 19 training.batch_size 2.0 +176 20 model.embedding_dim 0.0 +176 20 negative_sampler.num_negs_per_pos 21.0 +176 20 training.batch_size 0.0 +176 21 model.embedding_dim 1.0 +176 21 negative_sampler.num_negs_per_pos 90.0 +176 21 training.batch_size 1.0 +176 22 model.embedding_dim 1.0 +176 22 negative_sampler.num_negs_per_pos 83.0 +176 22 training.batch_size 1.0 +176 23 model.embedding_dim 1.0 +176 23 negative_sampler.num_negs_per_pos 52.0 +176 23 training.batch_size 2.0 +176 24 model.embedding_dim 2.0 +176 24 negative_sampler.num_negs_per_pos 66.0 +176 24 training.batch_size 0.0 +176 25 model.embedding_dim 1.0 +176 25 negative_sampler.num_negs_per_pos 49.0 +176 25 training.batch_size 1.0 +176 26 model.embedding_dim 0.0 +176 26 negative_sampler.num_negs_per_pos 16.0 +176 26 training.batch_size 1.0 +176 27 model.embedding_dim 1.0 +176 27 negative_sampler.num_negs_per_pos 3.0 +176 27 training.batch_size 0.0 +176 28 model.embedding_dim 0.0 +176 28 negative_sampler.num_negs_per_pos 66.0 +176 28 training.batch_size 2.0 +176 29 model.embedding_dim 0.0 +176 29 negative_sampler.num_negs_per_pos 60.0 +176 29 training.batch_size 2.0 +176 30 model.embedding_dim 2.0 +176 30 negative_sampler.num_negs_per_pos 18.0 +176 30 training.batch_size 2.0 +176 31 model.embedding_dim 2.0 +176 31 negative_sampler.num_negs_per_pos 7.0 +176 31 training.batch_size 1.0 +176 32 model.embedding_dim 0.0 +176 32 negative_sampler.num_negs_per_pos 61.0 +176 32 training.batch_size 2.0 +176 33 model.embedding_dim 0.0 +176 33 negative_sampler.num_negs_per_pos 36.0 +176 33 training.batch_size 1.0 +176 34 model.embedding_dim 2.0 +176 34 negative_sampler.num_negs_per_pos 18.0 +176 34 training.batch_size 1.0 +176 35 model.embedding_dim 0.0 +176 35 negative_sampler.num_negs_per_pos 51.0 +176 35 training.batch_size 1.0 +176 36 
model.embedding_dim 0.0 +176 36 negative_sampler.num_negs_per_pos 98.0 +176 36 training.batch_size 1.0 +176 37 model.embedding_dim 2.0 +176 37 negative_sampler.num_negs_per_pos 44.0 +176 37 training.batch_size 0.0 +176 38 model.embedding_dim 0.0 +176 38 negative_sampler.num_negs_per_pos 87.0 +176 38 training.batch_size 0.0 +176 39 model.embedding_dim 1.0 +176 39 negative_sampler.num_negs_per_pos 77.0 +176 39 training.batch_size 0.0 +176 40 model.embedding_dim 1.0 +176 40 negative_sampler.num_negs_per_pos 23.0 +176 40 training.batch_size 1.0 +176 41 model.embedding_dim 2.0 +176 41 negative_sampler.num_negs_per_pos 88.0 +176 41 training.batch_size 2.0 +176 42 model.embedding_dim 0.0 +176 42 negative_sampler.num_negs_per_pos 3.0 +176 42 training.batch_size 1.0 +176 43 model.embedding_dim 2.0 +176 43 negative_sampler.num_negs_per_pos 69.0 +176 43 training.batch_size 1.0 +176 44 model.embedding_dim 2.0 +176 44 negative_sampler.num_negs_per_pos 36.0 +176 44 training.batch_size 1.0 +176 45 model.embedding_dim 0.0 +176 45 negative_sampler.num_negs_per_pos 0.0 +176 45 training.batch_size 2.0 +176 46 model.embedding_dim 1.0 +176 46 negative_sampler.num_negs_per_pos 52.0 +176 46 training.batch_size 2.0 +176 47 model.embedding_dim 0.0 +176 47 negative_sampler.num_negs_per_pos 72.0 +176 47 training.batch_size 2.0 +176 48 model.embedding_dim 0.0 +176 48 negative_sampler.num_negs_per_pos 39.0 +176 48 training.batch_size 2.0 +176 49 model.embedding_dim 2.0 +176 49 negative_sampler.num_negs_per_pos 97.0 +176 49 training.batch_size 1.0 +176 50 model.embedding_dim 2.0 +176 50 negative_sampler.num_negs_per_pos 92.0 +176 50 training.batch_size 1.0 +176 51 model.embedding_dim 0.0 +176 51 negative_sampler.num_negs_per_pos 74.0 +176 51 training.batch_size 2.0 +176 52 model.embedding_dim 0.0 +176 52 negative_sampler.num_negs_per_pos 44.0 +176 52 training.batch_size 1.0 +176 53 model.embedding_dim 1.0 +176 53 negative_sampler.num_negs_per_pos 86.0 +176 53 training.batch_size 0.0 +176 54 
model.embedding_dim 2.0 +176 54 negative_sampler.num_negs_per_pos 80.0 +176 54 training.batch_size 0.0 +176 55 model.embedding_dim 1.0 +176 55 negative_sampler.num_negs_per_pos 53.0 +176 55 training.batch_size 1.0 +176 56 model.embedding_dim 1.0 +176 56 negative_sampler.num_negs_per_pos 69.0 +176 56 training.batch_size 0.0 +176 57 model.embedding_dim 1.0 +176 57 negative_sampler.num_negs_per_pos 88.0 +176 57 training.batch_size 0.0 +176 58 model.embedding_dim 1.0 +176 58 negative_sampler.num_negs_per_pos 64.0 +176 58 training.batch_size 0.0 +176 59 model.embedding_dim 0.0 +176 59 negative_sampler.num_negs_per_pos 0.0 +176 59 training.batch_size 0.0 +176 60 model.embedding_dim 1.0 +176 60 negative_sampler.num_negs_per_pos 73.0 +176 60 training.batch_size 1.0 +176 61 model.embedding_dim 0.0 +176 61 negative_sampler.num_negs_per_pos 33.0 +176 61 training.batch_size 2.0 +176 62 model.embedding_dim 0.0 +176 62 negative_sampler.num_negs_per_pos 11.0 +176 62 training.batch_size 0.0 +176 63 model.embedding_dim 0.0 +176 63 negative_sampler.num_negs_per_pos 76.0 +176 63 training.batch_size 0.0 +176 64 model.embedding_dim 1.0 +176 64 negative_sampler.num_negs_per_pos 60.0 +176 64 training.batch_size 2.0 +176 65 model.embedding_dim 2.0 +176 65 negative_sampler.num_negs_per_pos 52.0 +176 65 training.batch_size 1.0 +176 66 model.embedding_dim 2.0 +176 66 negative_sampler.num_negs_per_pos 60.0 +176 66 training.batch_size 2.0 +176 67 model.embedding_dim 1.0 +176 67 negative_sampler.num_negs_per_pos 88.0 +176 67 training.batch_size 2.0 +176 68 model.embedding_dim 1.0 +176 68 negative_sampler.num_negs_per_pos 15.0 +176 68 training.batch_size 1.0 +176 69 model.embedding_dim 1.0 +176 69 negative_sampler.num_negs_per_pos 20.0 +176 69 training.batch_size 1.0 +176 70 model.embedding_dim 0.0 +176 70 negative_sampler.num_negs_per_pos 19.0 +176 70 training.batch_size 2.0 +176 71 model.embedding_dim 2.0 +176 71 negative_sampler.num_negs_per_pos 77.0 +176 71 training.batch_size 1.0 +176 72 
model.embedding_dim 2.0 +176 72 negative_sampler.num_negs_per_pos 48.0 +176 72 training.batch_size 2.0 +176 73 model.embedding_dim 0.0 +176 73 negative_sampler.num_negs_per_pos 87.0 +176 73 training.batch_size 1.0 +176 74 model.embedding_dim 0.0 +176 74 negative_sampler.num_negs_per_pos 85.0 +176 74 training.batch_size 0.0 +176 75 model.embedding_dim 1.0 +176 75 negative_sampler.num_negs_per_pos 56.0 +176 75 training.batch_size 1.0 +176 76 model.embedding_dim 1.0 +176 76 negative_sampler.num_negs_per_pos 66.0 +176 76 training.batch_size 2.0 +176 77 model.embedding_dim 1.0 +176 77 negative_sampler.num_negs_per_pos 92.0 +176 77 training.batch_size 2.0 +176 78 model.embedding_dim 2.0 +176 78 negative_sampler.num_negs_per_pos 58.0 +176 78 training.batch_size 1.0 +176 79 model.embedding_dim 2.0 +176 79 negative_sampler.num_negs_per_pos 88.0 +176 79 training.batch_size 1.0 +176 80 model.embedding_dim 0.0 +176 80 negative_sampler.num_negs_per_pos 22.0 +176 80 training.batch_size 1.0 +176 81 model.embedding_dim 2.0 +176 81 negative_sampler.num_negs_per_pos 18.0 +176 81 training.batch_size 1.0 +176 82 model.embedding_dim 2.0 +176 82 negative_sampler.num_negs_per_pos 26.0 +176 82 training.batch_size 2.0 +176 83 model.embedding_dim 0.0 +176 83 negative_sampler.num_negs_per_pos 60.0 +176 83 training.batch_size 2.0 +176 84 model.embedding_dim 2.0 +176 84 negative_sampler.num_negs_per_pos 59.0 +176 84 training.batch_size 1.0 +176 85 model.embedding_dim 2.0 +176 85 negative_sampler.num_negs_per_pos 18.0 +176 85 training.batch_size 2.0 +176 86 model.embedding_dim 2.0 +176 86 negative_sampler.num_negs_per_pos 83.0 +176 86 training.batch_size 0.0 +176 87 model.embedding_dim 0.0 +176 87 negative_sampler.num_negs_per_pos 48.0 +176 87 training.batch_size 0.0 +176 88 model.embedding_dim 0.0 +176 88 negative_sampler.num_negs_per_pos 87.0 +176 88 training.batch_size 0.0 +176 89 model.embedding_dim 1.0 +176 89 negative_sampler.num_negs_per_pos 14.0 +176 89 training.batch_size 1.0 +176 90 
model.embedding_dim 1.0 +176 90 negative_sampler.num_negs_per_pos 97.0 +176 90 training.batch_size 1.0 +176 91 model.embedding_dim 0.0 +176 91 negative_sampler.num_negs_per_pos 68.0 +176 91 training.batch_size 2.0 +176 92 model.embedding_dim 2.0 +176 92 negative_sampler.num_negs_per_pos 97.0 +176 92 training.batch_size 1.0 +176 93 model.embedding_dim 2.0 +176 93 negative_sampler.num_negs_per_pos 17.0 +176 93 training.batch_size 1.0 +176 94 model.embedding_dim 1.0 +176 94 negative_sampler.num_negs_per_pos 14.0 +176 94 training.batch_size 0.0 +176 95 model.embedding_dim 0.0 +176 95 negative_sampler.num_negs_per_pos 70.0 +176 95 training.batch_size 0.0 +176 96 model.embedding_dim 2.0 +176 96 negative_sampler.num_negs_per_pos 79.0 +176 96 training.batch_size 1.0 +176 97 model.embedding_dim 1.0 +176 97 negative_sampler.num_negs_per_pos 47.0 +176 97 training.batch_size 2.0 +176 98 model.embedding_dim 2.0 +176 98 negative_sampler.num_negs_per_pos 21.0 +176 98 training.batch_size 1.0 +176 99 model.embedding_dim 2.0 +176 99 negative_sampler.num_negs_per_pos 97.0 +176 99 training.batch_size 0.0 +176 100 model.embedding_dim 0.0 +176 100 negative_sampler.num_negs_per_pos 68.0 +176 100 training.batch_size 2.0 +176 1 dataset """kinships""" +176 1 model """distmult""" +176 1 loss """bceaftersigmoid""" +176 1 regularizer """no""" +176 1 optimizer """adadelta""" +176 1 training_loop """owa""" +176 1 negative_sampler """basic""" +176 1 evaluator """rankbased""" +176 2 dataset """kinships""" +176 2 model """distmult""" +176 2 loss """bceaftersigmoid""" +176 2 regularizer """no""" +176 2 optimizer """adadelta""" +176 2 training_loop """owa""" +176 2 negative_sampler """basic""" +176 2 evaluator """rankbased""" +176 3 dataset """kinships""" +176 3 model """distmult""" +176 3 loss """bceaftersigmoid""" +176 3 regularizer """no""" +176 3 optimizer """adadelta""" +176 3 training_loop """owa""" +176 3 negative_sampler """basic""" +176 3 evaluator """rankbased""" +176 4 dataset 
"""kinships""" +176 4 model """distmult""" +176 4 loss """bceaftersigmoid""" +176 4 regularizer """no""" +176 4 optimizer """adadelta""" +176 4 training_loop """owa""" +176 4 negative_sampler """basic""" +176 4 evaluator """rankbased""" +176 5 dataset """kinships""" +176 5 model """distmult""" +176 5 loss """bceaftersigmoid""" +176 5 regularizer """no""" +176 5 optimizer """adadelta""" +176 5 training_loop """owa""" +176 5 negative_sampler """basic""" +176 5 evaluator """rankbased""" +176 6 dataset """kinships""" +176 6 model """distmult""" +176 6 loss """bceaftersigmoid""" +176 6 regularizer """no""" +176 6 optimizer """adadelta""" +176 6 training_loop """owa""" +176 6 negative_sampler """basic""" +176 6 evaluator """rankbased""" +176 7 dataset """kinships""" +176 7 model """distmult""" +176 7 loss """bceaftersigmoid""" +176 7 regularizer """no""" +176 7 optimizer """adadelta""" +176 7 training_loop """owa""" +176 7 negative_sampler """basic""" +176 7 evaluator """rankbased""" +176 8 dataset """kinships""" +176 8 model """distmult""" +176 8 loss """bceaftersigmoid""" +176 8 regularizer """no""" +176 8 optimizer """adadelta""" +176 8 training_loop """owa""" +176 8 negative_sampler """basic""" +176 8 evaluator """rankbased""" +176 9 dataset """kinships""" +176 9 model """distmult""" +176 9 loss """bceaftersigmoid""" +176 9 regularizer """no""" +176 9 optimizer """adadelta""" +176 9 training_loop """owa""" +176 9 negative_sampler """basic""" +176 9 evaluator """rankbased""" +176 10 dataset """kinships""" +176 10 model """distmult""" +176 10 loss """bceaftersigmoid""" +176 10 regularizer """no""" +176 10 optimizer """adadelta""" +176 10 training_loop """owa""" +176 10 negative_sampler """basic""" +176 10 evaluator """rankbased""" +176 11 dataset """kinships""" +176 11 model """distmult""" +176 11 loss """bceaftersigmoid""" +176 11 regularizer """no""" +176 11 optimizer """adadelta""" +176 11 training_loop """owa""" +176 11 negative_sampler """basic""" +176 11 
evaluator """rankbased""" +176 12 dataset """kinships""" +176 12 model """distmult""" +176 12 loss """bceaftersigmoid""" +176 12 regularizer """no""" +176 12 optimizer """adadelta""" +176 12 training_loop """owa""" +176 12 negative_sampler """basic""" +176 12 evaluator """rankbased""" +176 13 dataset """kinships""" +176 13 model """distmult""" +176 13 loss """bceaftersigmoid""" +176 13 regularizer """no""" +176 13 optimizer """adadelta""" +176 13 training_loop """owa""" +176 13 negative_sampler """basic""" +176 13 evaluator """rankbased""" +176 14 dataset """kinships""" +176 14 model """distmult""" +176 14 loss """bceaftersigmoid""" +176 14 regularizer """no""" +176 14 optimizer """adadelta""" +176 14 training_loop """owa""" +176 14 negative_sampler """basic""" +176 14 evaluator """rankbased""" +176 15 dataset """kinships""" +176 15 model """distmult""" +176 15 loss """bceaftersigmoid""" +176 15 regularizer """no""" +176 15 optimizer """adadelta""" +176 15 training_loop """owa""" +176 15 negative_sampler """basic""" +176 15 evaluator """rankbased""" +176 16 dataset """kinships""" +176 16 model """distmult""" +176 16 loss """bceaftersigmoid""" +176 16 regularizer """no""" +176 16 optimizer """adadelta""" +176 16 training_loop """owa""" +176 16 negative_sampler """basic""" +176 16 evaluator """rankbased""" +176 17 dataset """kinships""" +176 17 model """distmult""" +176 17 loss """bceaftersigmoid""" +176 17 regularizer """no""" +176 17 optimizer """adadelta""" +176 17 training_loop """owa""" +176 17 negative_sampler """basic""" +176 17 evaluator """rankbased""" +176 18 dataset """kinships""" +176 18 model """distmult""" +176 18 loss """bceaftersigmoid""" +176 18 regularizer """no""" +176 18 optimizer """adadelta""" +176 18 training_loop """owa""" +176 18 negative_sampler """basic""" +176 18 evaluator """rankbased""" +176 19 dataset """kinships""" +176 19 model """distmult""" +176 19 loss """bceaftersigmoid""" +176 19 regularizer """no""" +176 19 optimizer 
"""adadelta""" +176 19 training_loop """owa""" +176 19 negative_sampler """basic""" +176 19 evaluator """rankbased""" +176 20 dataset """kinships""" +176 20 model """distmult""" +176 20 loss """bceaftersigmoid""" +176 20 regularizer """no""" +176 20 optimizer """adadelta""" +176 20 training_loop """owa""" +176 20 negative_sampler """basic""" +176 20 evaluator """rankbased""" +176 21 dataset """kinships""" +176 21 model """distmult""" +176 21 loss """bceaftersigmoid""" +176 21 regularizer """no""" +176 21 optimizer """adadelta""" +176 21 training_loop """owa""" +176 21 negative_sampler """basic""" +176 21 evaluator """rankbased""" +176 22 dataset """kinships""" +176 22 model """distmult""" +176 22 loss """bceaftersigmoid""" +176 22 regularizer """no""" +176 22 optimizer """adadelta""" +176 22 training_loop """owa""" +176 22 negative_sampler """basic""" +176 22 evaluator """rankbased""" +176 23 dataset """kinships""" +176 23 model """distmult""" +176 23 loss """bceaftersigmoid""" +176 23 regularizer """no""" +176 23 optimizer """adadelta""" +176 23 training_loop """owa""" +176 23 negative_sampler """basic""" +176 23 evaluator """rankbased""" +176 24 dataset """kinships""" +176 24 model """distmult""" +176 24 loss """bceaftersigmoid""" +176 24 regularizer """no""" +176 24 optimizer """adadelta""" +176 24 training_loop """owa""" +176 24 negative_sampler """basic""" +176 24 evaluator """rankbased""" +176 25 dataset """kinships""" +176 25 model """distmult""" +176 25 loss """bceaftersigmoid""" +176 25 regularizer """no""" +176 25 optimizer """adadelta""" +176 25 training_loop """owa""" +176 25 negative_sampler """basic""" +176 25 evaluator """rankbased""" +176 26 dataset """kinships""" +176 26 model """distmult""" +176 26 loss """bceaftersigmoid""" +176 26 regularizer """no""" +176 26 optimizer """adadelta""" +176 26 training_loop """owa""" +176 26 negative_sampler """basic""" +176 26 evaluator """rankbased""" +176 27 dataset """kinships""" +176 27 model """distmult""" 
+176 27 loss """bceaftersigmoid""" +176 27 regularizer """no""" +176 27 optimizer """adadelta""" +176 27 training_loop """owa""" +176 27 negative_sampler """basic""" +176 27 evaluator """rankbased""" +176 28 dataset """kinships""" +176 28 model """distmult""" +176 28 loss """bceaftersigmoid""" +176 28 regularizer """no""" +176 28 optimizer """adadelta""" +176 28 training_loop """owa""" +176 28 negative_sampler """basic""" +176 28 evaluator """rankbased""" +176 29 dataset """kinships""" +176 29 model """distmult""" +176 29 loss """bceaftersigmoid""" +176 29 regularizer """no""" +176 29 optimizer """adadelta""" +176 29 training_loop """owa""" +176 29 negative_sampler """basic""" +176 29 evaluator """rankbased""" +176 30 dataset """kinships""" +176 30 model """distmult""" +176 30 loss """bceaftersigmoid""" +176 30 regularizer """no""" +176 30 optimizer """adadelta""" +176 30 training_loop """owa""" +176 30 negative_sampler """basic""" +176 30 evaluator """rankbased""" +176 31 dataset """kinships""" +176 31 model """distmult""" +176 31 loss """bceaftersigmoid""" +176 31 regularizer """no""" +176 31 optimizer """adadelta""" +176 31 training_loop """owa""" +176 31 negative_sampler """basic""" +176 31 evaluator """rankbased""" +176 32 dataset """kinships""" +176 32 model """distmult""" +176 32 loss """bceaftersigmoid""" +176 32 regularizer """no""" +176 32 optimizer """adadelta""" +176 32 training_loop """owa""" +176 32 negative_sampler """basic""" +176 32 evaluator """rankbased""" +176 33 dataset """kinships""" +176 33 model """distmult""" +176 33 loss """bceaftersigmoid""" +176 33 regularizer """no""" +176 33 optimizer """adadelta""" +176 33 training_loop """owa""" +176 33 negative_sampler """basic""" +176 33 evaluator """rankbased""" +176 34 dataset """kinships""" +176 34 model """distmult""" +176 34 loss """bceaftersigmoid""" +176 34 regularizer """no""" +176 34 optimizer """adadelta""" +176 34 training_loop """owa""" +176 34 negative_sampler """basic""" +176 34 
evaluator """rankbased""" +176 35 dataset """kinships""" +176 35 model """distmult""" +176 35 loss """bceaftersigmoid""" +176 35 regularizer """no""" +176 35 optimizer """adadelta""" +176 35 training_loop """owa""" +176 35 negative_sampler """basic""" +176 35 evaluator """rankbased""" +176 36 dataset """kinships""" +176 36 model """distmult""" +176 36 loss """bceaftersigmoid""" +176 36 regularizer """no""" +176 36 optimizer """adadelta""" +176 36 training_loop """owa""" +176 36 negative_sampler """basic""" +176 36 evaluator """rankbased""" +176 37 dataset """kinships""" +176 37 model """distmult""" +176 37 loss """bceaftersigmoid""" +176 37 regularizer """no""" +176 37 optimizer """adadelta""" +176 37 training_loop """owa""" +176 37 negative_sampler """basic""" +176 37 evaluator """rankbased""" +176 38 dataset """kinships""" +176 38 model """distmult""" +176 38 loss """bceaftersigmoid""" +176 38 regularizer """no""" +176 38 optimizer """adadelta""" +176 38 training_loop """owa""" +176 38 negative_sampler """basic""" +176 38 evaluator """rankbased""" +176 39 dataset """kinships""" +176 39 model """distmult""" +176 39 loss """bceaftersigmoid""" +176 39 regularizer """no""" +176 39 optimizer """adadelta""" +176 39 training_loop """owa""" +176 39 negative_sampler """basic""" +176 39 evaluator """rankbased""" +176 40 dataset """kinships""" +176 40 model """distmult""" +176 40 loss """bceaftersigmoid""" +176 40 regularizer """no""" +176 40 optimizer """adadelta""" +176 40 training_loop """owa""" +176 40 negative_sampler """basic""" +176 40 evaluator """rankbased""" +176 41 dataset """kinships""" +176 41 model """distmult""" +176 41 loss """bceaftersigmoid""" +176 41 regularizer """no""" +176 41 optimizer """adadelta""" +176 41 training_loop """owa""" +176 41 negative_sampler """basic""" +176 41 evaluator """rankbased""" +176 42 dataset """kinships""" +176 42 model """distmult""" +176 42 loss """bceaftersigmoid""" +176 42 regularizer """no""" +176 42 optimizer 
"""adadelta""" +176 42 training_loop """owa""" +176 42 negative_sampler """basic""" +176 42 evaluator """rankbased""" +176 43 dataset """kinships""" +176 43 model """distmult""" +176 43 loss """bceaftersigmoid""" +176 43 regularizer """no""" +176 43 optimizer """adadelta""" +176 43 training_loop """owa""" +176 43 negative_sampler """basic""" +176 43 evaluator """rankbased""" +176 44 dataset """kinships""" +176 44 model """distmult""" +176 44 loss """bceaftersigmoid""" +176 44 regularizer """no""" +176 44 optimizer """adadelta""" +176 44 training_loop """owa""" +176 44 negative_sampler """basic""" +176 44 evaluator """rankbased""" +176 45 dataset """kinships""" +176 45 model """distmult""" +176 45 loss """bceaftersigmoid""" +176 45 regularizer """no""" +176 45 optimizer """adadelta""" +176 45 training_loop """owa""" +176 45 negative_sampler """basic""" +176 45 evaluator """rankbased""" +176 46 dataset """kinships""" +176 46 model """distmult""" +176 46 loss """bceaftersigmoid""" +176 46 regularizer """no""" +176 46 optimizer """adadelta""" +176 46 training_loop """owa""" +176 46 negative_sampler """basic""" +176 46 evaluator """rankbased""" +176 47 dataset """kinships""" +176 47 model """distmult""" +176 47 loss """bceaftersigmoid""" +176 47 regularizer """no""" +176 47 optimizer """adadelta""" +176 47 training_loop """owa""" +176 47 negative_sampler """basic""" +176 47 evaluator """rankbased""" +176 48 dataset """kinships""" +176 48 model """distmult""" +176 48 loss """bceaftersigmoid""" +176 48 regularizer """no""" +176 48 optimizer """adadelta""" +176 48 training_loop """owa""" +176 48 negative_sampler """basic""" +176 48 evaluator """rankbased""" +176 49 dataset """kinships""" +176 49 model """distmult""" +176 49 loss """bceaftersigmoid""" +176 49 regularizer """no""" +176 49 optimizer """adadelta""" +176 49 training_loop """owa""" +176 49 negative_sampler """basic""" +176 49 evaluator """rankbased""" +176 50 dataset """kinships""" +176 50 model """distmult""" 
+176 50 loss """bceaftersigmoid""" +176 50 regularizer """no""" +176 50 optimizer """adadelta""" +176 50 training_loop """owa""" +176 50 negative_sampler """basic""" +176 50 evaluator """rankbased""" +176 51 dataset """kinships""" +176 51 model """distmult""" +176 51 loss """bceaftersigmoid""" +176 51 regularizer """no""" +176 51 optimizer """adadelta""" +176 51 training_loop """owa""" +176 51 negative_sampler """basic""" +176 51 evaluator """rankbased""" +176 52 dataset """kinships""" +176 52 model """distmult""" +176 52 loss """bceaftersigmoid""" +176 52 regularizer """no""" +176 52 optimizer """adadelta""" +176 52 training_loop """owa""" +176 52 negative_sampler """basic""" +176 52 evaluator """rankbased""" +176 53 dataset """kinships""" +176 53 model """distmult""" +176 53 loss """bceaftersigmoid""" +176 53 regularizer """no""" +176 53 optimizer """adadelta""" +176 53 training_loop """owa""" +176 53 negative_sampler """basic""" +176 53 evaluator """rankbased""" +176 54 dataset """kinships""" +176 54 model """distmult""" +176 54 loss """bceaftersigmoid""" +176 54 regularizer """no""" +176 54 optimizer """adadelta""" +176 54 training_loop """owa""" +176 54 negative_sampler """basic""" +176 54 evaluator """rankbased""" +176 55 dataset """kinships""" +176 55 model """distmult""" +176 55 loss """bceaftersigmoid""" +176 55 regularizer """no""" +176 55 optimizer """adadelta""" +176 55 training_loop """owa""" +176 55 negative_sampler """basic""" +176 55 evaluator """rankbased""" +176 56 dataset """kinships""" +176 56 model """distmult""" +176 56 loss """bceaftersigmoid""" +176 56 regularizer """no""" +176 56 optimizer """adadelta""" +176 56 training_loop """owa""" +176 56 negative_sampler """basic""" +176 56 evaluator """rankbased""" +176 57 dataset """kinships""" +176 57 model """distmult""" +176 57 loss """bceaftersigmoid""" +176 57 regularizer """no""" +176 57 optimizer """adadelta""" +176 57 training_loop """owa""" +176 57 negative_sampler """basic""" +176 57 
evaluator """rankbased""" +176 58 dataset """kinships""" +176 58 model """distmult""" +176 58 loss """bceaftersigmoid""" +176 58 regularizer """no""" +176 58 optimizer """adadelta""" +176 58 training_loop """owa""" +176 58 negative_sampler """basic""" +176 58 evaluator """rankbased""" +176 59 dataset """kinships""" +176 59 model """distmult""" +176 59 loss """bceaftersigmoid""" +176 59 regularizer """no""" +176 59 optimizer """adadelta""" +176 59 training_loop """owa""" +176 59 negative_sampler """basic""" +176 59 evaluator """rankbased""" +176 60 dataset """kinships""" +176 60 model """distmult""" +176 60 loss """bceaftersigmoid""" +176 60 regularizer """no""" +176 60 optimizer """adadelta""" +176 60 training_loop """owa""" +176 60 negative_sampler """basic""" +176 60 evaluator """rankbased""" +176 61 dataset """kinships""" +176 61 model """distmult""" +176 61 loss """bceaftersigmoid""" +176 61 regularizer """no""" +176 61 optimizer """adadelta""" +176 61 training_loop """owa""" +176 61 negative_sampler """basic""" +176 61 evaluator """rankbased""" +176 62 dataset """kinships""" +176 62 model """distmult""" +176 62 loss """bceaftersigmoid""" +176 62 regularizer """no""" +176 62 optimizer """adadelta""" +176 62 training_loop """owa""" +176 62 negative_sampler """basic""" +176 62 evaluator """rankbased""" +176 63 dataset """kinships""" +176 63 model """distmult""" +176 63 loss """bceaftersigmoid""" +176 63 regularizer """no""" +176 63 optimizer """adadelta""" +176 63 training_loop """owa""" +176 63 negative_sampler """basic""" +176 63 evaluator """rankbased""" +176 64 dataset """kinships""" +176 64 model """distmult""" +176 64 loss """bceaftersigmoid""" +176 64 regularizer """no""" +176 64 optimizer """adadelta""" +176 64 training_loop """owa""" +176 64 negative_sampler """basic""" +176 64 evaluator """rankbased""" +176 65 dataset """kinships""" +176 65 model """distmult""" +176 65 loss """bceaftersigmoid""" +176 65 regularizer """no""" +176 65 optimizer 
"""adadelta""" +176 65 training_loop """owa""" +176 65 negative_sampler """basic""" +176 65 evaluator """rankbased""" +176 66 dataset """kinships""" +176 66 model """distmult""" +176 66 loss """bceaftersigmoid""" +176 66 regularizer """no""" +176 66 optimizer """adadelta""" +176 66 training_loop """owa""" +176 66 negative_sampler """basic""" +176 66 evaluator """rankbased""" +176 67 dataset """kinships""" +176 67 model """distmult""" +176 67 loss """bceaftersigmoid""" +176 67 regularizer """no""" +176 67 optimizer """adadelta""" +176 67 training_loop """owa""" +176 67 negative_sampler """basic""" +176 67 evaluator """rankbased""" +176 68 dataset """kinships""" +176 68 model """distmult""" +176 68 loss """bceaftersigmoid""" +176 68 regularizer """no""" +176 68 optimizer """adadelta""" +176 68 training_loop """owa""" +176 68 negative_sampler """basic""" +176 68 evaluator """rankbased""" +176 69 dataset """kinships""" +176 69 model """distmult""" +176 69 loss """bceaftersigmoid""" +176 69 regularizer """no""" +176 69 optimizer """adadelta""" +176 69 training_loop """owa""" +176 69 negative_sampler """basic""" +176 69 evaluator """rankbased""" +176 70 dataset """kinships""" +176 70 model """distmult""" +176 70 loss """bceaftersigmoid""" +176 70 regularizer """no""" +176 70 optimizer """adadelta""" +176 70 training_loop """owa""" +176 70 negative_sampler """basic""" +176 70 evaluator """rankbased""" +176 71 dataset """kinships""" +176 71 model """distmult""" +176 71 loss """bceaftersigmoid""" +176 71 regularizer """no""" +176 71 optimizer """adadelta""" +176 71 training_loop """owa""" +176 71 negative_sampler """basic""" +176 71 evaluator """rankbased""" +176 72 dataset """kinships""" +176 72 model """distmult""" +176 72 loss """bceaftersigmoid""" +176 72 regularizer """no""" +176 72 optimizer """adadelta""" +176 72 training_loop """owa""" +176 72 negative_sampler """basic""" +176 72 evaluator """rankbased""" +176 73 dataset """kinships""" +176 73 model """distmult""" 
+176 73 loss """bceaftersigmoid""" +176 73 regularizer """no""" +176 73 optimizer """adadelta""" +176 73 training_loop """owa""" +176 73 negative_sampler """basic""" +176 73 evaluator """rankbased""" +176 74 dataset """kinships""" +176 74 model """distmult""" +176 74 loss """bceaftersigmoid""" +176 74 regularizer """no""" +176 74 optimizer """adadelta""" +176 74 training_loop """owa""" +176 74 negative_sampler """basic""" +176 74 evaluator """rankbased""" +176 75 dataset """kinships""" +176 75 model """distmult""" +176 75 loss """bceaftersigmoid""" +176 75 regularizer """no""" +176 75 optimizer """adadelta""" +176 75 training_loop """owa""" +176 75 negative_sampler """basic""" +176 75 evaluator """rankbased""" +176 76 dataset """kinships""" +176 76 model """distmult""" +176 76 loss """bceaftersigmoid""" +176 76 regularizer """no""" +176 76 optimizer """adadelta""" +176 76 training_loop """owa""" +176 76 negative_sampler """basic""" +176 76 evaluator """rankbased""" +176 77 dataset """kinships""" +176 77 model """distmult""" +176 77 loss """bceaftersigmoid""" +176 77 regularizer """no""" +176 77 optimizer """adadelta""" +176 77 training_loop """owa""" +176 77 negative_sampler """basic""" +176 77 evaluator """rankbased""" +176 78 dataset """kinships""" +176 78 model """distmult""" +176 78 loss """bceaftersigmoid""" +176 78 regularizer """no""" +176 78 optimizer """adadelta""" +176 78 training_loop """owa""" +176 78 negative_sampler """basic""" +176 78 evaluator """rankbased""" +176 79 dataset """kinships""" +176 79 model """distmult""" +176 79 loss """bceaftersigmoid""" +176 79 regularizer """no""" +176 79 optimizer """adadelta""" +176 79 training_loop """owa""" +176 79 negative_sampler """basic""" +176 79 evaluator """rankbased""" +176 80 dataset """kinships""" +176 80 model """distmult""" +176 80 loss """bceaftersigmoid""" +176 80 regularizer """no""" +176 80 optimizer """adadelta""" +176 80 training_loop """owa""" +176 80 negative_sampler """basic""" +176 80 
evaluator """rankbased""" +176 81 dataset """kinships""" +176 81 model """distmult""" +176 81 loss """bceaftersigmoid""" +176 81 regularizer """no""" +176 81 optimizer """adadelta""" +176 81 training_loop """owa""" +176 81 negative_sampler """basic""" +176 81 evaluator """rankbased""" +176 82 dataset """kinships""" +176 82 model """distmult""" +176 82 loss """bceaftersigmoid""" +176 82 regularizer """no""" +176 82 optimizer """adadelta""" +176 82 training_loop """owa""" +176 82 negative_sampler """basic""" +176 82 evaluator """rankbased""" +176 83 dataset """kinships""" +176 83 model """distmult""" +176 83 loss """bceaftersigmoid""" +176 83 regularizer """no""" +176 83 optimizer """adadelta""" +176 83 training_loop """owa""" +176 83 negative_sampler """basic""" +176 83 evaluator """rankbased""" +176 84 dataset """kinships""" +176 84 model """distmult""" +176 84 loss """bceaftersigmoid""" +176 84 regularizer """no""" +176 84 optimizer """adadelta""" +176 84 training_loop """owa""" +176 84 negative_sampler """basic""" +176 84 evaluator """rankbased""" +176 85 dataset """kinships""" +176 85 model """distmult""" +176 85 loss """bceaftersigmoid""" +176 85 regularizer """no""" +176 85 optimizer """adadelta""" +176 85 training_loop """owa""" +176 85 negative_sampler """basic""" +176 85 evaluator """rankbased""" +176 86 dataset """kinships""" +176 86 model """distmult""" +176 86 loss """bceaftersigmoid""" +176 86 regularizer """no""" +176 86 optimizer """adadelta""" +176 86 training_loop """owa""" +176 86 negative_sampler """basic""" +176 86 evaluator """rankbased""" +176 87 dataset """kinships""" +176 87 model """distmult""" +176 87 loss """bceaftersigmoid""" +176 87 regularizer """no""" +176 87 optimizer """adadelta""" +176 87 training_loop """owa""" +176 87 negative_sampler """basic""" +176 87 evaluator """rankbased""" +176 88 dataset """kinships""" +176 88 model """distmult""" +176 88 loss """bceaftersigmoid""" +176 88 regularizer """no""" +176 88 optimizer 
"""adadelta""" +176 88 training_loop """owa""" +176 88 negative_sampler """basic""" +176 88 evaluator """rankbased""" +176 89 dataset """kinships""" +176 89 model """distmult""" +176 89 loss """bceaftersigmoid""" +176 89 regularizer """no""" +176 89 optimizer """adadelta""" +176 89 training_loop """owa""" +176 89 negative_sampler """basic""" +176 89 evaluator """rankbased""" +176 90 dataset """kinships""" +176 90 model """distmult""" +176 90 loss """bceaftersigmoid""" +176 90 regularizer """no""" +176 90 optimizer """adadelta""" +176 90 training_loop """owa""" +176 90 negative_sampler """basic""" +176 90 evaluator """rankbased""" +176 91 dataset """kinships""" +176 91 model """distmult""" +176 91 loss """bceaftersigmoid""" +176 91 regularizer """no""" +176 91 optimizer """adadelta""" +176 91 training_loop """owa""" +176 91 negative_sampler """basic""" +176 91 evaluator """rankbased""" +176 92 dataset """kinships""" +176 92 model """distmult""" +176 92 loss """bceaftersigmoid""" +176 92 regularizer """no""" +176 92 optimizer """adadelta""" +176 92 training_loop """owa""" +176 92 negative_sampler """basic""" +176 92 evaluator """rankbased""" +176 93 dataset """kinships""" +176 93 model """distmult""" +176 93 loss """bceaftersigmoid""" +176 93 regularizer """no""" +176 93 optimizer """adadelta""" +176 93 training_loop """owa""" +176 93 negative_sampler """basic""" +176 93 evaluator """rankbased""" +176 94 dataset """kinships""" +176 94 model """distmult""" +176 94 loss """bceaftersigmoid""" +176 94 regularizer """no""" +176 94 optimizer """adadelta""" +176 94 training_loop """owa""" +176 94 negative_sampler """basic""" +176 94 evaluator """rankbased""" +176 95 dataset """kinships""" +176 95 model """distmult""" +176 95 loss """bceaftersigmoid""" +176 95 regularizer """no""" +176 95 optimizer """adadelta""" +176 95 training_loop """owa""" +176 95 negative_sampler """basic""" +176 95 evaluator """rankbased""" +176 96 dataset """kinships""" +176 96 model """distmult""" 
+176 96 loss """bceaftersigmoid""" +176 96 regularizer """no""" +176 96 optimizer """adadelta""" +176 96 training_loop """owa""" +176 96 negative_sampler """basic""" +176 96 evaluator """rankbased""" +176 97 dataset """kinships""" +176 97 model """distmult""" +176 97 loss """bceaftersigmoid""" +176 97 regularizer """no""" +176 97 optimizer """adadelta""" +176 97 training_loop """owa""" +176 97 negative_sampler """basic""" +176 97 evaluator """rankbased""" +176 98 dataset """kinships""" +176 98 model """distmult""" +176 98 loss """bceaftersigmoid""" +176 98 regularizer """no""" +176 98 optimizer """adadelta""" +176 98 training_loop """owa""" +176 98 negative_sampler """basic""" +176 98 evaluator """rankbased""" +176 99 dataset """kinships""" +176 99 model """distmult""" +176 99 loss """bceaftersigmoid""" +176 99 regularizer """no""" +176 99 optimizer """adadelta""" +176 99 training_loop """owa""" +176 99 negative_sampler """basic""" +176 99 evaluator """rankbased""" +176 100 dataset """kinships""" +176 100 model """distmult""" +176 100 loss """bceaftersigmoid""" +176 100 regularizer """no""" +176 100 optimizer """adadelta""" +176 100 training_loop """owa""" +176 100 negative_sampler """basic""" +176 100 evaluator """rankbased""" +177 1 model.embedding_dim 2.0 +177 1 negative_sampler.num_negs_per_pos 58.0 +177 1 training.batch_size 2.0 +177 2 model.embedding_dim 0.0 +177 2 negative_sampler.num_negs_per_pos 98.0 +177 2 training.batch_size 2.0 +177 3 model.embedding_dim 1.0 +177 3 negative_sampler.num_negs_per_pos 8.0 +177 3 training.batch_size 0.0 +177 4 model.embedding_dim 1.0 +177 4 negative_sampler.num_negs_per_pos 17.0 +177 4 training.batch_size 2.0 +177 5 model.embedding_dim 0.0 +177 5 negative_sampler.num_negs_per_pos 21.0 +177 5 training.batch_size 0.0 +177 6 model.embedding_dim 0.0 +177 6 negative_sampler.num_negs_per_pos 25.0 +177 6 training.batch_size 1.0 +177 7 model.embedding_dim 1.0 +177 7 negative_sampler.num_negs_per_pos 30.0 +177 7 training.batch_size 
0.0 +177 8 model.embedding_dim 1.0 +177 8 negative_sampler.num_negs_per_pos 27.0 +177 8 training.batch_size 0.0 +177 9 model.embedding_dim 0.0 +177 9 negative_sampler.num_negs_per_pos 55.0 +177 9 training.batch_size 0.0 +177 10 model.embedding_dim 1.0 +177 10 negative_sampler.num_negs_per_pos 41.0 +177 10 training.batch_size 0.0 +177 11 model.embedding_dim 1.0 +177 11 negative_sampler.num_negs_per_pos 65.0 +177 11 training.batch_size 2.0 +177 12 model.embedding_dim 1.0 +177 12 negative_sampler.num_negs_per_pos 96.0 +177 12 training.batch_size 1.0 +177 13 model.embedding_dim 2.0 +177 13 negative_sampler.num_negs_per_pos 13.0 +177 13 training.batch_size 1.0 +177 14 model.embedding_dim 1.0 +177 14 negative_sampler.num_negs_per_pos 90.0 +177 14 training.batch_size 0.0 +177 15 model.embedding_dim 1.0 +177 15 negative_sampler.num_negs_per_pos 55.0 +177 15 training.batch_size 0.0 +177 16 model.embedding_dim 0.0 +177 16 negative_sampler.num_negs_per_pos 97.0 +177 16 training.batch_size 0.0 +177 17 model.embedding_dim 1.0 +177 17 negative_sampler.num_negs_per_pos 77.0 +177 17 training.batch_size 2.0 +177 18 model.embedding_dim 2.0 +177 18 negative_sampler.num_negs_per_pos 27.0 +177 18 training.batch_size 2.0 +177 19 model.embedding_dim 2.0 +177 19 negative_sampler.num_negs_per_pos 53.0 +177 19 training.batch_size 1.0 +177 20 model.embedding_dim 0.0 +177 20 negative_sampler.num_negs_per_pos 68.0 +177 20 training.batch_size 2.0 +177 21 model.embedding_dim 0.0 +177 21 negative_sampler.num_negs_per_pos 36.0 +177 21 training.batch_size 1.0 +177 22 model.embedding_dim 2.0 +177 22 negative_sampler.num_negs_per_pos 34.0 +177 22 training.batch_size 1.0 +177 23 model.embedding_dim 0.0 +177 23 negative_sampler.num_negs_per_pos 94.0 +177 23 training.batch_size 2.0 +177 24 model.embedding_dim 1.0 +177 24 negative_sampler.num_negs_per_pos 24.0 +177 24 training.batch_size 1.0 +177 25 model.embedding_dim 0.0 +177 25 negative_sampler.num_negs_per_pos 31.0 +177 25 training.batch_size 0.0 
+177 26 model.embedding_dim 2.0 +177 26 negative_sampler.num_negs_per_pos 52.0 +177 26 training.batch_size 1.0 +177 27 model.embedding_dim 1.0 +177 27 negative_sampler.num_negs_per_pos 21.0 +177 27 training.batch_size 1.0 +177 28 model.embedding_dim 0.0 +177 28 negative_sampler.num_negs_per_pos 74.0 +177 28 training.batch_size 0.0 +177 29 model.embedding_dim 0.0 +177 29 negative_sampler.num_negs_per_pos 36.0 +177 29 training.batch_size 0.0 +177 30 model.embedding_dim 2.0 +177 30 negative_sampler.num_negs_per_pos 36.0 +177 30 training.batch_size 1.0 +177 31 model.embedding_dim 1.0 +177 31 negative_sampler.num_negs_per_pos 18.0 +177 31 training.batch_size 0.0 +177 32 model.embedding_dim 1.0 +177 32 negative_sampler.num_negs_per_pos 22.0 +177 32 training.batch_size 2.0 +177 33 model.embedding_dim 1.0 +177 33 negative_sampler.num_negs_per_pos 79.0 +177 33 training.batch_size 0.0 +177 34 model.embedding_dim 2.0 +177 34 negative_sampler.num_negs_per_pos 69.0 +177 34 training.batch_size 2.0 +177 35 model.embedding_dim 0.0 +177 35 negative_sampler.num_negs_per_pos 5.0 +177 35 training.batch_size 0.0 +177 36 model.embedding_dim 1.0 +177 36 negative_sampler.num_negs_per_pos 94.0 +177 36 training.batch_size 2.0 +177 37 model.embedding_dim 0.0 +177 37 negative_sampler.num_negs_per_pos 99.0 +177 37 training.batch_size 1.0 +177 38 model.embedding_dim 1.0 +177 38 negative_sampler.num_negs_per_pos 62.0 +177 38 training.batch_size 1.0 +177 39 model.embedding_dim 2.0 +177 39 negative_sampler.num_negs_per_pos 97.0 +177 39 training.batch_size 1.0 +177 40 model.embedding_dim 0.0 +177 40 negative_sampler.num_negs_per_pos 97.0 +177 40 training.batch_size 2.0 +177 41 model.embedding_dim 1.0 +177 41 negative_sampler.num_negs_per_pos 75.0 +177 41 training.batch_size 0.0 +177 42 model.embedding_dim 1.0 +177 42 negative_sampler.num_negs_per_pos 87.0 +177 42 training.batch_size 1.0 +177 43 model.embedding_dim 2.0 +177 43 negative_sampler.num_negs_per_pos 83.0 +177 43 training.batch_size 0.0 
+177 44 model.embedding_dim 2.0 +177 44 negative_sampler.num_negs_per_pos 59.0 +177 44 training.batch_size 2.0 +177 45 model.embedding_dim 1.0 +177 45 negative_sampler.num_negs_per_pos 50.0 +177 45 training.batch_size 1.0 +177 46 model.embedding_dim 1.0 +177 46 negative_sampler.num_negs_per_pos 6.0 +177 46 training.batch_size 0.0 +177 47 model.embedding_dim 1.0 +177 47 negative_sampler.num_negs_per_pos 45.0 +177 47 training.batch_size 2.0 +177 48 model.embedding_dim 1.0 +177 48 negative_sampler.num_negs_per_pos 7.0 +177 48 training.batch_size 2.0 +177 49 model.embedding_dim 0.0 +177 49 negative_sampler.num_negs_per_pos 50.0 +177 49 training.batch_size 1.0 +177 50 model.embedding_dim 2.0 +177 50 negative_sampler.num_negs_per_pos 10.0 +177 50 training.batch_size 0.0 +177 51 model.embedding_dim 2.0 +177 51 negative_sampler.num_negs_per_pos 5.0 +177 51 training.batch_size 1.0 +177 52 model.embedding_dim 1.0 +177 52 negative_sampler.num_negs_per_pos 17.0 +177 52 training.batch_size 1.0 +177 53 model.embedding_dim 1.0 +177 53 negative_sampler.num_negs_per_pos 26.0 +177 53 training.batch_size 1.0 +177 54 model.embedding_dim 2.0 +177 54 negative_sampler.num_negs_per_pos 43.0 +177 54 training.batch_size 0.0 +177 55 model.embedding_dim 0.0 +177 55 negative_sampler.num_negs_per_pos 55.0 +177 55 training.batch_size 0.0 +177 56 model.embedding_dim 0.0 +177 56 negative_sampler.num_negs_per_pos 8.0 +177 56 training.batch_size 0.0 +177 57 model.embedding_dim 1.0 +177 57 negative_sampler.num_negs_per_pos 53.0 +177 57 training.batch_size 0.0 +177 58 model.embedding_dim 2.0 +177 58 negative_sampler.num_negs_per_pos 46.0 +177 58 training.batch_size 1.0 +177 59 model.embedding_dim 1.0 +177 59 negative_sampler.num_negs_per_pos 45.0 +177 59 training.batch_size 0.0 +177 60 model.embedding_dim 1.0 +177 60 negative_sampler.num_negs_per_pos 61.0 +177 60 training.batch_size 1.0 +177 61 model.embedding_dim 2.0 +177 61 negative_sampler.num_negs_per_pos 52.0 +177 61 training.batch_size 0.0 +177 
62 model.embedding_dim 2.0 +177 62 negative_sampler.num_negs_per_pos 31.0 +177 62 training.batch_size 0.0 +177 63 model.embedding_dim 0.0 +177 63 negative_sampler.num_negs_per_pos 12.0 +177 63 training.batch_size 1.0 +177 64 model.embedding_dim 2.0 +177 64 negative_sampler.num_negs_per_pos 37.0 +177 64 training.batch_size 1.0 +177 65 model.embedding_dim 0.0 +177 65 negative_sampler.num_negs_per_pos 12.0 +177 65 training.batch_size 2.0 +177 66 model.embedding_dim 1.0 +177 66 negative_sampler.num_negs_per_pos 80.0 +177 66 training.batch_size 1.0 +177 67 model.embedding_dim 2.0 +177 67 negative_sampler.num_negs_per_pos 65.0 +177 67 training.batch_size 1.0 +177 68 model.embedding_dim 2.0 +177 68 negative_sampler.num_negs_per_pos 49.0 +177 68 training.batch_size 0.0 +177 69 model.embedding_dim 0.0 +177 69 negative_sampler.num_negs_per_pos 5.0 +177 69 training.batch_size 2.0 +177 70 model.embedding_dim 2.0 +177 70 negative_sampler.num_negs_per_pos 88.0 +177 70 training.batch_size 2.0 +177 71 model.embedding_dim 1.0 +177 71 negative_sampler.num_negs_per_pos 56.0 +177 71 training.batch_size 0.0 +177 72 model.embedding_dim 2.0 +177 72 negative_sampler.num_negs_per_pos 80.0 +177 72 training.batch_size 2.0 +177 73 model.embedding_dim 2.0 +177 73 negative_sampler.num_negs_per_pos 28.0 +177 73 training.batch_size 0.0 +177 74 model.embedding_dim 0.0 +177 74 negative_sampler.num_negs_per_pos 39.0 +177 74 training.batch_size 2.0 +177 75 model.embedding_dim 2.0 +177 75 negative_sampler.num_negs_per_pos 30.0 +177 75 training.batch_size 2.0 +177 76 model.embedding_dim 1.0 +177 76 negative_sampler.num_negs_per_pos 93.0 +177 76 training.batch_size 0.0 +177 77 model.embedding_dim 2.0 +177 77 negative_sampler.num_negs_per_pos 28.0 +177 77 training.batch_size 1.0 +177 78 model.embedding_dim 1.0 +177 78 negative_sampler.num_negs_per_pos 81.0 +177 78 training.batch_size 1.0 +177 79 model.embedding_dim 0.0 +177 79 negative_sampler.num_negs_per_pos 55.0 +177 79 training.batch_size 2.0 +177 80 
model.embedding_dim 2.0 +177 80 negative_sampler.num_negs_per_pos 97.0 +177 80 training.batch_size 0.0 +177 81 model.embedding_dim 2.0 +177 81 negative_sampler.num_negs_per_pos 65.0 +177 81 training.batch_size 2.0 +177 82 model.embedding_dim 1.0 +177 82 negative_sampler.num_negs_per_pos 39.0 +177 82 training.batch_size 1.0 +177 83 model.embedding_dim 0.0 +177 83 negative_sampler.num_negs_per_pos 85.0 +177 83 training.batch_size 1.0 +177 84 model.embedding_dim 2.0 +177 84 negative_sampler.num_negs_per_pos 1.0 +177 84 training.batch_size 1.0 +177 85 model.embedding_dim 2.0 +177 85 negative_sampler.num_negs_per_pos 83.0 +177 85 training.batch_size 1.0 +177 86 model.embedding_dim 1.0 +177 86 negative_sampler.num_negs_per_pos 12.0 +177 86 training.batch_size 0.0 +177 87 model.embedding_dim 2.0 +177 87 negative_sampler.num_negs_per_pos 29.0 +177 87 training.batch_size 2.0 +177 88 model.embedding_dim 0.0 +177 88 negative_sampler.num_negs_per_pos 19.0 +177 88 training.batch_size 0.0 +177 89 model.embedding_dim 1.0 +177 89 negative_sampler.num_negs_per_pos 42.0 +177 89 training.batch_size 1.0 +177 90 model.embedding_dim 2.0 +177 90 negative_sampler.num_negs_per_pos 48.0 +177 90 training.batch_size 2.0 +177 91 model.embedding_dim 1.0 +177 91 negative_sampler.num_negs_per_pos 86.0 +177 91 training.batch_size 2.0 +177 92 model.embedding_dim 1.0 +177 92 negative_sampler.num_negs_per_pos 36.0 +177 92 training.batch_size 0.0 +177 93 model.embedding_dim 0.0 +177 93 negative_sampler.num_negs_per_pos 67.0 +177 93 training.batch_size 2.0 +177 94 model.embedding_dim 1.0 +177 94 negative_sampler.num_negs_per_pos 62.0 +177 94 training.batch_size 1.0 +177 95 model.embedding_dim 0.0 +177 95 negative_sampler.num_negs_per_pos 30.0 +177 95 training.batch_size 1.0 +177 96 model.embedding_dim 1.0 +177 96 negative_sampler.num_negs_per_pos 30.0 +177 96 training.batch_size 2.0 +177 97 model.embedding_dim 1.0 +177 97 negative_sampler.num_negs_per_pos 48.0 +177 97 training.batch_size 0.0 +177 98 
model.embedding_dim 0.0 +177 98 negative_sampler.num_negs_per_pos 36.0 +177 98 training.batch_size 2.0 +177 99 model.embedding_dim 2.0 +177 99 negative_sampler.num_negs_per_pos 54.0 +177 99 training.batch_size 2.0 +177 100 model.embedding_dim 1.0 +177 100 negative_sampler.num_negs_per_pos 43.0 +177 100 training.batch_size 0.0 +177 1 dataset """kinships""" +177 1 model """distmult""" +177 1 loss """softplus""" +177 1 regularizer """no""" +177 1 optimizer """adadelta""" +177 1 training_loop """owa""" +177 1 negative_sampler """basic""" +177 1 evaluator """rankbased""" +177 2 dataset """kinships""" +177 2 model """distmult""" +177 2 loss """softplus""" +177 2 regularizer """no""" +177 2 optimizer """adadelta""" +177 2 training_loop """owa""" +177 2 negative_sampler """basic""" +177 2 evaluator """rankbased""" +177 3 dataset """kinships""" +177 3 model """distmult""" +177 3 loss """softplus""" +177 3 regularizer """no""" +177 3 optimizer """adadelta""" +177 3 training_loop """owa""" +177 3 negative_sampler """basic""" +177 3 evaluator """rankbased""" +177 4 dataset """kinships""" +177 4 model """distmult""" +177 4 loss """softplus""" +177 4 regularizer """no""" +177 4 optimizer """adadelta""" +177 4 training_loop """owa""" +177 4 negative_sampler """basic""" +177 4 evaluator """rankbased""" +177 5 dataset """kinships""" +177 5 model """distmult""" +177 5 loss """softplus""" +177 5 regularizer """no""" +177 5 optimizer """adadelta""" +177 5 training_loop """owa""" +177 5 negative_sampler """basic""" +177 5 evaluator """rankbased""" +177 6 dataset """kinships""" +177 6 model """distmult""" +177 6 loss """softplus""" +177 6 regularizer """no""" +177 6 optimizer """adadelta""" +177 6 training_loop """owa""" +177 6 negative_sampler """basic""" +177 6 evaluator """rankbased""" +177 7 dataset """kinships""" +177 7 model """distmult""" +177 7 loss """softplus""" +177 7 regularizer """no""" +177 7 optimizer """adadelta""" +177 7 training_loop """owa""" +177 7 negative_sampler 
"""basic""" +177 7 evaluator """rankbased""" +177 8 dataset """kinships""" +177 8 model """distmult""" +177 8 loss """softplus""" +177 8 regularizer """no""" +177 8 optimizer """adadelta""" +177 8 training_loop """owa""" +177 8 negative_sampler """basic""" +177 8 evaluator """rankbased""" +177 9 dataset """kinships""" +177 9 model """distmult""" +177 9 loss """softplus""" +177 9 regularizer """no""" +177 9 optimizer """adadelta""" +177 9 training_loop """owa""" +177 9 negative_sampler """basic""" +177 9 evaluator """rankbased""" +177 10 dataset """kinships""" +177 10 model """distmult""" +177 10 loss """softplus""" +177 10 regularizer """no""" +177 10 optimizer """adadelta""" +177 10 training_loop """owa""" +177 10 negative_sampler """basic""" +177 10 evaluator """rankbased""" +177 11 dataset """kinships""" +177 11 model """distmult""" +177 11 loss """softplus""" +177 11 regularizer """no""" +177 11 optimizer """adadelta""" +177 11 training_loop """owa""" +177 11 negative_sampler """basic""" +177 11 evaluator """rankbased""" +177 12 dataset """kinships""" +177 12 model """distmult""" +177 12 loss """softplus""" +177 12 regularizer """no""" +177 12 optimizer """adadelta""" +177 12 training_loop """owa""" +177 12 negative_sampler """basic""" +177 12 evaluator """rankbased""" +177 13 dataset """kinships""" +177 13 model """distmult""" +177 13 loss """softplus""" +177 13 regularizer """no""" +177 13 optimizer """adadelta""" +177 13 training_loop """owa""" +177 13 negative_sampler """basic""" +177 13 evaluator """rankbased""" +177 14 dataset """kinships""" +177 14 model """distmult""" +177 14 loss """softplus""" +177 14 regularizer """no""" +177 14 optimizer """adadelta""" +177 14 training_loop """owa""" +177 14 negative_sampler """basic""" +177 14 evaluator """rankbased""" +177 15 dataset """kinships""" +177 15 model """distmult""" +177 15 loss """softplus""" +177 15 regularizer """no""" +177 15 optimizer """adadelta""" +177 15 training_loop """owa""" +177 15 
negative_sampler """basic""" +177 15 evaluator """rankbased""" +177 16 dataset """kinships""" +177 16 model """distmult""" +177 16 loss """softplus""" +177 16 regularizer """no""" +177 16 optimizer """adadelta""" +177 16 training_loop """owa""" +177 16 negative_sampler """basic""" +177 16 evaluator """rankbased""" +177 17 dataset """kinships""" +177 17 model """distmult""" +177 17 loss """softplus""" +177 17 regularizer """no""" +177 17 optimizer """adadelta""" +177 17 training_loop """owa""" +177 17 negative_sampler """basic""" +177 17 evaluator """rankbased""" +177 18 dataset """kinships""" +177 18 model """distmult""" +177 18 loss """softplus""" +177 18 regularizer """no""" +177 18 optimizer """adadelta""" +177 18 training_loop """owa""" +177 18 negative_sampler """basic""" +177 18 evaluator """rankbased""" +177 19 dataset """kinships""" +177 19 model """distmult""" +177 19 loss """softplus""" +177 19 regularizer """no""" +177 19 optimizer """adadelta""" +177 19 training_loop """owa""" +177 19 negative_sampler """basic""" +177 19 evaluator """rankbased""" +177 20 dataset """kinships""" +177 20 model """distmult""" +177 20 loss """softplus""" +177 20 regularizer """no""" +177 20 optimizer """adadelta""" +177 20 training_loop """owa""" +177 20 negative_sampler """basic""" +177 20 evaluator """rankbased""" +177 21 dataset """kinships""" +177 21 model """distmult""" +177 21 loss """softplus""" +177 21 regularizer """no""" +177 21 optimizer """adadelta""" +177 21 training_loop """owa""" +177 21 negative_sampler """basic""" +177 21 evaluator """rankbased""" +177 22 dataset """kinships""" +177 22 model """distmult""" +177 22 loss """softplus""" +177 22 regularizer """no""" +177 22 optimizer """adadelta""" +177 22 training_loop """owa""" +177 22 negative_sampler """basic""" +177 22 evaluator """rankbased""" +177 23 dataset """kinships""" +177 23 model """distmult""" +177 23 loss """softplus""" +177 23 regularizer """no""" +177 23 optimizer """adadelta""" +177 23 
training_loop """owa""" +177 23 negative_sampler """basic""" +177 23 evaluator """rankbased""" +177 24 dataset """kinships""" +177 24 model """distmult""" +177 24 loss """softplus""" +177 24 regularizer """no""" +177 24 optimizer """adadelta""" +177 24 training_loop """owa""" +177 24 negative_sampler """basic""" +177 24 evaluator """rankbased""" +177 25 dataset """kinships""" +177 25 model """distmult""" +177 25 loss """softplus""" +177 25 regularizer """no""" +177 25 optimizer """adadelta""" +177 25 training_loop """owa""" +177 25 negative_sampler """basic""" +177 25 evaluator """rankbased""" +177 26 dataset """kinships""" +177 26 model """distmult""" +177 26 loss """softplus""" +177 26 regularizer """no""" +177 26 optimizer """adadelta""" +177 26 training_loop """owa""" +177 26 negative_sampler """basic""" +177 26 evaluator """rankbased""" +177 27 dataset """kinships""" +177 27 model """distmult""" +177 27 loss """softplus""" +177 27 regularizer """no""" +177 27 optimizer """adadelta""" +177 27 training_loop """owa""" +177 27 negative_sampler """basic""" +177 27 evaluator """rankbased""" +177 28 dataset """kinships""" +177 28 model """distmult""" +177 28 loss """softplus""" +177 28 regularizer """no""" +177 28 optimizer """adadelta""" +177 28 training_loop """owa""" +177 28 negative_sampler """basic""" +177 28 evaluator """rankbased""" +177 29 dataset """kinships""" +177 29 model """distmult""" +177 29 loss """softplus""" +177 29 regularizer """no""" +177 29 optimizer """adadelta""" +177 29 training_loop """owa""" +177 29 negative_sampler """basic""" +177 29 evaluator """rankbased""" +177 30 dataset """kinships""" +177 30 model """distmult""" +177 30 loss """softplus""" +177 30 regularizer """no""" +177 30 optimizer """adadelta""" +177 30 training_loop """owa""" +177 30 negative_sampler """basic""" +177 30 evaluator """rankbased""" +177 31 dataset """kinships""" +177 31 model """distmult""" +177 31 loss """softplus""" +177 31 regularizer """no""" +177 31 
optimizer """adadelta""" +177 31 training_loop """owa""" +177 31 negative_sampler """basic""" +177 31 evaluator """rankbased""" +177 32 dataset """kinships""" +177 32 model """distmult""" +177 32 loss """softplus""" +177 32 regularizer """no""" +177 32 optimizer """adadelta""" +177 32 training_loop """owa""" +177 32 negative_sampler """basic""" +177 32 evaluator """rankbased""" +177 33 dataset """kinships""" +177 33 model """distmult""" +177 33 loss """softplus""" +177 33 regularizer """no""" +177 33 optimizer """adadelta""" +177 33 training_loop """owa""" +177 33 negative_sampler """basic""" +177 33 evaluator """rankbased""" +177 34 dataset """kinships""" +177 34 model """distmult""" +177 34 loss """softplus""" +177 34 regularizer """no""" +177 34 optimizer """adadelta""" +177 34 training_loop """owa""" +177 34 negative_sampler """basic""" +177 34 evaluator """rankbased""" +177 35 dataset """kinships""" +177 35 model """distmult""" +177 35 loss """softplus""" +177 35 regularizer """no""" +177 35 optimizer """adadelta""" +177 35 training_loop """owa""" +177 35 negative_sampler """basic""" +177 35 evaluator """rankbased""" +177 36 dataset """kinships""" +177 36 model """distmult""" +177 36 loss """softplus""" +177 36 regularizer """no""" +177 36 optimizer """adadelta""" +177 36 training_loop """owa""" +177 36 negative_sampler """basic""" +177 36 evaluator """rankbased""" +177 37 dataset """kinships""" +177 37 model """distmult""" +177 37 loss """softplus""" +177 37 regularizer """no""" +177 37 optimizer """adadelta""" +177 37 training_loop """owa""" +177 37 negative_sampler """basic""" +177 37 evaluator """rankbased""" +177 38 dataset """kinships""" +177 38 model """distmult""" +177 38 loss """softplus""" +177 38 regularizer """no""" +177 38 optimizer """adadelta""" +177 38 training_loop """owa""" +177 38 negative_sampler """basic""" +177 38 evaluator """rankbased""" +177 39 dataset """kinships""" +177 39 model """distmult""" +177 39 loss """softplus""" +177 39 
regularizer """no""" +177 39 optimizer """adadelta""" +177 39 training_loop """owa""" +177 39 negative_sampler """basic""" +177 39 evaluator """rankbased""" +177 40 dataset """kinships""" +177 40 model """distmult""" +177 40 loss """softplus""" +177 40 regularizer """no""" +177 40 optimizer """adadelta""" +177 40 training_loop """owa""" +177 40 negative_sampler """basic""" +177 40 evaluator """rankbased""" +177 41 dataset """kinships""" +177 41 model """distmult""" +177 41 loss """softplus""" +177 41 regularizer """no""" +177 41 optimizer """adadelta""" +177 41 training_loop """owa""" +177 41 negative_sampler """basic""" +177 41 evaluator """rankbased""" +177 42 dataset """kinships""" +177 42 model """distmult""" +177 42 loss """softplus""" +177 42 regularizer """no""" +177 42 optimizer """adadelta""" +177 42 training_loop """owa""" +177 42 negative_sampler """basic""" +177 42 evaluator """rankbased""" +177 43 dataset """kinships""" +177 43 model """distmult""" +177 43 loss """softplus""" +177 43 regularizer """no""" +177 43 optimizer """adadelta""" +177 43 training_loop """owa""" +177 43 negative_sampler """basic""" +177 43 evaluator """rankbased""" +177 44 dataset """kinships""" +177 44 model """distmult""" +177 44 loss """softplus""" +177 44 regularizer """no""" +177 44 optimizer """adadelta""" +177 44 training_loop """owa""" +177 44 negative_sampler """basic""" +177 44 evaluator """rankbased""" +177 45 dataset """kinships""" +177 45 model """distmult""" +177 45 loss """softplus""" +177 45 regularizer """no""" +177 45 optimizer """adadelta""" +177 45 training_loop """owa""" +177 45 negative_sampler """basic""" +177 45 evaluator """rankbased""" +177 46 dataset """kinships""" +177 46 model """distmult""" +177 46 loss """softplus""" +177 46 regularizer """no""" +177 46 optimizer """adadelta""" +177 46 training_loop """owa""" +177 46 negative_sampler """basic""" +177 46 evaluator """rankbased""" +177 47 dataset """kinships""" +177 47 model """distmult""" +177 47 
loss """softplus""" +177 47 regularizer """no""" +177 47 optimizer """adadelta""" +177 47 training_loop """owa""" +177 47 negative_sampler """basic""" +177 47 evaluator """rankbased""" +177 48 dataset """kinships""" +177 48 model """distmult""" +177 48 loss """softplus""" +177 48 regularizer """no""" +177 48 optimizer """adadelta""" +177 48 training_loop """owa""" +177 48 negative_sampler """basic""" +177 48 evaluator """rankbased""" +177 49 dataset """kinships""" +177 49 model """distmult""" +177 49 loss """softplus""" +177 49 regularizer """no""" +177 49 optimizer """adadelta""" +177 49 training_loop """owa""" +177 49 negative_sampler """basic""" +177 49 evaluator """rankbased""" +177 50 dataset """kinships""" +177 50 model """distmult""" +177 50 loss """softplus""" +177 50 regularizer """no""" +177 50 optimizer """adadelta""" +177 50 training_loop """owa""" +177 50 negative_sampler """basic""" +177 50 evaluator """rankbased""" +177 51 dataset """kinships""" +177 51 model """distmult""" +177 51 loss """softplus""" +177 51 regularizer """no""" +177 51 optimizer """adadelta""" +177 51 training_loop """owa""" +177 51 negative_sampler """basic""" +177 51 evaluator """rankbased""" +177 52 dataset """kinships""" +177 52 model """distmult""" +177 52 loss """softplus""" +177 52 regularizer """no""" +177 52 optimizer """adadelta""" +177 52 training_loop """owa""" +177 52 negative_sampler """basic""" +177 52 evaluator """rankbased""" +177 53 dataset """kinships""" +177 53 model """distmult""" +177 53 loss """softplus""" +177 53 regularizer """no""" +177 53 optimizer """adadelta""" +177 53 training_loop """owa""" +177 53 negative_sampler """basic""" +177 53 evaluator """rankbased""" +177 54 dataset """kinships""" +177 54 model """distmult""" +177 54 loss """softplus""" +177 54 regularizer """no""" +177 54 optimizer """adadelta""" +177 54 training_loop """owa""" +177 54 negative_sampler """basic""" +177 54 evaluator """rankbased""" +177 55 dataset """kinships""" +177 55 
model """distmult""" +177 55 loss """softplus""" +177 55 regularizer """no""" +177 55 optimizer """adadelta""" +177 55 training_loop """owa""" +177 55 negative_sampler """basic""" +177 55 evaluator """rankbased""" +177 56 dataset """kinships""" +177 56 model """distmult""" +177 56 loss """softplus""" +177 56 regularizer """no""" +177 56 optimizer """adadelta""" +177 56 training_loop """owa""" +177 56 negative_sampler """basic""" +177 56 evaluator """rankbased""" +177 57 dataset """kinships""" +177 57 model """distmult""" +177 57 loss """softplus""" +177 57 regularizer """no""" +177 57 optimizer """adadelta""" +177 57 training_loop """owa""" +177 57 negative_sampler """basic""" +177 57 evaluator """rankbased""" +177 58 dataset """kinships""" +177 58 model """distmult""" +177 58 loss """softplus""" +177 58 regularizer """no""" +177 58 optimizer """adadelta""" +177 58 training_loop """owa""" +177 58 negative_sampler """basic""" +177 58 evaluator """rankbased""" +177 59 dataset """kinships""" +177 59 model """distmult""" +177 59 loss """softplus""" +177 59 regularizer """no""" +177 59 optimizer """adadelta""" +177 59 training_loop """owa""" +177 59 negative_sampler """basic""" +177 59 evaluator """rankbased""" +177 60 dataset """kinships""" +177 60 model """distmult""" +177 60 loss """softplus""" +177 60 regularizer """no""" +177 60 optimizer """adadelta""" +177 60 training_loop """owa""" +177 60 negative_sampler """basic""" +177 60 evaluator """rankbased""" +177 61 dataset """kinships""" +177 61 model """distmult""" +177 61 loss """softplus""" +177 61 regularizer """no""" +177 61 optimizer """adadelta""" +177 61 training_loop """owa""" +177 61 negative_sampler """basic""" +177 61 evaluator """rankbased""" +177 62 dataset """kinships""" +177 62 model """distmult""" +177 62 loss """softplus""" +177 62 regularizer """no""" +177 62 optimizer """adadelta""" +177 62 training_loop """owa""" +177 62 negative_sampler """basic""" +177 62 evaluator """rankbased""" +177 63 
dataset """kinships""" +177 63 model """distmult""" +177 63 loss """softplus""" +177 63 regularizer """no""" +177 63 optimizer """adadelta""" +177 63 training_loop """owa""" +177 63 negative_sampler """basic""" +177 63 evaluator """rankbased""" +177 64 dataset """kinships""" +177 64 model """distmult""" +177 64 loss """softplus""" +177 64 regularizer """no""" +177 64 optimizer """adadelta""" +177 64 training_loop """owa""" +177 64 negative_sampler """basic""" +177 64 evaluator """rankbased""" +177 65 dataset """kinships""" +177 65 model """distmult""" +177 65 loss """softplus""" +177 65 regularizer """no""" +177 65 optimizer """adadelta""" +177 65 training_loop """owa""" +177 65 negative_sampler """basic""" +177 65 evaluator """rankbased""" +177 66 dataset """kinships""" +177 66 model """distmult""" +177 66 loss """softplus""" +177 66 regularizer """no""" +177 66 optimizer """adadelta""" +177 66 training_loop """owa""" +177 66 negative_sampler """basic""" +177 66 evaluator """rankbased""" +177 67 dataset """kinships""" +177 67 model """distmult""" +177 67 loss """softplus""" +177 67 regularizer """no""" +177 67 optimizer """adadelta""" +177 67 training_loop """owa""" +177 67 negative_sampler """basic""" +177 67 evaluator """rankbased""" +177 68 dataset """kinships""" +177 68 model """distmult""" +177 68 loss """softplus""" +177 68 regularizer """no""" +177 68 optimizer """adadelta""" +177 68 training_loop """owa""" +177 68 negative_sampler """basic""" +177 68 evaluator """rankbased""" +177 69 dataset """kinships""" +177 69 model """distmult""" +177 69 loss """softplus""" +177 69 regularizer """no""" +177 69 optimizer """adadelta""" +177 69 training_loop """owa""" +177 69 negative_sampler """basic""" +177 69 evaluator """rankbased""" +177 70 dataset """kinships""" +177 70 model """distmult""" +177 70 loss """softplus""" +177 70 regularizer """no""" +177 70 optimizer """adadelta""" +177 70 training_loop """owa""" +177 70 negative_sampler """basic""" +177 70 evaluator 
"""rankbased""" +177 71 dataset """kinships""" +177 71 model """distmult""" +177 71 loss """softplus""" +177 71 regularizer """no""" +177 71 optimizer """adadelta""" +177 71 training_loop """owa""" +177 71 negative_sampler """basic""" +177 71 evaluator """rankbased""" +177 72 dataset """kinships""" +177 72 model """distmult""" +177 72 loss """softplus""" +177 72 regularizer """no""" +177 72 optimizer """adadelta""" +177 72 training_loop """owa""" +177 72 negative_sampler """basic""" +177 72 evaluator """rankbased""" +177 73 dataset """kinships""" +177 73 model """distmult""" +177 73 loss """softplus""" +177 73 regularizer """no""" +177 73 optimizer """adadelta""" +177 73 training_loop """owa""" +177 73 negative_sampler """basic""" +177 73 evaluator """rankbased""" +177 74 dataset """kinships""" +177 74 model """distmult""" +177 74 loss """softplus""" +177 74 regularizer """no""" +177 74 optimizer """adadelta""" +177 74 training_loop """owa""" +177 74 negative_sampler """basic""" +177 74 evaluator """rankbased""" +177 75 dataset """kinships""" +177 75 model """distmult""" +177 75 loss """softplus""" +177 75 regularizer """no""" +177 75 optimizer """adadelta""" +177 75 training_loop """owa""" +177 75 negative_sampler """basic""" +177 75 evaluator """rankbased""" +177 76 dataset """kinships""" +177 76 model """distmult""" +177 76 loss """softplus""" +177 76 regularizer """no""" +177 76 optimizer """adadelta""" +177 76 training_loop """owa""" +177 76 negative_sampler """basic""" +177 76 evaluator """rankbased""" +177 77 dataset """kinships""" +177 77 model """distmult""" +177 77 loss """softplus""" +177 77 regularizer """no""" +177 77 optimizer """adadelta""" +177 77 training_loop """owa""" +177 77 negative_sampler """basic""" +177 77 evaluator """rankbased""" +177 78 dataset """kinships""" +177 78 model """distmult""" +177 78 loss """softplus""" +177 78 regularizer """no""" +177 78 optimizer """adadelta""" +177 78 training_loop """owa""" +177 78 negative_sampler 
"""basic""" +177 78 evaluator """rankbased""" +177 79 dataset """kinships""" +177 79 model """distmult""" +177 79 loss """softplus""" +177 79 regularizer """no""" +177 79 optimizer """adadelta""" +177 79 training_loop """owa""" +177 79 negative_sampler """basic""" +177 79 evaluator """rankbased""" +177 80 dataset """kinships""" +177 80 model """distmult""" +177 80 loss """softplus""" +177 80 regularizer """no""" +177 80 optimizer """adadelta""" +177 80 training_loop """owa""" +177 80 negative_sampler """basic""" +177 80 evaluator """rankbased""" +177 81 dataset """kinships""" +177 81 model """distmult""" +177 81 loss """softplus""" +177 81 regularizer """no""" +177 81 optimizer """adadelta""" +177 81 training_loop """owa""" +177 81 negative_sampler """basic""" +177 81 evaluator """rankbased""" +177 82 dataset """kinships""" +177 82 model """distmult""" +177 82 loss """softplus""" +177 82 regularizer """no""" +177 82 optimizer """adadelta""" +177 82 training_loop """owa""" +177 82 negative_sampler """basic""" +177 82 evaluator """rankbased""" +177 83 dataset """kinships""" +177 83 model """distmult""" +177 83 loss """softplus""" +177 83 regularizer """no""" +177 83 optimizer """adadelta""" +177 83 training_loop """owa""" +177 83 negative_sampler """basic""" +177 83 evaluator """rankbased""" +177 84 dataset """kinships""" +177 84 model """distmult""" +177 84 loss """softplus""" +177 84 regularizer """no""" +177 84 optimizer """adadelta""" +177 84 training_loop """owa""" +177 84 negative_sampler """basic""" +177 84 evaluator """rankbased""" +177 85 dataset """kinships""" +177 85 model """distmult""" +177 85 loss """softplus""" +177 85 regularizer """no""" +177 85 optimizer """adadelta""" +177 85 training_loop """owa""" +177 85 negative_sampler """basic""" +177 85 evaluator """rankbased""" +177 86 dataset """kinships""" +177 86 model """distmult""" +177 86 loss """softplus""" +177 86 regularizer """no""" +177 86 optimizer """adadelta""" +177 86 training_loop """owa""" 
+177 86 negative_sampler """basic""" +177 86 evaluator """rankbased""" +177 87 dataset """kinships""" +177 87 model """distmult""" +177 87 loss """softplus""" +177 87 regularizer """no""" +177 87 optimizer """adadelta""" +177 87 training_loop """owa""" +177 87 negative_sampler """basic""" +177 87 evaluator """rankbased""" +177 88 dataset """kinships""" +177 88 model """distmult""" +177 88 loss """softplus""" +177 88 regularizer """no""" +177 88 optimizer """adadelta""" +177 88 training_loop """owa""" +177 88 negative_sampler """basic""" +177 88 evaluator """rankbased""" +177 89 dataset """kinships""" +177 89 model """distmult""" +177 89 loss """softplus""" +177 89 regularizer """no""" +177 89 optimizer """adadelta""" +177 89 training_loop """owa""" +177 89 negative_sampler """basic""" +177 89 evaluator """rankbased""" +177 90 dataset """kinships""" +177 90 model """distmult""" +177 90 loss """softplus""" +177 90 regularizer """no""" +177 90 optimizer """adadelta""" +177 90 training_loop """owa""" +177 90 negative_sampler """basic""" +177 90 evaluator """rankbased""" +177 91 dataset """kinships""" +177 91 model """distmult""" +177 91 loss """softplus""" +177 91 regularizer """no""" +177 91 optimizer """adadelta""" +177 91 training_loop """owa""" +177 91 negative_sampler """basic""" +177 91 evaluator """rankbased""" +177 92 dataset """kinships""" +177 92 model """distmult""" +177 92 loss """softplus""" +177 92 regularizer """no""" +177 92 optimizer """adadelta""" +177 92 training_loop """owa""" +177 92 negative_sampler """basic""" +177 92 evaluator """rankbased""" +177 93 dataset """kinships""" +177 93 model """distmult""" +177 93 loss """softplus""" +177 93 regularizer """no""" +177 93 optimizer """adadelta""" +177 93 training_loop """owa""" +177 93 negative_sampler """basic""" +177 93 evaluator """rankbased""" +177 94 dataset """kinships""" +177 94 model """distmult""" +177 94 loss """softplus""" +177 94 regularizer """no""" +177 94 optimizer """adadelta""" +177 94 
training_loop """owa""" +177 94 negative_sampler """basic""" +177 94 evaluator """rankbased""" +177 95 dataset """kinships""" +177 95 model """distmult""" +177 95 loss """softplus""" +177 95 regularizer """no""" +177 95 optimizer """adadelta""" +177 95 training_loop """owa""" +177 95 negative_sampler """basic""" +177 95 evaluator """rankbased""" +177 96 dataset """kinships""" +177 96 model """distmult""" +177 96 loss """softplus""" +177 96 regularizer """no""" +177 96 optimizer """adadelta""" +177 96 training_loop """owa""" +177 96 negative_sampler """basic""" +177 96 evaluator """rankbased""" +177 97 dataset """kinships""" +177 97 model """distmult""" +177 97 loss """softplus""" +177 97 regularizer """no""" +177 97 optimizer """adadelta""" +177 97 training_loop """owa""" +177 97 negative_sampler """basic""" +177 97 evaluator """rankbased""" +177 98 dataset """kinships""" +177 98 model """distmult""" +177 98 loss """softplus""" +177 98 regularizer """no""" +177 98 optimizer """adadelta""" +177 98 training_loop """owa""" +177 98 negative_sampler """basic""" +177 98 evaluator """rankbased""" +177 99 dataset """kinships""" +177 99 model """distmult""" +177 99 loss """softplus""" +177 99 regularizer """no""" +177 99 optimizer """adadelta""" +177 99 training_loop """owa""" +177 99 negative_sampler """basic""" +177 99 evaluator """rankbased""" +177 100 dataset """kinships""" +177 100 model """distmult""" +177 100 loss """softplus""" +177 100 regularizer """no""" +177 100 optimizer """adadelta""" +177 100 training_loop """owa""" +177 100 negative_sampler """basic""" +177 100 evaluator """rankbased""" +178 1 model.embedding_dim 2.0 +178 1 loss.margin 2.2619054925349276 +178 1 negative_sampler.num_negs_per_pos 3.0 +178 1 training.batch_size 2.0 +178 2 model.embedding_dim 2.0 +178 2 loss.margin 3.999144220950864 +178 2 negative_sampler.num_negs_per_pos 38.0 +178 2 training.batch_size 1.0 +178 3 model.embedding_dim 0.0 +178 3 loss.margin 1.723453481059046 +178 3 
negative_sampler.num_negs_per_pos 47.0 +178 3 training.batch_size 2.0 +178 4 model.embedding_dim 2.0 +178 4 loss.margin 0.7646365866122069 +178 4 negative_sampler.num_negs_per_pos 94.0 +178 4 training.batch_size 0.0 +178 5 model.embedding_dim 1.0 +178 5 loss.margin 2.570774381003845 +178 5 negative_sampler.num_negs_per_pos 15.0 +178 5 training.batch_size 0.0 +178 6 model.embedding_dim 1.0 +178 6 loss.margin 2.6485981147984545 +178 6 negative_sampler.num_negs_per_pos 69.0 +178 6 training.batch_size 2.0 +178 7 model.embedding_dim 1.0 +178 7 loss.margin 8.698284075173106 +178 7 negative_sampler.num_negs_per_pos 45.0 +178 7 training.batch_size 0.0 +178 8 model.embedding_dim 0.0 +178 8 loss.margin 2.5980826990394976 +178 8 negative_sampler.num_negs_per_pos 87.0 +178 8 training.batch_size 1.0 +178 9 model.embedding_dim 0.0 +178 9 loss.margin 7.218854734568336 +178 9 negative_sampler.num_negs_per_pos 16.0 +178 9 training.batch_size 0.0 +178 10 model.embedding_dim 1.0 +178 10 loss.margin 1.8139979088928613 +178 10 negative_sampler.num_negs_per_pos 21.0 +178 10 training.batch_size 0.0 +178 11 model.embedding_dim 1.0 +178 11 loss.margin 3.1505397203017647 +178 11 negative_sampler.num_negs_per_pos 62.0 +178 11 training.batch_size 2.0 +178 12 model.embedding_dim 2.0 +178 12 loss.margin 8.713363883270974 +178 12 negative_sampler.num_negs_per_pos 7.0 +178 12 training.batch_size 2.0 +178 13 model.embedding_dim 1.0 +178 13 loss.margin 0.6943281440703097 +178 13 negative_sampler.num_negs_per_pos 97.0 +178 13 training.batch_size 0.0 +178 14 model.embedding_dim 1.0 +178 14 loss.margin 8.791942133071814 +178 14 negative_sampler.num_negs_per_pos 4.0 +178 14 training.batch_size 1.0 +178 15 model.embedding_dim 2.0 +178 15 loss.margin 5.37304413835877 +178 15 negative_sampler.num_negs_per_pos 22.0 +178 15 training.batch_size 0.0 +178 16 model.embedding_dim 0.0 +178 16 loss.margin 4.2783671126822265 +178 16 negative_sampler.num_negs_per_pos 8.0 +178 16 training.batch_size 1.0 +178 17 
model.embedding_dim 1.0 +178 17 loss.margin 9.167016905572144 +178 17 negative_sampler.num_negs_per_pos 20.0 +178 17 training.batch_size 0.0 +178 18 model.embedding_dim 1.0 +178 18 loss.margin 3.4908775464277992 +178 18 negative_sampler.num_negs_per_pos 67.0 +178 18 training.batch_size 0.0 +178 19 model.embedding_dim 2.0 +178 19 loss.margin 5.878876264842847 +178 19 negative_sampler.num_negs_per_pos 64.0 +178 19 training.batch_size 0.0 +178 20 model.embedding_dim 0.0 +178 20 loss.margin 6.282657771363196 +178 20 negative_sampler.num_negs_per_pos 3.0 +178 20 training.batch_size 2.0 +178 21 model.embedding_dim 1.0 +178 21 loss.margin 9.031060505345398 +178 21 negative_sampler.num_negs_per_pos 6.0 +178 21 training.batch_size 2.0 +178 22 model.embedding_dim 2.0 +178 22 loss.margin 1.0506266103582491 +178 22 negative_sampler.num_negs_per_pos 76.0 +178 22 training.batch_size 1.0 +178 23 model.embedding_dim 2.0 +178 23 loss.margin 8.821824416543345 +178 23 negative_sampler.num_negs_per_pos 72.0 +178 23 training.batch_size 2.0 +178 24 model.embedding_dim 0.0 +178 24 loss.margin 2.62788522849769 +178 24 negative_sampler.num_negs_per_pos 69.0 +178 24 training.batch_size 0.0 +178 25 model.embedding_dim 2.0 +178 25 loss.margin 4.141767810562463 +178 25 negative_sampler.num_negs_per_pos 68.0 +178 25 training.batch_size 2.0 +178 26 model.embedding_dim 0.0 +178 26 loss.margin 2.595193843045015 +178 26 negative_sampler.num_negs_per_pos 34.0 +178 26 training.batch_size 2.0 +178 27 model.embedding_dim 2.0 +178 27 loss.margin 9.236919854345638 +178 27 negative_sampler.num_negs_per_pos 70.0 +178 27 training.batch_size 2.0 +178 28 model.embedding_dim 2.0 +178 28 loss.margin 0.8014470188737676 +178 28 negative_sampler.num_negs_per_pos 67.0 +178 28 training.batch_size 2.0 +178 29 model.embedding_dim 0.0 +178 29 loss.margin 1.5492696999392375 +178 29 negative_sampler.num_negs_per_pos 56.0 +178 29 training.batch_size 0.0 +178 30 model.embedding_dim 0.0 +178 30 loss.margin 
4.6339040639140485 +178 30 negative_sampler.num_negs_per_pos 88.0 +178 30 training.batch_size 0.0 +178 31 model.embedding_dim 2.0 +178 31 loss.margin 3.53750939005466 +178 31 negative_sampler.num_negs_per_pos 73.0 +178 31 training.batch_size 0.0 +178 32 model.embedding_dim 2.0 +178 32 loss.margin 4.4143028490753 +178 32 negative_sampler.num_negs_per_pos 58.0 +178 32 training.batch_size 0.0 +178 33 model.embedding_dim 1.0 +178 33 loss.margin 3.6162671548068723 +178 33 negative_sampler.num_negs_per_pos 89.0 +178 33 training.batch_size 1.0 +178 34 model.embedding_dim 1.0 +178 34 loss.margin 6.413553022953994 +178 34 negative_sampler.num_negs_per_pos 83.0 +178 34 training.batch_size 1.0 +178 35 model.embedding_dim 1.0 +178 35 loss.margin 4.877654891647269 +178 35 negative_sampler.num_negs_per_pos 31.0 +178 35 training.batch_size 0.0 +178 36 model.embedding_dim 0.0 +178 36 loss.margin 3.945278441731035 +178 36 negative_sampler.num_negs_per_pos 6.0 +178 36 training.batch_size 1.0 +178 37 model.embedding_dim 1.0 +178 37 loss.margin 6.688658713038031 +178 37 negative_sampler.num_negs_per_pos 70.0 +178 37 training.batch_size 2.0 +178 38 model.embedding_dim 2.0 +178 38 loss.margin 5.8075270240613275 +178 38 negative_sampler.num_negs_per_pos 38.0 +178 38 training.batch_size 0.0 +178 39 model.embedding_dim 0.0 +178 39 loss.margin 9.3659328604693 +178 39 negative_sampler.num_negs_per_pos 68.0 +178 39 training.batch_size 2.0 +178 40 model.embedding_dim 2.0 +178 40 loss.margin 6.683037484629117 +178 40 negative_sampler.num_negs_per_pos 91.0 +178 40 training.batch_size 2.0 +178 41 model.embedding_dim 0.0 +178 41 loss.margin 4.695944029586557 +178 41 negative_sampler.num_negs_per_pos 26.0 +178 41 training.batch_size 1.0 +178 42 model.embedding_dim 0.0 +178 42 loss.margin 8.012872039160253 +178 42 negative_sampler.num_negs_per_pos 33.0 +178 42 training.batch_size 2.0 +178 43 model.embedding_dim 2.0 +178 43 loss.margin 5.703175993983651 +178 43 negative_sampler.num_negs_per_pos 22.0 
+178 43 training.batch_size 2.0 +178 44 model.embedding_dim 1.0 +178 44 loss.margin 6.304008207985063 +178 44 negative_sampler.num_negs_per_pos 68.0 +178 44 training.batch_size 1.0 +178 45 model.embedding_dim 2.0 +178 45 loss.margin 2.1862036687232598 +178 45 negative_sampler.num_negs_per_pos 18.0 +178 45 training.batch_size 2.0 +178 46 model.embedding_dim 1.0 +178 46 loss.margin 2.5635985535325925 +178 46 negative_sampler.num_negs_per_pos 14.0 +178 46 training.batch_size 2.0 +178 47 model.embedding_dim 1.0 +178 47 loss.margin 8.051737479855623 +178 47 negative_sampler.num_negs_per_pos 75.0 +178 47 training.batch_size 0.0 +178 48 model.embedding_dim 0.0 +178 48 loss.margin 2.091455888674442 +178 48 negative_sampler.num_negs_per_pos 79.0 +178 48 training.batch_size 1.0 +178 49 model.embedding_dim 1.0 +178 49 loss.margin 0.953949908176263 +178 49 negative_sampler.num_negs_per_pos 70.0 +178 49 training.batch_size 1.0 +178 50 model.embedding_dim 1.0 +178 50 loss.margin 2.8815708531542814 +178 50 negative_sampler.num_negs_per_pos 12.0 +178 50 training.batch_size 2.0 +178 51 model.embedding_dim 0.0 +178 51 loss.margin 8.199322214105013 +178 51 negative_sampler.num_negs_per_pos 28.0 +178 51 training.batch_size 2.0 +178 52 model.embedding_dim 1.0 +178 52 loss.margin 5.041789135110158 +178 52 negative_sampler.num_negs_per_pos 3.0 +178 52 training.batch_size 0.0 +178 53 model.embedding_dim 1.0 +178 53 loss.margin 5.154767234498932 +178 53 negative_sampler.num_negs_per_pos 8.0 +178 53 training.batch_size 0.0 +178 54 model.embedding_dim 0.0 +178 54 loss.margin 2.645559664336555 +178 54 negative_sampler.num_negs_per_pos 34.0 +178 54 training.batch_size 0.0 +178 55 model.embedding_dim 2.0 +178 55 loss.margin 6.25863357452682 +178 55 negative_sampler.num_negs_per_pos 4.0 +178 55 training.batch_size 1.0 +178 56 model.embedding_dim 2.0 +178 56 loss.margin 8.496405493891604 +178 56 negative_sampler.num_negs_per_pos 90.0 +178 56 training.batch_size 0.0 +178 57 model.embedding_dim 2.0 
+178 57 loss.margin 7.5640888292953825 +178 57 negative_sampler.num_negs_per_pos 89.0 +178 57 training.batch_size 0.0 +178 58 model.embedding_dim 2.0 +178 58 loss.margin 8.090545152274546 +178 58 negative_sampler.num_negs_per_pos 99.0 +178 58 training.batch_size 0.0 +178 59 model.embedding_dim 1.0 +178 59 loss.margin 5.872854694718155 +178 59 negative_sampler.num_negs_per_pos 15.0 +178 59 training.batch_size 0.0 +178 60 model.embedding_dim 1.0 +178 60 loss.margin 3.559842703976318 +178 60 negative_sampler.num_negs_per_pos 1.0 +178 60 training.batch_size 2.0 +178 61 model.embedding_dim 1.0 +178 61 loss.margin 9.503789186218988 +178 61 negative_sampler.num_negs_per_pos 73.0 +178 61 training.batch_size 0.0 +178 62 model.embedding_dim 2.0 +178 62 loss.margin 5.099433926594188 +178 62 negative_sampler.num_negs_per_pos 21.0 +178 62 training.batch_size 1.0 +178 63 model.embedding_dim 2.0 +178 63 loss.margin 4.456753228329962 +178 63 negative_sampler.num_negs_per_pos 67.0 +178 63 training.batch_size 2.0 +178 64 model.embedding_dim 2.0 +178 64 loss.margin 2.1205352858278563 +178 64 negative_sampler.num_negs_per_pos 0.0 +178 64 training.batch_size 1.0 +178 65 model.embedding_dim 0.0 +178 65 loss.margin 3.211879363975158 +178 65 negative_sampler.num_negs_per_pos 28.0 +178 65 training.batch_size 0.0 +178 66 model.embedding_dim 0.0 +178 66 loss.margin 9.102578484293865 +178 66 negative_sampler.num_negs_per_pos 59.0 +178 66 training.batch_size 2.0 +178 67 model.embedding_dim 1.0 +178 67 loss.margin 4.15843149359543 +178 67 negative_sampler.num_negs_per_pos 11.0 +178 67 training.batch_size 1.0 +178 68 model.embedding_dim 0.0 +178 68 loss.margin 7.5847226078601375 +178 68 negative_sampler.num_negs_per_pos 2.0 +178 68 training.batch_size 0.0 +178 69 model.embedding_dim 1.0 +178 69 loss.margin 3.415763418158564 +178 69 negative_sampler.num_negs_per_pos 24.0 +178 69 training.batch_size 0.0 +178 70 model.embedding_dim 1.0 +178 70 loss.margin 1.7873567121618765 +178 70 
negative_sampler.num_negs_per_pos 55.0 +178 70 training.batch_size 0.0 +178 71 model.embedding_dim 2.0 +178 71 loss.margin 0.9938281444324202 +178 71 negative_sampler.num_negs_per_pos 66.0 +178 71 training.batch_size 1.0 +178 72 model.embedding_dim 1.0 +178 72 loss.margin 8.547940745308644 +178 72 negative_sampler.num_negs_per_pos 11.0 +178 72 training.batch_size 1.0 +178 73 model.embedding_dim 0.0 +178 73 loss.margin 6.436985526943126 +178 73 negative_sampler.num_negs_per_pos 88.0 +178 73 training.batch_size 1.0 +178 74 model.embedding_dim 0.0 +178 74 loss.margin 4.186859961255924 +178 74 negative_sampler.num_negs_per_pos 21.0 +178 74 training.batch_size 1.0 +178 75 model.embedding_dim 1.0 +178 75 loss.margin 5.142313295727941 +178 75 negative_sampler.num_negs_per_pos 79.0 +178 75 training.batch_size 1.0 +178 76 model.embedding_dim 1.0 +178 76 loss.margin 6.614927668439277 +178 76 negative_sampler.num_negs_per_pos 14.0 +178 76 training.batch_size 0.0 +178 77 model.embedding_dim 0.0 +178 77 loss.margin 9.17873744622332 +178 77 negative_sampler.num_negs_per_pos 31.0 +178 77 training.batch_size 0.0 +178 78 model.embedding_dim 0.0 +178 78 loss.margin 5.58753056667602 +178 78 negative_sampler.num_negs_per_pos 60.0 +178 78 training.batch_size 0.0 +178 79 model.embedding_dim 0.0 +178 79 loss.margin 4.657000513236435 +178 79 negative_sampler.num_negs_per_pos 81.0 +178 79 training.batch_size 1.0 +178 80 model.embedding_dim 1.0 +178 80 loss.margin 7.455901367282141 +178 80 negative_sampler.num_negs_per_pos 84.0 +178 80 training.batch_size 2.0 +178 81 model.embedding_dim 0.0 +178 81 loss.margin 7.95674493453999 +178 81 negative_sampler.num_negs_per_pos 78.0 +178 81 training.batch_size 0.0 +178 82 model.embedding_dim 1.0 +178 82 loss.margin 1.5930908228948477 +178 82 negative_sampler.num_negs_per_pos 45.0 +178 82 training.batch_size 1.0 +178 83 model.embedding_dim 0.0 +178 83 loss.margin 8.674324571521348 +178 83 negative_sampler.num_negs_per_pos 12.0 +178 83 
training.batch_size 1.0 +178 84 model.embedding_dim 2.0 +178 84 loss.margin 2.0365877692263794 +178 84 negative_sampler.num_negs_per_pos 34.0 +178 84 training.batch_size 0.0 +178 85 model.embedding_dim 0.0 +178 85 loss.margin 7.480216266206761 +178 85 negative_sampler.num_negs_per_pos 13.0 +178 85 training.batch_size 2.0 +178 86 model.embedding_dim 2.0 +178 86 loss.margin 1.8258481710631351 +178 86 negative_sampler.num_negs_per_pos 65.0 +178 86 training.batch_size 1.0 +178 87 model.embedding_dim 2.0 +178 87 loss.margin 0.656191703440502 +178 87 negative_sampler.num_negs_per_pos 16.0 +178 87 training.batch_size 2.0 +178 88 model.embedding_dim 1.0 +178 88 loss.margin 2.0783769504339658 +178 88 negative_sampler.num_negs_per_pos 98.0 +178 88 training.batch_size 2.0 +178 89 model.embedding_dim 2.0 +178 89 loss.margin 3.060981707052932 +178 89 negative_sampler.num_negs_per_pos 36.0 +178 89 training.batch_size 0.0 +178 90 model.embedding_dim 2.0 +178 90 loss.margin 0.8050838583976276 +178 90 negative_sampler.num_negs_per_pos 70.0 +178 90 training.batch_size 0.0 +178 91 model.embedding_dim 2.0 +178 91 loss.margin 1.3981248849680987 +178 91 negative_sampler.num_negs_per_pos 72.0 +178 91 training.batch_size 2.0 +178 92 model.embedding_dim 2.0 +178 92 loss.margin 5.033780145077325 +178 92 negative_sampler.num_negs_per_pos 2.0 +178 92 training.batch_size 2.0 +178 93 model.embedding_dim 2.0 +178 93 loss.margin 6.259071212450965 +178 93 negative_sampler.num_negs_per_pos 48.0 +178 93 training.batch_size 1.0 +178 94 model.embedding_dim 2.0 +178 94 loss.margin 3.020484961719591 +178 94 negative_sampler.num_negs_per_pos 11.0 +178 94 training.batch_size 0.0 +178 95 model.embedding_dim 0.0 +178 95 loss.margin 9.390259963634335 +178 95 negative_sampler.num_negs_per_pos 59.0 +178 95 training.batch_size 0.0 +178 96 model.embedding_dim 2.0 +178 96 loss.margin 9.82856122386446 +178 96 negative_sampler.num_negs_per_pos 80.0 +178 96 training.batch_size 0.0 +178 97 model.embedding_dim 0.0 
+178 97 loss.margin 4.820249363383465 +178 97 negative_sampler.num_negs_per_pos 55.0 +178 97 training.batch_size 1.0 +178 98 model.embedding_dim 0.0 +178 98 loss.margin 1.0159315325371365 +178 98 negative_sampler.num_negs_per_pos 71.0 +178 98 training.batch_size 0.0 +178 99 model.embedding_dim 2.0 +178 99 loss.margin 5.429858809611899 +178 99 negative_sampler.num_negs_per_pos 28.0 +178 99 training.batch_size 2.0 +178 100 model.embedding_dim 1.0 +178 100 loss.margin 8.042316555622373 +178 100 negative_sampler.num_negs_per_pos 81.0 +178 100 training.batch_size 2.0 +178 1 dataset """kinships""" +178 1 model """distmult""" +178 1 loss """marginranking""" +178 1 regularizer """no""" +178 1 optimizer """adadelta""" +178 1 training_loop """owa""" +178 1 negative_sampler """basic""" +178 1 evaluator """rankbased""" +178 2 dataset """kinships""" +178 2 model """distmult""" +178 2 loss """marginranking""" +178 2 regularizer """no""" +178 2 optimizer """adadelta""" +178 2 training_loop """owa""" +178 2 negative_sampler """basic""" +178 2 evaluator """rankbased""" +178 3 dataset """kinships""" +178 3 model """distmult""" +178 3 loss """marginranking""" +178 3 regularizer """no""" +178 3 optimizer """adadelta""" +178 3 training_loop """owa""" +178 3 negative_sampler """basic""" +178 3 evaluator """rankbased""" +178 4 dataset """kinships""" +178 4 model """distmult""" +178 4 loss """marginranking""" +178 4 regularizer """no""" +178 4 optimizer """adadelta""" +178 4 training_loop """owa""" +178 4 negative_sampler """basic""" +178 4 evaluator """rankbased""" +178 5 dataset """kinships""" +178 5 model """distmult""" +178 5 loss """marginranking""" +178 5 regularizer """no""" +178 5 optimizer """adadelta""" +178 5 training_loop """owa""" +178 5 negative_sampler """basic""" +178 5 evaluator """rankbased""" +178 6 dataset """kinships""" +178 6 model """distmult""" +178 6 loss """marginranking""" +178 6 regularizer """no""" +178 6 optimizer """adadelta""" +178 6 training_loop """owa""" 
+178 6 negative_sampler """basic""" +178 6 evaluator """rankbased""" +178 7 dataset """kinships""" +178 7 model """distmult""" +178 7 loss """marginranking""" +178 7 regularizer """no""" +178 7 optimizer """adadelta""" +178 7 training_loop """owa""" +178 7 negative_sampler """basic""" +178 7 evaluator """rankbased""" +178 8 dataset """kinships""" +178 8 model """distmult""" +178 8 loss """marginranking""" +178 8 regularizer """no""" +178 8 optimizer """adadelta""" +178 8 training_loop """owa""" +178 8 negative_sampler """basic""" +178 8 evaluator """rankbased""" +178 9 dataset """kinships""" +178 9 model """distmult""" +178 9 loss """marginranking""" +178 9 regularizer """no""" +178 9 optimizer """adadelta""" +178 9 training_loop """owa""" +178 9 negative_sampler """basic""" +178 9 evaluator """rankbased""" +178 10 dataset """kinships""" +178 10 model """distmult""" +178 10 loss """marginranking""" +178 10 regularizer """no""" +178 10 optimizer """adadelta""" +178 10 training_loop """owa""" +178 10 negative_sampler """basic""" +178 10 evaluator """rankbased""" +178 11 dataset """kinships""" +178 11 model """distmult""" +178 11 loss """marginranking""" +178 11 regularizer """no""" +178 11 optimizer """adadelta""" +178 11 training_loop """owa""" +178 11 negative_sampler """basic""" +178 11 evaluator """rankbased""" +178 12 dataset """kinships""" +178 12 model """distmult""" +178 12 loss """marginranking""" +178 12 regularizer """no""" +178 12 optimizer """adadelta""" +178 12 training_loop """owa""" +178 12 negative_sampler """basic""" +178 12 evaluator """rankbased""" +178 13 dataset """kinships""" +178 13 model """distmult""" +178 13 loss """marginranking""" +178 13 regularizer """no""" +178 13 optimizer """adadelta""" +178 13 training_loop """owa""" +178 13 negative_sampler """basic""" +178 13 evaluator """rankbased""" +178 14 dataset """kinships""" +178 14 model """distmult""" +178 14 loss """marginranking""" +178 14 regularizer """no""" +178 14 optimizer 
"""adadelta""" +178 14 training_loop """owa""" +178 14 negative_sampler """basic""" +178 14 evaluator """rankbased""" +178 15 dataset """kinships""" +178 15 model """distmult""" +178 15 loss """marginranking""" +178 15 regularizer """no""" +178 15 optimizer """adadelta""" +178 15 training_loop """owa""" +178 15 negative_sampler """basic""" +178 15 evaluator """rankbased""" +178 16 dataset """kinships""" +178 16 model """distmult""" +178 16 loss """marginranking""" +178 16 regularizer """no""" +178 16 optimizer """adadelta""" +178 16 training_loop """owa""" +178 16 negative_sampler """basic""" +178 16 evaluator """rankbased""" +178 17 dataset """kinships""" +178 17 model """distmult""" +178 17 loss """marginranking""" +178 17 regularizer """no""" +178 17 optimizer """adadelta""" +178 17 training_loop """owa""" +178 17 negative_sampler """basic""" +178 17 evaluator """rankbased""" +178 18 dataset """kinships""" +178 18 model """distmult""" +178 18 loss """marginranking""" +178 18 regularizer """no""" +178 18 optimizer """adadelta""" +178 18 training_loop """owa""" +178 18 negative_sampler """basic""" +178 18 evaluator """rankbased""" +178 19 dataset """kinships""" +178 19 model """distmult""" +178 19 loss """marginranking""" +178 19 regularizer """no""" +178 19 optimizer """adadelta""" +178 19 training_loop """owa""" +178 19 negative_sampler """basic""" +178 19 evaluator """rankbased""" +178 20 dataset """kinships""" +178 20 model """distmult""" +178 20 loss """marginranking""" +178 20 regularizer """no""" +178 20 optimizer """adadelta""" +178 20 training_loop """owa""" +178 20 negative_sampler """basic""" +178 20 evaluator """rankbased""" +178 21 dataset """kinships""" +178 21 model """distmult""" +178 21 loss """marginranking""" +178 21 regularizer """no""" +178 21 optimizer """adadelta""" +178 21 training_loop """owa""" +178 21 negative_sampler """basic""" +178 21 evaluator """rankbased""" +178 22 dataset """kinships""" +178 22 model """distmult""" +178 22 loss 
"""marginranking""" +178 22 regularizer """no""" +178 22 optimizer """adadelta""" +178 22 training_loop """owa""" +178 22 negative_sampler """basic""" +178 22 evaluator """rankbased""" +178 23 dataset """kinships""" +178 23 model """distmult""" +178 23 loss """marginranking""" +178 23 regularizer """no""" +178 23 optimizer """adadelta""" +178 23 training_loop """owa""" +178 23 negative_sampler """basic""" +178 23 evaluator """rankbased""" +178 24 dataset """kinships""" +178 24 model """distmult""" +178 24 loss """marginranking""" +178 24 regularizer """no""" +178 24 optimizer """adadelta""" +178 24 training_loop """owa""" +178 24 negative_sampler """basic""" +178 24 evaluator """rankbased""" +178 25 dataset """kinships""" +178 25 model """distmult""" +178 25 loss """marginranking""" +178 25 regularizer """no""" +178 25 optimizer """adadelta""" +178 25 training_loop """owa""" +178 25 negative_sampler """basic""" +178 25 evaluator """rankbased""" +178 26 dataset """kinships""" +178 26 model """distmult""" +178 26 loss """marginranking""" +178 26 regularizer """no""" +178 26 optimizer """adadelta""" +178 26 training_loop """owa""" +178 26 negative_sampler """basic""" +178 26 evaluator """rankbased""" +178 27 dataset """kinships""" +178 27 model """distmult""" +178 27 loss """marginranking""" +178 27 regularizer """no""" +178 27 optimizer """adadelta""" +178 27 training_loop """owa""" +178 27 negative_sampler """basic""" +178 27 evaluator """rankbased""" +178 28 dataset """kinships""" +178 28 model """distmult""" +178 28 loss """marginranking""" +178 28 regularizer """no""" +178 28 optimizer """adadelta""" +178 28 training_loop """owa""" +178 28 negative_sampler """basic""" +178 28 evaluator """rankbased""" +178 29 dataset """kinships""" +178 29 model """distmult""" +178 29 loss """marginranking""" +178 29 regularizer """no""" +178 29 optimizer """adadelta""" +178 29 training_loop """owa""" +178 29 negative_sampler """basic""" +178 29 evaluator """rankbased""" +178 30 
dataset """kinships""" +178 30 model """distmult""" +178 30 loss """marginranking""" +178 30 regularizer """no""" +178 30 optimizer """adadelta""" +178 30 training_loop """owa""" +178 30 negative_sampler """basic""" +178 30 evaluator """rankbased""" +178 31 dataset """kinships""" +178 31 model """distmult""" +178 31 loss """marginranking""" +178 31 regularizer """no""" +178 31 optimizer """adadelta""" +178 31 training_loop """owa""" +178 31 negative_sampler """basic""" +178 31 evaluator """rankbased""" +178 32 dataset """kinships""" +178 32 model """distmult""" +178 32 loss """marginranking""" +178 32 regularizer """no""" +178 32 optimizer """adadelta""" +178 32 training_loop """owa""" +178 32 negative_sampler """basic""" +178 32 evaluator """rankbased""" +178 33 dataset """kinships""" +178 33 model """distmult""" +178 33 loss """marginranking""" +178 33 regularizer """no""" +178 33 optimizer """adadelta""" +178 33 training_loop """owa""" +178 33 negative_sampler """basic""" +178 33 evaluator """rankbased""" +178 34 dataset """kinships""" +178 34 model """distmult""" +178 34 loss """marginranking""" +178 34 regularizer """no""" +178 34 optimizer """adadelta""" +178 34 training_loop """owa""" +178 34 negative_sampler """basic""" +178 34 evaluator """rankbased""" +178 35 dataset """kinships""" +178 35 model """distmult""" +178 35 loss """marginranking""" +178 35 regularizer """no""" +178 35 optimizer """adadelta""" +178 35 training_loop """owa""" +178 35 negative_sampler """basic""" +178 35 evaluator """rankbased""" +178 36 dataset """kinships""" +178 36 model """distmult""" +178 36 loss """marginranking""" +178 36 regularizer """no""" +178 36 optimizer """adadelta""" +178 36 training_loop """owa""" +178 36 negative_sampler """basic""" +178 36 evaluator """rankbased""" +178 37 dataset """kinships""" +178 37 model """distmult""" +178 37 loss """marginranking""" +178 37 regularizer """no""" +178 37 optimizer """adadelta""" +178 37 training_loop """owa""" +178 37 
negative_sampler """basic""" +178 37 evaluator """rankbased""" +178 38 dataset """kinships""" +178 38 model """distmult""" +178 38 loss """marginranking""" +178 38 regularizer """no""" +178 38 optimizer """adadelta""" +178 38 training_loop """owa""" +178 38 negative_sampler """basic""" +178 38 evaluator """rankbased""" +178 39 dataset """kinships""" +178 39 model """distmult""" +178 39 loss """marginranking""" +178 39 regularizer """no""" +178 39 optimizer """adadelta""" +178 39 training_loop """owa""" +178 39 negative_sampler """basic""" +178 39 evaluator """rankbased""" +178 40 dataset """kinships""" +178 40 model """distmult""" +178 40 loss """marginranking""" +178 40 regularizer """no""" +178 40 optimizer """adadelta""" +178 40 training_loop """owa""" +178 40 negative_sampler """basic""" +178 40 evaluator """rankbased""" +178 41 dataset """kinships""" +178 41 model """distmult""" +178 41 loss """marginranking""" +178 41 regularizer """no""" +178 41 optimizer """adadelta""" +178 41 training_loop """owa""" +178 41 negative_sampler """basic""" +178 41 evaluator """rankbased""" +178 42 dataset """kinships""" +178 42 model """distmult""" +178 42 loss """marginranking""" +178 42 regularizer """no""" +178 42 optimizer """adadelta""" +178 42 training_loop """owa""" +178 42 negative_sampler """basic""" +178 42 evaluator """rankbased""" +178 43 dataset """kinships""" +178 43 model """distmult""" +178 43 loss """marginranking""" +178 43 regularizer """no""" +178 43 optimizer """adadelta""" +178 43 training_loop """owa""" +178 43 negative_sampler """basic""" +178 43 evaluator """rankbased""" +178 44 dataset """kinships""" +178 44 model """distmult""" +178 44 loss """marginranking""" +178 44 regularizer """no""" +178 44 optimizer """adadelta""" +178 44 training_loop """owa""" +178 44 negative_sampler """basic""" +178 44 evaluator """rankbased""" +178 45 dataset """kinships""" +178 45 model """distmult""" +178 45 loss """marginranking""" +178 45 regularizer """no""" +178 45 
optimizer """adadelta""" +178 45 training_loop """owa""" +178 45 negative_sampler """basic""" +178 45 evaluator """rankbased""" +178 46 dataset """kinships""" +178 46 model """distmult""" +178 46 loss """marginranking""" +178 46 regularizer """no""" +178 46 optimizer """adadelta""" +178 46 training_loop """owa""" +178 46 negative_sampler """basic""" +178 46 evaluator """rankbased""" +178 47 dataset """kinships""" +178 47 model """distmult""" +178 47 loss """marginranking""" +178 47 regularizer """no""" +178 47 optimizer """adadelta""" +178 47 training_loop """owa""" +178 47 negative_sampler """basic""" +178 47 evaluator """rankbased""" +178 48 dataset """kinships""" +178 48 model """distmult""" +178 48 loss """marginranking""" +178 48 regularizer """no""" +178 48 optimizer """adadelta""" +178 48 training_loop """owa""" +178 48 negative_sampler """basic""" +178 48 evaluator """rankbased""" +178 49 dataset """kinships""" +178 49 model """distmult""" +178 49 loss """marginranking""" +178 49 regularizer """no""" +178 49 optimizer """adadelta""" +178 49 training_loop """owa""" +178 49 negative_sampler """basic""" +178 49 evaluator """rankbased""" +178 50 dataset """kinships""" +178 50 model """distmult""" +178 50 loss """marginranking""" +178 50 regularizer """no""" +178 50 optimizer """adadelta""" +178 50 training_loop """owa""" +178 50 negative_sampler """basic""" +178 50 evaluator """rankbased""" +178 51 dataset """kinships""" +178 51 model """distmult""" +178 51 loss """marginranking""" +178 51 regularizer """no""" +178 51 optimizer """adadelta""" +178 51 training_loop """owa""" +178 51 negative_sampler """basic""" +178 51 evaluator """rankbased""" +178 52 dataset """kinships""" +178 52 model """distmult""" +178 52 loss """marginranking""" +178 52 regularizer """no""" +178 52 optimizer """adadelta""" +178 52 training_loop """owa""" +178 52 negative_sampler """basic""" +178 52 evaluator """rankbased""" +178 53 dataset """kinships""" +178 53 model """distmult""" +178 
53 loss """marginranking""" +178 53 regularizer """no""" +178 53 optimizer """adadelta""" +178 53 training_loop """owa""" +178 53 negative_sampler """basic""" +178 53 evaluator """rankbased""" +178 54 dataset """kinships""" +178 54 model """distmult""" +178 54 loss """marginranking""" +178 54 regularizer """no""" +178 54 optimizer """adadelta""" +178 54 training_loop """owa""" +178 54 negative_sampler """basic""" +178 54 evaluator """rankbased""" +178 55 dataset """kinships""" +178 55 model """distmult""" +178 55 loss """marginranking""" +178 55 regularizer """no""" +178 55 optimizer """adadelta""" +178 55 training_loop """owa""" +178 55 negative_sampler """basic""" +178 55 evaluator """rankbased""" +178 56 dataset """kinships""" +178 56 model """distmult""" +178 56 loss """marginranking""" +178 56 regularizer """no""" +178 56 optimizer """adadelta""" +178 56 training_loop """owa""" +178 56 negative_sampler """basic""" +178 56 evaluator """rankbased""" +178 57 dataset """kinships""" +178 57 model """distmult""" +178 57 loss """marginranking""" +178 57 regularizer """no""" +178 57 optimizer """adadelta""" +178 57 training_loop """owa""" +178 57 negative_sampler """basic""" +178 57 evaluator """rankbased""" +178 58 dataset """kinships""" +178 58 model """distmult""" +178 58 loss """marginranking""" +178 58 regularizer """no""" +178 58 optimizer """adadelta""" +178 58 training_loop """owa""" +178 58 negative_sampler """basic""" +178 58 evaluator """rankbased""" +178 59 dataset """kinships""" +178 59 model """distmult""" +178 59 loss """marginranking""" +178 59 regularizer """no""" +178 59 optimizer """adadelta""" +178 59 training_loop """owa""" +178 59 negative_sampler """basic""" +178 59 evaluator """rankbased""" +178 60 dataset """kinships""" +178 60 model """distmult""" +178 60 loss """marginranking""" +178 60 regularizer """no""" +178 60 optimizer """adadelta""" +178 60 training_loop """owa""" +178 60 negative_sampler """basic""" +178 60 evaluator """rankbased""" 
+178 61 dataset """kinships""" +178 61 model """distmult""" +178 61 loss """marginranking""" +178 61 regularizer """no""" +178 61 optimizer """adadelta""" +178 61 training_loop """owa""" +178 61 negative_sampler """basic""" +178 61 evaluator """rankbased""" +178 62 dataset """kinships""" +178 62 model """distmult""" +178 62 loss """marginranking""" +178 62 regularizer """no""" +178 62 optimizer """adadelta""" +178 62 training_loop """owa""" +178 62 negative_sampler """basic""" +178 62 evaluator """rankbased""" +178 63 dataset """kinships""" +178 63 model """distmult""" +178 63 loss """marginranking""" +178 63 regularizer """no""" +178 63 optimizer """adadelta""" +178 63 training_loop """owa""" +178 63 negative_sampler """basic""" +178 63 evaluator """rankbased""" +178 64 dataset """kinships""" +178 64 model """distmult""" +178 64 loss """marginranking""" +178 64 regularizer """no""" +178 64 optimizer """adadelta""" +178 64 training_loop """owa""" +178 64 negative_sampler """basic""" +178 64 evaluator """rankbased""" +178 65 dataset """kinships""" +178 65 model """distmult""" +178 65 loss """marginranking""" +178 65 regularizer """no""" +178 65 optimizer """adadelta""" +178 65 training_loop """owa""" +178 65 negative_sampler """basic""" +178 65 evaluator """rankbased""" +178 66 dataset """kinships""" +178 66 model """distmult""" +178 66 loss """marginranking""" +178 66 regularizer """no""" +178 66 optimizer """adadelta""" +178 66 training_loop """owa""" +178 66 negative_sampler """basic""" +178 66 evaluator """rankbased""" +178 67 dataset """kinships""" +178 67 model """distmult""" +178 67 loss """marginranking""" +178 67 regularizer """no""" +178 67 optimizer """adadelta""" +178 67 training_loop """owa""" +178 67 negative_sampler """basic""" +178 67 evaluator """rankbased""" +178 68 dataset """kinships""" +178 68 model """distmult""" +178 68 loss """marginranking""" +178 68 regularizer """no""" +178 68 optimizer """adadelta""" +178 68 training_loop """owa""" +178 
68 negative_sampler """basic""" +178 68 evaluator """rankbased""" +178 69 dataset """kinships""" +178 69 model """distmult""" +178 69 loss """marginranking""" +178 69 regularizer """no""" +178 69 optimizer """adadelta""" +178 69 training_loop """owa""" +178 69 negative_sampler """basic""" +178 69 evaluator """rankbased""" +178 70 dataset """kinships""" +178 70 model """distmult""" +178 70 loss """marginranking""" +178 70 regularizer """no""" +178 70 optimizer """adadelta""" +178 70 training_loop """owa""" +178 70 negative_sampler """basic""" +178 70 evaluator """rankbased""" +178 71 dataset """kinships""" +178 71 model """distmult""" +178 71 loss """marginranking""" +178 71 regularizer """no""" +178 71 optimizer """adadelta""" +178 71 training_loop """owa""" +178 71 negative_sampler """basic""" +178 71 evaluator """rankbased""" +178 72 dataset """kinships""" +178 72 model """distmult""" +178 72 loss """marginranking""" +178 72 regularizer """no""" +178 72 optimizer """adadelta""" +178 72 training_loop """owa""" +178 72 negative_sampler """basic""" +178 72 evaluator """rankbased""" +178 73 dataset """kinships""" +178 73 model """distmult""" +178 73 loss """marginranking""" +178 73 regularizer """no""" +178 73 optimizer """adadelta""" +178 73 training_loop """owa""" +178 73 negative_sampler """basic""" +178 73 evaluator """rankbased""" +178 74 dataset """kinships""" +178 74 model """distmult""" +178 74 loss """marginranking""" +178 74 regularizer """no""" +178 74 optimizer """adadelta""" +178 74 training_loop """owa""" +178 74 negative_sampler """basic""" +178 74 evaluator """rankbased""" +178 75 dataset """kinships""" +178 75 model """distmult""" +178 75 loss """marginranking""" +178 75 regularizer """no""" +178 75 optimizer """adadelta""" +178 75 training_loop """owa""" +178 75 negative_sampler """basic""" +178 75 evaluator """rankbased""" +178 76 dataset """kinships""" +178 76 model """distmult""" +178 76 loss """marginranking""" +178 76 regularizer """no""" +178 
76 optimizer """adadelta""" +178 76 training_loop """owa""" +178 76 negative_sampler """basic""" +178 76 evaluator """rankbased""" +178 77 dataset """kinships""" +178 77 model """distmult""" +178 77 loss """marginranking""" +178 77 regularizer """no""" +178 77 optimizer """adadelta""" +178 77 training_loop """owa""" +178 77 negative_sampler """basic""" +178 77 evaluator """rankbased""" +178 78 dataset """kinships""" +178 78 model """distmult""" +178 78 loss """marginranking""" +178 78 regularizer """no""" +178 78 optimizer """adadelta""" +178 78 training_loop """owa""" +178 78 negative_sampler """basic""" +178 78 evaluator """rankbased""" +178 79 dataset """kinships""" +178 79 model """distmult""" +178 79 loss """marginranking""" +178 79 regularizer """no""" +178 79 optimizer """adadelta""" +178 79 training_loop """owa""" +178 79 negative_sampler """basic""" +178 79 evaluator """rankbased""" +178 80 dataset """kinships""" +178 80 model """distmult""" +178 80 loss """marginranking""" +178 80 regularizer """no""" +178 80 optimizer """adadelta""" +178 80 training_loop """owa""" +178 80 negative_sampler """basic""" +178 80 evaluator """rankbased""" +178 81 dataset """kinships""" +178 81 model """distmult""" +178 81 loss """marginranking""" +178 81 regularizer """no""" +178 81 optimizer """adadelta""" +178 81 training_loop """owa""" +178 81 negative_sampler """basic""" +178 81 evaluator """rankbased""" +178 82 dataset """kinships""" +178 82 model """distmult""" +178 82 loss """marginranking""" +178 82 regularizer """no""" +178 82 optimizer """adadelta""" +178 82 training_loop """owa""" +178 82 negative_sampler """basic""" +178 82 evaluator """rankbased""" +178 83 dataset """kinships""" +178 83 model """distmult""" +178 83 loss """marginranking""" +178 83 regularizer """no""" +178 83 optimizer """adadelta""" +178 83 training_loop """owa""" +178 83 negative_sampler """basic""" +178 83 evaluator """rankbased""" +178 84 dataset """kinships""" +178 84 model """distmult""" 
+178 84 loss """marginranking""" +178 84 regularizer """no""" +178 84 optimizer """adadelta""" +178 84 training_loop """owa""" +178 84 negative_sampler """basic""" +178 84 evaluator """rankbased""" +178 85 dataset """kinships""" +178 85 model """distmult""" +178 85 loss """marginranking""" +178 85 regularizer """no""" +178 85 optimizer """adadelta""" +178 85 training_loop """owa""" +178 85 negative_sampler """basic""" +178 85 evaluator """rankbased""" +178 86 dataset """kinships""" +178 86 model """distmult""" +178 86 loss """marginranking""" +178 86 regularizer """no""" +178 86 optimizer """adadelta""" +178 86 training_loop """owa""" +178 86 negative_sampler """basic""" +178 86 evaluator """rankbased""" +178 87 dataset """kinships""" +178 87 model """distmult""" +178 87 loss """marginranking""" +178 87 regularizer """no""" +178 87 optimizer """adadelta""" +178 87 training_loop """owa""" +178 87 negative_sampler """basic""" +178 87 evaluator """rankbased""" +178 88 dataset """kinships""" +178 88 model """distmult""" +178 88 loss """marginranking""" +178 88 regularizer """no""" +178 88 optimizer """adadelta""" +178 88 training_loop """owa""" +178 88 negative_sampler """basic""" +178 88 evaluator """rankbased""" +178 89 dataset """kinships""" +178 89 model """distmult""" +178 89 loss """marginranking""" +178 89 regularizer """no""" +178 89 optimizer """adadelta""" +178 89 training_loop """owa""" +178 89 negative_sampler """basic""" +178 89 evaluator """rankbased""" +178 90 dataset """kinships""" +178 90 model """distmult""" +178 90 loss """marginranking""" +178 90 regularizer """no""" +178 90 optimizer """adadelta""" +178 90 training_loop """owa""" +178 90 negative_sampler """basic""" +178 90 evaluator """rankbased""" +178 91 dataset """kinships""" +178 91 model """distmult""" +178 91 loss """marginranking""" +178 91 regularizer """no""" +178 91 optimizer """adadelta""" +178 91 training_loop """owa""" +178 91 negative_sampler """basic""" +178 91 evaluator 
"""rankbased""" +178 92 dataset """kinships""" +178 92 model """distmult""" +178 92 loss """marginranking""" +178 92 regularizer """no""" +178 92 optimizer """adadelta""" +178 92 training_loop """owa""" +178 92 negative_sampler """basic""" +178 92 evaluator """rankbased""" +178 93 dataset """kinships""" +178 93 model """distmult""" +178 93 loss """marginranking""" +178 93 regularizer """no""" +178 93 optimizer """adadelta""" +178 93 training_loop """owa""" +178 93 negative_sampler """basic""" +178 93 evaluator """rankbased""" +178 94 dataset """kinships""" +178 94 model """distmult""" +178 94 loss """marginranking""" +178 94 regularizer """no""" +178 94 optimizer """adadelta""" +178 94 training_loop """owa""" +178 94 negative_sampler """basic""" +178 94 evaluator """rankbased""" +178 95 dataset """kinships""" +178 95 model """distmult""" +178 95 loss """marginranking""" +178 95 regularizer """no""" +178 95 optimizer """adadelta""" +178 95 training_loop """owa""" +178 95 negative_sampler """basic""" +178 95 evaluator """rankbased""" +178 96 dataset """kinships""" +178 96 model """distmult""" +178 96 loss """marginranking""" +178 96 regularizer """no""" +178 96 optimizer """adadelta""" +178 96 training_loop """owa""" +178 96 negative_sampler """basic""" +178 96 evaluator """rankbased""" +178 97 dataset """kinships""" +178 97 model """distmult""" +178 97 loss """marginranking""" +178 97 regularizer """no""" +178 97 optimizer """adadelta""" +178 97 training_loop """owa""" +178 97 negative_sampler """basic""" +178 97 evaluator """rankbased""" +178 98 dataset """kinships""" +178 98 model """distmult""" +178 98 loss """marginranking""" +178 98 regularizer """no""" +178 98 optimizer """adadelta""" +178 98 training_loop """owa""" +178 98 negative_sampler """basic""" +178 98 evaluator """rankbased""" +178 99 dataset """kinships""" +178 99 model """distmult""" +178 99 loss """marginranking""" +178 99 regularizer """no""" +178 99 optimizer """adadelta""" +178 99 training_loop 
"""owa""" +178 99 negative_sampler """basic""" +178 99 evaluator """rankbased""" +178 100 dataset """kinships""" +178 100 model """distmult""" +178 100 loss """marginranking""" +178 100 regularizer """no""" +178 100 optimizer """adadelta""" +178 100 training_loop """owa""" +178 100 negative_sampler """basic""" +178 100 evaluator """rankbased""" +179 1 model.embedding_dim 0.0 +179 1 loss.margin 4.08010316857292 +179 1 negative_sampler.num_negs_per_pos 93.0 +179 1 training.batch_size 1.0 +179 2 model.embedding_dim 0.0 +179 2 loss.margin 3.7838096909986323 +179 2 negative_sampler.num_negs_per_pos 51.0 +179 2 training.batch_size 0.0 +179 3 model.embedding_dim 2.0 +179 3 loss.margin 5.121853845355125 +179 3 negative_sampler.num_negs_per_pos 59.0 +179 3 training.batch_size 0.0 +179 4 model.embedding_dim 0.0 +179 4 loss.margin 3.9922955107633107 +179 4 negative_sampler.num_negs_per_pos 88.0 +179 4 training.batch_size 0.0 +179 5 model.embedding_dim 0.0 +179 5 loss.margin 2.451825699405867 +179 5 negative_sampler.num_negs_per_pos 96.0 +179 5 training.batch_size 0.0 +179 6 model.embedding_dim 2.0 +179 6 loss.margin 2.636330865382143 +179 6 negative_sampler.num_negs_per_pos 64.0 +179 6 training.batch_size 0.0 +179 7 model.embedding_dim 0.0 +179 7 loss.margin 1.2101080191603575 +179 7 negative_sampler.num_negs_per_pos 16.0 +179 7 training.batch_size 0.0 +179 8 model.embedding_dim 2.0 +179 8 loss.margin 7.872170545310979 +179 8 negative_sampler.num_negs_per_pos 13.0 +179 8 training.batch_size 0.0 +179 9 model.embedding_dim 2.0 +179 9 loss.margin 5.885515375081415 +179 9 negative_sampler.num_negs_per_pos 11.0 +179 9 training.batch_size 0.0 +179 10 model.embedding_dim 1.0 +179 10 loss.margin 3.5262290413389095 +179 10 negative_sampler.num_negs_per_pos 52.0 +179 10 training.batch_size 2.0 +179 11 model.embedding_dim 2.0 +179 11 loss.margin 8.778929806894485 +179 11 negative_sampler.num_negs_per_pos 47.0 +179 11 training.batch_size 1.0 +179 12 model.embedding_dim 0.0 +179 12 
loss.margin 0.869600456409704 +179 12 negative_sampler.num_negs_per_pos 95.0 +179 12 training.batch_size 2.0 +179 13 model.embedding_dim 1.0 +179 13 loss.margin 9.999059552157606 +179 13 negative_sampler.num_negs_per_pos 1.0 +179 13 training.batch_size 2.0 +179 14 model.embedding_dim 2.0 +179 14 loss.margin 5.380717191282967 +179 14 negative_sampler.num_negs_per_pos 69.0 +179 14 training.batch_size 0.0 +179 15 model.embedding_dim 2.0 +179 15 loss.margin 9.96402157555557 +179 15 negative_sampler.num_negs_per_pos 35.0 +179 15 training.batch_size 0.0 +179 16 model.embedding_dim 1.0 +179 16 loss.margin 6.834277644271063 +179 16 negative_sampler.num_negs_per_pos 27.0 +179 16 training.batch_size 1.0 +179 17 model.embedding_dim 1.0 +179 17 loss.margin 9.219191952326819 +179 17 negative_sampler.num_negs_per_pos 19.0 +179 17 training.batch_size 0.0 +179 18 model.embedding_dim 1.0 +179 18 loss.margin 2.8488235156492894 +179 18 negative_sampler.num_negs_per_pos 19.0 +179 18 training.batch_size 2.0 +179 19 model.embedding_dim 0.0 +179 19 loss.margin 6.888375876717131 +179 19 negative_sampler.num_negs_per_pos 66.0 +179 19 training.batch_size 2.0 +179 20 model.embedding_dim 0.0 +179 20 loss.margin 4.889135608065545 +179 20 negative_sampler.num_negs_per_pos 38.0 +179 20 training.batch_size 1.0 +179 21 model.embedding_dim 1.0 +179 21 loss.margin 2.636910938388861 +179 21 negative_sampler.num_negs_per_pos 68.0 +179 21 training.batch_size 1.0 +179 22 model.embedding_dim 2.0 +179 22 loss.margin 9.378697388898292 +179 22 negative_sampler.num_negs_per_pos 6.0 +179 22 training.batch_size 2.0 +179 23 model.embedding_dim 0.0 +179 23 loss.margin 5.2450076992982835 +179 23 negative_sampler.num_negs_per_pos 28.0 +179 23 training.batch_size 0.0 +179 24 model.embedding_dim 0.0 +179 24 loss.margin 8.29793454783562 +179 24 negative_sampler.num_negs_per_pos 23.0 +179 24 training.batch_size 0.0 +179 25 model.embedding_dim 1.0 +179 25 loss.margin 0.607633121819402 +179 25 
negative_sampler.num_negs_per_pos 27.0 +179 25 training.batch_size 2.0 +179 26 model.embedding_dim 0.0 +179 26 loss.margin 4.153398040472342 +179 26 negative_sampler.num_negs_per_pos 47.0 +179 26 training.batch_size 0.0 +179 27 model.embedding_dim 2.0 +179 27 loss.margin 9.90288349424195 +179 27 negative_sampler.num_negs_per_pos 97.0 +179 27 training.batch_size 1.0 +179 28 model.embedding_dim 2.0 +179 28 loss.margin 1.2668086709527002 +179 28 negative_sampler.num_negs_per_pos 38.0 +179 28 training.batch_size 2.0 +179 29 model.embedding_dim 0.0 +179 29 loss.margin 3.099313311863881 +179 29 negative_sampler.num_negs_per_pos 96.0 +179 29 training.batch_size 2.0 +179 30 model.embedding_dim 1.0 +179 30 loss.margin 1.651612522027516 +179 30 negative_sampler.num_negs_per_pos 94.0 +179 30 training.batch_size 1.0 +179 31 model.embedding_dim 1.0 +179 31 loss.margin 2.263664309038433 +179 31 negative_sampler.num_negs_per_pos 48.0 +179 31 training.batch_size 0.0 +179 32 model.embedding_dim 1.0 +179 32 loss.margin 8.992430873801737 +179 32 negative_sampler.num_negs_per_pos 33.0 +179 32 training.batch_size 1.0 +179 33 model.embedding_dim 1.0 +179 33 loss.margin 8.572927815062473 +179 33 negative_sampler.num_negs_per_pos 40.0 +179 33 training.batch_size 2.0 +179 34 model.embedding_dim 0.0 +179 34 loss.margin 4.9817062167529835 +179 34 negative_sampler.num_negs_per_pos 15.0 +179 34 training.batch_size 1.0 +179 35 model.embedding_dim 2.0 +179 35 loss.margin 2.488153355067315 +179 35 negative_sampler.num_negs_per_pos 43.0 +179 35 training.batch_size 0.0 +179 36 model.embedding_dim 1.0 +179 36 loss.margin 3.6906001008145726 +179 36 negative_sampler.num_negs_per_pos 8.0 +179 36 training.batch_size 1.0 +179 37 model.embedding_dim 2.0 +179 37 loss.margin 6.842671279226043 +179 37 negative_sampler.num_negs_per_pos 62.0 +179 37 training.batch_size 2.0 +179 38 model.embedding_dim 0.0 +179 38 loss.margin 5.2191771571249665 +179 38 negative_sampler.num_negs_per_pos 81.0 +179 38 
training.batch_size 2.0 +179 39 model.embedding_dim 0.0 +179 39 loss.margin 6.578713099713221 +179 39 negative_sampler.num_negs_per_pos 3.0 +179 39 training.batch_size 1.0 +179 40 model.embedding_dim 1.0 +179 40 loss.margin 7.901979871489545 +179 40 negative_sampler.num_negs_per_pos 57.0 +179 40 training.batch_size 0.0 +179 41 model.embedding_dim 0.0 +179 41 loss.margin 2.9975342837254555 +179 41 negative_sampler.num_negs_per_pos 32.0 +179 41 training.batch_size 2.0 +179 42 model.embedding_dim 1.0 +179 42 loss.margin 7.662828349971274 +179 42 negative_sampler.num_negs_per_pos 90.0 +179 42 training.batch_size 2.0 +179 43 model.embedding_dim 1.0 +179 43 loss.margin 5.944598931622678 +179 43 negative_sampler.num_negs_per_pos 23.0 +179 43 training.batch_size 1.0 +179 44 model.embedding_dim 2.0 +179 44 loss.margin 1.7263361553007428 +179 44 negative_sampler.num_negs_per_pos 63.0 +179 44 training.batch_size 1.0 +179 45 model.embedding_dim 2.0 +179 45 loss.margin 4.027093642601234 +179 45 negative_sampler.num_negs_per_pos 47.0 +179 45 training.batch_size 1.0 +179 46 model.embedding_dim 1.0 +179 46 loss.margin 8.360776913851643 +179 46 negative_sampler.num_negs_per_pos 4.0 +179 46 training.batch_size 1.0 +179 47 model.embedding_dim 1.0 +179 47 loss.margin 1.6245899993259043 +179 47 negative_sampler.num_negs_per_pos 28.0 +179 47 training.batch_size 0.0 +179 48 model.embedding_dim 0.0 +179 48 loss.margin 0.821280884736592 +179 48 negative_sampler.num_negs_per_pos 37.0 +179 48 training.batch_size 1.0 +179 49 model.embedding_dim 2.0 +179 49 loss.margin 2.364304393674807 +179 49 negative_sampler.num_negs_per_pos 35.0 +179 49 training.batch_size 0.0 +179 50 model.embedding_dim 2.0 +179 50 loss.margin 8.073210723343802 +179 50 negative_sampler.num_negs_per_pos 50.0 +179 50 training.batch_size 2.0 +179 51 model.embedding_dim 0.0 +179 51 loss.margin 2.658207004655985 +179 51 negative_sampler.num_negs_per_pos 64.0 +179 51 training.batch_size 0.0 +179 52 model.embedding_dim 0.0 +179 
52 loss.margin 8.896693191277182 +179 52 negative_sampler.num_negs_per_pos 48.0 +179 52 training.batch_size 0.0 +179 53 model.embedding_dim 1.0 +179 53 loss.margin 8.308365118698468 +179 53 negative_sampler.num_negs_per_pos 48.0 +179 53 training.batch_size 2.0 +179 54 model.embedding_dim 0.0 +179 54 loss.margin 7.402099210512155 +179 54 negative_sampler.num_negs_per_pos 23.0 +179 54 training.batch_size 0.0 +179 55 model.embedding_dim 1.0 +179 55 loss.margin 9.360267244529442 +179 55 negative_sampler.num_negs_per_pos 44.0 +179 55 training.batch_size 1.0 +179 56 model.embedding_dim 2.0 +179 56 loss.margin 9.340654752099173 +179 56 negative_sampler.num_negs_per_pos 15.0 +179 56 training.batch_size 2.0 +179 57 model.embedding_dim 2.0 +179 57 loss.margin 0.9297165132412565 +179 57 negative_sampler.num_negs_per_pos 26.0 +179 57 training.batch_size 2.0 +179 58 model.embedding_dim 2.0 +179 58 loss.margin 4.257105155118683 +179 58 negative_sampler.num_negs_per_pos 79.0 +179 58 training.batch_size 1.0 +179 59 model.embedding_dim 2.0 +179 59 loss.margin 9.885569303779272 +179 59 negative_sampler.num_negs_per_pos 7.0 +179 59 training.batch_size 2.0 +179 60 model.embedding_dim 2.0 +179 60 loss.margin 8.499705393220754 +179 60 negative_sampler.num_negs_per_pos 44.0 +179 60 training.batch_size 0.0 +179 61 model.embedding_dim 0.0 +179 61 loss.margin 6.708148388442479 +179 61 negative_sampler.num_negs_per_pos 8.0 +179 61 training.batch_size 0.0 +179 62 model.embedding_dim 2.0 +179 62 loss.margin 1.7718665235628204 +179 62 negative_sampler.num_negs_per_pos 85.0 +179 62 training.batch_size 0.0 +179 63 model.embedding_dim 0.0 +179 63 loss.margin 9.432917747808554 +179 63 negative_sampler.num_negs_per_pos 7.0 +179 63 training.batch_size 0.0 +179 64 model.embedding_dim 2.0 +179 64 loss.margin 8.490323491934625 +179 64 negative_sampler.num_negs_per_pos 38.0 +179 64 training.batch_size 0.0 +179 65 model.embedding_dim 0.0 +179 65 loss.margin 4.907674173301919 +179 65 
negative_sampler.num_negs_per_pos 20.0 +179 65 training.batch_size 2.0 +179 66 model.embedding_dim 0.0 +179 66 loss.margin 4.206404385308287 +179 66 negative_sampler.num_negs_per_pos 94.0 +179 66 training.batch_size 1.0 +179 67 model.embedding_dim 1.0 +179 67 loss.margin 8.224291006037694 +179 67 negative_sampler.num_negs_per_pos 85.0 +179 67 training.batch_size 0.0 +179 68 model.embedding_dim 2.0 +179 68 loss.margin 5.141033929435265 +179 68 negative_sampler.num_negs_per_pos 12.0 +179 68 training.batch_size 2.0 +179 69 model.embedding_dim 1.0 +179 69 loss.margin 9.034916654306702 +179 69 negative_sampler.num_negs_per_pos 36.0 +179 69 training.batch_size 2.0 +179 70 model.embedding_dim 0.0 +179 70 loss.margin 0.5623034255307279 +179 70 negative_sampler.num_negs_per_pos 12.0 +179 70 training.batch_size 1.0 +179 71 model.embedding_dim 2.0 +179 71 loss.margin 1.9278352794347415 +179 71 negative_sampler.num_negs_per_pos 38.0 +179 71 training.batch_size 2.0 +179 72 model.embedding_dim 0.0 +179 72 loss.margin 3.0720573924481225 +179 72 negative_sampler.num_negs_per_pos 93.0 +179 72 training.batch_size 1.0 +179 73 model.embedding_dim 1.0 +179 73 loss.margin 6.158143890567094 +179 73 negative_sampler.num_negs_per_pos 7.0 +179 73 training.batch_size 2.0 +179 74 model.embedding_dim 0.0 +179 74 loss.margin 6.392178566768304 +179 74 negative_sampler.num_negs_per_pos 18.0 +179 74 training.batch_size 2.0 +179 75 model.embedding_dim 1.0 +179 75 loss.margin 1.0286980148999598 +179 75 negative_sampler.num_negs_per_pos 85.0 +179 75 training.batch_size 0.0 +179 76 model.embedding_dim 0.0 +179 76 loss.margin 8.45698541066257 +179 76 negative_sampler.num_negs_per_pos 46.0 +179 76 training.batch_size 0.0 +179 77 model.embedding_dim 0.0 +179 77 loss.margin 2.5865618793705742 +179 77 negative_sampler.num_negs_per_pos 44.0 +179 77 training.batch_size 1.0 +179 78 model.embedding_dim 2.0 +179 78 loss.margin 0.6695896027401458 +179 78 negative_sampler.num_negs_per_pos 95.0 +179 78 
training.batch_size 1.0 +179 79 model.embedding_dim 2.0 +179 79 loss.margin 6.341534861125995 +179 79 negative_sampler.num_negs_per_pos 6.0 +179 79 training.batch_size 0.0 +179 80 model.embedding_dim 0.0 +179 80 loss.margin 3.7728101622397334 +179 80 negative_sampler.num_negs_per_pos 87.0 +179 80 training.batch_size 0.0 +179 81 model.embedding_dim 0.0 +179 81 loss.margin 8.4085973964652 +179 81 negative_sampler.num_negs_per_pos 67.0 +179 81 training.batch_size 0.0 +179 82 model.embedding_dim 1.0 +179 82 loss.margin 5.111194892729063 +179 82 negative_sampler.num_negs_per_pos 88.0 +179 82 training.batch_size 1.0 +179 83 model.embedding_dim 2.0 +179 83 loss.margin 8.162530241991817 +179 83 negative_sampler.num_negs_per_pos 36.0 +179 83 training.batch_size 1.0 +179 84 model.embedding_dim 0.0 +179 84 loss.margin 8.715735635236957 +179 84 negative_sampler.num_negs_per_pos 96.0 +179 84 training.batch_size 2.0 +179 85 model.embedding_dim 0.0 +179 85 loss.margin 8.987536237577535 +179 85 negative_sampler.num_negs_per_pos 75.0 +179 85 training.batch_size 2.0 +179 86 model.embedding_dim 1.0 +179 86 loss.margin 5.876105488019815 +179 86 negative_sampler.num_negs_per_pos 83.0 +179 86 training.batch_size 1.0 +179 87 model.embedding_dim 2.0 +179 87 loss.margin 4.517781488958707 +179 87 negative_sampler.num_negs_per_pos 76.0 +179 87 training.batch_size 1.0 +179 88 model.embedding_dim 2.0 +179 88 loss.margin 5.2638615969943 +179 88 negative_sampler.num_negs_per_pos 8.0 +179 88 training.batch_size 1.0 +179 89 model.embedding_dim 1.0 +179 89 loss.margin 7.482163216533786 +179 89 negative_sampler.num_negs_per_pos 74.0 +179 89 training.batch_size 0.0 +179 90 model.embedding_dim 2.0 +179 90 loss.margin 2.5795849008080083 +179 90 negative_sampler.num_negs_per_pos 13.0 +179 90 training.batch_size 1.0 +179 91 model.embedding_dim 2.0 +179 91 loss.margin 8.583506569181978 +179 91 negative_sampler.num_negs_per_pos 29.0 +179 91 training.batch_size 2.0 +179 92 model.embedding_dim 1.0 +179 92 
loss.margin 5.173150018858366 +179 92 negative_sampler.num_negs_per_pos 16.0 +179 92 training.batch_size 2.0 +179 93 model.embedding_dim 2.0 +179 93 loss.margin 9.711140387426846 +179 93 negative_sampler.num_negs_per_pos 8.0 +179 93 training.batch_size 0.0 +179 94 model.embedding_dim 0.0 +179 94 loss.margin 2.2948211535800542 +179 94 negative_sampler.num_negs_per_pos 50.0 +179 94 training.batch_size 2.0 +179 95 model.embedding_dim 2.0 +179 95 loss.margin 4.766003963605145 +179 95 negative_sampler.num_negs_per_pos 43.0 +179 95 training.batch_size 0.0 +179 96 model.embedding_dim 0.0 +179 96 loss.margin 2.334441711221272 +179 96 negative_sampler.num_negs_per_pos 59.0 +179 96 training.batch_size 2.0 +179 97 model.embedding_dim 0.0 +179 97 loss.margin 2.3793069742897393 +179 97 negative_sampler.num_negs_per_pos 13.0 +179 97 training.batch_size 1.0 +179 98 model.embedding_dim 0.0 +179 98 loss.margin 2.215609948121523 +179 98 negative_sampler.num_negs_per_pos 24.0 +179 98 training.batch_size 2.0 +179 99 model.embedding_dim 0.0 +179 99 loss.margin 1.1509043148733247 +179 99 negative_sampler.num_negs_per_pos 78.0 +179 99 training.batch_size 0.0 +179 100 model.embedding_dim 1.0 +179 100 loss.margin 6.366157555168657 +179 100 negative_sampler.num_negs_per_pos 38.0 +179 100 training.batch_size 0.0 +179 1 dataset """kinships""" +179 1 model """distmult""" +179 1 loss """marginranking""" +179 1 regularizer """no""" +179 1 optimizer """adadelta""" +179 1 training_loop """owa""" +179 1 negative_sampler """basic""" +179 1 evaluator """rankbased""" +179 2 dataset """kinships""" +179 2 model """distmult""" +179 2 loss """marginranking""" +179 2 regularizer """no""" +179 2 optimizer """adadelta""" +179 2 training_loop """owa""" +179 2 negative_sampler """basic""" +179 2 evaluator """rankbased""" +179 3 dataset """kinships""" +179 3 model """distmult""" +179 3 loss """marginranking""" +179 3 regularizer """no""" +179 3 optimizer """adadelta""" +179 3 training_loop """owa""" +179 3 
negative_sampler """basic""" +179 3 evaluator """rankbased""" +179 4 dataset """kinships""" +179 4 model """distmult""" +179 4 loss """marginranking""" +179 4 regularizer """no""" +179 4 optimizer """adadelta""" +179 4 training_loop """owa""" +179 4 negative_sampler """basic""" +179 4 evaluator """rankbased""" +179 5 dataset """kinships""" +179 5 model """distmult""" +179 5 loss """marginranking""" +179 5 regularizer """no""" +179 5 optimizer """adadelta""" +179 5 training_loop """owa""" +179 5 negative_sampler """basic""" +179 5 evaluator """rankbased""" +179 6 dataset """kinships""" +179 6 model """distmult""" +179 6 loss """marginranking""" +179 6 regularizer """no""" +179 6 optimizer """adadelta""" +179 6 training_loop """owa""" +179 6 negative_sampler """basic""" +179 6 evaluator """rankbased""" +179 7 dataset """kinships""" +179 7 model """distmult""" +179 7 loss """marginranking""" +179 7 regularizer """no""" +179 7 optimizer """adadelta""" +179 7 training_loop """owa""" +179 7 negative_sampler """basic""" +179 7 evaluator """rankbased""" +179 8 dataset """kinships""" +179 8 model """distmult""" +179 8 loss """marginranking""" +179 8 regularizer """no""" +179 8 optimizer """adadelta""" +179 8 training_loop """owa""" +179 8 negative_sampler """basic""" +179 8 evaluator """rankbased""" +179 9 dataset """kinships""" +179 9 model """distmult""" +179 9 loss """marginranking""" +179 9 regularizer """no""" +179 9 optimizer """adadelta""" +179 9 training_loop """owa""" +179 9 negative_sampler """basic""" +179 9 evaluator """rankbased""" +179 10 dataset """kinships""" +179 10 model """distmult""" +179 10 loss """marginranking""" +179 10 regularizer """no""" +179 10 optimizer """adadelta""" +179 10 training_loop """owa""" +179 10 negative_sampler """basic""" +179 10 evaluator """rankbased""" +179 11 dataset """kinships""" +179 11 model """distmult""" +179 11 loss """marginranking""" +179 11 regularizer """no""" +179 11 optimizer """adadelta""" +179 11 training_loop 
"""owa""" +179 11 negative_sampler """basic""" +179 11 evaluator """rankbased""" +179 12 dataset """kinships""" +179 12 model """distmult""" +179 12 loss """marginranking""" +179 12 regularizer """no""" +179 12 optimizer """adadelta""" +179 12 training_loop """owa""" +179 12 negative_sampler """basic""" +179 12 evaluator """rankbased""" +179 13 dataset """kinships""" +179 13 model """distmult""" +179 13 loss """marginranking""" +179 13 regularizer """no""" +179 13 optimizer """adadelta""" +179 13 training_loop """owa""" +179 13 negative_sampler """basic""" +179 13 evaluator """rankbased""" +179 14 dataset """kinships""" +179 14 model """distmult""" +179 14 loss """marginranking""" +179 14 regularizer """no""" +179 14 optimizer """adadelta""" +179 14 training_loop """owa""" +179 14 negative_sampler """basic""" +179 14 evaluator """rankbased""" +179 15 dataset """kinships""" +179 15 model """distmult""" +179 15 loss """marginranking""" +179 15 regularizer """no""" +179 15 optimizer """adadelta""" +179 15 training_loop """owa""" +179 15 negative_sampler """basic""" +179 15 evaluator """rankbased""" +179 16 dataset """kinships""" +179 16 model """distmult""" +179 16 loss """marginranking""" +179 16 regularizer """no""" +179 16 optimizer """adadelta""" +179 16 training_loop """owa""" +179 16 negative_sampler """basic""" +179 16 evaluator """rankbased""" +179 17 dataset """kinships""" +179 17 model """distmult""" +179 17 loss """marginranking""" +179 17 regularizer """no""" +179 17 optimizer """adadelta""" +179 17 training_loop """owa""" +179 17 negative_sampler """basic""" +179 17 evaluator """rankbased""" +179 18 dataset """kinships""" +179 18 model """distmult""" +179 18 loss """marginranking""" +179 18 regularizer """no""" +179 18 optimizer """adadelta""" +179 18 training_loop """owa""" +179 18 negative_sampler """basic""" +179 18 evaluator """rankbased""" +179 19 dataset """kinships""" +179 19 model """distmult""" +179 19 loss """marginranking""" +179 19 regularizer 
"""no""" +179 19 optimizer """adadelta""" +179 19 training_loop """owa""" +179 19 negative_sampler """basic""" +179 19 evaluator """rankbased""" +179 20 dataset """kinships""" +179 20 model """distmult""" +179 20 loss """marginranking""" +179 20 regularizer """no""" +179 20 optimizer """adadelta""" +179 20 training_loop """owa""" +179 20 negative_sampler """basic""" +179 20 evaluator """rankbased""" +179 21 dataset """kinships""" +179 21 model """distmult""" +179 21 loss """marginranking""" +179 21 regularizer """no""" +179 21 optimizer """adadelta""" +179 21 training_loop """owa""" +179 21 negative_sampler """basic""" +179 21 evaluator """rankbased""" +179 22 dataset """kinships""" +179 22 model """distmult""" +179 22 loss """marginranking""" +179 22 regularizer """no""" +179 22 optimizer """adadelta""" +179 22 training_loop """owa""" +179 22 negative_sampler """basic""" +179 22 evaluator """rankbased""" +179 23 dataset """kinships""" +179 23 model """distmult""" +179 23 loss """marginranking""" +179 23 regularizer """no""" +179 23 optimizer """adadelta""" +179 23 training_loop """owa""" +179 23 negative_sampler """basic""" +179 23 evaluator """rankbased""" +179 24 dataset """kinships""" +179 24 model """distmult""" +179 24 loss """marginranking""" +179 24 regularizer """no""" +179 24 optimizer """adadelta""" +179 24 training_loop """owa""" +179 24 negative_sampler """basic""" +179 24 evaluator """rankbased""" +179 25 dataset """kinships""" +179 25 model """distmult""" +179 25 loss """marginranking""" +179 25 regularizer """no""" +179 25 optimizer """adadelta""" +179 25 training_loop """owa""" +179 25 negative_sampler """basic""" +179 25 evaluator """rankbased""" +179 26 dataset """kinships""" +179 26 model """distmult""" +179 26 loss """marginranking""" +179 26 regularizer """no""" +179 26 optimizer """adadelta""" +179 26 training_loop """owa""" +179 26 negative_sampler """basic""" +179 26 evaluator """rankbased""" +179 27 dataset """kinships""" +179 27 model 
"""distmult""" +179 27 loss """marginranking""" +179 27 regularizer """no""" +179 27 optimizer """adadelta""" +179 27 training_loop """owa""" +179 27 negative_sampler """basic""" +179 27 evaluator """rankbased""" +179 28 dataset """kinships""" +179 28 model """distmult""" +179 28 loss """marginranking""" +179 28 regularizer """no""" +179 28 optimizer """adadelta""" +179 28 training_loop """owa""" +179 28 negative_sampler """basic""" +179 28 evaluator """rankbased""" +179 29 dataset """kinships""" +179 29 model """distmult""" +179 29 loss """marginranking""" +179 29 regularizer """no""" +179 29 optimizer """adadelta""" +179 29 training_loop """owa""" +179 29 negative_sampler """basic""" +179 29 evaluator """rankbased""" +179 30 dataset """kinships""" +179 30 model """distmult""" +179 30 loss """marginranking""" +179 30 regularizer """no""" +179 30 optimizer """adadelta""" +179 30 training_loop """owa""" +179 30 negative_sampler """basic""" +179 30 evaluator """rankbased""" +179 31 dataset """kinships""" +179 31 model """distmult""" +179 31 loss """marginranking""" +179 31 regularizer """no""" +179 31 optimizer """adadelta""" +179 31 training_loop """owa""" +179 31 negative_sampler """basic""" +179 31 evaluator """rankbased""" +179 32 dataset """kinships""" +179 32 model """distmult""" +179 32 loss """marginranking""" +179 32 regularizer """no""" +179 32 optimizer """adadelta""" +179 32 training_loop """owa""" +179 32 negative_sampler """basic""" +179 32 evaluator """rankbased""" +179 33 dataset """kinships""" +179 33 model """distmult""" +179 33 loss """marginranking""" +179 33 regularizer """no""" +179 33 optimizer """adadelta""" +179 33 training_loop """owa""" +179 33 negative_sampler """basic""" +179 33 evaluator """rankbased""" +179 34 dataset """kinships""" +179 34 model """distmult""" +179 34 loss """marginranking""" +179 34 regularizer """no""" +179 34 optimizer """adadelta""" +179 34 training_loop """owa""" +179 34 negative_sampler """basic""" +179 34 
evaluator """rankbased""" +179 35 dataset """kinships""" +179 35 model """distmult""" +179 35 loss """marginranking""" +179 35 regularizer """no""" +179 35 optimizer """adadelta""" +179 35 training_loop """owa""" +179 35 negative_sampler """basic""" +179 35 evaluator """rankbased""" +179 36 dataset """kinships""" +179 36 model """distmult""" +179 36 loss """marginranking""" +179 36 regularizer """no""" +179 36 optimizer """adadelta""" +179 36 training_loop """owa""" +179 36 negative_sampler """basic""" +179 36 evaluator """rankbased""" +179 37 dataset """kinships""" +179 37 model """distmult""" +179 37 loss """marginranking""" +179 37 regularizer """no""" +179 37 optimizer """adadelta""" +179 37 training_loop """owa""" +179 37 negative_sampler """basic""" +179 37 evaluator """rankbased""" +179 38 dataset """kinships""" +179 38 model """distmult""" +179 38 loss """marginranking""" +179 38 regularizer """no""" +179 38 optimizer """adadelta""" +179 38 training_loop """owa""" +179 38 negative_sampler """basic""" +179 38 evaluator """rankbased""" +179 39 dataset """kinships""" +179 39 model """distmult""" +179 39 loss """marginranking""" +179 39 regularizer """no""" +179 39 optimizer """adadelta""" +179 39 training_loop """owa""" +179 39 negative_sampler """basic""" +179 39 evaluator """rankbased""" +179 40 dataset """kinships""" +179 40 model """distmult""" +179 40 loss """marginranking""" +179 40 regularizer """no""" +179 40 optimizer """adadelta""" +179 40 training_loop """owa""" +179 40 negative_sampler """basic""" +179 40 evaluator """rankbased""" +179 41 dataset """kinships""" +179 41 model """distmult""" +179 41 loss """marginranking""" +179 41 regularizer """no""" +179 41 optimizer """adadelta""" +179 41 training_loop """owa""" +179 41 negative_sampler """basic""" +179 41 evaluator """rankbased""" +179 42 dataset """kinships""" +179 42 model """distmult""" +179 42 loss """marginranking""" +179 42 regularizer """no""" +179 42 optimizer """adadelta""" +179 42 
training_loop """owa""" +179 42 negative_sampler """basic""" +179 42 evaluator """rankbased""" +179 43 dataset """kinships""" +179 43 model """distmult""" +179 43 loss """marginranking""" +179 43 regularizer """no""" +179 43 optimizer """adadelta""" +179 43 training_loop """owa""" +179 43 negative_sampler """basic""" +179 43 evaluator """rankbased""" +179 44 dataset """kinships""" +179 44 model """distmult""" +179 44 loss """marginranking""" +179 44 regularizer """no""" +179 44 optimizer """adadelta""" +179 44 training_loop """owa""" +179 44 negative_sampler """basic""" +179 44 evaluator """rankbased""" +179 45 dataset """kinships""" +179 45 model """distmult""" +179 45 loss """marginranking""" +179 45 regularizer """no""" +179 45 optimizer """adadelta""" +179 45 training_loop """owa""" +179 45 negative_sampler """basic""" +179 45 evaluator """rankbased""" +179 46 dataset """kinships""" +179 46 model """distmult""" +179 46 loss """marginranking""" +179 46 regularizer """no""" +179 46 optimizer """adadelta""" +179 46 training_loop """owa""" +179 46 negative_sampler """basic""" +179 46 evaluator """rankbased""" +179 47 dataset """kinships""" +179 47 model """distmult""" +179 47 loss """marginranking""" +179 47 regularizer """no""" +179 47 optimizer """adadelta""" +179 47 training_loop """owa""" +179 47 negative_sampler """basic""" +179 47 evaluator """rankbased""" +179 48 dataset """kinships""" +179 48 model """distmult""" +179 48 loss """marginranking""" +179 48 regularizer """no""" +179 48 optimizer """adadelta""" +179 48 training_loop """owa""" +179 48 negative_sampler """basic""" +179 48 evaluator """rankbased""" +179 49 dataset """kinships""" +179 49 model """distmult""" +179 49 loss """marginranking""" +179 49 regularizer """no""" +179 49 optimizer """adadelta""" +179 49 training_loop """owa""" +179 49 negative_sampler """basic""" +179 49 evaluator """rankbased""" +179 50 dataset """kinships""" +179 50 model """distmult""" +179 50 loss """marginranking""" +179 
50 regularizer """no""" +179 50 optimizer """adadelta""" +179 50 training_loop """owa""" +179 50 negative_sampler """basic""" +179 50 evaluator """rankbased""" +179 51 dataset """kinships""" +179 51 model """distmult""" +179 51 loss """marginranking""" +179 51 regularizer """no""" +179 51 optimizer """adadelta""" +179 51 training_loop """owa""" +179 51 negative_sampler """basic""" +179 51 evaluator """rankbased""" +179 52 dataset """kinships""" +179 52 model """distmult""" +179 52 loss """marginranking""" +179 52 regularizer """no""" +179 52 optimizer """adadelta""" +179 52 training_loop """owa""" +179 52 negative_sampler """basic""" +179 52 evaluator """rankbased""" +179 53 dataset """kinships""" +179 53 model """distmult""" +179 53 loss """marginranking""" +179 53 regularizer """no""" +179 53 optimizer """adadelta""" +179 53 training_loop """owa""" +179 53 negative_sampler """basic""" +179 53 evaluator """rankbased""" +179 54 dataset """kinships""" +179 54 model """distmult""" +179 54 loss """marginranking""" +179 54 regularizer """no""" +179 54 optimizer """adadelta""" +179 54 training_loop """owa""" +179 54 negative_sampler """basic""" +179 54 evaluator """rankbased""" +179 55 dataset """kinships""" +179 55 model """distmult""" +179 55 loss """marginranking""" +179 55 regularizer """no""" +179 55 optimizer """adadelta""" +179 55 training_loop """owa""" +179 55 negative_sampler """basic""" +179 55 evaluator """rankbased""" +179 56 dataset """kinships""" +179 56 model """distmult""" +179 56 loss """marginranking""" +179 56 regularizer """no""" +179 56 optimizer """adadelta""" +179 56 training_loop """owa""" +179 56 negative_sampler """basic""" +179 56 evaluator """rankbased""" +179 57 dataset """kinships""" +179 57 model """distmult""" +179 57 loss """marginranking""" +179 57 regularizer """no""" +179 57 optimizer """adadelta""" +179 57 training_loop """owa""" +179 57 negative_sampler """basic""" +179 57 evaluator """rankbased""" +179 58 dataset """kinships""" 
+179 58 model """distmult""" +179 58 loss """marginranking""" +179 58 regularizer """no""" +179 58 optimizer """adadelta""" +179 58 training_loop """owa""" +179 58 negative_sampler """basic""" +179 58 evaluator """rankbased""" +179 59 dataset """kinships""" +179 59 model """distmult""" +179 59 loss """marginranking""" +179 59 regularizer """no""" +179 59 optimizer """adadelta""" +179 59 training_loop """owa""" +179 59 negative_sampler """basic""" +179 59 evaluator """rankbased""" +179 60 dataset """kinships""" +179 60 model """distmult""" +179 60 loss """marginranking""" +179 60 regularizer """no""" +179 60 optimizer """adadelta""" +179 60 training_loop """owa""" +179 60 negative_sampler """basic""" +179 60 evaluator """rankbased""" +179 61 dataset """kinships""" +179 61 model """distmult""" +179 61 loss """marginranking""" +179 61 regularizer """no""" +179 61 optimizer """adadelta""" +179 61 training_loop """owa""" +179 61 negative_sampler """basic""" +179 61 evaluator """rankbased""" +179 62 dataset """kinships""" +179 62 model """distmult""" +179 62 loss """marginranking""" +179 62 regularizer """no""" +179 62 optimizer """adadelta""" +179 62 training_loop """owa""" +179 62 negative_sampler """basic""" +179 62 evaluator """rankbased""" +179 63 dataset """kinships""" +179 63 model """distmult""" +179 63 loss """marginranking""" +179 63 regularizer """no""" +179 63 optimizer """adadelta""" +179 63 training_loop """owa""" +179 63 negative_sampler """basic""" +179 63 evaluator """rankbased""" +179 64 dataset """kinships""" +179 64 model """distmult""" +179 64 loss """marginranking""" +179 64 regularizer """no""" +179 64 optimizer """adadelta""" +179 64 training_loop """owa""" +179 64 negative_sampler """basic""" +179 64 evaluator """rankbased""" +179 65 dataset """kinships""" +179 65 model """distmult""" +179 65 loss """marginranking""" +179 65 regularizer """no""" +179 65 optimizer """adadelta""" +179 65 training_loop """owa""" +179 65 negative_sampler """basic""" 
+179 65 evaluator """rankbased""" +179 66 dataset """kinships""" +179 66 model """distmult""" +179 66 loss """marginranking""" +179 66 regularizer """no""" +179 66 optimizer """adadelta""" +179 66 training_loop """owa""" +179 66 negative_sampler """basic""" +179 66 evaluator """rankbased""" +179 67 dataset """kinships""" +179 67 model """distmult""" +179 67 loss """marginranking""" +179 67 regularizer """no""" +179 67 optimizer """adadelta""" +179 67 training_loop """owa""" +179 67 negative_sampler """basic""" +179 67 evaluator """rankbased""" +179 68 dataset """kinships""" +179 68 model """distmult""" +179 68 loss """marginranking""" +179 68 regularizer """no""" +179 68 optimizer """adadelta""" +179 68 training_loop """owa""" +179 68 negative_sampler """basic""" +179 68 evaluator """rankbased""" +179 69 dataset """kinships""" +179 69 model """distmult""" +179 69 loss """marginranking""" +179 69 regularizer """no""" +179 69 optimizer """adadelta""" +179 69 training_loop """owa""" +179 69 negative_sampler """basic""" +179 69 evaluator """rankbased""" +179 70 dataset """kinships""" +179 70 model """distmult""" +179 70 loss """marginranking""" +179 70 regularizer """no""" +179 70 optimizer """adadelta""" +179 70 training_loop """owa""" +179 70 negative_sampler """basic""" +179 70 evaluator """rankbased""" +179 71 dataset """kinships""" +179 71 model """distmult""" +179 71 loss """marginranking""" +179 71 regularizer """no""" +179 71 optimizer """adadelta""" +179 71 training_loop """owa""" +179 71 negative_sampler """basic""" +179 71 evaluator """rankbased""" +179 72 dataset """kinships""" +179 72 model """distmult""" +179 72 loss """marginranking""" +179 72 regularizer """no""" +179 72 optimizer """adadelta""" +179 72 training_loop """owa""" +179 72 negative_sampler """basic""" +179 72 evaluator """rankbased""" +179 73 dataset """kinships""" +179 73 model """distmult""" +179 73 loss """marginranking""" +179 73 regularizer """no""" +179 73 optimizer """adadelta""" +179 
73 training_loop """owa""" +179 73 negative_sampler """basic""" +179 73 evaluator """rankbased""" +179 74 dataset """kinships""" +179 74 model """distmult""" +179 74 loss """marginranking""" +179 74 regularizer """no""" +179 74 optimizer """adadelta""" +179 74 training_loop """owa""" +179 74 negative_sampler """basic""" +179 74 evaluator """rankbased""" +179 75 dataset """kinships""" +179 75 model """distmult""" +179 75 loss """marginranking""" +179 75 regularizer """no""" +179 75 optimizer """adadelta""" +179 75 training_loop """owa""" +179 75 negative_sampler """basic""" +179 75 evaluator """rankbased""" +179 76 dataset """kinships""" +179 76 model """distmult""" +179 76 loss """marginranking""" +179 76 regularizer """no""" +179 76 optimizer """adadelta""" +179 76 training_loop """owa""" +179 76 negative_sampler """basic""" +179 76 evaluator """rankbased""" +179 77 dataset """kinships""" +179 77 model """distmult""" +179 77 loss """marginranking""" +179 77 regularizer """no""" +179 77 optimizer """adadelta""" +179 77 training_loop """owa""" +179 77 negative_sampler """basic""" +179 77 evaluator """rankbased""" +179 78 dataset """kinships""" +179 78 model """distmult""" +179 78 loss """marginranking""" +179 78 regularizer """no""" +179 78 optimizer """adadelta""" +179 78 training_loop """owa""" +179 78 negative_sampler """basic""" +179 78 evaluator """rankbased""" +179 79 dataset """kinships""" +179 79 model """distmult""" +179 79 loss """marginranking""" +179 79 regularizer """no""" +179 79 optimizer """adadelta""" +179 79 training_loop """owa""" +179 79 negative_sampler """basic""" +179 79 evaluator """rankbased""" +179 80 dataset """kinships""" +179 80 model """distmult""" +179 80 loss """marginranking""" +179 80 regularizer """no""" +179 80 optimizer """adadelta""" +179 80 training_loop """owa""" +179 80 negative_sampler """basic""" +179 80 evaluator """rankbased""" +179 81 dataset """kinships""" +179 81 model """distmult""" +179 81 loss """marginranking""" 
+179 81 regularizer """no""" +179 81 optimizer """adadelta""" +179 81 training_loop """owa""" +179 81 negative_sampler """basic""" +179 81 evaluator """rankbased""" +179 82 dataset """kinships""" +179 82 model """distmult""" +179 82 loss """marginranking""" +179 82 regularizer """no""" +179 82 optimizer """adadelta""" +179 82 training_loop """owa""" +179 82 negative_sampler """basic""" +179 82 evaluator """rankbased""" +179 83 dataset """kinships""" +179 83 model """distmult""" +179 83 loss """marginranking""" +179 83 regularizer """no""" +179 83 optimizer """adadelta""" +179 83 training_loop """owa""" +179 83 negative_sampler """basic""" +179 83 evaluator """rankbased""" +179 84 dataset """kinships""" +179 84 model """distmult""" +179 84 loss """marginranking""" +179 84 regularizer """no""" +179 84 optimizer """adadelta""" +179 84 training_loop """owa""" +179 84 negative_sampler """basic""" +179 84 evaluator """rankbased""" +179 85 dataset """kinships""" +179 85 model """distmult""" +179 85 loss """marginranking""" +179 85 regularizer """no""" +179 85 optimizer """adadelta""" +179 85 training_loop """owa""" +179 85 negative_sampler """basic""" +179 85 evaluator """rankbased""" +179 86 dataset """kinships""" +179 86 model """distmult""" +179 86 loss """marginranking""" +179 86 regularizer """no""" +179 86 optimizer """adadelta""" +179 86 training_loop """owa""" +179 86 negative_sampler """basic""" +179 86 evaluator """rankbased""" +179 87 dataset """kinships""" +179 87 model """distmult""" +179 87 loss """marginranking""" +179 87 regularizer """no""" +179 87 optimizer """adadelta""" +179 87 training_loop """owa""" +179 87 negative_sampler """basic""" +179 87 evaluator """rankbased""" +179 88 dataset """kinships""" +179 88 model """distmult""" +179 88 loss """marginranking""" +179 88 regularizer """no""" +179 88 optimizer """adadelta""" +179 88 training_loop """owa""" +179 88 negative_sampler """basic""" +179 88 evaluator """rankbased""" +179 89 dataset 
"""kinships""" +179 89 model """distmult""" +179 89 loss """marginranking""" +179 89 regularizer """no""" +179 89 optimizer """adadelta""" +179 89 training_loop """owa""" +179 89 negative_sampler """basic""" +179 89 evaluator """rankbased""" +179 90 dataset """kinships""" +179 90 model """distmult""" +179 90 loss """marginranking""" +179 90 regularizer """no""" +179 90 optimizer """adadelta""" +179 90 training_loop """owa""" +179 90 negative_sampler """basic""" +179 90 evaluator """rankbased""" +179 91 dataset """kinships""" +179 91 model """distmult""" +179 91 loss """marginranking""" +179 91 regularizer """no""" +179 91 optimizer """adadelta""" +179 91 training_loop """owa""" +179 91 negative_sampler """basic""" +179 91 evaluator """rankbased""" +179 92 dataset """kinships""" +179 92 model """distmult""" +179 92 loss """marginranking""" +179 92 regularizer """no""" +179 92 optimizer """adadelta""" +179 92 training_loop """owa""" +179 92 negative_sampler """basic""" +179 92 evaluator """rankbased""" +179 93 dataset """kinships""" +179 93 model """distmult""" +179 93 loss """marginranking""" +179 93 regularizer """no""" +179 93 optimizer """adadelta""" +179 93 training_loop """owa""" +179 93 negative_sampler """basic""" +179 93 evaluator """rankbased""" +179 94 dataset """kinships""" +179 94 model """distmult""" +179 94 loss """marginranking""" +179 94 regularizer """no""" +179 94 optimizer """adadelta""" +179 94 training_loop """owa""" +179 94 negative_sampler """basic""" +179 94 evaluator """rankbased""" +179 95 dataset """kinships""" +179 95 model """distmult""" +179 95 loss """marginranking""" +179 95 regularizer """no""" +179 95 optimizer """adadelta""" +179 95 training_loop """owa""" +179 95 negative_sampler """basic""" +179 95 evaluator """rankbased""" +179 96 dataset """kinships""" +179 96 model """distmult""" +179 96 loss """marginranking""" +179 96 regularizer """no""" +179 96 optimizer """adadelta""" +179 96 training_loop """owa""" +179 96 
negative_sampler """basic""" +179 96 evaluator """rankbased""" +179 97 dataset """kinships""" +179 97 model """distmult""" +179 97 loss """marginranking""" +179 97 regularizer """no""" +179 97 optimizer """adadelta""" +179 97 training_loop """owa""" +179 97 negative_sampler """basic""" +179 97 evaluator """rankbased""" +179 98 dataset """kinships""" +179 98 model """distmult""" +179 98 loss """marginranking""" +179 98 regularizer """no""" +179 98 optimizer """adadelta""" +179 98 training_loop """owa""" +179 98 negative_sampler """basic""" +179 98 evaluator """rankbased""" +179 99 dataset """kinships""" +179 99 model """distmult""" +179 99 loss """marginranking""" +179 99 regularizer """no""" +179 99 optimizer """adadelta""" +179 99 training_loop """owa""" +179 99 negative_sampler """basic""" +179 99 evaluator """rankbased""" +179 100 dataset """kinships""" +179 100 model """distmult""" +179 100 loss """marginranking""" +179 100 regularizer """no""" +179 100 optimizer """adadelta""" +179 100 training_loop """owa""" +179 100 negative_sampler """basic""" +179 100 evaluator """rankbased""" +180 1 model.embedding_dim 1.0 +180 1 loss.margin 9.911388829352763 +180 1 loss.adversarial_temperature 0.9044068719871966 +180 1 negative_sampler.num_negs_per_pos 44.0 +180 1 training.batch_size 2.0 +180 2 model.embedding_dim 2.0 +180 2 loss.margin 14.98184547170587 +180 2 loss.adversarial_temperature 0.3516688657727495 +180 2 negative_sampler.num_negs_per_pos 67.0 +180 2 training.batch_size 2.0 +180 3 model.embedding_dim 2.0 +180 3 loss.margin 11.80273630916156 +180 3 loss.adversarial_temperature 0.7970797499104254 +180 3 negative_sampler.num_negs_per_pos 60.0 +180 3 training.batch_size 2.0 +180 4 model.embedding_dim 1.0 +180 4 loss.margin 28.777888012915206 +180 4 loss.adversarial_temperature 0.4063561185277619 +180 4 negative_sampler.num_negs_per_pos 19.0 +180 4 training.batch_size 2.0 +180 5 model.embedding_dim 2.0 +180 5 loss.margin 6.54053190370572 +180 5 
loss.adversarial_temperature 0.6605604897810512 +180 5 negative_sampler.num_negs_per_pos 7.0 +180 5 training.batch_size 2.0 +180 6 model.embedding_dim 0.0 +180 6 loss.margin 3.8299375963613826 +180 6 loss.adversarial_temperature 0.27504827695583434 +180 6 negative_sampler.num_negs_per_pos 57.0 +180 6 training.batch_size 1.0 +180 7 model.embedding_dim 2.0 +180 7 loss.margin 7.969330766875911 +180 7 loss.adversarial_temperature 0.9429744179381488 +180 7 negative_sampler.num_negs_per_pos 57.0 +180 7 training.batch_size 1.0 +180 8 model.embedding_dim 0.0 +180 8 loss.margin 5.875502547689708 +180 8 loss.adversarial_temperature 0.6607515230374372 +180 8 negative_sampler.num_negs_per_pos 79.0 +180 8 training.batch_size 1.0 +180 9 model.embedding_dim 2.0 +180 9 loss.margin 10.899070866700747 +180 9 loss.adversarial_temperature 0.8703797333767084 +180 9 negative_sampler.num_negs_per_pos 31.0 +180 9 training.batch_size 0.0 +180 10 model.embedding_dim 1.0 +180 10 loss.margin 27.31995448266897 +180 10 loss.adversarial_temperature 0.8905807925035778 +180 10 negative_sampler.num_negs_per_pos 92.0 +180 10 training.batch_size 2.0 +180 11 model.embedding_dim 0.0 +180 11 loss.margin 27.815485805484826 +180 11 loss.adversarial_temperature 0.898967819628072 +180 11 negative_sampler.num_negs_per_pos 22.0 +180 11 training.batch_size 0.0 +180 12 model.embedding_dim 1.0 +180 12 loss.margin 28.48994294022004 +180 12 loss.adversarial_temperature 0.4974770030709399 +180 12 negative_sampler.num_negs_per_pos 59.0 +180 12 training.batch_size 1.0 +180 13 model.embedding_dim 0.0 +180 13 loss.margin 28.72549967129091 +180 13 loss.adversarial_temperature 0.6790258737276087 +180 13 negative_sampler.num_negs_per_pos 88.0 +180 13 training.batch_size 1.0 +180 14 model.embedding_dim 1.0 +180 14 loss.margin 19.955046697151975 +180 14 loss.adversarial_temperature 0.7601065073764132 +180 14 negative_sampler.num_negs_per_pos 60.0 +180 14 training.batch_size 2.0 +180 15 model.embedding_dim 0.0 +180 15 
loss.margin 7.091881251714362 +180 15 loss.adversarial_temperature 0.5151619659725296 +180 15 negative_sampler.num_negs_per_pos 72.0 +180 15 training.batch_size 2.0 +180 16 model.embedding_dim 0.0 +180 16 loss.margin 18.276761369355476 +180 16 loss.adversarial_temperature 0.8991179792159163 +180 16 negative_sampler.num_negs_per_pos 23.0 +180 16 training.batch_size 2.0 +180 17 model.embedding_dim 1.0 +180 17 loss.margin 7.507396699104461 +180 17 loss.adversarial_temperature 0.10828315373969868 +180 17 negative_sampler.num_negs_per_pos 36.0 +180 17 training.batch_size 0.0 +180 18 model.embedding_dim 2.0 +180 18 loss.margin 18.628210318196107 +180 18 loss.adversarial_temperature 0.8818345883041552 +180 18 negative_sampler.num_negs_per_pos 28.0 +180 18 training.batch_size 0.0 +180 19 model.embedding_dim 0.0 +180 19 loss.margin 2.30745236482524 +180 19 loss.adversarial_temperature 0.2564043125900629 +180 19 negative_sampler.num_negs_per_pos 22.0 +180 19 training.batch_size 0.0 +180 20 model.embedding_dim 0.0 +180 20 loss.margin 14.149609408846805 +180 20 loss.adversarial_temperature 0.4384677941738089 +180 20 negative_sampler.num_negs_per_pos 82.0 +180 20 training.batch_size 2.0 +180 21 model.embedding_dim 1.0 +180 21 loss.margin 18.79094498810509 +180 21 loss.adversarial_temperature 0.8356749171192169 +180 21 negative_sampler.num_negs_per_pos 75.0 +180 21 training.batch_size 1.0 +180 22 model.embedding_dim 0.0 +180 22 loss.margin 9.153361629999424 +180 22 loss.adversarial_temperature 0.153786588417248 +180 22 negative_sampler.num_negs_per_pos 24.0 +180 22 training.batch_size 1.0 +180 23 model.embedding_dim 0.0 +180 23 loss.margin 26.92968020901332 +180 23 loss.adversarial_temperature 0.19138675771723543 +180 23 negative_sampler.num_negs_per_pos 50.0 +180 23 training.batch_size 0.0 +180 24 model.embedding_dim 2.0 +180 24 loss.margin 16.640583985118326 +180 24 loss.adversarial_temperature 0.6288965116987375 +180 24 negative_sampler.num_negs_per_pos 70.0 +180 24 
training.batch_size 2.0 +180 25 model.embedding_dim 2.0 +180 25 loss.margin 16.81818709383283 +180 25 loss.adversarial_temperature 0.4794707651863347 +180 25 negative_sampler.num_negs_per_pos 3.0 +180 25 training.batch_size 1.0 +180 26 model.embedding_dim 1.0 +180 26 loss.margin 2.554334397322356 +180 26 loss.adversarial_temperature 0.8262067304698347 +180 26 negative_sampler.num_negs_per_pos 30.0 +180 26 training.batch_size 2.0 +180 27 model.embedding_dim 2.0 +180 27 loss.margin 8.813798191293028 +180 27 loss.adversarial_temperature 0.2606976278902468 +180 27 negative_sampler.num_negs_per_pos 41.0 +180 27 training.batch_size 1.0 +180 28 model.embedding_dim 1.0 +180 28 loss.margin 27.83265686072559 +180 28 loss.adversarial_temperature 0.17501500924542146 +180 28 negative_sampler.num_negs_per_pos 10.0 +180 28 training.batch_size 0.0 +180 29 model.embedding_dim 0.0 +180 29 loss.margin 16.561383711404066 +180 29 loss.adversarial_temperature 0.10612163511805418 +180 29 negative_sampler.num_negs_per_pos 12.0 +180 29 training.batch_size 2.0 +180 30 model.embedding_dim 1.0 +180 30 loss.margin 29.4419244888688 +180 30 loss.adversarial_temperature 0.9038852780098111 +180 30 negative_sampler.num_negs_per_pos 6.0 +180 30 training.batch_size 2.0 +180 31 model.embedding_dim 2.0 +180 31 loss.margin 27.553612795974352 +180 31 loss.adversarial_temperature 0.2271065411916009 +180 31 negative_sampler.num_negs_per_pos 79.0 +180 31 training.batch_size 2.0 +180 32 model.embedding_dim 0.0 +180 32 loss.margin 7.6057751394346225 +180 32 loss.adversarial_temperature 0.5258680831911974 +180 32 negative_sampler.num_negs_per_pos 75.0 +180 32 training.batch_size 0.0 +180 33 model.embedding_dim 2.0 +180 33 loss.margin 18.923023529679966 +180 33 loss.adversarial_temperature 0.16086552119401393 +180 33 negative_sampler.num_negs_per_pos 6.0 +180 33 training.batch_size 2.0 +180 34 model.embedding_dim 2.0 +180 34 loss.margin 14.82408130506605 +180 34 loss.adversarial_temperature 0.9802502765524878 
+180 34 negative_sampler.num_negs_per_pos 13.0 +180 34 training.batch_size 2.0 +180 35 model.embedding_dim 1.0 +180 35 loss.margin 16.324949122329965 +180 35 loss.adversarial_temperature 0.4905925560675166 +180 35 negative_sampler.num_negs_per_pos 43.0 +180 35 training.batch_size 0.0 +180 36 model.embedding_dim 1.0 +180 36 loss.margin 10.565700757309848 +180 36 loss.adversarial_temperature 0.18672756891194858 +180 36 negative_sampler.num_negs_per_pos 3.0 +180 36 training.batch_size 1.0 +180 37 model.embedding_dim 2.0 +180 37 loss.margin 20.473588197331154 +180 37 loss.adversarial_temperature 0.9864954002907145 +180 37 negative_sampler.num_negs_per_pos 64.0 +180 37 training.batch_size 1.0 +180 38 model.embedding_dim 0.0 +180 38 loss.margin 15.482073541833037 +180 38 loss.adversarial_temperature 0.37800918759377333 +180 38 negative_sampler.num_negs_per_pos 33.0 +180 38 training.batch_size 2.0 +180 39 model.embedding_dim 0.0 +180 39 loss.margin 14.836994724381249 +180 39 loss.adversarial_temperature 0.9796703075839749 +180 39 negative_sampler.num_negs_per_pos 49.0 +180 39 training.batch_size 1.0 +180 40 model.embedding_dim 2.0 +180 40 loss.margin 24.50427013596618 +180 40 loss.adversarial_temperature 0.5466846016050727 +180 40 negative_sampler.num_negs_per_pos 23.0 +180 40 training.batch_size 2.0 +180 41 model.embedding_dim 2.0 +180 41 loss.margin 1.4270514055612324 +180 41 loss.adversarial_temperature 0.2737228184864944 +180 41 negative_sampler.num_negs_per_pos 22.0 +180 41 training.batch_size 0.0 +180 42 model.embedding_dim 1.0 +180 42 loss.margin 28.263879410520452 +180 42 loss.adversarial_temperature 0.3025599757077295 +180 42 negative_sampler.num_negs_per_pos 99.0 +180 42 training.batch_size 1.0 +180 43 model.embedding_dim 1.0 +180 43 loss.margin 19.530589812558652 +180 43 loss.adversarial_temperature 0.39880891089604076 +180 43 negative_sampler.num_negs_per_pos 2.0 +180 43 training.batch_size 2.0 +180 44 model.embedding_dim 0.0 +180 44 loss.margin 
12.488526098827471 +180 44 loss.adversarial_temperature 0.39107221900714084 +180 44 negative_sampler.num_negs_per_pos 43.0 +180 44 training.batch_size 0.0 +180 45 model.embedding_dim 0.0 +180 45 loss.margin 21.3914240667791 +180 45 loss.adversarial_temperature 0.8126259207858058 +180 45 negative_sampler.num_negs_per_pos 12.0 +180 45 training.batch_size 1.0 +180 46 model.embedding_dim 2.0 +180 46 loss.margin 10.029834851676258 +180 46 loss.adversarial_temperature 0.10028882036549223 +180 46 negative_sampler.num_negs_per_pos 6.0 +180 46 training.batch_size 0.0 +180 47 model.embedding_dim 2.0 +180 47 loss.margin 8.522234027018168 +180 47 loss.adversarial_temperature 0.66432150475934 +180 47 negative_sampler.num_negs_per_pos 83.0 +180 47 training.batch_size 1.0 +180 48 model.embedding_dim 0.0 +180 48 loss.margin 4.702704790304362 +180 48 loss.adversarial_temperature 0.5017067547008655 +180 48 negative_sampler.num_negs_per_pos 21.0 +180 48 training.batch_size 2.0 +180 49 model.embedding_dim 0.0 +180 49 loss.margin 6.686568535139607 +180 49 loss.adversarial_temperature 0.5431650259230886 +180 49 negative_sampler.num_negs_per_pos 72.0 +180 49 training.batch_size 1.0 +180 50 model.embedding_dim 0.0 +180 50 loss.margin 16.08447488546608 +180 50 loss.adversarial_temperature 0.12246251449220871 +180 50 negative_sampler.num_negs_per_pos 53.0 +180 50 training.batch_size 0.0 +180 51 model.embedding_dim 2.0 +180 51 loss.margin 21.828659236718547 +180 51 loss.adversarial_temperature 0.13841404968530913 +180 51 negative_sampler.num_negs_per_pos 28.0 +180 51 training.batch_size 0.0 +180 52 model.embedding_dim 1.0 +180 52 loss.margin 10.401596285115321 +180 52 loss.adversarial_temperature 0.5865673834895866 +180 52 negative_sampler.num_negs_per_pos 89.0 +180 52 training.batch_size 1.0 +180 53 model.embedding_dim 1.0 +180 53 loss.margin 22.97117050850559 +180 53 loss.adversarial_temperature 0.22068588904556863 +180 53 negative_sampler.num_negs_per_pos 20.0 +180 53 training.batch_size 
0.0 +180 54 model.embedding_dim 0.0 +180 54 loss.margin 16.754176359771286 +180 54 loss.adversarial_temperature 0.2214622936105735 +180 54 negative_sampler.num_negs_per_pos 23.0 +180 54 training.batch_size 1.0 +180 55 model.embedding_dim 1.0 +180 55 loss.margin 7.804791287553342 +180 55 loss.adversarial_temperature 0.14883169387659864 +180 55 negative_sampler.num_negs_per_pos 28.0 +180 55 training.batch_size 2.0 +180 56 model.embedding_dim 0.0 +180 56 loss.margin 7.285642175658345 +180 56 loss.adversarial_temperature 0.2544271891836928 +180 56 negative_sampler.num_negs_per_pos 40.0 +180 56 training.batch_size 1.0 +180 57 model.embedding_dim 1.0 +180 57 loss.margin 17.898270919393003 +180 57 loss.adversarial_temperature 0.8440564620490258 +180 57 negative_sampler.num_negs_per_pos 27.0 +180 57 training.batch_size 0.0 +180 58 model.embedding_dim 0.0 +180 58 loss.margin 20.235475085801458 +180 58 loss.adversarial_temperature 0.25499809313536836 +180 58 negative_sampler.num_negs_per_pos 34.0 +180 58 training.batch_size 2.0 +180 59 model.embedding_dim 2.0 +180 59 loss.margin 15.701819788469551 +180 59 loss.adversarial_temperature 0.14320130564592132 +180 59 negative_sampler.num_negs_per_pos 25.0 +180 59 training.batch_size 0.0 +180 60 model.embedding_dim 0.0 +180 60 loss.margin 23.97850873942956 +180 60 loss.adversarial_temperature 0.8367212091992359 +180 60 negative_sampler.num_negs_per_pos 10.0 +180 60 training.batch_size 0.0 +180 61 model.embedding_dim 2.0 +180 61 loss.margin 25.584764288759107 +180 61 loss.adversarial_temperature 0.5550841324812266 +180 61 negative_sampler.num_negs_per_pos 72.0 +180 61 training.batch_size 2.0 +180 62 model.embedding_dim 2.0 +180 62 loss.margin 24.22872742800065 +180 62 loss.adversarial_temperature 0.3796062347717699 +180 62 negative_sampler.num_negs_per_pos 27.0 +180 62 training.batch_size 2.0 +180 63 model.embedding_dim 2.0 +180 63 loss.margin 5.457689818073489 +180 63 loss.adversarial_temperature 0.33563261711705794 +180 63 
negative_sampler.num_negs_per_pos 68.0 +180 63 training.batch_size 1.0 +180 64 model.embedding_dim 2.0 +180 64 loss.margin 25.087674834583957 +180 64 loss.adversarial_temperature 0.5631623272652546 +180 64 negative_sampler.num_negs_per_pos 30.0 +180 64 training.batch_size 0.0 +180 65 model.embedding_dim 2.0 +180 65 loss.margin 2.456607340074389 +180 65 loss.adversarial_temperature 0.9767058789217464 +180 65 negative_sampler.num_negs_per_pos 52.0 +180 65 training.batch_size 0.0 +180 66 model.embedding_dim 1.0 +180 66 loss.margin 1.8676270376776758 +180 66 loss.adversarial_temperature 0.9467223405665637 +180 66 negative_sampler.num_negs_per_pos 16.0 +180 66 training.batch_size 0.0 +180 67 model.embedding_dim 0.0 +180 67 loss.margin 3.8041118065536477 +180 67 loss.adversarial_temperature 0.3349427613387874 +180 67 negative_sampler.num_negs_per_pos 51.0 +180 67 training.batch_size 1.0 +180 68 model.embedding_dim 1.0 +180 68 loss.margin 17.664437814406895 +180 68 loss.adversarial_temperature 0.5726008924909878 +180 68 negative_sampler.num_negs_per_pos 60.0 +180 68 training.batch_size 1.0 +180 69 model.embedding_dim 0.0 +180 69 loss.margin 21.930683965024855 +180 69 loss.adversarial_temperature 0.9869649507919623 +180 69 negative_sampler.num_negs_per_pos 91.0 +180 69 training.batch_size 0.0 +180 70 model.embedding_dim 0.0 +180 70 loss.margin 12.96130245011461 +180 70 loss.adversarial_temperature 0.6958517226746745 +180 70 negative_sampler.num_negs_per_pos 91.0 +180 70 training.batch_size 2.0 +180 71 model.embedding_dim 0.0 +180 71 loss.margin 26.33830544858164 +180 71 loss.adversarial_temperature 0.7771740511437529 +180 71 negative_sampler.num_negs_per_pos 62.0 +180 71 training.batch_size 1.0 +180 72 model.embedding_dim 0.0 +180 72 loss.margin 13.00350333864421 +180 72 loss.adversarial_temperature 0.5824628130666896 +180 72 negative_sampler.num_negs_per_pos 86.0 +180 72 training.batch_size 1.0 +180 73 model.embedding_dim 0.0 +180 73 loss.margin 1.1544617758362066 +180 73 
loss.adversarial_temperature 0.6553387701292269 +180 73 negative_sampler.num_negs_per_pos 42.0 +180 73 training.batch_size 2.0 +180 74 model.embedding_dim 2.0 +180 74 loss.margin 21.330586305919123 +180 74 loss.adversarial_temperature 0.3377031747343532 +180 74 negative_sampler.num_negs_per_pos 80.0 +180 74 training.batch_size 0.0 +180 75 model.embedding_dim 0.0 +180 75 loss.margin 28.52440551830358 +180 75 loss.adversarial_temperature 0.37653276438281313 +180 75 negative_sampler.num_negs_per_pos 65.0 +180 75 training.batch_size 1.0 +180 76 model.embedding_dim 0.0 +180 76 loss.margin 4.878237583748392 +180 76 loss.adversarial_temperature 0.22874436547913168 +180 76 negative_sampler.num_negs_per_pos 8.0 +180 76 training.batch_size 0.0 +180 77 model.embedding_dim 1.0 +180 77 loss.margin 10.154193435252967 +180 77 loss.adversarial_temperature 0.28514866212925555 +180 77 negative_sampler.num_negs_per_pos 79.0 +180 77 training.batch_size 2.0 +180 78 model.embedding_dim 2.0 +180 78 loss.margin 27.87234143218569 +180 78 loss.adversarial_temperature 0.8854111083554624 +180 78 negative_sampler.num_negs_per_pos 71.0 +180 78 training.batch_size 2.0 +180 79 model.embedding_dim 1.0 +180 79 loss.margin 18.326079796985066 +180 79 loss.adversarial_temperature 0.23427862111863812 +180 79 negative_sampler.num_negs_per_pos 90.0 +180 79 training.batch_size 2.0 +180 80 model.embedding_dim 2.0 +180 80 loss.margin 23.051105337041037 +180 80 loss.adversarial_temperature 0.5262931487453503 +180 80 negative_sampler.num_negs_per_pos 57.0 +180 80 training.batch_size 2.0 +180 81 model.embedding_dim 2.0 +180 81 loss.margin 13.22757748747688 +180 81 loss.adversarial_temperature 0.24544093878621545 +180 81 negative_sampler.num_negs_per_pos 75.0 +180 81 training.batch_size 1.0 +180 82 model.embedding_dim 2.0 +180 82 loss.margin 26.914873945313186 +180 82 loss.adversarial_temperature 0.6985708766593027 +180 82 negative_sampler.num_negs_per_pos 64.0 +180 82 training.batch_size 2.0 +180 83 
model.embedding_dim 1.0 +180 83 loss.margin 19.875649434086977 +180 83 loss.adversarial_temperature 0.7609707938463295 +180 83 negative_sampler.num_negs_per_pos 95.0 +180 83 training.batch_size 2.0 +180 84 model.embedding_dim 0.0 +180 84 loss.margin 26.644703675741304 +180 84 loss.adversarial_temperature 0.34246119514588347 +180 84 negative_sampler.num_negs_per_pos 13.0 +180 84 training.batch_size 0.0 +180 85 model.embedding_dim 2.0 +180 85 loss.margin 29.670470999191263 +180 85 loss.adversarial_temperature 0.61242594315344 +180 85 negative_sampler.num_negs_per_pos 34.0 +180 85 training.batch_size 1.0 +180 86 model.embedding_dim 1.0 +180 86 loss.margin 17.10921915643275 +180 86 loss.adversarial_temperature 0.2849849604830598 +180 86 negative_sampler.num_negs_per_pos 43.0 +180 86 training.batch_size 2.0 +180 87 model.embedding_dim 0.0 +180 87 loss.margin 24.679203428286684 +180 87 loss.adversarial_temperature 0.4300788093433354 +180 87 negative_sampler.num_negs_per_pos 83.0 +180 87 training.batch_size 2.0 +180 88 model.embedding_dim 2.0 +180 88 loss.margin 9.051068012216666 +180 88 loss.adversarial_temperature 0.7509687239929786 +180 88 negative_sampler.num_negs_per_pos 42.0 +180 88 training.batch_size 0.0 +180 89 model.embedding_dim 1.0 +180 89 loss.margin 7.6190284571532825 +180 89 loss.adversarial_temperature 0.12131271162780692 +180 89 negative_sampler.num_negs_per_pos 82.0 +180 89 training.batch_size 1.0 +180 90 model.embedding_dim 2.0 +180 90 loss.margin 13.40191906298832 +180 90 loss.adversarial_temperature 0.38844094420693465 +180 90 negative_sampler.num_negs_per_pos 61.0 +180 90 training.batch_size 2.0 +180 91 model.embedding_dim 1.0 +180 91 loss.margin 15.79051020246492 +180 91 loss.adversarial_temperature 0.54765100603824 +180 91 negative_sampler.num_negs_per_pos 86.0 +180 91 training.batch_size 1.0 +180 92 model.embedding_dim 2.0 +180 92 loss.margin 15.087624373125038 +180 92 loss.adversarial_temperature 0.2269871567119731 +180 92 
negative_sampler.num_negs_per_pos 46.0 +180 92 training.batch_size 1.0 +180 93 model.embedding_dim 1.0 +180 93 loss.margin 9.273097117719185 +180 93 loss.adversarial_temperature 0.15056530058111264 +180 93 negative_sampler.num_negs_per_pos 21.0 +180 93 training.batch_size 1.0 +180 94 model.embedding_dim 2.0 +180 94 loss.margin 18.77231012159958 +180 94 loss.adversarial_temperature 0.44297578967934714 +180 94 negative_sampler.num_negs_per_pos 7.0 +180 94 training.batch_size 2.0 +180 95 model.embedding_dim 2.0 +180 95 loss.margin 4.593862816753163 +180 95 loss.adversarial_temperature 0.9643031786032978 +180 95 negative_sampler.num_negs_per_pos 10.0 +180 95 training.batch_size 1.0 +180 96 model.embedding_dim 2.0 +180 96 loss.margin 25.38106516934527 +180 96 loss.adversarial_temperature 0.25448452204179106 +180 96 negative_sampler.num_negs_per_pos 83.0 +180 96 training.batch_size 1.0 +180 97 model.embedding_dim 1.0 +180 97 loss.margin 26.522545903392178 +180 97 loss.adversarial_temperature 0.20853847647133184 +180 97 negative_sampler.num_negs_per_pos 0.0 +180 97 training.batch_size 2.0 +180 98 model.embedding_dim 2.0 +180 98 loss.margin 4.144385261317337 +180 98 loss.adversarial_temperature 0.3529120531121157 +180 98 negative_sampler.num_negs_per_pos 76.0 +180 98 training.batch_size 1.0 +180 99 model.embedding_dim 0.0 +180 99 loss.margin 23.61478123012662 +180 99 loss.adversarial_temperature 0.8260377168504147 +180 99 negative_sampler.num_negs_per_pos 15.0 +180 99 training.batch_size 1.0 +180 100 model.embedding_dim 2.0 +180 100 loss.margin 10.726754062647462 +180 100 loss.adversarial_temperature 0.35769087807991184 +180 100 negative_sampler.num_negs_per_pos 70.0 +180 100 training.batch_size 2.0 +180 1 dataset """kinships""" +180 1 model """distmult""" +180 1 loss """nssa""" +180 1 regularizer """no""" +180 1 optimizer """adadelta""" +180 1 training_loop """owa""" +180 1 negative_sampler """basic""" +180 1 evaluator """rankbased""" +180 2 dataset """kinships""" +180 2 
model """distmult""" +180 2 loss """nssa""" +180 2 regularizer """no""" +180 2 optimizer """adadelta""" +180 2 training_loop """owa""" +180 2 negative_sampler """basic""" +180 2 evaluator """rankbased""" +180 3 dataset """kinships""" +180 3 model """distmult""" +180 3 loss """nssa""" +180 3 regularizer """no""" +180 3 optimizer """adadelta""" +180 3 training_loop """owa""" +180 3 negative_sampler """basic""" +180 3 evaluator """rankbased""" +180 4 dataset """kinships""" +180 4 model """distmult""" +180 4 loss """nssa""" +180 4 regularizer """no""" +180 4 optimizer """adadelta""" +180 4 training_loop """owa""" +180 4 negative_sampler """basic""" +180 4 evaluator """rankbased""" +180 5 dataset """kinships""" +180 5 model """distmult""" +180 5 loss """nssa""" +180 5 regularizer """no""" +180 5 optimizer """adadelta""" +180 5 training_loop """owa""" +180 5 negative_sampler """basic""" +180 5 evaluator """rankbased""" +180 6 dataset """kinships""" +180 6 model """distmult""" +180 6 loss """nssa""" +180 6 regularizer """no""" +180 6 optimizer """adadelta""" +180 6 training_loop """owa""" +180 6 negative_sampler """basic""" +180 6 evaluator """rankbased""" +180 7 dataset """kinships""" +180 7 model """distmult""" +180 7 loss """nssa""" +180 7 regularizer """no""" +180 7 optimizer """adadelta""" +180 7 training_loop """owa""" +180 7 negative_sampler """basic""" +180 7 evaluator """rankbased""" +180 8 dataset """kinships""" +180 8 model """distmult""" +180 8 loss """nssa""" +180 8 regularizer """no""" +180 8 optimizer """adadelta""" +180 8 training_loop """owa""" +180 8 negative_sampler """basic""" +180 8 evaluator """rankbased""" +180 9 dataset """kinships""" +180 9 model """distmult""" +180 9 loss """nssa""" +180 9 regularizer """no""" +180 9 optimizer """adadelta""" +180 9 training_loop """owa""" +180 9 negative_sampler """basic""" +180 9 evaluator """rankbased""" +180 10 dataset """kinships""" +180 10 model """distmult""" +180 10 loss """nssa""" +180 10 regularizer 
"""no""" +180 10 optimizer """adadelta""" +180 10 training_loop """owa""" +180 10 negative_sampler """basic""" +180 10 evaluator """rankbased""" +180 11 dataset """kinships""" +180 11 model """distmult""" +180 11 loss """nssa""" +180 11 regularizer """no""" +180 11 optimizer """adadelta""" +180 11 training_loop """owa""" +180 11 negative_sampler """basic""" +180 11 evaluator """rankbased""" +180 12 dataset """kinships""" +180 12 model """distmult""" +180 12 loss """nssa""" +180 12 regularizer """no""" +180 12 optimizer """adadelta""" +180 12 training_loop """owa""" +180 12 negative_sampler """basic""" +180 12 evaluator """rankbased""" +180 13 dataset """kinships""" +180 13 model """distmult""" +180 13 loss """nssa""" +180 13 regularizer """no""" +180 13 optimizer """adadelta""" +180 13 training_loop """owa""" +180 13 negative_sampler """basic""" +180 13 evaluator """rankbased""" +180 14 dataset """kinships""" +180 14 model """distmult""" +180 14 loss """nssa""" +180 14 regularizer """no""" +180 14 optimizer """adadelta""" +180 14 training_loop """owa""" +180 14 negative_sampler """basic""" +180 14 evaluator """rankbased""" +180 15 dataset """kinships""" +180 15 model """distmult""" +180 15 loss """nssa""" +180 15 regularizer """no""" +180 15 optimizer """adadelta""" +180 15 training_loop """owa""" +180 15 negative_sampler """basic""" +180 15 evaluator """rankbased""" +180 16 dataset """kinships""" +180 16 model """distmult""" +180 16 loss """nssa""" +180 16 regularizer """no""" +180 16 optimizer """adadelta""" +180 16 training_loop """owa""" +180 16 negative_sampler """basic""" +180 16 evaluator """rankbased""" +180 17 dataset """kinships""" +180 17 model """distmult""" +180 17 loss """nssa""" +180 17 regularizer """no""" +180 17 optimizer """adadelta""" +180 17 training_loop """owa""" +180 17 negative_sampler """basic""" +180 17 evaluator """rankbased""" +180 18 dataset """kinships""" +180 18 model """distmult""" +180 18 loss """nssa""" +180 18 regularizer 
"""no""" +180 18 optimizer """adadelta""" +180 18 training_loop """owa""" +180 18 negative_sampler """basic""" +180 18 evaluator """rankbased""" +180 19 dataset """kinships""" +180 19 model """distmult""" +180 19 loss """nssa""" +180 19 regularizer """no""" +180 19 optimizer """adadelta""" +180 19 training_loop """owa""" +180 19 negative_sampler """basic""" +180 19 evaluator """rankbased""" +180 20 dataset """kinships""" +180 20 model """distmult""" +180 20 loss """nssa""" +180 20 regularizer """no""" +180 20 optimizer """adadelta""" +180 20 training_loop """owa""" +180 20 negative_sampler """basic""" +180 20 evaluator """rankbased""" +180 21 dataset """kinships""" +180 21 model """distmult""" +180 21 loss """nssa""" +180 21 regularizer """no""" +180 21 optimizer """adadelta""" +180 21 training_loop """owa""" +180 21 negative_sampler """basic""" +180 21 evaluator """rankbased""" +180 22 dataset """kinships""" +180 22 model """distmult""" +180 22 loss """nssa""" +180 22 regularizer """no""" +180 22 optimizer """adadelta""" +180 22 training_loop """owa""" +180 22 negative_sampler """basic""" +180 22 evaluator """rankbased""" +180 23 dataset """kinships""" +180 23 model """distmult""" +180 23 loss """nssa""" +180 23 regularizer """no""" +180 23 optimizer """adadelta""" +180 23 training_loop """owa""" +180 23 negative_sampler """basic""" +180 23 evaluator """rankbased""" +180 24 dataset """kinships""" +180 24 model """distmult""" +180 24 loss """nssa""" +180 24 regularizer """no""" +180 24 optimizer """adadelta""" +180 24 training_loop """owa""" +180 24 negative_sampler """basic""" +180 24 evaluator """rankbased""" +180 25 dataset """kinships""" +180 25 model """distmult""" +180 25 loss """nssa""" +180 25 regularizer """no""" +180 25 optimizer """adadelta""" +180 25 training_loop """owa""" +180 25 negative_sampler """basic""" +180 25 evaluator """rankbased""" +180 26 dataset """kinships""" +180 26 model """distmult""" +180 26 loss """nssa""" +180 26 regularizer 
"""no""" +180 26 optimizer """adadelta""" +180 26 training_loop """owa""" +180 26 negative_sampler """basic""" +180 26 evaluator """rankbased""" +180 27 dataset """kinships""" +180 27 model """distmult""" +180 27 loss """nssa""" +180 27 regularizer """no""" +180 27 optimizer """adadelta""" +180 27 training_loop """owa""" +180 27 negative_sampler """basic""" +180 27 evaluator """rankbased""" +180 28 dataset """kinships""" +180 28 model """distmult""" +180 28 loss """nssa""" +180 28 regularizer """no""" +180 28 optimizer """adadelta""" +180 28 training_loop """owa""" +180 28 negative_sampler """basic""" +180 28 evaluator """rankbased""" +180 29 dataset """kinships""" +180 29 model """distmult""" +180 29 loss """nssa""" +180 29 regularizer """no""" +180 29 optimizer """adadelta""" +180 29 training_loop """owa""" +180 29 negative_sampler """basic""" +180 29 evaluator """rankbased""" +180 30 dataset """kinships""" +180 30 model """distmult""" +180 30 loss """nssa""" +180 30 regularizer """no""" +180 30 optimizer """adadelta""" +180 30 training_loop """owa""" +180 30 negative_sampler """basic""" +180 30 evaluator """rankbased""" +180 31 dataset """kinships""" +180 31 model """distmult""" +180 31 loss """nssa""" +180 31 regularizer """no""" +180 31 optimizer """adadelta""" +180 31 training_loop """owa""" +180 31 negative_sampler """basic""" +180 31 evaluator """rankbased""" +180 32 dataset """kinships""" +180 32 model """distmult""" +180 32 loss """nssa""" +180 32 regularizer """no""" +180 32 optimizer """adadelta""" +180 32 training_loop """owa""" +180 32 negative_sampler """basic""" +180 32 evaluator """rankbased""" +180 33 dataset """kinships""" +180 33 model """distmult""" +180 33 loss """nssa""" +180 33 regularizer """no""" +180 33 optimizer """adadelta""" +180 33 training_loop """owa""" +180 33 negative_sampler """basic""" +180 33 evaluator """rankbased""" +180 34 dataset """kinships""" +180 34 model """distmult""" +180 34 loss """nssa""" +180 34 regularizer 
"""no""" +180 34 optimizer """adadelta""" +180 34 training_loop """owa""" +180 34 negative_sampler """basic""" +180 34 evaluator """rankbased""" +180 35 dataset """kinships""" +180 35 model """distmult""" +180 35 loss """nssa""" +180 35 regularizer """no""" +180 35 optimizer """adadelta""" +180 35 training_loop """owa""" +180 35 negative_sampler """basic""" +180 35 evaluator """rankbased""" +180 36 dataset """kinships""" +180 36 model """distmult""" +180 36 loss """nssa""" +180 36 regularizer """no""" +180 36 optimizer """adadelta""" +180 36 training_loop """owa""" +180 36 negative_sampler """basic""" +180 36 evaluator """rankbased""" +180 37 dataset """kinships""" +180 37 model """distmult""" +180 37 loss """nssa""" +180 37 regularizer """no""" +180 37 optimizer """adadelta""" +180 37 training_loop """owa""" +180 37 negative_sampler """basic""" +180 37 evaluator """rankbased""" +180 38 dataset """kinships""" +180 38 model """distmult""" +180 38 loss """nssa""" +180 38 regularizer """no""" +180 38 optimizer """adadelta""" +180 38 training_loop """owa""" +180 38 negative_sampler """basic""" +180 38 evaluator """rankbased""" +180 39 dataset """kinships""" +180 39 model """distmult""" +180 39 loss """nssa""" +180 39 regularizer """no""" +180 39 optimizer """adadelta""" +180 39 training_loop """owa""" +180 39 negative_sampler """basic""" +180 39 evaluator """rankbased""" +180 40 dataset """kinships""" +180 40 model """distmult""" +180 40 loss """nssa""" +180 40 regularizer """no""" +180 40 optimizer """adadelta""" +180 40 training_loop """owa""" +180 40 negative_sampler """basic""" +180 40 evaluator """rankbased""" +180 41 dataset """kinships""" +180 41 model """distmult""" +180 41 loss """nssa""" +180 41 regularizer """no""" +180 41 optimizer """adadelta""" +180 41 training_loop """owa""" +180 41 negative_sampler """basic""" +180 41 evaluator """rankbased""" +180 42 dataset """kinships""" +180 42 model """distmult""" +180 42 loss """nssa""" +180 42 regularizer 
"""no""" +180 42 optimizer """adadelta""" +180 42 training_loop """owa""" +180 42 negative_sampler """basic""" +180 42 evaluator """rankbased""" +180 43 dataset """kinships""" +180 43 model """distmult""" +180 43 loss """nssa""" +180 43 regularizer """no""" +180 43 optimizer """adadelta""" +180 43 training_loop """owa""" +180 43 negative_sampler """basic""" +180 43 evaluator """rankbased""" +180 44 dataset """kinships""" +180 44 model """distmult""" +180 44 loss """nssa""" +180 44 regularizer """no""" +180 44 optimizer """adadelta""" +180 44 training_loop """owa""" +180 44 negative_sampler """basic""" +180 44 evaluator """rankbased""" +180 45 dataset """kinships""" +180 45 model """distmult""" +180 45 loss """nssa""" +180 45 regularizer """no""" +180 45 optimizer """adadelta""" +180 45 training_loop """owa""" +180 45 negative_sampler """basic""" +180 45 evaluator """rankbased""" +180 46 dataset """kinships""" +180 46 model """distmult""" +180 46 loss """nssa""" +180 46 regularizer """no""" +180 46 optimizer """adadelta""" +180 46 training_loop """owa""" +180 46 negative_sampler """basic""" +180 46 evaluator """rankbased""" +180 47 dataset """kinships""" +180 47 model """distmult""" +180 47 loss """nssa""" +180 47 regularizer """no""" +180 47 optimizer """adadelta""" +180 47 training_loop """owa""" +180 47 negative_sampler """basic""" +180 47 evaluator """rankbased""" +180 48 dataset """kinships""" +180 48 model """distmult""" +180 48 loss """nssa""" +180 48 regularizer """no""" +180 48 optimizer """adadelta""" +180 48 training_loop """owa""" +180 48 negative_sampler """basic""" +180 48 evaluator """rankbased""" +180 49 dataset """kinships""" +180 49 model """distmult""" +180 49 loss """nssa""" +180 49 regularizer """no""" +180 49 optimizer """adadelta""" +180 49 training_loop """owa""" +180 49 negative_sampler """basic""" +180 49 evaluator """rankbased""" +180 50 dataset """kinships""" +180 50 model """distmult""" +180 50 loss """nssa""" +180 50 regularizer 
"""no""" +180 50 optimizer """adadelta""" +180 50 training_loop """owa""" +180 50 negative_sampler """basic""" +180 50 evaluator """rankbased""" +180 51 dataset """kinships""" +180 51 model """distmult""" +180 51 loss """nssa""" +180 51 regularizer """no""" +180 51 optimizer """adadelta""" +180 51 training_loop """owa""" +180 51 negative_sampler """basic""" +180 51 evaluator """rankbased""" +180 52 dataset """kinships""" +180 52 model """distmult""" +180 52 loss """nssa""" +180 52 regularizer """no""" +180 52 optimizer """adadelta""" +180 52 training_loop """owa""" +180 52 negative_sampler """basic""" +180 52 evaluator """rankbased""" +180 53 dataset """kinships""" +180 53 model """distmult""" +180 53 loss """nssa""" +180 53 regularizer """no""" +180 53 optimizer """adadelta""" +180 53 training_loop """owa""" +180 53 negative_sampler """basic""" +180 53 evaluator """rankbased""" +180 54 dataset """kinships""" +180 54 model """distmult""" +180 54 loss """nssa""" +180 54 regularizer """no""" +180 54 optimizer """adadelta""" +180 54 training_loop """owa""" +180 54 negative_sampler """basic""" +180 54 evaluator """rankbased""" +180 55 dataset """kinships""" +180 55 model """distmult""" +180 55 loss """nssa""" +180 55 regularizer """no""" +180 55 optimizer """adadelta""" +180 55 training_loop """owa""" +180 55 negative_sampler """basic""" +180 55 evaluator """rankbased""" +180 56 dataset """kinships""" +180 56 model """distmult""" +180 56 loss """nssa""" +180 56 regularizer """no""" +180 56 optimizer """adadelta""" +180 56 training_loop """owa""" +180 56 negative_sampler """basic""" +180 56 evaluator """rankbased""" +180 57 dataset """kinships""" +180 57 model """distmult""" +180 57 loss """nssa""" +180 57 regularizer """no""" +180 57 optimizer """adadelta""" +180 57 training_loop """owa""" +180 57 negative_sampler """basic""" +180 57 evaluator """rankbased""" +180 58 dataset """kinships""" +180 58 model """distmult""" +180 58 loss """nssa""" +180 58 regularizer 
"""no""" +180 58 optimizer """adadelta""" +180 58 training_loop """owa""" +180 58 negative_sampler """basic""" +180 58 evaluator """rankbased""" +180 59 dataset """kinships""" +180 59 model """distmult""" +180 59 loss """nssa""" +180 59 regularizer """no""" +180 59 optimizer """adadelta""" +180 59 training_loop """owa""" +180 59 negative_sampler """basic""" +180 59 evaluator """rankbased""" +180 60 dataset """kinships""" +180 60 model """distmult""" +180 60 loss """nssa""" +180 60 regularizer """no""" +180 60 optimizer """adadelta""" +180 60 training_loop """owa""" +180 60 negative_sampler """basic""" +180 60 evaluator """rankbased""" +180 61 dataset """kinships""" +180 61 model """distmult""" +180 61 loss """nssa""" +180 61 regularizer """no""" +180 61 optimizer """adadelta""" +180 61 training_loop """owa""" +180 61 negative_sampler """basic""" +180 61 evaluator """rankbased""" +180 62 dataset """kinships""" +180 62 model """distmult""" +180 62 loss """nssa""" +180 62 regularizer """no""" +180 62 optimizer """adadelta""" +180 62 training_loop """owa""" +180 62 negative_sampler """basic""" +180 62 evaluator """rankbased""" +180 63 dataset """kinships""" +180 63 model """distmult""" +180 63 loss """nssa""" +180 63 regularizer """no""" +180 63 optimizer """adadelta""" +180 63 training_loop """owa""" +180 63 negative_sampler """basic""" +180 63 evaluator """rankbased""" +180 64 dataset """kinships""" +180 64 model """distmult""" +180 64 loss """nssa""" +180 64 regularizer """no""" +180 64 optimizer """adadelta""" +180 64 training_loop """owa""" +180 64 negative_sampler """basic""" +180 64 evaluator """rankbased""" +180 65 dataset """kinships""" +180 65 model """distmult""" +180 65 loss """nssa""" +180 65 regularizer """no""" +180 65 optimizer """adadelta""" +180 65 training_loop """owa""" +180 65 negative_sampler """basic""" +180 65 evaluator """rankbased""" +180 66 dataset """kinships""" +180 66 model """distmult""" +180 66 loss """nssa""" +180 66 regularizer 
"""no""" +180 66 optimizer """adadelta""" +180 66 training_loop """owa""" +180 66 negative_sampler """basic""" +180 66 evaluator """rankbased""" +180 67 dataset """kinships""" +180 67 model """distmult""" +180 67 loss """nssa""" +180 67 regularizer """no""" +180 67 optimizer """adadelta""" +180 67 training_loop """owa""" +180 67 negative_sampler """basic""" +180 67 evaluator """rankbased""" +180 68 dataset """kinships""" +180 68 model """distmult""" +180 68 loss """nssa""" +180 68 regularizer """no""" +180 68 optimizer """adadelta""" +180 68 training_loop """owa""" +180 68 negative_sampler """basic""" +180 68 evaluator """rankbased""" +180 69 dataset """kinships""" +180 69 model """distmult""" +180 69 loss """nssa""" +180 69 regularizer """no""" +180 69 optimizer """adadelta""" +180 69 training_loop """owa""" +180 69 negative_sampler """basic""" +180 69 evaluator """rankbased""" +180 70 dataset """kinships""" +180 70 model """distmult""" +180 70 loss """nssa""" +180 70 regularizer """no""" +180 70 optimizer """adadelta""" +180 70 training_loop """owa""" +180 70 negative_sampler """basic""" +180 70 evaluator """rankbased""" +180 71 dataset """kinships""" +180 71 model """distmult""" +180 71 loss """nssa""" +180 71 regularizer """no""" +180 71 optimizer """adadelta""" +180 71 training_loop """owa""" +180 71 negative_sampler """basic""" +180 71 evaluator """rankbased""" +180 72 dataset """kinships""" +180 72 model """distmult""" +180 72 loss """nssa""" +180 72 regularizer """no""" +180 72 optimizer """adadelta""" +180 72 training_loop """owa""" +180 72 negative_sampler """basic""" +180 72 evaluator """rankbased""" +180 73 dataset """kinships""" +180 73 model """distmult""" +180 73 loss """nssa""" +180 73 regularizer """no""" +180 73 optimizer """adadelta""" +180 73 training_loop """owa""" +180 73 negative_sampler """basic""" +180 73 evaluator """rankbased""" +180 74 dataset """kinships""" +180 74 model """distmult""" +180 74 loss """nssa""" +180 74 regularizer 
"""no""" +180 74 optimizer """adadelta""" +180 74 training_loop """owa""" +180 74 negative_sampler """basic""" +180 74 evaluator """rankbased""" +180 75 dataset """kinships""" +180 75 model """distmult""" +180 75 loss """nssa""" +180 75 regularizer """no""" +180 75 optimizer """adadelta""" +180 75 training_loop """owa""" +180 75 negative_sampler """basic""" +180 75 evaluator """rankbased""" +180 76 dataset """kinships""" +180 76 model """distmult""" +180 76 loss """nssa""" +180 76 regularizer """no""" +180 76 optimizer """adadelta""" +180 76 training_loop """owa""" +180 76 negative_sampler """basic""" +180 76 evaluator """rankbased""" +180 77 dataset """kinships""" +180 77 model """distmult""" +180 77 loss """nssa""" +180 77 regularizer """no""" +180 77 optimizer """adadelta""" +180 77 training_loop """owa""" +180 77 negative_sampler """basic""" +180 77 evaluator """rankbased""" +180 78 dataset """kinships""" +180 78 model """distmult""" +180 78 loss """nssa""" +180 78 regularizer """no""" +180 78 optimizer """adadelta""" +180 78 training_loop """owa""" +180 78 negative_sampler """basic""" +180 78 evaluator """rankbased""" +180 79 dataset """kinships""" +180 79 model """distmult""" +180 79 loss """nssa""" +180 79 regularizer """no""" +180 79 optimizer """adadelta""" +180 79 training_loop """owa""" +180 79 negative_sampler """basic""" +180 79 evaluator """rankbased""" +180 80 dataset """kinships""" +180 80 model """distmult""" +180 80 loss """nssa""" +180 80 regularizer """no""" +180 80 optimizer """adadelta""" +180 80 training_loop """owa""" +180 80 negative_sampler """basic""" +180 80 evaluator """rankbased""" +180 81 dataset """kinships""" +180 81 model """distmult""" +180 81 loss """nssa""" +180 81 regularizer """no""" +180 81 optimizer """adadelta""" +180 81 training_loop """owa""" +180 81 negative_sampler """basic""" +180 81 evaluator """rankbased""" +180 82 dataset """kinships""" +180 82 model """distmult""" +180 82 loss """nssa""" +180 82 regularizer 
"""no""" +180 82 optimizer """adadelta""" +180 82 training_loop """owa""" +180 82 negative_sampler """basic""" +180 82 evaluator """rankbased""" +180 83 dataset """kinships""" +180 83 model """distmult""" +180 83 loss """nssa""" +180 83 regularizer """no""" +180 83 optimizer """adadelta""" +180 83 training_loop """owa""" +180 83 negative_sampler """basic""" +180 83 evaluator """rankbased""" +180 84 dataset """kinships""" +180 84 model """distmult""" +180 84 loss """nssa""" +180 84 regularizer """no""" +180 84 optimizer """adadelta""" +180 84 training_loop """owa""" +180 84 negative_sampler """basic""" +180 84 evaluator """rankbased""" +180 85 dataset """kinships""" +180 85 model """distmult""" +180 85 loss """nssa""" +180 85 regularizer """no""" +180 85 optimizer """adadelta""" +180 85 training_loop """owa""" +180 85 negative_sampler """basic""" +180 85 evaluator """rankbased""" +180 86 dataset """kinships""" +180 86 model """distmult""" +180 86 loss """nssa""" +180 86 regularizer """no""" +180 86 optimizer """adadelta""" +180 86 training_loop """owa""" +180 86 negative_sampler """basic""" +180 86 evaluator """rankbased""" +180 87 dataset """kinships""" +180 87 model """distmult""" +180 87 loss """nssa""" +180 87 regularizer """no""" +180 87 optimizer """adadelta""" +180 87 training_loop """owa""" +180 87 negative_sampler """basic""" +180 87 evaluator """rankbased""" +180 88 dataset """kinships""" +180 88 model """distmult""" +180 88 loss """nssa""" +180 88 regularizer """no""" +180 88 optimizer """adadelta""" +180 88 training_loop """owa""" +180 88 negative_sampler """basic""" +180 88 evaluator """rankbased""" +180 89 dataset """kinships""" +180 89 model """distmult""" +180 89 loss """nssa""" +180 89 regularizer """no""" +180 89 optimizer """adadelta""" +180 89 training_loop """owa""" +180 89 negative_sampler """basic""" +180 89 evaluator """rankbased""" +180 90 dataset """kinships""" +180 90 model """distmult""" +180 90 loss """nssa""" +180 90 regularizer 
"""no""" +180 90 optimizer """adadelta""" +180 90 training_loop """owa""" +180 90 negative_sampler """basic""" +180 90 evaluator """rankbased""" +180 91 dataset """kinships""" +180 91 model """distmult""" +180 91 loss """nssa""" +180 91 regularizer """no""" +180 91 optimizer """adadelta""" +180 91 training_loop """owa""" +180 91 negative_sampler """basic""" +180 91 evaluator """rankbased""" +180 92 dataset """kinships""" +180 92 model """distmult""" +180 92 loss """nssa""" +180 92 regularizer """no""" +180 92 optimizer """adadelta""" +180 92 training_loop """owa""" +180 92 negative_sampler """basic""" +180 92 evaluator """rankbased""" +180 93 dataset """kinships""" +180 93 model """distmult""" +180 93 loss """nssa""" +180 93 regularizer """no""" +180 93 optimizer """adadelta""" +180 93 training_loop """owa""" +180 93 negative_sampler """basic""" +180 93 evaluator """rankbased""" +180 94 dataset """kinships""" +180 94 model """distmult""" +180 94 loss """nssa""" +180 94 regularizer """no""" +180 94 optimizer """adadelta""" +180 94 training_loop """owa""" +180 94 negative_sampler """basic""" +180 94 evaluator """rankbased""" +180 95 dataset """kinships""" +180 95 model """distmult""" +180 95 loss """nssa""" +180 95 regularizer """no""" +180 95 optimizer """adadelta""" +180 95 training_loop """owa""" +180 95 negative_sampler """basic""" +180 95 evaluator """rankbased""" +180 96 dataset """kinships""" +180 96 model """distmult""" +180 96 loss """nssa""" +180 96 regularizer """no""" +180 96 optimizer """adadelta""" +180 96 training_loop """owa""" +180 96 negative_sampler """basic""" +180 96 evaluator """rankbased""" +180 97 dataset """kinships""" +180 97 model """distmult""" +180 97 loss """nssa""" +180 97 regularizer """no""" +180 97 optimizer """adadelta""" +180 97 training_loop """owa""" +180 97 negative_sampler """basic""" +180 97 evaluator """rankbased""" +180 98 dataset """kinships""" +180 98 model """distmult""" +180 98 loss """nssa""" +180 98 regularizer 
"""no""" +180 98 optimizer """adadelta""" +180 98 training_loop """owa""" +180 98 negative_sampler """basic""" +180 98 evaluator """rankbased""" +180 99 dataset """kinships""" +180 99 model """distmult""" +180 99 loss """nssa""" +180 99 regularizer """no""" +180 99 optimizer """adadelta""" +180 99 training_loop """owa""" +180 99 negative_sampler """basic""" +180 99 evaluator """rankbased""" +180 100 dataset """kinships""" +180 100 model """distmult""" +180 100 loss """nssa""" +180 100 regularizer """no""" +180 100 optimizer """adadelta""" +180 100 training_loop """owa""" +180 100 negative_sampler """basic""" +180 100 evaluator """rankbased""" +181 1 model.embedding_dim 1.0 +181 1 loss.margin 26.02273739557766 +181 1 loss.adversarial_temperature 0.6245306153073491 +181 1 negative_sampler.num_negs_per_pos 74.0 +181 1 training.batch_size 2.0 +181 2 model.embedding_dim 0.0 +181 2 loss.margin 14.926723569540622 +181 2 loss.adversarial_temperature 0.56178588078166 +181 2 negative_sampler.num_negs_per_pos 71.0 +181 2 training.batch_size 0.0 +181 3 model.embedding_dim 2.0 +181 3 loss.margin 20.17685072505679 +181 3 loss.adversarial_temperature 0.29490247460649377 +181 3 negative_sampler.num_negs_per_pos 40.0 +181 3 training.batch_size 2.0 +181 4 model.embedding_dim 1.0 +181 4 loss.margin 19.69353214487679 +181 4 loss.adversarial_temperature 0.3671665058502599 +181 4 negative_sampler.num_negs_per_pos 21.0 +181 4 training.batch_size 0.0 +181 5 model.embedding_dim 0.0 +181 5 loss.margin 21.576824117070277 +181 5 loss.adversarial_temperature 0.9578914630252454 +181 5 negative_sampler.num_negs_per_pos 31.0 +181 5 training.batch_size 2.0 +181 6 model.embedding_dim 2.0 +181 6 loss.margin 6.948017901198893 +181 6 loss.adversarial_temperature 0.69833925429282 +181 6 negative_sampler.num_negs_per_pos 18.0 +181 6 training.batch_size 2.0 +181 7 model.embedding_dim 2.0 +181 7 loss.margin 28.05144006668775 +181 7 loss.adversarial_temperature 0.5314476117870341 +181 7 
negative_sampler.num_negs_per_pos 31.0 +181 7 training.batch_size 0.0 +181 8 model.embedding_dim 0.0 +181 8 loss.margin 15.141792034428255 +181 8 loss.adversarial_temperature 0.2874388160839367 +181 8 negative_sampler.num_negs_per_pos 63.0 +181 8 training.batch_size 0.0 +181 9 model.embedding_dim 1.0 +181 9 loss.margin 2.991851107373843 +181 9 loss.adversarial_temperature 0.45719475806233734 +181 9 negative_sampler.num_negs_per_pos 71.0 +181 9 training.batch_size 0.0 +181 10 model.embedding_dim 2.0 +181 10 loss.margin 9.057747532289515 +181 10 loss.adversarial_temperature 0.5497911575241409 +181 10 negative_sampler.num_negs_per_pos 73.0 +181 10 training.batch_size 1.0 +181 11 model.embedding_dim 0.0 +181 11 loss.margin 21.487790718339493 +181 11 loss.adversarial_temperature 0.4909433530335571 +181 11 negative_sampler.num_negs_per_pos 8.0 +181 11 training.batch_size 1.0 +181 12 model.embedding_dim 2.0 +181 12 loss.margin 9.94611849565874 +181 12 loss.adversarial_temperature 0.37488456022995853 +181 12 negative_sampler.num_negs_per_pos 73.0 +181 12 training.batch_size 2.0 +181 13 model.embedding_dim 1.0 +181 13 loss.margin 29.54823006710395 +181 13 loss.adversarial_temperature 0.3162825019586762 +181 13 negative_sampler.num_negs_per_pos 56.0 +181 13 training.batch_size 0.0 +181 14 model.embedding_dim 1.0 +181 14 loss.margin 26.01488980573155 +181 14 loss.adversarial_temperature 0.5781177333317438 +181 14 negative_sampler.num_negs_per_pos 37.0 +181 14 training.batch_size 2.0 +181 15 model.embedding_dim 1.0 +181 15 loss.margin 6.083698837357717 +181 15 loss.adversarial_temperature 0.49294749892120104 +181 15 negative_sampler.num_negs_per_pos 16.0 +181 15 training.batch_size 0.0 +181 16 model.embedding_dim 1.0 +181 16 loss.margin 5.035054408804864 +181 16 loss.adversarial_temperature 0.8276165248401857 +181 16 negative_sampler.num_negs_per_pos 37.0 +181 16 training.batch_size 1.0 +181 17 model.embedding_dim 0.0 +181 17 loss.margin 15.801868065861663 +181 17 
loss.adversarial_temperature 0.603177790038209 +181 17 negative_sampler.num_negs_per_pos 84.0 +181 17 training.batch_size 0.0 +181 18 model.embedding_dim 2.0 +181 18 loss.margin 22.96208467841457 +181 18 loss.adversarial_temperature 0.9808166861280114 +181 18 negative_sampler.num_negs_per_pos 98.0 +181 18 training.batch_size 0.0 +181 19 model.embedding_dim 2.0 +181 19 loss.margin 18.852905578592736 +181 19 loss.adversarial_temperature 0.616038001314766 +181 19 negative_sampler.num_negs_per_pos 24.0 +181 19 training.batch_size 2.0 +181 20 model.embedding_dim 1.0 +181 20 loss.margin 25.216873021570677 +181 20 loss.adversarial_temperature 0.9140478823076699 +181 20 negative_sampler.num_negs_per_pos 60.0 +181 20 training.batch_size 2.0 +181 21 model.embedding_dim 2.0 +181 21 loss.margin 19.04148987697758 +181 21 loss.adversarial_temperature 0.5119024518336344 +181 21 negative_sampler.num_negs_per_pos 18.0 +181 21 training.batch_size 0.0 +181 22 model.embedding_dim 2.0 +181 22 loss.margin 14.239629850874845 +181 22 loss.adversarial_temperature 0.5365026379385208 +181 22 negative_sampler.num_negs_per_pos 12.0 +181 22 training.batch_size 1.0 +181 23 model.embedding_dim 2.0 +181 23 loss.margin 24.981842324993096 +181 23 loss.adversarial_temperature 0.7618480744242692 +181 23 negative_sampler.num_negs_per_pos 41.0 +181 23 training.batch_size 1.0 +181 24 model.embedding_dim 0.0 +181 24 loss.margin 4.019601295924773 +181 24 loss.adversarial_temperature 0.3980646567294561 +181 24 negative_sampler.num_negs_per_pos 7.0 +181 24 training.batch_size 0.0 +181 25 model.embedding_dim 0.0 +181 25 loss.margin 27.417097348543397 +181 25 loss.adversarial_temperature 0.4987368701753556 +181 25 negative_sampler.num_negs_per_pos 59.0 +181 25 training.batch_size 0.0 +181 26 model.embedding_dim 2.0 +181 26 loss.margin 15.413343291280455 +181 26 loss.adversarial_temperature 0.16978376021275976 +181 26 negative_sampler.num_negs_per_pos 84.0 +181 26 training.batch_size 0.0 +181 27 
model.embedding_dim 1.0 +181 27 loss.margin 5.869588528091508 +181 27 loss.adversarial_temperature 0.2759685576137516 +181 27 negative_sampler.num_negs_per_pos 33.0 +181 27 training.batch_size 0.0 +181 28 model.embedding_dim 2.0 +181 28 loss.margin 17.945989536005296 +181 28 loss.adversarial_temperature 0.9932495413537736 +181 28 negative_sampler.num_negs_per_pos 94.0 +181 28 training.batch_size 2.0 +181 29 model.embedding_dim 0.0 +181 29 loss.margin 5.275128788714883 +181 29 loss.adversarial_temperature 0.5491813055699787 +181 29 negative_sampler.num_negs_per_pos 31.0 +181 29 training.batch_size 0.0 +181 30 model.embedding_dim 1.0 +181 30 loss.margin 26.861316368322893 +181 30 loss.adversarial_temperature 0.6928357061155932 +181 30 negative_sampler.num_negs_per_pos 47.0 +181 30 training.batch_size 2.0 +181 31 model.embedding_dim 2.0 +181 31 loss.margin 21.791742999599414 +181 31 loss.adversarial_temperature 0.4855285176196805 +181 31 negative_sampler.num_negs_per_pos 26.0 +181 31 training.batch_size 0.0 +181 32 model.embedding_dim 0.0 +181 32 loss.margin 15.355371902072521 +181 32 loss.adversarial_temperature 0.9852606263351965 +181 32 negative_sampler.num_negs_per_pos 0.0 +181 32 training.batch_size 1.0 +181 33 model.embedding_dim 0.0 +181 33 loss.margin 26.0480954593886 +181 33 loss.adversarial_temperature 0.7642657198022699 +181 33 negative_sampler.num_negs_per_pos 54.0 +181 33 training.batch_size 1.0 +181 34 model.embedding_dim 0.0 +181 34 loss.margin 18.99534460302262 +181 34 loss.adversarial_temperature 0.20806703136376542 +181 34 negative_sampler.num_negs_per_pos 80.0 +181 34 training.batch_size 0.0 +181 35 model.embedding_dim 2.0 +181 35 loss.margin 15.026957785642336 +181 35 loss.adversarial_temperature 0.7013570221773133 +181 35 negative_sampler.num_negs_per_pos 47.0 +181 35 training.batch_size 1.0 +181 36 model.embedding_dim 0.0 +181 36 loss.margin 4.363987509035262 +181 36 loss.adversarial_temperature 0.8076708186210889 +181 36 
negative_sampler.num_negs_per_pos 3.0 +181 36 training.batch_size 1.0 +181 37 model.embedding_dim 2.0 +181 37 loss.margin 12.160722348167893 +181 37 loss.adversarial_temperature 0.9175125460330766 +181 37 negative_sampler.num_negs_per_pos 85.0 +181 37 training.batch_size 1.0 +181 38 model.embedding_dim 2.0 +181 38 loss.margin 22.660792137932532 +181 38 loss.adversarial_temperature 0.6952652680158697 +181 38 negative_sampler.num_negs_per_pos 29.0 +181 38 training.batch_size 0.0 +181 39 model.embedding_dim 0.0 +181 39 loss.margin 16.210057381288905 +181 39 loss.adversarial_temperature 0.7007937295367501 +181 39 negative_sampler.num_negs_per_pos 24.0 +181 39 training.batch_size 1.0 +181 40 model.embedding_dim 0.0 +181 40 loss.margin 25.31763235249116 +181 40 loss.adversarial_temperature 0.5477855942251881 +181 40 negative_sampler.num_negs_per_pos 1.0 +181 40 training.batch_size 1.0 +181 41 model.embedding_dim 0.0 +181 41 loss.margin 18.338894922499282 +181 41 loss.adversarial_temperature 0.7306687559832543 +181 41 negative_sampler.num_negs_per_pos 82.0 +181 41 training.batch_size 2.0 +181 42 model.embedding_dim 0.0 +181 42 loss.margin 3.6312221418819703 +181 42 loss.adversarial_temperature 0.6687745997264579 +181 42 negative_sampler.num_negs_per_pos 84.0 +181 42 training.batch_size 1.0 +181 43 model.embedding_dim 0.0 +181 43 loss.margin 25.34375408670543 +181 43 loss.adversarial_temperature 0.9529754529192012 +181 43 negative_sampler.num_negs_per_pos 11.0 +181 43 training.batch_size 1.0 +181 44 model.embedding_dim 1.0 +181 44 loss.margin 6.176334418447327 +181 44 loss.adversarial_temperature 0.11483819272029741 +181 44 negative_sampler.num_negs_per_pos 92.0 +181 44 training.batch_size 2.0 +181 45 model.embedding_dim 1.0 +181 45 loss.margin 13.27237002994262 +181 45 loss.adversarial_temperature 0.19717613394194897 +181 45 negative_sampler.num_negs_per_pos 73.0 +181 45 training.batch_size 2.0 +181 46 model.embedding_dim 1.0 +181 46 loss.margin 5.280084717608802 +181 46 
loss.adversarial_temperature 0.12138027751604982 +181 46 negative_sampler.num_negs_per_pos 14.0 +181 46 training.batch_size 2.0 +181 47 model.embedding_dim 0.0 +181 47 loss.margin 22.203615540608702 +181 47 loss.adversarial_temperature 0.8701342899540876 +181 47 negative_sampler.num_negs_per_pos 40.0 +181 47 training.batch_size 1.0 +181 48 model.embedding_dim 2.0 +181 48 loss.margin 4.78274459767332 +181 48 loss.adversarial_temperature 0.46184217997562726 +181 48 negative_sampler.num_negs_per_pos 26.0 +181 48 training.batch_size 1.0 +181 49 model.embedding_dim 2.0 +181 49 loss.margin 19.578453346519275 +181 49 loss.adversarial_temperature 0.5900910592830697 +181 49 negative_sampler.num_negs_per_pos 60.0 +181 49 training.batch_size 2.0 +181 50 model.embedding_dim 0.0 +181 50 loss.margin 25.426930900204095 +181 50 loss.adversarial_temperature 0.8642540212376572 +181 50 negative_sampler.num_negs_per_pos 95.0 +181 50 training.batch_size 0.0 +181 51 model.embedding_dim 1.0 +181 51 loss.margin 25.229952860985886 +181 51 loss.adversarial_temperature 0.3595742313063681 +181 51 negative_sampler.num_negs_per_pos 27.0 +181 51 training.batch_size 1.0 +181 52 model.embedding_dim 1.0 +181 52 loss.margin 11.952639210414445 +181 52 loss.adversarial_temperature 0.8885418308852173 +181 52 negative_sampler.num_negs_per_pos 92.0 +181 52 training.batch_size 0.0 +181 53 model.embedding_dim 2.0 +181 53 loss.margin 1.6691528890957352 +181 53 loss.adversarial_temperature 0.8354292906343335 +181 53 negative_sampler.num_negs_per_pos 7.0 +181 53 training.batch_size 2.0 +181 54 model.embedding_dim 2.0 +181 54 loss.margin 5.567385459177622 +181 54 loss.adversarial_temperature 0.8061995049566352 +181 54 negative_sampler.num_negs_per_pos 70.0 +181 54 training.batch_size 1.0 +181 55 model.embedding_dim 1.0 +181 55 loss.margin 24.091047250050636 +181 55 loss.adversarial_temperature 0.98956766902527 +181 55 negative_sampler.num_negs_per_pos 94.0 +181 55 training.batch_size 0.0 +181 56 
model.embedding_dim 1.0 +181 56 loss.margin 12.913821082078895 +181 56 loss.adversarial_temperature 0.5344248881753421 +181 56 negative_sampler.num_negs_per_pos 63.0 +181 56 training.batch_size 2.0 +181 57 model.embedding_dim 1.0 +181 57 loss.margin 6.300762118992546 +181 57 loss.adversarial_temperature 0.7643331642798383 +181 57 negative_sampler.num_negs_per_pos 52.0 +181 57 training.batch_size 1.0 +181 58 model.embedding_dim 0.0 +181 58 loss.margin 19.51327647451664 +181 58 loss.adversarial_temperature 0.8496788525802772 +181 58 negative_sampler.num_negs_per_pos 42.0 +181 58 training.batch_size 0.0 +181 59 model.embedding_dim 1.0 +181 59 loss.margin 17.742578604737233 +181 59 loss.adversarial_temperature 0.9103815579913792 +181 59 negative_sampler.num_negs_per_pos 43.0 +181 59 training.batch_size 0.0 +181 60 model.embedding_dim 1.0 +181 60 loss.margin 29.177222311680126 +181 60 loss.adversarial_temperature 0.7080185350785948 +181 60 negative_sampler.num_negs_per_pos 34.0 +181 60 training.batch_size 1.0 +181 61 model.embedding_dim 1.0 +181 61 loss.margin 5.111157064385921 +181 61 loss.adversarial_temperature 0.1288462516754661 +181 61 negative_sampler.num_negs_per_pos 16.0 +181 61 training.batch_size 0.0 +181 62 model.embedding_dim 2.0 +181 62 loss.margin 25.705128640110082 +181 62 loss.adversarial_temperature 0.16627302612391504 +181 62 negative_sampler.num_negs_per_pos 33.0 +181 62 training.batch_size 0.0 +181 63 model.embedding_dim 0.0 +181 63 loss.margin 6.733109414671824 +181 63 loss.adversarial_temperature 0.6816168703668213 +181 63 negative_sampler.num_negs_per_pos 34.0 +181 63 training.batch_size 0.0 +181 64 model.embedding_dim 1.0 +181 64 loss.margin 20.433750730753214 +181 64 loss.adversarial_temperature 0.6821316068912836 +181 64 negative_sampler.num_negs_per_pos 84.0 +181 64 training.batch_size 0.0 +181 65 model.embedding_dim 0.0 +181 65 loss.margin 8.572736981200787 +181 65 loss.adversarial_temperature 0.5737489123700188 +181 65 
negative_sampler.num_negs_per_pos 87.0 +181 65 training.batch_size 1.0 +181 66 model.embedding_dim 0.0 +181 66 loss.margin 17.910961265621133 +181 66 loss.adversarial_temperature 0.18451650705243855 +181 66 negative_sampler.num_negs_per_pos 52.0 +181 66 training.batch_size 0.0 +181 67 model.embedding_dim 2.0 +181 67 loss.margin 13.168471676678962 +181 67 loss.adversarial_temperature 0.36182739967725996 +181 67 negative_sampler.num_negs_per_pos 59.0 +181 67 training.batch_size 0.0 +181 68 model.embedding_dim 0.0 +181 68 loss.margin 27.632971124066398 +181 68 loss.adversarial_temperature 0.11083485217058923 +181 68 negative_sampler.num_negs_per_pos 48.0 +181 68 training.batch_size 2.0 +181 69 model.embedding_dim 1.0 +181 69 loss.margin 21.780100902645678 +181 69 loss.adversarial_temperature 0.38939027807702353 +181 69 negative_sampler.num_negs_per_pos 26.0 +181 69 training.batch_size 1.0 +181 70 model.embedding_dim 2.0 +181 70 loss.margin 21.373427259620115 +181 70 loss.adversarial_temperature 0.43701454351623004 +181 70 negative_sampler.num_negs_per_pos 71.0 +181 70 training.batch_size 2.0 +181 71 model.embedding_dim 1.0 +181 71 loss.margin 24.900752207083904 +181 71 loss.adversarial_temperature 0.9910565692383253 +181 71 negative_sampler.num_negs_per_pos 77.0 +181 71 training.batch_size 2.0 +181 72 model.embedding_dim 1.0 +181 72 loss.margin 12.42858308436902 +181 72 loss.adversarial_temperature 0.23804908413930545 +181 72 negative_sampler.num_negs_per_pos 7.0 +181 72 training.batch_size 1.0 +181 73 model.embedding_dim 1.0 +181 73 loss.margin 28.473001964539684 +181 73 loss.adversarial_temperature 0.73388666103554 +181 73 negative_sampler.num_negs_per_pos 50.0 +181 73 training.batch_size 0.0 +181 74 model.embedding_dim 0.0 +181 74 loss.margin 25.203724630874536 +181 74 loss.adversarial_temperature 0.5233211576899933 +181 74 negative_sampler.num_negs_per_pos 17.0 +181 74 training.batch_size 0.0 +181 75 model.embedding_dim 0.0 +181 75 loss.margin 27.138473822834083 
+181 75 loss.adversarial_temperature 0.8163324756663436 +181 75 negative_sampler.num_negs_per_pos 82.0 +181 75 training.batch_size 0.0 +181 76 model.embedding_dim 1.0 +181 76 loss.margin 19.894642713053063 +181 76 loss.adversarial_temperature 0.43617818551480747 +181 76 negative_sampler.num_negs_per_pos 27.0 +181 76 training.batch_size 1.0 +181 77 model.embedding_dim 0.0 +181 77 loss.margin 27.90422588485311 +181 77 loss.adversarial_temperature 0.6449142275404091 +181 77 negative_sampler.num_negs_per_pos 36.0 +181 77 training.batch_size 1.0 +181 78 model.embedding_dim 1.0 +181 78 loss.margin 27.83082155925858 +181 78 loss.adversarial_temperature 0.6312906785867817 +181 78 negative_sampler.num_negs_per_pos 88.0 +181 78 training.batch_size 0.0 +181 79 model.embedding_dim 2.0 +181 79 loss.margin 10.657563297889023 +181 79 loss.adversarial_temperature 0.8719619821148389 +181 79 negative_sampler.num_negs_per_pos 84.0 +181 79 training.batch_size 0.0 +181 80 model.embedding_dim 0.0 +181 80 loss.margin 13.965221774677765 +181 80 loss.adversarial_temperature 0.7756753692740311 +181 80 negative_sampler.num_negs_per_pos 53.0 +181 80 training.batch_size 0.0 +181 81 model.embedding_dim 2.0 +181 81 loss.margin 21.463799653265884 +181 81 loss.adversarial_temperature 0.5919459044324143 +181 81 negative_sampler.num_negs_per_pos 33.0 +181 81 training.batch_size 0.0 +181 82 model.embedding_dim 2.0 +181 82 loss.margin 28.23871729283679 +181 82 loss.adversarial_temperature 0.5969573330325081 +181 82 negative_sampler.num_negs_per_pos 74.0 +181 82 training.batch_size 0.0 +181 83 model.embedding_dim 1.0 +181 83 loss.margin 7.369013964595546 +181 83 loss.adversarial_temperature 0.4856705145546859 +181 83 negative_sampler.num_negs_per_pos 23.0 +181 83 training.batch_size 1.0 +181 84 model.embedding_dim 1.0 +181 84 loss.margin 5.842131374366585 +181 84 loss.adversarial_temperature 0.45493377175528604 +181 84 negative_sampler.num_negs_per_pos 79.0 +181 84 training.batch_size 1.0 +181 85 
model.embedding_dim 0.0 +181 85 loss.margin 22.754176223709234 +181 85 loss.adversarial_temperature 0.21412761405898723 +181 85 negative_sampler.num_negs_per_pos 68.0 +181 85 training.batch_size 1.0 +181 86 model.embedding_dim 1.0 +181 86 loss.margin 23.100390401985553 +181 86 loss.adversarial_temperature 0.8155519030527296 +181 86 negative_sampler.num_negs_per_pos 80.0 +181 86 training.batch_size 0.0 +181 87 model.embedding_dim 0.0 +181 87 loss.margin 6.74318065453111 +181 87 loss.adversarial_temperature 0.1836603923975137 +181 87 negative_sampler.num_negs_per_pos 74.0 +181 87 training.batch_size 0.0 +181 88 model.embedding_dim 0.0 +181 88 loss.margin 14.813269418213535 +181 88 loss.adversarial_temperature 0.45636965305161015 +181 88 negative_sampler.num_negs_per_pos 42.0 +181 88 training.batch_size 0.0 +181 89 model.embedding_dim 2.0 +181 89 loss.margin 7.903410169913233 +181 89 loss.adversarial_temperature 0.3990421983282726 +181 89 negative_sampler.num_negs_per_pos 27.0 +181 89 training.batch_size 2.0 +181 90 model.embedding_dim 1.0 +181 90 loss.margin 26.779499708715914 +181 90 loss.adversarial_temperature 0.7167774971999789 +181 90 negative_sampler.num_negs_per_pos 8.0 +181 90 training.batch_size 2.0 +181 91 model.embedding_dim 0.0 +181 91 loss.margin 4.588916008043636 +181 91 loss.adversarial_temperature 0.4217089591482983 +181 91 negative_sampler.num_negs_per_pos 7.0 +181 91 training.batch_size 1.0 +181 92 model.embedding_dim 1.0 +181 92 loss.margin 1.3273499885506308 +181 92 loss.adversarial_temperature 0.7421747148472647 +181 92 negative_sampler.num_negs_per_pos 49.0 +181 92 training.batch_size 0.0 +181 93 model.embedding_dim 0.0 +181 93 loss.margin 29.777512594379083 +181 93 loss.adversarial_temperature 0.7917320293540381 +181 93 negative_sampler.num_negs_per_pos 93.0 +181 93 training.batch_size 0.0 +181 94 model.embedding_dim 1.0 +181 94 loss.margin 12.914848457636937 +181 94 loss.adversarial_temperature 0.4753988814817769 +181 94 
negative_sampler.num_negs_per_pos 37.0 +181 94 training.batch_size 1.0 +181 95 model.embedding_dim 2.0 +181 95 loss.margin 19.07100372657608 +181 95 loss.adversarial_temperature 0.3135555443147038 +181 95 negative_sampler.num_negs_per_pos 50.0 +181 95 training.batch_size 1.0 +181 96 model.embedding_dim 2.0 +181 96 loss.margin 9.232240192899077 +181 96 loss.adversarial_temperature 0.4258413784607714 +181 96 negative_sampler.num_negs_per_pos 15.0 +181 96 training.batch_size 0.0 +181 97 model.embedding_dim 1.0 +181 97 loss.margin 12.257313475390633 +181 97 loss.adversarial_temperature 0.46351946309325553 +181 97 negative_sampler.num_negs_per_pos 31.0 +181 97 training.batch_size 2.0 +181 98 model.embedding_dim 2.0 +181 98 loss.margin 8.273946002277475 +181 98 loss.adversarial_temperature 0.5592945165223523 +181 98 negative_sampler.num_negs_per_pos 46.0 +181 98 training.batch_size 0.0 +181 99 model.embedding_dim 0.0 +181 99 loss.margin 3.385609187080304 +181 99 loss.adversarial_temperature 0.8351494575576238 +181 99 negative_sampler.num_negs_per_pos 78.0 +181 99 training.batch_size 0.0 +181 100 model.embedding_dim 1.0 +181 100 loss.margin 5.755918460811476 +181 100 loss.adversarial_temperature 0.3469922920694563 +181 100 negative_sampler.num_negs_per_pos 13.0 +181 100 training.batch_size 0.0 +181 1 dataset """kinships""" +181 1 model """distmult""" +181 1 loss """nssa""" +181 1 regularizer """no""" +181 1 optimizer """adadelta""" +181 1 training_loop """owa""" +181 1 negative_sampler """basic""" +181 1 evaluator """rankbased""" +181 2 dataset """kinships""" +181 2 model """distmult""" +181 2 loss """nssa""" +181 2 regularizer """no""" +181 2 optimizer """adadelta""" +181 2 training_loop """owa""" +181 2 negative_sampler """basic""" +181 2 evaluator """rankbased""" +181 3 dataset """kinships""" +181 3 model """distmult""" +181 3 loss """nssa""" +181 3 regularizer """no""" +181 3 optimizer """adadelta""" +181 3 training_loop """owa""" +181 3 negative_sampler """basic""" 
+181 3 evaluator """rankbased""" +181 4 dataset """kinships""" +181 4 model """distmult""" +181 4 loss """nssa""" +181 4 regularizer """no""" +181 4 optimizer """adadelta""" +181 4 training_loop """owa""" +181 4 negative_sampler """basic""" +181 4 evaluator """rankbased""" +181 5 dataset """kinships""" +181 5 model """distmult""" +181 5 loss """nssa""" +181 5 regularizer """no""" +181 5 optimizer """adadelta""" +181 5 training_loop """owa""" +181 5 negative_sampler """basic""" +181 5 evaluator """rankbased""" +181 6 dataset """kinships""" +181 6 model """distmult""" +181 6 loss """nssa""" +181 6 regularizer """no""" +181 6 optimizer """adadelta""" +181 6 training_loop """owa""" +181 6 negative_sampler """basic""" +181 6 evaluator """rankbased""" +181 7 dataset """kinships""" +181 7 model """distmult""" +181 7 loss """nssa""" +181 7 regularizer """no""" +181 7 optimizer """adadelta""" +181 7 training_loop """owa""" +181 7 negative_sampler """basic""" +181 7 evaluator """rankbased""" +181 8 dataset """kinships""" +181 8 model """distmult""" +181 8 loss """nssa""" +181 8 regularizer """no""" +181 8 optimizer """adadelta""" +181 8 training_loop """owa""" +181 8 negative_sampler """basic""" +181 8 evaluator """rankbased""" +181 9 dataset """kinships""" +181 9 model """distmult""" +181 9 loss """nssa""" +181 9 regularizer """no""" +181 9 optimizer """adadelta""" +181 9 training_loop """owa""" +181 9 negative_sampler """basic""" +181 9 evaluator """rankbased""" +181 10 dataset """kinships""" +181 10 model """distmult""" +181 10 loss """nssa""" +181 10 regularizer """no""" +181 10 optimizer """adadelta""" +181 10 training_loop """owa""" +181 10 negative_sampler """basic""" +181 10 evaluator """rankbased""" +181 11 dataset """kinships""" +181 11 model """distmult""" +181 11 loss """nssa""" +181 11 regularizer """no""" +181 11 optimizer """adadelta""" +181 11 training_loop """owa""" +181 11 negative_sampler """basic""" +181 11 evaluator """rankbased""" +181 12 dataset 
"""kinships""" +181 12 model """distmult""" +181 12 loss """nssa""" +181 12 regularizer """no""" +181 12 optimizer """adadelta""" +181 12 training_loop """owa""" +181 12 negative_sampler """basic""" +181 12 evaluator """rankbased""" +181 13 dataset """kinships""" +181 13 model """distmult""" +181 13 loss """nssa""" +181 13 regularizer """no""" +181 13 optimizer """adadelta""" +181 13 training_loop """owa""" +181 13 negative_sampler """basic""" +181 13 evaluator """rankbased""" +181 14 dataset """kinships""" +181 14 model """distmult""" +181 14 loss """nssa""" +181 14 regularizer """no""" +181 14 optimizer """adadelta""" +181 14 training_loop """owa""" +181 14 negative_sampler """basic""" +181 14 evaluator """rankbased""" +181 15 dataset """kinships""" +181 15 model """distmult""" +181 15 loss """nssa""" +181 15 regularizer """no""" +181 15 optimizer """adadelta""" +181 15 training_loop """owa""" +181 15 negative_sampler """basic""" +181 15 evaluator """rankbased""" +181 16 dataset """kinships""" +181 16 model """distmult""" +181 16 loss """nssa""" +181 16 regularizer """no""" +181 16 optimizer """adadelta""" +181 16 training_loop """owa""" +181 16 negative_sampler """basic""" +181 16 evaluator """rankbased""" +181 17 dataset """kinships""" +181 17 model """distmult""" +181 17 loss """nssa""" +181 17 regularizer """no""" +181 17 optimizer """adadelta""" +181 17 training_loop """owa""" +181 17 negative_sampler """basic""" +181 17 evaluator """rankbased""" +181 18 dataset """kinships""" +181 18 model """distmult""" +181 18 loss """nssa""" +181 18 regularizer """no""" +181 18 optimizer """adadelta""" +181 18 training_loop """owa""" +181 18 negative_sampler """basic""" +181 18 evaluator """rankbased""" +181 19 dataset """kinships""" +181 19 model """distmult""" +181 19 loss """nssa""" +181 19 regularizer """no""" +181 19 optimizer """adadelta""" +181 19 training_loop """owa""" +181 19 negative_sampler """basic""" +181 19 evaluator """rankbased""" +181 20 dataset 
"""kinships""" +181 20 model """distmult""" +181 20 loss """nssa""" +181 20 regularizer """no""" +181 20 optimizer """adadelta""" +181 20 training_loop """owa""" +181 20 negative_sampler """basic""" +181 20 evaluator """rankbased""" +181 21 dataset """kinships""" +181 21 model """distmult""" +181 21 loss """nssa""" +181 21 regularizer """no""" +181 21 optimizer """adadelta""" +181 21 training_loop """owa""" +181 21 negative_sampler """basic""" +181 21 evaluator """rankbased""" +181 22 dataset """kinships""" +181 22 model """distmult""" +181 22 loss """nssa""" +181 22 regularizer """no""" +181 22 optimizer """adadelta""" +181 22 training_loop """owa""" +181 22 negative_sampler """basic""" +181 22 evaluator """rankbased""" +181 23 dataset """kinships""" +181 23 model """distmult""" +181 23 loss """nssa""" +181 23 regularizer """no""" +181 23 optimizer """adadelta""" +181 23 training_loop """owa""" +181 23 negative_sampler """basic""" +181 23 evaluator """rankbased""" +181 24 dataset """kinships""" +181 24 model """distmult""" +181 24 loss """nssa""" +181 24 regularizer """no""" +181 24 optimizer """adadelta""" +181 24 training_loop """owa""" +181 24 negative_sampler """basic""" +181 24 evaluator """rankbased""" +181 25 dataset """kinships""" +181 25 model """distmult""" +181 25 loss """nssa""" +181 25 regularizer """no""" +181 25 optimizer """adadelta""" +181 25 training_loop """owa""" +181 25 negative_sampler """basic""" +181 25 evaluator """rankbased""" +181 26 dataset """kinships""" +181 26 model """distmult""" +181 26 loss """nssa""" +181 26 regularizer """no""" +181 26 optimizer """adadelta""" +181 26 training_loop """owa""" +181 26 negative_sampler """basic""" +181 26 evaluator """rankbased""" +181 27 dataset """kinships""" +181 27 model """distmult""" +181 27 loss """nssa""" +181 27 regularizer """no""" +181 27 optimizer """adadelta""" +181 27 training_loop """owa""" +181 27 negative_sampler """basic""" +181 27 evaluator """rankbased""" +181 28 dataset 
"""kinships""" +181 28 model """distmult""" +181 28 loss """nssa""" +181 28 regularizer """no""" +181 28 optimizer """adadelta""" +181 28 training_loop """owa""" +181 28 negative_sampler """basic""" +181 28 evaluator """rankbased""" +181 29 dataset """kinships""" +181 29 model """distmult""" +181 29 loss """nssa""" +181 29 regularizer """no""" +181 29 optimizer """adadelta""" +181 29 training_loop """owa""" +181 29 negative_sampler """basic""" +181 29 evaluator """rankbased""" +181 30 dataset """kinships""" +181 30 model """distmult""" +181 30 loss """nssa""" +181 30 regularizer """no""" +181 30 optimizer """adadelta""" +181 30 training_loop """owa""" +181 30 negative_sampler """basic""" +181 30 evaluator """rankbased""" +181 31 dataset """kinships""" +181 31 model """distmult""" +181 31 loss """nssa""" +181 31 regularizer """no""" +181 31 optimizer """adadelta""" +181 31 training_loop """owa""" +181 31 negative_sampler """basic""" +181 31 evaluator """rankbased""" +181 32 dataset """kinships""" +181 32 model """distmult""" +181 32 loss """nssa""" +181 32 regularizer """no""" +181 32 optimizer """adadelta""" +181 32 training_loop """owa""" +181 32 negative_sampler """basic""" +181 32 evaluator """rankbased""" +181 33 dataset """kinships""" +181 33 model """distmult""" +181 33 loss """nssa""" +181 33 regularizer """no""" +181 33 optimizer """adadelta""" +181 33 training_loop """owa""" +181 33 negative_sampler """basic""" +181 33 evaluator """rankbased""" +181 34 dataset """kinships""" +181 34 model """distmult""" +181 34 loss """nssa""" +181 34 regularizer """no""" +181 34 optimizer """adadelta""" +181 34 training_loop """owa""" +181 34 negative_sampler """basic""" +181 34 evaluator """rankbased""" +181 35 dataset """kinships""" +181 35 model """distmult""" +181 35 loss """nssa""" +181 35 regularizer """no""" +181 35 optimizer """adadelta""" +181 35 training_loop """owa""" +181 35 negative_sampler """basic""" +181 35 evaluator """rankbased""" +181 36 dataset 
"""kinships""" +181 36 model """distmult""" +181 36 loss """nssa""" +181 36 regularizer """no""" +181 36 optimizer """adadelta""" +181 36 training_loop """owa""" +181 36 negative_sampler """basic""" +181 36 evaluator """rankbased""" +181 37 dataset """kinships""" +181 37 model """distmult""" +181 37 loss """nssa""" +181 37 regularizer """no""" +181 37 optimizer """adadelta""" +181 37 training_loop """owa""" +181 37 negative_sampler """basic""" +181 37 evaluator """rankbased""" +181 38 dataset """kinships""" +181 38 model """distmult""" +181 38 loss """nssa""" +181 38 regularizer """no""" +181 38 optimizer """adadelta""" +181 38 training_loop """owa""" +181 38 negative_sampler """basic""" +181 38 evaluator """rankbased""" +181 39 dataset """kinships""" +181 39 model """distmult""" +181 39 loss """nssa""" +181 39 regularizer """no""" +181 39 optimizer """adadelta""" +181 39 training_loop """owa""" +181 39 negative_sampler """basic""" +181 39 evaluator """rankbased""" +181 40 dataset """kinships""" +181 40 model """distmult""" +181 40 loss """nssa""" +181 40 regularizer """no""" +181 40 optimizer """adadelta""" +181 40 training_loop """owa""" +181 40 negative_sampler """basic""" +181 40 evaluator """rankbased""" +181 41 dataset """kinships""" +181 41 model """distmult""" +181 41 loss """nssa""" +181 41 regularizer """no""" +181 41 optimizer """adadelta""" +181 41 training_loop """owa""" +181 41 negative_sampler """basic""" +181 41 evaluator """rankbased""" +181 42 dataset """kinships""" +181 42 model """distmult""" +181 42 loss """nssa""" +181 42 regularizer """no""" +181 42 optimizer """adadelta""" +181 42 training_loop """owa""" +181 42 negative_sampler """basic""" +181 42 evaluator """rankbased""" +181 43 dataset """kinships""" +181 43 model """distmult""" +181 43 loss """nssa""" +181 43 regularizer """no""" +181 43 optimizer """adadelta""" +181 43 training_loop """owa""" +181 43 negative_sampler """basic""" +181 43 evaluator """rankbased""" +181 44 dataset 
"""kinships""" +181 44 model """distmult""" +181 44 loss """nssa""" +181 44 regularizer """no""" +181 44 optimizer """adadelta""" +181 44 training_loop """owa""" +181 44 negative_sampler """basic""" +181 44 evaluator """rankbased""" +181 45 dataset """kinships""" +181 45 model """distmult""" +181 45 loss """nssa""" +181 45 regularizer """no""" +181 45 optimizer """adadelta""" +181 45 training_loop """owa""" +181 45 negative_sampler """basic""" +181 45 evaluator """rankbased""" +181 46 dataset """kinships""" +181 46 model """distmult""" +181 46 loss """nssa""" +181 46 regularizer """no""" +181 46 optimizer """adadelta""" +181 46 training_loop """owa""" +181 46 negative_sampler """basic""" +181 46 evaluator """rankbased""" +181 47 dataset """kinships""" +181 47 model """distmult""" +181 47 loss """nssa""" +181 47 regularizer """no""" +181 47 optimizer """adadelta""" +181 47 training_loop """owa""" +181 47 negative_sampler """basic""" +181 47 evaluator """rankbased""" +181 48 dataset """kinships""" +181 48 model """distmult""" +181 48 loss """nssa""" +181 48 regularizer """no""" +181 48 optimizer """adadelta""" +181 48 training_loop """owa""" +181 48 negative_sampler """basic""" +181 48 evaluator """rankbased""" +181 49 dataset """kinships""" +181 49 model """distmult""" +181 49 loss """nssa""" +181 49 regularizer """no""" +181 49 optimizer """adadelta""" +181 49 training_loop """owa""" +181 49 negative_sampler """basic""" +181 49 evaluator """rankbased""" +181 50 dataset """kinships""" +181 50 model """distmult""" +181 50 loss """nssa""" +181 50 regularizer """no""" +181 50 optimizer """adadelta""" +181 50 training_loop """owa""" +181 50 negative_sampler """basic""" +181 50 evaluator """rankbased""" +181 51 dataset """kinships""" +181 51 model """distmult""" +181 51 loss """nssa""" +181 51 regularizer """no""" +181 51 optimizer """adadelta""" +181 51 training_loop """owa""" +181 51 negative_sampler """basic""" +181 51 evaluator """rankbased""" +181 52 dataset 
"""kinships""" +181 52 model """distmult""" +181 52 loss """nssa""" +181 52 regularizer """no""" +181 52 optimizer """adadelta""" +181 52 training_loop """owa""" +181 52 negative_sampler """basic""" +181 52 evaluator """rankbased""" +181 53 dataset """kinships""" +181 53 model """distmult""" +181 53 loss """nssa""" +181 53 regularizer """no""" +181 53 optimizer """adadelta""" +181 53 training_loop """owa""" +181 53 negative_sampler """basic""" +181 53 evaluator """rankbased""" +181 54 dataset """kinships""" +181 54 model """distmult""" +181 54 loss """nssa""" +181 54 regularizer """no""" +181 54 optimizer """adadelta""" +181 54 training_loop """owa""" +181 54 negative_sampler """basic""" +181 54 evaluator """rankbased""" +181 55 dataset """kinships""" +181 55 model """distmult""" +181 55 loss """nssa""" +181 55 regularizer """no""" +181 55 optimizer """adadelta""" +181 55 training_loop """owa""" +181 55 negative_sampler """basic""" +181 55 evaluator """rankbased""" +181 56 dataset """kinships""" +181 56 model """distmult""" +181 56 loss """nssa""" +181 56 regularizer """no""" +181 56 optimizer """adadelta""" +181 56 training_loop """owa""" +181 56 negative_sampler """basic""" +181 56 evaluator """rankbased""" +181 57 dataset """kinships""" +181 57 model """distmult""" +181 57 loss """nssa""" +181 57 regularizer """no""" +181 57 optimizer """adadelta""" +181 57 training_loop """owa""" +181 57 negative_sampler """basic""" +181 57 evaluator """rankbased""" +181 58 dataset """kinships""" +181 58 model """distmult""" +181 58 loss """nssa""" +181 58 regularizer """no""" +181 58 optimizer """adadelta""" +181 58 training_loop """owa""" +181 58 negative_sampler """basic""" +181 58 evaluator """rankbased""" +181 59 dataset """kinships""" +181 59 model """distmult""" +181 59 loss """nssa""" +181 59 regularizer """no""" +181 59 optimizer """adadelta""" +181 59 training_loop """owa""" +181 59 negative_sampler """basic""" +181 59 evaluator """rankbased""" +181 60 dataset 
"""kinships""" +181 60 model """distmult""" +181 60 loss """nssa""" +181 60 regularizer """no""" +181 60 optimizer """adadelta""" +181 60 training_loop """owa""" +181 60 negative_sampler """basic""" +181 60 evaluator """rankbased""" +181 61 dataset """kinships""" +181 61 model """distmult""" +181 61 loss """nssa""" +181 61 regularizer """no""" +181 61 optimizer """adadelta""" +181 61 training_loop """owa""" +181 61 negative_sampler """basic""" +181 61 evaluator """rankbased""" +181 62 dataset """kinships""" +181 62 model """distmult""" +181 62 loss """nssa""" +181 62 regularizer """no""" +181 62 optimizer """adadelta""" +181 62 training_loop """owa""" +181 62 negative_sampler """basic""" +181 62 evaluator """rankbased""" +181 63 dataset """kinships""" +181 63 model """distmult""" +181 63 loss """nssa""" +181 63 regularizer """no""" +181 63 optimizer """adadelta""" +181 63 training_loop """owa""" +181 63 negative_sampler """basic""" +181 63 evaluator """rankbased""" +181 64 dataset """kinships""" +181 64 model """distmult""" +181 64 loss """nssa""" +181 64 regularizer """no""" +181 64 optimizer """adadelta""" +181 64 training_loop """owa""" +181 64 negative_sampler """basic""" +181 64 evaluator """rankbased""" +181 65 dataset """kinships""" +181 65 model """distmult""" +181 65 loss """nssa""" +181 65 regularizer """no""" +181 65 optimizer """adadelta""" +181 65 training_loop """owa""" +181 65 negative_sampler """basic""" +181 65 evaluator """rankbased""" +181 66 dataset """kinships""" +181 66 model """distmult""" +181 66 loss """nssa""" +181 66 regularizer """no""" +181 66 optimizer """adadelta""" +181 66 training_loop """owa""" +181 66 negative_sampler """basic""" +181 66 evaluator """rankbased""" +181 67 dataset """kinships""" +181 67 model """distmult""" +181 67 loss """nssa""" +181 67 regularizer """no""" +181 67 optimizer """adadelta""" +181 67 training_loop """owa""" +181 67 negative_sampler """basic""" +181 67 evaluator """rankbased""" +181 68 dataset 
"""kinships""" +181 68 model """distmult""" +181 68 loss """nssa""" +181 68 regularizer """no""" +181 68 optimizer """adadelta""" +181 68 training_loop """owa""" +181 68 negative_sampler """basic""" +181 68 evaluator """rankbased""" +181 69 dataset """kinships""" +181 69 model """distmult""" +181 69 loss """nssa""" +181 69 regularizer """no""" +181 69 optimizer """adadelta""" +181 69 training_loop """owa""" +181 69 negative_sampler """basic""" +181 69 evaluator """rankbased""" +181 70 dataset """kinships""" +181 70 model """distmult""" +181 70 loss """nssa""" +181 70 regularizer """no""" +181 70 optimizer """adadelta""" +181 70 training_loop """owa""" +181 70 negative_sampler """basic""" +181 70 evaluator """rankbased""" +181 71 dataset """kinships""" +181 71 model """distmult""" +181 71 loss """nssa""" +181 71 regularizer """no""" +181 71 optimizer """adadelta""" +181 71 training_loop """owa""" +181 71 negative_sampler """basic""" +181 71 evaluator """rankbased""" +181 72 dataset """kinships""" +181 72 model """distmult""" +181 72 loss """nssa""" +181 72 regularizer """no""" +181 72 optimizer """adadelta""" +181 72 training_loop """owa""" +181 72 negative_sampler """basic""" +181 72 evaluator """rankbased""" +181 73 dataset """kinships""" +181 73 model """distmult""" +181 73 loss """nssa""" +181 73 regularizer """no""" +181 73 optimizer """adadelta""" +181 73 training_loop """owa""" +181 73 negative_sampler """basic""" +181 73 evaluator """rankbased""" +181 74 dataset """kinships""" +181 74 model """distmult""" +181 74 loss """nssa""" +181 74 regularizer """no""" +181 74 optimizer """adadelta""" +181 74 training_loop """owa""" +181 74 negative_sampler """basic""" +181 74 evaluator """rankbased""" +181 75 dataset """kinships""" +181 75 model """distmult""" +181 75 loss """nssa""" +181 75 regularizer """no""" +181 75 optimizer """adadelta""" +181 75 training_loop """owa""" +181 75 negative_sampler """basic""" +181 75 evaluator """rankbased""" +181 76 dataset 
"""kinships""" +181 76 model """distmult""" +181 76 loss """nssa""" +181 76 regularizer """no""" +181 76 optimizer """adadelta""" +181 76 training_loop """owa""" +181 76 negative_sampler """basic""" +181 76 evaluator """rankbased""" +181 77 dataset """kinships""" +181 77 model """distmult""" +181 77 loss """nssa""" +181 77 regularizer """no""" +181 77 optimizer """adadelta""" +181 77 training_loop """owa""" +181 77 negative_sampler """basic""" +181 77 evaluator """rankbased""" +181 78 dataset """kinships""" +181 78 model """distmult""" +181 78 loss """nssa""" +181 78 regularizer """no""" +181 78 optimizer """adadelta""" +181 78 training_loop """owa""" +181 78 negative_sampler """basic""" +181 78 evaluator """rankbased""" +181 79 dataset """kinships""" +181 79 model """distmult""" +181 79 loss """nssa""" +181 79 regularizer """no""" +181 79 optimizer """adadelta""" +181 79 training_loop """owa""" +181 79 negative_sampler """basic""" +181 79 evaluator """rankbased""" +181 80 dataset """kinships""" +181 80 model """distmult""" +181 80 loss """nssa""" +181 80 regularizer """no""" +181 80 optimizer """adadelta""" +181 80 training_loop """owa""" +181 80 negative_sampler """basic""" +181 80 evaluator """rankbased""" +181 81 dataset """kinships""" +181 81 model """distmult""" +181 81 loss """nssa""" +181 81 regularizer """no""" +181 81 optimizer """adadelta""" +181 81 training_loop """owa""" +181 81 negative_sampler """basic""" +181 81 evaluator """rankbased""" +181 82 dataset """kinships""" +181 82 model """distmult""" +181 82 loss """nssa""" +181 82 regularizer """no""" +181 82 optimizer """adadelta""" +181 82 training_loop """owa""" +181 82 negative_sampler """basic""" +181 82 evaluator """rankbased""" +181 83 dataset """kinships""" +181 83 model """distmult""" +181 83 loss """nssa""" +181 83 regularizer """no""" +181 83 optimizer """adadelta""" +181 83 training_loop """owa""" +181 83 negative_sampler """basic""" +181 83 evaluator """rankbased""" +181 84 dataset 
"""kinships""" +181 84 model """distmult""" +181 84 loss """nssa""" +181 84 regularizer """no""" +181 84 optimizer """adadelta""" +181 84 training_loop """owa""" +181 84 negative_sampler """basic""" +181 84 evaluator """rankbased""" +181 85 dataset """kinships""" +181 85 model """distmult""" +181 85 loss """nssa""" +181 85 regularizer """no""" +181 85 optimizer """adadelta""" +181 85 training_loop """owa""" +181 85 negative_sampler """basic""" +181 85 evaluator """rankbased""" +181 86 dataset """kinships""" +181 86 model """distmult""" +181 86 loss """nssa""" +181 86 regularizer """no""" +181 86 optimizer """adadelta""" +181 86 training_loop """owa""" +181 86 negative_sampler """basic""" +181 86 evaluator """rankbased""" +181 87 dataset """kinships""" +181 87 model """distmult""" +181 87 loss """nssa""" +181 87 regularizer """no""" +181 87 optimizer """adadelta""" +181 87 training_loop """owa""" +181 87 negative_sampler """basic""" +181 87 evaluator """rankbased""" +181 88 dataset """kinships""" +181 88 model """distmult""" +181 88 loss """nssa""" +181 88 regularizer """no""" +181 88 optimizer """adadelta""" +181 88 training_loop """owa""" +181 88 negative_sampler """basic""" +181 88 evaluator """rankbased""" +181 89 dataset """kinships""" +181 89 model """distmult""" +181 89 loss """nssa""" +181 89 regularizer """no""" +181 89 optimizer """adadelta""" +181 89 training_loop """owa""" +181 89 negative_sampler """basic""" +181 89 evaluator """rankbased""" +181 90 dataset """kinships""" +181 90 model """distmult""" +181 90 loss """nssa""" +181 90 regularizer """no""" +181 90 optimizer """adadelta""" +181 90 training_loop """owa""" +181 90 negative_sampler """basic""" +181 90 evaluator """rankbased""" +181 91 dataset """kinships""" +181 91 model """distmult""" +181 91 loss """nssa""" +181 91 regularizer """no""" +181 91 optimizer """adadelta""" +181 91 training_loop """owa""" +181 91 negative_sampler """basic""" +181 91 evaluator """rankbased""" +181 92 dataset 
"""kinships""" +181 92 model """distmult""" +181 92 loss """nssa""" +181 92 regularizer """no""" +181 92 optimizer """adadelta""" +181 92 training_loop """owa""" +181 92 negative_sampler """basic""" +181 92 evaluator """rankbased""" +181 93 dataset """kinships""" +181 93 model """distmult""" +181 93 loss """nssa""" +181 93 regularizer """no""" +181 93 optimizer """adadelta""" +181 93 training_loop """owa""" +181 93 negative_sampler """basic""" +181 93 evaluator """rankbased""" +181 94 dataset """kinships""" +181 94 model """distmult""" +181 94 loss """nssa""" +181 94 regularizer """no""" +181 94 optimizer """adadelta""" +181 94 training_loop """owa""" +181 94 negative_sampler """basic""" +181 94 evaluator """rankbased""" +181 95 dataset """kinships""" +181 95 model """distmult""" +181 95 loss """nssa""" +181 95 regularizer """no""" +181 95 optimizer """adadelta""" +181 95 training_loop """owa""" +181 95 negative_sampler """basic""" +181 95 evaluator """rankbased""" +181 96 dataset """kinships""" +181 96 model """distmult""" +181 96 loss """nssa""" +181 96 regularizer """no""" +181 96 optimizer """adadelta""" +181 96 training_loop """owa""" +181 96 negative_sampler """basic""" +181 96 evaluator """rankbased""" +181 97 dataset """kinships""" +181 97 model """distmult""" +181 97 loss """nssa""" +181 97 regularizer """no""" +181 97 optimizer """adadelta""" +181 97 training_loop """owa""" +181 97 negative_sampler """basic""" +181 97 evaluator """rankbased""" +181 98 dataset """kinships""" +181 98 model """distmult""" +181 98 loss """nssa""" +181 98 regularizer """no""" +181 98 optimizer """adadelta""" +181 98 training_loop """owa""" +181 98 negative_sampler """basic""" +181 98 evaluator """rankbased""" +181 99 dataset """kinships""" +181 99 model """distmult""" +181 99 loss """nssa""" +181 99 regularizer """no""" +181 99 optimizer """adadelta""" +181 99 training_loop """owa""" +181 99 negative_sampler """basic""" +181 99 evaluator """rankbased""" +181 100 dataset 
"""kinships""" +181 100 model """distmult""" +181 100 loss """nssa""" +181 100 regularizer """no""" +181 100 optimizer """adadelta""" +181 100 training_loop """owa""" +181 100 negative_sampler """basic""" +181 100 evaluator """rankbased""" +182 1 model.embedding_dim 0.0 +182 1 optimizer.lr 0.004098359547391069 +182 1 training.batch_size 0.0 +182 1 training.label_smoothing 0.012821377447901378 +182 2 model.embedding_dim 1.0 +182 2 optimizer.lr 0.0014873603138645542 +182 2 training.batch_size 1.0 +182 2 training.label_smoothing 0.0015893825895284305 +182 3 model.embedding_dim 0.0 +182 3 optimizer.lr 0.0013947327871464948 +182 3 training.batch_size 0.0 +182 3 training.label_smoothing 0.00485409605734814 +182 4 model.embedding_dim 0.0 +182 4 optimizer.lr 0.001985391668089018 +182 4 training.batch_size 2.0 +182 4 training.label_smoothing 0.03686640753094338 +182 5 model.embedding_dim 1.0 +182 5 optimizer.lr 0.09496722247781644 +182 5 training.batch_size 0.0 +182 5 training.label_smoothing 0.001736917976830935 +182 6 model.embedding_dim 2.0 +182 6 optimizer.lr 0.09327349531113903 +182 6 training.batch_size 1.0 +182 6 training.label_smoothing 0.22297940941503516 +182 7 model.embedding_dim 1.0 +182 7 optimizer.lr 0.04420430618690259 +182 7 training.batch_size 0.0 +182 7 training.label_smoothing 0.12676484328912277 +182 8 model.embedding_dim 2.0 +182 8 optimizer.lr 0.03245514268273811 +182 8 training.batch_size 1.0 +182 8 training.label_smoothing 0.7081127874714562 +182 9 model.embedding_dim 0.0 +182 9 optimizer.lr 0.0010044752776573907 +182 9 training.batch_size 2.0 +182 9 training.label_smoothing 0.006973274561086945 +182 10 model.embedding_dim 2.0 +182 10 optimizer.lr 0.010586247034024263 +182 10 training.batch_size 0.0 +182 10 training.label_smoothing 0.014457999859280103 +182 11 model.embedding_dim 1.0 +182 11 optimizer.lr 0.0034748638898873473 +182 11 training.batch_size 0.0 +182 11 training.label_smoothing 0.5863727643864046 +182 12 model.embedding_dim 1.0 +182 12 
optimizer.lr 0.039357491135959474 +182 12 training.batch_size 0.0 +182 12 training.label_smoothing 0.002818343288042397 +182 13 model.embedding_dim 0.0 +182 13 optimizer.lr 0.02311617004903643 +182 13 training.batch_size 0.0 +182 13 training.label_smoothing 0.00800797607725108 +182 14 model.embedding_dim 0.0 +182 14 optimizer.lr 0.008726193294587948 +182 14 training.batch_size 2.0 +182 14 training.label_smoothing 0.03316350704862733 +182 15 model.embedding_dim 2.0 +182 15 optimizer.lr 0.0028473866072679928 +182 15 training.batch_size 1.0 +182 15 training.label_smoothing 0.05040659823593524 +182 16 model.embedding_dim 1.0 +182 16 optimizer.lr 0.0056288014672318 +182 16 training.batch_size 0.0 +182 16 training.label_smoothing 0.3177758591970954 +182 17 model.embedding_dim 1.0 +182 17 optimizer.lr 0.0021863039865374287 +182 17 training.batch_size 1.0 +182 17 training.label_smoothing 0.5465849246216692 +182 18 model.embedding_dim 2.0 +182 18 optimizer.lr 0.09120057121470332 +182 18 training.batch_size 1.0 +182 18 training.label_smoothing 0.0012221145951614393 +182 19 model.embedding_dim 1.0 +182 19 optimizer.lr 0.010270024031673253 +182 19 training.batch_size 1.0 +182 19 training.label_smoothing 0.3207560254331114 +182 20 model.embedding_dim 2.0 +182 20 optimizer.lr 0.0733430350803826 +182 20 training.batch_size 2.0 +182 20 training.label_smoothing 0.6012493230530772 +182 21 model.embedding_dim 2.0 +182 21 optimizer.lr 0.040268900268443125 +182 21 training.batch_size 1.0 +182 21 training.label_smoothing 0.05242155533189165 +182 22 model.embedding_dim 0.0 +182 22 optimizer.lr 0.006855550856395042 +182 22 training.batch_size 1.0 +182 22 training.label_smoothing 0.0022416341597922495 +182 23 model.embedding_dim 1.0 +182 23 optimizer.lr 0.0025321545977229827 +182 23 training.batch_size 0.0 +182 23 training.label_smoothing 0.002345977356686759 +182 24 model.embedding_dim 1.0 +182 24 optimizer.lr 0.07345273266914952 +182 24 training.batch_size 2.0 +182 24 
training.label_smoothing 0.08336804713547505 +182 25 model.embedding_dim 1.0 +182 25 optimizer.lr 0.017525592027715146 +182 25 training.batch_size 2.0 +182 25 training.label_smoothing 0.08691601533402368 +182 26 model.embedding_dim 2.0 +182 26 optimizer.lr 0.013954301931547937 +182 26 training.batch_size 0.0 +182 26 training.label_smoothing 0.5065005700385522 +182 27 model.embedding_dim 2.0 +182 27 optimizer.lr 0.0017357926009983387 +182 27 training.batch_size 1.0 +182 27 training.label_smoothing 0.0012370864181899973 +182 28 model.embedding_dim 2.0 +182 28 optimizer.lr 0.025172227693199885 +182 28 training.batch_size 2.0 +182 28 training.label_smoothing 0.3405223335739247 +182 29 model.embedding_dim 2.0 +182 29 optimizer.lr 0.01089728530268096 +182 29 training.batch_size 1.0 +182 29 training.label_smoothing 0.009506205593628417 +182 30 model.embedding_dim 2.0 +182 30 optimizer.lr 0.025738446690000142 +182 30 training.batch_size 2.0 +182 30 training.label_smoothing 0.010619131685606647 +182 31 model.embedding_dim 1.0 +182 31 optimizer.lr 0.013108715676629574 +182 31 training.batch_size 1.0 +182 31 training.label_smoothing 0.0025887792692062396 +182 32 model.embedding_dim 1.0 +182 32 optimizer.lr 0.01589656490195204 +182 32 training.batch_size 0.0 +182 32 training.label_smoothing 0.013502039889052622 +182 33 model.embedding_dim 1.0 +182 33 optimizer.lr 0.001356453886644645 +182 33 training.batch_size 0.0 +182 33 training.label_smoothing 0.006558460836186155 +182 34 model.embedding_dim 2.0 +182 34 optimizer.lr 0.0010427470261581554 +182 34 training.batch_size 2.0 +182 34 training.label_smoothing 0.0019041706384559775 +182 35 model.embedding_dim 1.0 +182 35 optimizer.lr 0.008179659487905866 +182 35 training.batch_size 0.0 +182 35 training.label_smoothing 0.06767325066220561 +182 36 model.embedding_dim 2.0 +182 36 optimizer.lr 0.0028375792146778578 +182 36 training.batch_size 1.0 +182 36 training.label_smoothing 0.0010276297473023892 +182 37 model.embedding_dim 2.0 
+182 37 optimizer.lr 0.002804491879438543 +182 37 training.batch_size 2.0 +182 37 training.label_smoothing 0.008344830243777733 +182 38 model.embedding_dim 0.0 +182 38 optimizer.lr 0.01885751555714728 +182 38 training.batch_size 2.0 +182 38 training.label_smoothing 0.0027705303314127256 +182 39 model.embedding_dim 1.0 +182 39 optimizer.lr 0.0014309078770787023 +182 39 training.batch_size 1.0 +182 39 training.label_smoothing 0.6898043042207538 +182 40 model.embedding_dim 2.0 +182 40 optimizer.lr 0.09191471236124485 +182 40 training.batch_size 0.0 +182 40 training.label_smoothing 0.003163222089536987 +182 41 model.embedding_dim 2.0 +182 41 optimizer.lr 0.004938173358743636 +182 41 training.batch_size 2.0 +182 41 training.label_smoothing 0.003748926583951528 +182 42 model.embedding_dim 2.0 +182 42 optimizer.lr 0.00256856864268168 +182 42 training.batch_size 0.0 +182 42 training.label_smoothing 0.0011520198966379807 +182 43 model.embedding_dim 1.0 +182 43 optimizer.lr 0.025657163982436875 +182 43 training.batch_size 2.0 +182 43 training.label_smoothing 0.01618645482748796 +182 44 model.embedding_dim 1.0 +182 44 optimizer.lr 0.004963858667410152 +182 44 training.batch_size 2.0 +182 44 training.label_smoothing 0.03536232206159316 +182 45 model.embedding_dim 2.0 +182 45 optimizer.lr 0.0369377796765047 +182 45 training.batch_size 1.0 +182 45 training.label_smoothing 0.6954616924832675 +182 46 model.embedding_dim 0.0 +182 46 optimizer.lr 0.02503130799529011 +182 46 training.batch_size 1.0 +182 46 training.label_smoothing 0.002330344919496049 +182 47 model.embedding_dim 0.0 +182 47 optimizer.lr 0.002415345009926908 +182 47 training.batch_size 0.0 +182 47 training.label_smoothing 0.00917353028960474 +182 48 model.embedding_dim 2.0 +182 48 optimizer.lr 0.0879821399942354 +182 48 training.batch_size 2.0 +182 48 training.label_smoothing 0.378948330782741 +182 49 model.embedding_dim 1.0 +182 49 optimizer.lr 0.0064889058072051895 +182 49 training.batch_size 2.0 +182 49 
training.label_smoothing 0.006848608900058979 +182 50 model.embedding_dim 2.0 +182 50 optimizer.lr 0.0025290317229211537 +182 50 training.batch_size 2.0 +182 50 training.label_smoothing 0.02753287891362059 +182 51 model.embedding_dim 2.0 +182 51 optimizer.lr 0.004162331939791878 +182 51 training.batch_size 1.0 +182 51 training.label_smoothing 0.1854418013635408 +182 52 model.embedding_dim 2.0 +182 52 optimizer.lr 0.012063697212216165 +182 52 training.batch_size 1.0 +182 52 training.label_smoothing 0.16208491366375521 +182 53 model.embedding_dim 1.0 +182 53 optimizer.lr 0.018372115969720635 +182 53 training.batch_size 2.0 +182 53 training.label_smoothing 0.1400855650603393 +182 54 model.embedding_dim 2.0 +182 54 optimizer.lr 0.0018131102609105177 +182 54 training.batch_size 1.0 +182 54 training.label_smoothing 0.02102219049105938 +182 55 model.embedding_dim 2.0 +182 55 optimizer.lr 0.03982189247263435 +182 55 training.batch_size 0.0 +182 55 training.label_smoothing 0.3205548799372879 +182 56 model.embedding_dim 1.0 +182 56 optimizer.lr 0.019290013382776132 +182 56 training.batch_size 2.0 +182 56 training.label_smoothing 0.06007642561102435 +182 57 model.embedding_dim 1.0 +182 57 optimizer.lr 0.08993409438690499 +182 57 training.batch_size 2.0 +182 57 training.label_smoothing 0.04527726998016057 +182 58 model.embedding_dim 2.0 +182 58 optimizer.lr 0.0015427999914730429 +182 58 training.batch_size 1.0 +182 58 training.label_smoothing 0.00873109342579431 +182 59 model.embedding_dim 0.0 +182 59 optimizer.lr 0.001966356907649772 +182 59 training.batch_size 1.0 +182 59 training.label_smoothing 0.37335453988692596 +182 60 model.embedding_dim 1.0 +182 60 optimizer.lr 0.006745459555113108 +182 60 training.batch_size 1.0 +182 60 training.label_smoothing 0.04985648515763041 +182 61 model.embedding_dim 2.0 +182 61 optimizer.lr 0.01927783637241385 +182 61 training.batch_size 1.0 +182 61 training.label_smoothing 0.0022507507702569994 +182 62 model.embedding_dim 0.0 +182 62 
optimizer.lr 0.015343247116522876 +182 62 training.batch_size 1.0 +182 62 training.label_smoothing 0.0035753995965497744 +182 63 model.embedding_dim 1.0 +182 63 optimizer.lr 0.0030528879636560553 +182 63 training.batch_size 2.0 +182 63 training.label_smoothing 0.11439976782508636 +182 64 model.embedding_dim 0.0 +182 64 optimizer.lr 0.004382982646584402 +182 64 training.batch_size 0.0 +182 64 training.label_smoothing 0.011773801262966635 +182 65 model.embedding_dim 2.0 +182 65 optimizer.lr 0.0032292481801520613 +182 65 training.batch_size 2.0 +182 65 training.label_smoothing 0.4581630312674334 +182 66 model.embedding_dim 0.0 +182 66 optimizer.lr 0.0013836642840202903 +182 66 training.batch_size 2.0 +182 66 training.label_smoothing 0.0028127131633256673 +182 67 model.embedding_dim 0.0 +182 67 optimizer.lr 0.020291951045602028 +182 67 training.batch_size 2.0 +182 67 training.label_smoothing 0.01711124818122389 +182 68 model.embedding_dim 1.0 +182 68 optimizer.lr 0.05268514078395673 +182 68 training.batch_size 0.0 +182 68 training.label_smoothing 0.09785476298234887 +182 69 model.embedding_dim 1.0 +182 69 optimizer.lr 0.0014465402224886452 +182 69 training.batch_size 0.0 +182 69 training.label_smoothing 0.5130339888213732 +182 70 model.embedding_dim 1.0 +182 70 optimizer.lr 0.013891488269068069 +182 70 training.batch_size 2.0 +182 70 training.label_smoothing 0.0017360884304557327 +182 71 model.embedding_dim 0.0 +182 71 optimizer.lr 0.0335570368153868 +182 71 training.batch_size 2.0 +182 71 training.label_smoothing 0.04822763735830779 +182 72 model.embedding_dim 0.0 +182 72 optimizer.lr 0.001049153942463448 +182 72 training.batch_size 0.0 +182 72 training.label_smoothing 0.0016964994950572274 +182 73 model.embedding_dim 0.0 +182 73 optimizer.lr 0.022415723799023946 +182 73 training.batch_size 0.0 +182 73 training.label_smoothing 0.0054548717056832055 +182 74 model.embedding_dim 1.0 +182 74 optimizer.lr 0.008106249054143177 +182 74 training.batch_size 2.0 +182 74 
training.label_smoothing 0.028246146995092782 +182 75 model.embedding_dim 2.0 +182 75 optimizer.lr 0.0023102006924988444 +182 75 training.batch_size 0.0 +182 75 training.label_smoothing 0.7707042651163899 +182 76 model.embedding_dim 0.0 +182 76 optimizer.lr 0.0039883498271253235 +182 76 training.batch_size 2.0 +182 76 training.label_smoothing 0.06707936356137742 +182 77 model.embedding_dim 1.0 +182 77 optimizer.lr 0.09251396439212804 +182 77 training.batch_size 0.0 +182 77 training.label_smoothing 0.005854798816757823 +182 78 model.embedding_dim 2.0 +182 78 optimizer.lr 0.0062428052889903825 +182 78 training.batch_size 0.0 +182 78 training.label_smoothing 0.00466428833668534 +182 79 model.embedding_dim 2.0 +182 79 optimizer.lr 0.007425472836212748 +182 79 training.batch_size 0.0 +182 79 training.label_smoothing 0.12840281092574907 +182 80 model.embedding_dim 2.0 +182 80 optimizer.lr 0.09889810921475352 +182 80 training.batch_size 0.0 +182 80 training.label_smoothing 0.008276094800670927 +182 81 model.embedding_dim 2.0 +182 81 optimizer.lr 0.004276130957356382 +182 81 training.batch_size 1.0 +182 81 training.label_smoothing 0.0013597893175471838 +182 82 model.embedding_dim 0.0 +182 82 optimizer.lr 0.0012131164597390393 +182 82 training.batch_size 1.0 +182 82 training.label_smoothing 0.025271741832341262 +182 83 model.embedding_dim 1.0 +182 83 optimizer.lr 0.002110543467243526 +182 83 training.batch_size 0.0 +182 83 training.label_smoothing 0.007884301554567485 +182 84 model.embedding_dim 1.0 +182 84 optimizer.lr 0.00513713069465795 +182 84 training.batch_size 2.0 +182 84 training.label_smoothing 0.07060905139940775 +182 85 model.embedding_dim 1.0 +182 85 optimizer.lr 0.013018960722394696 +182 85 training.batch_size 1.0 +182 85 training.label_smoothing 0.002996585501022964 +182 86 model.embedding_dim 1.0 +182 86 optimizer.lr 0.0014076205256740603 +182 86 training.batch_size 1.0 +182 86 training.label_smoothing 0.0011683874291220336 +182 87 model.embedding_dim 2.0 
+182 87 optimizer.lr 0.014663004158277485 +182 87 training.batch_size 0.0 +182 87 training.label_smoothing 0.0034336578014965407 +182 88 model.embedding_dim 1.0 +182 88 optimizer.lr 0.08965304226141568 +182 88 training.batch_size 0.0 +182 88 training.label_smoothing 0.0017234871723596924 +182 89 model.embedding_dim 2.0 +182 89 optimizer.lr 0.04290011109777824 +182 89 training.batch_size 0.0 +182 89 training.label_smoothing 0.13629225924788232 +182 90 model.embedding_dim 1.0 +182 90 optimizer.lr 0.06266549865356427 +182 90 training.batch_size 2.0 +182 90 training.label_smoothing 0.752531178816183 +182 91 model.embedding_dim 2.0 +182 91 optimizer.lr 0.0012213514891854615 +182 91 training.batch_size 2.0 +182 91 training.label_smoothing 0.006128016070283236 +182 92 model.embedding_dim 0.0 +182 92 optimizer.lr 0.021074350635571375 +182 92 training.batch_size 1.0 +182 92 training.label_smoothing 0.1696424393649112 +182 93 model.embedding_dim 0.0 +182 93 optimizer.lr 0.051661769663011885 +182 93 training.batch_size 2.0 +182 93 training.label_smoothing 0.00844370724564144 +182 94 model.embedding_dim 0.0 +182 94 optimizer.lr 0.00327286713983375 +182 94 training.batch_size 0.0 +182 94 training.label_smoothing 0.013011511983918114 +182 95 model.embedding_dim 1.0 +182 95 optimizer.lr 0.08079206308605477 +182 95 training.batch_size 1.0 +182 95 training.label_smoothing 0.15177268035652824 +182 96 model.embedding_dim 0.0 +182 96 optimizer.lr 0.06607548109512404 +182 96 training.batch_size 2.0 +182 96 training.label_smoothing 0.05399037539077029 +182 97 model.embedding_dim 1.0 +182 97 optimizer.lr 0.08553266688993663 +182 97 training.batch_size 2.0 +182 97 training.label_smoothing 0.0015835060610173632 +182 98 model.embedding_dim 1.0 +182 98 optimizer.lr 0.0012411111229134145 +182 98 training.batch_size 2.0 +182 98 training.label_smoothing 0.013729178749950634 +182 99 model.embedding_dim 0.0 +182 99 optimizer.lr 0.0578535194676117 +182 99 training.batch_size 2.0 +182 99 
training.label_smoothing 0.08232663746848132 +182 100 model.embedding_dim 2.0 +182 100 optimizer.lr 0.0018806763708857377 +182 100 training.batch_size 1.0 +182 100 training.label_smoothing 0.007958868474419106 +182 1 dataset """kinships""" +182 1 model """distmult""" +182 1 loss """bceaftersigmoid""" +182 1 regularizer """no""" +182 1 optimizer """adam""" +182 1 training_loop """lcwa""" +182 1 evaluator """rankbased""" +182 2 dataset """kinships""" +182 2 model """distmult""" +182 2 loss """bceaftersigmoid""" +182 2 regularizer """no""" +182 2 optimizer """adam""" +182 2 training_loop """lcwa""" +182 2 evaluator """rankbased""" +182 3 dataset """kinships""" +182 3 model """distmult""" +182 3 loss """bceaftersigmoid""" +182 3 regularizer """no""" +182 3 optimizer """adam""" +182 3 training_loop """lcwa""" +182 3 evaluator """rankbased""" +182 4 dataset """kinships""" +182 4 model """distmult""" +182 4 loss """bceaftersigmoid""" +182 4 regularizer """no""" +182 4 optimizer """adam""" +182 4 training_loop """lcwa""" +182 4 evaluator """rankbased""" +182 5 dataset """kinships""" +182 5 model """distmult""" +182 5 loss """bceaftersigmoid""" +182 5 regularizer """no""" +182 5 optimizer """adam""" +182 5 training_loop """lcwa""" +182 5 evaluator """rankbased""" +182 6 dataset """kinships""" +182 6 model """distmult""" +182 6 loss """bceaftersigmoid""" +182 6 regularizer """no""" +182 6 optimizer """adam""" +182 6 training_loop """lcwa""" +182 6 evaluator """rankbased""" +182 7 dataset """kinships""" +182 7 model """distmult""" +182 7 loss """bceaftersigmoid""" +182 7 regularizer """no""" +182 7 optimizer """adam""" +182 7 training_loop """lcwa""" +182 7 evaluator """rankbased""" +182 8 dataset """kinships""" +182 8 model """distmult""" +182 8 loss """bceaftersigmoid""" +182 8 regularizer """no""" +182 8 optimizer """adam""" +182 8 training_loop """lcwa""" +182 8 evaluator """rankbased""" +182 9 dataset """kinships""" +182 9 model """distmult""" +182 9 loss 
"""bceaftersigmoid""" +182 9 regularizer """no""" +182 9 optimizer """adam""" +182 9 training_loop """lcwa""" +182 9 evaluator """rankbased""" +182 10 dataset """kinships""" +182 10 model """distmult""" +182 10 loss """bceaftersigmoid""" +182 10 regularizer """no""" +182 10 optimizer """adam""" +182 10 training_loop """lcwa""" +182 10 evaluator """rankbased""" +182 11 dataset """kinships""" +182 11 model """distmult""" +182 11 loss """bceaftersigmoid""" +182 11 regularizer """no""" +182 11 optimizer """adam""" +182 11 training_loop """lcwa""" +182 11 evaluator """rankbased""" +182 12 dataset """kinships""" +182 12 model """distmult""" +182 12 loss """bceaftersigmoid""" +182 12 regularizer """no""" +182 12 optimizer """adam""" +182 12 training_loop """lcwa""" +182 12 evaluator """rankbased""" +182 13 dataset """kinships""" +182 13 model """distmult""" +182 13 loss """bceaftersigmoid""" +182 13 regularizer """no""" +182 13 optimizer """adam""" +182 13 training_loop """lcwa""" +182 13 evaluator """rankbased""" +182 14 dataset """kinships""" +182 14 model """distmult""" +182 14 loss """bceaftersigmoid""" +182 14 regularizer """no""" +182 14 optimizer """adam""" +182 14 training_loop """lcwa""" +182 14 evaluator """rankbased""" +182 15 dataset """kinships""" +182 15 model """distmult""" +182 15 loss """bceaftersigmoid""" +182 15 regularizer """no""" +182 15 optimizer """adam""" +182 15 training_loop """lcwa""" +182 15 evaluator """rankbased""" +182 16 dataset """kinships""" +182 16 model """distmult""" +182 16 loss """bceaftersigmoid""" +182 16 regularizer """no""" +182 16 optimizer """adam""" +182 16 training_loop """lcwa""" +182 16 evaluator """rankbased""" +182 17 dataset """kinships""" +182 17 model """distmult""" +182 17 loss """bceaftersigmoid""" +182 17 regularizer """no""" +182 17 optimizer """adam""" +182 17 training_loop """lcwa""" +182 17 evaluator """rankbased""" +182 18 dataset """kinships""" +182 18 model """distmult""" +182 18 loss """bceaftersigmoid""" 
+182 18 regularizer """no""" +182 18 optimizer """adam""" +182 18 training_loop """lcwa""" +182 18 evaluator """rankbased""" +182 19 dataset """kinships""" +182 19 model """distmult""" +182 19 loss """bceaftersigmoid""" +182 19 regularizer """no""" +182 19 optimizer """adam""" +182 19 training_loop """lcwa""" +182 19 evaluator """rankbased""" +182 20 dataset """kinships""" +182 20 model """distmult""" +182 20 loss """bceaftersigmoid""" +182 20 regularizer """no""" +182 20 optimizer """adam""" +182 20 training_loop """lcwa""" +182 20 evaluator """rankbased""" +182 21 dataset """kinships""" +182 21 model """distmult""" +182 21 loss """bceaftersigmoid""" +182 21 regularizer """no""" +182 21 optimizer """adam""" +182 21 training_loop """lcwa""" +182 21 evaluator """rankbased""" +182 22 dataset """kinships""" +182 22 model """distmult""" +182 22 loss """bceaftersigmoid""" +182 22 regularizer """no""" +182 22 optimizer """adam""" +182 22 training_loop """lcwa""" +182 22 evaluator """rankbased""" +182 23 dataset """kinships""" +182 23 model """distmult""" +182 23 loss """bceaftersigmoid""" +182 23 regularizer """no""" +182 23 optimizer """adam""" +182 23 training_loop """lcwa""" +182 23 evaluator """rankbased""" +182 24 dataset """kinships""" +182 24 model """distmult""" +182 24 loss """bceaftersigmoid""" +182 24 regularizer """no""" +182 24 optimizer """adam""" +182 24 training_loop """lcwa""" +182 24 evaluator """rankbased""" +182 25 dataset """kinships""" +182 25 model """distmult""" +182 25 loss """bceaftersigmoid""" +182 25 regularizer """no""" +182 25 optimizer """adam""" +182 25 training_loop """lcwa""" +182 25 evaluator """rankbased""" +182 26 dataset """kinships""" +182 26 model """distmult""" +182 26 loss """bceaftersigmoid""" +182 26 regularizer """no""" +182 26 optimizer """adam""" +182 26 training_loop """lcwa""" +182 26 evaluator """rankbased""" +182 27 dataset """kinships""" +182 27 model """distmult""" +182 27 loss """bceaftersigmoid""" +182 27 regularizer 
"""no""" +182 27 optimizer """adam""" +182 27 training_loop """lcwa""" +182 27 evaluator """rankbased""" +182 28 dataset """kinships""" +182 28 model """distmult""" +182 28 loss """bceaftersigmoid""" +182 28 regularizer """no""" +182 28 optimizer """adam""" +182 28 training_loop """lcwa""" +182 28 evaluator """rankbased""" +182 29 dataset """kinships""" +182 29 model """distmult""" +182 29 loss """bceaftersigmoid""" +182 29 regularizer """no""" +182 29 optimizer """adam""" +182 29 training_loop """lcwa""" +182 29 evaluator """rankbased""" +182 30 dataset """kinships""" +182 30 model """distmult""" +182 30 loss """bceaftersigmoid""" +182 30 regularizer """no""" +182 30 optimizer """adam""" +182 30 training_loop """lcwa""" +182 30 evaluator """rankbased""" +182 31 dataset """kinships""" +182 31 model """distmult""" +182 31 loss """bceaftersigmoid""" +182 31 regularizer """no""" +182 31 optimizer """adam""" +182 31 training_loop """lcwa""" +182 31 evaluator """rankbased""" +182 32 dataset """kinships""" +182 32 model """distmult""" +182 32 loss """bceaftersigmoid""" +182 32 regularizer """no""" +182 32 optimizer """adam""" +182 32 training_loop """lcwa""" +182 32 evaluator """rankbased""" +182 33 dataset """kinships""" +182 33 model """distmult""" +182 33 loss """bceaftersigmoid""" +182 33 regularizer """no""" +182 33 optimizer """adam""" +182 33 training_loop """lcwa""" +182 33 evaluator """rankbased""" +182 34 dataset """kinships""" +182 34 model """distmult""" +182 34 loss """bceaftersigmoid""" +182 34 regularizer """no""" +182 34 optimizer """adam""" +182 34 training_loop """lcwa""" +182 34 evaluator """rankbased""" +182 35 dataset """kinships""" +182 35 model """distmult""" +182 35 loss """bceaftersigmoid""" +182 35 regularizer """no""" +182 35 optimizer """adam""" +182 35 training_loop """lcwa""" +182 35 evaluator """rankbased""" +182 36 dataset """kinships""" +182 36 model """distmult""" +182 36 loss """bceaftersigmoid""" +182 36 regularizer """no""" +182 36 
optimizer """adam""" +182 36 training_loop """lcwa""" +182 36 evaluator """rankbased""" +182 37 dataset """kinships""" +182 37 model """distmult""" +182 37 loss """bceaftersigmoid""" +182 37 regularizer """no""" +182 37 optimizer """adam""" +182 37 training_loop """lcwa""" +182 37 evaluator """rankbased""" +182 38 dataset """kinships""" +182 38 model """distmult""" +182 38 loss """bceaftersigmoid""" +182 38 regularizer """no""" +182 38 optimizer """adam""" +182 38 training_loop """lcwa""" +182 38 evaluator """rankbased""" +182 39 dataset """kinships""" +182 39 model """distmult""" +182 39 loss """bceaftersigmoid""" +182 39 regularizer """no""" +182 39 optimizer """adam""" +182 39 training_loop """lcwa""" +182 39 evaluator """rankbased""" +182 40 dataset """kinships""" +182 40 model """distmult""" +182 40 loss """bceaftersigmoid""" +182 40 regularizer """no""" +182 40 optimizer """adam""" +182 40 training_loop """lcwa""" +182 40 evaluator """rankbased""" +182 41 dataset """kinships""" +182 41 model """distmult""" +182 41 loss """bceaftersigmoid""" +182 41 regularizer """no""" +182 41 optimizer """adam""" +182 41 training_loop """lcwa""" +182 41 evaluator """rankbased""" +182 42 dataset """kinships""" +182 42 model """distmult""" +182 42 loss """bceaftersigmoid""" +182 42 regularizer """no""" +182 42 optimizer """adam""" +182 42 training_loop """lcwa""" +182 42 evaluator """rankbased""" +182 43 dataset """kinships""" +182 43 model """distmult""" +182 43 loss """bceaftersigmoid""" +182 43 regularizer """no""" +182 43 optimizer """adam""" +182 43 training_loop """lcwa""" +182 43 evaluator """rankbased""" +182 44 dataset """kinships""" +182 44 model """distmult""" +182 44 loss """bceaftersigmoid""" +182 44 regularizer """no""" +182 44 optimizer """adam""" +182 44 training_loop """lcwa""" +182 44 evaluator """rankbased""" +182 45 dataset """kinships""" +182 45 model """distmult""" +182 45 loss """bceaftersigmoid""" +182 45 regularizer """no""" +182 45 optimizer 
"""adam""" +182 45 training_loop """lcwa""" +182 45 evaluator """rankbased""" +182 46 dataset """kinships""" +182 46 model """distmult""" +182 46 loss """bceaftersigmoid""" +182 46 regularizer """no""" +182 46 optimizer """adam""" +182 46 training_loop """lcwa""" +182 46 evaluator """rankbased""" +182 47 dataset """kinships""" +182 47 model """distmult""" +182 47 loss """bceaftersigmoid""" +182 47 regularizer """no""" +182 47 optimizer """adam""" +182 47 training_loop """lcwa""" +182 47 evaluator """rankbased""" +182 48 dataset """kinships""" +182 48 model """distmult""" +182 48 loss """bceaftersigmoid""" +182 48 regularizer """no""" +182 48 optimizer """adam""" +182 48 training_loop """lcwa""" +182 48 evaluator """rankbased""" +182 49 dataset """kinships""" +182 49 model """distmult""" +182 49 loss """bceaftersigmoid""" +182 49 regularizer """no""" +182 49 optimizer """adam""" +182 49 training_loop """lcwa""" +182 49 evaluator """rankbased""" +182 50 dataset """kinships""" +182 50 model """distmult""" +182 50 loss """bceaftersigmoid""" +182 50 regularizer """no""" +182 50 optimizer """adam""" +182 50 training_loop """lcwa""" +182 50 evaluator """rankbased""" +182 51 dataset """kinships""" +182 51 model """distmult""" +182 51 loss """bceaftersigmoid""" +182 51 regularizer """no""" +182 51 optimizer """adam""" +182 51 training_loop """lcwa""" +182 51 evaluator """rankbased""" +182 52 dataset """kinships""" +182 52 model """distmult""" +182 52 loss """bceaftersigmoid""" +182 52 regularizer """no""" +182 52 optimizer """adam""" +182 52 training_loop """lcwa""" +182 52 evaluator """rankbased""" +182 53 dataset """kinships""" +182 53 model """distmult""" +182 53 loss """bceaftersigmoid""" +182 53 regularizer """no""" +182 53 optimizer """adam""" +182 53 training_loop """lcwa""" +182 53 evaluator """rankbased""" +182 54 dataset """kinships""" +182 54 model """distmult""" +182 54 loss """bceaftersigmoid""" +182 54 regularizer """no""" +182 54 optimizer """adam""" +182 54 
training_loop """lcwa""" +182 54 evaluator """rankbased""" +182 55 dataset """kinships""" +182 55 model """distmult""" +182 55 loss """bceaftersigmoid""" +182 55 regularizer """no""" +182 55 optimizer """adam""" +182 55 training_loop """lcwa""" +182 55 evaluator """rankbased""" +182 56 dataset """kinships""" +182 56 model """distmult""" +182 56 loss """bceaftersigmoid""" +182 56 regularizer """no""" +182 56 optimizer """adam""" +182 56 training_loop """lcwa""" +182 56 evaluator """rankbased""" +182 57 dataset """kinships""" +182 57 model """distmult""" +182 57 loss """bceaftersigmoid""" +182 57 regularizer """no""" +182 57 optimizer """adam""" +182 57 training_loop """lcwa""" +182 57 evaluator """rankbased""" +182 58 dataset """kinships""" +182 58 model """distmult""" +182 58 loss """bceaftersigmoid""" +182 58 regularizer """no""" +182 58 optimizer """adam""" +182 58 training_loop """lcwa""" +182 58 evaluator """rankbased""" +182 59 dataset """kinships""" +182 59 model """distmult""" +182 59 loss """bceaftersigmoid""" +182 59 regularizer """no""" +182 59 optimizer """adam""" +182 59 training_loop """lcwa""" +182 59 evaluator """rankbased""" +182 60 dataset """kinships""" +182 60 model """distmult""" +182 60 loss """bceaftersigmoid""" +182 60 regularizer """no""" +182 60 optimizer """adam""" +182 60 training_loop """lcwa""" +182 60 evaluator """rankbased""" +182 61 dataset """kinships""" +182 61 model """distmult""" +182 61 loss """bceaftersigmoid""" +182 61 regularizer """no""" +182 61 optimizer """adam""" +182 61 training_loop """lcwa""" +182 61 evaluator """rankbased""" +182 62 dataset """kinships""" +182 62 model """distmult""" +182 62 loss """bceaftersigmoid""" +182 62 regularizer """no""" +182 62 optimizer """adam""" +182 62 training_loop """lcwa""" +182 62 evaluator """rankbased""" +182 63 dataset """kinships""" +182 63 model """distmult""" +182 63 loss """bceaftersigmoid""" +182 63 regularizer """no""" +182 63 optimizer """adam""" +182 63 training_loop 
"""lcwa""" +182 63 evaluator """rankbased""" +182 64 dataset """kinships""" +182 64 model """distmult""" +182 64 loss """bceaftersigmoid""" +182 64 regularizer """no""" +182 64 optimizer """adam""" +182 64 training_loop """lcwa""" +182 64 evaluator """rankbased""" +182 65 dataset """kinships""" +182 65 model """distmult""" +182 65 loss """bceaftersigmoid""" +182 65 regularizer """no""" +182 65 optimizer """adam""" +182 65 training_loop """lcwa""" +182 65 evaluator """rankbased""" +182 66 dataset """kinships""" +182 66 model """distmult""" +182 66 loss """bceaftersigmoid""" +182 66 regularizer """no""" +182 66 optimizer """adam""" +182 66 training_loop """lcwa""" +182 66 evaluator """rankbased""" +182 67 dataset """kinships""" +182 67 model """distmult""" +182 67 loss """bceaftersigmoid""" +182 67 regularizer """no""" +182 67 optimizer """adam""" +182 67 training_loop """lcwa""" +182 67 evaluator """rankbased""" +182 68 dataset """kinships""" +182 68 model """distmult""" +182 68 loss """bceaftersigmoid""" +182 68 regularizer """no""" +182 68 optimizer """adam""" +182 68 training_loop """lcwa""" +182 68 evaluator """rankbased""" +182 69 dataset """kinships""" +182 69 model """distmult""" +182 69 loss """bceaftersigmoid""" +182 69 regularizer """no""" +182 69 optimizer """adam""" +182 69 training_loop """lcwa""" +182 69 evaluator """rankbased""" +182 70 dataset """kinships""" +182 70 model """distmult""" +182 70 loss """bceaftersigmoid""" +182 70 regularizer """no""" +182 70 optimizer """adam""" +182 70 training_loop """lcwa""" +182 70 evaluator """rankbased""" +182 71 dataset """kinships""" +182 71 model """distmult""" +182 71 loss """bceaftersigmoid""" +182 71 regularizer """no""" +182 71 optimizer """adam""" +182 71 training_loop """lcwa""" +182 71 evaluator """rankbased""" +182 72 dataset """kinships""" +182 72 model """distmult""" +182 72 loss """bceaftersigmoid""" +182 72 regularizer """no""" +182 72 optimizer """adam""" +182 72 training_loop """lcwa""" +182 72 
evaluator """rankbased""" +182 73 dataset """kinships""" +182 73 model """distmult""" +182 73 loss """bceaftersigmoid""" +182 73 regularizer """no""" +182 73 optimizer """adam""" +182 73 training_loop """lcwa""" +182 73 evaluator """rankbased""" +182 74 dataset """kinships""" +182 74 model """distmult""" +182 74 loss """bceaftersigmoid""" +182 74 regularizer """no""" +182 74 optimizer """adam""" +182 74 training_loop """lcwa""" +182 74 evaluator """rankbased""" +182 75 dataset """kinships""" +182 75 model """distmult""" +182 75 loss """bceaftersigmoid""" +182 75 regularizer """no""" +182 75 optimizer """adam""" +182 75 training_loop """lcwa""" +182 75 evaluator """rankbased""" +182 76 dataset """kinships""" +182 76 model """distmult""" +182 76 loss """bceaftersigmoid""" +182 76 regularizer """no""" +182 76 optimizer """adam""" +182 76 training_loop """lcwa""" +182 76 evaluator """rankbased""" +182 77 dataset """kinships""" +182 77 model """distmult""" +182 77 loss """bceaftersigmoid""" +182 77 regularizer """no""" +182 77 optimizer """adam""" +182 77 training_loop """lcwa""" +182 77 evaluator """rankbased""" +182 78 dataset """kinships""" +182 78 model """distmult""" +182 78 loss """bceaftersigmoid""" +182 78 regularizer """no""" +182 78 optimizer """adam""" +182 78 training_loop """lcwa""" +182 78 evaluator """rankbased""" +182 79 dataset """kinships""" +182 79 model """distmult""" +182 79 loss """bceaftersigmoid""" +182 79 regularizer """no""" +182 79 optimizer """adam""" +182 79 training_loop """lcwa""" +182 79 evaluator """rankbased""" +182 80 dataset """kinships""" +182 80 model """distmult""" +182 80 loss """bceaftersigmoid""" +182 80 regularizer """no""" +182 80 optimizer """adam""" +182 80 training_loop """lcwa""" +182 80 evaluator """rankbased""" +182 81 dataset """kinships""" +182 81 model """distmult""" +182 81 loss """bceaftersigmoid""" +182 81 regularizer """no""" +182 81 optimizer """adam""" +182 81 training_loop """lcwa""" +182 81 evaluator 
"""rankbased""" +182 82 dataset """kinships""" +182 82 model """distmult""" +182 82 loss """bceaftersigmoid""" +182 82 regularizer """no""" +182 82 optimizer """adam""" +182 82 training_loop """lcwa""" +182 82 evaluator """rankbased""" +182 83 dataset """kinships""" +182 83 model """distmult""" +182 83 loss """bceaftersigmoid""" +182 83 regularizer """no""" +182 83 optimizer """adam""" +182 83 training_loop """lcwa""" +182 83 evaluator """rankbased""" +182 84 dataset """kinships""" +182 84 model """distmult""" +182 84 loss """bceaftersigmoid""" +182 84 regularizer """no""" +182 84 optimizer """adam""" +182 84 training_loop """lcwa""" +182 84 evaluator """rankbased""" +182 85 dataset """kinships""" +182 85 model """distmult""" +182 85 loss """bceaftersigmoid""" +182 85 regularizer """no""" +182 85 optimizer """adam""" +182 85 training_loop """lcwa""" +182 85 evaluator """rankbased""" +182 86 dataset """kinships""" +182 86 model """distmult""" +182 86 loss """bceaftersigmoid""" +182 86 regularizer """no""" +182 86 optimizer """adam""" +182 86 training_loop """lcwa""" +182 86 evaluator """rankbased""" +182 87 dataset """kinships""" +182 87 model """distmult""" +182 87 loss """bceaftersigmoid""" +182 87 regularizer """no""" +182 87 optimizer """adam""" +182 87 training_loop """lcwa""" +182 87 evaluator """rankbased""" +182 88 dataset """kinships""" +182 88 model """distmult""" +182 88 loss """bceaftersigmoid""" +182 88 regularizer """no""" +182 88 optimizer """adam""" +182 88 training_loop """lcwa""" +182 88 evaluator """rankbased""" +182 89 dataset """kinships""" +182 89 model """distmult""" +182 89 loss """bceaftersigmoid""" +182 89 regularizer """no""" +182 89 optimizer """adam""" +182 89 training_loop """lcwa""" +182 89 evaluator """rankbased""" +182 90 dataset """kinships""" +182 90 model """distmult""" +182 90 loss """bceaftersigmoid""" +182 90 regularizer """no""" +182 90 optimizer """adam""" +182 90 training_loop """lcwa""" +182 90 evaluator """rankbased""" 
+182 91 dataset """kinships""" +182 91 model """distmult""" +182 91 loss """bceaftersigmoid""" +182 91 regularizer """no""" +182 91 optimizer """adam""" +182 91 training_loop """lcwa""" +182 91 evaluator """rankbased""" +182 92 dataset """kinships""" +182 92 model """distmult""" +182 92 loss """bceaftersigmoid""" +182 92 regularizer """no""" +182 92 optimizer """adam""" +182 92 training_loop """lcwa""" +182 92 evaluator """rankbased""" +182 93 dataset """kinships""" +182 93 model """distmult""" +182 93 loss """bceaftersigmoid""" +182 93 regularizer """no""" +182 93 optimizer """adam""" +182 93 training_loop """lcwa""" +182 93 evaluator """rankbased""" +182 94 dataset """kinships""" +182 94 model """distmult""" +182 94 loss """bceaftersigmoid""" +182 94 regularizer """no""" +182 94 optimizer """adam""" +182 94 training_loop """lcwa""" +182 94 evaluator """rankbased""" +182 95 dataset """kinships""" +182 95 model """distmult""" +182 95 loss """bceaftersigmoid""" +182 95 regularizer """no""" +182 95 optimizer """adam""" +182 95 training_loop """lcwa""" +182 95 evaluator """rankbased""" +182 96 dataset """kinships""" +182 96 model """distmult""" +182 96 loss """bceaftersigmoid""" +182 96 regularizer """no""" +182 96 optimizer """adam""" +182 96 training_loop """lcwa""" +182 96 evaluator """rankbased""" +182 97 dataset """kinships""" +182 97 model """distmult""" +182 97 loss """bceaftersigmoid""" +182 97 regularizer """no""" +182 97 optimizer """adam""" +182 97 training_loop """lcwa""" +182 97 evaluator """rankbased""" +182 98 dataset """kinships""" +182 98 model """distmult""" +182 98 loss """bceaftersigmoid""" +182 98 regularizer """no""" +182 98 optimizer """adam""" +182 98 training_loop """lcwa""" +182 98 evaluator """rankbased""" +182 99 dataset """kinships""" +182 99 model """distmult""" +182 99 loss """bceaftersigmoid""" +182 99 regularizer """no""" +182 99 optimizer """adam""" +182 99 training_loop """lcwa""" +182 99 evaluator """rankbased""" +182 100 dataset 
"""kinships""" +182 100 model """distmult""" +182 100 loss """bceaftersigmoid""" +182 100 regularizer """no""" +182 100 optimizer """adam""" +182 100 training_loop """lcwa""" +182 100 evaluator """rankbased""" +183 1 model.embedding_dim 2.0 +183 1 optimizer.lr 0.002070646453314975 +183 1 training.batch_size 1.0 +183 1 training.label_smoothing 0.020638063521545614 +183 2 model.embedding_dim 0.0 +183 2 optimizer.lr 0.0047942766152754765 +183 2 training.batch_size 1.0 +183 2 training.label_smoothing 0.05250316670308161 +183 3 model.embedding_dim 2.0 +183 3 optimizer.lr 0.0057671625226813255 +183 3 training.batch_size 2.0 +183 3 training.label_smoothing 0.3696121564578477 +183 4 model.embedding_dim 1.0 +183 4 optimizer.lr 0.04164596095506518 +183 4 training.batch_size 0.0 +183 4 training.label_smoothing 0.5530032769418233 +183 5 model.embedding_dim 1.0 +183 5 optimizer.lr 0.002774335724962801 +183 5 training.batch_size 2.0 +183 5 training.label_smoothing 0.013896063087479408 +183 6 model.embedding_dim 0.0 +183 6 optimizer.lr 0.01022569712087573 +183 6 training.batch_size 0.0 +183 6 training.label_smoothing 0.01652156599523129 +183 7 model.embedding_dim 2.0 +183 7 optimizer.lr 0.01449121752366822 +183 7 training.batch_size 0.0 +183 7 training.label_smoothing 0.010934386952729361 +183 8 model.embedding_dim 2.0 +183 8 optimizer.lr 0.026106154731398688 +183 8 training.batch_size 2.0 +183 8 training.label_smoothing 0.23519835553426727 +183 9 model.embedding_dim 0.0 +183 9 optimizer.lr 0.004562876207385518 +183 9 training.batch_size 1.0 +183 9 training.label_smoothing 0.8632312384000161 +183 10 model.embedding_dim 1.0 +183 10 optimizer.lr 0.004957521402633139 +183 10 training.batch_size 1.0 +183 10 training.label_smoothing 0.035578276057728696 +183 11 model.embedding_dim 2.0 +183 11 optimizer.lr 0.004737020270956105 +183 11 training.batch_size 1.0 +183 11 training.label_smoothing 0.004824521456869756 +183 12 model.embedding_dim 1.0 +183 12 optimizer.lr 0.09358597011758514 
+183 12 training.batch_size 1.0 +183 12 training.label_smoothing 0.002819259591317242 +183 13 model.embedding_dim 1.0 +183 13 optimizer.lr 0.003401439808254039 +183 13 training.batch_size 2.0 +183 13 training.label_smoothing 0.002371373706278908 +183 14 model.embedding_dim 2.0 +183 14 optimizer.lr 0.017650656698838037 +183 14 training.batch_size 0.0 +183 14 training.label_smoothing 0.03550198732187293 +183 15 model.embedding_dim 0.0 +183 15 optimizer.lr 0.06037245832690224 +183 15 training.batch_size 2.0 +183 15 training.label_smoothing 0.8857401082403843 +183 16 model.embedding_dim 0.0 +183 16 optimizer.lr 0.027885576624587398 +183 16 training.batch_size 2.0 +183 16 training.label_smoothing 0.4562572057704997 +183 17 model.embedding_dim 2.0 +183 17 optimizer.lr 0.0010837876133516127 +183 17 training.batch_size 2.0 +183 17 training.label_smoothing 0.0018020361683899382 +183 18 model.embedding_dim 1.0 +183 18 optimizer.lr 0.00794858369630544 +183 18 training.batch_size 0.0 +183 18 training.label_smoothing 0.019146523260074294 +183 19 model.embedding_dim 0.0 +183 19 optimizer.lr 0.003015828574970914 +183 19 training.batch_size 2.0 +183 19 training.label_smoothing 0.0045302291941351405 +183 20 model.embedding_dim 1.0 +183 20 optimizer.lr 0.0036671298835846567 +183 20 training.batch_size 0.0 +183 20 training.label_smoothing 0.01407824121766133 +183 21 model.embedding_dim 0.0 +183 21 optimizer.lr 0.03496697549758274 +183 21 training.batch_size 2.0 +183 21 training.label_smoothing 0.026192242834826657 +183 22 model.embedding_dim 0.0 +183 22 optimizer.lr 0.023529091154162105 +183 22 training.batch_size 1.0 +183 22 training.label_smoothing 0.0025539173176604684 +183 23 model.embedding_dim 2.0 +183 23 optimizer.lr 0.004656171631858268 +183 23 training.batch_size 0.0 +183 23 training.label_smoothing 0.6346440932921397 +183 24 model.embedding_dim 1.0 +183 24 optimizer.lr 0.007733258085148081 +183 24 training.batch_size 1.0 +183 24 training.label_smoothing 0.03973788887248951 
+183 25 model.embedding_dim 2.0 +183 25 optimizer.lr 0.003215922102170209 +183 25 training.batch_size 0.0 +183 25 training.label_smoothing 0.024148179168639955 +183 26 model.embedding_dim 1.0 +183 26 optimizer.lr 0.0010352219142553596 +183 26 training.batch_size 0.0 +183 26 training.label_smoothing 0.05858483045827947 +183 27 model.embedding_dim 2.0 +183 27 optimizer.lr 0.008357865840464494 +183 27 training.batch_size 1.0 +183 27 training.label_smoothing 0.12187585495968808 +183 28 model.embedding_dim 2.0 +183 28 optimizer.lr 0.024198465788520897 +183 28 training.batch_size 1.0 +183 28 training.label_smoothing 0.008010892031091392 +183 29 model.embedding_dim 2.0 +183 29 optimizer.lr 0.07927840453460773 +183 29 training.batch_size 2.0 +183 29 training.label_smoothing 0.08804301615821263 +183 30 model.embedding_dim 0.0 +183 30 optimizer.lr 0.014740884307089828 +183 30 training.batch_size 2.0 +183 30 training.label_smoothing 0.012868541800323848 +183 31 model.embedding_dim 0.0 +183 31 optimizer.lr 0.05537693406431651 +183 31 training.batch_size 0.0 +183 31 training.label_smoothing 0.8323355527178967 +183 32 model.embedding_dim 0.0 +183 32 optimizer.lr 0.007285998026938468 +183 32 training.batch_size 1.0 +183 32 training.label_smoothing 0.0027900962218269654 +183 33 model.embedding_dim 1.0 +183 33 optimizer.lr 0.03453726332371211 +183 33 training.batch_size 0.0 +183 33 training.label_smoothing 0.009095938834055536 +183 34 model.embedding_dim 2.0 +183 34 optimizer.lr 0.06088316254338527 +183 34 training.batch_size 0.0 +183 34 training.label_smoothing 0.3953277884309977 +183 35 model.embedding_dim 1.0 +183 35 optimizer.lr 0.0015998365163538066 +183 35 training.batch_size 0.0 +183 35 training.label_smoothing 0.012122590603996623 +183 36 model.embedding_dim 0.0 +183 36 optimizer.lr 0.0025562242061080665 +183 36 training.batch_size 2.0 +183 36 training.label_smoothing 0.02547440715961422 +183 37 model.embedding_dim 0.0 +183 37 optimizer.lr 0.021090768755529437 +183 37 
training.batch_size 1.0 +183 37 training.label_smoothing 0.6620559109443798 +183 38 model.embedding_dim 2.0 +183 38 optimizer.lr 0.021630365661807927 +183 38 training.batch_size 2.0 +183 38 training.label_smoothing 0.15333931654458258 +183 39 model.embedding_dim 1.0 +183 39 optimizer.lr 0.0755508337156058 +183 39 training.batch_size 2.0 +183 39 training.label_smoothing 0.05105097663076611 +183 40 model.embedding_dim 1.0 +183 40 optimizer.lr 0.024708575760301135 +183 40 training.batch_size 0.0 +183 40 training.label_smoothing 0.982927242744667 +183 41 model.embedding_dim 1.0 +183 41 optimizer.lr 0.0026861389142259066 +183 41 training.batch_size 0.0 +183 41 training.label_smoothing 0.0023390779831949163 +183 42 model.embedding_dim 1.0 +183 42 optimizer.lr 0.07073018091480705 +183 42 training.batch_size 2.0 +183 42 training.label_smoothing 0.001394369764483666 +183 43 model.embedding_dim 0.0 +183 43 optimizer.lr 0.019566090177516697 +183 43 training.batch_size 2.0 +183 43 training.label_smoothing 0.0165136725411437 +183 44 model.embedding_dim 2.0 +183 44 optimizer.lr 0.007405920477678068 +183 44 training.batch_size 2.0 +183 44 training.label_smoothing 0.0027846074403263094 +183 45 model.embedding_dim 0.0 +183 45 optimizer.lr 0.06531296704535886 +183 45 training.batch_size 0.0 +183 45 training.label_smoothing 0.0012493430569759355 +183 46 model.embedding_dim 1.0 +183 46 optimizer.lr 0.0011947910822348385 +183 46 training.batch_size 0.0 +183 46 training.label_smoothing 0.2711942218321756 +183 47 model.embedding_dim 0.0 +183 47 optimizer.lr 0.0041787298745215124 +183 47 training.batch_size 1.0 +183 47 training.label_smoothing 0.021146930563821238 +183 48 model.embedding_dim 2.0 +183 48 optimizer.lr 0.08731157188023445 +183 48 training.batch_size 2.0 +183 48 training.label_smoothing 0.10863483098751585 +183 49 model.embedding_dim 0.0 +183 49 optimizer.lr 0.06838453182095248 +183 49 training.batch_size 2.0 +183 49 training.label_smoothing 0.0649209069968813 +183 50 
model.embedding_dim 1.0 +183 50 optimizer.lr 0.049814493927343224 +183 50 training.batch_size 2.0 +183 50 training.label_smoothing 0.3098845357405011 +183 51 model.embedding_dim 2.0 +183 51 optimizer.lr 0.027610180991555454 +183 51 training.batch_size 1.0 +183 51 training.label_smoothing 0.029688583924116805 +183 52 model.embedding_dim 1.0 +183 52 optimizer.lr 0.0016779935324466558 +183 52 training.batch_size 0.0 +183 52 training.label_smoothing 0.0021875232696728416 +183 53 model.embedding_dim 1.0 +183 53 optimizer.lr 0.002473146676239616 +183 53 training.batch_size 2.0 +183 53 training.label_smoothing 0.0029236500853829397 +183 54 model.embedding_dim 2.0 +183 54 optimizer.lr 0.059606091970564205 +183 54 training.batch_size 2.0 +183 54 training.label_smoothing 0.0012429764477761922 +183 55 model.embedding_dim 0.0 +183 55 optimizer.lr 0.04412134550915741 +183 55 training.batch_size 1.0 +183 55 training.label_smoothing 0.7578816518615044 +183 56 model.embedding_dim 0.0 +183 56 optimizer.lr 0.010437770065546753 +183 56 training.batch_size 0.0 +183 56 training.label_smoothing 0.008437701023046312 +183 57 model.embedding_dim 2.0 +183 57 optimizer.lr 0.03925019409172769 +183 57 training.batch_size 1.0 +183 57 training.label_smoothing 0.02157872857729037 +183 58 model.embedding_dim 0.0 +183 58 optimizer.lr 0.0014725566480440343 +183 58 training.batch_size 0.0 +183 58 training.label_smoothing 0.11644001212878753 +183 59 model.embedding_dim 2.0 +183 59 optimizer.lr 0.0012300380824018977 +183 59 training.batch_size 2.0 +183 59 training.label_smoothing 0.0076685619022104365 +183 60 model.embedding_dim 0.0 +183 60 optimizer.lr 0.09335007662470697 +183 60 training.batch_size 0.0 +183 60 training.label_smoothing 0.08838498823053291 +183 61 model.embedding_dim 2.0 +183 61 optimizer.lr 0.0727730762243316 +183 61 training.batch_size 1.0 +183 61 training.label_smoothing 0.550694088208752 +183 62 model.embedding_dim 0.0 +183 62 optimizer.lr 0.07745514607000739 +183 62 
training.batch_size 1.0 +183 62 training.label_smoothing 0.7408658879642539 +183 63 model.embedding_dim 2.0 +183 63 optimizer.lr 0.016982449549954693 +183 63 training.batch_size 0.0 +183 63 training.label_smoothing 0.005191304660243947 +183 64 model.embedding_dim 0.0 +183 64 optimizer.lr 0.005472228696644202 +183 64 training.batch_size 0.0 +183 64 training.label_smoothing 0.13614234510496315 +183 65 model.embedding_dim 2.0 +183 65 optimizer.lr 0.01860267455485944 +183 65 training.batch_size 1.0 +183 65 training.label_smoothing 0.5907395204794985 +183 66 model.embedding_dim 2.0 +183 66 optimizer.lr 0.001246612417093369 +183 66 training.batch_size 1.0 +183 66 training.label_smoothing 0.3875968083602481 +183 67 model.embedding_dim 2.0 +183 67 optimizer.lr 0.010769711208673446 +183 67 training.batch_size 2.0 +183 67 training.label_smoothing 0.016928197106465023 +183 68 model.embedding_dim 1.0 +183 68 optimizer.lr 0.0011558621636867082 +183 68 training.batch_size 2.0 +183 68 training.label_smoothing 0.001814261375375848 +183 69 model.embedding_dim 0.0 +183 69 optimizer.lr 0.0026071586422745903 +183 69 training.batch_size 1.0 +183 69 training.label_smoothing 0.07827508890460853 +183 70 model.embedding_dim 2.0 +183 70 optimizer.lr 0.0021951759610101713 +183 70 training.batch_size 0.0 +183 70 training.label_smoothing 0.0020156667086840122 +183 71 model.embedding_dim 2.0 +183 71 optimizer.lr 0.05874592667754884 +183 71 training.batch_size 0.0 +183 71 training.label_smoothing 0.0077114548829350284 +183 72 model.embedding_dim 2.0 +183 72 optimizer.lr 0.03328388754727284 +183 72 training.batch_size 1.0 +183 72 training.label_smoothing 0.009494708158297702 +183 73 model.embedding_dim 0.0 +183 73 optimizer.lr 0.021334090347970732 +183 73 training.batch_size 1.0 +183 73 training.label_smoothing 0.003397508775027692 +183 74 model.embedding_dim 1.0 +183 74 optimizer.lr 0.04616139644885882 +183 74 training.batch_size 0.0 +183 74 training.label_smoothing 0.044453118249561566 +183 75 
model.embedding_dim 1.0 +183 75 optimizer.lr 0.0011543711912838444 +183 75 training.batch_size 1.0 +183 75 training.label_smoothing 0.007405713068445701 +183 76 model.embedding_dim 0.0 +183 76 optimizer.lr 0.06160149105340754 +183 76 training.batch_size 0.0 +183 76 training.label_smoothing 0.7284644472349511 +183 77 model.embedding_dim 0.0 +183 77 optimizer.lr 0.0021231777611848386 +183 77 training.batch_size 0.0 +183 77 training.label_smoothing 0.4272210513946713 +183 78 model.embedding_dim 0.0 +183 78 optimizer.lr 0.017349074371790937 +183 78 training.batch_size 0.0 +183 78 training.label_smoothing 0.08076811590897594 +183 79 model.embedding_dim 2.0 +183 79 optimizer.lr 0.036738875232825444 +183 79 training.batch_size 0.0 +183 79 training.label_smoothing 0.012189362669305984 +183 80 model.embedding_dim 0.0 +183 80 optimizer.lr 0.0015374141755387604 +183 80 training.batch_size 0.0 +183 80 training.label_smoothing 0.06015463660943334 +183 81 model.embedding_dim 2.0 +183 81 optimizer.lr 0.048190345338017 +183 81 training.batch_size 2.0 +183 81 training.label_smoothing 0.03927851899896698 +183 82 model.embedding_dim 1.0 +183 82 optimizer.lr 0.019036047009054386 +183 82 training.batch_size 0.0 +183 82 training.label_smoothing 0.029196940258153052 +183 83 model.embedding_dim 2.0 +183 83 optimizer.lr 0.0068194546338824534 +183 83 training.batch_size 2.0 +183 83 training.label_smoothing 0.0092825078945294 +183 84 model.embedding_dim 0.0 +183 84 optimizer.lr 0.0016612537333444565 +183 84 training.batch_size 0.0 +183 84 training.label_smoothing 0.46569563621187776 +183 85 model.embedding_dim 2.0 +183 85 optimizer.lr 0.030662925969557844 +183 85 training.batch_size 1.0 +183 85 training.label_smoothing 0.564092619890282 +183 86 model.embedding_dim 0.0 +183 86 optimizer.lr 0.001636681684775538 +183 86 training.batch_size 0.0 +183 86 training.label_smoothing 0.010536762443686166 +183 87 model.embedding_dim 2.0 +183 87 optimizer.lr 0.0017773242789869991 +183 87 
training.batch_size 1.0 +183 87 training.label_smoothing 0.01983814605752435 +183 88 model.embedding_dim 2.0 +183 88 optimizer.lr 0.03853805949017467 +183 88 training.batch_size 2.0 +183 88 training.label_smoothing 0.002357601812503326 +183 89 model.embedding_dim 0.0 +183 89 optimizer.lr 0.01085969030139451 +183 89 training.batch_size 1.0 +183 89 training.label_smoothing 0.7446296811224923 +183 90 model.embedding_dim 2.0 +183 90 optimizer.lr 0.022477687548061964 +183 90 training.batch_size 0.0 +183 90 training.label_smoothing 0.09195594921713036 +183 91 model.embedding_dim 0.0 +183 91 optimizer.lr 0.0020701823115452825 +183 91 training.batch_size 2.0 +183 91 training.label_smoothing 0.004427560420945469 +183 92 model.embedding_dim 1.0 +183 92 optimizer.lr 0.021018149484391478 +183 92 training.batch_size 0.0 +183 92 training.label_smoothing 0.00979961688450522 +183 93 model.embedding_dim 1.0 +183 93 optimizer.lr 0.0714741875097723 +183 93 training.batch_size 1.0 +183 93 training.label_smoothing 0.9184061911405204 +183 94 model.embedding_dim 1.0 +183 94 optimizer.lr 0.00176478516550837 +183 94 training.batch_size 1.0 +183 94 training.label_smoothing 0.18236755208684138 +183 95 model.embedding_dim 0.0 +183 95 optimizer.lr 0.0053370071126200595 +183 95 training.batch_size 1.0 +183 95 training.label_smoothing 0.009020783677400317 +183 96 model.embedding_dim 2.0 +183 96 optimizer.lr 0.001101735445597078 +183 96 training.batch_size 1.0 +183 96 training.label_smoothing 0.06599106653869131 +183 97 model.embedding_dim 0.0 +183 97 optimizer.lr 0.014166198518119871 +183 97 training.batch_size 1.0 +183 97 training.label_smoothing 0.010598630415610592 +183 98 model.embedding_dim 1.0 +183 98 optimizer.lr 0.0020302943415115623 +183 98 training.batch_size 0.0 +183 98 training.label_smoothing 0.5013460902236289 +183 99 model.embedding_dim 2.0 +183 99 optimizer.lr 0.013417530599411951 +183 99 training.batch_size 0.0 +183 99 training.label_smoothing 0.33093676954856543 +183 100 
model.embedding_dim 1.0 +183 100 optimizer.lr 0.03646393280920396 +183 100 training.batch_size 1.0 +183 100 training.label_smoothing 0.20409510393336205 +183 1 dataset """kinships""" +183 1 model """distmult""" +183 1 loss """softplus""" +183 1 regularizer """no""" +183 1 optimizer """adam""" +183 1 training_loop """lcwa""" +183 1 evaluator """rankbased""" +183 2 dataset """kinships""" +183 2 model """distmult""" +183 2 loss """softplus""" +183 2 regularizer """no""" +183 2 optimizer """adam""" +183 2 training_loop """lcwa""" +183 2 evaluator """rankbased""" +183 3 dataset """kinships""" +183 3 model """distmult""" +183 3 loss """softplus""" +183 3 regularizer """no""" +183 3 optimizer """adam""" +183 3 training_loop """lcwa""" +183 3 evaluator """rankbased""" +183 4 dataset """kinships""" +183 4 model """distmult""" +183 4 loss """softplus""" +183 4 regularizer """no""" +183 4 optimizer """adam""" +183 4 training_loop """lcwa""" +183 4 evaluator """rankbased""" +183 5 dataset """kinships""" +183 5 model """distmult""" +183 5 loss """softplus""" +183 5 regularizer """no""" +183 5 optimizer """adam""" +183 5 training_loop """lcwa""" +183 5 evaluator """rankbased""" +183 6 dataset """kinships""" +183 6 model """distmult""" +183 6 loss """softplus""" +183 6 regularizer """no""" +183 6 optimizer """adam""" +183 6 training_loop """lcwa""" +183 6 evaluator """rankbased""" +183 7 dataset """kinships""" +183 7 model """distmult""" +183 7 loss """softplus""" +183 7 regularizer """no""" +183 7 optimizer """adam""" +183 7 training_loop """lcwa""" +183 7 evaluator """rankbased""" +183 8 dataset """kinships""" +183 8 model """distmult""" +183 8 loss """softplus""" +183 8 regularizer """no""" +183 8 optimizer """adam""" +183 8 training_loop """lcwa""" +183 8 evaluator """rankbased""" +183 9 dataset """kinships""" +183 9 model """distmult""" +183 9 loss """softplus""" +183 9 regularizer """no""" +183 9 optimizer """adam""" +183 9 training_loop """lcwa""" +183 9 evaluator 
"""rankbased""" +183 10 dataset """kinships""" +183 10 model """distmult""" +183 10 loss """softplus""" +183 10 regularizer """no""" +183 10 optimizer """adam""" +183 10 training_loop """lcwa""" +183 10 evaluator """rankbased""" +183 11 dataset """kinships""" +183 11 model """distmult""" +183 11 loss """softplus""" +183 11 regularizer """no""" +183 11 optimizer """adam""" +183 11 training_loop """lcwa""" +183 11 evaluator """rankbased""" +183 12 dataset """kinships""" +183 12 model """distmult""" +183 12 loss """softplus""" +183 12 regularizer """no""" +183 12 optimizer """adam""" +183 12 training_loop """lcwa""" +183 12 evaluator """rankbased""" +183 13 dataset """kinships""" +183 13 model """distmult""" +183 13 loss """softplus""" +183 13 regularizer """no""" +183 13 optimizer """adam""" +183 13 training_loop """lcwa""" +183 13 evaluator """rankbased""" +183 14 dataset """kinships""" +183 14 model """distmult""" +183 14 loss """softplus""" +183 14 regularizer """no""" +183 14 optimizer """adam""" +183 14 training_loop """lcwa""" +183 14 evaluator """rankbased""" +183 15 dataset """kinships""" +183 15 model """distmult""" +183 15 loss """softplus""" +183 15 regularizer """no""" +183 15 optimizer """adam""" +183 15 training_loop """lcwa""" +183 15 evaluator """rankbased""" +183 16 dataset """kinships""" +183 16 model """distmult""" +183 16 loss """softplus""" +183 16 regularizer """no""" +183 16 optimizer """adam""" +183 16 training_loop """lcwa""" +183 16 evaluator """rankbased""" +183 17 dataset """kinships""" +183 17 model """distmult""" +183 17 loss """softplus""" +183 17 regularizer """no""" +183 17 optimizer """adam""" +183 17 training_loop """lcwa""" +183 17 evaluator """rankbased""" +183 18 dataset """kinships""" +183 18 model """distmult""" +183 18 loss """softplus""" +183 18 regularizer """no""" +183 18 optimizer """adam""" +183 18 training_loop """lcwa""" +183 18 evaluator """rankbased""" +183 19 dataset """kinships""" +183 19 model """distmult""" +183 
19 loss """softplus""" +183 19 regularizer """no""" +183 19 optimizer """adam""" +183 19 training_loop """lcwa""" +183 19 evaluator """rankbased""" +183 20 dataset """kinships""" +183 20 model """distmult""" +183 20 loss """softplus""" +183 20 regularizer """no""" +183 20 optimizer """adam""" +183 20 training_loop """lcwa""" +183 20 evaluator """rankbased""" +183 21 dataset """kinships""" +183 21 model """distmult""" +183 21 loss """softplus""" +183 21 regularizer """no""" +183 21 optimizer """adam""" +183 21 training_loop """lcwa""" +183 21 evaluator """rankbased""" +183 22 dataset """kinships""" +183 22 model """distmult""" +183 22 loss """softplus""" +183 22 regularizer """no""" +183 22 optimizer """adam""" +183 22 training_loop """lcwa""" +183 22 evaluator """rankbased""" +183 23 dataset """kinships""" +183 23 model """distmult""" +183 23 loss """softplus""" +183 23 regularizer """no""" +183 23 optimizer """adam""" +183 23 training_loop """lcwa""" +183 23 evaluator """rankbased""" +183 24 dataset """kinships""" +183 24 model """distmult""" +183 24 loss """softplus""" +183 24 regularizer """no""" +183 24 optimizer """adam""" +183 24 training_loop """lcwa""" +183 24 evaluator """rankbased""" +183 25 dataset """kinships""" +183 25 model """distmult""" +183 25 loss """softplus""" +183 25 regularizer """no""" +183 25 optimizer """adam""" +183 25 training_loop """lcwa""" +183 25 evaluator """rankbased""" +183 26 dataset """kinships""" +183 26 model """distmult""" +183 26 loss """softplus""" +183 26 regularizer """no""" +183 26 optimizer """adam""" +183 26 training_loop """lcwa""" +183 26 evaluator """rankbased""" +183 27 dataset """kinships""" +183 27 model """distmult""" +183 27 loss """softplus""" +183 27 regularizer """no""" +183 27 optimizer """adam""" +183 27 training_loop """lcwa""" +183 27 evaluator """rankbased""" +183 28 dataset """kinships""" +183 28 model """distmult""" +183 28 loss """softplus""" +183 28 regularizer """no""" +183 28 optimizer """adam""" 
+183 28 training_loop """lcwa""" +183 28 evaluator """rankbased""" +183 29 dataset """kinships""" +183 29 model """distmult""" +183 29 loss """softplus""" +183 29 regularizer """no""" +183 29 optimizer """adam""" +183 29 training_loop """lcwa""" +183 29 evaluator """rankbased""" +183 30 dataset """kinships""" +183 30 model """distmult""" +183 30 loss """softplus""" +183 30 regularizer """no""" +183 30 optimizer """adam""" +183 30 training_loop """lcwa""" +183 30 evaluator """rankbased""" +183 31 dataset """kinships""" +183 31 model """distmult""" +183 31 loss """softplus""" +183 31 regularizer """no""" +183 31 optimizer """adam""" +183 31 training_loop """lcwa""" +183 31 evaluator """rankbased""" +183 32 dataset """kinships""" +183 32 model """distmult""" +183 32 loss """softplus""" +183 32 regularizer """no""" +183 32 optimizer """adam""" +183 32 training_loop """lcwa""" +183 32 evaluator """rankbased""" +183 33 dataset """kinships""" +183 33 model """distmult""" +183 33 loss """softplus""" +183 33 regularizer """no""" +183 33 optimizer """adam""" +183 33 training_loop """lcwa""" +183 33 evaluator """rankbased""" +183 34 dataset """kinships""" +183 34 model """distmult""" +183 34 loss """softplus""" +183 34 regularizer """no""" +183 34 optimizer """adam""" +183 34 training_loop """lcwa""" +183 34 evaluator """rankbased""" +183 35 dataset """kinships""" +183 35 model """distmult""" +183 35 loss """softplus""" +183 35 regularizer """no""" +183 35 optimizer """adam""" +183 35 training_loop """lcwa""" +183 35 evaluator """rankbased""" +183 36 dataset """kinships""" +183 36 model """distmult""" +183 36 loss """softplus""" +183 36 regularizer """no""" +183 36 optimizer """adam""" +183 36 training_loop """lcwa""" +183 36 evaluator """rankbased""" +183 37 dataset """kinships""" +183 37 model """distmult""" +183 37 loss """softplus""" +183 37 regularizer """no""" +183 37 optimizer """adam""" +183 37 training_loop """lcwa""" +183 37 evaluator """rankbased""" +183 38 dataset 
"""kinships""" +183 38 model """distmult""" +183 38 loss """softplus""" +183 38 regularizer """no""" +183 38 optimizer """adam""" +183 38 training_loop """lcwa""" +183 38 evaluator """rankbased""" +183 39 dataset """kinships""" +183 39 model """distmult""" +183 39 loss """softplus""" +183 39 regularizer """no""" +183 39 optimizer """adam""" +183 39 training_loop """lcwa""" +183 39 evaluator """rankbased""" +183 40 dataset """kinships""" +183 40 model """distmult""" +183 40 loss """softplus""" +183 40 regularizer """no""" +183 40 optimizer """adam""" +183 40 training_loop """lcwa""" +183 40 evaluator """rankbased""" +183 41 dataset """kinships""" +183 41 model """distmult""" +183 41 loss """softplus""" +183 41 regularizer """no""" +183 41 optimizer """adam""" +183 41 training_loop """lcwa""" +183 41 evaluator """rankbased""" +183 42 dataset """kinships""" +183 42 model """distmult""" +183 42 loss """softplus""" +183 42 regularizer """no""" +183 42 optimizer """adam""" +183 42 training_loop """lcwa""" +183 42 evaluator """rankbased""" +183 43 dataset """kinships""" +183 43 model """distmult""" +183 43 loss """softplus""" +183 43 regularizer """no""" +183 43 optimizer """adam""" +183 43 training_loop """lcwa""" +183 43 evaluator """rankbased""" +183 44 dataset """kinships""" +183 44 model """distmult""" +183 44 loss """softplus""" +183 44 regularizer """no""" +183 44 optimizer """adam""" +183 44 training_loop """lcwa""" +183 44 evaluator """rankbased""" +183 45 dataset """kinships""" +183 45 model """distmult""" +183 45 loss """softplus""" +183 45 regularizer """no""" +183 45 optimizer """adam""" +183 45 training_loop """lcwa""" +183 45 evaluator """rankbased""" +183 46 dataset """kinships""" +183 46 model """distmult""" +183 46 loss """softplus""" +183 46 regularizer """no""" +183 46 optimizer """adam""" +183 46 training_loop """lcwa""" +183 46 evaluator """rankbased""" +183 47 dataset """kinships""" +183 47 model """distmult""" +183 47 loss """softplus""" +183 47 
regularizer """no""" +183 47 optimizer """adam""" +183 47 training_loop """lcwa""" +183 47 evaluator """rankbased""" +183 48 dataset """kinships""" +183 48 model """distmult""" +183 48 loss """softplus""" +183 48 regularizer """no""" +183 48 optimizer """adam""" +183 48 training_loop """lcwa""" +183 48 evaluator """rankbased""" +183 49 dataset """kinships""" +183 49 model """distmult""" +183 49 loss """softplus""" +183 49 regularizer """no""" +183 49 optimizer """adam""" +183 49 training_loop """lcwa""" +183 49 evaluator """rankbased""" +183 50 dataset """kinships""" +183 50 model """distmult""" +183 50 loss """softplus""" +183 50 regularizer """no""" +183 50 optimizer """adam""" +183 50 training_loop """lcwa""" +183 50 evaluator """rankbased""" +183 51 dataset """kinships""" +183 51 model """distmult""" +183 51 loss """softplus""" +183 51 regularizer """no""" +183 51 optimizer """adam""" +183 51 training_loop """lcwa""" +183 51 evaluator """rankbased""" +183 52 dataset """kinships""" +183 52 model """distmult""" +183 52 loss """softplus""" +183 52 regularizer """no""" +183 52 optimizer """adam""" +183 52 training_loop """lcwa""" +183 52 evaluator """rankbased""" +183 53 dataset """kinships""" +183 53 model """distmult""" +183 53 loss """softplus""" +183 53 regularizer """no""" +183 53 optimizer """adam""" +183 53 training_loop """lcwa""" +183 53 evaluator """rankbased""" +183 54 dataset """kinships""" +183 54 model """distmult""" +183 54 loss """softplus""" +183 54 regularizer """no""" +183 54 optimizer """adam""" +183 54 training_loop """lcwa""" +183 54 evaluator """rankbased""" +183 55 dataset """kinships""" +183 55 model """distmult""" +183 55 loss """softplus""" +183 55 regularizer """no""" +183 55 optimizer """adam""" +183 55 training_loop """lcwa""" +183 55 evaluator """rankbased""" +183 56 dataset """kinships""" +183 56 model """distmult""" +183 56 loss """softplus""" +183 56 regularizer """no""" +183 56 optimizer """adam""" +183 56 training_loop """lcwa""" 
+183 56 evaluator """rankbased""" +183 57 dataset """kinships""" +183 57 model """distmult""" +183 57 loss """softplus""" +183 57 regularizer """no""" +183 57 optimizer """adam""" +183 57 training_loop """lcwa""" +183 57 evaluator """rankbased""" +183 58 dataset """kinships""" +183 58 model """distmult""" +183 58 loss """softplus""" +183 58 regularizer """no""" +183 58 optimizer """adam""" +183 58 training_loop """lcwa""" +183 58 evaluator """rankbased""" +183 59 dataset """kinships""" +183 59 model """distmult""" +183 59 loss """softplus""" +183 59 regularizer """no""" +183 59 optimizer """adam""" +183 59 training_loop """lcwa""" +183 59 evaluator """rankbased""" +183 60 dataset """kinships""" +183 60 model """distmult""" +183 60 loss """softplus""" +183 60 regularizer """no""" +183 60 optimizer """adam""" +183 60 training_loop """lcwa""" +183 60 evaluator """rankbased""" +183 61 dataset """kinships""" +183 61 model """distmult""" +183 61 loss """softplus""" +183 61 regularizer """no""" +183 61 optimizer """adam""" +183 61 training_loop """lcwa""" +183 61 evaluator """rankbased""" +183 62 dataset """kinships""" +183 62 model """distmult""" +183 62 loss """softplus""" +183 62 regularizer """no""" +183 62 optimizer """adam""" +183 62 training_loop """lcwa""" +183 62 evaluator """rankbased""" +183 63 dataset """kinships""" +183 63 model """distmult""" +183 63 loss """softplus""" +183 63 regularizer """no""" +183 63 optimizer """adam""" +183 63 training_loop """lcwa""" +183 63 evaluator """rankbased""" +183 64 dataset """kinships""" +183 64 model """distmult""" +183 64 loss """softplus""" +183 64 regularizer """no""" +183 64 optimizer """adam""" +183 64 training_loop """lcwa""" +183 64 evaluator """rankbased""" +183 65 dataset """kinships""" +183 65 model """distmult""" +183 65 loss """softplus""" +183 65 regularizer """no""" +183 65 optimizer """adam""" +183 65 training_loop """lcwa""" +183 65 evaluator """rankbased""" +183 66 dataset """kinships""" +183 66 model 
"""distmult""" +183 66 loss """softplus""" +183 66 regularizer """no""" +183 66 optimizer """adam""" +183 66 training_loop """lcwa""" +183 66 evaluator """rankbased""" +183 67 dataset """kinships""" +183 67 model """distmult""" +183 67 loss """softplus""" +183 67 regularizer """no""" +183 67 optimizer """adam""" +183 67 training_loop """lcwa""" +183 67 evaluator """rankbased""" +183 68 dataset """kinships""" +183 68 model """distmult""" +183 68 loss """softplus""" +183 68 regularizer """no""" +183 68 optimizer """adam""" +183 68 training_loop """lcwa""" +183 68 evaluator """rankbased""" +183 69 dataset """kinships""" +183 69 model """distmult""" +183 69 loss """softplus""" +183 69 regularizer """no""" +183 69 optimizer """adam""" +183 69 training_loop """lcwa""" +183 69 evaluator """rankbased""" +183 70 dataset """kinships""" +183 70 model """distmult""" +183 70 loss """softplus""" +183 70 regularizer """no""" +183 70 optimizer """adam""" +183 70 training_loop """lcwa""" +183 70 evaluator """rankbased""" +183 71 dataset """kinships""" +183 71 model """distmult""" +183 71 loss """softplus""" +183 71 regularizer """no""" +183 71 optimizer """adam""" +183 71 training_loop """lcwa""" +183 71 evaluator """rankbased""" +183 72 dataset """kinships""" +183 72 model """distmult""" +183 72 loss """softplus""" +183 72 regularizer """no""" +183 72 optimizer """adam""" +183 72 training_loop """lcwa""" +183 72 evaluator """rankbased""" +183 73 dataset """kinships""" +183 73 model """distmult""" +183 73 loss """softplus""" +183 73 regularizer """no""" +183 73 optimizer """adam""" +183 73 training_loop """lcwa""" +183 73 evaluator """rankbased""" +183 74 dataset """kinships""" +183 74 model """distmult""" +183 74 loss """softplus""" +183 74 regularizer """no""" +183 74 optimizer """adam""" +183 74 training_loop """lcwa""" +183 74 evaluator """rankbased""" +183 75 dataset """kinships""" +183 75 model """distmult""" +183 75 loss """softplus""" +183 75 regularizer """no""" +183 75 
optimizer """adam""" +183 75 training_loop """lcwa""" +183 75 evaluator """rankbased""" +183 76 dataset """kinships""" +183 76 model """distmult""" +183 76 loss """softplus""" +183 76 regularizer """no""" +183 76 optimizer """adam""" +183 76 training_loop """lcwa""" +183 76 evaluator """rankbased""" +183 77 dataset """kinships""" +183 77 model """distmult""" +183 77 loss """softplus""" +183 77 regularizer """no""" +183 77 optimizer """adam""" +183 77 training_loop """lcwa""" +183 77 evaluator """rankbased""" +183 78 dataset """kinships""" +183 78 model """distmult""" +183 78 loss """softplus""" +183 78 regularizer """no""" +183 78 optimizer """adam""" +183 78 training_loop """lcwa""" +183 78 evaluator """rankbased""" +183 79 dataset """kinships""" +183 79 model """distmult""" +183 79 loss """softplus""" +183 79 regularizer """no""" +183 79 optimizer """adam""" +183 79 training_loop """lcwa""" +183 79 evaluator """rankbased""" +183 80 dataset """kinships""" +183 80 model """distmult""" +183 80 loss """softplus""" +183 80 regularizer """no""" +183 80 optimizer """adam""" +183 80 training_loop """lcwa""" +183 80 evaluator """rankbased""" +183 81 dataset """kinships""" +183 81 model """distmult""" +183 81 loss """softplus""" +183 81 regularizer """no""" +183 81 optimizer """adam""" +183 81 training_loop """lcwa""" +183 81 evaluator """rankbased""" +183 82 dataset """kinships""" +183 82 model """distmult""" +183 82 loss """softplus""" +183 82 regularizer """no""" +183 82 optimizer """adam""" +183 82 training_loop """lcwa""" +183 82 evaluator """rankbased""" +183 83 dataset """kinships""" +183 83 model """distmult""" +183 83 loss """softplus""" +183 83 regularizer """no""" +183 83 optimizer """adam""" +183 83 training_loop """lcwa""" +183 83 evaluator """rankbased""" +183 84 dataset """kinships""" +183 84 model """distmult""" +183 84 loss """softplus""" +183 84 regularizer """no""" +183 84 optimizer """adam""" +183 84 training_loop """lcwa""" +183 84 evaluator 
"""rankbased""" +183 85 dataset """kinships""" +183 85 model """distmult""" +183 85 loss """softplus""" +183 85 regularizer """no""" +183 85 optimizer """adam""" +183 85 training_loop """lcwa""" +183 85 evaluator """rankbased""" +183 86 dataset """kinships""" +183 86 model """distmult""" +183 86 loss """softplus""" +183 86 regularizer """no""" +183 86 optimizer """adam""" +183 86 training_loop """lcwa""" +183 86 evaluator """rankbased""" +183 87 dataset """kinships""" +183 87 model """distmult""" +183 87 loss """softplus""" +183 87 regularizer """no""" +183 87 optimizer """adam""" +183 87 training_loop """lcwa""" +183 87 evaluator """rankbased""" +183 88 dataset """kinships""" +183 88 model """distmult""" +183 88 loss """softplus""" +183 88 regularizer """no""" +183 88 optimizer """adam""" +183 88 training_loop """lcwa""" +183 88 evaluator """rankbased""" +183 89 dataset """kinships""" +183 89 model """distmult""" +183 89 loss """softplus""" +183 89 regularizer """no""" +183 89 optimizer """adam""" +183 89 training_loop """lcwa""" +183 89 evaluator """rankbased""" +183 90 dataset """kinships""" +183 90 model """distmult""" +183 90 loss """softplus""" +183 90 regularizer """no""" +183 90 optimizer """adam""" +183 90 training_loop """lcwa""" +183 90 evaluator """rankbased""" +183 91 dataset """kinships""" +183 91 model """distmult""" +183 91 loss """softplus""" +183 91 regularizer """no""" +183 91 optimizer """adam""" +183 91 training_loop """lcwa""" +183 91 evaluator """rankbased""" +183 92 dataset """kinships""" +183 92 model """distmult""" +183 92 loss """softplus""" +183 92 regularizer """no""" +183 92 optimizer """adam""" +183 92 training_loop """lcwa""" +183 92 evaluator """rankbased""" +183 93 dataset """kinships""" +183 93 model """distmult""" +183 93 loss """softplus""" +183 93 regularizer """no""" +183 93 optimizer """adam""" +183 93 training_loop """lcwa""" +183 93 evaluator """rankbased""" +183 94 dataset """kinships""" +183 94 model """distmult""" +183 
94 loss """softplus""" +183 94 regularizer """no""" +183 94 optimizer """adam""" +183 94 training_loop """lcwa""" +183 94 evaluator """rankbased""" +183 95 dataset """kinships""" +183 95 model """distmult""" +183 95 loss """softplus""" +183 95 regularizer """no""" +183 95 optimizer """adam""" +183 95 training_loop """lcwa""" +183 95 evaluator """rankbased""" +183 96 dataset """kinships""" +183 96 model """distmult""" +183 96 loss """softplus""" +183 96 regularizer """no""" +183 96 optimizer """adam""" +183 96 training_loop """lcwa""" +183 96 evaluator """rankbased""" +183 97 dataset """kinships""" +183 97 model """distmult""" +183 97 loss """softplus""" +183 97 regularizer """no""" +183 97 optimizer """adam""" +183 97 training_loop """lcwa""" +183 97 evaluator """rankbased""" +183 98 dataset """kinships""" +183 98 model """distmult""" +183 98 loss """softplus""" +183 98 regularizer """no""" +183 98 optimizer """adam""" +183 98 training_loop """lcwa""" +183 98 evaluator """rankbased""" +183 99 dataset """kinships""" +183 99 model """distmult""" +183 99 loss """softplus""" +183 99 regularizer """no""" +183 99 optimizer """adam""" +183 99 training_loop """lcwa""" +183 99 evaluator """rankbased""" +183 100 dataset """kinships""" +183 100 model """distmult""" +183 100 loss """softplus""" +183 100 regularizer """no""" +183 100 optimizer """adam""" +183 100 training_loop """lcwa""" +183 100 evaluator """rankbased""" +184 1 model.embedding_dim 0.0 +184 1 optimizer.lr 0.049206281273610125 +184 1 training.batch_size 2.0 +184 1 training.label_smoothing 0.0014415064399249597 +184 2 model.embedding_dim 2.0 +184 2 optimizer.lr 0.05951439814324635 +184 2 training.batch_size 0.0 +184 2 training.label_smoothing 0.04786348922361496 +184 3 model.embedding_dim 1.0 +184 3 optimizer.lr 0.0016571115003885708 +184 3 training.batch_size 0.0 +184 3 training.label_smoothing 0.012561425707667646 +184 4 model.embedding_dim 1.0 +184 4 optimizer.lr 0.01823689284066471 +184 4 training.batch_size 
1.0 +184 4 training.label_smoothing 0.042277881996296764 +184 5 model.embedding_dim 2.0 +184 5 optimizer.lr 0.0030084630552588408 +184 5 training.batch_size 2.0 +184 5 training.label_smoothing 0.0018886790619321537 +184 6 model.embedding_dim 2.0 +184 6 optimizer.lr 0.0013827069351671616 +184 6 training.batch_size 0.0 +184 6 training.label_smoothing 0.0484806927297524 +184 7 model.embedding_dim 2.0 +184 7 optimizer.lr 0.025703764039693284 +184 7 training.batch_size 2.0 +184 7 training.label_smoothing 0.004855931316153647 +184 8 model.embedding_dim 2.0 +184 8 optimizer.lr 0.06333447999197905 +184 8 training.batch_size 2.0 +184 8 training.label_smoothing 0.31921766164133375 +184 9 model.embedding_dim 2.0 +184 9 optimizer.lr 0.0051025147645010534 +184 9 training.batch_size 0.0 +184 9 training.label_smoothing 0.016101771165827367 +184 10 model.embedding_dim 0.0 +184 10 optimizer.lr 0.03573437168369832 +184 10 training.batch_size 1.0 +184 10 training.label_smoothing 0.021310613245742613 +184 11 model.embedding_dim 1.0 +184 11 optimizer.lr 0.08488746231931844 +184 11 training.batch_size 2.0 +184 11 training.label_smoothing 0.4368896347982458 +184 12 model.embedding_dim 2.0 +184 12 optimizer.lr 0.02880840858673429 +184 12 training.batch_size 1.0 +184 12 training.label_smoothing 0.006494223769800468 +184 13 model.embedding_dim 2.0 +184 13 optimizer.lr 0.001555439285757504 +184 13 training.batch_size 1.0 +184 13 training.label_smoothing 0.6491226608025134 +184 14 model.embedding_dim 1.0 +184 14 optimizer.lr 0.006752192144671265 +184 14 training.batch_size 1.0 +184 14 training.label_smoothing 0.0405308800201066 +184 15 model.embedding_dim 1.0 +184 15 optimizer.lr 0.01971890016619368 +184 15 training.batch_size 1.0 +184 15 training.label_smoothing 0.050377642347012266 +184 16 model.embedding_dim 2.0 +184 16 optimizer.lr 0.02024470488971254 +184 16 training.batch_size 0.0 +184 16 training.label_smoothing 0.2102678360778324 +184 17 model.embedding_dim 1.0 +184 17 optimizer.lr 
0.003156493537605404 +184 17 training.batch_size 1.0 +184 17 training.label_smoothing 0.06451275860750123 +184 18 model.embedding_dim 1.0 +184 18 optimizer.lr 0.001780543862234945 +184 18 training.batch_size 0.0 +184 18 training.label_smoothing 0.003638829034754265 +184 19 model.embedding_dim 0.0 +184 19 optimizer.lr 0.017305685446881793 +184 19 training.batch_size 2.0 +184 19 training.label_smoothing 0.013521190270549316 +184 20 model.embedding_dim 0.0 +184 20 optimizer.lr 0.010375412232155302 +184 20 training.batch_size 2.0 +184 20 training.label_smoothing 0.4329336666214557 +184 21 model.embedding_dim 1.0 +184 21 optimizer.lr 0.04541914131222946 +184 21 training.batch_size 2.0 +184 21 training.label_smoothing 0.01990346100925807 +184 22 model.embedding_dim 1.0 +184 22 optimizer.lr 0.006148069757250181 +184 22 training.batch_size 2.0 +184 22 training.label_smoothing 0.06187005395206001 +184 23 model.embedding_dim 0.0 +184 23 optimizer.lr 0.08523702745045299 +184 23 training.batch_size 0.0 +184 23 training.label_smoothing 0.254287395166315 +184 24 model.embedding_dim 1.0 +184 24 optimizer.lr 0.003912207695466259 +184 24 training.batch_size 0.0 +184 24 training.label_smoothing 0.17213009175882418 +184 25 model.embedding_dim 0.0 +184 25 optimizer.lr 0.0021266624723644233 +184 25 training.batch_size 2.0 +184 25 training.label_smoothing 0.18920625301566682 +184 26 model.embedding_dim 2.0 +184 26 optimizer.lr 0.044808510830769366 +184 26 training.batch_size 0.0 +184 26 training.label_smoothing 0.10775644723201858 +184 27 model.embedding_dim 2.0 +184 27 optimizer.lr 0.04506352711915533 +184 27 training.batch_size 0.0 +184 27 training.label_smoothing 0.6179346985468832 +184 28 model.embedding_dim 2.0 +184 28 optimizer.lr 0.062298081353710615 +184 28 training.batch_size 2.0 +184 28 training.label_smoothing 0.1449523175107328 +184 29 model.embedding_dim 2.0 +184 29 optimizer.lr 0.0022982537054011933 +184 29 training.batch_size 1.0 +184 29 training.label_smoothing 
0.002216731575766418 +184 30 model.embedding_dim 0.0 +184 30 optimizer.lr 0.0014167963353790922 +184 30 training.batch_size 2.0 +184 30 training.label_smoothing 0.0041832564660189255 +184 31 model.embedding_dim 2.0 +184 31 optimizer.lr 0.06055382012526064 +184 31 training.batch_size 2.0 +184 31 training.label_smoothing 0.007557954100489644 +184 32 model.embedding_dim 0.0 +184 32 optimizer.lr 0.03409465776048784 +184 32 training.batch_size 1.0 +184 32 training.label_smoothing 0.0032711278670556876 +184 33 model.embedding_dim 1.0 +184 33 optimizer.lr 0.005566805832602464 +184 33 training.batch_size 1.0 +184 33 training.label_smoothing 0.7127855945608987 +184 34 model.embedding_dim 2.0 +184 34 optimizer.lr 0.0010405803436543697 +184 34 training.batch_size 0.0 +184 34 training.label_smoothing 0.003831180816005403 +184 35 model.embedding_dim 2.0 +184 35 optimizer.lr 0.0036139971226077094 +184 35 training.batch_size 0.0 +184 35 training.label_smoothing 0.0013094934885256402 +184 36 model.embedding_dim 1.0 +184 36 optimizer.lr 0.0018867606768601646 +184 36 training.batch_size 2.0 +184 36 training.label_smoothing 0.0021277096165190105 +184 37 model.embedding_dim 2.0 +184 37 optimizer.lr 0.03573045337997504 +184 37 training.batch_size 2.0 +184 37 training.label_smoothing 0.013037480302234674 +184 38 model.embedding_dim 0.0 +184 38 optimizer.lr 0.06253018790761973 +184 38 training.batch_size 2.0 +184 38 training.label_smoothing 0.3419941021511906 +184 39 model.embedding_dim 0.0 +184 39 optimizer.lr 0.002244264581017308 +184 39 training.batch_size 0.0 +184 39 training.label_smoothing 0.06375950248043225 +184 40 model.embedding_dim 1.0 +184 40 optimizer.lr 0.03452128882640833 +184 40 training.batch_size 2.0 +184 40 training.label_smoothing 0.47340764148118547 +184 41 model.embedding_dim 1.0 +184 41 optimizer.lr 0.010931471017834617 +184 41 training.batch_size 1.0 +184 41 training.label_smoothing 0.0010629418382709093 +184 42 model.embedding_dim 1.0 +184 42 optimizer.lr 
0.001731442330421034 +184 42 training.batch_size 1.0 +184 42 training.label_smoothing 0.013725990453060763 +184 43 model.embedding_dim 0.0 +184 43 optimizer.lr 0.04133067571527166 +184 43 training.batch_size 1.0 +184 43 training.label_smoothing 0.32792398918777177 +184 44 model.embedding_dim 0.0 +184 44 optimizer.lr 0.006465288334436098 +184 44 training.batch_size 2.0 +184 44 training.label_smoothing 0.07122147787344496 +184 45 model.embedding_dim 0.0 +184 45 optimizer.lr 0.006217118324462185 +184 45 training.batch_size 1.0 +184 45 training.label_smoothing 0.050045924380088105 +184 46 model.embedding_dim 1.0 +184 46 optimizer.lr 0.07467047256886096 +184 46 training.batch_size 0.0 +184 46 training.label_smoothing 0.012726368674764826 +184 47 model.embedding_dim 2.0 +184 47 optimizer.lr 0.006463716666350604 +184 47 training.batch_size 2.0 +184 47 training.label_smoothing 0.006491486485469786 +184 48 model.embedding_dim 2.0 +184 48 optimizer.lr 0.0015184488798080354 +184 48 training.batch_size 2.0 +184 48 training.label_smoothing 0.0027377416627753152 +184 49 model.embedding_dim 2.0 +184 49 optimizer.lr 0.06536502289749668 +184 49 training.batch_size 1.0 +184 49 training.label_smoothing 0.015997043834767362 +184 50 model.embedding_dim 0.0 +184 50 optimizer.lr 0.0014229248518350395 +184 50 training.batch_size 0.0 +184 50 training.label_smoothing 0.7942299242398076 +184 51 model.embedding_dim 2.0 +184 51 optimizer.lr 0.009228856741957668 +184 51 training.batch_size 1.0 +184 51 training.label_smoothing 0.17073964565476435 +184 52 model.embedding_dim 1.0 +184 52 optimizer.lr 0.007373010067537899 +184 52 training.batch_size 0.0 +184 52 training.label_smoothing 0.00880855012822318 +184 53 model.embedding_dim 0.0 +184 53 optimizer.lr 0.0023258150681550534 +184 53 training.batch_size 1.0 +184 53 training.label_smoothing 0.6085802974839579 +184 54 model.embedding_dim 0.0 +184 54 optimizer.lr 0.012486902338787076 +184 54 training.batch_size 2.0 +184 54 training.label_smoothing 
0.20881516139206446 +184 55 model.embedding_dim 1.0 +184 55 optimizer.lr 0.006030927711891794 +184 55 training.batch_size 2.0 +184 55 training.label_smoothing 0.002672180724302262 +184 56 model.embedding_dim 0.0 +184 56 optimizer.lr 0.007624937207333377 +184 56 training.batch_size 0.0 +184 56 training.label_smoothing 0.0013846560801285819 +184 57 model.embedding_dim 0.0 +184 57 optimizer.lr 0.08728638103327115 +184 57 training.batch_size 1.0 +184 57 training.label_smoothing 0.030884908235008075 +184 58 model.embedding_dim 0.0 +184 58 optimizer.lr 0.02114040730913692 +184 58 training.batch_size 1.0 +184 58 training.label_smoothing 0.006131235626717883 +184 59 model.embedding_dim 1.0 +184 59 optimizer.lr 0.002434734599505042 +184 59 training.batch_size 1.0 +184 59 training.label_smoothing 0.0010612464439163857 +184 60 model.embedding_dim 1.0 +184 60 optimizer.lr 0.005831806244210961 +184 60 training.batch_size 0.0 +184 60 training.label_smoothing 0.029494028387346046 +184 61 model.embedding_dim 2.0 +184 61 optimizer.lr 0.03765151245927517 +184 61 training.batch_size 0.0 +184 61 training.label_smoothing 0.23481118521759628 +184 62 model.embedding_dim 0.0 +184 62 optimizer.lr 0.03254885131234092 +184 62 training.batch_size 0.0 +184 62 training.label_smoothing 0.008005178236886987 +184 63 model.embedding_dim 0.0 +184 63 optimizer.lr 0.0010207506769365825 +184 63 training.batch_size 0.0 +184 63 training.label_smoothing 0.23006096161151432 +184 64 model.embedding_dim 0.0 +184 64 optimizer.lr 0.006141939450580968 +184 64 training.batch_size 2.0 +184 64 training.label_smoothing 0.4306002521983172 +184 65 model.embedding_dim 1.0 +184 65 optimizer.lr 0.02931732501810958 +184 65 training.batch_size 2.0 +184 65 training.label_smoothing 0.12071730512078283 +184 66 model.embedding_dim 1.0 +184 66 optimizer.lr 0.04350950808735972 +184 66 training.batch_size 0.0 +184 66 training.label_smoothing 0.00558251596141458 +184 67 model.embedding_dim 2.0 +184 67 optimizer.lr 
0.002923160895335742 +184 67 training.batch_size 1.0 +184 67 training.label_smoothing 0.007379908074473858 +184 68 model.embedding_dim 0.0 +184 68 optimizer.lr 0.010999254145832398 +184 68 training.batch_size 2.0 +184 68 training.label_smoothing 0.0013646405724950254 +184 69 model.embedding_dim 2.0 +184 69 optimizer.lr 0.06360629897939842 +184 69 training.batch_size 0.0 +184 69 training.label_smoothing 0.16403435476523917 +184 70 model.embedding_dim 2.0 +184 70 optimizer.lr 0.030400058123149283 +184 70 training.batch_size 2.0 +184 70 training.label_smoothing 0.002848383946882163 +184 71 model.embedding_dim 0.0 +184 71 optimizer.lr 0.001956751240538918 +184 71 training.batch_size 1.0 +184 71 training.label_smoothing 0.003803685113227735 +184 72 model.embedding_dim 1.0 +184 72 optimizer.lr 0.0027009043688634464 +184 72 training.batch_size 2.0 +184 72 training.label_smoothing 0.009910364243636569 +184 73 model.embedding_dim 1.0 +184 73 optimizer.lr 0.001129380006677008 +184 73 training.batch_size 1.0 +184 73 training.label_smoothing 0.03310098925689759 +184 74 model.embedding_dim 2.0 +184 74 optimizer.lr 0.02826142774125666 +184 74 training.batch_size 2.0 +184 74 training.label_smoothing 0.009941277776599528 +184 75 model.embedding_dim 0.0 +184 75 optimizer.lr 0.00921863164247397 +184 75 training.batch_size 0.0 +184 75 training.label_smoothing 0.026381678938283373 +184 76 model.embedding_dim 1.0 +184 76 optimizer.lr 0.006834365014855584 +184 76 training.batch_size 2.0 +184 76 training.label_smoothing 0.03809944381820001 +184 77 model.embedding_dim 1.0 +184 77 optimizer.lr 0.011597376692547186 +184 77 training.batch_size 2.0 +184 77 training.label_smoothing 0.037492766013940464 +184 78 model.embedding_dim 0.0 +184 78 optimizer.lr 0.09315293633395534 +184 78 training.batch_size 1.0 +184 78 training.label_smoothing 0.0062562073001468915 +184 79 model.embedding_dim 1.0 +184 79 optimizer.lr 0.0016595892825815302 +184 79 training.batch_size 1.0 +184 79 
training.label_smoothing 0.0018640395060724342 +184 80 model.embedding_dim 1.0 +184 80 optimizer.lr 0.012805503777951048 +184 80 training.batch_size 0.0 +184 80 training.label_smoothing 0.022792252101583636 +184 81 model.embedding_dim 0.0 +184 81 optimizer.lr 0.00159021458133075 +184 81 training.batch_size 2.0 +184 81 training.label_smoothing 0.8511552462908205 +184 82 model.embedding_dim 0.0 +184 82 optimizer.lr 0.003160821851499794 +184 82 training.batch_size 2.0 +184 82 training.label_smoothing 0.0028950675088693 +184 83 model.embedding_dim 0.0 +184 83 optimizer.lr 0.06870813949757566 +184 83 training.batch_size 1.0 +184 83 training.label_smoothing 0.2564806359579522 +184 84 model.embedding_dim 1.0 +184 84 optimizer.lr 0.0017170505866975184 +184 84 training.batch_size 2.0 +184 84 training.label_smoothing 0.555325581392162 +184 85 model.embedding_dim 0.0 +184 85 optimizer.lr 0.002456148494923197 +184 85 training.batch_size 2.0 +184 85 training.label_smoothing 0.7075910237166193 +184 86 model.embedding_dim 2.0 +184 86 optimizer.lr 0.003217396329815301 +184 86 training.batch_size 1.0 +184 86 training.label_smoothing 0.23627003301078728 +184 87 model.embedding_dim 2.0 +184 87 optimizer.lr 0.0014312101477073663 +184 87 training.batch_size 0.0 +184 87 training.label_smoothing 0.023455116213687986 +184 88 model.embedding_dim 1.0 +184 88 optimizer.lr 0.016702972826141246 +184 88 training.batch_size 2.0 +184 88 training.label_smoothing 0.004058000048190193 +184 89 model.embedding_dim 1.0 +184 89 optimizer.lr 0.0021222144410821824 +184 89 training.batch_size 1.0 +184 89 training.label_smoothing 0.06279543510298963 +184 90 model.embedding_dim 1.0 +184 90 optimizer.lr 0.029058619448334235 +184 90 training.batch_size 1.0 +184 90 training.label_smoothing 0.006099128358669412 +184 91 model.embedding_dim 2.0 +184 91 optimizer.lr 0.016227993208402316 +184 91 training.batch_size 0.0 +184 91 training.label_smoothing 0.042686358259496175 +184 92 model.embedding_dim 2.0 +184 92 
optimizer.lr 0.0716858889743908 +184 92 training.batch_size 1.0 +184 92 training.label_smoothing 0.0032826625341721297 +184 93 model.embedding_dim 0.0 +184 93 optimizer.lr 0.09104178950678342 +184 93 training.batch_size 2.0 +184 93 training.label_smoothing 0.010382897305989711 +184 94 model.embedding_dim 1.0 +184 94 optimizer.lr 0.00891030683710641 +184 94 training.batch_size 1.0 +184 94 training.label_smoothing 0.002302564941300001 +184 95 model.embedding_dim 0.0 +184 95 optimizer.lr 0.016439849252354925 +184 95 training.batch_size 0.0 +184 95 training.label_smoothing 0.001243483428933673 +184 96 model.embedding_dim 0.0 +184 96 optimizer.lr 0.01581912881371971 +184 96 training.batch_size 0.0 +184 96 training.label_smoothing 0.2974790980154707 +184 97 model.embedding_dim 2.0 +184 97 optimizer.lr 0.036138363171394544 +184 97 training.batch_size 2.0 +184 97 training.label_smoothing 0.03299435816696424 +184 98 model.embedding_dim 1.0 +184 98 optimizer.lr 0.016445664969849248 +184 98 training.batch_size 2.0 +184 98 training.label_smoothing 0.05391683801106215 +184 99 model.embedding_dim 1.0 +184 99 optimizer.lr 0.06812577215293768 +184 99 training.batch_size 1.0 +184 99 training.label_smoothing 0.0022570968106074524 +184 100 model.embedding_dim 0.0 +184 100 optimizer.lr 0.015292187587170176 +184 100 training.batch_size 0.0 +184 100 training.label_smoothing 0.011973406408608698 +184 1 dataset """kinships""" +184 1 model """distmult""" +184 1 loss """bceaftersigmoid""" +184 1 regularizer """no""" +184 1 optimizer """adam""" +184 1 training_loop """lcwa""" +184 1 evaluator """rankbased""" +184 2 dataset """kinships""" +184 2 model """distmult""" +184 2 loss """bceaftersigmoid""" +184 2 regularizer """no""" +184 2 optimizer """adam""" +184 2 training_loop """lcwa""" +184 2 evaluator """rankbased""" +184 3 dataset """kinships""" +184 3 model """distmult""" +184 3 loss """bceaftersigmoid""" +184 3 regularizer """no""" +184 3 optimizer """adam""" +184 3 training_loop 
"""lcwa""" +184 3 evaluator """rankbased""" +184 4 dataset """kinships""" +184 4 model """distmult""" +184 4 loss """bceaftersigmoid""" +184 4 regularizer """no""" +184 4 optimizer """adam""" +184 4 training_loop """lcwa""" +184 4 evaluator """rankbased""" +184 5 dataset """kinships""" +184 5 model """distmult""" +184 5 loss """bceaftersigmoid""" +184 5 regularizer """no""" +184 5 optimizer """adam""" +184 5 training_loop """lcwa""" +184 5 evaluator """rankbased""" +184 6 dataset """kinships""" +184 6 model """distmult""" +184 6 loss """bceaftersigmoid""" +184 6 regularizer """no""" +184 6 optimizer """adam""" +184 6 training_loop """lcwa""" +184 6 evaluator """rankbased""" +184 7 dataset """kinships""" +184 7 model """distmult""" +184 7 loss """bceaftersigmoid""" +184 7 regularizer """no""" +184 7 optimizer """adam""" +184 7 training_loop """lcwa""" +184 7 evaluator """rankbased""" +184 8 dataset """kinships""" +184 8 model """distmult""" +184 8 loss """bceaftersigmoid""" +184 8 regularizer """no""" +184 8 optimizer """adam""" +184 8 training_loop """lcwa""" +184 8 evaluator """rankbased""" +184 9 dataset """kinships""" +184 9 model """distmult""" +184 9 loss """bceaftersigmoid""" +184 9 regularizer """no""" +184 9 optimizer """adam""" +184 9 training_loop """lcwa""" +184 9 evaluator """rankbased""" +184 10 dataset """kinships""" +184 10 model """distmult""" +184 10 loss """bceaftersigmoid""" +184 10 regularizer """no""" +184 10 optimizer """adam""" +184 10 training_loop """lcwa""" +184 10 evaluator """rankbased""" +184 11 dataset """kinships""" +184 11 model """distmult""" +184 11 loss """bceaftersigmoid""" +184 11 regularizer """no""" +184 11 optimizer """adam""" +184 11 training_loop """lcwa""" +184 11 evaluator """rankbased""" +184 12 dataset """kinships""" +184 12 model """distmult""" +184 12 loss """bceaftersigmoid""" +184 12 regularizer """no""" +184 12 optimizer """adam""" +184 12 training_loop """lcwa""" +184 12 evaluator """rankbased""" +184 13 dataset 
"""kinships""" +184 13 model """distmult""" +184 13 loss """bceaftersigmoid""" +184 13 regularizer """no""" +184 13 optimizer """adam""" +184 13 training_loop """lcwa""" +184 13 evaluator """rankbased""" +184 14 dataset """kinships""" +184 14 model """distmult""" +184 14 loss """bceaftersigmoid""" +184 14 regularizer """no""" +184 14 optimizer """adam""" +184 14 training_loop """lcwa""" +184 14 evaluator """rankbased""" +184 15 dataset """kinships""" +184 15 model """distmult""" +184 15 loss """bceaftersigmoid""" +184 15 regularizer """no""" +184 15 optimizer """adam""" +184 15 training_loop """lcwa""" +184 15 evaluator """rankbased""" +184 16 dataset """kinships""" +184 16 model """distmult""" +184 16 loss """bceaftersigmoid""" +184 16 regularizer """no""" +184 16 optimizer """adam""" +184 16 training_loop """lcwa""" +184 16 evaluator """rankbased""" +184 17 dataset """kinships""" +184 17 model """distmult""" +184 17 loss """bceaftersigmoid""" +184 17 regularizer """no""" +184 17 optimizer """adam""" +184 17 training_loop """lcwa""" +184 17 evaluator """rankbased""" +184 18 dataset """kinships""" +184 18 model """distmult""" +184 18 loss """bceaftersigmoid""" +184 18 regularizer """no""" +184 18 optimizer """adam""" +184 18 training_loop """lcwa""" +184 18 evaluator """rankbased""" +184 19 dataset """kinships""" +184 19 model """distmult""" +184 19 loss """bceaftersigmoid""" +184 19 regularizer """no""" +184 19 optimizer """adam""" +184 19 training_loop """lcwa""" +184 19 evaluator """rankbased""" +184 20 dataset """kinships""" +184 20 model """distmult""" +184 20 loss """bceaftersigmoid""" +184 20 regularizer """no""" +184 20 optimizer """adam""" +184 20 training_loop """lcwa""" +184 20 evaluator """rankbased""" +184 21 dataset """kinships""" +184 21 model """distmult""" +184 21 loss """bceaftersigmoid""" +184 21 regularizer """no""" +184 21 optimizer """adam""" +184 21 training_loop """lcwa""" +184 21 evaluator """rankbased""" +184 22 dataset """kinships""" +184 
22 model """distmult""" +184 22 loss """bceaftersigmoid""" +184 22 regularizer """no""" +184 22 optimizer """adam""" +184 22 training_loop """lcwa""" +184 22 evaluator """rankbased""" +184 23 dataset """kinships""" +184 23 model """distmult""" +184 23 loss """bceaftersigmoid""" +184 23 regularizer """no""" +184 23 optimizer """adam""" +184 23 training_loop """lcwa""" +184 23 evaluator """rankbased""" +184 24 dataset """kinships""" +184 24 model """distmult""" +184 24 loss """bceaftersigmoid""" +184 24 regularizer """no""" +184 24 optimizer """adam""" +184 24 training_loop """lcwa""" +184 24 evaluator """rankbased""" +184 25 dataset """kinships""" +184 25 model """distmult""" +184 25 loss """bceaftersigmoid""" +184 25 regularizer """no""" +184 25 optimizer """adam""" +184 25 training_loop """lcwa""" +184 25 evaluator """rankbased""" +184 26 dataset """kinships""" +184 26 model """distmult""" +184 26 loss """bceaftersigmoid""" +184 26 regularizer """no""" +184 26 optimizer """adam""" +184 26 training_loop """lcwa""" +184 26 evaluator """rankbased""" +184 27 dataset """kinships""" +184 27 model """distmult""" +184 27 loss """bceaftersigmoid""" +184 27 regularizer """no""" +184 27 optimizer """adam""" +184 27 training_loop """lcwa""" +184 27 evaluator """rankbased""" +184 28 dataset """kinships""" +184 28 model """distmult""" +184 28 loss """bceaftersigmoid""" +184 28 regularizer """no""" +184 28 optimizer """adam""" +184 28 training_loop """lcwa""" +184 28 evaluator """rankbased""" +184 29 dataset """kinships""" +184 29 model """distmult""" +184 29 loss """bceaftersigmoid""" +184 29 regularizer """no""" +184 29 optimizer """adam""" +184 29 training_loop """lcwa""" +184 29 evaluator """rankbased""" +184 30 dataset """kinships""" +184 30 model """distmult""" +184 30 loss """bceaftersigmoid""" +184 30 regularizer """no""" +184 30 optimizer """adam""" +184 30 training_loop """lcwa""" +184 30 evaluator """rankbased""" +184 31 dataset """kinships""" +184 31 model 
"""distmult""" +184 31 loss """bceaftersigmoid""" +184 31 regularizer """no""" +184 31 optimizer """adam""" +184 31 training_loop """lcwa""" +184 31 evaluator """rankbased""" +184 32 dataset """kinships""" +184 32 model """distmult""" +184 32 loss """bceaftersigmoid""" +184 32 regularizer """no""" +184 32 optimizer """adam""" +184 32 training_loop """lcwa""" +184 32 evaluator """rankbased""" +184 33 dataset """kinships""" +184 33 model """distmult""" +184 33 loss """bceaftersigmoid""" +184 33 regularizer """no""" +184 33 optimizer """adam""" +184 33 training_loop """lcwa""" +184 33 evaluator """rankbased""" +184 34 dataset """kinships""" +184 34 model """distmult""" +184 34 loss """bceaftersigmoid""" +184 34 regularizer """no""" +184 34 optimizer """adam""" +184 34 training_loop """lcwa""" +184 34 evaluator """rankbased""" +184 35 dataset """kinships""" +184 35 model """distmult""" +184 35 loss """bceaftersigmoid""" +184 35 regularizer """no""" +184 35 optimizer """adam""" +184 35 training_loop """lcwa""" +184 35 evaluator """rankbased""" +184 36 dataset """kinships""" +184 36 model """distmult""" +184 36 loss """bceaftersigmoid""" +184 36 regularizer """no""" +184 36 optimizer """adam""" +184 36 training_loop """lcwa""" +184 36 evaluator """rankbased""" +184 37 dataset """kinships""" +184 37 model """distmult""" +184 37 loss """bceaftersigmoid""" +184 37 regularizer """no""" +184 37 optimizer """adam""" +184 37 training_loop """lcwa""" +184 37 evaluator """rankbased""" +184 38 dataset """kinships""" +184 38 model """distmult""" +184 38 loss """bceaftersigmoid""" +184 38 regularizer """no""" +184 38 optimizer """adam""" +184 38 training_loop """lcwa""" +184 38 evaluator """rankbased""" +184 39 dataset """kinships""" +184 39 model """distmult""" +184 39 loss """bceaftersigmoid""" +184 39 regularizer """no""" +184 39 optimizer """adam""" +184 39 training_loop """lcwa""" +184 39 evaluator """rankbased""" +184 40 dataset """kinships""" +184 40 model """distmult""" +184 
40 loss """bceaftersigmoid""" +184 40 regularizer """no""" +184 40 optimizer """adam""" +184 40 training_loop """lcwa""" +184 40 evaluator """rankbased""" +184 41 dataset """kinships""" +184 41 model """distmult""" +184 41 loss """bceaftersigmoid""" +184 41 regularizer """no""" +184 41 optimizer """adam""" +184 41 training_loop """lcwa""" +184 41 evaluator """rankbased""" +184 42 dataset """kinships""" +184 42 model """distmult""" +184 42 loss """bceaftersigmoid""" +184 42 regularizer """no""" +184 42 optimizer """adam""" +184 42 training_loop """lcwa""" +184 42 evaluator """rankbased""" +184 43 dataset """kinships""" +184 43 model """distmult""" +184 43 loss """bceaftersigmoid""" +184 43 regularizer """no""" +184 43 optimizer """adam""" +184 43 training_loop """lcwa""" +184 43 evaluator """rankbased""" +184 44 dataset """kinships""" +184 44 model """distmult""" +184 44 loss """bceaftersigmoid""" +184 44 regularizer """no""" +184 44 optimizer """adam""" +184 44 training_loop """lcwa""" +184 44 evaluator """rankbased""" +184 45 dataset """kinships""" +184 45 model """distmult""" +184 45 loss """bceaftersigmoid""" +184 45 regularizer """no""" +184 45 optimizer """adam""" +184 45 training_loop """lcwa""" +184 45 evaluator """rankbased""" +184 46 dataset """kinships""" +184 46 model """distmult""" +184 46 loss """bceaftersigmoid""" +184 46 regularizer """no""" +184 46 optimizer """adam""" +184 46 training_loop """lcwa""" +184 46 evaluator """rankbased""" +184 47 dataset """kinships""" +184 47 model """distmult""" +184 47 loss """bceaftersigmoid""" +184 47 regularizer """no""" +184 47 optimizer """adam""" +184 47 training_loop """lcwa""" +184 47 evaluator """rankbased""" +184 48 dataset """kinships""" +184 48 model """distmult""" +184 48 loss """bceaftersigmoid""" +184 48 regularizer """no""" +184 48 optimizer """adam""" +184 48 training_loop """lcwa""" +184 48 evaluator """rankbased""" +184 49 dataset """kinships""" +184 49 model """distmult""" +184 49 loss 
"""bceaftersigmoid""" +184 49 regularizer """no""" +184 49 optimizer """adam""" +184 49 training_loop """lcwa""" +184 49 evaluator """rankbased""" +184 50 dataset """kinships""" +184 50 model """distmult""" +184 50 loss """bceaftersigmoid""" +184 50 regularizer """no""" +184 50 optimizer """adam""" +184 50 training_loop """lcwa""" +184 50 evaluator """rankbased""" +184 51 dataset """kinships""" +184 51 model """distmult""" +184 51 loss """bceaftersigmoid""" +184 51 regularizer """no""" +184 51 optimizer """adam""" +184 51 training_loop """lcwa""" +184 51 evaluator """rankbased""" +184 52 dataset """kinships""" +184 52 model """distmult""" +184 52 loss """bceaftersigmoid""" +184 52 regularizer """no""" +184 52 optimizer """adam""" +184 52 training_loop """lcwa""" +184 52 evaluator """rankbased""" +184 53 dataset """kinships""" +184 53 model """distmult""" +184 53 loss """bceaftersigmoid""" +184 53 regularizer """no""" +184 53 optimizer """adam""" +184 53 training_loop """lcwa""" +184 53 evaluator """rankbased""" +184 54 dataset """kinships""" +184 54 model """distmult""" +184 54 loss """bceaftersigmoid""" +184 54 regularizer """no""" +184 54 optimizer """adam""" +184 54 training_loop """lcwa""" +184 54 evaluator """rankbased""" +184 55 dataset """kinships""" +184 55 model """distmult""" +184 55 loss """bceaftersigmoid""" +184 55 regularizer """no""" +184 55 optimizer """adam""" +184 55 training_loop """lcwa""" +184 55 evaluator """rankbased""" +184 56 dataset """kinships""" +184 56 model """distmult""" +184 56 loss """bceaftersigmoid""" +184 56 regularizer """no""" +184 56 optimizer """adam""" +184 56 training_loop """lcwa""" +184 56 evaluator """rankbased""" +184 57 dataset """kinships""" +184 57 model """distmult""" +184 57 loss """bceaftersigmoid""" +184 57 regularizer """no""" +184 57 optimizer """adam""" +184 57 training_loop """lcwa""" +184 57 evaluator """rankbased""" +184 58 dataset """kinships""" +184 58 model """distmult""" +184 58 loss 
"""bceaftersigmoid""" +184 58 regularizer """no""" +184 58 optimizer """adam""" +184 58 training_loop """lcwa""" +184 58 evaluator """rankbased""" +184 59 dataset """kinships""" +184 59 model """distmult""" +184 59 loss """bceaftersigmoid""" +184 59 regularizer """no""" +184 59 optimizer """adam""" +184 59 training_loop """lcwa""" +184 59 evaluator """rankbased""" +184 60 dataset """kinships""" +184 60 model """distmult""" +184 60 loss """bceaftersigmoid""" +184 60 regularizer """no""" +184 60 optimizer """adam""" +184 60 training_loop """lcwa""" +184 60 evaluator """rankbased""" +184 61 dataset """kinships""" +184 61 model """distmult""" +184 61 loss """bceaftersigmoid""" +184 61 regularizer """no""" +184 61 optimizer """adam""" +184 61 training_loop """lcwa""" +184 61 evaluator """rankbased""" +184 62 dataset """kinships""" +184 62 model """distmult""" +184 62 loss """bceaftersigmoid""" +184 62 regularizer """no""" +184 62 optimizer """adam""" +184 62 training_loop """lcwa""" +184 62 evaluator """rankbased""" +184 63 dataset """kinships""" +184 63 model """distmult""" +184 63 loss """bceaftersigmoid""" +184 63 regularizer """no""" +184 63 optimizer """adam""" +184 63 training_loop """lcwa""" +184 63 evaluator """rankbased""" +184 64 dataset """kinships""" +184 64 model """distmult""" +184 64 loss """bceaftersigmoid""" +184 64 regularizer """no""" +184 64 optimizer """adam""" +184 64 training_loop """lcwa""" +184 64 evaluator """rankbased""" +184 65 dataset """kinships""" +184 65 model """distmult""" +184 65 loss """bceaftersigmoid""" +184 65 regularizer """no""" +184 65 optimizer """adam""" +184 65 training_loop """lcwa""" +184 65 evaluator """rankbased""" +184 66 dataset """kinships""" +184 66 model """distmult""" +184 66 loss """bceaftersigmoid""" +184 66 regularizer """no""" +184 66 optimizer """adam""" +184 66 training_loop """lcwa""" +184 66 evaluator """rankbased""" +184 67 dataset """kinships""" +184 67 model """distmult""" +184 67 loss 
"""bceaftersigmoid""" +184 67 regularizer """no""" +184 67 optimizer """adam""" +184 67 training_loop """lcwa""" +184 67 evaluator """rankbased""" +184 68 dataset """kinships""" +184 68 model """distmult""" +184 68 loss """bceaftersigmoid""" +184 68 regularizer """no""" +184 68 optimizer """adam""" +184 68 training_loop """lcwa""" +184 68 evaluator """rankbased""" +184 69 dataset """kinships""" +184 69 model """distmult""" +184 69 loss """bceaftersigmoid""" +184 69 regularizer """no""" +184 69 optimizer """adam""" +184 69 training_loop """lcwa""" +184 69 evaluator """rankbased""" +184 70 dataset """kinships""" +184 70 model """distmult""" +184 70 loss """bceaftersigmoid""" +184 70 regularizer """no""" +184 70 optimizer """adam""" +184 70 training_loop """lcwa""" +184 70 evaluator """rankbased""" +184 71 dataset """kinships""" +184 71 model """distmult""" +184 71 loss """bceaftersigmoid""" +184 71 regularizer """no""" +184 71 optimizer """adam""" +184 71 training_loop """lcwa""" +184 71 evaluator """rankbased""" +184 72 dataset """kinships""" +184 72 model """distmult""" +184 72 loss """bceaftersigmoid""" +184 72 regularizer """no""" +184 72 optimizer """adam""" +184 72 training_loop """lcwa""" +184 72 evaluator """rankbased""" +184 73 dataset """kinships""" +184 73 model """distmult""" +184 73 loss """bceaftersigmoid""" +184 73 regularizer """no""" +184 73 optimizer """adam""" +184 73 training_loop """lcwa""" +184 73 evaluator """rankbased""" +184 74 dataset """kinships""" +184 74 model """distmult""" +184 74 loss """bceaftersigmoid""" +184 74 regularizer """no""" +184 74 optimizer """adam""" +184 74 training_loop """lcwa""" +184 74 evaluator """rankbased""" +184 75 dataset """kinships""" +184 75 model """distmult""" +184 75 loss """bceaftersigmoid""" +184 75 regularizer """no""" +184 75 optimizer """adam""" +184 75 training_loop """lcwa""" +184 75 evaluator """rankbased""" +184 76 dataset """kinships""" +184 76 model """distmult""" +184 76 loss 
"""bceaftersigmoid""" +184 76 regularizer """no""" +184 76 optimizer """adam""" +184 76 training_loop """lcwa""" +184 76 evaluator """rankbased""" +184 77 dataset """kinships""" +184 77 model """distmult""" +184 77 loss """bceaftersigmoid""" +184 77 regularizer """no""" +184 77 optimizer """adam""" +184 77 training_loop """lcwa""" +184 77 evaluator """rankbased""" +184 78 dataset """kinships""" +184 78 model """distmult""" +184 78 loss """bceaftersigmoid""" +184 78 regularizer """no""" +184 78 optimizer """adam""" +184 78 training_loop """lcwa""" +184 78 evaluator """rankbased""" +184 79 dataset """kinships""" +184 79 model """distmult""" +184 79 loss """bceaftersigmoid""" +184 79 regularizer """no""" +184 79 optimizer """adam""" +184 79 training_loop """lcwa""" +184 79 evaluator """rankbased""" +184 80 dataset """kinships""" +184 80 model """distmult""" +184 80 loss """bceaftersigmoid""" +184 80 regularizer """no""" +184 80 optimizer """adam""" +184 80 training_loop """lcwa""" +184 80 evaluator """rankbased""" +184 81 dataset """kinships""" +184 81 model """distmult""" +184 81 loss """bceaftersigmoid""" +184 81 regularizer """no""" +184 81 optimizer """adam""" +184 81 training_loop """lcwa""" +184 81 evaluator """rankbased""" +184 82 dataset """kinships""" +184 82 model """distmult""" +184 82 loss """bceaftersigmoid""" +184 82 regularizer """no""" +184 82 optimizer """adam""" +184 82 training_loop """lcwa""" +184 82 evaluator """rankbased""" +184 83 dataset """kinships""" +184 83 model """distmult""" +184 83 loss """bceaftersigmoid""" +184 83 regularizer """no""" +184 83 optimizer """adam""" +184 83 training_loop """lcwa""" +184 83 evaluator """rankbased""" +184 84 dataset """kinships""" +184 84 model """distmult""" +184 84 loss """bceaftersigmoid""" +184 84 regularizer """no""" +184 84 optimizer """adam""" +184 84 training_loop """lcwa""" +184 84 evaluator """rankbased""" +184 85 dataset """kinships""" +184 85 model """distmult""" +184 85 loss 
"""bceaftersigmoid""" +184 85 regularizer """no""" +184 85 optimizer """adam""" +184 85 training_loop """lcwa""" +184 85 evaluator """rankbased""" +184 86 dataset """kinships""" +184 86 model """distmult""" +184 86 loss """bceaftersigmoid""" +184 86 regularizer """no""" +184 86 optimizer """adam""" +184 86 training_loop """lcwa""" +184 86 evaluator """rankbased""" +184 87 dataset """kinships""" +184 87 model """distmult""" +184 87 loss """bceaftersigmoid""" +184 87 regularizer """no""" +184 87 optimizer """adam""" +184 87 training_loop """lcwa""" +184 87 evaluator """rankbased""" +184 88 dataset """kinships""" +184 88 model """distmult""" +184 88 loss """bceaftersigmoid""" +184 88 regularizer """no""" +184 88 optimizer """adam""" +184 88 training_loop """lcwa""" +184 88 evaluator """rankbased""" +184 89 dataset """kinships""" +184 89 model """distmult""" +184 89 loss """bceaftersigmoid""" +184 89 regularizer """no""" +184 89 optimizer """adam""" +184 89 training_loop """lcwa""" +184 89 evaluator """rankbased""" +184 90 dataset """kinships""" +184 90 model """distmult""" +184 90 loss """bceaftersigmoid""" +184 90 regularizer """no""" +184 90 optimizer """adam""" +184 90 training_loop """lcwa""" +184 90 evaluator """rankbased""" +184 91 dataset """kinships""" +184 91 model """distmult""" +184 91 loss """bceaftersigmoid""" +184 91 regularizer """no""" +184 91 optimizer """adam""" +184 91 training_loop """lcwa""" +184 91 evaluator """rankbased""" +184 92 dataset """kinships""" +184 92 model """distmult""" +184 92 loss """bceaftersigmoid""" +184 92 regularizer """no""" +184 92 optimizer """adam""" +184 92 training_loop """lcwa""" +184 92 evaluator """rankbased""" +184 93 dataset """kinships""" +184 93 model """distmult""" +184 93 loss """bceaftersigmoid""" +184 93 regularizer """no""" +184 93 optimizer """adam""" +184 93 training_loop """lcwa""" +184 93 evaluator """rankbased""" +184 94 dataset """kinships""" +184 94 model """distmult""" +184 94 loss 
"""bceaftersigmoid""" +184 94 regularizer """no""" +184 94 optimizer """adam""" +184 94 training_loop """lcwa""" +184 94 evaluator """rankbased""" +184 95 dataset """kinships""" +184 95 model """distmult""" +184 95 loss """bceaftersigmoid""" +184 95 regularizer """no""" +184 95 optimizer """adam""" +184 95 training_loop """lcwa""" +184 95 evaluator """rankbased""" +184 96 dataset """kinships""" +184 96 model """distmult""" +184 96 loss """bceaftersigmoid""" +184 96 regularizer """no""" +184 96 optimizer """adam""" +184 96 training_loop """lcwa""" +184 96 evaluator """rankbased""" +184 97 dataset """kinships""" +184 97 model """distmult""" +184 97 loss """bceaftersigmoid""" +184 97 regularizer """no""" +184 97 optimizer """adam""" +184 97 training_loop """lcwa""" +184 97 evaluator """rankbased""" +184 98 dataset """kinships""" +184 98 model """distmult""" +184 98 loss """bceaftersigmoid""" +184 98 regularizer """no""" +184 98 optimizer """adam""" +184 98 training_loop """lcwa""" +184 98 evaluator """rankbased""" +184 99 dataset """kinships""" +184 99 model """distmult""" +184 99 loss """bceaftersigmoid""" +184 99 regularizer """no""" +184 99 optimizer """adam""" +184 99 training_loop """lcwa""" +184 99 evaluator """rankbased""" +184 100 dataset """kinships""" +184 100 model """distmult""" +184 100 loss """bceaftersigmoid""" +184 100 regularizer """no""" +184 100 optimizer """adam""" +184 100 training_loop """lcwa""" +184 100 evaluator """rankbased""" +185 1 model.embedding_dim 0.0 +185 1 optimizer.lr 0.06099558741893223 +185 1 training.batch_size 1.0 +185 1 training.label_smoothing 0.3727295608787627 +185 2 model.embedding_dim 0.0 +185 2 optimizer.lr 0.061950236433987234 +185 2 training.batch_size 2.0 +185 2 training.label_smoothing 0.019655930967530804 +185 3 model.embedding_dim 1.0 +185 3 optimizer.lr 0.009357377702374417 +185 3 training.batch_size 1.0 +185 3 training.label_smoothing 0.005864221165597641 +185 4 model.embedding_dim 0.0 +185 4 optimizer.lr 
0.0011254480800097786 +185 4 training.batch_size 1.0 +185 4 training.label_smoothing 0.6470558634923003 +185 5 model.embedding_dim 0.0 +185 5 optimizer.lr 0.014414594979559138 +185 5 training.batch_size 2.0 +185 5 training.label_smoothing 0.009450630819825428 +185 6 model.embedding_dim 0.0 +185 6 optimizer.lr 0.025906927041445878 +185 6 training.batch_size 1.0 +185 6 training.label_smoothing 0.02200626876635222 +185 7 model.embedding_dim 1.0 +185 7 optimizer.lr 0.0013337326367536518 +185 7 training.batch_size 0.0 +185 7 training.label_smoothing 0.1286297682531141 +185 8 model.embedding_dim 2.0 +185 8 optimizer.lr 0.0591016615702109 +185 8 training.batch_size 1.0 +185 8 training.label_smoothing 0.001384741150864037 +185 9 model.embedding_dim 0.0 +185 9 optimizer.lr 0.09646200271227702 +185 9 training.batch_size 2.0 +185 9 training.label_smoothing 0.18930657278755966 +185 10 model.embedding_dim 0.0 +185 10 optimizer.lr 0.04115819600476558 +185 10 training.batch_size 0.0 +185 10 training.label_smoothing 0.006463987441969972 +185 11 model.embedding_dim 1.0 +185 11 optimizer.lr 0.0029361688966498605 +185 11 training.batch_size 0.0 +185 11 training.label_smoothing 0.0015222374647017906 +185 12 model.embedding_dim 2.0 +185 12 optimizer.lr 0.0034582287530449947 +185 12 training.batch_size 0.0 +185 12 training.label_smoothing 0.09738699416105971 +185 13 model.embedding_dim 1.0 +185 13 optimizer.lr 0.08810505075802663 +185 13 training.batch_size 2.0 +185 13 training.label_smoothing 0.004604706093674899 +185 14 model.embedding_dim 0.0 +185 14 optimizer.lr 0.008088135283783486 +185 14 training.batch_size 2.0 +185 14 training.label_smoothing 0.10212968000350939 +185 15 model.embedding_dim 2.0 +185 15 optimizer.lr 0.08503235169582321 +185 15 training.batch_size 0.0 +185 15 training.label_smoothing 0.013719688352369376 +185 16 model.embedding_dim 2.0 +185 16 optimizer.lr 0.026037571727550276 +185 16 training.batch_size 1.0 +185 16 training.label_smoothing 0.5646223137620432 +185 
17 model.embedding_dim 1.0 +185 17 optimizer.lr 0.0013116905706310924 +185 17 training.batch_size 1.0 +185 17 training.label_smoothing 0.008678857514532899 +185 18 model.embedding_dim 2.0 +185 18 optimizer.lr 0.001298293107659661 +185 18 training.batch_size 1.0 +185 18 training.label_smoothing 0.16486836542369368 +185 19 model.embedding_dim 1.0 +185 19 optimizer.lr 0.06758997290144196 +185 19 training.batch_size 2.0 +185 19 training.label_smoothing 0.6595113562645274 +185 20 model.embedding_dim 2.0 +185 20 optimizer.lr 0.00264943383509853 +185 20 training.batch_size 2.0 +185 20 training.label_smoothing 0.01099348666039716 +185 21 model.embedding_dim 0.0 +185 21 optimizer.lr 0.0060150287265731996 +185 21 training.batch_size 0.0 +185 21 training.label_smoothing 0.015065490262833792 +185 22 model.embedding_dim 2.0 +185 22 optimizer.lr 0.0042518209252276964 +185 22 training.batch_size 0.0 +185 22 training.label_smoothing 0.012114630243605417 +185 23 model.embedding_dim 2.0 +185 23 optimizer.lr 0.04542051492359651 +185 23 training.batch_size 2.0 +185 23 training.label_smoothing 0.01546329317834755 +185 24 model.embedding_dim 2.0 +185 24 optimizer.lr 0.005954674278378703 +185 24 training.batch_size 2.0 +185 24 training.label_smoothing 0.013167657523244338 +185 25 model.embedding_dim 1.0 +185 25 optimizer.lr 0.013190938202162173 +185 25 training.batch_size 2.0 +185 25 training.label_smoothing 0.06449115513445611 +185 26 model.embedding_dim 1.0 +185 26 optimizer.lr 0.015005810256985833 +185 26 training.batch_size 1.0 +185 26 training.label_smoothing 0.2578577344206344 +185 27 model.embedding_dim 0.0 +185 27 optimizer.lr 0.006263814165834531 +185 27 training.batch_size 2.0 +185 27 training.label_smoothing 0.5002492355180488 +185 28 model.embedding_dim 2.0 +185 28 optimizer.lr 0.016820089052214618 +185 28 training.batch_size 0.0 +185 28 training.label_smoothing 0.06274794785222962 +185 29 model.embedding_dim 2.0 +185 29 optimizer.lr 0.001961418127407933 +185 29 
training.batch_size 2.0 +185 29 training.label_smoothing 0.007388118401757567 +185 30 model.embedding_dim 2.0 +185 30 optimizer.lr 0.00888034170571823 +185 30 training.batch_size 1.0 +185 30 training.label_smoothing 0.09684658863311009 +185 31 model.embedding_dim 2.0 +185 31 optimizer.lr 0.0016344936687629384 +185 31 training.batch_size 0.0 +185 31 training.label_smoothing 0.7020259276123699 +185 32 model.embedding_dim 0.0 +185 32 optimizer.lr 0.0624179400675158 +185 32 training.batch_size 2.0 +185 32 training.label_smoothing 0.01372273571098742 +185 33 model.embedding_dim 1.0 +185 33 optimizer.lr 0.061770819436311505 +185 33 training.batch_size 2.0 +185 33 training.label_smoothing 0.014803451505734064 +185 34 model.embedding_dim 1.0 +185 34 optimizer.lr 0.0018114796160464716 +185 34 training.batch_size 2.0 +185 34 training.label_smoothing 0.08444339216714995 +185 35 model.embedding_dim 0.0 +185 35 optimizer.lr 0.07783054174331708 +185 35 training.batch_size 2.0 +185 35 training.label_smoothing 0.011324229662324343 +185 36 model.embedding_dim 1.0 +185 36 optimizer.lr 0.007257789331639826 +185 36 training.batch_size 2.0 +185 36 training.label_smoothing 0.0010508889854855125 +185 37 model.embedding_dim 2.0 +185 37 optimizer.lr 0.0021465033232699127 +185 37 training.batch_size 2.0 +185 37 training.label_smoothing 0.4831779856410694 +185 38 model.embedding_dim 2.0 +185 38 optimizer.lr 0.00659309526065 +185 38 training.batch_size 1.0 +185 38 training.label_smoothing 0.07198124366010894 +185 39 model.embedding_dim 0.0 +185 39 optimizer.lr 0.07572212835315086 +185 39 training.batch_size 2.0 +185 39 training.label_smoothing 0.052091216160479126 +185 40 model.embedding_dim 2.0 +185 40 optimizer.lr 0.0015054022767397425 +185 40 training.batch_size 0.0 +185 40 training.label_smoothing 0.0028652682359214593 +185 41 model.embedding_dim 0.0 +185 41 optimizer.lr 0.001916396852087709 +185 41 training.batch_size 0.0 +185 41 training.label_smoothing 0.2391275609859559 +185 42 
model.embedding_dim 0.0 +185 42 optimizer.lr 0.0018466135154701154 +185 42 training.batch_size 1.0 +185 42 training.label_smoothing 0.006993405655418459 +185 43 model.embedding_dim 2.0 +185 43 optimizer.lr 0.0014210868570532487 +185 43 training.batch_size 1.0 +185 43 training.label_smoothing 0.2874516473923913 +185 44 model.embedding_dim 2.0 +185 44 optimizer.lr 0.036084227973419244 +185 44 training.batch_size 2.0 +185 44 training.label_smoothing 0.012253725526710443 +185 45 model.embedding_dim 1.0 +185 45 optimizer.lr 0.0014319383789648623 +185 45 training.batch_size 2.0 +185 45 training.label_smoothing 0.08935564305145774 +185 46 model.embedding_dim 0.0 +185 46 optimizer.lr 0.016737065492666292 +185 46 training.batch_size 1.0 +185 46 training.label_smoothing 0.027483269004753733 +185 47 model.embedding_dim 1.0 +185 47 optimizer.lr 0.008925493714977238 +185 47 training.batch_size 0.0 +185 47 training.label_smoothing 0.023025098085649925 +185 48 model.embedding_dim 1.0 +185 48 optimizer.lr 0.012666151551678669 +185 48 training.batch_size 0.0 +185 48 training.label_smoothing 0.002219600696441562 +185 49 model.embedding_dim 0.0 +185 49 optimizer.lr 0.0012782540504715665 +185 49 training.batch_size 1.0 +185 49 training.label_smoothing 0.03073522900483926 +185 50 model.embedding_dim 2.0 +185 50 optimizer.lr 0.08106945103619387 +185 50 training.batch_size 0.0 +185 50 training.label_smoothing 0.004806759568559049 +185 51 model.embedding_dim 1.0 +185 51 optimizer.lr 0.047608675672960385 +185 51 training.batch_size 0.0 +185 51 training.label_smoothing 0.06697233250998925 +185 52 model.embedding_dim 2.0 +185 52 optimizer.lr 0.0021673857594382557 +185 52 training.batch_size 1.0 +185 52 training.label_smoothing 0.6334497515791352 +185 53 model.embedding_dim 1.0 +185 53 optimizer.lr 0.00485634432227105 +185 53 training.batch_size 2.0 +185 53 training.label_smoothing 0.26629720797610396 +185 54 model.embedding_dim 0.0 +185 54 optimizer.lr 0.05255861088326984 +185 54 
training.batch_size 0.0 +185 54 training.label_smoothing 0.550250315497527 +185 55 model.embedding_dim 1.0 +185 55 optimizer.lr 0.024918084601530107 +185 55 training.batch_size 0.0 +185 55 training.label_smoothing 0.004593994627198297 +185 56 model.embedding_dim 0.0 +185 56 optimizer.lr 0.001822695524207332 +185 56 training.batch_size 0.0 +185 56 training.label_smoothing 0.240482923395186 +185 57 model.embedding_dim 2.0 +185 57 optimizer.lr 0.0053651700026987395 +185 57 training.batch_size 2.0 +185 57 training.label_smoothing 0.06483460967580272 +185 58 model.embedding_dim 0.0 +185 58 optimizer.lr 0.007326747535423887 +185 58 training.batch_size 0.0 +185 58 training.label_smoothing 0.00966320083049681 +185 59 model.embedding_dim 2.0 +185 59 optimizer.lr 0.00932173071515997 +185 59 training.batch_size 2.0 +185 59 training.label_smoothing 0.02603561432013179 +185 60 model.embedding_dim 2.0 +185 60 optimizer.lr 0.009122444265806719 +185 60 training.batch_size 2.0 +185 60 training.label_smoothing 0.021669426479295255 +185 61 model.embedding_dim 1.0 +185 61 optimizer.lr 0.001958083516535452 +185 61 training.batch_size 1.0 +185 61 training.label_smoothing 0.303252291115052 +185 62 model.embedding_dim 1.0 +185 62 optimizer.lr 0.05136083539808051 +185 62 training.batch_size 1.0 +185 62 training.label_smoothing 0.08723225693399271 +185 63 model.embedding_dim 1.0 +185 63 optimizer.lr 0.016570290522454987 +185 63 training.batch_size 1.0 +185 63 training.label_smoothing 0.0018127418610969128 +185 64 model.embedding_dim 1.0 +185 64 optimizer.lr 0.006900154308980767 +185 64 training.batch_size 0.0 +185 64 training.label_smoothing 0.20976793186698584 +185 65 model.embedding_dim 1.0 +185 65 optimizer.lr 0.06245753809416748 +185 65 training.batch_size 2.0 +185 65 training.label_smoothing 0.008297631644041712 +185 66 model.embedding_dim 1.0 +185 66 optimizer.lr 0.003531521688619473 +185 66 training.batch_size 0.0 +185 66 training.label_smoothing 0.00343382294142558 +185 67 
model.embedding_dim 0.0 +185 67 optimizer.lr 0.0047780541102469655 +185 67 training.batch_size 2.0 +185 67 training.label_smoothing 0.11918338093475632 +185 68 model.embedding_dim 0.0 +185 68 optimizer.lr 0.003794982099460185 +185 68 training.batch_size 1.0 +185 68 training.label_smoothing 0.6544531365841522 +185 69 model.embedding_dim 0.0 +185 69 optimizer.lr 0.038592137774114625 +185 69 training.batch_size 1.0 +185 69 training.label_smoothing 0.0026112019526696997 +185 70 model.embedding_dim 0.0 +185 70 optimizer.lr 0.002708781858712531 +185 70 training.batch_size 2.0 +185 70 training.label_smoothing 0.13147187418059625 +185 71 model.embedding_dim 2.0 +185 71 optimizer.lr 0.0023892561190457843 +185 71 training.batch_size 0.0 +185 71 training.label_smoothing 0.05035470098107243 +185 72 model.embedding_dim 2.0 +185 72 optimizer.lr 0.002414180059270695 +185 72 training.batch_size 0.0 +185 72 training.label_smoothing 0.017408679539328762 +185 73 model.embedding_dim 0.0 +185 73 optimizer.lr 0.012692274553973327 +185 73 training.batch_size 1.0 +185 73 training.label_smoothing 0.003588906711414962 +185 74 model.embedding_dim 0.0 +185 74 optimizer.lr 0.09945837607987175 +185 74 training.batch_size 2.0 +185 74 training.label_smoothing 0.019771570009539496 +185 75 model.embedding_dim 0.0 +185 75 optimizer.lr 0.04420780689989457 +185 75 training.batch_size 2.0 +185 75 training.label_smoothing 0.0022984778996039523 +185 76 model.embedding_dim 2.0 +185 76 optimizer.lr 0.032214239454372344 +185 76 training.batch_size 0.0 +185 76 training.label_smoothing 0.0077855607027955435 +185 77 model.embedding_dim 0.0 +185 77 optimizer.lr 0.02388110532003307 +185 77 training.batch_size 1.0 +185 77 training.label_smoothing 0.02262118140894043 +185 78 model.embedding_dim 2.0 +185 78 optimizer.lr 0.05224017262302032 +185 78 training.batch_size 2.0 +185 78 training.label_smoothing 0.04496194381241204 +185 79 model.embedding_dim 2.0 +185 79 optimizer.lr 0.0016986082156244006 +185 79 
training.batch_size 0.0 +185 79 training.label_smoothing 0.04206911030747643 +185 80 model.embedding_dim 2.0 +185 80 optimizer.lr 0.04800609653220566 +185 80 training.batch_size 0.0 +185 80 training.label_smoothing 0.0023202009114325437 +185 81 model.embedding_dim 0.0 +185 81 optimizer.lr 0.012840263461885732 +185 81 training.batch_size 2.0 +185 81 training.label_smoothing 0.28234262446815706 +185 82 model.embedding_dim 0.0 +185 82 optimizer.lr 0.08146620139965345 +185 82 training.batch_size 0.0 +185 82 training.label_smoothing 0.0031552478195387454 +185 83 model.embedding_dim 0.0 +185 83 optimizer.lr 0.08477792106036845 +185 83 training.batch_size 0.0 +185 83 training.label_smoothing 0.5868139889646022 +185 84 model.embedding_dim 2.0 +185 84 optimizer.lr 0.023215725465587898 +185 84 training.batch_size 0.0 +185 84 training.label_smoothing 0.00850267025022813 +185 85 model.embedding_dim 0.0 +185 85 optimizer.lr 0.007292005604804092 +185 85 training.batch_size 0.0 +185 85 training.label_smoothing 0.5026069833266709 +185 86 model.embedding_dim 2.0 +185 86 optimizer.lr 0.004817485772304847 +185 86 training.batch_size 2.0 +185 86 training.label_smoothing 0.8823912467105588 +185 87 model.embedding_dim 0.0 +185 87 optimizer.lr 0.08666295487784016 +185 87 training.batch_size 1.0 +185 87 training.label_smoothing 0.013911437036539728 +185 88 model.embedding_dim 0.0 +185 88 optimizer.lr 0.00736765503625427 +185 88 training.batch_size 1.0 +185 88 training.label_smoothing 0.009363180363420381 +185 89 model.embedding_dim 0.0 +185 89 optimizer.lr 0.006335017472165054 +185 89 training.batch_size 2.0 +185 89 training.label_smoothing 0.7740610329753321 +185 90 model.embedding_dim 0.0 +185 90 optimizer.lr 0.03635196800233826 +185 90 training.batch_size 0.0 +185 90 training.label_smoothing 0.09817998429302285 +185 91 model.embedding_dim 2.0 +185 91 optimizer.lr 0.01436706630679409 +185 91 training.batch_size 0.0 +185 91 training.label_smoothing 0.010370289355521041 +185 92 
model.embedding_dim 1.0 +185 92 optimizer.lr 0.0029741121072066738 +185 92 training.batch_size 0.0 +185 92 training.label_smoothing 0.4835592813464455 +185 93 model.embedding_dim 1.0 +185 93 optimizer.lr 0.003806393568966538 +185 93 training.batch_size 0.0 +185 93 training.label_smoothing 0.46019147754815576 +185 94 model.embedding_dim 1.0 +185 94 optimizer.lr 0.01829176175553881 +185 94 training.batch_size 1.0 +185 94 training.label_smoothing 0.20295600389699353 +185 95 model.embedding_dim 1.0 +185 95 optimizer.lr 0.003905750877648853 +185 95 training.batch_size 1.0 +185 95 training.label_smoothing 0.007853275277162211 +185 96 model.embedding_dim 0.0 +185 96 optimizer.lr 0.00607535763111085 +185 96 training.batch_size 1.0 +185 96 training.label_smoothing 0.975352076637605 +185 97 model.embedding_dim 1.0 +185 97 optimizer.lr 0.01772095989112805 +185 97 training.batch_size 2.0 +185 97 training.label_smoothing 0.45273323168703733 +185 98 model.embedding_dim 0.0 +185 98 optimizer.lr 0.029684156914675144 +185 98 training.batch_size 0.0 +185 98 training.label_smoothing 0.492339676039673 +185 99 model.embedding_dim 1.0 +185 99 optimizer.lr 0.004948178365235697 +185 99 training.batch_size 2.0 +185 99 training.label_smoothing 0.0456143758467064 +185 100 model.embedding_dim 1.0 +185 100 optimizer.lr 0.0022824765774010374 +185 100 training.batch_size 2.0 +185 100 training.label_smoothing 0.0012616278583339423 +185 1 dataset """kinships""" +185 1 model """distmult""" +185 1 loss """softplus""" +185 1 regularizer """no""" +185 1 optimizer """adam""" +185 1 training_loop """lcwa""" +185 1 evaluator """rankbased""" +185 2 dataset """kinships""" +185 2 model """distmult""" +185 2 loss """softplus""" +185 2 regularizer """no""" +185 2 optimizer """adam""" +185 2 training_loop """lcwa""" +185 2 evaluator """rankbased""" +185 3 dataset """kinships""" +185 3 model """distmult""" +185 3 loss """softplus""" +185 3 regularizer """no""" +185 3 optimizer """adam""" +185 3 training_loop 
"""lcwa""" +185 3 evaluator """rankbased""" +185 4 dataset """kinships""" +185 4 model """distmult""" +185 4 loss """softplus""" +185 4 regularizer """no""" +185 4 optimizer """adam""" +185 4 training_loop """lcwa""" +185 4 evaluator """rankbased""" +185 5 dataset """kinships""" +185 5 model """distmult""" +185 5 loss """softplus""" +185 5 regularizer """no""" +185 5 optimizer """adam""" +185 5 training_loop """lcwa""" +185 5 evaluator """rankbased""" +185 6 dataset """kinships""" +185 6 model """distmult""" +185 6 loss """softplus""" +185 6 regularizer """no""" +185 6 optimizer """adam""" +185 6 training_loop """lcwa""" +185 6 evaluator """rankbased""" +185 7 dataset """kinships""" +185 7 model """distmult""" +185 7 loss """softplus""" +185 7 regularizer """no""" +185 7 optimizer """adam""" +185 7 training_loop """lcwa""" +185 7 evaluator """rankbased""" +185 8 dataset """kinships""" +185 8 model """distmult""" +185 8 loss """softplus""" +185 8 regularizer """no""" +185 8 optimizer """adam""" +185 8 training_loop """lcwa""" +185 8 evaluator """rankbased""" +185 9 dataset """kinships""" +185 9 model """distmult""" +185 9 loss """softplus""" +185 9 regularizer """no""" +185 9 optimizer """adam""" +185 9 training_loop """lcwa""" +185 9 evaluator """rankbased""" +185 10 dataset """kinships""" +185 10 model """distmult""" +185 10 loss """softplus""" +185 10 regularizer """no""" +185 10 optimizer """adam""" +185 10 training_loop """lcwa""" +185 10 evaluator """rankbased""" +185 11 dataset """kinships""" +185 11 model """distmult""" +185 11 loss """softplus""" +185 11 regularizer """no""" +185 11 optimizer """adam""" +185 11 training_loop """lcwa""" +185 11 evaluator """rankbased""" +185 12 dataset """kinships""" +185 12 model """distmult""" +185 12 loss """softplus""" +185 12 regularizer """no""" +185 12 optimizer """adam""" +185 12 training_loop """lcwa""" +185 12 evaluator """rankbased""" +185 13 dataset """kinships""" +185 13 model """distmult""" +185 13 loss 
"""softplus""" +185 13 regularizer """no""" +185 13 optimizer """adam""" +185 13 training_loop """lcwa""" +185 13 evaluator """rankbased""" +185 14 dataset """kinships""" +185 14 model """distmult""" +185 14 loss """softplus""" +185 14 regularizer """no""" +185 14 optimizer """adam""" +185 14 training_loop """lcwa""" +185 14 evaluator """rankbased""" +185 15 dataset """kinships""" +185 15 model """distmult""" +185 15 loss """softplus""" +185 15 regularizer """no""" +185 15 optimizer """adam""" +185 15 training_loop """lcwa""" +185 15 evaluator """rankbased""" +185 16 dataset """kinships""" +185 16 model """distmult""" +185 16 loss """softplus""" +185 16 regularizer """no""" +185 16 optimizer """adam""" +185 16 training_loop """lcwa""" +185 16 evaluator """rankbased""" +185 17 dataset """kinships""" +185 17 model """distmult""" +185 17 loss """softplus""" +185 17 regularizer """no""" +185 17 optimizer """adam""" +185 17 training_loop """lcwa""" +185 17 evaluator """rankbased""" +185 18 dataset """kinships""" +185 18 model """distmult""" +185 18 loss """softplus""" +185 18 regularizer """no""" +185 18 optimizer """adam""" +185 18 training_loop """lcwa""" +185 18 evaluator """rankbased""" +185 19 dataset """kinships""" +185 19 model """distmult""" +185 19 loss """softplus""" +185 19 regularizer """no""" +185 19 optimizer """adam""" +185 19 training_loop """lcwa""" +185 19 evaluator """rankbased""" +185 20 dataset """kinships""" +185 20 model """distmult""" +185 20 loss """softplus""" +185 20 regularizer """no""" +185 20 optimizer """adam""" +185 20 training_loop """lcwa""" +185 20 evaluator """rankbased""" +185 21 dataset """kinships""" +185 21 model """distmult""" +185 21 loss """softplus""" +185 21 regularizer """no""" +185 21 optimizer """adam""" +185 21 training_loop """lcwa""" +185 21 evaluator """rankbased""" +185 22 dataset """kinships""" +185 22 model """distmult""" +185 22 loss """softplus""" +185 22 regularizer """no""" +185 22 optimizer """adam""" +185 22 
training_loop """lcwa""" +185 22 evaluator """rankbased""" +185 23 dataset """kinships""" +185 23 model """distmult""" +185 23 loss """softplus""" +185 23 regularizer """no""" +185 23 optimizer """adam""" +185 23 training_loop """lcwa""" +185 23 evaluator """rankbased""" +185 24 dataset """kinships""" +185 24 model """distmult""" +185 24 loss """softplus""" +185 24 regularizer """no""" +185 24 optimizer """adam""" +185 24 training_loop """lcwa""" +185 24 evaluator """rankbased""" +185 25 dataset """kinships""" +185 25 model """distmult""" +185 25 loss """softplus""" +185 25 regularizer """no""" +185 25 optimizer """adam""" +185 25 training_loop """lcwa""" +185 25 evaluator """rankbased""" +185 26 dataset """kinships""" +185 26 model """distmult""" +185 26 loss """softplus""" +185 26 regularizer """no""" +185 26 optimizer """adam""" +185 26 training_loop """lcwa""" +185 26 evaluator """rankbased""" +185 27 dataset """kinships""" +185 27 model """distmult""" +185 27 loss """softplus""" +185 27 regularizer """no""" +185 27 optimizer """adam""" +185 27 training_loop """lcwa""" +185 27 evaluator """rankbased""" +185 28 dataset """kinships""" +185 28 model """distmult""" +185 28 loss """softplus""" +185 28 regularizer """no""" +185 28 optimizer """adam""" +185 28 training_loop """lcwa""" +185 28 evaluator """rankbased""" +185 29 dataset """kinships""" +185 29 model """distmult""" +185 29 loss """softplus""" +185 29 regularizer """no""" +185 29 optimizer """adam""" +185 29 training_loop """lcwa""" +185 29 evaluator """rankbased""" +185 30 dataset """kinships""" +185 30 model """distmult""" +185 30 loss """softplus""" +185 30 regularizer """no""" +185 30 optimizer """adam""" +185 30 training_loop """lcwa""" +185 30 evaluator """rankbased""" +185 31 dataset """kinships""" +185 31 model """distmult""" +185 31 loss """softplus""" +185 31 regularizer """no""" +185 31 optimizer """adam""" +185 31 training_loop """lcwa""" +185 31 evaluator """rankbased""" +185 32 dataset 
"""kinships""" +185 32 model """distmult""" +185 32 loss """softplus""" +185 32 regularizer """no""" +185 32 optimizer """adam""" +185 32 training_loop """lcwa""" +185 32 evaluator """rankbased""" +185 33 dataset """kinships""" +185 33 model """distmult""" +185 33 loss """softplus""" +185 33 regularizer """no""" +185 33 optimizer """adam""" +185 33 training_loop """lcwa""" +185 33 evaluator """rankbased""" +185 34 dataset """kinships""" +185 34 model """distmult""" +185 34 loss """softplus""" +185 34 regularizer """no""" +185 34 optimizer """adam""" +185 34 training_loop """lcwa""" +185 34 evaluator """rankbased""" +185 35 dataset """kinships""" +185 35 model """distmult""" +185 35 loss """softplus""" +185 35 regularizer """no""" +185 35 optimizer """adam""" +185 35 training_loop """lcwa""" +185 35 evaluator """rankbased""" +185 36 dataset """kinships""" +185 36 model """distmult""" +185 36 loss """softplus""" +185 36 regularizer """no""" +185 36 optimizer """adam""" +185 36 training_loop """lcwa""" +185 36 evaluator """rankbased""" +185 37 dataset """kinships""" +185 37 model """distmult""" +185 37 loss """softplus""" +185 37 regularizer """no""" +185 37 optimizer """adam""" +185 37 training_loop """lcwa""" +185 37 evaluator """rankbased""" +185 38 dataset """kinships""" +185 38 model """distmult""" +185 38 loss """softplus""" +185 38 regularizer """no""" +185 38 optimizer """adam""" +185 38 training_loop """lcwa""" +185 38 evaluator """rankbased""" +185 39 dataset """kinships""" +185 39 model """distmult""" +185 39 loss """softplus""" +185 39 regularizer """no""" +185 39 optimizer """adam""" +185 39 training_loop """lcwa""" +185 39 evaluator """rankbased""" +185 40 dataset """kinships""" +185 40 model """distmult""" +185 40 loss """softplus""" +185 40 regularizer """no""" +185 40 optimizer """adam""" +185 40 training_loop """lcwa""" +185 40 evaluator """rankbased""" +185 41 dataset """kinships""" +185 41 model """distmult""" +185 41 loss """softplus""" +185 41 
regularizer """no""" +185 41 optimizer """adam""" +185 41 training_loop """lcwa""" +185 41 evaluator """rankbased""" +185 42 dataset """kinships""" +185 42 model """distmult""" +185 42 loss """softplus""" +185 42 regularizer """no""" +185 42 optimizer """adam""" +185 42 training_loop """lcwa""" +185 42 evaluator """rankbased""" +185 43 dataset """kinships""" +185 43 model """distmult""" +185 43 loss """softplus""" +185 43 regularizer """no""" +185 43 optimizer """adam""" +185 43 training_loop """lcwa""" +185 43 evaluator """rankbased""" +185 44 dataset """kinships""" +185 44 model """distmult""" +185 44 loss """softplus""" +185 44 regularizer """no""" +185 44 optimizer """adam""" +185 44 training_loop """lcwa""" +185 44 evaluator """rankbased""" +185 45 dataset """kinships""" +185 45 model """distmult""" +185 45 loss """softplus""" +185 45 regularizer """no""" +185 45 optimizer """adam""" +185 45 training_loop """lcwa""" +185 45 evaluator """rankbased""" +185 46 dataset """kinships""" +185 46 model """distmult""" +185 46 loss """softplus""" +185 46 regularizer """no""" +185 46 optimizer """adam""" +185 46 training_loop """lcwa""" +185 46 evaluator """rankbased""" +185 47 dataset """kinships""" +185 47 model """distmult""" +185 47 loss """softplus""" +185 47 regularizer """no""" +185 47 optimizer """adam""" +185 47 training_loop """lcwa""" +185 47 evaluator """rankbased""" +185 48 dataset """kinships""" +185 48 model """distmult""" +185 48 loss """softplus""" +185 48 regularizer """no""" +185 48 optimizer """adam""" +185 48 training_loop """lcwa""" +185 48 evaluator """rankbased""" +185 49 dataset """kinships""" +185 49 model """distmult""" +185 49 loss """softplus""" +185 49 regularizer """no""" +185 49 optimizer """adam""" +185 49 training_loop """lcwa""" +185 49 evaluator """rankbased""" +185 50 dataset """kinships""" +185 50 model """distmult""" +185 50 loss """softplus""" +185 50 regularizer """no""" +185 50 optimizer """adam""" +185 50 training_loop """lcwa""" 
+185 50 evaluator """rankbased""" +185 51 dataset """kinships""" +185 51 model """distmult""" +185 51 loss """softplus""" +185 51 regularizer """no""" +185 51 optimizer """adam""" +185 51 training_loop """lcwa""" +185 51 evaluator """rankbased""" +185 52 dataset """kinships""" +185 52 model """distmult""" +185 52 loss """softplus""" +185 52 regularizer """no""" +185 52 optimizer """adam""" +185 52 training_loop """lcwa""" +185 52 evaluator """rankbased""" +185 53 dataset """kinships""" +185 53 model """distmult""" +185 53 loss """softplus""" +185 53 regularizer """no""" +185 53 optimizer """adam""" +185 53 training_loop """lcwa""" +185 53 evaluator """rankbased""" +185 54 dataset """kinships""" +185 54 model """distmult""" +185 54 loss """softplus""" +185 54 regularizer """no""" +185 54 optimizer """adam""" +185 54 training_loop """lcwa""" +185 54 evaluator """rankbased""" +185 55 dataset """kinships""" +185 55 model """distmult""" +185 55 loss """softplus""" +185 55 regularizer """no""" +185 55 optimizer """adam""" +185 55 training_loop """lcwa""" +185 55 evaluator """rankbased""" +185 56 dataset """kinships""" +185 56 model """distmult""" +185 56 loss """softplus""" +185 56 regularizer """no""" +185 56 optimizer """adam""" +185 56 training_loop """lcwa""" +185 56 evaluator """rankbased""" +185 57 dataset """kinships""" +185 57 model """distmult""" +185 57 loss """softplus""" +185 57 regularizer """no""" +185 57 optimizer """adam""" +185 57 training_loop """lcwa""" +185 57 evaluator """rankbased""" +185 58 dataset """kinships""" +185 58 model """distmult""" +185 58 loss """softplus""" +185 58 regularizer """no""" +185 58 optimizer """adam""" +185 58 training_loop """lcwa""" +185 58 evaluator """rankbased""" +185 59 dataset """kinships""" +185 59 model """distmult""" +185 59 loss """softplus""" +185 59 regularizer """no""" +185 59 optimizer """adam""" +185 59 training_loop """lcwa""" +185 59 evaluator """rankbased""" +185 60 dataset """kinships""" +185 60 model 
"""distmult""" +185 60 loss """softplus""" +185 60 regularizer """no""" +185 60 optimizer """adam""" +185 60 training_loop """lcwa""" +185 60 evaluator """rankbased""" +185 61 dataset """kinships""" +185 61 model """distmult""" +185 61 loss """softplus""" +185 61 regularizer """no""" +185 61 optimizer """adam""" +185 61 training_loop """lcwa""" +185 61 evaluator """rankbased""" +185 62 dataset """kinships""" +185 62 model """distmult""" +185 62 loss """softplus""" +185 62 regularizer """no""" +185 62 optimizer """adam""" +185 62 training_loop """lcwa""" +185 62 evaluator """rankbased""" +185 63 dataset """kinships""" +185 63 model """distmult""" +185 63 loss """softplus""" +185 63 regularizer """no""" +185 63 optimizer """adam""" +185 63 training_loop """lcwa""" +185 63 evaluator """rankbased""" +185 64 dataset """kinships""" +185 64 model """distmult""" +185 64 loss """softplus""" +185 64 regularizer """no""" +185 64 optimizer """adam""" +185 64 training_loop """lcwa""" +185 64 evaluator """rankbased""" +185 65 dataset """kinships""" +185 65 model """distmult""" +185 65 loss """softplus""" +185 65 regularizer """no""" +185 65 optimizer """adam""" +185 65 training_loop """lcwa""" +185 65 evaluator """rankbased""" +185 66 dataset """kinships""" +185 66 model """distmult""" +185 66 loss """softplus""" +185 66 regularizer """no""" +185 66 optimizer """adam""" +185 66 training_loop """lcwa""" +185 66 evaluator """rankbased""" +185 67 dataset """kinships""" +185 67 model """distmult""" +185 67 loss """softplus""" +185 67 regularizer """no""" +185 67 optimizer """adam""" +185 67 training_loop """lcwa""" +185 67 evaluator """rankbased""" +185 68 dataset """kinships""" +185 68 model """distmult""" +185 68 loss """softplus""" +185 68 regularizer """no""" +185 68 optimizer """adam""" +185 68 training_loop """lcwa""" +185 68 evaluator """rankbased""" +185 69 dataset """kinships""" +185 69 model """distmult""" +185 69 loss """softplus""" +185 69 regularizer """no""" +185 69 
optimizer """adam""" +185 69 training_loop """lcwa""" +185 69 evaluator """rankbased""" +185 70 dataset """kinships""" +185 70 model """distmult""" +185 70 loss """softplus""" +185 70 regularizer """no""" +185 70 optimizer """adam""" +185 70 training_loop """lcwa""" +185 70 evaluator """rankbased""" +185 71 dataset """kinships""" +185 71 model """distmult""" +185 71 loss """softplus""" +185 71 regularizer """no""" +185 71 optimizer """adam""" +185 71 training_loop """lcwa""" +185 71 evaluator """rankbased""" +185 72 dataset """kinships""" +185 72 model """distmult""" +185 72 loss """softplus""" +185 72 regularizer """no""" +185 72 optimizer """adam""" +185 72 training_loop """lcwa""" +185 72 evaluator """rankbased""" +185 73 dataset """kinships""" +185 73 model """distmult""" +185 73 loss """softplus""" +185 73 regularizer """no""" +185 73 optimizer """adam""" +185 73 training_loop """lcwa""" +185 73 evaluator """rankbased""" +185 74 dataset """kinships""" +185 74 model """distmult""" +185 74 loss """softplus""" +185 74 regularizer """no""" +185 74 optimizer """adam""" +185 74 training_loop """lcwa""" +185 74 evaluator """rankbased""" +185 75 dataset """kinships""" +185 75 model """distmult""" +185 75 loss """softplus""" +185 75 regularizer """no""" +185 75 optimizer """adam""" +185 75 training_loop """lcwa""" +185 75 evaluator """rankbased""" +185 76 dataset """kinships""" +185 76 model """distmult""" +185 76 loss """softplus""" +185 76 regularizer """no""" +185 76 optimizer """adam""" +185 76 training_loop """lcwa""" +185 76 evaluator """rankbased""" +185 77 dataset """kinships""" +185 77 model """distmult""" +185 77 loss """softplus""" +185 77 regularizer """no""" +185 77 optimizer """adam""" +185 77 training_loop """lcwa""" +185 77 evaluator """rankbased""" +185 78 dataset """kinships""" +185 78 model """distmult""" +185 78 loss """softplus""" +185 78 regularizer """no""" +185 78 optimizer """adam""" +185 78 training_loop """lcwa""" +185 78 evaluator 
"""rankbased""" +185 79 dataset """kinships""" +185 79 model """distmult""" +185 79 loss """softplus""" +185 79 regularizer """no""" +185 79 optimizer """adam""" +185 79 training_loop """lcwa""" +185 79 evaluator """rankbased""" +185 80 dataset """kinships""" +185 80 model """distmult""" +185 80 loss """softplus""" +185 80 regularizer """no""" +185 80 optimizer """adam""" +185 80 training_loop """lcwa""" +185 80 evaluator """rankbased""" +185 81 dataset """kinships""" +185 81 model """distmult""" +185 81 loss """softplus""" +185 81 regularizer """no""" +185 81 optimizer """adam""" +185 81 training_loop """lcwa""" +185 81 evaluator """rankbased""" +185 82 dataset """kinships""" +185 82 model """distmult""" +185 82 loss """softplus""" +185 82 regularizer """no""" +185 82 optimizer """adam""" +185 82 training_loop """lcwa""" +185 82 evaluator """rankbased""" +185 83 dataset """kinships""" +185 83 model """distmult""" +185 83 loss """softplus""" +185 83 regularizer """no""" +185 83 optimizer """adam""" +185 83 training_loop """lcwa""" +185 83 evaluator """rankbased""" +185 84 dataset """kinships""" +185 84 model """distmult""" +185 84 loss """softplus""" +185 84 regularizer """no""" +185 84 optimizer """adam""" +185 84 training_loop """lcwa""" +185 84 evaluator """rankbased""" +185 85 dataset """kinships""" +185 85 model """distmult""" +185 85 loss """softplus""" +185 85 regularizer """no""" +185 85 optimizer """adam""" +185 85 training_loop """lcwa""" +185 85 evaluator """rankbased""" +185 86 dataset """kinships""" +185 86 model """distmult""" +185 86 loss """softplus""" +185 86 regularizer """no""" +185 86 optimizer """adam""" +185 86 training_loop """lcwa""" +185 86 evaluator """rankbased""" +185 87 dataset """kinships""" +185 87 model """distmult""" +185 87 loss """softplus""" +185 87 regularizer """no""" +185 87 optimizer """adam""" +185 87 training_loop """lcwa""" +185 87 evaluator """rankbased""" +185 88 dataset """kinships""" +185 88 model """distmult""" +185 
88 loss """softplus""" +185 88 regularizer """no""" +185 88 optimizer """adam""" +185 88 training_loop """lcwa""" +185 88 evaluator """rankbased""" +185 89 dataset """kinships""" +185 89 model """distmult""" +185 89 loss """softplus""" +185 89 regularizer """no""" +185 89 optimizer """adam""" +185 89 training_loop """lcwa""" +185 89 evaluator """rankbased""" +185 90 dataset """kinships""" +185 90 model """distmult""" +185 90 loss """softplus""" +185 90 regularizer """no""" +185 90 optimizer """adam""" +185 90 training_loop """lcwa""" +185 90 evaluator """rankbased""" +185 91 dataset """kinships""" +185 91 model """distmult""" +185 91 loss """softplus""" +185 91 regularizer """no""" +185 91 optimizer """adam""" +185 91 training_loop """lcwa""" +185 91 evaluator """rankbased""" +185 92 dataset """kinships""" +185 92 model """distmult""" +185 92 loss """softplus""" +185 92 regularizer """no""" +185 92 optimizer """adam""" +185 92 training_loop """lcwa""" +185 92 evaluator """rankbased""" +185 93 dataset """kinships""" +185 93 model """distmult""" +185 93 loss """softplus""" +185 93 regularizer """no""" +185 93 optimizer """adam""" +185 93 training_loop """lcwa""" +185 93 evaluator """rankbased""" +185 94 dataset """kinships""" +185 94 model """distmult""" +185 94 loss """softplus""" +185 94 regularizer """no""" +185 94 optimizer """adam""" +185 94 training_loop """lcwa""" +185 94 evaluator """rankbased""" +185 95 dataset """kinships""" +185 95 model """distmult""" +185 95 loss """softplus""" +185 95 regularizer """no""" +185 95 optimizer """adam""" +185 95 training_loop """lcwa""" +185 95 evaluator """rankbased""" +185 96 dataset """kinships""" +185 96 model """distmult""" +185 96 loss """softplus""" +185 96 regularizer """no""" +185 96 optimizer """adam""" +185 96 training_loop """lcwa""" +185 96 evaluator """rankbased""" +185 97 dataset """kinships""" +185 97 model """distmult""" +185 97 loss """softplus""" +185 97 regularizer """no""" +185 97 optimizer """adam""" 
+185 97 training_loop """lcwa""" +185 97 evaluator """rankbased""" +185 98 dataset """kinships""" +185 98 model """distmult""" +185 98 loss """softplus""" +185 98 regularizer """no""" +185 98 optimizer """adam""" +185 98 training_loop """lcwa""" +185 98 evaluator """rankbased""" +185 99 dataset """kinships""" +185 99 model """distmult""" +185 99 loss """softplus""" +185 99 regularizer """no""" +185 99 optimizer """adam""" +185 99 training_loop """lcwa""" +185 99 evaluator """rankbased""" +185 100 dataset """kinships""" +185 100 model """distmult""" +185 100 loss """softplus""" +185 100 regularizer """no""" +185 100 optimizer """adam""" +185 100 training_loop """lcwa""" +185 100 evaluator """rankbased""" +186 1 model.embedding_dim 0.0 +186 1 optimizer.lr 0.05343985479089482 +186 1 training.batch_size 2.0 +186 1 training.label_smoothing 0.0856154447110256 +186 2 model.embedding_dim 1.0 +186 2 optimizer.lr 0.0029441281600890664 +186 2 training.batch_size 0.0 +186 2 training.label_smoothing 0.0035605133286678 +186 3 model.embedding_dim 1.0 +186 3 optimizer.lr 0.0955272006690856 +186 3 training.batch_size 0.0 +186 3 training.label_smoothing 0.04816718960062814 +186 4 model.embedding_dim 0.0 +186 4 optimizer.lr 0.08309185187085992 +186 4 training.batch_size 0.0 +186 4 training.label_smoothing 0.02651719512259978 +186 5 model.embedding_dim 0.0 +186 5 optimizer.lr 0.0013483594335877598 +186 5 training.batch_size 1.0 +186 5 training.label_smoothing 0.009729705507476441 +186 6 model.embedding_dim 0.0 +186 6 optimizer.lr 0.018855747413265518 +186 6 training.batch_size 0.0 +186 6 training.label_smoothing 0.01032681000862212 +186 7 model.embedding_dim 0.0 +186 7 optimizer.lr 0.09200409080900883 +186 7 training.batch_size 0.0 +186 7 training.label_smoothing 0.24837731071407168 +186 8 model.embedding_dim 0.0 +186 8 optimizer.lr 0.0012606207733738722 +186 8 training.batch_size 1.0 +186 8 training.label_smoothing 0.208349991125104 +186 9 model.embedding_dim 1.0 +186 9 optimizer.lr 
0.018997804154399053 +186 9 training.batch_size 1.0 +186 9 training.label_smoothing 0.00254671094684188 +186 10 model.embedding_dim 1.0 +186 10 optimizer.lr 0.0558517488222573 +186 10 training.batch_size 2.0 +186 10 training.label_smoothing 0.014572845203202288 +186 11 model.embedding_dim 1.0 +186 11 optimizer.lr 0.012121832062297846 +186 11 training.batch_size 1.0 +186 11 training.label_smoothing 0.004642839962770149 +186 12 model.embedding_dim 2.0 +186 12 optimizer.lr 0.08448693365926986 +186 12 training.batch_size 1.0 +186 12 training.label_smoothing 0.18206610944973634 +186 13 model.embedding_dim 2.0 +186 13 optimizer.lr 0.08310159232351277 +186 13 training.batch_size 0.0 +186 13 training.label_smoothing 0.17406830192201744 +186 14 model.embedding_dim 0.0 +186 14 optimizer.lr 0.0018350105080167918 +186 14 training.batch_size 2.0 +186 14 training.label_smoothing 0.007174563481155823 +186 15 model.embedding_dim 1.0 +186 15 optimizer.lr 0.035671460528282094 +186 15 training.batch_size 1.0 +186 15 training.label_smoothing 0.8909084050885039 +186 16 model.embedding_dim 0.0 +186 16 optimizer.lr 0.0905421495678761 +186 16 training.batch_size 2.0 +186 16 training.label_smoothing 0.0037823830065146892 +186 17 model.embedding_dim 1.0 +186 17 optimizer.lr 0.0014907705313027602 +186 17 training.batch_size 0.0 +186 17 training.label_smoothing 0.07373309859820913 +186 18 model.embedding_dim 0.0 +186 18 optimizer.lr 0.010482157775761869 +186 18 training.batch_size 1.0 +186 18 training.label_smoothing 0.004238728440650191 +186 19 model.embedding_dim 0.0 +186 19 optimizer.lr 0.0026892837309568235 +186 19 training.batch_size 1.0 +186 19 training.label_smoothing 0.0053342013496956225 +186 20 model.embedding_dim 2.0 +186 20 optimizer.lr 0.024834556302530644 +186 20 training.batch_size 1.0 +186 20 training.label_smoothing 0.0028371018139487897 +186 21 model.embedding_dim 2.0 +186 21 optimizer.lr 0.05197219724169164 +186 21 training.batch_size 1.0 +186 21 training.label_smoothing 
0.28881240254437696 +186 22 model.embedding_dim 0.0 +186 22 optimizer.lr 0.005316959996171091 +186 22 training.batch_size 2.0 +186 22 training.label_smoothing 0.8427012146346576 +186 23 model.embedding_dim 2.0 +186 23 optimizer.lr 0.03619148757465163 +186 23 training.batch_size 0.0 +186 23 training.label_smoothing 0.0013129459098663954 +186 24 model.embedding_dim 1.0 +186 24 optimizer.lr 0.030037550525505666 +186 24 training.batch_size 0.0 +186 24 training.label_smoothing 0.030407453760698203 +186 25 model.embedding_dim 0.0 +186 25 optimizer.lr 0.07540545971158889 +186 25 training.batch_size 2.0 +186 25 training.label_smoothing 0.008198559312366984 +186 26 model.embedding_dim 2.0 +186 26 optimizer.lr 0.020598433535334047 +186 26 training.batch_size 0.0 +186 26 training.label_smoothing 0.11408447807915653 +186 27 model.embedding_dim 2.0 +186 27 optimizer.lr 0.004949749025866382 +186 27 training.batch_size 2.0 +186 27 training.label_smoothing 0.0032209744869801634 +186 28 model.embedding_dim 1.0 +186 28 optimizer.lr 0.0378930274102213 +186 28 training.batch_size 0.0 +186 28 training.label_smoothing 0.5008689125547167 +186 29 model.embedding_dim 1.0 +186 29 optimizer.lr 0.0010209845565084881 +186 29 training.batch_size 0.0 +186 29 training.label_smoothing 0.001514090062699849 +186 30 model.embedding_dim 2.0 +186 30 optimizer.lr 0.07640877351698307 +186 30 training.batch_size 0.0 +186 30 training.label_smoothing 0.038556564773513255 +186 31 model.embedding_dim 0.0 +186 31 optimizer.lr 0.020392768889541966 +186 31 training.batch_size 2.0 +186 31 training.label_smoothing 0.0027485447567657485 +186 32 model.embedding_dim 2.0 +186 32 optimizer.lr 0.04000418250580103 +186 32 training.batch_size 2.0 +186 32 training.label_smoothing 0.2568217440591756 +186 33 model.embedding_dim 1.0 +186 33 optimizer.lr 0.01149462949816775 +186 33 training.batch_size 1.0 +186 33 training.label_smoothing 0.0058574537240058695 +186 34 model.embedding_dim 1.0 +186 34 optimizer.lr 
0.002753919195919833 +186 34 training.batch_size 2.0 +186 34 training.label_smoothing 0.0012834871278039868 +186 35 model.embedding_dim 2.0 +186 35 optimizer.lr 0.0038935197274999104 +186 35 training.batch_size 2.0 +186 35 training.label_smoothing 0.004299574506389405 +186 36 model.embedding_dim 2.0 +186 36 optimizer.lr 0.049002034509470396 +186 36 training.batch_size 1.0 +186 36 training.label_smoothing 0.003775364655313997 +186 37 model.embedding_dim 0.0 +186 37 optimizer.lr 0.002732805052947462 +186 37 training.batch_size 1.0 +186 37 training.label_smoothing 0.2440178026831684 +186 38 model.embedding_dim 2.0 +186 38 optimizer.lr 0.05613458080602471 +186 38 training.batch_size 2.0 +186 38 training.label_smoothing 0.001589586722056567 +186 39 model.embedding_dim 2.0 +186 39 optimizer.lr 0.041589538275995645 +186 39 training.batch_size 2.0 +186 39 training.label_smoothing 0.022334306325535326 +186 40 model.embedding_dim 0.0 +186 40 optimizer.lr 0.0026726461905705714 +186 40 training.batch_size 0.0 +186 40 training.label_smoothing 0.008970412047598806 +186 41 model.embedding_dim 2.0 +186 41 optimizer.lr 0.006480674332823088 +186 41 training.batch_size 2.0 +186 41 training.label_smoothing 0.03815117864965675 +186 42 model.embedding_dim 0.0 +186 42 optimizer.lr 0.0044045026302746764 +186 42 training.batch_size 1.0 +186 42 training.label_smoothing 0.5166538043776557 +186 43 model.embedding_dim 0.0 +186 43 optimizer.lr 0.00501279269180562 +186 43 training.batch_size 0.0 +186 43 training.label_smoothing 0.27225195291446136 +186 44 model.embedding_dim 1.0 +186 44 optimizer.lr 0.002311299916188818 +186 44 training.batch_size 2.0 +186 44 training.label_smoothing 0.014483994531472087 +186 45 model.embedding_dim 0.0 +186 45 optimizer.lr 0.017932633100925147 +186 45 training.batch_size 1.0 +186 45 training.label_smoothing 0.006403179107312727 +186 46 model.embedding_dim 0.0 +186 46 optimizer.lr 0.0016224367374401707 +186 46 training.batch_size 2.0 +186 46 
training.label_smoothing 0.058853542336002575 +186 47 model.embedding_dim 2.0 +186 47 optimizer.lr 0.034133771526694334 +186 47 training.batch_size 0.0 +186 47 training.label_smoothing 0.25444034720245134 +186 48 model.embedding_dim 0.0 +186 48 optimizer.lr 0.0037310985401905715 +186 48 training.batch_size 1.0 +186 48 training.label_smoothing 0.14645323262060803 +186 49 model.embedding_dim 1.0 +186 49 optimizer.lr 0.004435120172668186 +186 49 training.batch_size 2.0 +186 49 training.label_smoothing 0.001206007277001615 +186 50 model.embedding_dim 2.0 +186 50 optimizer.lr 0.008728810035087232 +186 50 training.batch_size 2.0 +186 50 training.label_smoothing 0.055025857505781414 +186 51 model.embedding_dim 1.0 +186 51 optimizer.lr 0.03045273851052758 +186 51 training.batch_size 2.0 +186 51 training.label_smoothing 0.00600890586618619 +186 52 model.embedding_dim 2.0 +186 52 optimizer.lr 0.003897341319115781 +186 52 training.batch_size 1.0 +186 52 training.label_smoothing 0.048758507909943856 +186 53 model.embedding_dim 2.0 +186 53 optimizer.lr 0.03733305813607409 +186 53 training.batch_size 1.0 +186 53 training.label_smoothing 0.006554268694761674 +186 54 model.embedding_dim 0.0 +186 54 optimizer.lr 0.028303859277792234 +186 54 training.batch_size 0.0 +186 54 training.label_smoothing 0.0010568928302644748 +186 55 model.embedding_dim 1.0 +186 55 optimizer.lr 0.006374167841250546 +186 55 training.batch_size 1.0 +186 55 training.label_smoothing 0.0012158441381581193 +186 56 model.embedding_dim 2.0 +186 56 optimizer.lr 0.008874002648381821 +186 56 training.batch_size 0.0 +186 56 training.label_smoothing 0.006285188761202917 +186 57 model.embedding_dim 1.0 +186 57 optimizer.lr 0.01039038908448423 +186 57 training.batch_size 1.0 +186 57 training.label_smoothing 0.0013465741215382301 +186 58 model.embedding_dim 0.0 +186 58 optimizer.lr 0.005210261245218008 +186 58 training.batch_size 1.0 +186 58 training.label_smoothing 0.2092258482483199 +186 59 model.embedding_dim 0.0 +186 
59 optimizer.lr 0.0036669937532653706 +186 59 training.batch_size 2.0 +186 59 training.label_smoothing 0.47027006129522364 +186 60 model.embedding_dim 1.0 +186 60 optimizer.lr 0.0038636352785411344 +186 60 training.batch_size 0.0 +186 60 training.label_smoothing 0.44106373522390513 +186 61 model.embedding_dim 2.0 +186 61 optimizer.lr 0.05006132824733703 +186 61 training.batch_size 1.0 +186 61 training.label_smoothing 0.09470965971842663 +186 62 model.embedding_dim 0.0 +186 62 optimizer.lr 0.015653582345970397 +186 62 training.batch_size 0.0 +186 62 training.label_smoothing 0.0012767149200502577 +186 63 model.embedding_dim 1.0 +186 63 optimizer.lr 0.008729060977605282 +186 63 training.batch_size 0.0 +186 63 training.label_smoothing 0.6286220510649188 +186 64 model.embedding_dim 2.0 +186 64 optimizer.lr 0.004642421526511973 +186 64 training.batch_size 0.0 +186 64 training.label_smoothing 0.41095782348997306 +186 65 model.embedding_dim 1.0 +186 65 optimizer.lr 0.030829618370117228 +186 65 training.batch_size 2.0 +186 65 training.label_smoothing 0.3295994791130572 +186 66 model.embedding_dim 2.0 +186 66 optimizer.lr 0.011105408327387935 +186 66 training.batch_size 1.0 +186 66 training.label_smoothing 0.8796161281789182 +186 67 model.embedding_dim 2.0 +186 67 optimizer.lr 0.004322260516375169 +186 67 training.batch_size 2.0 +186 67 training.label_smoothing 0.6581134670232325 +186 68 model.embedding_dim 0.0 +186 68 optimizer.lr 0.0010756715600199111 +186 68 training.batch_size 2.0 +186 68 training.label_smoothing 0.09821661448489345 +186 69 model.embedding_dim 0.0 +186 69 optimizer.lr 0.0418847607600406 +186 69 training.batch_size 0.0 +186 69 training.label_smoothing 0.2812053561120882 +186 70 model.embedding_dim 0.0 +186 70 optimizer.lr 0.017942353627885654 +186 70 training.batch_size 1.0 +186 70 training.label_smoothing 0.4098899474464585 +186 71 model.embedding_dim 1.0 +186 71 optimizer.lr 0.011049437861988221 +186 71 training.batch_size 0.0 +186 71 
training.label_smoothing 0.020251744258838522 +186 72 model.embedding_dim 1.0 +186 72 optimizer.lr 0.005304124540451156 +186 72 training.batch_size 1.0 +186 72 training.label_smoothing 0.021571898276486506 +186 73 model.embedding_dim 1.0 +186 73 optimizer.lr 0.0010002452617914395 +186 73 training.batch_size 2.0 +186 73 training.label_smoothing 0.00618009701149977 +186 74 model.embedding_dim 0.0 +186 74 optimizer.lr 0.0012533797512801579 +186 74 training.batch_size 0.0 +186 74 training.label_smoothing 0.5633000681720192 +186 75 model.embedding_dim 0.0 +186 75 optimizer.lr 0.006026359572302001 +186 75 training.batch_size 0.0 +186 75 training.label_smoothing 0.07917785234168946 +186 76 model.embedding_dim 1.0 +186 76 optimizer.lr 0.008051485651368324 +186 76 training.batch_size 1.0 +186 76 training.label_smoothing 0.0013707801896812 +186 77 model.embedding_dim 1.0 +186 77 optimizer.lr 0.0030203283269146304 +186 77 training.batch_size 0.0 +186 77 training.label_smoothing 0.014340954454218085 +186 78 model.embedding_dim 2.0 +186 78 optimizer.lr 0.0031876971838381246 +186 78 training.batch_size 0.0 +186 78 training.label_smoothing 0.05504110005776858 +186 79 model.embedding_dim 0.0 +186 79 optimizer.lr 0.01885060994845204 +186 79 training.batch_size 0.0 +186 79 training.label_smoothing 0.03805534562663299 +186 80 model.embedding_dim 1.0 +186 80 optimizer.lr 0.023963004560920678 +186 80 training.batch_size 0.0 +186 80 training.label_smoothing 0.027868398549190827 +186 81 model.embedding_dim 0.0 +186 81 optimizer.lr 0.0031588221241784275 +186 81 training.batch_size 1.0 +186 81 training.label_smoothing 0.8239065839055449 +186 82 model.embedding_dim 0.0 +186 82 optimizer.lr 0.07646346673353958 +186 82 training.batch_size 2.0 +186 82 training.label_smoothing 0.3334087576490427 +186 83 model.embedding_dim 1.0 +186 83 optimizer.lr 0.001647229369035381 +186 83 training.batch_size 2.0 +186 83 training.label_smoothing 0.0066491992914491736 +186 84 model.embedding_dim 2.0 +186 84 
optimizer.lr 0.00100215153484392 +186 84 training.batch_size 2.0 +186 84 training.label_smoothing 0.6945137792249026 +186 85 model.embedding_dim 1.0 +186 85 optimizer.lr 0.049091849755689754 +186 85 training.batch_size 2.0 +186 85 training.label_smoothing 0.015566682564254648 +186 86 model.embedding_dim 2.0 +186 86 optimizer.lr 0.002009091543201049 +186 86 training.batch_size 2.0 +186 86 training.label_smoothing 0.4009762025425667 +186 87 model.embedding_dim 0.0 +186 87 optimizer.lr 0.00780255231464349 +186 87 training.batch_size 0.0 +186 87 training.label_smoothing 0.4934039318483208 +186 88 model.embedding_dim 1.0 +186 88 optimizer.lr 0.037504413968337834 +186 88 training.batch_size 2.0 +186 88 training.label_smoothing 0.001001911625215728 +186 89 model.embedding_dim 1.0 +186 89 optimizer.lr 0.04300227896511245 +186 89 training.batch_size 2.0 +186 89 training.label_smoothing 0.005291248636871144 +186 90 model.embedding_dim 1.0 +186 90 optimizer.lr 0.011608201461708257 +186 90 training.batch_size 0.0 +186 90 training.label_smoothing 0.10754755289256185 +186 91 model.embedding_dim 2.0 +186 91 optimizer.lr 0.01797530443765589 +186 91 training.batch_size 2.0 +186 91 training.label_smoothing 0.007578761562441138 +186 92 model.embedding_dim 1.0 +186 92 optimizer.lr 0.010979927081045612 +186 92 training.batch_size 1.0 +186 92 training.label_smoothing 0.010036152111020092 +186 93 model.embedding_dim 0.0 +186 93 optimizer.lr 0.008593945140730365 +186 93 training.batch_size 0.0 +186 93 training.label_smoothing 0.0015769090085161542 +186 94 model.embedding_dim 2.0 +186 94 optimizer.lr 0.059874339303956255 +186 94 training.batch_size 0.0 +186 94 training.label_smoothing 0.007144657629958978 +186 95 model.embedding_dim 1.0 +186 95 optimizer.lr 0.01707116622839222 +186 95 training.batch_size 1.0 +186 95 training.label_smoothing 0.09300629776925069 +186 96 model.embedding_dim 1.0 +186 96 optimizer.lr 0.028336068340447825 +186 96 training.batch_size 2.0 +186 96 
training.label_smoothing 0.002726760158010854 +186 97 model.embedding_dim 1.0 +186 97 optimizer.lr 0.016719130136081332 +186 97 training.batch_size 1.0 +186 97 training.label_smoothing 0.013501658634267786 +186 98 model.embedding_dim 1.0 +186 98 optimizer.lr 0.01706089232590218 +186 98 training.batch_size 2.0 +186 98 training.label_smoothing 0.1790802200673062 +186 99 model.embedding_dim 1.0 +186 99 optimizer.lr 0.00315784613887812 +186 99 training.batch_size 2.0 +186 99 training.label_smoothing 0.0019966196128335624 +186 100 model.embedding_dim 1.0 +186 100 optimizer.lr 0.01697669971333795 +186 100 training.batch_size 0.0 +186 100 training.label_smoothing 0.027219798649914816 +186 1 dataset """kinships""" +186 1 model """distmult""" +186 1 loss """crossentropy""" +186 1 regularizer """no""" +186 1 optimizer """adam""" +186 1 training_loop """lcwa""" +186 1 evaluator """rankbased""" +186 2 dataset """kinships""" +186 2 model """distmult""" +186 2 loss """crossentropy""" +186 2 regularizer """no""" +186 2 optimizer """adam""" +186 2 training_loop """lcwa""" +186 2 evaluator """rankbased""" +186 3 dataset """kinships""" +186 3 model """distmult""" +186 3 loss """crossentropy""" +186 3 regularizer """no""" +186 3 optimizer """adam""" +186 3 training_loop """lcwa""" +186 3 evaluator """rankbased""" +186 4 dataset """kinships""" +186 4 model """distmult""" +186 4 loss """crossentropy""" +186 4 regularizer """no""" +186 4 optimizer """adam""" +186 4 training_loop """lcwa""" +186 4 evaluator """rankbased""" +186 5 dataset """kinships""" +186 5 model """distmult""" +186 5 loss """crossentropy""" +186 5 regularizer """no""" +186 5 optimizer """adam""" +186 5 training_loop """lcwa""" +186 5 evaluator """rankbased""" +186 6 dataset """kinships""" +186 6 model """distmult""" +186 6 loss """crossentropy""" +186 6 regularizer """no""" +186 6 optimizer """adam""" +186 6 training_loop """lcwa""" +186 6 evaluator """rankbased""" +186 7 dataset """kinships""" +186 7 model 
"""distmult""" +186 7 loss """crossentropy""" +186 7 regularizer """no""" +186 7 optimizer """adam""" +186 7 training_loop """lcwa""" +186 7 evaluator """rankbased""" +186 8 dataset """kinships""" +186 8 model """distmult""" +186 8 loss """crossentropy""" +186 8 regularizer """no""" +186 8 optimizer """adam""" +186 8 training_loop """lcwa""" +186 8 evaluator """rankbased""" +186 9 dataset """kinships""" +186 9 model """distmult""" +186 9 loss """crossentropy""" +186 9 regularizer """no""" +186 9 optimizer """adam""" +186 9 training_loop """lcwa""" +186 9 evaluator """rankbased""" +186 10 dataset """kinships""" +186 10 model """distmult""" +186 10 loss """crossentropy""" +186 10 regularizer """no""" +186 10 optimizer """adam""" +186 10 training_loop """lcwa""" +186 10 evaluator """rankbased""" +186 11 dataset """kinships""" +186 11 model """distmult""" +186 11 loss """crossentropy""" +186 11 regularizer """no""" +186 11 optimizer """adam""" +186 11 training_loop """lcwa""" +186 11 evaluator """rankbased""" +186 12 dataset """kinships""" +186 12 model """distmult""" +186 12 loss """crossentropy""" +186 12 regularizer """no""" +186 12 optimizer """adam""" +186 12 training_loop """lcwa""" +186 12 evaluator """rankbased""" +186 13 dataset """kinships""" +186 13 model """distmult""" +186 13 loss """crossentropy""" +186 13 regularizer """no""" +186 13 optimizer """adam""" +186 13 training_loop """lcwa""" +186 13 evaluator """rankbased""" +186 14 dataset """kinships""" +186 14 model """distmult""" +186 14 loss """crossentropy""" +186 14 regularizer """no""" +186 14 optimizer """adam""" +186 14 training_loop """lcwa""" +186 14 evaluator """rankbased""" +186 15 dataset """kinships""" +186 15 model """distmult""" +186 15 loss """crossentropy""" +186 15 regularizer """no""" +186 15 optimizer """adam""" +186 15 training_loop """lcwa""" +186 15 evaluator """rankbased""" +186 16 dataset """kinships""" +186 16 model """distmult""" +186 16 loss """crossentropy""" +186 16 
regularizer """no""" +186 16 optimizer """adam""" +186 16 training_loop """lcwa""" +186 16 evaluator """rankbased""" +186 17 dataset """kinships""" +186 17 model """distmult""" +186 17 loss """crossentropy""" +186 17 regularizer """no""" +186 17 optimizer """adam""" +186 17 training_loop """lcwa""" +186 17 evaluator """rankbased""" +186 18 dataset """kinships""" +186 18 model """distmult""" +186 18 loss """crossentropy""" +186 18 regularizer """no""" +186 18 optimizer """adam""" +186 18 training_loop """lcwa""" +186 18 evaluator """rankbased""" +186 19 dataset """kinships""" +186 19 model """distmult""" +186 19 loss """crossentropy""" +186 19 regularizer """no""" +186 19 optimizer """adam""" +186 19 training_loop """lcwa""" +186 19 evaluator """rankbased""" +186 20 dataset """kinships""" +186 20 model """distmult""" +186 20 loss """crossentropy""" +186 20 regularizer """no""" +186 20 optimizer """adam""" +186 20 training_loop """lcwa""" +186 20 evaluator """rankbased""" +186 21 dataset """kinships""" +186 21 model """distmult""" +186 21 loss """crossentropy""" +186 21 regularizer """no""" +186 21 optimizer """adam""" +186 21 training_loop """lcwa""" +186 21 evaluator """rankbased""" +186 22 dataset """kinships""" +186 22 model """distmult""" +186 22 loss """crossentropy""" +186 22 regularizer """no""" +186 22 optimizer """adam""" +186 22 training_loop """lcwa""" +186 22 evaluator """rankbased""" +186 23 dataset """kinships""" +186 23 model """distmult""" +186 23 loss """crossentropy""" +186 23 regularizer """no""" +186 23 optimizer """adam""" +186 23 training_loop """lcwa""" +186 23 evaluator """rankbased""" +186 24 dataset """kinships""" +186 24 model """distmult""" +186 24 loss """crossentropy""" +186 24 regularizer """no""" +186 24 optimizer """adam""" +186 24 training_loop """lcwa""" +186 24 evaluator """rankbased""" +186 25 dataset """kinships""" +186 25 model """distmult""" +186 25 loss """crossentropy""" +186 25 regularizer """no""" +186 25 optimizer 
"""adam""" +186 25 training_loop """lcwa""" +186 25 evaluator """rankbased""" +186 26 dataset """kinships""" +186 26 model """distmult""" +186 26 loss """crossentropy""" +186 26 regularizer """no""" +186 26 optimizer """adam""" +186 26 training_loop """lcwa""" +186 26 evaluator """rankbased""" +186 27 dataset """kinships""" +186 27 model """distmult""" +186 27 loss """crossentropy""" +186 27 regularizer """no""" +186 27 optimizer """adam""" +186 27 training_loop """lcwa""" +186 27 evaluator """rankbased""" +186 28 dataset """kinships""" +186 28 model """distmult""" +186 28 loss """crossentropy""" +186 28 regularizer """no""" +186 28 optimizer """adam""" +186 28 training_loop """lcwa""" +186 28 evaluator """rankbased""" +186 29 dataset """kinships""" +186 29 model """distmult""" +186 29 loss """crossentropy""" +186 29 regularizer """no""" +186 29 optimizer """adam""" +186 29 training_loop """lcwa""" +186 29 evaluator """rankbased""" +186 30 dataset """kinships""" +186 30 model """distmult""" +186 30 loss """crossentropy""" +186 30 regularizer """no""" +186 30 optimizer """adam""" +186 30 training_loop """lcwa""" +186 30 evaluator """rankbased""" +186 31 dataset """kinships""" +186 31 model """distmult""" +186 31 loss """crossentropy""" +186 31 regularizer """no""" +186 31 optimizer """adam""" +186 31 training_loop """lcwa""" +186 31 evaluator """rankbased""" +186 32 dataset """kinships""" +186 32 model """distmult""" +186 32 loss """crossentropy""" +186 32 regularizer """no""" +186 32 optimizer """adam""" +186 32 training_loop """lcwa""" +186 32 evaluator """rankbased""" +186 33 dataset """kinships""" +186 33 model """distmult""" +186 33 loss """crossentropy""" +186 33 regularizer """no""" +186 33 optimizer """adam""" +186 33 training_loop """lcwa""" +186 33 evaluator """rankbased""" +186 34 dataset """kinships""" +186 34 model """distmult""" +186 34 loss """crossentropy""" +186 34 regularizer """no""" +186 34 optimizer """adam""" +186 34 training_loop """lcwa""" 
+186 34 evaluator """rankbased""" +186 35 dataset """kinships""" +186 35 model """distmult""" +186 35 loss """crossentropy""" +186 35 regularizer """no""" +186 35 optimizer """adam""" +186 35 training_loop """lcwa""" +186 35 evaluator """rankbased""" +186 36 dataset """kinships""" +186 36 model """distmult""" +186 36 loss """crossentropy""" +186 36 regularizer """no""" +186 36 optimizer """adam""" +186 36 training_loop """lcwa""" +186 36 evaluator """rankbased""" +186 37 dataset """kinships""" +186 37 model """distmult""" +186 37 loss """crossentropy""" +186 37 regularizer """no""" +186 37 optimizer """adam""" +186 37 training_loop """lcwa""" +186 37 evaluator """rankbased""" +186 38 dataset """kinships""" +186 38 model """distmult""" +186 38 loss """crossentropy""" +186 38 regularizer """no""" +186 38 optimizer """adam""" +186 38 training_loop """lcwa""" +186 38 evaluator """rankbased""" +186 39 dataset """kinships""" +186 39 model """distmult""" +186 39 loss """crossentropy""" +186 39 regularizer """no""" +186 39 optimizer """adam""" +186 39 training_loop """lcwa""" +186 39 evaluator """rankbased""" +186 40 dataset """kinships""" +186 40 model """distmult""" +186 40 loss """crossentropy""" +186 40 regularizer """no""" +186 40 optimizer """adam""" +186 40 training_loop """lcwa""" +186 40 evaluator """rankbased""" +186 41 dataset """kinships""" +186 41 model """distmult""" +186 41 loss """crossentropy""" +186 41 regularizer """no""" +186 41 optimizer """adam""" +186 41 training_loop """lcwa""" +186 41 evaluator """rankbased""" +186 42 dataset """kinships""" +186 42 model """distmult""" +186 42 loss """crossentropy""" +186 42 regularizer """no""" +186 42 optimizer """adam""" +186 42 training_loop """lcwa""" +186 42 evaluator """rankbased""" +186 43 dataset """kinships""" +186 43 model """distmult""" +186 43 loss """crossentropy""" +186 43 regularizer """no""" +186 43 optimizer """adam""" +186 43 training_loop """lcwa""" +186 43 evaluator """rankbased""" +186 44 
dataset """kinships""" +186 44 model """distmult""" +186 44 loss """crossentropy""" +186 44 regularizer """no""" +186 44 optimizer """adam""" +186 44 training_loop """lcwa""" +186 44 evaluator """rankbased""" +186 45 dataset """kinships""" +186 45 model """distmult""" +186 45 loss """crossentropy""" +186 45 regularizer """no""" +186 45 optimizer """adam""" +186 45 training_loop """lcwa""" +186 45 evaluator """rankbased""" +186 46 dataset """kinships""" +186 46 model """distmult""" +186 46 loss """crossentropy""" +186 46 regularizer """no""" +186 46 optimizer """adam""" +186 46 training_loop """lcwa""" +186 46 evaluator """rankbased""" +186 47 dataset """kinships""" +186 47 model """distmult""" +186 47 loss """crossentropy""" +186 47 regularizer """no""" +186 47 optimizer """adam""" +186 47 training_loop """lcwa""" +186 47 evaluator """rankbased""" +186 48 dataset """kinships""" +186 48 model """distmult""" +186 48 loss """crossentropy""" +186 48 regularizer """no""" +186 48 optimizer """adam""" +186 48 training_loop """lcwa""" +186 48 evaluator """rankbased""" +186 49 dataset """kinships""" +186 49 model """distmult""" +186 49 loss """crossentropy""" +186 49 regularizer """no""" +186 49 optimizer """adam""" +186 49 training_loop """lcwa""" +186 49 evaluator """rankbased""" +186 50 dataset """kinships""" +186 50 model """distmult""" +186 50 loss """crossentropy""" +186 50 regularizer """no""" +186 50 optimizer """adam""" +186 50 training_loop """lcwa""" +186 50 evaluator """rankbased""" +186 51 dataset """kinships""" +186 51 model """distmult""" +186 51 loss """crossentropy""" +186 51 regularizer """no""" +186 51 optimizer """adam""" +186 51 training_loop """lcwa""" +186 51 evaluator """rankbased""" +186 52 dataset """kinships""" +186 52 model """distmult""" +186 52 loss """crossentropy""" +186 52 regularizer """no""" +186 52 optimizer """adam""" +186 52 training_loop """lcwa""" +186 52 evaluator """rankbased""" +186 53 dataset """kinships""" +186 53 model 
"""distmult""" +186 53 loss """crossentropy""" +186 53 regularizer """no""" +186 53 optimizer """adam""" +186 53 training_loop """lcwa""" +186 53 evaluator """rankbased""" +186 54 dataset """kinships""" +186 54 model """distmult""" +186 54 loss """crossentropy""" +186 54 regularizer """no""" +186 54 optimizer """adam""" +186 54 training_loop """lcwa""" +186 54 evaluator """rankbased""" +186 55 dataset """kinships""" +186 55 model """distmult""" +186 55 loss """crossentropy""" +186 55 regularizer """no""" +186 55 optimizer """adam""" +186 55 training_loop """lcwa""" +186 55 evaluator """rankbased""" +186 56 dataset """kinships""" +186 56 model """distmult""" +186 56 loss """crossentropy""" +186 56 regularizer """no""" +186 56 optimizer """adam""" +186 56 training_loop """lcwa""" +186 56 evaluator """rankbased""" +186 57 dataset """kinships""" +186 57 model """distmult""" +186 57 loss """crossentropy""" +186 57 regularizer """no""" +186 57 optimizer """adam""" +186 57 training_loop """lcwa""" +186 57 evaluator """rankbased""" +186 58 dataset """kinships""" +186 58 model """distmult""" +186 58 loss """crossentropy""" +186 58 regularizer """no""" +186 58 optimizer """adam""" +186 58 training_loop """lcwa""" +186 58 evaluator """rankbased""" +186 59 dataset """kinships""" +186 59 model """distmult""" +186 59 loss """crossentropy""" +186 59 regularizer """no""" +186 59 optimizer """adam""" +186 59 training_loop """lcwa""" +186 59 evaluator """rankbased""" +186 60 dataset """kinships""" +186 60 model """distmult""" +186 60 loss """crossentropy""" +186 60 regularizer """no""" +186 60 optimizer """adam""" +186 60 training_loop """lcwa""" +186 60 evaluator """rankbased""" +186 61 dataset """kinships""" +186 61 model """distmult""" +186 61 loss """crossentropy""" +186 61 regularizer """no""" +186 61 optimizer """adam""" +186 61 training_loop """lcwa""" +186 61 evaluator """rankbased""" +186 62 dataset """kinships""" +186 62 model """distmult""" +186 62 loss """crossentropy""" 
+186 62 regularizer """no""" +186 62 optimizer """adam""" +186 62 training_loop """lcwa""" +186 62 evaluator """rankbased""" +186 63 dataset """kinships""" +186 63 model """distmult""" +186 63 loss """crossentropy""" +186 63 regularizer """no""" +186 63 optimizer """adam""" +186 63 training_loop """lcwa""" +186 63 evaluator """rankbased""" +186 64 dataset """kinships""" +186 64 model """distmult""" +186 64 loss """crossentropy""" +186 64 regularizer """no""" +186 64 optimizer """adam""" +186 64 training_loop """lcwa""" +186 64 evaluator """rankbased""" +186 65 dataset """kinships""" +186 65 model """distmult""" +186 65 loss """crossentropy""" +186 65 regularizer """no""" +186 65 optimizer """adam""" +186 65 training_loop """lcwa""" +186 65 evaluator """rankbased""" +186 66 dataset """kinships""" +186 66 model """distmult""" +186 66 loss """crossentropy""" +186 66 regularizer """no""" +186 66 optimizer """adam""" +186 66 training_loop """lcwa""" +186 66 evaluator """rankbased""" +186 67 dataset """kinships""" +186 67 model """distmult""" +186 67 loss """crossentropy""" +186 67 regularizer """no""" +186 67 optimizer """adam""" +186 67 training_loop """lcwa""" +186 67 evaluator """rankbased""" +186 68 dataset """kinships""" +186 68 model """distmult""" +186 68 loss """crossentropy""" +186 68 regularizer """no""" +186 68 optimizer """adam""" +186 68 training_loop """lcwa""" +186 68 evaluator """rankbased""" +186 69 dataset """kinships""" +186 69 model """distmult""" +186 69 loss """crossentropy""" +186 69 regularizer """no""" +186 69 optimizer """adam""" +186 69 training_loop """lcwa""" +186 69 evaluator """rankbased""" +186 70 dataset """kinships""" +186 70 model """distmult""" +186 70 loss """crossentropy""" +186 70 regularizer """no""" +186 70 optimizer """adam""" +186 70 training_loop """lcwa""" +186 70 evaluator """rankbased""" +186 71 dataset """kinships""" +186 71 model """distmult""" +186 71 loss """crossentropy""" +186 71 regularizer """no""" +186 71 optimizer 
"""adam""" +186 71 training_loop """lcwa""" +186 71 evaluator """rankbased""" +186 72 dataset """kinships""" +186 72 model """distmult""" +186 72 loss """crossentropy""" +186 72 regularizer """no""" +186 72 optimizer """adam""" +186 72 training_loop """lcwa""" +186 72 evaluator """rankbased""" +186 73 dataset """kinships""" +186 73 model """distmult""" +186 73 loss """crossentropy""" +186 73 regularizer """no""" +186 73 optimizer """adam""" +186 73 training_loop """lcwa""" +186 73 evaluator """rankbased""" +186 74 dataset """kinships""" +186 74 model """distmult""" +186 74 loss """crossentropy""" +186 74 regularizer """no""" +186 74 optimizer """adam""" +186 74 training_loop """lcwa""" +186 74 evaluator """rankbased""" +186 75 dataset """kinships""" +186 75 model """distmult""" +186 75 loss """crossentropy""" +186 75 regularizer """no""" +186 75 optimizer """adam""" +186 75 training_loop """lcwa""" +186 75 evaluator """rankbased""" +186 76 dataset """kinships""" +186 76 model """distmult""" +186 76 loss """crossentropy""" +186 76 regularizer """no""" +186 76 optimizer """adam""" +186 76 training_loop """lcwa""" +186 76 evaluator """rankbased""" +186 77 dataset """kinships""" +186 77 model """distmult""" +186 77 loss """crossentropy""" +186 77 regularizer """no""" +186 77 optimizer """adam""" +186 77 training_loop """lcwa""" +186 77 evaluator """rankbased""" +186 78 dataset """kinships""" +186 78 model """distmult""" +186 78 loss """crossentropy""" +186 78 regularizer """no""" +186 78 optimizer """adam""" +186 78 training_loop """lcwa""" +186 78 evaluator """rankbased""" +186 79 dataset """kinships""" +186 79 model """distmult""" +186 79 loss """crossentropy""" +186 79 regularizer """no""" +186 79 optimizer """adam""" +186 79 training_loop """lcwa""" +186 79 evaluator """rankbased""" +186 80 dataset """kinships""" +186 80 model """distmult""" +186 80 loss """crossentropy""" +186 80 regularizer """no""" +186 80 optimizer """adam""" +186 80 training_loop """lcwa""" 
+186 80 evaluator """rankbased""" +186 81 dataset """kinships""" +186 81 model """distmult""" +186 81 loss """crossentropy""" +186 81 regularizer """no""" +186 81 optimizer """adam""" +186 81 training_loop """lcwa""" +186 81 evaluator """rankbased""" +186 82 dataset """kinships""" +186 82 model """distmult""" +186 82 loss """crossentropy""" +186 82 regularizer """no""" +186 82 optimizer """adam""" +186 82 training_loop """lcwa""" +186 82 evaluator """rankbased""" +186 83 dataset """kinships""" +186 83 model """distmult""" +186 83 loss """crossentropy""" +186 83 regularizer """no""" +186 83 optimizer """adam""" +186 83 training_loop """lcwa""" +186 83 evaluator """rankbased""" +186 84 dataset """kinships""" +186 84 model """distmult""" +186 84 loss """crossentropy""" +186 84 regularizer """no""" +186 84 optimizer """adam""" +186 84 training_loop """lcwa""" +186 84 evaluator """rankbased""" +186 85 dataset """kinships""" +186 85 model """distmult""" +186 85 loss """crossentropy""" +186 85 regularizer """no""" +186 85 optimizer """adam""" +186 85 training_loop """lcwa""" +186 85 evaluator """rankbased""" +186 86 dataset """kinships""" +186 86 model """distmult""" +186 86 loss """crossentropy""" +186 86 regularizer """no""" +186 86 optimizer """adam""" +186 86 training_loop """lcwa""" +186 86 evaluator """rankbased""" +186 87 dataset """kinships""" +186 87 model """distmult""" +186 87 loss """crossentropy""" +186 87 regularizer """no""" +186 87 optimizer """adam""" +186 87 training_loop """lcwa""" +186 87 evaluator """rankbased""" +186 88 dataset """kinships""" +186 88 model """distmult""" +186 88 loss """crossentropy""" +186 88 regularizer """no""" +186 88 optimizer """adam""" +186 88 training_loop """lcwa""" +186 88 evaluator """rankbased""" +186 89 dataset """kinships""" +186 89 model """distmult""" +186 89 loss """crossentropy""" +186 89 regularizer """no""" +186 89 optimizer """adam""" +186 89 training_loop """lcwa""" +186 89 evaluator """rankbased""" +186 90 
dataset """kinships""" +186 90 model """distmult""" +186 90 loss """crossentropy""" +186 90 regularizer """no""" +186 90 optimizer """adam""" +186 90 training_loop """lcwa""" +186 90 evaluator """rankbased""" +186 91 dataset """kinships""" +186 91 model """distmult""" +186 91 loss """crossentropy""" +186 91 regularizer """no""" +186 91 optimizer """adam""" +186 91 training_loop """lcwa""" +186 91 evaluator """rankbased""" +186 92 dataset """kinships""" +186 92 model """distmult""" +186 92 loss """crossentropy""" +186 92 regularizer """no""" +186 92 optimizer """adam""" +186 92 training_loop """lcwa""" +186 92 evaluator """rankbased""" +186 93 dataset """kinships""" +186 93 model """distmult""" +186 93 loss """crossentropy""" +186 93 regularizer """no""" +186 93 optimizer """adam""" +186 93 training_loop """lcwa""" +186 93 evaluator """rankbased""" +186 94 dataset """kinships""" +186 94 model """distmult""" +186 94 loss """crossentropy""" +186 94 regularizer """no""" +186 94 optimizer """adam""" +186 94 training_loop """lcwa""" +186 94 evaluator """rankbased""" +186 95 dataset """kinships""" +186 95 model """distmult""" +186 95 loss """crossentropy""" +186 95 regularizer """no""" +186 95 optimizer """adam""" +186 95 training_loop """lcwa""" +186 95 evaluator """rankbased""" +186 96 dataset """kinships""" +186 96 model """distmult""" +186 96 loss """crossentropy""" +186 96 regularizer """no""" +186 96 optimizer """adam""" +186 96 training_loop """lcwa""" +186 96 evaluator """rankbased""" +186 97 dataset """kinships""" +186 97 model """distmult""" +186 97 loss """crossentropy""" +186 97 regularizer """no""" +186 97 optimizer """adam""" +186 97 training_loop """lcwa""" +186 97 evaluator """rankbased""" +186 98 dataset """kinships""" +186 98 model """distmult""" +186 98 loss """crossentropy""" +186 98 regularizer """no""" +186 98 optimizer """adam""" +186 98 training_loop """lcwa""" +186 98 evaluator """rankbased""" +186 99 dataset """kinships""" +186 99 model 
"""distmult""" +186 99 loss """crossentropy""" +186 99 regularizer """no""" +186 99 optimizer """adam""" +186 99 training_loop """lcwa""" +186 99 evaluator """rankbased""" +186 100 dataset """kinships""" +186 100 model """distmult""" +186 100 loss """crossentropy""" +186 100 regularizer """no""" +186 100 optimizer """adam""" +186 100 training_loop """lcwa""" +186 100 evaluator """rankbased""" +187 1 model.embedding_dim 0.0 +187 1 optimizer.lr 0.01456098191232723 +187 1 training.batch_size 1.0 +187 1 training.label_smoothing 0.048199039788778764 +187 2 model.embedding_dim 2.0 +187 2 optimizer.lr 0.044581795239040274 +187 2 training.batch_size 1.0 +187 2 training.label_smoothing 0.26551587677001753 +187 3 model.embedding_dim 0.0 +187 3 optimizer.lr 0.015759987509763117 +187 3 training.batch_size 2.0 +187 3 training.label_smoothing 0.01726490436278675 +187 4 model.embedding_dim 2.0 +187 4 optimizer.lr 0.012153675836758393 +187 4 training.batch_size 0.0 +187 4 training.label_smoothing 0.002945919212689328 +187 5 model.embedding_dim 2.0 +187 5 optimizer.lr 0.09081525208016866 +187 5 training.batch_size 2.0 +187 5 training.label_smoothing 0.012902015539427755 +187 6 model.embedding_dim 2.0 +187 6 optimizer.lr 0.011508240596353123 +187 6 training.batch_size 2.0 +187 6 training.label_smoothing 0.903402851039472 +187 7 model.embedding_dim 1.0 +187 7 optimizer.lr 0.0452375074645323 +187 7 training.batch_size 2.0 +187 7 training.label_smoothing 0.5574212581784149 +187 8 model.embedding_dim 2.0 +187 8 optimizer.lr 0.007909092359002343 +187 8 training.batch_size 2.0 +187 8 training.label_smoothing 0.23949467038903438 +187 9 model.embedding_dim 0.0 +187 9 optimizer.lr 0.004413220767086754 +187 9 training.batch_size 0.0 +187 9 training.label_smoothing 0.32794770510267035 +187 10 model.embedding_dim 2.0 +187 10 optimizer.lr 0.044086318089986934 +187 10 training.batch_size 1.0 +187 10 training.label_smoothing 0.22960994808726434 +187 11 model.embedding_dim 2.0 +187 11 optimizer.lr 
0.018877413927000583 +187 11 training.batch_size 1.0 +187 11 training.label_smoothing 0.007228555548791086 +187 12 model.embedding_dim 2.0 +187 12 optimizer.lr 0.0472371432401778 +187 12 training.batch_size 0.0 +187 12 training.label_smoothing 0.0010790403378162748 +187 13 model.embedding_dim 2.0 +187 13 optimizer.lr 0.0027709944247721464 +187 13 training.batch_size 0.0 +187 13 training.label_smoothing 0.11586778374480909 +187 14 model.embedding_dim 0.0 +187 14 optimizer.lr 0.02868094278356511 +187 14 training.batch_size 0.0 +187 14 training.label_smoothing 0.1752313355321472 +187 15 model.embedding_dim 0.0 +187 15 optimizer.lr 0.0030773041928502352 +187 15 training.batch_size 1.0 +187 15 training.label_smoothing 0.016242840637295867 +187 16 model.embedding_dim 0.0 +187 16 optimizer.lr 0.011340185944588948 +187 16 training.batch_size 1.0 +187 16 training.label_smoothing 0.38324671315050557 +187 17 model.embedding_dim 0.0 +187 17 optimizer.lr 0.053764885128275874 +187 17 training.batch_size 1.0 +187 17 training.label_smoothing 0.025437468411755808 +187 18 model.embedding_dim 2.0 +187 18 optimizer.lr 0.07858088246154565 +187 18 training.batch_size 1.0 +187 18 training.label_smoothing 0.031943902787392585 +187 19 model.embedding_dim 1.0 +187 19 optimizer.lr 0.014949406508998658 +187 19 training.batch_size 0.0 +187 19 training.label_smoothing 0.001359460572735209 +187 20 model.embedding_dim 1.0 +187 20 optimizer.lr 0.0020049245578224835 +187 20 training.batch_size 2.0 +187 20 training.label_smoothing 0.001463347384190178 +187 21 model.embedding_dim 2.0 +187 21 optimizer.lr 0.0040096002186211935 +187 21 training.batch_size 0.0 +187 21 training.label_smoothing 0.0012852517967448676 +187 22 model.embedding_dim 2.0 +187 22 optimizer.lr 0.017753804524434397 +187 22 training.batch_size 1.0 +187 22 training.label_smoothing 0.014318030196650846 +187 23 model.embedding_dim 1.0 +187 23 optimizer.lr 0.048003544334803944 +187 23 training.batch_size 2.0 +187 23 
training.label_smoothing 0.0017984231497282554 +187 24 model.embedding_dim 0.0 +187 24 optimizer.lr 0.07107957005572475 +187 24 training.batch_size 2.0 +187 24 training.label_smoothing 0.01498564487509235 +187 25 model.embedding_dim 2.0 +187 25 optimizer.lr 0.004162921505123817 +187 25 training.batch_size 2.0 +187 25 training.label_smoothing 0.03023828073929869 +187 26 model.embedding_dim 2.0 +187 26 optimizer.lr 0.01787900240807974 +187 26 training.batch_size 0.0 +187 26 training.label_smoothing 0.5286981214009988 +187 27 model.embedding_dim 2.0 +187 27 optimizer.lr 0.03276035536644632 +187 27 training.batch_size 1.0 +187 27 training.label_smoothing 0.3446709154697953 +187 28 model.embedding_dim 2.0 +187 28 optimizer.lr 0.00121701885083182 +187 28 training.batch_size 2.0 +187 28 training.label_smoothing 0.03772724287005371 +187 29 model.embedding_dim 2.0 +187 29 optimizer.lr 0.02129513606804766 +187 29 training.batch_size 0.0 +187 29 training.label_smoothing 0.004372322935651539 +187 30 model.embedding_dim 0.0 +187 30 optimizer.lr 0.0013095571136811 +187 30 training.batch_size 0.0 +187 30 training.label_smoothing 0.9935271067891881 +187 31 model.embedding_dim 2.0 +187 31 optimizer.lr 0.08671378477217948 +187 31 training.batch_size 0.0 +187 31 training.label_smoothing 0.0022768629725840248 +187 32 model.embedding_dim 2.0 +187 32 optimizer.lr 0.013233969574676947 +187 32 training.batch_size 1.0 +187 32 training.label_smoothing 0.03328709177627421 +187 33 model.embedding_dim 0.0 +187 33 optimizer.lr 0.0030979933131736023 +187 33 training.batch_size 0.0 +187 33 training.label_smoothing 0.0010562951774129592 +187 34 model.embedding_dim 1.0 +187 34 optimizer.lr 0.012920565461640237 +187 34 training.batch_size 0.0 +187 34 training.label_smoothing 0.2017731562993329 +187 35 model.embedding_dim 1.0 +187 35 optimizer.lr 0.022199720453345163 +187 35 training.batch_size 0.0 +187 35 training.label_smoothing 0.3639703106659455 +187 36 model.embedding_dim 1.0 +187 36 
optimizer.lr 0.0069155679108761185 +187 36 training.batch_size 0.0 +187 36 training.label_smoothing 0.22457917235373873 +187 37 model.embedding_dim 1.0 +187 37 optimizer.lr 0.002689721328530969 +187 37 training.batch_size 1.0 +187 37 training.label_smoothing 0.733639536967499 +187 38 model.embedding_dim 1.0 +187 38 optimizer.lr 0.06913164176418425 +187 38 training.batch_size 1.0 +187 38 training.label_smoothing 0.011460739742408025 +187 39 model.embedding_dim 0.0 +187 39 optimizer.lr 0.0016502883529144838 +187 39 training.batch_size 1.0 +187 39 training.label_smoothing 0.007439452494806274 +187 40 model.embedding_dim 2.0 +187 40 optimizer.lr 0.006850427002606257 +187 40 training.batch_size 1.0 +187 40 training.label_smoothing 0.11242937430515121 +187 41 model.embedding_dim 1.0 +187 41 optimizer.lr 0.07628937963812601 +187 41 training.batch_size 1.0 +187 41 training.label_smoothing 0.011255604055224912 +187 42 model.embedding_dim 2.0 +187 42 optimizer.lr 0.026684432835036888 +187 42 training.batch_size 0.0 +187 42 training.label_smoothing 0.11525382761138671 +187 43 model.embedding_dim 0.0 +187 43 optimizer.lr 0.02702358530788356 +187 43 training.batch_size 1.0 +187 43 training.label_smoothing 0.014551303202276204 +187 44 model.embedding_dim 0.0 +187 44 optimizer.lr 0.0012574692419029577 +187 44 training.batch_size 2.0 +187 44 training.label_smoothing 0.6951214370055826 +187 45 model.embedding_dim 1.0 +187 45 optimizer.lr 0.003096223886676238 +187 45 training.batch_size 1.0 +187 45 training.label_smoothing 0.004720259548102446 +187 46 model.embedding_dim 2.0 +187 46 optimizer.lr 0.08805047794736659 +187 46 training.batch_size 0.0 +187 46 training.label_smoothing 0.5026884594772112 +187 47 model.embedding_dim 2.0 +187 47 optimizer.lr 0.0010412779376662434 +187 47 training.batch_size 2.0 +187 47 training.label_smoothing 0.019062665088185285 +187 48 model.embedding_dim 0.0 +187 48 optimizer.lr 0.001044273196724729 +187 48 training.batch_size 2.0 +187 48 
training.label_smoothing 0.00338792954463817 +187 49 model.embedding_dim 1.0 +187 49 optimizer.lr 0.0024798348450030686 +187 49 training.batch_size 2.0 +187 49 training.label_smoothing 0.6624235812822726 +187 50 model.embedding_dim 2.0 +187 50 optimizer.lr 0.06125023180524637 +187 50 training.batch_size 2.0 +187 50 training.label_smoothing 0.0023591648208133604 +187 51 model.embedding_dim 2.0 +187 51 optimizer.lr 0.002075464394010231 +187 51 training.batch_size 2.0 +187 51 training.label_smoothing 0.13654167596596725 +187 52 model.embedding_dim 1.0 +187 52 optimizer.lr 0.06108477931391831 +187 52 training.batch_size 0.0 +187 52 training.label_smoothing 0.0011740967364960375 +187 53 model.embedding_dim 0.0 +187 53 optimizer.lr 0.06367039472465069 +187 53 training.batch_size 1.0 +187 53 training.label_smoothing 0.6271508024898051 +187 54 model.embedding_dim 2.0 +187 54 optimizer.lr 0.010384328967629235 +187 54 training.batch_size 2.0 +187 54 training.label_smoothing 0.14957366128194166 +187 55 model.embedding_dim 1.0 +187 55 optimizer.lr 0.01650892368964227 +187 55 training.batch_size 0.0 +187 55 training.label_smoothing 0.034898236908045256 +187 56 model.embedding_dim 2.0 +187 56 optimizer.lr 0.08476601648697715 +187 56 training.batch_size 1.0 +187 56 training.label_smoothing 0.6262362433649067 +187 57 model.embedding_dim 2.0 +187 57 optimizer.lr 0.0016082526221604262 +187 57 training.batch_size 0.0 +187 57 training.label_smoothing 0.02913629632445854 +187 58 model.embedding_dim 1.0 +187 58 optimizer.lr 0.001178750114773116 +187 58 training.batch_size 0.0 +187 58 training.label_smoothing 0.6549855315504469 +187 59 model.embedding_dim 2.0 +187 59 optimizer.lr 0.005716082594571351 +187 59 training.batch_size 0.0 +187 59 training.label_smoothing 0.19932477358752904 +187 60 model.embedding_dim 0.0 +187 60 optimizer.lr 0.07730221717598672 +187 60 training.batch_size 0.0 +187 60 training.label_smoothing 0.04115802192906445 +187 61 model.embedding_dim 2.0 +187 61 
optimizer.lr 0.03978095285937925 +187 61 training.batch_size 1.0 +187 61 training.label_smoothing 0.16367167899281637 +187 62 model.embedding_dim 0.0 +187 62 optimizer.lr 0.0012607718187477859 +187 62 training.batch_size 2.0 +187 62 training.label_smoothing 0.014588868907345223 +187 63 model.embedding_dim 1.0 +187 63 optimizer.lr 0.004643598793349088 +187 63 training.batch_size 1.0 +187 63 training.label_smoothing 0.041072048793168565 +187 64 model.embedding_dim 1.0 +187 64 optimizer.lr 0.0019417672335245776 +187 64 training.batch_size 0.0 +187 64 training.label_smoothing 0.008791073484756889 +187 65 model.embedding_dim 2.0 +187 65 optimizer.lr 0.015413664752836061 +187 65 training.batch_size 0.0 +187 65 training.label_smoothing 0.00431522419050341 +187 66 model.embedding_dim 0.0 +187 66 optimizer.lr 0.0011574341189809966 +187 66 training.batch_size 0.0 +187 66 training.label_smoothing 0.17779264720961924 +187 67 model.embedding_dim 2.0 +187 67 optimizer.lr 0.0011603957258610215 +187 67 training.batch_size 0.0 +187 67 training.label_smoothing 0.09672484530570678 +187 68 model.embedding_dim 0.0 +187 68 optimizer.lr 0.0012272599654111931 +187 68 training.batch_size 1.0 +187 68 training.label_smoothing 0.025229547670954228 +187 69 model.embedding_dim 2.0 +187 69 optimizer.lr 0.002016932180544579 +187 69 training.batch_size 1.0 +187 69 training.label_smoothing 0.07925345603189989 +187 70 model.embedding_dim 0.0 +187 70 optimizer.lr 0.0047703968226436915 +187 70 training.batch_size 1.0 +187 70 training.label_smoothing 0.12902855258993487 +187 71 model.embedding_dim 1.0 +187 71 optimizer.lr 0.004989719463022414 +187 71 training.batch_size 0.0 +187 71 training.label_smoothing 0.013544206299052843 +187 72 model.embedding_dim 2.0 +187 72 optimizer.lr 0.0013186194335047007 +187 72 training.batch_size 1.0 +187 72 training.label_smoothing 0.019152514324999533 +187 73 model.embedding_dim 1.0 +187 73 optimizer.lr 0.004033652311639018 +187 73 training.batch_size 0.0 +187 73 
training.label_smoothing 0.1934404934333678 +187 74 model.embedding_dim 2.0 +187 74 optimizer.lr 0.0010737759991233755 +187 74 training.batch_size 2.0 +187 74 training.label_smoothing 0.11864051901533244 +187 75 model.embedding_dim 0.0 +187 75 optimizer.lr 0.006625659652094422 +187 75 training.batch_size 1.0 +187 75 training.label_smoothing 0.004002668621260706 +187 76 model.embedding_dim 2.0 +187 76 optimizer.lr 0.0020240429619420392 +187 76 training.batch_size 0.0 +187 76 training.label_smoothing 0.7952636576858506 +187 77 model.embedding_dim 1.0 +187 77 optimizer.lr 0.07889962578688525 +187 77 training.batch_size 1.0 +187 77 training.label_smoothing 0.004753211822783175 +187 78 model.embedding_dim 0.0 +187 78 optimizer.lr 0.05082814133606356 +187 78 training.batch_size 0.0 +187 78 training.label_smoothing 0.0012925838297912148 +187 79 model.embedding_dim 0.0 +187 79 optimizer.lr 0.0034415896529503666 +187 79 training.batch_size 1.0 +187 79 training.label_smoothing 0.0011633925018386317 +187 80 model.embedding_dim 1.0 +187 80 optimizer.lr 0.014711412334508333 +187 80 training.batch_size 1.0 +187 80 training.label_smoothing 0.08475419766409618 +187 81 model.embedding_dim 2.0 +187 81 optimizer.lr 0.014297487336517605 +187 81 training.batch_size 1.0 +187 81 training.label_smoothing 0.003448932906812416 +187 82 model.embedding_dim 1.0 +187 82 optimizer.lr 0.008464390216184153 +187 82 training.batch_size 2.0 +187 82 training.label_smoothing 0.19141273142865053 +187 83 model.embedding_dim 1.0 +187 83 optimizer.lr 0.030691039204301853 +187 83 training.batch_size 0.0 +187 83 training.label_smoothing 0.0018496271030177401 +187 84 model.embedding_dim 2.0 +187 84 optimizer.lr 0.01714334937997138 +187 84 training.batch_size 1.0 +187 84 training.label_smoothing 0.254534716722115 +187 85 model.embedding_dim 0.0 +187 85 optimizer.lr 0.007039779030738038 +187 85 training.batch_size 0.0 +187 85 training.label_smoothing 0.8461501378764444 +187 86 model.embedding_dim 2.0 +187 86 
optimizer.lr 0.025364686798434708 +187 86 training.batch_size 2.0 +187 86 training.label_smoothing 0.35140288870651126 +187 87 model.embedding_dim 1.0 +187 87 optimizer.lr 0.04653955223981564 +187 87 training.batch_size 0.0 +187 87 training.label_smoothing 0.8101580659627408 +187 88 model.embedding_dim 0.0 +187 88 optimizer.lr 0.009638360764841994 +187 88 training.batch_size 2.0 +187 88 training.label_smoothing 0.016988095571237757 +187 89 model.embedding_dim 1.0 +187 89 optimizer.lr 0.0016584195245755691 +187 89 training.batch_size 0.0 +187 89 training.label_smoothing 0.013696159832332553 +187 90 model.embedding_dim 2.0 +187 90 optimizer.lr 0.09994633414898983 +187 90 training.batch_size 2.0 +187 90 training.label_smoothing 0.20064456444077253 +187 91 model.embedding_dim 0.0 +187 91 optimizer.lr 0.00418262477133138 +187 91 training.batch_size 1.0 +187 91 training.label_smoothing 0.0021741290943198825 +187 92 model.embedding_dim 1.0 +187 92 optimizer.lr 0.07709339890441189 +187 92 training.batch_size 2.0 +187 92 training.label_smoothing 0.0010486130549897535 +187 93 model.embedding_dim 0.0 +187 93 optimizer.lr 0.056408136332858294 +187 93 training.batch_size 1.0 +187 93 training.label_smoothing 0.012382084053278302 +187 94 model.embedding_dim 1.0 +187 94 optimizer.lr 0.0017235363928587415 +187 94 training.batch_size 1.0 +187 94 training.label_smoothing 0.47220488621277756 +187 95 model.embedding_dim 0.0 +187 95 optimizer.lr 0.001870821463190523 +187 95 training.batch_size 1.0 +187 95 training.label_smoothing 0.009992050609252954 +187 96 model.embedding_dim 1.0 +187 96 optimizer.lr 0.018962797908980592 +187 96 training.batch_size 2.0 +187 96 training.label_smoothing 0.10171646474754793 +187 97 model.embedding_dim 2.0 +187 97 optimizer.lr 0.04278249462945219 +187 97 training.batch_size 1.0 +187 97 training.label_smoothing 0.018074112124971467 +187 98 model.embedding_dim 1.0 +187 98 optimizer.lr 0.035367012142161744 +187 98 training.batch_size 1.0 +187 98 
training.label_smoothing 0.005360203974071546 +187 99 model.embedding_dim 2.0 +187 99 optimizer.lr 0.0029029627497174555 +187 99 training.batch_size 1.0 +187 99 training.label_smoothing 0.013856307621138143 +187 100 model.embedding_dim 2.0 +187 100 optimizer.lr 0.01091830067313318 +187 100 training.batch_size 2.0 +187 100 training.label_smoothing 0.008586421752323633 +187 1 dataset """kinships""" +187 1 model """distmult""" +187 1 loss """crossentropy""" +187 1 regularizer """no""" +187 1 optimizer """adam""" +187 1 training_loop """lcwa""" +187 1 evaluator """rankbased""" +187 2 dataset """kinships""" +187 2 model """distmult""" +187 2 loss """crossentropy""" +187 2 regularizer """no""" +187 2 optimizer """adam""" +187 2 training_loop """lcwa""" +187 2 evaluator """rankbased""" +187 3 dataset """kinships""" +187 3 model """distmult""" +187 3 loss """crossentropy""" +187 3 regularizer """no""" +187 3 optimizer """adam""" +187 3 training_loop """lcwa""" +187 3 evaluator """rankbased""" +187 4 dataset """kinships""" +187 4 model """distmult""" +187 4 loss """crossentropy""" +187 4 regularizer """no""" +187 4 optimizer """adam""" +187 4 training_loop """lcwa""" +187 4 evaluator """rankbased""" +187 5 dataset """kinships""" +187 5 model """distmult""" +187 5 loss """crossentropy""" +187 5 regularizer """no""" +187 5 optimizer """adam""" +187 5 training_loop """lcwa""" +187 5 evaluator """rankbased""" +187 6 dataset """kinships""" +187 6 model """distmult""" +187 6 loss """crossentropy""" +187 6 regularizer """no""" +187 6 optimizer """adam""" +187 6 training_loop """lcwa""" +187 6 evaluator """rankbased""" +187 7 dataset """kinships""" +187 7 model """distmult""" +187 7 loss """crossentropy""" +187 7 regularizer """no""" +187 7 optimizer """adam""" +187 7 training_loop """lcwa""" +187 7 evaluator """rankbased""" +187 8 dataset """kinships""" +187 8 model """distmult""" +187 8 loss """crossentropy""" +187 8 regularizer """no""" +187 8 optimizer """adam""" +187 8 
training_loop """lcwa""" +187 8 evaluator """rankbased""" +187 9 dataset """kinships""" +187 9 model """distmult""" +187 9 loss """crossentropy""" +187 9 regularizer """no""" +187 9 optimizer """adam""" +187 9 training_loop """lcwa""" +187 9 evaluator """rankbased""" +187 10 dataset """kinships""" +187 10 model """distmult""" +187 10 loss """crossentropy""" +187 10 regularizer """no""" +187 10 optimizer """adam""" +187 10 training_loop """lcwa""" +187 10 evaluator """rankbased""" +187 11 dataset """kinships""" +187 11 model """distmult""" +187 11 loss """crossentropy""" +187 11 regularizer """no""" +187 11 optimizer """adam""" +187 11 training_loop """lcwa""" +187 11 evaluator """rankbased""" +187 12 dataset """kinships""" +187 12 model """distmult""" +187 12 loss """crossentropy""" +187 12 regularizer """no""" +187 12 optimizer """adam""" +187 12 training_loop """lcwa""" +187 12 evaluator """rankbased""" +187 13 dataset """kinships""" +187 13 model """distmult""" +187 13 loss """crossentropy""" +187 13 regularizer """no""" +187 13 optimizer """adam""" +187 13 training_loop """lcwa""" +187 13 evaluator """rankbased""" +187 14 dataset """kinships""" +187 14 model """distmult""" +187 14 loss """crossentropy""" +187 14 regularizer """no""" +187 14 optimizer """adam""" +187 14 training_loop """lcwa""" +187 14 evaluator """rankbased""" +187 15 dataset """kinships""" +187 15 model """distmult""" +187 15 loss """crossentropy""" +187 15 regularizer """no""" +187 15 optimizer """adam""" +187 15 training_loop """lcwa""" +187 15 evaluator """rankbased""" +187 16 dataset """kinships""" +187 16 model """distmult""" +187 16 loss """crossentropy""" +187 16 regularizer """no""" +187 16 optimizer """adam""" +187 16 training_loop """lcwa""" +187 16 evaluator """rankbased""" +187 17 dataset """kinships""" +187 17 model """distmult""" +187 17 loss """crossentropy""" +187 17 regularizer """no""" +187 17 optimizer """adam""" +187 17 training_loop """lcwa""" +187 17 evaluator 
"""rankbased""" +187 18 dataset """kinships""" +187 18 model """distmult""" +187 18 loss """crossentropy""" +187 18 regularizer """no""" +187 18 optimizer """adam""" +187 18 training_loop """lcwa""" +187 18 evaluator """rankbased""" +187 19 dataset """kinships""" +187 19 model """distmult""" +187 19 loss """crossentropy""" +187 19 regularizer """no""" +187 19 optimizer """adam""" +187 19 training_loop """lcwa""" +187 19 evaluator """rankbased""" +187 20 dataset """kinships""" +187 20 model """distmult""" +187 20 loss """crossentropy""" +187 20 regularizer """no""" +187 20 optimizer """adam""" +187 20 training_loop """lcwa""" +187 20 evaluator """rankbased""" +187 21 dataset """kinships""" +187 21 model """distmult""" +187 21 loss """crossentropy""" +187 21 regularizer """no""" +187 21 optimizer """adam""" +187 21 training_loop """lcwa""" +187 21 evaluator """rankbased""" +187 22 dataset """kinships""" +187 22 model """distmult""" +187 22 loss """crossentropy""" +187 22 regularizer """no""" +187 22 optimizer """adam""" +187 22 training_loop """lcwa""" +187 22 evaluator """rankbased""" +187 23 dataset """kinships""" +187 23 model """distmult""" +187 23 loss """crossentropy""" +187 23 regularizer """no""" +187 23 optimizer """adam""" +187 23 training_loop """lcwa""" +187 23 evaluator """rankbased""" +187 24 dataset """kinships""" +187 24 model """distmult""" +187 24 loss """crossentropy""" +187 24 regularizer """no""" +187 24 optimizer """adam""" +187 24 training_loop """lcwa""" +187 24 evaluator """rankbased""" +187 25 dataset """kinships""" +187 25 model """distmult""" +187 25 loss """crossentropy""" +187 25 regularizer """no""" +187 25 optimizer """adam""" +187 25 training_loop """lcwa""" +187 25 evaluator """rankbased""" +187 26 dataset """kinships""" +187 26 model """distmult""" +187 26 loss """crossentropy""" +187 26 regularizer """no""" +187 26 optimizer """adam""" +187 26 training_loop """lcwa""" +187 26 evaluator """rankbased""" +187 27 dataset """kinships""" 
+187 27 model """distmult""" +187 27 loss """crossentropy""" +187 27 regularizer """no""" +187 27 optimizer """adam""" +187 27 training_loop """lcwa""" +187 27 evaluator """rankbased""" +187 28 dataset """kinships""" +187 28 model """distmult""" +187 28 loss """crossentropy""" +187 28 regularizer """no""" +187 28 optimizer """adam""" +187 28 training_loop """lcwa""" +187 28 evaluator """rankbased""" +187 29 dataset """kinships""" +187 29 model """distmult""" +187 29 loss """crossentropy""" +187 29 regularizer """no""" +187 29 optimizer """adam""" +187 29 training_loop """lcwa""" +187 29 evaluator """rankbased""" +187 30 dataset """kinships""" +187 30 model """distmult""" +187 30 loss """crossentropy""" +187 30 regularizer """no""" +187 30 optimizer """adam""" +187 30 training_loop """lcwa""" +187 30 evaluator """rankbased""" +187 31 dataset """kinships""" +187 31 model """distmult""" +187 31 loss """crossentropy""" +187 31 regularizer """no""" +187 31 optimizer """adam""" +187 31 training_loop """lcwa""" +187 31 evaluator """rankbased""" +187 32 dataset """kinships""" +187 32 model """distmult""" +187 32 loss """crossentropy""" +187 32 regularizer """no""" +187 32 optimizer """adam""" +187 32 training_loop """lcwa""" +187 32 evaluator """rankbased""" +187 33 dataset """kinships""" +187 33 model """distmult""" +187 33 loss """crossentropy""" +187 33 regularizer """no""" +187 33 optimizer """adam""" +187 33 training_loop """lcwa""" +187 33 evaluator """rankbased""" +187 34 dataset """kinships""" +187 34 model """distmult""" +187 34 loss """crossentropy""" +187 34 regularizer """no""" +187 34 optimizer """adam""" +187 34 training_loop """lcwa""" +187 34 evaluator """rankbased""" +187 35 dataset """kinships""" +187 35 model """distmult""" +187 35 loss """crossentropy""" +187 35 regularizer """no""" +187 35 optimizer """adam""" +187 35 training_loop """lcwa""" +187 35 evaluator """rankbased""" +187 36 dataset """kinships""" +187 36 model """distmult""" +187 36 loss 
"""crossentropy""" +187 36 regularizer """no""" +187 36 optimizer """adam""" +187 36 training_loop """lcwa""" +187 36 evaluator """rankbased""" +187 37 dataset """kinships""" +187 37 model """distmult""" +187 37 loss """crossentropy""" +187 37 regularizer """no""" +187 37 optimizer """adam""" +187 37 training_loop """lcwa""" +187 37 evaluator """rankbased""" +187 38 dataset """kinships""" +187 38 model """distmult""" +187 38 loss """crossentropy""" +187 38 regularizer """no""" +187 38 optimizer """adam""" +187 38 training_loop """lcwa""" +187 38 evaluator """rankbased""" +187 39 dataset """kinships""" +187 39 model """distmult""" +187 39 loss """crossentropy""" +187 39 regularizer """no""" +187 39 optimizer """adam""" +187 39 training_loop """lcwa""" +187 39 evaluator """rankbased""" +187 40 dataset """kinships""" +187 40 model """distmult""" +187 40 loss """crossentropy""" +187 40 regularizer """no""" +187 40 optimizer """adam""" +187 40 training_loop """lcwa""" +187 40 evaluator """rankbased""" +187 41 dataset """kinships""" +187 41 model """distmult""" +187 41 loss """crossentropy""" +187 41 regularizer """no""" +187 41 optimizer """adam""" +187 41 training_loop """lcwa""" +187 41 evaluator """rankbased""" +187 42 dataset """kinships""" +187 42 model """distmult""" +187 42 loss """crossentropy""" +187 42 regularizer """no""" +187 42 optimizer """adam""" +187 42 training_loop """lcwa""" +187 42 evaluator """rankbased""" +187 43 dataset """kinships""" +187 43 model """distmult""" +187 43 loss """crossentropy""" +187 43 regularizer """no""" +187 43 optimizer """adam""" +187 43 training_loop """lcwa""" +187 43 evaluator """rankbased""" +187 44 dataset """kinships""" +187 44 model """distmult""" +187 44 loss """crossentropy""" +187 44 regularizer """no""" +187 44 optimizer """adam""" +187 44 training_loop """lcwa""" +187 44 evaluator """rankbased""" +187 45 dataset """kinships""" +187 45 model """distmult""" +187 45 loss """crossentropy""" +187 45 regularizer 
"""no""" +187 45 optimizer """adam""" +187 45 training_loop """lcwa""" +187 45 evaluator """rankbased""" +187 46 dataset """kinships""" +187 46 model """distmult""" +187 46 loss """crossentropy""" +187 46 regularizer """no""" +187 46 optimizer """adam""" +187 46 training_loop """lcwa""" +187 46 evaluator """rankbased""" +187 47 dataset """kinships""" +187 47 model """distmult""" +187 47 loss """crossentropy""" +187 47 regularizer """no""" +187 47 optimizer """adam""" +187 47 training_loop """lcwa""" +187 47 evaluator """rankbased""" +187 48 dataset """kinships""" +187 48 model """distmult""" +187 48 loss """crossentropy""" +187 48 regularizer """no""" +187 48 optimizer """adam""" +187 48 training_loop """lcwa""" +187 48 evaluator """rankbased""" +187 49 dataset """kinships""" +187 49 model """distmult""" +187 49 loss """crossentropy""" +187 49 regularizer """no""" +187 49 optimizer """adam""" +187 49 training_loop """lcwa""" +187 49 evaluator """rankbased""" +187 50 dataset """kinships""" +187 50 model """distmult""" +187 50 loss """crossentropy""" +187 50 regularizer """no""" +187 50 optimizer """adam""" +187 50 training_loop """lcwa""" +187 50 evaluator """rankbased""" +187 51 dataset """kinships""" +187 51 model """distmult""" +187 51 loss """crossentropy""" +187 51 regularizer """no""" +187 51 optimizer """adam""" +187 51 training_loop """lcwa""" +187 51 evaluator """rankbased""" +187 52 dataset """kinships""" +187 52 model """distmult""" +187 52 loss """crossentropy""" +187 52 regularizer """no""" +187 52 optimizer """adam""" +187 52 training_loop """lcwa""" +187 52 evaluator """rankbased""" +187 53 dataset """kinships""" +187 53 model """distmult""" +187 53 loss """crossentropy""" +187 53 regularizer """no""" +187 53 optimizer """adam""" +187 53 training_loop """lcwa""" +187 53 evaluator """rankbased""" +187 54 dataset """kinships""" +187 54 model """distmult""" +187 54 loss """crossentropy""" +187 54 regularizer """no""" +187 54 optimizer """adam""" +187 54 
training_loop """lcwa""" +187 54 evaluator """rankbased""" +187 55 dataset """kinships""" +187 55 model """distmult""" +187 55 loss """crossentropy""" +187 55 regularizer """no""" +187 55 optimizer """adam""" +187 55 training_loop """lcwa""" +187 55 evaluator """rankbased""" +187 56 dataset """kinships""" +187 56 model """distmult""" +187 56 loss """crossentropy""" +187 56 regularizer """no""" +187 56 optimizer """adam""" +187 56 training_loop """lcwa""" +187 56 evaluator """rankbased""" +187 57 dataset """kinships""" +187 57 model """distmult""" +187 57 loss """crossentropy""" +187 57 regularizer """no""" +187 57 optimizer """adam""" +187 57 training_loop """lcwa""" +187 57 evaluator """rankbased""" +187 58 dataset """kinships""" +187 58 model """distmult""" +187 58 loss """crossentropy""" +187 58 regularizer """no""" +187 58 optimizer """adam""" +187 58 training_loop """lcwa""" +187 58 evaluator """rankbased""" +187 59 dataset """kinships""" +187 59 model """distmult""" +187 59 loss """crossentropy""" +187 59 regularizer """no""" +187 59 optimizer """adam""" +187 59 training_loop """lcwa""" +187 59 evaluator """rankbased""" +187 60 dataset """kinships""" +187 60 model """distmult""" +187 60 loss """crossentropy""" +187 60 regularizer """no""" +187 60 optimizer """adam""" +187 60 training_loop """lcwa""" +187 60 evaluator """rankbased""" +187 61 dataset """kinships""" +187 61 model """distmult""" +187 61 loss """crossentropy""" +187 61 regularizer """no""" +187 61 optimizer """adam""" +187 61 training_loop """lcwa""" +187 61 evaluator """rankbased""" +187 62 dataset """kinships""" +187 62 model """distmult""" +187 62 loss """crossentropy""" +187 62 regularizer """no""" +187 62 optimizer """adam""" +187 62 training_loop """lcwa""" +187 62 evaluator """rankbased""" +187 63 dataset """kinships""" +187 63 model """distmult""" +187 63 loss """crossentropy""" +187 63 regularizer """no""" +187 63 optimizer """adam""" +187 63 training_loop """lcwa""" +187 63 evaluator 
"""rankbased""" +187 64 dataset """kinships""" +187 64 model """distmult""" +187 64 loss """crossentropy""" +187 64 regularizer """no""" +187 64 optimizer """adam""" +187 64 training_loop """lcwa""" +187 64 evaluator """rankbased""" +187 65 dataset """kinships""" +187 65 model """distmult""" +187 65 loss """crossentropy""" +187 65 regularizer """no""" +187 65 optimizer """adam""" +187 65 training_loop """lcwa""" +187 65 evaluator """rankbased""" +187 66 dataset """kinships""" +187 66 model """distmult""" +187 66 loss """crossentropy""" +187 66 regularizer """no""" +187 66 optimizer """adam""" +187 66 training_loop """lcwa""" +187 66 evaluator """rankbased""" +187 67 dataset """kinships""" +187 67 model """distmult""" +187 67 loss """crossentropy""" +187 67 regularizer """no""" +187 67 optimizer """adam""" +187 67 training_loop """lcwa""" +187 67 evaluator """rankbased""" +187 68 dataset """kinships""" +187 68 model """distmult""" +187 68 loss """crossentropy""" +187 68 regularizer """no""" +187 68 optimizer """adam""" +187 68 training_loop """lcwa""" +187 68 evaluator """rankbased""" +187 69 dataset """kinships""" +187 69 model """distmult""" +187 69 loss """crossentropy""" +187 69 regularizer """no""" +187 69 optimizer """adam""" +187 69 training_loop """lcwa""" +187 69 evaluator """rankbased""" +187 70 dataset """kinships""" +187 70 model """distmult""" +187 70 loss """crossentropy""" +187 70 regularizer """no""" +187 70 optimizer """adam""" +187 70 training_loop """lcwa""" +187 70 evaluator """rankbased""" +187 71 dataset """kinships""" +187 71 model """distmult""" +187 71 loss """crossentropy""" +187 71 regularizer """no""" +187 71 optimizer """adam""" +187 71 training_loop """lcwa""" +187 71 evaluator """rankbased""" +187 72 dataset """kinships""" +187 72 model """distmult""" +187 72 loss """crossentropy""" +187 72 regularizer """no""" +187 72 optimizer """adam""" +187 72 training_loop """lcwa""" +187 72 evaluator """rankbased""" +187 73 dataset """kinships""" 
+187 73 model """distmult""" +187 73 loss """crossentropy""" +187 73 regularizer """no""" +187 73 optimizer """adam""" +187 73 training_loop """lcwa""" +187 73 evaluator """rankbased""" +187 74 dataset """kinships""" +187 74 model """distmult""" +187 74 loss """crossentropy""" +187 74 regularizer """no""" +187 74 optimizer """adam""" +187 74 training_loop """lcwa""" +187 74 evaluator """rankbased""" +187 75 dataset """kinships""" +187 75 model """distmult""" +187 75 loss """crossentropy""" +187 75 regularizer """no""" +187 75 optimizer """adam""" +187 75 training_loop """lcwa""" +187 75 evaluator """rankbased""" +187 76 dataset """kinships""" +187 76 model """distmult""" +187 76 loss """crossentropy""" +187 76 regularizer """no""" +187 76 optimizer """adam""" +187 76 training_loop """lcwa""" +187 76 evaluator """rankbased""" +187 77 dataset """kinships""" +187 77 model """distmult""" +187 77 loss """crossentropy""" +187 77 regularizer """no""" +187 77 optimizer """adam""" +187 77 training_loop """lcwa""" +187 77 evaluator """rankbased""" +187 78 dataset """kinships""" +187 78 model """distmult""" +187 78 loss """crossentropy""" +187 78 regularizer """no""" +187 78 optimizer """adam""" +187 78 training_loop """lcwa""" +187 78 evaluator """rankbased""" +187 79 dataset """kinships""" +187 79 model """distmult""" +187 79 loss """crossentropy""" +187 79 regularizer """no""" +187 79 optimizer """adam""" +187 79 training_loop """lcwa""" +187 79 evaluator """rankbased""" +187 80 dataset """kinships""" +187 80 model """distmult""" +187 80 loss """crossentropy""" +187 80 regularizer """no""" +187 80 optimizer """adam""" +187 80 training_loop """lcwa""" +187 80 evaluator """rankbased""" +187 81 dataset """kinships""" +187 81 model """distmult""" +187 81 loss """crossentropy""" +187 81 regularizer """no""" +187 81 optimizer """adam""" +187 81 training_loop """lcwa""" +187 81 evaluator """rankbased""" +187 82 dataset """kinships""" +187 82 model """distmult""" +187 82 loss 
"""crossentropy""" +187 82 regularizer """no""" +187 82 optimizer """adam""" +187 82 training_loop """lcwa""" +187 82 evaluator """rankbased""" +187 83 dataset """kinships""" +187 83 model """distmult""" +187 83 loss """crossentropy""" +187 83 regularizer """no""" +187 83 optimizer """adam""" +187 83 training_loop """lcwa""" +187 83 evaluator """rankbased""" +187 84 dataset """kinships""" +187 84 model """distmult""" +187 84 loss """crossentropy""" +187 84 regularizer """no""" +187 84 optimizer """adam""" +187 84 training_loop """lcwa""" +187 84 evaluator """rankbased""" +187 85 dataset """kinships""" +187 85 model """distmult""" +187 85 loss """crossentropy""" +187 85 regularizer """no""" +187 85 optimizer """adam""" +187 85 training_loop """lcwa""" +187 85 evaluator """rankbased""" +187 86 dataset """kinships""" +187 86 model """distmult""" +187 86 loss """crossentropy""" +187 86 regularizer """no""" +187 86 optimizer """adam""" +187 86 training_loop """lcwa""" +187 86 evaluator """rankbased""" +187 87 dataset """kinships""" +187 87 model """distmult""" +187 87 loss """crossentropy""" +187 87 regularizer """no""" +187 87 optimizer """adam""" +187 87 training_loop """lcwa""" +187 87 evaluator """rankbased""" +187 88 dataset """kinships""" +187 88 model """distmult""" +187 88 loss """crossentropy""" +187 88 regularizer """no""" +187 88 optimizer """adam""" +187 88 training_loop """lcwa""" +187 88 evaluator """rankbased""" +187 89 dataset """kinships""" +187 89 model """distmult""" +187 89 loss """crossentropy""" +187 89 regularizer """no""" +187 89 optimizer """adam""" +187 89 training_loop """lcwa""" +187 89 evaluator """rankbased""" +187 90 dataset """kinships""" +187 90 model """distmult""" +187 90 loss """crossentropy""" +187 90 regularizer """no""" +187 90 optimizer """adam""" +187 90 training_loop """lcwa""" +187 90 evaluator """rankbased""" +187 91 dataset """kinships""" +187 91 model """distmult""" +187 91 loss """crossentropy""" +187 91 regularizer 
"""no""" +187 91 optimizer """adam""" +187 91 training_loop """lcwa""" +187 91 evaluator """rankbased""" +187 92 dataset """kinships""" +187 92 model """distmult""" +187 92 loss """crossentropy""" +187 92 regularizer """no""" +187 92 optimizer """adam""" +187 92 training_loop """lcwa""" +187 92 evaluator """rankbased""" +187 93 dataset """kinships""" +187 93 model """distmult""" +187 93 loss """crossentropy""" +187 93 regularizer """no""" +187 93 optimizer """adam""" +187 93 training_loop """lcwa""" +187 93 evaluator """rankbased""" +187 94 dataset """kinships""" +187 94 model """distmult""" +187 94 loss """crossentropy""" +187 94 regularizer """no""" +187 94 optimizer """adam""" +187 94 training_loop """lcwa""" +187 94 evaluator """rankbased""" +187 95 dataset """kinships""" +187 95 model """distmult""" +187 95 loss """crossentropy""" +187 95 regularizer """no""" +187 95 optimizer """adam""" +187 95 training_loop """lcwa""" +187 95 evaluator """rankbased""" +187 96 dataset """kinships""" +187 96 model """distmult""" +187 96 loss """crossentropy""" +187 96 regularizer """no""" +187 96 optimizer """adam""" +187 96 training_loop """lcwa""" +187 96 evaluator """rankbased""" +187 97 dataset """kinships""" +187 97 model """distmult""" +187 97 loss """crossentropy""" +187 97 regularizer """no""" +187 97 optimizer """adam""" +187 97 training_loop """lcwa""" +187 97 evaluator """rankbased""" +187 98 dataset """kinships""" +187 98 model """distmult""" +187 98 loss """crossentropy""" +187 98 regularizer """no""" +187 98 optimizer """adam""" +187 98 training_loop """lcwa""" +187 98 evaluator """rankbased""" +187 99 dataset """kinships""" +187 99 model """distmult""" +187 99 loss """crossentropy""" +187 99 regularizer """no""" +187 99 optimizer """adam""" +187 99 training_loop """lcwa""" +187 99 evaluator """rankbased""" +187 100 dataset """kinships""" +187 100 model """distmult""" +187 100 loss """crossentropy""" +187 100 regularizer """no""" +187 100 optimizer """adam""" 
+187 100 training_loop """lcwa""" +187 100 evaluator """rankbased""" +188 1 model.embedding_dim 2.0 +188 1 optimizer.lr 0.008631786276869625 +188 1 negative_sampler.num_negs_per_pos 94.0 +188 1 training.batch_size 2.0 +188 2 model.embedding_dim 0.0 +188 2 optimizer.lr 0.0016755212426637358 +188 2 negative_sampler.num_negs_per_pos 3.0 +188 2 training.batch_size 2.0 +188 3 model.embedding_dim 1.0 +188 3 optimizer.lr 0.006034609851877125 +188 3 negative_sampler.num_negs_per_pos 10.0 +188 3 training.batch_size 2.0 +188 4 model.embedding_dim 2.0 +188 4 optimizer.lr 0.009579699569933977 +188 4 negative_sampler.num_negs_per_pos 93.0 +188 4 training.batch_size 2.0 +188 5 model.embedding_dim 0.0 +188 5 optimizer.lr 0.007214373442524809 +188 5 negative_sampler.num_negs_per_pos 42.0 +188 5 training.batch_size 2.0 +188 6 model.embedding_dim 0.0 +188 6 optimizer.lr 0.002137974688134276 +188 6 negative_sampler.num_negs_per_pos 54.0 +188 6 training.batch_size 2.0 +188 7 model.embedding_dim 2.0 +188 7 optimizer.lr 0.0023060475080297718 +188 7 negative_sampler.num_negs_per_pos 32.0 +188 7 training.batch_size 0.0 +188 8 model.embedding_dim 2.0 +188 8 optimizer.lr 0.0033772656039331847 +188 8 negative_sampler.num_negs_per_pos 83.0 +188 8 training.batch_size 2.0 +188 9 model.embedding_dim 1.0 +188 9 optimizer.lr 0.08396531481593889 +188 9 negative_sampler.num_negs_per_pos 13.0 +188 9 training.batch_size 0.0 +188 10 model.embedding_dim 1.0 +188 10 optimizer.lr 0.02985546732369086 +188 10 negative_sampler.num_negs_per_pos 47.0 +188 10 training.batch_size 1.0 +188 11 model.embedding_dim 1.0 +188 11 optimizer.lr 0.02391312831630682 +188 11 negative_sampler.num_negs_per_pos 24.0 +188 11 training.batch_size 0.0 +188 12 model.embedding_dim 0.0 +188 12 optimizer.lr 0.009390724657227647 +188 12 negative_sampler.num_negs_per_pos 48.0 +188 12 training.batch_size 2.0 +188 13 model.embedding_dim 2.0 +188 13 optimizer.lr 0.022667867956670175 +188 13 negative_sampler.num_negs_per_pos 43.0 +188 13 
training.batch_size 2.0 +188 14 model.embedding_dim 1.0 +188 14 optimizer.lr 0.0012524215030968585 +188 14 negative_sampler.num_negs_per_pos 32.0 +188 14 training.batch_size 0.0 +188 15 model.embedding_dim 2.0 +188 15 optimizer.lr 0.03302481280244541 +188 15 negative_sampler.num_negs_per_pos 12.0 +188 15 training.batch_size 1.0 +188 16 model.embedding_dim 1.0 +188 16 optimizer.lr 0.0011358034014620484 +188 16 negative_sampler.num_negs_per_pos 65.0 +188 16 training.batch_size 1.0 +188 17 model.embedding_dim 2.0 +188 17 optimizer.lr 0.0018236579386651387 +188 17 negative_sampler.num_negs_per_pos 59.0 +188 17 training.batch_size 0.0 +188 18 model.embedding_dim 0.0 +188 18 optimizer.lr 0.05398453560908145 +188 18 negative_sampler.num_negs_per_pos 47.0 +188 18 training.batch_size 0.0 +188 19 model.embedding_dim 1.0 +188 19 optimizer.lr 0.0015554508192940533 +188 19 negative_sampler.num_negs_per_pos 80.0 +188 19 training.batch_size 2.0 +188 20 model.embedding_dim 0.0 +188 20 optimizer.lr 0.015222345979995638 +188 20 negative_sampler.num_negs_per_pos 92.0 +188 20 training.batch_size 2.0 +188 21 model.embedding_dim 1.0 +188 21 optimizer.lr 0.001559412728355341 +188 21 negative_sampler.num_negs_per_pos 0.0 +188 21 training.batch_size 1.0 +188 22 model.embedding_dim 0.0 +188 22 optimizer.lr 0.005822838478616946 +188 22 negative_sampler.num_negs_per_pos 2.0 +188 22 training.batch_size 1.0 +188 23 model.embedding_dim 1.0 +188 23 optimizer.lr 0.008439237601579832 +188 23 negative_sampler.num_negs_per_pos 48.0 +188 23 training.batch_size 0.0 +188 24 model.embedding_dim 0.0 +188 24 optimizer.lr 0.007206079692470637 +188 24 negative_sampler.num_negs_per_pos 80.0 +188 24 training.batch_size 2.0 +188 25 model.embedding_dim 1.0 +188 25 optimizer.lr 0.07011625641739906 +188 25 negative_sampler.num_negs_per_pos 22.0 +188 25 training.batch_size 2.0 +188 26 model.embedding_dim 0.0 +188 26 optimizer.lr 0.024090268114638803 +188 26 negative_sampler.num_negs_per_pos 1.0 +188 26 
training.batch_size 2.0 +188 27 model.embedding_dim 0.0 +188 27 optimizer.lr 0.0036293840840546827 +188 27 negative_sampler.num_negs_per_pos 42.0 +188 27 training.batch_size 2.0 +188 28 model.embedding_dim 1.0 +188 28 optimizer.lr 0.0015950848615610435 +188 28 negative_sampler.num_negs_per_pos 34.0 +188 28 training.batch_size 0.0 +188 29 model.embedding_dim 1.0 +188 29 optimizer.lr 0.0026278985882671603 +188 29 negative_sampler.num_negs_per_pos 32.0 +188 29 training.batch_size 0.0 +188 30 model.embedding_dim 0.0 +188 30 optimizer.lr 0.00569905807463592 +188 30 negative_sampler.num_negs_per_pos 51.0 +188 30 training.batch_size 2.0 +188 31 model.embedding_dim 0.0 +188 31 optimizer.lr 0.0054923551966297 +188 31 negative_sampler.num_negs_per_pos 61.0 +188 31 training.batch_size 0.0 +188 32 model.embedding_dim 1.0 +188 32 optimizer.lr 0.0012165980390315295 +188 32 negative_sampler.num_negs_per_pos 52.0 +188 32 training.batch_size 2.0 +188 33 model.embedding_dim 0.0 +188 33 optimizer.lr 0.001517409606963969 +188 33 negative_sampler.num_negs_per_pos 85.0 +188 33 training.batch_size 0.0 +188 34 model.embedding_dim 1.0 +188 34 optimizer.lr 0.01474532327340012 +188 34 negative_sampler.num_negs_per_pos 74.0 +188 34 training.batch_size 1.0 +188 35 model.embedding_dim 2.0 +188 35 optimizer.lr 0.03854186423739908 +188 35 negative_sampler.num_negs_per_pos 82.0 +188 35 training.batch_size 1.0 +188 36 model.embedding_dim 1.0 +188 36 optimizer.lr 0.011411737086342877 +188 36 negative_sampler.num_negs_per_pos 63.0 +188 36 training.batch_size 2.0 +188 37 model.embedding_dim 2.0 +188 37 optimizer.lr 0.001024354748514744 +188 37 negative_sampler.num_negs_per_pos 52.0 +188 37 training.batch_size 0.0 +188 38 model.embedding_dim 2.0 +188 38 optimizer.lr 0.0023550236504306116 +188 38 negative_sampler.num_negs_per_pos 25.0 +188 38 training.batch_size 2.0 +188 39 model.embedding_dim 0.0 +188 39 optimizer.lr 0.003463621294592639 +188 39 negative_sampler.num_negs_per_pos 14.0 +188 39 
training.batch_size 1.0 +188 40 model.embedding_dim 2.0 +188 40 optimizer.lr 0.00479061080436537 +188 40 negative_sampler.num_negs_per_pos 44.0 +188 40 training.batch_size 0.0 +188 41 model.embedding_dim 2.0 +188 41 optimizer.lr 0.0014393840178506527 +188 41 negative_sampler.num_negs_per_pos 69.0 +188 41 training.batch_size 2.0 +188 42 model.embedding_dim 1.0 +188 42 optimizer.lr 0.004819631241745508 +188 42 negative_sampler.num_negs_per_pos 66.0 +188 42 training.batch_size 0.0 +188 43 model.embedding_dim 2.0 +188 43 optimizer.lr 0.020977199013971316 +188 43 negative_sampler.num_negs_per_pos 72.0 +188 43 training.batch_size 0.0 +188 44 model.embedding_dim 0.0 +188 44 optimizer.lr 0.0016209669816419818 +188 44 negative_sampler.num_negs_per_pos 49.0 +188 44 training.batch_size 2.0 +188 45 model.embedding_dim 2.0 +188 45 optimizer.lr 0.014239944379137476 +188 45 negative_sampler.num_negs_per_pos 98.0 +188 45 training.batch_size 1.0 +188 46 model.embedding_dim 0.0 +188 46 optimizer.lr 0.05179358946381808 +188 46 negative_sampler.num_negs_per_pos 13.0 +188 46 training.batch_size 0.0 +188 47 model.embedding_dim 1.0 +188 47 optimizer.lr 0.03469019847018261 +188 47 negative_sampler.num_negs_per_pos 92.0 +188 47 training.batch_size 2.0 +188 48 model.embedding_dim 2.0 +188 48 optimizer.lr 0.0011342684370581864 +188 48 negative_sampler.num_negs_per_pos 37.0 +188 48 training.batch_size 1.0 +188 49 model.embedding_dim 0.0 +188 49 optimizer.lr 0.0077648732599427586 +188 49 negative_sampler.num_negs_per_pos 28.0 +188 49 training.batch_size 0.0 +188 50 model.embedding_dim 1.0 +188 50 optimizer.lr 0.09578054864090917 +188 50 negative_sampler.num_negs_per_pos 58.0 +188 50 training.batch_size 0.0 +188 51 model.embedding_dim 1.0 +188 51 optimizer.lr 0.00222334086611416 +188 51 negative_sampler.num_negs_per_pos 9.0 +188 51 training.batch_size 2.0 +188 52 model.embedding_dim 2.0 +188 52 optimizer.lr 0.025865744358966308 +188 52 negative_sampler.num_negs_per_pos 84.0 +188 52 
training.batch_size 1.0 +188 53 model.embedding_dim 1.0 +188 53 optimizer.lr 0.002455819212311402 +188 53 negative_sampler.num_negs_per_pos 46.0 +188 53 training.batch_size 1.0 +188 54 model.embedding_dim 0.0 +188 54 optimizer.lr 0.0035645466432547785 +188 54 negative_sampler.num_negs_per_pos 21.0 +188 54 training.batch_size 1.0 +188 55 model.embedding_dim 2.0 +188 55 optimizer.lr 0.0050718468472108055 +188 55 negative_sampler.num_negs_per_pos 8.0 +188 55 training.batch_size 2.0 +188 56 model.embedding_dim 2.0 +188 56 optimizer.lr 0.013011746412522892 +188 56 negative_sampler.num_negs_per_pos 80.0 +188 56 training.batch_size 2.0 +188 57 model.embedding_dim 2.0 +188 57 optimizer.lr 0.011516989342486487 +188 57 negative_sampler.num_negs_per_pos 33.0 +188 57 training.batch_size 2.0 +188 58 model.embedding_dim 0.0 +188 58 optimizer.lr 0.02175198073571344 +188 58 negative_sampler.num_negs_per_pos 76.0 +188 58 training.batch_size 2.0 +188 59 model.embedding_dim 2.0 +188 59 optimizer.lr 0.0021605477022787908 +188 59 negative_sampler.num_negs_per_pos 45.0 +188 59 training.batch_size 0.0 +188 60 model.embedding_dim 1.0 +188 60 optimizer.lr 0.06949188394154633 +188 60 negative_sampler.num_negs_per_pos 13.0 +188 60 training.batch_size 0.0 +188 61 model.embedding_dim 2.0 +188 61 optimizer.lr 0.010251969794795448 +188 61 negative_sampler.num_negs_per_pos 62.0 +188 61 training.batch_size 2.0 +188 62 model.embedding_dim 2.0 +188 62 optimizer.lr 0.0012545662336292244 +188 62 negative_sampler.num_negs_per_pos 2.0 +188 62 training.batch_size 0.0 +188 63 model.embedding_dim 0.0 +188 63 optimizer.lr 0.00243380086862547 +188 63 negative_sampler.num_negs_per_pos 25.0 +188 63 training.batch_size 2.0 +188 64 model.embedding_dim 2.0 +188 64 optimizer.lr 0.0024884518967083448 +188 64 negative_sampler.num_negs_per_pos 72.0 +188 64 training.batch_size 0.0 +188 65 model.embedding_dim 0.0 +188 65 optimizer.lr 0.0024180695128953097 +188 65 negative_sampler.num_negs_per_pos 12.0 +188 65 
training.batch_size 2.0 +188 66 model.embedding_dim 2.0 +188 66 optimizer.lr 0.0039558288028895845 +188 66 negative_sampler.num_negs_per_pos 88.0 +188 66 training.batch_size 2.0 +188 67 model.embedding_dim 0.0 +188 67 optimizer.lr 0.008431288903239891 +188 67 negative_sampler.num_negs_per_pos 48.0 +188 67 training.batch_size 0.0 +188 68 model.embedding_dim 2.0 +188 68 optimizer.lr 0.035653088900700874 +188 68 negative_sampler.num_negs_per_pos 13.0 +188 68 training.batch_size 1.0 +188 69 model.embedding_dim 1.0 +188 69 optimizer.lr 0.01780068426366306 +188 69 negative_sampler.num_negs_per_pos 47.0 +188 69 training.batch_size 0.0 +188 70 model.embedding_dim 2.0 +188 70 optimizer.lr 0.002073101962126284 +188 70 negative_sampler.num_negs_per_pos 43.0 +188 70 training.batch_size 0.0 +188 71 model.embedding_dim 1.0 +188 71 optimizer.lr 0.09016024286423938 +188 71 negative_sampler.num_negs_per_pos 58.0 +188 71 training.batch_size 1.0 +188 72 model.embedding_dim 0.0 +188 72 optimizer.lr 0.001244712839843931 +188 72 negative_sampler.num_negs_per_pos 51.0 +188 72 training.batch_size 1.0 +188 73 model.embedding_dim 1.0 +188 73 optimizer.lr 0.09306701038018063 +188 73 negative_sampler.num_negs_per_pos 93.0 +188 73 training.batch_size 2.0 +188 74 model.embedding_dim 2.0 +188 74 optimizer.lr 0.06232342800929224 +188 74 negative_sampler.num_negs_per_pos 39.0 +188 74 training.batch_size 2.0 +188 75 model.embedding_dim 1.0 +188 75 optimizer.lr 0.0017453917087472375 +188 75 negative_sampler.num_negs_per_pos 36.0 +188 75 training.batch_size 1.0 +188 76 model.embedding_dim 1.0 +188 76 optimizer.lr 0.002470829739727385 +188 76 negative_sampler.num_negs_per_pos 75.0 +188 76 training.batch_size 1.0 +188 77 model.embedding_dim 1.0 +188 77 optimizer.lr 0.08314822810922644 +188 77 negative_sampler.num_negs_per_pos 23.0 +188 77 training.batch_size 2.0 +188 78 model.embedding_dim 0.0 +188 78 optimizer.lr 0.010668566563567603 +188 78 negative_sampler.num_negs_per_pos 29.0 +188 78 
training.batch_size 1.0 +188 79 model.embedding_dim 2.0 +188 79 optimizer.lr 0.04847989953521415 +188 79 negative_sampler.num_negs_per_pos 45.0 +188 79 training.batch_size 1.0 +188 80 model.embedding_dim 0.0 +188 80 optimizer.lr 0.04925305458341548 +188 80 negative_sampler.num_negs_per_pos 82.0 +188 80 training.batch_size 2.0 +188 81 model.embedding_dim 0.0 +188 81 optimizer.lr 0.0013820609832087133 +188 81 negative_sampler.num_negs_per_pos 4.0 +188 81 training.batch_size 1.0 +188 82 model.embedding_dim 1.0 +188 82 optimizer.lr 0.01040011455703377 +188 82 negative_sampler.num_negs_per_pos 24.0 +188 82 training.batch_size 2.0 +188 83 model.embedding_dim 0.0 +188 83 optimizer.lr 0.023952564219901284 +188 83 negative_sampler.num_negs_per_pos 25.0 +188 83 training.batch_size 0.0 +188 84 model.embedding_dim 0.0 +188 84 optimizer.lr 0.001877768522554595 +188 84 negative_sampler.num_negs_per_pos 77.0 +188 84 training.batch_size 2.0 +188 85 model.embedding_dim 0.0 +188 85 optimizer.lr 0.022443878573934838 +188 85 negative_sampler.num_negs_per_pos 28.0 +188 85 training.batch_size 1.0 +188 86 model.embedding_dim 2.0 +188 86 optimizer.lr 0.0013019728707348672 +188 86 negative_sampler.num_negs_per_pos 31.0 +188 86 training.batch_size 1.0 +188 87 model.embedding_dim 2.0 +188 87 optimizer.lr 0.001217908652021698 +188 87 negative_sampler.num_negs_per_pos 91.0 +188 87 training.batch_size 2.0 +188 88 model.embedding_dim 2.0 +188 88 optimizer.lr 0.03699016920277624 +188 88 negative_sampler.num_negs_per_pos 2.0 +188 88 training.batch_size 2.0 +188 89 model.embedding_dim 2.0 +188 89 optimizer.lr 0.053631333898362804 +188 89 negative_sampler.num_negs_per_pos 35.0 +188 89 training.batch_size 0.0 +188 90 model.embedding_dim 2.0 +188 90 optimizer.lr 0.06525456206371388 +188 90 negative_sampler.num_negs_per_pos 45.0 +188 90 training.batch_size 1.0 +188 91 model.embedding_dim 0.0 +188 91 optimizer.lr 0.004556497002649864 +188 91 negative_sampler.num_negs_per_pos 61.0 +188 91 
training.batch_size 0.0 +188 92 model.embedding_dim 2.0 +188 92 optimizer.lr 0.09419834679550936 +188 92 negative_sampler.num_negs_per_pos 65.0 +188 92 training.batch_size 0.0 +188 93 model.embedding_dim 2.0 +188 93 optimizer.lr 0.022141512203752236 +188 93 negative_sampler.num_negs_per_pos 50.0 +188 93 training.batch_size 1.0 +188 94 model.embedding_dim 0.0 +188 94 optimizer.lr 0.09036564454875128 +188 94 negative_sampler.num_negs_per_pos 72.0 +188 94 training.batch_size 1.0 +188 95 model.embedding_dim 2.0 +188 95 optimizer.lr 0.005169851590347729 +188 95 negative_sampler.num_negs_per_pos 20.0 +188 95 training.batch_size 2.0 +188 96 model.embedding_dim 0.0 +188 96 optimizer.lr 0.0022350906690045053 +188 96 negative_sampler.num_negs_per_pos 10.0 +188 96 training.batch_size 0.0 +188 97 model.embedding_dim 0.0 +188 97 optimizer.lr 0.05736404477876238 +188 97 negative_sampler.num_negs_per_pos 18.0 +188 97 training.batch_size 1.0 +188 98 model.embedding_dim 0.0 +188 98 optimizer.lr 0.00422530576475271 +188 98 negative_sampler.num_negs_per_pos 72.0 +188 98 training.batch_size 2.0 +188 99 model.embedding_dim 0.0 +188 99 optimizer.lr 0.003953293101018161 +188 99 negative_sampler.num_negs_per_pos 98.0 +188 99 training.batch_size 0.0 +188 100 model.embedding_dim 0.0 +188 100 optimizer.lr 0.019849326361146526 +188 100 negative_sampler.num_negs_per_pos 59.0 +188 100 training.batch_size 0.0 +188 1 dataset """kinships""" +188 1 model """distmult""" +188 1 loss """bceaftersigmoid""" +188 1 regularizer """no""" +188 1 optimizer """adam""" +188 1 training_loop """owa""" +188 1 negative_sampler """basic""" +188 1 evaluator """rankbased""" +188 2 dataset """kinships""" +188 2 model """distmult""" +188 2 loss """bceaftersigmoid""" +188 2 regularizer """no""" +188 2 optimizer """adam""" +188 2 training_loop """owa""" +188 2 negative_sampler """basic""" +188 2 evaluator """rankbased""" +188 3 dataset """kinships""" +188 3 model """distmult""" +188 3 loss """bceaftersigmoid""" +188 3 
regularizer """no""" +188 3 optimizer """adam""" +188 3 training_loop """owa""" +188 3 negative_sampler """basic""" +188 3 evaluator """rankbased""" +188 4 dataset """kinships""" +188 4 model """distmult""" +188 4 loss """bceaftersigmoid""" +188 4 regularizer """no""" +188 4 optimizer """adam""" +188 4 training_loop """owa""" +188 4 negative_sampler """basic""" +188 4 evaluator """rankbased""" +188 5 dataset """kinships""" +188 5 model """distmult""" +188 5 loss """bceaftersigmoid""" +188 5 regularizer """no""" +188 5 optimizer """adam""" +188 5 training_loop """owa""" +188 5 negative_sampler """basic""" +188 5 evaluator """rankbased""" +188 6 dataset """kinships""" +188 6 model """distmult""" +188 6 loss """bceaftersigmoid""" +188 6 regularizer """no""" +188 6 optimizer """adam""" +188 6 training_loop """owa""" +188 6 negative_sampler """basic""" +188 6 evaluator """rankbased""" +188 7 dataset """kinships""" +188 7 model """distmult""" +188 7 loss """bceaftersigmoid""" +188 7 regularizer """no""" +188 7 optimizer """adam""" +188 7 training_loop """owa""" +188 7 negative_sampler """basic""" +188 7 evaluator """rankbased""" +188 8 dataset """kinships""" +188 8 model """distmult""" +188 8 loss """bceaftersigmoid""" +188 8 regularizer """no""" +188 8 optimizer """adam""" +188 8 training_loop """owa""" +188 8 negative_sampler """basic""" +188 8 evaluator """rankbased""" +188 9 dataset """kinships""" +188 9 model """distmult""" +188 9 loss """bceaftersigmoid""" +188 9 regularizer """no""" +188 9 optimizer """adam""" +188 9 training_loop """owa""" +188 9 negative_sampler """basic""" +188 9 evaluator """rankbased""" +188 10 dataset """kinships""" +188 10 model """distmult""" +188 10 loss """bceaftersigmoid""" +188 10 regularizer """no""" +188 10 optimizer """adam""" +188 10 training_loop """owa""" +188 10 negative_sampler """basic""" +188 10 evaluator """rankbased""" +188 11 dataset """kinships""" +188 11 model """distmult""" +188 11 loss """bceaftersigmoid""" +188 11 
regularizer """no""" +188 11 optimizer """adam""" +188 11 training_loop """owa""" +188 11 negative_sampler """basic""" +188 11 evaluator """rankbased""" +188 12 dataset """kinships""" +188 12 model """distmult""" +188 12 loss """bceaftersigmoid""" +188 12 regularizer """no""" +188 12 optimizer """adam""" +188 12 training_loop """owa""" +188 12 negative_sampler """basic""" +188 12 evaluator """rankbased""" +188 13 dataset """kinships""" +188 13 model """distmult""" +188 13 loss """bceaftersigmoid""" +188 13 regularizer """no""" +188 13 optimizer """adam""" +188 13 training_loop """owa""" +188 13 negative_sampler """basic""" +188 13 evaluator """rankbased""" +188 14 dataset """kinships""" +188 14 model """distmult""" +188 14 loss """bceaftersigmoid""" +188 14 regularizer """no""" +188 14 optimizer """adam""" +188 14 training_loop """owa""" +188 14 negative_sampler """basic""" +188 14 evaluator """rankbased""" +188 15 dataset """kinships""" +188 15 model """distmult""" +188 15 loss """bceaftersigmoid""" +188 15 regularizer """no""" +188 15 optimizer """adam""" +188 15 training_loop """owa""" +188 15 negative_sampler """basic""" +188 15 evaluator """rankbased""" +188 16 dataset """kinships""" +188 16 model """distmult""" +188 16 loss """bceaftersigmoid""" +188 16 regularizer """no""" +188 16 optimizer """adam""" +188 16 training_loop """owa""" +188 16 negative_sampler """basic""" +188 16 evaluator """rankbased""" +188 17 dataset """kinships""" +188 17 model """distmult""" +188 17 loss """bceaftersigmoid""" +188 17 regularizer """no""" +188 17 optimizer """adam""" +188 17 training_loop """owa""" +188 17 negative_sampler """basic""" +188 17 evaluator """rankbased""" +188 18 dataset """kinships""" +188 18 model """distmult""" +188 18 loss """bceaftersigmoid""" +188 18 regularizer """no""" +188 18 optimizer """adam""" +188 18 training_loop """owa""" +188 18 negative_sampler """basic""" +188 18 evaluator """rankbased""" +188 19 dataset """kinships""" +188 19 model 
"""distmult""" +188 19 loss """bceaftersigmoid""" +188 19 regularizer """no""" +188 19 optimizer """adam""" +188 19 training_loop """owa""" +188 19 negative_sampler """basic""" +188 19 evaluator """rankbased""" +188 20 dataset """kinships""" +188 20 model """distmult""" +188 20 loss """bceaftersigmoid""" +188 20 regularizer """no""" +188 20 optimizer """adam""" +188 20 training_loop """owa""" +188 20 negative_sampler """basic""" +188 20 evaluator """rankbased""" +188 21 dataset """kinships""" +188 21 model """distmult""" +188 21 loss """bceaftersigmoid""" +188 21 regularizer """no""" +188 21 optimizer """adam""" +188 21 training_loop """owa""" +188 21 negative_sampler """basic""" +188 21 evaluator """rankbased""" +188 22 dataset """kinships""" +188 22 model """distmult""" +188 22 loss """bceaftersigmoid""" +188 22 regularizer """no""" +188 22 optimizer """adam""" +188 22 training_loop """owa""" +188 22 negative_sampler """basic""" +188 22 evaluator """rankbased""" +188 23 dataset """kinships""" +188 23 model """distmult""" +188 23 loss """bceaftersigmoid""" +188 23 regularizer """no""" +188 23 optimizer """adam""" +188 23 training_loop """owa""" +188 23 negative_sampler """basic""" +188 23 evaluator """rankbased""" +188 24 dataset """kinships""" +188 24 model """distmult""" +188 24 loss """bceaftersigmoid""" +188 24 regularizer """no""" +188 24 optimizer """adam""" +188 24 training_loop """owa""" +188 24 negative_sampler """basic""" +188 24 evaluator """rankbased""" +188 25 dataset """kinships""" +188 25 model """distmult""" +188 25 loss """bceaftersigmoid""" +188 25 regularizer """no""" +188 25 optimizer """adam""" +188 25 training_loop """owa""" +188 25 negative_sampler """basic""" +188 25 evaluator """rankbased""" +188 26 dataset """kinships""" +188 26 model """distmult""" +188 26 loss """bceaftersigmoid""" +188 26 regularizer """no""" +188 26 optimizer """adam""" +188 26 training_loop """owa""" +188 26 negative_sampler """basic""" +188 26 evaluator 
"""rankbased""" +188 27 dataset """kinships""" +188 27 model """distmult""" +188 27 loss """bceaftersigmoid""" +188 27 regularizer """no""" +188 27 optimizer """adam""" +188 27 training_loop """owa""" +188 27 negative_sampler """basic""" +188 27 evaluator """rankbased""" +188 28 dataset """kinships""" +188 28 model """distmult""" +188 28 loss """bceaftersigmoid""" +188 28 regularizer """no""" +188 28 optimizer """adam""" +188 28 training_loop """owa""" +188 28 negative_sampler """basic""" +188 28 evaluator """rankbased""" +188 29 dataset """kinships""" +188 29 model """distmult""" +188 29 loss """bceaftersigmoid""" +188 29 regularizer """no""" +188 29 optimizer """adam""" +188 29 training_loop """owa""" +188 29 negative_sampler """basic""" +188 29 evaluator """rankbased""" +188 30 dataset """kinships""" +188 30 model """distmult""" +188 30 loss """bceaftersigmoid""" +188 30 regularizer """no""" +188 30 optimizer """adam""" +188 30 training_loop """owa""" +188 30 negative_sampler """basic""" +188 30 evaluator """rankbased""" +188 31 dataset """kinships""" +188 31 model """distmult""" +188 31 loss """bceaftersigmoid""" +188 31 regularizer """no""" +188 31 optimizer """adam""" +188 31 training_loop """owa""" +188 31 negative_sampler """basic""" +188 31 evaluator """rankbased""" +188 32 dataset """kinships""" +188 32 model """distmult""" +188 32 loss """bceaftersigmoid""" +188 32 regularizer """no""" +188 32 optimizer """adam""" +188 32 training_loop """owa""" +188 32 negative_sampler """basic""" +188 32 evaluator """rankbased""" +188 33 dataset """kinships""" +188 33 model """distmult""" +188 33 loss """bceaftersigmoid""" +188 33 regularizer """no""" +188 33 optimizer """adam""" +188 33 training_loop """owa""" +188 33 negative_sampler """basic""" +188 33 evaluator """rankbased""" +188 34 dataset """kinships""" +188 34 model """distmult""" +188 34 loss """bceaftersigmoid""" +188 34 regularizer """no""" +188 34 optimizer """adam""" +188 34 training_loop """owa""" +188 
34 negative_sampler """basic""" +188 34 evaluator """rankbased""" +188 35 dataset """kinships""" +188 35 model """distmult""" +188 35 loss """bceaftersigmoid""" +188 35 regularizer """no""" +188 35 optimizer """adam""" +188 35 training_loop """owa""" +188 35 negative_sampler """basic""" +188 35 evaluator """rankbased""" +188 36 dataset """kinships""" +188 36 model """distmult""" +188 36 loss """bceaftersigmoid""" +188 36 regularizer """no""" +188 36 optimizer """adam""" +188 36 training_loop """owa""" +188 36 negative_sampler """basic""" +188 36 evaluator """rankbased""" +188 37 dataset """kinships""" +188 37 model """distmult""" +188 37 loss """bceaftersigmoid""" +188 37 regularizer """no""" +188 37 optimizer """adam""" +188 37 training_loop """owa""" +188 37 negative_sampler """basic""" +188 37 evaluator """rankbased""" +188 38 dataset """kinships""" +188 38 model """distmult""" +188 38 loss """bceaftersigmoid""" +188 38 regularizer """no""" +188 38 optimizer """adam""" +188 38 training_loop """owa""" +188 38 negative_sampler """basic""" +188 38 evaluator """rankbased""" +188 39 dataset """kinships""" +188 39 model """distmult""" +188 39 loss """bceaftersigmoid""" +188 39 regularizer """no""" +188 39 optimizer """adam""" +188 39 training_loop """owa""" +188 39 negative_sampler """basic""" +188 39 evaluator """rankbased""" +188 40 dataset """kinships""" +188 40 model """distmult""" +188 40 loss """bceaftersigmoid""" +188 40 regularizer """no""" +188 40 optimizer """adam""" +188 40 training_loop """owa""" +188 40 negative_sampler """basic""" +188 40 evaluator """rankbased""" +188 41 dataset """kinships""" +188 41 model """distmult""" +188 41 loss """bceaftersigmoid""" +188 41 regularizer """no""" +188 41 optimizer """adam""" +188 41 training_loop """owa""" +188 41 negative_sampler """basic""" +188 41 evaluator """rankbased""" +188 42 dataset """kinships""" +188 42 model """distmult""" +188 42 loss """bceaftersigmoid""" +188 42 regularizer """no""" +188 42 optimizer 
"""adam""" +188 42 training_loop """owa""" +188 42 negative_sampler """basic""" +188 42 evaluator """rankbased""" +188 43 dataset """kinships""" +188 43 model """distmult""" +188 43 loss """bceaftersigmoid""" +188 43 regularizer """no""" +188 43 optimizer """adam""" +188 43 training_loop """owa""" +188 43 negative_sampler """basic""" +188 43 evaluator """rankbased""" +188 44 dataset """kinships""" +188 44 model """distmult""" +188 44 loss """bceaftersigmoid""" +188 44 regularizer """no""" +188 44 optimizer """adam""" +188 44 training_loop """owa""" +188 44 negative_sampler """basic""" +188 44 evaluator """rankbased""" +188 45 dataset """kinships""" +188 45 model """distmult""" +188 45 loss """bceaftersigmoid""" +188 45 regularizer """no""" +188 45 optimizer """adam""" +188 45 training_loop """owa""" +188 45 negative_sampler """basic""" +188 45 evaluator """rankbased""" +188 46 dataset """kinships""" +188 46 model """distmult""" +188 46 loss """bceaftersigmoid""" +188 46 regularizer """no""" +188 46 optimizer """adam""" +188 46 training_loop """owa""" +188 46 negative_sampler """basic""" +188 46 evaluator """rankbased""" +188 47 dataset """kinships""" +188 47 model """distmult""" +188 47 loss """bceaftersigmoid""" +188 47 regularizer """no""" +188 47 optimizer """adam""" +188 47 training_loop """owa""" +188 47 negative_sampler """basic""" +188 47 evaluator """rankbased""" +188 48 dataset """kinships""" +188 48 model """distmult""" +188 48 loss """bceaftersigmoid""" +188 48 regularizer """no""" +188 48 optimizer """adam""" +188 48 training_loop """owa""" +188 48 negative_sampler """basic""" +188 48 evaluator """rankbased""" +188 49 dataset """kinships""" +188 49 model """distmult""" +188 49 loss """bceaftersigmoid""" +188 49 regularizer """no""" +188 49 optimizer """adam""" +188 49 training_loop """owa""" +188 49 negative_sampler """basic""" +188 49 evaluator """rankbased""" +188 50 dataset """kinships""" +188 50 model """distmult""" +188 50 loss 
"""bceaftersigmoid""" +188 50 regularizer """no""" +188 50 optimizer """adam""" +188 50 training_loop """owa""" +188 50 negative_sampler """basic""" +188 50 evaluator """rankbased""" +188 51 dataset """kinships""" +188 51 model """distmult""" +188 51 loss """bceaftersigmoid""" +188 51 regularizer """no""" +188 51 optimizer """adam""" +188 51 training_loop """owa""" +188 51 negative_sampler """basic""" +188 51 evaluator """rankbased""" +188 52 dataset """kinships""" +188 52 model """distmult""" +188 52 loss """bceaftersigmoid""" +188 52 regularizer """no""" +188 52 optimizer """adam""" +188 52 training_loop """owa""" +188 52 negative_sampler """basic""" +188 52 evaluator """rankbased""" +188 53 dataset """kinships""" +188 53 model """distmult""" +188 53 loss """bceaftersigmoid""" +188 53 regularizer """no""" +188 53 optimizer """adam""" +188 53 training_loop """owa""" +188 53 negative_sampler """basic""" +188 53 evaluator """rankbased""" +188 54 dataset """kinships""" +188 54 model """distmult""" +188 54 loss """bceaftersigmoid""" +188 54 regularizer """no""" +188 54 optimizer """adam""" +188 54 training_loop """owa""" +188 54 negative_sampler """basic""" +188 54 evaluator """rankbased""" +188 55 dataset """kinships""" +188 55 model """distmult""" +188 55 loss """bceaftersigmoid""" +188 55 regularizer """no""" +188 55 optimizer """adam""" +188 55 training_loop """owa""" +188 55 negative_sampler """basic""" +188 55 evaluator """rankbased""" +188 56 dataset """kinships""" +188 56 model """distmult""" +188 56 loss """bceaftersigmoid""" +188 56 regularizer """no""" +188 56 optimizer """adam""" +188 56 training_loop """owa""" +188 56 negative_sampler """basic""" +188 56 evaluator """rankbased""" +188 57 dataset """kinships""" +188 57 model """distmult""" +188 57 loss """bceaftersigmoid""" +188 57 regularizer """no""" +188 57 optimizer """adam""" +188 57 training_loop """owa""" +188 57 negative_sampler """basic""" +188 57 evaluator """rankbased""" +188 58 dataset 
"""kinships""" +188 58 model """distmult""" +188 58 loss """bceaftersigmoid""" +188 58 regularizer """no""" +188 58 optimizer """adam""" +188 58 training_loop """owa""" +188 58 negative_sampler """basic""" +188 58 evaluator """rankbased""" +188 59 dataset """kinships""" +188 59 model """distmult""" +188 59 loss """bceaftersigmoid""" +188 59 regularizer """no""" +188 59 optimizer """adam""" +188 59 training_loop """owa""" +188 59 negative_sampler """basic""" +188 59 evaluator """rankbased""" +188 60 dataset """kinships""" +188 60 model """distmult""" +188 60 loss """bceaftersigmoid""" +188 60 regularizer """no""" +188 60 optimizer """adam""" +188 60 training_loop """owa""" +188 60 negative_sampler """basic""" +188 60 evaluator """rankbased""" +188 61 dataset """kinships""" +188 61 model """distmult""" +188 61 loss """bceaftersigmoid""" +188 61 regularizer """no""" +188 61 optimizer """adam""" +188 61 training_loop """owa""" +188 61 negative_sampler """basic""" +188 61 evaluator """rankbased""" +188 62 dataset """kinships""" +188 62 model """distmult""" +188 62 loss """bceaftersigmoid""" +188 62 regularizer """no""" +188 62 optimizer """adam""" +188 62 training_loop """owa""" +188 62 negative_sampler """basic""" +188 62 evaluator """rankbased""" +188 63 dataset """kinships""" +188 63 model """distmult""" +188 63 loss """bceaftersigmoid""" +188 63 regularizer """no""" +188 63 optimizer """adam""" +188 63 training_loop """owa""" +188 63 negative_sampler """basic""" +188 63 evaluator """rankbased""" +188 64 dataset """kinships""" +188 64 model """distmult""" +188 64 loss """bceaftersigmoid""" +188 64 regularizer """no""" +188 64 optimizer """adam""" +188 64 training_loop """owa""" +188 64 negative_sampler """basic""" +188 64 evaluator """rankbased""" +188 65 dataset """kinships""" +188 65 model """distmult""" +188 65 loss """bceaftersigmoid""" +188 65 regularizer """no""" +188 65 optimizer """adam""" +188 65 training_loop """owa""" +188 65 negative_sampler """basic""" 
+188 65 evaluator """rankbased""" +188 66 dataset """kinships""" +188 66 model """distmult""" +188 66 loss """bceaftersigmoid""" +188 66 regularizer """no""" +188 66 optimizer """adam""" +188 66 training_loop """owa""" +188 66 negative_sampler """basic""" +188 66 evaluator """rankbased""" +188 67 dataset """kinships""" +188 67 model """distmult""" +188 67 loss """bceaftersigmoid""" +188 67 regularizer """no""" +188 67 optimizer """adam""" +188 67 training_loop """owa""" +188 67 negative_sampler """basic""" +188 67 evaluator """rankbased""" +188 68 dataset """kinships""" +188 68 model """distmult""" +188 68 loss """bceaftersigmoid""" +188 68 regularizer """no""" +188 68 optimizer """adam""" +188 68 training_loop """owa""" +188 68 negative_sampler """basic""" +188 68 evaluator """rankbased""" +188 69 dataset """kinships""" +188 69 model """distmult""" +188 69 loss """bceaftersigmoid""" +188 69 regularizer """no""" +188 69 optimizer """adam""" +188 69 training_loop """owa""" +188 69 negative_sampler """basic""" +188 69 evaluator """rankbased""" +188 70 dataset """kinships""" +188 70 model """distmult""" +188 70 loss """bceaftersigmoid""" +188 70 regularizer """no""" +188 70 optimizer """adam""" +188 70 training_loop """owa""" +188 70 negative_sampler """basic""" +188 70 evaluator """rankbased""" +188 71 dataset """kinships""" +188 71 model """distmult""" +188 71 loss """bceaftersigmoid""" +188 71 regularizer """no""" +188 71 optimizer """adam""" +188 71 training_loop """owa""" +188 71 negative_sampler """basic""" +188 71 evaluator """rankbased""" +188 72 dataset """kinships""" +188 72 model """distmult""" +188 72 loss """bceaftersigmoid""" +188 72 regularizer """no""" +188 72 optimizer """adam""" +188 72 training_loop """owa""" +188 72 negative_sampler """basic""" +188 72 evaluator """rankbased""" +188 73 dataset """kinships""" +188 73 model """distmult""" +188 73 loss """bceaftersigmoid""" +188 73 regularizer """no""" +188 73 optimizer """adam""" +188 73 
training_loop """owa""" +188 73 negative_sampler """basic""" +188 73 evaluator """rankbased""" +188 74 dataset """kinships""" +188 74 model """distmult""" +188 74 loss """bceaftersigmoid""" +188 74 regularizer """no""" +188 74 optimizer """adam""" +188 74 training_loop """owa""" +188 74 negative_sampler """basic""" +188 74 evaluator """rankbased""" +188 75 dataset """kinships""" +188 75 model """distmult""" +188 75 loss """bceaftersigmoid""" +188 75 regularizer """no""" +188 75 optimizer """adam""" +188 75 training_loop """owa""" +188 75 negative_sampler """basic""" +188 75 evaluator """rankbased""" +188 76 dataset """kinships""" +188 76 model """distmult""" +188 76 loss """bceaftersigmoid""" +188 76 regularizer """no""" +188 76 optimizer """adam""" +188 76 training_loop """owa""" +188 76 negative_sampler """basic""" +188 76 evaluator """rankbased""" +188 77 dataset """kinships""" +188 77 model """distmult""" +188 77 loss """bceaftersigmoid""" +188 77 regularizer """no""" +188 77 optimizer """adam""" +188 77 training_loop """owa""" +188 77 negative_sampler """basic""" +188 77 evaluator """rankbased""" +188 78 dataset """kinships""" +188 78 model """distmult""" +188 78 loss """bceaftersigmoid""" +188 78 regularizer """no""" +188 78 optimizer """adam""" +188 78 training_loop """owa""" +188 78 negative_sampler """basic""" +188 78 evaluator """rankbased""" +188 79 dataset """kinships""" +188 79 model """distmult""" +188 79 loss """bceaftersigmoid""" +188 79 regularizer """no""" +188 79 optimizer """adam""" +188 79 training_loop """owa""" +188 79 negative_sampler """basic""" +188 79 evaluator """rankbased""" +188 80 dataset """kinships""" +188 80 model """distmult""" +188 80 loss """bceaftersigmoid""" +188 80 regularizer """no""" +188 80 optimizer """adam""" +188 80 training_loop """owa""" +188 80 negative_sampler """basic""" +188 80 evaluator """rankbased""" +188 81 dataset """kinships""" +188 81 model """distmult""" +188 81 loss """bceaftersigmoid""" +188 81 
regularizer """no""" +188 81 optimizer """adam""" +188 81 training_loop """owa""" +188 81 negative_sampler """basic""" +188 81 evaluator """rankbased""" +188 82 dataset """kinships""" +188 82 model """distmult""" +188 82 loss """bceaftersigmoid""" +188 82 regularizer """no""" +188 82 optimizer """adam""" +188 82 training_loop """owa""" +188 82 negative_sampler """basic""" +188 82 evaluator """rankbased""" +188 83 dataset """kinships""" +188 83 model """distmult""" +188 83 loss """bceaftersigmoid""" +188 83 regularizer """no""" +188 83 optimizer """adam""" +188 83 training_loop """owa""" +188 83 negative_sampler """basic""" +188 83 evaluator """rankbased""" +188 84 dataset """kinships""" +188 84 model """distmult""" +188 84 loss """bceaftersigmoid""" +188 84 regularizer """no""" +188 84 optimizer """adam""" +188 84 training_loop """owa""" +188 84 negative_sampler """basic""" +188 84 evaluator """rankbased""" +188 85 dataset """kinships""" +188 85 model """distmult""" +188 85 loss """bceaftersigmoid""" +188 85 regularizer """no""" +188 85 optimizer """adam""" +188 85 training_loop """owa""" +188 85 negative_sampler """basic""" +188 85 evaluator """rankbased""" +188 86 dataset """kinships""" +188 86 model """distmult""" +188 86 loss """bceaftersigmoid""" +188 86 regularizer """no""" +188 86 optimizer """adam""" +188 86 training_loop """owa""" +188 86 negative_sampler """basic""" +188 86 evaluator """rankbased""" +188 87 dataset """kinships""" +188 87 model """distmult""" +188 87 loss """bceaftersigmoid""" +188 87 regularizer """no""" +188 87 optimizer """adam""" +188 87 training_loop """owa""" +188 87 negative_sampler """basic""" +188 87 evaluator """rankbased""" +188 88 dataset """kinships""" +188 88 model """distmult""" +188 88 loss """bceaftersigmoid""" +188 88 regularizer """no""" +188 88 optimizer """adam""" +188 88 training_loop """owa""" +188 88 negative_sampler """basic""" +188 88 evaluator """rankbased""" +188 89 dataset """kinships""" +188 89 model 
"""distmult""" +188 89 loss """bceaftersigmoid""" +188 89 regularizer """no""" +188 89 optimizer """adam""" +188 89 training_loop """owa""" +188 89 negative_sampler """basic""" +188 89 evaluator """rankbased""" +188 90 dataset """kinships""" +188 90 model """distmult""" +188 90 loss """bceaftersigmoid""" +188 90 regularizer """no""" +188 90 optimizer """adam""" +188 90 training_loop """owa""" +188 90 negative_sampler """basic""" +188 90 evaluator """rankbased""" +188 91 dataset """kinships""" +188 91 model """distmult""" +188 91 loss """bceaftersigmoid""" +188 91 regularizer """no""" +188 91 optimizer """adam""" +188 91 training_loop """owa""" +188 91 negative_sampler """basic""" +188 91 evaluator """rankbased""" +188 92 dataset """kinships""" +188 92 model """distmult""" +188 92 loss """bceaftersigmoid""" +188 92 regularizer """no""" +188 92 optimizer """adam""" +188 92 training_loop """owa""" +188 92 negative_sampler """basic""" +188 92 evaluator """rankbased""" +188 93 dataset """kinships""" +188 93 model """distmult""" +188 93 loss """bceaftersigmoid""" +188 93 regularizer """no""" +188 93 optimizer """adam""" +188 93 training_loop """owa""" +188 93 negative_sampler """basic""" +188 93 evaluator """rankbased""" +188 94 dataset """kinships""" +188 94 model """distmult""" +188 94 loss """bceaftersigmoid""" +188 94 regularizer """no""" +188 94 optimizer """adam""" +188 94 training_loop """owa""" +188 94 negative_sampler """basic""" +188 94 evaluator """rankbased""" +188 95 dataset """kinships""" +188 95 model """distmult""" +188 95 loss """bceaftersigmoid""" +188 95 regularizer """no""" +188 95 optimizer """adam""" +188 95 training_loop """owa""" +188 95 negative_sampler """basic""" +188 95 evaluator """rankbased""" +188 96 dataset """kinships""" +188 96 model """distmult""" +188 96 loss """bceaftersigmoid""" +188 96 regularizer """no""" +188 96 optimizer """adam""" +188 96 training_loop """owa""" +188 96 negative_sampler """basic""" +188 96 evaluator 
"""rankbased""" +188 97 dataset """kinships""" +188 97 model """distmult""" +188 97 loss """bceaftersigmoid""" +188 97 regularizer """no""" +188 97 optimizer """adam""" +188 97 training_loop """owa""" +188 97 negative_sampler """basic""" +188 97 evaluator """rankbased""" +188 98 dataset """kinships""" +188 98 model """distmult""" +188 98 loss """bceaftersigmoid""" +188 98 regularizer """no""" +188 98 optimizer """adam""" +188 98 training_loop """owa""" +188 98 negative_sampler """basic""" +188 98 evaluator """rankbased""" +188 99 dataset """kinships""" +188 99 model """distmult""" +188 99 loss """bceaftersigmoid""" +188 99 regularizer """no""" +188 99 optimizer """adam""" +188 99 training_loop """owa""" +188 99 negative_sampler """basic""" +188 99 evaluator """rankbased""" +188 100 dataset """kinships""" +188 100 model """distmult""" +188 100 loss """bceaftersigmoid""" +188 100 regularizer """no""" +188 100 optimizer """adam""" +188 100 training_loop """owa""" +188 100 negative_sampler """basic""" +188 100 evaluator """rankbased""" +189 1 model.embedding_dim 1.0 +189 1 optimizer.lr 0.0072244589197546505 +189 1 negative_sampler.num_negs_per_pos 27.0 +189 1 training.batch_size 2.0 +189 2 model.embedding_dim 1.0 +189 2 optimizer.lr 0.055408369842374676 +189 2 negative_sampler.num_negs_per_pos 24.0 +189 2 training.batch_size 0.0 +189 3 model.embedding_dim 2.0 +189 3 optimizer.lr 0.08964450698912231 +189 3 negative_sampler.num_negs_per_pos 99.0 +189 3 training.batch_size 0.0 +189 4 model.embedding_dim 0.0 +189 4 optimizer.lr 0.008233624082973076 +189 4 negative_sampler.num_negs_per_pos 90.0 +189 4 training.batch_size 1.0 +189 5 model.embedding_dim 2.0 +189 5 optimizer.lr 0.02539373495981298 +189 5 negative_sampler.num_negs_per_pos 0.0 +189 5 training.batch_size 2.0 +189 6 model.embedding_dim 2.0 +189 6 optimizer.lr 0.05956004569947247 +189 6 negative_sampler.num_negs_per_pos 40.0 +189 6 training.batch_size 0.0 +189 7 model.embedding_dim 0.0 +189 7 optimizer.lr 
0.09661527302464303 +189 7 negative_sampler.num_negs_per_pos 99.0 +189 7 training.batch_size 0.0 +189 8 model.embedding_dim 0.0 +189 8 optimizer.lr 0.012670964153762565 +189 8 negative_sampler.num_negs_per_pos 89.0 +189 8 training.batch_size 2.0 +189 9 model.embedding_dim 1.0 +189 9 optimizer.lr 0.02786414701125805 +189 9 negative_sampler.num_negs_per_pos 50.0 +189 9 training.batch_size 2.0 +189 10 model.embedding_dim 1.0 +189 10 optimizer.lr 0.001728708869306191 +189 10 negative_sampler.num_negs_per_pos 0.0 +189 10 training.batch_size 1.0 +189 11 model.embedding_dim 2.0 +189 11 optimizer.lr 0.011949250395185068 +189 11 negative_sampler.num_negs_per_pos 29.0 +189 11 training.batch_size 2.0 +189 12 model.embedding_dim 1.0 +189 12 optimizer.lr 0.023803463027198782 +189 12 negative_sampler.num_negs_per_pos 48.0 +189 12 training.batch_size 2.0 +189 13 model.embedding_dim 2.0 +189 13 optimizer.lr 0.001277272664671615 +189 13 negative_sampler.num_negs_per_pos 25.0 +189 13 training.batch_size 1.0 +189 14 model.embedding_dim 1.0 +189 14 optimizer.lr 0.025215339181923698 +189 14 negative_sampler.num_negs_per_pos 35.0 +189 14 training.batch_size 1.0 +189 15 model.embedding_dim 2.0 +189 15 optimizer.lr 0.04193811977384322 +189 15 negative_sampler.num_negs_per_pos 76.0 +189 15 training.batch_size 1.0 +189 16 model.embedding_dim 0.0 +189 16 optimizer.lr 0.07304968844117736 +189 16 negative_sampler.num_negs_per_pos 42.0 +189 16 training.batch_size 2.0 +189 17 model.embedding_dim 0.0 +189 17 optimizer.lr 0.022407375375577812 +189 17 negative_sampler.num_negs_per_pos 34.0 +189 17 training.batch_size 0.0 +189 18 model.embedding_dim 1.0 +189 18 optimizer.lr 0.048305335184708614 +189 18 negative_sampler.num_negs_per_pos 45.0 +189 18 training.batch_size 0.0 +189 19 model.embedding_dim 2.0 +189 19 optimizer.lr 0.0025106673775696577 +189 19 negative_sampler.num_negs_per_pos 56.0 +189 19 training.batch_size 0.0 +189 20 model.embedding_dim 0.0 +189 20 optimizer.lr 0.0023883672886631383 
+189 20 negative_sampler.num_negs_per_pos 38.0 +189 20 training.batch_size 0.0 +189 21 model.embedding_dim 2.0 +189 21 optimizer.lr 0.006051630507022907 +189 21 negative_sampler.num_negs_per_pos 19.0 +189 21 training.batch_size 0.0 +189 22 model.embedding_dim 1.0 +189 22 optimizer.lr 0.030641123563680716 +189 22 negative_sampler.num_negs_per_pos 15.0 +189 22 training.batch_size 1.0 +189 23 model.embedding_dim 0.0 +189 23 optimizer.lr 0.009637423026877383 +189 23 negative_sampler.num_negs_per_pos 50.0 +189 23 training.batch_size 1.0 +189 24 model.embedding_dim 0.0 +189 24 optimizer.lr 0.008483085368193538 +189 24 negative_sampler.num_negs_per_pos 97.0 +189 24 training.batch_size 1.0 +189 25 model.embedding_dim 0.0 +189 25 optimizer.lr 0.004574253055153563 +189 25 negative_sampler.num_negs_per_pos 59.0 +189 25 training.batch_size 2.0 +189 26 model.embedding_dim 1.0 +189 26 optimizer.lr 0.02843574094952461 +189 26 negative_sampler.num_negs_per_pos 36.0 +189 26 training.batch_size 2.0 +189 27 model.embedding_dim 1.0 +189 27 optimizer.lr 0.0039059975092115957 +189 27 negative_sampler.num_negs_per_pos 35.0 +189 27 training.batch_size 2.0 +189 28 model.embedding_dim 0.0 +189 28 optimizer.lr 0.0034384677693101323 +189 28 negative_sampler.num_negs_per_pos 63.0 +189 28 training.batch_size 2.0 +189 29 model.embedding_dim 0.0 +189 29 optimizer.lr 0.007140816511817431 +189 29 negative_sampler.num_negs_per_pos 80.0 +189 29 training.batch_size 0.0 +189 30 model.embedding_dim 2.0 +189 30 optimizer.lr 0.005248758369887589 +189 30 negative_sampler.num_negs_per_pos 99.0 +189 30 training.batch_size 2.0 +189 31 model.embedding_dim 0.0 +189 31 optimizer.lr 0.014821425483382578 +189 31 negative_sampler.num_negs_per_pos 28.0 +189 31 training.batch_size 0.0 +189 32 model.embedding_dim 2.0 +189 32 optimizer.lr 0.04774750581661479 +189 32 negative_sampler.num_negs_per_pos 9.0 +189 32 training.batch_size 1.0 +189 33 model.embedding_dim 2.0 +189 33 optimizer.lr 0.006684480488112724 +189 33 
negative_sampler.num_negs_per_pos 10.0 +189 33 training.batch_size 2.0 +189 34 model.embedding_dim 0.0 +189 34 optimizer.lr 0.0012680046409687023 +189 34 negative_sampler.num_negs_per_pos 24.0 +189 34 training.batch_size 2.0 +189 35 model.embedding_dim 2.0 +189 35 optimizer.lr 0.0379226268208241 +189 35 negative_sampler.num_negs_per_pos 38.0 +189 35 training.batch_size 0.0 +189 36 model.embedding_dim 2.0 +189 36 optimizer.lr 0.001071371397374549 +189 36 negative_sampler.num_negs_per_pos 82.0 +189 36 training.batch_size 1.0 +189 37 model.embedding_dim 0.0 +189 37 optimizer.lr 0.0013507694544499216 +189 37 negative_sampler.num_negs_per_pos 24.0 +189 37 training.batch_size 2.0 +189 38 model.embedding_dim 0.0 +189 38 optimizer.lr 0.0029339880433360147 +189 38 negative_sampler.num_negs_per_pos 32.0 +189 38 training.batch_size 0.0 +189 39 model.embedding_dim 2.0 +189 39 optimizer.lr 0.08900760720803408 +189 39 negative_sampler.num_negs_per_pos 13.0 +189 39 training.batch_size 0.0 +189 40 model.embedding_dim 2.0 +189 40 optimizer.lr 0.017111677571220115 +189 40 negative_sampler.num_negs_per_pos 88.0 +189 40 training.batch_size 2.0 +189 41 model.embedding_dim 2.0 +189 41 optimizer.lr 0.0011943526741974833 +189 41 negative_sampler.num_negs_per_pos 97.0 +189 41 training.batch_size 1.0 +189 42 model.embedding_dim 2.0 +189 42 optimizer.lr 0.03648922478502381 +189 42 negative_sampler.num_negs_per_pos 48.0 +189 42 training.batch_size 2.0 +189 43 model.embedding_dim 1.0 +189 43 optimizer.lr 0.009632535757672998 +189 43 negative_sampler.num_negs_per_pos 73.0 +189 43 training.batch_size 2.0 +189 44 model.embedding_dim 0.0 +189 44 optimizer.lr 0.014520604526398728 +189 44 negative_sampler.num_negs_per_pos 99.0 +189 44 training.batch_size 1.0 +189 45 model.embedding_dim 2.0 +189 45 optimizer.lr 0.007934562212220433 +189 45 negative_sampler.num_negs_per_pos 46.0 +189 45 training.batch_size 1.0 +189 46 model.embedding_dim 1.0 +189 46 optimizer.lr 0.0028266390375845385 +189 46 
negative_sampler.num_negs_per_pos 1.0 +189 46 training.batch_size 1.0 +189 47 model.embedding_dim 0.0 +189 47 optimizer.lr 0.09349001234990287 +189 47 negative_sampler.num_negs_per_pos 3.0 +189 47 training.batch_size 1.0 +189 48 model.embedding_dim 2.0 +189 48 optimizer.lr 0.0010226899691731452 +189 48 negative_sampler.num_negs_per_pos 84.0 +189 48 training.batch_size 0.0 +189 49 model.embedding_dim 2.0 +189 49 optimizer.lr 0.0070789149356581755 +189 49 negative_sampler.num_negs_per_pos 17.0 +189 49 training.batch_size 1.0 +189 50 model.embedding_dim 2.0 +189 50 optimizer.lr 0.005204287990101302 +189 50 negative_sampler.num_negs_per_pos 4.0 +189 50 training.batch_size 0.0 +189 51 model.embedding_dim 2.0 +189 51 optimizer.lr 0.04669167552566745 +189 51 negative_sampler.num_negs_per_pos 76.0 +189 51 training.batch_size 1.0 +189 52 model.embedding_dim 0.0 +189 52 optimizer.lr 0.001557167804960356 +189 52 negative_sampler.num_negs_per_pos 58.0 +189 52 training.batch_size 2.0 +189 53 model.embedding_dim 0.0 +189 53 optimizer.lr 0.06817657580996499 +189 53 negative_sampler.num_negs_per_pos 25.0 +189 53 training.batch_size 2.0 +189 54 model.embedding_dim 0.0 +189 54 optimizer.lr 0.0029433570566805654 +189 54 negative_sampler.num_negs_per_pos 23.0 +189 54 training.batch_size 1.0 +189 55 model.embedding_dim 1.0 +189 55 optimizer.lr 0.01190725441144148 +189 55 negative_sampler.num_negs_per_pos 42.0 +189 55 training.batch_size 1.0 +189 56 model.embedding_dim 2.0 +189 56 optimizer.lr 0.009128926711331543 +189 56 negative_sampler.num_negs_per_pos 40.0 +189 56 training.batch_size 2.0 +189 57 model.embedding_dim 1.0 +189 57 optimizer.lr 0.005735411432618576 +189 57 negative_sampler.num_negs_per_pos 26.0 +189 57 training.batch_size 1.0 +189 58 model.embedding_dim 1.0 +189 58 optimizer.lr 0.004642322440450321 +189 58 negative_sampler.num_negs_per_pos 1.0 +189 58 training.batch_size 0.0 +189 59 model.embedding_dim 0.0 +189 59 optimizer.lr 0.0013227270847648845 +189 59 
negative_sampler.num_negs_per_pos 21.0 +189 59 training.batch_size 0.0 +189 60 model.embedding_dim 0.0 +189 60 optimizer.lr 0.001175156836760225 +189 60 negative_sampler.num_negs_per_pos 1.0 +189 60 training.batch_size 1.0 +189 61 model.embedding_dim 0.0 +189 61 optimizer.lr 0.0069989080201541334 +189 61 negative_sampler.num_negs_per_pos 36.0 +189 61 training.batch_size 2.0 +189 62 model.embedding_dim 0.0 +189 62 optimizer.lr 0.021147602056513174 +189 62 negative_sampler.num_negs_per_pos 2.0 +189 62 training.batch_size 1.0 +189 63 model.embedding_dim 0.0 +189 63 optimizer.lr 0.038243102223143643 +189 63 negative_sampler.num_negs_per_pos 85.0 +189 63 training.batch_size 2.0 +189 64 model.embedding_dim 1.0 +189 64 optimizer.lr 0.008433332635915796 +189 64 negative_sampler.num_negs_per_pos 48.0 +189 64 training.batch_size 2.0 +189 65 model.embedding_dim 0.0 +189 65 optimizer.lr 0.020429288135180853 +189 65 negative_sampler.num_negs_per_pos 88.0 +189 65 training.batch_size 0.0 +189 66 model.embedding_dim 2.0 +189 66 optimizer.lr 0.001197575098265567 +189 66 negative_sampler.num_negs_per_pos 71.0 +189 66 training.batch_size 1.0 +189 67 model.embedding_dim 2.0 +189 67 optimizer.lr 0.012594907687469424 +189 67 negative_sampler.num_negs_per_pos 19.0 +189 67 training.batch_size 1.0 +189 68 model.embedding_dim 0.0 +189 68 optimizer.lr 0.0025898428986964647 +189 68 negative_sampler.num_negs_per_pos 34.0 +189 68 training.batch_size 0.0 +189 69 model.embedding_dim 0.0 +189 69 optimizer.lr 0.02661304158261565 +189 69 negative_sampler.num_negs_per_pos 75.0 +189 69 training.batch_size 2.0 +189 70 model.embedding_dim 0.0 +189 70 optimizer.lr 0.04956514747208833 +189 70 negative_sampler.num_negs_per_pos 46.0 +189 70 training.batch_size 1.0 +189 71 model.embedding_dim 0.0 +189 71 optimizer.lr 0.008566725495407487 +189 71 negative_sampler.num_negs_per_pos 10.0 +189 71 training.batch_size 0.0 +189 72 model.embedding_dim 0.0 +189 72 optimizer.lr 0.0244556771282718 +189 72 
negative_sampler.num_negs_per_pos 51.0 +189 72 training.batch_size 0.0 +189 73 model.embedding_dim 0.0 +189 73 optimizer.lr 0.008016287170035734 +189 73 negative_sampler.num_negs_per_pos 71.0 +189 73 training.batch_size 1.0 +189 74 model.embedding_dim 0.0 +189 74 optimizer.lr 0.020591488274714496 +189 74 negative_sampler.num_negs_per_pos 91.0 +189 74 training.batch_size 0.0 +189 75 model.embedding_dim 0.0 +189 75 optimizer.lr 0.06252358101482795 +189 75 negative_sampler.num_negs_per_pos 19.0 +189 75 training.batch_size 0.0 +189 76 model.embedding_dim 0.0 +189 76 optimizer.lr 0.007670925697517653 +189 76 negative_sampler.num_negs_per_pos 29.0 +189 76 training.batch_size 1.0 +189 77 model.embedding_dim 1.0 +189 77 optimizer.lr 0.002876327631893234 +189 77 negative_sampler.num_negs_per_pos 72.0 +189 77 training.batch_size 1.0 +189 78 model.embedding_dim 1.0 +189 78 optimizer.lr 0.0016917728779522303 +189 78 negative_sampler.num_negs_per_pos 79.0 +189 78 training.batch_size 1.0 +189 79 model.embedding_dim 1.0 +189 79 optimizer.lr 0.0013081191518690715 +189 79 negative_sampler.num_negs_per_pos 31.0 +189 79 training.batch_size 1.0 +189 80 model.embedding_dim 2.0 +189 80 optimizer.lr 0.0026019338274720946 +189 80 negative_sampler.num_negs_per_pos 83.0 +189 80 training.batch_size 1.0 +189 81 model.embedding_dim 2.0 +189 81 optimizer.lr 0.002079776310026686 +189 81 negative_sampler.num_negs_per_pos 71.0 +189 81 training.batch_size 0.0 +189 82 model.embedding_dim 2.0 +189 82 optimizer.lr 0.0013873284068146277 +189 82 negative_sampler.num_negs_per_pos 49.0 +189 82 training.batch_size 0.0 +189 83 model.embedding_dim 2.0 +189 83 optimizer.lr 0.0068415605195307535 +189 83 negative_sampler.num_negs_per_pos 7.0 +189 83 training.batch_size 1.0 +189 84 model.embedding_dim 0.0 +189 84 optimizer.lr 0.006344761062135756 +189 84 negative_sampler.num_negs_per_pos 91.0 +189 84 training.batch_size 0.0 +189 85 model.embedding_dim 2.0 +189 85 optimizer.lr 0.06527548149846191 +189 85 
negative_sampler.num_negs_per_pos 56.0 +189 85 training.batch_size 0.0 +189 86 model.embedding_dim 2.0 +189 86 optimizer.lr 0.00369411538022656 +189 86 negative_sampler.num_negs_per_pos 34.0 +189 86 training.batch_size 1.0 +189 87 model.embedding_dim 1.0 +189 87 optimizer.lr 0.005254015247703111 +189 87 negative_sampler.num_negs_per_pos 96.0 +189 87 training.batch_size 1.0 +189 88 model.embedding_dim 2.0 +189 88 optimizer.lr 0.005882410320532493 +189 88 negative_sampler.num_negs_per_pos 0.0 +189 88 training.batch_size 0.0 +189 89 model.embedding_dim 1.0 +189 89 optimizer.lr 0.0010168969635170155 +189 89 negative_sampler.num_negs_per_pos 34.0 +189 89 training.batch_size 1.0 +189 90 model.embedding_dim 1.0 +189 90 optimizer.lr 0.006096764902612023 +189 90 negative_sampler.num_negs_per_pos 60.0 +189 90 training.batch_size 0.0 +189 91 model.embedding_dim 1.0 +189 91 optimizer.lr 0.0010124092419700101 +189 91 negative_sampler.num_negs_per_pos 49.0 +189 91 training.batch_size 2.0 +189 92 model.embedding_dim 0.0 +189 92 optimizer.lr 0.0050868271861732395 +189 92 negative_sampler.num_negs_per_pos 36.0 +189 92 training.batch_size 1.0 +189 93 model.embedding_dim 0.0 +189 93 optimizer.lr 0.0029847366582499554 +189 93 negative_sampler.num_negs_per_pos 20.0 +189 93 training.batch_size 1.0 +189 94 model.embedding_dim 2.0 +189 94 optimizer.lr 0.0069354111821672785 +189 94 negative_sampler.num_negs_per_pos 97.0 +189 94 training.batch_size 0.0 +189 95 model.embedding_dim 1.0 +189 95 optimizer.lr 0.07352705545644887 +189 95 negative_sampler.num_negs_per_pos 10.0 +189 95 training.batch_size 2.0 +189 96 model.embedding_dim 2.0 +189 96 optimizer.lr 0.0017595724453159365 +189 96 negative_sampler.num_negs_per_pos 46.0 +189 96 training.batch_size 2.0 +189 97 model.embedding_dim 0.0 +189 97 optimizer.lr 0.0011708978048081992 +189 97 negative_sampler.num_negs_per_pos 62.0 +189 97 training.batch_size 0.0 +189 98 model.embedding_dim 1.0 +189 98 optimizer.lr 0.03340154518213473 +189 98 
negative_sampler.num_negs_per_pos 80.0 +189 98 training.batch_size 0.0 +189 99 model.embedding_dim 2.0 +189 99 optimizer.lr 0.05457241386286995 +189 99 negative_sampler.num_negs_per_pos 37.0 +189 99 training.batch_size 0.0 +189 100 model.embedding_dim 2.0 +189 100 optimizer.lr 0.002963396536178263 +189 100 negative_sampler.num_negs_per_pos 37.0 +189 100 training.batch_size 1.0 +189 1 dataset """kinships""" +189 1 model """distmult""" +189 1 loss """softplus""" +189 1 regularizer """no""" +189 1 optimizer """adam""" +189 1 training_loop """owa""" +189 1 negative_sampler """basic""" +189 1 evaluator """rankbased""" +189 2 dataset """kinships""" +189 2 model """distmult""" +189 2 loss """softplus""" +189 2 regularizer """no""" +189 2 optimizer """adam""" +189 2 training_loop """owa""" +189 2 negative_sampler """basic""" +189 2 evaluator """rankbased""" +189 3 dataset """kinships""" +189 3 model """distmult""" +189 3 loss """softplus""" +189 3 regularizer """no""" +189 3 optimizer """adam""" +189 3 training_loop """owa""" +189 3 negative_sampler """basic""" +189 3 evaluator """rankbased""" +189 4 dataset """kinships""" +189 4 model """distmult""" +189 4 loss """softplus""" +189 4 regularizer """no""" +189 4 optimizer """adam""" +189 4 training_loop """owa""" +189 4 negative_sampler """basic""" +189 4 evaluator """rankbased""" +189 5 dataset """kinships""" +189 5 model """distmult""" +189 5 loss """softplus""" +189 5 regularizer """no""" +189 5 optimizer """adam""" +189 5 training_loop """owa""" +189 5 negative_sampler """basic""" +189 5 evaluator """rankbased""" +189 6 dataset """kinships""" +189 6 model """distmult""" +189 6 loss """softplus""" +189 6 regularizer """no""" +189 6 optimizer """adam""" +189 6 training_loop """owa""" +189 6 negative_sampler """basic""" +189 6 evaluator """rankbased""" +189 7 dataset """kinships""" +189 7 model """distmult""" +189 7 loss """softplus""" +189 7 regularizer """no""" +189 7 optimizer """adam""" +189 7 training_loop """owa""" 
+189 7 negative_sampler """basic""" +189 7 evaluator """rankbased""" +189 8 dataset """kinships""" +189 8 model """distmult""" +189 8 loss """softplus""" +189 8 regularizer """no""" +189 8 optimizer """adam""" +189 8 training_loop """owa""" +189 8 negative_sampler """basic""" +189 8 evaluator """rankbased""" +189 9 dataset """kinships""" +189 9 model """distmult""" +189 9 loss """softplus""" +189 9 regularizer """no""" +189 9 optimizer """adam""" +189 9 training_loop """owa""" +189 9 negative_sampler """basic""" +189 9 evaluator """rankbased""" +189 10 dataset """kinships""" +189 10 model """distmult""" +189 10 loss """softplus""" +189 10 regularizer """no""" +189 10 optimizer """adam""" +189 10 training_loop """owa""" +189 10 negative_sampler """basic""" +189 10 evaluator """rankbased""" +189 11 dataset """kinships""" +189 11 model """distmult""" +189 11 loss """softplus""" +189 11 regularizer """no""" +189 11 optimizer """adam""" +189 11 training_loop """owa""" +189 11 negative_sampler """basic""" +189 11 evaluator """rankbased""" +189 12 dataset """kinships""" +189 12 model """distmult""" +189 12 loss """softplus""" +189 12 regularizer """no""" +189 12 optimizer """adam""" +189 12 training_loop """owa""" +189 12 negative_sampler """basic""" +189 12 evaluator """rankbased""" +189 13 dataset """kinships""" +189 13 model """distmult""" +189 13 loss """softplus""" +189 13 regularizer """no""" +189 13 optimizer """adam""" +189 13 training_loop """owa""" +189 13 negative_sampler """basic""" +189 13 evaluator """rankbased""" +189 14 dataset """kinships""" +189 14 model """distmult""" +189 14 loss """softplus""" +189 14 regularizer """no""" +189 14 optimizer """adam""" +189 14 training_loop """owa""" +189 14 negative_sampler """basic""" +189 14 evaluator """rankbased""" +189 15 dataset """kinships""" +189 15 model """distmult""" +189 15 loss """softplus""" +189 15 regularizer """no""" +189 15 optimizer """adam""" +189 15 training_loop """owa""" +189 15 negative_sampler 
"""basic""" +189 15 evaluator """rankbased""" +189 16 dataset """kinships""" +189 16 model """distmult""" +189 16 loss """softplus""" +189 16 regularizer """no""" +189 16 optimizer """adam""" +189 16 training_loop """owa""" +189 16 negative_sampler """basic""" +189 16 evaluator """rankbased""" +189 17 dataset """kinships""" +189 17 model """distmult""" +189 17 loss """softplus""" +189 17 regularizer """no""" +189 17 optimizer """adam""" +189 17 training_loop """owa""" +189 17 negative_sampler """basic""" +189 17 evaluator """rankbased""" +189 18 dataset """kinships""" +189 18 model """distmult""" +189 18 loss """softplus""" +189 18 regularizer """no""" +189 18 optimizer """adam""" +189 18 training_loop """owa""" +189 18 negative_sampler """basic""" +189 18 evaluator """rankbased""" +189 19 dataset """kinships""" +189 19 model """distmult""" +189 19 loss """softplus""" +189 19 regularizer """no""" +189 19 optimizer """adam""" +189 19 training_loop """owa""" +189 19 negative_sampler """basic""" +189 19 evaluator """rankbased""" +189 20 dataset """kinships""" +189 20 model """distmult""" +189 20 loss """softplus""" +189 20 regularizer """no""" +189 20 optimizer """adam""" +189 20 training_loop """owa""" +189 20 negative_sampler """basic""" +189 20 evaluator """rankbased""" +189 21 dataset """kinships""" +189 21 model """distmult""" +189 21 loss """softplus""" +189 21 regularizer """no""" +189 21 optimizer """adam""" +189 21 training_loop """owa""" +189 21 negative_sampler """basic""" +189 21 evaluator """rankbased""" +189 22 dataset """kinships""" +189 22 model """distmult""" +189 22 loss """softplus""" +189 22 regularizer """no""" +189 22 optimizer """adam""" +189 22 training_loop """owa""" +189 22 negative_sampler """basic""" +189 22 evaluator """rankbased""" +189 23 dataset """kinships""" +189 23 model """distmult""" +189 23 loss """softplus""" +189 23 regularizer """no""" +189 23 optimizer """adam""" +189 23 training_loop """owa""" +189 23 negative_sampler 
"""basic""" +189 23 evaluator """rankbased""" +189 24 dataset """kinships""" +189 24 model """distmult""" +189 24 loss """softplus""" +189 24 regularizer """no""" +189 24 optimizer """adam""" +189 24 training_loop """owa""" +189 24 negative_sampler """basic""" +189 24 evaluator """rankbased""" +189 25 dataset """kinships""" +189 25 model """distmult""" +189 25 loss """softplus""" +189 25 regularizer """no""" +189 25 optimizer """adam""" +189 25 training_loop """owa""" +189 25 negative_sampler """basic""" +189 25 evaluator """rankbased""" +189 26 dataset """kinships""" +189 26 model """distmult""" +189 26 loss """softplus""" +189 26 regularizer """no""" +189 26 optimizer """adam""" +189 26 training_loop """owa""" +189 26 negative_sampler """basic""" +189 26 evaluator """rankbased""" +189 27 dataset """kinships""" +189 27 model """distmult""" +189 27 loss """softplus""" +189 27 regularizer """no""" +189 27 optimizer """adam""" +189 27 training_loop """owa""" +189 27 negative_sampler """basic""" +189 27 evaluator """rankbased""" +189 28 dataset """kinships""" +189 28 model """distmult""" +189 28 loss """softplus""" +189 28 regularizer """no""" +189 28 optimizer """adam""" +189 28 training_loop """owa""" +189 28 negative_sampler """basic""" +189 28 evaluator """rankbased""" +189 29 dataset """kinships""" +189 29 model """distmult""" +189 29 loss """softplus""" +189 29 regularizer """no""" +189 29 optimizer """adam""" +189 29 training_loop """owa""" +189 29 negative_sampler """basic""" +189 29 evaluator """rankbased""" +189 30 dataset """kinships""" +189 30 model """distmult""" +189 30 loss """softplus""" +189 30 regularizer """no""" +189 30 optimizer """adam""" +189 30 training_loop """owa""" +189 30 negative_sampler """basic""" +189 30 evaluator """rankbased""" +189 31 dataset """kinships""" +189 31 model """distmult""" +189 31 loss """softplus""" +189 31 regularizer """no""" +189 31 optimizer """adam""" +189 31 training_loop """owa""" +189 31 negative_sampler 
"""basic""" +189 31 evaluator """rankbased""" +189 32 dataset """kinships""" +189 32 model """distmult""" +189 32 loss """softplus""" +189 32 regularizer """no""" +189 32 optimizer """adam""" +189 32 training_loop """owa""" +189 32 negative_sampler """basic""" +189 32 evaluator """rankbased""" +189 33 dataset """kinships""" +189 33 model """distmult""" +189 33 loss """softplus""" +189 33 regularizer """no""" +189 33 optimizer """adam""" +189 33 training_loop """owa""" +189 33 negative_sampler """basic""" +189 33 evaluator """rankbased""" +189 34 dataset """kinships""" +189 34 model """distmult""" +189 34 loss """softplus""" +189 34 regularizer """no""" +189 34 optimizer """adam""" +189 34 training_loop """owa""" +189 34 negative_sampler """basic""" +189 34 evaluator """rankbased""" +189 35 dataset """kinships""" +189 35 model """distmult""" +189 35 loss """softplus""" +189 35 regularizer """no""" +189 35 optimizer """adam""" +189 35 training_loop """owa""" +189 35 negative_sampler """basic""" +189 35 evaluator """rankbased""" +189 36 dataset """kinships""" +189 36 model """distmult""" +189 36 loss """softplus""" +189 36 regularizer """no""" +189 36 optimizer """adam""" +189 36 training_loop """owa""" +189 36 negative_sampler """basic""" +189 36 evaluator """rankbased""" +189 37 dataset """kinships""" +189 37 model """distmult""" +189 37 loss """softplus""" +189 37 regularizer """no""" +189 37 optimizer """adam""" +189 37 training_loop """owa""" +189 37 negative_sampler """basic""" +189 37 evaluator """rankbased""" +189 38 dataset """kinships""" +189 38 model """distmult""" +189 38 loss """softplus""" +189 38 regularizer """no""" +189 38 optimizer """adam""" +189 38 training_loop """owa""" +189 38 negative_sampler """basic""" +189 38 evaluator """rankbased""" +189 39 dataset """kinships""" +189 39 model """distmult""" +189 39 loss """softplus""" +189 39 regularizer """no""" +189 39 optimizer """adam""" +189 39 training_loop """owa""" +189 39 negative_sampler 
"""basic""" +189 39 evaluator """rankbased""" +189 40 dataset """kinships""" +189 40 model """distmult""" +189 40 loss """softplus""" +189 40 regularizer """no""" +189 40 optimizer """adam""" +189 40 training_loop """owa""" +189 40 negative_sampler """basic""" +189 40 evaluator """rankbased""" +189 41 dataset """kinships""" +189 41 model """distmult""" +189 41 loss """softplus""" +189 41 regularizer """no""" +189 41 optimizer """adam""" +189 41 training_loop """owa""" +189 41 negative_sampler """basic""" +189 41 evaluator """rankbased""" +189 42 dataset """kinships""" +189 42 model """distmult""" +189 42 loss """softplus""" +189 42 regularizer """no""" +189 42 optimizer """adam""" +189 42 training_loop """owa""" +189 42 negative_sampler """basic""" +189 42 evaluator """rankbased""" +189 43 dataset """kinships""" +189 43 model """distmult""" +189 43 loss """softplus""" +189 43 regularizer """no""" +189 43 optimizer """adam""" +189 43 training_loop """owa""" +189 43 negative_sampler """basic""" +189 43 evaluator """rankbased""" +189 44 dataset """kinships""" +189 44 model """distmult""" +189 44 loss """softplus""" +189 44 regularizer """no""" +189 44 optimizer """adam""" +189 44 training_loop """owa""" +189 44 negative_sampler """basic""" +189 44 evaluator """rankbased""" +189 45 dataset """kinships""" +189 45 model """distmult""" +189 45 loss """softplus""" +189 45 regularizer """no""" +189 45 optimizer """adam""" +189 45 training_loop """owa""" +189 45 negative_sampler """basic""" +189 45 evaluator """rankbased""" +189 46 dataset """kinships""" +189 46 model """distmult""" +189 46 loss """softplus""" +189 46 regularizer """no""" +189 46 optimizer """adam""" +189 46 training_loop """owa""" +189 46 negative_sampler """basic""" +189 46 evaluator """rankbased""" +189 47 dataset """kinships""" +189 47 model """distmult""" +189 47 loss """softplus""" +189 47 regularizer """no""" +189 47 optimizer """adam""" +189 47 training_loop """owa""" +189 47 negative_sampler 
"""basic""" +189 47 evaluator """rankbased""" +189 48 dataset """kinships""" +189 48 model """distmult""" +189 48 loss """softplus""" +189 48 regularizer """no""" +189 48 optimizer """adam""" +189 48 training_loop """owa""" +189 48 negative_sampler """basic""" +189 48 evaluator """rankbased""" +189 49 dataset """kinships""" +189 49 model """distmult""" +189 49 loss """softplus""" +189 49 regularizer """no""" +189 49 optimizer """adam""" +189 49 training_loop """owa""" +189 49 negative_sampler """basic""" +189 49 evaluator """rankbased""" +189 50 dataset """kinships""" +189 50 model """distmult""" +189 50 loss """softplus""" +189 50 regularizer """no""" +189 50 optimizer """adam""" +189 50 training_loop """owa""" +189 50 negative_sampler """basic""" +189 50 evaluator """rankbased""" +189 51 dataset """kinships""" +189 51 model """distmult""" +189 51 loss """softplus""" +189 51 regularizer """no""" +189 51 optimizer """adam""" +189 51 training_loop """owa""" +189 51 negative_sampler """basic""" +189 51 evaluator """rankbased""" +189 52 dataset """kinships""" +189 52 model """distmult""" +189 52 loss """softplus""" +189 52 regularizer """no""" +189 52 optimizer """adam""" +189 52 training_loop """owa""" +189 52 negative_sampler """basic""" +189 52 evaluator """rankbased""" +189 53 dataset """kinships""" +189 53 model """distmult""" +189 53 loss """softplus""" +189 53 regularizer """no""" +189 53 optimizer """adam""" +189 53 training_loop """owa""" +189 53 negative_sampler """basic""" +189 53 evaluator """rankbased""" +189 54 dataset """kinships""" +189 54 model """distmult""" +189 54 loss """softplus""" +189 54 regularizer """no""" +189 54 optimizer """adam""" +189 54 training_loop """owa""" +189 54 negative_sampler """basic""" +189 54 evaluator """rankbased""" +189 55 dataset """kinships""" +189 55 model """distmult""" +189 55 loss """softplus""" +189 55 regularizer """no""" +189 55 optimizer """adam""" +189 55 training_loop """owa""" +189 55 negative_sampler 
"""basic""" +189 55 evaluator """rankbased""" +189 56 dataset """kinships""" +189 56 model """distmult""" +189 56 loss """softplus""" +189 56 regularizer """no""" +189 56 optimizer """adam""" +189 56 training_loop """owa""" +189 56 negative_sampler """basic""" +189 56 evaluator """rankbased""" +189 57 dataset """kinships""" +189 57 model """distmult""" +189 57 loss """softplus""" +189 57 regularizer """no""" +189 57 optimizer """adam""" +189 57 training_loop """owa""" +189 57 negative_sampler """basic""" +189 57 evaluator """rankbased""" +189 58 dataset """kinships""" +189 58 model """distmult""" +189 58 loss """softplus""" +189 58 regularizer """no""" +189 58 optimizer """adam""" +189 58 training_loop """owa""" +189 58 negative_sampler """basic""" +189 58 evaluator """rankbased""" +189 59 dataset """kinships""" +189 59 model """distmult""" +189 59 loss """softplus""" +189 59 regularizer """no""" +189 59 optimizer """adam""" +189 59 training_loop """owa""" +189 59 negative_sampler """basic""" +189 59 evaluator """rankbased""" +189 60 dataset """kinships""" +189 60 model """distmult""" +189 60 loss """softplus""" +189 60 regularizer """no""" +189 60 optimizer """adam""" +189 60 training_loop """owa""" +189 60 negative_sampler """basic""" +189 60 evaluator """rankbased""" +189 61 dataset """kinships""" +189 61 model """distmult""" +189 61 loss """softplus""" +189 61 regularizer """no""" +189 61 optimizer """adam""" +189 61 training_loop """owa""" +189 61 negative_sampler """basic""" +189 61 evaluator """rankbased""" +189 62 dataset """kinships""" +189 62 model """distmult""" +189 62 loss """softplus""" +189 62 regularizer """no""" +189 62 optimizer """adam""" +189 62 training_loop """owa""" +189 62 negative_sampler """basic""" +189 62 evaluator """rankbased""" +189 63 dataset """kinships""" +189 63 model """distmult""" +189 63 loss """softplus""" +189 63 regularizer """no""" +189 63 optimizer """adam""" +189 63 training_loop """owa""" +189 63 negative_sampler 
"""basic""" +189 63 evaluator """rankbased""" +189 64 dataset """kinships""" +189 64 model """distmult""" +189 64 loss """softplus""" +189 64 regularizer """no""" +189 64 optimizer """adam""" +189 64 training_loop """owa""" +189 64 negative_sampler """basic""" +189 64 evaluator """rankbased""" +189 65 dataset """kinships""" +189 65 model """distmult""" +189 65 loss """softplus""" +189 65 regularizer """no""" +189 65 optimizer """adam""" +189 65 training_loop """owa""" +189 65 negative_sampler """basic""" +189 65 evaluator """rankbased""" +189 66 dataset """kinships""" +189 66 model """distmult""" +189 66 loss """softplus""" +189 66 regularizer """no""" +189 66 optimizer """adam""" +189 66 training_loop """owa""" +189 66 negative_sampler """basic""" +189 66 evaluator """rankbased""" +189 67 dataset """kinships""" +189 67 model """distmult""" +189 67 loss """softplus""" +189 67 regularizer """no""" +189 67 optimizer """adam""" +189 67 training_loop """owa""" +189 67 negative_sampler """basic""" +189 67 evaluator """rankbased""" +189 68 dataset """kinships""" +189 68 model """distmult""" +189 68 loss """softplus""" +189 68 regularizer """no""" +189 68 optimizer """adam""" +189 68 training_loop """owa""" +189 68 negative_sampler """basic""" +189 68 evaluator """rankbased""" +189 69 dataset """kinships""" +189 69 model """distmult""" +189 69 loss """softplus""" +189 69 regularizer """no""" +189 69 optimizer """adam""" +189 69 training_loop """owa""" +189 69 negative_sampler """basic""" +189 69 evaluator """rankbased""" +189 70 dataset """kinships""" +189 70 model """distmult""" +189 70 loss """softplus""" +189 70 regularizer """no""" +189 70 optimizer """adam""" +189 70 training_loop """owa""" +189 70 negative_sampler """basic""" +189 70 evaluator """rankbased""" +189 71 dataset """kinships""" +189 71 model """distmult""" +189 71 loss """softplus""" +189 71 regularizer """no""" +189 71 optimizer """adam""" +189 71 training_loop """owa""" +189 71 negative_sampler 
"""basic""" +189 71 evaluator """rankbased""" +189 72 dataset """kinships""" +189 72 model """distmult""" +189 72 loss """softplus""" +189 72 regularizer """no""" +189 72 optimizer """adam""" +189 72 training_loop """owa""" +189 72 negative_sampler """basic""" +189 72 evaluator """rankbased""" +189 73 dataset """kinships""" +189 73 model """distmult""" +189 73 loss """softplus""" +189 73 regularizer """no""" +189 73 optimizer """adam""" +189 73 training_loop """owa""" +189 73 negative_sampler """basic""" +189 73 evaluator """rankbased""" +189 74 dataset """kinships""" +189 74 model """distmult""" +189 74 loss """softplus""" +189 74 regularizer """no""" +189 74 optimizer """adam""" +189 74 training_loop """owa""" +189 74 negative_sampler """basic""" +189 74 evaluator """rankbased""" +189 75 dataset """kinships""" +189 75 model """distmult""" +189 75 loss """softplus""" +189 75 regularizer """no""" +189 75 optimizer """adam""" +189 75 training_loop """owa""" +189 75 negative_sampler """basic""" +189 75 evaluator """rankbased""" +189 76 dataset """kinships""" +189 76 model """distmult""" +189 76 loss """softplus""" +189 76 regularizer """no""" +189 76 optimizer """adam""" +189 76 training_loop """owa""" +189 76 negative_sampler """basic""" +189 76 evaluator """rankbased""" +189 77 dataset """kinships""" +189 77 model """distmult""" +189 77 loss """softplus""" +189 77 regularizer """no""" +189 77 optimizer """adam""" +189 77 training_loop """owa""" +189 77 negative_sampler """basic""" +189 77 evaluator """rankbased""" +189 78 dataset """kinships""" +189 78 model """distmult""" +189 78 loss """softplus""" +189 78 regularizer """no""" +189 78 optimizer """adam""" +189 78 training_loop """owa""" +189 78 negative_sampler """basic""" +189 78 evaluator """rankbased""" +189 79 dataset """kinships""" +189 79 model """distmult""" +189 79 loss """softplus""" +189 79 regularizer """no""" +189 79 optimizer """adam""" +189 79 training_loop """owa""" +189 79 negative_sampler 
"""basic""" +189 79 evaluator """rankbased""" +189 80 dataset """kinships""" +189 80 model """distmult""" +189 80 loss """softplus""" +189 80 regularizer """no""" +189 80 optimizer """adam""" +189 80 training_loop """owa""" +189 80 negative_sampler """basic""" +189 80 evaluator """rankbased""" +189 81 dataset """kinships""" +189 81 model """distmult""" +189 81 loss """softplus""" +189 81 regularizer """no""" +189 81 optimizer """adam""" +189 81 training_loop """owa""" +189 81 negative_sampler """basic""" +189 81 evaluator """rankbased""" +189 82 dataset """kinships""" +189 82 model """distmult""" +189 82 loss """softplus""" +189 82 regularizer """no""" +189 82 optimizer """adam""" +189 82 training_loop """owa""" +189 82 negative_sampler """basic""" +189 82 evaluator """rankbased""" +189 83 dataset """kinships""" +189 83 model """distmult""" +189 83 loss """softplus""" +189 83 regularizer """no""" +189 83 optimizer """adam""" +189 83 training_loop """owa""" +189 83 negative_sampler """basic""" +189 83 evaluator """rankbased""" +189 84 dataset """kinships""" +189 84 model """distmult""" +189 84 loss """softplus""" +189 84 regularizer """no""" +189 84 optimizer """adam""" +189 84 training_loop """owa""" +189 84 negative_sampler """basic""" +189 84 evaluator """rankbased""" +189 85 dataset """kinships""" +189 85 model """distmult""" +189 85 loss """softplus""" +189 85 regularizer """no""" +189 85 optimizer """adam""" +189 85 training_loop """owa""" +189 85 negative_sampler """basic""" +189 85 evaluator """rankbased""" +189 86 dataset """kinships""" +189 86 model """distmult""" +189 86 loss """softplus""" +189 86 regularizer """no""" +189 86 optimizer """adam""" +189 86 training_loop """owa""" +189 86 negative_sampler """basic""" +189 86 evaluator """rankbased""" +189 87 dataset """kinships""" +189 87 model """distmult""" +189 87 loss """softplus""" +189 87 regularizer """no""" +189 87 optimizer """adam""" +189 87 training_loop """owa""" +189 87 negative_sampler 
"""basic""" +189 87 evaluator """rankbased""" +189 88 dataset """kinships""" +189 88 model """distmult""" +189 88 loss """softplus""" +189 88 regularizer """no""" +189 88 optimizer """adam""" +189 88 training_loop """owa""" +189 88 negative_sampler """basic""" +189 88 evaluator """rankbased""" +189 89 dataset """kinships""" +189 89 model """distmult""" +189 89 loss """softplus""" +189 89 regularizer """no""" +189 89 optimizer """adam""" +189 89 training_loop """owa""" +189 89 negative_sampler """basic""" +189 89 evaluator """rankbased""" +189 90 dataset """kinships""" +189 90 model """distmult""" +189 90 loss """softplus""" +189 90 regularizer """no""" +189 90 optimizer """adam""" +189 90 training_loop """owa""" +189 90 negative_sampler """basic""" +189 90 evaluator """rankbased""" +189 91 dataset """kinships""" +189 91 model """distmult""" +189 91 loss """softplus""" +189 91 regularizer """no""" +189 91 optimizer """adam""" +189 91 training_loop """owa""" +189 91 negative_sampler """basic""" +189 91 evaluator """rankbased""" +189 92 dataset """kinships""" +189 92 model """distmult""" +189 92 loss """softplus""" +189 92 regularizer """no""" +189 92 optimizer """adam""" +189 92 training_loop """owa""" +189 92 negative_sampler """basic""" +189 92 evaluator """rankbased""" +189 93 dataset """kinships""" +189 93 model """distmult""" +189 93 loss """softplus""" +189 93 regularizer """no""" +189 93 optimizer """adam""" +189 93 training_loop """owa""" +189 93 negative_sampler """basic""" +189 93 evaluator """rankbased""" +189 94 dataset """kinships""" +189 94 model """distmult""" +189 94 loss """softplus""" +189 94 regularizer """no""" +189 94 optimizer """adam""" +189 94 training_loop """owa""" +189 94 negative_sampler """basic""" +189 94 evaluator """rankbased""" +189 95 dataset """kinships""" +189 95 model """distmult""" +189 95 loss """softplus""" +189 95 regularizer """no""" +189 95 optimizer """adam""" +189 95 training_loop """owa""" +189 95 negative_sampler 
"""basic""" +189 95 evaluator """rankbased""" +189 96 dataset """kinships""" +189 96 model """distmult""" +189 96 loss """softplus""" +189 96 regularizer """no""" +189 96 optimizer """adam""" +189 96 training_loop """owa""" +189 96 negative_sampler """basic""" +189 96 evaluator """rankbased""" +189 97 dataset """kinships""" +189 97 model """distmult""" +189 97 loss """softplus""" +189 97 regularizer """no""" +189 97 optimizer """adam""" +189 97 training_loop """owa""" +189 97 negative_sampler """basic""" +189 97 evaluator """rankbased""" +189 98 dataset """kinships""" +189 98 model """distmult""" +189 98 loss """softplus""" +189 98 regularizer """no""" +189 98 optimizer """adam""" +189 98 training_loop """owa""" +189 98 negative_sampler """basic""" +189 98 evaluator """rankbased""" +189 99 dataset """kinships""" +189 99 model """distmult""" +189 99 loss """softplus""" +189 99 regularizer """no""" +189 99 optimizer """adam""" +189 99 training_loop """owa""" +189 99 negative_sampler """basic""" +189 99 evaluator """rankbased""" +189 100 dataset """kinships""" +189 100 model """distmult""" +189 100 loss """softplus""" +189 100 regularizer """no""" +189 100 optimizer """adam""" +189 100 training_loop """owa""" +189 100 negative_sampler """basic""" +189 100 evaluator """rankbased""" +190 1 model.embedding_dim 2.0 +190 1 optimizer.lr 0.003178135460825728 +190 1 negative_sampler.num_negs_per_pos 67.0 +190 1 training.batch_size 2.0 +190 2 model.embedding_dim 2.0 +190 2 optimizer.lr 0.03365089366309382 +190 2 negative_sampler.num_negs_per_pos 72.0 +190 2 training.batch_size 1.0 +190 3 model.embedding_dim 1.0 +190 3 optimizer.lr 0.0060954007540342135 +190 3 negative_sampler.num_negs_per_pos 47.0 +190 3 training.batch_size 0.0 +190 4 model.embedding_dim 1.0 +190 4 optimizer.lr 0.018887803258403398 +190 4 negative_sampler.num_negs_per_pos 57.0 +190 4 training.batch_size 0.0 +190 5 model.embedding_dim 1.0 +190 5 optimizer.lr 0.056326295681519985 +190 5 
negative_sampler.num_negs_per_pos 35.0 +190 5 training.batch_size 1.0 +190 6 model.embedding_dim 0.0 +190 6 optimizer.lr 0.014929541060743248 +190 6 negative_sampler.num_negs_per_pos 9.0 +190 6 training.batch_size 2.0 +190 7 model.embedding_dim 1.0 +190 7 optimizer.lr 0.0010955038863842677 +190 7 negative_sampler.num_negs_per_pos 69.0 +190 7 training.batch_size 0.0 +190 8 model.embedding_dim 0.0 +190 8 optimizer.lr 0.001992764070921132 +190 8 negative_sampler.num_negs_per_pos 61.0 +190 8 training.batch_size 0.0 +190 9 model.embedding_dim 1.0 +190 9 optimizer.lr 0.06260871275805757 +190 9 negative_sampler.num_negs_per_pos 31.0 +190 9 training.batch_size 1.0 +190 10 model.embedding_dim 2.0 +190 10 optimizer.lr 0.008226640870488827 +190 10 negative_sampler.num_negs_per_pos 45.0 +190 10 training.batch_size 0.0 +190 11 model.embedding_dim 0.0 +190 11 optimizer.lr 0.008667158979401382 +190 11 negative_sampler.num_negs_per_pos 52.0 +190 11 training.batch_size 1.0 +190 12 model.embedding_dim 2.0 +190 12 optimizer.lr 0.001416632591035647 +190 12 negative_sampler.num_negs_per_pos 79.0 +190 12 training.batch_size 0.0 +190 13 model.embedding_dim 2.0 +190 13 optimizer.lr 0.003371040965243831 +190 13 negative_sampler.num_negs_per_pos 56.0 +190 13 training.batch_size 1.0 +190 14 model.embedding_dim 2.0 +190 14 optimizer.lr 0.001297577805060329 +190 14 negative_sampler.num_negs_per_pos 12.0 +190 14 training.batch_size 2.0 +190 15 model.embedding_dim 2.0 +190 15 optimizer.lr 0.019236863138820007 +190 15 negative_sampler.num_negs_per_pos 68.0 +190 15 training.batch_size 2.0 +190 16 model.embedding_dim 2.0 +190 16 optimizer.lr 0.016594922081300353 +190 16 negative_sampler.num_negs_per_pos 9.0 +190 16 training.batch_size 1.0 +190 17 model.embedding_dim 1.0 +190 17 optimizer.lr 0.031950035385221585 +190 17 negative_sampler.num_negs_per_pos 75.0 +190 17 training.batch_size 0.0 +190 18 model.embedding_dim 1.0 +190 18 optimizer.lr 0.04463125761414228 +190 18 
negative_sampler.num_negs_per_pos 30.0 +190 18 training.batch_size 0.0 +190 19 model.embedding_dim 2.0 +190 19 optimizer.lr 0.004152617228047548 +190 19 negative_sampler.num_negs_per_pos 7.0 +190 19 training.batch_size 0.0 +190 20 model.embedding_dim 0.0 +190 20 optimizer.lr 0.005570929150590304 +190 20 negative_sampler.num_negs_per_pos 83.0 +190 20 training.batch_size 1.0 +190 21 model.embedding_dim 1.0 +190 21 optimizer.lr 0.0014345337783082167 +190 21 negative_sampler.num_negs_per_pos 46.0 +190 21 training.batch_size 1.0 +190 22 model.embedding_dim 2.0 +190 22 optimizer.lr 0.0031222685398639353 +190 22 negative_sampler.num_negs_per_pos 4.0 +190 22 training.batch_size 1.0 +190 23 model.embedding_dim 0.0 +190 23 optimizer.lr 0.09531473248573584 +190 23 negative_sampler.num_negs_per_pos 63.0 +190 23 training.batch_size 0.0 +190 24 model.embedding_dim 0.0 +190 24 optimizer.lr 0.0013495889800639463 +190 24 negative_sampler.num_negs_per_pos 86.0 +190 24 training.batch_size 1.0 +190 25 model.embedding_dim 2.0 +190 25 optimizer.lr 0.013607573275998526 +190 25 negative_sampler.num_negs_per_pos 27.0 +190 25 training.batch_size 1.0 +190 26 model.embedding_dim 2.0 +190 26 optimizer.lr 0.012690908139897565 +190 26 negative_sampler.num_negs_per_pos 55.0 +190 26 training.batch_size 1.0 +190 27 model.embedding_dim 2.0 +190 27 optimizer.lr 0.029344677357951696 +190 27 negative_sampler.num_negs_per_pos 74.0 +190 27 training.batch_size 1.0 +190 28 model.embedding_dim 1.0 +190 28 optimizer.lr 0.020741054036514495 +190 28 negative_sampler.num_negs_per_pos 88.0 +190 28 training.batch_size 1.0 +190 29 model.embedding_dim 2.0 +190 29 optimizer.lr 0.06203684574463218 +190 29 negative_sampler.num_negs_per_pos 85.0 +190 29 training.batch_size 1.0 +190 30 model.embedding_dim 2.0 +190 30 optimizer.lr 0.013411869847069342 +190 30 negative_sampler.num_negs_per_pos 51.0 +190 30 training.batch_size 1.0 +190 31 model.embedding_dim 2.0 +190 31 optimizer.lr 0.002245843425274309 +190 31 
negative_sampler.num_negs_per_pos 5.0 +190 31 training.batch_size 0.0 +190 32 model.embedding_dim 1.0 +190 32 optimizer.lr 0.09830688030240134 +190 32 negative_sampler.num_negs_per_pos 14.0 +190 32 training.batch_size 0.0 +190 33 model.embedding_dim 0.0 +190 33 optimizer.lr 0.006073342280615908 +190 33 negative_sampler.num_negs_per_pos 88.0 +190 33 training.batch_size 0.0 +190 34 model.embedding_dim 2.0 +190 34 optimizer.lr 0.002747442568119251 +190 34 negative_sampler.num_negs_per_pos 37.0 +190 34 training.batch_size 1.0 +190 35 model.embedding_dim 1.0 +190 35 optimizer.lr 0.037151409969624655 +190 35 negative_sampler.num_negs_per_pos 81.0 +190 35 training.batch_size 1.0 +190 36 model.embedding_dim 0.0 +190 36 optimizer.lr 0.010149629179666038 +190 36 negative_sampler.num_negs_per_pos 95.0 +190 36 training.batch_size 0.0 +190 37 model.embedding_dim 2.0 +190 37 optimizer.lr 0.02165073129012688 +190 37 negative_sampler.num_negs_per_pos 2.0 +190 37 training.batch_size 1.0 +190 38 model.embedding_dim 1.0 +190 38 optimizer.lr 0.002988279557613538 +190 38 negative_sampler.num_negs_per_pos 15.0 +190 38 training.batch_size 0.0 +190 39 model.embedding_dim 0.0 +190 39 optimizer.lr 0.04377828608936222 +190 39 negative_sampler.num_negs_per_pos 0.0 +190 39 training.batch_size 2.0 +190 40 model.embedding_dim 2.0 +190 40 optimizer.lr 0.002320127512012889 +190 40 negative_sampler.num_negs_per_pos 44.0 +190 40 training.batch_size 2.0 +190 41 model.embedding_dim 2.0 +190 41 optimizer.lr 0.008469258639479695 +190 41 negative_sampler.num_negs_per_pos 37.0 +190 41 training.batch_size 2.0 +190 42 model.embedding_dim 1.0 +190 42 optimizer.lr 0.005687913698576258 +190 42 negative_sampler.num_negs_per_pos 5.0 +190 42 training.batch_size 0.0 +190 43 model.embedding_dim 1.0 +190 43 optimizer.lr 0.024701859012325054 +190 43 negative_sampler.num_negs_per_pos 32.0 +190 43 training.batch_size 2.0 +190 44 model.embedding_dim 0.0 +190 44 optimizer.lr 0.007359204395070761 +190 44 
negative_sampler.num_negs_per_pos 16.0 +190 44 training.batch_size 0.0 +190 45 model.embedding_dim 0.0 +190 45 optimizer.lr 0.04570435008335 +190 45 negative_sampler.num_negs_per_pos 41.0 +190 45 training.batch_size 0.0 +190 46 model.embedding_dim 0.0 +190 46 optimizer.lr 0.01083043224302704 +190 46 negative_sampler.num_negs_per_pos 60.0 +190 46 training.batch_size 0.0 +190 47 model.embedding_dim 0.0 +190 47 optimizer.lr 0.01499491456523428 +190 47 negative_sampler.num_negs_per_pos 43.0 +190 47 training.batch_size 1.0 +190 48 model.embedding_dim 0.0 +190 48 optimizer.lr 0.0010030005340756906 +190 48 negative_sampler.num_negs_per_pos 12.0 +190 48 training.batch_size 2.0 +190 49 model.embedding_dim 1.0 +190 49 optimizer.lr 0.001680848001717058 +190 49 negative_sampler.num_negs_per_pos 65.0 +190 49 training.batch_size 2.0 +190 50 model.embedding_dim 2.0 +190 50 optimizer.lr 0.08974088903301267 +190 50 negative_sampler.num_negs_per_pos 99.0 +190 50 training.batch_size 0.0 +190 51 model.embedding_dim 0.0 +190 51 optimizer.lr 0.01677225600544162 +190 51 negative_sampler.num_negs_per_pos 66.0 +190 51 training.batch_size 0.0 +190 52 model.embedding_dim 2.0 +190 52 optimizer.lr 0.01994395884722618 +190 52 negative_sampler.num_negs_per_pos 7.0 +190 52 training.batch_size 1.0 +190 53 model.embedding_dim 2.0 +190 53 optimizer.lr 0.01141030290980671 +190 53 negative_sampler.num_negs_per_pos 80.0 +190 53 training.batch_size 2.0 +190 54 model.embedding_dim 2.0 +190 54 optimizer.lr 0.0018529116221537509 +190 54 negative_sampler.num_negs_per_pos 28.0 +190 54 training.batch_size 1.0 +190 55 model.embedding_dim 1.0 +190 55 optimizer.lr 0.0028362560885794703 +190 55 negative_sampler.num_negs_per_pos 78.0 +190 55 training.batch_size 1.0 +190 56 model.embedding_dim 2.0 +190 56 optimizer.lr 0.022337484381923595 +190 56 negative_sampler.num_negs_per_pos 58.0 +190 56 training.batch_size 0.0 +190 57 model.embedding_dim 2.0 +190 57 optimizer.lr 0.035830848086242734 +190 57 
negative_sampler.num_negs_per_pos 75.0 +190 57 training.batch_size 1.0 +190 58 model.embedding_dim 0.0 +190 58 optimizer.lr 0.02113432571709826 +190 58 negative_sampler.num_negs_per_pos 54.0 +190 58 training.batch_size 1.0 +190 59 model.embedding_dim 0.0 +190 59 optimizer.lr 0.03135916964801308 +190 59 negative_sampler.num_negs_per_pos 86.0 +190 59 training.batch_size 0.0 +190 60 model.embedding_dim 0.0 +190 60 optimizer.lr 0.0174153899296397 +190 60 negative_sampler.num_negs_per_pos 6.0 +190 60 training.batch_size 1.0 +190 61 model.embedding_dim 1.0 +190 61 optimizer.lr 0.023012717315624896 +190 61 negative_sampler.num_negs_per_pos 90.0 +190 61 training.batch_size 0.0 +190 62 model.embedding_dim 2.0 +190 62 optimizer.lr 0.008960880061129927 +190 62 negative_sampler.num_negs_per_pos 19.0 +190 62 training.batch_size 2.0 +190 63 model.embedding_dim 0.0 +190 63 optimizer.lr 0.08182491711625246 +190 63 negative_sampler.num_negs_per_pos 86.0 +190 63 training.batch_size 1.0 +190 64 model.embedding_dim 1.0 +190 64 optimizer.lr 0.026561242965603627 +190 64 negative_sampler.num_negs_per_pos 86.0 +190 64 training.batch_size 0.0 +190 65 model.embedding_dim 2.0 +190 65 optimizer.lr 0.0015113525745402653 +190 65 negative_sampler.num_negs_per_pos 48.0 +190 65 training.batch_size 2.0 +190 66 model.embedding_dim 2.0 +190 66 optimizer.lr 0.02756167404439104 +190 66 negative_sampler.num_negs_per_pos 12.0 +190 66 training.batch_size 1.0 +190 67 model.embedding_dim 0.0 +190 67 optimizer.lr 0.01976945199714054 +190 67 negative_sampler.num_negs_per_pos 36.0 +190 67 training.batch_size 0.0 +190 68 model.embedding_dim 0.0 +190 68 optimizer.lr 0.06508474640160118 +190 68 negative_sampler.num_negs_per_pos 51.0 +190 68 training.batch_size 2.0 +190 69 model.embedding_dim 1.0 +190 69 optimizer.lr 0.0011504495101713973 +190 69 negative_sampler.num_negs_per_pos 3.0 +190 69 training.batch_size 1.0 +190 70 model.embedding_dim 2.0 +190 70 optimizer.lr 0.0014048839610062455 +190 70 
negative_sampler.num_negs_per_pos 23.0 +190 70 training.batch_size 0.0 +190 71 model.embedding_dim 1.0 +190 71 optimizer.lr 0.029382056376837683 +190 71 negative_sampler.num_negs_per_pos 21.0 +190 71 training.batch_size 1.0 +190 72 model.embedding_dim 2.0 +190 72 optimizer.lr 0.05309670188363819 +190 72 negative_sampler.num_negs_per_pos 83.0 +190 72 training.batch_size 1.0 +190 73 model.embedding_dim 0.0 +190 73 optimizer.lr 0.003775193289145577 +190 73 negative_sampler.num_negs_per_pos 2.0 +190 73 training.batch_size 0.0 +190 74 model.embedding_dim 2.0 +190 74 optimizer.lr 0.019812418108676486 +190 74 negative_sampler.num_negs_per_pos 54.0 +190 74 training.batch_size 0.0 +190 75 model.embedding_dim 1.0 +190 75 optimizer.lr 0.0021568616998501816 +190 75 negative_sampler.num_negs_per_pos 77.0 +190 75 training.batch_size 1.0 +190 76 model.embedding_dim 0.0 +190 76 optimizer.lr 0.011788759803614074 +190 76 negative_sampler.num_negs_per_pos 35.0 +190 76 training.batch_size 2.0 +190 77 model.embedding_dim 2.0 +190 77 optimizer.lr 0.008417281974591245 +190 77 negative_sampler.num_negs_per_pos 39.0 +190 77 training.batch_size 2.0 +190 78 model.embedding_dim 0.0 +190 78 optimizer.lr 0.0779532140384854 +190 78 negative_sampler.num_negs_per_pos 98.0 +190 78 training.batch_size 0.0 +190 79 model.embedding_dim 2.0 +190 79 optimizer.lr 0.0074041364858640725 +190 79 negative_sampler.num_negs_per_pos 78.0 +190 79 training.batch_size 2.0 +190 80 model.embedding_dim 1.0 +190 80 optimizer.lr 0.002616665450748491 +190 80 negative_sampler.num_negs_per_pos 86.0 +190 80 training.batch_size 0.0 +190 81 model.embedding_dim 0.0 +190 81 optimizer.lr 0.003900692004120857 +190 81 negative_sampler.num_negs_per_pos 96.0 +190 81 training.batch_size 2.0 +190 82 model.embedding_dim 2.0 +190 82 optimizer.lr 0.001955148319783313 +190 82 negative_sampler.num_negs_per_pos 19.0 +190 82 training.batch_size 0.0 +190 83 model.embedding_dim 0.0 +190 83 optimizer.lr 0.004598526757466984 +190 83 
negative_sampler.num_negs_per_pos 60.0 +190 83 training.batch_size 2.0 +190 84 model.embedding_dim 0.0 +190 84 optimizer.lr 0.009345505055633608 +190 84 negative_sampler.num_negs_per_pos 63.0 +190 84 training.batch_size 2.0 +190 85 model.embedding_dim 1.0 +190 85 optimizer.lr 0.006184570955754971 +190 85 negative_sampler.num_negs_per_pos 49.0 +190 85 training.batch_size 1.0 +190 86 model.embedding_dim 2.0 +190 86 optimizer.lr 0.02040681836241899 +190 86 negative_sampler.num_negs_per_pos 10.0 +190 86 training.batch_size 2.0 +190 87 model.embedding_dim 0.0 +190 87 optimizer.lr 0.0027605508017126824 +190 87 negative_sampler.num_negs_per_pos 0.0 +190 87 training.batch_size 1.0 +190 88 model.embedding_dim 0.0 +190 88 optimizer.lr 0.0010943098753361691 +190 88 negative_sampler.num_negs_per_pos 31.0 +190 88 training.batch_size 2.0 +190 89 model.embedding_dim 2.0 +190 89 optimizer.lr 0.005879956086617682 +190 89 negative_sampler.num_negs_per_pos 96.0 +190 89 training.batch_size 2.0 +190 90 model.embedding_dim 1.0 +190 90 optimizer.lr 0.027686182893668573 +190 90 negative_sampler.num_negs_per_pos 2.0 +190 90 training.batch_size 2.0 +190 91 model.embedding_dim 1.0 +190 91 optimizer.lr 0.0032348156865946846 +190 91 negative_sampler.num_negs_per_pos 29.0 +190 91 training.batch_size 2.0 +190 92 model.embedding_dim 0.0 +190 92 optimizer.lr 0.006068594847372682 +190 92 negative_sampler.num_negs_per_pos 42.0 +190 92 training.batch_size 2.0 +190 93 model.embedding_dim 1.0 +190 93 optimizer.lr 0.0017456659608135197 +190 93 negative_sampler.num_negs_per_pos 55.0 +190 93 training.batch_size 0.0 +190 94 model.embedding_dim 2.0 +190 94 optimizer.lr 0.006863234222542147 +190 94 negative_sampler.num_negs_per_pos 11.0 +190 94 training.batch_size 2.0 +190 95 model.embedding_dim 0.0 +190 95 optimizer.lr 0.0020792537962656273 +190 95 negative_sampler.num_negs_per_pos 26.0 +190 95 training.batch_size 1.0 +190 96 model.embedding_dim 1.0 +190 96 optimizer.lr 0.0033501197319531152 +190 96 
negative_sampler.num_negs_per_pos 76.0 +190 96 training.batch_size 2.0 +190 97 model.embedding_dim 2.0 +190 97 optimizer.lr 0.0201782791817826 +190 97 negative_sampler.num_negs_per_pos 61.0 +190 97 training.batch_size 2.0 +190 98 model.embedding_dim 0.0 +190 98 optimizer.lr 0.001189658715227153 +190 98 negative_sampler.num_negs_per_pos 7.0 +190 98 training.batch_size 2.0 +190 99 model.embedding_dim 0.0 +190 99 optimizer.lr 0.04418728164794929 +190 99 negative_sampler.num_negs_per_pos 6.0 +190 99 training.batch_size 0.0 +190 100 model.embedding_dim 1.0 +190 100 optimizer.lr 0.09204895086942293 +190 100 negative_sampler.num_negs_per_pos 5.0 +190 100 training.batch_size 1.0 +190 1 dataset """kinships""" +190 1 model """distmult""" +190 1 loss """bceaftersigmoid""" +190 1 regularizer """no""" +190 1 optimizer """adam""" +190 1 training_loop """owa""" +190 1 negative_sampler """basic""" +190 1 evaluator """rankbased""" +190 2 dataset """kinships""" +190 2 model """distmult""" +190 2 loss """bceaftersigmoid""" +190 2 regularizer """no""" +190 2 optimizer """adam""" +190 2 training_loop """owa""" +190 2 negative_sampler """basic""" +190 2 evaluator """rankbased""" +190 3 dataset """kinships""" +190 3 model """distmult""" +190 3 loss """bceaftersigmoid""" +190 3 regularizer """no""" +190 3 optimizer """adam""" +190 3 training_loop """owa""" +190 3 negative_sampler """basic""" +190 3 evaluator """rankbased""" +190 4 dataset """kinships""" +190 4 model """distmult""" +190 4 loss """bceaftersigmoid""" +190 4 regularizer """no""" +190 4 optimizer """adam""" +190 4 training_loop """owa""" +190 4 negative_sampler """basic""" +190 4 evaluator """rankbased""" +190 5 dataset """kinships""" +190 5 model """distmult""" +190 5 loss """bceaftersigmoid""" +190 5 regularizer """no""" +190 5 optimizer """adam""" +190 5 training_loop """owa""" +190 5 negative_sampler """basic""" +190 5 evaluator """rankbased""" +190 6 dataset """kinships""" +190 6 model """distmult""" +190 6 loss 
"""bceaftersigmoid""" +190 6 regularizer """no""" +190 6 optimizer """adam""" +190 6 training_loop """owa""" +190 6 negative_sampler """basic""" +190 6 evaluator """rankbased""" +190 7 dataset """kinships""" +190 7 model """distmult""" +190 7 loss """bceaftersigmoid""" +190 7 regularizer """no""" +190 7 optimizer """adam""" +190 7 training_loop """owa""" +190 7 negative_sampler """basic""" +190 7 evaluator """rankbased""" +190 8 dataset """kinships""" +190 8 model """distmult""" +190 8 loss """bceaftersigmoid""" +190 8 regularizer """no""" +190 8 optimizer """adam""" +190 8 training_loop """owa""" +190 8 negative_sampler """basic""" +190 8 evaluator """rankbased""" +190 9 dataset """kinships""" +190 9 model """distmult""" +190 9 loss """bceaftersigmoid""" +190 9 regularizer """no""" +190 9 optimizer """adam""" +190 9 training_loop """owa""" +190 9 negative_sampler """basic""" +190 9 evaluator """rankbased""" +190 10 dataset """kinships""" +190 10 model """distmult""" +190 10 loss """bceaftersigmoid""" +190 10 regularizer """no""" +190 10 optimizer """adam""" +190 10 training_loop """owa""" +190 10 negative_sampler """basic""" +190 10 evaluator """rankbased""" +190 11 dataset """kinships""" +190 11 model """distmult""" +190 11 loss """bceaftersigmoid""" +190 11 regularizer """no""" +190 11 optimizer """adam""" +190 11 training_loop """owa""" +190 11 negative_sampler """basic""" +190 11 evaluator """rankbased""" +190 12 dataset """kinships""" +190 12 model """distmult""" +190 12 loss """bceaftersigmoid""" +190 12 regularizer """no""" +190 12 optimizer """adam""" +190 12 training_loop """owa""" +190 12 negative_sampler """basic""" +190 12 evaluator """rankbased""" +190 13 dataset """kinships""" +190 13 model """distmult""" +190 13 loss """bceaftersigmoid""" +190 13 regularizer """no""" +190 13 optimizer """adam""" +190 13 training_loop """owa""" +190 13 negative_sampler """basic""" +190 13 evaluator """rankbased""" +190 14 dataset """kinships""" +190 14 model 
"""distmult""" +190 14 loss """bceaftersigmoid""" +190 14 regularizer """no""" +190 14 optimizer """adam""" +190 14 training_loop """owa""" +190 14 negative_sampler """basic""" +190 14 evaluator """rankbased""" +190 15 dataset """kinships""" +190 15 model """distmult""" +190 15 loss """bceaftersigmoid""" +190 15 regularizer """no""" +190 15 optimizer """adam""" +190 15 training_loop """owa""" +190 15 negative_sampler """basic""" +190 15 evaluator """rankbased""" +190 16 dataset """kinships""" +190 16 model """distmult""" +190 16 loss """bceaftersigmoid""" +190 16 regularizer """no""" +190 16 optimizer """adam""" +190 16 training_loop """owa""" +190 16 negative_sampler """basic""" +190 16 evaluator """rankbased""" +190 17 dataset """kinships""" +190 17 model """distmult""" +190 17 loss """bceaftersigmoid""" +190 17 regularizer """no""" +190 17 optimizer """adam""" +190 17 training_loop """owa""" +190 17 negative_sampler """basic""" +190 17 evaluator """rankbased""" +190 18 dataset """kinships""" +190 18 model """distmult""" +190 18 loss """bceaftersigmoid""" +190 18 regularizer """no""" +190 18 optimizer """adam""" +190 18 training_loop """owa""" +190 18 negative_sampler """basic""" +190 18 evaluator """rankbased""" +190 19 dataset """kinships""" +190 19 model """distmult""" +190 19 loss """bceaftersigmoid""" +190 19 regularizer """no""" +190 19 optimizer """adam""" +190 19 training_loop """owa""" +190 19 negative_sampler """basic""" +190 19 evaluator """rankbased""" +190 20 dataset """kinships""" +190 20 model """distmult""" +190 20 loss """bceaftersigmoid""" +190 20 regularizer """no""" +190 20 optimizer """adam""" +190 20 training_loop """owa""" +190 20 negative_sampler """basic""" +190 20 evaluator """rankbased""" +190 21 dataset """kinships""" +190 21 model """distmult""" +190 21 loss """bceaftersigmoid""" +190 21 regularizer """no""" +190 21 optimizer """adam""" +190 21 training_loop """owa""" +190 21 negative_sampler """basic""" +190 21 evaluator 
"""rankbased""" +190 22 dataset """kinships""" +190 22 model """distmult""" +190 22 loss """bceaftersigmoid""" +190 22 regularizer """no""" +190 22 optimizer """adam""" +190 22 training_loop """owa""" +190 22 negative_sampler """basic""" +190 22 evaluator """rankbased""" +190 23 dataset """kinships""" +190 23 model """distmult""" +190 23 loss """bceaftersigmoid""" +190 23 regularizer """no""" +190 23 optimizer """adam""" +190 23 training_loop """owa""" +190 23 negative_sampler """basic""" +190 23 evaluator """rankbased""" +190 24 dataset """kinships""" +190 24 model """distmult""" +190 24 loss """bceaftersigmoid""" +190 24 regularizer """no""" +190 24 optimizer """adam""" +190 24 training_loop """owa""" +190 24 negative_sampler """basic""" +190 24 evaluator """rankbased""" +190 25 dataset """kinships""" +190 25 model """distmult""" +190 25 loss """bceaftersigmoid""" +190 25 regularizer """no""" +190 25 optimizer """adam""" +190 25 training_loop """owa""" +190 25 negative_sampler """basic""" +190 25 evaluator """rankbased""" +190 26 dataset """kinships""" +190 26 model """distmult""" +190 26 loss """bceaftersigmoid""" +190 26 regularizer """no""" +190 26 optimizer """adam""" +190 26 training_loop """owa""" +190 26 negative_sampler """basic""" +190 26 evaluator """rankbased""" +190 27 dataset """kinships""" +190 27 model """distmult""" +190 27 loss """bceaftersigmoid""" +190 27 regularizer """no""" +190 27 optimizer """adam""" +190 27 training_loop """owa""" +190 27 negative_sampler """basic""" +190 27 evaluator """rankbased""" +190 28 dataset """kinships""" +190 28 model """distmult""" +190 28 loss """bceaftersigmoid""" +190 28 regularizer """no""" +190 28 optimizer """adam""" +190 28 training_loop """owa""" +190 28 negative_sampler """basic""" +190 28 evaluator """rankbased""" +190 29 dataset """kinships""" +190 29 model """distmult""" +190 29 loss """bceaftersigmoid""" +190 29 regularizer """no""" +190 29 optimizer """adam""" +190 29 training_loop """owa""" +190 
29 negative_sampler """basic""" +190 29 evaluator """rankbased""" +190 30 dataset """kinships""" +190 30 model """distmult""" +190 30 loss """bceaftersigmoid""" +190 30 regularizer """no""" +190 30 optimizer """adam""" +190 30 training_loop """owa""" +190 30 negative_sampler """basic""" +190 30 evaluator """rankbased""" +190 31 dataset """kinships""" +190 31 model """distmult""" +190 31 loss """bceaftersigmoid""" +190 31 regularizer """no""" +190 31 optimizer """adam""" +190 31 training_loop """owa""" +190 31 negative_sampler """basic""" +190 31 evaluator """rankbased""" +190 32 dataset """kinships""" +190 32 model """distmult""" +190 32 loss """bceaftersigmoid""" +190 32 regularizer """no""" +190 32 optimizer """adam""" +190 32 training_loop """owa""" +190 32 negative_sampler """basic""" +190 32 evaluator """rankbased""" +190 33 dataset """kinships""" +190 33 model """distmult""" +190 33 loss """bceaftersigmoid""" +190 33 regularizer """no""" +190 33 optimizer """adam""" +190 33 training_loop """owa""" +190 33 negative_sampler """basic""" +190 33 evaluator """rankbased""" +190 34 dataset """kinships""" +190 34 model """distmult""" +190 34 loss """bceaftersigmoid""" +190 34 regularizer """no""" +190 34 optimizer """adam""" +190 34 training_loop """owa""" +190 34 negative_sampler """basic""" +190 34 evaluator """rankbased""" +190 35 dataset """kinships""" +190 35 model """distmult""" +190 35 loss """bceaftersigmoid""" +190 35 regularizer """no""" +190 35 optimizer """adam""" +190 35 training_loop """owa""" +190 35 negative_sampler """basic""" +190 35 evaluator """rankbased""" +190 36 dataset """kinships""" +190 36 model """distmult""" +190 36 loss """bceaftersigmoid""" +190 36 regularizer """no""" +190 36 optimizer """adam""" +190 36 training_loop """owa""" +190 36 negative_sampler """basic""" +190 36 evaluator """rankbased""" +190 37 dataset """kinships""" +190 37 model """distmult""" +190 37 loss """bceaftersigmoid""" +190 37 regularizer """no""" +190 37 optimizer 
"""adam""" +190 37 training_loop """owa""" +190 37 negative_sampler """basic""" +190 37 evaluator """rankbased""" +190 38 dataset """kinships""" +190 38 model """distmult""" +190 38 loss """bceaftersigmoid""" +190 38 regularizer """no""" +190 38 optimizer """adam""" +190 38 training_loop """owa""" +190 38 negative_sampler """basic""" +190 38 evaluator """rankbased""" +190 39 dataset """kinships""" +190 39 model """distmult""" +190 39 loss """bceaftersigmoid""" +190 39 regularizer """no""" +190 39 optimizer """adam""" +190 39 training_loop """owa""" +190 39 negative_sampler """basic""" +190 39 evaluator """rankbased""" +190 40 dataset """kinships""" +190 40 model """distmult""" +190 40 loss """bceaftersigmoid""" +190 40 regularizer """no""" +190 40 optimizer """adam""" +190 40 training_loop """owa""" +190 40 negative_sampler """basic""" +190 40 evaluator """rankbased""" +190 41 dataset """kinships""" +190 41 model """distmult""" +190 41 loss """bceaftersigmoid""" +190 41 regularizer """no""" +190 41 optimizer """adam""" +190 41 training_loop """owa""" +190 41 negative_sampler """basic""" +190 41 evaluator """rankbased""" +190 42 dataset """kinships""" +190 42 model """distmult""" +190 42 loss """bceaftersigmoid""" +190 42 regularizer """no""" +190 42 optimizer """adam""" +190 42 training_loop """owa""" +190 42 negative_sampler """basic""" +190 42 evaluator """rankbased""" +190 43 dataset """kinships""" +190 43 model """distmult""" +190 43 loss """bceaftersigmoid""" +190 43 regularizer """no""" +190 43 optimizer """adam""" +190 43 training_loop """owa""" +190 43 negative_sampler """basic""" +190 43 evaluator """rankbased""" +190 44 dataset """kinships""" +190 44 model """distmult""" +190 44 loss """bceaftersigmoid""" +190 44 regularizer """no""" +190 44 optimizer """adam""" +190 44 training_loop """owa""" +190 44 negative_sampler """basic""" +190 44 evaluator """rankbased""" +190 45 dataset """kinships""" +190 45 model """distmult""" +190 45 loss 
"""bceaftersigmoid""" +190 45 regularizer """no""" +190 45 optimizer """adam""" +190 45 training_loop """owa""" +190 45 negative_sampler """basic""" +190 45 evaluator """rankbased""" +190 46 dataset """kinships""" +190 46 model """distmult""" +190 46 loss """bceaftersigmoid""" +190 46 regularizer """no""" +190 46 optimizer """adam""" +190 46 training_loop """owa""" +190 46 negative_sampler """basic""" +190 46 evaluator """rankbased""" +190 47 dataset """kinships""" +190 47 model """distmult""" +190 47 loss """bceaftersigmoid""" +190 47 regularizer """no""" +190 47 optimizer """adam""" +190 47 training_loop """owa""" +190 47 negative_sampler """basic""" +190 47 evaluator """rankbased""" +190 48 dataset """kinships""" +190 48 model """distmult""" +190 48 loss """bceaftersigmoid""" +190 48 regularizer """no""" +190 48 optimizer """adam""" +190 48 training_loop """owa""" +190 48 negative_sampler """basic""" +190 48 evaluator """rankbased""" +190 49 dataset """kinships""" +190 49 model """distmult""" +190 49 loss """bceaftersigmoid""" +190 49 regularizer """no""" +190 49 optimizer """adam""" +190 49 training_loop """owa""" +190 49 negative_sampler """basic""" +190 49 evaluator """rankbased""" +190 50 dataset """kinships""" +190 50 model """distmult""" +190 50 loss """bceaftersigmoid""" +190 50 regularizer """no""" +190 50 optimizer """adam""" +190 50 training_loop """owa""" +190 50 negative_sampler """basic""" +190 50 evaluator """rankbased""" +190 51 dataset """kinships""" +190 51 model """distmult""" +190 51 loss """bceaftersigmoid""" +190 51 regularizer """no""" +190 51 optimizer """adam""" +190 51 training_loop """owa""" +190 51 negative_sampler """basic""" +190 51 evaluator """rankbased""" +190 52 dataset """kinships""" +190 52 model """distmult""" +190 52 loss """bceaftersigmoid""" +190 52 regularizer """no""" +190 52 optimizer """adam""" +190 52 training_loop """owa""" +190 52 negative_sampler """basic""" +190 52 evaluator """rankbased""" +190 53 dataset 
"""kinships""" +190 53 model """distmult""" +190 53 loss """bceaftersigmoid""" +190 53 regularizer """no""" +190 53 optimizer """adam""" +190 53 training_loop """owa""" +190 53 negative_sampler """basic""" +190 53 evaluator """rankbased""" +190 54 dataset """kinships""" +190 54 model """distmult""" +190 54 loss """bceaftersigmoid""" +190 54 regularizer """no""" +190 54 optimizer """adam""" +190 54 training_loop """owa""" +190 54 negative_sampler """basic""" +190 54 evaluator """rankbased""" +190 55 dataset """kinships""" +190 55 model """distmult""" +190 55 loss """bceaftersigmoid""" +190 55 regularizer """no""" +190 55 optimizer """adam""" +190 55 training_loop """owa""" +190 55 negative_sampler """basic""" +190 55 evaluator """rankbased""" +190 56 dataset """kinships""" +190 56 model """distmult""" +190 56 loss """bceaftersigmoid""" +190 56 regularizer """no""" +190 56 optimizer """adam""" +190 56 training_loop """owa""" +190 56 negative_sampler """basic""" +190 56 evaluator """rankbased""" +190 57 dataset """kinships""" +190 57 model """distmult""" +190 57 loss """bceaftersigmoid""" +190 57 regularizer """no""" +190 57 optimizer """adam""" +190 57 training_loop """owa""" +190 57 negative_sampler """basic""" +190 57 evaluator """rankbased""" +190 58 dataset """kinships""" +190 58 model """distmult""" +190 58 loss """bceaftersigmoid""" +190 58 regularizer """no""" +190 58 optimizer """adam""" +190 58 training_loop """owa""" +190 58 negative_sampler """basic""" +190 58 evaluator """rankbased""" +190 59 dataset """kinships""" +190 59 model """distmult""" +190 59 loss """bceaftersigmoid""" +190 59 regularizer """no""" +190 59 optimizer """adam""" +190 59 training_loop """owa""" +190 59 negative_sampler """basic""" +190 59 evaluator """rankbased""" +190 60 dataset """kinships""" +190 60 model """distmult""" +190 60 loss """bceaftersigmoid""" +190 60 regularizer """no""" +190 60 optimizer """adam""" +190 60 training_loop """owa""" +190 60 negative_sampler """basic""" 
+190 60 evaluator """rankbased""" +190 61 dataset """kinships""" +190 61 model """distmult""" +190 61 loss """bceaftersigmoid""" +190 61 regularizer """no""" +190 61 optimizer """adam""" +190 61 training_loop """owa""" +190 61 negative_sampler """basic""" +190 61 evaluator """rankbased""" +190 62 dataset """kinships""" +190 62 model """distmult""" +190 62 loss """bceaftersigmoid""" +190 62 regularizer """no""" +190 62 optimizer """adam""" +190 62 training_loop """owa""" +190 62 negative_sampler """basic""" +190 62 evaluator """rankbased""" +190 63 dataset """kinships""" +190 63 model """distmult""" +190 63 loss """bceaftersigmoid""" +190 63 regularizer """no""" +190 63 optimizer """adam""" +190 63 training_loop """owa""" +190 63 negative_sampler """basic""" +190 63 evaluator """rankbased""" +190 64 dataset """kinships""" +190 64 model """distmult""" +190 64 loss """bceaftersigmoid""" +190 64 regularizer """no""" +190 64 optimizer """adam""" +190 64 training_loop """owa""" +190 64 negative_sampler """basic""" +190 64 evaluator """rankbased""" +190 65 dataset """kinships""" +190 65 model """distmult""" +190 65 loss """bceaftersigmoid""" +190 65 regularizer """no""" +190 65 optimizer """adam""" +190 65 training_loop """owa""" +190 65 negative_sampler """basic""" +190 65 evaluator """rankbased""" +190 66 dataset """kinships""" +190 66 model """distmult""" +190 66 loss """bceaftersigmoid""" +190 66 regularizer """no""" +190 66 optimizer """adam""" +190 66 training_loop """owa""" +190 66 negative_sampler """basic""" +190 66 evaluator """rankbased""" +190 67 dataset """kinships""" +190 67 model """distmult""" +190 67 loss """bceaftersigmoid""" +190 67 regularizer """no""" +190 67 optimizer """adam""" +190 67 training_loop """owa""" +190 67 negative_sampler """basic""" +190 67 evaluator """rankbased""" +190 68 dataset """kinships""" +190 68 model """distmult""" +190 68 loss """bceaftersigmoid""" +190 68 regularizer """no""" +190 68 optimizer """adam""" +190 68 
training_loop """owa""" +190 68 negative_sampler """basic""" +190 68 evaluator """rankbased""" +190 69 dataset """kinships""" +190 69 model """distmult""" +190 69 loss """bceaftersigmoid""" +190 69 regularizer """no""" +190 69 optimizer """adam""" +190 69 training_loop """owa""" +190 69 negative_sampler """basic""" +190 69 evaluator """rankbased""" +190 70 dataset """kinships""" +190 70 model """distmult""" +190 70 loss """bceaftersigmoid""" +190 70 regularizer """no""" +190 70 optimizer """adam""" +190 70 training_loop """owa""" +190 70 negative_sampler """basic""" +190 70 evaluator """rankbased""" +190 71 dataset """kinships""" +190 71 model """distmult""" +190 71 loss """bceaftersigmoid""" +190 71 regularizer """no""" +190 71 optimizer """adam""" +190 71 training_loop """owa""" +190 71 negative_sampler """basic""" +190 71 evaluator """rankbased""" +190 72 dataset """kinships""" +190 72 model """distmult""" +190 72 loss """bceaftersigmoid""" +190 72 regularizer """no""" +190 72 optimizer """adam""" +190 72 training_loop """owa""" +190 72 negative_sampler """basic""" +190 72 evaluator """rankbased""" +190 73 dataset """kinships""" +190 73 model """distmult""" +190 73 loss """bceaftersigmoid""" +190 73 regularizer """no""" +190 73 optimizer """adam""" +190 73 training_loop """owa""" +190 73 negative_sampler """basic""" +190 73 evaluator """rankbased""" +190 74 dataset """kinships""" +190 74 model """distmult""" +190 74 loss """bceaftersigmoid""" +190 74 regularizer """no""" +190 74 optimizer """adam""" +190 74 training_loop """owa""" +190 74 negative_sampler """basic""" +190 74 evaluator """rankbased""" +190 75 dataset """kinships""" +190 75 model """distmult""" +190 75 loss """bceaftersigmoid""" +190 75 regularizer """no""" +190 75 optimizer """adam""" +190 75 training_loop """owa""" +190 75 negative_sampler """basic""" +190 75 evaluator """rankbased""" +190 76 dataset """kinships""" +190 76 model """distmult""" +190 76 loss """bceaftersigmoid""" +190 76 
regularizer """no""" +190 76 optimizer """adam""" +190 76 training_loop """owa""" +190 76 negative_sampler """basic""" +190 76 evaluator """rankbased""" +190 77 dataset """kinships""" +190 77 model """distmult""" +190 77 loss """bceaftersigmoid""" +190 77 regularizer """no""" +190 77 optimizer """adam""" +190 77 training_loop """owa""" +190 77 negative_sampler """basic""" +190 77 evaluator """rankbased""" +190 78 dataset """kinships""" +190 78 model """distmult""" +190 78 loss """bceaftersigmoid""" +190 78 regularizer """no""" +190 78 optimizer """adam""" +190 78 training_loop """owa""" +190 78 negative_sampler """basic""" +190 78 evaluator """rankbased""" +190 79 dataset """kinships""" +190 79 model """distmult""" +190 79 loss """bceaftersigmoid""" +190 79 regularizer """no""" +190 79 optimizer """adam""" +190 79 training_loop """owa""" +190 79 negative_sampler """basic""" +190 79 evaluator """rankbased""" +190 80 dataset """kinships""" +190 80 model """distmult""" +190 80 loss """bceaftersigmoid""" +190 80 regularizer """no""" +190 80 optimizer """adam""" +190 80 training_loop """owa""" +190 80 negative_sampler """basic""" +190 80 evaluator """rankbased""" +190 81 dataset """kinships""" +190 81 model """distmult""" +190 81 loss """bceaftersigmoid""" +190 81 regularizer """no""" +190 81 optimizer """adam""" +190 81 training_loop """owa""" +190 81 negative_sampler """basic""" +190 81 evaluator """rankbased""" +190 82 dataset """kinships""" +190 82 model """distmult""" +190 82 loss """bceaftersigmoid""" +190 82 regularizer """no""" +190 82 optimizer """adam""" +190 82 training_loop """owa""" +190 82 negative_sampler """basic""" +190 82 evaluator """rankbased""" +190 83 dataset """kinships""" +190 83 model """distmult""" +190 83 loss """bceaftersigmoid""" +190 83 regularizer """no""" +190 83 optimizer """adam""" +190 83 training_loop """owa""" +190 83 negative_sampler """basic""" +190 83 evaluator """rankbased""" +190 84 dataset """kinships""" +190 84 model 
"""distmult""" +190 84 loss """bceaftersigmoid""" +190 84 regularizer """no""" +190 84 optimizer """adam""" +190 84 training_loop """owa""" +190 84 negative_sampler """basic""" +190 84 evaluator """rankbased""" +190 85 dataset """kinships""" +190 85 model """distmult""" +190 85 loss """bceaftersigmoid""" +190 85 regularizer """no""" +190 85 optimizer """adam""" +190 85 training_loop """owa""" +190 85 negative_sampler """basic""" +190 85 evaluator """rankbased""" +190 86 dataset """kinships""" +190 86 model """distmult""" +190 86 loss """bceaftersigmoid""" +190 86 regularizer """no""" +190 86 optimizer """adam""" +190 86 training_loop """owa""" +190 86 negative_sampler """basic""" +190 86 evaluator """rankbased""" +190 87 dataset """kinships""" +190 87 model """distmult""" +190 87 loss """bceaftersigmoid""" +190 87 regularizer """no""" +190 87 optimizer """adam""" +190 87 training_loop """owa""" +190 87 negative_sampler """basic""" +190 87 evaluator """rankbased""" +190 88 dataset """kinships""" +190 88 model """distmult""" +190 88 loss """bceaftersigmoid""" +190 88 regularizer """no""" +190 88 optimizer """adam""" +190 88 training_loop """owa""" +190 88 negative_sampler """basic""" +190 88 evaluator """rankbased""" +190 89 dataset """kinships""" +190 89 model """distmult""" +190 89 loss """bceaftersigmoid""" +190 89 regularizer """no""" +190 89 optimizer """adam""" +190 89 training_loop """owa""" +190 89 negative_sampler """basic""" +190 89 evaluator """rankbased""" +190 90 dataset """kinships""" +190 90 model """distmult""" +190 90 loss """bceaftersigmoid""" +190 90 regularizer """no""" +190 90 optimizer """adam""" +190 90 training_loop """owa""" +190 90 negative_sampler """basic""" +190 90 evaluator """rankbased""" +190 91 dataset """kinships""" +190 91 model """distmult""" +190 91 loss """bceaftersigmoid""" +190 91 regularizer """no""" +190 91 optimizer """adam""" +190 91 training_loop """owa""" +190 91 negative_sampler """basic""" +190 91 evaluator 
"""rankbased""" +190 92 dataset """kinships""" +190 92 model """distmult""" +190 92 loss """bceaftersigmoid""" +190 92 regularizer """no""" +190 92 optimizer """adam""" +190 92 training_loop """owa""" +190 92 negative_sampler """basic""" +190 92 evaluator """rankbased""" +190 93 dataset """kinships""" +190 93 model """distmult""" +190 93 loss """bceaftersigmoid""" +190 93 regularizer """no""" +190 93 optimizer """adam""" +190 93 training_loop """owa""" +190 93 negative_sampler """basic""" +190 93 evaluator """rankbased""" +190 94 dataset """kinships""" +190 94 model """distmult""" +190 94 loss """bceaftersigmoid""" +190 94 regularizer """no""" +190 94 optimizer """adam""" +190 94 training_loop """owa""" +190 94 negative_sampler """basic""" +190 94 evaluator """rankbased""" +190 95 dataset """kinships""" +190 95 model """distmult""" +190 95 loss """bceaftersigmoid""" +190 95 regularizer """no""" +190 95 optimizer """adam""" +190 95 training_loop """owa""" +190 95 negative_sampler """basic""" +190 95 evaluator """rankbased""" +190 96 dataset """kinships""" +190 96 model """distmult""" +190 96 loss """bceaftersigmoid""" +190 96 regularizer """no""" +190 96 optimizer """adam""" +190 96 training_loop """owa""" +190 96 negative_sampler """basic""" +190 96 evaluator """rankbased""" +190 97 dataset """kinships""" +190 97 model """distmult""" +190 97 loss """bceaftersigmoid""" +190 97 regularizer """no""" +190 97 optimizer """adam""" +190 97 training_loop """owa""" +190 97 negative_sampler """basic""" +190 97 evaluator """rankbased""" +190 98 dataset """kinships""" +190 98 model """distmult""" +190 98 loss """bceaftersigmoid""" +190 98 regularizer """no""" +190 98 optimizer """adam""" +190 98 training_loop """owa""" +190 98 negative_sampler """basic""" +190 98 evaluator """rankbased""" +190 99 dataset """kinships""" +190 99 model """distmult""" +190 99 loss """bceaftersigmoid""" +190 99 regularizer """no""" +190 99 optimizer """adam""" +190 99 training_loop """owa""" +190 
99 negative_sampler """basic""" +190 99 evaluator """rankbased""" +190 100 dataset """kinships""" +190 100 model """distmult""" +190 100 loss """bceaftersigmoid""" +190 100 regularizer """no""" +190 100 optimizer """adam""" +190 100 training_loop """owa""" +190 100 negative_sampler """basic""" +190 100 evaluator """rankbased""" +191 1 model.embedding_dim 1.0 +191 1 optimizer.lr 0.0061707663946179835 +191 1 negative_sampler.num_negs_per_pos 32.0 +191 1 training.batch_size 0.0 +191 2 model.embedding_dim 2.0 +191 2 optimizer.lr 0.009495752739665993 +191 2 negative_sampler.num_negs_per_pos 87.0 +191 2 training.batch_size 0.0 +191 3 model.embedding_dim 0.0 +191 3 optimizer.lr 0.002912303309738083 +191 3 negative_sampler.num_negs_per_pos 61.0 +191 3 training.batch_size 0.0 +191 4 model.embedding_dim 0.0 +191 4 optimizer.lr 0.0012384829971998767 +191 4 negative_sampler.num_negs_per_pos 65.0 +191 4 training.batch_size 0.0 +191 5 model.embedding_dim 0.0 +191 5 optimizer.lr 0.010544697537051588 +191 5 negative_sampler.num_negs_per_pos 45.0 +191 5 training.batch_size 2.0 +191 6 model.embedding_dim 2.0 +191 6 optimizer.lr 0.002662338272628539 +191 6 negative_sampler.num_negs_per_pos 1.0 +191 6 training.batch_size 2.0 +191 7 model.embedding_dim 2.0 +191 7 optimizer.lr 0.01532433841798667 +191 7 negative_sampler.num_negs_per_pos 29.0 +191 7 training.batch_size 2.0 +191 8 model.embedding_dim 1.0 +191 8 optimizer.lr 0.0012529439935252285 +191 8 negative_sampler.num_negs_per_pos 83.0 +191 8 training.batch_size 2.0 +191 9 model.embedding_dim 0.0 +191 9 optimizer.lr 0.04740864894342327 +191 9 negative_sampler.num_negs_per_pos 0.0 +191 9 training.batch_size 0.0 +191 10 model.embedding_dim 0.0 +191 10 optimizer.lr 0.07278348096750184 +191 10 negative_sampler.num_negs_per_pos 58.0 +191 10 training.batch_size 2.0 +191 11 model.embedding_dim 0.0 +191 11 optimizer.lr 0.0013466458669362904 +191 11 negative_sampler.num_negs_per_pos 20.0 +191 11 training.batch_size 2.0 +191 12 
model.embedding_dim 1.0 +191 12 optimizer.lr 0.0015724487245281124 +191 12 negative_sampler.num_negs_per_pos 79.0 +191 12 training.batch_size 2.0 +191 13 model.embedding_dim 2.0 +191 13 optimizer.lr 0.009013518918754059 +191 13 negative_sampler.num_negs_per_pos 53.0 +191 13 training.batch_size 2.0 +191 14 model.embedding_dim 2.0 +191 14 optimizer.lr 0.010509004864892485 +191 14 negative_sampler.num_negs_per_pos 87.0 +191 14 training.batch_size 0.0 +191 15 model.embedding_dim 0.0 +191 15 optimizer.lr 0.0011715259688378926 +191 15 negative_sampler.num_negs_per_pos 88.0 +191 15 training.batch_size 1.0 +191 16 model.embedding_dim 0.0 +191 16 optimizer.lr 0.01861861199438543 +191 16 negative_sampler.num_negs_per_pos 21.0 +191 16 training.batch_size 0.0 +191 17 model.embedding_dim 0.0 +191 17 optimizer.lr 0.003514142953465461 +191 17 negative_sampler.num_negs_per_pos 7.0 +191 17 training.batch_size 2.0 +191 18 model.embedding_dim 2.0 +191 18 optimizer.lr 0.007587485283735013 +191 18 negative_sampler.num_negs_per_pos 90.0 +191 18 training.batch_size 1.0 +191 19 model.embedding_dim 0.0 +191 19 optimizer.lr 0.008128127010281863 +191 19 negative_sampler.num_negs_per_pos 52.0 +191 19 training.batch_size 2.0 +191 20 model.embedding_dim 2.0 +191 20 optimizer.lr 0.0013480745394095475 +191 20 negative_sampler.num_negs_per_pos 71.0 +191 20 training.batch_size 0.0 +191 21 model.embedding_dim 1.0 +191 21 optimizer.lr 0.013047909321632122 +191 21 negative_sampler.num_negs_per_pos 32.0 +191 21 training.batch_size 2.0 +191 22 model.embedding_dim 1.0 +191 22 optimizer.lr 0.016694863508426278 +191 22 negative_sampler.num_negs_per_pos 41.0 +191 22 training.batch_size 0.0 +191 23 model.embedding_dim 1.0 +191 23 optimizer.lr 0.004528237750037342 +191 23 negative_sampler.num_negs_per_pos 78.0 +191 23 training.batch_size 1.0 +191 24 model.embedding_dim 2.0 +191 24 optimizer.lr 0.08201709850738462 +191 24 negative_sampler.num_negs_per_pos 55.0 +191 24 training.batch_size 1.0 +191 25 
model.embedding_dim 2.0 +191 25 optimizer.lr 0.003613548435350404 +191 25 negative_sampler.num_negs_per_pos 1.0 +191 25 training.batch_size 2.0 +191 26 model.embedding_dim 1.0 +191 26 optimizer.lr 0.007667897868604545 +191 26 negative_sampler.num_negs_per_pos 50.0 +191 26 training.batch_size 1.0 +191 27 model.embedding_dim 0.0 +191 27 optimizer.lr 0.028077782961965583 +191 27 negative_sampler.num_negs_per_pos 96.0 +191 27 training.batch_size 2.0 +191 28 model.embedding_dim 1.0 +191 28 optimizer.lr 0.02076188396037172 +191 28 negative_sampler.num_negs_per_pos 10.0 +191 28 training.batch_size 2.0 +191 29 model.embedding_dim 2.0 +191 29 optimizer.lr 0.012381921654520392 +191 29 negative_sampler.num_negs_per_pos 72.0 +191 29 training.batch_size 1.0 +191 30 model.embedding_dim 0.0 +191 30 optimizer.lr 0.00970894150247116 +191 30 negative_sampler.num_negs_per_pos 81.0 +191 30 training.batch_size 1.0 +191 31 model.embedding_dim 0.0 +191 31 optimizer.lr 0.0012543864257455605 +191 31 negative_sampler.num_negs_per_pos 61.0 +191 31 training.batch_size 1.0 +191 32 model.embedding_dim 1.0 +191 32 optimizer.lr 0.09657124613663516 +191 32 negative_sampler.num_negs_per_pos 72.0 +191 32 training.batch_size 2.0 +191 33 model.embedding_dim 1.0 +191 33 optimizer.lr 0.012593668587710303 +191 33 negative_sampler.num_negs_per_pos 89.0 +191 33 training.batch_size 2.0 +191 34 model.embedding_dim 2.0 +191 34 optimizer.lr 0.03277675775284741 +191 34 negative_sampler.num_negs_per_pos 47.0 +191 34 training.batch_size 2.0 +191 35 model.embedding_dim 1.0 +191 35 optimizer.lr 0.08980178036281361 +191 35 negative_sampler.num_negs_per_pos 36.0 +191 35 training.batch_size 1.0 +191 36 model.embedding_dim 2.0 +191 36 optimizer.lr 0.002899316352352346 +191 36 negative_sampler.num_negs_per_pos 96.0 +191 36 training.batch_size 0.0 +191 37 model.embedding_dim 2.0 +191 37 optimizer.lr 0.008520742650937281 +191 37 negative_sampler.num_negs_per_pos 2.0 +191 37 training.batch_size 1.0 +191 38 
model.embedding_dim 2.0 +191 38 optimizer.lr 0.0013811190102026485 +191 38 negative_sampler.num_negs_per_pos 19.0 +191 38 training.batch_size 1.0 +191 39 model.embedding_dim 0.0 +191 39 optimizer.lr 0.09231274902515797 +191 39 negative_sampler.num_negs_per_pos 68.0 +191 39 training.batch_size 1.0 +191 40 model.embedding_dim 0.0 +191 40 optimizer.lr 0.0014565165825493386 +191 40 negative_sampler.num_negs_per_pos 31.0 +191 40 training.batch_size 1.0 +191 41 model.embedding_dim 2.0 +191 41 optimizer.lr 0.01593141621076556 +191 41 negative_sampler.num_negs_per_pos 42.0 +191 41 training.batch_size 1.0 +191 42 model.embedding_dim 2.0 +191 42 optimizer.lr 0.016736454852515546 +191 42 negative_sampler.num_negs_per_pos 23.0 +191 42 training.batch_size 0.0 +191 43 model.embedding_dim 1.0 +191 43 optimizer.lr 0.022503713293737394 +191 43 negative_sampler.num_negs_per_pos 36.0 +191 43 training.batch_size 0.0 +191 44 model.embedding_dim 1.0 +191 44 optimizer.lr 0.006566780437082782 +191 44 negative_sampler.num_negs_per_pos 3.0 +191 44 training.batch_size 1.0 +191 45 model.embedding_dim 0.0 +191 45 optimizer.lr 0.0023218909139882033 +191 45 negative_sampler.num_negs_per_pos 62.0 +191 45 training.batch_size 0.0 +191 46 model.embedding_dim 2.0 +191 46 optimizer.lr 0.04926284138731444 +191 46 negative_sampler.num_negs_per_pos 22.0 +191 46 training.batch_size 0.0 +191 47 model.embedding_dim 2.0 +191 47 optimizer.lr 0.015129437783542402 +191 47 negative_sampler.num_negs_per_pos 88.0 +191 47 training.batch_size 1.0 +191 48 model.embedding_dim 1.0 +191 48 optimizer.lr 0.032763669714899 +191 48 negative_sampler.num_negs_per_pos 50.0 +191 48 training.batch_size 1.0 +191 49 model.embedding_dim 1.0 +191 49 optimizer.lr 0.030172339991439003 +191 49 negative_sampler.num_negs_per_pos 46.0 +191 49 training.batch_size 1.0 +191 50 model.embedding_dim 0.0 +191 50 optimizer.lr 0.006025342948282992 +191 50 negative_sampler.num_negs_per_pos 9.0 +191 50 training.batch_size 1.0 +191 51 
model.embedding_dim 1.0 +191 51 optimizer.lr 0.003996383847468553 +191 51 negative_sampler.num_negs_per_pos 50.0 +191 51 training.batch_size 2.0 +191 52 model.embedding_dim 2.0 +191 52 optimizer.lr 0.010692161732338104 +191 52 negative_sampler.num_negs_per_pos 77.0 +191 52 training.batch_size 0.0 +191 53 model.embedding_dim 1.0 +191 53 optimizer.lr 0.05267927861659789 +191 53 negative_sampler.num_negs_per_pos 50.0 +191 53 training.batch_size 0.0 +191 54 model.embedding_dim 2.0 +191 54 optimizer.lr 0.0033228033906796185 +191 54 negative_sampler.num_negs_per_pos 83.0 +191 54 training.batch_size 1.0 +191 55 model.embedding_dim 1.0 +191 55 optimizer.lr 0.0021240126693394356 +191 55 negative_sampler.num_negs_per_pos 33.0 +191 55 training.batch_size 0.0 +191 56 model.embedding_dim 0.0 +191 56 optimizer.lr 0.08424212177801724 +191 56 negative_sampler.num_negs_per_pos 67.0 +191 56 training.batch_size 0.0 +191 57 model.embedding_dim 0.0 +191 57 optimizer.lr 0.016760914942391462 +191 57 negative_sampler.num_negs_per_pos 23.0 +191 57 training.batch_size 0.0 +191 58 model.embedding_dim 2.0 +191 58 optimizer.lr 0.03678685002614355 +191 58 negative_sampler.num_negs_per_pos 55.0 +191 58 training.batch_size 0.0 +191 59 model.embedding_dim 1.0 +191 59 optimizer.lr 0.05254603782611166 +191 59 negative_sampler.num_negs_per_pos 64.0 +191 59 training.batch_size 1.0 +191 60 model.embedding_dim 2.0 +191 60 optimizer.lr 0.005121779480527482 +191 60 negative_sampler.num_negs_per_pos 27.0 +191 60 training.batch_size 1.0 +191 61 model.embedding_dim 0.0 +191 61 optimizer.lr 0.0071013440990151284 +191 61 negative_sampler.num_negs_per_pos 64.0 +191 61 training.batch_size 2.0 +191 62 model.embedding_dim 1.0 +191 62 optimizer.lr 0.0026321992925016557 +191 62 negative_sampler.num_negs_per_pos 1.0 +191 62 training.batch_size 2.0 +191 63 model.embedding_dim 0.0 +191 63 optimizer.lr 0.010070294124701671 +191 63 negative_sampler.num_negs_per_pos 59.0 +191 63 training.batch_size 1.0 +191 64 
model.embedding_dim 1.0 +191 64 optimizer.lr 0.07737502683950202 +191 64 negative_sampler.num_negs_per_pos 82.0 +191 64 training.batch_size 2.0 +191 65 model.embedding_dim 0.0 +191 65 optimizer.lr 0.007429523453865376 +191 65 negative_sampler.num_negs_per_pos 59.0 +191 65 training.batch_size 2.0 +191 66 model.embedding_dim 2.0 +191 66 optimizer.lr 0.0013316318189737458 +191 66 negative_sampler.num_negs_per_pos 48.0 +191 66 training.batch_size 1.0 +191 67 model.embedding_dim 1.0 +191 67 optimizer.lr 0.004276007216487145 +191 67 negative_sampler.num_negs_per_pos 45.0 +191 67 training.batch_size 0.0 +191 68 model.embedding_dim 0.0 +191 68 optimizer.lr 0.017147547331457926 +191 68 negative_sampler.num_negs_per_pos 46.0 +191 68 training.batch_size 2.0 +191 69 model.embedding_dim 1.0 +191 69 optimizer.lr 0.0013107326990320438 +191 69 negative_sampler.num_negs_per_pos 19.0 +191 69 training.batch_size 1.0 +191 70 model.embedding_dim 1.0 +191 70 optimizer.lr 0.02532259053129769 +191 70 negative_sampler.num_negs_per_pos 31.0 +191 70 training.batch_size 1.0 +191 71 model.embedding_dim 0.0 +191 71 optimizer.lr 0.03826838563858869 +191 71 negative_sampler.num_negs_per_pos 36.0 +191 71 training.batch_size 0.0 +191 72 model.embedding_dim 0.0 +191 72 optimizer.lr 0.008554969833922484 +191 72 negative_sampler.num_negs_per_pos 21.0 +191 72 training.batch_size 0.0 +191 73 model.embedding_dim 1.0 +191 73 optimizer.lr 0.0016377454315821562 +191 73 negative_sampler.num_negs_per_pos 93.0 +191 73 training.batch_size 0.0 +191 74 model.embedding_dim 0.0 +191 74 optimizer.lr 0.005525366191282403 +191 74 negative_sampler.num_negs_per_pos 64.0 +191 74 training.batch_size 1.0 +191 75 model.embedding_dim 1.0 +191 75 optimizer.lr 0.08857723773636243 +191 75 negative_sampler.num_negs_per_pos 78.0 +191 75 training.batch_size 2.0 +191 76 model.embedding_dim 1.0 +191 76 optimizer.lr 0.07883508421287427 +191 76 negative_sampler.num_negs_per_pos 58.0 +191 76 training.batch_size 2.0 +191 77 
model.embedding_dim 1.0 +191 77 optimizer.lr 0.0011726998080852076 +191 77 negative_sampler.num_negs_per_pos 44.0 +191 77 training.batch_size 1.0 +191 78 model.embedding_dim 1.0 +191 78 optimizer.lr 0.0037528332811514703 +191 78 negative_sampler.num_negs_per_pos 15.0 +191 78 training.batch_size 1.0 +191 79 model.embedding_dim 1.0 +191 79 optimizer.lr 0.009957624660477239 +191 79 negative_sampler.num_negs_per_pos 99.0 +191 79 training.batch_size 2.0 +191 80 model.embedding_dim 0.0 +191 80 optimizer.lr 0.0012574641137756124 +191 80 negative_sampler.num_negs_per_pos 18.0 +191 80 training.batch_size 0.0 +191 81 model.embedding_dim 1.0 +191 81 optimizer.lr 0.006648282750004586 +191 81 negative_sampler.num_negs_per_pos 21.0 +191 81 training.batch_size 0.0 +191 82 model.embedding_dim 2.0 +191 82 optimizer.lr 0.008413999932613552 +191 82 negative_sampler.num_negs_per_pos 36.0 +191 82 training.batch_size 1.0 +191 83 model.embedding_dim 0.0 +191 83 optimizer.lr 0.01583540095533578 +191 83 negative_sampler.num_negs_per_pos 42.0 +191 83 training.batch_size 0.0 +191 84 model.embedding_dim 0.0 +191 84 optimizer.lr 0.006171664225710041 +191 84 negative_sampler.num_negs_per_pos 17.0 +191 84 training.batch_size 0.0 +191 85 model.embedding_dim 2.0 +191 85 optimizer.lr 0.002371939850776457 +191 85 negative_sampler.num_negs_per_pos 79.0 +191 85 training.batch_size 2.0 +191 86 model.embedding_dim 1.0 +191 86 optimizer.lr 0.019129371064122765 +191 86 negative_sampler.num_negs_per_pos 30.0 +191 86 training.batch_size 1.0 +191 87 model.embedding_dim 1.0 +191 87 optimizer.lr 0.002810250514911547 +191 87 negative_sampler.num_negs_per_pos 80.0 +191 87 training.batch_size 1.0 +191 88 model.embedding_dim 1.0 +191 88 optimizer.lr 0.0014354239450721825 +191 88 negative_sampler.num_negs_per_pos 6.0 +191 88 training.batch_size 2.0 +191 89 model.embedding_dim 1.0 +191 89 optimizer.lr 0.010863035481185201 +191 89 negative_sampler.num_negs_per_pos 83.0 +191 89 training.batch_size 2.0 +191 90 
model.embedding_dim 2.0 +191 90 optimizer.lr 0.007320698662136678 +191 90 negative_sampler.num_negs_per_pos 22.0 +191 90 training.batch_size 1.0 +191 91 model.embedding_dim 2.0 +191 91 optimizer.lr 0.04144370869938544 +191 91 negative_sampler.num_negs_per_pos 21.0 +191 91 training.batch_size 1.0 +191 92 model.embedding_dim 0.0 +191 92 optimizer.lr 0.012128240974977103 +191 92 negative_sampler.num_negs_per_pos 81.0 +191 92 training.batch_size 2.0 +191 93 model.embedding_dim 2.0 +191 93 optimizer.lr 0.003582088879822709 +191 93 negative_sampler.num_negs_per_pos 32.0 +191 93 training.batch_size 0.0 +191 94 model.embedding_dim 2.0 +191 94 optimizer.lr 0.017131285926354267 +191 94 negative_sampler.num_negs_per_pos 3.0 +191 94 training.batch_size 0.0 +191 95 model.embedding_dim 2.0 +191 95 optimizer.lr 0.04977106734738657 +191 95 negative_sampler.num_negs_per_pos 32.0 +191 95 training.batch_size 0.0 +191 96 model.embedding_dim 2.0 +191 96 optimizer.lr 0.0019025568038126314 +191 96 negative_sampler.num_negs_per_pos 17.0 +191 96 training.batch_size 2.0 +191 97 model.embedding_dim 0.0 +191 97 optimizer.lr 0.0034149590350550335 +191 97 negative_sampler.num_negs_per_pos 30.0 +191 97 training.batch_size 2.0 +191 98 model.embedding_dim 1.0 +191 98 optimizer.lr 0.03305465112710661 +191 98 negative_sampler.num_negs_per_pos 22.0 +191 98 training.batch_size 0.0 +191 99 model.embedding_dim 0.0 +191 99 optimizer.lr 0.0018577947722680855 +191 99 negative_sampler.num_negs_per_pos 49.0 +191 99 training.batch_size 0.0 +191 100 model.embedding_dim 2.0 +191 100 optimizer.lr 0.08796778235585413 +191 100 negative_sampler.num_negs_per_pos 13.0 +191 100 training.batch_size 1.0 +191 1 dataset """kinships""" +191 1 model """distmult""" +191 1 loss """softplus""" +191 1 regularizer """no""" +191 1 optimizer """adam""" +191 1 training_loop """owa""" +191 1 negative_sampler """basic""" +191 1 evaluator """rankbased""" +191 2 dataset """kinships""" +191 2 model """distmult""" +191 2 loss 
"""softplus""" +191 2 regularizer """no""" +191 2 optimizer """adam""" +191 2 training_loop """owa""" +191 2 negative_sampler """basic""" +191 2 evaluator """rankbased""" +191 3 dataset """kinships""" +191 3 model """distmult""" +191 3 loss """softplus""" +191 3 regularizer """no""" +191 3 optimizer """adam""" +191 3 training_loop """owa""" +191 3 negative_sampler """basic""" +191 3 evaluator """rankbased""" +191 4 dataset """kinships""" +191 4 model """distmult""" +191 4 loss """softplus""" +191 4 regularizer """no""" +191 4 optimizer """adam""" +191 4 training_loop """owa""" +191 4 negative_sampler """basic""" +191 4 evaluator """rankbased""" +191 5 dataset """kinships""" +191 5 model """distmult""" +191 5 loss """softplus""" +191 5 regularizer """no""" +191 5 optimizer """adam""" +191 5 training_loop """owa""" +191 5 negative_sampler """basic""" +191 5 evaluator """rankbased""" +191 6 dataset """kinships""" +191 6 model """distmult""" +191 6 loss """softplus""" +191 6 regularizer """no""" +191 6 optimizer """adam""" +191 6 training_loop """owa""" +191 6 negative_sampler """basic""" +191 6 evaluator """rankbased""" +191 7 dataset """kinships""" +191 7 model """distmult""" +191 7 loss """softplus""" +191 7 regularizer """no""" +191 7 optimizer """adam""" +191 7 training_loop """owa""" +191 7 negative_sampler """basic""" +191 7 evaluator """rankbased""" +191 8 dataset """kinships""" +191 8 model """distmult""" +191 8 loss """softplus""" +191 8 regularizer """no""" +191 8 optimizer """adam""" +191 8 training_loop """owa""" +191 8 negative_sampler """basic""" +191 8 evaluator """rankbased""" +191 9 dataset """kinships""" +191 9 model """distmult""" +191 9 loss """softplus""" +191 9 regularizer """no""" +191 9 optimizer """adam""" +191 9 training_loop """owa""" +191 9 negative_sampler """basic""" +191 9 evaluator """rankbased""" +191 10 dataset """kinships""" +191 10 model """distmult""" +191 10 loss """softplus""" +191 10 regularizer """no""" +191 10 optimizer 
"""adam""" +191 10 training_loop """owa""" +191 10 negative_sampler """basic""" +191 10 evaluator """rankbased""" +191 11 dataset """kinships""" +191 11 model """distmult""" +191 11 loss """softplus""" +191 11 regularizer """no""" +191 11 optimizer """adam""" +191 11 training_loop """owa""" +191 11 negative_sampler """basic""" +191 11 evaluator """rankbased""" +191 12 dataset """kinships""" +191 12 model """distmult""" +191 12 loss """softplus""" +191 12 regularizer """no""" +191 12 optimizer """adam""" +191 12 training_loop """owa""" +191 12 negative_sampler """basic""" +191 12 evaluator """rankbased""" +191 13 dataset """kinships""" +191 13 model """distmult""" +191 13 loss """softplus""" +191 13 regularizer """no""" +191 13 optimizer """adam""" +191 13 training_loop """owa""" +191 13 negative_sampler """basic""" +191 13 evaluator """rankbased""" +191 14 dataset """kinships""" +191 14 model """distmult""" +191 14 loss """softplus""" +191 14 regularizer """no""" +191 14 optimizer """adam""" +191 14 training_loop """owa""" +191 14 negative_sampler """basic""" +191 14 evaluator """rankbased""" +191 15 dataset """kinships""" +191 15 model """distmult""" +191 15 loss """softplus""" +191 15 regularizer """no""" +191 15 optimizer """adam""" +191 15 training_loop """owa""" +191 15 negative_sampler """basic""" +191 15 evaluator """rankbased""" +191 16 dataset """kinships""" +191 16 model """distmult""" +191 16 loss """softplus""" +191 16 regularizer """no""" +191 16 optimizer """adam""" +191 16 training_loop """owa""" +191 16 negative_sampler """basic""" +191 16 evaluator """rankbased""" +191 17 dataset """kinships""" +191 17 model """distmult""" +191 17 loss """softplus""" +191 17 regularizer """no""" +191 17 optimizer """adam""" +191 17 training_loop """owa""" +191 17 negative_sampler """basic""" +191 17 evaluator """rankbased""" +191 18 dataset """kinships""" +191 18 model """distmult""" +191 18 loss """softplus""" +191 18 regularizer """no""" +191 18 optimizer 
"""adam""" +191 18 training_loop """owa""" +191 18 negative_sampler """basic""" +191 18 evaluator """rankbased""" +191 19 dataset """kinships""" +191 19 model """distmult""" +191 19 loss """softplus""" +191 19 regularizer """no""" +191 19 optimizer """adam""" +191 19 training_loop """owa""" +191 19 negative_sampler """basic""" +191 19 evaluator """rankbased""" +191 20 dataset """kinships""" +191 20 model """distmult""" +191 20 loss """softplus""" +191 20 regularizer """no""" +191 20 optimizer """adam""" +191 20 training_loop """owa""" +191 20 negative_sampler """basic""" +191 20 evaluator """rankbased""" +191 21 dataset """kinships""" +191 21 model """distmult""" +191 21 loss """softplus""" +191 21 regularizer """no""" +191 21 optimizer """adam""" +191 21 training_loop """owa""" +191 21 negative_sampler """basic""" +191 21 evaluator """rankbased""" +191 22 dataset """kinships""" +191 22 model """distmult""" +191 22 loss """softplus""" +191 22 regularizer """no""" +191 22 optimizer """adam""" +191 22 training_loop """owa""" +191 22 negative_sampler """basic""" +191 22 evaluator """rankbased""" +191 23 dataset """kinships""" +191 23 model """distmult""" +191 23 loss """softplus""" +191 23 regularizer """no""" +191 23 optimizer """adam""" +191 23 training_loop """owa""" +191 23 negative_sampler """basic""" +191 23 evaluator """rankbased""" +191 24 dataset """kinships""" +191 24 model """distmult""" +191 24 loss """softplus""" +191 24 regularizer """no""" +191 24 optimizer """adam""" +191 24 training_loop """owa""" +191 24 negative_sampler """basic""" +191 24 evaluator """rankbased""" +191 25 dataset """kinships""" +191 25 model """distmult""" +191 25 loss """softplus""" +191 25 regularizer """no""" +191 25 optimizer """adam""" +191 25 training_loop """owa""" +191 25 negative_sampler """basic""" +191 25 evaluator """rankbased""" +191 26 dataset """kinships""" +191 26 model """distmult""" +191 26 loss """softplus""" +191 26 regularizer """no""" +191 26 optimizer 
"""adam""" +191 26 training_loop """owa""" +191 26 negative_sampler """basic""" +191 26 evaluator """rankbased""" +191 27 dataset """kinships""" +191 27 model """distmult""" +191 27 loss """softplus""" +191 27 regularizer """no""" +191 27 optimizer """adam""" +191 27 training_loop """owa""" +191 27 negative_sampler """basic""" +191 27 evaluator """rankbased""" +191 28 dataset """kinships""" +191 28 model """distmult""" +191 28 loss """softplus""" +191 28 regularizer """no""" +191 28 optimizer """adam""" +191 28 training_loop """owa""" +191 28 negative_sampler """basic""" +191 28 evaluator """rankbased""" +191 29 dataset """kinships""" +191 29 model """distmult""" +191 29 loss """softplus""" +191 29 regularizer """no""" +191 29 optimizer """adam""" +191 29 training_loop """owa""" +191 29 negative_sampler """basic""" +191 29 evaluator """rankbased""" +191 30 dataset """kinships""" +191 30 model """distmult""" +191 30 loss """softplus""" +191 30 regularizer """no""" +191 30 optimizer """adam""" +191 30 training_loop """owa""" +191 30 negative_sampler """basic""" +191 30 evaluator """rankbased""" +191 31 dataset """kinships""" +191 31 model """distmult""" +191 31 loss """softplus""" +191 31 regularizer """no""" +191 31 optimizer """adam""" +191 31 training_loop """owa""" +191 31 negative_sampler """basic""" +191 31 evaluator """rankbased""" +191 32 dataset """kinships""" +191 32 model """distmult""" +191 32 loss """softplus""" +191 32 regularizer """no""" +191 32 optimizer """adam""" +191 32 training_loop """owa""" +191 32 negative_sampler """basic""" +191 32 evaluator """rankbased""" +191 33 dataset """kinships""" +191 33 model """distmult""" +191 33 loss """softplus""" +191 33 regularizer """no""" +191 33 optimizer """adam""" +191 33 training_loop """owa""" +191 33 negative_sampler """basic""" +191 33 evaluator """rankbased""" +191 34 dataset """kinships""" +191 34 model """distmult""" +191 34 loss """softplus""" +191 34 regularizer """no""" +191 34 optimizer 
"""adam""" +191 34 training_loop """owa""" +191 34 negative_sampler """basic""" +191 34 evaluator """rankbased""" +191 35 dataset """kinships""" +191 35 model """distmult""" +191 35 loss """softplus""" +191 35 regularizer """no""" +191 35 optimizer """adam""" +191 35 training_loop """owa""" +191 35 negative_sampler """basic""" +191 35 evaluator """rankbased""" +191 36 dataset """kinships""" +191 36 model """distmult""" +191 36 loss """softplus""" +191 36 regularizer """no""" +191 36 optimizer """adam""" +191 36 training_loop """owa""" +191 36 negative_sampler """basic""" +191 36 evaluator """rankbased""" +191 37 dataset """kinships""" +191 37 model """distmult""" +191 37 loss """softplus""" +191 37 regularizer """no""" +191 37 optimizer """adam""" +191 37 training_loop """owa""" +191 37 negative_sampler """basic""" +191 37 evaluator """rankbased""" +191 38 dataset """kinships""" +191 38 model """distmult""" +191 38 loss """softplus""" +191 38 regularizer """no""" +191 38 optimizer """adam""" +191 38 training_loop """owa""" +191 38 negative_sampler """basic""" +191 38 evaluator """rankbased""" +191 39 dataset """kinships""" +191 39 model """distmult""" +191 39 loss """softplus""" +191 39 regularizer """no""" +191 39 optimizer """adam""" +191 39 training_loop """owa""" +191 39 negative_sampler """basic""" +191 39 evaluator """rankbased""" +191 40 dataset """kinships""" +191 40 model """distmult""" +191 40 loss """softplus""" +191 40 regularizer """no""" +191 40 optimizer """adam""" +191 40 training_loop """owa""" +191 40 negative_sampler """basic""" +191 40 evaluator """rankbased""" +191 41 dataset """kinships""" +191 41 model """distmult""" +191 41 loss """softplus""" +191 41 regularizer """no""" +191 41 optimizer """adam""" +191 41 training_loop """owa""" +191 41 negative_sampler """basic""" +191 41 evaluator """rankbased""" +191 42 dataset """kinships""" +191 42 model """distmult""" +191 42 loss """softplus""" +191 42 regularizer """no""" +191 42 optimizer 
"""adam""" +191 42 training_loop """owa""" +191 42 negative_sampler """basic""" +191 42 evaluator """rankbased""" +191 43 dataset """kinships""" +191 43 model """distmult""" +191 43 loss """softplus""" +191 43 regularizer """no""" +191 43 optimizer """adam""" +191 43 training_loop """owa""" +191 43 negative_sampler """basic""" +191 43 evaluator """rankbased""" +191 44 dataset """kinships""" +191 44 model """distmult""" +191 44 loss """softplus""" +191 44 regularizer """no""" +191 44 optimizer """adam""" +191 44 training_loop """owa""" +191 44 negative_sampler """basic""" +191 44 evaluator """rankbased""" +191 45 dataset """kinships""" +191 45 model """distmult""" +191 45 loss """softplus""" +191 45 regularizer """no""" +191 45 optimizer """adam""" +191 45 training_loop """owa""" +191 45 negative_sampler """basic""" +191 45 evaluator """rankbased""" +191 46 dataset """kinships""" +191 46 model """distmult""" +191 46 loss """softplus""" +191 46 regularizer """no""" +191 46 optimizer """adam""" +191 46 training_loop """owa""" +191 46 negative_sampler """basic""" +191 46 evaluator """rankbased""" +191 47 dataset """kinships""" +191 47 model """distmult""" +191 47 loss """softplus""" +191 47 regularizer """no""" +191 47 optimizer """adam""" +191 47 training_loop """owa""" +191 47 negative_sampler """basic""" +191 47 evaluator """rankbased""" +191 48 dataset """kinships""" +191 48 model """distmult""" +191 48 loss """softplus""" +191 48 regularizer """no""" +191 48 optimizer """adam""" +191 48 training_loop """owa""" +191 48 negative_sampler """basic""" +191 48 evaluator """rankbased""" +191 49 dataset """kinships""" +191 49 model """distmult""" +191 49 loss """softplus""" +191 49 regularizer """no""" +191 49 optimizer """adam""" +191 49 training_loop """owa""" +191 49 negative_sampler """basic""" +191 49 evaluator """rankbased""" +191 50 dataset """kinships""" +191 50 model """distmult""" +191 50 loss """softplus""" +191 50 regularizer """no""" +191 50 optimizer 
"""adam""" +191 50 training_loop """owa""" +191 50 negative_sampler """basic""" +191 50 evaluator """rankbased""" +191 51 dataset """kinships""" +191 51 model """distmult""" +191 51 loss """softplus""" +191 51 regularizer """no""" +191 51 optimizer """adam""" +191 51 training_loop """owa""" +191 51 negative_sampler """basic""" +191 51 evaluator """rankbased""" +191 52 dataset """kinships""" +191 52 model """distmult""" +191 52 loss """softplus""" +191 52 regularizer """no""" +191 52 optimizer """adam""" +191 52 training_loop """owa""" +191 52 negative_sampler """basic""" +191 52 evaluator """rankbased""" +191 53 dataset """kinships""" +191 53 model """distmult""" +191 53 loss """softplus""" +191 53 regularizer """no""" +191 53 optimizer """adam""" +191 53 training_loop """owa""" +191 53 negative_sampler """basic""" +191 53 evaluator """rankbased""" +191 54 dataset """kinships""" +191 54 model """distmult""" +191 54 loss """softplus""" +191 54 regularizer """no""" +191 54 optimizer """adam""" +191 54 training_loop """owa""" +191 54 negative_sampler """basic""" +191 54 evaluator """rankbased""" +191 55 dataset """kinships""" +191 55 model """distmult""" +191 55 loss """softplus""" +191 55 regularizer """no""" +191 55 optimizer """adam""" +191 55 training_loop """owa""" +191 55 negative_sampler """basic""" +191 55 evaluator """rankbased""" +191 56 dataset """kinships""" +191 56 model """distmult""" +191 56 loss """softplus""" +191 56 regularizer """no""" +191 56 optimizer """adam""" +191 56 training_loop """owa""" +191 56 negative_sampler """basic""" +191 56 evaluator """rankbased""" +191 57 dataset """kinships""" +191 57 model """distmult""" +191 57 loss """softplus""" +191 57 regularizer """no""" +191 57 optimizer """adam""" +191 57 training_loop """owa""" +191 57 negative_sampler """basic""" +191 57 evaluator """rankbased""" +191 58 dataset """kinships""" +191 58 model """distmult""" +191 58 loss """softplus""" +191 58 regularizer """no""" +191 58 optimizer 
"""adam""" +191 58 training_loop """owa""" +191 58 negative_sampler """basic""" +191 58 evaluator """rankbased""" +191 59 dataset """kinships""" +191 59 model """distmult""" +191 59 loss """softplus""" +191 59 regularizer """no""" +191 59 optimizer """adam""" +191 59 training_loop """owa""" +191 59 negative_sampler """basic""" +191 59 evaluator """rankbased""" +191 60 dataset """kinships""" +191 60 model """distmult""" +191 60 loss """softplus""" +191 60 regularizer """no""" +191 60 optimizer """adam""" +191 60 training_loop """owa""" +191 60 negative_sampler """basic""" +191 60 evaluator """rankbased""" +191 61 dataset """kinships""" +191 61 model """distmult""" +191 61 loss """softplus""" +191 61 regularizer """no""" +191 61 optimizer """adam""" +191 61 training_loop """owa""" +191 61 negative_sampler """basic""" +191 61 evaluator """rankbased""" +191 62 dataset """kinships""" +191 62 model """distmult""" +191 62 loss """softplus""" +191 62 regularizer """no""" +191 62 optimizer """adam""" +191 62 training_loop """owa""" +191 62 negative_sampler """basic""" +191 62 evaluator """rankbased""" +191 63 dataset """kinships""" +191 63 model """distmult""" +191 63 loss """softplus""" +191 63 regularizer """no""" +191 63 optimizer """adam""" +191 63 training_loop """owa""" +191 63 negative_sampler """basic""" +191 63 evaluator """rankbased""" +191 64 dataset """kinships""" +191 64 model """distmult""" +191 64 loss """softplus""" +191 64 regularizer """no""" +191 64 optimizer """adam""" +191 64 training_loop """owa""" +191 64 negative_sampler """basic""" +191 64 evaluator """rankbased""" +191 65 dataset """kinships""" +191 65 model """distmult""" +191 65 loss """softplus""" +191 65 regularizer """no""" +191 65 optimizer """adam""" +191 65 training_loop """owa""" +191 65 negative_sampler """basic""" +191 65 evaluator """rankbased""" +191 66 dataset """kinships""" +191 66 model """distmult""" +191 66 loss """softplus""" +191 66 regularizer """no""" +191 66 optimizer 
"""adam""" +191 66 training_loop """owa""" +191 66 negative_sampler """basic""" +191 66 evaluator """rankbased""" +191 67 dataset """kinships""" +191 67 model """distmult""" +191 67 loss """softplus""" +191 67 regularizer """no""" +191 67 optimizer """adam""" +191 67 training_loop """owa""" +191 67 negative_sampler """basic""" +191 67 evaluator """rankbased""" +191 68 dataset """kinships""" +191 68 model """distmult""" +191 68 loss """softplus""" +191 68 regularizer """no""" +191 68 optimizer """adam""" +191 68 training_loop """owa""" +191 68 negative_sampler """basic""" +191 68 evaluator """rankbased""" +191 69 dataset """kinships""" +191 69 model """distmult""" +191 69 loss """softplus""" +191 69 regularizer """no""" +191 69 optimizer """adam""" +191 69 training_loop """owa""" +191 69 negative_sampler """basic""" +191 69 evaluator """rankbased""" +191 70 dataset """kinships""" +191 70 model """distmult""" +191 70 loss """softplus""" +191 70 regularizer """no""" +191 70 optimizer """adam""" +191 70 training_loop """owa""" +191 70 negative_sampler """basic""" +191 70 evaluator """rankbased""" +191 71 dataset """kinships""" +191 71 model """distmult""" +191 71 loss """softplus""" +191 71 regularizer """no""" +191 71 optimizer """adam""" +191 71 training_loop """owa""" +191 71 negative_sampler """basic""" +191 71 evaluator """rankbased""" +191 72 dataset """kinships""" +191 72 model """distmult""" +191 72 loss """softplus""" +191 72 regularizer """no""" +191 72 optimizer """adam""" +191 72 training_loop """owa""" +191 72 negative_sampler """basic""" +191 72 evaluator """rankbased""" +191 73 dataset """kinships""" +191 73 model """distmult""" +191 73 loss """softplus""" +191 73 regularizer """no""" +191 73 optimizer """adam""" +191 73 training_loop """owa""" +191 73 negative_sampler """basic""" +191 73 evaluator """rankbased""" +191 74 dataset """kinships""" +191 74 model """distmult""" +191 74 loss """softplus""" +191 74 regularizer """no""" +191 74 optimizer 
"""adam""" +191 74 training_loop """owa""" +191 74 negative_sampler """basic""" +191 74 evaluator """rankbased""" +191 75 dataset """kinships""" +191 75 model """distmult""" +191 75 loss """softplus""" +191 75 regularizer """no""" +191 75 optimizer """adam""" +191 75 training_loop """owa""" +191 75 negative_sampler """basic""" +191 75 evaluator """rankbased""" +191 76 dataset """kinships""" +191 76 model """distmult""" +191 76 loss """softplus""" +191 76 regularizer """no""" +191 76 optimizer """adam""" +191 76 training_loop """owa""" +191 76 negative_sampler """basic""" +191 76 evaluator """rankbased""" +191 77 dataset """kinships""" +191 77 model """distmult""" +191 77 loss """softplus""" +191 77 regularizer """no""" +191 77 optimizer """adam""" +191 77 training_loop """owa""" +191 77 negative_sampler """basic""" +191 77 evaluator """rankbased""" +191 78 dataset """kinships""" +191 78 model """distmult""" +191 78 loss """softplus""" +191 78 regularizer """no""" +191 78 optimizer """adam""" +191 78 training_loop """owa""" +191 78 negative_sampler """basic""" +191 78 evaluator """rankbased""" +191 79 dataset """kinships""" +191 79 model """distmult""" +191 79 loss """softplus""" +191 79 regularizer """no""" +191 79 optimizer """adam""" +191 79 training_loop """owa""" +191 79 negative_sampler """basic""" +191 79 evaluator """rankbased""" +191 80 dataset """kinships""" +191 80 model """distmult""" +191 80 loss """softplus""" +191 80 regularizer """no""" +191 80 optimizer """adam""" +191 80 training_loop """owa""" +191 80 negative_sampler """basic""" +191 80 evaluator """rankbased""" +191 81 dataset """kinships""" +191 81 model """distmult""" +191 81 loss """softplus""" +191 81 regularizer """no""" +191 81 optimizer """adam""" +191 81 training_loop """owa""" +191 81 negative_sampler """basic""" +191 81 evaluator """rankbased""" +191 82 dataset """kinships""" +191 82 model """distmult""" +191 82 loss """softplus""" +191 82 regularizer """no""" +191 82 optimizer 
"""adam""" +191 82 training_loop """owa""" +191 82 negative_sampler """basic""" +191 82 evaluator """rankbased""" +191 83 dataset """kinships""" +191 83 model """distmult""" +191 83 loss """softplus""" +191 83 regularizer """no""" +191 83 optimizer """adam""" +191 83 training_loop """owa""" +191 83 negative_sampler """basic""" +191 83 evaluator """rankbased""" +191 84 dataset """kinships""" +191 84 model """distmult""" +191 84 loss """softplus""" +191 84 regularizer """no""" +191 84 optimizer """adam""" +191 84 training_loop """owa""" +191 84 negative_sampler """basic""" +191 84 evaluator """rankbased""" +191 85 dataset """kinships""" +191 85 model """distmult""" +191 85 loss """softplus""" +191 85 regularizer """no""" +191 85 optimizer """adam""" +191 85 training_loop """owa""" +191 85 negative_sampler """basic""" +191 85 evaluator """rankbased""" +191 86 dataset """kinships""" +191 86 model """distmult""" +191 86 loss """softplus""" +191 86 regularizer """no""" +191 86 optimizer """adam""" +191 86 training_loop """owa""" +191 86 negative_sampler """basic""" +191 86 evaluator """rankbased""" +191 87 dataset """kinships""" +191 87 model """distmult""" +191 87 loss """softplus""" +191 87 regularizer """no""" +191 87 optimizer """adam""" +191 87 training_loop """owa""" +191 87 negative_sampler """basic""" +191 87 evaluator """rankbased""" +191 88 dataset """kinships""" +191 88 model """distmult""" +191 88 loss """softplus""" +191 88 regularizer """no""" +191 88 optimizer """adam""" +191 88 training_loop """owa""" +191 88 negative_sampler """basic""" +191 88 evaluator """rankbased""" +191 89 dataset """kinships""" +191 89 model """distmult""" +191 89 loss """softplus""" +191 89 regularizer """no""" +191 89 optimizer """adam""" +191 89 training_loop """owa""" +191 89 negative_sampler """basic""" +191 89 evaluator """rankbased""" +191 90 dataset """kinships""" +191 90 model """distmult""" +191 90 loss """softplus""" +191 90 regularizer """no""" +191 90 optimizer 
"""adam""" +191 90 training_loop """owa""" +191 90 negative_sampler """basic""" +191 90 evaluator """rankbased""" +191 91 dataset """kinships""" +191 91 model """distmult""" +191 91 loss """softplus""" +191 91 regularizer """no""" +191 91 optimizer """adam""" +191 91 training_loop """owa""" +191 91 negative_sampler """basic""" +191 91 evaluator """rankbased""" +191 92 dataset """kinships""" +191 92 model """distmult""" +191 92 loss """softplus""" +191 92 regularizer """no""" +191 92 optimizer """adam""" +191 92 training_loop """owa""" +191 92 negative_sampler """basic""" +191 92 evaluator """rankbased""" +191 93 dataset """kinships""" +191 93 model """distmult""" +191 93 loss """softplus""" +191 93 regularizer """no""" +191 93 optimizer """adam""" +191 93 training_loop """owa""" +191 93 negative_sampler """basic""" +191 93 evaluator """rankbased""" +191 94 dataset """kinships""" +191 94 model """distmult""" +191 94 loss """softplus""" +191 94 regularizer """no""" +191 94 optimizer """adam""" +191 94 training_loop """owa""" +191 94 negative_sampler """basic""" +191 94 evaluator """rankbased""" +191 95 dataset """kinships""" +191 95 model """distmult""" +191 95 loss """softplus""" +191 95 regularizer """no""" +191 95 optimizer """adam""" +191 95 training_loop """owa""" +191 95 negative_sampler """basic""" +191 95 evaluator """rankbased""" +191 96 dataset """kinships""" +191 96 model """distmult""" +191 96 loss """softplus""" +191 96 regularizer """no""" +191 96 optimizer """adam""" +191 96 training_loop """owa""" +191 96 negative_sampler """basic""" +191 96 evaluator """rankbased""" +191 97 dataset """kinships""" +191 97 model """distmult""" +191 97 loss """softplus""" +191 97 regularizer """no""" +191 97 optimizer """adam""" +191 97 training_loop """owa""" +191 97 negative_sampler """basic""" +191 97 evaluator """rankbased""" +191 98 dataset """kinships""" +191 98 model """distmult""" +191 98 loss """softplus""" +191 98 regularizer """no""" +191 98 optimizer 
"""adam""" +191 98 training_loop """owa""" +191 98 negative_sampler """basic""" +191 98 evaluator """rankbased""" +191 99 dataset """kinships""" +191 99 model """distmult""" +191 99 loss """softplus""" +191 99 regularizer """no""" +191 99 optimizer """adam""" +191 99 training_loop """owa""" +191 99 negative_sampler """basic""" +191 99 evaluator """rankbased""" +191 100 dataset """kinships""" +191 100 model """distmult""" +191 100 loss """softplus""" +191 100 regularizer """no""" +191 100 optimizer """adam""" +191 100 training_loop """owa""" +191 100 negative_sampler """basic""" +191 100 evaluator """rankbased""" +192 1 model.embedding_dim 2.0 +192 1 loss.margin 7.399571413609981 +192 1 optimizer.lr 0.0012035386323556085 +192 1 negative_sampler.num_negs_per_pos 12.0 +192 1 training.batch_size 2.0 +192 2 model.embedding_dim 1.0 +192 2 loss.margin 9.293171126838956 +192 2 optimizer.lr 0.019065138086895656 +192 2 negative_sampler.num_negs_per_pos 90.0 +192 2 training.batch_size 0.0 +192 3 model.embedding_dim 1.0 +192 3 loss.margin 8.273919781073658 +192 3 optimizer.lr 0.0015705096262908734 +192 3 negative_sampler.num_negs_per_pos 39.0 +192 3 training.batch_size 2.0 +192 4 model.embedding_dim 2.0 +192 4 loss.margin 5.996229453557205 +192 4 optimizer.lr 0.0018261858989116005 +192 4 negative_sampler.num_negs_per_pos 48.0 +192 4 training.batch_size 0.0 +192 5 model.embedding_dim 2.0 +192 5 loss.margin 9.759437186341495 +192 5 optimizer.lr 0.0878971809546825 +192 5 negative_sampler.num_negs_per_pos 51.0 +192 5 training.batch_size 2.0 +192 6 model.embedding_dim 0.0 +192 6 loss.margin 8.614042472415605 +192 6 optimizer.lr 0.02547907675844983 +192 6 negative_sampler.num_negs_per_pos 98.0 +192 6 training.batch_size 2.0 +192 7 model.embedding_dim 0.0 +192 7 loss.margin 8.421198119770276 +192 7 optimizer.lr 0.0077011863210764515 +192 7 negative_sampler.num_negs_per_pos 31.0 +192 7 training.batch_size 0.0 +192 8 model.embedding_dim 1.0 +192 8 loss.margin 1.7180162330221405 +192 8 
optimizer.lr 0.04709412925507727 +192 8 negative_sampler.num_negs_per_pos 10.0 +192 8 training.batch_size 0.0 +192 9 model.embedding_dim 0.0 +192 9 loss.margin 9.922184308177581 +192 9 optimizer.lr 0.005307727666405946 +192 9 negative_sampler.num_negs_per_pos 79.0 +192 9 training.batch_size 1.0 +192 10 model.embedding_dim 2.0 +192 10 loss.margin 9.689105480416254 +192 10 optimizer.lr 0.025421312455293953 +192 10 negative_sampler.num_negs_per_pos 89.0 +192 10 training.batch_size 2.0 +192 11 model.embedding_dim 0.0 +192 11 loss.margin 9.571083932807259 +192 11 optimizer.lr 0.0012959630328553708 +192 11 negative_sampler.num_negs_per_pos 62.0 +192 11 training.batch_size 0.0 +192 12 model.embedding_dim 2.0 +192 12 loss.margin 5.136115275385699 +192 12 optimizer.lr 0.00779139840361106 +192 12 negative_sampler.num_negs_per_pos 55.0 +192 12 training.batch_size 2.0 +192 13 model.embedding_dim 0.0 +192 13 loss.margin 1.1308604209488293 +192 13 optimizer.lr 0.09618527453853602 +192 13 negative_sampler.num_negs_per_pos 73.0 +192 13 training.batch_size 0.0 +192 14 model.embedding_dim 2.0 +192 14 loss.margin 9.596382725331042 +192 14 optimizer.lr 0.017849285079407227 +192 14 negative_sampler.num_negs_per_pos 80.0 +192 14 training.batch_size 2.0 +192 15 model.embedding_dim 1.0 +192 15 loss.margin 2.2149991401924414 +192 15 optimizer.lr 0.005522529513832307 +192 15 negative_sampler.num_negs_per_pos 6.0 +192 15 training.batch_size 0.0 +192 16 model.embedding_dim 0.0 +192 16 loss.margin 2.9195353857686364 +192 16 optimizer.lr 0.0020645259641479574 +192 16 negative_sampler.num_negs_per_pos 64.0 +192 16 training.batch_size 2.0 +192 17 model.embedding_dim 1.0 +192 17 loss.margin 3.3507536429182156 +192 17 optimizer.lr 0.002314452882042273 +192 17 negative_sampler.num_negs_per_pos 57.0 +192 17 training.batch_size 0.0 +192 18 model.embedding_dim 1.0 +192 18 loss.margin 7.702672339274169 +192 18 optimizer.lr 0.019803857003813062 +192 18 negative_sampler.num_negs_per_pos 97.0 +192 18 
training.batch_size 1.0 +192 19 model.embedding_dim 1.0 +192 19 loss.margin 4.627156295162915 +192 19 optimizer.lr 0.003911768096670647 +192 19 negative_sampler.num_negs_per_pos 5.0 +192 19 training.batch_size 1.0 +192 20 model.embedding_dim 0.0 +192 20 loss.margin 8.595358406379294 +192 20 optimizer.lr 0.002766758355749066 +192 20 negative_sampler.num_negs_per_pos 85.0 +192 20 training.batch_size 2.0 +192 21 model.embedding_dim 1.0 +192 21 loss.margin 9.531184042286542 +192 21 optimizer.lr 0.006611725596217654 +192 21 negative_sampler.num_negs_per_pos 32.0 +192 21 training.batch_size 1.0 +192 22 model.embedding_dim 0.0 +192 22 loss.margin 0.5605682171639816 +192 22 optimizer.lr 0.008062395737946336 +192 22 negative_sampler.num_negs_per_pos 31.0 +192 22 training.batch_size 1.0 +192 23 model.embedding_dim 2.0 +192 23 loss.margin 7.79049140768259 +192 23 optimizer.lr 0.05720602919760712 +192 23 negative_sampler.num_negs_per_pos 26.0 +192 23 training.batch_size 0.0 +192 24 model.embedding_dim 2.0 +192 24 loss.margin 4.423927205684668 +192 24 optimizer.lr 0.021465454018925197 +192 24 negative_sampler.num_negs_per_pos 72.0 +192 24 training.batch_size 0.0 +192 25 model.embedding_dim 1.0 +192 25 loss.margin 3.72511938184392 +192 25 optimizer.lr 0.003995687546682151 +192 25 negative_sampler.num_negs_per_pos 68.0 +192 25 training.batch_size 0.0 +192 26 model.embedding_dim 2.0 +192 26 loss.margin 9.92241960745175 +192 26 optimizer.lr 0.06659141732295185 +192 26 negative_sampler.num_negs_per_pos 13.0 +192 26 training.batch_size 1.0 +192 27 model.embedding_dim 2.0 +192 27 loss.margin 0.522772347976103 +192 27 optimizer.lr 0.07504276165668725 +192 27 negative_sampler.num_negs_per_pos 8.0 +192 27 training.batch_size 0.0 +192 28 model.embedding_dim 1.0 +192 28 loss.margin 2.5368911932414524 +192 28 optimizer.lr 0.018573624874985397 +192 28 negative_sampler.num_negs_per_pos 52.0 +192 28 training.batch_size 0.0 +192 29 model.embedding_dim 1.0 +192 29 loss.margin 9.583516519198227 
+192 29 optimizer.lr 0.0012405642618449053 +192 29 negative_sampler.num_negs_per_pos 79.0 +192 29 training.batch_size 2.0 +192 30 model.embedding_dim 2.0 +192 30 loss.margin 0.9829032928194299 +192 30 optimizer.lr 0.004927856743980612 +192 30 negative_sampler.num_negs_per_pos 55.0 +192 30 training.batch_size 1.0 +192 31 model.embedding_dim 1.0 +192 31 loss.margin 1.853775142895396 +192 31 optimizer.lr 0.025065331148958144 +192 31 negative_sampler.num_negs_per_pos 73.0 +192 31 training.batch_size 0.0 +192 32 model.embedding_dim 0.0 +192 32 loss.margin 8.886771916460361 +192 32 optimizer.lr 0.0019773182762506656 +192 32 negative_sampler.num_negs_per_pos 65.0 +192 32 training.batch_size 1.0 +192 33 model.embedding_dim 1.0 +192 33 loss.margin 1.324777040221231 +192 33 optimizer.lr 0.013993001347210824 +192 33 negative_sampler.num_negs_per_pos 34.0 +192 33 training.batch_size 1.0 +192 34 model.embedding_dim 2.0 +192 34 loss.margin 9.18317535351633 +192 34 optimizer.lr 0.006635300333098741 +192 34 negative_sampler.num_negs_per_pos 52.0 +192 34 training.batch_size 1.0 +192 35 model.embedding_dim 0.0 +192 35 loss.margin 3.5707867800187048 +192 35 optimizer.lr 0.004123897860162739 +192 35 negative_sampler.num_negs_per_pos 61.0 +192 35 training.batch_size 0.0 +192 36 model.embedding_dim 1.0 +192 36 loss.margin 7.183326912007746 +192 36 optimizer.lr 0.040120451049770814 +192 36 negative_sampler.num_negs_per_pos 45.0 +192 36 training.batch_size 2.0 +192 37 model.embedding_dim 1.0 +192 37 loss.margin 2.4651183021274763 +192 37 optimizer.lr 0.03803668755932589 +192 37 negative_sampler.num_negs_per_pos 18.0 +192 37 training.batch_size 0.0 +192 38 model.embedding_dim 1.0 +192 38 loss.margin 4.426505402095315 +192 38 optimizer.lr 0.011309105111961409 +192 38 negative_sampler.num_negs_per_pos 67.0 +192 38 training.batch_size 0.0 +192 39 model.embedding_dim 0.0 +192 39 loss.margin 0.9135616409960634 +192 39 optimizer.lr 0.014444131462467202 +192 39 negative_sampler.num_negs_per_pos 
84.0 +192 39 training.batch_size 1.0 +192 40 model.embedding_dim 0.0 +192 40 loss.margin 4.5979472721266275 +192 40 optimizer.lr 0.0257590087215044 +192 40 negative_sampler.num_negs_per_pos 27.0 +192 40 training.batch_size 1.0 +192 41 model.embedding_dim 0.0 +192 41 loss.margin 6.625468179075086 +192 41 optimizer.lr 0.037985690453876474 +192 41 negative_sampler.num_negs_per_pos 98.0 +192 41 training.batch_size 2.0 +192 42 model.embedding_dim 2.0 +192 42 loss.margin 5.782428725047753 +192 42 optimizer.lr 0.006895609342177788 +192 42 negative_sampler.num_negs_per_pos 58.0 +192 42 training.batch_size 2.0 +192 43 model.embedding_dim 0.0 +192 43 loss.margin 4.51041039010591 +192 43 optimizer.lr 0.0050909379443250385 +192 43 negative_sampler.num_negs_per_pos 90.0 +192 43 training.batch_size 0.0 +192 44 model.embedding_dim 2.0 +192 44 loss.margin 4.4981532320847695 +192 44 optimizer.lr 0.07291206556387039 +192 44 negative_sampler.num_negs_per_pos 65.0 +192 44 training.batch_size 1.0 +192 45 model.embedding_dim 2.0 +192 45 loss.margin 7.048358321445781 +192 45 optimizer.lr 0.07968051390060801 +192 45 negative_sampler.num_negs_per_pos 12.0 +192 45 training.batch_size 0.0 +192 46 model.embedding_dim 1.0 +192 46 loss.margin 4.459128941159019 +192 46 optimizer.lr 0.01958753933415599 +192 46 negative_sampler.num_negs_per_pos 28.0 +192 46 training.batch_size 2.0 +192 47 model.embedding_dim 1.0 +192 47 loss.margin 5.3861447080242115 +192 47 optimizer.lr 0.0073526165825548604 +192 47 negative_sampler.num_negs_per_pos 65.0 +192 47 training.batch_size 2.0 +192 48 model.embedding_dim 2.0 +192 48 loss.margin 1.825194275540013 +192 48 optimizer.lr 0.029378619528174783 +192 48 negative_sampler.num_negs_per_pos 86.0 +192 48 training.batch_size 2.0 +192 49 model.embedding_dim 0.0 +192 49 loss.margin 6.895984227902365 +192 49 optimizer.lr 0.00974449110297363 +192 49 negative_sampler.num_negs_per_pos 57.0 +192 49 training.batch_size 0.0 +192 50 model.embedding_dim 0.0 +192 50 loss.margin 
9.133032499384727 +192 50 optimizer.lr 0.01612546779178063 +192 50 negative_sampler.num_negs_per_pos 6.0 +192 50 training.batch_size 2.0 +192 51 model.embedding_dim 2.0 +192 51 loss.margin 3.743478745358834 +192 51 optimizer.lr 0.0034327277582168205 +192 51 negative_sampler.num_negs_per_pos 5.0 +192 51 training.batch_size 0.0 +192 52 model.embedding_dim 2.0 +192 52 loss.margin 4.280836361720629 +192 52 optimizer.lr 0.0031105276899604625 +192 52 negative_sampler.num_negs_per_pos 64.0 +192 52 training.batch_size 0.0 +192 53 model.embedding_dim 0.0 +192 53 loss.margin 5.02600296850467 +192 53 optimizer.lr 0.060052240418698014 +192 53 negative_sampler.num_negs_per_pos 54.0 +192 53 training.batch_size 0.0 +192 54 model.embedding_dim 0.0 +192 54 loss.margin 2.3015093541028855 +192 54 optimizer.lr 0.015079676496542694 +192 54 negative_sampler.num_negs_per_pos 18.0 +192 54 training.batch_size 0.0 +192 55 model.embedding_dim 1.0 +192 55 loss.margin 7.013293582161514 +192 55 optimizer.lr 0.0021039506094877834 +192 55 negative_sampler.num_negs_per_pos 82.0 +192 55 training.batch_size 0.0 +192 56 model.embedding_dim 2.0 +192 56 loss.margin 8.57517066300329 +192 56 optimizer.lr 0.003331002993549325 +192 56 negative_sampler.num_negs_per_pos 90.0 +192 56 training.batch_size 2.0 +192 57 model.embedding_dim 2.0 +192 57 loss.margin 6.7160493875565805 +192 57 optimizer.lr 0.014055926678416741 +192 57 negative_sampler.num_negs_per_pos 89.0 +192 57 training.batch_size 1.0 +192 58 model.embedding_dim 0.0 +192 58 loss.margin 1.082675040199702 +192 58 optimizer.lr 0.005891988899780721 +192 58 negative_sampler.num_negs_per_pos 48.0 +192 58 training.batch_size 2.0 +192 59 model.embedding_dim 1.0 +192 59 loss.margin 7.894486912136344 +192 59 optimizer.lr 0.009866226204823587 +192 59 negative_sampler.num_negs_per_pos 91.0 +192 59 training.batch_size 1.0 +192 60 model.embedding_dim 2.0 +192 60 loss.margin 2.588828595037336 +192 60 optimizer.lr 0.0017886560072524391 +192 60 
negative_sampler.num_negs_per_pos 83.0 +192 60 training.batch_size 0.0 +192 61 model.embedding_dim 1.0 +192 61 loss.margin 2.2885832188681867 +192 61 optimizer.lr 0.0080084356429699 +192 61 negative_sampler.num_negs_per_pos 12.0 +192 61 training.batch_size 1.0 +192 62 model.embedding_dim 0.0 +192 62 loss.margin 9.394630986150192 +192 62 optimizer.lr 0.0011913956087486082 +192 62 negative_sampler.num_negs_per_pos 45.0 +192 62 training.batch_size 2.0 +192 63 model.embedding_dim 1.0 +192 63 loss.margin 3.03814323558733 +192 63 optimizer.lr 0.0013492318799976015 +192 63 negative_sampler.num_negs_per_pos 23.0 +192 63 training.batch_size 1.0 +192 64 model.embedding_dim 1.0 +192 64 loss.margin 5.629006977346718 +192 64 optimizer.lr 0.08303426863236164 +192 64 negative_sampler.num_negs_per_pos 64.0 +192 64 training.batch_size 1.0 +192 65 model.embedding_dim 1.0 +192 65 loss.margin 3.7256217055886838 +192 65 optimizer.lr 0.07268181955779675 +192 65 negative_sampler.num_negs_per_pos 2.0 +192 65 training.batch_size 0.0 +192 66 model.embedding_dim 2.0 +192 66 loss.margin 4.0021993919609935 +192 66 optimizer.lr 0.003879330758977104 +192 66 negative_sampler.num_negs_per_pos 1.0 +192 66 training.batch_size 2.0 +192 67 model.embedding_dim 0.0 +192 67 loss.margin 9.775498704836771 +192 67 optimizer.lr 0.010665519925275334 +192 67 negative_sampler.num_negs_per_pos 82.0 +192 67 training.batch_size 0.0 +192 68 model.embedding_dim 2.0 +192 68 loss.margin 7.231841141874053 +192 68 optimizer.lr 0.05710237687900262 +192 68 negative_sampler.num_negs_per_pos 5.0 +192 68 training.batch_size 0.0 +192 69 model.embedding_dim 1.0 +192 69 loss.margin 7.319922788267019 +192 69 optimizer.lr 0.08499893627167714 +192 69 negative_sampler.num_negs_per_pos 1.0 +192 69 training.batch_size 1.0 +192 70 model.embedding_dim 2.0 +192 70 loss.margin 8.367281318945103 +192 70 optimizer.lr 0.012553554113764355 +192 70 negative_sampler.num_negs_per_pos 90.0 +192 70 training.batch_size 1.0 +192 71 
model.embedding_dim 2.0 +192 71 loss.margin 9.367984466438726 +192 71 optimizer.lr 0.004588766624094364 +192 71 negative_sampler.num_negs_per_pos 9.0 +192 71 training.batch_size 2.0 +192 72 model.embedding_dim 1.0 +192 72 loss.margin 4.691987702326327 +192 72 optimizer.lr 0.011836783737458629 +192 72 negative_sampler.num_negs_per_pos 36.0 +192 72 training.batch_size 0.0 +192 73 model.embedding_dim 2.0 +192 73 loss.margin 7.061055669056193 +192 73 optimizer.lr 0.093170757060422 +192 73 negative_sampler.num_negs_per_pos 34.0 +192 73 training.batch_size 2.0 +192 74 model.embedding_dim 0.0 +192 74 loss.margin 7.471469651960429 +192 74 optimizer.lr 0.033313993418780906 +192 74 negative_sampler.num_negs_per_pos 42.0 +192 74 training.batch_size 2.0 +192 75 model.embedding_dim 0.0 +192 75 loss.margin 4.624797061210188 +192 75 optimizer.lr 0.07816675724170088 +192 75 negative_sampler.num_negs_per_pos 27.0 +192 75 training.batch_size 0.0 +192 76 model.embedding_dim 0.0 +192 76 loss.margin 9.28749823083836 +192 76 optimizer.lr 0.002252426415666254 +192 76 negative_sampler.num_negs_per_pos 24.0 +192 76 training.batch_size 2.0 +192 77 model.embedding_dim 2.0 +192 77 loss.margin 8.86580336533939 +192 77 optimizer.lr 0.053150436498712025 +192 77 negative_sampler.num_negs_per_pos 7.0 +192 77 training.batch_size 0.0 +192 78 model.embedding_dim 0.0 +192 78 loss.margin 5.387298551229288 +192 78 optimizer.lr 0.04620557790942395 +192 78 negative_sampler.num_negs_per_pos 87.0 +192 78 training.batch_size 0.0 +192 79 model.embedding_dim 0.0 +192 79 loss.margin 1.7457059392969405 +192 79 optimizer.lr 0.008916891485447071 +192 79 negative_sampler.num_negs_per_pos 63.0 +192 79 training.batch_size 1.0 +192 80 model.embedding_dim 0.0 +192 80 loss.margin 7.344617807468291 +192 80 optimizer.lr 0.01939641003401544 +192 80 negative_sampler.num_negs_per_pos 55.0 +192 80 training.batch_size 1.0 +192 81 model.embedding_dim 1.0 +192 81 loss.margin 6.406953391159271 +192 81 optimizer.lr 
0.004266227710627388 +192 81 negative_sampler.num_negs_per_pos 46.0 +192 81 training.batch_size 0.0 +192 82 model.embedding_dim 1.0 +192 82 loss.margin 2.5775913164646083 +192 82 optimizer.lr 0.09834228622346106 +192 82 negative_sampler.num_negs_per_pos 55.0 +192 82 training.batch_size 2.0 +192 83 model.embedding_dim 0.0 +192 83 loss.margin 2.9723652489441363 +192 83 optimizer.lr 0.011620305651336106 +192 83 negative_sampler.num_negs_per_pos 31.0 +192 83 training.batch_size 1.0 +192 84 model.embedding_dim 2.0 +192 84 loss.margin 9.55961525730236 +192 84 optimizer.lr 0.042789519611425145 +192 84 negative_sampler.num_negs_per_pos 39.0 +192 84 training.batch_size 2.0 +192 85 model.embedding_dim 2.0 +192 85 loss.margin 1.1265031223989987 +192 85 optimizer.lr 0.006360623553960098 +192 85 negative_sampler.num_negs_per_pos 39.0 +192 85 training.batch_size 2.0 +192 86 model.embedding_dim 2.0 +192 86 loss.margin 9.297301713180223 +192 86 optimizer.lr 0.030745936981529543 +192 86 negative_sampler.num_negs_per_pos 18.0 +192 86 training.batch_size 0.0 +192 87 model.embedding_dim 2.0 +192 87 loss.margin 9.05115200183254 +192 87 optimizer.lr 0.003799512141694782 +192 87 negative_sampler.num_negs_per_pos 80.0 +192 87 training.batch_size 2.0 +192 88 model.embedding_dim 2.0 +192 88 loss.margin 2.833337821077368 +192 88 optimizer.lr 0.0032469716779603254 +192 88 negative_sampler.num_negs_per_pos 38.0 +192 88 training.batch_size 2.0 +192 89 model.embedding_dim 1.0 +192 89 loss.margin 5.890842700378361 +192 89 optimizer.lr 0.0013038245506841748 +192 89 negative_sampler.num_negs_per_pos 66.0 +192 89 training.batch_size 1.0 +192 90 model.embedding_dim 1.0 +192 90 loss.margin 1.6157279260775415 +192 90 optimizer.lr 0.0014110106646151234 +192 90 negative_sampler.num_negs_per_pos 40.0 +192 90 training.batch_size 2.0 +192 91 model.embedding_dim 0.0 +192 91 loss.margin 7.093347458223263 +192 91 optimizer.lr 0.0789583039160318 +192 91 negative_sampler.num_negs_per_pos 41.0 +192 91 
training.batch_size 2.0 +192 92 model.embedding_dim 0.0 +192 92 loss.margin 9.785143536917797 +192 92 optimizer.lr 0.030676601972012937 +192 92 negative_sampler.num_negs_per_pos 96.0 +192 92 training.batch_size 1.0 +192 93 model.embedding_dim 0.0 +192 93 loss.margin 6.203714677169499 +192 93 optimizer.lr 0.006400983724171439 +192 93 negative_sampler.num_negs_per_pos 68.0 +192 93 training.batch_size 2.0 +192 94 model.embedding_dim 1.0 +192 94 loss.margin 0.5299833702466995 +192 94 optimizer.lr 0.05338606538729286 +192 94 negative_sampler.num_negs_per_pos 0.0 +192 94 training.batch_size 1.0 +192 95 model.embedding_dim 2.0 +192 95 loss.margin 6.656911326165401 +192 95 optimizer.lr 0.002654069006110903 +192 95 negative_sampler.num_negs_per_pos 95.0 +192 95 training.batch_size 0.0 +192 96 model.embedding_dim 0.0 +192 96 loss.margin 8.656654742254538 +192 96 optimizer.lr 0.001249166996816829 +192 96 negative_sampler.num_negs_per_pos 22.0 +192 96 training.batch_size 2.0 +192 97 model.embedding_dim 0.0 +192 97 loss.margin 2.3220182018425763 +192 97 optimizer.lr 0.06259447702009303 +192 97 negative_sampler.num_negs_per_pos 70.0 +192 97 training.batch_size 1.0 +192 98 model.embedding_dim 0.0 +192 98 loss.margin 0.834132978879838 +192 98 optimizer.lr 0.03088541345677193 +192 98 negative_sampler.num_negs_per_pos 87.0 +192 98 training.batch_size 2.0 +192 99 model.embedding_dim 2.0 +192 99 loss.margin 5.151197480474541 +192 99 optimizer.lr 0.026912807959981282 +192 99 negative_sampler.num_negs_per_pos 43.0 +192 99 training.batch_size 2.0 +192 100 model.embedding_dim 1.0 +192 100 loss.margin 3.8182784297546295 +192 100 optimizer.lr 0.0017429189042187103 +192 100 negative_sampler.num_negs_per_pos 99.0 +192 100 training.batch_size 1.0 +192 1 dataset """kinships""" +192 1 model """distmult""" +192 1 loss """marginranking""" +192 1 regularizer """no""" +192 1 optimizer """adam""" +192 1 training_loop """owa""" +192 1 negative_sampler """basic""" +192 1 evaluator """rankbased""" +192 
2 dataset """kinships""" +192 2 model """distmult""" +192 2 loss """marginranking""" +192 2 regularizer """no""" +192 2 optimizer """adam""" +192 2 training_loop """owa""" +192 2 negative_sampler """basic""" +192 2 evaluator """rankbased""" +192 3 dataset """kinships""" +192 3 model """distmult""" +192 3 loss """marginranking""" +192 3 regularizer """no""" +192 3 optimizer """adam""" +192 3 training_loop """owa""" +192 3 negative_sampler """basic""" +192 3 evaluator """rankbased""" +192 4 dataset """kinships""" +192 4 model """distmult""" +192 4 loss """marginranking""" +192 4 regularizer """no""" +192 4 optimizer """adam""" +192 4 training_loop """owa""" +192 4 negative_sampler """basic""" +192 4 evaluator """rankbased""" +192 5 dataset """kinships""" +192 5 model """distmult""" +192 5 loss """marginranking""" +192 5 regularizer """no""" +192 5 optimizer """adam""" +192 5 training_loop """owa""" +192 5 negative_sampler """basic""" +192 5 evaluator """rankbased""" +192 6 dataset """kinships""" +192 6 model """distmult""" +192 6 loss """marginranking""" +192 6 regularizer """no""" +192 6 optimizer """adam""" +192 6 training_loop """owa""" +192 6 negative_sampler """basic""" +192 6 evaluator """rankbased""" +192 7 dataset """kinships""" +192 7 model """distmult""" +192 7 loss """marginranking""" +192 7 regularizer """no""" +192 7 optimizer """adam""" +192 7 training_loop """owa""" +192 7 negative_sampler """basic""" +192 7 evaluator """rankbased""" +192 8 dataset """kinships""" +192 8 model """distmult""" +192 8 loss """marginranking""" +192 8 regularizer """no""" +192 8 optimizer """adam""" +192 8 training_loop """owa""" +192 8 negative_sampler """basic""" +192 8 evaluator """rankbased""" +192 9 dataset """kinships""" +192 9 model """distmult""" +192 9 loss """marginranking""" +192 9 regularizer """no""" +192 9 optimizer """adam""" +192 9 training_loop """owa""" +192 9 negative_sampler """basic""" +192 9 evaluator """rankbased""" +192 10 dataset """kinships""" +192 
10 model """distmult""" +192 10 loss """marginranking""" +192 10 regularizer """no""" +192 10 optimizer """adam""" +192 10 training_loop """owa""" +192 10 negative_sampler """basic""" +192 10 evaluator """rankbased""" +192 11 dataset """kinships""" +192 11 model """distmult""" +192 11 loss """marginranking""" +192 11 regularizer """no""" +192 11 optimizer """adam""" +192 11 training_loop """owa""" +192 11 negative_sampler """basic""" +192 11 evaluator """rankbased""" +192 12 dataset """kinships""" +192 12 model """distmult""" +192 12 loss """marginranking""" +192 12 regularizer """no""" +192 12 optimizer """adam""" +192 12 training_loop """owa""" +192 12 negative_sampler """basic""" +192 12 evaluator """rankbased""" +192 13 dataset """kinships""" +192 13 model """distmult""" +192 13 loss """marginranking""" +192 13 regularizer """no""" +192 13 optimizer """adam""" +192 13 training_loop """owa""" +192 13 negative_sampler """basic""" +192 13 evaluator """rankbased""" +192 14 dataset """kinships""" +192 14 model """distmult""" +192 14 loss """marginranking""" +192 14 regularizer """no""" +192 14 optimizer """adam""" +192 14 training_loop """owa""" +192 14 negative_sampler """basic""" +192 14 evaluator """rankbased""" +192 15 dataset """kinships""" +192 15 model """distmult""" +192 15 loss """marginranking""" +192 15 regularizer """no""" +192 15 optimizer """adam""" +192 15 training_loop """owa""" +192 15 negative_sampler """basic""" +192 15 evaluator """rankbased""" +192 16 dataset """kinships""" +192 16 model """distmult""" +192 16 loss """marginranking""" +192 16 regularizer """no""" +192 16 optimizer """adam""" +192 16 training_loop """owa""" +192 16 negative_sampler """basic""" +192 16 evaluator """rankbased""" +192 17 dataset """kinships""" +192 17 model """distmult""" +192 17 loss """marginranking""" +192 17 regularizer """no""" +192 17 optimizer """adam""" +192 17 training_loop """owa""" +192 17 negative_sampler """basic""" +192 17 evaluator """rankbased""" 
+192 18 dataset """kinships""" +192 18 model """distmult""" +192 18 loss """marginranking""" +192 18 regularizer """no""" +192 18 optimizer """adam""" +192 18 training_loop """owa""" +192 18 negative_sampler """basic""" +192 18 evaluator """rankbased""" +192 19 dataset """kinships""" +192 19 model """distmult""" +192 19 loss """marginranking""" +192 19 regularizer """no""" +192 19 optimizer """adam""" +192 19 training_loop """owa""" +192 19 negative_sampler """basic""" +192 19 evaluator """rankbased""" +192 20 dataset """kinships""" +192 20 model """distmult""" +192 20 loss """marginranking""" +192 20 regularizer """no""" +192 20 optimizer """adam""" +192 20 training_loop """owa""" +192 20 negative_sampler """basic""" +192 20 evaluator """rankbased""" +192 21 dataset """kinships""" +192 21 model """distmult""" +192 21 loss """marginranking""" +192 21 regularizer """no""" +192 21 optimizer """adam""" +192 21 training_loop """owa""" +192 21 negative_sampler """basic""" +192 21 evaluator """rankbased""" +192 22 dataset """kinships""" +192 22 model """distmult""" +192 22 loss """marginranking""" +192 22 regularizer """no""" +192 22 optimizer """adam""" +192 22 training_loop """owa""" +192 22 negative_sampler """basic""" +192 22 evaluator """rankbased""" +192 23 dataset """kinships""" +192 23 model """distmult""" +192 23 loss """marginranking""" +192 23 regularizer """no""" +192 23 optimizer """adam""" +192 23 training_loop """owa""" +192 23 negative_sampler """basic""" +192 23 evaluator """rankbased""" +192 24 dataset """kinships""" +192 24 model """distmult""" +192 24 loss """marginranking""" +192 24 regularizer """no""" +192 24 optimizer """adam""" +192 24 training_loop """owa""" +192 24 negative_sampler """basic""" +192 24 evaluator """rankbased""" +192 25 dataset """kinships""" +192 25 model """distmult""" +192 25 loss """marginranking""" +192 25 regularizer """no""" +192 25 optimizer """adam""" +192 25 training_loop """owa""" +192 25 negative_sampler """basic""" 
+192 25 evaluator """rankbased""" +192 26 dataset """kinships""" +192 26 model """distmult""" +192 26 loss """marginranking""" +192 26 regularizer """no""" +192 26 optimizer """adam""" +192 26 training_loop """owa""" +192 26 negative_sampler """basic""" +192 26 evaluator """rankbased""" +192 27 dataset """kinships""" +192 27 model """distmult""" +192 27 loss """marginranking""" +192 27 regularizer """no""" +192 27 optimizer """adam""" +192 27 training_loop """owa""" +192 27 negative_sampler """basic""" +192 27 evaluator """rankbased""" +192 28 dataset """kinships""" +192 28 model """distmult""" +192 28 loss """marginranking""" +192 28 regularizer """no""" +192 28 optimizer """adam""" +192 28 training_loop """owa""" +192 28 negative_sampler """basic""" +192 28 evaluator """rankbased""" +192 29 dataset """kinships""" +192 29 model """distmult""" +192 29 loss """marginranking""" +192 29 regularizer """no""" +192 29 optimizer """adam""" +192 29 training_loop """owa""" +192 29 negative_sampler """basic""" +192 29 evaluator """rankbased""" +192 30 dataset """kinships""" +192 30 model """distmult""" +192 30 loss """marginranking""" +192 30 regularizer """no""" +192 30 optimizer """adam""" +192 30 training_loop """owa""" +192 30 negative_sampler """basic""" +192 30 evaluator """rankbased""" +192 31 dataset """kinships""" +192 31 model """distmult""" +192 31 loss """marginranking""" +192 31 regularizer """no""" +192 31 optimizer """adam""" +192 31 training_loop """owa""" +192 31 negative_sampler """basic""" +192 31 evaluator """rankbased""" +192 32 dataset """kinships""" +192 32 model """distmult""" +192 32 loss """marginranking""" +192 32 regularizer """no""" +192 32 optimizer """adam""" +192 32 training_loop """owa""" +192 32 negative_sampler """basic""" +192 32 evaluator """rankbased""" +192 33 dataset """kinships""" +192 33 model """distmult""" +192 33 loss """marginranking""" +192 33 regularizer """no""" +192 33 optimizer """adam""" +192 33 training_loop """owa""" +192 
33 negative_sampler """basic""" +192 33 evaluator """rankbased""" +192 34 dataset """kinships""" +192 34 model """distmult""" +192 34 loss """marginranking""" +192 34 regularizer """no""" +192 34 optimizer """adam""" +192 34 training_loop """owa""" +192 34 negative_sampler """basic""" +192 34 evaluator """rankbased""" +192 35 dataset """kinships""" +192 35 model """distmult""" +192 35 loss """marginranking""" +192 35 regularizer """no""" +192 35 optimizer """adam""" +192 35 training_loop """owa""" +192 35 negative_sampler """basic""" +192 35 evaluator """rankbased""" +192 36 dataset """kinships""" +192 36 model """distmult""" +192 36 loss """marginranking""" +192 36 regularizer """no""" +192 36 optimizer """adam""" +192 36 training_loop """owa""" +192 36 negative_sampler """basic""" +192 36 evaluator """rankbased""" +192 37 dataset """kinships""" +192 37 model """distmult""" +192 37 loss """marginranking""" +192 37 regularizer """no""" +192 37 optimizer """adam""" +192 37 training_loop """owa""" +192 37 negative_sampler """basic""" +192 37 evaluator """rankbased""" +192 38 dataset """kinships""" +192 38 model """distmult""" +192 38 loss """marginranking""" +192 38 regularizer """no""" +192 38 optimizer """adam""" +192 38 training_loop """owa""" +192 38 negative_sampler """basic""" +192 38 evaluator """rankbased""" +192 39 dataset """kinships""" +192 39 model """distmult""" +192 39 loss """marginranking""" +192 39 regularizer """no""" +192 39 optimizer """adam""" +192 39 training_loop """owa""" +192 39 negative_sampler """basic""" +192 39 evaluator """rankbased""" +192 40 dataset """kinships""" +192 40 model """distmult""" +192 40 loss """marginranking""" +192 40 regularizer """no""" +192 40 optimizer """adam""" +192 40 training_loop """owa""" +192 40 negative_sampler """basic""" +192 40 evaluator """rankbased""" +192 41 dataset """kinships""" +192 41 model """distmult""" +192 41 loss """marginranking""" +192 41 regularizer """no""" +192 41 optimizer """adam""" +192 
41 training_loop """owa""" +192 41 negative_sampler """basic""" +192 41 evaluator """rankbased""" +192 42 dataset """kinships""" +192 42 model """distmult""" +192 42 loss """marginranking""" +192 42 regularizer """no""" +192 42 optimizer """adam""" +192 42 training_loop """owa""" +192 42 negative_sampler """basic""" +192 42 evaluator """rankbased""" +192 43 dataset """kinships""" +192 43 model """distmult""" +192 43 loss """marginranking""" +192 43 regularizer """no""" +192 43 optimizer """adam""" +192 43 training_loop """owa""" +192 43 negative_sampler """basic""" +192 43 evaluator """rankbased""" +192 44 dataset """kinships""" +192 44 model """distmult""" +192 44 loss """marginranking""" +192 44 regularizer """no""" +192 44 optimizer """adam""" +192 44 training_loop """owa""" +192 44 negative_sampler """basic""" +192 44 evaluator """rankbased""" +192 45 dataset """kinships""" +192 45 model """distmult""" +192 45 loss """marginranking""" +192 45 regularizer """no""" +192 45 optimizer """adam""" +192 45 training_loop """owa""" +192 45 negative_sampler """basic""" +192 45 evaluator """rankbased""" +192 46 dataset """kinships""" +192 46 model """distmult""" +192 46 loss """marginranking""" +192 46 regularizer """no""" +192 46 optimizer """adam""" +192 46 training_loop """owa""" +192 46 negative_sampler """basic""" +192 46 evaluator """rankbased""" +192 47 dataset """kinships""" +192 47 model """distmult""" +192 47 loss """marginranking""" +192 47 regularizer """no""" +192 47 optimizer """adam""" +192 47 training_loop """owa""" +192 47 negative_sampler """basic""" +192 47 evaluator """rankbased""" +192 48 dataset """kinships""" +192 48 model """distmult""" +192 48 loss """marginranking""" +192 48 regularizer """no""" +192 48 optimizer """adam""" +192 48 training_loop """owa""" +192 48 negative_sampler """basic""" +192 48 evaluator """rankbased""" +192 49 dataset """kinships""" +192 49 model """distmult""" +192 49 loss """marginranking""" +192 49 regularizer """no""" 
+192 49 optimizer """adam""" +192 49 training_loop """owa""" +192 49 negative_sampler """basic""" +192 49 evaluator """rankbased""" +192 50 dataset """kinships""" +192 50 model """distmult""" +192 50 loss """marginranking""" +192 50 regularizer """no""" +192 50 optimizer """adam""" +192 50 training_loop """owa""" +192 50 negative_sampler """basic""" +192 50 evaluator """rankbased""" +192 51 dataset """kinships""" +192 51 model """distmult""" +192 51 loss """marginranking""" +192 51 regularizer """no""" +192 51 optimizer """adam""" +192 51 training_loop """owa""" +192 51 negative_sampler """basic""" +192 51 evaluator """rankbased""" +192 52 dataset """kinships""" +192 52 model """distmult""" +192 52 loss """marginranking""" +192 52 regularizer """no""" +192 52 optimizer """adam""" +192 52 training_loop """owa""" +192 52 negative_sampler """basic""" +192 52 evaluator """rankbased""" +192 53 dataset """kinships""" +192 53 model """distmult""" +192 53 loss """marginranking""" +192 53 regularizer """no""" +192 53 optimizer """adam""" +192 53 training_loop """owa""" +192 53 negative_sampler """basic""" +192 53 evaluator """rankbased""" +192 54 dataset """kinships""" +192 54 model """distmult""" +192 54 loss """marginranking""" +192 54 regularizer """no""" +192 54 optimizer """adam""" +192 54 training_loop """owa""" +192 54 negative_sampler """basic""" +192 54 evaluator """rankbased""" +192 55 dataset """kinships""" +192 55 model """distmult""" +192 55 loss """marginranking""" +192 55 regularizer """no""" +192 55 optimizer """adam""" +192 55 training_loop """owa""" +192 55 negative_sampler """basic""" +192 55 evaluator """rankbased""" +192 56 dataset """kinships""" +192 56 model """distmult""" +192 56 loss """marginranking""" +192 56 regularizer """no""" +192 56 optimizer """adam""" +192 56 training_loop """owa""" +192 56 negative_sampler """basic""" +192 56 evaluator """rankbased""" +192 57 dataset """kinships""" +192 57 model """distmult""" +192 57 loss 
"""marginranking""" +192 57 regularizer """no""" +192 57 optimizer """adam""" +192 57 training_loop """owa""" +192 57 negative_sampler """basic""" +192 57 evaluator """rankbased""" +192 58 dataset """kinships""" +192 58 model """distmult""" +192 58 loss """marginranking""" +192 58 regularizer """no""" +192 58 optimizer """adam""" +192 58 training_loop """owa""" +192 58 negative_sampler """basic""" +192 58 evaluator """rankbased""" +192 59 dataset """kinships""" +192 59 model """distmult""" +192 59 loss """marginranking""" +192 59 regularizer """no""" +192 59 optimizer """adam""" +192 59 training_loop """owa""" +192 59 negative_sampler """basic""" +192 59 evaluator """rankbased""" +192 60 dataset """kinships""" +192 60 model """distmult""" +192 60 loss """marginranking""" +192 60 regularizer """no""" +192 60 optimizer """adam""" +192 60 training_loop """owa""" +192 60 negative_sampler """basic""" +192 60 evaluator """rankbased""" +192 61 dataset """kinships""" +192 61 model """distmult""" +192 61 loss """marginranking""" +192 61 regularizer """no""" +192 61 optimizer """adam""" +192 61 training_loop """owa""" +192 61 negative_sampler """basic""" +192 61 evaluator """rankbased""" +192 62 dataset """kinships""" +192 62 model """distmult""" +192 62 loss """marginranking""" +192 62 regularizer """no""" +192 62 optimizer """adam""" +192 62 training_loop """owa""" +192 62 negative_sampler """basic""" +192 62 evaluator """rankbased""" +192 63 dataset """kinships""" +192 63 model """distmult""" +192 63 loss """marginranking""" +192 63 regularizer """no""" +192 63 optimizer """adam""" +192 63 training_loop """owa""" +192 63 negative_sampler """basic""" +192 63 evaluator """rankbased""" +192 64 dataset """kinships""" +192 64 model """distmult""" +192 64 loss """marginranking""" +192 64 regularizer """no""" +192 64 optimizer """adam""" +192 64 training_loop """owa""" +192 64 negative_sampler """basic""" +192 64 evaluator """rankbased""" +192 65 dataset """kinships""" +192 65 
model """distmult""" +192 65 loss """marginranking""" +192 65 regularizer """no""" +192 65 optimizer """adam""" +192 65 training_loop """owa""" +192 65 negative_sampler """basic""" +192 65 evaluator """rankbased""" +192 66 dataset """kinships""" +192 66 model """distmult""" +192 66 loss """marginranking""" +192 66 regularizer """no""" +192 66 optimizer """adam""" +192 66 training_loop """owa""" +192 66 negative_sampler """basic""" +192 66 evaluator """rankbased""" +192 67 dataset """kinships""" +192 67 model """distmult""" +192 67 loss """marginranking""" +192 67 regularizer """no""" +192 67 optimizer """adam""" +192 67 training_loop """owa""" +192 67 negative_sampler """basic""" +192 67 evaluator """rankbased""" +192 68 dataset """kinships""" +192 68 model """distmult""" +192 68 loss """marginranking""" +192 68 regularizer """no""" +192 68 optimizer """adam""" +192 68 training_loop """owa""" +192 68 negative_sampler """basic""" +192 68 evaluator """rankbased""" +192 69 dataset """kinships""" +192 69 model """distmult""" +192 69 loss """marginranking""" +192 69 regularizer """no""" +192 69 optimizer """adam""" +192 69 training_loop """owa""" +192 69 negative_sampler """basic""" +192 69 evaluator """rankbased""" +192 70 dataset """kinships""" +192 70 model """distmult""" +192 70 loss """marginranking""" +192 70 regularizer """no""" +192 70 optimizer """adam""" +192 70 training_loop """owa""" +192 70 negative_sampler """basic""" +192 70 evaluator """rankbased""" +192 71 dataset """kinships""" +192 71 model """distmult""" +192 71 loss """marginranking""" +192 71 regularizer """no""" +192 71 optimizer """adam""" +192 71 training_loop """owa""" +192 71 negative_sampler """basic""" +192 71 evaluator """rankbased""" +192 72 dataset """kinships""" +192 72 model """distmult""" +192 72 loss """marginranking""" +192 72 regularizer """no""" +192 72 optimizer """adam""" +192 72 training_loop """owa""" +192 72 negative_sampler """basic""" +192 72 evaluator """rankbased""" +192 
73 dataset """kinships""" +192 73 model """distmult""" +192 73 loss """marginranking""" +192 73 regularizer """no""" +192 73 optimizer """adam""" +192 73 training_loop """owa""" +192 73 negative_sampler """basic""" +192 73 evaluator """rankbased""" +192 74 dataset """kinships""" +192 74 model """distmult""" +192 74 loss """marginranking""" +192 74 regularizer """no""" +192 74 optimizer """adam""" +192 74 training_loop """owa""" +192 74 negative_sampler """basic""" +192 74 evaluator """rankbased""" +192 75 dataset """kinships""" +192 75 model """distmult""" +192 75 loss """marginranking""" +192 75 regularizer """no""" +192 75 optimizer """adam""" +192 75 training_loop """owa""" +192 75 negative_sampler """basic""" +192 75 evaluator """rankbased""" +192 76 dataset """kinships""" +192 76 model """distmult""" +192 76 loss """marginranking""" +192 76 regularizer """no""" +192 76 optimizer """adam""" +192 76 training_loop """owa""" +192 76 negative_sampler """basic""" +192 76 evaluator """rankbased""" +192 77 dataset """kinships""" +192 77 model """distmult""" +192 77 loss """marginranking""" +192 77 regularizer """no""" +192 77 optimizer """adam""" +192 77 training_loop """owa""" +192 77 negative_sampler """basic""" +192 77 evaluator """rankbased""" +192 78 dataset """kinships""" +192 78 model """distmult""" +192 78 loss """marginranking""" +192 78 regularizer """no""" +192 78 optimizer """adam""" +192 78 training_loop """owa""" +192 78 negative_sampler """basic""" +192 78 evaluator """rankbased""" +192 79 dataset """kinships""" +192 79 model """distmult""" +192 79 loss """marginranking""" +192 79 regularizer """no""" +192 79 optimizer """adam""" +192 79 training_loop """owa""" +192 79 negative_sampler """basic""" +192 79 evaluator """rankbased""" +192 80 dataset """kinships""" +192 80 model """distmult""" +192 80 loss """marginranking""" +192 80 regularizer """no""" +192 80 optimizer """adam""" +192 80 training_loop """owa""" +192 80 negative_sampler """basic""" +192 
80 evaluator """rankbased""" +192 81 dataset """kinships""" +192 81 model """distmult""" +192 81 loss """marginranking""" +192 81 regularizer """no""" +192 81 optimizer """adam""" +192 81 training_loop """owa""" +192 81 negative_sampler """basic""" +192 81 evaluator """rankbased""" +192 82 dataset """kinships""" +192 82 model """distmult""" +192 82 loss """marginranking""" +192 82 regularizer """no""" +192 82 optimizer """adam""" +192 82 training_loop """owa""" +192 82 negative_sampler """basic""" +192 82 evaluator """rankbased""" +192 83 dataset """kinships""" +192 83 model """distmult""" +192 83 loss """marginranking""" +192 83 regularizer """no""" +192 83 optimizer """adam""" +192 83 training_loop """owa""" +192 83 negative_sampler """basic""" +192 83 evaluator """rankbased""" +192 84 dataset """kinships""" +192 84 model """distmult""" +192 84 loss """marginranking""" +192 84 regularizer """no""" +192 84 optimizer """adam""" +192 84 training_loop """owa""" +192 84 negative_sampler """basic""" +192 84 evaluator """rankbased""" +192 85 dataset """kinships""" +192 85 model """distmult""" +192 85 loss """marginranking""" +192 85 regularizer """no""" +192 85 optimizer """adam""" +192 85 training_loop """owa""" +192 85 negative_sampler """basic""" +192 85 evaluator """rankbased""" +192 86 dataset """kinships""" +192 86 model """distmult""" +192 86 loss """marginranking""" +192 86 regularizer """no""" +192 86 optimizer """adam""" +192 86 training_loop """owa""" +192 86 negative_sampler """basic""" +192 86 evaluator """rankbased""" +192 87 dataset """kinships""" +192 87 model """distmult""" +192 87 loss """marginranking""" +192 87 regularizer """no""" +192 87 optimizer """adam""" +192 87 training_loop """owa""" +192 87 negative_sampler """basic""" +192 87 evaluator """rankbased""" +192 88 dataset """kinships""" +192 88 model """distmult""" +192 88 loss """marginranking""" +192 88 regularizer """no""" +192 88 optimizer """adam""" +192 88 training_loop """owa""" +192 88 
negative_sampler """basic""" +192 88 evaluator """rankbased""" +192 89 dataset """kinships""" +192 89 model """distmult""" +192 89 loss """marginranking""" +192 89 regularizer """no""" +192 89 optimizer """adam""" +192 89 training_loop """owa""" +192 89 negative_sampler """basic""" +192 89 evaluator """rankbased""" +192 90 dataset """kinships""" +192 90 model """distmult""" +192 90 loss """marginranking""" +192 90 regularizer """no""" +192 90 optimizer """adam""" +192 90 training_loop """owa""" +192 90 negative_sampler """basic""" +192 90 evaluator """rankbased""" +192 91 dataset """kinships""" +192 91 model """distmult""" +192 91 loss """marginranking""" +192 91 regularizer """no""" +192 91 optimizer """adam""" +192 91 training_loop """owa""" +192 91 negative_sampler """basic""" +192 91 evaluator """rankbased""" +192 92 dataset """kinships""" +192 92 model """distmult""" +192 92 loss """marginranking""" +192 92 regularizer """no""" +192 92 optimizer """adam""" +192 92 training_loop """owa""" +192 92 negative_sampler """basic""" +192 92 evaluator """rankbased""" +192 93 dataset """kinships""" +192 93 model """distmult""" +192 93 loss """marginranking""" +192 93 regularizer """no""" +192 93 optimizer """adam""" +192 93 training_loop """owa""" +192 93 negative_sampler """basic""" +192 93 evaluator """rankbased""" +192 94 dataset """kinships""" +192 94 model """distmult""" +192 94 loss """marginranking""" +192 94 regularizer """no""" +192 94 optimizer """adam""" +192 94 training_loop """owa""" +192 94 negative_sampler """basic""" +192 94 evaluator """rankbased""" +192 95 dataset """kinships""" +192 95 model """distmult""" +192 95 loss """marginranking""" +192 95 regularizer """no""" +192 95 optimizer """adam""" +192 95 training_loop """owa""" +192 95 negative_sampler """basic""" +192 95 evaluator """rankbased""" +192 96 dataset """kinships""" +192 96 model """distmult""" +192 96 loss """marginranking""" +192 96 regularizer """no""" +192 96 optimizer """adam""" +192 96 
training_loop """owa""" +192 96 negative_sampler """basic""" +192 96 evaluator """rankbased""" +192 97 dataset """kinships""" +192 97 model """distmult""" +192 97 loss """marginranking""" +192 97 regularizer """no""" +192 97 optimizer """adam""" +192 97 training_loop """owa""" +192 97 negative_sampler """basic""" +192 97 evaluator """rankbased""" +192 98 dataset """kinships""" +192 98 model """distmult""" +192 98 loss """marginranking""" +192 98 regularizer """no""" +192 98 optimizer """adam""" +192 98 training_loop """owa""" +192 98 negative_sampler """basic""" +192 98 evaluator """rankbased""" +192 99 dataset """kinships""" +192 99 model """distmult""" +192 99 loss """marginranking""" +192 99 regularizer """no""" +192 99 optimizer """adam""" +192 99 training_loop """owa""" +192 99 negative_sampler """basic""" +192 99 evaluator """rankbased""" +192 100 dataset """kinships""" +192 100 model """distmult""" +192 100 loss """marginranking""" +192 100 regularizer """no""" +192 100 optimizer """adam""" +192 100 training_loop """owa""" +192 100 negative_sampler """basic""" +192 100 evaluator """rankbased""" +193 1 model.embedding_dim 1.0 +193 1 loss.margin 2.9431446585439267 +193 1 optimizer.lr 0.005198723455347409 +193 1 negative_sampler.num_negs_per_pos 92.0 +193 1 training.batch_size 2.0 +193 2 model.embedding_dim 1.0 +193 2 loss.margin 3.9762390988103733 +193 2 optimizer.lr 0.017909353781388995 +193 2 negative_sampler.num_negs_per_pos 90.0 +193 2 training.batch_size 2.0 +193 3 model.embedding_dim 0.0 +193 3 loss.margin 0.5386255848979946 +193 3 optimizer.lr 0.00286198097326308 +193 3 negative_sampler.num_negs_per_pos 52.0 +193 3 training.batch_size 0.0 +193 4 model.embedding_dim 0.0 +193 4 loss.margin 6.767402109272595 +193 4 optimizer.lr 0.0029120230854866387 +193 4 negative_sampler.num_negs_per_pos 54.0 +193 4 training.batch_size 1.0 +193 5 model.embedding_dim 1.0 +193 5 loss.margin 9.509312434614957 +193 5 optimizer.lr 0.0012227708879821093 +193 5 
negative_sampler.num_negs_per_pos 8.0 +193 5 training.batch_size 0.0 +193 6 model.embedding_dim 1.0 +193 6 loss.margin 7.30862890622238 +193 6 optimizer.lr 0.0013021865673216427 +193 6 negative_sampler.num_negs_per_pos 26.0 +193 6 training.batch_size 2.0 +193 7 model.embedding_dim 1.0 +193 7 loss.margin 8.132251490340915 +193 7 optimizer.lr 0.02625109568928685 +193 7 negative_sampler.num_negs_per_pos 65.0 +193 7 training.batch_size 1.0 +193 8 model.embedding_dim 1.0 +193 8 loss.margin 8.444822762769295 +193 8 optimizer.lr 0.005030869281986907 +193 8 negative_sampler.num_negs_per_pos 64.0 +193 8 training.batch_size 2.0 +193 9 model.embedding_dim 0.0 +193 9 loss.margin 5.51019962950544 +193 9 optimizer.lr 0.022663899125365056 +193 9 negative_sampler.num_negs_per_pos 65.0 +193 9 training.batch_size 1.0 +193 10 model.embedding_dim 1.0 +193 10 loss.margin 9.698219675191776 +193 10 optimizer.lr 0.01164583463734074 +193 10 negative_sampler.num_negs_per_pos 78.0 +193 10 training.batch_size 2.0 +193 11 model.embedding_dim 1.0 +193 11 loss.margin 6.482236668197073 +193 11 optimizer.lr 0.010362803433191037 +193 11 negative_sampler.num_negs_per_pos 18.0 +193 11 training.batch_size 1.0 +193 12 model.embedding_dim 2.0 +193 12 loss.margin 1.8627653556711912 +193 12 optimizer.lr 0.0014671353860796516 +193 12 negative_sampler.num_negs_per_pos 67.0 +193 12 training.batch_size 2.0 +193 13 model.embedding_dim 0.0 +193 13 loss.margin 4.501239681359961 +193 13 optimizer.lr 0.008483070425732235 +193 13 negative_sampler.num_negs_per_pos 19.0 +193 13 training.batch_size 2.0 +193 14 model.embedding_dim 1.0 +193 14 loss.margin 8.52844761537043 +193 14 optimizer.lr 0.003847588225799917 +193 14 negative_sampler.num_negs_per_pos 41.0 +193 14 training.batch_size 2.0 +193 15 model.embedding_dim 2.0 +193 15 loss.margin 6.371270834432211 +193 15 optimizer.lr 0.0010287820770906678 +193 15 negative_sampler.num_negs_per_pos 66.0 +193 15 training.batch_size 2.0 +193 16 model.embedding_dim 0.0 +193 16 
loss.margin 6.278411770229282 +193 16 optimizer.lr 0.06333699567668327 +193 16 negative_sampler.num_negs_per_pos 0.0 +193 16 training.batch_size 1.0 +193 17 model.embedding_dim 1.0 +193 17 loss.margin 2.7708470074692695 +193 17 optimizer.lr 0.039638033509539865 +193 17 negative_sampler.num_negs_per_pos 33.0 +193 17 training.batch_size 0.0 +193 18 model.embedding_dim 2.0 +193 18 loss.margin 8.046099567763747 +193 18 optimizer.lr 0.007437569650749489 +193 18 negative_sampler.num_negs_per_pos 60.0 +193 18 training.batch_size 1.0 +193 19 model.embedding_dim 2.0 +193 19 loss.margin 5.929729471328365 +193 19 optimizer.lr 0.018760073525931474 +193 19 negative_sampler.num_negs_per_pos 14.0 +193 19 training.batch_size 0.0 +193 20 model.embedding_dim 0.0 +193 20 loss.margin 7.820117137765344 +193 20 optimizer.lr 0.06451277506741232 +193 20 negative_sampler.num_negs_per_pos 22.0 +193 20 training.batch_size 1.0 +193 21 model.embedding_dim 1.0 +193 21 loss.margin 2.7339723905034257 +193 21 optimizer.lr 0.001869523324887952 +193 21 negative_sampler.num_negs_per_pos 83.0 +193 21 training.batch_size 1.0 +193 22 model.embedding_dim 0.0 +193 22 loss.margin 1.5068846836147591 +193 22 optimizer.lr 0.08549505872056427 +193 22 negative_sampler.num_negs_per_pos 84.0 +193 22 training.batch_size 2.0 +193 23 model.embedding_dim 1.0 +193 23 loss.margin 8.031502815130933 +193 23 optimizer.lr 0.00790934801338396 +193 23 negative_sampler.num_negs_per_pos 11.0 +193 23 training.batch_size 1.0 +193 24 model.embedding_dim 2.0 +193 24 loss.margin 3.451862808032372 +193 24 optimizer.lr 0.088937584973031 +193 24 negative_sampler.num_negs_per_pos 34.0 +193 24 training.batch_size 0.0 +193 25 model.embedding_dim 0.0 +193 25 loss.margin 2.0368607710157263 +193 25 optimizer.lr 0.0010379125784242295 +193 25 negative_sampler.num_negs_per_pos 7.0 +193 25 training.batch_size 2.0 +193 26 model.embedding_dim 2.0 +193 26 loss.margin 5.433114583464842 +193 26 optimizer.lr 0.018099092358822837 +193 26 
negative_sampler.num_negs_per_pos 92.0 +193 26 training.batch_size 0.0 +193 27 model.embedding_dim 0.0 +193 27 loss.margin 4.414456691256229 +193 27 optimizer.lr 0.0033917992144640936 +193 27 negative_sampler.num_negs_per_pos 23.0 +193 27 training.batch_size 0.0 +193 28 model.embedding_dim 1.0 +193 28 loss.margin 7.230232226181065 +193 28 optimizer.lr 0.0052969150675772686 +193 28 negative_sampler.num_negs_per_pos 41.0 +193 28 training.batch_size 1.0 +193 29 model.embedding_dim 0.0 +193 29 loss.margin 5.3925026120045505 +193 29 optimizer.lr 0.0023873759478903523 +193 29 negative_sampler.num_negs_per_pos 46.0 +193 29 training.batch_size 2.0 +193 30 model.embedding_dim 1.0 +193 30 loss.margin 1.3021993579417 +193 30 optimizer.lr 0.007368867842101278 +193 30 negative_sampler.num_negs_per_pos 7.0 +193 30 training.batch_size 2.0 +193 31 model.embedding_dim 1.0 +193 31 loss.margin 5.207752370986562 +193 31 optimizer.lr 0.01370503415988387 +193 31 negative_sampler.num_negs_per_pos 82.0 +193 31 training.batch_size 1.0 +193 32 model.embedding_dim 2.0 +193 32 loss.margin 7.658152131646805 +193 32 optimizer.lr 0.0017669073242022554 +193 32 negative_sampler.num_negs_per_pos 42.0 +193 32 training.batch_size 1.0 +193 33 model.embedding_dim 0.0 +193 33 loss.margin 2.7273862137871197 +193 33 optimizer.lr 0.003397982162748639 +193 33 negative_sampler.num_negs_per_pos 28.0 +193 33 training.batch_size 1.0 +193 34 model.embedding_dim 0.0 +193 34 loss.margin 6.495900290604214 +193 34 optimizer.lr 0.029272546439320174 +193 34 negative_sampler.num_negs_per_pos 47.0 +193 34 training.batch_size 1.0 +193 35 model.embedding_dim 2.0 +193 35 loss.margin 4.330867238170669 +193 35 optimizer.lr 0.005404405080641276 +193 35 negative_sampler.num_negs_per_pos 77.0 +193 35 training.batch_size 1.0 +193 36 model.embedding_dim 2.0 +193 36 loss.margin 6.664525180273601 +193 36 optimizer.lr 0.004367776015905363 +193 36 negative_sampler.num_negs_per_pos 46.0 +193 36 training.batch_size 0.0 +193 37 
model.embedding_dim 1.0 +193 37 loss.margin 7.442021771510497 +193 37 optimizer.lr 0.0076878246555046785 +193 37 negative_sampler.num_negs_per_pos 62.0 +193 37 training.batch_size 0.0 +193 38 model.embedding_dim 0.0 +193 38 loss.margin 0.9656892334767484 +193 38 optimizer.lr 0.029572319314701757 +193 38 negative_sampler.num_negs_per_pos 38.0 +193 38 training.batch_size 2.0 +193 39 model.embedding_dim 1.0 +193 39 loss.margin 9.518533278599776 +193 39 optimizer.lr 0.055728175136845304 +193 39 negative_sampler.num_negs_per_pos 61.0 +193 39 training.batch_size 2.0 +193 40 model.embedding_dim 0.0 +193 40 loss.margin 4.566964697194779 +193 40 optimizer.lr 0.0025091249663215204 +193 40 negative_sampler.num_negs_per_pos 26.0 +193 40 training.batch_size 1.0 +193 41 model.embedding_dim 0.0 +193 41 loss.margin 1.4985464572741445 +193 41 optimizer.lr 0.0027499439047016065 +193 41 negative_sampler.num_negs_per_pos 46.0 +193 41 training.batch_size 2.0 +193 42 model.embedding_dim 1.0 +193 42 loss.margin 8.749685307165782 +193 42 optimizer.lr 0.01074594746868279 +193 42 negative_sampler.num_negs_per_pos 48.0 +193 42 training.batch_size 2.0 +193 43 model.embedding_dim 0.0 +193 43 loss.margin 3.424430771369449 +193 43 optimizer.lr 0.0015883193117178427 +193 43 negative_sampler.num_negs_per_pos 76.0 +193 43 training.batch_size 0.0 +193 44 model.embedding_dim 0.0 +193 44 loss.margin 4.939367974585243 +193 44 optimizer.lr 0.0031063936194544142 +193 44 negative_sampler.num_negs_per_pos 94.0 +193 44 training.batch_size 1.0 +193 45 model.embedding_dim 2.0 +193 45 loss.margin 1.5725295819411682 +193 45 optimizer.lr 0.003754832656881245 +193 45 negative_sampler.num_negs_per_pos 64.0 +193 45 training.batch_size 2.0 +193 46 model.embedding_dim 2.0 +193 46 loss.margin 6.083284916518474 +193 46 optimizer.lr 0.0012821564397303194 +193 46 negative_sampler.num_negs_per_pos 52.0 +193 46 training.batch_size 2.0 +193 47 model.embedding_dim 2.0 +193 47 loss.margin 2.4828177636189315 +193 47 
optimizer.lr 0.024127385336084283 +193 47 negative_sampler.num_negs_per_pos 62.0 +193 47 training.batch_size 1.0 +193 48 model.embedding_dim 1.0 +193 48 loss.margin 0.6827329938663098 +193 48 optimizer.lr 0.012428761871813012 +193 48 negative_sampler.num_negs_per_pos 52.0 +193 48 training.batch_size 0.0 +193 49 model.embedding_dim 0.0 +193 49 loss.margin 8.0290303304693 +193 49 optimizer.lr 0.006162180896895272 +193 49 negative_sampler.num_negs_per_pos 23.0 +193 49 training.batch_size 0.0 +193 50 model.embedding_dim 2.0 +193 50 loss.margin 3.6289534787557214 +193 50 optimizer.lr 0.010478693023926705 +193 50 negative_sampler.num_negs_per_pos 39.0 +193 50 training.batch_size 2.0 +193 51 model.embedding_dim 1.0 +193 51 loss.margin 3.9636886589020013 +193 51 optimizer.lr 0.04712470073597816 +193 51 negative_sampler.num_negs_per_pos 26.0 +193 51 training.batch_size 0.0 +193 52 model.embedding_dim 1.0 +193 52 loss.margin 2.9013937908833753 +193 52 optimizer.lr 0.03900083326544368 +193 52 negative_sampler.num_negs_per_pos 85.0 +193 52 training.batch_size 0.0 +193 53 model.embedding_dim 0.0 +193 53 loss.margin 9.305451437004963 +193 53 optimizer.lr 0.010967217812744531 +193 53 negative_sampler.num_negs_per_pos 10.0 +193 53 training.batch_size 1.0 +193 54 model.embedding_dim 2.0 +193 54 loss.margin 7.621506878695802 +193 54 optimizer.lr 0.00408979168656585 +193 54 negative_sampler.num_negs_per_pos 80.0 +193 54 training.batch_size 1.0 +193 55 model.embedding_dim 1.0 +193 55 loss.margin 7.794945186221883 +193 55 optimizer.lr 0.0011529853876041099 +193 55 negative_sampler.num_negs_per_pos 94.0 +193 55 training.batch_size 2.0 +193 56 model.embedding_dim 2.0 +193 56 loss.margin 8.88066144782323 +193 56 optimizer.lr 0.0010319109750042898 +193 56 negative_sampler.num_negs_per_pos 27.0 +193 56 training.batch_size 0.0 +193 57 model.embedding_dim 2.0 +193 57 loss.margin 3.7890018601468842 +193 57 optimizer.lr 0.005901649342667914 +193 57 negative_sampler.num_negs_per_pos 93.0 +193 57 
training.batch_size 2.0 +193 58 model.embedding_dim 0.0 +193 58 loss.margin 4.049477891280925 +193 58 optimizer.lr 0.06799794747992993 +193 58 negative_sampler.num_negs_per_pos 59.0 +193 58 training.batch_size 2.0 +193 59 model.embedding_dim 1.0 +193 59 loss.margin 4.709917637473863 +193 59 optimizer.lr 0.0017233594735108994 +193 59 negative_sampler.num_negs_per_pos 11.0 +193 59 training.batch_size 1.0 +193 60 model.embedding_dim 2.0 +193 60 loss.margin 1.070439256020395 +193 60 optimizer.lr 0.003276565719319771 +193 60 negative_sampler.num_negs_per_pos 13.0 +193 60 training.batch_size 1.0 +193 61 model.embedding_dim 1.0 +193 61 loss.margin 9.373779730140624 +193 61 optimizer.lr 0.018913833369603554 +193 61 negative_sampler.num_negs_per_pos 15.0 +193 61 training.batch_size 0.0 +193 62 model.embedding_dim 2.0 +193 62 loss.margin 2.979985658981953 +193 62 optimizer.lr 0.001040605202937749 +193 62 negative_sampler.num_negs_per_pos 3.0 +193 62 training.batch_size 0.0 +193 63 model.embedding_dim 0.0 +193 63 loss.margin 9.673351439850045 +193 63 optimizer.lr 0.0033074752207449937 +193 63 negative_sampler.num_negs_per_pos 51.0 +193 63 training.batch_size 0.0 +193 64 model.embedding_dim 2.0 +193 64 loss.margin 1.2985988450762327 +193 64 optimizer.lr 0.005247662520743152 +193 64 negative_sampler.num_negs_per_pos 31.0 +193 64 training.batch_size 1.0 +193 65 model.embedding_dim 0.0 +193 65 loss.margin 1.2784405117623536 +193 65 optimizer.lr 0.00911417910074773 +193 65 negative_sampler.num_negs_per_pos 63.0 +193 65 training.batch_size 1.0 +193 66 model.embedding_dim 1.0 +193 66 loss.margin 7.300291048640649 +193 66 optimizer.lr 0.0015208762107854323 +193 66 negative_sampler.num_negs_per_pos 89.0 +193 66 training.batch_size 1.0 +193 67 model.embedding_dim 1.0 +193 67 loss.margin 6.670934480128695 +193 67 optimizer.lr 0.00482453432393267 +193 67 negative_sampler.num_negs_per_pos 93.0 +193 67 training.batch_size 1.0 +193 68 model.embedding_dim 1.0 +193 68 loss.margin 
7.969677822379586 +193 68 optimizer.lr 0.0016013804906335949 +193 68 negative_sampler.num_negs_per_pos 55.0 +193 68 training.batch_size 1.0 +193 69 model.embedding_dim 2.0 +193 69 loss.margin 3.1709266459261265 +193 69 optimizer.lr 0.018759719857480452 +193 69 negative_sampler.num_negs_per_pos 87.0 +193 69 training.batch_size 1.0 +193 70 model.embedding_dim 0.0 +193 70 loss.margin 0.7887238338470047 +193 70 optimizer.lr 0.07030428609144002 +193 70 negative_sampler.num_negs_per_pos 12.0 +193 70 training.batch_size 0.0 +193 71 model.embedding_dim 1.0 +193 71 loss.margin 0.9713957814987215 +193 71 optimizer.lr 0.014898300997407785 +193 71 negative_sampler.num_negs_per_pos 98.0 +193 71 training.batch_size 1.0 +193 72 model.embedding_dim 0.0 +193 72 loss.margin 1.3686948548852347 +193 72 optimizer.lr 0.0023045873163051404 +193 72 negative_sampler.num_negs_per_pos 78.0 +193 72 training.batch_size 1.0 +193 73 model.embedding_dim 1.0 +193 73 loss.margin 9.176362485677922 +193 73 optimizer.lr 0.0017026810733189691 +193 73 negative_sampler.num_negs_per_pos 84.0 +193 73 training.batch_size 2.0 +193 74 model.embedding_dim 0.0 +193 74 loss.margin 6.782216154215355 +193 74 optimizer.lr 0.024238098369444554 +193 74 negative_sampler.num_negs_per_pos 67.0 +193 74 training.batch_size 2.0 +193 75 model.embedding_dim 1.0 +193 75 loss.margin 3.2293649930284527 +193 75 optimizer.lr 0.009678821628214615 +193 75 negative_sampler.num_negs_per_pos 48.0 +193 75 training.batch_size 0.0 +193 76 model.embedding_dim 0.0 +193 76 loss.margin 5.640483938526934 +193 76 optimizer.lr 0.01784143988677224 +193 76 negative_sampler.num_negs_per_pos 7.0 +193 76 training.batch_size 1.0 +193 77 model.embedding_dim 1.0 +193 77 loss.margin 8.592200877594362 +193 77 optimizer.lr 0.02599678802981765 +193 77 negative_sampler.num_negs_per_pos 77.0 +193 77 training.batch_size 0.0 +193 78 model.embedding_dim 2.0 +193 78 loss.margin 3.85875687290025 +193 78 optimizer.lr 0.03200000719106941 +193 78 
negative_sampler.num_negs_per_pos 80.0 +193 78 training.batch_size 2.0 +193 79 model.embedding_dim 2.0 +193 79 loss.margin 6.771554144043817 +193 79 optimizer.lr 0.0036460491676911905 +193 79 negative_sampler.num_negs_per_pos 51.0 +193 79 training.batch_size 2.0 +193 80 model.embedding_dim 2.0 +193 80 loss.margin 6.709232087881308 +193 80 optimizer.lr 0.0020236617077827217 +193 80 negative_sampler.num_negs_per_pos 47.0 +193 80 training.batch_size 1.0 +193 81 model.embedding_dim 1.0 +193 81 loss.margin 2.9093835004670163 +193 81 optimizer.lr 0.002022831529935085 +193 81 negative_sampler.num_negs_per_pos 86.0 +193 81 training.batch_size 2.0 +193 82 model.embedding_dim 0.0 +193 82 loss.margin 3.184918516997487 +193 82 optimizer.lr 0.002487868040520559 +193 82 negative_sampler.num_negs_per_pos 82.0 +193 82 training.batch_size 1.0 +193 83 model.embedding_dim 2.0 +193 83 loss.margin 5.41662866722506 +193 83 optimizer.lr 0.002417859465637073 +193 83 negative_sampler.num_negs_per_pos 87.0 +193 83 training.batch_size 1.0 +193 84 model.embedding_dim 2.0 +193 84 loss.margin 1.2509788228384702 +193 84 optimizer.lr 0.08340310710677291 +193 84 negative_sampler.num_negs_per_pos 61.0 +193 84 training.batch_size 2.0 +193 85 model.embedding_dim 1.0 +193 85 loss.margin 1.1158549647033413 +193 85 optimizer.lr 0.0011499799024056053 +193 85 negative_sampler.num_negs_per_pos 42.0 +193 85 training.batch_size 2.0 +193 86 model.embedding_dim 1.0 +193 86 loss.margin 8.423529741651276 +193 86 optimizer.lr 0.03307177713624888 +193 86 negative_sampler.num_negs_per_pos 55.0 +193 86 training.batch_size 0.0 +193 87 model.embedding_dim 2.0 +193 87 loss.margin 4.188335695567277 +193 87 optimizer.lr 0.0010857814979820526 +193 87 negative_sampler.num_negs_per_pos 3.0 +193 87 training.batch_size 0.0 +193 88 model.embedding_dim 1.0 +193 88 loss.margin 9.09031661263187 +193 88 optimizer.lr 0.012501403942987157 +193 88 negative_sampler.num_negs_per_pos 37.0 +193 88 training.batch_size 1.0 +193 89 
model.embedding_dim 1.0 +193 89 loss.margin 3.043218391324314 +193 89 optimizer.lr 0.0059107596725931025 +193 89 negative_sampler.num_negs_per_pos 43.0 +193 89 training.batch_size 1.0 +193 90 model.embedding_dim 1.0 +193 90 loss.margin 1.0566873839169837 +193 90 optimizer.lr 0.0028770864489403923 +193 90 negative_sampler.num_negs_per_pos 4.0 +193 90 training.batch_size 2.0 +193 91 model.embedding_dim 0.0 +193 91 loss.margin 9.474943217165517 +193 91 optimizer.lr 0.0013130930807719588 +193 91 negative_sampler.num_negs_per_pos 69.0 +193 91 training.batch_size 0.0 +193 92 model.embedding_dim 1.0 +193 92 loss.margin 8.819016978407731 +193 92 optimizer.lr 0.01730330767374282 +193 92 negative_sampler.num_negs_per_pos 60.0 +193 92 training.batch_size 1.0 +193 93 model.embedding_dim 2.0 +193 93 loss.margin 3.1883706324239474 +193 93 optimizer.lr 0.015842572580329374 +193 93 negative_sampler.num_negs_per_pos 22.0 +193 93 training.batch_size 1.0 +193 94 model.embedding_dim 1.0 +193 94 loss.margin 7.708634334052096 +193 94 optimizer.lr 0.0048848695742071055 +193 94 negative_sampler.num_negs_per_pos 86.0 +193 94 training.batch_size 0.0 +193 95 model.embedding_dim 1.0 +193 95 loss.margin 9.928278245837948 +193 95 optimizer.lr 0.0036491503600697 +193 95 negative_sampler.num_negs_per_pos 51.0 +193 95 training.batch_size 2.0 +193 96 model.embedding_dim 2.0 +193 96 loss.margin 5.2567025397513785 +193 96 optimizer.lr 0.008235155540934716 +193 96 negative_sampler.num_negs_per_pos 46.0 +193 96 training.batch_size 1.0 +193 97 model.embedding_dim 1.0 +193 97 loss.margin 3.6264234846856365 +193 97 optimizer.lr 0.02600025814563906 +193 97 negative_sampler.num_negs_per_pos 49.0 +193 97 training.batch_size 2.0 +193 98 model.embedding_dim 0.0 +193 98 loss.margin 7.368782937247415 +193 98 optimizer.lr 0.05589112604578949 +193 98 negative_sampler.num_negs_per_pos 17.0 +193 98 training.batch_size 1.0 +193 99 model.embedding_dim 0.0 +193 99 loss.margin 1.0037890190820622 +193 99 optimizer.lr 
0.061579459762718926 +193 99 negative_sampler.num_negs_per_pos 98.0 +193 99 training.batch_size 0.0 +193 100 model.embedding_dim 1.0 +193 100 loss.margin 8.115922855025698 +193 100 optimizer.lr 0.01655544029908022 +193 100 negative_sampler.num_negs_per_pos 12.0 +193 100 training.batch_size 0.0 +193 1 dataset """kinships""" +193 1 model """distmult""" +193 1 loss """marginranking""" +193 1 regularizer """no""" +193 1 optimizer """adam""" +193 1 training_loop """owa""" +193 1 negative_sampler """basic""" +193 1 evaluator """rankbased""" +193 2 dataset """kinships""" +193 2 model """distmult""" +193 2 loss """marginranking""" +193 2 regularizer """no""" +193 2 optimizer """adam""" +193 2 training_loop """owa""" +193 2 negative_sampler """basic""" +193 2 evaluator """rankbased""" +193 3 dataset """kinships""" +193 3 model """distmult""" +193 3 loss """marginranking""" +193 3 regularizer """no""" +193 3 optimizer """adam""" +193 3 training_loop """owa""" +193 3 negative_sampler """basic""" +193 3 evaluator """rankbased""" +193 4 dataset """kinships""" +193 4 model """distmult""" +193 4 loss """marginranking""" +193 4 regularizer """no""" +193 4 optimizer """adam""" +193 4 training_loop """owa""" +193 4 negative_sampler """basic""" +193 4 evaluator """rankbased""" +193 5 dataset """kinships""" +193 5 model """distmult""" +193 5 loss """marginranking""" +193 5 regularizer """no""" +193 5 optimizer """adam""" +193 5 training_loop """owa""" +193 5 negative_sampler """basic""" +193 5 evaluator """rankbased""" +193 6 dataset """kinships""" +193 6 model """distmult""" +193 6 loss """marginranking""" +193 6 regularizer """no""" +193 6 optimizer """adam""" +193 6 training_loop """owa""" +193 6 negative_sampler """basic""" +193 6 evaluator """rankbased""" +193 7 dataset """kinships""" +193 7 model """distmult""" +193 7 loss """marginranking""" +193 7 regularizer """no""" +193 7 optimizer """adam""" +193 7 training_loop """owa""" +193 7 negative_sampler """basic""" +193 7 
evaluator """rankbased""" +193 8 dataset """kinships""" +193 8 model """distmult""" +193 8 loss """marginranking""" +193 8 regularizer """no""" +193 8 optimizer """adam""" +193 8 training_loop """owa""" +193 8 negative_sampler """basic""" +193 8 evaluator """rankbased""" +193 9 dataset """kinships""" +193 9 model """distmult""" +193 9 loss """marginranking""" +193 9 regularizer """no""" +193 9 optimizer """adam""" +193 9 training_loop """owa""" +193 9 negative_sampler """basic""" +193 9 evaluator """rankbased""" +193 10 dataset """kinships""" +193 10 model """distmult""" +193 10 loss """marginranking""" +193 10 regularizer """no""" +193 10 optimizer """adam""" +193 10 training_loop """owa""" +193 10 negative_sampler """basic""" +193 10 evaluator """rankbased""" +193 11 dataset """kinships""" +193 11 model """distmult""" +193 11 loss """marginranking""" +193 11 regularizer """no""" +193 11 optimizer """adam""" +193 11 training_loop """owa""" +193 11 negative_sampler """basic""" +193 11 evaluator """rankbased""" +193 12 dataset """kinships""" +193 12 model """distmult""" +193 12 loss """marginranking""" +193 12 regularizer """no""" +193 12 optimizer """adam""" +193 12 training_loop """owa""" +193 12 negative_sampler """basic""" +193 12 evaluator """rankbased""" +193 13 dataset """kinships""" +193 13 model """distmult""" +193 13 loss """marginranking""" +193 13 regularizer """no""" +193 13 optimizer """adam""" +193 13 training_loop """owa""" +193 13 negative_sampler """basic""" +193 13 evaluator """rankbased""" +193 14 dataset """kinships""" +193 14 model """distmult""" +193 14 loss """marginranking""" +193 14 regularizer """no""" +193 14 optimizer """adam""" +193 14 training_loop """owa""" +193 14 negative_sampler """basic""" +193 14 evaluator """rankbased""" +193 15 dataset """kinships""" +193 15 model """distmult""" +193 15 loss """marginranking""" +193 15 regularizer """no""" +193 15 optimizer """adam""" +193 15 training_loop """owa""" +193 15 negative_sampler 
"""basic""" +193 15 evaluator """rankbased""" +193 16 dataset """kinships""" +193 16 model """distmult""" +193 16 loss """marginranking""" +193 16 regularizer """no""" +193 16 optimizer """adam""" +193 16 training_loop """owa""" +193 16 negative_sampler """basic""" +193 16 evaluator """rankbased""" +193 17 dataset """kinships""" +193 17 model """distmult""" +193 17 loss """marginranking""" +193 17 regularizer """no""" +193 17 optimizer """adam""" +193 17 training_loop """owa""" +193 17 negative_sampler """basic""" +193 17 evaluator """rankbased""" +193 18 dataset """kinships""" +193 18 model """distmult""" +193 18 loss """marginranking""" +193 18 regularizer """no""" +193 18 optimizer """adam""" +193 18 training_loop """owa""" +193 18 negative_sampler """basic""" +193 18 evaluator """rankbased""" +193 19 dataset """kinships""" +193 19 model """distmult""" +193 19 loss """marginranking""" +193 19 regularizer """no""" +193 19 optimizer """adam""" +193 19 training_loop """owa""" +193 19 negative_sampler """basic""" +193 19 evaluator """rankbased""" +193 20 dataset """kinships""" +193 20 model """distmult""" +193 20 loss """marginranking""" +193 20 regularizer """no""" +193 20 optimizer """adam""" +193 20 training_loop """owa""" +193 20 negative_sampler """basic""" +193 20 evaluator """rankbased""" +193 21 dataset """kinships""" +193 21 model """distmult""" +193 21 loss """marginranking""" +193 21 regularizer """no""" +193 21 optimizer """adam""" +193 21 training_loop """owa""" +193 21 negative_sampler """basic""" +193 21 evaluator """rankbased""" +193 22 dataset """kinships""" +193 22 model """distmult""" +193 22 loss """marginranking""" +193 22 regularizer """no""" +193 22 optimizer """adam""" +193 22 training_loop """owa""" +193 22 negative_sampler """basic""" +193 22 evaluator """rankbased""" +193 23 dataset """kinships""" +193 23 model """distmult""" +193 23 loss """marginranking""" +193 23 regularizer """no""" +193 23 optimizer """adam""" +193 23 training_loop 
"""owa""" +193 23 negative_sampler """basic""" +193 23 evaluator """rankbased""" +193 24 dataset """kinships""" +193 24 model """distmult""" +193 24 loss """marginranking""" +193 24 regularizer """no""" +193 24 optimizer """adam""" +193 24 training_loop """owa""" +193 24 negative_sampler """basic""" +193 24 evaluator """rankbased""" +193 25 dataset """kinships""" +193 25 model """distmult""" +193 25 loss """marginranking""" +193 25 regularizer """no""" +193 25 optimizer """adam""" +193 25 training_loop """owa""" +193 25 negative_sampler """basic""" +193 25 evaluator """rankbased""" +193 26 dataset """kinships""" +193 26 model """distmult""" +193 26 loss """marginranking""" +193 26 regularizer """no""" +193 26 optimizer """adam""" +193 26 training_loop """owa""" +193 26 negative_sampler """basic""" +193 26 evaluator """rankbased""" +193 27 dataset """kinships""" +193 27 model """distmult""" +193 27 loss """marginranking""" +193 27 regularizer """no""" +193 27 optimizer """adam""" +193 27 training_loop """owa""" +193 27 negative_sampler """basic""" +193 27 evaluator """rankbased""" +193 28 dataset """kinships""" +193 28 model """distmult""" +193 28 loss """marginranking""" +193 28 regularizer """no""" +193 28 optimizer """adam""" +193 28 training_loop """owa""" +193 28 negative_sampler """basic""" +193 28 evaluator """rankbased""" +193 29 dataset """kinships""" +193 29 model """distmult""" +193 29 loss """marginranking""" +193 29 regularizer """no""" +193 29 optimizer """adam""" +193 29 training_loop """owa""" +193 29 negative_sampler """basic""" +193 29 evaluator """rankbased""" +193 30 dataset """kinships""" +193 30 model """distmult""" +193 30 loss """marginranking""" +193 30 regularizer """no""" +193 30 optimizer """adam""" +193 30 training_loop """owa""" +193 30 negative_sampler """basic""" +193 30 evaluator """rankbased""" +193 31 dataset """kinships""" +193 31 model """distmult""" +193 31 loss """marginranking""" +193 31 regularizer """no""" +193 31 optimizer 
"""adam""" +193 31 training_loop """owa""" +193 31 negative_sampler """basic""" +193 31 evaluator """rankbased""" +193 32 dataset """kinships""" +193 32 model """distmult""" +193 32 loss """marginranking""" +193 32 regularizer """no""" +193 32 optimizer """adam""" +193 32 training_loop """owa""" +193 32 negative_sampler """basic""" +193 32 evaluator """rankbased""" +193 33 dataset """kinships""" +193 33 model """distmult""" +193 33 loss """marginranking""" +193 33 regularizer """no""" +193 33 optimizer """adam""" +193 33 training_loop """owa""" +193 33 negative_sampler """basic""" +193 33 evaluator """rankbased""" +193 34 dataset """kinships""" +193 34 model """distmult""" +193 34 loss """marginranking""" +193 34 regularizer """no""" +193 34 optimizer """adam""" +193 34 training_loop """owa""" +193 34 negative_sampler """basic""" +193 34 evaluator """rankbased""" +193 35 dataset """kinships""" +193 35 model """distmult""" +193 35 loss """marginranking""" +193 35 regularizer """no""" +193 35 optimizer """adam""" +193 35 training_loop """owa""" +193 35 negative_sampler """basic""" +193 35 evaluator """rankbased""" +193 36 dataset """kinships""" +193 36 model """distmult""" +193 36 loss """marginranking""" +193 36 regularizer """no""" +193 36 optimizer """adam""" +193 36 training_loop """owa""" +193 36 negative_sampler """basic""" +193 36 evaluator """rankbased""" +193 37 dataset """kinships""" +193 37 model """distmult""" +193 37 loss """marginranking""" +193 37 regularizer """no""" +193 37 optimizer """adam""" +193 37 training_loop """owa""" +193 37 negative_sampler """basic""" +193 37 evaluator """rankbased""" +193 38 dataset """kinships""" +193 38 model """distmult""" +193 38 loss """marginranking""" +193 38 regularizer """no""" +193 38 optimizer """adam""" +193 38 training_loop """owa""" +193 38 negative_sampler """basic""" +193 38 evaluator """rankbased""" +193 39 dataset """kinships""" +193 39 model """distmult""" +193 39 loss """marginranking""" +193 39 
regularizer """no""" +193 39 optimizer """adam""" +193 39 training_loop """owa""" +193 39 negative_sampler """basic""" +193 39 evaluator """rankbased""" +193 40 dataset """kinships""" +193 40 model """distmult""" +193 40 loss """marginranking""" +193 40 regularizer """no""" +193 40 optimizer """adam""" +193 40 training_loop """owa""" +193 40 negative_sampler """basic""" +193 40 evaluator """rankbased""" +193 41 dataset """kinships""" +193 41 model """distmult""" +193 41 loss """marginranking""" +193 41 regularizer """no""" +193 41 optimizer """adam""" +193 41 training_loop """owa""" +193 41 negative_sampler """basic""" +193 41 evaluator """rankbased""" +193 42 dataset """kinships""" +193 42 model """distmult""" +193 42 loss """marginranking""" +193 42 regularizer """no""" +193 42 optimizer """adam""" +193 42 training_loop """owa""" +193 42 negative_sampler """basic""" +193 42 evaluator """rankbased""" +193 43 dataset """kinships""" +193 43 model """distmult""" +193 43 loss """marginranking""" +193 43 regularizer """no""" +193 43 optimizer """adam""" +193 43 training_loop """owa""" +193 43 negative_sampler """basic""" +193 43 evaluator """rankbased""" +193 44 dataset """kinships""" +193 44 model """distmult""" +193 44 loss """marginranking""" +193 44 regularizer """no""" +193 44 optimizer """adam""" +193 44 training_loop """owa""" +193 44 negative_sampler """basic""" +193 44 evaluator """rankbased""" +193 45 dataset """kinships""" +193 45 model """distmult""" +193 45 loss """marginranking""" +193 45 regularizer """no""" +193 45 optimizer """adam""" +193 45 training_loop """owa""" +193 45 negative_sampler """basic""" +193 45 evaluator """rankbased""" +193 46 dataset """kinships""" +193 46 model """distmult""" +193 46 loss """marginranking""" +193 46 regularizer """no""" +193 46 optimizer """adam""" +193 46 training_loop """owa""" +193 46 negative_sampler """basic""" +193 46 evaluator """rankbased""" +193 47 dataset """kinships""" +193 47 model """distmult""" +193 47 
loss """marginranking""" +193 47 regularizer """no""" +193 47 optimizer """adam""" +193 47 training_loop """owa""" +193 47 negative_sampler """basic""" +193 47 evaluator """rankbased""" +193 48 dataset """kinships""" +193 48 model """distmult""" +193 48 loss """marginranking""" +193 48 regularizer """no""" +193 48 optimizer """adam""" +193 48 training_loop """owa""" +193 48 negative_sampler """basic""" +193 48 evaluator """rankbased""" +193 49 dataset """kinships""" +193 49 model """distmult""" +193 49 loss """marginranking""" +193 49 regularizer """no""" +193 49 optimizer """adam""" +193 49 training_loop """owa""" +193 49 negative_sampler """basic""" +193 49 evaluator """rankbased""" +193 50 dataset """kinships""" +193 50 model """distmult""" +193 50 loss """marginranking""" +193 50 regularizer """no""" +193 50 optimizer """adam""" +193 50 training_loop """owa""" +193 50 negative_sampler """basic""" +193 50 evaluator """rankbased""" +193 51 dataset """kinships""" +193 51 model """distmult""" +193 51 loss """marginranking""" +193 51 regularizer """no""" +193 51 optimizer """adam""" +193 51 training_loop """owa""" +193 51 negative_sampler """basic""" +193 51 evaluator """rankbased""" +193 52 dataset """kinships""" +193 52 model """distmult""" +193 52 loss """marginranking""" +193 52 regularizer """no""" +193 52 optimizer """adam""" +193 52 training_loop """owa""" +193 52 negative_sampler """basic""" +193 52 evaluator """rankbased""" +193 53 dataset """kinships""" +193 53 model """distmult""" +193 53 loss """marginranking""" +193 53 regularizer """no""" +193 53 optimizer """adam""" +193 53 training_loop """owa""" +193 53 negative_sampler """basic""" +193 53 evaluator """rankbased""" +193 54 dataset """kinships""" +193 54 model """distmult""" +193 54 loss """marginranking""" +193 54 regularizer """no""" +193 54 optimizer """adam""" +193 54 training_loop """owa""" +193 54 negative_sampler """basic""" +193 54 evaluator """rankbased""" +193 55 dataset """kinships""" +193 
55 model """distmult""" +193 55 loss """marginranking""" +193 55 regularizer """no""" +193 55 optimizer """adam""" +193 55 training_loop """owa""" +193 55 negative_sampler """basic""" +193 55 evaluator """rankbased""" +193 56 dataset """kinships""" +193 56 model """distmult""" +193 56 loss """marginranking""" +193 56 regularizer """no""" +193 56 optimizer """adam""" +193 56 training_loop """owa""" +193 56 negative_sampler """basic""" +193 56 evaluator """rankbased""" +193 57 dataset """kinships""" +193 57 model """distmult""" +193 57 loss """marginranking""" +193 57 regularizer """no""" +193 57 optimizer """adam""" +193 57 training_loop """owa""" +193 57 negative_sampler """basic""" +193 57 evaluator """rankbased""" +193 58 dataset """kinships""" +193 58 model """distmult""" +193 58 loss """marginranking""" +193 58 regularizer """no""" +193 58 optimizer """adam""" +193 58 training_loop """owa""" +193 58 negative_sampler """basic""" +193 58 evaluator """rankbased""" +193 59 dataset """kinships""" +193 59 model """distmult""" +193 59 loss """marginranking""" +193 59 regularizer """no""" +193 59 optimizer """adam""" +193 59 training_loop """owa""" +193 59 negative_sampler """basic""" +193 59 evaluator """rankbased""" +193 60 dataset """kinships""" +193 60 model """distmult""" +193 60 loss """marginranking""" +193 60 regularizer """no""" +193 60 optimizer """adam""" +193 60 training_loop """owa""" +193 60 negative_sampler """basic""" +193 60 evaluator """rankbased""" +193 61 dataset """kinships""" +193 61 model """distmult""" +193 61 loss """marginranking""" +193 61 regularizer """no""" +193 61 optimizer """adam""" +193 61 training_loop """owa""" +193 61 negative_sampler """basic""" +193 61 evaluator """rankbased""" +193 62 dataset """kinships""" +193 62 model """distmult""" +193 62 loss """marginranking""" +193 62 regularizer """no""" +193 62 optimizer """adam""" +193 62 training_loop """owa""" +193 62 negative_sampler """basic""" +193 62 evaluator """rankbased""" 
+193 63 dataset """kinships""" +193 63 model """distmult""" +193 63 loss """marginranking""" +193 63 regularizer """no""" +193 63 optimizer """adam""" +193 63 training_loop """owa""" +193 63 negative_sampler """basic""" +193 63 evaluator """rankbased""" +193 64 dataset """kinships""" +193 64 model """distmult""" +193 64 loss """marginranking""" +193 64 regularizer """no""" +193 64 optimizer """adam""" +193 64 training_loop """owa""" +193 64 negative_sampler """basic""" +193 64 evaluator """rankbased""" +193 65 dataset """kinships""" +193 65 model """distmult""" +193 65 loss """marginranking""" +193 65 regularizer """no""" +193 65 optimizer """adam""" +193 65 training_loop """owa""" +193 65 negative_sampler """basic""" +193 65 evaluator """rankbased""" +193 66 dataset """kinships""" +193 66 model """distmult""" +193 66 loss """marginranking""" +193 66 regularizer """no""" +193 66 optimizer """adam""" +193 66 training_loop """owa""" +193 66 negative_sampler """basic""" +193 66 evaluator """rankbased""" +193 67 dataset """kinships""" +193 67 model """distmult""" +193 67 loss """marginranking""" +193 67 regularizer """no""" +193 67 optimizer """adam""" +193 67 training_loop """owa""" +193 67 negative_sampler """basic""" +193 67 evaluator """rankbased""" +193 68 dataset """kinships""" +193 68 model """distmult""" +193 68 loss """marginranking""" +193 68 regularizer """no""" +193 68 optimizer """adam""" +193 68 training_loop """owa""" +193 68 negative_sampler """basic""" +193 68 evaluator """rankbased""" +193 69 dataset """kinships""" +193 69 model """distmult""" +193 69 loss """marginranking""" +193 69 regularizer """no""" +193 69 optimizer """adam""" +193 69 training_loop """owa""" +193 69 negative_sampler """basic""" +193 69 evaluator """rankbased""" +193 70 dataset """kinships""" +193 70 model """distmult""" +193 70 loss """marginranking""" +193 70 regularizer """no""" +193 70 optimizer """adam""" +193 70 training_loop """owa""" +193 70 negative_sampler """basic""" 
+193 70 evaluator """rankbased""" +193 71 dataset """kinships""" +193 71 model """distmult""" +193 71 loss """marginranking""" +193 71 regularizer """no""" +193 71 optimizer """adam""" +193 71 training_loop """owa""" +193 71 negative_sampler """basic""" +193 71 evaluator """rankbased""" +193 72 dataset """kinships""" +193 72 model """distmult""" +193 72 loss """marginranking""" +193 72 regularizer """no""" +193 72 optimizer """adam""" +193 72 training_loop """owa""" +193 72 negative_sampler """basic""" +193 72 evaluator """rankbased""" +193 73 dataset """kinships""" +193 73 model """distmult""" +193 73 loss """marginranking""" +193 73 regularizer """no""" +193 73 optimizer """adam""" +193 73 training_loop """owa""" +193 73 negative_sampler """basic""" +193 73 evaluator """rankbased""" +193 74 dataset """kinships""" +193 74 model """distmult""" +193 74 loss """marginranking""" +193 74 regularizer """no""" +193 74 optimizer """adam""" +193 74 training_loop """owa""" +193 74 negative_sampler """basic""" +193 74 evaluator """rankbased""" +193 75 dataset """kinships""" +193 75 model """distmult""" +193 75 loss """marginranking""" +193 75 regularizer """no""" +193 75 optimizer """adam""" +193 75 training_loop """owa""" +193 75 negative_sampler """basic""" +193 75 evaluator """rankbased""" +193 76 dataset """kinships""" +193 76 model """distmult""" +193 76 loss """marginranking""" +193 76 regularizer """no""" +193 76 optimizer """adam""" +193 76 training_loop """owa""" +193 76 negative_sampler """basic""" +193 76 evaluator """rankbased""" +193 77 dataset """kinships""" +193 77 model """distmult""" +193 77 loss """marginranking""" +193 77 regularizer """no""" +193 77 optimizer """adam""" +193 77 training_loop """owa""" +193 77 negative_sampler """basic""" +193 77 evaluator """rankbased""" +193 78 dataset """kinships""" +193 78 model """distmult""" +193 78 loss """marginranking""" +193 78 regularizer """no""" +193 78 optimizer """adam""" +193 78 training_loop """owa""" +193 
78 negative_sampler """basic""" +193 78 evaluator """rankbased""" +193 79 dataset """kinships""" +193 79 model """distmult""" +193 79 loss """marginranking""" +193 79 regularizer """no""" +193 79 optimizer """adam""" +193 79 training_loop """owa""" +193 79 negative_sampler """basic""" +193 79 evaluator """rankbased""" +193 80 dataset """kinships""" +193 80 model """distmult""" +193 80 loss """marginranking""" +193 80 regularizer """no""" +193 80 optimizer """adam""" +193 80 training_loop """owa""" +193 80 negative_sampler """basic""" +193 80 evaluator """rankbased""" +193 81 dataset """kinships""" +193 81 model """distmult""" +193 81 loss """marginranking""" +193 81 regularizer """no""" +193 81 optimizer """adam""" +193 81 training_loop """owa""" +193 81 negative_sampler """basic""" +193 81 evaluator """rankbased""" +193 82 dataset """kinships""" +193 82 model """distmult""" +193 82 loss """marginranking""" +193 82 regularizer """no""" +193 82 optimizer """adam""" +193 82 training_loop """owa""" +193 82 negative_sampler """basic""" +193 82 evaluator """rankbased""" +193 83 dataset """kinships""" +193 83 model """distmult""" +193 83 loss """marginranking""" +193 83 regularizer """no""" +193 83 optimizer """adam""" +193 83 training_loop """owa""" +193 83 negative_sampler """basic""" +193 83 evaluator """rankbased""" +193 84 dataset """kinships""" +193 84 model """distmult""" +193 84 loss """marginranking""" +193 84 regularizer """no""" +193 84 optimizer """adam""" +193 84 training_loop """owa""" +193 84 negative_sampler """basic""" +193 84 evaluator """rankbased""" +193 85 dataset """kinships""" +193 85 model """distmult""" +193 85 loss """marginranking""" +193 85 regularizer """no""" +193 85 optimizer """adam""" +193 85 training_loop """owa""" +193 85 negative_sampler """basic""" +193 85 evaluator """rankbased""" +193 86 dataset """kinships""" +193 86 model """distmult""" +193 86 loss """marginranking""" +193 86 regularizer """no""" +193 86 optimizer """adam""" +193 
86 training_loop """owa""" +193 86 negative_sampler """basic""" +193 86 evaluator """rankbased""" +193 87 dataset """kinships""" +193 87 model """distmult""" +193 87 loss """marginranking""" +193 87 regularizer """no""" +193 87 optimizer """adam""" +193 87 training_loop """owa""" +193 87 negative_sampler """basic""" +193 87 evaluator """rankbased""" +193 88 dataset """kinships""" +193 88 model """distmult""" +193 88 loss """marginranking""" +193 88 regularizer """no""" +193 88 optimizer """adam""" +193 88 training_loop """owa""" +193 88 negative_sampler """basic""" +193 88 evaluator """rankbased""" +193 89 dataset """kinships""" +193 89 model """distmult""" +193 89 loss """marginranking""" +193 89 regularizer """no""" +193 89 optimizer """adam""" +193 89 training_loop """owa""" +193 89 negative_sampler """basic""" +193 89 evaluator """rankbased""" +193 90 dataset """kinships""" +193 90 model """distmult""" +193 90 loss """marginranking""" +193 90 regularizer """no""" +193 90 optimizer """adam""" +193 90 training_loop """owa""" +193 90 negative_sampler """basic""" +193 90 evaluator """rankbased""" +193 91 dataset """kinships""" +193 91 model """distmult""" +193 91 loss """marginranking""" +193 91 regularizer """no""" +193 91 optimizer """adam""" +193 91 training_loop """owa""" +193 91 negative_sampler """basic""" +193 91 evaluator """rankbased""" +193 92 dataset """kinships""" +193 92 model """distmult""" +193 92 loss """marginranking""" +193 92 regularizer """no""" +193 92 optimizer """adam""" +193 92 training_loop """owa""" +193 92 negative_sampler """basic""" +193 92 evaluator """rankbased""" +193 93 dataset """kinships""" +193 93 model """distmult""" +193 93 loss """marginranking""" +193 93 regularizer """no""" +193 93 optimizer """adam""" +193 93 training_loop """owa""" +193 93 negative_sampler """basic""" +193 93 evaluator """rankbased""" +193 94 dataset """kinships""" +193 94 model """distmult""" +193 94 loss """marginranking""" +193 94 regularizer """no""" 
+193 94 optimizer """adam""" +193 94 training_loop """owa""" +193 94 negative_sampler """basic""" +193 94 evaluator """rankbased""" +193 95 dataset """kinships""" +193 95 model """distmult""" +193 95 loss """marginranking""" +193 95 regularizer """no""" +193 95 optimizer """adam""" +193 95 training_loop """owa""" +193 95 negative_sampler """basic""" +193 95 evaluator """rankbased""" +193 96 dataset """kinships""" +193 96 model """distmult""" +193 96 loss """marginranking""" +193 96 regularizer """no""" +193 96 optimizer """adam""" +193 96 training_loop """owa""" +193 96 negative_sampler """basic""" +193 96 evaluator """rankbased""" +193 97 dataset """kinships""" +193 97 model """distmult""" +193 97 loss """marginranking""" +193 97 regularizer """no""" +193 97 optimizer """adam""" +193 97 training_loop """owa""" +193 97 negative_sampler """basic""" +193 97 evaluator """rankbased""" +193 98 dataset """kinships""" +193 98 model """distmult""" +193 98 loss """marginranking""" +193 98 regularizer """no""" +193 98 optimizer """adam""" +193 98 training_loop """owa""" +193 98 negative_sampler """basic""" +193 98 evaluator """rankbased""" +193 99 dataset """kinships""" +193 99 model """distmult""" +193 99 loss """marginranking""" +193 99 regularizer """no""" +193 99 optimizer """adam""" +193 99 training_loop """owa""" +193 99 negative_sampler """basic""" +193 99 evaluator """rankbased""" +193 100 dataset """kinships""" +193 100 model """distmult""" +193 100 loss """marginranking""" +193 100 regularizer """no""" +193 100 optimizer """adam""" +193 100 training_loop """owa""" +193 100 negative_sampler """basic""" +193 100 evaluator """rankbased""" +194 1 model.embedding_dim 1.0 +194 1 loss.margin 19.287709105078132 +194 1 loss.adversarial_temperature 0.3118081942654726 +194 1 optimizer.lr 0.00458492849932462 +194 1 negative_sampler.num_negs_per_pos 18.0 +194 1 training.batch_size 0.0 +194 2 model.embedding_dim 2.0 +194 2 loss.margin 4.695937192076465 +194 2 
loss.adversarial_temperature 0.3653934750594039 +194 2 optimizer.lr 0.05475501576967993 +194 2 negative_sampler.num_negs_per_pos 50.0 +194 2 training.batch_size 0.0 +194 3 model.embedding_dim 1.0 +194 3 loss.margin 5.554782546228971 +194 3 loss.adversarial_temperature 0.9725332415252407 +194 3 optimizer.lr 0.009806578350932642 +194 3 negative_sampler.num_negs_per_pos 12.0 +194 3 training.batch_size 1.0 +194 4 model.embedding_dim 2.0 +194 4 loss.margin 29.33360884136108 +194 4 loss.adversarial_temperature 0.8869647109340042 +194 4 optimizer.lr 0.019172094069011898 +194 4 negative_sampler.num_negs_per_pos 81.0 +194 4 training.batch_size 1.0 +194 5 model.embedding_dim 1.0 +194 5 loss.margin 20.58214660414398 +194 5 loss.adversarial_temperature 0.9381687833406638 +194 5 optimizer.lr 0.0015232970784428564 +194 5 negative_sampler.num_negs_per_pos 15.0 +194 5 training.batch_size 1.0 +194 6 model.embedding_dim 1.0 +194 6 loss.margin 6.915212132796896 +194 6 loss.adversarial_temperature 0.3287465952899347 +194 6 optimizer.lr 0.021639604772122543 +194 6 negative_sampler.num_negs_per_pos 27.0 +194 6 training.batch_size 0.0 +194 7 model.embedding_dim 2.0 +194 7 loss.margin 20.24168345978495 +194 7 loss.adversarial_temperature 0.1259503409071905 +194 7 optimizer.lr 0.0027102959929106887 +194 7 negative_sampler.num_negs_per_pos 0.0 +194 7 training.batch_size 2.0 +194 8 model.embedding_dim 2.0 +194 8 loss.margin 28.812288956621057 +194 8 loss.adversarial_temperature 0.9122643151569982 +194 8 optimizer.lr 0.003613357237262872 +194 8 negative_sampler.num_negs_per_pos 94.0 +194 8 training.batch_size 1.0 +194 9 model.embedding_dim 2.0 +194 9 loss.margin 25.948488456291173 +194 9 loss.adversarial_temperature 0.19415016882541344 +194 9 optimizer.lr 0.022195921860574598 +194 9 negative_sampler.num_negs_per_pos 1.0 +194 9 training.batch_size 0.0 +194 10 model.embedding_dim 0.0 +194 10 loss.margin 2.3364772050338014 +194 10 loss.adversarial_temperature 0.7850500777212879 +194 10 
optimizer.lr 0.018016729486570855 +194 10 negative_sampler.num_negs_per_pos 74.0 +194 10 training.batch_size 0.0 +194 11 model.embedding_dim 1.0 +194 11 loss.margin 27.18155569224571 +194 11 loss.adversarial_temperature 0.8561160316110722 +194 11 optimizer.lr 0.0015937939481800241 +194 11 negative_sampler.num_negs_per_pos 2.0 +194 11 training.batch_size 1.0 +194 12 model.embedding_dim 0.0 +194 12 loss.margin 21.04231038510961 +194 12 loss.adversarial_temperature 0.15728666805484526 +194 12 optimizer.lr 0.007696527270377919 +194 12 negative_sampler.num_negs_per_pos 4.0 +194 12 training.batch_size 1.0 +194 13 model.embedding_dim 1.0 +194 13 loss.margin 5.833171591388759 +194 13 loss.adversarial_temperature 0.20709140307998175 +194 13 optimizer.lr 0.004952678724755569 +194 13 negative_sampler.num_negs_per_pos 56.0 +194 13 training.batch_size 2.0 +194 14 model.embedding_dim 2.0 +194 14 loss.margin 10.037272605892989 +194 14 loss.adversarial_temperature 0.34888091283405165 +194 14 optimizer.lr 0.059600825283034606 +194 14 negative_sampler.num_negs_per_pos 23.0 +194 14 training.batch_size 0.0 +194 15 model.embedding_dim 0.0 +194 15 loss.margin 26.52442984085356 +194 15 loss.adversarial_temperature 0.23319395389478434 +194 15 optimizer.lr 0.09361337468683502 +194 15 negative_sampler.num_negs_per_pos 94.0 +194 15 training.batch_size 2.0 +194 16 model.embedding_dim 2.0 +194 16 loss.margin 4.499698084359039 +194 16 loss.adversarial_temperature 0.44780353343678303 +194 16 optimizer.lr 0.018801796048616193 +194 16 negative_sampler.num_negs_per_pos 35.0 +194 16 training.batch_size 2.0 +194 17 model.embedding_dim 0.0 +194 17 loss.margin 15.706976580897885 +194 17 loss.adversarial_temperature 0.9151715368525263 +194 17 optimizer.lr 0.015131537096109584 +194 17 negative_sampler.num_negs_per_pos 38.0 +194 17 training.batch_size 2.0 +194 18 model.embedding_dim 0.0 +194 18 loss.margin 29.212449492128332 +194 18 loss.adversarial_temperature 0.7153744610149422 +194 18 optimizer.lr 
0.0026987401136183403 +194 18 negative_sampler.num_negs_per_pos 97.0 +194 18 training.batch_size 1.0 +194 19 model.embedding_dim 2.0 +194 19 loss.margin 20.020638937915034 +194 19 loss.adversarial_temperature 0.5388861889945932 +194 19 optimizer.lr 0.04188776128345484 +194 19 negative_sampler.num_negs_per_pos 79.0 +194 19 training.batch_size 2.0 +194 20 model.embedding_dim 1.0 +194 20 loss.margin 27.539623386069135 +194 20 loss.adversarial_temperature 0.415334564567054 +194 20 optimizer.lr 0.04688500854324102 +194 20 negative_sampler.num_negs_per_pos 39.0 +194 20 training.batch_size 0.0 +194 21 model.embedding_dim 0.0 +194 21 loss.margin 19.98985823994278 +194 21 loss.adversarial_temperature 0.55773120146424 +194 21 optimizer.lr 0.013108700139714286 +194 21 negative_sampler.num_negs_per_pos 39.0 +194 21 training.batch_size 1.0 +194 22 model.embedding_dim 2.0 +194 22 loss.margin 3.5929117501614485 +194 22 loss.adversarial_temperature 0.15096372983771972 +194 22 optimizer.lr 0.06462222714436401 +194 22 negative_sampler.num_negs_per_pos 27.0 +194 22 training.batch_size 0.0 +194 23 model.embedding_dim 0.0 +194 23 loss.margin 29.933816128864137 +194 23 loss.adversarial_temperature 0.7970039423691294 +194 23 optimizer.lr 0.014788546134992952 +194 23 negative_sampler.num_negs_per_pos 30.0 +194 23 training.batch_size 0.0 +194 24 model.embedding_dim 0.0 +194 24 loss.margin 13.446403408682201 +194 24 loss.adversarial_temperature 0.8185292126279831 +194 24 optimizer.lr 0.01659280188634849 +194 24 negative_sampler.num_negs_per_pos 3.0 +194 24 training.batch_size 1.0 +194 25 model.embedding_dim 2.0 +194 25 loss.margin 5.841735180906114 +194 25 loss.adversarial_temperature 0.3392104822923544 +194 25 optimizer.lr 0.020131354930484735 +194 25 negative_sampler.num_negs_per_pos 47.0 +194 25 training.batch_size 2.0 +194 26 model.embedding_dim 1.0 +194 26 loss.margin 20.240151474378305 +194 26 loss.adversarial_temperature 0.6955686871005412 +194 26 optimizer.lr 0.01729225966981405 
+194 26 negative_sampler.num_negs_per_pos 62.0 +194 26 training.batch_size 1.0 +194 27 model.embedding_dim 0.0 +194 27 loss.margin 19.92885013051516 +194 27 loss.adversarial_temperature 0.9880550375471651 +194 27 optimizer.lr 0.018395662779363456 +194 27 negative_sampler.num_negs_per_pos 97.0 +194 27 training.batch_size 2.0 +194 28 model.embedding_dim 0.0 +194 28 loss.margin 29.521082967025194 +194 28 loss.adversarial_temperature 0.33869840149300334 +194 28 optimizer.lr 0.001821995115413832 +194 28 negative_sampler.num_negs_per_pos 39.0 +194 28 training.batch_size 1.0 +194 29 model.embedding_dim 2.0 +194 29 loss.margin 11.010078349403328 +194 29 loss.adversarial_temperature 0.21840869633050042 +194 29 optimizer.lr 0.021104196144275066 +194 29 negative_sampler.num_negs_per_pos 34.0 +194 29 training.batch_size 1.0 +194 30 model.embedding_dim 1.0 +194 30 loss.margin 13.592099141819292 +194 30 loss.adversarial_temperature 0.17254701670174555 +194 30 optimizer.lr 0.028679627951864795 +194 30 negative_sampler.num_negs_per_pos 85.0 +194 30 training.batch_size 0.0 +194 31 model.embedding_dim 1.0 +194 31 loss.margin 17.000588500033956 +194 31 loss.adversarial_temperature 0.31494833536800937 +194 31 optimizer.lr 0.0018980419679647438 +194 31 negative_sampler.num_negs_per_pos 9.0 +194 31 training.batch_size 0.0 +194 32 model.embedding_dim 1.0 +194 32 loss.margin 9.437846130721969 +194 32 loss.adversarial_temperature 0.6505258449473796 +194 32 optimizer.lr 0.0017899264915704277 +194 32 negative_sampler.num_negs_per_pos 94.0 +194 32 training.batch_size 1.0 +194 33 model.embedding_dim 1.0 +194 33 loss.margin 19.90214802499441 +194 33 loss.adversarial_temperature 0.7139573147585891 +194 33 optimizer.lr 0.01074186385365389 +194 33 negative_sampler.num_negs_per_pos 47.0 +194 33 training.batch_size 1.0 +194 34 model.embedding_dim 2.0 +194 34 loss.margin 22.422660789061542 +194 34 loss.adversarial_temperature 0.9361001966176676 +194 34 optimizer.lr 0.03267951027283134 +194 34 
negative_sampler.num_negs_per_pos 24.0 +194 34 training.batch_size 1.0 +194 35 model.embedding_dim 2.0 +194 35 loss.margin 10.469110198472903 +194 35 loss.adversarial_temperature 0.3694729623409657 +194 35 optimizer.lr 0.003368627359471031 +194 35 negative_sampler.num_negs_per_pos 49.0 +194 35 training.batch_size 1.0 +194 36 model.embedding_dim 0.0 +194 36 loss.margin 17.558449006858442 +194 36 loss.adversarial_temperature 0.8126937747230225 +194 36 optimizer.lr 0.0022634223278004263 +194 36 negative_sampler.num_negs_per_pos 69.0 +194 36 training.batch_size 0.0 +194 37 model.embedding_dim 2.0 +194 37 loss.margin 6.625165359344043 +194 37 loss.adversarial_temperature 0.4203775204545528 +194 37 optimizer.lr 0.024095701880558748 +194 37 negative_sampler.num_negs_per_pos 97.0 +194 37 training.batch_size 1.0 +194 38 model.embedding_dim 2.0 +194 38 loss.margin 1.4656196823044123 +194 38 loss.adversarial_temperature 0.8938705153832002 +194 38 optimizer.lr 0.005969274450081213 +194 38 negative_sampler.num_negs_per_pos 65.0 +194 38 training.batch_size 1.0 +194 39 model.embedding_dim 0.0 +194 39 loss.margin 7.049997749209607 +194 39 loss.adversarial_temperature 0.37680743625247537 +194 39 optimizer.lr 0.06278760048301044 +194 39 negative_sampler.num_negs_per_pos 73.0 +194 39 training.batch_size 1.0 +194 40 model.embedding_dim 2.0 +194 40 loss.margin 23.847000876136782 +194 40 loss.adversarial_temperature 0.2455356714796494 +194 40 optimizer.lr 0.0014579592350294171 +194 40 negative_sampler.num_negs_per_pos 4.0 +194 40 training.batch_size 0.0 +194 41 model.embedding_dim 1.0 +194 41 loss.margin 17.701389937422803 +194 41 loss.adversarial_temperature 0.913135995240241 +194 41 optimizer.lr 0.06211435548178358 +194 41 negative_sampler.num_negs_per_pos 48.0 +194 41 training.batch_size 1.0 +194 42 model.embedding_dim 1.0 +194 42 loss.margin 21.618266454605735 +194 42 loss.adversarial_temperature 0.2545311628949132 +194 42 optimizer.lr 0.020872096294510218 +194 42 
negative_sampler.num_negs_per_pos 98.0 +194 42 training.batch_size 1.0 +194 43 model.embedding_dim 0.0 +194 43 loss.margin 23.72185868729088 +194 43 loss.adversarial_temperature 0.9537521815060537 +194 43 optimizer.lr 0.005825503916699071 +194 43 negative_sampler.num_negs_per_pos 65.0 +194 43 training.batch_size 1.0 +194 44 model.embedding_dim 1.0 +194 44 loss.margin 28.9350828093125 +194 44 loss.adversarial_temperature 0.8185069290778306 +194 44 optimizer.lr 0.005413924263361423 +194 44 negative_sampler.num_negs_per_pos 99.0 +194 44 training.batch_size 1.0 +194 45 model.embedding_dim 1.0 +194 45 loss.margin 11.469695464592188 +194 45 loss.adversarial_temperature 0.6363171239192705 +194 45 optimizer.lr 0.015090597124171289 +194 45 negative_sampler.num_negs_per_pos 20.0 +194 45 training.batch_size 0.0 +194 46 model.embedding_dim 1.0 +194 46 loss.margin 1.915145121748742 +194 46 loss.adversarial_temperature 0.8968707869596174 +194 46 optimizer.lr 0.012133982775376155 +194 46 negative_sampler.num_negs_per_pos 77.0 +194 46 training.batch_size 1.0 +194 47 model.embedding_dim 0.0 +194 47 loss.margin 10.637063520126622 +194 47 loss.adversarial_temperature 0.18993535460588823 +194 47 optimizer.lr 0.025528528093836073 +194 47 negative_sampler.num_negs_per_pos 0.0 +194 47 training.batch_size 0.0 +194 48 model.embedding_dim 1.0 +194 48 loss.margin 22.73027931388005 +194 48 loss.adversarial_temperature 0.12231725671790177 +194 48 optimizer.lr 0.057553095472492384 +194 48 negative_sampler.num_negs_per_pos 18.0 +194 48 training.batch_size 2.0 +194 49 model.embedding_dim 0.0 +194 49 loss.margin 17.31049250567637 +194 49 loss.adversarial_temperature 0.9271580815373087 +194 49 optimizer.lr 0.011698790397670526 +194 49 negative_sampler.num_negs_per_pos 12.0 +194 49 training.batch_size 2.0 +194 50 model.embedding_dim 0.0 +194 50 loss.margin 13.531948488112153 +194 50 loss.adversarial_temperature 0.5611427779596515 +194 50 optimizer.lr 0.04120167565476176 +194 50 
negative_sampler.num_negs_per_pos 30.0 +194 50 training.batch_size 1.0 +194 51 model.embedding_dim 2.0 +194 51 loss.margin 15.639738442403775 +194 51 loss.adversarial_temperature 0.19520881375867177 +194 51 optimizer.lr 0.03317303038686074 +194 51 negative_sampler.num_negs_per_pos 13.0 +194 51 training.batch_size 2.0 +194 52 model.embedding_dim 2.0 +194 52 loss.margin 9.812782461933649 +194 52 loss.adversarial_temperature 0.17023933765375918 +194 52 optimizer.lr 0.009673100753576391 +194 52 negative_sampler.num_negs_per_pos 3.0 +194 52 training.batch_size 1.0 +194 53 model.embedding_dim 0.0 +194 53 loss.margin 7.0953390145692525 +194 53 loss.adversarial_temperature 0.7557397416092692 +194 53 optimizer.lr 0.0026212778699108894 +194 53 negative_sampler.num_negs_per_pos 85.0 +194 53 training.batch_size 2.0 +194 54 model.embedding_dim 1.0 +194 54 loss.margin 13.128572872102538 +194 54 loss.adversarial_temperature 0.8570841077742687 +194 54 optimizer.lr 0.018824411479143942 +194 54 negative_sampler.num_negs_per_pos 88.0 +194 54 training.batch_size 2.0 +194 55 model.embedding_dim 1.0 +194 55 loss.margin 10.642494659518716 +194 55 loss.adversarial_temperature 0.8835482510685692 +194 55 optimizer.lr 0.0030605172095607407 +194 55 negative_sampler.num_negs_per_pos 43.0 +194 55 training.batch_size 1.0 +194 56 model.embedding_dim 1.0 +194 56 loss.margin 25.60334463457117 +194 56 loss.adversarial_temperature 0.44448705599204363 +194 56 optimizer.lr 0.018621632813210862 +194 56 negative_sampler.num_negs_per_pos 17.0 +194 56 training.batch_size 1.0 +194 57 model.embedding_dim 1.0 +194 57 loss.margin 16.43591653942907 +194 57 loss.adversarial_temperature 0.15211042282757703 +194 57 optimizer.lr 0.005311813952200824 +194 57 negative_sampler.num_negs_per_pos 19.0 +194 57 training.batch_size 0.0 +194 58 model.embedding_dim 0.0 +194 58 loss.margin 25.69341031715036 +194 58 loss.adversarial_temperature 0.9274799581001356 +194 58 optimizer.lr 0.005750228259966237 +194 58 
negative_sampler.num_negs_per_pos 16.0 +194 58 training.batch_size 2.0 +194 59 model.embedding_dim 0.0 +194 59 loss.margin 24.19233341966506 +194 59 loss.adversarial_temperature 0.8457744083084636 +194 59 optimizer.lr 0.028848258744809376 +194 59 negative_sampler.num_negs_per_pos 7.0 +194 59 training.batch_size 0.0 +194 60 model.embedding_dim 1.0 +194 60 loss.margin 21.31214998795306 +194 60 loss.adversarial_temperature 0.5890159113692137 +194 60 optimizer.lr 0.0858261805818883 +194 60 negative_sampler.num_negs_per_pos 32.0 +194 60 training.batch_size 0.0 +194 61 model.embedding_dim 1.0 +194 61 loss.margin 3.4328777101996333 +194 61 loss.adversarial_temperature 0.5688700220318691 +194 61 optimizer.lr 0.007624843138449603 +194 61 negative_sampler.num_negs_per_pos 32.0 +194 61 training.batch_size 1.0 +194 62 model.embedding_dim 2.0 +194 62 loss.margin 22.939217164086706 +194 62 loss.adversarial_temperature 0.9952691068398019 +194 62 optimizer.lr 0.00223282242812255 +194 62 negative_sampler.num_negs_per_pos 50.0 +194 62 training.batch_size 0.0 +194 63 model.embedding_dim 1.0 +194 63 loss.margin 9.78142822454211 +194 63 loss.adversarial_temperature 0.6482853012776176 +194 63 optimizer.lr 0.0019919344922571546 +194 63 negative_sampler.num_negs_per_pos 67.0 +194 63 training.batch_size 1.0 +194 64 model.embedding_dim 0.0 +194 64 loss.margin 24.1867791203463 +194 64 loss.adversarial_temperature 0.73980471578915 +194 64 optimizer.lr 0.05769547365886281 +194 64 negative_sampler.num_negs_per_pos 57.0 +194 64 training.batch_size 1.0 +194 65 model.embedding_dim 0.0 +194 65 loss.margin 26.465287983415827 +194 65 loss.adversarial_temperature 0.9632800714620223 +194 65 optimizer.lr 0.0015120054329078392 +194 65 negative_sampler.num_negs_per_pos 97.0 +194 65 training.batch_size 0.0 +194 66 model.embedding_dim 2.0 +194 66 loss.margin 27.55773075506928 +194 66 loss.adversarial_temperature 0.7358968525002374 +194 66 optimizer.lr 0.0020571550739948634 +194 66 
negative_sampler.num_negs_per_pos 23.0 +194 66 training.batch_size 1.0 +194 67 model.embedding_dim 0.0 +194 67 loss.margin 2.33730931918482 +194 67 loss.adversarial_temperature 0.15932835952504262 +194 67 optimizer.lr 0.046085231025750586 +194 67 negative_sampler.num_negs_per_pos 31.0 +194 67 training.batch_size 1.0 +194 68 model.embedding_dim 2.0 +194 68 loss.margin 1.7050896303241774 +194 68 loss.adversarial_temperature 0.7302561989387357 +194 68 optimizer.lr 0.021721274230107526 +194 68 negative_sampler.num_negs_per_pos 64.0 +194 68 training.batch_size 2.0 +194 69 model.embedding_dim 2.0 +194 69 loss.margin 19.701894014255153 +194 69 loss.adversarial_temperature 0.19804153169733058 +194 69 optimizer.lr 0.015252658988701689 +194 69 negative_sampler.num_negs_per_pos 41.0 +194 69 training.batch_size 1.0 +194 70 model.embedding_dim 0.0 +194 70 loss.margin 3.384835871440299 +194 70 loss.adversarial_temperature 0.25098325415405803 +194 70 optimizer.lr 0.010653039152339039 +194 70 negative_sampler.num_negs_per_pos 34.0 +194 70 training.batch_size 0.0 +194 71 model.embedding_dim 1.0 +194 71 loss.margin 5.891669446249182 +194 71 loss.adversarial_temperature 0.8001712430693035 +194 71 optimizer.lr 0.002342594785453451 +194 71 negative_sampler.num_negs_per_pos 24.0 +194 71 training.batch_size 0.0 +194 72 model.embedding_dim 0.0 +194 72 loss.margin 19.362552238454956 +194 72 loss.adversarial_temperature 0.8690182668498199 +194 72 optimizer.lr 0.006905674393492032 +194 72 negative_sampler.num_negs_per_pos 22.0 +194 72 training.batch_size 2.0 +194 73 model.embedding_dim 1.0 +194 73 loss.margin 19.138896240746146 +194 73 loss.adversarial_temperature 0.3384111666530748 +194 73 optimizer.lr 0.007540619163482561 +194 73 negative_sampler.num_negs_per_pos 47.0 +194 73 training.batch_size 2.0 +194 74 model.embedding_dim 1.0 +194 74 loss.margin 15.72933655065539 +194 74 loss.adversarial_temperature 0.7111068858244611 +194 74 optimizer.lr 0.001251049431596507 +194 74 
negative_sampler.num_negs_per_pos 34.0 +194 74 training.batch_size 2.0 +194 75 model.embedding_dim 2.0 +194 75 loss.margin 6.340378294462243 +194 75 loss.adversarial_temperature 0.6322515735464568 +194 75 optimizer.lr 0.06107570390090377 +194 75 negative_sampler.num_negs_per_pos 69.0 +194 75 training.batch_size 0.0 +194 76 model.embedding_dim 1.0 +194 76 loss.margin 18.237207687549592 +194 76 loss.adversarial_temperature 0.996476623034697 +194 76 optimizer.lr 0.0026368717104589996 +194 76 negative_sampler.num_negs_per_pos 40.0 +194 76 training.batch_size 0.0 +194 77 model.embedding_dim 2.0 +194 77 loss.margin 27.72903320553766 +194 77 loss.adversarial_temperature 0.19387628992144273 +194 77 optimizer.lr 0.043443880848997 +194 77 negative_sampler.num_negs_per_pos 34.0 +194 77 training.batch_size 1.0 +194 78 model.embedding_dim 0.0 +194 78 loss.margin 26.166116697981266 +194 78 loss.adversarial_temperature 0.7726672146068495 +194 78 optimizer.lr 0.0014301581605764397 +194 78 negative_sampler.num_negs_per_pos 10.0 +194 78 training.batch_size 1.0 +194 79 model.embedding_dim 0.0 +194 79 loss.margin 26.74726022755113 +194 79 loss.adversarial_temperature 0.36801860038931045 +194 79 optimizer.lr 0.01890811868089144 +194 79 negative_sampler.num_negs_per_pos 5.0 +194 79 training.batch_size 0.0 +194 80 model.embedding_dim 1.0 +194 80 loss.margin 20.297761326469427 +194 80 loss.adversarial_temperature 0.9515951266223207 +194 80 optimizer.lr 0.007256008753384553 +194 80 negative_sampler.num_negs_per_pos 30.0 +194 80 training.batch_size 2.0 +194 81 model.embedding_dim 2.0 +194 81 loss.margin 12.87618141563548 +194 81 loss.adversarial_temperature 0.5541523658823043 +194 81 optimizer.lr 0.026847146699955048 +194 81 negative_sampler.num_negs_per_pos 79.0 +194 81 training.batch_size 2.0 +194 82 model.embedding_dim 0.0 +194 82 loss.margin 5.54902574637528 +194 82 loss.adversarial_temperature 0.5590502311019512 +194 82 optimizer.lr 0.0014550681509863602 +194 82 
negative_sampler.num_negs_per_pos 6.0 +194 82 training.batch_size 2.0 +194 83 model.embedding_dim 1.0 +194 83 loss.margin 7.393503442508709 +194 83 loss.adversarial_temperature 0.33402210788580894 +194 83 optimizer.lr 0.007688127703120182 +194 83 negative_sampler.num_negs_per_pos 27.0 +194 83 training.batch_size 0.0 +194 84 model.embedding_dim 1.0 +194 84 loss.margin 29.73383763737572 +194 84 loss.adversarial_temperature 0.8439479642736966 +194 84 optimizer.lr 0.0031594115842019364 +194 84 negative_sampler.num_negs_per_pos 56.0 +194 84 training.batch_size 1.0 +194 85 model.embedding_dim 0.0 +194 85 loss.margin 16.993471363370688 +194 85 loss.adversarial_temperature 0.5854591124758077 +194 85 optimizer.lr 0.008715287153046423 +194 85 negative_sampler.num_negs_per_pos 55.0 +194 85 training.batch_size 1.0 +194 86 model.embedding_dim 2.0 +194 86 loss.margin 12.023499061959503 +194 86 loss.adversarial_temperature 0.3602464577050123 +194 86 optimizer.lr 0.07113631634624315 +194 86 negative_sampler.num_negs_per_pos 62.0 +194 86 training.batch_size 2.0 +194 87 model.embedding_dim 0.0 +194 87 loss.margin 23.671871514437193 +194 87 loss.adversarial_temperature 0.7378908919903342 +194 87 optimizer.lr 0.023401524942223712 +194 87 negative_sampler.num_negs_per_pos 4.0 +194 87 training.batch_size 0.0 +194 88 model.embedding_dim 1.0 +194 88 loss.margin 18.074275983362647 +194 88 loss.adversarial_temperature 0.48698233261257495 +194 88 optimizer.lr 0.0022204151397821003 +194 88 negative_sampler.num_negs_per_pos 74.0 +194 88 training.batch_size 2.0 +194 89 model.embedding_dim 1.0 +194 89 loss.margin 27.45593264227189 +194 89 loss.adversarial_temperature 0.619496797302168 +194 89 optimizer.lr 0.0034211109713756186 +194 89 negative_sampler.num_negs_per_pos 12.0 +194 89 training.batch_size 2.0 +194 90 model.embedding_dim 0.0 +194 90 loss.margin 4.083539614924955 +194 90 loss.adversarial_temperature 0.30465684335462284 +194 90 optimizer.lr 0.018998299734840508 +194 90 
negative_sampler.num_negs_per_pos 38.0 +194 90 training.batch_size 1.0 +194 91 model.embedding_dim 0.0 +194 91 loss.margin 11.546369054293397 +194 91 loss.adversarial_temperature 0.34813724987677497 +194 91 optimizer.lr 0.009902693169469314 +194 91 negative_sampler.num_negs_per_pos 74.0 +194 91 training.batch_size 1.0 +194 92 model.embedding_dim 0.0 +194 92 loss.margin 4.976485778844069 +194 92 loss.adversarial_temperature 0.4625638615595976 +194 92 optimizer.lr 0.031151279855038544 +194 92 negative_sampler.num_negs_per_pos 52.0 +194 92 training.batch_size 2.0 +194 93 model.embedding_dim 1.0 +194 93 loss.margin 4.800178060541069 +194 93 loss.adversarial_temperature 0.9915651235979067 +194 93 optimizer.lr 0.0032624926060198316 +194 93 negative_sampler.num_negs_per_pos 70.0 +194 93 training.batch_size 2.0 +194 94 model.embedding_dim 1.0 +194 94 loss.margin 12.337857005289068 +194 94 loss.adversarial_temperature 0.41802235864677406 +194 94 optimizer.lr 0.07211326830642133 +194 94 negative_sampler.num_negs_per_pos 52.0 +194 94 training.batch_size 2.0 +194 95 model.embedding_dim 2.0 +194 95 loss.margin 14.922585770399417 +194 95 loss.adversarial_temperature 0.33807916993350784 +194 95 optimizer.lr 0.017672760582580573 +194 95 negative_sampler.num_negs_per_pos 51.0 +194 95 training.batch_size 1.0 +194 96 model.embedding_dim 0.0 +194 96 loss.margin 27.434266415424453 +194 96 loss.adversarial_temperature 0.41341193571755996 +194 96 optimizer.lr 0.06861709843460158 +194 96 negative_sampler.num_negs_per_pos 67.0 +194 96 training.batch_size 2.0 +194 97 model.embedding_dim 1.0 +194 97 loss.margin 15.653194592164134 +194 97 loss.adversarial_temperature 0.32209005598390816 +194 97 optimizer.lr 0.003925757760012626 +194 97 negative_sampler.num_negs_per_pos 6.0 +194 97 training.batch_size 1.0 +194 98 model.embedding_dim 1.0 +194 98 loss.margin 28.92750861330369 +194 98 loss.adversarial_temperature 0.5044464820022007 +194 98 optimizer.lr 0.01819156440946965 +194 98 
negative_sampler.num_negs_per_pos 63.0 +194 98 training.batch_size 0.0 +194 99 model.embedding_dim 2.0 +194 99 loss.margin 8.063932851657542 +194 99 loss.adversarial_temperature 0.6515144971647326 +194 99 optimizer.lr 0.022566377062458395 +194 99 negative_sampler.num_negs_per_pos 93.0 +194 99 training.batch_size 2.0 +194 100 model.embedding_dim 0.0 +194 100 loss.margin 8.752100675573555 +194 100 loss.adversarial_temperature 0.4442152355933746 +194 100 optimizer.lr 0.03505617100056057 +194 100 negative_sampler.num_negs_per_pos 15.0 +194 100 training.batch_size 1.0 +194 1 dataset """kinships""" +194 1 model """distmult""" +194 1 loss """nssa""" +194 1 regularizer """no""" +194 1 optimizer """adam""" +194 1 training_loop """owa""" +194 1 negative_sampler """basic""" +194 1 evaluator """rankbased""" +194 2 dataset """kinships""" +194 2 model """distmult""" +194 2 loss """nssa""" +194 2 regularizer """no""" +194 2 optimizer """adam""" +194 2 training_loop """owa""" +194 2 negative_sampler """basic""" +194 2 evaluator """rankbased""" +194 3 dataset """kinships""" +194 3 model """distmult""" +194 3 loss """nssa""" +194 3 regularizer """no""" +194 3 optimizer """adam""" +194 3 training_loop """owa""" +194 3 negative_sampler """basic""" +194 3 evaluator """rankbased""" +194 4 dataset """kinships""" +194 4 model """distmult""" +194 4 loss """nssa""" +194 4 regularizer """no""" +194 4 optimizer """adam""" +194 4 training_loop """owa""" +194 4 negative_sampler """basic""" +194 4 evaluator """rankbased""" +194 5 dataset """kinships""" +194 5 model """distmult""" +194 5 loss """nssa""" +194 5 regularizer """no""" +194 5 optimizer """adam""" +194 5 training_loop """owa""" +194 5 negative_sampler """basic""" +194 5 evaluator """rankbased""" +194 6 dataset """kinships""" +194 6 model """distmult""" +194 6 loss """nssa""" +194 6 regularizer """no""" +194 6 optimizer """adam""" +194 6 training_loop """owa""" +194 6 negative_sampler """basic""" +194 6 evaluator """rankbased""" +194 7 
dataset """kinships""" +194 7 model """distmult""" +194 7 loss """nssa""" +194 7 regularizer """no""" +194 7 optimizer """adam""" +194 7 training_loop """owa""" +194 7 negative_sampler """basic""" +194 7 evaluator """rankbased""" +194 8 dataset """kinships""" +194 8 model """distmult""" +194 8 loss """nssa""" +194 8 regularizer """no""" +194 8 optimizer """adam""" +194 8 training_loop """owa""" +194 8 negative_sampler """basic""" +194 8 evaluator """rankbased""" +194 9 dataset """kinships""" +194 9 model """distmult""" +194 9 loss """nssa""" +194 9 regularizer """no""" +194 9 optimizer """adam""" +194 9 training_loop """owa""" +194 9 negative_sampler """basic""" +194 9 evaluator """rankbased""" +194 10 dataset """kinships""" +194 10 model """distmult""" +194 10 loss """nssa""" +194 10 regularizer """no""" +194 10 optimizer """adam""" +194 10 training_loop """owa""" +194 10 negative_sampler """basic""" +194 10 evaluator """rankbased""" +194 11 dataset """kinships""" +194 11 model """distmult""" +194 11 loss """nssa""" +194 11 regularizer """no""" +194 11 optimizer """adam""" +194 11 training_loop """owa""" +194 11 negative_sampler """basic""" +194 11 evaluator """rankbased""" +194 12 dataset """kinships""" +194 12 model """distmult""" +194 12 loss """nssa""" +194 12 regularizer """no""" +194 12 optimizer """adam""" +194 12 training_loop """owa""" +194 12 negative_sampler """basic""" +194 12 evaluator """rankbased""" +194 13 dataset """kinships""" +194 13 model """distmult""" +194 13 loss """nssa""" +194 13 regularizer """no""" +194 13 optimizer """adam""" +194 13 training_loop """owa""" +194 13 negative_sampler """basic""" +194 13 evaluator """rankbased""" +194 14 dataset """kinships""" +194 14 model """distmult""" +194 14 loss """nssa""" +194 14 regularizer """no""" +194 14 optimizer """adam""" +194 14 training_loop """owa""" +194 14 negative_sampler """basic""" +194 14 evaluator """rankbased""" +194 15 dataset """kinships""" +194 15 model """distmult""" +194 15 
loss """nssa""" +194 15 regularizer """no""" +194 15 optimizer """adam""" +194 15 training_loop """owa""" +194 15 negative_sampler """basic""" +194 15 evaluator """rankbased""" +194 16 dataset """kinships""" +194 16 model """distmult""" +194 16 loss """nssa""" +194 16 regularizer """no""" +194 16 optimizer """adam""" +194 16 training_loop """owa""" +194 16 negative_sampler """basic""" +194 16 evaluator """rankbased""" +194 17 dataset """kinships""" +194 17 model """distmult""" +194 17 loss """nssa""" +194 17 regularizer """no""" +194 17 optimizer """adam""" +194 17 training_loop """owa""" +194 17 negative_sampler """basic""" +194 17 evaluator """rankbased""" +194 18 dataset """kinships""" +194 18 model """distmult""" +194 18 loss """nssa""" +194 18 regularizer """no""" +194 18 optimizer """adam""" +194 18 training_loop """owa""" +194 18 negative_sampler """basic""" +194 18 evaluator """rankbased""" +194 19 dataset """kinships""" +194 19 model """distmult""" +194 19 loss """nssa""" +194 19 regularizer """no""" +194 19 optimizer """adam""" +194 19 training_loop """owa""" +194 19 negative_sampler """basic""" +194 19 evaluator """rankbased""" +194 20 dataset """kinships""" +194 20 model """distmult""" +194 20 loss """nssa""" +194 20 regularizer """no""" +194 20 optimizer """adam""" +194 20 training_loop """owa""" +194 20 negative_sampler """basic""" +194 20 evaluator """rankbased""" +194 21 dataset """kinships""" +194 21 model """distmult""" +194 21 loss """nssa""" +194 21 regularizer """no""" +194 21 optimizer """adam""" +194 21 training_loop """owa""" +194 21 negative_sampler """basic""" +194 21 evaluator """rankbased""" +194 22 dataset """kinships""" +194 22 model """distmult""" +194 22 loss """nssa""" +194 22 regularizer """no""" +194 22 optimizer """adam""" +194 22 training_loop """owa""" +194 22 negative_sampler """basic""" +194 22 evaluator """rankbased""" +194 23 dataset """kinships""" +194 23 model """distmult""" +194 23 loss """nssa""" +194 23 regularizer 
"""no""" +194 23 optimizer """adam""" +194 23 training_loop """owa""" +194 23 negative_sampler """basic""" +194 23 evaluator """rankbased""" +194 24 dataset """kinships""" +194 24 model """distmult""" +194 24 loss """nssa""" +194 24 regularizer """no""" +194 24 optimizer """adam""" +194 24 training_loop """owa""" +194 24 negative_sampler """basic""" +194 24 evaluator """rankbased""" +194 25 dataset """kinships""" +194 25 model """distmult""" +194 25 loss """nssa""" +194 25 regularizer """no""" +194 25 optimizer """adam""" +194 25 training_loop """owa""" +194 25 negative_sampler """basic""" +194 25 evaluator """rankbased""" +194 26 dataset """kinships""" +194 26 model """distmult""" +194 26 loss """nssa""" +194 26 regularizer """no""" +194 26 optimizer """adam""" +194 26 training_loop """owa""" +194 26 negative_sampler """basic""" +194 26 evaluator """rankbased""" +194 27 dataset """kinships""" +194 27 model """distmult""" +194 27 loss """nssa""" +194 27 regularizer """no""" +194 27 optimizer """adam""" +194 27 training_loop """owa""" +194 27 negative_sampler """basic""" +194 27 evaluator """rankbased""" +194 28 dataset """kinships""" +194 28 model """distmult""" +194 28 loss """nssa""" +194 28 regularizer """no""" +194 28 optimizer """adam""" +194 28 training_loop """owa""" +194 28 negative_sampler """basic""" +194 28 evaluator """rankbased""" +194 29 dataset """kinships""" +194 29 model """distmult""" +194 29 loss """nssa""" +194 29 regularizer """no""" +194 29 optimizer """adam""" +194 29 training_loop """owa""" +194 29 negative_sampler """basic""" +194 29 evaluator """rankbased""" +194 30 dataset """kinships""" +194 30 model """distmult""" +194 30 loss """nssa""" +194 30 regularizer """no""" +194 30 optimizer """adam""" +194 30 training_loop """owa""" +194 30 negative_sampler """basic""" +194 30 evaluator """rankbased""" +194 31 dataset """kinships""" +194 31 model """distmult""" +194 31 loss """nssa""" +194 31 regularizer """no""" +194 31 optimizer """adam""" 
+194 31 training_loop """owa""" +194 31 negative_sampler """basic""" +194 31 evaluator """rankbased""" +194 32 dataset """kinships""" +194 32 model """distmult""" +194 32 loss """nssa""" +194 32 regularizer """no""" +194 32 optimizer """adam""" +194 32 training_loop """owa""" +194 32 negative_sampler """basic""" +194 32 evaluator """rankbased""" +194 33 dataset """kinships""" +194 33 model """distmult""" +194 33 loss """nssa""" +194 33 regularizer """no""" +194 33 optimizer """adam""" +194 33 training_loop """owa""" +194 33 negative_sampler """basic""" +194 33 evaluator """rankbased""" +194 34 dataset """kinships""" +194 34 model """distmult""" +194 34 loss """nssa""" +194 34 regularizer """no""" +194 34 optimizer """adam""" +194 34 training_loop """owa""" +194 34 negative_sampler """basic""" +194 34 evaluator """rankbased""" +194 35 dataset """kinships""" +194 35 model """distmult""" +194 35 loss """nssa""" +194 35 regularizer """no""" +194 35 optimizer """adam""" +194 35 training_loop """owa""" +194 35 negative_sampler """basic""" +194 35 evaluator """rankbased""" +194 36 dataset """kinships""" +194 36 model """distmult""" +194 36 loss """nssa""" +194 36 regularizer """no""" +194 36 optimizer """adam""" +194 36 training_loop """owa""" +194 36 negative_sampler """basic""" +194 36 evaluator """rankbased""" +194 37 dataset """kinships""" +194 37 model """distmult""" +194 37 loss """nssa""" +194 37 regularizer """no""" +194 37 optimizer """adam""" +194 37 training_loop """owa""" +194 37 negative_sampler """basic""" +194 37 evaluator """rankbased""" +194 38 dataset """kinships""" +194 38 model """distmult""" +194 38 loss """nssa""" +194 38 regularizer """no""" +194 38 optimizer """adam""" +194 38 training_loop """owa""" +194 38 negative_sampler """basic""" +194 38 evaluator """rankbased""" +194 39 dataset """kinships""" +194 39 model """distmult""" +194 39 loss """nssa""" +194 39 regularizer """no""" +194 39 optimizer """adam""" +194 39 training_loop """owa""" +194 39 
negative_sampler """basic""" +194 39 evaluator """rankbased""" +194 40 dataset """kinships""" +194 40 model """distmult""" +194 40 loss """nssa""" +194 40 regularizer """no""" +194 40 optimizer """adam""" +194 40 training_loop """owa""" +194 40 negative_sampler """basic""" +194 40 evaluator """rankbased""" +194 41 dataset """kinships""" +194 41 model """distmult""" +194 41 loss """nssa""" +194 41 regularizer """no""" +194 41 optimizer """adam""" +194 41 training_loop """owa""" +194 41 negative_sampler """basic""" +194 41 evaluator """rankbased""" +194 42 dataset """kinships""" +194 42 model """distmult""" +194 42 loss """nssa""" +194 42 regularizer """no""" +194 42 optimizer """adam""" +194 42 training_loop """owa""" +194 42 negative_sampler """basic""" +194 42 evaluator """rankbased""" +194 43 dataset """kinships""" +194 43 model """distmult""" +194 43 loss """nssa""" +194 43 regularizer """no""" +194 43 optimizer """adam""" +194 43 training_loop """owa""" +194 43 negative_sampler """basic""" +194 43 evaluator """rankbased""" +194 44 dataset """kinships""" +194 44 model """distmult""" +194 44 loss """nssa""" +194 44 regularizer """no""" +194 44 optimizer """adam""" +194 44 training_loop """owa""" +194 44 negative_sampler """basic""" +194 44 evaluator """rankbased""" +194 45 dataset """kinships""" +194 45 model """distmult""" +194 45 loss """nssa""" +194 45 regularizer """no""" +194 45 optimizer """adam""" +194 45 training_loop """owa""" +194 45 negative_sampler """basic""" +194 45 evaluator """rankbased""" +194 46 dataset """kinships""" +194 46 model """distmult""" +194 46 loss """nssa""" +194 46 regularizer """no""" +194 46 optimizer """adam""" +194 46 training_loop """owa""" +194 46 negative_sampler """basic""" +194 46 evaluator """rankbased""" +194 47 dataset """kinships""" +194 47 model """distmult""" +194 47 loss """nssa""" +194 47 regularizer """no""" +194 47 optimizer """adam""" +194 47 training_loop """owa""" +194 47 negative_sampler """basic""" +194 47 
evaluator """rankbased""" +194 48 dataset """kinships""" +194 48 model """distmult""" +194 48 loss """nssa""" +194 48 regularizer """no""" +194 48 optimizer """adam""" +194 48 training_loop """owa""" +194 48 negative_sampler """basic""" +194 48 evaluator """rankbased""" +194 49 dataset """kinships""" +194 49 model """distmult""" +194 49 loss """nssa""" +194 49 regularizer """no""" +194 49 optimizer """adam""" +194 49 training_loop """owa""" +194 49 negative_sampler """basic""" +194 49 evaluator """rankbased""" +194 50 dataset """kinships""" +194 50 model """distmult""" +194 50 loss """nssa""" +194 50 regularizer """no""" +194 50 optimizer """adam""" +194 50 training_loop """owa""" +194 50 negative_sampler """basic""" +194 50 evaluator """rankbased""" +194 51 dataset """kinships""" +194 51 model """distmult""" +194 51 loss """nssa""" +194 51 regularizer """no""" +194 51 optimizer """adam""" +194 51 training_loop """owa""" +194 51 negative_sampler """basic""" +194 51 evaluator """rankbased""" +194 52 dataset """kinships""" +194 52 model """distmult""" +194 52 loss """nssa""" +194 52 regularizer """no""" +194 52 optimizer """adam""" +194 52 training_loop """owa""" +194 52 negative_sampler """basic""" +194 52 evaluator """rankbased""" +194 53 dataset """kinships""" +194 53 model """distmult""" +194 53 loss """nssa""" +194 53 regularizer """no""" +194 53 optimizer """adam""" +194 53 training_loop """owa""" +194 53 negative_sampler """basic""" +194 53 evaluator """rankbased""" +194 54 dataset """kinships""" +194 54 model """distmult""" +194 54 loss """nssa""" +194 54 regularizer """no""" +194 54 optimizer """adam""" +194 54 training_loop """owa""" +194 54 negative_sampler """basic""" +194 54 evaluator """rankbased""" +194 55 dataset """kinships""" +194 55 model """distmult""" +194 55 loss """nssa""" +194 55 regularizer """no""" +194 55 optimizer """adam""" +194 55 training_loop """owa""" +194 55 negative_sampler """basic""" +194 55 evaluator """rankbased""" +194 56 
dataset """kinships""" +194 56 model """distmult""" +194 56 loss """nssa""" +194 56 regularizer """no""" +194 56 optimizer """adam""" +194 56 training_loop """owa""" +194 56 negative_sampler """basic""" +194 56 evaluator """rankbased""" +194 57 dataset """kinships""" +194 57 model """distmult""" +194 57 loss """nssa""" +194 57 regularizer """no""" +194 57 optimizer """adam""" +194 57 training_loop """owa""" +194 57 negative_sampler """basic""" +194 57 evaluator """rankbased""" +194 58 dataset """kinships""" +194 58 model """distmult""" +194 58 loss """nssa""" +194 58 regularizer """no""" +194 58 optimizer """adam""" +194 58 training_loop """owa""" +194 58 negative_sampler """basic""" +194 58 evaluator """rankbased""" +194 59 dataset """kinships""" +194 59 model """distmult""" +194 59 loss """nssa""" +194 59 regularizer """no""" +194 59 optimizer """adam""" +194 59 training_loop """owa""" +194 59 negative_sampler """basic""" +194 59 evaluator """rankbased""" +194 60 dataset """kinships""" +194 60 model """distmult""" +194 60 loss """nssa""" +194 60 regularizer """no""" +194 60 optimizer """adam""" +194 60 training_loop """owa""" +194 60 negative_sampler """basic""" +194 60 evaluator """rankbased""" +194 61 dataset """kinships""" +194 61 model """distmult""" +194 61 loss """nssa""" +194 61 regularizer """no""" +194 61 optimizer """adam""" +194 61 training_loop """owa""" +194 61 negative_sampler """basic""" +194 61 evaluator """rankbased""" +194 62 dataset """kinships""" +194 62 model """distmult""" +194 62 loss """nssa""" +194 62 regularizer """no""" +194 62 optimizer """adam""" +194 62 training_loop """owa""" +194 62 negative_sampler """basic""" +194 62 evaluator """rankbased""" +194 63 dataset """kinships""" +194 63 model """distmult""" +194 63 loss """nssa""" +194 63 regularizer """no""" +194 63 optimizer """adam""" +194 63 training_loop """owa""" +194 63 negative_sampler """basic""" +194 63 evaluator """rankbased""" +194 64 dataset """kinships""" +194 64 model 
"""distmult""" +194 64 loss """nssa""" +194 64 regularizer """no""" +194 64 optimizer """adam""" +194 64 training_loop """owa""" +194 64 negative_sampler """basic""" +194 64 evaluator """rankbased""" +194 65 dataset """kinships""" +194 65 model """distmult""" +194 65 loss """nssa""" +194 65 regularizer """no""" +194 65 optimizer """adam""" +194 65 training_loop """owa""" +194 65 negative_sampler """basic""" +194 65 evaluator """rankbased""" +194 66 dataset """kinships""" +194 66 model """distmult""" +194 66 loss """nssa""" +194 66 regularizer """no""" +194 66 optimizer """adam""" +194 66 training_loop """owa""" +194 66 negative_sampler """basic""" +194 66 evaluator """rankbased""" +194 67 dataset """kinships""" +194 67 model """distmult""" +194 67 loss """nssa""" +194 67 regularizer """no""" +194 67 optimizer """adam""" +194 67 training_loop """owa""" +194 67 negative_sampler """basic""" +194 67 evaluator """rankbased""" +194 68 dataset """kinships""" +194 68 model """distmult""" +194 68 loss """nssa""" +194 68 regularizer """no""" +194 68 optimizer """adam""" +194 68 training_loop """owa""" +194 68 negative_sampler """basic""" +194 68 evaluator """rankbased""" +194 69 dataset """kinships""" +194 69 model """distmult""" +194 69 loss """nssa""" +194 69 regularizer """no""" +194 69 optimizer """adam""" +194 69 training_loop """owa""" +194 69 negative_sampler """basic""" +194 69 evaluator """rankbased""" +194 70 dataset """kinships""" +194 70 model """distmult""" +194 70 loss """nssa""" +194 70 regularizer """no""" +194 70 optimizer """adam""" +194 70 training_loop """owa""" +194 70 negative_sampler """basic""" +194 70 evaluator """rankbased""" +194 71 dataset """kinships""" +194 71 model """distmult""" +194 71 loss """nssa""" +194 71 regularizer """no""" +194 71 optimizer """adam""" +194 71 training_loop """owa""" +194 71 negative_sampler """basic""" +194 71 evaluator """rankbased""" +194 72 dataset """kinships""" +194 72 model """distmult""" +194 72 loss """nssa""" 
+194 72 regularizer """no""" +194 72 optimizer """adam""" +194 72 training_loop """owa""" +194 72 negative_sampler """basic""" +194 72 evaluator """rankbased""" +194 73 dataset """kinships""" +194 73 model """distmult""" +194 73 loss """nssa""" +194 73 regularizer """no""" +194 73 optimizer """adam""" +194 73 training_loop """owa""" +194 73 negative_sampler """basic""" +194 73 evaluator """rankbased""" +194 74 dataset """kinships""" +194 74 model """distmult""" +194 74 loss """nssa""" +194 74 regularizer """no""" +194 74 optimizer """adam""" +194 74 training_loop """owa""" +194 74 negative_sampler """basic""" +194 74 evaluator """rankbased""" +194 75 dataset """kinships""" +194 75 model """distmult""" +194 75 loss """nssa""" +194 75 regularizer """no""" +194 75 optimizer """adam""" +194 75 training_loop """owa""" +194 75 negative_sampler """basic""" +194 75 evaluator """rankbased""" +194 76 dataset """kinships""" +194 76 model """distmult""" +194 76 loss """nssa""" +194 76 regularizer """no""" +194 76 optimizer """adam""" +194 76 training_loop """owa""" +194 76 negative_sampler """basic""" +194 76 evaluator """rankbased""" +194 77 dataset """kinships""" +194 77 model """distmult""" +194 77 loss """nssa""" +194 77 regularizer """no""" +194 77 optimizer """adam""" +194 77 training_loop """owa""" +194 77 negative_sampler """basic""" +194 77 evaluator """rankbased""" +194 78 dataset """kinships""" +194 78 model """distmult""" +194 78 loss """nssa""" +194 78 regularizer """no""" +194 78 optimizer """adam""" +194 78 training_loop """owa""" +194 78 negative_sampler """basic""" +194 78 evaluator """rankbased""" +194 79 dataset """kinships""" +194 79 model """distmult""" +194 79 loss """nssa""" +194 79 regularizer """no""" +194 79 optimizer """adam""" +194 79 training_loop """owa""" +194 79 negative_sampler """basic""" +194 79 evaluator """rankbased""" +194 80 dataset """kinships""" +194 80 model """distmult""" +194 80 loss """nssa""" +194 80 regularizer """no""" +194 80 
optimizer """adam""" +194 80 training_loop """owa""" +194 80 negative_sampler """basic""" +194 80 evaluator """rankbased""" +194 81 dataset """kinships""" +194 81 model """distmult""" +194 81 loss """nssa""" +194 81 regularizer """no""" +194 81 optimizer """adam""" +194 81 training_loop """owa""" +194 81 negative_sampler """basic""" +194 81 evaluator """rankbased""" +194 82 dataset """kinships""" +194 82 model """distmult""" +194 82 loss """nssa""" +194 82 regularizer """no""" +194 82 optimizer """adam""" +194 82 training_loop """owa""" +194 82 negative_sampler """basic""" +194 82 evaluator """rankbased""" +194 83 dataset """kinships""" +194 83 model """distmult""" +194 83 loss """nssa""" +194 83 regularizer """no""" +194 83 optimizer """adam""" +194 83 training_loop """owa""" +194 83 negative_sampler """basic""" +194 83 evaluator """rankbased""" +194 84 dataset """kinships""" +194 84 model """distmult""" +194 84 loss """nssa""" +194 84 regularizer """no""" +194 84 optimizer """adam""" +194 84 training_loop """owa""" +194 84 negative_sampler """basic""" +194 84 evaluator """rankbased""" +194 85 dataset """kinships""" +194 85 model """distmult""" +194 85 loss """nssa""" +194 85 regularizer """no""" +194 85 optimizer """adam""" +194 85 training_loop """owa""" +194 85 negative_sampler """basic""" +194 85 evaluator """rankbased""" +194 86 dataset """kinships""" +194 86 model """distmult""" +194 86 loss """nssa""" +194 86 regularizer """no""" +194 86 optimizer """adam""" +194 86 training_loop """owa""" +194 86 negative_sampler """basic""" +194 86 evaluator """rankbased""" +194 87 dataset """kinships""" +194 87 model """distmult""" +194 87 loss """nssa""" +194 87 regularizer """no""" +194 87 optimizer """adam""" +194 87 training_loop """owa""" +194 87 negative_sampler """basic""" +194 87 evaluator """rankbased""" +194 88 dataset """kinships""" +194 88 model """distmult""" +194 88 loss """nssa""" +194 88 regularizer """no""" +194 88 optimizer """adam""" +194 88 
training_loop """owa""" +194 88 negative_sampler """basic""" +194 88 evaluator """rankbased""" +194 89 dataset """kinships""" +194 89 model """distmult""" +194 89 loss """nssa""" +194 89 regularizer """no""" +194 89 optimizer """adam""" +194 89 training_loop """owa""" +194 89 negative_sampler """basic""" +194 89 evaluator """rankbased""" +194 90 dataset """kinships""" +194 90 model """distmult""" +194 90 loss """nssa""" +194 90 regularizer """no""" +194 90 optimizer """adam""" +194 90 training_loop """owa""" +194 90 negative_sampler """basic""" +194 90 evaluator """rankbased""" +194 91 dataset """kinships""" +194 91 model """distmult""" +194 91 loss """nssa""" +194 91 regularizer """no""" +194 91 optimizer """adam""" +194 91 training_loop """owa""" +194 91 negative_sampler """basic""" +194 91 evaluator """rankbased""" +194 92 dataset """kinships""" +194 92 model """distmult""" +194 92 loss """nssa""" +194 92 regularizer """no""" +194 92 optimizer """adam""" +194 92 training_loop """owa""" +194 92 negative_sampler """basic""" +194 92 evaluator """rankbased""" +194 93 dataset """kinships""" +194 93 model """distmult""" +194 93 loss """nssa""" +194 93 regularizer """no""" +194 93 optimizer """adam""" +194 93 training_loop """owa""" +194 93 negative_sampler """basic""" +194 93 evaluator """rankbased""" +194 94 dataset """kinships""" +194 94 model """distmult""" +194 94 loss """nssa""" +194 94 regularizer """no""" +194 94 optimizer """adam""" +194 94 training_loop """owa""" +194 94 negative_sampler """basic""" +194 94 evaluator """rankbased""" +194 95 dataset """kinships""" +194 95 model """distmult""" +194 95 loss """nssa""" +194 95 regularizer """no""" +194 95 optimizer """adam""" +194 95 training_loop """owa""" +194 95 negative_sampler """basic""" +194 95 evaluator """rankbased""" +194 96 dataset """kinships""" +194 96 model """distmult""" +194 96 loss """nssa""" +194 96 regularizer """no""" +194 96 optimizer """adam""" +194 96 training_loop """owa""" +194 96 
negative_sampler """basic""" +194 96 evaluator """rankbased""" +194 97 dataset """kinships""" +194 97 model """distmult""" +194 97 loss """nssa""" +194 97 regularizer """no""" +194 97 optimizer """adam""" +194 97 training_loop """owa""" +194 97 negative_sampler """basic""" +194 97 evaluator """rankbased""" +194 98 dataset """kinships""" +194 98 model """distmult""" +194 98 loss """nssa""" +194 98 regularizer """no""" +194 98 optimizer """adam""" +194 98 training_loop """owa""" +194 98 negative_sampler """basic""" +194 98 evaluator """rankbased""" +194 99 dataset """kinships""" +194 99 model """distmult""" +194 99 loss """nssa""" +194 99 regularizer """no""" +194 99 optimizer """adam""" +194 99 training_loop """owa""" +194 99 negative_sampler """basic""" +194 99 evaluator """rankbased""" +194 100 dataset """kinships""" +194 100 model """distmult""" +194 100 loss """nssa""" +194 100 regularizer """no""" +194 100 optimizer """adam""" +194 100 training_loop """owa""" +194 100 negative_sampler """basic""" +194 100 evaluator """rankbased""" +195 1 model.embedding_dim 0.0 +195 1 loss.margin 16.89356820381019 +195 1 loss.adversarial_temperature 0.7413532353093858 +195 1 optimizer.lr 0.0013977570569662116 +195 1 negative_sampler.num_negs_per_pos 47.0 +195 1 training.batch_size 2.0 +195 2 model.embedding_dim 0.0 +195 2 loss.margin 3.192744551026196 +195 2 loss.adversarial_temperature 0.7071873691522638 +195 2 optimizer.lr 0.013941689284424009 +195 2 negative_sampler.num_negs_per_pos 92.0 +195 2 training.batch_size 0.0 +195 3 model.embedding_dim 1.0 +195 3 loss.margin 1.9590379268278135 +195 3 loss.adversarial_temperature 0.19123830130766592 +195 3 optimizer.lr 0.00120989637989837 +195 3 negative_sampler.num_negs_per_pos 75.0 +195 3 training.batch_size 2.0 +195 4 model.embedding_dim 2.0 +195 4 loss.margin 16.04550254431844 +195 4 loss.adversarial_temperature 0.18145943733082393 +195 4 optimizer.lr 0.014561971952992518 +195 4 negative_sampler.num_negs_per_pos 37.0 +195 4 
training.batch_size 1.0 +195 5 model.embedding_dim 1.0 +195 5 loss.margin 5.661721668036142 +195 5 loss.adversarial_temperature 0.4203813037971863 +195 5 optimizer.lr 0.020136181378865373 +195 5 negative_sampler.num_negs_per_pos 64.0 +195 5 training.batch_size 0.0 +195 6 model.embedding_dim 2.0 +195 6 loss.margin 17.133781504816962 +195 6 loss.adversarial_temperature 0.9334836969660703 +195 6 optimizer.lr 0.013393943003472757 +195 6 negative_sampler.num_negs_per_pos 4.0 +195 6 training.batch_size 2.0 +195 7 model.embedding_dim 1.0 +195 7 loss.margin 20.200226337636977 +195 7 loss.adversarial_temperature 0.7908283322900516 +195 7 optimizer.lr 0.002556160520863638 +195 7 negative_sampler.num_negs_per_pos 23.0 +195 7 training.batch_size 2.0 +195 8 model.embedding_dim 1.0 +195 8 loss.margin 27.268526776959824 +195 8 loss.adversarial_temperature 0.13862227820161807 +195 8 optimizer.lr 0.04334041813888457 +195 8 negative_sampler.num_negs_per_pos 27.0 +195 8 training.batch_size 2.0 +195 9 model.embedding_dim 2.0 +195 9 loss.margin 4.634831496942574 +195 9 loss.adversarial_temperature 0.6430446016961625 +195 9 optimizer.lr 0.038000885154931933 +195 9 negative_sampler.num_negs_per_pos 44.0 +195 9 training.batch_size 0.0 +195 10 model.embedding_dim 0.0 +195 10 loss.margin 20.91634056550817 +195 10 loss.adversarial_temperature 0.8157800238992767 +195 10 optimizer.lr 0.004445610038010474 +195 10 negative_sampler.num_negs_per_pos 81.0 +195 10 training.batch_size 1.0 +195 11 model.embedding_dim 0.0 +195 11 loss.margin 29.63005940899553 +195 11 loss.adversarial_temperature 0.9141809423199214 +195 11 optimizer.lr 0.001148519352954452 +195 11 negative_sampler.num_negs_per_pos 37.0 +195 11 training.batch_size 0.0 +195 12 model.embedding_dim 0.0 +195 12 loss.margin 22.252307827539997 +195 12 loss.adversarial_temperature 0.9234462831932911 +195 12 optimizer.lr 0.0039211334869403485 +195 12 negative_sampler.num_negs_per_pos 84.0 +195 12 training.batch_size 1.0 +195 13 
model.embedding_dim 0.0 +195 13 loss.margin 5.236975976378314 +195 13 loss.adversarial_temperature 0.5141556278577561 +195 13 optimizer.lr 0.037503042292975006 +195 13 negative_sampler.num_negs_per_pos 68.0 +195 13 training.batch_size 0.0 +195 14 model.embedding_dim 0.0 +195 14 loss.margin 5.765017645821307 +195 14 loss.adversarial_temperature 0.6795134904872183 +195 14 optimizer.lr 0.011736183751077945 +195 14 negative_sampler.num_negs_per_pos 99.0 +195 14 training.batch_size 0.0 +195 15 model.embedding_dim 2.0 +195 15 loss.margin 7.714109996868022 +195 15 loss.adversarial_temperature 0.9153462457933123 +195 15 optimizer.lr 0.012334966813174081 +195 15 negative_sampler.num_negs_per_pos 69.0 +195 15 training.batch_size 1.0 +195 16 model.embedding_dim 0.0 +195 16 loss.margin 3.78501923053006 +195 16 loss.adversarial_temperature 0.28537958646266925 +195 16 optimizer.lr 0.005792655124267249 +195 16 negative_sampler.num_negs_per_pos 70.0 +195 16 training.batch_size 0.0 +195 17 model.embedding_dim 2.0 +195 17 loss.margin 3.891721477899239 +195 17 loss.adversarial_temperature 0.9017130094376146 +195 17 optimizer.lr 0.007285820129894264 +195 17 negative_sampler.num_negs_per_pos 81.0 +195 17 training.batch_size 0.0 +195 18 model.embedding_dim 1.0 +195 18 loss.margin 22.365712800609536 +195 18 loss.adversarial_temperature 0.4412886040425078 +195 18 optimizer.lr 0.028145439275048152 +195 18 negative_sampler.num_negs_per_pos 60.0 +195 18 training.batch_size 2.0 +195 19 model.embedding_dim 2.0 +195 19 loss.margin 12.692448851530031 +195 19 loss.adversarial_temperature 0.7674367712713693 +195 19 optimizer.lr 0.00484637973399735 +195 19 negative_sampler.num_negs_per_pos 59.0 +195 19 training.batch_size 0.0 +195 20 model.embedding_dim 1.0 +195 20 loss.margin 10.367276129345422 +195 20 loss.adversarial_temperature 0.9114220230835006 +195 20 optimizer.lr 0.00943705372235372 +195 20 negative_sampler.num_negs_per_pos 42.0 +195 20 training.batch_size 2.0 +195 21 model.embedding_dim 
2.0 +195 21 loss.margin 8.874610417129798 +195 21 loss.adversarial_temperature 0.25296618419296524 +195 21 optimizer.lr 0.010577199835308481 +195 21 negative_sampler.num_negs_per_pos 75.0 +195 21 training.batch_size 0.0 +195 22 model.embedding_dim 0.0 +195 22 loss.margin 17.770282533001595 +195 22 loss.adversarial_temperature 0.47225514654130063 +195 22 optimizer.lr 0.003444290256758542 +195 22 negative_sampler.num_negs_per_pos 12.0 +195 22 training.batch_size 2.0 +195 23 model.embedding_dim 2.0 +195 23 loss.margin 24.53459150968846 +195 23 loss.adversarial_temperature 0.602090387135915 +195 23 optimizer.lr 0.07079258283436732 +195 23 negative_sampler.num_negs_per_pos 78.0 +195 23 training.batch_size 0.0 +195 24 model.embedding_dim 2.0 +195 24 loss.margin 12.384577357424751 +195 24 loss.adversarial_temperature 0.18476764286479397 +195 24 optimizer.lr 0.0011262982392279083 +195 24 negative_sampler.num_negs_per_pos 89.0 +195 24 training.batch_size 0.0 +195 25 model.embedding_dim 2.0 +195 25 loss.margin 16.832307932755043 +195 25 loss.adversarial_temperature 0.651732091741283 +195 25 optimizer.lr 0.06433958999991889 +195 25 negative_sampler.num_negs_per_pos 42.0 +195 25 training.batch_size 2.0 +195 26 model.embedding_dim 2.0 +195 26 loss.margin 1.8196848595022386 +195 26 loss.adversarial_temperature 0.21941512450190254 +195 26 optimizer.lr 0.07847009556572736 +195 26 negative_sampler.num_negs_per_pos 29.0 +195 26 training.batch_size 0.0 +195 27 model.embedding_dim 1.0 +195 27 loss.margin 7.668232591771828 +195 27 loss.adversarial_temperature 0.2914358255963335 +195 27 optimizer.lr 0.0051358164040781985 +195 27 negative_sampler.num_negs_per_pos 67.0 +195 27 training.batch_size 0.0 +195 28 model.embedding_dim 0.0 +195 28 loss.margin 19.726369251545307 +195 28 loss.adversarial_temperature 0.3998347071610773 +195 28 optimizer.lr 0.0941629892189052 +195 28 negative_sampler.num_negs_per_pos 80.0 +195 28 training.batch_size 1.0 +195 29 model.embedding_dim 0.0 +195 29 
loss.margin 20.061829959718338 +195 29 loss.adversarial_temperature 0.560758063316535 +195 29 optimizer.lr 0.006719980428652593 +195 29 negative_sampler.num_negs_per_pos 28.0 +195 29 training.batch_size 0.0 +195 30 model.embedding_dim 2.0 +195 30 loss.margin 19.094823908139244 +195 30 loss.adversarial_temperature 0.5514964657733394 +195 30 optimizer.lr 0.013389478483614357 +195 30 negative_sampler.num_negs_per_pos 69.0 +195 30 training.batch_size 0.0 +195 31 model.embedding_dim 2.0 +195 31 loss.margin 26.48382578403237 +195 31 loss.adversarial_temperature 0.5340245314041446 +195 31 optimizer.lr 0.03546996117269695 +195 31 negative_sampler.num_negs_per_pos 81.0 +195 31 training.batch_size 2.0 +195 32 model.embedding_dim 1.0 +195 32 loss.margin 21.555450946144557 +195 32 loss.adversarial_temperature 0.7722539229616008 +195 32 optimizer.lr 0.006275335156745448 +195 32 negative_sampler.num_negs_per_pos 31.0 +195 32 training.batch_size 2.0 +195 33 model.embedding_dim 2.0 +195 33 loss.margin 5.483668064737392 +195 33 loss.adversarial_temperature 0.5665267207315671 +195 33 optimizer.lr 0.04302517059373892 +195 33 negative_sampler.num_negs_per_pos 64.0 +195 33 training.batch_size 2.0 +195 34 model.embedding_dim 0.0 +195 34 loss.margin 25.400414810775775 +195 34 loss.adversarial_temperature 0.5873948725755125 +195 34 optimizer.lr 0.0019450424832225385 +195 34 negative_sampler.num_negs_per_pos 45.0 +195 34 training.batch_size 1.0 +195 35 model.embedding_dim 1.0 +195 35 loss.margin 24.247131365834736 +195 35 loss.adversarial_temperature 0.39274687057285185 +195 35 optimizer.lr 0.03251276432582353 +195 35 negative_sampler.num_negs_per_pos 25.0 +195 35 training.batch_size 2.0 +195 36 model.embedding_dim 2.0 +195 36 loss.margin 18.923548369059336 +195 36 loss.adversarial_temperature 0.9752487912909412 +195 36 optimizer.lr 0.05107248507067582 +195 36 negative_sampler.num_negs_per_pos 71.0 +195 36 training.batch_size 0.0 +195 37 model.embedding_dim 2.0 +195 37 loss.margin 
27.810642538650672 +195 37 loss.adversarial_temperature 0.9283199614003018 +195 37 optimizer.lr 0.03857918793718182 +195 37 negative_sampler.num_negs_per_pos 49.0 +195 37 training.batch_size 0.0 +195 38 model.embedding_dim 1.0 +195 38 loss.margin 11.638757735836187 +195 38 loss.adversarial_temperature 0.5157539324910495 +195 38 optimizer.lr 0.061342495959116755 +195 38 negative_sampler.num_negs_per_pos 59.0 +195 38 training.batch_size 1.0 +195 39 model.embedding_dim 2.0 +195 39 loss.margin 9.15200632729173 +195 39 loss.adversarial_temperature 0.5753232971577841 +195 39 optimizer.lr 0.0037794146763979256 +195 39 negative_sampler.num_negs_per_pos 99.0 +195 39 training.batch_size 1.0 +195 40 model.embedding_dim 2.0 +195 40 loss.margin 6.214407278157501 +195 40 loss.adversarial_temperature 0.5670666842315438 +195 40 optimizer.lr 0.030932701747753363 +195 40 negative_sampler.num_negs_per_pos 30.0 +195 40 training.batch_size 2.0 +195 41 model.embedding_dim 0.0 +195 41 loss.margin 18.467188799550424 +195 41 loss.adversarial_temperature 0.7145595880421435 +195 41 optimizer.lr 0.004020927761026708 +195 41 negative_sampler.num_negs_per_pos 29.0 +195 41 training.batch_size 1.0 +195 42 model.embedding_dim 1.0 +195 42 loss.margin 6.260779769150119 +195 42 loss.adversarial_temperature 0.37081564811443024 +195 42 optimizer.lr 0.001763615986043564 +195 42 negative_sampler.num_negs_per_pos 44.0 +195 42 training.batch_size 1.0 +195 43 model.embedding_dim 2.0 +195 43 loss.margin 15.488014757437723 +195 43 loss.adversarial_temperature 0.312883924478166 +195 43 optimizer.lr 0.0012156456553185217 +195 43 negative_sampler.num_negs_per_pos 67.0 +195 43 training.batch_size 2.0 +195 44 model.embedding_dim 0.0 +195 44 loss.margin 17.596698392820503 +195 44 loss.adversarial_temperature 0.8063937163366022 +195 44 optimizer.lr 0.004669051022999702 +195 44 negative_sampler.num_negs_per_pos 51.0 +195 44 training.batch_size 1.0 +195 45 model.embedding_dim 2.0 +195 45 loss.margin 5.620802004588566 
+195 45 loss.adversarial_temperature 0.6968640313862345 +195 45 optimizer.lr 0.0015165015688842819 +195 45 negative_sampler.num_negs_per_pos 27.0 +195 45 training.batch_size 2.0 +195 46 model.embedding_dim 0.0 +195 46 loss.margin 28.89112725806265 +195 46 loss.adversarial_temperature 0.6532845671269233 +195 46 optimizer.lr 0.03781996827000511 +195 46 negative_sampler.num_negs_per_pos 36.0 +195 46 training.batch_size 0.0 +195 47 model.embedding_dim 1.0 +195 47 loss.margin 22.71265586799487 +195 47 loss.adversarial_temperature 0.1392194934342711 +195 47 optimizer.lr 0.00828150215979453 +195 47 negative_sampler.num_negs_per_pos 74.0 +195 47 training.batch_size 2.0 +195 48 model.embedding_dim 1.0 +195 48 loss.margin 2.116038561254122 +195 48 loss.adversarial_temperature 0.38444989036831756 +195 48 optimizer.lr 0.07256623720792585 +195 48 negative_sampler.num_negs_per_pos 10.0 +195 48 training.batch_size 0.0 +195 49 model.embedding_dim 0.0 +195 49 loss.margin 23.361172158262367 +195 49 loss.adversarial_temperature 0.22258885333835754 +195 49 optimizer.lr 0.028791418215432067 +195 49 negative_sampler.num_negs_per_pos 34.0 +195 49 training.batch_size 2.0 +195 50 model.embedding_dim 0.0 +195 50 loss.margin 18.737503320975925 +195 50 loss.adversarial_temperature 0.4301472658723109 +195 50 optimizer.lr 0.004945008542928176 +195 50 negative_sampler.num_negs_per_pos 24.0 +195 50 training.batch_size 2.0 +195 51 model.embedding_dim 0.0 +195 51 loss.margin 8.589473801995982 +195 51 loss.adversarial_temperature 0.13933899777010286 +195 51 optimizer.lr 0.009832119854304103 +195 51 negative_sampler.num_negs_per_pos 21.0 +195 51 training.batch_size 1.0 +195 52 model.embedding_dim 2.0 +195 52 loss.margin 2.9015352858219283 +195 52 loss.adversarial_temperature 0.5739139479027395 +195 52 optimizer.lr 0.012899368649100303 +195 52 negative_sampler.num_negs_per_pos 92.0 +195 52 training.batch_size 2.0 +195 53 model.embedding_dim 2.0 +195 53 loss.margin 21.58431561008811 +195 53 
loss.adversarial_temperature 0.6043771843971307 +195 53 optimizer.lr 0.001021359811633264 +195 53 negative_sampler.num_negs_per_pos 36.0 +195 53 training.batch_size 0.0 +195 54 model.embedding_dim 0.0 +195 54 loss.margin 23.830803425833302 +195 54 loss.adversarial_temperature 0.7332701581826206 +195 54 optimizer.lr 0.004603193004399122 +195 54 negative_sampler.num_negs_per_pos 23.0 +195 54 training.batch_size 0.0 +195 55 model.embedding_dim 2.0 +195 55 loss.margin 24.84459005227863 +195 55 loss.adversarial_temperature 0.6876067478516527 +195 55 optimizer.lr 0.0011879382795809102 +195 55 negative_sampler.num_negs_per_pos 5.0 +195 55 training.batch_size 2.0 +195 56 model.embedding_dim 0.0 +195 56 loss.margin 5.256644088313768 +195 56 loss.adversarial_temperature 0.9946377275010727 +195 56 optimizer.lr 0.020958850771131975 +195 56 negative_sampler.num_negs_per_pos 62.0 +195 56 training.batch_size 1.0 +195 57 model.embedding_dim 2.0 +195 57 loss.margin 3.2593855039707424 +195 57 loss.adversarial_temperature 0.26632516408394624 +195 57 optimizer.lr 0.0011715730779035779 +195 57 negative_sampler.num_negs_per_pos 66.0 +195 57 training.batch_size 1.0 +195 58 model.embedding_dim 1.0 +195 58 loss.margin 27.459519058366254 +195 58 loss.adversarial_temperature 0.1633492599547059 +195 58 optimizer.lr 0.0020064230396776186 +195 58 negative_sampler.num_negs_per_pos 1.0 +195 58 training.batch_size 1.0 +195 59 model.embedding_dim 0.0 +195 59 loss.margin 10.0415099504502 +195 59 loss.adversarial_temperature 0.19453263110434937 +195 59 optimizer.lr 0.0056844347003252 +195 59 negative_sampler.num_negs_per_pos 33.0 +195 59 training.batch_size 1.0 +195 60 model.embedding_dim 0.0 +195 60 loss.margin 9.459917782056728 +195 60 loss.adversarial_temperature 0.39382053631656166 +195 60 optimizer.lr 0.06682810797122068 +195 60 negative_sampler.num_negs_per_pos 4.0 +195 60 training.batch_size 1.0 +195 61 model.embedding_dim 2.0 +195 61 loss.margin 22.44380308564871 +195 61 
loss.adversarial_temperature 0.10737685532727877 +195 61 optimizer.lr 0.07444829238760582 +195 61 negative_sampler.num_negs_per_pos 99.0 +195 61 training.batch_size 0.0 +195 62 model.embedding_dim 2.0 +195 62 loss.margin 22.781295851496214 +195 62 loss.adversarial_temperature 0.6640567114306061 +195 62 optimizer.lr 0.026411229859767633 +195 62 negative_sampler.num_negs_per_pos 29.0 +195 62 training.batch_size 1.0 +195 63 model.embedding_dim 1.0 +195 63 loss.margin 5.095819218803994 +195 63 loss.adversarial_temperature 0.8481931452593745 +195 63 optimizer.lr 0.00827614660309713 +195 63 negative_sampler.num_negs_per_pos 18.0 +195 63 training.batch_size 1.0 +195 64 model.embedding_dim 1.0 +195 64 loss.margin 10.950956673989635 +195 64 loss.adversarial_temperature 0.7118673100963855 +195 64 optimizer.lr 0.001915996497785007 +195 64 negative_sampler.num_negs_per_pos 85.0 +195 64 training.batch_size 2.0 +195 65 model.embedding_dim 1.0 +195 65 loss.margin 18.788003787657587 +195 65 loss.adversarial_temperature 0.35721900615398117 +195 65 optimizer.lr 0.006931074779067925 +195 65 negative_sampler.num_negs_per_pos 83.0 +195 65 training.batch_size 0.0 +195 66 model.embedding_dim 0.0 +195 66 loss.margin 20.34621296997762 +195 66 loss.adversarial_temperature 0.8032568448178562 +195 66 optimizer.lr 0.022382681741160773 +195 66 negative_sampler.num_negs_per_pos 31.0 +195 66 training.batch_size 2.0 +195 67 model.embedding_dim 1.0 +195 67 loss.margin 27.489048641989246 +195 67 loss.adversarial_temperature 0.5086096181610011 +195 67 optimizer.lr 0.013824132619769927 +195 67 negative_sampler.num_negs_per_pos 50.0 +195 67 training.batch_size 1.0 +195 68 model.embedding_dim 0.0 +195 68 loss.margin 6.232138393291285 +195 68 loss.adversarial_temperature 0.24153237053949334 +195 68 optimizer.lr 0.022394215728144998 +195 68 negative_sampler.num_negs_per_pos 79.0 +195 68 training.batch_size 1.0 +195 69 model.embedding_dim 2.0 +195 69 loss.margin 29.73989881066459 +195 69 
loss.adversarial_temperature 0.3401619088868449 +195 69 optimizer.lr 0.04614174701443053 +195 69 negative_sampler.num_negs_per_pos 73.0 +195 69 training.batch_size 0.0 +195 70 model.embedding_dim 1.0 +195 70 loss.margin 9.570877349718822 +195 70 loss.adversarial_temperature 0.6773248586318251 +195 70 optimizer.lr 0.0038682254328296435 +195 70 negative_sampler.num_negs_per_pos 7.0 +195 70 training.batch_size 1.0 +195 71 model.embedding_dim 1.0 +195 71 loss.margin 12.22109280198769 +195 71 loss.adversarial_temperature 0.7814901212960849 +195 71 optimizer.lr 0.0013603015416634202 +195 71 negative_sampler.num_negs_per_pos 99.0 +195 71 training.batch_size 1.0 +195 72 model.embedding_dim 0.0 +195 72 loss.margin 29.044956820696385 +195 72 loss.adversarial_temperature 0.46653354262103264 +195 72 optimizer.lr 0.001577108780247923 +195 72 negative_sampler.num_negs_per_pos 32.0 +195 72 training.batch_size 1.0 +195 73 model.embedding_dim 1.0 +195 73 loss.margin 17.691343878257374 +195 73 loss.adversarial_temperature 0.6372622959757904 +195 73 optimizer.lr 0.029825450334851428 +195 73 negative_sampler.num_negs_per_pos 20.0 +195 73 training.batch_size 1.0 +195 74 model.embedding_dim 2.0 +195 74 loss.margin 28.073777806815972 +195 74 loss.adversarial_temperature 0.35093304299652084 +195 74 optimizer.lr 0.029658996855615082 +195 74 negative_sampler.num_negs_per_pos 10.0 +195 74 training.batch_size 2.0 +195 75 model.embedding_dim 1.0 +195 75 loss.margin 25.556955299458195 +195 75 loss.adversarial_temperature 0.2415282604807531 +195 75 optimizer.lr 0.001500778365253192 +195 75 negative_sampler.num_negs_per_pos 20.0 +195 75 training.batch_size 1.0 +195 76 model.embedding_dim 1.0 +195 76 loss.margin 4.200162459629576 +195 76 loss.adversarial_temperature 0.5393936981645401 +195 76 optimizer.lr 0.0030728677319149006 +195 76 negative_sampler.num_negs_per_pos 46.0 +195 76 training.batch_size 0.0 +195 77 model.embedding_dim 2.0 +195 77 loss.margin 1.934145208602796 +195 77 
loss.adversarial_temperature 0.2014992502103819 +195 77 optimizer.lr 0.010380520296197031 +195 77 negative_sampler.num_negs_per_pos 80.0 +195 77 training.batch_size 1.0 +195 78 model.embedding_dim 0.0 +195 78 loss.margin 12.642393388876258 +195 78 loss.adversarial_temperature 0.20291472122506993 +195 78 optimizer.lr 0.033269779548568364 +195 78 negative_sampler.num_negs_per_pos 11.0 +195 78 training.batch_size 2.0 +195 79 model.embedding_dim 0.0 +195 79 loss.margin 5.545600452740847 +195 79 loss.adversarial_temperature 0.21322615548883567 +195 79 optimizer.lr 0.07881625774270623 +195 79 negative_sampler.num_negs_per_pos 60.0 +195 79 training.batch_size 0.0 +195 80 model.embedding_dim 0.0 +195 80 loss.margin 16.375847710731293 +195 80 loss.adversarial_temperature 0.17558560077429958 +195 80 optimizer.lr 0.07870181167536681 +195 80 negative_sampler.num_negs_per_pos 88.0 +195 80 training.batch_size 1.0 +195 81 model.embedding_dim 1.0 +195 81 loss.margin 26.083051156828052 +195 81 loss.adversarial_temperature 0.9513736479244177 +195 81 optimizer.lr 0.0016018783735991127 +195 81 negative_sampler.num_negs_per_pos 76.0 +195 81 training.batch_size 0.0 +195 82 model.embedding_dim 0.0 +195 82 loss.margin 2.325305663380412 +195 82 loss.adversarial_temperature 0.9721930412771141 +195 82 optimizer.lr 0.0012315090791077392 +195 82 negative_sampler.num_negs_per_pos 26.0 +195 82 training.batch_size 2.0 +195 83 model.embedding_dim 1.0 +195 83 loss.margin 5.261359587667802 +195 83 loss.adversarial_temperature 0.8024917888768244 +195 83 optimizer.lr 0.0011801658610212688 +195 83 negative_sampler.num_negs_per_pos 26.0 +195 83 training.batch_size 0.0 +195 84 model.embedding_dim 0.0 +195 84 loss.margin 8.063145135890084 +195 84 loss.adversarial_temperature 0.129418291200115 +195 84 optimizer.lr 0.0032042382853137256 +195 84 negative_sampler.num_negs_per_pos 79.0 +195 84 training.batch_size 2.0 +195 85 model.embedding_dim 0.0 +195 85 loss.margin 9.691243045616009 +195 85 
loss.adversarial_temperature 0.5939275765528679 +195 85 optimizer.lr 0.009906446348095094 +195 85 negative_sampler.num_negs_per_pos 8.0 +195 85 training.batch_size 0.0 +195 86 model.embedding_dim 2.0 +195 86 loss.margin 6.02502602519499 +195 86 loss.adversarial_temperature 0.7358617841377414 +195 86 optimizer.lr 0.01053505281480236 +195 86 negative_sampler.num_negs_per_pos 4.0 +195 86 training.batch_size 1.0 +195 87 model.embedding_dim 0.0 +195 87 loss.margin 18.22811145200148 +195 87 loss.adversarial_temperature 0.5248489478276802 +195 87 optimizer.lr 0.0011413011264082124 +195 87 negative_sampler.num_negs_per_pos 50.0 +195 87 training.batch_size 0.0 +195 88 model.embedding_dim 0.0 +195 88 loss.margin 8.232750938989144 +195 88 loss.adversarial_temperature 0.4795931830919642 +195 88 optimizer.lr 0.006301713444222223 +195 88 negative_sampler.num_negs_per_pos 11.0 +195 88 training.batch_size 1.0 +195 89 model.embedding_dim 0.0 +195 89 loss.margin 15.62181967871458 +195 89 loss.adversarial_temperature 0.2754300853142174 +195 89 optimizer.lr 0.007328699770339374 +195 89 negative_sampler.num_negs_per_pos 32.0 +195 89 training.batch_size 2.0 +195 90 model.embedding_dim 0.0 +195 90 loss.margin 6.5190563283032255 +195 90 loss.adversarial_temperature 0.9603110461734161 +195 90 optimizer.lr 0.0022359789257001455 +195 90 negative_sampler.num_negs_per_pos 8.0 +195 90 training.batch_size 2.0 +195 91 model.embedding_dim 1.0 +195 91 loss.margin 24.00488271729412 +195 91 loss.adversarial_temperature 0.9889661149436335 +195 91 optimizer.lr 0.03709410613524943 +195 91 negative_sampler.num_negs_per_pos 55.0 +195 91 training.batch_size 2.0 +195 92 model.embedding_dim 2.0 +195 92 loss.margin 25.86687365334208 +195 92 loss.adversarial_temperature 0.8868893693254639 +195 92 optimizer.lr 0.006351275937756843 +195 92 negative_sampler.num_negs_per_pos 22.0 +195 92 training.batch_size 0.0 +195 93 model.embedding_dim 0.0 +195 93 loss.margin 9.922537110533776 +195 93 
loss.adversarial_temperature 0.8835376184968822 +195 93 optimizer.lr 0.004512052378065541 +195 93 negative_sampler.num_negs_per_pos 81.0 +195 93 training.batch_size 1.0 +195 94 model.embedding_dim 0.0 +195 94 loss.margin 6.969272936390973 +195 94 loss.adversarial_temperature 0.753621777447906 +195 94 optimizer.lr 0.023774683778006372 +195 94 negative_sampler.num_negs_per_pos 79.0 +195 94 training.batch_size 1.0 +195 95 model.embedding_dim 1.0 +195 95 loss.margin 1.1859625548823094 +195 95 loss.adversarial_temperature 0.172242102772441 +195 95 optimizer.lr 0.06875385397030924 +195 95 negative_sampler.num_negs_per_pos 37.0 +195 95 training.batch_size 1.0 +195 96 model.embedding_dim 2.0 +195 96 loss.margin 28.837611541220305 +195 96 loss.adversarial_temperature 0.38524974500289766 +195 96 optimizer.lr 0.005564218111219338 +195 96 negative_sampler.num_negs_per_pos 16.0 +195 96 training.batch_size 2.0 +195 97 model.embedding_dim 1.0 +195 97 loss.margin 8.950308101417821 +195 97 loss.adversarial_temperature 0.7511015296489061 +195 97 optimizer.lr 0.031823135316817665 +195 97 negative_sampler.num_negs_per_pos 80.0 +195 97 training.batch_size 1.0 +195 98 model.embedding_dim 1.0 +195 98 loss.margin 4.645910261522339 +195 98 loss.adversarial_temperature 0.8846854553282899 +195 98 optimizer.lr 0.04305497212681429 +195 98 negative_sampler.num_negs_per_pos 35.0 +195 98 training.batch_size 2.0 +195 99 model.embedding_dim 1.0 +195 99 loss.margin 3.269683600420866 +195 99 loss.adversarial_temperature 0.33670392057798226 +195 99 optimizer.lr 0.008794815324440477 +195 99 negative_sampler.num_negs_per_pos 25.0 +195 99 training.batch_size 2.0 +195 100 model.embedding_dim 1.0 +195 100 loss.margin 20.489930131675614 +195 100 loss.adversarial_temperature 0.7665560844374553 +195 100 optimizer.lr 0.0021937855182241513 +195 100 negative_sampler.num_negs_per_pos 98.0 +195 100 training.batch_size 0.0 +195 1 dataset """kinships""" +195 1 model """distmult""" +195 1 loss """nssa""" +195 1 
regularizer """no""" +195 1 optimizer """adam""" +195 1 training_loop """owa""" +195 1 negative_sampler """basic""" +195 1 evaluator """rankbased""" +195 2 dataset """kinships""" +195 2 model """distmult""" +195 2 loss """nssa""" +195 2 regularizer """no""" +195 2 optimizer """adam""" +195 2 training_loop """owa""" +195 2 negative_sampler """basic""" +195 2 evaluator """rankbased""" +195 3 dataset """kinships""" +195 3 model """distmult""" +195 3 loss """nssa""" +195 3 regularizer """no""" +195 3 optimizer """adam""" +195 3 training_loop """owa""" +195 3 negative_sampler """basic""" +195 3 evaluator """rankbased""" +195 4 dataset """kinships""" +195 4 model """distmult""" +195 4 loss """nssa""" +195 4 regularizer """no""" +195 4 optimizer """adam""" +195 4 training_loop """owa""" +195 4 negative_sampler """basic""" +195 4 evaluator """rankbased""" +195 5 dataset """kinships""" +195 5 model """distmult""" +195 5 loss """nssa""" +195 5 regularizer """no""" +195 5 optimizer """adam""" +195 5 training_loop """owa""" +195 5 negative_sampler """basic""" +195 5 evaluator """rankbased""" +195 6 dataset """kinships""" +195 6 model """distmult""" +195 6 loss """nssa""" +195 6 regularizer """no""" +195 6 optimizer """adam""" +195 6 training_loop """owa""" +195 6 negative_sampler """basic""" +195 6 evaluator """rankbased""" +195 7 dataset """kinships""" +195 7 model """distmult""" +195 7 loss """nssa""" +195 7 regularizer """no""" +195 7 optimizer """adam""" +195 7 training_loop """owa""" +195 7 negative_sampler """basic""" +195 7 evaluator """rankbased""" +195 8 dataset """kinships""" +195 8 model """distmult""" +195 8 loss """nssa""" +195 8 regularizer """no""" +195 8 optimizer """adam""" +195 8 training_loop """owa""" +195 8 negative_sampler """basic""" +195 8 evaluator """rankbased""" +195 9 dataset """kinships""" +195 9 model """distmult""" +195 9 loss """nssa""" +195 9 regularizer """no""" +195 9 optimizer """adam""" +195 9 training_loop """owa""" +195 9 negative_sampler 
"""basic""" +195 9 evaluator """rankbased""" +195 10 dataset """kinships""" +195 10 model """distmult""" +195 10 loss """nssa""" +195 10 regularizer """no""" +195 10 optimizer """adam""" +195 10 training_loop """owa""" +195 10 negative_sampler """basic""" +195 10 evaluator """rankbased""" +195 11 dataset """kinships""" +195 11 model """distmult""" +195 11 loss """nssa""" +195 11 regularizer """no""" +195 11 optimizer """adam""" +195 11 training_loop """owa""" +195 11 negative_sampler """basic""" +195 11 evaluator """rankbased""" +195 12 dataset """kinships""" +195 12 model """distmult""" +195 12 loss """nssa""" +195 12 regularizer """no""" +195 12 optimizer """adam""" +195 12 training_loop """owa""" +195 12 negative_sampler """basic""" +195 12 evaluator """rankbased""" +195 13 dataset """kinships""" +195 13 model """distmult""" +195 13 loss """nssa""" +195 13 regularizer """no""" +195 13 optimizer """adam""" +195 13 training_loop """owa""" +195 13 negative_sampler """basic""" +195 13 evaluator """rankbased""" +195 14 dataset """kinships""" +195 14 model """distmult""" +195 14 loss """nssa""" +195 14 regularizer """no""" +195 14 optimizer """adam""" +195 14 training_loop """owa""" +195 14 negative_sampler """basic""" +195 14 evaluator """rankbased""" +195 15 dataset """kinships""" +195 15 model """distmult""" +195 15 loss """nssa""" +195 15 regularizer """no""" +195 15 optimizer """adam""" +195 15 training_loop """owa""" +195 15 negative_sampler """basic""" +195 15 evaluator """rankbased""" +195 16 dataset """kinships""" +195 16 model """distmult""" +195 16 loss """nssa""" +195 16 regularizer """no""" +195 16 optimizer """adam""" +195 16 training_loop """owa""" +195 16 negative_sampler """basic""" +195 16 evaluator """rankbased""" +195 17 dataset """kinships""" +195 17 model """distmult""" +195 17 loss """nssa""" +195 17 regularizer """no""" +195 17 optimizer """adam""" +195 17 training_loop """owa""" +195 17 negative_sampler """basic""" +195 17 evaluator 
"""rankbased""" +195 18 dataset """kinships""" +195 18 model """distmult""" +195 18 loss """nssa""" +195 18 regularizer """no""" +195 18 optimizer """adam""" +195 18 training_loop """owa""" +195 18 negative_sampler """basic""" +195 18 evaluator """rankbased""" +195 19 dataset """kinships""" +195 19 model """distmult""" +195 19 loss """nssa""" +195 19 regularizer """no""" +195 19 optimizer """adam""" +195 19 training_loop """owa""" +195 19 negative_sampler """basic""" +195 19 evaluator """rankbased""" +195 20 dataset """kinships""" +195 20 model """distmult""" +195 20 loss """nssa""" +195 20 regularizer """no""" +195 20 optimizer """adam""" +195 20 training_loop """owa""" +195 20 negative_sampler """basic""" +195 20 evaluator """rankbased""" +195 21 dataset """kinships""" +195 21 model """distmult""" +195 21 loss """nssa""" +195 21 regularizer """no""" +195 21 optimizer """adam""" +195 21 training_loop """owa""" +195 21 negative_sampler """basic""" +195 21 evaluator """rankbased""" +195 22 dataset """kinships""" +195 22 model """distmult""" +195 22 loss """nssa""" +195 22 regularizer """no""" +195 22 optimizer """adam""" +195 22 training_loop """owa""" +195 22 negative_sampler """basic""" +195 22 evaluator """rankbased""" +195 23 dataset """kinships""" +195 23 model """distmult""" +195 23 loss """nssa""" +195 23 regularizer """no""" +195 23 optimizer """adam""" +195 23 training_loop """owa""" +195 23 negative_sampler """basic""" +195 23 evaluator """rankbased""" +195 24 dataset """kinships""" +195 24 model """distmult""" +195 24 loss """nssa""" +195 24 regularizer """no""" +195 24 optimizer """adam""" +195 24 training_loop """owa""" +195 24 negative_sampler """basic""" +195 24 evaluator """rankbased""" +195 25 dataset """kinships""" +195 25 model """distmult""" +195 25 loss """nssa""" +195 25 regularizer """no""" +195 25 optimizer """adam""" +195 25 training_loop """owa""" +195 25 negative_sampler """basic""" +195 25 evaluator """rankbased""" +195 26 dataset 
"""kinships""" +195 26 model """distmult""" +195 26 loss """nssa""" +195 26 regularizer """no""" +195 26 optimizer """adam""" +195 26 training_loop """owa""" +195 26 negative_sampler """basic""" +195 26 evaluator """rankbased""" +195 27 dataset """kinships""" +195 27 model """distmult""" +195 27 loss """nssa""" +195 27 regularizer """no""" +195 27 optimizer """adam""" +195 27 training_loop """owa""" +195 27 negative_sampler """basic""" +195 27 evaluator """rankbased""" +195 28 dataset """kinships""" +195 28 model """distmult""" +195 28 loss """nssa""" +195 28 regularizer """no""" +195 28 optimizer """adam""" +195 28 training_loop """owa""" +195 28 negative_sampler """basic""" +195 28 evaluator """rankbased""" +195 29 dataset """kinships""" +195 29 model """distmult""" +195 29 loss """nssa""" +195 29 regularizer """no""" +195 29 optimizer """adam""" +195 29 training_loop """owa""" +195 29 negative_sampler """basic""" +195 29 evaluator """rankbased""" +195 30 dataset """kinships""" +195 30 model """distmult""" +195 30 loss """nssa""" +195 30 regularizer """no""" +195 30 optimizer """adam""" +195 30 training_loop """owa""" +195 30 negative_sampler """basic""" +195 30 evaluator """rankbased""" +195 31 dataset """kinships""" +195 31 model """distmult""" +195 31 loss """nssa""" +195 31 regularizer """no""" +195 31 optimizer """adam""" +195 31 training_loop """owa""" +195 31 negative_sampler """basic""" +195 31 evaluator """rankbased""" +195 32 dataset """kinships""" +195 32 model """distmult""" +195 32 loss """nssa""" +195 32 regularizer """no""" +195 32 optimizer """adam""" +195 32 training_loop """owa""" +195 32 negative_sampler """basic""" +195 32 evaluator """rankbased""" +195 33 dataset """kinships""" +195 33 model """distmult""" +195 33 loss """nssa""" +195 33 regularizer """no""" +195 33 optimizer """adam""" +195 33 training_loop """owa""" +195 33 negative_sampler """basic""" +195 33 evaluator """rankbased""" +195 34 dataset """kinships""" +195 34 model 
"""distmult""" +195 34 loss """nssa""" +195 34 regularizer """no""" +195 34 optimizer """adam""" +195 34 training_loop """owa""" +195 34 negative_sampler """basic""" +195 34 evaluator """rankbased""" +195 35 dataset """kinships""" +195 35 model """distmult""" +195 35 loss """nssa""" +195 35 regularizer """no""" +195 35 optimizer """adam""" +195 35 training_loop """owa""" +195 35 negative_sampler """basic""" +195 35 evaluator """rankbased""" +195 36 dataset """kinships""" +195 36 model """distmult""" +195 36 loss """nssa""" +195 36 regularizer """no""" +195 36 optimizer """adam""" +195 36 training_loop """owa""" +195 36 negative_sampler """basic""" +195 36 evaluator """rankbased""" +195 37 dataset """kinships""" +195 37 model """distmult""" +195 37 loss """nssa""" +195 37 regularizer """no""" +195 37 optimizer """adam""" +195 37 training_loop """owa""" +195 37 negative_sampler """basic""" +195 37 evaluator """rankbased""" +195 38 dataset """kinships""" +195 38 model """distmult""" +195 38 loss """nssa""" +195 38 regularizer """no""" +195 38 optimizer """adam""" +195 38 training_loop """owa""" +195 38 negative_sampler """basic""" +195 38 evaluator """rankbased""" +195 39 dataset """kinships""" +195 39 model """distmult""" +195 39 loss """nssa""" +195 39 regularizer """no""" +195 39 optimizer """adam""" +195 39 training_loop """owa""" +195 39 negative_sampler """basic""" +195 39 evaluator """rankbased""" +195 40 dataset """kinships""" +195 40 model """distmult""" +195 40 loss """nssa""" +195 40 regularizer """no""" +195 40 optimizer """adam""" +195 40 training_loop """owa""" +195 40 negative_sampler """basic""" +195 40 evaluator """rankbased""" +195 41 dataset """kinships""" +195 41 model """distmult""" +195 41 loss """nssa""" +195 41 regularizer """no""" +195 41 optimizer """adam""" +195 41 training_loop """owa""" +195 41 negative_sampler """basic""" +195 41 evaluator """rankbased""" +195 42 dataset """kinships""" +195 42 model """distmult""" +195 42 loss """nssa""" 
+195 42 regularizer """no""" +195 42 optimizer """adam""" +195 42 training_loop """owa""" +195 42 negative_sampler """basic""" +195 42 evaluator """rankbased""" +195 43 dataset """kinships""" +195 43 model """distmult""" +195 43 loss """nssa""" +195 43 regularizer """no""" +195 43 optimizer """adam""" +195 43 training_loop """owa""" +195 43 negative_sampler """basic""" +195 43 evaluator """rankbased""" +195 44 dataset """kinships""" +195 44 model """distmult""" +195 44 loss """nssa""" +195 44 regularizer """no""" +195 44 optimizer """adam""" +195 44 training_loop """owa""" +195 44 negative_sampler """basic""" +195 44 evaluator """rankbased""" +195 45 dataset """kinships""" +195 45 model """distmult""" +195 45 loss """nssa""" +195 45 regularizer """no""" +195 45 optimizer """adam""" +195 45 training_loop """owa""" +195 45 negative_sampler """basic""" +195 45 evaluator """rankbased""" +195 46 dataset """kinships""" +195 46 model """distmult""" +195 46 loss """nssa""" +195 46 regularizer """no""" +195 46 optimizer """adam""" +195 46 training_loop """owa""" +195 46 negative_sampler """basic""" +195 46 evaluator """rankbased""" +195 47 dataset """kinships""" +195 47 model """distmult""" +195 47 loss """nssa""" +195 47 regularizer """no""" +195 47 optimizer """adam""" +195 47 training_loop """owa""" +195 47 negative_sampler """basic""" +195 47 evaluator """rankbased""" +195 48 dataset """kinships""" +195 48 model """distmult""" +195 48 loss """nssa""" +195 48 regularizer """no""" +195 48 optimizer """adam""" +195 48 training_loop """owa""" +195 48 negative_sampler """basic""" +195 48 evaluator """rankbased""" +195 49 dataset """kinships""" +195 49 model """distmult""" +195 49 loss """nssa""" +195 49 regularizer """no""" +195 49 optimizer """adam""" +195 49 training_loop """owa""" +195 49 negative_sampler """basic""" +195 49 evaluator """rankbased""" +195 50 dataset """kinships""" +195 50 model """distmult""" +195 50 loss """nssa""" +195 50 regularizer """no""" +195 50 
optimizer """adam""" +195 50 training_loop """owa""" +195 50 negative_sampler """basic""" +195 50 evaluator """rankbased""" +195 51 dataset """kinships""" +195 51 model """distmult""" +195 51 loss """nssa""" +195 51 regularizer """no""" +195 51 optimizer """adam""" +195 51 training_loop """owa""" +195 51 negative_sampler """basic""" +195 51 evaluator """rankbased""" +195 52 dataset """kinships""" +195 52 model """distmult""" +195 52 loss """nssa""" +195 52 regularizer """no""" +195 52 optimizer """adam""" +195 52 training_loop """owa""" +195 52 negative_sampler """basic""" +195 52 evaluator """rankbased""" +195 53 dataset """kinships""" +195 53 model """distmult""" +195 53 loss """nssa""" +195 53 regularizer """no""" +195 53 optimizer """adam""" +195 53 training_loop """owa""" +195 53 negative_sampler """basic""" +195 53 evaluator """rankbased""" +195 54 dataset """kinships""" +195 54 model """distmult""" +195 54 loss """nssa""" +195 54 regularizer """no""" +195 54 optimizer """adam""" +195 54 training_loop """owa""" +195 54 negative_sampler """basic""" +195 54 evaluator """rankbased""" +195 55 dataset """kinships""" +195 55 model """distmult""" +195 55 loss """nssa""" +195 55 regularizer """no""" +195 55 optimizer """adam""" +195 55 training_loop """owa""" +195 55 negative_sampler """basic""" +195 55 evaluator """rankbased""" +195 56 dataset """kinships""" +195 56 model """distmult""" +195 56 loss """nssa""" +195 56 regularizer """no""" +195 56 optimizer """adam""" +195 56 training_loop """owa""" +195 56 negative_sampler """basic""" +195 56 evaluator """rankbased""" +195 57 dataset """kinships""" +195 57 model """distmult""" +195 57 loss """nssa""" +195 57 regularizer """no""" +195 57 optimizer """adam""" +195 57 training_loop """owa""" +195 57 negative_sampler """basic""" +195 57 evaluator """rankbased""" +195 58 dataset """kinships""" +195 58 model """distmult""" +195 58 loss """nssa""" +195 58 regularizer """no""" +195 58 optimizer """adam""" +195 58 
training_loop """owa""" +195 58 negative_sampler """basic""" +195 58 evaluator """rankbased""" +195 59 dataset """kinships""" +195 59 model """distmult""" +195 59 loss """nssa""" +195 59 regularizer """no""" +195 59 optimizer """adam""" +195 59 training_loop """owa""" +195 59 negative_sampler """basic""" +195 59 evaluator """rankbased""" +195 60 dataset """kinships""" +195 60 model """distmult""" +195 60 loss """nssa""" +195 60 regularizer """no""" +195 60 optimizer """adam""" +195 60 training_loop """owa""" +195 60 negative_sampler """basic""" +195 60 evaluator """rankbased""" +195 61 dataset """kinships""" +195 61 model """distmult""" +195 61 loss """nssa""" +195 61 regularizer """no""" +195 61 optimizer """adam""" +195 61 training_loop """owa""" +195 61 negative_sampler """basic""" +195 61 evaluator """rankbased""" +195 62 dataset """kinships""" +195 62 model """distmult""" +195 62 loss """nssa""" +195 62 regularizer """no""" +195 62 optimizer """adam""" +195 62 training_loop """owa""" +195 62 negative_sampler """basic""" +195 62 evaluator """rankbased""" +195 63 dataset """kinships""" +195 63 model """distmult""" +195 63 loss """nssa""" +195 63 regularizer """no""" +195 63 optimizer """adam""" +195 63 training_loop """owa""" +195 63 negative_sampler """basic""" +195 63 evaluator """rankbased""" +195 64 dataset """kinships""" +195 64 model """distmult""" +195 64 loss """nssa""" +195 64 regularizer """no""" +195 64 optimizer """adam""" +195 64 training_loop """owa""" +195 64 negative_sampler """basic""" +195 64 evaluator """rankbased""" +195 65 dataset """kinships""" +195 65 model """distmult""" +195 65 loss """nssa""" +195 65 regularizer """no""" +195 65 optimizer """adam""" +195 65 training_loop """owa""" +195 65 negative_sampler """basic""" +195 65 evaluator """rankbased""" +195 66 dataset """kinships""" +195 66 model """distmult""" +195 66 loss """nssa""" +195 66 regularizer """no""" +195 66 optimizer """adam""" +195 66 training_loop """owa""" +195 66 
negative_sampler """basic""" +195 66 evaluator """rankbased""" +195 67 dataset """kinships""" +195 67 model """distmult""" +195 67 loss """nssa""" +195 67 regularizer """no""" +195 67 optimizer """adam""" +195 67 training_loop """owa""" +195 67 negative_sampler """basic""" +195 67 evaluator """rankbased""" +195 68 dataset """kinships""" +195 68 model """distmult""" +195 68 loss """nssa""" +195 68 regularizer """no""" +195 68 optimizer """adam""" +195 68 training_loop """owa""" +195 68 negative_sampler """basic""" +195 68 evaluator """rankbased""" +195 69 dataset """kinships""" +195 69 model """distmult""" +195 69 loss """nssa""" +195 69 regularizer """no""" +195 69 optimizer """adam""" +195 69 training_loop """owa""" +195 69 negative_sampler """basic""" +195 69 evaluator """rankbased""" +195 70 dataset """kinships""" +195 70 model """distmult""" +195 70 loss """nssa""" +195 70 regularizer """no""" +195 70 optimizer """adam""" +195 70 training_loop """owa""" +195 70 negative_sampler """basic""" +195 70 evaluator """rankbased""" +195 71 dataset """kinships""" +195 71 model """distmult""" +195 71 loss """nssa""" +195 71 regularizer """no""" +195 71 optimizer """adam""" +195 71 training_loop """owa""" +195 71 negative_sampler """basic""" +195 71 evaluator """rankbased""" +195 72 dataset """kinships""" +195 72 model """distmult""" +195 72 loss """nssa""" +195 72 regularizer """no""" +195 72 optimizer """adam""" +195 72 training_loop """owa""" +195 72 negative_sampler """basic""" +195 72 evaluator """rankbased""" +195 73 dataset """kinships""" +195 73 model """distmult""" +195 73 loss """nssa""" +195 73 regularizer """no""" +195 73 optimizer """adam""" +195 73 training_loop """owa""" +195 73 negative_sampler """basic""" +195 73 evaluator """rankbased""" +195 74 dataset """kinships""" +195 74 model """distmult""" +195 74 loss """nssa""" +195 74 regularizer """no""" +195 74 optimizer """adam""" +195 74 training_loop """owa""" +195 74 negative_sampler """basic""" +195 74 
evaluator """rankbased""" +195 75 dataset """kinships""" +195 75 model """distmult""" +195 75 loss """nssa""" +195 75 regularizer """no""" +195 75 optimizer """adam""" +195 75 training_loop """owa""" +195 75 negative_sampler """basic""" +195 75 evaluator """rankbased""" +195 76 dataset """kinships""" +195 76 model """distmult""" +195 76 loss """nssa""" +195 76 regularizer """no""" +195 76 optimizer """adam""" +195 76 training_loop """owa""" +195 76 negative_sampler """basic""" +195 76 evaluator """rankbased""" +195 77 dataset """kinships""" +195 77 model """distmult""" +195 77 loss """nssa""" +195 77 regularizer """no""" +195 77 optimizer """adam""" +195 77 training_loop """owa""" +195 77 negative_sampler """basic""" +195 77 evaluator """rankbased""" +195 78 dataset """kinships""" +195 78 model """distmult""" +195 78 loss """nssa""" +195 78 regularizer """no""" +195 78 optimizer """adam""" +195 78 training_loop """owa""" +195 78 negative_sampler """basic""" +195 78 evaluator """rankbased""" +195 79 dataset """kinships""" +195 79 model """distmult""" +195 79 loss """nssa""" +195 79 regularizer """no""" +195 79 optimizer """adam""" +195 79 training_loop """owa""" +195 79 negative_sampler """basic""" +195 79 evaluator """rankbased""" +195 80 dataset """kinships""" +195 80 model """distmult""" +195 80 loss """nssa""" +195 80 regularizer """no""" +195 80 optimizer """adam""" +195 80 training_loop """owa""" +195 80 negative_sampler """basic""" +195 80 evaluator """rankbased""" +195 81 dataset """kinships""" +195 81 model """distmult""" +195 81 loss """nssa""" +195 81 regularizer """no""" +195 81 optimizer """adam""" +195 81 training_loop """owa""" +195 81 negative_sampler """basic""" +195 81 evaluator """rankbased""" +195 82 dataset """kinships""" +195 82 model """distmult""" +195 82 loss """nssa""" +195 82 regularizer """no""" +195 82 optimizer """adam""" +195 82 training_loop """owa""" +195 82 negative_sampler """basic""" +195 82 evaluator """rankbased""" +195 83 
dataset """kinships""" +195 83 model """distmult""" +195 83 loss """nssa""" +195 83 regularizer """no""" +195 83 optimizer """adam""" +195 83 training_loop """owa""" +195 83 negative_sampler """basic""" +195 83 evaluator """rankbased""" +195 84 dataset """kinships""" +195 84 model """distmult""" +195 84 loss """nssa""" +195 84 regularizer """no""" +195 84 optimizer """adam""" +195 84 training_loop """owa""" +195 84 negative_sampler """basic""" +195 84 evaluator """rankbased""" +195 85 dataset """kinships""" +195 85 model """distmult""" +195 85 loss """nssa""" +195 85 regularizer """no""" +195 85 optimizer """adam""" +195 85 training_loop """owa""" +195 85 negative_sampler """basic""" +195 85 evaluator """rankbased""" +195 86 dataset """kinships""" +195 86 model """distmult""" +195 86 loss """nssa""" +195 86 regularizer """no""" +195 86 optimizer """adam""" +195 86 training_loop """owa""" +195 86 negative_sampler """basic""" +195 86 evaluator """rankbased""" +195 87 dataset """kinships""" +195 87 model """distmult""" +195 87 loss """nssa""" +195 87 regularizer """no""" +195 87 optimizer """adam""" +195 87 training_loop """owa""" +195 87 negative_sampler """basic""" +195 87 evaluator """rankbased""" +195 88 dataset """kinships""" +195 88 model """distmult""" +195 88 loss """nssa""" +195 88 regularizer """no""" +195 88 optimizer """adam""" +195 88 training_loop """owa""" +195 88 negative_sampler """basic""" +195 88 evaluator """rankbased""" +195 89 dataset """kinships""" +195 89 model """distmult""" +195 89 loss """nssa""" +195 89 regularizer """no""" +195 89 optimizer """adam""" +195 89 training_loop """owa""" +195 89 negative_sampler """basic""" +195 89 evaluator """rankbased""" +195 90 dataset """kinships""" +195 90 model """distmult""" +195 90 loss """nssa""" +195 90 regularizer """no""" +195 90 optimizer """adam""" +195 90 training_loop """owa""" +195 90 negative_sampler """basic""" +195 90 evaluator """rankbased""" +195 91 dataset """kinships""" +195 91 model 
"""distmult""" +195 91 loss """nssa""" +195 91 regularizer """no""" +195 91 optimizer """adam""" +195 91 training_loop """owa""" +195 91 negative_sampler """basic""" +195 91 evaluator """rankbased""" +195 92 dataset """kinships""" +195 92 model """distmult""" +195 92 loss """nssa""" +195 92 regularizer """no""" +195 92 optimizer """adam""" +195 92 training_loop """owa""" +195 92 negative_sampler """basic""" +195 92 evaluator """rankbased""" +195 93 dataset """kinships""" +195 93 model """distmult""" +195 93 loss """nssa""" +195 93 regularizer """no""" +195 93 optimizer """adam""" +195 93 training_loop """owa""" +195 93 negative_sampler """basic""" +195 93 evaluator """rankbased""" +195 94 dataset """kinships""" +195 94 model """distmult""" +195 94 loss """nssa""" +195 94 regularizer """no""" +195 94 optimizer """adam""" +195 94 training_loop """owa""" +195 94 negative_sampler """basic""" +195 94 evaluator """rankbased""" +195 95 dataset """kinships""" +195 95 model """distmult""" +195 95 loss """nssa""" +195 95 regularizer """no""" +195 95 optimizer """adam""" +195 95 training_loop """owa""" +195 95 negative_sampler """basic""" +195 95 evaluator """rankbased""" +195 96 dataset """kinships""" +195 96 model """distmult""" +195 96 loss """nssa""" +195 96 regularizer """no""" +195 96 optimizer """adam""" +195 96 training_loop """owa""" +195 96 negative_sampler """basic""" +195 96 evaluator """rankbased""" +195 97 dataset """kinships""" +195 97 model """distmult""" +195 97 loss """nssa""" +195 97 regularizer """no""" +195 97 optimizer """adam""" +195 97 training_loop """owa""" +195 97 negative_sampler """basic""" +195 97 evaluator """rankbased""" +195 98 dataset """kinships""" +195 98 model """distmult""" +195 98 loss """nssa""" +195 98 regularizer """no""" +195 98 optimizer """adam""" +195 98 training_loop """owa""" +195 98 negative_sampler """basic""" +195 98 evaluator """rankbased""" +195 99 dataset """kinships""" +195 99 model """distmult""" +195 99 loss """nssa""" 
+195 99 regularizer """no""" +195 99 optimizer """adam""" +195 99 training_loop """owa""" +195 99 negative_sampler """basic""" +195 99 evaluator """rankbased""" +195 100 dataset """kinships""" +195 100 model """distmult""" +195 100 loss """nssa""" +195 100 regularizer """no""" +195 100 optimizer """adam""" +195 100 training_loop """owa""" +195 100 negative_sampler """basic""" +195 100 evaluator """rankbased""" +196 1 model.embedding_dim 2.0 +196 1 optimizer.lr 0.0025318538523266443 +196 1 negative_sampler.num_negs_per_pos 92.0 +196 1 training.batch_size 0.0 +196 2 model.embedding_dim 1.0 +196 2 optimizer.lr 0.00749311361815954 +196 2 negative_sampler.num_negs_per_pos 38.0 +196 2 training.batch_size 2.0 +196 3 model.embedding_dim 2.0 +196 3 optimizer.lr 0.06855572204192403 +196 3 negative_sampler.num_negs_per_pos 23.0 +196 3 training.batch_size 2.0 +196 4 model.embedding_dim 2.0 +196 4 optimizer.lr 0.002624650398316379 +196 4 negative_sampler.num_negs_per_pos 69.0 +196 4 training.batch_size 1.0 +196 5 model.embedding_dim 2.0 +196 5 optimizer.lr 0.0026197688684281534 +196 5 negative_sampler.num_negs_per_pos 58.0 +196 5 training.batch_size 2.0 +196 6 model.embedding_dim 1.0 +196 6 optimizer.lr 0.07100346906493993 +196 6 negative_sampler.num_negs_per_pos 38.0 +196 6 training.batch_size 0.0 +196 7 model.embedding_dim 0.0 +196 7 optimizer.lr 0.013289739451586626 +196 7 negative_sampler.num_negs_per_pos 10.0 +196 7 training.batch_size 0.0 +196 8 model.embedding_dim 0.0 +196 8 optimizer.lr 0.03523088695941533 +196 8 negative_sampler.num_negs_per_pos 28.0 +196 8 training.batch_size 2.0 +196 9 model.embedding_dim 0.0 +196 9 optimizer.lr 0.03649355027793704 +196 9 negative_sampler.num_negs_per_pos 31.0 +196 9 training.batch_size 2.0 +196 10 model.embedding_dim 2.0 +196 10 optimizer.lr 0.00628461733436692 +196 10 negative_sampler.num_negs_per_pos 58.0 +196 10 training.batch_size 1.0 +196 11 model.embedding_dim 0.0 +196 11 optimizer.lr 0.04020596911743737 +196 11 
negative_sampler.num_negs_per_pos 34.0 +196 11 training.batch_size 0.0 +196 12 model.embedding_dim 2.0 +196 12 optimizer.lr 0.03692023866716314 +196 12 negative_sampler.num_negs_per_pos 97.0 +196 12 training.batch_size 2.0 +196 13 model.embedding_dim 2.0 +196 13 optimizer.lr 0.0029682049978234774 +196 13 negative_sampler.num_negs_per_pos 72.0 +196 13 training.batch_size 1.0 +196 14 model.embedding_dim 2.0 +196 14 optimizer.lr 0.02019280171598071 +196 14 negative_sampler.num_negs_per_pos 17.0 +196 14 training.batch_size 0.0 +196 15 model.embedding_dim 0.0 +196 15 optimizer.lr 0.0040491238547460204 +196 15 negative_sampler.num_negs_per_pos 14.0 +196 15 training.batch_size 1.0 +196 16 model.embedding_dim 0.0 +196 16 optimizer.lr 0.001485602953720375 +196 16 negative_sampler.num_negs_per_pos 11.0 +196 16 training.batch_size 1.0 +196 17 model.embedding_dim 2.0 +196 17 optimizer.lr 0.002942908783431497 +196 17 negative_sampler.num_negs_per_pos 14.0 +196 17 training.batch_size 2.0 +196 18 model.embedding_dim 1.0 +196 18 optimizer.lr 0.03367298334519174 +196 18 negative_sampler.num_negs_per_pos 24.0 +196 18 training.batch_size 1.0 +196 19 model.embedding_dim 1.0 +196 19 optimizer.lr 0.0013099384058185412 +196 19 negative_sampler.num_negs_per_pos 83.0 +196 19 training.batch_size 0.0 +196 20 model.embedding_dim 1.0 +196 20 optimizer.lr 0.04322565029646051 +196 20 negative_sampler.num_negs_per_pos 8.0 +196 20 training.batch_size 2.0 +196 21 model.embedding_dim 1.0 +196 21 optimizer.lr 0.0020114602702547314 +196 21 negative_sampler.num_negs_per_pos 57.0 +196 21 training.batch_size 1.0 +196 22 model.embedding_dim 2.0 +196 22 optimizer.lr 0.09261033714939196 +196 22 negative_sampler.num_negs_per_pos 70.0 +196 22 training.batch_size 2.0 +196 23 model.embedding_dim 1.0 +196 23 optimizer.lr 0.01893847487059479 +196 23 negative_sampler.num_negs_per_pos 56.0 +196 23 training.batch_size 1.0 +196 24 model.embedding_dim 0.0 +196 24 optimizer.lr 0.0033946502854799686 +196 24 
negative_sampler.num_negs_per_pos 85.0 +196 24 training.batch_size 1.0 +196 25 model.embedding_dim 0.0 +196 25 optimizer.lr 0.06418167776588775 +196 25 negative_sampler.num_negs_per_pos 40.0 +196 25 training.batch_size 2.0 +196 26 model.embedding_dim 1.0 +196 26 optimizer.lr 0.003916547024843775 +196 26 negative_sampler.num_negs_per_pos 47.0 +196 26 training.batch_size 0.0 +196 27 model.embedding_dim 1.0 +196 27 optimizer.lr 0.04161961821418776 +196 27 negative_sampler.num_negs_per_pos 36.0 +196 27 training.batch_size 1.0 +196 28 model.embedding_dim 0.0 +196 28 optimizer.lr 0.001341964475391954 +196 28 negative_sampler.num_negs_per_pos 21.0 +196 28 training.batch_size 1.0 +196 29 model.embedding_dim 1.0 +196 29 optimizer.lr 0.0010335602433631948 +196 29 negative_sampler.num_negs_per_pos 51.0 +196 29 training.batch_size 0.0 +196 30 model.embedding_dim 1.0 +196 30 optimizer.lr 0.001081124303746358 +196 30 negative_sampler.num_negs_per_pos 71.0 +196 30 training.batch_size 2.0 +196 31 model.embedding_dim 0.0 +196 31 optimizer.lr 0.001584811464621085 +196 31 negative_sampler.num_negs_per_pos 80.0 +196 31 training.batch_size 0.0 +196 32 model.embedding_dim 2.0 +196 32 optimizer.lr 0.034667806349894977 +196 32 negative_sampler.num_negs_per_pos 65.0 +196 32 training.batch_size 2.0 +196 33 model.embedding_dim 1.0 +196 33 optimizer.lr 0.06686799339181118 +196 33 negative_sampler.num_negs_per_pos 81.0 +196 33 training.batch_size 1.0 +196 34 model.embedding_dim 2.0 +196 34 optimizer.lr 0.06530144909402551 +196 34 negative_sampler.num_negs_per_pos 12.0 +196 34 training.batch_size 1.0 +196 35 model.embedding_dim 0.0 +196 35 optimizer.lr 0.01732651943285605 +196 35 negative_sampler.num_negs_per_pos 76.0 +196 35 training.batch_size 2.0 +196 36 model.embedding_dim 2.0 +196 36 optimizer.lr 0.05602052987958223 +196 36 negative_sampler.num_negs_per_pos 69.0 +196 36 training.batch_size 1.0 +196 37 model.embedding_dim 1.0 +196 37 optimizer.lr 0.054704077471732754 +196 37 
negative_sampler.num_negs_per_pos 26.0 +196 37 training.batch_size 1.0 +196 38 model.embedding_dim 2.0 +196 38 optimizer.lr 0.046001971916854886 +196 38 negative_sampler.num_negs_per_pos 48.0 +196 38 training.batch_size 1.0 +196 39 model.embedding_dim 0.0 +196 39 optimizer.lr 0.008689396431263248 +196 39 negative_sampler.num_negs_per_pos 25.0 +196 39 training.batch_size 2.0 +196 40 model.embedding_dim 1.0 +196 40 optimizer.lr 0.0011095595326855137 +196 40 negative_sampler.num_negs_per_pos 0.0 +196 40 training.batch_size 2.0 +196 41 model.embedding_dim 2.0 +196 41 optimizer.lr 0.0013473062075858872 +196 41 negative_sampler.num_negs_per_pos 23.0 +196 41 training.batch_size 1.0 +196 42 model.embedding_dim 1.0 +196 42 optimizer.lr 0.011931692680594038 +196 42 negative_sampler.num_negs_per_pos 10.0 +196 42 training.batch_size 2.0 +196 43 model.embedding_dim 0.0 +196 43 optimizer.lr 0.053116992371907444 +196 43 negative_sampler.num_negs_per_pos 17.0 +196 43 training.batch_size 0.0 +196 44 model.embedding_dim 1.0 +196 44 optimizer.lr 0.04801798326089748 +196 44 negative_sampler.num_negs_per_pos 57.0 +196 44 training.batch_size 2.0 +196 45 model.embedding_dim 2.0 +196 45 optimizer.lr 0.004551388792876806 +196 45 negative_sampler.num_negs_per_pos 32.0 +196 45 training.batch_size 0.0 +196 46 model.embedding_dim 0.0 +196 46 optimizer.lr 0.02894464392469257 +196 46 negative_sampler.num_negs_per_pos 78.0 +196 46 training.batch_size 1.0 +196 47 model.embedding_dim 2.0 +196 47 optimizer.lr 0.001488856327260472 +196 47 negative_sampler.num_negs_per_pos 67.0 +196 47 training.batch_size 2.0 +196 48 model.embedding_dim 1.0 +196 48 optimizer.lr 0.03349047101551467 +196 48 negative_sampler.num_negs_per_pos 26.0 +196 48 training.batch_size 2.0 +196 49 model.embedding_dim 0.0 +196 49 optimizer.lr 0.002275321281156689 +196 49 negative_sampler.num_negs_per_pos 69.0 +196 49 training.batch_size 2.0 +196 50 model.embedding_dim 1.0 +196 50 optimizer.lr 0.007798082303841597 +196 50 
negative_sampler.num_negs_per_pos 40.0 +196 50 training.batch_size 2.0 +196 51 model.embedding_dim 1.0 +196 51 optimizer.lr 0.0027916146738206746 +196 51 negative_sampler.num_negs_per_pos 0.0 +196 51 training.batch_size 2.0 +196 52 model.embedding_dim 1.0 +196 52 optimizer.lr 0.014052896467902455 +196 52 negative_sampler.num_negs_per_pos 24.0 +196 52 training.batch_size 1.0 +196 53 model.embedding_dim 2.0 +196 53 optimizer.lr 0.0062628847568936994 +196 53 negative_sampler.num_negs_per_pos 19.0 +196 53 training.batch_size 2.0 +196 54 model.embedding_dim 0.0 +196 54 optimizer.lr 0.035542687066050446 +196 54 negative_sampler.num_negs_per_pos 67.0 +196 54 training.batch_size 2.0 +196 55 model.embedding_dim 1.0 +196 55 optimizer.lr 0.0043508167869889675 +196 55 negative_sampler.num_negs_per_pos 69.0 +196 55 training.batch_size 2.0 +196 56 model.embedding_dim 1.0 +196 56 optimizer.lr 0.02306036565259572 +196 56 negative_sampler.num_negs_per_pos 95.0 +196 56 training.batch_size 2.0 +196 57 model.embedding_dim 0.0 +196 57 optimizer.lr 0.01197098052262916 +196 57 negative_sampler.num_negs_per_pos 4.0 +196 57 training.batch_size 2.0 +196 58 model.embedding_dim 2.0 +196 58 optimizer.lr 0.012079718892642454 +196 58 negative_sampler.num_negs_per_pos 47.0 +196 58 training.batch_size 0.0 +196 59 model.embedding_dim 0.0 +196 59 optimizer.lr 0.020341394081448635 +196 59 negative_sampler.num_negs_per_pos 15.0 +196 59 training.batch_size 0.0 +196 60 model.embedding_dim 1.0 +196 60 optimizer.lr 0.0997253474362748 +196 60 negative_sampler.num_negs_per_pos 3.0 +196 60 training.batch_size 0.0 +196 61 model.embedding_dim 1.0 +196 61 optimizer.lr 0.001028956281515055 +196 61 negative_sampler.num_negs_per_pos 89.0 +196 61 training.batch_size 1.0 +196 62 model.embedding_dim 2.0 +196 62 optimizer.lr 0.0028441170096209422 +196 62 negative_sampler.num_negs_per_pos 16.0 +196 62 training.batch_size 0.0 +196 63 model.embedding_dim 2.0 +196 63 optimizer.lr 0.09086568698853688 +196 63 
negative_sampler.num_negs_per_pos 66.0 +196 63 training.batch_size 2.0 +196 64 model.embedding_dim 2.0 +196 64 optimizer.lr 0.013519087321408637 +196 64 negative_sampler.num_negs_per_pos 90.0 +196 64 training.batch_size 0.0 +196 65 model.embedding_dim 0.0 +196 65 optimizer.lr 0.01135270678987335 +196 65 negative_sampler.num_negs_per_pos 40.0 +196 65 training.batch_size 0.0 +196 66 model.embedding_dim 1.0 +196 66 optimizer.lr 0.015881621128772085 +196 66 negative_sampler.num_negs_per_pos 67.0 +196 66 training.batch_size 2.0 +196 67 model.embedding_dim 1.0 +196 67 optimizer.lr 0.001878335973489748 +196 67 negative_sampler.num_negs_per_pos 77.0 +196 67 training.batch_size 1.0 +196 68 model.embedding_dim 2.0 +196 68 optimizer.lr 0.07533159283566641 +196 68 negative_sampler.num_negs_per_pos 3.0 +196 68 training.batch_size 1.0 +196 69 model.embedding_dim 1.0 +196 69 optimizer.lr 0.014796946045793045 +196 69 negative_sampler.num_negs_per_pos 45.0 +196 69 training.batch_size 2.0 +196 70 model.embedding_dim 1.0 +196 70 optimizer.lr 0.0025410229800733016 +196 70 negative_sampler.num_negs_per_pos 51.0 +196 70 training.batch_size 2.0 +196 71 model.embedding_dim 2.0 +196 71 optimizer.lr 0.03220052619705473 +196 71 negative_sampler.num_negs_per_pos 84.0 +196 71 training.batch_size 0.0 +196 72 model.embedding_dim 1.0 +196 72 optimizer.lr 0.0021297588691433465 +196 72 negative_sampler.num_negs_per_pos 90.0 +196 72 training.batch_size 2.0 +196 73 model.embedding_dim 2.0 +196 73 optimizer.lr 0.0020154290554807285 +196 73 negative_sampler.num_negs_per_pos 97.0 +196 73 training.batch_size 2.0 +196 74 model.embedding_dim 0.0 +196 74 optimizer.lr 0.004408417061707818 +196 74 negative_sampler.num_negs_per_pos 76.0 +196 74 training.batch_size 1.0 +196 75 model.embedding_dim 0.0 +196 75 optimizer.lr 0.0010821698651186315 +196 75 negative_sampler.num_negs_per_pos 84.0 +196 75 training.batch_size 2.0 +196 76 model.embedding_dim 2.0 +196 76 optimizer.lr 0.053588794334802986 +196 76 
negative_sampler.num_negs_per_pos 0.0 +196 76 training.batch_size 1.0 +196 77 model.embedding_dim 0.0 +196 77 optimizer.lr 0.010734556709677698 +196 77 negative_sampler.num_negs_per_pos 50.0 +196 77 training.batch_size 0.0 +196 78 model.embedding_dim 0.0 +196 78 optimizer.lr 0.03138750091843783 +196 78 negative_sampler.num_negs_per_pos 89.0 +196 78 training.batch_size 1.0 +196 1 dataset """wn18rr""" +196 1 model """distmult""" +196 1 loss """bceaftersigmoid""" +196 1 regularizer """no""" +196 1 optimizer """adam""" +196 1 training_loop """owa""" +196 1 negative_sampler """basic""" +196 1 evaluator """rankbased""" +196 2 dataset """wn18rr""" +196 2 model """distmult""" +196 2 loss """bceaftersigmoid""" +196 2 regularizer """no""" +196 2 optimizer """adam""" +196 2 training_loop """owa""" +196 2 negative_sampler """basic""" +196 2 evaluator """rankbased""" +196 3 dataset """wn18rr""" +196 3 model """distmult""" +196 3 loss """bceaftersigmoid""" +196 3 regularizer """no""" +196 3 optimizer """adam""" +196 3 training_loop """owa""" +196 3 negative_sampler """basic""" +196 3 evaluator """rankbased""" +196 4 dataset """wn18rr""" +196 4 model """distmult""" +196 4 loss """bceaftersigmoid""" +196 4 regularizer """no""" +196 4 optimizer """adam""" +196 4 training_loop """owa""" +196 4 negative_sampler """basic""" +196 4 evaluator """rankbased""" +196 5 dataset """wn18rr""" +196 5 model """distmult""" +196 5 loss """bceaftersigmoid""" +196 5 regularizer """no""" +196 5 optimizer """adam""" +196 5 training_loop """owa""" +196 5 negative_sampler """basic""" +196 5 evaluator """rankbased""" +196 6 dataset """wn18rr""" +196 6 model """distmult""" +196 6 loss """bceaftersigmoid""" +196 6 regularizer """no""" +196 6 optimizer """adam""" +196 6 training_loop """owa""" +196 6 negative_sampler """basic""" +196 6 evaluator """rankbased""" +196 7 dataset """wn18rr""" +196 7 model """distmult""" +196 7 loss """bceaftersigmoid""" +196 7 regularizer """no""" +196 7 optimizer """adam""" 
+196 7 training_loop """owa""" +196 7 negative_sampler """basic""" +196 7 evaluator """rankbased""" +196 8 dataset """wn18rr""" +196 8 model """distmult""" +196 8 loss """bceaftersigmoid""" +196 8 regularizer """no""" +196 8 optimizer """adam""" +196 8 training_loop """owa""" +196 8 negative_sampler """basic""" +196 8 evaluator """rankbased""" +196 9 dataset """wn18rr""" +196 9 model """distmult""" +196 9 loss """bceaftersigmoid""" +196 9 regularizer """no""" +196 9 optimizer """adam""" +196 9 training_loop """owa""" +196 9 negative_sampler """basic""" +196 9 evaluator """rankbased""" +196 10 dataset """wn18rr""" +196 10 model """distmult""" +196 10 loss """bceaftersigmoid""" +196 10 regularizer """no""" +196 10 optimizer """adam""" +196 10 training_loop """owa""" +196 10 negative_sampler """basic""" +196 10 evaluator """rankbased""" +196 11 dataset """wn18rr""" +196 11 model """distmult""" +196 11 loss """bceaftersigmoid""" +196 11 regularizer """no""" +196 11 optimizer """adam""" +196 11 training_loop """owa""" +196 11 negative_sampler """basic""" +196 11 evaluator """rankbased""" +196 12 dataset """wn18rr""" +196 12 model """distmult""" +196 12 loss """bceaftersigmoid""" +196 12 regularizer """no""" +196 12 optimizer """adam""" +196 12 training_loop """owa""" +196 12 negative_sampler """basic""" +196 12 evaluator """rankbased""" +196 13 dataset """wn18rr""" +196 13 model """distmult""" +196 13 loss """bceaftersigmoid""" +196 13 regularizer """no""" +196 13 optimizer """adam""" +196 13 training_loop """owa""" +196 13 negative_sampler """basic""" +196 13 evaluator """rankbased""" +196 14 dataset """wn18rr""" +196 14 model """distmult""" +196 14 loss """bceaftersigmoid""" +196 14 regularizer """no""" +196 14 optimizer """adam""" +196 14 training_loop """owa""" +196 14 negative_sampler """basic""" +196 14 evaluator """rankbased""" +196 15 dataset """wn18rr""" +196 15 model """distmult""" +196 15 loss """bceaftersigmoid""" +196 15 regularizer """no""" +196 15 
optimizer """adam""" +196 15 training_loop """owa""" +196 15 negative_sampler """basic""" +196 15 evaluator """rankbased""" +196 16 dataset """wn18rr""" +196 16 model """distmult""" +196 16 loss """bceaftersigmoid""" +196 16 regularizer """no""" +196 16 optimizer """adam""" +196 16 training_loop """owa""" +196 16 negative_sampler """basic""" +196 16 evaluator """rankbased""" +196 17 dataset """wn18rr""" +196 17 model """distmult""" +196 17 loss """bceaftersigmoid""" +196 17 regularizer """no""" +196 17 optimizer """adam""" +196 17 training_loop """owa""" +196 17 negative_sampler """basic""" +196 17 evaluator """rankbased""" +196 18 dataset """wn18rr""" +196 18 model """distmult""" +196 18 loss """bceaftersigmoid""" +196 18 regularizer """no""" +196 18 optimizer """adam""" +196 18 training_loop """owa""" +196 18 negative_sampler """basic""" +196 18 evaluator """rankbased""" +196 19 dataset """wn18rr""" +196 19 model """distmult""" +196 19 loss """bceaftersigmoid""" +196 19 regularizer """no""" +196 19 optimizer """adam""" +196 19 training_loop """owa""" +196 19 negative_sampler """basic""" +196 19 evaluator """rankbased""" +196 20 dataset """wn18rr""" +196 20 model """distmult""" +196 20 loss """bceaftersigmoid""" +196 20 regularizer """no""" +196 20 optimizer """adam""" +196 20 training_loop """owa""" +196 20 negative_sampler """basic""" +196 20 evaluator """rankbased""" +196 21 dataset """wn18rr""" +196 21 model """distmult""" +196 21 loss """bceaftersigmoid""" +196 21 regularizer """no""" +196 21 optimizer """adam""" +196 21 training_loop """owa""" +196 21 negative_sampler """basic""" +196 21 evaluator """rankbased""" +196 22 dataset """wn18rr""" +196 22 model """distmult""" +196 22 loss """bceaftersigmoid""" +196 22 regularizer """no""" +196 22 optimizer """adam""" +196 22 training_loop """owa""" +196 22 negative_sampler """basic""" +196 22 evaluator """rankbased""" +196 23 dataset """wn18rr""" +196 23 model """distmult""" +196 23 loss """bceaftersigmoid""" +196 
23 regularizer """no""" +196 23 optimizer """adam""" +196 23 training_loop """owa""" +196 23 negative_sampler """basic""" +196 23 evaluator """rankbased""" +196 24 dataset """wn18rr""" +196 24 model """distmult""" +196 24 loss """bceaftersigmoid""" +196 24 regularizer """no""" +196 24 optimizer """adam""" +196 24 training_loop """owa""" +196 24 negative_sampler """basic""" +196 24 evaluator """rankbased""" +196 25 dataset """wn18rr""" +196 25 model """distmult""" +196 25 loss """bceaftersigmoid""" +196 25 regularizer """no""" +196 25 optimizer """adam""" +196 25 training_loop """owa""" +196 25 negative_sampler """basic""" +196 25 evaluator """rankbased""" +196 26 dataset """wn18rr""" +196 26 model """distmult""" +196 26 loss """bceaftersigmoid""" +196 26 regularizer """no""" +196 26 optimizer """adam""" +196 26 training_loop """owa""" +196 26 negative_sampler """basic""" +196 26 evaluator """rankbased""" +196 27 dataset """wn18rr""" +196 27 model """distmult""" +196 27 loss """bceaftersigmoid""" +196 27 regularizer """no""" +196 27 optimizer """adam""" +196 27 training_loop """owa""" +196 27 negative_sampler """basic""" +196 27 evaluator """rankbased""" +196 28 dataset """wn18rr""" +196 28 model """distmult""" +196 28 loss """bceaftersigmoid""" +196 28 regularizer """no""" +196 28 optimizer """adam""" +196 28 training_loop """owa""" +196 28 negative_sampler """basic""" +196 28 evaluator """rankbased""" +196 29 dataset """wn18rr""" +196 29 model """distmult""" +196 29 loss """bceaftersigmoid""" +196 29 regularizer """no""" +196 29 optimizer """adam""" +196 29 training_loop """owa""" +196 29 negative_sampler """basic""" +196 29 evaluator """rankbased""" +196 30 dataset """wn18rr""" +196 30 model """distmult""" +196 30 loss """bceaftersigmoid""" +196 30 regularizer """no""" +196 30 optimizer """adam""" +196 30 training_loop """owa""" +196 30 negative_sampler """basic""" +196 30 evaluator """rankbased""" +196 31 dataset """wn18rr""" +196 31 model """distmult""" +196 31 
loss """bceaftersigmoid""" +196 31 regularizer """no""" +196 31 optimizer """adam""" +196 31 training_loop """owa""" +196 31 negative_sampler """basic""" +196 31 evaluator """rankbased""" +196 32 dataset """wn18rr""" +196 32 model """distmult""" +196 32 loss """bceaftersigmoid""" +196 32 regularizer """no""" +196 32 optimizer """adam""" +196 32 training_loop """owa""" +196 32 negative_sampler """basic""" +196 32 evaluator """rankbased""" +196 33 dataset """wn18rr""" +196 33 model """distmult""" +196 33 loss """bceaftersigmoid""" +196 33 regularizer """no""" +196 33 optimizer """adam""" +196 33 training_loop """owa""" +196 33 negative_sampler """basic""" +196 33 evaluator """rankbased""" +196 34 dataset """wn18rr""" +196 34 model """distmult""" +196 34 loss """bceaftersigmoid""" +196 34 regularizer """no""" +196 34 optimizer """adam""" +196 34 training_loop """owa""" +196 34 negative_sampler """basic""" +196 34 evaluator """rankbased""" +196 35 dataset """wn18rr""" +196 35 model """distmult""" +196 35 loss """bceaftersigmoid""" +196 35 regularizer """no""" +196 35 optimizer """adam""" +196 35 training_loop """owa""" +196 35 negative_sampler """basic""" +196 35 evaluator """rankbased""" +196 36 dataset """wn18rr""" +196 36 model """distmult""" +196 36 loss """bceaftersigmoid""" +196 36 regularizer """no""" +196 36 optimizer """adam""" +196 36 training_loop """owa""" +196 36 negative_sampler """basic""" +196 36 evaluator """rankbased""" +196 37 dataset """wn18rr""" +196 37 model """distmult""" +196 37 loss """bceaftersigmoid""" +196 37 regularizer """no""" +196 37 optimizer """adam""" +196 37 training_loop """owa""" +196 37 negative_sampler """basic""" +196 37 evaluator """rankbased""" +196 38 dataset """wn18rr""" +196 38 model """distmult""" +196 38 loss """bceaftersigmoid""" +196 38 regularizer """no""" +196 38 optimizer """adam""" +196 38 training_loop """owa""" +196 38 negative_sampler """basic""" +196 38 evaluator """rankbased""" +196 39 dataset """wn18rr""" +196 
39 model """distmult""" +196 39 loss """bceaftersigmoid""" +196 39 regularizer """no""" +196 39 optimizer """adam""" +196 39 training_loop """owa""" +196 39 negative_sampler """basic""" +196 39 evaluator """rankbased""" +196 40 dataset """wn18rr""" +196 40 model """distmult""" +196 40 loss """bceaftersigmoid""" +196 40 regularizer """no""" +196 40 optimizer """adam""" +196 40 training_loop """owa""" +196 40 negative_sampler """basic""" +196 40 evaluator """rankbased""" +196 41 dataset """wn18rr""" +196 41 model """distmult""" +196 41 loss """bceaftersigmoid""" +196 41 regularizer """no""" +196 41 optimizer """adam""" +196 41 training_loop """owa""" +196 41 negative_sampler """basic""" +196 41 evaluator """rankbased""" +196 42 dataset """wn18rr""" +196 42 model """distmult""" +196 42 loss """bceaftersigmoid""" +196 42 regularizer """no""" +196 42 optimizer """adam""" +196 42 training_loop """owa""" +196 42 negative_sampler """basic""" +196 42 evaluator """rankbased""" +196 43 dataset """wn18rr""" +196 43 model """distmult""" +196 43 loss """bceaftersigmoid""" +196 43 regularizer """no""" +196 43 optimizer """adam""" +196 43 training_loop """owa""" +196 43 negative_sampler """basic""" +196 43 evaluator """rankbased""" +196 44 dataset """wn18rr""" +196 44 model """distmult""" +196 44 loss """bceaftersigmoid""" +196 44 regularizer """no""" +196 44 optimizer """adam""" +196 44 training_loop """owa""" +196 44 negative_sampler """basic""" +196 44 evaluator """rankbased""" +196 45 dataset """wn18rr""" +196 45 model """distmult""" +196 45 loss """bceaftersigmoid""" +196 45 regularizer """no""" +196 45 optimizer """adam""" +196 45 training_loop """owa""" +196 45 negative_sampler """basic""" +196 45 evaluator """rankbased""" +196 46 dataset """wn18rr""" +196 46 model """distmult""" +196 46 loss """bceaftersigmoid""" +196 46 regularizer """no""" +196 46 optimizer """adam""" +196 46 training_loop """owa""" +196 46 negative_sampler """basic""" +196 46 evaluator """rankbased""" 
+196 47 dataset """wn18rr""" +196 47 model """distmult""" +196 47 loss """bceaftersigmoid""" +196 47 regularizer """no""" +196 47 optimizer """adam""" +196 47 training_loop """owa""" +196 47 negative_sampler """basic""" +196 47 evaluator """rankbased""" +196 48 dataset """wn18rr""" +196 48 model """distmult""" +196 48 loss """bceaftersigmoid""" +196 48 regularizer """no""" +196 48 optimizer """adam""" +196 48 training_loop """owa""" +196 48 negative_sampler """basic""" +196 48 evaluator """rankbased""" +196 49 dataset """wn18rr""" +196 49 model """distmult""" +196 49 loss """bceaftersigmoid""" +196 49 regularizer """no""" +196 49 optimizer """adam""" +196 49 training_loop """owa""" +196 49 negative_sampler """basic""" +196 49 evaluator """rankbased""" +196 50 dataset """wn18rr""" +196 50 model """distmult""" +196 50 loss """bceaftersigmoid""" +196 50 regularizer """no""" +196 50 optimizer """adam""" +196 50 training_loop """owa""" +196 50 negative_sampler """basic""" +196 50 evaluator """rankbased""" +196 51 dataset """wn18rr""" +196 51 model """distmult""" +196 51 loss """bceaftersigmoid""" +196 51 regularizer """no""" +196 51 optimizer """adam""" +196 51 training_loop """owa""" +196 51 negative_sampler """basic""" +196 51 evaluator """rankbased""" +196 52 dataset """wn18rr""" +196 52 model """distmult""" +196 52 loss """bceaftersigmoid""" +196 52 regularizer """no""" +196 52 optimizer """adam""" +196 52 training_loop """owa""" +196 52 negative_sampler """basic""" +196 52 evaluator """rankbased""" +196 53 dataset """wn18rr""" +196 53 model """distmult""" +196 53 loss """bceaftersigmoid""" +196 53 regularizer """no""" +196 53 optimizer """adam""" +196 53 training_loop """owa""" +196 53 negative_sampler """basic""" +196 53 evaluator """rankbased""" +196 54 dataset """wn18rr""" +196 54 model """distmult""" +196 54 loss """bceaftersigmoid""" +196 54 regularizer """no""" +196 54 optimizer """adam""" +196 54 training_loop """owa""" +196 54 negative_sampler """basic""" 
+196 54 evaluator """rankbased""" +196 55 dataset """wn18rr""" +196 55 model """distmult""" +196 55 loss """bceaftersigmoid""" +196 55 regularizer """no""" +196 55 optimizer """adam""" +196 55 training_loop """owa""" +196 55 negative_sampler """basic""" +196 55 evaluator """rankbased""" +196 56 dataset """wn18rr""" +196 56 model """distmult""" +196 56 loss """bceaftersigmoid""" +196 56 regularizer """no""" +196 56 optimizer """adam""" +196 56 training_loop """owa""" +196 56 negative_sampler """basic""" +196 56 evaluator """rankbased""" +196 57 dataset """wn18rr""" +196 57 model """distmult""" +196 57 loss """bceaftersigmoid""" +196 57 regularizer """no""" +196 57 optimizer """adam""" +196 57 training_loop """owa""" +196 57 negative_sampler """basic""" +196 57 evaluator """rankbased""" +196 58 dataset """wn18rr""" +196 58 model """distmult""" +196 58 loss """bceaftersigmoid""" +196 58 regularizer """no""" +196 58 optimizer """adam""" +196 58 training_loop """owa""" +196 58 negative_sampler """basic""" +196 58 evaluator """rankbased""" +196 59 dataset """wn18rr""" +196 59 model """distmult""" +196 59 loss """bceaftersigmoid""" +196 59 regularizer """no""" +196 59 optimizer """adam""" +196 59 training_loop """owa""" +196 59 negative_sampler """basic""" +196 59 evaluator """rankbased""" +196 60 dataset """wn18rr""" +196 60 model """distmult""" +196 60 loss """bceaftersigmoid""" +196 60 regularizer """no""" +196 60 optimizer """adam""" +196 60 training_loop """owa""" +196 60 negative_sampler """basic""" +196 60 evaluator """rankbased""" +196 61 dataset """wn18rr""" +196 61 model """distmult""" +196 61 loss """bceaftersigmoid""" +196 61 regularizer """no""" +196 61 optimizer """adam""" +196 61 training_loop """owa""" +196 61 negative_sampler """basic""" +196 61 evaluator """rankbased""" +196 62 dataset """wn18rr""" +196 62 model """distmult""" +196 62 loss """bceaftersigmoid""" +196 62 regularizer """no""" +196 62 optimizer """adam""" +196 62 training_loop """owa""" +196 
62 negative_sampler """basic""" +196 62 evaluator """rankbased""" +196 63 dataset """wn18rr""" +196 63 model """distmult""" +196 63 loss """bceaftersigmoid""" +196 63 regularizer """no""" +196 63 optimizer """adam""" +196 63 training_loop """owa""" +196 63 negative_sampler """basic""" +196 63 evaluator """rankbased""" +196 64 dataset """wn18rr""" +196 64 model """distmult""" +196 64 loss """bceaftersigmoid""" +196 64 regularizer """no""" +196 64 optimizer """adam""" +196 64 training_loop """owa""" +196 64 negative_sampler """basic""" +196 64 evaluator """rankbased""" +196 65 dataset """wn18rr""" +196 65 model """distmult""" +196 65 loss """bceaftersigmoid""" +196 65 regularizer """no""" +196 65 optimizer """adam""" +196 65 training_loop """owa""" +196 65 negative_sampler """basic""" +196 65 evaluator """rankbased""" +196 66 dataset """wn18rr""" +196 66 model """distmult""" +196 66 loss """bceaftersigmoid""" +196 66 regularizer """no""" +196 66 optimizer """adam""" +196 66 training_loop """owa""" +196 66 negative_sampler """basic""" +196 66 evaluator """rankbased""" +196 67 dataset """wn18rr""" +196 67 model """distmult""" +196 67 loss """bceaftersigmoid""" +196 67 regularizer """no""" +196 67 optimizer """adam""" +196 67 training_loop """owa""" +196 67 negative_sampler """basic""" +196 67 evaluator """rankbased""" +196 68 dataset """wn18rr""" +196 68 model """distmult""" +196 68 loss """bceaftersigmoid""" +196 68 regularizer """no""" +196 68 optimizer """adam""" +196 68 training_loop """owa""" +196 68 negative_sampler """basic""" +196 68 evaluator """rankbased""" +196 69 dataset """wn18rr""" +196 69 model """distmult""" +196 69 loss """bceaftersigmoid""" +196 69 regularizer """no""" +196 69 optimizer """adam""" +196 69 training_loop """owa""" +196 69 negative_sampler """basic""" +196 69 evaluator """rankbased""" +196 70 dataset """wn18rr""" +196 70 model """distmult""" +196 70 loss """bceaftersigmoid""" +196 70 regularizer """no""" +196 70 optimizer """adam""" +196 
70 training_loop """owa""" +196 70 negative_sampler """basic""" +196 70 evaluator """rankbased""" +196 71 dataset """wn18rr""" +196 71 model """distmult""" +196 71 loss """bceaftersigmoid""" +196 71 regularizer """no""" +196 71 optimizer """adam""" +196 71 training_loop """owa""" +196 71 negative_sampler """basic""" +196 71 evaluator """rankbased""" +196 72 dataset """wn18rr""" +196 72 model """distmult""" +196 72 loss """bceaftersigmoid""" +196 72 regularizer """no""" +196 72 optimizer """adam""" +196 72 training_loop """owa""" +196 72 negative_sampler """basic""" +196 72 evaluator """rankbased""" +196 73 dataset """wn18rr""" +196 73 model """distmult""" +196 73 loss """bceaftersigmoid""" +196 73 regularizer """no""" +196 73 optimizer """adam""" +196 73 training_loop """owa""" +196 73 negative_sampler """basic""" +196 73 evaluator """rankbased""" +196 74 dataset """wn18rr""" +196 74 model """distmult""" +196 74 loss """bceaftersigmoid""" +196 74 regularizer """no""" +196 74 optimizer """adam""" +196 74 training_loop """owa""" +196 74 negative_sampler """basic""" +196 74 evaluator """rankbased""" +196 75 dataset """wn18rr""" +196 75 model """distmult""" +196 75 loss """bceaftersigmoid""" +196 75 regularizer """no""" +196 75 optimizer """adam""" +196 75 training_loop """owa""" +196 75 negative_sampler """basic""" +196 75 evaluator """rankbased""" +196 76 dataset """wn18rr""" +196 76 model """distmult""" +196 76 loss """bceaftersigmoid""" +196 76 regularizer """no""" +196 76 optimizer """adam""" +196 76 training_loop """owa""" +196 76 negative_sampler """basic""" +196 76 evaluator """rankbased""" +196 77 dataset """wn18rr""" +196 77 model """distmult""" +196 77 loss """bceaftersigmoid""" +196 77 regularizer """no""" +196 77 optimizer """adam""" +196 77 training_loop """owa""" +196 77 negative_sampler """basic""" +196 77 evaluator """rankbased""" +196 78 dataset """wn18rr""" +196 78 model """distmult""" +196 78 loss """bceaftersigmoid""" +196 78 regularizer """no""" 
+196 78 optimizer """adam""" +196 78 training_loop """owa""" +196 78 negative_sampler """basic""" +196 78 evaluator """rankbased""" +197 1 model.embedding_dim 1.0 +197 1 optimizer.lr 0.002711791537811089 +197 1 negative_sampler.num_negs_per_pos 12.0 +197 1 training.batch_size 0.0 +197 2 model.embedding_dim 0.0 +197 2 optimizer.lr 0.0019369029625816167 +197 2 negative_sampler.num_negs_per_pos 15.0 +197 2 training.batch_size 0.0 +197 3 model.embedding_dim 1.0 +197 3 optimizer.lr 0.09535844589287971 +197 3 negative_sampler.num_negs_per_pos 55.0 +197 3 training.batch_size 0.0 +197 4 model.embedding_dim 0.0 +197 4 optimizer.lr 0.001023987891647992 +197 4 negative_sampler.num_negs_per_pos 86.0 +197 4 training.batch_size 2.0 +197 5 model.embedding_dim 0.0 +197 5 optimizer.lr 0.04719247129696081 +197 5 negative_sampler.num_negs_per_pos 50.0 +197 5 training.batch_size 1.0 +197 6 model.embedding_dim 1.0 +197 6 optimizer.lr 0.020773206294026 +197 6 negative_sampler.num_negs_per_pos 4.0 +197 6 training.batch_size 1.0 +197 7 model.embedding_dim 0.0 +197 7 optimizer.lr 0.004652835739764522 +197 7 negative_sampler.num_negs_per_pos 28.0 +197 7 training.batch_size 2.0 +197 8 model.embedding_dim 2.0 +197 8 optimizer.lr 0.017258399048781237 +197 8 negative_sampler.num_negs_per_pos 9.0 +197 8 training.batch_size 1.0 +197 9 model.embedding_dim 1.0 +197 9 optimizer.lr 0.006780257799308667 +197 9 negative_sampler.num_negs_per_pos 92.0 +197 9 training.batch_size 2.0 +197 10 model.embedding_dim 2.0 +197 10 optimizer.lr 0.003712773867235937 +197 10 negative_sampler.num_negs_per_pos 24.0 +197 10 training.batch_size 1.0 +197 11 model.embedding_dim 2.0 +197 11 optimizer.lr 0.010351097944598401 +197 11 negative_sampler.num_negs_per_pos 53.0 +197 11 training.batch_size 0.0 +197 12 model.embedding_dim 0.0 +197 12 optimizer.lr 0.0014170919140399996 +197 12 negative_sampler.num_negs_per_pos 93.0 +197 12 training.batch_size 0.0 +197 13 model.embedding_dim 0.0 +197 13 optimizer.lr 0.09605131327976359 
+197 13 negative_sampler.num_negs_per_pos 9.0 +197 13 training.batch_size 0.0 +197 14 model.embedding_dim 2.0 +197 14 optimizer.lr 0.002720003107690837 +197 14 negative_sampler.num_negs_per_pos 50.0 +197 14 training.batch_size 1.0 +197 15 model.embedding_dim 2.0 +197 15 optimizer.lr 0.0014361935986468219 +197 15 negative_sampler.num_negs_per_pos 92.0 +197 15 training.batch_size 1.0 +197 16 model.embedding_dim 0.0 +197 16 optimizer.lr 0.028427637129350983 +197 16 negative_sampler.num_negs_per_pos 84.0 +197 16 training.batch_size 1.0 +197 17 model.embedding_dim 0.0 +197 17 optimizer.lr 0.0018151064576239179 +197 17 negative_sampler.num_negs_per_pos 40.0 +197 17 training.batch_size 2.0 +197 18 model.embedding_dim 2.0 +197 18 optimizer.lr 0.0053857820275609175 +197 18 negative_sampler.num_negs_per_pos 62.0 +197 18 training.batch_size 1.0 +197 19 model.embedding_dim 2.0 +197 19 optimizer.lr 0.004908305428890087 +197 19 negative_sampler.num_negs_per_pos 2.0 +197 19 training.batch_size 1.0 +197 20 model.embedding_dim 1.0 +197 20 optimizer.lr 0.0017166647989674023 +197 20 negative_sampler.num_negs_per_pos 27.0 +197 20 training.batch_size 2.0 +197 21 model.embedding_dim 0.0 +197 21 optimizer.lr 0.001427654012071288 +197 21 negative_sampler.num_negs_per_pos 9.0 +197 21 training.batch_size 1.0 +197 22 model.embedding_dim 1.0 +197 22 optimizer.lr 0.05167212592606625 +197 22 negative_sampler.num_negs_per_pos 4.0 +197 22 training.batch_size 0.0 +197 23 model.embedding_dim 0.0 +197 23 optimizer.lr 0.0021120622900005345 +197 23 negative_sampler.num_negs_per_pos 17.0 +197 23 training.batch_size 1.0 +197 24 model.embedding_dim 0.0 +197 24 optimizer.lr 0.002948194161052769 +197 24 negative_sampler.num_negs_per_pos 72.0 +197 24 training.batch_size 2.0 +197 25 model.embedding_dim 1.0 +197 25 optimizer.lr 0.0030152172277561717 +197 25 negative_sampler.num_negs_per_pos 47.0 +197 25 training.batch_size 0.0 +197 26 model.embedding_dim 2.0 +197 26 optimizer.lr 0.021835026967025006 +197 26 
negative_sampler.num_negs_per_pos 27.0 +197 26 training.batch_size 1.0 +197 27 model.embedding_dim 1.0 +197 27 optimizer.lr 0.0017131269660250544 +197 27 negative_sampler.num_negs_per_pos 18.0 +197 27 training.batch_size 0.0 +197 28 model.embedding_dim 1.0 +197 28 optimizer.lr 0.025256066879588212 +197 28 negative_sampler.num_negs_per_pos 98.0 +197 28 training.batch_size 2.0 +197 29 model.embedding_dim 1.0 +197 29 optimizer.lr 0.02847233476878576 +197 29 negative_sampler.num_negs_per_pos 29.0 +197 29 training.batch_size 0.0 +197 30 model.embedding_dim 1.0 +197 30 optimizer.lr 0.00359186318867213 +197 30 negative_sampler.num_negs_per_pos 93.0 +197 30 training.batch_size 2.0 +197 31 model.embedding_dim 1.0 +197 31 optimizer.lr 0.017687822437579874 +197 31 negative_sampler.num_negs_per_pos 56.0 +197 31 training.batch_size 1.0 +197 32 model.embedding_dim 2.0 +197 32 optimizer.lr 0.003278168993471895 +197 32 negative_sampler.num_negs_per_pos 55.0 +197 32 training.batch_size 2.0 +197 33 model.embedding_dim 1.0 +197 33 optimizer.lr 0.02107183012666925 +197 33 negative_sampler.num_negs_per_pos 97.0 +197 33 training.batch_size 0.0 +197 34 model.embedding_dim 0.0 +197 34 optimizer.lr 0.027893687626849226 +197 34 negative_sampler.num_negs_per_pos 52.0 +197 34 training.batch_size 1.0 +197 35 model.embedding_dim 1.0 +197 35 optimizer.lr 0.0016435989619481403 +197 35 negative_sampler.num_negs_per_pos 42.0 +197 35 training.batch_size 1.0 +197 36 model.embedding_dim 1.0 +197 36 optimizer.lr 0.08027289355236504 +197 36 negative_sampler.num_negs_per_pos 78.0 +197 36 training.batch_size 0.0 +197 37 model.embedding_dim 1.0 +197 37 optimizer.lr 0.016285170693236692 +197 37 negative_sampler.num_negs_per_pos 78.0 +197 37 training.batch_size 1.0 +197 38 model.embedding_dim 0.0 +197 38 optimizer.lr 0.008541486889904266 +197 38 negative_sampler.num_negs_per_pos 38.0 +197 38 training.batch_size 0.0 +197 39 model.embedding_dim 0.0 +197 39 optimizer.lr 0.001979315157568559 +197 39 
negative_sampler.num_negs_per_pos 7.0 +197 39 training.batch_size 2.0 +197 40 model.embedding_dim 1.0 +197 40 optimizer.lr 0.03236114421679499 +197 40 negative_sampler.num_negs_per_pos 30.0 +197 40 training.batch_size 2.0 +197 41 model.embedding_dim 2.0 +197 41 optimizer.lr 0.0016130077655964865 +197 41 negative_sampler.num_negs_per_pos 48.0 +197 41 training.batch_size 1.0 +197 42 model.embedding_dim 1.0 +197 42 optimizer.lr 0.01953728255937985 +197 42 negative_sampler.num_negs_per_pos 22.0 +197 42 training.batch_size 0.0 +197 43 model.embedding_dim 0.0 +197 43 optimizer.lr 0.0013612608597285824 +197 43 negative_sampler.num_negs_per_pos 39.0 +197 43 training.batch_size 1.0 +197 44 model.embedding_dim 0.0 +197 44 optimizer.lr 0.002568527730237773 +197 44 negative_sampler.num_negs_per_pos 45.0 +197 44 training.batch_size 1.0 +197 45 model.embedding_dim 2.0 +197 45 optimizer.lr 0.0012818070404179358 +197 45 negative_sampler.num_negs_per_pos 53.0 +197 45 training.batch_size 0.0 +197 46 model.embedding_dim 2.0 +197 46 optimizer.lr 0.0025371710267907337 +197 46 negative_sampler.num_negs_per_pos 57.0 +197 46 training.batch_size 2.0 +197 47 model.embedding_dim 2.0 +197 47 optimizer.lr 0.0031282169258859444 +197 47 negative_sampler.num_negs_per_pos 82.0 +197 47 training.batch_size 0.0 +197 48 model.embedding_dim 2.0 +197 48 optimizer.lr 0.0018027214500939785 +197 48 negative_sampler.num_negs_per_pos 87.0 +197 48 training.batch_size 1.0 +197 49 model.embedding_dim 0.0 +197 49 optimizer.lr 0.0035211810289658366 +197 49 negative_sampler.num_negs_per_pos 49.0 +197 49 training.batch_size 0.0 +197 50 model.embedding_dim 1.0 +197 50 optimizer.lr 0.07553361716717119 +197 50 negative_sampler.num_negs_per_pos 61.0 +197 50 training.batch_size 1.0 +197 51 model.embedding_dim 0.0 +197 51 optimizer.lr 0.008970021180557437 +197 51 negative_sampler.num_negs_per_pos 75.0 +197 51 training.batch_size 0.0 +197 52 model.embedding_dim 1.0 +197 52 optimizer.lr 0.09617383221906738 +197 52 
negative_sampler.num_negs_per_pos 70.0 +197 52 training.batch_size 2.0 +197 53 model.embedding_dim 2.0 +197 53 optimizer.lr 0.017848819384580498 +197 53 negative_sampler.num_negs_per_pos 71.0 +197 53 training.batch_size 0.0 +197 54 model.embedding_dim 2.0 +197 54 optimizer.lr 0.013505597182541884 +197 54 negative_sampler.num_negs_per_pos 32.0 +197 54 training.batch_size 0.0 +197 55 model.embedding_dim 0.0 +197 55 optimizer.lr 0.008917965502014949 +197 55 negative_sampler.num_negs_per_pos 5.0 +197 55 training.batch_size 2.0 +197 56 model.embedding_dim 0.0 +197 56 optimizer.lr 0.016372355356869525 +197 56 negative_sampler.num_negs_per_pos 49.0 +197 56 training.batch_size 0.0 +197 57 model.embedding_dim 0.0 +197 57 optimizer.lr 0.08695837518860883 +197 57 negative_sampler.num_negs_per_pos 30.0 +197 57 training.batch_size 1.0 +197 58 model.embedding_dim 1.0 +197 58 optimizer.lr 0.0065812856963676425 +197 58 negative_sampler.num_negs_per_pos 61.0 +197 58 training.batch_size 1.0 +197 59 model.embedding_dim 0.0 +197 59 optimizer.lr 0.0697036901729787 +197 59 negative_sampler.num_negs_per_pos 2.0 +197 59 training.batch_size 0.0 +197 60 model.embedding_dim 2.0 +197 60 optimizer.lr 0.0029115216196080153 +197 60 negative_sampler.num_negs_per_pos 19.0 +197 60 training.batch_size 1.0 +197 61 model.embedding_dim 1.0 +197 61 optimizer.lr 0.0024768751521034144 +197 61 negative_sampler.num_negs_per_pos 28.0 +197 61 training.batch_size 0.0 +197 62 model.embedding_dim 2.0 +197 62 optimizer.lr 0.017921041360813465 +197 62 negative_sampler.num_negs_per_pos 92.0 +197 62 training.batch_size 2.0 +197 63 model.embedding_dim 0.0 +197 63 optimizer.lr 0.009532810566151849 +197 63 negative_sampler.num_negs_per_pos 5.0 +197 63 training.batch_size 0.0 +197 64 model.embedding_dim 0.0 +197 64 optimizer.lr 0.0499409830079786 +197 64 negative_sampler.num_negs_per_pos 24.0 +197 64 training.batch_size 0.0 +197 65 model.embedding_dim 0.0 +197 65 optimizer.lr 0.0012406056476820192 +197 65 
negative_sampler.num_negs_per_pos 73.0 +197 65 training.batch_size 1.0 +197 66 model.embedding_dim 0.0 +197 66 optimizer.lr 0.07471649077927146 +197 66 negative_sampler.num_negs_per_pos 38.0 +197 66 training.batch_size 0.0 +197 67 model.embedding_dim 2.0 +197 67 optimizer.lr 0.012882126559696108 +197 67 negative_sampler.num_negs_per_pos 98.0 +197 67 training.batch_size 0.0 +197 1 dataset """wn18rr""" +197 1 model """distmult""" +197 1 loss """softplus""" +197 1 regularizer """no""" +197 1 optimizer """adam""" +197 1 training_loop """owa""" +197 1 negative_sampler """basic""" +197 1 evaluator """rankbased""" +197 2 dataset """wn18rr""" +197 2 model """distmult""" +197 2 loss """softplus""" +197 2 regularizer """no""" +197 2 optimizer """adam""" +197 2 training_loop """owa""" +197 2 negative_sampler """basic""" +197 2 evaluator """rankbased""" +197 3 dataset """wn18rr""" +197 3 model """distmult""" +197 3 loss """softplus""" +197 3 regularizer """no""" +197 3 optimizer """adam""" +197 3 training_loop """owa""" +197 3 negative_sampler """basic""" +197 3 evaluator """rankbased""" +197 4 dataset """wn18rr""" +197 4 model """distmult""" +197 4 loss """softplus""" +197 4 regularizer """no""" +197 4 optimizer """adam""" +197 4 training_loop """owa""" +197 4 negative_sampler """basic""" +197 4 evaluator """rankbased""" +197 5 dataset """wn18rr""" +197 5 model """distmult""" +197 5 loss """softplus""" +197 5 regularizer """no""" +197 5 optimizer """adam""" +197 5 training_loop """owa""" +197 5 negative_sampler """basic""" +197 5 evaluator """rankbased""" +197 6 dataset """wn18rr""" +197 6 model """distmult""" +197 6 loss """softplus""" +197 6 regularizer """no""" +197 6 optimizer """adam""" +197 6 training_loop """owa""" +197 6 negative_sampler """basic""" +197 6 evaluator """rankbased""" +197 7 dataset """wn18rr""" +197 7 model """distmult""" +197 7 loss """softplus""" +197 7 regularizer """no""" +197 7 optimizer """adam""" +197 7 training_loop """owa""" +197 7 
negative_sampler """basic""" +197 7 evaluator """rankbased""" +197 8 dataset """wn18rr""" +197 8 model """distmult""" +197 8 loss """softplus""" +197 8 regularizer """no""" +197 8 optimizer """adam""" +197 8 training_loop """owa""" +197 8 negative_sampler """basic""" +197 8 evaluator """rankbased""" +197 9 dataset """wn18rr""" +197 9 model """distmult""" +197 9 loss """softplus""" +197 9 regularizer """no""" +197 9 optimizer """adam""" +197 9 training_loop """owa""" +197 9 negative_sampler """basic""" +197 9 evaluator """rankbased""" +197 10 dataset """wn18rr""" +197 10 model """distmult""" +197 10 loss """softplus""" +197 10 regularizer """no""" +197 10 optimizer """adam""" +197 10 training_loop """owa""" +197 10 negative_sampler """basic""" +197 10 evaluator """rankbased""" +197 11 dataset """wn18rr""" +197 11 model """distmult""" +197 11 loss """softplus""" +197 11 regularizer """no""" +197 11 optimizer """adam""" +197 11 training_loop """owa""" +197 11 negative_sampler """basic""" +197 11 evaluator """rankbased""" +197 12 dataset """wn18rr""" +197 12 model """distmult""" +197 12 loss """softplus""" +197 12 regularizer """no""" +197 12 optimizer """adam""" +197 12 training_loop """owa""" +197 12 negative_sampler """basic""" +197 12 evaluator """rankbased""" +197 13 dataset """wn18rr""" +197 13 model """distmult""" +197 13 loss """softplus""" +197 13 regularizer """no""" +197 13 optimizer """adam""" +197 13 training_loop """owa""" +197 13 negative_sampler """basic""" +197 13 evaluator """rankbased""" +197 14 dataset """wn18rr""" +197 14 model """distmult""" +197 14 loss """softplus""" +197 14 regularizer """no""" +197 14 optimizer """adam""" +197 14 training_loop """owa""" +197 14 negative_sampler """basic""" +197 14 evaluator """rankbased""" +197 15 dataset """wn18rr""" +197 15 model """distmult""" +197 15 loss """softplus""" +197 15 regularizer """no""" +197 15 optimizer """adam""" +197 15 training_loop """owa""" +197 15 negative_sampler """basic""" +197 15 
evaluator """rankbased""" +197 16 dataset """wn18rr""" +197 16 model """distmult""" +197 16 loss """softplus""" +197 16 regularizer """no""" +197 16 optimizer """adam""" +197 16 training_loop """owa""" +197 16 negative_sampler """basic""" +197 16 evaluator """rankbased""" +197 17 dataset """wn18rr""" +197 17 model """distmult""" +197 17 loss """softplus""" +197 17 regularizer """no""" +197 17 optimizer """adam""" +197 17 training_loop """owa""" +197 17 negative_sampler """basic""" +197 17 evaluator """rankbased""" +197 18 dataset """wn18rr""" +197 18 model """distmult""" +197 18 loss """softplus""" +197 18 regularizer """no""" +197 18 optimizer """adam""" +197 18 training_loop """owa""" +197 18 negative_sampler """basic""" +197 18 evaluator """rankbased""" +197 19 dataset """wn18rr""" +197 19 model """distmult""" +197 19 loss """softplus""" +197 19 regularizer """no""" +197 19 optimizer """adam""" +197 19 training_loop """owa""" +197 19 negative_sampler """basic""" +197 19 evaluator """rankbased""" +197 20 dataset """wn18rr""" +197 20 model """distmult""" +197 20 loss """softplus""" +197 20 regularizer """no""" +197 20 optimizer """adam""" +197 20 training_loop """owa""" +197 20 negative_sampler """basic""" +197 20 evaluator """rankbased""" +197 21 dataset """wn18rr""" +197 21 model """distmult""" +197 21 loss """softplus""" +197 21 regularizer """no""" +197 21 optimizer """adam""" +197 21 training_loop """owa""" +197 21 negative_sampler """basic""" +197 21 evaluator """rankbased""" +197 22 dataset """wn18rr""" +197 22 model """distmult""" +197 22 loss """softplus""" +197 22 regularizer """no""" +197 22 optimizer """adam""" +197 22 training_loop """owa""" +197 22 negative_sampler """basic""" +197 22 evaluator """rankbased""" +197 23 dataset """wn18rr""" +197 23 model """distmult""" +197 23 loss """softplus""" +197 23 regularizer """no""" +197 23 optimizer """adam""" +197 23 training_loop """owa""" +197 23 negative_sampler """basic""" +197 23 evaluator 
"""rankbased""" +197 24 dataset """wn18rr""" +197 24 model """distmult""" +197 24 loss """softplus""" +197 24 regularizer """no""" +197 24 optimizer """adam""" +197 24 training_loop """owa""" +197 24 negative_sampler """basic""" +197 24 evaluator """rankbased""" +197 25 dataset """wn18rr""" +197 25 model """distmult""" +197 25 loss """softplus""" +197 25 regularizer """no""" +197 25 optimizer """adam""" +197 25 training_loop """owa""" +197 25 negative_sampler """basic""" +197 25 evaluator """rankbased""" +197 26 dataset """wn18rr""" +197 26 model """distmult""" +197 26 loss """softplus""" +197 26 regularizer """no""" +197 26 optimizer """adam""" +197 26 training_loop """owa""" +197 26 negative_sampler """basic""" +197 26 evaluator """rankbased""" +197 27 dataset """wn18rr""" +197 27 model """distmult""" +197 27 loss """softplus""" +197 27 regularizer """no""" +197 27 optimizer """adam""" +197 27 training_loop """owa""" +197 27 negative_sampler """basic""" +197 27 evaluator """rankbased""" +197 28 dataset """wn18rr""" +197 28 model """distmult""" +197 28 loss """softplus""" +197 28 regularizer """no""" +197 28 optimizer """adam""" +197 28 training_loop """owa""" +197 28 negative_sampler """basic""" +197 28 evaluator """rankbased""" +197 29 dataset """wn18rr""" +197 29 model """distmult""" +197 29 loss """softplus""" +197 29 regularizer """no""" +197 29 optimizer """adam""" +197 29 training_loop """owa""" +197 29 negative_sampler """basic""" +197 29 evaluator """rankbased""" +197 30 dataset """wn18rr""" +197 30 model """distmult""" +197 30 loss """softplus""" +197 30 regularizer """no""" +197 30 optimizer """adam""" +197 30 training_loop """owa""" +197 30 negative_sampler """basic""" +197 30 evaluator """rankbased""" +197 31 dataset """wn18rr""" +197 31 model """distmult""" +197 31 loss """softplus""" +197 31 regularizer """no""" +197 31 optimizer """adam""" +197 31 training_loop """owa""" +197 31 negative_sampler """basic""" +197 31 evaluator """rankbased""" +197 32 
dataset """wn18rr""" +197 32 model """distmult""" +197 32 loss """softplus""" +197 32 regularizer """no""" +197 32 optimizer """adam""" +197 32 training_loop """owa""" +197 32 negative_sampler """basic""" +197 32 evaluator """rankbased""" +197 33 dataset """wn18rr""" +197 33 model """distmult""" +197 33 loss """softplus""" +197 33 regularizer """no""" +197 33 optimizer """adam""" +197 33 training_loop """owa""" +197 33 negative_sampler """basic""" +197 33 evaluator """rankbased""" +197 34 dataset """wn18rr""" +197 34 model """distmult""" +197 34 loss """softplus""" +197 34 regularizer """no""" +197 34 optimizer """adam""" +197 34 training_loop """owa""" +197 34 negative_sampler """basic""" +197 34 evaluator """rankbased""" +197 35 dataset """wn18rr""" +197 35 model """distmult""" +197 35 loss """softplus""" +197 35 regularizer """no""" +197 35 optimizer """adam""" +197 35 training_loop """owa""" +197 35 negative_sampler """basic""" +197 35 evaluator """rankbased""" +197 36 dataset """wn18rr""" +197 36 model """distmult""" +197 36 loss """softplus""" +197 36 regularizer """no""" +197 36 optimizer """adam""" +197 36 training_loop """owa""" +197 36 negative_sampler """basic""" +197 36 evaluator """rankbased""" +197 37 dataset """wn18rr""" +197 37 model """distmult""" +197 37 loss """softplus""" +197 37 regularizer """no""" +197 37 optimizer """adam""" +197 37 training_loop """owa""" +197 37 negative_sampler """basic""" +197 37 evaluator """rankbased""" +197 38 dataset """wn18rr""" +197 38 model """distmult""" +197 38 loss """softplus""" +197 38 regularizer """no""" +197 38 optimizer """adam""" +197 38 training_loop """owa""" +197 38 negative_sampler """basic""" +197 38 evaluator """rankbased""" +197 39 dataset """wn18rr""" +197 39 model """distmult""" +197 39 loss """softplus""" +197 39 regularizer """no""" +197 39 optimizer """adam""" +197 39 training_loop """owa""" +197 39 negative_sampler """basic""" +197 39 evaluator """rankbased""" +197 40 dataset """wn18rr""" 
+197 40 model """distmult""" +197 40 loss """softplus""" +197 40 regularizer """no""" +197 40 optimizer """adam""" +197 40 training_loop """owa""" +197 40 negative_sampler """basic""" +197 40 evaluator """rankbased""" +197 41 dataset """wn18rr""" +197 41 model """distmult""" +197 41 loss """softplus""" +197 41 regularizer """no""" +197 41 optimizer """adam""" +197 41 training_loop """owa""" +197 41 negative_sampler """basic""" +197 41 evaluator """rankbased""" +197 42 dataset """wn18rr""" +197 42 model """distmult""" +197 42 loss """softplus""" +197 42 regularizer """no""" +197 42 optimizer """adam""" +197 42 training_loop """owa""" +197 42 negative_sampler """basic""" +197 42 evaluator """rankbased""" +197 43 dataset """wn18rr""" +197 43 model """distmult""" +197 43 loss """softplus""" +197 43 regularizer """no""" +197 43 optimizer """adam""" +197 43 training_loop """owa""" +197 43 negative_sampler """basic""" +197 43 evaluator """rankbased""" +197 44 dataset """wn18rr""" +197 44 model """distmult""" +197 44 loss """softplus""" +197 44 regularizer """no""" +197 44 optimizer """adam""" +197 44 training_loop """owa""" +197 44 negative_sampler """basic""" +197 44 evaluator """rankbased""" +197 45 dataset """wn18rr""" +197 45 model """distmult""" +197 45 loss """softplus""" +197 45 regularizer """no""" +197 45 optimizer """adam""" +197 45 training_loop """owa""" +197 45 negative_sampler """basic""" +197 45 evaluator """rankbased""" +197 46 dataset """wn18rr""" +197 46 model """distmult""" +197 46 loss """softplus""" +197 46 regularizer """no""" +197 46 optimizer """adam""" +197 46 training_loop """owa""" +197 46 negative_sampler """basic""" +197 46 evaluator """rankbased""" +197 47 dataset """wn18rr""" +197 47 model """distmult""" +197 47 loss """softplus""" +197 47 regularizer """no""" +197 47 optimizer """adam""" +197 47 training_loop """owa""" +197 47 negative_sampler """basic""" +197 47 evaluator """rankbased""" +197 48 dataset """wn18rr""" +197 48 model 
"""distmult""" +197 48 loss """softplus""" +197 48 regularizer """no""" +197 48 optimizer """adam""" +197 48 training_loop """owa""" +197 48 negative_sampler """basic""" +197 48 evaluator """rankbased""" +197 49 dataset """wn18rr""" +197 49 model """distmult""" +197 49 loss """softplus""" +197 49 regularizer """no""" +197 49 optimizer """adam""" +197 49 training_loop """owa""" +197 49 negative_sampler """basic""" +197 49 evaluator """rankbased""" +197 50 dataset """wn18rr""" +197 50 model """distmult""" +197 50 loss """softplus""" +197 50 regularizer """no""" +197 50 optimizer """adam""" +197 50 training_loop """owa""" +197 50 negative_sampler """basic""" +197 50 evaluator """rankbased""" +197 51 dataset """wn18rr""" +197 51 model """distmult""" +197 51 loss """softplus""" +197 51 regularizer """no""" +197 51 optimizer """adam""" +197 51 training_loop """owa""" +197 51 negative_sampler """basic""" +197 51 evaluator """rankbased""" +197 52 dataset """wn18rr""" +197 52 model """distmult""" +197 52 loss """softplus""" +197 52 regularizer """no""" +197 52 optimizer """adam""" +197 52 training_loop """owa""" +197 52 negative_sampler """basic""" +197 52 evaluator """rankbased""" +197 53 dataset """wn18rr""" +197 53 model """distmult""" +197 53 loss """softplus""" +197 53 regularizer """no""" +197 53 optimizer """adam""" +197 53 training_loop """owa""" +197 53 negative_sampler """basic""" +197 53 evaluator """rankbased""" +197 54 dataset """wn18rr""" +197 54 model """distmult""" +197 54 loss """softplus""" +197 54 regularizer """no""" +197 54 optimizer """adam""" +197 54 training_loop """owa""" +197 54 negative_sampler """basic""" +197 54 evaluator """rankbased""" +197 55 dataset """wn18rr""" +197 55 model """distmult""" +197 55 loss """softplus""" +197 55 regularizer """no""" +197 55 optimizer """adam""" +197 55 training_loop """owa""" +197 55 negative_sampler """basic""" +197 55 evaluator """rankbased""" +197 56 dataset """wn18rr""" +197 56 model """distmult""" +197 56 
loss """softplus""" +197 56 regularizer """no""" +197 56 optimizer """adam""" +197 56 training_loop """owa""" +197 56 negative_sampler """basic""" +197 56 evaluator """rankbased""" +197 57 dataset """wn18rr""" +197 57 model """distmult""" +197 57 loss """softplus""" +197 57 regularizer """no""" +197 57 optimizer """adam""" +197 57 training_loop """owa""" +197 57 negative_sampler """basic""" +197 57 evaluator """rankbased""" +197 58 dataset """wn18rr""" +197 58 model """distmult""" +197 58 loss """softplus""" +197 58 regularizer """no""" +197 58 optimizer """adam""" +197 58 training_loop """owa""" +197 58 negative_sampler """basic""" +197 58 evaluator """rankbased""" +197 59 dataset """wn18rr""" +197 59 model """distmult""" +197 59 loss """softplus""" +197 59 regularizer """no""" +197 59 optimizer """adam""" +197 59 training_loop """owa""" +197 59 negative_sampler """basic""" +197 59 evaluator """rankbased""" +197 60 dataset """wn18rr""" +197 60 model """distmult""" +197 60 loss """softplus""" +197 60 regularizer """no""" +197 60 optimizer """adam""" +197 60 training_loop """owa""" +197 60 negative_sampler """basic""" +197 60 evaluator """rankbased""" +197 61 dataset """wn18rr""" +197 61 model """distmult""" +197 61 loss """softplus""" +197 61 regularizer """no""" +197 61 optimizer """adam""" +197 61 training_loop """owa""" +197 61 negative_sampler """basic""" +197 61 evaluator """rankbased""" +197 62 dataset """wn18rr""" +197 62 model """distmult""" +197 62 loss """softplus""" +197 62 regularizer """no""" +197 62 optimizer """adam""" +197 62 training_loop """owa""" +197 62 negative_sampler """basic""" +197 62 evaluator """rankbased""" +197 63 dataset """wn18rr""" +197 63 model """distmult""" +197 63 loss """softplus""" +197 63 regularizer """no""" +197 63 optimizer """adam""" +197 63 training_loop """owa""" +197 63 negative_sampler """basic""" +197 63 evaluator """rankbased""" +197 64 dataset """wn18rr""" +197 64 model """distmult""" +197 64 loss """softplus""" 
+197 64 regularizer """no""" +197 64 optimizer """adam""" +197 64 training_loop """owa""" +197 64 negative_sampler """basic""" +197 64 evaluator """rankbased""" +197 65 dataset """wn18rr""" +197 65 model """distmult""" +197 65 loss """softplus""" +197 65 regularizer """no""" +197 65 optimizer """adam""" +197 65 training_loop """owa""" +197 65 negative_sampler """basic""" +197 65 evaluator """rankbased""" +197 66 dataset """wn18rr""" +197 66 model """distmult""" +197 66 loss """softplus""" +197 66 regularizer """no""" +197 66 optimizer """adam""" +197 66 training_loop """owa""" +197 66 negative_sampler """basic""" +197 66 evaluator """rankbased""" +197 67 dataset """wn18rr""" +197 67 model """distmult""" +197 67 loss """softplus""" +197 67 regularizer """no""" +197 67 optimizer """adam""" +197 67 training_loop """owa""" +197 67 negative_sampler """basic""" +197 67 evaluator """rankbased""" +198 1 model.embedding_dim 1.0 +198 1 optimizer.lr 0.058865643394842584 +198 1 negative_sampler.num_negs_per_pos 78.0 +198 1 training.batch_size 2.0 +198 2 model.embedding_dim 2.0 +198 2 optimizer.lr 0.07827688686630087 +198 2 negative_sampler.num_negs_per_pos 55.0 +198 2 training.batch_size 1.0 +198 3 model.embedding_dim 1.0 +198 3 optimizer.lr 0.0016029122117727516 +198 3 negative_sampler.num_negs_per_pos 17.0 +198 3 training.batch_size 2.0 +198 4 model.embedding_dim 1.0 +198 4 optimizer.lr 0.06611894864412579 +198 4 negative_sampler.num_negs_per_pos 15.0 +198 4 training.batch_size 1.0 +198 5 model.embedding_dim 2.0 +198 5 optimizer.lr 0.02570469821093938 +198 5 negative_sampler.num_negs_per_pos 77.0 +198 5 training.batch_size 1.0 +198 6 model.embedding_dim 0.0 +198 6 optimizer.lr 0.027926427209536444 +198 6 negative_sampler.num_negs_per_pos 37.0 +198 6 training.batch_size 1.0 +198 7 model.embedding_dim 2.0 +198 7 optimizer.lr 0.002283415931469103 +198 7 negative_sampler.num_negs_per_pos 62.0 +198 7 training.batch_size 1.0 +198 8 model.embedding_dim 2.0 +198 8 optimizer.lr 
0.01790492037413802 +198 8 negative_sampler.num_negs_per_pos 83.0 +198 8 training.batch_size 1.0 +198 9 model.embedding_dim 0.0 +198 9 optimizer.lr 0.004372526533242378 +198 9 negative_sampler.num_negs_per_pos 44.0 +198 9 training.batch_size 0.0 +198 10 model.embedding_dim 2.0 +198 10 optimizer.lr 0.007481247199172398 +198 10 negative_sampler.num_negs_per_pos 26.0 +198 10 training.batch_size 1.0 +198 11 model.embedding_dim 2.0 +198 11 optimizer.lr 0.04590498210417388 +198 11 negative_sampler.num_negs_per_pos 99.0 +198 11 training.batch_size 0.0 +198 12 model.embedding_dim 0.0 +198 12 optimizer.lr 0.029584565126363344 +198 12 negative_sampler.num_negs_per_pos 56.0 +198 12 training.batch_size 1.0 +198 13 model.embedding_dim 0.0 +198 13 optimizer.lr 0.02908631703299858 +198 13 negative_sampler.num_negs_per_pos 5.0 +198 13 training.batch_size 2.0 +198 14 model.embedding_dim 1.0 +198 14 optimizer.lr 0.013674279169597788 +198 14 negative_sampler.num_negs_per_pos 60.0 +198 14 training.batch_size 1.0 +198 15 model.embedding_dim 1.0 +198 15 optimizer.lr 0.03500655043745861 +198 15 negative_sampler.num_negs_per_pos 83.0 +198 15 training.batch_size 0.0 +198 16 model.embedding_dim 1.0 +198 16 optimizer.lr 0.0010093414364899598 +198 16 negative_sampler.num_negs_per_pos 5.0 +198 16 training.batch_size 1.0 +198 17 model.embedding_dim 2.0 +198 17 optimizer.lr 0.0844487545160854 +198 17 negative_sampler.num_negs_per_pos 6.0 +198 17 training.batch_size 1.0 +198 18 model.embedding_dim 0.0 +198 18 optimizer.lr 0.03212881233057707 +198 18 negative_sampler.num_negs_per_pos 23.0 +198 18 training.batch_size 1.0 +198 19 model.embedding_dim 0.0 +198 19 optimizer.lr 0.003169939168827549 +198 19 negative_sampler.num_negs_per_pos 67.0 +198 19 training.batch_size 0.0 +198 20 model.embedding_dim 1.0 +198 20 optimizer.lr 0.031061262582632605 +198 20 negative_sampler.num_negs_per_pos 25.0 +198 20 training.batch_size 0.0 +198 21 model.embedding_dim 2.0 +198 21 optimizer.lr 0.004642444252785949 +198 
21 negative_sampler.num_negs_per_pos 78.0 +198 21 training.batch_size 1.0 +198 22 model.embedding_dim 0.0 +198 22 optimizer.lr 0.002475874296900087 +198 22 negative_sampler.num_negs_per_pos 85.0 +198 22 training.batch_size 2.0 +198 23 model.embedding_dim 0.0 +198 23 optimizer.lr 0.009998816754646412 +198 23 negative_sampler.num_negs_per_pos 13.0 +198 23 training.batch_size 2.0 +198 24 model.embedding_dim 2.0 +198 24 optimizer.lr 0.04287097886191183 +198 24 negative_sampler.num_negs_per_pos 35.0 +198 24 training.batch_size 0.0 +198 25 model.embedding_dim 0.0 +198 25 optimizer.lr 0.0016008799952611404 +198 25 negative_sampler.num_negs_per_pos 53.0 +198 25 training.batch_size 1.0 +198 26 model.embedding_dim 1.0 +198 26 optimizer.lr 0.018444018156962266 +198 26 negative_sampler.num_negs_per_pos 42.0 +198 26 training.batch_size 0.0 +198 27 model.embedding_dim 1.0 +198 27 optimizer.lr 0.02837734975932265 +198 27 negative_sampler.num_negs_per_pos 57.0 +198 27 training.batch_size 0.0 +198 28 model.embedding_dim 0.0 +198 28 optimizer.lr 0.006090922911343574 +198 28 negative_sampler.num_negs_per_pos 59.0 +198 28 training.batch_size 1.0 +198 29 model.embedding_dim 0.0 +198 29 optimizer.lr 0.06076388269778895 +198 29 negative_sampler.num_negs_per_pos 80.0 +198 29 training.batch_size 2.0 +198 30 model.embedding_dim 2.0 +198 30 optimizer.lr 0.07163085997095518 +198 30 negative_sampler.num_negs_per_pos 97.0 +198 30 training.batch_size 1.0 +198 31 model.embedding_dim 2.0 +198 31 optimizer.lr 0.0017777065698936024 +198 31 negative_sampler.num_negs_per_pos 62.0 +198 31 training.batch_size 1.0 +198 32 model.embedding_dim 2.0 +198 32 optimizer.lr 0.04727448122473751 +198 32 negative_sampler.num_negs_per_pos 37.0 +198 32 training.batch_size 2.0 +198 33 model.embedding_dim 0.0 +198 33 optimizer.lr 0.05770198596340399 +198 33 negative_sampler.num_negs_per_pos 67.0 +198 33 training.batch_size 0.0 +198 34 model.embedding_dim 2.0 +198 34 optimizer.lr 0.01638378772730313 +198 34 
negative_sampler.num_negs_per_pos 63.0 +198 34 training.batch_size 1.0 +198 35 model.embedding_dim 0.0 +198 35 optimizer.lr 0.007697014403061349 +198 35 negative_sampler.num_negs_per_pos 53.0 +198 35 training.batch_size 2.0 +198 36 model.embedding_dim 1.0 +198 36 optimizer.lr 0.007855146827453339 +198 36 negative_sampler.num_negs_per_pos 50.0 +198 36 training.batch_size 1.0 +198 37 model.embedding_dim 1.0 +198 37 optimizer.lr 0.00726465074036574 +198 37 negative_sampler.num_negs_per_pos 93.0 +198 37 training.batch_size 0.0 +198 38 model.embedding_dim 2.0 +198 38 optimizer.lr 0.08428278816054698 +198 38 negative_sampler.num_negs_per_pos 4.0 +198 38 training.batch_size 0.0 +198 39 model.embedding_dim 2.0 +198 39 optimizer.lr 0.015481521374173883 +198 39 negative_sampler.num_negs_per_pos 81.0 +198 39 training.batch_size 1.0 +198 40 model.embedding_dim 1.0 +198 40 optimizer.lr 0.008180699688267666 +198 40 negative_sampler.num_negs_per_pos 55.0 +198 40 training.batch_size 2.0 +198 41 model.embedding_dim 1.0 +198 41 optimizer.lr 0.007302566717036158 +198 41 negative_sampler.num_negs_per_pos 77.0 +198 41 training.batch_size 0.0 +198 42 model.embedding_dim 2.0 +198 42 optimizer.lr 0.011966827520673495 +198 42 negative_sampler.num_negs_per_pos 7.0 +198 42 training.batch_size 1.0 +198 43 model.embedding_dim 1.0 +198 43 optimizer.lr 0.005337115941365853 +198 43 negative_sampler.num_negs_per_pos 3.0 +198 43 training.batch_size 2.0 +198 44 model.embedding_dim 1.0 +198 44 optimizer.lr 0.08612978212381481 +198 44 negative_sampler.num_negs_per_pos 36.0 +198 44 training.batch_size 1.0 +198 45 model.embedding_dim 2.0 +198 45 optimizer.lr 0.0015791693615639132 +198 45 negative_sampler.num_negs_per_pos 46.0 +198 45 training.batch_size 2.0 +198 46 model.embedding_dim 2.0 +198 46 optimizer.lr 0.08339417320755711 +198 46 negative_sampler.num_negs_per_pos 51.0 +198 46 training.batch_size 1.0 +198 47 model.embedding_dim 2.0 +198 47 optimizer.lr 0.09252959204202356 +198 47 
negative_sampler.num_negs_per_pos 53.0 +198 47 training.batch_size 0.0 +198 48 model.embedding_dim 0.0 +198 48 optimizer.lr 0.001582340965662915 +198 48 negative_sampler.num_negs_per_pos 26.0 +198 48 training.batch_size 0.0 +198 49 model.embedding_dim 1.0 +198 49 optimizer.lr 0.009565689114026618 +198 49 negative_sampler.num_negs_per_pos 80.0 +198 49 training.batch_size 1.0 +198 50 model.embedding_dim 2.0 +198 50 optimizer.lr 0.0011649196194408794 +198 50 negative_sampler.num_negs_per_pos 24.0 +198 50 training.batch_size 0.0 +198 51 model.embedding_dim 0.0 +198 51 optimizer.lr 0.031152619285609227 +198 51 negative_sampler.num_negs_per_pos 90.0 +198 51 training.batch_size 0.0 +198 52 model.embedding_dim 2.0 +198 52 optimizer.lr 0.00136996714735465 +198 52 negative_sampler.num_negs_per_pos 58.0 +198 52 training.batch_size 0.0 +198 53 model.embedding_dim 0.0 +198 53 optimizer.lr 0.05595549894353123 +198 53 negative_sampler.num_negs_per_pos 17.0 +198 53 training.batch_size 0.0 +198 54 model.embedding_dim 1.0 +198 54 optimizer.lr 0.0016706617992922935 +198 54 negative_sampler.num_negs_per_pos 27.0 +198 54 training.batch_size 2.0 +198 55 model.embedding_dim 0.0 +198 55 optimizer.lr 0.003944096107504857 +198 55 negative_sampler.num_negs_per_pos 98.0 +198 55 training.batch_size 0.0 +198 56 model.embedding_dim 1.0 +198 56 optimizer.lr 0.09917446651414763 +198 56 negative_sampler.num_negs_per_pos 2.0 +198 56 training.batch_size 1.0 +198 57 model.embedding_dim 2.0 +198 57 optimizer.lr 0.03400829073367979 +198 57 negative_sampler.num_negs_per_pos 96.0 +198 57 training.batch_size 1.0 +198 58 model.embedding_dim 2.0 +198 58 optimizer.lr 0.01303784355215451 +198 58 negative_sampler.num_negs_per_pos 34.0 +198 58 training.batch_size 1.0 +198 59 model.embedding_dim 0.0 +198 59 optimizer.lr 0.0013322446781823395 +198 59 negative_sampler.num_negs_per_pos 78.0 +198 59 training.batch_size 0.0 +198 60 model.embedding_dim 0.0 +198 60 optimizer.lr 0.046498245690568596 +198 60 
negative_sampler.num_negs_per_pos 96.0 +198 60 training.batch_size 1.0 +198 61 model.embedding_dim 0.0 +198 61 optimizer.lr 0.09180833166583005 +198 61 negative_sampler.num_negs_per_pos 46.0 +198 61 training.batch_size 0.0 +198 62 model.embedding_dim 2.0 +198 62 optimizer.lr 0.012223313276976039 +198 62 negative_sampler.num_negs_per_pos 80.0 +198 62 training.batch_size 0.0 +198 63 model.embedding_dim 1.0 +198 63 optimizer.lr 0.015384401399262016 +198 63 negative_sampler.num_negs_per_pos 81.0 +198 63 training.batch_size 1.0 +198 64 model.embedding_dim 1.0 +198 64 optimizer.lr 0.0077756123098679285 +198 64 negative_sampler.num_negs_per_pos 34.0 +198 64 training.batch_size 0.0 +198 65 model.embedding_dim 1.0 +198 65 optimizer.lr 0.0026833948086390147 +198 65 negative_sampler.num_negs_per_pos 61.0 +198 65 training.batch_size 1.0 +198 66 model.embedding_dim 1.0 +198 66 optimizer.lr 0.01417158198503873 +198 66 negative_sampler.num_negs_per_pos 11.0 +198 66 training.batch_size 1.0 +198 67 model.embedding_dim 1.0 +198 67 optimizer.lr 0.05294489954760852 +198 67 negative_sampler.num_negs_per_pos 53.0 +198 67 training.batch_size 0.0 +198 68 model.embedding_dim 2.0 +198 68 optimizer.lr 0.015661368406722307 +198 68 negative_sampler.num_negs_per_pos 58.0 +198 68 training.batch_size 2.0 +198 69 model.embedding_dim 0.0 +198 69 optimizer.lr 0.00255520003526506 +198 69 negative_sampler.num_negs_per_pos 24.0 +198 69 training.batch_size 0.0 +198 70 model.embedding_dim 0.0 +198 70 optimizer.lr 0.010677524206469174 +198 70 negative_sampler.num_negs_per_pos 79.0 +198 70 training.batch_size 1.0 +198 71 model.embedding_dim 2.0 +198 71 optimizer.lr 0.0333883227996759 +198 71 negative_sampler.num_negs_per_pos 81.0 +198 71 training.batch_size 0.0 +198 72 model.embedding_dim 2.0 +198 72 optimizer.lr 0.026223156870820843 +198 72 negative_sampler.num_negs_per_pos 32.0 +198 72 training.batch_size 2.0 +198 73 model.embedding_dim 0.0 +198 73 optimizer.lr 0.03233223931592516 +198 73 
negative_sampler.num_negs_per_pos 0.0 +198 73 training.batch_size 0.0 +198 74 model.embedding_dim 0.0 +198 74 optimizer.lr 0.04558624003664856 +198 74 negative_sampler.num_negs_per_pos 9.0 +198 74 training.batch_size 1.0 +198 75 model.embedding_dim 0.0 +198 75 optimizer.lr 0.008627695435239868 +198 75 negative_sampler.num_negs_per_pos 46.0 +198 75 training.batch_size 0.0 +198 76 model.embedding_dim 2.0 +198 76 optimizer.lr 0.03530196744768469 +198 76 negative_sampler.num_negs_per_pos 85.0 +198 76 training.batch_size 0.0 +198 77 model.embedding_dim 0.0 +198 77 optimizer.lr 0.022348379282084566 +198 77 negative_sampler.num_negs_per_pos 9.0 +198 77 training.batch_size 0.0 +198 78 model.embedding_dim 1.0 +198 78 optimizer.lr 0.09891582863271532 +198 78 negative_sampler.num_negs_per_pos 85.0 +198 78 training.batch_size 0.0 +198 79 model.embedding_dim 1.0 +198 79 optimizer.lr 0.00348766172703444 +198 79 negative_sampler.num_negs_per_pos 7.0 +198 79 training.batch_size 2.0 +198 80 model.embedding_dim 2.0 +198 80 optimizer.lr 0.0032626513468852966 +198 80 negative_sampler.num_negs_per_pos 17.0 +198 80 training.batch_size 0.0 +198 81 model.embedding_dim 2.0 +198 81 optimizer.lr 0.05422365460303088 +198 81 negative_sampler.num_negs_per_pos 39.0 +198 81 training.batch_size 2.0 +198 82 model.embedding_dim 2.0 +198 82 optimizer.lr 0.06482026809741921 +198 82 negative_sampler.num_negs_per_pos 87.0 +198 82 training.batch_size 1.0 +198 83 model.embedding_dim 2.0 +198 83 optimizer.lr 0.09646281546602518 +198 83 negative_sampler.num_negs_per_pos 62.0 +198 83 training.batch_size 1.0 +198 84 model.embedding_dim 1.0 +198 84 optimizer.lr 0.001538749122686606 +198 84 negative_sampler.num_negs_per_pos 14.0 +198 84 training.batch_size 1.0 +198 85 model.embedding_dim 0.0 +198 85 optimizer.lr 0.052498663612413156 +198 85 negative_sampler.num_negs_per_pos 73.0 +198 85 training.batch_size 1.0 +198 86 model.embedding_dim 0.0 +198 86 optimizer.lr 0.0019804353241879625 +198 86 
negative_sampler.num_negs_per_pos 61.0 +198 86 training.batch_size 2.0 +198 87 model.embedding_dim 2.0 +198 87 optimizer.lr 0.028271990967766183 +198 87 negative_sampler.num_negs_per_pos 21.0 +198 87 training.batch_size 2.0 +198 88 model.embedding_dim 1.0 +198 88 optimizer.lr 0.036906950013128895 +198 88 negative_sampler.num_negs_per_pos 29.0 +198 88 training.batch_size 1.0 +198 89 model.embedding_dim 2.0 +198 89 optimizer.lr 0.018372198091285222 +198 89 negative_sampler.num_negs_per_pos 76.0 +198 89 training.batch_size 1.0 +198 90 model.embedding_dim 0.0 +198 90 optimizer.lr 0.07101609478056202 +198 90 negative_sampler.num_negs_per_pos 67.0 +198 90 training.batch_size 2.0 +198 91 model.embedding_dim 2.0 +198 91 optimizer.lr 0.0013672164419022035 +198 91 negative_sampler.num_negs_per_pos 30.0 +198 91 training.batch_size 2.0 +198 92 model.embedding_dim 2.0 +198 92 optimizer.lr 0.0067283342616929216 +198 92 negative_sampler.num_negs_per_pos 83.0 +198 92 training.batch_size 0.0 +198 93 model.embedding_dim 2.0 +198 93 optimizer.lr 0.0015563894829389484 +198 93 negative_sampler.num_negs_per_pos 67.0 +198 93 training.batch_size 1.0 +198 94 model.embedding_dim 1.0 +198 94 optimizer.lr 0.0013116940596494915 +198 94 negative_sampler.num_negs_per_pos 77.0 +198 94 training.batch_size 0.0 +198 95 model.embedding_dim 1.0 +198 95 optimizer.lr 0.0038003228295515243 +198 95 negative_sampler.num_negs_per_pos 78.0 +198 95 training.batch_size 0.0 +198 96 model.embedding_dim 0.0 +198 96 optimizer.lr 0.026440630902091693 +198 96 negative_sampler.num_negs_per_pos 51.0 +198 96 training.batch_size 0.0 +198 97 model.embedding_dim 1.0 +198 97 optimizer.lr 0.011041105609942582 +198 97 negative_sampler.num_negs_per_pos 14.0 +198 97 training.batch_size 1.0 +198 98 model.embedding_dim 1.0 +198 98 optimizer.lr 0.007441115710917459 +198 98 negative_sampler.num_negs_per_pos 42.0 +198 98 training.batch_size 2.0 +198 99 model.embedding_dim 1.0 +198 99 optimizer.lr 0.008176235903860246 +198 99 
negative_sampler.num_negs_per_pos 60.0 +198 99 training.batch_size 1.0 +198 100 model.embedding_dim 2.0 +198 100 optimizer.lr 0.0010175868515956473 +198 100 negative_sampler.num_negs_per_pos 21.0 +198 100 training.batch_size 1.0 +198 1 dataset """wn18rr""" +198 1 model """distmult""" +198 1 loss """bceaftersigmoid""" +198 1 regularizer """no""" +198 1 optimizer """adam""" +198 1 training_loop """owa""" +198 1 negative_sampler """basic""" +198 1 evaluator """rankbased""" +198 2 dataset """wn18rr""" +198 2 model """distmult""" +198 2 loss """bceaftersigmoid""" +198 2 regularizer """no""" +198 2 optimizer """adam""" +198 2 training_loop """owa""" +198 2 negative_sampler """basic""" +198 2 evaluator """rankbased""" +198 3 dataset """wn18rr""" +198 3 model """distmult""" +198 3 loss """bceaftersigmoid""" +198 3 regularizer """no""" +198 3 optimizer """adam""" +198 3 training_loop """owa""" +198 3 negative_sampler """basic""" +198 3 evaluator """rankbased""" +198 4 dataset """wn18rr""" +198 4 model """distmult""" +198 4 loss """bceaftersigmoid""" +198 4 regularizer """no""" +198 4 optimizer """adam""" +198 4 training_loop """owa""" +198 4 negative_sampler """basic""" +198 4 evaluator """rankbased""" +198 5 dataset """wn18rr""" +198 5 model """distmult""" +198 5 loss """bceaftersigmoid""" +198 5 regularizer """no""" +198 5 optimizer """adam""" +198 5 training_loop """owa""" +198 5 negative_sampler """basic""" +198 5 evaluator """rankbased""" +198 6 dataset """wn18rr""" +198 6 model """distmult""" +198 6 loss """bceaftersigmoid""" +198 6 regularizer """no""" +198 6 optimizer """adam""" +198 6 training_loop """owa""" +198 6 negative_sampler """basic""" +198 6 evaluator """rankbased""" +198 7 dataset """wn18rr""" +198 7 model """distmult""" +198 7 loss """bceaftersigmoid""" +198 7 regularizer """no""" +198 7 optimizer """adam""" +198 7 training_loop """owa""" +198 7 negative_sampler """basic""" +198 7 evaluator """rankbased""" +198 8 dataset """wn18rr""" +198 8 model 
"""distmult""" +198 8 loss """bceaftersigmoid""" +198 8 regularizer """no""" +198 8 optimizer """adam""" +198 8 training_loop """owa""" +198 8 negative_sampler """basic""" +198 8 evaluator """rankbased""" +198 9 dataset """wn18rr""" +198 9 model """distmult""" +198 9 loss """bceaftersigmoid""" +198 9 regularizer """no""" +198 9 optimizer """adam""" +198 9 training_loop """owa""" +198 9 negative_sampler """basic""" +198 9 evaluator """rankbased""" +198 10 dataset """wn18rr""" +198 10 model """distmult""" +198 10 loss """bceaftersigmoid""" +198 10 regularizer """no""" +198 10 optimizer """adam""" +198 10 training_loop """owa""" +198 10 negative_sampler """basic""" +198 10 evaluator """rankbased""" +198 11 dataset """wn18rr""" +198 11 model """distmult""" +198 11 loss """bceaftersigmoid""" +198 11 regularizer """no""" +198 11 optimizer """adam""" +198 11 training_loop """owa""" +198 11 negative_sampler """basic""" +198 11 evaluator """rankbased""" +198 12 dataset """wn18rr""" +198 12 model """distmult""" +198 12 loss """bceaftersigmoid""" +198 12 regularizer """no""" +198 12 optimizer """adam""" +198 12 training_loop """owa""" +198 12 negative_sampler """basic""" +198 12 evaluator """rankbased""" +198 13 dataset """wn18rr""" +198 13 model """distmult""" +198 13 loss """bceaftersigmoid""" +198 13 regularizer """no""" +198 13 optimizer """adam""" +198 13 training_loop """owa""" +198 13 negative_sampler """basic""" +198 13 evaluator """rankbased""" +198 14 dataset """wn18rr""" +198 14 model """distmult""" +198 14 loss """bceaftersigmoid""" +198 14 regularizer """no""" +198 14 optimizer """adam""" +198 14 training_loop """owa""" +198 14 negative_sampler """basic""" +198 14 evaluator """rankbased""" +198 15 dataset """wn18rr""" +198 15 model """distmult""" +198 15 loss """bceaftersigmoid""" +198 15 regularizer """no""" +198 15 optimizer """adam""" +198 15 training_loop """owa""" +198 15 negative_sampler """basic""" +198 15 evaluator """rankbased""" +198 16 dataset 
"""wn18rr""" +198 16 model """distmult""" +198 16 loss """bceaftersigmoid""" +198 16 regularizer """no""" +198 16 optimizer """adam""" +198 16 training_loop """owa""" +198 16 negative_sampler """basic""" +198 16 evaluator """rankbased""" +198 17 dataset """wn18rr""" +198 17 model """distmult""" +198 17 loss """bceaftersigmoid""" +198 17 regularizer """no""" +198 17 optimizer """adam""" +198 17 training_loop """owa""" +198 17 negative_sampler """basic""" +198 17 evaluator """rankbased""" +198 18 dataset """wn18rr""" +198 18 model """distmult""" +198 18 loss """bceaftersigmoid""" +198 18 regularizer """no""" +198 18 optimizer """adam""" +198 18 training_loop """owa""" +198 18 negative_sampler """basic""" +198 18 evaluator """rankbased""" +198 19 dataset """wn18rr""" +198 19 model """distmult""" +198 19 loss """bceaftersigmoid""" +198 19 regularizer """no""" +198 19 optimizer """adam""" +198 19 training_loop """owa""" +198 19 negative_sampler """basic""" +198 19 evaluator """rankbased""" +198 20 dataset """wn18rr""" +198 20 model """distmult""" +198 20 loss """bceaftersigmoid""" +198 20 regularizer """no""" +198 20 optimizer """adam""" +198 20 training_loop """owa""" +198 20 negative_sampler """basic""" +198 20 evaluator """rankbased""" +198 21 dataset """wn18rr""" +198 21 model """distmult""" +198 21 loss """bceaftersigmoid""" +198 21 regularizer """no""" +198 21 optimizer """adam""" +198 21 training_loop """owa""" +198 21 negative_sampler """basic""" +198 21 evaluator """rankbased""" +198 22 dataset """wn18rr""" +198 22 model """distmult""" +198 22 loss """bceaftersigmoid""" +198 22 regularizer """no""" +198 22 optimizer """adam""" +198 22 training_loop """owa""" +198 22 negative_sampler """basic""" +198 22 evaluator """rankbased""" +198 23 dataset """wn18rr""" +198 23 model """distmult""" +198 23 loss """bceaftersigmoid""" +198 23 regularizer """no""" +198 23 optimizer """adam""" +198 23 training_loop """owa""" +198 23 negative_sampler """basic""" +198 23 evaluator 
"""rankbased""" +198 24 dataset """wn18rr""" +198 24 model """distmult""" +198 24 loss """bceaftersigmoid""" +198 24 regularizer """no""" +198 24 optimizer """adam""" +198 24 training_loop """owa""" +198 24 negative_sampler """basic""" +198 24 evaluator """rankbased""" +198 25 dataset """wn18rr""" +198 25 model """distmult""" +198 25 loss """bceaftersigmoid""" +198 25 regularizer """no""" +198 25 optimizer """adam""" +198 25 training_loop """owa""" +198 25 negative_sampler """basic""" +198 25 evaluator """rankbased""" +198 26 dataset """wn18rr""" +198 26 model """distmult""" +198 26 loss """bceaftersigmoid""" +198 26 regularizer """no""" +198 26 optimizer """adam""" +198 26 training_loop """owa""" +198 26 negative_sampler """basic""" +198 26 evaluator """rankbased""" +198 27 dataset """wn18rr""" +198 27 model """distmult""" +198 27 loss """bceaftersigmoid""" +198 27 regularizer """no""" +198 27 optimizer """adam""" +198 27 training_loop """owa""" +198 27 negative_sampler """basic""" +198 27 evaluator """rankbased""" +198 28 dataset """wn18rr""" +198 28 model """distmult""" +198 28 loss """bceaftersigmoid""" +198 28 regularizer """no""" +198 28 optimizer """adam""" +198 28 training_loop """owa""" +198 28 negative_sampler """basic""" +198 28 evaluator """rankbased""" +198 29 dataset """wn18rr""" +198 29 model """distmult""" +198 29 loss """bceaftersigmoid""" +198 29 regularizer """no""" +198 29 optimizer """adam""" +198 29 training_loop """owa""" +198 29 negative_sampler """basic""" +198 29 evaluator """rankbased""" +198 30 dataset """wn18rr""" +198 30 model """distmult""" +198 30 loss """bceaftersigmoid""" +198 30 regularizer """no""" +198 30 optimizer """adam""" +198 30 training_loop """owa""" +198 30 negative_sampler """basic""" +198 30 evaluator """rankbased""" +198 31 dataset """wn18rr""" +198 31 model """distmult""" +198 31 loss """bceaftersigmoid""" +198 31 regularizer """no""" +198 31 optimizer """adam""" +198 31 training_loop """owa""" +198 31 
negative_sampler """basic""" +198 31 evaluator """rankbased""" +198 32 dataset """wn18rr""" +198 32 model """distmult""" +198 32 loss """bceaftersigmoid""" +198 32 regularizer """no""" +198 32 optimizer """adam""" +198 32 training_loop """owa""" +198 32 negative_sampler """basic""" +198 32 evaluator """rankbased""" +198 33 dataset """wn18rr""" +198 33 model """distmult""" +198 33 loss """bceaftersigmoid""" +198 33 regularizer """no""" +198 33 optimizer """adam""" +198 33 training_loop """owa""" +198 33 negative_sampler """basic""" +198 33 evaluator """rankbased""" +198 34 dataset """wn18rr""" +198 34 model """distmult""" +198 34 loss """bceaftersigmoid""" +198 34 regularizer """no""" +198 34 optimizer """adam""" +198 34 training_loop """owa""" +198 34 negative_sampler """basic""" +198 34 evaluator """rankbased""" +198 35 dataset """wn18rr""" +198 35 model """distmult""" +198 35 loss """bceaftersigmoid""" +198 35 regularizer """no""" +198 35 optimizer """adam""" +198 35 training_loop """owa""" +198 35 negative_sampler """basic""" +198 35 evaluator """rankbased""" +198 36 dataset """wn18rr""" +198 36 model """distmult""" +198 36 loss """bceaftersigmoid""" +198 36 regularizer """no""" +198 36 optimizer """adam""" +198 36 training_loop """owa""" +198 36 negative_sampler """basic""" +198 36 evaluator """rankbased""" +198 37 dataset """wn18rr""" +198 37 model """distmult""" +198 37 loss """bceaftersigmoid""" +198 37 regularizer """no""" +198 37 optimizer """adam""" +198 37 training_loop """owa""" +198 37 negative_sampler """basic""" +198 37 evaluator """rankbased""" +198 38 dataset """wn18rr""" +198 38 model """distmult""" +198 38 loss """bceaftersigmoid""" +198 38 regularizer """no""" +198 38 optimizer """adam""" +198 38 training_loop """owa""" +198 38 negative_sampler """basic""" +198 38 evaluator """rankbased""" +198 39 dataset """wn18rr""" +198 39 model """distmult""" +198 39 loss """bceaftersigmoid""" +198 39 regularizer """no""" +198 39 optimizer """adam""" +198 39 
training_loop """owa""" +198 39 negative_sampler """basic""" +198 39 evaluator """rankbased""" +198 40 dataset """wn18rr""" +198 40 model """distmult""" +198 40 loss """bceaftersigmoid""" +198 40 regularizer """no""" +198 40 optimizer """adam""" +198 40 training_loop """owa""" +198 40 negative_sampler """basic""" +198 40 evaluator """rankbased""" +198 41 dataset """wn18rr""" +198 41 model """distmult""" +198 41 loss """bceaftersigmoid""" +198 41 regularizer """no""" +198 41 optimizer """adam""" +198 41 training_loop """owa""" +198 41 negative_sampler """basic""" +198 41 evaluator """rankbased""" +198 42 dataset """wn18rr""" +198 42 model """distmult""" +198 42 loss """bceaftersigmoid""" +198 42 regularizer """no""" +198 42 optimizer """adam""" +198 42 training_loop """owa""" +198 42 negative_sampler """basic""" +198 42 evaluator """rankbased""" +198 43 dataset """wn18rr""" +198 43 model """distmult""" +198 43 loss """bceaftersigmoid""" +198 43 regularizer """no""" +198 43 optimizer """adam""" +198 43 training_loop """owa""" +198 43 negative_sampler """basic""" +198 43 evaluator """rankbased""" +198 44 dataset """wn18rr""" +198 44 model """distmult""" +198 44 loss """bceaftersigmoid""" +198 44 regularizer """no""" +198 44 optimizer """adam""" +198 44 training_loop """owa""" +198 44 negative_sampler """basic""" +198 44 evaluator """rankbased""" +198 45 dataset """wn18rr""" +198 45 model """distmult""" +198 45 loss """bceaftersigmoid""" +198 45 regularizer """no""" +198 45 optimizer """adam""" +198 45 training_loop """owa""" +198 45 negative_sampler """basic""" +198 45 evaluator """rankbased""" +198 46 dataset """wn18rr""" +198 46 model """distmult""" +198 46 loss """bceaftersigmoid""" +198 46 regularizer """no""" +198 46 optimizer """adam""" +198 46 training_loop """owa""" +198 46 negative_sampler """basic""" +198 46 evaluator """rankbased""" +198 47 dataset """wn18rr""" +198 47 model """distmult""" +198 47 loss """bceaftersigmoid""" +198 47 regularizer """no""" +198 
47 optimizer """adam""" +198 47 training_loop """owa""" +198 47 negative_sampler """basic""" +198 47 evaluator """rankbased""" +198 48 dataset """wn18rr""" +198 48 model """distmult""" +198 48 loss """bceaftersigmoid""" +198 48 regularizer """no""" +198 48 optimizer """adam""" +198 48 training_loop """owa""" +198 48 negative_sampler """basic""" +198 48 evaluator """rankbased""" +198 49 dataset """wn18rr""" +198 49 model """distmult""" +198 49 loss """bceaftersigmoid""" +198 49 regularizer """no""" +198 49 optimizer """adam""" +198 49 training_loop """owa""" +198 49 negative_sampler """basic""" +198 49 evaluator """rankbased""" +198 50 dataset """wn18rr""" +198 50 model """distmult""" +198 50 loss """bceaftersigmoid""" +198 50 regularizer """no""" +198 50 optimizer """adam""" +198 50 training_loop """owa""" +198 50 negative_sampler """basic""" +198 50 evaluator """rankbased""" +198 51 dataset """wn18rr""" +198 51 model """distmult""" +198 51 loss """bceaftersigmoid""" +198 51 regularizer """no""" +198 51 optimizer """adam""" +198 51 training_loop """owa""" +198 51 negative_sampler """basic""" +198 51 evaluator """rankbased""" +198 52 dataset """wn18rr""" +198 52 model """distmult""" +198 52 loss """bceaftersigmoid""" +198 52 regularizer """no""" +198 52 optimizer """adam""" +198 52 training_loop """owa""" +198 52 negative_sampler """basic""" +198 52 evaluator """rankbased""" +198 53 dataset """wn18rr""" +198 53 model """distmult""" +198 53 loss """bceaftersigmoid""" +198 53 regularizer """no""" +198 53 optimizer """adam""" +198 53 training_loop """owa""" +198 53 negative_sampler """basic""" +198 53 evaluator """rankbased""" +198 54 dataset """wn18rr""" +198 54 model """distmult""" +198 54 loss """bceaftersigmoid""" +198 54 regularizer """no""" +198 54 optimizer """adam""" +198 54 training_loop """owa""" +198 54 negative_sampler """basic""" +198 54 evaluator """rankbased""" +198 55 dataset """wn18rr""" +198 55 model """distmult""" +198 55 loss """bceaftersigmoid""" 
+198 55 regularizer """no""" +198 55 optimizer """adam""" +198 55 training_loop """owa""" +198 55 negative_sampler """basic""" +198 55 evaluator """rankbased""" +198 56 dataset """wn18rr""" +198 56 model """distmult""" +198 56 loss """bceaftersigmoid""" +198 56 regularizer """no""" +198 56 optimizer """adam""" +198 56 training_loop """owa""" +198 56 negative_sampler """basic""" +198 56 evaluator """rankbased""" +198 57 dataset """wn18rr""" +198 57 model """distmult""" +198 57 loss """bceaftersigmoid""" +198 57 regularizer """no""" +198 57 optimizer """adam""" +198 57 training_loop """owa""" +198 57 negative_sampler """basic""" +198 57 evaluator """rankbased""" +198 58 dataset """wn18rr""" +198 58 model """distmult""" +198 58 loss """bceaftersigmoid""" +198 58 regularizer """no""" +198 58 optimizer """adam""" +198 58 training_loop """owa""" +198 58 negative_sampler """basic""" +198 58 evaluator """rankbased""" +198 59 dataset """wn18rr""" +198 59 model """distmult""" +198 59 loss """bceaftersigmoid""" +198 59 regularizer """no""" +198 59 optimizer """adam""" +198 59 training_loop """owa""" +198 59 negative_sampler """basic""" +198 59 evaluator """rankbased""" +198 60 dataset """wn18rr""" +198 60 model """distmult""" +198 60 loss """bceaftersigmoid""" +198 60 regularizer """no""" +198 60 optimizer """adam""" +198 60 training_loop """owa""" +198 60 negative_sampler """basic""" +198 60 evaluator """rankbased""" +198 61 dataset """wn18rr""" +198 61 model """distmult""" +198 61 loss """bceaftersigmoid""" +198 61 regularizer """no""" +198 61 optimizer """adam""" +198 61 training_loop """owa""" +198 61 negative_sampler """basic""" +198 61 evaluator """rankbased""" +198 62 dataset """wn18rr""" +198 62 model """distmult""" +198 62 loss """bceaftersigmoid""" +198 62 regularizer """no""" +198 62 optimizer """adam""" +198 62 training_loop """owa""" +198 62 negative_sampler """basic""" +198 62 evaluator """rankbased""" +198 63 dataset """wn18rr""" +198 63 model """distmult""" 
+198 63 loss """bceaftersigmoid""" +198 63 regularizer """no""" +198 63 optimizer """adam""" +198 63 training_loop """owa""" +198 63 negative_sampler """basic""" +198 63 evaluator """rankbased""" +198 64 dataset """wn18rr""" +198 64 model """distmult""" +198 64 loss """bceaftersigmoid""" +198 64 regularizer """no""" +198 64 optimizer """adam""" +198 64 training_loop """owa""" +198 64 negative_sampler """basic""" +198 64 evaluator """rankbased""" +198 65 dataset """wn18rr""" +198 65 model """distmult""" +198 65 loss """bceaftersigmoid""" +198 65 regularizer """no""" +198 65 optimizer """adam""" +198 65 training_loop """owa""" +198 65 negative_sampler """basic""" +198 65 evaluator """rankbased""" +198 66 dataset """wn18rr""" +198 66 model """distmult""" +198 66 loss """bceaftersigmoid""" +198 66 regularizer """no""" +198 66 optimizer """adam""" +198 66 training_loop """owa""" +198 66 negative_sampler """basic""" +198 66 evaluator """rankbased""" +198 67 dataset """wn18rr""" +198 67 model """distmult""" +198 67 loss """bceaftersigmoid""" +198 67 regularizer """no""" +198 67 optimizer """adam""" +198 67 training_loop """owa""" +198 67 negative_sampler """basic""" +198 67 evaluator """rankbased""" +198 68 dataset """wn18rr""" +198 68 model """distmult""" +198 68 loss """bceaftersigmoid""" +198 68 regularizer """no""" +198 68 optimizer """adam""" +198 68 training_loop """owa""" +198 68 negative_sampler """basic""" +198 68 evaluator """rankbased""" +198 69 dataset """wn18rr""" +198 69 model """distmult""" +198 69 loss """bceaftersigmoid""" +198 69 regularizer """no""" +198 69 optimizer """adam""" +198 69 training_loop """owa""" +198 69 negative_sampler """basic""" +198 69 evaluator """rankbased""" +198 70 dataset """wn18rr""" +198 70 model """distmult""" +198 70 loss """bceaftersigmoid""" +198 70 regularizer """no""" +198 70 optimizer """adam""" +198 70 training_loop """owa""" +198 70 negative_sampler """basic""" +198 70 evaluator """rankbased""" +198 71 dataset 
"""wn18rr""" +198 71 model """distmult""" +198 71 loss """bceaftersigmoid""" +198 71 regularizer """no""" +198 71 optimizer """adam""" +198 71 training_loop """owa""" +198 71 negative_sampler """basic""" +198 71 evaluator """rankbased""" +198 72 dataset """wn18rr""" +198 72 model """distmult""" +198 72 loss """bceaftersigmoid""" +198 72 regularizer """no""" +198 72 optimizer """adam""" +198 72 training_loop """owa""" +198 72 negative_sampler """basic""" +198 72 evaluator """rankbased""" +198 73 dataset """wn18rr""" +198 73 model """distmult""" +198 73 loss """bceaftersigmoid""" +198 73 regularizer """no""" +198 73 optimizer """adam""" +198 73 training_loop """owa""" +198 73 negative_sampler """basic""" +198 73 evaluator """rankbased""" +198 74 dataset """wn18rr""" +198 74 model """distmult""" +198 74 loss """bceaftersigmoid""" +198 74 regularizer """no""" +198 74 optimizer """adam""" +198 74 training_loop """owa""" +198 74 negative_sampler """basic""" +198 74 evaluator """rankbased""" +198 75 dataset """wn18rr""" +198 75 model """distmult""" +198 75 loss """bceaftersigmoid""" +198 75 regularizer """no""" +198 75 optimizer """adam""" +198 75 training_loop """owa""" +198 75 negative_sampler """basic""" +198 75 evaluator """rankbased""" +198 76 dataset """wn18rr""" +198 76 model """distmult""" +198 76 loss """bceaftersigmoid""" +198 76 regularizer """no""" +198 76 optimizer """adam""" +198 76 training_loop """owa""" +198 76 negative_sampler """basic""" +198 76 evaluator """rankbased""" +198 77 dataset """wn18rr""" +198 77 model """distmult""" +198 77 loss """bceaftersigmoid""" +198 77 regularizer """no""" +198 77 optimizer """adam""" +198 77 training_loop """owa""" +198 77 negative_sampler """basic""" +198 77 evaluator """rankbased""" +198 78 dataset """wn18rr""" +198 78 model """distmult""" +198 78 loss """bceaftersigmoid""" +198 78 regularizer """no""" +198 78 optimizer """adam""" +198 78 training_loop """owa""" +198 78 negative_sampler """basic""" +198 78 evaluator 
"""rankbased""" +198 79 dataset """wn18rr""" +198 79 model """distmult""" +198 79 loss """bceaftersigmoid""" +198 79 regularizer """no""" +198 79 optimizer """adam""" +198 79 training_loop """owa""" +198 79 negative_sampler """basic""" +198 79 evaluator """rankbased""" +198 80 dataset """wn18rr""" +198 80 model """distmult""" +198 80 loss """bceaftersigmoid""" +198 80 regularizer """no""" +198 80 optimizer """adam""" +198 80 training_loop """owa""" +198 80 negative_sampler """basic""" +198 80 evaluator """rankbased""" +198 81 dataset """wn18rr""" +198 81 model """distmult""" +198 81 loss """bceaftersigmoid""" +198 81 regularizer """no""" +198 81 optimizer """adam""" +198 81 training_loop """owa""" +198 81 negative_sampler """basic""" +198 81 evaluator """rankbased""" +198 82 dataset """wn18rr""" +198 82 model """distmult""" +198 82 loss """bceaftersigmoid""" +198 82 regularizer """no""" +198 82 optimizer """adam""" +198 82 training_loop """owa""" +198 82 negative_sampler """basic""" +198 82 evaluator """rankbased""" +198 83 dataset """wn18rr""" +198 83 model """distmult""" +198 83 loss """bceaftersigmoid""" +198 83 regularizer """no""" +198 83 optimizer """adam""" +198 83 training_loop """owa""" +198 83 negative_sampler """basic""" +198 83 evaluator """rankbased""" +198 84 dataset """wn18rr""" +198 84 model """distmult""" +198 84 loss """bceaftersigmoid""" +198 84 regularizer """no""" +198 84 optimizer """adam""" +198 84 training_loop """owa""" +198 84 negative_sampler """basic""" +198 84 evaluator """rankbased""" +198 85 dataset """wn18rr""" +198 85 model """distmult""" +198 85 loss """bceaftersigmoid""" +198 85 regularizer """no""" +198 85 optimizer """adam""" +198 85 training_loop """owa""" +198 85 negative_sampler """basic""" +198 85 evaluator """rankbased""" +198 86 dataset """wn18rr""" +198 86 model """distmult""" +198 86 loss """bceaftersigmoid""" +198 86 regularizer """no""" +198 86 optimizer """adam""" +198 86 training_loop """owa""" +198 86 
negative_sampler """basic""" +198 86 evaluator """rankbased""" +198 87 dataset """wn18rr""" +198 87 model """distmult""" +198 87 loss """bceaftersigmoid""" +198 87 regularizer """no""" +198 87 optimizer """adam""" +198 87 training_loop """owa""" +198 87 negative_sampler """basic""" +198 87 evaluator """rankbased""" +198 88 dataset """wn18rr""" +198 88 model """distmult""" +198 88 loss """bceaftersigmoid""" +198 88 regularizer """no""" +198 88 optimizer """adam""" +198 88 training_loop """owa""" +198 88 negative_sampler """basic""" +198 88 evaluator """rankbased""" +198 89 dataset """wn18rr""" +198 89 model """distmult""" +198 89 loss """bceaftersigmoid""" +198 89 regularizer """no""" +198 89 optimizer """adam""" +198 89 training_loop """owa""" +198 89 negative_sampler """basic""" +198 89 evaluator """rankbased""" +198 90 dataset """wn18rr""" +198 90 model """distmult""" +198 90 loss """bceaftersigmoid""" +198 90 regularizer """no""" +198 90 optimizer """adam""" +198 90 training_loop """owa""" +198 90 negative_sampler """basic""" +198 90 evaluator """rankbased""" +198 91 dataset """wn18rr""" +198 91 model """distmult""" +198 91 loss """bceaftersigmoid""" +198 91 regularizer """no""" +198 91 optimizer """adam""" +198 91 training_loop """owa""" +198 91 negative_sampler """basic""" +198 91 evaluator """rankbased""" +198 92 dataset """wn18rr""" +198 92 model """distmult""" +198 92 loss """bceaftersigmoid""" +198 92 regularizer """no""" +198 92 optimizer """adam""" +198 92 training_loop """owa""" +198 92 negative_sampler """basic""" +198 92 evaluator """rankbased""" +198 93 dataset """wn18rr""" +198 93 model """distmult""" +198 93 loss """bceaftersigmoid""" +198 93 regularizer """no""" +198 93 optimizer """adam""" +198 93 training_loop """owa""" +198 93 negative_sampler """basic""" +198 93 evaluator """rankbased""" +198 94 dataset """wn18rr""" +198 94 model """distmult""" +198 94 loss """bceaftersigmoid""" +198 94 regularizer """no""" +198 94 optimizer """adam""" +198 94 
training_loop """owa""" +198 94 negative_sampler """basic""" +198 94 evaluator """rankbased""" +198 95 dataset """wn18rr""" +198 95 model """distmult""" +198 95 loss """bceaftersigmoid""" +198 95 regularizer """no""" +198 95 optimizer """adam""" +198 95 training_loop """owa""" +198 95 negative_sampler """basic""" +198 95 evaluator """rankbased""" +198 96 dataset """wn18rr""" +198 96 model """distmult""" +198 96 loss """bceaftersigmoid""" +198 96 regularizer """no""" +198 96 optimizer """adam""" +198 96 training_loop """owa""" +198 96 negative_sampler """basic""" +198 96 evaluator """rankbased""" +198 97 dataset """wn18rr""" +198 97 model """distmult""" +198 97 loss """bceaftersigmoid""" +198 97 regularizer """no""" +198 97 optimizer """adam""" +198 97 training_loop """owa""" +198 97 negative_sampler """basic""" +198 97 evaluator """rankbased""" +198 98 dataset """wn18rr""" +198 98 model """distmult""" +198 98 loss """bceaftersigmoid""" +198 98 regularizer """no""" +198 98 optimizer """adam""" +198 98 training_loop """owa""" +198 98 negative_sampler """basic""" +198 98 evaluator """rankbased""" +198 99 dataset """wn18rr""" +198 99 model """distmult""" +198 99 loss """bceaftersigmoid""" +198 99 regularizer """no""" +198 99 optimizer """adam""" +198 99 training_loop """owa""" +198 99 negative_sampler """basic""" +198 99 evaluator """rankbased""" +198 100 dataset """wn18rr""" +198 100 model """distmult""" +198 100 loss """bceaftersigmoid""" +198 100 regularizer """no""" +198 100 optimizer """adam""" +198 100 training_loop """owa""" +198 100 negative_sampler """basic""" +198 100 evaluator """rankbased""" +199 1 model.embedding_dim 2.0 +199 1 optimizer.lr 0.02392140338235255 +199 1 negative_sampler.num_negs_per_pos 23.0 +199 1 training.batch_size 1.0 +199 2 model.embedding_dim 2.0 +199 2 optimizer.lr 0.003829053450462666 +199 2 negative_sampler.num_negs_per_pos 59.0 +199 2 training.batch_size 1.0 +199 3 model.embedding_dim 2.0 +199 3 optimizer.lr 0.009541920074367377 
+199 3 negative_sampler.num_negs_per_pos 40.0 +199 3 training.batch_size 0.0 +199 4 model.embedding_dim 1.0 +199 4 optimizer.lr 0.009801682240465154 +199 4 negative_sampler.num_negs_per_pos 32.0 +199 4 training.batch_size 2.0 +199 5 model.embedding_dim 0.0 +199 5 optimizer.lr 0.0037817620451808267 +199 5 negative_sampler.num_negs_per_pos 16.0 +199 5 training.batch_size 0.0 +199 6 model.embedding_dim 2.0 +199 6 optimizer.lr 0.059879450871007205 +199 6 negative_sampler.num_negs_per_pos 30.0 +199 6 training.batch_size 2.0 +199 7 model.embedding_dim 0.0 +199 7 optimizer.lr 0.011350262361611277 +199 7 negative_sampler.num_negs_per_pos 82.0 +199 7 training.batch_size 0.0 +199 8 model.embedding_dim 1.0 +199 8 optimizer.lr 0.0011849108111050305 +199 8 negative_sampler.num_negs_per_pos 83.0 +199 8 training.batch_size 1.0 +199 9 model.embedding_dim 2.0 +199 9 optimizer.lr 0.0026986964198033223 +199 9 negative_sampler.num_negs_per_pos 0.0 +199 9 training.batch_size 0.0 +199 10 model.embedding_dim 0.0 +199 10 optimizer.lr 0.0022591484166603034 +199 10 negative_sampler.num_negs_per_pos 6.0 +199 10 training.batch_size 2.0 +199 11 model.embedding_dim 0.0 +199 11 optimizer.lr 0.0015079829325741157 +199 11 negative_sampler.num_negs_per_pos 91.0 +199 11 training.batch_size 0.0 +199 12 model.embedding_dim 0.0 +199 12 optimizer.lr 0.0020522231566292284 +199 12 negative_sampler.num_negs_per_pos 86.0 +199 12 training.batch_size 1.0 +199 13 model.embedding_dim 2.0 +199 13 optimizer.lr 0.09105030461904605 +199 13 negative_sampler.num_negs_per_pos 61.0 +199 13 training.batch_size 0.0 +199 14 model.embedding_dim 0.0 +199 14 optimizer.lr 0.08135027562526653 +199 14 negative_sampler.num_negs_per_pos 55.0 +199 14 training.batch_size 2.0 +199 15 model.embedding_dim 2.0 +199 15 optimizer.lr 0.005396048151538776 +199 15 negative_sampler.num_negs_per_pos 69.0 +199 15 training.batch_size 0.0 +199 16 model.embedding_dim 2.0 +199 16 optimizer.lr 0.06006370791520965 +199 16 
negative_sampler.num_negs_per_pos 4.0 +199 16 training.batch_size 2.0 +199 17 model.embedding_dim 1.0 +199 17 optimizer.lr 0.025994009156755808 +199 17 negative_sampler.num_negs_per_pos 82.0 +199 17 training.batch_size 1.0 +199 18 model.embedding_dim 1.0 +199 18 optimizer.lr 0.09279335146169855 +199 18 negative_sampler.num_negs_per_pos 85.0 +199 18 training.batch_size 2.0 +199 19 model.embedding_dim 1.0 +199 19 optimizer.lr 0.027415373356782992 +199 19 negative_sampler.num_negs_per_pos 67.0 +199 19 training.batch_size 2.0 +199 20 model.embedding_dim 2.0 +199 20 optimizer.lr 0.01561847454338419 +199 20 negative_sampler.num_negs_per_pos 13.0 +199 20 training.batch_size 0.0 +199 21 model.embedding_dim 2.0 +199 21 optimizer.lr 0.0072352797768170125 +199 21 negative_sampler.num_negs_per_pos 37.0 +199 21 training.batch_size 2.0 +199 22 model.embedding_dim 0.0 +199 22 optimizer.lr 0.05689352227902585 +199 22 negative_sampler.num_negs_per_pos 43.0 +199 22 training.batch_size 1.0 +199 23 model.embedding_dim 0.0 +199 23 optimizer.lr 0.01677992969103787 +199 23 negative_sampler.num_negs_per_pos 63.0 +199 23 training.batch_size 0.0 +199 24 model.embedding_dim 2.0 +199 24 optimizer.lr 0.017814577927952522 +199 24 negative_sampler.num_negs_per_pos 55.0 +199 24 training.batch_size 2.0 +199 25 model.embedding_dim 1.0 +199 25 optimizer.lr 0.04129917147127579 +199 25 negative_sampler.num_negs_per_pos 83.0 +199 25 training.batch_size 1.0 +199 26 model.embedding_dim 2.0 +199 26 optimizer.lr 0.007566757292337399 +199 26 negative_sampler.num_negs_per_pos 35.0 +199 26 training.batch_size 0.0 +199 27 model.embedding_dim 0.0 +199 27 optimizer.lr 0.01061699884355707 +199 27 negative_sampler.num_negs_per_pos 24.0 +199 27 training.batch_size 1.0 +199 28 model.embedding_dim 2.0 +199 28 optimizer.lr 0.0034806159154441587 +199 28 negative_sampler.num_negs_per_pos 17.0 +199 28 training.batch_size 1.0 +199 29 model.embedding_dim 0.0 +199 29 optimizer.lr 0.012763557293286777 +199 29 
negative_sampler.num_negs_per_pos 83.0 +199 29 training.batch_size 2.0 +199 30 model.embedding_dim 1.0 +199 30 optimizer.lr 0.005462197190025303 +199 30 negative_sampler.num_negs_per_pos 99.0 +199 30 training.batch_size 0.0 +199 31 model.embedding_dim 0.0 +199 31 optimizer.lr 0.0012335386777961323 +199 31 negative_sampler.num_negs_per_pos 99.0 +199 31 training.batch_size 2.0 +199 32 model.embedding_dim 2.0 +199 32 optimizer.lr 0.004852593606017834 +199 32 negative_sampler.num_negs_per_pos 45.0 +199 32 training.batch_size 1.0 +199 33 model.embedding_dim 1.0 +199 33 optimizer.lr 0.001517835069828359 +199 33 negative_sampler.num_negs_per_pos 51.0 +199 33 training.batch_size 1.0 +199 34 model.embedding_dim 1.0 +199 34 optimizer.lr 0.009661825217012733 +199 34 negative_sampler.num_negs_per_pos 4.0 +199 34 training.batch_size 0.0 +199 35 model.embedding_dim 1.0 +199 35 optimizer.lr 0.03151669930529803 +199 35 negative_sampler.num_negs_per_pos 29.0 +199 35 training.batch_size 2.0 +199 36 model.embedding_dim 2.0 +199 36 optimizer.lr 0.011864221518325698 +199 36 negative_sampler.num_negs_per_pos 62.0 +199 36 training.batch_size 0.0 +199 37 model.embedding_dim 0.0 +199 37 optimizer.lr 0.06338121170012297 +199 37 negative_sampler.num_negs_per_pos 96.0 +199 37 training.batch_size 2.0 +199 38 model.embedding_dim 1.0 +199 38 optimizer.lr 0.06661842296987938 +199 38 negative_sampler.num_negs_per_pos 29.0 +199 38 training.batch_size 1.0 +199 39 model.embedding_dim 0.0 +199 39 optimizer.lr 0.05168531307693642 +199 39 negative_sampler.num_negs_per_pos 9.0 +199 39 training.batch_size 2.0 +199 40 model.embedding_dim 0.0 +199 40 optimizer.lr 0.02887488447006113 +199 40 negative_sampler.num_negs_per_pos 83.0 +199 40 training.batch_size 1.0 +199 41 model.embedding_dim 0.0 +199 41 optimizer.lr 0.004793540306282372 +199 41 negative_sampler.num_negs_per_pos 79.0 +199 41 training.batch_size 1.0 +199 42 model.embedding_dim 2.0 +199 42 optimizer.lr 0.0043761023414725765 +199 42 
negative_sampler.num_negs_per_pos 65.0 +199 42 training.batch_size 0.0 +199 43 model.embedding_dim 2.0 +199 43 optimizer.lr 0.0665263084001819 +199 43 negative_sampler.num_negs_per_pos 23.0 +199 43 training.batch_size 0.0 +199 44 model.embedding_dim 0.0 +199 44 optimizer.lr 0.0029013454740193516 +199 44 negative_sampler.num_negs_per_pos 61.0 +199 44 training.batch_size 2.0 +199 45 model.embedding_dim 1.0 +199 45 optimizer.lr 0.0029662477914305318 +199 45 negative_sampler.num_negs_per_pos 56.0 +199 45 training.batch_size 1.0 +199 46 model.embedding_dim 2.0 +199 46 optimizer.lr 0.03855512338861159 +199 46 negative_sampler.num_negs_per_pos 58.0 +199 46 training.batch_size 2.0 +199 47 model.embedding_dim 0.0 +199 47 optimizer.lr 0.0506099889329217 +199 47 negative_sampler.num_negs_per_pos 67.0 +199 47 training.batch_size 1.0 +199 48 model.embedding_dim 1.0 +199 48 optimizer.lr 0.08807731134981793 +199 48 negative_sampler.num_negs_per_pos 30.0 +199 48 training.batch_size 1.0 +199 49 model.embedding_dim 2.0 +199 49 optimizer.lr 0.01891363522836598 +199 49 negative_sampler.num_negs_per_pos 32.0 +199 49 training.batch_size 0.0 +199 50 model.embedding_dim 1.0 +199 50 optimizer.lr 0.007246445729993515 +199 50 negative_sampler.num_negs_per_pos 8.0 +199 50 training.batch_size 2.0 +199 51 model.embedding_dim 0.0 +199 51 optimizer.lr 0.01856446341341534 +199 51 negative_sampler.num_negs_per_pos 64.0 +199 51 training.batch_size 0.0 +199 52 model.embedding_dim 2.0 +199 52 optimizer.lr 0.009970355306648866 +199 52 negative_sampler.num_negs_per_pos 25.0 +199 52 training.batch_size 0.0 +199 53 model.embedding_dim 2.0 +199 53 optimizer.lr 0.008540152175745459 +199 53 negative_sampler.num_negs_per_pos 6.0 +199 53 training.batch_size 0.0 +199 54 model.embedding_dim 2.0 +199 54 optimizer.lr 0.0016964937192754572 +199 54 negative_sampler.num_negs_per_pos 60.0 +199 54 training.batch_size 1.0 +199 55 model.embedding_dim 1.0 +199 55 optimizer.lr 0.027716308776524173 +199 55 
negative_sampler.num_negs_per_pos 11.0 +199 55 training.batch_size 0.0 +199 56 model.embedding_dim 2.0 +199 56 optimizer.lr 0.001608650325292351 +199 56 negative_sampler.num_negs_per_pos 99.0 +199 56 training.batch_size 1.0 +199 57 model.embedding_dim 1.0 +199 57 optimizer.lr 0.00695227317892388 +199 57 negative_sampler.num_negs_per_pos 46.0 +199 57 training.batch_size 1.0 +199 58 model.embedding_dim 0.0 +199 58 optimizer.lr 0.042291653309100764 +199 58 negative_sampler.num_negs_per_pos 30.0 +199 58 training.batch_size 2.0 +199 59 model.embedding_dim 2.0 +199 59 optimizer.lr 0.0592734856510819 +199 59 negative_sampler.num_negs_per_pos 50.0 +199 59 training.batch_size 2.0 +199 60 model.embedding_dim 2.0 +199 60 optimizer.lr 0.02013738537784485 +199 60 negative_sampler.num_negs_per_pos 42.0 +199 60 training.batch_size 0.0 +199 61 model.embedding_dim 0.0 +199 61 optimizer.lr 0.04862892066089571 +199 61 negative_sampler.num_negs_per_pos 13.0 +199 61 training.batch_size 0.0 +199 62 model.embedding_dim 2.0 +199 62 optimizer.lr 0.09820644793849236 +199 62 negative_sampler.num_negs_per_pos 41.0 +199 62 training.batch_size 1.0 +199 63 model.embedding_dim 0.0 +199 63 optimizer.lr 0.06962684623596492 +199 63 negative_sampler.num_negs_per_pos 71.0 +199 63 training.batch_size 0.0 +199 64 model.embedding_dim 1.0 +199 64 optimizer.lr 0.09855735078779375 +199 64 negative_sampler.num_negs_per_pos 23.0 +199 64 training.batch_size 0.0 +199 65 model.embedding_dim 2.0 +199 65 optimizer.lr 0.007363839837699524 +199 65 negative_sampler.num_negs_per_pos 48.0 +199 65 training.batch_size 1.0 +199 66 model.embedding_dim 1.0 +199 66 optimizer.lr 0.0015378139039245024 +199 66 negative_sampler.num_negs_per_pos 1.0 +199 66 training.batch_size 0.0 +199 67 model.embedding_dim 1.0 +199 67 optimizer.lr 0.04756381340840383 +199 67 negative_sampler.num_negs_per_pos 17.0 +199 67 training.batch_size 1.0 +199 68 model.embedding_dim 1.0 +199 68 optimizer.lr 0.030183010195946363 +199 68 
negative_sampler.num_negs_per_pos 60.0 +199 68 training.batch_size 1.0 +199 69 model.embedding_dim 1.0 +199 69 optimizer.lr 0.01761418927328003 +199 69 negative_sampler.num_negs_per_pos 94.0 +199 69 training.batch_size 0.0 +199 70 model.embedding_dim 1.0 +199 70 optimizer.lr 0.006864350372892993 +199 70 negative_sampler.num_negs_per_pos 54.0 +199 70 training.batch_size 0.0 +199 71 model.embedding_dim 0.0 +199 71 optimizer.lr 0.001971891576867126 +199 71 negative_sampler.num_negs_per_pos 70.0 +199 71 training.batch_size 2.0 +199 72 model.embedding_dim 2.0 +199 72 optimizer.lr 0.002792520380075772 +199 72 negative_sampler.num_negs_per_pos 72.0 +199 72 training.batch_size 1.0 +199 73 model.embedding_dim 0.0 +199 73 optimizer.lr 0.0035264911257119093 +199 73 negative_sampler.num_negs_per_pos 94.0 +199 73 training.batch_size 0.0 +199 74 model.embedding_dim 0.0 +199 74 optimizer.lr 0.00516629597119442 +199 74 negative_sampler.num_negs_per_pos 36.0 +199 74 training.batch_size 0.0 +199 75 model.embedding_dim 0.0 +199 75 optimizer.lr 0.0023813562835663777 +199 75 negative_sampler.num_negs_per_pos 43.0 +199 75 training.batch_size 2.0 +199 76 model.embedding_dim 2.0 +199 76 optimizer.lr 0.0326189073447191 +199 76 negative_sampler.num_negs_per_pos 36.0 +199 76 training.batch_size 2.0 +199 77 model.embedding_dim 0.0 +199 77 optimizer.lr 0.07574306614210449 +199 77 negative_sampler.num_negs_per_pos 16.0 +199 77 training.batch_size 2.0 +199 78 model.embedding_dim 2.0 +199 78 optimizer.lr 0.0174838025575839 +199 78 negative_sampler.num_negs_per_pos 98.0 +199 78 training.batch_size 0.0 +199 79 model.embedding_dim 1.0 +199 79 optimizer.lr 0.033256600528551644 +199 79 negative_sampler.num_negs_per_pos 31.0 +199 79 training.batch_size 0.0 +199 80 model.embedding_dim 2.0 +199 80 optimizer.lr 0.002849198634750511 +199 80 negative_sampler.num_negs_per_pos 98.0 +199 80 training.batch_size 2.0 +199 81 model.embedding_dim 0.0 +199 81 optimizer.lr 0.054786277384816104 +199 81 
negative_sampler.num_negs_per_pos 73.0 +199 81 training.batch_size 2.0 +199 82 model.embedding_dim 1.0 +199 82 optimizer.lr 0.056051033805666235 +199 82 negative_sampler.num_negs_per_pos 53.0 +199 82 training.batch_size 1.0 +199 83 model.embedding_dim 1.0 +199 83 optimizer.lr 0.0032679292106706937 +199 83 negative_sampler.num_negs_per_pos 58.0 +199 83 training.batch_size 1.0 +199 84 model.embedding_dim 0.0 +199 84 optimizer.lr 0.005207828153956273 +199 84 negative_sampler.num_negs_per_pos 82.0 +199 84 training.batch_size 1.0 +199 85 model.embedding_dim 2.0 +199 85 optimizer.lr 0.005036268736967629 +199 85 negative_sampler.num_negs_per_pos 39.0 +199 85 training.batch_size 2.0 +199 86 model.embedding_dim 2.0 +199 86 optimizer.lr 0.06532240674228595 +199 86 negative_sampler.num_negs_per_pos 67.0 +199 86 training.batch_size 0.0 +199 87 model.embedding_dim 2.0 +199 87 optimizer.lr 0.001952604974235306 +199 87 negative_sampler.num_negs_per_pos 90.0 +199 87 training.batch_size 2.0 +199 88 model.embedding_dim 2.0 +199 88 optimizer.lr 0.022716315938136937 +199 88 negative_sampler.num_negs_per_pos 47.0 +199 88 training.batch_size 0.0 +199 89 model.embedding_dim 2.0 +199 89 optimizer.lr 0.0038134221757926507 +199 89 negative_sampler.num_negs_per_pos 11.0 +199 89 training.batch_size 2.0 +199 90 model.embedding_dim 2.0 +199 90 optimizer.lr 0.017165433477320573 +199 90 negative_sampler.num_negs_per_pos 0.0 +199 90 training.batch_size 2.0 +199 91 model.embedding_dim 2.0 +199 91 optimizer.lr 0.010431160994318449 +199 91 negative_sampler.num_negs_per_pos 26.0 +199 91 training.batch_size 2.0 +199 92 model.embedding_dim 0.0 +199 92 optimizer.lr 0.04322035382204317 +199 92 negative_sampler.num_negs_per_pos 18.0 +199 92 training.batch_size 1.0 +199 93 model.embedding_dim 1.0 +199 93 optimizer.lr 0.004377602014897975 +199 93 negative_sampler.num_negs_per_pos 99.0 +199 93 training.batch_size 1.0 +199 94 model.embedding_dim 0.0 +199 94 optimizer.lr 0.029044557268726824 +199 94 
negative_sampler.num_negs_per_pos 38.0 +199 94 training.batch_size 1.0 +199 95 model.embedding_dim 2.0 +199 95 optimizer.lr 0.0025697700091587786 +199 95 negative_sampler.num_negs_per_pos 66.0 +199 95 training.batch_size 2.0 +199 96 model.embedding_dim 0.0 +199 96 optimizer.lr 0.0021370819333531204 +199 96 negative_sampler.num_negs_per_pos 99.0 +199 96 training.batch_size 1.0 +199 97 model.embedding_dim 1.0 +199 97 optimizer.lr 0.0294594417885692 +199 97 negative_sampler.num_negs_per_pos 92.0 +199 97 training.batch_size 2.0 +199 98 model.embedding_dim 0.0 +199 98 optimizer.lr 0.06066069320723583 +199 98 negative_sampler.num_negs_per_pos 84.0 +199 98 training.batch_size 1.0 +199 99 model.embedding_dim 2.0 +199 99 optimizer.lr 0.06580595467967629 +199 99 negative_sampler.num_negs_per_pos 92.0 +199 99 training.batch_size 1.0 +199 100 model.embedding_dim 2.0 +199 100 optimizer.lr 0.005920802898210609 +199 100 negative_sampler.num_negs_per_pos 47.0 +199 100 training.batch_size 0.0 +199 1 dataset """wn18rr""" +199 1 model """distmult""" +199 1 loss """softplus""" +199 1 regularizer """no""" +199 1 optimizer """adam""" +199 1 training_loop """owa""" +199 1 negative_sampler """basic""" +199 1 evaluator """rankbased""" +199 2 dataset """wn18rr""" +199 2 model """distmult""" +199 2 loss """softplus""" +199 2 regularizer """no""" +199 2 optimizer """adam""" +199 2 training_loop """owa""" +199 2 negative_sampler """basic""" +199 2 evaluator """rankbased""" +199 3 dataset """wn18rr""" +199 3 model """distmult""" +199 3 loss """softplus""" +199 3 regularizer """no""" +199 3 optimizer """adam""" +199 3 training_loop """owa""" +199 3 negative_sampler """basic""" +199 3 evaluator """rankbased""" +199 4 dataset """wn18rr""" +199 4 model """distmult""" +199 4 loss """softplus""" +199 4 regularizer """no""" +199 4 optimizer """adam""" +199 4 training_loop """owa""" +199 4 negative_sampler """basic""" +199 4 evaluator """rankbased""" +199 5 dataset """wn18rr""" +199 5 model 
"""distmult""" +199 5 loss """softplus""" +199 5 regularizer """no""" +199 5 optimizer """adam""" +199 5 training_loop """owa""" +199 5 negative_sampler """basic""" +199 5 evaluator """rankbased""" +199 6 dataset """wn18rr""" +199 6 model """distmult""" +199 6 loss """softplus""" +199 6 regularizer """no""" +199 6 optimizer """adam""" +199 6 training_loop """owa""" +199 6 negative_sampler """basic""" +199 6 evaluator """rankbased""" +199 7 dataset """wn18rr""" +199 7 model """distmult""" +199 7 loss """softplus""" +199 7 regularizer """no""" +199 7 optimizer """adam""" +199 7 training_loop """owa""" +199 7 negative_sampler """basic""" +199 7 evaluator """rankbased""" +199 8 dataset """wn18rr""" +199 8 model """distmult""" +199 8 loss """softplus""" +199 8 regularizer """no""" +199 8 optimizer """adam""" +199 8 training_loop """owa""" +199 8 negative_sampler """basic""" +199 8 evaluator """rankbased""" +199 9 dataset """wn18rr""" +199 9 model """distmult""" +199 9 loss """softplus""" +199 9 regularizer """no""" +199 9 optimizer """adam""" +199 9 training_loop """owa""" +199 9 negative_sampler """basic""" +199 9 evaluator """rankbased""" +199 10 dataset """wn18rr""" +199 10 model """distmult""" +199 10 loss """softplus""" +199 10 regularizer """no""" +199 10 optimizer """adam""" +199 10 training_loop """owa""" +199 10 negative_sampler """basic""" +199 10 evaluator """rankbased""" +199 11 dataset """wn18rr""" +199 11 model """distmult""" +199 11 loss """softplus""" +199 11 regularizer """no""" +199 11 optimizer """adam""" +199 11 training_loop """owa""" +199 11 negative_sampler """basic""" +199 11 evaluator """rankbased""" +199 12 dataset """wn18rr""" +199 12 model """distmult""" +199 12 loss """softplus""" +199 12 regularizer """no""" +199 12 optimizer """adam""" +199 12 training_loop """owa""" +199 12 negative_sampler """basic""" +199 12 evaluator """rankbased""" +199 13 dataset """wn18rr""" +199 13 model """distmult""" +199 13 loss """softplus""" +199 13 
regularizer """no""" +199 13 optimizer """adam""" +199 13 training_loop """owa""" +199 13 negative_sampler """basic""" +199 13 evaluator """rankbased""" +199 14 dataset """wn18rr""" +199 14 model """distmult""" +199 14 loss """softplus""" +199 14 regularizer """no""" +199 14 optimizer """adam""" +199 14 training_loop """owa""" +199 14 negative_sampler """basic""" +199 14 evaluator """rankbased""" +199 15 dataset """wn18rr""" +199 15 model """distmult""" +199 15 loss """softplus""" +199 15 regularizer """no""" +199 15 optimizer """adam""" +199 15 training_loop """owa""" +199 15 negative_sampler """basic""" +199 15 evaluator """rankbased""" +199 16 dataset """wn18rr""" +199 16 model """distmult""" +199 16 loss """softplus""" +199 16 regularizer """no""" +199 16 optimizer """adam""" +199 16 training_loop """owa""" +199 16 negative_sampler """basic""" +199 16 evaluator """rankbased""" +199 17 dataset """wn18rr""" +199 17 model """distmult""" +199 17 loss """softplus""" +199 17 regularizer """no""" +199 17 optimizer """adam""" +199 17 training_loop """owa""" +199 17 negative_sampler """basic""" +199 17 evaluator """rankbased""" +199 18 dataset """wn18rr""" +199 18 model """distmult""" +199 18 loss """softplus""" +199 18 regularizer """no""" +199 18 optimizer """adam""" +199 18 training_loop """owa""" +199 18 negative_sampler """basic""" +199 18 evaluator """rankbased""" +199 19 dataset """wn18rr""" +199 19 model """distmult""" +199 19 loss """softplus""" +199 19 regularizer """no""" +199 19 optimizer """adam""" +199 19 training_loop """owa""" +199 19 negative_sampler """basic""" +199 19 evaluator """rankbased""" +199 20 dataset """wn18rr""" +199 20 model """distmult""" +199 20 loss """softplus""" +199 20 regularizer """no""" +199 20 optimizer """adam""" +199 20 training_loop """owa""" +199 20 negative_sampler """basic""" +199 20 evaluator """rankbased""" +199 21 dataset """wn18rr""" +199 21 model """distmult""" +199 21 loss """softplus""" +199 21 regularizer """no""" 
+199 21 optimizer """adam""" +199 21 training_loop """owa""" +199 21 negative_sampler """basic""" +199 21 evaluator """rankbased""" +199 22 dataset """wn18rr""" +199 22 model """distmult""" +199 22 loss """softplus""" +199 22 regularizer """no""" +199 22 optimizer """adam""" +199 22 training_loop """owa""" +199 22 negative_sampler """basic""" +199 22 evaluator """rankbased""" +199 23 dataset """wn18rr""" +199 23 model """distmult""" +199 23 loss """softplus""" +199 23 regularizer """no""" +199 23 optimizer """adam""" +199 23 training_loop """owa""" +199 23 negative_sampler """basic""" +199 23 evaluator """rankbased""" +199 24 dataset """wn18rr""" +199 24 model """distmult""" +199 24 loss """softplus""" +199 24 regularizer """no""" +199 24 optimizer """adam""" +199 24 training_loop """owa""" +199 24 negative_sampler """basic""" +199 24 evaluator """rankbased""" +199 25 dataset """wn18rr""" +199 25 model """distmult""" +199 25 loss """softplus""" +199 25 regularizer """no""" +199 25 optimizer """adam""" +199 25 training_loop """owa""" +199 25 negative_sampler """basic""" +199 25 evaluator """rankbased""" +199 26 dataset """wn18rr""" +199 26 model """distmult""" +199 26 loss """softplus""" +199 26 regularizer """no""" +199 26 optimizer """adam""" +199 26 training_loop """owa""" +199 26 negative_sampler """basic""" +199 26 evaluator """rankbased""" +199 27 dataset """wn18rr""" +199 27 model """distmult""" +199 27 loss """softplus""" +199 27 regularizer """no""" +199 27 optimizer """adam""" +199 27 training_loop """owa""" +199 27 negative_sampler """basic""" +199 27 evaluator """rankbased""" +199 28 dataset """wn18rr""" +199 28 model """distmult""" +199 28 loss """softplus""" +199 28 regularizer """no""" +199 28 optimizer """adam""" +199 28 training_loop """owa""" +199 28 negative_sampler """basic""" +199 28 evaluator """rankbased""" +199 29 dataset """wn18rr""" +199 29 model """distmult""" +199 29 loss """softplus""" +199 29 regularizer """no""" +199 29 optimizer 
"""adam""" +199 29 training_loop """owa""" +199 29 negative_sampler """basic""" +199 29 evaluator """rankbased""" +199 30 dataset """wn18rr""" +199 30 model """distmult""" +199 30 loss """softplus""" +199 30 regularizer """no""" +199 30 optimizer """adam""" +199 30 training_loop """owa""" +199 30 negative_sampler """basic""" +199 30 evaluator """rankbased""" +199 31 dataset """wn18rr""" +199 31 model """distmult""" +199 31 loss """softplus""" +199 31 regularizer """no""" +199 31 optimizer """adam""" +199 31 training_loop """owa""" +199 31 negative_sampler """basic""" +199 31 evaluator """rankbased""" +199 32 dataset """wn18rr""" +199 32 model """distmult""" +199 32 loss """softplus""" +199 32 regularizer """no""" +199 32 optimizer """adam""" +199 32 training_loop """owa""" +199 32 negative_sampler """basic""" +199 32 evaluator """rankbased""" +199 33 dataset """wn18rr""" +199 33 model """distmult""" +199 33 loss """softplus""" +199 33 regularizer """no""" +199 33 optimizer """adam""" +199 33 training_loop """owa""" +199 33 negative_sampler """basic""" +199 33 evaluator """rankbased""" +199 34 dataset """wn18rr""" +199 34 model """distmult""" +199 34 loss """softplus""" +199 34 regularizer """no""" +199 34 optimizer """adam""" +199 34 training_loop """owa""" +199 34 negative_sampler """basic""" +199 34 evaluator """rankbased""" +199 35 dataset """wn18rr""" +199 35 model """distmult""" +199 35 loss """softplus""" +199 35 regularizer """no""" +199 35 optimizer """adam""" +199 35 training_loop """owa""" +199 35 negative_sampler """basic""" +199 35 evaluator """rankbased""" +199 36 dataset """wn18rr""" +199 36 model """distmult""" +199 36 loss """softplus""" +199 36 regularizer """no""" +199 36 optimizer """adam""" +199 36 training_loop """owa""" +199 36 negative_sampler """basic""" +199 36 evaluator """rankbased""" +199 37 dataset """wn18rr""" +199 37 model """distmult""" +199 37 loss """softplus""" +199 37 regularizer """no""" +199 37 optimizer """adam""" +199 37 
training_loop """owa""" +199 37 negative_sampler """basic""" +199 37 evaluator """rankbased""" +199 38 dataset """wn18rr""" +199 38 model """distmult""" +199 38 loss """softplus""" +199 38 regularizer """no""" +199 38 optimizer """adam""" +199 38 training_loop """owa""" +199 38 negative_sampler """basic""" +199 38 evaluator """rankbased""" +199 39 dataset """wn18rr""" +199 39 model """distmult""" +199 39 loss """softplus""" +199 39 regularizer """no""" +199 39 optimizer """adam""" +199 39 training_loop """owa""" +199 39 negative_sampler """basic""" +199 39 evaluator """rankbased""" +199 40 dataset """wn18rr""" +199 40 model """distmult""" +199 40 loss """softplus""" +199 40 regularizer """no""" +199 40 optimizer """adam""" +199 40 training_loop """owa""" +199 40 negative_sampler """basic""" +199 40 evaluator """rankbased""" +199 41 dataset """wn18rr""" +199 41 model """distmult""" +199 41 loss """softplus""" +199 41 regularizer """no""" +199 41 optimizer """adam""" +199 41 training_loop """owa""" +199 41 negative_sampler """basic""" +199 41 evaluator """rankbased""" +199 42 dataset """wn18rr""" +199 42 model """distmult""" +199 42 loss """softplus""" +199 42 regularizer """no""" +199 42 optimizer """adam""" +199 42 training_loop """owa""" +199 42 negative_sampler """basic""" +199 42 evaluator """rankbased""" +199 43 dataset """wn18rr""" +199 43 model """distmult""" +199 43 loss """softplus""" +199 43 regularizer """no""" +199 43 optimizer """adam""" +199 43 training_loop """owa""" +199 43 negative_sampler """basic""" +199 43 evaluator """rankbased""" +199 44 dataset """wn18rr""" +199 44 model """distmult""" +199 44 loss """softplus""" +199 44 regularizer """no""" +199 44 optimizer """adam""" +199 44 training_loop """owa""" +199 44 negative_sampler """basic""" +199 44 evaluator """rankbased""" +199 45 dataset """wn18rr""" +199 45 model """distmult""" +199 45 loss """softplus""" +199 45 regularizer """no""" +199 45 optimizer """adam""" +199 45 training_loop """owa""" 
+199 45 negative_sampler """basic""" +199 45 evaluator """rankbased""" +199 46 dataset """wn18rr""" +199 46 model """distmult""" +199 46 loss """softplus""" +199 46 regularizer """no""" +199 46 optimizer """adam""" +199 46 training_loop """owa""" +199 46 negative_sampler """basic""" +199 46 evaluator """rankbased""" +199 47 dataset """wn18rr""" +199 47 model """distmult""" +199 47 loss """softplus""" +199 47 regularizer """no""" +199 47 optimizer """adam""" +199 47 training_loop """owa""" +199 47 negative_sampler """basic""" +199 47 evaluator """rankbased""" +199 48 dataset """wn18rr""" +199 48 model """distmult""" +199 48 loss """softplus""" +199 48 regularizer """no""" +199 48 optimizer """adam""" +199 48 training_loop """owa""" +199 48 negative_sampler """basic""" +199 48 evaluator """rankbased""" +199 49 dataset """wn18rr""" +199 49 model """distmult""" +199 49 loss """softplus""" +199 49 regularizer """no""" +199 49 optimizer """adam""" +199 49 training_loop """owa""" +199 49 negative_sampler """basic""" +199 49 evaluator """rankbased""" +199 50 dataset """wn18rr""" +199 50 model """distmult""" +199 50 loss """softplus""" +199 50 regularizer """no""" +199 50 optimizer """adam""" +199 50 training_loop """owa""" +199 50 negative_sampler """basic""" +199 50 evaluator """rankbased""" +199 51 dataset """wn18rr""" +199 51 model """distmult""" +199 51 loss """softplus""" +199 51 regularizer """no""" +199 51 optimizer """adam""" +199 51 training_loop """owa""" +199 51 negative_sampler """basic""" +199 51 evaluator """rankbased""" +199 52 dataset """wn18rr""" +199 52 model """distmult""" +199 52 loss """softplus""" +199 52 regularizer """no""" +199 52 optimizer """adam""" +199 52 training_loop """owa""" +199 52 negative_sampler """basic""" +199 52 evaluator """rankbased""" +199 53 dataset """wn18rr""" +199 53 model """distmult""" +199 53 loss """softplus""" +199 53 regularizer """no""" +199 53 optimizer """adam""" +199 53 training_loop """owa""" +199 53 
negative_sampler """basic""" +199 53 evaluator """rankbased""" +199 54 dataset """wn18rr""" +199 54 model """distmult""" +199 54 loss """softplus""" +199 54 regularizer """no""" +199 54 optimizer """adam""" +199 54 training_loop """owa""" +199 54 negative_sampler """basic""" +199 54 evaluator """rankbased""" +199 55 dataset """wn18rr""" +199 55 model """distmult""" +199 55 loss """softplus""" +199 55 regularizer """no""" +199 55 optimizer """adam""" +199 55 training_loop """owa""" +199 55 negative_sampler """basic""" +199 55 evaluator """rankbased""" +199 56 dataset """wn18rr""" +199 56 model """distmult""" +199 56 loss """softplus""" +199 56 regularizer """no""" +199 56 optimizer """adam""" +199 56 training_loop """owa""" +199 56 negative_sampler """basic""" +199 56 evaluator """rankbased""" +199 57 dataset """wn18rr""" +199 57 model """distmult""" +199 57 loss """softplus""" +199 57 regularizer """no""" +199 57 optimizer """adam""" +199 57 training_loop """owa""" +199 57 negative_sampler """basic""" +199 57 evaluator """rankbased""" +199 58 dataset """wn18rr""" +199 58 model """distmult""" +199 58 loss """softplus""" +199 58 regularizer """no""" +199 58 optimizer """adam""" +199 58 training_loop """owa""" +199 58 negative_sampler """basic""" +199 58 evaluator """rankbased""" +199 59 dataset """wn18rr""" +199 59 model """distmult""" +199 59 loss """softplus""" +199 59 regularizer """no""" +199 59 optimizer """adam""" +199 59 training_loop """owa""" +199 59 negative_sampler """basic""" +199 59 evaluator """rankbased""" +199 60 dataset """wn18rr""" +199 60 model """distmult""" +199 60 loss """softplus""" +199 60 regularizer """no""" +199 60 optimizer """adam""" +199 60 training_loop """owa""" +199 60 negative_sampler """basic""" +199 60 evaluator """rankbased""" +199 61 dataset """wn18rr""" +199 61 model """distmult""" +199 61 loss """softplus""" +199 61 regularizer """no""" +199 61 optimizer """adam""" +199 61 training_loop """owa""" +199 61 negative_sampler 
"""basic""" +199 61 evaluator """rankbased""" +199 62 dataset """wn18rr""" +199 62 model """distmult""" +199 62 loss """softplus""" +199 62 regularizer """no""" +199 62 optimizer """adam""" +199 62 training_loop """owa""" +199 62 negative_sampler """basic""" +199 62 evaluator """rankbased""" +199 63 dataset """wn18rr""" +199 63 model """distmult""" +199 63 loss """softplus""" +199 63 regularizer """no""" +199 63 optimizer """adam""" +199 63 training_loop """owa""" +199 63 negative_sampler """basic""" +199 63 evaluator """rankbased""" +199 64 dataset """wn18rr""" +199 64 model """distmult""" +199 64 loss """softplus""" +199 64 regularizer """no""" +199 64 optimizer """adam""" +199 64 training_loop """owa""" +199 64 negative_sampler """basic""" +199 64 evaluator """rankbased""" +199 65 dataset """wn18rr""" +199 65 model """distmult""" +199 65 loss """softplus""" +199 65 regularizer """no""" +199 65 optimizer """adam""" +199 65 training_loop """owa""" +199 65 negative_sampler """basic""" +199 65 evaluator """rankbased""" +199 66 dataset """wn18rr""" +199 66 model """distmult""" +199 66 loss """softplus""" +199 66 regularizer """no""" +199 66 optimizer """adam""" +199 66 training_loop """owa""" +199 66 negative_sampler """basic""" +199 66 evaluator """rankbased""" +199 67 dataset """wn18rr""" +199 67 model """distmult""" +199 67 loss """softplus""" +199 67 regularizer """no""" +199 67 optimizer """adam""" +199 67 training_loop """owa""" +199 67 negative_sampler """basic""" +199 67 evaluator """rankbased""" +199 68 dataset """wn18rr""" +199 68 model """distmult""" +199 68 loss """softplus""" +199 68 regularizer """no""" +199 68 optimizer """adam""" +199 68 training_loop """owa""" +199 68 negative_sampler """basic""" +199 68 evaluator """rankbased""" +199 69 dataset """wn18rr""" +199 69 model """distmult""" +199 69 loss """softplus""" +199 69 regularizer """no""" +199 69 optimizer """adam""" +199 69 training_loop """owa""" +199 69 negative_sampler """basic""" +199 69 
evaluator """rankbased""" +199 70 dataset """wn18rr""" +199 70 model """distmult""" +199 70 loss """softplus""" +199 70 regularizer """no""" +199 70 optimizer """adam""" +199 70 training_loop """owa""" +199 70 negative_sampler """basic""" +199 70 evaluator """rankbased""" +199 71 dataset """wn18rr""" +199 71 model """distmult""" +199 71 loss """softplus""" +199 71 regularizer """no""" +199 71 optimizer """adam""" +199 71 training_loop """owa""" +199 71 negative_sampler """basic""" +199 71 evaluator """rankbased""" +199 72 dataset """wn18rr""" +199 72 model """distmult""" +199 72 loss """softplus""" +199 72 regularizer """no""" +199 72 optimizer """adam""" +199 72 training_loop """owa""" +199 72 negative_sampler """basic""" +199 72 evaluator """rankbased""" +199 73 dataset """wn18rr""" +199 73 model """distmult""" +199 73 loss """softplus""" +199 73 regularizer """no""" +199 73 optimizer """adam""" +199 73 training_loop """owa""" +199 73 negative_sampler """basic""" +199 73 evaluator """rankbased""" +199 74 dataset """wn18rr""" +199 74 model """distmult""" +199 74 loss """softplus""" +199 74 regularizer """no""" +199 74 optimizer """adam""" +199 74 training_loop """owa""" +199 74 negative_sampler """basic""" +199 74 evaluator """rankbased""" +199 75 dataset """wn18rr""" +199 75 model """distmult""" +199 75 loss """softplus""" +199 75 regularizer """no""" +199 75 optimizer """adam""" +199 75 training_loop """owa""" +199 75 negative_sampler """basic""" +199 75 evaluator """rankbased""" +199 76 dataset """wn18rr""" +199 76 model """distmult""" +199 76 loss """softplus""" +199 76 regularizer """no""" +199 76 optimizer """adam""" +199 76 training_loop """owa""" +199 76 negative_sampler """basic""" +199 76 evaluator """rankbased""" +199 77 dataset """wn18rr""" +199 77 model """distmult""" +199 77 loss """softplus""" +199 77 regularizer """no""" +199 77 optimizer """adam""" +199 77 training_loop """owa""" +199 77 negative_sampler """basic""" +199 77 evaluator 
"""rankbased""" +199 78 dataset """wn18rr""" +199 78 model """distmult""" +199 78 loss """softplus""" +199 78 regularizer """no""" +199 78 optimizer """adam""" +199 78 training_loop """owa""" +199 78 negative_sampler """basic""" +199 78 evaluator """rankbased""" +199 79 dataset """wn18rr""" +199 79 model """distmult""" +199 79 loss """softplus""" +199 79 regularizer """no""" +199 79 optimizer """adam""" +199 79 training_loop """owa""" +199 79 negative_sampler """basic""" +199 79 evaluator """rankbased""" +199 80 dataset """wn18rr""" +199 80 model """distmult""" +199 80 loss """softplus""" +199 80 regularizer """no""" +199 80 optimizer """adam""" +199 80 training_loop """owa""" +199 80 negative_sampler """basic""" +199 80 evaluator """rankbased""" +199 81 dataset """wn18rr""" +199 81 model """distmult""" +199 81 loss """softplus""" +199 81 regularizer """no""" +199 81 optimizer """adam""" +199 81 training_loop """owa""" +199 81 negative_sampler """basic""" +199 81 evaluator """rankbased""" +199 82 dataset """wn18rr""" +199 82 model """distmult""" +199 82 loss """softplus""" +199 82 regularizer """no""" +199 82 optimizer """adam""" +199 82 training_loop """owa""" +199 82 negative_sampler """basic""" +199 82 evaluator """rankbased""" +199 83 dataset """wn18rr""" +199 83 model """distmult""" +199 83 loss """softplus""" +199 83 regularizer """no""" +199 83 optimizer """adam""" +199 83 training_loop """owa""" +199 83 negative_sampler """basic""" +199 83 evaluator """rankbased""" +199 84 dataset """wn18rr""" +199 84 model """distmult""" +199 84 loss """softplus""" +199 84 regularizer """no""" +199 84 optimizer """adam""" +199 84 training_loop """owa""" +199 84 negative_sampler """basic""" +199 84 evaluator """rankbased""" +199 85 dataset """wn18rr""" +199 85 model """distmult""" +199 85 loss """softplus""" +199 85 regularizer """no""" +199 85 optimizer """adam""" +199 85 training_loop """owa""" +199 85 negative_sampler """basic""" +199 85 evaluator """rankbased""" +199 86 
dataset """wn18rr""" +199 86 model """distmult""" +199 86 loss """softplus""" +199 86 regularizer """no""" +199 86 optimizer """adam""" +199 86 training_loop """owa""" +199 86 negative_sampler """basic""" +199 86 evaluator """rankbased""" +199 87 dataset """wn18rr""" +199 87 model """distmult""" +199 87 loss """softplus""" +199 87 regularizer """no""" +199 87 optimizer """adam""" +199 87 training_loop """owa""" +199 87 negative_sampler """basic""" +199 87 evaluator """rankbased""" +199 88 dataset """wn18rr""" +199 88 model """distmult""" +199 88 loss """softplus""" +199 88 regularizer """no""" +199 88 optimizer """adam""" +199 88 training_loop """owa""" +199 88 negative_sampler """basic""" +199 88 evaluator """rankbased""" +199 89 dataset """wn18rr""" +199 89 model """distmult""" +199 89 loss """softplus""" +199 89 regularizer """no""" +199 89 optimizer """adam""" +199 89 training_loop """owa""" +199 89 negative_sampler """basic""" +199 89 evaluator """rankbased""" +199 90 dataset """wn18rr""" +199 90 model """distmult""" +199 90 loss """softplus""" +199 90 regularizer """no""" +199 90 optimizer """adam""" +199 90 training_loop """owa""" +199 90 negative_sampler """basic""" +199 90 evaluator """rankbased""" +199 91 dataset """wn18rr""" +199 91 model """distmult""" +199 91 loss """softplus""" +199 91 regularizer """no""" +199 91 optimizer """adam""" +199 91 training_loop """owa""" +199 91 negative_sampler """basic""" +199 91 evaluator """rankbased""" +199 92 dataset """wn18rr""" +199 92 model """distmult""" +199 92 loss """softplus""" +199 92 regularizer """no""" +199 92 optimizer """adam""" +199 92 training_loop """owa""" +199 92 negative_sampler """basic""" +199 92 evaluator """rankbased""" +199 93 dataset """wn18rr""" +199 93 model """distmult""" +199 93 loss """softplus""" +199 93 regularizer """no""" +199 93 optimizer """adam""" +199 93 training_loop """owa""" +199 93 negative_sampler """basic""" +199 93 evaluator """rankbased""" +199 94 dataset """wn18rr""" 
+199 94 model """distmult""" +199 94 loss """softplus""" +199 94 regularizer """no""" +199 94 optimizer """adam""" +199 94 training_loop """owa""" +199 94 negative_sampler """basic""" +199 94 evaluator """rankbased""" +199 95 dataset """wn18rr""" +199 95 model """distmult""" +199 95 loss """softplus""" +199 95 regularizer """no""" +199 95 optimizer """adam""" +199 95 training_loop """owa""" +199 95 negative_sampler """basic""" +199 95 evaluator """rankbased""" +199 96 dataset """wn18rr""" +199 96 model """distmult""" +199 96 loss """softplus""" +199 96 regularizer """no""" +199 96 optimizer """adam""" +199 96 training_loop """owa""" +199 96 negative_sampler """basic""" +199 96 evaluator """rankbased""" +199 97 dataset """wn18rr""" +199 97 model """distmult""" +199 97 loss """softplus""" +199 97 regularizer """no""" +199 97 optimizer """adam""" +199 97 training_loop """owa""" +199 97 negative_sampler """basic""" +199 97 evaluator """rankbased""" +199 98 dataset """wn18rr""" +199 98 model """distmult""" +199 98 loss """softplus""" +199 98 regularizer """no""" +199 98 optimizer """adam""" +199 98 training_loop """owa""" +199 98 negative_sampler """basic""" +199 98 evaluator """rankbased""" +199 99 dataset """wn18rr""" +199 99 model """distmult""" +199 99 loss """softplus""" +199 99 regularizer """no""" +199 99 optimizer """adam""" +199 99 training_loop """owa""" +199 99 negative_sampler """basic""" +199 99 evaluator """rankbased""" +199 100 dataset """wn18rr""" +199 100 model """distmult""" +199 100 loss """softplus""" +199 100 regularizer """no""" +199 100 optimizer """adam""" +199 100 training_loop """owa""" +199 100 negative_sampler """basic""" +199 100 evaluator """rankbased""" +200 1 model.embedding_dim 2.0 +200 1 optimizer.lr 0.050909822490892244 +200 1 training.batch_size 1.0 +200 1 training.label_smoothing 0.020346638991777424 +200 2 model.embedding_dim 2.0 +200 2 optimizer.lr 0.006949267909234321 +200 2 training.batch_size 0.0 +200 2 training.label_smoothing 
0.01301482017882324 +200 3 model.embedding_dim 2.0 +200 3 optimizer.lr 0.012662318334662973 +200 3 training.batch_size 0.0 +200 3 training.label_smoothing 0.062195321996027564 +200 4 model.embedding_dim 0.0 +200 4 optimizer.lr 0.021515506289119846 +200 4 training.batch_size 0.0 +200 4 training.label_smoothing 0.4930046451393812 +200 5 model.embedding_dim 2.0 +200 5 optimizer.lr 0.012952620986111251 +200 5 training.batch_size 0.0 +200 5 training.label_smoothing 0.02038778654192068 +200 6 model.embedding_dim 2.0 +200 6 optimizer.lr 0.002112405925475073 +200 6 training.batch_size 2.0 +200 6 training.label_smoothing 0.025171086967299326 +200 7 model.embedding_dim 0.0 +200 7 optimizer.lr 0.0015999495501271559 +200 7 training.batch_size 1.0 +200 7 training.label_smoothing 0.058678804682021656 +200 8 model.embedding_dim 1.0 +200 8 optimizer.lr 0.0059676659054541704 +200 8 training.batch_size 0.0 +200 8 training.label_smoothing 0.023422411931668274 +200 9 model.embedding_dim 2.0 +200 9 optimizer.lr 0.001004102200999067 +200 9 training.batch_size 1.0 +200 9 training.label_smoothing 0.016915715702172655 +200 10 model.embedding_dim 1.0 +200 10 optimizer.lr 0.021573102609198046 +200 10 training.batch_size 1.0 +200 10 training.label_smoothing 0.00294390110013064 +200 11 model.embedding_dim 2.0 +200 11 optimizer.lr 0.006499913294885338 +200 11 training.batch_size 1.0 +200 11 training.label_smoothing 0.0162549337842421 +200 1 dataset """wn18rr""" +200 1 model """distmult""" +200 1 loss """crossentropy""" +200 1 regularizer """no""" +200 1 optimizer """adam""" +200 1 training_loop """lcwa""" +200 1 evaluator """rankbased""" +200 2 dataset """wn18rr""" +200 2 model """distmult""" +200 2 loss """crossentropy""" +200 2 regularizer """no""" +200 2 optimizer """adam""" +200 2 training_loop """lcwa""" +200 2 evaluator """rankbased""" +200 3 dataset """wn18rr""" +200 3 model """distmult""" +200 3 loss """crossentropy""" +200 3 regularizer """no""" +200 3 optimizer """adam""" +200 3 
training_loop """lcwa""" +200 3 evaluator """rankbased""" +200 4 dataset """wn18rr""" +200 4 model """distmult""" +200 4 loss """crossentropy""" +200 4 regularizer """no""" +200 4 optimizer """adam""" +200 4 training_loop """lcwa""" +200 4 evaluator """rankbased""" +200 5 dataset """wn18rr""" +200 5 model """distmult""" +200 5 loss """crossentropy""" +200 5 regularizer """no""" +200 5 optimizer """adam""" +200 5 training_loop """lcwa""" +200 5 evaluator """rankbased""" +200 6 dataset """wn18rr""" +200 6 model """distmult""" +200 6 loss """crossentropy""" +200 6 regularizer """no""" +200 6 optimizer """adam""" +200 6 training_loop """lcwa""" +200 6 evaluator """rankbased""" +200 7 dataset """wn18rr""" +200 7 model """distmult""" +200 7 loss """crossentropy""" +200 7 regularizer """no""" +200 7 optimizer """adam""" +200 7 training_loop """lcwa""" +200 7 evaluator """rankbased""" +200 8 dataset """wn18rr""" +200 8 model """distmult""" +200 8 loss """crossentropy""" +200 8 regularizer """no""" +200 8 optimizer """adam""" +200 8 training_loop """lcwa""" +200 8 evaluator """rankbased""" +200 9 dataset """wn18rr""" +200 9 model """distmult""" +200 9 loss """crossentropy""" +200 9 regularizer """no""" +200 9 optimizer """adam""" +200 9 training_loop """lcwa""" +200 9 evaluator """rankbased""" +200 10 dataset """wn18rr""" +200 10 model """distmult""" +200 10 loss """crossentropy""" +200 10 regularizer """no""" +200 10 optimizer """adam""" +200 10 training_loop """lcwa""" +200 10 evaluator """rankbased""" +200 11 dataset """wn18rr""" +200 11 model """distmult""" +200 11 loss """crossentropy""" +200 11 regularizer """no""" +200 11 optimizer """adam""" +200 11 training_loop """lcwa""" +200 11 evaluator """rankbased""" +201 1 model.embedding_dim 0.0 +201 1 optimizer.lr 0.09490867076419314 +201 1 training.batch_size 1.0 +201 1 training.label_smoothing 0.0335976922023723 +201 2 model.embedding_dim 1.0 +201 2 optimizer.lr 0.009761516356441824 +201 2 training.batch_size 2.0 +201 2 
training.label_smoothing 0.029003702732405618 +201 3 model.embedding_dim 0.0 +201 3 optimizer.lr 0.01887129092769178 +201 3 training.batch_size 0.0 +201 3 training.label_smoothing 0.23021243269926908 +201 4 model.embedding_dim 2.0 +201 4 optimizer.lr 0.06893702410885427 +201 4 training.batch_size 2.0 +201 4 training.label_smoothing 0.003553214303644289 +201 5 model.embedding_dim 2.0 +201 5 optimizer.lr 0.06697585499723359 +201 5 training.batch_size 0.0 +201 5 training.label_smoothing 0.010375844537403091 +201 6 model.embedding_dim 1.0 +201 6 optimizer.lr 0.002392000977613645 +201 6 training.batch_size 2.0 +201 6 training.label_smoothing 0.0072345969259250634 +201 7 model.embedding_dim 2.0 +201 7 optimizer.lr 0.008083398417719538 +201 7 training.batch_size 2.0 +201 7 training.label_smoothing 0.008008442173367143 +201 8 model.embedding_dim 1.0 +201 8 optimizer.lr 0.06251763928114346 +201 8 training.batch_size 0.0 +201 8 training.label_smoothing 0.018755378067479046 +201 9 model.embedding_dim 0.0 +201 9 optimizer.lr 0.02796436475768373 +201 9 training.batch_size 2.0 +201 9 training.label_smoothing 0.0048987417838849655 +201 10 model.embedding_dim 1.0 +201 10 optimizer.lr 0.007292217355742454 +201 10 training.batch_size 0.0 +201 10 training.label_smoothing 0.11722981356497104 +201 11 model.embedding_dim 0.0 +201 11 optimizer.lr 0.023085575480386186 +201 11 training.batch_size 2.0 +201 11 training.label_smoothing 0.9080844943844986 +201 12 model.embedding_dim 2.0 +201 12 optimizer.lr 0.005869161254018368 +201 12 training.batch_size 1.0 +201 12 training.label_smoothing 0.16086607308464246 +201 13 model.embedding_dim 2.0 +201 13 optimizer.lr 0.013883677634739054 +201 13 training.batch_size 1.0 +201 13 training.label_smoothing 0.007395691789354226 +201 14 model.embedding_dim 0.0 +201 14 optimizer.lr 0.0012105685898445382 +201 14 training.batch_size 1.0 +201 14 training.label_smoothing 0.024934306735676634 +201 15 model.embedding_dim 0.0 +201 15 optimizer.lr 
0.08718541013307513 +201 15 training.batch_size 0.0 +201 15 training.label_smoothing 0.011986314255309544 +201 16 model.embedding_dim 1.0 +201 16 optimizer.lr 0.0025827827404932766 +201 16 training.batch_size 0.0 +201 16 training.label_smoothing 0.041676664913851406 +201 17 model.embedding_dim 2.0 +201 17 optimizer.lr 0.008586233485726909 +201 17 training.batch_size 0.0 +201 17 training.label_smoothing 0.0010558007576783222 +201 18 model.embedding_dim 2.0 +201 18 optimizer.lr 0.016611465801931454 +201 18 training.batch_size 2.0 +201 18 training.label_smoothing 0.02494546395802048 +201 1 dataset """wn18rr""" +201 1 model """distmult""" +201 1 loss """crossentropy""" +201 1 regularizer """no""" +201 1 optimizer """adam""" +201 1 training_loop """lcwa""" +201 1 evaluator """rankbased""" +201 2 dataset """wn18rr""" +201 2 model """distmult""" +201 2 loss """crossentropy""" +201 2 regularizer """no""" +201 2 optimizer """adam""" +201 2 training_loop """lcwa""" +201 2 evaluator """rankbased""" +201 3 dataset """wn18rr""" +201 3 model """distmult""" +201 3 loss """crossentropy""" +201 3 regularizer """no""" +201 3 optimizer """adam""" +201 3 training_loop """lcwa""" +201 3 evaluator """rankbased""" +201 4 dataset """wn18rr""" +201 4 model """distmult""" +201 4 loss """crossentropy""" +201 4 regularizer """no""" +201 4 optimizer """adam""" +201 4 training_loop """lcwa""" +201 4 evaluator """rankbased""" +201 5 dataset """wn18rr""" +201 5 model """distmult""" +201 5 loss """crossentropy""" +201 5 regularizer """no""" +201 5 optimizer """adam""" +201 5 training_loop """lcwa""" +201 5 evaluator """rankbased""" +201 6 dataset """wn18rr""" +201 6 model """distmult""" +201 6 loss """crossentropy""" +201 6 regularizer """no""" +201 6 optimizer """adam""" +201 6 training_loop """lcwa""" +201 6 evaluator """rankbased""" +201 7 dataset """wn18rr""" +201 7 model """distmult""" +201 7 loss """crossentropy""" +201 7 regularizer """no""" +201 7 optimizer """adam""" +201 7 training_loop 
"""lcwa""" +201 7 evaluator """rankbased""" +201 8 dataset """wn18rr""" +201 8 model """distmult""" +201 8 loss """crossentropy""" +201 8 regularizer """no""" +201 8 optimizer """adam""" +201 8 training_loop """lcwa""" +201 8 evaluator """rankbased""" +201 9 dataset """wn18rr""" +201 9 model """distmult""" +201 9 loss """crossentropy""" +201 9 regularizer """no""" +201 9 optimizer """adam""" +201 9 training_loop """lcwa""" +201 9 evaluator """rankbased""" +201 10 dataset """wn18rr""" +201 10 model """distmult""" +201 10 loss """crossentropy""" +201 10 regularizer """no""" +201 10 optimizer """adam""" +201 10 training_loop """lcwa""" +201 10 evaluator """rankbased""" +201 11 dataset """wn18rr""" +201 11 model """distmult""" +201 11 loss """crossentropy""" +201 11 regularizer """no""" +201 11 optimizer """adam""" +201 11 training_loop """lcwa""" +201 11 evaluator """rankbased""" +201 12 dataset """wn18rr""" +201 12 model """distmult""" +201 12 loss """crossentropy""" +201 12 regularizer """no""" +201 12 optimizer """adam""" +201 12 training_loop """lcwa""" +201 12 evaluator """rankbased""" +201 13 dataset """wn18rr""" +201 13 model """distmult""" +201 13 loss """crossentropy""" +201 13 regularizer """no""" +201 13 optimizer """adam""" +201 13 training_loop """lcwa""" +201 13 evaluator """rankbased""" +201 14 dataset """wn18rr""" +201 14 model """distmult""" +201 14 loss """crossentropy""" +201 14 regularizer """no""" +201 14 optimizer """adam""" +201 14 training_loop """lcwa""" +201 14 evaluator """rankbased""" +201 15 dataset """wn18rr""" +201 15 model """distmult""" +201 15 loss """crossentropy""" +201 15 regularizer """no""" +201 15 optimizer """adam""" +201 15 training_loop """lcwa""" +201 15 evaluator """rankbased""" +201 16 dataset """wn18rr""" +201 16 model """distmult""" +201 16 loss """crossentropy""" +201 16 regularizer """no""" +201 16 optimizer """adam""" +201 16 training_loop """lcwa""" +201 16 evaluator """rankbased""" +201 17 dataset """wn18rr""" +201 
17 model """distmult""" +201 17 loss """crossentropy""" +201 17 regularizer """no""" +201 17 optimizer """adam""" +201 17 training_loop """lcwa""" +201 17 evaluator """rankbased""" +201 18 dataset """wn18rr""" +201 18 model """distmult""" +201 18 loss """crossentropy""" +201 18 regularizer """no""" +201 18 optimizer """adam""" +201 18 training_loop """lcwa""" +201 18 evaluator """rankbased""" +202 1 model.embedding_dim 1.0 +202 1 optimizer.lr 0.013862951931164333 +202 1 training.batch_size 1.0 +202 1 training.label_smoothing 0.8374468685605196 +202 2 model.embedding_dim 1.0 +202 2 optimizer.lr 0.008094878350943715 +202 2 training.batch_size 1.0 +202 2 training.label_smoothing 0.06135040664032105 +202 3 model.embedding_dim 0.0 +202 3 optimizer.lr 0.06146468227078572 +202 3 training.batch_size 2.0 +202 3 training.label_smoothing 0.003275702836216675 +202 4 model.embedding_dim 1.0 +202 4 optimizer.lr 0.011092658097119288 +202 4 training.batch_size 2.0 +202 4 training.label_smoothing 0.003182432883186778 +202 5 model.embedding_dim 1.0 +202 5 optimizer.lr 0.005100335908572562 +202 5 training.batch_size 0.0 +202 5 training.label_smoothing 0.003024251395231347 +202 6 model.embedding_dim 1.0 +202 6 optimizer.lr 0.002572835612189098 +202 6 training.batch_size 2.0 +202 6 training.label_smoothing 0.0027477051597897834 +202 7 model.embedding_dim 0.0 +202 7 optimizer.lr 0.0015598896605839784 +202 7 training.batch_size 0.0 +202 7 training.label_smoothing 0.48653530385065 +202 8 model.embedding_dim 1.0 +202 8 optimizer.lr 0.0054089903164463975 +202 8 training.batch_size 1.0 +202 8 training.label_smoothing 0.36444511244106026 +202 9 model.embedding_dim 2.0 +202 9 optimizer.lr 0.0010881161659486245 +202 9 training.batch_size 2.0 +202 9 training.label_smoothing 0.08218179794712017 +202 1 dataset """wn18rr""" +202 1 model """distmult""" +202 1 loss """bceaftersigmoid""" +202 1 regularizer """no""" +202 1 optimizer """adam""" +202 1 training_loop """lcwa""" +202 1 evaluator 
"""rankbased""" +202 2 dataset """wn18rr""" +202 2 model """distmult""" +202 2 loss """bceaftersigmoid""" +202 2 regularizer """no""" +202 2 optimizer """adam""" +202 2 training_loop """lcwa""" +202 2 evaluator """rankbased""" +202 3 dataset """wn18rr""" +202 3 model """distmult""" +202 3 loss """bceaftersigmoid""" +202 3 regularizer """no""" +202 3 optimizer """adam""" +202 3 training_loop """lcwa""" +202 3 evaluator """rankbased""" +202 4 dataset """wn18rr""" +202 4 model """distmult""" +202 4 loss """bceaftersigmoid""" +202 4 regularizer """no""" +202 4 optimizer """adam""" +202 4 training_loop """lcwa""" +202 4 evaluator """rankbased""" +202 5 dataset """wn18rr""" +202 5 model """distmult""" +202 5 loss """bceaftersigmoid""" +202 5 regularizer """no""" +202 5 optimizer """adam""" +202 5 training_loop """lcwa""" +202 5 evaluator """rankbased""" +202 6 dataset """wn18rr""" +202 6 model """distmult""" +202 6 loss """bceaftersigmoid""" +202 6 regularizer """no""" +202 6 optimizer """adam""" +202 6 training_loop """lcwa""" +202 6 evaluator """rankbased""" +202 7 dataset """wn18rr""" +202 7 model """distmult""" +202 7 loss """bceaftersigmoid""" +202 7 regularizer """no""" +202 7 optimizer """adam""" +202 7 training_loop """lcwa""" +202 7 evaluator """rankbased""" +202 8 dataset """wn18rr""" +202 8 model """distmult""" +202 8 loss """bceaftersigmoid""" +202 8 regularizer """no""" +202 8 optimizer """adam""" +202 8 training_loop """lcwa""" +202 8 evaluator """rankbased""" +202 9 dataset """wn18rr""" +202 9 model """distmult""" +202 9 loss """bceaftersigmoid""" +202 9 regularizer """no""" +202 9 optimizer """adam""" +202 9 training_loop """lcwa""" +202 9 evaluator """rankbased""" +203 1 model.embedding_dim 0.0 +203 1 optimizer.lr 0.0015782513192141633 +203 1 training.batch_size 1.0 +203 1 training.label_smoothing 0.27752916869062383 +203 2 model.embedding_dim 2.0 +203 2 optimizer.lr 0.0023876439548138747 +203 2 training.batch_size 0.0 +203 2 training.label_smoothing 
0.5400616955332042 +203 3 model.embedding_dim 0.0 +203 3 optimizer.lr 0.0010029732684191854 +203 3 training.batch_size 2.0 +203 3 training.label_smoothing 0.07408428240196441 +203 4 model.embedding_dim 2.0 +203 4 optimizer.lr 0.001471045800828087 +203 4 training.batch_size 1.0 +203 4 training.label_smoothing 0.00174154866818315 +203 5 model.embedding_dim 2.0 +203 5 optimizer.lr 0.018367449350092185 +203 5 training.batch_size 0.0 +203 5 training.label_smoothing 0.11299151200202552 +203 6 model.embedding_dim 2.0 +203 6 optimizer.lr 0.03873312986342858 +203 6 training.batch_size 2.0 +203 6 training.label_smoothing 0.01739793335083182 +203 7 model.embedding_dim 0.0 +203 7 optimizer.lr 0.0173739373116492 +203 7 training.batch_size 2.0 +203 7 training.label_smoothing 0.033373730985887 +203 1 dataset """wn18rr""" +203 1 model """distmult""" +203 1 loss """softplus""" +203 1 regularizer """no""" +203 1 optimizer """adam""" +203 1 training_loop """lcwa""" +203 1 evaluator """rankbased""" +203 2 dataset """wn18rr""" +203 2 model """distmult""" +203 2 loss """softplus""" +203 2 regularizer """no""" +203 2 optimizer """adam""" +203 2 training_loop """lcwa""" +203 2 evaluator """rankbased""" +203 3 dataset """wn18rr""" +203 3 model """distmult""" +203 3 loss """softplus""" +203 3 regularizer """no""" +203 3 optimizer """adam""" +203 3 training_loop """lcwa""" +203 3 evaluator """rankbased""" +203 4 dataset """wn18rr""" +203 4 model """distmult""" +203 4 loss """softplus""" +203 4 regularizer """no""" +203 4 optimizer """adam""" +203 4 training_loop """lcwa""" +203 4 evaluator """rankbased""" +203 5 dataset """wn18rr""" +203 5 model """distmult""" +203 5 loss """softplus""" +203 5 regularizer """no""" +203 5 optimizer """adam""" +203 5 training_loop """lcwa""" +203 5 evaluator """rankbased""" +203 6 dataset """wn18rr""" +203 6 model """distmult""" +203 6 loss """softplus""" +203 6 regularizer """no""" +203 6 optimizer """adam""" +203 6 training_loop """lcwa""" +203 6 evaluator 
"""rankbased""" +203 7 dataset """wn18rr""" +203 7 model """distmult""" +203 7 loss """softplus""" +203 7 regularizer """no""" +203 7 optimizer """adam""" +203 7 training_loop """lcwa""" +203 7 evaluator """rankbased""" +204 1 model.embedding_dim 0.0 +204 1 optimizer.lr 0.0023242782444151035 +204 1 training.batch_size 1.0 +204 1 training.label_smoothing 0.8651180128428991 +204 2 model.embedding_dim 0.0 +204 2 optimizer.lr 0.0054005874538257245 +204 2 training.batch_size 2.0 +204 2 training.label_smoothing 0.0019089608609424934 +204 3 model.embedding_dim 2.0 +204 3 optimizer.lr 0.056326825567618095 +204 3 training.batch_size 1.0 +204 3 training.label_smoothing 0.08817599406821895 +204 4 model.embedding_dim 1.0 +204 4 optimizer.lr 0.008189253779149485 +204 4 training.batch_size 2.0 +204 4 training.label_smoothing 0.05624346192434626 +204 5 model.embedding_dim 1.0 +204 5 optimizer.lr 0.03347049679905889 +204 5 training.batch_size 1.0 +204 5 training.label_smoothing 0.010295217296676935 +204 6 model.embedding_dim 2.0 +204 6 optimizer.lr 0.006330059317356587 +204 6 training.batch_size 0.0 +204 6 training.label_smoothing 0.005145495550460713 +204 7 model.embedding_dim 0.0 +204 7 optimizer.lr 0.002723331251326311 +204 7 training.batch_size 0.0 +204 7 training.label_smoothing 0.7006889057782684 +204 8 model.embedding_dim 2.0 +204 8 optimizer.lr 0.0022807171909205674 +204 8 training.batch_size 0.0 +204 8 training.label_smoothing 0.12769211028338986 +204 9 model.embedding_dim 2.0 +204 9 optimizer.lr 0.00499673798958802 +204 9 training.batch_size 2.0 +204 9 training.label_smoothing 0.046615499782165844 +204 10 model.embedding_dim 0.0 +204 10 optimizer.lr 0.002921773739996739 +204 10 training.batch_size 2.0 +204 10 training.label_smoothing 0.0018162464244554568 +204 11 model.embedding_dim 1.0 +204 11 optimizer.lr 0.0014391988475041038 +204 11 training.batch_size 0.0 +204 11 training.label_smoothing 0.8332303641295747 +204 1 dataset """wn18rr""" +204 1 model """distmult""" +204 
1 loss """bceaftersigmoid""" +204 1 regularizer """no""" +204 1 optimizer """adam""" +204 1 training_loop """lcwa""" +204 1 evaluator """rankbased""" +204 2 dataset """wn18rr""" +204 2 model """distmult""" +204 2 loss """bceaftersigmoid""" +204 2 regularizer """no""" +204 2 optimizer """adam""" +204 2 training_loop """lcwa""" +204 2 evaluator """rankbased""" +204 3 dataset """wn18rr""" +204 3 model """distmult""" +204 3 loss """bceaftersigmoid""" +204 3 regularizer """no""" +204 3 optimizer """adam""" +204 3 training_loop """lcwa""" +204 3 evaluator """rankbased""" +204 4 dataset """wn18rr""" +204 4 model """distmult""" +204 4 loss """bceaftersigmoid""" +204 4 regularizer """no""" +204 4 optimizer """adam""" +204 4 training_loop """lcwa""" +204 4 evaluator """rankbased""" +204 5 dataset """wn18rr""" +204 5 model """distmult""" +204 5 loss """bceaftersigmoid""" +204 5 regularizer """no""" +204 5 optimizer """adam""" +204 5 training_loop """lcwa""" +204 5 evaluator """rankbased""" +204 6 dataset """wn18rr""" +204 6 model """distmult""" +204 6 loss """bceaftersigmoid""" +204 6 regularizer """no""" +204 6 optimizer """adam""" +204 6 training_loop """lcwa""" +204 6 evaluator """rankbased""" +204 7 dataset """wn18rr""" +204 7 model """distmult""" +204 7 loss """bceaftersigmoid""" +204 7 regularizer """no""" +204 7 optimizer """adam""" +204 7 training_loop """lcwa""" +204 7 evaluator """rankbased""" +204 8 dataset """wn18rr""" +204 8 model """distmult""" +204 8 loss """bceaftersigmoid""" +204 8 regularizer """no""" +204 8 optimizer """adam""" +204 8 training_loop """lcwa""" +204 8 evaluator """rankbased""" +204 9 dataset """wn18rr""" +204 9 model """distmult""" +204 9 loss """bceaftersigmoid""" +204 9 regularizer """no""" +204 9 optimizer """adam""" +204 9 training_loop """lcwa""" +204 9 evaluator """rankbased""" +204 10 dataset """wn18rr""" +204 10 model """distmult""" +204 10 loss """bceaftersigmoid""" +204 10 regularizer """no""" +204 10 optimizer """adam""" +204 10 
training_loop """lcwa""" +204 10 evaluator """rankbased""" +204 11 dataset """wn18rr""" +204 11 model """distmult""" +204 11 loss """bceaftersigmoid""" +204 11 regularizer """no""" +204 11 optimizer """adam""" +204 11 training_loop """lcwa""" +204 11 evaluator """rankbased""" +205 1 model.embedding_dim 2.0 +205 1 optimizer.lr 0.06881450109778532 +205 1 training.batch_size 1.0 +205 1 training.label_smoothing 0.011832778679921023 +205 2 model.embedding_dim 1.0 +205 2 optimizer.lr 0.016329350088764804 +205 2 training.batch_size 0.0 +205 2 training.label_smoothing 0.2219740381786327 +205 3 model.embedding_dim 2.0 +205 3 optimizer.lr 0.0075767218962482005 +205 3 training.batch_size 1.0 +205 3 training.label_smoothing 0.01774637945614916 +205 4 model.embedding_dim 2.0 +205 4 optimizer.lr 0.005714453827730859 +205 4 training.batch_size 0.0 +205 4 training.label_smoothing 0.04000420094192406 +205 5 model.embedding_dim 0.0 +205 5 optimizer.lr 0.002651383484498591 +205 5 training.batch_size 1.0 +205 5 training.label_smoothing 0.08213193898216954 +205 6 model.embedding_dim 0.0 +205 6 optimizer.lr 0.028361436789574708 +205 6 training.batch_size 0.0 +205 6 training.label_smoothing 0.014277936663515836 +205 7 model.embedding_dim 2.0 +205 7 optimizer.lr 0.008877049704271452 +205 7 training.batch_size 0.0 +205 7 training.label_smoothing 0.0021421549220131692 +205 8 model.embedding_dim 1.0 +205 8 optimizer.lr 0.02652949767342057 +205 8 training.batch_size 2.0 +205 8 training.label_smoothing 0.06000110679456699 +205 9 model.embedding_dim 2.0 +205 9 optimizer.lr 0.008796919033214003 +205 9 training.batch_size 0.0 +205 9 training.label_smoothing 0.007723763711165599 +205 10 model.embedding_dim 2.0 +205 10 optimizer.lr 0.0015304988105310374 +205 10 training.batch_size 2.0 +205 10 training.label_smoothing 0.054055340370273965 +205 1 dataset """wn18rr""" +205 1 model """distmult""" +205 1 loss """softplus""" +205 1 regularizer """no""" +205 1 optimizer """adam""" +205 1 training_loop 
"""lcwa""" +205 1 evaluator """rankbased""" +205 2 dataset """wn18rr""" +205 2 model """distmult""" +205 2 loss """softplus""" +205 2 regularizer """no""" +205 2 optimizer """adam""" +205 2 training_loop """lcwa""" +205 2 evaluator """rankbased""" +205 3 dataset """wn18rr""" +205 3 model """distmult""" +205 3 loss """softplus""" +205 3 regularizer """no""" +205 3 optimizer """adam""" +205 3 training_loop """lcwa""" +205 3 evaluator """rankbased""" +205 4 dataset """wn18rr""" +205 4 model """distmult""" +205 4 loss """softplus""" +205 4 regularizer """no""" +205 4 optimizer """adam""" +205 4 training_loop """lcwa""" +205 4 evaluator """rankbased""" +205 5 dataset """wn18rr""" +205 5 model """distmult""" +205 5 loss """softplus""" +205 5 regularizer """no""" +205 5 optimizer """adam""" +205 5 training_loop """lcwa""" +205 5 evaluator """rankbased""" +205 6 dataset """wn18rr""" +205 6 model """distmult""" +205 6 loss """softplus""" +205 6 regularizer """no""" +205 6 optimizer """adam""" +205 6 training_loop """lcwa""" +205 6 evaluator """rankbased""" +205 7 dataset """wn18rr""" +205 7 model """distmult""" +205 7 loss """softplus""" +205 7 regularizer """no""" +205 7 optimizer """adam""" +205 7 training_loop """lcwa""" +205 7 evaluator """rankbased""" +205 8 dataset """wn18rr""" +205 8 model """distmult""" +205 8 loss """softplus""" +205 8 regularizer """no""" +205 8 optimizer """adam""" +205 8 training_loop """lcwa""" +205 8 evaluator """rankbased""" +205 9 dataset """wn18rr""" +205 9 model """distmult""" +205 9 loss """softplus""" +205 9 regularizer """no""" +205 9 optimizer """adam""" +205 9 training_loop """lcwa""" +205 9 evaluator """rankbased""" +205 10 dataset """wn18rr""" +205 10 model """distmult""" +205 10 loss """softplus""" +205 10 regularizer """no""" +205 10 optimizer """adam""" +205 10 training_loop """lcwa""" +205 10 evaluator """rankbased""" +206 1 model.embedding_dim 0.0 +206 1 loss.margin 6.855719048101325 +206 1 loss.adversarial_temperature 
0.8778355524843051 +206 1 optimizer.lr 0.008876787286728473 +206 1 negative_sampler.num_negs_per_pos 82.0 +206 1 training.batch_size 2.0 +206 2 model.embedding_dim 2.0 +206 2 loss.margin 21.956109652940654 +206 2 loss.adversarial_temperature 0.32826499705254075 +206 2 optimizer.lr 0.06621753191830364 +206 2 negative_sampler.num_negs_per_pos 27.0 +206 2 training.batch_size 1.0 +206 3 model.embedding_dim 1.0 +206 3 loss.margin 13.600615350301249 +206 3 loss.adversarial_temperature 0.6457022414832186 +206 3 optimizer.lr 0.022287543573104904 +206 3 negative_sampler.num_negs_per_pos 18.0 +206 3 training.batch_size 1.0 +206 4 model.embedding_dim 0.0 +206 4 loss.margin 19.34189403362306 +206 4 loss.adversarial_temperature 0.33010297345629813 +206 4 optimizer.lr 0.0720527911705904 +206 4 negative_sampler.num_negs_per_pos 11.0 +206 4 training.batch_size 0.0 +206 5 model.embedding_dim 0.0 +206 5 loss.margin 21.741146535696934 +206 5 loss.adversarial_temperature 0.93618988406181 +206 5 optimizer.lr 0.001064918987519947 +206 5 negative_sampler.num_negs_per_pos 97.0 +206 5 training.batch_size 1.0 +206 6 model.embedding_dim 1.0 +206 6 loss.margin 19.776507600776167 +206 6 loss.adversarial_temperature 0.7730201262972588 +206 6 optimizer.lr 0.0013559366698863533 +206 6 negative_sampler.num_negs_per_pos 76.0 +206 6 training.batch_size 2.0 +206 7 model.embedding_dim 0.0 +206 7 loss.margin 4.363623215134719 +206 7 loss.adversarial_temperature 0.41704097917326705 +206 7 optimizer.lr 0.0032754572775089137 +206 7 negative_sampler.num_negs_per_pos 39.0 +206 7 training.batch_size 1.0 +206 8 model.embedding_dim 1.0 +206 8 loss.margin 23.75940232836585 +206 8 loss.adversarial_temperature 0.8789402997913548 +206 8 optimizer.lr 0.0061574044810065115 +206 8 negative_sampler.num_negs_per_pos 46.0 +206 8 training.batch_size 1.0 +206 9 model.embedding_dim 1.0 +206 9 loss.margin 27.22527884372694 +206 9 loss.adversarial_temperature 0.38249701632920485 +206 9 optimizer.lr 0.014756711453620277 +206 
9 negative_sampler.num_negs_per_pos 89.0 +206 9 training.batch_size 1.0 +206 10 model.embedding_dim 2.0 +206 10 loss.margin 20.408891154043825 +206 10 loss.adversarial_temperature 0.22971181497324555 +206 10 optimizer.lr 0.001640589744975069 +206 10 negative_sampler.num_negs_per_pos 42.0 +206 10 training.batch_size 1.0 +206 11 model.embedding_dim 0.0 +206 11 loss.margin 17.79803562670865 +206 11 loss.adversarial_temperature 0.6999568728741505 +206 11 optimizer.lr 0.003678803395502257 +206 11 negative_sampler.num_negs_per_pos 26.0 +206 11 training.batch_size 2.0 +206 12 model.embedding_dim 1.0 +206 12 loss.margin 11.150865595819202 +206 12 loss.adversarial_temperature 0.6446810426424687 +206 12 optimizer.lr 0.004032613633032687 +206 12 negative_sampler.num_negs_per_pos 16.0 +206 12 training.batch_size 0.0 +206 13 model.embedding_dim 2.0 +206 13 loss.margin 20.11679919376632 +206 13 loss.adversarial_temperature 0.33745582162956667 +206 13 optimizer.lr 0.0014870898210203576 +206 13 negative_sampler.num_negs_per_pos 23.0 +206 13 training.batch_size 0.0 +206 14 model.embedding_dim 2.0 +206 14 loss.margin 12.938625786643172 +206 14 loss.adversarial_temperature 0.690966794571769 +206 14 optimizer.lr 0.0074036069550851335 +206 14 negative_sampler.num_negs_per_pos 77.0 +206 14 training.batch_size 2.0 +206 15 model.embedding_dim 2.0 +206 15 loss.margin 27.960665189491213 +206 15 loss.adversarial_temperature 0.37736223938025626 +206 15 optimizer.lr 0.06888637663912285 +206 15 negative_sampler.num_negs_per_pos 40.0 +206 15 training.batch_size 2.0 +206 16 model.embedding_dim 0.0 +206 16 loss.margin 9.644940131552556 +206 16 loss.adversarial_temperature 0.9523166121276991 +206 16 optimizer.lr 0.001580743994748319 +206 16 negative_sampler.num_negs_per_pos 49.0 +206 16 training.batch_size 0.0 +206 17 model.embedding_dim 1.0 +206 17 loss.margin 14.163536715248847 +206 17 loss.adversarial_temperature 0.31453941544217057 +206 17 optimizer.lr 0.04317732876139978 +206 17 
negative_sampler.num_negs_per_pos 25.0 +206 17 training.batch_size 1.0 +206 18 model.embedding_dim 1.0 +206 18 loss.margin 21.86272804308658 +206 18 loss.adversarial_temperature 0.9279818149579103 +206 18 optimizer.lr 0.09177995133222483 +206 18 negative_sampler.num_negs_per_pos 39.0 +206 18 training.batch_size 0.0 +206 19 model.embedding_dim 2.0 +206 19 loss.margin 9.234422342175488 +206 19 loss.adversarial_temperature 0.5853730340701101 +206 19 optimizer.lr 0.0012980383223304955 +206 19 negative_sampler.num_negs_per_pos 75.0 +206 19 training.batch_size 1.0 +206 20 model.embedding_dim 0.0 +206 20 loss.margin 25.250353320852845 +206 20 loss.adversarial_temperature 0.1585402983574317 +206 20 optimizer.lr 0.00861225073351007 +206 20 negative_sampler.num_negs_per_pos 94.0 +206 20 training.batch_size 1.0 +206 21 model.embedding_dim 2.0 +206 21 loss.margin 25.331822309325922 +206 21 loss.adversarial_temperature 0.6563693482894244 +206 21 optimizer.lr 0.001124707174872594 +206 21 negative_sampler.num_negs_per_pos 16.0 +206 21 training.batch_size 1.0 +206 22 model.embedding_dim 1.0 +206 22 loss.margin 19.939811404297483 +206 22 loss.adversarial_temperature 0.9678726083968393 +206 22 optimizer.lr 0.01075542079234385 +206 22 negative_sampler.num_negs_per_pos 96.0 +206 22 training.batch_size 1.0 +206 23 model.embedding_dim 0.0 +206 23 loss.margin 17.599417819651958 +206 23 loss.adversarial_temperature 0.8921639316579582 +206 23 optimizer.lr 0.004148726792628559 +206 23 negative_sampler.num_negs_per_pos 66.0 +206 23 training.batch_size 2.0 +206 24 model.embedding_dim 2.0 +206 24 loss.margin 5.301671310356723 +206 24 loss.adversarial_temperature 0.7525398933703488 +206 24 optimizer.lr 0.016410485980642688 +206 24 negative_sampler.num_negs_per_pos 80.0 +206 24 training.batch_size 2.0 +206 25 model.embedding_dim 0.0 +206 25 loss.margin 25.90341097774468 +206 25 loss.adversarial_temperature 0.6425654375766026 +206 25 optimizer.lr 0.0799221787718427 +206 25 
negative_sampler.num_negs_per_pos 21.0 +206 25 training.batch_size 1.0 +206 26 model.embedding_dim 2.0 +206 26 loss.margin 6.149826668736054 +206 26 loss.adversarial_temperature 0.6426898548723344 +206 26 optimizer.lr 0.0466262392032128 +206 26 negative_sampler.num_negs_per_pos 73.0 +206 26 training.batch_size 0.0 +206 27 model.embedding_dim 2.0 +206 27 loss.margin 12.580291845706586 +206 27 loss.adversarial_temperature 0.2404972750626892 +206 27 optimizer.lr 0.0964484005288112 +206 27 negative_sampler.num_negs_per_pos 27.0 +206 27 training.batch_size 1.0 +206 28 model.embedding_dim 1.0 +206 28 loss.margin 10.776317979573687 +206 28 loss.adversarial_temperature 0.376361225745472 +206 28 optimizer.lr 0.005043201726593526 +206 28 negative_sampler.num_negs_per_pos 33.0 +206 28 training.batch_size 0.0 +206 29 model.embedding_dim 2.0 +206 29 loss.margin 18.18518870075329 +206 29 loss.adversarial_temperature 0.33371169156273073 +206 29 optimizer.lr 0.06345403691532926 +206 29 negative_sampler.num_negs_per_pos 98.0 +206 29 training.batch_size 2.0 +206 30 model.embedding_dim 0.0 +206 30 loss.margin 15.872897055257008 +206 30 loss.adversarial_temperature 0.6173076622329379 +206 30 optimizer.lr 0.0010520248791704824 +206 30 negative_sampler.num_negs_per_pos 98.0 +206 30 training.batch_size 1.0 +206 31 model.embedding_dim 0.0 +206 31 loss.margin 17.987197901558556 +206 31 loss.adversarial_temperature 0.4121826469193021 +206 31 optimizer.lr 0.0012007950753730827 +206 31 negative_sampler.num_negs_per_pos 79.0 +206 31 training.batch_size 0.0 +206 32 model.embedding_dim 1.0 +206 32 loss.margin 27.60389708986484 +206 32 loss.adversarial_temperature 0.3008484150010284 +206 32 optimizer.lr 0.07705075085185085 +206 32 negative_sampler.num_negs_per_pos 94.0 +206 32 training.batch_size 2.0 +206 33 model.embedding_dim 2.0 +206 33 loss.margin 2.8660724372915056 +206 33 loss.adversarial_temperature 0.7862898528027809 +206 33 optimizer.lr 0.08540617315976592 +206 33 
negative_sampler.num_negs_per_pos 87.0 +206 33 training.batch_size 2.0 +206 34 model.embedding_dim 0.0 +206 34 loss.margin 28.118192670540868 +206 34 loss.adversarial_temperature 0.5975048642351074 +206 34 optimizer.lr 0.024587514581787883 +206 34 negative_sampler.num_negs_per_pos 79.0 +206 34 training.batch_size 0.0 +206 35 model.embedding_dim 0.0 +206 35 loss.margin 20.715193592845186 +206 35 loss.adversarial_temperature 0.45232655155766527 +206 35 optimizer.lr 0.030199795793334608 +206 35 negative_sampler.num_negs_per_pos 34.0 +206 35 training.batch_size 0.0 +206 36 model.embedding_dim 2.0 +206 36 loss.margin 3.8598367922878873 +206 36 loss.adversarial_temperature 0.40185859372645794 +206 36 optimizer.lr 0.012933903444676502 +206 36 negative_sampler.num_negs_per_pos 37.0 +206 36 training.batch_size 0.0 +206 37 model.embedding_dim 0.0 +206 37 loss.margin 22.888476095591916 +206 37 loss.adversarial_temperature 0.9569379013818388 +206 37 optimizer.lr 0.04428068293885547 +206 37 negative_sampler.num_negs_per_pos 67.0 +206 37 training.batch_size 0.0 +206 38 model.embedding_dim 1.0 +206 38 loss.margin 25.51134673490901 +206 38 loss.adversarial_temperature 0.8578016608658972 +206 38 optimizer.lr 0.004650995166092222 +206 38 negative_sampler.num_negs_per_pos 69.0 +206 38 training.batch_size 0.0 +206 39 model.embedding_dim 2.0 +206 39 loss.margin 2.4825467390988676 +206 39 loss.adversarial_temperature 0.11294147213209077 +206 39 optimizer.lr 0.010606937856416875 +206 39 negative_sampler.num_negs_per_pos 55.0 +206 39 training.batch_size 0.0 +206 40 model.embedding_dim 1.0 +206 40 loss.margin 27.098429115483345 +206 40 loss.adversarial_temperature 0.17663617589445446 +206 40 optimizer.lr 0.09091629770158835 +206 40 negative_sampler.num_negs_per_pos 18.0 +206 40 training.batch_size 2.0 +206 41 model.embedding_dim 1.0 +206 41 loss.margin 21.54840806998462 +206 41 loss.adversarial_temperature 0.32087491918627464 +206 41 optimizer.lr 0.0014405266150722563 +206 41 
negative_sampler.num_negs_per_pos 46.0 +206 41 training.batch_size 1.0 +206 42 model.embedding_dim 0.0 +206 42 loss.margin 3.5863722684708494 +206 42 loss.adversarial_temperature 0.8675299974372485 +206 42 optimizer.lr 0.0010049956066825378 +206 42 negative_sampler.num_negs_per_pos 9.0 +206 42 training.batch_size 2.0 +206 43 model.embedding_dim 1.0 +206 43 loss.margin 17.05106774348007 +206 43 loss.adversarial_temperature 0.5974172665939116 +206 43 optimizer.lr 0.030036055804072027 +206 43 negative_sampler.num_negs_per_pos 56.0 +206 43 training.batch_size 2.0 +206 44 model.embedding_dim 0.0 +206 44 loss.margin 14.004894886262003 +206 44 loss.adversarial_temperature 0.8717415577409092 +206 44 optimizer.lr 0.0015040047068883254 +206 44 negative_sampler.num_negs_per_pos 38.0 +206 44 training.batch_size 2.0 +206 45 model.embedding_dim 2.0 +206 45 loss.margin 23.28175725997091 +206 45 loss.adversarial_temperature 0.6626019591672808 +206 45 optimizer.lr 0.09907929668192647 +206 45 negative_sampler.num_negs_per_pos 30.0 +206 45 training.batch_size 2.0 +206 46 model.embedding_dim 0.0 +206 46 loss.margin 15.55775302796053 +206 46 loss.adversarial_temperature 0.9698134785471911 +206 46 optimizer.lr 0.005032168168854922 +206 46 negative_sampler.num_negs_per_pos 43.0 +206 46 training.batch_size 2.0 +206 47 model.embedding_dim 1.0 +206 47 loss.margin 14.954071703700967 +206 47 loss.adversarial_temperature 0.9271886265767855 +206 47 optimizer.lr 0.003506986354269538 +206 47 negative_sampler.num_negs_per_pos 55.0 +206 47 training.batch_size 2.0 +206 48 model.embedding_dim 2.0 +206 48 loss.margin 1.5724362651738648 +206 48 loss.adversarial_temperature 0.48881912334772704 +206 48 optimizer.lr 0.0010986222729061058 +206 48 negative_sampler.num_negs_per_pos 32.0 +206 48 training.batch_size 1.0 +206 49 model.embedding_dim 2.0 +206 49 loss.margin 3.579861317420946 +206 49 loss.adversarial_temperature 0.37867694971628973 +206 49 optimizer.lr 0.006749015378085768 +206 49 
negative_sampler.num_negs_per_pos 57.0 +206 49 training.batch_size 0.0 +206 50 model.embedding_dim 0.0 +206 50 loss.margin 26.644339380222608 +206 50 loss.adversarial_temperature 0.21795553740544232 +206 50 optimizer.lr 0.043302506718830944 +206 50 negative_sampler.num_negs_per_pos 18.0 +206 50 training.batch_size 0.0 +206 51 model.embedding_dim 2.0 +206 51 loss.margin 27.550209585058877 +206 51 loss.adversarial_temperature 0.5901206378781549 +206 51 optimizer.lr 0.004234413781253303 +206 51 negative_sampler.num_negs_per_pos 44.0 +206 51 training.batch_size 0.0 +206 52 model.embedding_dim 1.0 +206 52 loss.margin 17.397498194686744 +206 52 loss.adversarial_temperature 0.7182610848011716 +206 52 optimizer.lr 0.0012657932623587258 +206 52 negative_sampler.num_negs_per_pos 94.0 +206 52 training.batch_size 2.0 +206 53 model.embedding_dim 0.0 +206 53 loss.margin 9.691299886316477 +206 53 loss.adversarial_temperature 0.24878971101279956 +206 53 optimizer.lr 0.06835556233557727 +206 53 negative_sampler.num_negs_per_pos 42.0 +206 53 training.batch_size 1.0 +206 54 model.embedding_dim 0.0 +206 54 loss.margin 14.391629432824248 +206 54 loss.adversarial_temperature 0.7475922254942998 +206 54 optimizer.lr 0.020584716615089607 +206 54 negative_sampler.num_negs_per_pos 5.0 +206 54 training.batch_size 2.0 +206 55 model.embedding_dim 2.0 +206 55 loss.margin 12.723963471676992 +206 55 loss.adversarial_temperature 0.7746826488772521 +206 55 optimizer.lr 0.002136485228169497 +206 55 negative_sampler.num_negs_per_pos 0.0 +206 55 training.batch_size 0.0 +206 56 model.embedding_dim 1.0 +206 56 loss.margin 10.015528577418864 +206 56 loss.adversarial_temperature 0.6560734767469659 +206 56 optimizer.lr 0.002360212593645514 +206 56 negative_sampler.num_negs_per_pos 96.0 +206 56 training.batch_size 0.0 +206 57 model.embedding_dim 2.0 +206 57 loss.margin 26.810622535429395 +206 57 loss.adversarial_temperature 0.7227725163676842 +206 57 optimizer.lr 0.0018273430845397634 +206 57 
negative_sampler.num_negs_per_pos 33.0 +206 57 training.batch_size 2.0 +206 58 model.embedding_dim 1.0 +206 58 loss.margin 26.89042764231691 +206 58 loss.adversarial_temperature 0.5979122754351154 +206 58 optimizer.lr 0.004174725566294951 +206 58 negative_sampler.num_negs_per_pos 37.0 +206 58 training.batch_size 1.0 +206 59 model.embedding_dim 2.0 +206 59 loss.margin 2.7597979607864156 +206 59 loss.adversarial_temperature 0.9574987295033942 +206 59 optimizer.lr 0.07586134160346061 +206 59 negative_sampler.num_negs_per_pos 99.0 +206 59 training.batch_size 2.0 +206 60 model.embedding_dim 0.0 +206 60 loss.margin 1.9479351628680155 +206 60 loss.adversarial_temperature 0.3412039346655458 +206 60 optimizer.lr 0.002323489107462976 +206 60 negative_sampler.num_negs_per_pos 93.0 +206 60 training.batch_size 2.0 +206 61 model.embedding_dim 1.0 +206 61 loss.margin 11.983063229594435 +206 61 loss.adversarial_temperature 0.877671010916678 +206 61 optimizer.lr 0.002365799526540202 +206 61 negative_sampler.num_negs_per_pos 75.0 +206 61 training.batch_size 0.0 +206 62 model.embedding_dim 1.0 +206 62 loss.margin 26.618415721883622 +206 62 loss.adversarial_temperature 0.44419401537786857 +206 62 optimizer.lr 0.01672386217146542 +206 62 negative_sampler.num_negs_per_pos 85.0 +206 62 training.batch_size 1.0 +206 63 model.embedding_dim 2.0 +206 63 loss.margin 10.090571550954222 +206 63 loss.adversarial_temperature 0.9924695128182234 +206 63 optimizer.lr 0.00928764393026148 +206 63 negative_sampler.num_negs_per_pos 87.0 +206 63 training.batch_size 2.0 +206 1 dataset """wn18rr""" +206 1 model """distmult""" +206 1 loss """nssa""" +206 1 regularizer """no""" +206 1 optimizer """adam""" +206 1 training_loop """owa""" +206 1 negative_sampler """basic""" +206 1 evaluator """rankbased""" +206 2 dataset """wn18rr""" +206 2 model """distmult""" +206 2 loss """nssa""" +206 2 regularizer """no""" +206 2 optimizer """adam""" +206 2 training_loop """owa""" +206 2 negative_sampler """basic""" +206 2 
evaluator """rankbased""" +206 3 dataset """wn18rr""" +206 3 model """distmult""" +206 3 loss """nssa""" +206 3 regularizer """no""" +206 3 optimizer """adam""" +206 3 training_loop """owa""" +206 3 negative_sampler """basic""" +206 3 evaluator """rankbased""" +206 4 dataset """wn18rr""" +206 4 model """distmult""" +206 4 loss """nssa""" +206 4 regularizer """no""" +206 4 optimizer """adam""" +206 4 training_loop """owa""" +206 4 negative_sampler """basic""" +206 4 evaluator """rankbased""" +206 5 dataset """wn18rr""" +206 5 model """distmult""" +206 5 loss """nssa""" +206 5 regularizer """no""" +206 5 optimizer """adam""" +206 5 training_loop """owa""" +206 5 negative_sampler """basic""" +206 5 evaluator """rankbased""" +206 6 dataset """wn18rr""" +206 6 model """distmult""" +206 6 loss """nssa""" +206 6 regularizer """no""" +206 6 optimizer """adam""" +206 6 training_loop """owa""" +206 6 negative_sampler """basic""" +206 6 evaluator """rankbased""" +206 7 dataset """wn18rr""" +206 7 model """distmult""" +206 7 loss """nssa""" +206 7 regularizer """no""" +206 7 optimizer """adam""" +206 7 training_loop """owa""" +206 7 negative_sampler """basic""" +206 7 evaluator """rankbased""" +206 8 dataset """wn18rr""" +206 8 model """distmult""" +206 8 loss """nssa""" +206 8 regularizer """no""" +206 8 optimizer """adam""" +206 8 training_loop """owa""" +206 8 negative_sampler """basic""" +206 8 evaluator """rankbased""" +206 9 dataset """wn18rr""" +206 9 model """distmult""" +206 9 loss """nssa""" +206 9 regularizer """no""" +206 9 optimizer """adam""" +206 9 training_loop """owa""" +206 9 negative_sampler """basic""" +206 9 evaluator """rankbased""" +206 10 dataset """wn18rr""" +206 10 model """distmult""" +206 10 loss """nssa""" +206 10 regularizer """no""" +206 10 optimizer """adam""" +206 10 training_loop """owa""" +206 10 negative_sampler """basic""" +206 10 evaluator """rankbased""" +206 11 dataset """wn18rr""" +206 11 model """distmult""" +206 11 loss """nssa""" 
+206 11 regularizer """no""" +206 11 optimizer """adam""" +206 11 training_loop """owa""" +206 11 negative_sampler """basic""" +206 11 evaluator """rankbased""" +206 12 dataset """wn18rr""" +206 12 model """distmult""" +206 12 loss """nssa""" +206 12 regularizer """no""" +206 12 optimizer """adam""" +206 12 training_loop """owa""" +206 12 negative_sampler """basic""" +206 12 evaluator """rankbased""" +206 13 dataset """wn18rr""" +206 13 model """distmult""" +206 13 loss """nssa""" +206 13 regularizer """no""" +206 13 optimizer """adam""" +206 13 training_loop """owa""" +206 13 negative_sampler """basic""" +206 13 evaluator """rankbased""" +206 14 dataset """wn18rr""" +206 14 model """distmult""" +206 14 loss """nssa""" +206 14 regularizer """no""" +206 14 optimizer """adam""" +206 14 training_loop """owa""" +206 14 negative_sampler """basic""" +206 14 evaluator """rankbased""" +206 15 dataset """wn18rr""" +206 15 model """distmult""" +206 15 loss """nssa""" +206 15 regularizer """no""" +206 15 optimizer """adam""" +206 15 training_loop """owa""" +206 15 negative_sampler """basic""" +206 15 evaluator """rankbased""" +206 16 dataset """wn18rr""" +206 16 model """distmult""" +206 16 loss """nssa""" +206 16 regularizer """no""" +206 16 optimizer """adam""" +206 16 training_loop """owa""" +206 16 negative_sampler """basic""" +206 16 evaluator """rankbased""" +206 17 dataset """wn18rr""" +206 17 model """distmult""" +206 17 loss """nssa""" +206 17 regularizer """no""" +206 17 optimizer """adam""" +206 17 training_loop """owa""" +206 17 negative_sampler """basic""" +206 17 evaluator """rankbased""" +206 18 dataset """wn18rr""" +206 18 model """distmult""" +206 18 loss """nssa""" +206 18 regularizer """no""" +206 18 optimizer """adam""" +206 18 training_loop """owa""" +206 18 negative_sampler """basic""" +206 18 evaluator """rankbased""" +206 19 dataset """wn18rr""" +206 19 model """distmult""" +206 19 loss """nssa""" +206 19 regularizer """no""" +206 19 optimizer 
"""adam""" +206 19 training_loop """owa""" +206 19 negative_sampler """basic""" +206 19 evaluator """rankbased""" +206 20 dataset """wn18rr""" +206 20 model """distmult""" +206 20 loss """nssa""" +206 20 regularizer """no""" +206 20 optimizer """adam""" +206 20 training_loop """owa""" +206 20 negative_sampler """basic""" +206 20 evaluator """rankbased""" +206 21 dataset """wn18rr""" +206 21 model """distmult""" +206 21 loss """nssa""" +206 21 regularizer """no""" +206 21 optimizer """adam""" +206 21 training_loop """owa""" +206 21 negative_sampler """basic""" +206 21 evaluator """rankbased""" +206 22 dataset """wn18rr""" +206 22 model """distmult""" +206 22 loss """nssa""" +206 22 regularizer """no""" +206 22 optimizer """adam""" +206 22 training_loop """owa""" +206 22 negative_sampler """basic""" +206 22 evaluator """rankbased""" +206 23 dataset """wn18rr""" +206 23 model """distmult""" +206 23 loss """nssa""" +206 23 regularizer """no""" +206 23 optimizer """adam""" +206 23 training_loop """owa""" +206 23 negative_sampler """basic""" +206 23 evaluator """rankbased""" +206 24 dataset """wn18rr""" +206 24 model """distmult""" +206 24 loss """nssa""" +206 24 regularizer """no""" +206 24 optimizer """adam""" +206 24 training_loop """owa""" +206 24 negative_sampler """basic""" +206 24 evaluator """rankbased""" +206 25 dataset """wn18rr""" +206 25 model """distmult""" +206 25 loss """nssa""" +206 25 regularizer """no""" +206 25 optimizer """adam""" +206 25 training_loop """owa""" +206 25 negative_sampler """basic""" +206 25 evaluator """rankbased""" +206 26 dataset """wn18rr""" +206 26 model """distmult""" +206 26 loss """nssa""" +206 26 regularizer """no""" +206 26 optimizer """adam""" +206 26 training_loop """owa""" +206 26 negative_sampler """basic""" +206 26 evaluator """rankbased""" +206 27 dataset """wn18rr""" +206 27 model """distmult""" +206 27 loss """nssa""" +206 27 regularizer """no""" +206 27 optimizer """adam""" +206 27 training_loop """owa""" +206 27 
negative_sampler """basic""" +206 27 evaluator """rankbased""" +206 28 dataset """wn18rr""" +206 28 model """distmult""" +206 28 loss """nssa""" +206 28 regularizer """no""" +206 28 optimizer """adam""" +206 28 training_loop """owa""" +206 28 negative_sampler """basic""" +206 28 evaluator """rankbased""" +206 29 dataset """wn18rr""" +206 29 model """distmult""" +206 29 loss """nssa""" +206 29 regularizer """no""" +206 29 optimizer """adam""" +206 29 training_loop """owa""" +206 29 negative_sampler """basic""" +206 29 evaluator """rankbased""" +206 30 dataset """wn18rr""" +206 30 model """distmult""" +206 30 loss """nssa""" +206 30 regularizer """no""" +206 30 optimizer """adam""" +206 30 training_loop """owa""" +206 30 negative_sampler """basic""" +206 30 evaluator """rankbased""" +206 31 dataset """wn18rr""" +206 31 model """distmult""" +206 31 loss """nssa""" +206 31 regularizer """no""" +206 31 optimizer """adam""" +206 31 training_loop """owa""" +206 31 negative_sampler """basic""" +206 31 evaluator """rankbased""" +206 32 dataset """wn18rr""" +206 32 model """distmult""" +206 32 loss """nssa""" +206 32 regularizer """no""" +206 32 optimizer """adam""" +206 32 training_loop """owa""" +206 32 negative_sampler """basic""" +206 32 evaluator """rankbased""" +206 33 dataset """wn18rr""" +206 33 model """distmult""" +206 33 loss """nssa""" +206 33 regularizer """no""" +206 33 optimizer """adam""" +206 33 training_loop """owa""" +206 33 negative_sampler """basic""" +206 33 evaluator """rankbased""" +206 34 dataset """wn18rr""" +206 34 model """distmult""" +206 34 loss """nssa""" +206 34 regularizer """no""" +206 34 optimizer """adam""" +206 34 training_loop """owa""" +206 34 negative_sampler """basic""" +206 34 evaluator """rankbased""" +206 35 dataset """wn18rr""" +206 35 model """distmult""" +206 35 loss """nssa""" +206 35 regularizer """no""" +206 35 optimizer """adam""" +206 35 training_loop """owa""" +206 35 negative_sampler """basic""" +206 35 evaluator 
"""rankbased""" +206 36 dataset """wn18rr""" +206 36 model """distmult""" +206 36 loss """nssa""" +206 36 regularizer """no""" +206 36 optimizer """adam""" +206 36 training_loop """owa""" +206 36 negative_sampler """basic""" +206 36 evaluator """rankbased""" +206 37 dataset """wn18rr""" +206 37 model """distmult""" +206 37 loss """nssa""" +206 37 regularizer """no""" +206 37 optimizer """adam""" +206 37 training_loop """owa""" +206 37 negative_sampler """basic""" +206 37 evaluator """rankbased""" +206 38 dataset """wn18rr""" +206 38 model """distmult""" +206 38 loss """nssa""" +206 38 regularizer """no""" +206 38 optimizer """adam""" +206 38 training_loop """owa""" +206 38 negative_sampler """basic""" +206 38 evaluator """rankbased""" +206 39 dataset """wn18rr""" +206 39 model """distmult""" +206 39 loss """nssa""" +206 39 regularizer """no""" +206 39 optimizer """adam""" +206 39 training_loop """owa""" +206 39 negative_sampler """basic""" +206 39 evaluator """rankbased""" +206 40 dataset """wn18rr""" +206 40 model """distmult""" +206 40 loss """nssa""" +206 40 regularizer """no""" +206 40 optimizer """adam""" +206 40 training_loop """owa""" +206 40 negative_sampler """basic""" +206 40 evaluator """rankbased""" +206 41 dataset """wn18rr""" +206 41 model """distmult""" +206 41 loss """nssa""" +206 41 regularizer """no""" +206 41 optimizer """adam""" +206 41 training_loop """owa""" +206 41 negative_sampler """basic""" +206 41 evaluator """rankbased""" +206 42 dataset """wn18rr""" +206 42 model """distmult""" +206 42 loss """nssa""" +206 42 regularizer """no""" +206 42 optimizer """adam""" +206 42 training_loop """owa""" +206 42 negative_sampler """basic""" +206 42 evaluator """rankbased""" +206 43 dataset """wn18rr""" +206 43 model """distmult""" +206 43 loss """nssa""" +206 43 regularizer """no""" +206 43 optimizer """adam""" +206 43 training_loop """owa""" +206 43 negative_sampler """basic""" +206 43 evaluator """rankbased""" +206 44 dataset """wn18rr""" +206 44 
model """distmult""" +206 44 loss """nssa""" +206 44 regularizer """no""" +206 44 optimizer """adam""" +206 44 training_loop """owa""" +206 44 negative_sampler """basic""" +206 44 evaluator """rankbased""" +206 45 dataset """wn18rr""" +206 45 model """distmult""" +206 45 loss """nssa""" +206 45 regularizer """no""" +206 45 optimizer """adam""" +206 45 training_loop """owa""" +206 45 negative_sampler """basic""" +206 45 evaluator """rankbased""" +206 46 dataset """wn18rr""" +206 46 model """distmult""" +206 46 loss """nssa""" +206 46 regularizer """no""" +206 46 optimizer """adam""" +206 46 training_loop """owa""" +206 46 negative_sampler """basic""" +206 46 evaluator """rankbased""" +206 47 dataset """wn18rr""" +206 47 model """distmult""" +206 47 loss """nssa""" +206 47 regularizer """no""" +206 47 optimizer """adam""" +206 47 training_loop """owa""" +206 47 negative_sampler """basic""" +206 47 evaluator """rankbased""" +206 48 dataset """wn18rr""" +206 48 model """distmult""" +206 48 loss """nssa""" +206 48 regularizer """no""" +206 48 optimizer """adam""" +206 48 training_loop """owa""" +206 48 negative_sampler """basic""" +206 48 evaluator """rankbased""" +206 49 dataset """wn18rr""" +206 49 model """distmult""" +206 49 loss """nssa""" +206 49 regularizer """no""" +206 49 optimizer """adam""" +206 49 training_loop """owa""" +206 49 negative_sampler """basic""" +206 49 evaluator """rankbased""" +206 50 dataset """wn18rr""" +206 50 model """distmult""" +206 50 loss """nssa""" +206 50 regularizer """no""" +206 50 optimizer """adam""" +206 50 training_loop """owa""" +206 50 negative_sampler """basic""" +206 50 evaluator """rankbased""" +206 51 dataset """wn18rr""" +206 51 model """distmult""" +206 51 loss """nssa""" +206 51 regularizer """no""" +206 51 optimizer """adam""" +206 51 training_loop """owa""" +206 51 negative_sampler """basic""" +206 51 evaluator """rankbased""" +206 52 dataset """wn18rr""" +206 52 model """distmult""" +206 52 loss """nssa""" +206 52 
regularizer """no""" +206 52 optimizer """adam""" +206 52 training_loop """owa""" +206 52 negative_sampler """basic""" +206 52 evaluator """rankbased""" +206 53 dataset """wn18rr""" +206 53 model """distmult""" +206 53 loss """nssa""" +206 53 regularizer """no""" +206 53 optimizer """adam""" +206 53 training_loop """owa""" +206 53 negative_sampler """basic""" +206 53 evaluator """rankbased""" +206 54 dataset """wn18rr""" +206 54 model """distmult""" +206 54 loss """nssa""" +206 54 regularizer """no""" +206 54 optimizer """adam""" +206 54 training_loop """owa""" +206 54 negative_sampler """basic""" +206 54 evaluator """rankbased""" +206 55 dataset """wn18rr""" +206 55 model """distmult""" +206 55 loss """nssa""" +206 55 regularizer """no""" +206 55 optimizer """adam""" +206 55 training_loop """owa""" +206 55 negative_sampler """basic""" +206 55 evaluator """rankbased""" +206 56 dataset """wn18rr""" +206 56 model """distmult""" +206 56 loss """nssa""" +206 56 regularizer """no""" +206 56 optimizer """adam""" +206 56 training_loop """owa""" +206 56 negative_sampler """basic""" +206 56 evaluator """rankbased""" +206 57 dataset """wn18rr""" +206 57 model """distmult""" +206 57 loss """nssa""" +206 57 regularizer """no""" +206 57 optimizer """adam""" +206 57 training_loop """owa""" +206 57 negative_sampler """basic""" +206 57 evaluator """rankbased""" +206 58 dataset """wn18rr""" +206 58 model """distmult""" +206 58 loss """nssa""" +206 58 regularizer """no""" +206 58 optimizer """adam""" +206 58 training_loop """owa""" +206 58 negative_sampler """basic""" +206 58 evaluator """rankbased""" +206 59 dataset """wn18rr""" +206 59 model """distmult""" +206 59 loss """nssa""" +206 59 regularizer """no""" +206 59 optimizer """adam""" +206 59 training_loop """owa""" +206 59 negative_sampler """basic""" +206 59 evaluator """rankbased""" +206 60 dataset """wn18rr""" +206 60 model """distmult""" +206 60 loss """nssa""" +206 60 regularizer """no""" +206 60 optimizer """adam""" +206 
60 training_loop """owa""" +206 60 negative_sampler """basic""" +206 60 evaluator """rankbased""" +206 61 dataset """wn18rr""" +206 61 model """distmult""" +206 61 loss """nssa""" +206 61 regularizer """no""" +206 61 optimizer """adam""" +206 61 training_loop """owa""" +206 61 negative_sampler """basic""" +206 61 evaluator """rankbased""" +206 62 dataset """wn18rr""" +206 62 model """distmult""" +206 62 loss """nssa""" +206 62 regularizer """no""" +206 62 optimizer """adam""" +206 62 training_loop """owa""" +206 62 negative_sampler """basic""" +206 62 evaluator """rankbased""" +206 63 dataset """wn18rr""" +206 63 model """distmult""" +206 63 loss """nssa""" +206 63 regularizer """no""" +206 63 optimizer """adam""" +206 63 training_loop """owa""" +206 63 negative_sampler """basic""" +206 63 evaluator """rankbased""" +207 1 model.embedding_dim 0.0 +207 1 loss.margin 24.580141215021776 +207 1 loss.adversarial_temperature 0.17098065683547328 +207 1 optimizer.lr 0.004236657011251214 +207 1 negative_sampler.num_negs_per_pos 24.0 +207 1 training.batch_size 2.0 +207 2 model.embedding_dim 0.0 +207 2 loss.margin 23.12026132943863 +207 2 loss.adversarial_temperature 0.9693330411869211 +207 2 optimizer.lr 0.01751794898300095 +207 2 negative_sampler.num_negs_per_pos 50.0 +207 2 training.batch_size 0.0 +207 3 model.embedding_dim 1.0 +207 3 loss.margin 19.33177679217789 +207 3 loss.adversarial_temperature 0.31473115613345765 +207 3 optimizer.lr 0.014261385199718651 +207 3 negative_sampler.num_negs_per_pos 49.0 +207 3 training.batch_size 2.0 +207 4 model.embedding_dim 0.0 +207 4 loss.margin 19.68068355428145 +207 4 loss.adversarial_temperature 0.2205127075370292 +207 4 optimizer.lr 0.0021697135894088895 +207 4 negative_sampler.num_negs_per_pos 92.0 +207 4 training.batch_size 2.0 +207 5 model.embedding_dim 1.0 +207 5 loss.margin 6.258482233765329 +207 5 loss.adversarial_temperature 0.7349023594339748 +207 5 optimizer.lr 0.002035341283851874 +207 5 negative_sampler.num_negs_per_pos 
9.0 +207 5 training.batch_size 0.0 +207 6 model.embedding_dim 1.0 +207 6 loss.margin 3.125220358906885 +207 6 loss.adversarial_temperature 0.9083069154083828 +207 6 optimizer.lr 0.001000269931267678 +207 6 negative_sampler.num_negs_per_pos 11.0 +207 6 training.batch_size 2.0 +207 7 model.embedding_dim 0.0 +207 7 loss.margin 29.120215025678668 +207 7 loss.adversarial_temperature 0.2754614972470764 +207 7 optimizer.lr 0.009907956994779149 +207 7 negative_sampler.num_negs_per_pos 16.0 +207 7 training.batch_size 2.0 +207 8 model.embedding_dim 2.0 +207 8 loss.margin 6.716560979247367 +207 8 loss.adversarial_temperature 0.8357523645246291 +207 8 optimizer.lr 0.06491258884133937 +207 8 negative_sampler.num_negs_per_pos 26.0 +207 8 training.batch_size 0.0 +207 9 model.embedding_dim 2.0 +207 9 loss.margin 13.25635982153155 +207 9 loss.adversarial_temperature 0.6893400813641493 +207 9 optimizer.lr 0.002204049357276578 +207 9 negative_sampler.num_negs_per_pos 60.0 +207 9 training.batch_size 1.0 +207 10 model.embedding_dim 2.0 +207 10 loss.margin 19.357806228630984 +207 10 loss.adversarial_temperature 0.37053885501442485 +207 10 optimizer.lr 0.003599077472929918 +207 10 negative_sampler.num_negs_per_pos 46.0 +207 10 training.batch_size 0.0 +207 11 model.embedding_dim 0.0 +207 11 loss.margin 13.070851982022424 +207 11 loss.adversarial_temperature 0.27128217535853033 +207 11 optimizer.lr 0.013339872416965357 +207 11 negative_sampler.num_negs_per_pos 32.0 +207 11 training.batch_size 0.0 +207 12 model.embedding_dim 1.0 +207 12 loss.margin 12.368008325586016 +207 12 loss.adversarial_temperature 0.889394078889289 +207 12 optimizer.lr 0.002241048171425656 +207 12 negative_sampler.num_negs_per_pos 80.0 +207 12 training.batch_size 2.0 +207 13 model.embedding_dim 0.0 +207 13 loss.margin 15.926061224938907 +207 13 loss.adversarial_temperature 0.24873781059329847 +207 13 optimizer.lr 0.05103628834508198 +207 13 negative_sampler.num_negs_per_pos 27.0 +207 13 training.batch_size 2.0 +207 14 
model.embedding_dim 0.0 +207 14 loss.margin 13.250534132009708 +207 14 loss.adversarial_temperature 0.5565776297911 +207 14 optimizer.lr 0.008619972618944075 +207 14 negative_sampler.num_negs_per_pos 99.0 +207 14 training.batch_size 2.0 +207 15 model.embedding_dim 0.0 +207 15 loss.margin 25.44819863131364 +207 15 loss.adversarial_temperature 0.9091574078924918 +207 15 optimizer.lr 0.0020461290465880583 +207 15 negative_sampler.num_negs_per_pos 45.0 +207 15 training.batch_size 2.0 +207 16 model.embedding_dim 0.0 +207 16 loss.margin 16.919280903334087 +207 16 loss.adversarial_temperature 0.6418458119928047 +207 16 optimizer.lr 0.01168711386992798 +207 16 negative_sampler.num_negs_per_pos 84.0 +207 16 training.batch_size 2.0 +207 17 model.embedding_dim 0.0 +207 17 loss.margin 24.93169392478189 +207 17 loss.adversarial_temperature 0.6016279074138403 +207 17 optimizer.lr 0.052111064172451496 +207 17 negative_sampler.num_negs_per_pos 98.0 +207 17 training.batch_size 0.0 +207 18 model.embedding_dim 2.0 +207 18 loss.margin 21.80155242495751 +207 18 loss.adversarial_temperature 0.9806534914658324 +207 18 optimizer.lr 0.009856463195139586 +207 18 negative_sampler.num_negs_per_pos 16.0 +207 18 training.batch_size 0.0 +207 19 model.embedding_dim 2.0 +207 19 loss.margin 21.10485754649106 +207 19 loss.adversarial_temperature 0.7023932816285434 +207 19 optimizer.lr 0.03868330470393433 +207 19 negative_sampler.num_negs_per_pos 63.0 +207 19 training.batch_size 0.0 +207 20 model.embedding_dim 1.0 +207 20 loss.margin 20.703912631199604 +207 20 loss.adversarial_temperature 0.3636906142475729 +207 20 optimizer.lr 0.012600175056513984 +207 20 negative_sampler.num_negs_per_pos 98.0 +207 20 training.batch_size 2.0 +207 21 model.embedding_dim 2.0 +207 21 loss.margin 14.251555014427488 +207 21 loss.adversarial_temperature 0.6095056239253489 +207 21 optimizer.lr 0.07267741543643298 +207 21 negative_sampler.num_negs_per_pos 60.0 +207 21 training.batch_size 1.0 +207 22 model.embedding_dim 1.0 
+207 22 loss.margin 8.755009553128161 +207 22 loss.adversarial_temperature 0.2784310066132703 +207 22 optimizer.lr 0.0014406586923278659 +207 22 negative_sampler.num_negs_per_pos 68.0 +207 22 training.batch_size 0.0 +207 23 model.embedding_dim 1.0 +207 23 loss.margin 17.570693485305945 +207 23 loss.adversarial_temperature 0.6127547502772223 +207 23 optimizer.lr 0.09289404820480779 +207 23 negative_sampler.num_negs_per_pos 85.0 +207 23 training.batch_size 1.0 +207 24 model.embedding_dim 1.0 +207 24 loss.margin 17.188643377468818 +207 24 loss.adversarial_temperature 0.739935943312626 +207 24 optimizer.lr 0.027270871775217646 +207 24 negative_sampler.num_negs_per_pos 42.0 +207 24 training.batch_size 1.0 +207 25 model.embedding_dim 0.0 +207 25 loss.margin 11.359266522715028 +207 25 loss.adversarial_temperature 0.18766518085277442 +207 25 optimizer.lr 0.03535353522900537 +207 25 negative_sampler.num_negs_per_pos 95.0 +207 25 training.batch_size 1.0 +207 26 model.embedding_dim 1.0 +207 26 loss.margin 6.9862203277713695 +207 26 loss.adversarial_temperature 0.6681466414387025 +207 26 optimizer.lr 0.004023887571825189 +207 26 negative_sampler.num_negs_per_pos 38.0 +207 26 training.batch_size 0.0 +207 27 model.embedding_dim 2.0 +207 27 loss.margin 10.227247269832022 +207 27 loss.adversarial_temperature 0.7310863515046028 +207 27 optimizer.lr 0.04951471569564851 +207 27 negative_sampler.num_negs_per_pos 83.0 +207 27 training.batch_size 2.0 +207 28 model.embedding_dim 2.0 +207 28 loss.margin 12.904130638018781 +207 28 loss.adversarial_temperature 0.7955325602111115 +207 28 optimizer.lr 0.007525972741572671 +207 28 negative_sampler.num_negs_per_pos 6.0 +207 28 training.batch_size 1.0 +207 29 model.embedding_dim 2.0 +207 29 loss.margin 23.991860233141097 +207 29 loss.adversarial_temperature 0.22924027241981468 +207 29 optimizer.lr 0.001831037053112353 +207 29 negative_sampler.num_negs_per_pos 70.0 +207 29 training.batch_size 2.0 +207 30 model.embedding_dim 1.0 +207 30 
loss.margin 25.474004745606507 +207 30 loss.adversarial_temperature 0.2831122884684887 +207 30 optimizer.lr 0.0010001909093310202 +207 30 negative_sampler.num_negs_per_pos 86.0 +207 30 training.batch_size 2.0 +207 31 model.embedding_dim 1.0 +207 31 loss.margin 29.081097129990148 +207 31 loss.adversarial_temperature 0.5507953196015417 +207 31 optimizer.lr 0.013200762590490037 +207 31 negative_sampler.num_negs_per_pos 89.0 +207 31 training.batch_size 2.0 +207 32 model.embedding_dim 0.0 +207 32 loss.margin 11.820384354341101 +207 32 loss.adversarial_temperature 0.8442859139804285 +207 32 optimizer.lr 0.00975554991686004 +207 32 negative_sampler.num_negs_per_pos 4.0 +207 32 training.batch_size 0.0 +207 33 model.embedding_dim 2.0 +207 33 loss.margin 11.400465490575789 +207 33 loss.adversarial_temperature 0.8346255737659659 +207 33 optimizer.lr 0.0013832132002200186 +207 33 negative_sampler.num_negs_per_pos 0.0 +207 33 training.batch_size 0.0 +207 34 model.embedding_dim 2.0 +207 34 loss.margin 8.281574495928968 +207 34 loss.adversarial_temperature 0.7752084563056892 +207 34 optimizer.lr 0.015391312712242285 +207 34 negative_sampler.num_negs_per_pos 57.0 +207 34 training.batch_size 2.0 +207 35 model.embedding_dim 0.0 +207 35 loss.margin 10.893674459416184 +207 35 loss.adversarial_temperature 0.4520129575590838 +207 35 optimizer.lr 0.06782628150598513 +207 35 negative_sampler.num_negs_per_pos 95.0 +207 35 training.batch_size 1.0 +207 36 model.embedding_dim 2.0 +207 36 loss.margin 15.5322934823453 +207 36 loss.adversarial_temperature 0.9967146002151348 +207 36 optimizer.lr 0.002975486711043927 +207 36 negative_sampler.num_negs_per_pos 41.0 +207 36 training.batch_size 2.0 +207 37 model.embedding_dim 0.0 +207 37 loss.margin 11.455024839445539 +207 37 loss.adversarial_temperature 0.4816349966420859 +207 37 optimizer.lr 0.0014199504323924464 +207 37 negative_sampler.num_negs_per_pos 50.0 +207 37 training.batch_size 2.0 +207 38 model.embedding_dim 0.0 +207 38 loss.margin 
17.079582318066084 +207 38 loss.adversarial_temperature 0.7272581985475497 +207 38 optimizer.lr 0.021210568718522938 +207 38 negative_sampler.num_negs_per_pos 59.0 +207 38 training.batch_size 2.0 +207 39 model.embedding_dim 0.0 +207 39 loss.margin 4.249581474080735 +207 39 loss.adversarial_temperature 0.9454719787435822 +207 39 optimizer.lr 0.005678751871158207 +207 39 negative_sampler.num_negs_per_pos 35.0 +207 39 training.batch_size 0.0 +207 40 model.embedding_dim 1.0 +207 40 loss.margin 27.09119683714892 +207 40 loss.adversarial_temperature 0.8035315809014195 +207 40 optimizer.lr 0.013114053856391018 +207 40 negative_sampler.num_negs_per_pos 59.0 +207 40 training.batch_size 0.0 +207 41 model.embedding_dim 1.0 +207 41 loss.margin 8.604623252670908 +207 41 loss.adversarial_temperature 0.3222231533937766 +207 41 optimizer.lr 0.001064383337676143 +207 41 negative_sampler.num_negs_per_pos 70.0 +207 41 training.batch_size 1.0 +207 42 model.embedding_dim 2.0 +207 42 loss.margin 24.27765096983359 +207 42 loss.adversarial_temperature 0.23925743972988456 +207 42 optimizer.lr 0.032702429639530864 +207 42 negative_sampler.num_negs_per_pos 89.0 +207 42 training.batch_size 0.0 +207 43 model.embedding_dim 1.0 +207 43 loss.margin 12.4185312015513 +207 43 loss.adversarial_temperature 0.9038971438564387 +207 43 optimizer.lr 0.028587047351285737 +207 43 negative_sampler.num_negs_per_pos 39.0 +207 43 training.batch_size 1.0 +207 44 model.embedding_dim 0.0 +207 44 loss.margin 12.969697553196637 +207 44 loss.adversarial_temperature 0.7897424453957426 +207 44 optimizer.lr 0.0023860530718064522 +207 44 negative_sampler.num_negs_per_pos 55.0 +207 44 training.batch_size 2.0 +207 45 model.embedding_dim 1.0 +207 45 loss.margin 24.150543724600368 +207 45 loss.adversarial_temperature 0.5621211132435631 +207 45 optimizer.lr 0.011075068190928319 +207 45 negative_sampler.num_negs_per_pos 75.0 +207 45 training.batch_size 0.0 +207 46 model.embedding_dim 0.0 +207 46 loss.margin 15.666762235458418 
+207 46 loss.adversarial_temperature 0.6475337436121881 +207 46 optimizer.lr 0.011462096283180793 +207 46 negative_sampler.num_negs_per_pos 84.0 +207 46 training.batch_size 0.0 +207 47 model.embedding_dim 1.0 +207 47 loss.margin 22.317767950254595 +207 47 loss.adversarial_temperature 0.7994812305230101 +207 47 optimizer.lr 0.0010803065562304476 +207 47 negative_sampler.num_negs_per_pos 47.0 +207 47 training.batch_size 0.0 +207 48 model.embedding_dim 1.0 +207 48 loss.margin 1.7831255319318404 +207 48 loss.adversarial_temperature 0.9391173857983538 +207 48 optimizer.lr 0.0034511473355056817 +207 48 negative_sampler.num_negs_per_pos 32.0 +207 48 training.batch_size 2.0 +207 49 model.embedding_dim 2.0 +207 49 loss.margin 2.2530467448125835 +207 49 loss.adversarial_temperature 0.9940362355131901 +207 49 optimizer.lr 0.03643574285475178 +207 49 negative_sampler.num_negs_per_pos 35.0 +207 49 training.batch_size 0.0 +207 50 model.embedding_dim 2.0 +207 50 loss.margin 28.660965920707817 +207 50 loss.adversarial_temperature 0.8565260599282573 +207 50 optimizer.lr 0.0018787669308253656 +207 50 negative_sampler.num_negs_per_pos 92.0 +207 50 training.batch_size 0.0 +207 51 model.embedding_dim 2.0 +207 51 loss.margin 29.99275833395565 +207 51 loss.adversarial_temperature 0.3412760677421578 +207 51 optimizer.lr 0.004804100293522774 +207 51 negative_sampler.num_negs_per_pos 60.0 +207 51 training.batch_size 0.0 +207 52 model.embedding_dim 2.0 +207 52 loss.margin 6.976686725436769 +207 52 loss.adversarial_temperature 0.20809404966882697 +207 52 optimizer.lr 0.0019205280778626444 +207 52 negative_sampler.num_negs_per_pos 79.0 +207 52 training.batch_size 0.0 +207 53 model.embedding_dim 0.0 +207 53 loss.margin 5.098334493395686 +207 53 loss.adversarial_temperature 0.8028510423119053 +207 53 optimizer.lr 0.05121887599216204 +207 53 negative_sampler.num_negs_per_pos 56.0 +207 53 training.batch_size 2.0 +207 54 model.embedding_dim 2.0 +207 54 loss.margin 3.3066562647095132 +207 54 
loss.adversarial_temperature 0.7326163111465619 +207 54 optimizer.lr 0.04837888896818414 +207 54 negative_sampler.num_negs_per_pos 58.0 +207 54 training.batch_size 1.0 +207 55 model.embedding_dim 1.0 +207 55 loss.margin 14.798647889823211 +207 55 loss.adversarial_temperature 0.9749275415805511 +207 55 optimizer.lr 0.08748035073241772 +207 55 negative_sampler.num_negs_per_pos 21.0 +207 55 training.batch_size 0.0 +207 56 model.embedding_dim 1.0 +207 56 loss.margin 3.9585263792419756 +207 56 loss.adversarial_temperature 0.35860743753902774 +207 56 optimizer.lr 0.004732227031001963 +207 56 negative_sampler.num_negs_per_pos 29.0 +207 56 training.batch_size 0.0 +207 57 model.embedding_dim 2.0 +207 57 loss.margin 11.314168675187961 +207 57 loss.adversarial_temperature 0.21511324741797216 +207 57 optimizer.lr 0.002696313459066592 +207 57 negative_sampler.num_negs_per_pos 4.0 +207 57 training.batch_size 1.0 +207 58 model.embedding_dim 0.0 +207 58 loss.margin 3.5777871989163663 +207 58 loss.adversarial_temperature 0.33899785360186596 +207 58 optimizer.lr 0.0015409126766096003 +207 58 negative_sampler.num_negs_per_pos 77.0 +207 58 training.batch_size 1.0 +207 59 model.embedding_dim 0.0 +207 59 loss.margin 5.4383391146667135 +207 59 loss.adversarial_temperature 0.31647390267828424 +207 59 optimizer.lr 0.0018675011348117745 +207 59 negative_sampler.num_negs_per_pos 57.0 +207 59 training.batch_size 0.0 +207 60 model.embedding_dim 2.0 +207 60 loss.margin 2.852174292863883 +207 60 loss.adversarial_temperature 0.5687524630175738 +207 60 optimizer.lr 0.001746550543519737 +207 60 negative_sampler.num_negs_per_pos 6.0 +207 60 training.batch_size 0.0 +207 61 model.embedding_dim 1.0 +207 61 loss.margin 28.153487911426165 +207 61 loss.adversarial_temperature 0.11350724931310113 +207 61 optimizer.lr 0.00647367643742424 +207 61 negative_sampler.num_negs_per_pos 81.0 +207 61 training.batch_size 0.0 +207 62 model.embedding_dim 0.0 +207 62 loss.margin 15.705178898007835 +207 62 
loss.adversarial_temperature 0.618387041416349 +207 62 optimizer.lr 0.0022017079154108693 +207 62 negative_sampler.num_negs_per_pos 59.0 +207 62 training.batch_size 2.0 +207 63 model.embedding_dim 1.0 +207 63 loss.margin 21.457547654397253 +207 63 loss.adversarial_temperature 0.2597707165985896 +207 63 optimizer.lr 0.014823015153691176 +207 63 negative_sampler.num_negs_per_pos 51.0 +207 63 training.batch_size 2.0 +207 64 model.embedding_dim 2.0 +207 64 loss.margin 10.927192410672362 +207 64 loss.adversarial_temperature 0.12400991917939562 +207 64 optimizer.lr 0.04238757764249694 +207 64 negative_sampler.num_negs_per_pos 50.0 +207 64 training.batch_size 0.0 +207 65 model.embedding_dim 2.0 +207 65 loss.margin 24.543081028452963 +207 65 loss.adversarial_temperature 0.47137187473632497 +207 65 optimizer.lr 0.04714940780730661 +207 65 negative_sampler.num_negs_per_pos 80.0 +207 65 training.batch_size 0.0 +207 66 model.embedding_dim 0.0 +207 66 loss.margin 3.385295494118413 +207 66 loss.adversarial_temperature 0.9325543082847156 +207 66 optimizer.lr 0.0018634652509957952 +207 66 negative_sampler.num_negs_per_pos 14.0 +207 66 training.batch_size 1.0 +207 67 model.embedding_dim 2.0 +207 67 loss.margin 26.5620902491757 +207 67 loss.adversarial_temperature 0.27958947829852365 +207 67 optimizer.lr 0.03681337909275023 +207 67 negative_sampler.num_negs_per_pos 4.0 +207 67 training.batch_size 0.0 +207 68 model.embedding_dim 0.0 +207 68 loss.margin 3.8213689985975914 +207 68 loss.adversarial_temperature 0.37136351122158373 +207 68 optimizer.lr 0.009691209165223329 +207 68 negative_sampler.num_negs_per_pos 81.0 +207 68 training.batch_size 0.0 +207 69 model.embedding_dim 0.0 +207 69 loss.margin 11.07215174816528 +207 69 loss.adversarial_temperature 0.49699611983569697 +207 69 optimizer.lr 0.0012403979067263846 +207 69 negative_sampler.num_negs_per_pos 81.0 +207 69 training.batch_size 2.0 +207 70 model.embedding_dim 2.0 +207 70 loss.margin 5.249828933646736 +207 70 
loss.adversarial_temperature 0.6548356806609487 +207 70 optimizer.lr 0.004826829947438697 +207 70 negative_sampler.num_negs_per_pos 39.0 +207 70 training.batch_size 1.0 +207 71 model.embedding_dim 2.0 +207 71 loss.margin 20.19924281188521 +207 71 loss.adversarial_temperature 0.2810747438663862 +207 71 optimizer.lr 0.02323997873094718 +207 71 negative_sampler.num_negs_per_pos 29.0 +207 71 training.batch_size 0.0 +207 72 model.embedding_dim 2.0 +207 72 loss.margin 24.564920793556976 +207 72 loss.adversarial_temperature 0.7341921877839245 +207 72 optimizer.lr 0.004154204426219732 +207 72 negative_sampler.num_negs_per_pos 40.0 +207 72 training.batch_size 1.0 +207 73 model.embedding_dim 2.0 +207 73 loss.margin 5.549875196768136 +207 73 loss.adversarial_temperature 0.5275440514558908 +207 73 optimizer.lr 0.006187002301483463 +207 73 negative_sampler.num_negs_per_pos 77.0 +207 73 training.batch_size 1.0 +207 74 model.embedding_dim 1.0 +207 74 loss.margin 19.956887906056963 +207 74 loss.adversarial_temperature 0.9779685341309386 +207 74 optimizer.lr 0.0303917799502223 +207 74 negative_sampler.num_negs_per_pos 48.0 +207 74 training.batch_size 2.0 +207 75 model.embedding_dim 2.0 +207 75 loss.margin 18.547271578125407 +207 75 loss.adversarial_temperature 0.13281533228530024 +207 75 optimizer.lr 0.012759509163785147 +207 75 negative_sampler.num_negs_per_pos 18.0 +207 75 training.batch_size 0.0 +207 76 model.embedding_dim 2.0 +207 76 loss.margin 21.754135914975304 +207 76 loss.adversarial_temperature 0.2712850223852397 +207 76 optimizer.lr 0.0014282894902463274 +207 76 negative_sampler.num_negs_per_pos 82.0 +207 76 training.batch_size 0.0 +207 77 model.embedding_dim 2.0 +207 77 loss.margin 5.3937431267803095 +207 77 loss.adversarial_temperature 0.6303701347233053 +207 77 optimizer.lr 0.03209714846198939 +207 77 negative_sampler.num_negs_per_pos 28.0 +207 77 training.batch_size 2.0 +207 78 model.embedding_dim 1.0 +207 78 loss.margin 2.1207018376849294 +207 78 
loss.adversarial_temperature 0.37169787836757173 +207 78 optimizer.lr 0.029623871428724472 +207 78 negative_sampler.num_negs_per_pos 72.0 +207 78 training.batch_size 2.0 +207 79 model.embedding_dim 1.0 +207 79 loss.margin 2.7463577280587366 +207 79 loss.adversarial_temperature 0.4004411335676114 +207 79 optimizer.lr 0.0038963498585037953 +207 79 negative_sampler.num_negs_per_pos 10.0 +207 79 training.batch_size 2.0 +207 80 model.embedding_dim 0.0 +207 80 loss.margin 3.6524703643856173 +207 80 loss.adversarial_temperature 0.5206561894787514 +207 80 optimizer.lr 0.02564450204960041 +207 80 negative_sampler.num_negs_per_pos 72.0 +207 80 training.batch_size 0.0 +207 81 model.embedding_dim 1.0 +207 81 loss.margin 10.565625010452573 +207 81 loss.adversarial_temperature 0.25374537243787226 +207 81 optimizer.lr 0.07659920493635117 +207 81 negative_sampler.num_negs_per_pos 13.0 +207 81 training.batch_size 1.0 +207 82 model.embedding_dim 1.0 +207 82 loss.margin 15.615733579919059 +207 82 loss.adversarial_temperature 0.2809577485476844 +207 82 optimizer.lr 0.01276896344683993 +207 82 negative_sampler.num_negs_per_pos 86.0 +207 82 training.batch_size 0.0 +207 83 model.embedding_dim 1.0 +207 83 loss.margin 1.2468549305135816 +207 83 loss.adversarial_temperature 0.18994531923024466 +207 83 optimizer.lr 0.018048220444050626 +207 83 negative_sampler.num_negs_per_pos 37.0 +207 83 training.batch_size 1.0 +207 84 model.embedding_dim 2.0 +207 84 loss.margin 20.812444072069702 +207 84 loss.adversarial_temperature 0.7660853697640704 +207 84 optimizer.lr 0.043668688501519004 +207 84 negative_sampler.num_negs_per_pos 19.0 +207 84 training.batch_size 1.0 +207 85 model.embedding_dim 2.0 +207 85 loss.margin 11.002926435349865 +207 85 loss.adversarial_temperature 0.9042537081379425 +207 85 optimizer.lr 0.002556059264713952 +207 85 negative_sampler.num_negs_per_pos 20.0 +207 85 training.batch_size 1.0 +207 86 model.embedding_dim 1.0 +207 86 loss.margin 6.406010863795927 +207 86 
loss.adversarial_temperature 0.21739147702019182 +207 86 optimizer.lr 0.00741694934875289 +207 86 negative_sampler.num_negs_per_pos 32.0 +207 86 training.batch_size 2.0 +207 87 model.embedding_dim 1.0 +207 87 loss.margin 25.267088909693417 +207 87 loss.adversarial_temperature 0.26196571842446204 +207 87 optimizer.lr 0.021704098264888158 +207 87 negative_sampler.num_negs_per_pos 4.0 +207 87 training.batch_size 1.0 +207 88 model.embedding_dim 1.0 +207 88 loss.margin 16.177921771663506 +207 88 loss.adversarial_temperature 0.9887607639173172 +207 88 optimizer.lr 0.0022510419935369685 +207 88 negative_sampler.num_negs_per_pos 12.0 +207 88 training.batch_size 0.0 +207 89 model.embedding_dim 2.0 +207 89 loss.margin 18.346825048273296 +207 89 loss.adversarial_temperature 0.6424636038876314 +207 89 optimizer.lr 0.029472147883108295 +207 89 negative_sampler.num_negs_per_pos 32.0 +207 89 training.batch_size 2.0 +207 90 model.embedding_dim 2.0 +207 90 loss.margin 12.104236790620345 +207 90 loss.adversarial_temperature 0.8291514014872574 +207 90 optimizer.lr 0.018501297619871748 +207 90 negative_sampler.num_negs_per_pos 46.0 +207 90 training.batch_size 1.0 +207 91 model.embedding_dim 0.0 +207 91 loss.margin 15.975368877734091 +207 91 loss.adversarial_temperature 0.1636093373471023 +207 91 optimizer.lr 0.07156431536284157 +207 91 negative_sampler.num_negs_per_pos 53.0 +207 91 training.batch_size 2.0 +207 92 model.embedding_dim 1.0 +207 92 loss.margin 26.57300837112085 +207 92 loss.adversarial_temperature 0.8315817687397123 +207 92 optimizer.lr 0.005238499002873839 +207 92 negative_sampler.num_negs_per_pos 56.0 +207 92 training.batch_size 0.0 +207 93 model.embedding_dim 1.0 +207 93 loss.margin 29.160243375309967 +207 93 loss.adversarial_temperature 0.6629856917748274 +207 93 optimizer.lr 0.0010581399019662183 +207 93 negative_sampler.num_negs_per_pos 70.0 +207 93 training.batch_size 2.0 +207 94 model.embedding_dim 2.0 +207 94 loss.margin 9.881822034853156 +207 94 
loss.adversarial_temperature 0.9161659614168379 +207 94 optimizer.lr 0.0017333628932827416 +207 94 negative_sampler.num_negs_per_pos 20.0 +207 94 training.batch_size 1.0 +207 95 model.embedding_dim 0.0 +207 95 loss.margin 6.805870602854881 +207 95 loss.adversarial_temperature 0.9707102360625768 +207 95 optimizer.lr 0.051655403868779676 +207 95 negative_sampler.num_negs_per_pos 62.0 +207 95 training.batch_size 1.0 +207 96 model.embedding_dim 0.0 +207 96 loss.margin 25.309662607225537 +207 96 loss.adversarial_temperature 0.33509195316109425 +207 96 optimizer.lr 0.0016155284884140958 +207 96 negative_sampler.num_negs_per_pos 5.0 +207 96 training.batch_size 1.0 +207 97 model.embedding_dim 0.0 +207 97 loss.margin 13.228763605999431 +207 97 loss.adversarial_temperature 0.7327232070999872 +207 97 optimizer.lr 0.050126099596528195 +207 97 negative_sampler.num_negs_per_pos 78.0 +207 97 training.batch_size 1.0 +207 98 model.embedding_dim 2.0 +207 98 loss.margin 11.456400780971606 +207 98 loss.adversarial_temperature 0.4015140594645715 +207 98 optimizer.lr 0.06622248691257102 +207 98 negative_sampler.num_negs_per_pos 63.0 +207 98 training.batch_size 1.0 +207 99 model.embedding_dim 1.0 +207 99 loss.margin 20.80034852950407 +207 99 loss.adversarial_temperature 0.7966991651606544 +207 99 optimizer.lr 0.09990163462952921 +207 99 negative_sampler.num_negs_per_pos 34.0 +207 99 training.batch_size 0.0 +207 100 model.embedding_dim 1.0 +207 100 loss.margin 5.3566254406777265 +207 100 loss.adversarial_temperature 0.9810228795215155 +207 100 optimizer.lr 0.006388976989788862 +207 100 negative_sampler.num_negs_per_pos 1.0 +207 100 training.batch_size 0.0 +207 1 dataset """wn18rr""" +207 1 model """distmult""" +207 1 loss """nssa""" +207 1 regularizer """no""" +207 1 optimizer """adam""" +207 1 training_loop """owa""" +207 1 negative_sampler """basic""" +207 1 evaluator """rankbased""" +207 2 dataset """wn18rr""" +207 2 model """distmult""" +207 2 loss """nssa""" +207 2 regularizer 
"""no""" +207 2 optimizer """adam""" +207 2 training_loop """owa""" +207 2 negative_sampler """basic""" +207 2 evaluator """rankbased""" +207 3 dataset """wn18rr""" +207 3 model """distmult""" +207 3 loss """nssa""" +207 3 regularizer """no""" +207 3 optimizer """adam""" +207 3 training_loop """owa""" +207 3 negative_sampler """basic""" +207 3 evaluator """rankbased""" +207 4 dataset """wn18rr""" +207 4 model """distmult""" +207 4 loss """nssa""" +207 4 regularizer """no""" +207 4 optimizer """adam""" +207 4 training_loop """owa""" +207 4 negative_sampler """basic""" +207 4 evaluator """rankbased""" +207 5 dataset """wn18rr""" +207 5 model """distmult""" +207 5 loss """nssa""" +207 5 regularizer """no""" +207 5 optimizer """adam""" +207 5 training_loop """owa""" +207 5 negative_sampler """basic""" +207 5 evaluator """rankbased""" +207 6 dataset """wn18rr""" +207 6 model """distmult""" +207 6 loss """nssa""" +207 6 regularizer """no""" +207 6 optimizer """adam""" +207 6 training_loop """owa""" +207 6 negative_sampler """basic""" +207 6 evaluator """rankbased""" +207 7 dataset """wn18rr""" +207 7 model """distmult""" +207 7 loss """nssa""" +207 7 regularizer """no""" +207 7 optimizer """adam""" +207 7 training_loop """owa""" +207 7 negative_sampler """basic""" +207 7 evaluator """rankbased""" +207 8 dataset """wn18rr""" +207 8 model """distmult""" +207 8 loss """nssa""" +207 8 regularizer """no""" +207 8 optimizer """adam""" +207 8 training_loop """owa""" +207 8 negative_sampler """basic""" +207 8 evaluator """rankbased""" +207 9 dataset """wn18rr""" +207 9 model """distmult""" +207 9 loss """nssa""" +207 9 regularizer """no""" +207 9 optimizer """adam""" +207 9 training_loop """owa""" +207 9 negative_sampler """basic""" +207 9 evaluator """rankbased""" +207 10 dataset """wn18rr""" +207 10 model """distmult""" +207 10 loss """nssa""" +207 10 regularizer """no""" +207 10 optimizer """adam""" +207 10 training_loop """owa""" +207 10 negative_sampler """basic""" +207 10 
evaluator """rankbased""" +207 11 dataset """wn18rr""" +207 11 model """distmult""" +207 11 loss """nssa""" +207 11 regularizer """no""" +207 11 optimizer """adam""" +207 11 training_loop """owa""" +207 11 negative_sampler """basic""" +207 11 evaluator """rankbased""" +207 12 dataset """wn18rr""" +207 12 model """distmult""" +207 12 loss """nssa""" +207 12 regularizer """no""" +207 12 optimizer """adam""" +207 12 training_loop """owa""" +207 12 negative_sampler """basic""" +207 12 evaluator """rankbased""" +207 13 dataset """wn18rr""" +207 13 model """distmult""" +207 13 loss """nssa""" +207 13 regularizer """no""" +207 13 optimizer """adam""" +207 13 training_loop """owa""" +207 13 negative_sampler """basic""" +207 13 evaluator """rankbased""" +207 14 dataset """wn18rr""" +207 14 model """distmult""" +207 14 loss """nssa""" +207 14 regularizer """no""" +207 14 optimizer """adam""" +207 14 training_loop """owa""" +207 14 negative_sampler """basic""" +207 14 evaluator """rankbased""" +207 15 dataset """wn18rr""" +207 15 model """distmult""" +207 15 loss """nssa""" +207 15 regularizer """no""" +207 15 optimizer """adam""" +207 15 training_loop """owa""" +207 15 negative_sampler """basic""" +207 15 evaluator """rankbased""" +207 16 dataset """wn18rr""" +207 16 model """distmult""" +207 16 loss """nssa""" +207 16 regularizer """no""" +207 16 optimizer """adam""" +207 16 training_loop """owa""" +207 16 negative_sampler """basic""" +207 16 evaluator """rankbased""" +207 17 dataset """wn18rr""" +207 17 model """distmult""" +207 17 loss """nssa""" +207 17 regularizer """no""" +207 17 optimizer """adam""" +207 17 training_loop """owa""" +207 17 negative_sampler """basic""" +207 17 evaluator """rankbased""" +207 18 dataset """wn18rr""" +207 18 model """distmult""" +207 18 loss """nssa""" +207 18 regularizer """no""" +207 18 optimizer """adam""" +207 18 training_loop """owa""" +207 18 negative_sampler """basic""" +207 18 evaluator """rankbased""" +207 19 dataset """wn18rr""" 
+207 19 model """distmult""" +207 19 loss """nssa""" +207 19 regularizer """no""" +207 19 optimizer """adam""" +207 19 training_loop """owa""" +207 19 negative_sampler """basic""" +207 19 evaluator """rankbased""" +207 20 dataset """wn18rr""" +207 20 model """distmult""" +207 20 loss """nssa""" +207 20 regularizer """no""" +207 20 optimizer """adam""" +207 20 training_loop """owa""" +207 20 negative_sampler """basic""" +207 20 evaluator """rankbased""" +207 21 dataset """wn18rr""" +207 21 model """distmult""" +207 21 loss """nssa""" +207 21 regularizer """no""" +207 21 optimizer """adam""" +207 21 training_loop """owa""" +207 21 negative_sampler """basic""" +207 21 evaluator """rankbased""" +207 22 dataset """wn18rr""" +207 22 model """distmult""" +207 22 loss """nssa""" +207 22 regularizer """no""" +207 22 optimizer """adam""" +207 22 training_loop """owa""" +207 22 negative_sampler """basic""" +207 22 evaluator """rankbased""" +207 23 dataset """wn18rr""" +207 23 model """distmult""" +207 23 loss """nssa""" +207 23 regularizer """no""" +207 23 optimizer """adam""" +207 23 training_loop """owa""" +207 23 negative_sampler """basic""" +207 23 evaluator """rankbased""" +207 24 dataset """wn18rr""" +207 24 model """distmult""" +207 24 loss """nssa""" +207 24 regularizer """no""" +207 24 optimizer """adam""" +207 24 training_loop """owa""" +207 24 negative_sampler """basic""" +207 24 evaluator """rankbased""" +207 25 dataset """wn18rr""" +207 25 model """distmult""" +207 25 loss """nssa""" +207 25 regularizer """no""" +207 25 optimizer """adam""" +207 25 training_loop """owa""" +207 25 negative_sampler """basic""" +207 25 evaluator """rankbased""" +207 26 dataset """wn18rr""" +207 26 model """distmult""" +207 26 loss """nssa""" +207 26 regularizer """no""" +207 26 optimizer """adam""" +207 26 training_loop """owa""" +207 26 negative_sampler """basic""" +207 26 evaluator """rankbased""" +207 27 dataset """wn18rr""" +207 27 model """distmult""" +207 27 loss """nssa""" 
+207 27 regularizer """no""" +207 27 optimizer """adam""" +207 27 training_loop """owa""" +207 27 negative_sampler """basic""" +207 27 evaluator """rankbased""" +207 28 dataset """wn18rr""" +207 28 model """distmult""" +207 28 loss """nssa""" +207 28 regularizer """no""" +207 28 optimizer """adam""" +207 28 training_loop """owa""" +207 28 negative_sampler """basic""" +207 28 evaluator """rankbased""" +207 29 dataset """wn18rr""" +207 29 model """distmult""" +207 29 loss """nssa""" +207 29 regularizer """no""" +207 29 optimizer """adam""" +207 29 training_loop """owa""" +207 29 negative_sampler """basic""" +207 29 evaluator """rankbased""" +207 30 dataset """wn18rr""" +207 30 model """distmult""" +207 30 loss """nssa""" +207 30 regularizer """no""" +207 30 optimizer """adam""" +207 30 training_loop """owa""" +207 30 negative_sampler """basic""" +207 30 evaluator """rankbased""" +207 31 dataset """wn18rr""" +207 31 model """distmult""" +207 31 loss """nssa""" +207 31 regularizer """no""" +207 31 optimizer """adam""" +207 31 training_loop """owa""" +207 31 negative_sampler """basic""" +207 31 evaluator """rankbased""" +207 32 dataset """wn18rr""" +207 32 model """distmult""" +207 32 loss """nssa""" +207 32 regularizer """no""" +207 32 optimizer """adam""" +207 32 training_loop """owa""" +207 32 negative_sampler """basic""" +207 32 evaluator """rankbased""" +207 33 dataset """wn18rr""" +207 33 model """distmult""" +207 33 loss """nssa""" +207 33 regularizer """no""" +207 33 optimizer """adam""" +207 33 training_loop """owa""" +207 33 negative_sampler """basic""" +207 33 evaluator """rankbased""" +207 34 dataset """wn18rr""" +207 34 model """distmult""" +207 34 loss """nssa""" +207 34 regularizer """no""" +207 34 optimizer """adam""" +207 34 training_loop """owa""" +207 34 negative_sampler """basic""" +207 34 evaluator """rankbased""" +207 35 dataset """wn18rr""" +207 35 model """distmult""" +207 35 loss """nssa""" +207 35 regularizer """no""" +207 35 optimizer 
"""adam""" +207 35 training_loop """owa""" +207 35 negative_sampler """basic""" +207 35 evaluator """rankbased""" +207 36 dataset """wn18rr""" +207 36 model """distmult""" +207 36 loss """nssa""" +207 36 regularizer """no""" +207 36 optimizer """adam""" +207 36 training_loop """owa""" +207 36 negative_sampler """basic""" +207 36 evaluator """rankbased""" +207 37 dataset """wn18rr""" +207 37 model """distmult""" +207 37 loss """nssa""" +207 37 regularizer """no""" +207 37 optimizer """adam""" +207 37 training_loop """owa""" +207 37 negative_sampler """basic""" +207 37 evaluator """rankbased""" +207 38 dataset """wn18rr""" +207 38 model """distmult""" +207 38 loss """nssa""" +207 38 regularizer """no""" +207 38 optimizer """adam""" +207 38 training_loop """owa""" +207 38 negative_sampler """basic""" +207 38 evaluator """rankbased""" +207 39 dataset """wn18rr""" +207 39 model """distmult""" +207 39 loss """nssa""" +207 39 regularizer """no""" +207 39 optimizer """adam""" +207 39 training_loop """owa""" +207 39 negative_sampler """basic""" +207 39 evaluator """rankbased""" +207 40 dataset """wn18rr""" +207 40 model """distmult""" +207 40 loss """nssa""" +207 40 regularizer """no""" +207 40 optimizer """adam""" +207 40 training_loop """owa""" +207 40 negative_sampler """basic""" +207 40 evaluator """rankbased""" +207 41 dataset """wn18rr""" +207 41 model """distmult""" +207 41 loss """nssa""" +207 41 regularizer """no""" +207 41 optimizer """adam""" +207 41 training_loop """owa""" +207 41 negative_sampler """basic""" +207 41 evaluator """rankbased""" +207 42 dataset """wn18rr""" +207 42 model """distmult""" +207 42 loss """nssa""" +207 42 regularizer """no""" +207 42 optimizer """adam""" +207 42 training_loop """owa""" +207 42 negative_sampler """basic""" +207 42 evaluator """rankbased""" +207 43 dataset """wn18rr""" +207 43 model """distmult""" +207 43 loss """nssa""" +207 43 regularizer """no""" +207 43 optimizer """adam""" +207 43 training_loop """owa""" +207 43 
negative_sampler """basic""" +207 43 evaluator """rankbased""" +207 44 dataset """wn18rr""" +207 44 model """distmult""" +207 44 loss """nssa""" +207 44 regularizer """no""" +207 44 optimizer """adam""" +207 44 training_loop """owa""" +207 44 negative_sampler """basic""" +207 44 evaluator """rankbased""" +207 45 dataset """wn18rr""" +207 45 model """distmult""" +207 45 loss """nssa""" +207 45 regularizer """no""" +207 45 optimizer """adam""" +207 45 training_loop """owa""" +207 45 negative_sampler """basic""" +207 45 evaluator """rankbased""" +207 46 dataset """wn18rr""" +207 46 model """distmult""" +207 46 loss """nssa""" +207 46 regularizer """no""" +207 46 optimizer """adam""" +207 46 training_loop """owa""" +207 46 negative_sampler """basic""" +207 46 evaluator """rankbased""" +207 47 dataset """wn18rr""" +207 47 model """distmult""" +207 47 loss """nssa""" +207 47 regularizer """no""" +207 47 optimizer """adam""" +207 47 training_loop """owa""" +207 47 negative_sampler """basic""" +207 47 evaluator """rankbased""" +207 48 dataset """wn18rr""" +207 48 model """distmult""" +207 48 loss """nssa""" +207 48 regularizer """no""" +207 48 optimizer """adam""" +207 48 training_loop """owa""" +207 48 negative_sampler """basic""" +207 48 evaluator """rankbased""" +207 49 dataset """wn18rr""" +207 49 model """distmult""" +207 49 loss """nssa""" +207 49 regularizer """no""" +207 49 optimizer """adam""" +207 49 training_loop """owa""" +207 49 negative_sampler """basic""" +207 49 evaluator """rankbased""" +207 50 dataset """wn18rr""" +207 50 model """distmult""" +207 50 loss """nssa""" +207 50 regularizer """no""" +207 50 optimizer """adam""" +207 50 training_loop """owa""" +207 50 negative_sampler """basic""" +207 50 evaluator """rankbased""" +207 51 dataset """wn18rr""" +207 51 model """distmult""" +207 51 loss """nssa""" +207 51 regularizer """no""" +207 51 optimizer """adam""" +207 51 training_loop """owa""" +207 51 negative_sampler """basic""" +207 51 evaluator 
"""rankbased""" +207 52 dataset """wn18rr""" +207 52 model """distmult""" +207 52 loss """nssa""" +207 52 regularizer """no""" +207 52 optimizer """adam""" +207 52 training_loop """owa""" +207 52 negative_sampler """basic""" +207 52 evaluator """rankbased""" +207 53 dataset """wn18rr""" +207 53 model """distmult""" +207 53 loss """nssa""" +207 53 regularizer """no""" +207 53 optimizer """adam""" +207 53 training_loop """owa""" +207 53 negative_sampler """basic""" +207 53 evaluator """rankbased""" +207 54 dataset """wn18rr""" +207 54 model """distmult""" +207 54 loss """nssa""" +207 54 regularizer """no""" +207 54 optimizer """adam""" +207 54 training_loop """owa""" +207 54 negative_sampler """basic""" +207 54 evaluator """rankbased""" +207 55 dataset """wn18rr""" +207 55 model """distmult""" +207 55 loss """nssa""" +207 55 regularizer """no""" +207 55 optimizer """adam""" +207 55 training_loop """owa""" +207 55 negative_sampler """basic""" +207 55 evaluator """rankbased""" +207 56 dataset """wn18rr""" +207 56 model """distmult""" +207 56 loss """nssa""" +207 56 regularizer """no""" +207 56 optimizer """adam""" +207 56 training_loop """owa""" +207 56 negative_sampler """basic""" +207 56 evaluator """rankbased""" +207 57 dataset """wn18rr""" +207 57 model """distmult""" +207 57 loss """nssa""" +207 57 regularizer """no""" +207 57 optimizer """adam""" +207 57 training_loop """owa""" +207 57 negative_sampler """basic""" +207 57 evaluator """rankbased""" +207 58 dataset """wn18rr""" +207 58 model """distmult""" +207 58 loss """nssa""" +207 58 regularizer """no""" +207 58 optimizer """adam""" +207 58 training_loop """owa""" +207 58 negative_sampler """basic""" +207 58 evaluator """rankbased""" +207 59 dataset """wn18rr""" +207 59 model """distmult""" +207 59 loss """nssa""" +207 59 regularizer """no""" +207 59 optimizer """adam""" +207 59 training_loop """owa""" +207 59 negative_sampler """basic""" +207 59 evaluator """rankbased""" +207 60 dataset """wn18rr""" +207 60 
model """distmult""" +207 60 loss """nssa""" +207 60 regularizer """no""" +207 60 optimizer """adam""" +207 60 training_loop """owa""" +207 60 negative_sampler """basic""" +207 60 evaluator """rankbased""" +207 61 dataset """wn18rr""" +207 61 model """distmult""" +207 61 loss """nssa""" +207 61 regularizer """no""" +207 61 optimizer """adam""" +207 61 training_loop """owa""" +207 61 negative_sampler """basic""" +207 61 evaluator """rankbased""" +207 62 dataset """wn18rr""" +207 62 model """distmult""" +207 62 loss """nssa""" +207 62 regularizer """no""" +207 62 optimizer """adam""" +207 62 training_loop """owa""" +207 62 negative_sampler """basic""" +207 62 evaluator """rankbased""" +207 63 dataset """wn18rr""" +207 63 model """distmult""" +207 63 loss """nssa""" +207 63 regularizer """no""" +207 63 optimizer """adam""" +207 63 training_loop """owa""" +207 63 negative_sampler """basic""" +207 63 evaluator """rankbased""" +207 64 dataset """wn18rr""" +207 64 model """distmult""" +207 64 loss """nssa""" +207 64 regularizer """no""" +207 64 optimizer """adam""" +207 64 training_loop """owa""" +207 64 negative_sampler """basic""" +207 64 evaluator """rankbased""" +207 65 dataset """wn18rr""" +207 65 model """distmult""" +207 65 loss """nssa""" +207 65 regularizer """no""" +207 65 optimizer """adam""" +207 65 training_loop """owa""" +207 65 negative_sampler """basic""" +207 65 evaluator """rankbased""" +207 66 dataset """wn18rr""" +207 66 model """distmult""" +207 66 loss """nssa""" +207 66 regularizer """no""" +207 66 optimizer """adam""" +207 66 training_loop """owa""" +207 66 negative_sampler """basic""" +207 66 evaluator """rankbased""" +207 67 dataset """wn18rr""" +207 67 model """distmult""" +207 67 loss """nssa""" +207 67 regularizer """no""" +207 67 optimizer """adam""" +207 67 training_loop """owa""" +207 67 negative_sampler """basic""" +207 67 evaluator """rankbased""" +207 68 dataset """wn18rr""" +207 68 model """distmult""" +207 68 loss """nssa""" +207 68 
regularizer """no""" +207 68 optimizer """adam""" +207 68 training_loop """owa""" +207 68 negative_sampler """basic""" +207 68 evaluator """rankbased""" +207 69 dataset """wn18rr""" +207 69 model """distmult""" +207 69 loss """nssa""" +207 69 regularizer """no""" +207 69 optimizer """adam""" +207 69 training_loop """owa""" +207 69 negative_sampler """basic""" +207 69 evaluator """rankbased""" +207 70 dataset """wn18rr""" +207 70 model """distmult""" +207 70 loss """nssa""" +207 70 regularizer """no""" +207 70 optimizer """adam""" +207 70 training_loop """owa""" +207 70 negative_sampler """basic""" +207 70 evaluator """rankbased""" +207 71 dataset """wn18rr""" +207 71 model """distmult""" +207 71 loss """nssa""" +207 71 regularizer """no""" +207 71 optimizer """adam""" +207 71 training_loop """owa""" +207 71 negative_sampler """basic""" +207 71 evaluator """rankbased""" +207 72 dataset """wn18rr""" +207 72 model """distmult""" +207 72 loss """nssa""" +207 72 regularizer """no""" +207 72 optimizer """adam""" +207 72 training_loop """owa""" +207 72 negative_sampler """basic""" +207 72 evaluator """rankbased""" +207 73 dataset """wn18rr""" +207 73 model """distmult""" +207 73 loss """nssa""" +207 73 regularizer """no""" +207 73 optimizer """adam""" +207 73 training_loop """owa""" +207 73 negative_sampler """basic""" +207 73 evaluator """rankbased""" +207 74 dataset """wn18rr""" +207 74 model """distmult""" +207 74 loss """nssa""" +207 74 regularizer """no""" +207 74 optimizer """adam""" +207 74 training_loop """owa""" +207 74 negative_sampler """basic""" +207 74 evaluator """rankbased""" +207 75 dataset """wn18rr""" +207 75 model """distmult""" +207 75 loss """nssa""" +207 75 regularizer """no""" +207 75 optimizer """adam""" +207 75 training_loop """owa""" +207 75 negative_sampler """basic""" +207 75 evaluator """rankbased""" +207 76 dataset """wn18rr""" +207 76 model """distmult""" +207 76 loss """nssa""" +207 76 regularizer """no""" +207 76 optimizer """adam""" +207 
76 training_loop """owa""" +207 76 negative_sampler """basic""" +207 76 evaluator """rankbased""" +207 77 dataset """wn18rr""" +207 77 model """distmult""" +207 77 loss """nssa""" +207 77 regularizer """no""" +207 77 optimizer """adam""" +207 77 training_loop """owa""" +207 77 negative_sampler """basic""" +207 77 evaluator """rankbased""" +207 78 dataset """wn18rr""" +207 78 model """distmult""" +207 78 loss """nssa""" +207 78 regularizer """no""" +207 78 optimizer """adam""" +207 78 training_loop """owa""" +207 78 negative_sampler """basic""" +207 78 evaluator """rankbased""" +207 79 dataset """wn18rr""" +207 79 model """distmult""" +207 79 loss """nssa""" +207 79 regularizer """no""" +207 79 optimizer """adam""" +207 79 training_loop """owa""" +207 79 negative_sampler """basic""" +207 79 evaluator """rankbased""" +207 80 dataset """wn18rr""" +207 80 model """distmult""" +207 80 loss """nssa""" +207 80 regularizer """no""" +207 80 optimizer """adam""" +207 80 training_loop """owa""" +207 80 negative_sampler """basic""" +207 80 evaluator """rankbased""" +207 81 dataset """wn18rr""" +207 81 model """distmult""" +207 81 loss """nssa""" +207 81 regularizer """no""" +207 81 optimizer """adam""" +207 81 training_loop """owa""" +207 81 negative_sampler """basic""" +207 81 evaluator """rankbased""" +207 82 dataset """wn18rr""" +207 82 model """distmult""" +207 82 loss """nssa""" +207 82 regularizer """no""" +207 82 optimizer """adam""" +207 82 training_loop """owa""" +207 82 negative_sampler """basic""" +207 82 evaluator """rankbased""" +207 83 dataset """wn18rr""" +207 83 model """distmult""" +207 83 loss """nssa""" +207 83 regularizer """no""" +207 83 optimizer """adam""" +207 83 training_loop """owa""" +207 83 negative_sampler """basic""" +207 83 evaluator """rankbased""" +207 84 dataset """wn18rr""" +207 84 model """distmult""" +207 84 loss """nssa""" +207 84 regularizer """no""" +207 84 optimizer """adam""" +207 84 training_loop """owa""" +207 84 negative_sampler 
"""basic""" +207 84 evaluator """rankbased""" +207 85 dataset """wn18rr""" +207 85 model """distmult""" +207 85 loss """nssa""" +207 85 regularizer """no""" +207 85 optimizer """adam""" +207 85 training_loop """owa""" +207 85 negative_sampler """basic""" +207 85 evaluator """rankbased""" +207 86 dataset """wn18rr""" +207 86 model """distmult""" +207 86 loss """nssa""" +207 86 regularizer """no""" +207 86 optimizer """adam""" +207 86 training_loop """owa""" +207 86 negative_sampler """basic""" +207 86 evaluator """rankbased""" +207 87 dataset """wn18rr""" +207 87 model """distmult""" +207 87 loss """nssa""" +207 87 regularizer """no""" +207 87 optimizer """adam""" +207 87 training_loop """owa""" +207 87 negative_sampler """basic""" +207 87 evaluator """rankbased""" +207 88 dataset """wn18rr""" +207 88 model """distmult""" +207 88 loss """nssa""" +207 88 regularizer """no""" +207 88 optimizer """adam""" +207 88 training_loop """owa""" +207 88 negative_sampler """basic""" +207 88 evaluator """rankbased""" +207 89 dataset """wn18rr""" +207 89 model """distmult""" +207 89 loss """nssa""" +207 89 regularizer """no""" +207 89 optimizer """adam""" +207 89 training_loop """owa""" +207 89 negative_sampler """basic""" +207 89 evaluator """rankbased""" +207 90 dataset """wn18rr""" +207 90 model """distmult""" +207 90 loss """nssa""" +207 90 regularizer """no""" +207 90 optimizer """adam""" +207 90 training_loop """owa""" +207 90 negative_sampler """basic""" +207 90 evaluator """rankbased""" +207 91 dataset """wn18rr""" +207 91 model """distmult""" +207 91 loss """nssa""" +207 91 regularizer """no""" +207 91 optimizer """adam""" +207 91 training_loop """owa""" +207 91 negative_sampler """basic""" +207 91 evaluator """rankbased""" +207 92 dataset """wn18rr""" +207 92 model """distmult""" +207 92 loss """nssa""" +207 92 regularizer """no""" +207 92 optimizer """adam""" +207 92 training_loop """owa""" +207 92 negative_sampler """basic""" +207 92 evaluator """rankbased""" +207 93 
dataset """wn18rr""" +207 93 model """distmult""" +207 93 loss """nssa""" +207 93 regularizer """no""" +207 93 optimizer """adam""" +207 93 training_loop """owa""" +207 93 negative_sampler """basic""" +207 93 evaluator """rankbased""" +207 94 dataset """wn18rr""" +207 94 model """distmult""" +207 94 loss """nssa""" +207 94 regularizer """no""" +207 94 optimizer """adam""" +207 94 training_loop """owa""" +207 94 negative_sampler """basic""" +207 94 evaluator """rankbased""" +207 95 dataset """wn18rr""" +207 95 model """distmult""" +207 95 loss """nssa""" +207 95 regularizer """no""" +207 95 optimizer """adam""" +207 95 training_loop """owa""" +207 95 negative_sampler """basic""" +207 95 evaluator """rankbased""" +207 96 dataset """wn18rr""" +207 96 model """distmult""" +207 96 loss """nssa""" +207 96 regularizer """no""" +207 96 optimizer """adam""" +207 96 training_loop """owa""" +207 96 negative_sampler """basic""" +207 96 evaluator """rankbased""" +207 97 dataset """wn18rr""" +207 97 model """distmult""" +207 97 loss """nssa""" +207 97 regularizer """no""" +207 97 optimizer """adam""" +207 97 training_loop """owa""" +207 97 negative_sampler """basic""" +207 97 evaluator """rankbased""" +207 98 dataset """wn18rr""" +207 98 model """distmult""" +207 98 loss """nssa""" +207 98 regularizer """no""" +207 98 optimizer """adam""" +207 98 training_loop """owa""" +207 98 negative_sampler """basic""" +207 98 evaluator """rankbased""" +207 99 dataset """wn18rr""" +207 99 model """distmult""" +207 99 loss """nssa""" +207 99 regularizer """no""" +207 99 optimizer """adam""" +207 99 training_loop """owa""" +207 99 negative_sampler """basic""" +207 99 evaluator """rankbased""" +207 100 dataset """wn18rr""" +207 100 model """distmult""" +207 100 loss """nssa""" +207 100 regularizer """no""" +207 100 optimizer """adam""" +207 100 training_loop """owa""" +207 100 negative_sampler """basic""" +207 100 evaluator """rankbased""" +208 1 model.embedding_dim 2.0 +208 1 loss.margin 
9.045077415360176 +208 1 optimizer.lr 0.0344004280727528 +208 1 negative_sampler.num_negs_per_pos 64.0 +208 1 training.batch_size 2.0 +208 2 model.embedding_dim 1.0 +208 2 loss.margin 7.807394431926092 +208 2 optimizer.lr 0.001051321328580711 +208 2 negative_sampler.num_negs_per_pos 11.0 +208 2 training.batch_size 0.0 +208 3 model.embedding_dim 2.0 +208 3 loss.margin 8.799081518321955 +208 3 optimizer.lr 0.03363805375942258 +208 3 negative_sampler.num_negs_per_pos 74.0 +208 3 training.batch_size 0.0 +208 4 model.embedding_dim 1.0 +208 4 loss.margin 5.029716128858159 +208 4 optimizer.lr 0.007021914637107604 +208 4 negative_sampler.num_negs_per_pos 36.0 +208 4 training.batch_size 0.0 +208 5 model.embedding_dim 2.0 +208 5 loss.margin 1.9866822657066212 +208 5 optimizer.lr 0.0018128407069361795 +208 5 negative_sampler.num_negs_per_pos 24.0 +208 5 training.batch_size 1.0 +208 6 model.embedding_dim 1.0 +208 6 loss.margin 1.3850435550591362 +208 6 optimizer.lr 0.01807332113398192 +208 6 negative_sampler.num_negs_per_pos 45.0 +208 6 training.batch_size 0.0 +208 7 model.embedding_dim 0.0 +208 7 loss.margin 2.064241061561635 +208 7 optimizer.lr 0.002682807992862331 +208 7 negative_sampler.num_negs_per_pos 41.0 +208 7 training.batch_size 0.0 +208 8 model.embedding_dim 0.0 +208 8 loss.margin 6.448633782916 +208 8 optimizer.lr 0.007583909789834662 +208 8 negative_sampler.num_negs_per_pos 94.0 +208 8 training.batch_size 1.0 +208 9 model.embedding_dim 0.0 +208 9 loss.margin 5.246809261180286 +208 9 optimizer.lr 0.0033232571533411814 +208 9 negative_sampler.num_negs_per_pos 0.0 +208 9 training.batch_size 1.0 +208 10 model.embedding_dim 2.0 +208 10 loss.margin 8.684912653962208 +208 10 optimizer.lr 0.06520867916752121 +208 10 negative_sampler.num_negs_per_pos 90.0 +208 10 training.batch_size 2.0 +208 11 model.embedding_dim 0.0 +208 11 loss.margin 9.223417447594823 +208 11 optimizer.lr 0.03814685531192888 +208 11 negative_sampler.num_negs_per_pos 24.0 +208 11 training.batch_size 2.0 
+208 12 model.embedding_dim 0.0 +208 12 loss.margin 3.9712680137540612 +208 12 optimizer.lr 0.007524803977291521 +208 12 negative_sampler.num_negs_per_pos 20.0 +208 12 training.batch_size 1.0 +208 13 model.embedding_dim 2.0 +208 13 loss.margin 7.513793430637533 +208 13 optimizer.lr 0.06281871513099184 +208 13 negative_sampler.num_negs_per_pos 37.0 +208 13 training.batch_size 0.0 +208 14 model.embedding_dim 2.0 +208 14 loss.margin 1.9030042770912226 +208 14 optimizer.lr 0.001994732985408503 +208 14 negative_sampler.num_negs_per_pos 27.0 +208 14 training.batch_size 0.0 +208 15 model.embedding_dim 2.0 +208 15 loss.margin 3.3598977482952597 +208 15 optimizer.lr 0.08840266647886859 +208 15 negative_sampler.num_negs_per_pos 17.0 +208 15 training.batch_size 0.0 +208 16 model.embedding_dim 0.0 +208 16 loss.margin 9.283425664883044 +208 16 optimizer.lr 0.01872329543732135 +208 16 negative_sampler.num_negs_per_pos 83.0 +208 16 training.batch_size 1.0 +208 17 model.embedding_dim 0.0 +208 17 loss.margin 5.988575601703427 +208 17 optimizer.lr 0.056254288204712996 +208 17 negative_sampler.num_negs_per_pos 18.0 +208 17 training.batch_size 1.0 +208 18 model.embedding_dim 1.0 +208 18 loss.margin 6.635594584181415 +208 18 optimizer.lr 0.005586113227856261 +208 18 negative_sampler.num_negs_per_pos 57.0 +208 18 training.batch_size 2.0 +208 19 model.embedding_dim 1.0 +208 19 loss.margin 8.07870017689839 +208 19 optimizer.lr 0.024140198497004318 +208 19 negative_sampler.num_negs_per_pos 43.0 +208 19 training.batch_size 2.0 +208 20 model.embedding_dim 0.0 +208 20 loss.margin 8.514298065912014 +208 20 optimizer.lr 0.03828318161042594 +208 20 negative_sampler.num_negs_per_pos 71.0 +208 20 training.batch_size 0.0 +208 21 model.embedding_dim 2.0 +208 21 loss.margin 3.2505151410785116 +208 21 optimizer.lr 0.07114872923471052 +208 21 negative_sampler.num_negs_per_pos 96.0 +208 21 training.batch_size 2.0 +208 22 model.embedding_dim 1.0 +208 22 loss.margin 7.074578093630703 +208 22 optimizer.lr 
0.001503516490335435 +208 22 negative_sampler.num_negs_per_pos 41.0 +208 22 training.batch_size 2.0 +208 23 model.embedding_dim 0.0 +208 23 loss.margin 9.433085775777428 +208 23 optimizer.lr 0.04936280244419481 +208 23 negative_sampler.num_negs_per_pos 99.0 +208 23 training.batch_size 1.0 +208 24 model.embedding_dim 0.0 +208 24 loss.margin 6.219321983165392 +208 24 optimizer.lr 0.004564338064729648 +208 24 negative_sampler.num_negs_per_pos 13.0 +208 24 training.batch_size 1.0 +208 25 model.embedding_dim 1.0 +208 25 loss.margin 2.999017143951022 +208 25 optimizer.lr 0.05901557623600669 +208 25 negative_sampler.num_negs_per_pos 42.0 +208 25 training.batch_size 2.0 +208 26 model.embedding_dim 2.0 +208 26 loss.margin 8.058350037161041 +208 26 optimizer.lr 0.09119671144959089 +208 26 negative_sampler.num_negs_per_pos 5.0 +208 26 training.batch_size 1.0 +208 27 model.embedding_dim 0.0 +208 27 loss.margin 9.247047246304444 +208 27 optimizer.lr 0.07828208035893848 +208 27 negative_sampler.num_negs_per_pos 99.0 +208 27 training.batch_size 2.0 +208 28 model.embedding_dim 2.0 +208 28 loss.margin 3.3001806516160173 +208 28 optimizer.lr 0.0023630728686310776 +208 28 negative_sampler.num_negs_per_pos 10.0 +208 28 training.batch_size 1.0 +208 29 model.embedding_dim 1.0 +208 29 loss.margin 6.114175498980611 +208 29 optimizer.lr 0.005736171858310029 +208 29 negative_sampler.num_negs_per_pos 74.0 +208 29 training.batch_size 1.0 +208 30 model.embedding_dim 2.0 +208 30 loss.margin 3.691687483998868 +208 30 optimizer.lr 0.044377780867386994 +208 30 negative_sampler.num_negs_per_pos 22.0 +208 30 training.batch_size 0.0 +208 31 model.embedding_dim 0.0 +208 31 loss.margin 8.57941977703496 +208 31 optimizer.lr 0.03519154193787752 +208 31 negative_sampler.num_negs_per_pos 51.0 +208 31 training.batch_size 0.0 +208 32 model.embedding_dim 2.0 +208 32 loss.margin 6.002450840834847 +208 32 optimizer.lr 0.002649232003233997 +208 32 negative_sampler.num_negs_per_pos 19.0 +208 32 
training.batch_size 2.0 +208 33 model.embedding_dim 2.0 +208 33 loss.margin 6.521927197746378 +208 33 optimizer.lr 0.030071798071472176 +208 33 negative_sampler.num_negs_per_pos 46.0 +208 33 training.batch_size 2.0 +208 34 model.embedding_dim 0.0 +208 34 loss.margin 6.808840105229831 +208 34 optimizer.lr 0.09021792349583607 +208 34 negative_sampler.num_negs_per_pos 51.0 +208 34 training.batch_size 0.0 +208 35 model.embedding_dim 2.0 +208 35 loss.margin 0.5194873352459792 +208 35 optimizer.lr 0.05267540274715926 +208 35 negative_sampler.num_negs_per_pos 95.0 +208 35 training.batch_size 1.0 +208 36 model.embedding_dim 0.0 +208 36 loss.margin 9.06013631691682 +208 36 optimizer.lr 0.0022291185466859595 +208 36 negative_sampler.num_negs_per_pos 66.0 +208 36 training.batch_size 0.0 +208 37 model.embedding_dim 2.0 +208 37 loss.margin 8.06363430062361 +208 37 optimizer.lr 0.004101088210396384 +208 37 negative_sampler.num_negs_per_pos 15.0 +208 37 training.batch_size 1.0 +208 38 model.embedding_dim 1.0 +208 38 loss.margin 4.087292809050647 +208 38 optimizer.lr 0.08448113569265066 +208 38 negative_sampler.num_negs_per_pos 21.0 +208 38 training.batch_size 0.0 +208 39 model.embedding_dim 2.0 +208 39 loss.margin 2.860834090531301 +208 39 optimizer.lr 0.0054581571017419915 +208 39 negative_sampler.num_negs_per_pos 58.0 +208 39 training.batch_size 1.0 +208 40 model.embedding_dim 0.0 +208 40 loss.margin 3.533621369131623 +208 40 optimizer.lr 0.0033435368864896658 +208 40 negative_sampler.num_negs_per_pos 14.0 +208 40 training.batch_size 0.0 +208 41 model.embedding_dim 1.0 +208 41 loss.margin 9.038806119639837 +208 41 optimizer.lr 0.02296953235680712 +208 41 negative_sampler.num_negs_per_pos 17.0 +208 41 training.batch_size 2.0 +208 42 model.embedding_dim 2.0 +208 42 loss.margin 4.639167930497608 +208 42 optimizer.lr 0.003747738688780643 +208 42 negative_sampler.num_negs_per_pos 15.0 +208 42 training.batch_size 0.0 +208 43 model.embedding_dim 2.0 +208 43 loss.margin 
9.349760092804283 +208 43 optimizer.lr 0.00752453227083462 +208 43 negative_sampler.num_negs_per_pos 19.0 +208 43 training.batch_size 0.0 +208 44 model.embedding_dim 0.0 +208 44 loss.margin 3.6016598153410997 +208 44 optimizer.lr 0.005752470997928599 +208 44 negative_sampler.num_negs_per_pos 90.0 +208 44 training.batch_size 2.0 +208 45 model.embedding_dim 0.0 +208 45 loss.margin 1.9626248167936926 +208 45 optimizer.lr 0.005310183150178855 +208 45 negative_sampler.num_negs_per_pos 45.0 +208 45 training.batch_size 2.0 +208 46 model.embedding_dim 2.0 +208 46 loss.margin 9.682007439227727 +208 46 optimizer.lr 0.06558238405048553 +208 46 negative_sampler.num_negs_per_pos 91.0 +208 46 training.batch_size 1.0 +208 47 model.embedding_dim 0.0 +208 47 loss.margin 4.395984012441197 +208 47 optimizer.lr 0.054259469770916356 +208 47 negative_sampler.num_negs_per_pos 51.0 +208 47 training.batch_size 2.0 +208 48 model.embedding_dim 0.0 +208 48 loss.margin 9.969162458669736 +208 48 optimizer.lr 0.00845875258522238 +208 48 negative_sampler.num_negs_per_pos 78.0 +208 48 training.batch_size 0.0 +208 49 model.embedding_dim 0.0 +208 49 loss.margin 1.4530711365613587 +208 49 optimizer.lr 0.00919057847227623 +208 49 negative_sampler.num_negs_per_pos 97.0 +208 49 training.batch_size 0.0 +208 50 model.embedding_dim 2.0 +208 50 loss.margin 6.669874277784703 +208 50 optimizer.lr 0.007487097180002114 +208 50 negative_sampler.num_negs_per_pos 39.0 +208 50 training.batch_size 1.0 +208 51 model.embedding_dim 1.0 +208 51 loss.margin 0.6632489551453891 +208 51 optimizer.lr 0.06430441429339928 +208 51 negative_sampler.num_negs_per_pos 32.0 +208 51 training.batch_size 0.0 +208 52 model.embedding_dim 2.0 +208 52 loss.margin 6.620950951670353 +208 52 optimizer.lr 0.050201044543371556 +208 52 negative_sampler.num_negs_per_pos 71.0 +208 52 training.batch_size 0.0 +208 53 model.embedding_dim 2.0 +208 53 loss.margin 5.585982717887699 +208 53 optimizer.lr 0.0014393792850135774 +208 53 
negative_sampler.num_negs_per_pos 51.0 +208 53 training.batch_size 1.0 +208 54 model.embedding_dim 1.0 +208 54 loss.margin 7.928239125958996 +208 54 optimizer.lr 0.002362526364900228 +208 54 negative_sampler.num_negs_per_pos 7.0 +208 54 training.batch_size 0.0 +208 55 model.embedding_dim 1.0 +208 55 loss.margin 4.8635431705448795 +208 55 optimizer.lr 0.0028834904819842718 +208 55 negative_sampler.num_negs_per_pos 45.0 +208 55 training.batch_size 0.0 +208 56 model.embedding_dim 0.0 +208 56 loss.margin 9.787808256007565 +208 56 optimizer.lr 0.0017947045722907647 +208 56 negative_sampler.num_negs_per_pos 22.0 +208 56 training.batch_size 1.0 +208 57 model.embedding_dim 2.0 +208 57 loss.margin 5.223991676586145 +208 57 optimizer.lr 0.09885902544792474 +208 57 negative_sampler.num_negs_per_pos 89.0 +208 57 training.batch_size 0.0 +208 58 model.embedding_dim 0.0 +208 58 loss.margin 6.176616935025823 +208 58 optimizer.lr 0.016762305420989353 +208 58 negative_sampler.num_negs_per_pos 53.0 +208 58 training.batch_size 1.0 +208 59 model.embedding_dim 1.0 +208 59 loss.margin 3.3960383104533634 +208 59 optimizer.lr 0.04232977389764239 +208 59 negative_sampler.num_negs_per_pos 49.0 +208 59 training.batch_size 0.0 +208 60 model.embedding_dim 1.0 +208 60 loss.margin 9.37493590626091 +208 60 optimizer.lr 0.00484142097217239 +208 60 negative_sampler.num_negs_per_pos 36.0 +208 60 training.batch_size 0.0 +208 61 model.embedding_dim 2.0 +208 61 loss.margin 7.145652600761825 +208 61 optimizer.lr 0.017993448909460155 +208 61 negative_sampler.num_negs_per_pos 80.0 +208 61 training.batch_size 0.0 +208 1 dataset """wn18rr""" +208 1 model """distmult""" +208 1 loss """marginranking""" +208 1 regularizer """no""" +208 1 optimizer """adam""" +208 1 training_loop """owa""" +208 1 negative_sampler """basic""" +208 1 evaluator """rankbased""" +208 2 dataset """wn18rr""" +208 2 model """distmult""" +208 2 loss """marginranking""" +208 2 regularizer """no""" +208 2 optimizer """adam""" +208 2 
training_loop """owa""" +208 2 negative_sampler """basic""" +208 2 evaluator """rankbased""" +208 3 dataset """wn18rr""" +208 3 model """distmult""" +208 3 loss """marginranking""" +208 3 regularizer """no""" +208 3 optimizer """adam""" +208 3 training_loop """owa""" +208 3 negative_sampler """basic""" +208 3 evaluator """rankbased""" +208 4 dataset """wn18rr""" +208 4 model """distmult""" +208 4 loss """marginranking""" +208 4 regularizer """no""" +208 4 optimizer """adam""" +208 4 training_loop """owa""" +208 4 negative_sampler """basic""" +208 4 evaluator """rankbased""" +208 5 dataset """wn18rr""" +208 5 model """distmult""" +208 5 loss """marginranking""" +208 5 regularizer """no""" +208 5 optimizer """adam""" +208 5 training_loop """owa""" +208 5 negative_sampler """basic""" +208 5 evaluator """rankbased""" +208 6 dataset """wn18rr""" +208 6 model """distmult""" +208 6 loss """marginranking""" +208 6 regularizer """no""" +208 6 optimizer """adam""" +208 6 training_loop """owa""" +208 6 negative_sampler """basic""" +208 6 evaluator """rankbased""" +208 7 dataset """wn18rr""" +208 7 model """distmult""" +208 7 loss """marginranking""" +208 7 regularizer """no""" +208 7 optimizer """adam""" +208 7 training_loop """owa""" +208 7 negative_sampler """basic""" +208 7 evaluator """rankbased""" +208 8 dataset """wn18rr""" +208 8 model """distmult""" +208 8 loss """marginranking""" +208 8 regularizer """no""" +208 8 optimizer """adam""" +208 8 training_loop """owa""" +208 8 negative_sampler """basic""" +208 8 evaluator """rankbased""" +208 9 dataset """wn18rr""" +208 9 model """distmult""" +208 9 loss """marginranking""" +208 9 regularizer """no""" +208 9 optimizer """adam""" +208 9 training_loop """owa""" +208 9 negative_sampler """basic""" +208 9 evaluator """rankbased""" +208 10 dataset """wn18rr""" +208 10 model """distmult""" +208 10 loss """marginranking""" +208 10 regularizer """no""" +208 10 optimizer """adam""" +208 10 training_loop """owa""" +208 10 
negative_sampler """basic""" +208 10 evaluator """rankbased""" +208 11 dataset """wn18rr""" +208 11 model """distmult""" +208 11 loss """marginranking""" +208 11 regularizer """no""" +208 11 optimizer """adam""" +208 11 training_loop """owa""" +208 11 negative_sampler """basic""" +208 11 evaluator """rankbased""" +208 12 dataset """wn18rr""" +208 12 model """distmult""" +208 12 loss """marginranking""" +208 12 regularizer """no""" +208 12 optimizer """adam""" +208 12 training_loop """owa""" +208 12 negative_sampler """basic""" +208 12 evaluator """rankbased""" +208 13 dataset """wn18rr""" +208 13 model """distmult""" +208 13 loss """marginranking""" +208 13 regularizer """no""" +208 13 optimizer """adam""" +208 13 training_loop """owa""" +208 13 negative_sampler """basic""" +208 13 evaluator """rankbased""" +208 14 dataset """wn18rr""" +208 14 model """distmult""" +208 14 loss """marginranking""" +208 14 regularizer """no""" +208 14 optimizer """adam""" +208 14 training_loop """owa""" +208 14 negative_sampler """basic""" +208 14 evaluator """rankbased""" +208 15 dataset """wn18rr""" +208 15 model """distmult""" +208 15 loss """marginranking""" +208 15 regularizer """no""" +208 15 optimizer """adam""" +208 15 training_loop """owa""" +208 15 negative_sampler """basic""" +208 15 evaluator """rankbased""" +208 16 dataset """wn18rr""" +208 16 model """distmult""" +208 16 loss """marginranking""" +208 16 regularizer """no""" +208 16 optimizer """adam""" +208 16 training_loop """owa""" +208 16 negative_sampler """basic""" +208 16 evaluator """rankbased""" +208 17 dataset """wn18rr""" +208 17 model """distmult""" +208 17 loss """marginranking""" +208 17 regularizer """no""" +208 17 optimizer """adam""" +208 17 training_loop """owa""" +208 17 negative_sampler """basic""" +208 17 evaluator """rankbased""" +208 18 dataset """wn18rr""" +208 18 model """distmult""" +208 18 loss """marginranking""" +208 18 regularizer """no""" +208 18 optimizer """adam""" +208 18 training_loop 
"""owa""" +208 18 negative_sampler """basic""" +208 18 evaluator """rankbased""" +208 19 dataset """wn18rr""" +208 19 model """distmult""" +208 19 loss """marginranking""" +208 19 regularizer """no""" +208 19 optimizer """adam""" +208 19 training_loop """owa""" +208 19 negative_sampler """basic""" +208 19 evaluator """rankbased""" +208 20 dataset """wn18rr""" +208 20 model """distmult""" +208 20 loss """marginranking""" +208 20 regularizer """no""" +208 20 optimizer """adam""" +208 20 training_loop """owa""" +208 20 negative_sampler """basic""" +208 20 evaluator """rankbased""" +208 21 dataset """wn18rr""" +208 21 model """distmult""" +208 21 loss """marginranking""" +208 21 regularizer """no""" +208 21 optimizer """adam""" +208 21 training_loop """owa""" +208 21 negative_sampler """basic""" +208 21 evaluator """rankbased""" +208 22 dataset """wn18rr""" +208 22 model """distmult""" +208 22 loss """marginranking""" +208 22 regularizer """no""" +208 22 optimizer """adam""" +208 22 training_loop """owa""" +208 22 negative_sampler """basic""" +208 22 evaluator """rankbased""" +208 23 dataset """wn18rr""" +208 23 model """distmult""" +208 23 loss """marginranking""" +208 23 regularizer """no""" +208 23 optimizer """adam""" +208 23 training_loop """owa""" +208 23 negative_sampler """basic""" +208 23 evaluator """rankbased""" +208 24 dataset """wn18rr""" +208 24 model """distmult""" +208 24 loss """marginranking""" +208 24 regularizer """no""" +208 24 optimizer """adam""" +208 24 training_loop """owa""" +208 24 negative_sampler """basic""" +208 24 evaluator """rankbased""" +208 25 dataset """wn18rr""" +208 25 model """distmult""" +208 25 loss """marginranking""" +208 25 regularizer """no""" +208 25 optimizer """adam""" +208 25 training_loop """owa""" +208 25 negative_sampler """basic""" +208 25 evaluator """rankbased""" +208 26 dataset """wn18rr""" +208 26 model """distmult""" +208 26 loss """marginranking""" +208 26 regularizer """no""" +208 26 optimizer """adam""" +208 
26 training_loop """owa""" +208 26 negative_sampler """basic""" +208 26 evaluator """rankbased""" +208 27 dataset """wn18rr""" +208 27 model """distmult""" +208 27 loss """marginranking""" +208 27 regularizer """no""" +208 27 optimizer """adam""" +208 27 training_loop """owa""" +208 27 negative_sampler """basic""" +208 27 evaluator """rankbased""" +208 28 dataset """wn18rr""" +208 28 model """distmult""" +208 28 loss """marginranking""" +208 28 regularizer """no""" +208 28 optimizer """adam""" +208 28 training_loop """owa""" +208 28 negative_sampler """basic""" +208 28 evaluator """rankbased""" +208 29 dataset """wn18rr""" +208 29 model """distmult""" +208 29 loss """marginranking""" +208 29 regularizer """no""" +208 29 optimizer """adam""" +208 29 training_loop """owa""" +208 29 negative_sampler """basic""" +208 29 evaluator """rankbased""" +208 30 dataset """wn18rr""" +208 30 model """distmult""" +208 30 loss """marginranking""" +208 30 regularizer """no""" +208 30 optimizer """adam""" +208 30 training_loop """owa""" +208 30 negative_sampler """basic""" +208 30 evaluator """rankbased""" +208 31 dataset """wn18rr""" +208 31 model """distmult""" +208 31 loss """marginranking""" +208 31 regularizer """no""" +208 31 optimizer """adam""" +208 31 training_loop """owa""" +208 31 negative_sampler """basic""" +208 31 evaluator """rankbased""" +208 32 dataset """wn18rr""" +208 32 model """distmult""" +208 32 loss """marginranking""" +208 32 regularizer """no""" +208 32 optimizer """adam""" +208 32 training_loop """owa""" +208 32 negative_sampler """basic""" +208 32 evaluator """rankbased""" +208 33 dataset """wn18rr""" +208 33 model """distmult""" +208 33 loss """marginranking""" +208 33 regularizer """no""" +208 33 optimizer """adam""" +208 33 training_loop """owa""" +208 33 negative_sampler """basic""" +208 33 evaluator """rankbased""" +208 34 dataset """wn18rr""" +208 34 model """distmult""" +208 34 loss """marginranking""" +208 34 regularizer """no""" +208 34 optimizer 
"""adam""" +208 34 training_loop """owa""" +208 34 negative_sampler """basic""" +208 34 evaluator """rankbased""" +208 35 dataset """wn18rr""" +208 35 model """distmult""" +208 35 loss """marginranking""" +208 35 regularizer """no""" +208 35 optimizer """adam""" +208 35 training_loop """owa""" +208 35 negative_sampler """basic""" +208 35 evaluator """rankbased""" +208 36 dataset """wn18rr""" +208 36 model """distmult""" +208 36 loss """marginranking""" +208 36 regularizer """no""" +208 36 optimizer """adam""" +208 36 training_loop """owa""" +208 36 negative_sampler """basic""" +208 36 evaluator """rankbased""" +208 37 dataset """wn18rr""" +208 37 model """distmult""" +208 37 loss """marginranking""" +208 37 regularizer """no""" +208 37 optimizer """adam""" +208 37 training_loop """owa""" +208 37 negative_sampler """basic""" +208 37 evaluator """rankbased""" +208 38 dataset """wn18rr""" +208 38 model """distmult""" +208 38 loss """marginranking""" +208 38 regularizer """no""" +208 38 optimizer """adam""" +208 38 training_loop """owa""" +208 38 negative_sampler """basic""" +208 38 evaluator """rankbased""" +208 39 dataset """wn18rr""" +208 39 model """distmult""" +208 39 loss """marginranking""" +208 39 regularizer """no""" +208 39 optimizer """adam""" +208 39 training_loop """owa""" +208 39 negative_sampler """basic""" +208 39 evaluator """rankbased""" +208 40 dataset """wn18rr""" +208 40 model """distmult""" +208 40 loss """marginranking""" +208 40 regularizer """no""" +208 40 optimizer """adam""" +208 40 training_loop """owa""" +208 40 negative_sampler """basic""" +208 40 evaluator """rankbased""" +208 41 dataset """wn18rr""" +208 41 model """distmult""" +208 41 loss """marginranking""" +208 41 regularizer """no""" +208 41 optimizer """adam""" +208 41 training_loop """owa""" +208 41 negative_sampler """basic""" +208 41 evaluator """rankbased""" +208 42 dataset """wn18rr""" +208 42 model """distmult""" +208 42 loss """marginranking""" +208 42 regularizer """no""" 
+208 42 optimizer """adam""" +208 42 training_loop """owa""" +208 42 negative_sampler """basic""" +208 42 evaluator """rankbased""" +208 43 dataset """wn18rr""" +208 43 model """distmult""" +208 43 loss """marginranking""" +208 43 regularizer """no""" +208 43 optimizer """adam""" +208 43 training_loop """owa""" +208 43 negative_sampler """basic""" +208 43 evaluator """rankbased""" +208 44 dataset """wn18rr""" +208 44 model """distmult""" +208 44 loss """marginranking""" +208 44 regularizer """no""" +208 44 optimizer """adam""" +208 44 training_loop """owa""" +208 44 negative_sampler """basic""" +208 44 evaluator """rankbased""" +208 45 dataset """wn18rr""" +208 45 model """distmult""" +208 45 loss """marginranking""" +208 45 regularizer """no""" +208 45 optimizer """adam""" +208 45 training_loop """owa""" +208 45 negative_sampler """basic""" +208 45 evaluator """rankbased""" +208 46 dataset """wn18rr""" +208 46 model """distmult""" +208 46 loss """marginranking""" +208 46 regularizer """no""" +208 46 optimizer """adam""" +208 46 training_loop """owa""" +208 46 negative_sampler """basic""" +208 46 evaluator """rankbased""" +208 47 dataset """wn18rr""" +208 47 model """distmult""" +208 47 loss """marginranking""" +208 47 regularizer """no""" +208 47 optimizer """adam""" +208 47 training_loop """owa""" +208 47 negative_sampler """basic""" +208 47 evaluator """rankbased""" +208 48 dataset """wn18rr""" +208 48 model """distmult""" +208 48 loss """marginranking""" +208 48 regularizer """no""" +208 48 optimizer """adam""" +208 48 training_loop """owa""" +208 48 negative_sampler """basic""" +208 48 evaluator """rankbased""" +208 49 dataset """wn18rr""" +208 49 model """distmult""" +208 49 loss """marginranking""" +208 49 regularizer """no""" +208 49 optimizer """adam""" +208 49 training_loop """owa""" +208 49 negative_sampler """basic""" +208 49 evaluator """rankbased""" +208 50 dataset """wn18rr""" +208 50 model """distmult""" +208 50 loss """marginranking""" +208 50 
regularizer """no""" +208 50 optimizer """adam""" +208 50 training_loop """owa""" +208 50 negative_sampler """basic""" +208 50 evaluator """rankbased""" +208 51 dataset """wn18rr""" +208 51 model """distmult""" +208 51 loss """marginranking""" +208 51 regularizer """no""" +208 51 optimizer """adam""" +208 51 training_loop """owa""" +208 51 negative_sampler """basic""" +208 51 evaluator """rankbased""" +208 52 dataset """wn18rr""" +208 52 model """distmult""" +208 52 loss """marginranking""" +208 52 regularizer """no""" +208 52 optimizer """adam""" +208 52 training_loop """owa""" +208 52 negative_sampler """basic""" +208 52 evaluator """rankbased""" +208 53 dataset """wn18rr""" +208 53 model """distmult""" +208 53 loss """marginranking""" +208 53 regularizer """no""" +208 53 optimizer """adam""" +208 53 training_loop """owa""" +208 53 negative_sampler """basic""" +208 53 evaluator """rankbased""" +208 54 dataset """wn18rr""" +208 54 model """distmult""" +208 54 loss """marginranking""" +208 54 regularizer """no""" +208 54 optimizer """adam""" +208 54 training_loop """owa""" +208 54 negative_sampler """basic""" +208 54 evaluator """rankbased""" +208 55 dataset """wn18rr""" +208 55 model """distmult""" +208 55 loss """marginranking""" +208 55 regularizer """no""" +208 55 optimizer """adam""" +208 55 training_loop """owa""" +208 55 negative_sampler """basic""" +208 55 evaluator """rankbased""" +208 56 dataset """wn18rr""" +208 56 model """distmult""" +208 56 loss """marginranking""" +208 56 regularizer """no""" +208 56 optimizer """adam""" +208 56 training_loop """owa""" +208 56 negative_sampler """basic""" +208 56 evaluator """rankbased""" +208 57 dataset """wn18rr""" +208 57 model """distmult""" +208 57 loss """marginranking""" +208 57 regularizer """no""" +208 57 optimizer """adam""" +208 57 training_loop """owa""" +208 57 negative_sampler """basic""" +208 57 evaluator """rankbased""" +208 58 dataset """wn18rr""" +208 58 model """distmult""" +208 58 loss 
"""marginranking""" +208 58 regularizer """no""" +208 58 optimizer """adam""" +208 58 training_loop """owa""" +208 58 negative_sampler """basic""" +208 58 evaluator """rankbased""" +208 59 dataset """wn18rr""" +208 59 model """distmult""" +208 59 loss """marginranking""" +208 59 regularizer """no""" +208 59 optimizer """adam""" +208 59 training_loop """owa""" +208 59 negative_sampler """basic""" +208 59 evaluator """rankbased""" +208 60 dataset """wn18rr""" +208 60 model """distmult""" +208 60 loss """marginranking""" +208 60 regularizer """no""" +208 60 optimizer """adam""" +208 60 training_loop """owa""" +208 60 negative_sampler """basic""" +208 60 evaluator """rankbased""" +208 61 dataset """wn18rr""" +208 61 model """distmult""" +208 61 loss """marginranking""" +208 61 regularizer """no""" +208 61 optimizer """adam""" +208 61 training_loop """owa""" +208 61 negative_sampler """basic""" +208 61 evaluator """rankbased""" +209 1 model.embedding_dim 2.0 +209 1 loss.margin 3.2404843711492686 +209 1 optimizer.lr 0.006780268568894968 +209 1 negative_sampler.num_negs_per_pos 4.0 +209 1 training.batch_size 0.0 +209 2 model.embedding_dim 2.0 +209 2 loss.margin 8.188039598685583 +209 2 optimizer.lr 0.011172574078121281 +209 2 negative_sampler.num_negs_per_pos 14.0 +209 2 training.batch_size 0.0 +209 3 model.embedding_dim 0.0 +209 3 loss.margin 3.308682091022428 +209 3 optimizer.lr 0.006228155627670482 +209 3 negative_sampler.num_negs_per_pos 61.0 +209 3 training.batch_size 1.0 +209 4 model.embedding_dim 0.0 +209 4 loss.margin 8.54593177147149 +209 4 optimizer.lr 0.0017706913620258063 +209 4 negative_sampler.num_negs_per_pos 31.0 +209 4 training.batch_size 1.0 +209 5 model.embedding_dim 1.0 +209 5 loss.margin 5.1272770325203805 +209 5 optimizer.lr 0.035054419749920254 +209 5 negative_sampler.num_negs_per_pos 17.0 +209 5 training.batch_size 2.0 +209 6 model.embedding_dim 0.0 +209 6 loss.margin 5.812289137289441 +209 6 optimizer.lr 0.005018597939712068 +209 6 
negative_sampler.num_negs_per_pos 84.0 +209 6 training.batch_size 2.0 +209 7 model.embedding_dim 1.0 +209 7 loss.margin 6.628004654630413 +209 7 optimizer.lr 0.0557463620138345 +209 7 negative_sampler.num_negs_per_pos 74.0 +209 7 training.batch_size 1.0 +209 8 model.embedding_dim 1.0 +209 8 loss.margin 2.5620725011575027 +209 8 optimizer.lr 0.0022531378246876286 +209 8 negative_sampler.num_negs_per_pos 11.0 +209 8 training.batch_size 2.0 +209 9 model.embedding_dim 2.0 +209 9 loss.margin 3.1671104856244776 +209 9 optimizer.lr 0.022968928701326618 +209 9 negative_sampler.num_negs_per_pos 1.0 +209 9 training.batch_size 2.0 +209 10 model.embedding_dim 1.0 +209 10 loss.margin 8.493840854430944 +209 10 optimizer.lr 0.0017184059031469829 +209 10 negative_sampler.num_negs_per_pos 55.0 +209 10 training.batch_size 1.0 +209 11 model.embedding_dim 2.0 +209 11 loss.margin 2.893707607556618 +209 11 optimizer.lr 0.005756860993111515 +209 11 negative_sampler.num_negs_per_pos 0.0 +209 11 training.batch_size 1.0 +209 12 model.embedding_dim 2.0 +209 12 loss.margin 8.342854193937733 +209 12 optimizer.lr 0.013143536358368442 +209 12 negative_sampler.num_negs_per_pos 20.0 +209 12 training.batch_size 0.0 +209 13 model.embedding_dim 1.0 +209 13 loss.margin 8.84755405267853 +209 13 optimizer.lr 0.09884526888099698 +209 13 negative_sampler.num_negs_per_pos 7.0 +209 13 training.batch_size 1.0 +209 14 model.embedding_dim 2.0 +209 14 loss.margin 5.196822618052518 +209 14 optimizer.lr 0.07247786305148184 +209 14 negative_sampler.num_negs_per_pos 27.0 +209 14 training.batch_size 2.0 +209 15 model.embedding_dim 0.0 +209 15 loss.margin 3.9387260568032265 +209 15 optimizer.lr 0.004394430945962332 +209 15 negative_sampler.num_negs_per_pos 57.0 +209 15 training.batch_size 0.0 +209 16 model.embedding_dim 1.0 +209 16 loss.margin 3.221991560127953 +209 16 optimizer.lr 0.014921015976132707 +209 16 negative_sampler.num_negs_per_pos 89.0 +209 16 training.batch_size 0.0 +209 17 model.embedding_dim 0.0 +209 
17 loss.margin 2.0703292706213667 +209 17 optimizer.lr 0.0021304331026615045 +209 17 negative_sampler.num_negs_per_pos 76.0 +209 17 training.batch_size 1.0 +209 18 model.embedding_dim 0.0 +209 18 loss.margin 0.7175534781280692 +209 18 optimizer.lr 0.04647832002039243 +209 18 negative_sampler.num_negs_per_pos 53.0 +209 18 training.batch_size 0.0 +209 19 model.embedding_dim 2.0 +209 19 loss.margin 3.766056642729967 +209 19 optimizer.lr 0.001261721535229223 +209 19 negative_sampler.num_negs_per_pos 11.0 +209 19 training.batch_size 2.0 +209 20 model.embedding_dim 0.0 +209 20 loss.margin 9.126644654045199 +209 20 optimizer.lr 0.059654533251402964 +209 20 negative_sampler.num_negs_per_pos 48.0 +209 20 training.batch_size 1.0 +209 21 model.embedding_dim 0.0 +209 21 loss.margin 1.6241719277418971 +209 21 optimizer.lr 0.026230214726916768 +209 21 negative_sampler.num_negs_per_pos 61.0 +209 21 training.batch_size 2.0 +209 22 model.embedding_dim 2.0 +209 22 loss.margin 4.084441702023245 +209 22 optimizer.lr 0.0028808075522008436 +209 22 negative_sampler.num_negs_per_pos 54.0 +209 22 training.batch_size 0.0 +209 23 model.embedding_dim 1.0 +209 23 loss.margin 3.230612724411931 +209 23 optimizer.lr 0.0024730940126247414 +209 23 negative_sampler.num_negs_per_pos 23.0 +209 23 training.batch_size 2.0 +209 24 model.embedding_dim 0.0 +209 24 loss.margin 3.4453942772371966 +209 24 optimizer.lr 0.0014185462072142713 +209 24 negative_sampler.num_negs_per_pos 63.0 +209 24 training.batch_size 2.0 +209 25 model.embedding_dim 2.0 +209 25 loss.margin 3.0615560440039506 +209 25 optimizer.lr 0.08948290391790163 +209 25 negative_sampler.num_negs_per_pos 92.0 +209 25 training.batch_size 1.0 +209 26 model.embedding_dim 0.0 +209 26 loss.margin 8.750894225551429 +209 26 optimizer.lr 0.003957936366839362 +209 26 negative_sampler.num_negs_per_pos 25.0 +209 26 training.batch_size 2.0 +209 27 model.embedding_dim 1.0 +209 27 loss.margin 6.134199397035094 +209 27 optimizer.lr 0.028591856356865637 +209 27 
negative_sampler.num_negs_per_pos 32.0 +209 27 training.batch_size 2.0 +209 28 model.embedding_dim 1.0 +209 28 loss.margin 1.5727344341332126 +209 28 optimizer.lr 0.07039232926755326 +209 28 negative_sampler.num_negs_per_pos 19.0 +209 28 training.batch_size 0.0 +209 29 model.embedding_dim 1.0 +209 29 loss.margin 2.5015265846965535 +209 29 optimizer.lr 0.0068586762837126005 +209 29 negative_sampler.num_negs_per_pos 86.0 +209 29 training.batch_size 1.0 +209 30 model.embedding_dim 2.0 +209 30 loss.margin 2.2782647617190457 +209 30 optimizer.lr 0.002985809479356611 +209 30 negative_sampler.num_negs_per_pos 48.0 +209 30 training.batch_size 0.0 +209 31 model.embedding_dim 0.0 +209 31 loss.margin 7.1789845819082245 +209 31 optimizer.lr 0.05052811417804674 +209 31 negative_sampler.num_negs_per_pos 91.0 +209 31 training.batch_size 1.0 +209 32 model.embedding_dim 1.0 +209 32 loss.margin 2.1464064906615907 +209 32 optimizer.lr 0.0020533955792069254 +209 32 negative_sampler.num_negs_per_pos 7.0 +209 32 training.batch_size 2.0 +209 33 model.embedding_dim 1.0 +209 33 loss.margin 9.265679825947233 +209 33 optimizer.lr 0.09764633836574535 +209 33 negative_sampler.num_negs_per_pos 18.0 +209 33 training.batch_size 0.0 +209 34 model.embedding_dim 1.0 +209 34 loss.margin 9.990365674122458 +209 34 optimizer.lr 0.09287559629028867 +209 34 negative_sampler.num_negs_per_pos 0.0 +209 34 training.batch_size 1.0 +209 35 model.embedding_dim 2.0 +209 35 loss.margin 1.814188729693386 +209 35 optimizer.lr 0.004910626703640339 +209 35 negative_sampler.num_negs_per_pos 47.0 +209 35 training.batch_size 0.0 +209 36 model.embedding_dim 2.0 +209 36 loss.margin 6.908431608293659 +209 36 optimizer.lr 0.0018934216922214758 +209 36 negative_sampler.num_negs_per_pos 64.0 +209 36 training.batch_size 0.0 +209 37 model.embedding_dim 1.0 +209 37 loss.margin 0.5994142667112767 +209 37 optimizer.lr 0.004141878253918505 +209 37 negative_sampler.num_negs_per_pos 52.0 +209 37 training.batch_size 2.0 +209 38 
model.embedding_dim 1.0 +209 38 loss.margin 5.1979311631643625 +209 38 optimizer.lr 0.005868612381209194 +209 38 negative_sampler.num_negs_per_pos 58.0 +209 38 training.batch_size 2.0 +209 39 model.embedding_dim 1.0 +209 39 loss.margin 8.45124666431256 +209 39 optimizer.lr 0.0030246306367032296 +209 39 negative_sampler.num_negs_per_pos 75.0 +209 39 training.batch_size 0.0 +209 40 model.embedding_dim 2.0 +209 40 loss.margin 9.676235467977945 +209 40 optimizer.lr 0.06788711095414815 +209 40 negative_sampler.num_negs_per_pos 12.0 +209 40 training.batch_size 1.0 +209 41 model.embedding_dim 0.0 +209 41 loss.margin 9.615430720812464 +209 41 optimizer.lr 0.0013022246983848437 +209 41 negative_sampler.num_negs_per_pos 65.0 +209 41 training.batch_size 0.0 +209 42 model.embedding_dim 1.0 +209 42 loss.margin 5.308610440755992 +209 42 optimizer.lr 0.003357870308833218 +209 42 negative_sampler.num_negs_per_pos 88.0 +209 42 training.batch_size 2.0 +209 43 model.embedding_dim 0.0 +209 43 loss.margin 3.895164369889387 +209 43 optimizer.lr 0.023201901013627558 +209 43 negative_sampler.num_negs_per_pos 20.0 +209 43 training.batch_size 1.0 +209 44 model.embedding_dim 0.0 +209 44 loss.margin 4.9551532364643815 +209 44 optimizer.lr 0.06866645469536421 +209 44 negative_sampler.num_negs_per_pos 33.0 +209 44 training.batch_size 1.0 +209 45 model.embedding_dim 0.0 +209 45 loss.margin 4.8695056397354834 +209 45 optimizer.lr 0.012088239178558485 +209 45 negative_sampler.num_negs_per_pos 30.0 +209 45 training.batch_size 1.0 +209 46 model.embedding_dim 1.0 +209 46 loss.margin 5.920965449140436 +209 46 optimizer.lr 0.06311879857320819 +209 46 negative_sampler.num_negs_per_pos 16.0 +209 46 training.batch_size 1.0 +209 47 model.embedding_dim 1.0 +209 47 loss.margin 9.084400731678459 +209 47 optimizer.lr 0.0759048515276636 +209 47 negative_sampler.num_negs_per_pos 54.0 +209 47 training.batch_size 0.0 +209 48 model.embedding_dim 1.0 +209 48 loss.margin 8.912083880834318 +209 48 optimizer.lr 
0.0023404091156808775 +209 48 negative_sampler.num_negs_per_pos 88.0 +209 48 training.batch_size 0.0 +209 49 model.embedding_dim 2.0 +209 49 loss.margin 3.8094753236601573 +209 49 optimizer.lr 0.0053378534101233424 +209 49 negative_sampler.num_negs_per_pos 79.0 +209 49 training.batch_size 2.0 +209 50 model.embedding_dim 2.0 +209 50 loss.margin 6.975141567028368 +209 50 optimizer.lr 0.0013941607308249392 +209 50 negative_sampler.num_negs_per_pos 40.0 +209 50 training.batch_size 2.0 +209 51 model.embedding_dim 1.0 +209 51 loss.margin 7.235151842461668 +209 51 optimizer.lr 0.01808054588926905 +209 51 negative_sampler.num_negs_per_pos 29.0 +209 51 training.batch_size 0.0 +209 52 model.embedding_dim 1.0 +209 52 loss.margin 6.749931471230209 +209 52 optimizer.lr 0.05719371407078953 +209 52 negative_sampler.num_negs_per_pos 64.0 +209 52 training.batch_size 0.0 +209 53 model.embedding_dim 2.0 +209 53 loss.margin 5.799067442025539 +209 53 optimizer.lr 0.00466364805209014 +209 53 negative_sampler.num_negs_per_pos 84.0 +209 53 training.batch_size 2.0 +209 54 model.embedding_dim 1.0 +209 54 loss.margin 4.903785238234184 +209 54 optimizer.lr 0.08808279373973332 +209 54 negative_sampler.num_negs_per_pos 84.0 +209 54 training.batch_size 1.0 +209 55 model.embedding_dim 1.0 +209 55 loss.margin 5.934211590130689 +209 55 optimizer.lr 0.011100618698508818 +209 55 negative_sampler.num_negs_per_pos 92.0 +209 55 training.batch_size 2.0 +209 56 model.embedding_dim 2.0 +209 56 loss.margin 8.617252272282071 +209 56 optimizer.lr 0.005353490904177028 +209 56 negative_sampler.num_negs_per_pos 93.0 +209 56 training.batch_size 0.0 +209 57 model.embedding_dim 1.0 +209 57 loss.margin 5.642899344750286 +209 57 optimizer.lr 0.01742508747323432 +209 57 negative_sampler.num_negs_per_pos 78.0 +209 57 training.batch_size 1.0 +209 58 model.embedding_dim 2.0 +209 58 loss.margin 5.499619857214358 +209 58 optimizer.lr 0.01862679907482343 +209 58 negative_sampler.num_negs_per_pos 25.0 +209 58 
training.batch_size 1.0 +209 59 model.embedding_dim 0.0 +209 59 loss.margin 1.2434752491066137 +209 59 optimizer.lr 0.037947033024945565 +209 59 negative_sampler.num_negs_per_pos 37.0 +209 59 training.batch_size 2.0 +209 60 model.embedding_dim 2.0 +209 60 loss.margin 6.808369662695528 +209 60 optimizer.lr 0.00791590475932728 +209 60 negative_sampler.num_negs_per_pos 77.0 +209 60 training.batch_size 2.0 +209 61 model.embedding_dim 0.0 +209 61 loss.margin 3.168855618272456 +209 61 optimizer.lr 0.008179773044872876 +209 61 negative_sampler.num_negs_per_pos 75.0 +209 61 training.batch_size 1.0 +209 62 model.embedding_dim 1.0 +209 62 loss.margin 5.682303223124883 +209 62 optimizer.lr 0.003828743181526494 +209 62 negative_sampler.num_negs_per_pos 98.0 +209 62 training.batch_size 2.0 +209 63 model.embedding_dim 2.0 +209 63 loss.margin 3.9704230661126885 +209 63 optimizer.lr 0.015371970051343716 +209 63 negative_sampler.num_negs_per_pos 24.0 +209 63 training.batch_size 0.0 +209 64 model.embedding_dim 2.0 +209 64 loss.margin 5.820698978084446 +209 64 optimizer.lr 0.0035230452100542646 +209 64 negative_sampler.num_negs_per_pos 76.0 +209 64 training.batch_size 2.0 +209 65 model.embedding_dim 1.0 +209 65 loss.margin 5.108336877608956 +209 65 optimizer.lr 0.017982738173219517 +209 65 negative_sampler.num_negs_per_pos 1.0 +209 65 training.batch_size 1.0 +209 66 model.embedding_dim 0.0 +209 66 loss.margin 8.20852506994471 +209 66 optimizer.lr 0.06199580548041463 +209 66 negative_sampler.num_negs_per_pos 35.0 +209 66 training.batch_size 1.0 +209 67 model.embedding_dim 0.0 +209 67 loss.margin 4.557233312044796 +209 67 optimizer.lr 0.003807623586214823 +209 67 negative_sampler.num_negs_per_pos 25.0 +209 67 training.batch_size 0.0 +209 68 model.embedding_dim 2.0 +209 68 loss.margin 4.010461477946006 +209 68 optimizer.lr 0.003979243831890988 +209 68 negative_sampler.num_negs_per_pos 78.0 +209 68 training.batch_size 2.0 +209 69 model.embedding_dim 2.0 +209 69 loss.margin 
7.010690243976358 +209 69 optimizer.lr 0.0028335169244188925 +209 69 negative_sampler.num_negs_per_pos 72.0 +209 69 training.batch_size 1.0 +209 70 model.embedding_dim 1.0 +209 70 loss.margin 7.39519213081683 +209 70 optimizer.lr 0.01640105095078199 +209 70 negative_sampler.num_negs_per_pos 36.0 +209 70 training.batch_size 1.0 +209 71 model.embedding_dim 0.0 +209 71 loss.margin 9.743062165858197 +209 71 optimizer.lr 0.017012922246130775 +209 71 negative_sampler.num_negs_per_pos 49.0 +209 71 training.batch_size 1.0 +209 72 model.embedding_dim 0.0 +209 72 loss.margin 7.102855630586513 +209 72 optimizer.lr 0.014211465622038087 +209 72 negative_sampler.num_negs_per_pos 17.0 +209 72 training.batch_size 1.0 +209 73 model.embedding_dim 2.0 +209 73 loss.margin 5.887476236179057 +209 73 optimizer.lr 0.0018767934439567876 +209 73 negative_sampler.num_negs_per_pos 4.0 +209 73 training.batch_size 1.0 +209 74 model.embedding_dim 0.0 +209 74 loss.margin 6.44849945711917 +209 74 optimizer.lr 0.04316829752849615 +209 74 negative_sampler.num_negs_per_pos 58.0 +209 74 training.batch_size 1.0 +209 75 model.embedding_dim 0.0 +209 75 loss.margin 3.59224701411453 +209 75 optimizer.lr 0.018489851319664283 +209 75 negative_sampler.num_negs_per_pos 79.0 +209 75 training.batch_size 1.0 +209 76 model.embedding_dim 0.0 +209 76 loss.margin 7.907451102922346 +209 76 optimizer.lr 0.002524724436210405 +209 76 negative_sampler.num_negs_per_pos 54.0 +209 76 training.batch_size 1.0 +209 77 model.embedding_dim 2.0 +209 77 loss.margin 9.618329912470703 +209 77 optimizer.lr 0.05932414253141143 +209 77 negative_sampler.num_negs_per_pos 73.0 +209 77 training.batch_size 1.0 +209 78 model.embedding_dim 1.0 +209 78 loss.margin 5.944522022908089 +209 78 optimizer.lr 0.0030759319326847554 +209 78 negative_sampler.num_negs_per_pos 59.0 +209 78 training.batch_size 0.0 +209 79 model.embedding_dim 2.0 +209 79 loss.margin 3.9117528855745562 +209 79 optimizer.lr 0.040463160820555885 +209 79 
negative_sampler.num_negs_per_pos 68.0 +209 79 training.batch_size 2.0 +209 80 model.embedding_dim 2.0 +209 80 loss.margin 2.7052296767460065 +209 80 optimizer.lr 0.0030447484161926857 +209 80 negative_sampler.num_negs_per_pos 46.0 +209 80 training.batch_size 0.0 +209 81 model.embedding_dim 1.0 +209 81 loss.margin 2.0892166250790467 +209 81 optimizer.lr 0.09015591873147064 +209 81 negative_sampler.num_negs_per_pos 15.0 +209 81 training.batch_size 0.0 +209 82 model.embedding_dim 0.0 +209 82 loss.margin 9.579122897201856 +209 82 optimizer.lr 0.0345681075348796 +209 82 negative_sampler.num_negs_per_pos 1.0 +209 82 training.batch_size 1.0 +209 83 model.embedding_dim 1.0 +209 83 loss.margin 5.191740305477581 +209 83 optimizer.lr 0.00140394666053742 +209 83 negative_sampler.num_negs_per_pos 69.0 +209 83 training.batch_size 1.0 +209 84 model.embedding_dim 0.0 +209 84 loss.margin 9.583882917684415 +209 84 optimizer.lr 0.0026905916314343566 +209 84 negative_sampler.num_negs_per_pos 20.0 +209 84 training.batch_size 2.0 +209 85 model.embedding_dim 1.0 +209 85 loss.margin 4.650032004695391 +209 85 optimizer.lr 0.033200760090604645 +209 85 negative_sampler.num_negs_per_pos 42.0 +209 85 training.batch_size 2.0 +209 86 model.embedding_dim 1.0 +209 86 loss.margin 7.208382821596162 +209 86 optimizer.lr 0.001266719196754954 +209 86 negative_sampler.num_negs_per_pos 80.0 +209 86 training.batch_size 1.0 +209 87 model.embedding_dim 1.0 +209 87 loss.margin 6.3110794366713545 +209 87 optimizer.lr 0.02867151060793093 +209 87 negative_sampler.num_negs_per_pos 9.0 +209 87 training.batch_size 1.0 +209 88 model.embedding_dim 1.0 +209 88 loss.margin 7.245789311784114 +209 88 optimizer.lr 0.029875932372591302 +209 88 negative_sampler.num_negs_per_pos 50.0 +209 88 training.batch_size 2.0 +209 89 model.embedding_dim 0.0 +209 89 loss.margin 2.1729354625475397 +209 89 optimizer.lr 0.032686760600107716 +209 89 negative_sampler.num_negs_per_pos 63.0 +209 89 training.batch_size 0.0 +209 90 
model.embedding_dim 1.0 +209 90 loss.margin 3.6088550050428014 +209 90 optimizer.lr 0.015641168371991594 +209 90 negative_sampler.num_negs_per_pos 7.0 +209 90 training.batch_size 1.0 +209 91 model.embedding_dim 0.0 +209 91 loss.margin 5.336751420799692 +209 91 optimizer.lr 0.02040893948953912 +209 91 negative_sampler.num_negs_per_pos 20.0 +209 91 training.batch_size 0.0 +209 92 model.embedding_dim 1.0 +209 92 loss.margin 6.857035788099924 +209 92 optimizer.lr 0.023507004555915523 +209 92 negative_sampler.num_negs_per_pos 82.0 +209 92 training.batch_size 2.0 +209 93 model.embedding_dim 1.0 +209 93 loss.margin 5.918836730498864 +209 93 optimizer.lr 0.002242777856310815 +209 93 negative_sampler.num_negs_per_pos 37.0 +209 93 training.batch_size 2.0 +209 94 model.embedding_dim 1.0 +209 94 loss.margin 9.985300845726325 +209 94 optimizer.lr 0.07607092918441316 +209 94 negative_sampler.num_negs_per_pos 65.0 +209 94 training.batch_size 2.0 +209 95 model.embedding_dim 1.0 +209 95 loss.margin 5.10404843754811 +209 95 optimizer.lr 0.0013937030417792392 +209 95 negative_sampler.num_negs_per_pos 55.0 +209 95 training.batch_size 2.0 +209 96 model.embedding_dim 0.0 +209 96 loss.margin 9.596164802668017 +209 96 optimizer.lr 0.05172456798733188 +209 96 negative_sampler.num_negs_per_pos 54.0 +209 96 training.batch_size 1.0 +209 97 model.embedding_dim 1.0 +209 97 loss.margin 0.9603015717318651 +209 97 optimizer.lr 0.0012489813631697453 +209 97 negative_sampler.num_negs_per_pos 74.0 +209 97 training.batch_size 1.0 +209 98 model.embedding_dim 2.0 +209 98 loss.margin 2.9991705112735083 +209 98 optimizer.lr 0.03595908510077139 +209 98 negative_sampler.num_negs_per_pos 1.0 +209 98 training.batch_size 1.0 +209 99 model.embedding_dim 0.0 +209 99 loss.margin 1.7700634263822654 +209 99 optimizer.lr 0.001178735472175985 +209 99 negative_sampler.num_negs_per_pos 33.0 +209 99 training.batch_size 1.0 +209 100 model.embedding_dim 0.0 +209 100 loss.margin 8.46004540586289 +209 100 optimizer.lr 
0.009024640764426438 +209 100 negative_sampler.num_negs_per_pos 48.0 +209 100 training.batch_size 2.0 +209 1 dataset """wn18rr""" +209 1 model """distmult""" +209 1 loss """marginranking""" +209 1 regularizer """no""" +209 1 optimizer """adam""" +209 1 training_loop """owa""" +209 1 negative_sampler """basic""" +209 1 evaluator """rankbased""" +209 2 dataset """wn18rr""" +209 2 model """distmult""" +209 2 loss """marginranking""" +209 2 regularizer """no""" +209 2 optimizer """adam""" +209 2 training_loop """owa""" +209 2 negative_sampler """basic""" +209 2 evaluator """rankbased""" +209 3 dataset """wn18rr""" +209 3 model """distmult""" +209 3 loss """marginranking""" +209 3 regularizer """no""" +209 3 optimizer """adam""" +209 3 training_loop """owa""" +209 3 negative_sampler """basic""" +209 3 evaluator """rankbased""" +209 4 dataset """wn18rr""" +209 4 model """distmult""" +209 4 loss """marginranking""" +209 4 regularizer """no""" +209 4 optimizer """adam""" +209 4 training_loop """owa""" +209 4 negative_sampler """basic""" +209 4 evaluator """rankbased""" +209 5 dataset """wn18rr""" +209 5 model """distmult""" +209 5 loss """marginranking""" +209 5 regularizer """no""" +209 5 optimizer """adam""" +209 5 training_loop """owa""" +209 5 negative_sampler """basic""" +209 5 evaluator """rankbased""" +209 6 dataset """wn18rr""" +209 6 model """distmult""" +209 6 loss """marginranking""" +209 6 regularizer """no""" +209 6 optimizer """adam""" +209 6 training_loop """owa""" +209 6 negative_sampler """basic""" +209 6 evaluator """rankbased""" +209 7 dataset """wn18rr""" +209 7 model """distmult""" +209 7 loss """marginranking""" +209 7 regularizer """no""" +209 7 optimizer """adam""" +209 7 training_loop """owa""" +209 7 negative_sampler """basic""" +209 7 evaluator """rankbased""" +209 8 dataset """wn18rr""" +209 8 model """distmult""" +209 8 loss """marginranking""" +209 8 regularizer """no""" +209 8 optimizer """adam""" +209 8 training_loop """owa""" +209 8 
negative_sampler """basic""" +209 8 evaluator """rankbased""" +209 9 dataset """wn18rr""" +209 9 model """distmult""" +209 9 loss """marginranking""" +209 9 regularizer """no""" +209 9 optimizer """adam""" +209 9 training_loop """owa""" +209 9 negative_sampler """basic""" +209 9 evaluator """rankbased""" +209 10 dataset """wn18rr""" +209 10 model """distmult""" +209 10 loss """marginranking""" +209 10 regularizer """no""" +209 10 optimizer """adam""" +209 10 training_loop """owa""" +209 10 negative_sampler """basic""" +209 10 evaluator """rankbased""" +209 11 dataset """wn18rr""" +209 11 model """distmult""" +209 11 loss """marginranking""" +209 11 regularizer """no""" +209 11 optimizer """adam""" +209 11 training_loop """owa""" +209 11 negative_sampler """basic""" +209 11 evaluator """rankbased""" +209 12 dataset """wn18rr""" +209 12 model """distmult""" +209 12 loss """marginranking""" +209 12 regularizer """no""" +209 12 optimizer """adam""" +209 12 training_loop """owa""" +209 12 negative_sampler """basic""" +209 12 evaluator """rankbased""" +209 13 dataset """wn18rr""" +209 13 model """distmult""" +209 13 loss """marginranking""" +209 13 regularizer """no""" +209 13 optimizer """adam""" +209 13 training_loop """owa""" +209 13 negative_sampler """basic""" +209 13 evaluator """rankbased""" +209 14 dataset """wn18rr""" +209 14 model """distmult""" +209 14 loss """marginranking""" +209 14 regularizer """no""" +209 14 optimizer """adam""" +209 14 training_loop """owa""" +209 14 negative_sampler """basic""" +209 14 evaluator """rankbased""" +209 15 dataset """wn18rr""" +209 15 model """distmult""" +209 15 loss """marginranking""" +209 15 regularizer """no""" +209 15 optimizer """adam""" +209 15 training_loop """owa""" +209 15 negative_sampler """basic""" +209 15 evaluator """rankbased""" +209 16 dataset """wn18rr""" +209 16 model """distmult""" +209 16 loss """marginranking""" +209 16 regularizer """no""" +209 16 optimizer """adam""" +209 16 training_loop """owa""" 
+209 16 negative_sampler """basic""" +209 16 evaluator """rankbased""" +209 17 dataset """wn18rr""" +209 17 model """distmult""" +209 17 loss """marginranking""" +209 17 regularizer """no""" +209 17 optimizer """adam""" +209 17 training_loop """owa""" +209 17 negative_sampler """basic""" +209 17 evaluator """rankbased""" +209 18 dataset """wn18rr""" +209 18 model """distmult""" +209 18 loss """marginranking""" +209 18 regularizer """no""" +209 18 optimizer """adam""" +209 18 training_loop """owa""" +209 18 negative_sampler """basic""" +209 18 evaluator """rankbased""" +209 19 dataset """wn18rr""" +209 19 model """distmult""" +209 19 loss """marginranking""" +209 19 regularizer """no""" +209 19 optimizer """adam""" +209 19 training_loop """owa""" +209 19 negative_sampler """basic""" +209 19 evaluator """rankbased""" +209 20 dataset """wn18rr""" +209 20 model """distmult""" +209 20 loss """marginranking""" +209 20 regularizer """no""" +209 20 optimizer """adam""" +209 20 training_loop """owa""" +209 20 negative_sampler """basic""" +209 20 evaluator """rankbased""" +209 21 dataset """wn18rr""" +209 21 model """distmult""" +209 21 loss """marginranking""" +209 21 regularizer """no""" +209 21 optimizer """adam""" +209 21 training_loop """owa""" +209 21 negative_sampler """basic""" +209 21 evaluator """rankbased""" +209 22 dataset """wn18rr""" +209 22 model """distmult""" +209 22 loss """marginranking""" +209 22 regularizer """no""" +209 22 optimizer """adam""" +209 22 training_loop """owa""" +209 22 negative_sampler """basic""" +209 22 evaluator """rankbased""" +209 23 dataset """wn18rr""" +209 23 model """distmult""" +209 23 loss """marginranking""" +209 23 regularizer """no""" +209 23 optimizer """adam""" +209 23 training_loop """owa""" +209 23 negative_sampler """basic""" +209 23 evaluator """rankbased""" +209 24 dataset """wn18rr""" +209 24 model """distmult""" +209 24 loss """marginranking""" +209 24 regularizer """no""" +209 24 optimizer """adam""" +209 24 
training_loop """owa""" +209 24 negative_sampler """basic""" +209 24 evaluator """rankbased""" +209 25 dataset """wn18rr""" +209 25 model """distmult""" +209 25 loss """marginranking""" +209 25 regularizer """no""" +209 25 optimizer """adam""" +209 25 training_loop """owa""" +209 25 negative_sampler """basic""" +209 25 evaluator """rankbased""" +209 26 dataset """wn18rr""" +209 26 model """distmult""" +209 26 loss """marginranking""" +209 26 regularizer """no""" +209 26 optimizer """adam""" +209 26 training_loop """owa""" +209 26 negative_sampler """basic""" +209 26 evaluator """rankbased""" +209 27 dataset """wn18rr""" +209 27 model """distmult""" +209 27 loss """marginranking""" +209 27 regularizer """no""" +209 27 optimizer """adam""" +209 27 training_loop """owa""" +209 27 negative_sampler """basic""" +209 27 evaluator """rankbased""" +209 28 dataset """wn18rr""" +209 28 model """distmult""" +209 28 loss """marginranking""" +209 28 regularizer """no""" +209 28 optimizer """adam""" +209 28 training_loop """owa""" +209 28 negative_sampler """basic""" +209 28 evaluator """rankbased""" +209 29 dataset """wn18rr""" +209 29 model """distmult""" +209 29 loss """marginranking""" +209 29 regularizer """no""" +209 29 optimizer """adam""" +209 29 training_loop """owa""" +209 29 negative_sampler """basic""" +209 29 evaluator """rankbased""" +209 30 dataset """wn18rr""" +209 30 model """distmult""" +209 30 loss """marginranking""" +209 30 regularizer """no""" +209 30 optimizer """adam""" +209 30 training_loop """owa""" +209 30 negative_sampler """basic""" +209 30 evaluator """rankbased""" +209 31 dataset """wn18rr""" +209 31 model """distmult""" +209 31 loss """marginranking""" +209 31 regularizer """no""" +209 31 optimizer """adam""" +209 31 training_loop """owa""" +209 31 negative_sampler """basic""" +209 31 evaluator """rankbased""" +209 32 dataset """wn18rr""" +209 32 model """distmult""" +209 32 loss """marginranking""" +209 32 regularizer """no""" +209 32 optimizer 
"""adam""" +209 32 training_loop """owa""" +209 32 negative_sampler """basic""" +209 32 evaluator """rankbased""" +209 33 dataset """wn18rr""" +209 33 model """distmult""" +209 33 loss """marginranking""" +209 33 regularizer """no""" +209 33 optimizer """adam""" +209 33 training_loop """owa""" +209 33 negative_sampler """basic""" +209 33 evaluator """rankbased""" +209 34 dataset """wn18rr""" +209 34 model """distmult""" +209 34 loss """marginranking""" +209 34 regularizer """no""" +209 34 optimizer """adam""" +209 34 training_loop """owa""" +209 34 negative_sampler """basic""" +209 34 evaluator """rankbased""" +209 35 dataset """wn18rr""" +209 35 model """distmult""" +209 35 loss """marginranking""" +209 35 regularizer """no""" +209 35 optimizer """adam""" +209 35 training_loop """owa""" +209 35 negative_sampler """basic""" +209 35 evaluator """rankbased""" +209 36 dataset """wn18rr""" +209 36 model """distmult""" +209 36 loss """marginranking""" +209 36 regularizer """no""" +209 36 optimizer """adam""" +209 36 training_loop """owa""" +209 36 negative_sampler """basic""" +209 36 evaluator """rankbased""" +209 37 dataset """wn18rr""" +209 37 model """distmult""" +209 37 loss """marginranking""" +209 37 regularizer """no""" +209 37 optimizer """adam""" +209 37 training_loop """owa""" +209 37 negative_sampler """basic""" +209 37 evaluator """rankbased""" +209 38 dataset """wn18rr""" +209 38 model """distmult""" +209 38 loss """marginranking""" +209 38 regularizer """no""" +209 38 optimizer """adam""" +209 38 training_loop """owa""" +209 38 negative_sampler """basic""" +209 38 evaluator """rankbased""" +209 39 dataset """wn18rr""" +209 39 model """distmult""" +209 39 loss """marginranking""" +209 39 regularizer """no""" +209 39 optimizer """adam""" +209 39 training_loop """owa""" +209 39 negative_sampler """basic""" +209 39 evaluator """rankbased""" +209 40 dataset """wn18rr""" +209 40 model """distmult""" +209 40 loss """marginranking""" +209 40 regularizer """no""" 
+209 40 optimizer """adam""" +209 40 training_loop """owa""" +209 40 negative_sampler """basic""" +209 40 evaluator """rankbased""" +209 41 dataset """wn18rr""" +209 41 model """distmult""" +209 41 loss """marginranking""" +209 41 regularizer """no""" +209 41 optimizer """adam""" +209 41 training_loop """owa""" +209 41 negative_sampler """basic""" +209 41 evaluator """rankbased""" +209 42 dataset """wn18rr""" +209 42 model """distmult""" +209 42 loss """marginranking""" +209 42 regularizer """no""" +209 42 optimizer """adam""" +209 42 training_loop """owa""" +209 42 negative_sampler """basic""" +209 42 evaluator """rankbased""" +209 43 dataset """wn18rr""" +209 43 model """distmult""" +209 43 loss """marginranking""" +209 43 regularizer """no""" +209 43 optimizer """adam""" +209 43 training_loop """owa""" +209 43 negative_sampler """basic""" +209 43 evaluator """rankbased""" +209 44 dataset """wn18rr""" +209 44 model """distmult""" +209 44 loss """marginranking""" +209 44 regularizer """no""" +209 44 optimizer """adam""" +209 44 training_loop """owa""" +209 44 negative_sampler """basic""" +209 44 evaluator """rankbased""" +209 45 dataset """wn18rr""" +209 45 model """distmult""" +209 45 loss """marginranking""" +209 45 regularizer """no""" +209 45 optimizer """adam""" +209 45 training_loop """owa""" +209 45 negative_sampler """basic""" +209 45 evaluator """rankbased""" +209 46 dataset """wn18rr""" +209 46 model """distmult""" +209 46 loss """marginranking""" +209 46 regularizer """no""" +209 46 optimizer """adam""" +209 46 training_loop """owa""" +209 46 negative_sampler """basic""" +209 46 evaluator """rankbased""" +209 47 dataset """wn18rr""" +209 47 model """distmult""" +209 47 loss """marginranking""" +209 47 regularizer """no""" +209 47 optimizer """adam""" +209 47 training_loop """owa""" +209 47 negative_sampler """basic""" +209 47 evaluator """rankbased""" +209 48 dataset """wn18rr""" +209 48 model """distmult""" +209 48 loss """marginranking""" +209 48 
regularizer """no""" +209 48 optimizer """adam""" +209 48 training_loop """owa""" +209 48 negative_sampler """basic""" +209 48 evaluator """rankbased""" +209 49 dataset """wn18rr""" +209 49 model """distmult""" +209 49 loss """marginranking""" +209 49 regularizer """no""" +209 49 optimizer """adam""" +209 49 training_loop """owa""" +209 49 negative_sampler """basic""" +209 49 evaluator """rankbased""" +209 50 dataset """wn18rr""" +209 50 model """distmult""" +209 50 loss """marginranking""" +209 50 regularizer """no""" +209 50 optimizer """adam""" +209 50 training_loop """owa""" +209 50 negative_sampler """basic""" +209 50 evaluator """rankbased""" +209 51 dataset """wn18rr""" +209 51 model """distmult""" +209 51 loss """marginranking""" +209 51 regularizer """no""" +209 51 optimizer """adam""" +209 51 training_loop """owa""" +209 51 negative_sampler """basic""" +209 51 evaluator """rankbased""" +209 52 dataset """wn18rr""" +209 52 model """distmult""" +209 52 loss """marginranking""" +209 52 regularizer """no""" +209 52 optimizer """adam""" +209 52 training_loop """owa""" +209 52 negative_sampler """basic""" +209 52 evaluator """rankbased""" +209 53 dataset """wn18rr""" +209 53 model """distmult""" +209 53 loss """marginranking""" +209 53 regularizer """no""" +209 53 optimizer """adam""" +209 53 training_loop """owa""" +209 53 negative_sampler """basic""" +209 53 evaluator """rankbased""" +209 54 dataset """wn18rr""" +209 54 model """distmult""" +209 54 loss """marginranking""" +209 54 regularizer """no""" +209 54 optimizer """adam""" +209 54 training_loop """owa""" +209 54 negative_sampler """basic""" +209 54 evaluator """rankbased""" +209 55 dataset """wn18rr""" +209 55 model """distmult""" +209 55 loss """marginranking""" +209 55 regularizer """no""" +209 55 optimizer """adam""" +209 55 training_loop """owa""" +209 55 negative_sampler """basic""" +209 55 evaluator """rankbased""" +209 56 dataset """wn18rr""" +209 56 model """distmult""" +209 56 loss 
"""marginranking""" +209 56 regularizer """no""" +209 56 optimizer """adam""" +209 56 training_loop """owa""" +209 56 negative_sampler """basic""" +209 56 evaluator """rankbased""" +209 57 dataset """wn18rr""" +209 57 model """distmult""" +209 57 loss """marginranking""" +209 57 regularizer """no""" +209 57 optimizer """adam""" +209 57 training_loop """owa""" +209 57 negative_sampler """basic""" +209 57 evaluator """rankbased""" +209 58 dataset """wn18rr""" +209 58 model """distmult""" +209 58 loss """marginranking""" +209 58 regularizer """no""" +209 58 optimizer """adam""" +209 58 training_loop """owa""" +209 58 negative_sampler """basic""" +209 58 evaluator """rankbased""" +209 59 dataset """wn18rr""" +209 59 model """distmult""" +209 59 loss """marginranking""" +209 59 regularizer """no""" +209 59 optimizer """adam""" +209 59 training_loop """owa""" +209 59 negative_sampler """basic""" +209 59 evaluator """rankbased""" +209 60 dataset """wn18rr""" +209 60 model """distmult""" +209 60 loss """marginranking""" +209 60 regularizer """no""" +209 60 optimizer """adam""" +209 60 training_loop """owa""" +209 60 negative_sampler """basic""" +209 60 evaluator """rankbased""" +209 61 dataset """wn18rr""" +209 61 model """distmult""" +209 61 loss """marginranking""" +209 61 regularizer """no""" +209 61 optimizer """adam""" +209 61 training_loop """owa""" +209 61 negative_sampler """basic""" +209 61 evaluator """rankbased""" +209 62 dataset """wn18rr""" +209 62 model """distmult""" +209 62 loss """marginranking""" +209 62 regularizer """no""" +209 62 optimizer """adam""" +209 62 training_loop """owa""" +209 62 negative_sampler """basic""" +209 62 evaluator """rankbased""" +209 63 dataset """wn18rr""" +209 63 model """distmult""" +209 63 loss """marginranking""" +209 63 regularizer """no""" +209 63 optimizer """adam""" +209 63 training_loop """owa""" +209 63 negative_sampler """basic""" +209 63 evaluator """rankbased""" +209 64 dataset """wn18rr""" +209 64 model 
"""distmult""" +209 64 loss """marginranking""" +209 64 regularizer """no""" +209 64 optimizer """adam""" +209 64 training_loop """owa""" +209 64 negative_sampler """basic""" +209 64 evaluator """rankbased""" +209 65 dataset """wn18rr""" +209 65 model """distmult""" +209 65 loss """marginranking""" +209 65 regularizer """no""" +209 65 optimizer """adam""" +209 65 training_loop """owa""" +209 65 negative_sampler """basic""" +209 65 evaluator """rankbased""" +209 66 dataset """wn18rr""" +209 66 model """distmult""" +209 66 loss """marginranking""" +209 66 regularizer """no""" +209 66 optimizer """adam""" +209 66 training_loop """owa""" +209 66 negative_sampler """basic""" +209 66 evaluator """rankbased""" +209 67 dataset """wn18rr""" +209 67 model """distmult""" +209 67 loss """marginranking""" +209 67 regularizer """no""" +209 67 optimizer """adam""" +209 67 training_loop """owa""" +209 67 negative_sampler """basic""" +209 67 evaluator """rankbased""" +209 68 dataset """wn18rr""" +209 68 model """distmult""" +209 68 loss """marginranking""" +209 68 regularizer """no""" +209 68 optimizer """adam""" +209 68 training_loop """owa""" +209 68 negative_sampler """basic""" +209 68 evaluator """rankbased""" +209 69 dataset """wn18rr""" +209 69 model """distmult""" +209 69 loss """marginranking""" +209 69 regularizer """no""" +209 69 optimizer """adam""" +209 69 training_loop """owa""" +209 69 negative_sampler """basic""" +209 69 evaluator """rankbased""" +209 70 dataset """wn18rr""" +209 70 model """distmult""" +209 70 loss """marginranking""" +209 70 regularizer """no""" +209 70 optimizer """adam""" +209 70 training_loop """owa""" +209 70 negative_sampler """basic""" +209 70 evaluator """rankbased""" +209 71 dataset """wn18rr""" +209 71 model """distmult""" +209 71 loss """marginranking""" +209 71 regularizer """no""" +209 71 optimizer """adam""" +209 71 training_loop """owa""" +209 71 negative_sampler """basic""" +209 71 evaluator """rankbased""" +209 72 dataset 
"""wn18rr""" +209 72 model """distmult""" +209 72 loss """marginranking""" +209 72 regularizer """no""" +209 72 optimizer """adam""" +209 72 training_loop """owa""" +209 72 negative_sampler """basic""" +209 72 evaluator """rankbased""" +209 73 dataset """wn18rr""" +209 73 model """distmult""" +209 73 loss """marginranking""" +209 73 regularizer """no""" +209 73 optimizer """adam""" +209 73 training_loop """owa""" +209 73 negative_sampler """basic""" +209 73 evaluator """rankbased""" +209 74 dataset """wn18rr""" +209 74 model """distmult""" +209 74 loss """marginranking""" +209 74 regularizer """no""" +209 74 optimizer """adam""" +209 74 training_loop """owa""" +209 74 negative_sampler """basic""" +209 74 evaluator """rankbased""" +209 75 dataset """wn18rr""" +209 75 model """distmult""" +209 75 loss """marginranking""" +209 75 regularizer """no""" +209 75 optimizer """adam""" +209 75 training_loop """owa""" +209 75 negative_sampler """basic""" +209 75 evaluator """rankbased""" +209 76 dataset """wn18rr""" +209 76 model """distmult""" +209 76 loss """marginranking""" +209 76 regularizer """no""" +209 76 optimizer """adam""" +209 76 training_loop """owa""" +209 76 negative_sampler """basic""" +209 76 evaluator """rankbased""" +209 77 dataset """wn18rr""" +209 77 model """distmult""" +209 77 loss """marginranking""" +209 77 regularizer """no""" +209 77 optimizer """adam""" +209 77 training_loop """owa""" +209 77 negative_sampler """basic""" +209 77 evaluator """rankbased""" +209 78 dataset """wn18rr""" +209 78 model """distmult""" +209 78 loss """marginranking""" +209 78 regularizer """no""" +209 78 optimizer """adam""" +209 78 training_loop """owa""" +209 78 negative_sampler """basic""" +209 78 evaluator """rankbased""" +209 79 dataset """wn18rr""" +209 79 model """distmult""" +209 79 loss """marginranking""" +209 79 regularizer """no""" +209 79 optimizer """adam""" +209 79 training_loop """owa""" +209 79 negative_sampler """basic""" +209 79 evaluator """rankbased""" 
+209 80 dataset """wn18rr""" +209 80 model """distmult""" +209 80 loss """marginranking""" +209 80 regularizer """no""" +209 80 optimizer """adam""" +209 80 training_loop """owa""" +209 80 negative_sampler """basic""" +209 80 evaluator """rankbased""" +209 81 dataset """wn18rr""" +209 81 model """distmult""" +209 81 loss """marginranking""" +209 81 regularizer """no""" +209 81 optimizer """adam""" +209 81 training_loop """owa""" +209 81 negative_sampler """basic""" +209 81 evaluator """rankbased""" +209 82 dataset """wn18rr""" +209 82 model """distmult""" +209 82 loss """marginranking""" +209 82 regularizer """no""" +209 82 optimizer """adam""" +209 82 training_loop """owa""" +209 82 negative_sampler """basic""" +209 82 evaluator """rankbased""" +209 83 dataset """wn18rr""" +209 83 model """distmult""" +209 83 loss """marginranking""" +209 83 regularizer """no""" +209 83 optimizer """adam""" +209 83 training_loop """owa""" +209 83 negative_sampler """basic""" +209 83 evaluator """rankbased""" +209 84 dataset """wn18rr""" +209 84 model """distmult""" +209 84 loss """marginranking""" +209 84 regularizer """no""" +209 84 optimizer """adam""" +209 84 training_loop """owa""" +209 84 negative_sampler """basic""" +209 84 evaluator """rankbased""" +209 85 dataset """wn18rr""" +209 85 model """distmult""" +209 85 loss """marginranking""" +209 85 regularizer """no""" +209 85 optimizer """adam""" +209 85 training_loop """owa""" +209 85 negative_sampler """basic""" +209 85 evaluator """rankbased""" +209 86 dataset """wn18rr""" +209 86 model """distmult""" +209 86 loss """marginranking""" +209 86 regularizer """no""" +209 86 optimizer """adam""" +209 86 training_loop """owa""" +209 86 negative_sampler """basic""" +209 86 evaluator """rankbased""" +209 87 dataset """wn18rr""" +209 87 model """distmult""" +209 87 loss """marginranking""" +209 87 regularizer """no""" +209 87 optimizer """adam""" +209 87 training_loop """owa""" +209 87 negative_sampler """basic""" +209 87 evaluator 
"""rankbased""" +209 88 dataset """wn18rr""" +209 88 model """distmult""" +209 88 loss """marginranking""" +209 88 regularizer """no""" +209 88 optimizer """adam""" +209 88 training_loop """owa""" +209 88 negative_sampler """basic""" +209 88 evaluator """rankbased""" +209 89 dataset """wn18rr""" +209 89 model """distmult""" +209 89 loss """marginranking""" +209 89 regularizer """no""" +209 89 optimizer """adam""" +209 89 training_loop """owa""" +209 89 negative_sampler """basic""" +209 89 evaluator """rankbased""" +209 90 dataset """wn18rr""" +209 90 model """distmult""" +209 90 loss """marginranking""" +209 90 regularizer """no""" +209 90 optimizer """adam""" +209 90 training_loop """owa""" +209 90 negative_sampler """basic""" +209 90 evaluator """rankbased""" +209 91 dataset """wn18rr""" +209 91 model """distmult""" +209 91 loss """marginranking""" +209 91 regularizer """no""" +209 91 optimizer """adam""" +209 91 training_loop """owa""" +209 91 negative_sampler """basic""" +209 91 evaluator """rankbased""" +209 92 dataset """wn18rr""" +209 92 model """distmult""" +209 92 loss """marginranking""" +209 92 regularizer """no""" +209 92 optimizer """adam""" +209 92 training_loop """owa""" +209 92 negative_sampler """basic""" +209 92 evaluator """rankbased""" +209 93 dataset """wn18rr""" +209 93 model """distmult""" +209 93 loss """marginranking""" +209 93 regularizer """no""" +209 93 optimizer """adam""" +209 93 training_loop """owa""" +209 93 negative_sampler """basic""" +209 93 evaluator """rankbased""" +209 94 dataset """wn18rr""" +209 94 model """distmult""" +209 94 loss """marginranking""" +209 94 regularizer """no""" +209 94 optimizer """adam""" +209 94 training_loop """owa""" +209 94 negative_sampler """basic""" +209 94 evaluator """rankbased""" +209 95 dataset """wn18rr""" +209 95 model """distmult""" +209 95 loss """marginranking""" +209 95 regularizer """no""" +209 95 optimizer """adam""" +209 95 training_loop """owa""" +209 95 negative_sampler """basic""" 
+209 95 evaluator """rankbased""" +209 96 dataset """wn18rr""" +209 96 model """distmult""" +209 96 loss """marginranking""" +209 96 regularizer """no""" +209 96 optimizer """adam""" +209 96 training_loop """owa""" +209 96 negative_sampler """basic""" +209 96 evaluator """rankbased""" +209 97 dataset """wn18rr""" +209 97 model """distmult""" +209 97 loss """marginranking""" +209 97 regularizer """no""" +209 97 optimizer """adam""" +209 97 training_loop """owa""" +209 97 negative_sampler """basic""" +209 97 evaluator """rankbased""" +209 98 dataset """wn18rr""" +209 98 model """distmult""" +209 98 loss """marginranking""" +209 98 regularizer """no""" +209 98 optimizer """adam""" +209 98 training_loop """owa""" +209 98 negative_sampler """basic""" +209 98 evaluator """rankbased""" +209 99 dataset """wn18rr""" +209 99 model """distmult""" +209 99 loss """marginranking""" +209 99 regularizer """no""" +209 99 optimizer """adam""" +209 99 training_loop """owa""" +209 99 negative_sampler """basic""" +209 99 evaluator """rankbased""" +209 100 dataset """wn18rr""" +209 100 model """distmult""" +209 100 loss """marginranking""" +209 100 regularizer """no""" +209 100 optimizer """adam""" +209 100 training_loop """owa""" +209 100 negative_sampler """basic""" +209 100 evaluator """rankbased""" +210 1 model.embedding_dim 1.0 +210 1 optimizer.lr 0.08244069919250546 +210 1 negative_sampler.num_negs_per_pos 14.0 +210 1 training.batch_size 1.0 +210 2 model.embedding_dim 1.0 +210 2 optimizer.lr 0.06402191806028419 +210 2 negative_sampler.num_negs_per_pos 16.0 +210 2 training.batch_size 0.0 +210 3 model.embedding_dim 1.0 +210 3 optimizer.lr 0.009642699757779066 +210 3 negative_sampler.num_negs_per_pos 7.0 +210 3 training.batch_size 2.0 +210 4 model.embedding_dim 0.0 +210 4 optimizer.lr 0.06962331963537936 +210 4 negative_sampler.num_negs_per_pos 23.0 +210 4 training.batch_size 2.0 +210 5 model.embedding_dim 1.0 +210 5 optimizer.lr 0.03207099946258672 +210 5 
negative_sampler.num_negs_per_pos 2.0 +210 5 training.batch_size 2.0 +210 6 model.embedding_dim 2.0 +210 6 optimizer.lr 0.004114967271178812 +210 6 negative_sampler.num_negs_per_pos 14.0 +210 6 training.batch_size 1.0 +210 7 model.embedding_dim 0.0 +210 7 optimizer.lr 0.0015918882546466936 +210 7 negative_sampler.num_negs_per_pos 6.0 +210 7 training.batch_size 0.0 +210 8 model.embedding_dim 0.0 +210 8 optimizer.lr 0.0015534605001924667 +210 8 negative_sampler.num_negs_per_pos 19.0 +210 8 training.batch_size 3.0 +210 9 model.embedding_dim 2.0 +210 9 optimizer.lr 0.016195808840561914 +210 9 negative_sampler.num_negs_per_pos 39.0 +210 9 training.batch_size 3.0 +210 10 model.embedding_dim 1.0 +210 10 optimizer.lr 0.022478031499527953 +210 10 negative_sampler.num_negs_per_pos 21.0 +210 10 training.batch_size 2.0 +210 11 model.embedding_dim 0.0 +210 11 optimizer.lr 0.07407055739771737 +210 11 negative_sampler.num_negs_per_pos 39.0 +210 11 training.batch_size 1.0 +210 12 model.embedding_dim 1.0 +210 12 optimizer.lr 0.06081766667846473 +210 12 negative_sampler.num_negs_per_pos 40.0 +210 12 training.batch_size 3.0 +210 13 model.embedding_dim 0.0 +210 13 optimizer.lr 0.0025720835947239213 +210 13 negative_sampler.num_negs_per_pos 17.0 +210 13 training.batch_size 3.0 +210 14 model.embedding_dim 1.0 +210 14 optimizer.lr 0.04334378522353448 +210 14 negative_sampler.num_negs_per_pos 47.0 +210 14 training.batch_size 1.0 +210 15 model.embedding_dim 1.0 +210 15 optimizer.lr 0.05129502080959796 +210 15 negative_sampler.num_negs_per_pos 12.0 +210 15 training.batch_size 1.0 +210 16 model.embedding_dim 2.0 +210 16 optimizer.lr 0.006477837708410522 +210 16 negative_sampler.num_negs_per_pos 3.0 +210 16 training.batch_size 0.0 +210 17 model.embedding_dim 1.0 +210 17 optimizer.lr 0.014042037377820484 +210 17 negative_sampler.num_negs_per_pos 7.0 +210 17 training.batch_size 0.0 +210 18 model.embedding_dim 1.0 +210 18 optimizer.lr 0.0058909374597874405 +210 18 
negative_sampler.num_negs_per_pos 2.0 +210 18 training.batch_size 1.0 +210 19 model.embedding_dim 1.0 +210 19 optimizer.lr 0.009068443446699952 +210 19 negative_sampler.num_negs_per_pos 28.0 +210 19 training.batch_size 0.0 +210 20 model.embedding_dim 0.0 +210 20 optimizer.lr 0.012044487581939968 +210 20 negative_sampler.num_negs_per_pos 4.0 +210 20 training.batch_size 0.0 +210 21 model.embedding_dim 1.0 +210 21 optimizer.lr 0.0038422620382021124 +210 21 negative_sampler.num_negs_per_pos 11.0 +210 21 training.batch_size 2.0 +210 22 model.embedding_dim 2.0 +210 22 optimizer.lr 0.00731660091565466 +210 22 negative_sampler.num_negs_per_pos 3.0 +210 22 training.batch_size 3.0 +210 23 model.embedding_dim 2.0 +210 23 optimizer.lr 0.022524950560523192 +210 23 negative_sampler.num_negs_per_pos 34.0 +210 23 training.batch_size 2.0 +210 24 model.embedding_dim 1.0 +210 24 optimizer.lr 0.012938499253012447 +210 24 negative_sampler.num_negs_per_pos 12.0 +210 24 training.batch_size 0.0 +210 25 model.embedding_dim 2.0 +210 25 optimizer.lr 0.007063779020448645 +210 25 negative_sampler.num_negs_per_pos 8.0 +210 25 training.batch_size 0.0 +210 26 model.embedding_dim 2.0 +210 26 optimizer.lr 0.053819759403203236 +210 26 negative_sampler.num_negs_per_pos 28.0 +210 26 training.batch_size 0.0 +210 27 model.embedding_dim 1.0 +210 27 optimizer.lr 0.010834634090873888 +210 27 negative_sampler.num_negs_per_pos 47.0 +210 27 training.batch_size 3.0 +210 28 model.embedding_dim 1.0 +210 28 optimizer.lr 0.0048423404150700665 +210 28 negative_sampler.num_negs_per_pos 38.0 +210 28 training.batch_size 3.0 +210 29 model.embedding_dim 0.0 +210 29 optimizer.lr 0.012203378672622176 +210 29 negative_sampler.num_negs_per_pos 0.0 +210 29 training.batch_size 0.0 +210 30 model.embedding_dim 1.0 +210 30 optimizer.lr 0.001672481921479607 +210 30 negative_sampler.num_negs_per_pos 36.0 +210 30 training.batch_size 0.0 +210 31 model.embedding_dim 2.0 +210 31 optimizer.lr 0.0017604072646106549 +210 31 
negative_sampler.num_negs_per_pos 5.0 +210 31 training.batch_size 1.0 +210 32 model.embedding_dim 2.0 +210 32 optimizer.lr 0.08961362962147192 +210 32 negative_sampler.num_negs_per_pos 36.0 +210 32 training.batch_size 1.0 +210 1 dataset """yago310""" +210 1 model """distmult""" +210 1 loss """softplus""" +210 1 regularizer """no""" +210 1 optimizer """adam""" +210 1 training_loop """owa""" +210 1 negative_sampler """basic""" +210 1 evaluator """rankbased""" +210 2 dataset """yago310""" +210 2 model """distmult""" +210 2 loss """softplus""" +210 2 regularizer """no""" +210 2 optimizer """adam""" +210 2 training_loop """owa""" +210 2 negative_sampler """basic""" +210 2 evaluator """rankbased""" +210 3 dataset """yago310""" +210 3 model """distmult""" +210 3 loss """softplus""" +210 3 regularizer """no""" +210 3 optimizer """adam""" +210 3 training_loop """owa""" +210 3 negative_sampler """basic""" +210 3 evaluator """rankbased""" +210 4 dataset """yago310""" +210 4 model """distmult""" +210 4 loss """softplus""" +210 4 regularizer """no""" +210 4 optimizer """adam""" +210 4 training_loop """owa""" +210 4 negative_sampler """basic""" +210 4 evaluator """rankbased""" +210 5 dataset """yago310""" +210 5 model """distmult""" +210 5 loss """softplus""" +210 5 regularizer """no""" +210 5 optimizer """adam""" +210 5 training_loop """owa""" +210 5 negative_sampler """basic""" +210 5 evaluator """rankbased""" +210 6 dataset """yago310""" +210 6 model """distmult""" +210 6 loss """softplus""" +210 6 regularizer """no""" +210 6 optimizer """adam""" +210 6 training_loop """owa""" +210 6 negative_sampler """basic""" +210 6 evaluator """rankbased""" +210 7 dataset """yago310""" +210 7 model """distmult""" +210 7 loss """softplus""" +210 7 regularizer """no""" +210 7 optimizer """adam""" +210 7 training_loop """owa""" +210 7 negative_sampler """basic""" +210 7 evaluator """rankbased""" +210 8 dataset """yago310""" +210 8 model """distmult""" +210 8 loss """softplus""" +210 8 
regularizer """no""" +210 8 optimizer """adam""" +210 8 training_loop """owa""" +210 8 negative_sampler """basic""" +210 8 evaluator """rankbased""" +210 9 dataset """yago310""" +210 9 model """distmult""" +210 9 loss """softplus""" +210 9 regularizer """no""" +210 9 optimizer """adam""" +210 9 training_loop """owa""" +210 9 negative_sampler """basic""" +210 9 evaluator """rankbased""" +210 10 dataset """yago310""" +210 10 model """distmult""" +210 10 loss """softplus""" +210 10 regularizer """no""" +210 10 optimizer """adam""" +210 10 training_loop """owa""" +210 10 negative_sampler """basic""" +210 10 evaluator """rankbased""" +210 11 dataset """yago310""" +210 11 model """distmult""" +210 11 loss """softplus""" +210 11 regularizer """no""" +210 11 optimizer """adam""" +210 11 training_loop """owa""" +210 11 negative_sampler """basic""" +210 11 evaluator """rankbased""" +210 12 dataset """yago310""" +210 12 model """distmult""" +210 12 loss """softplus""" +210 12 regularizer """no""" +210 12 optimizer """adam""" +210 12 training_loop """owa""" +210 12 negative_sampler """basic""" +210 12 evaluator """rankbased""" +210 13 dataset """yago310""" +210 13 model """distmult""" +210 13 loss """softplus""" +210 13 regularizer """no""" +210 13 optimizer """adam""" +210 13 training_loop """owa""" +210 13 negative_sampler """basic""" +210 13 evaluator """rankbased""" +210 14 dataset """yago310""" +210 14 model """distmult""" +210 14 loss """softplus""" +210 14 regularizer """no""" +210 14 optimizer """adam""" +210 14 training_loop """owa""" +210 14 negative_sampler """basic""" +210 14 evaluator """rankbased""" +210 15 dataset """yago310""" +210 15 model """distmult""" +210 15 loss """softplus""" +210 15 regularizer """no""" +210 15 optimizer """adam""" +210 15 training_loop """owa""" +210 15 negative_sampler """basic""" +210 15 evaluator """rankbased""" +210 16 dataset """yago310""" +210 16 model """distmult""" +210 16 loss """softplus""" +210 16 regularizer """no""" +210 
16 optimizer """adam""" +210 16 training_loop """owa""" +210 16 negative_sampler """basic""" +210 16 evaluator """rankbased""" +210 17 dataset """yago310""" +210 17 model """distmult""" +210 17 loss """softplus""" +210 17 regularizer """no""" +210 17 optimizer """adam""" +210 17 training_loop """owa""" +210 17 negative_sampler """basic""" +210 17 evaluator """rankbased""" +210 18 dataset """yago310""" +210 18 model """distmult""" +210 18 loss """softplus""" +210 18 regularizer """no""" +210 18 optimizer """adam""" +210 18 training_loop """owa""" +210 18 negative_sampler """basic""" +210 18 evaluator """rankbased""" +210 19 dataset """yago310""" +210 19 model """distmult""" +210 19 loss """softplus""" +210 19 regularizer """no""" +210 19 optimizer """adam""" +210 19 training_loop """owa""" +210 19 negative_sampler """basic""" +210 19 evaluator """rankbased""" +210 20 dataset """yago310""" +210 20 model """distmult""" +210 20 loss """softplus""" +210 20 regularizer """no""" +210 20 optimizer """adam""" +210 20 training_loop """owa""" +210 20 negative_sampler """basic""" +210 20 evaluator """rankbased""" +210 21 dataset """yago310""" +210 21 model """distmult""" +210 21 loss """softplus""" +210 21 regularizer """no""" +210 21 optimizer """adam""" +210 21 training_loop """owa""" +210 21 negative_sampler """basic""" +210 21 evaluator """rankbased""" +210 22 dataset """yago310""" +210 22 model """distmult""" +210 22 loss """softplus""" +210 22 regularizer """no""" +210 22 optimizer """adam""" +210 22 training_loop """owa""" +210 22 negative_sampler """basic""" +210 22 evaluator """rankbased""" +210 23 dataset """yago310""" +210 23 model """distmult""" +210 23 loss """softplus""" +210 23 regularizer """no""" +210 23 optimizer """adam""" +210 23 training_loop """owa""" +210 23 negative_sampler """basic""" +210 23 evaluator """rankbased""" +210 24 dataset """yago310""" +210 24 model """distmult""" +210 24 loss """softplus""" +210 24 regularizer """no""" +210 24 optimizer 
"""adam""" +210 24 training_loop """owa""" +210 24 negative_sampler """basic""" +210 24 evaluator """rankbased""" +210 25 dataset """yago310""" +210 25 model """distmult""" +210 25 loss """softplus""" +210 25 regularizer """no""" +210 25 optimizer """adam""" +210 25 training_loop """owa""" +210 25 negative_sampler """basic""" +210 25 evaluator """rankbased""" +210 26 dataset """yago310""" +210 26 model """distmult""" +210 26 loss """softplus""" +210 26 regularizer """no""" +210 26 optimizer """adam""" +210 26 training_loop """owa""" +210 26 negative_sampler """basic""" +210 26 evaluator """rankbased""" +210 27 dataset """yago310""" +210 27 model """distmult""" +210 27 loss """softplus""" +210 27 regularizer """no""" +210 27 optimizer """adam""" +210 27 training_loop """owa""" +210 27 negative_sampler """basic""" +210 27 evaluator """rankbased""" +210 28 dataset """yago310""" +210 28 model """distmult""" +210 28 loss """softplus""" +210 28 regularizer """no""" +210 28 optimizer """adam""" +210 28 training_loop """owa""" +210 28 negative_sampler """basic""" +210 28 evaluator """rankbased""" +210 29 dataset """yago310""" +210 29 model """distmult""" +210 29 loss """softplus""" +210 29 regularizer """no""" +210 29 optimizer """adam""" +210 29 training_loop """owa""" +210 29 negative_sampler """basic""" +210 29 evaluator """rankbased""" +210 30 dataset """yago310""" +210 30 model """distmult""" +210 30 loss """softplus""" +210 30 regularizer """no""" +210 30 optimizer """adam""" +210 30 training_loop """owa""" +210 30 negative_sampler """basic""" +210 30 evaluator """rankbased""" +210 31 dataset """yago310""" +210 31 model """distmult""" +210 31 loss """softplus""" +210 31 regularizer """no""" +210 31 optimizer """adam""" +210 31 training_loop """owa""" +210 31 negative_sampler """basic""" +210 31 evaluator """rankbased""" +210 32 dataset """yago310""" +210 32 model """distmult""" +210 32 loss """softplus""" +210 32 regularizer """no""" +210 32 optimizer """adam""" +210 
32 training_loop """owa""" +210 32 negative_sampler """basic""" +210 32 evaluator """rankbased""" +211 1 model.embedding_dim 1.0 +211 1 optimizer.lr 0.0011546082372723095 +211 1 negative_sampler.num_negs_per_pos 43.0 +211 1 training.batch_size 3.0 +211 2 model.embedding_dim 1.0 +211 2 optimizer.lr 0.007047974555672346 +211 2 negative_sampler.num_negs_per_pos 31.0 +211 2 training.batch_size 1.0 +211 3 model.embedding_dim 1.0 +211 3 optimizer.lr 0.005992823865619603 +211 3 negative_sampler.num_negs_per_pos 4.0 +211 3 training.batch_size 0.0 +211 4 model.embedding_dim 0.0 +211 4 optimizer.lr 0.04161360779196898 +211 4 negative_sampler.num_negs_per_pos 38.0 +211 4 training.batch_size 0.0 +211 5 model.embedding_dim 1.0 +211 5 optimizer.lr 0.006247872370133704 +211 5 negative_sampler.num_negs_per_pos 20.0 +211 5 training.batch_size 1.0 +211 6 model.embedding_dim 1.0 +211 6 optimizer.lr 0.006861879128395805 +211 6 negative_sampler.num_negs_per_pos 39.0 +211 6 training.batch_size 3.0 +211 7 model.embedding_dim 2.0 +211 7 optimizer.lr 0.005915190890835538 +211 7 negative_sampler.num_negs_per_pos 40.0 +211 7 training.batch_size 2.0 +211 8 model.embedding_dim 1.0 +211 8 optimizer.lr 0.0013765568087236123 +211 8 negative_sampler.num_negs_per_pos 36.0 +211 8 training.batch_size 1.0 +211 9 model.embedding_dim 0.0 +211 9 optimizer.lr 0.0027068593805962804 +211 9 negative_sampler.num_negs_per_pos 15.0 +211 9 training.batch_size 0.0 +211 10 model.embedding_dim 0.0 +211 10 optimizer.lr 0.007720643944491571 +211 10 negative_sampler.num_negs_per_pos 35.0 +211 10 training.batch_size 3.0 +211 11 model.embedding_dim 0.0 +211 11 optimizer.lr 0.04950537500274663 +211 11 negative_sampler.num_negs_per_pos 49.0 +211 11 training.batch_size 0.0 +211 12 model.embedding_dim 2.0 +211 12 optimizer.lr 0.0017193006014668286 +211 12 negative_sampler.num_negs_per_pos 1.0 +211 12 training.batch_size 3.0 +211 13 model.embedding_dim 2.0 +211 13 optimizer.lr 0.005140501278904405 +211 13 
negative_sampler.num_negs_per_pos 44.0 +211 13 training.batch_size 1.0 +211 14 model.embedding_dim 1.0 +211 14 optimizer.lr 0.007382198894288034 +211 14 negative_sampler.num_negs_per_pos 0.0 +211 14 training.batch_size 0.0 +211 15 model.embedding_dim 1.0 +211 15 optimizer.lr 0.010140639153966761 +211 15 negative_sampler.num_negs_per_pos 8.0 +211 15 training.batch_size 1.0 +211 16 model.embedding_dim 2.0 +211 16 optimizer.lr 0.08227527093509833 +211 16 negative_sampler.num_negs_per_pos 31.0 +211 16 training.batch_size 2.0 +211 17 model.embedding_dim 0.0 +211 17 optimizer.lr 0.08779473055523178 +211 17 negative_sampler.num_negs_per_pos 31.0 +211 17 training.batch_size 1.0 +211 18 model.embedding_dim 0.0 +211 18 optimizer.lr 0.026421927215831593 +211 18 negative_sampler.num_negs_per_pos 8.0 +211 18 training.batch_size 1.0 +211 19 model.embedding_dim 0.0 +211 19 optimizer.lr 0.009810580050210675 +211 19 negative_sampler.num_negs_per_pos 35.0 +211 19 training.batch_size 2.0 +211 20 model.embedding_dim 2.0 +211 20 optimizer.lr 0.001447067934216357 +211 20 negative_sampler.num_negs_per_pos 23.0 +211 20 training.batch_size 0.0 +211 21 model.embedding_dim 1.0 +211 21 optimizer.lr 0.03684635027907933 +211 21 negative_sampler.num_negs_per_pos 15.0 +211 21 training.batch_size 2.0 +211 22 model.embedding_dim 1.0 +211 22 optimizer.lr 0.0015904068053905404 +211 22 negative_sampler.num_negs_per_pos 48.0 +211 22 training.batch_size 0.0 +211 23 model.embedding_dim 2.0 +211 23 optimizer.lr 0.025960862467759468 +211 23 negative_sampler.num_negs_per_pos 48.0 +211 23 training.batch_size 1.0 +211 24 model.embedding_dim 0.0 +211 24 optimizer.lr 0.0014674407108747204 +211 24 negative_sampler.num_negs_per_pos 22.0 +211 24 training.batch_size 0.0 +211 25 model.embedding_dim 0.0 +211 25 optimizer.lr 0.028535529045037148 +211 25 negative_sampler.num_negs_per_pos 7.0 +211 25 training.batch_size 2.0 +211 26 model.embedding_dim 0.0 +211 26 optimizer.lr 0.014770193036195698 +211 26 
negative_sampler.num_negs_per_pos 27.0 +211 26 training.batch_size 0.0 +211 27 model.embedding_dim 2.0 +211 27 optimizer.lr 0.0025626313478640137 +211 27 negative_sampler.num_negs_per_pos 36.0 +211 27 training.batch_size 0.0 +211 28 model.embedding_dim 1.0 +211 28 optimizer.lr 0.0016017230674945225 +211 28 negative_sampler.num_negs_per_pos 13.0 +211 28 training.batch_size 2.0 +211 29 model.embedding_dim 1.0 +211 29 optimizer.lr 0.013534726544230907 +211 29 negative_sampler.num_negs_per_pos 1.0 +211 29 training.batch_size 1.0 +211 30 model.embedding_dim 2.0 +211 30 optimizer.lr 0.005590015179637964 +211 30 negative_sampler.num_negs_per_pos 3.0 +211 30 training.batch_size 2.0 +211 31 model.embedding_dim 1.0 +211 31 optimizer.lr 0.03454934220394653 +211 31 negative_sampler.num_negs_per_pos 44.0 +211 31 training.batch_size 2.0 +211 32 model.embedding_dim 2.0 +211 32 optimizer.lr 0.01300095943719627 +211 32 negative_sampler.num_negs_per_pos 8.0 +211 32 training.batch_size 0.0 +211 33 model.embedding_dim 2.0 +211 33 optimizer.lr 0.004537878684523956 +211 33 negative_sampler.num_negs_per_pos 21.0 +211 33 training.batch_size 1.0 +211 34 model.embedding_dim 2.0 +211 34 optimizer.lr 0.027569381980138502 +211 34 negative_sampler.num_negs_per_pos 2.0 +211 34 training.batch_size 1.0 +211 35 model.embedding_dim 2.0 +211 35 optimizer.lr 0.08768072164785924 +211 35 negative_sampler.num_negs_per_pos 31.0 +211 35 training.batch_size 1.0 +211 36 model.embedding_dim 2.0 +211 36 optimizer.lr 0.027810981675703283 +211 36 negative_sampler.num_negs_per_pos 38.0 +211 36 training.batch_size 1.0 +211 37 model.embedding_dim 1.0 +211 37 optimizer.lr 0.06762784603794887 +211 37 negative_sampler.num_negs_per_pos 19.0 +211 37 training.batch_size 1.0 +211 38 model.embedding_dim 0.0 +211 38 optimizer.lr 0.029725798469723088 +211 38 negative_sampler.num_negs_per_pos 39.0 +211 38 training.batch_size 2.0 +211 39 model.embedding_dim 1.0 +211 39 optimizer.lr 0.006222602638652615 +211 39 
negative_sampler.num_negs_per_pos 20.0 +211 39 training.batch_size 2.0 +211 40 model.embedding_dim 0.0 +211 40 optimizer.lr 0.0017855846168187697 +211 40 negative_sampler.num_negs_per_pos 26.0 +211 40 training.batch_size 1.0 +211 41 model.embedding_dim 1.0 +211 41 optimizer.lr 0.0011596620441135122 +211 41 negative_sampler.num_negs_per_pos 19.0 +211 41 training.batch_size 3.0 +211 42 model.embedding_dim 2.0 +211 42 optimizer.lr 0.08779162504161434 +211 42 negative_sampler.num_negs_per_pos 24.0 +211 42 training.batch_size 0.0 +211 43 model.embedding_dim 1.0 +211 43 optimizer.lr 0.02060565549431103 +211 43 negative_sampler.num_negs_per_pos 14.0 +211 43 training.batch_size 2.0 +211 44 model.embedding_dim 2.0 +211 44 optimizer.lr 0.03559714377489461 +211 44 negative_sampler.num_negs_per_pos 41.0 +211 44 training.batch_size 0.0 +211 1 dataset """yago310""" +211 1 model """distmult""" +211 1 loss """softplus""" +211 1 regularizer """no""" +211 1 optimizer """adam""" +211 1 training_loop """owa""" +211 1 negative_sampler """basic""" +211 1 evaluator """rankbased""" +211 2 dataset """yago310""" +211 2 model """distmult""" +211 2 loss """softplus""" +211 2 regularizer """no""" +211 2 optimizer """adam""" +211 2 training_loop """owa""" +211 2 negative_sampler """basic""" +211 2 evaluator """rankbased""" +211 3 dataset """yago310""" +211 3 model """distmult""" +211 3 loss """softplus""" +211 3 regularizer """no""" +211 3 optimizer """adam""" +211 3 training_loop """owa""" +211 3 negative_sampler """basic""" +211 3 evaluator """rankbased""" +211 4 dataset """yago310""" +211 4 model """distmult""" +211 4 loss """softplus""" +211 4 regularizer """no""" +211 4 optimizer """adam""" +211 4 training_loop """owa""" +211 4 negative_sampler """basic""" +211 4 evaluator """rankbased""" +211 5 dataset """yago310""" +211 5 model """distmult""" +211 5 loss """softplus""" +211 5 regularizer """no""" +211 5 optimizer """adam""" +211 5 training_loop """owa""" +211 5 negative_sampler 
"""basic""" +211 5 evaluator """rankbased""" +211 6 dataset """yago310""" +211 6 model """distmult""" +211 6 loss """softplus""" +211 6 regularizer """no""" +211 6 optimizer """adam""" +211 6 training_loop """owa""" +211 6 negative_sampler """basic""" +211 6 evaluator """rankbased""" +211 7 dataset """yago310""" +211 7 model """distmult""" +211 7 loss """softplus""" +211 7 regularizer """no""" +211 7 optimizer """adam""" +211 7 training_loop """owa""" +211 7 negative_sampler """basic""" +211 7 evaluator """rankbased""" +211 8 dataset """yago310""" +211 8 model """distmult""" +211 8 loss """softplus""" +211 8 regularizer """no""" +211 8 optimizer """adam""" +211 8 training_loop """owa""" +211 8 negative_sampler """basic""" +211 8 evaluator """rankbased""" +211 9 dataset """yago310""" +211 9 model """distmult""" +211 9 loss """softplus""" +211 9 regularizer """no""" +211 9 optimizer """adam""" +211 9 training_loop """owa""" +211 9 negative_sampler """basic""" +211 9 evaluator """rankbased""" +211 10 dataset """yago310""" +211 10 model """distmult""" +211 10 loss """softplus""" +211 10 regularizer """no""" +211 10 optimizer """adam""" +211 10 training_loop """owa""" +211 10 negative_sampler """basic""" +211 10 evaluator """rankbased""" +211 11 dataset """yago310""" +211 11 model """distmult""" +211 11 loss """softplus""" +211 11 regularizer """no""" +211 11 optimizer """adam""" +211 11 training_loop """owa""" +211 11 negative_sampler """basic""" +211 11 evaluator """rankbased""" +211 12 dataset """yago310""" +211 12 model """distmult""" +211 12 loss """softplus""" +211 12 regularizer """no""" +211 12 optimizer """adam""" +211 12 training_loop """owa""" +211 12 negative_sampler """basic""" +211 12 evaluator """rankbased""" +211 13 dataset """yago310""" +211 13 model """distmult""" +211 13 loss """softplus""" +211 13 regularizer """no""" +211 13 optimizer """adam""" +211 13 training_loop """owa""" +211 13 negative_sampler """basic""" +211 13 evaluator """rankbased""" 
+211 14 dataset """yago310""" +211 14 model """distmult""" +211 14 loss """softplus""" +211 14 regularizer """no""" +211 14 optimizer """adam""" +211 14 training_loop """owa""" +211 14 negative_sampler """basic""" +211 14 evaluator """rankbased""" +211 15 dataset """yago310""" +211 15 model """distmult""" +211 15 loss """softplus""" +211 15 regularizer """no""" +211 15 optimizer """adam""" +211 15 training_loop """owa""" +211 15 negative_sampler """basic""" +211 15 evaluator """rankbased""" +211 16 dataset """yago310""" +211 16 model """distmult""" +211 16 loss """softplus""" +211 16 regularizer """no""" +211 16 optimizer """adam""" +211 16 training_loop """owa""" +211 16 negative_sampler """basic""" +211 16 evaluator """rankbased""" +211 17 dataset """yago310""" +211 17 model """distmult""" +211 17 loss """softplus""" +211 17 regularizer """no""" +211 17 optimizer """adam""" +211 17 training_loop """owa""" +211 17 negative_sampler """basic""" +211 17 evaluator """rankbased""" +211 18 dataset """yago310""" +211 18 model """distmult""" +211 18 loss """softplus""" +211 18 regularizer """no""" +211 18 optimizer """adam""" +211 18 training_loop """owa""" +211 18 negative_sampler """basic""" +211 18 evaluator """rankbased""" +211 19 dataset """yago310""" +211 19 model """distmult""" +211 19 loss """softplus""" +211 19 regularizer """no""" +211 19 optimizer """adam""" +211 19 training_loop """owa""" +211 19 negative_sampler """basic""" +211 19 evaluator """rankbased""" +211 20 dataset """yago310""" +211 20 model """distmult""" +211 20 loss """softplus""" +211 20 regularizer """no""" +211 20 optimizer """adam""" +211 20 training_loop """owa""" +211 20 negative_sampler """basic""" +211 20 evaluator """rankbased""" +211 21 dataset """yago310""" +211 21 model """distmult""" +211 21 loss """softplus""" +211 21 regularizer """no""" +211 21 optimizer """adam""" +211 21 training_loop """owa""" +211 21 negative_sampler """basic""" +211 21 evaluator """rankbased""" +211 22 dataset 
"""yago310""" +211 22 model """distmult""" +211 22 loss """softplus""" +211 22 regularizer """no""" +211 22 optimizer """adam""" +211 22 training_loop """owa""" +211 22 negative_sampler """basic""" +211 22 evaluator """rankbased""" +211 23 dataset """yago310""" +211 23 model """distmult""" +211 23 loss """softplus""" +211 23 regularizer """no""" +211 23 optimizer """adam""" +211 23 training_loop """owa""" +211 23 negative_sampler """basic""" +211 23 evaluator """rankbased""" +211 24 dataset """yago310""" +211 24 model """distmult""" +211 24 loss """softplus""" +211 24 regularizer """no""" +211 24 optimizer """adam""" +211 24 training_loop """owa""" +211 24 negative_sampler """basic""" +211 24 evaluator """rankbased""" +211 25 dataset """yago310""" +211 25 model """distmult""" +211 25 loss """softplus""" +211 25 regularizer """no""" +211 25 optimizer """adam""" +211 25 training_loop """owa""" +211 25 negative_sampler """basic""" +211 25 evaluator """rankbased""" +211 26 dataset """yago310""" +211 26 model """distmult""" +211 26 loss """softplus""" +211 26 regularizer """no""" +211 26 optimizer """adam""" +211 26 training_loop """owa""" +211 26 negative_sampler """basic""" +211 26 evaluator """rankbased""" +211 27 dataset """yago310""" +211 27 model """distmult""" +211 27 loss """softplus""" +211 27 regularizer """no""" +211 27 optimizer """adam""" +211 27 training_loop """owa""" +211 27 negative_sampler """basic""" +211 27 evaluator """rankbased""" +211 28 dataset """yago310""" +211 28 model """distmult""" +211 28 loss """softplus""" +211 28 regularizer """no""" +211 28 optimizer """adam""" +211 28 training_loop """owa""" +211 28 negative_sampler """basic""" +211 28 evaluator """rankbased""" +211 29 dataset """yago310""" +211 29 model """distmult""" +211 29 loss """softplus""" +211 29 regularizer """no""" +211 29 optimizer """adam""" +211 29 training_loop """owa""" +211 29 negative_sampler """basic""" +211 29 evaluator """rankbased""" +211 30 dataset """yago310""" 
+211 30 model """distmult""" +211 30 loss """softplus""" +211 30 regularizer """no""" +211 30 optimizer """adam""" +211 30 training_loop """owa""" +211 30 negative_sampler """basic""" +211 30 evaluator """rankbased""" +211 31 dataset """yago310""" +211 31 model """distmult""" +211 31 loss """softplus""" +211 31 regularizer """no""" +211 31 optimizer """adam""" +211 31 training_loop """owa""" +211 31 negative_sampler """basic""" +211 31 evaluator """rankbased""" +211 32 dataset """yago310""" +211 32 model """distmult""" +211 32 loss """softplus""" +211 32 regularizer """no""" +211 32 optimizer """adam""" +211 32 training_loop """owa""" +211 32 negative_sampler """basic""" +211 32 evaluator """rankbased""" +211 33 dataset """yago310""" +211 33 model """distmult""" +211 33 loss """softplus""" +211 33 regularizer """no""" +211 33 optimizer """adam""" +211 33 training_loop """owa""" +211 33 negative_sampler """basic""" +211 33 evaluator """rankbased""" +211 34 dataset """yago310""" +211 34 model """distmult""" +211 34 loss """softplus""" +211 34 regularizer """no""" +211 34 optimizer """adam""" +211 34 training_loop """owa""" +211 34 negative_sampler """basic""" +211 34 evaluator """rankbased""" +211 35 dataset """yago310""" +211 35 model """distmult""" +211 35 loss """softplus""" +211 35 regularizer """no""" +211 35 optimizer """adam""" +211 35 training_loop """owa""" +211 35 negative_sampler """basic""" +211 35 evaluator """rankbased""" +211 36 dataset """yago310""" +211 36 model """distmult""" +211 36 loss """softplus""" +211 36 regularizer """no""" +211 36 optimizer """adam""" +211 36 training_loop """owa""" +211 36 negative_sampler """basic""" +211 36 evaluator """rankbased""" +211 37 dataset """yago310""" +211 37 model """distmult""" +211 37 loss """softplus""" +211 37 regularizer """no""" +211 37 optimizer """adam""" +211 37 training_loop """owa""" +211 37 negative_sampler """basic""" +211 37 evaluator """rankbased""" +211 38 dataset """yago310""" +211 38 model 
"""distmult""" +211 38 loss """softplus""" +211 38 regularizer """no""" +211 38 optimizer """adam""" +211 38 training_loop """owa""" +211 38 negative_sampler """basic""" +211 38 evaluator """rankbased""" +211 39 dataset """yago310""" +211 39 model """distmult""" +211 39 loss """softplus""" +211 39 regularizer """no""" +211 39 optimizer """adam""" +211 39 training_loop """owa""" +211 39 negative_sampler """basic""" +211 39 evaluator """rankbased""" +211 40 dataset """yago310""" +211 40 model """distmult""" +211 40 loss """softplus""" +211 40 regularizer """no""" +211 40 optimizer """adam""" +211 40 training_loop """owa""" +211 40 negative_sampler """basic""" +211 40 evaluator """rankbased""" +211 41 dataset """yago310""" +211 41 model """distmult""" +211 41 loss """softplus""" +211 41 regularizer """no""" +211 41 optimizer """adam""" +211 41 training_loop """owa""" +211 41 negative_sampler """basic""" +211 41 evaluator """rankbased""" +211 42 dataset """yago310""" +211 42 model """distmult""" +211 42 loss """softplus""" +211 42 regularizer """no""" +211 42 optimizer """adam""" +211 42 training_loop """owa""" +211 42 negative_sampler """basic""" +211 42 evaluator """rankbased""" +211 43 dataset """yago310""" +211 43 model """distmult""" +211 43 loss """softplus""" +211 43 regularizer """no""" +211 43 optimizer """adam""" +211 43 training_loop """owa""" +211 43 negative_sampler """basic""" +211 43 evaluator """rankbased""" +211 44 dataset """yago310""" +211 44 model """distmult""" +211 44 loss """softplus""" +211 44 regularizer """no""" +211 44 optimizer """adam""" +211 44 training_loop """owa""" +211 44 negative_sampler """basic""" +211 44 evaluator """rankbased""" +212 1 model.embedding_dim 0.0 +212 1 optimizer.lr 0.007480364672391538 +212 1 negative_sampler.num_negs_per_pos 11.0 +212 1 training.batch_size 1.0 +212 2 model.embedding_dim 1.0 +212 2 optimizer.lr 0.01881587474279415 +212 2 negative_sampler.num_negs_per_pos 16.0 +212 2 training.batch_size 3.0 +212 3 
model.embedding_dim 0.0 +212 3 optimizer.lr 0.03338122744841372 +212 3 negative_sampler.num_negs_per_pos 47.0 +212 3 training.batch_size 0.0 +212 4 model.embedding_dim 0.0 +212 4 optimizer.lr 0.04653487125820933 +212 4 negative_sampler.num_negs_per_pos 0.0 +212 4 training.batch_size 0.0 +212 5 model.embedding_dim 0.0 +212 5 optimizer.lr 0.08995047216749831 +212 5 negative_sampler.num_negs_per_pos 5.0 +212 5 training.batch_size 2.0 +212 6 model.embedding_dim 0.0 +212 6 optimizer.lr 0.002840598429053342 +212 6 negative_sampler.num_negs_per_pos 38.0 +212 6 training.batch_size 1.0 +212 7 model.embedding_dim 1.0 +212 7 optimizer.lr 0.024649493806924296 +212 7 negative_sampler.num_negs_per_pos 38.0 +212 7 training.batch_size 2.0 +212 8 model.embedding_dim 1.0 +212 8 optimizer.lr 0.006179321029423893 +212 8 negative_sampler.num_negs_per_pos 1.0 +212 8 training.batch_size 0.0 +212 9 model.embedding_dim 0.0 +212 9 optimizer.lr 0.0031946351410442426 +212 9 negative_sampler.num_negs_per_pos 27.0 +212 9 training.batch_size 3.0 +212 10 model.embedding_dim 0.0 +212 10 optimizer.lr 0.062034264170504026 +212 10 negative_sampler.num_negs_per_pos 22.0 +212 10 training.batch_size 1.0 +212 11 model.embedding_dim 2.0 +212 11 optimizer.lr 0.017970483444138055 +212 11 negative_sampler.num_negs_per_pos 40.0 +212 11 training.batch_size 1.0 +212 12 model.embedding_dim 2.0 +212 12 optimizer.lr 0.004412295855351446 +212 12 negative_sampler.num_negs_per_pos 18.0 +212 12 training.batch_size 0.0 +212 13 model.embedding_dim 2.0 +212 13 optimizer.lr 0.03948977668593049 +212 13 negative_sampler.num_negs_per_pos 15.0 +212 13 training.batch_size 0.0 +212 14 model.embedding_dim 2.0 +212 14 optimizer.lr 0.04005160189337185 +212 14 negative_sampler.num_negs_per_pos 12.0 +212 14 training.batch_size 2.0 +212 15 model.embedding_dim 2.0 +212 15 optimizer.lr 0.0017487183403083722 +212 15 negative_sampler.num_negs_per_pos 49.0 +212 15 training.batch_size 0.0 +212 16 model.embedding_dim 1.0 +212 16 
optimizer.lr 0.08451460168416744 +212 16 negative_sampler.num_negs_per_pos 44.0 +212 16 training.batch_size 3.0 +212 17 model.embedding_dim 0.0 +212 17 optimizer.lr 0.009322951373629759 +212 17 negative_sampler.num_negs_per_pos 22.0 +212 17 training.batch_size 1.0 +212 18 model.embedding_dim 1.0 +212 18 optimizer.lr 0.07645448853178549 +212 18 negative_sampler.num_negs_per_pos 18.0 +212 18 training.batch_size 0.0 +212 19 model.embedding_dim 2.0 +212 19 optimizer.lr 0.055475155775156895 +212 19 negative_sampler.num_negs_per_pos 22.0 +212 19 training.batch_size 0.0 +212 20 model.embedding_dim 2.0 +212 20 optimizer.lr 0.00923462126287228 +212 20 negative_sampler.num_negs_per_pos 37.0 +212 20 training.batch_size 0.0 +212 21 model.embedding_dim 2.0 +212 21 optimizer.lr 0.014617591968028771 +212 21 negative_sampler.num_negs_per_pos 41.0 +212 21 training.batch_size 3.0 +212 22 model.embedding_dim 1.0 +212 22 optimizer.lr 0.04189474067429391 +212 22 negative_sampler.num_negs_per_pos 11.0 +212 22 training.batch_size 3.0 +212 23 model.embedding_dim 0.0 +212 23 optimizer.lr 0.0042517458834860355 +212 23 negative_sampler.num_negs_per_pos 14.0 +212 23 training.batch_size 1.0 +212 24 model.embedding_dim 1.0 +212 24 optimizer.lr 0.012895709428680124 +212 24 negative_sampler.num_negs_per_pos 20.0 +212 24 training.batch_size 1.0 +212 25 model.embedding_dim 0.0 +212 25 optimizer.lr 0.007612514110152097 +212 25 negative_sampler.num_negs_per_pos 48.0 +212 25 training.batch_size 2.0 +212 26 model.embedding_dim 0.0 +212 26 optimizer.lr 0.0022602797192961617 +212 26 negative_sampler.num_negs_per_pos 3.0 +212 26 training.batch_size 3.0 +212 27 model.embedding_dim 0.0 +212 27 optimizer.lr 0.04593432996036873 +212 27 negative_sampler.num_negs_per_pos 16.0 +212 27 training.batch_size 0.0 +212 28 model.embedding_dim 2.0 +212 28 optimizer.lr 0.023580493650759697 +212 28 negative_sampler.num_negs_per_pos 41.0 +212 28 training.batch_size 1.0 +212 29 model.embedding_dim 0.0 +212 29 optimizer.lr 
0.003946618814148217 +212 29 negative_sampler.num_negs_per_pos 22.0 +212 29 training.batch_size 2.0 +212 30 model.embedding_dim 2.0 +212 30 optimizer.lr 0.0024897952929962244 +212 30 negative_sampler.num_negs_per_pos 42.0 +212 30 training.batch_size 2.0 +212 1 dataset """yago310""" +212 1 model """distmult""" +212 1 loss """bceaftersigmoid""" +212 1 regularizer """no""" +212 1 optimizer """adam""" +212 1 training_loop """owa""" +212 1 negative_sampler """basic""" +212 1 evaluator """rankbased""" +212 2 dataset """yago310""" +212 2 model """distmult""" +212 2 loss """bceaftersigmoid""" +212 2 regularizer """no""" +212 2 optimizer """adam""" +212 2 training_loop """owa""" +212 2 negative_sampler """basic""" +212 2 evaluator """rankbased""" +212 3 dataset """yago310""" +212 3 model """distmult""" +212 3 loss """bceaftersigmoid""" +212 3 regularizer """no""" +212 3 optimizer """adam""" +212 3 training_loop """owa""" +212 3 negative_sampler """basic""" +212 3 evaluator """rankbased""" +212 4 dataset """yago310""" +212 4 model """distmult""" +212 4 loss """bceaftersigmoid""" +212 4 regularizer """no""" +212 4 optimizer """adam""" +212 4 training_loop """owa""" +212 4 negative_sampler """basic""" +212 4 evaluator """rankbased""" +212 5 dataset """yago310""" +212 5 model """distmult""" +212 5 loss """bceaftersigmoid""" +212 5 regularizer """no""" +212 5 optimizer """adam""" +212 5 training_loop """owa""" +212 5 negative_sampler """basic""" +212 5 evaluator """rankbased""" +212 6 dataset """yago310""" +212 6 model """distmult""" +212 6 loss """bceaftersigmoid""" +212 6 regularizer """no""" +212 6 optimizer """adam""" +212 6 training_loop """owa""" +212 6 negative_sampler """basic""" +212 6 evaluator """rankbased""" +212 7 dataset """yago310""" +212 7 model """distmult""" +212 7 loss """bceaftersigmoid""" +212 7 regularizer """no""" +212 7 optimizer """adam""" +212 7 training_loop """owa""" +212 7 negative_sampler """basic""" +212 7 evaluator """rankbased""" +212 8 dataset 
"""yago310""" +212 8 model """distmult""" +212 8 loss """bceaftersigmoid""" +212 8 regularizer """no""" +212 8 optimizer """adam""" +212 8 training_loop """owa""" +212 8 negative_sampler """basic""" +212 8 evaluator """rankbased""" +212 9 dataset """yago310""" +212 9 model """distmult""" +212 9 loss """bceaftersigmoid""" +212 9 regularizer """no""" +212 9 optimizer """adam""" +212 9 training_loop """owa""" +212 9 negative_sampler """basic""" +212 9 evaluator """rankbased""" +212 10 dataset """yago310""" +212 10 model """distmult""" +212 10 loss """bceaftersigmoid""" +212 10 regularizer """no""" +212 10 optimizer """adam""" +212 10 training_loop """owa""" +212 10 negative_sampler """basic""" +212 10 evaluator """rankbased""" +212 11 dataset """yago310""" +212 11 model """distmult""" +212 11 loss """bceaftersigmoid""" +212 11 regularizer """no""" +212 11 optimizer """adam""" +212 11 training_loop """owa""" +212 11 negative_sampler """basic""" +212 11 evaluator """rankbased""" +212 12 dataset """yago310""" +212 12 model """distmult""" +212 12 loss """bceaftersigmoid""" +212 12 regularizer """no""" +212 12 optimizer """adam""" +212 12 training_loop """owa""" +212 12 negative_sampler """basic""" +212 12 evaluator """rankbased""" +212 13 dataset """yago310""" +212 13 model """distmult""" +212 13 loss """bceaftersigmoid""" +212 13 regularizer """no""" +212 13 optimizer """adam""" +212 13 training_loop """owa""" +212 13 negative_sampler """basic""" +212 13 evaluator """rankbased""" +212 14 dataset """yago310""" +212 14 model """distmult""" +212 14 loss """bceaftersigmoid""" +212 14 regularizer """no""" +212 14 optimizer """adam""" +212 14 training_loop """owa""" +212 14 negative_sampler """basic""" +212 14 evaluator """rankbased""" +212 15 dataset """yago310""" +212 15 model """distmult""" +212 15 loss """bceaftersigmoid""" +212 15 regularizer """no""" +212 15 optimizer """adam""" +212 15 training_loop """owa""" +212 15 negative_sampler """basic""" +212 15 evaluator 
"""rankbased""" +212 16 dataset """yago310""" +212 16 model """distmult""" +212 16 loss """bceaftersigmoid""" +212 16 regularizer """no""" +212 16 optimizer """adam""" +212 16 training_loop """owa""" +212 16 negative_sampler """basic""" +212 16 evaluator """rankbased""" +212 17 dataset """yago310""" +212 17 model """distmult""" +212 17 loss """bceaftersigmoid""" +212 17 regularizer """no""" +212 17 optimizer """adam""" +212 17 training_loop """owa""" +212 17 negative_sampler """basic""" +212 17 evaluator """rankbased""" +212 18 dataset """yago310""" +212 18 model """distmult""" +212 18 loss """bceaftersigmoid""" +212 18 regularizer """no""" +212 18 optimizer """adam""" +212 18 training_loop """owa""" +212 18 negative_sampler """basic""" +212 18 evaluator """rankbased""" +212 19 dataset """yago310""" +212 19 model """distmult""" +212 19 loss """bceaftersigmoid""" +212 19 regularizer """no""" +212 19 optimizer """adam""" +212 19 training_loop """owa""" +212 19 negative_sampler """basic""" +212 19 evaluator """rankbased""" +212 20 dataset """yago310""" +212 20 model """distmult""" +212 20 loss """bceaftersigmoid""" +212 20 regularizer """no""" +212 20 optimizer """adam""" +212 20 training_loop """owa""" +212 20 negative_sampler """basic""" +212 20 evaluator """rankbased""" +212 21 dataset """yago310""" +212 21 model """distmult""" +212 21 loss """bceaftersigmoid""" +212 21 regularizer """no""" +212 21 optimizer """adam""" +212 21 training_loop """owa""" +212 21 negative_sampler """basic""" +212 21 evaluator """rankbased""" +212 22 dataset """yago310""" +212 22 model """distmult""" +212 22 loss """bceaftersigmoid""" +212 22 regularizer """no""" +212 22 optimizer """adam""" +212 22 training_loop """owa""" +212 22 negative_sampler """basic""" +212 22 evaluator """rankbased""" +212 23 dataset """yago310""" +212 23 model """distmult""" +212 23 loss """bceaftersigmoid""" +212 23 regularizer """no""" +212 23 optimizer """adam""" +212 23 training_loop """owa""" +212 23 
negative_sampler """basic""" +212 23 evaluator """rankbased""" +212 24 dataset """yago310""" +212 24 model """distmult""" +212 24 loss """bceaftersigmoid""" +212 24 regularizer """no""" +212 24 optimizer """adam""" +212 24 training_loop """owa""" +212 24 negative_sampler """basic""" +212 24 evaluator """rankbased""" +212 25 dataset """yago310""" +212 25 model """distmult""" +212 25 loss """bceaftersigmoid""" +212 25 regularizer """no""" +212 25 optimizer """adam""" +212 25 training_loop """owa""" +212 25 negative_sampler """basic""" +212 25 evaluator """rankbased""" +212 26 dataset """yago310""" +212 26 model """distmult""" +212 26 loss """bceaftersigmoid""" +212 26 regularizer """no""" +212 26 optimizer """adam""" +212 26 training_loop """owa""" +212 26 negative_sampler """basic""" +212 26 evaluator """rankbased""" +212 27 dataset """yago310""" +212 27 model """distmult""" +212 27 loss """bceaftersigmoid""" +212 27 regularizer """no""" +212 27 optimizer """adam""" +212 27 training_loop """owa""" +212 27 negative_sampler """basic""" +212 27 evaluator """rankbased""" +212 28 dataset """yago310""" +212 28 model """distmult""" +212 28 loss """bceaftersigmoid""" +212 28 regularizer """no""" +212 28 optimizer """adam""" +212 28 training_loop """owa""" +212 28 negative_sampler """basic""" +212 28 evaluator """rankbased""" +212 29 dataset """yago310""" +212 29 model """distmult""" +212 29 loss """bceaftersigmoid""" +212 29 regularizer """no""" +212 29 optimizer """adam""" +212 29 training_loop """owa""" +212 29 negative_sampler """basic""" +212 29 evaluator """rankbased""" +212 30 dataset """yago310""" +212 30 model """distmult""" +212 30 loss """bceaftersigmoid""" +212 30 regularizer """no""" +212 30 optimizer """adam""" +212 30 training_loop """owa""" +212 30 negative_sampler """basic""" +212 30 evaluator """rankbased""" +213 1 model.embedding_dim 1.0 +213 1 optimizer.lr 0.03104555866641647 +213 1 negative_sampler.num_negs_per_pos 33.0 +213 1 training.batch_size 0.0 
+213 2 model.embedding_dim 1.0 +213 2 optimizer.lr 0.001211659387409536 +213 2 negative_sampler.num_negs_per_pos 38.0 +213 2 training.batch_size 1.0 +213 3 model.embedding_dim 1.0 +213 3 optimizer.lr 0.001012583714840299 +213 3 negative_sampler.num_negs_per_pos 22.0 +213 3 training.batch_size 2.0 +213 4 model.embedding_dim 1.0 +213 4 optimizer.lr 0.003817434082307807 +213 4 negative_sampler.num_negs_per_pos 32.0 +213 4 training.batch_size 0.0 +213 5 model.embedding_dim 0.0 +213 5 optimizer.lr 0.0032042155252605255 +213 5 negative_sampler.num_negs_per_pos 39.0 +213 5 training.batch_size 3.0 +213 6 model.embedding_dim 1.0 +213 6 optimizer.lr 0.006854935269894854 +213 6 negative_sampler.num_negs_per_pos 4.0 +213 6 training.batch_size 1.0 +213 7 model.embedding_dim 1.0 +213 7 optimizer.lr 0.05870079764251357 +213 7 negative_sampler.num_negs_per_pos 47.0 +213 7 training.batch_size 1.0 +213 8 model.embedding_dim 1.0 +213 8 optimizer.lr 0.03355975381363167 +213 8 negative_sampler.num_negs_per_pos 29.0 +213 8 training.batch_size 1.0 +213 9 model.embedding_dim 1.0 +213 9 optimizer.lr 0.0053410867294286955 +213 9 negative_sampler.num_negs_per_pos 17.0 +213 9 training.batch_size 2.0 +213 10 model.embedding_dim 1.0 +213 10 optimizer.lr 0.011951368203715099 +213 10 negative_sampler.num_negs_per_pos 34.0 +213 10 training.batch_size 0.0 +213 11 model.embedding_dim 0.0 +213 11 optimizer.lr 0.002997752107900286 +213 11 negative_sampler.num_negs_per_pos 37.0 +213 11 training.batch_size 0.0 +213 12 model.embedding_dim 0.0 +213 12 optimizer.lr 0.0010727023602277752 +213 12 negative_sampler.num_negs_per_pos 38.0 +213 12 training.batch_size 2.0 +213 13 model.embedding_dim 2.0 +213 13 optimizer.lr 0.08113582532572729 +213 13 negative_sampler.num_negs_per_pos 20.0 +213 13 training.batch_size 0.0 +213 14 model.embedding_dim 1.0 +213 14 optimizer.lr 0.04320394998625282 +213 14 negative_sampler.num_negs_per_pos 36.0 +213 14 training.batch_size 1.0 +213 15 model.embedding_dim 0.0 +213 15 
optimizer.lr 0.001762149268775787 +213 15 negative_sampler.num_negs_per_pos 7.0 +213 15 training.batch_size 0.0 +213 16 model.embedding_dim 2.0 +213 16 optimizer.lr 0.007465369508053801 +213 16 negative_sampler.num_negs_per_pos 37.0 +213 16 training.batch_size 3.0 +213 17 model.embedding_dim 2.0 +213 17 optimizer.lr 0.00647000233365083 +213 17 negative_sampler.num_negs_per_pos 48.0 +213 17 training.batch_size 2.0 +213 18 model.embedding_dim 2.0 +213 18 optimizer.lr 0.002130109078630826 +213 18 negative_sampler.num_negs_per_pos 43.0 +213 18 training.batch_size 0.0 +213 19 model.embedding_dim 0.0 +213 19 optimizer.lr 0.0012066432753004537 +213 19 negative_sampler.num_negs_per_pos 40.0 +213 19 training.batch_size 2.0 +213 20 model.embedding_dim 1.0 +213 20 optimizer.lr 0.007012994980356752 +213 20 negative_sampler.num_negs_per_pos 6.0 +213 20 training.batch_size 0.0 +213 21 model.embedding_dim 2.0 +213 21 optimizer.lr 0.0023010951719348873 +213 21 negative_sampler.num_negs_per_pos 11.0 +213 21 training.batch_size 1.0 +213 22 model.embedding_dim 2.0 +213 22 optimizer.lr 0.005656405460014539 +213 22 negative_sampler.num_negs_per_pos 21.0 +213 22 training.batch_size 0.0 +213 23 model.embedding_dim 2.0 +213 23 optimizer.lr 0.022548794779718172 +213 23 negative_sampler.num_negs_per_pos 2.0 +213 23 training.batch_size 1.0 +213 24 model.embedding_dim 1.0 +213 24 optimizer.lr 0.0024583434283219447 +213 24 negative_sampler.num_negs_per_pos 9.0 +213 24 training.batch_size 3.0 +213 25 model.embedding_dim 1.0 +213 25 optimizer.lr 0.06712626639150078 +213 25 negative_sampler.num_negs_per_pos 38.0 +213 25 training.batch_size 3.0 +213 26 model.embedding_dim 0.0 +213 26 optimizer.lr 0.01373101206670507 +213 26 negative_sampler.num_negs_per_pos 19.0 +213 26 training.batch_size 0.0 +213 27 model.embedding_dim 0.0 +213 27 optimizer.lr 0.07416386932238013 +213 27 negative_sampler.num_negs_per_pos 11.0 +213 27 training.batch_size 1.0 +213 28 model.embedding_dim 1.0 +213 28 optimizer.lr 
0.0013766294533609988 +213 28 negative_sampler.num_negs_per_pos 13.0 +213 28 training.batch_size 1.0 +213 29 model.embedding_dim 0.0 +213 29 optimizer.lr 0.0023579602929529255 +213 29 negative_sampler.num_negs_per_pos 16.0 +213 29 training.batch_size 2.0 +213 30 model.embedding_dim 2.0 +213 30 optimizer.lr 0.00113355532419969 +213 30 negative_sampler.num_negs_per_pos 41.0 +213 30 training.batch_size 2.0 +213 31 model.embedding_dim 2.0 +213 31 optimizer.lr 0.024322987051341463 +213 31 negative_sampler.num_negs_per_pos 43.0 +213 31 training.batch_size 2.0 +213 32 model.embedding_dim 2.0 +213 32 optimizer.lr 0.001443915673504037 +213 32 negative_sampler.num_negs_per_pos 32.0 +213 32 training.batch_size 1.0 +213 33 model.embedding_dim 1.0 +213 33 optimizer.lr 0.01006807041192801 +213 33 negative_sampler.num_negs_per_pos 2.0 +213 33 training.batch_size 3.0 +213 34 model.embedding_dim 2.0 +213 34 optimizer.lr 0.021588334911343907 +213 34 negative_sampler.num_negs_per_pos 38.0 +213 34 training.batch_size 3.0 +213 35 model.embedding_dim 1.0 +213 35 optimizer.lr 0.055384196492833916 +213 35 negative_sampler.num_negs_per_pos 17.0 +213 35 training.batch_size 3.0 +213 36 model.embedding_dim 0.0 +213 36 optimizer.lr 0.0033397468992383762 +213 36 negative_sampler.num_negs_per_pos 40.0 +213 36 training.batch_size 2.0 +213 37 model.embedding_dim 1.0 +213 37 optimizer.lr 0.004658187490702626 +213 37 negative_sampler.num_negs_per_pos 9.0 +213 37 training.batch_size 3.0 +213 38 model.embedding_dim 1.0 +213 38 optimizer.lr 0.001579457060571312 +213 38 negative_sampler.num_negs_per_pos 7.0 +213 38 training.batch_size 0.0 +213 1 dataset """yago310""" +213 1 model """distmult""" +213 1 loss """bceaftersigmoid""" +213 1 regularizer """no""" +213 1 optimizer """adam""" +213 1 training_loop """owa""" +213 1 negative_sampler """basic""" +213 1 evaluator """rankbased""" +213 2 dataset """yago310""" +213 2 model """distmult""" +213 2 loss """bceaftersigmoid""" +213 2 regularizer """no""" +213 
2 optimizer """adam""" +213 2 training_loop """owa""" +213 2 negative_sampler """basic""" +213 2 evaluator """rankbased""" +213 3 dataset """yago310""" +213 3 model """distmult""" +213 3 loss """bceaftersigmoid""" +213 3 regularizer """no""" +213 3 optimizer """adam""" +213 3 training_loop """owa""" +213 3 negative_sampler """basic""" +213 3 evaluator """rankbased""" +213 4 dataset """yago310""" +213 4 model """distmult""" +213 4 loss """bceaftersigmoid""" +213 4 regularizer """no""" +213 4 optimizer """adam""" +213 4 training_loop """owa""" +213 4 negative_sampler """basic""" +213 4 evaluator """rankbased""" +213 5 dataset """yago310""" +213 5 model """distmult""" +213 5 loss """bceaftersigmoid""" +213 5 regularizer """no""" +213 5 optimizer """adam""" +213 5 training_loop """owa""" +213 5 negative_sampler """basic""" +213 5 evaluator """rankbased""" +213 6 dataset """yago310""" +213 6 model """distmult""" +213 6 loss """bceaftersigmoid""" +213 6 regularizer """no""" +213 6 optimizer """adam""" +213 6 training_loop """owa""" +213 6 negative_sampler """basic""" +213 6 evaluator """rankbased""" +213 7 dataset """yago310""" +213 7 model """distmult""" +213 7 loss """bceaftersigmoid""" +213 7 regularizer """no""" +213 7 optimizer """adam""" +213 7 training_loop """owa""" +213 7 negative_sampler """basic""" +213 7 evaluator """rankbased""" +213 8 dataset """yago310""" +213 8 model """distmult""" +213 8 loss """bceaftersigmoid""" +213 8 regularizer """no""" +213 8 optimizer """adam""" +213 8 training_loop """owa""" +213 8 negative_sampler """basic""" +213 8 evaluator """rankbased""" +213 9 dataset """yago310""" +213 9 model """distmult""" +213 9 loss """bceaftersigmoid""" +213 9 regularizer """no""" +213 9 optimizer """adam""" +213 9 training_loop """owa""" +213 9 negative_sampler """basic""" +213 9 evaluator """rankbased""" +213 10 dataset """yago310""" +213 10 model """distmult""" +213 10 loss """bceaftersigmoid""" +213 10 regularizer """no""" +213 10 optimizer 
"""adam""" +213 10 training_loop """owa""" +213 10 negative_sampler """basic""" +213 10 evaluator """rankbased""" +213 11 dataset """yago310""" +213 11 model """distmult""" +213 11 loss """bceaftersigmoid""" +213 11 regularizer """no""" +213 11 optimizer """adam""" +213 11 training_loop """owa""" +213 11 negative_sampler """basic""" +213 11 evaluator """rankbased""" +213 12 dataset """yago310""" +213 12 model """distmult""" +213 12 loss """bceaftersigmoid""" +213 12 regularizer """no""" +213 12 optimizer """adam""" +213 12 training_loop """owa""" +213 12 negative_sampler """basic""" +213 12 evaluator """rankbased""" +213 13 dataset """yago310""" +213 13 model """distmult""" +213 13 loss """bceaftersigmoid""" +213 13 regularizer """no""" +213 13 optimizer """adam""" +213 13 training_loop """owa""" +213 13 negative_sampler """basic""" +213 13 evaluator """rankbased""" +213 14 dataset """yago310""" +213 14 model """distmult""" +213 14 loss """bceaftersigmoid""" +213 14 regularizer """no""" +213 14 optimizer """adam""" +213 14 training_loop """owa""" +213 14 negative_sampler """basic""" +213 14 evaluator """rankbased""" +213 15 dataset """yago310""" +213 15 model """distmult""" +213 15 loss """bceaftersigmoid""" +213 15 regularizer """no""" +213 15 optimizer """adam""" +213 15 training_loop """owa""" +213 15 negative_sampler """basic""" +213 15 evaluator """rankbased""" +213 16 dataset """yago310""" +213 16 model """distmult""" +213 16 loss """bceaftersigmoid""" +213 16 regularizer """no""" +213 16 optimizer """adam""" +213 16 training_loop """owa""" +213 16 negative_sampler """basic""" +213 16 evaluator """rankbased""" +213 17 dataset """yago310""" +213 17 model """distmult""" +213 17 loss """bceaftersigmoid""" +213 17 regularizer """no""" +213 17 optimizer """adam""" +213 17 training_loop """owa""" +213 17 negative_sampler """basic""" +213 17 evaluator """rankbased""" +213 18 dataset """yago310""" +213 18 model """distmult""" +213 18 loss """bceaftersigmoid""" +213 
18 regularizer """no""" +213 18 optimizer """adam""" +213 18 training_loop """owa""" +213 18 negative_sampler """basic""" +213 18 evaluator """rankbased""" +213 19 dataset """yago310""" +213 19 model """distmult""" +213 19 loss """bceaftersigmoid""" +213 19 regularizer """no""" +213 19 optimizer """adam""" +213 19 training_loop """owa""" +213 19 negative_sampler """basic""" +213 19 evaluator """rankbased""" +213 20 dataset """yago310""" +213 20 model """distmult""" +213 20 loss """bceaftersigmoid""" +213 20 regularizer """no""" +213 20 optimizer """adam""" +213 20 training_loop """owa""" +213 20 negative_sampler """basic""" +213 20 evaluator """rankbased""" +213 21 dataset """yago310""" +213 21 model """distmult""" +213 21 loss """bceaftersigmoid""" +213 21 regularizer """no""" +213 21 optimizer """adam""" +213 21 training_loop """owa""" +213 21 negative_sampler """basic""" +213 21 evaluator """rankbased""" +213 22 dataset """yago310""" +213 22 model """distmult""" +213 22 loss """bceaftersigmoid""" +213 22 regularizer """no""" +213 22 optimizer """adam""" +213 22 training_loop """owa""" +213 22 negative_sampler """basic""" +213 22 evaluator """rankbased""" +213 23 dataset """yago310""" +213 23 model """distmult""" +213 23 loss """bceaftersigmoid""" +213 23 regularizer """no""" +213 23 optimizer """adam""" +213 23 training_loop """owa""" +213 23 negative_sampler """basic""" +213 23 evaluator """rankbased""" +213 24 dataset """yago310""" +213 24 model """distmult""" +213 24 loss """bceaftersigmoid""" +213 24 regularizer """no""" +213 24 optimizer """adam""" +213 24 training_loop """owa""" +213 24 negative_sampler """basic""" +213 24 evaluator """rankbased""" +213 25 dataset """yago310""" +213 25 model """distmult""" +213 25 loss """bceaftersigmoid""" +213 25 regularizer """no""" +213 25 optimizer """adam""" +213 25 training_loop """owa""" +213 25 negative_sampler """basic""" +213 25 evaluator """rankbased""" +213 26 dataset """yago310""" +213 26 model """distmult""" 
+213 26 loss """bceaftersigmoid""" +213 26 regularizer """no""" +213 26 optimizer """adam""" +213 26 training_loop """owa""" +213 26 negative_sampler """basic""" +213 26 evaluator """rankbased""" +213 27 dataset """yago310""" +213 27 model """distmult""" +213 27 loss """bceaftersigmoid""" +213 27 regularizer """no""" +213 27 optimizer """adam""" +213 27 training_loop """owa""" +213 27 negative_sampler """basic""" +213 27 evaluator """rankbased""" +213 28 dataset """yago310""" +213 28 model """distmult""" +213 28 loss """bceaftersigmoid""" +213 28 regularizer """no""" +213 28 optimizer """adam""" +213 28 training_loop """owa""" +213 28 negative_sampler """basic""" +213 28 evaluator """rankbased""" +213 29 dataset """yago310""" +213 29 model """distmult""" +213 29 loss """bceaftersigmoid""" +213 29 regularizer """no""" +213 29 optimizer """adam""" +213 29 training_loop """owa""" +213 29 negative_sampler """basic""" +213 29 evaluator """rankbased""" +213 30 dataset """yago310""" +213 30 model """distmult""" +213 30 loss """bceaftersigmoid""" +213 30 regularizer """no""" +213 30 optimizer """adam""" +213 30 training_loop """owa""" +213 30 negative_sampler """basic""" +213 30 evaluator """rankbased""" +213 31 dataset """yago310""" +213 31 model """distmult""" +213 31 loss """bceaftersigmoid""" +213 31 regularizer """no""" +213 31 optimizer """adam""" +213 31 training_loop """owa""" +213 31 negative_sampler """basic""" +213 31 evaluator """rankbased""" +213 32 dataset """yago310""" +213 32 model """distmult""" +213 32 loss """bceaftersigmoid""" +213 32 regularizer """no""" +213 32 optimizer """adam""" +213 32 training_loop """owa""" +213 32 negative_sampler """basic""" +213 32 evaluator """rankbased""" +213 33 dataset """yago310""" +213 33 model """distmult""" +213 33 loss """bceaftersigmoid""" +213 33 regularizer """no""" +213 33 optimizer """adam""" +213 33 training_loop """owa""" +213 33 negative_sampler """basic""" +213 33 evaluator """rankbased""" +213 34 dataset 
"""yago310""" +213 34 model """distmult""" +213 34 loss """bceaftersigmoid""" +213 34 regularizer """no""" +213 34 optimizer """adam""" +213 34 training_loop """owa""" +213 34 negative_sampler """basic""" +213 34 evaluator """rankbased""" +213 35 dataset """yago310""" +213 35 model """distmult""" +213 35 loss """bceaftersigmoid""" +213 35 regularizer """no""" +213 35 optimizer """adam""" +213 35 training_loop """owa""" +213 35 negative_sampler """basic""" +213 35 evaluator """rankbased""" +213 36 dataset """yago310""" +213 36 model """distmult""" +213 36 loss """bceaftersigmoid""" +213 36 regularizer """no""" +213 36 optimizer """adam""" +213 36 training_loop """owa""" +213 36 negative_sampler """basic""" +213 36 evaluator """rankbased""" +213 37 dataset """yago310""" +213 37 model """distmult""" +213 37 loss """bceaftersigmoid""" +213 37 regularizer """no""" +213 37 optimizer """adam""" +213 37 training_loop """owa""" +213 37 negative_sampler """basic""" +213 37 evaluator """rankbased""" +213 38 dataset """yago310""" +213 38 model """distmult""" +213 38 loss """bceaftersigmoid""" +213 38 regularizer """no""" +213 38 optimizer """adam""" +213 38 training_loop """owa""" +213 38 negative_sampler """basic""" +213 38 evaluator """rankbased""" +214 1 model.embedding_dim 2.0 +214 1 loss.margin 7.34476855580912 +214 1 optimizer.lr 0.018672018703097527 +214 1 negative_sampler.num_negs_per_pos 32.0 +214 1 training.batch_size 0.0 +214 2 model.embedding_dim 2.0 +214 2 loss.margin 2.6867667095015366 +214 2 optimizer.lr 0.03767519137591151 +214 2 negative_sampler.num_negs_per_pos 41.0 +214 2 training.batch_size 2.0 +214 3 model.embedding_dim 2.0 +214 3 loss.margin 5.622905831973432 +214 3 optimizer.lr 0.06414325029059571 +214 3 negative_sampler.num_negs_per_pos 3.0 +214 3 training.batch_size 3.0 +214 4 model.embedding_dim 2.0 +214 4 loss.margin 9.967483161802432 +214 4 optimizer.lr 0.012283666312681359 +214 4 negative_sampler.num_negs_per_pos 9.0 +214 4 training.batch_size 2.0 
+214 5 model.embedding_dim 2.0 +214 5 loss.margin 1.3113972999781929 +214 5 optimizer.lr 0.05019692374640359 +214 5 negative_sampler.num_negs_per_pos 14.0 +214 5 training.batch_size 1.0 +214 6 model.embedding_dim 0.0 +214 6 loss.margin 4.721445947207221 +214 6 optimizer.lr 0.00883717465934148 +214 6 negative_sampler.num_negs_per_pos 1.0 +214 6 training.batch_size 0.0 +214 7 model.embedding_dim 2.0 +214 7 loss.margin 1.3198454906418384 +214 7 optimizer.lr 0.004476234029425085 +214 7 negative_sampler.num_negs_per_pos 33.0 +214 7 training.batch_size 2.0 +214 8 model.embedding_dim 2.0 +214 8 loss.margin 5.603955292943088 +214 8 optimizer.lr 0.0014324863779036846 +214 8 negative_sampler.num_negs_per_pos 48.0 +214 8 training.batch_size 3.0 +214 9 model.embedding_dim 1.0 +214 9 loss.margin 4.540660869816713 +214 9 optimizer.lr 0.0054791880374380555 +214 9 negative_sampler.num_negs_per_pos 19.0 +214 9 training.batch_size 0.0 +214 10 model.embedding_dim 0.0 +214 10 loss.margin 6.9998414740545085 +214 10 optimizer.lr 0.024143726460746053 +214 10 negative_sampler.num_negs_per_pos 23.0 +214 10 training.batch_size 0.0 +214 11 model.embedding_dim 0.0 +214 11 loss.margin 2.328375863294498 +214 11 optimizer.lr 0.0018125639580671934 +214 11 negative_sampler.num_negs_per_pos 43.0 +214 11 training.batch_size 0.0 +214 12 model.embedding_dim 2.0 +214 12 loss.margin 9.13343300985799 +214 12 optimizer.lr 0.010215512174004857 +214 12 negative_sampler.num_negs_per_pos 21.0 +214 12 training.batch_size 1.0 +214 13 model.embedding_dim 0.0 +214 13 loss.margin 7.577794030890379 +214 13 optimizer.lr 0.06667193006995659 +214 13 negative_sampler.num_negs_per_pos 18.0 +214 13 training.batch_size 3.0 +214 14 model.embedding_dim 0.0 +214 14 loss.margin 5.4766054040435765 +214 14 optimizer.lr 0.05395133336250376 +214 14 negative_sampler.num_negs_per_pos 16.0 +214 14 training.batch_size 1.0 +214 15 model.embedding_dim 0.0 +214 15 loss.margin 7.959956895992573 +214 15 optimizer.lr 0.0310928521714264 
+214 15 negative_sampler.num_negs_per_pos 24.0 +214 15 training.batch_size 3.0 +214 16 model.embedding_dim 0.0 +214 16 loss.margin 6.216698276005194 +214 16 optimizer.lr 0.0048176815260164395 +214 16 negative_sampler.num_negs_per_pos 40.0 +214 16 training.batch_size 2.0 +214 17 model.embedding_dim 1.0 +214 17 loss.margin 4.384287869693297 +214 17 optimizer.lr 0.016790824539101343 +214 17 negative_sampler.num_negs_per_pos 6.0 +214 17 training.batch_size 3.0 +214 18 model.embedding_dim 1.0 +214 18 loss.margin 2.5431391924121476 +214 18 optimizer.lr 0.05807096929947213 +214 18 negative_sampler.num_negs_per_pos 12.0 +214 18 training.batch_size 2.0 +214 19 model.embedding_dim 0.0 +214 19 loss.margin 9.293860444264766 +214 19 optimizer.lr 0.025337608726946206 +214 19 negative_sampler.num_negs_per_pos 2.0 +214 19 training.batch_size 1.0 +214 20 model.embedding_dim 2.0 +214 20 loss.margin 7.311324064274487 +214 20 optimizer.lr 0.010536871801706803 +214 20 negative_sampler.num_negs_per_pos 16.0 +214 20 training.batch_size 3.0 +214 21 model.embedding_dim 1.0 +214 21 loss.margin 5.690419757177975 +214 21 optimizer.lr 0.010085196997072124 +214 21 negative_sampler.num_negs_per_pos 0.0 +214 21 training.batch_size 1.0 +214 22 model.embedding_dim 1.0 +214 22 loss.margin 6.190147693516553 +214 22 optimizer.lr 0.02891287217319033 +214 22 negative_sampler.num_negs_per_pos 30.0 +214 22 training.batch_size 2.0 +214 23 model.embedding_dim 1.0 +214 23 loss.margin 0.8198392750255877 +214 23 optimizer.lr 0.013325734297297318 +214 23 negative_sampler.num_negs_per_pos 46.0 +214 23 training.batch_size 3.0 +214 24 model.embedding_dim 0.0 +214 24 loss.margin 4.591842449866823 +214 24 optimizer.lr 0.010828851972659809 +214 24 negative_sampler.num_negs_per_pos 33.0 +214 24 training.batch_size 1.0 +214 25 model.embedding_dim 1.0 +214 25 loss.margin 6.339954775437149 +214 25 optimizer.lr 0.005121323353858399 +214 25 negative_sampler.num_negs_per_pos 39.0 +214 25 training.batch_size 3.0 +214 26 
model.embedding_dim 0.0 +214 26 loss.margin 6.013977804751018 +214 26 optimizer.lr 0.001768128989883127 +214 26 negative_sampler.num_negs_per_pos 4.0 +214 26 training.batch_size 3.0 +214 27 model.embedding_dim 0.0 +214 27 loss.margin 6.895524887293269 +214 27 optimizer.lr 0.0362997537266643 +214 27 negative_sampler.num_negs_per_pos 31.0 +214 27 training.batch_size 2.0 +214 28 model.embedding_dim 1.0 +214 28 loss.margin 5.032222272737858 +214 28 optimizer.lr 0.06337737355980312 +214 28 negative_sampler.num_negs_per_pos 37.0 +214 28 training.batch_size 1.0 +214 29 model.embedding_dim 1.0 +214 29 loss.margin 4.310344867656674 +214 29 optimizer.lr 0.0027563292437880692 +214 29 negative_sampler.num_negs_per_pos 7.0 +214 29 training.batch_size 1.0 +214 30 model.embedding_dim 1.0 +214 30 loss.margin 9.645196559757302 +214 30 optimizer.lr 0.0027640410315885244 +214 30 negative_sampler.num_negs_per_pos 8.0 +214 30 training.batch_size 0.0 +214 31 model.embedding_dim 1.0 +214 31 loss.margin 3.0089497815919084 +214 31 optimizer.lr 0.007467245911106849 +214 31 negative_sampler.num_negs_per_pos 34.0 +214 31 training.batch_size 1.0 +214 32 model.embedding_dim 1.0 +214 32 loss.margin 3.9011990519262416 +214 32 optimizer.lr 0.01229453697325439 +214 32 negative_sampler.num_negs_per_pos 48.0 +214 32 training.batch_size 2.0 +214 33 model.embedding_dim 0.0 +214 33 loss.margin 5.912656916521924 +214 33 optimizer.lr 0.001539406767342198 +214 33 negative_sampler.num_negs_per_pos 24.0 +214 33 training.batch_size 3.0 +214 34 model.embedding_dim 2.0 +214 34 loss.margin 5.384344768667124 +214 34 optimizer.lr 0.09696514533779803 +214 34 negative_sampler.num_negs_per_pos 5.0 +214 34 training.batch_size 3.0 +214 35 model.embedding_dim 1.0 +214 35 loss.margin 2.847348191951067 +214 35 optimizer.lr 0.02889554601372476 +214 35 negative_sampler.num_negs_per_pos 47.0 +214 35 training.batch_size 2.0 +214 36 model.embedding_dim 1.0 +214 36 loss.margin 0.9004494257656481 +214 36 optimizer.lr 
0.004237309203737969 +214 36 negative_sampler.num_negs_per_pos 3.0 +214 36 training.batch_size 0.0 +214 37 model.embedding_dim 2.0 +214 37 loss.margin 4.292697526606826 +214 37 optimizer.lr 0.0035895695198170092 +214 37 negative_sampler.num_negs_per_pos 5.0 +214 37 training.batch_size 0.0 +214 38 model.embedding_dim 2.0 +214 38 loss.margin 3.2085338255434763 +214 38 optimizer.lr 0.09155222898864476 +214 38 negative_sampler.num_negs_per_pos 39.0 +214 38 training.batch_size 1.0 +214 39 model.embedding_dim 1.0 +214 39 loss.margin 8.164900412432367 +214 39 optimizer.lr 0.09012797755629036 +214 39 negative_sampler.num_negs_per_pos 32.0 +214 39 training.batch_size 3.0 +214 40 model.embedding_dim 0.0 +214 40 loss.margin 7.652751751482983 +214 40 optimizer.lr 0.0024201879142042183 +214 40 negative_sampler.num_negs_per_pos 32.0 +214 40 training.batch_size 0.0 +214 1 dataset """yago310""" +214 1 model """distmult""" +214 1 loss """marginranking""" +214 1 regularizer """no""" +214 1 optimizer """adam""" +214 1 training_loop """owa""" +214 1 negative_sampler """basic""" +214 1 evaluator """rankbased""" +214 2 dataset """yago310""" +214 2 model """distmult""" +214 2 loss """marginranking""" +214 2 regularizer """no""" +214 2 optimizer """adam""" +214 2 training_loop """owa""" +214 2 negative_sampler """basic""" +214 2 evaluator """rankbased""" +214 3 dataset """yago310""" +214 3 model """distmult""" +214 3 loss """marginranking""" +214 3 regularizer """no""" +214 3 optimizer """adam""" +214 3 training_loop """owa""" +214 3 negative_sampler """basic""" +214 3 evaluator """rankbased""" +214 4 dataset """yago310""" +214 4 model """distmult""" +214 4 loss """marginranking""" +214 4 regularizer """no""" +214 4 optimizer """adam""" +214 4 training_loop """owa""" +214 4 negative_sampler """basic""" +214 4 evaluator """rankbased""" +214 5 dataset """yago310""" +214 5 model """distmult""" +214 5 loss """marginranking""" +214 5 regularizer """no""" +214 5 optimizer """adam""" +214 5 
training_loop """owa""" +214 5 negative_sampler """basic""" +214 5 evaluator """rankbased""" +214 6 dataset """yago310""" +214 6 model """distmult""" +214 6 loss """marginranking""" +214 6 regularizer """no""" +214 6 optimizer """adam""" +214 6 training_loop """owa""" +214 6 negative_sampler """basic""" +214 6 evaluator """rankbased""" +214 7 dataset """yago310""" +214 7 model """distmult""" +214 7 loss """marginranking""" +214 7 regularizer """no""" +214 7 optimizer """adam""" +214 7 training_loop """owa""" +214 7 negative_sampler """basic""" +214 7 evaluator """rankbased""" +214 8 dataset """yago310""" +214 8 model """distmult""" +214 8 loss """marginranking""" +214 8 regularizer """no""" +214 8 optimizer """adam""" +214 8 training_loop """owa""" +214 8 negative_sampler """basic""" +214 8 evaluator """rankbased""" +214 9 dataset """yago310""" +214 9 model """distmult""" +214 9 loss """marginranking""" +214 9 regularizer """no""" +214 9 optimizer """adam""" +214 9 training_loop """owa""" +214 9 negative_sampler """basic""" +214 9 evaluator """rankbased""" +214 10 dataset """yago310""" +214 10 model """distmult""" +214 10 loss """marginranking""" +214 10 regularizer """no""" +214 10 optimizer """adam""" +214 10 training_loop """owa""" +214 10 negative_sampler """basic""" +214 10 evaluator """rankbased""" +214 11 dataset """yago310""" +214 11 model """distmult""" +214 11 loss """marginranking""" +214 11 regularizer """no""" +214 11 optimizer """adam""" +214 11 training_loop """owa""" +214 11 negative_sampler """basic""" +214 11 evaluator """rankbased""" +214 12 dataset """yago310""" +214 12 model """distmult""" +214 12 loss """marginranking""" +214 12 regularizer """no""" +214 12 optimizer """adam""" +214 12 training_loop """owa""" +214 12 negative_sampler """basic""" +214 12 evaluator """rankbased""" +214 13 dataset """yago310""" +214 13 model """distmult""" +214 13 loss """marginranking""" +214 13 regularizer """no""" +214 13 optimizer """adam""" +214 13 
training_loop """owa""" +214 13 negative_sampler """basic""" +214 13 evaluator """rankbased""" +214 14 dataset """yago310""" +214 14 model """distmult""" +214 14 loss """marginranking""" +214 14 regularizer """no""" +214 14 optimizer """adam""" +214 14 training_loop """owa""" +214 14 negative_sampler """basic""" +214 14 evaluator """rankbased""" +214 15 dataset """yago310""" +214 15 model """distmult""" +214 15 loss """marginranking""" +214 15 regularizer """no""" +214 15 optimizer """adam""" +214 15 training_loop """owa""" +214 15 negative_sampler """basic""" +214 15 evaluator """rankbased""" +214 16 dataset """yago310""" +214 16 model """distmult""" +214 16 loss """marginranking""" +214 16 regularizer """no""" +214 16 optimizer """adam""" +214 16 training_loop """owa""" +214 16 negative_sampler """basic""" +214 16 evaluator """rankbased""" +214 17 dataset """yago310""" +214 17 model """distmult""" +214 17 loss """marginranking""" +214 17 regularizer """no""" +214 17 optimizer """adam""" +214 17 training_loop """owa""" +214 17 negative_sampler """basic""" +214 17 evaluator """rankbased""" +214 18 dataset """yago310""" +214 18 model """distmult""" +214 18 loss """marginranking""" +214 18 regularizer """no""" +214 18 optimizer """adam""" +214 18 training_loop """owa""" +214 18 negative_sampler """basic""" +214 18 evaluator """rankbased""" +214 19 dataset """yago310""" +214 19 model """distmult""" +214 19 loss """marginranking""" +214 19 regularizer """no""" +214 19 optimizer """adam""" +214 19 training_loop """owa""" +214 19 negative_sampler """basic""" +214 19 evaluator """rankbased""" +214 20 dataset """yago310""" +214 20 model """distmult""" +214 20 loss """marginranking""" +214 20 regularizer """no""" +214 20 optimizer """adam""" +214 20 training_loop """owa""" +214 20 negative_sampler """basic""" +214 20 evaluator """rankbased""" +214 21 dataset """yago310""" +214 21 model """distmult""" +214 21 loss """marginranking""" +214 21 regularizer """no""" +214 21 
optimizer """adam""" +214 21 training_loop """owa""" +214 21 negative_sampler """basic""" +214 21 evaluator """rankbased""" +214 22 dataset """yago310""" +214 22 model """distmult""" +214 22 loss """marginranking""" +214 22 regularizer """no""" +214 22 optimizer """adam""" +214 22 training_loop """owa""" +214 22 negative_sampler """basic""" +214 22 evaluator """rankbased""" +214 23 dataset """yago310""" +214 23 model """distmult""" +214 23 loss """marginranking""" +214 23 regularizer """no""" +214 23 optimizer """adam""" +214 23 training_loop """owa""" +214 23 negative_sampler """basic""" +214 23 evaluator """rankbased""" +214 24 dataset """yago310""" +214 24 model """distmult""" +214 24 loss """marginranking""" +214 24 regularizer """no""" +214 24 optimizer """adam""" +214 24 training_loop """owa""" +214 24 negative_sampler """basic""" +214 24 evaluator """rankbased""" +214 25 dataset """yago310""" +214 25 model """distmult""" +214 25 loss """marginranking""" +214 25 regularizer """no""" +214 25 optimizer """adam""" +214 25 training_loop """owa""" +214 25 negative_sampler """basic""" +214 25 evaluator """rankbased""" +214 26 dataset """yago310""" +214 26 model """distmult""" +214 26 loss """marginranking""" +214 26 regularizer """no""" +214 26 optimizer """adam""" +214 26 training_loop """owa""" +214 26 negative_sampler """basic""" +214 26 evaluator """rankbased""" +214 27 dataset """yago310""" +214 27 model """distmult""" +214 27 loss """marginranking""" +214 27 regularizer """no""" +214 27 optimizer """adam""" +214 27 training_loop """owa""" +214 27 negative_sampler """basic""" +214 27 evaluator """rankbased""" +214 28 dataset """yago310""" +214 28 model """distmult""" +214 28 loss """marginranking""" +214 28 regularizer """no""" +214 28 optimizer """adam""" +214 28 training_loop """owa""" +214 28 negative_sampler """basic""" +214 28 evaluator """rankbased""" +214 29 dataset """yago310""" +214 29 model """distmult""" +214 29 loss """marginranking""" +214 29 
regularizer """no""" +214 29 optimizer """adam""" +214 29 training_loop """owa""" +214 29 negative_sampler """basic""" +214 29 evaluator """rankbased""" +214 30 dataset """yago310""" +214 30 model """distmult""" +214 30 loss """marginranking""" +214 30 regularizer """no""" +214 30 optimizer """adam""" +214 30 training_loop """owa""" +214 30 negative_sampler """basic""" +214 30 evaluator """rankbased""" +214 31 dataset """yago310""" +214 31 model """distmult""" +214 31 loss """marginranking""" +214 31 regularizer """no""" +214 31 optimizer """adam""" +214 31 training_loop """owa""" +214 31 negative_sampler """basic""" +214 31 evaluator """rankbased""" +214 32 dataset """yago310""" +214 32 model """distmult""" +214 32 loss """marginranking""" +214 32 regularizer """no""" +214 32 optimizer """adam""" +214 32 training_loop """owa""" +214 32 negative_sampler """basic""" +214 32 evaluator """rankbased""" +214 33 dataset """yago310""" +214 33 model """distmult""" +214 33 loss """marginranking""" +214 33 regularizer """no""" +214 33 optimizer """adam""" +214 33 training_loop """owa""" +214 33 negative_sampler """basic""" +214 33 evaluator """rankbased""" +214 34 dataset """yago310""" +214 34 model """distmult""" +214 34 loss """marginranking""" +214 34 regularizer """no""" +214 34 optimizer """adam""" +214 34 training_loop """owa""" +214 34 negative_sampler """basic""" +214 34 evaluator """rankbased""" +214 35 dataset """yago310""" +214 35 model """distmult""" +214 35 loss """marginranking""" +214 35 regularizer """no""" +214 35 optimizer """adam""" +214 35 training_loop """owa""" +214 35 negative_sampler """basic""" +214 35 evaluator """rankbased""" +214 36 dataset """yago310""" +214 36 model """distmult""" +214 36 loss """marginranking""" +214 36 regularizer """no""" +214 36 optimizer """adam""" +214 36 training_loop """owa""" +214 36 negative_sampler """basic""" +214 36 evaluator """rankbased""" +214 37 dataset """yago310""" +214 37 model """distmult""" +214 37 loss 
"""marginranking""" +214 37 regularizer """no""" +214 37 optimizer """adam""" +214 37 training_loop """owa""" +214 37 negative_sampler """basic""" +214 37 evaluator """rankbased""" +214 38 dataset """yago310""" +214 38 model """distmult""" +214 38 loss """marginranking""" +214 38 regularizer """no""" +214 38 optimizer """adam""" +214 38 training_loop """owa""" +214 38 negative_sampler """basic""" +214 38 evaluator """rankbased""" +214 39 dataset """yago310""" +214 39 model """distmult""" +214 39 loss """marginranking""" +214 39 regularizer """no""" +214 39 optimizer """adam""" +214 39 training_loop """owa""" +214 39 negative_sampler """basic""" +214 39 evaluator """rankbased""" +214 40 dataset """yago310""" +214 40 model """distmult""" +214 40 loss """marginranking""" +214 40 regularizer """no""" +214 40 optimizer """adam""" +214 40 training_loop """owa""" +214 40 negative_sampler """basic""" +214 40 evaluator """rankbased""" +215 1 model.embedding_dim 0.0 +215 1 loss.margin 3.82112374427901 +215 1 optimizer.lr 0.004288633355998644 +215 1 negative_sampler.num_negs_per_pos 12.0 +215 1 training.batch_size 1.0 +215 2 model.embedding_dim 1.0 +215 2 loss.margin 3.225666884568635 +215 2 optimizer.lr 0.06346800537989397 +215 2 negative_sampler.num_negs_per_pos 33.0 +215 2 training.batch_size 0.0 +215 3 model.embedding_dim 0.0 +215 3 loss.margin 3.5863384404221703 +215 3 optimizer.lr 0.03336918492113477 +215 3 negative_sampler.num_negs_per_pos 38.0 +215 3 training.batch_size 2.0 +215 4 model.embedding_dim 1.0 +215 4 loss.margin 4.472743054177685 +215 4 optimizer.lr 0.0013346241939584619 +215 4 negative_sampler.num_negs_per_pos 2.0 +215 4 training.batch_size 0.0 +215 5 model.embedding_dim 0.0 +215 5 loss.margin 9.000348126141544 +215 5 optimizer.lr 0.01973108877095351 +215 5 negative_sampler.num_negs_per_pos 22.0 +215 5 training.batch_size 3.0 +215 6 model.embedding_dim 0.0 +215 6 loss.margin 3.5114959805633603 +215 6 optimizer.lr 0.013184058826095607 +215 6 
negative_sampler.num_negs_per_pos 49.0 +215 6 training.batch_size 3.0 +215 7 model.embedding_dim 0.0 +215 7 loss.margin 3.546627749972671 +215 7 optimizer.lr 0.016362514245234848 +215 7 negative_sampler.num_negs_per_pos 43.0 +215 7 training.batch_size 0.0 +215 8 model.embedding_dim 0.0 +215 8 loss.margin 2.8146100655178086 +215 8 optimizer.lr 0.028098825124853503 +215 8 negative_sampler.num_negs_per_pos 7.0 +215 8 training.batch_size 1.0 +215 9 model.embedding_dim 1.0 +215 9 loss.margin 8.94706413522895 +215 9 optimizer.lr 0.025890490453424547 +215 9 negative_sampler.num_negs_per_pos 21.0 +215 9 training.batch_size 1.0 +215 10 model.embedding_dim 1.0 +215 10 loss.margin 1.7575622499386332 +215 10 optimizer.lr 0.07092724906406034 +215 10 negative_sampler.num_negs_per_pos 3.0 +215 10 training.batch_size 2.0 +215 11 model.embedding_dim 0.0 +215 11 loss.margin 6.04093142635867 +215 11 optimizer.lr 0.003083249160947338 +215 11 negative_sampler.num_negs_per_pos 41.0 +215 11 training.batch_size 2.0 +215 12 model.embedding_dim 2.0 +215 12 loss.margin 5.609584881266634 +215 12 optimizer.lr 0.05455010304983688 +215 12 negative_sampler.num_negs_per_pos 18.0 +215 12 training.batch_size 3.0 +215 13 model.embedding_dim 2.0 +215 13 loss.margin 1.9391417392895423 +215 13 optimizer.lr 0.018768559743360835 +215 13 negative_sampler.num_negs_per_pos 29.0 +215 13 training.batch_size 0.0 +215 14 model.embedding_dim 1.0 +215 14 loss.margin 5.111487405242116 +215 14 optimizer.lr 0.01443018246687905 +215 14 negative_sampler.num_negs_per_pos 9.0 +215 14 training.batch_size 3.0 +215 15 model.embedding_dim 2.0 +215 15 loss.margin 9.586368667664715 +215 15 optimizer.lr 0.0014564215255765092 +215 15 negative_sampler.num_negs_per_pos 11.0 +215 15 training.batch_size 0.0 +215 16 model.embedding_dim 0.0 +215 16 loss.margin 4.2055174545500185 +215 16 optimizer.lr 0.007247950039075476 +215 16 negative_sampler.num_negs_per_pos 39.0 +215 16 training.batch_size 1.0 +215 17 model.embedding_dim 0.0 +215 
17 loss.margin 3.2087953410228613 +215 17 optimizer.lr 0.026471048550602224 +215 17 negative_sampler.num_negs_per_pos 5.0 +215 17 training.batch_size 3.0 +215 18 model.embedding_dim 2.0 +215 18 loss.margin 9.417350516185998 +215 18 optimizer.lr 0.053252377985316776 +215 18 negative_sampler.num_negs_per_pos 7.0 +215 18 training.batch_size 2.0 +215 19 model.embedding_dim 2.0 +215 19 loss.margin 6.455206342146536 +215 19 optimizer.lr 0.028383147100824645 +215 19 negative_sampler.num_negs_per_pos 20.0 +215 19 training.batch_size 0.0 +215 20 model.embedding_dim 0.0 +215 20 loss.margin 8.675683229647623 +215 20 optimizer.lr 0.028506520779908538 +215 20 negative_sampler.num_negs_per_pos 26.0 +215 20 training.batch_size 2.0 +215 21 model.embedding_dim 2.0 +215 21 loss.margin 1.6871142029541437 +215 21 optimizer.lr 0.001534875476118011 +215 21 negative_sampler.num_negs_per_pos 49.0 +215 21 training.batch_size 2.0 +215 22 model.embedding_dim 1.0 +215 22 loss.margin 8.552435212325165 +215 22 optimizer.lr 0.005521821986765789 +215 22 negative_sampler.num_negs_per_pos 30.0 +215 22 training.batch_size 3.0 +215 23 model.embedding_dim 2.0 +215 23 loss.margin 1.521576287334848 +215 23 optimizer.lr 0.014578051257639174 +215 23 negative_sampler.num_negs_per_pos 43.0 +215 23 training.batch_size 3.0 +215 24 model.embedding_dim 1.0 +215 24 loss.margin 4.261503721000392 +215 24 optimizer.lr 0.099350938121488 +215 24 negative_sampler.num_negs_per_pos 0.0 +215 24 training.batch_size 1.0 +215 25 model.embedding_dim 2.0 +215 25 loss.margin 1.1678520244001354 +215 25 optimizer.lr 0.014970634122331886 +215 25 negative_sampler.num_negs_per_pos 13.0 +215 25 training.batch_size 3.0 +215 26 model.embedding_dim 0.0 +215 26 loss.margin 8.559958810632633 +215 26 optimizer.lr 0.0527793979040643 +215 26 negative_sampler.num_negs_per_pos 12.0 +215 26 training.batch_size 1.0 +215 27 model.embedding_dim 0.0 +215 27 loss.margin 1.5668318434411976 +215 27 optimizer.lr 0.08964533553954031 +215 27 
negative_sampler.num_negs_per_pos 48.0 +215 27 training.batch_size 1.0 +215 28 model.embedding_dim 0.0 +215 28 loss.margin 5.3620232685308835 +215 28 optimizer.lr 0.06781382541865259 +215 28 negative_sampler.num_negs_per_pos 8.0 +215 28 training.batch_size 1.0 +215 29 model.embedding_dim 1.0 +215 29 loss.margin 8.149906994382956 +215 29 optimizer.lr 0.019333534028769965 +215 29 negative_sampler.num_negs_per_pos 9.0 +215 29 training.batch_size 2.0 +215 30 model.embedding_dim 2.0 +215 30 loss.margin 9.433412158611478 +215 30 optimizer.lr 0.016437635676938275 +215 30 negative_sampler.num_negs_per_pos 45.0 +215 30 training.batch_size 2.0 +215 31 model.embedding_dim 0.0 +215 31 loss.margin 1.949737179856917 +215 31 optimizer.lr 0.022629318279520232 +215 31 negative_sampler.num_negs_per_pos 4.0 +215 31 training.batch_size 1.0 +215 32 model.embedding_dim 0.0 +215 32 loss.margin 4.649535503274724 +215 32 optimizer.lr 0.019352318603490806 +215 32 negative_sampler.num_negs_per_pos 37.0 +215 32 training.batch_size 2.0 +215 33 model.embedding_dim 1.0 +215 33 loss.margin 7.575618045130681 +215 33 optimizer.lr 0.06008833448720723 +215 33 negative_sampler.num_negs_per_pos 5.0 +215 33 training.batch_size 2.0 +215 34 model.embedding_dim 1.0 +215 34 loss.margin 0.7326731827242303 +215 34 optimizer.lr 0.015226897816659023 +215 34 negative_sampler.num_negs_per_pos 1.0 +215 34 training.batch_size 3.0 +215 35 model.embedding_dim 0.0 +215 35 loss.margin 7.471341163554118 +215 35 optimizer.lr 0.0026812636539804004 +215 35 negative_sampler.num_negs_per_pos 32.0 +215 35 training.batch_size 0.0 +215 36 model.embedding_dim 0.0 +215 36 loss.margin 0.5467500091154021 +215 36 optimizer.lr 0.07315472411450959 +215 36 negative_sampler.num_negs_per_pos 33.0 +215 36 training.batch_size 3.0 +215 37 model.embedding_dim 1.0 +215 37 loss.margin 6.188991809335484 +215 37 optimizer.lr 0.002094097116184599 +215 37 negative_sampler.num_negs_per_pos 16.0 +215 37 training.batch_size 3.0 +215 38 
model.embedding_dim 2.0 +215 38 loss.margin 2.489393005589465 +215 38 optimizer.lr 0.012530053878293573 +215 38 negative_sampler.num_negs_per_pos 4.0 +215 38 training.batch_size 1.0 +215 39 model.embedding_dim 0.0 +215 39 loss.margin 3.50536301536144 +215 39 optimizer.lr 0.011119151912045955 +215 39 negative_sampler.num_negs_per_pos 16.0 +215 39 training.batch_size 0.0 +215 40 model.embedding_dim 0.0 +215 40 loss.margin 1.2204193190694372 +215 40 optimizer.lr 0.0020786978595924875 +215 40 negative_sampler.num_negs_per_pos 15.0 +215 40 training.batch_size 0.0 +215 41 model.embedding_dim 2.0 +215 41 loss.margin 8.011513987165175 +215 41 optimizer.lr 0.006284431208143815 +215 41 negative_sampler.num_negs_per_pos 42.0 +215 41 training.batch_size 2.0 +215 42 model.embedding_dim 2.0 +215 42 loss.margin 9.980498340181237 +215 42 optimizer.lr 0.03750817352079762 +215 42 negative_sampler.num_negs_per_pos 7.0 +215 42 training.batch_size 3.0 +215 43 model.embedding_dim 0.0 +215 43 loss.margin 4.15489665303952 +215 43 optimizer.lr 0.005822191371096252 +215 43 negative_sampler.num_negs_per_pos 20.0 +215 43 training.batch_size 2.0 +215 44 model.embedding_dim 0.0 +215 44 loss.margin 1.139264614331077 +215 44 optimizer.lr 0.009130920838331558 +215 44 negative_sampler.num_negs_per_pos 49.0 +215 44 training.batch_size 3.0 +215 45 model.embedding_dim 2.0 +215 45 loss.margin 8.369018468560169 +215 45 optimizer.lr 0.030511479399660816 +215 45 negative_sampler.num_negs_per_pos 48.0 +215 45 training.batch_size 1.0 +215 46 model.embedding_dim 1.0 +215 46 loss.margin 6.353124466405846 +215 46 optimizer.lr 0.0061737829287096 +215 46 negative_sampler.num_negs_per_pos 25.0 +215 46 training.batch_size 0.0 +215 47 model.embedding_dim 1.0 +215 47 loss.margin 6.137382822488164 +215 47 optimizer.lr 0.007662442727914473 +215 47 negative_sampler.num_negs_per_pos 6.0 +215 47 training.batch_size 2.0 +215 48 model.embedding_dim 2.0 +215 48 loss.margin 3.3907590267655405 +215 48 optimizer.lr 
0.030007594710836548 +215 48 negative_sampler.num_negs_per_pos 19.0 +215 48 training.batch_size 1.0 +215 49 model.embedding_dim 2.0 +215 49 loss.margin 7.579954199436189 +215 49 optimizer.lr 0.014802026701687522 +215 49 negative_sampler.num_negs_per_pos 8.0 +215 49 training.batch_size 3.0 +215 50 model.embedding_dim 2.0 +215 50 loss.margin 1.2836803415927038 +215 50 optimizer.lr 0.005754969608793095 +215 50 negative_sampler.num_negs_per_pos 10.0 +215 50 training.batch_size 2.0 +215 51 model.embedding_dim 0.0 +215 51 loss.margin 4.390484829725416 +215 51 optimizer.lr 0.0952000760299073 +215 51 negative_sampler.num_negs_per_pos 17.0 +215 51 training.batch_size 2.0 +215 52 model.embedding_dim 1.0 +215 52 loss.margin 3.569863656309753 +215 52 optimizer.lr 0.011236300859714258 +215 52 negative_sampler.num_negs_per_pos 35.0 +215 52 training.batch_size 3.0 +215 53 model.embedding_dim 1.0 +215 53 loss.margin 7.3623444824817526 +215 53 optimizer.lr 0.010620613093696009 +215 53 negative_sampler.num_negs_per_pos 46.0 +215 53 training.batch_size 3.0 +215 54 model.embedding_dim 1.0 +215 54 loss.margin 9.334754931607902 +215 54 optimizer.lr 0.0015205618350369176 +215 54 negative_sampler.num_negs_per_pos 5.0 +215 54 training.batch_size 2.0 +215 55 model.embedding_dim 0.0 +215 55 loss.margin 7.483774394032407 +215 55 optimizer.lr 0.023094094685871397 +215 55 negative_sampler.num_negs_per_pos 38.0 +215 55 training.batch_size 1.0 +215 56 model.embedding_dim 0.0 +215 56 loss.margin 5.718545655365414 +215 56 optimizer.lr 0.001788538067867326 +215 56 negative_sampler.num_negs_per_pos 21.0 +215 56 training.batch_size 0.0 +215 57 model.embedding_dim 2.0 +215 57 loss.margin 0.6438194632519847 +215 57 optimizer.lr 0.0033501973672408415 +215 57 negative_sampler.num_negs_per_pos 23.0 +215 57 training.batch_size 0.0 +215 58 model.embedding_dim 2.0 +215 58 loss.margin 5.179933719169961 +215 58 optimizer.lr 0.01573211460069446 +215 58 negative_sampler.num_negs_per_pos 49.0 +215 58 
training.batch_size 0.0 +215 59 model.embedding_dim 1.0 +215 59 loss.margin 9.773799575002581 +215 59 optimizer.lr 0.0017568636393888216 +215 59 negative_sampler.num_negs_per_pos 14.0 +215 59 training.batch_size 2.0 +215 60 model.embedding_dim 0.0 +215 60 loss.margin 9.38577233906922 +215 60 optimizer.lr 0.0932954662420638 +215 60 negative_sampler.num_negs_per_pos 32.0 +215 60 training.batch_size 3.0 +215 61 model.embedding_dim 1.0 +215 61 loss.margin 3.775759061899926 +215 61 optimizer.lr 0.0018727204062493636 +215 61 negative_sampler.num_negs_per_pos 20.0 +215 61 training.batch_size 3.0 +215 62 model.embedding_dim 1.0 +215 62 loss.margin 4.766668702119814 +215 62 optimizer.lr 0.00798782909837476 +215 62 negative_sampler.num_negs_per_pos 49.0 +215 62 training.batch_size 2.0 +215 63 model.embedding_dim 0.0 +215 63 loss.margin 2.079653165484051 +215 63 optimizer.lr 0.012419189891736243 +215 63 negative_sampler.num_negs_per_pos 42.0 +215 63 training.batch_size 0.0 +215 64 model.embedding_dim 1.0 +215 64 loss.margin 7.447410234156095 +215 64 optimizer.lr 0.003279775678175717 +215 64 negative_sampler.num_negs_per_pos 6.0 +215 64 training.batch_size 0.0 +215 65 model.embedding_dim 2.0 +215 65 loss.margin 8.989009948609416 +215 65 optimizer.lr 0.014948572517223601 +215 65 negative_sampler.num_negs_per_pos 40.0 +215 65 training.batch_size 1.0 +215 66 model.embedding_dim 2.0 +215 66 loss.margin 8.562127605644095 +215 66 optimizer.lr 0.0022983617882865173 +215 66 negative_sampler.num_negs_per_pos 39.0 +215 66 training.batch_size 1.0 +215 1 dataset """yago310""" +215 1 model """distmult""" +215 1 loss """marginranking""" +215 1 regularizer """no""" +215 1 optimizer """adam""" +215 1 training_loop """owa""" +215 1 negative_sampler """basic""" +215 1 evaluator """rankbased""" +215 2 dataset """yago310""" +215 2 model """distmult""" +215 2 loss """marginranking""" +215 2 regularizer """no""" +215 2 optimizer """adam""" +215 2 training_loop """owa""" +215 2 negative_sampler 
"""basic""" +215 2 evaluator """rankbased""" +215 3 dataset """yago310""" +215 3 model """distmult""" +215 3 loss """marginranking""" +215 3 regularizer """no""" +215 3 optimizer """adam""" +215 3 training_loop """owa""" +215 3 negative_sampler """basic""" +215 3 evaluator """rankbased""" +215 4 dataset """yago310""" +215 4 model """distmult""" +215 4 loss """marginranking""" +215 4 regularizer """no""" +215 4 optimizer """adam""" +215 4 training_loop """owa""" +215 4 negative_sampler """basic""" +215 4 evaluator """rankbased""" +215 5 dataset """yago310""" +215 5 model """distmult""" +215 5 loss """marginranking""" +215 5 regularizer """no""" +215 5 optimizer """adam""" +215 5 training_loop """owa""" +215 5 negative_sampler """basic""" +215 5 evaluator """rankbased""" +215 6 dataset """yago310""" +215 6 model """distmult""" +215 6 loss """marginranking""" +215 6 regularizer """no""" +215 6 optimizer """adam""" +215 6 training_loop """owa""" +215 6 negative_sampler """basic""" +215 6 evaluator """rankbased""" +215 7 dataset """yago310""" +215 7 model """distmult""" +215 7 loss """marginranking""" +215 7 regularizer """no""" +215 7 optimizer """adam""" +215 7 training_loop """owa""" +215 7 negative_sampler """basic""" +215 7 evaluator """rankbased""" +215 8 dataset """yago310""" +215 8 model """distmult""" +215 8 loss """marginranking""" +215 8 regularizer """no""" +215 8 optimizer """adam""" +215 8 training_loop """owa""" +215 8 negative_sampler """basic""" +215 8 evaluator """rankbased""" +215 9 dataset """yago310""" +215 9 model """distmult""" +215 9 loss """marginranking""" +215 9 regularizer """no""" +215 9 optimizer """adam""" +215 9 training_loop """owa""" +215 9 negative_sampler """basic""" +215 9 evaluator """rankbased""" +215 10 dataset """yago310""" +215 10 model """distmult""" +215 10 loss """marginranking""" +215 10 regularizer """no""" +215 10 optimizer """adam""" +215 10 training_loop """owa""" +215 10 negative_sampler """basic""" +215 10 evaluator 
"""rankbased""" +215 11 dataset """yago310""" +215 11 model """distmult""" +215 11 loss """marginranking""" +215 11 regularizer """no""" +215 11 optimizer """adam""" +215 11 training_loop """owa""" +215 11 negative_sampler """basic""" +215 11 evaluator """rankbased""" +215 12 dataset """yago310""" +215 12 model """distmult""" +215 12 loss """marginranking""" +215 12 regularizer """no""" +215 12 optimizer """adam""" +215 12 training_loop """owa""" +215 12 negative_sampler """basic""" +215 12 evaluator """rankbased""" +215 13 dataset """yago310""" +215 13 model """distmult""" +215 13 loss """marginranking""" +215 13 regularizer """no""" +215 13 optimizer """adam""" +215 13 training_loop """owa""" +215 13 negative_sampler """basic""" +215 13 evaluator """rankbased""" +215 14 dataset """yago310""" +215 14 model """distmult""" +215 14 loss """marginranking""" +215 14 regularizer """no""" +215 14 optimizer """adam""" +215 14 training_loop """owa""" +215 14 negative_sampler """basic""" +215 14 evaluator """rankbased""" +215 15 dataset """yago310""" +215 15 model """distmult""" +215 15 loss """marginranking""" +215 15 regularizer """no""" +215 15 optimizer """adam""" +215 15 training_loop """owa""" +215 15 negative_sampler """basic""" +215 15 evaluator """rankbased""" +215 16 dataset """yago310""" +215 16 model """distmult""" +215 16 loss """marginranking""" +215 16 regularizer """no""" +215 16 optimizer """adam""" +215 16 training_loop """owa""" +215 16 negative_sampler """basic""" +215 16 evaluator """rankbased""" +215 17 dataset """yago310""" +215 17 model """distmult""" +215 17 loss """marginranking""" +215 17 regularizer """no""" +215 17 optimizer """adam""" +215 17 training_loop """owa""" +215 17 negative_sampler """basic""" +215 17 evaluator """rankbased""" +215 18 dataset """yago310""" +215 18 model """distmult""" +215 18 loss """marginranking""" +215 18 regularizer """no""" +215 18 optimizer """adam""" +215 18 training_loop """owa""" +215 18 negative_sampler 
"""basic""" +215 18 evaluator """rankbased""" +215 19 dataset """yago310""" +215 19 model """distmult""" +215 19 loss """marginranking""" +215 19 regularizer """no""" +215 19 optimizer """adam""" +215 19 training_loop """owa""" +215 19 negative_sampler """basic""" +215 19 evaluator """rankbased""" +215 20 dataset """yago310""" +215 20 model """distmult""" +215 20 loss """marginranking""" +215 20 regularizer """no""" +215 20 optimizer """adam""" +215 20 training_loop """owa""" +215 20 negative_sampler """basic""" +215 20 evaluator """rankbased""" +215 21 dataset """yago310""" +215 21 model """distmult""" +215 21 loss """marginranking""" +215 21 regularizer """no""" +215 21 optimizer """adam""" +215 21 training_loop """owa""" +215 21 negative_sampler """basic""" +215 21 evaluator """rankbased""" +215 22 dataset """yago310""" +215 22 model """distmult""" +215 22 loss """marginranking""" +215 22 regularizer """no""" +215 22 optimizer """adam""" +215 22 training_loop """owa""" +215 22 negative_sampler """basic""" +215 22 evaluator """rankbased""" +215 23 dataset """yago310""" +215 23 model """distmult""" +215 23 loss """marginranking""" +215 23 regularizer """no""" +215 23 optimizer """adam""" +215 23 training_loop """owa""" +215 23 negative_sampler """basic""" +215 23 evaluator """rankbased""" +215 24 dataset """yago310""" +215 24 model """distmult""" +215 24 loss """marginranking""" +215 24 regularizer """no""" +215 24 optimizer """adam""" +215 24 training_loop """owa""" +215 24 negative_sampler """basic""" +215 24 evaluator """rankbased""" +215 25 dataset """yago310""" +215 25 model """distmult""" +215 25 loss """marginranking""" +215 25 regularizer """no""" +215 25 optimizer """adam""" +215 25 training_loop """owa""" +215 25 negative_sampler """basic""" +215 25 evaluator """rankbased""" +215 26 dataset """yago310""" +215 26 model """distmult""" +215 26 loss """marginranking""" +215 26 regularizer """no""" +215 26 optimizer """adam""" +215 26 training_loop """owa""" 
+215 26 negative_sampler """basic""" +215 26 evaluator """rankbased""" +215 27 dataset """yago310""" +215 27 model """distmult""" +215 27 loss """marginranking""" +215 27 regularizer """no""" +215 27 optimizer """adam""" +215 27 training_loop """owa""" +215 27 negative_sampler """basic""" +215 27 evaluator """rankbased""" +215 28 dataset """yago310""" +215 28 model """distmult""" +215 28 loss """marginranking""" +215 28 regularizer """no""" +215 28 optimizer """adam""" +215 28 training_loop """owa""" +215 28 negative_sampler """basic""" +215 28 evaluator """rankbased""" +215 29 dataset """yago310""" +215 29 model """distmult""" +215 29 loss """marginranking""" +215 29 regularizer """no""" +215 29 optimizer """adam""" +215 29 training_loop """owa""" +215 29 negative_sampler """basic""" +215 29 evaluator """rankbased""" +215 30 dataset """yago310""" +215 30 model """distmult""" +215 30 loss """marginranking""" +215 30 regularizer """no""" +215 30 optimizer """adam""" +215 30 training_loop """owa""" +215 30 negative_sampler """basic""" +215 30 evaluator """rankbased""" +215 31 dataset """yago310""" +215 31 model """distmult""" +215 31 loss """marginranking""" +215 31 regularizer """no""" +215 31 optimizer """adam""" +215 31 training_loop """owa""" +215 31 negative_sampler """basic""" +215 31 evaluator """rankbased""" +215 32 dataset """yago310""" +215 32 model """distmult""" +215 32 loss """marginranking""" +215 32 regularizer """no""" +215 32 optimizer """adam""" +215 32 training_loop """owa""" +215 32 negative_sampler """basic""" +215 32 evaluator """rankbased""" +215 33 dataset """yago310""" +215 33 model """distmult""" +215 33 loss """marginranking""" +215 33 regularizer """no""" +215 33 optimizer """adam""" +215 33 training_loop """owa""" +215 33 negative_sampler """basic""" +215 33 evaluator """rankbased""" +215 34 dataset """yago310""" +215 34 model """distmult""" +215 34 loss """marginranking""" +215 34 regularizer """no""" +215 34 optimizer """adam""" +215 34 
training_loop """owa""" +215 34 negative_sampler """basic""" +215 34 evaluator """rankbased""" +215 35 dataset """yago310""" +215 35 model """distmult""" +215 35 loss """marginranking""" +215 35 regularizer """no""" +215 35 optimizer """adam""" +215 35 training_loop """owa""" +215 35 negative_sampler """basic""" +215 35 evaluator """rankbased""" +215 36 dataset """yago310""" +215 36 model """distmult""" +215 36 loss """marginranking""" +215 36 regularizer """no""" +215 36 optimizer """adam""" +215 36 training_loop """owa""" +215 36 negative_sampler """basic""" +215 36 evaluator """rankbased""" +215 37 dataset """yago310""" +215 37 model """distmult""" +215 37 loss """marginranking""" +215 37 regularizer """no""" +215 37 optimizer """adam""" +215 37 training_loop """owa""" +215 37 negative_sampler """basic""" +215 37 evaluator """rankbased""" +215 38 dataset """yago310""" +215 38 model """distmult""" +215 38 loss """marginranking""" +215 38 regularizer """no""" +215 38 optimizer """adam""" +215 38 training_loop """owa""" +215 38 negative_sampler """basic""" +215 38 evaluator """rankbased""" +215 39 dataset """yago310""" +215 39 model """distmult""" +215 39 loss """marginranking""" +215 39 regularizer """no""" +215 39 optimizer """adam""" +215 39 training_loop """owa""" +215 39 negative_sampler """basic""" +215 39 evaluator """rankbased""" +215 40 dataset """yago310""" +215 40 model """distmult""" +215 40 loss """marginranking""" +215 40 regularizer """no""" +215 40 optimizer """adam""" +215 40 training_loop """owa""" +215 40 negative_sampler """basic""" +215 40 evaluator """rankbased""" +215 41 dataset """yago310""" +215 41 model """distmult""" +215 41 loss """marginranking""" +215 41 regularizer """no""" +215 41 optimizer """adam""" +215 41 training_loop """owa""" +215 41 negative_sampler """basic""" +215 41 evaluator """rankbased""" +215 42 dataset """yago310""" +215 42 model """distmult""" +215 42 loss """marginranking""" +215 42 regularizer """no""" +215 42 
optimizer """adam""" +215 42 training_loop """owa""" +215 42 negative_sampler """basic""" +215 42 evaluator """rankbased""" +215 43 dataset """yago310""" +215 43 model """distmult""" +215 43 loss """marginranking""" +215 43 regularizer """no""" +215 43 optimizer """adam""" +215 43 training_loop """owa""" +215 43 negative_sampler """basic""" +215 43 evaluator """rankbased""" +215 44 dataset """yago310""" +215 44 model """distmult""" +215 44 loss """marginranking""" +215 44 regularizer """no""" +215 44 optimizer """adam""" +215 44 training_loop """owa""" +215 44 negative_sampler """basic""" +215 44 evaluator """rankbased""" +215 45 dataset """yago310""" +215 45 model """distmult""" +215 45 loss """marginranking""" +215 45 regularizer """no""" +215 45 optimizer """adam""" +215 45 training_loop """owa""" +215 45 negative_sampler """basic""" +215 45 evaluator """rankbased""" +215 46 dataset """yago310""" +215 46 model """distmult""" +215 46 loss """marginranking""" +215 46 regularizer """no""" +215 46 optimizer """adam""" +215 46 training_loop """owa""" +215 46 negative_sampler """basic""" +215 46 evaluator """rankbased""" +215 47 dataset """yago310""" +215 47 model """distmult""" +215 47 loss """marginranking""" +215 47 regularizer """no""" +215 47 optimizer """adam""" +215 47 training_loop """owa""" +215 47 negative_sampler """basic""" +215 47 evaluator """rankbased""" +215 48 dataset """yago310""" +215 48 model """distmult""" +215 48 loss """marginranking""" +215 48 regularizer """no""" +215 48 optimizer """adam""" +215 48 training_loop """owa""" +215 48 negative_sampler """basic""" +215 48 evaluator """rankbased""" +215 49 dataset """yago310""" +215 49 model """distmult""" +215 49 loss """marginranking""" +215 49 regularizer """no""" +215 49 optimizer """adam""" +215 49 training_loop """owa""" +215 49 negative_sampler """basic""" +215 49 evaluator """rankbased""" +215 50 dataset """yago310""" +215 50 model """distmult""" +215 50 loss """marginranking""" +215 50 
regularizer """no""" +215 50 optimizer """adam""" +215 50 training_loop """owa""" +215 50 negative_sampler """basic""" +215 50 evaluator """rankbased""" +215 51 dataset """yago310""" +215 51 model """distmult""" +215 51 loss """marginranking""" +215 51 regularizer """no""" +215 51 optimizer """adam""" +215 51 training_loop """owa""" +215 51 negative_sampler """basic""" +215 51 evaluator """rankbased""" +215 52 dataset """yago310""" +215 52 model """distmult""" +215 52 loss """marginranking""" +215 52 regularizer """no""" +215 52 optimizer """adam""" +215 52 training_loop """owa""" +215 52 negative_sampler """basic""" +215 52 evaluator """rankbased""" +215 53 dataset """yago310""" +215 53 model """distmult""" +215 53 loss """marginranking""" +215 53 regularizer """no""" +215 53 optimizer """adam""" +215 53 training_loop """owa""" +215 53 negative_sampler """basic""" +215 53 evaluator """rankbased""" +215 54 dataset """yago310""" +215 54 model """distmult""" +215 54 loss """marginranking""" +215 54 regularizer """no""" +215 54 optimizer """adam""" +215 54 training_loop """owa""" +215 54 negative_sampler """basic""" +215 54 evaluator """rankbased""" +215 55 dataset """yago310""" +215 55 model """distmult""" +215 55 loss """marginranking""" +215 55 regularizer """no""" +215 55 optimizer """adam""" +215 55 training_loop """owa""" +215 55 negative_sampler """basic""" +215 55 evaluator """rankbased""" +215 56 dataset """yago310""" +215 56 model """distmult""" +215 56 loss """marginranking""" +215 56 regularizer """no""" +215 56 optimizer """adam""" +215 56 training_loop """owa""" +215 56 negative_sampler """basic""" +215 56 evaluator """rankbased""" +215 57 dataset """yago310""" +215 57 model """distmult""" +215 57 loss """marginranking""" +215 57 regularizer """no""" +215 57 optimizer """adam""" +215 57 training_loop """owa""" +215 57 negative_sampler """basic""" +215 57 evaluator """rankbased""" +215 58 dataset """yago310""" +215 58 model """distmult""" +215 58 loss 
"""marginranking""" +215 58 regularizer """no""" +215 58 optimizer """adam""" +215 58 training_loop """owa""" +215 58 negative_sampler """basic""" +215 58 evaluator """rankbased""" +215 59 dataset """yago310""" +215 59 model """distmult""" +215 59 loss """marginranking""" +215 59 regularizer """no""" +215 59 optimizer """adam""" +215 59 training_loop """owa""" +215 59 negative_sampler """basic""" +215 59 evaluator """rankbased""" +215 60 dataset """yago310""" +215 60 model """distmult""" +215 60 loss """marginranking""" +215 60 regularizer """no""" +215 60 optimizer """adam""" +215 60 training_loop """owa""" +215 60 negative_sampler """basic""" +215 60 evaluator """rankbased""" +215 61 dataset """yago310""" +215 61 model """distmult""" +215 61 loss """marginranking""" +215 61 regularizer """no""" +215 61 optimizer """adam""" +215 61 training_loop """owa""" +215 61 negative_sampler """basic""" +215 61 evaluator """rankbased""" +215 62 dataset """yago310""" +215 62 model """distmult""" +215 62 loss """marginranking""" +215 62 regularizer """no""" +215 62 optimizer """adam""" +215 62 training_loop """owa""" +215 62 negative_sampler """basic""" +215 62 evaluator """rankbased""" +215 63 dataset """yago310""" +215 63 model """distmult""" +215 63 loss """marginranking""" +215 63 regularizer """no""" +215 63 optimizer """adam""" +215 63 training_loop """owa""" +215 63 negative_sampler """basic""" +215 63 evaluator """rankbased""" +215 64 dataset """yago310""" +215 64 model """distmult""" +215 64 loss """marginranking""" +215 64 regularizer """no""" +215 64 optimizer """adam""" +215 64 training_loop """owa""" +215 64 negative_sampler """basic""" +215 64 evaluator """rankbased""" +215 65 dataset """yago310""" +215 65 model """distmult""" +215 65 loss """marginranking""" +215 65 regularizer """no""" +215 65 optimizer """adam""" +215 65 training_loop """owa""" +215 65 negative_sampler """basic""" +215 65 evaluator """rankbased""" +215 66 dataset """yago310""" +215 66 model 
"""distmult""" +215 66 loss """marginranking""" +215 66 regularizer """no""" +215 66 optimizer """adam""" +215 66 training_loop """owa""" +215 66 negative_sampler """basic""" +215 66 evaluator """rankbased""" +216 1 model.embedding_dim 2.0 +216 1 loss.margin 10.696457291892429 +216 1 loss.adversarial_temperature 0.6400858836488094 +216 1 optimizer.lr 0.0014182421050380164 +216 1 negative_sampler.num_negs_per_pos 43.0 +216 1 training.batch_size 0.0 +216 2 model.embedding_dim 1.0 +216 2 loss.margin 11.548429432773156 +216 2 loss.adversarial_temperature 0.663083062907825 +216 2 optimizer.lr 0.002283592238192419 +216 2 negative_sampler.num_negs_per_pos 7.0 +216 2 training.batch_size 2.0 +216 3 model.embedding_dim 0.0 +216 3 loss.margin 16.668487307034702 +216 3 loss.adversarial_temperature 0.6140819369994023 +216 3 optimizer.lr 0.011458586329172548 +216 3 negative_sampler.num_negs_per_pos 33.0 +216 3 training.batch_size 0.0 +216 4 model.embedding_dim 2.0 +216 4 loss.margin 5.02156359427337 +216 4 loss.adversarial_temperature 0.4812449153067253 +216 4 optimizer.lr 0.0012902091124481737 +216 4 negative_sampler.num_negs_per_pos 5.0 +216 4 training.batch_size 3.0 +216 5 model.embedding_dim 2.0 +216 5 loss.margin 2.962955600603052 +216 5 loss.adversarial_temperature 0.7697598148236204 +216 5 optimizer.lr 0.007390944501188939 +216 5 negative_sampler.num_negs_per_pos 44.0 +216 5 training.batch_size 1.0 +216 6 model.embedding_dim 1.0 +216 6 loss.margin 27.55222403412376 +216 6 loss.adversarial_temperature 0.9641033699143686 +216 6 optimizer.lr 0.0342446423238025 +216 6 negative_sampler.num_negs_per_pos 28.0 +216 6 training.batch_size 3.0 +216 7 model.embedding_dim 2.0 +216 7 loss.margin 2.4697430535956513 +216 7 loss.adversarial_temperature 0.5001839008652803 +216 7 optimizer.lr 0.015289508374169257 +216 7 negative_sampler.num_negs_per_pos 10.0 +216 7 training.batch_size 2.0 +216 8 model.embedding_dim 0.0 +216 8 loss.margin 24.150776402708477 +216 8 
loss.adversarial_temperature 0.5888106075087659 +216 8 optimizer.lr 0.01945275426081545 +216 8 negative_sampler.num_negs_per_pos 13.0 +216 8 training.batch_size 0.0 +216 9 model.embedding_dim 2.0 +216 9 loss.margin 19.69776122025374 +216 9 loss.adversarial_temperature 0.5208158937101899 +216 9 optimizer.lr 0.036012206610611365 +216 9 negative_sampler.num_negs_per_pos 34.0 +216 9 training.batch_size 0.0 +216 10 model.embedding_dim 2.0 +216 10 loss.margin 8.543450384409761 +216 10 loss.adversarial_temperature 0.4061399683872047 +216 10 optimizer.lr 0.0011851462090900942 +216 10 negative_sampler.num_negs_per_pos 16.0 +216 10 training.batch_size 0.0 +216 11 model.embedding_dim 1.0 +216 11 loss.margin 11.058073078694177 +216 11 loss.adversarial_temperature 0.4880021683737369 +216 11 optimizer.lr 0.0015369003339149314 +216 11 negative_sampler.num_negs_per_pos 23.0 +216 11 training.batch_size 0.0 +216 12 model.embedding_dim 1.0 +216 12 loss.margin 24.889997904762616 +216 12 loss.adversarial_temperature 0.7147739156524942 +216 12 optimizer.lr 0.0010734446216479072 +216 12 negative_sampler.num_negs_per_pos 45.0 +216 12 training.batch_size 3.0 +216 13 model.embedding_dim 2.0 +216 13 loss.margin 3.0345574074781645 +216 13 loss.adversarial_temperature 0.870644748256858 +216 13 optimizer.lr 0.00664332347709327 +216 13 negative_sampler.num_negs_per_pos 12.0 +216 13 training.batch_size 1.0 +216 14 model.embedding_dim 1.0 +216 14 loss.margin 16.76201630233382 +216 14 loss.adversarial_temperature 0.7649376676378513 +216 14 optimizer.lr 0.013539395726203343 +216 14 negative_sampler.num_negs_per_pos 48.0 +216 14 training.batch_size 0.0 +216 15 model.embedding_dim 2.0 +216 15 loss.margin 4.90164321947066 +216 15 loss.adversarial_temperature 0.3482045326519957 +216 15 optimizer.lr 0.06920466536718622 +216 15 negative_sampler.num_negs_per_pos 11.0 +216 15 training.batch_size 3.0 +216 16 model.embedding_dim 1.0 +216 16 loss.margin 14.20022696076597 +216 16 loss.adversarial_temperature 
0.9292833656502134 +216 16 optimizer.lr 0.012322061708338099 +216 16 negative_sampler.num_negs_per_pos 20.0 +216 16 training.batch_size 3.0 +216 17 model.embedding_dim 1.0 +216 17 loss.margin 25.041040964493167 +216 17 loss.adversarial_temperature 0.840011123149775 +216 17 optimizer.lr 0.03981123829403542 +216 17 negative_sampler.num_negs_per_pos 18.0 +216 17 training.batch_size 2.0 +216 18 model.embedding_dim 2.0 +216 18 loss.margin 22.63607458301256 +216 18 loss.adversarial_temperature 0.27498692773057015 +216 18 optimizer.lr 0.012578325803000454 +216 18 negative_sampler.num_negs_per_pos 37.0 +216 18 training.batch_size 1.0 +216 19 model.embedding_dim 2.0 +216 19 loss.margin 25.225026383174228 +216 19 loss.adversarial_temperature 0.2830271534152738 +216 19 optimizer.lr 0.0010189270038425465 +216 19 negative_sampler.num_negs_per_pos 5.0 +216 19 training.batch_size 0.0 +216 1 dataset """yago310""" +216 1 model """distmult""" +216 1 loss """nssa""" +216 1 regularizer """no""" +216 1 optimizer """adam""" +216 1 training_loop """owa""" +216 1 negative_sampler """basic""" +216 1 evaluator """rankbased""" +216 2 dataset """yago310""" +216 2 model """distmult""" +216 2 loss """nssa""" +216 2 regularizer """no""" +216 2 optimizer """adam""" +216 2 training_loop """owa""" +216 2 negative_sampler """basic""" +216 2 evaluator """rankbased""" +216 3 dataset """yago310""" +216 3 model """distmult""" +216 3 loss """nssa""" +216 3 regularizer """no""" +216 3 optimizer """adam""" +216 3 training_loop """owa""" +216 3 negative_sampler """basic""" +216 3 evaluator """rankbased""" +216 4 dataset """yago310""" +216 4 model """distmult""" +216 4 loss """nssa""" +216 4 regularizer """no""" +216 4 optimizer """adam""" +216 4 training_loop """owa""" +216 4 negative_sampler """basic""" +216 4 evaluator """rankbased""" +216 5 dataset """yago310""" +216 5 model """distmult""" +216 5 loss """nssa""" +216 5 regularizer """no""" +216 5 optimizer """adam""" +216 5 training_loop """owa""" +216 5 
negative_sampler """basic""" +216 5 evaluator """rankbased""" +216 6 dataset """yago310""" +216 6 model """distmult""" +216 6 loss """nssa""" +216 6 regularizer """no""" +216 6 optimizer """adam""" +216 6 training_loop """owa""" +216 6 negative_sampler """basic""" +216 6 evaluator """rankbased""" +216 7 dataset """yago310""" +216 7 model """distmult""" +216 7 loss """nssa""" +216 7 regularizer """no""" +216 7 optimizer """adam""" +216 7 training_loop """owa""" +216 7 negative_sampler """basic""" +216 7 evaluator """rankbased""" +216 8 dataset """yago310""" +216 8 model """distmult""" +216 8 loss """nssa""" +216 8 regularizer """no""" +216 8 optimizer """adam""" +216 8 training_loop """owa""" +216 8 negative_sampler """basic""" +216 8 evaluator """rankbased""" +216 9 dataset """yago310""" +216 9 model """distmult""" +216 9 loss """nssa""" +216 9 regularizer """no""" +216 9 optimizer """adam""" +216 9 training_loop """owa""" +216 9 negative_sampler """basic""" +216 9 evaluator """rankbased""" +216 10 dataset """yago310""" +216 10 model """distmult""" +216 10 loss """nssa""" +216 10 regularizer """no""" +216 10 optimizer """adam""" +216 10 training_loop """owa""" +216 10 negative_sampler """basic""" +216 10 evaluator """rankbased""" +216 11 dataset """yago310""" +216 11 model """distmult""" +216 11 loss """nssa""" +216 11 regularizer """no""" +216 11 optimizer """adam""" +216 11 training_loop """owa""" +216 11 negative_sampler """basic""" +216 11 evaluator """rankbased""" +216 12 dataset """yago310""" +216 12 model """distmult""" +216 12 loss """nssa""" +216 12 regularizer """no""" +216 12 optimizer """adam""" +216 12 training_loop """owa""" +216 12 negative_sampler """basic""" +216 12 evaluator """rankbased""" +216 13 dataset """yago310""" +216 13 model """distmult""" +216 13 loss """nssa""" +216 13 regularizer """no""" +216 13 optimizer """adam""" +216 13 training_loop """owa""" +216 13 negative_sampler """basic""" +216 13 evaluator """rankbased""" +216 14 dataset 
"""yago310""" +216 14 model """distmult""" +216 14 loss """nssa""" +216 14 regularizer """no""" +216 14 optimizer """adam""" +216 14 training_loop """owa""" +216 14 negative_sampler """basic""" +216 14 evaluator """rankbased""" +216 15 dataset """yago310""" +216 15 model """distmult""" +216 15 loss """nssa""" +216 15 regularizer """no""" +216 15 optimizer """adam""" +216 15 training_loop """owa""" +216 15 negative_sampler """basic""" +216 15 evaluator """rankbased""" +216 16 dataset """yago310""" +216 16 model """distmult""" +216 16 loss """nssa""" +216 16 regularizer """no""" +216 16 optimizer """adam""" +216 16 training_loop """owa""" +216 16 negative_sampler """basic""" +216 16 evaluator """rankbased""" +216 17 dataset """yago310""" +216 17 model """distmult""" +216 17 loss """nssa""" +216 17 regularizer """no""" +216 17 optimizer """adam""" +216 17 training_loop """owa""" +216 17 negative_sampler """basic""" +216 17 evaluator """rankbased""" +216 18 dataset """yago310""" +216 18 model """distmult""" +216 18 loss """nssa""" +216 18 regularizer """no""" +216 18 optimizer """adam""" +216 18 training_loop """owa""" +216 18 negative_sampler """basic""" +216 18 evaluator """rankbased""" +216 19 dataset """yago310""" +216 19 model """distmult""" +216 19 loss """nssa""" +216 19 regularizer """no""" +216 19 optimizer """adam""" +216 19 training_loop """owa""" +216 19 negative_sampler """basic""" +216 19 evaluator """rankbased""" +217 1 model.embedding_dim 0.0 +217 1 loss.margin 1.3529834974346386 +217 1 loss.adversarial_temperature 0.9220042832126595 +217 1 optimizer.lr 0.015140249001677496 +217 1 negative_sampler.num_negs_per_pos 7.0 +217 1 training.batch_size 1.0 +217 2 model.embedding_dim 1.0 +217 2 loss.margin 25.488244435218142 +217 2 loss.adversarial_temperature 0.2413755917804127 +217 2 optimizer.lr 0.004815708689243645 +217 2 negative_sampler.num_negs_per_pos 44.0 +217 2 training.batch_size 3.0 +217 3 model.embedding_dim 2.0 +217 3 loss.margin 23.250715992887056 
+217 3 loss.adversarial_temperature 0.6854931957203857 +217 3 optimizer.lr 0.013447878451473182 +217 3 negative_sampler.num_negs_per_pos 5.0 +217 3 training.batch_size 1.0 +217 4 model.embedding_dim 0.0 +217 4 loss.margin 19.5830986614623 +217 4 loss.adversarial_temperature 0.26344572702273833 +217 4 optimizer.lr 0.0011925655971924934 +217 4 negative_sampler.num_negs_per_pos 33.0 +217 4 training.batch_size 2.0 +217 5 model.embedding_dim 1.0 +217 5 loss.margin 16.304241661773077 +217 5 loss.adversarial_temperature 0.9589753586520545 +217 5 optimizer.lr 0.026019375757239484 +217 5 negative_sampler.num_negs_per_pos 4.0 +217 5 training.batch_size 2.0 +217 6 model.embedding_dim 2.0 +217 6 loss.margin 19.38066517617082 +217 6 loss.adversarial_temperature 0.7466533580947748 +217 6 optimizer.lr 0.0012476445932300515 +217 6 negative_sampler.num_negs_per_pos 10.0 +217 6 training.batch_size 2.0 +217 7 model.embedding_dim 2.0 +217 7 loss.margin 6.780326355725127 +217 7 loss.adversarial_temperature 0.8057940454041834 +217 7 optimizer.lr 0.02450739242176059 +217 7 negative_sampler.num_negs_per_pos 46.0 +217 7 training.batch_size 0.0 +217 8 model.embedding_dim 1.0 +217 8 loss.margin 13.435521617509872 +217 8 loss.adversarial_temperature 0.46473496576051077 +217 8 optimizer.lr 0.0012806426755263283 +217 8 negative_sampler.num_negs_per_pos 18.0 +217 8 training.batch_size 1.0 +217 9 model.embedding_dim 2.0 +217 9 loss.margin 6.616344079209662 +217 9 loss.adversarial_temperature 0.6880429340267823 +217 9 optimizer.lr 0.026999579309005116 +217 9 negative_sampler.num_negs_per_pos 25.0 +217 9 training.batch_size 1.0 +217 10 model.embedding_dim 2.0 +217 10 loss.margin 26.7336700793017 +217 10 loss.adversarial_temperature 0.8361883857009355 +217 10 optimizer.lr 0.006481979781591162 +217 10 negative_sampler.num_negs_per_pos 28.0 +217 10 training.batch_size 0.0 +217 11 model.embedding_dim 0.0 +217 11 loss.margin 28.331203381441117 +217 11 loss.adversarial_temperature 0.7565081190879045 +217 
11 optimizer.lr 0.005563521549482913 +217 11 negative_sampler.num_negs_per_pos 37.0 +217 11 training.batch_size 3.0 +217 12 model.embedding_dim 2.0 +217 12 loss.margin 18.86963844659678 +217 12 loss.adversarial_temperature 0.3534342071802097 +217 12 optimizer.lr 0.06945868509047505 +217 12 negative_sampler.num_negs_per_pos 43.0 +217 12 training.batch_size 1.0 +217 13 model.embedding_dim 1.0 +217 13 loss.margin 1.8499777736289875 +217 13 loss.adversarial_temperature 0.5641370983154602 +217 13 optimizer.lr 0.04825794164047326 +217 13 negative_sampler.num_negs_per_pos 19.0 +217 13 training.batch_size 3.0 +217 14 model.embedding_dim 1.0 +217 14 loss.margin 28.154964050962754 +217 14 loss.adversarial_temperature 0.40536208364716786 +217 14 optimizer.lr 0.003231130889109115 +217 14 negative_sampler.num_negs_per_pos 4.0 +217 14 training.batch_size 1.0 +217 15 model.embedding_dim 1.0 +217 15 loss.margin 28.2957367866485 +217 15 loss.adversarial_temperature 0.4659593020046825 +217 15 optimizer.lr 0.039509121744823184 +217 15 negative_sampler.num_negs_per_pos 27.0 +217 15 training.batch_size 2.0 +217 16 model.embedding_dim 2.0 +217 16 loss.margin 5.98350333862007 +217 16 loss.adversarial_temperature 0.8554036403778805 +217 16 optimizer.lr 0.0011077805921949895 +217 16 negative_sampler.num_negs_per_pos 5.0 +217 16 training.batch_size 3.0 +217 17 model.embedding_dim 1.0 +217 17 loss.margin 26.769841236242666 +217 17 loss.adversarial_temperature 0.5991890316314994 +217 17 optimizer.lr 0.0031231599462318653 +217 17 negative_sampler.num_negs_per_pos 31.0 +217 17 training.batch_size 3.0 +217 18 model.embedding_dim 2.0 +217 18 loss.margin 25.24415447279032 +217 18 loss.adversarial_temperature 0.9440731504604902 +217 18 optimizer.lr 0.020932795455437955 +217 18 negative_sampler.num_negs_per_pos 17.0 +217 18 training.batch_size 1.0 +217 19 model.embedding_dim 0.0 +217 19 loss.margin 10.94290833816099 +217 19 loss.adversarial_temperature 0.6995496433814709 +217 19 optimizer.lr 
0.04359635125359347 +217 19 negative_sampler.num_negs_per_pos 48.0 +217 19 training.batch_size 0.0 +217 20 model.embedding_dim 1.0 +217 20 loss.margin 12.854477430900877 +217 20 loss.adversarial_temperature 0.6534408216227711 +217 20 optimizer.lr 0.04021603806778856 +217 20 negative_sampler.num_negs_per_pos 36.0 +217 20 training.batch_size 3.0 +217 21 model.embedding_dim 1.0 +217 21 loss.margin 29.40945808858789 +217 21 loss.adversarial_temperature 0.937739298414678 +217 21 optimizer.lr 0.005407423545352365 +217 21 negative_sampler.num_negs_per_pos 31.0 +217 21 training.batch_size 0.0 +217 22 model.embedding_dim 1.0 +217 22 loss.margin 14.006460459516948 +217 22 loss.adversarial_temperature 0.10991767374072303 +217 22 optimizer.lr 0.002504758281386441 +217 22 negative_sampler.num_negs_per_pos 40.0 +217 22 training.batch_size 0.0 +217 23 model.embedding_dim 1.0 +217 23 loss.margin 2.910132009851476 +217 23 loss.adversarial_temperature 0.5943428183211017 +217 23 optimizer.lr 0.007332359625028311 +217 23 negative_sampler.num_negs_per_pos 16.0 +217 23 training.batch_size 2.0 +217 24 model.embedding_dim 2.0 +217 24 loss.margin 12.390170257133455 +217 24 loss.adversarial_temperature 0.6636386402711981 +217 24 optimizer.lr 0.002671600570053759 +217 24 negative_sampler.num_negs_per_pos 32.0 +217 24 training.batch_size 1.0 +217 25 model.embedding_dim 0.0 +217 25 loss.margin 17.514227433770113 +217 25 loss.adversarial_temperature 0.45628551203041146 +217 25 optimizer.lr 0.00840077748957271 +217 25 negative_sampler.num_negs_per_pos 39.0 +217 25 training.batch_size 1.0 +217 26 model.embedding_dim 2.0 +217 26 loss.margin 9.66225482131584 +217 26 loss.adversarial_temperature 0.43550493925604183 +217 26 optimizer.lr 0.03325285977543543 +217 26 negative_sampler.num_negs_per_pos 11.0 +217 26 training.batch_size 3.0 +217 27 model.embedding_dim 2.0 +217 27 loss.margin 14.166762005159665 +217 27 loss.adversarial_temperature 0.4164675956048036 +217 27 optimizer.lr 0.0534673610854422 
+217 27 negative_sampler.num_negs_per_pos 27.0 +217 27 training.batch_size 1.0 +217 28 model.embedding_dim 2.0 +217 28 loss.margin 8.025268164678302 +217 28 loss.adversarial_temperature 0.741733852343787 +217 28 optimizer.lr 0.024563034210335472 +217 28 negative_sampler.num_negs_per_pos 6.0 +217 28 training.batch_size 2.0 +217 29 model.embedding_dim 1.0 +217 29 loss.margin 15.182800133461965 +217 29 loss.adversarial_temperature 0.6782753524919367 +217 29 optimizer.lr 0.0011631876624673763 +217 29 negative_sampler.num_negs_per_pos 42.0 +217 29 training.batch_size 2.0 +217 30 model.embedding_dim 2.0 +217 30 loss.margin 4.438837724527813 +217 30 loss.adversarial_temperature 0.2318760082410559 +217 30 optimizer.lr 0.013188298980101296 +217 30 negative_sampler.num_negs_per_pos 28.0 +217 30 training.batch_size 1.0 +217 31 model.embedding_dim 2.0 +217 31 loss.margin 26.970886099167704 +217 31 loss.adversarial_temperature 0.5804538799172122 +217 31 optimizer.lr 0.003523002489296904 +217 31 negative_sampler.num_negs_per_pos 31.0 +217 31 training.batch_size 2.0 +217 32 model.embedding_dim 0.0 +217 32 loss.margin 11.640041784626998 +217 32 loss.adversarial_temperature 0.8079959237579166 +217 32 optimizer.lr 0.005225688055124087 +217 32 negative_sampler.num_negs_per_pos 5.0 +217 32 training.batch_size 0.0 +217 33 model.embedding_dim 1.0 +217 33 loss.margin 22.590270137143424 +217 33 loss.adversarial_temperature 0.23637815115363836 +217 33 optimizer.lr 0.013907452910152456 +217 33 negative_sampler.num_negs_per_pos 0.0 +217 33 training.batch_size 0.0 +217 34 model.embedding_dim 1.0 +217 34 loss.margin 17.892213123646894 +217 34 loss.adversarial_temperature 0.7654830297961424 +217 34 optimizer.lr 0.0011374544196182002 +217 34 negative_sampler.num_negs_per_pos 2.0 +217 34 training.batch_size 1.0 +217 35 model.embedding_dim 0.0 +217 35 loss.margin 1.1223706957336432 +217 35 loss.adversarial_temperature 0.14105788074387593 +217 35 optimizer.lr 0.006928422356786462 +217 35 
negative_sampler.num_negs_per_pos 39.0 +217 35 training.batch_size 0.0 +217 36 model.embedding_dim 0.0 +217 36 loss.margin 1.2461168106075553 +217 36 loss.adversarial_temperature 0.6497488305540261 +217 36 optimizer.lr 0.00728718966432318 +217 36 negative_sampler.num_negs_per_pos 30.0 +217 36 training.batch_size 1.0 +217 37 model.embedding_dim 1.0 +217 37 loss.margin 5.8306773894089226 +217 37 loss.adversarial_temperature 0.7192567546906554 +217 37 optimizer.lr 0.0037153818512334178 +217 37 negative_sampler.num_negs_per_pos 47.0 +217 37 training.batch_size 0.0 +217 38 model.embedding_dim 1.0 +217 38 loss.margin 3.761316130045322 +217 38 loss.adversarial_temperature 0.4005229476834339 +217 38 optimizer.lr 0.06446612240669775 +217 38 negative_sampler.num_negs_per_pos 26.0 +217 38 training.batch_size 1.0 +217 39 model.embedding_dim 0.0 +217 39 loss.margin 4.775990729025035 +217 39 loss.adversarial_temperature 0.41285014885027715 +217 39 optimizer.lr 0.0585998138016954 +217 39 negative_sampler.num_negs_per_pos 44.0 +217 39 training.batch_size 0.0 +217 40 model.embedding_dim 0.0 +217 40 loss.margin 1.6003127422352783 +217 40 loss.adversarial_temperature 0.5206665565014137 +217 40 optimizer.lr 0.03383924536806318 +217 40 negative_sampler.num_negs_per_pos 34.0 +217 40 training.batch_size 3.0 +217 41 model.embedding_dim 2.0 +217 41 loss.margin 4.713873299394518 +217 41 loss.adversarial_temperature 0.7458768750410751 +217 41 optimizer.lr 0.03386194953621995 +217 41 negative_sampler.num_negs_per_pos 24.0 +217 41 training.batch_size 3.0 +217 42 model.embedding_dim 2.0 +217 42 loss.margin 15.442894176279681 +217 42 loss.adversarial_temperature 0.3605849189970073 +217 42 optimizer.lr 0.005299011398520893 +217 42 negative_sampler.num_negs_per_pos 29.0 +217 42 training.batch_size 0.0 +217 43 model.embedding_dim 1.0 +217 43 loss.margin 5.343448272061486 +217 43 loss.adversarial_temperature 0.82970322546195 +217 43 optimizer.lr 0.001918313499316522 +217 43 
negative_sampler.num_negs_per_pos 40.0 +217 43 training.batch_size 1.0 +217 44 model.embedding_dim 2.0 +217 44 loss.margin 14.73124337246859 +217 44 loss.adversarial_temperature 0.8646879006732195 +217 44 optimizer.lr 0.020473706171024515 +217 44 negative_sampler.num_negs_per_pos 19.0 +217 44 training.batch_size 1.0 +217 45 model.embedding_dim 0.0 +217 45 loss.margin 2.4029540713516617 +217 45 loss.adversarial_temperature 0.9126221229912983 +217 45 optimizer.lr 0.011142175332064339 +217 45 negative_sampler.num_negs_per_pos 0.0 +217 45 training.batch_size 0.0 +217 46 model.embedding_dim 2.0 +217 46 loss.margin 12.162210471587096 +217 46 loss.adversarial_temperature 0.8212584494857554 +217 46 optimizer.lr 0.027237306965365783 +217 46 negative_sampler.num_negs_per_pos 9.0 +217 46 training.batch_size 1.0 +217 47 model.embedding_dim 1.0 +217 47 loss.margin 25.80772174678863 +217 47 loss.adversarial_temperature 0.46821611121189755 +217 47 optimizer.lr 0.00980978288161741 +217 47 negative_sampler.num_negs_per_pos 32.0 +217 47 training.batch_size 0.0 +217 48 model.embedding_dim 1.0 +217 48 loss.margin 28.33240496683291 +217 48 loss.adversarial_temperature 0.6064158234035663 +217 48 optimizer.lr 0.04041816495634423 +217 48 negative_sampler.num_negs_per_pos 38.0 +217 48 training.batch_size 2.0 +217 49 model.embedding_dim 0.0 +217 49 loss.margin 16.125754496327755 +217 49 loss.adversarial_temperature 0.13213934658123355 +217 49 optimizer.lr 0.0031121767562423113 +217 49 negative_sampler.num_negs_per_pos 37.0 +217 49 training.batch_size 2.0 +217 50 model.embedding_dim 0.0 +217 50 loss.margin 27.1752543137453 +217 50 loss.adversarial_temperature 0.5995397364183197 +217 50 optimizer.lr 0.04394232645601915 +217 50 negative_sampler.num_negs_per_pos 33.0 +217 50 training.batch_size 2.0 +217 51 model.embedding_dim 1.0 +217 51 loss.margin 22.01071591482764 +217 51 loss.adversarial_temperature 0.3759414027891459 +217 51 optimizer.lr 0.09344207428713418 +217 51 
negative_sampler.num_negs_per_pos 11.0 +217 51 training.batch_size 2.0 +217 52 model.embedding_dim 0.0 +217 52 loss.margin 27.58708571553864 +217 52 loss.adversarial_temperature 0.5158237967620671 +217 52 optimizer.lr 0.0010548810892277649 +217 52 negative_sampler.num_negs_per_pos 13.0 +217 52 training.batch_size 2.0 +217 53 model.embedding_dim 1.0 +217 53 loss.margin 21.206521333998186 +217 53 loss.adversarial_temperature 0.7007208688746439 +217 53 optimizer.lr 0.006228439315105849 +217 53 negative_sampler.num_negs_per_pos 36.0 +217 53 training.batch_size 0.0 +217 54 model.embedding_dim 1.0 +217 54 loss.margin 23.908126124655816 +217 54 loss.adversarial_temperature 0.8981333292296382 +217 54 optimizer.lr 0.0022303009348611045 +217 54 negative_sampler.num_negs_per_pos 44.0 +217 54 training.batch_size 0.0 +217 55 model.embedding_dim 2.0 +217 55 loss.margin 1.1839633525086897 +217 55 loss.adversarial_temperature 0.3445140714427023 +217 55 optimizer.lr 0.012709397892459066 +217 55 negative_sampler.num_negs_per_pos 22.0 +217 55 training.batch_size 1.0 +217 56 model.embedding_dim 1.0 +217 56 loss.margin 24.54715451507105 +217 56 loss.adversarial_temperature 0.28844936750656336 +217 56 optimizer.lr 0.03465402958411442 +217 56 negative_sampler.num_negs_per_pos 40.0 +217 56 training.batch_size 0.0 +217 57 model.embedding_dim 0.0 +217 57 loss.margin 12.464855313827442 +217 57 loss.adversarial_temperature 0.8443033185810895 +217 57 optimizer.lr 0.035205375353360525 +217 57 negative_sampler.num_negs_per_pos 10.0 +217 57 training.batch_size 3.0 +217 58 model.embedding_dim 1.0 +217 58 loss.margin 11.06575522255691 +217 58 loss.adversarial_temperature 0.42624576055793195 +217 58 optimizer.lr 0.0060818335133953905 +217 58 negative_sampler.num_negs_per_pos 7.0 +217 58 training.batch_size 0.0 +217 1 dataset """yago310""" +217 1 model """distmult""" +217 1 loss """nssa""" +217 1 regularizer """no""" +217 1 optimizer """adam""" +217 1 training_loop """owa""" +217 1 negative_sampler 
"""basic""" +217 1 evaluator """rankbased""" +217 2 dataset """yago310""" +217 2 model """distmult""" +217 2 loss """nssa""" +217 2 regularizer """no""" +217 2 optimizer """adam""" +217 2 training_loop """owa""" +217 2 negative_sampler """basic""" +217 2 evaluator """rankbased""" +217 3 dataset """yago310""" +217 3 model """distmult""" +217 3 loss """nssa""" +217 3 regularizer """no""" +217 3 optimizer """adam""" +217 3 training_loop """owa""" +217 3 negative_sampler """basic""" +217 3 evaluator """rankbased""" +217 4 dataset """yago310""" +217 4 model """distmult""" +217 4 loss """nssa""" +217 4 regularizer """no""" +217 4 optimizer """adam""" +217 4 training_loop """owa""" +217 4 negative_sampler """basic""" +217 4 evaluator """rankbased""" +217 5 dataset """yago310""" +217 5 model """distmult""" +217 5 loss """nssa""" +217 5 regularizer """no""" +217 5 optimizer """adam""" +217 5 training_loop """owa""" +217 5 negative_sampler """basic""" +217 5 evaluator """rankbased""" +217 6 dataset """yago310""" +217 6 model """distmult""" +217 6 loss """nssa""" +217 6 regularizer """no""" +217 6 optimizer """adam""" +217 6 training_loop """owa""" +217 6 negative_sampler """basic""" +217 6 evaluator """rankbased""" +217 7 dataset """yago310""" +217 7 model """distmult""" +217 7 loss """nssa""" +217 7 regularizer """no""" +217 7 optimizer """adam""" +217 7 training_loop """owa""" +217 7 negative_sampler """basic""" +217 7 evaluator """rankbased""" +217 8 dataset """yago310""" +217 8 model """distmult""" +217 8 loss """nssa""" +217 8 regularizer """no""" +217 8 optimizer """adam""" +217 8 training_loop """owa""" +217 8 negative_sampler """basic""" +217 8 evaluator """rankbased""" +217 9 dataset """yago310""" +217 9 model """distmult""" +217 9 loss """nssa""" +217 9 regularizer """no""" +217 9 optimizer """adam""" +217 9 training_loop """owa""" +217 9 negative_sampler """basic""" +217 9 evaluator """rankbased""" +217 10 dataset """yago310""" +217 10 model """distmult""" +217 10 
loss """nssa""" +217 10 regularizer """no""" +217 10 optimizer """adam""" +217 10 training_loop """owa""" +217 10 negative_sampler """basic""" +217 10 evaluator """rankbased""" +217 11 dataset """yago310""" +217 11 model """distmult""" +217 11 loss """nssa""" +217 11 regularizer """no""" +217 11 optimizer """adam""" +217 11 training_loop """owa""" +217 11 negative_sampler """basic""" +217 11 evaluator """rankbased""" +217 12 dataset """yago310""" +217 12 model """distmult""" +217 12 loss """nssa""" +217 12 regularizer """no""" +217 12 optimizer """adam""" +217 12 training_loop """owa""" +217 12 negative_sampler """basic""" +217 12 evaluator """rankbased""" +217 13 dataset """yago310""" +217 13 model """distmult""" +217 13 loss """nssa""" +217 13 regularizer """no""" +217 13 optimizer """adam""" +217 13 training_loop """owa""" +217 13 negative_sampler """basic""" +217 13 evaluator """rankbased""" +217 14 dataset """yago310""" +217 14 model """distmult""" +217 14 loss """nssa""" +217 14 regularizer """no""" +217 14 optimizer """adam""" +217 14 training_loop """owa""" +217 14 negative_sampler """basic""" +217 14 evaluator """rankbased""" +217 15 dataset """yago310""" +217 15 model """distmult""" +217 15 loss """nssa""" +217 15 regularizer """no""" +217 15 optimizer """adam""" +217 15 training_loop """owa""" +217 15 negative_sampler """basic""" +217 15 evaluator """rankbased""" +217 16 dataset """yago310""" +217 16 model """distmult""" +217 16 loss """nssa""" +217 16 regularizer """no""" +217 16 optimizer """adam""" +217 16 training_loop """owa""" +217 16 negative_sampler """basic""" +217 16 evaluator """rankbased""" +217 17 dataset """yago310""" +217 17 model """distmult""" +217 17 loss """nssa""" +217 17 regularizer """no""" +217 17 optimizer """adam""" +217 17 training_loop """owa""" +217 17 negative_sampler """basic""" +217 17 evaluator """rankbased""" +217 18 dataset """yago310""" +217 18 model """distmult""" +217 18 loss """nssa""" +217 18 regularizer """no""" 
+217 18 optimizer """adam""" +217 18 training_loop """owa""" +217 18 negative_sampler """basic""" +217 18 evaluator """rankbased""" +217 19 dataset """yago310""" +217 19 model """distmult""" +217 19 loss """nssa""" +217 19 regularizer """no""" +217 19 optimizer """adam""" +217 19 training_loop """owa""" +217 19 negative_sampler """basic""" +217 19 evaluator """rankbased""" +217 20 dataset """yago310""" +217 20 model """distmult""" +217 20 loss """nssa""" +217 20 regularizer """no""" +217 20 optimizer """adam""" +217 20 training_loop """owa""" +217 20 negative_sampler """basic""" +217 20 evaluator """rankbased""" +217 21 dataset """yago310""" +217 21 model """distmult""" +217 21 loss """nssa""" +217 21 regularizer """no""" +217 21 optimizer """adam""" +217 21 training_loop """owa""" +217 21 negative_sampler """basic""" +217 21 evaluator """rankbased""" +217 22 dataset """yago310""" +217 22 model """distmult""" +217 22 loss """nssa""" +217 22 regularizer """no""" +217 22 optimizer """adam""" +217 22 training_loop """owa""" +217 22 negative_sampler """basic""" +217 22 evaluator """rankbased""" +217 23 dataset """yago310""" +217 23 model """distmult""" +217 23 loss """nssa""" +217 23 regularizer """no""" +217 23 optimizer """adam""" +217 23 training_loop """owa""" +217 23 negative_sampler """basic""" +217 23 evaluator """rankbased""" +217 24 dataset """yago310""" +217 24 model """distmult""" +217 24 loss """nssa""" +217 24 regularizer """no""" +217 24 optimizer """adam""" +217 24 training_loop """owa""" +217 24 negative_sampler """basic""" +217 24 evaluator """rankbased""" +217 25 dataset """yago310""" +217 25 model """distmult""" +217 25 loss """nssa""" +217 25 regularizer """no""" +217 25 optimizer """adam""" +217 25 training_loop """owa""" +217 25 negative_sampler """basic""" +217 25 evaluator """rankbased""" +217 26 dataset """yago310""" +217 26 model """distmult""" +217 26 loss """nssa""" +217 26 regularizer """no""" +217 26 optimizer """adam""" +217 26 
training_loop """owa""" +217 26 negative_sampler """basic""" +217 26 evaluator """rankbased""" +217 27 dataset """yago310""" +217 27 model """distmult""" +217 27 loss """nssa""" +217 27 regularizer """no""" +217 27 optimizer """adam""" +217 27 training_loop """owa""" +217 27 negative_sampler """basic""" +217 27 evaluator """rankbased""" +217 28 dataset """yago310""" +217 28 model """distmult""" +217 28 loss """nssa""" +217 28 regularizer """no""" +217 28 optimizer """adam""" +217 28 training_loop """owa""" +217 28 negative_sampler """basic""" +217 28 evaluator """rankbased""" +217 29 dataset """yago310""" +217 29 model """distmult""" +217 29 loss """nssa""" +217 29 regularizer """no""" +217 29 optimizer """adam""" +217 29 training_loop """owa""" +217 29 negative_sampler """basic""" +217 29 evaluator """rankbased""" +217 30 dataset """yago310""" +217 30 model """distmult""" +217 30 loss """nssa""" +217 30 regularizer """no""" +217 30 optimizer """adam""" +217 30 training_loop """owa""" +217 30 negative_sampler """basic""" +217 30 evaluator """rankbased""" +217 31 dataset """yago310""" +217 31 model """distmult""" +217 31 loss """nssa""" +217 31 regularizer """no""" +217 31 optimizer """adam""" +217 31 training_loop """owa""" +217 31 negative_sampler """basic""" +217 31 evaluator """rankbased""" +217 32 dataset """yago310""" +217 32 model """distmult""" +217 32 loss """nssa""" +217 32 regularizer """no""" +217 32 optimizer """adam""" +217 32 training_loop """owa""" +217 32 negative_sampler """basic""" +217 32 evaluator """rankbased""" +217 33 dataset """yago310""" +217 33 model """distmult""" +217 33 loss """nssa""" +217 33 regularizer """no""" +217 33 optimizer """adam""" +217 33 training_loop """owa""" +217 33 negative_sampler """basic""" +217 33 evaluator """rankbased""" +217 34 dataset """yago310""" +217 34 model """distmult""" +217 34 loss """nssa""" +217 34 regularizer """no""" +217 34 optimizer """adam""" +217 34 training_loop """owa""" +217 34 
negative_sampler """basic""" +217 34 evaluator """rankbased""" +217 35 dataset """yago310""" +217 35 model """distmult""" +217 35 loss """nssa""" +217 35 regularizer """no""" +217 35 optimizer """adam""" +217 35 training_loop """owa""" +217 35 negative_sampler """basic""" +217 35 evaluator """rankbased""" +217 36 dataset """yago310""" +217 36 model """distmult""" +217 36 loss """nssa""" +217 36 regularizer """no""" +217 36 optimizer """adam""" +217 36 training_loop """owa""" +217 36 negative_sampler """basic""" +217 36 evaluator """rankbased""" +217 37 dataset """yago310""" +217 37 model """distmult""" +217 37 loss """nssa""" +217 37 regularizer """no""" +217 37 optimizer """adam""" +217 37 training_loop """owa""" +217 37 negative_sampler """basic""" +217 37 evaluator """rankbased""" +217 38 dataset """yago310""" +217 38 model """distmult""" +217 38 loss """nssa""" +217 38 regularizer """no""" +217 38 optimizer """adam""" +217 38 training_loop """owa""" +217 38 negative_sampler """basic""" +217 38 evaluator """rankbased""" +217 39 dataset """yago310""" +217 39 model """distmult""" +217 39 loss """nssa""" +217 39 regularizer """no""" +217 39 optimizer """adam""" +217 39 training_loop """owa""" +217 39 negative_sampler """basic""" +217 39 evaluator """rankbased""" +217 40 dataset """yago310""" +217 40 model """distmult""" +217 40 loss """nssa""" +217 40 regularizer """no""" +217 40 optimizer """adam""" +217 40 training_loop """owa""" +217 40 negative_sampler """basic""" +217 40 evaluator """rankbased""" +217 41 dataset """yago310""" +217 41 model """distmult""" +217 41 loss """nssa""" +217 41 regularizer """no""" +217 41 optimizer """adam""" +217 41 training_loop """owa""" +217 41 negative_sampler """basic""" +217 41 evaluator """rankbased""" +217 42 dataset """yago310""" +217 42 model """distmult""" +217 42 loss """nssa""" +217 42 regularizer """no""" +217 42 optimizer """adam""" +217 42 training_loop """owa""" +217 42 negative_sampler """basic""" +217 42 evaluator 
"""rankbased""" +217 43 dataset """yago310""" +217 43 model """distmult""" +217 43 loss """nssa""" +217 43 regularizer """no""" +217 43 optimizer """adam""" +217 43 training_loop """owa""" +217 43 negative_sampler """basic""" +217 43 evaluator """rankbased""" +217 44 dataset """yago310""" +217 44 model """distmult""" +217 44 loss """nssa""" +217 44 regularizer """no""" +217 44 optimizer """adam""" +217 44 training_loop """owa""" +217 44 negative_sampler """basic""" +217 44 evaluator """rankbased""" +217 45 dataset """yago310""" +217 45 model """distmult""" +217 45 loss """nssa""" +217 45 regularizer """no""" +217 45 optimizer """adam""" +217 45 training_loop """owa""" +217 45 negative_sampler """basic""" +217 45 evaluator """rankbased""" +217 46 dataset """yago310""" +217 46 model """distmult""" +217 46 loss """nssa""" +217 46 regularizer """no""" +217 46 optimizer """adam""" +217 46 training_loop """owa""" +217 46 negative_sampler """basic""" +217 46 evaluator """rankbased""" +217 47 dataset """yago310""" +217 47 model """distmult""" +217 47 loss """nssa""" +217 47 regularizer """no""" +217 47 optimizer """adam""" +217 47 training_loop """owa""" +217 47 negative_sampler """basic""" +217 47 evaluator """rankbased""" +217 48 dataset """yago310""" +217 48 model """distmult""" +217 48 loss """nssa""" +217 48 regularizer """no""" +217 48 optimizer """adam""" +217 48 training_loop """owa""" +217 48 negative_sampler """basic""" +217 48 evaluator """rankbased""" +217 49 dataset """yago310""" +217 49 model """distmult""" +217 49 loss """nssa""" +217 49 regularizer """no""" +217 49 optimizer """adam""" +217 49 training_loop """owa""" +217 49 negative_sampler """basic""" +217 49 evaluator """rankbased""" +217 50 dataset """yago310""" +217 50 model """distmult""" +217 50 loss """nssa""" +217 50 regularizer """no""" +217 50 optimizer """adam""" +217 50 training_loop """owa""" +217 50 negative_sampler """basic""" +217 50 evaluator """rankbased""" +217 51 dataset """yago310""" 
+217 51 model """distmult""" +217 51 loss """nssa""" +217 51 regularizer """no""" +217 51 optimizer """adam""" +217 51 training_loop """owa""" +217 51 negative_sampler """basic""" +217 51 evaluator """rankbased""" +217 52 dataset """yago310""" +217 52 model """distmult""" +217 52 loss """nssa""" +217 52 regularizer """no""" +217 52 optimizer """adam""" +217 52 training_loop """owa""" +217 52 negative_sampler """basic""" +217 52 evaluator """rankbased""" +217 53 dataset """yago310""" +217 53 model """distmult""" +217 53 loss """nssa""" +217 53 regularizer """no""" +217 53 optimizer """adam""" +217 53 training_loop """owa""" +217 53 negative_sampler """basic""" +217 53 evaluator """rankbased""" +217 54 dataset """yago310""" +217 54 model """distmult""" +217 54 loss """nssa""" +217 54 regularizer """no""" +217 54 optimizer """adam""" +217 54 training_loop """owa""" +217 54 negative_sampler """basic""" +217 54 evaluator """rankbased""" +217 55 dataset """yago310""" +217 55 model """distmult""" +217 55 loss """nssa""" +217 55 regularizer """no""" +217 55 optimizer """adam""" +217 55 training_loop """owa""" +217 55 negative_sampler """basic""" +217 55 evaluator """rankbased""" +217 56 dataset """yago310""" +217 56 model """distmult""" +217 56 loss """nssa""" +217 56 regularizer """no""" +217 56 optimizer """adam""" +217 56 training_loop """owa""" +217 56 negative_sampler """basic""" +217 56 evaluator """rankbased""" +217 57 dataset """yago310""" +217 57 model """distmult""" +217 57 loss """nssa""" +217 57 regularizer """no""" +217 57 optimizer """adam""" +217 57 training_loop """owa""" +217 57 negative_sampler """basic""" +217 57 evaluator """rankbased""" +217 58 dataset """yago310""" +217 58 model """distmult""" +217 58 loss """nssa""" +217 58 regularizer """no""" +217 58 optimizer """adam""" +217 58 training_loop """owa""" +217 58 negative_sampler """basic""" +217 58 evaluator """rankbased""" +218 1 model.embedding_dim 0.0 +218 1 loss.margin 6.653613244326859 +218 1 
optimizer.lr 0.0027519985989677576 +218 1 negative_sampler.num_negs_per_pos 53.0 +218 1 training.batch_size 2.0 +218 2 model.embedding_dim 1.0 +218 2 loss.margin 8.931163895040548 +218 2 optimizer.lr 0.0012184608838098996 +218 2 negative_sampler.num_negs_per_pos 68.0 +218 2 training.batch_size 0.0 +218 3 model.embedding_dim 0.0 +218 3 loss.margin 6.735333763376679 +218 3 optimizer.lr 0.03893365995167048 +218 3 negative_sampler.num_negs_per_pos 48.0 +218 3 training.batch_size 1.0 +218 4 model.embedding_dim 0.0 +218 4 loss.margin 1.348350822158204 +218 4 optimizer.lr 0.0019521138237236894 +218 4 negative_sampler.num_negs_per_pos 87.0 +218 4 training.batch_size 2.0 +218 5 model.embedding_dim 0.0 +218 5 loss.margin 6.814729225245358 +218 5 optimizer.lr 0.015636879424926635 +218 5 negative_sampler.num_negs_per_pos 31.0 +218 5 training.batch_size 0.0 +218 6 model.embedding_dim 1.0 +218 6 loss.margin 6.4372596597764025 +218 6 optimizer.lr 0.0023972439669339212 +218 6 negative_sampler.num_negs_per_pos 75.0 +218 6 training.batch_size 1.0 +218 7 model.embedding_dim 0.0 +218 7 loss.margin 4.141877966177345 +218 7 optimizer.lr 0.0015930199237429615 +218 7 negative_sampler.num_negs_per_pos 8.0 +218 7 training.batch_size 2.0 +218 8 model.embedding_dim 2.0 +218 8 loss.margin 5.887890956656403 +218 8 optimizer.lr 0.02914745236553691 +218 8 negative_sampler.num_negs_per_pos 48.0 +218 8 training.batch_size 0.0 +218 9 model.embedding_dim 0.0 +218 9 loss.margin 3.122552847813513 +218 9 optimizer.lr 0.037424594009640606 +218 9 negative_sampler.num_negs_per_pos 88.0 +218 9 training.batch_size 2.0 +218 10 model.embedding_dim 2.0 +218 10 loss.margin 5.631361542285685 +218 10 optimizer.lr 0.0696350527732766 +218 10 negative_sampler.num_negs_per_pos 16.0 +218 10 training.batch_size 0.0 +218 11 model.embedding_dim 2.0 +218 11 loss.margin 1.5243907858726515 +218 11 optimizer.lr 0.06832027728563606 +218 11 negative_sampler.num_negs_per_pos 2.0 +218 11 training.batch_size 2.0 +218 12 
model.embedding_dim 0.0 +218 12 loss.margin 8.275632596314464 +218 12 optimizer.lr 0.005797593040615309 +218 12 negative_sampler.num_negs_per_pos 64.0 +218 12 training.batch_size 0.0 +218 13 model.embedding_dim 2.0 +218 13 loss.margin 5.292269033615684 +218 13 optimizer.lr 0.001031049546643885 +218 13 negative_sampler.num_negs_per_pos 8.0 +218 13 training.batch_size 0.0 +218 14 model.embedding_dim 1.0 +218 14 loss.margin 3.21269152869419 +218 14 optimizer.lr 0.003173772358226878 +218 14 negative_sampler.num_negs_per_pos 89.0 +218 14 training.batch_size 2.0 +218 15 model.embedding_dim 1.0 +218 15 loss.margin 8.553174400797815 +218 15 optimizer.lr 0.05347424718082903 +218 15 negative_sampler.num_negs_per_pos 51.0 +218 15 training.batch_size 2.0 +218 16 model.embedding_dim 2.0 +218 16 loss.margin 4.060830366263965 +218 16 optimizer.lr 0.033267689689189764 +218 16 negative_sampler.num_negs_per_pos 12.0 +218 16 training.batch_size 2.0 +218 17 model.embedding_dim 0.0 +218 17 loss.margin 1.7436670185240588 +218 17 optimizer.lr 0.011732125965147909 +218 17 negative_sampler.num_negs_per_pos 30.0 +218 17 training.batch_size 0.0 +218 18 model.embedding_dim 2.0 +218 18 loss.margin 8.476101598581092 +218 18 optimizer.lr 0.013459617126497866 +218 18 negative_sampler.num_negs_per_pos 96.0 +218 18 training.batch_size 0.0 +218 19 model.embedding_dim 0.0 +218 19 loss.margin 9.903015238892893 +218 19 optimizer.lr 0.03896754737081778 +218 19 negative_sampler.num_negs_per_pos 39.0 +218 19 training.batch_size 2.0 +218 20 model.embedding_dim 1.0 +218 20 loss.margin 7.610999695911629 +218 20 optimizer.lr 0.0030803244894435024 +218 20 negative_sampler.num_negs_per_pos 54.0 +218 20 training.batch_size 0.0 +218 21 model.embedding_dim 2.0 +218 21 loss.margin 4.523878148060107 +218 21 optimizer.lr 0.06984262511728238 +218 21 negative_sampler.num_negs_per_pos 66.0 +218 21 training.batch_size 1.0 +218 22 model.embedding_dim 1.0 +218 22 loss.margin 8.833293476650407 +218 22 optimizer.lr 
0.03619239833410762 +218 22 negative_sampler.num_negs_per_pos 17.0 +218 22 training.batch_size 1.0 +218 23 model.embedding_dim 1.0 +218 23 loss.margin 9.375575187942218 +218 23 optimizer.lr 0.0016442547555017486 +218 23 negative_sampler.num_negs_per_pos 87.0 +218 23 training.batch_size 1.0 +218 24 model.embedding_dim 1.0 +218 24 loss.margin 5.89270769160776 +218 24 optimizer.lr 0.009599175748915658 +218 24 negative_sampler.num_negs_per_pos 97.0 +218 24 training.batch_size 0.0 +218 1 dataset """fb15k237""" +218 1 model """ermlp""" +218 1 loss """marginranking""" +218 1 regularizer """no""" +218 1 optimizer """adam""" +218 1 training_loop """owa""" +218 1 negative_sampler """basic""" +218 1 evaluator """rankbased""" +218 2 dataset """fb15k237""" +218 2 model """ermlp""" +218 2 loss """marginranking""" +218 2 regularizer """no""" +218 2 optimizer """adam""" +218 2 training_loop """owa""" +218 2 negative_sampler """basic""" +218 2 evaluator """rankbased""" +218 3 dataset """fb15k237""" +218 3 model """ermlp""" +218 3 loss """marginranking""" +218 3 regularizer """no""" +218 3 optimizer """adam""" +218 3 training_loop """owa""" +218 3 negative_sampler """basic""" +218 3 evaluator """rankbased""" +218 4 dataset """fb15k237""" +218 4 model """ermlp""" +218 4 loss """marginranking""" +218 4 regularizer """no""" +218 4 optimizer """adam""" +218 4 training_loop """owa""" +218 4 negative_sampler """basic""" +218 4 evaluator """rankbased""" +218 5 dataset """fb15k237""" +218 5 model """ermlp""" +218 5 loss """marginranking""" +218 5 regularizer """no""" +218 5 optimizer """adam""" +218 5 training_loop """owa""" +218 5 negative_sampler """basic""" +218 5 evaluator """rankbased""" +218 6 dataset """fb15k237""" +218 6 model """ermlp""" +218 6 loss """marginranking""" +218 6 regularizer """no""" +218 6 optimizer """adam""" +218 6 training_loop """owa""" +218 6 negative_sampler """basic""" +218 6 evaluator """rankbased""" +218 7 dataset """fb15k237""" +218 7 model """ermlp""" +218 
7 loss """marginranking""" +218 7 regularizer """no""" +218 7 optimizer """adam""" +218 7 training_loop """owa""" +218 7 negative_sampler """basic""" +218 7 evaluator """rankbased""" +218 8 dataset """fb15k237""" +218 8 model """ermlp""" +218 8 loss """marginranking""" +218 8 regularizer """no""" +218 8 optimizer """adam""" +218 8 training_loop """owa""" +218 8 negative_sampler """basic""" +218 8 evaluator """rankbased""" +218 9 dataset """fb15k237""" +218 9 model """ermlp""" +218 9 loss """marginranking""" +218 9 regularizer """no""" +218 9 optimizer """adam""" +218 9 training_loop """owa""" +218 9 negative_sampler """basic""" +218 9 evaluator """rankbased""" +218 10 dataset """fb15k237""" +218 10 model """ermlp""" +218 10 loss """marginranking""" +218 10 regularizer """no""" +218 10 optimizer """adam""" +218 10 training_loop """owa""" +218 10 negative_sampler """basic""" +218 10 evaluator """rankbased""" +218 11 dataset """fb15k237""" +218 11 model """ermlp""" +218 11 loss """marginranking""" +218 11 regularizer """no""" +218 11 optimizer """adam""" +218 11 training_loop """owa""" +218 11 negative_sampler """basic""" +218 11 evaluator """rankbased""" +218 12 dataset """fb15k237""" +218 12 model """ermlp""" +218 12 loss """marginranking""" +218 12 regularizer """no""" +218 12 optimizer """adam""" +218 12 training_loop """owa""" +218 12 negative_sampler """basic""" +218 12 evaluator """rankbased""" +218 13 dataset """fb15k237""" +218 13 model """ermlp""" +218 13 loss """marginranking""" +218 13 regularizer """no""" +218 13 optimizer """adam""" +218 13 training_loop """owa""" +218 13 negative_sampler """basic""" +218 13 evaluator """rankbased""" +218 14 dataset """fb15k237""" +218 14 model """ermlp""" +218 14 loss """marginranking""" +218 14 regularizer """no""" +218 14 optimizer """adam""" +218 14 training_loop """owa""" +218 14 negative_sampler """basic""" +218 14 evaluator """rankbased""" +218 15 dataset """fb15k237""" +218 15 model """ermlp""" +218 15 loss 
"""marginranking""" +218 15 regularizer """no""" +218 15 optimizer """adam""" +218 15 training_loop """owa""" +218 15 negative_sampler """basic""" +218 15 evaluator """rankbased""" +218 16 dataset """fb15k237""" +218 16 model """ermlp""" +218 16 loss """marginranking""" +218 16 regularizer """no""" +218 16 optimizer """adam""" +218 16 training_loop """owa""" +218 16 negative_sampler """basic""" +218 16 evaluator """rankbased""" +218 17 dataset """fb15k237""" +218 17 model """ermlp""" +218 17 loss """marginranking""" +218 17 regularizer """no""" +218 17 optimizer """adam""" +218 17 training_loop """owa""" +218 17 negative_sampler """basic""" +218 17 evaluator """rankbased""" +218 18 dataset """fb15k237""" +218 18 model """ermlp""" +218 18 loss """marginranking""" +218 18 regularizer """no""" +218 18 optimizer """adam""" +218 18 training_loop """owa""" +218 18 negative_sampler """basic""" +218 18 evaluator """rankbased""" +218 19 dataset """fb15k237""" +218 19 model """ermlp""" +218 19 loss """marginranking""" +218 19 regularizer """no""" +218 19 optimizer """adam""" +218 19 training_loop """owa""" +218 19 negative_sampler """basic""" +218 19 evaluator """rankbased""" +218 20 dataset """fb15k237""" +218 20 model """ermlp""" +218 20 loss """marginranking""" +218 20 regularizer """no""" +218 20 optimizer """adam""" +218 20 training_loop """owa""" +218 20 negative_sampler """basic""" +218 20 evaluator """rankbased""" +218 21 dataset """fb15k237""" +218 21 model """ermlp""" +218 21 loss """marginranking""" +218 21 regularizer """no""" +218 21 optimizer """adam""" +218 21 training_loop """owa""" +218 21 negative_sampler """basic""" +218 21 evaluator """rankbased""" +218 22 dataset """fb15k237""" +218 22 model """ermlp""" +218 22 loss """marginranking""" +218 22 regularizer """no""" +218 22 optimizer """adam""" +218 22 training_loop """owa""" +218 22 negative_sampler """basic""" +218 22 evaluator """rankbased""" +218 23 dataset """fb15k237""" +218 23 model """ermlp""" +218 
23 loss """marginranking""" +218 23 regularizer """no""" +218 23 optimizer """adam""" +218 23 training_loop """owa""" +218 23 negative_sampler """basic""" +218 23 evaluator """rankbased""" +218 24 dataset """fb15k237""" +218 24 model """ermlp""" +218 24 loss """marginranking""" +218 24 regularizer """no""" +218 24 optimizer """adam""" +218 24 training_loop """owa""" +218 24 negative_sampler """basic""" +218 24 evaluator """rankbased""" +219 1 model.embedding_dim 1.0 +219 1 loss.margin 9.520517417065072 +219 1 optimizer.lr 0.013114138336038627 +219 1 negative_sampler.num_negs_per_pos 91.0 +219 1 training.batch_size 0.0 +219 2 model.embedding_dim 1.0 +219 2 loss.margin 1.1789985261357416 +219 2 optimizer.lr 0.0014628669543464245 +219 2 negative_sampler.num_negs_per_pos 32.0 +219 2 training.batch_size 0.0 +219 3 model.embedding_dim 0.0 +219 3 loss.margin 7.802806765357209 +219 3 optimizer.lr 0.006524940211213711 +219 3 negative_sampler.num_negs_per_pos 6.0 +219 3 training.batch_size 2.0 +219 4 model.embedding_dim 2.0 +219 4 loss.margin 9.388684415203656 +219 4 optimizer.lr 0.004621499179996151 +219 4 negative_sampler.num_negs_per_pos 69.0 +219 4 training.batch_size 2.0 +219 5 model.embedding_dim 2.0 +219 5 loss.margin 2.9856074903888095 +219 5 optimizer.lr 0.03238065627173248 +219 5 negative_sampler.num_negs_per_pos 45.0 +219 5 training.batch_size 2.0 +219 6 model.embedding_dim 0.0 +219 6 loss.margin 8.236049608801421 +219 6 optimizer.lr 0.007894313545971606 +219 6 negative_sampler.num_negs_per_pos 30.0 +219 6 training.batch_size 0.0 +219 7 model.embedding_dim 1.0 +219 7 loss.margin 7.036865845412631 +219 7 optimizer.lr 0.002210051868270069 +219 7 negative_sampler.num_negs_per_pos 62.0 +219 7 training.batch_size 2.0 +219 8 model.embedding_dim 2.0 +219 8 loss.margin 1.2428939850897591 +219 8 optimizer.lr 0.09785473890269458 +219 8 negative_sampler.num_negs_per_pos 51.0 +219 8 training.batch_size 1.0 +219 9 model.embedding_dim 1.0 +219 9 loss.margin 2.0956016723361346 
+219 9 optimizer.lr 0.003872721712486825 +219 9 negative_sampler.num_negs_per_pos 33.0 +219 9 training.batch_size 2.0 +219 10 model.embedding_dim 1.0 +219 10 loss.margin 3.7541141800285556 +219 10 optimizer.lr 0.04614834405047993 +219 10 negative_sampler.num_negs_per_pos 37.0 +219 10 training.batch_size 1.0 +219 11 model.embedding_dim 0.0 +219 11 loss.margin 2.3571197823225067 +219 11 optimizer.lr 0.004128218937287213 +219 11 negative_sampler.num_negs_per_pos 32.0 +219 11 training.batch_size 0.0 +219 12 model.embedding_dim 1.0 +219 12 loss.margin 1.7818558875577954 +219 12 optimizer.lr 0.006622637490938706 +219 12 negative_sampler.num_negs_per_pos 63.0 +219 12 training.batch_size 0.0 +219 13 model.embedding_dim 0.0 +219 13 loss.margin 5.781807658403693 +219 13 optimizer.lr 0.09268568052818592 +219 13 negative_sampler.num_negs_per_pos 13.0 +219 13 training.batch_size 1.0 +219 14 model.embedding_dim 2.0 +219 14 loss.margin 9.957577504937268 +219 14 optimizer.lr 0.004259651092079704 +219 14 negative_sampler.num_negs_per_pos 88.0 +219 14 training.batch_size 2.0 +219 15 model.embedding_dim 0.0 +219 15 loss.margin 3.96007541254152 +219 15 optimizer.lr 0.0344933516684978 +219 15 negative_sampler.num_negs_per_pos 15.0 +219 15 training.batch_size 0.0 +219 16 model.embedding_dim 1.0 +219 16 loss.margin 2.3362451147307497 +219 16 optimizer.lr 0.0015338501983663167 +219 16 negative_sampler.num_negs_per_pos 62.0 +219 16 training.batch_size 1.0 +219 17 model.embedding_dim 1.0 +219 17 loss.margin 5.822009642328956 +219 17 optimizer.lr 0.032926645560190225 +219 17 negative_sampler.num_negs_per_pos 75.0 +219 17 training.batch_size 2.0 +219 18 model.embedding_dim 0.0 +219 18 loss.margin 3.837243515387909 +219 18 optimizer.lr 0.002536976854675905 +219 18 negative_sampler.num_negs_per_pos 49.0 +219 18 training.batch_size 2.0 +219 19 model.embedding_dim 2.0 +219 19 loss.margin 2.456605615751913 +219 19 optimizer.lr 0.045978763082087384 +219 19 negative_sampler.num_negs_per_pos 14.0 
+219 19 training.batch_size 0.0 +219 20 model.embedding_dim 1.0 +219 20 loss.margin 8.352230634052232 +219 20 optimizer.lr 0.0015009675951616751 +219 20 negative_sampler.num_negs_per_pos 63.0 +219 20 training.batch_size 1.0 +219 21 model.embedding_dim 2.0 +219 21 loss.margin 9.832380065005442 +219 21 optimizer.lr 0.004844315865544356 +219 21 negative_sampler.num_negs_per_pos 44.0 +219 21 training.batch_size 1.0 +219 22 model.embedding_dim 1.0 +219 22 loss.margin 5.841642408730859 +219 22 optimizer.lr 0.022771783659689133 +219 22 negative_sampler.num_negs_per_pos 48.0 +219 22 training.batch_size 0.0 +219 23 model.embedding_dim 2.0 +219 23 loss.margin 5.369500017495975 +219 23 optimizer.lr 0.00650509483209105 +219 23 negative_sampler.num_negs_per_pos 2.0 +219 23 training.batch_size 2.0 +219 24 model.embedding_dim 2.0 +219 24 loss.margin 3.2890800376435334 +219 24 optimizer.lr 0.0023550899252767704 +219 24 negative_sampler.num_negs_per_pos 76.0 +219 24 training.batch_size 2.0 +219 25 model.embedding_dim 2.0 +219 25 loss.margin 8.448930843446151 +219 25 optimizer.lr 0.04433055236229048 +219 25 negative_sampler.num_negs_per_pos 29.0 +219 25 training.batch_size 1.0 +219 26 model.embedding_dim 0.0 +219 26 loss.margin 8.901696901160829 +219 26 optimizer.lr 0.0010531050093881805 +219 26 negative_sampler.num_negs_per_pos 26.0 +219 26 training.batch_size 2.0 +219 27 model.embedding_dim 0.0 +219 27 loss.margin 6.498967268687484 +219 27 optimizer.lr 0.03794946199489642 +219 27 negative_sampler.num_negs_per_pos 4.0 +219 27 training.batch_size 2.0 +219 28 model.embedding_dim 1.0 +219 28 loss.margin 5.3711890299515055 +219 28 optimizer.lr 0.004723899545681594 +219 28 negative_sampler.num_negs_per_pos 49.0 +219 28 training.batch_size 2.0 +219 29 model.embedding_dim 0.0 +219 29 loss.margin 1.8541380872378508 +219 29 optimizer.lr 0.004771947200244773 +219 29 negative_sampler.num_negs_per_pos 29.0 +219 29 training.batch_size 2.0 +219 30 model.embedding_dim 0.0 +219 30 loss.margin 
7.776559106496783 +219 30 optimizer.lr 0.09943768154393672 +219 30 negative_sampler.num_negs_per_pos 20.0 +219 30 training.batch_size 1.0 +219 31 model.embedding_dim 1.0 +219 31 loss.margin 3.968266942718493 +219 31 optimizer.lr 0.004730428320706154 +219 31 negative_sampler.num_negs_per_pos 0.0 +219 31 training.batch_size 0.0 +219 32 model.embedding_dim 1.0 +219 32 loss.margin 4.7915532947653405 +219 32 optimizer.lr 0.05162083738907535 +219 32 negative_sampler.num_negs_per_pos 85.0 +219 32 training.batch_size 0.0 +219 33 model.embedding_dim 1.0 +219 33 loss.margin 9.583260581848197 +219 33 optimizer.lr 0.09527214558906447 +219 33 negative_sampler.num_negs_per_pos 41.0 +219 33 training.batch_size 2.0 +219 34 model.embedding_dim 0.0 +219 34 loss.margin 6.875046553382593 +219 34 optimizer.lr 0.0032868311289791243 +219 34 negative_sampler.num_negs_per_pos 32.0 +219 34 training.batch_size 1.0 +219 35 model.embedding_dim 0.0 +219 35 loss.margin 6.694756340489386 +219 35 optimizer.lr 0.0010032043251390435 +219 35 negative_sampler.num_negs_per_pos 77.0 +219 35 training.batch_size 0.0 +219 36 model.embedding_dim 1.0 +219 36 loss.margin 9.339818619188103 +219 36 optimizer.lr 0.0011169774908023635 +219 36 negative_sampler.num_negs_per_pos 46.0 +219 36 training.batch_size 0.0 +219 37 model.embedding_dim 0.0 +219 37 loss.margin 3.1632097154359524 +219 37 optimizer.lr 0.005934743743687456 +219 37 negative_sampler.num_negs_per_pos 33.0 +219 37 training.batch_size 1.0 +219 38 model.embedding_dim 0.0 +219 38 loss.margin 5.171500214130986 +219 38 optimizer.lr 0.0216992098191163 +219 38 negative_sampler.num_negs_per_pos 90.0 +219 38 training.batch_size 1.0 +219 39 model.embedding_dim 2.0 +219 39 loss.margin 0.8831126674071719 +219 39 optimizer.lr 0.00434907363450762 +219 39 negative_sampler.num_negs_per_pos 66.0 +219 39 training.batch_size 0.0 +219 40 model.embedding_dim 2.0 +219 40 loss.margin 6.313879019600879 +219 40 optimizer.lr 0.04685103760587453 +219 40 
negative_sampler.num_negs_per_pos 5.0 +219 40 training.batch_size 2.0 +219 41 model.embedding_dim 1.0 +219 41 loss.margin 1.9734355083607595 +219 41 optimizer.lr 0.004732246855360195 +219 41 negative_sampler.num_negs_per_pos 97.0 +219 41 training.batch_size 1.0 +219 42 model.embedding_dim 1.0 +219 42 loss.margin 3.0913674950285674 +219 42 optimizer.lr 0.010845188115970014 +219 42 negative_sampler.num_negs_per_pos 47.0 +219 42 training.batch_size 2.0 +219 43 model.embedding_dim 2.0 +219 43 loss.margin 9.669706778075069 +219 43 optimizer.lr 0.003543172670610472 +219 43 negative_sampler.num_negs_per_pos 54.0 +219 43 training.batch_size 1.0 +219 44 model.embedding_dim 1.0 +219 44 loss.margin 4.297841784120159 +219 44 optimizer.lr 0.004297865859408651 +219 44 negative_sampler.num_negs_per_pos 36.0 +219 44 training.batch_size 0.0 +219 1 dataset """fb15k237""" +219 1 model """ermlp""" +219 1 loss """marginranking""" +219 1 regularizer """no""" +219 1 optimizer """adam""" +219 1 training_loop """owa""" +219 1 negative_sampler """basic""" +219 1 evaluator """rankbased""" +219 2 dataset """fb15k237""" +219 2 model """ermlp""" +219 2 loss """marginranking""" +219 2 regularizer """no""" +219 2 optimizer """adam""" +219 2 training_loop """owa""" +219 2 negative_sampler """basic""" +219 2 evaluator """rankbased""" +219 3 dataset """fb15k237""" +219 3 model """ermlp""" +219 3 loss """marginranking""" +219 3 regularizer """no""" +219 3 optimizer """adam""" +219 3 training_loop """owa""" +219 3 negative_sampler """basic""" +219 3 evaluator """rankbased""" +219 4 dataset """fb15k237""" +219 4 model """ermlp""" +219 4 loss """marginranking""" +219 4 regularizer """no""" +219 4 optimizer """adam""" +219 4 training_loop """owa""" +219 4 negative_sampler """basic""" +219 4 evaluator """rankbased""" +219 5 dataset """fb15k237""" +219 5 model """ermlp""" +219 5 loss """marginranking""" +219 5 regularizer """no""" +219 5 optimizer """adam""" +219 5 training_loop """owa""" +219 5 
negative_sampler """basic""" +219 5 evaluator """rankbased""" +219 6 dataset """fb15k237""" +219 6 model """ermlp""" +219 6 loss """marginranking""" +219 6 regularizer """no""" +219 6 optimizer """adam""" +219 6 training_loop """owa""" +219 6 negative_sampler """basic""" +219 6 evaluator """rankbased""" +219 7 dataset """fb15k237""" +219 7 model """ermlp""" +219 7 loss """marginranking""" +219 7 regularizer """no""" +219 7 optimizer """adam""" +219 7 training_loop """owa""" +219 7 negative_sampler """basic""" +219 7 evaluator """rankbased""" +219 8 dataset """fb15k237""" +219 8 model """ermlp""" +219 8 loss """marginranking""" +219 8 regularizer """no""" +219 8 optimizer """adam""" +219 8 training_loop """owa""" +219 8 negative_sampler """basic""" +219 8 evaluator """rankbased""" +219 9 dataset """fb15k237""" +219 9 model """ermlp""" +219 9 loss """marginranking""" +219 9 regularizer """no""" +219 9 optimizer """adam""" +219 9 training_loop """owa""" +219 9 negative_sampler """basic""" +219 9 evaluator """rankbased""" +219 10 dataset """fb15k237""" +219 10 model """ermlp""" +219 10 loss """marginranking""" +219 10 regularizer """no""" +219 10 optimizer """adam""" +219 10 training_loop """owa""" +219 10 negative_sampler """basic""" +219 10 evaluator """rankbased""" +219 11 dataset """fb15k237""" +219 11 model """ermlp""" +219 11 loss """marginranking""" +219 11 regularizer """no""" +219 11 optimizer """adam""" +219 11 training_loop """owa""" +219 11 negative_sampler """basic""" +219 11 evaluator """rankbased""" +219 12 dataset """fb15k237""" +219 12 model """ermlp""" +219 12 loss """marginranking""" +219 12 regularizer """no""" +219 12 optimizer """adam""" +219 12 training_loop """owa""" +219 12 negative_sampler """basic""" +219 12 evaluator """rankbased""" +219 13 dataset """fb15k237""" +219 13 model """ermlp""" +219 13 loss """marginranking""" +219 13 regularizer """no""" +219 13 optimizer """adam""" +219 13 training_loop """owa""" +219 13 negative_sampler 
"""basic""" +219 13 evaluator """rankbased""" +219 14 dataset """fb15k237""" +219 14 model """ermlp""" +219 14 loss """marginranking""" +219 14 regularizer """no""" +219 14 optimizer """adam""" +219 14 training_loop """owa""" +219 14 negative_sampler """basic""" +219 14 evaluator """rankbased""" +219 15 dataset """fb15k237""" +219 15 model """ermlp""" +219 15 loss """marginranking""" +219 15 regularizer """no""" +219 15 optimizer """adam""" +219 15 training_loop """owa""" +219 15 negative_sampler """basic""" +219 15 evaluator """rankbased""" +219 16 dataset """fb15k237""" +219 16 model """ermlp""" +219 16 loss """marginranking""" +219 16 regularizer """no""" +219 16 optimizer """adam""" +219 16 training_loop """owa""" +219 16 negative_sampler """basic""" +219 16 evaluator """rankbased""" +219 17 dataset """fb15k237""" +219 17 model """ermlp""" +219 17 loss """marginranking""" +219 17 regularizer """no""" +219 17 optimizer """adam""" +219 17 training_loop """owa""" +219 17 negative_sampler """basic""" +219 17 evaluator """rankbased""" +219 18 dataset """fb15k237""" +219 18 model """ermlp""" +219 18 loss """marginranking""" +219 18 regularizer """no""" +219 18 optimizer """adam""" +219 18 training_loop """owa""" +219 18 negative_sampler """basic""" +219 18 evaluator """rankbased""" +219 19 dataset """fb15k237""" +219 19 model """ermlp""" +219 19 loss """marginranking""" +219 19 regularizer """no""" +219 19 optimizer """adam""" +219 19 training_loop """owa""" +219 19 negative_sampler """basic""" +219 19 evaluator """rankbased""" +219 20 dataset """fb15k237""" +219 20 model """ermlp""" +219 20 loss """marginranking""" +219 20 regularizer """no""" +219 20 optimizer """adam""" +219 20 training_loop """owa""" +219 20 negative_sampler """basic""" +219 20 evaluator """rankbased""" +219 21 dataset """fb15k237""" +219 21 model """ermlp""" +219 21 loss """marginranking""" +219 21 regularizer """no""" +219 21 optimizer """adam""" +219 21 training_loop """owa""" +219 21 
negative_sampler """basic""" +219 21 evaluator """rankbased""" +219 22 dataset """fb15k237""" +219 22 model """ermlp""" +219 22 loss """marginranking""" +219 22 regularizer """no""" +219 22 optimizer """adam""" +219 22 training_loop """owa""" +219 22 negative_sampler """basic""" +219 22 evaluator """rankbased""" +219 23 dataset """fb15k237""" +219 23 model """ermlp""" +219 23 loss """marginranking""" +219 23 regularizer """no""" +219 23 optimizer """adam""" +219 23 training_loop """owa""" +219 23 negative_sampler """basic""" +219 23 evaluator """rankbased""" +219 24 dataset """fb15k237""" +219 24 model """ermlp""" +219 24 loss """marginranking""" +219 24 regularizer """no""" +219 24 optimizer """adam""" +219 24 training_loop """owa""" +219 24 negative_sampler """basic""" +219 24 evaluator """rankbased""" +219 25 dataset """fb15k237""" +219 25 model """ermlp""" +219 25 loss """marginranking""" +219 25 regularizer """no""" +219 25 optimizer """adam""" +219 25 training_loop """owa""" +219 25 negative_sampler """basic""" +219 25 evaluator """rankbased""" +219 26 dataset """fb15k237""" +219 26 model """ermlp""" +219 26 loss """marginranking""" +219 26 regularizer """no""" +219 26 optimizer """adam""" +219 26 training_loop """owa""" +219 26 negative_sampler """basic""" +219 26 evaluator """rankbased""" +219 27 dataset """fb15k237""" +219 27 model """ermlp""" +219 27 loss """marginranking""" +219 27 regularizer """no""" +219 27 optimizer """adam""" +219 27 training_loop """owa""" +219 27 negative_sampler """basic""" +219 27 evaluator """rankbased""" +219 28 dataset """fb15k237""" +219 28 model """ermlp""" +219 28 loss """marginranking""" +219 28 regularizer """no""" +219 28 optimizer """adam""" +219 28 training_loop """owa""" +219 28 negative_sampler """basic""" +219 28 evaluator """rankbased""" +219 29 dataset """fb15k237""" +219 29 model """ermlp""" +219 29 loss """marginranking""" +219 29 regularizer """no""" +219 29 optimizer """adam""" +219 29 training_loop """owa""" 
+219 29 negative_sampler """basic""" +219 29 evaluator """rankbased""" +219 30 dataset """fb15k237""" +219 30 model """ermlp""" +219 30 loss """marginranking""" +219 30 regularizer """no""" +219 30 optimizer """adam""" +219 30 training_loop """owa""" +219 30 negative_sampler """basic""" +219 30 evaluator """rankbased""" +219 31 dataset """fb15k237""" +219 31 model """ermlp""" +219 31 loss """marginranking""" +219 31 regularizer """no""" +219 31 optimizer """adam""" +219 31 training_loop """owa""" +219 31 negative_sampler """basic""" +219 31 evaluator """rankbased""" +219 32 dataset """fb15k237""" +219 32 model """ermlp""" +219 32 loss """marginranking""" +219 32 regularizer """no""" +219 32 optimizer """adam""" +219 32 training_loop """owa""" +219 32 negative_sampler """basic""" +219 32 evaluator """rankbased""" +219 33 dataset """fb15k237""" +219 33 model """ermlp""" +219 33 loss """marginranking""" +219 33 regularizer """no""" +219 33 optimizer """adam""" +219 33 training_loop """owa""" +219 33 negative_sampler """basic""" +219 33 evaluator """rankbased""" +219 34 dataset """fb15k237""" +219 34 model """ermlp""" +219 34 loss """marginranking""" +219 34 regularizer """no""" +219 34 optimizer """adam""" +219 34 training_loop """owa""" +219 34 negative_sampler """basic""" +219 34 evaluator """rankbased""" +219 35 dataset """fb15k237""" +219 35 model """ermlp""" +219 35 loss """marginranking""" +219 35 regularizer """no""" +219 35 optimizer """adam""" +219 35 training_loop """owa""" +219 35 negative_sampler """basic""" +219 35 evaluator """rankbased""" +219 36 dataset """fb15k237""" +219 36 model """ermlp""" +219 36 loss """marginranking""" +219 36 regularizer """no""" +219 36 optimizer """adam""" +219 36 training_loop """owa""" +219 36 negative_sampler """basic""" +219 36 evaluator """rankbased""" +219 37 dataset """fb15k237""" +219 37 model """ermlp""" +219 37 loss """marginranking""" +219 37 regularizer """no""" +219 37 optimizer """adam""" +219 37 training_loop 
"""owa""" +219 37 negative_sampler """basic""" +219 37 evaluator """rankbased""" +219 38 dataset """fb15k237""" +219 38 model """ermlp""" +219 38 loss """marginranking""" +219 38 regularizer """no""" +219 38 optimizer """adam""" +219 38 training_loop """owa""" +219 38 negative_sampler """basic""" +219 38 evaluator """rankbased""" +219 39 dataset """fb15k237""" +219 39 model """ermlp""" +219 39 loss """marginranking""" +219 39 regularizer """no""" +219 39 optimizer """adam""" +219 39 training_loop """owa""" +219 39 negative_sampler """basic""" +219 39 evaluator """rankbased""" +219 40 dataset """fb15k237""" +219 40 model """ermlp""" +219 40 loss """marginranking""" +219 40 regularizer """no""" +219 40 optimizer """adam""" +219 40 training_loop """owa""" +219 40 negative_sampler """basic""" +219 40 evaluator """rankbased""" +219 41 dataset """fb15k237""" +219 41 model """ermlp""" +219 41 loss """marginranking""" +219 41 regularizer """no""" +219 41 optimizer """adam""" +219 41 training_loop """owa""" +219 41 negative_sampler """basic""" +219 41 evaluator """rankbased""" +219 42 dataset """fb15k237""" +219 42 model """ermlp""" +219 42 loss """marginranking""" +219 42 regularizer """no""" +219 42 optimizer """adam""" +219 42 training_loop """owa""" +219 42 negative_sampler """basic""" +219 42 evaluator """rankbased""" +219 43 dataset """fb15k237""" +219 43 model """ermlp""" +219 43 loss """marginranking""" +219 43 regularizer """no""" +219 43 optimizer """adam""" +219 43 training_loop """owa""" +219 43 negative_sampler """basic""" +219 43 evaluator """rankbased""" +219 44 dataset """fb15k237""" +219 44 model """ermlp""" +219 44 loss """marginranking""" +219 44 regularizer """no""" +219 44 optimizer """adam""" +219 44 training_loop """owa""" +219 44 negative_sampler """basic""" +219 44 evaluator """rankbased""" +220 1 model.embedding_dim 0.0 +220 1 optimizer.lr 0.0014510507674807373 +220 1 negative_sampler.num_negs_per_pos 20.0 +220 1 training.batch_size 1.0 +220 2 
model.embedding_dim 0.0 +220 2 optimizer.lr 0.08631748638477184 +220 2 negative_sampler.num_negs_per_pos 44.0 +220 2 training.batch_size 1.0 +220 3 model.embedding_dim 1.0 +220 3 optimizer.lr 0.028966815721466397 +220 3 negative_sampler.num_negs_per_pos 58.0 +220 3 training.batch_size 1.0 +220 4 model.embedding_dim 2.0 +220 4 optimizer.lr 0.03233676819279106 +220 4 negative_sampler.num_negs_per_pos 30.0 +220 4 training.batch_size 0.0 +220 5 model.embedding_dim 1.0 +220 5 optimizer.lr 0.0034304298225238507 +220 5 negative_sampler.num_negs_per_pos 58.0 +220 5 training.batch_size 2.0 +220 6 model.embedding_dim 2.0 +220 6 optimizer.lr 0.040289154781068204 +220 6 negative_sampler.num_negs_per_pos 19.0 +220 6 training.batch_size 2.0 +220 7 model.embedding_dim 1.0 +220 7 optimizer.lr 0.015895459543623716 +220 7 negative_sampler.num_negs_per_pos 42.0 +220 7 training.batch_size 2.0 +220 8 model.embedding_dim 2.0 +220 8 optimizer.lr 0.059235296060570215 +220 8 negative_sampler.num_negs_per_pos 5.0 +220 8 training.batch_size 0.0 +220 9 model.embedding_dim 2.0 +220 9 optimizer.lr 0.007235505846565528 +220 9 negative_sampler.num_negs_per_pos 29.0 +220 9 training.batch_size 1.0 +220 10 model.embedding_dim 1.0 +220 10 optimizer.lr 0.019910370906888394 +220 10 negative_sampler.num_negs_per_pos 88.0 +220 10 training.batch_size 1.0 +220 11 model.embedding_dim 0.0 +220 11 optimizer.lr 0.027378913184292565 +220 11 negative_sampler.num_negs_per_pos 83.0 +220 11 training.batch_size 1.0 +220 12 model.embedding_dim 1.0 +220 12 optimizer.lr 0.01574282159114048 +220 12 negative_sampler.num_negs_per_pos 49.0 +220 12 training.batch_size 2.0 +220 13 model.embedding_dim 1.0 +220 13 optimizer.lr 0.012084211615048997 +220 13 negative_sampler.num_negs_per_pos 17.0 +220 13 training.batch_size 0.0 +220 14 model.embedding_dim 0.0 +220 14 optimizer.lr 0.0034338647285984594 +220 14 negative_sampler.num_negs_per_pos 61.0 +220 14 training.batch_size 0.0 +220 15 model.embedding_dim 1.0 +220 15 
optimizer.lr 0.09022719427357236 +220 15 negative_sampler.num_negs_per_pos 31.0 +220 15 training.batch_size 2.0 +220 16 model.embedding_dim 2.0 +220 16 optimizer.lr 0.002614127695079347 +220 16 negative_sampler.num_negs_per_pos 77.0 +220 16 training.batch_size 2.0 +220 17 model.embedding_dim 0.0 +220 17 optimizer.lr 0.06467935325409467 +220 17 negative_sampler.num_negs_per_pos 24.0 +220 17 training.batch_size 1.0 +220 18 model.embedding_dim 1.0 +220 18 optimizer.lr 0.03388549879057419 +220 18 negative_sampler.num_negs_per_pos 81.0 +220 18 training.batch_size 0.0 +220 19 model.embedding_dim 2.0 +220 19 optimizer.lr 0.010558083475825411 +220 19 negative_sampler.num_negs_per_pos 27.0 +220 19 training.batch_size 1.0 +220 20 model.embedding_dim 0.0 +220 20 optimizer.lr 0.040289067839996806 +220 20 negative_sampler.num_negs_per_pos 35.0 +220 20 training.batch_size 2.0 +220 21 model.embedding_dim 2.0 +220 21 optimizer.lr 0.0012309039163776148 +220 21 negative_sampler.num_negs_per_pos 3.0 +220 21 training.batch_size 0.0 +220 22 model.embedding_dim 2.0 +220 22 optimizer.lr 0.0014429320131160947 +220 22 negative_sampler.num_negs_per_pos 70.0 +220 22 training.batch_size 2.0 +220 23 model.embedding_dim 1.0 +220 23 optimizer.lr 0.0019549124308680748 +220 23 negative_sampler.num_negs_per_pos 6.0 +220 23 training.batch_size 0.0 +220 24 model.embedding_dim 0.0 +220 24 optimizer.lr 0.023749618261816832 +220 24 negative_sampler.num_negs_per_pos 96.0 +220 24 training.batch_size 1.0 +220 25 model.embedding_dim 2.0 +220 25 optimizer.lr 0.048767901490399584 +220 25 negative_sampler.num_negs_per_pos 48.0 +220 25 training.batch_size 1.0 +220 26 model.embedding_dim 2.0 +220 26 optimizer.lr 0.054335630660731646 +220 26 negative_sampler.num_negs_per_pos 16.0 +220 26 training.batch_size 0.0 +220 27 model.embedding_dim 2.0 +220 27 optimizer.lr 0.0019670180163064827 +220 27 negative_sampler.num_negs_per_pos 13.0 +220 27 training.batch_size 0.0 +220 1 dataset """fb15k237""" +220 1 model 
"""ermlp""" +220 1 loss """bceaftersigmoid""" +220 1 regularizer """no""" +220 1 optimizer """adam""" +220 1 training_loop """owa""" +220 1 negative_sampler """basic""" +220 1 evaluator """rankbased""" +220 2 dataset """fb15k237""" +220 2 model """ermlp""" +220 2 loss """bceaftersigmoid""" +220 2 regularizer """no""" +220 2 optimizer """adam""" +220 2 training_loop """owa""" +220 2 negative_sampler """basic""" +220 2 evaluator """rankbased""" +220 3 dataset """fb15k237""" +220 3 model """ermlp""" +220 3 loss """bceaftersigmoid""" +220 3 regularizer """no""" +220 3 optimizer """adam""" +220 3 training_loop """owa""" +220 3 negative_sampler """basic""" +220 3 evaluator """rankbased""" +220 4 dataset """fb15k237""" +220 4 model """ermlp""" +220 4 loss """bceaftersigmoid""" +220 4 regularizer """no""" +220 4 optimizer """adam""" +220 4 training_loop """owa""" +220 4 negative_sampler """basic""" +220 4 evaluator """rankbased""" +220 5 dataset """fb15k237""" +220 5 model """ermlp""" +220 5 loss """bceaftersigmoid""" +220 5 regularizer """no""" +220 5 optimizer """adam""" +220 5 training_loop """owa""" +220 5 negative_sampler """basic""" +220 5 evaluator """rankbased""" +220 6 dataset """fb15k237""" +220 6 model """ermlp""" +220 6 loss """bceaftersigmoid""" +220 6 regularizer """no""" +220 6 optimizer """adam""" +220 6 training_loop """owa""" +220 6 negative_sampler """basic""" +220 6 evaluator """rankbased""" +220 7 dataset """fb15k237""" +220 7 model """ermlp""" +220 7 loss """bceaftersigmoid""" +220 7 regularizer """no""" +220 7 optimizer """adam""" +220 7 training_loop """owa""" +220 7 negative_sampler """basic""" +220 7 evaluator """rankbased""" +220 8 dataset """fb15k237""" +220 8 model """ermlp""" +220 8 loss """bceaftersigmoid""" +220 8 regularizer """no""" +220 8 optimizer """adam""" +220 8 training_loop """owa""" +220 8 negative_sampler """basic""" +220 8 evaluator """rankbased""" +220 9 dataset """fb15k237""" +220 9 model """ermlp""" +220 9 loss 
"""bceaftersigmoid""" +220 9 regularizer """no""" +220 9 optimizer """adam""" +220 9 training_loop """owa""" +220 9 negative_sampler """basic""" +220 9 evaluator """rankbased""" +220 10 dataset """fb15k237""" +220 10 model """ermlp""" +220 10 loss """bceaftersigmoid""" +220 10 regularizer """no""" +220 10 optimizer """adam""" +220 10 training_loop """owa""" +220 10 negative_sampler """basic""" +220 10 evaluator """rankbased""" +220 11 dataset """fb15k237""" +220 11 model """ermlp""" +220 11 loss """bceaftersigmoid""" +220 11 regularizer """no""" +220 11 optimizer """adam""" +220 11 training_loop """owa""" +220 11 negative_sampler """basic""" +220 11 evaluator """rankbased""" +220 12 dataset """fb15k237""" +220 12 model """ermlp""" +220 12 loss """bceaftersigmoid""" +220 12 regularizer """no""" +220 12 optimizer """adam""" +220 12 training_loop """owa""" +220 12 negative_sampler """basic""" +220 12 evaluator """rankbased""" +220 13 dataset """fb15k237""" +220 13 model """ermlp""" +220 13 loss """bceaftersigmoid""" +220 13 regularizer """no""" +220 13 optimizer """adam""" +220 13 training_loop """owa""" +220 13 negative_sampler """basic""" +220 13 evaluator """rankbased""" +220 14 dataset """fb15k237""" +220 14 model """ermlp""" +220 14 loss """bceaftersigmoid""" +220 14 regularizer """no""" +220 14 optimizer """adam""" +220 14 training_loop """owa""" +220 14 negative_sampler """basic""" +220 14 evaluator """rankbased""" +220 15 dataset """fb15k237""" +220 15 model """ermlp""" +220 15 loss """bceaftersigmoid""" +220 15 regularizer """no""" +220 15 optimizer """adam""" +220 15 training_loop """owa""" +220 15 negative_sampler """basic""" +220 15 evaluator """rankbased""" +220 16 dataset """fb15k237""" +220 16 model """ermlp""" +220 16 loss """bceaftersigmoid""" +220 16 regularizer """no""" +220 16 optimizer """adam""" +220 16 training_loop """owa""" +220 16 negative_sampler """basic""" +220 16 evaluator """rankbased""" +220 17 dataset """fb15k237""" +220 17 model 
"""ermlp""" +220 17 loss """bceaftersigmoid""" +220 17 regularizer """no""" +220 17 optimizer """adam""" +220 17 training_loop """owa""" +220 17 negative_sampler """basic""" +220 17 evaluator """rankbased""" +220 18 dataset """fb15k237""" +220 18 model """ermlp""" +220 18 loss """bceaftersigmoid""" +220 18 regularizer """no""" +220 18 optimizer """adam""" +220 18 training_loop """owa""" +220 18 negative_sampler """basic""" +220 18 evaluator """rankbased""" +220 19 dataset """fb15k237""" +220 19 model """ermlp""" +220 19 loss """bceaftersigmoid""" +220 19 regularizer """no""" +220 19 optimizer """adam""" +220 19 training_loop """owa""" +220 19 negative_sampler """basic""" +220 19 evaluator """rankbased""" +220 20 dataset """fb15k237""" +220 20 model """ermlp""" +220 20 loss """bceaftersigmoid""" +220 20 regularizer """no""" +220 20 optimizer """adam""" +220 20 training_loop """owa""" +220 20 negative_sampler """basic""" +220 20 evaluator """rankbased""" +220 21 dataset """fb15k237""" +220 21 model """ermlp""" +220 21 loss """bceaftersigmoid""" +220 21 regularizer """no""" +220 21 optimizer """adam""" +220 21 training_loop """owa""" +220 21 negative_sampler """basic""" +220 21 evaluator """rankbased""" +220 22 dataset """fb15k237""" +220 22 model """ermlp""" +220 22 loss """bceaftersigmoid""" +220 22 regularizer """no""" +220 22 optimizer """adam""" +220 22 training_loop """owa""" +220 22 negative_sampler """basic""" +220 22 evaluator """rankbased""" +220 23 dataset """fb15k237""" +220 23 model """ermlp""" +220 23 loss """bceaftersigmoid""" +220 23 regularizer """no""" +220 23 optimizer """adam""" +220 23 training_loop """owa""" +220 23 negative_sampler """basic""" +220 23 evaluator """rankbased""" +220 24 dataset """fb15k237""" +220 24 model """ermlp""" +220 24 loss """bceaftersigmoid""" +220 24 regularizer """no""" +220 24 optimizer """adam""" +220 24 training_loop """owa""" +220 24 negative_sampler """basic""" +220 24 evaluator """rankbased""" +220 25 dataset 
"""fb15k237""" +220 25 model """ermlp""" +220 25 loss """bceaftersigmoid""" +220 25 regularizer """no""" +220 25 optimizer """adam""" +220 25 training_loop """owa""" +220 25 negative_sampler """basic""" +220 25 evaluator """rankbased""" +220 26 dataset """fb15k237""" +220 26 model """ermlp""" +220 26 loss """bceaftersigmoid""" +220 26 regularizer """no""" +220 26 optimizer """adam""" +220 26 training_loop """owa""" +220 26 negative_sampler """basic""" +220 26 evaluator """rankbased""" +220 27 dataset """fb15k237""" +220 27 model """ermlp""" +220 27 loss """bceaftersigmoid""" +220 27 regularizer """no""" +220 27 optimizer """adam""" +220 27 training_loop """owa""" +220 27 negative_sampler """basic""" +220 27 evaluator """rankbased""" +221 1 model.embedding_dim 1.0 +221 1 optimizer.lr 0.006542676303994591 +221 1 negative_sampler.num_negs_per_pos 73.0 +221 1 training.batch_size 2.0 +221 2 model.embedding_dim 1.0 +221 2 optimizer.lr 0.0022426658617885967 +221 2 negative_sampler.num_negs_per_pos 70.0 +221 2 training.batch_size 0.0 +221 3 model.embedding_dim 0.0 +221 3 optimizer.lr 0.009890963439511955 +221 3 negative_sampler.num_negs_per_pos 98.0 +221 3 training.batch_size 0.0 +221 4 model.embedding_dim 2.0 +221 4 optimizer.lr 0.018503117436018245 +221 4 negative_sampler.num_negs_per_pos 14.0 +221 4 training.batch_size 1.0 +221 5 model.embedding_dim 2.0 +221 5 optimizer.lr 0.014194729324353802 +221 5 negative_sampler.num_negs_per_pos 85.0 +221 5 training.batch_size 1.0 +221 6 model.embedding_dim 1.0 +221 6 optimizer.lr 0.016124686975291777 +221 6 negative_sampler.num_negs_per_pos 93.0 +221 6 training.batch_size 2.0 +221 7 model.embedding_dim 2.0 +221 7 optimizer.lr 0.001944603790675805 +221 7 negative_sampler.num_negs_per_pos 52.0 +221 7 training.batch_size 0.0 +221 8 model.embedding_dim 2.0 +221 8 optimizer.lr 0.0046688122014810806 +221 8 negative_sampler.num_negs_per_pos 61.0 +221 8 training.batch_size 1.0 +221 9 model.embedding_dim 0.0 +221 9 optimizer.lr 
0.02262863708695595 +221 9 negative_sampler.num_negs_per_pos 21.0 +221 9 training.batch_size 0.0 +221 10 model.embedding_dim 0.0 +221 10 optimizer.lr 0.009509073346197227 +221 10 negative_sampler.num_negs_per_pos 57.0 +221 10 training.batch_size 1.0 +221 11 model.embedding_dim 2.0 +221 11 optimizer.lr 0.0016525210471702194 +221 11 negative_sampler.num_negs_per_pos 51.0 +221 11 training.batch_size 2.0 +221 12 model.embedding_dim 1.0 +221 12 optimizer.lr 0.013011882099370167 +221 12 negative_sampler.num_negs_per_pos 62.0 +221 12 training.batch_size 2.0 +221 13 model.embedding_dim 0.0 +221 13 optimizer.lr 0.010657923998732614 +221 13 negative_sampler.num_negs_per_pos 92.0 +221 13 training.batch_size 1.0 +221 14 model.embedding_dim 1.0 +221 14 optimizer.lr 0.006266224710875758 +221 14 negative_sampler.num_negs_per_pos 62.0 +221 14 training.batch_size 0.0 +221 15 model.embedding_dim 1.0 +221 15 optimizer.lr 0.09345704944890408 +221 15 negative_sampler.num_negs_per_pos 54.0 +221 15 training.batch_size 0.0 +221 16 model.embedding_dim 0.0 +221 16 optimizer.lr 0.010674056031206716 +221 16 negative_sampler.num_negs_per_pos 88.0 +221 16 training.batch_size 0.0 +221 17 model.embedding_dim 1.0 +221 17 optimizer.lr 0.0038644921669080926 +221 17 negative_sampler.num_negs_per_pos 13.0 +221 17 training.batch_size 1.0 +221 18 model.embedding_dim 1.0 +221 18 optimizer.lr 0.0033880935686467764 +221 18 negative_sampler.num_negs_per_pos 99.0 +221 18 training.batch_size 0.0 +221 19 model.embedding_dim 1.0 +221 19 optimizer.lr 0.003776901702695982 +221 19 negative_sampler.num_negs_per_pos 57.0 +221 19 training.batch_size 2.0 +221 20 model.embedding_dim 0.0 +221 20 optimizer.lr 0.0018156311185931558 +221 20 negative_sampler.num_negs_per_pos 5.0 +221 20 training.batch_size 2.0 +221 21 model.embedding_dim 2.0 +221 21 optimizer.lr 0.0011441793367257748 +221 21 negative_sampler.num_negs_per_pos 78.0 +221 21 training.batch_size 0.0 +221 1 dataset """fb15k237""" +221 1 model """ermlp""" +221 1 
loss """softplus""" +221 1 regularizer """no""" +221 1 optimizer """adam""" +221 1 training_loop """owa""" +221 1 negative_sampler """basic""" +221 1 evaluator """rankbased""" +221 2 dataset """fb15k237""" +221 2 model """ermlp""" +221 2 loss """softplus""" +221 2 regularizer """no""" +221 2 optimizer """adam""" +221 2 training_loop """owa""" +221 2 negative_sampler """basic""" +221 2 evaluator """rankbased""" +221 3 dataset """fb15k237""" +221 3 model """ermlp""" +221 3 loss """softplus""" +221 3 regularizer """no""" +221 3 optimizer """adam""" +221 3 training_loop """owa""" +221 3 negative_sampler """basic""" +221 3 evaluator """rankbased""" +221 4 dataset """fb15k237""" +221 4 model """ermlp""" +221 4 loss """softplus""" +221 4 regularizer """no""" +221 4 optimizer """adam""" +221 4 training_loop """owa""" +221 4 negative_sampler """basic""" +221 4 evaluator """rankbased""" +221 5 dataset """fb15k237""" +221 5 model """ermlp""" +221 5 loss """softplus""" +221 5 regularizer """no""" +221 5 optimizer """adam""" +221 5 training_loop """owa""" +221 5 negative_sampler """basic""" +221 5 evaluator """rankbased""" +221 6 dataset """fb15k237""" +221 6 model """ermlp""" +221 6 loss """softplus""" +221 6 regularizer """no""" +221 6 optimizer """adam""" +221 6 training_loop """owa""" +221 6 negative_sampler """basic""" +221 6 evaluator """rankbased""" +221 7 dataset """fb15k237""" +221 7 model """ermlp""" +221 7 loss """softplus""" +221 7 regularizer """no""" +221 7 optimizer """adam""" +221 7 training_loop """owa""" +221 7 negative_sampler """basic""" +221 7 evaluator """rankbased""" +221 8 dataset """fb15k237""" +221 8 model """ermlp""" +221 8 loss """softplus""" +221 8 regularizer """no""" +221 8 optimizer """adam""" +221 8 training_loop """owa""" +221 8 negative_sampler """basic""" +221 8 evaluator """rankbased""" +221 9 dataset """fb15k237""" +221 9 model """ermlp""" +221 9 loss """softplus""" +221 9 regularizer """no""" +221 9 optimizer """adam""" +221 9 
training_loop """owa""" +221 9 negative_sampler """basic""" +221 9 evaluator """rankbased""" +221 10 dataset """fb15k237""" +221 10 model """ermlp""" +221 10 loss """softplus""" +221 10 regularizer """no""" +221 10 optimizer """adam""" +221 10 training_loop """owa""" +221 10 negative_sampler """basic""" +221 10 evaluator """rankbased""" +221 11 dataset """fb15k237""" +221 11 model """ermlp""" +221 11 loss """softplus""" +221 11 regularizer """no""" +221 11 optimizer """adam""" +221 11 training_loop """owa""" +221 11 negative_sampler """basic""" +221 11 evaluator """rankbased""" +221 12 dataset """fb15k237""" +221 12 model """ermlp""" +221 12 loss """softplus""" +221 12 regularizer """no""" +221 12 optimizer """adam""" +221 12 training_loop """owa""" +221 12 negative_sampler """basic""" +221 12 evaluator """rankbased""" +221 13 dataset """fb15k237""" +221 13 model """ermlp""" +221 13 loss """softplus""" +221 13 regularizer """no""" +221 13 optimizer """adam""" +221 13 training_loop """owa""" +221 13 negative_sampler """basic""" +221 13 evaluator """rankbased""" +221 14 dataset """fb15k237""" +221 14 model """ermlp""" +221 14 loss """softplus""" +221 14 regularizer """no""" +221 14 optimizer """adam""" +221 14 training_loop """owa""" +221 14 negative_sampler """basic""" +221 14 evaluator """rankbased""" +221 15 dataset """fb15k237""" +221 15 model """ermlp""" +221 15 loss """softplus""" +221 15 regularizer """no""" +221 15 optimizer """adam""" +221 15 training_loop """owa""" +221 15 negative_sampler """basic""" +221 15 evaluator """rankbased""" +221 16 dataset """fb15k237""" +221 16 model """ermlp""" +221 16 loss """softplus""" +221 16 regularizer """no""" +221 16 optimizer """adam""" +221 16 training_loop """owa""" +221 16 negative_sampler """basic""" +221 16 evaluator """rankbased""" +221 17 dataset """fb15k237""" +221 17 model """ermlp""" +221 17 loss """softplus""" +221 17 regularizer """no""" +221 17 optimizer """adam""" +221 17 training_loop """owa""" +221 17 
negative_sampler """basic""" +221 17 evaluator """rankbased""" +221 18 dataset """fb15k237""" +221 18 model """ermlp""" +221 18 loss """softplus""" +221 18 regularizer """no""" +221 18 optimizer """adam""" +221 18 training_loop """owa""" +221 18 negative_sampler """basic""" +221 18 evaluator """rankbased""" +221 19 dataset """fb15k237""" +221 19 model """ermlp""" +221 19 loss """softplus""" +221 19 regularizer """no""" +221 19 optimizer """adam""" +221 19 training_loop """owa""" +221 19 negative_sampler """basic""" +221 19 evaluator """rankbased""" +221 20 dataset """fb15k237""" +221 20 model """ermlp""" +221 20 loss """softplus""" +221 20 regularizer """no""" +221 20 optimizer """adam""" +221 20 training_loop """owa""" +221 20 negative_sampler """basic""" +221 20 evaluator """rankbased""" +221 21 dataset """fb15k237""" +221 21 model """ermlp""" +221 21 loss """softplus""" +221 21 regularizer """no""" +221 21 optimizer """adam""" +221 21 training_loop """owa""" +221 21 negative_sampler """basic""" +221 21 evaluator """rankbased""" +222 1 model.embedding_dim 0.0 +222 1 optimizer.lr 0.0020881173878311817 +222 1 negative_sampler.num_negs_per_pos 9.0 +222 1 training.batch_size 2.0 +222 2 model.embedding_dim 1.0 +222 2 optimizer.lr 0.006166793209726337 +222 2 negative_sampler.num_negs_per_pos 94.0 +222 2 training.batch_size 0.0 +222 3 model.embedding_dim 2.0 +222 3 optimizer.lr 0.006177039440392552 +222 3 negative_sampler.num_negs_per_pos 10.0 +222 3 training.batch_size 1.0 +222 4 model.embedding_dim 0.0 +222 4 optimizer.lr 0.012320182124845699 +222 4 negative_sampler.num_negs_per_pos 64.0 +222 4 training.batch_size 1.0 +222 5 model.embedding_dim 2.0 +222 5 optimizer.lr 0.020423222874629537 +222 5 negative_sampler.num_negs_per_pos 21.0 +222 5 training.batch_size 1.0 +222 6 model.embedding_dim 0.0 +222 6 optimizer.lr 0.0042056331074652 +222 6 negative_sampler.num_negs_per_pos 51.0 +222 6 training.batch_size 1.0 +222 7 model.embedding_dim 2.0 +222 7 optimizer.lr 
0.010014739105092596 +222 7 negative_sampler.num_negs_per_pos 95.0 +222 7 training.batch_size 0.0 +222 8 model.embedding_dim 2.0 +222 8 optimizer.lr 0.00635771268375256 +222 8 negative_sampler.num_negs_per_pos 83.0 +222 8 training.batch_size 1.0 +222 9 model.embedding_dim 2.0 +222 9 optimizer.lr 0.004329309658681612 +222 9 negative_sampler.num_negs_per_pos 8.0 +222 9 training.batch_size 1.0 +222 10 model.embedding_dim 0.0 +222 10 optimizer.lr 0.0042994117382831425 +222 10 negative_sampler.num_negs_per_pos 28.0 +222 10 training.batch_size 1.0 +222 11 model.embedding_dim 0.0 +222 11 optimizer.lr 0.004159011952016435 +222 11 negative_sampler.num_negs_per_pos 79.0 +222 11 training.batch_size 2.0 +222 12 model.embedding_dim 0.0 +222 12 optimizer.lr 0.005129855598375433 +222 12 negative_sampler.num_negs_per_pos 46.0 +222 12 training.batch_size 1.0 +222 13 model.embedding_dim 1.0 +222 13 optimizer.lr 0.05221969083482826 +222 13 negative_sampler.num_negs_per_pos 94.0 +222 13 training.batch_size 2.0 +222 14 model.embedding_dim 1.0 +222 14 optimizer.lr 0.01406522687274812 +222 14 negative_sampler.num_negs_per_pos 36.0 +222 14 training.batch_size 1.0 +222 15 model.embedding_dim 1.0 +222 15 optimizer.lr 0.0032400535077727376 +222 15 negative_sampler.num_negs_per_pos 63.0 +222 15 training.batch_size 0.0 +222 16 model.embedding_dim 1.0 +222 16 optimizer.lr 0.05948205244115869 +222 16 negative_sampler.num_negs_per_pos 19.0 +222 16 training.batch_size 1.0 +222 17 model.embedding_dim 0.0 +222 17 optimizer.lr 0.004138099596462786 +222 17 negative_sampler.num_negs_per_pos 85.0 +222 17 training.batch_size 1.0 +222 18 model.embedding_dim 0.0 +222 18 optimizer.lr 0.003963518638844855 +222 18 negative_sampler.num_negs_per_pos 83.0 +222 18 training.batch_size 2.0 +222 19 model.embedding_dim 0.0 +222 19 optimizer.lr 0.0013076894140487373 +222 19 negative_sampler.num_negs_per_pos 3.0 +222 19 training.batch_size 0.0 +222 20 model.embedding_dim 2.0 +222 20 optimizer.lr 0.07645658388243667 
+222 20 negative_sampler.num_negs_per_pos 75.0 +222 20 training.batch_size 2.0 +222 21 model.embedding_dim 2.0 +222 21 optimizer.lr 0.021590106844026652 +222 21 negative_sampler.num_negs_per_pos 79.0 +222 21 training.batch_size 0.0 +222 22 model.embedding_dim 2.0 +222 22 optimizer.lr 0.001016939747791535 +222 22 negative_sampler.num_negs_per_pos 22.0 +222 22 training.batch_size 0.0 +222 23 model.embedding_dim 1.0 +222 23 optimizer.lr 0.07090707654848774 +222 23 negative_sampler.num_negs_per_pos 68.0 +222 23 training.batch_size 0.0 +222 24 model.embedding_dim 0.0 +222 24 optimizer.lr 0.005021321992292831 +222 24 negative_sampler.num_negs_per_pos 96.0 +222 24 training.batch_size 1.0 +222 25 model.embedding_dim 2.0 +222 25 optimizer.lr 0.002025770231414569 +222 25 negative_sampler.num_negs_per_pos 15.0 +222 25 training.batch_size 1.0 +222 26 model.embedding_dim 1.0 +222 26 optimizer.lr 0.002075287326229843 +222 26 negative_sampler.num_negs_per_pos 82.0 +222 26 training.batch_size 1.0 +222 27 model.embedding_dim 1.0 +222 27 optimizer.lr 0.007854980689538415 +222 27 negative_sampler.num_negs_per_pos 80.0 +222 27 training.batch_size 0.0 +222 28 model.embedding_dim 1.0 +222 28 optimizer.lr 0.0034586957348630913 +222 28 negative_sampler.num_negs_per_pos 44.0 +222 28 training.batch_size 2.0 +222 29 model.embedding_dim 0.0 +222 29 optimizer.lr 0.03232506555484555 +222 29 negative_sampler.num_negs_per_pos 34.0 +222 29 training.batch_size 0.0 +222 30 model.embedding_dim 2.0 +222 30 optimizer.lr 0.0029841704292659175 +222 30 negative_sampler.num_negs_per_pos 4.0 +222 30 training.batch_size 2.0 +222 31 model.embedding_dim 1.0 +222 31 optimizer.lr 0.019682460621125632 +222 31 negative_sampler.num_negs_per_pos 46.0 +222 31 training.batch_size 0.0 +222 32 model.embedding_dim 0.0 +222 32 optimizer.lr 0.07132043965179136 +222 32 negative_sampler.num_negs_per_pos 49.0 +222 32 training.batch_size 2.0 +222 33 model.embedding_dim 1.0 +222 33 optimizer.lr 0.03550129054098538 +222 33 
negative_sampler.num_negs_per_pos 84.0 +222 33 training.batch_size 1.0 +222 34 model.embedding_dim 0.0 +222 34 optimizer.lr 0.004356788744071803 +222 34 negative_sampler.num_negs_per_pos 28.0 +222 34 training.batch_size 1.0 +222 35 model.embedding_dim 2.0 +222 35 optimizer.lr 0.003772311818837226 +222 35 negative_sampler.num_negs_per_pos 62.0 +222 35 training.batch_size 1.0 +222 36 model.embedding_dim 0.0 +222 36 optimizer.lr 0.008585078127682725 +222 36 negative_sampler.num_negs_per_pos 33.0 +222 36 training.batch_size 0.0 +222 37 model.embedding_dim 1.0 +222 37 optimizer.lr 0.09933755831303973 +222 37 negative_sampler.num_negs_per_pos 3.0 +222 37 training.batch_size 1.0 +222 38 model.embedding_dim 0.0 +222 38 optimizer.lr 0.014502147848945397 +222 38 negative_sampler.num_negs_per_pos 33.0 +222 38 training.batch_size 2.0 +222 39 model.embedding_dim 0.0 +222 39 optimizer.lr 0.006094406014085718 +222 39 negative_sampler.num_negs_per_pos 75.0 +222 39 training.batch_size 2.0 +222 40 model.embedding_dim 1.0 +222 40 optimizer.lr 0.09719850408152957 +222 40 negative_sampler.num_negs_per_pos 76.0 +222 40 training.batch_size 0.0 +222 41 model.embedding_dim 1.0 +222 41 optimizer.lr 0.015803536070999942 +222 41 negative_sampler.num_negs_per_pos 43.0 +222 41 training.batch_size 0.0 +222 42 model.embedding_dim 1.0 +222 42 optimizer.lr 0.0017568644941305116 +222 42 negative_sampler.num_negs_per_pos 63.0 +222 42 training.batch_size 1.0 +222 43 model.embedding_dim 2.0 +222 43 optimizer.lr 0.00923256058958099 +222 43 negative_sampler.num_negs_per_pos 98.0 +222 43 training.batch_size 1.0 +222 44 model.embedding_dim 1.0 +222 44 optimizer.lr 0.003315410353475561 +222 44 negative_sampler.num_negs_per_pos 99.0 +222 44 training.batch_size 0.0 +222 1 dataset """fb15k237""" +222 1 model """ermlp""" +222 1 loss """bceaftersigmoid""" +222 1 regularizer """no""" +222 1 optimizer """adam""" +222 1 training_loop """owa""" +222 1 negative_sampler """basic""" +222 1 evaluator """rankbased""" 
+222 2 dataset """fb15k237""" +222 2 model """ermlp""" +222 2 loss """bceaftersigmoid""" +222 2 regularizer """no""" +222 2 optimizer """adam""" +222 2 training_loop """owa""" +222 2 negative_sampler """basic""" +222 2 evaluator """rankbased""" +222 3 dataset """fb15k237""" +222 3 model """ermlp""" +222 3 loss """bceaftersigmoid""" +222 3 regularizer """no""" +222 3 optimizer """adam""" +222 3 training_loop """owa""" +222 3 negative_sampler """basic""" +222 3 evaluator """rankbased""" +222 4 dataset """fb15k237""" +222 4 model """ermlp""" +222 4 loss """bceaftersigmoid""" +222 4 regularizer """no""" +222 4 optimizer """adam""" +222 4 training_loop """owa""" +222 4 negative_sampler """basic""" +222 4 evaluator """rankbased""" +222 5 dataset """fb15k237""" +222 5 model """ermlp""" +222 5 loss """bceaftersigmoid""" +222 5 regularizer """no""" +222 5 optimizer """adam""" +222 5 training_loop """owa""" +222 5 negative_sampler """basic""" +222 5 evaluator """rankbased""" +222 6 dataset """fb15k237""" +222 6 model """ermlp""" +222 6 loss """bceaftersigmoid""" +222 6 regularizer """no""" +222 6 optimizer """adam""" +222 6 training_loop """owa""" +222 6 negative_sampler """basic""" +222 6 evaluator """rankbased""" +222 7 dataset """fb15k237""" +222 7 model """ermlp""" +222 7 loss """bceaftersigmoid""" +222 7 regularizer """no""" +222 7 optimizer """adam""" +222 7 training_loop """owa""" +222 7 negative_sampler """basic""" +222 7 evaluator """rankbased""" +222 8 dataset """fb15k237""" +222 8 model """ermlp""" +222 8 loss """bceaftersigmoid""" +222 8 regularizer """no""" +222 8 optimizer """adam""" +222 8 training_loop """owa""" +222 8 negative_sampler """basic""" +222 8 evaluator """rankbased""" +222 9 dataset """fb15k237""" +222 9 model """ermlp""" +222 9 loss """bceaftersigmoid""" +222 9 regularizer """no""" +222 9 optimizer """adam""" +222 9 training_loop """owa""" +222 9 negative_sampler """basic""" +222 9 evaluator """rankbased""" +222 10 dataset """fb15k237""" +222 10 
model """ermlp""" +222 10 loss """bceaftersigmoid""" +222 10 regularizer """no""" +222 10 optimizer """adam""" +222 10 training_loop """owa""" +222 10 negative_sampler """basic""" +222 10 evaluator """rankbased""" +222 11 dataset """fb15k237""" +222 11 model """ermlp""" +222 11 loss """bceaftersigmoid""" +222 11 regularizer """no""" +222 11 optimizer """adam""" +222 11 training_loop """owa""" +222 11 negative_sampler """basic""" +222 11 evaluator """rankbased""" +222 12 dataset """fb15k237""" +222 12 model """ermlp""" +222 12 loss """bceaftersigmoid""" +222 12 regularizer """no""" +222 12 optimizer """adam""" +222 12 training_loop """owa""" +222 12 negative_sampler """basic""" +222 12 evaluator """rankbased""" +222 13 dataset """fb15k237""" +222 13 model """ermlp""" +222 13 loss """bceaftersigmoid""" +222 13 regularizer """no""" +222 13 optimizer """adam""" +222 13 training_loop """owa""" +222 13 negative_sampler """basic""" +222 13 evaluator """rankbased""" +222 14 dataset """fb15k237""" +222 14 model """ermlp""" +222 14 loss """bceaftersigmoid""" +222 14 regularizer """no""" +222 14 optimizer """adam""" +222 14 training_loop """owa""" +222 14 negative_sampler """basic""" +222 14 evaluator """rankbased""" +222 15 dataset """fb15k237""" +222 15 model """ermlp""" +222 15 loss """bceaftersigmoid""" +222 15 regularizer """no""" +222 15 optimizer """adam""" +222 15 training_loop """owa""" +222 15 negative_sampler """basic""" +222 15 evaluator """rankbased""" +222 16 dataset """fb15k237""" +222 16 model """ermlp""" +222 16 loss """bceaftersigmoid""" +222 16 regularizer """no""" +222 16 optimizer """adam""" +222 16 training_loop """owa""" +222 16 negative_sampler """basic""" +222 16 evaluator """rankbased""" +222 17 dataset """fb15k237""" +222 17 model """ermlp""" +222 17 loss """bceaftersigmoid""" +222 17 regularizer """no""" +222 17 optimizer """adam""" +222 17 training_loop """owa""" +222 17 negative_sampler """basic""" +222 17 evaluator """rankbased""" +222 18 
dataset """fb15k237""" +222 18 model """ermlp""" +222 18 loss """bceaftersigmoid""" +222 18 regularizer """no""" +222 18 optimizer """adam""" +222 18 training_loop """owa""" +222 18 negative_sampler """basic""" +222 18 evaluator """rankbased""" +222 19 dataset """fb15k237""" +222 19 model """ermlp""" +222 19 loss """bceaftersigmoid""" +222 19 regularizer """no""" +222 19 optimizer """adam""" +222 19 training_loop """owa""" +222 19 negative_sampler """basic""" +222 19 evaluator """rankbased""" +222 20 dataset """fb15k237""" +222 20 model """ermlp""" +222 20 loss """bceaftersigmoid""" +222 20 regularizer """no""" +222 20 optimizer """adam""" +222 20 training_loop """owa""" +222 20 negative_sampler """basic""" +222 20 evaluator """rankbased""" +222 21 dataset """fb15k237""" +222 21 model """ermlp""" +222 21 loss """bceaftersigmoid""" +222 21 regularizer """no""" +222 21 optimizer """adam""" +222 21 training_loop """owa""" +222 21 negative_sampler """basic""" +222 21 evaluator """rankbased""" +222 22 dataset """fb15k237""" +222 22 model """ermlp""" +222 22 loss """bceaftersigmoid""" +222 22 regularizer """no""" +222 22 optimizer """adam""" +222 22 training_loop """owa""" +222 22 negative_sampler """basic""" +222 22 evaluator """rankbased""" +222 23 dataset """fb15k237""" +222 23 model """ermlp""" +222 23 loss """bceaftersigmoid""" +222 23 regularizer """no""" +222 23 optimizer """adam""" +222 23 training_loop """owa""" +222 23 negative_sampler """basic""" +222 23 evaluator """rankbased""" +222 24 dataset """fb15k237""" +222 24 model """ermlp""" +222 24 loss """bceaftersigmoid""" +222 24 regularizer """no""" +222 24 optimizer """adam""" +222 24 training_loop """owa""" +222 24 negative_sampler """basic""" +222 24 evaluator """rankbased""" +222 25 dataset """fb15k237""" +222 25 model """ermlp""" +222 25 loss """bceaftersigmoid""" +222 25 regularizer """no""" +222 25 optimizer """adam""" +222 25 training_loop """owa""" +222 25 negative_sampler """basic""" +222 25 evaluator 
"""rankbased""" +222 26 dataset """fb15k237""" +222 26 model """ermlp""" +222 26 loss """bceaftersigmoid""" +222 26 regularizer """no""" +222 26 optimizer """adam""" +222 26 training_loop """owa""" +222 26 negative_sampler """basic""" +222 26 evaluator """rankbased""" +222 27 dataset """fb15k237""" +222 27 model """ermlp""" +222 27 loss """bceaftersigmoid""" +222 27 regularizer """no""" +222 27 optimizer """adam""" +222 27 training_loop """owa""" +222 27 negative_sampler """basic""" +222 27 evaluator """rankbased""" +222 28 dataset """fb15k237""" +222 28 model """ermlp""" +222 28 loss """bceaftersigmoid""" +222 28 regularizer """no""" +222 28 optimizer """adam""" +222 28 training_loop """owa""" +222 28 negative_sampler """basic""" +222 28 evaluator """rankbased""" +222 29 dataset """fb15k237""" +222 29 model """ermlp""" +222 29 loss """bceaftersigmoid""" +222 29 regularizer """no""" +222 29 optimizer """adam""" +222 29 training_loop """owa""" +222 29 negative_sampler """basic""" +222 29 evaluator """rankbased""" +222 30 dataset """fb15k237""" +222 30 model """ermlp""" +222 30 loss """bceaftersigmoid""" +222 30 regularizer """no""" +222 30 optimizer """adam""" +222 30 training_loop """owa""" +222 30 negative_sampler """basic""" +222 30 evaluator """rankbased""" +222 31 dataset """fb15k237""" +222 31 model """ermlp""" +222 31 loss """bceaftersigmoid""" +222 31 regularizer """no""" +222 31 optimizer """adam""" +222 31 training_loop """owa""" +222 31 negative_sampler """basic""" +222 31 evaluator """rankbased""" +222 32 dataset """fb15k237""" +222 32 model """ermlp""" +222 32 loss """bceaftersigmoid""" +222 32 regularizer """no""" +222 32 optimizer """adam""" +222 32 training_loop """owa""" +222 32 negative_sampler """basic""" +222 32 evaluator """rankbased""" +222 33 dataset """fb15k237""" +222 33 model """ermlp""" +222 33 loss """bceaftersigmoid""" +222 33 regularizer """no""" +222 33 optimizer """adam""" +222 33 training_loop """owa""" +222 33 negative_sampler 
"""basic""" +222 33 evaluator """rankbased""" +222 34 dataset """fb15k237""" +222 34 model """ermlp""" +222 34 loss """bceaftersigmoid""" +222 34 regularizer """no""" +222 34 optimizer """adam""" +222 34 training_loop """owa""" +222 34 negative_sampler """basic""" +222 34 evaluator """rankbased""" +222 35 dataset """fb15k237""" +222 35 model """ermlp""" +222 35 loss """bceaftersigmoid""" +222 35 regularizer """no""" +222 35 optimizer """adam""" +222 35 training_loop """owa""" +222 35 negative_sampler """basic""" +222 35 evaluator """rankbased""" +222 36 dataset """fb15k237""" +222 36 model """ermlp""" +222 36 loss """bceaftersigmoid""" +222 36 regularizer """no""" +222 36 optimizer """adam""" +222 36 training_loop """owa""" +222 36 negative_sampler """basic""" +222 36 evaluator """rankbased""" +222 37 dataset """fb15k237""" +222 37 model """ermlp""" +222 37 loss """bceaftersigmoid""" +222 37 regularizer """no""" +222 37 optimizer """adam""" +222 37 training_loop """owa""" +222 37 negative_sampler """basic""" +222 37 evaluator """rankbased""" +222 38 dataset """fb15k237""" +222 38 model """ermlp""" +222 38 loss """bceaftersigmoid""" +222 38 regularizer """no""" +222 38 optimizer """adam""" +222 38 training_loop """owa""" +222 38 negative_sampler """basic""" +222 38 evaluator """rankbased""" +222 39 dataset """fb15k237""" +222 39 model """ermlp""" +222 39 loss """bceaftersigmoid""" +222 39 regularizer """no""" +222 39 optimizer """adam""" +222 39 training_loop """owa""" +222 39 negative_sampler """basic""" +222 39 evaluator """rankbased""" +222 40 dataset """fb15k237""" +222 40 model """ermlp""" +222 40 loss """bceaftersigmoid""" +222 40 regularizer """no""" +222 40 optimizer """adam""" +222 40 training_loop """owa""" +222 40 negative_sampler """basic""" +222 40 evaluator """rankbased""" +222 41 dataset """fb15k237""" +222 41 model """ermlp""" +222 41 loss """bceaftersigmoid""" +222 41 regularizer """no""" +222 41 optimizer """adam""" +222 41 training_loop """owa""" 
+222 41 negative_sampler """basic""" +222 41 evaluator """rankbased""" +222 42 dataset """fb15k237""" +222 42 model """ermlp""" +222 42 loss """bceaftersigmoid""" +222 42 regularizer """no""" +222 42 optimizer """adam""" +222 42 training_loop """owa""" +222 42 negative_sampler """basic""" +222 42 evaluator """rankbased""" +222 43 dataset """fb15k237""" +222 43 model """ermlp""" +222 43 loss """bceaftersigmoid""" +222 43 regularizer """no""" +222 43 optimizer """adam""" +222 43 training_loop """owa""" +222 43 negative_sampler """basic""" +222 43 evaluator """rankbased""" +222 44 dataset """fb15k237""" +222 44 model """ermlp""" +222 44 loss """bceaftersigmoid""" +222 44 regularizer """no""" +222 44 optimizer """adam""" +222 44 training_loop """owa""" +222 44 negative_sampler """basic""" +222 44 evaluator """rankbased""" +223 1 model.embedding_dim 0.0 +223 1 optimizer.lr 0.023804170652810527 +223 1 negative_sampler.num_negs_per_pos 68.0 +223 1 training.batch_size 1.0 +223 2 model.embedding_dim 1.0 +223 2 optimizer.lr 0.003670420356326883 +223 2 negative_sampler.num_negs_per_pos 89.0 +223 2 training.batch_size 1.0 +223 3 model.embedding_dim 2.0 +223 3 optimizer.lr 0.004139244276696481 +223 3 negative_sampler.num_negs_per_pos 61.0 +223 3 training.batch_size 2.0 +223 4 model.embedding_dim 1.0 +223 4 optimizer.lr 0.0010101721636236304 +223 4 negative_sampler.num_negs_per_pos 83.0 +223 4 training.batch_size 1.0 +223 5 model.embedding_dim 2.0 +223 5 optimizer.lr 0.007873564099429404 +223 5 negative_sampler.num_negs_per_pos 22.0 +223 5 training.batch_size 0.0 +223 6 model.embedding_dim 2.0 +223 6 optimizer.lr 0.007795935318842724 +223 6 negative_sampler.num_negs_per_pos 43.0 +223 6 training.batch_size 2.0 +223 7 model.embedding_dim 0.0 +223 7 optimizer.lr 0.06308167897648478 +223 7 negative_sampler.num_negs_per_pos 73.0 +223 7 training.batch_size 0.0 +223 8 model.embedding_dim 0.0 +223 8 optimizer.lr 0.0035893830738870803 +223 8 negative_sampler.num_negs_per_pos 44.0 +223 8 
training.batch_size 2.0 +223 9 model.embedding_dim 2.0 +223 9 optimizer.lr 0.019632226841505205 +223 9 negative_sampler.num_negs_per_pos 31.0 +223 9 training.batch_size 2.0 +223 10 model.embedding_dim 0.0 +223 10 optimizer.lr 0.007027747657684126 +223 10 negative_sampler.num_negs_per_pos 45.0 +223 10 training.batch_size 1.0 +223 11 model.embedding_dim 0.0 +223 11 optimizer.lr 0.014646750350137766 +223 11 negative_sampler.num_negs_per_pos 69.0 +223 11 training.batch_size 2.0 +223 12 model.embedding_dim 2.0 +223 12 optimizer.lr 0.0010179141622708547 +223 12 negative_sampler.num_negs_per_pos 40.0 +223 12 training.batch_size 1.0 +223 13 model.embedding_dim 1.0 +223 13 optimizer.lr 0.0035189171440348508 +223 13 negative_sampler.num_negs_per_pos 94.0 +223 13 training.batch_size 2.0 +223 14 model.embedding_dim 1.0 +223 14 optimizer.lr 0.04792164461499667 +223 14 negative_sampler.num_negs_per_pos 99.0 +223 14 training.batch_size 2.0 +223 15 model.embedding_dim 0.0 +223 15 optimizer.lr 0.03645112813434177 +223 15 negative_sampler.num_negs_per_pos 31.0 +223 15 training.batch_size 2.0 +223 16 model.embedding_dim 2.0 +223 16 optimizer.lr 0.001312208591998532 +223 16 negative_sampler.num_negs_per_pos 90.0 +223 16 training.batch_size 2.0 +223 17 model.embedding_dim 0.0 +223 17 optimizer.lr 0.006627745145404088 +223 17 negative_sampler.num_negs_per_pos 32.0 +223 17 training.batch_size 1.0 +223 18 model.embedding_dim 1.0 +223 18 optimizer.lr 0.0012386453359005776 +223 18 negative_sampler.num_negs_per_pos 46.0 +223 18 training.batch_size 2.0 +223 19 model.embedding_dim 1.0 +223 19 optimizer.lr 0.0067197682582357705 +223 19 negative_sampler.num_negs_per_pos 72.0 +223 19 training.batch_size 0.0 +223 20 model.embedding_dim 2.0 +223 20 optimizer.lr 0.005604475995589155 +223 20 negative_sampler.num_negs_per_pos 40.0 +223 20 training.batch_size 2.0 +223 21 model.embedding_dim 2.0 +223 21 optimizer.lr 0.002412539718788735 +223 21 negative_sampler.num_negs_per_pos 77.0 +223 21 
training.batch_size 0.0 +223 22 model.embedding_dim 1.0 +223 22 optimizer.lr 0.0625762189313325 +223 22 negative_sampler.num_negs_per_pos 85.0 +223 22 training.batch_size 1.0 +223 23 model.embedding_dim 1.0 +223 23 optimizer.lr 0.005223929327207667 +223 23 negative_sampler.num_negs_per_pos 21.0 +223 23 training.batch_size 1.0 +223 24 model.embedding_dim 1.0 +223 24 optimizer.lr 0.008026530245910933 +223 24 negative_sampler.num_negs_per_pos 69.0 +223 24 training.batch_size 0.0 +223 25 model.embedding_dim 0.0 +223 25 optimizer.lr 0.04783660022750574 +223 25 negative_sampler.num_negs_per_pos 4.0 +223 25 training.batch_size 1.0 +223 26 model.embedding_dim 2.0 +223 26 optimizer.lr 0.0015912085876185324 +223 26 negative_sampler.num_negs_per_pos 43.0 +223 26 training.batch_size 1.0 +223 27 model.embedding_dim 1.0 +223 27 optimizer.lr 0.00668390502539481 +223 27 negative_sampler.num_negs_per_pos 68.0 +223 27 training.batch_size 0.0 +223 28 model.embedding_dim 2.0 +223 28 optimizer.lr 0.0019217589322972745 +223 28 negative_sampler.num_negs_per_pos 75.0 +223 28 training.batch_size 1.0 +223 29 model.embedding_dim 0.0 +223 29 optimizer.lr 0.09691788015261774 +223 29 negative_sampler.num_negs_per_pos 48.0 +223 29 training.batch_size 1.0 +223 30 model.embedding_dim 0.0 +223 30 optimizer.lr 0.02764602515165223 +223 30 negative_sampler.num_negs_per_pos 71.0 +223 30 training.batch_size 0.0 +223 31 model.embedding_dim 1.0 +223 31 optimizer.lr 0.027219560686816732 +223 31 negative_sampler.num_negs_per_pos 70.0 +223 31 training.batch_size 1.0 +223 32 model.embedding_dim 2.0 +223 32 optimizer.lr 0.050761743433348175 +223 32 negative_sampler.num_negs_per_pos 53.0 +223 32 training.batch_size 2.0 +223 33 model.embedding_dim 1.0 +223 33 optimizer.lr 0.00108228627809943 +223 33 negative_sampler.num_negs_per_pos 58.0 +223 33 training.batch_size 0.0 +223 34 model.embedding_dim 0.0 +223 34 optimizer.lr 0.015322147450684154 +223 34 negative_sampler.num_negs_per_pos 56.0 +223 34 
training.batch_size 1.0 +223 35 model.embedding_dim 2.0 +223 35 optimizer.lr 0.0059310966768442615 +223 35 negative_sampler.num_negs_per_pos 10.0 +223 35 training.batch_size 2.0 +223 36 model.embedding_dim 1.0 +223 36 optimizer.lr 0.052213770640694226 +223 36 negative_sampler.num_negs_per_pos 58.0 +223 36 training.batch_size 0.0 +223 37 model.embedding_dim 0.0 +223 37 optimizer.lr 0.007572883778499307 +223 37 negative_sampler.num_negs_per_pos 34.0 +223 37 training.batch_size 2.0 +223 38 model.embedding_dim 0.0 +223 38 optimizer.lr 0.0016448430868674796 +223 38 negative_sampler.num_negs_per_pos 57.0 +223 38 training.batch_size 1.0 +223 39 model.embedding_dim 2.0 +223 39 optimizer.lr 0.016085036161162327 +223 39 negative_sampler.num_negs_per_pos 29.0 +223 39 training.batch_size 1.0 +223 40 model.embedding_dim 1.0 +223 40 optimizer.lr 0.0453403544908514 +223 40 negative_sampler.num_negs_per_pos 57.0 +223 40 training.batch_size 2.0 +223 41 model.embedding_dim 2.0 +223 41 optimizer.lr 0.030715626973038358 +223 41 negative_sampler.num_negs_per_pos 89.0 +223 41 training.batch_size 1.0 +223 42 model.embedding_dim 1.0 +223 42 optimizer.lr 0.00437474523019593 +223 42 negative_sampler.num_negs_per_pos 45.0 +223 42 training.batch_size 2.0 +223 43 model.embedding_dim 0.0 +223 43 optimizer.lr 0.0017725397442121817 +223 43 negative_sampler.num_negs_per_pos 16.0 +223 43 training.batch_size 2.0 +223 1 dataset """fb15k237""" +223 1 model """ermlp""" +223 1 loss """softplus""" +223 1 regularizer """no""" +223 1 optimizer """adam""" +223 1 training_loop """owa""" +223 1 negative_sampler """basic""" +223 1 evaluator """rankbased""" +223 2 dataset """fb15k237""" +223 2 model """ermlp""" +223 2 loss """softplus""" +223 2 regularizer """no""" +223 2 optimizer """adam""" +223 2 training_loop """owa""" +223 2 negative_sampler """basic""" +223 2 evaluator """rankbased""" +223 3 dataset """fb15k237""" +223 3 model """ermlp""" +223 3 loss """softplus""" +223 3 regularizer """no""" +223 3 
optimizer """adam""" +223 3 training_loop """owa""" +223 3 negative_sampler """basic""" +223 3 evaluator """rankbased""" +223 4 dataset """fb15k237""" +223 4 model """ermlp""" +223 4 loss """softplus""" +223 4 regularizer """no""" +223 4 optimizer """adam""" +223 4 training_loop """owa""" +223 4 negative_sampler """basic""" +223 4 evaluator """rankbased""" +223 5 dataset """fb15k237""" +223 5 model """ermlp""" +223 5 loss """softplus""" +223 5 regularizer """no""" +223 5 optimizer """adam""" +223 5 training_loop """owa""" +223 5 negative_sampler """basic""" +223 5 evaluator """rankbased""" +223 6 dataset """fb15k237""" +223 6 model """ermlp""" +223 6 loss """softplus""" +223 6 regularizer """no""" +223 6 optimizer """adam""" +223 6 training_loop """owa""" +223 6 negative_sampler """basic""" +223 6 evaluator """rankbased""" +223 7 dataset """fb15k237""" +223 7 model """ermlp""" +223 7 loss """softplus""" +223 7 regularizer """no""" +223 7 optimizer """adam""" +223 7 training_loop """owa""" +223 7 negative_sampler """basic""" +223 7 evaluator """rankbased""" +223 8 dataset """fb15k237""" +223 8 model """ermlp""" +223 8 loss """softplus""" +223 8 regularizer """no""" +223 8 optimizer """adam""" +223 8 training_loop """owa""" +223 8 negative_sampler """basic""" +223 8 evaluator """rankbased""" +223 9 dataset """fb15k237""" +223 9 model """ermlp""" +223 9 loss """softplus""" +223 9 regularizer """no""" +223 9 optimizer """adam""" +223 9 training_loop """owa""" +223 9 negative_sampler """basic""" +223 9 evaluator """rankbased""" +223 10 dataset """fb15k237""" +223 10 model """ermlp""" +223 10 loss """softplus""" +223 10 regularizer """no""" +223 10 optimizer """adam""" +223 10 training_loop """owa""" +223 10 negative_sampler """basic""" +223 10 evaluator """rankbased""" +223 11 dataset """fb15k237""" +223 11 model """ermlp""" +223 11 loss """softplus""" +223 11 regularizer """no""" +223 11 optimizer """adam""" +223 11 training_loop """owa""" +223 11 negative_sampler 
"""basic""" +223 11 evaluator """rankbased""" +223 12 dataset """fb15k237""" +223 12 model """ermlp""" +223 12 loss """softplus""" +223 12 regularizer """no""" +223 12 optimizer """adam""" +223 12 training_loop """owa""" +223 12 negative_sampler """basic""" +223 12 evaluator """rankbased""" +223 13 dataset """fb15k237""" +223 13 model """ermlp""" +223 13 loss """softplus""" +223 13 regularizer """no""" +223 13 optimizer """adam""" +223 13 training_loop """owa""" +223 13 negative_sampler """basic""" +223 13 evaluator """rankbased""" +223 14 dataset """fb15k237""" +223 14 model """ermlp""" +223 14 loss """softplus""" +223 14 regularizer """no""" +223 14 optimizer """adam""" +223 14 training_loop """owa""" +223 14 negative_sampler """basic""" +223 14 evaluator """rankbased""" +223 15 dataset """fb15k237""" +223 15 model """ermlp""" +223 15 loss """softplus""" +223 15 regularizer """no""" +223 15 optimizer """adam""" +223 15 training_loop """owa""" +223 15 negative_sampler """basic""" +223 15 evaluator """rankbased""" +223 16 dataset """fb15k237""" +223 16 model """ermlp""" +223 16 loss """softplus""" +223 16 regularizer """no""" +223 16 optimizer """adam""" +223 16 training_loop """owa""" +223 16 negative_sampler """basic""" +223 16 evaluator """rankbased""" +223 17 dataset """fb15k237""" +223 17 model """ermlp""" +223 17 loss """softplus""" +223 17 regularizer """no""" +223 17 optimizer """adam""" +223 17 training_loop """owa""" +223 17 negative_sampler """basic""" +223 17 evaluator """rankbased""" +223 18 dataset """fb15k237""" +223 18 model """ermlp""" +223 18 loss """softplus""" +223 18 regularizer """no""" +223 18 optimizer """adam""" +223 18 training_loop """owa""" +223 18 negative_sampler """basic""" +223 18 evaluator """rankbased""" +223 19 dataset """fb15k237""" +223 19 model """ermlp""" +223 19 loss """softplus""" +223 19 regularizer """no""" +223 19 optimizer """adam""" +223 19 training_loop """owa""" +223 19 negative_sampler """basic""" +223 19 evaluator 
"""rankbased""" +223 20 dataset """fb15k237""" +223 20 model """ermlp""" +223 20 loss """softplus""" +223 20 regularizer """no""" +223 20 optimizer """adam""" +223 20 training_loop """owa""" +223 20 negative_sampler """basic""" +223 20 evaluator """rankbased""" +223 21 dataset """fb15k237""" +223 21 model """ermlp""" +223 21 loss """softplus""" +223 21 regularizer """no""" +223 21 optimizer """adam""" +223 21 training_loop """owa""" +223 21 negative_sampler """basic""" +223 21 evaluator """rankbased""" +223 22 dataset """fb15k237""" +223 22 model """ermlp""" +223 22 loss """softplus""" +223 22 regularizer """no""" +223 22 optimizer """adam""" +223 22 training_loop """owa""" +223 22 negative_sampler """basic""" +223 22 evaluator """rankbased""" +223 23 dataset """fb15k237""" +223 23 model """ermlp""" +223 23 loss """softplus""" +223 23 regularizer """no""" +223 23 optimizer """adam""" +223 23 training_loop """owa""" +223 23 negative_sampler """basic""" +223 23 evaluator """rankbased""" +223 24 dataset """fb15k237""" +223 24 model """ermlp""" +223 24 loss """softplus""" +223 24 regularizer """no""" +223 24 optimizer """adam""" +223 24 training_loop """owa""" +223 24 negative_sampler """basic""" +223 24 evaluator """rankbased""" +223 25 dataset """fb15k237""" +223 25 model """ermlp""" +223 25 loss """softplus""" +223 25 regularizer """no""" +223 25 optimizer """adam""" +223 25 training_loop """owa""" +223 25 negative_sampler """basic""" +223 25 evaluator """rankbased""" +223 26 dataset """fb15k237""" +223 26 model """ermlp""" +223 26 loss """softplus""" +223 26 regularizer """no""" +223 26 optimizer """adam""" +223 26 training_loop """owa""" +223 26 negative_sampler """basic""" +223 26 evaluator """rankbased""" +223 27 dataset """fb15k237""" +223 27 model """ermlp""" +223 27 loss """softplus""" +223 27 regularizer """no""" +223 27 optimizer """adam""" +223 27 training_loop """owa""" +223 27 negative_sampler """basic""" +223 27 evaluator """rankbased""" +223 28 dataset 
"""fb15k237""" +223 28 model """ermlp""" +223 28 loss """softplus""" +223 28 regularizer """no""" +223 28 optimizer """adam""" +223 28 training_loop """owa""" +223 28 negative_sampler """basic""" +223 28 evaluator """rankbased""" +223 29 dataset """fb15k237""" +223 29 model """ermlp""" +223 29 loss """softplus""" +223 29 regularizer """no""" +223 29 optimizer """adam""" +223 29 training_loop """owa""" +223 29 negative_sampler """basic""" +223 29 evaluator """rankbased""" +223 30 dataset """fb15k237""" +223 30 model """ermlp""" +223 30 loss """softplus""" +223 30 regularizer """no""" +223 30 optimizer """adam""" +223 30 training_loop """owa""" +223 30 negative_sampler """basic""" +223 30 evaluator """rankbased""" +223 31 dataset """fb15k237""" +223 31 model """ermlp""" +223 31 loss """softplus""" +223 31 regularizer """no""" +223 31 optimizer """adam""" +223 31 training_loop """owa""" +223 31 negative_sampler """basic""" +223 31 evaluator """rankbased""" +223 32 dataset """fb15k237""" +223 32 model """ermlp""" +223 32 loss """softplus""" +223 32 regularizer """no""" +223 32 optimizer """adam""" +223 32 training_loop """owa""" +223 32 negative_sampler """basic""" +223 32 evaluator """rankbased""" +223 33 dataset """fb15k237""" +223 33 model """ermlp""" +223 33 loss """softplus""" +223 33 regularizer """no""" +223 33 optimizer """adam""" +223 33 training_loop """owa""" +223 33 negative_sampler """basic""" +223 33 evaluator """rankbased""" +223 34 dataset """fb15k237""" +223 34 model """ermlp""" +223 34 loss """softplus""" +223 34 regularizer """no""" +223 34 optimizer """adam""" +223 34 training_loop """owa""" +223 34 negative_sampler """basic""" +223 34 evaluator """rankbased""" +223 35 dataset """fb15k237""" +223 35 model """ermlp""" +223 35 loss """softplus""" +223 35 regularizer """no""" +223 35 optimizer """adam""" +223 35 training_loop """owa""" +223 35 negative_sampler """basic""" +223 35 evaluator """rankbased""" +223 36 dataset """fb15k237""" +223 36 model 
"""ermlp""" +223 36 loss """softplus""" +223 36 regularizer """no""" +223 36 optimizer """adam""" +223 36 training_loop """owa""" +223 36 negative_sampler """basic""" +223 36 evaluator """rankbased""" +223 37 dataset """fb15k237""" +223 37 model """ermlp""" +223 37 loss """softplus""" +223 37 regularizer """no""" +223 37 optimizer """adam""" +223 37 training_loop """owa""" +223 37 negative_sampler """basic""" +223 37 evaluator """rankbased""" +223 38 dataset """fb15k237""" +223 38 model """ermlp""" +223 38 loss """softplus""" +223 38 regularizer """no""" +223 38 optimizer """adam""" +223 38 training_loop """owa""" +223 38 negative_sampler """basic""" +223 38 evaluator """rankbased""" +223 39 dataset """fb15k237""" +223 39 model """ermlp""" +223 39 loss """softplus""" +223 39 regularizer """no""" +223 39 optimizer """adam""" +223 39 training_loop """owa""" +223 39 negative_sampler """basic""" +223 39 evaluator """rankbased""" +223 40 dataset """fb15k237""" +223 40 model """ermlp""" +223 40 loss """softplus""" +223 40 regularizer """no""" +223 40 optimizer """adam""" +223 40 training_loop """owa""" +223 40 negative_sampler """basic""" +223 40 evaluator """rankbased""" +223 41 dataset """fb15k237""" +223 41 model """ermlp""" +223 41 loss """softplus""" +223 41 regularizer """no""" +223 41 optimizer """adam""" +223 41 training_loop """owa""" +223 41 negative_sampler """basic""" +223 41 evaluator """rankbased""" +223 42 dataset """fb15k237""" +223 42 model """ermlp""" +223 42 loss """softplus""" +223 42 regularizer """no""" +223 42 optimizer """adam""" +223 42 training_loop """owa""" +223 42 negative_sampler """basic""" +223 42 evaluator """rankbased""" +223 43 dataset """fb15k237""" +223 43 model """ermlp""" +223 43 loss """softplus""" +223 43 regularizer """no""" +223 43 optimizer """adam""" +223 43 training_loop """owa""" +223 43 negative_sampler """basic""" +223 43 evaluator """rankbased""" +224 1 model.embedding_dim 0.0 +224 1 optimizer.lr 0.002971790613526126 +224 
1 training.batch_size 0.0 +224 1 training.label_smoothing 0.007066227750846006 +224 2 model.embedding_dim 2.0 +224 2 optimizer.lr 0.004214842499848448 +224 2 training.batch_size 2.0 +224 2 training.label_smoothing 0.017887827247507152 +224 3 model.embedding_dim 1.0 +224 3 optimizer.lr 0.004920583123232811 +224 3 training.batch_size 0.0 +224 3 training.label_smoothing 0.02571397779367104 +224 4 model.embedding_dim 1.0 +224 4 optimizer.lr 0.004242403072098942 +224 4 training.batch_size 1.0 +224 4 training.label_smoothing 0.9454154360235444 +224 5 model.embedding_dim 2.0 +224 5 optimizer.lr 0.03383847601077852 +224 5 training.batch_size 0.0 +224 5 training.label_smoothing 0.023982618176083315 +224 6 model.embedding_dim 2.0 +224 6 optimizer.lr 0.05756959711945164 +224 6 training.batch_size 1.0 +224 6 training.label_smoothing 0.0012809466371511825 +224 7 model.embedding_dim 0.0 +224 7 optimizer.lr 0.005779875816374009 +224 7 training.batch_size 1.0 +224 7 training.label_smoothing 0.6431540317057921 +224 8 model.embedding_dim 1.0 +224 8 optimizer.lr 0.00260610483863949 +224 8 training.batch_size 2.0 +224 8 training.label_smoothing 0.22952938799589173 +224 9 model.embedding_dim 1.0 +224 9 optimizer.lr 0.0036315216765628543 +224 9 training.batch_size 2.0 +224 9 training.label_smoothing 0.037435237211042915 +224 10 model.embedding_dim 2.0 +224 10 optimizer.lr 0.018537296848761476 +224 10 training.batch_size 2.0 +224 10 training.label_smoothing 0.0294731835740043 +224 11 model.embedding_dim 2.0 +224 11 optimizer.lr 0.019943206212596012 +224 11 training.batch_size 2.0 +224 11 training.label_smoothing 0.002510119809649127 +224 12 model.embedding_dim 2.0 +224 12 optimizer.lr 0.01136410085397267 +224 12 training.batch_size 1.0 +224 12 training.label_smoothing 0.00418686910529204 +224 1 dataset """fb15k237""" +224 1 model """ermlp""" +224 1 loss """bceaftersigmoid""" +224 1 regularizer """no""" +224 1 optimizer """adam""" +224 1 training_loop """lcwa""" +224 1 evaluator 
"""rankbased""" +224 2 dataset """fb15k237""" +224 2 model """ermlp""" +224 2 loss """bceaftersigmoid""" +224 2 regularizer """no""" +224 2 optimizer """adam""" +224 2 training_loop """lcwa""" +224 2 evaluator """rankbased""" +224 3 dataset """fb15k237""" +224 3 model """ermlp""" +224 3 loss """bceaftersigmoid""" +224 3 regularizer """no""" +224 3 optimizer """adam""" +224 3 training_loop """lcwa""" +224 3 evaluator """rankbased""" +224 4 dataset """fb15k237""" +224 4 model """ermlp""" +224 4 loss """bceaftersigmoid""" +224 4 regularizer """no""" +224 4 optimizer """adam""" +224 4 training_loop """lcwa""" +224 4 evaluator """rankbased""" +224 5 dataset """fb15k237""" +224 5 model """ermlp""" +224 5 loss """bceaftersigmoid""" +224 5 regularizer """no""" +224 5 optimizer """adam""" +224 5 training_loop """lcwa""" +224 5 evaluator """rankbased""" +224 6 dataset """fb15k237""" +224 6 model """ermlp""" +224 6 loss """bceaftersigmoid""" +224 6 regularizer """no""" +224 6 optimizer """adam""" +224 6 training_loop """lcwa""" +224 6 evaluator """rankbased""" +224 7 dataset """fb15k237""" +224 7 model """ermlp""" +224 7 loss """bceaftersigmoid""" +224 7 regularizer """no""" +224 7 optimizer """adam""" +224 7 training_loop """lcwa""" +224 7 evaluator """rankbased""" +224 8 dataset """fb15k237""" +224 8 model """ermlp""" +224 8 loss """bceaftersigmoid""" +224 8 regularizer """no""" +224 8 optimizer """adam""" +224 8 training_loop """lcwa""" +224 8 evaluator """rankbased""" +224 9 dataset """fb15k237""" +224 9 model """ermlp""" +224 9 loss """bceaftersigmoid""" +224 9 regularizer """no""" +224 9 optimizer """adam""" +224 9 training_loop """lcwa""" +224 9 evaluator """rankbased""" +224 10 dataset """fb15k237""" +224 10 model """ermlp""" +224 10 loss """bceaftersigmoid""" +224 10 regularizer """no""" +224 10 optimizer """adam""" +224 10 training_loop """lcwa""" +224 10 evaluator """rankbased""" +224 11 dataset """fb15k237""" +224 11 model """ermlp""" +224 11 loss 
"""bceaftersigmoid""" +224 11 regularizer """no""" +224 11 optimizer """adam""" +224 11 training_loop """lcwa""" +224 11 evaluator """rankbased""" +224 12 dataset """fb15k237""" +224 12 model """ermlp""" +224 12 loss """bceaftersigmoid""" +224 12 regularizer """no""" +224 12 optimizer """adam""" +224 12 training_loop """lcwa""" +224 12 evaluator """rankbased""" +225 1 model.embedding_dim 2.0 +225 1 optimizer.lr 0.0032262922379957875 +225 1 training.batch_size 1.0 +225 1 training.label_smoothing 0.03465661007598112 +225 2 model.embedding_dim 2.0 +225 2 optimizer.lr 0.09900194361392663 +225 2 training.batch_size 1.0 +225 2 training.label_smoothing 0.022698291262062062 +225 3 model.embedding_dim 2.0 +225 3 optimizer.lr 0.0019199795606843813 +225 3 training.batch_size 2.0 +225 3 training.label_smoothing 0.037223550160167936 +225 4 model.embedding_dim 2.0 +225 4 optimizer.lr 0.04703732653381223 +225 4 training.batch_size 1.0 +225 4 training.label_smoothing 0.5853387759747956 +225 5 model.embedding_dim 0.0 +225 5 optimizer.lr 0.002359243544515732 +225 5 training.batch_size 1.0 +225 5 training.label_smoothing 0.022860271060509273 +225 6 model.embedding_dim 2.0 +225 6 optimizer.lr 0.038473487027495784 +225 6 training.batch_size 0.0 +225 6 training.label_smoothing 0.007340067580439387 +225 7 model.embedding_dim 2.0 +225 7 optimizer.lr 0.01126137460328515 +225 7 training.batch_size 0.0 +225 7 training.label_smoothing 0.002064063585997599 +225 8 model.embedding_dim 1.0 +225 8 optimizer.lr 0.04095572201382912 +225 8 training.batch_size 1.0 +225 8 training.label_smoothing 0.0011113243000591478 +225 9 model.embedding_dim 1.0 +225 9 optimizer.lr 0.012213984874916982 +225 9 training.batch_size 1.0 +225 9 training.label_smoothing 0.006091864188491905 +225 10 model.embedding_dim 0.0 +225 10 optimizer.lr 0.0028868867352526287 +225 10 training.batch_size 2.0 +225 10 training.label_smoothing 0.4324486111980641 +225 11 model.embedding_dim 1.0 +225 11 optimizer.lr 0.011073082033521316 
+225 11 training.batch_size 2.0 +225 11 training.label_smoothing 0.1882234864805162 +225 12 model.embedding_dim 2.0 +225 12 optimizer.lr 0.021047066730502773 +225 12 training.batch_size 2.0 +225 12 training.label_smoothing 0.022961109321781806 +225 1 dataset """fb15k237""" +225 1 model """ermlp""" +225 1 loss """softplus""" +225 1 regularizer """no""" +225 1 optimizer """adam""" +225 1 training_loop """lcwa""" +225 1 evaluator """rankbased""" +225 2 dataset """fb15k237""" +225 2 model """ermlp""" +225 2 loss """softplus""" +225 2 regularizer """no""" +225 2 optimizer """adam""" +225 2 training_loop """lcwa""" +225 2 evaluator """rankbased""" +225 3 dataset """fb15k237""" +225 3 model """ermlp""" +225 3 loss """softplus""" +225 3 regularizer """no""" +225 3 optimizer """adam""" +225 3 training_loop """lcwa""" +225 3 evaluator """rankbased""" +225 4 dataset """fb15k237""" +225 4 model """ermlp""" +225 4 loss """softplus""" +225 4 regularizer """no""" +225 4 optimizer """adam""" +225 4 training_loop """lcwa""" +225 4 evaluator """rankbased""" +225 5 dataset """fb15k237""" +225 5 model """ermlp""" +225 5 loss """softplus""" +225 5 regularizer """no""" +225 5 optimizer """adam""" +225 5 training_loop """lcwa""" +225 5 evaluator """rankbased""" +225 6 dataset """fb15k237""" +225 6 model """ermlp""" +225 6 loss """softplus""" +225 6 regularizer """no""" +225 6 optimizer """adam""" +225 6 training_loop """lcwa""" +225 6 evaluator """rankbased""" +225 7 dataset """fb15k237""" +225 7 model """ermlp""" +225 7 loss """softplus""" +225 7 regularizer """no""" +225 7 optimizer """adam""" +225 7 training_loop """lcwa""" +225 7 evaluator """rankbased""" +225 8 dataset """fb15k237""" +225 8 model """ermlp""" +225 8 loss """softplus""" +225 8 regularizer """no""" +225 8 optimizer """adam""" +225 8 training_loop """lcwa""" +225 8 evaluator """rankbased""" +225 9 dataset """fb15k237""" +225 9 model """ermlp""" +225 9 loss """softplus""" +225 9 regularizer """no""" +225 9 optimizer 
"""adam""" +225 9 training_loop """lcwa""" +225 9 evaluator """rankbased""" +225 10 dataset """fb15k237""" +225 10 model """ermlp""" +225 10 loss """softplus""" +225 10 regularizer """no""" +225 10 optimizer """adam""" +225 10 training_loop """lcwa""" +225 10 evaluator """rankbased""" +225 11 dataset """fb15k237""" +225 11 model """ermlp""" +225 11 loss """softplus""" +225 11 regularizer """no""" +225 11 optimizer """adam""" +225 11 training_loop """lcwa""" +225 11 evaluator """rankbased""" +225 12 dataset """fb15k237""" +225 12 model """ermlp""" +225 12 loss """softplus""" +225 12 regularizer """no""" +225 12 optimizer """adam""" +225 12 training_loop """lcwa""" +225 12 evaluator """rankbased""" +226 1 model.embedding_dim 1.0 +226 1 optimizer.lr 0.0976064612935316 +226 1 training.batch_size 2.0 +226 1 training.label_smoothing 0.16112319811929207 +226 2 model.embedding_dim 1.0 +226 2 optimizer.lr 0.0016048444217377658 +226 2 training.batch_size 0.0 +226 2 training.label_smoothing 0.015669806243614217 +226 3 model.embedding_dim 1.0 +226 3 optimizer.lr 0.00718643171751819 +226 3 training.batch_size 0.0 +226 3 training.label_smoothing 0.12418827272415425 +226 4 model.embedding_dim 1.0 +226 4 optimizer.lr 0.005593348868725122 +226 4 training.batch_size 2.0 +226 4 training.label_smoothing 0.06336196883518141 +226 5 model.embedding_dim 1.0 +226 5 optimizer.lr 0.06617665352781442 +226 5 training.batch_size 0.0 +226 5 training.label_smoothing 0.40620840056653246 +226 6 model.embedding_dim 2.0 +226 6 optimizer.lr 0.04017376461717002 +226 6 training.batch_size 2.0 +226 6 training.label_smoothing 0.025480088633469563 +226 7 model.embedding_dim 2.0 +226 7 optimizer.lr 0.005642083120193485 +226 7 training.batch_size 2.0 +226 7 training.label_smoothing 0.01786162445047763 +226 8 model.embedding_dim 2.0 +226 8 optimizer.lr 0.0017752404657143239 +226 8 training.batch_size 1.0 +226 8 training.label_smoothing 0.005194328710220893 +226 9 model.embedding_dim 1.0 +226 9 optimizer.lr 
0.027555543707763856 +226 9 training.batch_size 2.0 +226 9 training.label_smoothing 0.028937295374342663 +226 10 model.embedding_dim 2.0 +226 10 optimizer.lr 0.001386100493376949 +226 10 training.batch_size 1.0 +226 10 training.label_smoothing 0.009533954971180699 +226 11 model.embedding_dim 2.0 +226 11 optimizer.lr 0.04845708496520674 +226 11 training.batch_size 0.0 +226 11 training.label_smoothing 0.003325226693372565 +226 12 model.embedding_dim 2.0 +226 12 optimizer.lr 0.03309601901209184 +226 12 training.batch_size 2.0 +226 12 training.label_smoothing 0.016330262161788168 +226 13 model.embedding_dim 0.0 +226 13 optimizer.lr 0.011556450930882556 +226 13 training.batch_size 0.0 +226 13 training.label_smoothing 0.09552294833886119 +226 14 model.embedding_dim 0.0 +226 14 optimizer.lr 0.005188601460096706 +226 14 training.batch_size 1.0 +226 14 training.label_smoothing 0.1478453133996546 +226 15 model.embedding_dim 1.0 +226 15 optimizer.lr 0.0010107016776132042 +226 15 training.batch_size 1.0 +226 15 training.label_smoothing 0.0012355161010116145 +226 16 model.embedding_dim 1.0 +226 16 optimizer.lr 0.003111280679487922 +226 16 training.batch_size 2.0 +226 16 training.label_smoothing 0.0390768186734151 +226 17 model.embedding_dim 2.0 +226 17 optimizer.lr 0.0032122147102647707 +226 17 training.batch_size 2.0 +226 17 training.label_smoothing 0.019891514549545134 +226 18 model.embedding_dim 2.0 +226 18 optimizer.lr 0.00490482011104472 +226 18 training.batch_size 2.0 +226 18 training.label_smoothing 0.3105946692612293 +226 19 model.embedding_dim 0.0 +226 19 optimizer.lr 0.006534881452881157 +226 19 training.batch_size 2.0 +226 19 training.label_smoothing 0.14244533197637713 +226 20 model.embedding_dim 0.0 +226 20 optimizer.lr 0.013861287662824903 +226 20 training.batch_size 0.0 +226 20 training.label_smoothing 0.01557023901994163 +226 21 model.embedding_dim 0.0 +226 21 optimizer.lr 0.01238731788052104 +226 21 training.batch_size 0.0 +226 21 training.label_smoothing 
0.23064001997275907 +226 22 model.embedding_dim 2.0 +226 22 optimizer.lr 0.0036440828002854693 +226 22 training.batch_size 0.0 +226 22 training.label_smoothing 0.04797403723662691 +226 23 model.embedding_dim 1.0 +226 23 optimizer.lr 0.004185196800911714 +226 23 training.batch_size 2.0 +226 23 training.label_smoothing 0.005526154218515118 +226 24 model.embedding_dim 1.0 +226 24 optimizer.lr 0.003456077345016451 +226 24 training.batch_size 2.0 +226 24 training.label_smoothing 0.29331261297748973 +226 25 model.embedding_dim 1.0 +226 25 optimizer.lr 0.004800619475437899 +226 25 training.batch_size 1.0 +226 25 training.label_smoothing 0.04967056972422685 +226 1 dataset """fb15k237""" +226 1 model """ermlp""" +226 1 loss """bceaftersigmoid""" +226 1 regularizer """no""" +226 1 optimizer """adam""" +226 1 training_loop """lcwa""" +226 1 evaluator """rankbased""" +226 2 dataset """fb15k237""" +226 2 model """ermlp""" +226 2 loss """bceaftersigmoid""" +226 2 regularizer """no""" +226 2 optimizer """adam""" +226 2 training_loop """lcwa""" +226 2 evaluator """rankbased""" +226 3 dataset """fb15k237""" +226 3 model """ermlp""" +226 3 loss """bceaftersigmoid""" +226 3 regularizer """no""" +226 3 optimizer """adam""" +226 3 training_loop """lcwa""" +226 3 evaluator """rankbased""" +226 4 dataset """fb15k237""" +226 4 model """ermlp""" +226 4 loss """bceaftersigmoid""" +226 4 regularizer """no""" +226 4 optimizer """adam""" +226 4 training_loop """lcwa""" +226 4 evaluator """rankbased""" +226 5 dataset """fb15k237""" +226 5 model """ermlp""" +226 5 loss """bceaftersigmoid""" +226 5 regularizer """no""" +226 5 optimizer """adam""" +226 5 training_loop """lcwa""" +226 5 evaluator """rankbased""" +226 6 dataset """fb15k237""" +226 6 model """ermlp""" +226 6 loss """bceaftersigmoid""" +226 6 regularizer """no""" +226 6 optimizer """adam""" +226 6 training_loop """lcwa""" +226 6 evaluator """rankbased""" +226 7 dataset """fb15k237""" +226 7 model """ermlp""" +226 7 loss 
"""bceaftersigmoid""" +226 7 regularizer """no""" +226 7 optimizer """adam""" +226 7 training_loop """lcwa""" +226 7 evaluator """rankbased""" +226 8 dataset """fb15k237""" +226 8 model """ermlp""" +226 8 loss """bceaftersigmoid""" +226 8 regularizer """no""" +226 8 optimizer """adam""" +226 8 training_loop """lcwa""" +226 8 evaluator """rankbased""" +226 9 dataset """fb15k237""" +226 9 model """ermlp""" +226 9 loss """bceaftersigmoid""" +226 9 regularizer """no""" +226 9 optimizer """adam""" +226 9 training_loop """lcwa""" +226 9 evaluator """rankbased""" +226 10 dataset """fb15k237""" +226 10 model """ermlp""" +226 10 loss """bceaftersigmoid""" +226 10 regularizer """no""" +226 10 optimizer """adam""" +226 10 training_loop """lcwa""" +226 10 evaluator """rankbased""" +226 11 dataset """fb15k237""" +226 11 model """ermlp""" +226 11 loss """bceaftersigmoid""" +226 11 regularizer """no""" +226 11 optimizer """adam""" +226 11 training_loop """lcwa""" +226 11 evaluator """rankbased""" +226 12 dataset """fb15k237""" +226 12 model """ermlp""" +226 12 loss """bceaftersigmoid""" +226 12 regularizer """no""" +226 12 optimizer """adam""" +226 12 training_loop """lcwa""" +226 12 evaluator """rankbased""" +226 13 dataset """fb15k237""" +226 13 model """ermlp""" +226 13 loss """bceaftersigmoid""" +226 13 regularizer """no""" +226 13 optimizer """adam""" +226 13 training_loop """lcwa""" +226 13 evaluator """rankbased""" +226 14 dataset """fb15k237""" +226 14 model """ermlp""" +226 14 loss """bceaftersigmoid""" +226 14 regularizer """no""" +226 14 optimizer """adam""" +226 14 training_loop """lcwa""" +226 14 evaluator """rankbased""" +226 15 dataset """fb15k237""" +226 15 model """ermlp""" +226 15 loss """bceaftersigmoid""" +226 15 regularizer """no""" +226 15 optimizer """adam""" +226 15 training_loop """lcwa""" +226 15 evaluator """rankbased""" +226 16 dataset """fb15k237""" +226 16 model """ermlp""" +226 16 loss """bceaftersigmoid""" +226 16 regularizer """no""" +226 16 
optimizer """adam""" +226 16 training_loop """lcwa""" +226 16 evaluator """rankbased""" +226 17 dataset """fb15k237""" +226 17 model """ermlp""" +226 17 loss """bceaftersigmoid""" +226 17 regularizer """no""" +226 17 optimizer """adam""" +226 17 training_loop """lcwa""" +226 17 evaluator """rankbased""" +226 18 dataset """fb15k237""" +226 18 model """ermlp""" +226 18 loss """bceaftersigmoid""" +226 18 regularizer """no""" +226 18 optimizer """adam""" +226 18 training_loop """lcwa""" +226 18 evaluator """rankbased""" +226 19 dataset """fb15k237""" +226 19 model """ermlp""" +226 19 loss """bceaftersigmoid""" +226 19 regularizer """no""" +226 19 optimizer """adam""" +226 19 training_loop """lcwa""" +226 19 evaluator """rankbased""" +226 20 dataset """fb15k237""" +226 20 model """ermlp""" +226 20 loss """bceaftersigmoid""" +226 20 regularizer """no""" +226 20 optimizer """adam""" +226 20 training_loop """lcwa""" +226 20 evaluator """rankbased""" +226 21 dataset """fb15k237""" +226 21 model """ermlp""" +226 21 loss """bceaftersigmoid""" +226 21 regularizer """no""" +226 21 optimizer """adam""" +226 21 training_loop """lcwa""" +226 21 evaluator """rankbased""" +226 22 dataset """fb15k237""" +226 22 model """ermlp""" +226 22 loss """bceaftersigmoid""" +226 22 regularizer """no""" +226 22 optimizer """adam""" +226 22 training_loop """lcwa""" +226 22 evaluator """rankbased""" +226 23 dataset """fb15k237""" +226 23 model """ermlp""" +226 23 loss """bceaftersigmoid""" +226 23 regularizer """no""" +226 23 optimizer """adam""" +226 23 training_loop """lcwa""" +226 23 evaluator """rankbased""" +226 24 dataset """fb15k237""" +226 24 model """ermlp""" +226 24 loss """bceaftersigmoid""" +226 24 regularizer """no""" +226 24 optimizer """adam""" +226 24 training_loop """lcwa""" +226 24 evaluator """rankbased""" +226 25 dataset """fb15k237""" +226 25 model """ermlp""" +226 25 loss """bceaftersigmoid""" +226 25 regularizer """no""" +226 25 optimizer """adam""" +226 25 training_loop 
"""lcwa""" +226 25 evaluator """rankbased""" +227 1 model.embedding_dim 1.0 +227 1 optimizer.lr 0.059338255738564216 +227 1 training.batch_size 1.0 +227 1 training.label_smoothing 0.09233438840719552 +227 2 model.embedding_dim 2.0 +227 2 optimizer.lr 0.004542504961151167 +227 2 training.batch_size 1.0 +227 2 training.label_smoothing 0.04519959746903438 +227 3 model.embedding_dim 2.0 +227 3 optimizer.lr 0.004885918550628323 +227 3 training.batch_size 1.0 +227 3 training.label_smoothing 0.0012455113215281688 +227 4 model.embedding_dim 1.0 +227 4 optimizer.lr 0.0013817207893871395 +227 4 training.batch_size 2.0 +227 4 training.label_smoothing 0.0411813455678451 +227 5 model.embedding_dim 1.0 +227 5 optimizer.lr 0.004154429395348693 +227 5 training.batch_size 0.0 +227 5 training.label_smoothing 0.3091832183719745 +227 6 model.embedding_dim 0.0 +227 6 optimizer.lr 0.008203527873356445 +227 6 training.batch_size 0.0 +227 6 training.label_smoothing 0.06987745449833874 +227 7 model.embedding_dim 2.0 +227 7 optimizer.lr 0.025035197073362374 +227 7 training.batch_size 2.0 +227 7 training.label_smoothing 0.0802871069930542 +227 8 model.embedding_dim 2.0 +227 8 optimizer.lr 0.07181632799849455 +227 8 training.batch_size 0.0 +227 8 training.label_smoothing 0.014009546154350065 +227 9 model.embedding_dim 1.0 +227 9 optimizer.lr 0.013446283872691464 +227 9 training.batch_size 0.0 +227 9 training.label_smoothing 0.1684444124049066 +227 10 model.embedding_dim 2.0 +227 10 optimizer.lr 0.0014246748383480725 +227 10 training.batch_size 0.0 +227 10 training.label_smoothing 0.002354557568757103 +227 11 model.embedding_dim 0.0 +227 11 optimizer.lr 0.007377552706944723 +227 11 training.batch_size 2.0 +227 11 training.label_smoothing 0.035498323316323924 +227 12 model.embedding_dim 0.0 +227 12 optimizer.lr 0.08554705721661089 +227 12 training.batch_size 1.0 +227 12 training.label_smoothing 0.06832424410771719 +227 13 model.embedding_dim 0.0 +227 13 optimizer.lr 0.0016720733375777577 +227 
13 training.batch_size 1.0 +227 13 training.label_smoothing 0.007261247371907453 +227 14 model.embedding_dim 1.0 +227 14 optimizer.lr 0.0010016420741349632 +227 14 training.batch_size 0.0 +227 14 training.label_smoothing 0.0015352159444445848 +227 15 model.embedding_dim 1.0 +227 15 optimizer.lr 0.007864130063999558 +227 15 training.batch_size 2.0 +227 15 training.label_smoothing 0.0013584003329923555 +227 16 model.embedding_dim 0.0 +227 16 optimizer.lr 0.07853229067836327 +227 16 training.batch_size 0.0 +227 16 training.label_smoothing 0.001183024494386525 +227 17 model.embedding_dim 1.0 +227 17 optimizer.lr 0.0033580677691638187 +227 17 training.batch_size 0.0 +227 17 training.label_smoothing 0.004024850660777091 +227 18 model.embedding_dim 0.0 +227 18 optimizer.lr 0.008952998769820695 +227 18 training.batch_size 2.0 +227 18 training.label_smoothing 0.0746863932244629 +227 19 model.embedding_dim 2.0 +227 19 optimizer.lr 0.0038583586924355825 +227 19 training.batch_size 1.0 +227 19 training.label_smoothing 0.21325303434517998 +227 20 model.embedding_dim 0.0 +227 20 optimizer.lr 0.005832281509836958 +227 20 training.batch_size 0.0 +227 20 training.label_smoothing 0.0011234353936022045 +227 21 model.embedding_dim 0.0 +227 21 optimizer.lr 0.009250726383164778 +227 21 training.batch_size 2.0 +227 21 training.label_smoothing 0.004774711853064392 +227 22 model.embedding_dim 0.0 +227 22 optimizer.lr 0.006085073413606896 +227 22 training.batch_size 1.0 +227 22 training.label_smoothing 0.0374545107294729 +227 23 model.embedding_dim 1.0 +227 23 optimizer.lr 0.038694354737123376 +227 23 training.batch_size 0.0 +227 23 training.label_smoothing 0.11172878992998411 +227 1 dataset """fb15k237""" +227 1 model """ermlp""" +227 1 loss """softplus""" +227 1 regularizer """no""" +227 1 optimizer """adam""" +227 1 training_loop """lcwa""" +227 1 evaluator """rankbased""" +227 2 dataset """fb15k237""" +227 2 model """ermlp""" +227 2 loss """softplus""" +227 2 regularizer """no""" +227 2 
optimizer """adam""" +227 2 training_loop """lcwa""" +227 2 evaluator """rankbased""" +227 3 dataset """fb15k237""" +227 3 model """ermlp""" +227 3 loss """softplus""" +227 3 regularizer """no""" +227 3 optimizer """adam""" +227 3 training_loop """lcwa""" +227 3 evaluator """rankbased""" +227 4 dataset """fb15k237""" +227 4 model """ermlp""" +227 4 loss """softplus""" +227 4 regularizer """no""" +227 4 optimizer """adam""" +227 4 training_loop """lcwa""" +227 4 evaluator """rankbased""" +227 5 dataset """fb15k237""" +227 5 model """ermlp""" +227 5 loss """softplus""" +227 5 regularizer """no""" +227 5 optimizer """adam""" +227 5 training_loop """lcwa""" +227 5 evaluator """rankbased""" +227 6 dataset """fb15k237""" +227 6 model """ermlp""" +227 6 loss """softplus""" +227 6 regularizer """no""" +227 6 optimizer """adam""" +227 6 training_loop """lcwa""" +227 6 evaluator """rankbased""" +227 7 dataset """fb15k237""" +227 7 model """ermlp""" +227 7 loss """softplus""" +227 7 regularizer """no""" +227 7 optimizer """adam""" +227 7 training_loop """lcwa""" +227 7 evaluator """rankbased""" +227 8 dataset """fb15k237""" +227 8 model """ermlp""" +227 8 loss """softplus""" +227 8 regularizer """no""" +227 8 optimizer """adam""" +227 8 training_loop """lcwa""" +227 8 evaluator """rankbased""" +227 9 dataset """fb15k237""" +227 9 model """ermlp""" +227 9 loss """softplus""" +227 9 regularizer """no""" +227 9 optimizer """adam""" +227 9 training_loop """lcwa""" +227 9 evaluator """rankbased""" +227 10 dataset """fb15k237""" +227 10 model """ermlp""" +227 10 loss """softplus""" +227 10 regularizer """no""" +227 10 optimizer """adam""" +227 10 training_loop """lcwa""" +227 10 evaluator """rankbased""" +227 11 dataset """fb15k237""" +227 11 model """ermlp""" +227 11 loss """softplus""" +227 11 regularizer """no""" +227 11 optimizer """adam""" +227 11 training_loop """lcwa""" +227 11 evaluator """rankbased""" +227 12 dataset """fb15k237""" +227 12 model """ermlp""" +227 12 loss 
"""softplus""" +227 12 regularizer """no""" +227 12 optimizer """adam""" +227 12 training_loop """lcwa""" +227 12 evaluator """rankbased""" +227 13 dataset """fb15k237""" +227 13 model """ermlp""" +227 13 loss """softplus""" +227 13 regularizer """no""" +227 13 optimizer """adam""" +227 13 training_loop """lcwa""" +227 13 evaluator """rankbased""" +227 14 dataset """fb15k237""" +227 14 model """ermlp""" +227 14 loss """softplus""" +227 14 regularizer """no""" +227 14 optimizer """adam""" +227 14 training_loop """lcwa""" +227 14 evaluator """rankbased""" +227 15 dataset """fb15k237""" +227 15 model """ermlp""" +227 15 loss """softplus""" +227 15 regularizer """no""" +227 15 optimizer """adam""" +227 15 training_loop """lcwa""" +227 15 evaluator """rankbased""" +227 16 dataset """fb15k237""" +227 16 model """ermlp""" +227 16 loss """softplus""" +227 16 regularizer """no""" +227 16 optimizer """adam""" +227 16 training_loop """lcwa""" +227 16 evaluator """rankbased""" +227 17 dataset """fb15k237""" +227 17 model """ermlp""" +227 17 loss """softplus""" +227 17 regularizer """no""" +227 17 optimizer """adam""" +227 17 training_loop """lcwa""" +227 17 evaluator """rankbased""" +227 18 dataset """fb15k237""" +227 18 model """ermlp""" +227 18 loss """softplus""" +227 18 regularizer """no""" +227 18 optimizer """adam""" +227 18 training_loop """lcwa""" +227 18 evaluator """rankbased""" +227 19 dataset """fb15k237""" +227 19 model """ermlp""" +227 19 loss """softplus""" +227 19 regularizer """no""" +227 19 optimizer """adam""" +227 19 training_loop """lcwa""" +227 19 evaluator """rankbased""" +227 20 dataset """fb15k237""" +227 20 model """ermlp""" +227 20 loss """softplus""" +227 20 regularizer """no""" +227 20 optimizer """adam""" +227 20 training_loop """lcwa""" +227 20 evaluator """rankbased""" +227 21 dataset """fb15k237""" +227 21 model """ermlp""" +227 21 loss """softplus""" +227 21 regularizer """no""" +227 21 optimizer """adam""" +227 21 training_loop """lcwa""" 
+227 21 evaluator """rankbased""" +227 22 dataset """fb15k237""" +227 22 model """ermlp""" +227 22 loss """softplus""" +227 22 regularizer """no""" +227 22 optimizer """adam""" +227 22 training_loop """lcwa""" +227 22 evaluator """rankbased""" +227 23 dataset """fb15k237""" +227 23 model """ermlp""" +227 23 loss """softplus""" +227 23 regularizer """no""" +227 23 optimizer """adam""" +227 23 training_loop """lcwa""" +227 23 evaluator """rankbased""" +228 1 model.embedding_dim 1.0 +228 1 optimizer.lr 0.010332730192559035 +228 1 training.batch_size 2.0 +228 1 training.label_smoothing 0.1356468474675966 +228 2 model.embedding_dim 1.0 +228 2 optimizer.lr 0.03966757731540983 +228 2 training.batch_size 2.0 +228 2 training.label_smoothing 0.007764370719833773 +228 3 model.embedding_dim 2.0 +228 3 optimizer.lr 0.07096731280548199 +228 3 training.batch_size 2.0 +228 3 training.label_smoothing 0.07518850779270904 +228 4 model.embedding_dim 2.0 +228 4 optimizer.lr 0.005752291182348232 +228 4 training.batch_size 2.0 +228 4 training.label_smoothing 0.0038104414192328064 +228 5 model.embedding_dim 1.0 +228 5 optimizer.lr 0.010597116422413299 +228 5 training.batch_size 2.0 +228 5 training.label_smoothing 0.3082926247874306 +228 6 model.embedding_dim 1.0 +228 6 optimizer.lr 0.013048585006056379 +228 6 training.batch_size 0.0 +228 6 training.label_smoothing 0.5570312285101187 +228 7 model.embedding_dim 2.0 +228 7 optimizer.lr 0.00277004920206998 +228 7 training.batch_size 2.0 +228 7 training.label_smoothing 0.051211766967070055 +228 8 model.embedding_dim 2.0 +228 8 optimizer.lr 0.012595916483688614 +228 8 training.batch_size 2.0 +228 8 training.label_smoothing 0.0018887664058603371 +228 1 dataset """fb15k237""" +228 1 model """ermlp""" +228 1 loss """crossentropy""" +228 1 regularizer """no""" +228 1 optimizer """adam""" +228 1 training_loop """lcwa""" +228 1 evaluator """rankbased""" +228 2 dataset """fb15k237""" +228 2 model """ermlp""" +228 2 loss """crossentropy""" +228 2 
regularizer """no""" +228 2 optimizer """adam""" +228 2 training_loop """lcwa""" +228 2 evaluator """rankbased""" +228 3 dataset """fb15k237""" +228 3 model """ermlp""" +228 3 loss """crossentropy""" +228 3 regularizer """no""" +228 3 optimizer """adam""" +228 3 training_loop """lcwa""" +228 3 evaluator """rankbased""" +228 4 dataset """fb15k237""" +228 4 model """ermlp""" +228 4 loss """crossentropy""" +228 4 regularizer """no""" +228 4 optimizer """adam""" +228 4 training_loop """lcwa""" +228 4 evaluator """rankbased""" +228 5 dataset """fb15k237""" +228 5 model """ermlp""" +228 5 loss """crossentropy""" +228 5 regularizer """no""" +228 5 optimizer """adam""" +228 5 training_loop """lcwa""" +228 5 evaluator """rankbased""" +228 6 dataset """fb15k237""" +228 6 model """ermlp""" +228 6 loss """crossentropy""" +228 6 regularizer """no""" +228 6 optimizer """adam""" +228 6 training_loop """lcwa""" +228 6 evaluator """rankbased""" +228 7 dataset """fb15k237""" +228 7 model """ermlp""" +228 7 loss """crossentropy""" +228 7 regularizer """no""" +228 7 optimizer """adam""" +228 7 training_loop """lcwa""" +228 7 evaluator """rankbased""" +228 8 dataset """fb15k237""" +228 8 model """ermlp""" +228 8 loss """crossentropy""" +228 8 regularizer """no""" +228 8 optimizer """adam""" +228 8 training_loop """lcwa""" +228 8 evaluator """rankbased""" +229 1 model.embedding_dim 0.0 +229 1 optimizer.lr 0.03235935243440166 +229 1 training.batch_size 2.0 +229 1 training.label_smoothing 0.07014076734356052 +229 2 model.embedding_dim 0.0 +229 2 optimizer.lr 0.0018849425390709672 +229 2 training.batch_size 1.0 +229 2 training.label_smoothing 0.0021440031393075622 +229 3 model.embedding_dim 0.0 +229 3 optimizer.lr 0.0015496124161473164 +229 3 training.batch_size 2.0 +229 3 training.label_smoothing 0.9766370213176961 +229 4 model.embedding_dim 1.0 +229 4 optimizer.lr 0.022316724199328834 +229 4 training.batch_size 1.0 +229 4 training.label_smoothing 0.033800560951493995 +229 5 
model.embedding_dim 1.0 +229 5 optimizer.lr 0.0017594782883148692 +229 5 training.batch_size 0.0 +229 5 training.label_smoothing 0.01003232284752809 +229 6 model.embedding_dim 0.0 +229 6 optimizer.lr 0.025919373011737433 +229 6 training.batch_size 0.0 +229 6 training.label_smoothing 0.1590028849781691 +229 7 model.embedding_dim 2.0 +229 7 optimizer.lr 0.033956017970134506 +229 7 training.batch_size 2.0 +229 7 training.label_smoothing 0.0036102079503566053 +229 8 model.embedding_dim 0.0 +229 8 optimizer.lr 0.0023124421891525387 +229 8 training.batch_size 2.0 +229 8 training.label_smoothing 0.0021996002415776324 +229 9 model.embedding_dim 2.0 +229 9 optimizer.lr 0.0012868611764538097 +229 9 training.batch_size 0.0 +229 9 training.label_smoothing 0.0010813533703320792 +229 10 model.embedding_dim 0.0 +229 10 optimizer.lr 0.017504711478456222 +229 10 training.batch_size 0.0 +229 10 training.label_smoothing 0.0025682722551781363 +229 11 model.embedding_dim 2.0 +229 11 optimizer.lr 0.0037083840586824074 +229 11 training.batch_size 0.0 +229 11 training.label_smoothing 0.01997021743896656 +229 12 model.embedding_dim 2.0 +229 12 optimizer.lr 0.006683730837883398 +229 12 training.batch_size 2.0 +229 12 training.label_smoothing 0.2052547905304122 +229 13 model.embedding_dim 1.0 +229 13 optimizer.lr 0.007534130262683318 +229 13 training.batch_size 1.0 +229 13 training.label_smoothing 0.017579212596936157 +229 14 model.embedding_dim 1.0 +229 14 optimizer.lr 0.0016169040367214023 +229 14 training.batch_size 0.0 +229 14 training.label_smoothing 0.0015349552224941001 +229 1 dataset """fb15k237""" +229 1 model """ermlp""" +229 1 loss """crossentropy""" +229 1 regularizer """no""" +229 1 optimizer """adam""" +229 1 training_loop """lcwa""" +229 1 evaluator """rankbased""" +229 2 dataset """fb15k237""" +229 2 model """ermlp""" +229 2 loss """crossentropy""" +229 2 regularizer """no""" +229 2 optimizer """adam""" +229 2 training_loop """lcwa""" +229 2 evaluator """rankbased""" +229 3 
dataset """fb15k237""" +229 3 model """ermlp""" +229 3 loss """crossentropy""" +229 3 regularizer """no""" +229 3 optimizer """adam""" +229 3 training_loop """lcwa""" +229 3 evaluator """rankbased""" +229 4 dataset """fb15k237""" +229 4 model """ermlp""" +229 4 loss """crossentropy""" +229 4 regularizer """no""" +229 4 optimizer """adam""" +229 4 training_loop """lcwa""" +229 4 evaluator """rankbased""" +229 5 dataset """fb15k237""" +229 5 model """ermlp""" +229 5 loss """crossentropy""" +229 5 regularizer """no""" +229 5 optimizer """adam""" +229 5 training_loop """lcwa""" +229 5 evaluator """rankbased""" +229 6 dataset """fb15k237""" +229 6 model """ermlp""" +229 6 loss """crossentropy""" +229 6 regularizer """no""" +229 6 optimizer """adam""" +229 6 training_loop """lcwa""" +229 6 evaluator """rankbased""" +229 7 dataset """fb15k237""" +229 7 model """ermlp""" +229 7 loss """crossentropy""" +229 7 regularizer """no""" +229 7 optimizer """adam""" +229 7 training_loop """lcwa""" +229 7 evaluator """rankbased""" +229 8 dataset """fb15k237""" +229 8 model """ermlp""" +229 8 loss """crossentropy""" +229 8 regularizer """no""" +229 8 optimizer """adam""" +229 8 training_loop """lcwa""" +229 8 evaluator """rankbased""" +229 9 dataset """fb15k237""" +229 9 model """ermlp""" +229 9 loss """crossentropy""" +229 9 regularizer """no""" +229 9 optimizer """adam""" +229 9 training_loop """lcwa""" +229 9 evaluator """rankbased""" +229 10 dataset """fb15k237""" +229 10 model """ermlp""" +229 10 loss """crossentropy""" +229 10 regularizer """no""" +229 10 optimizer """adam""" +229 10 training_loop """lcwa""" +229 10 evaluator """rankbased""" +229 11 dataset """fb15k237""" +229 11 model """ermlp""" +229 11 loss """crossentropy""" +229 11 regularizer """no""" +229 11 optimizer """adam""" +229 11 training_loop """lcwa""" +229 11 evaluator """rankbased""" +229 12 dataset """fb15k237""" +229 12 model """ermlp""" +229 12 loss """crossentropy""" +229 12 regularizer """no""" +229 12 
optimizer """adam""" +229 12 training_loop """lcwa""" +229 12 evaluator """rankbased""" +229 13 dataset """fb15k237""" +229 13 model """ermlp""" +229 13 loss """crossentropy""" +229 13 regularizer """no""" +229 13 optimizer """adam""" +229 13 training_loop """lcwa""" +229 13 evaluator """rankbased""" +229 14 dataset """fb15k237""" +229 14 model """ermlp""" +229 14 loss """crossentropy""" +229 14 regularizer """no""" +229 14 optimizer """adam""" +229 14 training_loop """lcwa""" +229 14 evaluator """rankbased""" +230 1 model.embedding_dim 2.0 +230 1 loss.margin 26.609976696386962 +230 1 loss.adversarial_temperature 0.9011723622162522 +230 1 optimizer.lr 0.023360786092495346 +230 1 negative_sampler.num_negs_per_pos 14.0 +230 1 training.batch_size 2.0 +230 2 model.embedding_dim 0.0 +230 2 loss.margin 28.62778979893926 +230 2 loss.adversarial_temperature 0.24778696657053187 +230 2 optimizer.lr 0.023550115820216194 +230 2 negative_sampler.num_negs_per_pos 58.0 +230 2 training.batch_size 2.0 +230 3 model.embedding_dim 1.0 +230 3 loss.margin 4.113743106592478 +230 3 loss.adversarial_temperature 0.6602888228012651 +230 3 optimizer.lr 0.009962962126442022 +230 3 negative_sampler.num_negs_per_pos 96.0 +230 3 training.batch_size 2.0 +230 4 model.embedding_dim 1.0 +230 4 loss.margin 17.669059201576545 +230 4 loss.adversarial_temperature 0.4136917683560515 +230 4 optimizer.lr 0.0034508599070099963 +230 4 negative_sampler.num_negs_per_pos 12.0 +230 4 training.batch_size 2.0 +230 5 model.embedding_dim 1.0 +230 5 loss.margin 9.89567395711803 +230 5 loss.adversarial_temperature 0.9643461766291537 +230 5 optimizer.lr 0.006188403437924383 +230 5 negative_sampler.num_negs_per_pos 80.0 +230 5 training.batch_size 2.0 +230 6 model.embedding_dim 0.0 +230 6 loss.margin 2.0606355654490547 +230 6 loss.adversarial_temperature 0.775645995035568 +230 6 optimizer.lr 0.0017401780927702023 +230 6 negative_sampler.num_negs_per_pos 17.0 +230 6 training.batch_size 0.0 +230 7 model.embedding_dim 1.0 
+230 7 loss.margin 1.8780811958051848 +230 7 loss.adversarial_temperature 0.9210956082362108 +230 7 optimizer.lr 0.03550129090912202 +230 7 negative_sampler.num_negs_per_pos 10.0 +230 7 training.batch_size 2.0 +230 8 model.embedding_dim 2.0 +230 8 loss.margin 23.708149458412322 +230 8 loss.adversarial_temperature 0.23030394976607363 +230 8 optimizer.lr 0.05718013080739682 +230 8 negative_sampler.num_negs_per_pos 53.0 +230 8 training.batch_size 1.0 +230 9 model.embedding_dim 1.0 +230 9 loss.margin 3.248574900711761 +230 9 loss.adversarial_temperature 0.7152542073862007 +230 9 optimizer.lr 0.024822086694017864 +230 9 negative_sampler.num_negs_per_pos 99.0 +230 9 training.batch_size 2.0 +230 10 model.embedding_dim 0.0 +230 10 loss.margin 20.578745421259082 +230 10 loss.adversarial_temperature 0.41494931755936537 +230 10 optimizer.lr 0.017364410585382913 +230 10 negative_sampler.num_negs_per_pos 42.0 +230 10 training.batch_size 2.0 +230 11 model.embedding_dim 0.0 +230 11 loss.margin 19.874654885933378 +230 11 loss.adversarial_temperature 0.7827417315654186 +230 11 optimizer.lr 0.005549312544175211 +230 11 negative_sampler.num_negs_per_pos 20.0 +230 11 training.batch_size 1.0 +230 12 model.embedding_dim 2.0 +230 12 loss.margin 17.350777130439518 +230 12 loss.adversarial_temperature 0.6844526919502325 +230 12 optimizer.lr 0.00276756380228713 +230 12 negative_sampler.num_negs_per_pos 68.0 +230 12 training.batch_size 0.0 +230 13 model.embedding_dim 2.0 +230 13 loss.margin 4.911738789567553 +230 13 loss.adversarial_temperature 0.3844432593653617 +230 13 optimizer.lr 0.010440629883702273 +230 13 negative_sampler.num_negs_per_pos 22.0 +230 13 training.batch_size 0.0 +230 14 model.embedding_dim 1.0 +230 14 loss.margin 2.4605038448651215 +230 14 loss.adversarial_temperature 0.6008319738887583 +230 14 optimizer.lr 0.0022641907204999597 +230 14 negative_sampler.num_negs_per_pos 91.0 +230 14 training.batch_size 2.0 +230 15 model.embedding_dim 0.0 +230 15 loss.margin 
28.899242456307807 +230 15 loss.adversarial_temperature 0.36696805690765655 +230 15 optimizer.lr 0.02140091293441921 +230 15 negative_sampler.num_negs_per_pos 33.0 +230 15 training.batch_size 2.0 +230 16 model.embedding_dim 0.0 +230 16 loss.margin 9.521928515976564 +230 16 loss.adversarial_temperature 0.31955007375352756 +230 16 optimizer.lr 0.026545085193331056 +230 16 negative_sampler.num_negs_per_pos 25.0 +230 16 training.batch_size 0.0 +230 17 model.embedding_dim 2.0 +230 17 loss.margin 7.1327730632483775 +230 17 loss.adversarial_temperature 0.3793251338018006 +230 17 optimizer.lr 0.001983001240068502 +230 17 negative_sampler.num_negs_per_pos 86.0 +230 17 training.batch_size 2.0 +230 18 model.embedding_dim 1.0 +230 18 loss.margin 10.948554224937208 +230 18 loss.adversarial_temperature 0.6730072069564703 +230 18 optimizer.lr 0.013327376777949847 +230 18 negative_sampler.num_negs_per_pos 84.0 +230 18 training.batch_size 1.0 +230 19 model.embedding_dim 1.0 +230 19 loss.margin 13.296582163029935 +230 19 loss.adversarial_temperature 0.749751977784671 +230 19 optimizer.lr 0.004926948216854853 +230 19 negative_sampler.num_negs_per_pos 93.0 +230 19 training.batch_size 0.0 +230 20 model.embedding_dim 1.0 +230 20 loss.margin 11.120756165954278 +230 20 loss.adversarial_temperature 0.38550293918815454 +230 20 optimizer.lr 0.00950714558030556 +230 20 negative_sampler.num_negs_per_pos 83.0 +230 20 training.batch_size 1.0 +230 21 model.embedding_dim 1.0 +230 21 loss.margin 1.9428896474080415 +230 21 loss.adversarial_temperature 0.7409110070584429 +230 21 optimizer.lr 0.011354840016186966 +230 21 negative_sampler.num_negs_per_pos 67.0 +230 21 training.batch_size 0.0 +230 22 model.embedding_dim 2.0 +230 22 loss.margin 20.058828704768672 +230 22 loss.adversarial_temperature 0.5122884546577888 +230 22 optimizer.lr 0.04428152501747328 +230 22 negative_sampler.num_negs_per_pos 41.0 +230 22 training.batch_size 0.0 +230 23 model.embedding_dim 1.0 +230 23 loss.margin 9.164495182915084 
+230 23 loss.adversarial_temperature 0.6905716918569516 +230 23 optimizer.lr 0.0037965969873899694 +230 23 negative_sampler.num_negs_per_pos 73.0 +230 23 training.batch_size 0.0 +230 24 model.embedding_dim 0.0 +230 24 loss.margin 5.397939260691517 +230 24 loss.adversarial_temperature 0.5323189507096975 +230 24 optimizer.lr 0.050559277972168595 +230 24 negative_sampler.num_negs_per_pos 25.0 +230 24 training.batch_size 1.0 +230 25 model.embedding_dim 1.0 +230 25 loss.margin 16.312026341376015 +230 25 loss.adversarial_temperature 0.7254674855457899 +230 25 optimizer.lr 0.002514989748494771 +230 25 negative_sampler.num_negs_per_pos 38.0 +230 25 training.batch_size 2.0 +230 26 model.embedding_dim 1.0 +230 26 loss.margin 8.936400312155664 +230 26 loss.adversarial_temperature 0.24740190937561005 +230 26 optimizer.lr 0.01089456079909077 +230 26 negative_sampler.num_negs_per_pos 80.0 +230 26 training.batch_size 1.0 +230 27 model.embedding_dim 0.0 +230 27 loss.margin 5.67027095240707 +230 27 loss.adversarial_temperature 0.3180457085427114 +230 27 optimizer.lr 0.015897246692293324 +230 27 negative_sampler.num_negs_per_pos 44.0 +230 27 training.batch_size 2.0 +230 28 model.embedding_dim 1.0 +230 28 loss.margin 11.111779626948334 +230 28 loss.adversarial_temperature 0.5587491446141745 +230 28 optimizer.lr 0.0019125141406289255 +230 28 negative_sampler.num_negs_per_pos 22.0 +230 28 training.batch_size 2.0 +230 29 model.embedding_dim 0.0 +230 29 loss.margin 18.912327707943305 +230 29 loss.adversarial_temperature 0.618517814812564 +230 29 optimizer.lr 0.003903095176777861 +230 29 negative_sampler.num_negs_per_pos 98.0 +230 29 training.batch_size 0.0 +230 30 model.embedding_dim 0.0 +230 30 loss.margin 11.268601228477113 +230 30 loss.adversarial_temperature 0.9564458710247646 +230 30 optimizer.lr 0.0429663752465684 +230 30 negative_sampler.num_negs_per_pos 45.0 +230 30 training.batch_size 0.0 +230 31 model.embedding_dim 2.0 +230 31 loss.margin 29.05545594069841 +230 31 
loss.adversarial_temperature 0.8571537854151249 +230 31 optimizer.lr 0.006495978760036524 +230 31 negative_sampler.num_negs_per_pos 33.0 +230 31 training.batch_size 0.0 +230 1 dataset """fb15k237""" +230 1 model """ermlp""" +230 1 loss """nssa""" +230 1 regularizer """no""" +230 1 optimizer """adam""" +230 1 training_loop """owa""" +230 1 negative_sampler """basic""" +230 1 evaluator """rankbased""" +230 2 dataset """fb15k237""" +230 2 model """ermlp""" +230 2 loss """nssa""" +230 2 regularizer """no""" +230 2 optimizer """adam""" +230 2 training_loop """owa""" +230 2 negative_sampler """basic""" +230 2 evaluator """rankbased""" +230 3 dataset """fb15k237""" +230 3 model """ermlp""" +230 3 loss """nssa""" +230 3 regularizer """no""" +230 3 optimizer """adam""" +230 3 training_loop """owa""" +230 3 negative_sampler """basic""" +230 3 evaluator """rankbased""" +230 4 dataset """fb15k237""" +230 4 model """ermlp""" +230 4 loss """nssa""" +230 4 regularizer """no""" +230 4 optimizer """adam""" +230 4 training_loop """owa""" +230 4 negative_sampler """basic""" +230 4 evaluator """rankbased""" +230 5 dataset """fb15k237""" +230 5 model """ermlp""" +230 5 loss """nssa""" +230 5 regularizer """no""" +230 5 optimizer """adam""" +230 5 training_loop """owa""" +230 5 negative_sampler """basic""" +230 5 evaluator """rankbased""" +230 6 dataset """fb15k237""" +230 6 model """ermlp""" +230 6 loss """nssa""" +230 6 regularizer """no""" +230 6 optimizer """adam""" +230 6 training_loop """owa""" +230 6 negative_sampler """basic""" +230 6 evaluator """rankbased""" +230 7 dataset """fb15k237""" +230 7 model """ermlp""" +230 7 loss """nssa""" +230 7 regularizer """no""" +230 7 optimizer """adam""" +230 7 training_loop """owa""" +230 7 negative_sampler """basic""" +230 7 evaluator """rankbased""" +230 8 dataset """fb15k237""" +230 8 model """ermlp""" +230 8 loss """nssa""" +230 8 regularizer """no""" +230 8 optimizer """adam""" +230 8 training_loop """owa""" +230 8 negative_sampler 
"""basic""" +230 8 evaluator """rankbased""" +230 9 dataset """fb15k237""" +230 9 model """ermlp""" +230 9 loss """nssa""" +230 9 regularizer """no""" +230 9 optimizer """adam""" +230 9 training_loop """owa""" +230 9 negative_sampler """basic""" +230 9 evaluator """rankbased""" +230 10 dataset """fb15k237""" +230 10 model """ermlp""" +230 10 loss """nssa""" +230 10 regularizer """no""" +230 10 optimizer """adam""" +230 10 training_loop """owa""" +230 10 negative_sampler """basic""" +230 10 evaluator """rankbased""" +230 11 dataset """fb15k237""" +230 11 model """ermlp""" +230 11 loss """nssa""" +230 11 regularizer """no""" +230 11 optimizer """adam""" +230 11 training_loop """owa""" +230 11 negative_sampler """basic""" +230 11 evaluator """rankbased""" +230 12 dataset """fb15k237""" +230 12 model """ermlp""" +230 12 loss """nssa""" +230 12 regularizer """no""" +230 12 optimizer """adam""" +230 12 training_loop """owa""" +230 12 negative_sampler """basic""" +230 12 evaluator """rankbased""" +230 13 dataset """fb15k237""" +230 13 model """ermlp""" +230 13 loss """nssa""" +230 13 regularizer """no""" +230 13 optimizer """adam""" +230 13 training_loop """owa""" +230 13 negative_sampler """basic""" +230 13 evaluator """rankbased""" +230 14 dataset """fb15k237""" +230 14 model """ermlp""" +230 14 loss """nssa""" +230 14 regularizer """no""" +230 14 optimizer """adam""" +230 14 training_loop """owa""" +230 14 negative_sampler """basic""" +230 14 evaluator """rankbased""" +230 15 dataset """fb15k237""" +230 15 model """ermlp""" +230 15 loss """nssa""" +230 15 regularizer """no""" +230 15 optimizer """adam""" +230 15 training_loop """owa""" +230 15 negative_sampler """basic""" +230 15 evaluator """rankbased""" +230 16 dataset """fb15k237""" +230 16 model """ermlp""" +230 16 loss """nssa""" +230 16 regularizer """no""" +230 16 optimizer """adam""" +230 16 training_loop """owa""" +230 16 negative_sampler """basic""" +230 16 evaluator """rankbased""" +230 17 dataset 
"""fb15k237""" +230 17 model """ermlp""" +230 17 loss """nssa""" +230 17 regularizer """no""" +230 17 optimizer """adam""" +230 17 training_loop """owa""" +230 17 negative_sampler """basic""" +230 17 evaluator """rankbased""" +230 18 dataset """fb15k237""" +230 18 model """ermlp""" +230 18 loss """nssa""" +230 18 regularizer """no""" +230 18 optimizer """adam""" +230 18 training_loop """owa""" +230 18 negative_sampler """basic""" +230 18 evaluator """rankbased""" +230 19 dataset """fb15k237""" +230 19 model """ermlp""" +230 19 loss """nssa""" +230 19 regularizer """no""" +230 19 optimizer """adam""" +230 19 training_loop """owa""" +230 19 negative_sampler """basic""" +230 19 evaluator """rankbased""" +230 20 dataset """fb15k237""" +230 20 model """ermlp""" +230 20 loss """nssa""" +230 20 regularizer """no""" +230 20 optimizer """adam""" +230 20 training_loop """owa""" +230 20 negative_sampler """basic""" +230 20 evaluator """rankbased""" +230 21 dataset """fb15k237""" +230 21 model """ermlp""" +230 21 loss """nssa""" +230 21 regularizer """no""" +230 21 optimizer """adam""" +230 21 training_loop """owa""" +230 21 negative_sampler """basic""" +230 21 evaluator """rankbased""" +230 22 dataset """fb15k237""" +230 22 model """ermlp""" +230 22 loss """nssa""" +230 22 regularizer """no""" +230 22 optimizer """adam""" +230 22 training_loop """owa""" +230 22 negative_sampler """basic""" +230 22 evaluator """rankbased""" +230 23 dataset """fb15k237""" +230 23 model """ermlp""" +230 23 loss """nssa""" +230 23 regularizer """no""" +230 23 optimizer """adam""" +230 23 training_loop """owa""" +230 23 negative_sampler """basic""" +230 23 evaluator """rankbased""" +230 24 dataset """fb15k237""" +230 24 model """ermlp""" +230 24 loss """nssa""" +230 24 regularizer """no""" +230 24 optimizer """adam""" +230 24 training_loop """owa""" +230 24 negative_sampler """basic""" +230 24 evaluator """rankbased""" +230 25 dataset """fb15k237""" +230 25 model """ermlp""" +230 25 loss 
"""nssa""" +230 25 regularizer """no""" +230 25 optimizer """adam""" +230 25 training_loop """owa""" +230 25 negative_sampler """basic""" +230 25 evaluator """rankbased""" +230 26 dataset """fb15k237""" +230 26 model """ermlp""" +230 26 loss """nssa""" +230 26 regularizer """no""" +230 26 optimizer """adam""" +230 26 training_loop """owa""" +230 26 negative_sampler """basic""" +230 26 evaluator """rankbased""" +230 27 dataset """fb15k237""" +230 27 model """ermlp""" +230 27 loss """nssa""" +230 27 regularizer """no""" +230 27 optimizer """adam""" +230 27 training_loop """owa""" +230 27 negative_sampler """basic""" +230 27 evaluator """rankbased""" +230 28 dataset """fb15k237""" +230 28 model """ermlp""" +230 28 loss """nssa""" +230 28 regularizer """no""" +230 28 optimizer """adam""" +230 28 training_loop """owa""" +230 28 negative_sampler """basic""" +230 28 evaluator """rankbased""" +230 29 dataset """fb15k237""" +230 29 model """ermlp""" +230 29 loss """nssa""" +230 29 regularizer """no""" +230 29 optimizer """adam""" +230 29 training_loop """owa""" +230 29 negative_sampler """basic""" +230 29 evaluator """rankbased""" +230 30 dataset """fb15k237""" +230 30 model """ermlp""" +230 30 loss """nssa""" +230 30 regularizer """no""" +230 30 optimizer """adam""" +230 30 training_loop """owa""" +230 30 negative_sampler """basic""" +230 30 evaluator """rankbased""" +230 31 dataset """fb15k237""" +230 31 model """ermlp""" +230 31 loss """nssa""" +230 31 regularizer """no""" +230 31 optimizer """adam""" +230 31 training_loop """owa""" +230 31 negative_sampler """basic""" +230 31 evaluator """rankbased""" +231 1 model.embedding_dim 0.0 +231 1 loss.margin 12.759051569859997 +231 1 loss.adversarial_temperature 0.21623416617973107 +231 1 optimizer.lr 0.023532494228742512 +231 1 negative_sampler.num_negs_per_pos 10.0 +231 1 training.batch_size 1.0 +231 2 model.embedding_dim 1.0 +231 2 loss.margin 5.408371696032818 +231 2 loss.adversarial_temperature 0.472621721332848 +231 2 
optimizer.lr 0.032299958113769976 +231 2 negative_sampler.num_negs_per_pos 57.0 +231 2 training.batch_size 1.0 +231 3 model.embedding_dim 2.0 +231 3 loss.margin 20.40700012526506 +231 3 loss.adversarial_temperature 0.7634901140352743 +231 3 optimizer.lr 0.016971526626724718 +231 3 negative_sampler.num_negs_per_pos 13.0 +231 3 training.batch_size 2.0 +231 4 model.embedding_dim 0.0 +231 4 loss.margin 23.90145146650712 +231 4 loss.adversarial_temperature 0.8721580295378428 +231 4 optimizer.lr 0.03528933726478715 +231 4 negative_sampler.num_negs_per_pos 28.0 +231 4 training.batch_size 2.0 +231 5 model.embedding_dim 0.0 +231 5 loss.margin 19.83066738757215 +231 5 loss.adversarial_temperature 0.8562899167552684 +231 5 optimizer.lr 0.0028014766908008282 +231 5 negative_sampler.num_negs_per_pos 15.0 +231 5 training.batch_size 0.0 +231 6 model.embedding_dim 2.0 +231 6 loss.margin 8.652187871112954 +231 6 loss.adversarial_temperature 0.6738767157380795 +231 6 optimizer.lr 0.0012307596174075982 +231 6 negative_sampler.num_negs_per_pos 29.0 +231 6 training.batch_size 2.0 +231 7 model.embedding_dim 0.0 +231 7 loss.margin 4.948029066901206 +231 7 loss.adversarial_temperature 0.5155645223691152 +231 7 optimizer.lr 0.03882476386317257 +231 7 negative_sampler.num_negs_per_pos 72.0 +231 7 training.batch_size 2.0 +231 8 model.embedding_dim 0.0 +231 8 loss.margin 23.535338827808463 +231 8 loss.adversarial_temperature 0.28845384987366895 +231 8 optimizer.lr 0.0012944804911614574 +231 8 negative_sampler.num_negs_per_pos 52.0 +231 8 training.batch_size 1.0 +231 9 model.embedding_dim 1.0 +231 9 loss.margin 7.392201988736279 +231 9 loss.adversarial_temperature 0.37242637020069497 +231 9 optimizer.lr 0.006758249380318997 +231 9 negative_sampler.num_negs_per_pos 2.0 +231 9 training.batch_size 0.0 +231 10 model.embedding_dim 2.0 +231 10 loss.margin 27.40739053151481 +231 10 loss.adversarial_temperature 0.31521561798801684 +231 10 optimizer.lr 0.0010460665626109377 +231 10 
negative_sampler.num_negs_per_pos 90.0 +231 10 training.batch_size 2.0 +231 11 model.embedding_dim 0.0 +231 11 loss.margin 20.389051815342846 +231 11 loss.adversarial_temperature 0.7386186505034931 +231 11 optimizer.lr 0.044782584858689564 +231 11 negative_sampler.num_negs_per_pos 91.0 +231 11 training.batch_size 2.0 +231 12 model.embedding_dim 0.0 +231 12 loss.margin 16.417442934091333 +231 12 loss.adversarial_temperature 0.13712108924791594 +231 12 optimizer.lr 0.0027521668453672796 +231 12 negative_sampler.num_negs_per_pos 28.0 +231 12 training.batch_size 1.0 +231 13 model.embedding_dim 0.0 +231 13 loss.margin 3.620737878344731 +231 13 loss.adversarial_temperature 0.6185494168287126 +231 13 optimizer.lr 0.002810311893109909 +231 13 negative_sampler.num_negs_per_pos 79.0 +231 13 training.batch_size 0.0 +231 14 model.embedding_dim 0.0 +231 14 loss.margin 13.6383745556881 +231 14 loss.adversarial_temperature 0.13833205244918242 +231 14 optimizer.lr 0.0023110145729722744 +231 14 negative_sampler.num_negs_per_pos 16.0 +231 14 training.batch_size 0.0 +231 15 model.embedding_dim 2.0 +231 15 loss.margin 3.163356212075972 +231 15 loss.adversarial_temperature 0.21477064645236374 +231 15 optimizer.lr 0.001646011016283675 +231 15 negative_sampler.num_negs_per_pos 51.0 +231 15 training.batch_size 0.0 +231 16 model.embedding_dim 2.0 +231 16 loss.margin 5.021008740072334 +231 16 loss.adversarial_temperature 0.18499683287415372 +231 16 optimizer.lr 0.013448215680347432 +231 16 negative_sampler.num_negs_per_pos 1.0 +231 16 training.batch_size 2.0 +231 17 model.embedding_dim 2.0 +231 17 loss.margin 23.061707723469535 +231 17 loss.adversarial_temperature 0.3995274725863758 +231 17 optimizer.lr 0.009732741532267294 +231 17 negative_sampler.num_negs_per_pos 13.0 +231 17 training.batch_size 2.0 +231 18 model.embedding_dim 1.0 +231 18 loss.margin 4.30394265849906 +231 18 loss.adversarial_temperature 0.3947765047882482 +231 18 optimizer.lr 0.0037230681388798546 +231 18 
negative_sampler.num_negs_per_pos 16.0 +231 18 training.batch_size 2.0 +231 19 model.embedding_dim 1.0 +231 19 loss.margin 15.544822654697326 +231 19 loss.adversarial_temperature 0.2521849838792304 +231 19 optimizer.lr 0.0019125294098103554 +231 19 negative_sampler.num_negs_per_pos 26.0 +231 19 training.batch_size 0.0 +231 20 model.embedding_dim 1.0 +231 20 loss.margin 5.968054128169835 +231 20 loss.adversarial_temperature 0.14756772407788193 +231 20 optimizer.lr 0.08849477500684406 +231 20 negative_sampler.num_negs_per_pos 74.0 +231 20 training.batch_size 2.0 +231 21 model.embedding_dim 0.0 +231 21 loss.margin 27.843397071176756 +231 21 loss.adversarial_temperature 0.9980467086139065 +231 21 optimizer.lr 0.006379896124512532 +231 21 negative_sampler.num_negs_per_pos 43.0 +231 21 training.batch_size 0.0 +231 22 model.embedding_dim 2.0 +231 22 loss.margin 11.89442156091883 +231 22 loss.adversarial_temperature 0.7792739757968297 +231 22 optimizer.lr 0.0020321011754559034 +231 22 negative_sampler.num_negs_per_pos 31.0 +231 22 training.batch_size 1.0 +231 23 model.embedding_dim 1.0 +231 23 loss.margin 11.994945637118187 +231 23 loss.adversarial_temperature 0.5728529887706026 +231 23 optimizer.lr 0.00837577609738614 +231 23 negative_sampler.num_negs_per_pos 26.0 +231 23 training.batch_size 2.0 +231 24 model.embedding_dim 2.0 +231 24 loss.margin 15.588999343029727 +231 24 loss.adversarial_temperature 0.3182248381239926 +231 24 optimizer.lr 0.001100056618127318 +231 24 negative_sampler.num_negs_per_pos 21.0 +231 24 training.batch_size 2.0 +231 25 model.embedding_dim 0.0 +231 25 loss.margin 10.206312802879792 +231 25 loss.adversarial_temperature 0.15996371232038942 +231 25 optimizer.lr 0.010371452509721638 +231 25 negative_sampler.num_negs_per_pos 23.0 +231 25 training.batch_size 1.0 +231 26 model.embedding_dim 2.0 +231 26 loss.margin 25.29688449134099 +231 26 loss.adversarial_temperature 0.935922748250966 +231 26 optimizer.lr 0.03537105032748565 +231 26 
negative_sampler.num_negs_per_pos 13.0 +231 26 training.batch_size 2.0 +231 27 model.embedding_dim 2.0 +231 27 loss.margin 7.115000993924471 +231 27 loss.adversarial_temperature 0.37290458464634174 +231 27 optimizer.lr 0.00585811763740646 +231 27 negative_sampler.num_negs_per_pos 76.0 +231 27 training.batch_size 1.0 +231 28 model.embedding_dim 2.0 +231 28 loss.margin 14.061554556141969 +231 28 loss.adversarial_temperature 0.8133116770514267 +231 28 optimizer.lr 0.009977034540249072 +231 28 negative_sampler.num_negs_per_pos 6.0 +231 28 training.batch_size 0.0 +231 29 model.embedding_dim 1.0 +231 29 loss.margin 22.664394842531852 +231 29 loss.adversarial_temperature 0.49947127155056803 +231 29 optimizer.lr 0.05454884964281105 +231 29 negative_sampler.num_negs_per_pos 8.0 +231 29 training.batch_size 0.0 +231 30 model.embedding_dim 2.0 +231 30 loss.margin 12.176993398506514 +231 30 loss.adversarial_temperature 0.894393590682829 +231 30 optimizer.lr 0.09687791534359926 +231 30 negative_sampler.num_negs_per_pos 32.0 +231 30 training.batch_size 0.0 +231 31 model.embedding_dim 1.0 +231 31 loss.margin 4.485204064155637 +231 31 loss.adversarial_temperature 0.5628542507037503 +231 31 optimizer.lr 0.003462965218029985 +231 31 negative_sampler.num_negs_per_pos 51.0 +231 31 training.batch_size 2.0 +231 32 model.embedding_dim 2.0 +231 32 loss.margin 25.34666491333477 +231 32 loss.adversarial_temperature 0.42172851088791447 +231 32 optimizer.lr 0.0012881863826986282 +231 32 negative_sampler.num_negs_per_pos 39.0 +231 32 training.batch_size 0.0 +231 33 model.embedding_dim 1.0 +231 33 loss.margin 3.5407516076540384 +231 33 loss.adversarial_temperature 0.8053264615627592 +231 33 optimizer.lr 0.0036054266915836143 +231 33 negative_sampler.num_negs_per_pos 82.0 +231 33 training.batch_size 1.0 +231 34 model.embedding_dim 1.0 +231 34 loss.margin 11.638231763593764 +231 34 loss.adversarial_temperature 0.33013159349098853 +231 34 optimizer.lr 0.0015949343777890597 +231 34 
negative_sampler.num_negs_per_pos 87.0 +231 34 training.batch_size 0.0 +231 35 model.embedding_dim 2.0 +231 35 loss.margin 20.76638619897506 +231 35 loss.adversarial_temperature 0.22816285218916713 +231 35 optimizer.lr 0.0028013154550692843 +231 35 negative_sampler.num_negs_per_pos 44.0 +231 35 training.batch_size 0.0 +231 36 model.embedding_dim 1.0 +231 36 loss.margin 27.330224825524795 +231 36 loss.adversarial_temperature 0.24734511132891784 +231 36 optimizer.lr 0.0013484305647942642 +231 36 negative_sampler.num_negs_per_pos 18.0 +231 36 training.batch_size 0.0 +231 37 model.embedding_dim 0.0 +231 37 loss.margin 2.3677650235842025 +231 37 loss.adversarial_temperature 0.18113174494346315 +231 37 optimizer.lr 0.025292433339554023 +231 37 negative_sampler.num_negs_per_pos 68.0 +231 37 training.batch_size 2.0 +231 38 model.embedding_dim 2.0 +231 38 loss.margin 22.72088107654973 +231 38 loss.adversarial_temperature 0.758249149994448 +231 38 optimizer.lr 0.002143668223437394 +231 38 negative_sampler.num_negs_per_pos 54.0 +231 38 training.batch_size 2.0 +231 39 model.embedding_dim 1.0 +231 39 loss.margin 26.682423376379468 +231 39 loss.adversarial_temperature 0.907775545311421 +231 39 optimizer.lr 0.0062949095145100956 +231 39 negative_sampler.num_negs_per_pos 30.0 +231 39 training.batch_size 2.0 +231 40 model.embedding_dim 0.0 +231 40 loss.margin 26.402603463182604 +231 40 loss.adversarial_temperature 0.5997173772795077 +231 40 optimizer.lr 0.004882450170241131 +231 40 negative_sampler.num_negs_per_pos 83.0 +231 40 training.batch_size 2.0 +231 41 model.embedding_dim 1.0 +231 41 loss.margin 29.883660347919317 +231 41 loss.adversarial_temperature 0.23219309255328216 +231 41 optimizer.lr 0.08532153916369625 +231 41 negative_sampler.num_negs_per_pos 21.0 +231 41 training.batch_size 1.0 +231 42 model.embedding_dim 2.0 +231 42 loss.margin 12.277343842374359 +231 42 loss.adversarial_temperature 0.15384861945602504 +231 42 optimizer.lr 0.001768376679886238 +231 42 
negative_sampler.num_negs_per_pos 13.0 +231 42 training.batch_size 2.0 +231 43 model.embedding_dim 0.0 +231 43 loss.margin 26.5222219259452 +231 43 loss.adversarial_temperature 0.6246508591176179 +231 43 optimizer.lr 0.01238756431271081 +231 43 negative_sampler.num_negs_per_pos 57.0 +231 43 training.batch_size 0.0 +231 44 model.embedding_dim 2.0 +231 44 loss.margin 1.8694832565131503 +231 44 loss.adversarial_temperature 0.37996367482309207 +231 44 optimizer.lr 0.03234054690768653 +231 44 negative_sampler.num_negs_per_pos 90.0 +231 44 training.batch_size 1.0 +231 45 model.embedding_dim 0.0 +231 45 loss.margin 19.552680348705223 +231 45 loss.adversarial_temperature 0.8845537397380839 +231 45 optimizer.lr 0.002329481396812647 +231 45 negative_sampler.num_negs_per_pos 10.0 +231 45 training.batch_size 1.0 +231 46 model.embedding_dim 0.0 +231 46 loss.margin 22.60006228050685 +231 46 loss.adversarial_temperature 0.36824824054930105 +231 46 optimizer.lr 0.0742407414613715 +231 46 negative_sampler.num_negs_per_pos 80.0 +231 46 training.batch_size 1.0 +231 47 model.embedding_dim 0.0 +231 47 loss.margin 4.770765528288182 +231 47 loss.adversarial_temperature 0.7625229368953299 +231 47 optimizer.lr 0.01714473840932114 +231 47 negative_sampler.num_negs_per_pos 74.0 +231 47 training.batch_size 2.0 +231 48 model.embedding_dim 0.0 +231 48 loss.margin 12.57803871213467 +231 48 loss.adversarial_temperature 0.11547474083707035 +231 48 optimizer.lr 0.08750119558402258 +231 48 negative_sampler.num_negs_per_pos 46.0 +231 48 training.batch_size 1.0 +231 49 model.embedding_dim 2.0 +231 49 loss.margin 18.911805495267327 +231 49 loss.adversarial_temperature 0.5916776440950333 +231 49 optimizer.lr 0.06813107066282659 +231 49 negative_sampler.num_negs_per_pos 81.0 +231 49 training.batch_size 0.0 +231 50 model.embedding_dim 0.0 +231 50 loss.margin 3.8503316574669872 +231 50 loss.adversarial_temperature 0.13132783309533033 +231 50 optimizer.lr 0.0033176352721848447 +231 50 
negative_sampler.num_negs_per_pos 85.0 +231 50 training.batch_size 2.0 +231 51 model.embedding_dim 1.0 +231 51 loss.margin 20.444099492600973 +231 51 loss.adversarial_temperature 0.5424981323773768 +231 51 optimizer.lr 0.04249496151611741 +231 51 negative_sampler.num_negs_per_pos 13.0 +231 51 training.batch_size 0.0 +231 52 model.embedding_dim 0.0 +231 52 loss.margin 1.9164166194225096 +231 52 loss.adversarial_temperature 0.38759763163310923 +231 52 optimizer.lr 0.09954870282784124 +231 52 negative_sampler.num_negs_per_pos 37.0 +231 52 training.batch_size 1.0 +231 53 model.embedding_dim 0.0 +231 53 loss.margin 5.840602841051707 +231 53 loss.adversarial_temperature 0.11717040630800173 +231 53 optimizer.lr 0.021093388376188616 +231 53 negative_sampler.num_negs_per_pos 54.0 +231 53 training.batch_size 1.0 +231 54 model.embedding_dim 2.0 +231 54 loss.margin 15.48223877258379 +231 54 loss.adversarial_temperature 0.5327816668833326 +231 54 optimizer.lr 0.010185301790122364 +231 54 negative_sampler.num_negs_per_pos 46.0 +231 54 training.batch_size 1.0 +231 55 model.embedding_dim 1.0 +231 55 loss.margin 12.645093517041634 +231 55 loss.adversarial_temperature 0.7695009436758844 +231 55 optimizer.lr 0.007797292074916114 +231 55 negative_sampler.num_negs_per_pos 76.0 +231 55 training.batch_size 2.0 +231 1 dataset """fb15k237""" +231 1 model """ermlp""" +231 1 loss """nssa""" +231 1 regularizer """no""" +231 1 optimizer """adam""" +231 1 training_loop """owa""" +231 1 negative_sampler """basic""" +231 1 evaluator """rankbased""" +231 2 dataset """fb15k237""" +231 2 model """ermlp""" +231 2 loss """nssa""" +231 2 regularizer """no""" +231 2 optimizer """adam""" +231 2 training_loop """owa""" +231 2 negative_sampler """basic""" +231 2 evaluator """rankbased""" +231 3 dataset """fb15k237""" +231 3 model """ermlp""" +231 3 loss """nssa""" +231 3 regularizer """no""" +231 3 optimizer """adam""" +231 3 training_loop """owa""" +231 3 negative_sampler """basic""" +231 3 evaluator 
"""rankbased""" +231 4 dataset """fb15k237""" +231 4 model """ermlp""" +231 4 loss """nssa""" +231 4 regularizer """no""" +231 4 optimizer """adam""" +231 4 training_loop """owa""" +231 4 negative_sampler """basic""" +231 4 evaluator """rankbased""" +231 5 dataset """fb15k237""" +231 5 model """ermlp""" +231 5 loss """nssa""" +231 5 regularizer """no""" +231 5 optimizer """adam""" +231 5 training_loop """owa""" +231 5 negative_sampler """basic""" +231 5 evaluator """rankbased""" +231 6 dataset """fb15k237""" +231 6 model """ermlp""" +231 6 loss """nssa""" +231 6 regularizer """no""" +231 6 optimizer """adam""" +231 6 training_loop """owa""" +231 6 negative_sampler """basic""" +231 6 evaluator """rankbased""" +231 7 dataset """fb15k237""" +231 7 model """ermlp""" +231 7 loss """nssa""" +231 7 regularizer """no""" +231 7 optimizer """adam""" +231 7 training_loop """owa""" +231 7 negative_sampler """basic""" +231 7 evaluator """rankbased""" +231 8 dataset """fb15k237""" +231 8 model """ermlp""" +231 8 loss """nssa""" +231 8 regularizer """no""" +231 8 optimizer """adam""" +231 8 training_loop """owa""" +231 8 negative_sampler """basic""" +231 8 evaluator """rankbased""" +231 9 dataset """fb15k237""" +231 9 model """ermlp""" +231 9 loss """nssa""" +231 9 regularizer """no""" +231 9 optimizer """adam""" +231 9 training_loop """owa""" +231 9 negative_sampler """basic""" +231 9 evaluator """rankbased""" +231 10 dataset """fb15k237""" +231 10 model """ermlp""" +231 10 loss """nssa""" +231 10 regularizer """no""" +231 10 optimizer """adam""" +231 10 training_loop """owa""" +231 10 negative_sampler """basic""" +231 10 evaluator """rankbased""" +231 11 dataset """fb15k237""" +231 11 model """ermlp""" +231 11 loss """nssa""" +231 11 regularizer """no""" +231 11 optimizer """adam""" +231 11 training_loop """owa""" +231 11 negative_sampler """basic""" +231 11 evaluator """rankbased""" +231 12 dataset """fb15k237""" +231 12 model """ermlp""" +231 12 loss """nssa""" +231 12 
regularizer """no""" +231 12 optimizer """adam""" +231 12 training_loop """owa""" +231 12 negative_sampler """basic""" +231 12 evaluator """rankbased""" +231 13 dataset """fb15k237""" +231 13 model """ermlp""" +231 13 loss """nssa""" +231 13 regularizer """no""" +231 13 optimizer """adam""" +231 13 training_loop """owa""" +231 13 negative_sampler """basic""" +231 13 evaluator """rankbased""" +231 14 dataset """fb15k237""" +231 14 model """ermlp""" +231 14 loss """nssa""" +231 14 regularizer """no""" +231 14 optimizer """adam""" +231 14 training_loop """owa""" +231 14 negative_sampler """basic""" +231 14 evaluator """rankbased""" +231 15 dataset """fb15k237""" +231 15 model """ermlp""" +231 15 loss """nssa""" +231 15 regularizer """no""" +231 15 optimizer """adam""" +231 15 training_loop """owa""" +231 15 negative_sampler """basic""" +231 15 evaluator """rankbased""" +231 16 dataset """fb15k237""" +231 16 model """ermlp""" +231 16 loss """nssa""" +231 16 regularizer """no""" +231 16 optimizer """adam""" +231 16 training_loop """owa""" +231 16 negative_sampler """basic""" +231 16 evaluator """rankbased""" +231 17 dataset """fb15k237""" +231 17 model """ermlp""" +231 17 loss """nssa""" +231 17 regularizer """no""" +231 17 optimizer """adam""" +231 17 training_loop """owa""" +231 17 negative_sampler """basic""" +231 17 evaluator """rankbased""" +231 18 dataset """fb15k237""" +231 18 model """ermlp""" +231 18 loss """nssa""" +231 18 regularizer """no""" +231 18 optimizer """adam""" +231 18 training_loop """owa""" +231 18 negative_sampler """basic""" +231 18 evaluator """rankbased""" +231 19 dataset """fb15k237""" +231 19 model """ermlp""" +231 19 loss """nssa""" +231 19 regularizer """no""" +231 19 optimizer """adam""" +231 19 training_loop """owa""" +231 19 negative_sampler """basic""" +231 19 evaluator """rankbased""" +231 20 dataset """fb15k237""" +231 20 model """ermlp""" +231 20 loss """nssa""" +231 20 regularizer """no""" +231 20 optimizer """adam""" +231 20 
training_loop """owa""" +231 20 negative_sampler """basic""" +231 20 evaluator """rankbased""" +231 21 dataset """fb15k237""" +231 21 model """ermlp""" +231 21 loss """nssa""" +231 21 regularizer """no""" +231 21 optimizer """adam""" +231 21 training_loop """owa""" +231 21 negative_sampler """basic""" +231 21 evaluator """rankbased""" +231 22 dataset """fb15k237""" +231 22 model """ermlp""" +231 22 loss """nssa""" +231 22 regularizer """no""" +231 22 optimizer """adam""" +231 22 training_loop """owa""" +231 22 negative_sampler """basic""" +231 22 evaluator """rankbased""" +231 23 dataset """fb15k237""" +231 23 model """ermlp""" +231 23 loss """nssa""" +231 23 regularizer """no""" +231 23 optimizer """adam""" +231 23 training_loop """owa""" +231 23 negative_sampler """basic""" +231 23 evaluator """rankbased""" +231 24 dataset """fb15k237""" +231 24 model """ermlp""" +231 24 loss """nssa""" +231 24 regularizer """no""" +231 24 optimizer """adam""" +231 24 training_loop """owa""" +231 24 negative_sampler """basic""" +231 24 evaluator """rankbased""" +231 25 dataset """fb15k237""" +231 25 model """ermlp""" +231 25 loss """nssa""" +231 25 regularizer """no""" +231 25 optimizer """adam""" +231 25 training_loop """owa""" +231 25 negative_sampler """basic""" +231 25 evaluator """rankbased""" +231 26 dataset """fb15k237""" +231 26 model """ermlp""" +231 26 loss """nssa""" +231 26 regularizer """no""" +231 26 optimizer """adam""" +231 26 training_loop """owa""" +231 26 negative_sampler """basic""" +231 26 evaluator """rankbased""" +231 27 dataset """fb15k237""" +231 27 model """ermlp""" +231 27 loss """nssa""" +231 27 regularizer """no""" +231 27 optimizer """adam""" +231 27 training_loop """owa""" +231 27 negative_sampler """basic""" +231 27 evaluator """rankbased""" +231 28 dataset """fb15k237""" +231 28 model """ermlp""" +231 28 loss """nssa""" +231 28 regularizer """no""" +231 28 optimizer """adam""" +231 28 training_loop """owa""" +231 28 negative_sampler """basic""" 
+231 28 evaluator """rankbased""" +231 29 dataset """fb15k237""" +231 29 model """ermlp""" +231 29 loss """nssa""" +231 29 regularizer """no""" +231 29 optimizer """adam""" +231 29 training_loop """owa""" +231 29 negative_sampler """basic""" +231 29 evaluator """rankbased""" +231 30 dataset """fb15k237""" +231 30 model """ermlp""" +231 30 loss """nssa""" +231 30 regularizer """no""" +231 30 optimizer """adam""" +231 30 training_loop """owa""" +231 30 negative_sampler """basic""" +231 30 evaluator """rankbased""" +231 31 dataset """fb15k237""" +231 31 model """ermlp""" +231 31 loss """nssa""" +231 31 regularizer """no""" +231 31 optimizer """adam""" +231 31 training_loop """owa""" +231 31 negative_sampler """basic""" +231 31 evaluator """rankbased""" +231 32 dataset """fb15k237""" +231 32 model """ermlp""" +231 32 loss """nssa""" +231 32 regularizer """no""" +231 32 optimizer """adam""" +231 32 training_loop """owa""" +231 32 negative_sampler """basic""" +231 32 evaluator """rankbased""" +231 33 dataset """fb15k237""" +231 33 model """ermlp""" +231 33 loss """nssa""" +231 33 regularizer """no""" +231 33 optimizer """adam""" +231 33 training_loop """owa""" +231 33 negative_sampler """basic""" +231 33 evaluator """rankbased""" +231 34 dataset """fb15k237""" +231 34 model """ermlp""" +231 34 loss """nssa""" +231 34 regularizer """no""" +231 34 optimizer """adam""" +231 34 training_loop """owa""" +231 34 negative_sampler """basic""" +231 34 evaluator """rankbased""" +231 35 dataset """fb15k237""" +231 35 model """ermlp""" +231 35 loss """nssa""" +231 35 regularizer """no""" +231 35 optimizer """adam""" +231 35 training_loop """owa""" +231 35 negative_sampler """basic""" +231 35 evaluator """rankbased""" +231 36 dataset """fb15k237""" +231 36 model """ermlp""" +231 36 loss """nssa""" +231 36 regularizer """no""" +231 36 optimizer """adam""" +231 36 training_loop """owa""" +231 36 negative_sampler """basic""" +231 36 evaluator """rankbased""" +231 37 dataset 
"""fb15k237""" +231 37 model """ermlp""" +231 37 loss """nssa""" +231 37 regularizer """no""" +231 37 optimizer """adam""" +231 37 training_loop """owa""" +231 37 negative_sampler """basic""" +231 37 evaluator """rankbased""" +231 38 dataset """fb15k237""" +231 38 model """ermlp""" +231 38 loss """nssa""" +231 38 regularizer """no""" +231 38 optimizer """adam""" +231 38 training_loop """owa""" +231 38 negative_sampler """basic""" +231 38 evaluator """rankbased""" +231 39 dataset """fb15k237""" +231 39 model """ermlp""" +231 39 loss """nssa""" +231 39 regularizer """no""" +231 39 optimizer """adam""" +231 39 training_loop """owa""" +231 39 negative_sampler """basic""" +231 39 evaluator """rankbased""" +231 40 dataset """fb15k237""" +231 40 model """ermlp""" +231 40 loss """nssa""" +231 40 regularizer """no""" +231 40 optimizer """adam""" +231 40 training_loop """owa""" +231 40 negative_sampler """basic""" +231 40 evaluator """rankbased""" +231 41 dataset """fb15k237""" +231 41 model """ermlp""" +231 41 loss """nssa""" +231 41 regularizer """no""" +231 41 optimizer """adam""" +231 41 training_loop """owa""" +231 41 negative_sampler """basic""" +231 41 evaluator """rankbased""" +231 42 dataset """fb15k237""" +231 42 model """ermlp""" +231 42 loss """nssa""" +231 42 regularizer """no""" +231 42 optimizer """adam""" +231 42 training_loop """owa""" +231 42 negative_sampler """basic""" +231 42 evaluator """rankbased""" +231 43 dataset """fb15k237""" +231 43 model """ermlp""" +231 43 loss """nssa""" +231 43 regularizer """no""" +231 43 optimizer """adam""" +231 43 training_loop """owa""" +231 43 negative_sampler """basic""" +231 43 evaluator """rankbased""" +231 44 dataset """fb15k237""" +231 44 model """ermlp""" +231 44 loss """nssa""" +231 44 regularizer """no""" +231 44 optimizer """adam""" +231 44 training_loop """owa""" +231 44 negative_sampler """basic""" +231 44 evaluator """rankbased""" +231 45 dataset """fb15k237""" +231 45 model """ermlp""" +231 45 loss 
"""nssa""" +231 45 regularizer """no""" +231 45 optimizer """adam""" +231 45 training_loop """owa""" +231 45 negative_sampler """basic""" +231 45 evaluator """rankbased""" +231 46 dataset """fb15k237""" +231 46 model """ermlp""" +231 46 loss """nssa""" +231 46 regularizer """no""" +231 46 optimizer """adam""" +231 46 training_loop """owa""" +231 46 negative_sampler """basic""" +231 46 evaluator """rankbased""" +231 47 dataset """fb15k237""" +231 47 model """ermlp""" +231 47 loss """nssa""" +231 47 regularizer """no""" +231 47 optimizer """adam""" +231 47 training_loop """owa""" +231 47 negative_sampler """basic""" +231 47 evaluator """rankbased""" +231 48 dataset """fb15k237""" +231 48 model """ermlp""" +231 48 loss """nssa""" +231 48 regularizer """no""" +231 48 optimizer """adam""" +231 48 training_loop """owa""" +231 48 negative_sampler """basic""" +231 48 evaluator """rankbased""" +231 49 dataset """fb15k237""" +231 49 model """ermlp""" +231 49 loss """nssa""" +231 49 regularizer """no""" +231 49 optimizer """adam""" +231 49 training_loop """owa""" +231 49 negative_sampler """basic""" +231 49 evaluator """rankbased""" +231 50 dataset """fb15k237""" +231 50 model """ermlp""" +231 50 loss """nssa""" +231 50 regularizer """no""" +231 50 optimizer """adam""" +231 50 training_loop """owa""" +231 50 negative_sampler """basic""" +231 50 evaluator """rankbased""" +231 51 dataset """fb15k237""" +231 51 model """ermlp""" +231 51 loss """nssa""" +231 51 regularizer """no""" +231 51 optimizer """adam""" +231 51 training_loop """owa""" +231 51 negative_sampler """basic""" +231 51 evaluator """rankbased""" +231 52 dataset """fb15k237""" +231 52 model """ermlp""" +231 52 loss """nssa""" +231 52 regularizer """no""" +231 52 optimizer """adam""" +231 52 training_loop """owa""" +231 52 negative_sampler """basic""" +231 52 evaluator """rankbased""" +231 53 dataset """fb15k237""" +231 53 model """ermlp""" +231 53 loss """nssa""" +231 53 regularizer """no""" +231 53 optimizer 
"""adam""" +231 53 training_loop """owa""" +231 53 negative_sampler """basic""" +231 53 evaluator """rankbased""" +231 54 dataset """fb15k237""" +231 54 model """ermlp""" +231 54 loss """nssa""" +231 54 regularizer """no""" +231 54 optimizer """adam""" +231 54 training_loop """owa""" +231 54 negative_sampler """basic""" +231 54 evaluator """rankbased""" +231 55 dataset """fb15k237""" +231 55 model """ermlp""" +231 55 loss """nssa""" +231 55 regularizer """no""" +231 55 optimizer """adam""" +231 55 training_loop """owa""" +231 55 negative_sampler """basic""" +231 55 evaluator """rankbased""" +232 1 model.embedding_dim 1.0 +232 1 training.batch_size 1.0 +232 1 training.label_smoothing 0.004323197051310376 +232 2 model.embedding_dim 2.0 +232 2 training.batch_size 0.0 +232 2 training.label_smoothing 0.0062944098554441295 +232 3 model.embedding_dim 1.0 +232 3 training.batch_size 0.0 +232 3 training.label_smoothing 0.0013932870752242838 +232 4 model.embedding_dim 2.0 +232 4 training.batch_size 1.0 +232 4 training.label_smoothing 0.00789561554876591 +232 5 model.embedding_dim 1.0 +232 5 training.batch_size 1.0 +232 5 training.label_smoothing 0.06494126608770287 +232 6 model.embedding_dim 0.0 +232 6 training.batch_size 2.0 +232 6 training.label_smoothing 0.19070364017927857 +232 7 model.embedding_dim 2.0 +232 7 training.batch_size 1.0 +232 7 training.label_smoothing 0.3176543784822693 +232 8 model.embedding_dim 2.0 +232 8 training.batch_size 0.0 +232 8 training.label_smoothing 0.0025391954467744683 +232 9 model.embedding_dim 2.0 +232 9 training.batch_size 2.0 +232 9 training.label_smoothing 0.11376421024701715 +232 10 model.embedding_dim 0.0 +232 10 training.batch_size 0.0 +232 10 training.label_smoothing 0.04981161463485198 +232 11 model.embedding_dim 0.0 +232 11 training.batch_size 2.0 +232 11 training.label_smoothing 0.037340712360668354 +232 12 model.embedding_dim 0.0 +232 12 training.batch_size 0.0 +232 12 training.label_smoothing 0.3593989257915375 +232 13 
model.embedding_dim 2.0 +232 13 training.batch_size 2.0 +232 13 training.label_smoothing 0.0030313915944882234 +232 14 model.embedding_dim 2.0 +232 14 training.batch_size 1.0 +232 14 training.label_smoothing 0.16440277306159223 +232 15 model.embedding_dim 1.0 +232 15 training.batch_size 2.0 +232 15 training.label_smoothing 0.05247303756716551 +232 16 model.embedding_dim 1.0 +232 16 training.batch_size 2.0 +232 16 training.label_smoothing 0.49266906723136694 +232 17 model.embedding_dim 2.0 +232 17 training.batch_size 1.0 +232 17 training.label_smoothing 0.006319313364894935 +232 18 model.embedding_dim 0.0 +232 18 training.batch_size 1.0 +232 18 training.label_smoothing 0.014489167238258563 +232 19 model.embedding_dim 2.0 +232 19 training.batch_size 1.0 +232 19 training.label_smoothing 0.19087323995256325 +232 20 model.embedding_dim 2.0 +232 20 training.batch_size 0.0 +232 20 training.label_smoothing 0.6345259865044629 +232 21 model.embedding_dim 1.0 +232 21 training.batch_size 0.0 +232 21 training.label_smoothing 0.003445561395947351 +232 22 model.embedding_dim 0.0 +232 22 training.batch_size 2.0 +232 22 training.label_smoothing 0.0013431089249619636 +232 23 model.embedding_dim 0.0 +232 23 training.batch_size 2.0 +232 23 training.label_smoothing 0.9584197887873409 +232 24 model.embedding_dim 2.0 +232 24 training.batch_size 2.0 +232 24 training.label_smoothing 0.049148796820796675 +232 25 model.embedding_dim 0.0 +232 25 training.batch_size 1.0 +232 25 training.label_smoothing 0.17246786826568072 +232 26 model.embedding_dim 1.0 +232 26 training.batch_size 0.0 +232 26 training.label_smoothing 0.637953800139033 +232 27 model.embedding_dim 2.0 +232 27 training.batch_size 1.0 +232 27 training.label_smoothing 0.2709149971316762 +232 28 model.embedding_dim 2.0 +232 28 training.batch_size 2.0 +232 28 training.label_smoothing 0.050298497343246616 +232 29 model.embedding_dim 0.0 +232 29 training.batch_size 2.0 +232 29 training.label_smoothing 0.007277346472798441 +232 30 
model.embedding_dim 1.0 +232 30 training.batch_size 2.0 +232 30 training.label_smoothing 0.9055772250410652 +232 31 model.embedding_dim 0.0 +232 31 training.batch_size 0.0 +232 31 training.label_smoothing 0.12704254233696582 +232 32 model.embedding_dim 2.0 +232 32 training.batch_size 0.0 +232 32 training.label_smoothing 0.07883747984050633 +232 33 model.embedding_dim 0.0 +232 33 training.batch_size 2.0 +232 33 training.label_smoothing 0.001088809365197149 +232 34 model.embedding_dim 0.0 +232 34 training.batch_size 2.0 +232 34 training.label_smoothing 0.007478471064351869 +232 35 model.embedding_dim 1.0 +232 35 training.batch_size 2.0 +232 35 training.label_smoothing 0.0023905791751424 +232 36 model.embedding_dim 1.0 +232 36 training.batch_size 0.0 +232 36 training.label_smoothing 0.003896618011375072 +232 37 model.embedding_dim 0.0 +232 37 training.batch_size 1.0 +232 37 training.label_smoothing 0.004772931687621799 +232 38 model.embedding_dim 0.0 +232 38 training.batch_size 0.0 +232 38 training.label_smoothing 0.6208231180235568 +232 39 model.embedding_dim 1.0 +232 39 training.batch_size 1.0 +232 39 training.label_smoothing 0.002443270007838657 +232 40 model.embedding_dim 0.0 +232 40 training.batch_size 2.0 +232 40 training.label_smoothing 0.002013875349395678 +232 41 model.embedding_dim 2.0 +232 41 training.batch_size 1.0 +232 41 training.label_smoothing 0.09474642181659888 +232 42 model.embedding_dim 2.0 +232 42 training.batch_size 1.0 +232 42 training.label_smoothing 0.008673758078030763 +232 43 model.embedding_dim 1.0 +232 43 training.batch_size 2.0 +232 43 training.label_smoothing 0.0013595335244839673 +232 44 model.embedding_dim 1.0 +232 44 training.batch_size 1.0 +232 44 training.label_smoothing 0.006811214216311431 +232 45 model.embedding_dim 0.0 +232 45 training.batch_size 1.0 +232 45 training.label_smoothing 0.0012488515546360422 +232 46 model.embedding_dim 0.0 +232 46 training.batch_size 0.0 +232 46 training.label_smoothing 0.01914246976978456 +232 47 
model.embedding_dim 0.0 +232 47 training.batch_size 1.0 +232 47 training.label_smoothing 0.8788518832398876 +232 48 model.embedding_dim 1.0 +232 48 training.batch_size 0.0 +232 48 training.label_smoothing 0.036214027561504755 +232 49 model.embedding_dim 0.0 +232 49 training.batch_size 0.0 +232 49 training.label_smoothing 0.0010699134447702701 +232 50 model.embedding_dim 0.0 +232 50 training.batch_size 2.0 +232 50 training.label_smoothing 0.10234087754140987 +232 51 model.embedding_dim 0.0 +232 51 training.batch_size 1.0 +232 51 training.label_smoothing 0.004721121776592804 +232 52 model.embedding_dim 1.0 +232 52 training.batch_size 1.0 +232 52 training.label_smoothing 0.002470692847035349 +232 53 model.embedding_dim 1.0 +232 53 training.batch_size 1.0 +232 53 training.label_smoothing 0.3052526741867838 +232 54 model.embedding_dim 2.0 +232 54 training.batch_size 2.0 +232 54 training.label_smoothing 0.005604083734009418 +232 55 model.embedding_dim 1.0 +232 55 training.batch_size 1.0 +232 55 training.label_smoothing 0.6453159491129222 +232 56 model.embedding_dim 0.0 +232 56 training.batch_size 1.0 +232 56 training.label_smoothing 0.8817375702397419 +232 57 model.embedding_dim 0.0 +232 57 training.batch_size 0.0 +232 57 training.label_smoothing 0.006081019568735273 +232 58 model.embedding_dim 1.0 +232 58 training.batch_size 2.0 +232 58 training.label_smoothing 0.2879312510268512 +232 59 model.embedding_dim 1.0 +232 59 training.batch_size 1.0 +232 59 training.label_smoothing 0.009424770751725129 +232 60 model.embedding_dim 1.0 +232 60 training.batch_size 1.0 +232 60 training.label_smoothing 0.001700462414875636 +232 61 model.embedding_dim 2.0 +232 61 training.batch_size 2.0 +232 61 training.label_smoothing 0.1963090773225054 +232 62 model.embedding_dim 1.0 +232 62 training.batch_size 2.0 +232 62 training.label_smoothing 0.33664078718638696 +232 63 model.embedding_dim 1.0 +232 63 training.batch_size 2.0 +232 63 training.label_smoothing 0.007580407992090538 +232 64 
model.embedding_dim 1.0 +232 64 training.batch_size 2.0 +232 64 training.label_smoothing 0.024694805624222986 +232 65 model.embedding_dim 0.0 +232 65 training.batch_size 0.0 +232 65 training.label_smoothing 0.040618412100915616 +232 66 model.embedding_dim 2.0 +232 66 training.batch_size 1.0 +232 66 training.label_smoothing 0.41737939395711143 +232 67 model.embedding_dim 2.0 +232 67 training.batch_size 0.0 +232 67 training.label_smoothing 0.41459528048206745 +232 68 model.embedding_dim 2.0 +232 68 training.batch_size 1.0 +232 68 training.label_smoothing 0.00941883707162411 +232 69 model.embedding_dim 2.0 +232 69 training.batch_size 2.0 +232 69 training.label_smoothing 0.08662132455327347 +232 70 model.embedding_dim 2.0 +232 70 training.batch_size 2.0 +232 70 training.label_smoothing 0.011780203338346854 +232 71 model.embedding_dim 1.0 +232 71 training.batch_size 0.0 +232 71 training.label_smoothing 0.029489702243249226 +232 72 model.embedding_dim 0.0 +232 72 training.batch_size 2.0 +232 72 training.label_smoothing 0.004807473546862822 +232 73 model.embedding_dim 1.0 +232 73 training.batch_size 0.0 +232 73 training.label_smoothing 0.2829106678754144 +232 74 model.embedding_dim 0.0 +232 74 training.batch_size 0.0 +232 74 training.label_smoothing 0.01382708061455956 +232 75 model.embedding_dim 2.0 +232 75 training.batch_size 0.0 +232 75 training.label_smoothing 0.7382307488988924 +232 76 model.embedding_dim 1.0 +232 76 training.batch_size 2.0 +232 76 training.label_smoothing 0.0012295942258017088 +232 77 model.embedding_dim 0.0 +232 77 training.batch_size 1.0 +232 77 training.label_smoothing 0.0028827785664926773 +232 78 model.embedding_dim 1.0 +232 78 training.batch_size 0.0 +232 78 training.label_smoothing 0.7269425406328284 +232 79 model.embedding_dim 2.0 +232 79 training.batch_size 2.0 +232 79 training.label_smoothing 0.13978414313008458 +232 80 model.embedding_dim 1.0 +232 80 training.batch_size 2.0 +232 80 training.label_smoothing 0.018235026548434757 +232 81 
model.embedding_dim 1.0 +232 81 training.batch_size 2.0 +232 81 training.label_smoothing 0.01495826914501284 +232 82 model.embedding_dim 1.0 +232 82 training.batch_size 1.0 +232 82 training.label_smoothing 0.06588865177204778 +232 83 model.embedding_dim 1.0 +232 83 training.batch_size 2.0 +232 83 training.label_smoothing 0.06779475588645188 +232 84 model.embedding_dim 1.0 +232 84 training.batch_size 2.0 +232 84 training.label_smoothing 0.013831999929731546 +232 85 model.embedding_dim 1.0 +232 85 training.batch_size 1.0 +232 85 training.label_smoothing 0.0013834997660531537 +232 86 model.embedding_dim 0.0 +232 86 training.batch_size 2.0 +232 86 training.label_smoothing 0.04264894215925859 +232 87 model.embedding_dim 0.0 +232 87 training.batch_size 2.0 +232 87 training.label_smoothing 0.10862350887887076 +232 88 model.embedding_dim 1.0 +232 88 training.batch_size 0.0 +232 88 training.label_smoothing 0.4345928340834619 +232 89 model.embedding_dim 0.0 +232 89 training.batch_size 2.0 +232 89 training.label_smoothing 0.001033691902467648 +232 90 model.embedding_dim 1.0 +232 90 training.batch_size 0.0 +232 90 training.label_smoothing 0.13940834153634 +232 91 model.embedding_dim 1.0 +232 91 training.batch_size 1.0 +232 91 training.label_smoothing 0.0015186660784268426 +232 92 model.embedding_dim 2.0 +232 92 training.batch_size 0.0 +232 92 training.label_smoothing 0.331422000274578 +232 93 model.embedding_dim 2.0 +232 93 training.batch_size 0.0 +232 93 training.label_smoothing 0.01067225366468675 +232 94 model.embedding_dim 0.0 +232 94 training.batch_size 0.0 +232 94 training.label_smoothing 0.01124385237850143 +232 95 model.embedding_dim 2.0 +232 95 training.batch_size 2.0 +232 95 training.label_smoothing 0.03347540101375911 +232 96 model.embedding_dim 2.0 +232 96 training.batch_size 2.0 +232 96 training.label_smoothing 0.0011007737625642552 +232 97 model.embedding_dim 0.0 +232 97 training.batch_size 2.0 +232 97 training.label_smoothing 0.05314376341826025 +232 98 
model.embedding_dim 0.0 +232 98 training.batch_size 0.0 +232 98 training.label_smoothing 0.016785775197908533 +232 99 model.embedding_dim 2.0 +232 99 training.batch_size 0.0 +232 99 training.label_smoothing 0.055566960184679 +232 100 model.embedding_dim 0.0 +232 100 training.batch_size 1.0 +232 100 training.label_smoothing 0.08059310419839777 +232 1 dataset """kinships""" +232 1 model """ermlp""" +232 1 loss """crossentropy""" +232 1 regularizer """no""" +232 1 optimizer """adadelta""" +232 1 training_loop """lcwa""" +232 1 evaluator """rankbased""" +232 2 dataset """kinships""" +232 2 model """ermlp""" +232 2 loss """crossentropy""" +232 2 regularizer """no""" +232 2 optimizer """adadelta""" +232 2 training_loop """lcwa""" +232 2 evaluator """rankbased""" +232 3 dataset """kinships""" +232 3 model """ermlp""" +232 3 loss """crossentropy""" +232 3 regularizer """no""" +232 3 optimizer """adadelta""" +232 3 training_loop """lcwa""" +232 3 evaluator """rankbased""" +232 4 dataset """kinships""" +232 4 model """ermlp""" +232 4 loss """crossentropy""" +232 4 regularizer """no""" +232 4 optimizer """adadelta""" +232 4 training_loop """lcwa""" +232 4 evaluator """rankbased""" +232 5 dataset """kinships""" +232 5 model """ermlp""" +232 5 loss """crossentropy""" +232 5 regularizer """no""" +232 5 optimizer """adadelta""" +232 5 training_loop """lcwa""" +232 5 evaluator """rankbased""" +232 6 dataset """kinships""" +232 6 model """ermlp""" +232 6 loss """crossentropy""" +232 6 regularizer """no""" +232 6 optimizer """adadelta""" +232 6 training_loop """lcwa""" +232 6 evaluator """rankbased""" +232 7 dataset """kinships""" +232 7 model """ermlp""" +232 7 loss """crossentropy""" +232 7 regularizer """no""" +232 7 optimizer """adadelta""" +232 7 training_loop """lcwa""" +232 7 evaluator """rankbased""" +232 8 dataset """kinships""" +232 8 model """ermlp""" +232 8 loss """crossentropy""" +232 8 regularizer """no""" +232 8 optimizer """adadelta""" +232 8 training_loop """lcwa""" 
+232 8 evaluator """rankbased""" +232 9 dataset """kinships""" +232 9 model """ermlp""" +232 9 loss """crossentropy""" +232 9 regularizer """no""" +232 9 optimizer """adadelta""" +232 9 training_loop """lcwa""" +232 9 evaluator """rankbased""" +232 10 dataset """kinships""" +232 10 model """ermlp""" +232 10 loss """crossentropy""" +232 10 regularizer """no""" +232 10 optimizer """adadelta""" +232 10 training_loop """lcwa""" +232 10 evaluator """rankbased""" +232 11 dataset """kinships""" +232 11 model """ermlp""" +232 11 loss """crossentropy""" +232 11 regularizer """no""" +232 11 optimizer """adadelta""" +232 11 training_loop """lcwa""" +232 11 evaluator """rankbased""" +232 12 dataset """kinships""" +232 12 model """ermlp""" +232 12 loss """crossentropy""" +232 12 regularizer """no""" +232 12 optimizer """adadelta""" +232 12 training_loop """lcwa""" +232 12 evaluator """rankbased""" +232 13 dataset """kinships""" +232 13 model """ermlp""" +232 13 loss """crossentropy""" +232 13 regularizer """no""" +232 13 optimizer """adadelta""" +232 13 training_loop """lcwa""" +232 13 evaluator """rankbased""" +232 14 dataset """kinships""" +232 14 model """ermlp""" +232 14 loss """crossentropy""" +232 14 regularizer """no""" +232 14 optimizer """adadelta""" +232 14 training_loop """lcwa""" +232 14 evaluator """rankbased""" +232 15 dataset """kinships""" +232 15 model """ermlp""" +232 15 loss """crossentropy""" +232 15 regularizer """no""" +232 15 optimizer """adadelta""" +232 15 training_loop """lcwa""" +232 15 evaluator """rankbased""" +232 16 dataset """kinships""" +232 16 model """ermlp""" +232 16 loss """crossentropy""" +232 16 regularizer """no""" +232 16 optimizer """adadelta""" +232 16 training_loop """lcwa""" +232 16 evaluator """rankbased""" +232 17 dataset """kinships""" +232 17 model """ermlp""" +232 17 loss """crossentropy""" +232 17 regularizer """no""" +232 17 optimizer """adadelta""" +232 17 training_loop """lcwa""" +232 17 evaluator """rankbased""" +232 18 
dataset """kinships""" +232 18 model """ermlp""" +232 18 loss """crossentropy""" +232 18 regularizer """no""" +232 18 optimizer """adadelta""" +232 18 training_loop """lcwa""" +232 18 evaluator """rankbased""" +232 19 dataset """kinships""" +232 19 model """ermlp""" +232 19 loss """crossentropy""" +232 19 regularizer """no""" +232 19 optimizer """adadelta""" +232 19 training_loop """lcwa""" +232 19 evaluator """rankbased""" +232 20 dataset """kinships""" +232 20 model """ermlp""" +232 20 loss """crossentropy""" +232 20 regularizer """no""" +232 20 optimizer """adadelta""" +232 20 training_loop """lcwa""" +232 20 evaluator """rankbased""" +232 21 dataset """kinships""" +232 21 model """ermlp""" +232 21 loss """crossentropy""" +232 21 regularizer """no""" +232 21 optimizer """adadelta""" +232 21 training_loop """lcwa""" +232 21 evaluator """rankbased""" +232 22 dataset """kinships""" +232 22 model """ermlp""" +232 22 loss """crossentropy""" +232 22 regularizer """no""" +232 22 optimizer """adadelta""" +232 22 training_loop """lcwa""" +232 22 evaluator """rankbased""" +232 23 dataset """kinships""" +232 23 model """ermlp""" +232 23 loss """crossentropy""" +232 23 regularizer """no""" +232 23 optimizer """adadelta""" +232 23 training_loop """lcwa""" +232 23 evaluator """rankbased""" +232 24 dataset """kinships""" +232 24 model """ermlp""" +232 24 loss """crossentropy""" +232 24 regularizer """no""" +232 24 optimizer """adadelta""" +232 24 training_loop """lcwa""" +232 24 evaluator """rankbased""" +232 25 dataset """kinships""" +232 25 model """ermlp""" +232 25 loss """crossentropy""" +232 25 regularizer """no""" +232 25 optimizer """adadelta""" +232 25 training_loop """lcwa""" +232 25 evaluator """rankbased""" +232 26 dataset """kinships""" +232 26 model """ermlp""" +232 26 loss """crossentropy""" +232 26 regularizer """no""" +232 26 optimizer """adadelta""" +232 26 training_loop """lcwa""" +232 26 evaluator """rankbased""" +232 27 dataset """kinships""" +232 27 model 
"""ermlp""" +232 27 loss """crossentropy""" +232 27 regularizer """no""" +232 27 optimizer """adadelta""" +232 27 training_loop """lcwa""" +232 27 evaluator """rankbased""" +232 28 dataset """kinships""" +232 28 model """ermlp""" +232 28 loss """crossentropy""" +232 28 regularizer """no""" +232 28 optimizer """adadelta""" +232 28 training_loop """lcwa""" +232 28 evaluator """rankbased""" +232 29 dataset """kinships""" +232 29 model """ermlp""" +232 29 loss """crossentropy""" +232 29 regularizer """no""" +232 29 optimizer """adadelta""" +232 29 training_loop """lcwa""" +232 29 evaluator """rankbased""" +232 30 dataset """kinships""" +232 30 model """ermlp""" +232 30 loss """crossentropy""" +232 30 regularizer """no""" +232 30 optimizer """adadelta""" +232 30 training_loop """lcwa""" +232 30 evaluator """rankbased""" +232 31 dataset """kinships""" +232 31 model """ermlp""" +232 31 loss """crossentropy""" +232 31 regularizer """no""" +232 31 optimizer """adadelta""" +232 31 training_loop """lcwa""" +232 31 evaluator """rankbased""" +232 32 dataset """kinships""" +232 32 model """ermlp""" +232 32 loss """crossentropy""" +232 32 regularizer """no""" +232 32 optimizer """adadelta""" +232 32 training_loop """lcwa""" +232 32 evaluator """rankbased""" +232 33 dataset """kinships""" +232 33 model """ermlp""" +232 33 loss """crossentropy""" +232 33 regularizer """no""" +232 33 optimizer """adadelta""" +232 33 training_loop """lcwa""" +232 33 evaluator """rankbased""" +232 34 dataset """kinships""" +232 34 model """ermlp""" +232 34 loss """crossentropy""" +232 34 regularizer """no""" +232 34 optimizer """adadelta""" +232 34 training_loop """lcwa""" +232 34 evaluator """rankbased""" +232 35 dataset """kinships""" +232 35 model """ermlp""" +232 35 loss """crossentropy""" +232 35 regularizer """no""" +232 35 optimizer """adadelta""" +232 35 training_loop """lcwa""" +232 35 evaluator """rankbased""" +232 36 dataset """kinships""" +232 36 model """ermlp""" +232 36 loss 
"""crossentropy""" +232 36 regularizer """no""" +232 36 optimizer """adadelta""" +232 36 training_loop """lcwa""" +232 36 evaluator """rankbased""" +232 37 dataset """kinships""" +232 37 model """ermlp""" +232 37 loss """crossentropy""" +232 37 regularizer """no""" +232 37 optimizer """adadelta""" +232 37 training_loop """lcwa""" +232 37 evaluator """rankbased""" +232 38 dataset """kinships""" +232 38 model """ermlp""" +232 38 loss """crossentropy""" +232 38 regularizer """no""" +232 38 optimizer """adadelta""" +232 38 training_loop """lcwa""" +232 38 evaluator """rankbased""" +232 39 dataset """kinships""" +232 39 model """ermlp""" +232 39 loss """crossentropy""" +232 39 regularizer """no""" +232 39 optimizer """adadelta""" +232 39 training_loop """lcwa""" +232 39 evaluator """rankbased""" +232 40 dataset """kinships""" +232 40 model """ermlp""" +232 40 loss """crossentropy""" +232 40 regularizer """no""" +232 40 optimizer """adadelta""" +232 40 training_loop """lcwa""" +232 40 evaluator """rankbased""" +232 41 dataset """kinships""" +232 41 model """ermlp""" +232 41 loss """crossentropy""" +232 41 regularizer """no""" +232 41 optimizer """adadelta""" +232 41 training_loop """lcwa""" +232 41 evaluator """rankbased""" +232 42 dataset """kinships""" +232 42 model """ermlp""" +232 42 loss """crossentropy""" +232 42 regularizer """no""" +232 42 optimizer """adadelta""" +232 42 training_loop """lcwa""" +232 42 evaluator """rankbased""" +232 43 dataset """kinships""" +232 43 model """ermlp""" +232 43 loss """crossentropy""" +232 43 regularizer """no""" +232 43 optimizer """adadelta""" +232 43 training_loop """lcwa""" +232 43 evaluator """rankbased""" +232 44 dataset """kinships""" +232 44 model """ermlp""" +232 44 loss """crossentropy""" +232 44 regularizer """no""" +232 44 optimizer """adadelta""" +232 44 training_loop """lcwa""" +232 44 evaluator """rankbased""" +232 45 dataset """kinships""" +232 45 model """ermlp""" +232 45 loss """crossentropy""" +232 45 
regularizer """no""" +232 45 optimizer """adadelta""" +232 45 training_loop """lcwa""" +232 45 evaluator """rankbased""" +232 46 dataset """kinships""" +232 46 model """ermlp""" +232 46 loss """crossentropy""" +232 46 regularizer """no""" +232 46 optimizer """adadelta""" +232 46 training_loop """lcwa""" +232 46 evaluator """rankbased""" +232 47 dataset """kinships""" +232 47 model """ermlp""" +232 47 loss """crossentropy""" +232 47 regularizer """no""" +232 47 optimizer """adadelta""" +232 47 training_loop """lcwa""" +232 47 evaluator """rankbased""" +232 48 dataset """kinships""" +232 48 model """ermlp""" +232 48 loss """crossentropy""" +232 48 regularizer """no""" +232 48 optimizer """adadelta""" +232 48 training_loop """lcwa""" +232 48 evaluator """rankbased""" +232 49 dataset """kinships""" +232 49 model """ermlp""" +232 49 loss """crossentropy""" +232 49 regularizer """no""" +232 49 optimizer """adadelta""" +232 49 training_loop """lcwa""" +232 49 evaluator """rankbased""" +232 50 dataset """kinships""" +232 50 model """ermlp""" +232 50 loss """crossentropy""" +232 50 regularizer """no""" +232 50 optimizer """adadelta""" +232 50 training_loop """lcwa""" +232 50 evaluator """rankbased""" +232 51 dataset """kinships""" +232 51 model """ermlp""" +232 51 loss """crossentropy""" +232 51 regularizer """no""" +232 51 optimizer """adadelta""" +232 51 training_loop """lcwa""" +232 51 evaluator """rankbased""" +232 52 dataset """kinships""" +232 52 model """ermlp""" +232 52 loss """crossentropy""" +232 52 regularizer """no""" +232 52 optimizer """adadelta""" +232 52 training_loop """lcwa""" +232 52 evaluator """rankbased""" +232 53 dataset """kinships""" +232 53 model """ermlp""" +232 53 loss """crossentropy""" +232 53 regularizer """no""" +232 53 optimizer """adadelta""" +232 53 training_loop """lcwa""" +232 53 evaluator """rankbased""" +232 54 dataset """kinships""" +232 54 model """ermlp""" +232 54 loss """crossentropy""" +232 54 regularizer """no""" +232 54 
optimizer """adadelta""" +232 54 training_loop """lcwa""" +232 54 evaluator """rankbased""" +232 55 dataset """kinships""" +232 55 model """ermlp""" +232 55 loss """crossentropy""" +232 55 regularizer """no""" +232 55 optimizer """adadelta""" +232 55 training_loop """lcwa""" +232 55 evaluator """rankbased""" +232 56 dataset """kinships""" +232 56 model """ermlp""" +232 56 loss """crossentropy""" +232 56 regularizer """no""" +232 56 optimizer """adadelta""" +232 56 training_loop """lcwa""" +232 56 evaluator """rankbased""" +232 57 dataset """kinships""" +232 57 model """ermlp""" +232 57 loss """crossentropy""" +232 57 regularizer """no""" +232 57 optimizer """adadelta""" +232 57 training_loop """lcwa""" +232 57 evaluator """rankbased""" +232 58 dataset """kinships""" +232 58 model """ermlp""" +232 58 loss """crossentropy""" +232 58 regularizer """no""" +232 58 optimizer """adadelta""" +232 58 training_loop """lcwa""" +232 58 evaluator """rankbased""" +232 59 dataset """kinships""" +232 59 model """ermlp""" +232 59 loss """crossentropy""" +232 59 regularizer """no""" +232 59 optimizer """adadelta""" +232 59 training_loop """lcwa""" +232 59 evaluator """rankbased""" +232 60 dataset """kinships""" +232 60 model """ermlp""" +232 60 loss """crossentropy""" +232 60 regularizer """no""" +232 60 optimizer """adadelta""" +232 60 training_loop """lcwa""" +232 60 evaluator """rankbased""" +232 61 dataset """kinships""" +232 61 model """ermlp""" +232 61 loss """crossentropy""" +232 61 regularizer """no""" +232 61 optimizer """adadelta""" +232 61 training_loop """lcwa""" +232 61 evaluator """rankbased""" +232 62 dataset """kinships""" +232 62 model """ermlp""" +232 62 loss """crossentropy""" +232 62 regularizer """no""" +232 62 optimizer """adadelta""" +232 62 training_loop """lcwa""" +232 62 evaluator """rankbased""" +232 63 dataset """kinships""" +232 63 model """ermlp""" +232 63 loss """crossentropy""" +232 63 regularizer """no""" +232 63 optimizer """adadelta""" +232 63 
training_loop """lcwa""" +232 63 evaluator """rankbased""" +232 64 dataset """kinships""" +232 64 model """ermlp""" +232 64 loss """crossentropy""" +232 64 regularizer """no""" +232 64 optimizer """adadelta""" +232 64 training_loop """lcwa""" +232 64 evaluator """rankbased""" +232 65 dataset """kinships""" +232 65 model """ermlp""" +232 65 loss """crossentropy""" +232 65 regularizer """no""" +232 65 optimizer """adadelta""" +232 65 training_loop """lcwa""" +232 65 evaluator """rankbased""" +232 66 dataset """kinships""" +232 66 model """ermlp""" +232 66 loss """crossentropy""" +232 66 regularizer """no""" +232 66 optimizer """adadelta""" +232 66 training_loop """lcwa""" +232 66 evaluator """rankbased""" +232 67 dataset """kinships""" +232 67 model """ermlp""" +232 67 loss """crossentropy""" +232 67 regularizer """no""" +232 67 optimizer """adadelta""" +232 67 training_loop """lcwa""" +232 67 evaluator """rankbased""" +232 68 dataset """kinships""" +232 68 model """ermlp""" +232 68 loss """crossentropy""" +232 68 regularizer """no""" +232 68 optimizer """adadelta""" +232 68 training_loop """lcwa""" +232 68 evaluator """rankbased""" +232 69 dataset """kinships""" +232 69 model """ermlp""" +232 69 loss """crossentropy""" +232 69 regularizer """no""" +232 69 optimizer """adadelta""" +232 69 training_loop """lcwa""" +232 69 evaluator """rankbased""" +232 70 dataset """kinships""" +232 70 model """ermlp""" +232 70 loss """crossentropy""" +232 70 regularizer """no""" +232 70 optimizer """adadelta""" +232 70 training_loop """lcwa""" +232 70 evaluator """rankbased""" +232 71 dataset """kinships""" +232 71 model """ermlp""" +232 71 loss """crossentropy""" +232 71 regularizer """no""" +232 71 optimizer """adadelta""" +232 71 training_loop """lcwa""" +232 71 evaluator """rankbased""" +232 72 dataset """kinships""" +232 72 model """ermlp""" +232 72 loss """crossentropy""" +232 72 regularizer """no""" +232 72 optimizer """adadelta""" +232 72 training_loop """lcwa""" +232 72 
evaluator """rankbased""" +232 73 dataset """kinships""" +232 73 model """ermlp""" +232 73 loss """crossentropy""" +232 73 regularizer """no""" +232 73 optimizer """adadelta""" +232 73 training_loop """lcwa""" +232 73 evaluator """rankbased""" +232 74 dataset """kinships""" +232 74 model """ermlp""" +232 74 loss """crossentropy""" +232 74 regularizer """no""" +232 74 optimizer """adadelta""" +232 74 training_loop """lcwa""" +232 74 evaluator """rankbased""" +232 75 dataset """kinships""" +232 75 model """ermlp""" +232 75 loss """crossentropy""" +232 75 regularizer """no""" +232 75 optimizer """adadelta""" +232 75 training_loop """lcwa""" +232 75 evaluator """rankbased""" +232 76 dataset """kinships""" +232 76 model """ermlp""" +232 76 loss """crossentropy""" +232 76 regularizer """no""" +232 76 optimizer """adadelta""" +232 76 training_loop """lcwa""" +232 76 evaluator """rankbased""" +232 77 dataset """kinships""" +232 77 model """ermlp""" +232 77 loss """crossentropy""" +232 77 regularizer """no""" +232 77 optimizer """adadelta""" +232 77 training_loop """lcwa""" +232 77 evaluator """rankbased""" +232 78 dataset """kinships""" +232 78 model """ermlp""" +232 78 loss """crossentropy""" +232 78 regularizer """no""" +232 78 optimizer """adadelta""" +232 78 training_loop """lcwa""" +232 78 evaluator """rankbased""" +232 79 dataset """kinships""" +232 79 model """ermlp""" +232 79 loss """crossentropy""" +232 79 regularizer """no""" +232 79 optimizer """adadelta""" +232 79 training_loop """lcwa""" +232 79 evaluator """rankbased""" +232 80 dataset """kinships""" +232 80 model """ermlp""" +232 80 loss """crossentropy""" +232 80 regularizer """no""" +232 80 optimizer """adadelta""" +232 80 training_loop """lcwa""" +232 80 evaluator """rankbased""" +232 81 dataset """kinships""" +232 81 model """ermlp""" +232 81 loss """crossentropy""" +232 81 regularizer """no""" +232 81 optimizer """adadelta""" +232 81 training_loop """lcwa""" +232 81 evaluator """rankbased""" +232 82 
dataset """kinships""" +232 82 model """ermlp""" +232 82 loss """crossentropy""" +232 82 regularizer """no""" +232 82 optimizer """adadelta""" +232 82 training_loop """lcwa""" +232 82 evaluator """rankbased""" +232 83 dataset """kinships""" +232 83 model """ermlp""" +232 83 loss """crossentropy""" +232 83 regularizer """no""" +232 83 optimizer """adadelta""" +232 83 training_loop """lcwa""" +232 83 evaluator """rankbased""" +232 84 dataset """kinships""" +232 84 model """ermlp""" +232 84 loss """crossentropy""" +232 84 regularizer """no""" +232 84 optimizer """adadelta""" +232 84 training_loop """lcwa""" +232 84 evaluator """rankbased""" +232 85 dataset """kinships""" +232 85 model """ermlp""" +232 85 loss """crossentropy""" +232 85 regularizer """no""" +232 85 optimizer """adadelta""" +232 85 training_loop """lcwa""" +232 85 evaluator """rankbased""" +232 86 dataset """kinships""" +232 86 model """ermlp""" +232 86 loss """crossentropy""" +232 86 regularizer """no""" +232 86 optimizer """adadelta""" +232 86 training_loop """lcwa""" +232 86 evaluator """rankbased""" +232 87 dataset """kinships""" +232 87 model """ermlp""" +232 87 loss """crossentropy""" +232 87 regularizer """no""" +232 87 optimizer """adadelta""" +232 87 training_loop """lcwa""" +232 87 evaluator """rankbased""" +232 88 dataset """kinships""" +232 88 model """ermlp""" +232 88 loss """crossentropy""" +232 88 regularizer """no""" +232 88 optimizer """adadelta""" +232 88 training_loop """lcwa""" +232 88 evaluator """rankbased""" +232 89 dataset """kinships""" +232 89 model """ermlp""" +232 89 loss """crossentropy""" +232 89 regularizer """no""" +232 89 optimizer """adadelta""" +232 89 training_loop """lcwa""" +232 89 evaluator """rankbased""" +232 90 dataset """kinships""" +232 90 model """ermlp""" +232 90 loss """crossentropy""" +232 90 regularizer """no""" +232 90 optimizer """adadelta""" +232 90 training_loop """lcwa""" +232 90 evaluator """rankbased""" +232 91 dataset """kinships""" +232 91 model 
"""ermlp""" +232 91 loss """crossentropy""" +232 91 regularizer """no""" +232 91 optimizer """adadelta""" +232 91 training_loop """lcwa""" +232 91 evaluator """rankbased""" +232 92 dataset """kinships""" +232 92 model """ermlp""" +232 92 loss """crossentropy""" +232 92 regularizer """no""" +232 92 optimizer """adadelta""" +232 92 training_loop """lcwa""" +232 92 evaluator """rankbased""" +232 93 dataset """kinships""" +232 93 model """ermlp""" +232 93 loss """crossentropy""" +232 93 regularizer """no""" +232 93 optimizer """adadelta""" +232 93 training_loop """lcwa""" +232 93 evaluator """rankbased""" +232 94 dataset """kinships""" +232 94 model """ermlp""" +232 94 loss """crossentropy""" +232 94 regularizer """no""" +232 94 optimizer """adadelta""" +232 94 training_loop """lcwa""" +232 94 evaluator """rankbased""" +232 95 dataset """kinships""" +232 95 model """ermlp""" +232 95 loss """crossentropy""" +232 95 regularizer """no""" +232 95 optimizer """adadelta""" +232 95 training_loop """lcwa""" +232 95 evaluator """rankbased""" +232 96 dataset """kinships""" +232 96 model """ermlp""" +232 96 loss """crossentropy""" +232 96 regularizer """no""" +232 96 optimizer """adadelta""" +232 96 training_loop """lcwa""" +232 96 evaluator """rankbased""" +232 97 dataset """kinships""" +232 97 model """ermlp""" +232 97 loss """crossentropy""" +232 97 regularizer """no""" +232 97 optimizer """adadelta""" +232 97 training_loop """lcwa""" +232 97 evaluator """rankbased""" +232 98 dataset """kinships""" +232 98 model """ermlp""" +232 98 loss """crossentropy""" +232 98 regularizer """no""" +232 98 optimizer """adadelta""" +232 98 training_loop """lcwa""" +232 98 evaluator """rankbased""" +232 99 dataset """kinships""" +232 99 model """ermlp""" +232 99 loss """crossentropy""" +232 99 regularizer """no""" +232 99 optimizer """adadelta""" +232 99 training_loop """lcwa""" +232 99 evaluator """rankbased""" +232 100 dataset """kinships""" +232 100 model """ermlp""" +232 100 loss 
"""crossentropy""" +232 100 regularizer """no""" +232 100 optimizer """adadelta""" +232 100 training_loop """lcwa""" +232 100 evaluator """rankbased""" +233 1 model.embedding_dim 2.0 +233 1 training.batch_size 1.0 +233 1 training.label_smoothing 0.7684510892344353 +233 2 model.embedding_dim 1.0 +233 2 training.batch_size 0.0 +233 2 training.label_smoothing 0.14675654467097607 +233 3 model.embedding_dim 2.0 +233 3 training.batch_size 2.0 +233 3 training.label_smoothing 0.0377565030588669 +233 4 model.embedding_dim 0.0 +233 4 training.batch_size 2.0 +233 4 training.label_smoothing 0.16085746534702172 +233 5 model.embedding_dim 0.0 +233 5 training.batch_size 0.0 +233 5 training.label_smoothing 0.002891168644863496 +233 6 model.embedding_dim 0.0 +233 6 training.batch_size 1.0 +233 6 training.label_smoothing 0.06918789677138373 +233 7 model.embedding_dim 1.0 +233 7 training.batch_size 0.0 +233 7 training.label_smoothing 0.008325364693972177 +233 8 model.embedding_dim 2.0 +233 8 training.batch_size 0.0 +233 8 training.label_smoothing 0.37072233672912924 +233 9 model.embedding_dim 1.0 +233 9 training.batch_size 2.0 +233 9 training.label_smoothing 0.011745787719518173 +233 10 model.embedding_dim 1.0 +233 10 training.batch_size 2.0 +233 10 training.label_smoothing 0.001484111747059835 +233 11 model.embedding_dim 0.0 +233 11 training.batch_size 0.0 +233 11 training.label_smoothing 0.0012230232083702264 +233 12 model.embedding_dim 2.0 +233 12 training.batch_size 2.0 +233 12 training.label_smoothing 0.008075056349256758 +233 13 model.embedding_dim 1.0 +233 13 training.batch_size 1.0 +233 13 training.label_smoothing 0.004747694189053304 +233 14 model.embedding_dim 0.0 +233 14 training.batch_size 2.0 +233 14 training.label_smoothing 0.010328098956968897 +233 15 model.embedding_dim 0.0 +233 15 training.batch_size 2.0 +233 15 training.label_smoothing 0.00514383227010015 +233 16 model.embedding_dim 0.0 +233 16 training.batch_size 0.0 +233 16 training.label_smoothing 
0.0019696003842192725 +233 17 model.embedding_dim 0.0 +233 17 training.batch_size 0.0 +233 17 training.label_smoothing 0.0028716373290755847 +233 18 model.embedding_dim 1.0 +233 18 training.batch_size 1.0 +233 18 training.label_smoothing 0.00408495408445389 +233 19 model.embedding_dim 0.0 +233 19 training.batch_size 0.0 +233 19 training.label_smoothing 0.0012051619115166898 +233 20 model.embedding_dim 2.0 +233 20 training.batch_size 0.0 +233 20 training.label_smoothing 0.6234803075289218 +233 21 model.embedding_dim 2.0 +233 21 training.batch_size 1.0 +233 21 training.label_smoothing 0.25338233075048977 +233 22 model.embedding_dim 2.0 +233 22 training.batch_size 0.0 +233 22 training.label_smoothing 0.005307715843162983 +233 23 model.embedding_dim 2.0 +233 23 training.batch_size 1.0 +233 23 training.label_smoothing 0.0033122532881554867 +233 24 model.embedding_dim 0.0 +233 24 training.batch_size 2.0 +233 24 training.label_smoothing 0.007231274679759177 +233 25 model.embedding_dim 1.0 +233 25 training.batch_size 0.0 +233 25 training.label_smoothing 0.04758898331150468 +233 26 model.embedding_dim 0.0 +233 26 training.batch_size 2.0 +233 26 training.label_smoothing 0.021820902018935967 +233 27 model.embedding_dim 0.0 +233 27 training.batch_size 1.0 +233 27 training.label_smoothing 0.030008239932123395 +233 28 model.embedding_dim 0.0 +233 28 training.batch_size 0.0 +233 28 training.label_smoothing 0.05591449880752689 +233 29 model.embedding_dim 1.0 +233 29 training.batch_size 2.0 +233 29 training.label_smoothing 0.024754314773723286 +233 30 model.embedding_dim 0.0 +233 30 training.batch_size 0.0 +233 30 training.label_smoothing 0.2924983634872373 +233 31 model.embedding_dim 1.0 +233 31 training.batch_size 1.0 +233 31 training.label_smoothing 0.008931361139171966 +233 32 model.embedding_dim 1.0 +233 32 training.batch_size 0.0 +233 32 training.label_smoothing 0.031475756056904655 +233 33 model.embedding_dim 1.0 +233 33 training.batch_size 2.0 +233 33 
training.label_smoothing 0.6494404984327582 +233 34 model.embedding_dim 1.0 +233 34 training.batch_size 1.0 +233 34 training.label_smoothing 0.07683294538794645 +233 35 model.embedding_dim 2.0 +233 35 training.batch_size 0.0 +233 35 training.label_smoothing 0.05790968898245294 +233 36 model.embedding_dim 2.0 +233 36 training.batch_size 2.0 +233 36 training.label_smoothing 0.005419449151708327 +233 37 model.embedding_dim 2.0 +233 37 training.batch_size 0.0 +233 37 training.label_smoothing 0.01674287142807042 +233 38 model.embedding_dim 1.0 +233 38 training.batch_size 2.0 +233 38 training.label_smoothing 0.06510871971601125 +233 39 model.embedding_dim 2.0 +233 39 training.batch_size 0.0 +233 39 training.label_smoothing 0.0020543115578988322 +233 40 model.embedding_dim 2.0 +233 40 training.batch_size 1.0 +233 40 training.label_smoothing 0.0038545975995773296 +233 41 model.embedding_dim 1.0 +233 41 training.batch_size 2.0 +233 41 training.label_smoothing 0.0016232097708989933 +233 42 model.embedding_dim 0.0 +233 42 training.batch_size 1.0 +233 42 training.label_smoothing 0.12204997216710685 +233 43 model.embedding_dim 2.0 +233 43 training.batch_size 0.0 +233 43 training.label_smoothing 0.00361069742555033 +233 44 model.embedding_dim 1.0 +233 44 training.batch_size 2.0 +233 44 training.label_smoothing 0.3843666020936583 +233 45 model.embedding_dim 1.0 +233 45 training.batch_size 0.0 +233 45 training.label_smoothing 0.703789530207733 +233 46 model.embedding_dim 0.0 +233 46 training.batch_size 2.0 +233 46 training.label_smoothing 0.005734424751161541 +233 47 model.embedding_dim 1.0 +233 47 training.batch_size 0.0 +233 47 training.label_smoothing 0.11744467489141931 +233 48 model.embedding_dim 0.0 +233 48 training.batch_size 1.0 +233 48 training.label_smoothing 0.6096702521144036 +233 49 model.embedding_dim 0.0 +233 49 training.batch_size 1.0 +233 49 training.label_smoothing 0.7384696239970219 +233 50 model.embedding_dim 2.0 +233 50 training.batch_size 0.0 +233 50 
training.label_smoothing 0.009984426705255003 +233 51 model.embedding_dim 2.0 +233 51 training.batch_size 1.0 +233 51 training.label_smoothing 0.23969365571530413 +233 52 model.embedding_dim 2.0 +233 52 training.batch_size 2.0 +233 52 training.label_smoothing 0.39412975862495225 +233 53 model.embedding_dim 0.0 +233 53 training.batch_size 0.0 +233 53 training.label_smoothing 0.17070815102520365 +233 54 model.embedding_dim 1.0 +233 54 training.batch_size 2.0 +233 54 training.label_smoothing 0.26349326540405743 +233 55 model.embedding_dim 1.0 +233 55 training.batch_size 0.0 +233 55 training.label_smoothing 0.008824350271466493 +233 56 model.embedding_dim 1.0 +233 56 training.batch_size 2.0 +233 56 training.label_smoothing 0.6037448970988127 +233 57 model.embedding_dim 0.0 +233 57 training.batch_size 2.0 +233 57 training.label_smoothing 0.03465537463930445 +233 58 model.embedding_dim 1.0 +233 58 training.batch_size 1.0 +233 58 training.label_smoothing 0.026783278004091728 +233 59 model.embedding_dim 1.0 +233 59 training.batch_size 1.0 +233 59 training.label_smoothing 0.19293439180270747 +233 60 model.embedding_dim 0.0 +233 60 training.batch_size 0.0 +233 60 training.label_smoothing 0.020194037086555903 +233 61 model.embedding_dim 1.0 +233 61 training.batch_size 1.0 +233 61 training.label_smoothing 0.34725403443489794 +233 62 model.embedding_dim 1.0 +233 62 training.batch_size 0.0 +233 62 training.label_smoothing 0.14780817019649126 +233 63 model.embedding_dim 0.0 +233 63 training.batch_size 2.0 +233 63 training.label_smoothing 0.6743147347000178 +233 64 model.embedding_dim 2.0 +233 64 training.batch_size 0.0 +233 64 training.label_smoothing 0.06267811959750454 +233 65 model.embedding_dim 0.0 +233 65 training.batch_size 1.0 +233 65 training.label_smoothing 0.009170507089691445 +233 66 model.embedding_dim 1.0 +233 66 training.batch_size 1.0 +233 66 training.label_smoothing 0.2523182257674186 +233 67 model.embedding_dim 1.0 +233 67 training.batch_size 0.0 +233 67 
training.label_smoothing 0.019463232805066913 +233 68 model.embedding_dim 1.0 +233 68 training.batch_size 2.0 +233 68 training.label_smoothing 0.08715489252375566 +233 69 model.embedding_dim 2.0 +233 69 training.batch_size 1.0 +233 69 training.label_smoothing 0.15321751001463416 +233 70 model.embedding_dim 0.0 +233 70 training.batch_size 1.0 +233 70 training.label_smoothing 0.0016067532961647462 +233 71 model.embedding_dim 2.0 +233 71 training.batch_size 0.0 +233 71 training.label_smoothing 0.012643350050550346 +233 72 model.embedding_dim 2.0 +233 72 training.batch_size 2.0 +233 72 training.label_smoothing 0.5524729957550675 +233 73 model.embedding_dim 1.0 +233 73 training.batch_size 1.0 +233 73 training.label_smoothing 0.05608801179425876 +233 74 model.embedding_dim 2.0 +233 74 training.batch_size 2.0 +233 74 training.label_smoothing 0.17365956054777526 +233 75 model.embedding_dim 0.0 +233 75 training.batch_size 0.0 +233 75 training.label_smoothing 0.018901639141274504 +233 76 model.embedding_dim 1.0 +233 76 training.batch_size 2.0 +233 76 training.label_smoothing 0.20695530171818785 +233 77 model.embedding_dim 0.0 +233 77 training.batch_size 2.0 +233 77 training.label_smoothing 0.19875218388513113 +233 78 model.embedding_dim 1.0 +233 78 training.batch_size 1.0 +233 78 training.label_smoothing 0.29866731351578196 +233 79 model.embedding_dim 1.0 +233 79 training.batch_size 2.0 +233 79 training.label_smoothing 0.008860328564703014 +233 80 model.embedding_dim 2.0 +233 80 training.batch_size 1.0 +233 80 training.label_smoothing 0.01683816268162936 +233 81 model.embedding_dim 2.0 +233 81 training.batch_size 0.0 +233 81 training.label_smoothing 0.02798591683458201 +233 82 model.embedding_dim 0.0 +233 82 training.batch_size 0.0 +233 82 training.label_smoothing 0.9696990882627058 +233 83 model.embedding_dim 2.0 +233 83 training.batch_size 1.0 +233 83 training.label_smoothing 0.0028694445250462986 +233 84 model.embedding_dim 1.0 +233 84 training.batch_size 2.0 +233 84 
training.label_smoothing 0.009844138384126221 +233 85 model.embedding_dim 1.0 +233 85 training.batch_size 2.0 +233 85 training.label_smoothing 0.00724325951615865 +233 86 model.embedding_dim 0.0 +233 86 training.batch_size 2.0 +233 86 training.label_smoothing 0.12885821647500867 +233 87 model.embedding_dim 2.0 +233 87 training.batch_size 2.0 +233 87 training.label_smoothing 0.10105825895921737 +233 88 model.embedding_dim 0.0 +233 88 training.batch_size 2.0 +233 88 training.label_smoothing 0.0012672999411346393 +233 89 model.embedding_dim 2.0 +233 89 training.batch_size 0.0 +233 89 training.label_smoothing 0.0029482927030698765 +233 90 model.embedding_dim 2.0 +233 90 training.batch_size 2.0 +233 90 training.label_smoothing 0.2314088393053325 +233 91 model.embedding_dim 1.0 +233 91 training.batch_size 2.0 +233 91 training.label_smoothing 0.17001670468676885 +233 92 model.embedding_dim 1.0 +233 92 training.batch_size 1.0 +233 92 training.label_smoothing 0.002720745379913559 +233 93 model.embedding_dim 1.0 +233 93 training.batch_size 1.0 +233 93 training.label_smoothing 0.9176960127130764 +233 94 model.embedding_dim 2.0 +233 94 training.batch_size 2.0 +233 94 training.label_smoothing 0.19070164784864327 +233 95 model.embedding_dim 1.0 +233 95 training.batch_size 1.0 +233 95 training.label_smoothing 0.003890541049421649 +233 96 model.embedding_dim 2.0 +233 96 training.batch_size 1.0 +233 96 training.label_smoothing 0.03799309744699636 +233 97 model.embedding_dim 1.0 +233 97 training.batch_size 0.0 +233 97 training.label_smoothing 0.005351719448819887 +233 98 model.embedding_dim 0.0 +233 98 training.batch_size 2.0 +233 98 training.label_smoothing 0.02272669996512755 +233 99 model.embedding_dim 1.0 +233 99 training.batch_size 1.0 +233 99 training.label_smoothing 0.6759665977962476 +233 100 model.embedding_dim 0.0 +233 100 training.batch_size 1.0 +233 100 training.label_smoothing 0.12165478693271711 +233 1 dataset """kinships""" +233 1 model """ermlp""" +233 1 loss 
"""crossentropy""" +233 1 regularizer """no""" +233 1 optimizer """adadelta""" +233 1 training_loop """lcwa""" +233 1 evaluator """rankbased""" +233 2 dataset """kinships""" +233 2 model """ermlp""" +233 2 loss """crossentropy""" +233 2 regularizer """no""" +233 2 optimizer """adadelta""" +233 2 training_loop """lcwa""" +233 2 evaluator """rankbased""" +233 3 dataset """kinships""" +233 3 model """ermlp""" +233 3 loss """crossentropy""" +233 3 regularizer """no""" +233 3 optimizer """adadelta""" +233 3 training_loop """lcwa""" +233 3 evaluator """rankbased""" +233 4 dataset """kinships""" +233 4 model """ermlp""" +233 4 loss """crossentropy""" +233 4 regularizer """no""" +233 4 optimizer """adadelta""" +233 4 training_loop """lcwa""" +233 4 evaluator """rankbased""" +233 5 dataset """kinships""" +233 5 model """ermlp""" +233 5 loss """crossentropy""" +233 5 regularizer """no""" +233 5 optimizer """adadelta""" +233 5 training_loop """lcwa""" +233 5 evaluator """rankbased""" +233 6 dataset """kinships""" +233 6 model """ermlp""" +233 6 loss """crossentropy""" +233 6 regularizer """no""" +233 6 optimizer """adadelta""" +233 6 training_loop """lcwa""" +233 6 evaluator """rankbased""" +233 7 dataset """kinships""" +233 7 model """ermlp""" +233 7 loss """crossentropy""" +233 7 regularizer """no""" +233 7 optimizer """adadelta""" +233 7 training_loop """lcwa""" +233 7 evaluator """rankbased""" +233 8 dataset """kinships""" +233 8 model """ermlp""" +233 8 loss """crossentropy""" +233 8 regularizer """no""" +233 8 optimizer """adadelta""" +233 8 training_loop """lcwa""" +233 8 evaluator """rankbased""" +233 9 dataset """kinships""" +233 9 model """ermlp""" +233 9 loss """crossentropy""" +233 9 regularizer """no""" +233 9 optimizer """adadelta""" +233 9 training_loop """lcwa""" +233 9 evaluator """rankbased""" +233 10 dataset """kinships""" +233 10 model """ermlp""" +233 10 loss """crossentropy""" +233 10 regularizer """no""" +233 10 optimizer """adadelta""" +233 10 
training_loop """lcwa""" +233 10 evaluator """rankbased""" +233 11 dataset """kinships""" +233 11 model """ermlp""" +233 11 loss """crossentropy""" +233 11 regularizer """no""" +233 11 optimizer """adadelta""" +233 11 training_loop """lcwa""" +233 11 evaluator """rankbased""" +233 12 dataset """kinships""" +233 12 model """ermlp""" +233 12 loss """crossentropy""" +233 12 regularizer """no""" +233 12 optimizer """adadelta""" +233 12 training_loop """lcwa""" +233 12 evaluator """rankbased""" +233 13 dataset """kinships""" +233 13 model """ermlp""" +233 13 loss """crossentropy""" +233 13 regularizer """no""" +233 13 optimizer """adadelta""" +233 13 training_loop """lcwa""" +233 13 evaluator """rankbased""" +233 14 dataset """kinships""" +233 14 model """ermlp""" +233 14 loss """crossentropy""" +233 14 regularizer """no""" +233 14 optimizer """adadelta""" +233 14 training_loop """lcwa""" +233 14 evaluator """rankbased""" +233 15 dataset """kinships""" +233 15 model """ermlp""" +233 15 loss """crossentropy""" +233 15 regularizer """no""" +233 15 optimizer """adadelta""" +233 15 training_loop """lcwa""" +233 15 evaluator """rankbased""" +233 16 dataset """kinships""" +233 16 model """ermlp""" +233 16 loss """crossentropy""" +233 16 regularizer """no""" +233 16 optimizer """adadelta""" +233 16 training_loop """lcwa""" +233 16 evaluator """rankbased""" +233 17 dataset """kinships""" +233 17 model """ermlp""" +233 17 loss """crossentropy""" +233 17 regularizer """no""" +233 17 optimizer """adadelta""" +233 17 training_loop """lcwa""" +233 17 evaluator """rankbased""" +233 18 dataset """kinships""" +233 18 model """ermlp""" +233 18 loss """crossentropy""" +233 18 regularizer """no""" +233 18 optimizer """adadelta""" +233 18 training_loop """lcwa""" +233 18 evaluator """rankbased""" +233 19 dataset """kinships""" +233 19 model """ermlp""" +233 19 loss """crossentropy""" +233 19 regularizer """no""" +233 19 optimizer """adadelta""" +233 19 training_loop """lcwa""" +233 19 
evaluator """rankbased""" +233 20 dataset """kinships""" +233 20 model """ermlp""" +233 20 loss """crossentropy""" +233 20 regularizer """no""" +233 20 optimizer """adadelta""" +233 20 training_loop """lcwa""" +233 20 evaluator """rankbased""" +233 21 dataset """kinships""" +233 21 model """ermlp""" +233 21 loss """crossentropy""" +233 21 regularizer """no""" +233 21 optimizer """adadelta""" +233 21 training_loop """lcwa""" +233 21 evaluator """rankbased""" +233 22 dataset """kinships""" +233 22 model """ermlp""" +233 22 loss """crossentropy""" +233 22 regularizer """no""" +233 22 optimizer """adadelta""" +233 22 training_loop """lcwa""" +233 22 evaluator """rankbased""" +233 23 dataset """kinships""" +233 23 model """ermlp""" +233 23 loss """crossentropy""" +233 23 regularizer """no""" +233 23 optimizer """adadelta""" +233 23 training_loop """lcwa""" +233 23 evaluator """rankbased""" +233 24 dataset """kinships""" +233 24 model """ermlp""" +233 24 loss """crossentropy""" +233 24 regularizer """no""" +233 24 optimizer """adadelta""" +233 24 training_loop """lcwa""" +233 24 evaluator """rankbased""" +233 25 dataset """kinships""" +233 25 model """ermlp""" +233 25 loss """crossentropy""" +233 25 regularizer """no""" +233 25 optimizer """adadelta""" +233 25 training_loop """lcwa""" +233 25 evaluator """rankbased""" +233 26 dataset """kinships""" +233 26 model """ermlp""" +233 26 loss """crossentropy""" +233 26 regularizer """no""" +233 26 optimizer """adadelta""" +233 26 training_loop """lcwa""" +233 26 evaluator """rankbased""" +233 27 dataset """kinships""" +233 27 model """ermlp""" +233 27 loss """crossentropy""" +233 27 regularizer """no""" +233 27 optimizer """adadelta""" +233 27 training_loop """lcwa""" +233 27 evaluator """rankbased""" +233 28 dataset """kinships""" +233 28 model """ermlp""" +233 28 loss """crossentropy""" +233 28 regularizer """no""" +233 28 optimizer """adadelta""" +233 28 training_loop """lcwa""" +233 28 evaluator """rankbased""" +233 29 
dataset """kinships""" +233 29 model """ermlp""" +233 29 loss """crossentropy""" +233 29 regularizer """no""" +233 29 optimizer """adadelta""" +233 29 training_loop """lcwa""" +233 29 evaluator """rankbased""" +233 30 dataset """kinships""" +233 30 model """ermlp""" +233 30 loss """crossentropy""" +233 30 regularizer """no""" +233 30 optimizer """adadelta""" +233 30 training_loop """lcwa""" +233 30 evaluator """rankbased""" +233 31 dataset """kinships""" +233 31 model """ermlp""" +233 31 loss """crossentropy""" +233 31 regularizer """no""" +233 31 optimizer """adadelta""" +233 31 training_loop """lcwa""" +233 31 evaluator """rankbased""" +233 32 dataset """kinships""" +233 32 model """ermlp""" +233 32 loss """crossentropy""" +233 32 regularizer """no""" +233 32 optimizer """adadelta""" +233 32 training_loop """lcwa""" +233 32 evaluator """rankbased""" +233 33 dataset """kinships""" +233 33 model """ermlp""" +233 33 loss """crossentropy""" +233 33 regularizer """no""" +233 33 optimizer """adadelta""" +233 33 training_loop """lcwa""" +233 33 evaluator """rankbased""" +233 34 dataset """kinships""" +233 34 model """ermlp""" +233 34 loss """crossentropy""" +233 34 regularizer """no""" +233 34 optimizer """adadelta""" +233 34 training_loop """lcwa""" +233 34 evaluator """rankbased""" +233 35 dataset """kinships""" +233 35 model """ermlp""" +233 35 loss """crossentropy""" +233 35 regularizer """no""" +233 35 optimizer """adadelta""" +233 35 training_loop """lcwa""" +233 35 evaluator """rankbased""" +233 36 dataset """kinships""" +233 36 model """ermlp""" +233 36 loss """crossentropy""" +233 36 regularizer """no""" +233 36 optimizer """adadelta""" +233 36 training_loop """lcwa""" +233 36 evaluator """rankbased""" +233 37 dataset """kinships""" +233 37 model """ermlp""" +233 37 loss """crossentropy""" +233 37 regularizer """no""" +233 37 optimizer """adadelta""" +233 37 training_loop """lcwa""" +233 37 evaluator """rankbased""" +233 38 dataset """kinships""" +233 38 model 
"""ermlp""" +233 38 loss """crossentropy""" +233 38 regularizer """no""" +233 38 optimizer """adadelta""" +233 38 training_loop """lcwa""" +233 38 evaluator """rankbased""" +233 39 dataset """kinships""" +233 39 model """ermlp""" +233 39 loss """crossentropy""" +233 39 regularizer """no""" +233 39 optimizer """adadelta""" +233 39 training_loop """lcwa""" +233 39 evaluator """rankbased""" +233 40 dataset """kinships""" +233 40 model """ermlp""" +233 40 loss """crossentropy""" +233 40 regularizer """no""" +233 40 optimizer """adadelta""" +233 40 training_loop """lcwa""" +233 40 evaluator """rankbased""" +233 41 dataset """kinships""" +233 41 model """ermlp""" +233 41 loss """crossentropy""" +233 41 regularizer """no""" +233 41 optimizer """adadelta""" +233 41 training_loop """lcwa""" +233 41 evaluator """rankbased""" +233 42 dataset """kinships""" +233 42 model """ermlp""" +233 42 loss """crossentropy""" +233 42 regularizer """no""" +233 42 optimizer """adadelta""" +233 42 training_loop """lcwa""" +233 42 evaluator """rankbased""" +233 43 dataset """kinships""" +233 43 model """ermlp""" +233 43 loss """crossentropy""" +233 43 regularizer """no""" +233 43 optimizer """adadelta""" +233 43 training_loop """lcwa""" +233 43 evaluator """rankbased""" +233 44 dataset """kinships""" +233 44 model """ermlp""" +233 44 loss """crossentropy""" +233 44 regularizer """no""" +233 44 optimizer """adadelta""" +233 44 training_loop """lcwa""" +233 44 evaluator """rankbased""" +233 45 dataset """kinships""" +233 45 model """ermlp""" +233 45 loss """crossentropy""" +233 45 regularizer """no""" +233 45 optimizer """adadelta""" +233 45 training_loop """lcwa""" +233 45 evaluator """rankbased""" +233 46 dataset """kinships""" +233 46 model """ermlp""" +233 46 loss """crossentropy""" +233 46 regularizer """no""" +233 46 optimizer """adadelta""" +233 46 training_loop """lcwa""" +233 46 evaluator """rankbased""" +233 47 dataset """kinships""" +233 47 model """ermlp""" +233 47 loss 
"""crossentropy""" +233 47 regularizer """no""" +233 47 optimizer """adadelta""" +233 47 training_loop """lcwa""" +233 47 evaluator """rankbased""" +233 48 dataset """kinships""" +233 48 model """ermlp""" +233 48 loss """crossentropy""" +233 48 regularizer """no""" +233 48 optimizer """adadelta""" +233 48 training_loop """lcwa""" +233 48 evaluator """rankbased""" +233 49 dataset """kinships""" +233 49 model """ermlp""" +233 49 loss """crossentropy""" +233 49 regularizer """no""" +233 49 optimizer """adadelta""" +233 49 training_loop """lcwa""" +233 49 evaluator """rankbased""" +233 50 dataset """kinships""" +233 50 model """ermlp""" +233 50 loss """crossentropy""" +233 50 regularizer """no""" +233 50 optimizer """adadelta""" +233 50 training_loop """lcwa""" +233 50 evaluator """rankbased""" +233 51 dataset """kinships""" +233 51 model """ermlp""" +233 51 loss """crossentropy""" +233 51 regularizer """no""" +233 51 optimizer """adadelta""" +233 51 training_loop """lcwa""" +233 51 evaluator """rankbased""" +233 52 dataset """kinships""" +233 52 model """ermlp""" +233 52 loss """crossentropy""" +233 52 regularizer """no""" +233 52 optimizer """adadelta""" +233 52 training_loop """lcwa""" +233 52 evaluator """rankbased""" +233 53 dataset """kinships""" +233 53 model """ermlp""" +233 53 loss """crossentropy""" +233 53 regularizer """no""" +233 53 optimizer """adadelta""" +233 53 training_loop """lcwa""" +233 53 evaluator """rankbased""" +233 54 dataset """kinships""" +233 54 model """ermlp""" +233 54 loss """crossentropy""" +233 54 regularizer """no""" +233 54 optimizer """adadelta""" +233 54 training_loop """lcwa""" +233 54 evaluator """rankbased""" +233 55 dataset """kinships""" +233 55 model """ermlp""" +233 55 loss """crossentropy""" +233 55 regularizer """no""" +233 55 optimizer """adadelta""" +233 55 training_loop """lcwa""" +233 55 evaluator """rankbased""" +233 56 dataset """kinships""" +233 56 model """ermlp""" +233 56 loss """crossentropy""" +233 56 
regularizer """no""" +233 56 optimizer """adadelta""" +233 56 training_loop """lcwa""" +233 56 evaluator """rankbased""" +233 57 dataset """kinships""" +233 57 model """ermlp""" +233 57 loss """crossentropy""" +233 57 regularizer """no""" +233 57 optimizer """adadelta""" +233 57 training_loop """lcwa""" +233 57 evaluator """rankbased""" +233 58 dataset """kinships""" +233 58 model """ermlp""" +233 58 loss """crossentropy""" +233 58 regularizer """no""" +233 58 optimizer """adadelta""" +233 58 training_loop """lcwa""" +233 58 evaluator """rankbased""" +233 59 dataset """kinships""" +233 59 model """ermlp""" +233 59 loss """crossentropy""" +233 59 regularizer """no""" +233 59 optimizer """adadelta""" +233 59 training_loop """lcwa""" +233 59 evaluator """rankbased""" +233 60 dataset """kinships""" +233 60 model """ermlp""" +233 60 loss """crossentropy""" +233 60 regularizer """no""" +233 60 optimizer """adadelta""" +233 60 training_loop """lcwa""" +233 60 evaluator """rankbased""" +233 61 dataset """kinships""" +233 61 model """ermlp""" +233 61 loss """crossentropy""" +233 61 regularizer """no""" +233 61 optimizer """adadelta""" +233 61 training_loop """lcwa""" +233 61 evaluator """rankbased""" +233 62 dataset """kinships""" +233 62 model """ermlp""" +233 62 loss """crossentropy""" +233 62 regularizer """no""" +233 62 optimizer """adadelta""" +233 62 training_loop """lcwa""" +233 62 evaluator """rankbased""" +233 63 dataset """kinships""" +233 63 model """ermlp""" +233 63 loss """crossentropy""" +233 63 regularizer """no""" +233 63 optimizer """adadelta""" +233 63 training_loop """lcwa""" +233 63 evaluator """rankbased""" +233 64 dataset """kinships""" +233 64 model """ermlp""" +233 64 loss """crossentropy""" +233 64 regularizer """no""" +233 64 optimizer """adadelta""" +233 64 training_loop """lcwa""" +233 64 evaluator """rankbased""" +233 65 dataset """kinships""" +233 65 model """ermlp""" +233 65 loss """crossentropy""" +233 65 regularizer """no""" +233 65 
optimizer """adadelta""" +233 65 training_loop """lcwa""" +233 65 evaluator """rankbased""" +233 66 dataset """kinships""" +233 66 model """ermlp""" +233 66 loss """crossentropy""" +233 66 regularizer """no""" +233 66 optimizer """adadelta""" +233 66 training_loop """lcwa""" +233 66 evaluator """rankbased""" +233 67 dataset """kinships""" +233 67 model """ermlp""" +233 67 loss """crossentropy""" +233 67 regularizer """no""" +233 67 optimizer """adadelta""" +233 67 training_loop """lcwa""" +233 67 evaluator """rankbased""" +233 68 dataset """kinships""" +233 68 model """ermlp""" +233 68 loss """crossentropy""" +233 68 regularizer """no""" +233 68 optimizer """adadelta""" +233 68 training_loop """lcwa""" +233 68 evaluator """rankbased""" +233 69 dataset """kinships""" +233 69 model """ermlp""" +233 69 loss """crossentropy""" +233 69 regularizer """no""" +233 69 optimizer """adadelta""" +233 69 training_loop """lcwa""" +233 69 evaluator """rankbased""" +233 70 dataset """kinships""" +233 70 model """ermlp""" +233 70 loss """crossentropy""" +233 70 regularizer """no""" +233 70 optimizer """adadelta""" +233 70 training_loop """lcwa""" +233 70 evaluator """rankbased""" +233 71 dataset """kinships""" +233 71 model """ermlp""" +233 71 loss """crossentropy""" +233 71 regularizer """no""" +233 71 optimizer """adadelta""" +233 71 training_loop """lcwa""" +233 71 evaluator """rankbased""" +233 72 dataset """kinships""" +233 72 model """ermlp""" +233 72 loss """crossentropy""" +233 72 regularizer """no""" +233 72 optimizer """adadelta""" +233 72 training_loop """lcwa""" +233 72 evaluator """rankbased""" +233 73 dataset """kinships""" +233 73 model """ermlp""" +233 73 loss """crossentropy""" +233 73 regularizer """no""" +233 73 optimizer """adadelta""" +233 73 training_loop """lcwa""" +233 73 evaluator """rankbased""" +233 74 dataset """kinships""" +233 74 model """ermlp""" +233 74 loss """crossentropy""" +233 74 regularizer """no""" +233 74 optimizer """adadelta""" +233 74 
training_loop """lcwa""" +233 74 evaluator """rankbased""" +233 75 dataset """kinships""" +233 75 model """ermlp""" +233 75 loss """crossentropy""" +233 75 regularizer """no""" +233 75 optimizer """adadelta""" +233 75 training_loop """lcwa""" +233 75 evaluator """rankbased""" +233 76 dataset """kinships""" +233 76 model """ermlp""" +233 76 loss """crossentropy""" +233 76 regularizer """no""" +233 76 optimizer """adadelta""" +233 76 training_loop """lcwa""" +233 76 evaluator """rankbased""" +233 77 dataset """kinships""" +233 77 model """ermlp""" +233 77 loss """crossentropy""" +233 77 regularizer """no""" +233 77 optimizer """adadelta""" +233 77 training_loop """lcwa""" +233 77 evaluator """rankbased""" +233 78 dataset """kinships""" +233 78 model """ermlp""" +233 78 loss """crossentropy""" +233 78 regularizer """no""" +233 78 optimizer """adadelta""" +233 78 training_loop """lcwa""" +233 78 evaluator """rankbased""" +233 79 dataset """kinships""" +233 79 model """ermlp""" +233 79 loss """crossentropy""" +233 79 regularizer """no""" +233 79 optimizer """adadelta""" +233 79 training_loop """lcwa""" +233 79 evaluator """rankbased""" +233 80 dataset """kinships""" +233 80 model """ermlp""" +233 80 loss """crossentropy""" +233 80 regularizer """no""" +233 80 optimizer """adadelta""" +233 80 training_loop """lcwa""" +233 80 evaluator """rankbased""" +233 81 dataset """kinships""" +233 81 model """ermlp""" +233 81 loss """crossentropy""" +233 81 regularizer """no""" +233 81 optimizer """adadelta""" +233 81 training_loop """lcwa""" +233 81 evaluator """rankbased""" +233 82 dataset """kinships""" +233 82 model """ermlp""" +233 82 loss """crossentropy""" +233 82 regularizer """no""" +233 82 optimizer """adadelta""" +233 82 training_loop """lcwa""" +233 82 evaluator """rankbased""" +233 83 dataset """kinships""" +233 83 model """ermlp""" +233 83 loss """crossentropy""" +233 83 regularizer """no""" +233 83 optimizer """adadelta""" +233 83 training_loop """lcwa""" +233 83 
evaluator """rankbased""" +233 84 dataset """kinships""" +233 84 model """ermlp""" +233 84 loss """crossentropy""" +233 84 regularizer """no""" +233 84 optimizer """adadelta""" +233 84 training_loop """lcwa""" +233 84 evaluator """rankbased""" +233 85 dataset """kinships""" +233 85 model """ermlp""" +233 85 loss """crossentropy""" +233 85 regularizer """no""" +233 85 optimizer """adadelta""" +233 85 training_loop """lcwa""" +233 85 evaluator """rankbased""" +233 86 dataset """kinships""" +233 86 model """ermlp""" +233 86 loss """crossentropy""" +233 86 regularizer """no""" +233 86 optimizer """adadelta""" +233 86 training_loop """lcwa""" +233 86 evaluator """rankbased""" +233 87 dataset """kinships""" +233 87 model """ermlp""" +233 87 loss """crossentropy""" +233 87 regularizer """no""" +233 87 optimizer """adadelta""" +233 87 training_loop """lcwa""" +233 87 evaluator """rankbased""" +233 88 dataset """kinships""" +233 88 model """ermlp""" +233 88 loss """crossentropy""" +233 88 regularizer """no""" +233 88 optimizer """adadelta""" +233 88 training_loop """lcwa""" +233 88 evaluator """rankbased""" +233 89 dataset """kinships""" +233 89 model """ermlp""" +233 89 loss """crossentropy""" +233 89 regularizer """no""" +233 89 optimizer """adadelta""" +233 89 training_loop """lcwa""" +233 89 evaluator """rankbased""" +233 90 dataset """kinships""" +233 90 model """ermlp""" +233 90 loss """crossentropy""" +233 90 regularizer """no""" +233 90 optimizer """adadelta""" +233 90 training_loop """lcwa""" +233 90 evaluator """rankbased""" +233 91 dataset """kinships""" +233 91 model """ermlp""" +233 91 loss """crossentropy""" +233 91 regularizer """no""" +233 91 optimizer """adadelta""" +233 91 training_loop """lcwa""" +233 91 evaluator """rankbased""" +233 92 dataset """kinships""" +233 92 model """ermlp""" +233 92 loss """crossentropy""" +233 92 regularizer """no""" +233 92 optimizer """adadelta""" +233 92 training_loop """lcwa""" +233 92 evaluator """rankbased""" +233 93 
dataset """kinships""" +233 93 model """ermlp""" +233 93 loss """crossentropy""" +233 93 regularizer """no""" +233 93 optimizer """adadelta""" +233 93 training_loop """lcwa""" +233 93 evaluator """rankbased""" +233 94 dataset """kinships""" +233 94 model """ermlp""" +233 94 loss """crossentropy""" +233 94 regularizer """no""" +233 94 optimizer """adadelta""" +233 94 training_loop """lcwa""" +233 94 evaluator """rankbased""" +233 95 dataset """kinships""" +233 95 model """ermlp""" +233 95 loss """crossentropy""" +233 95 regularizer """no""" +233 95 optimizer """adadelta""" +233 95 training_loop """lcwa""" +233 95 evaluator """rankbased""" +233 96 dataset """kinships""" +233 96 model """ermlp""" +233 96 loss """crossentropy""" +233 96 regularizer """no""" +233 96 optimizer """adadelta""" +233 96 training_loop """lcwa""" +233 96 evaluator """rankbased""" +233 97 dataset """kinships""" +233 97 model """ermlp""" +233 97 loss """crossentropy""" +233 97 regularizer """no""" +233 97 optimizer """adadelta""" +233 97 training_loop """lcwa""" +233 97 evaluator """rankbased""" +233 98 dataset """kinships""" +233 98 model """ermlp""" +233 98 loss """crossentropy""" +233 98 regularizer """no""" +233 98 optimizer """adadelta""" +233 98 training_loop """lcwa""" +233 98 evaluator """rankbased""" +233 99 dataset """kinships""" +233 99 model """ermlp""" +233 99 loss """crossentropy""" +233 99 regularizer """no""" +233 99 optimizer """adadelta""" +233 99 training_loop """lcwa""" +233 99 evaluator """rankbased""" +233 100 dataset """kinships""" +233 100 model """ermlp""" +233 100 loss """crossentropy""" +233 100 regularizer """no""" +233 100 optimizer """adadelta""" +233 100 training_loop """lcwa""" +233 100 evaluator """rankbased""" +234 1 model.embedding_dim 0.0 +234 1 training.batch_size 1.0 +234 1 training.label_smoothing 0.002292529738674997 +234 2 model.embedding_dim 1.0 +234 2 training.batch_size 2.0 +234 2 training.label_smoothing 0.002610685679616404 +234 3 model.embedding_dim 
1.0 +234 3 training.batch_size 2.0 +234 3 training.label_smoothing 0.0013695996713023555 +234 4 model.embedding_dim 2.0 +234 4 training.batch_size 0.0 +234 4 training.label_smoothing 0.619741055982704 +234 5 model.embedding_dim 2.0 +234 5 training.batch_size 0.0 +234 5 training.label_smoothing 0.1380391533977146 +234 6 model.embedding_dim 1.0 +234 6 training.batch_size 1.0 +234 6 training.label_smoothing 0.05619512508649927 +234 7 model.embedding_dim 0.0 +234 7 training.batch_size 2.0 +234 7 training.label_smoothing 0.10133721073234735 +234 8 model.embedding_dim 0.0 +234 8 training.batch_size 0.0 +234 8 training.label_smoothing 0.4538982734927364 +234 9 model.embedding_dim 2.0 +234 9 training.batch_size 0.0 +234 9 training.label_smoothing 0.0023310622934668347 +234 10 model.embedding_dim 0.0 +234 10 training.batch_size 0.0 +234 10 training.label_smoothing 0.5485403769405913 +234 11 model.embedding_dim 1.0 +234 11 training.batch_size 2.0 +234 11 training.label_smoothing 0.16638364954262402 +234 12 model.embedding_dim 2.0 +234 12 training.batch_size 1.0 +234 12 training.label_smoothing 0.0359821283757767 +234 13 model.embedding_dim 2.0 +234 13 training.batch_size 2.0 +234 13 training.label_smoothing 0.07797570988955645 +234 14 model.embedding_dim 2.0 +234 14 training.batch_size 0.0 +234 14 training.label_smoothing 0.0012096416788092582 +234 15 model.embedding_dim 0.0 +234 15 training.batch_size 2.0 +234 15 training.label_smoothing 0.07622968142606795 +234 16 model.embedding_dim 0.0 +234 16 training.batch_size 0.0 +234 16 training.label_smoothing 0.1134572343394543 +234 17 model.embedding_dim 0.0 +234 17 training.batch_size 2.0 +234 17 training.label_smoothing 0.017073528496632556 +234 18 model.embedding_dim 0.0 +234 18 training.batch_size 1.0 +234 18 training.label_smoothing 0.006708876058530669 +234 19 model.embedding_dim 0.0 +234 19 training.batch_size 0.0 +234 19 training.label_smoothing 0.057119347291914126 +234 20 model.embedding_dim 1.0 +234 20 
training.batch_size 2.0 +234 20 training.label_smoothing 0.39833171851037574 +234 21 model.embedding_dim 2.0 +234 21 training.batch_size 1.0 +234 21 training.label_smoothing 0.06677234971381489 +234 22 model.embedding_dim 0.0 +234 22 training.batch_size 2.0 +234 22 training.label_smoothing 0.007311378439270306 +234 23 model.embedding_dim 2.0 +234 23 training.batch_size 0.0 +234 23 training.label_smoothing 0.003495443514989716 +234 24 model.embedding_dim 1.0 +234 24 training.batch_size 0.0 +234 24 training.label_smoothing 0.026437261369870648 +234 25 model.embedding_dim 0.0 +234 25 training.batch_size 0.0 +234 25 training.label_smoothing 0.07397646992290591 +234 26 model.embedding_dim 1.0 +234 26 training.batch_size 0.0 +234 26 training.label_smoothing 0.007594414441253147 +234 27 model.embedding_dim 1.0 +234 27 training.batch_size 2.0 +234 27 training.label_smoothing 0.0017306729296904457 +234 28 model.embedding_dim 1.0 +234 28 training.batch_size 0.0 +234 28 training.label_smoothing 0.021307496711835378 +234 29 model.embedding_dim 1.0 +234 29 training.batch_size 2.0 +234 29 training.label_smoothing 0.021931818724039293 +234 30 model.embedding_dim 0.0 +234 30 training.batch_size 2.0 +234 30 training.label_smoothing 0.9034301585301725 +234 31 model.embedding_dim 1.0 +234 31 training.batch_size 2.0 +234 31 training.label_smoothing 0.27943162316414905 +234 32 model.embedding_dim 0.0 +234 32 training.batch_size 1.0 +234 32 training.label_smoothing 0.0013780752017406033 +234 33 model.embedding_dim 0.0 +234 33 training.batch_size 1.0 +234 33 training.label_smoothing 0.013689968889569611 +234 34 model.embedding_dim 1.0 +234 34 training.batch_size 2.0 +234 34 training.label_smoothing 0.12690698375864198 +234 35 model.embedding_dim 0.0 +234 35 training.batch_size 2.0 +234 35 training.label_smoothing 0.3007332854034082 +234 36 model.embedding_dim 1.0 +234 36 training.batch_size 0.0 +234 36 training.label_smoothing 0.008671229615294455 +234 37 model.embedding_dim 0.0 +234 37 
training.batch_size 0.0 +234 37 training.label_smoothing 0.3826139652900121 +234 38 model.embedding_dim 1.0 +234 38 training.batch_size 2.0 +234 38 training.label_smoothing 0.0013214024362222835 +234 39 model.embedding_dim 1.0 +234 39 training.batch_size 0.0 +234 39 training.label_smoothing 0.001181407363340176 +234 40 model.embedding_dim 1.0 +234 40 training.batch_size 2.0 +234 40 training.label_smoothing 0.08509031423452791 +234 41 model.embedding_dim 2.0 +234 41 training.batch_size 1.0 +234 41 training.label_smoothing 0.7478749075207335 +234 42 model.embedding_dim 2.0 +234 42 training.batch_size 0.0 +234 42 training.label_smoothing 0.5804587165533782 +234 43 model.embedding_dim 2.0 +234 43 training.batch_size 2.0 +234 43 training.label_smoothing 0.0014897051293449252 +234 44 model.embedding_dim 1.0 +234 44 training.batch_size 0.0 +234 44 training.label_smoothing 0.6238371644219731 +234 45 model.embedding_dim 1.0 +234 45 training.batch_size 0.0 +234 45 training.label_smoothing 0.17003775104920005 +234 46 model.embedding_dim 2.0 +234 46 training.batch_size 1.0 +234 46 training.label_smoothing 0.10247469184768603 +234 47 model.embedding_dim 2.0 +234 47 training.batch_size 1.0 +234 47 training.label_smoothing 0.0026879382836424576 +234 48 model.embedding_dim 2.0 +234 48 training.batch_size 0.0 +234 48 training.label_smoothing 0.0030535614631505355 +234 49 model.embedding_dim 1.0 +234 49 training.batch_size 2.0 +234 49 training.label_smoothing 0.03908439049534146 +234 50 model.embedding_dim 0.0 +234 50 training.batch_size 2.0 +234 50 training.label_smoothing 0.1711304456421107 +234 51 model.embedding_dim 1.0 +234 51 training.batch_size 1.0 +234 51 training.label_smoothing 0.009091686273062713 +234 52 model.embedding_dim 1.0 +234 52 training.batch_size 2.0 +234 52 training.label_smoothing 0.013008998037122157 +234 53 model.embedding_dim 2.0 +234 53 training.batch_size 1.0 +234 53 training.label_smoothing 0.38943822211387324 +234 54 model.embedding_dim 2.0 +234 54 
training.batch_size 2.0 +234 54 training.label_smoothing 0.019514662715433496 +234 55 model.embedding_dim 2.0 +234 55 training.batch_size 0.0 +234 55 training.label_smoothing 0.0028853371603773524 +234 56 model.embedding_dim 0.0 +234 56 training.batch_size 1.0 +234 56 training.label_smoothing 0.03437269931628287 +234 57 model.embedding_dim 2.0 +234 57 training.batch_size 2.0 +234 57 training.label_smoothing 0.03616989668482157 +234 58 model.embedding_dim 2.0 +234 58 training.batch_size 0.0 +234 58 training.label_smoothing 0.003920133297649763 +234 59 model.embedding_dim 2.0 +234 59 training.batch_size 2.0 +234 59 training.label_smoothing 0.8070981280633106 +234 60 model.embedding_dim 1.0 +234 60 training.batch_size 1.0 +234 60 training.label_smoothing 0.017493082328925765 +234 61 model.embedding_dim 1.0 +234 61 training.batch_size 1.0 +234 61 training.label_smoothing 0.13310736242953392 +234 62 model.embedding_dim 0.0 +234 62 training.batch_size 1.0 +234 62 training.label_smoothing 0.014975969520956606 +234 63 model.embedding_dim 2.0 +234 63 training.batch_size 1.0 +234 63 training.label_smoothing 0.00703958696765015 +234 64 model.embedding_dim 0.0 +234 64 training.batch_size 2.0 +234 64 training.label_smoothing 0.005825077874196881 +234 65 model.embedding_dim 0.0 +234 65 training.batch_size 0.0 +234 65 training.label_smoothing 0.004416200078393179 +234 66 model.embedding_dim 2.0 +234 66 training.batch_size 0.0 +234 66 training.label_smoothing 0.00952405033653272 +234 67 model.embedding_dim 1.0 +234 67 training.batch_size 1.0 +234 67 training.label_smoothing 0.07749556298797251 +234 68 model.embedding_dim 2.0 +234 68 training.batch_size 1.0 +234 68 training.label_smoothing 0.011914222495439912 +234 69 model.embedding_dim 0.0 +234 69 training.batch_size 0.0 +234 69 training.label_smoothing 0.21399612665657355 +234 70 model.embedding_dim 2.0 +234 70 training.batch_size 0.0 +234 70 training.label_smoothing 0.026515689554397807 +234 71 model.embedding_dim 2.0 +234 71 
training.batch_size 1.0 +234 71 training.label_smoothing 0.1581734551720133 +234 72 model.embedding_dim 2.0 +234 72 training.batch_size 0.0 +234 72 training.label_smoothing 0.010879238325473781 +234 73 model.embedding_dim 2.0 +234 73 training.batch_size 0.0 +234 73 training.label_smoothing 0.18738149853379696 +234 74 model.embedding_dim 1.0 +234 74 training.batch_size 1.0 +234 74 training.label_smoothing 0.35149719917669914 +234 75 model.embedding_dim 2.0 +234 75 training.batch_size 1.0 +234 75 training.label_smoothing 0.131305891801094 +234 76 model.embedding_dim 1.0 +234 76 training.batch_size 0.0 +234 76 training.label_smoothing 0.65412645309692 +234 77 model.embedding_dim 0.0 +234 77 training.batch_size 2.0 +234 77 training.label_smoothing 0.005146845385582568 +234 78 model.embedding_dim 1.0 +234 78 training.batch_size 2.0 +234 78 training.label_smoothing 0.0011938962346927427 +234 79 model.embedding_dim 0.0 +234 79 training.batch_size 2.0 +234 79 training.label_smoothing 0.5827397567839604 +234 80 model.embedding_dim 2.0 +234 80 training.batch_size 1.0 +234 80 training.label_smoothing 0.0027306996782908307 +234 81 model.embedding_dim 0.0 +234 81 training.batch_size 0.0 +234 81 training.label_smoothing 0.015695410458910583 +234 82 model.embedding_dim 0.0 +234 82 training.batch_size 0.0 +234 82 training.label_smoothing 0.00841481179849509 +234 83 model.embedding_dim 1.0 +234 83 training.batch_size 0.0 +234 83 training.label_smoothing 0.0040216595366573925 +234 84 model.embedding_dim 1.0 +234 84 training.batch_size 0.0 +234 84 training.label_smoothing 0.11490632294813577 +234 85 model.embedding_dim 2.0 +234 85 training.batch_size 2.0 +234 85 training.label_smoothing 0.001442435032178097 +234 86 model.embedding_dim 2.0 +234 86 training.batch_size 1.0 +234 86 training.label_smoothing 0.012001140125109378 +234 87 model.embedding_dim 0.0 +234 87 training.batch_size 1.0 +234 87 training.label_smoothing 0.3632138142922015 +234 88 model.embedding_dim 0.0 +234 88 
training.batch_size 2.0 +234 88 training.label_smoothing 0.012196283532685039 +234 89 model.embedding_dim 0.0 +234 89 training.batch_size 2.0 +234 89 training.label_smoothing 0.021267468201296356 +234 90 model.embedding_dim 0.0 +234 90 training.batch_size 0.0 +234 90 training.label_smoothing 0.024878894222958168 +234 91 model.embedding_dim 2.0 +234 91 training.batch_size 0.0 +234 91 training.label_smoothing 0.07171862877233372 +234 92 model.embedding_dim 0.0 +234 92 training.batch_size 2.0 +234 92 training.label_smoothing 0.07040493946557115 +234 93 model.embedding_dim 0.0 +234 93 training.batch_size 2.0 +234 93 training.label_smoothing 0.0029954730993964054 +234 94 model.embedding_dim 2.0 +234 94 training.batch_size 1.0 +234 94 training.label_smoothing 0.48681266284755864 +234 95 model.embedding_dim 0.0 +234 95 training.batch_size 2.0 +234 95 training.label_smoothing 0.021427751437856503 +234 96 model.embedding_dim 0.0 +234 96 training.batch_size 2.0 +234 96 training.label_smoothing 0.4109049412686158 +234 97 model.embedding_dim 0.0 +234 97 training.batch_size 2.0 +234 97 training.label_smoothing 0.07502593537070684 +234 98 model.embedding_dim 1.0 +234 98 training.batch_size 1.0 +234 98 training.label_smoothing 0.984976275385672 +234 99 model.embedding_dim 1.0 +234 99 training.batch_size 2.0 +234 99 training.label_smoothing 0.010490270659975938 +234 100 model.embedding_dim 1.0 +234 100 training.batch_size 1.0 +234 100 training.label_smoothing 0.019187033657484552 +234 1 dataset """kinships""" +234 1 model """ermlp""" +234 1 loss """bceaftersigmoid""" +234 1 regularizer """no""" +234 1 optimizer """adadelta""" +234 1 training_loop """lcwa""" +234 1 evaluator """rankbased""" +234 2 dataset """kinships""" +234 2 model """ermlp""" +234 2 loss """bceaftersigmoid""" +234 2 regularizer """no""" +234 2 optimizer """adadelta""" +234 2 training_loop """lcwa""" +234 2 evaluator """rankbased""" +234 3 dataset """kinships""" +234 3 model """ermlp""" +234 3 loss 
"""bceaftersigmoid""" +234 3 regularizer """no""" +234 3 optimizer """adadelta""" +234 3 training_loop """lcwa""" +234 3 evaluator """rankbased""" +234 4 dataset """kinships""" +234 4 model """ermlp""" +234 4 loss """bceaftersigmoid""" +234 4 regularizer """no""" +234 4 optimizer """adadelta""" +234 4 training_loop """lcwa""" +234 4 evaluator """rankbased""" +234 5 dataset """kinships""" +234 5 model """ermlp""" +234 5 loss """bceaftersigmoid""" +234 5 regularizer """no""" +234 5 optimizer """adadelta""" +234 5 training_loop """lcwa""" +234 5 evaluator """rankbased""" +234 6 dataset """kinships""" +234 6 model """ermlp""" +234 6 loss """bceaftersigmoid""" +234 6 regularizer """no""" +234 6 optimizer """adadelta""" +234 6 training_loop """lcwa""" +234 6 evaluator """rankbased""" +234 7 dataset """kinships""" +234 7 model """ermlp""" +234 7 loss """bceaftersigmoid""" +234 7 regularizer """no""" +234 7 optimizer """adadelta""" +234 7 training_loop """lcwa""" +234 7 evaluator """rankbased""" +234 8 dataset """kinships""" +234 8 model """ermlp""" +234 8 loss """bceaftersigmoid""" +234 8 regularizer """no""" +234 8 optimizer """adadelta""" +234 8 training_loop """lcwa""" +234 8 evaluator """rankbased""" +234 9 dataset """kinships""" +234 9 model """ermlp""" +234 9 loss """bceaftersigmoid""" +234 9 regularizer """no""" +234 9 optimizer """adadelta""" +234 9 training_loop """lcwa""" +234 9 evaluator """rankbased""" +234 10 dataset """kinships""" +234 10 model """ermlp""" +234 10 loss """bceaftersigmoid""" +234 10 regularizer """no""" +234 10 optimizer """adadelta""" +234 10 training_loop """lcwa""" +234 10 evaluator """rankbased""" +234 11 dataset """kinships""" +234 11 model """ermlp""" +234 11 loss """bceaftersigmoid""" +234 11 regularizer """no""" +234 11 optimizer """adadelta""" +234 11 training_loop """lcwa""" +234 11 evaluator """rankbased""" +234 12 dataset """kinships""" +234 12 model """ermlp""" +234 12 loss """bceaftersigmoid""" +234 12 regularizer """no""" +234 
12 optimizer """adadelta""" +234 12 training_loop """lcwa""" +234 12 evaluator """rankbased""" +234 13 dataset """kinships""" +234 13 model """ermlp""" +234 13 loss """bceaftersigmoid""" +234 13 regularizer """no""" +234 13 optimizer """adadelta""" +234 13 training_loop """lcwa""" +234 13 evaluator """rankbased""" +234 14 dataset """kinships""" +234 14 model """ermlp""" +234 14 loss """bceaftersigmoid""" +234 14 regularizer """no""" +234 14 optimizer """adadelta""" +234 14 training_loop """lcwa""" +234 14 evaluator """rankbased""" +234 15 dataset """kinships""" +234 15 model """ermlp""" +234 15 loss """bceaftersigmoid""" +234 15 regularizer """no""" +234 15 optimizer """adadelta""" +234 15 training_loop """lcwa""" +234 15 evaluator """rankbased""" +234 16 dataset """kinships""" +234 16 model """ermlp""" +234 16 loss """bceaftersigmoid""" +234 16 regularizer """no""" +234 16 optimizer """adadelta""" +234 16 training_loop """lcwa""" +234 16 evaluator """rankbased""" +234 17 dataset """kinships""" +234 17 model """ermlp""" +234 17 loss """bceaftersigmoid""" +234 17 regularizer """no""" +234 17 optimizer """adadelta""" +234 17 training_loop """lcwa""" +234 17 evaluator """rankbased""" +234 18 dataset """kinships""" +234 18 model """ermlp""" +234 18 loss """bceaftersigmoid""" +234 18 regularizer """no""" +234 18 optimizer """adadelta""" +234 18 training_loop """lcwa""" +234 18 evaluator """rankbased""" +234 19 dataset """kinships""" +234 19 model """ermlp""" +234 19 loss """bceaftersigmoid""" +234 19 regularizer """no""" +234 19 optimizer """adadelta""" +234 19 training_loop """lcwa""" +234 19 evaluator """rankbased""" +234 20 dataset """kinships""" +234 20 model """ermlp""" +234 20 loss """bceaftersigmoid""" +234 20 regularizer """no""" +234 20 optimizer """adadelta""" +234 20 training_loop """lcwa""" +234 20 evaluator """rankbased""" +234 21 dataset """kinships""" +234 21 model """ermlp""" +234 21 loss """bceaftersigmoid""" +234 21 regularizer """no""" +234 21 
optimizer """adadelta""" +234 21 training_loop """lcwa""" +234 21 evaluator """rankbased""" +234 22 dataset """kinships""" +234 22 model """ermlp""" +234 22 loss """bceaftersigmoid""" +234 22 regularizer """no""" +234 22 optimizer """adadelta""" +234 22 training_loop """lcwa""" +234 22 evaluator """rankbased""" +234 23 dataset """kinships""" +234 23 model """ermlp""" +234 23 loss """bceaftersigmoid""" +234 23 regularizer """no""" +234 23 optimizer """adadelta""" +234 23 training_loop """lcwa""" +234 23 evaluator """rankbased""" +234 24 dataset """kinships""" +234 24 model """ermlp""" +234 24 loss """bceaftersigmoid""" +234 24 regularizer """no""" +234 24 optimizer """adadelta""" +234 24 training_loop """lcwa""" +234 24 evaluator """rankbased""" +234 25 dataset """kinships""" +234 25 model """ermlp""" +234 25 loss """bceaftersigmoid""" +234 25 regularizer """no""" +234 25 optimizer """adadelta""" +234 25 training_loop """lcwa""" +234 25 evaluator """rankbased""" +234 26 dataset """kinships""" +234 26 model """ermlp""" +234 26 loss """bceaftersigmoid""" +234 26 regularizer """no""" +234 26 optimizer """adadelta""" +234 26 training_loop """lcwa""" +234 26 evaluator """rankbased""" +234 27 dataset """kinships""" +234 27 model """ermlp""" +234 27 loss """bceaftersigmoid""" +234 27 regularizer """no""" +234 27 optimizer """adadelta""" +234 27 training_loop """lcwa""" +234 27 evaluator """rankbased""" +234 28 dataset """kinships""" +234 28 model """ermlp""" +234 28 loss """bceaftersigmoid""" +234 28 regularizer """no""" +234 28 optimizer """adadelta""" +234 28 training_loop """lcwa""" +234 28 evaluator """rankbased""" +234 29 dataset """kinships""" +234 29 model """ermlp""" +234 29 loss """bceaftersigmoid""" +234 29 regularizer """no""" +234 29 optimizer """adadelta""" +234 29 training_loop """lcwa""" +234 29 evaluator """rankbased""" +234 30 dataset """kinships""" +234 30 model """ermlp""" +234 30 loss """bceaftersigmoid""" +234 30 regularizer """no""" +234 30 optimizer 
"""adadelta""" +234 30 training_loop """lcwa""" +234 30 evaluator """rankbased""" +234 31 dataset """kinships""" +234 31 model """ermlp""" +234 31 loss """bceaftersigmoid""" +234 31 regularizer """no""" +234 31 optimizer """adadelta""" +234 31 training_loop """lcwa""" +234 31 evaluator """rankbased""" +234 32 dataset """kinships""" +234 32 model """ermlp""" +234 32 loss """bceaftersigmoid""" +234 32 regularizer """no""" +234 32 optimizer """adadelta""" +234 32 training_loop """lcwa""" +234 32 evaluator """rankbased""" +234 33 dataset """kinships""" +234 33 model """ermlp""" +234 33 loss """bceaftersigmoid""" +234 33 regularizer """no""" +234 33 optimizer """adadelta""" +234 33 training_loop """lcwa""" +234 33 evaluator """rankbased""" +234 34 dataset """kinships""" +234 34 model """ermlp""" +234 34 loss """bceaftersigmoid""" +234 34 regularizer """no""" +234 34 optimizer """adadelta""" +234 34 training_loop """lcwa""" +234 34 evaluator """rankbased""" +234 35 dataset """kinships""" +234 35 model """ermlp""" +234 35 loss """bceaftersigmoid""" +234 35 regularizer """no""" +234 35 optimizer """adadelta""" +234 35 training_loop """lcwa""" +234 35 evaluator """rankbased""" +234 36 dataset """kinships""" +234 36 model """ermlp""" +234 36 loss """bceaftersigmoid""" +234 36 regularizer """no""" +234 36 optimizer """adadelta""" +234 36 training_loop """lcwa""" +234 36 evaluator """rankbased""" +234 37 dataset """kinships""" +234 37 model """ermlp""" +234 37 loss """bceaftersigmoid""" +234 37 regularizer """no""" +234 37 optimizer """adadelta""" +234 37 training_loop """lcwa""" +234 37 evaluator """rankbased""" +234 38 dataset """kinships""" +234 38 model """ermlp""" +234 38 loss """bceaftersigmoid""" +234 38 regularizer """no""" +234 38 optimizer """adadelta""" +234 38 training_loop """lcwa""" +234 38 evaluator """rankbased""" +234 39 dataset """kinships""" +234 39 model """ermlp""" +234 39 loss """bceaftersigmoid""" +234 39 regularizer """no""" +234 39 optimizer 
"""adadelta""" +234 39 training_loop """lcwa""" +234 39 evaluator """rankbased""" +234 40 dataset """kinships""" +234 40 model """ermlp""" +234 40 loss """bceaftersigmoid""" +234 40 regularizer """no""" +234 40 optimizer """adadelta""" +234 40 training_loop """lcwa""" +234 40 evaluator """rankbased""" +234 41 dataset """kinships""" +234 41 model """ermlp""" +234 41 loss """bceaftersigmoid""" +234 41 regularizer """no""" +234 41 optimizer """adadelta""" +234 41 training_loop """lcwa""" +234 41 evaluator """rankbased""" +234 42 dataset """kinships""" +234 42 model """ermlp""" +234 42 loss """bceaftersigmoid""" +234 42 regularizer """no""" +234 42 optimizer """adadelta""" +234 42 training_loop """lcwa""" +234 42 evaluator """rankbased""" +234 43 dataset """kinships""" +234 43 model """ermlp""" +234 43 loss """bceaftersigmoid""" +234 43 regularizer """no""" +234 43 optimizer """adadelta""" +234 43 training_loop """lcwa""" +234 43 evaluator """rankbased""" +234 44 dataset """kinships""" +234 44 model """ermlp""" +234 44 loss """bceaftersigmoid""" +234 44 regularizer """no""" +234 44 optimizer """adadelta""" +234 44 training_loop """lcwa""" +234 44 evaluator """rankbased""" +234 45 dataset """kinships""" +234 45 model """ermlp""" +234 45 loss """bceaftersigmoid""" +234 45 regularizer """no""" +234 45 optimizer """adadelta""" +234 45 training_loop """lcwa""" +234 45 evaluator """rankbased""" +234 46 dataset """kinships""" +234 46 model """ermlp""" +234 46 loss """bceaftersigmoid""" +234 46 regularizer """no""" +234 46 optimizer """adadelta""" +234 46 training_loop """lcwa""" +234 46 evaluator """rankbased""" +234 47 dataset """kinships""" +234 47 model """ermlp""" +234 47 loss """bceaftersigmoid""" +234 47 regularizer """no""" +234 47 optimizer """adadelta""" +234 47 training_loop """lcwa""" +234 47 evaluator """rankbased""" +234 48 dataset """kinships""" +234 48 model """ermlp""" +234 48 loss """bceaftersigmoid""" +234 48 regularizer """no""" +234 48 optimizer 
"""adadelta""" +234 48 training_loop """lcwa""" +234 48 evaluator """rankbased""" +234 49 dataset """kinships""" +234 49 model """ermlp""" +234 49 loss """bceaftersigmoid""" +234 49 regularizer """no""" +234 49 optimizer """adadelta""" +234 49 training_loop """lcwa""" +234 49 evaluator """rankbased""" +234 50 dataset """kinships""" +234 50 model """ermlp""" +234 50 loss """bceaftersigmoid""" +234 50 regularizer """no""" +234 50 optimizer """adadelta""" +234 50 training_loop """lcwa""" +234 50 evaluator """rankbased""" +234 51 dataset """kinships""" +234 51 model """ermlp""" +234 51 loss """bceaftersigmoid""" +234 51 regularizer """no""" +234 51 optimizer """adadelta""" +234 51 training_loop """lcwa""" +234 51 evaluator """rankbased""" +234 52 dataset """kinships""" +234 52 model """ermlp""" +234 52 loss """bceaftersigmoid""" +234 52 regularizer """no""" +234 52 optimizer """adadelta""" +234 52 training_loop """lcwa""" +234 52 evaluator """rankbased""" +234 53 dataset """kinships""" +234 53 model """ermlp""" +234 53 loss """bceaftersigmoid""" +234 53 regularizer """no""" +234 53 optimizer """adadelta""" +234 53 training_loop """lcwa""" +234 53 evaluator """rankbased""" +234 54 dataset """kinships""" +234 54 model """ermlp""" +234 54 loss """bceaftersigmoid""" +234 54 regularizer """no""" +234 54 optimizer """adadelta""" +234 54 training_loop """lcwa""" +234 54 evaluator """rankbased""" +234 55 dataset """kinships""" +234 55 model """ermlp""" +234 55 loss """bceaftersigmoid""" +234 55 regularizer """no""" +234 55 optimizer """adadelta""" +234 55 training_loop """lcwa""" +234 55 evaluator """rankbased""" +234 56 dataset """kinships""" +234 56 model """ermlp""" +234 56 loss """bceaftersigmoid""" +234 56 regularizer """no""" +234 56 optimizer """adadelta""" +234 56 training_loop """lcwa""" +234 56 evaluator """rankbased""" +234 57 dataset """kinships""" +234 57 model """ermlp""" +234 57 loss """bceaftersigmoid""" +234 57 regularizer """no""" +234 57 optimizer 
"""adadelta""" +234 57 training_loop """lcwa""" +234 57 evaluator """rankbased""" +234 58 dataset """kinships""" +234 58 model """ermlp""" +234 58 loss """bceaftersigmoid""" +234 58 regularizer """no""" +234 58 optimizer """adadelta""" +234 58 training_loop """lcwa""" +234 58 evaluator """rankbased""" +234 59 dataset """kinships""" +234 59 model """ermlp""" +234 59 loss """bceaftersigmoid""" +234 59 regularizer """no""" +234 59 optimizer """adadelta""" +234 59 training_loop """lcwa""" +234 59 evaluator """rankbased""" +234 60 dataset """kinships""" +234 60 model """ermlp""" +234 60 loss """bceaftersigmoid""" +234 60 regularizer """no""" +234 60 optimizer """adadelta""" +234 60 training_loop """lcwa""" +234 60 evaluator """rankbased""" +234 61 dataset """kinships""" +234 61 model """ermlp""" +234 61 loss """bceaftersigmoid""" +234 61 regularizer """no""" +234 61 optimizer """adadelta""" +234 61 training_loop """lcwa""" +234 61 evaluator """rankbased""" +234 62 dataset """kinships""" +234 62 model """ermlp""" +234 62 loss """bceaftersigmoid""" +234 62 regularizer """no""" +234 62 optimizer """adadelta""" +234 62 training_loop """lcwa""" +234 62 evaluator """rankbased""" +234 63 dataset """kinships""" +234 63 model """ermlp""" +234 63 loss """bceaftersigmoid""" +234 63 regularizer """no""" +234 63 optimizer """adadelta""" +234 63 training_loop """lcwa""" +234 63 evaluator """rankbased""" +234 64 dataset """kinships""" +234 64 model """ermlp""" +234 64 loss """bceaftersigmoid""" +234 64 regularizer """no""" +234 64 optimizer """adadelta""" +234 64 training_loop """lcwa""" +234 64 evaluator """rankbased""" +234 65 dataset """kinships""" +234 65 model """ermlp""" +234 65 loss """bceaftersigmoid""" +234 65 regularizer """no""" +234 65 optimizer """adadelta""" +234 65 training_loop """lcwa""" +234 65 evaluator """rankbased""" +234 66 dataset """kinships""" +234 66 model """ermlp""" +234 66 loss """bceaftersigmoid""" +234 66 regularizer """no""" +234 66 optimizer 
"""adadelta""" +234 66 training_loop """lcwa""" +234 66 evaluator """rankbased""" +234 67 dataset """kinships""" +234 67 model """ermlp""" +234 67 loss """bceaftersigmoid""" +234 67 regularizer """no""" +234 67 optimizer """adadelta""" +234 67 training_loop """lcwa""" +234 67 evaluator """rankbased""" +234 68 dataset """kinships""" +234 68 model """ermlp""" +234 68 loss """bceaftersigmoid""" +234 68 regularizer """no""" +234 68 optimizer """adadelta""" +234 68 training_loop """lcwa""" +234 68 evaluator """rankbased""" +234 69 dataset """kinships""" +234 69 model """ermlp""" +234 69 loss """bceaftersigmoid""" +234 69 regularizer """no""" +234 69 optimizer """adadelta""" +234 69 training_loop """lcwa""" +234 69 evaluator """rankbased""" +234 70 dataset """kinships""" +234 70 model """ermlp""" +234 70 loss """bceaftersigmoid""" +234 70 regularizer """no""" +234 70 optimizer """adadelta""" +234 70 training_loop """lcwa""" +234 70 evaluator """rankbased""" +234 71 dataset """kinships""" +234 71 model """ermlp""" +234 71 loss """bceaftersigmoid""" +234 71 regularizer """no""" +234 71 optimizer """adadelta""" +234 71 training_loop """lcwa""" +234 71 evaluator """rankbased""" +234 72 dataset """kinships""" +234 72 model """ermlp""" +234 72 loss """bceaftersigmoid""" +234 72 regularizer """no""" +234 72 optimizer """adadelta""" +234 72 training_loop """lcwa""" +234 72 evaluator """rankbased""" +234 73 dataset """kinships""" +234 73 model """ermlp""" +234 73 loss """bceaftersigmoid""" +234 73 regularizer """no""" +234 73 optimizer """adadelta""" +234 73 training_loop """lcwa""" +234 73 evaluator """rankbased""" +234 74 dataset """kinships""" +234 74 model """ermlp""" +234 74 loss """bceaftersigmoid""" +234 74 regularizer """no""" +234 74 optimizer """adadelta""" +234 74 training_loop """lcwa""" +234 74 evaluator """rankbased""" +234 75 dataset """kinships""" +234 75 model """ermlp""" +234 75 loss """bceaftersigmoid""" +234 75 regularizer """no""" +234 75 optimizer 
"""adadelta""" +234 75 training_loop """lcwa""" +234 75 evaluator """rankbased""" +234 76 dataset """kinships""" +234 76 model """ermlp""" +234 76 loss """bceaftersigmoid""" +234 76 regularizer """no""" +234 76 optimizer """adadelta""" +234 76 training_loop """lcwa""" +234 76 evaluator """rankbased""" +234 77 dataset """kinships""" +234 77 model """ermlp""" +234 77 loss """bceaftersigmoid""" +234 77 regularizer """no""" +234 77 optimizer """adadelta""" +234 77 training_loop """lcwa""" +234 77 evaluator """rankbased""" +234 78 dataset """kinships""" +234 78 model """ermlp""" +234 78 loss """bceaftersigmoid""" +234 78 regularizer """no""" +234 78 optimizer """adadelta""" +234 78 training_loop """lcwa""" +234 78 evaluator """rankbased""" +234 79 dataset """kinships""" +234 79 model """ermlp""" +234 79 loss """bceaftersigmoid""" +234 79 regularizer """no""" +234 79 optimizer """adadelta""" +234 79 training_loop """lcwa""" +234 79 evaluator """rankbased""" +234 80 dataset """kinships""" +234 80 model """ermlp""" +234 80 loss """bceaftersigmoid""" +234 80 regularizer """no""" +234 80 optimizer """adadelta""" +234 80 training_loop """lcwa""" +234 80 evaluator """rankbased""" +234 81 dataset """kinships""" +234 81 model """ermlp""" +234 81 loss """bceaftersigmoid""" +234 81 regularizer """no""" +234 81 optimizer """adadelta""" +234 81 training_loop """lcwa""" +234 81 evaluator """rankbased""" +234 82 dataset """kinships""" +234 82 model """ermlp""" +234 82 loss """bceaftersigmoid""" +234 82 regularizer """no""" +234 82 optimizer """adadelta""" +234 82 training_loop """lcwa""" +234 82 evaluator """rankbased""" +234 83 dataset """kinships""" +234 83 model """ermlp""" +234 83 loss """bceaftersigmoid""" +234 83 regularizer """no""" +234 83 optimizer """adadelta""" +234 83 training_loop """lcwa""" +234 83 evaluator """rankbased""" +234 84 dataset """kinships""" +234 84 model """ermlp""" +234 84 loss """bceaftersigmoid""" +234 84 regularizer """no""" +234 84 optimizer 
"""adadelta""" +234 84 training_loop """lcwa""" +234 84 evaluator """rankbased""" +234 85 dataset """kinships""" +234 85 model """ermlp""" +234 85 loss """bceaftersigmoid""" +234 85 regularizer """no""" +234 85 optimizer """adadelta""" +234 85 training_loop """lcwa""" +234 85 evaluator """rankbased""" +234 86 dataset """kinships""" +234 86 model """ermlp""" +234 86 loss """bceaftersigmoid""" +234 86 regularizer """no""" +234 86 optimizer """adadelta""" +234 86 training_loop """lcwa""" +234 86 evaluator """rankbased""" +234 87 dataset """kinships""" +234 87 model """ermlp""" +234 87 loss """bceaftersigmoid""" +234 87 regularizer """no""" +234 87 optimizer """adadelta""" +234 87 training_loop """lcwa""" +234 87 evaluator """rankbased""" +234 88 dataset """kinships""" +234 88 model """ermlp""" +234 88 loss """bceaftersigmoid""" +234 88 regularizer """no""" +234 88 optimizer """adadelta""" +234 88 training_loop """lcwa""" +234 88 evaluator """rankbased""" +234 89 dataset """kinships""" +234 89 model """ermlp""" +234 89 loss """bceaftersigmoid""" +234 89 regularizer """no""" +234 89 optimizer """adadelta""" +234 89 training_loop """lcwa""" +234 89 evaluator """rankbased""" +234 90 dataset """kinships""" +234 90 model """ermlp""" +234 90 loss """bceaftersigmoid""" +234 90 regularizer """no""" +234 90 optimizer """adadelta""" +234 90 training_loop """lcwa""" +234 90 evaluator """rankbased""" +234 91 dataset """kinships""" +234 91 model """ermlp""" +234 91 loss """bceaftersigmoid""" +234 91 regularizer """no""" +234 91 optimizer """adadelta""" +234 91 training_loop """lcwa""" +234 91 evaluator """rankbased""" +234 92 dataset """kinships""" +234 92 model """ermlp""" +234 92 loss """bceaftersigmoid""" +234 92 regularizer """no""" +234 92 optimizer """adadelta""" +234 92 training_loop """lcwa""" +234 92 evaluator """rankbased""" +234 93 dataset """kinships""" +234 93 model """ermlp""" +234 93 loss """bceaftersigmoid""" +234 93 regularizer """no""" +234 93 optimizer 
"""adadelta""" +234 93 training_loop """lcwa""" +234 93 evaluator """rankbased""" +234 94 dataset """kinships""" +234 94 model """ermlp""" +234 94 loss """bceaftersigmoid""" +234 94 regularizer """no""" +234 94 optimizer """adadelta""" +234 94 training_loop """lcwa""" +234 94 evaluator """rankbased""" +234 95 dataset """kinships""" +234 95 model """ermlp""" +234 95 loss """bceaftersigmoid""" +234 95 regularizer """no""" +234 95 optimizer """adadelta""" +234 95 training_loop """lcwa""" +234 95 evaluator """rankbased""" +234 96 dataset """kinships""" +234 96 model """ermlp""" +234 96 loss """bceaftersigmoid""" +234 96 regularizer """no""" +234 96 optimizer """adadelta""" +234 96 training_loop """lcwa""" +234 96 evaluator """rankbased""" +234 97 dataset """kinships""" +234 97 model """ermlp""" +234 97 loss """bceaftersigmoid""" +234 97 regularizer """no""" +234 97 optimizer """adadelta""" +234 97 training_loop """lcwa""" +234 97 evaluator """rankbased""" +234 98 dataset """kinships""" +234 98 model """ermlp""" +234 98 loss """bceaftersigmoid""" +234 98 regularizer """no""" +234 98 optimizer """adadelta""" +234 98 training_loop """lcwa""" +234 98 evaluator """rankbased""" +234 99 dataset """kinships""" +234 99 model """ermlp""" +234 99 loss """bceaftersigmoid""" +234 99 regularizer """no""" +234 99 optimizer """adadelta""" +234 99 training_loop """lcwa""" +234 99 evaluator """rankbased""" +234 100 dataset """kinships""" +234 100 model """ermlp""" +234 100 loss """bceaftersigmoid""" +234 100 regularizer """no""" +234 100 optimizer """adadelta""" +234 100 training_loop """lcwa""" +234 100 evaluator """rankbased""" +235 1 model.embedding_dim 1.0 +235 1 training.batch_size 0.0 +235 1 training.label_smoothing 0.009683324918166834 +235 2 model.embedding_dim 0.0 +235 2 training.batch_size 1.0 +235 2 training.label_smoothing 0.042262008878576666 +235 3 model.embedding_dim 0.0 +235 3 training.batch_size 1.0 +235 3 training.label_smoothing 0.002643880483413095 +235 4 
model.embedding_dim 0.0 +235 4 training.batch_size 2.0 +235 4 training.label_smoothing 0.0031099397751840783 +235 5 model.embedding_dim 0.0 +235 5 training.batch_size 0.0 +235 5 training.label_smoothing 0.023920726653765684 +235 6 model.embedding_dim 2.0 +235 6 training.batch_size 2.0 +235 6 training.label_smoothing 0.00431883332787735 +235 7 model.embedding_dim 1.0 +235 7 training.batch_size 1.0 +235 7 training.label_smoothing 0.5067836489536985 +235 8 model.embedding_dim 2.0 +235 8 training.batch_size 2.0 +235 8 training.label_smoothing 0.7260350538448974 +235 9 model.embedding_dim 0.0 +235 9 training.batch_size 1.0 +235 9 training.label_smoothing 0.9654068920435149 +235 10 model.embedding_dim 0.0 +235 10 training.batch_size 1.0 +235 10 training.label_smoothing 0.1528404147945856 +235 11 model.embedding_dim 1.0 +235 11 training.batch_size 2.0 +235 11 training.label_smoothing 0.07474793263112996 +235 12 model.embedding_dim 1.0 +235 12 training.batch_size 1.0 +235 12 training.label_smoothing 0.036856637872480544 +235 13 model.embedding_dim 2.0 +235 13 training.batch_size 1.0 +235 13 training.label_smoothing 0.11875924398582262 +235 14 model.embedding_dim 1.0 +235 14 training.batch_size 1.0 +235 14 training.label_smoothing 0.009603504320457174 +235 15 model.embedding_dim 1.0 +235 15 training.batch_size 1.0 +235 15 training.label_smoothing 0.006435412917714952 +235 16 model.embedding_dim 1.0 +235 16 training.batch_size 1.0 +235 16 training.label_smoothing 0.0454266479057158 +235 17 model.embedding_dim 1.0 +235 17 training.batch_size 0.0 +235 17 training.label_smoothing 0.7262459663714591 +235 18 model.embedding_dim 0.0 +235 18 training.batch_size 0.0 +235 18 training.label_smoothing 0.0015849304125544626 +235 19 model.embedding_dim 1.0 +235 19 training.batch_size 1.0 +235 19 training.label_smoothing 0.17898927870015952 +235 20 model.embedding_dim 0.0 +235 20 training.batch_size 2.0 +235 20 training.label_smoothing 0.4808224505281923 +235 21 model.embedding_dim 1.0 
+235 21 training.batch_size 2.0 +235 21 training.label_smoothing 0.7307720699837973 +235 22 model.embedding_dim 0.0 +235 22 training.batch_size 1.0 +235 22 training.label_smoothing 0.02527675576772593 +235 23 model.embedding_dim 1.0 +235 23 training.batch_size 2.0 +235 23 training.label_smoothing 0.01598490460863701 +235 24 model.embedding_dim 0.0 +235 24 training.batch_size 1.0 +235 24 training.label_smoothing 0.0016792784567152858 +235 25 model.embedding_dim 2.0 +235 25 training.batch_size 1.0 +235 25 training.label_smoothing 0.001944190621623962 +235 26 model.embedding_dim 1.0 +235 26 training.batch_size 2.0 +235 26 training.label_smoothing 0.03047858939767287 +235 27 model.embedding_dim 1.0 +235 27 training.batch_size 2.0 +235 27 training.label_smoothing 0.0741047154873543 +235 28 model.embedding_dim 0.0 +235 28 training.batch_size 0.0 +235 28 training.label_smoothing 0.001326900194438565 +235 29 model.embedding_dim 1.0 +235 29 training.batch_size 2.0 +235 29 training.label_smoothing 0.0017719096652388023 +235 30 model.embedding_dim 2.0 +235 30 training.batch_size 2.0 +235 30 training.label_smoothing 0.002038676021032479 +235 31 model.embedding_dim 0.0 +235 31 training.batch_size 0.0 +235 31 training.label_smoothing 0.2569166219815092 +235 32 model.embedding_dim 1.0 +235 32 training.batch_size 2.0 +235 32 training.label_smoothing 0.021747857838953718 +235 33 model.embedding_dim 1.0 +235 33 training.batch_size 0.0 +235 33 training.label_smoothing 0.5014605902443628 +235 34 model.embedding_dim 0.0 +235 34 training.batch_size 0.0 +235 34 training.label_smoothing 0.25936826278328273 +235 35 model.embedding_dim 0.0 +235 35 training.batch_size 2.0 +235 35 training.label_smoothing 0.017917619102455706 +235 36 model.embedding_dim 2.0 +235 36 training.batch_size 1.0 +235 36 training.label_smoothing 0.0020087638084890147 +235 37 model.embedding_dim 0.0 +235 37 training.batch_size 2.0 +235 37 training.label_smoothing 0.009036603183152298 +235 38 model.embedding_dim 0.0 
+235 38 training.batch_size 1.0 +235 38 training.label_smoothing 0.20721358388926028 +235 39 model.embedding_dim 2.0 +235 39 training.batch_size 0.0 +235 39 training.label_smoothing 0.7282720851205854 +235 40 model.embedding_dim 1.0 +235 40 training.batch_size 2.0 +235 40 training.label_smoothing 0.0012700898107228036 +235 41 model.embedding_dim 1.0 +235 41 training.batch_size 2.0 +235 41 training.label_smoothing 0.02317044304087745 +235 42 model.embedding_dim 0.0 +235 42 training.batch_size 1.0 +235 42 training.label_smoothing 0.044839815813043256 +235 43 model.embedding_dim 1.0 +235 43 training.batch_size 1.0 +235 43 training.label_smoothing 0.097322910483445 +235 44 model.embedding_dim 1.0 +235 44 training.batch_size 2.0 +235 44 training.label_smoothing 0.018973173506094308 +235 45 model.embedding_dim 0.0 +235 45 training.batch_size 2.0 +235 45 training.label_smoothing 0.0010955605096348942 +235 46 model.embedding_dim 0.0 +235 46 training.batch_size 0.0 +235 46 training.label_smoothing 0.0019250982580229635 +235 47 model.embedding_dim 1.0 +235 47 training.batch_size 0.0 +235 47 training.label_smoothing 0.9289084949455512 +235 48 model.embedding_dim 0.0 +235 48 training.batch_size 1.0 +235 48 training.label_smoothing 0.5832477385324578 +235 49 model.embedding_dim 2.0 +235 49 training.batch_size 1.0 +235 49 training.label_smoothing 0.001032115096053631 +235 50 model.embedding_dim 2.0 +235 50 training.batch_size 0.0 +235 50 training.label_smoothing 0.007147293317714652 +235 51 model.embedding_dim 1.0 +235 51 training.batch_size 2.0 +235 51 training.label_smoothing 0.0018157608463337843 +235 52 model.embedding_dim 2.0 +235 52 training.batch_size 1.0 +235 52 training.label_smoothing 0.28444279345401585 +235 53 model.embedding_dim 1.0 +235 53 training.batch_size 1.0 +235 53 training.label_smoothing 0.04124094472380922 +235 54 model.embedding_dim 0.0 +235 54 training.batch_size 0.0 +235 54 training.label_smoothing 0.04547443634861129 +235 55 model.embedding_dim 0.0 
+235 55 training.batch_size 2.0 +235 55 training.label_smoothing 0.005810006796750563 +235 56 model.embedding_dim 0.0 +235 56 training.batch_size 0.0 +235 56 training.label_smoothing 0.2985109039454081 +235 57 model.embedding_dim 0.0 +235 57 training.batch_size 0.0 +235 57 training.label_smoothing 0.07735207312922633 +235 58 model.embedding_dim 2.0 +235 58 training.batch_size 1.0 +235 58 training.label_smoothing 0.0036603638969081695 +235 59 model.embedding_dim 1.0 +235 59 training.batch_size 1.0 +235 59 training.label_smoothing 0.01696462709304752 +235 60 model.embedding_dim 2.0 +235 60 training.batch_size 1.0 +235 60 training.label_smoothing 0.01593924963410894 +235 61 model.embedding_dim 0.0 +235 61 training.batch_size 0.0 +235 61 training.label_smoothing 0.07957852382063192 +235 62 model.embedding_dim 2.0 +235 62 training.batch_size 2.0 +235 62 training.label_smoothing 0.1903690571391247 +235 63 model.embedding_dim 2.0 +235 63 training.batch_size 2.0 +235 63 training.label_smoothing 0.0011150722049405586 +235 64 model.embedding_dim 0.0 +235 64 training.batch_size 2.0 +235 64 training.label_smoothing 0.10301584753694172 +235 65 model.embedding_dim 1.0 +235 65 training.batch_size 1.0 +235 65 training.label_smoothing 0.0027013710248283297 +235 66 model.embedding_dim 2.0 +235 66 training.batch_size 0.0 +235 66 training.label_smoothing 0.0359741757337148 +235 67 model.embedding_dim 1.0 +235 67 training.batch_size 2.0 +235 67 training.label_smoothing 0.20208780635281673 +235 68 model.embedding_dim 0.0 +235 68 training.batch_size 2.0 +235 68 training.label_smoothing 0.21268475507439577 +235 69 model.embedding_dim 2.0 +235 69 training.batch_size 0.0 +235 69 training.label_smoothing 0.09413561186057674 +235 70 model.embedding_dim 0.0 +235 70 training.batch_size 2.0 +235 70 training.label_smoothing 0.007741397416166273 +235 71 model.embedding_dim 2.0 +235 71 training.batch_size 2.0 +235 71 training.label_smoothing 0.2174737490750708 +235 72 model.embedding_dim 1.0 +235 
72 training.batch_size 0.0 +235 72 training.label_smoothing 0.11303046198339863 +235 73 model.embedding_dim 2.0 +235 73 training.batch_size 0.0 +235 73 training.label_smoothing 0.7328507984176337 +235 74 model.embedding_dim 0.0 +235 74 training.batch_size 1.0 +235 74 training.label_smoothing 0.055080698505824886 +235 75 model.embedding_dim 1.0 +235 75 training.batch_size 2.0 +235 75 training.label_smoothing 0.40688107986204725 +235 76 model.embedding_dim 0.0 +235 76 training.batch_size 1.0 +235 76 training.label_smoothing 0.17157900968165088 +235 77 model.embedding_dim 2.0 +235 77 training.batch_size 1.0 +235 77 training.label_smoothing 0.5393831427582862 +235 78 model.embedding_dim 1.0 +235 78 training.batch_size 0.0 +235 78 training.label_smoothing 0.2815800919600202 +235 79 model.embedding_dim 1.0 +235 79 training.batch_size 2.0 +235 79 training.label_smoothing 0.9429670773403278 +235 80 model.embedding_dim 1.0 +235 80 training.batch_size 2.0 +235 80 training.label_smoothing 0.05134950226994994 +235 81 model.embedding_dim 0.0 +235 81 training.batch_size 2.0 +235 81 training.label_smoothing 0.3884019042898917 +235 82 model.embedding_dim 2.0 +235 82 training.batch_size 2.0 +235 82 training.label_smoothing 0.008037578459303557 +235 83 model.embedding_dim 0.0 +235 83 training.batch_size 1.0 +235 83 training.label_smoothing 0.1889714996477125 +235 84 model.embedding_dim 2.0 +235 84 training.batch_size 2.0 +235 84 training.label_smoothing 0.021134313016829154 +235 85 model.embedding_dim 2.0 +235 85 training.batch_size 0.0 +235 85 training.label_smoothing 0.0015489007036231714 +235 86 model.embedding_dim 1.0 +235 86 training.batch_size 0.0 +235 86 training.label_smoothing 0.0034474502827583856 +235 87 model.embedding_dim 1.0 +235 87 training.batch_size 1.0 +235 87 training.label_smoothing 0.05830456614961259 +235 88 model.embedding_dim 1.0 +235 88 training.batch_size 0.0 +235 88 training.label_smoothing 0.00978300816737457 +235 89 model.embedding_dim 0.0 +235 89 
training.batch_size 0.0 +235 89 training.label_smoothing 0.06484638710933954 +235 90 model.embedding_dim 1.0 +235 90 training.batch_size 0.0 +235 90 training.label_smoothing 0.6332998438689681 +235 91 model.embedding_dim 1.0 +235 91 training.batch_size 0.0 +235 91 training.label_smoothing 0.021914914566144163 +235 92 model.embedding_dim 0.0 +235 92 training.batch_size 1.0 +235 92 training.label_smoothing 0.3775341579318818 +235 93 model.embedding_dim 1.0 +235 93 training.batch_size 0.0 +235 93 training.label_smoothing 0.1087374300501345 +235 94 model.embedding_dim 0.0 +235 94 training.batch_size 1.0 +235 94 training.label_smoothing 0.0012359872749854236 +235 95 model.embedding_dim 1.0 +235 95 training.batch_size 2.0 +235 95 training.label_smoothing 0.0023953273934523007 +235 96 model.embedding_dim 2.0 +235 96 training.batch_size 0.0 +235 96 training.label_smoothing 0.013894437738827035 +235 97 model.embedding_dim 0.0 +235 97 training.batch_size 2.0 +235 97 training.label_smoothing 0.28264203917771324 +235 98 model.embedding_dim 2.0 +235 98 training.batch_size 0.0 +235 98 training.label_smoothing 0.008018310452496815 +235 99 model.embedding_dim 0.0 +235 99 training.batch_size 0.0 +235 99 training.label_smoothing 0.392072955790115 +235 100 model.embedding_dim 0.0 +235 100 training.batch_size 2.0 +235 100 training.label_smoothing 0.05591681450439205 +235 1 dataset """kinships""" +235 1 model """ermlp""" +235 1 loss """softplus""" +235 1 regularizer """no""" +235 1 optimizer """adadelta""" +235 1 training_loop """lcwa""" +235 1 evaluator """rankbased""" +235 2 dataset """kinships""" +235 2 model """ermlp""" +235 2 loss """softplus""" +235 2 regularizer """no""" +235 2 optimizer """adadelta""" +235 2 training_loop """lcwa""" +235 2 evaluator """rankbased""" +235 3 dataset """kinships""" +235 3 model """ermlp""" +235 3 loss """softplus""" +235 3 regularizer """no""" +235 3 optimizer """adadelta""" +235 3 training_loop """lcwa""" +235 3 evaluator """rankbased""" +235 4 
dataset """kinships""" +235 4 model """ermlp""" +235 4 loss """softplus""" +235 4 regularizer """no""" +235 4 optimizer """adadelta""" +235 4 training_loop """lcwa""" +235 4 evaluator """rankbased""" +235 5 dataset """kinships""" +235 5 model """ermlp""" +235 5 loss """softplus""" +235 5 regularizer """no""" +235 5 optimizer """adadelta""" +235 5 training_loop """lcwa""" +235 5 evaluator """rankbased""" +235 6 dataset """kinships""" +235 6 model """ermlp""" +235 6 loss """softplus""" +235 6 regularizer """no""" +235 6 optimizer """adadelta""" +235 6 training_loop """lcwa""" +235 6 evaluator """rankbased""" +235 7 dataset """kinships""" +235 7 model """ermlp""" +235 7 loss """softplus""" +235 7 regularizer """no""" +235 7 optimizer """adadelta""" +235 7 training_loop """lcwa""" +235 7 evaluator """rankbased""" +235 8 dataset """kinships""" +235 8 model """ermlp""" +235 8 loss """softplus""" +235 8 regularizer """no""" +235 8 optimizer """adadelta""" +235 8 training_loop """lcwa""" +235 8 evaluator """rankbased""" +235 9 dataset """kinships""" +235 9 model """ermlp""" +235 9 loss """softplus""" +235 9 regularizer """no""" +235 9 optimizer """adadelta""" +235 9 training_loop """lcwa""" +235 9 evaluator """rankbased""" +235 10 dataset """kinships""" +235 10 model """ermlp""" +235 10 loss """softplus""" +235 10 regularizer """no""" +235 10 optimizer """adadelta""" +235 10 training_loop """lcwa""" +235 10 evaluator """rankbased""" +235 11 dataset """kinships""" +235 11 model """ermlp""" +235 11 loss """softplus""" +235 11 regularizer """no""" +235 11 optimizer """adadelta""" +235 11 training_loop """lcwa""" +235 11 evaluator """rankbased""" +235 12 dataset """kinships""" +235 12 model """ermlp""" +235 12 loss """softplus""" +235 12 regularizer """no""" +235 12 optimizer """adadelta""" +235 12 training_loop """lcwa""" +235 12 evaluator """rankbased""" +235 13 dataset """kinships""" +235 13 model """ermlp""" +235 13 loss """softplus""" +235 13 regularizer """no""" +235 13 
optimizer """adadelta""" +235 13 training_loop """lcwa""" +235 13 evaluator """rankbased""" +235 14 dataset """kinships""" +235 14 model """ermlp""" +235 14 loss """softplus""" +235 14 regularizer """no""" +235 14 optimizer """adadelta""" +235 14 training_loop """lcwa""" +235 14 evaluator """rankbased""" +235 15 dataset """kinships""" +235 15 model """ermlp""" +235 15 loss """softplus""" +235 15 regularizer """no""" +235 15 optimizer """adadelta""" +235 15 training_loop """lcwa""" +235 15 evaluator """rankbased""" +235 16 dataset """kinships""" +235 16 model """ermlp""" +235 16 loss """softplus""" +235 16 regularizer """no""" +235 16 optimizer """adadelta""" +235 16 training_loop """lcwa""" +235 16 evaluator """rankbased""" +235 17 dataset """kinships""" +235 17 model """ermlp""" +235 17 loss """softplus""" +235 17 regularizer """no""" +235 17 optimizer """adadelta""" +235 17 training_loop """lcwa""" +235 17 evaluator """rankbased""" +235 18 dataset """kinships""" +235 18 model """ermlp""" +235 18 loss """softplus""" +235 18 regularizer """no""" +235 18 optimizer """adadelta""" +235 18 training_loop """lcwa""" +235 18 evaluator """rankbased""" +235 19 dataset """kinships""" +235 19 model """ermlp""" +235 19 loss """softplus""" +235 19 regularizer """no""" +235 19 optimizer """adadelta""" +235 19 training_loop """lcwa""" +235 19 evaluator """rankbased""" +235 20 dataset """kinships""" +235 20 model """ermlp""" +235 20 loss """softplus""" +235 20 regularizer """no""" +235 20 optimizer """adadelta""" +235 20 training_loop """lcwa""" +235 20 evaluator """rankbased""" +235 21 dataset """kinships""" +235 21 model """ermlp""" +235 21 loss """softplus""" +235 21 regularizer """no""" +235 21 optimizer """adadelta""" +235 21 training_loop """lcwa""" +235 21 evaluator """rankbased""" +235 22 dataset """kinships""" +235 22 model """ermlp""" +235 22 loss """softplus""" +235 22 regularizer """no""" +235 22 optimizer """adadelta""" +235 22 training_loop """lcwa""" +235 22 
evaluator """rankbased""" +235 23 dataset """kinships""" +235 23 model """ermlp""" +235 23 loss """softplus""" +235 23 regularizer """no""" +235 23 optimizer """adadelta""" +235 23 training_loop """lcwa""" +235 23 evaluator """rankbased""" +235 24 dataset """kinships""" +235 24 model """ermlp""" +235 24 loss """softplus""" +235 24 regularizer """no""" +235 24 optimizer """adadelta""" +235 24 training_loop """lcwa""" +235 24 evaluator """rankbased""" +235 25 dataset """kinships""" +235 25 model """ermlp""" +235 25 loss """softplus""" +235 25 regularizer """no""" +235 25 optimizer """adadelta""" +235 25 training_loop """lcwa""" +235 25 evaluator """rankbased""" +235 26 dataset """kinships""" +235 26 model """ermlp""" +235 26 loss """softplus""" +235 26 regularizer """no""" +235 26 optimizer """adadelta""" +235 26 training_loop """lcwa""" +235 26 evaluator """rankbased""" +235 27 dataset """kinships""" +235 27 model """ermlp""" +235 27 loss """softplus""" +235 27 regularizer """no""" +235 27 optimizer """adadelta""" +235 27 training_loop """lcwa""" +235 27 evaluator """rankbased""" +235 28 dataset """kinships""" +235 28 model """ermlp""" +235 28 loss """softplus""" +235 28 regularizer """no""" +235 28 optimizer """adadelta""" +235 28 training_loop """lcwa""" +235 28 evaluator """rankbased""" +235 29 dataset """kinships""" +235 29 model """ermlp""" +235 29 loss """softplus""" +235 29 regularizer """no""" +235 29 optimizer """adadelta""" +235 29 training_loop """lcwa""" +235 29 evaluator """rankbased""" +235 30 dataset """kinships""" +235 30 model """ermlp""" +235 30 loss """softplus""" +235 30 regularizer """no""" +235 30 optimizer """adadelta""" +235 30 training_loop """lcwa""" +235 30 evaluator """rankbased""" +235 31 dataset """kinships""" +235 31 model """ermlp""" +235 31 loss """softplus""" +235 31 regularizer """no""" +235 31 optimizer """adadelta""" +235 31 training_loop """lcwa""" +235 31 evaluator """rankbased""" +235 32 dataset """kinships""" +235 32 model 
"""ermlp""" +235 32 loss """softplus""" +235 32 regularizer """no""" +235 32 optimizer """adadelta""" +235 32 training_loop """lcwa""" +235 32 evaluator """rankbased""" +235 33 dataset """kinships""" +235 33 model """ermlp""" +235 33 loss """softplus""" +235 33 regularizer """no""" +235 33 optimizer """adadelta""" +235 33 training_loop """lcwa""" +235 33 evaluator """rankbased""" +235 34 dataset """kinships""" +235 34 model """ermlp""" +235 34 loss """softplus""" +235 34 regularizer """no""" +235 34 optimizer """adadelta""" +235 34 training_loop """lcwa""" +235 34 evaluator """rankbased""" +235 35 dataset """kinships""" +235 35 model """ermlp""" +235 35 loss """softplus""" +235 35 regularizer """no""" +235 35 optimizer """adadelta""" +235 35 training_loop """lcwa""" +235 35 evaluator """rankbased""" +235 36 dataset """kinships""" +235 36 model """ermlp""" +235 36 loss """softplus""" +235 36 regularizer """no""" +235 36 optimizer """adadelta""" +235 36 training_loop """lcwa""" +235 36 evaluator """rankbased""" +235 37 dataset """kinships""" +235 37 model """ermlp""" +235 37 loss """softplus""" +235 37 regularizer """no""" +235 37 optimizer """adadelta""" +235 37 training_loop """lcwa""" +235 37 evaluator """rankbased""" +235 38 dataset """kinships""" +235 38 model """ermlp""" +235 38 loss """softplus""" +235 38 regularizer """no""" +235 38 optimizer """adadelta""" +235 38 training_loop """lcwa""" +235 38 evaluator """rankbased""" +235 39 dataset """kinships""" +235 39 model """ermlp""" +235 39 loss """softplus""" +235 39 regularizer """no""" +235 39 optimizer """adadelta""" +235 39 training_loop """lcwa""" +235 39 evaluator """rankbased""" +235 40 dataset """kinships""" +235 40 model """ermlp""" +235 40 loss """softplus""" +235 40 regularizer """no""" +235 40 optimizer """adadelta""" +235 40 training_loop """lcwa""" +235 40 evaluator """rankbased""" +235 41 dataset """kinships""" +235 41 model """ermlp""" +235 41 loss """softplus""" +235 41 regularizer """no""" +235 
41 optimizer """adadelta""" +235 41 training_loop """lcwa""" +235 41 evaluator """rankbased""" +235 42 dataset """kinships""" +235 42 model """ermlp""" +235 42 loss """softplus""" +235 42 regularizer """no""" +235 42 optimizer """adadelta""" +235 42 training_loop """lcwa""" +235 42 evaluator """rankbased""" +235 43 dataset """kinships""" +235 43 model """ermlp""" +235 43 loss """softplus""" +235 43 regularizer """no""" +235 43 optimizer """adadelta""" +235 43 training_loop """lcwa""" +235 43 evaluator """rankbased""" +235 44 dataset """kinships""" +235 44 model """ermlp""" +235 44 loss """softplus""" +235 44 regularizer """no""" +235 44 optimizer """adadelta""" +235 44 training_loop """lcwa""" +235 44 evaluator """rankbased""" +235 45 dataset """kinships""" +235 45 model """ermlp""" +235 45 loss """softplus""" +235 45 regularizer """no""" +235 45 optimizer """adadelta""" +235 45 training_loop """lcwa""" +235 45 evaluator """rankbased""" +235 46 dataset """kinships""" +235 46 model """ermlp""" +235 46 loss """softplus""" +235 46 regularizer """no""" +235 46 optimizer """adadelta""" +235 46 training_loop """lcwa""" +235 46 evaluator """rankbased""" +235 47 dataset """kinships""" +235 47 model """ermlp""" +235 47 loss """softplus""" +235 47 regularizer """no""" +235 47 optimizer """adadelta""" +235 47 training_loop """lcwa""" +235 47 evaluator """rankbased""" +235 48 dataset """kinships""" +235 48 model """ermlp""" +235 48 loss """softplus""" +235 48 regularizer """no""" +235 48 optimizer """adadelta""" +235 48 training_loop """lcwa""" +235 48 evaluator """rankbased""" +235 49 dataset """kinships""" +235 49 model """ermlp""" +235 49 loss """softplus""" +235 49 regularizer """no""" +235 49 optimizer """adadelta""" +235 49 training_loop """lcwa""" +235 49 evaluator """rankbased""" +235 50 dataset """kinships""" +235 50 model """ermlp""" +235 50 loss """softplus""" +235 50 regularizer """no""" +235 50 optimizer """adadelta""" +235 50 training_loop """lcwa""" +235 50 
evaluator """rankbased""" +235 51 dataset """kinships""" +235 51 model """ermlp""" +235 51 loss """softplus""" +235 51 regularizer """no""" +235 51 optimizer """adadelta""" +235 51 training_loop """lcwa""" +235 51 evaluator """rankbased""" +235 52 dataset """kinships""" +235 52 model """ermlp""" +235 52 loss """softplus""" +235 52 regularizer """no""" +235 52 optimizer """adadelta""" +235 52 training_loop """lcwa""" +235 52 evaluator """rankbased""" +235 53 dataset """kinships""" +235 53 model """ermlp""" +235 53 loss """softplus""" +235 53 regularizer """no""" +235 53 optimizer """adadelta""" +235 53 training_loop """lcwa""" +235 53 evaluator """rankbased""" +235 54 dataset """kinships""" +235 54 model """ermlp""" +235 54 loss """softplus""" +235 54 regularizer """no""" +235 54 optimizer """adadelta""" +235 54 training_loop """lcwa""" +235 54 evaluator """rankbased""" +235 55 dataset """kinships""" +235 55 model """ermlp""" +235 55 loss """softplus""" +235 55 regularizer """no""" +235 55 optimizer """adadelta""" +235 55 training_loop """lcwa""" +235 55 evaluator """rankbased""" +235 56 dataset """kinships""" +235 56 model """ermlp""" +235 56 loss """softplus""" +235 56 regularizer """no""" +235 56 optimizer """adadelta""" +235 56 training_loop """lcwa""" +235 56 evaluator """rankbased""" +235 57 dataset """kinships""" +235 57 model """ermlp""" +235 57 loss """softplus""" +235 57 regularizer """no""" +235 57 optimizer """adadelta""" +235 57 training_loop """lcwa""" +235 57 evaluator """rankbased""" +235 58 dataset """kinships""" +235 58 model """ermlp""" +235 58 loss """softplus""" +235 58 regularizer """no""" +235 58 optimizer """adadelta""" +235 58 training_loop """lcwa""" +235 58 evaluator """rankbased""" +235 59 dataset """kinships""" +235 59 model """ermlp""" +235 59 loss """softplus""" +235 59 regularizer """no""" +235 59 optimizer """adadelta""" +235 59 training_loop """lcwa""" +235 59 evaluator """rankbased""" +235 60 dataset """kinships""" +235 60 model 
"""ermlp""" +235 60 loss """softplus""" +235 60 regularizer """no""" +235 60 optimizer """adadelta""" +235 60 training_loop """lcwa""" +235 60 evaluator """rankbased""" +235 61 dataset """kinships""" +235 61 model """ermlp""" +235 61 loss """softplus""" +235 61 regularizer """no""" +235 61 optimizer """adadelta""" +235 61 training_loop """lcwa""" +235 61 evaluator """rankbased""" +235 62 dataset """kinships""" +235 62 model """ermlp""" +235 62 loss """softplus""" +235 62 regularizer """no""" +235 62 optimizer """adadelta""" +235 62 training_loop """lcwa""" +235 62 evaluator """rankbased""" +235 63 dataset """kinships""" +235 63 model """ermlp""" +235 63 loss """softplus""" +235 63 regularizer """no""" +235 63 optimizer """adadelta""" +235 63 training_loop """lcwa""" +235 63 evaluator """rankbased""" +235 64 dataset """kinships""" +235 64 model """ermlp""" +235 64 loss """softplus""" +235 64 regularizer """no""" +235 64 optimizer """adadelta""" +235 64 training_loop """lcwa""" +235 64 evaluator """rankbased""" +235 65 dataset """kinships""" +235 65 model """ermlp""" +235 65 loss """softplus""" +235 65 regularizer """no""" +235 65 optimizer """adadelta""" +235 65 training_loop """lcwa""" +235 65 evaluator """rankbased""" +235 66 dataset """kinships""" +235 66 model """ermlp""" +235 66 loss """softplus""" +235 66 regularizer """no""" +235 66 optimizer """adadelta""" +235 66 training_loop """lcwa""" +235 66 evaluator """rankbased""" +235 67 dataset """kinships""" +235 67 model """ermlp""" +235 67 loss """softplus""" +235 67 regularizer """no""" +235 67 optimizer """adadelta""" +235 67 training_loop """lcwa""" +235 67 evaluator """rankbased""" +235 68 dataset """kinships""" +235 68 model """ermlp""" +235 68 loss """softplus""" +235 68 regularizer """no""" +235 68 optimizer """adadelta""" +235 68 training_loop """lcwa""" +235 68 evaluator """rankbased""" +235 69 dataset """kinships""" +235 69 model """ermlp""" +235 69 loss """softplus""" +235 69 regularizer """no""" +235 
69 optimizer """adadelta""" +235 69 training_loop """lcwa""" +235 69 evaluator """rankbased""" +235 70 dataset """kinships""" +235 70 model """ermlp""" +235 70 loss """softplus""" +235 70 regularizer """no""" +235 70 optimizer """adadelta""" +235 70 training_loop """lcwa""" +235 70 evaluator """rankbased""" +235 71 dataset """kinships""" +235 71 model """ermlp""" +235 71 loss """softplus""" +235 71 regularizer """no""" +235 71 optimizer """adadelta""" +235 71 training_loop """lcwa""" +235 71 evaluator """rankbased""" +235 72 dataset """kinships""" +235 72 model """ermlp""" +235 72 loss """softplus""" +235 72 regularizer """no""" +235 72 optimizer """adadelta""" +235 72 training_loop """lcwa""" +235 72 evaluator """rankbased""" +235 73 dataset """kinships""" +235 73 model """ermlp""" +235 73 loss """softplus""" +235 73 regularizer """no""" +235 73 optimizer """adadelta""" +235 73 training_loop """lcwa""" +235 73 evaluator """rankbased""" +235 74 dataset """kinships""" +235 74 model """ermlp""" +235 74 loss """softplus""" +235 74 regularizer """no""" +235 74 optimizer """adadelta""" +235 74 training_loop """lcwa""" +235 74 evaluator """rankbased""" +235 75 dataset """kinships""" +235 75 model """ermlp""" +235 75 loss """softplus""" +235 75 regularizer """no""" +235 75 optimizer """adadelta""" +235 75 training_loop """lcwa""" +235 75 evaluator """rankbased""" +235 76 dataset """kinships""" +235 76 model """ermlp""" +235 76 loss """softplus""" +235 76 regularizer """no""" +235 76 optimizer """adadelta""" +235 76 training_loop """lcwa""" +235 76 evaluator """rankbased""" +235 77 dataset """kinships""" +235 77 model """ermlp""" +235 77 loss """softplus""" +235 77 regularizer """no""" +235 77 optimizer """adadelta""" +235 77 training_loop """lcwa""" +235 77 evaluator """rankbased""" +235 78 dataset """kinships""" +235 78 model """ermlp""" +235 78 loss """softplus""" +235 78 regularizer """no""" +235 78 optimizer """adadelta""" +235 78 training_loop """lcwa""" +235 78 
evaluator """rankbased""" +235 79 dataset """kinships""" +235 79 model """ermlp""" +235 79 loss """softplus""" +235 79 regularizer """no""" +235 79 optimizer """adadelta""" +235 79 training_loop """lcwa""" +235 79 evaluator """rankbased""" +235 80 dataset """kinships""" +235 80 model """ermlp""" +235 80 loss """softplus""" +235 80 regularizer """no""" +235 80 optimizer """adadelta""" +235 80 training_loop """lcwa""" +235 80 evaluator """rankbased""" +235 81 dataset """kinships""" +235 81 model """ermlp""" +235 81 loss """softplus""" +235 81 regularizer """no""" +235 81 optimizer """adadelta""" +235 81 training_loop """lcwa""" +235 81 evaluator """rankbased""" +235 82 dataset """kinships""" +235 82 model """ermlp""" +235 82 loss """softplus""" +235 82 regularizer """no""" +235 82 optimizer """adadelta""" +235 82 training_loop """lcwa""" +235 82 evaluator """rankbased""" +235 83 dataset """kinships""" +235 83 model """ermlp""" +235 83 loss """softplus""" +235 83 regularizer """no""" +235 83 optimizer """adadelta""" +235 83 training_loop """lcwa""" +235 83 evaluator """rankbased""" +235 84 dataset """kinships""" +235 84 model """ermlp""" +235 84 loss """softplus""" +235 84 regularizer """no""" +235 84 optimizer """adadelta""" +235 84 training_loop """lcwa""" +235 84 evaluator """rankbased""" +235 85 dataset """kinships""" +235 85 model """ermlp""" +235 85 loss """softplus""" +235 85 regularizer """no""" +235 85 optimizer """adadelta""" +235 85 training_loop """lcwa""" +235 85 evaluator """rankbased""" +235 86 dataset """kinships""" +235 86 model """ermlp""" +235 86 loss """softplus""" +235 86 regularizer """no""" +235 86 optimizer """adadelta""" +235 86 training_loop """lcwa""" +235 86 evaluator """rankbased""" +235 87 dataset """kinships""" +235 87 model """ermlp""" +235 87 loss """softplus""" +235 87 regularizer """no""" +235 87 optimizer """adadelta""" +235 87 training_loop """lcwa""" +235 87 evaluator """rankbased""" +235 88 dataset """kinships""" +235 88 model 
"""ermlp""" +235 88 loss """softplus""" +235 88 regularizer """no""" +235 88 optimizer """adadelta""" +235 88 training_loop """lcwa""" +235 88 evaluator """rankbased""" +235 89 dataset """kinships""" +235 89 model """ermlp""" +235 89 loss """softplus""" +235 89 regularizer """no""" +235 89 optimizer """adadelta""" +235 89 training_loop """lcwa""" +235 89 evaluator """rankbased""" +235 90 dataset """kinships""" +235 90 model """ermlp""" +235 90 loss """softplus""" +235 90 regularizer """no""" +235 90 optimizer """adadelta""" +235 90 training_loop """lcwa""" +235 90 evaluator """rankbased""" +235 91 dataset """kinships""" +235 91 model """ermlp""" +235 91 loss """softplus""" +235 91 regularizer """no""" +235 91 optimizer """adadelta""" +235 91 training_loop """lcwa""" +235 91 evaluator """rankbased""" +235 92 dataset """kinships""" +235 92 model """ermlp""" +235 92 loss """softplus""" +235 92 regularizer """no""" +235 92 optimizer """adadelta""" +235 92 training_loop """lcwa""" +235 92 evaluator """rankbased""" +235 93 dataset """kinships""" +235 93 model """ermlp""" +235 93 loss """softplus""" +235 93 regularizer """no""" +235 93 optimizer """adadelta""" +235 93 training_loop """lcwa""" +235 93 evaluator """rankbased""" +235 94 dataset """kinships""" +235 94 model """ermlp""" +235 94 loss """softplus""" +235 94 regularizer """no""" +235 94 optimizer """adadelta""" +235 94 training_loop """lcwa""" +235 94 evaluator """rankbased""" +235 95 dataset """kinships""" +235 95 model """ermlp""" +235 95 loss """softplus""" +235 95 regularizer """no""" +235 95 optimizer """adadelta""" +235 95 training_loop """lcwa""" +235 95 evaluator """rankbased""" +235 96 dataset """kinships""" +235 96 model """ermlp""" +235 96 loss """softplus""" +235 96 regularizer """no""" +235 96 optimizer """adadelta""" +235 96 training_loop """lcwa""" +235 96 evaluator """rankbased""" +235 97 dataset """kinships""" +235 97 model """ermlp""" +235 97 loss """softplus""" +235 97 regularizer """no""" +235 
97 optimizer """adadelta""" +235 97 training_loop """lcwa""" +235 97 evaluator """rankbased""" +235 98 dataset """kinships""" +235 98 model """ermlp""" +235 98 loss """softplus""" +235 98 regularizer """no""" +235 98 optimizer """adadelta""" +235 98 training_loop """lcwa""" +235 98 evaluator """rankbased""" +235 99 dataset """kinships""" +235 99 model """ermlp""" +235 99 loss """softplus""" +235 99 regularizer """no""" +235 99 optimizer """adadelta""" +235 99 training_loop """lcwa""" +235 99 evaluator """rankbased""" +235 100 dataset """kinships""" +235 100 model """ermlp""" +235 100 loss """softplus""" +235 100 regularizer """no""" +235 100 optimizer """adadelta""" +235 100 training_loop """lcwa""" +235 100 evaluator """rankbased""" +236 1 model.embedding_dim 1.0 +236 1 training.batch_size 1.0 +236 1 training.label_smoothing 0.20161069515734498 +236 2 model.embedding_dim 0.0 +236 2 training.batch_size 1.0 +236 2 training.label_smoothing 0.0011507436760977187 +236 3 model.embedding_dim 0.0 +236 3 training.batch_size 0.0 +236 3 training.label_smoothing 0.031419141848504126 +236 4 model.embedding_dim 2.0 +236 4 training.batch_size 2.0 +236 4 training.label_smoothing 0.008405035053773277 +236 5 model.embedding_dim 0.0 +236 5 training.batch_size 0.0 +236 5 training.label_smoothing 0.08360033917436774 +236 6 model.embedding_dim 2.0 +236 6 training.batch_size 0.0 +236 6 training.label_smoothing 0.8599108873489477 +236 7 model.embedding_dim 1.0 +236 7 training.batch_size 2.0 +236 7 training.label_smoothing 0.011300815020858447 +236 8 model.embedding_dim 2.0 +236 8 training.batch_size 2.0 +236 8 training.label_smoothing 0.004634018826551689 +236 9 model.embedding_dim 2.0 +236 9 training.batch_size 1.0 +236 9 training.label_smoothing 0.0012560016108362423 +236 10 model.embedding_dim 1.0 +236 10 training.batch_size 0.0 +236 10 training.label_smoothing 0.001113373252180018 +236 11 model.embedding_dim 1.0 +236 11 training.batch_size 0.0 +236 11 training.label_smoothing 
0.001249978831750588 +236 12 model.embedding_dim 2.0 +236 12 training.batch_size 2.0 +236 12 training.label_smoothing 0.007037410658188917 +236 13 model.embedding_dim 2.0 +236 13 training.batch_size 1.0 +236 13 training.label_smoothing 0.015285423987022245 +236 14 model.embedding_dim 2.0 +236 14 training.batch_size 2.0 +236 14 training.label_smoothing 0.31043812460913667 +236 15 model.embedding_dim 1.0 +236 15 training.batch_size 1.0 +236 15 training.label_smoothing 0.4133839298184013 +236 16 model.embedding_dim 0.0 +236 16 training.batch_size 0.0 +236 16 training.label_smoothing 0.0020574055741534886 +236 17 model.embedding_dim 2.0 +236 17 training.batch_size 2.0 +236 17 training.label_smoothing 0.004213502038551499 +236 18 model.embedding_dim 1.0 +236 18 training.batch_size 2.0 +236 18 training.label_smoothing 0.007733198805758568 +236 19 model.embedding_dim 2.0 +236 19 training.batch_size 1.0 +236 19 training.label_smoothing 0.05092296671008794 +236 20 model.embedding_dim 2.0 +236 20 training.batch_size 0.0 +236 20 training.label_smoothing 0.04185542336185793 +236 21 model.embedding_dim 2.0 +236 21 training.batch_size 0.0 +236 21 training.label_smoothing 0.5049231444006815 +236 22 model.embedding_dim 2.0 +236 22 training.batch_size 0.0 +236 22 training.label_smoothing 0.009894124541742355 +236 23 model.embedding_dim 1.0 +236 23 training.batch_size 1.0 +236 23 training.label_smoothing 0.4614317119254215 +236 24 model.embedding_dim 1.0 +236 24 training.batch_size 2.0 +236 24 training.label_smoothing 0.04832394811886629 +236 25 model.embedding_dim 1.0 +236 25 training.batch_size 0.0 +236 25 training.label_smoothing 0.004632550754478273 +236 26 model.embedding_dim 1.0 +236 26 training.batch_size 1.0 +236 26 training.label_smoothing 0.0630783558672463 +236 27 model.embedding_dim 0.0 +236 27 training.batch_size 0.0 +236 27 training.label_smoothing 0.002469918885095785 +236 28 model.embedding_dim 2.0 +236 28 training.batch_size 1.0 +236 28 training.label_smoothing 
0.003992522570414693 +236 29 model.embedding_dim 1.0 +236 29 training.batch_size 0.0 +236 29 training.label_smoothing 0.3528728165544821 +236 30 model.embedding_dim 0.0 +236 30 training.batch_size 0.0 +236 30 training.label_smoothing 0.0035961190859602225 +236 31 model.embedding_dim 0.0 +236 31 training.batch_size 2.0 +236 31 training.label_smoothing 0.00830664124397245 +236 32 model.embedding_dim 0.0 +236 32 training.batch_size 1.0 +236 32 training.label_smoothing 0.018491595043181798 +236 33 model.embedding_dim 0.0 +236 33 training.batch_size 0.0 +236 33 training.label_smoothing 0.004184613317710937 +236 34 model.embedding_dim 1.0 +236 34 training.batch_size 1.0 +236 34 training.label_smoothing 0.1358001641830304 +236 35 model.embedding_dim 2.0 +236 35 training.batch_size 2.0 +236 35 training.label_smoothing 0.03378395972075334 +236 36 model.embedding_dim 0.0 +236 36 training.batch_size 2.0 +236 36 training.label_smoothing 0.08866621527150012 +236 37 model.embedding_dim 0.0 +236 37 training.batch_size 0.0 +236 37 training.label_smoothing 0.9473574066810936 +236 38 model.embedding_dim 0.0 +236 38 training.batch_size 0.0 +236 38 training.label_smoothing 0.06836951366060874 +236 39 model.embedding_dim 2.0 +236 39 training.batch_size 2.0 +236 39 training.label_smoothing 0.001546364661142074 +236 40 model.embedding_dim 1.0 +236 40 training.batch_size 0.0 +236 40 training.label_smoothing 0.013523750477055492 +236 41 model.embedding_dim 0.0 +236 41 training.batch_size 1.0 +236 41 training.label_smoothing 0.7780517853860196 +236 42 model.embedding_dim 1.0 +236 42 training.batch_size 0.0 +236 42 training.label_smoothing 0.8595351898656493 +236 43 model.embedding_dim 1.0 +236 43 training.batch_size 0.0 +236 43 training.label_smoothing 0.6881092557888108 +236 44 model.embedding_dim 0.0 +236 44 training.batch_size 0.0 +236 44 training.label_smoothing 0.005105436172866333 +236 45 model.embedding_dim 1.0 +236 45 training.batch_size 0.0 +236 45 training.label_smoothing 
0.8864188391931879 +236 46 model.embedding_dim 1.0 +236 46 training.batch_size 1.0 +236 46 training.label_smoothing 0.030966467481663106 +236 47 model.embedding_dim 2.0 +236 47 training.batch_size 2.0 +236 47 training.label_smoothing 0.0012553003304226036 +236 48 model.embedding_dim 2.0 +236 48 training.batch_size 1.0 +236 48 training.label_smoothing 0.0022877577481684223 +236 49 model.embedding_dim 2.0 +236 49 training.batch_size 1.0 +236 49 training.label_smoothing 0.02968564029017823 +236 50 model.embedding_dim 0.0 +236 50 training.batch_size 1.0 +236 50 training.label_smoothing 0.3648748829984754 +236 51 model.embedding_dim 2.0 +236 51 training.batch_size 0.0 +236 51 training.label_smoothing 0.08236535071849727 +236 52 model.embedding_dim 1.0 +236 52 training.batch_size 2.0 +236 52 training.label_smoothing 0.006868178698002345 +236 53 model.embedding_dim 1.0 +236 53 training.batch_size 2.0 +236 53 training.label_smoothing 0.016218970104240563 +236 54 model.embedding_dim 0.0 +236 54 training.batch_size 1.0 +236 54 training.label_smoothing 0.029963701798750467 +236 55 model.embedding_dim 0.0 +236 55 training.batch_size 2.0 +236 55 training.label_smoothing 0.004518516343702444 +236 56 model.embedding_dim 2.0 +236 56 training.batch_size 0.0 +236 56 training.label_smoothing 0.03336629874607757 +236 57 model.embedding_dim 1.0 +236 57 training.batch_size 1.0 +236 57 training.label_smoothing 0.4314990784089436 +236 58 model.embedding_dim 1.0 +236 58 training.batch_size 1.0 +236 58 training.label_smoothing 0.0012350060832167417 +236 59 model.embedding_dim 1.0 +236 59 training.batch_size 0.0 +236 59 training.label_smoothing 0.0610069803761639 +236 60 model.embedding_dim 0.0 +236 60 training.batch_size 0.0 +236 60 training.label_smoothing 0.0028669945182536057 +236 61 model.embedding_dim 1.0 +236 61 training.batch_size 0.0 +236 61 training.label_smoothing 0.19141466171903534 +236 62 model.embedding_dim 1.0 +236 62 training.batch_size 1.0 +236 62 training.label_smoothing 
0.8790902656553707 +236 63 model.embedding_dim 1.0 +236 63 training.batch_size 0.0 +236 63 training.label_smoothing 0.019796010006107888 +236 64 model.embedding_dim 2.0 +236 64 training.batch_size 2.0 +236 64 training.label_smoothing 0.004912260853357542 +236 65 model.embedding_dim 1.0 +236 65 training.batch_size 1.0 +236 65 training.label_smoothing 0.01580273608551408 +236 66 model.embedding_dim 2.0 +236 66 training.batch_size 0.0 +236 66 training.label_smoothing 0.001697716163950439 +236 67 model.embedding_dim 0.0 +236 67 training.batch_size 1.0 +236 67 training.label_smoothing 0.0010064901677153326 +236 68 model.embedding_dim 0.0 +236 68 training.batch_size 2.0 +236 68 training.label_smoothing 0.36587862928412906 +236 69 model.embedding_dim 2.0 +236 69 training.batch_size 1.0 +236 69 training.label_smoothing 0.18501597629055905 +236 70 model.embedding_dim 2.0 +236 70 training.batch_size 2.0 +236 70 training.label_smoothing 0.0032294437998976275 +236 71 model.embedding_dim 1.0 +236 71 training.batch_size 0.0 +236 71 training.label_smoothing 0.001607960406534354 +236 72 model.embedding_dim 1.0 +236 72 training.batch_size 2.0 +236 72 training.label_smoothing 0.03066410679188153 +236 73 model.embedding_dim 1.0 +236 73 training.batch_size 1.0 +236 73 training.label_smoothing 0.04608989558031508 +236 74 model.embedding_dim 0.0 +236 74 training.batch_size 2.0 +236 74 training.label_smoothing 0.004343532693881164 +236 75 model.embedding_dim 2.0 +236 75 training.batch_size 1.0 +236 75 training.label_smoothing 0.1219886177875376 +236 76 model.embedding_dim 0.0 +236 76 training.batch_size 2.0 +236 76 training.label_smoothing 0.22874953573198104 +236 77 model.embedding_dim 1.0 +236 77 training.batch_size 0.0 +236 77 training.label_smoothing 0.0039049741570516446 +236 78 model.embedding_dim 0.0 +236 78 training.batch_size 1.0 +236 78 training.label_smoothing 0.0014043267862972248 +236 79 model.embedding_dim 2.0 +236 79 training.batch_size 2.0 +236 79 training.label_smoothing 
0.0050781717824874804 +236 80 model.embedding_dim 0.0 +236 80 training.batch_size 1.0 +236 80 training.label_smoothing 0.11987478032143126 +236 81 model.embedding_dim 0.0 +236 81 training.batch_size 1.0 +236 81 training.label_smoothing 0.007221921897909166 +236 82 model.embedding_dim 0.0 +236 82 training.batch_size 2.0 +236 82 training.label_smoothing 0.02555009473203127 +236 83 model.embedding_dim 1.0 +236 83 training.batch_size 0.0 +236 83 training.label_smoothing 0.19563787481304606 +236 84 model.embedding_dim 1.0 +236 84 training.batch_size 1.0 +236 84 training.label_smoothing 0.5830875595580941 +236 85 model.embedding_dim 1.0 +236 85 training.batch_size 2.0 +236 85 training.label_smoothing 0.002184658200873433 +236 86 model.embedding_dim 0.0 +236 86 training.batch_size 0.0 +236 86 training.label_smoothing 0.018238539079447804 +236 87 model.embedding_dim 2.0 +236 87 training.batch_size 0.0 +236 87 training.label_smoothing 0.010416936669580153 +236 88 model.embedding_dim 2.0 +236 88 training.batch_size 2.0 +236 88 training.label_smoothing 0.0016107057082504553 +236 89 model.embedding_dim 1.0 +236 89 training.batch_size 2.0 +236 89 training.label_smoothing 0.20449057236706233 +236 90 model.embedding_dim 2.0 +236 90 training.batch_size 0.0 +236 90 training.label_smoothing 0.004186110281810923 +236 91 model.embedding_dim 2.0 +236 91 training.batch_size 0.0 +236 91 training.label_smoothing 0.01598564191412289 +236 92 model.embedding_dim 0.0 +236 92 training.batch_size 0.0 +236 92 training.label_smoothing 0.3357387259820355 +236 93 model.embedding_dim 2.0 +236 93 training.batch_size 0.0 +236 93 training.label_smoothing 0.2673132054069026 +236 94 model.embedding_dim 1.0 +236 94 training.batch_size 2.0 +236 94 training.label_smoothing 0.019386938944375497 +236 95 model.embedding_dim 1.0 +236 95 training.batch_size 0.0 +236 95 training.label_smoothing 0.0010618339210859595 +236 96 model.embedding_dim 0.0 +236 96 training.batch_size 2.0 +236 96 training.label_smoothing 
0.0011575436939894438 +236 97 model.embedding_dim 1.0 +236 97 training.batch_size 1.0 +236 97 training.label_smoothing 0.01046344771425867 +236 98 model.embedding_dim 2.0 +236 98 training.batch_size 1.0 +236 98 training.label_smoothing 0.0020366857752949275 +236 99 model.embedding_dim 2.0 +236 99 training.batch_size 0.0 +236 99 training.label_smoothing 0.16702821332782733 +236 100 model.embedding_dim 2.0 +236 100 training.batch_size 2.0 +236 100 training.label_smoothing 0.34152095524975573 +236 1 dataset """kinships""" +236 1 model """ermlp""" +236 1 loss """bceaftersigmoid""" +236 1 regularizer """no""" +236 1 optimizer """adadelta""" +236 1 training_loop """lcwa""" +236 1 evaluator """rankbased""" +236 2 dataset """kinships""" +236 2 model """ermlp""" +236 2 loss """bceaftersigmoid""" +236 2 regularizer """no""" +236 2 optimizer """adadelta""" +236 2 training_loop """lcwa""" +236 2 evaluator """rankbased""" +236 3 dataset """kinships""" +236 3 model """ermlp""" +236 3 loss """bceaftersigmoid""" +236 3 regularizer """no""" +236 3 optimizer """adadelta""" +236 3 training_loop """lcwa""" +236 3 evaluator """rankbased""" +236 4 dataset """kinships""" +236 4 model """ermlp""" +236 4 loss """bceaftersigmoid""" +236 4 regularizer """no""" +236 4 optimizer """adadelta""" +236 4 training_loop """lcwa""" +236 4 evaluator """rankbased""" +236 5 dataset """kinships""" +236 5 model """ermlp""" +236 5 loss """bceaftersigmoid""" +236 5 regularizer """no""" +236 5 optimizer """adadelta""" +236 5 training_loop """lcwa""" +236 5 evaluator """rankbased""" +236 6 dataset """kinships""" +236 6 model """ermlp""" +236 6 loss """bceaftersigmoid""" +236 6 regularizer """no""" +236 6 optimizer """adadelta""" +236 6 training_loop """lcwa""" +236 6 evaluator """rankbased""" +236 7 dataset """kinships""" +236 7 model """ermlp""" +236 7 loss """bceaftersigmoid""" +236 7 regularizer """no""" +236 7 optimizer """adadelta""" +236 7 training_loop """lcwa""" +236 7 evaluator """rankbased""" +236 8 
dataset """kinships""" +236 8 model """ermlp""" +236 8 loss """bceaftersigmoid""" +236 8 regularizer """no""" +236 8 optimizer """adadelta""" +236 8 training_loop """lcwa""" +236 8 evaluator """rankbased""" +236 9 dataset """kinships""" +236 9 model """ermlp""" +236 9 loss """bceaftersigmoid""" +236 9 regularizer """no""" +236 9 optimizer """adadelta""" +236 9 training_loop """lcwa""" +236 9 evaluator """rankbased""" +236 10 dataset """kinships""" +236 10 model """ermlp""" +236 10 loss """bceaftersigmoid""" +236 10 regularizer """no""" +236 10 optimizer """adadelta""" +236 10 training_loop """lcwa""" +236 10 evaluator """rankbased""" +236 11 dataset """kinships""" +236 11 model """ermlp""" +236 11 loss """bceaftersigmoid""" +236 11 regularizer """no""" +236 11 optimizer """adadelta""" +236 11 training_loop """lcwa""" +236 11 evaluator """rankbased""" +236 12 dataset """kinships""" +236 12 model """ermlp""" +236 12 loss """bceaftersigmoid""" +236 12 regularizer """no""" +236 12 optimizer """adadelta""" +236 12 training_loop """lcwa""" +236 12 evaluator """rankbased""" +236 13 dataset """kinships""" +236 13 model """ermlp""" +236 13 loss """bceaftersigmoid""" +236 13 regularizer """no""" +236 13 optimizer """adadelta""" +236 13 training_loop """lcwa""" +236 13 evaluator """rankbased""" +236 14 dataset """kinships""" +236 14 model """ermlp""" +236 14 loss """bceaftersigmoid""" +236 14 regularizer """no""" +236 14 optimizer """adadelta""" +236 14 training_loop """lcwa""" +236 14 evaluator """rankbased""" +236 15 dataset """kinships""" +236 15 model """ermlp""" +236 15 loss """bceaftersigmoid""" +236 15 regularizer """no""" +236 15 optimizer """adadelta""" +236 15 training_loop """lcwa""" +236 15 evaluator """rankbased""" +236 16 dataset """kinships""" +236 16 model """ermlp""" +236 16 loss """bceaftersigmoid""" +236 16 regularizer """no""" +236 16 optimizer """adadelta""" +236 16 training_loop """lcwa""" +236 16 evaluator """rankbased""" +236 17 dataset """kinships""" 
+236 17 model """ermlp""" +236 17 loss """bceaftersigmoid""" +236 17 regularizer """no""" +236 17 optimizer """adadelta""" +236 17 training_loop """lcwa""" +236 17 evaluator """rankbased""" +236 18 dataset """kinships""" +236 18 model """ermlp""" +236 18 loss """bceaftersigmoid""" +236 18 regularizer """no""" +236 18 optimizer """adadelta""" +236 18 training_loop """lcwa""" +236 18 evaluator """rankbased""" +236 19 dataset """kinships""" +236 19 model """ermlp""" +236 19 loss """bceaftersigmoid""" +236 19 regularizer """no""" +236 19 optimizer """adadelta""" +236 19 training_loop """lcwa""" +236 19 evaluator """rankbased""" +236 20 dataset """kinships""" +236 20 model """ermlp""" +236 20 loss """bceaftersigmoid""" +236 20 regularizer """no""" +236 20 optimizer """adadelta""" +236 20 training_loop """lcwa""" +236 20 evaluator """rankbased""" +236 21 dataset """kinships""" +236 21 model """ermlp""" +236 21 loss """bceaftersigmoid""" +236 21 regularizer """no""" +236 21 optimizer """adadelta""" +236 21 training_loop """lcwa""" +236 21 evaluator """rankbased""" +236 22 dataset """kinships""" +236 22 model """ermlp""" +236 22 loss """bceaftersigmoid""" +236 22 regularizer """no""" +236 22 optimizer """adadelta""" +236 22 training_loop """lcwa""" +236 22 evaluator """rankbased""" +236 23 dataset """kinships""" +236 23 model """ermlp""" +236 23 loss """bceaftersigmoid""" +236 23 regularizer """no""" +236 23 optimizer """adadelta""" +236 23 training_loop """lcwa""" +236 23 evaluator """rankbased""" +236 24 dataset """kinships""" +236 24 model """ermlp""" +236 24 loss """bceaftersigmoid""" +236 24 regularizer """no""" +236 24 optimizer """adadelta""" +236 24 training_loop """lcwa""" +236 24 evaluator """rankbased""" +236 25 dataset """kinships""" +236 25 model """ermlp""" +236 25 loss """bceaftersigmoid""" +236 25 regularizer """no""" +236 25 optimizer """adadelta""" +236 25 training_loop """lcwa""" +236 25 evaluator """rankbased""" +236 26 dataset """kinships""" +236 26 
model """ermlp""" +236 26 loss """bceaftersigmoid""" +236 26 regularizer """no""" +236 26 optimizer """adadelta""" +236 26 training_loop """lcwa""" +236 26 evaluator """rankbased""" +236 27 dataset """kinships""" +236 27 model """ermlp""" +236 27 loss """bceaftersigmoid""" +236 27 regularizer """no""" +236 27 optimizer """adadelta""" +236 27 training_loop """lcwa""" +236 27 evaluator """rankbased""" +236 28 dataset """kinships""" +236 28 model """ermlp""" +236 28 loss """bceaftersigmoid""" +236 28 regularizer """no""" +236 28 optimizer """adadelta""" +236 28 training_loop """lcwa""" +236 28 evaluator """rankbased""" +236 29 dataset """kinships""" +236 29 model """ermlp""" +236 29 loss """bceaftersigmoid""" +236 29 regularizer """no""" +236 29 optimizer """adadelta""" +236 29 training_loop """lcwa""" +236 29 evaluator """rankbased""" +236 30 dataset """kinships""" +236 30 model """ermlp""" +236 30 loss """bceaftersigmoid""" +236 30 regularizer """no""" +236 30 optimizer """adadelta""" +236 30 training_loop """lcwa""" +236 30 evaluator """rankbased""" +236 31 dataset """kinships""" +236 31 model """ermlp""" +236 31 loss """bceaftersigmoid""" +236 31 regularizer """no""" +236 31 optimizer """adadelta""" +236 31 training_loop """lcwa""" +236 31 evaluator """rankbased""" +236 32 dataset """kinships""" +236 32 model """ermlp""" +236 32 loss """bceaftersigmoid""" +236 32 regularizer """no""" +236 32 optimizer """adadelta""" +236 32 training_loop """lcwa""" +236 32 evaluator """rankbased""" +236 33 dataset """kinships""" +236 33 model """ermlp""" +236 33 loss """bceaftersigmoid""" +236 33 regularizer """no""" +236 33 optimizer """adadelta""" +236 33 training_loop """lcwa""" +236 33 evaluator """rankbased""" +236 34 dataset """kinships""" +236 34 model """ermlp""" +236 34 loss """bceaftersigmoid""" +236 34 regularizer """no""" +236 34 optimizer """adadelta""" +236 34 training_loop """lcwa""" +236 34 evaluator """rankbased""" +236 35 dataset """kinships""" +236 35 model 
"""ermlp""" +236 35 loss """bceaftersigmoid""" +236 35 regularizer """no""" +236 35 optimizer """adadelta""" +236 35 training_loop """lcwa""" +236 35 evaluator """rankbased""" +236 36 dataset """kinships""" +236 36 model """ermlp""" +236 36 loss """bceaftersigmoid""" +236 36 regularizer """no""" +236 36 optimizer """adadelta""" +236 36 training_loop """lcwa""" +236 36 evaluator """rankbased""" +236 37 dataset """kinships""" +236 37 model """ermlp""" +236 37 loss """bceaftersigmoid""" +236 37 regularizer """no""" +236 37 optimizer """adadelta""" +236 37 training_loop """lcwa""" +236 37 evaluator """rankbased""" +236 38 dataset """kinships""" +236 38 model """ermlp""" +236 38 loss """bceaftersigmoid""" +236 38 regularizer """no""" +236 38 optimizer """adadelta""" +236 38 training_loop """lcwa""" +236 38 evaluator """rankbased""" +236 39 dataset """kinships""" +236 39 model """ermlp""" +236 39 loss """bceaftersigmoid""" +236 39 regularizer """no""" +236 39 optimizer """adadelta""" +236 39 training_loop """lcwa""" +236 39 evaluator """rankbased""" +236 40 dataset """kinships""" +236 40 model """ermlp""" +236 40 loss """bceaftersigmoid""" +236 40 regularizer """no""" +236 40 optimizer """adadelta""" +236 40 training_loop """lcwa""" +236 40 evaluator """rankbased""" +236 41 dataset """kinships""" +236 41 model """ermlp""" +236 41 loss """bceaftersigmoid""" +236 41 regularizer """no""" +236 41 optimizer """adadelta""" +236 41 training_loop """lcwa""" +236 41 evaluator """rankbased""" +236 42 dataset """kinships""" +236 42 model """ermlp""" +236 42 loss """bceaftersigmoid""" +236 42 regularizer """no""" +236 42 optimizer """adadelta""" +236 42 training_loop """lcwa""" +236 42 evaluator """rankbased""" +236 43 dataset """kinships""" +236 43 model """ermlp""" +236 43 loss """bceaftersigmoid""" +236 43 regularizer """no""" +236 43 optimizer """adadelta""" +236 43 training_loop """lcwa""" +236 43 evaluator """rankbased""" +236 44 dataset """kinships""" +236 44 model 
"""ermlp""" +236 44 loss """bceaftersigmoid""" +236 44 regularizer """no""" +236 44 optimizer """adadelta""" +236 44 training_loop """lcwa""" +236 44 evaluator """rankbased""" +236 45 dataset """kinships""" +236 45 model """ermlp""" +236 45 loss """bceaftersigmoid""" +236 45 regularizer """no""" +236 45 optimizer """adadelta""" +236 45 training_loop """lcwa""" +236 45 evaluator """rankbased""" +236 46 dataset """kinships""" +236 46 model """ermlp""" +236 46 loss """bceaftersigmoid""" +236 46 regularizer """no""" +236 46 optimizer """adadelta""" +236 46 training_loop """lcwa""" +236 46 evaluator """rankbased""" +236 47 dataset """kinships""" +236 47 model """ermlp""" +236 47 loss """bceaftersigmoid""" +236 47 regularizer """no""" +236 47 optimizer """adadelta""" +236 47 training_loop """lcwa""" +236 47 evaluator """rankbased""" +236 48 dataset """kinships""" +236 48 model """ermlp""" +236 48 loss """bceaftersigmoid""" +236 48 regularizer """no""" +236 48 optimizer """adadelta""" +236 48 training_loop """lcwa""" +236 48 evaluator """rankbased""" +236 49 dataset """kinships""" +236 49 model """ermlp""" +236 49 loss """bceaftersigmoid""" +236 49 regularizer """no""" +236 49 optimizer """adadelta""" +236 49 training_loop """lcwa""" +236 49 evaluator """rankbased""" +236 50 dataset """kinships""" +236 50 model """ermlp""" +236 50 loss """bceaftersigmoid""" +236 50 regularizer """no""" +236 50 optimizer """adadelta""" +236 50 training_loop """lcwa""" +236 50 evaluator """rankbased""" +236 51 dataset """kinships""" +236 51 model """ermlp""" +236 51 loss """bceaftersigmoid""" +236 51 regularizer """no""" +236 51 optimizer """adadelta""" +236 51 training_loop """lcwa""" +236 51 evaluator """rankbased""" +236 52 dataset """kinships""" +236 52 model """ermlp""" +236 52 loss """bceaftersigmoid""" +236 52 regularizer """no""" +236 52 optimizer """adadelta""" +236 52 training_loop """lcwa""" +236 52 evaluator """rankbased""" +236 53 dataset """kinships""" +236 53 model 
"""ermlp""" +236 53 loss """bceaftersigmoid""" +236 53 regularizer """no""" +236 53 optimizer """adadelta""" +236 53 training_loop """lcwa""" +236 53 evaluator """rankbased""" +236 54 dataset """kinships""" +236 54 model """ermlp""" +236 54 loss """bceaftersigmoid""" +236 54 regularizer """no""" +236 54 optimizer """adadelta""" +236 54 training_loop """lcwa""" +236 54 evaluator """rankbased""" +236 55 dataset """kinships""" +236 55 model """ermlp""" +236 55 loss """bceaftersigmoid""" +236 55 regularizer """no""" +236 55 optimizer """adadelta""" +236 55 training_loop """lcwa""" +236 55 evaluator """rankbased""" +236 56 dataset """kinships""" +236 56 model """ermlp""" +236 56 loss """bceaftersigmoid""" +236 56 regularizer """no""" +236 56 optimizer """adadelta""" +236 56 training_loop """lcwa""" +236 56 evaluator """rankbased""" +236 57 dataset """kinships""" +236 57 model """ermlp""" +236 57 loss """bceaftersigmoid""" +236 57 regularizer """no""" +236 57 optimizer """adadelta""" +236 57 training_loop """lcwa""" +236 57 evaluator """rankbased""" +236 58 dataset """kinships""" +236 58 model """ermlp""" +236 58 loss """bceaftersigmoid""" +236 58 regularizer """no""" +236 58 optimizer """adadelta""" +236 58 training_loop """lcwa""" +236 58 evaluator """rankbased""" +236 59 dataset """kinships""" +236 59 model """ermlp""" +236 59 loss """bceaftersigmoid""" +236 59 regularizer """no""" +236 59 optimizer """adadelta""" +236 59 training_loop """lcwa""" +236 59 evaluator """rankbased""" +236 60 dataset """kinships""" +236 60 model """ermlp""" +236 60 loss """bceaftersigmoid""" +236 60 regularizer """no""" +236 60 optimizer """adadelta""" +236 60 training_loop """lcwa""" +236 60 evaluator """rankbased""" +236 61 dataset """kinships""" +236 61 model """ermlp""" +236 61 loss """bceaftersigmoid""" +236 61 regularizer """no""" +236 61 optimizer """adadelta""" +236 61 training_loop """lcwa""" +236 61 evaluator """rankbased""" +236 62 dataset """kinships""" +236 62 model 
"""ermlp""" +236 62 loss """bceaftersigmoid""" +236 62 regularizer """no""" +236 62 optimizer """adadelta""" +236 62 training_loop """lcwa""" +236 62 evaluator """rankbased""" +236 63 dataset """kinships""" +236 63 model """ermlp""" +236 63 loss """bceaftersigmoid""" +236 63 regularizer """no""" +236 63 optimizer """adadelta""" +236 63 training_loop """lcwa""" +236 63 evaluator """rankbased""" +236 64 dataset """kinships""" +236 64 model """ermlp""" +236 64 loss """bceaftersigmoid""" +236 64 regularizer """no""" +236 64 optimizer """adadelta""" +236 64 training_loop """lcwa""" +236 64 evaluator """rankbased""" +236 65 dataset """kinships""" +236 65 model """ermlp""" +236 65 loss """bceaftersigmoid""" +236 65 regularizer """no""" +236 65 optimizer """adadelta""" +236 65 training_loop """lcwa""" +236 65 evaluator """rankbased""" +236 66 dataset """kinships""" +236 66 model """ermlp""" +236 66 loss """bceaftersigmoid""" +236 66 regularizer """no""" +236 66 optimizer """adadelta""" +236 66 training_loop """lcwa""" +236 66 evaluator """rankbased""" +236 67 dataset """kinships""" +236 67 model """ermlp""" +236 67 loss """bceaftersigmoid""" +236 67 regularizer """no""" +236 67 optimizer """adadelta""" +236 67 training_loop """lcwa""" +236 67 evaluator """rankbased""" +236 68 dataset """kinships""" +236 68 model """ermlp""" +236 68 loss """bceaftersigmoid""" +236 68 regularizer """no""" +236 68 optimizer """adadelta""" +236 68 training_loop """lcwa""" +236 68 evaluator """rankbased""" +236 69 dataset """kinships""" +236 69 model """ermlp""" +236 69 loss """bceaftersigmoid""" +236 69 regularizer """no""" +236 69 optimizer """adadelta""" +236 69 training_loop """lcwa""" +236 69 evaluator """rankbased""" +236 70 dataset """kinships""" +236 70 model """ermlp""" +236 70 loss """bceaftersigmoid""" +236 70 regularizer """no""" +236 70 optimizer """adadelta""" +236 70 training_loop """lcwa""" +236 70 evaluator """rankbased""" +236 71 dataset """kinships""" +236 71 model 
"""ermlp""" +236 71 loss """bceaftersigmoid""" +236 71 regularizer """no""" +236 71 optimizer """adadelta""" +236 71 training_loop """lcwa""" +236 71 evaluator """rankbased""" +236 72 dataset """kinships""" +236 72 model """ermlp""" +236 72 loss """bceaftersigmoid""" +236 72 regularizer """no""" +236 72 optimizer """adadelta""" +236 72 training_loop """lcwa""" +236 72 evaluator """rankbased""" +236 73 dataset """kinships""" +236 73 model """ermlp""" +236 73 loss """bceaftersigmoid""" +236 73 regularizer """no""" +236 73 optimizer """adadelta""" +236 73 training_loop """lcwa""" +236 73 evaluator """rankbased""" +236 74 dataset """kinships""" +236 74 model """ermlp""" +236 74 loss """bceaftersigmoid""" +236 74 regularizer """no""" +236 74 optimizer """adadelta""" +236 74 training_loop """lcwa""" +236 74 evaluator """rankbased""" +236 75 dataset """kinships""" +236 75 model """ermlp""" +236 75 loss """bceaftersigmoid""" +236 75 regularizer """no""" +236 75 optimizer """adadelta""" +236 75 training_loop """lcwa""" +236 75 evaluator """rankbased""" +236 76 dataset """kinships""" +236 76 model """ermlp""" +236 76 loss """bceaftersigmoid""" +236 76 regularizer """no""" +236 76 optimizer """adadelta""" +236 76 training_loop """lcwa""" +236 76 evaluator """rankbased""" +236 77 dataset """kinships""" +236 77 model """ermlp""" +236 77 loss """bceaftersigmoid""" +236 77 regularizer """no""" +236 77 optimizer """adadelta""" +236 77 training_loop """lcwa""" +236 77 evaluator """rankbased""" +236 78 dataset """kinships""" +236 78 model """ermlp""" +236 78 loss """bceaftersigmoid""" +236 78 regularizer """no""" +236 78 optimizer """adadelta""" +236 78 training_loop """lcwa""" +236 78 evaluator """rankbased""" +236 79 dataset """kinships""" +236 79 model """ermlp""" +236 79 loss """bceaftersigmoid""" +236 79 regularizer """no""" +236 79 optimizer """adadelta""" +236 79 training_loop """lcwa""" +236 79 evaluator """rankbased""" +236 80 dataset """kinships""" +236 80 model 
"""ermlp""" +236 80 loss """bceaftersigmoid""" +236 80 regularizer """no""" +236 80 optimizer """adadelta""" +236 80 training_loop """lcwa""" +236 80 evaluator """rankbased""" +236 81 dataset """kinships""" +236 81 model """ermlp""" +236 81 loss """bceaftersigmoid""" +236 81 regularizer """no""" +236 81 optimizer """adadelta""" +236 81 training_loop """lcwa""" +236 81 evaluator """rankbased""" +236 82 dataset """kinships""" +236 82 model """ermlp""" +236 82 loss """bceaftersigmoid""" +236 82 regularizer """no""" +236 82 optimizer """adadelta""" +236 82 training_loop """lcwa""" +236 82 evaluator """rankbased""" +236 83 dataset """kinships""" +236 83 model """ermlp""" +236 83 loss """bceaftersigmoid""" +236 83 regularizer """no""" +236 83 optimizer """adadelta""" +236 83 training_loop """lcwa""" +236 83 evaluator """rankbased""" +236 84 dataset """kinships""" +236 84 model """ermlp""" +236 84 loss """bceaftersigmoid""" +236 84 regularizer """no""" +236 84 optimizer """adadelta""" +236 84 training_loop """lcwa""" +236 84 evaluator """rankbased""" +236 85 dataset """kinships""" +236 85 model """ermlp""" +236 85 loss """bceaftersigmoid""" +236 85 regularizer """no""" +236 85 optimizer """adadelta""" +236 85 training_loop """lcwa""" +236 85 evaluator """rankbased""" +236 86 dataset """kinships""" +236 86 model """ermlp""" +236 86 loss """bceaftersigmoid""" +236 86 regularizer """no""" +236 86 optimizer """adadelta""" +236 86 training_loop """lcwa""" +236 86 evaluator """rankbased""" +236 87 dataset """kinships""" +236 87 model """ermlp""" +236 87 loss """bceaftersigmoid""" +236 87 regularizer """no""" +236 87 optimizer """adadelta""" +236 87 training_loop """lcwa""" +236 87 evaluator """rankbased""" +236 88 dataset """kinships""" +236 88 model """ermlp""" +236 88 loss """bceaftersigmoid""" +236 88 regularizer """no""" +236 88 optimizer """adadelta""" +236 88 training_loop """lcwa""" +236 88 evaluator """rankbased""" +236 89 dataset """kinships""" +236 89 model 
"""ermlp""" +236 89 loss """bceaftersigmoid""" +236 89 regularizer """no""" +236 89 optimizer """adadelta""" +236 89 training_loop """lcwa""" +236 89 evaluator """rankbased""" +236 90 dataset """kinships""" +236 90 model """ermlp""" +236 90 loss """bceaftersigmoid""" +236 90 regularizer """no""" +236 90 optimizer """adadelta""" +236 90 training_loop """lcwa""" +236 90 evaluator """rankbased""" +236 91 dataset """kinships""" +236 91 model """ermlp""" +236 91 loss """bceaftersigmoid""" +236 91 regularizer """no""" +236 91 optimizer """adadelta""" +236 91 training_loop """lcwa""" +236 91 evaluator """rankbased""" +236 92 dataset """kinships""" +236 92 model """ermlp""" +236 92 loss """bceaftersigmoid""" +236 92 regularizer """no""" +236 92 optimizer """adadelta""" +236 92 training_loop """lcwa""" +236 92 evaluator """rankbased""" +236 93 dataset """kinships""" +236 93 model """ermlp""" +236 93 loss """bceaftersigmoid""" +236 93 regularizer """no""" +236 93 optimizer """adadelta""" +236 93 training_loop """lcwa""" +236 93 evaluator """rankbased""" +236 94 dataset """kinships""" +236 94 model """ermlp""" +236 94 loss """bceaftersigmoid""" +236 94 regularizer """no""" +236 94 optimizer """adadelta""" +236 94 training_loop """lcwa""" +236 94 evaluator """rankbased""" +236 95 dataset """kinships""" +236 95 model """ermlp""" +236 95 loss """bceaftersigmoid""" +236 95 regularizer """no""" +236 95 optimizer """adadelta""" +236 95 training_loop """lcwa""" +236 95 evaluator """rankbased""" +236 96 dataset """kinships""" +236 96 model """ermlp""" +236 96 loss """bceaftersigmoid""" +236 96 regularizer """no""" +236 96 optimizer """adadelta""" +236 96 training_loop """lcwa""" +236 96 evaluator """rankbased""" +236 97 dataset """kinships""" +236 97 model """ermlp""" +236 97 loss """bceaftersigmoid""" +236 97 regularizer """no""" +236 97 optimizer """adadelta""" +236 97 training_loop """lcwa""" +236 97 evaluator """rankbased""" +236 98 dataset """kinships""" +236 98 model 
"""ermlp""" +236 98 loss """bceaftersigmoid""" +236 98 regularizer """no""" +236 98 optimizer """adadelta""" +236 98 training_loop """lcwa""" +236 98 evaluator """rankbased""" +236 99 dataset """kinships""" +236 99 model """ermlp""" +236 99 loss """bceaftersigmoid""" +236 99 regularizer """no""" +236 99 optimizer """adadelta""" +236 99 training_loop """lcwa""" +236 99 evaluator """rankbased""" +236 100 dataset """kinships""" +236 100 model """ermlp""" +236 100 loss """bceaftersigmoid""" +236 100 regularizer """no""" +236 100 optimizer """adadelta""" +236 100 training_loop """lcwa""" +236 100 evaluator """rankbased""" +237 1 model.embedding_dim 1.0 +237 1 training.batch_size 1.0 +237 1 training.label_smoothing 0.6094745118595303 +237 2 model.embedding_dim 1.0 +237 2 training.batch_size 2.0 +237 2 training.label_smoothing 0.014038768966809953 +237 3 model.embedding_dim 2.0 +237 3 training.batch_size 2.0 +237 3 training.label_smoothing 0.10380634318725718 +237 4 model.embedding_dim 2.0 +237 4 training.batch_size 1.0 +237 4 training.label_smoothing 0.007260865501479863 +237 5 model.embedding_dim 0.0 +237 5 training.batch_size 1.0 +237 5 training.label_smoothing 0.06863368539504142 +237 6 model.embedding_dim 1.0 +237 6 training.batch_size 2.0 +237 6 training.label_smoothing 0.037633527173916784 +237 7 model.embedding_dim 0.0 +237 7 training.batch_size 1.0 +237 7 training.label_smoothing 0.2770834658319969 +237 8 model.embedding_dim 2.0 +237 8 training.batch_size 0.0 +237 8 training.label_smoothing 0.03227278226492964 +237 9 model.embedding_dim 2.0 +237 9 training.batch_size 0.0 +237 9 training.label_smoothing 0.005341589332916649 +237 10 model.embedding_dim 1.0 +237 10 training.batch_size 1.0 +237 10 training.label_smoothing 0.3800926963782488 +237 11 model.embedding_dim 1.0 +237 11 training.batch_size 2.0 +237 11 training.label_smoothing 0.03783400694863086 +237 12 model.embedding_dim 2.0 +237 12 training.batch_size 2.0 +237 12 training.label_smoothing 
0.027709403849660832 +237 13 model.embedding_dim 1.0 +237 13 training.batch_size 1.0 +237 13 training.label_smoothing 0.16822547000602214 +237 14 model.embedding_dim 1.0 +237 14 training.batch_size 1.0 +237 14 training.label_smoothing 0.025813539916764123 +237 15 model.embedding_dim 2.0 +237 15 training.batch_size 0.0 +237 15 training.label_smoothing 0.11316607527739006 +237 16 model.embedding_dim 2.0 +237 16 training.batch_size 1.0 +237 16 training.label_smoothing 0.539047741386672 +237 17 model.embedding_dim 2.0 +237 17 training.batch_size 2.0 +237 17 training.label_smoothing 0.011909628868315154 +237 18 model.embedding_dim 1.0 +237 18 training.batch_size 2.0 +237 18 training.label_smoothing 0.13384716930934792 +237 19 model.embedding_dim 0.0 +237 19 training.batch_size 2.0 +237 19 training.label_smoothing 0.0025242918982712513 +237 20 model.embedding_dim 0.0 +237 20 training.batch_size 1.0 +237 20 training.label_smoothing 0.01028793864621655 +237 21 model.embedding_dim 2.0 +237 21 training.batch_size 2.0 +237 21 training.label_smoothing 0.642527502899472 +237 22 model.embedding_dim 2.0 +237 22 training.batch_size 1.0 +237 22 training.label_smoothing 0.28727110663949734 +237 23 model.embedding_dim 1.0 +237 23 training.batch_size 0.0 +237 23 training.label_smoothing 0.34961605366789067 +237 24 model.embedding_dim 2.0 +237 24 training.batch_size 0.0 +237 24 training.label_smoothing 0.16727174049285032 +237 25 model.embedding_dim 0.0 +237 25 training.batch_size 0.0 +237 25 training.label_smoothing 0.053319943211529656 +237 26 model.embedding_dim 2.0 +237 26 training.batch_size 1.0 +237 26 training.label_smoothing 0.37966871525826595 +237 27 model.embedding_dim 2.0 +237 27 training.batch_size 2.0 +237 27 training.label_smoothing 0.002447442751516755 +237 28 model.embedding_dim 0.0 +237 28 training.batch_size 1.0 +237 28 training.label_smoothing 0.20484143555834486 +237 29 model.embedding_dim 1.0 +237 29 training.batch_size 1.0 +237 29 training.label_smoothing 
0.001905126765382341 +237 30 model.embedding_dim 2.0 +237 30 training.batch_size 0.0 +237 30 training.label_smoothing 0.0012120886957888502 +237 31 model.embedding_dim 2.0 +237 31 training.batch_size 2.0 +237 31 training.label_smoothing 0.005926917461596166 +237 32 model.embedding_dim 2.0 +237 32 training.batch_size 2.0 +237 32 training.label_smoothing 0.7893356124432728 +237 33 model.embedding_dim 2.0 +237 33 training.batch_size 2.0 +237 33 training.label_smoothing 0.011740957696634188 +237 34 model.embedding_dim 0.0 +237 34 training.batch_size 1.0 +237 34 training.label_smoothing 0.01002253625177728 +237 35 model.embedding_dim 1.0 +237 35 training.batch_size 1.0 +237 35 training.label_smoothing 0.007284025065772237 +237 36 model.embedding_dim 1.0 +237 36 training.batch_size 2.0 +237 36 training.label_smoothing 0.1368339037652844 +237 37 model.embedding_dim 2.0 +237 37 training.batch_size 2.0 +237 37 training.label_smoothing 0.028818038882686105 +237 38 model.embedding_dim 2.0 +237 38 training.batch_size 1.0 +237 38 training.label_smoothing 0.341659446376783 +237 39 model.embedding_dim 0.0 +237 39 training.batch_size 0.0 +237 39 training.label_smoothing 0.19664121655362868 +237 40 model.embedding_dim 0.0 +237 40 training.batch_size 0.0 +237 40 training.label_smoothing 0.001433553906642988 +237 41 model.embedding_dim 1.0 +237 41 training.batch_size 1.0 +237 41 training.label_smoothing 0.016490598700167173 +237 42 model.embedding_dim 0.0 +237 42 training.batch_size 2.0 +237 42 training.label_smoothing 0.013300413655318085 +237 43 model.embedding_dim 2.0 +237 43 training.batch_size 1.0 +237 43 training.label_smoothing 0.005595842112429684 +237 44 model.embedding_dim 1.0 +237 44 training.batch_size 1.0 +237 44 training.label_smoothing 0.0025756978197128007 +237 45 model.embedding_dim 1.0 +237 45 training.batch_size 2.0 +237 45 training.label_smoothing 0.020999957939462348 +237 46 model.embedding_dim 0.0 +237 46 training.batch_size 0.0 +237 46 training.label_smoothing 
0.053841423152496376 +237 47 model.embedding_dim 2.0 +237 47 training.batch_size 2.0 +237 47 training.label_smoothing 0.27787823776874904 +237 48 model.embedding_dim 1.0 +237 48 training.batch_size 0.0 +237 48 training.label_smoothing 0.002900054670709152 +237 49 model.embedding_dim 2.0 +237 49 training.batch_size 2.0 +237 49 training.label_smoothing 0.033700491735535895 +237 50 model.embedding_dim 0.0 +237 50 training.batch_size 0.0 +237 50 training.label_smoothing 0.0015034933282214078 +237 51 model.embedding_dim 0.0 +237 51 training.batch_size 2.0 +237 51 training.label_smoothing 0.03176967843530623 +237 52 model.embedding_dim 2.0 +237 52 training.batch_size 1.0 +237 52 training.label_smoothing 0.0040822195303750275 +237 53 model.embedding_dim 1.0 +237 53 training.batch_size 0.0 +237 53 training.label_smoothing 0.03270873250279368 +237 54 model.embedding_dim 0.0 +237 54 training.batch_size 2.0 +237 54 training.label_smoothing 0.0740407410269394 +237 55 model.embedding_dim 1.0 +237 55 training.batch_size 1.0 +237 55 training.label_smoothing 0.00749190138806348 +237 56 model.embedding_dim 2.0 +237 56 training.batch_size 1.0 +237 56 training.label_smoothing 0.03693273612961837 +237 57 model.embedding_dim 2.0 +237 57 training.batch_size 1.0 +237 57 training.label_smoothing 0.5646951010773413 +237 58 model.embedding_dim 2.0 +237 58 training.batch_size 0.0 +237 58 training.label_smoothing 0.8261672840974909 +237 59 model.embedding_dim 1.0 +237 59 training.batch_size 2.0 +237 59 training.label_smoothing 0.7196020504427987 +237 60 model.embedding_dim 2.0 +237 60 training.batch_size 1.0 +237 60 training.label_smoothing 0.0035396918204952292 +237 61 model.embedding_dim 1.0 +237 61 training.batch_size 0.0 +237 61 training.label_smoothing 0.11501831425362873 +237 62 model.embedding_dim 0.0 +237 62 training.batch_size 2.0 +237 62 training.label_smoothing 0.4500473946238693 +237 63 model.embedding_dim 0.0 +237 63 training.batch_size 1.0 +237 63 training.label_smoothing 
0.011658600370052247 +237 64 model.embedding_dim 1.0 +237 64 training.batch_size 0.0 +237 64 training.label_smoothing 0.004749474502730423 +237 65 model.embedding_dim 0.0 +237 65 training.batch_size 0.0 +237 65 training.label_smoothing 0.024857223324664092 +237 66 model.embedding_dim 0.0 +237 66 training.batch_size 2.0 +237 66 training.label_smoothing 0.0227969513280364 +237 67 model.embedding_dim 2.0 +237 67 training.batch_size 2.0 +237 67 training.label_smoothing 0.004903777075339721 +237 68 model.embedding_dim 0.0 +237 68 training.batch_size 1.0 +237 68 training.label_smoothing 0.0019753436951242065 +237 69 model.embedding_dim 1.0 +237 69 training.batch_size 0.0 +237 69 training.label_smoothing 0.20966811286187878 +237 70 model.embedding_dim 0.0 +237 70 training.batch_size 1.0 +237 70 training.label_smoothing 0.09574344570758031 +237 71 model.embedding_dim 2.0 +237 71 training.batch_size 2.0 +237 71 training.label_smoothing 0.5840636944690685 +237 72 model.embedding_dim 0.0 +237 72 training.batch_size 2.0 +237 72 training.label_smoothing 0.3858269530510783 +237 73 model.embedding_dim 0.0 +237 73 training.batch_size 2.0 +237 73 training.label_smoothing 0.0014709067911896421 +237 74 model.embedding_dim 2.0 +237 74 training.batch_size 0.0 +237 74 training.label_smoothing 0.02536418007041177 +237 75 model.embedding_dim 1.0 +237 75 training.batch_size 1.0 +237 75 training.label_smoothing 0.05185455486306578 +237 76 model.embedding_dim 1.0 +237 76 training.batch_size 2.0 +237 76 training.label_smoothing 0.17724779111160793 +237 77 model.embedding_dim 0.0 +237 77 training.batch_size 2.0 +237 77 training.label_smoothing 0.04834275646766876 +237 78 model.embedding_dim 0.0 +237 78 training.batch_size 2.0 +237 78 training.label_smoothing 0.001306398277969925 +237 79 model.embedding_dim 1.0 +237 79 training.batch_size 1.0 +237 79 training.label_smoothing 0.012360071374817842 +237 80 model.embedding_dim 2.0 +237 80 training.batch_size 0.0 +237 80 training.label_smoothing 
0.5864914172651553 +237 81 model.embedding_dim 1.0 +237 81 training.batch_size 1.0 +237 81 training.label_smoothing 0.07985166771961001 +237 82 model.embedding_dim 2.0 +237 82 training.batch_size 0.0 +237 82 training.label_smoothing 0.001609250919145323 +237 83 model.embedding_dim 1.0 +237 83 training.batch_size 1.0 +237 83 training.label_smoothing 0.046598733979625134 +237 84 model.embedding_dim 0.0 +237 84 training.batch_size 1.0 +237 84 training.label_smoothing 0.0053581080229557106 +237 85 model.embedding_dim 0.0 +237 85 training.batch_size 1.0 +237 85 training.label_smoothing 0.004589634911006087 +237 86 model.embedding_dim 2.0 +237 86 training.batch_size 0.0 +237 86 training.label_smoothing 0.043987058422008486 +237 87 model.embedding_dim 1.0 +237 87 training.batch_size 2.0 +237 87 training.label_smoothing 0.0019033099890804434 +237 88 model.embedding_dim 2.0 +237 88 training.batch_size 0.0 +237 88 training.label_smoothing 0.001441036698320296 +237 89 model.embedding_dim 1.0 +237 89 training.batch_size 2.0 +237 89 training.label_smoothing 0.07631394020484865 +237 90 model.embedding_dim 2.0 +237 90 training.batch_size 0.0 +237 90 training.label_smoothing 0.011200575089247912 +237 91 model.embedding_dim 0.0 +237 91 training.batch_size 1.0 +237 91 training.label_smoothing 0.26981047922111845 +237 92 model.embedding_dim 1.0 +237 92 training.batch_size 0.0 +237 92 training.label_smoothing 0.013073547570586872 +237 93 model.embedding_dim 0.0 +237 93 training.batch_size 1.0 +237 93 training.label_smoothing 0.04574528141917437 +237 94 model.embedding_dim 0.0 +237 94 training.batch_size 1.0 +237 94 training.label_smoothing 0.057013257809419855 +237 95 model.embedding_dim 0.0 +237 95 training.batch_size 2.0 +237 95 training.label_smoothing 0.008432690451122984 +237 96 model.embedding_dim 2.0 +237 96 training.batch_size 0.0 +237 96 training.label_smoothing 0.23386750583438262 +237 97 model.embedding_dim 2.0 +237 97 training.batch_size 1.0 +237 97 
training.label_smoothing 0.002650201515590936 +237 98 model.embedding_dim 0.0 +237 98 training.batch_size 0.0 +237 98 training.label_smoothing 0.008007726356476058 +237 99 model.embedding_dim 0.0 +237 99 training.batch_size 2.0 +237 99 training.label_smoothing 0.09343128131877919 +237 100 model.embedding_dim 2.0 +237 100 training.batch_size 0.0 +237 100 training.label_smoothing 0.05793676227249836 +237 1 dataset """kinships""" +237 1 model """ermlp""" +237 1 loss """softplus""" +237 1 regularizer """no""" +237 1 optimizer """adadelta""" +237 1 training_loop """lcwa""" +237 1 evaluator """rankbased""" +237 2 dataset """kinships""" +237 2 model """ermlp""" +237 2 loss """softplus""" +237 2 regularizer """no""" +237 2 optimizer """adadelta""" +237 2 training_loop """lcwa""" +237 2 evaluator """rankbased""" +237 3 dataset """kinships""" +237 3 model """ermlp""" +237 3 loss """softplus""" +237 3 regularizer """no""" +237 3 optimizer """adadelta""" +237 3 training_loop """lcwa""" +237 3 evaluator """rankbased""" +237 4 dataset """kinships""" +237 4 model """ermlp""" +237 4 loss """softplus""" +237 4 regularizer """no""" +237 4 optimizer """adadelta""" +237 4 training_loop """lcwa""" +237 4 evaluator """rankbased""" +237 5 dataset """kinships""" +237 5 model """ermlp""" +237 5 loss """softplus""" +237 5 regularizer """no""" +237 5 optimizer """adadelta""" +237 5 training_loop """lcwa""" +237 5 evaluator """rankbased""" +237 6 dataset """kinships""" +237 6 model """ermlp""" +237 6 loss """softplus""" +237 6 regularizer """no""" +237 6 optimizer """adadelta""" +237 6 training_loop """lcwa""" +237 6 evaluator """rankbased""" +237 7 dataset """kinships""" +237 7 model """ermlp""" +237 7 loss """softplus""" +237 7 regularizer """no""" +237 7 optimizer """adadelta""" +237 7 training_loop """lcwa""" +237 7 evaluator """rankbased""" +237 8 dataset """kinships""" +237 8 model """ermlp""" +237 8 loss """softplus""" +237 8 regularizer """no""" +237 8 optimizer """adadelta""" +237 8 
training_loop """lcwa""" +237 8 evaluator """rankbased""" +237 9 dataset """kinships""" +237 9 model """ermlp""" +237 9 loss """softplus""" +237 9 regularizer """no""" +237 9 optimizer """adadelta""" +237 9 training_loop """lcwa""" +237 9 evaluator """rankbased""" +237 10 dataset """kinships""" +237 10 model """ermlp""" +237 10 loss """softplus""" +237 10 regularizer """no""" +237 10 optimizer """adadelta""" +237 10 training_loop """lcwa""" +237 10 evaluator """rankbased""" +237 11 dataset """kinships""" +237 11 model """ermlp""" +237 11 loss """softplus""" +237 11 regularizer """no""" +237 11 optimizer """adadelta""" +237 11 training_loop """lcwa""" +237 11 evaluator """rankbased""" +237 12 dataset """kinships""" +237 12 model """ermlp""" +237 12 loss """softplus""" +237 12 regularizer """no""" +237 12 optimizer """adadelta""" +237 12 training_loop """lcwa""" +237 12 evaluator """rankbased""" +237 13 dataset """kinships""" +237 13 model """ermlp""" +237 13 loss """softplus""" +237 13 regularizer """no""" +237 13 optimizer """adadelta""" +237 13 training_loop """lcwa""" +237 13 evaluator """rankbased""" +237 14 dataset """kinships""" +237 14 model """ermlp""" +237 14 loss """softplus""" +237 14 regularizer """no""" +237 14 optimizer """adadelta""" +237 14 training_loop """lcwa""" +237 14 evaluator """rankbased""" +237 15 dataset """kinships""" +237 15 model """ermlp""" +237 15 loss """softplus""" +237 15 regularizer """no""" +237 15 optimizer """adadelta""" +237 15 training_loop """lcwa""" +237 15 evaluator """rankbased""" +237 16 dataset """kinships""" +237 16 model """ermlp""" +237 16 loss """softplus""" +237 16 regularizer """no""" +237 16 optimizer """adadelta""" +237 16 training_loop """lcwa""" +237 16 evaluator """rankbased""" +237 17 dataset """kinships""" +237 17 model """ermlp""" +237 17 loss """softplus""" +237 17 regularizer """no""" +237 17 optimizer """adadelta""" +237 17 training_loop """lcwa""" +237 17 evaluator """rankbased""" +237 18 dataset 
"""kinships""" +237 18 model """ermlp""" +237 18 loss """softplus""" +237 18 regularizer """no""" +237 18 optimizer """adadelta""" +237 18 training_loop """lcwa""" +237 18 evaluator """rankbased""" +237 19 dataset """kinships""" +237 19 model """ermlp""" +237 19 loss """softplus""" +237 19 regularizer """no""" +237 19 optimizer """adadelta""" +237 19 training_loop """lcwa""" +237 19 evaluator """rankbased""" +237 20 dataset """kinships""" +237 20 model """ermlp""" +237 20 loss """softplus""" +237 20 regularizer """no""" +237 20 optimizer """adadelta""" +237 20 training_loop """lcwa""" +237 20 evaluator """rankbased""" +237 21 dataset """kinships""" +237 21 model """ermlp""" +237 21 loss """softplus""" +237 21 regularizer """no""" +237 21 optimizer """adadelta""" +237 21 training_loop """lcwa""" +237 21 evaluator """rankbased""" +237 22 dataset """kinships""" +237 22 model """ermlp""" +237 22 loss """softplus""" +237 22 regularizer """no""" +237 22 optimizer """adadelta""" +237 22 training_loop """lcwa""" +237 22 evaluator """rankbased""" +237 23 dataset """kinships""" +237 23 model """ermlp""" +237 23 loss """softplus""" +237 23 regularizer """no""" +237 23 optimizer """adadelta""" +237 23 training_loop """lcwa""" +237 23 evaluator """rankbased""" +237 24 dataset """kinships""" +237 24 model """ermlp""" +237 24 loss """softplus""" +237 24 regularizer """no""" +237 24 optimizer """adadelta""" +237 24 training_loop """lcwa""" +237 24 evaluator """rankbased""" +237 25 dataset """kinships""" +237 25 model """ermlp""" +237 25 loss """softplus""" +237 25 regularizer """no""" +237 25 optimizer """adadelta""" +237 25 training_loop """lcwa""" +237 25 evaluator """rankbased""" +237 26 dataset """kinships""" +237 26 model """ermlp""" +237 26 loss """softplus""" +237 26 regularizer """no""" +237 26 optimizer """adadelta""" +237 26 training_loop """lcwa""" +237 26 evaluator """rankbased""" +237 27 dataset """kinships""" +237 27 model """ermlp""" +237 27 loss """softplus""" +237 
27 regularizer """no""" +237 27 optimizer """adadelta""" +237 27 training_loop """lcwa""" +237 27 evaluator """rankbased""" +237 28 dataset """kinships""" +237 28 model """ermlp""" +237 28 loss """softplus""" +237 28 regularizer """no""" +237 28 optimizer """adadelta""" +237 28 training_loop """lcwa""" +237 28 evaluator """rankbased""" +237 29 dataset """kinships""" +237 29 model """ermlp""" +237 29 loss """softplus""" +237 29 regularizer """no""" +237 29 optimizer """adadelta""" +237 29 training_loop """lcwa""" +237 29 evaluator """rankbased""" +237 30 dataset """kinships""" +237 30 model """ermlp""" +237 30 loss """softplus""" +237 30 regularizer """no""" +237 30 optimizer """adadelta""" +237 30 training_loop """lcwa""" +237 30 evaluator """rankbased""" +237 31 dataset """kinships""" +237 31 model """ermlp""" +237 31 loss """softplus""" +237 31 regularizer """no""" +237 31 optimizer """adadelta""" +237 31 training_loop """lcwa""" +237 31 evaluator """rankbased""" +237 32 dataset """kinships""" +237 32 model """ermlp""" +237 32 loss """softplus""" +237 32 regularizer """no""" +237 32 optimizer """adadelta""" +237 32 training_loop """lcwa""" +237 32 evaluator """rankbased""" +237 33 dataset """kinships""" +237 33 model """ermlp""" +237 33 loss """softplus""" +237 33 regularizer """no""" +237 33 optimizer """adadelta""" +237 33 training_loop """lcwa""" +237 33 evaluator """rankbased""" +237 34 dataset """kinships""" +237 34 model """ermlp""" +237 34 loss """softplus""" +237 34 regularizer """no""" +237 34 optimizer """adadelta""" +237 34 training_loop """lcwa""" +237 34 evaluator """rankbased""" +237 35 dataset """kinships""" +237 35 model """ermlp""" +237 35 loss """softplus""" +237 35 regularizer """no""" +237 35 optimizer """adadelta""" +237 35 training_loop """lcwa""" +237 35 evaluator """rankbased""" +237 36 dataset """kinships""" +237 36 model """ermlp""" +237 36 loss """softplus""" +237 36 regularizer """no""" +237 36 optimizer """adadelta""" +237 36 
training_loop """lcwa""" +237 36 evaluator """rankbased""" +237 37 dataset """kinships""" +237 37 model """ermlp""" +237 37 loss """softplus""" +237 37 regularizer """no""" +237 37 optimizer """adadelta""" +237 37 training_loop """lcwa""" +237 37 evaluator """rankbased""" +237 38 dataset """kinships""" +237 38 model """ermlp""" +237 38 loss """softplus""" +237 38 regularizer """no""" +237 38 optimizer """adadelta""" +237 38 training_loop """lcwa""" +237 38 evaluator """rankbased""" +237 39 dataset """kinships""" +237 39 model """ermlp""" +237 39 loss """softplus""" +237 39 regularizer """no""" +237 39 optimizer """adadelta""" +237 39 training_loop """lcwa""" +237 39 evaluator """rankbased""" +237 40 dataset """kinships""" +237 40 model """ermlp""" +237 40 loss """softplus""" +237 40 regularizer """no""" +237 40 optimizer """adadelta""" +237 40 training_loop """lcwa""" +237 40 evaluator """rankbased""" +237 41 dataset """kinships""" +237 41 model """ermlp""" +237 41 loss """softplus""" +237 41 regularizer """no""" +237 41 optimizer """adadelta""" +237 41 training_loop """lcwa""" +237 41 evaluator """rankbased""" +237 42 dataset """kinships""" +237 42 model """ermlp""" +237 42 loss """softplus""" +237 42 regularizer """no""" +237 42 optimizer """adadelta""" +237 42 training_loop """lcwa""" +237 42 evaluator """rankbased""" +237 43 dataset """kinships""" +237 43 model """ermlp""" +237 43 loss """softplus""" +237 43 regularizer """no""" +237 43 optimizer """adadelta""" +237 43 training_loop """lcwa""" +237 43 evaluator """rankbased""" +237 44 dataset """kinships""" +237 44 model """ermlp""" +237 44 loss """softplus""" +237 44 regularizer """no""" +237 44 optimizer """adadelta""" +237 44 training_loop """lcwa""" +237 44 evaluator """rankbased""" +237 45 dataset """kinships""" +237 45 model """ermlp""" +237 45 loss """softplus""" +237 45 regularizer """no""" +237 45 optimizer """adadelta""" +237 45 training_loop """lcwa""" +237 45 evaluator """rankbased""" +237 46 
dataset """kinships""" +237 46 model """ermlp""" +237 46 loss """softplus""" +237 46 regularizer """no""" +237 46 optimizer """adadelta""" +237 46 training_loop """lcwa""" +237 46 evaluator """rankbased""" +237 47 dataset """kinships""" +237 47 model """ermlp""" +237 47 loss """softplus""" +237 47 regularizer """no""" +237 47 optimizer """adadelta""" +237 47 training_loop """lcwa""" +237 47 evaluator """rankbased""" +237 48 dataset """kinships""" +237 48 model """ermlp""" +237 48 loss """softplus""" +237 48 regularizer """no""" +237 48 optimizer """adadelta""" +237 48 training_loop """lcwa""" +237 48 evaluator """rankbased""" +237 49 dataset """kinships""" +237 49 model """ermlp""" +237 49 loss """softplus""" +237 49 regularizer """no""" +237 49 optimizer """adadelta""" +237 49 training_loop """lcwa""" +237 49 evaluator """rankbased""" +237 50 dataset """kinships""" +237 50 model """ermlp""" +237 50 loss """softplus""" +237 50 regularizer """no""" +237 50 optimizer """adadelta""" +237 50 training_loop """lcwa""" +237 50 evaluator """rankbased""" +237 51 dataset """kinships""" +237 51 model """ermlp""" +237 51 loss """softplus""" +237 51 regularizer """no""" +237 51 optimizer """adadelta""" +237 51 training_loop """lcwa""" +237 51 evaluator """rankbased""" +237 52 dataset """kinships""" +237 52 model """ermlp""" +237 52 loss """softplus""" +237 52 regularizer """no""" +237 52 optimizer """adadelta""" +237 52 training_loop """lcwa""" +237 52 evaluator """rankbased""" +237 53 dataset """kinships""" +237 53 model """ermlp""" +237 53 loss """softplus""" +237 53 regularizer """no""" +237 53 optimizer """adadelta""" +237 53 training_loop """lcwa""" +237 53 evaluator """rankbased""" +237 54 dataset """kinships""" +237 54 model """ermlp""" +237 54 loss """softplus""" +237 54 regularizer """no""" +237 54 optimizer """adadelta""" +237 54 training_loop """lcwa""" +237 54 evaluator """rankbased""" +237 55 dataset """kinships""" +237 55 model """ermlp""" +237 55 loss 
"""softplus""" +237 55 regularizer """no""" +237 55 optimizer """adadelta""" +237 55 training_loop """lcwa""" +237 55 evaluator """rankbased""" +237 56 dataset """kinships""" +237 56 model """ermlp""" +237 56 loss """softplus""" +237 56 regularizer """no""" +237 56 optimizer """adadelta""" +237 56 training_loop """lcwa""" +237 56 evaluator """rankbased""" +237 57 dataset """kinships""" +237 57 model """ermlp""" +237 57 loss """softplus""" +237 57 regularizer """no""" +237 57 optimizer """adadelta""" +237 57 training_loop """lcwa""" +237 57 evaluator """rankbased""" +237 58 dataset """kinships""" +237 58 model """ermlp""" +237 58 loss """softplus""" +237 58 regularizer """no""" +237 58 optimizer """adadelta""" +237 58 training_loop """lcwa""" +237 58 evaluator """rankbased""" +237 59 dataset """kinships""" +237 59 model """ermlp""" +237 59 loss """softplus""" +237 59 regularizer """no""" +237 59 optimizer """adadelta""" +237 59 training_loop """lcwa""" +237 59 evaluator """rankbased""" +237 60 dataset """kinships""" +237 60 model """ermlp""" +237 60 loss """softplus""" +237 60 regularizer """no""" +237 60 optimizer """adadelta""" +237 60 training_loop """lcwa""" +237 60 evaluator """rankbased""" +237 61 dataset """kinships""" +237 61 model """ermlp""" +237 61 loss """softplus""" +237 61 regularizer """no""" +237 61 optimizer """adadelta""" +237 61 training_loop """lcwa""" +237 61 evaluator """rankbased""" +237 62 dataset """kinships""" +237 62 model """ermlp""" +237 62 loss """softplus""" +237 62 regularizer """no""" +237 62 optimizer """adadelta""" +237 62 training_loop """lcwa""" +237 62 evaluator """rankbased""" +237 63 dataset """kinships""" +237 63 model """ermlp""" +237 63 loss """softplus""" +237 63 regularizer """no""" +237 63 optimizer """adadelta""" +237 63 training_loop """lcwa""" +237 63 evaluator """rankbased""" +237 64 dataset """kinships""" +237 64 model """ermlp""" +237 64 loss """softplus""" +237 64 regularizer """no""" +237 64 optimizer 
"""adadelta""" +237 64 training_loop """lcwa""" +237 64 evaluator """rankbased""" +237 65 dataset """kinships""" +237 65 model """ermlp""" +237 65 loss """softplus""" +237 65 regularizer """no""" +237 65 optimizer """adadelta""" +237 65 training_loop """lcwa""" +237 65 evaluator """rankbased""" +237 66 dataset """kinships""" +237 66 model """ermlp""" +237 66 loss """softplus""" +237 66 regularizer """no""" +237 66 optimizer """adadelta""" +237 66 training_loop """lcwa""" +237 66 evaluator """rankbased""" +237 67 dataset """kinships""" +237 67 model """ermlp""" +237 67 loss """softplus""" +237 67 regularizer """no""" +237 67 optimizer """adadelta""" +237 67 training_loop """lcwa""" +237 67 evaluator """rankbased""" +237 68 dataset """kinships""" +237 68 model """ermlp""" +237 68 loss """softplus""" +237 68 regularizer """no""" +237 68 optimizer """adadelta""" +237 68 training_loop """lcwa""" +237 68 evaluator """rankbased""" +237 69 dataset """kinships""" +237 69 model """ermlp""" +237 69 loss """softplus""" +237 69 regularizer """no""" +237 69 optimizer """adadelta""" +237 69 training_loop """lcwa""" +237 69 evaluator """rankbased""" +237 70 dataset """kinships""" +237 70 model """ermlp""" +237 70 loss """softplus""" +237 70 regularizer """no""" +237 70 optimizer """adadelta""" +237 70 training_loop """lcwa""" +237 70 evaluator """rankbased""" +237 71 dataset """kinships""" +237 71 model """ermlp""" +237 71 loss """softplus""" +237 71 regularizer """no""" +237 71 optimizer """adadelta""" +237 71 training_loop """lcwa""" +237 71 evaluator """rankbased""" +237 72 dataset """kinships""" +237 72 model """ermlp""" +237 72 loss """softplus""" +237 72 regularizer """no""" +237 72 optimizer """adadelta""" +237 72 training_loop """lcwa""" +237 72 evaluator """rankbased""" +237 73 dataset """kinships""" +237 73 model """ermlp""" +237 73 loss """softplus""" +237 73 regularizer """no""" +237 73 optimizer """adadelta""" +237 73 training_loop """lcwa""" +237 73 evaluator 
"""rankbased""" +237 74 dataset """kinships""" +237 74 model """ermlp""" +237 74 loss """softplus""" +237 74 regularizer """no""" +237 74 optimizer """adadelta""" +237 74 training_loop """lcwa""" +237 74 evaluator """rankbased""" +237 75 dataset """kinships""" +237 75 model """ermlp""" +237 75 loss """softplus""" +237 75 regularizer """no""" +237 75 optimizer """adadelta""" +237 75 training_loop """lcwa""" +237 75 evaluator """rankbased""" +237 76 dataset """kinships""" +237 76 model """ermlp""" +237 76 loss """softplus""" +237 76 regularizer """no""" +237 76 optimizer """adadelta""" +237 76 training_loop """lcwa""" +237 76 evaluator """rankbased""" +237 77 dataset """kinships""" +237 77 model """ermlp""" +237 77 loss """softplus""" +237 77 regularizer """no""" +237 77 optimizer """adadelta""" +237 77 training_loop """lcwa""" +237 77 evaluator """rankbased""" +237 78 dataset """kinships""" +237 78 model """ermlp""" +237 78 loss """softplus""" +237 78 regularizer """no""" +237 78 optimizer """adadelta""" +237 78 training_loop """lcwa""" +237 78 evaluator """rankbased""" +237 79 dataset """kinships""" +237 79 model """ermlp""" +237 79 loss """softplus""" +237 79 regularizer """no""" +237 79 optimizer """adadelta""" +237 79 training_loop """lcwa""" +237 79 evaluator """rankbased""" +237 80 dataset """kinships""" +237 80 model """ermlp""" +237 80 loss """softplus""" +237 80 regularizer """no""" +237 80 optimizer """adadelta""" +237 80 training_loop """lcwa""" +237 80 evaluator """rankbased""" +237 81 dataset """kinships""" +237 81 model """ermlp""" +237 81 loss """softplus""" +237 81 regularizer """no""" +237 81 optimizer """adadelta""" +237 81 training_loop """lcwa""" +237 81 evaluator """rankbased""" +237 82 dataset """kinships""" +237 82 model """ermlp""" +237 82 loss """softplus""" +237 82 regularizer """no""" +237 82 optimizer """adadelta""" +237 82 training_loop """lcwa""" +237 82 evaluator """rankbased""" +237 83 dataset """kinships""" +237 83 model """ermlp""" 
+237 83 loss """softplus""" +237 83 regularizer """no""" +237 83 optimizer """adadelta""" +237 83 training_loop """lcwa""" +237 83 evaluator """rankbased""" +237 84 dataset """kinships""" +237 84 model """ermlp""" +237 84 loss """softplus""" +237 84 regularizer """no""" +237 84 optimizer """adadelta""" +237 84 training_loop """lcwa""" +237 84 evaluator """rankbased""" +237 85 dataset """kinships""" +237 85 model """ermlp""" +237 85 loss """softplus""" +237 85 regularizer """no""" +237 85 optimizer """adadelta""" +237 85 training_loop """lcwa""" +237 85 evaluator """rankbased""" +237 86 dataset """kinships""" +237 86 model """ermlp""" +237 86 loss """softplus""" +237 86 regularizer """no""" +237 86 optimizer """adadelta""" +237 86 training_loop """lcwa""" +237 86 evaluator """rankbased""" +237 87 dataset """kinships""" +237 87 model """ermlp""" +237 87 loss """softplus""" +237 87 regularizer """no""" +237 87 optimizer """adadelta""" +237 87 training_loop """lcwa""" +237 87 evaluator """rankbased""" +237 88 dataset """kinships""" +237 88 model """ermlp""" +237 88 loss """softplus""" +237 88 regularizer """no""" +237 88 optimizer """adadelta""" +237 88 training_loop """lcwa""" +237 88 evaluator """rankbased""" +237 89 dataset """kinships""" +237 89 model """ermlp""" +237 89 loss """softplus""" +237 89 regularizer """no""" +237 89 optimizer """adadelta""" +237 89 training_loop """lcwa""" +237 89 evaluator """rankbased""" +237 90 dataset """kinships""" +237 90 model """ermlp""" +237 90 loss """softplus""" +237 90 regularizer """no""" +237 90 optimizer """adadelta""" +237 90 training_loop """lcwa""" +237 90 evaluator """rankbased""" +237 91 dataset """kinships""" +237 91 model """ermlp""" +237 91 loss """softplus""" +237 91 regularizer """no""" +237 91 optimizer """adadelta""" +237 91 training_loop """lcwa""" +237 91 evaluator """rankbased""" +237 92 dataset """kinships""" +237 92 model """ermlp""" +237 92 loss """softplus""" +237 92 regularizer """no""" +237 92 
optimizer """adadelta""" +237 92 training_loop """lcwa""" +237 92 evaluator """rankbased""" +237 93 dataset """kinships""" +237 93 model """ermlp""" +237 93 loss """softplus""" +237 93 regularizer """no""" +237 93 optimizer """adadelta""" +237 93 training_loop """lcwa""" +237 93 evaluator """rankbased""" +237 94 dataset """kinships""" +237 94 model """ermlp""" +237 94 loss """softplus""" +237 94 regularizer """no""" +237 94 optimizer """adadelta""" +237 94 training_loop """lcwa""" +237 94 evaluator """rankbased""" +237 95 dataset """kinships""" +237 95 model """ermlp""" +237 95 loss """softplus""" +237 95 regularizer """no""" +237 95 optimizer """adadelta""" +237 95 training_loop """lcwa""" +237 95 evaluator """rankbased""" +237 96 dataset """kinships""" +237 96 model """ermlp""" +237 96 loss """softplus""" +237 96 regularizer """no""" +237 96 optimizer """adadelta""" +237 96 training_loop """lcwa""" +237 96 evaluator """rankbased""" +237 97 dataset """kinships""" +237 97 model """ermlp""" +237 97 loss """softplus""" +237 97 regularizer """no""" +237 97 optimizer """adadelta""" +237 97 training_loop """lcwa""" +237 97 evaluator """rankbased""" +237 98 dataset """kinships""" +237 98 model """ermlp""" +237 98 loss """softplus""" +237 98 regularizer """no""" +237 98 optimizer """adadelta""" +237 98 training_loop """lcwa""" +237 98 evaluator """rankbased""" +237 99 dataset """kinships""" +237 99 model """ermlp""" +237 99 loss """softplus""" +237 99 regularizer """no""" +237 99 optimizer """adadelta""" +237 99 training_loop """lcwa""" +237 99 evaluator """rankbased""" +237 100 dataset """kinships""" +237 100 model """ermlp""" +237 100 loss """softplus""" +237 100 regularizer """no""" +237 100 optimizer """adadelta""" +237 100 training_loop """lcwa""" +237 100 evaluator """rankbased""" +238 1 model.embedding_dim 0.0 +238 1 negative_sampler.num_negs_per_pos 50.0 +238 1 training.batch_size 0.0 +238 2 model.embedding_dim 1.0 +238 2 negative_sampler.num_negs_per_pos 2.0 +238 
2 training.batch_size 1.0 +238 3 model.embedding_dim 0.0 +238 3 negative_sampler.num_negs_per_pos 3.0 +238 3 training.batch_size 0.0 +238 4 model.embedding_dim 2.0 +238 4 negative_sampler.num_negs_per_pos 18.0 +238 4 training.batch_size 0.0 +238 5 model.embedding_dim 1.0 +238 5 negative_sampler.num_negs_per_pos 8.0 +238 5 training.batch_size 1.0 +238 6 model.embedding_dim 2.0 +238 6 negative_sampler.num_negs_per_pos 89.0 +238 6 training.batch_size 2.0 +238 7 model.embedding_dim 0.0 +238 7 negative_sampler.num_negs_per_pos 24.0 +238 7 training.batch_size 0.0 +238 8 model.embedding_dim 2.0 +238 8 negative_sampler.num_negs_per_pos 84.0 +238 8 training.batch_size 0.0 +238 9 model.embedding_dim 1.0 +238 9 negative_sampler.num_negs_per_pos 8.0 +238 9 training.batch_size 1.0 +238 10 model.embedding_dim 2.0 +238 10 negative_sampler.num_negs_per_pos 25.0 +238 10 training.batch_size 1.0 +238 11 model.embedding_dim 1.0 +238 11 negative_sampler.num_negs_per_pos 99.0 +238 11 training.batch_size 2.0 +238 12 model.embedding_dim 0.0 +238 12 negative_sampler.num_negs_per_pos 42.0 +238 12 training.batch_size 0.0 +238 13 model.embedding_dim 1.0 +238 13 negative_sampler.num_negs_per_pos 77.0 +238 13 training.batch_size 0.0 +238 14 model.embedding_dim 2.0 +238 14 negative_sampler.num_negs_per_pos 60.0 +238 14 training.batch_size 2.0 +238 15 model.embedding_dim 0.0 +238 15 negative_sampler.num_negs_per_pos 34.0 +238 15 training.batch_size 0.0 +238 16 model.embedding_dim 0.0 +238 16 negative_sampler.num_negs_per_pos 21.0 +238 16 training.batch_size 2.0 +238 17 model.embedding_dim 0.0 +238 17 negative_sampler.num_negs_per_pos 5.0 +238 17 training.batch_size 1.0 +238 18 model.embedding_dim 2.0 +238 18 negative_sampler.num_negs_per_pos 34.0 +238 18 training.batch_size 2.0 +238 19 model.embedding_dim 2.0 +238 19 negative_sampler.num_negs_per_pos 11.0 +238 19 training.batch_size 2.0 +238 20 model.embedding_dim 2.0 +238 20 negative_sampler.num_negs_per_pos 7.0 +238 20 training.batch_size 1.0 
+238 21 model.embedding_dim 2.0 +238 21 negative_sampler.num_negs_per_pos 83.0 +238 21 training.batch_size 0.0 +238 22 model.embedding_dim 0.0 +238 22 negative_sampler.num_negs_per_pos 85.0 +238 22 training.batch_size 0.0 +238 23 model.embedding_dim 2.0 +238 23 negative_sampler.num_negs_per_pos 43.0 +238 23 training.batch_size 2.0 +238 24 model.embedding_dim 2.0 +238 24 negative_sampler.num_negs_per_pos 41.0 +238 24 training.batch_size 2.0 +238 25 model.embedding_dim 2.0 +238 25 negative_sampler.num_negs_per_pos 14.0 +238 25 training.batch_size 1.0 +238 26 model.embedding_dim 0.0 +238 26 negative_sampler.num_negs_per_pos 96.0 +238 26 training.batch_size 1.0 +238 27 model.embedding_dim 2.0 +238 27 negative_sampler.num_negs_per_pos 5.0 +238 27 training.batch_size 0.0 +238 28 model.embedding_dim 0.0 +238 28 negative_sampler.num_negs_per_pos 90.0 +238 28 training.batch_size 2.0 +238 29 model.embedding_dim 1.0 +238 29 negative_sampler.num_negs_per_pos 58.0 +238 29 training.batch_size 0.0 +238 30 model.embedding_dim 1.0 +238 30 negative_sampler.num_negs_per_pos 77.0 +238 30 training.batch_size 2.0 +238 31 model.embedding_dim 1.0 +238 31 negative_sampler.num_negs_per_pos 87.0 +238 31 training.batch_size 0.0 +238 32 model.embedding_dim 0.0 +238 32 negative_sampler.num_negs_per_pos 38.0 +238 32 training.batch_size 2.0 +238 33 model.embedding_dim 0.0 +238 33 negative_sampler.num_negs_per_pos 32.0 +238 33 training.batch_size 0.0 +238 34 model.embedding_dim 1.0 +238 34 negative_sampler.num_negs_per_pos 67.0 +238 34 training.batch_size 1.0 +238 35 model.embedding_dim 1.0 +238 35 negative_sampler.num_negs_per_pos 81.0 +238 35 training.batch_size 1.0 +238 36 model.embedding_dim 0.0 +238 36 negative_sampler.num_negs_per_pos 30.0 +238 36 training.batch_size 0.0 +238 37 model.embedding_dim 2.0 +238 37 negative_sampler.num_negs_per_pos 89.0 +238 37 training.batch_size 2.0 +238 38 model.embedding_dim 1.0 +238 38 negative_sampler.num_negs_per_pos 4.0 +238 38 training.batch_size 0.0 
+238 39 model.embedding_dim 1.0 +238 39 negative_sampler.num_negs_per_pos 46.0 +238 39 training.batch_size 2.0 +238 40 model.embedding_dim 1.0 +238 40 negative_sampler.num_negs_per_pos 73.0 +238 40 training.batch_size 0.0 +238 41 model.embedding_dim 2.0 +238 41 negative_sampler.num_negs_per_pos 6.0 +238 41 training.batch_size 1.0 +238 42 model.embedding_dim 1.0 +238 42 negative_sampler.num_negs_per_pos 2.0 +238 42 training.batch_size 1.0 +238 43 model.embedding_dim 1.0 +238 43 negative_sampler.num_negs_per_pos 6.0 +238 43 training.batch_size 0.0 +238 44 model.embedding_dim 1.0 +238 44 negative_sampler.num_negs_per_pos 95.0 +238 44 training.batch_size 1.0 +238 45 model.embedding_dim 0.0 +238 45 negative_sampler.num_negs_per_pos 97.0 +238 45 training.batch_size 1.0 +238 46 model.embedding_dim 2.0 +238 46 negative_sampler.num_negs_per_pos 71.0 +238 46 training.batch_size 0.0 +238 47 model.embedding_dim 0.0 +238 47 negative_sampler.num_negs_per_pos 10.0 +238 47 training.batch_size 1.0 +238 48 model.embedding_dim 2.0 +238 48 negative_sampler.num_negs_per_pos 35.0 +238 48 training.batch_size 2.0 +238 49 model.embedding_dim 2.0 +238 49 negative_sampler.num_negs_per_pos 52.0 +238 49 training.batch_size 2.0 +238 50 model.embedding_dim 0.0 +238 50 negative_sampler.num_negs_per_pos 5.0 +238 50 training.batch_size 2.0 +238 51 model.embedding_dim 0.0 +238 51 negative_sampler.num_negs_per_pos 20.0 +238 51 training.batch_size 1.0 +238 52 model.embedding_dim 1.0 +238 52 negative_sampler.num_negs_per_pos 16.0 +238 52 training.batch_size 2.0 +238 53 model.embedding_dim 2.0 +238 53 negative_sampler.num_negs_per_pos 68.0 +238 53 training.batch_size 0.0 +238 54 model.embedding_dim 2.0 +238 54 negative_sampler.num_negs_per_pos 54.0 +238 54 training.batch_size 1.0 +238 55 model.embedding_dim 1.0 +238 55 negative_sampler.num_negs_per_pos 17.0 +238 55 training.batch_size 0.0 +238 56 model.embedding_dim 0.0 +238 56 negative_sampler.num_negs_per_pos 32.0 +238 56 training.batch_size 2.0 +238 
57 model.embedding_dim 2.0 +238 57 negative_sampler.num_negs_per_pos 74.0 +238 57 training.batch_size 0.0 +238 58 model.embedding_dim 1.0 +238 58 negative_sampler.num_negs_per_pos 39.0 +238 58 training.batch_size 1.0 +238 59 model.embedding_dim 1.0 +238 59 negative_sampler.num_negs_per_pos 99.0 +238 59 training.batch_size 0.0 +238 60 model.embedding_dim 1.0 +238 60 negative_sampler.num_negs_per_pos 69.0 +238 60 training.batch_size 1.0 +238 61 model.embedding_dim 2.0 +238 61 negative_sampler.num_negs_per_pos 73.0 +238 61 training.batch_size 2.0 +238 62 model.embedding_dim 0.0 +238 62 negative_sampler.num_negs_per_pos 55.0 +238 62 training.batch_size 2.0 +238 63 model.embedding_dim 1.0 +238 63 negative_sampler.num_negs_per_pos 90.0 +238 63 training.batch_size 0.0 +238 64 model.embedding_dim 2.0 +238 64 negative_sampler.num_negs_per_pos 11.0 +238 64 training.batch_size 2.0 +238 65 model.embedding_dim 1.0 +238 65 negative_sampler.num_negs_per_pos 27.0 +238 65 training.batch_size 2.0 +238 66 model.embedding_dim 2.0 +238 66 negative_sampler.num_negs_per_pos 65.0 +238 66 training.batch_size 1.0 +238 67 model.embedding_dim 2.0 +238 67 negative_sampler.num_negs_per_pos 31.0 +238 67 training.batch_size 2.0 +238 68 model.embedding_dim 0.0 +238 68 negative_sampler.num_negs_per_pos 37.0 +238 68 training.batch_size 2.0 +238 69 model.embedding_dim 2.0 +238 69 negative_sampler.num_negs_per_pos 35.0 +238 69 training.batch_size 0.0 +238 70 model.embedding_dim 0.0 +238 70 negative_sampler.num_negs_per_pos 17.0 +238 70 training.batch_size 1.0 +238 71 model.embedding_dim 0.0 +238 71 negative_sampler.num_negs_per_pos 23.0 +238 71 training.batch_size 1.0 +238 72 model.embedding_dim 2.0 +238 72 negative_sampler.num_negs_per_pos 69.0 +238 72 training.batch_size 0.0 +238 73 model.embedding_dim 2.0 +238 73 negative_sampler.num_negs_per_pos 95.0 +238 73 training.batch_size 0.0 +238 74 model.embedding_dim 0.0 +238 74 negative_sampler.num_negs_per_pos 98.0 +238 74 training.batch_size 2.0 +238 
75 model.embedding_dim 2.0 +238 75 negative_sampler.num_negs_per_pos 68.0 +238 75 training.batch_size 0.0 +238 76 model.embedding_dim 0.0 +238 76 negative_sampler.num_negs_per_pos 80.0 +238 76 training.batch_size 1.0 +238 77 model.embedding_dim 2.0 +238 77 negative_sampler.num_negs_per_pos 68.0 +238 77 training.batch_size 2.0 +238 78 model.embedding_dim 1.0 +238 78 negative_sampler.num_negs_per_pos 40.0 +238 78 training.batch_size 1.0 +238 79 model.embedding_dim 1.0 +238 79 negative_sampler.num_negs_per_pos 80.0 +238 79 training.batch_size 2.0 +238 80 model.embedding_dim 0.0 +238 80 negative_sampler.num_negs_per_pos 53.0 +238 80 training.batch_size 0.0 +238 81 model.embedding_dim 0.0 +238 81 negative_sampler.num_negs_per_pos 83.0 +238 81 training.batch_size 1.0 +238 82 model.embedding_dim 0.0 +238 82 negative_sampler.num_negs_per_pos 45.0 +238 82 training.batch_size 1.0 +238 83 model.embedding_dim 1.0 +238 83 negative_sampler.num_negs_per_pos 9.0 +238 83 training.batch_size 1.0 +238 84 model.embedding_dim 1.0 +238 84 negative_sampler.num_negs_per_pos 39.0 +238 84 training.batch_size 1.0 +238 85 model.embedding_dim 0.0 +238 85 negative_sampler.num_negs_per_pos 64.0 +238 85 training.batch_size 0.0 +238 86 model.embedding_dim 0.0 +238 86 negative_sampler.num_negs_per_pos 56.0 +238 86 training.batch_size 1.0 +238 87 model.embedding_dim 2.0 +238 87 negative_sampler.num_negs_per_pos 27.0 +238 87 training.batch_size 2.0 +238 88 model.embedding_dim 0.0 +238 88 negative_sampler.num_negs_per_pos 15.0 +238 88 training.batch_size 0.0 +238 89 model.embedding_dim 1.0 +238 89 negative_sampler.num_negs_per_pos 53.0 +238 89 training.batch_size 1.0 +238 90 model.embedding_dim 0.0 +238 90 negative_sampler.num_negs_per_pos 46.0 +238 90 training.batch_size 1.0 +238 91 model.embedding_dim 0.0 +238 91 negative_sampler.num_negs_per_pos 90.0 +238 91 training.batch_size 0.0 +238 92 model.embedding_dim 2.0 +238 92 negative_sampler.num_negs_per_pos 12.0 +238 92 training.batch_size 2.0 +238 93 
model.embedding_dim 1.0 +238 93 negative_sampler.num_negs_per_pos 91.0 +238 93 training.batch_size 2.0 +238 94 model.embedding_dim 1.0 +238 94 negative_sampler.num_negs_per_pos 18.0 +238 94 training.batch_size 0.0 +238 95 model.embedding_dim 0.0 +238 95 negative_sampler.num_negs_per_pos 60.0 +238 95 training.batch_size 0.0 +238 96 model.embedding_dim 2.0 +238 96 negative_sampler.num_negs_per_pos 32.0 +238 96 training.batch_size 1.0 +238 97 model.embedding_dim 1.0 +238 97 negative_sampler.num_negs_per_pos 23.0 +238 97 training.batch_size 2.0 +238 98 model.embedding_dim 0.0 +238 98 negative_sampler.num_negs_per_pos 90.0 +238 98 training.batch_size 0.0 +238 99 model.embedding_dim 1.0 +238 99 negative_sampler.num_negs_per_pos 30.0 +238 99 training.batch_size 2.0 +238 100 model.embedding_dim 0.0 +238 100 negative_sampler.num_negs_per_pos 17.0 +238 100 training.batch_size 0.0 +238 1 dataset """kinships""" +238 1 model """ermlp""" +238 1 loss """bceaftersigmoid""" +238 1 regularizer """no""" +238 1 optimizer """adadelta""" +238 1 training_loop """owa""" +238 1 negative_sampler """basic""" +238 1 evaluator """rankbased""" +238 2 dataset """kinships""" +238 2 model """ermlp""" +238 2 loss """bceaftersigmoid""" +238 2 regularizer """no""" +238 2 optimizer """adadelta""" +238 2 training_loop """owa""" +238 2 negative_sampler """basic""" +238 2 evaluator """rankbased""" +238 3 dataset """kinships""" +238 3 model """ermlp""" +238 3 loss """bceaftersigmoid""" +238 3 regularizer """no""" +238 3 optimizer """adadelta""" +238 3 training_loop """owa""" +238 3 negative_sampler """basic""" +238 3 evaluator """rankbased""" +238 4 dataset """kinships""" +238 4 model """ermlp""" +238 4 loss """bceaftersigmoid""" +238 4 regularizer """no""" +238 4 optimizer """adadelta""" +238 4 training_loop """owa""" +238 4 negative_sampler """basic""" +238 4 evaluator """rankbased""" +238 5 dataset """kinships""" +238 5 model """ermlp""" +238 5 loss """bceaftersigmoid""" +238 5 regularizer """no""" 
+238 5 optimizer """adadelta""" +238 5 training_loop """owa""" +238 5 negative_sampler """basic""" +238 5 evaluator """rankbased""" +238 6 dataset """kinships""" +238 6 model """ermlp""" +238 6 loss """bceaftersigmoid""" +238 6 regularizer """no""" +238 6 optimizer """adadelta""" +238 6 training_loop """owa""" +238 6 negative_sampler """basic""" +238 6 evaluator """rankbased""" +238 7 dataset """kinships""" +238 7 model """ermlp""" +238 7 loss """bceaftersigmoid""" +238 7 regularizer """no""" +238 7 optimizer """adadelta""" +238 7 training_loop """owa""" +238 7 negative_sampler """basic""" +238 7 evaluator """rankbased""" +238 8 dataset """kinships""" +238 8 model """ermlp""" +238 8 loss """bceaftersigmoid""" +238 8 regularizer """no""" +238 8 optimizer """adadelta""" +238 8 training_loop """owa""" +238 8 negative_sampler """basic""" +238 8 evaluator """rankbased""" +238 9 dataset """kinships""" +238 9 model """ermlp""" +238 9 loss """bceaftersigmoid""" +238 9 regularizer """no""" +238 9 optimizer """adadelta""" +238 9 training_loop """owa""" +238 9 negative_sampler """basic""" +238 9 evaluator """rankbased""" +238 10 dataset """kinships""" +238 10 model """ermlp""" +238 10 loss """bceaftersigmoid""" +238 10 regularizer """no""" +238 10 optimizer """adadelta""" +238 10 training_loop """owa""" +238 10 negative_sampler """basic""" +238 10 evaluator """rankbased""" +238 11 dataset """kinships""" +238 11 model """ermlp""" +238 11 loss """bceaftersigmoid""" +238 11 regularizer """no""" +238 11 optimizer """adadelta""" +238 11 training_loop """owa""" +238 11 negative_sampler """basic""" +238 11 evaluator """rankbased""" +238 12 dataset """kinships""" +238 12 model """ermlp""" +238 12 loss """bceaftersigmoid""" +238 12 regularizer """no""" +238 12 optimizer """adadelta""" +238 12 training_loop """owa""" +238 12 negative_sampler """basic""" +238 12 evaluator """rankbased""" +238 13 dataset """kinships""" +238 13 model """ermlp""" +238 13 loss """bceaftersigmoid""" +238 13 
regularizer """no""" +238 13 optimizer """adadelta""" +238 13 training_loop """owa""" +238 13 negative_sampler """basic""" +238 13 evaluator """rankbased""" +238 14 dataset """kinships""" +238 14 model """ermlp""" +238 14 loss """bceaftersigmoid""" +238 14 regularizer """no""" +238 14 optimizer """adadelta""" +238 14 training_loop """owa""" +238 14 negative_sampler """basic""" +238 14 evaluator """rankbased""" +238 15 dataset """kinships""" +238 15 model """ermlp""" +238 15 loss """bceaftersigmoid""" +238 15 regularizer """no""" +238 15 optimizer """adadelta""" +238 15 training_loop """owa""" +238 15 negative_sampler """basic""" +238 15 evaluator """rankbased""" +238 16 dataset """kinships""" +238 16 model """ermlp""" +238 16 loss """bceaftersigmoid""" +238 16 regularizer """no""" +238 16 optimizer """adadelta""" +238 16 training_loop """owa""" +238 16 negative_sampler """basic""" +238 16 evaluator """rankbased""" +238 17 dataset """kinships""" +238 17 model """ermlp""" +238 17 loss """bceaftersigmoid""" +238 17 regularizer """no""" +238 17 optimizer """adadelta""" +238 17 training_loop """owa""" +238 17 negative_sampler """basic""" +238 17 evaluator """rankbased""" +238 18 dataset """kinships""" +238 18 model """ermlp""" +238 18 loss """bceaftersigmoid""" +238 18 regularizer """no""" +238 18 optimizer """adadelta""" +238 18 training_loop """owa""" +238 18 negative_sampler """basic""" +238 18 evaluator """rankbased""" +238 19 dataset """kinships""" +238 19 model """ermlp""" +238 19 loss """bceaftersigmoid""" +238 19 regularizer """no""" +238 19 optimizer """adadelta""" +238 19 training_loop """owa""" +238 19 negative_sampler """basic""" +238 19 evaluator """rankbased""" +238 20 dataset """kinships""" +238 20 model """ermlp""" +238 20 loss """bceaftersigmoid""" +238 20 regularizer """no""" +238 20 optimizer """adadelta""" +238 20 training_loop """owa""" +238 20 negative_sampler """basic""" +238 20 evaluator """rankbased""" +238 21 dataset """kinships""" +238 21 
model """ermlp""" +238 21 loss """bceaftersigmoid""" +238 21 regularizer """no""" +238 21 optimizer """adadelta""" +238 21 training_loop """owa""" +238 21 negative_sampler """basic""" +238 21 evaluator """rankbased""" +238 22 dataset """kinships""" +238 22 model """ermlp""" +238 22 loss """bceaftersigmoid""" +238 22 regularizer """no""" +238 22 optimizer """adadelta""" +238 22 training_loop """owa""" +238 22 negative_sampler """basic""" +238 22 evaluator """rankbased""" +238 23 dataset """kinships""" +238 23 model """ermlp""" +238 23 loss """bceaftersigmoid""" +238 23 regularizer """no""" +238 23 optimizer """adadelta""" +238 23 training_loop """owa""" +238 23 negative_sampler """basic""" +238 23 evaluator """rankbased""" +238 24 dataset """kinships""" +238 24 model """ermlp""" +238 24 loss """bceaftersigmoid""" +238 24 regularizer """no""" +238 24 optimizer """adadelta""" +238 24 training_loop """owa""" +238 24 negative_sampler """basic""" +238 24 evaluator """rankbased""" +238 25 dataset """kinships""" +238 25 model """ermlp""" +238 25 loss """bceaftersigmoid""" +238 25 regularizer """no""" +238 25 optimizer """adadelta""" +238 25 training_loop """owa""" +238 25 negative_sampler """basic""" +238 25 evaluator """rankbased""" +238 26 dataset """kinships""" +238 26 model """ermlp""" +238 26 loss """bceaftersigmoid""" +238 26 regularizer """no""" +238 26 optimizer """adadelta""" +238 26 training_loop """owa""" +238 26 negative_sampler """basic""" +238 26 evaluator """rankbased""" +238 27 dataset """kinships""" +238 27 model """ermlp""" +238 27 loss """bceaftersigmoid""" +238 27 regularizer """no""" +238 27 optimizer """adadelta""" +238 27 training_loop """owa""" +238 27 negative_sampler """basic""" +238 27 evaluator """rankbased""" +238 28 dataset """kinships""" +238 28 model """ermlp""" +238 28 loss """bceaftersigmoid""" +238 28 regularizer """no""" +238 28 optimizer """adadelta""" +238 28 training_loop """owa""" +238 28 negative_sampler """basic""" +238 28 
evaluator """rankbased""" +238 29 dataset """kinships""" +238 29 model """ermlp""" +238 29 loss """bceaftersigmoid""" +238 29 regularizer """no""" +238 29 optimizer """adadelta""" +238 29 training_loop """owa""" +238 29 negative_sampler """basic""" +238 29 evaluator """rankbased""" +238 30 dataset """kinships""" +238 30 model """ermlp""" +238 30 loss """bceaftersigmoid""" +238 30 regularizer """no""" +238 30 optimizer """adadelta""" +238 30 training_loop """owa""" +238 30 negative_sampler """basic""" +238 30 evaluator """rankbased""" +238 31 dataset """kinships""" +238 31 model """ermlp""" +238 31 loss """bceaftersigmoid""" +238 31 regularizer """no""" +238 31 optimizer """adadelta""" +238 31 training_loop """owa""" +238 31 negative_sampler """basic""" +238 31 evaluator """rankbased""" +238 32 dataset """kinships""" +238 32 model """ermlp""" +238 32 loss """bceaftersigmoid""" +238 32 regularizer """no""" +238 32 optimizer """adadelta""" +238 32 training_loop """owa""" +238 32 negative_sampler """basic""" +238 32 evaluator """rankbased""" +238 33 dataset """kinships""" +238 33 model """ermlp""" +238 33 loss """bceaftersigmoid""" +238 33 regularizer """no""" +238 33 optimizer """adadelta""" +238 33 training_loop """owa""" +238 33 negative_sampler """basic""" +238 33 evaluator """rankbased""" +238 34 dataset """kinships""" +238 34 model """ermlp""" +238 34 loss """bceaftersigmoid""" +238 34 regularizer """no""" +238 34 optimizer """adadelta""" +238 34 training_loop """owa""" +238 34 negative_sampler """basic""" +238 34 evaluator """rankbased""" +238 35 dataset """kinships""" +238 35 model """ermlp""" +238 35 loss """bceaftersigmoid""" +238 35 regularizer """no""" +238 35 optimizer """adadelta""" +238 35 training_loop """owa""" +238 35 negative_sampler """basic""" +238 35 evaluator """rankbased""" +238 36 dataset """kinships""" +238 36 model """ermlp""" +238 36 loss """bceaftersigmoid""" +238 36 regularizer """no""" +238 36 optimizer """adadelta""" +238 36 
training_loop """owa""" +238 36 negative_sampler """basic""" +238 36 evaluator """rankbased""" +238 37 dataset """kinships""" +238 37 model """ermlp""" +238 37 loss """bceaftersigmoid""" +238 37 regularizer """no""" +238 37 optimizer """adadelta""" +238 37 training_loop """owa""" +238 37 negative_sampler """basic""" +238 37 evaluator """rankbased""" +238 38 dataset """kinships""" +238 38 model """ermlp""" +238 38 loss """bceaftersigmoid""" +238 38 regularizer """no""" +238 38 optimizer """adadelta""" +238 38 training_loop """owa""" +238 38 negative_sampler """basic""" +238 38 evaluator """rankbased""" +238 39 dataset """kinships""" +238 39 model """ermlp""" +238 39 loss """bceaftersigmoid""" +238 39 regularizer """no""" +238 39 optimizer """adadelta""" +238 39 training_loop """owa""" +238 39 negative_sampler """basic""" +238 39 evaluator """rankbased""" +238 40 dataset """kinships""" +238 40 model """ermlp""" +238 40 loss """bceaftersigmoid""" +238 40 regularizer """no""" +238 40 optimizer """adadelta""" +238 40 training_loop """owa""" +238 40 negative_sampler """basic""" +238 40 evaluator """rankbased""" +238 41 dataset """kinships""" +238 41 model """ermlp""" +238 41 loss """bceaftersigmoid""" +238 41 regularizer """no""" +238 41 optimizer """adadelta""" +238 41 training_loop """owa""" +238 41 negative_sampler """basic""" +238 41 evaluator """rankbased""" +238 42 dataset """kinships""" +238 42 model """ermlp""" +238 42 loss """bceaftersigmoid""" +238 42 regularizer """no""" +238 42 optimizer """adadelta""" +238 42 training_loop """owa""" +238 42 negative_sampler """basic""" +238 42 evaluator """rankbased""" +238 43 dataset """kinships""" +238 43 model """ermlp""" +238 43 loss """bceaftersigmoid""" +238 43 regularizer """no""" +238 43 optimizer """adadelta""" +238 43 training_loop """owa""" +238 43 negative_sampler """basic""" +238 43 evaluator """rankbased""" +238 44 dataset """kinships""" +238 44 model """ermlp""" +238 44 loss """bceaftersigmoid""" +238 44 
regularizer """no""" +238 44 optimizer """adadelta""" +238 44 training_loop """owa""" +238 44 negative_sampler """basic""" +238 44 evaluator """rankbased""" +238 45 dataset """kinships""" +238 45 model """ermlp""" +238 45 loss """bceaftersigmoid""" +238 45 regularizer """no""" +238 45 optimizer """adadelta""" +238 45 training_loop """owa""" +238 45 negative_sampler """basic""" +238 45 evaluator """rankbased""" +238 46 dataset """kinships""" +238 46 model """ermlp""" +238 46 loss """bceaftersigmoid""" +238 46 regularizer """no""" +238 46 optimizer """adadelta""" +238 46 training_loop """owa""" +238 46 negative_sampler """basic""" +238 46 evaluator """rankbased""" +238 47 dataset """kinships""" +238 47 model """ermlp""" +238 47 loss """bceaftersigmoid""" +238 47 regularizer """no""" +238 47 optimizer """adadelta""" +238 47 training_loop """owa""" +238 47 negative_sampler """basic""" +238 47 evaluator """rankbased""" +238 48 dataset """kinships""" +238 48 model """ermlp""" +238 48 loss """bceaftersigmoid""" +238 48 regularizer """no""" +238 48 optimizer """adadelta""" +238 48 training_loop """owa""" +238 48 negative_sampler """basic""" +238 48 evaluator """rankbased""" +238 49 dataset """kinships""" +238 49 model """ermlp""" +238 49 loss """bceaftersigmoid""" +238 49 regularizer """no""" +238 49 optimizer """adadelta""" +238 49 training_loop """owa""" +238 49 negative_sampler """basic""" +238 49 evaluator """rankbased""" +238 50 dataset """kinships""" +238 50 model """ermlp""" +238 50 loss """bceaftersigmoid""" +238 50 regularizer """no""" +238 50 optimizer """adadelta""" +238 50 training_loop """owa""" +238 50 negative_sampler """basic""" +238 50 evaluator """rankbased""" +238 51 dataset """kinships""" +238 51 model """ermlp""" +238 51 loss """bceaftersigmoid""" +238 51 regularizer """no""" +238 51 optimizer """adadelta""" +238 51 training_loop """owa""" +238 51 negative_sampler """basic""" +238 51 evaluator """rankbased""" +238 52 dataset """kinships""" +238 52 
model """ermlp""" +238 52 loss """bceaftersigmoid""" +238 52 regularizer """no""" +238 52 optimizer """adadelta""" +238 52 training_loop """owa""" +238 52 negative_sampler """basic""" +238 52 evaluator """rankbased""" +238 53 dataset """kinships""" +238 53 model """ermlp""" +238 53 loss """bceaftersigmoid""" +238 53 regularizer """no""" +238 53 optimizer """adadelta""" +238 53 training_loop """owa""" +238 53 negative_sampler """basic""" +238 53 evaluator """rankbased""" +238 54 dataset """kinships""" +238 54 model """ermlp""" +238 54 loss """bceaftersigmoid""" +238 54 regularizer """no""" +238 54 optimizer """adadelta""" +238 54 training_loop """owa""" +238 54 negative_sampler """basic""" +238 54 evaluator """rankbased""" +238 55 dataset """kinships""" +238 55 model """ermlp""" +238 55 loss """bceaftersigmoid""" +238 55 regularizer """no""" +238 55 optimizer """adadelta""" +238 55 training_loop """owa""" +238 55 negative_sampler """basic""" +238 55 evaluator """rankbased""" +238 56 dataset """kinships""" +238 56 model """ermlp""" +238 56 loss """bceaftersigmoid""" +238 56 regularizer """no""" +238 56 optimizer """adadelta""" +238 56 training_loop """owa""" +238 56 negative_sampler """basic""" +238 56 evaluator """rankbased""" +238 57 dataset """kinships""" +238 57 model """ermlp""" +238 57 loss """bceaftersigmoid""" +238 57 regularizer """no""" +238 57 optimizer """adadelta""" +238 57 training_loop """owa""" +238 57 negative_sampler """basic""" +238 57 evaluator """rankbased""" +238 58 dataset """kinships""" +238 58 model """ermlp""" +238 58 loss """bceaftersigmoid""" +238 58 regularizer """no""" +238 58 optimizer """adadelta""" +238 58 training_loop """owa""" +238 58 negative_sampler """basic""" +238 58 evaluator """rankbased""" +238 59 dataset """kinships""" +238 59 model """ermlp""" +238 59 loss """bceaftersigmoid""" +238 59 regularizer """no""" +238 59 optimizer """adadelta""" +238 59 training_loop """owa""" +238 59 negative_sampler """basic""" +238 59 
evaluator """rankbased""" +238 60 dataset """kinships""" +238 60 model """ermlp""" +238 60 loss """bceaftersigmoid""" +238 60 regularizer """no""" +238 60 optimizer """adadelta""" +238 60 training_loop """owa""" +238 60 negative_sampler """basic""" +238 60 evaluator """rankbased""" +238 61 dataset """kinships""" +238 61 model """ermlp""" +238 61 loss """bceaftersigmoid""" +238 61 regularizer """no""" +238 61 optimizer """adadelta""" +238 61 training_loop """owa""" +238 61 negative_sampler """basic""" +238 61 evaluator """rankbased""" +238 62 dataset """kinships""" +238 62 model """ermlp""" +238 62 loss """bceaftersigmoid""" +238 62 regularizer """no""" +238 62 optimizer """adadelta""" +238 62 training_loop """owa""" +238 62 negative_sampler """basic""" +238 62 evaluator """rankbased""" +238 63 dataset """kinships""" +238 63 model """ermlp""" +238 63 loss """bceaftersigmoid""" +238 63 regularizer """no""" +238 63 optimizer """adadelta""" +238 63 training_loop """owa""" +238 63 negative_sampler """basic""" +238 63 evaluator """rankbased""" +238 64 dataset """kinships""" +238 64 model """ermlp""" +238 64 loss """bceaftersigmoid""" +238 64 regularizer """no""" +238 64 optimizer """adadelta""" +238 64 training_loop """owa""" +238 64 negative_sampler """basic""" +238 64 evaluator """rankbased""" +238 65 dataset """kinships""" +238 65 model """ermlp""" +238 65 loss """bceaftersigmoid""" +238 65 regularizer """no""" +238 65 optimizer """adadelta""" +238 65 training_loop """owa""" +238 65 negative_sampler """basic""" +238 65 evaluator """rankbased""" +238 66 dataset """kinships""" +238 66 model """ermlp""" +238 66 loss """bceaftersigmoid""" +238 66 regularizer """no""" +238 66 optimizer """adadelta""" +238 66 training_loop """owa""" +238 66 negative_sampler """basic""" +238 66 evaluator """rankbased""" +238 67 dataset """kinships""" +238 67 model """ermlp""" +238 67 loss """bceaftersigmoid""" +238 67 regularizer """no""" +238 67 optimizer """adadelta""" +238 67 
training_loop """owa""" +238 67 negative_sampler """basic""" +238 67 evaluator """rankbased""" +238 68 dataset """kinships""" +238 68 model """ermlp""" +238 68 loss """bceaftersigmoid""" +238 68 regularizer """no""" +238 68 optimizer """adadelta""" +238 68 training_loop """owa""" +238 68 negative_sampler """basic""" +238 68 evaluator """rankbased""" +238 69 dataset """kinships""" +238 69 model """ermlp""" +238 69 loss """bceaftersigmoid""" +238 69 regularizer """no""" +238 69 optimizer """adadelta""" +238 69 training_loop """owa""" +238 69 negative_sampler """basic""" +238 69 evaluator """rankbased""" +238 70 dataset """kinships""" +238 70 model """ermlp""" +238 70 loss """bceaftersigmoid""" +238 70 regularizer """no""" +238 70 optimizer """adadelta""" +238 70 training_loop """owa""" +238 70 negative_sampler """basic""" +238 70 evaluator """rankbased""" +238 71 dataset """kinships""" +238 71 model """ermlp""" +238 71 loss """bceaftersigmoid""" +238 71 regularizer """no""" +238 71 optimizer """adadelta""" +238 71 training_loop """owa""" +238 71 negative_sampler """basic""" +238 71 evaluator """rankbased""" +238 72 dataset """kinships""" +238 72 model """ermlp""" +238 72 loss """bceaftersigmoid""" +238 72 regularizer """no""" +238 72 optimizer """adadelta""" +238 72 training_loop """owa""" +238 72 negative_sampler """basic""" +238 72 evaluator """rankbased""" +238 73 dataset """kinships""" +238 73 model """ermlp""" +238 73 loss """bceaftersigmoid""" +238 73 regularizer """no""" +238 73 optimizer """adadelta""" +238 73 training_loop """owa""" +238 73 negative_sampler """basic""" +238 73 evaluator """rankbased""" +238 74 dataset """kinships""" +238 74 model """ermlp""" +238 74 loss """bceaftersigmoid""" +238 74 regularizer """no""" +238 74 optimizer """adadelta""" +238 74 training_loop """owa""" +238 74 negative_sampler """basic""" +238 74 evaluator """rankbased""" +238 75 dataset """kinships""" +238 75 model """ermlp""" +238 75 loss """bceaftersigmoid""" +238 75 
regularizer """no""" +238 75 optimizer """adadelta""" +238 75 training_loop """owa""" +238 75 negative_sampler """basic""" +238 75 evaluator """rankbased""" +238 76 dataset """kinships""" +238 76 model """ermlp""" +238 76 loss """bceaftersigmoid""" +238 76 regularizer """no""" +238 76 optimizer """adadelta""" +238 76 training_loop """owa""" +238 76 negative_sampler """basic""" +238 76 evaluator """rankbased""" +238 77 dataset """kinships""" +238 77 model """ermlp""" +238 77 loss """bceaftersigmoid""" +238 77 regularizer """no""" +238 77 optimizer """adadelta""" +238 77 training_loop """owa""" +238 77 negative_sampler """basic""" +238 77 evaluator """rankbased""" +238 78 dataset """kinships""" +238 78 model """ermlp""" +238 78 loss """bceaftersigmoid""" +238 78 regularizer """no""" +238 78 optimizer """adadelta""" +238 78 training_loop """owa""" +238 78 negative_sampler """basic""" +238 78 evaluator """rankbased""" +238 79 dataset """kinships""" +238 79 model """ermlp""" +238 79 loss """bceaftersigmoid""" +238 79 regularizer """no""" +238 79 optimizer """adadelta""" +238 79 training_loop """owa""" +238 79 negative_sampler """basic""" +238 79 evaluator """rankbased""" +238 80 dataset """kinships""" +238 80 model """ermlp""" +238 80 loss """bceaftersigmoid""" +238 80 regularizer """no""" +238 80 optimizer """adadelta""" +238 80 training_loop """owa""" +238 80 negative_sampler """basic""" +238 80 evaluator """rankbased""" +238 81 dataset """kinships""" +238 81 model """ermlp""" +238 81 loss """bceaftersigmoid""" +238 81 regularizer """no""" +238 81 optimizer """adadelta""" +238 81 training_loop """owa""" +238 81 negative_sampler """basic""" +238 81 evaluator """rankbased""" +238 82 dataset """kinships""" +238 82 model """ermlp""" +238 82 loss """bceaftersigmoid""" +238 82 regularizer """no""" +238 82 optimizer """adadelta""" +238 82 training_loop """owa""" +238 82 negative_sampler """basic""" +238 82 evaluator """rankbased""" +238 83 dataset """kinships""" +238 83 
model """ermlp""" +238 83 loss """bceaftersigmoid""" +238 83 regularizer """no""" +238 83 optimizer """adadelta""" +238 83 training_loop """owa""" +238 83 negative_sampler """basic""" +238 83 evaluator """rankbased""" +238 84 dataset """kinships""" +238 84 model """ermlp""" +238 84 loss """bceaftersigmoid""" +238 84 regularizer """no""" +238 84 optimizer """adadelta""" +238 84 training_loop """owa""" +238 84 negative_sampler """basic""" +238 84 evaluator """rankbased""" +238 85 dataset """kinships""" +238 85 model """ermlp""" +238 85 loss """bceaftersigmoid""" +238 85 regularizer """no""" +238 85 optimizer """adadelta""" +238 85 training_loop """owa""" +238 85 negative_sampler """basic""" +238 85 evaluator """rankbased""" +238 86 dataset """kinships""" +238 86 model """ermlp""" +238 86 loss """bceaftersigmoid""" +238 86 regularizer """no""" +238 86 optimizer """adadelta""" +238 86 training_loop """owa""" +238 86 negative_sampler """basic""" +238 86 evaluator """rankbased""" +238 87 dataset """kinships""" +238 87 model """ermlp""" +238 87 loss """bceaftersigmoid""" +238 87 regularizer """no""" +238 87 optimizer """adadelta""" +238 87 training_loop """owa""" +238 87 negative_sampler """basic""" +238 87 evaluator """rankbased""" +238 88 dataset """kinships""" +238 88 model """ermlp""" +238 88 loss """bceaftersigmoid""" +238 88 regularizer """no""" +238 88 optimizer """adadelta""" +238 88 training_loop """owa""" +238 88 negative_sampler """basic""" +238 88 evaluator """rankbased""" +238 89 dataset """kinships""" +238 89 model """ermlp""" +238 89 loss """bceaftersigmoid""" +238 89 regularizer """no""" +238 89 optimizer """adadelta""" +238 89 training_loop """owa""" +238 89 negative_sampler """basic""" +238 89 evaluator """rankbased""" +238 90 dataset """kinships""" +238 90 model """ermlp""" +238 90 loss """bceaftersigmoid""" +238 90 regularizer """no""" +238 90 optimizer """adadelta""" +238 90 training_loop """owa""" +238 90 negative_sampler """basic""" +238 90 
evaluator """rankbased""" +238 91 dataset """kinships""" +238 91 model """ermlp""" +238 91 loss """bceaftersigmoid""" +238 91 regularizer """no""" +238 91 optimizer """adadelta""" +238 91 training_loop """owa""" +238 91 negative_sampler """basic""" +238 91 evaluator """rankbased""" +238 92 dataset """kinships""" +238 92 model """ermlp""" +238 92 loss """bceaftersigmoid""" +238 92 regularizer """no""" +238 92 optimizer """adadelta""" +238 92 training_loop """owa""" +238 92 negative_sampler """basic""" +238 92 evaluator """rankbased""" +238 93 dataset """kinships""" +238 93 model """ermlp""" +238 93 loss """bceaftersigmoid""" +238 93 regularizer """no""" +238 93 optimizer """adadelta""" +238 93 training_loop """owa""" +238 93 negative_sampler """basic""" +238 93 evaluator """rankbased""" +238 94 dataset """kinships""" +238 94 model """ermlp""" +238 94 loss """bceaftersigmoid""" +238 94 regularizer """no""" +238 94 optimizer """adadelta""" +238 94 training_loop """owa""" +238 94 negative_sampler """basic""" +238 94 evaluator """rankbased""" +238 95 dataset """kinships""" +238 95 model """ermlp""" +238 95 loss """bceaftersigmoid""" +238 95 regularizer """no""" +238 95 optimizer """adadelta""" +238 95 training_loop """owa""" +238 95 negative_sampler """basic""" +238 95 evaluator """rankbased""" +238 96 dataset """kinships""" +238 96 model """ermlp""" +238 96 loss """bceaftersigmoid""" +238 96 regularizer """no""" +238 96 optimizer """adadelta""" +238 96 training_loop """owa""" +238 96 negative_sampler """basic""" +238 96 evaluator """rankbased""" +238 97 dataset """kinships""" +238 97 model """ermlp""" +238 97 loss """bceaftersigmoid""" +238 97 regularizer """no""" +238 97 optimizer """adadelta""" +238 97 training_loop """owa""" +238 97 negative_sampler """basic""" +238 97 evaluator """rankbased""" +238 98 dataset """kinships""" +238 98 model """ermlp""" +238 98 loss """bceaftersigmoid""" +238 98 regularizer """no""" +238 98 optimizer """adadelta""" +238 98 
training_loop """owa""" +238 98 negative_sampler """basic""" +238 98 evaluator """rankbased""" +238 99 dataset """kinships""" +238 99 model """ermlp""" +238 99 loss """bceaftersigmoid""" +238 99 regularizer """no""" +238 99 optimizer """adadelta""" +238 99 training_loop """owa""" +238 99 negative_sampler """basic""" +238 99 evaluator """rankbased""" +238 100 dataset """kinships""" +238 100 model """ermlp""" +238 100 loss """bceaftersigmoid""" +238 100 regularizer """no""" +238 100 optimizer """adadelta""" +238 100 training_loop """owa""" +238 100 negative_sampler """basic""" +238 100 evaluator """rankbased""" +239 1 model.embedding_dim 0.0 +239 1 negative_sampler.num_negs_per_pos 45.0 +239 1 training.batch_size 2.0 +239 2 model.embedding_dim 2.0 +239 2 negative_sampler.num_negs_per_pos 78.0 +239 2 training.batch_size 1.0 +239 3 model.embedding_dim 0.0 +239 3 negative_sampler.num_negs_per_pos 19.0 +239 3 training.batch_size 2.0 +239 4 model.embedding_dim 2.0 +239 4 negative_sampler.num_negs_per_pos 86.0 +239 4 training.batch_size 2.0 +239 5 model.embedding_dim 0.0 +239 5 negative_sampler.num_negs_per_pos 87.0 +239 5 training.batch_size 1.0 +239 6 model.embedding_dim 1.0 +239 6 negative_sampler.num_negs_per_pos 20.0 +239 6 training.batch_size 0.0 +239 7 model.embedding_dim 2.0 +239 7 negative_sampler.num_negs_per_pos 61.0 +239 7 training.batch_size 1.0 +239 8 model.embedding_dim 1.0 +239 8 negative_sampler.num_negs_per_pos 96.0 +239 8 training.batch_size 0.0 +239 9 model.embedding_dim 0.0 +239 9 negative_sampler.num_negs_per_pos 42.0 +239 9 training.batch_size 2.0 +239 10 model.embedding_dim 2.0 +239 10 negative_sampler.num_negs_per_pos 21.0 +239 10 training.batch_size 2.0 +239 11 model.embedding_dim 1.0 +239 11 negative_sampler.num_negs_per_pos 96.0 +239 11 training.batch_size 1.0 +239 12 model.embedding_dim 2.0 +239 12 negative_sampler.num_negs_per_pos 87.0 +239 12 training.batch_size 0.0 +239 13 model.embedding_dim 2.0 +239 13 negative_sampler.num_negs_per_pos 
86.0 +239 13 training.batch_size 2.0 +239 14 model.embedding_dim 1.0 +239 14 negative_sampler.num_negs_per_pos 15.0 +239 14 training.batch_size 0.0 +239 15 model.embedding_dim 1.0 +239 15 negative_sampler.num_negs_per_pos 67.0 +239 15 training.batch_size 0.0 +239 16 model.embedding_dim 1.0 +239 16 negative_sampler.num_negs_per_pos 79.0 +239 16 training.batch_size 2.0 +239 17 model.embedding_dim 0.0 +239 17 negative_sampler.num_negs_per_pos 52.0 +239 17 training.batch_size 0.0 +239 18 model.embedding_dim 1.0 +239 18 negative_sampler.num_negs_per_pos 16.0 +239 18 training.batch_size 1.0 +239 19 model.embedding_dim 1.0 +239 19 negative_sampler.num_negs_per_pos 11.0 +239 19 training.batch_size 2.0 +239 20 model.embedding_dim 1.0 +239 20 negative_sampler.num_negs_per_pos 95.0 +239 20 training.batch_size 2.0 +239 21 model.embedding_dim 0.0 +239 21 negative_sampler.num_negs_per_pos 82.0 +239 21 training.batch_size 0.0 +239 22 model.embedding_dim 1.0 +239 22 negative_sampler.num_negs_per_pos 48.0 +239 22 training.batch_size 0.0 +239 23 model.embedding_dim 2.0 +239 23 negative_sampler.num_negs_per_pos 95.0 +239 23 training.batch_size 0.0 +239 24 model.embedding_dim 0.0 +239 24 negative_sampler.num_negs_per_pos 59.0 +239 24 training.batch_size 0.0 +239 25 model.embedding_dim 1.0 +239 25 negative_sampler.num_negs_per_pos 79.0 +239 25 training.batch_size 2.0 +239 26 model.embedding_dim 1.0 +239 26 negative_sampler.num_negs_per_pos 54.0 +239 26 training.batch_size 2.0 +239 27 model.embedding_dim 0.0 +239 27 negative_sampler.num_negs_per_pos 53.0 +239 27 training.batch_size 1.0 +239 28 model.embedding_dim 2.0 +239 28 negative_sampler.num_negs_per_pos 16.0 +239 28 training.batch_size 2.0 +239 29 model.embedding_dim 1.0 +239 29 negative_sampler.num_negs_per_pos 28.0 +239 29 training.batch_size 1.0 +239 30 model.embedding_dim 1.0 +239 30 negative_sampler.num_negs_per_pos 34.0 +239 30 training.batch_size 2.0 +239 31 model.embedding_dim 0.0 +239 31 negative_sampler.num_negs_per_pos 
80.0 +239 31 training.batch_size 1.0 +239 32 model.embedding_dim 1.0 +239 32 negative_sampler.num_negs_per_pos 70.0 +239 32 training.batch_size 1.0 +239 33 model.embedding_dim 2.0 +239 33 negative_sampler.num_negs_per_pos 89.0 +239 33 training.batch_size 1.0 +239 34 model.embedding_dim 2.0 +239 34 negative_sampler.num_negs_per_pos 50.0 +239 34 training.batch_size 1.0 +239 35 model.embedding_dim 0.0 +239 35 negative_sampler.num_negs_per_pos 59.0 +239 35 training.batch_size 1.0 +239 36 model.embedding_dim 0.0 +239 36 negative_sampler.num_negs_per_pos 28.0 +239 36 training.batch_size 1.0 +239 37 model.embedding_dim 2.0 +239 37 negative_sampler.num_negs_per_pos 15.0 +239 37 training.batch_size 2.0 +239 38 model.embedding_dim 2.0 +239 38 negative_sampler.num_negs_per_pos 63.0 +239 38 training.batch_size 2.0 +239 39 model.embedding_dim 2.0 +239 39 negative_sampler.num_negs_per_pos 64.0 +239 39 training.batch_size 0.0 +239 40 model.embedding_dim 1.0 +239 40 negative_sampler.num_negs_per_pos 92.0 +239 40 training.batch_size 0.0 +239 41 model.embedding_dim 0.0 +239 41 negative_sampler.num_negs_per_pos 62.0 +239 41 training.batch_size 1.0 +239 42 model.embedding_dim 2.0 +239 42 negative_sampler.num_negs_per_pos 41.0 +239 42 training.batch_size 2.0 +239 43 model.embedding_dim 2.0 +239 43 negative_sampler.num_negs_per_pos 55.0 +239 43 training.batch_size 1.0 +239 44 model.embedding_dim 2.0 +239 44 negative_sampler.num_negs_per_pos 86.0 +239 44 training.batch_size 1.0 +239 45 model.embedding_dim 0.0 +239 45 negative_sampler.num_negs_per_pos 56.0 +239 45 training.batch_size 0.0 +239 46 model.embedding_dim 1.0 +239 46 negative_sampler.num_negs_per_pos 91.0 +239 46 training.batch_size 0.0 +239 47 model.embedding_dim 2.0 +239 47 negative_sampler.num_negs_per_pos 58.0 +239 47 training.batch_size 1.0 +239 48 model.embedding_dim 1.0 +239 48 negative_sampler.num_negs_per_pos 94.0 +239 48 training.batch_size 0.0 +239 49 model.embedding_dim 2.0 +239 49 negative_sampler.num_negs_per_pos 
98.0 +239 49 training.batch_size 2.0 +239 50 model.embedding_dim 1.0 +239 50 negative_sampler.num_negs_per_pos 58.0 +239 50 training.batch_size 1.0 +239 51 model.embedding_dim 1.0 +239 51 negative_sampler.num_negs_per_pos 35.0 +239 51 training.batch_size 1.0 +239 52 model.embedding_dim 2.0 +239 52 negative_sampler.num_negs_per_pos 60.0 +239 52 training.batch_size 1.0 +239 53 model.embedding_dim 2.0 +239 53 negative_sampler.num_negs_per_pos 50.0 +239 53 training.batch_size 2.0 +239 54 model.embedding_dim 0.0 +239 54 negative_sampler.num_negs_per_pos 0.0 +239 54 training.batch_size 0.0 +239 55 model.embedding_dim 2.0 +239 55 negative_sampler.num_negs_per_pos 46.0 +239 55 training.batch_size 0.0 +239 56 model.embedding_dim 1.0 +239 56 negative_sampler.num_negs_per_pos 48.0 +239 56 training.batch_size 0.0 +239 57 model.embedding_dim 0.0 +239 57 negative_sampler.num_negs_per_pos 6.0 +239 57 training.batch_size 1.0 +239 58 model.embedding_dim 1.0 +239 58 negative_sampler.num_negs_per_pos 43.0 +239 58 training.batch_size 0.0 +239 59 model.embedding_dim 0.0 +239 59 negative_sampler.num_negs_per_pos 45.0 +239 59 training.batch_size 0.0 +239 60 model.embedding_dim 0.0 +239 60 negative_sampler.num_negs_per_pos 85.0 +239 60 training.batch_size 1.0 +239 61 model.embedding_dim 2.0 +239 61 negative_sampler.num_negs_per_pos 57.0 +239 61 training.batch_size 1.0 +239 62 model.embedding_dim 2.0 +239 62 negative_sampler.num_negs_per_pos 30.0 +239 62 training.batch_size 2.0 +239 63 model.embedding_dim 0.0 +239 63 negative_sampler.num_negs_per_pos 82.0 +239 63 training.batch_size 0.0 +239 64 model.embedding_dim 2.0 +239 64 negative_sampler.num_negs_per_pos 31.0 +239 64 training.batch_size 2.0 +239 65 model.embedding_dim 0.0 +239 65 negative_sampler.num_negs_per_pos 4.0 +239 65 training.batch_size 0.0 +239 66 model.embedding_dim 1.0 +239 66 negative_sampler.num_negs_per_pos 56.0 +239 66 training.batch_size 1.0 +239 67 model.embedding_dim 1.0 +239 67 negative_sampler.num_negs_per_pos 58.0 
+239 67 training.batch_size 0.0 +239 68 model.embedding_dim 2.0 +239 68 negative_sampler.num_negs_per_pos 1.0 +239 68 training.batch_size 2.0 +239 69 model.embedding_dim 2.0 +239 69 negative_sampler.num_negs_per_pos 85.0 +239 69 training.batch_size 1.0 +239 70 model.embedding_dim 1.0 +239 70 negative_sampler.num_negs_per_pos 8.0 +239 70 training.batch_size 0.0 +239 71 model.embedding_dim 1.0 +239 71 negative_sampler.num_negs_per_pos 12.0 +239 71 training.batch_size 1.0 +239 72 model.embedding_dim 1.0 +239 72 negative_sampler.num_negs_per_pos 58.0 +239 72 training.batch_size 1.0 +239 73 model.embedding_dim 0.0 +239 73 negative_sampler.num_negs_per_pos 39.0 +239 73 training.batch_size 2.0 +239 74 model.embedding_dim 0.0 +239 74 negative_sampler.num_negs_per_pos 75.0 +239 74 training.batch_size 2.0 +239 75 model.embedding_dim 0.0 +239 75 negative_sampler.num_negs_per_pos 98.0 +239 75 training.batch_size 0.0 +239 76 model.embedding_dim 0.0 +239 76 negative_sampler.num_negs_per_pos 96.0 +239 76 training.batch_size 1.0 +239 77 model.embedding_dim 2.0 +239 77 negative_sampler.num_negs_per_pos 31.0 +239 77 training.batch_size 1.0 +239 78 model.embedding_dim 1.0 +239 78 negative_sampler.num_negs_per_pos 48.0 +239 78 training.batch_size 2.0 +239 79 model.embedding_dim 2.0 +239 79 negative_sampler.num_negs_per_pos 59.0 +239 79 training.batch_size 1.0 +239 80 model.embedding_dim 0.0 +239 80 negative_sampler.num_negs_per_pos 77.0 +239 80 training.batch_size 1.0 +239 81 model.embedding_dim 2.0 +239 81 negative_sampler.num_negs_per_pos 60.0 +239 81 training.batch_size 0.0 +239 82 model.embedding_dim 1.0 +239 82 negative_sampler.num_negs_per_pos 79.0 +239 82 training.batch_size 0.0 +239 83 model.embedding_dim 1.0 +239 83 negative_sampler.num_negs_per_pos 21.0 +239 83 training.batch_size 0.0 +239 84 model.embedding_dim 2.0 +239 84 negative_sampler.num_negs_per_pos 72.0 +239 84 training.batch_size 2.0 +239 85 model.embedding_dim 2.0 +239 85 negative_sampler.num_negs_per_pos 3.0 +239 
85 training.batch_size 2.0 +239 86 model.embedding_dim 1.0 +239 86 negative_sampler.num_negs_per_pos 13.0 +239 86 training.batch_size 1.0 +239 87 model.embedding_dim 2.0 +239 87 negative_sampler.num_negs_per_pos 73.0 +239 87 training.batch_size 1.0 +239 88 model.embedding_dim 0.0 +239 88 negative_sampler.num_negs_per_pos 59.0 +239 88 training.batch_size 0.0 +239 89 model.embedding_dim 2.0 +239 89 negative_sampler.num_negs_per_pos 20.0 +239 89 training.batch_size 0.0 +239 90 model.embedding_dim 1.0 +239 90 negative_sampler.num_negs_per_pos 34.0 +239 90 training.batch_size 1.0 +239 91 model.embedding_dim 0.0 +239 91 negative_sampler.num_negs_per_pos 53.0 +239 91 training.batch_size 2.0 +239 92 model.embedding_dim 1.0 +239 92 negative_sampler.num_negs_per_pos 2.0 +239 92 training.batch_size 2.0 +239 93 model.embedding_dim 1.0 +239 93 negative_sampler.num_negs_per_pos 1.0 +239 93 training.batch_size 1.0 +239 94 model.embedding_dim 2.0 +239 94 negative_sampler.num_negs_per_pos 3.0 +239 94 training.batch_size 0.0 +239 95 model.embedding_dim 1.0 +239 95 negative_sampler.num_negs_per_pos 82.0 +239 95 training.batch_size 2.0 +239 96 model.embedding_dim 0.0 +239 96 negative_sampler.num_negs_per_pos 71.0 +239 96 training.batch_size 2.0 +239 97 model.embedding_dim 2.0 +239 97 negative_sampler.num_negs_per_pos 21.0 +239 97 training.batch_size 2.0 +239 98 model.embedding_dim 0.0 +239 98 negative_sampler.num_negs_per_pos 37.0 +239 98 training.batch_size 0.0 +239 99 model.embedding_dim 0.0 +239 99 negative_sampler.num_negs_per_pos 21.0 +239 99 training.batch_size 0.0 +239 100 model.embedding_dim 1.0 +239 100 negative_sampler.num_negs_per_pos 24.0 +239 100 training.batch_size 0.0 +239 1 dataset """kinships""" +239 1 model """ermlp""" +239 1 loss """softplus""" +239 1 regularizer """no""" +239 1 optimizer """adadelta""" +239 1 training_loop """owa""" +239 1 negative_sampler """basic""" +239 1 evaluator """rankbased""" +239 2 dataset """kinships""" +239 2 model """ermlp""" +239 2 
loss """softplus""" +239 2 regularizer """no""" +239 2 optimizer """adadelta""" +239 2 training_loop """owa""" +239 2 negative_sampler """basic""" +239 2 evaluator """rankbased""" +239 3 dataset """kinships""" +239 3 model """ermlp""" +239 3 loss """softplus""" +239 3 regularizer """no""" +239 3 optimizer """adadelta""" +239 3 training_loop """owa""" +239 3 negative_sampler """basic""" +239 3 evaluator """rankbased""" +239 4 dataset """kinships""" +239 4 model """ermlp""" +239 4 loss """softplus""" +239 4 regularizer """no""" +239 4 optimizer """adadelta""" +239 4 training_loop """owa""" +239 4 negative_sampler """basic""" +239 4 evaluator """rankbased""" +239 5 dataset """kinships""" +239 5 model """ermlp""" +239 5 loss """softplus""" +239 5 regularizer """no""" +239 5 optimizer """adadelta""" +239 5 training_loop """owa""" +239 5 negative_sampler """basic""" +239 5 evaluator """rankbased""" +239 6 dataset """kinships""" +239 6 model """ermlp""" +239 6 loss """softplus""" +239 6 regularizer """no""" +239 6 optimizer """adadelta""" +239 6 training_loop """owa""" +239 6 negative_sampler """basic""" +239 6 evaluator """rankbased""" +239 7 dataset """kinships""" +239 7 model """ermlp""" +239 7 loss """softplus""" +239 7 regularizer """no""" +239 7 optimizer """adadelta""" +239 7 training_loop """owa""" +239 7 negative_sampler """basic""" +239 7 evaluator """rankbased""" +239 8 dataset """kinships""" +239 8 model """ermlp""" +239 8 loss """softplus""" +239 8 regularizer """no""" +239 8 optimizer """adadelta""" +239 8 training_loop """owa""" +239 8 negative_sampler """basic""" +239 8 evaluator """rankbased""" +239 9 dataset """kinships""" +239 9 model """ermlp""" +239 9 loss """softplus""" +239 9 regularizer """no""" +239 9 optimizer """adadelta""" +239 9 training_loop """owa""" +239 9 negative_sampler """basic""" +239 9 evaluator """rankbased""" +239 10 dataset """kinships""" +239 10 model """ermlp""" +239 10 loss """softplus""" +239 10 regularizer """no""" +239 10 
optimizer """adadelta""" +239 10 training_loop """owa""" +239 10 negative_sampler """basic""" +239 10 evaluator """rankbased""" +239 11 dataset """kinships""" +239 11 model """ermlp""" +239 11 loss """softplus""" +239 11 regularizer """no""" +239 11 optimizer """adadelta""" +239 11 training_loop """owa""" +239 11 negative_sampler """basic""" +239 11 evaluator """rankbased""" +239 12 dataset """kinships""" +239 12 model """ermlp""" +239 12 loss """softplus""" +239 12 regularizer """no""" +239 12 optimizer """adadelta""" +239 12 training_loop """owa""" +239 12 negative_sampler """basic""" +239 12 evaluator """rankbased""" +239 13 dataset """kinships""" +239 13 model """ermlp""" +239 13 loss """softplus""" +239 13 regularizer """no""" +239 13 optimizer """adadelta""" +239 13 training_loop """owa""" +239 13 negative_sampler """basic""" +239 13 evaluator """rankbased""" +239 14 dataset """kinships""" +239 14 model """ermlp""" +239 14 loss """softplus""" +239 14 regularizer """no""" +239 14 optimizer """adadelta""" +239 14 training_loop """owa""" +239 14 negative_sampler """basic""" +239 14 evaluator """rankbased""" +239 15 dataset """kinships""" +239 15 model """ermlp""" +239 15 loss """softplus""" +239 15 regularizer """no""" +239 15 optimizer """adadelta""" +239 15 training_loop """owa""" +239 15 negative_sampler """basic""" +239 15 evaluator """rankbased""" +239 16 dataset """kinships""" +239 16 model """ermlp""" +239 16 loss """softplus""" +239 16 regularizer """no""" +239 16 optimizer """adadelta""" +239 16 training_loop """owa""" +239 16 negative_sampler """basic""" +239 16 evaluator """rankbased""" +239 17 dataset """kinships""" +239 17 model """ermlp""" +239 17 loss """softplus""" +239 17 regularizer """no""" +239 17 optimizer """adadelta""" +239 17 training_loop """owa""" +239 17 negative_sampler """basic""" +239 17 evaluator """rankbased""" +239 18 dataset """kinships""" +239 18 model """ermlp""" +239 18 loss """softplus""" +239 18 regularizer """no""" +239 18 
optimizer """adadelta""" +239 18 training_loop """owa""" +239 18 negative_sampler """basic""" +239 18 evaluator """rankbased""" +239 19 dataset """kinships""" +239 19 model """ermlp""" +239 19 loss """softplus""" +239 19 regularizer """no""" +239 19 optimizer """adadelta""" +239 19 training_loop """owa""" +239 19 negative_sampler """basic""" +239 19 evaluator """rankbased""" +239 20 dataset """kinships""" +239 20 model """ermlp""" +239 20 loss """softplus""" +239 20 regularizer """no""" +239 20 optimizer """adadelta""" +239 20 training_loop """owa""" +239 20 negative_sampler """basic""" +239 20 evaluator """rankbased""" +239 21 dataset """kinships""" +239 21 model """ermlp""" +239 21 loss """softplus""" +239 21 regularizer """no""" +239 21 optimizer """adadelta""" +239 21 training_loop """owa""" +239 21 negative_sampler """basic""" +239 21 evaluator """rankbased""" +239 22 dataset """kinships""" +239 22 model """ermlp""" +239 22 loss """softplus""" +239 22 regularizer """no""" +239 22 optimizer """adadelta""" +239 22 training_loop """owa""" +239 22 negative_sampler """basic""" +239 22 evaluator """rankbased""" +239 23 dataset """kinships""" +239 23 model """ermlp""" +239 23 loss """softplus""" +239 23 regularizer """no""" +239 23 optimizer """adadelta""" +239 23 training_loop """owa""" +239 23 negative_sampler """basic""" +239 23 evaluator """rankbased""" +239 24 dataset """kinships""" +239 24 model """ermlp""" +239 24 loss """softplus""" +239 24 regularizer """no""" +239 24 optimizer """adadelta""" +239 24 training_loop """owa""" +239 24 negative_sampler """basic""" +239 24 evaluator """rankbased""" +239 25 dataset """kinships""" +239 25 model """ermlp""" +239 25 loss """softplus""" +239 25 regularizer """no""" +239 25 optimizer """adadelta""" +239 25 training_loop """owa""" +239 25 negative_sampler """basic""" +239 25 evaluator """rankbased""" +239 26 dataset """kinships""" +239 26 model """ermlp""" +239 26 loss """softplus""" +239 26 regularizer """no""" +239 26 
optimizer """adadelta""" +239 26 training_loop """owa""" +239 26 negative_sampler """basic""" +239 26 evaluator """rankbased""" +239 27 dataset """kinships""" +239 27 model """ermlp""" +239 27 loss """softplus""" +239 27 regularizer """no""" +239 27 optimizer """adadelta""" +239 27 training_loop """owa""" +239 27 negative_sampler """basic""" +239 27 evaluator """rankbased""" +239 28 dataset """kinships""" +239 28 model """ermlp""" +239 28 loss """softplus""" +239 28 regularizer """no""" +239 28 optimizer """adadelta""" +239 28 training_loop """owa""" +239 28 negative_sampler """basic""" +239 28 evaluator """rankbased""" +239 29 dataset """kinships""" +239 29 model """ermlp""" +239 29 loss """softplus""" +239 29 regularizer """no""" +239 29 optimizer """adadelta""" +239 29 training_loop """owa""" +239 29 negative_sampler """basic""" +239 29 evaluator """rankbased""" +239 30 dataset """kinships""" +239 30 model """ermlp""" +239 30 loss """softplus""" +239 30 regularizer """no""" +239 30 optimizer """adadelta""" +239 30 training_loop """owa""" +239 30 negative_sampler """basic""" +239 30 evaluator """rankbased""" +239 31 dataset """kinships""" +239 31 model """ermlp""" +239 31 loss """softplus""" +239 31 regularizer """no""" +239 31 optimizer """adadelta""" +239 31 training_loop """owa""" +239 31 negative_sampler """basic""" +239 31 evaluator """rankbased""" +239 32 dataset """kinships""" +239 32 model """ermlp""" +239 32 loss """softplus""" +239 32 regularizer """no""" +239 32 optimizer """adadelta""" +239 32 training_loop """owa""" +239 32 negative_sampler """basic""" +239 32 evaluator """rankbased""" +239 33 dataset """kinships""" +239 33 model """ermlp""" +239 33 loss """softplus""" +239 33 regularizer """no""" +239 33 optimizer """adadelta""" +239 33 training_loop """owa""" +239 33 negative_sampler """basic""" +239 33 evaluator """rankbased""" +239 34 dataset """kinships""" +239 34 model """ermlp""" +239 34 loss """softplus""" +239 34 regularizer """no""" +239 34 
optimizer """adadelta""" +239 34 training_loop """owa""" +239 34 negative_sampler """basic""" +239 34 evaluator """rankbased""" +239 35 dataset """kinships""" +239 35 model """ermlp""" +239 35 loss """softplus""" +239 35 regularizer """no""" +239 35 optimizer """adadelta""" +239 35 training_loop """owa""" +239 35 negative_sampler """basic""" +239 35 evaluator """rankbased""" +239 36 dataset """kinships""" +239 36 model """ermlp""" +239 36 loss """softplus""" +239 36 regularizer """no""" +239 36 optimizer """adadelta""" +239 36 training_loop """owa""" +239 36 negative_sampler """basic""" +239 36 evaluator """rankbased""" +239 37 dataset """kinships""" +239 37 model """ermlp""" +239 37 loss """softplus""" +239 37 regularizer """no""" +239 37 optimizer """adadelta""" +239 37 training_loop """owa""" +239 37 negative_sampler """basic""" +239 37 evaluator """rankbased""" +239 38 dataset """kinships""" +239 38 model """ermlp""" +239 38 loss """softplus""" +239 38 regularizer """no""" +239 38 optimizer """adadelta""" +239 38 training_loop """owa""" +239 38 negative_sampler """basic""" +239 38 evaluator """rankbased""" +239 39 dataset """kinships""" +239 39 model """ermlp""" +239 39 loss """softplus""" +239 39 regularizer """no""" +239 39 optimizer """adadelta""" +239 39 training_loop """owa""" +239 39 negative_sampler """basic""" +239 39 evaluator """rankbased""" +239 40 dataset """kinships""" +239 40 model """ermlp""" +239 40 loss """softplus""" +239 40 regularizer """no""" +239 40 optimizer """adadelta""" +239 40 training_loop """owa""" +239 40 negative_sampler """basic""" +239 40 evaluator """rankbased""" +239 41 dataset """kinships""" +239 41 model """ermlp""" +239 41 loss """softplus""" +239 41 regularizer """no""" +239 41 optimizer """adadelta""" +239 41 training_loop """owa""" +239 41 negative_sampler """basic""" +239 41 evaluator """rankbased""" +239 42 dataset """kinships""" +239 42 model """ermlp""" +239 42 loss """softplus""" +239 42 regularizer """no""" +239 42 
optimizer """adadelta""" +239 42 training_loop """owa""" +239 42 negative_sampler """basic""" +239 42 evaluator """rankbased""" +239 43 dataset """kinships""" +239 43 model """ermlp""" +239 43 loss """softplus""" +239 43 regularizer """no""" +239 43 optimizer """adadelta""" +239 43 training_loop """owa""" +239 43 negative_sampler """basic""" +239 43 evaluator """rankbased""" +239 44 dataset """kinships""" +239 44 model """ermlp""" +239 44 loss """softplus""" +239 44 regularizer """no""" +239 44 optimizer """adadelta""" +239 44 training_loop """owa""" +239 44 negative_sampler """basic""" +239 44 evaluator """rankbased""" +239 45 dataset """kinships""" +239 45 model """ermlp""" +239 45 loss """softplus""" +239 45 regularizer """no""" +239 45 optimizer """adadelta""" +239 45 training_loop """owa""" +239 45 negative_sampler """basic""" +239 45 evaluator """rankbased""" +239 46 dataset """kinships""" +239 46 model """ermlp""" +239 46 loss """softplus""" +239 46 regularizer """no""" +239 46 optimizer """adadelta""" +239 46 training_loop """owa""" +239 46 negative_sampler """basic""" +239 46 evaluator """rankbased""" +239 47 dataset """kinships""" +239 47 model """ermlp""" +239 47 loss """softplus""" +239 47 regularizer """no""" +239 47 optimizer """adadelta""" +239 47 training_loop """owa""" +239 47 negative_sampler """basic""" +239 47 evaluator """rankbased""" +239 48 dataset """kinships""" +239 48 model """ermlp""" +239 48 loss """softplus""" +239 48 regularizer """no""" +239 48 optimizer """adadelta""" +239 48 training_loop """owa""" +239 48 negative_sampler """basic""" +239 48 evaluator """rankbased""" +239 49 dataset """kinships""" +239 49 model """ermlp""" +239 49 loss """softplus""" +239 49 regularizer """no""" +239 49 optimizer """adadelta""" +239 49 training_loop """owa""" +239 49 negative_sampler """basic""" +239 49 evaluator """rankbased""" +239 50 dataset """kinships""" +239 50 model """ermlp""" +239 50 loss """softplus""" +239 50 regularizer """no""" +239 50 
optimizer """adadelta""" +239 50 training_loop """owa""" +239 50 negative_sampler """basic""" +239 50 evaluator """rankbased""" +239 51 dataset """kinships""" +239 51 model """ermlp""" +239 51 loss """softplus""" +239 51 regularizer """no""" +239 51 optimizer """adadelta""" +239 51 training_loop """owa""" +239 51 negative_sampler """basic""" +239 51 evaluator """rankbased""" +239 52 dataset """kinships""" +239 52 model """ermlp""" +239 52 loss """softplus""" +239 52 regularizer """no""" +239 52 optimizer """adadelta""" +239 52 training_loop """owa""" +239 52 negative_sampler """basic""" +239 52 evaluator """rankbased""" +239 53 dataset """kinships""" +239 53 model """ermlp""" +239 53 loss """softplus""" +239 53 regularizer """no""" +239 53 optimizer """adadelta""" +239 53 training_loop """owa""" +239 53 negative_sampler """basic""" +239 53 evaluator """rankbased""" +239 54 dataset """kinships""" +239 54 model """ermlp""" +239 54 loss """softplus""" +239 54 regularizer """no""" +239 54 optimizer """adadelta""" +239 54 training_loop """owa""" +239 54 negative_sampler """basic""" +239 54 evaluator """rankbased""" +239 55 dataset """kinships""" +239 55 model """ermlp""" +239 55 loss """softplus""" +239 55 regularizer """no""" +239 55 optimizer """adadelta""" +239 55 training_loop """owa""" +239 55 negative_sampler """basic""" +239 55 evaluator """rankbased""" +239 56 dataset """kinships""" +239 56 model """ermlp""" +239 56 loss """softplus""" +239 56 regularizer """no""" +239 56 optimizer """adadelta""" +239 56 training_loop """owa""" +239 56 negative_sampler """basic""" +239 56 evaluator """rankbased""" +239 57 dataset """kinships""" +239 57 model """ermlp""" +239 57 loss """softplus""" +239 57 regularizer """no""" +239 57 optimizer """adadelta""" +239 57 training_loop """owa""" +239 57 negative_sampler """basic""" +239 57 evaluator """rankbased""" +239 58 dataset """kinships""" +239 58 model """ermlp""" +239 58 loss """softplus""" +239 58 regularizer """no""" +239 58 
optimizer """adadelta""" +239 58 training_loop """owa""" +239 58 negative_sampler """basic""" +239 58 evaluator """rankbased""" +239 59 dataset """kinships""" +239 59 model """ermlp""" +239 59 loss """softplus""" +239 59 regularizer """no""" +239 59 optimizer """adadelta""" +239 59 training_loop """owa""" +239 59 negative_sampler """basic""" +239 59 evaluator """rankbased""" +239 60 dataset """kinships""" +239 60 model """ermlp""" +239 60 loss """softplus""" +239 60 regularizer """no""" +239 60 optimizer """adadelta""" +239 60 training_loop """owa""" +239 60 negative_sampler """basic""" +239 60 evaluator """rankbased""" +239 61 dataset """kinships""" +239 61 model """ermlp""" +239 61 loss """softplus""" +239 61 regularizer """no""" +239 61 optimizer """adadelta""" +239 61 training_loop """owa""" +239 61 negative_sampler """basic""" +239 61 evaluator """rankbased""" +239 62 dataset """kinships""" +239 62 model """ermlp""" +239 62 loss """softplus""" +239 62 regularizer """no""" +239 62 optimizer """adadelta""" +239 62 training_loop """owa""" +239 62 negative_sampler """basic""" +239 62 evaluator """rankbased""" +239 63 dataset """kinships""" +239 63 model """ermlp""" +239 63 loss """softplus""" +239 63 regularizer """no""" +239 63 optimizer """adadelta""" +239 63 training_loop """owa""" +239 63 negative_sampler """basic""" +239 63 evaluator """rankbased""" +239 64 dataset """kinships""" +239 64 model """ermlp""" +239 64 loss """softplus""" +239 64 regularizer """no""" +239 64 optimizer """adadelta""" +239 64 training_loop """owa""" +239 64 negative_sampler """basic""" +239 64 evaluator """rankbased""" +239 65 dataset """kinships""" +239 65 model """ermlp""" +239 65 loss """softplus""" +239 65 regularizer """no""" +239 65 optimizer """adadelta""" +239 65 training_loop """owa""" +239 65 negative_sampler """basic""" +239 65 evaluator """rankbased""" +239 66 dataset """kinships""" +239 66 model """ermlp""" +239 66 loss """softplus""" +239 66 regularizer """no""" +239 66 
optimizer """adadelta""" +239 66 training_loop """owa""" +239 66 negative_sampler """basic""" +239 66 evaluator """rankbased""" +239 67 dataset """kinships""" +239 67 model """ermlp""" +239 67 loss """softplus""" +239 67 regularizer """no""" +239 67 optimizer """adadelta""" +239 67 training_loop """owa""" +239 67 negative_sampler """basic""" +239 67 evaluator """rankbased""" +239 68 dataset """kinships""" +239 68 model """ermlp""" +239 68 loss """softplus""" +239 68 regularizer """no""" +239 68 optimizer """adadelta""" +239 68 training_loop """owa""" +239 68 negative_sampler """basic""" +239 68 evaluator """rankbased""" +239 69 dataset """kinships""" +239 69 model """ermlp""" +239 69 loss """softplus""" +239 69 regularizer """no""" +239 69 optimizer """adadelta""" +239 69 training_loop """owa""" +239 69 negative_sampler """basic""" +239 69 evaluator """rankbased""" +239 70 dataset """kinships""" +239 70 model """ermlp""" +239 70 loss """softplus""" +239 70 regularizer """no""" +239 70 optimizer """adadelta""" +239 70 training_loop """owa""" +239 70 negative_sampler """basic""" +239 70 evaluator """rankbased""" +239 71 dataset """kinships""" +239 71 model """ermlp""" +239 71 loss """softplus""" +239 71 regularizer """no""" +239 71 optimizer """adadelta""" +239 71 training_loop """owa""" +239 71 negative_sampler """basic""" +239 71 evaluator """rankbased""" +239 72 dataset """kinships""" +239 72 model """ermlp""" +239 72 loss """softplus""" +239 72 regularizer """no""" +239 72 optimizer """adadelta""" +239 72 training_loop """owa""" +239 72 negative_sampler """basic""" +239 72 evaluator """rankbased""" +239 73 dataset """kinships""" +239 73 model """ermlp""" +239 73 loss """softplus""" +239 73 regularizer """no""" +239 73 optimizer """adadelta""" +239 73 training_loop """owa""" +239 73 negative_sampler """basic""" +239 73 evaluator """rankbased""" +239 74 dataset """kinships""" +239 74 model """ermlp""" +239 74 loss """softplus""" +239 74 regularizer """no""" +239 74 
optimizer """adadelta""" +239 74 training_loop """owa""" +239 74 negative_sampler """basic""" +239 74 evaluator """rankbased""" +239 75 dataset """kinships""" +239 75 model """ermlp""" +239 75 loss """softplus""" +239 75 regularizer """no""" +239 75 optimizer """adadelta""" +239 75 training_loop """owa""" +239 75 negative_sampler """basic""" +239 75 evaluator """rankbased""" +239 76 dataset """kinships""" +239 76 model """ermlp""" +239 76 loss """softplus""" +239 76 regularizer """no""" +239 76 optimizer """adadelta""" +239 76 training_loop """owa""" +239 76 negative_sampler """basic""" +239 76 evaluator """rankbased""" +239 77 dataset """kinships""" +239 77 model """ermlp""" +239 77 loss """softplus""" +239 77 regularizer """no""" +239 77 optimizer """adadelta""" +239 77 training_loop """owa""" +239 77 negative_sampler """basic""" +239 77 evaluator """rankbased""" +239 78 dataset """kinships""" +239 78 model """ermlp""" +239 78 loss """softplus""" +239 78 regularizer """no""" +239 78 optimizer """adadelta""" +239 78 training_loop """owa""" +239 78 negative_sampler """basic""" +239 78 evaluator """rankbased""" +239 79 dataset """kinships""" +239 79 model """ermlp""" +239 79 loss """softplus""" +239 79 regularizer """no""" +239 79 optimizer """adadelta""" +239 79 training_loop """owa""" +239 79 negative_sampler """basic""" +239 79 evaluator """rankbased""" +239 80 dataset """kinships""" +239 80 model """ermlp""" +239 80 loss """softplus""" +239 80 regularizer """no""" +239 80 optimizer """adadelta""" +239 80 training_loop """owa""" +239 80 negative_sampler """basic""" +239 80 evaluator """rankbased""" +239 81 dataset """kinships""" +239 81 model """ermlp""" +239 81 loss """softplus""" +239 81 regularizer """no""" +239 81 optimizer """adadelta""" +239 81 training_loop """owa""" +239 81 negative_sampler """basic""" +239 81 evaluator """rankbased""" +239 82 dataset """kinships""" +239 82 model """ermlp""" +239 82 loss """softplus""" +239 82 regularizer """no""" +239 82 
optimizer """adadelta""" +239 82 training_loop """owa""" +239 82 negative_sampler """basic""" +239 82 evaluator """rankbased""" +239 83 dataset """kinships""" +239 83 model """ermlp""" +239 83 loss """softplus""" +239 83 regularizer """no""" +239 83 optimizer """adadelta""" +239 83 training_loop """owa""" +239 83 negative_sampler """basic""" +239 83 evaluator """rankbased""" +239 84 dataset """kinships""" +239 84 model """ermlp""" +239 84 loss """softplus""" +239 84 regularizer """no""" +239 84 optimizer """adadelta""" +239 84 training_loop """owa""" +239 84 negative_sampler """basic""" +239 84 evaluator """rankbased""" +239 85 dataset """kinships""" +239 85 model """ermlp""" +239 85 loss """softplus""" +239 85 regularizer """no""" +239 85 optimizer """adadelta""" +239 85 training_loop """owa""" +239 85 negative_sampler """basic""" +239 85 evaluator """rankbased""" +239 86 dataset """kinships""" +239 86 model """ermlp""" +239 86 loss """softplus""" +239 86 regularizer """no""" +239 86 optimizer """adadelta""" +239 86 training_loop """owa""" +239 86 negative_sampler """basic""" +239 86 evaluator """rankbased""" +239 87 dataset """kinships""" +239 87 model """ermlp""" +239 87 loss """softplus""" +239 87 regularizer """no""" +239 87 optimizer """adadelta""" +239 87 training_loop """owa""" +239 87 negative_sampler """basic""" +239 87 evaluator """rankbased""" +239 88 dataset """kinships""" +239 88 model """ermlp""" +239 88 loss """softplus""" +239 88 regularizer """no""" +239 88 optimizer """adadelta""" +239 88 training_loop """owa""" +239 88 negative_sampler """basic""" +239 88 evaluator """rankbased""" +239 89 dataset """kinships""" +239 89 model """ermlp""" +239 89 loss """softplus""" +239 89 regularizer """no""" +239 89 optimizer """adadelta""" +239 89 training_loop """owa""" +239 89 negative_sampler """basic""" +239 89 evaluator """rankbased""" +239 90 dataset """kinships""" +239 90 model """ermlp""" +239 90 loss """softplus""" +239 90 regularizer """no""" +239 90 
optimizer """adadelta""" +239 90 training_loop """owa""" +239 90 negative_sampler """basic""" +239 90 evaluator """rankbased""" +239 91 dataset """kinships""" +239 91 model """ermlp""" +239 91 loss """softplus""" +239 91 regularizer """no""" +239 91 optimizer """adadelta""" +239 91 training_loop """owa""" +239 91 negative_sampler """basic""" +239 91 evaluator """rankbased""" +239 92 dataset """kinships""" +239 92 model """ermlp""" +239 92 loss """softplus""" +239 92 regularizer """no""" +239 92 optimizer """adadelta""" +239 92 training_loop """owa""" +239 92 negative_sampler """basic""" +239 92 evaluator """rankbased""" +239 93 dataset """kinships""" +239 93 model """ermlp""" +239 93 loss """softplus""" +239 93 regularizer """no""" +239 93 optimizer """adadelta""" +239 93 training_loop """owa""" +239 93 negative_sampler """basic""" +239 93 evaluator """rankbased""" +239 94 dataset """kinships""" +239 94 model """ermlp""" +239 94 loss """softplus""" +239 94 regularizer """no""" +239 94 optimizer """adadelta""" +239 94 training_loop """owa""" +239 94 negative_sampler """basic""" +239 94 evaluator """rankbased""" +239 95 dataset """kinships""" +239 95 model """ermlp""" +239 95 loss """softplus""" +239 95 regularizer """no""" +239 95 optimizer """adadelta""" +239 95 training_loop """owa""" +239 95 negative_sampler """basic""" +239 95 evaluator """rankbased""" +239 96 dataset """kinships""" +239 96 model """ermlp""" +239 96 loss """softplus""" +239 96 regularizer """no""" +239 96 optimizer """adadelta""" +239 96 training_loop """owa""" +239 96 negative_sampler """basic""" +239 96 evaluator """rankbased""" +239 97 dataset """kinships""" +239 97 model """ermlp""" +239 97 loss """softplus""" +239 97 regularizer """no""" +239 97 optimizer """adadelta""" +239 97 training_loop """owa""" +239 97 negative_sampler """basic""" +239 97 evaluator """rankbased""" +239 98 dataset """kinships""" +239 98 model """ermlp""" +239 98 loss """softplus""" +239 98 regularizer """no""" +239 98 
optimizer """adadelta""" +239 98 training_loop """owa""" +239 98 negative_sampler """basic""" +239 98 evaluator """rankbased""" +239 99 dataset """kinships""" +239 99 model """ermlp""" +239 99 loss """softplus""" +239 99 regularizer """no""" +239 99 optimizer """adadelta""" +239 99 training_loop """owa""" +239 99 negative_sampler """basic""" +239 99 evaluator """rankbased""" +239 100 dataset """kinships""" +239 100 model """ermlp""" +239 100 loss """softplus""" +239 100 regularizer """no""" +239 100 optimizer """adadelta""" +239 100 training_loop """owa""" +239 100 negative_sampler """basic""" +239 100 evaluator """rankbased""" +240 1 model.embedding_dim 1.0 +240 1 negative_sampler.num_negs_per_pos 96.0 +240 1 training.batch_size 1.0 +240 2 model.embedding_dim 2.0 +240 2 negative_sampler.num_negs_per_pos 11.0 +240 2 training.batch_size 0.0 +240 3 model.embedding_dim 1.0 +240 3 negative_sampler.num_negs_per_pos 34.0 +240 3 training.batch_size 0.0 +240 4 model.embedding_dim 2.0 +240 4 negative_sampler.num_negs_per_pos 95.0 +240 4 training.batch_size 2.0 +240 5 model.embedding_dim 2.0 +240 5 negative_sampler.num_negs_per_pos 72.0 +240 5 training.batch_size 0.0 +240 6 model.embedding_dim 2.0 +240 6 negative_sampler.num_negs_per_pos 79.0 +240 6 training.batch_size 0.0 +240 7 model.embedding_dim 1.0 +240 7 negative_sampler.num_negs_per_pos 54.0 +240 7 training.batch_size 2.0 +240 8 model.embedding_dim 0.0 +240 8 negative_sampler.num_negs_per_pos 15.0 +240 8 training.batch_size 0.0 +240 9 model.embedding_dim 2.0 +240 9 negative_sampler.num_negs_per_pos 11.0 +240 9 training.batch_size 1.0 +240 10 model.embedding_dim 2.0 +240 10 negative_sampler.num_negs_per_pos 71.0 +240 10 training.batch_size 2.0 +240 11 model.embedding_dim 0.0 +240 11 negative_sampler.num_negs_per_pos 62.0 +240 11 training.batch_size 2.0 +240 12 model.embedding_dim 2.0 +240 12 negative_sampler.num_negs_per_pos 29.0 +240 12 training.batch_size 0.0 +240 13 model.embedding_dim 1.0 +240 13 
negative_sampler.num_negs_per_pos 78.0 +240 13 training.batch_size 1.0 +240 14 model.embedding_dim 1.0 +240 14 negative_sampler.num_negs_per_pos 28.0 +240 14 training.batch_size 0.0 +240 15 model.embedding_dim 0.0 +240 15 negative_sampler.num_negs_per_pos 98.0 +240 15 training.batch_size 2.0 +240 16 model.embedding_dim 2.0 +240 16 negative_sampler.num_negs_per_pos 51.0 +240 16 training.batch_size 2.0 +240 17 model.embedding_dim 1.0 +240 17 negative_sampler.num_negs_per_pos 83.0 +240 17 training.batch_size 0.0 +240 18 model.embedding_dim 0.0 +240 18 negative_sampler.num_negs_per_pos 56.0 +240 18 training.batch_size 0.0 +240 19 model.embedding_dim 1.0 +240 19 negative_sampler.num_negs_per_pos 86.0 +240 19 training.batch_size 0.0 +240 20 model.embedding_dim 2.0 +240 20 negative_sampler.num_negs_per_pos 34.0 +240 20 training.batch_size 2.0 +240 21 model.embedding_dim 0.0 +240 21 negative_sampler.num_negs_per_pos 46.0 +240 21 training.batch_size 0.0 +240 22 model.embedding_dim 0.0 +240 22 negative_sampler.num_negs_per_pos 5.0 +240 22 training.batch_size 1.0 +240 23 model.embedding_dim 2.0 +240 23 negative_sampler.num_negs_per_pos 12.0 +240 23 training.batch_size 1.0 +240 24 model.embedding_dim 2.0 +240 24 negative_sampler.num_negs_per_pos 88.0 +240 24 training.batch_size 2.0 +240 25 model.embedding_dim 0.0 +240 25 negative_sampler.num_negs_per_pos 51.0 +240 25 training.batch_size 0.0 +240 26 model.embedding_dim 2.0 +240 26 negative_sampler.num_negs_per_pos 27.0 +240 26 training.batch_size 1.0 +240 27 model.embedding_dim 2.0 +240 27 negative_sampler.num_negs_per_pos 39.0 +240 27 training.batch_size 2.0 +240 28 model.embedding_dim 2.0 +240 28 negative_sampler.num_negs_per_pos 66.0 +240 28 training.batch_size 1.0 +240 29 model.embedding_dim 0.0 +240 29 negative_sampler.num_negs_per_pos 58.0 +240 29 training.batch_size 1.0 +240 30 model.embedding_dim 1.0 +240 30 negative_sampler.num_negs_per_pos 97.0 +240 30 training.batch_size 0.0 +240 31 model.embedding_dim 2.0 +240 31 
negative_sampler.num_negs_per_pos 56.0 +240 31 training.batch_size 1.0 +240 32 model.embedding_dim 1.0 +240 32 negative_sampler.num_negs_per_pos 83.0 +240 32 training.batch_size 2.0 +240 33 model.embedding_dim 1.0 +240 33 negative_sampler.num_negs_per_pos 52.0 +240 33 training.batch_size 1.0 +240 34 model.embedding_dim 2.0 +240 34 negative_sampler.num_negs_per_pos 12.0 +240 34 training.batch_size 0.0 +240 35 model.embedding_dim 2.0 +240 35 negative_sampler.num_negs_per_pos 53.0 +240 35 training.batch_size 1.0 +240 36 model.embedding_dim 1.0 +240 36 negative_sampler.num_negs_per_pos 38.0 +240 36 training.batch_size 2.0 +240 37 model.embedding_dim 0.0 +240 37 negative_sampler.num_negs_per_pos 74.0 +240 37 training.batch_size 0.0 +240 38 model.embedding_dim 2.0 +240 38 negative_sampler.num_negs_per_pos 99.0 +240 38 training.batch_size 0.0 +240 39 model.embedding_dim 1.0 +240 39 negative_sampler.num_negs_per_pos 99.0 +240 39 training.batch_size 0.0 +240 40 model.embedding_dim 2.0 +240 40 negative_sampler.num_negs_per_pos 61.0 +240 40 training.batch_size 2.0 +240 41 model.embedding_dim 1.0 +240 41 negative_sampler.num_negs_per_pos 37.0 +240 41 training.batch_size 1.0 +240 42 model.embedding_dim 0.0 +240 42 negative_sampler.num_negs_per_pos 31.0 +240 42 training.batch_size 0.0 +240 43 model.embedding_dim 1.0 +240 43 negative_sampler.num_negs_per_pos 59.0 +240 43 training.batch_size 1.0 +240 44 model.embedding_dim 2.0 +240 44 negative_sampler.num_negs_per_pos 60.0 +240 44 training.batch_size 2.0 +240 45 model.embedding_dim 0.0 +240 45 negative_sampler.num_negs_per_pos 6.0 +240 45 training.batch_size 1.0 +240 46 model.embedding_dim 2.0 +240 46 negative_sampler.num_negs_per_pos 71.0 +240 46 training.batch_size 1.0 +240 47 model.embedding_dim 2.0 +240 47 negative_sampler.num_negs_per_pos 90.0 +240 47 training.batch_size 2.0 +240 48 model.embedding_dim 0.0 +240 48 negative_sampler.num_negs_per_pos 65.0 +240 48 training.batch_size 0.0 +240 49 model.embedding_dim 0.0 +240 49 
negative_sampler.num_negs_per_pos 28.0 +240 49 training.batch_size 1.0 +240 50 model.embedding_dim 0.0 +240 50 negative_sampler.num_negs_per_pos 37.0 +240 50 training.batch_size 2.0 +240 51 model.embedding_dim 1.0 +240 51 negative_sampler.num_negs_per_pos 87.0 +240 51 training.batch_size 0.0 +240 52 model.embedding_dim 2.0 +240 52 negative_sampler.num_negs_per_pos 95.0 +240 52 training.batch_size 2.0 +240 53 model.embedding_dim 1.0 +240 53 negative_sampler.num_negs_per_pos 14.0 +240 53 training.batch_size 1.0 +240 54 model.embedding_dim 0.0 +240 54 negative_sampler.num_negs_per_pos 46.0 +240 54 training.batch_size 0.0 +240 55 model.embedding_dim 0.0 +240 55 negative_sampler.num_negs_per_pos 65.0 +240 55 training.batch_size 1.0 +240 56 model.embedding_dim 2.0 +240 56 negative_sampler.num_negs_per_pos 33.0 +240 56 training.batch_size 2.0 +240 57 model.embedding_dim 0.0 +240 57 negative_sampler.num_negs_per_pos 77.0 +240 57 training.batch_size 0.0 +240 58 model.embedding_dim 1.0 +240 58 negative_sampler.num_negs_per_pos 57.0 +240 58 training.batch_size 1.0 +240 59 model.embedding_dim 2.0 +240 59 negative_sampler.num_negs_per_pos 25.0 +240 59 training.batch_size 1.0 +240 60 model.embedding_dim 1.0 +240 60 negative_sampler.num_negs_per_pos 45.0 +240 60 training.batch_size 2.0 +240 61 model.embedding_dim 1.0 +240 61 negative_sampler.num_negs_per_pos 72.0 +240 61 training.batch_size 0.0 +240 62 model.embedding_dim 2.0 +240 62 negative_sampler.num_negs_per_pos 82.0 +240 62 training.batch_size 2.0 +240 63 model.embedding_dim 1.0 +240 63 negative_sampler.num_negs_per_pos 36.0 +240 63 training.batch_size 0.0 +240 64 model.embedding_dim 2.0 +240 64 negative_sampler.num_negs_per_pos 28.0 +240 64 training.batch_size 0.0 +240 65 model.embedding_dim 2.0 +240 65 negative_sampler.num_negs_per_pos 99.0 +240 65 training.batch_size 1.0 +240 66 model.embedding_dim 1.0 +240 66 negative_sampler.num_negs_per_pos 31.0 +240 66 training.batch_size 0.0 +240 67 model.embedding_dim 0.0 +240 67 
negative_sampler.num_negs_per_pos 97.0 +240 67 training.batch_size 1.0 +240 68 model.embedding_dim 0.0 +240 68 negative_sampler.num_negs_per_pos 10.0 +240 68 training.batch_size 1.0 +240 69 model.embedding_dim 2.0 +240 69 negative_sampler.num_negs_per_pos 0.0 +240 69 training.batch_size 0.0 +240 70 model.embedding_dim 1.0 +240 70 negative_sampler.num_negs_per_pos 46.0 +240 70 training.batch_size 1.0 +240 71 model.embedding_dim 1.0 +240 71 negative_sampler.num_negs_per_pos 76.0 +240 71 training.batch_size 1.0 +240 72 model.embedding_dim 2.0 +240 72 negative_sampler.num_negs_per_pos 62.0 +240 72 training.batch_size 2.0 +240 73 model.embedding_dim 2.0 +240 73 negative_sampler.num_negs_per_pos 74.0 +240 73 training.batch_size 1.0 +240 74 model.embedding_dim 1.0 +240 74 negative_sampler.num_negs_per_pos 61.0 +240 74 training.batch_size 2.0 +240 75 model.embedding_dim 2.0 +240 75 negative_sampler.num_negs_per_pos 6.0 +240 75 training.batch_size 0.0 +240 76 model.embedding_dim 2.0 +240 76 negative_sampler.num_negs_per_pos 56.0 +240 76 training.batch_size 0.0 +240 77 model.embedding_dim 1.0 +240 77 negative_sampler.num_negs_per_pos 85.0 +240 77 training.batch_size 0.0 +240 78 model.embedding_dim 1.0 +240 78 negative_sampler.num_negs_per_pos 79.0 +240 78 training.batch_size 1.0 +240 79 model.embedding_dim 1.0 +240 79 negative_sampler.num_negs_per_pos 8.0 +240 79 training.batch_size 1.0 +240 80 model.embedding_dim 0.0 +240 80 negative_sampler.num_negs_per_pos 53.0 +240 80 training.batch_size 0.0 +240 81 model.embedding_dim 0.0 +240 81 negative_sampler.num_negs_per_pos 98.0 +240 81 training.batch_size 2.0 +240 82 model.embedding_dim 0.0 +240 82 negative_sampler.num_negs_per_pos 75.0 +240 82 training.batch_size 0.0 +240 83 model.embedding_dim 1.0 +240 83 negative_sampler.num_negs_per_pos 33.0 +240 83 training.batch_size 1.0 +240 84 model.embedding_dim 2.0 +240 84 negative_sampler.num_negs_per_pos 98.0 +240 84 training.batch_size 0.0 +240 85 model.embedding_dim 0.0 +240 85 
negative_sampler.num_negs_per_pos 70.0 +240 85 training.batch_size 1.0 +240 86 model.embedding_dim 2.0 +240 86 negative_sampler.num_negs_per_pos 50.0 +240 86 training.batch_size 1.0 +240 87 model.embedding_dim 0.0 +240 87 negative_sampler.num_negs_per_pos 22.0 +240 87 training.batch_size 1.0 +240 88 model.embedding_dim 2.0 +240 88 negative_sampler.num_negs_per_pos 67.0 +240 88 training.batch_size 0.0 +240 89 model.embedding_dim 1.0 +240 89 negative_sampler.num_negs_per_pos 80.0 +240 89 training.batch_size 1.0 +240 90 model.embedding_dim 1.0 +240 90 negative_sampler.num_negs_per_pos 31.0 +240 90 training.batch_size 0.0 +240 91 model.embedding_dim 1.0 +240 91 negative_sampler.num_negs_per_pos 22.0 +240 91 training.batch_size 2.0 +240 92 model.embedding_dim 1.0 +240 92 negative_sampler.num_negs_per_pos 90.0 +240 92 training.batch_size 1.0 +240 93 model.embedding_dim 1.0 +240 93 negative_sampler.num_negs_per_pos 60.0 +240 93 training.batch_size 1.0 +240 94 model.embedding_dim 1.0 +240 94 negative_sampler.num_negs_per_pos 30.0 +240 94 training.batch_size 2.0 +240 95 model.embedding_dim 1.0 +240 95 negative_sampler.num_negs_per_pos 50.0 +240 95 training.batch_size 0.0 +240 96 model.embedding_dim 2.0 +240 96 negative_sampler.num_negs_per_pos 90.0 +240 96 training.batch_size 1.0 +240 97 model.embedding_dim 0.0 +240 97 negative_sampler.num_negs_per_pos 26.0 +240 97 training.batch_size 1.0 +240 98 model.embedding_dim 1.0 +240 98 negative_sampler.num_negs_per_pos 50.0 +240 98 training.batch_size 2.0 +240 99 model.embedding_dim 1.0 +240 99 negative_sampler.num_negs_per_pos 99.0 +240 99 training.batch_size 1.0 +240 100 model.embedding_dim 2.0 +240 100 negative_sampler.num_negs_per_pos 42.0 +240 100 training.batch_size 1.0 +240 1 dataset """kinships""" +240 1 model """ermlp""" +240 1 loss """bceaftersigmoid""" +240 1 regularizer """no""" +240 1 optimizer """adadelta""" +240 1 training_loop """owa""" +240 1 negative_sampler """basic""" +240 1 evaluator """rankbased""" +240 2 
dataset """kinships""" +240 2 model """ermlp""" +240 2 loss """bceaftersigmoid""" +240 2 regularizer """no""" +240 2 optimizer """adadelta""" +240 2 training_loop """owa""" +240 2 negative_sampler """basic""" +240 2 evaluator """rankbased""" +240 3 dataset """kinships""" +240 3 model """ermlp""" +240 3 loss """bceaftersigmoid""" +240 3 regularizer """no""" +240 3 optimizer """adadelta""" +240 3 training_loop """owa""" +240 3 negative_sampler """basic""" +240 3 evaluator """rankbased""" +240 4 dataset """kinships""" +240 4 model """ermlp""" +240 4 loss """bceaftersigmoid""" +240 4 regularizer """no""" +240 4 optimizer """adadelta""" +240 4 training_loop """owa""" +240 4 negative_sampler """basic""" +240 4 evaluator """rankbased""" +240 5 dataset """kinships""" +240 5 model """ermlp""" +240 5 loss """bceaftersigmoid""" +240 5 regularizer """no""" +240 5 optimizer """adadelta""" +240 5 training_loop """owa""" +240 5 negative_sampler """basic""" +240 5 evaluator """rankbased""" +240 6 dataset """kinships""" +240 6 model """ermlp""" +240 6 loss """bceaftersigmoid""" +240 6 regularizer """no""" +240 6 optimizer """adadelta""" +240 6 training_loop """owa""" +240 6 negative_sampler """basic""" +240 6 evaluator """rankbased""" +240 7 dataset """kinships""" +240 7 model """ermlp""" +240 7 loss """bceaftersigmoid""" +240 7 regularizer """no""" +240 7 optimizer """adadelta""" +240 7 training_loop """owa""" +240 7 negative_sampler """basic""" +240 7 evaluator """rankbased""" +240 8 dataset """kinships""" +240 8 model """ermlp""" +240 8 loss """bceaftersigmoid""" +240 8 regularizer """no""" +240 8 optimizer """adadelta""" +240 8 training_loop """owa""" +240 8 negative_sampler """basic""" +240 8 evaluator """rankbased""" +240 9 dataset """kinships""" +240 9 model """ermlp""" +240 9 loss """bceaftersigmoid""" +240 9 regularizer """no""" +240 9 optimizer """adadelta""" +240 9 training_loop """owa""" +240 9 negative_sampler """basic""" +240 9 evaluator """rankbased""" +240 10 
dataset """kinships""" +240 10 model """ermlp""" +240 10 loss """bceaftersigmoid""" +240 10 regularizer """no""" +240 10 optimizer """adadelta""" +240 10 training_loop """owa""" +240 10 negative_sampler """basic""" +240 10 evaluator """rankbased""" +240 11 dataset """kinships""" +240 11 model """ermlp""" +240 11 loss """bceaftersigmoid""" +240 11 regularizer """no""" +240 11 optimizer """adadelta""" +240 11 training_loop """owa""" +240 11 negative_sampler """basic""" +240 11 evaluator """rankbased""" +240 12 dataset """kinships""" +240 12 model """ermlp""" +240 12 loss """bceaftersigmoid""" +240 12 regularizer """no""" +240 12 optimizer """adadelta""" +240 12 training_loop """owa""" +240 12 negative_sampler """basic""" +240 12 evaluator """rankbased""" +240 13 dataset """kinships""" +240 13 model """ermlp""" +240 13 loss """bceaftersigmoid""" +240 13 regularizer """no""" +240 13 optimizer """adadelta""" +240 13 training_loop """owa""" +240 13 negative_sampler """basic""" +240 13 evaluator """rankbased""" +240 14 dataset """kinships""" +240 14 model """ermlp""" +240 14 loss """bceaftersigmoid""" +240 14 regularizer """no""" +240 14 optimizer """adadelta""" +240 14 training_loop """owa""" +240 14 negative_sampler """basic""" +240 14 evaluator """rankbased""" +240 15 dataset """kinships""" +240 15 model """ermlp""" +240 15 loss """bceaftersigmoid""" +240 15 regularizer """no""" +240 15 optimizer """adadelta""" +240 15 training_loop """owa""" +240 15 negative_sampler """basic""" +240 15 evaluator """rankbased""" +240 16 dataset """kinships""" +240 16 model """ermlp""" +240 16 loss """bceaftersigmoid""" +240 16 regularizer """no""" +240 16 optimizer """adadelta""" +240 16 training_loop """owa""" +240 16 negative_sampler """basic""" +240 16 evaluator """rankbased""" +240 17 dataset """kinships""" +240 17 model """ermlp""" +240 17 loss """bceaftersigmoid""" +240 17 regularizer """no""" +240 17 optimizer """adadelta""" +240 17 training_loop """owa""" +240 17 
negative_sampler """basic""" +240 17 evaluator """rankbased""" +240 18 dataset """kinships""" +240 18 model """ermlp""" +240 18 loss """bceaftersigmoid""" +240 18 regularizer """no""" +240 18 optimizer """adadelta""" +240 18 training_loop """owa""" +240 18 negative_sampler """basic""" +240 18 evaluator """rankbased""" +240 19 dataset """kinships""" +240 19 model """ermlp""" +240 19 loss """bceaftersigmoid""" +240 19 regularizer """no""" +240 19 optimizer """adadelta""" +240 19 training_loop """owa""" +240 19 negative_sampler """basic""" +240 19 evaluator """rankbased""" +240 20 dataset """kinships""" +240 20 model """ermlp""" +240 20 loss """bceaftersigmoid""" +240 20 regularizer """no""" +240 20 optimizer """adadelta""" +240 20 training_loop """owa""" +240 20 negative_sampler """basic""" +240 20 evaluator """rankbased""" +240 21 dataset """kinships""" +240 21 model """ermlp""" +240 21 loss """bceaftersigmoid""" +240 21 regularizer """no""" +240 21 optimizer """adadelta""" +240 21 training_loop """owa""" +240 21 negative_sampler """basic""" +240 21 evaluator """rankbased""" +240 22 dataset """kinships""" +240 22 model """ermlp""" +240 22 loss """bceaftersigmoid""" +240 22 regularizer """no""" +240 22 optimizer """adadelta""" +240 22 training_loop """owa""" +240 22 negative_sampler """basic""" +240 22 evaluator """rankbased""" +240 23 dataset """kinships""" +240 23 model """ermlp""" +240 23 loss """bceaftersigmoid""" +240 23 regularizer """no""" +240 23 optimizer """adadelta""" +240 23 training_loop """owa""" +240 23 negative_sampler """basic""" +240 23 evaluator """rankbased""" +240 24 dataset """kinships""" +240 24 model """ermlp""" +240 24 loss """bceaftersigmoid""" +240 24 regularizer """no""" +240 24 optimizer """adadelta""" +240 24 training_loop """owa""" +240 24 negative_sampler """basic""" +240 24 evaluator """rankbased""" +240 25 dataset """kinships""" +240 25 model """ermlp""" +240 25 loss """bceaftersigmoid""" +240 25 regularizer """no""" +240 25 
optimizer """adadelta""" +240 25 training_loop """owa""" +240 25 negative_sampler """basic""" +240 25 evaluator """rankbased""" +240 26 dataset """kinships""" +240 26 model """ermlp""" +240 26 loss """bceaftersigmoid""" +240 26 regularizer """no""" +240 26 optimizer """adadelta""" +240 26 training_loop """owa""" +240 26 negative_sampler """basic""" +240 26 evaluator """rankbased""" +240 27 dataset """kinships""" +240 27 model """ermlp""" +240 27 loss """bceaftersigmoid""" +240 27 regularizer """no""" +240 27 optimizer """adadelta""" +240 27 training_loop """owa""" +240 27 negative_sampler """basic""" +240 27 evaluator """rankbased""" +240 28 dataset """kinships""" +240 28 model """ermlp""" +240 28 loss """bceaftersigmoid""" +240 28 regularizer """no""" +240 28 optimizer """adadelta""" +240 28 training_loop """owa""" +240 28 negative_sampler """basic""" +240 28 evaluator """rankbased""" +240 29 dataset """kinships""" +240 29 model """ermlp""" +240 29 loss """bceaftersigmoid""" +240 29 regularizer """no""" +240 29 optimizer """adadelta""" +240 29 training_loop """owa""" +240 29 negative_sampler """basic""" +240 29 evaluator """rankbased""" +240 30 dataset """kinships""" +240 30 model """ermlp""" +240 30 loss """bceaftersigmoid""" +240 30 regularizer """no""" +240 30 optimizer """adadelta""" +240 30 training_loop """owa""" +240 30 negative_sampler """basic""" +240 30 evaluator """rankbased""" +240 31 dataset """kinships""" +240 31 model """ermlp""" +240 31 loss """bceaftersigmoid""" +240 31 regularizer """no""" +240 31 optimizer """adadelta""" +240 31 training_loop """owa""" +240 31 negative_sampler """basic""" +240 31 evaluator """rankbased""" +240 32 dataset """kinships""" +240 32 model """ermlp""" +240 32 loss """bceaftersigmoid""" +240 32 regularizer """no""" +240 32 optimizer """adadelta""" +240 32 training_loop """owa""" +240 32 negative_sampler """basic""" +240 32 evaluator """rankbased""" +240 33 dataset """kinships""" +240 33 model """ermlp""" +240 33 loss 
"""bceaftersigmoid""" +240 33 regularizer """no""" +240 33 optimizer """adadelta""" +240 33 training_loop """owa""" +240 33 negative_sampler """basic""" +240 33 evaluator """rankbased""" +240 34 dataset """kinships""" +240 34 model """ermlp""" +240 34 loss """bceaftersigmoid""" +240 34 regularizer """no""" +240 34 optimizer """adadelta""" +240 34 training_loop """owa""" +240 34 negative_sampler """basic""" +240 34 evaluator """rankbased""" +240 35 dataset """kinships""" +240 35 model """ermlp""" +240 35 loss """bceaftersigmoid""" +240 35 regularizer """no""" +240 35 optimizer """adadelta""" +240 35 training_loop """owa""" +240 35 negative_sampler """basic""" +240 35 evaluator """rankbased""" +240 36 dataset """kinships""" +240 36 model """ermlp""" +240 36 loss """bceaftersigmoid""" +240 36 regularizer """no""" +240 36 optimizer """adadelta""" +240 36 training_loop """owa""" +240 36 negative_sampler """basic""" +240 36 evaluator """rankbased""" +240 37 dataset """kinships""" +240 37 model """ermlp""" +240 37 loss """bceaftersigmoid""" +240 37 regularizer """no""" +240 37 optimizer """adadelta""" +240 37 training_loop """owa""" +240 37 negative_sampler """basic""" +240 37 evaluator """rankbased""" +240 38 dataset """kinships""" +240 38 model """ermlp""" +240 38 loss """bceaftersigmoid""" +240 38 regularizer """no""" +240 38 optimizer """adadelta""" +240 38 training_loop """owa""" +240 38 negative_sampler """basic""" +240 38 evaluator """rankbased""" +240 39 dataset """kinships""" +240 39 model """ermlp""" +240 39 loss """bceaftersigmoid""" +240 39 regularizer """no""" +240 39 optimizer """adadelta""" +240 39 training_loop """owa""" +240 39 negative_sampler """basic""" +240 39 evaluator """rankbased""" +240 40 dataset """kinships""" +240 40 model """ermlp""" +240 40 loss """bceaftersigmoid""" +240 40 regularizer """no""" +240 40 optimizer """adadelta""" +240 40 training_loop """owa""" +240 40 negative_sampler """basic""" +240 40 evaluator """rankbased""" +240 41 
dataset """kinships""" +240 41 model """ermlp""" +240 41 loss """bceaftersigmoid""" +240 41 regularizer """no""" +240 41 optimizer """adadelta""" +240 41 training_loop """owa""" +240 41 negative_sampler """basic""" +240 41 evaluator """rankbased""" +240 42 dataset """kinships""" +240 42 model """ermlp""" +240 42 loss """bceaftersigmoid""" +240 42 regularizer """no""" +240 42 optimizer """adadelta""" +240 42 training_loop """owa""" +240 42 negative_sampler """basic""" +240 42 evaluator """rankbased""" +240 43 dataset """kinships""" +240 43 model """ermlp""" +240 43 loss """bceaftersigmoid""" +240 43 regularizer """no""" +240 43 optimizer """adadelta""" +240 43 training_loop """owa""" +240 43 negative_sampler """basic""" +240 43 evaluator """rankbased""" +240 44 dataset """kinships""" +240 44 model """ermlp""" +240 44 loss """bceaftersigmoid""" +240 44 regularizer """no""" +240 44 optimizer """adadelta""" +240 44 training_loop """owa""" +240 44 negative_sampler """basic""" +240 44 evaluator """rankbased""" +240 45 dataset """kinships""" +240 45 model """ermlp""" +240 45 loss """bceaftersigmoid""" +240 45 regularizer """no""" +240 45 optimizer """adadelta""" +240 45 training_loop """owa""" +240 45 negative_sampler """basic""" +240 45 evaluator """rankbased""" +240 46 dataset """kinships""" +240 46 model """ermlp""" +240 46 loss """bceaftersigmoid""" +240 46 regularizer """no""" +240 46 optimizer """adadelta""" +240 46 training_loop """owa""" +240 46 negative_sampler """basic""" +240 46 evaluator """rankbased""" +240 47 dataset """kinships""" +240 47 model """ermlp""" +240 47 loss """bceaftersigmoid""" +240 47 regularizer """no""" +240 47 optimizer """adadelta""" +240 47 training_loop """owa""" +240 47 negative_sampler """basic""" +240 47 evaluator """rankbased""" +240 48 dataset """kinships""" +240 48 model """ermlp""" +240 48 loss """bceaftersigmoid""" +240 48 regularizer """no""" +240 48 optimizer """adadelta""" +240 48 training_loop """owa""" +240 48 
negative_sampler """basic""" +240 48 evaluator """rankbased""" +240 49 dataset """kinships""" +240 49 model """ermlp""" +240 49 loss """bceaftersigmoid""" +240 49 regularizer """no""" +240 49 optimizer """adadelta""" +240 49 training_loop """owa""" +240 49 negative_sampler """basic""" +240 49 evaluator """rankbased""" +240 50 dataset """kinships""" +240 50 model """ermlp""" +240 50 loss """bceaftersigmoid""" +240 50 regularizer """no""" +240 50 optimizer """adadelta""" +240 50 training_loop """owa""" +240 50 negative_sampler """basic""" +240 50 evaluator """rankbased""" +240 51 dataset """kinships""" +240 51 model """ermlp""" +240 51 loss """bceaftersigmoid""" +240 51 regularizer """no""" +240 51 optimizer """adadelta""" +240 51 training_loop """owa""" +240 51 negative_sampler """basic""" +240 51 evaluator """rankbased""" +240 52 dataset """kinships""" +240 52 model """ermlp""" +240 52 loss """bceaftersigmoid""" +240 52 regularizer """no""" +240 52 optimizer """adadelta""" +240 52 training_loop """owa""" +240 52 negative_sampler """basic""" +240 52 evaluator """rankbased""" +240 53 dataset """kinships""" +240 53 model """ermlp""" +240 53 loss """bceaftersigmoid""" +240 53 regularizer """no""" +240 53 optimizer """adadelta""" +240 53 training_loop """owa""" +240 53 negative_sampler """basic""" +240 53 evaluator """rankbased""" +240 54 dataset """kinships""" +240 54 model """ermlp""" +240 54 loss """bceaftersigmoid""" +240 54 regularizer """no""" +240 54 optimizer """adadelta""" +240 54 training_loop """owa""" +240 54 negative_sampler """basic""" +240 54 evaluator """rankbased""" +240 55 dataset """kinships""" +240 55 model """ermlp""" +240 55 loss """bceaftersigmoid""" +240 55 regularizer """no""" +240 55 optimizer """adadelta""" +240 55 training_loop """owa""" +240 55 negative_sampler """basic""" +240 55 evaluator """rankbased""" +240 56 dataset """kinships""" +240 56 model """ermlp""" +240 56 loss """bceaftersigmoid""" +240 56 regularizer """no""" +240 56 
optimizer """adadelta""" +240 56 training_loop """owa""" +240 56 negative_sampler """basic""" +240 56 evaluator """rankbased""" +240 57 dataset """kinships""" +240 57 model """ermlp""" +240 57 loss """bceaftersigmoid""" +240 57 regularizer """no""" +240 57 optimizer """adadelta""" +240 57 training_loop """owa""" +240 57 negative_sampler """basic""" +240 57 evaluator """rankbased""" +240 58 dataset """kinships""" +240 58 model """ermlp""" +240 58 loss """bceaftersigmoid""" +240 58 regularizer """no""" +240 58 optimizer """adadelta""" +240 58 training_loop """owa""" +240 58 negative_sampler """basic""" +240 58 evaluator """rankbased""" +240 59 dataset """kinships""" +240 59 model """ermlp""" +240 59 loss """bceaftersigmoid""" +240 59 regularizer """no""" +240 59 optimizer """adadelta""" +240 59 training_loop """owa""" +240 59 negative_sampler """basic""" +240 59 evaluator """rankbased""" +240 60 dataset """kinships""" +240 60 model """ermlp""" +240 60 loss """bceaftersigmoid""" +240 60 regularizer """no""" +240 60 optimizer """adadelta""" +240 60 training_loop """owa""" +240 60 negative_sampler """basic""" +240 60 evaluator """rankbased""" +240 61 dataset """kinships""" +240 61 model """ermlp""" +240 61 loss """bceaftersigmoid""" +240 61 regularizer """no""" +240 61 optimizer """adadelta""" +240 61 training_loop """owa""" +240 61 negative_sampler """basic""" +240 61 evaluator """rankbased""" +240 62 dataset """kinships""" +240 62 model """ermlp""" +240 62 loss """bceaftersigmoid""" +240 62 regularizer """no""" +240 62 optimizer """adadelta""" +240 62 training_loop """owa""" +240 62 negative_sampler """basic""" +240 62 evaluator """rankbased""" +240 63 dataset """kinships""" +240 63 model """ermlp""" +240 63 loss """bceaftersigmoid""" +240 63 regularizer """no""" +240 63 optimizer """adadelta""" +240 63 training_loop """owa""" +240 63 negative_sampler """basic""" +240 63 evaluator """rankbased""" +240 64 dataset """kinships""" +240 64 model """ermlp""" +240 64 loss 
"""bceaftersigmoid""" +240 64 regularizer """no""" +240 64 optimizer """adadelta""" +240 64 training_loop """owa""" +240 64 negative_sampler """basic""" +240 64 evaluator """rankbased""" +240 65 dataset """kinships""" +240 65 model """ermlp""" +240 65 loss """bceaftersigmoid""" +240 65 regularizer """no""" +240 65 optimizer """adadelta""" +240 65 training_loop """owa""" +240 65 negative_sampler """basic""" +240 65 evaluator """rankbased""" +240 66 dataset """kinships""" +240 66 model """ermlp""" +240 66 loss """bceaftersigmoid""" +240 66 regularizer """no""" +240 66 optimizer """adadelta""" +240 66 training_loop """owa""" +240 66 negative_sampler """basic""" +240 66 evaluator """rankbased""" +240 67 dataset """kinships""" +240 67 model """ermlp""" +240 67 loss """bceaftersigmoid""" +240 67 regularizer """no""" +240 67 optimizer """adadelta""" +240 67 training_loop """owa""" +240 67 negative_sampler """basic""" +240 67 evaluator """rankbased""" +240 68 dataset """kinships""" +240 68 model """ermlp""" +240 68 loss """bceaftersigmoid""" +240 68 regularizer """no""" +240 68 optimizer """adadelta""" +240 68 training_loop """owa""" +240 68 negative_sampler """basic""" +240 68 evaluator """rankbased""" +240 69 dataset """kinships""" +240 69 model """ermlp""" +240 69 loss """bceaftersigmoid""" +240 69 regularizer """no""" +240 69 optimizer """adadelta""" +240 69 training_loop """owa""" +240 69 negative_sampler """basic""" +240 69 evaluator """rankbased""" +240 70 dataset """kinships""" +240 70 model """ermlp""" +240 70 loss """bceaftersigmoid""" +240 70 regularizer """no""" +240 70 optimizer """adadelta""" +240 70 training_loop """owa""" +240 70 negative_sampler """basic""" +240 70 evaluator """rankbased""" +240 71 dataset """kinships""" +240 71 model """ermlp""" +240 71 loss """bceaftersigmoid""" +240 71 regularizer """no""" +240 71 optimizer """adadelta""" +240 71 training_loop """owa""" +240 71 negative_sampler """basic""" +240 71 evaluator """rankbased""" +240 72 
dataset """kinships""" +240 72 model """ermlp""" +240 72 loss """bceaftersigmoid""" +240 72 regularizer """no""" +240 72 optimizer """adadelta""" +240 72 training_loop """owa""" +240 72 negative_sampler """basic""" +240 72 evaluator """rankbased""" +240 73 dataset """kinships""" +240 73 model """ermlp""" +240 73 loss """bceaftersigmoid""" +240 73 regularizer """no""" +240 73 optimizer """adadelta""" +240 73 training_loop """owa""" +240 73 negative_sampler """basic""" +240 73 evaluator """rankbased""" +240 74 dataset """kinships""" +240 74 model """ermlp""" +240 74 loss """bceaftersigmoid""" +240 74 regularizer """no""" +240 74 optimizer """adadelta""" +240 74 training_loop """owa""" +240 74 negative_sampler """basic""" +240 74 evaluator """rankbased""" +240 75 dataset """kinships""" +240 75 model """ermlp""" +240 75 loss """bceaftersigmoid""" +240 75 regularizer """no""" +240 75 optimizer """adadelta""" +240 75 training_loop """owa""" +240 75 negative_sampler """basic""" +240 75 evaluator """rankbased""" +240 76 dataset """kinships""" +240 76 model """ermlp""" +240 76 loss """bceaftersigmoid""" +240 76 regularizer """no""" +240 76 optimizer """adadelta""" +240 76 training_loop """owa""" +240 76 negative_sampler """basic""" +240 76 evaluator """rankbased""" +240 77 dataset """kinships""" +240 77 model """ermlp""" +240 77 loss """bceaftersigmoid""" +240 77 regularizer """no""" +240 77 optimizer """adadelta""" +240 77 training_loop """owa""" +240 77 negative_sampler """basic""" +240 77 evaluator """rankbased""" +240 78 dataset """kinships""" +240 78 model """ermlp""" +240 78 loss """bceaftersigmoid""" +240 78 regularizer """no""" +240 78 optimizer """adadelta""" +240 78 training_loop """owa""" +240 78 negative_sampler """basic""" +240 78 evaluator """rankbased""" +240 79 dataset """kinships""" +240 79 model """ermlp""" +240 79 loss """bceaftersigmoid""" +240 79 regularizer """no""" +240 79 optimizer """adadelta""" +240 79 training_loop """owa""" +240 79 
negative_sampler """basic""" +240 79 evaluator """rankbased""" +240 80 dataset """kinships""" +240 80 model """ermlp""" +240 80 loss """bceaftersigmoid""" +240 80 regularizer """no""" +240 80 optimizer """adadelta""" +240 80 training_loop """owa""" +240 80 negative_sampler """basic""" +240 80 evaluator """rankbased""" +240 81 dataset """kinships""" +240 81 model """ermlp""" +240 81 loss """bceaftersigmoid""" +240 81 regularizer """no""" +240 81 optimizer """adadelta""" +240 81 training_loop """owa""" +240 81 negative_sampler """basic""" +240 81 evaluator """rankbased""" +240 82 dataset """kinships""" +240 82 model """ermlp""" +240 82 loss """bceaftersigmoid""" +240 82 regularizer """no""" +240 82 optimizer """adadelta""" +240 82 training_loop """owa""" +240 82 negative_sampler """basic""" +240 82 evaluator """rankbased""" +240 83 dataset """kinships""" +240 83 model """ermlp""" +240 83 loss """bceaftersigmoid""" +240 83 regularizer """no""" +240 83 optimizer """adadelta""" +240 83 training_loop """owa""" +240 83 negative_sampler """basic""" +240 83 evaluator """rankbased""" +240 84 dataset """kinships""" +240 84 model """ermlp""" +240 84 loss """bceaftersigmoid""" +240 84 regularizer """no""" +240 84 optimizer """adadelta""" +240 84 training_loop """owa""" +240 84 negative_sampler """basic""" +240 84 evaluator """rankbased""" +240 85 dataset """kinships""" +240 85 model """ermlp""" +240 85 loss """bceaftersigmoid""" +240 85 regularizer """no""" +240 85 optimizer """adadelta""" +240 85 training_loop """owa""" +240 85 negative_sampler """basic""" +240 85 evaluator """rankbased""" +240 86 dataset """kinships""" +240 86 model """ermlp""" +240 86 loss """bceaftersigmoid""" +240 86 regularizer """no""" +240 86 optimizer """adadelta""" +240 86 training_loop """owa""" +240 86 negative_sampler """basic""" +240 86 evaluator """rankbased""" +240 87 dataset """kinships""" +240 87 model """ermlp""" +240 87 loss """bceaftersigmoid""" +240 87 regularizer """no""" +240 87 
optimizer """adadelta""" +240 87 training_loop """owa""" +240 87 negative_sampler """basic""" +240 87 evaluator """rankbased""" +240 88 dataset """kinships""" +240 88 model """ermlp""" +240 88 loss """bceaftersigmoid""" +240 88 regularizer """no""" +240 88 optimizer """adadelta""" +240 88 training_loop """owa""" +240 88 negative_sampler """basic""" +240 88 evaluator """rankbased""" +240 89 dataset """kinships""" +240 89 model """ermlp""" +240 89 loss """bceaftersigmoid""" +240 89 regularizer """no""" +240 89 optimizer """adadelta""" +240 89 training_loop """owa""" +240 89 negative_sampler """basic""" +240 89 evaluator """rankbased""" +240 90 dataset """kinships""" +240 90 model """ermlp""" +240 90 loss """bceaftersigmoid""" +240 90 regularizer """no""" +240 90 optimizer """adadelta""" +240 90 training_loop """owa""" +240 90 negative_sampler """basic""" +240 90 evaluator """rankbased""" +240 91 dataset """kinships""" +240 91 model """ermlp""" +240 91 loss """bceaftersigmoid""" +240 91 regularizer """no""" +240 91 optimizer """adadelta""" +240 91 training_loop """owa""" +240 91 negative_sampler """basic""" +240 91 evaluator """rankbased""" +240 92 dataset """kinships""" +240 92 model """ermlp""" +240 92 loss """bceaftersigmoid""" +240 92 regularizer """no""" +240 92 optimizer """adadelta""" +240 92 training_loop """owa""" +240 92 negative_sampler """basic""" +240 92 evaluator """rankbased""" +240 93 dataset """kinships""" +240 93 model """ermlp""" +240 93 loss """bceaftersigmoid""" +240 93 regularizer """no""" +240 93 optimizer """adadelta""" +240 93 training_loop """owa""" +240 93 negative_sampler """basic""" +240 93 evaluator """rankbased""" +240 94 dataset """kinships""" +240 94 model """ermlp""" +240 94 loss """bceaftersigmoid""" +240 94 regularizer """no""" +240 94 optimizer """adadelta""" +240 94 training_loop """owa""" +240 94 negative_sampler """basic""" +240 94 evaluator """rankbased""" +240 95 dataset """kinships""" +240 95 model """ermlp""" +240 95 loss 
"""bceaftersigmoid""" +240 95 regularizer """no""" +240 95 optimizer """adadelta""" +240 95 training_loop """owa""" +240 95 negative_sampler """basic""" +240 95 evaluator """rankbased""" +240 96 dataset """kinships""" +240 96 model """ermlp""" +240 96 loss """bceaftersigmoid""" +240 96 regularizer """no""" +240 96 optimizer """adadelta""" +240 96 training_loop """owa""" +240 96 negative_sampler """basic""" +240 96 evaluator """rankbased""" +240 97 dataset """kinships""" +240 97 model """ermlp""" +240 97 loss """bceaftersigmoid""" +240 97 regularizer """no""" +240 97 optimizer """adadelta""" +240 97 training_loop """owa""" +240 97 negative_sampler """basic""" +240 97 evaluator """rankbased""" +240 98 dataset """kinships""" +240 98 model """ermlp""" +240 98 loss """bceaftersigmoid""" +240 98 regularizer """no""" +240 98 optimizer """adadelta""" +240 98 training_loop """owa""" +240 98 negative_sampler """basic""" +240 98 evaluator """rankbased""" +240 99 dataset """kinships""" +240 99 model """ermlp""" +240 99 loss """bceaftersigmoid""" +240 99 regularizer """no""" +240 99 optimizer """adadelta""" +240 99 training_loop """owa""" +240 99 negative_sampler """basic""" +240 99 evaluator """rankbased""" +240 100 dataset """kinships""" +240 100 model """ermlp""" +240 100 loss """bceaftersigmoid""" +240 100 regularizer """no""" +240 100 optimizer """adadelta""" +240 100 training_loop """owa""" +240 100 negative_sampler """basic""" +240 100 evaluator """rankbased""" +241 1 model.embedding_dim 0.0 +241 1 negative_sampler.num_negs_per_pos 95.0 +241 1 training.batch_size 2.0 +241 2 model.embedding_dim 0.0 +241 2 negative_sampler.num_negs_per_pos 67.0 +241 2 training.batch_size 2.0 +241 3 model.embedding_dim 0.0 +241 3 negative_sampler.num_negs_per_pos 11.0 +241 3 training.batch_size 1.0 +241 4 model.embedding_dim 0.0 +241 4 negative_sampler.num_negs_per_pos 35.0 +241 4 training.batch_size 0.0 +241 5 model.embedding_dim 2.0 +241 5 negative_sampler.num_negs_per_pos 21.0 +241 5 
training.batch_size 2.0 +241 6 model.embedding_dim 1.0 +241 6 negative_sampler.num_negs_per_pos 31.0 +241 6 training.batch_size 0.0 +241 7 model.embedding_dim 1.0 +241 7 negative_sampler.num_negs_per_pos 29.0 +241 7 training.batch_size 1.0 +241 8 model.embedding_dim 2.0 +241 8 negative_sampler.num_negs_per_pos 81.0 +241 8 training.batch_size 0.0 +241 9 model.embedding_dim 1.0 +241 9 negative_sampler.num_negs_per_pos 71.0 +241 9 training.batch_size 1.0 +241 10 model.embedding_dim 0.0 +241 10 negative_sampler.num_negs_per_pos 70.0 +241 10 training.batch_size 0.0 +241 11 model.embedding_dim 0.0 +241 11 negative_sampler.num_negs_per_pos 37.0 +241 11 training.batch_size 0.0 +241 12 model.embedding_dim 2.0 +241 12 negative_sampler.num_negs_per_pos 59.0 +241 12 training.batch_size 2.0 +241 13 model.embedding_dim 1.0 +241 13 negative_sampler.num_negs_per_pos 28.0 +241 13 training.batch_size 2.0 +241 14 model.embedding_dim 1.0 +241 14 negative_sampler.num_negs_per_pos 89.0 +241 14 training.batch_size 1.0 +241 15 model.embedding_dim 0.0 +241 15 negative_sampler.num_negs_per_pos 44.0 +241 15 training.batch_size 0.0 +241 16 model.embedding_dim 0.0 +241 16 negative_sampler.num_negs_per_pos 22.0 +241 16 training.batch_size 1.0 +241 17 model.embedding_dim 1.0 +241 17 negative_sampler.num_negs_per_pos 97.0 +241 17 training.batch_size 0.0 +241 18 model.embedding_dim 2.0 +241 18 negative_sampler.num_negs_per_pos 26.0 +241 18 training.batch_size 0.0 +241 19 model.embedding_dim 2.0 +241 19 negative_sampler.num_negs_per_pos 25.0 +241 19 training.batch_size 1.0 +241 20 model.embedding_dim 2.0 +241 20 negative_sampler.num_negs_per_pos 71.0 +241 20 training.batch_size 2.0 +241 21 model.embedding_dim 0.0 +241 21 negative_sampler.num_negs_per_pos 9.0 +241 21 training.batch_size 0.0 +241 22 model.embedding_dim 2.0 +241 22 negative_sampler.num_negs_per_pos 52.0 +241 22 training.batch_size 1.0 +241 23 model.embedding_dim 1.0 +241 23 negative_sampler.num_negs_per_pos 82.0 +241 23 
training.batch_size 0.0 +241 24 model.embedding_dim 0.0 +241 24 negative_sampler.num_negs_per_pos 91.0 +241 24 training.batch_size 0.0 +241 25 model.embedding_dim 0.0 +241 25 negative_sampler.num_negs_per_pos 49.0 +241 25 training.batch_size 1.0 +241 26 model.embedding_dim 0.0 +241 26 negative_sampler.num_negs_per_pos 48.0 +241 26 training.batch_size 2.0 +241 27 model.embedding_dim 1.0 +241 27 negative_sampler.num_negs_per_pos 1.0 +241 27 training.batch_size 2.0 +241 28 model.embedding_dim 2.0 +241 28 negative_sampler.num_negs_per_pos 62.0 +241 28 training.batch_size 2.0 +241 29 model.embedding_dim 1.0 +241 29 negative_sampler.num_negs_per_pos 22.0 +241 29 training.batch_size 2.0 +241 30 model.embedding_dim 1.0 +241 30 negative_sampler.num_negs_per_pos 12.0 +241 30 training.batch_size 0.0 +241 31 model.embedding_dim 2.0 +241 31 negative_sampler.num_negs_per_pos 5.0 +241 31 training.batch_size 1.0 +241 32 model.embedding_dim 1.0 +241 32 negative_sampler.num_negs_per_pos 86.0 +241 32 training.batch_size 1.0 +241 33 model.embedding_dim 2.0 +241 33 negative_sampler.num_negs_per_pos 63.0 +241 33 training.batch_size 2.0 +241 34 model.embedding_dim 2.0 +241 34 negative_sampler.num_negs_per_pos 65.0 +241 34 training.batch_size 2.0 +241 35 model.embedding_dim 2.0 +241 35 negative_sampler.num_negs_per_pos 19.0 +241 35 training.batch_size 1.0 +241 36 model.embedding_dim 0.0 +241 36 negative_sampler.num_negs_per_pos 53.0 +241 36 training.batch_size 1.0 +241 37 model.embedding_dim 2.0 +241 37 negative_sampler.num_negs_per_pos 25.0 +241 37 training.batch_size 2.0 +241 38 model.embedding_dim 0.0 +241 38 negative_sampler.num_negs_per_pos 41.0 +241 38 training.batch_size 1.0 +241 39 model.embedding_dim 1.0 +241 39 negative_sampler.num_negs_per_pos 46.0 +241 39 training.batch_size 0.0 +241 40 model.embedding_dim 1.0 +241 40 negative_sampler.num_negs_per_pos 19.0 +241 40 training.batch_size 1.0 +241 41 model.embedding_dim 1.0 +241 41 negative_sampler.num_negs_per_pos 4.0 +241 41 
training.batch_size 0.0 +241 42 model.embedding_dim 1.0 +241 42 negative_sampler.num_negs_per_pos 50.0 +241 42 training.batch_size 0.0 +241 43 model.embedding_dim 2.0 +241 43 negative_sampler.num_negs_per_pos 23.0 +241 43 training.batch_size 1.0 +241 44 model.embedding_dim 0.0 +241 44 negative_sampler.num_negs_per_pos 46.0 +241 44 training.batch_size 0.0 +241 45 model.embedding_dim 2.0 +241 45 negative_sampler.num_negs_per_pos 35.0 +241 45 training.batch_size 1.0 +241 46 model.embedding_dim 1.0 +241 46 negative_sampler.num_negs_per_pos 33.0 +241 46 training.batch_size 2.0 +241 47 model.embedding_dim 2.0 +241 47 negative_sampler.num_negs_per_pos 80.0 +241 47 training.batch_size 2.0 +241 48 model.embedding_dim 0.0 +241 48 negative_sampler.num_negs_per_pos 90.0 +241 48 training.batch_size 1.0 +241 49 model.embedding_dim 0.0 +241 49 negative_sampler.num_negs_per_pos 54.0 +241 49 training.batch_size 1.0 +241 50 model.embedding_dim 0.0 +241 50 negative_sampler.num_negs_per_pos 94.0 +241 50 training.batch_size 1.0 +241 51 model.embedding_dim 1.0 +241 51 negative_sampler.num_negs_per_pos 91.0 +241 51 training.batch_size 2.0 +241 52 model.embedding_dim 0.0 +241 52 negative_sampler.num_negs_per_pos 71.0 +241 52 training.batch_size 0.0 +241 53 model.embedding_dim 2.0 +241 53 negative_sampler.num_negs_per_pos 34.0 +241 53 training.batch_size 1.0 +241 54 model.embedding_dim 2.0 +241 54 negative_sampler.num_negs_per_pos 98.0 +241 54 training.batch_size 0.0 +241 55 model.embedding_dim 2.0 +241 55 negative_sampler.num_negs_per_pos 46.0 +241 55 training.batch_size 0.0 +241 56 model.embedding_dim 0.0 +241 56 negative_sampler.num_negs_per_pos 58.0 +241 56 training.batch_size 2.0 +241 57 model.embedding_dim 2.0 +241 57 negative_sampler.num_negs_per_pos 72.0 +241 57 training.batch_size 0.0 +241 58 model.embedding_dim 0.0 +241 58 negative_sampler.num_negs_per_pos 7.0 +241 58 training.batch_size 1.0 +241 59 model.embedding_dim 2.0 +241 59 negative_sampler.num_negs_per_pos 79.0 +241 59 
training.batch_size 1.0 +241 60 model.embedding_dim 2.0 +241 60 negative_sampler.num_negs_per_pos 53.0 +241 60 training.batch_size 1.0 +241 61 model.embedding_dim 0.0 +241 61 negative_sampler.num_negs_per_pos 86.0 +241 61 training.batch_size 1.0 +241 62 model.embedding_dim 1.0 +241 62 negative_sampler.num_negs_per_pos 82.0 +241 62 training.batch_size 2.0 +241 63 model.embedding_dim 2.0 +241 63 negative_sampler.num_negs_per_pos 50.0 +241 63 training.batch_size 2.0 +241 64 model.embedding_dim 1.0 +241 64 negative_sampler.num_negs_per_pos 93.0 +241 64 training.batch_size 1.0 +241 65 model.embedding_dim 2.0 +241 65 negative_sampler.num_negs_per_pos 11.0 +241 65 training.batch_size 1.0 +241 66 model.embedding_dim 0.0 +241 66 negative_sampler.num_negs_per_pos 75.0 +241 66 training.batch_size 0.0 +241 67 model.embedding_dim 2.0 +241 67 negative_sampler.num_negs_per_pos 50.0 +241 67 training.batch_size 2.0 +241 68 model.embedding_dim 0.0 +241 68 negative_sampler.num_negs_per_pos 48.0 +241 68 training.batch_size 0.0 +241 69 model.embedding_dim 1.0 +241 69 negative_sampler.num_negs_per_pos 37.0 +241 69 training.batch_size 0.0 +241 70 model.embedding_dim 2.0 +241 70 negative_sampler.num_negs_per_pos 99.0 +241 70 training.batch_size 2.0 +241 71 model.embedding_dim 1.0 +241 71 negative_sampler.num_negs_per_pos 77.0 +241 71 training.batch_size 2.0 +241 72 model.embedding_dim 0.0 +241 72 negative_sampler.num_negs_per_pos 33.0 +241 72 training.batch_size 1.0 +241 73 model.embedding_dim 0.0 +241 73 negative_sampler.num_negs_per_pos 17.0 +241 73 training.batch_size 2.0 +241 74 model.embedding_dim 2.0 +241 74 negative_sampler.num_negs_per_pos 2.0 +241 74 training.batch_size 0.0 +241 75 model.embedding_dim 1.0 +241 75 negative_sampler.num_negs_per_pos 53.0 +241 75 training.batch_size 0.0 +241 76 model.embedding_dim 2.0 +241 76 negative_sampler.num_negs_per_pos 16.0 +241 76 training.batch_size 2.0 +241 77 model.embedding_dim 0.0 +241 77 negative_sampler.num_negs_per_pos 12.0 +241 77 
training.batch_size 1.0 +241 78 model.embedding_dim 2.0 +241 78 negative_sampler.num_negs_per_pos 94.0 +241 78 training.batch_size 1.0 +241 79 model.embedding_dim 0.0 +241 79 negative_sampler.num_negs_per_pos 64.0 +241 79 training.batch_size 0.0 +241 80 model.embedding_dim 0.0 +241 80 negative_sampler.num_negs_per_pos 51.0 +241 80 training.batch_size 0.0 +241 81 model.embedding_dim 1.0 +241 81 negative_sampler.num_negs_per_pos 67.0 +241 81 training.batch_size 0.0 +241 82 model.embedding_dim 0.0 +241 82 negative_sampler.num_negs_per_pos 0.0 +241 82 training.batch_size 0.0 +241 83 model.embedding_dim 1.0 +241 83 negative_sampler.num_negs_per_pos 58.0 +241 83 training.batch_size 0.0 +241 84 model.embedding_dim 0.0 +241 84 negative_sampler.num_negs_per_pos 9.0 +241 84 training.batch_size 1.0 +241 85 model.embedding_dim 1.0 +241 85 negative_sampler.num_negs_per_pos 64.0 +241 85 training.batch_size 2.0 +241 86 model.embedding_dim 1.0 +241 86 negative_sampler.num_negs_per_pos 58.0 +241 86 training.batch_size 2.0 +241 87 model.embedding_dim 2.0 +241 87 negative_sampler.num_negs_per_pos 36.0 +241 87 training.batch_size 2.0 +241 88 model.embedding_dim 0.0 +241 88 negative_sampler.num_negs_per_pos 90.0 +241 88 training.batch_size 2.0 +241 89 model.embedding_dim 2.0 +241 89 negative_sampler.num_negs_per_pos 56.0 +241 89 training.batch_size 2.0 +241 90 model.embedding_dim 0.0 +241 90 negative_sampler.num_negs_per_pos 12.0 +241 90 training.batch_size 2.0 +241 91 model.embedding_dim 2.0 +241 91 negative_sampler.num_negs_per_pos 67.0 +241 91 training.batch_size 1.0 +241 92 model.embedding_dim 0.0 +241 92 negative_sampler.num_negs_per_pos 41.0 +241 92 training.batch_size 2.0 +241 93 model.embedding_dim 0.0 +241 93 negative_sampler.num_negs_per_pos 21.0 +241 93 training.batch_size 0.0 +241 94 model.embedding_dim 1.0 +241 94 negative_sampler.num_negs_per_pos 6.0 +241 94 training.batch_size 1.0 +241 95 model.embedding_dim 1.0 +241 95 negative_sampler.num_negs_per_pos 23.0 +241 95 
training.batch_size 1.0 +241 96 model.embedding_dim 0.0 +241 96 negative_sampler.num_negs_per_pos 75.0 +241 96 training.batch_size 0.0 +241 97 model.embedding_dim 1.0 +241 97 negative_sampler.num_negs_per_pos 5.0 +241 97 training.batch_size 0.0 +241 98 model.embedding_dim 0.0 +241 98 negative_sampler.num_negs_per_pos 67.0 +241 98 training.batch_size 2.0 +241 99 model.embedding_dim 1.0 +241 99 negative_sampler.num_negs_per_pos 91.0 +241 99 training.batch_size 2.0 +241 100 model.embedding_dim 1.0 +241 100 negative_sampler.num_negs_per_pos 73.0 +241 100 training.batch_size 2.0 +241 1 dataset """kinships""" +241 1 model """ermlp""" +241 1 loss """softplus""" +241 1 regularizer """no""" +241 1 optimizer """adadelta""" +241 1 training_loop """owa""" +241 1 negative_sampler """basic""" +241 1 evaluator """rankbased""" +241 2 dataset """kinships""" +241 2 model """ermlp""" +241 2 loss """softplus""" +241 2 regularizer """no""" +241 2 optimizer """adadelta""" +241 2 training_loop """owa""" +241 2 negative_sampler """basic""" +241 2 evaluator """rankbased""" +241 3 dataset """kinships""" +241 3 model """ermlp""" +241 3 loss """softplus""" +241 3 regularizer """no""" +241 3 optimizer """adadelta""" +241 3 training_loop """owa""" +241 3 negative_sampler """basic""" +241 3 evaluator """rankbased""" +241 4 dataset """kinships""" +241 4 model """ermlp""" +241 4 loss """softplus""" +241 4 regularizer """no""" +241 4 optimizer """adadelta""" +241 4 training_loop """owa""" +241 4 negative_sampler """basic""" +241 4 evaluator """rankbased""" +241 5 dataset """kinships""" +241 5 model """ermlp""" +241 5 loss """softplus""" +241 5 regularizer """no""" +241 5 optimizer """adadelta""" +241 5 training_loop """owa""" +241 5 negative_sampler """basic""" +241 5 evaluator """rankbased""" +241 6 dataset """kinships""" +241 6 model """ermlp""" +241 6 loss """softplus""" +241 6 regularizer """no""" +241 6 optimizer """adadelta""" +241 6 training_loop """owa""" +241 6 negative_sampler """basic""" 
+241 6 evaluator """rankbased""" +241 7 dataset """kinships""" +241 7 model """ermlp""" +241 7 loss """softplus""" +241 7 regularizer """no""" +241 7 optimizer """adadelta""" +241 7 training_loop """owa""" +241 7 negative_sampler """basic""" +241 7 evaluator """rankbased""" +241 8 dataset """kinships""" +241 8 model """ermlp""" +241 8 loss """softplus""" +241 8 regularizer """no""" +241 8 optimizer """adadelta""" +241 8 training_loop """owa""" +241 8 negative_sampler """basic""" +241 8 evaluator """rankbased""" +241 9 dataset """kinships""" +241 9 model """ermlp""" +241 9 loss """softplus""" +241 9 regularizer """no""" +241 9 optimizer """adadelta""" +241 9 training_loop """owa""" +241 9 negative_sampler """basic""" +241 9 evaluator """rankbased""" +241 10 dataset """kinships""" +241 10 model """ermlp""" +241 10 loss """softplus""" +241 10 regularizer """no""" +241 10 optimizer """adadelta""" +241 10 training_loop """owa""" +241 10 negative_sampler """basic""" +241 10 evaluator """rankbased""" +241 11 dataset """kinships""" +241 11 model """ermlp""" +241 11 loss """softplus""" +241 11 regularizer """no""" +241 11 optimizer """adadelta""" +241 11 training_loop """owa""" +241 11 negative_sampler """basic""" +241 11 evaluator """rankbased""" +241 12 dataset """kinships""" +241 12 model """ermlp""" +241 12 loss """softplus""" +241 12 regularizer """no""" +241 12 optimizer """adadelta""" +241 12 training_loop """owa""" +241 12 negative_sampler """basic""" +241 12 evaluator """rankbased""" +241 13 dataset """kinships""" +241 13 model """ermlp""" +241 13 loss """softplus""" +241 13 regularizer """no""" +241 13 optimizer """adadelta""" +241 13 training_loop """owa""" +241 13 negative_sampler """basic""" +241 13 evaluator """rankbased""" +241 14 dataset """kinships""" +241 14 model """ermlp""" +241 14 loss """softplus""" +241 14 regularizer """no""" +241 14 optimizer """adadelta""" +241 14 training_loop """owa""" +241 14 negative_sampler """basic""" +241 14 evaluator 
"""rankbased""" +241 15 dataset """kinships""" +241 15 model """ermlp""" +241 15 loss """softplus""" +241 15 regularizer """no""" +241 15 optimizer """adadelta""" +241 15 training_loop """owa""" +241 15 negative_sampler """basic""" +241 15 evaluator """rankbased""" +241 16 dataset """kinships""" +241 16 model """ermlp""" +241 16 loss """softplus""" +241 16 regularizer """no""" +241 16 optimizer """adadelta""" +241 16 training_loop """owa""" +241 16 negative_sampler """basic""" +241 16 evaluator """rankbased""" +241 17 dataset """kinships""" +241 17 model """ermlp""" +241 17 loss """softplus""" +241 17 regularizer """no""" +241 17 optimizer """adadelta""" +241 17 training_loop """owa""" +241 17 negative_sampler """basic""" +241 17 evaluator """rankbased""" +241 18 dataset """kinships""" +241 18 model """ermlp""" +241 18 loss """softplus""" +241 18 regularizer """no""" +241 18 optimizer """adadelta""" +241 18 training_loop """owa""" +241 18 negative_sampler """basic""" +241 18 evaluator """rankbased""" +241 19 dataset """kinships""" +241 19 model """ermlp""" +241 19 loss """softplus""" +241 19 regularizer """no""" +241 19 optimizer """adadelta""" +241 19 training_loop """owa""" +241 19 negative_sampler """basic""" +241 19 evaluator """rankbased""" +241 20 dataset """kinships""" +241 20 model """ermlp""" +241 20 loss """softplus""" +241 20 regularizer """no""" +241 20 optimizer """adadelta""" +241 20 training_loop """owa""" +241 20 negative_sampler """basic""" +241 20 evaluator """rankbased""" +241 21 dataset """kinships""" +241 21 model """ermlp""" +241 21 loss """softplus""" +241 21 regularizer """no""" +241 21 optimizer """adadelta""" +241 21 training_loop """owa""" +241 21 negative_sampler """basic""" +241 21 evaluator """rankbased""" +241 22 dataset """kinships""" +241 22 model """ermlp""" +241 22 loss """softplus""" +241 22 regularizer """no""" +241 22 optimizer """adadelta""" +241 22 training_loop """owa""" +241 22 negative_sampler """basic""" +241 22 evaluator 
"""rankbased""" +241 23 dataset """kinships""" +241 23 model """ermlp""" +241 23 loss """softplus""" +241 23 regularizer """no""" +241 23 optimizer """adadelta""" +241 23 training_loop """owa""" +241 23 negative_sampler """basic""" +241 23 evaluator """rankbased""" +241 24 dataset """kinships""" +241 24 model """ermlp""" +241 24 loss """softplus""" +241 24 regularizer """no""" +241 24 optimizer """adadelta""" +241 24 training_loop """owa""" +241 24 negative_sampler """basic""" +241 24 evaluator """rankbased""" +241 25 dataset """kinships""" +241 25 model """ermlp""" +241 25 loss """softplus""" +241 25 regularizer """no""" +241 25 optimizer """adadelta""" +241 25 training_loop """owa""" +241 25 negative_sampler """basic""" +241 25 evaluator """rankbased""" +241 26 dataset """kinships""" +241 26 model """ermlp""" +241 26 loss """softplus""" +241 26 regularizer """no""" +241 26 optimizer """adadelta""" +241 26 training_loop """owa""" +241 26 negative_sampler """basic""" +241 26 evaluator """rankbased""" +241 27 dataset """kinships""" +241 27 model """ermlp""" +241 27 loss """softplus""" +241 27 regularizer """no""" +241 27 optimizer """adadelta""" +241 27 training_loop """owa""" +241 27 negative_sampler """basic""" +241 27 evaluator """rankbased""" +241 28 dataset """kinships""" +241 28 model """ermlp""" +241 28 loss """softplus""" +241 28 regularizer """no""" +241 28 optimizer """adadelta""" +241 28 training_loop """owa""" +241 28 negative_sampler """basic""" +241 28 evaluator """rankbased""" +241 29 dataset """kinships""" +241 29 model """ermlp""" +241 29 loss """softplus""" +241 29 regularizer """no""" +241 29 optimizer """adadelta""" +241 29 training_loop """owa""" +241 29 negative_sampler """basic""" +241 29 evaluator """rankbased""" +241 30 dataset """kinships""" +241 30 model """ermlp""" +241 30 loss """softplus""" +241 30 regularizer """no""" +241 30 optimizer """adadelta""" +241 30 training_loop """owa""" +241 30 negative_sampler """basic""" +241 30 evaluator 
"""rankbased""" +241 31 dataset """kinships""" +241 31 model """ermlp""" +241 31 loss """softplus""" +241 31 regularizer """no""" +241 31 optimizer """adadelta""" +241 31 training_loop """owa""" +241 31 negative_sampler """basic""" +241 31 evaluator """rankbased""" +241 32 dataset """kinships""" +241 32 model """ermlp""" +241 32 loss """softplus""" +241 32 regularizer """no""" +241 32 optimizer """adadelta""" +241 32 training_loop """owa""" +241 32 negative_sampler """basic""" +241 32 evaluator """rankbased""" +241 33 dataset """kinships""" +241 33 model """ermlp""" +241 33 loss """softplus""" +241 33 regularizer """no""" +241 33 optimizer """adadelta""" +241 33 training_loop """owa""" +241 33 negative_sampler """basic""" +241 33 evaluator """rankbased""" +241 34 dataset """kinships""" +241 34 model """ermlp""" +241 34 loss """softplus""" +241 34 regularizer """no""" +241 34 optimizer """adadelta""" +241 34 training_loop """owa""" +241 34 negative_sampler """basic""" +241 34 evaluator """rankbased""" +241 35 dataset """kinships""" +241 35 model """ermlp""" +241 35 loss """softplus""" +241 35 regularizer """no""" +241 35 optimizer """adadelta""" +241 35 training_loop """owa""" +241 35 negative_sampler """basic""" +241 35 evaluator """rankbased""" +241 36 dataset """kinships""" +241 36 model """ermlp""" +241 36 loss """softplus""" +241 36 regularizer """no""" +241 36 optimizer """adadelta""" +241 36 training_loop """owa""" +241 36 negative_sampler """basic""" +241 36 evaluator """rankbased""" +241 37 dataset """kinships""" +241 37 model """ermlp""" +241 37 loss """softplus""" +241 37 regularizer """no""" +241 37 optimizer """adadelta""" +241 37 training_loop """owa""" +241 37 negative_sampler """basic""" +241 37 evaluator """rankbased""" +241 38 dataset """kinships""" +241 38 model """ermlp""" +241 38 loss """softplus""" +241 38 regularizer """no""" +241 38 optimizer """adadelta""" +241 38 training_loop """owa""" +241 38 negative_sampler """basic""" +241 38 evaluator 
"""rankbased""" +241 39 dataset """kinships""" +241 39 model """ermlp""" +241 39 loss """softplus""" +241 39 regularizer """no""" +241 39 optimizer """adadelta""" +241 39 training_loop """owa""" +241 39 negative_sampler """basic""" +241 39 evaluator """rankbased""" +241 40 dataset """kinships""" +241 40 model """ermlp""" +241 40 loss """softplus""" +241 40 regularizer """no""" +241 40 optimizer """adadelta""" +241 40 training_loop """owa""" +241 40 negative_sampler """basic""" +241 40 evaluator """rankbased""" +241 41 dataset """kinships""" +241 41 model """ermlp""" +241 41 loss """softplus""" +241 41 regularizer """no""" +241 41 optimizer """adadelta""" +241 41 training_loop """owa""" +241 41 negative_sampler """basic""" +241 41 evaluator """rankbased""" +241 42 dataset """kinships""" +241 42 model """ermlp""" +241 42 loss """softplus""" +241 42 regularizer """no""" +241 42 optimizer """adadelta""" +241 42 training_loop """owa""" +241 42 negative_sampler """basic""" +241 42 evaluator """rankbased""" +241 43 dataset """kinships""" +241 43 model """ermlp""" +241 43 loss """softplus""" +241 43 regularizer """no""" +241 43 optimizer """adadelta""" +241 43 training_loop """owa""" +241 43 negative_sampler """basic""" +241 43 evaluator """rankbased""" +241 44 dataset """kinships""" +241 44 model """ermlp""" +241 44 loss """softplus""" +241 44 regularizer """no""" +241 44 optimizer """adadelta""" +241 44 training_loop """owa""" +241 44 negative_sampler """basic""" +241 44 evaluator """rankbased""" +241 45 dataset """kinships""" +241 45 model """ermlp""" +241 45 loss """softplus""" +241 45 regularizer """no""" +241 45 optimizer """adadelta""" +241 45 training_loop """owa""" +241 45 negative_sampler """basic""" +241 45 evaluator """rankbased""" +241 46 dataset """kinships""" +241 46 model """ermlp""" +241 46 loss """softplus""" +241 46 regularizer """no""" +241 46 optimizer """adadelta""" +241 46 training_loop """owa""" +241 46 negative_sampler """basic""" +241 46 evaluator 
"""rankbased""" +241 47 dataset """kinships""" +241 47 model """ermlp""" +241 47 loss """softplus""" +241 47 regularizer """no""" +241 47 optimizer """adadelta""" +241 47 training_loop """owa""" +241 47 negative_sampler """basic""" +241 47 evaluator """rankbased""" +241 48 dataset """kinships""" +241 48 model """ermlp""" +241 48 loss """softplus""" +241 48 regularizer """no""" +241 48 optimizer """adadelta""" +241 48 training_loop """owa""" +241 48 negative_sampler """basic""" +241 48 evaluator """rankbased""" +241 49 dataset """kinships""" +241 49 model """ermlp""" +241 49 loss """softplus""" +241 49 regularizer """no""" +241 49 optimizer """adadelta""" +241 49 training_loop """owa""" +241 49 negative_sampler """basic""" +241 49 evaluator """rankbased""" +241 50 dataset """kinships""" +241 50 model """ermlp""" +241 50 loss """softplus""" +241 50 regularizer """no""" +241 50 optimizer """adadelta""" +241 50 training_loop """owa""" +241 50 negative_sampler """basic""" +241 50 evaluator """rankbased""" +241 51 dataset """kinships""" +241 51 model """ermlp""" +241 51 loss """softplus""" +241 51 regularizer """no""" +241 51 optimizer """adadelta""" +241 51 training_loop """owa""" +241 51 negative_sampler """basic""" +241 51 evaluator """rankbased""" +241 52 dataset """kinships""" +241 52 model """ermlp""" +241 52 loss """softplus""" +241 52 regularizer """no""" +241 52 optimizer """adadelta""" +241 52 training_loop """owa""" +241 52 negative_sampler """basic""" +241 52 evaluator """rankbased""" +241 53 dataset """kinships""" +241 53 model """ermlp""" +241 53 loss """softplus""" +241 53 regularizer """no""" +241 53 optimizer """adadelta""" +241 53 training_loop """owa""" +241 53 negative_sampler """basic""" +241 53 evaluator """rankbased""" +241 54 dataset """kinships""" +241 54 model """ermlp""" +241 54 loss """softplus""" +241 54 regularizer """no""" +241 54 optimizer """adadelta""" +241 54 training_loop """owa""" +241 54 negative_sampler """basic""" +241 54 evaluator 
"""rankbased""" +241 55 dataset """kinships""" +241 55 model """ermlp""" +241 55 loss """softplus""" +241 55 regularizer """no""" +241 55 optimizer """adadelta""" +241 55 training_loop """owa""" +241 55 negative_sampler """basic""" +241 55 evaluator """rankbased""" +241 56 dataset """kinships""" +241 56 model """ermlp""" +241 56 loss """softplus""" +241 56 regularizer """no""" +241 56 optimizer """adadelta""" +241 56 training_loop """owa""" +241 56 negative_sampler """basic""" +241 56 evaluator """rankbased""" +241 57 dataset """kinships""" +241 57 model """ermlp""" +241 57 loss """softplus""" +241 57 regularizer """no""" +241 57 optimizer """adadelta""" +241 57 training_loop """owa""" +241 57 negative_sampler """basic""" +241 57 evaluator """rankbased""" +241 58 dataset """kinships""" +241 58 model """ermlp""" +241 58 loss """softplus""" +241 58 regularizer """no""" +241 58 optimizer """adadelta""" +241 58 training_loop """owa""" +241 58 negative_sampler """basic""" +241 58 evaluator """rankbased""" +241 59 dataset """kinships""" +241 59 model """ermlp""" +241 59 loss """softplus""" +241 59 regularizer """no""" +241 59 optimizer """adadelta""" +241 59 training_loop """owa""" +241 59 negative_sampler """basic""" +241 59 evaluator """rankbased""" +241 60 dataset """kinships""" +241 60 model """ermlp""" +241 60 loss """softplus""" +241 60 regularizer """no""" +241 60 optimizer """adadelta""" +241 60 training_loop """owa""" +241 60 negative_sampler """basic""" +241 60 evaluator """rankbased""" +241 61 dataset """kinships""" +241 61 model """ermlp""" +241 61 loss """softplus""" +241 61 regularizer """no""" +241 61 optimizer """adadelta""" +241 61 training_loop """owa""" +241 61 negative_sampler """basic""" +241 61 evaluator """rankbased""" +241 62 dataset """kinships""" +241 62 model """ermlp""" +241 62 loss """softplus""" +241 62 regularizer """no""" +241 62 optimizer """adadelta""" +241 62 training_loop """owa""" +241 62 negative_sampler """basic""" +241 62 evaluator 
"""rankbased""" +241 63 dataset """kinships""" +241 63 model """ermlp""" +241 63 loss """softplus""" +241 63 regularizer """no""" +241 63 optimizer """adadelta""" +241 63 training_loop """owa""" +241 63 negative_sampler """basic""" +241 63 evaluator """rankbased""" +241 64 dataset """kinships""" +241 64 model """ermlp""" +241 64 loss """softplus""" +241 64 regularizer """no""" +241 64 optimizer """adadelta""" +241 64 training_loop """owa""" +241 64 negative_sampler """basic""" +241 64 evaluator """rankbased""" +241 65 dataset """kinships""" +241 65 model """ermlp""" +241 65 loss """softplus""" +241 65 regularizer """no""" +241 65 optimizer """adadelta""" +241 65 training_loop """owa""" +241 65 negative_sampler """basic""" +241 65 evaluator """rankbased""" +241 66 dataset """kinships""" +241 66 model """ermlp""" +241 66 loss """softplus""" +241 66 regularizer """no""" +241 66 optimizer """adadelta""" +241 66 training_loop """owa""" +241 66 negative_sampler """basic""" +241 66 evaluator """rankbased""" +241 67 dataset """kinships""" +241 67 model """ermlp""" +241 67 loss """softplus""" +241 67 regularizer """no""" +241 67 optimizer """adadelta""" +241 67 training_loop """owa""" +241 67 negative_sampler """basic""" +241 67 evaluator """rankbased""" +241 68 dataset """kinships""" +241 68 model """ermlp""" +241 68 loss """softplus""" +241 68 regularizer """no""" +241 68 optimizer """adadelta""" +241 68 training_loop """owa""" +241 68 negative_sampler """basic""" +241 68 evaluator """rankbased""" +241 69 dataset """kinships""" +241 69 model """ermlp""" +241 69 loss """softplus""" +241 69 regularizer """no""" +241 69 optimizer """adadelta""" +241 69 training_loop """owa""" +241 69 negative_sampler """basic""" +241 69 evaluator """rankbased""" +241 70 dataset """kinships""" +241 70 model """ermlp""" +241 70 loss """softplus""" +241 70 regularizer """no""" +241 70 optimizer """adadelta""" +241 70 training_loop """owa""" +241 70 negative_sampler """basic""" +241 70 evaluator 
"""rankbased""" +241 71 dataset """kinships""" +241 71 model """ermlp""" +241 71 loss """softplus""" +241 71 regularizer """no""" +241 71 optimizer """adadelta""" +241 71 training_loop """owa""" +241 71 negative_sampler """basic""" +241 71 evaluator """rankbased""" +241 72 dataset """kinships""" +241 72 model """ermlp""" +241 72 loss """softplus""" +241 72 regularizer """no""" +241 72 optimizer """adadelta""" +241 72 training_loop """owa""" +241 72 negative_sampler """basic""" +241 72 evaluator """rankbased""" +241 73 dataset """kinships""" +241 73 model """ermlp""" +241 73 loss """softplus""" +241 73 regularizer """no""" +241 73 optimizer """adadelta""" +241 73 training_loop """owa""" +241 73 negative_sampler """basic""" +241 73 evaluator """rankbased""" +241 74 dataset """kinships""" +241 74 model """ermlp""" +241 74 loss """softplus""" +241 74 regularizer """no""" +241 74 optimizer """adadelta""" +241 74 training_loop """owa""" +241 74 negative_sampler """basic""" +241 74 evaluator """rankbased""" +241 75 dataset """kinships""" +241 75 model """ermlp""" +241 75 loss """softplus""" +241 75 regularizer """no""" +241 75 optimizer """adadelta""" +241 75 training_loop """owa""" +241 75 negative_sampler """basic""" +241 75 evaluator """rankbased""" +241 76 dataset """kinships""" +241 76 model """ermlp""" +241 76 loss """softplus""" +241 76 regularizer """no""" +241 76 optimizer """adadelta""" +241 76 training_loop """owa""" +241 76 negative_sampler """basic""" +241 76 evaluator """rankbased""" +241 77 dataset """kinships""" +241 77 model """ermlp""" +241 77 loss """softplus""" +241 77 regularizer """no""" +241 77 optimizer """adadelta""" +241 77 training_loop """owa""" +241 77 negative_sampler """basic""" +241 77 evaluator """rankbased""" +241 78 dataset """kinships""" +241 78 model """ermlp""" +241 78 loss """softplus""" +241 78 regularizer """no""" +241 78 optimizer """adadelta""" +241 78 training_loop """owa""" +241 78 negative_sampler """basic""" +241 78 evaluator 
"""rankbased""" +241 79 dataset """kinships""" +241 79 model """ermlp""" +241 79 loss """softplus""" +241 79 regularizer """no""" +241 79 optimizer """adadelta""" +241 79 training_loop """owa""" +241 79 negative_sampler """basic""" +241 79 evaluator """rankbased""" +241 80 dataset """kinships""" +241 80 model """ermlp""" +241 80 loss """softplus""" +241 80 regularizer """no""" +241 80 optimizer """adadelta""" +241 80 training_loop """owa""" +241 80 negative_sampler """basic""" +241 80 evaluator """rankbased""" +241 81 dataset """kinships""" +241 81 model """ermlp""" +241 81 loss """softplus""" +241 81 regularizer """no""" +241 81 optimizer """adadelta""" +241 81 training_loop """owa""" +241 81 negative_sampler """basic""" +241 81 evaluator """rankbased""" +241 82 dataset """kinships""" +241 82 model """ermlp""" +241 82 loss """softplus""" +241 82 regularizer """no""" +241 82 optimizer """adadelta""" +241 82 training_loop """owa""" +241 82 negative_sampler """basic""" +241 82 evaluator """rankbased""" +241 83 dataset """kinships""" +241 83 model """ermlp""" +241 83 loss """softplus""" +241 83 regularizer """no""" +241 83 optimizer """adadelta""" +241 83 training_loop """owa""" +241 83 negative_sampler """basic""" +241 83 evaluator """rankbased""" +241 84 dataset """kinships""" +241 84 model """ermlp""" +241 84 loss """softplus""" +241 84 regularizer """no""" +241 84 optimizer """adadelta""" +241 84 training_loop """owa""" +241 84 negative_sampler """basic""" +241 84 evaluator """rankbased""" +241 85 dataset """kinships""" +241 85 model """ermlp""" +241 85 loss """softplus""" +241 85 regularizer """no""" +241 85 optimizer """adadelta""" +241 85 training_loop """owa""" +241 85 negative_sampler """basic""" +241 85 evaluator """rankbased""" +241 86 dataset """kinships""" +241 86 model """ermlp""" +241 86 loss """softplus""" +241 86 regularizer """no""" +241 86 optimizer """adadelta""" +241 86 training_loop """owa""" +241 86 negative_sampler """basic""" +241 86 evaluator 
"""rankbased""" +241 87 dataset """kinships""" +241 87 model """ermlp""" +241 87 loss """softplus""" +241 87 regularizer """no""" +241 87 optimizer """adadelta""" +241 87 training_loop """owa""" +241 87 negative_sampler """basic""" +241 87 evaluator """rankbased""" +241 88 dataset """kinships""" +241 88 model """ermlp""" +241 88 loss """softplus""" +241 88 regularizer """no""" +241 88 optimizer """adadelta""" +241 88 training_loop """owa""" +241 88 negative_sampler """basic""" +241 88 evaluator """rankbased""" +241 89 dataset """kinships""" +241 89 model """ermlp""" +241 89 loss """softplus""" +241 89 regularizer """no""" +241 89 optimizer """adadelta""" +241 89 training_loop """owa""" +241 89 negative_sampler """basic""" +241 89 evaluator """rankbased""" +241 90 dataset """kinships""" +241 90 model """ermlp""" +241 90 loss """softplus""" +241 90 regularizer """no""" +241 90 optimizer """adadelta""" +241 90 training_loop """owa""" +241 90 negative_sampler """basic""" +241 90 evaluator """rankbased""" +241 91 dataset """kinships""" +241 91 model """ermlp""" +241 91 loss """softplus""" +241 91 regularizer """no""" +241 91 optimizer """adadelta""" +241 91 training_loop """owa""" +241 91 negative_sampler """basic""" +241 91 evaluator """rankbased""" +241 92 dataset """kinships""" +241 92 model """ermlp""" +241 92 loss """softplus""" +241 92 regularizer """no""" +241 92 optimizer """adadelta""" +241 92 training_loop """owa""" +241 92 negative_sampler """basic""" +241 92 evaluator """rankbased""" +241 93 dataset """kinships""" +241 93 model """ermlp""" +241 93 loss """softplus""" +241 93 regularizer """no""" +241 93 optimizer """adadelta""" +241 93 training_loop """owa""" +241 93 negative_sampler """basic""" +241 93 evaluator """rankbased""" +241 94 dataset """kinships""" +241 94 model """ermlp""" +241 94 loss """softplus""" +241 94 regularizer """no""" +241 94 optimizer """adadelta""" +241 94 training_loop """owa""" +241 94 negative_sampler """basic""" +241 94 evaluator 
"""rankbased""" +241 95 dataset """kinships""" +241 95 model """ermlp""" +241 95 loss """softplus""" +241 95 regularizer """no""" +241 95 optimizer """adadelta""" +241 95 training_loop """owa""" +241 95 negative_sampler """basic""" +241 95 evaluator """rankbased""" +241 96 dataset """kinships""" +241 96 model """ermlp""" +241 96 loss """softplus""" +241 96 regularizer """no""" +241 96 optimizer """adadelta""" +241 96 training_loop """owa""" +241 96 negative_sampler """basic""" +241 96 evaluator """rankbased""" +241 97 dataset """kinships""" +241 97 model """ermlp""" +241 97 loss """softplus""" +241 97 regularizer """no""" +241 97 optimizer """adadelta""" +241 97 training_loop """owa""" +241 97 negative_sampler """basic""" +241 97 evaluator """rankbased""" +241 98 dataset """kinships""" +241 98 model """ermlp""" +241 98 loss """softplus""" +241 98 regularizer """no""" +241 98 optimizer """adadelta""" +241 98 training_loop """owa""" +241 98 negative_sampler """basic""" +241 98 evaluator """rankbased""" +241 99 dataset """kinships""" +241 99 model """ermlp""" +241 99 loss """softplus""" +241 99 regularizer """no""" +241 99 optimizer """adadelta""" +241 99 training_loop """owa""" +241 99 negative_sampler """basic""" +241 99 evaluator """rankbased""" +241 100 dataset """kinships""" +241 100 model """ermlp""" +241 100 loss """softplus""" +241 100 regularizer """no""" +241 100 optimizer """adadelta""" +241 100 training_loop """owa""" +241 100 negative_sampler """basic""" +241 100 evaluator """rankbased""" +242 1 model.embedding_dim 1.0 +242 1 loss.margin 2.1828073784099296 +242 1 negative_sampler.num_negs_per_pos 38.0 +242 1 training.batch_size 0.0 +242 2 model.embedding_dim 1.0 +242 2 loss.margin 7.781184120812118 +242 2 negative_sampler.num_negs_per_pos 44.0 +242 2 training.batch_size 0.0 +242 3 model.embedding_dim 2.0 +242 3 loss.margin 9.32933928965419 +242 3 negative_sampler.num_negs_per_pos 62.0 +242 3 training.batch_size 2.0 +242 4 model.embedding_dim 2.0 +242 4 
loss.margin 8.118531763755016 +242 4 negative_sampler.num_negs_per_pos 48.0 +242 4 training.batch_size 2.0 +242 5 model.embedding_dim 0.0 +242 5 loss.margin 7.367176920193337 +242 5 negative_sampler.num_negs_per_pos 50.0 +242 5 training.batch_size 0.0 +242 6 model.embedding_dim 2.0 +242 6 loss.margin 8.887766213097656 +242 6 negative_sampler.num_negs_per_pos 83.0 +242 6 training.batch_size 1.0 +242 7 model.embedding_dim 0.0 +242 7 loss.margin 2.362801547020367 +242 7 negative_sampler.num_negs_per_pos 56.0 +242 7 training.batch_size 0.0 +242 8 model.embedding_dim 0.0 +242 8 loss.margin 1.128823316539202 +242 8 negative_sampler.num_negs_per_pos 27.0 +242 8 training.batch_size 2.0 +242 9 model.embedding_dim 2.0 +242 9 loss.margin 0.9534334004969488 +242 9 negative_sampler.num_negs_per_pos 67.0 +242 9 training.batch_size 0.0 +242 10 model.embedding_dim 1.0 +242 10 loss.margin 3.8632792312265507 +242 10 negative_sampler.num_negs_per_pos 20.0 +242 10 training.batch_size 2.0 +242 11 model.embedding_dim 0.0 +242 11 loss.margin 5.983608100037568 +242 11 negative_sampler.num_negs_per_pos 11.0 +242 11 training.batch_size 2.0 +242 12 model.embedding_dim 2.0 +242 12 loss.margin 3.0047373584048516 +242 12 negative_sampler.num_negs_per_pos 44.0 +242 12 training.batch_size 1.0 +242 13 model.embedding_dim 0.0 +242 13 loss.margin 1.8424220368828188 +242 13 negative_sampler.num_negs_per_pos 2.0 +242 13 training.batch_size 0.0 +242 14 model.embedding_dim 1.0 +242 14 loss.margin 1.643341717377493 +242 14 negative_sampler.num_negs_per_pos 58.0 +242 14 training.batch_size 2.0 +242 15 model.embedding_dim 2.0 +242 15 loss.margin 2.7385493196176682 +242 15 negative_sampler.num_negs_per_pos 8.0 +242 15 training.batch_size 0.0 +242 16 model.embedding_dim 2.0 +242 16 loss.margin 5.882712117187956 +242 16 negative_sampler.num_negs_per_pos 80.0 +242 16 training.batch_size 0.0 +242 17 model.embedding_dim 2.0 +242 17 loss.margin 8.652690189121428 +242 17 negative_sampler.num_negs_per_pos 25.0 +242 
17 training.batch_size 0.0 +242 18 model.embedding_dim 0.0 +242 18 loss.margin 3.1867858127362636 +242 18 negative_sampler.num_negs_per_pos 18.0 +242 18 training.batch_size 1.0 +242 19 model.embedding_dim 1.0 +242 19 loss.margin 9.22990257360172 +242 19 negative_sampler.num_negs_per_pos 51.0 +242 19 training.batch_size 1.0 +242 20 model.embedding_dim 2.0 +242 20 loss.margin 5.002977837069355 +242 20 negative_sampler.num_negs_per_pos 62.0 +242 20 training.batch_size 2.0 +242 21 model.embedding_dim 2.0 +242 21 loss.margin 1.7461714487394802 +242 21 negative_sampler.num_negs_per_pos 69.0 +242 21 training.batch_size 0.0 +242 22 model.embedding_dim 0.0 +242 22 loss.margin 0.917413747122908 +242 22 negative_sampler.num_negs_per_pos 37.0 +242 22 training.batch_size 2.0 +242 23 model.embedding_dim 1.0 +242 23 loss.margin 8.290974784506009 +242 23 negative_sampler.num_negs_per_pos 70.0 +242 23 training.batch_size 2.0 +242 24 model.embedding_dim 0.0 +242 24 loss.margin 1.0190588564859526 +242 24 negative_sampler.num_negs_per_pos 51.0 +242 24 training.batch_size 0.0 +242 25 model.embedding_dim 2.0 +242 25 loss.margin 2.8945646085498913 +242 25 negative_sampler.num_negs_per_pos 78.0 +242 25 training.batch_size 1.0 +242 26 model.embedding_dim 0.0 +242 26 loss.margin 9.046542063657276 +242 26 negative_sampler.num_negs_per_pos 30.0 +242 26 training.batch_size 0.0 +242 27 model.embedding_dim 0.0 +242 27 loss.margin 3.4759773767870725 +242 27 negative_sampler.num_negs_per_pos 30.0 +242 27 training.batch_size 1.0 +242 28 model.embedding_dim 2.0 +242 28 loss.margin 1.1624578142859143 +242 28 negative_sampler.num_negs_per_pos 34.0 +242 28 training.batch_size 0.0 +242 29 model.embedding_dim 0.0 +242 29 loss.margin 2.395948541639913 +242 29 negative_sampler.num_negs_per_pos 10.0 +242 29 training.batch_size 2.0 +242 30 model.embedding_dim 0.0 +242 30 loss.margin 3.523036188359759 +242 30 negative_sampler.num_negs_per_pos 95.0 +242 30 training.batch_size 2.0 +242 31 model.embedding_dim 
1.0 +242 31 loss.margin 1.193081160650051 +242 31 negative_sampler.num_negs_per_pos 31.0 +242 31 training.batch_size 0.0 +242 32 model.embedding_dim 0.0 +242 32 loss.margin 3.3465915570457208 +242 32 negative_sampler.num_negs_per_pos 62.0 +242 32 training.batch_size 0.0 +242 33 model.embedding_dim 0.0 +242 33 loss.margin 8.124716511567412 +242 33 negative_sampler.num_negs_per_pos 85.0 +242 33 training.batch_size 1.0 +242 34 model.embedding_dim 2.0 +242 34 loss.margin 6.512033772314285 +242 34 negative_sampler.num_negs_per_pos 45.0 +242 34 training.batch_size 1.0 +242 35 model.embedding_dim 2.0 +242 35 loss.margin 8.311738488306741 +242 35 negative_sampler.num_negs_per_pos 22.0 +242 35 training.batch_size 0.0 +242 36 model.embedding_dim 1.0 +242 36 loss.margin 4.967967129134455 +242 36 negative_sampler.num_negs_per_pos 83.0 +242 36 training.batch_size 0.0 +242 37 model.embedding_dim 2.0 +242 37 loss.margin 4.145265689672591 +242 37 negative_sampler.num_negs_per_pos 59.0 +242 37 training.batch_size 2.0 +242 38 model.embedding_dim 2.0 +242 38 loss.margin 6.986222167278158 +242 38 negative_sampler.num_negs_per_pos 20.0 +242 38 training.batch_size 2.0 +242 39 model.embedding_dim 2.0 +242 39 loss.margin 3.3048377696161593 +242 39 negative_sampler.num_negs_per_pos 70.0 +242 39 training.batch_size 1.0 +242 40 model.embedding_dim 0.0 +242 40 loss.margin 2.759233670567728 +242 40 negative_sampler.num_negs_per_pos 50.0 +242 40 training.batch_size 2.0 +242 41 model.embedding_dim 1.0 +242 41 loss.margin 6.042478315078349 +242 41 negative_sampler.num_negs_per_pos 84.0 +242 41 training.batch_size 1.0 +242 42 model.embedding_dim 0.0 +242 42 loss.margin 6.959713631862199 +242 42 negative_sampler.num_negs_per_pos 65.0 +242 42 training.batch_size 0.0 +242 43 model.embedding_dim 0.0 +242 43 loss.margin 5.041569485209053 +242 43 negative_sampler.num_negs_per_pos 46.0 +242 43 training.batch_size 2.0 +242 44 model.embedding_dim 2.0 +242 44 loss.margin 3.5190896509652725 +242 44 
negative_sampler.num_negs_per_pos 58.0 +242 44 training.batch_size 0.0 +242 45 model.embedding_dim 1.0 +242 45 loss.margin 0.5981567092492676 +242 45 negative_sampler.num_negs_per_pos 56.0 +242 45 training.batch_size 2.0 +242 46 model.embedding_dim 0.0 +242 46 loss.margin 5.753422188930381 +242 46 negative_sampler.num_negs_per_pos 45.0 +242 46 training.batch_size 2.0 +242 47 model.embedding_dim 0.0 +242 47 loss.margin 7.622184915620997 +242 47 negative_sampler.num_negs_per_pos 84.0 +242 47 training.batch_size 1.0 +242 48 model.embedding_dim 0.0 +242 48 loss.margin 1.019207587597537 +242 48 negative_sampler.num_negs_per_pos 80.0 +242 48 training.batch_size 1.0 +242 49 model.embedding_dim 2.0 +242 49 loss.margin 5.313548723249755 +242 49 negative_sampler.num_negs_per_pos 30.0 +242 49 training.batch_size 0.0 +242 50 model.embedding_dim 2.0 +242 50 loss.margin 4.768797579145565 +242 50 negative_sampler.num_negs_per_pos 92.0 +242 50 training.batch_size 0.0 +242 51 model.embedding_dim 0.0 +242 51 loss.margin 6.158960131778468 +242 51 negative_sampler.num_negs_per_pos 41.0 +242 51 training.batch_size 1.0 +242 52 model.embedding_dim 0.0 +242 52 loss.margin 8.342328134279764 +242 52 negative_sampler.num_negs_per_pos 73.0 +242 52 training.batch_size 2.0 +242 53 model.embedding_dim 1.0 +242 53 loss.margin 9.555495884375839 +242 53 negative_sampler.num_negs_per_pos 62.0 +242 53 training.batch_size 0.0 +242 54 model.embedding_dim 0.0 +242 54 loss.margin 3.230264031374141 +242 54 negative_sampler.num_negs_per_pos 0.0 +242 54 training.batch_size 1.0 +242 55 model.embedding_dim 1.0 +242 55 loss.margin 2.667426377995051 +242 55 negative_sampler.num_negs_per_pos 29.0 +242 55 training.batch_size 1.0 +242 56 model.embedding_dim 1.0 +242 56 loss.margin 8.16590340273458 +242 56 negative_sampler.num_negs_per_pos 61.0 +242 56 training.batch_size 2.0 +242 57 model.embedding_dim 2.0 +242 57 loss.margin 7.224897526850564 +242 57 negative_sampler.num_negs_per_pos 94.0 +242 57 
training.batch_size 2.0 +242 58 model.embedding_dim 2.0 +242 58 loss.margin 8.021580842658743 +242 58 negative_sampler.num_negs_per_pos 65.0 +242 58 training.batch_size 1.0 +242 59 model.embedding_dim 2.0 +242 59 loss.margin 9.81396476619683 +242 59 negative_sampler.num_negs_per_pos 16.0 +242 59 training.batch_size 0.0 +242 60 model.embedding_dim 2.0 +242 60 loss.margin 1.8616204998233958 +242 60 negative_sampler.num_negs_per_pos 31.0 +242 60 training.batch_size 1.0 +242 61 model.embedding_dim 2.0 +242 61 loss.margin 7.4803664274457615 +242 61 negative_sampler.num_negs_per_pos 17.0 +242 61 training.batch_size 1.0 +242 62 model.embedding_dim 0.0 +242 62 loss.margin 5.089536910487901 +242 62 negative_sampler.num_negs_per_pos 38.0 +242 62 training.batch_size 2.0 +242 63 model.embedding_dim 2.0 +242 63 loss.margin 6.733231636344543 +242 63 negative_sampler.num_negs_per_pos 79.0 +242 63 training.batch_size 0.0 +242 64 model.embedding_dim 0.0 +242 64 loss.margin 4.739019728681779 +242 64 negative_sampler.num_negs_per_pos 9.0 +242 64 training.batch_size 2.0 +242 65 model.embedding_dim 1.0 +242 65 loss.margin 2.40964776432274 +242 65 negative_sampler.num_negs_per_pos 85.0 +242 65 training.batch_size 2.0 +242 66 model.embedding_dim 0.0 +242 66 loss.margin 5.072799940336064 +242 66 negative_sampler.num_negs_per_pos 53.0 +242 66 training.batch_size 2.0 +242 67 model.embedding_dim 0.0 +242 67 loss.margin 5.916783773687561 +242 67 negative_sampler.num_negs_per_pos 32.0 +242 67 training.batch_size 2.0 +242 68 model.embedding_dim 0.0 +242 68 loss.margin 1.8980492123498651 +242 68 negative_sampler.num_negs_per_pos 20.0 +242 68 training.batch_size 1.0 +242 69 model.embedding_dim 1.0 +242 69 loss.margin 3.777758359439304 +242 69 negative_sampler.num_negs_per_pos 48.0 +242 69 training.batch_size 2.0 +242 70 model.embedding_dim 1.0 +242 70 loss.margin 3.038299782668391 +242 70 negative_sampler.num_negs_per_pos 30.0 +242 70 training.batch_size 0.0 +242 71 model.embedding_dim 1.0 +242 
71 loss.margin 9.535582389605572 +242 71 negative_sampler.num_negs_per_pos 54.0 +242 71 training.batch_size 0.0 +242 72 model.embedding_dim 2.0 +242 72 loss.margin 9.871376651757023 +242 72 negative_sampler.num_negs_per_pos 14.0 +242 72 training.batch_size 0.0 +242 73 model.embedding_dim 0.0 +242 73 loss.margin 4.960855944812656 +242 73 negative_sampler.num_negs_per_pos 62.0 +242 73 training.batch_size 1.0 +242 74 model.embedding_dim 0.0 +242 74 loss.margin 7.751790621420366 +242 74 negative_sampler.num_negs_per_pos 42.0 +242 74 training.batch_size 1.0 +242 75 model.embedding_dim 2.0 +242 75 loss.margin 1.8186981093464876 +242 75 negative_sampler.num_negs_per_pos 38.0 +242 75 training.batch_size 2.0 +242 76 model.embedding_dim 2.0 +242 76 loss.margin 3.316327040658359 +242 76 negative_sampler.num_negs_per_pos 46.0 +242 76 training.batch_size 2.0 +242 77 model.embedding_dim 2.0 +242 77 loss.margin 7.315464731163237 +242 77 negative_sampler.num_negs_per_pos 62.0 +242 77 training.batch_size 2.0 +242 78 model.embedding_dim 2.0 +242 78 loss.margin 7.415899972867793 +242 78 negative_sampler.num_negs_per_pos 52.0 +242 78 training.batch_size 0.0 +242 79 model.embedding_dim 0.0 +242 79 loss.margin 6.747883820799574 +242 79 negative_sampler.num_negs_per_pos 92.0 +242 79 training.batch_size 0.0 +242 80 model.embedding_dim 2.0 +242 80 loss.margin 7.23008557824945 +242 80 negative_sampler.num_negs_per_pos 49.0 +242 80 training.batch_size 0.0 +242 81 model.embedding_dim 2.0 +242 81 loss.margin 5.544385393445304 +242 81 negative_sampler.num_negs_per_pos 20.0 +242 81 training.batch_size 1.0 +242 82 model.embedding_dim 1.0 +242 82 loss.margin 3.708256201107913 +242 82 negative_sampler.num_negs_per_pos 77.0 +242 82 training.batch_size 1.0 +242 83 model.embedding_dim 2.0 +242 83 loss.margin 5.60547144137892 +242 83 negative_sampler.num_negs_per_pos 71.0 +242 83 training.batch_size 1.0 +242 84 model.embedding_dim 1.0 +242 84 loss.margin 9.132753578590767 +242 84 
negative_sampler.num_negs_per_pos 34.0 +242 84 training.batch_size 2.0 +242 85 model.embedding_dim 1.0 +242 85 loss.margin 9.345724704072923 +242 85 negative_sampler.num_negs_per_pos 45.0 +242 85 training.batch_size 1.0 +242 86 model.embedding_dim 1.0 +242 86 loss.margin 6.518856236156077 +242 86 negative_sampler.num_negs_per_pos 74.0 +242 86 training.batch_size 2.0 +242 87 model.embedding_dim 1.0 +242 87 loss.margin 0.9227373758134827 +242 87 negative_sampler.num_negs_per_pos 2.0 +242 87 training.batch_size 1.0 +242 88 model.embedding_dim 0.0 +242 88 loss.margin 9.491045121028376 +242 88 negative_sampler.num_negs_per_pos 60.0 +242 88 training.batch_size 0.0 +242 89 model.embedding_dim 2.0 +242 89 loss.margin 5.8050461671833045 +242 89 negative_sampler.num_negs_per_pos 57.0 +242 89 training.batch_size 1.0 +242 90 model.embedding_dim 2.0 +242 90 loss.margin 7.272289355274397 +242 90 negative_sampler.num_negs_per_pos 94.0 +242 90 training.batch_size 2.0 +242 91 model.embedding_dim 2.0 +242 91 loss.margin 7.055503936752198 +242 91 negative_sampler.num_negs_per_pos 96.0 +242 91 training.batch_size 2.0 +242 92 model.embedding_dim 1.0 +242 92 loss.margin 6.564713307014154 +242 92 negative_sampler.num_negs_per_pos 3.0 +242 92 training.batch_size 1.0 +242 93 model.embedding_dim 1.0 +242 93 loss.margin 1.2788771313384957 +242 93 negative_sampler.num_negs_per_pos 72.0 +242 93 training.batch_size 2.0 +242 94 model.embedding_dim 2.0 +242 94 loss.margin 8.72016408956612 +242 94 negative_sampler.num_negs_per_pos 29.0 +242 94 training.batch_size 0.0 +242 95 model.embedding_dim 0.0 +242 95 loss.margin 0.5449234526509608 +242 95 negative_sampler.num_negs_per_pos 27.0 +242 95 training.batch_size 1.0 +242 96 model.embedding_dim 2.0 +242 96 loss.margin 9.336384690970625 +242 96 negative_sampler.num_negs_per_pos 38.0 +242 96 training.batch_size 0.0 +242 97 model.embedding_dim 2.0 +242 97 loss.margin 7.293079247112784 +242 97 negative_sampler.num_negs_per_pos 77.0 +242 97 
training.batch_size 2.0 +242 98 model.embedding_dim 1.0 +242 98 loss.margin 8.781271447288708 +242 98 negative_sampler.num_negs_per_pos 42.0 +242 98 training.batch_size 1.0 +242 99 model.embedding_dim 0.0 +242 99 loss.margin 0.6657115666238992 +242 99 negative_sampler.num_negs_per_pos 22.0 +242 99 training.batch_size 0.0 +242 100 model.embedding_dim 0.0 +242 100 loss.margin 9.746452579445538 +242 100 negative_sampler.num_negs_per_pos 29.0 +242 100 training.batch_size 2.0 +242 1 dataset """kinships""" +242 1 model """ermlp""" +242 1 loss """marginranking""" +242 1 regularizer """no""" +242 1 optimizer """adadelta""" +242 1 training_loop """owa""" +242 1 negative_sampler """basic""" +242 1 evaluator """rankbased""" +242 2 dataset """kinships""" +242 2 model """ermlp""" +242 2 loss """marginranking""" +242 2 regularizer """no""" +242 2 optimizer """adadelta""" +242 2 training_loop """owa""" +242 2 negative_sampler """basic""" +242 2 evaluator """rankbased""" +242 3 dataset """kinships""" +242 3 model """ermlp""" +242 3 loss """marginranking""" +242 3 regularizer """no""" +242 3 optimizer """adadelta""" +242 3 training_loop """owa""" +242 3 negative_sampler """basic""" +242 3 evaluator """rankbased""" +242 4 dataset """kinships""" +242 4 model """ermlp""" +242 4 loss """marginranking""" +242 4 regularizer """no""" +242 4 optimizer """adadelta""" +242 4 training_loop """owa""" +242 4 negative_sampler """basic""" +242 4 evaluator """rankbased""" +242 5 dataset """kinships""" +242 5 model """ermlp""" +242 5 loss """marginranking""" +242 5 regularizer """no""" +242 5 optimizer """adadelta""" +242 5 training_loop """owa""" +242 5 negative_sampler """basic""" +242 5 evaluator """rankbased""" +242 6 dataset """kinships""" +242 6 model """ermlp""" +242 6 loss """marginranking""" +242 6 regularizer """no""" +242 6 optimizer """adadelta""" +242 6 training_loop """owa""" +242 6 negative_sampler """basic""" +242 6 evaluator """rankbased""" +242 7 dataset """kinships""" +242 7 
model """ermlp""" +242 7 loss """marginranking""" +242 7 regularizer """no""" +242 7 optimizer """adadelta""" +242 7 training_loop """owa""" +242 7 negative_sampler """basic""" +242 7 evaluator """rankbased""" +242 8 dataset """kinships""" +242 8 model """ermlp""" +242 8 loss """marginranking""" +242 8 regularizer """no""" +242 8 optimizer """adadelta""" +242 8 training_loop """owa""" +242 8 negative_sampler """basic""" +242 8 evaluator """rankbased""" +242 9 dataset """kinships""" +242 9 model """ermlp""" +242 9 loss """marginranking""" +242 9 regularizer """no""" +242 9 optimizer """adadelta""" +242 9 training_loop """owa""" +242 9 negative_sampler """basic""" +242 9 evaluator """rankbased""" +242 10 dataset """kinships""" +242 10 model """ermlp""" +242 10 loss """marginranking""" +242 10 regularizer """no""" +242 10 optimizer """adadelta""" +242 10 training_loop """owa""" +242 10 negative_sampler """basic""" +242 10 evaluator """rankbased""" +242 11 dataset """kinships""" +242 11 model """ermlp""" +242 11 loss """marginranking""" +242 11 regularizer """no""" +242 11 optimizer """adadelta""" +242 11 training_loop """owa""" +242 11 negative_sampler """basic""" +242 11 evaluator """rankbased""" +242 12 dataset """kinships""" +242 12 model """ermlp""" +242 12 loss """marginranking""" +242 12 regularizer """no""" +242 12 optimizer """adadelta""" +242 12 training_loop """owa""" +242 12 negative_sampler """basic""" +242 12 evaluator """rankbased""" +242 13 dataset """kinships""" +242 13 model """ermlp""" +242 13 loss """marginranking""" +242 13 regularizer """no""" +242 13 optimizer """adadelta""" +242 13 training_loop """owa""" +242 13 negative_sampler """basic""" +242 13 evaluator """rankbased""" +242 14 dataset """kinships""" +242 14 model """ermlp""" +242 14 loss """marginranking""" +242 14 regularizer """no""" +242 14 optimizer """adadelta""" +242 14 training_loop """owa""" +242 14 negative_sampler """basic""" +242 14 evaluator """rankbased""" +242 15 dataset 
"""kinships""" +242 15 model """ermlp""" +242 15 loss """marginranking""" +242 15 regularizer """no""" +242 15 optimizer """adadelta""" +242 15 training_loop """owa""" +242 15 negative_sampler """basic""" +242 15 evaluator """rankbased""" +242 16 dataset """kinships""" +242 16 model """ermlp""" +242 16 loss """marginranking""" +242 16 regularizer """no""" +242 16 optimizer """adadelta""" +242 16 training_loop """owa""" +242 16 negative_sampler """basic""" +242 16 evaluator """rankbased""" +242 17 dataset """kinships""" +242 17 model """ermlp""" +242 17 loss """marginranking""" +242 17 regularizer """no""" +242 17 optimizer """adadelta""" +242 17 training_loop """owa""" +242 17 negative_sampler """basic""" +242 17 evaluator """rankbased""" +242 18 dataset """kinships""" +242 18 model """ermlp""" +242 18 loss """marginranking""" +242 18 regularizer """no""" +242 18 optimizer """adadelta""" +242 18 training_loop """owa""" +242 18 negative_sampler """basic""" +242 18 evaluator """rankbased""" +242 19 dataset """kinships""" +242 19 model """ermlp""" +242 19 loss """marginranking""" +242 19 regularizer """no""" +242 19 optimizer """adadelta""" +242 19 training_loop """owa""" +242 19 negative_sampler """basic""" +242 19 evaluator """rankbased""" +242 20 dataset """kinships""" +242 20 model """ermlp""" +242 20 loss """marginranking""" +242 20 regularizer """no""" +242 20 optimizer """adadelta""" +242 20 training_loop """owa""" +242 20 negative_sampler """basic""" +242 20 evaluator """rankbased""" +242 21 dataset """kinships""" +242 21 model """ermlp""" +242 21 loss """marginranking""" +242 21 regularizer """no""" +242 21 optimizer """adadelta""" +242 21 training_loop """owa""" +242 21 negative_sampler """basic""" +242 21 evaluator """rankbased""" +242 22 dataset """kinships""" +242 22 model """ermlp""" +242 22 loss """marginranking""" +242 22 regularizer """no""" +242 22 optimizer """adadelta""" +242 22 training_loop """owa""" +242 22 negative_sampler """basic""" +242 22 
evaluator """rankbased""" +242 23 dataset """kinships""" +242 23 model """ermlp""" +242 23 loss """marginranking""" +242 23 regularizer """no""" +242 23 optimizer """adadelta""" +242 23 training_loop """owa""" +242 23 negative_sampler """basic""" +242 23 evaluator """rankbased""" +242 24 dataset """kinships""" +242 24 model """ermlp""" +242 24 loss """marginranking""" +242 24 regularizer """no""" +242 24 optimizer """adadelta""" +242 24 training_loop """owa""" +242 24 negative_sampler """basic""" +242 24 evaluator """rankbased""" +242 25 dataset """kinships""" +242 25 model """ermlp""" +242 25 loss """marginranking""" +242 25 regularizer """no""" +242 25 optimizer """adadelta""" +242 25 training_loop """owa""" +242 25 negative_sampler """basic""" +242 25 evaluator """rankbased""" +242 26 dataset """kinships""" +242 26 model """ermlp""" +242 26 loss """marginranking""" +242 26 regularizer """no""" +242 26 optimizer """adadelta""" +242 26 training_loop """owa""" +242 26 negative_sampler """basic""" +242 26 evaluator """rankbased""" +242 27 dataset """kinships""" +242 27 model """ermlp""" +242 27 loss """marginranking""" +242 27 regularizer """no""" +242 27 optimizer """adadelta""" +242 27 training_loop """owa""" +242 27 negative_sampler """basic""" +242 27 evaluator """rankbased""" +242 28 dataset """kinships""" +242 28 model """ermlp""" +242 28 loss """marginranking""" +242 28 regularizer """no""" +242 28 optimizer """adadelta""" +242 28 training_loop """owa""" +242 28 negative_sampler """basic""" +242 28 evaluator """rankbased""" +242 29 dataset """kinships""" +242 29 model """ermlp""" +242 29 loss """marginranking""" +242 29 regularizer """no""" +242 29 optimizer """adadelta""" +242 29 training_loop """owa""" +242 29 negative_sampler """basic""" +242 29 evaluator """rankbased""" +242 30 dataset """kinships""" +242 30 model """ermlp""" +242 30 loss """marginranking""" +242 30 regularizer """no""" +242 30 optimizer """adadelta""" +242 30 training_loop """owa""" +242 
30 negative_sampler """basic""" +242 30 evaluator """rankbased""" +242 31 dataset """kinships""" +242 31 model """ermlp""" +242 31 loss """marginranking""" +242 31 regularizer """no""" +242 31 optimizer """adadelta""" +242 31 training_loop """owa""" +242 31 negative_sampler """basic""" +242 31 evaluator """rankbased""" +242 32 dataset """kinships""" +242 32 model """ermlp""" +242 32 loss """marginranking""" +242 32 regularizer """no""" +242 32 optimizer """adadelta""" +242 32 training_loop """owa""" +242 32 negative_sampler """basic""" +242 32 evaluator """rankbased""" +242 33 dataset """kinships""" +242 33 model """ermlp""" +242 33 loss """marginranking""" +242 33 regularizer """no""" +242 33 optimizer """adadelta""" +242 33 training_loop """owa""" +242 33 negative_sampler """basic""" +242 33 evaluator """rankbased""" +242 34 dataset """kinships""" +242 34 model """ermlp""" +242 34 loss """marginranking""" +242 34 regularizer """no""" +242 34 optimizer """adadelta""" +242 34 training_loop """owa""" +242 34 negative_sampler """basic""" +242 34 evaluator """rankbased""" +242 35 dataset """kinships""" +242 35 model """ermlp""" +242 35 loss """marginranking""" +242 35 regularizer """no""" +242 35 optimizer """adadelta""" +242 35 training_loop """owa""" +242 35 negative_sampler """basic""" +242 35 evaluator """rankbased""" +242 36 dataset """kinships""" +242 36 model """ermlp""" +242 36 loss """marginranking""" +242 36 regularizer """no""" +242 36 optimizer """adadelta""" +242 36 training_loop """owa""" +242 36 negative_sampler """basic""" +242 36 evaluator """rankbased""" +242 37 dataset """kinships""" +242 37 model """ermlp""" +242 37 loss """marginranking""" +242 37 regularizer """no""" +242 37 optimizer """adadelta""" +242 37 training_loop """owa""" +242 37 negative_sampler """basic""" +242 37 evaluator """rankbased""" +242 38 dataset """kinships""" +242 38 model """ermlp""" +242 38 loss """marginranking""" +242 38 regularizer """no""" +242 38 optimizer 
"""adadelta""" +242 38 training_loop """owa""" +242 38 negative_sampler """basic""" +242 38 evaluator """rankbased""" +242 39 dataset """kinships""" +242 39 model """ermlp""" +242 39 loss """marginranking""" +242 39 regularizer """no""" +242 39 optimizer """adadelta""" +242 39 training_loop """owa""" +242 39 negative_sampler """basic""" +242 39 evaluator """rankbased""" +242 40 dataset """kinships""" +242 40 model """ermlp""" +242 40 loss """marginranking""" +242 40 regularizer """no""" +242 40 optimizer """adadelta""" +242 40 training_loop """owa""" +242 40 negative_sampler """basic""" +242 40 evaluator """rankbased""" +242 41 dataset """kinships""" +242 41 model """ermlp""" +242 41 loss """marginranking""" +242 41 regularizer """no""" +242 41 optimizer """adadelta""" +242 41 training_loop """owa""" +242 41 negative_sampler """basic""" +242 41 evaluator """rankbased""" +242 42 dataset """kinships""" +242 42 model """ermlp""" +242 42 loss """marginranking""" +242 42 regularizer """no""" +242 42 optimizer """adadelta""" +242 42 training_loop """owa""" +242 42 negative_sampler """basic""" +242 42 evaluator """rankbased""" +242 43 dataset """kinships""" +242 43 model """ermlp""" +242 43 loss """marginranking""" +242 43 regularizer """no""" +242 43 optimizer """adadelta""" +242 43 training_loop """owa""" +242 43 negative_sampler """basic""" +242 43 evaluator """rankbased""" +242 44 dataset """kinships""" +242 44 model """ermlp""" +242 44 loss """marginranking""" +242 44 regularizer """no""" +242 44 optimizer """adadelta""" +242 44 training_loop """owa""" +242 44 negative_sampler """basic""" +242 44 evaluator """rankbased""" +242 45 dataset """kinships""" +242 45 model """ermlp""" +242 45 loss """marginranking""" +242 45 regularizer """no""" +242 45 optimizer """adadelta""" +242 45 training_loop """owa""" +242 45 negative_sampler """basic""" +242 45 evaluator """rankbased""" +242 46 dataset """kinships""" +242 46 model """ermlp""" +242 46 loss """marginranking""" +242 
46 regularizer """no""" +242 46 optimizer """adadelta""" +242 46 training_loop """owa""" +242 46 negative_sampler """basic""" +242 46 evaluator """rankbased""" +242 47 dataset """kinships""" +242 47 model """ermlp""" +242 47 loss """marginranking""" +242 47 regularizer """no""" +242 47 optimizer """adadelta""" +242 47 training_loop """owa""" +242 47 negative_sampler """basic""" +242 47 evaluator """rankbased""" +242 48 dataset """kinships""" +242 48 model """ermlp""" +242 48 loss """marginranking""" +242 48 regularizer """no""" +242 48 optimizer """adadelta""" +242 48 training_loop """owa""" +242 48 negative_sampler """basic""" +242 48 evaluator """rankbased""" +242 49 dataset """kinships""" +242 49 model """ermlp""" +242 49 loss """marginranking""" +242 49 regularizer """no""" +242 49 optimizer """adadelta""" +242 49 training_loop """owa""" +242 49 negative_sampler """basic""" +242 49 evaluator """rankbased""" +242 50 dataset """kinships""" +242 50 model """ermlp""" +242 50 loss """marginranking""" +242 50 regularizer """no""" +242 50 optimizer """adadelta""" +242 50 training_loop """owa""" +242 50 negative_sampler """basic""" +242 50 evaluator """rankbased""" +242 51 dataset """kinships""" +242 51 model """ermlp""" +242 51 loss """marginranking""" +242 51 regularizer """no""" +242 51 optimizer """adadelta""" +242 51 training_loop """owa""" +242 51 negative_sampler """basic""" +242 51 evaluator """rankbased""" +242 52 dataset """kinships""" +242 52 model """ermlp""" +242 52 loss """marginranking""" +242 52 regularizer """no""" +242 52 optimizer """adadelta""" +242 52 training_loop """owa""" +242 52 negative_sampler """basic""" +242 52 evaluator """rankbased""" +242 53 dataset """kinships""" +242 53 model """ermlp""" +242 53 loss """marginranking""" +242 53 regularizer """no""" +242 53 optimizer """adadelta""" +242 53 training_loop """owa""" +242 53 negative_sampler """basic""" +242 53 evaluator """rankbased""" +242 54 dataset """kinships""" +242 54 model 
"""ermlp""" +242 54 loss """marginranking""" +242 54 regularizer """no""" +242 54 optimizer """adadelta""" +242 54 training_loop """owa""" +242 54 negative_sampler """basic""" +242 54 evaluator """rankbased""" +242 55 dataset """kinships""" +242 55 model """ermlp""" +242 55 loss """marginranking""" +242 55 regularizer """no""" +242 55 optimizer """adadelta""" +242 55 training_loop """owa""" +242 55 negative_sampler """basic""" +242 55 evaluator """rankbased""" +242 56 dataset """kinships""" +242 56 model """ermlp""" +242 56 loss """marginranking""" +242 56 regularizer """no""" +242 56 optimizer """adadelta""" +242 56 training_loop """owa""" +242 56 negative_sampler """basic""" +242 56 evaluator """rankbased""" +242 57 dataset """kinships""" +242 57 model """ermlp""" +242 57 loss """marginranking""" +242 57 regularizer """no""" +242 57 optimizer """adadelta""" +242 57 training_loop """owa""" +242 57 negative_sampler """basic""" +242 57 evaluator """rankbased""" +242 58 dataset """kinships""" +242 58 model """ermlp""" +242 58 loss """marginranking""" +242 58 regularizer """no""" +242 58 optimizer """adadelta""" +242 58 training_loop """owa""" +242 58 negative_sampler """basic""" +242 58 evaluator """rankbased""" +242 59 dataset """kinships""" +242 59 model """ermlp""" +242 59 loss """marginranking""" +242 59 regularizer """no""" +242 59 optimizer """adadelta""" +242 59 training_loop """owa""" +242 59 negative_sampler """basic""" +242 59 evaluator """rankbased""" +242 60 dataset """kinships""" +242 60 model """ermlp""" +242 60 loss """marginranking""" +242 60 regularizer """no""" +242 60 optimizer """adadelta""" +242 60 training_loop """owa""" +242 60 negative_sampler """basic""" +242 60 evaluator """rankbased""" +242 61 dataset """kinships""" +242 61 model """ermlp""" +242 61 loss """marginranking""" +242 61 regularizer """no""" +242 61 optimizer """adadelta""" +242 61 training_loop """owa""" +242 61 negative_sampler """basic""" +242 61 evaluator """rankbased""" +242 
62 dataset """kinships""" +242 62 model """ermlp""" +242 62 loss """marginranking""" +242 62 regularizer """no""" +242 62 optimizer """adadelta""" +242 62 training_loop """owa""" +242 62 negative_sampler """basic""" +242 62 evaluator """rankbased""" +242 63 dataset """kinships""" +242 63 model """ermlp""" +242 63 loss """marginranking""" +242 63 regularizer """no""" +242 63 optimizer """adadelta""" +242 63 training_loop """owa""" +242 63 negative_sampler """basic""" +242 63 evaluator """rankbased""" +242 64 dataset """kinships""" +242 64 model """ermlp""" +242 64 loss """marginranking""" +242 64 regularizer """no""" +242 64 optimizer """adadelta""" +242 64 training_loop """owa""" +242 64 negative_sampler """basic""" +242 64 evaluator """rankbased""" +242 65 dataset """kinships""" +242 65 model """ermlp""" +242 65 loss """marginranking""" +242 65 regularizer """no""" +242 65 optimizer """adadelta""" +242 65 training_loop """owa""" +242 65 negative_sampler """basic""" +242 65 evaluator """rankbased""" +242 66 dataset """kinships""" +242 66 model """ermlp""" +242 66 loss """marginranking""" +242 66 regularizer """no""" +242 66 optimizer """adadelta""" +242 66 training_loop """owa""" +242 66 negative_sampler """basic""" +242 66 evaluator """rankbased""" +242 67 dataset """kinships""" +242 67 model """ermlp""" +242 67 loss """marginranking""" +242 67 regularizer """no""" +242 67 optimizer """adadelta""" +242 67 training_loop """owa""" +242 67 negative_sampler """basic""" +242 67 evaluator """rankbased""" +242 68 dataset """kinships""" +242 68 model """ermlp""" +242 68 loss """marginranking""" +242 68 regularizer """no""" +242 68 optimizer """adadelta""" +242 68 training_loop """owa""" +242 68 negative_sampler """basic""" +242 68 evaluator """rankbased""" +242 69 dataset """kinships""" +242 69 model """ermlp""" +242 69 loss """marginranking""" +242 69 regularizer """no""" +242 69 optimizer """adadelta""" +242 69 training_loop """owa""" +242 69 negative_sampler 
"""basic""" +242 69 evaluator """rankbased""" +242 70 dataset """kinships""" +242 70 model """ermlp""" +242 70 loss """marginranking""" +242 70 regularizer """no""" +242 70 optimizer """adadelta""" +242 70 training_loop """owa""" +242 70 negative_sampler """basic""" +242 70 evaluator """rankbased""" +242 71 dataset """kinships""" +242 71 model """ermlp""" +242 71 loss """marginranking""" +242 71 regularizer """no""" +242 71 optimizer """adadelta""" +242 71 training_loop """owa""" +242 71 negative_sampler """basic""" +242 71 evaluator """rankbased""" +242 72 dataset """kinships""" +242 72 model """ermlp""" +242 72 loss """marginranking""" +242 72 regularizer """no""" +242 72 optimizer """adadelta""" +242 72 training_loop """owa""" +242 72 negative_sampler """basic""" +242 72 evaluator """rankbased""" +242 73 dataset """kinships""" +242 73 model """ermlp""" +242 73 loss """marginranking""" +242 73 regularizer """no""" +242 73 optimizer """adadelta""" +242 73 training_loop """owa""" +242 73 negative_sampler """basic""" +242 73 evaluator """rankbased""" +242 74 dataset """kinships""" +242 74 model """ermlp""" +242 74 loss """marginranking""" +242 74 regularizer """no""" +242 74 optimizer """adadelta""" +242 74 training_loop """owa""" +242 74 negative_sampler """basic""" +242 74 evaluator """rankbased""" +242 75 dataset """kinships""" +242 75 model """ermlp""" +242 75 loss """marginranking""" +242 75 regularizer """no""" +242 75 optimizer """adadelta""" +242 75 training_loop """owa""" +242 75 negative_sampler """basic""" +242 75 evaluator """rankbased""" +242 76 dataset """kinships""" +242 76 model """ermlp""" +242 76 loss """marginranking""" +242 76 regularizer """no""" +242 76 optimizer """adadelta""" +242 76 training_loop """owa""" +242 76 negative_sampler """basic""" +242 76 evaluator """rankbased""" +242 77 dataset """kinships""" +242 77 model """ermlp""" +242 77 loss """marginranking""" +242 77 regularizer """no""" +242 77 optimizer """adadelta""" +242 77 
training_loop """owa""" +242 77 negative_sampler """basic""" +242 77 evaluator """rankbased""" +242 78 dataset """kinships""" +242 78 model """ermlp""" +242 78 loss """marginranking""" +242 78 regularizer """no""" +242 78 optimizer """adadelta""" +242 78 training_loop """owa""" +242 78 negative_sampler """basic""" +242 78 evaluator """rankbased""" +242 79 dataset """kinships""" +242 79 model """ermlp""" +242 79 loss """marginranking""" +242 79 regularizer """no""" +242 79 optimizer """adadelta""" +242 79 training_loop """owa""" +242 79 negative_sampler """basic""" +242 79 evaluator """rankbased""" +242 80 dataset """kinships""" +242 80 model """ermlp""" +242 80 loss """marginranking""" +242 80 regularizer """no""" +242 80 optimizer """adadelta""" +242 80 training_loop """owa""" +242 80 negative_sampler """basic""" +242 80 evaluator """rankbased""" +242 81 dataset """kinships""" +242 81 model """ermlp""" +242 81 loss """marginranking""" +242 81 regularizer """no""" +242 81 optimizer """adadelta""" +242 81 training_loop """owa""" +242 81 negative_sampler """basic""" +242 81 evaluator """rankbased""" +242 82 dataset """kinships""" +242 82 model """ermlp""" +242 82 loss """marginranking""" +242 82 regularizer """no""" +242 82 optimizer """adadelta""" +242 82 training_loop """owa""" +242 82 negative_sampler """basic""" +242 82 evaluator """rankbased""" +242 83 dataset """kinships""" +242 83 model """ermlp""" +242 83 loss """marginranking""" +242 83 regularizer """no""" +242 83 optimizer """adadelta""" +242 83 training_loop """owa""" +242 83 negative_sampler """basic""" +242 83 evaluator """rankbased""" +242 84 dataset """kinships""" +242 84 model """ermlp""" +242 84 loss """marginranking""" +242 84 regularizer """no""" +242 84 optimizer """adadelta""" +242 84 training_loop """owa""" +242 84 negative_sampler """basic""" +242 84 evaluator """rankbased""" +242 85 dataset """kinships""" +242 85 model """ermlp""" +242 85 loss """marginranking""" +242 85 regularizer """no""" 
+242 85 optimizer """adadelta""" +242 85 training_loop """owa""" +242 85 negative_sampler """basic""" +242 85 evaluator """rankbased""" +242 86 dataset """kinships""" +242 86 model """ermlp""" +242 86 loss """marginranking""" +242 86 regularizer """no""" +242 86 optimizer """adadelta""" +242 86 training_loop """owa""" +242 86 negative_sampler """basic""" +242 86 evaluator """rankbased""" +242 87 dataset """kinships""" +242 87 model """ermlp""" +242 87 loss """marginranking""" +242 87 regularizer """no""" +242 87 optimizer """adadelta""" +242 87 training_loop """owa""" +242 87 negative_sampler """basic""" +242 87 evaluator """rankbased""" +242 88 dataset """kinships""" +242 88 model """ermlp""" +242 88 loss """marginranking""" +242 88 regularizer """no""" +242 88 optimizer """adadelta""" +242 88 training_loop """owa""" +242 88 negative_sampler """basic""" +242 88 evaluator """rankbased""" +242 89 dataset """kinships""" +242 89 model """ermlp""" +242 89 loss """marginranking""" +242 89 regularizer """no""" +242 89 optimizer """adadelta""" +242 89 training_loop """owa""" +242 89 negative_sampler """basic""" +242 89 evaluator """rankbased""" +242 90 dataset """kinships""" +242 90 model """ermlp""" +242 90 loss """marginranking""" +242 90 regularizer """no""" +242 90 optimizer """adadelta""" +242 90 training_loop """owa""" +242 90 negative_sampler """basic""" +242 90 evaluator """rankbased""" +242 91 dataset """kinships""" +242 91 model """ermlp""" +242 91 loss """marginranking""" +242 91 regularizer """no""" +242 91 optimizer """adadelta""" +242 91 training_loop """owa""" +242 91 negative_sampler """basic""" +242 91 evaluator """rankbased""" +242 92 dataset """kinships""" +242 92 model """ermlp""" +242 92 loss """marginranking""" +242 92 regularizer """no""" +242 92 optimizer """adadelta""" +242 92 training_loop """owa""" +242 92 negative_sampler """basic""" +242 92 evaluator """rankbased""" +242 93 dataset """kinships""" +242 93 model """ermlp""" +242 93 loss 
"""marginranking""" +242 93 regularizer """no""" +242 93 optimizer """adadelta""" +242 93 training_loop """owa""" +242 93 negative_sampler """basic""" +242 93 evaluator """rankbased""" +242 94 dataset """kinships""" +242 94 model """ermlp""" +242 94 loss """marginranking""" +242 94 regularizer """no""" +242 94 optimizer """adadelta""" +242 94 training_loop """owa""" +242 94 negative_sampler """basic""" +242 94 evaluator """rankbased""" +242 95 dataset """kinships""" +242 95 model """ermlp""" +242 95 loss """marginranking""" +242 95 regularizer """no""" +242 95 optimizer """adadelta""" +242 95 training_loop """owa""" +242 95 negative_sampler """basic""" +242 95 evaluator """rankbased""" +242 96 dataset """kinships""" +242 96 model """ermlp""" +242 96 loss """marginranking""" +242 96 regularizer """no""" +242 96 optimizer """adadelta""" +242 96 training_loop """owa""" +242 96 negative_sampler """basic""" +242 96 evaluator """rankbased""" +242 97 dataset """kinships""" +242 97 model """ermlp""" +242 97 loss """marginranking""" +242 97 regularizer """no""" +242 97 optimizer """adadelta""" +242 97 training_loop """owa""" +242 97 negative_sampler """basic""" +242 97 evaluator """rankbased""" +242 98 dataset """kinships""" +242 98 model """ermlp""" +242 98 loss """marginranking""" +242 98 regularizer """no""" +242 98 optimizer """adadelta""" +242 98 training_loop """owa""" +242 98 negative_sampler """basic""" +242 98 evaluator """rankbased""" +242 99 dataset """kinships""" +242 99 model """ermlp""" +242 99 loss """marginranking""" +242 99 regularizer """no""" +242 99 optimizer """adadelta""" +242 99 training_loop """owa""" +242 99 negative_sampler """basic""" +242 99 evaluator """rankbased""" +242 100 dataset """kinships""" +242 100 model """ermlp""" +242 100 loss """marginranking""" +242 100 regularizer """no""" +242 100 optimizer """adadelta""" +242 100 training_loop """owa""" +242 100 negative_sampler """basic""" +242 100 evaluator """rankbased""" +243 1 
model.embedding_dim 2.0 +243 1 loss.margin 8.319076012326388 +243 1 negative_sampler.num_negs_per_pos 75.0 +243 1 training.batch_size 0.0 +243 2 model.embedding_dim 1.0 +243 2 loss.margin 4.038541787461788 +243 2 negative_sampler.num_negs_per_pos 74.0 +243 2 training.batch_size 0.0 +243 3 model.embedding_dim 1.0 +243 3 loss.margin 5.765615358731757 +243 3 negative_sampler.num_negs_per_pos 32.0 +243 3 training.batch_size 2.0 +243 4 model.embedding_dim 0.0 +243 4 loss.margin 9.27117135953422 +243 4 negative_sampler.num_negs_per_pos 52.0 +243 4 training.batch_size 0.0 +243 5 model.embedding_dim 1.0 +243 5 loss.margin 5.496498269943691 +243 5 negative_sampler.num_negs_per_pos 32.0 +243 5 training.batch_size 0.0 +243 6 model.embedding_dim 0.0 +243 6 loss.margin 7.849680146377029 +243 6 negative_sampler.num_negs_per_pos 98.0 +243 6 training.batch_size 2.0 +243 7 model.embedding_dim 1.0 +243 7 loss.margin 8.080281425979893 +243 7 negative_sampler.num_negs_per_pos 91.0 +243 7 training.batch_size 2.0 +243 8 model.embedding_dim 0.0 +243 8 loss.margin 5.443769084534431 +243 8 negative_sampler.num_negs_per_pos 65.0 +243 8 training.batch_size 0.0 +243 9 model.embedding_dim 2.0 +243 9 loss.margin 5.999654875436748 +243 9 negative_sampler.num_negs_per_pos 55.0 +243 9 training.batch_size 0.0 +243 10 model.embedding_dim 0.0 +243 10 loss.margin 2.5231116769007733 +243 10 negative_sampler.num_negs_per_pos 13.0 +243 10 training.batch_size 2.0 +243 11 model.embedding_dim 1.0 +243 11 loss.margin 5.248891207913082 +243 11 negative_sampler.num_negs_per_pos 21.0 +243 11 training.batch_size 0.0 +243 12 model.embedding_dim 1.0 +243 12 loss.margin 4.206575090605879 +243 12 negative_sampler.num_negs_per_pos 34.0 +243 12 training.batch_size 1.0 +243 13 model.embedding_dim 1.0 +243 13 loss.margin 1.4967562868685855 +243 13 negative_sampler.num_negs_per_pos 32.0 +243 13 training.batch_size 0.0 +243 14 model.embedding_dim 1.0 +243 14 loss.margin 1.5184958258240058 +243 14 
negative_sampler.num_negs_per_pos 91.0 +243 14 training.batch_size 0.0 +243 15 model.embedding_dim 2.0 +243 15 loss.margin 6.928744757737032 +243 15 negative_sampler.num_negs_per_pos 73.0 +243 15 training.batch_size 2.0 +243 16 model.embedding_dim 1.0 +243 16 loss.margin 7.831928359717533 +243 16 negative_sampler.num_negs_per_pos 99.0 +243 16 training.batch_size 1.0 +243 17 model.embedding_dim 1.0 +243 17 loss.margin 1.269075591591025 +243 17 negative_sampler.num_negs_per_pos 72.0 +243 17 training.batch_size 0.0 +243 18 model.embedding_dim 1.0 +243 18 loss.margin 1.5381453381220198 +243 18 negative_sampler.num_negs_per_pos 7.0 +243 18 training.batch_size 2.0 +243 19 model.embedding_dim 0.0 +243 19 loss.margin 5.680517993350091 +243 19 negative_sampler.num_negs_per_pos 8.0 +243 19 training.batch_size 1.0 +243 20 model.embedding_dim 2.0 +243 20 loss.margin 3.9582449190773437 +243 20 negative_sampler.num_negs_per_pos 86.0 +243 20 training.batch_size 2.0 +243 21 model.embedding_dim 0.0 +243 21 loss.margin 4.504080235186686 +243 21 negative_sampler.num_negs_per_pos 69.0 +243 21 training.batch_size 1.0 +243 22 model.embedding_dim 1.0 +243 22 loss.margin 3.166121384053955 +243 22 negative_sampler.num_negs_per_pos 17.0 +243 22 training.batch_size 2.0 +243 23 model.embedding_dim 0.0 +243 23 loss.margin 8.302849660187132 +243 23 negative_sampler.num_negs_per_pos 71.0 +243 23 training.batch_size 0.0 +243 24 model.embedding_dim 1.0 +243 24 loss.margin 3.0483776760697414 +243 24 negative_sampler.num_negs_per_pos 88.0 +243 24 training.batch_size 1.0 +243 25 model.embedding_dim 1.0 +243 25 loss.margin 2.6143217985937426 +243 25 negative_sampler.num_negs_per_pos 12.0 +243 25 training.batch_size 2.0 +243 26 model.embedding_dim 0.0 +243 26 loss.margin 2.2795741506104137 +243 26 negative_sampler.num_negs_per_pos 24.0 +243 26 training.batch_size 0.0 +243 27 model.embedding_dim 0.0 +243 27 loss.margin 5.572345678827524 +243 27 negative_sampler.num_negs_per_pos 28.0 +243 27 
training.batch_size 2.0 +243 28 model.embedding_dim 2.0 +243 28 loss.margin 7.985874084208165 +243 28 negative_sampler.num_negs_per_pos 20.0 +243 28 training.batch_size 2.0 +243 29 model.embedding_dim 0.0 +243 29 loss.margin 7.355145758367269 +243 29 negative_sampler.num_negs_per_pos 59.0 +243 29 training.batch_size 1.0 +243 30 model.embedding_dim 1.0 +243 30 loss.margin 3.076892273065077 +243 30 negative_sampler.num_negs_per_pos 46.0 +243 30 training.batch_size 1.0 +243 31 model.embedding_dim 0.0 +243 31 loss.margin 7.229175257459294 +243 31 negative_sampler.num_negs_per_pos 96.0 +243 31 training.batch_size 2.0 +243 32 model.embedding_dim 2.0 +243 32 loss.margin 5.870476772008139 +243 32 negative_sampler.num_negs_per_pos 32.0 +243 32 training.batch_size 1.0 +243 33 model.embedding_dim 2.0 +243 33 loss.margin 3.272466680245052 +243 33 negative_sampler.num_negs_per_pos 54.0 +243 33 training.batch_size 0.0 +243 34 model.embedding_dim 1.0 +243 34 loss.margin 7.095616191453942 +243 34 negative_sampler.num_negs_per_pos 32.0 +243 34 training.batch_size 0.0 +243 35 model.embedding_dim 0.0 +243 35 loss.margin 9.220356543143643 +243 35 negative_sampler.num_negs_per_pos 93.0 +243 35 training.batch_size 1.0 +243 36 model.embedding_dim 1.0 +243 36 loss.margin 6.431227556946542 +243 36 negative_sampler.num_negs_per_pos 87.0 +243 36 training.batch_size 0.0 +243 37 model.embedding_dim 2.0 +243 37 loss.margin 2.919110877561448 +243 37 negative_sampler.num_negs_per_pos 25.0 +243 37 training.batch_size 0.0 +243 38 model.embedding_dim 2.0 +243 38 loss.margin 9.06621127791742 +243 38 negative_sampler.num_negs_per_pos 12.0 +243 38 training.batch_size 0.0 +243 39 model.embedding_dim 2.0 +243 39 loss.margin 1.4122447466008772 +243 39 negative_sampler.num_negs_per_pos 16.0 +243 39 training.batch_size 1.0 +243 40 model.embedding_dim 2.0 +243 40 loss.margin 5.543449739493814 +243 40 negative_sampler.num_negs_per_pos 87.0 +243 40 training.batch_size 0.0 +243 41 model.embedding_dim 0.0 +243 
41 loss.margin 7.177446347631477 +243 41 negative_sampler.num_negs_per_pos 68.0 +243 41 training.batch_size 0.0 +243 42 model.embedding_dim 0.0 +243 42 loss.margin 9.178005397030217 +243 42 negative_sampler.num_negs_per_pos 20.0 +243 42 training.batch_size 2.0 +243 43 model.embedding_dim 2.0 +243 43 loss.margin 3.3687092571890282 +243 43 negative_sampler.num_negs_per_pos 75.0 +243 43 training.batch_size 0.0 +243 44 model.embedding_dim 1.0 +243 44 loss.margin 7.072202430818157 +243 44 negative_sampler.num_negs_per_pos 83.0 +243 44 training.batch_size 2.0 +243 45 model.embedding_dim 2.0 +243 45 loss.margin 3.1086258490198273 +243 45 negative_sampler.num_negs_per_pos 94.0 +243 45 training.batch_size 1.0 +243 46 model.embedding_dim 1.0 +243 46 loss.margin 1.8984456595427646 +243 46 negative_sampler.num_negs_per_pos 95.0 +243 46 training.batch_size 2.0 +243 47 model.embedding_dim 2.0 +243 47 loss.margin 7.317412042773399 +243 47 negative_sampler.num_negs_per_pos 19.0 +243 47 training.batch_size 2.0 +243 48 model.embedding_dim 0.0 +243 48 loss.margin 8.898525206965957 +243 48 negative_sampler.num_negs_per_pos 99.0 +243 48 training.batch_size 2.0 +243 49 model.embedding_dim 2.0 +243 49 loss.margin 2.101022856785624 +243 49 negative_sampler.num_negs_per_pos 50.0 +243 49 training.batch_size 1.0 +243 50 model.embedding_dim 2.0 +243 50 loss.margin 7.472182153589961 +243 50 negative_sampler.num_negs_per_pos 78.0 +243 50 training.batch_size 1.0 +243 51 model.embedding_dim 1.0 +243 51 loss.margin 8.105093938860936 +243 51 negative_sampler.num_negs_per_pos 47.0 +243 51 training.batch_size 2.0 +243 52 model.embedding_dim 0.0 +243 52 loss.margin 0.79316626600092 +243 52 negative_sampler.num_negs_per_pos 70.0 +243 52 training.batch_size 1.0 +243 53 model.embedding_dim 0.0 +243 53 loss.margin 0.6475105039148724 +243 53 negative_sampler.num_negs_per_pos 97.0 +243 53 training.batch_size 1.0 +243 54 model.embedding_dim 1.0 +243 54 loss.margin 2.142295868521309 +243 54 
negative_sampler.num_negs_per_pos 65.0 +243 54 training.batch_size 1.0 +243 55 model.embedding_dim 1.0 +243 55 loss.margin 7.8420693203213 +243 55 negative_sampler.num_negs_per_pos 13.0 +243 55 training.batch_size 0.0 +243 56 model.embedding_dim 0.0 +243 56 loss.margin 6.4299110909710055 +243 56 negative_sampler.num_negs_per_pos 75.0 +243 56 training.batch_size 2.0 +243 57 model.embedding_dim 0.0 +243 57 loss.margin 4.704743754419133 +243 57 negative_sampler.num_negs_per_pos 43.0 +243 57 training.batch_size 0.0 +243 58 model.embedding_dim 1.0 +243 58 loss.margin 7.387234225423288 +243 58 negative_sampler.num_negs_per_pos 23.0 +243 58 training.batch_size 2.0 +243 59 model.embedding_dim 2.0 +243 59 loss.margin 5.721062071190317 +243 59 negative_sampler.num_negs_per_pos 82.0 +243 59 training.batch_size 2.0 +243 60 model.embedding_dim 1.0 +243 60 loss.margin 8.232451440156922 +243 60 negative_sampler.num_negs_per_pos 4.0 +243 60 training.batch_size 0.0 +243 61 model.embedding_dim 0.0 +243 61 loss.margin 1.8774431909705274 +243 61 negative_sampler.num_negs_per_pos 23.0 +243 61 training.batch_size 1.0 +243 62 model.embedding_dim 1.0 +243 62 loss.margin 2.07317117328778 +243 62 negative_sampler.num_negs_per_pos 4.0 +243 62 training.batch_size 0.0 +243 63 model.embedding_dim 0.0 +243 63 loss.margin 0.8325054378652919 +243 63 negative_sampler.num_negs_per_pos 57.0 +243 63 training.batch_size 2.0 +243 64 model.embedding_dim 0.0 +243 64 loss.margin 8.905378623753863 +243 64 negative_sampler.num_negs_per_pos 30.0 +243 64 training.batch_size 1.0 +243 65 model.embedding_dim 2.0 +243 65 loss.margin 8.321311032615608 +243 65 negative_sampler.num_negs_per_pos 94.0 +243 65 training.batch_size 2.0 +243 66 model.embedding_dim 0.0 +243 66 loss.margin 8.679685504453506 +243 66 negative_sampler.num_negs_per_pos 46.0 +243 66 training.batch_size 2.0 +243 67 model.embedding_dim 2.0 +243 67 loss.margin 4.753071210532582 +243 67 negative_sampler.num_negs_per_pos 57.0 +243 67 
training.batch_size 1.0 +243 68 model.embedding_dim 1.0 +243 68 loss.margin 0.6870136847054618 +243 68 negative_sampler.num_negs_per_pos 63.0 +243 68 training.batch_size 1.0 +243 69 model.embedding_dim 2.0 +243 69 loss.margin 1.1029925182937743 +243 69 negative_sampler.num_negs_per_pos 79.0 +243 69 training.batch_size 0.0 +243 70 model.embedding_dim 2.0 +243 70 loss.margin 9.213511494988442 +243 70 negative_sampler.num_negs_per_pos 14.0 +243 70 training.batch_size 1.0 +243 71 model.embedding_dim 2.0 +243 71 loss.margin 6.525932842545387 +243 71 negative_sampler.num_negs_per_pos 97.0 +243 71 training.batch_size 1.0 +243 72 model.embedding_dim 2.0 +243 72 loss.margin 1.8292588314554963 +243 72 negative_sampler.num_negs_per_pos 49.0 +243 72 training.batch_size 1.0 +243 73 model.embedding_dim 1.0 +243 73 loss.margin 4.743395718584242 +243 73 negative_sampler.num_negs_per_pos 28.0 +243 73 training.batch_size 2.0 +243 74 model.embedding_dim 2.0 +243 74 loss.margin 4.466807189509577 +243 74 negative_sampler.num_negs_per_pos 67.0 +243 74 training.batch_size 2.0 +243 75 model.embedding_dim 0.0 +243 75 loss.margin 0.9846893230580738 +243 75 negative_sampler.num_negs_per_pos 72.0 +243 75 training.batch_size 0.0 +243 76 model.embedding_dim 0.0 +243 76 loss.margin 8.999530428789244 +243 76 negative_sampler.num_negs_per_pos 17.0 +243 76 training.batch_size 1.0 +243 77 model.embedding_dim 1.0 +243 77 loss.margin 1.6077241914016351 +243 77 negative_sampler.num_negs_per_pos 50.0 +243 77 training.batch_size 2.0 +243 78 model.embedding_dim 2.0 +243 78 loss.margin 9.001487190676182 +243 78 negative_sampler.num_negs_per_pos 95.0 +243 78 training.batch_size 2.0 +243 79 model.embedding_dim 1.0 +243 79 loss.margin 4.14383028972877 +243 79 negative_sampler.num_negs_per_pos 90.0 +243 79 training.batch_size 0.0 +243 80 model.embedding_dim 1.0 +243 80 loss.margin 3.276458621134685 +243 80 negative_sampler.num_negs_per_pos 53.0 +243 80 training.batch_size 0.0 +243 81 model.embedding_dim 0.0 
+243 81 loss.margin 6.347460054263746 +243 81 negative_sampler.num_negs_per_pos 5.0 +243 81 training.batch_size 1.0 +243 82 model.embedding_dim 0.0 +243 82 loss.margin 7.692369864012467 +243 82 negative_sampler.num_negs_per_pos 82.0 +243 82 training.batch_size 0.0 +243 83 model.embedding_dim 2.0 +243 83 loss.margin 8.246934934239443 +243 83 negative_sampler.num_negs_per_pos 83.0 +243 83 training.batch_size 2.0 +243 84 model.embedding_dim 1.0 +243 84 loss.margin 2.942219288976946 +243 84 negative_sampler.num_negs_per_pos 86.0 +243 84 training.batch_size 1.0 +243 85 model.embedding_dim 1.0 +243 85 loss.margin 9.042546316295104 +243 85 negative_sampler.num_negs_per_pos 79.0 +243 85 training.batch_size 0.0 +243 86 model.embedding_dim 1.0 +243 86 loss.margin 8.814968911658267 +243 86 negative_sampler.num_negs_per_pos 63.0 +243 86 training.batch_size 0.0 +243 87 model.embedding_dim 2.0 +243 87 loss.margin 3.0973158234146836 +243 87 negative_sampler.num_negs_per_pos 23.0 +243 87 training.batch_size 2.0 +243 88 model.embedding_dim 2.0 +243 88 loss.margin 5.602843843450098 +243 88 negative_sampler.num_negs_per_pos 24.0 +243 88 training.batch_size 1.0 +243 89 model.embedding_dim 0.0 +243 89 loss.margin 8.154334108875366 +243 89 negative_sampler.num_negs_per_pos 59.0 +243 89 training.batch_size 0.0 +243 90 model.embedding_dim 1.0 +243 90 loss.margin 3.0966181195973763 +243 90 negative_sampler.num_negs_per_pos 59.0 +243 90 training.batch_size 1.0 +243 91 model.embedding_dim 1.0 +243 91 loss.margin 1.2447814804655042 +243 91 negative_sampler.num_negs_per_pos 21.0 +243 91 training.batch_size 0.0 +243 92 model.embedding_dim 1.0 +243 92 loss.margin 3.3135701489956717 +243 92 negative_sampler.num_negs_per_pos 24.0 +243 92 training.batch_size 0.0 +243 93 model.embedding_dim 1.0 +243 93 loss.margin 9.521194489971693 +243 93 negative_sampler.num_negs_per_pos 6.0 +243 93 training.batch_size 2.0 +243 94 model.embedding_dim 0.0 +243 94 loss.margin 7.720045483228423 +243 94 
negative_sampler.num_negs_per_pos 13.0 +243 94 training.batch_size 2.0 +243 95 model.embedding_dim 2.0 +243 95 loss.margin 9.897600688226728 +243 95 negative_sampler.num_negs_per_pos 15.0 +243 95 training.batch_size 1.0 +243 96 model.embedding_dim 2.0 +243 96 loss.margin 4.732223155677648 +243 96 negative_sampler.num_negs_per_pos 9.0 +243 96 training.batch_size 0.0 +243 97 model.embedding_dim 1.0 +243 97 loss.margin 9.798821084747539 +243 97 negative_sampler.num_negs_per_pos 28.0 +243 97 training.batch_size 1.0 +243 98 model.embedding_dim 0.0 +243 98 loss.margin 5.097619521912532 +243 98 negative_sampler.num_negs_per_pos 96.0 +243 98 training.batch_size 0.0 +243 99 model.embedding_dim 1.0 +243 99 loss.margin 9.105382581590064 +243 99 negative_sampler.num_negs_per_pos 20.0 +243 99 training.batch_size 2.0 +243 100 model.embedding_dim 1.0 +243 100 loss.margin 6.140644926574991 +243 100 negative_sampler.num_negs_per_pos 19.0 +243 100 training.batch_size 1.0 +243 1 dataset """kinships""" +243 1 model """ermlp""" +243 1 loss """marginranking""" +243 1 regularizer """no""" +243 1 optimizer """adadelta""" +243 1 training_loop """owa""" +243 1 negative_sampler """basic""" +243 1 evaluator """rankbased""" +243 2 dataset """kinships""" +243 2 model """ermlp""" +243 2 loss """marginranking""" +243 2 regularizer """no""" +243 2 optimizer """adadelta""" +243 2 training_loop """owa""" +243 2 negative_sampler """basic""" +243 2 evaluator """rankbased""" +243 3 dataset """kinships""" +243 3 model """ermlp""" +243 3 loss """marginranking""" +243 3 regularizer """no""" +243 3 optimizer """adadelta""" +243 3 training_loop """owa""" +243 3 negative_sampler """basic""" +243 3 evaluator """rankbased""" +243 4 dataset """kinships""" +243 4 model """ermlp""" +243 4 loss """marginranking""" +243 4 regularizer """no""" +243 4 optimizer """adadelta""" +243 4 training_loop """owa""" +243 4 negative_sampler """basic""" +243 4 evaluator """rankbased""" +243 5 dataset """kinships""" +243 5 model 
"""ermlp""" +243 5 loss """marginranking""" +243 5 regularizer """no""" +243 5 optimizer """adadelta""" +243 5 training_loop """owa""" +243 5 negative_sampler """basic""" +243 5 evaluator """rankbased""" +243 6 dataset """kinships""" +243 6 model """ermlp""" +243 6 loss """marginranking""" +243 6 regularizer """no""" +243 6 optimizer """adadelta""" +243 6 training_loop """owa""" +243 6 negative_sampler """basic""" +243 6 evaluator """rankbased""" +243 7 dataset """kinships""" +243 7 model """ermlp""" +243 7 loss """marginranking""" +243 7 regularizer """no""" +243 7 optimizer """adadelta""" +243 7 training_loop """owa""" +243 7 negative_sampler """basic""" +243 7 evaluator """rankbased""" +243 8 dataset """kinships""" +243 8 model """ermlp""" +243 8 loss """marginranking""" +243 8 regularizer """no""" +243 8 optimizer """adadelta""" +243 8 training_loop """owa""" +243 8 negative_sampler """basic""" +243 8 evaluator """rankbased""" +243 9 dataset """kinships""" +243 9 model """ermlp""" +243 9 loss """marginranking""" +243 9 regularizer """no""" +243 9 optimizer """adadelta""" +243 9 training_loop """owa""" +243 9 negative_sampler """basic""" +243 9 evaluator """rankbased""" +243 10 dataset """kinships""" +243 10 model """ermlp""" +243 10 loss """marginranking""" +243 10 regularizer """no""" +243 10 optimizer """adadelta""" +243 10 training_loop """owa""" +243 10 negative_sampler """basic""" +243 10 evaluator """rankbased""" +243 11 dataset """kinships""" +243 11 model """ermlp""" +243 11 loss """marginranking""" +243 11 regularizer """no""" +243 11 optimizer """adadelta""" +243 11 training_loop """owa""" +243 11 negative_sampler """basic""" +243 11 evaluator """rankbased""" +243 12 dataset """kinships""" +243 12 model """ermlp""" +243 12 loss """marginranking""" +243 12 regularizer """no""" +243 12 optimizer """adadelta""" +243 12 training_loop """owa""" +243 12 negative_sampler """basic""" +243 12 evaluator """rankbased""" +243 13 dataset """kinships""" +243 13 
model """ermlp""" +243 13 loss """marginranking""" +243 13 regularizer """no""" +243 13 optimizer """adadelta""" +243 13 training_loop """owa""" +243 13 negative_sampler """basic""" +243 13 evaluator """rankbased""" +243 14 dataset """kinships""" +243 14 model """ermlp""" +243 14 loss """marginranking""" +243 14 regularizer """no""" +243 14 optimizer """adadelta""" +243 14 training_loop """owa""" +243 14 negative_sampler """basic""" +243 14 evaluator """rankbased""" +243 15 dataset """kinships""" +243 15 model """ermlp""" +243 15 loss """marginranking""" +243 15 regularizer """no""" +243 15 optimizer """adadelta""" +243 15 training_loop """owa""" +243 15 negative_sampler """basic""" +243 15 evaluator """rankbased""" +243 16 dataset """kinships""" +243 16 model """ermlp""" +243 16 loss """marginranking""" +243 16 regularizer """no""" +243 16 optimizer """adadelta""" +243 16 training_loop """owa""" +243 16 negative_sampler """basic""" +243 16 evaluator """rankbased""" +243 17 dataset """kinships""" +243 17 model """ermlp""" +243 17 loss """marginranking""" +243 17 regularizer """no""" +243 17 optimizer """adadelta""" +243 17 training_loop """owa""" +243 17 negative_sampler """basic""" +243 17 evaluator """rankbased""" +243 18 dataset """kinships""" +243 18 model """ermlp""" +243 18 loss """marginranking""" +243 18 regularizer """no""" +243 18 optimizer """adadelta""" +243 18 training_loop """owa""" +243 18 negative_sampler """basic""" +243 18 evaluator """rankbased""" +243 19 dataset """kinships""" +243 19 model """ermlp""" +243 19 loss """marginranking""" +243 19 regularizer """no""" +243 19 optimizer """adadelta""" +243 19 training_loop """owa""" +243 19 negative_sampler """basic""" +243 19 evaluator """rankbased""" +243 20 dataset """kinships""" +243 20 model """ermlp""" +243 20 loss """marginranking""" +243 20 regularizer """no""" +243 20 optimizer """adadelta""" +243 20 training_loop """owa""" +243 20 negative_sampler """basic""" +243 20 evaluator 
"""rankbased""" +243 21 dataset """kinships""" +243 21 model """ermlp""" +243 21 loss """marginranking""" +243 21 regularizer """no""" +243 21 optimizer """adadelta""" +243 21 training_loop """owa""" +243 21 negative_sampler """basic""" +243 21 evaluator """rankbased""" +243 22 dataset """kinships""" +243 22 model """ermlp""" +243 22 loss """marginranking""" +243 22 regularizer """no""" +243 22 optimizer """adadelta""" +243 22 training_loop """owa""" +243 22 negative_sampler """basic""" +243 22 evaluator """rankbased""" +243 23 dataset """kinships""" +243 23 model """ermlp""" +243 23 loss """marginranking""" +243 23 regularizer """no""" +243 23 optimizer """adadelta""" +243 23 training_loop """owa""" +243 23 negative_sampler """basic""" +243 23 evaluator """rankbased""" +243 24 dataset """kinships""" +243 24 model """ermlp""" +243 24 loss """marginranking""" +243 24 regularizer """no""" +243 24 optimizer """adadelta""" +243 24 training_loop """owa""" +243 24 negative_sampler """basic""" +243 24 evaluator """rankbased""" +243 25 dataset """kinships""" +243 25 model """ermlp""" +243 25 loss """marginranking""" +243 25 regularizer """no""" +243 25 optimizer """adadelta""" +243 25 training_loop """owa""" +243 25 negative_sampler """basic""" +243 25 evaluator """rankbased""" +243 26 dataset """kinships""" +243 26 model """ermlp""" +243 26 loss """marginranking""" +243 26 regularizer """no""" +243 26 optimizer """adadelta""" +243 26 training_loop """owa""" +243 26 negative_sampler """basic""" +243 26 evaluator """rankbased""" +243 27 dataset """kinships""" +243 27 model """ermlp""" +243 27 loss """marginranking""" +243 27 regularizer """no""" +243 27 optimizer """adadelta""" +243 27 training_loop """owa""" +243 27 negative_sampler """basic""" +243 27 evaluator """rankbased""" +243 28 dataset """kinships""" +243 28 model """ermlp""" +243 28 loss """marginranking""" +243 28 regularizer """no""" +243 28 optimizer """adadelta""" +243 28 training_loop """owa""" +243 28 
negative_sampler """basic""" +243 28 evaluator """rankbased""" +243 29 dataset """kinships""" +243 29 model """ermlp""" +243 29 loss """marginranking""" +243 29 regularizer """no""" +243 29 optimizer """adadelta""" +243 29 training_loop """owa""" +243 29 negative_sampler """basic""" +243 29 evaluator """rankbased""" +243 30 dataset """kinships""" +243 30 model """ermlp""" +243 30 loss """marginranking""" +243 30 regularizer """no""" +243 30 optimizer """adadelta""" +243 30 training_loop """owa""" +243 30 negative_sampler """basic""" +243 30 evaluator """rankbased""" +243 31 dataset """kinships""" +243 31 model """ermlp""" +243 31 loss """marginranking""" +243 31 regularizer """no""" +243 31 optimizer """adadelta""" +243 31 training_loop """owa""" +243 31 negative_sampler """basic""" +243 31 evaluator """rankbased""" +243 32 dataset """kinships""" +243 32 model """ermlp""" +243 32 loss """marginranking""" +243 32 regularizer """no""" +243 32 optimizer """adadelta""" +243 32 training_loop """owa""" +243 32 negative_sampler """basic""" +243 32 evaluator """rankbased""" +243 33 dataset """kinships""" +243 33 model """ermlp""" +243 33 loss """marginranking""" +243 33 regularizer """no""" +243 33 optimizer """adadelta""" +243 33 training_loop """owa""" +243 33 negative_sampler """basic""" +243 33 evaluator """rankbased""" +243 34 dataset """kinships""" +243 34 model """ermlp""" +243 34 loss """marginranking""" +243 34 regularizer """no""" +243 34 optimizer """adadelta""" +243 34 training_loop """owa""" +243 34 negative_sampler """basic""" +243 34 evaluator """rankbased""" +243 35 dataset """kinships""" +243 35 model """ermlp""" +243 35 loss """marginranking""" +243 35 regularizer """no""" +243 35 optimizer """adadelta""" +243 35 training_loop """owa""" +243 35 negative_sampler """basic""" +243 35 evaluator """rankbased""" +243 36 dataset """kinships""" +243 36 model """ermlp""" +243 36 loss """marginranking""" +243 36 regularizer """no""" +243 36 optimizer """adadelta""" 
+243 36 training_loop """owa""" +243 36 negative_sampler """basic""" +243 36 evaluator """rankbased""" +243 37 dataset """kinships""" +243 37 model """ermlp""" +243 37 loss """marginranking""" +243 37 regularizer """no""" +243 37 optimizer """adadelta""" +243 37 training_loop """owa""" +243 37 negative_sampler """basic""" +243 37 evaluator """rankbased""" +243 38 dataset """kinships""" +243 38 model """ermlp""" +243 38 loss """marginranking""" +243 38 regularizer """no""" +243 38 optimizer """adadelta""" +243 38 training_loop """owa""" +243 38 negative_sampler """basic""" +243 38 evaluator """rankbased""" +243 39 dataset """kinships""" +243 39 model """ermlp""" +243 39 loss """marginranking""" +243 39 regularizer """no""" +243 39 optimizer """adadelta""" +243 39 training_loop """owa""" +243 39 negative_sampler """basic""" +243 39 evaluator """rankbased""" +243 40 dataset """kinships""" +243 40 model """ermlp""" +243 40 loss """marginranking""" +243 40 regularizer """no""" +243 40 optimizer """adadelta""" +243 40 training_loop """owa""" +243 40 negative_sampler """basic""" +243 40 evaluator """rankbased""" +243 41 dataset """kinships""" +243 41 model """ermlp""" +243 41 loss """marginranking""" +243 41 regularizer """no""" +243 41 optimizer """adadelta""" +243 41 training_loop """owa""" +243 41 negative_sampler """basic""" +243 41 evaluator """rankbased""" +243 42 dataset """kinships""" +243 42 model """ermlp""" +243 42 loss """marginranking""" +243 42 regularizer """no""" +243 42 optimizer """adadelta""" +243 42 training_loop """owa""" +243 42 negative_sampler """basic""" +243 42 evaluator """rankbased""" +243 43 dataset """kinships""" +243 43 model """ermlp""" +243 43 loss """marginranking""" +243 43 regularizer """no""" +243 43 optimizer """adadelta""" +243 43 training_loop """owa""" +243 43 negative_sampler """basic""" +243 43 evaluator """rankbased""" +243 44 dataset """kinships""" +243 44 model """ermlp""" +243 44 loss """marginranking""" +243 44 regularizer 
"""no""" +243 44 optimizer """adadelta""" +243 44 training_loop """owa""" +243 44 negative_sampler """basic""" +243 44 evaluator """rankbased""" +243 45 dataset """kinships""" +243 45 model """ermlp""" +243 45 loss """marginranking""" +243 45 regularizer """no""" +243 45 optimizer """adadelta""" +243 45 training_loop """owa""" +243 45 negative_sampler """basic""" +243 45 evaluator """rankbased""" +243 46 dataset """kinships""" +243 46 model """ermlp""" +243 46 loss """marginranking""" +243 46 regularizer """no""" +243 46 optimizer """adadelta""" +243 46 training_loop """owa""" +243 46 negative_sampler """basic""" +243 46 evaluator """rankbased""" +243 47 dataset """kinships""" +243 47 model """ermlp""" +243 47 loss """marginranking""" +243 47 regularizer """no""" +243 47 optimizer """adadelta""" +243 47 training_loop """owa""" +243 47 negative_sampler """basic""" +243 47 evaluator """rankbased""" +243 48 dataset """kinships""" +243 48 model """ermlp""" +243 48 loss """marginranking""" +243 48 regularizer """no""" +243 48 optimizer """adadelta""" +243 48 training_loop """owa""" +243 48 negative_sampler """basic""" +243 48 evaluator """rankbased""" +243 49 dataset """kinships""" +243 49 model """ermlp""" +243 49 loss """marginranking""" +243 49 regularizer """no""" +243 49 optimizer """adadelta""" +243 49 training_loop """owa""" +243 49 negative_sampler """basic""" +243 49 evaluator """rankbased""" +243 50 dataset """kinships""" +243 50 model """ermlp""" +243 50 loss """marginranking""" +243 50 regularizer """no""" +243 50 optimizer """adadelta""" +243 50 training_loop """owa""" +243 50 negative_sampler """basic""" +243 50 evaluator """rankbased""" +243 51 dataset """kinships""" +243 51 model """ermlp""" +243 51 loss """marginranking""" +243 51 regularizer """no""" +243 51 optimizer """adadelta""" +243 51 training_loop """owa""" +243 51 negative_sampler """basic""" +243 51 evaluator """rankbased""" +243 52 dataset """kinships""" +243 52 model """ermlp""" +243 52 loss 
"""marginranking""" +243 52 regularizer """no""" +243 52 optimizer """adadelta""" +243 52 training_loop """owa""" +243 52 negative_sampler """basic""" +243 52 evaluator """rankbased""" +243 53 dataset """kinships""" +243 53 model """ermlp""" +243 53 loss """marginranking""" +243 53 regularizer """no""" +243 53 optimizer """adadelta""" +243 53 training_loop """owa""" +243 53 negative_sampler """basic""" +243 53 evaluator """rankbased""" +243 54 dataset """kinships""" +243 54 model """ermlp""" +243 54 loss """marginranking""" +243 54 regularizer """no""" +243 54 optimizer """adadelta""" +243 54 training_loop """owa""" +243 54 negative_sampler """basic""" +243 54 evaluator """rankbased""" +243 55 dataset """kinships""" +243 55 model """ermlp""" +243 55 loss """marginranking""" +243 55 regularizer """no""" +243 55 optimizer """adadelta""" +243 55 training_loop """owa""" +243 55 negative_sampler """basic""" +243 55 evaluator """rankbased""" +243 56 dataset """kinships""" +243 56 model """ermlp""" +243 56 loss """marginranking""" +243 56 regularizer """no""" +243 56 optimizer """adadelta""" +243 56 training_loop """owa""" +243 56 negative_sampler """basic""" +243 56 evaluator """rankbased""" +243 57 dataset """kinships""" +243 57 model """ermlp""" +243 57 loss """marginranking""" +243 57 regularizer """no""" +243 57 optimizer """adadelta""" +243 57 training_loop """owa""" +243 57 negative_sampler """basic""" +243 57 evaluator """rankbased""" +243 58 dataset """kinships""" +243 58 model """ermlp""" +243 58 loss """marginranking""" +243 58 regularizer """no""" +243 58 optimizer """adadelta""" +243 58 training_loop """owa""" +243 58 negative_sampler """basic""" +243 58 evaluator """rankbased""" +243 59 dataset """kinships""" +243 59 model """ermlp""" +243 59 loss """marginranking""" +243 59 regularizer """no""" +243 59 optimizer """adadelta""" +243 59 training_loop """owa""" +243 59 negative_sampler """basic""" +243 59 evaluator """rankbased""" +243 60 dataset 
"""kinships""" +243 60 model """ermlp""" +243 60 loss """marginranking""" +243 60 regularizer """no""" +243 60 optimizer """adadelta""" +243 60 training_loop """owa""" +243 60 negative_sampler """basic""" +243 60 evaluator """rankbased""" +243 61 dataset """kinships""" +243 61 model """ermlp""" +243 61 loss """marginranking""" +243 61 regularizer """no""" +243 61 optimizer """adadelta""" +243 61 training_loop """owa""" +243 61 negative_sampler """basic""" +243 61 evaluator """rankbased""" +243 62 dataset """kinships""" +243 62 model """ermlp""" +243 62 loss """marginranking""" +243 62 regularizer """no""" +243 62 optimizer """adadelta""" +243 62 training_loop """owa""" +243 62 negative_sampler """basic""" +243 62 evaluator """rankbased""" +243 63 dataset """kinships""" +243 63 model """ermlp""" +243 63 loss """marginranking""" +243 63 regularizer """no""" +243 63 optimizer """adadelta""" +243 63 training_loop """owa""" +243 63 negative_sampler """basic""" +243 63 evaluator """rankbased""" +243 64 dataset """kinships""" +243 64 model """ermlp""" +243 64 loss """marginranking""" +243 64 regularizer """no""" +243 64 optimizer """adadelta""" +243 64 training_loop """owa""" +243 64 negative_sampler """basic""" +243 64 evaluator """rankbased""" +243 65 dataset """kinships""" +243 65 model """ermlp""" +243 65 loss """marginranking""" +243 65 regularizer """no""" +243 65 optimizer """adadelta""" +243 65 training_loop """owa""" +243 65 negative_sampler """basic""" +243 65 evaluator """rankbased""" +243 66 dataset """kinships""" +243 66 model """ermlp""" +243 66 loss """marginranking""" +243 66 regularizer """no""" +243 66 optimizer """adadelta""" +243 66 training_loop """owa""" +243 66 negative_sampler """basic""" +243 66 evaluator """rankbased""" +243 67 dataset """kinships""" +243 67 model """ermlp""" +243 67 loss """marginranking""" +243 67 regularizer """no""" +243 67 optimizer """adadelta""" +243 67 training_loop """owa""" +243 67 negative_sampler """basic""" +243 67 
evaluator """rankbased""" +243 68 dataset """kinships""" +243 68 model """ermlp""" +243 68 loss """marginranking""" +243 68 regularizer """no""" +243 68 optimizer """adadelta""" +243 68 training_loop """owa""" +243 68 negative_sampler """basic""" +243 68 evaluator """rankbased""" +243 69 dataset """kinships""" +243 69 model """ermlp""" +243 69 loss """marginranking""" +243 69 regularizer """no""" +243 69 optimizer """adadelta""" +243 69 training_loop """owa""" +243 69 negative_sampler """basic""" +243 69 evaluator """rankbased""" +243 70 dataset """kinships""" +243 70 model """ermlp""" +243 70 loss """marginranking""" +243 70 regularizer """no""" +243 70 optimizer """adadelta""" +243 70 training_loop """owa""" +243 70 negative_sampler """basic""" +243 70 evaluator """rankbased""" +243 71 dataset """kinships""" +243 71 model """ermlp""" +243 71 loss """marginranking""" +243 71 regularizer """no""" +243 71 optimizer """adadelta""" +243 71 training_loop """owa""" +243 71 negative_sampler """basic""" +243 71 evaluator """rankbased""" +243 72 dataset """kinships""" +243 72 model """ermlp""" +243 72 loss """marginranking""" +243 72 regularizer """no""" +243 72 optimizer """adadelta""" +243 72 training_loop """owa""" +243 72 negative_sampler """basic""" +243 72 evaluator """rankbased""" +243 73 dataset """kinships""" +243 73 model """ermlp""" +243 73 loss """marginranking""" +243 73 regularizer """no""" +243 73 optimizer """adadelta""" +243 73 training_loop """owa""" +243 73 negative_sampler """basic""" +243 73 evaluator """rankbased""" +243 74 dataset """kinships""" +243 74 model """ermlp""" +243 74 loss """marginranking""" +243 74 regularizer """no""" +243 74 optimizer """adadelta""" +243 74 training_loop """owa""" +243 74 negative_sampler """basic""" +243 74 evaluator """rankbased""" +243 75 dataset """kinships""" +243 75 model """ermlp""" +243 75 loss """marginranking""" +243 75 regularizer """no""" +243 75 optimizer """adadelta""" +243 75 training_loop """owa""" +243 
75 negative_sampler """basic""" +243 75 evaluator """rankbased""" +243 76 dataset """kinships""" +243 76 model """ermlp""" +243 76 loss """marginranking""" +243 76 regularizer """no""" +243 76 optimizer """adadelta""" +243 76 training_loop """owa""" +243 76 negative_sampler """basic""" +243 76 evaluator """rankbased""" +243 77 dataset """kinships""" +243 77 model """ermlp""" +243 77 loss """marginranking""" +243 77 regularizer """no""" +243 77 optimizer """adadelta""" +243 77 training_loop """owa""" +243 77 negative_sampler """basic""" +243 77 evaluator """rankbased""" +243 78 dataset """kinships""" +243 78 model """ermlp""" +243 78 loss """marginranking""" +243 78 regularizer """no""" +243 78 optimizer """adadelta""" +243 78 training_loop """owa""" +243 78 negative_sampler """basic""" +243 78 evaluator """rankbased""" +243 79 dataset """kinships""" +243 79 model """ermlp""" +243 79 loss """marginranking""" +243 79 regularizer """no""" +243 79 optimizer """adadelta""" +243 79 training_loop """owa""" +243 79 negative_sampler """basic""" +243 79 evaluator """rankbased""" +243 80 dataset """kinships""" +243 80 model """ermlp""" +243 80 loss """marginranking""" +243 80 regularizer """no""" +243 80 optimizer """adadelta""" +243 80 training_loop """owa""" +243 80 negative_sampler """basic""" +243 80 evaluator """rankbased""" +243 81 dataset """kinships""" +243 81 model """ermlp""" +243 81 loss """marginranking""" +243 81 regularizer """no""" +243 81 optimizer """adadelta""" +243 81 training_loop """owa""" +243 81 negative_sampler """basic""" +243 81 evaluator """rankbased""" +243 82 dataset """kinships""" +243 82 model """ermlp""" +243 82 loss """marginranking""" +243 82 regularizer """no""" +243 82 optimizer """adadelta""" +243 82 training_loop """owa""" +243 82 negative_sampler """basic""" +243 82 evaluator """rankbased""" +243 83 dataset """kinships""" +243 83 model """ermlp""" +243 83 loss """marginranking""" +243 83 regularizer """no""" +243 83 optimizer 
"""adadelta""" +243 83 training_loop """owa""" +243 83 negative_sampler """basic""" +243 83 evaluator """rankbased""" +243 84 dataset """kinships""" +243 84 model """ermlp""" +243 84 loss """marginranking""" +243 84 regularizer """no""" +243 84 optimizer """adadelta""" +243 84 training_loop """owa""" +243 84 negative_sampler """basic""" +243 84 evaluator """rankbased""" +243 85 dataset """kinships""" +243 85 model """ermlp""" +243 85 loss """marginranking""" +243 85 regularizer """no""" +243 85 optimizer """adadelta""" +243 85 training_loop """owa""" +243 85 negative_sampler """basic""" +243 85 evaluator """rankbased""" +243 86 dataset """kinships""" +243 86 model """ermlp""" +243 86 loss """marginranking""" +243 86 regularizer """no""" +243 86 optimizer """adadelta""" +243 86 training_loop """owa""" +243 86 negative_sampler """basic""" +243 86 evaluator """rankbased""" +243 87 dataset """kinships""" +243 87 model """ermlp""" +243 87 loss """marginranking""" +243 87 regularizer """no""" +243 87 optimizer """adadelta""" +243 87 training_loop """owa""" +243 87 negative_sampler """basic""" +243 87 evaluator """rankbased""" +243 88 dataset """kinships""" +243 88 model """ermlp""" +243 88 loss """marginranking""" +243 88 regularizer """no""" +243 88 optimizer """adadelta""" +243 88 training_loop """owa""" +243 88 negative_sampler """basic""" +243 88 evaluator """rankbased""" +243 89 dataset """kinships""" +243 89 model """ermlp""" +243 89 loss """marginranking""" +243 89 regularizer """no""" +243 89 optimizer """adadelta""" +243 89 training_loop """owa""" +243 89 negative_sampler """basic""" +243 89 evaluator """rankbased""" +243 90 dataset """kinships""" +243 90 model """ermlp""" +243 90 loss """marginranking""" +243 90 regularizer """no""" +243 90 optimizer """adadelta""" +243 90 training_loop """owa""" +243 90 negative_sampler """basic""" +243 90 evaluator """rankbased""" +243 91 dataset """kinships""" +243 91 model """ermlp""" +243 91 loss """marginranking""" +243 
91 regularizer """no""" +243 91 optimizer """adadelta""" +243 91 training_loop """owa""" +243 91 negative_sampler """basic""" +243 91 evaluator """rankbased""" +243 92 dataset """kinships""" +243 92 model """ermlp""" +243 92 loss """marginranking""" +243 92 regularizer """no""" +243 92 optimizer """adadelta""" +243 92 training_loop """owa""" +243 92 negative_sampler """basic""" +243 92 evaluator """rankbased""" +243 93 dataset """kinships""" +243 93 model """ermlp""" +243 93 loss """marginranking""" +243 93 regularizer """no""" +243 93 optimizer """adadelta""" +243 93 training_loop """owa""" +243 93 negative_sampler """basic""" +243 93 evaluator """rankbased""" +243 94 dataset """kinships""" +243 94 model """ermlp""" +243 94 loss """marginranking""" +243 94 regularizer """no""" +243 94 optimizer """adadelta""" +243 94 training_loop """owa""" +243 94 negative_sampler """basic""" +243 94 evaluator """rankbased""" +243 95 dataset """kinships""" +243 95 model """ermlp""" +243 95 loss """marginranking""" +243 95 regularizer """no""" +243 95 optimizer """adadelta""" +243 95 training_loop """owa""" +243 95 negative_sampler """basic""" +243 95 evaluator """rankbased""" +243 96 dataset """kinships""" +243 96 model """ermlp""" +243 96 loss """marginranking""" +243 96 regularizer """no""" +243 96 optimizer """adadelta""" +243 96 training_loop """owa""" +243 96 negative_sampler """basic""" +243 96 evaluator """rankbased""" +243 97 dataset """kinships""" +243 97 model """ermlp""" +243 97 loss """marginranking""" +243 97 regularizer """no""" +243 97 optimizer """adadelta""" +243 97 training_loop """owa""" +243 97 negative_sampler """basic""" +243 97 evaluator """rankbased""" +243 98 dataset """kinships""" +243 98 model """ermlp""" +243 98 loss """marginranking""" +243 98 regularizer """no""" +243 98 optimizer """adadelta""" +243 98 training_loop """owa""" +243 98 negative_sampler """basic""" +243 98 evaluator """rankbased""" +243 99 dataset """kinships""" +243 99 model 
"""ermlp""" +243 99 loss """marginranking""" +243 99 regularizer """no""" +243 99 optimizer """adadelta""" +243 99 training_loop """owa""" +243 99 negative_sampler """basic""" +243 99 evaluator """rankbased""" +243 100 dataset """kinships""" +243 100 model """ermlp""" +243 100 loss """marginranking""" +243 100 regularizer """no""" +243 100 optimizer """adadelta""" +243 100 training_loop """owa""" +243 100 negative_sampler """basic""" +243 100 evaluator """rankbased""" +244 1 model.embedding_dim 1.0 +244 1 loss.margin 25.32648427975583 +244 1 loss.adversarial_temperature 0.15819285056742383 +244 1 negative_sampler.num_negs_per_pos 9.0 +244 1 training.batch_size 1.0 +244 2 model.embedding_dim 2.0 +244 2 loss.margin 18.274225787646714 +244 2 loss.adversarial_temperature 0.32552228610369516 +244 2 negative_sampler.num_negs_per_pos 88.0 +244 2 training.batch_size 1.0 +244 3 model.embedding_dim 1.0 +244 3 loss.margin 18.79466754265871 +244 3 loss.adversarial_temperature 0.5594310791713824 +244 3 negative_sampler.num_negs_per_pos 61.0 +244 3 training.batch_size 1.0 +244 4 model.embedding_dim 0.0 +244 4 loss.margin 15.729265971196318 +244 4 loss.adversarial_temperature 0.2128593478872064 +244 4 negative_sampler.num_negs_per_pos 51.0 +244 4 training.batch_size 1.0 +244 5 model.embedding_dim 1.0 +244 5 loss.margin 2.249246478320588 +244 5 loss.adversarial_temperature 0.15709797498246422 +244 5 negative_sampler.num_negs_per_pos 54.0 +244 5 training.batch_size 0.0 +244 6 model.embedding_dim 1.0 +244 6 loss.margin 26.281524062871352 +244 6 loss.adversarial_temperature 0.9654245175053897 +244 6 negative_sampler.num_negs_per_pos 50.0 +244 6 training.batch_size 1.0 +244 7 model.embedding_dim 1.0 +244 7 loss.margin 22.202605395775603 +244 7 loss.adversarial_temperature 0.33790091437745173 +244 7 negative_sampler.num_negs_per_pos 36.0 +244 7 training.batch_size 1.0 +244 8 model.embedding_dim 2.0 +244 8 loss.margin 9.465214113711289 +244 8 loss.adversarial_temperature 
0.6403329821339707 +244 8 negative_sampler.num_negs_per_pos 64.0 +244 8 training.batch_size 0.0 +244 9 model.embedding_dim 1.0 +244 9 loss.margin 3.0436317190822835 +244 9 loss.adversarial_temperature 0.7836269456717377 +244 9 negative_sampler.num_negs_per_pos 49.0 +244 9 training.batch_size 2.0 +244 10 model.embedding_dim 1.0 +244 10 loss.margin 22.269099448713618 +244 10 loss.adversarial_temperature 0.1368031669910648 +244 10 negative_sampler.num_negs_per_pos 70.0 +244 10 training.batch_size 1.0 +244 11 model.embedding_dim 2.0 +244 11 loss.margin 23.339911047117024 +244 11 loss.adversarial_temperature 0.674870868055921 +244 11 negative_sampler.num_negs_per_pos 32.0 +244 11 training.batch_size 0.0 +244 12 model.embedding_dim 2.0 +244 12 loss.margin 6.463044477609704 +244 12 loss.adversarial_temperature 0.48329684743440815 +244 12 negative_sampler.num_negs_per_pos 26.0 +244 12 training.batch_size 2.0 +244 13 model.embedding_dim 1.0 +244 13 loss.margin 7.142372491071653 +244 13 loss.adversarial_temperature 0.9386419652030228 +244 13 negative_sampler.num_negs_per_pos 56.0 +244 13 training.batch_size 2.0 +244 14 model.embedding_dim 0.0 +244 14 loss.margin 13.697605508411248 +244 14 loss.adversarial_temperature 0.32541797261667826 +244 14 negative_sampler.num_negs_per_pos 16.0 +244 14 training.batch_size 0.0 +244 15 model.embedding_dim 1.0 +244 15 loss.margin 3.5505841647743783 +244 15 loss.adversarial_temperature 0.3630747659031952 +244 15 negative_sampler.num_negs_per_pos 40.0 +244 15 training.batch_size 0.0 +244 16 model.embedding_dim 1.0 +244 16 loss.margin 19.10113610969857 +244 16 loss.adversarial_temperature 0.2642288938005775 +244 16 negative_sampler.num_negs_per_pos 77.0 +244 16 training.batch_size 1.0 +244 17 model.embedding_dim 1.0 +244 17 loss.margin 12.874222243166916 +244 17 loss.adversarial_temperature 0.4900321282533614 +244 17 negative_sampler.num_negs_per_pos 25.0 +244 17 training.batch_size 1.0 +244 18 model.embedding_dim 0.0 +244 18 loss.margin 
26.981346493152902 +244 18 loss.adversarial_temperature 0.8453681343421199 +244 18 negative_sampler.num_negs_per_pos 93.0 +244 18 training.batch_size 1.0 +244 19 model.embedding_dim 2.0 +244 19 loss.margin 15.479510004902108 +244 19 loss.adversarial_temperature 0.3765612765863958 +244 19 negative_sampler.num_negs_per_pos 9.0 +244 19 training.batch_size 0.0 +244 20 model.embedding_dim 1.0 +244 20 loss.margin 9.935542310084557 +244 20 loss.adversarial_temperature 0.7154345698688261 +244 20 negative_sampler.num_negs_per_pos 62.0 +244 20 training.batch_size 1.0 +244 21 model.embedding_dim 2.0 +244 21 loss.margin 17.517516550067818 +244 21 loss.adversarial_temperature 0.7542038356518014 +244 21 negative_sampler.num_negs_per_pos 22.0 +244 21 training.batch_size 1.0 +244 22 model.embedding_dim 2.0 +244 22 loss.margin 29.276623891898428 +244 22 loss.adversarial_temperature 0.24022692052083294 +244 22 negative_sampler.num_negs_per_pos 2.0 +244 22 training.batch_size 0.0 +244 23 model.embedding_dim 1.0 +244 23 loss.margin 22.990857883294257 +244 23 loss.adversarial_temperature 0.8645324906828318 +244 23 negative_sampler.num_negs_per_pos 73.0 +244 23 training.batch_size 1.0 +244 24 model.embedding_dim 2.0 +244 24 loss.margin 7.446681172702132 +244 24 loss.adversarial_temperature 0.32914096696137085 +244 24 negative_sampler.num_negs_per_pos 79.0 +244 24 training.batch_size 2.0 +244 25 model.embedding_dim 2.0 +244 25 loss.margin 22.5297047840636 +244 25 loss.adversarial_temperature 0.7884259346377306 +244 25 negative_sampler.num_negs_per_pos 95.0 +244 25 training.batch_size 0.0 +244 26 model.embedding_dim 1.0 +244 26 loss.margin 8.581309754177562 +244 26 loss.adversarial_temperature 0.7441773526320924 +244 26 negative_sampler.num_negs_per_pos 77.0 +244 26 training.batch_size 1.0 +244 27 model.embedding_dim 2.0 +244 27 loss.margin 5.334627440850074 +244 27 loss.adversarial_temperature 0.6778848026399397 +244 27 negative_sampler.num_negs_per_pos 47.0 +244 27 training.batch_size 
2.0 +244 28 model.embedding_dim 0.0 +244 28 loss.margin 18.744804770907248 +244 28 loss.adversarial_temperature 0.9211865257726319 +244 28 negative_sampler.num_negs_per_pos 51.0 +244 28 training.batch_size 2.0 +244 29 model.embedding_dim 0.0 +244 29 loss.margin 25.426752237486564 +244 29 loss.adversarial_temperature 0.3195693956465898 +244 29 negative_sampler.num_negs_per_pos 53.0 +244 29 training.batch_size 2.0 +244 30 model.embedding_dim 1.0 +244 30 loss.margin 3.0043536855265813 +244 30 loss.adversarial_temperature 0.29213897831028557 +244 30 negative_sampler.num_negs_per_pos 66.0 +244 30 training.batch_size 1.0 +244 31 model.embedding_dim 2.0 +244 31 loss.margin 23.99711058827023 +244 31 loss.adversarial_temperature 0.14018253883404594 +244 31 negative_sampler.num_negs_per_pos 67.0 +244 31 training.batch_size 2.0 +244 32 model.embedding_dim 1.0 +244 32 loss.margin 1.7382639642205944 +244 32 loss.adversarial_temperature 0.26814115883021317 +244 32 negative_sampler.num_negs_per_pos 35.0 +244 32 training.batch_size 0.0 +244 33 model.embedding_dim 0.0 +244 33 loss.margin 10.375488780917383 +244 33 loss.adversarial_temperature 0.24835046972013336 +244 33 negative_sampler.num_negs_per_pos 23.0 +244 33 training.batch_size 0.0 +244 34 model.embedding_dim 1.0 +244 34 loss.margin 22.683631184941014 +244 34 loss.adversarial_temperature 0.38466316466919215 +244 34 negative_sampler.num_negs_per_pos 3.0 +244 34 training.batch_size 0.0 +244 35 model.embedding_dim 0.0 +244 35 loss.margin 17.57592639365889 +244 35 loss.adversarial_temperature 0.9725302521506128 +244 35 negative_sampler.num_negs_per_pos 61.0 +244 35 training.batch_size 2.0 +244 36 model.embedding_dim 2.0 +244 36 loss.margin 12.420308398295662 +244 36 loss.adversarial_temperature 0.8428680329345372 +244 36 negative_sampler.num_negs_per_pos 54.0 +244 36 training.batch_size 2.0 +244 37 model.embedding_dim 0.0 +244 37 loss.margin 26.734832677876497 +244 37 loss.adversarial_temperature 0.4119852712376325 +244 37 
negative_sampler.num_negs_per_pos 78.0 +244 37 training.batch_size 0.0 +244 38 model.embedding_dim 1.0 +244 38 loss.margin 8.814415884897661 +244 38 loss.adversarial_temperature 0.5611065152756166 +244 38 negative_sampler.num_negs_per_pos 50.0 +244 38 training.batch_size 0.0 +244 39 model.embedding_dim 1.0 +244 39 loss.margin 8.25429883938563 +244 39 loss.adversarial_temperature 0.7191624275427408 +244 39 negative_sampler.num_negs_per_pos 47.0 +244 39 training.batch_size 2.0 +244 40 model.embedding_dim 2.0 +244 40 loss.margin 12.943690557135982 +244 40 loss.adversarial_temperature 0.9353088505384819 +244 40 negative_sampler.num_negs_per_pos 24.0 +244 40 training.batch_size 1.0 +244 41 model.embedding_dim 2.0 +244 41 loss.margin 21.341588177845086 +244 41 loss.adversarial_temperature 0.9800725760630831 +244 41 negative_sampler.num_negs_per_pos 88.0 +244 41 training.batch_size 1.0 +244 42 model.embedding_dim 0.0 +244 42 loss.margin 5.6059113690434 +244 42 loss.adversarial_temperature 0.17087192979547258 +244 42 negative_sampler.num_negs_per_pos 64.0 +244 42 training.batch_size 1.0 +244 43 model.embedding_dim 2.0 +244 43 loss.margin 21.246127365063995 +244 43 loss.adversarial_temperature 0.12741450050995873 +244 43 negative_sampler.num_negs_per_pos 85.0 +244 43 training.batch_size 1.0 +244 44 model.embedding_dim 2.0 +244 44 loss.margin 16.128217528009905 +244 44 loss.adversarial_temperature 0.9314851706525511 +244 44 negative_sampler.num_negs_per_pos 65.0 +244 44 training.batch_size 2.0 +244 45 model.embedding_dim 0.0 +244 45 loss.margin 5.39920292695411 +244 45 loss.adversarial_temperature 0.608778400002338 +244 45 negative_sampler.num_negs_per_pos 75.0 +244 45 training.batch_size 1.0 +244 46 model.embedding_dim 1.0 +244 46 loss.margin 9.183056605714428 +244 46 loss.adversarial_temperature 0.5931381496509445 +244 46 negative_sampler.num_negs_per_pos 6.0 +244 46 training.batch_size 1.0 +244 47 model.embedding_dim 1.0 +244 47 loss.margin 4.625442557011105 +244 47 
loss.adversarial_temperature 0.6535445149312235 +244 47 negative_sampler.num_negs_per_pos 80.0 +244 47 training.batch_size 1.0 +244 48 model.embedding_dim 1.0 +244 48 loss.margin 10.36981722660503 +244 48 loss.adversarial_temperature 0.4949689805309987 +244 48 negative_sampler.num_negs_per_pos 39.0 +244 48 training.batch_size 1.0 +244 49 model.embedding_dim 1.0 +244 49 loss.margin 9.654993161328118 +244 49 loss.adversarial_temperature 0.10882110525665403 +244 49 negative_sampler.num_negs_per_pos 83.0 +244 49 training.batch_size 2.0 +244 50 model.embedding_dim 0.0 +244 50 loss.margin 15.210172053071089 +244 50 loss.adversarial_temperature 0.7952570442419503 +244 50 negative_sampler.num_negs_per_pos 55.0 +244 50 training.batch_size 0.0 +244 51 model.embedding_dim 2.0 +244 51 loss.margin 14.067767997558562 +244 51 loss.adversarial_temperature 0.9179928959883709 +244 51 negative_sampler.num_negs_per_pos 74.0 +244 51 training.batch_size 0.0 +244 52 model.embedding_dim 1.0 +244 52 loss.margin 17.874179514369953 +244 52 loss.adversarial_temperature 0.2001990358721788 +244 52 negative_sampler.num_negs_per_pos 63.0 +244 52 training.batch_size 0.0 +244 53 model.embedding_dim 2.0 +244 53 loss.margin 28.341796147927823 +244 53 loss.adversarial_temperature 0.9866220904578431 +244 53 negative_sampler.num_negs_per_pos 83.0 +244 53 training.batch_size 2.0 +244 54 model.embedding_dim 2.0 +244 54 loss.margin 13.224491712506104 +244 54 loss.adversarial_temperature 0.6599403127937872 +244 54 negative_sampler.num_negs_per_pos 2.0 +244 54 training.batch_size 2.0 +244 55 model.embedding_dim 1.0 +244 55 loss.margin 17.960230144355215 +244 55 loss.adversarial_temperature 0.7135210946432827 +244 55 negative_sampler.num_negs_per_pos 16.0 +244 55 training.batch_size 1.0 +244 56 model.embedding_dim 2.0 +244 56 loss.margin 11.436174458174989 +244 56 loss.adversarial_temperature 0.9158101431073696 +244 56 negative_sampler.num_negs_per_pos 45.0 +244 56 training.batch_size 0.0 +244 57 
model.embedding_dim 2.0 +244 57 loss.margin 2.3915227506856347 +244 57 loss.adversarial_temperature 0.9925373080224215 +244 57 negative_sampler.num_negs_per_pos 96.0 +244 57 training.batch_size 1.0 +244 58 model.embedding_dim 0.0 +244 58 loss.margin 22.566094300120152 +244 58 loss.adversarial_temperature 0.4945388881282282 +244 58 negative_sampler.num_negs_per_pos 91.0 +244 58 training.batch_size 2.0 +244 59 model.embedding_dim 2.0 +244 59 loss.margin 13.759929708862117 +244 59 loss.adversarial_temperature 0.38847470351343294 +244 59 negative_sampler.num_negs_per_pos 77.0 +244 59 training.batch_size 2.0 +244 60 model.embedding_dim 2.0 +244 60 loss.margin 25.328514356716425 +244 60 loss.adversarial_temperature 0.18959520020053758 +244 60 negative_sampler.num_negs_per_pos 82.0 +244 60 training.batch_size 0.0 +244 61 model.embedding_dim 0.0 +244 61 loss.margin 28.177852597059708 +244 61 loss.adversarial_temperature 0.7686273288786394 +244 61 negative_sampler.num_negs_per_pos 93.0 +244 61 training.batch_size 0.0 +244 62 model.embedding_dim 2.0 +244 62 loss.margin 25.768915367451374 +244 62 loss.adversarial_temperature 0.7789302372828774 +244 62 negative_sampler.num_negs_per_pos 74.0 +244 62 training.batch_size 0.0 +244 63 model.embedding_dim 1.0 +244 63 loss.margin 20.442035802726437 +244 63 loss.adversarial_temperature 0.9472526872935775 +244 63 negative_sampler.num_negs_per_pos 72.0 +244 63 training.batch_size 1.0 +244 64 model.embedding_dim 0.0 +244 64 loss.margin 8.10242638821686 +244 64 loss.adversarial_temperature 0.8296055819626157 +244 64 negative_sampler.num_negs_per_pos 97.0 +244 64 training.batch_size 2.0 +244 65 model.embedding_dim 0.0 +244 65 loss.margin 24.882259037157663 +244 65 loss.adversarial_temperature 0.8702902785766682 +244 65 negative_sampler.num_negs_per_pos 65.0 +244 65 training.batch_size 1.0 +244 66 model.embedding_dim 1.0 +244 66 loss.margin 18.06313590786329 +244 66 loss.adversarial_temperature 0.31425318530183055 +244 66 
negative_sampler.num_negs_per_pos 22.0 +244 66 training.batch_size 2.0 +244 67 model.embedding_dim 2.0 +244 67 loss.margin 28.067257931071794 +244 67 loss.adversarial_temperature 0.194729674078017 +244 67 negative_sampler.num_negs_per_pos 22.0 +244 67 training.batch_size 0.0 +244 68 model.embedding_dim 0.0 +244 68 loss.margin 14.245893680530195 +244 68 loss.adversarial_temperature 0.6997661306439352 +244 68 negative_sampler.num_negs_per_pos 28.0 +244 68 training.batch_size 2.0 +244 69 model.embedding_dim 2.0 +244 69 loss.margin 12.377322407783877 +244 69 loss.adversarial_temperature 0.46128992527896184 +244 69 negative_sampler.num_negs_per_pos 10.0 +244 69 training.batch_size 2.0 +244 70 model.embedding_dim 1.0 +244 70 loss.margin 7.8642917968719095 +244 70 loss.adversarial_temperature 0.47327409031502865 +244 70 negative_sampler.num_negs_per_pos 28.0 +244 70 training.batch_size 2.0 +244 71 model.embedding_dim 1.0 +244 71 loss.margin 5.915052377794111 +244 71 loss.adversarial_temperature 0.9418869797224322 +244 71 negative_sampler.num_negs_per_pos 60.0 +244 71 training.batch_size 2.0 +244 72 model.embedding_dim 2.0 +244 72 loss.margin 17.53689449398644 +244 72 loss.adversarial_temperature 0.7923629728626325 +244 72 negative_sampler.num_negs_per_pos 49.0 +244 72 training.batch_size 0.0 +244 73 model.embedding_dim 0.0 +244 73 loss.margin 24.68145551755906 +244 73 loss.adversarial_temperature 0.5294800674522381 +244 73 negative_sampler.num_negs_per_pos 23.0 +244 73 training.batch_size 0.0 +244 74 model.embedding_dim 0.0 +244 74 loss.margin 28.226841569780202 +244 74 loss.adversarial_temperature 0.6423894946323556 +244 74 negative_sampler.num_negs_per_pos 75.0 +244 74 training.batch_size 2.0 +244 75 model.embedding_dim 2.0 +244 75 loss.margin 22.16977367396963 +244 75 loss.adversarial_temperature 0.10331506945899549 +244 75 negative_sampler.num_negs_per_pos 88.0 +244 75 training.batch_size 1.0 +244 76 model.embedding_dim 0.0 +244 76 loss.margin 28.76042996089729 +244 
76 loss.adversarial_temperature 0.16227882649438136 +244 76 negative_sampler.num_negs_per_pos 24.0 +244 76 training.batch_size 2.0 +244 77 model.embedding_dim 1.0 +244 77 loss.margin 28.31344820308727 +244 77 loss.adversarial_temperature 0.39044677661417726 +244 77 negative_sampler.num_negs_per_pos 70.0 +244 77 training.batch_size 0.0 +244 78 model.embedding_dim 0.0 +244 78 loss.margin 27.83428791748264 +244 78 loss.adversarial_temperature 0.8475086734650349 +244 78 negative_sampler.num_negs_per_pos 80.0 +244 78 training.batch_size 1.0 +244 79 model.embedding_dim 1.0 +244 79 loss.margin 27.296544833079682 +244 79 loss.adversarial_temperature 0.24066001109391164 +244 79 negative_sampler.num_negs_per_pos 99.0 +244 79 training.batch_size 0.0 +244 80 model.embedding_dim 0.0 +244 80 loss.margin 27.19113910910345 +244 80 loss.adversarial_temperature 0.37768694793243907 +244 80 negative_sampler.num_negs_per_pos 10.0 +244 80 training.batch_size 0.0 +244 81 model.embedding_dim 1.0 +244 81 loss.margin 19.58627229727362 +244 81 loss.adversarial_temperature 0.5883792007446903 +244 81 negative_sampler.num_negs_per_pos 39.0 +244 81 training.batch_size 2.0 +244 82 model.embedding_dim 2.0 +244 82 loss.margin 23.820022167333576 +244 82 loss.adversarial_temperature 0.44476911262758023 +244 82 negative_sampler.num_negs_per_pos 44.0 +244 82 training.batch_size 2.0 +244 83 model.embedding_dim 0.0 +244 83 loss.margin 3.3071642204624814 +244 83 loss.adversarial_temperature 0.6018640737201753 +244 83 negative_sampler.num_negs_per_pos 72.0 +244 83 training.batch_size 1.0 +244 84 model.embedding_dim 0.0 +244 84 loss.margin 26.49970515864087 +244 84 loss.adversarial_temperature 0.2767278800866288 +244 84 negative_sampler.num_negs_per_pos 38.0 +244 84 training.batch_size 2.0 +244 85 model.embedding_dim 1.0 +244 85 loss.margin 3.7796548039245588 +244 85 loss.adversarial_temperature 0.5410663273102309 +244 85 negative_sampler.num_negs_per_pos 21.0 +244 85 training.batch_size 2.0 +244 86 
model.embedding_dim 0.0 +244 86 loss.margin 6.284718504175921 +244 86 loss.adversarial_temperature 0.7970512887136941 +244 86 negative_sampler.num_negs_per_pos 79.0 +244 86 training.batch_size 0.0 +244 87 model.embedding_dim 2.0 +244 87 loss.margin 29.562032729924518 +244 87 loss.adversarial_temperature 0.593978500732772 +244 87 negative_sampler.num_negs_per_pos 65.0 +244 87 training.batch_size 2.0 +244 88 model.embedding_dim 1.0 +244 88 loss.margin 15.69126209970094 +244 88 loss.adversarial_temperature 0.4550480848791544 +244 88 negative_sampler.num_negs_per_pos 11.0 +244 88 training.batch_size 2.0 +244 89 model.embedding_dim 1.0 +244 89 loss.margin 8.345887262854257 +244 89 loss.adversarial_temperature 0.336196410308342 +244 89 negative_sampler.num_negs_per_pos 17.0 +244 89 training.batch_size 2.0 +244 90 model.embedding_dim 2.0 +244 90 loss.margin 10.447294837612212 +244 90 loss.adversarial_temperature 0.820206822079362 +244 90 negative_sampler.num_negs_per_pos 66.0 +244 90 training.batch_size 1.0 +244 91 model.embedding_dim 0.0 +244 91 loss.margin 8.110819778280598 +244 91 loss.adversarial_temperature 0.1266805441586063 +244 91 negative_sampler.num_negs_per_pos 89.0 +244 91 training.batch_size 1.0 +244 92 model.embedding_dim 2.0 +244 92 loss.margin 13.61446892413616 +244 92 loss.adversarial_temperature 0.49795540208363354 +244 92 negative_sampler.num_negs_per_pos 43.0 +244 92 training.batch_size 1.0 +244 93 model.embedding_dim 2.0 +244 93 loss.margin 16.75087141796979 +244 93 loss.adversarial_temperature 0.60549726685669 +244 93 negative_sampler.num_negs_per_pos 90.0 +244 93 training.batch_size 1.0 +244 94 model.embedding_dim 2.0 +244 94 loss.margin 9.73962297177313 +244 94 loss.adversarial_temperature 0.2253986335975282 +244 94 negative_sampler.num_negs_per_pos 77.0 +244 94 training.batch_size 1.0 +244 95 model.embedding_dim 0.0 +244 95 loss.margin 15.040997908883767 +244 95 loss.adversarial_temperature 0.9745232477043271 +244 95 
negative_sampler.num_negs_per_pos 48.0 +244 95 training.batch_size 0.0 +244 96 model.embedding_dim 2.0 +244 96 loss.margin 9.740803087272681 +244 96 loss.adversarial_temperature 0.23365169841378908 +244 96 negative_sampler.num_negs_per_pos 74.0 +244 96 training.batch_size 1.0 +244 97 model.embedding_dim 1.0 +244 97 loss.margin 22.096418092010214 +244 97 loss.adversarial_temperature 0.670992137465976 +244 97 negative_sampler.num_negs_per_pos 33.0 +244 97 training.batch_size 2.0 +244 98 model.embedding_dim 0.0 +244 98 loss.margin 6.485569606281726 +244 98 loss.adversarial_temperature 0.8614417465380996 +244 98 negative_sampler.num_negs_per_pos 15.0 +244 98 training.batch_size 2.0 +244 99 model.embedding_dim 0.0 +244 99 loss.margin 1.3554738828174107 +244 99 loss.adversarial_temperature 0.5756960826818831 +244 99 negative_sampler.num_negs_per_pos 74.0 +244 99 training.batch_size 1.0 +244 100 model.embedding_dim 1.0 +244 100 loss.margin 9.725291868579419 +244 100 loss.adversarial_temperature 0.4444326210352243 +244 100 negative_sampler.num_negs_per_pos 53.0 +244 100 training.batch_size 0.0 +244 1 dataset """kinships""" +244 1 model """ermlp""" +244 1 loss """nssa""" +244 1 regularizer """no""" +244 1 optimizer """adadelta""" +244 1 training_loop """owa""" +244 1 negative_sampler """basic""" +244 1 evaluator """rankbased""" +244 2 dataset """kinships""" +244 2 model """ermlp""" +244 2 loss """nssa""" +244 2 regularizer """no""" +244 2 optimizer """adadelta""" +244 2 training_loop """owa""" +244 2 negative_sampler """basic""" +244 2 evaluator """rankbased""" +244 3 dataset """kinships""" +244 3 model """ermlp""" +244 3 loss """nssa""" +244 3 regularizer """no""" +244 3 optimizer """adadelta""" +244 3 training_loop """owa""" +244 3 negative_sampler """basic""" +244 3 evaluator """rankbased""" +244 4 dataset """kinships""" +244 4 model """ermlp""" +244 4 loss """nssa""" +244 4 regularizer """no""" +244 4 optimizer """adadelta""" +244 4 training_loop """owa""" +244 4 
negative_sampler """basic""" +244 4 evaluator """rankbased""" +244 5 dataset """kinships""" +244 5 model """ermlp""" +244 5 loss """nssa""" +244 5 regularizer """no""" +244 5 optimizer """adadelta""" +244 5 training_loop """owa""" +244 5 negative_sampler """basic""" +244 5 evaluator """rankbased""" +244 6 dataset """kinships""" +244 6 model """ermlp""" +244 6 loss """nssa""" +244 6 regularizer """no""" +244 6 optimizer """adadelta""" +244 6 training_loop """owa""" +244 6 negative_sampler """basic""" +244 6 evaluator """rankbased""" +244 7 dataset """kinships""" +244 7 model """ermlp""" +244 7 loss """nssa""" +244 7 regularizer """no""" +244 7 optimizer """adadelta""" +244 7 training_loop """owa""" +244 7 negative_sampler """basic""" +244 7 evaluator """rankbased""" +244 8 dataset """kinships""" +244 8 model """ermlp""" +244 8 loss """nssa""" +244 8 regularizer """no""" +244 8 optimizer """adadelta""" +244 8 training_loop """owa""" +244 8 negative_sampler """basic""" +244 8 evaluator """rankbased""" +244 9 dataset """kinships""" +244 9 model """ermlp""" +244 9 loss """nssa""" +244 9 regularizer """no""" +244 9 optimizer """adadelta""" +244 9 training_loop """owa""" +244 9 negative_sampler """basic""" +244 9 evaluator """rankbased""" +244 10 dataset """kinships""" +244 10 model """ermlp""" +244 10 loss """nssa""" +244 10 regularizer """no""" +244 10 optimizer """adadelta""" +244 10 training_loop """owa""" +244 10 negative_sampler """basic""" +244 10 evaluator """rankbased""" +244 11 dataset """kinships""" +244 11 model """ermlp""" +244 11 loss """nssa""" +244 11 regularizer """no""" +244 11 optimizer """adadelta""" +244 11 training_loop """owa""" +244 11 negative_sampler """basic""" +244 11 evaluator """rankbased""" +244 12 dataset """kinships""" +244 12 model """ermlp""" +244 12 loss """nssa""" +244 12 regularizer """no""" +244 12 optimizer """adadelta""" +244 12 training_loop """owa""" +244 12 negative_sampler """basic""" +244 12 evaluator """rankbased""" +244 13 
dataset """kinships""" +244 13 model """ermlp""" +244 13 loss """nssa""" +244 13 regularizer """no""" +244 13 optimizer """adadelta""" +244 13 training_loop """owa""" +244 13 negative_sampler """basic""" +244 13 evaluator """rankbased""" +244 14 dataset """kinships""" +244 14 model """ermlp""" +244 14 loss """nssa""" +244 14 regularizer """no""" +244 14 optimizer """adadelta""" +244 14 training_loop """owa""" +244 14 negative_sampler """basic""" +244 14 evaluator """rankbased""" +244 15 dataset """kinships""" +244 15 model """ermlp""" +244 15 loss """nssa""" +244 15 regularizer """no""" +244 15 optimizer """adadelta""" +244 15 training_loop """owa""" +244 15 negative_sampler """basic""" +244 15 evaluator """rankbased""" +244 16 dataset """kinships""" +244 16 model """ermlp""" +244 16 loss """nssa""" +244 16 regularizer """no""" +244 16 optimizer """adadelta""" +244 16 training_loop """owa""" +244 16 negative_sampler """basic""" +244 16 evaluator """rankbased""" +244 17 dataset """kinships""" +244 17 model """ermlp""" +244 17 loss """nssa""" +244 17 regularizer """no""" +244 17 optimizer """adadelta""" +244 17 training_loop """owa""" +244 17 negative_sampler """basic""" +244 17 evaluator """rankbased""" +244 18 dataset """kinships""" +244 18 model """ermlp""" +244 18 loss """nssa""" +244 18 regularizer """no""" +244 18 optimizer """adadelta""" +244 18 training_loop """owa""" +244 18 negative_sampler """basic""" +244 18 evaluator """rankbased""" +244 19 dataset """kinships""" +244 19 model """ermlp""" +244 19 loss """nssa""" +244 19 regularizer """no""" +244 19 optimizer """adadelta""" +244 19 training_loop """owa""" +244 19 negative_sampler """basic""" +244 19 evaluator """rankbased""" +244 20 dataset """kinships""" +244 20 model """ermlp""" +244 20 loss """nssa""" +244 20 regularizer """no""" +244 20 optimizer """adadelta""" +244 20 training_loop """owa""" +244 20 negative_sampler """basic""" +244 20 evaluator """rankbased""" +244 21 dataset """kinships""" +244 21 
model """ermlp""" +244 21 loss """nssa""" +244 21 regularizer """no""" +244 21 optimizer """adadelta""" +244 21 training_loop """owa""" +244 21 negative_sampler """basic""" +244 21 evaluator """rankbased""" +244 22 dataset """kinships""" +244 22 model """ermlp""" +244 22 loss """nssa""" +244 22 regularizer """no""" +244 22 optimizer """adadelta""" +244 22 training_loop """owa""" +244 22 negative_sampler """basic""" +244 22 evaluator """rankbased""" +244 23 dataset """kinships""" +244 23 model """ermlp""" +244 23 loss """nssa""" +244 23 regularizer """no""" +244 23 optimizer """adadelta""" +244 23 training_loop """owa""" +244 23 negative_sampler """basic""" +244 23 evaluator """rankbased""" +244 24 dataset """kinships""" +244 24 model """ermlp""" +244 24 loss """nssa""" +244 24 regularizer """no""" +244 24 optimizer """adadelta""" +244 24 training_loop """owa""" +244 24 negative_sampler """basic""" +244 24 evaluator """rankbased""" +244 25 dataset """kinships""" +244 25 model """ermlp""" +244 25 loss """nssa""" +244 25 regularizer """no""" +244 25 optimizer """adadelta""" +244 25 training_loop """owa""" +244 25 negative_sampler """basic""" +244 25 evaluator """rankbased""" +244 26 dataset """kinships""" +244 26 model """ermlp""" +244 26 loss """nssa""" +244 26 regularizer """no""" +244 26 optimizer """adadelta""" +244 26 training_loop """owa""" +244 26 negative_sampler """basic""" +244 26 evaluator """rankbased""" +244 27 dataset """kinships""" +244 27 model """ermlp""" +244 27 loss """nssa""" +244 27 regularizer """no""" +244 27 optimizer """adadelta""" +244 27 training_loop """owa""" +244 27 negative_sampler """basic""" +244 27 evaluator """rankbased""" +244 28 dataset """kinships""" +244 28 model """ermlp""" +244 28 loss """nssa""" +244 28 regularizer """no""" +244 28 optimizer """adadelta""" +244 28 training_loop """owa""" +244 28 negative_sampler """basic""" +244 28 evaluator """rankbased""" +244 29 dataset """kinships""" +244 29 model """ermlp""" +244 29 loss 
"""nssa""" +244 29 regularizer """no""" +244 29 optimizer """adadelta""" +244 29 training_loop """owa""" +244 29 negative_sampler """basic""" +244 29 evaluator """rankbased""" +244 30 dataset """kinships""" +244 30 model """ermlp""" +244 30 loss """nssa""" +244 30 regularizer """no""" +244 30 optimizer """adadelta""" +244 30 training_loop """owa""" +244 30 negative_sampler """basic""" +244 30 evaluator """rankbased""" +244 31 dataset """kinships""" +244 31 model """ermlp""" +244 31 loss """nssa""" +244 31 regularizer """no""" +244 31 optimizer """adadelta""" +244 31 training_loop """owa""" +244 31 negative_sampler """basic""" +244 31 evaluator """rankbased""" +244 32 dataset """kinships""" +244 32 model """ermlp""" +244 32 loss """nssa""" +244 32 regularizer """no""" +244 32 optimizer """adadelta""" +244 32 training_loop """owa""" +244 32 negative_sampler """basic""" +244 32 evaluator """rankbased""" +244 33 dataset """kinships""" +244 33 model """ermlp""" +244 33 loss """nssa""" +244 33 regularizer """no""" +244 33 optimizer """adadelta""" +244 33 training_loop """owa""" +244 33 negative_sampler """basic""" +244 33 evaluator """rankbased""" +244 34 dataset """kinships""" +244 34 model """ermlp""" +244 34 loss """nssa""" +244 34 regularizer """no""" +244 34 optimizer """adadelta""" +244 34 training_loop """owa""" +244 34 negative_sampler """basic""" +244 34 evaluator """rankbased""" +244 35 dataset """kinships""" +244 35 model """ermlp""" +244 35 loss """nssa""" +244 35 regularizer """no""" +244 35 optimizer """adadelta""" +244 35 training_loop """owa""" +244 35 negative_sampler """basic""" +244 35 evaluator """rankbased""" +244 36 dataset """kinships""" +244 36 model """ermlp""" +244 36 loss """nssa""" +244 36 regularizer """no""" +244 36 optimizer """adadelta""" +244 36 training_loop """owa""" +244 36 negative_sampler """basic""" +244 36 evaluator """rankbased""" +244 37 dataset """kinships""" +244 37 model """ermlp""" +244 37 loss """nssa""" +244 37 regularizer 
"""no""" +244 37 optimizer """adadelta""" +244 37 training_loop """owa""" +244 37 negative_sampler """basic""" +244 37 evaluator """rankbased""" +244 38 dataset """kinships""" +244 38 model """ermlp""" +244 38 loss """nssa""" +244 38 regularizer """no""" +244 38 optimizer """adadelta""" +244 38 training_loop """owa""" +244 38 negative_sampler """basic""" +244 38 evaluator """rankbased""" +244 39 dataset """kinships""" +244 39 model """ermlp""" +244 39 loss """nssa""" +244 39 regularizer """no""" +244 39 optimizer """adadelta""" +244 39 training_loop """owa""" +244 39 negative_sampler """basic""" +244 39 evaluator """rankbased""" +244 40 dataset """kinships""" +244 40 model """ermlp""" +244 40 loss """nssa""" +244 40 regularizer """no""" +244 40 optimizer """adadelta""" +244 40 training_loop """owa""" +244 40 negative_sampler """basic""" +244 40 evaluator """rankbased""" +244 41 dataset """kinships""" +244 41 model """ermlp""" +244 41 loss """nssa""" +244 41 regularizer """no""" +244 41 optimizer """adadelta""" +244 41 training_loop """owa""" +244 41 negative_sampler """basic""" +244 41 evaluator """rankbased""" +244 42 dataset """kinships""" +244 42 model """ermlp""" +244 42 loss """nssa""" +244 42 regularizer """no""" +244 42 optimizer """adadelta""" +244 42 training_loop """owa""" +244 42 negative_sampler """basic""" +244 42 evaluator """rankbased""" +244 43 dataset """kinships""" +244 43 model """ermlp""" +244 43 loss """nssa""" +244 43 regularizer """no""" +244 43 optimizer """adadelta""" +244 43 training_loop """owa""" +244 43 negative_sampler """basic""" +244 43 evaluator """rankbased""" +244 44 dataset """kinships""" +244 44 model """ermlp""" +244 44 loss """nssa""" +244 44 regularizer """no""" +244 44 optimizer """adadelta""" +244 44 training_loop """owa""" +244 44 negative_sampler """basic""" +244 44 evaluator """rankbased""" +244 45 dataset """kinships""" +244 45 model """ermlp""" +244 45 loss """nssa""" +244 45 regularizer """no""" +244 45 optimizer 
"""adadelta""" +244 45 training_loop """owa""" +244 45 negative_sampler """basic""" +244 45 evaluator """rankbased""" +244 46 dataset """kinships""" +244 46 model """ermlp""" +244 46 loss """nssa""" +244 46 regularizer """no""" +244 46 optimizer """adadelta""" +244 46 training_loop """owa""" +244 46 negative_sampler """basic""" +244 46 evaluator """rankbased""" +244 47 dataset """kinships""" +244 47 model """ermlp""" +244 47 loss """nssa""" +244 47 regularizer """no""" +244 47 optimizer """adadelta""" +244 47 training_loop """owa""" +244 47 negative_sampler """basic""" +244 47 evaluator """rankbased""" +244 48 dataset """kinships""" +244 48 model """ermlp""" +244 48 loss """nssa""" +244 48 regularizer """no""" +244 48 optimizer """adadelta""" +244 48 training_loop """owa""" +244 48 negative_sampler """basic""" +244 48 evaluator """rankbased""" +244 49 dataset """kinships""" +244 49 model """ermlp""" +244 49 loss """nssa""" +244 49 regularizer """no""" +244 49 optimizer """adadelta""" +244 49 training_loop """owa""" +244 49 negative_sampler """basic""" +244 49 evaluator """rankbased""" +244 50 dataset """kinships""" +244 50 model """ermlp""" +244 50 loss """nssa""" +244 50 regularizer """no""" +244 50 optimizer """adadelta""" +244 50 training_loop """owa""" +244 50 negative_sampler """basic""" +244 50 evaluator """rankbased""" +244 51 dataset """kinships""" +244 51 model """ermlp""" +244 51 loss """nssa""" +244 51 regularizer """no""" +244 51 optimizer """adadelta""" +244 51 training_loop """owa""" +244 51 negative_sampler """basic""" +244 51 evaluator """rankbased""" +244 52 dataset """kinships""" +244 52 model """ermlp""" +244 52 loss """nssa""" +244 52 regularizer """no""" +244 52 optimizer """adadelta""" +244 52 training_loop """owa""" +244 52 negative_sampler """basic""" +244 52 evaluator """rankbased""" +244 53 dataset """kinships""" +244 53 model """ermlp""" +244 53 loss """nssa""" +244 53 regularizer """no""" +244 53 optimizer """adadelta""" +244 53 
training_loop """owa""" +244 53 negative_sampler """basic""" +244 53 evaluator """rankbased""" +244 54 dataset """kinships""" +244 54 model """ermlp""" +244 54 loss """nssa""" +244 54 regularizer """no""" +244 54 optimizer """adadelta""" +244 54 training_loop """owa""" +244 54 negative_sampler """basic""" +244 54 evaluator """rankbased""" +244 55 dataset """kinships""" +244 55 model """ermlp""" +244 55 loss """nssa""" +244 55 regularizer """no""" +244 55 optimizer """adadelta""" +244 55 training_loop """owa""" +244 55 negative_sampler """basic""" +244 55 evaluator """rankbased""" +244 56 dataset """kinships""" +244 56 model """ermlp""" +244 56 loss """nssa""" +244 56 regularizer """no""" +244 56 optimizer """adadelta""" +244 56 training_loop """owa""" +244 56 negative_sampler """basic""" +244 56 evaluator """rankbased""" +244 57 dataset """kinships""" +244 57 model """ermlp""" +244 57 loss """nssa""" +244 57 regularizer """no""" +244 57 optimizer """adadelta""" +244 57 training_loop """owa""" +244 57 negative_sampler """basic""" +244 57 evaluator """rankbased""" +244 58 dataset """kinships""" +244 58 model """ermlp""" +244 58 loss """nssa""" +244 58 regularizer """no""" +244 58 optimizer """adadelta""" +244 58 training_loop """owa""" +244 58 negative_sampler """basic""" +244 58 evaluator """rankbased""" +244 59 dataset """kinships""" +244 59 model """ermlp""" +244 59 loss """nssa""" +244 59 regularizer """no""" +244 59 optimizer """adadelta""" +244 59 training_loop """owa""" +244 59 negative_sampler """basic""" +244 59 evaluator """rankbased""" +244 60 dataset """kinships""" +244 60 model """ermlp""" +244 60 loss """nssa""" +244 60 regularizer """no""" +244 60 optimizer """adadelta""" +244 60 training_loop """owa""" +244 60 negative_sampler """basic""" +244 60 evaluator """rankbased""" +244 61 dataset """kinships""" +244 61 model """ermlp""" +244 61 loss """nssa""" +244 61 regularizer """no""" +244 61 optimizer """adadelta""" +244 61 training_loop """owa""" +244 61 
negative_sampler """basic""" +244 61 evaluator """rankbased""" +244 62 dataset """kinships""" +244 62 model """ermlp""" +244 62 loss """nssa""" +244 62 regularizer """no""" +244 62 optimizer """adadelta""" +244 62 training_loop """owa""" +244 62 negative_sampler """basic""" +244 62 evaluator """rankbased""" +244 63 dataset """kinships""" +244 63 model """ermlp""" +244 63 loss """nssa""" +244 63 regularizer """no""" +244 63 optimizer """adadelta""" +244 63 training_loop """owa""" +244 63 negative_sampler """basic""" +244 63 evaluator """rankbased""" +244 64 dataset """kinships""" +244 64 model """ermlp""" +244 64 loss """nssa""" +244 64 regularizer """no""" +244 64 optimizer """adadelta""" +244 64 training_loop """owa""" +244 64 negative_sampler """basic""" +244 64 evaluator """rankbased""" +244 65 dataset """kinships""" +244 65 model """ermlp""" +244 65 loss """nssa""" +244 65 regularizer """no""" +244 65 optimizer """adadelta""" +244 65 training_loop """owa""" +244 65 negative_sampler """basic""" +244 65 evaluator """rankbased""" +244 66 dataset """kinships""" +244 66 model """ermlp""" +244 66 loss """nssa""" +244 66 regularizer """no""" +244 66 optimizer """adadelta""" +244 66 training_loop """owa""" +244 66 negative_sampler """basic""" +244 66 evaluator """rankbased""" +244 67 dataset """kinships""" +244 67 model """ermlp""" +244 67 loss """nssa""" +244 67 regularizer """no""" +244 67 optimizer """adadelta""" +244 67 training_loop """owa""" +244 67 negative_sampler """basic""" +244 67 evaluator """rankbased""" +244 68 dataset """kinships""" +244 68 model """ermlp""" +244 68 loss """nssa""" +244 68 regularizer """no""" +244 68 optimizer """adadelta""" +244 68 training_loop """owa""" +244 68 negative_sampler """basic""" +244 68 evaluator """rankbased""" +244 69 dataset """kinships""" +244 69 model """ermlp""" +244 69 loss """nssa""" +244 69 regularizer """no""" +244 69 optimizer """adadelta""" +244 69 training_loop """owa""" +244 69 negative_sampler """basic""" 
+244 69 evaluator """rankbased""" +244 70 dataset """kinships""" +244 70 model """ermlp""" +244 70 loss """nssa""" +244 70 regularizer """no""" +244 70 optimizer """adadelta""" +244 70 training_loop """owa""" +244 70 negative_sampler """basic""" +244 70 evaluator """rankbased""" +244 71 dataset """kinships""" +244 71 model """ermlp""" +244 71 loss """nssa""" +244 71 regularizer """no""" +244 71 optimizer """adadelta""" +244 71 training_loop """owa""" +244 71 negative_sampler """basic""" +244 71 evaluator """rankbased""" +244 72 dataset """kinships""" +244 72 model """ermlp""" +244 72 loss """nssa""" +244 72 regularizer """no""" +244 72 optimizer """adadelta""" +244 72 training_loop """owa""" +244 72 negative_sampler """basic""" +244 72 evaluator """rankbased""" +244 73 dataset """kinships""" +244 73 model """ermlp""" +244 73 loss """nssa""" +244 73 regularizer """no""" +244 73 optimizer """adadelta""" +244 73 training_loop """owa""" +244 73 negative_sampler """basic""" +244 73 evaluator """rankbased""" +244 74 dataset """kinships""" +244 74 model """ermlp""" +244 74 loss """nssa""" +244 74 regularizer """no""" +244 74 optimizer """adadelta""" +244 74 training_loop """owa""" +244 74 negative_sampler """basic""" +244 74 evaluator """rankbased""" +244 75 dataset """kinships""" +244 75 model """ermlp""" +244 75 loss """nssa""" +244 75 regularizer """no""" +244 75 optimizer """adadelta""" +244 75 training_loop """owa""" +244 75 negative_sampler """basic""" +244 75 evaluator """rankbased""" +244 76 dataset """kinships""" +244 76 model """ermlp""" +244 76 loss """nssa""" +244 76 regularizer """no""" +244 76 optimizer """adadelta""" +244 76 training_loop """owa""" +244 76 negative_sampler """basic""" +244 76 evaluator """rankbased""" +244 77 dataset """kinships""" +244 77 model """ermlp""" +244 77 loss """nssa""" +244 77 regularizer """no""" +244 77 optimizer """adadelta""" +244 77 training_loop """owa""" +244 77 negative_sampler """basic""" +244 77 evaluator 
"""rankbased""" +244 78 dataset """kinships""" +244 78 model """ermlp""" +244 78 loss """nssa""" +244 78 regularizer """no""" +244 78 optimizer """adadelta""" +244 78 training_loop """owa""" +244 78 negative_sampler """basic""" +244 78 evaluator """rankbased""" +244 79 dataset """kinships""" +244 79 model """ermlp""" +244 79 loss """nssa""" +244 79 regularizer """no""" +244 79 optimizer """adadelta""" +244 79 training_loop """owa""" +244 79 negative_sampler """basic""" +244 79 evaluator """rankbased""" +244 80 dataset """kinships""" +244 80 model """ermlp""" +244 80 loss """nssa""" +244 80 regularizer """no""" +244 80 optimizer """adadelta""" +244 80 training_loop """owa""" +244 80 negative_sampler """basic""" +244 80 evaluator """rankbased""" +244 81 dataset """kinships""" +244 81 model """ermlp""" +244 81 loss """nssa""" +244 81 regularizer """no""" +244 81 optimizer """adadelta""" +244 81 training_loop """owa""" +244 81 negative_sampler """basic""" +244 81 evaluator """rankbased""" +244 82 dataset """kinships""" +244 82 model """ermlp""" +244 82 loss """nssa""" +244 82 regularizer """no""" +244 82 optimizer """adadelta""" +244 82 training_loop """owa""" +244 82 negative_sampler """basic""" +244 82 evaluator """rankbased""" +244 83 dataset """kinships""" +244 83 model """ermlp""" +244 83 loss """nssa""" +244 83 regularizer """no""" +244 83 optimizer """adadelta""" +244 83 training_loop """owa""" +244 83 negative_sampler """basic""" +244 83 evaluator """rankbased""" +244 84 dataset """kinships""" +244 84 model """ermlp""" +244 84 loss """nssa""" +244 84 regularizer """no""" +244 84 optimizer """adadelta""" +244 84 training_loop """owa""" +244 84 negative_sampler """basic""" +244 84 evaluator """rankbased""" +244 85 dataset """kinships""" +244 85 model """ermlp""" +244 85 loss """nssa""" +244 85 regularizer """no""" +244 85 optimizer """adadelta""" +244 85 training_loop """owa""" +244 85 negative_sampler """basic""" +244 85 evaluator """rankbased""" +244 86 dataset 
"""kinships""" +244 86 model """ermlp""" +244 86 loss """nssa""" +244 86 regularizer """no""" +244 86 optimizer """adadelta""" +244 86 training_loop """owa""" +244 86 negative_sampler """basic""" +244 86 evaluator """rankbased""" +244 87 dataset """kinships""" +244 87 model """ermlp""" +244 87 loss """nssa""" +244 87 regularizer """no""" +244 87 optimizer """adadelta""" +244 87 training_loop """owa""" +244 87 negative_sampler """basic""" +244 87 evaluator """rankbased""" +244 88 dataset """kinships""" +244 88 model """ermlp""" +244 88 loss """nssa""" +244 88 regularizer """no""" +244 88 optimizer """adadelta""" +244 88 training_loop """owa""" +244 88 negative_sampler """basic""" +244 88 evaluator """rankbased""" +244 89 dataset """kinships""" +244 89 model """ermlp""" +244 89 loss """nssa""" +244 89 regularizer """no""" +244 89 optimizer """adadelta""" +244 89 training_loop """owa""" +244 89 negative_sampler """basic""" +244 89 evaluator """rankbased""" +244 90 dataset """kinships""" +244 90 model """ermlp""" +244 90 loss """nssa""" +244 90 regularizer """no""" +244 90 optimizer """adadelta""" +244 90 training_loop """owa""" +244 90 negative_sampler """basic""" +244 90 evaluator """rankbased""" +244 91 dataset """kinships""" +244 91 model """ermlp""" +244 91 loss """nssa""" +244 91 regularizer """no""" +244 91 optimizer """adadelta""" +244 91 training_loop """owa""" +244 91 negative_sampler """basic""" +244 91 evaluator """rankbased""" +244 92 dataset """kinships""" +244 92 model """ermlp""" +244 92 loss """nssa""" +244 92 regularizer """no""" +244 92 optimizer """adadelta""" +244 92 training_loop """owa""" +244 92 negative_sampler """basic""" +244 92 evaluator """rankbased""" +244 93 dataset """kinships""" +244 93 model """ermlp""" +244 93 loss """nssa""" +244 93 regularizer """no""" +244 93 optimizer """adadelta""" +244 93 training_loop """owa""" +244 93 negative_sampler """basic""" +244 93 evaluator """rankbased""" +244 94 dataset """kinships""" +244 94 model 
"""ermlp""" +244 94 loss """nssa""" +244 94 regularizer """no""" +244 94 optimizer """adadelta""" +244 94 training_loop """owa""" +244 94 negative_sampler """basic""" +244 94 evaluator """rankbased""" +244 95 dataset """kinships""" +244 95 model """ermlp""" +244 95 loss """nssa""" +244 95 regularizer """no""" +244 95 optimizer """adadelta""" +244 95 training_loop """owa""" +244 95 negative_sampler """basic""" +244 95 evaluator """rankbased""" +244 96 dataset """kinships""" +244 96 model """ermlp""" +244 96 loss """nssa""" +244 96 regularizer """no""" +244 96 optimizer """adadelta""" +244 96 training_loop """owa""" +244 96 negative_sampler """basic""" +244 96 evaluator """rankbased""" +244 97 dataset """kinships""" +244 97 model """ermlp""" +244 97 loss """nssa""" +244 97 regularizer """no""" +244 97 optimizer """adadelta""" +244 97 training_loop """owa""" +244 97 negative_sampler """basic""" +244 97 evaluator """rankbased""" +244 98 dataset """kinships""" +244 98 model """ermlp""" +244 98 loss """nssa""" +244 98 regularizer """no""" +244 98 optimizer """adadelta""" +244 98 training_loop """owa""" +244 98 negative_sampler """basic""" +244 98 evaluator """rankbased""" +244 99 dataset """kinships""" +244 99 model """ermlp""" +244 99 loss """nssa""" +244 99 regularizer """no""" +244 99 optimizer """adadelta""" +244 99 training_loop """owa""" +244 99 negative_sampler """basic""" +244 99 evaluator """rankbased""" +244 100 dataset """kinships""" +244 100 model """ermlp""" +244 100 loss """nssa""" +244 100 regularizer """no""" +244 100 optimizer """adadelta""" +244 100 training_loop """owa""" +244 100 negative_sampler """basic""" +244 100 evaluator """rankbased""" +245 1 model.embedding_dim 1.0 +245 1 loss.margin 26.377030937520058 +245 1 loss.adversarial_temperature 0.37082993462203395 +245 1 negative_sampler.num_negs_per_pos 47.0 +245 1 training.batch_size 1.0 +245 2 model.embedding_dim 2.0 +245 2 loss.margin 15.484634784651995 +245 2 loss.adversarial_temperature 
0.8121858241690716 +245 2 negative_sampler.num_negs_per_pos 38.0 +245 2 training.batch_size 1.0 +245 3 model.embedding_dim 2.0 +245 3 loss.margin 28.157288702310325 +245 3 loss.adversarial_temperature 0.6970256450207876 +245 3 negative_sampler.num_negs_per_pos 35.0 +245 3 training.batch_size 2.0 +245 4 model.embedding_dim 1.0 +245 4 loss.margin 26.53018806503563 +245 4 loss.adversarial_temperature 0.8610486937890716 +245 4 negative_sampler.num_negs_per_pos 65.0 +245 4 training.batch_size 0.0 +245 5 model.embedding_dim 0.0 +245 5 loss.margin 9.398410392411991 +245 5 loss.adversarial_temperature 0.6305284913041711 +245 5 negative_sampler.num_negs_per_pos 74.0 +245 5 training.batch_size 2.0 +245 6 model.embedding_dim 1.0 +245 6 loss.margin 9.287573923939952 +245 6 loss.adversarial_temperature 0.2971740621124893 +245 6 negative_sampler.num_negs_per_pos 71.0 +245 6 training.batch_size 2.0 +245 7 model.embedding_dim 1.0 +245 7 loss.margin 19.440807308733227 +245 7 loss.adversarial_temperature 0.4279163961638125 +245 7 negative_sampler.num_negs_per_pos 41.0 +245 7 training.batch_size 2.0 +245 8 model.embedding_dim 0.0 +245 8 loss.margin 23.648257732123078 +245 8 loss.adversarial_temperature 0.5083459088529957 +245 8 negative_sampler.num_negs_per_pos 53.0 +245 8 training.batch_size 2.0 +245 9 model.embedding_dim 1.0 +245 9 loss.margin 2.982665278228516 +245 9 loss.adversarial_temperature 0.7129439255349485 +245 9 negative_sampler.num_negs_per_pos 58.0 +245 9 training.batch_size 2.0 +245 10 model.embedding_dim 2.0 +245 10 loss.margin 7.865134985348364 +245 10 loss.adversarial_temperature 0.6616329879085397 +245 10 negative_sampler.num_negs_per_pos 39.0 +245 10 training.batch_size 0.0 +245 11 model.embedding_dim 1.0 +245 11 loss.margin 17.278127254640992 +245 11 loss.adversarial_temperature 0.7450121817393663 +245 11 negative_sampler.num_negs_per_pos 83.0 +245 11 training.batch_size 0.0 +245 12 model.embedding_dim 0.0 +245 12 loss.margin 27.386358519463425 +245 12 
loss.adversarial_temperature 0.30067096763941037 +245 12 negative_sampler.num_negs_per_pos 51.0 +245 12 training.batch_size 1.0 +245 13 model.embedding_dim 0.0 +245 13 loss.margin 12.870011054980738 +245 13 loss.adversarial_temperature 0.8476641406372319 +245 13 negative_sampler.num_negs_per_pos 73.0 +245 13 training.batch_size 1.0 +245 14 model.embedding_dim 0.0 +245 14 loss.margin 3.435280681549119 +245 14 loss.adversarial_temperature 0.4421329055075284 +245 14 negative_sampler.num_negs_per_pos 80.0 +245 14 training.batch_size 2.0 +245 15 model.embedding_dim 1.0 +245 15 loss.margin 11.474943099757176 +245 15 loss.adversarial_temperature 0.7804579297163025 +245 15 negative_sampler.num_negs_per_pos 96.0 +245 15 training.batch_size 2.0 +245 16 model.embedding_dim 0.0 +245 16 loss.margin 23.58354141410308 +245 16 loss.adversarial_temperature 0.4084549145436883 +245 16 negative_sampler.num_negs_per_pos 30.0 +245 16 training.batch_size 1.0 +245 17 model.embedding_dim 2.0 +245 17 loss.margin 18.361344021449266 +245 17 loss.adversarial_temperature 0.7337958305803736 +245 17 negative_sampler.num_negs_per_pos 5.0 +245 17 training.batch_size 0.0 +245 18 model.embedding_dim 1.0 +245 18 loss.margin 19.18608871595869 +245 18 loss.adversarial_temperature 0.6800978668845598 +245 18 negative_sampler.num_negs_per_pos 64.0 +245 18 training.batch_size 1.0 +245 19 model.embedding_dim 0.0 +245 19 loss.margin 10.658256335340916 +245 19 loss.adversarial_temperature 0.14508950636034518 +245 19 negative_sampler.num_negs_per_pos 99.0 +245 19 training.batch_size 1.0 +245 20 model.embedding_dim 2.0 +245 20 loss.margin 24.518188914329517 +245 20 loss.adversarial_temperature 0.13362790733954888 +245 20 negative_sampler.num_negs_per_pos 62.0 +245 20 training.batch_size 0.0 +245 21 model.embedding_dim 0.0 +245 21 loss.margin 28.88507432430969 +245 21 loss.adversarial_temperature 0.3021675112890527 +245 21 negative_sampler.num_negs_per_pos 91.0 +245 21 training.batch_size 2.0 +245 22 
model.embedding_dim 2.0 +245 22 loss.margin 14.156301840723174 +245 22 loss.adversarial_temperature 0.18095458389169627 +245 22 negative_sampler.num_negs_per_pos 23.0 +245 22 training.batch_size 2.0 +245 23 model.embedding_dim 2.0 +245 23 loss.margin 21.51122402721326 +245 23 loss.adversarial_temperature 0.3672601568723939 +245 23 negative_sampler.num_negs_per_pos 93.0 +245 23 training.batch_size 1.0 +245 24 model.embedding_dim 0.0 +245 24 loss.margin 19.005233766187366 +245 24 loss.adversarial_temperature 0.6887955459465895 +245 24 negative_sampler.num_negs_per_pos 11.0 +245 24 training.batch_size 0.0 +245 25 model.embedding_dim 2.0 +245 25 loss.margin 9.5549807367006 +245 25 loss.adversarial_temperature 0.46861396970549907 +245 25 negative_sampler.num_negs_per_pos 43.0 +245 25 training.batch_size 1.0 +245 26 model.embedding_dim 1.0 +245 26 loss.margin 24.063826749350735 +245 26 loss.adversarial_temperature 0.18608499494344666 +245 26 negative_sampler.num_negs_per_pos 65.0 +245 26 training.batch_size 0.0 +245 27 model.embedding_dim 0.0 +245 27 loss.margin 2.926723561930644 +245 27 loss.adversarial_temperature 0.9791582356191348 +245 27 negative_sampler.num_negs_per_pos 81.0 +245 27 training.batch_size 2.0 +245 28 model.embedding_dim 2.0 +245 28 loss.margin 25.508765231925874 +245 28 loss.adversarial_temperature 0.20288873920030037 +245 28 negative_sampler.num_negs_per_pos 75.0 +245 28 training.batch_size 2.0 +245 29 model.embedding_dim 1.0 +245 29 loss.margin 5.987586483040456 +245 29 loss.adversarial_temperature 0.7411819499480808 +245 29 negative_sampler.num_negs_per_pos 52.0 +245 29 training.batch_size 1.0 +245 30 model.embedding_dim 1.0 +245 30 loss.margin 14.853311524547708 +245 30 loss.adversarial_temperature 0.390709937908857 +245 30 negative_sampler.num_negs_per_pos 57.0 +245 30 training.batch_size 1.0 +245 31 model.embedding_dim 2.0 +245 31 loss.margin 24.391665451238655 +245 31 loss.adversarial_temperature 0.1795842127072616 +245 31 
negative_sampler.num_negs_per_pos 50.0 +245 31 training.batch_size 1.0 +245 32 model.embedding_dim 0.0 +245 32 loss.margin 27.523874040487495 +245 32 loss.adversarial_temperature 0.1564540444155243 +245 32 negative_sampler.num_negs_per_pos 82.0 +245 32 training.batch_size 2.0 +245 33 model.embedding_dim 2.0 +245 33 loss.margin 27.00158004877178 +245 33 loss.adversarial_temperature 0.35489646664544083 +245 33 negative_sampler.num_negs_per_pos 25.0 +245 33 training.batch_size 2.0 +245 34 model.embedding_dim 1.0 +245 34 loss.margin 3.2451012028671955 +245 34 loss.adversarial_temperature 0.10924341085673424 +245 34 negative_sampler.num_negs_per_pos 79.0 +245 34 training.batch_size 1.0 +245 35 model.embedding_dim 1.0 +245 35 loss.margin 9.985046248760458 +245 35 loss.adversarial_temperature 0.5709850424209422 +245 35 negative_sampler.num_negs_per_pos 4.0 +245 35 training.batch_size 2.0 +245 36 model.embedding_dim 0.0 +245 36 loss.margin 22.81842474743662 +245 36 loss.adversarial_temperature 0.6619473645807163 +245 36 negative_sampler.num_negs_per_pos 88.0 +245 36 training.batch_size 2.0 +245 37 model.embedding_dim 0.0 +245 37 loss.margin 18.50248224031361 +245 37 loss.adversarial_temperature 0.5601869107935571 +245 37 negative_sampler.num_negs_per_pos 92.0 +245 37 training.batch_size 1.0 +245 38 model.embedding_dim 2.0 +245 38 loss.margin 10.30631890700764 +245 38 loss.adversarial_temperature 0.15027343980056818 +245 38 negative_sampler.num_negs_per_pos 76.0 +245 38 training.batch_size 2.0 +245 39 model.embedding_dim 1.0 +245 39 loss.margin 24.30761516089696 +245 39 loss.adversarial_temperature 0.8559857958687371 +245 39 negative_sampler.num_negs_per_pos 69.0 +245 39 training.batch_size 0.0 +245 40 model.embedding_dim 2.0 +245 40 loss.margin 21.019792666584017 +245 40 loss.adversarial_temperature 0.5298593069815157 +245 40 negative_sampler.num_negs_per_pos 15.0 +245 40 training.batch_size 0.0 +245 41 model.embedding_dim 2.0 +245 41 loss.margin 16.26164870292459 +245 41 
loss.adversarial_temperature 0.44432220167496506 +245 41 negative_sampler.num_negs_per_pos 30.0 +245 41 training.batch_size 2.0 +245 42 model.embedding_dim 2.0 +245 42 loss.margin 20.958606243244258 +245 42 loss.adversarial_temperature 0.2596832356388781 +245 42 negative_sampler.num_negs_per_pos 51.0 +245 42 training.batch_size 0.0 +245 43 model.embedding_dim 2.0 +245 43 loss.margin 12.373039225175265 +245 43 loss.adversarial_temperature 0.8290756002345345 +245 43 negative_sampler.num_negs_per_pos 95.0 +245 43 training.batch_size 2.0 +245 44 model.embedding_dim 2.0 +245 44 loss.margin 25.18099861408554 +245 44 loss.adversarial_temperature 0.5241587979941797 +245 44 negative_sampler.num_negs_per_pos 25.0 +245 44 training.batch_size 0.0 +245 45 model.embedding_dim 1.0 +245 45 loss.margin 23.329227040180637 +245 45 loss.adversarial_temperature 0.6196857145545603 +245 45 negative_sampler.num_negs_per_pos 44.0 +245 45 training.batch_size 0.0 +245 46 model.embedding_dim 1.0 +245 46 loss.margin 22.812972234891326 +245 46 loss.adversarial_temperature 0.8085454593316425 +245 46 negative_sampler.num_negs_per_pos 47.0 +245 46 training.batch_size 1.0 +245 47 model.embedding_dim 2.0 +245 47 loss.margin 2.303379034331453 +245 47 loss.adversarial_temperature 0.9757297454971828 +245 47 negative_sampler.num_negs_per_pos 43.0 +245 47 training.batch_size 0.0 +245 48 model.embedding_dim 2.0 +245 48 loss.margin 27.167155214423037 +245 48 loss.adversarial_temperature 0.1721782429069731 +245 48 negative_sampler.num_negs_per_pos 8.0 +245 48 training.batch_size 2.0 +245 49 model.embedding_dim 0.0 +245 49 loss.margin 20.420216408781826 +245 49 loss.adversarial_temperature 0.35459309727440025 +245 49 negative_sampler.num_negs_per_pos 47.0 +245 49 training.batch_size 0.0 +245 50 model.embedding_dim 1.0 +245 50 loss.margin 27.160352689189878 +245 50 loss.adversarial_temperature 0.46820338916812765 +245 50 negative_sampler.num_negs_per_pos 42.0 +245 50 training.batch_size 0.0 +245 51 
model.embedding_dim 1.0 +245 51 loss.margin 20.836062821825955 +245 51 loss.adversarial_temperature 0.17710087860013016 +245 51 negative_sampler.num_negs_per_pos 84.0 +245 51 training.batch_size 1.0 +245 52 model.embedding_dim 1.0 +245 52 loss.margin 1.741815913329567 +245 52 loss.adversarial_temperature 0.9402423067632839 +245 52 negative_sampler.num_negs_per_pos 3.0 +245 52 training.batch_size 2.0 +245 53 model.embedding_dim 0.0 +245 53 loss.margin 7.635805644023205 +245 53 loss.adversarial_temperature 0.6740742392602111 +245 53 negative_sampler.num_negs_per_pos 65.0 +245 53 training.batch_size 2.0 +245 54 model.embedding_dim 0.0 +245 54 loss.margin 23.107887411705015 +245 54 loss.adversarial_temperature 0.1151149302811705 +245 54 negative_sampler.num_negs_per_pos 64.0 +245 54 training.batch_size 2.0 +245 55 model.embedding_dim 1.0 +245 55 loss.margin 5.133741454847031 +245 55 loss.adversarial_temperature 0.2875078328432743 +245 55 negative_sampler.num_negs_per_pos 94.0 +245 55 training.batch_size 0.0 +245 56 model.embedding_dim 1.0 +245 56 loss.margin 9.101616977487055 +245 56 loss.adversarial_temperature 0.2355395653591011 +245 56 negative_sampler.num_negs_per_pos 64.0 +245 56 training.batch_size 0.0 +245 57 model.embedding_dim 1.0 +245 57 loss.margin 19.34283480714389 +245 57 loss.adversarial_temperature 0.23748006501113128 +245 57 negative_sampler.num_negs_per_pos 68.0 +245 57 training.batch_size 0.0 +245 58 model.embedding_dim 1.0 +245 58 loss.margin 3.402207708366766 +245 58 loss.adversarial_temperature 0.3980915515274124 +245 58 negative_sampler.num_negs_per_pos 13.0 +245 58 training.batch_size 2.0 +245 59 model.embedding_dim 1.0 +245 59 loss.margin 7.176661741996971 +245 59 loss.adversarial_temperature 0.18032527599616208 +245 59 negative_sampler.num_negs_per_pos 41.0 +245 59 training.batch_size 1.0 +245 60 model.embedding_dim 1.0 +245 60 loss.margin 24.949677909111355 +245 60 loss.adversarial_temperature 0.8885431663118738 +245 60 
negative_sampler.num_negs_per_pos 3.0 +245 60 training.batch_size 2.0 +245 61 model.embedding_dim 2.0 +245 61 loss.margin 26.040209444364496 +245 61 loss.adversarial_temperature 0.2844190602680242 +245 61 negative_sampler.num_negs_per_pos 77.0 +245 61 training.batch_size 2.0 +245 62 model.embedding_dim 2.0 +245 62 loss.margin 27.773196314017326 +245 62 loss.adversarial_temperature 0.6474564793878617 +245 62 negative_sampler.num_negs_per_pos 86.0 +245 62 training.batch_size 1.0 +245 63 model.embedding_dim 0.0 +245 63 loss.margin 11.4749146318535 +245 63 loss.adversarial_temperature 0.45679182109233474 +245 63 negative_sampler.num_negs_per_pos 98.0 +245 63 training.batch_size 0.0 +245 64 model.embedding_dim 1.0 +245 64 loss.margin 7.274236030070081 +245 64 loss.adversarial_temperature 0.5552974558079062 +245 64 negative_sampler.num_negs_per_pos 73.0 +245 64 training.batch_size 0.0 +245 65 model.embedding_dim 2.0 +245 65 loss.margin 16.293478333262854 +245 65 loss.adversarial_temperature 0.22160024931419356 +245 65 negative_sampler.num_negs_per_pos 17.0 +245 65 training.batch_size 1.0 +245 66 model.embedding_dim 2.0 +245 66 loss.margin 12.913661156633992 +245 66 loss.adversarial_temperature 0.8407993805636255 +245 66 negative_sampler.num_negs_per_pos 62.0 +245 66 training.batch_size 2.0 +245 67 model.embedding_dim 2.0 +245 67 loss.margin 24.013329770192616 +245 67 loss.adversarial_temperature 0.4329955755573901 +245 67 negative_sampler.num_negs_per_pos 55.0 +245 67 training.batch_size 1.0 +245 68 model.embedding_dim 1.0 +245 68 loss.margin 13.031634326216954 +245 68 loss.adversarial_temperature 0.5731102990279681 +245 68 negative_sampler.num_negs_per_pos 53.0 +245 68 training.batch_size 1.0 +245 69 model.embedding_dim 0.0 +245 69 loss.margin 9.691199223748725 +245 69 loss.adversarial_temperature 0.4786149422477658 +245 69 negative_sampler.num_negs_per_pos 75.0 +245 69 training.batch_size 1.0 +245 70 model.embedding_dim 2.0 +245 70 loss.margin 2.813855202827193 +245 70 
loss.adversarial_temperature 0.710928850790745 +245 70 negative_sampler.num_negs_per_pos 78.0 +245 70 training.batch_size 2.0 +245 71 model.embedding_dim 0.0 +245 71 loss.margin 2.4579435581748177 +245 71 loss.adversarial_temperature 0.4774869085086749 +245 71 negative_sampler.num_negs_per_pos 68.0 +245 71 training.batch_size 2.0 +245 72 model.embedding_dim 0.0 +245 72 loss.margin 18.277635440136603 +245 72 loss.adversarial_temperature 0.26192659756573744 +245 72 negative_sampler.num_negs_per_pos 38.0 +245 72 training.batch_size 2.0 +245 73 model.embedding_dim 1.0 +245 73 loss.margin 20.27439499540245 +245 73 loss.adversarial_temperature 0.5614842460676831 +245 73 negative_sampler.num_negs_per_pos 87.0 +245 73 training.batch_size 0.0 +245 74 model.embedding_dim 2.0 +245 74 loss.margin 12.668545481332098 +245 74 loss.adversarial_temperature 0.2653309729610078 +245 74 negative_sampler.num_negs_per_pos 73.0 +245 74 training.batch_size 1.0 +245 75 model.embedding_dim 2.0 +245 75 loss.margin 24.64715724018446 +245 75 loss.adversarial_temperature 0.23490628743473912 +245 75 negative_sampler.num_negs_per_pos 67.0 +245 75 training.batch_size 2.0 +245 76 model.embedding_dim 0.0 +245 76 loss.margin 19.913587327738767 +245 76 loss.adversarial_temperature 0.388700461870092 +245 76 negative_sampler.num_negs_per_pos 23.0 +245 76 training.batch_size 2.0 +245 77 model.embedding_dim 1.0 +245 77 loss.margin 21.800641593188953 +245 77 loss.adversarial_temperature 0.7830730642442014 +245 77 negative_sampler.num_negs_per_pos 75.0 +245 77 training.batch_size 1.0 +245 78 model.embedding_dim 0.0 +245 78 loss.margin 15.74583965373966 +245 78 loss.adversarial_temperature 0.3421283481877854 +245 78 negative_sampler.num_negs_per_pos 33.0 +245 78 training.batch_size 0.0 +245 79 model.embedding_dim 2.0 +245 79 loss.margin 15.870980839222435 +245 79 loss.adversarial_temperature 0.38515963912054574 +245 79 negative_sampler.num_negs_per_pos 66.0 +245 79 training.batch_size 0.0 +245 80 
model.embedding_dim 1.0 +245 80 loss.margin 27.840193729574725 +245 80 loss.adversarial_temperature 0.579572973826795 +245 80 negative_sampler.num_negs_per_pos 96.0 +245 80 training.batch_size 2.0 +245 81 model.embedding_dim 0.0 +245 81 loss.margin 12.493379973602016 +245 81 loss.adversarial_temperature 0.5703845235026656 +245 81 negative_sampler.num_negs_per_pos 27.0 +245 81 training.batch_size 1.0 +245 82 model.embedding_dim 0.0 +245 82 loss.margin 12.92328243891884 +245 82 loss.adversarial_temperature 0.5911589009723692 +245 82 negative_sampler.num_negs_per_pos 76.0 +245 82 training.batch_size 1.0 +245 83 model.embedding_dim 0.0 +245 83 loss.margin 8.71044729764721 +245 83 loss.adversarial_temperature 0.9585251582182271 +245 83 negative_sampler.num_negs_per_pos 43.0 +245 83 training.batch_size 2.0 +245 84 model.embedding_dim 1.0 +245 84 loss.margin 20.0759498701226 +245 84 loss.adversarial_temperature 0.2954692130945201 +245 84 negative_sampler.num_negs_per_pos 68.0 +245 84 training.batch_size 2.0 +245 85 model.embedding_dim 1.0 +245 85 loss.margin 18.155875327756426 +245 85 loss.adversarial_temperature 0.2233712282401007 +245 85 negative_sampler.num_negs_per_pos 97.0 +245 85 training.batch_size 0.0 +245 86 model.embedding_dim 1.0 +245 86 loss.margin 7.658238120499607 +245 86 loss.adversarial_temperature 0.3501112965117731 +245 86 negative_sampler.num_negs_per_pos 92.0 +245 86 training.batch_size 2.0 +245 87 model.embedding_dim 2.0 +245 87 loss.margin 4.015221209131042 +245 87 loss.adversarial_temperature 0.5315482263968814 +245 87 negative_sampler.num_negs_per_pos 60.0 +245 87 training.batch_size 2.0 +245 88 model.embedding_dim 2.0 +245 88 loss.margin 10.89554672173239 +245 88 loss.adversarial_temperature 0.2939447656446338 +245 88 negative_sampler.num_negs_per_pos 20.0 +245 88 training.batch_size 0.0 +245 89 model.embedding_dim 0.0 +245 89 loss.margin 17.453654280099787 +245 89 loss.adversarial_temperature 0.36958743707189623 +245 89 
negative_sampler.num_negs_per_pos 21.0 +245 89 training.batch_size 0.0 +245 90 model.embedding_dim 2.0 +245 90 loss.margin 28.104924058357042 +245 90 loss.adversarial_temperature 0.43823059525830577 +245 90 negative_sampler.num_negs_per_pos 70.0 +245 90 training.batch_size 2.0 +245 91 model.embedding_dim 1.0 +245 91 loss.margin 22.358085115270068 +245 91 loss.adversarial_temperature 0.9948587338492333 +245 91 negative_sampler.num_negs_per_pos 31.0 +245 91 training.batch_size 0.0 +245 92 model.embedding_dim 2.0 +245 92 loss.margin 9.372166837014433 +245 92 loss.adversarial_temperature 0.9764186767073005 +245 92 negative_sampler.num_negs_per_pos 47.0 +245 92 training.batch_size 1.0 +245 93 model.embedding_dim 2.0 +245 93 loss.margin 29.702270612004813 +245 93 loss.adversarial_temperature 0.2573591466241214 +245 93 negative_sampler.num_negs_per_pos 19.0 +245 93 training.batch_size 2.0 +245 94 model.embedding_dim 2.0 +245 94 loss.margin 12.063890187698382 +245 94 loss.adversarial_temperature 0.924539113845235 +245 94 negative_sampler.num_negs_per_pos 56.0 +245 94 training.batch_size 2.0 +245 95 model.embedding_dim 2.0 +245 95 loss.margin 21.53502194665845 +245 95 loss.adversarial_temperature 0.5183147810613242 +245 95 negative_sampler.num_negs_per_pos 79.0 +245 95 training.batch_size 1.0 +245 96 model.embedding_dim 1.0 +245 96 loss.margin 10.017699480749304 +245 96 loss.adversarial_temperature 0.3168822396605875 +245 96 negative_sampler.num_negs_per_pos 52.0 +245 96 training.batch_size 2.0 +245 97 model.embedding_dim 1.0 +245 97 loss.margin 14.235714549915324 +245 97 loss.adversarial_temperature 0.40751199546010586 +245 97 negative_sampler.num_negs_per_pos 76.0 +245 97 training.batch_size 2.0 +245 98 model.embedding_dim 1.0 +245 98 loss.margin 8.936371497398085 +245 98 loss.adversarial_temperature 0.5925958636204528 +245 98 negative_sampler.num_negs_per_pos 36.0 +245 98 training.batch_size 2.0 +245 99 model.embedding_dim 0.0 +245 99 loss.margin 28.487573496500406 +245 
99 loss.adversarial_temperature 0.6094349139839113 +245 99 negative_sampler.num_negs_per_pos 78.0 +245 99 training.batch_size 2.0 +245 100 model.embedding_dim 1.0 +245 100 loss.margin 14.293630554144974 +245 100 loss.adversarial_temperature 0.6356958100101979 +245 100 negative_sampler.num_negs_per_pos 40.0 +245 100 training.batch_size 1.0 +245 1 dataset """kinships""" +245 1 model """ermlp""" +245 1 loss """nssa""" +245 1 regularizer """no""" +245 1 optimizer """adadelta""" +245 1 training_loop """owa""" +245 1 negative_sampler """basic""" +245 1 evaluator """rankbased""" +245 2 dataset """kinships""" +245 2 model """ermlp""" +245 2 loss """nssa""" +245 2 regularizer """no""" +245 2 optimizer """adadelta""" +245 2 training_loop """owa""" +245 2 negative_sampler """basic""" +245 2 evaluator """rankbased""" +245 3 dataset """kinships""" +245 3 model """ermlp""" +245 3 loss """nssa""" +245 3 regularizer """no""" +245 3 optimizer """adadelta""" +245 3 training_loop """owa""" +245 3 negative_sampler """basic""" +245 3 evaluator """rankbased""" +245 4 dataset """kinships""" +245 4 model """ermlp""" +245 4 loss """nssa""" +245 4 regularizer """no""" +245 4 optimizer """adadelta""" +245 4 training_loop """owa""" +245 4 negative_sampler """basic""" +245 4 evaluator """rankbased""" +245 5 dataset """kinships""" +245 5 model """ermlp""" +245 5 loss """nssa""" +245 5 regularizer """no""" +245 5 optimizer """adadelta""" +245 5 training_loop """owa""" +245 5 negative_sampler """basic""" +245 5 evaluator """rankbased""" +245 6 dataset """kinships""" +245 6 model """ermlp""" +245 6 loss """nssa""" +245 6 regularizer """no""" +245 6 optimizer """adadelta""" +245 6 training_loop """owa""" +245 6 negative_sampler """basic""" +245 6 evaluator """rankbased""" +245 7 dataset """kinships""" +245 7 model """ermlp""" +245 7 loss """nssa""" +245 7 regularizer """no""" +245 7 optimizer """adadelta""" +245 7 training_loop """owa""" +245 7 negative_sampler """basic""" +245 7 evaluator 
"""rankbased""" +245 8 dataset """kinships""" +245 8 model """ermlp""" +245 8 loss """nssa""" +245 8 regularizer """no""" +245 8 optimizer """adadelta""" +245 8 training_loop """owa""" +245 8 negative_sampler """basic""" +245 8 evaluator """rankbased""" +245 9 dataset """kinships""" +245 9 model """ermlp""" +245 9 loss """nssa""" +245 9 regularizer """no""" +245 9 optimizer """adadelta""" +245 9 training_loop """owa""" +245 9 negative_sampler """basic""" +245 9 evaluator """rankbased""" +245 10 dataset """kinships""" +245 10 model """ermlp""" +245 10 loss """nssa""" +245 10 regularizer """no""" +245 10 optimizer """adadelta""" +245 10 training_loop """owa""" +245 10 negative_sampler """basic""" +245 10 evaluator """rankbased""" +245 11 dataset """kinships""" +245 11 model """ermlp""" +245 11 loss """nssa""" +245 11 regularizer """no""" +245 11 optimizer """adadelta""" +245 11 training_loop """owa""" +245 11 negative_sampler """basic""" +245 11 evaluator """rankbased""" +245 12 dataset """kinships""" +245 12 model """ermlp""" +245 12 loss """nssa""" +245 12 regularizer """no""" +245 12 optimizer """adadelta""" +245 12 training_loop """owa""" +245 12 negative_sampler """basic""" +245 12 evaluator """rankbased""" +245 13 dataset """kinships""" +245 13 model """ermlp""" +245 13 loss """nssa""" +245 13 regularizer """no""" +245 13 optimizer """adadelta""" +245 13 training_loop """owa""" +245 13 negative_sampler """basic""" +245 13 evaluator """rankbased""" +245 14 dataset """kinships""" +245 14 model """ermlp""" +245 14 loss """nssa""" +245 14 regularizer """no""" +245 14 optimizer """adadelta""" +245 14 training_loop """owa""" +245 14 negative_sampler """basic""" +245 14 evaluator """rankbased""" +245 15 dataset """kinships""" +245 15 model """ermlp""" +245 15 loss """nssa""" +245 15 regularizer """no""" +245 15 optimizer """adadelta""" +245 15 training_loop """owa""" +245 15 negative_sampler """basic""" +245 15 evaluator """rankbased""" +245 16 dataset """kinships""" 
+245 16 model """ermlp""" +245 16 loss """nssa""" +245 16 regularizer """no""" +245 16 optimizer """adadelta""" +245 16 training_loop """owa""" +245 16 negative_sampler """basic""" +245 16 evaluator """rankbased""" +245 17 dataset """kinships""" +245 17 model """ermlp""" +245 17 loss """nssa""" +245 17 regularizer """no""" +245 17 optimizer """adadelta""" +245 17 training_loop """owa""" +245 17 negative_sampler """basic""" +245 17 evaluator """rankbased""" +245 18 dataset """kinships""" +245 18 model """ermlp""" +245 18 loss """nssa""" +245 18 regularizer """no""" +245 18 optimizer """adadelta""" +245 18 training_loop """owa""" +245 18 negative_sampler """basic""" +245 18 evaluator """rankbased""" +245 19 dataset """kinships""" +245 19 model """ermlp""" +245 19 loss """nssa""" +245 19 regularizer """no""" +245 19 optimizer """adadelta""" +245 19 training_loop """owa""" +245 19 negative_sampler """basic""" +245 19 evaluator """rankbased""" +245 20 dataset """kinships""" +245 20 model """ermlp""" +245 20 loss """nssa""" +245 20 regularizer """no""" +245 20 optimizer """adadelta""" +245 20 training_loop """owa""" +245 20 negative_sampler """basic""" +245 20 evaluator """rankbased""" +245 21 dataset """kinships""" +245 21 model """ermlp""" +245 21 loss """nssa""" +245 21 regularizer """no""" +245 21 optimizer """adadelta""" +245 21 training_loop """owa""" +245 21 negative_sampler """basic""" +245 21 evaluator """rankbased""" +245 22 dataset """kinships""" +245 22 model """ermlp""" +245 22 loss """nssa""" +245 22 regularizer """no""" +245 22 optimizer """adadelta""" +245 22 training_loop """owa""" +245 22 negative_sampler """basic""" +245 22 evaluator """rankbased""" +245 23 dataset """kinships""" +245 23 model """ermlp""" +245 23 loss """nssa""" +245 23 regularizer """no""" +245 23 optimizer """adadelta""" +245 23 training_loop """owa""" +245 23 negative_sampler """basic""" +245 23 evaluator """rankbased""" +245 24 dataset """kinships""" +245 24 model """ermlp""" +245 
24 loss """nssa""" +245 24 regularizer """no""" +245 24 optimizer """adadelta""" +245 24 training_loop """owa""" +245 24 negative_sampler """basic""" +245 24 evaluator """rankbased""" +245 25 dataset """kinships""" +245 25 model """ermlp""" +245 25 loss """nssa""" +245 25 regularizer """no""" +245 25 optimizer """adadelta""" +245 25 training_loop """owa""" +245 25 negative_sampler """basic""" +245 25 evaluator """rankbased""" +245 26 dataset """kinships""" +245 26 model """ermlp""" +245 26 loss """nssa""" +245 26 regularizer """no""" +245 26 optimizer """adadelta""" +245 26 training_loop """owa""" +245 26 negative_sampler """basic""" +245 26 evaluator """rankbased""" +245 27 dataset """kinships""" +245 27 model """ermlp""" +245 27 loss """nssa""" +245 27 regularizer """no""" +245 27 optimizer """adadelta""" +245 27 training_loop """owa""" +245 27 negative_sampler """basic""" +245 27 evaluator """rankbased""" +245 28 dataset """kinships""" +245 28 model """ermlp""" +245 28 loss """nssa""" +245 28 regularizer """no""" +245 28 optimizer """adadelta""" +245 28 training_loop """owa""" +245 28 negative_sampler """basic""" +245 28 evaluator """rankbased""" +245 29 dataset """kinships""" +245 29 model """ermlp""" +245 29 loss """nssa""" +245 29 regularizer """no""" +245 29 optimizer """adadelta""" +245 29 training_loop """owa""" +245 29 negative_sampler """basic""" +245 29 evaluator """rankbased""" +245 30 dataset """kinships""" +245 30 model """ermlp""" +245 30 loss """nssa""" +245 30 regularizer """no""" +245 30 optimizer """adadelta""" +245 30 training_loop """owa""" +245 30 negative_sampler """basic""" +245 30 evaluator """rankbased""" +245 31 dataset """kinships""" +245 31 model """ermlp""" +245 31 loss """nssa""" +245 31 regularizer """no""" +245 31 optimizer """adadelta""" +245 31 training_loop """owa""" +245 31 negative_sampler """basic""" +245 31 evaluator """rankbased""" +245 32 dataset """kinships""" +245 32 model """ermlp""" +245 32 loss """nssa""" +245 32 
regularizer """no""" +245 32 optimizer """adadelta""" +245 32 training_loop """owa""" +245 32 negative_sampler """basic""" +245 32 evaluator """rankbased""" +245 33 dataset """kinships""" +245 33 model """ermlp""" +245 33 loss """nssa""" +245 33 regularizer """no""" +245 33 optimizer """adadelta""" +245 33 training_loop """owa""" +245 33 negative_sampler """basic""" +245 33 evaluator """rankbased""" +245 34 dataset """kinships""" +245 34 model """ermlp""" +245 34 loss """nssa""" +245 34 regularizer """no""" +245 34 optimizer """adadelta""" +245 34 training_loop """owa""" +245 34 negative_sampler """basic""" +245 34 evaluator """rankbased""" +245 35 dataset """kinships""" +245 35 model """ermlp""" +245 35 loss """nssa""" +245 35 regularizer """no""" +245 35 optimizer """adadelta""" +245 35 training_loop """owa""" +245 35 negative_sampler """basic""" +245 35 evaluator """rankbased""" +245 36 dataset """kinships""" +245 36 model """ermlp""" +245 36 loss """nssa""" +245 36 regularizer """no""" +245 36 optimizer """adadelta""" +245 36 training_loop """owa""" +245 36 negative_sampler """basic""" +245 36 evaluator """rankbased""" +245 37 dataset """kinships""" +245 37 model """ermlp""" +245 37 loss """nssa""" +245 37 regularizer """no""" +245 37 optimizer """adadelta""" +245 37 training_loop """owa""" +245 37 negative_sampler """basic""" +245 37 evaluator """rankbased""" +245 38 dataset """kinships""" +245 38 model """ermlp""" +245 38 loss """nssa""" +245 38 regularizer """no""" +245 38 optimizer """adadelta""" +245 38 training_loop """owa""" +245 38 negative_sampler """basic""" +245 38 evaluator """rankbased""" +245 39 dataset """kinships""" +245 39 model """ermlp""" +245 39 loss """nssa""" +245 39 regularizer """no""" +245 39 optimizer """adadelta""" +245 39 training_loop """owa""" +245 39 negative_sampler """basic""" +245 39 evaluator """rankbased""" +245 40 dataset """kinships""" +245 40 model """ermlp""" +245 40 loss """nssa""" +245 40 regularizer """no""" +245 40 
optimizer """adadelta""" +245 40 training_loop """owa""" +245 40 negative_sampler """basic""" +245 40 evaluator """rankbased""" +245 41 dataset """kinships""" +245 41 model """ermlp""" +245 41 loss """nssa""" +245 41 regularizer """no""" +245 41 optimizer """adadelta""" +245 41 training_loop """owa""" +245 41 negative_sampler """basic""" +245 41 evaluator """rankbased""" +245 42 dataset """kinships""" +245 42 model """ermlp""" +245 42 loss """nssa""" +245 42 regularizer """no""" +245 42 optimizer """adadelta""" +245 42 training_loop """owa""" +245 42 negative_sampler """basic""" +245 42 evaluator """rankbased""" +245 43 dataset """kinships""" +245 43 model """ermlp""" +245 43 loss """nssa""" +245 43 regularizer """no""" +245 43 optimizer """adadelta""" +245 43 training_loop """owa""" +245 43 negative_sampler """basic""" +245 43 evaluator """rankbased""" +245 44 dataset """kinships""" +245 44 model """ermlp""" +245 44 loss """nssa""" +245 44 regularizer """no""" +245 44 optimizer """adadelta""" +245 44 training_loop """owa""" +245 44 negative_sampler """basic""" +245 44 evaluator """rankbased""" +245 45 dataset """kinships""" +245 45 model """ermlp""" +245 45 loss """nssa""" +245 45 regularizer """no""" +245 45 optimizer """adadelta""" +245 45 training_loop """owa""" +245 45 negative_sampler """basic""" +245 45 evaluator """rankbased""" +245 46 dataset """kinships""" +245 46 model """ermlp""" +245 46 loss """nssa""" +245 46 regularizer """no""" +245 46 optimizer """adadelta""" +245 46 training_loop """owa""" +245 46 negative_sampler """basic""" +245 46 evaluator """rankbased""" +245 47 dataset """kinships""" +245 47 model """ermlp""" +245 47 loss """nssa""" +245 47 regularizer """no""" +245 47 optimizer """adadelta""" +245 47 training_loop """owa""" +245 47 negative_sampler """basic""" +245 47 evaluator """rankbased""" +245 48 dataset """kinships""" +245 48 model """ermlp""" +245 48 loss """nssa""" +245 48 regularizer """no""" +245 48 optimizer """adadelta""" +245 
48 training_loop """owa""" +245 48 negative_sampler """basic""" +245 48 evaluator """rankbased""" +245 49 dataset """kinships""" +245 49 model """ermlp""" +245 49 loss """nssa""" +245 49 regularizer """no""" +245 49 optimizer """adadelta""" +245 49 training_loop """owa""" +245 49 negative_sampler """basic""" +245 49 evaluator """rankbased""" +245 50 dataset """kinships""" +245 50 model """ermlp""" +245 50 loss """nssa""" +245 50 regularizer """no""" +245 50 optimizer """adadelta""" +245 50 training_loop """owa""" +245 50 negative_sampler """basic""" +245 50 evaluator """rankbased""" +245 51 dataset """kinships""" +245 51 model """ermlp""" +245 51 loss """nssa""" +245 51 regularizer """no""" +245 51 optimizer """adadelta""" +245 51 training_loop """owa""" +245 51 negative_sampler """basic""" +245 51 evaluator """rankbased""" +245 52 dataset """kinships""" +245 52 model """ermlp""" +245 52 loss """nssa""" +245 52 regularizer """no""" +245 52 optimizer """adadelta""" +245 52 training_loop """owa""" +245 52 negative_sampler """basic""" +245 52 evaluator """rankbased""" +245 53 dataset """kinships""" +245 53 model """ermlp""" +245 53 loss """nssa""" +245 53 regularizer """no""" +245 53 optimizer """adadelta""" +245 53 training_loop """owa""" +245 53 negative_sampler """basic""" +245 53 evaluator """rankbased""" +245 54 dataset """kinships""" +245 54 model """ermlp""" +245 54 loss """nssa""" +245 54 regularizer """no""" +245 54 optimizer """adadelta""" +245 54 training_loop """owa""" +245 54 negative_sampler """basic""" +245 54 evaluator """rankbased""" +245 55 dataset """kinships""" +245 55 model """ermlp""" +245 55 loss """nssa""" +245 55 regularizer """no""" +245 55 optimizer """adadelta""" +245 55 training_loop """owa""" +245 55 negative_sampler """basic""" +245 55 evaluator """rankbased""" +245 56 dataset """kinships""" +245 56 model """ermlp""" +245 56 loss """nssa""" +245 56 regularizer """no""" +245 56 optimizer """adadelta""" +245 56 training_loop """owa""" +245 
56 negative_sampler """basic""" +245 56 evaluator """rankbased""" +245 57 dataset """kinships""" +245 57 model """ermlp""" +245 57 loss """nssa""" +245 57 regularizer """no""" +245 57 optimizer """adadelta""" +245 57 training_loop """owa""" +245 57 negative_sampler """basic""" +245 57 evaluator """rankbased""" +245 58 dataset """kinships""" +245 58 model """ermlp""" +245 58 loss """nssa""" +245 58 regularizer """no""" +245 58 optimizer """adadelta""" +245 58 training_loop """owa""" +245 58 negative_sampler """basic""" +245 58 evaluator """rankbased""" +245 59 dataset """kinships""" +245 59 model """ermlp""" +245 59 loss """nssa""" +245 59 regularizer """no""" +245 59 optimizer """adadelta""" +245 59 training_loop """owa""" +245 59 negative_sampler """basic""" +245 59 evaluator """rankbased""" +245 60 dataset """kinships""" +245 60 model """ermlp""" +245 60 loss """nssa""" +245 60 regularizer """no""" +245 60 optimizer """adadelta""" +245 60 training_loop """owa""" +245 60 negative_sampler """basic""" +245 60 evaluator """rankbased""" +245 61 dataset """kinships""" +245 61 model """ermlp""" +245 61 loss """nssa""" +245 61 regularizer """no""" +245 61 optimizer """adadelta""" +245 61 training_loop """owa""" +245 61 negative_sampler """basic""" +245 61 evaluator """rankbased""" +245 62 dataset """kinships""" +245 62 model """ermlp""" +245 62 loss """nssa""" +245 62 regularizer """no""" +245 62 optimizer """adadelta""" +245 62 training_loop """owa""" +245 62 negative_sampler """basic""" +245 62 evaluator """rankbased""" +245 63 dataset """kinships""" +245 63 model """ermlp""" +245 63 loss """nssa""" +245 63 regularizer """no""" +245 63 optimizer """adadelta""" +245 63 training_loop """owa""" +245 63 negative_sampler """basic""" +245 63 evaluator """rankbased""" +245 64 dataset """kinships""" +245 64 model """ermlp""" +245 64 loss """nssa""" +245 64 regularizer """no""" +245 64 optimizer """adadelta""" +245 64 training_loop """owa""" +245 64 negative_sampler """basic""" 
+245 64 evaluator """rankbased""" +245 65 dataset """kinships""" +245 65 model """ermlp""" +245 65 loss """nssa""" +245 65 regularizer """no""" +245 65 optimizer """adadelta""" +245 65 training_loop """owa""" +245 65 negative_sampler """basic""" +245 65 evaluator """rankbased""" +245 66 dataset """kinships""" +245 66 model """ermlp""" +245 66 loss """nssa""" +245 66 regularizer """no""" +245 66 optimizer """adadelta""" +245 66 training_loop """owa""" +245 66 negative_sampler """basic""" +245 66 evaluator """rankbased""" +245 67 dataset """kinships""" +245 67 model """ermlp""" +245 67 loss """nssa""" +245 67 regularizer """no""" +245 67 optimizer """adadelta""" +245 67 training_loop """owa""" +245 67 negative_sampler """basic""" +245 67 evaluator """rankbased""" +245 68 dataset """kinships""" +245 68 model """ermlp""" +245 68 loss """nssa""" +245 68 regularizer """no""" +245 68 optimizer """adadelta""" +245 68 training_loop """owa""" +245 68 negative_sampler """basic""" +245 68 evaluator """rankbased""" +245 69 dataset """kinships""" +245 69 model """ermlp""" +245 69 loss """nssa""" +245 69 regularizer """no""" +245 69 optimizer """adadelta""" +245 69 training_loop """owa""" +245 69 negative_sampler """basic""" +245 69 evaluator """rankbased""" +245 70 dataset """kinships""" +245 70 model """ermlp""" +245 70 loss """nssa""" +245 70 regularizer """no""" +245 70 optimizer """adadelta""" +245 70 training_loop """owa""" +245 70 negative_sampler """basic""" +245 70 evaluator """rankbased""" +245 71 dataset """kinships""" +245 71 model """ermlp""" +245 71 loss """nssa""" +245 71 regularizer """no""" +245 71 optimizer """adadelta""" +245 71 training_loop """owa""" +245 71 negative_sampler """basic""" +245 71 evaluator """rankbased""" +245 72 dataset """kinships""" +245 72 model """ermlp""" +245 72 loss """nssa""" +245 72 regularizer """no""" +245 72 optimizer """adadelta""" +245 72 training_loop """owa""" +245 72 negative_sampler """basic""" +245 72 evaluator 
"""rankbased""" +245 73 dataset """kinships""" +245 73 model """ermlp""" +245 73 loss """nssa""" +245 73 regularizer """no""" +245 73 optimizer """adadelta""" +245 73 training_loop """owa""" +245 73 negative_sampler """basic""" +245 73 evaluator """rankbased""" +245 74 dataset """kinships""" +245 74 model """ermlp""" +245 74 loss """nssa""" +245 74 regularizer """no""" +245 74 optimizer """adadelta""" +245 74 training_loop """owa""" +245 74 negative_sampler """basic""" +245 74 evaluator """rankbased""" +245 75 dataset """kinships""" +245 75 model """ermlp""" +245 75 loss """nssa""" +245 75 regularizer """no""" +245 75 optimizer """adadelta""" +245 75 training_loop """owa""" +245 75 negative_sampler """basic""" +245 75 evaluator """rankbased""" +245 76 dataset """kinships""" +245 76 model """ermlp""" +245 76 loss """nssa""" +245 76 regularizer """no""" +245 76 optimizer """adadelta""" +245 76 training_loop """owa""" +245 76 negative_sampler """basic""" +245 76 evaluator """rankbased""" +245 77 dataset """kinships""" +245 77 model """ermlp""" +245 77 loss """nssa""" +245 77 regularizer """no""" +245 77 optimizer """adadelta""" +245 77 training_loop """owa""" +245 77 negative_sampler """basic""" +245 77 evaluator """rankbased""" +245 78 dataset """kinships""" +245 78 model """ermlp""" +245 78 loss """nssa""" +245 78 regularizer """no""" +245 78 optimizer """adadelta""" +245 78 training_loop """owa""" +245 78 negative_sampler """basic""" +245 78 evaluator """rankbased""" +245 79 dataset """kinships""" +245 79 model """ermlp""" +245 79 loss """nssa""" +245 79 regularizer """no""" +245 79 optimizer """adadelta""" +245 79 training_loop """owa""" +245 79 negative_sampler """basic""" +245 79 evaluator """rankbased""" +245 80 dataset """kinships""" +245 80 model """ermlp""" +245 80 loss """nssa""" +245 80 regularizer """no""" +245 80 optimizer """adadelta""" +245 80 training_loop """owa""" +245 80 negative_sampler """basic""" +245 80 evaluator """rankbased""" +245 81 dataset 
"""kinships""" +245 81 model """ermlp""" +245 81 loss """nssa""" +245 81 regularizer """no""" +245 81 optimizer """adadelta""" +245 81 training_loop """owa""" +245 81 negative_sampler """basic""" +245 81 evaluator """rankbased""" +245 82 dataset """kinships""" +245 82 model """ermlp""" +245 82 loss """nssa""" +245 82 regularizer """no""" +245 82 optimizer """adadelta""" +245 82 training_loop """owa""" +245 82 negative_sampler """basic""" +245 82 evaluator """rankbased""" +245 83 dataset """kinships""" +245 83 model """ermlp""" +245 83 loss """nssa""" +245 83 regularizer """no""" +245 83 optimizer """adadelta""" +245 83 training_loop """owa""" +245 83 negative_sampler """basic""" +245 83 evaluator """rankbased""" +245 84 dataset """kinships""" +245 84 model """ermlp""" +245 84 loss """nssa""" +245 84 regularizer """no""" +245 84 optimizer """adadelta""" +245 84 training_loop """owa""" +245 84 negative_sampler """basic""" +245 84 evaluator """rankbased""" +245 85 dataset """kinships""" +245 85 model """ermlp""" +245 85 loss """nssa""" +245 85 regularizer """no""" +245 85 optimizer """adadelta""" +245 85 training_loop """owa""" +245 85 negative_sampler """basic""" +245 85 evaluator """rankbased""" +245 86 dataset """kinships""" +245 86 model """ermlp""" +245 86 loss """nssa""" +245 86 regularizer """no""" +245 86 optimizer """adadelta""" +245 86 training_loop """owa""" +245 86 negative_sampler """basic""" +245 86 evaluator """rankbased""" +245 87 dataset """kinships""" +245 87 model """ermlp""" +245 87 loss """nssa""" +245 87 regularizer """no""" +245 87 optimizer """adadelta""" +245 87 training_loop """owa""" +245 87 negative_sampler """basic""" +245 87 evaluator """rankbased""" +245 88 dataset """kinships""" +245 88 model """ermlp""" +245 88 loss """nssa""" +245 88 regularizer """no""" +245 88 optimizer """adadelta""" +245 88 training_loop """owa""" +245 88 negative_sampler """basic""" +245 88 evaluator """rankbased""" +245 89 dataset """kinships""" +245 89 model 
"""ermlp""" +245 89 loss """nssa""" +245 89 regularizer """no""" +245 89 optimizer """adadelta""" +245 89 training_loop """owa""" +245 89 negative_sampler """basic""" +245 89 evaluator """rankbased""" +245 90 dataset """kinships""" +245 90 model """ermlp""" +245 90 loss """nssa""" +245 90 regularizer """no""" +245 90 optimizer """adadelta""" +245 90 training_loop """owa""" +245 90 negative_sampler """basic""" +245 90 evaluator """rankbased""" +245 91 dataset """kinships""" +245 91 model """ermlp""" +245 91 loss """nssa""" +245 91 regularizer """no""" +245 91 optimizer """adadelta""" +245 91 training_loop """owa""" +245 91 negative_sampler """basic""" +245 91 evaluator """rankbased""" +245 92 dataset """kinships""" +245 92 model """ermlp""" +245 92 loss """nssa""" +245 92 regularizer """no""" +245 92 optimizer """adadelta""" +245 92 training_loop """owa""" +245 92 negative_sampler """basic""" +245 92 evaluator """rankbased""" +245 93 dataset """kinships""" +245 93 model """ermlp""" +245 93 loss """nssa""" +245 93 regularizer """no""" +245 93 optimizer """adadelta""" +245 93 training_loop """owa""" +245 93 negative_sampler """basic""" +245 93 evaluator """rankbased""" +245 94 dataset """kinships""" +245 94 model """ermlp""" +245 94 loss """nssa""" +245 94 regularizer """no""" +245 94 optimizer """adadelta""" +245 94 training_loop """owa""" +245 94 negative_sampler """basic""" +245 94 evaluator """rankbased""" +245 95 dataset """kinships""" +245 95 model """ermlp""" +245 95 loss """nssa""" +245 95 regularizer """no""" +245 95 optimizer """adadelta""" +245 95 training_loop """owa""" +245 95 negative_sampler """basic""" +245 95 evaluator """rankbased""" +245 96 dataset """kinships""" +245 96 model """ermlp""" +245 96 loss """nssa""" +245 96 regularizer """no""" +245 96 optimizer """adadelta""" +245 96 training_loop """owa""" +245 96 negative_sampler """basic""" +245 96 evaluator """rankbased""" +245 97 dataset """kinships""" +245 97 model """ermlp""" +245 97 loss 
"""nssa""" +245 97 regularizer """no""" +245 97 optimizer """adadelta""" +245 97 training_loop """owa""" +245 97 negative_sampler """basic""" +245 97 evaluator """rankbased""" +245 98 dataset """kinships""" +245 98 model """ermlp""" +245 98 loss """nssa""" +245 98 regularizer """no""" +245 98 optimizer """adadelta""" +245 98 training_loop """owa""" +245 98 negative_sampler """basic""" +245 98 evaluator """rankbased""" +245 99 dataset """kinships""" +245 99 model """ermlp""" +245 99 loss """nssa""" +245 99 regularizer """no""" +245 99 optimizer """adadelta""" +245 99 training_loop """owa""" +245 99 negative_sampler """basic""" +245 99 evaluator """rankbased""" +245 100 dataset """kinships""" +245 100 model """ermlp""" +245 100 loss """nssa""" +245 100 regularizer """no""" +245 100 optimizer """adadelta""" +245 100 training_loop """owa""" +245 100 negative_sampler """basic""" +245 100 evaluator """rankbased""" +246 1 model.embedding_dim 0.0 +246 1 optimizer.lr 0.006694000907168076 +246 1 training.batch_size 0.0 +246 1 training.label_smoothing 0.13001759183974496 +246 2 model.embedding_dim 1.0 +246 2 optimizer.lr 0.0010125260468564678 +246 2 training.batch_size 2.0 +246 2 training.label_smoothing 0.15039622012964765 +246 3 model.embedding_dim 0.0 +246 3 optimizer.lr 0.0035183465852288496 +246 3 training.batch_size 2.0 +246 3 training.label_smoothing 0.0022917797582051 +246 4 model.embedding_dim 1.0 +246 4 optimizer.lr 0.028924614182726238 +246 4 training.batch_size 1.0 +246 4 training.label_smoothing 0.11059577082761837 +246 5 model.embedding_dim 0.0 +246 5 optimizer.lr 0.0037577391373944205 +246 5 training.batch_size 1.0 +246 5 training.label_smoothing 0.7346775978743295 +246 6 model.embedding_dim 0.0 +246 6 optimizer.lr 0.031446468852986284 +246 6 training.batch_size 0.0 +246 6 training.label_smoothing 0.002598707624714964 +246 7 model.embedding_dim 2.0 +246 7 optimizer.lr 0.006613785038737203 +246 7 training.batch_size 2.0 +246 7 training.label_smoothing 
0.020193473897253643 +246 8 model.embedding_dim 2.0 +246 8 optimizer.lr 0.07427886709766797 +246 8 training.batch_size 1.0 +246 8 training.label_smoothing 0.040536540156241495 +246 9 model.embedding_dim 0.0 +246 9 optimizer.lr 0.001268115139010623 +246 9 training.batch_size 2.0 +246 9 training.label_smoothing 0.01310022198648754 +246 10 model.embedding_dim 2.0 +246 10 optimizer.lr 0.08309903817470417 +246 10 training.batch_size 0.0 +246 10 training.label_smoothing 0.0017243572318587739 +246 11 model.embedding_dim 2.0 +246 11 optimizer.lr 0.002871669065625728 +246 11 training.batch_size 2.0 +246 11 training.label_smoothing 0.0642429637614121 +246 12 model.embedding_dim 0.0 +246 12 optimizer.lr 0.003627854054145648 +246 12 training.batch_size 2.0 +246 12 training.label_smoothing 0.08713483077872103 +246 13 model.embedding_dim 2.0 +246 13 optimizer.lr 0.029997143371558045 +246 13 training.batch_size 2.0 +246 13 training.label_smoothing 0.005341333017891061 +246 14 model.embedding_dim 2.0 +246 14 optimizer.lr 0.008443321566407206 +246 14 training.batch_size 2.0 +246 14 training.label_smoothing 0.1605788704335921 +246 15 model.embedding_dim 2.0 +246 15 optimizer.lr 0.006807443452402851 +246 15 training.batch_size 1.0 +246 15 training.label_smoothing 0.2272326969243547 +246 16 model.embedding_dim 0.0 +246 16 optimizer.lr 0.009132893269863886 +246 16 training.batch_size 2.0 +246 16 training.label_smoothing 0.008020034644916692 +246 17 model.embedding_dim 1.0 +246 17 optimizer.lr 0.01820902921748448 +246 17 training.batch_size 2.0 +246 17 training.label_smoothing 0.3078157986745807 +246 18 model.embedding_dim 1.0 +246 18 optimizer.lr 0.044899863614358014 +246 18 training.batch_size 1.0 +246 18 training.label_smoothing 0.0012966359707570353 +246 19 model.embedding_dim 1.0 +246 19 optimizer.lr 0.05264305091518804 +246 19 training.batch_size 1.0 +246 19 training.label_smoothing 0.18055919025183287 +246 20 model.embedding_dim 0.0 +246 20 optimizer.lr 0.021401322850391907 +246 
20 training.batch_size 1.0 +246 20 training.label_smoothing 0.001044943767213707 +246 21 model.embedding_dim 1.0 +246 21 optimizer.lr 0.0015335611817159087 +246 21 training.batch_size 2.0 +246 21 training.label_smoothing 0.11242877281273765 +246 22 model.embedding_dim 2.0 +246 22 optimizer.lr 0.010469522650075188 +246 22 training.batch_size 1.0 +246 22 training.label_smoothing 0.0656289513784881 +246 23 model.embedding_dim 0.0 +246 23 optimizer.lr 0.024940789765469908 +246 23 training.batch_size 1.0 +246 23 training.label_smoothing 0.0022445560688755875 +246 24 model.embedding_dim 1.0 +246 24 optimizer.lr 0.013339566254139686 +246 24 training.batch_size 2.0 +246 24 training.label_smoothing 0.16482863986988885 +246 25 model.embedding_dim 2.0 +246 25 optimizer.lr 0.015736989732447366 +246 25 training.batch_size 0.0 +246 25 training.label_smoothing 0.03142405535369467 +246 26 model.embedding_dim 0.0 +246 26 optimizer.lr 0.037998266373339654 +246 26 training.batch_size 1.0 +246 26 training.label_smoothing 0.26570793560301126 +246 27 model.embedding_dim 0.0 +246 27 optimizer.lr 0.015133593308613864 +246 27 training.batch_size 0.0 +246 27 training.label_smoothing 0.0021146497062549357 +246 28 model.embedding_dim 1.0 +246 28 optimizer.lr 0.0021626158628719077 +246 28 training.batch_size 2.0 +246 28 training.label_smoothing 0.0018760795577926878 +246 29 model.embedding_dim 1.0 +246 29 optimizer.lr 0.052132252474195416 +246 29 training.batch_size 0.0 +246 29 training.label_smoothing 0.07791512267815495 +246 30 model.embedding_dim 0.0 +246 30 optimizer.lr 0.05071140019587769 +246 30 training.batch_size 2.0 +246 30 training.label_smoothing 0.5130269683488984 +246 31 model.embedding_dim 0.0 +246 31 optimizer.lr 0.06346787195037751 +246 31 training.batch_size 0.0 +246 31 training.label_smoothing 0.012681641120482109 +246 32 model.embedding_dim 2.0 +246 32 optimizer.lr 0.04050067621883861 +246 32 training.batch_size 1.0 +246 32 training.label_smoothing 0.001180065184142004 +246 
33 model.embedding_dim 1.0 +246 33 optimizer.lr 0.0012550364441754449 +246 33 training.batch_size 1.0 +246 33 training.label_smoothing 0.2759175127452571 +246 34 model.embedding_dim 2.0 +246 34 optimizer.lr 0.06799367074503734 +246 34 training.batch_size 1.0 +246 34 training.label_smoothing 0.0035217125546900944 +246 35 model.embedding_dim 0.0 +246 35 optimizer.lr 0.004087154232623867 +246 35 training.batch_size 0.0 +246 35 training.label_smoothing 0.17966153787837355 +246 36 model.embedding_dim 2.0 +246 36 optimizer.lr 0.012674719366722499 +246 36 training.batch_size 1.0 +246 36 training.label_smoothing 0.17591753711111024 +246 37 model.embedding_dim 2.0 +246 37 optimizer.lr 0.018887131261444038 +246 37 training.batch_size 0.0 +246 37 training.label_smoothing 0.4680301767577401 +246 38 model.embedding_dim 0.0 +246 38 optimizer.lr 0.004998799509120492 +246 38 training.batch_size 2.0 +246 38 training.label_smoothing 0.1011637345273653 +246 39 model.embedding_dim 1.0 +246 39 optimizer.lr 0.0044564329715576735 +246 39 training.batch_size 1.0 +246 39 training.label_smoothing 0.004643618739867243 +246 40 model.embedding_dim 0.0 +246 40 optimizer.lr 0.07351308079931423 +246 40 training.batch_size 1.0 +246 40 training.label_smoothing 0.3183723056398536 +246 41 model.embedding_dim 0.0 +246 41 optimizer.lr 0.012688699323242769 +246 41 training.batch_size 0.0 +246 41 training.label_smoothing 0.6706333641203416 +246 42 model.embedding_dim 2.0 +246 42 optimizer.lr 0.0010364080783735131 +246 42 training.batch_size 0.0 +246 42 training.label_smoothing 0.5984279227495095 +246 43 model.embedding_dim 2.0 +246 43 optimizer.lr 0.002635491233287443 +246 43 training.batch_size 1.0 +246 43 training.label_smoothing 0.34210390910003147 +246 44 model.embedding_dim 0.0 +246 44 optimizer.lr 0.0023855773407406734 +246 44 training.batch_size 2.0 +246 44 training.label_smoothing 0.24016779098528013 +246 45 model.embedding_dim 0.0 +246 45 optimizer.lr 0.0011191655599585603 +246 45 
training.batch_size 1.0 +246 45 training.label_smoothing 0.7423819704102176 +246 46 model.embedding_dim 1.0 +246 46 optimizer.lr 0.0028045609277709623 +246 46 training.batch_size 0.0 +246 46 training.label_smoothing 0.011984223230417613 +246 47 model.embedding_dim 1.0 +246 47 optimizer.lr 0.0077262344508698165 +246 47 training.batch_size 1.0 +246 47 training.label_smoothing 0.010160790185905004 +246 48 model.embedding_dim 2.0 +246 48 optimizer.lr 0.044277163814545044 +246 48 training.batch_size 2.0 +246 48 training.label_smoothing 0.003020036140202355 +246 49 model.embedding_dim 2.0 +246 49 optimizer.lr 0.0012258984625064158 +246 49 training.batch_size 2.0 +246 49 training.label_smoothing 0.1871015916919277 +246 50 model.embedding_dim 0.0 +246 50 optimizer.lr 0.050928649776676466 +246 50 training.batch_size 0.0 +246 50 training.label_smoothing 0.015888877163802526 +246 51 model.embedding_dim 1.0 +246 51 optimizer.lr 0.07290000341250609 +246 51 training.batch_size 0.0 +246 51 training.label_smoothing 0.002742438218256368 +246 52 model.embedding_dim 1.0 +246 52 optimizer.lr 0.010665850966157632 +246 52 training.batch_size 1.0 +246 52 training.label_smoothing 0.004186526717178629 +246 53 model.embedding_dim 1.0 +246 53 optimizer.lr 0.08262242200130193 +246 53 training.batch_size 1.0 +246 53 training.label_smoothing 0.0026918910729077485 +246 54 model.embedding_dim 0.0 +246 54 optimizer.lr 0.001602246244113657 +246 54 training.batch_size 2.0 +246 54 training.label_smoothing 0.25371847758092486 +246 55 model.embedding_dim 2.0 +246 55 optimizer.lr 0.017836608846306546 +246 55 training.batch_size 2.0 +246 55 training.label_smoothing 0.021447387442898803 +246 56 model.embedding_dim 2.0 +246 56 optimizer.lr 0.08697298210778422 +246 56 training.batch_size 1.0 +246 56 training.label_smoothing 0.00911288707404282 +246 57 model.embedding_dim 1.0 +246 57 optimizer.lr 0.00472343911691957 +246 57 training.batch_size 0.0 +246 57 training.label_smoothing 0.36805563606206076 +246 58 
model.embedding_dim 0.0 +246 58 optimizer.lr 0.022427749203922112 +246 58 training.batch_size 2.0 +246 58 training.label_smoothing 0.026284128844670167 +246 59 model.embedding_dim 1.0 +246 59 optimizer.lr 0.08829103081556923 +246 59 training.batch_size 1.0 +246 59 training.label_smoothing 0.06818679144268171 +246 60 model.embedding_dim 0.0 +246 60 optimizer.lr 0.0015345231337117416 +246 60 training.batch_size 2.0 +246 60 training.label_smoothing 0.002025712324848951 +246 61 model.embedding_dim 1.0 +246 61 optimizer.lr 0.03935910567832702 +246 61 training.batch_size 0.0 +246 61 training.label_smoothing 0.012807347074531305 +246 62 model.embedding_dim 2.0 +246 62 optimizer.lr 0.005034058626793041 +246 62 training.batch_size 0.0 +246 62 training.label_smoothing 0.2323913722267806 +246 63 model.embedding_dim 0.0 +246 63 optimizer.lr 0.023459181127368025 +246 63 training.batch_size 1.0 +246 63 training.label_smoothing 0.077697200414832 +246 64 model.embedding_dim 2.0 +246 64 optimizer.lr 0.0028623807591572586 +246 64 training.batch_size 1.0 +246 64 training.label_smoothing 0.13616153338739997 +246 65 model.embedding_dim 0.0 +246 65 optimizer.lr 0.008334746948367604 +246 65 training.batch_size 2.0 +246 65 training.label_smoothing 0.007338252401587325 +246 66 model.embedding_dim 1.0 +246 66 optimizer.lr 0.0013627000158020848 +246 66 training.batch_size 2.0 +246 66 training.label_smoothing 0.008312540041466561 +246 67 model.embedding_dim 1.0 +246 67 optimizer.lr 0.008015073247111926 +246 67 training.batch_size 2.0 +246 67 training.label_smoothing 0.01044845735869631 +246 68 model.embedding_dim 0.0 +246 68 optimizer.lr 0.08643214430464526 +246 68 training.batch_size 1.0 +246 68 training.label_smoothing 0.2669177370171964 +246 69 model.embedding_dim 2.0 +246 69 optimizer.lr 0.009101604826609601 +246 69 training.batch_size 2.0 +246 69 training.label_smoothing 0.11790834968256347 +246 70 model.embedding_dim 2.0 +246 70 optimizer.lr 0.049769190591279155 +246 70 
training.batch_size 0.0 +246 70 training.label_smoothing 0.0023994013702325215 +246 71 model.embedding_dim 1.0 +246 71 optimizer.lr 0.01256060832153136 +246 71 training.batch_size 2.0 +246 71 training.label_smoothing 0.40311186319630155 +246 72 model.embedding_dim 1.0 +246 72 optimizer.lr 0.0016901199538181286 +246 72 training.batch_size 0.0 +246 72 training.label_smoothing 0.010579596012250838 +246 73 model.embedding_dim 0.0 +246 73 optimizer.lr 0.0034322839679389117 +246 73 training.batch_size 0.0 +246 73 training.label_smoothing 0.005067042799836958 +246 74 model.embedding_dim 1.0 +246 74 optimizer.lr 0.0010339718056172978 +246 74 training.batch_size 0.0 +246 74 training.label_smoothing 0.002477532661455356 +246 75 model.embedding_dim 1.0 +246 75 optimizer.lr 0.04323209261095884 +246 75 training.batch_size 2.0 +246 75 training.label_smoothing 0.0041605325668512255 +246 76 model.embedding_dim 1.0 +246 76 optimizer.lr 0.002716640758239123 +246 76 training.batch_size 0.0 +246 76 training.label_smoothing 0.017470216473000108 +246 77 model.embedding_dim 2.0 +246 77 optimizer.lr 0.03504306702587763 +246 77 training.batch_size 2.0 +246 77 training.label_smoothing 0.016693991789974705 +246 78 model.embedding_dim 1.0 +246 78 optimizer.lr 0.03796820555704415 +246 78 training.batch_size 2.0 +246 78 training.label_smoothing 0.033331975147167674 +246 79 model.embedding_dim 1.0 +246 79 optimizer.lr 0.02751961001968324 +246 79 training.batch_size 1.0 +246 79 training.label_smoothing 0.6716754345846199 +246 80 model.embedding_dim 1.0 +246 80 optimizer.lr 0.07762529988338578 +246 80 training.batch_size 0.0 +246 80 training.label_smoothing 0.0018423216631015986 +246 81 model.embedding_dim 1.0 +246 81 optimizer.lr 0.016611522937831545 +246 81 training.batch_size 2.0 +246 81 training.label_smoothing 0.005580742683279462 +246 82 model.embedding_dim 0.0 +246 82 optimizer.lr 0.02543799174171361 +246 82 training.batch_size 2.0 +246 82 training.label_smoothing 0.1600205881931317 +246 83 
model.embedding_dim 1.0 +246 83 optimizer.lr 0.006399363851891437 +246 83 training.batch_size 1.0 +246 83 training.label_smoothing 0.05044659527629187 +246 84 model.embedding_dim 0.0 +246 84 optimizer.lr 0.0014331663590763763 +246 84 training.batch_size 1.0 +246 84 training.label_smoothing 0.009020113881424992 +246 85 model.embedding_dim 1.0 +246 85 optimizer.lr 0.0026416036074308207 +246 85 training.batch_size 2.0 +246 85 training.label_smoothing 0.09453298881865836 +246 86 model.embedding_dim 2.0 +246 86 optimizer.lr 0.0877396410177387 +246 86 training.batch_size 1.0 +246 86 training.label_smoothing 0.5545322594502516 +246 87 model.embedding_dim 0.0 +246 87 optimizer.lr 0.0703731508305226 +246 87 training.batch_size 1.0 +246 87 training.label_smoothing 0.5540414676529422 +246 88 model.embedding_dim 1.0 +246 88 optimizer.lr 0.023459526492497235 +246 88 training.batch_size 1.0 +246 88 training.label_smoothing 0.31390062508530925 +246 89 model.embedding_dim 0.0 +246 89 optimizer.lr 0.08436547077249694 +246 89 training.batch_size 2.0 +246 89 training.label_smoothing 0.09988936560206174 +246 90 model.embedding_dim 2.0 +246 90 optimizer.lr 0.021244840262845652 +246 90 training.batch_size 2.0 +246 90 training.label_smoothing 0.024269460484126114 +246 91 model.embedding_dim 2.0 +246 91 optimizer.lr 0.047828083523295525 +246 91 training.batch_size 2.0 +246 91 training.label_smoothing 0.003089039796307765 +246 92 model.embedding_dim 2.0 +246 92 optimizer.lr 0.029262785635546733 +246 92 training.batch_size 1.0 +246 92 training.label_smoothing 0.0028411234211478393 +246 93 model.embedding_dim 2.0 +246 93 optimizer.lr 0.09285244880944432 +246 93 training.batch_size 1.0 +246 93 training.label_smoothing 0.005671498437275245 +246 94 model.embedding_dim 1.0 +246 94 optimizer.lr 0.01253817135428925 +246 94 training.batch_size 1.0 +246 94 training.label_smoothing 0.011154033408406852 +246 95 model.embedding_dim 0.0 +246 95 optimizer.lr 0.014644514353868862 +246 95 
training.batch_size 2.0 +246 95 training.label_smoothing 0.00722890895120228 +246 96 model.embedding_dim 2.0 +246 96 optimizer.lr 0.001360933734169775 +246 96 training.batch_size 2.0 +246 96 training.label_smoothing 0.23774042674967957 +246 97 model.embedding_dim 0.0 +246 97 optimizer.lr 0.016577616504625168 +246 97 training.batch_size 0.0 +246 97 training.label_smoothing 0.06849830926372942 +246 98 model.embedding_dim 2.0 +246 98 optimizer.lr 0.0037505484123238233 +246 98 training.batch_size 0.0 +246 98 training.label_smoothing 0.0022074805925849953 +246 99 model.embedding_dim 0.0 +246 99 optimizer.lr 0.005305667445627532 +246 99 training.batch_size 1.0 +246 99 training.label_smoothing 0.008811330212687046 +246 100 model.embedding_dim 1.0 +246 100 optimizer.lr 0.02435827696754348 +246 100 training.batch_size 1.0 +246 100 training.label_smoothing 0.0054440554225949345 +246 1 dataset """kinships""" +246 1 model """ermlp""" +246 1 loss """bceaftersigmoid""" +246 1 regularizer """no""" +246 1 optimizer """adam""" +246 1 training_loop """lcwa""" +246 1 evaluator """rankbased""" +246 2 dataset """kinships""" +246 2 model """ermlp""" +246 2 loss """bceaftersigmoid""" +246 2 regularizer """no""" +246 2 optimizer """adam""" +246 2 training_loop """lcwa""" +246 2 evaluator """rankbased""" +246 3 dataset """kinships""" +246 3 model """ermlp""" +246 3 loss """bceaftersigmoid""" +246 3 regularizer """no""" +246 3 optimizer """adam""" +246 3 training_loop """lcwa""" +246 3 evaluator """rankbased""" +246 4 dataset """kinships""" +246 4 model """ermlp""" +246 4 loss """bceaftersigmoid""" +246 4 regularizer """no""" +246 4 optimizer """adam""" +246 4 training_loop """lcwa""" +246 4 evaluator """rankbased""" +246 5 dataset """kinships""" +246 5 model """ermlp""" +246 5 loss """bceaftersigmoid""" +246 5 regularizer """no""" +246 5 optimizer """adam""" +246 5 training_loop """lcwa""" +246 5 evaluator """rankbased""" +246 6 dataset """kinships""" +246 6 model """ermlp""" +246 6 loss 
"""bceaftersigmoid""" +246 6 regularizer """no""" +246 6 optimizer """adam""" +246 6 training_loop """lcwa""" +246 6 evaluator """rankbased""" +246 7 dataset """kinships""" +246 7 model """ermlp""" +246 7 loss """bceaftersigmoid""" +246 7 regularizer """no""" +246 7 optimizer """adam""" +246 7 training_loop """lcwa""" +246 7 evaluator """rankbased""" +246 8 dataset """kinships""" +246 8 model """ermlp""" +246 8 loss """bceaftersigmoid""" +246 8 regularizer """no""" +246 8 optimizer """adam""" +246 8 training_loop """lcwa""" +246 8 evaluator """rankbased""" +246 9 dataset """kinships""" +246 9 model """ermlp""" +246 9 loss """bceaftersigmoid""" +246 9 regularizer """no""" +246 9 optimizer """adam""" +246 9 training_loop """lcwa""" +246 9 evaluator """rankbased""" +246 10 dataset """kinships""" +246 10 model """ermlp""" +246 10 loss """bceaftersigmoid""" +246 10 regularizer """no""" +246 10 optimizer """adam""" +246 10 training_loop """lcwa""" +246 10 evaluator """rankbased""" +246 11 dataset """kinships""" +246 11 model """ermlp""" +246 11 loss """bceaftersigmoid""" +246 11 regularizer """no""" +246 11 optimizer """adam""" +246 11 training_loop """lcwa""" +246 11 evaluator """rankbased""" +246 12 dataset """kinships""" +246 12 model """ermlp""" +246 12 loss """bceaftersigmoid""" +246 12 regularizer """no""" +246 12 optimizer """adam""" +246 12 training_loop """lcwa""" +246 12 evaluator """rankbased""" +246 13 dataset """kinships""" +246 13 model """ermlp""" +246 13 loss """bceaftersigmoid""" +246 13 regularizer """no""" +246 13 optimizer """adam""" +246 13 training_loop """lcwa""" +246 13 evaluator """rankbased""" +246 14 dataset """kinships""" +246 14 model """ermlp""" +246 14 loss """bceaftersigmoid""" +246 14 regularizer """no""" +246 14 optimizer """adam""" +246 14 training_loop """lcwa""" +246 14 evaluator """rankbased""" +246 15 dataset """kinships""" +246 15 model """ermlp""" +246 15 loss """bceaftersigmoid""" +246 15 regularizer """no""" +246 15 optimizer 
"""adam""" +246 15 training_loop """lcwa""" +246 15 evaluator """rankbased""" +246 16 dataset """kinships""" +246 16 model """ermlp""" +246 16 loss """bceaftersigmoid""" +246 16 regularizer """no""" +246 16 optimizer """adam""" +246 16 training_loop """lcwa""" +246 16 evaluator """rankbased""" +246 17 dataset """kinships""" +246 17 model """ermlp""" +246 17 loss """bceaftersigmoid""" +246 17 regularizer """no""" +246 17 optimizer """adam""" +246 17 training_loop """lcwa""" +246 17 evaluator """rankbased""" +246 18 dataset """kinships""" +246 18 model """ermlp""" +246 18 loss """bceaftersigmoid""" +246 18 regularizer """no""" +246 18 optimizer """adam""" +246 18 training_loop """lcwa""" +246 18 evaluator """rankbased""" +246 19 dataset """kinships""" +246 19 model """ermlp""" +246 19 loss """bceaftersigmoid""" +246 19 regularizer """no""" +246 19 optimizer """adam""" +246 19 training_loop """lcwa""" +246 19 evaluator """rankbased""" +246 20 dataset """kinships""" +246 20 model """ermlp""" +246 20 loss """bceaftersigmoid""" +246 20 regularizer """no""" +246 20 optimizer """adam""" +246 20 training_loop """lcwa""" +246 20 evaluator """rankbased""" +246 21 dataset """kinships""" +246 21 model """ermlp""" +246 21 loss """bceaftersigmoid""" +246 21 regularizer """no""" +246 21 optimizer """adam""" +246 21 training_loop """lcwa""" +246 21 evaluator """rankbased""" +246 22 dataset """kinships""" +246 22 model """ermlp""" +246 22 loss """bceaftersigmoid""" +246 22 regularizer """no""" +246 22 optimizer """adam""" +246 22 training_loop """lcwa""" +246 22 evaluator """rankbased""" +246 23 dataset """kinships""" +246 23 model """ermlp""" +246 23 loss """bceaftersigmoid""" +246 23 regularizer """no""" +246 23 optimizer """adam""" +246 23 training_loop """lcwa""" +246 23 evaluator """rankbased""" +246 24 dataset """kinships""" +246 24 model """ermlp""" +246 24 loss """bceaftersigmoid""" +246 24 regularizer """no""" +246 24 optimizer """adam""" +246 24 training_loop """lcwa""" 
+246 24 evaluator """rankbased""" +246 25 dataset """kinships""" +246 25 model """ermlp""" +246 25 loss """bceaftersigmoid""" +246 25 regularizer """no""" +246 25 optimizer """adam""" +246 25 training_loop """lcwa""" +246 25 evaluator """rankbased""" +246 26 dataset """kinships""" +246 26 model """ermlp""" +246 26 loss """bceaftersigmoid""" +246 26 regularizer """no""" +246 26 optimizer """adam""" +246 26 training_loop """lcwa""" +246 26 evaluator """rankbased""" +246 27 dataset """kinships""" +246 27 model """ermlp""" +246 27 loss """bceaftersigmoid""" +246 27 regularizer """no""" +246 27 optimizer """adam""" +246 27 training_loop """lcwa""" +246 27 evaluator """rankbased""" +246 28 dataset """kinships""" +246 28 model """ermlp""" +246 28 loss """bceaftersigmoid""" +246 28 regularizer """no""" +246 28 optimizer """adam""" +246 28 training_loop """lcwa""" +246 28 evaluator """rankbased""" +246 29 dataset """kinships""" +246 29 model """ermlp""" +246 29 loss """bceaftersigmoid""" +246 29 regularizer """no""" +246 29 optimizer """adam""" +246 29 training_loop """lcwa""" +246 29 evaluator """rankbased""" +246 30 dataset """kinships""" +246 30 model """ermlp""" +246 30 loss """bceaftersigmoid""" +246 30 regularizer """no""" +246 30 optimizer """adam""" +246 30 training_loop """lcwa""" +246 30 evaluator """rankbased""" +246 31 dataset """kinships""" +246 31 model """ermlp""" +246 31 loss """bceaftersigmoid""" +246 31 regularizer """no""" +246 31 optimizer """adam""" +246 31 training_loop """lcwa""" +246 31 evaluator """rankbased""" +246 32 dataset """kinships""" +246 32 model """ermlp""" +246 32 loss """bceaftersigmoid""" +246 32 regularizer """no""" +246 32 optimizer """adam""" +246 32 training_loop """lcwa""" +246 32 evaluator """rankbased""" +246 33 dataset """kinships""" +246 33 model """ermlp""" +246 33 loss """bceaftersigmoid""" +246 33 regularizer """no""" +246 33 optimizer """adam""" +246 33 training_loop """lcwa""" +246 33 evaluator """rankbased""" +246 34 
dataset """kinships""" +246 34 model """ermlp""" +246 34 loss """bceaftersigmoid""" +246 34 regularizer """no""" +246 34 optimizer """adam""" +246 34 training_loop """lcwa""" +246 34 evaluator """rankbased""" +246 35 dataset """kinships""" +246 35 model """ermlp""" +246 35 loss """bceaftersigmoid""" +246 35 regularizer """no""" +246 35 optimizer """adam""" +246 35 training_loop """lcwa""" +246 35 evaluator """rankbased""" +246 36 dataset """kinships""" +246 36 model """ermlp""" +246 36 loss """bceaftersigmoid""" +246 36 regularizer """no""" +246 36 optimizer """adam""" +246 36 training_loop """lcwa""" +246 36 evaluator """rankbased""" +246 37 dataset """kinships""" +246 37 model """ermlp""" +246 37 loss """bceaftersigmoid""" +246 37 regularizer """no""" +246 37 optimizer """adam""" +246 37 training_loop """lcwa""" +246 37 evaluator """rankbased""" +246 38 dataset """kinships""" +246 38 model """ermlp""" +246 38 loss """bceaftersigmoid""" +246 38 regularizer """no""" +246 38 optimizer """adam""" +246 38 training_loop """lcwa""" +246 38 evaluator """rankbased""" +246 39 dataset """kinships""" +246 39 model """ermlp""" +246 39 loss """bceaftersigmoid""" +246 39 regularizer """no""" +246 39 optimizer """adam""" +246 39 training_loop """lcwa""" +246 39 evaluator """rankbased""" +246 40 dataset """kinships""" +246 40 model """ermlp""" +246 40 loss """bceaftersigmoid""" +246 40 regularizer """no""" +246 40 optimizer """adam""" +246 40 training_loop """lcwa""" +246 40 evaluator """rankbased""" +246 41 dataset """kinships""" +246 41 model """ermlp""" +246 41 loss """bceaftersigmoid""" +246 41 regularizer """no""" +246 41 optimizer """adam""" +246 41 training_loop """lcwa""" +246 41 evaluator """rankbased""" +246 42 dataset """kinships""" +246 42 model """ermlp""" +246 42 loss """bceaftersigmoid""" +246 42 regularizer """no""" +246 42 optimizer """adam""" +246 42 training_loop """lcwa""" +246 42 evaluator """rankbased""" +246 43 dataset """kinships""" +246 43 model 
"""ermlp""" +246 43 loss """bceaftersigmoid""" +246 43 regularizer """no""" +246 43 optimizer """adam""" +246 43 training_loop """lcwa""" +246 43 evaluator """rankbased""" +246 44 dataset """kinships""" +246 44 model """ermlp""" +246 44 loss """bceaftersigmoid""" +246 44 regularizer """no""" +246 44 optimizer """adam""" +246 44 training_loop """lcwa""" +246 44 evaluator """rankbased""" +246 45 dataset """kinships""" +246 45 model """ermlp""" +246 45 loss """bceaftersigmoid""" +246 45 regularizer """no""" +246 45 optimizer """adam""" +246 45 training_loop """lcwa""" +246 45 evaluator """rankbased""" +246 46 dataset """kinships""" +246 46 model """ermlp""" +246 46 loss """bceaftersigmoid""" +246 46 regularizer """no""" +246 46 optimizer """adam""" +246 46 training_loop """lcwa""" +246 46 evaluator """rankbased""" +246 47 dataset """kinships""" +246 47 model """ermlp""" +246 47 loss """bceaftersigmoid""" +246 47 regularizer """no""" +246 47 optimizer """adam""" +246 47 training_loop """lcwa""" +246 47 evaluator """rankbased""" +246 48 dataset """kinships""" +246 48 model """ermlp""" +246 48 loss """bceaftersigmoid""" +246 48 regularizer """no""" +246 48 optimizer """adam""" +246 48 training_loop """lcwa""" +246 48 evaluator """rankbased""" +246 49 dataset """kinships""" +246 49 model """ermlp""" +246 49 loss """bceaftersigmoid""" +246 49 regularizer """no""" +246 49 optimizer """adam""" +246 49 training_loop """lcwa""" +246 49 evaluator """rankbased""" +246 50 dataset """kinships""" +246 50 model """ermlp""" +246 50 loss """bceaftersigmoid""" +246 50 regularizer """no""" +246 50 optimizer """adam""" +246 50 training_loop """lcwa""" +246 50 evaluator """rankbased""" +246 51 dataset """kinships""" +246 51 model """ermlp""" +246 51 loss """bceaftersigmoid""" +246 51 regularizer """no""" +246 51 optimizer """adam""" +246 51 training_loop """lcwa""" +246 51 evaluator """rankbased""" +246 52 dataset """kinships""" +246 52 model """ermlp""" +246 52 loss """bceaftersigmoid""" 
+246 52 regularizer """no""" +246 52 optimizer """adam""" +246 52 training_loop """lcwa""" +246 52 evaluator """rankbased""" +246 53 dataset """kinships""" +246 53 model """ermlp""" +246 53 loss """bceaftersigmoid""" +246 53 regularizer """no""" +246 53 optimizer """adam""" +246 53 training_loop """lcwa""" +246 53 evaluator """rankbased""" +246 54 dataset """kinships""" +246 54 model """ermlp""" +246 54 loss """bceaftersigmoid""" +246 54 regularizer """no""" +246 54 optimizer """adam""" +246 54 training_loop """lcwa""" +246 54 evaluator """rankbased""" +246 55 dataset """kinships""" +246 55 model """ermlp""" +246 55 loss """bceaftersigmoid""" +246 55 regularizer """no""" +246 55 optimizer """adam""" +246 55 training_loop """lcwa""" +246 55 evaluator """rankbased""" +246 56 dataset """kinships""" +246 56 model """ermlp""" +246 56 loss """bceaftersigmoid""" +246 56 regularizer """no""" +246 56 optimizer """adam""" +246 56 training_loop """lcwa""" +246 56 evaluator """rankbased""" +246 57 dataset """kinships""" +246 57 model """ermlp""" +246 57 loss """bceaftersigmoid""" +246 57 regularizer """no""" +246 57 optimizer """adam""" +246 57 training_loop """lcwa""" +246 57 evaluator """rankbased""" +246 58 dataset """kinships""" +246 58 model """ermlp""" +246 58 loss """bceaftersigmoid""" +246 58 regularizer """no""" +246 58 optimizer """adam""" +246 58 training_loop """lcwa""" +246 58 evaluator """rankbased""" +246 59 dataset """kinships""" +246 59 model """ermlp""" +246 59 loss """bceaftersigmoid""" +246 59 regularizer """no""" +246 59 optimizer """adam""" +246 59 training_loop """lcwa""" +246 59 evaluator """rankbased""" +246 60 dataset """kinships""" +246 60 model """ermlp""" +246 60 loss """bceaftersigmoid""" +246 60 regularizer """no""" +246 60 optimizer """adam""" +246 60 training_loop """lcwa""" +246 60 evaluator """rankbased""" +246 61 dataset """kinships""" +246 61 model """ermlp""" +246 61 loss """bceaftersigmoid""" +246 61 regularizer """no""" +246 61 optimizer 
"""adam""" +246 61 training_loop """lcwa""" +246 61 evaluator """rankbased""" +246 62 dataset """kinships""" +246 62 model """ermlp""" +246 62 loss """bceaftersigmoid""" +246 62 regularizer """no""" +246 62 optimizer """adam""" +246 62 training_loop """lcwa""" +246 62 evaluator """rankbased""" +246 63 dataset """kinships""" +246 63 model """ermlp""" +246 63 loss """bceaftersigmoid""" +246 63 regularizer """no""" +246 63 optimizer """adam""" +246 63 training_loop """lcwa""" +246 63 evaluator """rankbased""" +246 64 dataset """kinships""" +246 64 model """ermlp""" +246 64 loss """bceaftersigmoid""" +246 64 regularizer """no""" +246 64 optimizer """adam""" +246 64 training_loop """lcwa""" +246 64 evaluator """rankbased""" +246 65 dataset """kinships""" +246 65 model """ermlp""" +246 65 loss """bceaftersigmoid""" +246 65 regularizer """no""" +246 65 optimizer """adam""" +246 65 training_loop """lcwa""" +246 65 evaluator """rankbased""" +246 66 dataset """kinships""" +246 66 model """ermlp""" +246 66 loss """bceaftersigmoid""" +246 66 regularizer """no""" +246 66 optimizer """adam""" +246 66 training_loop """lcwa""" +246 66 evaluator """rankbased""" +246 67 dataset """kinships""" +246 67 model """ermlp""" +246 67 loss """bceaftersigmoid""" +246 67 regularizer """no""" +246 67 optimizer """adam""" +246 67 training_loop """lcwa""" +246 67 evaluator """rankbased""" +246 68 dataset """kinships""" +246 68 model """ermlp""" +246 68 loss """bceaftersigmoid""" +246 68 regularizer """no""" +246 68 optimizer """adam""" +246 68 training_loop """lcwa""" +246 68 evaluator """rankbased""" +246 69 dataset """kinships""" +246 69 model """ermlp""" +246 69 loss """bceaftersigmoid""" +246 69 regularizer """no""" +246 69 optimizer """adam""" +246 69 training_loop """lcwa""" +246 69 evaluator """rankbased""" +246 70 dataset """kinships""" +246 70 model """ermlp""" +246 70 loss """bceaftersigmoid""" +246 70 regularizer """no""" +246 70 optimizer """adam""" +246 70 training_loop """lcwa""" 
+246 70 evaluator """rankbased""" +246 71 dataset """kinships""" +246 71 model """ermlp""" +246 71 loss """bceaftersigmoid""" +246 71 regularizer """no""" +246 71 optimizer """adam""" +246 71 training_loop """lcwa""" +246 71 evaluator """rankbased""" +246 72 dataset """kinships""" +246 72 model """ermlp""" +246 72 loss """bceaftersigmoid""" +246 72 regularizer """no""" +246 72 optimizer """adam""" +246 72 training_loop """lcwa""" +246 72 evaluator """rankbased""" +246 73 dataset """kinships""" +246 73 model """ermlp""" +246 73 loss """bceaftersigmoid""" +246 73 regularizer """no""" +246 73 optimizer """adam""" +246 73 training_loop """lcwa""" +246 73 evaluator """rankbased""" +246 74 dataset """kinships""" +246 74 model """ermlp""" +246 74 loss """bceaftersigmoid""" +246 74 regularizer """no""" +246 74 optimizer """adam""" +246 74 training_loop """lcwa""" +246 74 evaluator """rankbased""" +246 75 dataset """kinships""" +246 75 model """ermlp""" +246 75 loss """bceaftersigmoid""" +246 75 regularizer """no""" +246 75 optimizer """adam""" +246 75 training_loop """lcwa""" +246 75 evaluator """rankbased""" +246 76 dataset """kinships""" +246 76 model """ermlp""" +246 76 loss """bceaftersigmoid""" +246 76 regularizer """no""" +246 76 optimizer """adam""" +246 76 training_loop """lcwa""" +246 76 evaluator """rankbased""" +246 77 dataset """kinships""" +246 77 model """ermlp""" +246 77 loss """bceaftersigmoid""" +246 77 regularizer """no""" +246 77 optimizer """adam""" +246 77 training_loop """lcwa""" +246 77 evaluator """rankbased""" +246 78 dataset """kinships""" +246 78 model """ermlp""" +246 78 loss """bceaftersigmoid""" +246 78 regularizer """no""" +246 78 optimizer """adam""" +246 78 training_loop """lcwa""" +246 78 evaluator """rankbased""" +246 79 dataset """kinships""" +246 79 model """ermlp""" +246 79 loss """bceaftersigmoid""" +246 79 regularizer """no""" +246 79 optimizer """adam""" +246 79 training_loop """lcwa""" +246 79 evaluator """rankbased""" +246 80 
dataset """kinships""" +246 80 model """ermlp""" +246 80 loss """bceaftersigmoid""" +246 80 regularizer """no""" +246 80 optimizer """adam""" +246 80 training_loop """lcwa""" +246 80 evaluator """rankbased""" +246 81 dataset """kinships""" +246 81 model """ermlp""" +246 81 loss """bceaftersigmoid""" +246 81 regularizer """no""" +246 81 optimizer """adam""" +246 81 training_loop """lcwa""" +246 81 evaluator """rankbased""" +246 82 dataset """kinships""" +246 82 model """ermlp""" +246 82 loss """bceaftersigmoid""" +246 82 regularizer """no""" +246 82 optimizer """adam""" +246 82 training_loop """lcwa""" +246 82 evaluator """rankbased""" +246 83 dataset """kinships""" +246 83 model """ermlp""" +246 83 loss """bceaftersigmoid""" +246 83 regularizer """no""" +246 83 optimizer """adam""" +246 83 training_loop """lcwa""" +246 83 evaluator """rankbased""" +246 84 dataset """kinships""" +246 84 model """ermlp""" +246 84 loss """bceaftersigmoid""" +246 84 regularizer """no""" +246 84 optimizer """adam""" +246 84 training_loop """lcwa""" +246 84 evaluator """rankbased""" +246 85 dataset """kinships""" +246 85 model """ermlp""" +246 85 loss """bceaftersigmoid""" +246 85 regularizer """no""" +246 85 optimizer """adam""" +246 85 training_loop """lcwa""" +246 85 evaluator """rankbased""" +246 86 dataset """kinships""" +246 86 model """ermlp""" +246 86 loss """bceaftersigmoid""" +246 86 regularizer """no""" +246 86 optimizer """adam""" +246 86 training_loop """lcwa""" +246 86 evaluator """rankbased""" +246 87 dataset """kinships""" +246 87 model """ermlp""" +246 87 loss """bceaftersigmoid""" +246 87 regularizer """no""" +246 87 optimizer """adam""" +246 87 training_loop """lcwa""" +246 87 evaluator """rankbased""" +246 88 dataset """kinships""" +246 88 model """ermlp""" +246 88 loss """bceaftersigmoid""" +246 88 regularizer """no""" +246 88 optimizer """adam""" +246 88 training_loop """lcwa""" +246 88 evaluator """rankbased""" +246 89 dataset """kinships""" +246 89 model 
"""ermlp""" +246 89 loss """bceaftersigmoid""" +246 89 regularizer """no""" +246 89 optimizer """adam""" +246 89 training_loop """lcwa""" +246 89 evaluator """rankbased""" +246 90 dataset """kinships""" +246 90 model """ermlp""" +246 90 loss """bceaftersigmoid""" +246 90 regularizer """no""" +246 90 optimizer """adam""" +246 90 training_loop """lcwa""" +246 90 evaluator """rankbased""" +246 91 dataset """kinships""" +246 91 model """ermlp""" +246 91 loss """bceaftersigmoid""" +246 91 regularizer """no""" +246 91 optimizer """adam""" +246 91 training_loop """lcwa""" +246 91 evaluator """rankbased""" +246 92 dataset """kinships""" +246 92 model """ermlp""" +246 92 loss """bceaftersigmoid""" +246 92 regularizer """no""" +246 92 optimizer """adam""" +246 92 training_loop """lcwa""" +246 92 evaluator """rankbased""" +246 93 dataset """kinships""" +246 93 model """ermlp""" +246 93 loss """bceaftersigmoid""" +246 93 regularizer """no""" +246 93 optimizer """adam""" +246 93 training_loop """lcwa""" +246 93 evaluator """rankbased""" +246 94 dataset """kinships""" +246 94 model """ermlp""" +246 94 loss """bceaftersigmoid""" +246 94 regularizer """no""" +246 94 optimizer """adam""" +246 94 training_loop """lcwa""" +246 94 evaluator """rankbased""" +246 95 dataset """kinships""" +246 95 model """ermlp""" +246 95 loss """bceaftersigmoid""" +246 95 regularizer """no""" +246 95 optimizer """adam""" +246 95 training_loop """lcwa""" +246 95 evaluator """rankbased""" +246 96 dataset """kinships""" +246 96 model """ermlp""" +246 96 loss """bceaftersigmoid""" +246 96 regularizer """no""" +246 96 optimizer """adam""" +246 96 training_loop """lcwa""" +246 96 evaluator """rankbased""" +246 97 dataset """kinships""" +246 97 model """ermlp""" +246 97 loss """bceaftersigmoid""" +246 97 regularizer """no""" +246 97 optimizer """adam""" +246 97 training_loop """lcwa""" +246 97 evaluator """rankbased""" +246 98 dataset """kinships""" +246 98 model """ermlp""" +246 98 loss """bceaftersigmoid""" 
+246 98 regularizer """no""" +246 98 optimizer """adam""" +246 98 training_loop """lcwa""" +246 98 evaluator """rankbased""" +246 99 dataset """kinships""" +246 99 model """ermlp""" +246 99 loss """bceaftersigmoid""" +246 99 regularizer """no""" +246 99 optimizer """adam""" +246 99 training_loop """lcwa""" +246 99 evaluator """rankbased""" +246 100 dataset """kinships""" +246 100 model """ermlp""" +246 100 loss """bceaftersigmoid""" +246 100 regularizer """no""" +246 100 optimizer """adam""" +246 100 training_loop """lcwa""" +246 100 evaluator """rankbased""" +247 1 model.embedding_dim 1.0 +247 1 optimizer.lr 0.00591735151139575 +247 1 training.batch_size 2.0 +247 1 training.label_smoothing 0.6057007112717737 +247 2 model.embedding_dim 2.0 +247 2 optimizer.lr 0.014452153171665705 +247 2 training.batch_size 2.0 +247 2 training.label_smoothing 0.00578299657946065 +247 3 model.embedding_dim 1.0 +247 3 optimizer.lr 0.04410032462143136 +247 3 training.batch_size 0.0 +247 3 training.label_smoothing 0.4896641108509003 +247 4 model.embedding_dim 1.0 +247 4 optimizer.lr 0.00481266564880028 +247 4 training.batch_size 2.0 +247 4 training.label_smoothing 0.02383307590276285 +247 5 model.embedding_dim 2.0 +247 5 optimizer.lr 0.003109813247547464 +247 5 training.batch_size 1.0 +247 5 training.label_smoothing 0.5751379346557439 +247 6 model.embedding_dim 1.0 +247 6 optimizer.lr 0.011116000235309937 +247 6 training.batch_size 0.0 +247 6 training.label_smoothing 0.002181819897130883 +247 7 model.embedding_dim 0.0 +247 7 optimizer.lr 0.07305371066671633 +247 7 training.batch_size 0.0 +247 7 training.label_smoothing 0.1445247128650985 +247 8 model.embedding_dim 0.0 +247 8 optimizer.lr 0.06936475720843538 +247 8 training.batch_size 0.0 +247 8 training.label_smoothing 0.0037139778084548823 +247 9 model.embedding_dim 2.0 +247 9 optimizer.lr 0.037489717583357104 +247 9 training.batch_size 0.0 +247 9 training.label_smoothing 0.3959414303904816 +247 10 model.embedding_dim 2.0 +247 10 
optimizer.lr 0.0029857581561263443 +247 10 training.batch_size 0.0 +247 10 training.label_smoothing 0.004765935189709607 +247 11 model.embedding_dim 1.0 +247 11 optimizer.lr 0.012140368124223219 +247 11 training.batch_size 1.0 +247 11 training.label_smoothing 0.0018702620182025787 +247 12 model.embedding_dim 1.0 +247 12 optimizer.lr 0.001629203031065154 +247 12 training.batch_size 0.0 +247 12 training.label_smoothing 0.014044919511999418 +247 13 model.embedding_dim 1.0 +247 13 optimizer.lr 0.014026020512739545 +247 13 training.batch_size 0.0 +247 13 training.label_smoothing 0.015039291867768456 +247 14 model.embedding_dim 2.0 +247 14 optimizer.lr 0.011632631271326252 +247 14 training.batch_size 1.0 +247 14 training.label_smoothing 0.002133804553665474 +247 15 model.embedding_dim 0.0 +247 15 optimizer.lr 0.029175551379943036 +247 15 training.batch_size 2.0 +247 15 training.label_smoothing 0.0024221221268201708 +247 16 model.embedding_dim 0.0 +247 16 optimizer.lr 0.002324114728664994 +247 16 training.batch_size 2.0 +247 16 training.label_smoothing 0.003451051868136549 +247 17 model.embedding_dim 0.0 +247 17 optimizer.lr 0.0029911338650450707 +247 17 training.batch_size 2.0 +247 17 training.label_smoothing 0.005846548417714094 +247 18 model.embedding_dim 1.0 +247 18 optimizer.lr 0.003031944590195643 +247 18 training.batch_size 2.0 +247 18 training.label_smoothing 0.06631986285712788 +247 19 model.embedding_dim 1.0 +247 19 optimizer.lr 0.0050326011753028385 +247 19 training.batch_size 1.0 +247 19 training.label_smoothing 0.001485715108127715 +247 20 model.embedding_dim 2.0 +247 20 optimizer.lr 0.0013509521476393507 +247 20 training.batch_size 2.0 +247 20 training.label_smoothing 0.09390323308075688 +247 21 model.embedding_dim 1.0 +247 21 optimizer.lr 0.001045826419132925 +247 21 training.batch_size 2.0 +247 21 training.label_smoothing 0.005054471217622672 +247 22 model.embedding_dim 1.0 +247 22 optimizer.lr 0.04588548944699805 +247 22 training.batch_size 2.0 +247 22 
training.label_smoothing 0.6005954461100333 +247 23 model.embedding_dim 0.0 +247 23 optimizer.lr 0.014850559105247303 +247 23 training.batch_size 1.0 +247 23 training.label_smoothing 0.49946436718272375 +247 24 model.embedding_dim 2.0 +247 24 optimizer.lr 0.002663382254918827 +247 24 training.batch_size 0.0 +247 24 training.label_smoothing 0.46090821953891625 +247 25 model.embedding_dim 0.0 +247 25 optimizer.lr 0.01666996295107058 +247 25 training.batch_size 2.0 +247 25 training.label_smoothing 0.033857093361037004 +247 26 model.embedding_dim 0.0 +247 26 optimizer.lr 0.007660353122416501 +247 26 training.batch_size 0.0 +247 26 training.label_smoothing 0.05875488412113419 +247 27 model.embedding_dim 2.0 +247 27 optimizer.lr 0.0010388620820083432 +247 27 training.batch_size 0.0 +247 27 training.label_smoothing 0.09443050773079806 +247 28 model.embedding_dim 1.0 +247 28 optimizer.lr 0.003367508052071358 +247 28 training.batch_size 1.0 +247 28 training.label_smoothing 0.6752153030443495 +247 29 model.embedding_dim 0.0 +247 29 optimizer.lr 0.05102639945598113 +247 29 training.batch_size 2.0 +247 29 training.label_smoothing 0.14115251751806643 +247 30 model.embedding_dim 0.0 +247 30 optimizer.lr 0.005269219564911875 +247 30 training.batch_size 0.0 +247 30 training.label_smoothing 0.0017689623176940404 +247 31 model.embedding_dim 2.0 +247 31 optimizer.lr 0.013207717767332356 +247 31 training.batch_size 1.0 +247 31 training.label_smoothing 0.04102205655976387 +247 32 model.embedding_dim 0.0 +247 32 optimizer.lr 0.02020524771509497 +247 32 training.batch_size 0.0 +247 32 training.label_smoothing 0.0019086115733326697 +247 33 model.embedding_dim 1.0 +247 33 optimizer.lr 0.0039117853671674484 +247 33 training.batch_size 1.0 +247 33 training.label_smoothing 0.3545344967703571 +247 34 model.embedding_dim 2.0 +247 34 optimizer.lr 0.002172154916747328 +247 34 training.batch_size 1.0 +247 34 training.label_smoothing 0.3986029779811845 +247 35 model.embedding_dim 0.0 +247 35 
optimizer.lr 0.0015231882264843818 +247 35 training.batch_size 2.0 +247 35 training.label_smoothing 0.009398548922958971 +247 36 model.embedding_dim 2.0 +247 36 optimizer.lr 0.001943403242894336 +247 36 training.batch_size 0.0 +247 36 training.label_smoothing 0.009130458968008352 +247 37 model.embedding_dim 2.0 +247 37 optimizer.lr 0.032386036559063525 +247 37 training.batch_size 2.0 +247 37 training.label_smoothing 0.006214058165336089 +247 38 model.embedding_dim 1.0 +247 38 optimizer.lr 0.015034824390011746 +247 38 training.batch_size 1.0 +247 38 training.label_smoothing 0.8375582980532222 +247 39 model.embedding_dim 0.0 +247 39 optimizer.lr 0.003427929645347479 +247 39 training.batch_size 1.0 +247 39 training.label_smoothing 0.001753147478941945 +247 40 model.embedding_dim 1.0 +247 40 optimizer.lr 0.007990825238711908 +247 40 training.batch_size 2.0 +247 40 training.label_smoothing 0.1423282701006411 +247 41 model.embedding_dim 1.0 +247 41 optimizer.lr 0.0019309327456375027 +247 41 training.batch_size 1.0 +247 41 training.label_smoothing 0.0033816595385444204 +247 42 model.embedding_dim 0.0 +247 42 optimizer.lr 0.013669552947105009 +247 42 training.batch_size 1.0 +247 42 training.label_smoothing 0.002550519947027682 +247 43 model.embedding_dim 2.0 +247 43 optimizer.lr 0.010092489330044295 +247 43 training.batch_size 1.0 +247 43 training.label_smoothing 0.0014565991635970732 +247 44 model.embedding_dim 2.0 +247 44 optimizer.lr 0.07389345079447733 +247 44 training.batch_size 1.0 +247 44 training.label_smoothing 0.10042481272914977 +247 45 model.embedding_dim 2.0 +247 45 optimizer.lr 0.015511988912851953 +247 45 training.batch_size 1.0 +247 45 training.label_smoothing 0.010268287131929784 +247 46 model.embedding_dim 0.0 +247 46 optimizer.lr 0.0038091202235244316 +247 46 training.batch_size 0.0 +247 46 training.label_smoothing 0.750464777562877 +247 47 model.embedding_dim 1.0 +247 47 optimizer.lr 0.0072126106773381885 +247 47 training.batch_size 1.0 +247 47 
training.label_smoothing 0.5598537312728542 +247 48 model.embedding_dim 2.0 +247 48 optimizer.lr 0.0010768943586092975 +247 48 training.batch_size 0.0 +247 48 training.label_smoothing 0.12031778092022405 +247 49 model.embedding_dim 0.0 +247 49 optimizer.lr 0.0033865598501855265 +247 49 training.batch_size 0.0 +247 49 training.label_smoothing 0.003492673756359506 +247 50 model.embedding_dim 1.0 +247 50 optimizer.lr 0.039189105929854605 +247 50 training.batch_size 0.0 +247 50 training.label_smoothing 0.7078934137340838 +247 51 model.embedding_dim 1.0 +247 51 optimizer.lr 0.031677633972942404 +247 51 training.batch_size 2.0 +247 51 training.label_smoothing 0.037864986333304686 +247 52 model.embedding_dim 2.0 +247 52 optimizer.lr 0.08094063015526312 +247 52 training.batch_size 2.0 +247 52 training.label_smoothing 0.19579311885622397 +247 53 model.embedding_dim 1.0 +247 53 optimizer.lr 0.0016264274826184666 +247 53 training.batch_size 2.0 +247 53 training.label_smoothing 0.013838755315307608 +247 54 model.embedding_dim 1.0 +247 54 optimizer.lr 0.027892919988967414 +247 54 training.batch_size 0.0 +247 54 training.label_smoothing 0.1378565501545278 +247 55 model.embedding_dim 2.0 +247 55 optimizer.lr 0.010931663596812784 +247 55 training.batch_size 1.0 +247 55 training.label_smoothing 0.009189128505580957 +247 56 model.embedding_dim 1.0 +247 56 optimizer.lr 0.0758694492296365 +247 56 training.batch_size 0.0 +247 56 training.label_smoothing 0.4946834715424776 +247 57 model.embedding_dim 2.0 +247 57 optimizer.lr 0.011326125160419148 +247 57 training.batch_size 2.0 +247 57 training.label_smoothing 0.0012106227003553138 +247 58 model.embedding_dim 0.0 +247 58 optimizer.lr 0.007935105547070633 +247 58 training.batch_size 1.0 +247 58 training.label_smoothing 0.0018551885574175683 +247 59 model.embedding_dim 1.0 +247 59 optimizer.lr 0.0017878958320202542 +247 59 training.batch_size 1.0 +247 59 training.label_smoothing 0.006402760476013211 +247 60 model.embedding_dim 1.0 +247 60 
optimizer.lr 0.09424723756017148 +247 60 training.batch_size 1.0 +247 60 training.label_smoothing 0.15025253790102824 +247 61 model.embedding_dim 0.0 +247 61 optimizer.lr 0.0033055039497458067 +247 61 training.batch_size 2.0 +247 61 training.label_smoothing 0.0035295231899439092 +247 62 model.embedding_dim 2.0 +247 62 optimizer.lr 0.0012560290113158895 +247 62 training.batch_size 0.0 +247 62 training.label_smoothing 0.016718228615463387 +247 63 model.embedding_dim 2.0 +247 63 optimizer.lr 0.007559734371832753 +247 63 training.batch_size 0.0 +247 63 training.label_smoothing 0.7643699120980015 +247 64 model.embedding_dim 0.0 +247 64 optimizer.lr 0.0033396720265997312 +247 64 training.batch_size 2.0 +247 64 training.label_smoothing 0.009647626639694389 +247 65 model.embedding_dim 0.0 +247 65 optimizer.lr 0.009699145535415036 +247 65 training.batch_size 1.0 +247 65 training.label_smoothing 0.47671778583333446 +247 66 model.embedding_dim 0.0 +247 66 optimizer.lr 0.0014007417058068735 +247 66 training.batch_size 2.0 +247 66 training.label_smoothing 0.001906018359817679 +247 67 model.embedding_dim 0.0 +247 67 optimizer.lr 0.0018734335819596893 +247 67 training.batch_size 2.0 +247 67 training.label_smoothing 0.023677134141708106 +247 68 model.embedding_dim 1.0 +247 68 optimizer.lr 0.001524513855097816 +247 68 training.batch_size 2.0 +247 68 training.label_smoothing 0.0014537634052683855 +247 69 model.embedding_dim 0.0 +247 69 optimizer.lr 0.0034050854660251217 +247 69 training.batch_size 2.0 +247 69 training.label_smoothing 0.5027010887264401 +247 70 model.embedding_dim 1.0 +247 70 optimizer.lr 0.0219248283522337 +247 70 training.batch_size 1.0 +247 70 training.label_smoothing 0.04391959774803326 +247 71 model.embedding_dim 2.0 +247 71 optimizer.lr 0.0409874220157986 +247 71 training.batch_size 0.0 +247 71 training.label_smoothing 0.2047140237820667 +247 72 model.embedding_dim 1.0 +247 72 optimizer.lr 0.0025350460725343258 +247 72 training.batch_size 0.0 +247 72 
training.label_smoothing 0.010569106351613924 +247 73 model.embedding_dim 2.0 +247 73 optimizer.lr 0.007898857552883588 +247 73 training.batch_size 1.0 +247 73 training.label_smoothing 0.001653753511351753 +247 74 model.embedding_dim 1.0 +247 74 optimizer.lr 0.015150936948652115 +247 74 training.batch_size 0.0 +247 74 training.label_smoothing 0.3088835093116306 +247 75 model.embedding_dim 1.0 +247 75 optimizer.lr 0.007793504402958426 +247 75 training.batch_size 0.0 +247 75 training.label_smoothing 0.0029557003452921806 +247 76 model.embedding_dim 2.0 +247 76 optimizer.lr 0.0020228237263932233 +247 76 training.batch_size 1.0 +247 76 training.label_smoothing 0.0020312372500737286 +247 77 model.embedding_dim 1.0 +247 77 optimizer.lr 0.0023423524384248414 +247 77 training.batch_size 1.0 +247 77 training.label_smoothing 0.026558081946775502 +247 78 model.embedding_dim 0.0 +247 78 optimizer.lr 0.07982944032147722 +247 78 training.batch_size 0.0 +247 78 training.label_smoothing 0.01329530771087245 +247 79 model.embedding_dim 2.0 +247 79 optimizer.lr 0.079831140259087 +247 79 training.batch_size 0.0 +247 79 training.label_smoothing 0.07222026747659692 +247 80 model.embedding_dim 1.0 +247 80 optimizer.lr 0.002524947562701839 +247 80 training.batch_size 0.0 +247 80 training.label_smoothing 0.019195837375115554 +247 81 model.embedding_dim 1.0 +247 81 optimizer.lr 0.010654796529219548 +247 81 training.batch_size 2.0 +247 81 training.label_smoothing 0.23010534445282776 +247 82 model.embedding_dim 1.0 +247 82 optimizer.lr 0.003485907322517414 +247 82 training.batch_size 1.0 +247 82 training.label_smoothing 0.005612597292220186 +247 83 model.embedding_dim 1.0 +247 83 optimizer.lr 0.02020575098216639 +247 83 training.batch_size 2.0 +247 83 training.label_smoothing 0.35882713903390445 +247 84 model.embedding_dim 1.0 +247 84 optimizer.lr 0.012223376665260499 +247 84 training.batch_size 1.0 +247 84 training.label_smoothing 0.001419418385761558 +247 85 model.embedding_dim 2.0 +247 85 
optimizer.lr 0.02525398324413561 +247 85 training.batch_size 0.0 +247 85 training.label_smoothing 0.004075497739063895 +247 86 model.embedding_dim 2.0 +247 86 optimizer.lr 0.018874375087647326 +247 86 training.batch_size 2.0 +247 86 training.label_smoothing 0.05785199652579079 +247 87 model.embedding_dim 2.0 +247 87 optimizer.lr 0.08432057563809559 +247 87 training.batch_size 2.0 +247 87 training.label_smoothing 0.034195942711545885 +247 88 model.embedding_dim 1.0 +247 88 optimizer.lr 0.027737884254563493 +247 88 training.batch_size 1.0 +247 88 training.label_smoothing 0.030533837466928285 +247 89 model.embedding_dim 1.0 +247 89 optimizer.lr 0.006881322267648829 +247 89 training.batch_size 2.0 +247 89 training.label_smoothing 0.003542058547365167 +247 90 model.embedding_dim 0.0 +247 90 optimizer.lr 0.018271756248442975 +247 90 training.batch_size 0.0 +247 90 training.label_smoothing 0.9279408773211059 +247 91 model.embedding_dim 2.0 +247 91 optimizer.lr 0.01318643906644193 +247 91 training.batch_size 2.0 +247 91 training.label_smoothing 0.055682115128944346 +247 92 model.embedding_dim 0.0 +247 92 optimizer.lr 0.0030325188711465023 +247 92 training.batch_size 0.0 +247 92 training.label_smoothing 0.004649032239948023 +247 93 model.embedding_dim 0.0 +247 93 optimizer.lr 0.004708491811008921 +247 93 training.batch_size 2.0 +247 93 training.label_smoothing 0.1758439146557614 +247 94 model.embedding_dim 1.0 +247 94 optimizer.lr 0.020366243466452924 +247 94 training.batch_size 0.0 +247 94 training.label_smoothing 0.19857670022654025 +247 95 model.embedding_dim 0.0 +247 95 optimizer.lr 0.0043798111602926584 +247 95 training.batch_size 2.0 +247 95 training.label_smoothing 0.003607109108137685 +247 96 model.embedding_dim 0.0 +247 96 optimizer.lr 0.0011837758353554322 +247 96 training.batch_size 2.0 +247 96 training.label_smoothing 0.9611539812555574 +247 97 model.embedding_dim 2.0 +247 97 optimizer.lr 0.0015930292445909397 +247 97 training.batch_size 0.0 +247 97 
training.label_smoothing 0.007862092681011594 +247 98 model.embedding_dim 1.0 +247 98 optimizer.lr 0.048671182660202504 +247 98 training.batch_size 0.0 +247 98 training.label_smoothing 0.01863572740843041 +247 99 model.embedding_dim 2.0 +247 99 optimizer.lr 0.0901676605834749 +247 99 training.batch_size 2.0 +247 99 training.label_smoothing 0.2294918821360634 +247 100 model.embedding_dim 1.0 +247 100 optimizer.lr 0.02612871697628748 +247 100 training.batch_size 2.0 +247 100 training.label_smoothing 0.03184131616514262 +247 1 dataset """kinships""" +247 1 model """ermlp""" +247 1 loss """softplus""" +247 1 regularizer """no""" +247 1 optimizer """adam""" +247 1 training_loop """lcwa""" +247 1 evaluator """rankbased""" +247 2 dataset """kinships""" +247 2 model """ermlp""" +247 2 loss """softplus""" +247 2 regularizer """no""" +247 2 optimizer """adam""" +247 2 training_loop """lcwa""" +247 2 evaluator """rankbased""" +247 3 dataset """kinships""" +247 3 model """ermlp""" +247 3 loss """softplus""" +247 3 regularizer """no""" +247 3 optimizer """adam""" +247 3 training_loop """lcwa""" +247 3 evaluator """rankbased""" +247 4 dataset """kinships""" +247 4 model """ermlp""" +247 4 loss """softplus""" +247 4 regularizer """no""" +247 4 optimizer """adam""" +247 4 training_loop """lcwa""" +247 4 evaluator """rankbased""" +247 5 dataset """kinships""" +247 5 model """ermlp""" +247 5 loss """softplus""" +247 5 regularizer """no""" +247 5 optimizer """adam""" +247 5 training_loop """lcwa""" +247 5 evaluator """rankbased""" +247 6 dataset """kinships""" +247 6 model """ermlp""" +247 6 loss """softplus""" +247 6 regularizer """no""" +247 6 optimizer """adam""" +247 6 training_loop """lcwa""" +247 6 evaluator """rankbased""" +247 7 dataset """kinships""" +247 7 model """ermlp""" +247 7 loss """softplus""" +247 7 regularizer """no""" +247 7 optimizer """adam""" +247 7 training_loop """lcwa""" +247 7 evaluator """rankbased""" +247 8 dataset """kinships""" +247 8 model """ermlp""" 
+247 8 loss """softplus""" +247 8 regularizer """no""" +247 8 optimizer """adam""" +247 8 training_loop """lcwa""" +247 8 evaluator """rankbased""" +247 9 dataset """kinships""" +247 9 model """ermlp""" +247 9 loss """softplus""" +247 9 regularizer """no""" +247 9 optimizer """adam""" +247 9 training_loop """lcwa""" +247 9 evaluator """rankbased""" +247 10 dataset """kinships""" +247 10 model """ermlp""" +247 10 loss """softplus""" +247 10 regularizer """no""" +247 10 optimizer """adam""" +247 10 training_loop """lcwa""" +247 10 evaluator """rankbased""" +247 11 dataset """kinships""" +247 11 model """ermlp""" +247 11 loss """softplus""" +247 11 regularizer """no""" +247 11 optimizer """adam""" +247 11 training_loop """lcwa""" +247 11 evaluator """rankbased""" +247 12 dataset """kinships""" +247 12 model """ermlp""" +247 12 loss """softplus""" +247 12 regularizer """no""" +247 12 optimizer """adam""" +247 12 training_loop """lcwa""" +247 12 evaluator """rankbased""" +247 13 dataset """kinships""" +247 13 model """ermlp""" +247 13 loss """softplus""" +247 13 regularizer """no""" +247 13 optimizer """adam""" +247 13 training_loop """lcwa""" +247 13 evaluator """rankbased""" +247 14 dataset """kinships""" +247 14 model """ermlp""" +247 14 loss """softplus""" +247 14 regularizer """no""" +247 14 optimizer """adam""" +247 14 training_loop """lcwa""" +247 14 evaluator """rankbased""" +247 15 dataset """kinships""" +247 15 model """ermlp""" +247 15 loss """softplus""" +247 15 regularizer """no""" +247 15 optimizer """adam""" +247 15 training_loop """lcwa""" +247 15 evaluator """rankbased""" +247 16 dataset """kinships""" +247 16 model """ermlp""" +247 16 loss """softplus""" +247 16 regularizer """no""" +247 16 optimizer """adam""" +247 16 training_loop """lcwa""" +247 16 evaluator """rankbased""" +247 17 dataset """kinships""" +247 17 model """ermlp""" +247 17 loss """softplus""" +247 17 regularizer """no""" +247 17 optimizer """adam""" +247 17 training_loop """lcwa""" 
+247 17 evaluator """rankbased""" +247 18 dataset """kinships""" +247 18 model """ermlp""" +247 18 loss """softplus""" +247 18 regularizer """no""" +247 18 optimizer """adam""" +247 18 training_loop """lcwa""" +247 18 evaluator """rankbased""" +247 19 dataset """kinships""" +247 19 model """ermlp""" +247 19 loss """softplus""" +247 19 regularizer """no""" +247 19 optimizer """adam""" +247 19 training_loop """lcwa""" +247 19 evaluator """rankbased""" +247 20 dataset """kinships""" +247 20 model """ermlp""" +247 20 loss """softplus""" +247 20 regularizer """no""" +247 20 optimizer """adam""" +247 20 training_loop """lcwa""" +247 20 evaluator """rankbased""" +247 21 dataset """kinships""" +247 21 model """ermlp""" +247 21 loss """softplus""" +247 21 regularizer """no""" +247 21 optimizer """adam""" +247 21 training_loop """lcwa""" +247 21 evaluator """rankbased""" +247 22 dataset """kinships""" +247 22 model """ermlp""" +247 22 loss """softplus""" +247 22 regularizer """no""" +247 22 optimizer """adam""" +247 22 training_loop """lcwa""" +247 22 evaluator """rankbased""" +247 23 dataset """kinships""" +247 23 model """ermlp""" +247 23 loss """softplus""" +247 23 regularizer """no""" +247 23 optimizer """adam""" +247 23 training_loop """lcwa""" +247 23 evaluator """rankbased""" +247 24 dataset """kinships""" +247 24 model """ermlp""" +247 24 loss """softplus""" +247 24 regularizer """no""" +247 24 optimizer """adam""" +247 24 training_loop """lcwa""" +247 24 evaluator """rankbased""" +247 25 dataset """kinships""" +247 25 model """ermlp""" +247 25 loss """softplus""" +247 25 regularizer """no""" +247 25 optimizer """adam""" +247 25 training_loop """lcwa""" +247 25 evaluator """rankbased""" +247 26 dataset """kinships""" +247 26 model """ermlp""" +247 26 loss """softplus""" +247 26 regularizer """no""" +247 26 optimizer """adam""" +247 26 training_loop """lcwa""" +247 26 evaluator """rankbased""" +247 27 dataset """kinships""" +247 27 model """ermlp""" +247 27 loss 
"""softplus""" +247 27 regularizer """no""" +247 27 optimizer """adam""" +247 27 training_loop """lcwa""" +247 27 evaluator """rankbased""" +247 28 dataset """kinships""" +247 28 model """ermlp""" +247 28 loss """softplus""" +247 28 regularizer """no""" +247 28 optimizer """adam""" +247 28 training_loop """lcwa""" +247 28 evaluator """rankbased""" +247 29 dataset """kinships""" +247 29 model """ermlp""" +247 29 loss """softplus""" +247 29 regularizer """no""" +247 29 optimizer """adam""" +247 29 training_loop """lcwa""" +247 29 evaluator """rankbased""" +247 30 dataset """kinships""" +247 30 model """ermlp""" +247 30 loss """softplus""" +247 30 regularizer """no""" +247 30 optimizer """adam""" +247 30 training_loop """lcwa""" +247 30 evaluator """rankbased""" +247 31 dataset """kinships""" +247 31 model """ermlp""" +247 31 loss """softplus""" +247 31 regularizer """no""" +247 31 optimizer """adam""" +247 31 training_loop """lcwa""" +247 31 evaluator """rankbased""" +247 32 dataset """kinships""" +247 32 model """ermlp""" +247 32 loss """softplus""" +247 32 regularizer """no""" +247 32 optimizer """adam""" +247 32 training_loop """lcwa""" +247 32 evaluator """rankbased""" +247 33 dataset """kinships""" +247 33 model """ermlp""" +247 33 loss """softplus""" +247 33 regularizer """no""" +247 33 optimizer """adam""" +247 33 training_loop """lcwa""" +247 33 evaluator """rankbased""" +247 34 dataset """kinships""" +247 34 model """ermlp""" +247 34 loss """softplus""" +247 34 regularizer """no""" +247 34 optimizer """adam""" +247 34 training_loop """lcwa""" +247 34 evaluator """rankbased""" +247 35 dataset """kinships""" +247 35 model """ermlp""" +247 35 loss """softplus""" +247 35 regularizer """no""" +247 35 optimizer """adam""" +247 35 training_loop """lcwa""" +247 35 evaluator """rankbased""" +247 36 dataset """kinships""" +247 36 model """ermlp""" +247 36 loss """softplus""" +247 36 regularizer """no""" +247 36 optimizer """adam""" +247 36 training_loop """lcwa""" 
+247 36 evaluator """rankbased""" +247 37 dataset """kinships""" +247 37 model """ermlp""" +247 37 loss """softplus""" +247 37 regularizer """no""" +247 37 optimizer """adam""" +247 37 training_loop """lcwa""" +247 37 evaluator """rankbased""" +247 38 dataset """kinships""" +247 38 model """ermlp""" +247 38 loss """softplus""" +247 38 regularizer """no""" +247 38 optimizer """adam""" +247 38 training_loop """lcwa""" +247 38 evaluator """rankbased""" +247 39 dataset """kinships""" +247 39 model """ermlp""" +247 39 loss """softplus""" +247 39 regularizer """no""" +247 39 optimizer """adam""" +247 39 training_loop """lcwa""" +247 39 evaluator """rankbased""" +247 40 dataset """kinships""" +247 40 model """ermlp""" +247 40 loss """softplus""" +247 40 regularizer """no""" +247 40 optimizer """adam""" +247 40 training_loop """lcwa""" +247 40 evaluator """rankbased""" +247 41 dataset """kinships""" +247 41 model """ermlp""" +247 41 loss """softplus""" +247 41 regularizer """no""" +247 41 optimizer """adam""" +247 41 training_loop """lcwa""" +247 41 evaluator """rankbased""" +247 42 dataset """kinships""" +247 42 model """ermlp""" +247 42 loss """softplus""" +247 42 regularizer """no""" +247 42 optimizer """adam""" +247 42 training_loop """lcwa""" +247 42 evaluator """rankbased""" +247 43 dataset """kinships""" +247 43 model """ermlp""" +247 43 loss """softplus""" +247 43 regularizer """no""" +247 43 optimizer """adam""" +247 43 training_loop """lcwa""" +247 43 evaluator """rankbased""" +247 44 dataset """kinships""" +247 44 model """ermlp""" +247 44 loss """softplus""" +247 44 regularizer """no""" +247 44 optimizer """adam""" +247 44 training_loop """lcwa""" +247 44 evaluator """rankbased""" +247 45 dataset """kinships""" +247 45 model """ermlp""" +247 45 loss """softplus""" +247 45 regularizer """no""" +247 45 optimizer """adam""" +247 45 training_loop """lcwa""" +247 45 evaluator """rankbased""" +247 46 dataset """kinships""" +247 46 model """ermlp""" +247 46 loss 
"""softplus""" +247 46 regularizer """no""" +247 46 optimizer """adam""" +247 46 training_loop """lcwa""" +247 46 evaluator """rankbased""" +247 47 dataset """kinships""" +247 47 model """ermlp""" +247 47 loss """softplus""" +247 47 regularizer """no""" +247 47 optimizer """adam""" +247 47 training_loop """lcwa""" +247 47 evaluator """rankbased""" +247 48 dataset """kinships""" +247 48 model """ermlp""" +247 48 loss """softplus""" +247 48 regularizer """no""" +247 48 optimizer """adam""" +247 48 training_loop """lcwa""" +247 48 evaluator """rankbased""" +247 49 dataset """kinships""" +247 49 model """ermlp""" +247 49 loss """softplus""" +247 49 regularizer """no""" +247 49 optimizer """adam""" +247 49 training_loop """lcwa""" +247 49 evaluator """rankbased""" +247 50 dataset """kinships""" +247 50 model """ermlp""" +247 50 loss """softplus""" +247 50 regularizer """no""" +247 50 optimizer """adam""" +247 50 training_loop """lcwa""" +247 50 evaluator """rankbased""" +247 51 dataset """kinships""" +247 51 model """ermlp""" +247 51 loss """softplus""" +247 51 regularizer """no""" +247 51 optimizer """adam""" +247 51 training_loop """lcwa""" +247 51 evaluator """rankbased""" +247 52 dataset """kinships""" +247 52 model """ermlp""" +247 52 loss """softplus""" +247 52 regularizer """no""" +247 52 optimizer """adam""" +247 52 training_loop """lcwa""" +247 52 evaluator """rankbased""" +247 53 dataset """kinships""" +247 53 model """ermlp""" +247 53 loss """softplus""" +247 53 regularizer """no""" +247 53 optimizer """adam""" +247 53 training_loop """lcwa""" +247 53 evaluator """rankbased""" +247 54 dataset """kinships""" +247 54 model """ermlp""" +247 54 loss """softplus""" +247 54 regularizer """no""" +247 54 optimizer """adam""" +247 54 training_loop """lcwa""" +247 54 evaluator """rankbased""" +247 55 dataset """kinships""" +247 55 model """ermlp""" +247 55 loss """softplus""" +247 55 regularizer """no""" +247 55 optimizer """adam""" +247 55 training_loop """lcwa""" 
+247 55 evaluator """rankbased""" +247 56 dataset """kinships""" +247 56 model """ermlp""" +247 56 loss """softplus""" +247 56 regularizer """no""" +247 56 optimizer """adam""" +247 56 training_loop """lcwa""" +247 56 evaluator """rankbased""" +247 57 dataset """kinships""" +247 57 model """ermlp""" +247 57 loss """softplus""" +247 57 regularizer """no""" +247 57 optimizer """adam""" +247 57 training_loop """lcwa""" +247 57 evaluator """rankbased""" +247 58 dataset """kinships""" +247 58 model """ermlp""" +247 58 loss """softplus""" +247 58 regularizer """no""" +247 58 optimizer """adam""" +247 58 training_loop """lcwa""" +247 58 evaluator """rankbased""" +247 59 dataset """kinships""" +247 59 model """ermlp""" +247 59 loss """softplus""" +247 59 regularizer """no""" +247 59 optimizer """adam""" +247 59 training_loop """lcwa""" +247 59 evaluator """rankbased""" +247 60 dataset """kinships""" +247 60 model """ermlp""" +247 60 loss """softplus""" +247 60 regularizer """no""" +247 60 optimizer """adam""" +247 60 training_loop """lcwa""" +247 60 evaluator """rankbased""" +247 61 dataset """kinships""" +247 61 model """ermlp""" +247 61 loss """softplus""" +247 61 regularizer """no""" +247 61 optimizer """adam""" +247 61 training_loop """lcwa""" +247 61 evaluator """rankbased""" +247 62 dataset """kinships""" +247 62 model """ermlp""" +247 62 loss """softplus""" +247 62 regularizer """no""" +247 62 optimizer """adam""" +247 62 training_loop """lcwa""" +247 62 evaluator """rankbased""" +247 63 dataset """kinships""" +247 63 model """ermlp""" +247 63 loss """softplus""" +247 63 regularizer """no""" +247 63 optimizer """adam""" +247 63 training_loop """lcwa""" +247 63 evaluator """rankbased""" +247 64 dataset """kinships""" +247 64 model """ermlp""" +247 64 loss """softplus""" +247 64 regularizer """no""" +247 64 optimizer """adam""" +247 64 training_loop """lcwa""" +247 64 evaluator """rankbased""" +247 65 dataset """kinships""" +247 65 model """ermlp""" +247 65 loss 
"""softplus""" +247 65 regularizer """no""" +247 65 optimizer """adam""" +247 65 training_loop """lcwa""" +247 65 evaluator """rankbased""" +247 66 dataset """kinships""" +247 66 model """ermlp""" +247 66 loss """softplus""" +247 66 regularizer """no""" +247 66 optimizer """adam""" +247 66 training_loop """lcwa""" +247 66 evaluator """rankbased""" +247 67 dataset """kinships""" +247 67 model """ermlp""" +247 67 loss """softplus""" +247 67 regularizer """no""" +247 67 optimizer """adam""" +247 67 training_loop """lcwa""" +247 67 evaluator """rankbased""" +247 68 dataset """kinships""" +247 68 model """ermlp""" +247 68 loss """softplus""" +247 68 regularizer """no""" +247 68 optimizer """adam""" +247 68 training_loop """lcwa""" +247 68 evaluator """rankbased""" +247 69 dataset """kinships""" +247 69 model """ermlp""" +247 69 loss """softplus""" +247 69 regularizer """no""" +247 69 optimizer """adam""" +247 69 training_loop """lcwa""" +247 69 evaluator """rankbased""" +247 70 dataset """kinships""" +247 70 model """ermlp""" +247 70 loss """softplus""" +247 70 regularizer """no""" +247 70 optimizer """adam""" +247 70 training_loop """lcwa""" +247 70 evaluator """rankbased""" +247 71 dataset """kinships""" +247 71 model """ermlp""" +247 71 loss """softplus""" +247 71 regularizer """no""" +247 71 optimizer """adam""" +247 71 training_loop """lcwa""" +247 71 evaluator """rankbased""" +247 72 dataset """kinships""" +247 72 model """ermlp""" +247 72 loss """softplus""" +247 72 regularizer """no""" +247 72 optimizer """adam""" +247 72 training_loop """lcwa""" +247 72 evaluator """rankbased""" +247 73 dataset """kinships""" +247 73 model """ermlp""" +247 73 loss """softplus""" +247 73 regularizer """no""" +247 73 optimizer """adam""" +247 73 training_loop """lcwa""" +247 73 evaluator """rankbased""" +247 74 dataset """kinships""" +247 74 model """ermlp""" +247 74 loss """softplus""" +247 74 regularizer """no""" +247 74 optimizer """adam""" +247 74 training_loop """lcwa""" 
+247 74 evaluator """rankbased""" +247 75 dataset """kinships""" +247 75 model """ermlp""" +247 75 loss """softplus""" +247 75 regularizer """no""" +247 75 optimizer """adam""" +247 75 training_loop """lcwa""" +247 75 evaluator """rankbased""" +247 76 dataset """kinships""" +247 76 model """ermlp""" +247 76 loss """softplus""" +247 76 regularizer """no""" +247 76 optimizer """adam""" +247 76 training_loop """lcwa""" +247 76 evaluator """rankbased""" +247 77 dataset """kinships""" +247 77 model """ermlp""" +247 77 loss """softplus""" +247 77 regularizer """no""" +247 77 optimizer """adam""" +247 77 training_loop """lcwa""" +247 77 evaluator """rankbased""" +247 78 dataset """kinships""" +247 78 model """ermlp""" +247 78 loss """softplus""" +247 78 regularizer """no""" +247 78 optimizer """adam""" +247 78 training_loop """lcwa""" +247 78 evaluator """rankbased""" +247 79 dataset """kinships""" +247 79 model """ermlp""" +247 79 loss """softplus""" +247 79 regularizer """no""" +247 79 optimizer """adam""" +247 79 training_loop """lcwa""" +247 79 evaluator """rankbased""" +247 80 dataset """kinships""" +247 80 model """ermlp""" +247 80 loss """softplus""" +247 80 regularizer """no""" +247 80 optimizer """adam""" +247 80 training_loop """lcwa""" +247 80 evaluator """rankbased""" +247 81 dataset """kinships""" +247 81 model """ermlp""" +247 81 loss """softplus""" +247 81 regularizer """no""" +247 81 optimizer """adam""" +247 81 training_loop """lcwa""" +247 81 evaluator """rankbased""" +247 82 dataset """kinships""" +247 82 model """ermlp""" +247 82 loss """softplus""" +247 82 regularizer """no""" +247 82 optimizer """adam""" +247 82 training_loop """lcwa""" +247 82 evaluator """rankbased""" +247 83 dataset """kinships""" +247 83 model """ermlp""" +247 83 loss """softplus""" +247 83 regularizer """no""" +247 83 optimizer """adam""" +247 83 training_loop """lcwa""" +247 83 evaluator """rankbased""" +247 84 dataset """kinships""" +247 84 model """ermlp""" +247 84 loss 
"""softplus""" +247 84 regularizer """no""" +247 84 optimizer """adam""" +247 84 training_loop """lcwa""" +247 84 evaluator """rankbased""" +247 85 dataset """kinships""" +247 85 model """ermlp""" +247 85 loss """softplus""" +247 85 regularizer """no""" +247 85 optimizer """adam""" +247 85 training_loop """lcwa""" +247 85 evaluator """rankbased""" +247 86 dataset """kinships""" +247 86 model """ermlp""" +247 86 loss """softplus""" +247 86 regularizer """no""" +247 86 optimizer """adam""" +247 86 training_loop """lcwa""" +247 86 evaluator """rankbased""" +247 87 dataset """kinships""" +247 87 model """ermlp""" +247 87 loss """softplus""" +247 87 regularizer """no""" +247 87 optimizer """adam""" +247 87 training_loop """lcwa""" +247 87 evaluator """rankbased""" +247 88 dataset """kinships""" +247 88 model """ermlp""" +247 88 loss """softplus""" +247 88 regularizer """no""" +247 88 optimizer """adam""" +247 88 training_loop """lcwa""" +247 88 evaluator """rankbased""" +247 89 dataset """kinships""" +247 89 model """ermlp""" +247 89 loss """softplus""" +247 89 regularizer """no""" +247 89 optimizer """adam""" +247 89 training_loop """lcwa""" +247 89 evaluator """rankbased""" +247 90 dataset """kinships""" +247 90 model """ermlp""" +247 90 loss """softplus""" +247 90 regularizer """no""" +247 90 optimizer """adam""" +247 90 training_loop """lcwa""" +247 90 evaluator """rankbased""" +247 91 dataset """kinships""" +247 91 model """ermlp""" +247 91 loss """softplus""" +247 91 regularizer """no""" +247 91 optimizer """adam""" +247 91 training_loop """lcwa""" +247 91 evaluator """rankbased""" +247 92 dataset """kinships""" +247 92 model """ermlp""" +247 92 loss """softplus""" +247 92 regularizer """no""" +247 92 optimizer """adam""" +247 92 training_loop """lcwa""" +247 92 evaluator """rankbased""" +247 93 dataset """kinships""" +247 93 model """ermlp""" +247 93 loss """softplus""" +247 93 regularizer """no""" +247 93 optimizer """adam""" +247 93 training_loop """lcwa""" 
+247 93 evaluator """rankbased""" +247 94 dataset """kinships""" +247 94 model """ermlp""" +247 94 loss """softplus""" +247 94 regularizer """no""" +247 94 optimizer """adam""" +247 94 training_loop """lcwa""" +247 94 evaluator """rankbased""" +247 95 dataset """kinships""" +247 95 model """ermlp""" +247 95 loss """softplus""" +247 95 regularizer """no""" +247 95 optimizer """adam""" +247 95 training_loop """lcwa""" +247 95 evaluator """rankbased""" +247 96 dataset """kinships""" +247 96 model """ermlp""" +247 96 loss """softplus""" +247 96 regularizer """no""" +247 96 optimizer """adam""" +247 96 training_loop """lcwa""" +247 96 evaluator """rankbased""" +247 97 dataset """kinships""" +247 97 model """ermlp""" +247 97 loss """softplus""" +247 97 regularizer """no""" +247 97 optimizer """adam""" +247 97 training_loop """lcwa""" +247 97 evaluator """rankbased""" +247 98 dataset """kinships""" +247 98 model """ermlp""" +247 98 loss """softplus""" +247 98 regularizer """no""" +247 98 optimizer """adam""" +247 98 training_loop """lcwa""" +247 98 evaluator """rankbased""" +247 99 dataset """kinships""" +247 99 model """ermlp""" +247 99 loss """softplus""" +247 99 regularizer """no""" +247 99 optimizer """adam""" +247 99 training_loop """lcwa""" +247 99 evaluator """rankbased""" +247 100 dataset """kinships""" +247 100 model """ermlp""" +247 100 loss """softplus""" +247 100 regularizer """no""" +247 100 optimizer """adam""" +247 100 training_loop """lcwa""" +247 100 evaluator """rankbased""" +248 1 model.embedding_dim 0.0 +248 1 optimizer.lr 0.001987238757573815 +248 1 training.batch_size 1.0 +248 1 training.label_smoothing 0.006550467918782655 +248 2 model.embedding_dim 2.0 +248 2 optimizer.lr 0.008920225378747317 +248 2 training.batch_size 2.0 +248 2 training.label_smoothing 0.07126934744977391 +248 3 model.embedding_dim 0.0 +248 3 optimizer.lr 0.010225662387355951 +248 3 training.batch_size 0.0 +248 3 training.label_smoothing 0.006629100210311856 +248 4 
model.embedding_dim 0.0 +248 4 optimizer.lr 0.009774143203687975 +248 4 training.batch_size 2.0 +248 4 training.label_smoothing 0.15330280586227263 +248 5 model.embedding_dim 0.0 +248 5 optimizer.lr 0.006043738557715091 +248 5 training.batch_size 1.0 +248 5 training.label_smoothing 0.1045449254592544 +248 6 model.embedding_dim 1.0 +248 6 optimizer.lr 0.0016469430302084398 +248 6 training.batch_size 0.0 +248 6 training.label_smoothing 0.020844816712679005 +248 7 model.embedding_dim 1.0 +248 7 optimizer.lr 0.0021657734989289175 +248 7 training.batch_size 2.0 +248 7 training.label_smoothing 0.0013043977227083564 +248 8 model.embedding_dim 1.0 +248 8 optimizer.lr 0.01250070761580727 +248 8 training.batch_size 2.0 +248 8 training.label_smoothing 0.36237637790261346 +248 9 model.embedding_dim 2.0 +248 9 optimizer.lr 0.005228713986756267 +248 9 training.batch_size 0.0 +248 9 training.label_smoothing 0.0021960144362682004 +248 10 model.embedding_dim 2.0 +248 10 optimizer.lr 0.07610024819343943 +248 10 training.batch_size 2.0 +248 10 training.label_smoothing 0.025714665420909475 +248 11 model.embedding_dim 2.0 +248 11 optimizer.lr 0.0022216782194469853 +248 11 training.batch_size 0.0 +248 11 training.label_smoothing 0.8953003809554905 +248 12 model.embedding_dim 2.0 +248 12 optimizer.lr 0.03253104326606303 +248 12 training.batch_size 1.0 +248 12 training.label_smoothing 0.27037417201488034 +248 13 model.embedding_dim 0.0 +248 13 optimizer.lr 0.010666554946161082 +248 13 training.batch_size 0.0 +248 13 training.label_smoothing 0.05251471681164749 +248 14 model.embedding_dim 1.0 +248 14 optimizer.lr 0.0415451615617935 +248 14 training.batch_size 2.0 +248 14 training.label_smoothing 0.07340492806574019 +248 15 model.embedding_dim 0.0 +248 15 optimizer.lr 0.00679623697312659 +248 15 training.batch_size 2.0 +248 15 training.label_smoothing 0.011258389545444919 +248 16 model.embedding_dim 2.0 +248 16 optimizer.lr 0.039807162438320005 +248 16 training.batch_size 2.0 +248 16 
training.label_smoothing 0.8755492072824954 +248 17 model.embedding_dim 2.0 +248 17 optimizer.lr 0.003447333004969449 +248 17 training.batch_size 0.0 +248 17 training.label_smoothing 0.006883093430805377 +248 18 model.embedding_dim 2.0 +248 18 optimizer.lr 0.006873586970164797 +248 18 training.batch_size 0.0 +248 18 training.label_smoothing 0.008403526250012675 +248 19 model.embedding_dim 0.0 +248 19 optimizer.lr 0.007524894567803224 +248 19 training.batch_size 1.0 +248 19 training.label_smoothing 0.001671742963809367 +248 20 model.embedding_dim 2.0 +248 20 optimizer.lr 0.031973063967200455 +248 20 training.batch_size 1.0 +248 20 training.label_smoothing 0.001076589022031371 +248 21 model.embedding_dim 1.0 +248 21 optimizer.lr 0.04968728263291614 +248 21 training.batch_size 0.0 +248 21 training.label_smoothing 0.009684432331253203 +248 22 model.embedding_dim 2.0 +248 22 optimizer.lr 0.0021894763546758994 +248 22 training.batch_size 0.0 +248 22 training.label_smoothing 0.046252945880797286 +248 23 model.embedding_dim 0.0 +248 23 optimizer.lr 0.01879757505597224 +248 23 training.batch_size 2.0 +248 23 training.label_smoothing 0.0011698275957198248 +248 24 model.embedding_dim 1.0 +248 24 optimizer.lr 0.013418420054068318 +248 24 training.batch_size 1.0 +248 24 training.label_smoothing 0.11212870013812162 +248 25 model.embedding_dim 0.0 +248 25 optimizer.lr 0.00886859407013093 +248 25 training.batch_size 2.0 +248 25 training.label_smoothing 0.039550871774137276 +248 26 model.embedding_dim 1.0 +248 26 optimizer.lr 0.029315155354608175 +248 26 training.batch_size 0.0 +248 26 training.label_smoothing 0.023883856770478742 +248 27 model.embedding_dim 1.0 +248 27 optimizer.lr 0.08351691885508687 +248 27 training.batch_size 1.0 +248 27 training.label_smoothing 0.014947969876797116 +248 28 model.embedding_dim 1.0 +248 28 optimizer.lr 0.060252277650375864 +248 28 training.batch_size 1.0 +248 28 training.label_smoothing 0.005718494510798235 +248 29 model.embedding_dim 2.0 +248 
29 optimizer.lr 0.0010786485952913923 +248 29 training.batch_size 1.0 +248 29 training.label_smoothing 0.002282980455734154 +248 30 model.embedding_dim 2.0 +248 30 optimizer.lr 0.002126215397874426 +248 30 training.batch_size 0.0 +248 30 training.label_smoothing 0.0015501024813740888 +248 31 model.embedding_dim 1.0 +248 31 optimizer.lr 0.0011502884844459254 +248 31 training.batch_size 0.0 +248 31 training.label_smoothing 0.0645576629512059 +248 32 model.embedding_dim 2.0 +248 32 optimizer.lr 0.019742551522686207 +248 32 training.batch_size 2.0 +248 32 training.label_smoothing 0.15757041374483938 +248 33 model.embedding_dim 2.0 +248 33 optimizer.lr 0.018913196854481062 +248 33 training.batch_size 2.0 +248 33 training.label_smoothing 0.053293170661016034 +248 34 model.embedding_dim 0.0 +248 34 optimizer.lr 0.05431093381469172 +248 34 training.batch_size 2.0 +248 34 training.label_smoothing 0.0068979368346110724 +248 35 model.embedding_dim 1.0 +248 35 optimizer.lr 0.01980249985960405 +248 35 training.batch_size 2.0 +248 35 training.label_smoothing 0.0011333847956830126 +248 36 model.embedding_dim 0.0 +248 36 optimizer.lr 0.0028478989987545935 +248 36 training.batch_size 0.0 +248 36 training.label_smoothing 0.32737483234733655 +248 37 model.embedding_dim 1.0 +248 37 optimizer.lr 0.0015766003838651062 +248 37 training.batch_size 1.0 +248 37 training.label_smoothing 0.07050570958373 +248 38 model.embedding_dim 0.0 +248 38 optimizer.lr 0.01256075763626107 +248 38 training.batch_size 1.0 +248 38 training.label_smoothing 0.0013523442895686116 +248 39 model.embedding_dim 0.0 +248 39 optimizer.lr 0.011503033317499795 +248 39 training.batch_size 0.0 +248 39 training.label_smoothing 0.12105731425102824 +248 40 model.embedding_dim 0.0 +248 40 optimizer.lr 0.009979407734032771 +248 40 training.batch_size 0.0 +248 40 training.label_smoothing 0.66084682112458 +248 41 model.embedding_dim 0.0 +248 41 optimizer.lr 0.008206568299382353 +248 41 training.batch_size 1.0 +248 41 
training.label_smoothing 0.0012621157721271414 +248 42 model.embedding_dim 1.0 +248 42 optimizer.lr 0.05204622809691988 +248 42 training.batch_size 2.0 +248 42 training.label_smoothing 0.003742482949697667 +248 43 model.embedding_dim 2.0 +248 43 optimizer.lr 0.0678533723140432 +248 43 training.batch_size 2.0 +248 43 training.label_smoothing 0.0018451995034099154 +248 44 model.embedding_dim 0.0 +248 44 optimizer.lr 0.0015237341859623062 +248 44 training.batch_size 2.0 +248 44 training.label_smoothing 0.06022877784455965 +248 45 model.embedding_dim 2.0 +248 45 optimizer.lr 0.05047031411282203 +248 45 training.batch_size 0.0 +248 45 training.label_smoothing 0.17274432811857646 +248 46 model.embedding_dim 0.0 +248 46 optimizer.lr 0.008953715423543996 +248 46 training.batch_size 2.0 +248 46 training.label_smoothing 0.6185120474113863 +248 47 model.embedding_dim 2.0 +248 47 optimizer.lr 0.08989768868360254 +248 47 training.batch_size 0.0 +248 47 training.label_smoothing 0.048937337807358344 +248 48 model.embedding_dim 0.0 +248 48 optimizer.lr 0.004824173904042344 +248 48 training.batch_size 2.0 +248 48 training.label_smoothing 0.0030873145262704954 +248 49 model.embedding_dim 0.0 +248 49 optimizer.lr 0.009052435199417867 +248 49 training.batch_size 2.0 +248 49 training.label_smoothing 0.1988368236786794 +248 50 model.embedding_dim 0.0 +248 50 optimizer.lr 0.03997079049492142 +248 50 training.batch_size 2.0 +248 50 training.label_smoothing 0.289454792290667 +248 51 model.embedding_dim 1.0 +248 51 optimizer.lr 0.0011140105911861934 +248 51 training.batch_size 1.0 +248 51 training.label_smoothing 0.08551850436313874 +248 52 model.embedding_dim 0.0 +248 52 optimizer.lr 0.0010864752051920321 +248 52 training.batch_size 0.0 +248 52 training.label_smoothing 0.07967641006244588 +248 53 model.embedding_dim 2.0 +248 53 optimizer.lr 0.0943447183588668 +248 53 training.batch_size 2.0 +248 53 training.label_smoothing 0.041878463159923565 +248 54 model.embedding_dim 0.0 +248 54 
optimizer.lr 0.005636647708978638 +248 54 training.batch_size 1.0 +248 54 training.label_smoothing 0.04970204518110759 +248 55 model.embedding_dim 1.0 +248 55 optimizer.lr 0.001355679495288251 +248 55 training.batch_size 1.0 +248 55 training.label_smoothing 0.0022730607638581485 +248 56 model.embedding_dim 1.0 +248 56 optimizer.lr 0.025603755070877993 +248 56 training.batch_size 2.0 +248 56 training.label_smoothing 0.32345936113094553 +248 57 model.embedding_dim 2.0 +248 57 optimizer.lr 0.008748443673119163 +248 57 training.batch_size 1.0 +248 57 training.label_smoothing 0.01031164258104071 +248 58 model.embedding_dim 0.0 +248 58 optimizer.lr 0.001520247583824923 +248 58 training.batch_size 1.0 +248 58 training.label_smoothing 0.0037172990393559827 +248 59 model.embedding_dim 1.0 +248 59 optimizer.lr 0.011252053713329139 +248 59 training.batch_size 0.0 +248 59 training.label_smoothing 0.4122496717192033 +248 60 model.embedding_dim 1.0 +248 60 optimizer.lr 0.01805574088673282 +248 60 training.batch_size 0.0 +248 60 training.label_smoothing 0.01826221098692618 +248 61 model.embedding_dim 1.0 +248 61 optimizer.lr 0.008288745197974078 +248 61 training.batch_size 0.0 +248 61 training.label_smoothing 0.008519958197659314 +248 62 model.embedding_dim 2.0 +248 62 optimizer.lr 0.0018411521927482707 +248 62 training.batch_size 0.0 +248 62 training.label_smoothing 0.016875791916405643 +248 63 model.embedding_dim 2.0 +248 63 optimizer.lr 0.0035191879448788006 +248 63 training.batch_size 2.0 +248 63 training.label_smoothing 0.13750749041547664 +248 64 model.embedding_dim 2.0 +248 64 optimizer.lr 0.0022388266246333817 +248 64 training.batch_size 1.0 +248 64 training.label_smoothing 0.017009196812378464 +248 65 model.embedding_dim 1.0 +248 65 optimizer.lr 0.06273052458430985 +248 65 training.batch_size 0.0 +248 65 training.label_smoothing 0.01467863306697467 +248 66 model.embedding_dim 1.0 +248 66 optimizer.lr 0.004860081741123463 +248 66 training.batch_size 1.0 +248 66 
training.label_smoothing 0.08962510378758545 +248 67 model.embedding_dim 1.0 +248 67 optimizer.lr 0.0034099606347532127 +248 67 training.batch_size 0.0 +248 67 training.label_smoothing 0.006029431156716383 +248 68 model.embedding_dim 2.0 +248 68 optimizer.lr 0.05145994892266241 +248 68 training.batch_size 2.0 +248 68 training.label_smoothing 0.0011912498207044132 +248 69 model.embedding_dim 1.0 +248 69 optimizer.lr 0.001576174799482576 +248 69 training.batch_size 2.0 +248 69 training.label_smoothing 0.22845621065728527 +248 70 model.embedding_dim 1.0 +248 70 optimizer.lr 0.09022845033427024 +248 70 training.batch_size 1.0 +248 70 training.label_smoothing 0.006349891278646305 +248 71 model.embedding_dim 0.0 +248 71 optimizer.lr 0.08733820688952229 +248 71 training.batch_size 1.0 +248 71 training.label_smoothing 0.7857977169665038 +248 72 model.embedding_dim 2.0 +248 72 optimizer.lr 0.0938832494614659 +248 72 training.batch_size 1.0 +248 72 training.label_smoothing 0.42903944407366673 +248 73 model.embedding_dim 1.0 +248 73 optimizer.lr 0.0016719472883363745 +248 73 training.batch_size 1.0 +248 73 training.label_smoothing 0.004723326196677221 +248 74 model.embedding_dim 1.0 +248 74 optimizer.lr 0.003206621627076769 +248 74 training.batch_size 2.0 +248 74 training.label_smoothing 0.6904012696591187 +248 75 model.embedding_dim 2.0 +248 75 optimizer.lr 0.051435900136250635 +248 75 training.batch_size 0.0 +248 75 training.label_smoothing 0.03869169605202427 +248 76 model.embedding_dim 0.0 +248 76 optimizer.lr 0.044657003027119876 +248 76 training.batch_size 2.0 +248 76 training.label_smoothing 0.013814189335080504 +248 77 model.embedding_dim 1.0 +248 77 optimizer.lr 0.001391647240723256 +248 77 training.batch_size 1.0 +248 77 training.label_smoothing 0.03842334627175329 +248 78 model.embedding_dim 2.0 +248 78 optimizer.lr 0.009103959185391934 +248 78 training.batch_size 1.0 +248 78 training.label_smoothing 0.7020422761420663 +248 79 model.embedding_dim 0.0 +248 79 
optimizer.lr 0.09260083731195927 +248 79 training.batch_size 2.0 +248 79 training.label_smoothing 0.1888845361817847 +248 80 model.embedding_dim 2.0 +248 80 optimizer.lr 0.012541871208937263 +248 80 training.batch_size 1.0 +248 80 training.label_smoothing 0.26903559133865157 +248 81 model.embedding_dim 0.0 +248 81 optimizer.lr 0.010296613811584131 +248 81 training.batch_size 0.0 +248 81 training.label_smoothing 0.00324058569409966 +248 82 model.embedding_dim 2.0 +248 82 optimizer.lr 0.004510960371365426 +248 82 training.batch_size 1.0 +248 82 training.label_smoothing 0.04129929136951609 +248 83 model.embedding_dim 0.0 +248 83 optimizer.lr 0.067071117328516 +248 83 training.batch_size 2.0 +248 83 training.label_smoothing 0.013754777305788782 +248 84 model.embedding_dim 2.0 +248 84 optimizer.lr 0.05260077998541858 +248 84 training.batch_size 0.0 +248 84 training.label_smoothing 0.1037038850214117 +248 85 model.embedding_dim 0.0 +248 85 optimizer.lr 0.011847426911477629 +248 85 training.batch_size 1.0 +248 85 training.label_smoothing 0.019045345639051345 +248 86 model.embedding_dim 1.0 +248 86 optimizer.lr 0.0026580919821579695 +248 86 training.batch_size 0.0 +248 86 training.label_smoothing 0.1403152160890601 +248 87 model.embedding_dim 0.0 +248 87 optimizer.lr 0.03219758258748973 +248 87 training.batch_size 0.0 +248 87 training.label_smoothing 0.04399805178532771 +248 88 model.embedding_dim 0.0 +248 88 optimizer.lr 0.01927078348827224 +248 88 training.batch_size 2.0 +248 88 training.label_smoothing 0.002353996246541677 +248 89 model.embedding_dim 1.0 +248 89 optimizer.lr 0.025152386781796418 +248 89 training.batch_size 0.0 +248 89 training.label_smoothing 0.026915565668345513 +248 90 model.embedding_dim 2.0 +248 90 optimizer.lr 0.03887886316256997 +248 90 training.batch_size 0.0 +248 90 training.label_smoothing 0.17242683831180425 +248 91 model.embedding_dim 2.0 +248 91 optimizer.lr 0.001605964233557198 +248 91 training.batch_size 0.0 +248 91 
training.label_smoothing 0.12485255657742836 +248 92 model.embedding_dim 1.0 +248 92 optimizer.lr 0.013380483567377015 +248 92 training.batch_size 0.0 +248 92 training.label_smoothing 0.001875091228891225 +248 93 model.embedding_dim 1.0 +248 93 optimizer.lr 0.03975106506793919 +248 93 training.batch_size 2.0 +248 93 training.label_smoothing 0.7079691641874047 +248 94 model.embedding_dim 0.0 +248 94 optimizer.lr 0.004565877017766184 +248 94 training.batch_size 1.0 +248 94 training.label_smoothing 0.005338128811547598 +248 95 model.embedding_dim 1.0 +248 95 optimizer.lr 0.006758575156325777 +248 95 training.batch_size 0.0 +248 95 training.label_smoothing 0.013535399319528102 +248 96 model.embedding_dim 2.0 +248 96 optimizer.lr 0.0027073300420572513 +248 96 training.batch_size 1.0 +248 96 training.label_smoothing 0.07805794209480017 +248 97 model.embedding_dim 2.0 +248 97 optimizer.lr 0.007093378060392404 +248 97 training.batch_size 1.0 +248 97 training.label_smoothing 0.9470651589008888 +248 98 model.embedding_dim 1.0 +248 98 optimizer.lr 0.039057433204844276 +248 98 training.batch_size 2.0 +248 98 training.label_smoothing 0.08682715198062621 +248 99 model.embedding_dim 0.0 +248 99 optimizer.lr 0.01864909662263229 +248 99 training.batch_size 1.0 +248 99 training.label_smoothing 0.8490315393359077 +248 100 model.embedding_dim 1.0 +248 100 optimizer.lr 0.0029810208558490414 +248 100 training.batch_size 2.0 +248 100 training.label_smoothing 0.002891920515229524 +248 1 dataset """kinships""" +248 1 model """ermlp""" +248 1 loss """bceaftersigmoid""" +248 1 regularizer """no""" +248 1 optimizer """adam""" +248 1 training_loop """lcwa""" +248 1 evaluator """rankbased""" +248 2 dataset """kinships""" +248 2 model """ermlp""" +248 2 loss """bceaftersigmoid""" +248 2 regularizer """no""" +248 2 optimizer """adam""" +248 2 training_loop """lcwa""" +248 2 evaluator """rankbased""" +248 3 dataset """kinships""" +248 3 model """ermlp""" +248 3 loss """bceaftersigmoid""" +248 3 
regularizer """no""" +248 3 optimizer """adam""" +248 3 training_loop """lcwa""" +248 3 evaluator """rankbased""" +248 4 dataset """kinships""" +248 4 model """ermlp""" +248 4 loss """bceaftersigmoid""" +248 4 regularizer """no""" +248 4 optimizer """adam""" +248 4 training_loop """lcwa""" +248 4 evaluator """rankbased""" +248 5 dataset """kinships""" +248 5 model """ermlp""" +248 5 loss """bceaftersigmoid""" +248 5 regularizer """no""" +248 5 optimizer """adam""" +248 5 training_loop """lcwa""" +248 5 evaluator """rankbased""" +248 6 dataset """kinships""" +248 6 model """ermlp""" +248 6 loss """bceaftersigmoid""" +248 6 regularizer """no""" +248 6 optimizer """adam""" +248 6 training_loop """lcwa""" +248 6 evaluator """rankbased""" +248 7 dataset """kinships""" +248 7 model """ermlp""" +248 7 loss """bceaftersigmoid""" +248 7 regularizer """no""" +248 7 optimizer """adam""" +248 7 training_loop """lcwa""" +248 7 evaluator """rankbased""" +248 8 dataset """kinships""" +248 8 model """ermlp""" +248 8 loss """bceaftersigmoid""" +248 8 regularizer """no""" +248 8 optimizer """adam""" +248 8 training_loop """lcwa""" +248 8 evaluator """rankbased""" +248 9 dataset """kinships""" +248 9 model """ermlp""" +248 9 loss """bceaftersigmoid""" +248 9 regularizer """no""" +248 9 optimizer """adam""" +248 9 training_loop """lcwa""" +248 9 evaluator """rankbased""" +248 10 dataset """kinships""" +248 10 model """ermlp""" +248 10 loss """bceaftersigmoid""" +248 10 regularizer """no""" +248 10 optimizer """adam""" +248 10 training_loop """lcwa""" +248 10 evaluator """rankbased""" +248 11 dataset """kinships""" +248 11 model """ermlp""" +248 11 loss """bceaftersigmoid""" +248 11 regularizer """no""" +248 11 optimizer """adam""" +248 11 training_loop """lcwa""" +248 11 evaluator """rankbased""" +248 12 dataset """kinships""" +248 12 model """ermlp""" +248 12 loss """bceaftersigmoid""" +248 12 regularizer """no""" +248 12 optimizer """adam""" +248 12 training_loop """lcwa""" +248 12 
evaluator """rankbased""" +248 13 dataset """kinships""" +248 13 model """ermlp""" +248 13 loss """bceaftersigmoid""" +248 13 regularizer """no""" +248 13 optimizer """adam""" +248 13 training_loop """lcwa""" +248 13 evaluator """rankbased""" +248 14 dataset """kinships""" +248 14 model """ermlp""" +248 14 loss """bceaftersigmoid""" +248 14 regularizer """no""" +248 14 optimizer """adam""" +248 14 training_loop """lcwa""" +248 14 evaluator """rankbased""" +248 15 dataset """kinships""" +248 15 model """ermlp""" +248 15 loss """bceaftersigmoid""" +248 15 regularizer """no""" +248 15 optimizer """adam""" +248 15 training_loop """lcwa""" +248 15 evaluator """rankbased""" +248 16 dataset """kinships""" +248 16 model """ermlp""" +248 16 loss """bceaftersigmoid""" +248 16 regularizer """no""" +248 16 optimizer """adam""" +248 16 training_loop """lcwa""" +248 16 evaluator """rankbased""" +248 17 dataset """kinships""" +248 17 model """ermlp""" +248 17 loss """bceaftersigmoid""" +248 17 regularizer """no""" +248 17 optimizer """adam""" +248 17 training_loop """lcwa""" +248 17 evaluator """rankbased""" +248 18 dataset """kinships""" +248 18 model """ermlp""" +248 18 loss """bceaftersigmoid""" +248 18 regularizer """no""" +248 18 optimizer """adam""" +248 18 training_loop """lcwa""" +248 18 evaluator """rankbased""" +248 19 dataset """kinships""" +248 19 model """ermlp""" +248 19 loss """bceaftersigmoid""" +248 19 regularizer """no""" +248 19 optimizer """adam""" +248 19 training_loop """lcwa""" +248 19 evaluator """rankbased""" +248 20 dataset """kinships""" +248 20 model """ermlp""" +248 20 loss """bceaftersigmoid""" +248 20 regularizer """no""" +248 20 optimizer """adam""" +248 20 training_loop """lcwa""" +248 20 evaluator """rankbased""" +248 21 dataset """kinships""" +248 21 model """ermlp""" +248 21 loss """bceaftersigmoid""" +248 21 regularizer """no""" +248 21 optimizer """adam""" +248 21 training_loop """lcwa""" +248 21 evaluator """rankbased""" +248 22 dataset 
"""kinships""" +248 22 model """ermlp""" +248 22 loss """bceaftersigmoid""" +248 22 regularizer """no""" +248 22 optimizer """adam""" +248 22 training_loop """lcwa""" +248 22 evaluator """rankbased""" +248 23 dataset """kinships""" +248 23 model """ermlp""" +248 23 loss """bceaftersigmoid""" +248 23 regularizer """no""" +248 23 optimizer """adam""" +248 23 training_loop """lcwa""" +248 23 evaluator """rankbased""" +248 24 dataset """kinships""" +248 24 model """ermlp""" +248 24 loss """bceaftersigmoid""" +248 24 regularizer """no""" +248 24 optimizer """adam""" +248 24 training_loop """lcwa""" +248 24 evaluator """rankbased""" +248 25 dataset """kinships""" +248 25 model """ermlp""" +248 25 loss """bceaftersigmoid""" +248 25 regularizer """no""" +248 25 optimizer """adam""" +248 25 training_loop """lcwa""" +248 25 evaluator """rankbased""" +248 26 dataset """kinships""" +248 26 model """ermlp""" +248 26 loss """bceaftersigmoid""" +248 26 regularizer """no""" +248 26 optimizer """adam""" +248 26 training_loop """lcwa""" +248 26 evaluator """rankbased""" +248 27 dataset """kinships""" +248 27 model """ermlp""" +248 27 loss """bceaftersigmoid""" +248 27 regularizer """no""" +248 27 optimizer """adam""" +248 27 training_loop """lcwa""" +248 27 evaluator """rankbased""" +248 28 dataset """kinships""" +248 28 model """ermlp""" +248 28 loss """bceaftersigmoid""" +248 28 regularizer """no""" +248 28 optimizer """adam""" +248 28 training_loop """lcwa""" +248 28 evaluator """rankbased""" +248 29 dataset """kinships""" +248 29 model """ermlp""" +248 29 loss """bceaftersigmoid""" +248 29 regularizer """no""" +248 29 optimizer """adam""" +248 29 training_loop """lcwa""" +248 29 evaluator """rankbased""" +248 30 dataset """kinships""" +248 30 model """ermlp""" +248 30 loss """bceaftersigmoid""" +248 30 regularizer """no""" +248 30 optimizer """adam""" +248 30 training_loop """lcwa""" +248 30 evaluator """rankbased""" +248 31 dataset """kinships""" +248 31 model """ermlp""" +248 
31 loss """bceaftersigmoid""" +248 31 regularizer """no""" +248 31 optimizer """adam""" +248 31 training_loop """lcwa""" +248 31 evaluator """rankbased""" +248 32 dataset """kinships""" +248 32 model """ermlp""" +248 32 loss """bceaftersigmoid""" +248 32 regularizer """no""" +248 32 optimizer """adam""" +248 32 training_loop """lcwa""" +248 32 evaluator """rankbased""" +248 33 dataset """kinships""" +248 33 model """ermlp""" +248 33 loss """bceaftersigmoid""" +248 33 regularizer """no""" +248 33 optimizer """adam""" +248 33 training_loop """lcwa""" +248 33 evaluator """rankbased""" +248 34 dataset """kinships""" +248 34 model """ermlp""" +248 34 loss """bceaftersigmoid""" +248 34 regularizer """no""" +248 34 optimizer """adam""" +248 34 training_loop """lcwa""" +248 34 evaluator """rankbased""" +248 35 dataset """kinships""" +248 35 model """ermlp""" +248 35 loss """bceaftersigmoid""" +248 35 regularizer """no""" +248 35 optimizer """adam""" +248 35 training_loop """lcwa""" +248 35 evaluator """rankbased""" +248 36 dataset """kinships""" +248 36 model """ermlp""" +248 36 loss """bceaftersigmoid""" +248 36 regularizer """no""" +248 36 optimizer """adam""" +248 36 training_loop """lcwa""" +248 36 evaluator """rankbased""" +248 37 dataset """kinships""" +248 37 model """ermlp""" +248 37 loss """bceaftersigmoid""" +248 37 regularizer """no""" +248 37 optimizer """adam""" +248 37 training_loop """lcwa""" +248 37 evaluator """rankbased""" +248 38 dataset """kinships""" +248 38 model """ermlp""" +248 38 loss """bceaftersigmoid""" +248 38 regularizer """no""" +248 38 optimizer """adam""" +248 38 training_loop """lcwa""" +248 38 evaluator """rankbased""" +248 39 dataset """kinships""" +248 39 model """ermlp""" +248 39 loss """bceaftersigmoid""" +248 39 regularizer """no""" +248 39 optimizer """adam""" +248 39 training_loop """lcwa""" +248 39 evaluator """rankbased""" +248 40 dataset """kinships""" +248 40 model """ermlp""" +248 40 loss """bceaftersigmoid""" +248 40 
regularizer """no""" +248 40 optimizer """adam""" +248 40 training_loop """lcwa""" +248 40 evaluator """rankbased""" +248 41 dataset """kinships""" +248 41 model """ermlp""" +248 41 loss """bceaftersigmoid""" +248 41 regularizer """no""" +248 41 optimizer """adam""" +248 41 training_loop """lcwa""" +248 41 evaluator """rankbased""" +248 42 dataset """kinships""" +248 42 model """ermlp""" +248 42 loss """bceaftersigmoid""" +248 42 regularizer """no""" +248 42 optimizer """adam""" +248 42 training_loop """lcwa""" +248 42 evaluator """rankbased""" +248 43 dataset """kinships""" +248 43 model """ermlp""" +248 43 loss """bceaftersigmoid""" +248 43 regularizer """no""" +248 43 optimizer """adam""" +248 43 training_loop """lcwa""" +248 43 evaluator """rankbased""" +248 44 dataset """kinships""" +248 44 model """ermlp""" +248 44 loss """bceaftersigmoid""" +248 44 regularizer """no""" +248 44 optimizer """adam""" +248 44 training_loop """lcwa""" +248 44 evaluator """rankbased""" +248 45 dataset """kinships""" +248 45 model """ermlp""" +248 45 loss """bceaftersigmoid""" +248 45 regularizer """no""" +248 45 optimizer """adam""" +248 45 training_loop """lcwa""" +248 45 evaluator """rankbased""" +248 46 dataset """kinships""" +248 46 model """ermlp""" +248 46 loss """bceaftersigmoid""" +248 46 regularizer """no""" +248 46 optimizer """adam""" +248 46 training_loop """lcwa""" +248 46 evaluator """rankbased""" +248 47 dataset """kinships""" +248 47 model """ermlp""" +248 47 loss """bceaftersigmoid""" +248 47 regularizer """no""" +248 47 optimizer """adam""" +248 47 training_loop """lcwa""" +248 47 evaluator """rankbased""" +248 48 dataset """kinships""" +248 48 model """ermlp""" +248 48 loss """bceaftersigmoid""" +248 48 regularizer """no""" +248 48 optimizer """adam""" +248 48 training_loop """lcwa""" +248 48 evaluator """rankbased""" +248 49 dataset """kinships""" +248 49 model """ermlp""" +248 49 loss """bceaftersigmoid""" +248 49 regularizer """no""" +248 49 optimizer 
"""adam""" +248 49 training_loop """lcwa""" +248 49 evaluator """rankbased""" +248 50 dataset """kinships""" +248 50 model """ermlp""" +248 50 loss """bceaftersigmoid""" +248 50 regularizer """no""" +248 50 optimizer """adam""" +248 50 training_loop """lcwa""" +248 50 evaluator """rankbased""" +248 51 dataset """kinships""" +248 51 model """ermlp""" +248 51 loss """bceaftersigmoid""" +248 51 regularizer """no""" +248 51 optimizer """adam""" +248 51 training_loop """lcwa""" +248 51 evaluator """rankbased""" +248 52 dataset """kinships""" +248 52 model """ermlp""" +248 52 loss """bceaftersigmoid""" +248 52 regularizer """no""" +248 52 optimizer """adam""" +248 52 training_loop """lcwa""" +248 52 evaluator """rankbased""" +248 53 dataset """kinships""" +248 53 model """ermlp""" +248 53 loss """bceaftersigmoid""" +248 53 regularizer """no""" +248 53 optimizer """adam""" +248 53 training_loop """lcwa""" +248 53 evaluator """rankbased""" +248 54 dataset """kinships""" +248 54 model """ermlp""" +248 54 loss """bceaftersigmoid""" +248 54 regularizer """no""" +248 54 optimizer """adam""" +248 54 training_loop """lcwa""" +248 54 evaluator """rankbased""" +248 55 dataset """kinships""" +248 55 model """ermlp""" +248 55 loss """bceaftersigmoid""" +248 55 regularizer """no""" +248 55 optimizer """adam""" +248 55 training_loop """lcwa""" +248 55 evaluator """rankbased""" +248 56 dataset """kinships""" +248 56 model """ermlp""" +248 56 loss """bceaftersigmoid""" +248 56 regularizer """no""" +248 56 optimizer """adam""" +248 56 training_loop """lcwa""" +248 56 evaluator """rankbased""" +248 57 dataset """kinships""" +248 57 model """ermlp""" +248 57 loss """bceaftersigmoid""" +248 57 regularizer """no""" +248 57 optimizer """adam""" +248 57 training_loop """lcwa""" +248 57 evaluator """rankbased""" +248 58 dataset """kinships""" +248 58 model """ermlp""" +248 58 loss """bceaftersigmoid""" +248 58 regularizer """no""" +248 58 optimizer """adam""" +248 58 training_loop """lcwa""" 
+248 58 evaluator """rankbased""" +248 59 dataset """kinships""" +248 59 model """ermlp""" +248 59 loss """bceaftersigmoid""" +248 59 regularizer """no""" +248 59 optimizer """adam""" +248 59 training_loop """lcwa""" +248 59 evaluator """rankbased""" +248 60 dataset """kinships""" +248 60 model """ermlp""" +248 60 loss """bceaftersigmoid""" +248 60 regularizer """no""" +248 60 optimizer """adam""" +248 60 training_loop """lcwa""" +248 60 evaluator """rankbased""" +248 61 dataset """kinships""" +248 61 model """ermlp""" +248 61 loss """bceaftersigmoid""" +248 61 regularizer """no""" +248 61 optimizer """adam""" +248 61 training_loop """lcwa""" +248 61 evaluator """rankbased""" +248 62 dataset """kinships""" +248 62 model """ermlp""" +248 62 loss """bceaftersigmoid""" +248 62 regularizer """no""" +248 62 optimizer """adam""" +248 62 training_loop """lcwa""" +248 62 evaluator """rankbased""" +248 63 dataset """kinships""" +248 63 model """ermlp""" +248 63 loss """bceaftersigmoid""" +248 63 regularizer """no""" +248 63 optimizer """adam""" +248 63 training_loop """lcwa""" +248 63 evaluator """rankbased""" +248 64 dataset """kinships""" +248 64 model """ermlp""" +248 64 loss """bceaftersigmoid""" +248 64 regularizer """no""" +248 64 optimizer """adam""" +248 64 training_loop """lcwa""" +248 64 evaluator """rankbased""" +248 65 dataset """kinships""" +248 65 model """ermlp""" +248 65 loss """bceaftersigmoid""" +248 65 regularizer """no""" +248 65 optimizer """adam""" +248 65 training_loop """lcwa""" +248 65 evaluator """rankbased""" +248 66 dataset """kinships""" +248 66 model """ermlp""" +248 66 loss """bceaftersigmoid""" +248 66 regularizer """no""" +248 66 optimizer """adam""" +248 66 training_loop """lcwa""" +248 66 evaluator """rankbased""" +248 67 dataset """kinships""" +248 67 model """ermlp""" +248 67 loss """bceaftersigmoid""" +248 67 regularizer """no""" +248 67 optimizer """adam""" +248 67 training_loop """lcwa""" +248 67 evaluator """rankbased""" +248 68 
dataset """kinships""" +248 68 model """ermlp""" +248 68 loss """bceaftersigmoid""" +248 68 regularizer """no""" +248 68 optimizer """adam""" +248 68 training_loop """lcwa""" +248 68 evaluator """rankbased""" +248 69 dataset """kinships""" +248 69 model """ermlp""" +248 69 loss """bceaftersigmoid""" +248 69 regularizer """no""" +248 69 optimizer """adam""" +248 69 training_loop """lcwa""" +248 69 evaluator """rankbased""" +248 70 dataset """kinships""" +248 70 model """ermlp""" +248 70 loss """bceaftersigmoid""" +248 70 regularizer """no""" +248 70 optimizer """adam""" +248 70 training_loop """lcwa""" +248 70 evaluator """rankbased""" +248 71 dataset """kinships""" +248 71 model """ermlp""" +248 71 loss """bceaftersigmoid""" +248 71 regularizer """no""" +248 71 optimizer """adam""" +248 71 training_loop """lcwa""" +248 71 evaluator """rankbased""" +248 72 dataset """kinships""" +248 72 model """ermlp""" +248 72 loss """bceaftersigmoid""" +248 72 regularizer """no""" +248 72 optimizer """adam""" +248 72 training_loop """lcwa""" +248 72 evaluator """rankbased""" +248 73 dataset """kinships""" +248 73 model """ermlp""" +248 73 loss """bceaftersigmoid""" +248 73 regularizer """no""" +248 73 optimizer """adam""" +248 73 training_loop """lcwa""" +248 73 evaluator """rankbased""" +248 74 dataset """kinships""" +248 74 model """ermlp""" +248 74 loss """bceaftersigmoid""" +248 74 regularizer """no""" +248 74 optimizer """adam""" +248 74 training_loop """lcwa""" +248 74 evaluator """rankbased""" +248 75 dataset """kinships""" +248 75 model """ermlp""" +248 75 loss """bceaftersigmoid""" +248 75 regularizer """no""" +248 75 optimizer """adam""" +248 75 training_loop """lcwa""" +248 75 evaluator """rankbased""" +248 76 dataset """kinships""" +248 76 model """ermlp""" +248 76 loss """bceaftersigmoid""" +248 76 regularizer """no""" +248 76 optimizer """adam""" +248 76 training_loop """lcwa""" +248 76 evaluator """rankbased""" +248 77 dataset """kinships""" +248 77 model 
"""ermlp""" +248 77 loss """bceaftersigmoid""" +248 77 regularizer """no""" +248 77 optimizer """adam""" +248 77 training_loop """lcwa""" +248 77 evaluator """rankbased""" +248 78 dataset """kinships""" +248 78 model """ermlp""" +248 78 loss """bceaftersigmoid""" +248 78 regularizer """no""" +248 78 optimizer """adam""" +248 78 training_loop """lcwa""" +248 78 evaluator """rankbased""" +248 79 dataset """kinships""" +248 79 model """ermlp""" +248 79 loss """bceaftersigmoid""" +248 79 regularizer """no""" +248 79 optimizer """adam""" +248 79 training_loop """lcwa""" +248 79 evaluator """rankbased""" +248 80 dataset """kinships""" +248 80 model """ermlp""" +248 80 loss """bceaftersigmoid""" +248 80 regularizer """no""" +248 80 optimizer """adam""" +248 80 training_loop """lcwa""" +248 80 evaluator """rankbased""" +248 81 dataset """kinships""" +248 81 model """ermlp""" +248 81 loss """bceaftersigmoid""" +248 81 regularizer """no""" +248 81 optimizer """adam""" +248 81 training_loop """lcwa""" +248 81 evaluator """rankbased""" +248 82 dataset """kinships""" +248 82 model """ermlp""" +248 82 loss """bceaftersigmoid""" +248 82 regularizer """no""" +248 82 optimizer """adam""" +248 82 training_loop """lcwa""" +248 82 evaluator """rankbased""" +248 83 dataset """kinships""" +248 83 model """ermlp""" +248 83 loss """bceaftersigmoid""" +248 83 regularizer """no""" +248 83 optimizer """adam""" +248 83 training_loop """lcwa""" +248 83 evaluator """rankbased""" +248 84 dataset """kinships""" +248 84 model """ermlp""" +248 84 loss """bceaftersigmoid""" +248 84 regularizer """no""" +248 84 optimizer """adam""" +248 84 training_loop """lcwa""" +248 84 evaluator """rankbased""" +248 85 dataset """kinships""" +248 85 model """ermlp""" +248 85 loss """bceaftersigmoid""" +248 85 regularizer """no""" +248 85 optimizer """adam""" +248 85 training_loop """lcwa""" +248 85 evaluator """rankbased""" +248 86 dataset """kinships""" +248 86 model """ermlp""" +248 86 loss """bceaftersigmoid""" 
+248 86 regularizer """no""" +248 86 optimizer """adam""" +248 86 training_loop """lcwa""" +248 86 evaluator """rankbased""" +248 87 dataset """kinships""" +248 87 model """ermlp""" +248 87 loss """bceaftersigmoid""" +248 87 regularizer """no""" +248 87 optimizer """adam""" +248 87 training_loop """lcwa""" +248 87 evaluator """rankbased""" +248 88 dataset """kinships""" +248 88 model """ermlp""" +248 88 loss """bceaftersigmoid""" +248 88 regularizer """no""" +248 88 optimizer """adam""" +248 88 training_loop """lcwa""" +248 88 evaluator """rankbased""" +248 89 dataset """kinships""" +248 89 model """ermlp""" +248 89 loss """bceaftersigmoid""" +248 89 regularizer """no""" +248 89 optimizer """adam""" +248 89 training_loop """lcwa""" +248 89 evaluator """rankbased""" +248 90 dataset """kinships""" +248 90 model """ermlp""" +248 90 loss """bceaftersigmoid""" +248 90 regularizer """no""" +248 90 optimizer """adam""" +248 90 training_loop """lcwa""" +248 90 evaluator """rankbased""" +248 91 dataset """kinships""" +248 91 model """ermlp""" +248 91 loss """bceaftersigmoid""" +248 91 regularizer """no""" +248 91 optimizer """adam""" +248 91 training_loop """lcwa""" +248 91 evaluator """rankbased""" +248 92 dataset """kinships""" +248 92 model """ermlp""" +248 92 loss """bceaftersigmoid""" +248 92 regularizer """no""" +248 92 optimizer """adam""" +248 92 training_loop """lcwa""" +248 92 evaluator """rankbased""" +248 93 dataset """kinships""" +248 93 model """ermlp""" +248 93 loss """bceaftersigmoid""" +248 93 regularizer """no""" +248 93 optimizer """adam""" +248 93 training_loop """lcwa""" +248 93 evaluator """rankbased""" +248 94 dataset """kinships""" +248 94 model """ermlp""" +248 94 loss """bceaftersigmoid""" +248 94 regularizer """no""" +248 94 optimizer """adam""" +248 94 training_loop """lcwa""" +248 94 evaluator """rankbased""" +248 95 dataset """kinships""" +248 95 model """ermlp""" +248 95 loss """bceaftersigmoid""" +248 95 regularizer """no""" +248 95 optimizer 
"""adam""" +248 95 training_loop """lcwa""" +248 95 evaluator """rankbased""" +248 96 dataset """kinships""" +248 96 model """ermlp""" +248 96 loss """bceaftersigmoid""" +248 96 regularizer """no""" +248 96 optimizer """adam""" +248 96 training_loop """lcwa""" +248 96 evaluator """rankbased""" +248 97 dataset """kinships""" +248 97 model """ermlp""" +248 97 loss """bceaftersigmoid""" +248 97 regularizer """no""" +248 97 optimizer """adam""" +248 97 training_loop """lcwa""" +248 97 evaluator """rankbased""" +248 98 dataset """kinships""" +248 98 model """ermlp""" +248 98 loss """bceaftersigmoid""" +248 98 regularizer """no""" +248 98 optimizer """adam""" +248 98 training_loop """lcwa""" +248 98 evaluator """rankbased""" +248 99 dataset """kinships""" +248 99 model """ermlp""" +248 99 loss """bceaftersigmoid""" +248 99 regularizer """no""" +248 99 optimizer """adam""" +248 99 training_loop """lcwa""" +248 99 evaluator """rankbased""" +248 100 dataset """kinships""" +248 100 model """ermlp""" +248 100 loss """bceaftersigmoid""" +248 100 regularizer """no""" +248 100 optimizer """adam""" +248 100 training_loop """lcwa""" +248 100 evaluator """rankbased""" +249 1 model.embedding_dim 0.0 +249 1 optimizer.lr 0.010979399700691197 +249 1 training.batch_size 0.0 +249 1 training.label_smoothing 0.13798572995596153 +249 2 model.embedding_dim 1.0 +249 2 optimizer.lr 0.0016778036125750192 +249 2 training.batch_size 1.0 +249 2 training.label_smoothing 0.36544756276132573 +249 3 model.embedding_dim 1.0 +249 3 optimizer.lr 0.008430780045448286 +249 3 training.batch_size 0.0 +249 3 training.label_smoothing 0.0015275524017583659 +249 4 model.embedding_dim 0.0 +249 4 optimizer.lr 0.001372814263358308 +249 4 training.batch_size 1.0 +249 4 training.label_smoothing 0.0025446399444684097 +249 5 model.embedding_dim 1.0 +249 5 optimizer.lr 0.010509635612906918 +249 5 training.batch_size 1.0 +249 5 training.label_smoothing 0.8263465793254052 +249 6 model.embedding_dim 1.0 +249 6 optimizer.lr 
0.005481079044958256 +249 6 training.batch_size 1.0 +249 6 training.label_smoothing 0.0022769024752242093 +249 7 model.embedding_dim 1.0 +249 7 optimizer.lr 0.00196286912681513 +249 7 training.batch_size 1.0 +249 7 training.label_smoothing 0.15828699003169208 +249 8 model.embedding_dim 2.0 +249 8 optimizer.lr 0.016042698433825872 +249 8 training.batch_size 0.0 +249 8 training.label_smoothing 0.2174614615880322 +249 9 model.embedding_dim 2.0 +249 9 optimizer.lr 0.04185572107428451 +249 9 training.batch_size 0.0 +249 9 training.label_smoothing 0.20491558351785596 +249 10 model.embedding_dim 1.0 +249 10 optimizer.lr 0.001004947246336479 +249 10 training.batch_size 0.0 +249 10 training.label_smoothing 0.0024407938051511525 +249 11 model.embedding_dim 0.0 +249 11 optimizer.lr 0.025085710212141794 +249 11 training.batch_size 1.0 +249 11 training.label_smoothing 0.02969403736159846 +249 12 model.embedding_dim 0.0 +249 12 optimizer.lr 0.026907706930327173 +249 12 training.batch_size 2.0 +249 12 training.label_smoothing 0.04935167001900325 +249 13 model.embedding_dim 0.0 +249 13 optimizer.lr 0.017646505602925324 +249 13 training.batch_size 1.0 +249 13 training.label_smoothing 0.021638376574230143 +249 14 model.embedding_dim 2.0 +249 14 optimizer.lr 0.00734483501109796 +249 14 training.batch_size 1.0 +249 14 training.label_smoothing 0.007621201039926724 +249 15 model.embedding_dim 0.0 +249 15 optimizer.lr 0.002443039850885856 +249 15 training.batch_size 1.0 +249 15 training.label_smoothing 0.00786171586482999 +249 16 model.embedding_dim 2.0 +249 16 optimizer.lr 0.0030225756993181824 +249 16 training.batch_size 1.0 +249 16 training.label_smoothing 0.388424339233748 +249 17 model.embedding_dim 1.0 +249 17 optimizer.lr 0.0011215188343699381 +249 17 training.batch_size 0.0 +249 17 training.label_smoothing 0.005460515069088949 +249 18 model.embedding_dim 1.0 +249 18 optimizer.lr 0.0021569783464813973 +249 18 training.batch_size 1.0 +249 18 training.label_smoothing 
0.6761554547884178 +249 19 model.embedding_dim 1.0 +249 19 optimizer.lr 0.07395514727965838 +249 19 training.batch_size 1.0 +249 19 training.label_smoothing 0.021076854871475644 +249 20 model.embedding_dim 2.0 +249 20 optimizer.lr 0.038990052245134174 +249 20 training.batch_size 2.0 +249 20 training.label_smoothing 0.004430902211663401 +249 21 model.embedding_dim 0.0 +249 21 optimizer.lr 0.07986697604496912 +249 21 training.batch_size 1.0 +249 21 training.label_smoothing 0.3959006214495619 +249 22 model.embedding_dim 0.0 +249 22 optimizer.lr 0.009880858001024853 +249 22 training.batch_size 0.0 +249 22 training.label_smoothing 0.0011225407641242912 +249 23 model.embedding_dim 1.0 +249 23 optimizer.lr 0.0016170835020967876 +249 23 training.batch_size 0.0 +249 23 training.label_smoothing 0.4090342006714729 +249 24 model.embedding_dim 0.0 +249 24 optimizer.lr 0.016548864095804767 +249 24 training.batch_size 0.0 +249 24 training.label_smoothing 0.012668330642283012 +249 25 model.embedding_dim 0.0 +249 25 optimizer.lr 0.044331097852861226 +249 25 training.batch_size 1.0 +249 25 training.label_smoothing 0.2527882559841789 +249 26 model.embedding_dim 0.0 +249 26 optimizer.lr 0.02430731472672159 +249 26 training.batch_size 2.0 +249 26 training.label_smoothing 0.007605981500170818 +249 27 model.embedding_dim 0.0 +249 27 optimizer.lr 0.041553141359309126 +249 27 training.batch_size 2.0 +249 27 training.label_smoothing 0.009600923795346899 +249 28 model.embedding_dim 2.0 +249 28 optimizer.lr 0.0011077344434347177 +249 28 training.batch_size 1.0 +249 28 training.label_smoothing 0.0860827029468107 +249 29 model.embedding_dim 0.0 +249 29 optimizer.lr 0.004038305178614022 +249 29 training.batch_size 0.0 +249 29 training.label_smoothing 0.11540485250452318 +249 30 model.embedding_dim 1.0 +249 30 optimizer.lr 0.023288714680832125 +249 30 training.batch_size 0.0 +249 30 training.label_smoothing 0.051332164208465675 +249 31 model.embedding_dim 0.0 +249 31 optimizer.lr 
0.006858948286425145 +249 31 training.batch_size 1.0 +249 31 training.label_smoothing 0.06507570248742828 +249 32 model.embedding_dim 2.0 +249 32 optimizer.lr 0.021038750972962185 +249 32 training.batch_size 2.0 +249 32 training.label_smoothing 0.006058901976177213 +249 33 model.embedding_dim 1.0 +249 33 optimizer.lr 0.03346349353244392 +249 33 training.batch_size 2.0 +249 33 training.label_smoothing 0.08274286012894859 +249 34 model.embedding_dim 2.0 +249 34 optimizer.lr 0.015115843324022894 +249 34 training.batch_size 1.0 +249 34 training.label_smoothing 0.10392753836953349 +249 35 model.embedding_dim 0.0 +249 35 optimizer.lr 0.0028403547005825215 +249 35 training.batch_size 2.0 +249 35 training.label_smoothing 0.308696271026234 +249 36 model.embedding_dim 1.0 +249 36 optimizer.lr 0.0010521340645083214 +249 36 training.batch_size 0.0 +249 36 training.label_smoothing 0.004870821252838274 +249 37 model.embedding_dim 2.0 +249 37 optimizer.lr 0.07882362706508263 +249 37 training.batch_size 1.0 +249 37 training.label_smoothing 0.06441506923739365 +249 38 model.embedding_dim 1.0 +249 38 optimizer.lr 0.017430679272034295 +249 38 training.batch_size 2.0 +249 38 training.label_smoothing 0.12663332566607713 +249 39 model.embedding_dim 2.0 +249 39 optimizer.lr 0.054540022475111576 +249 39 training.batch_size 0.0 +249 39 training.label_smoothing 0.39338590947432717 +249 40 model.embedding_dim 0.0 +249 40 optimizer.lr 0.04237868601494272 +249 40 training.batch_size 1.0 +249 40 training.label_smoothing 0.012707443922733548 +249 41 model.embedding_dim 0.0 +249 41 optimizer.lr 0.06490807887324902 +249 41 training.batch_size 2.0 +249 41 training.label_smoothing 0.0020265187575104373 +249 42 model.embedding_dim 1.0 +249 42 optimizer.lr 0.020829062556899338 +249 42 training.batch_size 0.0 +249 42 training.label_smoothing 0.04418027769530251 +249 43 model.embedding_dim 1.0 +249 43 optimizer.lr 0.039864836265047804 +249 43 training.batch_size 2.0 +249 43 training.label_smoothing 
0.008753099995932025 +249 44 model.embedding_dim 1.0 +249 44 optimizer.lr 0.0018022553679099646 +249 44 training.batch_size 0.0 +249 44 training.label_smoothing 0.04287183745070985 +249 45 model.embedding_dim 0.0 +249 45 optimizer.lr 0.003961030942455321 +249 45 training.batch_size 2.0 +249 45 training.label_smoothing 0.003017773799925954 +249 46 model.embedding_dim 1.0 +249 46 optimizer.lr 0.04039855411876625 +249 46 training.batch_size 0.0 +249 46 training.label_smoothing 0.012270237681441462 +249 47 model.embedding_dim 1.0 +249 47 optimizer.lr 0.0021374791700336554 +249 47 training.batch_size 2.0 +249 47 training.label_smoothing 0.3290073239849196 +249 48 model.embedding_dim 2.0 +249 48 optimizer.lr 0.0052831555198834534 +249 48 training.batch_size 2.0 +249 48 training.label_smoothing 0.005567044657098249 +249 49 model.embedding_dim 2.0 +249 49 optimizer.lr 0.09330488744967007 +249 49 training.batch_size 1.0 +249 49 training.label_smoothing 0.0012359329593865796 +249 50 model.embedding_dim 1.0 +249 50 optimizer.lr 0.01712695381252667 +249 50 training.batch_size 1.0 +249 50 training.label_smoothing 0.09966257708586716 +249 51 model.embedding_dim 0.0 +249 51 optimizer.lr 0.09167618784850548 +249 51 training.batch_size 2.0 +249 51 training.label_smoothing 0.006755349448505757 +249 52 model.embedding_dim 1.0 +249 52 optimizer.lr 0.05400203706436414 +249 52 training.batch_size 2.0 +249 52 training.label_smoothing 0.020333178277915254 +249 53 model.embedding_dim 1.0 +249 53 optimizer.lr 0.020590127146224362 +249 53 training.batch_size 1.0 +249 53 training.label_smoothing 0.005552259931275591 +249 54 model.embedding_dim 2.0 +249 54 optimizer.lr 0.055268276996924916 +249 54 training.batch_size 0.0 +249 54 training.label_smoothing 0.3731496484042278 +249 55 model.embedding_dim 2.0 +249 55 optimizer.lr 0.005046531231437851 +249 55 training.batch_size 0.0 +249 55 training.label_smoothing 0.003452316418884786 +249 56 model.embedding_dim 2.0 +249 56 optimizer.lr 
0.02709622601589041 +249 56 training.batch_size 2.0 +249 56 training.label_smoothing 0.034545179313214686 +249 57 model.embedding_dim 0.0 +249 57 optimizer.lr 0.02807863092595604 +249 57 training.batch_size 1.0 +249 57 training.label_smoothing 0.0034409080025161395 +249 58 model.embedding_dim 0.0 +249 58 optimizer.lr 0.0018166240090991114 +249 58 training.batch_size 1.0 +249 58 training.label_smoothing 0.0036582744445152 +249 59 model.embedding_dim 2.0 +249 59 optimizer.lr 0.0010397149291345661 +249 59 training.batch_size 0.0 +249 59 training.label_smoothing 0.45688448887296573 +249 60 model.embedding_dim 1.0 +249 60 optimizer.lr 0.049550581965333525 +249 60 training.batch_size 1.0 +249 60 training.label_smoothing 0.05797623023346193 +249 61 model.embedding_dim 1.0 +249 61 optimizer.lr 0.0014188340771443624 +249 61 training.batch_size 0.0 +249 61 training.label_smoothing 0.0012482221248447862 +249 62 model.embedding_dim 0.0 +249 62 optimizer.lr 0.06657566983542973 +249 62 training.batch_size 0.0 +249 62 training.label_smoothing 0.001358375177850896 +249 63 model.embedding_dim 2.0 +249 63 optimizer.lr 0.06366359980119286 +249 63 training.batch_size 1.0 +249 63 training.label_smoothing 0.3718598300175224 +249 64 model.embedding_dim 2.0 +249 64 optimizer.lr 0.0694599300009232 +249 64 training.batch_size 1.0 +249 64 training.label_smoothing 0.002738060658013098 +249 65 model.embedding_dim 0.0 +249 65 optimizer.lr 0.0011212711834274302 +249 65 training.batch_size 0.0 +249 65 training.label_smoothing 0.07433181359343836 +249 66 model.embedding_dim 0.0 +249 66 optimizer.lr 0.0027625820445285345 +249 66 training.batch_size 2.0 +249 66 training.label_smoothing 0.0010640558715762805 +249 67 model.embedding_dim 1.0 +249 67 optimizer.lr 0.09100171772991883 +249 67 training.batch_size 0.0 +249 67 training.label_smoothing 0.013314085393456717 +249 68 model.embedding_dim 0.0 +249 68 optimizer.lr 0.014065876010654723 +249 68 training.batch_size 0.0 +249 68 training.label_smoothing 
0.0010171505449969984 +249 69 model.embedding_dim 2.0 +249 69 optimizer.lr 0.015267425518023077 +249 69 training.batch_size 1.0 +249 69 training.label_smoothing 0.13772125400049437 +249 70 model.embedding_dim 2.0 +249 70 optimizer.lr 0.00442495561068249 +249 70 training.batch_size 2.0 +249 70 training.label_smoothing 0.05145315296892361 +249 71 model.embedding_dim 1.0 +249 71 optimizer.lr 0.003501111556854894 +249 71 training.batch_size 2.0 +249 71 training.label_smoothing 0.3746600812911698 +249 72 model.embedding_dim 2.0 +249 72 optimizer.lr 0.03311068770860372 +249 72 training.batch_size 1.0 +249 72 training.label_smoothing 0.003804438825518929 +249 73 model.embedding_dim 0.0 +249 73 optimizer.lr 0.03542332388522658 +249 73 training.batch_size 1.0 +249 73 training.label_smoothing 0.0013715278623301131 +249 74 model.embedding_dim 2.0 +249 74 optimizer.lr 0.005911599746305195 +249 74 training.batch_size 0.0 +249 74 training.label_smoothing 0.042598582998779985 +249 75 model.embedding_dim 1.0 +249 75 optimizer.lr 0.011861326014853136 +249 75 training.batch_size 0.0 +249 75 training.label_smoothing 0.0013634483589273951 +249 76 model.embedding_dim 2.0 +249 76 optimizer.lr 0.008250485800207367 +249 76 training.batch_size 2.0 +249 76 training.label_smoothing 0.002326711765182473 +249 77 model.embedding_dim 1.0 +249 77 optimizer.lr 0.005628956362490612 +249 77 training.batch_size 1.0 +249 77 training.label_smoothing 0.12489768814370183 +249 78 model.embedding_dim 1.0 +249 78 optimizer.lr 0.06622592003399976 +249 78 training.batch_size 0.0 +249 78 training.label_smoothing 0.15194965394715815 +249 79 model.embedding_dim 1.0 +249 79 optimizer.lr 0.014280333102218185 +249 79 training.batch_size 2.0 +249 79 training.label_smoothing 0.0017639630204153414 +249 80 model.embedding_dim 2.0 +249 80 optimizer.lr 0.0027383022898921716 +249 80 training.batch_size 0.0 +249 80 training.label_smoothing 0.031942594059304656 +249 81 model.embedding_dim 2.0 +249 81 optimizer.lr 
0.05530951388767627 +249 81 training.batch_size 0.0 +249 81 training.label_smoothing 0.4130001691755754 +249 82 model.embedding_dim 0.0 +249 82 optimizer.lr 0.003972813375739525 +249 82 training.batch_size 0.0 +249 82 training.label_smoothing 0.005448886640371104 +249 83 model.embedding_dim 0.0 +249 83 optimizer.lr 0.01500747013461523 +249 83 training.batch_size 1.0 +249 83 training.label_smoothing 0.18534117510004322 +249 84 model.embedding_dim 2.0 +249 84 optimizer.lr 0.0013967849075517248 +249 84 training.batch_size 1.0 +249 84 training.label_smoothing 0.42550175422134934 +249 85 model.embedding_dim 2.0 +249 85 optimizer.lr 0.04082567986425772 +249 85 training.batch_size 0.0 +249 85 training.label_smoothing 0.615882331909118 +249 86 model.embedding_dim 0.0 +249 86 optimizer.lr 0.010354828458421684 +249 86 training.batch_size 0.0 +249 86 training.label_smoothing 0.03607420186142937 +249 87 model.embedding_dim 1.0 +249 87 optimizer.lr 0.00920653045878736 +249 87 training.batch_size 1.0 +249 87 training.label_smoothing 0.010834705181894588 +249 88 model.embedding_dim 0.0 +249 88 optimizer.lr 0.07257563429398237 +249 88 training.batch_size 1.0 +249 88 training.label_smoothing 0.003493887039345043 +249 89 model.embedding_dim 0.0 +249 89 optimizer.lr 0.061395465210437435 +249 89 training.batch_size 2.0 +249 89 training.label_smoothing 0.011801422960083787 +249 90 model.embedding_dim 0.0 +249 90 optimizer.lr 0.010829188647142402 +249 90 training.batch_size 0.0 +249 90 training.label_smoothing 0.013889080226540222 +249 91 model.embedding_dim 2.0 +249 91 optimizer.lr 0.04836052820983872 +249 91 training.batch_size 1.0 +249 91 training.label_smoothing 0.14822387161888873 +249 92 model.embedding_dim 0.0 +249 92 optimizer.lr 0.004973565637718204 +249 92 training.batch_size 0.0 +249 92 training.label_smoothing 0.0017007465108728477 +249 93 model.embedding_dim 2.0 +249 93 optimizer.lr 0.0011190811459207619 +249 93 training.batch_size 1.0 +249 93 training.label_smoothing 
0.014881966951273643 +249 94 model.embedding_dim 2.0 +249 94 optimizer.lr 0.0034702462751869953 +249 94 training.batch_size 0.0 +249 94 training.label_smoothing 0.12035416781796238 +249 95 model.embedding_dim 2.0 +249 95 optimizer.lr 0.09856535372206834 +249 95 training.batch_size 1.0 +249 95 training.label_smoothing 0.9715010880472015 +249 96 model.embedding_dim 0.0 +249 96 optimizer.lr 0.018336241924711644 +249 96 training.batch_size 0.0 +249 96 training.label_smoothing 0.0037631496943311923 +249 97 model.embedding_dim 1.0 +249 97 optimizer.lr 0.0193204686307846 +249 97 training.batch_size 2.0 +249 97 training.label_smoothing 0.9595502916223722 +249 98 model.embedding_dim 2.0 +249 98 optimizer.lr 0.0773835123690224 +249 98 training.batch_size 1.0 +249 98 training.label_smoothing 0.44100208070399827 +249 99 model.embedding_dim 1.0 +249 99 optimizer.lr 0.005390571459899473 +249 99 training.batch_size 1.0 +249 99 training.label_smoothing 0.015001084624278977 +249 100 model.embedding_dim 1.0 +249 100 optimizer.lr 0.036634791893618165 +249 100 training.batch_size 0.0 +249 100 training.label_smoothing 0.005391175474267769 +249 1 dataset """kinships""" +249 1 model """ermlp""" +249 1 loss """softplus""" +249 1 regularizer """no""" +249 1 optimizer """adam""" +249 1 training_loop """lcwa""" +249 1 evaluator """rankbased""" +249 2 dataset """kinships""" +249 2 model """ermlp""" +249 2 loss """softplus""" +249 2 regularizer """no""" +249 2 optimizer """adam""" +249 2 training_loop """lcwa""" +249 2 evaluator """rankbased""" +249 3 dataset """kinships""" +249 3 model """ermlp""" +249 3 loss """softplus""" +249 3 regularizer """no""" +249 3 optimizer """adam""" +249 3 training_loop """lcwa""" +249 3 evaluator """rankbased""" +249 4 dataset """kinships""" +249 4 model """ermlp""" +249 4 loss """softplus""" +249 4 regularizer """no""" +249 4 optimizer """adam""" +249 4 training_loop """lcwa""" +249 4 evaluator """rankbased""" +249 5 dataset """kinships""" +249 5 model 
"""ermlp""" +249 5 loss """softplus""" +249 5 regularizer """no""" +249 5 optimizer """adam""" +249 5 training_loop """lcwa""" +249 5 evaluator """rankbased""" +249 6 dataset """kinships""" +249 6 model """ermlp""" +249 6 loss """softplus""" +249 6 regularizer """no""" +249 6 optimizer """adam""" +249 6 training_loop """lcwa""" +249 6 evaluator """rankbased""" +249 7 dataset """kinships""" +249 7 model """ermlp""" +249 7 loss """softplus""" +249 7 regularizer """no""" +249 7 optimizer """adam""" +249 7 training_loop """lcwa""" +249 7 evaluator """rankbased""" +249 8 dataset """kinships""" +249 8 model """ermlp""" +249 8 loss """softplus""" +249 8 regularizer """no""" +249 8 optimizer """adam""" +249 8 training_loop """lcwa""" +249 8 evaluator """rankbased""" +249 9 dataset """kinships""" +249 9 model """ermlp""" +249 9 loss """softplus""" +249 9 regularizer """no""" +249 9 optimizer """adam""" +249 9 training_loop """lcwa""" +249 9 evaluator """rankbased""" +249 10 dataset """kinships""" +249 10 model """ermlp""" +249 10 loss """softplus""" +249 10 regularizer """no""" +249 10 optimizer """adam""" +249 10 training_loop """lcwa""" +249 10 evaluator """rankbased""" +249 11 dataset """kinships""" +249 11 model """ermlp""" +249 11 loss """softplus""" +249 11 regularizer """no""" +249 11 optimizer """adam""" +249 11 training_loop """lcwa""" +249 11 evaluator """rankbased""" +249 12 dataset """kinships""" +249 12 model """ermlp""" +249 12 loss """softplus""" +249 12 regularizer """no""" +249 12 optimizer """adam""" +249 12 training_loop """lcwa""" +249 12 evaluator """rankbased""" +249 13 dataset """kinships""" +249 13 model """ermlp""" +249 13 loss """softplus""" +249 13 regularizer """no""" +249 13 optimizer """adam""" +249 13 training_loop """lcwa""" +249 13 evaluator """rankbased""" +249 14 dataset """kinships""" +249 14 model """ermlp""" +249 14 loss """softplus""" +249 14 regularizer """no""" +249 14 optimizer """adam""" +249 14 training_loop """lcwa""" +249 14 
evaluator """rankbased""" +249 15 dataset """kinships""" +249 15 model """ermlp""" +249 15 loss """softplus""" +249 15 regularizer """no""" +249 15 optimizer """adam""" +249 15 training_loop """lcwa""" +249 15 evaluator """rankbased""" +249 16 dataset """kinships""" +249 16 model """ermlp""" +249 16 loss """softplus""" +249 16 regularizer """no""" +249 16 optimizer """adam""" +249 16 training_loop """lcwa""" +249 16 evaluator """rankbased""" +249 17 dataset """kinships""" +249 17 model """ermlp""" +249 17 loss """softplus""" +249 17 regularizer """no""" +249 17 optimizer """adam""" +249 17 training_loop """lcwa""" +249 17 evaluator """rankbased""" +249 18 dataset """kinships""" +249 18 model """ermlp""" +249 18 loss """softplus""" +249 18 regularizer """no""" +249 18 optimizer """adam""" +249 18 training_loop """lcwa""" +249 18 evaluator """rankbased""" +249 19 dataset """kinships""" +249 19 model """ermlp""" +249 19 loss """softplus""" +249 19 regularizer """no""" +249 19 optimizer """adam""" +249 19 training_loop """lcwa""" +249 19 evaluator """rankbased""" +249 20 dataset """kinships""" +249 20 model """ermlp""" +249 20 loss """softplus""" +249 20 regularizer """no""" +249 20 optimizer """adam""" +249 20 training_loop """lcwa""" +249 20 evaluator """rankbased""" +249 21 dataset """kinships""" +249 21 model """ermlp""" +249 21 loss """softplus""" +249 21 regularizer """no""" +249 21 optimizer """adam""" +249 21 training_loop """lcwa""" +249 21 evaluator """rankbased""" +249 22 dataset """kinships""" +249 22 model """ermlp""" +249 22 loss """softplus""" +249 22 regularizer """no""" +249 22 optimizer """adam""" +249 22 training_loop """lcwa""" +249 22 evaluator """rankbased""" +249 23 dataset """kinships""" +249 23 model """ermlp""" +249 23 loss """softplus""" +249 23 regularizer """no""" +249 23 optimizer """adam""" +249 23 training_loop """lcwa""" +249 23 evaluator """rankbased""" +249 24 dataset """kinships""" +249 24 model """ermlp""" +249 24 loss 
"""softplus""" +249 24 regularizer """no""" +249 24 optimizer """adam""" +249 24 training_loop """lcwa""" +249 24 evaluator """rankbased""" +249 25 dataset """kinships""" +249 25 model """ermlp""" +249 25 loss """softplus""" +249 25 regularizer """no""" +249 25 optimizer """adam""" +249 25 training_loop """lcwa""" +249 25 evaluator """rankbased""" +249 26 dataset """kinships""" +249 26 model """ermlp""" +249 26 loss """softplus""" +249 26 regularizer """no""" +249 26 optimizer """adam""" +249 26 training_loop """lcwa""" +249 26 evaluator """rankbased""" +249 27 dataset """kinships""" +249 27 model """ermlp""" +249 27 loss """softplus""" +249 27 regularizer """no""" +249 27 optimizer """adam""" +249 27 training_loop """lcwa""" +249 27 evaluator """rankbased""" +249 28 dataset """kinships""" +249 28 model """ermlp""" +249 28 loss """softplus""" +249 28 regularizer """no""" +249 28 optimizer """adam""" +249 28 training_loop """lcwa""" +249 28 evaluator """rankbased""" +249 29 dataset """kinships""" +249 29 model """ermlp""" +249 29 loss """softplus""" +249 29 regularizer """no""" +249 29 optimizer """adam""" +249 29 training_loop """lcwa""" +249 29 evaluator """rankbased""" +249 30 dataset """kinships""" +249 30 model """ermlp""" +249 30 loss """softplus""" +249 30 regularizer """no""" +249 30 optimizer """adam""" +249 30 training_loop """lcwa""" +249 30 evaluator """rankbased""" +249 31 dataset """kinships""" +249 31 model """ermlp""" +249 31 loss """softplus""" +249 31 regularizer """no""" +249 31 optimizer """adam""" +249 31 training_loop """lcwa""" +249 31 evaluator """rankbased""" +249 32 dataset """kinships""" +249 32 model """ermlp""" +249 32 loss """softplus""" +249 32 regularizer """no""" +249 32 optimizer """adam""" +249 32 training_loop """lcwa""" +249 32 evaluator """rankbased""" +249 33 dataset """kinships""" +249 33 model """ermlp""" +249 33 loss """softplus""" +249 33 regularizer """no""" +249 33 optimizer """adam""" +249 33 training_loop """lcwa""" 
+249 33 evaluator """rankbased""" +249 34 dataset """kinships""" +249 34 model """ermlp""" +249 34 loss """softplus""" +249 34 regularizer """no""" +249 34 optimizer """adam""" +249 34 training_loop """lcwa""" +249 34 evaluator """rankbased""" +249 35 dataset """kinships""" +249 35 model """ermlp""" +249 35 loss """softplus""" +249 35 regularizer """no""" +249 35 optimizer """adam""" +249 35 training_loop """lcwa""" +249 35 evaluator """rankbased""" +249 36 dataset """kinships""" +249 36 model """ermlp""" +249 36 loss """softplus""" +249 36 regularizer """no""" +249 36 optimizer """adam""" +249 36 training_loop """lcwa""" +249 36 evaluator """rankbased""" +249 37 dataset """kinships""" +249 37 model """ermlp""" +249 37 loss """softplus""" +249 37 regularizer """no""" +249 37 optimizer """adam""" +249 37 training_loop """lcwa""" +249 37 evaluator """rankbased""" +249 38 dataset """kinships""" +249 38 model """ermlp""" +249 38 loss """softplus""" +249 38 regularizer """no""" +249 38 optimizer """adam""" +249 38 training_loop """lcwa""" +249 38 evaluator """rankbased""" +249 39 dataset """kinships""" +249 39 model """ermlp""" +249 39 loss """softplus""" +249 39 regularizer """no""" +249 39 optimizer """adam""" +249 39 training_loop """lcwa""" +249 39 evaluator """rankbased""" +249 40 dataset """kinships""" +249 40 model """ermlp""" +249 40 loss """softplus""" +249 40 regularizer """no""" +249 40 optimizer """adam""" +249 40 training_loop """lcwa""" +249 40 evaluator """rankbased""" +249 41 dataset """kinships""" +249 41 model """ermlp""" +249 41 loss """softplus""" +249 41 regularizer """no""" +249 41 optimizer """adam""" +249 41 training_loop """lcwa""" +249 41 evaluator """rankbased""" +249 42 dataset """kinships""" +249 42 model """ermlp""" +249 42 loss """softplus""" +249 42 regularizer """no""" +249 42 optimizer """adam""" +249 42 training_loop """lcwa""" +249 42 evaluator """rankbased""" +249 43 dataset """kinships""" +249 43 model """ermlp""" +249 43 loss 
"""softplus""" +249 43 regularizer """no""" +249 43 optimizer """adam""" +249 43 training_loop """lcwa""" +249 43 evaluator """rankbased""" +249 44 dataset """kinships""" +249 44 model """ermlp""" +249 44 loss """softplus""" +249 44 regularizer """no""" +249 44 optimizer """adam""" +249 44 training_loop """lcwa""" +249 44 evaluator """rankbased""" +249 45 dataset """kinships""" +249 45 model """ermlp""" +249 45 loss """softplus""" +249 45 regularizer """no""" +249 45 optimizer """adam""" +249 45 training_loop """lcwa""" +249 45 evaluator """rankbased""" +249 46 dataset """kinships""" +249 46 model """ermlp""" +249 46 loss """softplus""" +249 46 regularizer """no""" +249 46 optimizer """adam""" +249 46 training_loop """lcwa""" +249 46 evaluator """rankbased""" +249 47 dataset """kinships""" +249 47 model """ermlp""" +249 47 loss """softplus""" +249 47 regularizer """no""" +249 47 optimizer """adam""" +249 47 training_loop """lcwa""" +249 47 evaluator """rankbased""" +249 48 dataset """kinships""" +249 48 model """ermlp""" +249 48 loss """softplus""" +249 48 regularizer """no""" +249 48 optimizer """adam""" +249 48 training_loop """lcwa""" +249 48 evaluator """rankbased""" +249 49 dataset """kinships""" +249 49 model """ermlp""" +249 49 loss """softplus""" +249 49 regularizer """no""" +249 49 optimizer """adam""" +249 49 training_loop """lcwa""" +249 49 evaluator """rankbased""" +249 50 dataset """kinships""" +249 50 model """ermlp""" +249 50 loss """softplus""" +249 50 regularizer """no""" +249 50 optimizer """adam""" +249 50 training_loop """lcwa""" +249 50 evaluator """rankbased""" +249 51 dataset """kinships""" +249 51 model """ermlp""" +249 51 loss """softplus""" +249 51 regularizer """no""" +249 51 optimizer """adam""" +249 51 training_loop """lcwa""" +249 51 evaluator """rankbased""" +249 52 dataset """kinships""" +249 52 model """ermlp""" +249 52 loss """softplus""" +249 52 regularizer """no""" +249 52 optimizer """adam""" +249 52 training_loop """lcwa""" 
+249 52 evaluator """rankbased""" +249 53 dataset """kinships""" +249 53 model """ermlp""" +249 53 loss """softplus""" +249 53 regularizer """no""" +249 53 optimizer """adam""" +249 53 training_loop """lcwa""" +249 53 evaluator """rankbased""" +249 54 dataset """kinships""" +249 54 model """ermlp""" +249 54 loss """softplus""" +249 54 regularizer """no""" +249 54 optimizer """adam""" +249 54 training_loop """lcwa""" +249 54 evaluator """rankbased""" +249 55 dataset """kinships""" +249 55 model """ermlp""" +249 55 loss """softplus""" +249 55 regularizer """no""" +249 55 optimizer """adam""" +249 55 training_loop """lcwa""" +249 55 evaluator """rankbased""" +249 56 dataset """kinships""" +249 56 model """ermlp""" +249 56 loss """softplus""" +249 56 regularizer """no""" +249 56 optimizer """adam""" +249 56 training_loop """lcwa""" +249 56 evaluator """rankbased""" +249 57 dataset """kinships""" +249 57 model """ermlp""" +249 57 loss """softplus""" +249 57 regularizer """no""" +249 57 optimizer """adam""" +249 57 training_loop """lcwa""" +249 57 evaluator """rankbased""" +249 58 dataset """kinships""" +249 58 model """ermlp""" +249 58 loss """softplus""" +249 58 regularizer """no""" +249 58 optimizer """adam""" +249 58 training_loop """lcwa""" +249 58 evaluator """rankbased""" +249 59 dataset """kinships""" +249 59 model """ermlp""" +249 59 loss """softplus""" +249 59 regularizer """no""" +249 59 optimizer """adam""" +249 59 training_loop """lcwa""" +249 59 evaluator """rankbased""" +249 60 dataset """kinships""" +249 60 model """ermlp""" +249 60 loss """softplus""" +249 60 regularizer """no""" +249 60 optimizer """adam""" +249 60 training_loop """lcwa""" +249 60 evaluator """rankbased""" +249 61 dataset """kinships""" +249 61 model """ermlp""" +249 61 loss """softplus""" +249 61 regularizer """no""" +249 61 optimizer """adam""" +249 61 training_loop """lcwa""" +249 61 evaluator """rankbased""" +249 62 dataset """kinships""" +249 62 model """ermlp""" +249 62 loss 
"""softplus""" +249 62 regularizer """no""" +249 62 optimizer """adam""" +249 62 training_loop """lcwa""" +249 62 evaluator """rankbased""" +249 63 dataset """kinships""" +249 63 model """ermlp""" +249 63 loss """softplus""" +249 63 regularizer """no""" +249 63 optimizer """adam""" +249 63 training_loop """lcwa""" +249 63 evaluator """rankbased""" +249 64 dataset """kinships""" +249 64 model """ermlp""" +249 64 loss """softplus""" +249 64 regularizer """no""" +249 64 optimizer """adam""" +249 64 training_loop """lcwa""" +249 64 evaluator """rankbased""" +249 65 dataset """kinships""" +249 65 model """ermlp""" +249 65 loss """softplus""" +249 65 regularizer """no""" +249 65 optimizer """adam""" +249 65 training_loop """lcwa""" +249 65 evaluator """rankbased""" +249 66 dataset """kinships""" +249 66 model """ermlp""" +249 66 loss """softplus""" +249 66 regularizer """no""" +249 66 optimizer """adam""" +249 66 training_loop """lcwa""" +249 66 evaluator """rankbased""" +249 67 dataset """kinships""" +249 67 model """ermlp""" +249 67 loss """softplus""" +249 67 regularizer """no""" +249 67 optimizer """adam""" +249 67 training_loop """lcwa""" +249 67 evaluator """rankbased""" +249 68 dataset """kinships""" +249 68 model """ermlp""" +249 68 loss """softplus""" +249 68 regularizer """no""" +249 68 optimizer """adam""" +249 68 training_loop """lcwa""" +249 68 evaluator """rankbased""" +249 69 dataset """kinships""" +249 69 model """ermlp""" +249 69 loss """softplus""" +249 69 regularizer """no""" +249 69 optimizer """adam""" +249 69 training_loop """lcwa""" +249 69 evaluator """rankbased""" +249 70 dataset """kinships""" +249 70 model """ermlp""" +249 70 loss """softplus""" +249 70 regularizer """no""" +249 70 optimizer """adam""" +249 70 training_loop """lcwa""" +249 70 evaluator """rankbased""" +249 71 dataset """kinships""" +249 71 model """ermlp""" +249 71 loss """softplus""" +249 71 regularizer """no""" +249 71 optimizer """adam""" +249 71 training_loop """lcwa""" 
+249 71 evaluator """rankbased""" +249 72 dataset """kinships""" +249 72 model """ermlp""" +249 72 loss """softplus""" +249 72 regularizer """no""" +249 72 optimizer """adam""" +249 72 training_loop """lcwa""" +249 72 evaluator """rankbased""" +249 73 dataset """kinships""" +249 73 model """ermlp""" +249 73 loss """softplus""" +249 73 regularizer """no""" +249 73 optimizer """adam""" +249 73 training_loop """lcwa""" +249 73 evaluator """rankbased""" +249 74 dataset """kinships""" +249 74 model """ermlp""" +249 74 loss """softplus""" +249 74 regularizer """no""" +249 74 optimizer """adam""" +249 74 training_loop """lcwa""" +249 74 evaluator """rankbased""" +249 75 dataset """kinships""" +249 75 model """ermlp""" +249 75 loss """softplus""" +249 75 regularizer """no""" +249 75 optimizer """adam""" +249 75 training_loop """lcwa""" +249 75 evaluator """rankbased""" +249 76 dataset """kinships""" +249 76 model """ermlp""" +249 76 loss """softplus""" +249 76 regularizer """no""" +249 76 optimizer """adam""" +249 76 training_loop """lcwa""" +249 76 evaluator """rankbased""" +249 77 dataset """kinships""" +249 77 model """ermlp""" +249 77 loss """softplus""" +249 77 regularizer """no""" +249 77 optimizer """adam""" +249 77 training_loop """lcwa""" +249 77 evaluator """rankbased""" +249 78 dataset """kinships""" +249 78 model """ermlp""" +249 78 loss """softplus""" +249 78 regularizer """no""" +249 78 optimizer """adam""" +249 78 training_loop """lcwa""" +249 78 evaluator """rankbased""" +249 79 dataset """kinships""" +249 79 model """ermlp""" +249 79 loss """softplus""" +249 79 regularizer """no""" +249 79 optimizer """adam""" +249 79 training_loop """lcwa""" +249 79 evaluator """rankbased""" +249 80 dataset """kinships""" +249 80 model """ermlp""" +249 80 loss """softplus""" +249 80 regularizer """no""" +249 80 optimizer """adam""" +249 80 training_loop """lcwa""" +249 80 evaluator """rankbased""" +249 81 dataset """kinships""" +249 81 model """ermlp""" +249 81 loss 
"""softplus""" +249 81 regularizer """no""" +249 81 optimizer """adam""" +249 81 training_loop """lcwa""" +249 81 evaluator """rankbased""" +249 82 dataset """kinships""" +249 82 model """ermlp""" +249 82 loss """softplus""" +249 82 regularizer """no""" +249 82 optimizer """adam""" +249 82 training_loop """lcwa""" +249 82 evaluator """rankbased""" +249 83 dataset """kinships""" +249 83 model """ermlp""" +249 83 loss """softplus""" +249 83 regularizer """no""" +249 83 optimizer """adam""" +249 83 training_loop """lcwa""" +249 83 evaluator """rankbased""" +249 84 dataset """kinships""" +249 84 model """ermlp""" +249 84 loss """softplus""" +249 84 regularizer """no""" +249 84 optimizer """adam""" +249 84 training_loop """lcwa""" +249 84 evaluator """rankbased""" +249 85 dataset """kinships""" +249 85 model """ermlp""" +249 85 loss """softplus""" +249 85 regularizer """no""" +249 85 optimizer """adam""" +249 85 training_loop """lcwa""" +249 85 evaluator """rankbased""" +249 86 dataset """kinships""" +249 86 model """ermlp""" +249 86 loss """softplus""" +249 86 regularizer """no""" +249 86 optimizer """adam""" +249 86 training_loop """lcwa""" +249 86 evaluator """rankbased""" +249 87 dataset """kinships""" +249 87 model """ermlp""" +249 87 loss """softplus""" +249 87 regularizer """no""" +249 87 optimizer """adam""" +249 87 training_loop """lcwa""" +249 87 evaluator """rankbased""" +249 88 dataset """kinships""" +249 88 model """ermlp""" +249 88 loss """softplus""" +249 88 regularizer """no""" +249 88 optimizer """adam""" +249 88 training_loop """lcwa""" +249 88 evaluator """rankbased""" +249 89 dataset """kinships""" +249 89 model """ermlp""" +249 89 loss """softplus""" +249 89 regularizer """no""" +249 89 optimizer """adam""" +249 89 training_loop """lcwa""" +249 89 evaluator """rankbased""" +249 90 dataset """kinships""" +249 90 model """ermlp""" +249 90 loss """softplus""" +249 90 regularizer """no""" +249 90 optimizer """adam""" +249 90 training_loop """lcwa""" 
+249 90 evaluator """rankbased""" +249 91 dataset """kinships""" +249 91 model """ermlp""" +249 91 loss """softplus""" +249 91 regularizer """no""" +249 91 optimizer """adam""" +249 91 training_loop """lcwa""" +249 91 evaluator """rankbased""" +249 92 dataset """kinships""" +249 92 model """ermlp""" +249 92 loss """softplus""" +249 92 regularizer """no""" +249 92 optimizer """adam""" +249 92 training_loop """lcwa""" +249 92 evaluator """rankbased""" +249 93 dataset """kinships""" +249 93 model """ermlp""" +249 93 loss """softplus""" +249 93 regularizer """no""" +249 93 optimizer """adam""" +249 93 training_loop """lcwa""" +249 93 evaluator """rankbased""" +249 94 dataset """kinships""" +249 94 model """ermlp""" +249 94 loss """softplus""" +249 94 regularizer """no""" +249 94 optimizer """adam""" +249 94 training_loop """lcwa""" +249 94 evaluator """rankbased""" +249 95 dataset """kinships""" +249 95 model """ermlp""" +249 95 loss """softplus""" +249 95 regularizer """no""" +249 95 optimizer """adam""" +249 95 training_loop """lcwa""" +249 95 evaluator """rankbased""" +249 96 dataset """kinships""" +249 96 model """ermlp""" +249 96 loss """softplus""" +249 96 regularizer """no""" +249 96 optimizer """adam""" +249 96 training_loop """lcwa""" +249 96 evaluator """rankbased""" +249 97 dataset """kinships""" +249 97 model """ermlp""" +249 97 loss """softplus""" +249 97 regularizer """no""" +249 97 optimizer """adam""" +249 97 training_loop """lcwa""" +249 97 evaluator """rankbased""" +249 98 dataset """kinships""" +249 98 model """ermlp""" +249 98 loss """softplus""" +249 98 regularizer """no""" +249 98 optimizer """adam""" +249 98 training_loop """lcwa""" +249 98 evaluator """rankbased""" +249 99 dataset """kinships""" +249 99 model """ermlp""" +249 99 loss """softplus""" +249 99 regularizer """no""" +249 99 optimizer """adam""" +249 99 training_loop """lcwa""" +249 99 evaluator """rankbased""" +249 100 dataset """kinships""" +249 100 model """ermlp""" +249 100 loss 
"""softplus""" +249 100 regularizer """no""" +249 100 optimizer """adam""" +249 100 training_loop """lcwa""" +249 100 evaluator """rankbased""" +250 1 model.embedding_dim 0.0 +250 1 optimizer.lr 0.0011038570028213683 +250 1 training.batch_size 1.0 +250 1 training.label_smoothing 0.21019962597334144 +250 2 model.embedding_dim 2.0 +250 2 optimizer.lr 0.030302357621081452 +250 2 training.batch_size 2.0 +250 2 training.label_smoothing 0.21336248474585703 +250 3 model.embedding_dim 2.0 +250 3 optimizer.lr 0.02061960657115603 +250 3 training.batch_size 1.0 +250 3 training.label_smoothing 0.002025151154211021 +250 4 model.embedding_dim 1.0 +250 4 optimizer.lr 0.0038641629576435643 +250 4 training.batch_size 0.0 +250 4 training.label_smoothing 0.21665769683524624 +250 5 model.embedding_dim 2.0 +250 5 optimizer.lr 0.0013243755120762833 +250 5 training.batch_size 1.0 +250 5 training.label_smoothing 0.009414684911765503 +250 6 model.embedding_dim 2.0 +250 6 optimizer.lr 0.004475475001539658 +250 6 training.batch_size 0.0 +250 6 training.label_smoothing 0.0645199853258406 +250 7 model.embedding_dim 1.0 +250 7 optimizer.lr 0.07064552548131425 +250 7 training.batch_size 0.0 +250 7 training.label_smoothing 0.020970342593708655 +250 8 model.embedding_dim 1.0 +250 8 optimizer.lr 0.050518209668232804 +250 8 training.batch_size 0.0 +250 8 training.label_smoothing 0.007178014799142797 +250 9 model.embedding_dim 2.0 +250 9 optimizer.lr 0.00442680620730391 +250 9 training.batch_size 2.0 +250 9 training.label_smoothing 0.10132336688902532 +250 10 model.embedding_dim 2.0 +250 10 optimizer.lr 0.0023168675437757586 +250 10 training.batch_size 0.0 +250 10 training.label_smoothing 0.00219902765006063 +250 11 model.embedding_dim 0.0 +250 11 optimizer.lr 0.01209325123015984 +250 11 training.batch_size 1.0 +250 11 training.label_smoothing 0.03864033259628384 +250 12 model.embedding_dim 1.0 +250 12 optimizer.lr 0.018950450281548636 +250 12 training.batch_size 2.0 +250 12 training.label_smoothing 
0.3177926002719972 +250 13 model.embedding_dim 0.0 +250 13 optimizer.lr 0.01183693851171805 +250 13 training.batch_size 0.0 +250 13 training.label_smoothing 0.5794518906622934 +250 14 model.embedding_dim 1.0 +250 14 optimizer.lr 0.001473656458259126 +250 14 training.batch_size 0.0 +250 14 training.label_smoothing 0.0033252526401684832 +250 15 model.embedding_dim 1.0 +250 15 optimizer.lr 0.002850094256373029 +250 15 training.batch_size 0.0 +250 15 training.label_smoothing 0.016114812258270845 +250 16 model.embedding_dim 2.0 +250 16 optimizer.lr 0.014163641856419372 +250 16 training.batch_size 2.0 +250 16 training.label_smoothing 0.005081599110843455 +250 17 model.embedding_dim 2.0 +250 17 optimizer.lr 0.004890718339913599 +250 17 training.batch_size 1.0 +250 17 training.label_smoothing 0.04615884253323123 +250 18 model.embedding_dim 0.0 +250 18 optimizer.lr 0.017235381398441357 +250 18 training.batch_size 1.0 +250 18 training.label_smoothing 0.009711142832921587 +250 19 model.embedding_dim 0.0 +250 19 optimizer.lr 0.001756538430776923 +250 19 training.batch_size 2.0 +250 19 training.label_smoothing 0.0030397447734587376 +250 20 model.embedding_dim 1.0 +250 20 optimizer.lr 0.0012502997272795218 +250 20 training.batch_size 1.0 +250 20 training.label_smoothing 0.045211748727171285 +250 21 model.embedding_dim 2.0 +250 21 optimizer.lr 0.0136369284285428 +250 21 training.batch_size 1.0 +250 21 training.label_smoothing 0.07203720473970257 +250 22 model.embedding_dim 1.0 +250 22 optimizer.lr 0.003437710480075143 +250 22 training.batch_size 1.0 +250 22 training.label_smoothing 0.00132311515434492 +250 23 model.embedding_dim 2.0 +250 23 optimizer.lr 0.0011116359333286988 +250 23 training.batch_size 1.0 +250 23 training.label_smoothing 0.003942058494478691 +250 24 model.embedding_dim 2.0 +250 24 optimizer.lr 0.005126127550296935 +250 24 training.batch_size 0.0 +250 24 training.label_smoothing 0.2415338946465326 +250 25 model.embedding_dim 0.0 +250 25 optimizer.lr 
0.07545127981517978 +250 25 training.batch_size 0.0 +250 25 training.label_smoothing 0.01608578931269775 +250 26 model.embedding_dim 1.0 +250 26 optimizer.lr 0.00853239426355839 +250 26 training.batch_size 2.0 +250 26 training.label_smoothing 0.001218679318023704 +250 27 model.embedding_dim 1.0 +250 27 optimizer.lr 0.0022080292015612237 +250 27 training.batch_size 1.0 +250 27 training.label_smoothing 0.24553480957589202 +250 28 model.embedding_dim 2.0 +250 28 optimizer.lr 0.00736916001809038 +250 28 training.batch_size 2.0 +250 28 training.label_smoothing 0.341553914777984 +250 29 model.embedding_dim 2.0 +250 29 optimizer.lr 0.0012520590299945028 +250 29 training.batch_size 0.0 +250 29 training.label_smoothing 0.022143794141852382 +250 30 model.embedding_dim 2.0 +250 30 optimizer.lr 0.011233065990443297 +250 30 training.batch_size 1.0 +250 30 training.label_smoothing 0.0012655511127521765 +250 31 model.embedding_dim 2.0 +250 31 optimizer.lr 0.03408229308672391 +250 31 training.batch_size 1.0 +250 31 training.label_smoothing 0.007270358914441284 +250 32 model.embedding_dim 2.0 +250 32 optimizer.lr 0.014633467604034462 +250 32 training.batch_size 0.0 +250 32 training.label_smoothing 0.03482550016242217 +250 33 model.embedding_dim 1.0 +250 33 optimizer.lr 0.0029595675900103173 +250 33 training.batch_size 0.0 +250 33 training.label_smoothing 0.31152563223409774 +250 34 model.embedding_dim 2.0 +250 34 optimizer.lr 0.06540847553707066 +250 34 training.batch_size 1.0 +250 34 training.label_smoothing 0.004550742385684322 +250 35 model.embedding_dim 2.0 +250 35 optimizer.lr 0.007723515560767275 +250 35 training.batch_size 0.0 +250 35 training.label_smoothing 0.0027247428834471493 +250 36 model.embedding_dim 2.0 +250 36 optimizer.lr 0.002967707711509219 +250 36 training.batch_size 0.0 +250 36 training.label_smoothing 0.0488854407206006 +250 37 model.embedding_dim 2.0 +250 37 optimizer.lr 0.0021750359862083304 +250 37 training.batch_size 1.0 +250 37 training.label_smoothing 
0.03287893821335854 +250 38 model.embedding_dim 1.0 +250 38 optimizer.lr 0.019490094367217072 +250 38 training.batch_size 1.0 +250 38 training.label_smoothing 0.011836647199251604 +250 39 model.embedding_dim 2.0 +250 39 optimizer.lr 0.002896888058400645 +250 39 training.batch_size 2.0 +250 39 training.label_smoothing 0.08635059143146681 +250 40 model.embedding_dim 0.0 +250 40 optimizer.lr 0.02584477361246196 +250 40 training.batch_size 2.0 +250 40 training.label_smoothing 0.004910923694529598 +250 41 model.embedding_dim 1.0 +250 41 optimizer.lr 0.0034573350750337663 +250 41 training.batch_size 0.0 +250 41 training.label_smoothing 0.013905390181489095 +250 42 model.embedding_dim 0.0 +250 42 optimizer.lr 0.0018806780192121967 +250 42 training.batch_size 0.0 +250 42 training.label_smoothing 0.6343449635999021 +250 43 model.embedding_dim 0.0 +250 43 optimizer.lr 0.0381937482830854 +250 43 training.batch_size 0.0 +250 43 training.label_smoothing 0.007242817708351617 +250 44 model.embedding_dim 1.0 +250 44 optimizer.lr 0.0020035960605773286 +250 44 training.batch_size 2.0 +250 44 training.label_smoothing 0.0031122533343520067 +250 45 model.embedding_dim 1.0 +250 45 optimizer.lr 0.0012195233522960824 +250 45 training.batch_size 1.0 +250 45 training.label_smoothing 0.22148287048889365 +250 46 model.embedding_dim 2.0 +250 46 optimizer.lr 0.0013545353290680155 +250 46 training.batch_size 1.0 +250 46 training.label_smoothing 0.8569165483171558 +250 47 model.embedding_dim 2.0 +250 47 optimizer.lr 0.0023019398241666943 +250 47 training.batch_size 1.0 +250 47 training.label_smoothing 0.010607882185658248 +250 48 model.embedding_dim 2.0 +250 48 optimizer.lr 0.014815385874991838 +250 48 training.batch_size 1.0 +250 48 training.label_smoothing 0.04915583622252531 +250 49 model.embedding_dim 1.0 +250 49 optimizer.lr 0.0030812883840068924 +250 49 training.batch_size 2.0 +250 49 training.label_smoothing 0.5560789703161391 +250 50 model.embedding_dim 2.0 +250 50 optimizer.lr 
0.011149252723500704 +250 50 training.batch_size 2.0 +250 50 training.label_smoothing 0.010370444228414539 +250 51 model.embedding_dim 2.0 +250 51 optimizer.lr 0.06515980902426541 +250 51 training.batch_size 2.0 +250 51 training.label_smoothing 0.02168703199170856 +250 52 model.embedding_dim 1.0 +250 52 optimizer.lr 0.00140272606643963 +250 52 training.batch_size 1.0 +250 52 training.label_smoothing 0.0011644227776719878 +250 53 model.embedding_dim 1.0 +250 53 optimizer.lr 0.04876471356649238 +250 53 training.batch_size 1.0 +250 53 training.label_smoothing 0.1168251289584039 +250 54 model.embedding_dim 1.0 +250 54 optimizer.lr 0.01698522195637663 +250 54 training.batch_size 0.0 +250 54 training.label_smoothing 0.0038999766935118776 +250 55 model.embedding_dim 1.0 +250 55 optimizer.lr 0.0032914821344473366 +250 55 training.batch_size 0.0 +250 55 training.label_smoothing 0.6035594369532222 +250 56 model.embedding_dim 2.0 +250 56 optimizer.lr 0.010018228384554628 +250 56 training.batch_size 1.0 +250 56 training.label_smoothing 0.0060177363789756634 +250 57 model.embedding_dim 1.0 +250 57 optimizer.lr 0.08291036622542536 +250 57 training.batch_size 0.0 +250 57 training.label_smoothing 0.034221595244879895 +250 58 model.embedding_dim 1.0 +250 58 optimizer.lr 0.011532554885209453 +250 58 training.batch_size 0.0 +250 58 training.label_smoothing 0.11684165016286195 +250 59 model.embedding_dim 2.0 +250 59 optimizer.lr 0.001956777106583406 +250 59 training.batch_size 0.0 +250 59 training.label_smoothing 0.02939226562759239 +250 60 model.embedding_dim 0.0 +250 60 optimizer.lr 0.018678939809033696 +250 60 training.batch_size 0.0 +250 60 training.label_smoothing 0.005969935260042436 +250 61 model.embedding_dim 1.0 +250 61 optimizer.lr 0.0010476235273913879 +250 61 training.batch_size 1.0 +250 61 training.label_smoothing 0.010042870052322649 +250 62 model.embedding_dim 1.0 +250 62 optimizer.lr 0.0048629517026260935 +250 62 training.batch_size 1.0 +250 62 training.label_smoothing 
0.029189809124084844 +250 63 model.embedding_dim 0.0 +250 63 optimizer.lr 0.0017269635934659127 +250 63 training.batch_size 0.0 +250 63 training.label_smoothing 0.012091630656959828 +250 64 model.embedding_dim 2.0 +250 64 optimizer.lr 0.013605396678687919 +250 64 training.batch_size 1.0 +250 64 training.label_smoothing 0.011703264657652899 +250 65 model.embedding_dim 2.0 +250 65 optimizer.lr 0.06908016207230325 +250 65 training.batch_size 1.0 +250 65 training.label_smoothing 0.012813769966549772 +250 66 model.embedding_dim 0.0 +250 66 optimizer.lr 0.057389225177688394 +250 66 training.batch_size 1.0 +250 66 training.label_smoothing 0.22789795632034276 +250 67 model.embedding_dim 0.0 +250 67 optimizer.lr 0.02589552324927371 +250 67 training.batch_size 0.0 +250 67 training.label_smoothing 0.0026543185019352058 +250 68 model.embedding_dim 0.0 +250 68 optimizer.lr 0.0032658566446757613 +250 68 training.batch_size 2.0 +250 68 training.label_smoothing 0.03155217846575589 +250 69 model.embedding_dim 2.0 +250 69 optimizer.lr 0.005087795105815058 +250 69 training.batch_size 2.0 +250 69 training.label_smoothing 0.0019007792853096646 +250 70 model.embedding_dim 1.0 +250 70 optimizer.lr 0.027631345251279964 +250 70 training.batch_size 2.0 +250 70 training.label_smoothing 0.6200525919578078 +250 71 model.embedding_dim 2.0 +250 71 optimizer.lr 0.02125282249755313 +250 71 training.batch_size 1.0 +250 71 training.label_smoothing 0.025284535193986712 +250 72 model.embedding_dim 2.0 +250 72 optimizer.lr 0.056664757572044026 +250 72 training.batch_size 0.0 +250 72 training.label_smoothing 0.27834739485029114 +250 73 model.embedding_dim 1.0 +250 73 optimizer.lr 0.013745419701915892 +250 73 training.batch_size 1.0 +250 73 training.label_smoothing 0.027460125985223592 +250 74 model.embedding_dim 1.0 +250 74 optimizer.lr 0.03426405149509384 +250 74 training.batch_size 2.0 +250 74 training.label_smoothing 0.10331914111946544 +250 75 model.embedding_dim 1.0 +250 75 optimizer.lr 
0.03374647357689557 +250 75 training.batch_size 2.0 +250 75 training.label_smoothing 0.09418677160366003 +250 76 model.embedding_dim 2.0 +250 76 optimizer.lr 0.006340064931317225 +250 76 training.batch_size 0.0 +250 76 training.label_smoothing 0.12474192305030475 +250 77 model.embedding_dim 0.0 +250 77 optimizer.lr 0.0017192471969526633 +250 77 training.batch_size 0.0 +250 77 training.label_smoothing 0.006348344631804681 +250 78 model.embedding_dim 2.0 +250 78 optimizer.lr 0.019929894106624903 +250 78 training.batch_size 0.0 +250 78 training.label_smoothing 0.028654744655390592 +250 79 model.embedding_dim 2.0 +250 79 optimizer.lr 0.02519642680180454 +250 79 training.batch_size 2.0 +250 79 training.label_smoothing 0.2145163071043326 +250 80 model.embedding_dim 0.0 +250 80 optimizer.lr 0.0010221711506727728 +250 80 training.batch_size 0.0 +250 80 training.label_smoothing 0.27398849604190945 +250 81 model.embedding_dim 0.0 +250 81 optimizer.lr 0.001567722167945888 +250 81 training.batch_size 1.0 +250 81 training.label_smoothing 0.1461442352572102 +250 82 model.embedding_dim 1.0 +250 82 optimizer.lr 0.008911959359339633 +250 82 training.batch_size 1.0 +250 82 training.label_smoothing 0.02312544225761828 +250 83 model.embedding_dim 2.0 +250 83 optimizer.lr 0.004229845800791934 +250 83 training.batch_size 1.0 +250 83 training.label_smoothing 0.015326254285947529 +250 84 model.embedding_dim 2.0 +250 84 optimizer.lr 0.008141740816775728 +250 84 training.batch_size 2.0 +250 84 training.label_smoothing 0.06931794776224373 +250 85 model.embedding_dim 2.0 +250 85 optimizer.lr 0.010805977837156063 +250 85 training.batch_size 1.0 +250 85 training.label_smoothing 0.013896948658902925 +250 86 model.embedding_dim 0.0 +250 86 optimizer.lr 0.018665838630608736 +250 86 training.batch_size 1.0 +250 86 training.label_smoothing 0.2515087814275349 +250 87 model.embedding_dim 0.0 +250 87 optimizer.lr 0.011536599656956832 +250 87 training.batch_size 0.0 +250 87 training.label_smoothing 
0.013570927358450677 +250 88 model.embedding_dim 0.0 +250 88 optimizer.lr 0.09895826027478695 +250 88 training.batch_size 2.0 +250 88 training.label_smoothing 0.02040883290952522 +250 89 model.embedding_dim 2.0 +250 89 optimizer.lr 0.018582219595671266 +250 89 training.batch_size 0.0 +250 89 training.label_smoothing 0.051029510860915714 +250 90 model.embedding_dim 2.0 +250 90 optimizer.lr 0.04806469924390702 +250 90 training.batch_size 0.0 +250 90 training.label_smoothing 0.0010366784966448432 +250 91 model.embedding_dim 2.0 +250 91 optimizer.lr 0.002067397739032821 +250 91 training.batch_size 1.0 +250 91 training.label_smoothing 0.01998373421086594 +250 92 model.embedding_dim 2.0 +250 92 optimizer.lr 0.002825719590338312 +250 92 training.batch_size 0.0 +250 92 training.label_smoothing 0.001046264702257221 +250 93 model.embedding_dim 2.0 +250 93 optimizer.lr 0.003456730863740891 +250 93 training.batch_size 0.0 +250 93 training.label_smoothing 0.05778623681089483 +250 94 model.embedding_dim 1.0 +250 94 optimizer.lr 0.005684049265657455 +250 94 training.batch_size 2.0 +250 94 training.label_smoothing 0.1417733717561446 +250 95 model.embedding_dim 1.0 +250 95 optimizer.lr 0.004066188816274504 +250 95 training.batch_size 0.0 +250 95 training.label_smoothing 0.009379153054824733 +250 96 model.embedding_dim 0.0 +250 96 optimizer.lr 0.08835990133129452 +250 96 training.batch_size 2.0 +250 96 training.label_smoothing 0.03657605640747413 +250 97 model.embedding_dim 1.0 +250 97 optimizer.lr 0.014778710801975703 +250 97 training.batch_size 2.0 +250 97 training.label_smoothing 0.4378263404653404 +250 98 model.embedding_dim 1.0 +250 98 optimizer.lr 0.002019972637598254 +250 98 training.batch_size 1.0 +250 98 training.label_smoothing 0.18098121050508825 +250 99 model.embedding_dim 0.0 +250 99 optimizer.lr 0.07129483054954137 +250 99 training.batch_size 0.0 +250 99 training.label_smoothing 0.009966909392329268 +250 100 model.embedding_dim 0.0 +250 100 optimizer.lr 
0.09361478844518885 +250 100 training.batch_size 0.0 +250 100 training.label_smoothing 0.0020334191226262835 +250 1 dataset """kinships""" +250 1 model """ermlp""" +250 1 loss """crossentropy""" +250 1 regularizer """no""" +250 1 optimizer """adam""" +250 1 training_loop """lcwa""" +250 1 evaluator """rankbased""" +250 2 dataset """kinships""" +250 2 model """ermlp""" +250 2 loss """crossentropy""" +250 2 regularizer """no""" +250 2 optimizer """adam""" +250 2 training_loop """lcwa""" +250 2 evaluator """rankbased""" +250 3 dataset """kinships""" +250 3 model """ermlp""" +250 3 loss """crossentropy""" +250 3 regularizer """no""" +250 3 optimizer """adam""" +250 3 training_loop """lcwa""" +250 3 evaluator """rankbased""" +250 4 dataset """kinships""" +250 4 model """ermlp""" +250 4 loss """crossentropy""" +250 4 regularizer """no""" +250 4 optimizer """adam""" +250 4 training_loop """lcwa""" +250 4 evaluator """rankbased""" +250 5 dataset """kinships""" +250 5 model """ermlp""" +250 5 loss """crossentropy""" +250 5 regularizer """no""" +250 5 optimizer """adam""" +250 5 training_loop """lcwa""" +250 5 evaluator """rankbased""" +250 6 dataset """kinships""" +250 6 model """ermlp""" +250 6 loss """crossentropy""" +250 6 regularizer """no""" +250 6 optimizer """adam""" +250 6 training_loop """lcwa""" +250 6 evaluator """rankbased""" +250 7 dataset """kinships""" +250 7 model """ermlp""" +250 7 loss """crossentropy""" +250 7 regularizer """no""" +250 7 optimizer """adam""" +250 7 training_loop """lcwa""" +250 7 evaluator """rankbased""" +250 8 dataset """kinships""" +250 8 model """ermlp""" +250 8 loss """crossentropy""" +250 8 regularizer """no""" +250 8 optimizer """adam""" +250 8 training_loop """lcwa""" +250 8 evaluator """rankbased""" +250 9 dataset """kinships""" +250 9 model """ermlp""" +250 9 loss """crossentropy""" +250 9 regularizer """no""" +250 9 optimizer """adam""" +250 9 training_loop """lcwa""" +250 9 evaluator """rankbased""" +250 10 dataset 
"""kinships""" +250 10 model """ermlp""" +250 10 loss """crossentropy""" +250 10 regularizer """no""" +250 10 optimizer """adam""" +250 10 training_loop """lcwa""" +250 10 evaluator """rankbased""" +250 11 dataset """kinships""" +250 11 model """ermlp""" +250 11 loss """crossentropy""" +250 11 regularizer """no""" +250 11 optimizer """adam""" +250 11 training_loop """lcwa""" +250 11 evaluator """rankbased""" +250 12 dataset """kinships""" +250 12 model """ermlp""" +250 12 loss """crossentropy""" +250 12 regularizer """no""" +250 12 optimizer """adam""" +250 12 training_loop """lcwa""" +250 12 evaluator """rankbased""" +250 13 dataset """kinships""" +250 13 model """ermlp""" +250 13 loss """crossentropy""" +250 13 regularizer """no""" +250 13 optimizer """adam""" +250 13 training_loop """lcwa""" +250 13 evaluator """rankbased""" +250 14 dataset """kinships""" +250 14 model """ermlp""" +250 14 loss """crossentropy""" +250 14 regularizer """no""" +250 14 optimizer """adam""" +250 14 training_loop """lcwa""" +250 14 evaluator """rankbased""" +250 15 dataset """kinships""" +250 15 model """ermlp""" +250 15 loss """crossentropy""" +250 15 regularizer """no""" +250 15 optimizer """adam""" +250 15 training_loop """lcwa""" +250 15 evaluator """rankbased""" +250 16 dataset """kinships""" +250 16 model """ermlp""" +250 16 loss """crossentropy""" +250 16 regularizer """no""" +250 16 optimizer """adam""" +250 16 training_loop """lcwa""" +250 16 evaluator """rankbased""" +250 17 dataset """kinships""" +250 17 model """ermlp""" +250 17 loss """crossentropy""" +250 17 regularizer """no""" +250 17 optimizer """adam""" +250 17 training_loop """lcwa""" +250 17 evaluator """rankbased""" +250 18 dataset """kinships""" +250 18 model """ermlp""" +250 18 loss """crossentropy""" +250 18 regularizer """no""" +250 18 optimizer """adam""" +250 18 training_loop """lcwa""" +250 18 evaluator """rankbased""" +250 19 dataset """kinships""" +250 19 model """ermlp""" +250 19 loss """crossentropy""" 
+250 19 regularizer """no""" +250 19 optimizer """adam""" +250 19 training_loop """lcwa""" +250 19 evaluator """rankbased""" +250 20 dataset """kinships""" +250 20 model """ermlp""" +250 20 loss """crossentropy""" +250 20 regularizer """no""" +250 20 optimizer """adam""" +250 20 training_loop """lcwa""" +250 20 evaluator """rankbased""" +250 21 dataset """kinships""" +250 21 model """ermlp""" +250 21 loss """crossentropy""" +250 21 regularizer """no""" +250 21 optimizer """adam""" +250 21 training_loop """lcwa""" +250 21 evaluator """rankbased""" +250 22 dataset """kinships""" +250 22 model """ermlp""" +250 22 loss """crossentropy""" +250 22 regularizer """no""" +250 22 optimizer """adam""" +250 22 training_loop """lcwa""" +250 22 evaluator """rankbased""" +250 23 dataset """kinships""" +250 23 model """ermlp""" +250 23 loss """crossentropy""" +250 23 regularizer """no""" +250 23 optimizer """adam""" +250 23 training_loop """lcwa""" +250 23 evaluator """rankbased""" +250 24 dataset """kinships""" +250 24 model """ermlp""" +250 24 loss """crossentropy""" +250 24 regularizer """no""" +250 24 optimizer """adam""" +250 24 training_loop """lcwa""" +250 24 evaluator """rankbased""" +250 25 dataset """kinships""" +250 25 model """ermlp""" +250 25 loss """crossentropy""" +250 25 regularizer """no""" +250 25 optimizer """adam""" +250 25 training_loop """lcwa""" +250 25 evaluator """rankbased""" +250 26 dataset """kinships""" +250 26 model """ermlp""" +250 26 loss """crossentropy""" +250 26 regularizer """no""" +250 26 optimizer """adam""" +250 26 training_loop """lcwa""" +250 26 evaluator """rankbased""" +250 27 dataset """kinships""" +250 27 model """ermlp""" +250 27 loss """crossentropy""" +250 27 regularizer """no""" +250 27 optimizer """adam""" +250 27 training_loop """lcwa""" +250 27 evaluator """rankbased""" +250 28 dataset """kinships""" +250 28 model """ermlp""" +250 28 loss """crossentropy""" +250 28 regularizer """no""" +250 28 optimizer """adam""" +250 28 
training_loop """lcwa""" +250 28 evaluator """rankbased""" +250 29 dataset """kinships""" +250 29 model """ermlp""" +250 29 loss """crossentropy""" +250 29 regularizer """no""" +250 29 optimizer """adam""" +250 29 training_loop """lcwa""" +250 29 evaluator """rankbased""" +250 30 dataset """kinships""" +250 30 model """ermlp""" +250 30 loss """crossentropy""" +250 30 regularizer """no""" +250 30 optimizer """adam""" +250 30 training_loop """lcwa""" +250 30 evaluator """rankbased""" +250 31 dataset """kinships""" +250 31 model """ermlp""" +250 31 loss """crossentropy""" +250 31 regularizer """no""" +250 31 optimizer """adam""" +250 31 training_loop """lcwa""" +250 31 evaluator """rankbased""" +250 32 dataset """kinships""" +250 32 model """ermlp""" +250 32 loss """crossentropy""" +250 32 regularizer """no""" +250 32 optimizer """adam""" +250 32 training_loop """lcwa""" +250 32 evaluator """rankbased""" +250 33 dataset """kinships""" +250 33 model """ermlp""" +250 33 loss """crossentropy""" +250 33 regularizer """no""" +250 33 optimizer """adam""" +250 33 training_loop """lcwa""" +250 33 evaluator """rankbased""" +250 34 dataset """kinships""" +250 34 model """ermlp""" +250 34 loss """crossentropy""" +250 34 regularizer """no""" +250 34 optimizer """adam""" +250 34 training_loop """lcwa""" +250 34 evaluator """rankbased""" +250 35 dataset """kinships""" +250 35 model """ermlp""" +250 35 loss """crossentropy""" +250 35 regularizer """no""" +250 35 optimizer """adam""" +250 35 training_loop """lcwa""" +250 35 evaluator """rankbased""" +250 36 dataset """kinships""" +250 36 model """ermlp""" +250 36 loss """crossentropy""" +250 36 regularizer """no""" +250 36 optimizer """adam""" +250 36 training_loop """lcwa""" +250 36 evaluator """rankbased""" +250 37 dataset """kinships""" +250 37 model """ermlp""" +250 37 loss """crossentropy""" +250 37 regularizer """no""" +250 37 optimizer """adam""" +250 37 training_loop """lcwa""" +250 37 evaluator """rankbased""" +250 38 
dataset """kinships""" +250 38 model """ermlp""" +250 38 loss """crossentropy""" +250 38 regularizer """no""" +250 38 optimizer """adam""" +250 38 training_loop """lcwa""" +250 38 evaluator """rankbased""" +250 39 dataset """kinships""" +250 39 model """ermlp""" +250 39 loss """crossentropy""" +250 39 regularizer """no""" +250 39 optimizer """adam""" +250 39 training_loop """lcwa""" +250 39 evaluator """rankbased""" +250 40 dataset """kinships""" +250 40 model """ermlp""" +250 40 loss """crossentropy""" +250 40 regularizer """no""" +250 40 optimizer """adam""" +250 40 training_loop """lcwa""" +250 40 evaluator """rankbased""" +250 41 dataset """kinships""" +250 41 model """ermlp""" +250 41 loss """crossentropy""" +250 41 regularizer """no""" +250 41 optimizer """adam""" +250 41 training_loop """lcwa""" +250 41 evaluator """rankbased""" +250 42 dataset """kinships""" +250 42 model """ermlp""" +250 42 loss """crossentropy""" +250 42 regularizer """no""" +250 42 optimizer """adam""" +250 42 training_loop """lcwa""" +250 42 evaluator """rankbased""" +250 43 dataset """kinships""" +250 43 model """ermlp""" +250 43 loss """crossentropy""" +250 43 regularizer """no""" +250 43 optimizer """adam""" +250 43 training_loop """lcwa""" +250 43 evaluator """rankbased""" +250 44 dataset """kinships""" +250 44 model """ermlp""" +250 44 loss """crossentropy""" +250 44 regularizer """no""" +250 44 optimizer """adam""" +250 44 training_loop """lcwa""" +250 44 evaluator """rankbased""" +250 45 dataset """kinships""" +250 45 model """ermlp""" +250 45 loss """crossentropy""" +250 45 regularizer """no""" +250 45 optimizer """adam""" +250 45 training_loop """lcwa""" +250 45 evaluator """rankbased""" +250 46 dataset """kinships""" +250 46 model """ermlp""" +250 46 loss """crossentropy""" +250 46 regularizer """no""" +250 46 optimizer """adam""" +250 46 training_loop """lcwa""" +250 46 evaluator """rankbased""" +250 47 dataset """kinships""" +250 47 model """ermlp""" +250 47 loss 
"""crossentropy""" +250 47 regularizer """no""" +250 47 optimizer """adam""" +250 47 training_loop """lcwa""" +250 47 evaluator """rankbased""" +250 48 dataset """kinships""" +250 48 model """ermlp""" +250 48 loss """crossentropy""" +250 48 regularizer """no""" +250 48 optimizer """adam""" +250 48 training_loop """lcwa""" +250 48 evaluator """rankbased""" +250 49 dataset """kinships""" +250 49 model """ermlp""" +250 49 loss """crossentropy""" +250 49 regularizer """no""" +250 49 optimizer """adam""" +250 49 training_loop """lcwa""" +250 49 evaluator """rankbased""" +250 50 dataset """kinships""" +250 50 model """ermlp""" +250 50 loss """crossentropy""" +250 50 regularizer """no""" +250 50 optimizer """adam""" +250 50 training_loop """lcwa""" +250 50 evaluator """rankbased""" +250 51 dataset """kinships""" +250 51 model """ermlp""" +250 51 loss """crossentropy""" +250 51 regularizer """no""" +250 51 optimizer """adam""" +250 51 training_loop """lcwa""" +250 51 evaluator """rankbased""" +250 52 dataset """kinships""" +250 52 model """ermlp""" +250 52 loss """crossentropy""" +250 52 regularizer """no""" +250 52 optimizer """adam""" +250 52 training_loop """lcwa""" +250 52 evaluator """rankbased""" +250 53 dataset """kinships""" +250 53 model """ermlp""" +250 53 loss """crossentropy""" +250 53 regularizer """no""" +250 53 optimizer """adam""" +250 53 training_loop """lcwa""" +250 53 evaluator """rankbased""" +250 54 dataset """kinships""" +250 54 model """ermlp""" +250 54 loss """crossentropy""" +250 54 regularizer """no""" +250 54 optimizer """adam""" +250 54 training_loop """lcwa""" +250 54 evaluator """rankbased""" +250 55 dataset """kinships""" +250 55 model """ermlp""" +250 55 loss """crossentropy""" +250 55 regularizer """no""" +250 55 optimizer """adam""" +250 55 training_loop """lcwa""" +250 55 evaluator """rankbased""" +250 56 dataset """kinships""" +250 56 model """ermlp""" +250 56 loss """crossentropy""" +250 56 regularizer """no""" +250 56 optimizer 
"""adam""" +250 56 training_loop """lcwa""" +250 56 evaluator """rankbased""" +250 57 dataset """kinships""" +250 57 model """ermlp""" +250 57 loss """crossentropy""" +250 57 regularizer """no""" +250 57 optimizer """adam""" +250 57 training_loop """lcwa""" +250 57 evaluator """rankbased""" +250 58 dataset """kinships""" +250 58 model """ermlp""" +250 58 loss """crossentropy""" +250 58 regularizer """no""" +250 58 optimizer """adam""" +250 58 training_loop """lcwa""" +250 58 evaluator """rankbased""" +250 59 dataset """kinships""" +250 59 model """ermlp""" +250 59 loss """crossentropy""" +250 59 regularizer """no""" +250 59 optimizer """adam""" +250 59 training_loop """lcwa""" +250 59 evaluator """rankbased""" +250 60 dataset """kinships""" +250 60 model """ermlp""" +250 60 loss """crossentropy""" +250 60 regularizer """no""" +250 60 optimizer """adam""" +250 60 training_loop """lcwa""" +250 60 evaluator """rankbased""" +250 61 dataset """kinships""" +250 61 model """ermlp""" +250 61 loss """crossentropy""" +250 61 regularizer """no""" +250 61 optimizer """adam""" +250 61 training_loop """lcwa""" +250 61 evaluator """rankbased""" +250 62 dataset """kinships""" +250 62 model """ermlp""" +250 62 loss """crossentropy""" +250 62 regularizer """no""" +250 62 optimizer """adam""" +250 62 training_loop """lcwa""" +250 62 evaluator """rankbased""" +250 63 dataset """kinships""" +250 63 model """ermlp""" +250 63 loss """crossentropy""" +250 63 regularizer """no""" +250 63 optimizer """adam""" +250 63 training_loop """lcwa""" +250 63 evaluator """rankbased""" +250 64 dataset """kinships""" +250 64 model """ermlp""" +250 64 loss """crossentropy""" +250 64 regularizer """no""" +250 64 optimizer """adam""" +250 64 training_loop """lcwa""" +250 64 evaluator """rankbased""" +250 65 dataset """kinships""" +250 65 model """ermlp""" +250 65 loss """crossentropy""" +250 65 regularizer """no""" +250 65 optimizer """adam""" +250 65 training_loop """lcwa""" +250 65 evaluator 
"""rankbased""" +250 66 dataset """kinships""" +250 66 model """ermlp""" +250 66 loss """crossentropy""" +250 66 regularizer """no""" +250 66 optimizer """adam""" +250 66 training_loop """lcwa""" +250 66 evaluator """rankbased""" +250 67 dataset """kinships""" +250 67 model """ermlp""" +250 67 loss """crossentropy""" +250 67 regularizer """no""" +250 67 optimizer """adam""" +250 67 training_loop """lcwa""" +250 67 evaluator """rankbased""" +250 68 dataset """kinships""" +250 68 model """ermlp""" +250 68 loss """crossentropy""" +250 68 regularizer """no""" +250 68 optimizer """adam""" +250 68 training_loop """lcwa""" +250 68 evaluator """rankbased""" +250 69 dataset """kinships""" +250 69 model """ermlp""" +250 69 loss """crossentropy""" +250 69 regularizer """no""" +250 69 optimizer """adam""" +250 69 training_loop """lcwa""" +250 69 evaluator """rankbased""" +250 70 dataset """kinships""" +250 70 model """ermlp""" +250 70 loss """crossentropy""" +250 70 regularizer """no""" +250 70 optimizer """adam""" +250 70 training_loop """lcwa""" +250 70 evaluator """rankbased""" +250 71 dataset """kinships""" +250 71 model """ermlp""" +250 71 loss """crossentropy""" +250 71 regularizer """no""" +250 71 optimizer """adam""" +250 71 training_loop """lcwa""" +250 71 evaluator """rankbased""" +250 72 dataset """kinships""" +250 72 model """ermlp""" +250 72 loss """crossentropy""" +250 72 regularizer """no""" +250 72 optimizer """adam""" +250 72 training_loop """lcwa""" +250 72 evaluator """rankbased""" +250 73 dataset """kinships""" +250 73 model """ermlp""" +250 73 loss """crossentropy""" +250 73 regularizer """no""" +250 73 optimizer """adam""" +250 73 training_loop """lcwa""" +250 73 evaluator """rankbased""" +250 74 dataset """kinships""" +250 74 model """ermlp""" +250 74 loss """crossentropy""" +250 74 regularizer """no""" +250 74 optimizer """adam""" +250 74 training_loop """lcwa""" +250 74 evaluator """rankbased""" +250 75 dataset """kinships""" +250 75 model """ermlp""" 
+250 75 loss """crossentropy""" +250 75 regularizer """no""" +250 75 optimizer """adam""" +250 75 training_loop """lcwa""" +250 75 evaluator """rankbased""" +250 76 dataset """kinships""" +250 76 model """ermlp""" +250 76 loss """crossentropy""" +250 76 regularizer """no""" +250 76 optimizer """adam""" +250 76 training_loop """lcwa""" +250 76 evaluator """rankbased""" +250 77 dataset """kinships""" +250 77 model """ermlp""" +250 77 loss """crossentropy""" +250 77 regularizer """no""" +250 77 optimizer """adam""" +250 77 training_loop """lcwa""" +250 77 evaluator """rankbased""" +250 78 dataset """kinships""" +250 78 model """ermlp""" +250 78 loss """crossentropy""" +250 78 regularizer """no""" +250 78 optimizer """adam""" +250 78 training_loop """lcwa""" +250 78 evaluator """rankbased""" +250 79 dataset """kinships""" +250 79 model """ermlp""" +250 79 loss """crossentropy""" +250 79 regularizer """no""" +250 79 optimizer """adam""" +250 79 training_loop """lcwa""" +250 79 evaluator """rankbased""" +250 80 dataset """kinships""" +250 80 model """ermlp""" +250 80 loss """crossentropy""" +250 80 regularizer """no""" +250 80 optimizer """adam""" +250 80 training_loop """lcwa""" +250 80 evaluator """rankbased""" +250 81 dataset """kinships""" +250 81 model """ermlp""" +250 81 loss """crossentropy""" +250 81 regularizer """no""" +250 81 optimizer """adam""" +250 81 training_loop """lcwa""" +250 81 evaluator """rankbased""" +250 82 dataset """kinships""" +250 82 model """ermlp""" +250 82 loss """crossentropy""" +250 82 regularizer """no""" +250 82 optimizer """adam""" +250 82 training_loop """lcwa""" +250 82 evaluator """rankbased""" +250 83 dataset """kinships""" +250 83 model """ermlp""" +250 83 loss """crossentropy""" +250 83 regularizer """no""" +250 83 optimizer """adam""" +250 83 training_loop """lcwa""" +250 83 evaluator """rankbased""" +250 84 dataset """kinships""" +250 84 model """ermlp""" +250 84 loss """crossentropy""" +250 84 regularizer """no""" +250 84 
optimizer """adam""" +250 84 training_loop """lcwa""" +250 84 evaluator """rankbased""" +250 85 dataset """kinships""" +250 85 model """ermlp""" +250 85 loss """crossentropy""" +250 85 regularizer """no""" +250 85 optimizer """adam""" +250 85 training_loop """lcwa""" +250 85 evaluator """rankbased""" +250 86 dataset """kinships""" +250 86 model """ermlp""" +250 86 loss """crossentropy""" +250 86 regularizer """no""" +250 86 optimizer """adam""" +250 86 training_loop """lcwa""" +250 86 evaluator """rankbased""" +250 87 dataset """kinships""" +250 87 model """ermlp""" +250 87 loss """crossentropy""" +250 87 regularizer """no""" +250 87 optimizer """adam""" +250 87 training_loop """lcwa""" +250 87 evaluator """rankbased""" +250 88 dataset """kinships""" +250 88 model """ermlp""" +250 88 loss """crossentropy""" +250 88 regularizer """no""" +250 88 optimizer """adam""" +250 88 training_loop """lcwa""" +250 88 evaluator """rankbased""" +250 89 dataset """kinships""" +250 89 model """ermlp""" +250 89 loss """crossentropy""" +250 89 regularizer """no""" +250 89 optimizer """adam""" +250 89 training_loop """lcwa""" +250 89 evaluator """rankbased""" +250 90 dataset """kinships""" +250 90 model """ermlp""" +250 90 loss """crossentropy""" +250 90 regularizer """no""" +250 90 optimizer """adam""" +250 90 training_loop """lcwa""" +250 90 evaluator """rankbased""" +250 91 dataset """kinships""" +250 91 model """ermlp""" +250 91 loss """crossentropy""" +250 91 regularizer """no""" +250 91 optimizer """adam""" +250 91 training_loop """lcwa""" +250 91 evaluator """rankbased""" +250 92 dataset """kinships""" +250 92 model """ermlp""" +250 92 loss """crossentropy""" +250 92 regularizer """no""" +250 92 optimizer """adam""" +250 92 training_loop """lcwa""" +250 92 evaluator """rankbased""" +250 93 dataset """kinships""" +250 93 model """ermlp""" +250 93 loss """crossentropy""" +250 93 regularizer """no""" +250 93 optimizer """adam""" +250 93 training_loop """lcwa""" +250 93 evaluator 
"""rankbased""" +250 94 dataset """kinships""" +250 94 model """ermlp""" +250 94 loss """crossentropy""" +250 94 regularizer """no""" +250 94 optimizer """adam""" +250 94 training_loop """lcwa""" +250 94 evaluator """rankbased""" +250 95 dataset """kinships""" +250 95 model """ermlp""" +250 95 loss """crossentropy""" +250 95 regularizer """no""" +250 95 optimizer """adam""" +250 95 training_loop """lcwa""" +250 95 evaluator """rankbased""" +250 96 dataset """kinships""" +250 96 model """ermlp""" +250 96 loss """crossentropy""" +250 96 regularizer """no""" +250 96 optimizer """adam""" +250 96 training_loop """lcwa""" +250 96 evaluator """rankbased""" +250 97 dataset """kinships""" +250 97 model """ermlp""" +250 97 loss """crossentropy""" +250 97 regularizer """no""" +250 97 optimizer """adam""" +250 97 training_loop """lcwa""" +250 97 evaluator """rankbased""" +250 98 dataset """kinships""" +250 98 model """ermlp""" +250 98 loss """crossentropy""" +250 98 regularizer """no""" +250 98 optimizer """adam""" +250 98 training_loop """lcwa""" +250 98 evaluator """rankbased""" +250 99 dataset """kinships""" +250 99 model """ermlp""" +250 99 loss """crossentropy""" +250 99 regularizer """no""" +250 99 optimizer """adam""" +250 99 training_loop """lcwa""" +250 99 evaluator """rankbased""" +250 100 dataset """kinships""" +250 100 model """ermlp""" +250 100 loss """crossentropy""" +250 100 regularizer """no""" +250 100 optimizer """adam""" +250 100 training_loop """lcwa""" +250 100 evaluator """rankbased""" +251 1 model.embedding_dim 2.0 +251 1 optimizer.lr 0.0029633678464359814 +251 1 training.batch_size 0.0 +251 1 training.label_smoothing 0.6113738664984594 +251 2 model.embedding_dim 2.0 +251 2 optimizer.lr 0.0024405686679858374 +251 2 training.batch_size 0.0 +251 2 training.label_smoothing 0.007426426922560511 +251 3 model.embedding_dim 2.0 +251 3 optimizer.lr 0.0017518662295776275 +251 3 training.batch_size 1.0 +251 3 training.label_smoothing 0.0010199164596094844 +251 4 
model.embedding_dim 2.0 +251 4 optimizer.lr 0.07932486537302265 +251 4 training.batch_size 0.0 +251 4 training.label_smoothing 0.05961668747375181 +251 5 model.embedding_dim 2.0 +251 5 optimizer.lr 0.004683869662093894 +251 5 training.batch_size 0.0 +251 5 training.label_smoothing 0.015201295298666042 +251 6 model.embedding_dim 0.0 +251 6 optimizer.lr 0.013801165067038275 +251 6 training.batch_size 0.0 +251 6 training.label_smoothing 0.0012312670852265004 +251 7 model.embedding_dim 2.0 +251 7 optimizer.lr 0.035278346340220944 +251 7 training.batch_size 0.0 +251 7 training.label_smoothing 0.004477572130952695 +251 8 model.embedding_dim 1.0 +251 8 optimizer.lr 0.020696009094015082 +251 8 training.batch_size 0.0 +251 8 training.label_smoothing 0.6811511888964376 +251 9 model.embedding_dim 2.0 +251 9 optimizer.lr 0.0032232684771111263 +251 9 training.batch_size 2.0 +251 9 training.label_smoothing 0.0392803906562793 +251 10 model.embedding_dim 2.0 +251 10 optimizer.lr 0.03366396637303282 +251 10 training.batch_size 0.0 +251 10 training.label_smoothing 0.5937239414018962 +251 11 model.embedding_dim 2.0 +251 11 optimizer.lr 0.0169462588723758 +251 11 training.batch_size 0.0 +251 11 training.label_smoothing 0.02689057841504115 +251 12 model.embedding_dim 0.0 +251 12 optimizer.lr 0.0017721176943035902 +251 12 training.batch_size 0.0 +251 12 training.label_smoothing 0.09133382647870775 +251 13 model.embedding_dim 0.0 +251 13 optimizer.lr 0.005950421406778224 +251 13 training.batch_size 0.0 +251 13 training.label_smoothing 0.004298298443692591 +251 14 model.embedding_dim 1.0 +251 14 optimizer.lr 0.06723371298456371 +251 14 training.batch_size 1.0 +251 14 training.label_smoothing 0.019624028615012806 +251 15 model.embedding_dim 0.0 +251 15 optimizer.lr 0.02927453069360234 +251 15 training.batch_size 2.0 +251 15 training.label_smoothing 0.04309473107894987 +251 16 model.embedding_dim 0.0 +251 16 optimizer.lr 0.05351465051734167 +251 16 training.batch_size 1.0 +251 16 
training.label_smoothing 0.006098875001624397 +251 17 model.embedding_dim 0.0 +251 17 optimizer.lr 0.0025858392282739253 +251 17 training.batch_size 1.0 +251 17 training.label_smoothing 0.8060103996953562 +251 18 model.embedding_dim 2.0 +251 18 optimizer.lr 0.004227669585717243 +251 18 training.batch_size 0.0 +251 18 training.label_smoothing 0.0011312253222365396 +251 19 model.embedding_dim 1.0 +251 19 optimizer.lr 0.001730139287660261 +251 19 training.batch_size 1.0 +251 19 training.label_smoothing 0.034509711570108674 +251 20 model.embedding_dim 1.0 +251 20 optimizer.lr 0.08405869544881223 +251 20 training.batch_size 0.0 +251 20 training.label_smoothing 0.13341010340251594 +251 21 model.embedding_dim 1.0 +251 21 optimizer.lr 0.012560733131662415 +251 21 training.batch_size 1.0 +251 21 training.label_smoothing 0.030151051940755676 +251 22 model.embedding_dim 2.0 +251 22 optimizer.lr 0.0028211884416646715 +251 22 training.batch_size 1.0 +251 22 training.label_smoothing 0.1578504761051818 +251 23 model.embedding_dim 1.0 +251 23 optimizer.lr 0.08515604682958822 +251 23 training.batch_size 0.0 +251 23 training.label_smoothing 0.2929037371826151 +251 24 model.embedding_dim 1.0 +251 24 optimizer.lr 0.02070461708481971 +251 24 training.batch_size 1.0 +251 24 training.label_smoothing 0.043398067885177284 +251 25 model.embedding_dim 1.0 +251 25 optimizer.lr 0.020731074002024702 +251 25 training.batch_size 2.0 +251 25 training.label_smoothing 0.0038075270249984857 +251 26 model.embedding_dim 0.0 +251 26 optimizer.lr 0.004133340660186966 +251 26 training.batch_size 1.0 +251 26 training.label_smoothing 0.4029558484367708 +251 27 model.embedding_dim 1.0 +251 27 optimizer.lr 0.019911198396459263 +251 27 training.batch_size 2.0 +251 27 training.label_smoothing 0.006569079867574441 +251 28 model.embedding_dim 1.0 +251 28 optimizer.lr 0.0012319663021722156 +251 28 training.batch_size 0.0 +251 28 training.label_smoothing 0.0050792238495722085 +251 29 model.embedding_dim 0.0 +251 29 
optimizer.lr 0.0016152618104181545 +251 29 training.batch_size 0.0 +251 29 training.label_smoothing 0.079177017166919 +251 30 model.embedding_dim 1.0 +251 30 optimizer.lr 0.002497320442553051 +251 30 training.batch_size 2.0 +251 30 training.label_smoothing 0.027769486060620727 +251 31 model.embedding_dim 1.0 +251 31 optimizer.lr 0.01589714120886694 +251 31 training.batch_size 0.0 +251 31 training.label_smoothing 0.735848885481284 +251 32 model.embedding_dim 0.0 +251 32 optimizer.lr 0.0763669647265674 +251 32 training.batch_size 0.0 +251 32 training.label_smoothing 0.616537448492898 +251 33 model.embedding_dim 0.0 +251 33 optimizer.lr 0.004214757471170675 +251 33 training.batch_size 2.0 +251 33 training.label_smoothing 0.019357679561122304 +251 34 model.embedding_dim 0.0 +251 34 optimizer.lr 0.023895184844458146 +251 34 training.batch_size 2.0 +251 34 training.label_smoothing 0.001589280936697898 +251 35 model.embedding_dim 0.0 +251 35 optimizer.lr 0.03908198643595814 +251 35 training.batch_size 2.0 +251 35 training.label_smoothing 0.004633106668896012 +251 36 model.embedding_dim 1.0 +251 36 optimizer.lr 0.0013125326666296598 +251 36 training.batch_size 2.0 +251 36 training.label_smoothing 0.0032147984551879503 +251 37 model.embedding_dim 1.0 +251 37 optimizer.lr 0.00566110083412375 +251 37 training.batch_size 2.0 +251 37 training.label_smoothing 0.6474803323266287 +251 38 model.embedding_dim 2.0 +251 38 optimizer.lr 0.006525376549274599 +251 38 training.batch_size 1.0 +251 38 training.label_smoothing 0.07610559783950635 +251 39 model.embedding_dim 2.0 +251 39 optimizer.lr 0.06785448273303149 +251 39 training.batch_size 1.0 +251 39 training.label_smoothing 0.32975887765319 +251 40 model.embedding_dim 2.0 +251 40 optimizer.lr 0.0010350174276455987 +251 40 training.batch_size 2.0 +251 40 training.label_smoothing 0.03427190547510116 +251 41 model.embedding_dim 2.0 +251 41 optimizer.lr 0.003142491817098697 +251 41 training.batch_size 2.0 +251 41 training.label_smoothing 
0.024284446686061188 +251 42 model.embedding_dim 0.0 +251 42 optimizer.lr 0.034247188158404766 +251 42 training.batch_size 0.0 +251 42 training.label_smoothing 0.1442509622557087 +251 43 model.embedding_dim 0.0 +251 43 optimizer.lr 0.015044907626043533 +251 43 training.batch_size 2.0 +251 43 training.label_smoothing 0.009540820788124175 +251 44 model.embedding_dim 0.0 +251 44 optimizer.lr 0.038879046280779796 +251 44 training.batch_size 2.0 +251 44 training.label_smoothing 0.009169391272189364 +251 45 model.embedding_dim 0.0 +251 45 optimizer.lr 0.002439525676837906 +251 45 training.batch_size 0.0 +251 45 training.label_smoothing 0.002045073374997887 +251 46 model.embedding_dim 2.0 +251 46 optimizer.lr 0.008458039095761335 +251 46 training.batch_size 0.0 +251 46 training.label_smoothing 0.0017260530817410139 +251 47 model.embedding_dim 0.0 +251 47 optimizer.lr 0.015668290157741196 +251 47 training.batch_size 2.0 +251 47 training.label_smoothing 0.3520763805906117 +251 48 model.embedding_dim 2.0 +251 48 optimizer.lr 0.08896631959640523 +251 48 training.batch_size 0.0 +251 48 training.label_smoothing 0.04038970978549557 +251 49 model.embedding_dim 1.0 +251 49 optimizer.lr 0.0021296012776795725 +251 49 training.batch_size 2.0 +251 49 training.label_smoothing 0.023104896665081285 +251 50 model.embedding_dim 2.0 +251 50 optimizer.lr 0.005590390804956863 +251 50 training.batch_size 0.0 +251 50 training.label_smoothing 0.043370756759400345 +251 51 model.embedding_dim 2.0 +251 51 optimizer.lr 0.0020811112988743177 +251 51 training.batch_size 0.0 +251 51 training.label_smoothing 0.10082922307280064 +251 52 model.embedding_dim 1.0 +251 52 optimizer.lr 0.0014074807423150194 +251 52 training.batch_size 0.0 +251 52 training.label_smoothing 0.06328312183627861 +251 53 model.embedding_dim 2.0 +251 53 optimizer.lr 0.0016258445437874964 +251 53 training.batch_size 0.0 +251 53 training.label_smoothing 0.004967122007466425 +251 54 model.embedding_dim 2.0 +251 54 optimizer.lr 
0.047543226465366704 +251 54 training.batch_size 2.0 +251 54 training.label_smoothing 0.20937093357051384 +251 55 model.embedding_dim 0.0 +251 55 optimizer.lr 0.02496181437552378 +251 55 training.batch_size 1.0 +251 55 training.label_smoothing 0.004813321365486808 +251 56 model.embedding_dim 1.0 +251 56 optimizer.lr 0.031372456798936896 +251 56 training.batch_size 1.0 +251 56 training.label_smoothing 0.004343644222495561 +251 57 model.embedding_dim 1.0 +251 57 optimizer.lr 0.02129068432993416 +251 57 training.batch_size 1.0 +251 57 training.label_smoothing 0.03567904880996224 +251 58 model.embedding_dim 1.0 +251 58 optimizer.lr 0.06792791054497328 +251 58 training.batch_size 1.0 +251 58 training.label_smoothing 0.003298219307682381 +251 59 model.embedding_dim 0.0 +251 59 optimizer.lr 0.08823862983846209 +251 59 training.batch_size 2.0 +251 59 training.label_smoothing 0.027114712794142133 +251 60 model.embedding_dim 1.0 +251 60 optimizer.lr 0.014312424111102923 +251 60 training.batch_size 1.0 +251 60 training.label_smoothing 0.3415096844144513 +251 61 model.embedding_dim 2.0 +251 61 optimizer.lr 0.012179611873998235 +251 61 training.batch_size 2.0 +251 61 training.label_smoothing 0.005028823349997118 +251 62 model.embedding_dim 0.0 +251 62 optimizer.lr 0.004815630644101176 +251 62 training.batch_size 2.0 +251 62 training.label_smoothing 0.0013316315085124104 +251 63 model.embedding_dim 2.0 +251 63 optimizer.lr 0.009492987934925007 +251 63 training.batch_size 2.0 +251 63 training.label_smoothing 0.42072885214162287 +251 64 model.embedding_dim 2.0 +251 64 optimizer.lr 0.008981674396933977 +251 64 training.batch_size 1.0 +251 64 training.label_smoothing 0.00544800516227253 +251 65 model.embedding_dim 1.0 +251 65 optimizer.lr 0.008132661674097779 +251 65 training.batch_size 1.0 +251 65 training.label_smoothing 0.13639246835923965 +251 66 model.embedding_dim 0.0 +251 66 optimizer.lr 0.023900410191639673 +251 66 training.batch_size 1.0 +251 66 training.label_smoothing 
0.03715050328201944 +251 67 model.embedding_dim 1.0 +251 67 optimizer.lr 0.003437614448953357 +251 67 training.batch_size 2.0 +251 67 training.label_smoothing 0.27381474009050355 +251 68 model.embedding_dim 2.0 +251 68 optimizer.lr 0.0022529946858381496 +251 68 training.batch_size 1.0 +251 68 training.label_smoothing 0.004091965397093015 +251 69 model.embedding_dim 2.0 +251 69 optimizer.lr 0.0032578932398393333 +251 69 training.batch_size 1.0 +251 69 training.label_smoothing 0.4214011066135175 +251 70 model.embedding_dim 2.0 +251 70 optimizer.lr 0.023830938642482775 +251 70 training.batch_size 0.0 +251 70 training.label_smoothing 0.06200112274249518 +251 71 model.embedding_dim 0.0 +251 71 optimizer.lr 0.001634868483086217 +251 71 training.batch_size 0.0 +251 71 training.label_smoothing 0.011936089691220309 +251 72 model.embedding_dim 2.0 +251 72 optimizer.lr 0.03048617513236653 +251 72 training.batch_size 1.0 +251 72 training.label_smoothing 0.03481120999206557 +251 73 model.embedding_dim 2.0 +251 73 optimizer.lr 0.025295160020698537 +251 73 training.batch_size 1.0 +251 73 training.label_smoothing 0.003525109946157505 +251 74 model.embedding_dim 1.0 +251 74 optimizer.lr 0.004253130058356446 +251 74 training.batch_size 1.0 +251 74 training.label_smoothing 0.007494056117312562 +251 75 model.embedding_dim 1.0 +251 75 optimizer.lr 0.00256702635523877 +251 75 training.batch_size 1.0 +251 75 training.label_smoothing 0.0020325277598979537 +251 76 model.embedding_dim 0.0 +251 76 optimizer.lr 0.0012579828120518814 +251 76 training.batch_size 0.0 +251 76 training.label_smoothing 0.009219892475424898 +251 77 model.embedding_dim 0.0 +251 77 optimizer.lr 0.0035570649873823044 +251 77 training.batch_size 0.0 +251 77 training.label_smoothing 0.0020341036743507803 +251 78 model.embedding_dim 0.0 +251 78 optimizer.lr 0.00996560466485943 +251 78 training.batch_size 0.0 +251 78 training.label_smoothing 0.026637787391296526 +251 79 model.embedding_dim 2.0 +251 79 optimizer.lr 
0.005429563415446357 +251 79 training.batch_size 2.0 +251 79 training.label_smoothing 0.002241419119721326 +251 80 model.embedding_dim 1.0 +251 80 optimizer.lr 0.004913334975182297 +251 80 training.batch_size 2.0 +251 80 training.label_smoothing 0.05897411535456027 +251 81 model.embedding_dim 2.0 +251 81 optimizer.lr 0.006256863896751308 +251 81 training.batch_size 1.0 +251 81 training.label_smoothing 0.0017160966458229644 +251 82 model.embedding_dim 0.0 +251 82 optimizer.lr 0.001010208198001298 +251 82 training.batch_size 0.0 +251 82 training.label_smoothing 0.6788837421856414 +251 83 model.embedding_dim 0.0 +251 83 optimizer.lr 0.012909199874741924 +251 83 training.batch_size 0.0 +251 83 training.label_smoothing 0.05308336089892225 +251 84 model.embedding_dim 0.0 +251 84 optimizer.lr 0.0030030548727435656 +251 84 training.batch_size 1.0 +251 84 training.label_smoothing 0.026756044599423306 +251 85 model.embedding_dim 2.0 +251 85 optimizer.lr 0.006453041288638536 +251 85 training.batch_size 0.0 +251 85 training.label_smoothing 0.2681340128793107 +251 86 model.embedding_dim 0.0 +251 86 optimizer.lr 0.09715307052283238 +251 86 training.batch_size 1.0 +251 86 training.label_smoothing 0.00919272679679964 +251 87 model.embedding_dim 2.0 +251 87 optimizer.lr 0.0918748442920608 +251 87 training.batch_size 0.0 +251 87 training.label_smoothing 0.0024243865106942683 +251 88 model.embedding_dim 0.0 +251 88 optimizer.lr 0.0010523054890126478 +251 88 training.batch_size 1.0 +251 88 training.label_smoothing 0.002141259930025974 +251 89 model.embedding_dim 0.0 +251 89 optimizer.lr 0.0032795697487012984 +251 89 training.batch_size 0.0 +251 89 training.label_smoothing 0.46045150792681566 +251 90 model.embedding_dim 0.0 +251 90 optimizer.lr 0.0035759375552709403 +251 90 training.batch_size 0.0 +251 90 training.label_smoothing 0.004736401229871732 +251 91 model.embedding_dim 0.0 +251 91 optimizer.lr 0.001397133281821775 +251 91 training.batch_size 1.0 +251 91 
training.label_smoothing 0.006833313659643683 +251 92 model.embedding_dim 1.0 +251 92 optimizer.lr 0.012478477939104587 +251 92 training.batch_size 2.0 +251 92 training.label_smoothing 0.03917403331793694 +251 93 model.embedding_dim 2.0 +251 93 optimizer.lr 0.03652697914695623 +251 93 training.batch_size 0.0 +251 93 training.label_smoothing 0.09194514186922009 +251 94 model.embedding_dim 1.0 +251 94 optimizer.lr 0.0028386461659731816 +251 94 training.batch_size 1.0 +251 94 training.label_smoothing 0.13837503549675387 +251 95 model.embedding_dim 2.0 +251 95 optimizer.lr 0.06791755597319281 +251 95 training.batch_size 2.0 +251 95 training.label_smoothing 0.013410970632657022 +251 96 model.embedding_dim 1.0 +251 96 optimizer.lr 0.00637303199429444 +251 96 training.batch_size 1.0 +251 96 training.label_smoothing 0.007867552577237438 +251 97 model.embedding_dim 0.0 +251 97 optimizer.lr 0.025495424602693637 +251 97 training.batch_size 1.0 +251 97 training.label_smoothing 0.01700003292787551 +251 98 model.embedding_dim 0.0 +251 98 optimizer.lr 0.0020745557633514078 +251 98 training.batch_size 0.0 +251 98 training.label_smoothing 0.04242250399745932 +251 99 model.embedding_dim 0.0 +251 99 optimizer.lr 0.03207123606495096 +251 99 training.batch_size 2.0 +251 99 training.label_smoothing 0.0847313759313914 +251 100 model.embedding_dim 0.0 +251 100 optimizer.lr 0.0020114593122641136 +251 100 training.batch_size 1.0 +251 100 training.label_smoothing 0.002090412982735832 +251 1 dataset """kinships""" +251 1 model """ermlp""" +251 1 loss """crossentropy""" +251 1 regularizer """no""" +251 1 optimizer """adam""" +251 1 training_loop """lcwa""" +251 1 evaluator """rankbased""" +251 2 dataset """kinships""" +251 2 model """ermlp""" +251 2 loss """crossentropy""" +251 2 regularizer """no""" +251 2 optimizer """adam""" +251 2 training_loop """lcwa""" +251 2 evaluator """rankbased""" +251 3 dataset """kinships""" +251 3 model """ermlp""" +251 3 loss """crossentropy""" +251 3 
regularizer """no""" +251 3 optimizer """adam""" +251 3 training_loop """lcwa""" +251 3 evaluator """rankbased""" +251 4 dataset """kinships""" +251 4 model """ermlp""" +251 4 loss """crossentropy""" +251 4 regularizer """no""" +251 4 optimizer """adam""" +251 4 training_loop """lcwa""" +251 4 evaluator """rankbased""" +251 5 dataset """kinships""" +251 5 model """ermlp""" +251 5 loss """crossentropy""" +251 5 regularizer """no""" +251 5 optimizer """adam""" +251 5 training_loop """lcwa""" +251 5 evaluator """rankbased""" +251 6 dataset """kinships""" +251 6 model """ermlp""" +251 6 loss """crossentropy""" +251 6 regularizer """no""" +251 6 optimizer """adam""" +251 6 training_loop """lcwa""" +251 6 evaluator """rankbased""" +251 7 dataset """kinships""" +251 7 model """ermlp""" +251 7 loss """crossentropy""" +251 7 regularizer """no""" +251 7 optimizer """adam""" +251 7 training_loop """lcwa""" +251 7 evaluator """rankbased""" +251 8 dataset """kinships""" +251 8 model """ermlp""" +251 8 loss """crossentropy""" +251 8 regularizer """no""" +251 8 optimizer """adam""" +251 8 training_loop """lcwa""" +251 8 evaluator """rankbased""" +251 9 dataset """kinships""" +251 9 model """ermlp""" +251 9 loss """crossentropy""" +251 9 regularizer """no""" +251 9 optimizer """adam""" +251 9 training_loop """lcwa""" +251 9 evaluator """rankbased""" +251 10 dataset """kinships""" +251 10 model """ermlp""" +251 10 loss """crossentropy""" +251 10 regularizer """no""" +251 10 optimizer """adam""" +251 10 training_loop """lcwa""" +251 10 evaluator """rankbased""" +251 11 dataset """kinships""" +251 11 model """ermlp""" +251 11 loss """crossentropy""" +251 11 regularizer """no""" +251 11 optimizer """adam""" +251 11 training_loop """lcwa""" +251 11 evaluator """rankbased""" +251 12 dataset """kinships""" +251 12 model """ermlp""" +251 12 loss """crossentropy""" +251 12 regularizer """no""" +251 12 optimizer """adam""" +251 12 training_loop """lcwa""" +251 12 evaluator """rankbased""" 
+251 13 dataset """kinships""" +251 13 model """ermlp""" +251 13 loss """crossentropy""" +251 13 regularizer """no""" +251 13 optimizer """adam""" +251 13 training_loop """lcwa""" +251 13 evaluator """rankbased""" +251 14 dataset """kinships""" +251 14 model """ermlp""" +251 14 loss """crossentropy""" +251 14 regularizer """no""" +251 14 optimizer """adam""" +251 14 training_loop """lcwa""" +251 14 evaluator """rankbased""" +251 15 dataset """kinships""" +251 15 model """ermlp""" +251 15 loss """crossentropy""" +251 15 regularizer """no""" +251 15 optimizer """adam""" +251 15 training_loop """lcwa""" +251 15 evaluator """rankbased""" +251 16 dataset """kinships""" +251 16 model """ermlp""" +251 16 loss """crossentropy""" +251 16 regularizer """no""" +251 16 optimizer """adam""" +251 16 training_loop """lcwa""" +251 16 evaluator """rankbased""" +251 17 dataset """kinships""" +251 17 model """ermlp""" +251 17 loss """crossentropy""" +251 17 regularizer """no""" +251 17 optimizer """adam""" +251 17 training_loop """lcwa""" +251 17 evaluator """rankbased""" +251 18 dataset """kinships""" +251 18 model """ermlp""" +251 18 loss """crossentropy""" +251 18 regularizer """no""" +251 18 optimizer """adam""" +251 18 training_loop """lcwa""" +251 18 evaluator """rankbased""" +251 19 dataset """kinships""" +251 19 model """ermlp""" +251 19 loss """crossentropy""" +251 19 regularizer """no""" +251 19 optimizer """adam""" +251 19 training_loop """lcwa""" +251 19 evaluator """rankbased""" +251 20 dataset """kinships""" +251 20 model """ermlp""" +251 20 loss """crossentropy""" +251 20 regularizer """no""" +251 20 optimizer """adam""" +251 20 training_loop """lcwa""" +251 20 evaluator """rankbased""" +251 21 dataset """kinships""" +251 21 model """ermlp""" +251 21 loss """crossentropy""" +251 21 regularizer """no""" +251 21 optimizer """adam""" +251 21 training_loop """lcwa""" +251 21 evaluator """rankbased""" +251 22 dataset """kinships""" +251 22 model """ermlp""" +251 22 loss 
"""crossentropy""" +251 22 regularizer """no""" +251 22 optimizer """adam""" +251 22 training_loop """lcwa""" +251 22 evaluator """rankbased""" +251 23 dataset """kinships""" +251 23 model """ermlp""" +251 23 loss """crossentropy""" +251 23 regularizer """no""" +251 23 optimizer """adam""" +251 23 training_loop """lcwa""" +251 23 evaluator """rankbased""" +251 24 dataset """kinships""" +251 24 model """ermlp""" +251 24 loss """crossentropy""" +251 24 regularizer """no""" +251 24 optimizer """adam""" +251 24 training_loop """lcwa""" +251 24 evaluator """rankbased""" +251 25 dataset """kinships""" +251 25 model """ermlp""" +251 25 loss """crossentropy""" +251 25 regularizer """no""" +251 25 optimizer """adam""" +251 25 training_loop """lcwa""" +251 25 evaluator """rankbased""" +251 26 dataset """kinships""" +251 26 model """ermlp""" +251 26 loss """crossentropy""" +251 26 regularizer """no""" +251 26 optimizer """adam""" +251 26 training_loop """lcwa""" +251 26 evaluator """rankbased""" +251 27 dataset """kinships""" +251 27 model """ermlp""" +251 27 loss """crossentropy""" +251 27 regularizer """no""" +251 27 optimizer """adam""" +251 27 training_loop """lcwa""" +251 27 evaluator """rankbased""" +251 28 dataset """kinships""" +251 28 model """ermlp""" +251 28 loss """crossentropy""" +251 28 regularizer """no""" +251 28 optimizer """adam""" +251 28 training_loop """lcwa""" +251 28 evaluator """rankbased""" +251 29 dataset """kinships""" +251 29 model """ermlp""" +251 29 loss """crossentropy""" +251 29 regularizer """no""" +251 29 optimizer """adam""" +251 29 training_loop """lcwa""" +251 29 evaluator """rankbased""" +251 30 dataset """kinships""" +251 30 model """ermlp""" +251 30 loss """crossentropy""" +251 30 regularizer """no""" +251 30 optimizer """adam""" +251 30 training_loop """lcwa""" +251 30 evaluator """rankbased""" +251 31 dataset """kinships""" +251 31 model """ermlp""" +251 31 loss """crossentropy""" +251 31 regularizer """no""" +251 31 optimizer 
"""adam""" +251 31 training_loop """lcwa""" +251 31 evaluator """rankbased""" +251 32 dataset """kinships""" +251 32 model """ermlp""" +251 32 loss """crossentropy""" +251 32 regularizer """no""" +251 32 optimizer """adam""" +251 32 training_loop """lcwa""" +251 32 evaluator """rankbased""" +251 33 dataset """kinships""" +251 33 model """ermlp""" +251 33 loss """crossentropy""" +251 33 regularizer """no""" +251 33 optimizer """adam""" +251 33 training_loop """lcwa""" +251 33 evaluator """rankbased""" +251 34 dataset """kinships""" +251 34 model """ermlp""" +251 34 loss """crossentropy""" +251 34 regularizer """no""" +251 34 optimizer """adam""" +251 34 training_loop """lcwa""" +251 34 evaluator """rankbased""" +251 35 dataset """kinships""" +251 35 model """ermlp""" +251 35 loss """crossentropy""" +251 35 regularizer """no""" +251 35 optimizer """adam""" +251 35 training_loop """lcwa""" +251 35 evaluator """rankbased""" +251 36 dataset """kinships""" +251 36 model """ermlp""" +251 36 loss """crossentropy""" +251 36 regularizer """no""" +251 36 optimizer """adam""" +251 36 training_loop """lcwa""" +251 36 evaluator """rankbased""" +251 37 dataset """kinships""" +251 37 model """ermlp""" +251 37 loss """crossentropy""" +251 37 regularizer """no""" +251 37 optimizer """adam""" +251 37 training_loop """lcwa""" +251 37 evaluator """rankbased""" +251 38 dataset """kinships""" +251 38 model """ermlp""" +251 38 loss """crossentropy""" +251 38 regularizer """no""" +251 38 optimizer """adam""" +251 38 training_loop """lcwa""" +251 38 evaluator """rankbased""" +251 39 dataset """kinships""" +251 39 model """ermlp""" +251 39 loss """crossentropy""" +251 39 regularizer """no""" +251 39 optimizer """adam""" +251 39 training_loop """lcwa""" +251 39 evaluator """rankbased""" +251 40 dataset """kinships""" +251 40 model """ermlp""" +251 40 loss """crossentropy""" +251 40 regularizer """no""" +251 40 optimizer """adam""" +251 40 training_loop """lcwa""" +251 40 evaluator 
"""rankbased""" +251 41 dataset """kinships""" +251 41 model """ermlp""" +251 41 loss """crossentropy""" +251 41 regularizer """no""" +251 41 optimizer """adam""" +251 41 training_loop """lcwa""" +251 41 evaluator """rankbased""" +251 42 dataset """kinships""" +251 42 model """ermlp""" +251 42 loss """crossentropy""" +251 42 regularizer """no""" +251 42 optimizer """adam""" +251 42 training_loop """lcwa""" +251 42 evaluator """rankbased""" +251 43 dataset """kinships""" +251 43 model """ermlp""" +251 43 loss """crossentropy""" +251 43 regularizer """no""" +251 43 optimizer """adam""" +251 43 training_loop """lcwa""" +251 43 evaluator """rankbased""" +251 44 dataset """kinships""" +251 44 model """ermlp""" +251 44 loss """crossentropy""" +251 44 regularizer """no""" +251 44 optimizer """adam""" +251 44 training_loop """lcwa""" +251 44 evaluator """rankbased""" +251 45 dataset """kinships""" +251 45 model """ermlp""" +251 45 loss """crossentropy""" +251 45 regularizer """no""" +251 45 optimizer """adam""" +251 45 training_loop """lcwa""" +251 45 evaluator """rankbased""" +251 46 dataset """kinships""" +251 46 model """ermlp""" +251 46 loss """crossentropy""" +251 46 regularizer """no""" +251 46 optimizer """adam""" +251 46 training_loop """lcwa""" +251 46 evaluator """rankbased""" +251 47 dataset """kinships""" +251 47 model """ermlp""" +251 47 loss """crossentropy""" +251 47 regularizer """no""" +251 47 optimizer """adam""" +251 47 training_loop """lcwa""" +251 47 evaluator """rankbased""" +251 48 dataset """kinships""" +251 48 model """ermlp""" +251 48 loss """crossentropy""" +251 48 regularizer """no""" +251 48 optimizer """adam""" +251 48 training_loop """lcwa""" +251 48 evaluator """rankbased""" +251 49 dataset """kinships""" +251 49 model """ermlp""" +251 49 loss """crossentropy""" +251 49 regularizer """no""" +251 49 optimizer """adam""" +251 49 training_loop """lcwa""" +251 49 evaluator """rankbased""" +251 50 dataset """kinships""" +251 50 model """ermlp""" 
+251 50 loss """crossentropy""" +251 50 regularizer """no""" +251 50 optimizer """adam""" +251 50 training_loop """lcwa""" +251 50 evaluator """rankbased""" +251 51 dataset """kinships""" +251 51 model """ermlp""" +251 51 loss """crossentropy""" +251 51 regularizer """no""" +251 51 optimizer """adam""" +251 51 training_loop """lcwa""" +251 51 evaluator """rankbased""" +251 52 dataset """kinships""" +251 52 model """ermlp""" +251 52 loss """crossentropy""" +251 52 regularizer """no""" +251 52 optimizer """adam""" +251 52 training_loop """lcwa""" +251 52 evaluator """rankbased""" +251 53 dataset """kinships""" +251 53 model """ermlp""" +251 53 loss """crossentropy""" +251 53 regularizer """no""" +251 53 optimizer """adam""" +251 53 training_loop """lcwa""" +251 53 evaluator """rankbased""" +251 54 dataset """kinships""" +251 54 model """ermlp""" +251 54 loss """crossentropy""" +251 54 regularizer """no""" +251 54 optimizer """adam""" +251 54 training_loop """lcwa""" +251 54 evaluator """rankbased""" +251 55 dataset """kinships""" +251 55 model """ermlp""" +251 55 loss """crossentropy""" +251 55 regularizer """no""" +251 55 optimizer """adam""" +251 55 training_loop """lcwa""" +251 55 evaluator """rankbased""" +251 56 dataset """kinships""" +251 56 model """ermlp""" +251 56 loss """crossentropy""" +251 56 regularizer """no""" +251 56 optimizer """adam""" +251 56 training_loop """lcwa""" +251 56 evaluator """rankbased""" +251 57 dataset """kinships""" +251 57 model """ermlp""" +251 57 loss """crossentropy""" +251 57 regularizer """no""" +251 57 optimizer """adam""" +251 57 training_loop """lcwa""" +251 57 evaluator """rankbased""" +251 58 dataset """kinships""" +251 58 model """ermlp""" +251 58 loss """crossentropy""" +251 58 regularizer """no""" +251 58 optimizer """adam""" +251 58 training_loop """lcwa""" +251 58 evaluator """rankbased""" +251 59 dataset """kinships""" +251 59 model """ermlp""" +251 59 loss """crossentropy""" +251 59 regularizer """no""" +251 59 
optimizer """adam""" +251 59 training_loop """lcwa""" +251 59 evaluator """rankbased""" +251 60 dataset """kinships""" +251 60 model """ermlp""" +251 60 loss """crossentropy""" +251 60 regularizer """no""" +251 60 optimizer """adam""" +251 60 training_loop """lcwa""" +251 60 evaluator """rankbased""" +251 61 dataset """kinships""" +251 61 model """ermlp""" +251 61 loss """crossentropy""" +251 61 regularizer """no""" +251 61 optimizer """adam""" +251 61 training_loop """lcwa""" +251 61 evaluator """rankbased""" +251 62 dataset """kinships""" +251 62 model """ermlp""" +251 62 loss """crossentropy""" +251 62 regularizer """no""" +251 62 optimizer """adam""" +251 62 training_loop """lcwa""" +251 62 evaluator """rankbased""" +251 63 dataset """kinships""" +251 63 model """ermlp""" +251 63 loss """crossentropy""" +251 63 regularizer """no""" +251 63 optimizer """adam""" +251 63 training_loop """lcwa""" +251 63 evaluator """rankbased""" +251 64 dataset """kinships""" +251 64 model """ermlp""" +251 64 loss """crossentropy""" +251 64 regularizer """no""" +251 64 optimizer """adam""" +251 64 training_loop """lcwa""" +251 64 evaluator """rankbased""" +251 65 dataset """kinships""" +251 65 model """ermlp""" +251 65 loss """crossentropy""" +251 65 regularizer """no""" +251 65 optimizer """adam""" +251 65 training_loop """lcwa""" +251 65 evaluator """rankbased""" +251 66 dataset """kinships""" +251 66 model """ermlp""" +251 66 loss """crossentropy""" +251 66 regularizer """no""" +251 66 optimizer """adam""" +251 66 training_loop """lcwa""" +251 66 evaluator """rankbased""" +251 67 dataset """kinships""" +251 67 model """ermlp""" +251 67 loss """crossentropy""" +251 67 regularizer """no""" +251 67 optimizer """adam""" +251 67 training_loop """lcwa""" +251 67 evaluator """rankbased""" +251 68 dataset """kinships""" +251 68 model """ermlp""" +251 68 loss """crossentropy""" +251 68 regularizer """no""" +251 68 optimizer """adam""" +251 68 training_loop """lcwa""" +251 68 evaluator 
"""rankbased""" +251 69 dataset """kinships""" +251 69 model """ermlp""" +251 69 loss """crossentropy""" +251 69 regularizer """no""" +251 69 optimizer """adam""" +251 69 training_loop """lcwa""" +251 69 evaluator """rankbased""" +251 70 dataset """kinships""" +251 70 model """ermlp""" +251 70 loss """crossentropy""" +251 70 regularizer """no""" +251 70 optimizer """adam""" +251 70 training_loop """lcwa""" +251 70 evaluator """rankbased""" +251 71 dataset """kinships""" +251 71 model """ermlp""" +251 71 loss """crossentropy""" +251 71 regularizer """no""" +251 71 optimizer """adam""" +251 71 training_loop """lcwa""" +251 71 evaluator """rankbased""" +251 72 dataset """kinships""" +251 72 model """ermlp""" +251 72 loss """crossentropy""" +251 72 regularizer """no""" +251 72 optimizer """adam""" +251 72 training_loop """lcwa""" +251 72 evaluator """rankbased""" +251 73 dataset """kinships""" +251 73 model """ermlp""" +251 73 loss """crossentropy""" +251 73 regularizer """no""" +251 73 optimizer """adam""" +251 73 training_loop """lcwa""" +251 73 evaluator """rankbased""" +251 74 dataset """kinships""" +251 74 model """ermlp""" +251 74 loss """crossentropy""" +251 74 regularizer """no""" +251 74 optimizer """adam""" +251 74 training_loop """lcwa""" +251 74 evaluator """rankbased""" +251 75 dataset """kinships""" +251 75 model """ermlp""" +251 75 loss """crossentropy""" +251 75 regularizer """no""" +251 75 optimizer """adam""" +251 75 training_loop """lcwa""" +251 75 evaluator """rankbased""" +251 76 dataset """kinships""" +251 76 model """ermlp""" +251 76 loss """crossentropy""" +251 76 regularizer """no""" +251 76 optimizer """adam""" +251 76 training_loop """lcwa""" +251 76 evaluator """rankbased""" +251 77 dataset """kinships""" +251 77 model """ermlp""" +251 77 loss """crossentropy""" +251 77 regularizer """no""" +251 77 optimizer """adam""" +251 77 training_loop """lcwa""" +251 77 evaluator """rankbased""" +251 78 dataset """kinships""" +251 78 model """ermlp""" 
+251 78 loss """crossentropy""" +251 78 regularizer """no""" +251 78 optimizer """adam""" +251 78 training_loop """lcwa""" +251 78 evaluator """rankbased""" +251 79 dataset """kinships""" +251 79 model """ermlp""" +251 79 loss """crossentropy""" +251 79 regularizer """no""" +251 79 optimizer """adam""" +251 79 training_loop """lcwa""" +251 79 evaluator """rankbased""" +251 80 dataset """kinships""" +251 80 model """ermlp""" +251 80 loss """crossentropy""" +251 80 regularizer """no""" +251 80 optimizer """adam""" +251 80 training_loop """lcwa""" +251 80 evaluator """rankbased""" +251 81 dataset """kinships""" +251 81 model """ermlp""" +251 81 loss """crossentropy""" +251 81 regularizer """no""" +251 81 optimizer """adam""" +251 81 training_loop """lcwa""" +251 81 evaluator """rankbased""" +251 82 dataset """kinships""" +251 82 model """ermlp""" +251 82 loss """crossentropy""" +251 82 regularizer """no""" +251 82 optimizer """adam""" +251 82 training_loop """lcwa""" +251 82 evaluator """rankbased""" +251 83 dataset """kinships""" +251 83 model """ermlp""" +251 83 loss """crossentropy""" +251 83 regularizer """no""" +251 83 optimizer """adam""" +251 83 training_loop """lcwa""" +251 83 evaluator """rankbased""" +251 84 dataset """kinships""" +251 84 model """ermlp""" +251 84 loss """crossentropy""" +251 84 regularizer """no""" +251 84 optimizer """adam""" +251 84 training_loop """lcwa""" +251 84 evaluator """rankbased""" +251 85 dataset """kinships""" +251 85 model """ermlp""" +251 85 loss """crossentropy""" +251 85 regularizer """no""" +251 85 optimizer """adam""" +251 85 training_loop """lcwa""" +251 85 evaluator """rankbased""" +251 86 dataset """kinships""" +251 86 model """ermlp""" +251 86 loss """crossentropy""" +251 86 regularizer """no""" +251 86 optimizer """adam""" +251 86 training_loop """lcwa""" +251 86 evaluator """rankbased""" +251 87 dataset """kinships""" +251 87 model """ermlp""" +251 87 loss """crossentropy""" +251 87 regularizer """no""" +251 87 
optimizer """adam""" +251 87 training_loop """lcwa""" +251 87 evaluator """rankbased""" +251 88 dataset """kinships""" +251 88 model """ermlp""" +251 88 loss """crossentropy""" +251 88 regularizer """no""" +251 88 optimizer """adam""" +251 88 training_loop """lcwa""" +251 88 evaluator """rankbased""" +251 89 dataset """kinships""" +251 89 model """ermlp""" +251 89 loss """crossentropy""" +251 89 regularizer """no""" +251 89 optimizer """adam""" +251 89 training_loop """lcwa""" +251 89 evaluator """rankbased""" +251 90 dataset """kinships""" +251 90 model """ermlp""" +251 90 loss """crossentropy""" +251 90 regularizer """no""" +251 90 optimizer """adam""" +251 90 training_loop """lcwa""" +251 90 evaluator """rankbased""" +251 91 dataset """kinships""" +251 91 model """ermlp""" +251 91 loss """crossentropy""" +251 91 regularizer """no""" +251 91 optimizer """adam""" +251 91 training_loop """lcwa""" +251 91 evaluator """rankbased""" +251 92 dataset """kinships""" +251 92 model """ermlp""" +251 92 loss """crossentropy""" +251 92 regularizer """no""" +251 92 optimizer """adam""" +251 92 training_loop """lcwa""" +251 92 evaluator """rankbased""" +251 93 dataset """kinships""" +251 93 model """ermlp""" +251 93 loss """crossentropy""" +251 93 regularizer """no""" +251 93 optimizer """adam""" +251 93 training_loop """lcwa""" +251 93 evaluator """rankbased""" +251 94 dataset """kinships""" +251 94 model """ermlp""" +251 94 loss """crossentropy""" +251 94 regularizer """no""" +251 94 optimizer """adam""" +251 94 training_loop """lcwa""" +251 94 evaluator """rankbased""" +251 95 dataset """kinships""" +251 95 model """ermlp""" +251 95 loss """crossentropy""" +251 95 regularizer """no""" +251 95 optimizer """adam""" +251 95 training_loop """lcwa""" +251 95 evaluator """rankbased""" +251 96 dataset """kinships""" +251 96 model """ermlp""" +251 96 loss """crossentropy""" +251 96 regularizer """no""" +251 96 optimizer """adam""" +251 96 training_loop """lcwa""" +251 96 evaluator 
"""rankbased""" +251 97 dataset """kinships""" +251 97 model """ermlp""" +251 97 loss """crossentropy""" +251 97 regularizer """no""" +251 97 optimizer """adam""" +251 97 training_loop """lcwa""" +251 97 evaluator """rankbased""" +251 98 dataset """kinships""" +251 98 model """ermlp""" +251 98 loss """crossentropy""" +251 98 regularizer """no""" +251 98 optimizer """adam""" +251 98 training_loop """lcwa""" +251 98 evaluator """rankbased""" +251 99 dataset """kinships""" +251 99 model """ermlp""" +251 99 loss """crossentropy""" +251 99 regularizer """no""" +251 99 optimizer """adam""" +251 99 training_loop """lcwa""" +251 99 evaluator """rankbased""" +251 100 dataset """kinships""" +251 100 model """ermlp""" +251 100 loss """crossentropy""" +251 100 regularizer """no""" +251 100 optimizer """adam""" +251 100 training_loop """lcwa""" +251 100 evaluator """rankbased""" +252 1 model.embedding_dim 0.0 +252 1 optimizer.lr 0.0029191484071051104 +252 1 negative_sampler.num_negs_per_pos 30.0 +252 1 training.batch_size 1.0 +252 2 model.embedding_dim 1.0 +252 2 optimizer.lr 0.0077319127111566185 +252 2 negative_sampler.num_negs_per_pos 55.0 +252 2 training.batch_size 2.0 +252 3 model.embedding_dim 0.0 +252 3 optimizer.lr 0.008468264940791004 +252 3 negative_sampler.num_negs_per_pos 11.0 +252 3 training.batch_size 2.0 +252 4 model.embedding_dim 1.0 +252 4 optimizer.lr 0.03038730108857186 +252 4 negative_sampler.num_negs_per_pos 29.0 +252 4 training.batch_size 2.0 +252 5 model.embedding_dim 2.0 +252 5 optimizer.lr 0.014411249332201833 +252 5 negative_sampler.num_negs_per_pos 44.0 +252 5 training.batch_size 2.0 +252 6 model.embedding_dim 2.0 +252 6 optimizer.lr 0.003959314363233087 +252 6 negative_sampler.num_negs_per_pos 30.0 +252 6 training.batch_size 0.0 +252 7 model.embedding_dim 2.0 +252 7 optimizer.lr 0.01041045719812789 +252 7 negative_sampler.num_negs_per_pos 10.0 +252 7 training.batch_size 2.0 +252 8 model.embedding_dim 0.0 +252 8 optimizer.lr 0.06973544995247431 +252 8 
negative_sampler.num_negs_per_pos 54.0 +252 8 training.batch_size 0.0 +252 9 model.embedding_dim 0.0 +252 9 optimizer.lr 0.00400431136332096 +252 9 negative_sampler.num_negs_per_pos 82.0 +252 9 training.batch_size 0.0 +252 10 model.embedding_dim 0.0 +252 10 optimizer.lr 0.0012492798041479117 +252 10 negative_sampler.num_negs_per_pos 20.0 +252 10 training.batch_size 2.0 +252 11 model.embedding_dim 1.0 +252 11 optimizer.lr 0.005462370985924373 +252 11 negative_sampler.num_negs_per_pos 39.0 +252 11 training.batch_size 2.0 +252 12 model.embedding_dim 2.0 +252 12 optimizer.lr 0.013054420942635037 +252 12 negative_sampler.num_negs_per_pos 82.0 +252 12 training.batch_size 0.0 +252 13 model.embedding_dim 0.0 +252 13 optimizer.lr 0.07429912816373303 +252 13 negative_sampler.num_negs_per_pos 71.0 +252 13 training.batch_size 2.0 +252 14 model.embedding_dim 0.0 +252 14 optimizer.lr 0.0516055309540952 +252 14 negative_sampler.num_negs_per_pos 18.0 +252 14 training.batch_size 2.0 +252 15 model.embedding_dim 2.0 +252 15 optimizer.lr 0.004360370493734431 +252 15 negative_sampler.num_negs_per_pos 57.0 +252 15 training.batch_size 0.0 +252 16 model.embedding_dim 0.0 +252 16 optimizer.lr 0.02437913329590699 +252 16 negative_sampler.num_negs_per_pos 73.0 +252 16 training.batch_size 0.0 +252 17 model.embedding_dim 0.0 +252 17 optimizer.lr 0.033219552292163446 +252 17 negative_sampler.num_negs_per_pos 70.0 +252 17 training.batch_size 0.0 +252 18 model.embedding_dim 0.0 +252 18 optimizer.lr 0.02220609079486332 +252 18 negative_sampler.num_negs_per_pos 33.0 +252 18 training.batch_size 0.0 +252 19 model.embedding_dim 1.0 +252 19 optimizer.lr 0.0031696973009275493 +252 19 negative_sampler.num_negs_per_pos 97.0 +252 19 training.batch_size 2.0 +252 20 model.embedding_dim 2.0 +252 20 optimizer.lr 0.07394434806758132 +252 20 negative_sampler.num_negs_per_pos 97.0 +252 20 training.batch_size 1.0 +252 21 model.embedding_dim 2.0 +252 21 optimizer.lr 0.03202167464079554 +252 21 
negative_sampler.num_negs_per_pos 1.0 +252 21 training.batch_size 0.0 +252 22 model.embedding_dim 0.0 +252 22 optimizer.lr 0.007354852541674061 +252 22 negative_sampler.num_negs_per_pos 66.0 +252 22 training.batch_size 0.0 +252 23 model.embedding_dim 2.0 +252 23 optimizer.lr 0.0052767238981605204 +252 23 negative_sampler.num_negs_per_pos 70.0 +252 23 training.batch_size 0.0 +252 24 model.embedding_dim 2.0 +252 24 optimizer.lr 0.014514063150586677 +252 24 negative_sampler.num_negs_per_pos 52.0 +252 24 training.batch_size 2.0 +252 25 model.embedding_dim 0.0 +252 25 optimizer.lr 0.001319945708388269 +252 25 negative_sampler.num_negs_per_pos 6.0 +252 25 training.batch_size 1.0 +252 26 model.embedding_dim 1.0 +252 26 optimizer.lr 0.025619356251300514 +252 26 negative_sampler.num_negs_per_pos 86.0 +252 26 training.batch_size 0.0 +252 27 model.embedding_dim 2.0 +252 27 optimizer.lr 0.003032439080941804 +252 27 negative_sampler.num_negs_per_pos 78.0 +252 27 training.batch_size 1.0 +252 28 model.embedding_dim 2.0 +252 28 optimizer.lr 0.08540332205110564 +252 28 negative_sampler.num_negs_per_pos 70.0 +252 28 training.batch_size 1.0 +252 29 model.embedding_dim 1.0 +252 29 optimizer.lr 0.028964320087551548 +252 29 negative_sampler.num_negs_per_pos 55.0 +252 29 training.batch_size 1.0 +252 30 model.embedding_dim 1.0 +252 30 optimizer.lr 0.007795082530557864 +252 30 negative_sampler.num_negs_per_pos 68.0 +252 30 training.batch_size 0.0 +252 31 model.embedding_dim 1.0 +252 31 optimizer.lr 0.07048527128418489 +252 31 negative_sampler.num_negs_per_pos 97.0 +252 31 training.batch_size 2.0 +252 32 model.embedding_dim 0.0 +252 32 optimizer.lr 0.013065122588383738 +252 32 negative_sampler.num_negs_per_pos 83.0 +252 32 training.batch_size 0.0 +252 33 model.embedding_dim 1.0 +252 33 optimizer.lr 0.0030797842255111956 +252 33 negative_sampler.num_negs_per_pos 53.0 +252 33 training.batch_size 2.0 +252 34 model.embedding_dim 1.0 +252 34 optimizer.lr 0.03920888139677352 +252 34 
negative_sampler.num_negs_per_pos 28.0 +252 34 training.batch_size 1.0 +252 35 model.embedding_dim 2.0 +252 35 optimizer.lr 0.05234850012295355 +252 35 negative_sampler.num_negs_per_pos 17.0 +252 35 training.batch_size 1.0 +252 36 model.embedding_dim 2.0 +252 36 optimizer.lr 0.08569918993496503 +252 36 negative_sampler.num_negs_per_pos 41.0 +252 36 training.batch_size 2.0 +252 37 model.embedding_dim 2.0 +252 37 optimizer.lr 0.04714912861683246 +252 37 negative_sampler.num_negs_per_pos 53.0 +252 37 training.batch_size 0.0 +252 38 model.embedding_dim 0.0 +252 38 optimizer.lr 0.02077574809673528 +252 38 negative_sampler.num_negs_per_pos 25.0 +252 38 training.batch_size 0.0 +252 39 model.embedding_dim 0.0 +252 39 optimizer.lr 0.05849506292666384 +252 39 negative_sampler.num_negs_per_pos 85.0 +252 39 training.batch_size 1.0 +252 40 model.embedding_dim 2.0 +252 40 optimizer.lr 0.014464916599509065 +252 40 negative_sampler.num_negs_per_pos 94.0 +252 40 training.batch_size 2.0 +252 41 model.embedding_dim 1.0 +252 41 optimizer.lr 0.00542623496069656 +252 41 negative_sampler.num_negs_per_pos 40.0 +252 41 training.batch_size 2.0 +252 42 model.embedding_dim 2.0 +252 42 optimizer.lr 0.01935595971120392 +252 42 negative_sampler.num_negs_per_pos 3.0 +252 42 training.batch_size 1.0 +252 43 model.embedding_dim 1.0 +252 43 optimizer.lr 0.0027799365613122946 +252 43 negative_sampler.num_negs_per_pos 83.0 +252 43 training.batch_size 0.0 +252 44 model.embedding_dim 2.0 +252 44 optimizer.lr 0.007808654574490743 +252 44 negative_sampler.num_negs_per_pos 40.0 +252 44 training.batch_size 1.0 +252 45 model.embedding_dim 1.0 +252 45 optimizer.lr 0.01249300766672931 +252 45 negative_sampler.num_negs_per_pos 67.0 +252 45 training.batch_size 1.0 +252 46 model.embedding_dim 1.0 +252 46 optimizer.lr 0.0037672511271551923 +252 46 negative_sampler.num_negs_per_pos 39.0 +252 46 training.batch_size 0.0 +252 47 model.embedding_dim 0.0 +252 47 optimizer.lr 0.006921644193554742 +252 47 
negative_sampler.num_negs_per_pos 86.0 +252 47 training.batch_size 2.0 +252 48 model.embedding_dim 0.0 +252 48 optimizer.lr 0.010855599762166069 +252 48 negative_sampler.num_negs_per_pos 46.0 +252 48 training.batch_size 1.0 +252 49 model.embedding_dim 0.0 +252 49 optimizer.lr 0.0320898499720598 +252 49 negative_sampler.num_negs_per_pos 71.0 +252 49 training.batch_size 2.0 +252 50 model.embedding_dim 0.0 +252 50 optimizer.lr 0.0065714999557774795 +252 50 negative_sampler.num_negs_per_pos 97.0 +252 50 training.batch_size 2.0 +252 51 model.embedding_dim 0.0 +252 51 optimizer.lr 0.06846155025643955 +252 51 negative_sampler.num_negs_per_pos 40.0 +252 51 training.batch_size 2.0 +252 52 model.embedding_dim 0.0 +252 52 optimizer.lr 0.004846763200782989 +252 52 negative_sampler.num_negs_per_pos 79.0 +252 52 training.batch_size 0.0 +252 53 model.embedding_dim 1.0 +252 53 optimizer.lr 0.03057942153107401 +252 53 negative_sampler.num_negs_per_pos 63.0 +252 53 training.batch_size 1.0 +252 54 model.embedding_dim 0.0 +252 54 optimizer.lr 0.002516948619859286 +252 54 negative_sampler.num_negs_per_pos 87.0 +252 54 training.batch_size 1.0 +252 55 model.embedding_dim 2.0 +252 55 optimizer.lr 0.02890065576125889 +252 55 negative_sampler.num_negs_per_pos 34.0 +252 55 training.batch_size 2.0 +252 56 model.embedding_dim 2.0 +252 56 optimizer.lr 0.0168415926581871 +252 56 negative_sampler.num_negs_per_pos 97.0 +252 56 training.batch_size 2.0 +252 57 model.embedding_dim 1.0 +252 57 optimizer.lr 0.018946083683389993 +252 57 negative_sampler.num_negs_per_pos 80.0 +252 57 training.batch_size 0.0 +252 58 model.embedding_dim 0.0 +252 58 optimizer.lr 0.026417638567507496 +252 58 negative_sampler.num_negs_per_pos 17.0 +252 58 training.batch_size 2.0 +252 59 model.embedding_dim 1.0 +252 59 optimizer.lr 0.0012521702632805848 +252 59 negative_sampler.num_negs_per_pos 70.0 +252 59 training.batch_size 1.0 +252 60 model.embedding_dim 2.0 +252 60 optimizer.lr 0.014441286477600845 +252 60 
negative_sampler.num_negs_per_pos 14.0 +252 60 training.batch_size 2.0 +252 61 model.embedding_dim 2.0 +252 61 optimizer.lr 0.04467918511740085 +252 61 negative_sampler.num_negs_per_pos 98.0 +252 61 training.batch_size 2.0 +252 62 model.embedding_dim 2.0 +252 62 optimizer.lr 0.021298365883278908 +252 62 negative_sampler.num_negs_per_pos 80.0 +252 62 training.batch_size 0.0 +252 63 model.embedding_dim 1.0 +252 63 optimizer.lr 0.09248434055421527 +252 63 negative_sampler.num_negs_per_pos 49.0 +252 63 training.batch_size 2.0 +252 64 model.embedding_dim 0.0 +252 64 optimizer.lr 0.011646838880395223 +252 64 negative_sampler.num_negs_per_pos 26.0 +252 64 training.batch_size 1.0 +252 65 model.embedding_dim 1.0 +252 65 optimizer.lr 0.0049482191775655935 +252 65 negative_sampler.num_negs_per_pos 76.0 +252 65 training.batch_size 2.0 +252 66 model.embedding_dim 1.0 +252 66 optimizer.lr 0.003308606258159383 +252 66 negative_sampler.num_negs_per_pos 85.0 +252 66 training.batch_size 2.0 +252 67 model.embedding_dim 2.0 +252 67 optimizer.lr 0.06706802492112894 +252 67 negative_sampler.num_negs_per_pos 87.0 +252 67 training.batch_size 1.0 +252 68 model.embedding_dim 2.0 +252 68 optimizer.lr 0.001167584547495623 +252 68 negative_sampler.num_negs_per_pos 93.0 +252 68 training.batch_size 2.0 +252 69 model.embedding_dim 2.0 +252 69 optimizer.lr 0.05961636679117615 +252 69 negative_sampler.num_negs_per_pos 48.0 +252 69 training.batch_size 0.0 +252 70 model.embedding_dim 1.0 +252 70 optimizer.lr 0.027983057239802222 +252 70 negative_sampler.num_negs_per_pos 49.0 +252 70 training.batch_size 0.0 +252 71 model.embedding_dim 2.0 +252 71 optimizer.lr 0.001074167845976409 +252 71 negative_sampler.num_negs_per_pos 86.0 +252 71 training.batch_size 1.0 +252 72 model.embedding_dim 0.0 +252 72 optimizer.lr 0.015086184918235462 +252 72 negative_sampler.num_negs_per_pos 68.0 +252 72 training.batch_size 2.0 +252 73 model.embedding_dim 2.0 +252 73 optimizer.lr 0.003040317071556112 +252 73 
negative_sampler.num_negs_per_pos 42.0 +252 73 training.batch_size 1.0 +252 74 model.embedding_dim 0.0 +252 74 optimizer.lr 0.0042184405350696105 +252 74 negative_sampler.num_negs_per_pos 35.0 +252 74 training.batch_size 2.0 +252 75 model.embedding_dim 0.0 +252 75 optimizer.lr 0.002075129501569133 +252 75 negative_sampler.num_negs_per_pos 25.0 +252 75 training.batch_size 1.0 +252 76 model.embedding_dim 0.0 +252 76 optimizer.lr 0.002731191158991678 +252 76 negative_sampler.num_negs_per_pos 66.0 +252 76 training.batch_size 2.0 +252 77 model.embedding_dim 2.0 +252 77 optimizer.lr 0.0018936807217922508 +252 77 negative_sampler.num_negs_per_pos 21.0 +252 77 training.batch_size 0.0 +252 78 model.embedding_dim 1.0 +252 78 optimizer.lr 0.023724129051782426 +252 78 negative_sampler.num_negs_per_pos 83.0 +252 78 training.batch_size 0.0 +252 79 model.embedding_dim 2.0 +252 79 optimizer.lr 0.0012143258433136533 +252 79 negative_sampler.num_negs_per_pos 47.0 +252 79 training.batch_size 1.0 +252 80 model.embedding_dim 1.0 +252 80 optimizer.lr 0.0059388679165009615 +252 80 negative_sampler.num_negs_per_pos 14.0 +252 80 training.batch_size 2.0 +252 81 model.embedding_dim 2.0 +252 81 optimizer.lr 0.06602285423869668 +252 81 negative_sampler.num_negs_per_pos 40.0 +252 81 training.batch_size 1.0 +252 82 model.embedding_dim 2.0 +252 82 optimizer.lr 0.005452493416493271 +252 82 negative_sampler.num_negs_per_pos 88.0 +252 82 training.batch_size 2.0 +252 83 model.embedding_dim 2.0 +252 83 optimizer.lr 0.006469289959182253 +252 83 negative_sampler.num_negs_per_pos 9.0 +252 83 training.batch_size 1.0 +252 84 model.embedding_dim 1.0 +252 84 optimizer.lr 0.008494321144272142 +252 84 negative_sampler.num_negs_per_pos 61.0 +252 84 training.batch_size 2.0 +252 85 model.embedding_dim 1.0 +252 85 optimizer.lr 0.019926514446106672 +252 85 negative_sampler.num_negs_per_pos 71.0 +252 85 training.batch_size 1.0 +252 86 model.embedding_dim 1.0 +252 86 optimizer.lr 0.028905222057302372 +252 86 
negative_sampler.num_negs_per_pos 54.0 +252 86 training.batch_size 2.0 +252 87 model.embedding_dim 1.0 +252 87 optimizer.lr 0.09121854672525365 +252 87 negative_sampler.num_negs_per_pos 89.0 +252 87 training.batch_size 2.0 +252 88 model.embedding_dim 2.0 +252 88 optimizer.lr 0.004294713284558423 +252 88 negative_sampler.num_negs_per_pos 42.0 +252 88 training.batch_size 1.0 +252 89 model.embedding_dim 2.0 +252 89 optimizer.lr 0.014307238658374674 +252 89 negative_sampler.num_negs_per_pos 34.0 +252 89 training.batch_size 2.0 +252 90 model.embedding_dim 2.0 +252 90 optimizer.lr 0.05878140225155444 +252 90 negative_sampler.num_negs_per_pos 63.0 +252 90 training.batch_size 1.0 +252 91 model.embedding_dim 1.0 +252 91 optimizer.lr 0.021012863598507847 +252 91 negative_sampler.num_negs_per_pos 94.0 +252 91 training.batch_size 2.0 +252 92 model.embedding_dim 0.0 +252 92 optimizer.lr 0.0887561073845056 +252 92 negative_sampler.num_negs_per_pos 12.0 +252 92 training.batch_size 1.0 +252 93 model.embedding_dim 1.0 +252 93 optimizer.lr 0.0012187125012648131 +252 93 negative_sampler.num_negs_per_pos 33.0 +252 93 training.batch_size 1.0 +252 94 model.embedding_dim 1.0 +252 94 optimizer.lr 0.00568714747191252 +252 94 negative_sampler.num_negs_per_pos 48.0 +252 94 training.batch_size 1.0 +252 95 model.embedding_dim 2.0 +252 95 optimizer.lr 0.054009667567790805 +252 95 negative_sampler.num_negs_per_pos 63.0 +252 95 training.batch_size 2.0 +252 96 model.embedding_dim 0.0 +252 96 optimizer.lr 0.005711116732826573 +252 96 negative_sampler.num_negs_per_pos 56.0 +252 96 training.batch_size 0.0 +252 97 model.embedding_dim 2.0 +252 97 optimizer.lr 0.012228384084732187 +252 97 negative_sampler.num_negs_per_pos 84.0 +252 97 training.batch_size 1.0 +252 98 model.embedding_dim 1.0 +252 98 optimizer.lr 0.00477112734106742 +252 98 negative_sampler.num_negs_per_pos 95.0 +252 98 training.batch_size 0.0 +252 99 model.embedding_dim 2.0 +252 99 optimizer.lr 0.0012061380081370143 +252 99 
negative_sampler.num_negs_per_pos 27.0 +252 99 training.batch_size 1.0 +252 100 model.embedding_dim 0.0 +252 100 optimizer.lr 0.0028907211505936007 +252 100 negative_sampler.num_negs_per_pos 61.0 +252 100 training.batch_size 0.0 +252 1 dataset """kinships""" +252 1 model """ermlp""" +252 1 loss """bceaftersigmoid""" +252 1 regularizer """no""" +252 1 optimizer """adam""" +252 1 training_loop """owa""" +252 1 negative_sampler """basic""" +252 1 evaluator """rankbased""" +252 2 dataset """kinships""" +252 2 model """ermlp""" +252 2 loss """bceaftersigmoid""" +252 2 regularizer """no""" +252 2 optimizer """adam""" +252 2 training_loop """owa""" +252 2 negative_sampler """basic""" +252 2 evaluator """rankbased""" +252 3 dataset """kinships""" +252 3 model """ermlp""" +252 3 loss """bceaftersigmoid""" +252 3 regularizer """no""" +252 3 optimizer """adam""" +252 3 training_loop """owa""" +252 3 negative_sampler """basic""" +252 3 evaluator """rankbased""" +252 4 dataset """kinships""" +252 4 model """ermlp""" +252 4 loss """bceaftersigmoid""" +252 4 regularizer """no""" +252 4 optimizer """adam""" +252 4 training_loop """owa""" +252 4 negative_sampler """basic""" +252 4 evaluator """rankbased""" +252 5 dataset """kinships""" +252 5 model """ermlp""" +252 5 loss """bceaftersigmoid""" +252 5 regularizer """no""" +252 5 optimizer """adam""" +252 5 training_loop """owa""" +252 5 negative_sampler """basic""" +252 5 evaluator """rankbased""" +252 6 dataset """kinships""" +252 6 model """ermlp""" +252 6 loss """bceaftersigmoid""" +252 6 regularizer """no""" +252 6 optimizer """adam""" +252 6 training_loop """owa""" +252 6 negative_sampler """basic""" +252 6 evaluator """rankbased""" +252 7 dataset """kinships""" +252 7 model """ermlp""" +252 7 loss """bceaftersigmoid""" +252 7 regularizer """no""" +252 7 optimizer """adam""" +252 7 training_loop """owa""" +252 7 negative_sampler """basic""" +252 7 evaluator """rankbased""" +252 8 dataset """kinships""" +252 8 model """ermlp""" 
+252 8 loss """bceaftersigmoid""" +252 8 regularizer """no""" +252 8 optimizer """adam""" +252 8 training_loop """owa""" +252 8 negative_sampler """basic""" +252 8 evaluator """rankbased""" +252 9 dataset """kinships""" +252 9 model """ermlp""" +252 9 loss """bceaftersigmoid""" +252 9 regularizer """no""" +252 9 optimizer """adam""" +252 9 training_loop """owa""" +252 9 negative_sampler """basic""" +252 9 evaluator """rankbased""" +252 10 dataset """kinships""" +252 10 model """ermlp""" +252 10 loss """bceaftersigmoid""" +252 10 regularizer """no""" +252 10 optimizer """adam""" +252 10 training_loop """owa""" +252 10 negative_sampler """basic""" +252 10 evaluator """rankbased""" +252 11 dataset """kinships""" +252 11 model """ermlp""" +252 11 loss """bceaftersigmoid""" +252 11 regularizer """no""" +252 11 optimizer """adam""" +252 11 training_loop """owa""" +252 11 negative_sampler """basic""" +252 11 evaluator """rankbased""" +252 12 dataset """kinships""" +252 12 model """ermlp""" +252 12 loss """bceaftersigmoid""" +252 12 regularizer """no""" +252 12 optimizer """adam""" +252 12 training_loop """owa""" +252 12 negative_sampler """basic""" +252 12 evaluator """rankbased""" +252 13 dataset """kinships""" +252 13 model """ermlp""" +252 13 loss """bceaftersigmoid""" +252 13 regularizer """no""" +252 13 optimizer """adam""" +252 13 training_loop """owa""" +252 13 negative_sampler """basic""" +252 13 evaluator """rankbased""" +252 14 dataset """kinships""" +252 14 model """ermlp""" +252 14 loss """bceaftersigmoid""" +252 14 regularizer """no""" +252 14 optimizer """adam""" +252 14 training_loop """owa""" +252 14 negative_sampler """basic""" +252 14 evaluator """rankbased""" +252 15 dataset """kinships""" +252 15 model """ermlp""" +252 15 loss """bceaftersigmoid""" +252 15 regularizer """no""" +252 15 optimizer """adam""" +252 15 training_loop """owa""" +252 15 negative_sampler """basic""" +252 15 evaluator """rankbased""" +252 16 dataset """kinships""" +252 16 model 
"""ermlp""" +252 16 loss """bceaftersigmoid""" +252 16 regularizer """no""" +252 16 optimizer """adam""" +252 16 training_loop """owa""" +252 16 negative_sampler """basic""" +252 16 evaluator """rankbased""" +252 17 dataset """kinships""" +252 17 model """ermlp""" +252 17 loss """bceaftersigmoid""" +252 17 regularizer """no""" +252 17 optimizer """adam""" +252 17 training_loop """owa""" +252 17 negative_sampler """basic""" +252 17 evaluator """rankbased""" +252 18 dataset """kinships""" +252 18 model """ermlp""" +252 18 loss """bceaftersigmoid""" +252 18 regularizer """no""" +252 18 optimizer """adam""" +252 18 training_loop """owa""" +252 18 negative_sampler """basic""" +252 18 evaluator """rankbased""" +252 19 dataset """kinships""" +252 19 model """ermlp""" +252 19 loss """bceaftersigmoid""" +252 19 regularizer """no""" +252 19 optimizer """adam""" +252 19 training_loop """owa""" +252 19 negative_sampler """basic""" +252 19 evaluator """rankbased""" +252 20 dataset """kinships""" +252 20 model """ermlp""" +252 20 loss """bceaftersigmoid""" +252 20 regularizer """no""" +252 20 optimizer """adam""" +252 20 training_loop """owa""" +252 20 negative_sampler """basic""" +252 20 evaluator """rankbased""" +252 21 dataset """kinships""" +252 21 model """ermlp""" +252 21 loss """bceaftersigmoid""" +252 21 regularizer """no""" +252 21 optimizer """adam""" +252 21 training_loop """owa""" +252 21 negative_sampler """basic""" +252 21 evaluator """rankbased""" +252 22 dataset """kinships""" +252 22 model """ermlp""" +252 22 loss """bceaftersigmoid""" +252 22 regularizer """no""" +252 22 optimizer """adam""" +252 22 training_loop """owa""" +252 22 negative_sampler """basic""" +252 22 evaluator """rankbased""" +252 23 dataset """kinships""" +252 23 model """ermlp""" +252 23 loss """bceaftersigmoid""" +252 23 regularizer """no""" +252 23 optimizer """adam""" +252 23 training_loop """owa""" +252 23 negative_sampler """basic""" +252 23 evaluator """rankbased""" +252 24 dataset 
"""kinships""" +252 24 model """ermlp""" +252 24 loss """bceaftersigmoid""" +252 24 regularizer """no""" +252 24 optimizer """adam""" +252 24 training_loop """owa""" +252 24 negative_sampler """basic""" +252 24 evaluator """rankbased""" +252 25 dataset """kinships""" +252 25 model """ermlp""" +252 25 loss """bceaftersigmoid""" +252 25 regularizer """no""" +252 25 optimizer """adam""" +252 25 training_loop """owa""" +252 25 negative_sampler """basic""" +252 25 evaluator """rankbased""" +252 26 dataset """kinships""" +252 26 model """ermlp""" +252 26 loss """bceaftersigmoid""" +252 26 regularizer """no""" +252 26 optimizer """adam""" +252 26 training_loop """owa""" +252 26 negative_sampler """basic""" +252 26 evaluator """rankbased""" +252 27 dataset """kinships""" +252 27 model """ermlp""" +252 27 loss """bceaftersigmoid""" +252 27 regularizer """no""" +252 27 optimizer """adam""" +252 27 training_loop """owa""" +252 27 negative_sampler """basic""" +252 27 evaluator """rankbased""" +252 28 dataset """kinships""" +252 28 model """ermlp""" +252 28 loss """bceaftersigmoid""" +252 28 regularizer """no""" +252 28 optimizer """adam""" +252 28 training_loop """owa""" +252 28 negative_sampler """basic""" +252 28 evaluator """rankbased""" +252 29 dataset """kinships""" +252 29 model """ermlp""" +252 29 loss """bceaftersigmoid""" +252 29 regularizer """no""" +252 29 optimizer """adam""" +252 29 training_loop """owa""" +252 29 negative_sampler """basic""" +252 29 evaluator """rankbased""" +252 30 dataset """kinships""" +252 30 model """ermlp""" +252 30 loss """bceaftersigmoid""" +252 30 regularizer """no""" +252 30 optimizer """adam""" +252 30 training_loop """owa""" +252 30 negative_sampler """basic""" +252 30 evaluator """rankbased""" +252 31 dataset """kinships""" +252 31 model """ermlp""" +252 31 loss """bceaftersigmoid""" +252 31 regularizer """no""" +252 31 optimizer """adam""" +252 31 training_loop """owa""" +252 31 negative_sampler """basic""" +252 31 evaluator 
"""rankbased""" +252 32 dataset """kinships""" +252 32 model """ermlp""" +252 32 loss """bceaftersigmoid""" +252 32 regularizer """no""" +252 32 optimizer """adam""" +252 32 training_loop """owa""" +252 32 negative_sampler """basic""" +252 32 evaluator """rankbased""" +252 33 dataset """kinships""" +252 33 model """ermlp""" +252 33 loss """bceaftersigmoid""" +252 33 regularizer """no""" +252 33 optimizer """adam""" +252 33 training_loop """owa""" +252 33 negative_sampler """basic""" +252 33 evaluator """rankbased""" +252 34 dataset """kinships""" +252 34 model """ermlp""" +252 34 loss """bceaftersigmoid""" +252 34 regularizer """no""" +252 34 optimizer """adam""" +252 34 training_loop """owa""" +252 34 negative_sampler """basic""" +252 34 evaluator """rankbased""" +252 35 dataset """kinships""" +252 35 model """ermlp""" +252 35 loss """bceaftersigmoid""" +252 35 regularizer """no""" +252 35 optimizer """adam""" +252 35 training_loop """owa""" +252 35 negative_sampler """basic""" +252 35 evaluator """rankbased""" +252 36 dataset """kinships""" +252 36 model """ermlp""" +252 36 loss """bceaftersigmoid""" +252 36 regularizer """no""" +252 36 optimizer """adam""" +252 36 training_loop """owa""" +252 36 negative_sampler """basic""" +252 36 evaluator """rankbased""" +252 37 dataset """kinships""" +252 37 model """ermlp""" +252 37 loss """bceaftersigmoid""" +252 37 regularizer """no""" +252 37 optimizer """adam""" +252 37 training_loop """owa""" +252 37 negative_sampler """basic""" +252 37 evaluator """rankbased""" +252 38 dataset """kinships""" +252 38 model """ermlp""" +252 38 loss """bceaftersigmoid""" +252 38 regularizer """no""" +252 38 optimizer """adam""" +252 38 training_loop """owa""" +252 38 negative_sampler """basic""" +252 38 evaluator """rankbased""" +252 39 dataset """kinships""" +252 39 model """ermlp""" +252 39 loss """bceaftersigmoid""" +252 39 regularizer """no""" +252 39 optimizer """adam""" +252 39 training_loop """owa""" +252 39 negative_sampler 
"""basic""" +252 39 evaluator """rankbased""" +252 40 dataset """kinships""" +252 40 model """ermlp""" +252 40 loss """bceaftersigmoid""" +252 40 regularizer """no""" +252 40 optimizer """adam""" +252 40 training_loop """owa""" +252 40 negative_sampler """basic""" +252 40 evaluator """rankbased""" +252 41 dataset """kinships""" +252 41 model """ermlp""" +252 41 loss """bceaftersigmoid""" +252 41 regularizer """no""" +252 41 optimizer """adam""" +252 41 training_loop """owa""" +252 41 negative_sampler """basic""" +252 41 evaluator """rankbased""" +252 42 dataset """kinships""" +252 42 model """ermlp""" +252 42 loss """bceaftersigmoid""" +252 42 regularizer """no""" +252 42 optimizer """adam""" +252 42 training_loop """owa""" +252 42 negative_sampler """basic""" +252 42 evaluator """rankbased""" +252 43 dataset """kinships""" +252 43 model """ermlp""" +252 43 loss """bceaftersigmoid""" +252 43 regularizer """no""" +252 43 optimizer """adam""" +252 43 training_loop """owa""" +252 43 negative_sampler """basic""" +252 43 evaluator """rankbased""" +252 44 dataset """kinships""" +252 44 model """ermlp""" +252 44 loss """bceaftersigmoid""" +252 44 regularizer """no""" +252 44 optimizer """adam""" +252 44 training_loop """owa""" +252 44 negative_sampler """basic""" +252 44 evaluator """rankbased""" +252 45 dataset """kinships""" +252 45 model """ermlp""" +252 45 loss """bceaftersigmoid""" +252 45 regularizer """no""" +252 45 optimizer """adam""" +252 45 training_loop """owa""" +252 45 negative_sampler """basic""" +252 45 evaluator """rankbased""" +252 46 dataset """kinships""" +252 46 model """ermlp""" +252 46 loss """bceaftersigmoid""" +252 46 regularizer """no""" +252 46 optimizer """adam""" +252 46 training_loop """owa""" +252 46 negative_sampler """basic""" +252 46 evaluator """rankbased""" +252 47 dataset """kinships""" +252 47 model """ermlp""" +252 47 loss """bceaftersigmoid""" +252 47 regularizer """no""" +252 47 optimizer """adam""" +252 47 training_loop """owa""" 
+252 47 negative_sampler """basic""" +252 47 evaluator """rankbased""" +252 48 dataset """kinships""" +252 48 model """ermlp""" +252 48 loss """bceaftersigmoid""" +252 48 regularizer """no""" +252 48 optimizer """adam""" +252 48 training_loop """owa""" +252 48 negative_sampler """basic""" +252 48 evaluator """rankbased""" +252 49 dataset """kinships""" +252 49 model """ermlp""" +252 49 loss """bceaftersigmoid""" +252 49 regularizer """no""" +252 49 optimizer """adam""" +252 49 training_loop """owa""" +252 49 negative_sampler """basic""" +252 49 evaluator """rankbased""" +252 50 dataset """kinships""" +252 50 model """ermlp""" +252 50 loss """bceaftersigmoid""" +252 50 regularizer """no""" +252 50 optimizer """adam""" +252 50 training_loop """owa""" +252 50 negative_sampler """basic""" +252 50 evaluator """rankbased""" +252 51 dataset """kinships""" +252 51 model """ermlp""" +252 51 loss """bceaftersigmoid""" +252 51 regularizer """no""" +252 51 optimizer """adam""" +252 51 training_loop """owa""" +252 51 negative_sampler """basic""" +252 51 evaluator """rankbased""" +252 52 dataset """kinships""" +252 52 model """ermlp""" +252 52 loss """bceaftersigmoid""" +252 52 regularizer """no""" +252 52 optimizer """adam""" +252 52 training_loop """owa""" +252 52 negative_sampler """basic""" +252 52 evaluator """rankbased""" +252 53 dataset """kinships""" +252 53 model """ermlp""" +252 53 loss """bceaftersigmoid""" +252 53 regularizer """no""" +252 53 optimizer """adam""" +252 53 training_loop """owa""" +252 53 negative_sampler """basic""" +252 53 evaluator """rankbased""" +252 54 dataset """kinships""" +252 54 model """ermlp""" +252 54 loss """bceaftersigmoid""" +252 54 regularizer """no""" +252 54 optimizer """adam""" +252 54 training_loop """owa""" +252 54 negative_sampler """basic""" +252 54 evaluator """rankbased""" +252 55 dataset """kinships""" +252 55 model """ermlp""" +252 55 loss """bceaftersigmoid""" +252 55 regularizer """no""" +252 55 optimizer """adam""" +252 55 
training_loop """owa""" +252 55 negative_sampler """basic""" +252 55 evaluator """rankbased""" +252 56 dataset """kinships""" +252 56 model """ermlp""" +252 56 loss """bceaftersigmoid""" +252 56 regularizer """no""" +252 56 optimizer """adam""" +252 56 training_loop """owa""" +252 56 negative_sampler """basic""" +252 56 evaluator """rankbased""" +252 57 dataset """kinships""" +252 57 model """ermlp""" +252 57 loss """bceaftersigmoid""" +252 57 regularizer """no""" +252 57 optimizer """adam""" +252 57 training_loop """owa""" +252 57 negative_sampler """basic""" +252 57 evaluator """rankbased""" +252 58 dataset """kinships""" +252 58 model """ermlp""" +252 58 loss """bceaftersigmoid""" +252 58 regularizer """no""" +252 58 optimizer """adam""" +252 58 training_loop """owa""" +252 58 negative_sampler """basic""" +252 58 evaluator """rankbased""" +252 59 dataset """kinships""" +252 59 model """ermlp""" +252 59 loss """bceaftersigmoid""" +252 59 regularizer """no""" +252 59 optimizer """adam""" +252 59 training_loop """owa""" +252 59 negative_sampler """basic""" +252 59 evaluator """rankbased""" +252 60 dataset """kinships""" +252 60 model """ermlp""" +252 60 loss """bceaftersigmoid""" +252 60 regularizer """no""" +252 60 optimizer """adam""" +252 60 training_loop """owa""" +252 60 negative_sampler """basic""" +252 60 evaluator """rankbased""" +252 61 dataset """kinships""" +252 61 model """ermlp""" +252 61 loss """bceaftersigmoid""" +252 61 regularizer """no""" +252 61 optimizer """adam""" +252 61 training_loop """owa""" +252 61 negative_sampler """basic""" +252 61 evaluator """rankbased""" +252 62 dataset """kinships""" +252 62 model """ermlp""" +252 62 loss """bceaftersigmoid""" +252 62 regularizer """no""" +252 62 optimizer """adam""" +252 62 training_loop """owa""" +252 62 negative_sampler """basic""" +252 62 evaluator """rankbased""" +252 63 dataset """kinships""" +252 63 model """ermlp""" +252 63 loss """bceaftersigmoid""" +252 63 regularizer """no""" +252 63 
optimizer """adam""" +252 63 training_loop """owa""" +252 63 negative_sampler """basic""" +252 63 evaluator """rankbased""" +252 64 dataset """kinships""" +252 64 model """ermlp""" +252 64 loss """bceaftersigmoid""" +252 64 regularizer """no""" +252 64 optimizer """adam""" +252 64 training_loop """owa""" +252 64 negative_sampler """basic""" +252 64 evaluator """rankbased""" +252 65 dataset """kinships""" +252 65 model """ermlp""" +252 65 loss """bceaftersigmoid""" +252 65 regularizer """no""" +252 65 optimizer """adam""" +252 65 training_loop """owa""" +252 65 negative_sampler """basic""" +252 65 evaluator """rankbased""" +252 66 dataset """kinships""" +252 66 model """ermlp""" +252 66 loss """bceaftersigmoid""" +252 66 regularizer """no""" +252 66 optimizer """adam""" +252 66 training_loop """owa""" +252 66 negative_sampler """basic""" +252 66 evaluator """rankbased""" +252 67 dataset """kinships""" +252 67 model """ermlp""" +252 67 loss """bceaftersigmoid""" +252 67 regularizer """no""" +252 67 optimizer """adam""" +252 67 training_loop """owa""" +252 67 negative_sampler """basic""" +252 67 evaluator """rankbased""" +252 68 dataset """kinships""" +252 68 model """ermlp""" +252 68 loss """bceaftersigmoid""" +252 68 regularizer """no""" +252 68 optimizer """adam""" +252 68 training_loop """owa""" +252 68 negative_sampler """basic""" +252 68 evaluator """rankbased""" +252 69 dataset """kinships""" +252 69 model """ermlp""" +252 69 loss """bceaftersigmoid""" +252 69 regularizer """no""" +252 69 optimizer """adam""" +252 69 training_loop """owa""" +252 69 negative_sampler """basic""" +252 69 evaluator """rankbased""" +252 70 dataset """kinships""" +252 70 model """ermlp""" +252 70 loss """bceaftersigmoid""" +252 70 regularizer """no""" +252 70 optimizer """adam""" +252 70 training_loop """owa""" +252 70 negative_sampler """basic""" +252 70 evaluator """rankbased""" +252 71 dataset """kinships""" +252 71 model """ermlp""" +252 71 loss """bceaftersigmoid""" +252 71 
regularizer """no""" +252 71 optimizer """adam""" +252 71 training_loop """owa""" +252 71 negative_sampler """basic""" +252 71 evaluator """rankbased""" +252 72 dataset """kinships""" +252 72 model """ermlp""" +252 72 loss """bceaftersigmoid""" +252 72 regularizer """no""" +252 72 optimizer """adam""" +252 72 training_loop """owa""" +252 72 negative_sampler """basic""" +252 72 evaluator """rankbased""" +252 73 dataset """kinships""" +252 73 model """ermlp""" +252 73 loss """bceaftersigmoid""" +252 73 regularizer """no""" +252 73 optimizer """adam""" +252 73 training_loop """owa""" +252 73 negative_sampler """basic""" +252 73 evaluator """rankbased""" +252 74 dataset """kinships""" +252 74 model """ermlp""" +252 74 loss """bceaftersigmoid""" +252 74 regularizer """no""" +252 74 optimizer """adam""" +252 74 training_loop """owa""" +252 74 negative_sampler """basic""" +252 74 evaluator """rankbased""" +252 75 dataset """kinships""" +252 75 model """ermlp""" +252 75 loss """bceaftersigmoid""" +252 75 regularizer """no""" +252 75 optimizer """adam""" +252 75 training_loop """owa""" +252 75 negative_sampler """basic""" +252 75 evaluator """rankbased""" +252 76 dataset """kinships""" +252 76 model """ermlp""" +252 76 loss """bceaftersigmoid""" +252 76 regularizer """no""" +252 76 optimizer """adam""" +252 76 training_loop """owa""" +252 76 negative_sampler """basic""" +252 76 evaluator """rankbased""" +252 77 dataset """kinships""" +252 77 model """ermlp""" +252 77 loss """bceaftersigmoid""" +252 77 regularizer """no""" +252 77 optimizer """adam""" +252 77 training_loop """owa""" +252 77 negative_sampler """basic""" +252 77 evaluator """rankbased""" +252 78 dataset """kinships""" +252 78 model """ermlp""" +252 78 loss """bceaftersigmoid""" +252 78 regularizer """no""" +252 78 optimizer """adam""" +252 78 training_loop """owa""" +252 78 negative_sampler """basic""" +252 78 evaluator """rankbased""" +252 79 dataset """kinships""" +252 79 model """ermlp""" +252 79 loss 
"""bceaftersigmoid""" +252 79 regularizer """no""" +252 79 optimizer """adam""" +252 79 training_loop """owa""" +252 79 negative_sampler """basic""" +252 79 evaluator """rankbased""" +252 80 dataset """kinships""" +252 80 model """ermlp""" +252 80 loss """bceaftersigmoid""" +252 80 regularizer """no""" +252 80 optimizer """adam""" +252 80 training_loop """owa""" +252 80 negative_sampler """basic""" +252 80 evaluator """rankbased""" +252 81 dataset """kinships""" +252 81 model """ermlp""" +252 81 loss """bceaftersigmoid""" +252 81 regularizer """no""" +252 81 optimizer """adam""" +252 81 training_loop """owa""" +252 81 negative_sampler """basic""" +252 81 evaluator """rankbased""" +252 82 dataset """kinships""" +252 82 model """ermlp""" +252 82 loss """bceaftersigmoid""" +252 82 regularizer """no""" +252 82 optimizer """adam""" +252 82 training_loop """owa""" +252 82 negative_sampler """basic""" +252 82 evaluator """rankbased""" +252 83 dataset """kinships""" +252 83 model """ermlp""" +252 83 loss """bceaftersigmoid""" +252 83 regularizer """no""" +252 83 optimizer """adam""" +252 83 training_loop """owa""" +252 83 negative_sampler """basic""" +252 83 evaluator """rankbased""" +252 84 dataset """kinships""" +252 84 model """ermlp""" +252 84 loss """bceaftersigmoid""" +252 84 regularizer """no""" +252 84 optimizer """adam""" +252 84 training_loop """owa""" +252 84 negative_sampler """basic""" +252 84 evaluator """rankbased""" +252 85 dataset """kinships""" +252 85 model """ermlp""" +252 85 loss """bceaftersigmoid""" +252 85 regularizer """no""" +252 85 optimizer """adam""" +252 85 training_loop """owa""" +252 85 negative_sampler """basic""" +252 85 evaluator """rankbased""" +252 86 dataset """kinships""" +252 86 model """ermlp""" +252 86 loss """bceaftersigmoid""" +252 86 regularizer """no""" +252 86 optimizer """adam""" +252 86 training_loop """owa""" +252 86 negative_sampler """basic""" +252 86 evaluator """rankbased""" +252 87 dataset """kinships""" +252 87 model 
"""ermlp""" +252 87 loss """bceaftersigmoid""" +252 87 regularizer """no""" +252 87 optimizer """adam""" +252 87 training_loop """owa""" +252 87 negative_sampler """basic""" +252 87 evaluator """rankbased""" +252 88 dataset """kinships""" +252 88 model """ermlp""" +252 88 loss """bceaftersigmoid""" +252 88 regularizer """no""" +252 88 optimizer """adam""" +252 88 training_loop """owa""" +252 88 negative_sampler """basic""" +252 88 evaluator """rankbased""" +252 89 dataset """kinships""" +252 89 model """ermlp""" +252 89 loss """bceaftersigmoid""" +252 89 regularizer """no""" +252 89 optimizer """adam""" +252 89 training_loop """owa""" +252 89 negative_sampler """basic""" +252 89 evaluator """rankbased""" +252 90 dataset """kinships""" +252 90 model """ermlp""" +252 90 loss """bceaftersigmoid""" +252 90 regularizer """no""" +252 90 optimizer """adam""" +252 90 training_loop """owa""" +252 90 negative_sampler """basic""" +252 90 evaluator """rankbased""" +252 91 dataset """kinships""" +252 91 model """ermlp""" +252 91 loss """bceaftersigmoid""" +252 91 regularizer """no""" +252 91 optimizer """adam""" +252 91 training_loop """owa""" +252 91 negative_sampler """basic""" +252 91 evaluator """rankbased""" +252 92 dataset """kinships""" +252 92 model """ermlp""" +252 92 loss """bceaftersigmoid""" +252 92 regularizer """no""" +252 92 optimizer """adam""" +252 92 training_loop """owa""" +252 92 negative_sampler """basic""" +252 92 evaluator """rankbased""" +252 93 dataset """kinships""" +252 93 model """ermlp""" +252 93 loss """bceaftersigmoid""" +252 93 regularizer """no""" +252 93 optimizer """adam""" +252 93 training_loop """owa""" +252 93 negative_sampler """basic""" +252 93 evaluator """rankbased""" +252 94 dataset """kinships""" +252 94 model """ermlp""" +252 94 loss """bceaftersigmoid""" +252 94 regularizer """no""" +252 94 optimizer """adam""" +252 94 training_loop """owa""" +252 94 negative_sampler """basic""" +252 94 evaluator """rankbased""" +252 95 dataset 
"""kinships""" +252 95 model """ermlp""" +252 95 loss """bceaftersigmoid""" +252 95 regularizer """no""" +252 95 optimizer """adam""" +252 95 training_loop """owa""" +252 95 negative_sampler """basic""" +252 95 evaluator """rankbased""" +252 96 dataset """kinships""" +252 96 model """ermlp""" +252 96 loss """bceaftersigmoid""" +252 96 regularizer """no""" +252 96 optimizer """adam""" +252 96 training_loop """owa""" +252 96 negative_sampler """basic""" +252 96 evaluator """rankbased""" +252 97 dataset """kinships""" +252 97 model """ermlp""" +252 97 loss """bceaftersigmoid""" +252 97 regularizer """no""" +252 97 optimizer """adam""" +252 97 training_loop """owa""" +252 97 negative_sampler """basic""" +252 97 evaluator """rankbased""" +252 98 dataset """kinships""" +252 98 model """ermlp""" +252 98 loss """bceaftersigmoid""" +252 98 regularizer """no""" +252 98 optimizer """adam""" +252 98 training_loop """owa""" +252 98 negative_sampler """basic""" +252 98 evaluator """rankbased""" +252 99 dataset """kinships""" +252 99 model """ermlp""" +252 99 loss """bceaftersigmoid""" +252 99 regularizer """no""" +252 99 optimizer """adam""" +252 99 training_loop """owa""" +252 99 negative_sampler """basic""" +252 99 evaluator """rankbased""" +252 100 dataset """kinships""" +252 100 model """ermlp""" +252 100 loss """bceaftersigmoid""" +252 100 regularizer """no""" +252 100 optimizer """adam""" +252 100 training_loop """owa""" +252 100 negative_sampler """basic""" +252 100 evaluator """rankbased""" +253 1 model.embedding_dim 1.0 +253 1 optimizer.lr 0.015775198815224295 +253 1 negative_sampler.num_negs_per_pos 13.0 +253 1 training.batch_size 2.0 +253 2 model.embedding_dim 2.0 +253 2 optimizer.lr 0.0015422076598226055 +253 2 negative_sampler.num_negs_per_pos 69.0 +253 2 training.batch_size 0.0 +253 3 model.embedding_dim 2.0 +253 3 optimizer.lr 0.018842275344282396 +253 3 negative_sampler.num_negs_per_pos 95.0 +253 3 training.batch_size 2.0 +253 4 model.embedding_dim 2.0 +253 4 
optimizer.lr 0.020461664377871138 +253 4 negative_sampler.num_negs_per_pos 45.0 +253 4 training.batch_size 1.0 +253 5 model.embedding_dim 0.0 +253 5 optimizer.lr 0.05768254444620886 +253 5 negative_sampler.num_negs_per_pos 43.0 +253 5 training.batch_size 1.0 +253 6 model.embedding_dim 2.0 +253 6 optimizer.lr 0.06706922099220484 +253 6 negative_sampler.num_negs_per_pos 84.0 +253 6 training.batch_size 1.0 +253 7 model.embedding_dim 0.0 +253 7 optimizer.lr 0.0045075625215739035 +253 7 negative_sampler.num_negs_per_pos 32.0 +253 7 training.batch_size 1.0 +253 8 model.embedding_dim 0.0 +253 8 optimizer.lr 0.02741648806771924 +253 8 negative_sampler.num_negs_per_pos 49.0 +253 8 training.batch_size 2.0 +253 9 model.embedding_dim 2.0 +253 9 optimizer.lr 0.001981538458249399 +253 9 negative_sampler.num_negs_per_pos 3.0 +253 9 training.batch_size 0.0 +253 10 model.embedding_dim 2.0 +253 10 optimizer.lr 0.06044149514160652 +253 10 negative_sampler.num_negs_per_pos 70.0 +253 10 training.batch_size 0.0 +253 11 model.embedding_dim 0.0 +253 11 optimizer.lr 0.056231693796734934 +253 11 negative_sampler.num_negs_per_pos 28.0 +253 11 training.batch_size 0.0 +253 12 model.embedding_dim 0.0 +253 12 optimizer.lr 0.024313824428084756 +253 12 negative_sampler.num_negs_per_pos 76.0 +253 12 training.batch_size 2.0 +253 13 model.embedding_dim 1.0 +253 13 optimizer.lr 0.0012774163633417468 +253 13 negative_sampler.num_negs_per_pos 80.0 +253 13 training.batch_size 1.0 +253 14 model.embedding_dim 2.0 +253 14 optimizer.lr 0.0010607292348550409 +253 14 negative_sampler.num_negs_per_pos 44.0 +253 14 training.batch_size 2.0 +253 15 model.embedding_dim 0.0 +253 15 optimizer.lr 0.05643168262618337 +253 15 negative_sampler.num_negs_per_pos 0.0 +253 15 training.batch_size 1.0 +253 16 model.embedding_dim 1.0 +253 16 optimizer.lr 0.06730549970506297 +253 16 negative_sampler.num_negs_per_pos 98.0 +253 16 training.batch_size 2.0 +253 17 model.embedding_dim 0.0 +253 17 optimizer.lr 0.005852667449650973 
+253 17 negative_sampler.num_negs_per_pos 19.0 +253 17 training.batch_size 2.0 +253 18 model.embedding_dim 2.0 +253 18 optimizer.lr 0.052618409396905465 +253 18 negative_sampler.num_negs_per_pos 12.0 +253 18 training.batch_size 2.0 +253 19 model.embedding_dim 2.0 +253 19 optimizer.lr 0.014830007189217497 +253 19 negative_sampler.num_negs_per_pos 3.0 +253 19 training.batch_size 2.0 +253 20 model.embedding_dim 2.0 +253 20 optimizer.lr 0.023619259831559873 +253 20 negative_sampler.num_negs_per_pos 52.0 +253 20 training.batch_size 2.0 +253 21 model.embedding_dim 2.0 +253 21 optimizer.lr 0.008535346725146511 +253 21 negative_sampler.num_negs_per_pos 34.0 +253 21 training.batch_size 2.0 +253 22 model.embedding_dim 1.0 +253 22 optimizer.lr 0.004829238176318468 +253 22 negative_sampler.num_negs_per_pos 43.0 +253 22 training.batch_size 0.0 +253 23 model.embedding_dim 0.0 +253 23 optimizer.lr 0.022414809227631366 +253 23 negative_sampler.num_negs_per_pos 67.0 +253 23 training.batch_size 0.0 +253 24 model.embedding_dim 1.0 +253 24 optimizer.lr 0.0048530331239335246 +253 24 negative_sampler.num_negs_per_pos 84.0 +253 24 training.batch_size 2.0 +253 25 model.embedding_dim 2.0 +253 25 optimizer.lr 0.0021960804125508953 +253 25 negative_sampler.num_negs_per_pos 35.0 +253 25 training.batch_size 1.0 +253 26 model.embedding_dim 1.0 +253 26 optimizer.lr 0.04506023487368293 +253 26 negative_sampler.num_negs_per_pos 50.0 +253 26 training.batch_size 0.0 +253 27 model.embedding_dim 0.0 +253 27 optimizer.lr 0.02879863476317729 +253 27 negative_sampler.num_negs_per_pos 90.0 +253 27 training.batch_size 0.0 +253 28 model.embedding_dim 2.0 +253 28 optimizer.lr 0.07358177384645577 +253 28 negative_sampler.num_negs_per_pos 1.0 +253 28 training.batch_size 0.0 +253 29 model.embedding_dim 1.0 +253 29 optimizer.lr 0.03640310405000173 +253 29 negative_sampler.num_negs_per_pos 99.0 +253 29 training.batch_size 0.0 +253 30 model.embedding_dim 2.0 +253 30 optimizer.lr 0.031005982678533498 +253 30 
negative_sampler.num_negs_per_pos 14.0 +253 30 training.batch_size 0.0 +253 31 model.embedding_dim 0.0 +253 31 optimizer.lr 0.007843858380954595 +253 31 negative_sampler.num_negs_per_pos 39.0 +253 31 training.batch_size 2.0 +253 32 model.embedding_dim 0.0 +253 32 optimizer.lr 0.00144425876478816 +253 32 negative_sampler.num_negs_per_pos 3.0 +253 32 training.batch_size 2.0 +253 33 model.embedding_dim 2.0 +253 33 optimizer.lr 0.04208165301843336 +253 33 negative_sampler.num_negs_per_pos 71.0 +253 33 training.batch_size 2.0 +253 34 model.embedding_dim 2.0 +253 34 optimizer.lr 0.015646446241040987 +253 34 negative_sampler.num_negs_per_pos 22.0 +253 34 training.batch_size 0.0 +253 35 model.embedding_dim 0.0 +253 35 optimizer.lr 0.004189849307552043 +253 35 negative_sampler.num_negs_per_pos 21.0 +253 35 training.batch_size 2.0 +253 36 model.embedding_dim 0.0 +253 36 optimizer.lr 0.010852766262736019 +253 36 negative_sampler.num_negs_per_pos 38.0 +253 36 training.batch_size 0.0 +253 37 model.embedding_dim 0.0 +253 37 optimizer.lr 0.03111021854011443 +253 37 negative_sampler.num_negs_per_pos 96.0 +253 37 training.batch_size 1.0 +253 38 model.embedding_dim 1.0 +253 38 optimizer.lr 0.005152725387459209 +253 38 negative_sampler.num_negs_per_pos 5.0 +253 38 training.batch_size 1.0 +253 39 model.embedding_dim 1.0 +253 39 optimizer.lr 0.0018468929458521862 +253 39 negative_sampler.num_negs_per_pos 18.0 +253 39 training.batch_size 0.0 +253 40 model.embedding_dim 1.0 +253 40 optimizer.lr 0.06703261012436247 +253 40 negative_sampler.num_negs_per_pos 63.0 +253 40 training.batch_size 0.0 +253 41 model.embedding_dim 2.0 +253 41 optimizer.lr 0.002140718676297739 +253 41 negative_sampler.num_negs_per_pos 85.0 +253 41 training.batch_size 1.0 +253 42 model.embedding_dim 1.0 +253 42 optimizer.lr 0.001898864082813312 +253 42 negative_sampler.num_negs_per_pos 13.0 +253 42 training.batch_size 0.0 +253 43 model.embedding_dim 0.0 +253 43 optimizer.lr 0.032040747995229726 +253 43 
negative_sampler.num_negs_per_pos 51.0 +253 43 training.batch_size 2.0 +253 44 model.embedding_dim 2.0 +253 44 optimizer.lr 0.013048701035598725 +253 44 negative_sampler.num_negs_per_pos 79.0 +253 44 training.batch_size 1.0 +253 45 model.embedding_dim 0.0 +253 45 optimizer.lr 0.030116047994667817 +253 45 negative_sampler.num_negs_per_pos 51.0 +253 45 training.batch_size 1.0 +253 46 model.embedding_dim 1.0 +253 46 optimizer.lr 0.0020140313068942603 +253 46 negative_sampler.num_negs_per_pos 76.0 +253 46 training.batch_size 2.0 +253 47 model.embedding_dim 0.0 +253 47 optimizer.lr 0.003324746674074648 +253 47 negative_sampler.num_negs_per_pos 22.0 +253 47 training.batch_size 2.0 +253 48 model.embedding_dim 0.0 +253 48 optimizer.lr 0.08891388880257498 +253 48 negative_sampler.num_negs_per_pos 53.0 +253 48 training.batch_size 2.0 +253 49 model.embedding_dim 1.0 +253 49 optimizer.lr 0.05015088250367176 +253 49 negative_sampler.num_negs_per_pos 35.0 +253 49 training.batch_size 2.0 +253 50 model.embedding_dim 0.0 +253 50 optimizer.lr 0.05339288744526199 +253 50 negative_sampler.num_negs_per_pos 98.0 +253 50 training.batch_size 1.0 +253 51 model.embedding_dim 0.0 +253 51 optimizer.lr 0.01143246310828453 +253 51 negative_sampler.num_negs_per_pos 42.0 +253 51 training.batch_size 0.0 +253 52 model.embedding_dim 1.0 +253 52 optimizer.lr 0.06082267035375625 +253 52 negative_sampler.num_negs_per_pos 99.0 +253 52 training.batch_size 0.0 +253 53 model.embedding_dim 0.0 +253 53 optimizer.lr 0.01770349607939968 +253 53 negative_sampler.num_negs_per_pos 11.0 +253 53 training.batch_size 0.0 +253 54 model.embedding_dim 2.0 +253 54 optimizer.lr 0.004890262219990566 +253 54 negative_sampler.num_negs_per_pos 19.0 +253 54 training.batch_size 1.0 +253 55 model.embedding_dim 0.0 +253 55 optimizer.lr 0.002025585894640638 +253 55 negative_sampler.num_negs_per_pos 78.0 +253 55 training.batch_size 2.0 +253 56 model.embedding_dim 1.0 +253 56 optimizer.lr 0.0031574707395245656 +253 56 
negative_sampler.num_negs_per_pos 19.0 +253 56 training.batch_size 2.0 +253 57 model.embedding_dim 2.0 +253 57 optimizer.lr 0.08811153701754723 +253 57 negative_sampler.num_negs_per_pos 56.0 +253 57 training.batch_size 2.0 +253 58 model.embedding_dim 0.0 +253 58 optimizer.lr 0.08908241028839077 +253 58 negative_sampler.num_negs_per_pos 28.0 +253 58 training.batch_size 1.0 +253 59 model.embedding_dim 2.0 +253 59 optimizer.lr 0.006306834506183488 +253 59 negative_sampler.num_negs_per_pos 16.0 +253 59 training.batch_size 0.0 +253 60 model.embedding_dim 0.0 +253 60 optimizer.lr 0.001026369554390423 +253 60 negative_sampler.num_negs_per_pos 92.0 +253 60 training.batch_size 2.0 +253 61 model.embedding_dim 0.0 +253 61 optimizer.lr 0.0023143722888070827 +253 61 negative_sampler.num_negs_per_pos 65.0 +253 61 training.batch_size 1.0 +253 62 model.embedding_dim 2.0 +253 62 optimizer.lr 0.004611435604985179 +253 62 negative_sampler.num_negs_per_pos 64.0 +253 62 training.batch_size 2.0 +253 63 model.embedding_dim 2.0 +253 63 optimizer.lr 0.001562139657480617 +253 63 negative_sampler.num_negs_per_pos 50.0 +253 63 training.batch_size 2.0 +253 64 model.embedding_dim 2.0 +253 64 optimizer.lr 0.0011515369396787342 +253 64 negative_sampler.num_negs_per_pos 32.0 +253 64 training.batch_size 1.0 +253 65 model.embedding_dim 2.0 +253 65 optimizer.lr 0.011343664697043785 +253 65 negative_sampler.num_negs_per_pos 7.0 +253 65 training.batch_size 1.0 +253 66 model.embedding_dim 0.0 +253 66 optimizer.lr 0.024616318481084126 +253 66 negative_sampler.num_negs_per_pos 77.0 +253 66 training.batch_size 1.0 +253 67 model.embedding_dim 0.0 +253 67 optimizer.lr 0.004061418347060604 +253 67 negative_sampler.num_negs_per_pos 19.0 +253 67 training.batch_size 1.0 +253 68 model.embedding_dim 2.0 +253 68 optimizer.lr 0.03692205421809162 +253 68 negative_sampler.num_negs_per_pos 27.0 +253 68 training.batch_size 0.0 +253 69 model.embedding_dim 0.0 +253 69 optimizer.lr 0.004415862833318468 +253 69 
negative_sampler.num_negs_per_pos 30.0 +253 69 training.batch_size 0.0 +253 70 model.embedding_dim 1.0 +253 70 optimizer.lr 0.017922778263151726 +253 70 negative_sampler.num_negs_per_pos 28.0 +253 70 training.batch_size 0.0 +253 71 model.embedding_dim 1.0 +253 71 optimizer.lr 0.004845184014735192 +253 71 negative_sampler.num_negs_per_pos 47.0 +253 71 training.batch_size 1.0 +253 72 model.embedding_dim 0.0 +253 72 optimizer.lr 0.003474771356480419 +253 72 negative_sampler.num_negs_per_pos 17.0 +253 72 training.batch_size 2.0 +253 73 model.embedding_dim 0.0 +253 73 optimizer.lr 0.0018028302964605987 +253 73 negative_sampler.num_negs_per_pos 78.0 +253 73 training.batch_size 2.0 +253 74 model.embedding_dim 2.0 +253 74 optimizer.lr 0.001500247015041506 +253 74 negative_sampler.num_negs_per_pos 59.0 +253 74 training.batch_size 0.0 +253 75 model.embedding_dim 1.0 +253 75 optimizer.lr 0.004310973539757016 +253 75 negative_sampler.num_negs_per_pos 66.0 +253 75 training.batch_size 0.0 +253 76 model.embedding_dim 1.0 +253 76 optimizer.lr 0.033891263423997814 +253 76 negative_sampler.num_negs_per_pos 21.0 +253 76 training.batch_size 2.0 +253 77 model.embedding_dim 2.0 +253 77 optimizer.lr 0.09580921188503067 +253 77 negative_sampler.num_negs_per_pos 94.0 +253 77 training.batch_size 0.0 +253 78 model.embedding_dim 1.0 +253 78 optimizer.lr 0.0014501171776911466 +253 78 negative_sampler.num_negs_per_pos 0.0 +253 78 training.batch_size 2.0 +253 79 model.embedding_dim 0.0 +253 79 optimizer.lr 0.0018596374511560304 +253 79 negative_sampler.num_negs_per_pos 4.0 +253 79 training.batch_size 1.0 +253 80 model.embedding_dim 0.0 +253 80 optimizer.lr 0.010120357642236703 +253 80 negative_sampler.num_negs_per_pos 38.0 +253 80 training.batch_size 1.0 +253 81 model.embedding_dim 2.0 +253 81 optimizer.lr 0.014047025219954726 +253 81 negative_sampler.num_negs_per_pos 3.0 +253 81 training.batch_size 0.0 +253 82 model.embedding_dim 1.0 +253 82 optimizer.lr 0.0010901988884359764 +253 82 
negative_sampler.num_negs_per_pos 20.0 +253 82 training.batch_size 0.0 +253 83 model.embedding_dim 2.0 +253 83 optimizer.lr 0.07553597387899252 +253 83 negative_sampler.num_negs_per_pos 83.0 +253 83 training.batch_size 2.0 +253 84 model.embedding_dim 2.0 +253 84 optimizer.lr 0.0014355084188380591 +253 84 negative_sampler.num_negs_per_pos 24.0 +253 84 training.batch_size 1.0 +253 85 model.embedding_dim 0.0 +253 85 optimizer.lr 0.05661077056279952 +253 85 negative_sampler.num_negs_per_pos 28.0 +253 85 training.batch_size 2.0 +253 86 model.embedding_dim 0.0 +253 86 optimizer.lr 0.01291478128207003 +253 86 negative_sampler.num_negs_per_pos 91.0 +253 86 training.batch_size 2.0 +253 87 model.embedding_dim 1.0 +253 87 optimizer.lr 0.016044939919859766 +253 87 negative_sampler.num_negs_per_pos 56.0 +253 87 training.batch_size 2.0 +253 88 model.embedding_dim 2.0 +253 88 optimizer.lr 0.0589111488770481 +253 88 negative_sampler.num_negs_per_pos 9.0 +253 88 training.batch_size 2.0 +253 89 model.embedding_dim 0.0 +253 89 optimizer.lr 0.002524848176052281 +253 89 negative_sampler.num_negs_per_pos 77.0 +253 89 training.batch_size 2.0 +253 90 model.embedding_dim 1.0 +253 90 optimizer.lr 0.07740814699793448 +253 90 negative_sampler.num_negs_per_pos 97.0 +253 90 training.batch_size 1.0 +253 91 model.embedding_dim 0.0 +253 91 optimizer.lr 0.0476483187737527 +253 91 negative_sampler.num_negs_per_pos 64.0 +253 91 training.batch_size 2.0 +253 92 model.embedding_dim 1.0 +253 92 optimizer.lr 0.0065480870474961105 +253 92 negative_sampler.num_negs_per_pos 51.0 +253 92 training.batch_size 1.0 +253 93 model.embedding_dim 2.0 +253 93 optimizer.lr 0.003279881227372069 +253 93 negative_sampler.num_negs_per_pos 89.0 +253 93 training.batch_size 1.0 +253 94 model.embedding_dim 2.0 +253 94 optimizer.lr 0.047642529304820845 +253 94 negative_sampler.num_negs_per_pos 80.0 +253 94 training.batch_size 2.0 +253 95 model.embedding_dim 1.0 +253 95 optimizer.lr 0.011877320672811833 +253 95 
negative_sampler.num_negs_per_pos 49.0 +253 95 training.batch_size 1.0 +253 96 model.embedding_dim 1.0 +253 96 optimizer.lr 0.0047793213275369485 +253 96 negative_sampler.num_negs_per_pos 12.0 +253 96 training.batch_size 1.0 +253 97 model.embedding_dim 2.0 +253 97 optimizer.lr 0.0029298784187471853 +253 97 negative_sampler.num_negs_per_pos 26.0 +253 97 training.batch_size 1.0 +253 98 model.embedding_dim 1.0 +253 98 optimizer.lr 0.0016690906586960621 +253 98 negative_sampler.num_negs_per_pos 58.0 +253 98 training.batch_size 1.0 +253 99 model.embedding_dim 1.0 +253 99 optimizer.lr 0.009451026630938655 +253 99 negative_sampler.num_negs_per_pos 4.0 +253 99 training.batch_size 2.0 +253 100 model.embedding_dim 0.0 +253 100 optimizer.lr 0.020883314079754647 +253 100 negative_sampler.num_negs_per_pos 53.0 +253 100 training.batch_size 1.0 +253 1 dataset """kinships""" +253 1 model """ermlp""" +253 1 loss """softplus""" +253 1 regularizer """no""" +253 1 optimizer """adam""" +253 1 training_loop """owa""" +253 1 negative_sampler """basic""" +253 1 evaluator """rankbased""" +253 2 dataset """kinships""" +253 2 model """ermlp""" +253 2 loss """softplus""" +253 2 regularizer """no""" +253 2 optimizer """adam""" +253 2 training_loop """owa""" +253 2 negative_sampler """basic""" +253 2 evaluator """rankbased""" +253 3 dataset """kinships""" +253 3 model """ermlp""" +253 3 loss """softplus""" +253 3 regularizer """no""" +253 3 optimizer """adam""" +253 3 training_loop """owa""" +253 3 negative_sampler """basic""" +253 3 evaluator """rankbased""" +253 4 dataset """kinships""" +253 4 model """ermlp""" +253 4 loss """softplus""" +253 4 regularizer """no""" +253 4 optimizer """adam""" +253 4 training_loop """owa""" +253 4 negative_sampler """basic""" +253 4 evaluator """rankbased""" +253 5 dataset """kinships""" +253 5 model """ermlp""" +253 5 loss """softplus""" +253 5 regularizer """no""" +253 5 optimizer """adam""" +253 5 training_loop """owa""" +253 5 negative_sampler """basic""" 
+253 5 evaluator """rankbased""" +253 6 dataset """kinships""" +253 6 model """ermlp""" +253 6 loss """softplus""" +253 6 regularizer """no""" +253 6 optimizer """adam""" +253 6 training_loop """owa""" +253 6 negative_sampler """basic""" +253 6 evaluator """rankbased""" +253 7 dataset """kinships""" +253 7 model """ermlp""" +253 7 loss """softplus""" +253 7 regularizer """no""" +253 7 optimizer """adam""" +253 7 training_loop """owa""" +253 7 negative_sampler """basic""" +253 7 evaluator """rankbased""" +253 8 dataset """kinships""" +253 8 model """ermlp""" +253 8 loss """softplus""" +253 8 regularizer """no""" +253 8 optimizer """adam""" +253 8 training_loop """owa""" +253 8 negative_sampler """basic""" +253 8 evaluator """rankbased""" +253 9 dataset """kinships""" +253 9 model """ermlp""" +253 9 loss """softplus""" +253 9 regularizer """no""" +253 9 optimizer """adam""" +253 9 training_loop """owa""" +253 9 negative_sampler """basic""" +253 9 evaluator """rankbased""" +253 10 dataset """kinships""" +253 10 model """ermlp""" +253 10 loss """softplus""" +253 10 regularizer """no""" +253 10 optimizer """adam""" +253 10 training_loop """owa""" +253 10 negative_sampler """basic""" +253 10 evaluator """rankbased""" +253 11 dataset """kinships""" +253 11 model """ermlp""" +253 11 loss """softplus""" +253 11 regularizer """no""" +253 11 optimizer """adam""" +253 11 training_loop """owa""" +253 11 negative_sampler """basic""" +253 11 evaluator """rankbased""" +253 12 dataset """kinships""" +253 12 model """ermlp""" +253 12 loss """softplus""" +253 12 regularizer """no""" +253 12 optimizer """adam""" +253 12 training_loop """owa""" +253 12 negative_sampler """basic""" +253 12 evaluator """rankbased""" +253 13 dataset """kinships""" +253 13 model """ermlp""" +253 13 loss """softplus""" +253 13 regularizer """no""" +253 13 optimizer """adam""" +253 13 training_loop """owa""" +253 13 negative_sampler """basic""" +253 13 evaluator """rankbased""" +253 14 dataset """kinships""" 
+253 14 model """ermlp""" +253 14 loss """softplus""" +253 14 regularizer """no""" +253 14 optimizer """adam""" +253 14 training_loop """owa""" +253 14 negative_sampler """basic""" +253 14 evaluator """rankbased""" +253 15 dataset """kinships""" +253 15 model """ermlp""" +253 15 loss """softplus""" +253 15 regularizer """no""" +253 15 optimizer """adam""" +253 15 training_loop """owa""" +253 15 negative_sampler """basic""" +253 15 evaluator """rankbased""" +253 16 dataset """kinships""" +253 16 model """ermlp""" +253 16 loss """softplus""" +253 16 regularizer """no""" +253 16 optimizer """adam""" +253 16 training_loop """owa""" +253 16 negative_sampler """basic""" +253 16 evaluator """rankbased""" +253 17 dataset """kinships""" +253 17 model """ermlp""" +253 17 loss """softplus""" +253 17 regularizer """no""" +253 17 optimizer """adam""" +253 17 training_loop """owa""" +253 17 negative_sampler """basic""" +253 17 evaluator """rankbased""" +253 18 dataset """kinships""" +253 18 model """ermlp""" +253 18 loss """softplus""" +253 18 regularizer """no""" +253 18 optimizer """adam""" +253 18 training_loop """owa""" +253 18 negative_sampler """basic""" +253 18 evaluator """rankbased""" +253 19 dataset """kinships""" +253 19 model """ermlp""" +253 19 loss """softplus""" +253 19 regularizer """no""" +253 19 optimizer """adam""" +253 19 training_loop """owa""" +253 19 negative_sampler """basic""" +253 19 evaluator """rankbased""" +253 20 dataset """kinships""" +253 20 model """ermlp""" +253 20 loss """softplus""" +253 20 regularizer """no""" +253 20 optimizer """adam""" +253 20 training_loop """owa""" +253 20 negative_sampler """basic""" +253 20 evaluator """rankbased""" +253 21 dataset """kinships""" +253 21 model """ermlp""" +253 21 loss """softplus""" +253 21 regularizer """no""" +253 21 optimizer """adam""" +253 21 training_loop """owa""" +253 21 negative_sampler """basic""" +253 21 evaluator """rankbased""" +253 22 dataset """kinships""" +253 22 model """ermlp""" +253 
22 loss """softplus""" +253 22 regularizer """no""" +253 22 optimizer """adam""" +253 22 training_loop """owa""" +253 22 negative_sampler """basic""" +253 22 evaluator """rankbased""" +253 23 dataset """kinships""" +253 23 model """ermlp""" +253 23 loss """softplus""" +253 23 regularizer """no""" +253 23 optimizer """adam""" +253 23 training_loop """owa""" +253 23 negative_sampler """basic""" +253 23 evaluator """rankbased""" +253 24 dataset """kinships""" +253 24 model """ermlp""" +253 24 loss """softplus""" +253 24 regularizer """no""" +253 24 optimizer """adam""" +253 24 training_loop """owa""" +253 24 negative_sampler """basic""" +253 24 evaluator """rankbased""" +253 25 dataset """kinships""" +253 25 model """ermlp""" +253 25 loss """softplus""" +253 25 regularizer """no""" +253 25 optimizer """adam""" +253 25 training_loop """owa""" +253 25 negative_sampler """basic""" +253 25 evaluator """rankbased""" +253 26 dataset """kinships""" +253 26 model """ermlp""" +253 26 loss """softplus""" +253 26 regularizer """no""" +253 26 optimizer """adam""" +253 26 training_loop """owa""" +253 26 negative_sampler """basic""" +253 26 evaluator """rankbased""" +253 27 dataset """kinships""" +253 27 model """ermlp""" +253 27 loss """softplus""" +253 27 regularizer """no""" +253 27 optimizer """adam""" +253 27 training_loop """owa""" +253 27 negative_sampler """basic""" +253 27 evaluator """rankbased""" +253 28 dataset """kinships""" +253 28 model """ermlp""" +253 28 loss """softplus""" +253 28 regularizer """no""" +253 28 optimizer """adam""" +253 28 training_loop """owa""" +253 28 negative_sampler """basic""" +253 28 evaluator """rankbased""" +253 29 dataset """kinships""" +253 29 model """ermlp""" +253 29 loss """softplus""" +253 29 regularizer """no""" +253 29 optimizer """adam""" +253 29 training_loop """owa""" +253 29 negative_sampler """basic""" +253 29 evaluator """rankbased""" +253 30 dataset """kinships""" +253 30 model """ermlp""" +253 30 loss """softplus""" +253 30 
regularizer """no""" +253 30 optimizer """adam""" +253 30 training_loop """owa""" +253 30 negative_sampler """basic""" +253 30 evaluator """rankbased""" +253 31 dataset """kinships""" +253 31 model """ermlp""" +253 31 loss """softplus""" +253 31 regularizer """no""" +253 31 optimizer """adam""" +253 31 training_loop """owa""" +253 31 negative_sampler """basic""" +253 31 evaluator """rankbased""" +253 32 dataset """kinships""" +253 32 model """ermlp""" +253 32 loss """softplus""" +253 32 regularizer """no""" +253 32 optimizer """adam""" +253 32 training_loop """owa""" +253 32 negative_sampler """basic""" +253 32 evaluator """rankbased""" +253 33 dataset """kinships""" +253 33 model """ermlp""" +253 33 loss """softplus""" +253 33 regularizer """no""" +253 33 optimizer """adam""" +253 33 training_loop """owa""" +253 33 negative_sampler """basic""" +253 33 evaluator """rankbased""" +253 34 dataset """kinships""" +253 34 model """ermlp""" +253 34 loss """softplus""" +253 34 regularizer """no""" +253 34 optimizer """adam""" +253 34 training_loop """owa""" +253 34 negative_sampler """basic""" +253 34 evaluator """rankbased""" +253 35 dataset """kinships""" +253 35 model """ermlp""" +253 35 loss """softplus""" +253 35 regularizer """no""" +253 35 optimizer """adam""" +253 35 training_loop """owa""" +253 35 negative_sampler """basic""" +253 35 evaluator """rankbased""" +253 36 dataset """kinships""" +253 36 model """ermlp""" +253 36 loss """softplus""" +253 36 regularizer """no""" +253 36 optimizer """adam""" +253 36 training_loop """owa""" +253 36 negative_sampler """basic""" +253 36 evaluator """rankbased""" +253 37 dataset """kinships""" +253 37 model """ermlp""" +253 37 loss """softplus""" +253 37 regularizer """no""" +253 37 optimizer """adam""" +253 37 training_loop """owa""" +253 37 negative_sampler """basic""" +253 37 evaluator """rankbased""" +253 38 dataset """kinships""" +253 38 model """ermlp""" +253 38 loss """softplus""" +253 38 regularizer """no""" +253 38 
optimizer """adam""" +253 38 training_loop """owa""" +253 38 negative_sampler """basic""" +253 38 evaluator """rankbased""" +253 39 dataset """kinships""" +253 39 model """ermlp""" +253 39 loss """softplus""" +253 39 regularizer """no""" +253 39 optimizer """adam""" +253 39 training_loop """owa""" +253 39 negative_sampler """basic""" +253 39 evaluator """rankbased""" +253 40 dataset """kinships""" +253 40 model """ermlp""" +253 40 loss """softplus""" +253 40 regularizer """no""" +253 40 optimizer """adam""" +253 40 training_loop """owa""" +253 40 negative_sampler """basic""" +253 40 evaluator """rankbased""" +253 41 dataset """kinships""" +253 41 model """ermlp""" +253 41 loss """softplus""" +253 41 regularizer """no""" +253 41 optimizer """adam""" +253 41 training_loop """owa""" +253 41 negative_sampler """basic""" +253 41 evaluator """rankbased""" +253 42 dataset """kinships""" +253 42 model """ermlp""" +253 42 loss """softplus""" +253 42 regularizer """no""" +253 42 optimizer """adam""" +253 42 training_loop """owa""" +253 42 negative_sampler """basic""" +253 42 evaluator """rankbased""" +253 43 dataset """kinships""" +253 43 model """ermlp""" +253 43 loss """softplus""" +253 43 regularizer """no""" +253 43 optimizer """adam""" +253 43 training_loop """owa""" +253 43 negative_sampler """basic""" +253 43 evaluator """rankbased""" +253 44 dataset """kinships""" +253 44 model """ermlp""" +253 44 loss """softplus""" +253 44 regularizer """no""" +253 44 optimizer """adam""" +253 44 training_loop """owa""" +253 44 negative_sampler """basic""" +253 44 evaluator """rankbased""" +253 45 dataset """kinships""" +253 45 model """ermlp""" +253 45 loss """softplus""" +253 45 regularizer """no""" +253 45 optimizer """adam""" +253 45 training_loop """owa""" +253 45 negative_sampler """basic""" +253 45 evaluator """rankbased""" +253 46 dataset """kinships""" +253 46 model """ermlp""" +253 46 loss """softplus""" +253 46 regularizer """no""" +253 46 optimizer """adam""" +253 46 
training_loop """owa""" +253 46 negative_sampler """basic""" +253 46 evaluator """rankbased""" +253 47 dataset """kinships""" +253 47 model """ermlp""" +253 47 loss """softplus""" +253 47 regularizer """no""" +253 47 optimizer """adam""" +253 47 training_loop """owa""" +253 47 negative_sampler """basic""" +253 47 evaluator """rankbased""" +253 48 dataset """kinships""" +253 48 model """ermlp""" +253 48 loss """softplus""" +253 48 regularizer """no""" +253 48 optimizer """adam""" +253 48 training_loop """owa""" +253 48 negative_sampler """basic""" +253 48 evaluator """rankbased""" +253 49 dataset """kinships""" +253 49 model """ermlp""" +253 49 loss """softplus""" +253 49 regularizer """no""" +253 49 optimizer """adam""" +253 49 training_loop """owa""" +253 49 negative_sampler """basic""" +253 49 evaluator """rankbased""" +253 50 dataset """kinships""" +253 50 model """ermlp""" +253 50 loss """softplus""" +253 50 regularizer """no""" +253 50 optimizer """adam""" +253 50 training_loop """owa""" +253 50 negative_sampler """basic""" +253 50 evaluator """rankbased""" +253 51 dataset """kinships""" +253 51 model """ermlp""" +253 51 loss """softplus""" +253 51 regularizer """no""" +253 51 optimizer """adam""" +253 51 training_loop """owa""" +253 51 negative_sampler """basic""" +253 51 evaluator """rankbased""" +253 52 dataset """kinships""" +253 52 model """ermlp""" +253 52 loss """softplus""" +253 52 regularizer """no""" +253 52 optimizer """adam""" +253 52 training_loop """owa""" +253 52 negative_sampler """basic""" +253 52 evaluator """rankbased""" +253 53 dataset """kinships""" +253 53 model """ermlp""" +253 53 loss """softplus""" +253 53 regularizer """no""" +253 53 optimizer """adam""" +253 53 training_loop """owa""" +253 53 negative_sampler """basic""" +253 53 evaluator """rankbased""" +253 54 dataset """kinships""" +253 54 model """ermlp""" +253 54 loss """softplus""" +253 54 regularizer """no""" +253 54 optimizer """adam""" +253 54 training_loop """owa""" +253 54 
negative_sampler """basic""" +253 54 evaluator """rankbased""" +253 55 dataset """kinships""" +253 55 model """ermlp""" +253 55 loss """softplus""" +253 55 regularizer """no""" +253 55 optimizer """adam""" +253 55 training_loop """owa""" +253 55 negative_sampler """basic""" +253 55 evaluator """rankbased""" +253 56 dataset """kinships""" +253 56 model """ermlp""" +253 56 loss """softplus""" +253 56 regularizer """no""" +253 56 optimizer """adam""" +253 56 training_loop """owa""" +253 56 negative_sampler """basic""" +253 56 evaluator """rankbased""" +253 57 dataset """kinships""" +253 57 model """ermlp""" +253 57 loss """softplus""" +253 57 regularizer """no""" +253 57 optimizer """adam""" +253 57 training_loop """owa""" +253 57 negative_sampler """basic""" +253 57 evaluator """rankbased""" +253 58 dataset """kinships""" +253 58 model """ermlp""" +253 58 loss """softplus""" +253 58 regularizer """no""" +253 58 optimizer """adam""" +253 58 training_loop """owa""" +253 58 negative_sampler """basic""" +253 58 evaluator """rankbased""" +253 59 dataset """kinships""" +253 59 model """ermlp""" +253 59 loss """softplus""" +253 59 regularizer """no""" +253 59 optimizer """adam""" +253 59 training_loop """owa""" +253 59 negative_sampler """basic""" +253 59 evaluator """rankbased""" +253 60 dataset """kinships""" +253 60 model """ermlp""" +253 60 loss """softplus""" +253 60 regularizer """no""" +253 60 optimizer """adam""" +253 60 training_loop """owa""" +253 60 negative_sampler """basic""" +253 60 evaluator """rankbased""" +253 61 dataset """kinships""" +253 61 model """ermlp""" +253 61 loss """softplus""" +253 61 regularizer """no""" +253 61 optimizer """adam""" +253 61 training_loop """owa""" +253 61 negative_sampler """basic""" +253 61 evaluator """rankbased""" +253 62 dataset """kinships""" +253 62 model """ermlp""" +253 62 loss """softplus""" +253 62 regularizer """no""" +253 62 optimizer """adam""" +253 62 training_loop """owa""" +253 62 negative_sampler """basic""" 
+253 62 evaluator """rankbased""" +253 63 dataset """kinships""" +253 63 model """ermlp""" +253 63 loss """softplus""" +253 63 regularizer """no""" +253 63 optimizer """adam""" +253 63 training_loop """owa""" +253 63 negative_sampler """basic""" +253 63 evaluator """rankbased""" +253 64 dataset """kinships""" +253 64 model """ermlp""" +253 64 loss """softplus""" +253 64 regularizer """no""" +253 64 optimizer """adam""" +253 64 training_loop """owa""" +253 64 negative_sampler """basic""" +253 64 evaluator """rankbased""" +253 65 dataset """kinships""" +253 65 model """ermlp""" +253 65 loss """softplus""" +253 65 regularizer """no""" +253 65 optimizer """adam""" +253 65 training_loop """owa""" +253 65 negative_sampler """basic""" +253 65 evaluator """rankbased""" +253 66 dataset """kinships""" +253 66 model """ermlp""" +253 66 loss """softplus""" +253 66 regularizer """no""" +253 66 optimizer """adam""" +253 66 training_loop """owa""" +253 66 negative_sampler """basic""" +253 66 evaluator """rankbased""" +253 67 dataset """kinships""" +253 67 model """ermlp""" +253 67 loss """softplus""" +253 67 regularizer """no""" +253 67 optimizer """adam""" +253 67 training_loop """owa""" +253 67 negative_sampler """basic""" +253 67 evaluator """rankbased""" +253 68 dataset """kinships""" +253 68 model """ermlp""" +253 68 loss """softplus""" +253 68 regularizer """no""" +253 68 optimizer """adam""" +253 68 training_loop """owa""" +253 68 negative_sampler """basic""" +253 68 evaluator """rankbased""" +253 69 dataset """kinships""" +253 69 model """ermlp""" +253 69 loss """softplus""" +253 69 regularizer """no""" +253 69 optimizer """adam""" +253 69 training_loop """owa""" +253 69 negative_sampler """basic""" +253 69 evaluator """rankbased""" +253 70 dataset """kinships""" +253 70 model """ermlp""" +253 70 loss """softplus""" +253 70 regularizer """no""" +253 70 optimizer """adam""" +253 70 training_loop """owa""" +253 70 negative_sampler """basic""" +253 70 evaluator 
"""rankbased""" +253 71 dataset """kinships""" +253 71 model """ermlp""" +253 71 loss """softplus""" +253 71 regularizer """no""" +253 71 optimizer """adam""" +253 71 training_loop """owa""" +253 71 negative_sampler """basic""" +253 71 evaluator """rankbased""" +253 72 dataset """kinships""" +253 72 model """ermlp""" +253 72 loss """softplus""" +253 72 regularizer """no""" +253 72 optimizer """adam""" +253 72 training_loop """owa""" +253 72 negative_sampler """basic""" +253 72 evaluator """rankbased""" +253 73 dataset """kinships""" +253 73 model """ermlp""" +253 73 loss """softplus""" +253 73 regularizer """no""" +253 73 optimizer """adam""" +253 73 training_loop """owa""" +253 73 negative_sampler """basic""" +253 73 evaluator """rankbased""" +253 74 dataset """kinships""" +253 74 model """ermlp""" +253 74 loss """softplus""" +253 74 regularizer """no""" +253 74 optimizer """adam""" +253 74 training_loop """owa""" +253 74 negative_sampler """basic""" +253 74 evaluator """rankbased""" +253 75 dataset """kinships""" +253 75 model """ermlp""" +253 75 loss """softplus""" +253 75 regularizer """no""" +253 75 optimizer """adam""" +253 75 training_loop """owa""" +253 75 negative_sampler """basic""" +253 75 evaluator """rankbased""" +253 76 dataset """kinships""" +253 76 model """ermlp""" +253 76 loss """softplus""" +253 76 regularizer """no""" +253 76 optimizer """adam""" +253 76 training_loop """owa""" +253 76 negative_sampler """basic""" +253 76 evaluator """rankbased""" +253 77 dataset """kinships""" +253 77 model """ermlp""" +253 77 loss """softplus""" +253 77 regularizer """no""" +253 77 optimizer """adam""" +253 77 training_loop """owa""" +253 77 negative_sampler """basic""" +253 77 evaluator """rankbased""" +253 78 dataset """kinships""" +253 78 model """ermlp""" +253 78 loss """softplus""" +253 78 regularizer """no""" +253 78 optimizer """adam""" +253 78 training_loop """owa""" +253 78 negative_sampler """basic""" +253 78 evaluator """rankbased""" +253 79 dataset 
"""kinships""" +253 79 model """ermlp""" +253 79 loss """softplus""" +253 79 regularizer """no""" +253 79 optimizer """adam""" +253 79 training_loop """owa""" +253 79 negative_sampler """basic""" +253 79 evaluator """rankbased""" +253 80 dataset """kinships""" +253 80 model """ermlp""" +253 80 loss """softplus""" +253 80 regularizer """no""" +253 80 optimizer """adam""" +253 80 training_loop """owa""" +253 80 negative_sampler """basic""" +253 80 evaluator """rankbased""" +253 81 dataset """kinships""" +253 81 model """ermlp""" +253 81 loss """softplus""" +253 81 regularizer """no""" +253 81 optimizer """adam""" +253 81 training_loop """owa""" +253 81 negative_sampler """basic""" +253 81 evaluator """rankbased""" +253 82 dataset """kinships""" +253 82 model """ermlp""" +253 82 loss """softplus""" +253 82 regularizer """no""" +253 82 optimizer """adam""" +253 82 training_loop """owa""" +253 82 negative_sampler """basic""" +253 82 evaluator """rankbased""" +253 83 dataset """kinships""" +253 83 model """ermlp""" +253 83 loss """softplus""" +253 83 regularizer """no""" +253 83 optimizer """adam""" +253 83 training_loop """owa""" +253 83 negative_sampler """basic""" +253 83 evaluator """rankbased""" +253 84 dataset """kinships""" +253 84 model """ermlp""" +253 84 loss """softplus""" +253 84 regularizer """no""" +253 84 optimizer """adam""" +253 84 training_loop """owa""" +253 84 negative_sampler """basic""" +253 84 evaluator """rankbased""" +253 85 dataset """kinships""" +253 85 model """ermlp""" +253 85 loss """softplus""" +253 85 regularizer """no""" +253 85 optimizer """adam""" +253 85 training_loop """owa""" +253 85 negative_sampler """basic""" +253 85 evaluator """rankbased""" +253 86 dataset """kinships""" +253 86 model """ermlp""" +253 86 loss """softplus""" +253 86 regularizer """no""" +253 86 optimizer """adam""" +253 86 training_loop """owa""" +253 86 negative_sampler """basic""" +253 86 evaluator """rankbased""" +253 87 dataset """kinships""" +253 87 model 
"""ermlp""" +253 87 loss """softplus""" +253 87 regularizer """no""" +253 87 optimizer """adam""" +253 87 training_loop """owa""" +253 87 negative_sampler """basic""" +253 87 evaluator """rankbased""" +253 88 dataset """kinships""" +253 88 model """ermlp""" +253 88 loss """softplus""" +253 88 regularizer """no""" +253 88 optimizer """adam""" +253 88 training_loop """owa""" +253 88 negative_sampler """basic""" +253 88 evaluator """rankbased""" +253 89 dataset """kinships""" +253 89 model """ermlp""" +253 89 loss """softplus""" +253 89 regularizer """no""" +253 89 optimizer """adam""" +253 89 training_loop """owa""" +253 89 negative_sampler """basic""" +253 89 evaluator """rankbased""" +253 90 dataset """kinships""" +253 90 model """ermlp""" +253 90 loss """softplus""" +253 90 regularizer """no""" +253 90 optimizer """adam""" +253 90 training_loop """owa""" +253 90 negative_sampler """basic""" +253 90 evaluator """rankbased""" +253 91 dataset """kinships""" +253 91 model """ermlp""" +253 91 loss """softplus""" +253 91 regularizer """no""" +253 91 optimizer """adam""" +253 91 training_loop """owa""" +253 91 negative_sampler """basic""" +253 91 evaluator """rankbased""" +253 92 dataset """kinships""" +253 92 model """ermlp""" +253 92 loss """softplus""" +253 92 regularizer """no""" +253 92 optimizer """adam""" +253 92 training_loop """owa""" +253 92 negative_sampler """basic""" +253 92 evaluator """rankbased""" +253 93 dataset """kinships""" +253 93 model """ermlp""" +253 93 loss """softplus""" +253 93 regularizer """no""" +253 93 optimizer """adam""" +253 93 training_loop """owa""" +253 93 negative_sampler """basic""" +253 93 evaluator """rankbased""" +253 94 dataset """kinships""" +253 94 model """ermlp""" +253 94 loss """softplus""" +253 94 regularizer """no""" +253 94 optimizer """adam""" +253 94 training_loop """owa""" +253 94 negative_sampler """basic""" +253 94 evaluator """rankbased""" +253 95 dataset """kinships""" +253 95 model """ermlp""" +253 95 loss 
"""softplus""" +253 95 regularizer """no""" +253 95 optimizer """adam""" +253 95 training_loop """owa""" +253 95 negative_sampler """basic""" +253 95 evaluator """rankbased""" +253 96 dataset """kinships""" +253 96 model """ermlp""" +253 96 loss """softplus""" +253 96 regularizer """no""" +253 96 optimizer """adam""" +253 96 training_loop """owa""" +253 96 negative_sampler """basic""" +253 96 evaluator """rankbased""" +253 97 dataset """kinships""" +253 97 model """ermlp""" +253 97 loss """softplus""" +253 97 regularizer """no""" +253 97 optimizer """adam""" +253 97 training_loop """owa""" +253 97 negative_sampler """basic""" +253 97 evaluator """rankbased""" +253 98 dataset """kinships""" +253 98 model """ermlp""" +253 98 loss """softplus""" +253 98 regularizer """no""" +253 98 optimizer """adam""" +253 98 training_loop """owa""" +253 98 negative_sampler """basic""" +253 98 evaluator """rankbased""" +253 99 dataset """kinships""" +253 99 model """ermlp""" +253 99 loss """softplus""" +253 99 regularizer """no""" +253 99 optimizer """adam""" +253 99 training_loop """owa""" +253 99 negative_sampler """basic""" +253 99 evaluator """rankbased""" +253 100 dataset """kinships""" +253 100 model """ermlp""" +253 100 loss """softplus""" +253 100 regularizer """no""" +253 100 optimizer """adam""" +253 100 training_loop """owa""" +253 100 negative_sampler """basic""" +253 100 evaluator """rankbased""" +254 1 model.embedding_dim 1.0 +254 1 optimizer.lr 0.005142260188486096 +254 1 negative_sampler.num_negs_per_pos 81.0 +254 1 training.batch_size 1.0 +254 2 model.embedding_dim 1.0 +254 2 optimizer.lr 0.00469960674995673 +254 2 negative_sampler.num_negs_per_pos 50.0 +254 2 training.batch_size 2.0 +254 3 model.embedding_dim 0.0 +254 3 optimizer.lr 0.09735153657585464 +254 3 negative_sampler.num_negs_per_pos 99.0 +254 3 training.batch_size 1.0 +254 4 model.embedding_dim 2.0 +254 4 optimizer.lr 0.00422756254277872 +254 4 negative_sampler.num_negs_per_pos 56.0 +254 4 
training.batch_size 1.0 +254 5 model.embedding_dim 1.0 +254 5 optimizer.lr 0.028532117589339684 +254 5 negative_sampler.num_negs_per_pos 28.0 +254 5 training.batch_size 0.0 +254 6 model.embedding_dim 1.0 +254 6 optimizer.lr 0.002014861725403315 +254 6 negative_sampler.num_negs_per_pos 9.0 +254 6 training.batch_size 1.0 +254 7 model.embedding_dim 1.0 +254 7 optimizer.lr 0.0012177496133254024 +254 7 negative_sampler.num_negs_per_pos 43.0 +254 7 training.batch_size 2.0 +254 8 model.embedding_dim 0.0 +254 8 optimizer.lr 0.002043062786985805 +254 8 negative_sampler.num_negs_per_pos 43.0 +254 8 training.batch_size 1.0 +254 9 model.embedding_dim 2.0 +254 9 optimizer.lr 0.033707428729720254 +254 9 negative_sampler.num_negs_per_pos 44.0 +254 9 training.batch_size 2.0 +254 10 model.embedding_dim 1.0 +254 10 optimizer.lr 0.003815971125935028 +254 10 negative_sampler.num_negs_per_pos 54.0 +254 10 training.batch_size 2.0 +254 11 model.embedding_dim 0.0 +254 11 optimizer.lr 0.0026691893091590497 +254 11 negative_sampler.num_negs_per_pos 44.0 +254 11 training.batch_size 0.0 +254 12 model.embedding_dim 1.0 +254 12 optimizer.lr 0.04510468942069362 +254 12 negative_sampler.num_negs_per_pos 86.0 +254 12 training.batch_size 0.0 +254 13 model.embedding_dim 0.0 +254 13 optimizer.lr 0.013008616395780614 +254 13 negative_sampler.num_negs_per_pos 66.0 +254 13 training.batch_size 1.0 +254 14 model.embedding_dim 0.0 +254 14 optimizer.lr 0.00420483582840981 +254 14 negative_sampler.num_negs_per_pos 50.0 +254 14 training.batch_size 2.0 +254 15 model.embedding_dim 1.0 +254 15 optimizer.lr 0.08051125040711503 +254 15 negative_sampler.num_negs_per_pos 91.0 +254 15 training.batch_size 1.0 +254 16 model.embedding_dim 0.0 +254 16 optimizer.lr 0.01734812837388059 +254 16 negative_sampler.num_negs_per_pos 16.0 +254 16 training.batch_size 1.0 +254 17 model.embedding_dim 1.0 +254 17 optimizer.lr 0.013761230006749246 +254 17 negative_sampler.num_negs_per_pos 58.0 +254 17 training.batch_size 2.0 +254 18 
model.embedding_dim 2.0 +254 18 optimizer.lr 0.0022763237680574343 +254 18 negative_sampler.num_negs_per_pos 36.0 +254 18 training.batch_size 2.0 +254 19 model.embedding_dim 0.0 +254 19 optimizer.lr 0.05082574036510669 +254 19 negative_sampler.num_negs_per_pos 21.0 +254 19 training.batch_size 0.0 +254 20 model.embedding_dim 0.0 +254 20 optimizer.lr 0.03331102721395807 +254 20 negative_sampler.num_negs_per_pos 22.0 +254 20 training.batch_size 0.0 +254 21 model.embedding_dim 2.0 +254 21 optimizer.lr 0.05205610901149301 +254 21 negative_sampler.num_negs_per_pos 30.0 +254 21 training.batch_size 2.0 +254 22 model.embedding_dim 2.0 +254 22 optimizer.lr 0.0027257640656268758 +254 22 negative_sampler.num_negs_per_pos 67.0 +254 22 training.batch_size 2.0 +254 23 model.embedding_dim 1.0 +254 23 optimizer.lr 0.03809539832188979 +254 23 negative_sampler.num_negs_per_pos 73.0 +254 23 training.batch_size 1.0 +254 24 model.embedding_dim 1.0 +254 24 optimizer.lr 0.01016286430221301 +254 24 negative_sampler.num_negs_per_pos 24.0 +254 24 training.batch_size 1.0 +254 25 model.embedding_dim 1.0 +254 25 optimizer.lr 0.0021471368255039495 +254 25 negative_sampler.num_negs_per_pos 16.0 +254 25 training.batch_size 0.0 +254 26 model.embedding_dim 2.0 +254 26 optimizer.lr 0.05220389889072714 +254 26 negative_sampler.num_negs_per_pos 98.0 +254 26 training.batch_size 0.0 +254 27 model.embedding_dim 2.0 +254 27 optimizer.lr 0.0029775214241629794 +254 27 negative_sampler.num_negs_per_pos 21.0 +254 27 training.batch_size 0.0 +254 28 model.embedding_dim 2.0 +254 28 optimizer.lr 0.00179085887960686 +254 28 negative_sampler.num_negs_per_pos 95.0 +254 28 training.batch_size 2.0 +254 29 model.embedding_dim 1.0 +254 29 optimizer.lr 0.0036725607720348593 +254 29 negative_sampler.num_negs_per_pos 93.0 +254 29 training.batch_size 2.0 +254 30 model.embedding_dim 0.0 +254 30 optimizer.lr 0.001074368800361168 +254 30 negative_sampler.num_negs_per_pos 78.0 +254 30 training.batch_size 2.0 +254 31 
model.embedding_dim 1.0 +254 31 optimizer.lr 0.005300257685105569 +254 31 negative_sampler.num_negs_per_pos 67.0 +254 31 training.batch_size 1.0 +254 32 model.embedding_dim 0.0 +254 32 optimizer.lr 0.058518880654388934 +254 32 negative_sampler.num_negs_per_pos 75.0 +254 32 training.batch_size 2.0 +254 33 model.embedding_dim 2.0 +254 33 optimizer.lr 0.045996521802643194 +254 33 negative_sampler.num_negs_per_pos 42.0 +254 33 training.batch_size 0.0 +254 34 model.embedding_dim 2.0 +254 34 optimizer.lr 0.004089190717298531 +254 34 negative_sampler.num_negs_per_pos 10.0 +254 34 training.batch_size 2.0 +254 35 model.embedding_dim 1.0 +254 35 optimizer.lr 0.03619524995759685 +254 35 negative_sampler.num_negs_per_pos 25.0 +254 35 training.batch_size 0.0 +254 36 model.embedding_dim 1.0 +254 36 optimizer.lr 0.0063290507696161765 +254 36 negative_sampler.num_negs_per_pos 45.0 +254 36 training.batch_size 0.0 +254 37 model.embedding_dim 1.0 +254 37 optimizer.lr 0.02990398281242962 +254 37 negative_sampler.num_negs_per_pos 77.0 +254 37 training.batch_size 2.0 +254 38 model.embedding_dim 1.0 +254 38 optimizer.lr 0.004072140367722511 +254 38 negative_sampler.num_negs_per_pos 65.0 +254 38 training.batch_size 0.0 +254 39 model.embedding_dim 0.0 +254 39 optimizer.lr 0.015378379457541516 +254 39 negative_sampler.num_negs_per_pos 42.0 +254 39 training.batch_size 2.0 +254 40 model.embedding_dim 2.0 +254 40 optimizer.lr 0.04939257858024175 +254 40 negative_sampler.num_negs_per_pos 58.0 +254 40 training.batch_size 1.0 +254 41 model.embedding_dim 0.0 +254 41 optimizer.lr 0.0027551453112100387 +254 41 negative_sampler.num_negs_per_pos 47.0 +254 41 training.batch_size 2.0 +254 42 model.embedding_dim 1.0 +254 42 optimizer.lr 0.06749098916535241 +254 42 negative_sampler.num_negs_per_pos 35.0 +254 42 training.batch_size 1.0 +254 43 model.embedding_dim 0.0 +254 43 optimizer.lr 0.004917235186884279 +254 43 negative_sampler.num_negs_per_pos 8.0 +254 43 training.batch_size 2.0 +254 44 
model.embedding_dim 0.0 +254 44 optimizer.lr 0.005103069040644064 +254 44 negative_sampler.num_negs_per_pos 57.0 +254 44 training.batch_size 2.0 +254 45 model.embedding_dim 1.0 +254 45 optimizer.lr 0.04615402796451431 +254 45 negative_sampler.num_negs_per_pos 96.0 +254 45 training.batch_size 2.0 +254 46 model.embedding_dim 2.0 +254 46 optimizer.lr 0.01597002974665771 +254 46 negative_sampler.num_negs_per_pos 56.0 +254 46 training.batch_size 2.0 +254 47 model.embedding_dim 0.0 +254 47 optimizer.lr 0.04417594686069045 +254 47 negative_sampler.num_negs_per_pos 13.0 +254 47 training.batch_size 1.0 +254 48 model.embedding_dim 0.0 +254 48 optimizer.lr 0.006735024395263851 +254 48 negative_sampler.num_negs_per_pos 12.0 +254 48 training.batch_size 1.0 +254 49 model.embedding_dim 0.0 +254 49 optimizer.lr 0.001105574345117515 +254 49 negative_sampler.num_negs_per_pos 50.0 +254 49 training.batch_size 2.0 +254 50 model.embedding_dim 2.0 +254 50 optimizer.lr 0.02210615720532331 +254 50 negative_sampler.num_negs_per_pos 26.0 +254 50 training.batch_size 0.0 +254 51 model.embedding_dim 1.0 +254 51 optimizer.lr 0.00563250756931534 +254 51 negative_sampler.num_negs_per_pos 14.0 +254 51 training.batch_size 0.0 +254 52 model.embedding_dim 0.0 +254 52 optimizer.lr 0.008038803854616704 +254 52 negative_sampler.num_negs_per_pos 61.0 +254 52 training.batch_size 0.0 +254 53 model.embedding_dim 2.0 +254 53 optimizer.lr 0.018913287791973824 +254 53 negative_sampler.num_negs_per_pos 14.0 +254 53 training.batch_size 1.0 +254 54 model.embedding_dim 1.0 +254 54 optimizer.lr 0.024483114787874495 +254 54 negative_sampler.num_negs_per_pos 28.0 +254 54 training.batch_size 2.0 +254 55 model.embedding_dim 2.0 +254 55 optimizer.lr 0.0018956638140980287 +254 55 negative_sampler.num_negs_per_pos 44.0 +254 55 training.batch_size 1.0 +254 56 model.embedding_dim 2.0 +254 56 optimizer.lr 0.03874841154189481 +254 56 negative_sampler.num_negs_per_pos 65.0 +254 56 training.batch_size 2.0 +254 57 
model.embedding_dim 1.0 +254 57 optimizer.lr 0.004609681817220968 +254 57 negative_sampler.num_negs_per_pos 75.0 +254 57 training.batch_size 1.0 +254 58 model.embedding_dim 0.0 +254 58 optimizer.lr 0.017687762501944117 +254 58 negative_sampler.num_negs_per_pos 36.0 +254 58 training.batch_size 2.0 +254 59 model.embedding_dim 2.0 +254 59 optimizer.lr 0.0013014360949311643 +254 59 negative_sampler.num_negs_per_pos 77.0 +254 59 training.batch_size 1.0 +254 60 model.embedding_dim 1.0 +254 60 optimizer.lr 0.059820689582490476 +254 60 negative_sampler.num_negs_per_pos 97.0 +254 60 training.batch_size 2.0 +254 61 model.embedding_dim 0.0 +254 61 optimizer.lr 0.007805501584039932 +254 61 negative_sampler.num_negs_per_pos 51.0 +254 61 training.batch_size 0.0 +254 62 model.embedding_dim 0.0 +254 62 optimizer.lr 0.09046922168416908 +254 62 negative_sampler.num_negs_per_pos 5.0 +254 62 training.batch_size 2.0 +254 63 model.embedding_dim 2.0 +254 63 optimizer.lr 0.04976418428080647 +254 63 negative_sampler.num_negs_per_pos 9.0 +254 63 training.batch_size 0.0 +254 64 model.embedding_dim 0.0 +254 64 optimizer.lr 0.023739140463248036 +254 64 negative_sampler.num_negs_per_pos 42.0 +254 64 training.batch_size 2.0 +254 65 model.embedding_dim 0.0 +254 65 optimizer.lr 0.031816255479737085 +254 65 negative_sampler.num_negs_per_pos 59.0 +254 65 training.batch_size 1.0 +254 66 model.embedding_dim 0.0 +254 66 optimizer.lr 0.019023888847267356 +254 66 negative_sampler.num_negs_per_pos 92.0 +254 66 training.batch_size 2.0 +254 67 model.embedding_dim 0.0 +254 67 optimizer.lr 0.05835404669058806 +254 67 negative_sampler.num_negs_per_pos 41.0 +254 67 training.batch_size 0.0 +254 68 model.embedding_dim 1.0 +254 68 optimizer.lr 0.07863564490177917 +254 68 negative_sampler.num_negs_per_pos 86.0 +254 68 training.batch_size 1.0 +254 69 model.embedding_dim 2.0 +254 69 optimizer.lr 0.01947993347969098 +254 69 negative_sampler.num_negs_per_pos 68.0 +254 69 training.batch_size 0.0 +254 70 
model.embedding_dim 2.0 +254 70 optimizer.lr 0.013992158844754632 +254 70 negative_sampler.num_negs_per_pos 60.0 +254 70 training.batch_size 0.0 +254 71 model.embedding_dim 0.0 +254 71 optimizer.lr 0.01865523440616379 +254 71 negative_sampler.num_negs_per_pos 54.0 +254 71 training.batch_size 0.0 +254 72 model.embedding_dim 1.0 +254 72 optimizer.lr 0.001106240050428617 +254 72 negative_sampler.num_negs_per_pos 42.0 +254 72 training.batch_size 0.0 +254 73 model.embedding_dim 1.0 +254 73 optimizer.lr 0.06795423420640828 +254 73 negative_sampler.num_negs_per_pos 15.0 +254 73 training.batch_size 0.0 +254 74 model.embedding_dim 1.0 +254 74 optimizer.lr 0.06615104400996384 +254 74 negative_sampler.num_negs_per_pos 16.0 +254 74 training.batch_size 1.0 +254 75 model.embedding_dim 0.0 +254 75 optimizer.lr 0.0037913639894328415 +254 75 negative_sampler.num_negs_per_pos 89.0 +254 75 training.batch_size 1.0 +254 76 model.embedding_dim 1.0 +254 76 optimizer.lr 0.004718004273297859 +254 76 negative_sampler.num_negs_per_pos 25.0 +254 76 training.batch_size 0.0 +254 77 model.embedding_dim 1.0 +254 77 optimizer.lr 0.01727088900556846 +254 77 negative_sampler.num_negs_per_pos 71.0 +254 77 training.batch_size 0.0 +254 78 model.embedding_dim 0.0 +254 78 optimizer.lr 0.07028916291473836 +254 78 negative_sampler.num_negs_per_pos 75.0 +254 78 training.batch_size 1.0 +254 79 model.embedding_dim 0.0 +254 79 optimizer.lr 0.002709663443311017 +254 79 negative_sampler.num_negs_per_pos 77.0 +254 79 training.batch_size 1.0 +254 80 model.embedding_dim 1.0 +254 80 optimizer.lr 0.0013583379008018813 +254 80 negative_sampler.num_negs_per_pos 19.0 +254 80 training.batch_size 0.0 +254 81 model.embedding_dim 2.0 +254 81 optimizer.lr 0.0021736076543726213 +254 81 negative_sampler.num_negs_per_pos 66.0 +254 81 training.batch_size 2.0 +254 82 model.embedding_dim 1.0 +254 82 optimizer.lr 0.09369320691821545 +254 82 negative_sampler.num_negs_per_pos 84.0 +254 82 training.batch_size 1.0 +254 83 
model.embedding_dim 1.0 +254 83 optimizer.lr 0.009037234607763085 +254 83 negative_sampler.num_negs_per_pos 60.0 +254 83 training.batch_size 0.0 +254 84 model.embedding_dim 2.0 +254 84 optimizer.lr 0.07284843098700307 +254 84 negative_sampler.num_negs_per_pos 74.0 +254 84 training.batch_size 1.0 +254 85 model.embedding_dim 0.0 +254 85 optimizer.lr 0.002333059225333524 +254 85 negative_sampler.num_negs_per_pos 71.0 +254 85 training.batch_size 0.0 +254 86 model.embedding_dim 0.0 +254 86 optimizer.lr 0.051609507138126155 +254 86 negative_sampler.num_negs_per_pos 47.0 +254 86 training.batch_size 1.0 +254 87 model.embedding_dim 2.0 +254 87 optimizer.lr 0.004615900423517471 +254 87 negative_sampler.num_negs_per_pos 75.0 +254 87 training.batch_size 1.0 +254 88 model.embedding_dim 2.0 +254 88 optimizer.lr 0.019160341088178098 +254 88 negative_sampler.num_negs_per_pos 20.0 +254 88 training.batch_size 1.0 +254 89 model.embedding_dim 2.0 +254 89 optimizer.lr 0.057350001968549225 +254 89 negative_sampler.num_negs_per_pos 74.0 +254 89 training.batch_size 1.0 +254 90 model.embedding_dim 1.0 +254 90 optimizer.lr 0.09943233320177852 +254 90 negative_sampler.num_negs_per_pos 4.0 +254 90 training.batch_size 1.0 +254 91 model.embedding_dim 1.0 +254 91 optimizer.lr 0.011044115859128347 +254 91 negative_sampler.num_negs_per_pos 72.0 +254 91 training.batch_size 1.0 +254 92 model.embedding_dim 2.0 +254 92 optimizer.lr 0.001057149505553134 +254 92 negative_sampler.num_negs_per_pos 29.0 +254 92 training.batch_size 2.0 +254 93 model.embedding_dim 2.0 +254 93 optimizer.lr 0.011161150194645022 +254 93 negative_sampler.num_negs_per_pos 59.0 +254 93 training.batch_size 2.0 +254 94 model.embedding_dim 0.0 +254 94 optimizer.lr 0.022424262293235225 +254 94 negative_sampler.num_negs_per_pos 31.0 +254 94 training.batch_size 2.0 +254 95 model.embedding_dim 0.0 +254 95 optimizer.lr 0.0010530742730652596 +254 95 negative_sampler.num_negs_per_pos 75.0 +254 95 training.batch_size 1.0 +254 96 
model.embedding_dim 1.0 +254 96 optimizer.lr 0.05358563334435814 +254 96 negative_sampler.num_negs_per_pos 64.0 +254 96 training.batch_size 0.0 +254 97 model.embedding_dim 0.0 +254 97 optimizer.lr 0.019261811149764194 +254 97 negative_sampler.num_negs_per_pos 32.0 +254 97 training.batch_size 2.0 +254 98 model.embedding_dim 2.0 +254 98 optimizer.lr 0.03646509958581882 +254 98 negative_sampler.num_negs_per_pos 26.0 +254 98 training.batch_size 2.0 +254 99 model.embedding_dim 1.0 +254 99 optimizer.lr 0.005508353989796074 +254 99 negative_sampler.num_negs_per_pos 81.0 +254 99 training.batch_size 2.0 +254 100 model.embedding_dim 1.0 +254 100 optimizer.lr 0.07578962250932327 +254 100 negative_sampler.num_negs_per_pos 16.0 +254 100 training.batch_size 1.0 +254 1 dataset """kinships""" +254 1 model """ermlp""" +254 1 loss """bceaftersigmoid""" +254 1 regularizer """no""" +254 1 optimizer """adam""" +254 1 training_loop """owa""" +254 1 negative_sampler """basic""" +254 1 evaluator """rankbased""" +254 2 dataset """kinships""" +254 2 model """ermlp""" +254 2 loss """bceaftersigmoid""" +254 2 regularizer """no""" +254 2 optimizer """adam""" +254 2 training_loop """owa""" +254 2 negative_sampler """basic""" +254 2 evaluator """rankbased""" +254 3 dataset """kinships""" +254 3 model """ermlp""" +254 3 loss """bceaftersigmoid""" +254 3 regularizer """no""" +254 3 optimizer """adam""" +254 3 training_loop """owa""" +254 3 negative_sampler """basic""" +254 3 evaluator """rankbased""" +254 4 dataset """kinships""" +254 4 model """ermlp""" +254 4 loss """bceaftersigmoid""" +254 4 regularizer """no""" +254 4 optimizer """adam""" +254 4 training_loop """owa""" +254 4 negative_sampler """basic""" +254 4 evaluator """rankbased""" +254 5 dataset """kinships""" +254 5 model """ermlp""" +254 5 loss """bceaftersigmoid""" +254 5 regularizer """no""" +254 5 optimizer """adam""" +254 5 training_loop """owa""" +254 5 negative_sampler """basic""" +254 5 evaluator """rankbased""" +254 6 dataset 
"""kinships""" +254 6 model """ermlp""" +254 6 loss """bceaftersigmoid""" +254 6 regularizer """no""" +254 6 optimizer """adam""" +254 6 training_loop """owa""" +254 6 negative_sampler """basic""" +254 6 evaluator """rankbased""" +254 7 dataset """kinships""" +254 7 model """ermlp""" +254 7 loss """bceaftersigmoid""" +254 7 regularizer """no""" +254 7 optimizer """adam""" +254 7 training_loop """owa""" +254 7 negative_sampler """basic""" +254 7 evaluator """rankbased""" +254 8 dataset """kinships""" +254 8 model """ermlp""" +254 8 loss """bceaftersigmoid""" +254 8 regularizer """no""" +254 8 optimizer """adam""" +254 8 training_loop """owa""" +254 8 negative_sampler """basic""" +254 8 evaluator """rankbased""" +254 9 dataset """kinships""" +254 9 model """ermlp""" +254 9 loss """bceaftersigmoid""" +254 9 regularizer """no""" +254 9 optimizer """adam""" +254 9 training_loop """owa""" +254 9 negative_sampler """basic""" +254 9 evaluator """rankbased""" +254 10 dataset """kinships""" +254 10 model """ermlp""" +254 10 loss """bceaftersigmoid""" +254 10 regularizer """no""" +254 10 optimizer """adam""" +254 10 training_loop """owa""" +254 10 negative_sampler """basic""" +254 10 evaluator """rankbased""" +254 11 dataset """kinships""" +254 11 model """ermlp""" +254 11 loss """bceaftersigmoid""" +254 11 regularizer """no""" +254 11 optimizer """adam""" +254 11 training_loop """owa""" +254 11 negative_sampler """basic""" +254 11 evaluator """rankbased""" +254 12 dataset """kinships""" +254 12 model """ermlp""" +254 12 loss """bceaftersigmoid""" +254 12 regularizer """no""" +254 12 optimizer """adam""" +254 12 training_loop """owa""" +254 12 negative_sampler """basic""" +254 12 evaluator """rankbased""" +254 13 dataset """kinships""" +254 13 model """ermlp""" +254 13 loss """bceaftersigmoid""" +254 13 regularizer """no""" +254 13 optimizer """adam""" +254 13 training_loop """owa""" +254 13 negative_sampler """basic""" +254 13 evaluator """rankbased""" +254 14 dataset 
"""kinships""" +254 14 model """ermlp""" +254 14 loss """bceaftersigmoid""" +254 14 regularizer """no""" +254 14 optimizer """adam""" +254 14 training_loop """owa""" +254 14 negative_sampler """basic""" +254 14 evaluator """rankbased""" +254 15 dataset """kinships""" +254 15 model """ermlp""" +254 15 loss """bceaftersigmoid""" +254 15 regularizer """no""" +254 15 optimizer """adam""" +254 15 training_loop """owa""" +254 15 negative_sampler """basic""" +254 15 evaluator """rankbased""" +254 16 dataset """kinships""" +254 16 model """ermlp""" +254 16 loss """bceaftersigmoid""" +254 16 regularizer """no""" +254 16 optimizer """adam""" +254 16 training_loop """owa""" +254 16 negative_sampler """basic""" +254 16 evaluator """rankbased""" +254 17 dataset """kinships""" +254 17 model """ermlp""" +254 17 loss """bceaftersigmoid""" +254 17 regularizer """no""" +254 17 optimizer """adam""" +254 17 training_loop """owa""" +254 17 negative_sampler """basic""" +254 17 evaluator """rankbased""" +254 18 dataset """kinships""" +254 18 model """ermlp""" +254 18 loss """bceaftersigmoid""" +254 18 regularizer """no""" +254 18 optimizer """adam""" +254 18 training_loop """owa""" +254 18 negative_sampler """basic""" +254 18 evaluator """rankbased""" +254 19 dataset """kinships""" +254 19 model """ermlp""" +254 19 loss """bceaftersigmoid""" +254 19 regularizer """no""" +254 19 optimizer """adam""" +254 19 training_loop """owa""" +254 19 negative_sampler """basic""" +254 19 evaluator """rankbased""" +254 20 dataset """kinships""" +254 20 model """ermlp""" +254 20 loss """bceaftersigmoid""" +254 20 regularizer """no""" +254 20 optimizer """adam""" +254 20 training_loop """owa""" +254 20 negative_sampler """basic""" +254 20 evaluator """rankbased""" +254 21 dataset """kinships""" +254 21 model """ermlp""" +254 21 loss """bceaftersigmoid""" +254 21 regularizer """no""" +254 21 optimizer """adam""" +254 21 training_loop """owa""" +254 21 negative_sampler """basic""" +254 21 evaluator 
"""rankbased""" +254 22 dataset """kinships""" +254 22 model """ermlp""" +254 22 loss """bceaftersigmoid""" +254 22 regularizer """no""" +254 22 optimizer """adam""" +254 22 training_loop """owa""" +254 22 negative_sampler """basic""" +254 22 evaluator """rankbased""" +254 23 dataset """kinships""" +254 23 model """ermlp""" +254 23 loss """bceaftersigmoid""" +254 23 regularizer """no""" +254 23 optimizer """adam""" +254 23 training_loop """owa""" +254 23 negative_sampler """basic""" +254 23 evaluator """rankbased""" +254 24 dataset """kinships""" +254 24 model """ermlp""" +254 24 loss """bceaftersigmoid""" +254 24 regularizer """no""" +254 24 optimizer """adam""" +254 24 training_loop """owa""" +254 24 negative_sampler """basic""" +254 24 evaluator """rankbased""" +254 25 dataset """kinships""" +254 25 model """ermlp""" +254 25 loss """bceaftersigmoid""" +254 25 regularizer """no""" +254 25 optimizer """adam""" +254 25 training_loop """owa""" +254 25 negative_sampler """basic""" +254 25 evaluator """rankbased""" +254 26 dataset """kinships""" +254 26 model """ermlp""" +254 26 loss """bceaftersigmoid""" +254 26 regularizer """no""" +254 26 optimizer """adam""" +254 26 training_loop """owa""" +254 26 negative_sampler """basic""" +254 26 evaluator """rankbased""" +254 27 dataset """kinships""" +254 27 model """ermlp""" +254 27 loss """bceaftersigmoid""" +254 27 regularizer """no""" +254 27 optimizer """adam""" +254 27 training_loop """owa""" +254 27 negative_sampler """basic""" +254 27 evaluator """rankbased""" +254 28 dataset """kinships""" +254 28 model """ermlp""" +254 28 loss """bceaftersigmoid""" +254 28 regularizer """no""" +254 28 optimizer """adam""" +254 28 training_loop """owa""" +254 28 negative_sampler """basic""" +254 28 evaluator """rankbased""" +254 29 dataset """kinships""" +254 29 model """ermlp""" +254 29 loss """bceaftersigmoid""" +254 29 regularizer """no""" +254 29 optimizer """adam""" +254 29 training_loop """owa""" +254 29 negative_sampler 
"""basic""" +254 29 evaluator """rankbased""" +254 30 dataset """kinships""" +254 30 model """ermlp""" +254 30 loss """bceaftersigmoid""" +254 30 regularizer """no""" +254 30 optimizer """adam""" +254 30 training_loop """owa""" +254 30 negative_sampler """basic""" +254 30 evaluator """rankbased""" +254 31 dataset """kinships""" +254 31 model """ermlp""" +254 31 loss """bceaftersigmoid""" +254 31 regularizer """no""" +254 31 optimizer """adam""" +254 31 training_loop """owa""" +254 31 negative_sampler """basic""" +254 31 evaluator """rankbased""" +254 32 dataset """kinships""" +254 32 model """ermlp""" +254 32 loss """bceaftersigmoid""" +254 32 regularizer """no""" +254 32 optimizer """adam""" +254 32 training_loop """owa""" +254 32 negative_sampler """basic""" +254 32 evaluator """rankbased""" +254 33 dataset """kinships""" +254 33 model """ermlp""" +254 33 loss """bceaftersigmoid""" +254 33 regularizer """no""" +254 33 optimizer """adam""" +254 33 training_loop """owa""" +254 33 negative_sampler """basic""" +254 33 evaluator """rankbased""" +254 34 dataset """kinships""" +254 34 model """ermlp""" +254 34 loss """bceaftersigmoid""" +254 34 regularizer """no""" +254 34 optimizer """adam""" +254 34 training_loop """owa""" +254 34 negative_sampler """basic""" +254 34 evaluator """rankbased""" +254 35 dataset """kinships""" +254 35 model """ermlp""" +254 35 loss """bceaftersigmoid""" +254 35 regularizer """no""" +254 35 optimizer """adam""" +254 35 training_loop """owa""" +254 35 negative_sampler """basic""" +254 35 evaluator """rankbased""" +254 36 dataset """kinships""" +254 36 model """ermlp""" +254 36 loss """bceaftersigmoid""" +254 36 regularizer """no""" +254 36 optimizer """adam""" +254 36 training_loop """owa""" +254 36 negative_sampler """basic""" +254 36 evaluator """rankbased""" +254 37 dataset """kinships""" +254 37 model """ermlp""" +254 37 loss """bceaftersigmoid""" +254 37 regularizer """no""" +254 37 optimizer """adam""" +254 37 training_loop """owa""" 
+254 37 negative_sampler """basic""" +254 37 evaluator """rankbased""" +254 38 dataset """kinships""" +254 38 model """ermlp""" +254 38 loss """bceaftersigmoid""" +254 38 regularizer """no""" +254 38 optimizer """adam""" +254 38 training_loop """owa""" +254 38 negative_sampler """basic""" +254 38 evaluator """rankbased""" +254 39 dataset """kinships""" +254 39 model """ermlp""" +254 39 loss """bceaftersigmoid""" +254 39 regularizer """no""" +254 39 optimizer """adam""" +254 39 training_loop """owa""" +254 39 negative_sampler """basic""" +254 39 evaluator """rankbased""" +254 40 dataset """kinships""" +254 40 model """ermlp""" +254 40 loss """bceaftersigmoid""" +254 40 regularizer """no""" +254 40 optimizer """adam""" +254 40 training_loop """owa""" +254 40 negative_sampler """basic""" +254 40 evaluator """rankbased""" +254 41 dataset """kinships""" +254 41 model """ermlp""" +254 41 loss """bceaftersigmoid""" +254 41 regularizer """no""" +254 41 optimizer """adam""" +254 41 training_loop """owa""" +254 41 negative_sampler """basic""" +254 41 evaluator """rankbased""" +254 42 dataset """kinships""" +254 42 model """ermlp""" +254 42 loss """bceaftersigmoid""" +254 42 regularizer """no""" +254 42 optimizer """adam""" +254 42 training_loop """owa""" +254 42 negative_sampler """basic""" +254 42 evaluator """rankbased""" +254 43 dataset """kinships""" +254 43 model """ermlp""" +254 43 loss """bceaftersigmoid""" +254 43 regularizer """no""" +254 43 optimizer """adam""" +254 43 training_loop """owa""" +254 43 negative_sampler """basic""" +254 43 evaluator """rankbased""" +254 44 dataset """kinships""" +254 44 model """ermlp""" +254 44 loss """bceaftersigmoid""" +254 44 regularizer """no""" +254 44 optimizer """adam""" +254 44 training_loop """owa""" +254 44 negative_sampler """basic""" +254 44 evaluator """rankbased""" +254 45 dataset """kinships""" +254 45 model """ermlp""" +254 45 loss """bceaftersigmoid""" +254 45 regularizer """no""" +254 45 optimizer """adam""" +254 45 
training_loop """owa""" +254 45 negative_sampler """basic""" +254 45 evaluator """rankbased""" +254 46 dataset """kinships""" +254 46 model """ermlp""" +254 46 loss """bceaftersigmoid""" +254 46 regularizer """no""" +254 46 optimizer """adam""" +254 46 training_loop """owa""" +254 46 negative_sampler """basic""" +254 46 evaluator """rankbased""" +254 47 dataset """kinships""" +254 47 model """ermlp""" +254 47 loss """bceaftersigmoid""" +254 47 regularizer """no""" +254 47 optimizer """adam""" +254 47 training_loop """owa""" +254 47 negative_sampler """basic""" +254 47 evaluator """rankbased""" +254 48 dataset """kinships""" +254 48 model """ermlp""" +254 48 loss """bceaftersigmoid""" +254 48 regularizer """no""" +254 48 optimizer """adam""" +254 48 training_loop """owa""" +254 48 negative_sampler """basic""" +254 48 evaluator """rankbased""" +254 49 dataset """kinships""" +254 49 model """ermlp""" +254 49 loss """bceaftersigmoid""" +254 49 regularizer """no""" +254 49 optimizer """adam""" +254 49 training_loop """owa""" +254 49 negative_sampler """basic""" +254 49 evaluator """rankbased""" +254 50 dataset """kinships""" +254 50 model """ermlp""" +254 50 loss """bceaftersigmoid""" +254 50 regularizer """no""" +254 50 optimizer """adam""" +254 50 training_loop """owa""" +254 50 negative_sampler """basic""" +254 50 evaluator """rankbased""" +254 51 dataset """kinships""" +254 51 model """ermlp""" +254 51 loss """bceaftersigmoid""" +254 51 regularizer """no""" +254 51 optimizer """adam""" +254 51 training_loop """owa""" +254 51 negative_sampler """basic""" +254 51 evaluator """rankbased""" +254 52 dataset """kinships""" +254 52 model """ermlp""" +254 52 loss """bceaftersigmoid""" +254 52 regularizer """no""" +254 52 optimizer """adam""" +254 52 training_loop """owa""" +254 52 negative_sampler """basic""" +254 52 evaluator """rankbased""" +254 53 dataset """kinships""" +254 53 model """ermlp""" +254 53 loss """bceaftersigmoid""" +254 53 regularizer """no""" +254 53 
optimizer """adam""" +254 53 training_loop """owa""" +254 53 negative_sampler """basic""" +254 53 evaluator """rankbased""" +254 54 dataset """kinships""" +254 54 model """ermlp""" +254 54 loss """bceaftersigmoid""" +254 54 regularizer """no""" +254 54 optimizer """adam""" +254 54 training_loop """owa""" +254 54 negative_sampler """basic""" +254 54 evaluator """rankbased""" +254 55 dataset """kinships""" +254 55 model """ermlp""" +254 55 loss """bceaftersigmoid""" +254 55 regularizer """no""" +254 55 optimizer """adam""" +254 55 training_loop """owa""" +254 55 negative_sampler """basic""" +254 55 evaluator """rankbased""" +254 56 dataset """kinships""" +254 56 model """ermlp""" +254 56 loss """bceaftersigmoid""" +254 56 regularizer """no""" +254 56 optimizer """adam""" +254 56 training_loop """owa""" +254 56 negative_sampler """basic""" +254 56 evaluator """rankbased""" +254 57 dataset """kinships""" +254 57 model """ermlp""" +254 57 loss """bceaftersigmoid""" +254 57 regularizer """no""" +254 57 optimizer """adam""" +254 57 training_loop """owa""" +254 57 negative_sampler """basic""" +254 57 evaluator """rankbased""" +254 58 dataset """kinships""" +254 58 model """ermlp""" +254 58 loss """bceaftersigmoid""" +254 58 regularizer """no""" +254 58 optimizer """adam""" +254 58 training_loop """owa""" +254 58 negative_sampler """basic""" +254 58 evaluator """rankbased""" +254 59 dataset """kinships""" +254 59 model """ermlp""" +254 59 loss """bceaftersigmoid""" +254 59 regularizer """no""" +254 59 optimizer """adam""" +254 59 training_loop """owa""" +254 59 negative_sampler """basic""" +254 59 evaluator """rankbased""" +254 60 dataset """kinships""" +254 60 model """ermlp""" +254 60 loss """bceaftersigmoid""" +254 60 regularizer """no""" +254 60 optimizer """adam""" +254 60 training_loop """owa""" +254 60 negative_sampler """basic""" +254 60 evaluator """rankbased""" +254 61 dataset """kinships""" +254 61 model """ermlp""" +254 61 loss """bceaftersigmoid""" +254 61 
regularizer """no""" +254 61 optimizer """adam""" +254 61 training_loop """owa""" +254 61 negative_sampler """basic""" +254 61 evaluator """rankbased""" +254 62 dataset """kinships""" +254 62 model """ermlp""" +254 62 loss """bceaftersigmoid""" +254 62 regularizer """no""" +254 62 optimizer """adam""" +254 62 training_loop """owa""" +254 62 negative_sampler """basic""" +254 62 evaluator """rankbased""" +254 63 dataset """kinships""" +254 63 model """ermlp""" +254 63 loss """bceaftersigmoid""" +254 63 regularizer """no""" +254 63 optimizer """adam""" +254 63 training_loop """owa""" +254 63 negative_sampler """basic""" +254 63 evaluator """rankbased""" +254 64 dataset """kinships""" +254 64 model """ermlp""" +254 64 loss """bceaftersigmoid""" +254 64 regularizer """no""" +254 64 optimizer """adam""" +254 64 training_loop """owa""" +254 64 negative_sampler """basic""" +254 64 evaluator """rankbased""" +254 65 dataset """kinships""" +254 65 model """ermlp""" +254 65 loss """bceaftersigmoid""" +254 65 regularizer """no""" +254 65 optimizer """adam""" +254 65 training_loop """owa""" +254 65 negative_sampler """basic""" +254 65 evaluator """rankbased""" +254 66 dataset """kinships""" +254 66 model """ermlp""" +254 66 loss """bceaftersigmoid""" +254 66 regularizer """no""" +254 66 optimizer """adam""" +254 66 training_loop """owa""" +254 66 negative_sampler """basic""" +254 66 evaluator """rankbased""" +254 67 dataset """kinships""" +254 67 model """ermlp""" +254 67 loss """bceaftersigmoid""" +254 67 regularizer """no""" +254 67 optimizer """adam""" +254 67 training_loop """owa""" +254 67 negative_sampler """basic""" +254 67 evaluator """rankbased""" +254 68 dataset """kinships""" +254 68 model """ermlp""" +254 68 loss """bceaftersigmoid""" +254 68 regularizer """no""" +254 68 optimizer """adam""" +254 68 training_loop """owa""" +254 68 negative_sampler """basic""" +254 68 evaluator """rankbased""" +254 69 dataset """kinships""" +254 69 model """ermlp""" +254 69 loss 
"""bceaftersigmoid""" +254 69 regularizer """no""" +254 69 optimizer """adam""" +254 69 training_loop """owa""" +254 69 negative_sampler """basic""" +254 69 evaluator """rankbased""" +254 70 dataset """kinships""" +254 70 model """ermlp""" +254 70 loss """bceaftersigmoid""" +254 70 regularizer """no""" +254 70 optimizer """adam""" +254 70 training_loop """owa""" +254 70 negative_sampler """basic""" +254 70 evaluator """rankbased""" +254 71 dataset """kinships""" +254 71 model """ermlp""" +254 71 loss """bceaftersigmoid""" +254 71 regularizer """no""" +254 71 optimizer """adam""" +254 71 training_loop """owa""" +254 71 negative_sampler """basic""" +254 71 evaluator """rankbased""" +254 72 dataset """kinships""" +254 72 model """ermlp""" +254 72 loss """bceaftersigmoid""" +254 72 regularizer """no""" +254 72 optimizer """adam""" +254 72 training_loop """owa""" +254 72 negative_sampler """basic""" +254 72 evaluator """rankbased""" +254 73 dataset """kinships""" +254 73 model """ermlp""" +254 73 loss """bceaftersigmoid""" +254 73 regularizer """no""" +254 73 optimizer """adam""" +254 73 training_loop """owa""" +254 73 negative_sampler """basic""" +254 73 evaluator """rankbased""" +254 74 dataset """kinships""" +254 74 model """ermlp""" +254 74 loss """bceaftersigmoid""" +254 74 regularizer """no""" +254 74 optimizer """adam""" +254 74 training_loop """owa""" +254 74 negative_sampler """basic""" +254 74 evaluator """rankbased""" +254 75 dataset """kinships""" +254 75 model """ermlp""" +254 75 loss """bceaftersigmoid""" +254 75 regularizer """no""" +254 75 optimizer """adam""" +254 75 training_loop """owa""" +254 75 negative_sampler """basic""" +254 75 evaluator """rankbased""" +254 76 dataset """kinships""" +254 76 model """ermlp""" +254 76 loss """bceaftersigmoid""" +254 76 regularizer """no""" +254 76 optimizer """adam""" +254 76 training_loop """owa""" +254 76 negative_sampler """basic""" +254 76 evaluator """rankbased""" +254 77 dataset """kinships""" +254 77 model 
"""ermlp""" +254 77 loss """bceaftersigmoid""" +254 77 regularizer """no""" +254 77 optimizer """adam""" +254 77 training_loop """owa""" +254 77 negative_sampler """basic""" +254 77 evaluator """rankbased""" +254 78 dataset """kinships""" +254 78 model """ermlp""" +254 78 loss """bceaftersigmoid""" +254 78 regularizer """no""" +254 78 optimizer """adam""" +254 78 training_loop """owa""" +254 78 negative_sampler """basic""" +254 78 evaluator """rankbased""" +254 79 dataset """kinships""" +254 79 model """ermlp""" +254 79 loss """bceaftersigmoid""" +254 79 regularizer """no""" +254 79 optimizer """adam""" +254 79 training_loop """owa""" +254 79 negative_sampler """basic""" +254 79 evaluator """rankbased""" +254 80 dataset """kinships""" +254 80 model """ermlp""" +254 80 loss """bceaftersigmoid""" +254 80 regularizer """no""" +254 80 optimizer """adam""" +254 80 training_loop """owa""" +254 80 negative_sampler """basic""" +254 80 evaluator """rankbased""" +254 81 dataset """kinships""" +254 81 model """ermlp""" +254 81 loss """bceaftersigmoid""" +254 81 regularizer """no""" +254 81 optimizer """adam""" +254 81 training_loop """owa""" +254 81 negative_sampler """basic""" +254 81 evaluator """rankbased""" +254 82 dataset """kinships""" +254 82 model """ermlp""" +254 82 loss """bceaftersigmoid""" +254 82 regularizer """no""" +254 82 optimizer """adam""" +254 82 training_loop """owa""" +254 82 negative_sampler """basic""" +254 82 evaluator """rankbased""" +254 83 dataset """kinships""" +254 83 model """ermlp""" +254 83 loss """bceaftersigmoid""" +254 83 regularizer """no""" +254 83 optimizer """adam""" +254 83 training_loop """owa""" +254 83 negative_sampler """basic""" +254 83 evaluator """rankbased""" +254 84 dataset """kinships""" +254 84 model """ermlp""" +254 84 loss """bceaftersigmoid""" +254 84 regularizer """no""" +254 84 optimizer """adam""" +254 84 training_loop """owa""" +254 84 negative_sampler """basic""" +254 84 evaluator """rankbased""" +254 85 dataset 
"""kinships""" +254 85 model """ermlp""" +254 85 loss """bceaftersigmoid""" +254 85 regularizer """no""" +254 85 optimizer """adam""" +254 85 training_loop """owa""" +254 85 negative_sampler """basic""" +254 85 evaluator """rankbased""" +254 86 dataset """kinships""" +254 86 model """ermlp""" +254 86 loss """bceaftersigmoid""" +254 86 regularizer """no""" +254 86 optimizer """adam""" +254 86 training_loop """owa""" +254 86 negative_sampler """basic""" +254 86 evaluator """rankbased""" +254 87 dataset """kinships""" +254 87 model """ermlp""" +254 87 loss """bceaftersigmoid""" +254 87 regularizer """no""" +254 87 optimizer """adam""" +254 87 training_loop """owa""" +254 87 negative_sampler """basic""" +254 87 evaluator """rankbased""" +254 88 dataset """kinships""" +254 88 model """ermlp""" +254 88 loss """bceaftersigmoid""" +254 88 regularizer """no""" +254 88 optimizer """adam""" +254 88 training_loop """owa""" +254 88 negative_sampler """basic""" +254 88 evaluator """rankbased""" +254 89 dataset """kinships""" +254 89 model """ermlp""" +254 89 loss """bceaftersigmoid""" +254 89 regularizer """no""" +254 89 optimizer """adam""" +254 89 training_loop """owa""" +254 89 negative_sampler """basic""" +254 89 evaluator """rankbased""" +254 90 dataset """kinships""" +254 90 model """ermlp""" +254 90 loss """bceaftersigmoid""" +254 90 regularizer """no""" +254 90 optimizer """adam""" +254 90 training_loop """owa""" +254 90 negative_sampler """basic""" +254 90 evaluator """rankbased""" +254 91 dataset """kinships""" +254 91 model """ermlp""" +254 91 loss """bceaftersigmoid""" +254 91 regularizer """no""" +254 91 optimizer """adam""" +254 91 training_loop """owa""" +254 91 negative_sampler """basic""" +254 91 evaluator """rankbased""" +254 92 dataset """kinships""" +254 92 model """ermlp""" +254 92 loss """bceaftersigmoid""" +254 92 regularizer """no""" +254 92 optimizer """adam""" +254 92 training_loop """owa""" +254 92 negative_sampler """basic""" +254 92 evaluator 
"""rankbased""" +254 93 dataset """kinships""" +254 93 model """ermlp""" +254 93 loss """bceaftersigmoid""" +254 93 regularizer """no""" +254 93 optimizer """adam""" +254 93 training_loop """owa""" +254 93 negative_sampler """basic""" +254 93 evaluator """rankbased""" +254 94 dataset """kinships""" +254 94 model """ermlp""" +254 94 loss """bceaftersigmoid""" +254 94 regularizer """no""" +254 94 optimizer """adam""" +254 94 training_loop """owa""" +254 94 negative_sampler """basic""" +254 94 evaluator """rankbased""" +254 95 dataset """kinships""" +254 95 model """ermlp""" +254 95 loss """bceaftersigmoid""" +254 95 regularizer """no""" +254 95 optimizer """adam""" +254 95 training_loop """owa""" +254 95 negative_sampler """basic""" +254 95 evaluator """rankbased""" +254 96 dataset """kinships""" +254 96 model """ermlp""" +254 96 loss """bceaftersigmoid""" +254 96 regularizer """no""" +254 96 optimizer """adam""" +254 96 training_loop """owa""" +254 96 negative_sampler """basic""" +254 96 evaluator """rankbased""" +254 97 dataset """kinships""" +254 97 model """ermlp""" +254 97 loss """bceaftersigmoid""" +254 97 regularizer """no""" +254 97 optimizer """adam""" +254 97 training_loop """owa""" +254 97 negative_sampler """basic""" +254 97 evaluator """rankbased""" +254 98 dataset """kinships""" +254 98 model """ermlp""" +254 98 loss """bceaftersigmoid""" +254 98 regularizer """no""" +254 98 optimizer """adam""" +254 98 training_loop """owa""" +254 98 negative_sampler """basic""" +254 98 evaluator """rankbased""" +254 99 dataset """kinships""" +254 99 model """ermlp""" +254 99 loss """bceaftersigmoid""" +254 99 regularizer """no""" +254 99 optimizer """adam""" +254 99 training_loop """owa""" +254 99 negative_sampler """basic""" +254 99 evaluator """rankbased""" +254 100 dataset """kinships""" +254 100 model """ermlp""" +254 100 loss """bceaftersigmoid""" +254 100 regularizer """no""" +254 100 optimizer """adam""" +254 100 training_loop """owa""" +254 100 
negative_sampler """basic""" +254 100 evaluator """rankbased""" +255 1 model.embedding_dim 1.0 +255 1 optimizer.lr 0.06587819983448558 +255 1 negative_sampler.num_negs_per_pos 95.0 +255 1 training.batch_size 1.0 +255 2 model.embedding_dim 0.0 +255 2 optimizer.lr 0.00259127785661051 +255 2 negative_sampler.num_negs_per_pos 50.0 +255 2 training.batch_size 2.0 +255 3 model.embedding_dim 2.0 +255 3 optimizer.lr 0.0025013072428453215 +255 3 negative_sampler.num_negs_per_pos 46.0 +255 3 training.batch_size 0.0 +255 4 model.embedding_dim 1.0 +255 4 optimizer.lr 0.001745144767443753 +255 4 negative_sampler.num_negs_per_pos 49.0 +255 4 training.batch_size 2.0 +255 5 model.embedding_dim 2.0 +255 5 optimizer.lr 0.02286970668241927 +255 5 negative_sampler.num_negs_per_pos 75.0 +255 5 training.batch_size 2.0 +255 6 model.embedding_dim 2.0 +255 6 optimizer.lr 0.012797248069888063 +255 6 negative_sampler.num_negs_per_pos 92.0 +255 6 training.batch_size 2.0 +255 7 model.embedding_dim 1.0 +255 7 optimizer.lr 0.09084055549305994 +255 7 negative_sampler.num_negs_per_pos 86.0 +255 7 training.batch_size 2.0 +255 8 model.embedding_dim 0.0 +255 8 optimizer.lr 0.006314132771865705 +255 8 negative_sampler.num_negs_per_pos 4.0 +255 8 training.batch_size 0.0 +255 9 model.embedding_dim 0.0 +255 9 optimizer.lr 0.03217657706250602 +255 9 negative_sampler.num_negs_per_pos 57.0 +255 9 training.batch_size 1.0 +255 10 model.embedding_dim 0.0 +255 10 optimizer.lr 0.02699361756852383 +255 10 negative_sampler.num_negs_per_pos 58.0 +255 10 training.batch_size 1.0 +255 11 model.embedding_dim 0.0 +255 11 optimizer.lr 0.007393871793839684 +255 11 negative_sampler.num_negs_per_pos 71.0 +255 11 training.batch_size 0.0 +255 12 model.embedding_dim 0.0 +255 12 optimizer.lr 0.0010590008974278977 +255 12 negative_sampler.num_negs_per_pos 45.0 +255 12 training.batch_size 0.0 +255 13 model.embedding_dim 2.0 +255 13 optimizer.lr 0.0012310257491209148 +255 13 negative_sampler.num_negs_per_pos 49.0 +255 13 
training.batch_size 2.0 +255 14 model.embedding_dim 1.0 +255 14 optimizer.lr 0.0032171042767735933 +255 14 negative_sampler.num_negs_per_pos 29.0 +255 14 training.batch_size 1.0 +255 15 model.embedding_dim 0.0 +255 15 optimizer.lr 0.00723500906688439 +255 15 negative_sampler.num_negs_per_pos 39.0 +255 15 training.batch_size 0.0 +255 16 model.embedding_dim 1.0 +255 16 optimizer.lr 0.08793194761177878 +255 16 negative_sampler.num_negs_per_pos 1.0 +255 16 training.batch_size 1.0 +255 17 model.embedding_dim 1.0 +255 17 optimizer.lr 0.009985143418637378 +255 17 negative_sampler.num_negs_per_pos 93.0 +255 17 training.batch_size 1.0 +255 18 model.embedding_dim 2.0 +255 18 optimizer.lr 0.006495458959620142 +255 18 negative_sampler.num_negs_per_pos 26.0 +255 18 training.batch_size 2.0 +255 19 model.embedding_dim 2.0 +255 19 optimizer.lr 0.074244974416249 +255 19 negative_sampler.num_negs_per_pos 38.0 +255 19 training.batch_size 0.0 +255 20 model.embedding_dim 2.0 +255 20 optimizer.lr 0.0010784129169850655 +255 20 negative_sampler.num_negs_per_pos 95.0 +255 20 training.batch_size 2.0 +255 21 model.embedding_dim 0.0 +255 21 optimizer.lr 0.014421565472660752 +255 21 negative_sampler.num_negs_per_pos 2.0 +255 21 training.batch_size 1.0 +255 22 model.embedding_dim 2.0 +255 22 optimizer.lr 0.01395268011432605 +255 22 negative_sampler.num_negs_per_pos 48.0 +255 22 training.batch_size 2.0 +255 23 model.embedding_dim 1.0 +255 23 optimizer.lr 0.006669226894300033 +255 23 negative_sampler.num_negs_per_pos 99.0 +255 23 training.batch_size 0.0 +255 24 model.embedding_dim 0.0 +255 24 optimizer.lr 0.007235791317722808 +255 24 negative_sampler.num_negs_per_pos 3.0 +255 24 training.batch_size 0.0 +255 25 model.embedding_dim 1.0 +255 25 optimizer.lr 0.07297263344894443 +255 25 negative_sampler.num_negs_per_pos 67.0 +255 25 training.batch_size 0.0 +255 26 model.embedding_dim 2.0 +255 26 optimizer.lr 0.008175687869578495 +255 26 negative_sampler.num_negs_per_pos 5.0 +255 26 training.batch_size 
1.0 +255 27 model.embedding_dim 1.0 +255 27 optimizer.lr 0.04421146230626099 +255 27 negative_sampler.num_negs_per_pos 25.0 +255 27 training.batch_size 2.0 +255 28 model.embedding_dim 0.0 +255 28 optimizer.lr 0.008530209300643346 +255 28 negative_sampler.num_negs_per_pos 75.0 +255 28 training.batch_size 1.0 +255 29 model.embedding_dim 2.0 +255 29 optimizer.lr 0.004291415031751922 +255 29 negative_sampler.num_negs_per_pos 70.0 +255 29 training.batch_size 1.0 +255 30 model.embedding_dim 2.0 +255 30 optimizer.lr 0.024622505865320225 +255 30 negative_sampler.num_negs_per_pos 15.0 +255 30 training.batch_size 0.0 +255 31 model.embedding_dim 2.0 +255 31 optimizer.lr 0.005210667114836793 +255 31 negative_sampler.num_negs_per_pos 16.0 +255 31 training.batch_size 2.0 +255 32 model.embedding_dim 1.0 +255 32 optimizer.lr 0.058710994334233574 +255 32 negative_sampler.num_negs_per_pos 14.0 +255 32 training.batch_size 2.0 +255 33 model.embedding_dim 0.0 +255 33 optimizer.lr 0.049879170217352144 +255 33 negative_sampler.num_negs_per_pos 24.0 +255 33 training.batch_size 1.0 +255 34 model.embedding_dim 2.0 +255 34 optimizer.lr 0.004780514361006616 +255 34 negative_sampler.num_negs_per_pos 78.0 +255 34 training.batch_size 0.0 +255 35 model.embedding_dim 2.0 +255 35 optimizer.lr 0.0024194382945564477 +255 35 negative_sampler.num_negs_per_pos 50.0 +255 35 training.batch_size 0.0 +255 36 model.embedding_dim 0.0 +255 36 optimizer.lr 0.015935377153809126 +255 36 negative_sampler.num_negs_per_pos 15.0 +255 36 training.batch_size 1.0 +255 37 model.embedding_dim 1.0 +255 37 optimizer.lr 0.0015708485946276279 +255 37 negative_sampler.num_negs_per_pos 6.0 +255 37 training.batch_size 1.0 +255 38 model.embedding_dim 1.0 +255 38 optimizer.lr 0.05209685191404465 +255 38 negative_sampler.num_negs_per_pos 52.0 +255 38 training.batch_size 0.0 +255 39 model.embedding_dim 1.0 +255 39 optimizer.lr 0.0025174403256604263 +255 39 negative_sampler.num_negs_per_pos 14.0 +255 39 training.batch_size 0.0 +255 
40 model.embedding_dim 0.0 +255 40 optimizer.lr 0.032797395288409095 +255 40 negative_sampler.num_negs_per_pos 87.0 +255 40 training.batch_size 1.0 +255 41 model.embedding_dim 0.0 +255 41 optimizer.lr 0.03618297397565473 +255 41 negative_sampler.num_negs_per_pos 15.0 +255 41 training.batch_size 1.0 +255 42 model.embedding_dim 1.0 +255 42 optimizer.lr 0.07675954058646484 +255 42 negative_sampler.num_negs_per_pos 55.0 +255 42 training.batch_size 0.0 +255 43 model.embedding_dim 2.0 +255 43 optimizer.lr 0.013849210651115825 +255 43 negative_sampler.num_negs_per_pos 93.0 +255 43 training.batch_size 0.0 +255 44 model.embedding_dim 2.0 +255 44 optimizer.lr 0.07500158420467234 +255 44 negative_sampler.num_negs_per_pos 54.0 +255 44 training.batch_size 1.0 +255 45 model.embedding_dim 2.0 +255 45 optimizer.lr 0.03450322908858802 +255 45 negative_sampler.num_negs_per_pos 68.0 +255 45 training.batch_size 1.0 +255 46 model.embedding_dim 2.0 +255 46 optimizer.lr 0.00611760604826755 +255 46 negative_sampler.num_negs_per_pos 80.0 +255 46 training.batch_size 1.0 +255 47 model.embedding_dim 1.0 +255 47 optimizer.lr 0.0025077229197304303 +255 47 negative_sampler.num_negs_per_pos 4.0 +255 47 training.batch_size 2.0 +255 48 model.embedding_dim 2.0 +255 48 optimizer.lr 0.008254434303413497 +255 48 negative_sampler.num_negs_per_pos 55.0 +255 48 training.batch_size 1.0 +255 49 model.embedding_dim 1.0 +255 49 optimizer.lr 0.02849605497334513 +255 49 negative_sampler.num_negs_per_pos 77.0 +255 49 training.batch_size 1.0 +255 50 model.embedding_dim 2.0 +255 50 optimizer.lr 0.02960968488931382 +255 50 negative_sampler.num_negs_per_pos 22.0 +255 50 training.batch_size 0.0 +255 51 model.embedding_dim 2.0 +255 51 optimizer.lr 0.018077391593839146 +255 51 negative_sampler.num_negs_per_pos 90.0 +255 51 training.batch_size 2.0 +255 52 model.embedding_dim 2.0 +255 52 optimizer.lr 0.001608681897244949 +255 52 negative_sampler.num_negs_per_pos 69.0 +255 52 training.batch_size 1.0 +255 53 
model.embedding_dim 2.0 +255 53 optimizer.lr 0.01733732372063075 +255 53 negative_sampler.num_negs_per_pos 21.0 +255 53 training.batch_size 2.0 +255 54 model.embedding_dim 0.0 +255 54 optimizer.lr 0.0036366328233370564 +255 54 negative_sampler.num_negs_per_pos 62.0 +255 54 training.batch_size 0.0 +255 55 model.embedding_dim 0.0 +255 55 optimizer.lr 0.0024128610543738558 +255 55 negative_sampler.num_negs_per_pos 16.0 +255 55 training.batch_size 2.0 +255 56 model.embedding_dim 0.0 +255 56 optimizer.lr 0.006616171985482975 +255 56 negative_sampler.num_negs_per_pos 62.0 +255 56 training.batch_size 1.0 +255 57 model.embedding_dim 2.0 +255 57 optimizer.lr 0.0020778280097595104 +255 57 negative_sampler.num_negs_per_pos 32.0 +255 57 training.batch_size 2.0 +255 58 model.embedding_dim 1.0 +255 58 optimizer.lr 0.008600578431163186 +255 58 negative_sampler.num_negs_per_pos 96.0 +255 58 training.batch_size 0.0 +255 59 model.embedding_dim 0.0 +255 59 optimizer.lr 0.010960949191657375 +255 59 negative_sampler.num_negs_per_pos 67.0 +255 59 training.batch_size 1.0 +255 60 model.embedding_dim 2.0 +255 60 optimizer.lr 0.02748048794857237 +255 60 negative_sampler.num_negs_per_pos 38.0 +255 60 training.batch_size 1.0 +255 61 model.embedding_dim 1.0 +255 61 optimizer.lr 0.04391861085705655 +255 61 negative_sampler.num_negs_per_pos 62.0 +255 61 training.batch_size 0.0 +255 62 model.embedding_dim 2.0 +255 62 optimizer.lr 0.02471841482192295 +255 62 negative_sampler.num_negs_per_pos 37.0 +255 62 training.batch_size 1.0 +255 63 model.embedding_dim 0.0 +255 63 optimizer.lr 0.002446700277948311 +255 63 negative_sampler.num_negs_per_pos 14.0 +255 63 training.batch_size 2.0 +255 64 model.embedding_dim 0.0 +255 64 optimizer.lr 0.001032056322670083 +255 64 negative_sampler.num_negs_per_pos 30.0 +255 64 training.batch_size 2.0 +255 65 model.embedding_dim 1.0 +255 65 optimizer.lr 0.0013900383746245763 +255 65 negative_sampler.num_negs_per_pos 34.0 +255 65 training.batch_size 2.0 +255 66 
model.embedding_dim 0.0 +255 66 optimizer.lr 0.014433787017041438 +255 66 negative_sampler.num_negs_per_pos 61.0 +255 66 training.batch_size 2.0 +255 67 model.embedding_dim 1.0 +255 67 optimizer.lr 0.02313460522559595 +255 67 negative_sampler.num_negs_per_pos 23.0 +255 67 training.batch_size 0.0 +255 68 model.embedding_dim 1.0 +255 68 optimizer.lr 0.08925441203562796 +255 68 negative_sampler.num_negs_per_pos 79.0 +255 68 training.batch_size 0.0 +255 69 model.embedding_dim 1.0 +255 69 optimizer.lr 0.0025077356833724668 +255 69 negative_sampler.num_negs_per_pos 78.0 +255 69 training.batch_size 0.0 +255 70 model.embedding_dim 1.0 +255 70 optimizer.lr 0.02175429270297395 +255 70 negative_sampler.num_negs_per_pos 60.0 +255 70 training.batch_size 2.0 +255 71 model.embedding_dim 2.0 +255 71 optimizer.lr 0.0077668569562428435 +255 71 negative_sampler.num_negs_per_pos 70.0 +255 71 training.batch_size 1.0 +255 72 model.embedding_dim 0.0 +255 72 optimizer.lr 0.004559214877627502 +255 72 negative_sampler.num_negs_per_pos 44.0 +255 72 training.batch_size 2.0 +255 73 model.embedding_dim 1.0 +255 73 optimizer.lr 0.014515632611306799 +255 73 negative_sampler.num_negs_per_pos 62.0 +255 73 training.batch_size 1.0 +255 74 model.embedding_dim 0.0 +255 74 optimizer.lr 0.009813380534859034 +255 74 negative_sampler.num_negs_per_pos 89.0 +255 74 training.batch_size 0.0 +255 75 model.embedding_dim 2.0 +255 75 optimizer.lr 0.0664801370811876 +255 75 negative_sampler.num_negs_per_pos 49.0 +255 75 training.batch_size 2.0 +255 76 model.embedding_dim 2.0 +255 76 optimizer.lr 0.023931982247617216 +255 76 negative_sampler.num_negs_per_pos 98.0 +255 76 training.batch_size 1.0 +255 77 model.embedding_dim 1.0 +255 77 optimizer.lr 0.013011263653657712 +255 77 negative_sampler.num_negs_per_pos 3.0 +255 77 training.batch_size 2.0 +255 78 model.embedding_dim 0.0 +255 78 optimizer.lr 0.008028147377549895 +255 78 negative_sampler.num_negs_per_pos 74.0 +255 78 training.batch_size 0.0 +255 79 
model.embedding_dim 0.0 +255 79 optimizer.lr 0.033416374236355385 +255 79 negative_sampler.num_negs_per_pos 48.0 +255 79 training.batch_size 0.0 +255 80 model.embedding_dim 1.0 +255 80 optimizer.lr 0.006968532519210984 +255 80 negative_sampler.num_negs_per_pos 91.0 +255 80 training.batch_size 0.0 +255 81 model.embedding_dim 1.0 +255 81 optimizer.lr 0.00860961494114785 +255 81 negative_sampler.num_negs_per_pos 88.0 +255 81 training.batch_size 2.0 +255 82 model.embedding_dim 1.0 +255 82 optimizer.lr 0.0010979679387699566 +255 82 negative_sampler.num_negs_per_pos 71.0 +255 82 training.batch_size 0.0 +255 83 model.embedding_dim 1.0 +255 83 optimizer.lr 0.007863769216955254 +255 83 negative_sampler.num_negs_per_pos 54.0 +255 83 training.batch_size 0.0 +255 84 model.embedding_dim 0.0 +255 84 optimizer.lr 0.001289437654475443 +255 84 negative_sampler.num_negs_per_pos 8.0 +255 84 training.batch_size 2.0 +255 85 model.embedding_dim 0.0 +255 85 optimizer.lr 0.0011749613452120757 +255 85 negative_sampler.num_negs_per_pos 65.0 +255 85 training.batch_size 2.0 +255 86 model.embedding_dim 1.0 +255 86 optimizer.lr 0.0023507620595866864 +255 86 negative_sampler.num_negs_per_pos 86.0 +255 86 training.batch_size 0.0 +255 87 model.embedding_dim 1.0 +255 87 optimizer.lr 0.0011369930705395618 +255 87 negative_sampler.num_negs_per_pos 47.0 +255 87 training.batch_size 2.0 +255 88 model.embedding_dim 2.0 +255 88 optimizer.lr 0.0034736509743360754 +255 88 negative_sampler.num_negs_per_pos 21.0 +255 88 training.batch_size 2.0 +255 89 model.embedding_dim 0.0 +255 89 optimizer.lr 0.0015005209916447706 +255 89 negative_sampler.num_negs_per_pos 51.0 +255 89 training.batch_size 2.0 +255 90 model.embedding_dim 0.0 +255 90 optimizer.lr 0.0036503929093814197 +255 90 negative_sampler.num_negs_per_pos 77.0 +255 90 training.batch_size 1.0 +255 91 model.embedding_dim 2.0 +255 91 optimizer.lr 0.0013490532028171663 +255 91 negative_sampler.num_negs_per_pos 87.0 +255 91 training.batch_size 0.0 +255 92 
model.embedding_dim 1.0 +255 92 optimizer.lr 0.003920290833084983 +255 92 negative_sampler.num_negs_per_pos 19.0 +255 92 training.batch_size 1.0 +255 93 model.embedding_dim 1.0 +255 93 optimizer.lr 0.08990015091934295 +255 93 negative_sampler.num_negs_per_pos 98.0 +255 93 training.batch_size 2.0 +255 94 model.embedding_dim 1.0 +255 94 optimizer.lr 0.022353318539015 +255 94 negative_sampler.num_negs_per_pos 52.0 +255 94 training.batch_size 1.0 +255 95 model.embedding_dim 0.0 +255 95 optimizer.lr 0.0030191865308212104 +255 95 negative_sampler.num_negs_per_pos 45.0 +255 95 training.batch_size 1.0 +255 96 model.embedding_dim 0.0 +255 96 optimizer.lr 0.001609710316867907 +255 96 negative_sampler.num_negs_per_pos 78.0 +255 96 training.batch_size 0.0 +255 97 model.embedding_dim 1.0 +255 97 optimizer.lr 0.0015944766913648495 +255 97 negative_sampler.num_negs_per_pos 5.0 +255 97 training.batch_size 1.0 +255 98 model.embedding_dim 1.0 +255 98 optimizer.lr 0.08268441535297515 +255 98 negative_sampler.num_negs_per_pos 85.0 +255 98 training.batch_size 2.0 +255 99 model.embedding_dim 1.0 +255 99 optimizer.lr 0.017677066721421875 +255 99 negative_sampler.num_negs_per_pos 54.0 +255 99 training.batch_size 1.0 +255 100 model.embedding_dim 1.0 +255 100 optimizer.lr 0.005600021754173694 +255 100 negative_sampler.num_negs_per_pos 49.0 +255 100 training.batch_size 0.0 +255 1 dataset """kinships""" +255 1 model """ermlp""" +255 1 loss """softplus""" +255 1 regularizer """no""" +255 1 optimizer """adam""" +255 1 training_loop """owa""" +255 1 negative_sampler """basic""" +255 1 evaluator """rankbased""" +255 2 dataset """kinships""" +255 2 model """ermlp""" +255 2 loss """softplus""" +255 2 regularizer """no""" +255 2 optimizer """adam""" +255 2 training_loop """owa""" +255 2 negative_sampler """basic""" +255 2 evaluator """rankbased""" +255 3 dataset """kinships""" +255 3 model """ermlp""" +255 3 loss """softplus""" +255 3 regularizer """no""" +255 3 optimizer """adam""" +255 3 
training_loop """owa""" +255 3 negative_sampler """basic""" +255 3 evaluator """rankbased""" +255 4 dataset """kinships""" +255 4 model """ermlp""" +255 4 loss """softplus""" +255 4 regularizer """no""" +255 4 optimizer """adam""" +255 4 training_loop """owa""" +255 4 negative_sampler """basic""" +255 4 evaluator """rankbased""" +255 5 dataset """kinships""" +255 5 model """ermlp""" +255 5 loss """softplus""" +255 5 regularizer """no""" +255 5 optimizer """adam""" +255 5 training_loop """owa""" +255 5 negative_sampler """basic""" +255 5 evaluator """rankbased""" +255 6 dataset """kinships""" +255 6 model """ermlp""" +255 6 loss """softplus""" +255 6 regularizer """no""" +255 6 optimizer """adam""" +255 6 training_loop """owa""" +255 6 negative_sampler """basic""" +255 6 evaluator """rankbased""" +255 7 dataset """kinships""" +255 7 model """ermlp""" +255 7 loss """softplus""" +255 7 regularizer """no""" +255 7 optimizer """adam""" +255 7 training_loop """owa""" +255 7 negative_sampler """basic""" +255 7 evaluator """rankbased""" +255 8 dataset """kinships""" +255 8 model """ermlp""" +255 8 loss """softplus""" +255 8 regularizer """no""" +255 8 optimizer """adam""" +255 8 training_loop """owa""" +255 8 negative_sampler """basic""" +255 8 evaluator """rankbased""" +255 9 dataset """kinships""" +255 9 model """ermlp""" +255 9 loss """softplus""" +255 9 regularizer """no""" +255 9 optimizer """adam""" +255 9 training_loop """owa""" +255 9 negative_sampler """basic""" +255 9 evaluator """rankbased""" +255 10 dataset """kinships""" +255 10 model """ermlp""" +255 10 loss """softplus""" +255 10 regularizer """no""" +255 10 optimizer """adam""" +255 10 training_loop """owa""" +255 10 negative_sampler """basic""" +255 10 evaluator """rankbased""" +255 11 dataset """kinships""" +255 11 model """ermlp""" +255 11 loss """softplus""" +255 11 regularizer """no""" +255 11 optimizer """adam""" +255 11 training_loop """owa""" +255 11 negative_sampler """basic""" +255 11 evaluator 
"""rankbased""" +255 12 dataset """kinships""" +255 12 model """ermlp""" +255 12 loss """softplus""" +255 12 regularizer """no""" +255 12 optimizer """adam""" +255 12 training_loop """owa""" +255 12 negative_sampler """basic""" +255 12 evaluator """rankbased""" +255 13 dataset """kinships""" +255 13 model """ermlp""" +255 13 loss """softplus""" +255 13 regularizer """no""" +255 13 optimizer """adam""" +255 13 training_loop """owa""" +255 13 negative_sampler """basic""" +255 13 evaluator """rankbased""" +255 14 dataset """kinships""" +255 14 model """ermlp""" +255 14 loss """softplus""" +255 14 regularizer """no""" +255 14 optimizer """adam""" +255 14 training_loop """owa""" +255 14 negative_sampler """basic""" +255 14 evaluator """rankbased""" +255 15 dataset """kinships""" +255 15 model """ermlp""" +255 15 loss """softplus""" +255 15 regularizer """no""" +255 15 optimizer """adam""" +255 15 training_loop """owa""" +255 15 negative_sampler """basic""" +255 15 evaluator """rankbased""" +255 16 dataset """kinships""" +255 16 model """ermlp""" +255 16 loss """softplus""" +255 16 regularizer """no""" +255 16 optimizer """adam""" +255 16 training_loop """owa""" +255 16 negative_sampler """basic""" +255 16 evaluator """rankbased""" +255 17 dataset """kinships""" +255 17 model """ermlp""" +255 17 loss """softplus""" +255 17 regularizer """no""" +255 17 optimizer """adam""" +255 17 training_loop """owa""" +255 17 negative_sampler """basic""" +255 17 evaluator """rankbased""" +255 18 dataset """kinships""" +255 18 model """ermlp""" +255 18 loss """softplus""" +255 18 regularizer """no""" +255 18 optimizer """adam""" +255 18 training_loop """owa""" +255 18 negative_sampler """basic""" +255 18 evaluator """rankbased""" +255 19 dataset """kinships""" +255 19 model """ermlp""" +255 19 loss """softplus""" +255 19 regularizer """no""" +255 19 optimizer """adam""" +255 19 training_loop """owa""" +255 19 negative_sampler """basic""" +255 19 evaluator """rankbased""" +255 20 dataset 
"""kinships""" +255 20 model """ermlp""" +255 20 loss """softplus""" +255 20 regularizer """no""" +255 20 optimizer """adam""" +255 20 training_loop """owa""" +255 20 negative_sampler """basic""" +255 20 evaluator """rankbased""" +255 21 dataset """kinships""" +255 21 model """ermlp""" +255 21 loss """softplus""" +255 21 regularizer """no""" +255 21 optimizer """adam""" +255 21 training_loop """owa""" +255 21 negative_sampler """basic""" +255 21 evaluator """rankbased""" +255 22 dataset """kinships""" +255 22 model """ermlp""" +255 22 loss """softplus""" +255 22 regularizer """no""" +255 22 optimizer """adam""" +255 22 training_loop """owa""" +255 22 negative_sampler """basic""" +255 22 evaluator """rankbased""" +255 23 dataset """kinships""" +255 23 model """ermlp""" +255 23 loss """softplus""" +255 23 regularizer """no""" +255 23 optimizer """adam""" +255 23 training_loop """owa""" +255 23 negative_sampler """basic""" +255 23 evaluator """rankbased""" +255 24 dataset """kinships""" +255 24 model """ermlp""" +255 24 loss """softplus""" +255 24 regularizer """no""" +255 24 optimizer """adam""" +255 24 training_loop """owa""" +255 24 negative_sampler """basic""" +255 24 evaluator """rankbased""" +255 25 dataset """kinships""" +255 25 model """ermlp""" +255 25 loss """softplus""" +255 25 regularizer """no""" +255 25 optimizer """adam""" +255 25 training_loop """owa""" +255 25 negative_sampler """basic""" +255 25 evaluator """rankbased""" +255 26 dataset """kinships""" +255 26 model """ermlp""" +255 26 loss """softplus""" +255 26 regularizer """no""" +255 26 optimizer """adam""" +255 26 training_loop """owa""" +255 26 negative_sampler """basic""" +255 26 evaluator """rankbased""" +255 27 dataset """kinships""" +255 27 model """ermlp""" +255 27 loss """softplus""" +255 27 regularizer """no""" +255 27 optimizer """adam""" +255 27 training_loop """owa""" +255 27 negative_sampler """basic""" +255 27 evaluator """rankbased""" +255 28 dataset """kinships""" +255 28 model 
"""ermlp""" +255 28 loss """softplus""" +255 28 regularizer """no""" +255 28 optimizer """adam""" +255 28 training_loop """owa""" +255 28 negative_sampler """basic""" +255 28 evaluator """rankbased""" +255 29 dataset """kinships""" +255 29 model """ermlp""" +255 29 loss """softplus""" +255 29 regularizer """no""" +255 29 optimizer """adam""" +255 29 training_loop """owa""" +255 29 negative_sampler """basic""" +255 29 evaluator """rankbased""" +255 30 dataset """kinships""" +255 30 model """ermlp""" +255 30 loss """softplus""" +255 30 regularizer """no""" +255 30 optimizer """adam""" +255 30 training_loop """owa""" +255 30 negative_sampler """basic""" +255 30 evaluator """rankbased""" +255 31 dataset """kinships""" +255 31 model """ermlp""" +255 31 loss """softplus""" +255 31 regularizer """no""" +255 31 optimizer """adam""" +255 31 training_loop """owa""" +255 31 negative_sampler """basic""" +255 31 evaluator """rankbased""" +255 32 dataset """kinships""" +255 32 model """ermlp""" +255 32 loss """softplus""" +255 32 regularizer """no""" +255 32 optimizer """adam""" +255 32 training_loop """owa""" +255 32 negative_sampler """basic""" +255 32 evaluator """rankbased""" +255 33 dataset """kinships""" +255 33 model """ermlp""" +255 33 loss """softplus""" +255 33 regularizer """no""" +255 33 optimizer """adam""" +255 33 training_loop """owa""" +255 33 negative_sampler """basic""" +255 33 evaluator """rankbased""" +255 34 dataset """kinships""" +255 34 model """ermlp""" +255 34 loss """softplus""" +255 34 regularizer """no""" +255 34 optimizer """adam""" +255 34 training_loop """owa""" +255 34 negative_sampler """basic""" +255 34 evaluator """rankbased""" +255 35 dataset """kinships""" +255 35 model """ermlp""" +255 35 loss """softplus""" +255 35 regularizer """no""" +255 35 optimizer """adam""" +255 35 training_loop """owa""" +255 35 negative_sampler """basic""" +255 35 evaluator """rankbased""" +255 36 dataset """kinships""" +255 36 model """ermlp""" +255 36 loss 
"""softplus""" +255 36 regularizer """no""" +255 36 optimizer """adam""" +255 36 training_loop """owa""" +255 36 negative_sampler """basic""" +255 36 evaluator """rankbased""" +255 37 dataset """kinships""" +255 37 model """ermlp""" +255 37 loss """softplus""" +255 37 regularizer """no""" +255 37 optimizer """adam""" +255 37 training_loop """owa""" +255 37 negative_sampler """basic""" +255 37 evaluator """rankbased""" +255 38 dataset """kinships""" +255 38 model """ermlp""" +255 38 loss """softplus""" +255 38 regularizer """no""" +255 38 optimizer """adam""" +255 38 training_loop """owa""" +255 38 negative_sampler """basic""" +255 38 evaluator """rankbased""" +255 39 dataset """kinships""" +255 39 model """ermlp""" +255 39 loss """softplus""" +255 39 regularizer """no""" +255 39 optimizer """adam""" +255 39 training_loop """owa""" +255 39 negative_sampler """basic""" +255 39 evaluator """rankbased""" +255 40 dataset """kinships""" +255 40 model """ermlp""" +255 40 loss """softplus""" +255 40 regularizer """no""" +255 40 optimizer """adam""" +255 40 training_loop """owa""" +255 40 negative_sampler """basic""" +255 40 evaluator """rankbased""" +255 41 dataset """kinships""" +255 41 model """ermlp""" +255 41 loss """softplus""" +255 41 regularizer """no""" +255 41 optimizer """adam""" +255 41 training_loop """owa""" +255 41 negative_sampler """basic""" +255 41 evaluator """rankbased""" +255 42 dataset """kinships""" +255 42 model """ermlp""" +255 42 loss """softplus""" +255 42 regularizer """no""" +255 42 optimizer """adam""" +255 42 training_loop """owa""" +255 42 negative_sampler """basic""" +255 42 evaluator """rankbased""" +255 43 dataset """kinships""" +255 43 model """ermlp""" +255 43 loss """softplus""" +255 43 regularizer """no""" +255 43 optimizer """adam""" +255 43 training_loop """owa""" +255 43 negative_sampler """basic""" +255 43 evaluator """rankbased""" +255 44 dataset """kinships""" +255 44 model """ermlp""" +255 44 loss """softplus""" +255 44 
regularizer """no""" +255 44 optimizer """adam""" +255 44 training_loop """owa""" +255 44 negative_sampler """basic""" +255 44 evaluator """rankbased""" +255 45 dataset """kinships""" +255 45 model """ermlp""" +255 45 loss """softplus""" +255 45 regularizer """no""" +255 45 optimizer """adam""" +255 45 training_loop """owa""" +255 45 negative_sampler """basic""" +255 45 evaluator """rankbased""" +255 46 dataset """kinships""" +255 46 model """ermlp""" +255 46 loss """softplus""" +255 46 regularizer """no""" +255 46 optimizer """adam""" +255 46 training_loop """owa""" +255 46 negative_sampler """basic""" +255 46 evaluator """rankbased""" +255 47 dataset """kinships""" +255 47 model """ermlp""" +255 47 loss """softplus""" +255 47 regularizer """no""" +255 47 optimizer """adam""" +255 47 training_loop """owa""" +255 47 negative_sampler """basic""" +255 47 evaluator """rankbased""" +255 48 dataset """kinships""" +255 48 model """ermlp""" +255 48 loss """softplus""" +255 48 regularizer """no""" +255 48 optimizer """adam""" +255 48 training_loop """owa""" +255 48 negative_sampler """basic""" +255 48 evaluator """rankbased""" +255 49 dataset """kinships""" +255 49 model """ermlp""" +255 49 loss """softplus""" +255 49 regularizer """no""" +255 49 optimizer """adam""" +255 49 training_loop """owa""" +255 49 negative_sampler """basic""" +255 49 evaluator """rankbased""" +255 50 dataset """kinships""" +255 50 model """ermlp""" +255 50 loss """softplus""" +255 50 regularizer """no""" +255 50 optimizer """adam""" +255 50 training_loop """owa""" +255 50 negative_sampler """basic""" +255 50 evaluator """rankbased""" +255 51 dataset """kinships""" +255 51 model """ermlp""" +255 51 loss """softplus""" +255 51 regularizer """no""" +255 51 optimizer """adam""" +255 51 training_loop """owa""" +255 51 negative_sampler """basic""" +255 51 evaluator """rankbased""" +255 52 dataset """kinships""" +255 52 model """ermlp""" +255 52 loss """softplus""" +255 52 regularizer """no""" +255 52 
optimizer """adam""" +255 52 training_loop """owa""" +255 52 negative_sampler """basic""" +255 52 evaluator """rankbased""" +255 53 dataset """kinships""" +255 53 model """ermlp""" +255 53 loss """softplus""" +255 53 regularizer """no""" +255 53 optimizer """adam""" +255 53 training_loop """owa""" +255 53 negative_sampler """basic""" +255 53 evaluator """rankbased""" +255 54 dataset """kinships""" +255 54 model """ermlp""" +255 54 loss """softplus""" +255 54 regularizer """no""" +255 54 optimizer """adam""" +255 54 training_loop """owa""" +255 54 negative_sampler """basic""" +255 54 evaluator """rankbased""" +255 55 dataset """kinships""" +255 55 model """ermlp""" +255 55 loss """softplus""" +255 55 regularizer """no""" +255 55 optimizer """adam""" +255 55 training_loop """owa""" +255 55 negative_sampler """basic""" +255 55 evaluator """rankbased""" +255 56 dataset """kinships""" +255 56 model """ermlp""" +255 56 loss """softplus""" +255 56 regularizer """no""" +255 56 optimizer """adam""" +255 56 training_loop """owa""" +255 56 negative_sampler """basic""" +255 56 evaluator """rankbased""" +255 57 dataset """kinships""" +255 57 model """ermlp""" +255 57 loss """softplus""" +255 57 regularizer """no""" +255 57 optimizer """adam""" +255 57 training_loop """owa""" +255 57 negative_sampler """basic""" +255 57 evaluator """rankbased""" +255 58 dataset """kinships""" +255 58 model """ermlp""" +255 58 loss """softplus""" +255 58 regularizer """no""" +255 58 optimizer """adam""" +255 58 training_loop """owa""" +255 58 negative_sampler """basic""" +255 58 evaluator """rankbased""" +255 59 dataset """kinships""" +255 59 model """ermlp""" +255 59 loss """softplus""" +255 59 regularizer """no""" +255 59 optimizer """adam""" +255 59 training_loop """owa""" +255 59 negative_sampler """basic""" +255 59 evaluator """rankbased""" +255 60 dataset """kinships""" +255 60 model """ermlp""" +255 60 loss """softplus""" +255 60 regularizer """no""" +255 60 optimizer """adam""" +255 60 
training_loop """owa""" +255 60 negative_sampler """basic""" +255 60 evaluator """rankbased""" +255 61 dataset """kinships""" +255 61 model """ermlp""" +255 61 loss """softplus""" +255 61 regularizer """no""" +255 61 optimizer """adam""" +255 61 training_loop """owa""" +255 61 negative_sampler """basic""" +255 61 evaluator """rankbased""" +255 62 dataset """kinships""" +255 62 model """ermlp""" +255 62 loss """softplus""" +255 62 regularizer """no""" +255 62 optimizer """adam""" +255 62 training_loop """owa""" +255 62 negative_sampler """basic""" +255 62 evaluator """rankbased""" +255 63 dataset """kinships""" +255 63 model """ermlp""" +255 63 loss """softplus""" +255 63 regularizer """no""" +255 63 optimizer """adam""" +255 63 training_loop """owa""" +255 63 negative_sampler """basic""" +255 63 evaluator """rankbased""" +255 64 dataset """kinships""" +255 64 model """ermlp""" +255 64 loss """softplus""" +255 64 regularizer """no""" +255 64 optimizer """adam""" +255 64 training_loop """owa""" +255 64 negative_sampler """basic""" +255 64 evaluator """rankbased""" +255 65 dataset """kinships""" +255 65 model """ermlp""" +255 65 loss """softplus""" +255 65 regularizer """no""" +255 65 optimizer """adam""" +255 65 training_loop """owa""" +255 65 negative_sampler """basic""" +255 65 evaluator """rankbased""" +255 66 dataset """kinships""" +255 66 model """ermlp""" +255 66 loss """softplus""" +255 66 regularizer """no""" +255 66 optimizer """adam""" +255 66 training_loop """owa""" +255 66 negative_sampler """basic""" +255 66 evaluator """rankbased""" +255 67 dataset """kinships""" +255 67 model """ermlp""" +255 67 loss """softplus""" +255 67 regularizer """no""" +255 67 optimizer """adam""" +255 67 training_loop """owa""" +255 67 negative_sampler """basic""" +255 67 evaluator """rankbased""" +255 68 dataset """kinships""" +255 68 model """ermlp""" +255 68 loss """softplus""" +255 68 regularizer """no""" +255 68 optimizer """adam""" +255 68 training_loop """owa""" +255 68 
negative_sampler """basic""" +255 68 evaluator """rankbased""" +255 69 dataset """kinships""" +255 69 model """ermlp""" +255 69 loss """softplus""" +255 69 regularizer """no""" +255 69 optimizer """adam""" +255 69 training_loop """owa""" +255 69 negative_sampler """basic""" +255 69 evaluator """rankbased""" +255 70 dataset """kinships""" +255 70 model """ermlp""" +255 70 loss """softplus""" +255 70 regularizer """no""" +255 70 optimizer """adam""" +255 70 training_loop """owa""" +255 70 negative_sampler """basic""" +255 70 evaluator """rankbased""" +255 71 dataset """kinships""" +255 71 model """ermlp""" +255 71 loss """softplus""" +255 71 regularizer """no""" +255 71 optimizer """adam""" +255 71 training_loop """owa""" +255 71 negative_sampler """basic""" +255 71 evaluator """rankbased""" +255 72 dataset """kinships""" +255 72 model """ermlp""" +255 72 loss """softplus""" +255 72 regularizer """no""" +255 72 optimizer """adam""" +255 72 training_loop """owa""" +255 72 negative_sampler """basic""" +255 72 evaluator """rankbased""" +255 73 dataset """kinships""" +255 73 model """ermlp""" +255 73 loss """softplus""" +255 73 regularizer """no""" +255 73 optimizer """adam""" +255 73 training_loop """owa""" +255 73 negative_sampler """basic""" +255 73 evaluator """rankbased""" +255 74 dataset """kinships""" +255 74 model """ermlp""" +255 74 loss """softplus""" +255 74 regularizer """no""" +255 74 optimizer """adam""" +255 74 training_loop """owa""" +255 74 negative_sampler """basic""" +255 74 evaluator """rankbased""" +255 75 dataset """kinships""" +255 75 model """ermlp""" +255 75 loss """softplus""" +255 75 regularizer """no""" +255 75 optimizer """adam""" +255 75 training_loop """owa""" +255 75 negative_sampler """basic""" +255 75 evaluator """rankbased""" +255 76 dataset """kinships""" +255 76 model """ermlp""" +255 76 loss """softplus""" +255 76 regularizer """no""" +255 76 optimizer """adam""" +255 76 training_loop """owa""" +255 76 negative_sampler """basic""" 
+255 76 evaluator """rankbased""" +255 77 dataset """kinships""" +255 77 model """ermlp""" +255 77 loss """softplus""" +255 77 regularizer """no""" +255 77 optimizer """adam""" +255 77 training_loop """owa""" +255 77 negative_sampler """basic""" +255 77 evaluator """rankbased""" +255 78 dataset """kinships""" +255 78 model """ermlp""" +255 78 loss """softplus""" +255 78 regularizer """no""" +255 78 optimizer """adam""" +255 78 training_loop """owa""" +255 78 negative_sampler """basic""" +255 78 evaluator """rankbased""" +255 79 dataset """kinships""" +255 79 model """ermlp""" +255 79 loss """softplus""" +255 79 regularizer """no""" +255 79 optimizer """adam""" +255 79 training_loop """owa""" +255 79 negative_sampler """basic""" +255 79 evaluator """rankbased""" +255 80 dataset """kinships""" +255 80 model """ermlp""" +255 80 loss """softplus""" +255 80 regularizer """no""" +255 80 optimizer """adam""" +255 80 training_loop """owa""" +255 80 negative_sampler """basic""" +255 80 evaluator """rankbased""" +255 81 dataset """kinships""" +255 81 model """ermlp""" +255 81 loss """softplus""" +255 81 regularizer """no""" +255 81 optimizer """adam""" +255 81 training_loop """owa""" +255 81 negative_sampler """basic""" +255 81 evaluator """rankbased""" +255 82 dataset """kinships""" +255 82 model """ermlp""" +255 82 loss """softplus""" +255 82 regularizer """no""" +255 82 optimizer """adam""" +255 82 training_loop """owa""" +255 82 negative_sampler """basic""" +255 82 evaluator """rankbased""" +255 83 dataset """kinships""" +255 83 model """ermlp""" +255 83 loss """softplus""" +255 83 regularizer """no""" +255 83 optimizer """adam""" +255 83 training_loop """owa""" +255 83 negative_sampler """basic""" +255 83 evaluator """rankbased""" +255 84 dataset """kinships""" +255 84 model """ermlp""" +255 84 loss """softplus""" +255 84 regularizer """no""" +255 84 optimizer """adam""" +255 84 training_loop """owa""" +255 84 negative_sampler """basic""" +255 84 evaluator 
"""rankbased""" +255 85 dataset """kinships""" +255 85 model """ermlp""" +255 85 loss """softplus""" +255 85 regularizer """no""" +255 85 optimizer """adam""" +255 85 training_loop """owa""" +255 85 negative_sampler """basic""" +255 85 evaluator """rankbased""" +255 86 dataset """kinships""" +255 86 model """ermlp""" +255 86 loss """softplus""" +255 86 regularizer """no""" +255 86 optimizer """adam""" +255 86 training_loop """owa""" +255 86 negative_sampler """basic""" +255 86 evaluator """rankbased""" +255 87 dataset """kinships""" +255 87 model """ermlp""" +255 87 loss """softplus""" +255 87 regularizer """no""" +255 87 optimizer """adam""" +255 87 training_loop """owa""" +255 87 negative_sampler """basic""" +255 87 evaluator """rankbased""" +255 88 dataset """kinships""" +255 88 model """ermlp""" +255 88 loss """softplus""" +255 88 regularizer """no""" +255 88 optimizer """adam""" +255 88 training_loop """owa""" +255 88 negative_sampler """basic""" +255 88 evaluator """rankbased""" +255 89 dataset """kinships""" +255 89 model """ermlp""" +255 89 loss """softplus""" +255 89 regularizer """no""" +255 89 optimizer """adam""" +255 89 training_loop """owa""" +255 89 negative_sampler """basic""" +255 89 evaluator """rankbased""" +255 90 dataset """kinships""" +255 90 model """ermlp""" +255 90 loss """softplus""" +255 90 regularizer """no""" +255 90 optimizer """adam""" +255 90 training_loop """owa""" +255 90 negative_sampler """basic""" +255 90 evaluator """rankbased""" +255 91 dataset """kinships""" +255 91 model """ermlp""" +255 91 loss """softplus""" +255 91 regularizer """no""" +255 91 optimizer """adam""" +255 91 training_loop """owa""" +255 91 negative_sampler """basic""" +255 91 evaluator """rankbased""" +255 92 dataset """kinships""" +255 92 model """ermlp""" +255 92 loss """softplus""" +255 92 regularizer """no""" +255 92 optimizer """adam""" +255 92 training_loop """owa""" +255 92 negative_sampler """basic""" +255 92 evaluator """rankbased""" +255 93 dataset 
"""kinships""" +255 93 model """ermlp""" +255 93 loss """softplus""" +255 93 regularizer """no""" +255 93 optimizer """adam""" +255 93 training_loop """owa""" +255 93 negative_sampler """basic""" +255 93 evaluator """rankbased""" +255 94 dataset """kinships""" +255 94 model """ermlp""" +255 94 loss """softplus""" +255 94 regularizer """no""" +255 94 optimizer """adam""" +255 94 training_loop """owa""" +255 94 negative_sampler """basic""" +255 94 evaluator """rankbased""" +255 95 dataset """kinships""" +255 95 model """ermlp""" +255 95 loss """softplus""" +255 95 regularizer """no""" +255 95 optimizer """adam""" +255 95 training_loop """owa""" +255 95 negative_sampler """basic""" +255 95 evaluator """rankbased""" +255 96 dataset """kinships""" +255 96 model """ermlp""" +255 96 loss """softplus""" +255 96 regularizer """no""" +255 96 optimizer """adam""" +255 96 training_loop """owa""" +255 96 negative_sampler """basic""" +255 96 evaluator """rankbased""" +255 97 dataset """kinships""" +255 97 model """ermlp""" +255 97 loss """softplus""" +255 97 regularizer """no""" +255 97 optimizer """adam""" +255 97 training_loop """owa""" +255 97 negative_sampler """basic""" +255 97 evaluator """rankbased""" +255 98 dataset """kinships""" +255 98 model """ermlp""" +255 98 loss """softplus""" +255 98 regularizer """no""" +255 98 optimizer """adam""" +255 98 training_loop """owa""" +255 98 negative_sampler """basic""" +255 98 evaluator """rankbased""" +255 99 dataset """kinships""" +255 99 model """ermlp""" +255 99 loss """softplus""" +255 99 regularizer """no""" +255 99 optimizer """adam""" +255 99 training_loop """owa""" +255 99 negative_sampler """basic""" +255 99 evaluator """rankbased""" +255 100 dataset """kinships""" +255 100 model """ermlp""" +255 100 loss """softplus""" +255 100 regularizer """no""" +255 100 optimizer """adam""" +255 100 training_loop """owa""" +255 100 negative_sampler """basic""" +255 100 evaluator """rankbased""" +256 1 model.embedding_dim 2.0 +256 1 
loss.margin 9.917168680114747 +256 1 optimizer.lr 0.0265294858274292 +256 1 negative_sampler.num_negs_per_pos 49.0 +256 1 training.batch_size 0.0 +256 2 model.embedding_dim 1.0 +256 2 loss.margin 4.304444871184307 +256 2 optimizer.lr 0.002707059784187555 +256 2 negative_sampler.num_negs_per_pos 14.0 +256 2 training.batch_size 0.0 +256 3 model.embedding_dim 0.0 +256 3 loss.margin 2.3445109938179227 +256 3 optimizer.lr 0.023993768460491834 +256 3 negative_sampler.num_negs_per_pos 63.0 +256 3 training.batch_size 1.0 +256 4 model.embedding_dim 0.0 +256 4 loss.margin 3.7058154586745946 +256 4 optimizer.lr 0.00584551475763664 +256 4 negative_sampler.num_negs_per_pos 49.0 +256 4 training.batch_size 2.0 +256 5 model.embedding_dim 0.0 +256 5 loss.margin 9.916739795371333 +256 5 optimizer.lr 0.005954751635243643 +256 5 negative_sampler.num_negs_per_pos 74.0 +256 5 training.batch_size 0.0 +256 6 model.embedding_dim 1.0 +256 6 loss.margin 3.3447737831672644 +256 6 optimizer.lr 0.017576951460868587 +256 6 negative_sampler.num_negs_per_pos 57.0 +256 6 training.batch_size 2.0 +256 7 model.embedding_dim 0.0 +256 7 loss.margin 2.9062665427280696 +256 7 optimizer.lr 0.001356540351232002 +256 7 negative_sampler.num_negs_per_pos 74.0 +256 7 training.batch_size 1.0 +256 8 model.embedding_dim 2.0 +256 8 loss.margin 1.74945893625328 +256 8 optimizer.lr 0.001898273802644329 +256 8 negative_sampler.num_negs_per_pos 35.0 +256 8 training.batch_size 1.0 +256 9 model.embedding_dim 2.0 +256 9 loss.margin 5.230713104960525 +256 9 optimizer.lr 0.010932262631034324 +256 9 negative_sampler.num_negs_per_pos 98.0 +256 9 training.batch_size 2.0 +256 10 model.embedding_dim 1.0 +256 10 loss.margin 5.216733951853312 +256 10 optimizer.lr 0.006812447759691913 +256 10 negative_sampler.num_negs_per_pos 77.0 +256 10 training.batch_size 2.0 +256 11 model.embedding_dim 0.0 +256 11 loss.margin 9.990299465570683 +256 11 optimizer.lr 0.0012667422333393484 +256 11 negative_sampler.num_negs_per_pos 32.0 +256 11 
training.batch_size 0.0 +256 12 model.embedding_dim 1.0 +256 12 loss.margin 9.661417518387061 +256 12 optimizer.lr 0.042907196216167016 +256 12 negative_sampler.num_negs_per_pos 23.0 +256 12 training.batch_size 2.0 +256 13 model.embedding_dim 1.0 +256 13 loss.margin 8.499939689522261 +256 13 optimizer.lr 0.06415630329699092 +256 13 negative_sampler.num_negs_per_pos 61.0 +256 13 training.batch_size 2.0 +256 14 model.embedding_dim 0.0 +256 14 loss.margin 3.247044429923597 +256 14 optimizer.lr 0.023419923184751053 +256 14 negative_sampler.num_negs_per_pos 24.0 +256 14 training.batch_size 0.0 +256 15 model.embedding_dim 2.0 +256 15 loss.margin 8.710725911781083 +256 15 optimizer.lr 0.07563164791056241 +256 15 negative_sampler.num_negs_per_pos 74.0 +256 15 training.batch_size 1.0 +256 16 model.embedding_dim 0.0 +256 16 loss.margin 3.5474691845173 +256 16 optimizer.lr 0.01952497504824385 +256 16 negative_sampler.num_negs_per_pos 56.0 +256 16 training.batch_size 1.0 +256 17 model.embedding_dim 2.0 +256 17 loss.margin 3.312328101771211 +256 17 optimizer.lr 0.06386967526599695 +256 17 negative_sampler.num_negs_per_pos 12.0 +256 17 training.batch_size 0.0 +256 18 model.embedding_dim 0.0 +256 18 loss.margin 0.8848007203785191 +256 18 optimizer.lr 0.05306459514831881 +256 18 negative_sampler.num_negs_per_pos 32.0 +256 18 training.batch_size 2.0 +256 19 model.embedding_dim 2.0 +256 19 loss.margin 2.6239547058881567 +256 19 optimizer.lr 0.07872585388742588 +256 19 negative_sampler.num_negs_per_pos 26.0 +256 19 training.batch_size 2.0 +256 20 model.embedding_dim 1.0 +256 20 loss.margin 4.2055367789485905 +256 20 optimizer.lr 0.003988519068620464 +256 20 negative_sampler.num_negs_per_pos 45.0 +256 20 training.batch_size 0.0 +256 21 model.embedding_dim 1.0 +256 21 loss.margin 7.7701723203613415 +256 21 optimizer.lr 0.008983861112411153 +256 21 negative_sampler.num_negs_per_pos 8.0 +256 21 training.batch_size 0.0 +256 22 model.embedding_dim 1.0 +256 22 loss.margin 7.103903098429173 
+256 22 optimizer.lr 0.024159241455614485 +256 22 negative_sampler.num_negs_per_pos 78.0 +256 22 training.batch_size 2.0 +256 23 model.embedding_dim 1.0 +256 23 loss.margin 2.345463806892508 +256 23 optimizer.lr 0.002050746512412283 +256 23 negative_sampler.num_negs_per_pos 95.0 +256 23 training.batch_size 2.0 +256 24 model.embedding_dim 1.0 +256 24 loss.margin 3.1240534224452876 +256 24 optimizer.lr 0.0012405287619021035 +256 24 negative_sampler.num_negs_per_pos 86.0 +256 24 training.batch_size 0.0 +256 25 model.embedding_dim 0.0 +256 25 loss.margin 7.718110187948189 +256 25 optimizer.lr 0.0892046318756971 +256 25 negative_sampler.num_negs_per_pos 97.0 +256 25 training.batch_size 2.0 +256 26 model.embedding_dim 0.0 +256 26 loss.margin 8.773931678444379 +256 26 optimizer.lr 0.01688473383302507 +256 26 negative_sampler.num_negs_per_pos 22.0 +256 26 training.batch_size 0.0 +256 27 model.embedding_dim 1.0 +256 27 loss.margin 7.691163675264386 +256 27 optimizer.lr 0.0015964384449694978 +256 27 negative_sampler.num_negs_per_pos 22.0 +256 27 training.batch_size 2.0 +256 28 model.embedding_dim 0.0 +256 28 loss.margin 7.873373004560062 +256 28 optimizer.lr 0.09293269197114466 +256 28 negative_sampler.num_negs_per_pos 52.0 +256 28 training.batch_size 1.0 +256 29 model.embedding_dim 1.0 +256 29 loss.margin 5.574885948389194 +256 29 optimizer.lr 0.0011576245359516191 +256 29 negative_sampler.num_negs_per_pos 37.0 +256 29 training.batch_size 2.0 +256 30 model.embedding_dim 2.0 +256 30 loss.margin 9.503817250995567 +256 30 optimizer.lr 0.0014034480269243038 +256 30 negative_sampler.num_negs_per_pos 94.0 +256 30 training.batch_size 2.0 +256 31 model.embedding_dim 0.0 +256 31 loss.margin 8.135256173249225 +256 31 optimizer.lr 0.005442079757467995 +256 31 negative_sampler.num_negs_per_pos 28.0 +256 31 training.batch_size 2.0 +256 32 model.embedding_dim 1.0 +256 32 loss.margin 2.9641087891640696 +256 32 optimizer.lr 0.0014361896895647883 +256 32 negative_sampler.num_negs_per_pos 
13.0 +256 32 training.batch_size 2.0 +256 33 model.embedding_dim 0.0 +256 33 loss.margin 7.537417251128346 +256 33 optimizer.lr 0.0010200335484074018 +256 33 negative_sampler.num_negs_per_pos 39.0 +256 33 training.batch_size 0.0 +256 34 model.embedding_dim 0.0 +256 34 loss.margin 7.158751757659905 +256 34 optimizer.lr 0.0597980163094096 +256 34 negative_sampler.num_negs_per_pos 54.0 +256 34 training.batch_size 2.0 +256 35 model.embedding_dim 0.0 +256 35 loss.margin 1.3584747865862352 +256 35 optimizer.lr 0.004142495855360031 +256 35 negative_sampler.num_negs_per_pos 7.0 +256 35 training.batch_size 1.0 +256 36 model.embedding_dim 1.0 +256 36 loss.margin 2.583773737953827 +256 36 optimizer.lr 0.013195927769119709 +256 36 negative_sampler.num_negs_per_pos 91.0 +256 36 training.batch_size 0.0 +256 37 model.embedding_dim 0.0 +256 37 loss.margin 6.570327858455074 +256 37 optimizer.lr 0.002414693053784084 +256 37 negative_sampler.num_negs_per_pos 39.0 +256 37 training.batch_size 1.0 +256 38 model.embedding_dim 0.0 +256 38 loss.margin 3.352940431824754 +256 38 optimizer.lr 0.003879126697837257 +256 38 negative_sampler.num_negs_per_pos 93.0 +256 38 training.batch_size 0.0 +256 39 model.embedding_dim 1.0 +256 39 loss.margin 8.087056750683411 +256 39 optimizer.lr 0.0012953572161913797 +256 39 negative_sampler.num_negs_per_pos 93.0 +256 39 training.batch_size 0.0 +256 40 model.embedding_dim 2.0 +256 40 loss.margin 8.699286417178598 +256 40 optimizer.lr 0.018848237314173004 +256 40 negative_sampler.num_negs_per_pos 32.0 +256 40 training.batch_size 2.0 +256 41 model.embedding_dim 2.0 +256 41 loss.margin 2.691769531301741 +256 41 optimizer.lr 0.010474617490568982 +256 41 negative_sampler.num_negs_per_pos 30.0 +256 41 training.batch_size 1.0 +256 42 model.embedding_dim 0.0 +256 42 loss.margin 5.737009572696905 +256 42 optimizer.lr 0.0018269566741359845 +256 42 negative_sampler.num_negs_per_pos 11.0 +256 42 training.batch_size 0.0 +256 43 model.embedding_dim 0.0 +256 43 loss.margin 
6.83708780640353 +256 43 optimizer.lr 0.002853329436394867 +256 43 negative_sampler.num_negs_per_pos 36.0 +256 43 training.batch_size 2.0 +256 44 model.embedding_dim 1.0 +256 44 loss.margin 5.834673088449404 +256 44 optimizer.lr 0.0294015516867577 +256 44 negative_sampler.num_negs_per_pos 83.0 +256 44 training.batch_size 2.0 +256 45 model.embedding_dim 2.0 +256 45 loss.margin 6.254710097797264 +256 45 optimizer.lr 0.026329605379456302 +256 45 negative_sampler.num_negs_per_pos 47.0 +256 45 training.batch_size 0.0 +256 46 model.embedding_dim 1.0 +256 46 loss.margin 5.429974741374565 +256 46 optimizer.lr 0.01739643314510577 +256 46 negative_sampler.num_negs_per_pos 92.0 +256 46 training.batch_size 1.0 +256 47 model.embedding_dim 2.0 +256 47 loss.margin 8.046864409775182 +256 47 optimizer.lr 0.06221593915555256 +256 47 negative_sampler.num_negs_per_pos 84.0 +256 47 training.batch_size 1.0 +256 48 model.embedding_dim 2.0 +256 48 loss.margin 2.137841638804205 +256 48 optimizer.lr 0.007622046211541134 +256 48 negative_sampler.num_negs_per_pos 46.0 +256 48 training.batch_size 1.0 +256 49 model.embedding_dim 2.0 +256 49 loss.margin 9.691422043036555 +256 49 optimizer.lr 0.0015751478814051807 +256 49 negative_sampler.num_negs_per_pos 44.0 +256 49 training.batch_size 1.0 +256 50 model.embedding_dim 0.0 +256 50 loss.margin 6.415855135111187 +256 50 optimizer.lr 0.00914728619031487 +256 50 negative_sampler.num_negs_per_pos 58.0 +256 50 training.batch_size 2.0 +256 51 model.embedding_dim 2.0 +256 51 loss.margin 4.412525378626642 +256 51 optimizer.lr 0.013087131169892111 +256 51 negative_sampler.num_negs_per_pos 48.0 +256 51 training.batch_size 1.0 +256 52 model.embedding_dim 0.0 +256 52 loss.margin 5.418771513553435 +256 52 optimizer.lr 0.03663187043262418 +256 52 negative_sampler.num_negs_per_pos 60.0 +256 52 training.batch_size 0.0 +256 53 model.embedding_dim 0.0 +256 53 loss.margin 2.7577974662413425 +256 53 optimizer.lr 0.011210210608680942 +256 53 
negative_sampler.num_negs_per_pos 26.0 +256 53 training.batch_size 0.0 +256 54 model.embedding_dim 0.0 +256 54 loss.margin 5.476253673540452 +256 54 optimizer.lr 0.008568300288506392 +256 54 negative_sampler.num_negs_per_pos 17.0 +256 54 training.batch_size 2.0 +256 55 model.embedding_dim 0.0 +256 55 loss.margin 1.3818224309724392 +256 55 optimizer.lr 0.003992330761661969 +256 55 negative_sampler.num_negs_per_pos 43.0 +256 55 training.batch_size 1.0 +256 56 model.embedding_dim 0.0 +256 56 loss.margin 1.3044410040816181 +256 56 optimizer.lr 0.02117863279057949 +256 56 negative_sampler.num_negs_per_pos 87.0 +256 56 training.batch_size 1.0 +256 57 model.embedding_dim 2.0 +256 57 loss.margin 8.440131057389266 +256 57 optimizer.lr 0.08908331439591313 +256 57 negative_sampler.num_negs_per_pos 64.0 +256 57 training.batch_size 0.0 +256 58 model.embedding_dim 0.0 +256 58 loss.margin 3.9252780779162917 +256 58 optimizer.lr 0.0056919800254658795 +256 58 negative_sampler.num_negs_per_pos 28.0 +256 58 training.batch_size 2.0 +256 59 model.embedding_dim 0.0 +256 59 loss.margin 8.854488905068374 +256 59 optimizer.lr 0.004980271385782817 +256 59 negative_sampler.num_negs_per_pos 88.0 +256 59 training.batch_size 0.0 +256 60 model.embedding_dim 1.0 +256 60 loss.margin 8.99548136679618 +256 60 optimizer.lr 0.005106242807258641 +256 60 negative_sampler.num_negs_per_pos 3.0 +256 60 training.batch_size 1.0 +256 61 model.embedding_dim 2.0 +256 61 loss.margin 6.4477309586009035 +256 61 optimizer.lr 0.006067577864390203 +256 61 negative_sampler.num_negs_per_pos 38.0 +256 61 training.batch_size 1.0 +256 62 model.embedding_dim 0.0 +256 62 loss.margin 1.3402579935367616 +256 62 optimizer.lr 0.0026017867923599667 +256 62 negative_sampler.num_negs_per_pos 89.0 +256 62 training.batch_size 1.0 +256 63 model.embedding_dim 0.0 +256 63 loss.margin 9.314673295615687 +256 63 optimizer.lr 0.033154551311211915 +256 63 negative_sampler.num_negs_per_pos 53.0 +256 63 training.batch_size 1.0 +256 64 
model.embedding_dim 0.0 +256 64 loss.margin 3.0572428530566955 +256 64 optimizer.lr 0.0014067870183153413 +256 64 negative_sampler.num_negs_per_pos 96.0 +256 64 training.batch_size 2.0 +256 65 model.embedding_dim 1.0 +256 65 loss.margin 9.764100657599963 +256 65 optimizer.lr 0.004344244040544361 +256 65 negative_sampler.num_negs_per_pos 95.0 +256 65 training.batch_size 1.0 +256 66 model.embedding_dim 1.0 +256 66 loss.margin 3.707860087823572 +256 66 optimizer.lr 0.01377579045572096 +256 66 negative_sampler.num_negs_per_pos 66.0 +256 66 training.batch_size 2.0 +256 67 model.embedding_dim 0.0 +256 67 loss.margin 1.4106138262549313 +256 67 optimizer.lr 0.07328585958437168 +256 67 negative_sampler.num_negs_per_pos 36.0 +256 67 training.batch_size 2.0 +256 68 model.embedding_dim 1.0 +256 68 loss.margin 8.399186108456155 +256 68 optimizer.lr 0.02711598531136181 +256 68 negative_sampler.num_negs_per_pos 32.0 +256 68 training.batch_size 1.0 +256 69 model.embedding_dim 0.0 +256 69 loss.margin 6.311905577337919 +256 69 optimizer.lr 0.005597428328098813 +256 69 negative_sampler.num_negs_per_pos 77.0 +256 69 training.batch_size 2.0 +256 70 model.embedding_dim 1.0 +256 70 loss.margin 6.466513050424391 +256 70 optimizer.lr 0.004087128635155032 +256 70 negative_sampler.num_negs_per_pos 0.0 +256 70 training.batch_size 1.0 +256 71 model.embedding_dim 0.0 +256 71 loss.margin 9.023015857612073 +256 71 optimizer.lr 0.0014375531115621827 +256 71 negative_sampler.num_negs_per_pos 29.0 +256 71 training.batch_size 2.0 +256 72 model.embedding_dim 2.0 +256 72 loss.margin 5.501815345597186 +256 72 optimizer.lr 0.0031003058591556085 +256 72 negative_sampler.num_negs_per_pos 1.0 +256 72 training.batch_size 1.0 +256 73 model.embedding_dim 0.0 +256 73 loss.margin 6.268375932984895 +256 73 optimizer.lr 0.044646702203546794 +256 73 negative_sampler.num_negs_per_pos 75.0 +256 73 training.batch_size 0.0 +256 74 model.embedding_dim 1.0 +256 74 loss.margin 6.491678967618467 +256 74 optimizer.lr 
0.004039570371734487 +256 74 negative_sampler.num_negs_per_pos 97.0 +256 74 training.batch_size 1.0 +256 75 model.embedding_dim 0.0 +256 75 loss.margin 6.794750974471687 +256 75 optimizer.lr 0.018568372852965417 +256 75 negative_sampler.num_negs_per_pos 74.0 +256 75 training.batch_size 2.0 +256 76 model.embedding_dim 0.0 +256 76 loss.margin 2.388387942892976 +256 76 optimizer.lr 0.003496118476972127 +256 76 negative_sampler.num_negs_per_pos 45.0 +256 76 training.batch_size 2.0 +256 77 model.embedding_dim 0.0 +256 77 loss.margin 4.238923978622055 +256 77 optimizer.lr 0.002104659552227883 +256 77 negative_sampler.num_negs_per_pos 23.0 +256 77 training.batch_size 0.0 +256 78 model.embedding_dim 0.0 +256 78 loss.margin 9.133079123273749 +256 78 optimizer.lr 0.059611592443220444 +256 78 negative_sampler.num_negs_per_pos 29.0 +256 78 training.batch_size 2.0 +256 79 model.embedding_dim 1.0 +256 79 loss.margin 3.7028795054301984 +256 79 optimizer.lr 0.005158344170224267 +256 79 negative_sampler.num_negs_per_pos 71.0 +256 79 training.batch_size 0.0 +256 80 model.embedding_dim 0.0 +256 80 loss.margin 4.585007962804063 +256 80 optimizer.lr 0.04536339851309995 +256 80 negative_sampler.num_negs_per_pos 76.0 +256 80 training.batch_size 1.0 +256 81 model.embedding_dim 0.0 +256 81 loss.margin 9.107754166838394 +256 81 optimizer.lr 0.025744329313025765 +256 81 negative_sampler.num_negs_per_pos 31.0 +256 81 training.batch_size 0.0 +256 82 model.embedding_dim 0.0 +256 82 loss.margin 6.654577717221468 +256 82 optimizer.lr 0.05725410365727513 +256 82 negative_sampler.num_negs_per_pos 48.0 +256 82 training.batch_size 1.0 +256 83 model.embedding_dim 2.0 +256 83 loss.margin 6.035958299497836 +256 83 optimizer.lr 0.004017168981111912 +256 83 negative_sampler.num_negs_per_pos 13.0 +256 83 training.batch_size 2.0 +256 84 model.embedding_dim 0.0 +256 84 loss.margin 6.974425797488089 +256 84 optimizer.lr 0.010038678921174231 +256 84 negative_sampler.num_negs_per_pos 10.0 +256 84 
training.batch_size 2.0 +256 85 model.embedding_dim 1.0 +256 85 loss.margin 2.3303245543215203 +256 85 optimizer.lr 0.0022609780927596666 +256 85 negative_sampler.num_negs_per_pos 55.0 +256 85 training.batch_size 0.0 +256 86 model.embedding_dim 2.0 +256 86 loss.margin 4.372808225912477 +256 86 optimizer.lr 0.002249542226178495 +256 86 negative_sampler.num_negs_per_pos 4.0 +256 86 training.batch_size 0.0 +256 87 model.embedding_dim 1.0 +256 87 loss.margin 0.7585141611606747 +256 87 optimizer.lr 0.04507349410017732 +256 87 negative_sampler.num_negs_per_pos 22.0 +256 87 training.batch_size 2.0 +256 88 model.embedding_dim 0.0 +256 88 loss.margin 7.270646726799356 +256 88 optimizer.lr 0.005050670018330432 +256 88 negative_sampler.num_negs_per_pos 73.0 +256 88 training.batch_size 0.0 +256 89 model.embedding_dim 1.0 +256 89 loss.margin 2.993447302670465 +256 89 optimizer.lr 0.011477033389927064 +256 89 negative_sampler.num_negs_per_pos 46.0 +256 89 training.batch_size 0.0 +256 90 model.embedding_dim 2.0 +256 90 loss.margin 8.743351685145486 +256 90 optimizer.lr 0.010697778855635921 +256 90 negative_sampler.num_negs_per_pos 36.0 +256 90 training.batch_size 1.0 +256 91 model.embedding_dim 1.0 +256 91 loss.margin 4.413809666542534 +256 91 optimizer.lr 0.02956923035422923 +256 91 negative_sampler.num_negs_per_pos 25.0 +256 91 training.batch_size 2.0 +256 92 model.embedding_dim 0.0 +256 92 loss.margin 6.8867747279654274 +256 92 optimizer.lr 0.008855826229223427 +256 92 negative_sampler.num_negs_per_pos 36.0 +256 92 training.batch_size 0.0 +256 93 model.embedding_dim 0.0 +256 93 loss.margin 0.9698637655065521 +256 93 optimizer.lr 0.01328278887143644 +256 93 negative_sampler.num_negs_per_pos 3.0 +256 93 training.batch_size 0.0 +256 94 model.embedding_dim 1.0 +256 94 loss.margin 9.133821019251899 +256 94 optimizer.lr 0.008302236911893565 +256 94 negative_sampler.num_negs_per_pos 6.0 +256 94 training.batch_size 2.0 +256 95 model.embedding_dim 1.0 +256 95 loss.margin 
9.011732338101272 +256 95 optimizer.lr 0.0032419956715356813 +256 95 negative_sampler.num_negs_per_pos 30.0 +256 95 training.batch_size 2.0 +256 96 model.embedding_dim 0.0 +256 96 loss.margin 9.0498878084909 +256 96 optimizer.lr 0.005675994873289366 +256 96 negative_sampler.num_negs_per_pos 55.0 +256 96 training.batch_size 1.0 +256 97 model.embedding_dim 1.0 +256 97 loss.margin 6.355410626994283 +256 97 optimizer.lr 0.019485214678925278 +256 97 negative_sampler.num_negs_per_pos 51.0 +256 97 training.batch_size 1.0 +256 98 model.embedding_dim 1.0 +256 98 loss.margin 4.559276810027694 +256 98 optimizer.lr 0.015329565012774927 +256 98 negative_sampler.num_negs_per_pos 6.0 +256 98 training.batch_size 1.0 +256 99 model.embedding_dim 2.0 +256 99 loss.margin 5.789490338109631 +256 99 optimizer.lr 0.0015849464090609484 +256 99 negative_sampler.num_negs_per_pos 62.0 +256 99 training.batch_size 1.0 +256 100 model.embedding_dim 2.0 +256 100 loss.margin 1.9775360234216355 +256 100 optimizer.lr 0.0013946608346878096 +256 100 negative_sampler.num_negs_per_pos 83.0 +256 100 training.batch_size 1.0 +256 1 dataset """kinships""" +256 1 model """ermlp""" +256 1 loss """marginranking""" +256 1 regularizer """no""" +256 1 optimizer """adam""" +256 1 training_loop """owa""" +256 1 negative_sampler """basic""" +256 1 evaluator """rankbased""" +256 2 dataset """kinships""" +256 2 model """ermlp""" +256 2 loss """marginranking""" +256 2 regularizer """no""" +256 2 optimizer """adam""" +256 2 training_loop """owa""" +256 2 negative_sampler """basic""" +256 2 evaluator """rankbased""" +256 3 dataset """kinships""" +256 3 model """ermlp""" +256 3 loss """marginranking""" +256 3 regularizer """no""" +256 3 optimizer """adam""" +256 3 training_loop """owa""" +256 3 negative_sampler """basic""" +256 3 evaluator """rankbased""" +256 4 dataset """kinships""" +256 4 model """ermlp""" +256 4 loss """marginranking""" +256 4 regularizer """no""" +256 4 optimizer """adam""" +256 4 training_loop 
"""owa""" +256 4 negative_sampler """basic""" +256 4 evaluator """rankbased""" +256 5 dataset """kinships""" +256 5 model """ermlp""" +256 5 loss """marginranking""" +256 5 regularizer """no""" +256 5 optimizer """adam""" +256 5 training_loop """owa""" +256 5 negative_sampler """basic""" +256 5 evaluator """rankbased""" +256 6 dataset """kinships""" +256 6 model """ermlp""" +256 6 loss """marginranking""" +256 6 regularizer """no""" +256 6 optimizer """adam""" +256 6 training_loop """owa""" +256 6 negative_sampler """basic""" +256 6 evaluator """rankbased""" +256 7 dataset """kinships""" +256 7 model """ermlp""" +256 7 loss """marginranking""" +256 7 regularizer """no""" +256 7 optimizer """adam""" +256 7 training_loop """owa""" +256 7 negative_sampler """basic""" +256 7 evaluator """rankbased""" +256 8 dataset """kinships""" +256 8 model """ermlp""" +256 8 loss """marginranking""" +256 8 regularizer """no""" +256 8 optimizer """adam""" +256 8 training_loop """owa""" +256 8 negative_sampler """basic""" +256 8 evaluator """rankbased""" +256 9 dataset """kinships""" +256 9 model """ermlp""" +256 9 loss """marginranking""" +256 9 regularizer """no""" +256 9 optimizer """adam""" +256 9 training_loop """owa""" +256 9 negative_sampler """basic""" +256 9 evaluator """rankbased""" +256 10 dataset """kinships""" +256 10 model """ermlp""" +256 10 loss """marginranking""" +256 10 regularizer """no""" +256 10 optimizer """adam""" +256 10 training_loop """owa""" +256 10 negative_sampler """basic""" +256 10 evaluator """rankbased""" +256 11 dataset """kinships""" +256 11 model """ermlp""" +256 11 loss """marginranking""" +256 11 regularizer """no""" +256 11 optimizer """adam""" +256 11 training_loop """owa""" +256 11 negative_sampler """basic""" +256 11 evaluator """rankbased""" +256 12 dataset """kinships""" +256 12 model """ermlp""" +256 12 loss """marginranking""" +256 12 regularizer """no""" +256 12 optimizer """adam""" +256 12 training_loop """owa""" +256 12 
negative_sampler """basic""" +256 12 evaluator """rankbased""" +256 13 dataset """kinships""" +256 13 model """ermlp""" +256 13 loss """marginranking""" +256 13 regularizer """no""" +256 13 optimizer """adam""" +256 13 training_loop """owa""" +256 13 negative_sampler """basic""" +256 13 evaluator """rankbased""" +256 14 dataset """kinships""" +256 14 model """ermlp""" +256 14 loss """marginranking""" +256 14 regularizer """no""" +256 14 optimizer """adam""" +256 14 training_loop """owa""" +256 14 negative_sampler """basic""" +256 14 evaluator """rankbased""" +256 15 dataset """kinships""" +256 15 model """ermlp""" +256 15 loss """marginranking""" +256 15 regularizer """no""" +256 15 optimizer """adam""" +256 15 training_loop """owa""" +256 15 negative_sampler """basic""" +256 15 evaluator """rankbased""" +256 16 dataset """kinships""" +256 16 model """ermlp""" +256 16 loss """marginranking""" +256 16 regularizer """no""" +256 16 optimizer """adam""" +256 16 training_loop """owa""" +256 16 negative_sampler """basic""" +256 16 evaluator """rankbased""" +256 17 dataset """kinships""" +256 17 model """ermlp""" +256 17 loss """marginranking""" +256 17 regularizer """no""" +256 17 optimizer """adam""" +256 17 training_loop """owa""" +256 17 negative_sampler """basic""" +256 17 evaluator """rankbased""" +256 18 dataset """kinships""" +256 18 model """ermlp""" +256 18 loss """marginranking""" +256 18 regularizer """no""" +256 18 optimizer """adam""" +256 18 training_loop """owa""" +256 18 negative_sampler """basic""" +256 18 evaluator """rankbased""" +256 19 dataset """kinships""" +256 19 model """ermlp""" +256 19 loss """marginranking""" +256 19 regularizer """no""" +256 19 optimizer """adam""" +256 19 training_loop """owa""" +256 19 negative_sampler """basic""" +256 19 evaluator """rankbased""" +256 20 dataset """kinships""" +256 20 model """ermlp""" +256 20 loss """marginranking""" +256 20 regularizer """no""" +256 20 optimizer """adam""" +256 20 training_loop """owa""" 
+256 20 negative_sampler """basic""" +256 20 evaluator """rankbased""" +256 21 dataset """kinships""" +256 21 model """ermlp""" +256 21 loss """marginranking""" +256 21 regularizer """no""" +256 21 optimizer """adam""" +256 21 training_loop """owa""" +256 21 negative_sampler """basic""" +256 21 evaluator """rankbased""" +256 22 dataset """kinships""" +256 22 model """ermlp""" +256 22 loss """marginranking""" +256 22 regularizer """no""" +256 22 optimizer """adam""" +256 22 training_loop """owa""" +256 22 negative_sampler """basic""" +256 22 evaluator """rankbased""" +256 23 dataset """kinships""" +256 23 model """ermlp""" +256 23 loss """marginranking""" +256 23 regularizer """no""" +256 23 optimizer """adam""" +256 23 training_loop """owa""" +256 23 negative_sampler """basic""" +256 23 evaluator """rankbased""" +256 24 dataset """kinships""" +256 24 model """ermlp""" +256 24 loss """marginranking""" +256 24 regularizer """no""" +256 24 optimizer """adam""" +256 24 training_loop """owa""" +256 24 negative_sampler """basic""" +256 24 evaluator """rankbased""" +256 25 dataset """kinships""" +256 25 model """ermlp""" +256 25 loss """marginranking""" +256 25 regularizer """no""" +256 25 optimizer """adam""" +256 25 training_loop """owa""" +256 25 negative_sampler """basic""" +256 25 evaluator """rankbased""" +256 26 dataset """kinships""" +256 26 model """ermlp""" +256 26 loss """marginranking""" +256 26 regularizer """no""" +256 26 optimizer """adam""" +256 26 training_loop """owa""" +256 26 negative_sampler """basic""" +256 26 evaluator """rankbased""" +256 27 dataset """kinships""" +256 27 model """ermlp""" +256 27 loss """marginranking""" +256 27 regularizer """no""" +256 27 optimizer """adam""" +256 27 training_loop """owa""" +256 27 negative_sampler """basic""" +256 27 evaluator """rankbased""" +256 28 dataset """kinships""" +256 28 model """ermlp""" +256 28 loss """marginranking""" +256 28 regularizer """no""" +256 28 optimizer """adam""" +256 28 training_loop 
"""owa""" +256 28 negative_sampler """basic""" +256 28 evaluator """rankbased""" +256 29 dataset """kinships""" +256 29 model """ermlp""" +256 29 loss """marginranking""" +256 29 regularizer """no""" +256 29 optimizer """adam""" +256 29 training_loop """owa""" +256 29 negative_sampler """basic""" +256 29 evaluator """rankbased""" +256 30 dataset """kinships""" +256 30 model """ermlp""" +256 30 loss """marginranking""" +256 30 regularizer """no""" +256 30 optimizer """adam""" +256 30 training_loop """owa""" +256 30 negative_sampler """basic""" +256 30 evaluator """rankbased""" +256 31 dataset """kinships""" +256 31 model """ermlp""" +256 31 loss """marginranking""" +256 31 regularizer """no""" +256 31 optimizer """adam""" +256 31 training_loop """owa""" +256 31 negative_sampler """basic""" +256 31 evaluator """rankbased""" +256 32 dataset """kinships""" +256 32 model """ermlp""" +256 32 loss """marginranking""" +256 32 regularizer """no""" +256 32 optimizer """adam""" +256 32 training_loop """owa""" +256 32 negative_sampler """basic""" +256 32 evaluator """rankbased""" +256 33 dataset """kinships""" +256 33 model """ermlp""" +256 33 loss """marginranking""" +256 33 regularizer """no""" +256 33 optimizer """adam""" +256 33 training_loop """owa""" +256 33 negative_sampler """basic""" +256 33 evaluator """rankbased""" +256 34 dataset """kinships""" +256 34 model """ermlp""" +256 34 loss """marginranking""" +256 34 regularizer """no""" +256 34 optimizer """adam""" +256 34 training_loop """owa""" +256 34 negative_sampler """basic""" +256 34 evaluator """rankbased""" +256 35 dataset """kinships""" +256 35 model """ermlp""" +256 35 loss """marginranking""" +256 35 regularizer """no""" +256 35 optimizer """adam""" +256 35 training_loop """owa""" +256 35 negative_sampler """basic""" +256 35 evaluator """rankbased""" +256 36 dataset """kinships""" +256 36 model """ermlp""" +256 36 loss """marginranking""" +256 36 regularizer """no""" +256 36 optimizer """adam""" +256 36 
training_loop """owa""" +256 36 negative_sampler """basic""" +256 36 evaluator """rankbased""" +256 37 dataset """kinships""" +256 37 model """ermlp""" +256 37 loss """marginranking""" +256 37 regularizer """no""" +256 37 optimizer """adam""" +256 37 training_loop """owa""" +256 37 negative_sampler """basic""" +256 37 evaluator """rankbased""" +256 38 dataset """kinships""" +256 38 model """ermlp""" +256 38 loss """marginranking""" +256 38 regularizer """no""" +256 38 optimizer """adam""" +256 38 training_loop """owa""" +256 38 negative_sampler """basic""" +256 38 evaluator """rankbased""" +256 39 dataset """kinships""" +256 39 model """ermlp""" +256 39 loss """marginranking""" +256 39 regularizer """no""" +256 39 optimizer """adam""" +256 39 training_loop """owa""" +256 39 negative_sampler """basic""" +256 39 evaluator """rankbased""" +256 40 dataset """kinships""" +256 40 model """ermlp""" +256 40 loss """marginranking""" +256 40 regularizer """no""" +256 40 optimizer """adam""" +256 40 training_loop """owa""" +256 40 negative_sampler """basic""" +256 40 evaluator """rankbased""" +256 41 dataset """kinships""" +256 41 model """ermlp""" +256 41 loss """marginranking""" +256 41 regularizer """no""" +256 41 optimizer """adam""" +256 41 training_loop """owa""" +256 41 negative_sampler """basic""" +256 41 evaluator """rankbased""" +256 42 dataset """kinships""" +256 42 model """ermlp""" +256 42 loss """marginranking""" +256 42 regularizer """no""" +256 42 optimizer """adam""" +256 42 training_loop """owa""" +256 42 negative_sampler """basic""" +256 42 evaluator """rankbased""" +256 43 dataset """kinships""" +256 43 model """ermlp""" +256 43 loss """marginranking""" +256 43 regularizer """no""" +256 43 optimizer """adam""" +256 43 training_loop """owa""" +256 43 negative_sampler """basic""" +256 43 evaluator """rankbased""" +256 44 dataset """kinships""" +256 44 model """ermlp""" +256 44 loss """marginranking""" +256 44 regularizer """no""" +256 44 optimizer """adam""" 
+256 44 training_loop """owa""" +256 44 negative_sampler """basic""" +256 44 evaluator """rankbased""" +256 45 dataset """kinships""" +256 45 model """ermlp""" +256 45 loss """marginranking""" +256 45 regularizer """no""" +256 45 optimizer """adam""" +256 45 training_loop """owa""" +256 45 negative_sampler """basic""" +256 45 evaluator """rankbased""" +256 46 dataset """kinships""" +256 46 model """ermlp""" +256 46 loss """marginranking""" +256 46 regularizer """no""" +256 46 optimizer """adam""" +256 46 training_loop """owa""" +256 46 negative_sampler """basic""" +256 46 evaluator """rankbased""" +256 47 dataset """kinships""" +256 47 model """ermlp""" +256 47 loss """marginranking""" +256 47 regularizer """no""" +256 47 optimizer """adam""" +256 47 training_loop """owa""" +256 47 negative_sampler """basic""" +256 47 evaluator """rankbased""" +256 48 dataset """kinships""" +256 48 model """ermlp""" +256 48 loss """marginranking""" +256 48 regularizer """no""" +256 48 optimizer """adam""" +256 48 training_loop """owa""" +256 48 negative_sampler """basic""" +256 48 evaluator """rankbased""" +256 49 dataset """kinships""" +256 49 model """ermlp""" +256 49 loss """marginranking""" +256 49 regularizer """no""" +256 49 optimizer """adam""" +256 49 training_loop """owa""" +256 49 negative_sampler """basic""" +256 49 evaluator """rankbased""" +256 50 dataset """kinships""" +256 50 model """ermlp""" +256 50 loss """marginranking""" +256 50 regularizer """no""" +256 50 optimizer """adam""" +256 50 training_loop """owa""" +256 50 negative_sampler """basic""" +256 50 evaluator """rankbased""" +256 51 dataset """kinships""" +256 51 model """ermlp""" +256 51 loss """marginranking""" +256 51 regularizer """no""" +256 51 optimizer """adam""" +256 51 training_loop """owa""" +256 51 negative_sampler """basic""" +256 51 evaluator """rankbased""" +256 52 dataset """kinships""" +256 52 model """ermlp""" +256 52 loss """marginranking""" +256 52 regularizer """no""" +256 52 optimizer 
"""adam""" +256 52 training_loop """owa""" +256 52 negative_sampler """basic""" +256 52 evaluator """rankbased""" +256 53 dataset """kinships""" +256 53 model """ermlp""" +256 53 loss """marginranking""" +256 53 regularizer """no""" +256 53 optimizer """adam""" +256 53 training_loop """owa""" +256 53 negative_sampler """basic""" +256 53 evaluator """rankbased""" +256 54 dataset """kinships""" +256 54 model """ermlp""" +256 54 loss """marginranking""" +256 54 regularizer """no""" +256 54 optimizer """adam""" +256 54 training_loop """owa""" +256 54 negative_sampler """basic""" +256 54 evaluator """rankbased""" +256 55 dataset """kinships""" +256 55 model """ermlp""" +256 55 loss """marginranking""" +256 55 regularizer """no""" +256 55 optimizer """adam""" +256 55 training_loop """owa""" +256 55 negative_sampler """basic""" +256 55 evaluator """rankbased""" +256 56 dataset """kinships""" +256 56 model """ermlp""" +256 56 loss """marginranking""" +256 56 regularizer """no""" +256 56 optimizer """adam""" +256 56 training_loop """owa""" +256 56 negative_sampler """basic""" +256 56 evaluator """rankbased""" +256 57 dataset """kinships""" +256 57 model """ermlp""" +256 57 loss """marginranking""" +256 57 regularizer """no""" +256 57 optimizer """adam""" +256 57 training_loop """owa""" +256 57 negative_sampler """basic""" +256 57 evaluator """rankbased""" +256 58 dataset """kinships""" +256 58 model """ermlp""" +256 58 loss """marginranking""" +256 58 regularizer """no""" +256 58 optimizer """adam""" +256 58 training_loop """owa""" +256 58 negative_sampler """basic""" +256 58 evaluator """rankbased""" +256 59 dataset """kinships""" +256 59 model """ermlp""" +256 59 loss """marginranking""" +256 59 regularizer """no""" +256 59 optimizer """adam""" +256 59 training_loop """owa""" +256 59 negative_sampler """basic""" +256 59 evaluator """rankbased""" +256 60 dataset """kinships""" +256 60 model """ermlp""" +256 60 loss """marginranking""" +256 60 regularizer """no""" +256 60 
optimizer """adam""" +256 60 training_loop """owa""" +256 60 negative_sampler """basic""" +256 60 evaluator """rankbased""" +256 61 dataset """kinships""" +256 61 model """ermlp""" +256 61 loss """marginranking""" +256 61 regularizer """no""" +256 61 optimizer """adam""" +256 61 training_loop """owa""" +256 61 negative_sampler """basic""" +256 61 evaluator """rankbased""" +256 62 dataset """kinships""" +256 62 model """ermlp""" +256 62 loss """marginranking""" +256 62 regularizer """no""" +256 62 optimizer """adam""" +256 62 training_loop """owa""" +256 62 negative_sampler """basic""" +256 62 evaluator """rankbased""" +256 63 dataset """kinships""" +256 63 model """ermlp""" +256 63 loss """marginranking""" +256 63 regularizer """no""" +256 63 optimizer """adam""" +256 63 training_loop """owa""" +256 63 negative_sampler """basic""" +256 63 evaluator """rankbased""" +256 64 dataset """kinships""" +256 64 model """ermlp""" +256 64 loss """marginranking""" +256 64 regularizer """no""" +256 64 optimizer """adam""" +256 64 training_loop """owa""" +256 64 negative_sampler """basic""" +256 64 evaluator """rankbased""" +256 65 dataset """kinships""" +256 65 model """ermlp""" +256 65 loss """marginranking""" +256 65 regularizer """no""" +256 65 optimizer """adam""" +256 65 training_loop """owa""" +256 65 negative_sampler """basic""" +256 65 evaluator """rankbased""" +256 66 dataset """kinships""" +256 66 model """ermlp""" +256 66 loss """marginranking""" +256 66 regularizer """no""" +256 66 optimizer """adam""" +256 66 training_loop """owa""" +256 66 negative_sampler """basic""" +256 66 evaluator """rankbased""" +256 67 dataset """kinships""" +256 67 model """ermlp""" +256 67 loss """marginranking""" +256 67 regularizer """no""" +256 67 optimizer """adam""" +256 67 training_loop """owa""" +256 67 negative_sampler """basic""" +256 67 evaluator """rankbased""" +256 68 dataset """kinships""" +256 68 model """ermlp""" +256 68 loss """marginranking""" +256 68 regularizer """no""" 
+256 68 optimizer """adam""" +256 68 training_loop """owa""" +256 68 negative_sampler """basic""" +256 68 evaluator """rankbased""" +256 69 dataset """kinships""" +256 69 model """ermlp""" +256 69 loss """marginranking""" +256 69 regularizer """no""" +256 69 optimizer """adam""" +256 69 training_loop """owa""" +256 69 negative_sampler """basic""" +256 69 evaluator """rankbased""" +256 70 dataset """kinships""" +256 70 model """ermlp""" +256 70 loss """marginranking""" +256 70 regularizer """no""" +256 70 optimizer """adam""" +256 70 training_loop """owa""" +256 70 negative_sampler """basic""" +256 70 evaluator """rankbased""" +256 71 dataset """kinships""" +256 71 model """ermlp""" +256 71 loss """marginranking""" +256 71 regularizer """no""" +256 71 optimizer """adam""" +256 71 training_loop """owa""" +256 71 negative_sampler """basic""" +256 71 evaluator """rankbased""" +256 72 dataset """kinships""" +256 72 model """ermlp""" +256 72 loss """marginranking""" +256 72 regularizer """no""" +256 72 optimizer """adam""" +256 72 training_loop """owa""" +256 72 negative_sampler """basic""" +256 72 evaluator """rankbased""" +256 73 dataset """kinships""" +256 73 model """ermlp""" +256 73 loss """marginranking""" +256 73 regularizer """no""" +256 73 optimizer """adam""" +256 73 training_loop """owa""" +256 73 negative_sampler """basic""" +256 73 evaluator """rankbased""" +256 74 dataset """kinships""" +256 74 model """ermlp""" +256 74 loss """marginranking""" +256 74 regularizer """no""" +256 74 optimizer """adam""" +256 74 training_loop """owa""" +256 74 negative_sampler """basic""" +256 74 evaluator """rankbased""" +256 75 dataset """kinships""" +256 75 model """ermlp""" +256 75 loss """marginranking""" +256 75 regularizer """no""" +256 75 optimizer """adam""" +256 75 training_loop """owa""" +256 75 negative_sampler """basic""" +256 75 evaluator """rankbased""" +256 76 dataset """kinships""" +256 76 model """ermlp""" +256 76 loss """marginranking""" +256 76 regularizer 
"""no""" +256 76 optimizer """adam""" +256 76 training_loop """owa""" +256 76 negative_sampler """basic""" +256 76 evaluator """rankbased""" +256 77 dataset """kinships""" +256 77 model """ermlp""" +256 77 loss """marginranking""" +256 77 regularizer """no""" +256 77 optimizer """adam""" +256 77 training_loop """owa""" +256 77 negative_sampler """basic""" +256 77 evaluator """rankbased""" +256 78 dataset """kinships""" +256 78 model """ermlp""" +256 78 loss """marginranking""" +256 78 regularizer """no""" +256 78 optimizer """adam""" +256 78 training_loop """owa""" +256 78 negative_sampler """basic""" +256 78 evaluator """rankbased""" +256 79 dataset """kinships""" +256 79 model """ermlp""" +256 79 loss """marginranking""" +256 79 regularizer """no""" +256 79 optimizer """adam""" +256 79 training_loop """owa""" +256 79 negative_sampler """basic""" +256 79 evaluator """rankbased""" +256 80 dataset """kinships""" +256 80 model """ermlp""" +256 80 loss """marginranking""" +256 80 regularizer """no""" +256 80 optimizer """adam""" +256 80 training_loop """owa""" +256 80 negative_sampler """basic""" +256 80 evaluator """rankbased""" +256 81 dataset """kinships""" +256 81 model """ermlp""" +256 81 loss """marginranking""" +256 81 regularizer """no""" +256 81 optimizer """adam""" +256 81 training_loop """owa""" +256 81 negative_sampler """basic""" +256 81 evaluator """rankbased""" +256 82 dataset """kinships""" +256 82 model """ermlp""" +256 82 loss """marginranking""" +256 82 regularizer """no""" +256 82 optimizer """adam""" +256 82 training_loop """owa""" +256 82 negative_sampler """basic""" +256 82 evaluator """rankbased""" +256 83 dataset """kinships""" +256 83 model """ermlp""" +256 83 loss """marginranking""" +256 83 regularizer """no""" +256 83 optimizer """adam""" +256 83 training_loop """owa""" +256 83 negative_sampler """basic""" +256 83 evaluator """rankbased""" +256 84 dataset """kinships""" +256 84 model """ermlp""" +256 84 loss """marginranking""" +256 84 
regularizer """no""" +256 84 optimizer """adam""" +256 84 training_loop """owa""" +256 84 negative_sampler """basic""" +256 84 evaluator """rankbased""" +256 85 dataset """kinships""" +256 85 model """ermlp""" +256 85 loss """marginranking""" +256 85 regularizer """no""" +256 85 optimizer """adam""" +256 85 training_loop """owa""" +256 85 negative_sampler """basic""" +256 85 evaluator """rankbased""" +256 86 dataset """kinships""" +256 86 model """ermlp""" +256 86 loss """marginranking""" +256 86 regularizer """no""" +256 86 optimizer """adam""" +256 86 training_loop """owa""" +256 86 negative_sampler """basic""" +256 86 evaluator """rankbased""" +256 87 dataset """kinships""" +256 87 model """ermlp""" +256 87 loss """marginranking""" +256 87 regularizer """no""" +256 87 optimizer """adam""" +256 87 training_loop """owa""" +256 87 negative_sampler """basic""" +256 87 evaluator """rankbased""" +256 88 dataset """kinships""" +256 88 model """ermlp""" +256 88 loss """marginranking""" +256 88 regularizer """no""" +256 88 optimizer """adam""" +256 88 training_loop """owa""" +256 88 negative_sampler """basic""" +256 88 evaluator """rankbased""" +256 89 dataset """kinships""" +256 89 model """ermlp""" +256 89 loss """marginranking""" +256 89 regularizer """no""" +256 89 optimizer """adam""" +256 89 training_loop """owa""" +256 89 negative_sampler """basic""" +256 89 evaluator """rankbased""" +256 90 dataset """kinships""" +256 90 model """ermlp""" +256 90 loss """marginranking""" +256 90 regularizer """no""" +256 90 optimizer """adam""" +256 90 training_loop """owa""" +256 90 negative_sampler """basic""" +256 90 evaluator """rankbased""" +256 91 dataset """kinships""" +256 91 model """ermlp""" +256 91 loss """marginranking""" +256 91 regularizer """no""" +256 91 optimizer """adam""" +256 91 training_loop """owa""" +256 91 negative_sampler """basic""" +256 91 evaluator """rankbased""" +256 92 dataset """kinships""" +256 92 model """ermlp""" +256 92 loss """marginranking""" 
+256 92 regularizer """no""" +256 92 optimizer """adam""" +256 92 training_loop """owa""" +256 92 negative_sampler """basic""" +256 92 evaluator """rankbased""" +256 93 dataset """kinships""" +256 93 model """ermlp""" +256 93 loss """marginranking""" +256 93 regularizer """no""" +256 93 optimizer """adam""" +256 93 training_loop """owa""" +256 93 negative_sampler """basic""" +256 93 evaluator """rankbased""" +256 94 dataset """kinships""" +256 94 model """ermlp""" +256 94 loss """marginranking""" +256 94 regularizer """no""" +256 94 optimizer """adam""" +256 94 training_loop """owa""" +256 94 negative_sampler """basic""" +256 94 evaluator """rankbased""" +256 95 dataset """kinships""" +256 95 model """ermlp""" +256 95 loss """marginranking""" +256 95 regularizer """no""" +256 95 optimizer """adam""" +256 95 training_loop """owa""" +256 95 negative_sampler """basic""" +256 95 evaluator """rankbased""" +256 96 dataset """kinships""" +256 96 model """ermlp""" +256 96 loss """marginranking""" +256 96 regularizer """no""" +256 96 optimizer """adam""" +256 96 training_loop """owa""" +256 96 negative_sampler """basic""" +256 96 evaluator """rankbased""" +256 97 dataset """kinships""" +256 97 model """ermlp""" +256 97 loss """marginranking""" +256 97 regularizer """no""" +256 97 optimizer """adam""" +256 97 training_loop """owa""" +256 97 negative_sampler """basic""" +256 97 evaluator """rankbased""" +256 98 dataset """kinships""" +256 98 model """ermlp""" +256 98 loss """marginranking""" +256 98 regularizer """no""" +256 98 optimizer """adam""" +256 98 training_loop """owa""" +256 98 negative_sampler """basic""" +256 98 evaluator """rankbased""" +256 99 dataset """kinships""" +256 99 model """ermlp""" +256 99 loss """marginranking""" +256 99 regularizer """no""" +256 99 optimizer """adam""" +256 99 training_loop """owa""" +256 99 negative_sampler """basic""" +256 99 evaluator """rankbased""" +256 100 dataset """kinships""" +256 100 model """ermlp""" +256 100 loss 
"""marginranking""" +256 100 regularizer """no""" +256 100 optimizer """adam""" +256 100 training_loop """owa""" +256 100 negative_sampler """basic""" +256 100 evaluator """rankbased""" +257 1 model.embedding_dim 2.0 +257 1 loss.margin 0.9889642953587385 +257 1 optimizer.lr 0.015252110465876798 +257 1 negative_sampler.num_negs_per_pos 83.0 +257 1 training.batch_size 0.0 +257 2 model.embedding_dim 2.0 +257 2 loss.margin 0.5206690146371338 +257 2 optimizer.lr 0.052866605621899085 +257 2 negative_sampler.num_negs_per_pos 94.0 +257 2 training.batch_size 0.0 +257 3 model.embedding_dim 0.0 +257 3 loss.margin 0.6210814944917659 +257 3 optimizer.lr 0.008112347833692237 +257 3 negative_sampler.num_negs_per_pos 82.0 +257 3 training.batch_size 1.0 +257 4 model.embedding_dim 2.0 +257 4 loss.margin 8.032977835785747 +257 4 optimizer.lr 0.011671730990530479 +257 4 negative_sampler.num_negs_per_pos 49.0 +257 4 training.batch_size 1.0 +257 5 model.embedding_dim 0.0 +257 5 loss.margin 6.437483931158122 +257 5 optimizer.lr 0.006504267513307687 +257 5 negative_sampler.num_negs_per_pos 41.0 +257 5 training.batch_size 2.0 +257 6 model.embedding_dim 0.0 +257 6 loss.margin 4.543398959063169 +257 6 optimizer.lr 0.060452125771544865 +257 6 negative_sampler.num_negs_per_pos 61.0 +257 6 training.batch_size 2.0 +257 7 model.embedding_dim 0.0 +257 7 loss.margin 0.9249140774570123 +257 7 optimizer.lr 0.0039611890048551765 +257 7 negative_sampler.num_negs_per_pos 66.0 +257 7 training.batch_size 2.0 +257 8 model.embedding_dim 2.0 +257 8 loss.margin 4.538336887276408 +257 8 optimizer.lr 0.0234460203909797 +257 8 negative_sampler.num_negs_per_pos 59.0 +257 8 training.batch_size 0.0 +257 9 model.embedding_dim 0.0 +257 9 loss.margin 4.657275505049392 +257 9 optimizer.lr 0.06288581711268977 +257 9 negative_sampler.num_negs_per_pos 73.0 +257 9 training.batch_size 0.0 +257 10 model.embedding_dim 0.0 +257 10 loss.margin 7.036671543038494 +257 10 optimizer.lr 0.08483834659552184 +257 10 
negative_sampler.num_negs_per_pos 77.0 +257 10 training.batch_size 0.0 +257 11 model.embedding_dim 0.0 +257 11 loss.margin 6.546497855839199 +257 11 optimizer.lr 0.0023099605987830156 +257 11 negative_sampler.num_negs_per_pos 93.0 +257 11 training.batch_size 1.0 +257 12 model.embedding_dim 2.0 +257 12 loss.margin 3.963291734280864 +257 12 optimizer.lr 0.010317220444001138 +257 12 negative_sampler.num_negs_per_pos 7.0 +257 12 training.batch_size 1.0 +257 13 model.embedding_dim 1.0 +257 13 loss.margin 6.7158742937208515 +257 13 optimizer.lr 0.0016929427276007264 +257 13 negative_sampler.num_negs_per_pos 92.0 +257 13 training.batch_size 1.0 +257 14 model.embedding_dim 0.0 +257 14 loss.margin 6.125987425681492 +257 14 optimizer.lr 0.06256039436720624 +257 14 negative_sampler.num_negs_per_pos 54.0 +257 14 training.batch_size 0.0 +257 15 model.embedding_dim 2.0 +257 15 loss.margin 6.379061043370029 +257 15 optimizer.lr 0.007903192271409613 +257 15 negative_sampler.num_negs_per_pos 80.0 +257 15 training.batch_size 1.0 +257 16 model.embedding_dim 1.0 +257 16 loss.margin 9.32818348616973 +257 16 optimizer.lr 0.001560064796072505 +257 16 negative_sampler.num_negs_per_pos 4.0 +257 16 training.batch_size 2.0 +257 17 model.embedding_dim 1.0 +257 17 loss.margin 2.7308806493387707 +257 17 optimizer.lr 0.06569249108274534 +257 17 negative_sampler.num_negs_per_pos 8.0 +257 17 training.batch_size 1.0 +257 18 model.embedding_dim 1.0 +257 18 loss.margin 3.8272239666992687 +257 18 optimizer.lr 0.05426273527757145 +257 18 negative_sampler.num_negs_per_pos 63.0 +257 18 training.batch_size 1.0 +257 19 model.embedding_dim 0.0 +257 19 loss.margin 9.660555885434166 +257 19 optimizer.lr 0.0013441421844593425 +257 19 negative_sampler.num_negs_per_pos 69.0 +257 19 training.batch_size 2.0 +257 20 model.embedding_dim 2.0 +257 20 loss.margin 9.859084288803125 +257 20 optimizer.lr 0.018244867008602273 +257 20 negative_sampler.num_negs_per_pos 82.0 +257 20 training.batch_size 2.0 +257 21 
model.embedding_dim 0.0 +257 21 loss.margin 9.767582693358237 +257 21 optimizer.lr 0.0043163963061952455 +257 21 negative_sampler.num_negs_per_pos 64.0 +257 21 training.batch_size 2.0 +257 22 model.embedding_dim 0.0 +257 22 loss.margin 4.270802414683715 +257 22 optimizer.lr 0.005525113715899118 +257 22 negative_sampler.num_negs_per_pos 94.0 +257 22 training.batch_size 2.0 +257 23 model.embedding_dim 2.0 +257 23 loss.margin 6.35788714625362 +257 23 optimizer.lr 0.05886657154074251 +257 23 negative_sampler.num_negs_per_pos 93.0 +257 23 training.batch_size 2.0 +257 24 model.embedding_dim 2.0 +257 24 loss.margin 5.988963092146966 +257 24 optimizer.lr 0.05847272011150113 +257 24 negative_sampler.num_negs_per_pos 62.0 +257 24 training.batch_size 2.0 +257 25 model.embedding_dim 0.0 +257 25 loss.margin 6.180970118548182 +257 25 optimizer.lr 0.006332835171128549 +257 25 negative_sampler.num_negs_per_pos 94.0 +257 25 training.batch_size 2.0 +257 26 model.embedding_dim 1.0 +257 26 loss.margin 8.09211175401185 +257 26 optimizer.lr 0.003036184219600769 +257 26 negative_sampler.num_negs_per_pos 5.0 +257 26 training.batch_size 0.0 +257 27 model.embedding_dim 1.0 +257 27 loss.margin 6.658815932539543 +257 27 optimizer.lr 0.008734402997148413 +257 27 negative_sampler.num_negs_per_pos 48.0 +257 27 training.batch_size 0.0 +257 28 model.embedding_dim 0.0 +257 28 loss.margin 1.4902927509715909 +257 28 optimizer.lr 0.01747465897520393 +257 28 negative_sampler.num_negs_per_pos 70.0 +257 28 training.batch_size 1.0 +257 29 model.embedding_dim 2.0 +257 29 loss.margin 9.285337871798815 +257 29 optimizer.lr 0.03908019200655942 +257 29 negative_sampler.num_negs_per_pos 86.0 +257 29 training.batch_size 1.0 +257 30 model.embedding_dim 1.0 +257 30 loss.margin 1.6204955920237878 +257 30 optimizer.lr 0.05173621680246538 +257 30 negative_sampler.num_negs_per_pos 69.0 +257 30 training.batch_size 1.0 +257 31 model.embedding_dim 1.0 +257 31 loss.margin 8.600881662543248 +257 31 optimizer.lr 
0.056274434085209886 +257 31 negative_sampler.num_negs_per_pos 33.0 +257 31 training.batch_size 1.0 +257 32 model.embedding_dim 2.0 +257 32 loss.margin 1.503433167214444 +257 32 optimizer.lr 0.00384590694332097 +257 32 negative_sampler.num_negs_per_pos 73.0 +257 32 training.batch_size 1.0 +257 33 model.embedding_dim 0.0 +257 33 loss.margin 7.5937656821959125 +257 33 optimizer.lr 0.006781394302141551 +257 33 negative_sampler.num_negs_per_pos 60.0 +257 33 training.batch_size 1.0 +257 34 model.embedding_dim 1.0 +257 34 loss.margin 3.071217819639962 +257 34 optimizer.lr 0.03192739254781749 +257 34 negative_sampler.num_negs_per_pos 49.0 +257 34 training.batch_size 1.0 +257 35 model.embedding_dim 0.0 +257 35 loss.margin 1.5048588250316928 +257 35 optimizer.lr 0.003075841429930805 +257 35 negative_sampler.num_negs_per_pos 54.0 +257 35 training.batch_size 1.0 +257 36 model.embedding_dim 0.0 +257 36 loss.margin 6.914717506186935 +257 36 optimizer.lr 0.004384859126988273 +257 36 negative_sampler.num_negs_per_pos 66.0 +257 36 training.batch_size 0.0 +257 37 model.embedding_dim 2.0 +257 37 loss.margin 5.323362592716828 +257 37 optimizer.lr 0.0023637835455469474 +257 37 negative_sampler.num_negs_per_pos 78.0 +257 37 training.batch_size 2.0 +257 38 model.embedding_dim 2.0 +257 38 loss.margin 0.885118917430668 +257 38 optimizer.lr 0.0769766526333334 +257 38 negative_sampler.num_negs_per_pos 33.0 +257 38 training.batch_size 2.0 +257 39 model.embedding_dim 0.0 +257 39 loss.margin 2.3514244241461544 +257 39 optimizer.lr 0.004565648211703496 +257 39 negative_sampler.num_negs_per_pos 8.0 +257 39 training.batch_size 0.0 +257 40 model.embedding_dim 1.0 +257 40 loss.margin 7.9797028172722415 +257 40 optimizer.lr 0.012729698832922152 +257 40 negative_sampler.num_negs_per_pos 32.0 +257 40 training.batch_size 0.0 +257 41 model.embedding_dim 1.0 +257 41 loss.margin 1.6348811001405505 +257 41 optimizer.lr 0.08089715457214017 +257 41 negative_sampler.num_negs_per_pos 80.0 +257 41 
training.batch_size 2.0 +257 42 model.embedding_dim 1.0 +257 42 loss.margin 8.474323460028934 +257 42 optimizer.lr 0.007382020730005403 +257 42 negative_sampler.num_negs_per_pos 51.0 +257 42 training.batch_size 1.0 +257 43 model.embedding_dim 2.0 +257 43 loss.margin 7.441575038624952 +257 43 optimizer.lr 0.029470424839716215 +257 43 negative_sampler.num_negs_per_pos 28.0 +257 43 training.batch_size 1.0 +257 44 model.embedding_dim 2.0 +257 44 loss.margin 3.739601176441541 +257 44 optimizer.lr 0.03620302409077953 +257 44 negative_sampler.num_negs_per_pos 9.0 +257 44 training.batch_size 1.0 +257 45 model.embedding_dim 0.0 +257 45 loss.margin 2.441113579129737 +257 45 optimizer.lr 0.015793446422871105 +257 45 negative_sampler.num_negs_per_pos 49.0 +257 45 training.batch_size 2.0 +257 46 model.embedding_dim 2.0 +257 46 loss.margin 2.7226970552578624 +257 46 optimizer.lr 0.0032511842303929025 +257 46 negative_sampler.num_negs_per_pos 81.0 +257 46 training.batch_size 1.0 +257 47 model.embedding_dim 1.0 +257 47 loss.margin 7.429663613696677 +257 47 optimizer.lr 0.0015864655922839704 +257 47 negative_sampler.num_negs_per_pos 26.0 +257 47 training.batch_size 2.0 +257 48 model.embedding_dim 1.0 +257 48 loss.margin 7.253451038829006 +257 48 optimizer.lr 0.0015941614346452536 +257 48 negative_sampler.num_negs_per_pos 65.0 +257 48 training.batch_size 1.0 +257 49 model.embedding_dim 1.0 +257 49 loss.margin 5.079035163223177 +257 49 optimizer.lr 0.004966996646100352 +257 49 negative_sampler.num_negs_per_pos 4.0 +257 49 training.batch_size 2.0 +257 50 model.embedding_dim 2.0 +257 50 loss.margin 5.376463260703627 +257 50 optimizer.lr 0.023820904368791432 +257 50 negative_sampler.num_negs_per_pos 86.0 +257 50 training.batch_size 0.0 +257 51 model.embedding_dim 1.0 +257 51 loss.margin 4.493200021326454 +257 51 optimizer.lr 0.0073807436509597665 +257 51 negative_sampler.num_negs_per_pos 31.0 +257 51 training.batch_size 0.0 +257 52 model.embedding_dim 2.0 +257 52 loss.margin 
6.859903481183177 +257 52 optimizer.lr 0.01093963458807883 +257 52 negative_sampler.num_negs_per_pos 84.0 +257 52 training.batch_size 0.0 +257 53 model.embedding_dim 2.0 +257 53 loss.margin 2.477874431167616 +257 53 optimizer.lr 0.055158413423457814 +257 53 negative_sampler.num_negs_per_pos 28.0 +257 53 training.batch_size 0.0 +257 54 model.embedding_dim 1.0 +257 54 loss.margin 7.169677614497222 +257 54 optimizer.lr 0.013807982115594678 +257 54 negative_sampler.num_negs_per_pos 9.0 +257 54 training.batch_size 0.0 +257 55 model.embedding_dim 2.0 +257 55 loss.margin 1.869251705742876 +257 55 optimizer.lr 0.0074555260952766395 +257 55 negative_sampler.num_negs_per_pos 65.0 +257 55 training.batch_size 1.0 +257 56 model.embedding_dim 0.0 +257 56 loss.margin 7.3398398717077935 +257 56 optimizer.lr 0.004011590688185162 +257 56 negative_sampler.num_negs_per_pos 66.0 +257 56 training.batch_size 0.0 +257 57 model.embedding_dim 2.0 +257 57 loss.margin 6.389740396834784 +257 57 optimizer.lr 0.012505276953145437 +257 57 negative_sampler.num_negs_per_pos 40.0 +257 57 training.batch_size 0.0 +257 58 model.embedding_dim 0.0 +257 58 loss.margin 9.440409439942806 +257 58 optimizer.lr 0.06961350916172525 +257 58 negative_sampler.num_negs_per_pos 7.0 +257 58 training.batch_size 1.0 +257 59 model.embedding_dim 0.0 +257 59 loss.margin 4.348109583661585 +257 59 optimizer.lr 0.004199087244477806 +257 59 negative_sampler.num_negs_per_pos 83.0 +257 59 training.batch_size 0.0 +257 60 model.embedding_dim 0.0 +257 60 loss.margin 5.071164270930274 +257 60 optimizer.lr 0.02407417492790682 +257 60 negative_sampler.num_negs_per_pos 27.0 +257 60 training.batch_size 2.0 +257 61 model.embedding_dim 1.0 +257 61 loss.margin 8.075755890447555 +257 61 optimizer.lr 0.0027460396756100124 +257 61 negative_sampler.num_negs_per_pos 90.0 +257 61 training.batch_size 0.0 +257 62 model.embedding_dim 0.0 +257 62 loss.margin 9.261740234563947 +257 62 optimizer.lr 0.09702161689068638 +257 62 
negative_sampler.num_negs_per_pos 31.0 +257 62 training.batch_size 1.0 +257 63 model.embedding_dim 0.0 +257 63 loss.margin 8.572339582721055 +257 63 optimizer.lr 0.021601522410448472 +257 63 negative_sampler.num_negs_per_pos 97.0 +257 63 training.batch_size 0.0 +257 64 model.embedding_dim 0.0 +257 64 loss.margin 6.361467865355339 +257 64 optimizer.lr 0.0013184625434567684 +257 64 negative_sampler.num_negs_per_pos 46.0 +257 64 training.batch_size 1.0 +257 65 model.embedding_dim 1.0 +257 65 loss.margin 9.337977994136546 +257 65 optimizer.lr 0.0096269686604141 +257 65 negative_sampler.num_negs_per_pos 78.0 +257 65 training.batch_size 0.0 +257 66 model.embedding_dim 2.0 +257 66 loss.margin 1.0911699856901282 +257 66 optimizer.lr 0.060656309933421616 +257 66 negative_sampler.num_negs_per_pos 14.0 +257 66 training.batch_size 0.0 +257 67 model.embedding_dim 1.0 +257 67 loss.margin 8.456828133867955 +257 67 optimizer.lr 0.017545571462402237 +257 67 negative_sampler.num_negs_per_pos 19.0 +257 67 training.batch_size 0.0 +257 68 model.embedding_dim 1.0 +257 68 loss.margin 2.1017972068275865 +257 68 optimizer.lr 0.015222586870497167 +257 68 negative_sampler.num_negs_per_pos 79.0 +257 68 training.batch_size 1.0 +257 69 model.embedding_dim 0.0 +257 69 loss.margin 6.404692805451666 +257 69 optimizer.lr 0.0011871392548451303 +257 69 negative_sampler.num_negs_per_pos 26.0 +257 69 training.batch_size 0.0 +257 70 model.embedding_dim 1.0 +257 70 loss.margin 6.317522504637088 +257 70 optimizer.lr 0.08258636891044996 +257 70 negative_sampler.num_negs_per_pos 42.0 +257 70 training.batch_size 2.0 +257 71 model.embedding_dim 0.0 +257 71 loss.margin 1.148686717962719 +257 71 optimizer.lr 0.04277567765851746 +257 71 negative_sampler.num_negs_per_pos 38.0 +257 71 training.batch_size 2.0 +257 72 model.embedding_dim 2.0 +257 72 loss.margin 3.9411029181866257 +257 72 optimizer.lr 0.0018993578083069925 +257 72 negative_sampler.num_negs_per_pos 79.0 +257 72 training.batch_size 1.0 +257 73 
model.embedding_dim 0.0 +257 73 loss.margin 8.174012570013694 +257 73 optimizer.lr 0.024643715173263615 +257 73 negative_sampler.num_negs_per_pos 64.0 +257 73 training.batch_size 1.0 +257 74 model.embedding_dim 0.0 +257 74 loss.margin 9.55402911005034 +257 74 optimizer.lr 0.0010650944901779856 +257 74 negative_sampler.num_negs_per_pos 5.0 +257 74 training.batch_size 2.0 +257 75 model.embedding_dim 1.0 +257 75 loss.margin 8.960541747997846 +257 75 optimizer.lr 0.04089293384547229 +257 75 negative_sampler.num_negs_per_pos 27.0 +257 75 training.batch_size 1.0 +257 76 model.embedding_dim 1.0 +257 76 loss.margin 9.974839545238824 +257 76 optimizer.lr 0.002966841576738862 +257 76 negative_sampler.num_negs_per_pos 13.0 +257 76 training.batch_size 1.0 +257 77 model.embedding_dim 0.0 +257 77 loss.margin 6.53108470644421 +257 77 optimizer.lr 0.04113621212162511 +257 77 negative_sampler.num_negs_per_pos 83.0 +257 77 training.batch_size 2.0 +257 78 model.embedding_dim 0.0 +257 78 loss.margin 7.838174564730358 +257 78 optimizer.lr 0.013155436449322166 +257 78 negative_sampler.num_negs_per_pos 20.0 +257 78 training.batch_size 0.0 +257 79 model.embedding_dim 0.0 +257 79 loss.margin 7.500776256935386 +257 79 optimizer.lr 0.025625602500030892 +257 79 negative_sampler.num_negs_per_pos 15.0 +257 79 training.batch_size 2.0 +257 80 model.embedding_dim 0.0 +257 80 loss.margin 1.126259645175441 +257 80 optimizer.lr 0.08710049726475495 +257 80 negative_sampler.num_negs_per_pos 9.0 +257 80 training.batch_size 0.0 +257 81 model.embedding_dim 0.0 +257 81 loss.margin 9.752245781484868 +257 81 optimizer.lr 0.004253151479069181 +257 81 negative_sampler.num_negs_per_pos 30.0 +257 81 training.batch_size 0.0 +257 82 model.embedding_dim 2.0 +257 82 loss.margin 4.655111754797379 +257 82 optimizer.lr 0.09025878827844713 +257 82 negative_sampler.num_negs_per_pos 5.0 +257 82 training.batch_size 1.0 +257 83 model.embedding_dim 0.0 +257 83 loss.margin 9.481139622213272 +257 83 optimizer.lr 
0.008902929470234739 +257 83 negative_sampler.num_negs_per_pos 88.0 +257 83 training.batch_size 0.0 +257 84 model.embedding_dim 1.0 +257 84 loss.margin 9.86487407580614 +257 84 optimizer.lr 0.06875293392920002 +257 84 negative_sampler.num_negs_per_pos 85.0 +257 84 training.batch_size 1.0 +257 85 model.embedding_dim 1.0 +257 85 loss.margin 6.639708125337972 +257 85 optimizer.lr 0.01865987883499027 +257 85 negative_sampler.num_negs_per_pos 35.0 +257 85 training.batch_size 2.0 +257 86 model.embedding_dim 1.0 +257 86 loss.margin 3.479040740722338 +257 86 optimizer.lr 0.01156627188129336 +257 86 negative_sampler.num_negs_per_pos 79.0 +257 86 training.batch_size 2.0 +257 87 model.embedding_dim 0.0 +257 87 loss.margin 2.7893171004455892 +257 87 optimizer.lr 0.0035002459272958472 +257 87 negative_sampler.num_negs_per_pos 39.0 +257 87 training.batch_size 1.0 +257 88 model.embedding_dim 2.0 +257 88 loss.margin 4.384665482482637 +257 88 optimizer.lr 0.06307613349060207 +257 88 negative_sampler.num_negs_per_pos 73.0 +257 88 training.batch_size 1.0 +257 89 model.embedding_dim 0.0 +257 89 loss.margin 0.5325223840632878 +257 89 optimizer.lr 0.003768540832249108 +257 89 negative_sampler.num_negs_per_pos 31.0 +257 89 training.batch_size 1.0 +257 90 model.embedding_dim 0.0 +257 90 loss.margin 1.026611295440705 +257 90 optimizer.lr 0.0017380876710550686 +257 90 negative_sampler.num_negs_per_pos 94.0 +257 90 training.batch_size 0.0 +257 91 model.embedding_dim 2.0 +257 91 loss.margin 3.4223358420261665 +257 91 optimizer.lr 0.008091200216204356 +257 91 negative_sampler.num_negs_per_pos 60.0 +257 91 training.batch_size 0.0 +257 92 model.embedding_dim 0.0 +257 92 loss.margin 0.7293455188764544 +257 92 optimizer.lr 0.053818293462798344 +257 92 negative_sampler.num_negs_per_pos 61.0 +257 92 training.batch_size 0.0 +257 93 model.embedding_dim 0.0 +257 93 loss.margin 3.3731652301432433 +257 93 optimizer.lr 0.010042154081475758 +257 93 negative_sampler.num_negs_per_pos 8.0 +257 93 
training.batch_size 2.0 +257 94 model.embedding_dim 2.0 +257 94 loss.margin 7.817746448343804 +257 94 optimizer.lr 0.02357159464738074 +257 94 negative_sampler.num_negs_per_pos 80.0 +257 94 training.batch_size 1.0 +257 95 model.embedding_dim 2.0 +257 95 loss.margin 4.270870291722115 +257 95 optimizer.lr 0.0901456390536227 +257 95 negative_sampler.num_negs_per_pos 46.0 +257 95 training.batch_size 1.0 +257 96 model.embedding_dim 2.0 +257 96 loss.margin 2.4061823075728066 +257 96 optimizer.lr 0.044383270259554394 +257 96 negative_sampler.num_negs_per_pos 47.0 +257 96 training.batch_size 1.0 +257 97 model.embedding_dim 0.0 +257 97 loss.margin 8.254185710717493 +257 97 optimizer.lr 0.060461122348252634 +257 97 negative_sampler.num_negs_per_pos 3.0 +257 97 training.batch_size 1.0 +257 98 model.embedding_dim 1.0 +257 98 loss.margin 7.467618982306168 +257 98 optimizer.lr 0.08126311905712182 +257 98 negative_sampler.num_negs_per_pos 28.0 +257 98 training.batch_size 0.0 +257 99 model.embedding_dim 2.0 +257 99 loss.margin 6.388066927819748 +257 99 optimizer.lr 0.008895412408975787 +257 99 negative_sampler.num_negs_per_pos 51.0 +257 99 training.batch_size 2.0 +257 100 model.embedding_dim 1.0 +257 100 loss.margin 9.795704450821338 +257 100 optimizer.lr 0.03559890943560432 +257 100 negative_sampler.num_negs_per_pos 4.0 +257 100 training.batch_size 1.0 +257 1 dataset """kinships""" +257 1 model """ermlp""" +257 1 loss """marginranking""" +257 1 regularizer """no""" +257 1 optimizer """adam""" +257 1 training_loop """owa""" +257 1 negative_sampler """basic""" +257 1 evaluator """rankbased""" +257 2 dataset """kinships""" +257 2 model """ermlp""" +257 2 loss """marginranking""" +257 2 regularizer """no""" +257 2 optimizer """adam""" +257 2 training_loop """owa""" +257 2 negative_sampler """basic""" +257 2 evaluator """rankbased""" +257 3 dataset """kinships""" +257 3 model """ermlp""" +257 3 loss """marginranking""" +257 3 regularizer """no""" +257 3 optimizer """adam""" +257 3 
training_loop """owa""" +257 3 negative_sampler """basic""" +257 3 evaluator """rankbased""" +257 4 dataset """kinships""" +257 4 model """ermlp""" +257 4 loss """marginranking""" +257 4 regularizer """no""" +257 4 optimizer """adam""" +257 4 training_loop """owa""" +257 4 negative_sampler """basic""" +257 4 evaluator """rankbased""" +257 5 dataset """kinships""" +257 5 model """ermlp""" +257 5 loss """marginranking""" +257 5 regularizer """no""" +257 5 optimizer """adam""" +257 5 training_loop """owa""" +257 5 negative_sampler """basic""" +257 5 evaluator """rankbased""" +257 6 dataset """kinships""" +257 6 model """ermlp""" +257 6 loss """marginranking""" +257 6 regularizer """no""" +257 6 optimizer """adam""" +257 6 training_loop """owa""" +257 6 negative_sampler """basic""" +257 6 evaluator """rankbased""" +257 7 dataset """kinships""" +257 7 model """ermlp""" +257 7 loss """marginranking""" +257 7 regularizer """no""" +257 7 optimizer """adam""" +257 7 training_loop """owa""" +257 7 negative_sampler """basic""" +257 7 evaluator """rankbased""" +257 8 dataset """kinships""" +257 8 model """ermlp""" +257 8 loss """marginranking""" +257 8 regularizer """no""" +257 8 optimizer """adam""" +257 8 training_loop """owa""" +257 8 negative_sampler """basic""" +257 8 evaluator """rankbased""" +257 9 dataset """kinships""" +257 9 model """ermlp""" +257 9 loss """marginranking""" +257 9 regularizer """no""" +257 9 optimizer """adam""" +257 9 training_loop """owa""" +257 9 negative_sampler """basic""" +257 9 evaluator """rankbased""" +257 10 dataset """kinships""" +257 10 model """ermlp""" +257 10 loss """marginranking""" +257 10 regularizer """no""" +257 10 optimizer """adam""" +257 10 training_loop """owa""" +257 10 negative_sampler """basic""" +257 10 evaluator """rankbased""" +257 11 dataset """kinships""" +257 11 model """ermlp""" +257 11 loss """marginranking""" +257 11 regularizer """no""" +257 11 optimizer """adam""" +257 11 training_loop """owa""" +257 11 
negative_sampler """basic""" +257 11 evaluator """rankbased""" +257 12 dataset """kinships""" +257 12 model """ermlp""" +257 12 loss """marginranking""" +257 12 regularizer """no""" +257 12 optimizer """adam""" +257 12 training_loop """owa""" +257 12 negative_sampler """basic""" +257 12 evaluator """rankbased""" +257 13 dataset """kinships""" +257 13 model """ermlp""" +257 13 loss """marginranking""" +257 13 regularizer """no""" +257 13 optimizer """adam""" +257 13 training_loop """owa""" +257 13 negative_sampler """basic""" +257 13 evaluator """rankbased""" +257 14 dataset """kinships""" +257 14 model """ermlp""" +257 14 loss """marginranking""" +257 14 regularizer """no""" +257 14 optimizer """adam""" +257 14 training_loop """owa""" +257 14 negative_sampler """basic""" +257 14 evaluator """rankbased""" +257 15 dataset """kinships""" +257 15 model """ermlp""" +257 15 loss """marginranking""" +257 15 regularizer """no""" +257 15 optimizer """adam""" +257 15 training_loop """owa""" +257 15 negative_sampler """basic""" +257 15 evaluator """rankbased""" +257 16 dataset """kinships""" +257 16 model """ermlp""" +257 16 loss """marginranking""" +257 16 regularizer """no""" +257 16 optimizer """adam""" +257 16 training_loop """owa""" +257 16 negative_sampler """basic""" +257 16 evaluator """rankbased""" +257 17 dataset """kinships""" +257 17 model """ermlp""" +257 17 loss """marginranking""" +257 17 regularizer """no""" +257 17 optimizer """adam""" +257 17 training_loop """owa""" +257 17 negative_sampler """basic""" +257 17 evaluator """rankbased""" +257 18 dataset """kinships""" +257 18 model """ermlp""" +257 18 loss """marginranking""" +257 18 regularizer """no""" +257 18 optimizer """adam""" +257 18 training_loop """owa""" +257 18 negative_sampler """basic""" +257 18 evaluator """rankbased""" +257 19 dataset """kinships""" +257 19 model """ermlp""" +257 19 loss """marginranking""" +257 19 regularizer """no""" +257 19 optimizer """adam""" +257 19 training_loop """owa""" 
+257 19 negative_sampler """basic""" +257 19 evaluator """rankbased""" +257 20 dataset """kinships""" +257 20 model """ermlp""" +257 20 loss """marginranking""" +257 20 regularizer """no""" +257 20 optimizer """adam""" +257 20 training_loop """owa""" +257 20 negative_sampler """basic""" +257 20 evaluator """rankbased""" +257 21 dataset """kinships""" +257 21 model """ermlp""" +257 21 loss """marginranking""" +257 21 regularizer """no""" +257 21 optimizer """adam""" +257 21 training_loop """owa""" +257 21 negative_sampler """basic""" +257 21 evaluator """rankbased""" +257 22 dataset """kinships""" +257 22 model """ermlp""" +257 22 loss """marginranking""" +257 22 regularizer """no""" +257 22 optimizer """adam""" +257 22 training_loop """owa""" +257 22 negative_sampler """basic""" +257 22 evaluator """rankbased""" +257 23 dataset """kinships""" +257 23 model """ermlp""" +257 23 loss """marginranking""" +257 23 regularizer """no""" +257 23 optimizer """adam""" +257 23 training_loop """owa""" +257 23 negative_sampler """basic""" +257 23 evaluator """rankbased""" +257 24 dataset """kinships""" +257 24 model """ermlp""" +257 24 loss """marginranking""" +257 24 regularizer """no""" +257 24 optimizer """adam""" +257 24 training_loop """owa""" +257 24 negative_sampler """basic""" +257 24 evaluator """rankbased""" +257 25 dataset """kinships""" +257 25 model """ermlp""" +257 25 loss """marginranking""" +257 25 regularizer """no""" +257 25 optimizer """adam""" +257 25 training_loop """owa""" +257 25 negative_sampler """basic""" +257 25 evaluator """rankbased""" +257 26 dataset """kinships""" +257 26 model """ermlp""" +257 26 loss """marginranking""" +257 26 regularizer """no""" +257 26 optimizer """adam""" +257 26 training_loop """owa""" +257 26 negative_sampler """basic""" +257 26 evaluator """rankbased""" +257 27 dataset """kinships""" +257 27 model """ermlp""" +257 27 loss """marginranking""" +257 27 regularizer """no""" +257 27 optimizer """adam""" +257 27 training_loop 
"""owa""" +257 27 negative_sampler """basic""" +257 27 evaluator """rankbased""" +257 28 dataset """kinships""" +257 28 model """ermlp""" +257 28 loss """marginranking""" +257 28 regularizer """no""" +257 28 optimizer """adam""" +257 28 training_loop """owa""" +257 28 negative_sampler """basic""" +257 28 evaluator """rankbased""" +257 29 dataset """kinships""" +257 29 model """ermlp""" +257 29 loss """marginranking""" +257 29 regularizer """no""" +257 29 optimizer """adam""" +257 29 training_loop """owa""" +257 29 negative_sampler """basic""" +257 29 evaluator """rankbased""" +257 30 dataset """kinships""" +257 30 model """ermlp""" +257 30 loss """marginranking""" +257 30 regularizer """no""" +257 30 optimizer """adam""" +257 30 training_loop """owa""" +257 30 negative_sampler """basic""" +257 30 evaluator """rankbased""" +257 31 dataset """kinships""" +257 31 model """ermlp""" +257 31 loss """marginranking""" +257 31 regularizer """no""" +257 31 optimizer """adam""" +257 31 training_loop """owa""" +257 31 negative_sampler """basic""" +257 31 evaluator """rankbased""" +257 32 dataset """kinships""" +257 32 model """ermlp""" +257 32 loss """marginranking""" +257 32 regularizer """no""" +257 32 optimizer """adam""" +257 32 training_loop """owa""" +257 32 negative_sampler """basic""" +257 32 evaluator """rankbased""" +257 33 dataset """kinships""" +257 33 model """ermlp""" +257 33 loss """marginranking""" +257 33 regularizer """no""" +257 33 optimizer """adam""" +257 33 training_loop """owa""" +257 33 negative_sampler """basic""" +257 33 evaluator """rankbased""" +257 34 dataset """kinships""" +257 34 model """ermlp""" +257 34 loss """marginranking""" +257 34 regularizer """no""" +257 34 optimizer """adam""" +257 34 training_loop """owa""" +257 34 negative_sampler """basic""" +257 34 evaluator """rankbased""" +257 35 dataset """kinships""" +257 35 model """ermlp""" +257 35 loss """marginranking""" +257 35 regularizer """no""" +257 35 optimizer """adam""" +257 35 
training_loop """owa""" +257 35 negative_sampler """basic""" +257 35 evaluator """rankbased""" +257 36 dataset """kinships""" +257 36 model """ermlp""" +257 36 loss """marginranking""" +257 36 regularizer """no""" +257 36 optimizer """adam""" +257 36 training_loop """owa""" +257 36 negative_sampler """basic""" +257 36 evaluator """rankbased""" +257 37 dataset """kinships""" +257 37 model """ermlp""" +257 37 loss """marginranking""" +257 37 regularizer """no""" +257 37 optimizer """adam""" +257 37 training_loop """owa""" +257 37 negative_sampler """basic""" +257 37 evaluator """rankbased""" +257 38 dataset """kinships""" +257 38 model """ermlp""" +257 38 loss """marginranking""" +257 38 regularizer """no""" +257 38 optimizer """adam""" +257 38 training_loop """owa""" +257 38 negative_sampler """basic""" +257 38 evaluator """rankbased""" +257 39 dataset """kinships""" +257 39 model """ermlp""" +257 39 loss """marginranking""" +257 39 regularizer """no""" +257 39 optimizer """adam""" +257 39 training_loop """owa""" +257 39 negative_sampler """basic""" +257 39 evaluator """rankbased""" +257 40 dataset """kinships""" +257 40 model """ermlp""" +257 40 loss """marginranking""" +257 40 regularizer """no""" +257 40 optimizer """adam""" +257 40 training_loop """owa""" +257 40 negative_sampler """basic""" +257 40 evaluator """rankbased""" +257 41 dataset """kinships""" +257 41 model """ermlp""" +257 41 loss """marginranking""" +257 41 regularizer """no""" +257 41 optimizer """adam""" +257 41 training_loop """owa""" +257 41 negative_sampler """basic""" +257 41 evaluator """rankbased""" +257 42 dataset """kinships""" +257 42 model """ermlp""" +257 42 loss """marginranking""" +257 42 regularizer """no""" +257 42 optimizer """adam""" +257 42 training_loop """owa""" +257 42 negative_sampler """basic""" +257 42 evaluator """rankbased""" +257 43 dataset """kinships""" +257 43 model """ermlp""" +257 43 loss """marginranking""" +257 43 regularizer """no""" +257 43 optimizer """adam""" 
+257 43 training_loop """owa""" +257 43 negative_sampler """basic""" +257 43 evaluator """rankbased""" +257 44 dataset """kinships""" +257 44 model """ermlp""" +257 44 loss """marginranking""" +257 44 regularizer """no""" +257 44 optimizer """adam""" +257 44 training_loop """owa""" +257 44 negative_sampler """basic""" +257 44 evaluator """rankbased""" +257 45 dataset """kinships""" +257 45 model """ermlp""" +257 45 loss """marginranking""" +257 45 regularizer """no""" +257 45 optimizer """adam""" +257 45 training_loop """owa""" +257 45 negative_sampler """basic""" +257 45 evaluator """rankbased""" +257 46 dataset """kinships""" +257 46 model """ermlp""" +257 46 loss """marginranking""" +257 46 regularizer """no""" +257 46 optimizer """adam""" +257 46 training_loop """owa""" +257 46 negative_sampler """basic""" +257 46 evaluator """rankbased""" +257 47 dataset """kinships""" +257 47 model """ermlp""" +257 47 loss """marginranking""" +257 47 regularizer """no""" +257 47 optimizer """adam""" +257 47 training_loop """owa""" +257 47 negative_sampler """basic""" +257 47 evaluator """rankbased""" +257 48 dataset """kinships""" +257 48 model """ermlp""" +257 48 loss """marginranking""" +257 48 regularizer """no""" +257 48 optimizer """adam""" +257 48 training_loop """owa""" +257 48 negative_sampler """basic""" +257 48 evaluator """rankbased""" +257 49 dataset """kinships""" +257 49 model """ermlp""" +257 49 loss """marginranking""" +257 49 regularizer """no""" +257 49 optimizer """adam""" +257 49 training_loop """owa""" +257 49 negative_sampler """basic""" +257 49 evaluator """rankbased""" +257 50 dataset """kinships""" +257 50 model """ermlp""" +257 50 loss """marginranking""" +257 50 regularizer """no""" +257 50 optimizer """adam""" +257 50 training_loop """owa""" +257 50 negative_sampler """basic""" +257 50 evaluator """rankbased""" +257 51 dataset """kinships""" +257 51 model """ermlp""" +257 51 loss """marginranking""" +257 51 regularizer """no""" +257 51 optimizer 
"""adam""" +257 51 training_loop """owa""" +257 51 negative_sampler """basic""" +257 51 evaluator """rankbased""" +257 52 dataset """kinships""" +257 52 model """ermlp""" +257 52 loss """marginranking""" +257 52 regularizer """no""" +257 52 optimizer """adam""" +257 52 training_loop """owa""" +257 52 negative_sampler """basic""" +257 52 evaluator """rankbased""" +257 53 dataset """kinships""" +257 53 model """ermlp""" +257 53 loss """marginranking""" +257 53 regularizer """no""" +257 53 optimizer """adam""" +257 53 training_loop """owa""" +257 53 negative_sampler """basic""" +257 53 evaluator """rankbased""" +257 54 dataset """kinships""" +257 54 model """ermlp""" +257 54 loss """marginranking""" +257 54 regularizer """no""" +257 54 optimizer """adam""" +257 54 training_loop """owa""" +257 54 negative_sampler """basic""" +257 54 evaluator """rankbased""" +257 55 dataset """kinships""" +257 55 model """ermlp""" +257 55 loss """marginranking""" +257 55 regularizer """no""" +257 55 optimizer """adam""" +257 55 training_loop """owa""" +257 55 negative_sampler """basic""" +257 55 evaluator """rankbased""" +257 56 dataset """kinships""" +257 56 model """ermlp""" +257 56 loss """marginranking""" +257 56 regularizer """no""" +257 56 optimizer """adam""" +257 56 training_loop """owa""" +257 56 negative_sampler """basic""" +257 56 evaluator """rankbased""" +257 57 dataset """kinships""" +257 57 model """ermlp""" +257 57 loss """marginranking""" +257 57 regularizer """no""" +257 57 optimizer """adam""" +257 57 training_loop """owa""" +257 57 negative_sampler """basic""" +257 57 evaluator """rankbased""" +257 58 dataset """kinships""" +257 58 model """ermlp""" +257 58 loss """marginranking""" +257 58 regularizer """no""" +257 58 optimizer """adam""" +257 58 training_loop """owa""" +257 58 negative_sampler """basic""" +257 58 evaluator """rankbased""" +257 59 dataset """kinships""" +257 59 model """ermlp""" +257 59 loss """marginranking""" +257 59 regularizer """no""" +257 59 
optimizer """adam""" +257 59 training_loop """owa""" +257 59 negative_sampler """basic""" +257 59 evaluator """rankbased""" +257 60 dataset """kinships""" +257 60 model """ermlp""" +257 60 loss """marginranking""" +257 60 regularizer """no""" +257 60 optimizer """adam""" +257 60 training_loop """owa""" +257 60 negative_sampler """basic""" +257 60 evaluator """rankbased""" +257 61 dataset """kinships""" +257 61 model """ermlp""" +257 61 loss """marginranking""" +257 61 regularizer """no""" +257 61 optimizer """adam""" +257 61 training_loop """owa""" +257 61 negative_sampler """basic""" +257 61 evaluator """rankbased""" +257 62 dataset """kinships""" +257 62 model """ermlp""" +257 62 loss """marginranking""" +257 62 regularizer """no""" +257 62 optimizer """adam""" +257 62 training_loop """owa""" +257 62 negative_sampler """basic""" +257 62 evaluator """rankbased""" +257 63 dataset """kinships""" +257 63 model """ermlp""" +257 63 loss """marginranking""" +257 63 regularizer """no""" +257 63 optimizer """adam""" +257 63 training_loop """owa""" +257 63 negative_sampler """basic""" +257 63 evaluator """rankbased""" +257 64 dataset """kinships""" +257 64 model """ermlp""" +257 64 loss """marginranking""" +257 64 regularizer """no""" +257 64 optimizer """adam""" +257 64 training_loop """owa""" +257 64 negative_sampler """basic""" +257 64 evaluator """rankbased""" +257 65 dataset """kinships""" +257 65 model """ermlp""" +257 65 loss """marginranking""" +257 65 regularizer """no""" +257 65 optimizer """adam""" +257 65 training_loop """owa""" +257 65 negative_sampler """basic""" +257 65 evaluator """rankbased""" +257 66 dataset """kinships""" +257 66 model """ermlp""" +257 66 loss """marginranking""" +257 66 regularizer """no""" +257 66 optimizer """adam""" +257 66 training_loop """owa""" +257 66 negative_sampler """basic""" +257 66 evaluator """rankbased""" +257 67 dataset """kinships""" +257 67 model """ermlp""" +257 67 loss """marginranking""" +257 67 regularizer """no""" 
+257 67 optimizer """adam""" +257 67 training_loop """owa""" +257 67 negative_sampler """basic""" +257 67 evaluator """rankbased""" +257 68 dataset """kinships""" +257 68 model """ermlp""" +257 68 loss """marginranking""" +257 68 regularizer """no""" +257 68 optimizer """adam""" +257 68 training_loop """owa""" +257 68 negative_sampler """basic""" +257 68 evaluator """rankbased""" +257 69 dataset """kinships""" +257 69 model """ermlp""" +257 69 loss """marginranking""" +257 69 regularizer """no""" +257 69 optimizer """adam""" +257 69 training_loop """owa""" +257 69 negative_sampler """basic""" +257 69 evaluator """rankbased""" +257 70 dataset """kinships""" +257 70 model """ermlp""" +257 70 loss """marginranking""" +257 70 regularizer """no""" +257 70 optimizer """adam""" +257 70 training_loop """owa""" +257 70 negative_sampler """basic""" +257 70 evaluator """rankbased""" +257 71 dataset """kinships""" +257 71 model """ermlp""" +257 71 loss """marginranking""" +257 71 regularizer """no""" +257 71 optimizer """adam""" +257 71 training_loop """owa""" +257 71 negative_sampler """basic""" +257 71 evaluator """rankbased""" +257 72 dataset """kinships""" +257 72 model """ermlp""" +257 72 loss """marginranking""" +257 72 regularizer """no""" +257 72 optimizer """adam""" +257 72 training_loop """owa""" +257 72 negative_sampler """basic""" +257 72 evaluator """rankbased""" +257 73 dataset """kinships""" +257 73 model """ermlp""" +257 73 loss """marginranking""" +257 73 regularizer """no""" +257 73 optimizer """adam""" +257 73 training_loop """owa""" +257 73 negative_sampler """basic""" +257 73 evaluator """rankbased""" +257 74 dataset """kinships""" +257 74 model """ermlp""" +257 74 loss """marginranking""" +257 74 regularizer """no""" +257 74 optimizer """adam""" +257 74 training_loop """owa""" +257 74 negative_sampler """basic""" +257 74 evaluator """rankbased""" +257 75 dataset """kinships""" +257 75 model """ermlp""" +257 75 loss """marginranking""" +257 75 regularizer 
"""no""" +257 75 optimizer """adam""" +257 75 training_loop """owa""" +257 75 negative_sampler """basic""" +257 75 evaluator """rankbased""" +257 76 dataset """kinships""" +257 76 model """ermlp""" +257 76 loss """marginranking""" +257 76 regularizer """no""" +257 76 optimizer """adam""" +257 76 training_loop """owa""" +257 76 negative_sampler """basic""" +257 76 evaluator """rankbased""" +257 77 dataset """kinships""" +257 77 model """ermlp""" +257 77 loss """marginranking""" +257 77 regularizer """no""" +257 77 optimizer """adam""" +257 77 training_loop """owa""" +257 77 negative_sampler """basic""" +257 77 evaluator """rankbased""" +257 78 dataset """kinships""" +257 78 model """ermlp""" +257 78 loss """marginranking""" +257 78 regularizer """no""" +257 78 optimizer """adam""" +257 78 training_loop """owa""" +257 78 negative_sampler """basic""" +257 78 evaluator """rankbased""" +257 79 dataset """kinships""" +257 79 model """ermlp""" +257 79 loss """marginranking""" +257 79 regularizer """no""" +257 79 optimizer """adam""" +257 79 training_loop """owa""" +257 79 negative_sampler """basic""" +257 79 evaluator """rankbased""" +257 80 dataset """kinships""" +257 80 model """ermlp""" +257 80 loss """marginranking""" +257 80 regularizer """no""" +257 80 optimizer """adam""" +257 80 training_loop """owa""" +257 80 negative_sampler """basic""" +257 80 evaluator """rankbased""" +257 81 dataset """kinships""" +257 81 model """ermlp""" +257 81 loss """marginranking""" +257 81 regularizer """no""" +257 81 optimizer """adam""" +257 81 training_loop """owa""" +257 81 negative_sampler """basic""" +257 81 evaluator """rankbased""" +257 82 dataset """kinships""" +257 82 model """ermlp""" +257 82 loss """marginranking""" +257 82 regularizer """no""" +257 82 optimizer """adam""" +257 82 training_loop """owa""" +257 82 negative_sampler """basic""" +257 82 evaluator """rankbased""" +257 83 dataset """kinships""" +257 83 model """ermlp""" +257 83 loss """marginranking""" +257 83 
regularizer """no""" +257 83 optimizer """adam""" +257 83 training_loop """owa""" +257 83 negative_sampler """basic""" +257 83 evaluator """rankbased""" +257 84 dataset """kinships""" +257 84 model """ermlp""" +257 84 loss """marginranking""" +257 84 regularizer """no""" +257 84 optimizer """adam""" +257 84 training_loop """owa""" +257 84 negative_sampler """basic""" +257 84 evaluator """rankbased""" +257 85 dataset """kinships""" +257 85 model """ermlp""" +257 85 loss """marginranking""" +257 85 regularizer """no""" +257 85 optimizer """adam""" +257 85 training_loop """owa""" +257 85 negative_sampler """basic""" +257 85 evaluator """rankbased""" +257 86 dataset """kinships""" +257 86 model """ermlp""" +257 86 loss """marginranking""" +257 86 regularizer """no""" +257 86 optimizer """adam""" +257 86 training_loop """owa""" +257 86 negative_sampler """basic""" +257 86 evaluator """rankbased""" +257 87 dataset """kinships""" +257 87 model """ermlp""" +257 87 loss """marginranking""" +257 87 regularizer """no""" +257 87 optimizer """adam""" +257 87 training_loop """owa""" +257 87 negative_sampler """basic""" +257 87 evaluator """rankbased""" +257 88 dataset """kinships""" +257 88 model """ermlp""" +257 88 loss """marginranking""" +257 88 regularizer """no""" +257 88 optimizer """adam""" +257 88 training_loop """owa""" +257 88 negative_sampler """basic""" +257 88 evaluator """rankbased""" +257 89 dataset """kinships""" +257 89 model """ermlp""" +257 89 loss """marginranking""" +257 89 regularizer """no""" +257 89 optimizer """adam""" +257 89 training_loop """owa""" +257 89 negative_sampler """basic""" +257 89 evaluator """rankbased""" +257 90 dataset """kinships""" +257 90 model """ermlp""" +257 90 loss """marginranking""" +257 90 regularizer """no""" +257 90 optimizer """adam""" +257 90 training_loop """owa""" +257 90 negative_sampler """basic""" +257 90 evaluator """rankbased""" +257 91 dataset """kinships""" +257 91 model """ermlp""" +257 91 loss """marginranking""" 
+257 91 regularizer """no""" +257 91 optimizer """adam""" +257 91 training_loop """owa""" +257 91 negative_sampler """basic""" +257 91 evaluator """rankbased""" +257 92 dataset """kinships""" +257 92 model """ermlp""" +257 92 loss """marginranking""" +257 92 regularizer """no""" +257 92 optimizer """adam""" +257 92 training_loop """owa""" +257 92 negative_sampler """basic""" +257 92 evaluator """rankbased""" +257 93 dataset """kinships""" +257 93 model """ermlp""" +257 93 loss """marginranking""" +257 93 regularizer """no""" +257 93 optimizer """adam""" +257 93 training_loop """owa""" +257 93 negative_sampler """basic""" +257 93 evaluator """rankbased""" +257 94 dataset """kinships""" +257 94 model """ermlp""" +257 94 loss """marginranking""" +257 94 regularizer """no""" +257 94 optimizer """adam""" +257 94 training_loop """owa""" +257 94 negative_sampler """basic""" +257 94 evaluator """rankbased""" +257 95 dataset """kinships""" +257 95 model """ermlp""" +257 95 loss """marginranking""" +257 95 regularizer """no""" +257 95 optimizer """adam""" +257 95 training_loop """owa""" +257 95 negative_sampler """basic""" +257 95 evaluator """rankbased""" +257 96 dataset """kinships""" +257 96 model """ermlp""" +257 96 loss """marginranking""" +257 96 regularizer """no""" +257 96 optimizer """adam""" +257 96 training_loop """owa""" +257 96 negative_sampler """basic""" +257 96 evaluator """rankbased""" +257 97 dataset """kinships""" +257 97 model """ermlp""" +257 97 loss """marginranking""" +257 97 regularizer """no""" +257 97 optimizer """adam""" +257 97 training_loop """owa""" +257 97 negative_sampler """basic""" +257 97 evaluator """rankbased""" +257 98 dataset """kinships""" +257 98 model """ermlp""" +257 98 loss """marginranking""" +257 98 regularizer """no""" +257 98 optimizer """adam""" +257 98 training_loop """owa""" +257 98 negative_sampler """basic""" +257 98 evaluator """rankbased""" +257 99 dataset """kinships""" +257 99 model """ermlp""" +257 99 loss 
"""marginranking""" +257 99 regularizer """no""" +257 99 optimizer """adam""" +257 99 training_loop """owa""" +257 99 negative_sampler """basic""" +257 99 evaluator """rankbased""" +257 100 dataset """kinships""" +257 100 model """ermlp""" +257 100 loss """marginranking""" +257 100 regularizer """no""" +257 100 optimizer """adam""" +257 100 training_loop """owa""" +257 100 negative_sampler """basic""" +257 100 evaluator """rankbased""" +258 1 model.embedding_dim 1.0 +258 1 loss.margin 18.552687417393447 +258 1 loss.adversarial_temperature 0.7114674025961871 +258 1 optimizer.lr 0.0013706922117401547 +258 1 negative_sampler.num_negs_per_pos 66.0 +258 1 training.batch_size 0.0 +258 2 model.embedding_dim 2.0 +258 2 loss.margin 17.010859532086393 +258 2 loss.adversarial_temperature 0.6225116888900328 +258 2 optimizer.lr 0.023778137884705872 +258 2 negative_sampler.num_negs_per_pos 63.0 +258 2 training.batch_size 2.0 +258 3 model.embedding_dim 0.0 +258 3 loss.margin 10.040510938425518 +258 3 loss.adversarial_temperature 0.700570118837198 +258 3 optimizer.lr 0.015740490365147672 +258 3 negative_sampler.num_negs_per_pos 58.0 +258 3 training.batch_size 1.0 +258 4 model.embedding_dim 1.0 +258 4 loss.margin 6.156739065819437 +258 4 loss.adversarial_temperature 0.40343965693645667 +258 4 optimizer.lr 0.011239055908070549 +258 4 negative_sampler.num_negs_per_pos 38.0 +258 4 training.batch_size 2.0 +258 5 model.embedding_dim 2.0 +258 5 loss.margin 4.258696226499547 +258 5 loss.adversarial_temperature 0.35622830840510367 +258 5 optimizer.lr 0.00936217893778908 +258 5 negative_sampler.num_negs_per_pos 48.0 +258 5 training.batch_size 1.0 +258 6 model.embedding_dim 1.0 +258 6 loss.margin 1.8465666250627408 +258 6 loss.adversarial_temperature 0.8295124313807887 +258 6 optimizer.lr 0.022646733194079217 +258 6 negative_sampler.num_negs_per_pos 55.0 +258 6 training.batch_size 0.0 +258 7 model.embedding_dim 1.0 +258 7 loss.margin 5.932523692665534 +258 7 loss.adversarial_temperature 
0.6853434209217483 +258 7 optimizer.lr 0.003911066521354467 +258 7 negative_sampler.num_negs_per_pos 92.0 +258 7 training.batch_size 2.0 +258 8 model.embedding_dim 1.0 +258 8 loss.margin 4.076636825097641 +258 8 loss.adversarial_temperature 0.7363586834391345 +258 8 optimizer.lr 0.004643372280680089 +258 8 negative_sampler.num_negs_per_pos 23.0 +258 8 training.batch_size 1.0 +258 9 model.embedding_dim 1.0 +258 9 loss.margin 28.34015474488459 +258 9 loss.adversarial_temperature 0.5363882865250995 +258 9 optimizer.lr 0.07426873590951277 +258 9 negative_sampler.num_negs_per_pos 8.0 +258 9 training.batch_size 1.0 +258 10 model.embedding_dim 2.0 +258 10 loss.margin 16.740768166923687 +258 10 loss.adversarial_temperature 0.28653246399403576 +258 10 optimizer.lr 0.048136834240937544 +258 10 negative_sampler.num_negs_per_pos 67.0 +258 10 training.batch_size 2.0 +258 11 model.embedding_dim 0.0 +258 11 loss.margin 9.954877962940365 +258 11 loss.adversarial_temperature 0.2094579335113318 +258 11 optimizer.lr 0.033303436752589795 +258 11 negative_sampler.num_negs_per_pos 93.0 +258 11 training.batch_size 1.0 +258 12 model.embedding_dim 1.0 +258 12 loss.margin 18.82827855126093 +258 12 loss.adversarial_temperature 0.6755183247985077 +258 12 optimizer.lr 0.07974776613438707 +258 12 negative_sampler.num_negs_per_pos 21.0 +258 12 training.batch_size 2.0 +258 13 model.embedding_dim 2.0 +258 13 loss.margin 21.491794000583237 +258 13 loss.adversarial_temperature 0.7634697363150272 +258 13 optimizer.lr 0.06210637928988393 +258 13 negative_sampler.num_negs_per_pos 88.0 +258 13 training.batch_size 2.0 +258 14 model.embedding_dim 0.0 +258 14 loss.margin 21.13461395635375 +258 14 loss.adversarial_temperature 0.9947939698663554 +258 14 optimizer.lr 0.022976586792970606 +258 14 negative_sampler.num_negs_per_pos 3.0 +258 14 training.batch_size 2.0 +258 15 model.embedding_dim 2.0 +258 15 loss.margin 14.889896539590328 +258 15 loss.adversarial_temperature 0.5106354990413818 +258 15 optimizer.lr 
0.021557265946050813 +258 15 negative_sampler.num_negs_per_pos 32.0 +258 15 training.batch_size 0.0 +258 16 model.embedding_dim 1.0 +258 16 loss.margin 21.626271162914065 +258 16 loss.adversarial_temperature 0.7443139690510403 +258 16 optimizer.lr 0.005434814534964308 +258 16 negative_sampler.num_negs_per_pos 60.0 +258 16 training.batch_size 0.0 +258 17 model.embedding_dim 1.0 +258 17 loss.margin 5.871072036122425 +258 17 loss.adversarial_temperature 0.2986398274318874 +258 17 optimizer.lr 0.013793712656151907 +258 17 negative_sampler.num_negs_per_pos 27.0 +258 17 training.batch_size 2.0 +258 18 model.embedding_dim 2.0 +258 18 loss.margin 21.822979591362095 +258 18 loss.adversarial_temperature 0.24427851127346042 +258 18 optimizer.lr 0.001815221998293531 +258 18 negative_sampler.num_negs_per_pos 6.0 +258 18 training.batch_size 0.0 +258 19 model.embedding_dim 2.0 +258 19 loss.margin 2.310870246035875 +258 19 loss.adversarial_temperature 0.5970041435692969 +258 19 optimizer.lr 0.015451763983079722 +258 19 negative_sampler.num_negs_per_pos 83.0 +258 19 training.batch_size 0.0 +258 20 model.embedding_dim 2.0 +258 20 loss.margin 15.286437958522521 +258 20 loss.adversarial_temperature 0.10847956526815808 +258 20 optimizer.lr 0.09713399587422532 +258 20 negative_sampler.num_negs_per_pos 38.0 +258 20 training.batch_size 0.0 +258 21 model.embedding_dim 2.0 +258 21 loss.margin 29.329328188423474 +258 21 loss.adversarial_temperature 0.4378498676033913 +258 21 optimizer.lr 0.004638789756559986 +258 21 negative_sampler.num_negs_per_pos 82.0 +258 21 training.batch_size 0.0 +258 22 model.embedding_dim 2.0 +258 22 loss.margin 12.772020159557927 +258 22 loss.adversarial_temperature 0.37492800281202376 +258 22 optimizer.lr 0.0035590061605151064 +258 22 negative_sampler.num_negs_per_pos 40.0 +258 22 training.batch_size 0.0 +258 23 model.embedding_dim 1.0 +258 23 loss.margin 16.763778658166945 +258 23 loss.adversarial_temperature 0.7838894478909065 +258 23 optimizer.lr 
0.0038013949766764884 +258 23 negative_sampler.num_negs_per_pos 61.0 +258 23 training.batch_size 2.0 +258 24 model.embedding_dim 0.0 +258 24 loss.margin 12.671048681091124 +258 24 loss.adversarial_temperature 0.1882195757337169 +258 24 optimizer.lr 0.0383654439154434 +258 24 negative_sampler.num_negs_per_pos 12.0 +258 24 training.batch_size 1.0 +258 25 model.embedding_dim 1.0 +258 25 loss.margin 19.269262681001152 +258 25 loss.adversarial_temperature 0.8041714386568367 +258 25 optimizer.lr 0.08601676751821082 +258 25 negative_sampler.num_negs_per_pos 32.0 +258 25 training.batch_size 1.0 +258 26 model.embedding_dim 1.0 +258 26 loss.margin 14.522277081908559 +258 26 loss.adversarial_temperature 0.44705885315241023 +258 26 optimizer.lr 0.04839969411818081 +258 26 negative_sampler.num_negs_per_pos 3.0 +258 26 training.batch_size 0.0 +258 27 model.embedding_dim 2.0 +258 27 loss.margin 14.741156145881646 +258 27 loss.adversarial_temperature 0.7720502765737729 +258 27 optimizer.lr 0.0010328304385561818 +258 27 negative_sampler.num_negs_per_pos 56.0 +258 27 training.batch_size 0.0 +258 28 model.embedding_dim 0.0 +258 28 loss.margin 11.433315956513429 +258 28 loss.adversarial_temperature 0.7693997853024706 +258 28 optimizer.lr 0.0012742576324207128 +258 28 negative_sampler.num_negs_per_pos 11.0 +258 28 training.batch_size 2.0 +258 29 model.embedding_dim 2.0 +258 29 loss.margin 14.717573904821638 +258 29 loss.adversarial_temperature 0.21726547672712282 +258 29 optimizer.lr 0.03333153812537295 +258 29 negative_sampler.num_negs_per_pos 24.0 +258 29 training.batch_size 2.0 +258 30 model.embedding_dim 1.0 +258 30 loss.margin 7.6688129623938615 +258 30 loss.adversarial_temperature 0.1310094228558897 +258 30 optimizer.lr 0.040133941964998886 +258 30 negative_sampler.num_negs_per_pos 98.0 +258 30 training.batch_size 2.0 +258 31 model.embedding_dim 0.0 +258 31 loss.margin 15.70583278321185 +258 31 loss.adversarial_temperature 0.9754453572866587 +258 31 optimizer.lr 
0.004162927017568243 +258 31 negative_sampler.num_negs_per_pos 67.0 +258 31 training.batch_size 0.0 +258 32 model.embedding_dim 2.0 +258 32 loss.margin 4.332458910154044 +258 32 loss.adversarial_temperature 0.20350995269737193 +258 32 optimizer.lr 0.003789592617665528 +258 32 negative_sampler.num_negs_per_pos 9.0 +258 32 training.batch_size 2.0 +258 33 model.embedding_dim 0.0 +258 33 loss.margin 29.33200268003758 +258 33 loss.adversarial_temperature 0.920779052779007 +258 33 optimizer.lr 0.0818026385125973 +258 33 negative_sampler.num_negs_per_pos 10.0 +258 33 training.batch_size 0.0 +258 34 model.embedding_dim 0.0 +258 34 loss.margin 27.30646046712017 +258 34 loss.adversarial_temperature 0.4161974640726639 +258 34 optimizer.lr 0.005180006801627209 +258 34 negative_sampler.num_negs_per_pos 47.0 +258 34 training.batch_size 0.0 +258 35 model.embedding_dim 0.0 +258 35 loss.margin 9.874492428701073 +258 35 loss.adversarial_temperature 0.22533703293308804 +258 35 optimizer.lr 0.0030748463308525784 +258 35 negative_sampler.num_negs_per_pos 69.0 +258 35 training.batch_size 2.0 +258 36 model.embedding_dim 0.0 +258 36 loss.margin 5.995949818546038 +258 36 loss.adversarial_temperature 0.3319572040925505 +258 36 optimizer.lr 0.0405844215924856 +258 36 negative_sampler.num_negs_per_pos 16.0 +258 36 training.batch_size 2.0 +258 37 model.embedding_dim 2.0 +258 37 loss.margin 21.833642942410805 +258 37 loss.adversarial_temperature 0.34315192570917596 +258 37 optimizer.lr 0.05157648253800233 +258 37 negative_sampler.num_negs_per_pos 66.0 +258 37 training.batch_size 1.0 +258 38 model.embedding_dim 1.0 +258 38 loss.margin 7.2986985761946634 +258 38 loss.adversarial_temperature 0.11457436149157244 +258 38 optimizer.lr 0.04836783996414116 +258 38 negative_sampler.num_negs_per_pos 14.0 +258 38 training.batch_size 0.0 +258 39 model.embedding_dim 1.0 +258 39 loss.margin 3.0333710235488067 +258 39 loss.adversarial_temperature 0.6078961781406168 +258 39 optimizer.lr 0.007812377521405571 
+258 39 negative_sampler.num_negs_per_pos 98.0 +258 39 training.batch_size 1.0 +258 40 model.embedding_dim 0.0 +258 40 loss.margin 21.511238121961256 +258 40 loss.adversarial_temperature 0.5922677834829293 +258 40 optimizer.lr 0.006496067415772286 +258 40 negative_sampler.num_negs_per_pos 73.0 +258 40 training.batch_size 0.0 +258 41 model.embedding_dim 2.0 +258 41 loss.margin 27.80005378680624 +258 41 loss.adversarial_temperature 0.7933492486064089 +258 41 optimizer.lr 0.06388830705180697 +258 41 negative_sampler.num_negs_per_pos 44.0 +258 41 training.batch_size 2.0 +258 42 model.embedding_dim 1.0 +258 42 loss.margin 7.708930248857808 +258 42 loss.adversarial_temperature 0.10115275836942651 +258 42 optimizer.lr 0.015938019277522246 +258 42 negative_sampler.num_negs_per_pos 3.0 +258 42 training.batch_size 1.0 +258 43 model.embedding_dim 0.0 +258 43 loss.margin 18.41522488162037 +258 43 loss.adversarial_temperature 0.6989913950370173 +258 43 optimizer.lr 0.0020508092509257856 +258 43 negative_sampler.num_negs_per_pos 78.0 +258 43 training.batch_size 1.0 +258 44 model.embedding_dim 2.0 +258 44 loss.margin 22.217423560592223 +258 44 loss.adversarial_temperature 0.8024101214437894 +258 44 optimizer.lr 0.001351588628513536 +258 44 negative_sampler.num_negs_per_pos 72.0 +258 44 training.batch_size 0.0 +258 45 model.embedding_dim 0.0 +258 45 loss.margin 24.704413190269683 +258 45 loss.adversarial_temperature 0.46298129120312836 +258 45 optimizer.lr 0.07660992385349487 +258 45 negative_sampler.num_negs_per_pos 69.0 +258 45 training.batch_size 2.0 +258 46 model.embedding_dim 1.0 +258 46 loss.margin 28.617586187663118 +258 46 loss.adversarial_temperature 0.48979075369086267 +258 46 optimizer.lr 0.02620472518608665 +258 46 negative_sampler.num_negs_per_pos 93.0 +258 46 training.batch_size 2.0 +258 47 model.embedding_dim 2.0 +258 47 loss.margin 18.839732229911952 +258 47 loss.adversarial_temperature 0.8936425247884804 +258 47 optimizer.lr 0.007549934900151721 +258 47 
negative_sampler.num_negs_per_pos 25.0 +258 47 training.batch_size 1.0 +258 48 model.embedding_dim 1.0 +258 48 loss.margin 12.927742641516659 +258 48 loss.adversarial_temperature 0.28593109153552254 +258 48 optimizer.lr 0.019907755506499553 +258 48 negative_sampler.num_negs_per_pos 71.0 +258 48 training.batch_size 0.0 +258 49 model.embedding_dim 2.0 +258 49 loss.margin 16.69954662641535 +258 49 loss.adversarial_temperature 0.2766846083604362 +258 49 optimizer.lr 0.0011833216649402394 +258 49 negative_sampler.num_negs_per_pos 5.0 +258 49 training.batch_size 1.0 +258 50 model.embedding_dim 2.0 +258 50 loss.margin 1.3312402388001092 +258 50 loss.adversarial_temperature 0.4193842858748221 +258 50 optimizer.lr 0.0031909004789891075 +258 50 negative_sampler.num_negs_per_pos 40.0 +258 50 training.batch_size 0.0 +258 51 model.embedding_dim 1.0 +258 51 loss.margin 17.152266726134958 +258 51 loss.adversarial_temperature 0.1762117568218272 +258 51 optimizer.lr 0.0025268295365236102 +258 51 negative_sampler.num_negs_per_pos 75.0 +258 51 training.batch_size 2.0 +258 52 model.embedding_dim 1.0 +258 52 loss.margin 17.233326599379055 +258 52 loss.adversarial_temperature 0.36400244969570683 +258 52 optimizer.lr 0.0261281037120972 +258 52 negative_sampler.num_negs_per_pos 25.0 +258 52 training.batch_size 0.0 +258 53 model.embedding_dim 0.0 +258 53 loss.margin 25.031159961449248 +258 53 loss.adversarial_temperature 0.30571484790364645 +258 53 optimizer.lr 0.006274479684933195 +258 53 negative_sampler.num_negs_per_pos 50.0 +258 53 training.batch_size 1.0 +258 54 model.embedding_dim 0.0 +258 54 loss.margin 8.532417624198287 +258 54 loss.adversarial_temperature 0.8603754965990076 +258 54 optimizer.lr 0.004018777695682138 +258 54 negative_sampler.num_negs_per_pos 54.0 +258 54 training.batch_size 1.0 +258 55 model.embedding_dim 1.0 +258 55 loss.margin 8.370197086965568 +258 55 loss.adversarial_temperature 0.4607133006118067 +258 55 optimizer.lr 0.08630256379914947 +258 55 
negative_sampler.num_negs_per_pos 92.0 +258 55 training.batch_size 0.0 +258 56 model.embedding_dim 2.0 +258 56 loss.margin 19.937159220322588 +258 56 loss.adversarial_temperature 0.8962966273269456 +258 56 optimizer.lr 0.0031124637142604145 +258 56 negative_sampler.num_negs_per_pos 16.0 +258 56 training.batch_size 0.0 +258 57 model.embedding_dim 0.0 +258 57 loss.margin 22.455191759938312 +258 57 loss.adversarial_temperature 0.8565394590069331 +258 57 optimizer.lr 0.07650611774191414 +258 57 negative_sampler.num_negs_per_pos 37.0 +258 57 training.batch_size 1.0 +258 58 model.embedding_dim 2.0 +258 58 loss.margin 16.717696265495505 +258 58 loss.adversarial_temperature 0.18811387828650364 +258 58 optimizer.lr 0.02251034485696199 +258 58 negative_sampler.num_negs_per_pos 75.0 +258 58 training.batch_size 1.0 +258 59 model.embedding_dim 2.0 +258 59 loss.margin 15.06501264817301 +258 59 loss.adversarial_temperature 0.36300292965568126 +258 59 optimizer.lr 0.00667084993706958 +258 59 negative_sampler.num_negs_per_pos 49.0 +258 59 training.batch_size 0.0 +258 60 model.embedding_dim 2.0 +258 60 loss.margin 21.925290289007926 +258 60 loss.adversarial_temperature 0.3515913327742044 +258 60 optimizer.lr 0.061383677717421856 +258 60 negative_sampler.num_negs_per_pos 92.0 +258 60 training.batch_size 2.0 +258 61 model.embedding_dim 0.0 +258 61 loss.margin 8.843629044052697 +258 61 loss.adversarial_temperature 0.804876718434127 +258 61 optimizer.lr 0.0038155066914005824 +258 61 negative_sampler.num_negs_per_pos 97.0 +258 61 training.batch_size 2.0 +258 62 model.embedding_dim 0.0 +258 62 loss.margin 12.313776312792909 +258 62 loss.adversarial_temperature 0.10273577965403807 +258 62 optimizer.lr 0.007186613306380767 +258 62 negative_sampler.num_negs_per_pos 49.0 +258 62 training.batch_size 2.0 +258 63 model.embedding_dim 2.0 +258 63 loss.margin 10.720319164992537 +258 63 loss.adversarial_temperature 0.5499720052477532 +258 63 optimizer.lr 0.007003370871218818 +258 63 
negative_sampler.num_negs_per_pos 40.0 +258 63 training.batch_size 2.0 +258 64 model.embedding_dim 1.0 +258 64 loss.margin 24.069967224721328 +258 64 loss.adversarial_temperature 0.2697530412318882 +258 64 optimizer.lr 0.040292427616919514 +258 64 negative_sampler.num_negs_per_pos 86.0 +258 64 training.batch_size 0.0 +258 65 model.embedding_dim 1.0 +258 65 loss.margin 2.048615935231289 +258 65 loss.adversarial_temperature 0.2315993178160691 +258 65 optimizer.lr 0.05286532702617779 +258 65 negative_sampler.num_negs_per_pos 63.0 +258 65 training.batch_size 0.0 +258 66 model.embedding_dim 0.0 +258 66 loss.margin 8.655961196087762 +258 66 loss.adversarial_temperature 0.753488555415605 +258 66 optimizer.lr 0.007151567433396999 +258 66 negative_sampler.num_negs_per_pos 21.0 +258 66 training.batch_size 0.0 +258 67 model.embedding_dim 0.0 +258 67 loss.margin 27.393164946713302 +258 67 loss.adversarial_temperature 0.848167966745749 +258 67 optimizer.lr 0.024391463670483227 +258 67 negative_sampler.num_negs_per_pos 75.0 +258 67 training.batch_size 2.0 +258 68 model.embedding_dim 2.0 +258 68 loss.margin 14.982401950824928 +258 68 loss.adversarial_temperature 0.6027624081803507 +258 68 optimizer.lr 0.025384175081189432 +258 68 negative_sampler.num_negs_per_pos 70.0 +258 68 training.batch_size 0.0 +258 69 model.embedding_dim 1.0 +258 69 loss.margin 2.5447727189475193 +258 69 loss.adversarial_temperature 0.5227919182971744 +258 69 optimizer.lr 0.0072392156527123185 +258 69 negative_sampler.num_negs_per_pos 89.0 +258 69 training.batch_size 2.0 +258 70 model.embedding_dim 1.0 +258 70 loss.margin 3.162174979778855 +258 70 loss.adversarial_temperature 0.6133617751413034 +258 70 optimizer.lr 0.036527766439430134 +258 70 negative_sampler.num_negs_per_pos 50.0 +258 70 training.batch_size 2.0 +258 71 model.embedding_dim 2.0 +258 71 loss.margin 11.385171074482207 +258 71 loss.adversarial_temperature 0.4472793695857611 +258 71 optimizer.lr 0.014888451073831988 +258 71 
negative_sampler.num_negs_per_pos 6.0 +258 71 training.batch_size 0.0 +258 72 model.embedding_dim 0.0 +258 72 loss.margin 5.6471497095255 +258 72 loss.adversarial_temperature 0.6938857149899074 +258 72 optimizer.lr 0.05626538583207022 +258 72 negative_sampler.num_negs_per_pos 86.0 +258 72 training.batch_size 2.0 +258 73 model.embedding_dim 2.0 +258 73 loss.margin 4.629102212907922 +258 73 loss.adversarial_temperature 0.9414775777813107 +258 73 optimizer.lr 0.023643531615741606 +258 73 negative_sampler.num_negs_per_pos 86.0 +258 73 training.batch_size 0.0 +258 74 model.embedding_dim 1.0 +258 74 loss.margin 29.44603776836496 +258 74 loss.adversarial_temperature 0.9060480115871278 +258 74 optimizer.lr 0.00533278873470694 +258 74 negative_sampler.num_negs_per_pos 74.0 +258 74 training.batch_size 0.0 +258 75 model.embedding_dim 2.0 +258 75 loss.margin 13.38017545185152 +258 75 loss.adversarial_temperature 0.25179895934781027 +258 75 optimizer.lr 0.040363245221093 +258 75 negative_sampler.num_negs_per_pos 77.0 +258 75 training.batch_size 2.0 +258 76 model.embedding_dim 2.0 +258 76 loss.margin 1.9394156232619797 +258 76 loss.adversarial_temperature 0.6555671891504171 +258 76 optimizer.lr 0.05921411838775774 +258 76 negative_sampler.num_negs_per_pos 40.0 +258 76 training.batch_size 0.0 +258 77 model.embedding_dim 2.0 +258 77 loss.margin 22.262987107109133 +258 77 loss.adversarial_temperature 0.360275726567572 +258 77 optimizer.lr 0.06924020829859559 +258 77 negative_sampler.num_negs_per_pos 34.0 +258 77 training.batch_size 1.0 +258 78 model.embedding_dim 0.0 +258 78 loss.margin 12.527956484950495 +258 78 loss.adversarial_temperature 0.6602131845635298 +258 78 optimizer.lr 0.0390193854914687 +258 78 negative_sampler.num_negs_per_pos 79.0 +258 78 training.batch_size 2.0 +258 79 model.embedding_dim 2.0 +258 79 loss.margin 7.451431869036138 +258 79 loss.adversarial_temperature 0.8164058927447649 +258 79 optimizer.lr 0.010144828731601808 +258 79 
negative_sampler.num_negs_per_pos 74.0 +258 79 training.batch_size 2.0 +258 80 model.embedding_dim 0.0 +258 80 loss.margin 4.825631073162349 +258 80 loss.adversarial_temperature 0.2813477427379981 +258 80 optimizer.lr 0.04447458724750501 +258 80 negative_sampler.num_negs_per_pos 93.0 +258 80 training.batch_size 1.0 +258 81 model.embedding_dim 0.0 +258 81 loss.margin 6.886739321591006 +258 81 loss.adversarial_temperature 0.9605109393057903 +258 81 optimizer.lr 0.011151651365598028 +258 81 negative_sampler.num_negs_per_pos 54.0 +258 81 training.batch_size 0.0 +258 82 model.embedding_dim 0.0 +258 82 loss.margin 11.948594677863257 +258 82 loss.adversarial_temperature 0.6187619466896669 +258 82 optimizer.lr 0.04229294466778767 +258 82 negative_sampler.num_negs_per_pos 53.0 +258 82 training.batch_size 0.0 +258 83 model.embedding_dim 1.0 +258 83 loss.margin 19.582936941008832 +258 83 loss.adversarial_temperature 0.5341401226356975 +258 83 optimizer.lr 0.007816175130352581 +258 83 negative_sampler.num_negs_per_pos 97.0 +258 83 training.batch_size 1.0 +258 84 model.embedding_dim 0.0 +258 84 loss.margin 16.535049764721535 +258 84 loss.adversarial_temperature 0.10347647921343633 +258 84 optimizer.lr 0.056973989687696465 +258 84 negative_sampler.num_negs_per_pos 91.0 +258 84 training.batch_size 1.0 +258 85 model.embedding_dim 1.0 +258 85 loss.margin 6.606091039115286 +258 85 loss.adversarial_temperature 0.4216708534933844 +258 85 optimizer.lr 0.0017732936843267485 +258 85 negative_sampler.num_negs_per_pos 24.0 +258 85 training.batch_size 1.0 +258 86 model.embedding_dim 0.0 +258 86 loss.margin 3.0631315296365353 +258 86 loss.adversarial_temperature 0.8769746690173825 +258 86 optimizer.lr 0.007995675628689943 +258 86 negative_sampler.num_negs_per_pos 66.0 +258 86 training.batch_size 1.0 +258 87 model.embedding_dim 0.0 +258 87 loss.margin 17.40987840049088 +258 87 loss.adversarial_temperature 0.10851165866859068 +258 87 optimizer.lr 0.026124535892894696 +258 87 
negative_sampler.num_negs_per_pos 95.0 +258 87 training.batch_size 1.0 +258 88 model.embedding_dim 0.0 +258 88 loss.margin 19.948293266617966 +258 88 loss.adversarial_temperature 0.5722137423262035 +258 88 optimizer.lr 0.030521396430231565 +258 88 negative_sampler.num_negs_per_pos 41.0 +258 88 training.batch_size 1.0 +258 89 model.embedding_dim 1.0 +258 89 loss.margin 23.058472845380756 +258 89 loss.adversarial_temperature 0.7243450483671344 +258 89 optimizer.lr 0.03426317836037402 +258 89 negative_sampler.num_negs_per_pos 79.0 +258 89 training.batch_size 1.0 +258 90 model.embedding_dim 0.0 +258 90 loss.margin 2.2497608903389485 +258 90 loss.adversarial_temperature 0.4962799687018715 +258 90 optimizer.lr 0.008112338105201473 +258 90 negative_sampler.num_negs_per_pos 98.0 +258 90 training.batch_size 2.0 +258 91 model.embedding_dim 0.0 +258 91 loss.margin 27.331186118841096 +258 91 loss.adversarial_temperature 0.9552255775235375 +258 91 optimizer.lr 0.001959366990725529 +258 91 negative_sampler.num_negs_per_pos 67.0 +258 91 training.batch_size 0.0 +258 92 model.embedding_dim 1.0 +258 92 loss.margin 26.94317943855806 +258 92 loss.adversarial_temperature 0.15349745248836705 +258 92 optimizer.lr 0.06130020915494817 +258 92 negative_sampler.num_negs_per_pos 10.0 +258 92 training.batch_size 2.0 +258 93 model.embedding_dim 1.0 +258 93 loss.margin 18.65781892055904 +258 93 loss.adversarial_temperature 0.9449611287343406 +258 93 optimizer.lr 0.050151818176103355 +258 93 negative_sampler.num_negs_per_pos 55.0 +258 93 training.batch_size 0.0 +258 94 model.embedding_dim 2.0 +258 94 loss.margin 8.49657844863467 +258 94 loss.adversarial_temperature 0.11949957583826093 +258 94 optimizer.lr 0.0014479311760053962 +258 94 negative_sampler.num_negs_per_pos 3.0 +258 94 training.batch_size 2.0 +258 95 model.embedding_dim 0.0 +258 95 loss.margin 15.114674523237897 +258 95 loss.adversarial_temperature 0.3028322812293145 +258 95 optimizer.lr 0.02485931221850545 +258 95 
negative_sampler.num_negs_per_pos 56.0 +258 95 training.batch_size 0.0 +258 96 model.embedding_dim 1.0 +258 96 loss.margin 10.042623655867352 +258 96 loss.adversarial_temperature 0.513269414057474 +258 96 optimizer.lr 0.015118569246592892 +258 96 negative_sampler.num_negs_per_pos 73.0 +258 96 training.batch_size 2.0 +258 97 model.embedding_dim 2.0 +258 97 loss.margin 7.684709151050068 +258 97 loss.adversarial_temperature 0.8196496809171769 +258 97 optimizer.lr 0.009570370607285758 +258 97 negative_sampler.num_negs_per_pos 52.0 +258 97 training.batch_size 2.0 +258 98 model.embedding_dim 1.0 +258 98 loss.margin 4.123228712426522 +258 98 loss.adversarial_temperature 0.3719447168343809 +258 98 optimizer.lr 0.023086402576385914 +258 98 negative_sampler.num_negs_per_pos 40.0 +258 98 training.batch_size 2.0 +258 99 model.embedding_dim 1.0 +258 99 loss.margin 22.156059092906574 +258 99 loss.adversarial_temperature 0.4354841192691262 +258 99 optimizer.lr 0.02145268485155974 +258 99 negative_sampler.num_negs_per_pos 76.0 +258 99 training.batch_size 1.0 +258 100 model.embedding_dim 1.0 +258 100 loss.margin 19.223201412011132 +258 100 loss.adversarial_temperature 0.8375466369718185 +258 100 optimizer.lr 0.016541606950392834 +258 100 negative_sampler.num_negs_per_pos 12.0 +258 100 training.batch_size 1.0 +258 1 dataset """kinships""" +258 1 model """ermlp""" +258 1 loss """nssa""" +258 1 regularizer """no""" +258 1 optimizer """adam""" +258 1 training_loop """owa""" +258 1 negative_sampler """basic""" +258 1 evaluator """rankbased""" +258 2 dataset """kinships""" +258 2 model """ermlp""" +258 2 loss """nssa""" +258 2 regularizer """no""" +258 2 optimizer """adam""" +258 2 training_loop """owa""" +258 2 negative_sampler """basic""" +258 2 evaluator """rankbased""" +258 3 dataset """kinships""" +258 3 model """ermlp""" +258 3 loss """nssa""" +258 3 regularizer """no""" +258 3 optimizer """adam""" +258 3 training_loop """owa""" +258 3 negative_sampler """basic""" +258 3 evaluator 
"""rankbased""" +258 4 dataset """kinships""" +258 4 model """ermlp""" +258 4 loss """nssa""" +258 4 regularizer """no""" +258 4 optimizer """adam""" +258 4 training_loop """owa""" +258 4 negative_sampler """basic""" +258 4 evaluator """rankbased""" +258 5 dataset """kinships""" +258 5 model """ermlp""" +258 5 loss """nssa""" +258 5 regularizer """no""" +258 5 optimizer """adam""" +258 5 training_loop """owa""" +258 5 negative_sampler """basic""" +258 5 evaluator """rankbased""" +258 6 dataset """kinships""" +258 6 model """ermlp""" +258 6 loss """nssa""" +258 6 regularizer """no""" +258 6 optimizer """adam""" +258 6 training_loop """owa""" +258 6 negative_sampler """basic""" +258 6 evaluator """rankbased""" +258 7 dataset """kinships""" +258 7 model """ermlp""" +258 7 loss """nssa""" +258 7 regularizer """no""" +258 7 optimizer """adam""" +258 7 training_loop """owa""" +258 7 negative_sampler """basic""" +258 7 evaluator """rankbased""" +258 8 dataset """kinships""" +258 8 model """ermlp""" +258 8 loss """nssa""" +258 8 regularizer """no""" +258 8 optimizer """adam""" +258 8 training_loop """owa""" +258 8 negative_sampler """basic""" +258 8 evaluator """rankbased""" +258 9 dataset """kinships""" +258 9 model """ermlp""" +258 9 loss """nssa""" +258 9 regularizer """no""" +258 9 optimizer """adam""" +258 9 training_loop """owa""" +258 9 negative_sampler """basic""" +258 9 evaluator """rankbased""" +258 10 dataset """kinships""" +258 10 model """ermlp""" +258 10 loss """nssa""" +258 10 regularizer """no""" +258 10 optimizer """adam""" +258 10 training_loop """owa""" +258 10 negative_sampler """basic""" +258 10 evaluator """rankbased""" +258 11 dataset """kinships""" +258 11 model """ermlp""" +258 11 loss """nssa""" +258 11 regularizer """no""" +258 11 optimizer """adam""" +258 11 training_loop """owa""" +258 11 negative_sampler """basic""" +258 11 evaluator """rankbased""" +258 12 dataset """kinships""" +258 12 model """ermlp""" +258 12 loss """nssa""" +258 12 
regularizer """no""" +258 12 optimizer """adam""" +258 12 training_loop """owa""" +258 12 negative_sampler """basic""" +258 12 evaluator """rankbased""" +258 13 dataset """kinships""" +258 13 model """ermlp""" +258 13 loss """nssa""" +258 13 regularizer """no""" +258 13 optimizer """adam""" +258 13 training_loop """owa""" +258 13 negative_sampler """basic""" +258 13 evaluator """rankbased""" +258 14 dataset """kinships""" +258 14 model """ermlp""" +258 14 loss """nssa""" +258 14 regularizer """no""" +258 14 optimizer """adam""" +258 14 training_loop """owa""" +258 14 negative_sampler """basic""" +258 14 evaluator """rankbased""" +258 15 dataset """kinships""" +258 15 model """ermlp""" +258 15 loss """nssa""" +258 15 regularizer """no""" +258 15 optimizer """adam""" +258 15 training_loop """owa""" +258 15 negative_sampler """basic""" +258 15 evaluator """rankbased""" +258 16 dataset """kinships""" +258 16 model """ermlp""" +258 16 loss """nssa""" +258 16 regularizer """no""" +258 16 optimizer """adam""" +258 16 training_loop """owa""" +258 16 negative_sampler """basic""" +258 16 evaluator """rankbased""" +258 17 dataset """kinships""" +258 17 model """ermlp""" +258 17 loss """nssa""" +258 17 regularizer """no""" +258 17 optimizer """adam""" +258 17 training_loop """owa""" +258 17 negative_sampler """basic""" +258 17 evaluator """rankbased""" +258 18 dataset """kinships""" +258 18 model """ermlp""" +258 18 loss """nssa""" +258 18 regularizer """no""" +258 18 optimizer """adam""" +258 18 training_loop """owa""" +258 18 negative_sampler """basic""" +258 18 evaluator """rankbased""" +258 19 dataset """kinships""" +258 19 model """ermlp""" +258 19 loss """nssa""" +258 19 regularizer """no""" +258 19 optimizer """adam""" +258 19 training_loop """owa""" +258 19 negative_sampler """basic""" +258 19 evaluator """rankbased""" +258 20 dataset """kinships""" +258 20 model """ermlp""" +258 20 loss """nssa""" +258 20 regularizer """no""" +258 20 optimizer """adam""" +258 20 
training_loop """owa""" +258 20 negative_sampler """basic""" +258 20 evaluator """rankbased""" +258 21 dataset """kinships""" +258 21 model """ermlp""" +258 21 loss """nssa""" +258 21 regularizer """no""" +258 21 optimizer """adam""" +258 21 training_loop """owa""" +258 21 negative_sampler """basic""" +258 21 evaluator """rankbased""" +258 22 dataset """kinships""" +258 22 model """ermlp""" +258 22 loss """nssa""" +258 22 regularizer """no""" +258 22 optimizer """adam""" +258 22 training_loop """owa""" +258 22 negative_sampler """basic""" +258 22 evaluator """rankbased""" +258 23 dataset """kinships""" +258 23 model """ermlp""" +258 23 loss """nssa""" +258 23 regularizer """no""" +258 23 optimizer """adam""" +258 23 training_loop """owa""" +258 23 negative_sampler """basic""" +258 23 evaluator """rankbased""" +258 24 dataset """kinships""" +258 24 model """ermlp""" +258 24 loss """nssa""" +258 24 regularizer """no""" +258 24 optimizer """adam""" +258 24 training_loop """owa""" +258 24 negative_sampler """basic""" +258 24 evaluator """rankbased""" +258 25 dataset """kinships""" +258 25 model """ermlp""" +258 25 loss """nssa""" +258 25 regularizer """no""" +258 25 optimizer """adam""" +258 25 training_loop """owa""" +258 25 negative_sampler """basic""" +258 25 evaluator """rankbased""" +258 26 dataset """kinships""" +258 26 model """ermlp""" +258 26 loss """nssa""" +258 26 regularizer """no""" +258 26 optimizer """adam""" +258 26 training_loop """owa""" +258 26 negative_sampler """basic""" +258 26 evaluator """rankbased""" +258 27 dataset """kinships""" +258 27 model """ermlp""" +258 27 loss """nssa""" +258 27 regularizer """no""" +258 27 optimizer """adam""" +258 27 training_loop """owa""" +258 27 negative_sampler """basic""" +258 27 evaluator """rankbased""" +258 28 dataset """kinships""" +258 28 model """ermlp""" +258 28 loss """nssa""" +258 28 regularizer """no""" +258 28 optimizer """adam""" +258 28 training_loop """owa""" +258 28 negative_sampler """basic""" 
+258 28 evaluator """rankbased""" +258 29 dataset """kinships""" +258 29 model """ermlp""" +258 29 loss """nssa""" +258 29 regularizer """no""" +258 29 optimizer """adam""" +258 29 training_loop """owa""" +258 29 negative_sampler """basic""" +258 29 evaluator """rankbased""" +258 30 dataset """kinships""" +258 30 model """ermlp""" +258 30 loss """nssa""" +258 30 regularizer """no""" +258 30 optimizer """adam""" +258 30 training_loop """owa""" +258 30 negative_sampler """basic""" +258 30 evaluator """rankbased""" +258 31 dataset """kinships""" +258 31 model """ermlp""" +258 31 loss """nssa""" +258 31 regularizer """no""" +258 31 optimizer """adam""" +258 31 training_loop """owa""" +258 31 negative_sampler """basic""" +258 31 evaluator """rankbased""" +258 32 dataset """kinships""" +258 32 model """ermlp""" +258 32 loss """nssa""" +258 32 regularizer """no""" +258 32 optimizer """adam""" +258 32 training_loop """owa""" +258 32 negative_sampler """basic""" +258 32 evaluator """rankbased""" +258 33 dataset """kinships""" +258 33 model """ermlp""" +258 33 loss """nssa""" +258 33 regularizer """no""" +258 33 optimizer """adam""" +258 33 training_loop """owa""" +258 33 negative_sampler """basic""" +258 33 evaluator """rankbased""" +258 34 dataset """kinships""" +258 34 model """ermlp""" +258 34 loss """nssa""" +258 34 regularizer """no""" +258 34 optimizer """adam""" +258 34 training_loop """owa""" +258 34 negative_sampler """basic""" +258 34 evaluator """rankbased""" +258 35 dataset """kinships""" +258 35 model """ermlp""" +258 35 loss """nssa""" +258 35 regularizer """no""" +258 35 optimizer """adam""" +258 35 training_loop """owa""" +258 35 negative_sampler """basic""" +258 35 evaluator """rankbased""" +258 36 dataset """kinships""" +258 36 model """ermlp""" +258 36 loss """nssa""" +258 36 regularizer """no""" +258 36 optimizer """adam""" +258 36 training_loop """owa""" +258 36 negative_sampler """basic""" +258 36 evaluator """rankbased""" +258 37 dataset 
"""kinships""" +258 37 model """ermlp""" +258 37 loss """nssa""" +258 37 regularizer """no""" +258 37 optimizer """adam""" +258 37 training_loop """owa""" +258 37 negative_sampler """basic""" +258 37 evaluator """rankbased""" +258 38 dataset """kinships""" +258 38 model """ermlp""" +258 38 loss """nssa""" +258 38 regularizer """no""" +258 38 optimizer """adam""" +258 38 training_loop """owa""" +258 38 negative_sampler """basic""" +258 38 evaluator """rankbased""" +258 39 dataset """kinships""" +258 39 model """ermlp""" +258 39 loss """nssa""" +258 39 regularizer """no""" +258 39 optimizer """adam""" +258 39 training_loop """owa""" +258 39 negative_sampler """basic""" +258 39 evaluator """rankbased""" +258 40 dataset """kinships""" +258 40 model """ermlp""" +258 40 loss """nssa""" +258 40 regularizer """no""" +258 40 optimizer """adam""" +258 40 training_loop """owa""" +258 40 negative_sampler """basic""" +258 40 evaluator """rankbased""" +258 41 dataset """kinships""" +258 41 model """ermlp""" +258 41 loss """nssa""" +258 41 regularizer """no""" +258 41 optimizer """adam""" +258 41 training_loop """owa""" +258 41 negative_sampler """basic""" +258 41 evaluator """rankbased""" +258 42 dataset """kinships""" +258 42 model """ermlp""" +258 42 loss """nssa""" +258 42 regularizer """no""" +258 42 optimizer """adam""" +258 42 training_loop """owa""" +258 42 negative_sampler """basic""" +258 42 evaluator """rankbased""" +258 43 dataset """kinships""" +258 43 model """ermlp""" +258 43 loss """nssa""" +258 43 regularizer """no""" +258 43 optimizer """adam""" +258 43 training_loop """owa""" +258 43 negative_sampler """basic""" +258 43 evaluator """rankbased""" +258 44 dataset """kinships""" +258 44 model """ermlp""" +258 44 loss """nssa""" +258 44 regularizer """no""" +258 44 optimizer """adam""" +258 44 training_loop """owa""" +258 44 negative_sampler """basic""" +258 44 evaluator """rankbased""" +258 45 dataset """kinships""" +258 45 model """ermlp""" +258 45 loss 
"""nssa""" +258 45 regularizer """no""" +258 45 optimizer """adam""" +258 45 training_loop """owa""" +258 45 negative_sampler """basic""" +258 45 evaluator """rankbased""" +258 46 dataset """kinships""" +258 46 model """ermlp""" +258 46 loss """nssa""" +258 46 regularizer """no""" +258 46 optimizer """adam""" +258 46 training_loop """owa""" +258 46 negative_sampler """basic""" +258 46 evaluator """rankbased""" +258 47 dataset """kinships""" +258 47 model """ermlp""" +258 47 loss """nssa""" +258 47 regularizer """no""" +258 47 optimizer """adam""" +258 47 training_loop """owa""" +258 47 negative_sampler """basic""" +258 47 evaluator """rankbased""" +258 48 dataset """kinships""" +258 48 model """ermlp""" +258 48 loss """nssa""" +258 48 regularizer """no""" +258 48 optimizer """adam""" +258 48 training_loop """owa""" +258 48 negative_sampler """basic""" +258 48 evaluator """rankbased""" +258 49 dataset """kinships""" +258 49 model """ermlp""" +258 49 loss """nssa""" +258 49 regularizer """no""" +258 49 optimizer """adam""" +258 49 training_loop """owa""" +258 49 negative_sampler """basic""" +258 49 evaluator """rankbased""" +258 50 dataset """kinships""" +258 50 model """ermlp""" +258 50 loss """nssa""" +258 50 regularizer """no""" +258 50 optimizer """adam""" +258 50 training_loop """owa""" +258 50 negative_sampler """basic""" +258 50 evaluator """rankbased""" +258 51 dataset """kinships""" +258 51 model """ermlp""" +258 51 loss """nssa""" +258 51 regularizer """no""" +258 51 optimizer """adam""" +258 51 training_loop """owa""" +258 51 negative_sampler """basic""" +258 51 evaluator """rankbased""" +258 52 dataset """kinships""" +258 52 model """ermlp""" +258 52 loss """nssa""" +258 52 regularizer """no""" +258 52 optimizer """adam""" +258 52 training_loop """owa""" +258 52 negative_sampler """basic""" +258 52 evaluator """rankbased""" +258 53 dataset """kinships""" +258 53 model """ermlp""" +258 53 loss """nssa""" +258 53 regularizer """no""" +258 53 optimizer 
"""adam""" +258 53 training_loop """owa""" +258 53 negative_sampler """basic""" +258 53 evaluator """rankbased""" +258 54 dataset """kinships""" +258 54 model """ermlp""" +258 54 loss """nssa""" +258 54 regularizer """no""" +258 54 optimizer """adam""" +258 54 training_loop """owa""" +258 54 negative_sampler """basic""" +258 54 evaluator """rankbased""" +258 55 dataset """kinships""" +258 55 model """ermlp""" +258 55 loss """nssa""" +258 55 regularizer """no""" +258 55 optimizer """adam""" +258 55 training_loop """owa""" +258 55 negative_sampler """basic""" +258 55 evaluator """rankbased""" +258 56 dataset """kinships""" +258 56 model """ermlp""" +258 56 loss """nssa""" +258 56 regularizer """no""" +258 56 optimizer """adam""" +258 56 training_loop """owa""" +258 56 negative_sampler """basic""" +258 56 evaluator """rankbased""" +258 57 dataset """kinships""" +258 57 model """ermlp""" +258 57 loss """nssa""" +258 57 regularizer """no""" +258 57 optimizer """adam""" +258 57 training_loop """owa""" +258 57 negative_sampler """basic""" +258 57 evaluator """rankbased""" +258 58 dataset """kinships""" +258 58 model """ermlp""" +258 58 loss """nssa""" +258 58 regularizer """no""" +258 58 optimizer """adam""" +258 58 training_loop """owa""" +258 58 negative_sampler """basic""" +258 58 evaluator """rankbased""" +258 59 dataset """kinships""" +258 59 model """ermlp""" +258 59 loss """nssa""" +258 59 regularizer """no""" +258 59 optimizer """adam""" +258 59 training_loop """owa""" +258 59 negative_sampler """basic""" +258 59 evaluator """rankbased""" +258 60 dataset """kinships""" +258 60 model """ermlp""" +258 60 loss """nssa""" +258 60 regularizer """no""" +258 60 optimizer """adam""" +258 60 training_loop """owa""" +258 60 negative_sampler """basic""" +258 60 evaluator """rankbased""" +258 61 dataset """kinships""" +258 61 model """ermlp""" +258 61 loss """nssa""" +258 61 regularizer """no""" +258 61 optimizer """adam""" +258 61 training_loop """owa""" +258 61 
negative_sampler """basic""" +258 61 evaluator """rankbased""" +258 62 dataset """kinships""" +258 62 model """ermlp""" +258 62 loss """nssa""" +258 62 regularizer """no""" +258 62 optimizer """adam""" +258 62 training_loop """owa""" +258 62 negative_sampler """basic""" +258 62 evaluator """rankbased""" +258 63 dataset """kinships""" +258 63 model """ermlp""" +258 63 loss """nssa""" +258 63 regularizer """no""" +258 63 optimizer """adam""" +258 63 training_loop """owa""" +258 63 negative_sampler """basic""" +258 63 evaluator """rankbased""" +258 64 dataset """kinships""" +258 64 model """ermlp""" +258 64 loss """nssa""" +258 64 regularizer """no""" +258 64 optimizer """adam""" +258 64 training_loop """owa""" +258 64 negative_sampler """basic""" +258 64 evaluator """rankbased""" +258 65 dataset """kinships""" +258 65 model """ermlp""" +258 65 loss """nssa""" +258 65 regularizer """no""" +258 65 optimizer """adam""" +258 65 training_loop """owa""" +258 65 negative_sampler """basic""" +258 65 evaluator """rankbased""" +258 66 dataset """kinships""" +258 66 model """ermlp""" +258 66 loss """nssa""" +258 66 regularizer """no""" +258 66 optimizer """adam""" +258 66 training_loop """owa""" +258 66 negative_sampler """basic""" +258 66 evaluator """rankbased""" +258 67 dataset """kinships""" +258 67 model """ermlp""" +258 67 loss """nssa""" +258 67 regularizer """no""" +258 67 optimizer """adam""" +258 67 training_loop """owa""" +258 67 negative_sampler """basic""" +258 67 evaluator """rankbased""" +258 68 dataset """kinships""" +258 68 model """ermlp""" +258 68 loss """nssa""" +258 68 regularizer """no""" +258 68 optimizer """adam""" +258 68 training_loop """owa""" +258 68 negative_sampler """basic""" +258 68 evaluator """rankbased""" +258 69 dataset """kinships""" +258 69 model """ermlp""" +258 69 loss """nssa""" +258 69 regularizer """no""" +258 69 optimizer """adam""" +258 69 training_loop """owa""" +258 69 negative_sampler """basic""" +258 69 evaluator """rankbased""" 
+258 70 dataset """kinships""" +258 70 model """ermlp""" +258 70 loss """nssa""" +258 70 regularizer """no""" +258 70 optimizer """adam""" +258 70 training_loop """owa""" +258 70 negative_sampler """basic""" +258 70 evaluator """rankbased""" +258 71 dataset """kinships""" +258 71 model """ermlp""" +258 71 loss """nssa""" +258 71 regularizer """no""" +258 71 optimizer """adam""" +258 71 training_loop """owa""" +258 71 negative_sampler """basic""" +258 71 evaluator """rankbased""" +258 72 dataset """kinships""" +258 72 model """ermlp""" +258 72 loss """nssa""" +258 72 regularizer """no""" +258 72 optimizer """adam""" +258 72 training_loop """owa""" +258 72 negative_sampler """basic""" +258 72 evaluator """rankbased""" +258 73 dataset """kinships""" +258 73 model """ermlp""" +258 73 loss """nssa""" +258 73 regularizer """no""" +258 73 optimizer """adam""" +258 73 training_loop """owa""" +258 73 negative_sampler """basic""" +258 73 evaluator """rankbased""" +258 74 dataset """kinships""" +258 74 model """ermlp""" +258 74 loss """nssa""" +258 74 regularizer """no""" +258 74 optimizer """adam""" +258 74 training_loop """owa""" +258 74 negative_sampler """basic""" +258 74 evaluator """rankbased""" +258 75 dataset """kinships""" +258 75 model """ermlp""" +258 75 loss """nssa""" +258 75 regularizer """no""" +258 75 optimizer """adam""" +258 75 training_loop """owa""" +258 75 negative_sampler """basic""" +258 75 evaluator """rankbased""" +258 76 dataset """kinships""" +258 76 model """ermlp""" +258 76 loss """nssa""" +258 76 regularizer """no""" +258 76 optimizer """adam""" +258 76 training_loop """owa""" +258 76 negative_sampler """basic""" +258 76 evaluator """rankbased""" +258 77 dataset """kinships""" +258 77 model """ermlp""" +258 77 loss """nssa""" +258 77 regularizer """no""" +258 77 optimizer """adam""" +258 77 training_loop """owa""" +258 77 negative_sampler """basic""" +258 77 evaluator """rankbased""" +258 78 dataset """kinships""" +258 78 model """ermlp""" +258 
78 loss """nssa""" +258 78 regularizer """no""" +258 78 optimizer """adam""" +258 78 training_loop """owa""" +258 78 negative_sampler """basic""" +258 78 evaluator """rankbased""" +258 79 dataset """kinships""" +258 79 model """ermlp""" +258 79 loss """nssa""" +258 79 regularizer """no""" +258 79 optimizer """adam""" +258 79 training_loop """owa""" +258 79 negative_sampler """basic""" +258 79 evaluator """rankbased""" +258 80 dataset """kinships""" +258 80 model """ermlp""" +258 80 loss """nssa""" +258 80 regularizer """no""" +258 80 optimizer """adam""" +258 80 training_loop """owa""" +258 80 negative_sampler """basic""" +258 80 evaluator """rankbased""" +258 81 dataset """kinships""" +258 81 model """ermlp""" +258 81 loss """nssa""" +258 81 regularizer """no""" +258 81 optimizer """adam""" +258 81 training_loop """owa""" +258 81 negative_sampler """basic""" +258 81 evaluator """rankbased""" +258 82 dataset """kinships""" +258 82 model """ermlp""" +258 82 loss """nssa""" +258 82 regularizer """no""" +258 82 optimizer """adam""" +258 82 training_loop """owa""" +258 82 negative_sampler """basic""" +258 82 evaluator """rankbased""" +258 83 dataset """kinships""" +258 83 model """ermlp""" +258 83 loss """nssa""" +258 83 regularizer """no""" +258 83 optimizer """adam""" +258 83 training_loop """owa""" +258 83 negative_sampler """basic""" +258 83 evaluator """rankbased""" +258 84 dataset """kinships""" +258 84 model """ermlp""" +258 84 loss """nssa""" +258 84 regularizer """no""" +258 84 optimizer """adam""" +258 84 training_loop """owa""" +258 84 negative_sampler """basic""" +258 84 evaluator """rankbased""" +258 85 dataset """kinships""" +258 85 model """ermlp""" +258 85 loss """nssa""" +258 85 regularizer """no""" +258 85 optimizer """adam""" +258 85 training_loop """owa""" +258 85 negative_sampler """basic""" +258 85 evaluator """rankbased""" +258 86 dataset """kinships""" +258 86 model """ermlp""" +258 86 loss """nssa""" +258 86 regularizer """no""" +258 86 
optimizer """adam""" +258 86 training_loop """owa""" +258 86 negative_sampler """basic""" +258 86 evaluator """rankbased""" +258 87 dataset """kinships""" +258 87 model """ermlp""" +258 87 loss """nssa""" +258 87 regularizer """no""" +258 87 optimizer """adam""" +258 87 training_loop """owa""" +258 87 negative_sampler """basic""" +258 87 evaluator """rankbased""" +258 88 dataset """kinships""" +258 88 model """ermlp""" +258 88 loss """nssa""" +258 88 regularizer """no""" +258 88 optimizer """adam""" +258 88 training_loop """owa""" +258 88 negative_sampler """basic""" +258 88 evaluator """rankbased""" +258 89 dataset """kinships""" +258 89 model """ermlp""" +258 89 loss """nssa""" +258 89 regularizer """no""" +258 89 optimizer """adam""" +258 89 training_loop """owa""" +258 89 negative_sampler """basic""" +258 89 evaluator """rankbased""" +258 90 dataset """kinships""" +258 90 model """ermlp""" +258 90 loss """nssa""" +258 90 regularizer """no""" +258 90 optimizer """adam""" +258 90 training_loop """owa""" +258 90 negative_sampler """basic""" +258 90 evaluator """rankbased""" +258 91 dataset """kinships""" +258 91 model """ermlp""" +258 91 loss """nssa""" +258 91 regularizer """no""" +258 91 optimizer """adam""" +258 91 training_loop """owa""" +258 91 negative_sampler """basic""" +258 91 evaluator """rankbased""" +258 92 dataset """kinships""" +258 92 model """ermlp""" +258 92 loss """nssa""" +258 92 regularizer """no""" +258 92 optimizer """adam""" +258 92 training_loop """owa""" +258 92 negative_sampler """basic""" +258 92 evaluator """rankbased""" +258 93 dataset """kinships""" +258 93 model """ermlp""" +258 93 loss """nssa""" +258 93 regularizer """no""" +258 93 optimizer """adam""" +258 93 training_loop """owa""" +258 93 negative_sampler """basic""" +258 93 evaluator """rankbased""" +258 94 dataset """kinships""" +258 94 model """ermlp""" +258 94 loss """nssa""" +258 94 regularizer """no""" +258 94 optimizer """adam""" +258 94 training_loop """owa""" +258 94 
negative_sampler """basic""" +258 94 evaluator """rankbased""" +258 95 dataset """kinships""" +258 95 model """ermlp""" +258 95 loss """nssa""" +258 95 regularizer """no""" +258 95 optimizer """adam""" +258 95 training_loop """owa""" +258 95 negative_sampler """basic""" +258 95 evaluator """rankbased""" +258 96 dataset """kinships""" +258 96 model """ermlp""" +258 96 loss """nssa""" +258 96 regularizer """no""" +258 96 optimizer """adam""" +258 96 training_loop """owa""" +258 96 negative_sampler """basic""" +258 96 evaluator """rankbased""" +258 97 dataset """kinships""" +258 97 model """ermlp""" +258 97 loss """nssa""" +258 97 regularizer """no""" +258 97 optimizer """adam""" +258 97 training_loop """owa""" +258 97 negative_sampler """basic""" +258 97 evaluator """rankbased""" +258 98 dataset """kinships""" +258 98 model """ermlp""" +258 98 loss """nssa""" +258 98 regularizer """no""" +258 98 optimizer """adam""" +258 98 training_loop """owa""" +258 98 negative_sampler """basic""" +258 98 evaluator """rankbased""" +258 99 dataset """kinships""" +258 99 model """ermlp""" +258 99 loss """nssa""" +258 99 regularizer """no""" +258 99 optimizer """adam""" +258 99 training_loop """owa""" +258 99 negative_sampler """basic""" +258 99 evaluator """rankbased""" +258 100 dataset """kinships""" +258 100 model """ermlp""" +258 100 loss """nssa""" +258 100 regularizer """no""" +258 100 optimizer """adam""" +258 100 training_loop """owa""" +258 100 negative_sampler """basic""" +258 100 evaluator """rankbased""" +259 1 model.embedding_dim 0.0 +259 1 loss.margin 22.562063860516012 +259 1 loss.adversarial_temperature 0.5514477229304799 +259 1 optimizer.lr 0.034455504658338125 +259 1 negative_sampler.num_negs_per_pos 5.0 +259 1 training.batch_size 2.0 +259 2 model.embedding_dim 2.0 +259 2 loss.margin 24.965342525311463 +259 2 loss.adversarial_temperature 0.8166320252645315 +259 2 optimizer.lr 0.034559120580206326 +259 2 negative_sampler.num_negs_per_pos 87.0 +259 2 
training.batch_size 0.0 +259 3 model.embedding_dim 1.0 +259 3 loss.margin 28.161094739098683 +259 3 loss.adversarial_temperature 0.4738292073862146 +259 3 optimizer.lr 0.010321960650192228 +259 3 negative_sampler.num_negs_per_pos 36.0 +259 3 training.batch_size 2.0 +259 4 model.embedding_dim 0.0 +259 4 loss.margin 23.602766942591177 +259 4 loss.adversarial_temperature 0.36041443951214125 +259 4 optimizer.lr 0.007607531626303039 +259 4 negative_sampler.num_negs_per_pos 79.0 +259 4 training.batch_size 2.0 +259 5 model.embedding_dim 0.0 +259 5 loss.margin 20.113568193154617 +259 5 loss.adversarial_temperature 0.9204956022495429 +259 5 optimizer.lr 0.0012973233525041543 +259 5 negative_sampler.num_negs_per_pos 39.0 +259 5 training.batch_size 2.0 +259 6 model.embedding_dim 1.0 +259 6 loss.margin 6.963258497502317 +259 6 loss.adversarial_temperature 0.36440030805631063 +259 6 optimizer.lr 0.08066266445900336 +259 6 negative_sampler.num_negs_per_pos 72.0 +259 6 training.batch_size 1.0 +259 7 model.embedding_dim 0.0 +259 7 loss.margin 21.88900619874081 +259 7 loss.adversarial_temperature 0.12545888124504126 +259 7 optimizer.lr 0.05542951286574928 +259 7 negative_sampler.num_negs_per_pos 87.0 +259 7 training.batch_size 1.0 +259 8 model.embedding_dim 0.0 +259 8 loss.margin 11.970840296190943 +259 8 loss.adversarial_temperature 0.2929049476624734 +259 8 optimizer.lr 0.0800839251634255 +259 8 negative_sampler.num_negs_per_pos 48.0 +259 8 training.batch_size 1.0 +259 9 model.embedding_dim 0.0 +259 9 loss.margin 13.145895969694507 +259 9 loss.adversarial_temperature 0.968593942603209 +259 9 optimizer.lr 0.001962443125365504 +259 9 negative_sampler.num_negs_per_pos 61.0 +259 9 training.batch_size 0.0 +259 10 model.embedding_dim 0.0 +259 10 loss.margin 19.527096381342755 +259 10 loss.adversarial_temperature 0.8412320080400243 +259 10 optimizer.lr 0.0010505385871184315 +259 10 negative_sampler.num_negs_per_pos 3.0 +259 10 training.batch_size 1.0 +259 11 model.embedding_dim 0.0 +259 
11 loss.margin 11.944384973280789 +259 11 loss.adversarial_temperature 0.6309312922161646 +259 11 optimizer.lr 0.008476499902541897 +259 11 negative_sampler.num_negs_per_pos 23.0 +259 11 training.batch_size 1.0 +259 12 model.embedding_dim 2.0 +259 12 loss.margin 22.07638591715777 +259 12 loss.adversarial_temperature 0.39982622048731753 +259 12 optimizer.lr 0.0033327369131014354 +259 12 negative_sampler.num_negs_per_pos 63.0 +259 12 training.batch_size 1.0 +259 13 model.embedding_dim 2.0 +259 13 loss.margin 5.140885663712675 +259 13 loss.adversarial_temperature 0.7517469758455742 +259 13 optimizer.lr 0.011053612435772048 +259 13 negative_sampler.num_negs_per_pos 42.0 +259 13 training.batch_size 2.0 +259 14 model.embedding_dim 2.0 +259 14 loss.margin 26.43921085418209 +259 14 loss.adversarial_temperature 0.7355712674726014 +259 14 optimizer.lr 0.060676171867577004 +259 14 negative_sampler.num_negs_per_pos 59.0 +259 14 training.batch_size 2.0 +259 15 model.embedding_dim 2.0 +259 15 loss.margin 10.67904871966849 +259 15 loss.adversarial_temperature 0.694706844541897 +259 15 optimizer.lr 0.004463426927278342 +259 15 negative_sampler.num_negs_per_pos 68.0 +259 15 training.batch_size 2.0 +259 16 model.embedding_dim 0.0 +259 16 loss.margin 26.18271755175181 +259 16 loss.adversarial_temperature 0.37069135066888914 +259 16 optimizer.lr 0.06338398889579125 +259 16 negative_sampler.num_negs_per_pos 43.0 +259 16 training.batch_size 1.0 +259 17 model.embedding_dim 2.0 +259 17 loss.margin 13.518491475635086 +259 17 loss.adversarial_temperature 0.5478149246154499 +259 17 optimizer.lr 0.061703408562415574 +259 17 negative_sampler.num_negs_per_pos 21.0 +259 17 training.batch_size 1.0 +259 18 model.embedding_dim 0.0 +259 18 loss.margin 7.458435930448148 +259 18 loss.adversarial_temperature 0.7977275498308654 +259 18 optimizer.lr 0.047661108572357676 +259 18 negative_sampler.num_negs_per_pos 97.0 +259 18 training.batch_size 2.0 +259 19 model.embedding_dim 2.0 +259 19 loss.margin 
13.747985913792201 +259 19 loss.adversarial_temperature 0.463976422937856 +259 19 optimizer.lr 0.014321647977780598 +259 19 negative_sampler.num_negs_per_pos 68.0 +259 19 training.batch_size 1.0 +259 20 model.embedding_dim 0.0 +259 20 loss.margin 20.999415244571992 +259 20 loss.adversarial_temperature 0.7945125112791832 +259 20 optimizer.lr 0.020008241784942756 +259 20 negative_sampler.num_negs_per_pos 40.0 +259 20 training.batch_size 2.0 +259 21 model.embedding_dim 0.0 +259 21 loss.margin 21.890639608677205 +259 21 loss.adversarial_temperature 0.36819882692541583 +259 21 optimizer.lr 0.013651285778275735 +259 21 negative_sampler.num_negs_per_pos 94.0 +259 21 training.batch_size 1.0 +259 22 model.embedding_dim 2.0 +259 22 loss.margin 26.763646710012633 +259 22 loss.adversarial_temperature 0.7884472383126485 +259 22 optimizer.lr 0.08432929158145297 +259 22 negative_sampler.num_negs_per_pos 55.0 +259 22 training.batch_size 2.0 +259 23 model.embedding_dim 1.0 +259 23 loss.margin 4.907467364503441 +259 23 loss.adversarial_temperature 0.2745555266834445 +259 23 optimizer.lr 0.08145154680400392 +259 23 negative_sampler.num_negs_per_pos 31.0 +259 23 training.batch_size 2.0 +259 24 model.embedding_dim 0.0 +259 24 loss.margin 19.037976324932224 +259 24 loss.adversarial_temperature 0.980775088143408 +259 24 optimizer.lr 0.0010235761921073027 +259 24 negative_sampler.num_negs_per_pos 93.0 +259 24 training.batch_size 0.0 +259 25 model.embedding_dim 1.0 +259 25 loss.margin 6.5392035669989 +259 25 loss.adversarial_temperature 0.6153013030789167 +259 25 optimizer.lr 0.0354055306626479 +259 25 negative_sampler.num_negs_per_pos 10.0 +259 25 training.batch_size 1.0 +259 26 model.embedding_dim 2.0 +259 26 loss.margin 19.56565455021179 +259 26 loss.adversarial_temperature 0.8629910010339193 +259 26 optimizer.lr 0.09238115513270323 +259 26 negative_sampler.num_negs_per_pos 29.0 +259 26 training.batch_size 1.0 +259 27 model.embedding_dim 1.0 +259 27 loss.margin 24.252369801737604 +259 
27 loss.adversarial_temperature 0.3341527960026593 +259 27 optimizer.lr 0.0036796029447190076 +259 27 negative_sampler.num_negs_per_pos 85.0 +259 27 training.batch_size 0.0 +259 28 model.embedding_dim 0.0 +259 28 loss.margin 20.598894595809817 +259 28 loss.adversarial_temperature 0.3009887151763536 +259 28 optimizer.lr 0.009889233824264172 +259 28 negative_sampler.num_negs_per_pos 87.0 +259 28 training.batch_size 1.0 +259 29 model.embedding_dim 1.0 +259 29 loss.margin 22.571512602035913 +259 29 loss.adversarial_temperature 0.5944291477735437 +259 29 optimizer.lr 0.05460888979332051 +259 29 negative_sampler.num_negs_per_pos 81.0 +259 29 training.batch_size 2.0 +259 30 model.embedding_dim 1.0 +259 30 loss.margin 22.65872728793664 +259 30 loss.adversarial_temperature 0.2927809005874534 +259 30 optimizer.lr 0.004023816543485575 +259 30 negative_sampler.num_negs_per_pos 11.0 +259 30 training.batch_size 1.0 +259 31 model.embedding_dim 1.0 +259 31 loss.margin 26.112777163738905 +259 31 loss.adversarial_temperature 0.8247655748135246 +259 31 optimizer.lr 0.0825858033344382 +259 31 negative_sampler.num_negs_per_pos 82.0 +259 31 training.batch_size 1.0 +259 32 model.embedding_dim 0.0 +259 32 loss.margin 29.629824918895263 +259 32 loss.adversarial_temperature 0.4358004595963376 +259 32 optimizer.lr 0.010358096811501108 +259 32 negative_sampler.num_negs_per_pos 58.0 +259 32 training.batch_size 2.0 +259 33 model.embedding_dim 0.0 +259 33 loss.margin 24.006178977966005 +259 33 loss.adversarial_temperature 0.9024667272414922 +259 33 optimizer.lr 0.0012161489624252107 +259 33 negative_sampler.num_negs_per_pos 77.0 +259 33 training.batch_size 2.0 +259 34 model.embedding_dim 2.0 +259 34 loss.margin 9.32482066576326 +259 34 loss.adversarial_temperature 0.2913110529082692 +259 34 optimizer.lr 0.009968294651801386 +259 34 negative_sampler.num_negs_per_pos 55.0 +259 34 training.batch_size 0.0 +259 35 model.embedding_dim 1.0 +259 35 loss.margin 1.3530906096088957 +259 35 
loss.adversarial_temperature 0.644375891946531 +259 35 optimizer.lr 0.0021197975697738935 +259 35 negative_sampler.num_negs_per_pos 52.0 +259 35 training.batch_size 2.0 +259 36 model.embedding_dim 0.0 +259 36 loss.margin 20.647153842396854 +259 36 loss.adversarial_temperature 0.8268521447219591 +259 36 optimizer.lr 0.07199006097386483 +259 36 negative_sampler.num_negs_per_pos 82.0 +259 36 training.batch_size 2.0 +259 37 model.embedding_dim 0.0 +259 37 loss.margin 7.3699605538314215 +259 37 loss.adversarial_temperature 0.9128381913002381 +259 37 optimizer.lr 0.0036332448099663825 +259 37 negative_sampler.num_negs_per_pos 23.0 +259 37 training.batch_size 2.0 +259 38 model.embedding_dim 1.0 +259 38 loss.margin 29.91777816796684 +259 38 loss.adversarial_temperature 0.12329093888967291 +259 38 optimizer.lr 0.010604375228410005 +259 38 negative_sampler.num_negs_per_pos 86.0 +259 38 training.batch_size 0.0 +259 39 model.embedding_dim 1.0 +259 39 loss.margin 28.431595154458382 +259 39 loss.adversarial_temperature 0.8064440013586192 +259 39 optimizer.lr 0.0024283474066695 +259 39 negative_sampler.num_negs_per_pos 12.0 +259 39 training.batch_size 1.0 +259 40 model.embedding_dim 2.0 +259 40 loss.margin 7.118464167725784 +259 40 loss.adversarial_temperature 0.4775503523957203 +259 40 optimizer.lr 0.05633555221955186 +259 40 negative_sampler.num_negs_per_pos 92.0 +259 40 training.batch_size 1.0 +259 41 model.embedding_dim 2.0 +259 41 loss.margin 22.048161066989373 +259 41 loss.adversarial_temperature 0.655388317577987 +259 41 optimizer.lr 0.004197889803154288 +259 41 negative_sampler.num_negs_per_pos 63.0 +259 41 training.batch_size 1.0 +259 42 model.embedding_dim 0.0 +259 42 loss.margin 21.850426290348445 +259 42 loss.adversarial_temperature 0.7817632437079566 +259 42 optimizer.lr 0.013869833273609128 +259 42 negative_sampler.num_negs_per_pos 74.0 +259 42 training.batch_size 0.0 +259 43 model.embedding_dim 1.0 +259 43 loss.margin 13.800538263811251 +259 43 
loss.adversarial_temperature 0.7458020649862594 +259 43 optimizer.lr 0.028922612865234634 +259 43 negative_sampler.num_negs_per_pos 56.0 +259 43 training.batch_size 1.0 +259 44 model.embedding_dim 0.0 +259 44 loss.margin 15.900204523932553 +259 44 loss.adversarial_temperature 0.5723774249157182 +259 44 optimizer.lr 0.0018708991539248567 +259 44 negative_sampler.num_negs_per_pos 59.0 +259 44 training.batch_size 0.0 +259 45 model.embedding_dim 1.0 +259 45 loss.margin 3.7020686551009083 +259 45 loss.adversarial_temperature 0.10071097666559536 +259 45 optimizer.lr 0.030549803021280193 +259 45 negative_sampler.num_negs_per_pos 47.0 +259 45 training.batch_size 0.0 +259 46 model.embedding_dim 1.0 +259 46 loss.margin 17.474442821142222 +259 46 loss.adversarial_temperature 0.5070028291190677 +259 46 optimizer.lr 0.00710848243981304 +259 46 negative_sampler.num_negs_per_pos 10.0 +259 46 training.batch_size 1.0 +259 47 model.embedding_dim 2.0 +259 47 loss.margin 12.811711056906734 +259 47 loss.adversarial_temperature 0.7050760181982715 +259 47 optimizer.lr 0.003450656214784065 +259 47 negative_sampler.num_negs_per_pos 44.0 +259 47 training.batch_size 2.0 +259 48 model.embedding_dim 2.0 +259 48 loss.margin 22.738250002130084 +259 48 loss.adversarial_temperature 0.40076665103304876 +259 48 optimizer.lr 0.043344327818985645 +259 48 negative_sampler.num_negs_per_pos 74.0 +259 48 training.batch_size 0.0 +259 49 model.embedding_dim 2.0 +259 49 loss.margin 16.358790437994358 +259 49 loss.adversarial_temperature 0.2948876923040278 +259 49 optimizer.lr 0.07236341520835088 +259 49 negative_sampler.num_negs_per_pos 32.0 +259 49 training.batch_size 1.0 +259 50 model.embedding_dim 1.0 +259 50 loss.margin 25.43148561343839 +259 50 loss.adversarial_temperature 0.6926207579921586 +259 50 optimizer.lr 0.021168343791475416 +259 50 negative_sampler.num_negs_per_pos 4.0 +259 50 training.batch_size 2.0 +259 51 model.embedding_dim 2.0 +259 51 loss.margin 19.194635505385683 +259 51 
loss.adversarial_temperature 0.5382939247510001 +259 51 optimizer.lr 0.04261651437614825 +259 51 negative_sampler.num_negs_per_pos 61.0 +259 51 training.batch_size 0.0 +259 52 model.embedding_dim 0.0 +259 52 loss.margin 5.382011249923084 +259 52 loss.adversarial_temperature 0.441429358570386 +259 52 optimizer.lr 0.058327389086203225 +259 52 negative_sampler.num_negs_per_pos 1.0 +259 52 training.batch_size 2.0 +259 53 model.embedding_dim 1.0 +259 53 loss.margin 19.98631137837899 +259 53 loss.adversarial_temperature 0.5912869331277495 +259 53 optimizer.lr 0.0015443086097828992 +259 53 negative_sampler.num_negs_per_pos 71.0 +259 53 training.batch_size 0.0 +259 54 model.embedding_dim 1.0 +259 54 loss.margin 11.628659866157598 +259 54 loss.adversarial_temperature 0.9680517862585618 +259 54 optimizer.lr 0.0022265034649512935 +259 54 negative_sampler.num_negs_per_pos 10.0 +259 54 training.batch_size 0.0 +259 55 model.embedding_dim 2.0 +259 55 loss.margin 19.396575943794467 +259 55 loss.adversarial_temperature 0.5210479354510413 +259 55 optimizer.lr 0.02943634835697261 +259 55 negative_sampler.num_negs_per_pos 92.0 +259 55 training.batch_size 0.0 +259 56 model.embedding_dim 2.0 +259 56 loss.margin 27.137865847601947 +259 56 loss.adversarial_temperature 0.11807776037091017 +259 56 optimizer.lr 0.003190095538398515 +259 56 negative_sampler.num_negs_per_pos 65.0 +259 56 training.batch_size 1.0 +259 57 model.embedding_dim 0.0 +259 57 loss.margin 12.56473636778183 +259 57 loss.adversarial_temperature 0.12934341931481427 +259 57 optimizer.lr 0.01737627015937315 +259 57 negative_sampler.num_negs_per_pos 47.0 +259 57 training.batch_size 1.0 +259 58 model.embedding_dim 2.0 +259 58 loss.margin 6.555493480127828 +259 58 loss.adversarial_temperature 0.6617052509685374 +259 58 optimizer.lr 0.0036120216145765854 +259 58 negative_sampler.num_negs_per_pos 96.0 +259 58 training.batch_size 1.0 +259 59 model.embedding_dim 0.0 +259 59 loss.margin 5.4372568425939924 +259 59 
loss.adversarial_temperature 0.6742624916637049 +259 59 optimizer.lr 0.013700448551331032 +259 59 negative_sampler.num_negs_per_pos 98.0 +259 59 training.batch_size 0.0 +259 60 model.embedding_dim 1.0 +259 60 loss.margin 2.287791158371478 +259 60 loss.adversarial_temperature 0.913403494792378 +259 60 optimizer.lr 0.010400865123065999 +259 60 negative_sampler.num_negs_per_pos 77.0 +259 60 training.batch_size 1.0 +259 61 model.embedding_dim 0.0 +259 61 loss.margin 18.63882285588124 +259 61 loss.adversarial_temperature 0.9699043079263255 +259 61 optimizer.lr 0.01190079254119473 +259 61 negative_sampler.num_negs_per_pos 10.0 +259 61 training.batch_size 1.0 +259 62 model.embedding_dim 0.0 +259 62 loss.margin 19.165701800447813 +259 62 loss.adversarial_temperature 0.9282654529284721 +259 62 optimizer.lr 0.01950562613951369 +259 62 negative_sampler.num_negs_per_pos 97.0 +259 62 training.batch_size 0.0 +259 63 model.embedding_dim 0.0 +259 63 loss.margin 28.660505193819425 +259 63 loss.adversarial_temperature 0.4701959034907517 +259 63 optimizer.lr 0.0463071517197528 +259 63 negative_sampler.num_negs_per_pos 74.0 +259 63 training.batch_size 2.0 +259 64 model.embedding_dim 1.0 +259 64 loss.margin 6.652692293150882 +259 64 loss.adversarial_temperature 0.5919942660642743 +259 64 optimizer.lr 0.004847216878354315 +259 64 negative_sampler.num_negs_per_pos 1.0 +259 64 training.batch_size 0.0 +259 65 model.embedding_dim 2.0 +259 65 loss.margin 8.861710062224045 +259 65 loss.adversarial_temperature 0.8506670779921857 +259 65 optimizer.lr 0.009367543425720474 +259 65 negative_sampler.num_negs_per_pos 11.0 +259 65 training.batch_size 1.0 +259 66 model.embedding_dim 0.0 +259 66 loss.margin 8.061824886490673 +259 66 loss.adversarial_temperature 0.47606238618478436 +259 66 optimizer.lr 0.003013788976101587 +259 66 negative_sampler.num_negs_per_pos 40.0 +259 66 training.batch_size 2.0 +259 67 model.embedding_dim 2.0 +259 67 loss.margin 8.222103640233989 +259 67 
loss.adversarial_temperature 0.9411217466931526 +259 67 optimizer.lr 0.08409955790269798 +259 67 negative_sampler.num_negs_per_pos 3.0 +259 67 training.batch_size 0.0 +259 68 model.embedding_dim 2.0 +259 68 loss.margin 26.524811538925757 +259 68 loss.adversarial_temperature 0.26206746869193864 +259 68 optimizer.lr 0.0060520433681441835 +259 68 negative_sampler.num_negs_per_pos 51.0 +259 68 training.batch_size 1.0 +259 69 model.embedding_dim 2.0 +259 69 loss.margin 16.103801915521963 +259 69 loss.adversarial_temperature 0.2091374180921981 +259 69 optimizer.lr 0.015315888196287984 +259 69 negative_sampler.num_negs_per_pos 68.0 +259 69 training.batch_size 2.0 +259 70 model.embedding_dim 2.0 +259 70 loss.margin 27.81041277690303 +259 70 loss.adversarial_temperature 0.778342564457216 +259 70 optimizer.lr 0.024247761575520166 +259 70 negative_sampler.num_negs_per_pos 77.0 +259 70 training.batch_size 1.0 +259 71 model.embedding_dim 2.0 +259 71 loss.margin 25.81859880691383 +259 71 loss.adversarial_temperature 0.7046289515120354 +259 71 optimizer.lr 0.0012243335119520053 +259 71 negative_sampler.num_negs_per_pos 4.0 +259 71 training.batch_size 1.0 +259 72 model.embedding_dim 0.0 +259 72 loss.margin 2.626923177538001 +259 72 loss.adversarial_temperature 0.4576207174831146 +259 72 optimizer.lr 0.002754782312425863 +259 72 negative_sampler.num_negs_per_pos 69.0 +259 72 training.batch_size 1.0 +259 73 model.embedding_dim 0.0 +259 73 loss.margin 3.6335456040482312 +259 73 loss.adversarial_temperature 0.5826934853521339 +259 73 optimizer.lr 0.028506382446617535 +259 73 negative_sampler.num_negs_per_pos 7.0 +259 73 training.batch_size 2.0 +259 74 model.embedding_dim 0.0 +259 74 loss.margin 19.761450904873218 +259 74 loss.adversarial_temperature 0.8941716023781199 +259 74 optimizer.lr 0.003790088320767105 +259 74 negative_sampler.num_negs_per_pos 67.0 +259 74 training.batch_size 0.0 +259 75 model.embedding_dim 0.0 +259 75 loss.margin 5.405591027525913 +259 75 
loss.adversarial_temperature 0.6253715834459954 +259 75 optimizer.lr 0.009317671722106673 +259 75 negative_sampler.num_negs_per_pos 13.0 +259 75 training.batch_size 1.0 +259 76 model.embedding_dim 2.0 +259 76 loss.margin 4.648943625764117 +259 76 loss.adversarial_temperature 0.8207105516475588 +259 76 optimizer.lr 0.04698850421267914 +259 76 negative_sampler.num_negs_per_pos 15.0 +259 76 training.batch_size 0.0 +259 77 model.embedding_dim 1.0 +259 77 loss.margin 16.30163343835939 +259 77 loss.adversarial_temperature 0.516074400835455 +259 77 optimizer.lr 0.0012006170178653992 +259 77 negative_sampler.num_negs_per_pos 43.0 +259 77 training.batch_size 0.0 +259 78 model.embedding_dim 1.0 +259 78 loss.margin 21.02150412530993 +259 78 loss.adversarial_temperature 0.9664549852007578 +259 78 optimizer.lr 0.07189281566567723 +259 78 negative_sampler.num_negs_per_pos 49.0 +259 78 training.batch_size 1.0 +259 79 model.embedding_dim 1.0 +259 79 loss.margin 17.123126763940828 +259 79 loss.adversarial_temperature 0.8929698159844038 +259 79 optimizer.lr 0.09344897896875995 +259 79 negative_sampler.num_negs_per_pos 64.0 +259 79 training.batch_size 2.0 +259 80 model.embedding_dim 2.0 +259 80 loss.margin 4.397706601856131 +259 80 loss.adversarial_temperature 0.4914900727502427 +259 80 optimizer.lr 0.0014283483261119782 +259 80 negative_sampler.num_negs_per_pos 55.0 +259 80 training.batch_size 2.0 +259 81 model.embedding_dim 0.0 +259 81 loss.margin 21.068756999428167 +259 81 loss.adversarial_temperature 0.9411307622768622 +259 81 optimizer.lr 0.01419979290360184 +259 81 negative_sampler.num_negs_per_pos 40.0 +259 81 training.batch_size 2.0 +259 82 model.embedding_dim 0.0 +259 82 loss.margin 9.953711076391162 +259 82 loss.adversarial_temperature 0.6915158670540904 +259 82 optimizer.lr 0.03566707354974146 +259 82 negative_sampler.num_negs_per_pos 58.0 +259 82 training.batch_size 1.0 +259 83 model.embedding_dim 2.0 +259 83 loss.margin 23.12534663848006 +259 83 
loss.adversarial_temperature 0.6001630092448726 +259 83 optimizer.lr 0.0013969294054249975 +259 83 negative_sampler.num_negs_per_pos 0.0 +259 83 training.batch_size 0.0 +259 84 model.embedding_dim 2.0 +259 84 loss.margin 11.30420690316784 +259 84 loss.adversarial_temperature 0.7061136009936754 +259 84 optimizer.lr 0.0547426875558149 +259 84 negative_sampler.num_negs_per_pos 90.0 +259 84 training.batch_size 0.0 +259 85 model.embedding_dim 0.0 +259 85 loss.margin 20.69555386148468 +259 85 loss.adversarial_temperature 0.8036471254054207 +259 85 optimizer.lr 0.0016012723330447714 +259 85 negative_sampler.num_negs_per_pos 29.0 +259 85 training.batch_size 2.0 +259 86 model.embedding_dim 2.0 +259 86 loss.margin 19.757973509538953 +259 86 loss.adversarial_temperature 0.45137077808353476 +259 86 optimizer.lr 0.01090810397064322 +259 86 negative_sampler.num_negs_per_pos 74.0 +259 86 training.batch_size 2.0 +259 87 model.embedding_dim 0.0 +259 87 loss.margin 6.661142203105685 +259 87 loss.adversarial_temperature 0.8888833437026948 +259 87 optimizer.lr 0.00395369946520007 +259 87 negative_sampler.num_negs_per_pos 21.0 +259 87 training.batch_size 1.0 +259 88 model.embedding_dim 2.0 +259 88 loss.margin 6.479888695023616 +259 88 loss.adversarial_temperature 0.9820343166649431 +259 88 optimizer.lr 0.0012424579292577254 +259 88 negative_sampler.num_negs_per_pos 85.0 +259 88 training.batch_size 2.0 +259 89 model.embedding_dim 2.0 +259 89 loss.margin 1.9543679908240694 +259 89 loss.adversarial_temperature 0.49142930480909197 +259 89 optimizer.lr 0.006009550586025948 +259 89 negative_sampler.num_negs_per_pos 63.0 +259 89 training.batch_size 2.0 +259 90 model.embedding_dim 1.0 +259 90 loss.margin 19.85509414435947 +259 90 loss.adversarial_temperature 0.6577455870346974 +259 90 optimizer.lr 0.0027866868049578474 +259 90 negative_sampler.num_negs_per_pos 70.0 +259 90 training.batch_size 0.0 +259 91 model.embedding_dim 1.0 +259 91 loss.margin 17.744102137955284 +259 91 
loss.adversarial_temperature 0.9137339551309215 +259 91 optimizer.lr 0.00992306946038476 +259 91 negative_sampler.num_negs_per_pos 73.0 +259 91 training.batch_size 1.0 +259 92 model.embedding_dim 1.0 +259 92 loss.margin 1.7845570087325897 +259 92 loss.adversarial_temperature 0.15168295621476113 +259 92 optimizer.lr 0.0013817887557265934 +259 92 negative_sampler.num_negs_per_pos 52.0 +259 92 training.batch_size 1.0 +259 93 model.embedding_dim 0.0 +259 93 loss.margin 3.4922012322861056 +259 93 loss.adversarial_temperature 0.4888321697345933 +259 93 optimizer.lr 0.0467227288609622 +259 93 negative_sampler.num_negs_per_pos 64.0 +259 93 training.batch_size 2.0 +259 94 model.embedding_dim 2.0 +259 94 loss.margin 12.329518804483127 +259 94 loss.adversarial_temperature 0.5835378823101657 +259 94 optimizer.lr 0.003728849487348327 +259 94 negative_sampler.num_negs_per_pos 12.0 +259 94 training.batch_size 2.0 +259 95 model.embedding_dim 2.0 +259 95 loss.margin 7.707272283844933 +259 95 loss.adversarial_temperature 0.5597246890816725 +259 95 optimizer.lr 0.06842608453968795 +259 95 negative_sampler.num_negs_per_pos 49.0 +259 95 training.batch_size 1.0 +259 96 model.embedding_dim 0.0 +259 96 loss.margin 17.981352047819012 +259 96 loss.adversarial_temperature 0.5573564817326546 +259 96 optimizer.lr 0.049638827684731476 +259 96 negative_sampler.num_negs_per_pos 87.0 +259 96 training.batch_size 1.0 +259 97 model.embedding_dim 0.0 +259 97 loss.margin 2.017412416969248 +259 97 loss.adversarial_temperature 0.36214337957532605 +259 97 optimizer.lr 0.007032911129449165 +259 97 negative_sampler.num_negs_per_pos 98.0 +259 97 training.batch_size 0.0 +259 98 model.embedding_dim 1.0 +259 98 loss.margin 7.535154378685875 +259 98 loss.adversarial_temperature 0.6670963454198181 +259 98 optimizer.lr 0.017729145491362463 +259 98 negative_sampler.num_negs_per_pos 14.0 +259 98 training.batch_size 0.0 +259 99 model.embedding_dim 1.0 +259 99 loss.margin 4.834704356778261 +259 99 
loss.adversarial_temperature 0.3365555683215577 +259 99 optimizer.lr 0.03255527645758917 +259 99 negative_sampler.num_negs_per_pos 32.0 +259 99 training.batch_size 2.0 +259 100 model.embedding_dim 1.0 +259 100 loss.margin 10.96521854163528 +259 100 loss.adversarial_temperature 0.4174264136620187 +259 100 optimizer.lr 0.007874928894353024 +259 100 negative_sampler.num_negs_per_pos 85.0 +259 100 training.batch_size 2.0 +259 1 dataset """kinships""" +259 1 model """ermlp""" +259 1 loss """nssa""" +259 1 regularizer """no""" +259 1 optimizer """adam""" +259 1 training_loop """owa""" +259 1 negative_sampler """basic""" +259 1 evaluator """rankbased""" +259 2 dataset """kinships""" +259 2 model """ermlp""" +259 2 loss """nssa""" +259 2 regularizer """no""" +259 2 optimizer """adam""" +259 2 training_loop """owa""" +259 2 negative_sampler """basic""" +259 2 evaluator """rankbased""" +259 3 dataset """kinships""" +259 3 model """ermlp""" +259 3 loss """nssa""" +259 3 regularizer """no""" +259 3 optimizer """adam""" +259 3 training_loop """owa""" +259 3 negative_sampler """basic""" +259 3 evaluator """rankbased""" +259 4 dataset """kinships""" +259 4 model """ermlp""" +259 4 loss """nssa""" +259 4 regularizer """no""" +259 4 optimizer """adam""" +259 4 training_loop """owa""" +259 4 negative_sampler """basic""" +259 4 evaluator """rankbased""" +259 5 dataset """kinships""" +259 5 model """ermlp""" +259 5 loss """nssa""" +259 5 regularizer """no""" +259 5 optimizer """adam""" +259 5 training_loop """owa""" +259 5 negative_sampler """basic""" +259 5 evaluator """rankbased""" +259 6 dataset """kinships""" +259 6 model """ermlp""" +259 6 loss """nssa""" +259 6 regularizer """no""" +259 6 optimizer """adam""" +259 6 training_loop """owa""" +259 6 negative_sampler """basic""" +259 6 evaluator """rankbased""" +259 7 dataset """kinships""" +259 7 model """ermlp""" +259 7 loss """nssa""" +259 7 regularizer """no""" +259 7 optimizer """adam""" +259 7 training_loop """owa""" +259 7 
negative_sampler """basic""" +259 7 evaluator """rankbased""" +259 8 dataset """kinships""" +259 8 model """ermlp""" +259 8 loss """nssa""" +259 8 regularizer """no""" +259 8 optimizer """adam""" +259 8 training_loop """owa""" +259 8 negative_sampler """basic""" +259 8 evaluator """rankbased""" +259 9 dataset """kinships""" +259 9 model """ermlp""" +259 9 loss """nssa""" +259 9 regularizer """no""" +259 9 optimizer """adam""" +259 9 training_loop """owa""" +259 9 negative_sampler """basic""" +259 9 evaluator """rankbased""" +259 10 dataset """kinships""" +259 10 model """ermlp""" +259 10 loss """nssa""" +259 10 regularizer """no""" +259 10 optimizer """adam""" +259 10 training_loop """owa""" +259 10 negative_sampler """basic""" +259 10 evaluator """rankbased""" +259 11 dataset """kinships""" +259 11 model """ermlp""" +259 11 loss """nssa""" +259 11 regularizer """no""" +259 11 optimizer """adam""" +259 11 training_loop """owa""" +259 11 negative_sampler """basic""" +259 11 evaluator """rankbased""" +259 12 dataset """kinships""" +259 12 model """ermlp""" +259 12 loss """nssa""" +259 12 regularizer """no""" +259 12 optimizer """adam""" +259 12 training_loop """owa""" +259 12 negative_sampler """basic""" +259 12 evaluator """rankbased""" +259 13 dataset """kinships""" +259 13 model """ermlp""" +259 13 loss """nssa""" +259 13 regularizer """no""" +259 13 optimizer """adam""" +259 13 training_loop """owa""" +259 13 negative_sampler """basic""" +259 13 evaluator """rankbased""" +259 14 dataset """kinships""" +259 14 model """ermlp""" +259 14 loss """nssa""" +259 14 regularizer """no""" +259 14 optimizer """adam""" +259 14 training_loop """owa""" +259 14 negative_sampler """basic""" +259 14 evaluator """rankbased""" +259 15 dataset """kinships""" +259 15 model """ermlp""" +259 15 loss """nssa""" +259 15 regularizer """no""" +259 15 optimizer """adam""" +259 15 training_loop """owa""" +259 15 negative_sampler """basic""" +259 15 evaluator """rankbased""" +259 16 dataset 
"""kinships""" +259 16 model """ermlp""" +259 16 loss """nssa""" +259 16 regularizer """no""" +259 16 optimizer """adam""" +259 16 training_loop """owa""" +259 16 negative_sampler """basic""" +259 16 evaluator """rankbased""" +259 17 dataset """kinships""" +259 17 model """ermlp""" +259 17 loss """nssa""" +259 17 regularizer """no""" +259 17 optimizer """adam""" +259 17 training_loop """owa""" +259 17 negative_sampler """basic""" +259 17 evaluator """rankbased""" +259 18 dataset """kinships""" +259 18 model """ermlp""" +259 18 loss """nssa""" +259 18 regularizer """no""" +259 18 optimizer """adam""" +259 18 training_loop """owa""" +259 18 negative_sampler """basic""" +259 18 evaluator """rankbased""" +259 19 dataset """kinships""" +259 19 model """ermlp""" +259 19 loss """nssa""" +259 19 regularizer """no""" +259 19 optimizer """adam""" +259 19 training_loop """owa""" +259 19 negative_sampler """basic""" +259 19 evaluator """rankbased""" +259 20 dataset """kinships""" +259 20 model """ermlp""" +259 20 loss """nssa""" +259 20 regularizer """no""" +259 20 optimizer """adam""" +259 20 training_loop """owa""" +259 20 negative_sampler """basic""" +259 20 evaluator """rankbased""" +259 21 dataset """kinships""" +259 21 model """ermlp""" +259 21 loss """nssa""" +259 21 regularizer """no""" +259 21 optimizer """adam""" +259 21 training_loop """owa""" +259 21 negative_sampler """basic""" +259 21 evaluator """rankbased""" +259 22 dataset """kinships""" +259 22 model """ermlp""" +259 22 loss """nssa""" +259 22 regularizer """no""" +259 22 optimizer """adam""" +259 22 training_loop """owa""" +259 22 negative_sampler """basic""" +259 22 evaluator """rankbased""" +259 23 dataset """kinships""" +259 23 model """ermlp""" +259 23 loss """nssa""" +259 23 regularizer """no""" +259 23 optimizer """adam""" +259 23 training_loop """owa""" +259 23 negative_sampler """basic""" +259 23 evaluator """rankbased""" +259 24 dataset """kinships""" +259 24 model """ermlp""" +259 24 loss 
"""nssa""" +259 24 regularizer """no""" +259 24 optimizer """adam""" +259 24 training_loop """owa""" +259 24 negative_sampler """basic""" +259 24 evaluator """rankbased""" +259 25 dataset """kinships""" +259 25 model """ermlp""" +259 25 loss """nssa""" +259 25 regularizer """no""" +259 25 optimizer """adam""" +259 25 training_loop """owa""" +259 25 negative_sampler """basic""" +259 25 evaluator """rankbased""" +259 26 dataset """kinships""" +259 26 model """ermlp""" +259 26 loss """nssa""" +259 26 regularizer """no""" +259 26 optimizer """adam""" +259 26 training_loop """owa""" +259 26 negative_sampler """basic""" +259 26 evaluator """rankbased""" +259 27 dataset """kinships""" +259 27 model """ermlp""" +259 27 loss """nssa""" +259 27 regularizer """no""" +259 27 optimizer """adam""" +259 27 training_loop """owa""" +259 27 negative_sampler """basic""" +259 27 evaluator """rankbased""" +259 28 dataset """kinships""" +259 28 model """ermlp""" +259 28 loss """nssa""" +259 28 regularizer """no""" +259 28 optimizer """adam""" +259 28 training_loop """owa""" +259 28 negative_sampler """basic""" +259 28 evaluator """rankbased""" +259 29 dataset """kinships""" +259 29 model """ermlp""" +259 29 loss """nssa""" +259 29 regularizer """no""" +259 29 optimizer """adam""" +259 29 training_loop """owa""" +259 29 negative_sampler """basic""" +259 29 evaluator """rankbased""" +259 30 dataset """kinships""" +259 30 model """ermlp""" +259 30 loss """nssa""" +259 30 regularizer """no""" +259 30 optimizer """adam""" +259 30 training_loop """owa""" +259 30 negative_sampler """basic""" +259 30 evaluator """rankbased""" +259 31 dataset """kinships""" +259 31 model """ermlp""" +259 31 loss """nssa""" +259 31 regularizer """no""" +259 31 optimizer """adam""" +259 31 training_loop """owa""" +259 31 negative_sampler """basic""" +259 31 evaluator """rankbased""" +259 32 dataset """kinships""" +259 32 model """ermlp""" +259 32 loss """nssa""" +259 32 regularizer """no""" +259 32 optimizer 
"""adam""" +259 32 training_loop """owa""" +259 32 negative_sampler """basic""" +259 32 evaluator """rankbased""" +259 33 dataset """kinships""" +259 33 model """ermlp""" +259 33 loss """nssa""" +259 33 regularizer """no""" +259 33 optimizer """adam""" +259 33 training_loop """owa""" +259 33 negative_sampler """basic""" +259 33 evaluator """rankbased""" +259 34 dataset """kinships""" +259 34 model """ermlp""" +259 34 loss """nssa""" +259 34 regularizer """no""" +259 34 optimizer """adam""" +259 34 training_loop """owa""" +259 34 negative_sampler """basic""" +259 34 evaluator """rankbased""" +259 35 dataset """kinships""" +259 35 model """ermlp""" +259 35 loss """nssa""" +259 35 regularizer """no""" +259 35 optimizer """adam""" +259 35 training_loop """owa""" +259 35 negative_sampler """basic""" +259 35 evaluator """rankbased""" +259 36 dataset """kinships""" +259 36 model """ermlp""" +259 36 loss """nssa""" +259 36 regularizer """no""" +259 36 optimizer """adam""" +259 36 training_loop """owa""" +259 36 negative_sampler """basic""" +259 36 evaluator """rankbased""" +259 37 dataset """kinships""" +259 37 model """ermlp""" +259 37 loss """nssa""" +259 37 regularizer """no""" +259 37 optimizer """adam""" +259 37 training_loop """owa""" +259 37 negative_sampler """basic""" +259 37 evaluator """rankbased""" +259 38 dataset """kinships""" +259 38 model """ermlp""" +259 38 loss """nssa""" +259 38 regularizer """no""" +259 38 optimizer """adam""" +259 38 training_loop """owa""" +259 38 negative_sampler """basic""" +259 38 evaluator """rankbased""" +259 39 dataset """kinships""" +259 39 model """ermlp""" +259 39 loss """nssa""" +259 39 regularizer """no""" +259 39 optimizer """adam""" +259 39 training_loop """owa""" +259 39 negative_sampler """basic""" +259 39 evaluator """rankbased""" +259 40 dataset """kinships""" +259 40 model """ermlp""" +259 40 loss """nssa""" +259 40 regularizer """no""" +259 40 optimizer """adam""" +259 40 training_loop """owa""" +259 40 
negative_sampler """basic""" +259 40 evaluator """rankbased""" +259 41 dataset """kinships""" +259 41 model """ermlp""" +259 41 loss """nssa""" +259 41 regularizer """no""" +259 41 optimizer """adam""" +259 41 training_loop """owa""" +259 41 negative_sampler """basic""" +259 41 evaluator """rankbased""" +259 42 dataset """kinships""" +259 42 model """ermlp""" +259 42 loss """nssa""" +259 42 regularizer """no""" +259 42 optimizer """adam""" +259 42 training_loop """owa""" +259 42 negative_sampler """basic""" +259 42 evaluator """rankbased""" +259 43 dataset """kinships""" +259 43 model """ermlp""" +259 43 loss """nssa""" +259 43 regularizer """no""" +259 43 optimizer """adam""" +259 43 training_loop """owa""" +259 43 negative_sampler """basic""" +259 43 evaluator """rankbased""" +259 44 dataset """kinships""" +259 44 model """ermlp""" +259 44 loss """nssa""" +259 44 regularizer """no""" +259 44 optimizer """adam""" +259 44 training_loop """owa""" +259 44 negative_sampler """basic""" +259 44 evaluator """rankbased""" +259 45 dataset """kinships""" +259 45 model """ermlp""" +259 45 loss """nssa""" +259 45 regularizer """no""" +259 45 optimizer """adam""" +259 45 training_loop """owa""" +259 45 negative_sampler """basic""" +259 45 evaluator """rankbased""" +259 46 dataset """kinships""" +259 46 model """ermlp""" +259 46 loss """nssa""" +259 46 regularizer """no""" +259 46 optimizer """adam""" +259 46 training_loop """owa""" +259 46 negative_sampler """basic""" +259 46 evaluator """rankbased""" +259 47 dataset """kinships""" +259 47 model """ermlp""" +259 47 loss """nssa""" +259 47 regularizer """no""" +259 47 optimizer """adam""" +259 47 training_loop """owa""" +259 47 negative_sampler """basic""" +259 47 evaluator """rankbased""" +259 48 dataset """kinships""" +259 48 model """ermlp""" +259 48 loss """nssa""" +259 48 regularizer """no""" +259 48 optimizer """adam""" +259 48 training_loop """owa""" +259 48 negative_sampler """basic""" +259 48 evaluator """rankbased""" 
+259 49 dataset """kinships""" +259 49 model """ermlp""" +259 49 loss """nssa""" +259 49 regularizer """no""" +259 49 optimizer """adam""" +259 49 training_loop """owa""" +259 49 negative_sampler """basic""" +259 49 evaluator """rankbased""" +259 50 dataset """kinships""" +259 50 model """ermlp""" +259 50 loss """nssa""" +259 50 regularizer """no""" +259 50 optimizer """adam""" +259 50 training_loop """owa""" +259 50 negative_sampler """basic""" +259 50 evaluator """rankbased""" +259 51 dataset """kinships""" +259 51 model """ermlp""" +259 51 loss """nssa""" +259 51 regularizer """no""" +259 51 optimizer """adam""" +259 51 training_loop """owa""" +259 51 negative_sampler """basic""" +259 51 evaluator """rankbased""" +259 52 dataset """kinships""" +259 52 model """ermlp""" +259 52 loss """nssa""" +259 52 regularizer """no""" +259 52 optimizer """adam""" +259 52 training_loop """owa""" +259 52 negative_sampler """basic""" +259 52 evaluator """rankbased""" +259 53 dataset """kinships""" +259 53 model """ermlp""" +259 53 loss """nssa""" +259 53 regularizer """no""" +259 53 optimizer """adam""" +259 53 training_loop """owa""" +259 53 negative_sampler """basic""" +259 53 evaluator """rankbased""" +259 54 dataset """kinships""" +259 54 model """ermlp""" +259 54 loss """nssa""" +259 54 regularizer """no""" +259 54 optimizer """adam""" +259 54 training_loop """owa""" +259 54 negative_sampler """basic""" +259 54 evaluator """rankbased""" +259 55 dataset """kinships""" +259 55 model """ermlp""" +259 55 loss """nssa""" +259 55 regularizer """no""" +259 55 optimizer """adam""" +259 55 training_loop """owa""" +259 55 negative_sampler """basic""" +259 55 evaluator """rankbased""" +259 56 dataset """kinships""" +259 56 model """ermlp""" +259 56 loss """nssa""" +259 56 regularizer """no""" +259 56 optimizer """adam""" +259 56 training_loop """owa""" +259 56 negative_sampler """basic""" +259 56 evaluator """rankbased""" +259 57 dataset """kinships""" +259 57 model """ermlp""" +259 
57 loss """nssa""" +259 57 regularizer """no""" +259 57 optimizer """adam""" +259 57 training_loop """owa""" +259 57 negative_sampler """basic""" +259 57 evaluator """rankbased""" +259 58 dataset """kinships""" +259 58 model """ermlp""" +259 58 loss """nssa""" +259 58 regularizer """no""" +259 58 optimizer """adam""" +259 58 training_loop """owa""" +259 58 negative_sampler """basic""" +259 58 evaluator """rankbased""" +259 59 dataset """kinships""" +259 59 model """ermlp""" +259 59 loss """nssa""" +259 59 regularizer """no""" +259 59 optimizer """adam""" +259 59 training_loop """owa""" +259 59 negative_sampler """basic""" +259 59 evaluator """rankbased""" +259 60 dataset """kinships""" +259 60 model """ermlp""" +259 60 loss """nssa""" +259 60 regularizer """no""" +259 60 optimizer """adam""" +259 60 training_loop """owa""" +259 60 negative_sampler """basic""" +259 60 evaluator """rankbased""" +259 61 dataset """kinships""" +259 61 model """ermlp""" +259 61 loss """nssa""" +259 61 regularizer """no""" +259 61 optimizer """adam""" +259 61 training_loop """owa""" +259 61 negative_sampler """basic""" +259 61 evaluator """rankbased""" +259 62 dataset """kinships""" +259 62 model """ermlp""" +259 62 loss """nssa""" +259 62 regularizer """no""" +259 62 optimizer """adam""" +259 62 training_loop """owa""" +259 62 negative_sampler """basic""" +259 62 evaluator """rankbased""" +259 63 dataset """kinships""" +259 63 model """ermlp""" +259 63 loss """nssa""" +259 63 regularizer """no""" +259 63 optimizer """adam""" +259 63 training_loop """owa""" +259 63 negative_sampler """basic""" +259 63 evaluator """rankbased""" +259 64 dataset """kinships""" +259 64 model """ermlp""" +259 64 loss """nssa""" +259 64 regularizer """no""" +259 64 optimizer """adam""" +259 64 training_loop """owa""" +259 64 negative_sampler """basic""" +259 64 evaluator """rankbased""" +259 65 dataset """kinships""" +259 65 model """ermlp""" +259 65 loss """nssa""" +259 65 regularizer """no""" +259 65 
optimizer """adam""" +259 65 training_loop """owa""" +259 65 negative_sampler """basic""" +259 65 evaluator """rankbased""" +259 66 dataset """kinships""" +259 66 model """ermlp""" +259 66 loss """nssa""" +259 66 regularizer """no""" +259 66 optimizer """adam""" +259 66 training_loop """owa""" +259 66 negative_sampler """basic""" +259 66 evaluator """rankbased""" +259 67 dataset """kinships""" +259 67 model """ermlp""" +259 67 loss """nssa""" +259 67 regularizer """no""" +259 67 optimizer """adam""" +259 67 training_loop """owa""" +259 67 negative_sampler """basic""" +259 67 evaluator """rankbased""" +259 68 dataset """kinships""" +259 68 model """ermlp""" +259 68 loss """nssa""" +259 68 regularizer """no""" +259 68 optimizer """adam""" +259 68 training_loop """owa""" +259 68 negative_sampler """basic""" +259 68 evaluator """rankbased""" +259 69 dataset """kinships""" +259 69 model """ermlp""" +259 69 loss """nssa""" +259 69 regularizer """no""" +259 69 optimizer """adam""" +259 69 training_loop """owa""" +259 69 negative_sampler """basic""" +259 69 evaluator """rankbased""" +259 70 dataset """kinships""" +259 70 model """ermlp""" +259 70 loss """nssa""" +259 70 regularizer """no""" +259 70 optimizer """adam""" +259 70 training_loop """owa""" +259 70 negative_sampler """basic""" +259 70 evaluator """rankbased""" +259 71 dataset """kinships""" +259 71 model """ermlp""" +259 71 loss """nssa""" +259 71 regularizer """no""" +259 71 optimizer """adam""" +259 71 training_loop """owa""" +259 71 negative_sampler """basic""" +259 71 evaluator """rankbased""" +259 72 dataset """kinships""" +259 72 model """ermlp""" +259 72 loss """nssa""" +259 72 regularizer """no""" +259 72 optimizer """adam""" +259 72 training_loop """owa""" +259 72 negative_sampler """basic""" +259 72 evaluator """rankbased""" +259 73 dataset """kinships""" +259 73 model """ermlp""" +259 73 loss """nssa""" +259 73 regularizer """no""" +259 73 optimizer """adam""" +259 73 training_loop """owa""" +259 73 
negative_sampler """basic""" +259 73 evaluator """rankbased""" +259 74 dataset """kinships""" +259 74 model """ermlp""" +259 74 loss """nssa""" +259 74 regularizer """no""" +259 74 optimizer """adam""" +259 74 training_loop """owa""" +259 74 negative_sampler """basic""" +259 74 evaluator """rankbased""" +259 75 dataset """kinships""" +259 75 model """ermlp""" +259 75 loss """nssa""" +259 75 regularizer """no""" +259 75 optimizer """adam""" +259 75 training_loop """owa""" +259 75 negative_sampler """basic""" +259 75 evaluator """rankbased""" +259 76 dataset """kinships""" +259 76 model """ermlp""" +259 76 loss """nssa""" +259 76 regularizer """no""" +259 76 optimizer """adam""" +259 76 training_loop """owa""" +259 76 negative_sampler """basic""" +259 76 evaluator """rankbased""" +259 77 dataset """kinships""" +259 77 model """ermlp""" +259 77 loss """nssa""" +259 77 regularizer """no""" +259 77 optimizer """adam""" +259 77 training_loop """owa""" +259 77 negative_sampler """basic""" +259 77 evaluator """rankbased""" +259 78 dataset """kinships""" +259 78 model """ermlp""" +259 78 loss """nssa""" +259 78 regularizer """no""" +259 78 optimizer """adam""" +259 78 training_loop """owa""" +259 78 negative_sampler """basic""" +259 78 evaluator """rankbased""" +259 79 dataset """kinships""" +259 79 model """ermlp""" +259 79 loss """nssa""" +259 79 regularizer """no""" +259 79 optimizer """adam""" +259 79 training_loop """owa""" +259 79 negative_sampler """basic""" +259 79 evaluator """rankbased""" +259 80 dataset """kinships""" +259 80 model """ermlp""" +259 80 loss """nssa""" +259 80 regularizer """no""" +259 80 optimizer """adam""" +259 80 training_loop """owa""" +259 80 negative_sampler """basic""" +259 80 evaluator """rankbased""" +259 81 dataset """kinships""" +259 81 model """ermlp""" +259 81 loss """nssa""" +259 81 regularizer """no""" +259 81 optimizer """adam""" +259 81 training_loop """owa""" +259 81 negative_sampler """basic""" +259 81 evaluator """rankbased""" 
+259 82 dataset """kinships""" +259 82 model """ermlp""" +259 82 loss """nssa""" +259 82 regularizer """no""" +259 82 optimizer """adam""" +259 82 training_loop """owa""" +259 82 negative_sampler """basic""" +259 82 evaluator """rankbased""" +259 83 dataset """kinships""" +259 83 model """ermlp""" +259 83 loss """nssa""" +259 83 regularizer """no""" +259 83 optimizer """adam""" +259 83 training_loop """owa""" +259 83 negative_sampler """basic""" +259 83 evaluator """rankbased""" +259 84 dataset """kinships""" +259 84 model """ermlp""" +259 84 loss """nssa""" +259 84 regularizer """no""" +259 84 optimizer """adam""" +259 84 training_loop """owa""" +259 84 negative_sampler """basic""" +259 84 evaluator """rankbased""" +259 85 dataset """kinships""" +259 85 model """ermlp""" +259 85 loss """nssa""" +259 85 regularizer """no""" +259 85 optimizer """adam""" +259 85 training_loop """owa""" +259 85 negative_sampler """basic""" +259 85 evaluator """rankbased""" +259 86 dataset """kinships""" +259 86 model """ermlp""" +259 86 loss """nssa""" +259 86 regularizer """no""" +259 86 optimizer """adam""" +259 86 training_loop """owa""" +259 86 negative_sampler """basic""" +259 86 evaluator """rankbased""" +259 87 dataset """kinships""" +259 87 model """ermlp""" +259 87 loss """nssa""" +259 87 regularizer """no""" +259 87 optimizer """adam""" +259 87 training_loop """owa""" +259 87 negative_sampler """basic""" +259 87 evaluator """rankbased""" +259 88 dataset """kinships""" +259 88 model """ermlp""" +259 88 loss """nssa""" +259 88 regularizer """no""" +259 88 optimizer """adam""" +259 88 training_loop """owa""" +259 88 negative_sampler """basic""" +259 88 evaluator """rankbased""" +259 89 dataset """kinships""" +259 89 model """ermlp""" +259 89 loss """nssa""" +259 89 regularizer """no""" +259 89 optimizer """adam""" +259 89 training_loop """owa""" +259 89 negative_sampler """basic""" +259 89 evaluator """rankbased""" +259 90 dataset """kinships""" +259 90 model """ermlp""" +259 
90 loss """nssa""" +259 90 regularizer """no""" +259 90 optimizer """adam""" +259 90 training_loop """owa""" +259 90 negative_sampler """basic""" +259 90 evaluator """rankbased""" +259 91 dataset """kinships""" +259 91 model """ermlp""" +259 91 loss """nssa""" +259 91 regularizer """no""" +259 91 optimizer """adam""" +259 91 training_loop """owa""" +259 91 negative_sampler """basic""" +259 91 evaluator """rankbased""" +259 92 dataset """kinships""" +259 92 model """ermlp""" +259 92 loss """nssa""" +259 92 regularizer """no""" +259 92 optimizer """adam""" +259 92 training_loop """owa""" +259 92 negative_sampler """basic""" +259 92 evaluator """rankbased""" +259 93 dataset """kinships""" +259 93 model """ermlp""" +259 93 loss """nssa""" +259 93 regularizer """no""" +259 93 optimizer """adam""" +259 93 training_loop """owa""" +259 93 negative_sampler """basic""" +259 93 evaluator """rankbased""" +259 94 dataset """kinships""" +259 94 model """ermlp""" +259 94 loss """nssa""" +259 94 regularizer """no""" +259 94 optimizer """adam""" +259 94 training_loop """owa""" +259 94 negative_sampler """basic""" +259 94 evaluator """rankbased""" +259 95 dataset """kinships""" +259 95 model """ermlp""" +259 95 loss """nssa""" +259 95 regularizer """no""" +259 95 optimizer """adam""" +259 95 training_loop """owa""" +259 95 negative_sampler """basic""" +259 95 evaluator """rankbased""" +259 96 dataset """kinships""" +259 96 model """ermlp""" +259 96 loss """nssa""" +259 96 regularizer """no""" +259 96 optimizer """adam""" +259 96 training_loop """owa""" +259 96 negative_sampler """basic""" +259 96 evaluator """rankbased""" +259 97 dataset """kinships""" +259 97 model """ermlp""" +259 97 loss """nssa""" +259 97 regularizer """no""" +259 97 optimizer """adam""" +259 97 training_loop """owa""" +259 97 negative_sampler """basic""" +259 97 evaluator """rankbased""" +259 98 dataset """kinships""" +259 98 model """ermlp""" +259 98 loss """nssa""" +259 98 regularizer """no""" +259 98 
optimizer """adam""" +259 98 training_loop """owa""" +259 98 negative_sampler """basic""" +259 98 evaluator """rankbased""" +259 99 dataset """kinships""" +259 99 model """ermlp""" +259 99 loss """nssa""" +259 99 regularizer """no""" +259 99 optimizer """adam""" +259 99 training_loop """owa""" +259 99 negative_sampler """basic""" +259 99 evaluator """rankbased""" +259 100 dataset """kinships""" +259 100 model """ermlp""" +259 100 loss """nssa""" +259 100 regularizer """no""" +259 100 optimizer """adam""" +259 100 training_loop """owa""" +259 100 negative_sampler """basic""" +259 100 evaluator """rankbased""" +260 1 model.embedding_dim 2.0 +260 1 loss.margin 1.3256315071608444 +260 1 optimizer.lr 0.026106542286432034 +260 1 negative_sampler.num_negs_per_pos 42.0 +260 1 training.batch_size 0.0 +260 2 model.embedding_dim 2.0 +260 2 loss.margin 2.685536285502361 +260 2 optimizer.lr 0.022677109501293438 +260 2 negative_sampler.num_negs_per_pos 67.0 +260 2 training.batch_size 2.0 +260 3 model.embedding_dim 0.0 +260 3 loss.margin 6.488599146336697 +260 3 optimizer.lr 0.0011941629208272924 +260 3 negative_sampler.num_negs_per_pos 0.0 +260 3 training.batch_size 1.0 +260 4 model.embedding_dim 1.0 +260 4 loss.margin 2.574989865121938 +260 4 optimizer.lr 0.00772852329366243 +260 4 negative_sampler.num_negs_per_pos 9.0 +260 4 training.batch_size 0.0 +260 5 model.embedding_dim 1.0 +260 5 loss.margin 9.247156857090346 +260 5 optimizer.lr 0.02225794988933011 +260 5 negative_sampler.num_negs_per_pos 16.0 +260 5 training.batch_size 2.0 +260 6 model.embedding_dim 0.0 +260 6 loss.margin 1.437900957010129 +260 6 optimizer.lr 0.0023671126401741166 +260 6 negative_sampler.num_negs_per_pos 99.0 +260 6 training.batch_size 1.0 +260 7 model.embedding_dim 1.0 +260 7 loss.margin 3.4648414860662804 +260 7 optimizer.lr 0.007477115590590749 +260 7 negative_sampler.num_negs_per_pos 77.0 +260 7 training.batch_size 1.0 +260 8 model.embedding_dim 1.0 +260 8 loss.margin 2.359431519783774 +260 8 
optimizer.lr 0.02559278598767096 +260 8 negative_sampler.num_negs_per_pos 37.0 +260 8 training.batch_size 2.0 +260 9 model.embedding_dim 0.0 +260 9 loss.margin 6.268636633128686 +260 9 optimizer.lr 0.004679232462498862 +260 9 negative_sampler.num_negs_per_pos 62.0 +260 9 training.batch_size 1.0 +260 10 model.embedding_dim 0.0 +260 10 loss.margin 7.5282814587334705 +260 10 optimizer.lr 0.04177678758133547 +260 10 negative_sampler.num_negs_per_pos 34.0 +260 10 training.batch_size 1.0 +260 11 model.embedding_dim 0.0 +260 11 loss.margin 7.079987670292095 +260 11 optimizer.lr 0.06263856656090394 +260 11 negative_sampler.num_negs_per_pos 97.0 +260 11 training.batch_size 0.0 +260 12 model.embedding_dim 0.0 +260 12 loss.margin 3.0572078225069363 +260 12 optimizer.lr 0.005883497760927781 +260 12 negative_sampler.num_negs_per_pos 51.0 +260 12 training.batch_size 1.0 +260 13 model.embedding_dim 2.0 +260 13 loss.margin 5.616029128419341 +260 13 optimizer.lr 0.00380133626436694 +260 13 negative_sampler.num_negs_per_pos 98.0 +260 13 training.batch_size 2.0 +260 14 model.embedding_dim 1.0 +260 14 loss.margin 8.181224874603915 +260 14 optimizer.lr 0.0028612379470609973 +260 14 negative_sampler.num_negs_per_pos 84.0 +260 14 training.batch_size 0.0 +260 15 model.embedding_dim 2.0 +260 15 loss.margin 3.8519125677291792 +260 15 optimizer.lr 0.06446459901055937 +260 15 negative_sampler.num_negs_per_pos 93.0 +260 15 training.batch_size 0.0 +260 16 model.embedding_dim 1.0 +260 16 loss.margin 4.281754831956913 +260 16 optimizer.lr 0.07386348427331293 +260 16 negative_sampler.num_negs_per_pos 91.0 +260 16 training.batch_size 1.0 +260 17 model.embedding_dim 2.0 +260 17 loss.margin 1.6235812115970876 +260 17 optimizer.lr 0.01802171333888812 +260 17 negative_sampler.num_negs_per_pos 37.0 +260 17 training.batch_size 2.0 +260 18 model.embedding_dim 1.0 +260 18 loss.margin 3.8376300849201517 +260 18 optimizer.lr 0.0031636007709615875 +260 18 negative_sampler.num_negs_per_pos 38.0 +260 18 
training.batch_size 1.0 +260 19 model.embedding_dim 1.0 +260 19 loss.margin 3.7091894647370647 +260 19 optimizer.lr 0.09614945280953476 +260 19 negative_sampler.num_negs_per_pos 48.0 +260 19 training.batch_size 0.0 +260 20 model.embedding_dim 1.0 +260 20 loss.margin 3.206667575631279 +260 20 optimizer.lr 0.005457438670862411 +260 20 negative_sampler.num_negs_per_pos 40.0 +260 20 training.batch_size 2.0 +260 21 model.embedding_dim 0.0 +260 21 loss.margin 7.003210264286394 +260 21 optimizer.lr 0.006964085694004948 +260 21 negative_sampler.num_negs_per_pos 78.0 +260 21 training.batch_size 2.0 +260 22 model.embedding_dim 2.0 +260 22 loss.margin 6.844249961040648 +260 22 optimizer.lr 0.09585661189574946 +260 22 negative_sampler.num_negs_per_pos 11.0 +260 22 training.batch_size 1.0 +260 23 model.embedding_dim 1.0 +260 23 loss.margin 6.310250466730541 +260 23 optimizer.lr 0.0028743760974721333 +260 23 negative_sampler.num_negs_per_pos 90.0 +260 23 training.batch_size 1.0 +260 24 model.embedding_dim 1.0 +260 24 loss.margin 0.5617253865659906 +260 24 optimizer.lr 0.0067056884157373564 +260 24 negative_sampler.num_negs_per_pos 84.0 +260 24 training.batch_size 2.0 +260 25 model.embedding_dim 0.0 +260 25 loss.margin 8.672688005871922 +260 25 optimizer.lr 0.0011453831798690984 +260 25 negative_sampler.num_negs_per_pos 86.0 +260 25 training.batch_size 1.0 +260 26 model.embedding_dim 0.0 +260 26 loss.margin 7.845759186107732 +260 26 optimizer.lr 0.0038320133558976234 +260 26 negative_sampler.num_negs_per_pos 37.0 +260 26 training.batch_size 1.0 +260 27 model.embedding_dim 0.0 +260 27 loss.margin 9.621817780787628 +260 27 optimizer.lr 0.0033235174441673776 +260 27 negative_sampler.num_negs_per_pos 95.0 +260 27 training.batch_size 2.0 +260 28 model.embedding_dim 0.0 +260 28 loss.margin 9.08211316712105 +260 28 optimizer.lr 0.0037411535187824112 +260 28 negative_sampler.num_negs_per_pos 93.0 +260 28 training.batch_size 0.0 +260 29 model.embedding_dim 2.0 +260 29 loss.margin 
0.8772430835001457 +260 29 optimizer.lr 0.002051387310754057 +260 29 negative_sampler.num_negs_per_pos 29.0 +260 29 training.batch_size 0.0 +260 30 model.embedding_dim 0.0 +260 30 loss.margin 6.806352141410264 +260 30 optimizer.lr 0.03500811662790655 +260 30 negative_sampler.num_negs_per_pos 56.0 +260 30 training.batch_size 2.0 +260 31 model.embedding_dim 0.0 +260 31 loss.margin 5.951306645905738 +260 31 optimizer.lr 0.002811058959325364 +260 31 negative_sampler.num_negs_per_pos 24.0 +260 31 training.batch_size 1.0 +260 32 model.embedding_dim 0.0 +260 32 loss.margin 8.560203307246203 +260 32 optimizer.lr 0.0011167299314252623 +260 32 negative_sampler.num_negs_per_pos 48.0 +260 32 training.batch_size 1.0 +260 1 dataset """wn18rr""" +260 1 model """ermlp""" +260 1 loss """marginranking""" +260 1 regularizer """no""" +260 1 optimizer """adam""" +260 1 training_loop """owa""" +260 1 negative_sampler """basic""" +260 1 evaluator """rankbased""" +260 2 dataset """wn18rr""" +260 2 model """ermlp""" +260 2 loss """marginranking""" +260 2 regularizer """no""" +260 2 optimizer """adam""" +260 2 training_loop """owa""" +260 2 negative_sampler """basic""" +260 2 evaluator """rankbased""" +260 3 dataset """wn18rr""" +260 3 model """ermlp""" +260 3 loss """marginranking""" +260 3 regularizer """no""" +260 3 optimizer """adam""" +260 3 training_loop """owa""" +260 3 negative_sampler """basic""" +260 3 evaluator """rankbased""" +260 4 dataset """wn18rr""" +260 4 model """ermlp""" +260 4 loss """marginranking""" +260 4 regularizer """no""" +260 4 optimizer """adam""" +260 4 training_loop """owa""" +260 4 negative_sampler """basic""" +260 4 evaluator """rankbased""" +260 5 dataset """wn18rr""" +260 5 model """ermlp""" +260 5 loss """marginranking""" +260 5 regularizer """no""" +260 5 optimizer """adam""" +260 5 training_loop """owa""" +260 5 negative_sampler """basic""" +260 5 evaluator """rankbased""" +260 6 dataset """wn18rr""" +260 6 model """ermlp""" +260 6 loss 
"""marginranking""" +260 6 regularizer """no""" +260 6 optimizer """adam""" +260 6 training_loop """owa""" +260 6 negative_sampler """basic""" +260 6 evaluator """rankbased""" +260 7 dataset """wn18rr""" +260 7 model """ermlp""" +260 7 loss """marginranking""" +260 7 regularizer """no""" +260 7 optimizer """adam""" +260 7 training_loop """owa""" +260 7 negative_sampler """basic""" +260 7 evaluator """rankbased""" +260 8 dataset """wn18rr""" +260 8 model """ermlp""" +260 8 loss """marginranking""" +260 8 regularizer """no""" +260 8 optimizer """adam""" +260 8 training_loop """owa""" +260 8 negative_sampler """basic""" +260 8 evaluator """rankbased""" +260 9 dataset """wn18rr""" +260 9 model """ermlp""" +260 9 loss """marginranking""" +260 9 regularizer """no""" +260 9 optimizer """adam""" +260 9 training_loop """owa""" +260 9 negative_sampler """basic""" +260 9 evaluator """rankbased""" +260 10 dataset """wn18rr""" +260 10 model """ermlp""" +260 10 loss """marginranking""" +260 10 regularizer """no""" +260 10 optimizer """adam""" +260 10 training_loop """owa""" +260 10 negative_sampler """basic""" +260 10 evaluator """rankbased""" +260 11 dataset """wn18rr""" +260 11 model """ermlp""" +260 11 loss """marginranking""" +260 11 regularizer """no""" +260 11 optimizer """adam""" +260 11 training_loop """owa""" +260 11 negative_sampler """basic""" +260 11 evaluator """rankbased""" +260 12 dataset """wn18rr""" +260 12 model """ermlp""" +260 12 loss """marginranking""" +260 12 regularizer """no""" +260 12 optimizer """adam""" +260 12 training_loop """owa""" +260 12 negative_sampler """basic""" +260 12 evaluator """rankbased""" +260 13 dataset """wn18rr""" +260 13 model """ermlp""" +260 13 loss """marginranking""" +260 13 regularizer """no""" +260 13 optimizer """adam""" +260 13 training_loop """owa""" +260 13 negative_sampler """basic""" +260 13 evaluator """rankbased""" +260 14 dataset """wn18rr""" +260 14 model """ermlp""" +260 14 loss """marginranking""" +260 14 
regularizer """no""" +260 14 optimizer """adam""" +260 14 training_loop """owa""" +260 14 negative_sampler """basic""" +260 14 evaluator """rankbased""" +260 15 dataset """wn18rr""" +260 15 model """ermlp""" +260 15 loss """marginranking""" +260 15 regularizer """no""" +260 15 optimizer """adam""" +260 15 training_loop """owa""" +260 15 negative_sampler """basic""" +260 15 evaluator """rankbased""" +260 16 dataset """wn18rr""" +260 16 model """ermlp""" +260 16 loss """marginranking""" +260 16 regularizer """no""" +260 16 optimizer """adam""" +260 16 training_loop """owa""" +260 16 negative_sampler """basic""" +260 16 evaluator """rankbased""" +260 17 dataset """wn18rr""" +260 17 model """ermlp""" +260 17 loss """marginranking""" +260 17 regularizer """no""" +260 17 optimizer """adam""" +260 17 training_loop """owa""" +260 17 negative_sampler """basic""" +260 17 evaluator """rankbased""" +260 18 dataset """wn18rr""" +260 18 model """ermlp""" +260 18 loss """marginranking""" +260 18 regularizer """no""" +260 18 optimizer """adam""" +260 18 training_loop """owa""" +260 18 negative_sampler """basic""" +260 18 evaluator """rankbased""" +260 19 dataset """wn18rr""" +260 19 model """ermlp""" +260 19 loss """marginranking""" +260 19 regularizer """no""" +260 19 optimizer """adam""" +260 19 training_loop """owa""" +260 19 negative_sampler """basic""" +260 19 evaluator """rankbased""" +260 20 dataset """wn18rr""" +260 20 model """ermlp""" +260 20 loss """marginranking""" +260 20 regularizer """no""" +260 20 optimizer """adam""" +260 20 training_loop """owa""" +260 20 negative_sampler """basic""" +260 20 evaluator """rankbased""" +260 21 dataset """wn18rr""" +260 21 model """ermlp""" +260 21 loss """marginranking""" +260 21 regularizer """no""" +260 21 optimizer """adam""" +260 21 training_loop """owa""" +260 21 negative_sampler """basic""" +260 21 evaluator """rankbased""" +260 22 dataset """wn18rr""" +260 22 model """ermlp""" +260 22 loss """marginranking""" +260 22 
regularizer """no""" +260 22 optimizer """adam""" +260 22 training_loop """owa""" +260 22 negative_sampler """basic""" +260 22 evaluator """rankbased""" +260 23 dataset """wn18rr""" +260 23 model """ermlp""" +260 23 loss """marginranking""" +260 23 regularizer """no""" +260 23 optimizer """adam""" +260 23 training_loop """owa""" +260 23 negative_sampler """basic""" +260 23 evaluator """rankbased""" +260 24 dataset """wn18rr""" +260 24 model """ermlp""" +260 24 loss """marginranking""" +260 24 regularizer """no""" +260 24 optimizer """adam""" +260 24 training_loop """owa""" +260 24 negative_sampler """basic""" +260 24 evaluator """rankbased""" +260 25 dataset """wn18rr""" +260 25 model """ermlp""" +260 25 loss """marginranking""" +260 25 regularizer """no""" +260 25 optimizer """adam""" +260 25 training_loop """owa""" +260 25 negative_sampler """basic""" +260 25 evaluator """rankbased""" +260 26 dataset """wn18rr""" +260 26 model """ermlp""" +260 26 loss """marginranking""" +260 26 regularizer """no""" +260 26 optimizer """adam""" +260 26 training_loop """owa""" +260 26 negative_sampler """basic""" +260 26 evaluator """rankbased""" +260 27 dataset """wn18rr""" +260 27 model """ermlp""" +260 27 loss """marginranking""" +260 27 regularizer """no""" +260 27 optimizer """adam""" +260 27 training_loop """owa""" +260 27 negative_sampler """basic""" +260 27 evaluator """rankbased""" +260 28 dataset """wn18rr""" +260 28 model """ermlp""" +260 28 loss """marginranking""" +260 28 regularizer """no""" +260 28 optimizer """adam""" +260 28 training_loop """owa""" +260 28 negative_sampler """basic""" +260 28 evaluator """rankbased""" +260 29 dataset """wn18rr""" +260 29 model """ermlp""" +260 29 loss """marginranking""" +260 29 regularizer """no""" +260 29 optimizer """adam""" +260 29 training_loop """owa""" +260 29 negative_sampler """basic""" +260 29 evaluator """rankbased""" +260 30 dataset """wn18rr""" +260 30 model """ermlp""" +260 30 loss """marginranking""" +260 30 
regularizer """no""" +260 30 optimizer """adam""" +260 30 training_loop """owa""" +260 30 negative_sampler """basic""" +260 30 evaluator """rankbased""" +260 31 dataset """wn18rr""" +260 31 model """ermlp""" +260 31 loss """marginranking""" +260 31 regularizer """no""" +260 31 optimizer """adam""" +260 31 training_loop """owa""" +260 31 negative_sampler """basic""" +260 31 evaluator """rankbased""" +260 32 dataset """wn18rr""" +260 32 model """ermlp""" +260 32 loss """marginranking""" +260 32 regularizer """no""" +260 32 optimizer """adam""" +260 32 training_loop """owa""" +260 32 negative_sampler """basic""" +260 32 evaluator """rankbased""" +261 1 model.embedding_dim 0.0 +261 1 loss.margin 5.207891238295343 +261 1 optimizer.lr 0.0036557393474539153 +261 1 negative_sampler.num_negs_per_pos 81.0 +261 1 training.batch_size 2.0 +261 2 model.embedding_dim 2.0 +261 2 loss.margin 3.6127688727077722 +261 2 optimizer.lr 0.02598339718366953 +261 2 negative_sampler.num_negs_per_pos 99.0 +261 2 training.batch_size 0.0 +261 3 model.embedding_dim 0.0 +261 3 loss.margin 6.4915203365783825 +261 3 optimizer.lr 0.06538504758565325 +261 3 negative_sampler.num_negs_per_pos 79.0 +261 3 training.batch_size 1.0 +261 4 model.embedding_dim 0.0 +261 4 loss.margin 4.830854414296615 +261 4 optimizer.lr 0.0059063896670658545 +261 4 negative_sampler.num_negs_per_pos 44.0 +261 4 training.batch_size 1.0 +261 5 model.embedding_dim 1.0 +261 5 loss.margin 7.705135700794347 +261 5 optimizer.lr 0.026521906303975665 +261 5 negative_sampler.num_negs_per_pos 63.0 +261 5 training.batch_size 0.0 +261 6 model.embedding_dim 2.0 +261 6 loss.margin 2.8908819458022714 +261 6 optimizer.lr 0.012435634031421809 +261 6 negative_sampler.num_negs_per_pos 29.0 +261 6 training.batch_size 0.0 +261 7 model.embedding_dim 2.0 +261 7 loss.margin 1.5231129951277298 +261 7 optimizer.lr 0.03393664957057801 +261 7 negative_sampler.num_negs_per_pos 89.0 +261 7 training.batch_size 0.0 +261 8 model.embedding_dim 0.0 +261 8 
loss.margin 3.9852120791422223 +261 8 optimizer.lr 0.029633434577787707 +261 8 negative_sampler.num_negs_per_pos 83.0 +261 8 training.batch_size 2.0 +261 9 model.embedding_dim 1.0 +261 9 loss.margin 2.131401247755751 +261 9 optimizer.lr 0.039003468141288584 +261 9 negative_sampler.num_negs_per_pos 94.0 +261 9 training.batch_size 1.0 +261 10 model.embedding_dim 1.0 +261 10 loss.margin 3.410955849694113 +261 10 optimizer.lr 0.00952062232392498 +261 10 negative_sampler.num_negs_per_pos 57.0 +261 10 training.batch_size 2.0 +261 11 model.embedding_dim 0.0 +261 11 loss.margin 8.077926151974427 +261 11 optimizer.lr 0.006083539464388242 +261 11 negative_sampler.num_negs_per_pos 58.0 +261 11 training.batch_size 0.0 +261 12 model.embedding_dim 0.0 +261 12 loss.margin 8.18968562400228 +261 12 optimizer.lr 0.003296420162493124 +261 12 negative_sampler.num_negs_per_pos 30.0 +261 12 training.batch_size 2.0 +261 13 model.embedding_dim 0.0 +261 13 loss.margin 4.449609235553642 +261 13 optimizer.lr 0.0011245489554625033 +261 13 negative_sampler.num_negs_per_pos 51.0 +261 13 training.batch_size 1.0 +261 14 model.embedding_dim 1.0 +261 14 loss.margin 7.421200994320106 +261 14 optimizer.lr 0.04665683283543736 +261 14 negative_sampler.num_negs_per_pos 30.0 +261 14 training.batch_size 2.0 +261 15 model.embedding_dim 0.0 +261 15 loss.margin 9.701559415180359 +261 15 optimizer.lr 0.020241492714736496 +261 15 negative_sampler.num_negs_per_pos 13.0 +261 15 training.batch_size 1.0 +261 16 model.embedding_dim 1.0 +261 16 loss.margin 9.947363982084374 +261 16 optimizer.lr 0.063719137569609 +261 16 negative_sampler.num_negs_per_pos 83.0 +261 16 training.batch_size 1.0 +261 17 model.embedding_dim 1.0 +261 17 loss.margin 4.131218528857769 +261 17 optimizer.lr 0.004021806147157458 +261 17 negative_sampler.num_negs_per_pos 79.0 +261 17 training.batch_size 1.0 +261 18 model.embedding_dim 1.0 +261 18 loss.margin 1.0671641354890202 +261 18 optimizer.lr 0.0011725386357216321 +261 18 
negative_sampler.num_negs_per_pos 15.0 +261 18 training.batch_size 1.0 +261 19 model.embedding_dim 0.0 +261 19 loss.margin 7.151897680479443 +261 19 optimizer.lr 0.005395974599982754 +261 19 negative_sampler.num_negs_per_pos 22.0 +261 19 training.batch_size 0.0 +261 20 model.embedding_dim 1.0 +261 20 loss.margin 2.490281967685489 +261 20 optimizer.lr 0.012773283407534917 +261 20 negative_sampler.num_negs_per_pos 73.0 +261 20 training.batch_size 1.0 +261 21 model.embedding_dim 1.0 +261 21 loss.margin 4.235247742033826 +261 21 optimizer.lr 0.002441589453742981 +261 21 negative_sampler.num_negs_per_pos 90.0 +261 21 training.batch_size 1.0 +261 22 model.embedding_dim 0.0 +261 22 loss.margin 1.70501371101838 +261 22 optimizer.lr 0.06084687491683384 +261 22 negative_sampler.num_negs_per_pos 29.0 +261 22 training.batch_size 1.0 +261 23 model.embedding_dim 2.0 +261 23 loss.margin 3.9804174192821993 +261 23 optimizer.lr 0.00236009482500156 +261 23 negative_sampler.num_negs_per_pos 10.0 +261 23 training.batch_size 2.0 +261 24 model.embedding_dim 2.0 +261 24 loss.margin 1.3283223858738484 +261 24 optimizer.lr 0.0020559026244740563 +261 24 negative_sampler.num_negs_per_pos 76.0 +261 24 training.batch_size 1.0 +261 25 model.embedding_dim 2.0 +261 25 loss.margin 9.368379000161244 +261 25 optimizer.lr 0.05356310814892676 +261 25 negative_sampler.num_negs_per_pos 66.0 +261 25 training.batch_size 0.0 +261 26 model.embedding_dim 0.0 +261 26 loss.margin 9.044638616897215 +261 26 optimizer.lr 0.05092432313774546 +261 26 negative_sampler.num_negs_per_pos 12.0 +261 26 training.batch_size 0.0 +261 27 model.embedding_dim 2.0 +261 27 loss.margin 8.997663572475089 +261 27 optimizer.lr 0.08600576568811513 +261 27 negative_sampler.num_negs_per_pos 99.0 +261 27 training.batch_size 1.0 +261 28 model.embedding_dim 1.0 +261 28 loss.margin 8.002344218431656 +261 28 optimizer.lr 0.06984339139884417 +261 28 negative_sampler.num_negs_per_pos 85.0 +261 28 training.batch_size 2.0 +261 29 
model.embedding_dim 0.0 +261 29 loss.margin 6.559786036343235 +261 29 optimizer.lr 0.014670391637912972 +261 29 negative_sampler.num_negs_per_pos 19.0 +261 29 training.batch_size 1.0 +261 30 model.embedding_dim 0.0 +261 30 loss.margin 7.098792642273386 +261 30 optimizer.lr 0.004988807686196074 +261 30 negative_sampler.num_negs_per_pos 55.0 +261 30 training.batch_size 0.0 +261 31 model.embedding_dim 0.0 +261 31 loss.margin 1.5408695288364656 +261 31 optimizer.lr 0.005568747130538901 +261 31 negative_sampler.num_negs_per_pos 38.0 +261 31 training.batch_size 0.0 +261 32 model.embedding_dim 2.0 +261 32 loss.margin 9.476280878670593 +261 32 optimizer.lr 0.031040728044495584 +261 32 negative_sampler.num_negs_per_pos 25.0 +261 32 training.batch_size 0.0 +261 33 model.embedding_dim 0.0 +261 33 loss.margin 4.145901478452207 +261 33 optimizer.lr 0.002709243531181508 +261 33 negative_sampler.num_negs_per_pos 55.0 +261 33 training.batch_size 1.0 +261 34 model.embedding_dim 2.0 +261 34 loss.margin 9.872597961764413 +261 34 optimizer.lr 0.0010944550828902646 +261 34 negative_sampler.num_negs_per_pos 88.0 +261 34 training.batch_size 2.0 +261 35 model.embedding_dim 0.0 +261 35 loss.margin 2.9080847018309663 +261 35 optimizer.lr 0.004086141000602999 +261 35 negative_sampler.num_negs_per_pos 73.0 +261 35 training.batch_size 0.0 +261 36 model.embedding_dim 0.0 +261 36 loss.margin 4.175110322797527 +261 36 optimizer.lr 0.013982447400835289 +261 36 negative_sampler.num_negs_per_pos 77.0 +261 36 training.batch_size 1.0 +261 37 model.embedding_dim 0.0 +261 37 loss.margin 3.2395428342367816 +261 37 optimizer.lr 0.006117976279847251 +261 37 negative_sampler.num_negs_per_pos 56.0 +261 37 training.batch_size 1.0 +261 38 model.embedding_dim 0.0 +261 38 loss.margin 9.56147593284521 +261 38 optimizer.lr 0.03471381787281496 +261 38 negative_sampler.num_negs_per_pos 86.0 +261 38 training.batch_size 0.0 +261 39 model.embedding_dim 1.0 +261 39 loss.margin 1.305467728513395 +261 39 optimizer.lr 
0.0025310342934821233 +261 39 negative_sampler.num_negs_per_pos 30.0 +261 39 training.batch_size 2.0 +261 40 model.embedding_dim 2.0 +261 40 loss.margin 8.283365171601595 +261 40 optimizer.lr 0.016703704813475866 +261 40 negative_sampler.num_negs_per_pos 61.0 +261 40 training.batch_size 2.0 +261 41 model.embedding_dim 2.0 +261 41 loss.margin 5.77564559836236 +261 41 optimizer.lr 0.02301561471326775 +261 41 negative_sampler.num_negs_per_pos 80.0 +261 41 training.batch_size 2.0 +261 42 model.embedding_dim 0.0 +261 42 loss.margin 7.8387160656128145 +261 42 optimizer.lr 0.03562479077064509 +261 42 negative_sampler.num_negs_per_pos 56.0 +261 42 training.batch_size 2.0 +261 43 model.embedding_dim 0.0 +261 43 loss.margin 3.044751656247845 +261 43 optimizer.lr 0.041763690801996226 +261 43 negative_sampler.num_negs_per_pos 34.0 +261 43 training.batch_size 1.0 +261 44 model.embedding_dim 2.0 +261 44 loss.margin 5.249604090591654 +261 44 optimizer.lr 0.0029883399159650187 +261 44 negative_sampler.num_negs_per_pos 97.0 +261 44 training.batch_size 1.0 +261 45 model.embedding_dim 2.0 +261 45 loss.margin 1.812416500568027 +261 45 optimizer.lr 0.002465535461497452 +261 45 negative_sampler.num_negs_per_pos 31.0 +261 45 training.batch_size 2.0 +261 46 model.embedding_dim 1.0 +261 46 loss.margin 7.090161643461119 +261 46 optimizer.lr 0.04877593593584468 +261 46 negative_sampler.num_negs_per_pos 43.0 +261 46 training.batch_size 1.0 +261 47 model.embedding_dim 0.0 +261 47 loss.margin 7.584475076613774 +261 47 optimizer.lr 0.007493511517810143 +261 47 negative_sampler.num_negs_per_pos 52.0 +261 47 training.batch_size 0.0 +261 48 model.embedding_dim 1.0 +261 48 loss.margin 8.532554112108599 +261 48 optimizer.lr 0.0035167715900522112 +261 48 negative_sampler.num_negs_per_pos 59.0 +261 48 training.batch_size 0.0 +261 49 model.embedding_dim 1.0 +261 49 loss.margin 5.627407255000036 +261 49 optimizer.lr 0.09388581657070007 +261 49 negative_sampler.num_negs_per_pos 42.0 +261 49 
training.batch_size 0.0 +261 50 model.embedding_dim 1.0 +261 50 loss.margin 0.8656741570133077 +261 50 optimizer.lr 0.06322567929843041 +261 50 negative_sampler.num_negs_per_pos 24.0 +261 50 training.batch_size 2.0 +261 51 model.embedding_dim 0.0 +261 51 loss.margin 3.760961938741365 +261 51 optimizer.lr 0.06671155326934172 +261 51 negative_sampler.num_negs_per_pos 16.0 +261 51 training.batch_size 0.0 +261 52 model.embedding_dim 2.0 +261 52 loss.margin 8.592815542923518 +261 52 optimizer.lr 0.02947856291319665 +261 52 negative_sampler.num_negs_per_pos 89.0 +261 52 training.batch_size 1.0 +261 1 dataset """wn18rr""" +261 1 model """ermlp""" +261 1 loss """marginranking""" +261 1 regularizer """no""" +261 1 optimizer """adam""" +261 1 training_loop """owa""" +261 1 negative_sampler """basic""" +261 1 evaluator """rankbased""" +261 2 dataset """wn18rr""" +261 2 model """ermlp""" +261 2 loss """marginranking""" +261 2 regularizer """no""" +261 2 optimizer """adam""" +261 2 training_loop """owa""" +261 2 negative_sampler """basic""" +261 2 evaluator """rankbased""" +261 3 dataset """wn18rr""" +261 3 model """ermlp""" +261 3 loss """marginranking""" +261 3 regularizer """no""" +261 3 optimizer """adam""" +261 3 training_loop """owa""" +261 3 negative_sampler """basic""" +261 3 evaluator """rankbased""" +261 4 dataset """wn18rr""" +261 4 model """ermlp""" +261 4 loss """marginranking""" +261 4 regularizer """no""" +261 4 optimizer """adam""" +261 4 training_loop """owa""" +261 4 negative_sampler """basic""" +261 4 evaluator """rankbased""" +261 5 dataset """wn18rr""" +261 5 model """ermlp""" +261 5 loss """marginranking""" +261 5 regularizer """no""" +261 5 optimizer """adam""" +261 5 training_loop """owa""" +261 5 negative_sampler """basic""" +261 5 evaluator """rankbased""" +261 6 dataset """wn18rr""" +261 6 model """ermlp""" +261 6 loss """marginranking""" +261 6 regularizer """no""" +261 6 optimizer """adam""" +261 6 training_loop """owa""" +261 6 negative_sampler 
"""basic""" +261 6 evaluator """rankbased""" +261 7 dataset """wn18rr""" +261 7 model """ermlp""" +261 7 loss """marginranking""" +261 7 regularizer """no""" +261 7 optimizer """adam""" +261 7 training_loop """owa""" +261 7 negative_sampler """basic""" +261 7 evaluator """rankbased""" +261 8 dataset """wn18rr""" +261 8 model """ermlp""" +261 8 loss """marginranking""" +261 8 regularizer """no""" +261 8 optimizer """adam""" +261 8 training_loop """owa""" +261 8 negative_sampler """basic""" +261 8 evaluator """rankbased""" +261 9 dataset """wn18rr""" +261 9 model """ermlp""" +261 9 loss """marginranking""" +261 9 regularizer """no""" +261 9 optimizer """adam""" +261 9 training_loop """owa""" +261 9 negative_sampler """basic""" +261 9 evaluator """rankbased""" +261 10 dataset """wn18rr""" +261 10 model """ermlp""" +261 10 loss """marginranking""" +261 10 regularizer """no""" +261 10 optimizer """adam""" +261 10 training_loop """owa""" +261 10 negative_sampler """basic""" +261 10 evaluator """rankbased""" +261 11 dataset """wn18rr""" +261 11 model """ermlp""" +261 11 loss """marginranking""" +261 11 regularizer """no""" +261 11 optimizer """adam""" +261 11 training_loop """owa""" +261 11 negative_sampler """basic""" +261 11 evaluator """rankbased""" +261 12 dataset """wn18rr""" +261 12 model """ermlp""" +261 12 loss """marginranking""" +261 12 regularizer """no""" +261 12 optimizer """adam""" +261 12 training_loop """owa""" +261 12 negative_sampler """basic""" +261 12 evaluator """rankbased""" +261 13 dataset """wn18rr""" +261 13 model """ermlp""" +261 13 loss """marginranking""" +261 13 regularizer """no""" +261 13 optimizer """adam""" +261 13 training_loop """owa""" +261 13 negative_sampler """basic""" +261 13 evaluator """rankbased""" +261 14 dataset """wn18rr""" +261 14 model """ermlp""" +261 14 loss """marginranking""" +261 14 regularizer """no""" +261 14 optimizer """adam""" +261 14 training_loop """owa""" +261 14 negative_sampler """basic""" +261 14 evaluator 
"""rankbased""" +261 15 dataset """wn18rr""" +261 15 model """ermlp""" +261 15 loss """marginranking""" +261 15 regularizer """no""" +261 15 optimizer """adam""" +261 15 training_loop """owa""" +261 15 negative_sampler """basic""" +261 15 evaluator """rankbased""" +261 16 dataset """wn18rr""" +261 16 model """ermlp""" +261 16 loss """marginranking""" +261 16 regularizer """no""" +261 16 optimizer """adam""" +261 16 training_loop """owa""" +261 16 negative_sampler """basic""" +261 16 evaluator """rankbased""" +261 17 dataset """wn18rr""" +261 17 model """ermlp""" +261 17 loss """marginranking""" +261 17 regularizer """no""" +261 17 optimizer """adam""" +261 17 training_loop """owa""" +261 17 negative_sampler """basic""" +261 17 evaluator """rankbased""" +261 18 dataset """wn18rr""" +261 18 model """ermlp""" +261 18 loss """marginranking""" +261 18 regularizer """no""" +261 18 optimizer """adam""" +261 18 training_loop """owa""" +261 18 negative_sampler """basic""" +261 18 evaluator """rankbased""" +261 19 dataset """wn18rr""" +261 19 model """ermlp""" +261 19 loss """marginranking""" +261 19 regularizer """no""" +261 19 optimizer """adam""" +261 19 training_loop """owa""" +261 19 negative_sampler """basic""" +261 19 evaluator """rankbased""" +261 20 dataset """wn18rr""" +261 20 model """ermlp""" +261 20 loss """marginranking""" +261 20 regularizer """no""" +261 20 optimizer """adam""" +261 20 training_loop """owa""" +261 20 negative_sampler """basic""" +261 20 evaluator """rankbased""" +261 21 dataset """wn18rr""" +261 21 model """ermlp""" +261 21 loss """marginranking""" +261 21 regularizer """no""" +261 21 optimizer """adam""" +261 21 training_loop """owa""" +261 21 negative_sampler """basic""" +261 21 evaluator """rankbased""" +261 22 dataset """wn18rr""" +261 22 model """ermlp""" +261 22 loss """marginranking""" +261 22 regularizer """no""" +261 22 optimizer """adam""" +261 22 training_loop """owa""" +261 22 negative_sampler """basic""" +261 22 evaluator 
"""rankbased""" +261 23 dataset """wn18rr""" +261 23 model """ermlp""" +261 23 loss """marginranking""" +261 23 regularizer """no""" +261 23 optimizer """adam""" +261 23 training_loop """owa""" +261 23 negative_sampler """basic""" +261 23 evaluator """rankbased""" +261 24 dataset """wn18rr""" +261 24 model """ermlp""" +261 24 loss """marginranking""" +261 24 regularizer """no""" +261 24 optimizer """adam""" +261 24 training_loop """owa""" +261 24 negative_sampler """basic""" +261 24 evaluator """rankbased""" +261 25 dataset """wn18rr""" +261 25 model """ermlp""" +261 25 loss """marginranking""" +261 25 regularizer """no""" +261 25 optimizer """adam""" +261 25 training_loop """owa""" +261 25 negative_sampler """basic""" +261 25 evaluator """rankbased""" +261 26 dataset """wn18rr""" +261 26 model """ermlp""" +261 26 loss """marginranking""" +261 26 regularizer """no""" +261 26 optimizer """adam""" +261 26 training_loop """owa""" +261 26 negative_sampler """basic""" +261 26 evaluator """rankbased""" +261 27 dataset """wn18rr""" +261 27 model """ermlp""" +261 27 loss """marginranking""" +261 27 regularizer """no""" +261 27 optimizer """adam""" +261 27 training_loop """owa""" +261 27 negative_sampler """basic""" +261 27 evaluator """rankbased""" +261 28 dataset """wn18rr""" +261 28 model """ermlp""" +261 28 loss """marginranking""" +261 28 regularizer """no""" +261 28 optimizer """adam""" +261 28 training_loop """owa""" +261 28 negative_sampler """basic""" +261 28 evaluator """rankbased""" +261 29 dataset """wn18rr""" +261 29 model """ermlp""" +261 29 loss """marginranking""" +261 29 regularizer """no""" +261 29 optimizer """adam""" +261 29 training_loop """owa""" +261 29 negative_sampler """basic""" +261 29 evaluator """rankbased""" +261 30 dataset """wn18rr""" +261 30 model """ermlp""" +261 30 loss """marginranking""" +261 30 regularizer """no""" +261 30 optimizer """adam""" +261 30 training_loop """owa""" +261 30 negative_sampler """basic""" +261 30 evaluator 
"""rankbased""" +261 31 dataset """wn18rr""" +261 31 model """ermlp""" +261 31 loss """marginranking""" +261 31 regularizer """no""" +261 31 optimizer """adam""" +261 31 training_loop """owa""" +261 31 negative_sampler """basic""" +261 31 evaluator """rankbased""" +261 32 dataset """wn18rr""" +261 32 model """ermlp""" +261 32 loss """marginranking""" +261 32 regularizer """no""" +261 32 optimizer """adam""" +261 32 training_loop """owa""" +261 32 negative_sampler """basic""" +261 32 evaluator """rankbased""" +261 33 dataset """wn18rr""" +261 33 model """ermlp""" +261 33 loss """marginranking""" +261 33 regularizer """no""" +261 33 optimizer """adam""" +261 33 training_loop """owa""" +261 33 negative_sampler """basic""" +261 33 evaluator """rankbased""" +261 34 dataset """wn18rr""" +261 34 model """ermlp""" +261 34 loss """marginranking""" +261 34 regularizer """no""" +261 34 optimizer """adam""" +261 34 training_loop """owa""" +261 34 negative_sampler """basic""" +261 34 evaluator """rankbased""" +261 35 dataset """wn18rr""" +261 35 model """ermlp""" +261 35 loss """marginranking""" +261 35 regularizer """no""" +261 35 optimizer """adam""" +261 35 training_loop """owa""" +261 35 negative_sampler """basic""" +261 35 evaluator """rankbased""" +261 36 dataset """wn18rr""" +261 36 model """ermlp""" +261 36 loss """marginranking""" +261 36 regularizer """no""" +261 36 optimizer """adam""" +261 36 training_loop """owa""" +261 36 negative_sampler """basic""" +261 36 evaluator """rankbased""" +261 37 dataset """wn18rr""" +261 37 model """ermlp""" +261 37 loss """marginranking""" +261 37 regularizer """no""" +261 37 optimizer """adam""" +261 37 training_loop """owa""" +261 37 negative_sampler """basic""" +261 37 evaluator """rankbased""" +261 38 dataset """wn18rr""" +261 38 model """ermlp""" +261 38 loss """marginranking""" +261 38 regularizer """no""" +261 38 optimizer """adam""" +261 38 training_loop """owa""" +261 38 negative_sampler """basic""" +261 38 evaluator 
"""rankbased""" +261 39 dataset """wn18rr""" +261 39 model """ermlp""" +261 39 loss """marginranking""" +261 39 regularizer """no""" +261 39 optimizer """adam""" +261 39 training_loop """owa""" +261 39 negative_sampler """basic""" +261 39 evaluator """rankbased""" +261 40 dataset """wn18rr""" +261 40 model """ermlp""" +261 40 loss """marginranking""" +261 40 regularizer """no""" +261 40 optimizer """adam""" +261 40 training_loop """owa""" +261 40 negative_sampler """basic""" +261 40 evaluator """rankbased""" +261 41 dataset """wn18rr""" +261 41 model """ermlp""" +261 41 loss """marginranking""" +261 41 regularizer """no""" +261 41 optimizer """adam""" +261 41 training_loop """owa""" +261 41 negative_sampler """basic""" +261 41 evaluator """rankbased""" +261 42 dataset """wn18rr""" +261 42 model """ermlp""" +261 42 loss """marginranking""" +261 42 regularizer """no""" +261 42 optimizer """adam""" +261 42 training_loop """owa""" +261 42 negative_sampler """basic""" +261 42 evaluator """rankbased""" +261 43 dataset """wn18rr""" +261 43 model """ermlp""" +261 43 loss """marginranking""" +261 43 regularizer """no""" +261 43 optimizer """adam""" +261 43 training_loop """owa""" +261 43 negative_sampler """basic""" +261 43 evaluator """rankbased""" +261 44 dataset """wn18rr""" +261 44 model """ermlp""" +261 44 loss """marginranking""" +261 44 regularizer """no""" +261 44 optimizer """adam""" +261 44 training_loop """owa""" +261 44 negative_sampler """basic""" +261 44 evaluator """rankbased""" +261 45 dataset """wn18rr""" +261 45 model """ermlp""" +261 45 loss """marginranking""" +261 45 regularizer """no""" +261 45 optimizer """adam""" +261 45 training_loop """owa""" +261 45 negative_sampler """basic""" +261 45 evaluator """rankbased""" +261 46 dataset """wn18rr""" +261 46 model """ermlp""" +261 46 loss """marginranking""" +261 46 regularizer """no""" +261 46 optimizer """adam""" +261 46 training_loop """owa""" +261 46 negative_sampler """basic""" +261 46 evaluator 
"""rankbased""" +261 47 dataset """wn18rr""" +261 47 model """ermlp""" +261 47 loss """marginranking""" +261 47 regularizer """no""" +261 47 optimizer """adam""" +261 47 training_loop """owa""" +261 47 negative_sampler """basic""" +261 47 evaluator """rankbased""" +261 48 dataset """wn18rr""" +261 48 model """ermlp""" +261 48 loss """marginranking""" +261 48 regularizer """no""" +261 48 optimizer """adam""" +261 48 training_loop """owa""" +261 48 negative_sampler """basic""" +261 48 evaluator """rankbased""" +261 49 dataset """wn18rr""" +261 49 model """ermlp""" +261 49 loss """marginranking""" +261 49 regularizer """no""" +261 49 optimizer """adam""" +261 49 training_loop """owa""" +261 49 negative_sampler """basic""" +261 49 evaluator """rankbased""" +261 50 dataset """wn18rr""" +261 50 model """ermlp""" +261 50 loss """marginranking""" +261 50 regularizer """no""" +261 50 optimizer """adam""" +261 50 training_loop """owa""" +261 50 negative_sampler """basic""" +261 50 evaluator """rankbased""" +261 51 dataset """wn18rr""" +261 51 model """ermlp""" +261 51 loss """marginranking""" +261 51 regularizer """no""" +261 51 optimizer """adam""" +261 51 training_loop """owa""" +261 51 negative_sampler """basic""" +261 51 evaluator """rankbased""" +261 52 dataset """wn18rr""" +261 52 model """ermlp""" +261 52 loss """marginranking""" +261 52 regularizer """no""" +261 52 optimizer """adam""" +261 52 training_loop """owa""" +261 52 negative_sampler """basic""" +261 52 evaluator """rankbased""" +262 1 model.embedding_dim 1.0 +262 1 loss.margin 22.045788405868944 +262 1 loss.adversarial_temperature 0.6125329635111196 +262 1 optimizer.lr 0.002000336053782344 +262 1 negative_sampler.num_negs_per_pos 21.0 +262 1 training.batch_size 2.0 +262 2 model.embedding_dim 0.0 +262 2 loss.margin 2.085213985118331 +262 2 loss.adversarial_temperature 0.8857034204628705 +262 2 optimizer.lr 0.002403187471899723 +262 2 negative_sampler.num_negs_per_pos 84.0 +262 2 training.batch_size 2.0 +262 3 
model.embedding_dim 2.0 +262 3 loss.margin 2.2638025329474125 +262 3 loss.adversarial_temperature 0.5665980019942635 +262 3 optimizer.lr 0.08922261828184848 +262 3 negative_sampler.num_negs_per_pos 97.0 +262 3 training.batch_size 2.0 +262 4 model.embedding_dim 2.0 +262 4 loss.margin 19.899427073332046 +262 4 loss.adversarial_temperature 0.3871898202996287 +262 4 optimizer.lr 0.07790224568164089 +262 4 negative_sampler.num_negs_per_pos 22.0 +262 4 training.batch_size 1.0 +262 5 model.embedding_dim 2.0 +262 5 loss.margin 13.197564642578634 +262 5 loss.adversarial_temperature 0.6817819223733522 +262 5 optimizer.lr 0.005876499416185538 +262 5 negative_sampler.num_negs_per_pos 16.0 +262 5 training.batch_size 1.0 +262 6 model.embedding_dim 0.0 +262 6 loss.margin 25.739118463653462 +262 6 loss.adversarial_temperature 0.4583207219765674 +262 6 optimizer.lr 0.04932742526719181 +262 6 negative_sampler.num_negs_per_pos 64.0 +262 6 training.batch_size 2.0 +262 7 model.embedding_dim 1.0 +262 7 loss.margin 26.93951908647765 +262 7 loss.adversarial_temperature 0.9187080835032507 +262 7 optimizer.lr 0.07477344382615693 +262 7 negative_sampler.num_negs_per_pos 60.0 +262 7 training.batch_size 0.0 +262 8 model.embedding_dim 1.0 +262 8 loss.margin 28.596357285541185 +262 8 loss.adversarial_temperature 0.3388286166440232 +262 8 optimizer.lr 0.0067475210605658625 +262 8 negative_sampler.num_negs_per_pos 88.0 +262 8 training.batch_size 1.0 +262 9 model.embedding_dim 1.0 +262 9 loss.margin 15.54063236824872 +262 9 loss.adversarial_temperature 0.8454376795515479 +262 9 optimizer.lr 0.002410696015397392 +262 9 negative_sampler.num_negs_per_pos 52.0 +262 9 training.batch_size 2.0 +262 10 model.embedding_dim 0.0 +262 10 loss.margin 5.26006454128466 +262 10 loss.adversarial_temperature 0.5210125374385525 +262 10 optimizer.lr 0.026302795004478897 +262 10 negative_sampler.num_negs_per_pos 47.0 +262 10 training.batch_size 2.0 +262 11 model.embedding_dim 0.0 +262 11 loss.margin 7.973279914912647 
+262 11 loss.adversarial_temperature 0.7903971477562943 +262 11 optimizer.lr 0.008906470070109978 +262 11 negative_sampler.num_negs_per_pos 2.0 +262 11 training.batch_size 1.0 +262 12 model.embedding_dim 0.0 +262 12 loss.margin 8.112046943453002 +262 12 loss.adversarial_temperature 0.6744306360441973 +262 12 optimizer.lr 0.004057610717347234 +262 12 negative_sampler.num_negs_per_pos 18.0 +262 12 training.batch_size 2.0 +262 13 model.embedding_dim 1.0 +262 13 loss.margin 28.77576251206679 +262 13 loss.adversarial_temperature 0.23651196351446865 +262 13 optimizer.lr 0.09151776578228009 +262 13 negative_sampler.num_negs_per_pos 69.0 +262 13 training.batch_size 0.0 +262 14 model.embedding_dim 1.0 +262 14 loss.margin 11.948584161314901 +262 14 loss.adversarial_temperature 0.1607579451297716 +262 14 optimizer.lr 0.01875430973615084 +262 14 negative_sampler.num_negs_per_pos 52.0 +262 14 training.batch_size 1.0 +262 15 model.embedding_dim 2.0 +262 15 loss.margin 3.286962519167871 +262 15 loss.adversarial_temperature 0.24187437291324868 +262 15 optimizer.lr 0.056519134585042714 +262 15 negative_sampler.num_negs_per_pos 52.0 +262 15 training.batch_size 1.0 +262 16 model.embedding_dim 2.0 +262 16 loss.margin 9.790484076150955 +262 16 loss.adversarial_temperature 0.5452784057337345 +262 16 optimizer.lr 0.09465442919147522 +262 16 negative_sampler.num_negs_per_pos 89.0 +262 16 training.batch_size 1.0 +262 17 model.embedding_dim 0.0 +262 17 loss.margin 20.106619194397677 +262 17 loss.adversarial_temperature 0.7000436812928411 +262 17 optimizer.lr 0.08614118465660062 +262 17 negative_sampler.num_negs_per_pos 72.0 +262 17 training.batch_size 2.0 +262 18 model.embedding_dim 2.0 +262 18 loss.margin 12.844925473842926 +262 18 loss.adversarial_temperature 0.78738895728469 +262 18 optimizer.lr 0.010163352281717714 +262 18 negative_sampler.num_negs_per_pos 63.0 +262 18 training.batch_size 1.0 +262 19 model.embedding_dim 1.0 +262 19 loss.margin 13.856618936312248 +262 19 
loss.adversarial_temperature 0.40799489857865207 +262 19 optimizer.lr 0.017483636299639824 +262 19 negative_sampler.num_negs_per_pos 35.0 +262 19 training.batch_size 2.0 +262 20 model.embedding_dim 0.0 +262 20 loss.margin 23.812975864796552 +262 20 loss.adversarial_temperature 0.6629009233651288 +262 20 optimizer.lr 0.0013078034151928347 +262 20 negative_sampler.num_negs_per_pos 32.0 +262 20 training.batch_size 0.0 +262 21 model.embedding_dim 2.0 +262 21 loss.margin 5.929951169098994 +262 21 loss.adversarial_temperature 0.4308472125689178 +262 21 optimizer.lr 0.0019974400846184026 +262 21 negative_sampler.num_negs_per_pos 87.0 +262 21 training.batch_size 2.0 +262 22 model.embedding_dim 2.0 +262 22 loss.margin 13.588559942068526 +262 22 loss.adversarial_temperature 0.6995872124050404 +262 22 optimizer.lr 0.06346026158265723 +262 22 negative_sampler.num_negs_per_pos 44.0 +262 22 training.batch_size 1.0 +262 23 model.embedding_dim 2.0 +262 23 loss.margin 10.640783639340908 +262 23 loss.adversarial_temperature 0.8334893705594666 +262 23 optimizer.lr 0.0038773144906104826 +262 23 negative_sampler.num_negs_per_pos 51.0 +262 23 training.batch_size 1.0 +262 24 model.embedding_dim 0.0 +262 24 loss.margin 14.821664469853392 +262 24 loss.adversarial_temperature 0.1463815540406066 +262 24 optimizer.lr 0.050448572347830255 +262 24 negative_sampler.num_negs_per_pos 27.0 +262 24 training.batch_size 0.0 +262 25 model.embedding_dim 1.0 +262 25 loss.margin 5.128274164442364 +262 25 loss.adversarial_temperature 0.3011569499663944 +262 25 optimizer.lr 0.02967652644906825 +262 25 negative_sampler.num_negs_per_pos 51.0 +262 25 training.batch_size 0.0 +262 26 model.embedding_dim 0.0 +262 26 loss.margin 22.21625230289748 +262 26 loss.adversarial_temperature 0.7073039291083597 +262 26 optimizer.lr 0.013865672864745252 +262 26 negative_sampler.num_negs_per_pos 68.0 +262 26 training.batch_size 0.0 +262 27 model.embedding_dim 0.0 +262 27 loss.margin 12.235602965416824 +262 27 
loss.adversarial_temperature 0.16274258973580877 +262 27 optimizer.lr 0.016565793119346986 +262 27 negative_sampler.num_negs_per_pos 26.0 +262 27 training.batch_size 2.0 +262 28 model.embedding_dim 2.0 +262 28 loss.margin 10.643191485634423 +262 28 loss.adversarial_temperature 0.9488118686838518 +262 28 optimizer.lr 0.002644259282067706 +262 28 negative_sampler.num_negs_per_pos 64.0 +262 28 training.batch_size 1.0 +262 29 model.embedding_dim 0.0 +262 29 loss.margin 9.266070323103696 +262 29 loss.adversarial_temperature 0.5704248562575178 +262 29 optimizer.lr 0.03666555882135994 +262 29 negative_sampler.num_negs_per_pos 81.0 +262 29 training.batch_size 2.0 +262 30 model.embedding_dim 0.0 +262 30 loss.margin 1.4126515530184 +262 30 loss.adversarial_temperature 0.763733372281083 +262 30 optimizer.lr 0.07922335323755433 +262 30 negative_sampler.num_negs_per_pos 37.0 +262 30 training.batch_size 0.0 +262 31 model.embedding_dim 1.0 +262 31 loss.margin 27.31421258708699 +262 31 loss.adversarial_temperature 0.22531758375740457 +262 31 optimizer.lr 0.01371501265741866 +262 31 negative_sampler.num_negs_per_pos 54.0 +262 31 training.batch_size 1.0 +262 32 model.embedding_dim 0.0 +262 32 loss.margin 20.974421430535685 +262 32 loss.adversarial_temperature 0.4156569533568805 +262 32 optimizer.lr 0.0013823886594852507 +262 32 negative_sampler.num_negs_per_pos 92.0 +262 32 training.batch_size 1.0 +262 33 model.embedding_dim 2.0 +262 33 loss.margin 24.41327937728834 +262 33 loss.adversarial_temperature 0.515614615306646 +262 33 optimizer.lr 0.003327868047633044 +262 33 negative_sampler.num_negs_per_pos 14.0 +262 33 training.batch_size 1.0 +262 34 model.embedding_dim 2.0 +262 34 loss.margin 23.024682204297363 +262 34 loss.adversarial_temperature 0.23402070624219135 +262 34 optimizer.lr 0.02228935980491015 +262 34 negative_sampler.num_negs_per_pos 22.0 +262 34 training.batch_size 1.0 +262 35 model.embedding_dim 2.0 +262 35 loss.margin 16.16254840737577 +262 35 
loss.adversarial_temperature 0.5644612421750991 +262 35 optimizer.lr 0.002405336101006175 +262 35 negative_sampler.num_negs_per_pos 32.0 +262 35 training.batch_size 2.0 +262 36 model.embedding_dim 0.0 +262 36 loss.margin 28.899274411059196 +262 36 loss.adversarial_temperature 0.3973257937987976 +262 36 optimizer.lr 0.001382773451918326 +262 36 negative_sampler.num_negs_per_pos 50.0 +262 36 training.batch_size 1.0 +262 37 model.embedding_dim 0.0 +262 37 loss.margin 19.423085865190416 +262 37 loss.adversarial_temperature 0.5665709506762888 +262 37 optimizer.lr 0.013661465445995334 +262 37 negative_sampler.num_negs_per_pos 97.0 +262 37 training.batch_size 2.0 +262 38 model.embedding_dim 1.0 +262 38 loss.margin 20.743312283532795 +262 38 loss.adversarial_temperature 0.9353133180591573 +262 38 optimizer.lr 0.03882270332799931 +262 38 negative_sampler.num_negs_per_pos 51.0 +262 38 training.batch_size 0.0 +262 39 model.embedding_dim 1.0 +262 39 loss.margin 6.0320408226534115 +262 39 loss.adversarial_temperature 0.8288249670459488 +262 39 optimizer.lr 0.04008273766879792 +262 39 negative_sampler.num_negs_per_pos 70.0 +262 39 training.batch_size 2.0 +262 40 model.embedding_dim 1.0 +262 40 loss.margin 13.310137807248989 +262 40 loss.adversarial_temperature 0.702469010111243 +262 40 optimizer.lr 0.005616633533879 +262 40 negative_sampler.num_negs_per_pos 6.0 +262 40 training.batch_size 0.0 +262 41 model.embedding_dim 0.0 +262 41 loss.margin 14.689248865072651 +262 41 loss.adversarial_temperature 0.31969538659784535 +262 41 optimizer.lr 0.04249973083653775 +262 41 negative_sampler.num_negs_per_pos 2.0 +262 41 training.batch_size 0.0 +262 42 model.embedding_dim 1.0 +262 42 loss.margin 28.41470691763055 +262 42 loss.adversarial_temperature 0.5185509812955376 +262 42 optimizer.lr 0.0294721567330459 +262 42 negative_sampler.num_negs_per_pos 36.0 +262 42 training.batch_size 2.0 +262 43 model.embedding_dim 2.0 +262 43 loss.margin 26.74033370839346 +262 43 
loss.adversarial_temperature 0.6838628340299926 +262 43 optimizer.lr 0.020153353561882865 +262 43 negative_sampler.num_negs_per_pos 67.0 +262 43 training.batch_size 0.0 +262 44 model.embedding_dim 2.0 +262 44 loss.margin 22.70977996290748 +262 44 loss.adversarial_temperature 0.23674618266418834 +262 44 optimizer.lr 0.01141420912686408 +262 44 negative_sampler.num_negs_per_pos 15.0 +262 44 training.batch_size 1.0 +262 45 model.embedding_dim 2.0 +262 45 loss.margin 22.743549369720668 +262 45 loss.adversarial_temperature 0.36235838616246674 +262 45 optimizer.lr 0.01665642386334587 +262 45 negative_sampler.num_negs_per_pos 56.0 +262 45 training.batch_size 2.0 +262 46 model.embedding_dim 0.0 +262 46 loss.margin 17.368474200293353 +262 46 loss.adversarial_temperature 0.25912618758899597 +262 46 optimizer.lr 0.011646257367509546 +262 46 negative_sampler.num_negs_per_pos 62.0 +262 46 training.batch_size 0.0 +262 47 model.embedding_dim 1.0 +262 47 loss.margin 13.804020246640526 +262 47 loss.adversarial_temperature 0.3367751838501356 +262 47 optimizer.lr 0.018540051174312826 +262 47 negative_sampler.num_negs_per_pos 71.0 +262 47 training.batch_size 2.0 +262 48 model.embedding_dim 1.0 +262 48 loss.margin 7.026763295364029 +262 48 loss.adversarial_temperature 0.9318147501731853 +262 48 optimizer.lr 0.007415460832203365 +262 48 negative_sampler.num_negs_per_pos 39.0 +262 48 training.batch_size 0.0 +262 49 model.embedding_dim 2.0 +262 49 loss.margin 26.79650845776127 +262 49 loss.adversarial_temperature 0.6759046839747069 +262 49 optimizer.lr 0.0025772085908811024 +262 49 negative_sampler.num_negs_per_pos 18.0 +262 49 training.batch_size 1.0 +262 50 model.embedding_dim 1.0 +262 50 loss.margin 24.759015007927676 +262 50 loss.adversarial_temperature 0.11948044018165754 +262 50 optimizer.lr 0.04375138222547984 +262 50 negative_sampler.num_negs_per_pos 10.0 +262 50 training.batch_size 2.0 +262 51 model.embedding_dim 2.0 +262 51 loss.margin 10.622034559433349 +262 51 
loss.adversarial_temperature 0.2499468345870224 +262 51 optimizer.lr 0.013339391480165257 +262 51 negative_sampler.num_negs_per_pos 14.0 +262 51 training.batch_size 2.0 +262 52 model.embedding_dim 0.0 +262 52 loss.margin 28.68554377615237 +262 52 loss.adversarial_temperature 0.10936054001754596 +262 52 optimizer.lr 0.002844867288846111 +262 52 negative_sampler.num_negs_per_pos 71.0 +262 52 training.batch_size 0.0 +262 53 model.embedding_dim 2.0 +262 53 loss.margin 29.40791647561342 +262 53 loss.adversarial_temperature 0.4573920220089493 +262 53 optimizer.lr 0.06519429617225037 +262 53 negative_sampler.num_negs_per_pos 75.0 +262 53 training.batch_size 1.0 +262 54 model.embedding_dim 1.0 +262 54 loss.margin 11.642090015339624 +262 54 loss.adversarial_temperature 0.2708918659565674 +262 54 optimizer.lr 0.029311235952079132 +262 54 negative_sampler.num_negs_per_pos 71.0 +262 54 training.batch_size 1.0 +262 55 model.embedding_dim 1.0 +262 55 loss.margin 2.6724468642524393 +262 55 loss.adversarial_temperature 0.24951276405013653 +262 55 optimizer.lr 0.014212309843425952 +262 55 negative_sampler.num_negs_per_pos 4.0 +262 55 training.batch_size 2.0 +262 56 model.embedding_dim 2.0 +262 56 loss.margin 9.674954547050561 +262 56 loss.adversarial_temperature 0.9071950598096162 +262 56 optimizer.lr 0.09596861646201238 +262 56 negative_sampler.num_negs_per_pos 8.0 +262 56 training.batch_size 1.0 +262 57 model.embedding_dim 2.0 +262 57 loss.margin 29.950514438324493 +262 57 loss.adversarial_temperature 0.22099046180440218 +262 57 optimizer.lr 0.002608880530511651 +262 57 negative_sampler.num_negs_per_pos 76.0 +262 57 training.batch_size 0.0 +262 58 model.embedding_dim 1.0 +262 58 loss.margin 8.344539827645715 +262 58 loss.adversarial_temperature 0.8607425311873133 +262 58 optimizer.lr 0.07930618443032014 +262 58 negative_sampler.num_negs_per_pos 20.0 +262 58 training.batch_size 1.0 +262 59 model.embedding_dim 0.0 +262 59 loss.margin 9.851691093788286 +262 59 
loss.adversarial_temperature 0.43333703679923236 +262 59 optimizer.lr 0.08490683368128199 +262 59 negative_sampler.num_negs_per_pos 19.0 +262 59 training.batch_size 0.0 +262 60 model.embedding_dim 1.0 +262 60 loss.margin 15.736653606883962 +262 60 loss.adversarial_temperature 0.34139536960508127 +262 60 optimizer.lr 0.003303687979653574 +262 60 negative_sampler.num_negs_per_pos 92.0 +262 60 training.batch_size 0.0 +262 61 model.embedding_dim 1.0 +262 61 loss.margin 9.553055848320124 +262 61 loss.adversarial_temperature 0.549761866083151 +262 61 optimizer.lr 0.014546522841727656 +262 61 negative_sampler.num_negs_per_pos 71.0 +262 61 training.batch_size 2.0 +262 62 model.embedding_dim 1.0 +262 62 loss.margin 3.494850835720948 +262 62 loss.adversarial_temperature 0.4507529487203133 +262 62 optimizer.lr 0.004692005852992891 +262 62 negative_sampler.num_negs_per_pos 23.0 +262 62 training.batch_size 0.0 +262 63 model.embedding_dim 2.0 +262 63 loss.margin 13.442443287715854 +262 63 loss.adversarial_temperature 0.41397230798335405 +262 63 optimizer.lr 0.001003787428832899 +262 63 negative_sampler.num_negs_per_pos 36.0 +262 63 training.batch_size 2.0 +262 1 dataset """wn18rr""" +262 1 model """ermlp""" +262 1 loss """nssa""" +262 1 regularizer """no""" +262 1 optimizer """adam""" +262 1 training_loop """owa""" +262 1 negative_sampler """basic""" +262 1 evaluator """rankbased""" +262 2 dataset """wn18rr""" +262 2 model """ermlp""" +262 2 loss """nssa""" +262 2 regularizer """no""" +262 2 optimizer """adam""" +262 2 training_loop """owa""" +262 2 negative_sampler """basic""" +262 2 evaluator """rankbased""" +262 3 dataset """wn18rr""" +262 3 model """ermlp""" +262 3 loss """nssa""" +262 3 regularizer """no""" +262 3 optimizer """adam""" +262 3 training_loop """owa""" +262 3 negative_sampler """basic""" +262 3 evaluator """rankbased""" +262 4 dataset """wn18rr""" +262 4 model """ermlp""" +262 4 loss """nssa""" +262 4 regularizer """no""" +262 4 optimizer """adam""" +262 4 
training_loop """owa""" +262 4 negative_sampler """basic""" +262 4 evaluator """rankbased""" +262 5 dataset """wn18rr""" +262 5 model """ermlp""" +262 5 loss """nssa""" +262 5 regularizer """no""" +262 5 optimizer """adam""" +262 5 training_loop """owa""" +262 5 negative_sampler """basic""" +262 5 evaluator """rankbased""" +262 6 dataset """wn18rr""" +262 6 model """ermlp""" +262 6 loss """nssa""" +262 6 regularizer """no""" +262 6 optimizer """adam""" +262 6 training_loop """owa""" +262 6 negative_sampler """basic""" +262 6 evaluator """rankbased""" +262 7 dataset """wn18rr""" +262 7 model """ermlp""" +262 7 loss """nssa""" +262 7 regularizer """no""" +262 7 optimizer """adam""" +262 7 training_loop """owa""" +262 7 negative_sampler """basic""" +262 7 evaluator """rankbased""" +262 8 dataset """wn18rr""" +262 8 model """ermlp""" +262 8 loss """nssa""" +262 8 regularizer """no""" +262 8 optimizer """adam""" +262 8 training_loop """owa""" +262 8 negative_sampler """basic""" +262 8 evaluator """rankbased""" +262 9 dataset """wn18rr""" +262 9 model """ermlp""" +262 9 loss """nssa""" +262 9 regularizer """no""" +262 9 optimizer """adam""" +262 9 training_loop """owa""" +262 9 negative_sampler """basic""" +262 9 evaluator """rankbased""" +262 10 dataset """wn18rr""" +262 10 model """ermlp""" +262 10 loss """nssa""" +262 10 regularizer """no""" +262 10 optimizer """adam""" +262 10 training_loop """owa""" +262 10 negative_sampler """basic""" +262 10 evaluator """rankbased""" +262 11 dataset """wn18rr""" +262 11 model """ermlp""" +262 11 loss """nssa""" +262 11 regularizer """no""" +262 11 optimizer """adam""" +262 11 training_loop """owa""" +262 11 negative_sampler """basic""" +262 11 evaluator """rankbased""" +262 12 dataset """wn18rr""" +262 12 model """ermlp""" +262 12 loss """nssa""" +262 12 regularizer """no""" +262 12 optimizer """adam""" +262 12 training_loop """owa""" +262 12 negative_sampler """basic""" +262 12 evaluator """rankbased""" +262 13 dataset 
"""wn18rr""" +262 13 model """ermlp""" +262 13 loss """nssa""" +262 13 regularizer """no""" +262 13 optimizer """adam""" +262 13 training_loop """owa""" +262 13 negative_sampler """basic""" +262 13 evaluator """rankbased""" +262 14 dataset """wn18rr""" +262 14 model """ermlp""" +262 14 loss """nssa""" +262 14 regularizer """no""" +262 14 optimizer """adam""" +262 14 training_loop """owa""" +262 14 negative_sampler """basic""" +262 14 evaluator """rankbased""" +262 15 dataset """wn18rr""" +262 15 model """ermlp""" +262 15 loss """nssa""" +262 15 regularizer """no""" +262 15 optimizer """adam""" +262 15 training_loop """owa""" +262 15 negative_sampler """basic""" +262 15 evaluator """rankbased""" +262 16 dataset """wn18rr""" +262 16 model """ermlp""" +262 16 loss """nssa""" +262 16 regularizer """no""" +262 16 optimizer """adam""" +262 16 training_loop """owa""" +262 16 negative_sampler """basic""" +262 16 evaluator """rankbased""" +262 17 dataset """wn18rr""" +262 17 model """ermlp""" +262 17 loss """nssa""" +262 17 regularizer """no""" +262 17 optimizer """adam""" +262 17 training_loop """owa""" +262 17 negative_sampler """basic""" +262 17 evaluator """rankbased""" +262 18 dataset """wn18rr""" +262 18 model """ermlp""" +262 18 loss """nssa""" +262 18 regularizer """no""" +262 18 optimizer """adam""" +262 18 training_loop """owa""" +262 18 negative_sampler """basic""" +262 18 evaluator """rankbased""" +262 19 dataset """wn18rr""" +262 19 model """ermlp""" +262 19 loss """nssa""" +262 19 regularizer """no""" +262 19 optimizer """adam""" +262 19 training_loop """owa""" +262 19 negative_sampler """basic""" +262 19 evaluator """rankbased""" +262 20 dataset """wn18rr""" +262 20 model """ermlp""" +262 20 loss """nssa""" +262 20 regularizer """no""" +262 20 optimizer """adam""" +262 20 training_loop """owa""" +262 20 negative_sampler """basic""" +262 20 evaluator """rankbased""" +262 21 dataset """wn18rr""" +262 21 model """ermlp""" +262 21 loss """nssa""" +262 21 
regularizer """no""" +262 21 optimizer """adam""" +262 21 training_loop """owa""" +262 21 negative_sampler """basic""" +262 21 evaluator """rankbased""" +262 22 dataset """wn18rr""" +262 22 model """ermlp""" +262 22 loss """nssa""" +262 22 regularizer """no""" +262 22 optimizer """adam""" +262 22 training_loop """owa""" +262 22 negative_sampler """basic""" +262 22 evaluator """rankbased""" +262 23 dataset """wn18rr""" +262 23 model """ermlp""" +262 23 loss """nssa""" +262 23 regularizer """no""" +262 23 optimizer """adam""" +262 23 training_loop """owa""" +262 23 negative_sampler """basic""" +262 23 evaluator """rankbased""" +262 24 dataset """wn18rr""" +262 24 model """ermlp""" +262 24 loss """nssa""" +262 24 regularizer """no""" +262 24 optimizer """adam""" +262 24 training_loop """owa""" +262 24 negative_sampler """basic""" +262 24 evaluator """rankbased""" +262 25 dataset """wn18rr""" +262 25 model """ermlp""" +262 25 loss """nssa""" +262 25 regularizer """no""" +262 25 optimizer """adam""" +262 25 training_loop """owa""" +262 25 negative_sampler """basic""" +262 25 evaluator """rankbased""" +262 26 dataset """wn18rr""" +262 26 model """ermlp""" +262 26 loss """nssa""" +262 26 regularizer """no""" +262 26 optimizer """adam""" +262 26 training_loop """owa""" +262 26 negative_sampler """basic""" +262 26 evaluator """rankbased""" +262 27 dataset """wn18rr""" +262 27 model """ermlp""" +262 27 loss """nssa""" +262 27 regularizer """no""" +262 27 optimizer """adam""" +262 27 training_loop """owa""" +262 27 negative_sampler """basic""" +262 27 evaluator """rankbased""" +262 28 dataset """wn18rr""" +262 28 model """ermlp""" +262 28 loss """nssa""" +262 28 regularizer """no""" +262 28 optimizer """adam""" +262 28 training_loop """owa""" +262 28 negative_sampler """basic""" +262 28 evaluator """rankbased""" +262 29 dataset """wn18rr""" +262 29 model """ermlp""" +262 29 loss """nssa""" +262 29 regularizer """no""" +262 29 optimizer """adam""" +262 29 training_loop 
"""owa""" +262 29 negative_sampler """basic""" +262 29 evaluator """rankbased""" +262 30 dataset """wn18rr""" +262 30 model """ermlp""" +262 30 loss """nssa""" +262 30 regularizer """no""" +262 30 optimizer """adam""" +262 30 training_loop """owa""" +262 30 negative_sampler """basic""" +262 30 evaluator """rankbased""" +262 31 dataset """wn18rr""" +262 31 model """ermlp""" +262 31 loss """nssa""" +262 31 regularizer """no""" +262 31 optimizer """adam""" +262 31 training_loop """owa""" +262 31 negative_sampler """basic""" +262 31 evaluator """rankbased""" +262 32 dataset """wn18rr""" +262 32 model """ermlp""" +262 32 loss """nssa""" +262 32 regularizer """no""" +262 32 optimizer """adam""" +262 32 training_loop """owa""" +262 32 negative_sampler """basic""" +262 32 evaluator """rankbased""" +262 33 dataset """wn18rr""" +262 33 model """ermlp""" +262 33 loss """nssa""" +262 33 regularizer """no""" +262 33 optimizer """adam""" +262 33 training_loop """owa""" +262 33 negative_sampler """basic""" +262 33 evaluator """rankbased""" +262 34 dataset """wn18rr""" +262 34 model """ermlp""" +262 34 loss """nssa""" +262 34 regularizer """no""" +262 34 optimizer """adam""" +262 34 training_loop """owa""" +262 34 negative_sampler """basic""" +262 34 evaluator """rankbased""" +262 35 dataset """wn18rr""" +262 35 model """ermlp""" +262 35 loss """nssa""" +262 35 regularizer """no""" +262 35 optimizer """adam""" +262 35 training_loop """owa""" +262 35 negative_sampler """basic""" +262 35 evaluator """rankbased""" +262 36 dataset """wn18rr""" +262 36 model """ermlp""" +262 36 loss """nssa""" +262 36 regularizer """no""" +262 36 optimizer """adam""" +262 36 training_loop """owa""" +262 36 negative_sampler """basic""" +262 36 evaluator """rankbased""" +262 37 dataset """wn18rr""" +262 37 model """ermlp""" +262 37 loss """nssa""" +262 37 regularizer """no""" +262 37 optimizer """adam""" +262 37 training_loop """owa""" +262 37 negative_sampler """basic""" +262 37 evaluator 
"""rankbased""" +262 38 dataset """wn18rr""" +262 38 model """ermlp""" +262 38 loss """nssa""" +262 38 regularizer """no""" +262 38 optimizer """adam""" +262 38 training_loop """owa""" +262 38 negative_sampler """basic""" +262 38 evaluator """rankbased""" +262 39 dataset """wn18rr""" +262 39 model """ermlp""" +262 39 loss """nssa""" +262 39 regularizer """no""" +262 39 optimizer """adam""" +262 39 training_loop """owa""" +262 39 negative_sampler """basic""" +262 39 evaluator """rankbased""" +262 40 dataset """wn18rr""" +262 40 model """ermlp""" +262 40 loss """nssa""" +262 40 regularizer """no""" +262 40 optimizer """adam""" +262 40 training_loop """owa""" +262 40 negative_sampler """basic""" +262 40 evaluator """rankbased""" +262 41 dataset """wn18rr""" +262 41 model """ermlp""" +262 41 loss """nssa""" +262 41 regularizer """no""" +262 41 optimizer """adam""" +262 41 training_loop """owa""" +262 41 negative_sampler """basic""" +262 41 evaluator """rankbased""" +262 42 dataset """wn18rr""" +262 42 model """ermlp""" +262 42 loss """nssa""" +262 42 regularizer """no""" +262 42 optimizer """adam""" +262 42 training_loop """owa""" +262 42 negative_sampler """basic""" +262 42 evaluator """rankbased""" +262 43 dataset """wn18rr""" +262 43 model """ermlp""" +262 43 loss """nssa""" +262 43 regularizer """no""" +262 43 optimizer """adam""" +262 43 training_loop """owa""" +262 43 negative_sampler """basic""" +262 43 evaluator """rankbased""" +262 44 dataset """wn18rr""" +262 44 model """ermlp""" +262 44 loss """nssa""" +262 44 regularizer """no""" +262 44 optimizer """adam""" +262 44 training_loop """owa""" +262 44 negative_sampler """basic""" +262 44 evaluator """rankbased""" +262 45 dataset """wn18rr""" +262 45 model """ermlp""" +262 45 loss """nssa""" +262 45 regularizer """no""" +262 45 optimizer """adam""" +262 45 training_loop """owa""" +262 45 negative_sampler """basic""" +262 45 evaluator """rankbased""" +262 46 dataset """wn18rr""" +262 46 model """ermlp""" +262 46 
loss """nssa""" +262 46 regularizer """no""" +262 46 optimizer """adam""" +262 46 training_loop """owa""" +262 46 negative_sampler """basic""" +262 46 evaluator """rankbased""" +262 47 dataset """wn18rr""" +262 47 model """ermlp""" +262 47 loss """nssa""" +262 47 regularizer """no""" +262 47 optimizer """adam""" +262 47 training_loop """owa""" +262 47 negative_sampler """basic""" +262 47 evaluator """rankbased""" +262 48 dataset """wn18rr""" +262 48 model """ermlp""" +262 48 loss """nssa""" +262 48 regularizer """no""" +262 48 optimizer """adam""" +262 48 training_loop """owa""" +262 48 negative_sampler """basic""" +262 48 evaluator """rankbased""" +262 49 dataset """wn18rr""" +262 49 model """ermlp""" +262 49 loss """nssa""" +262 49 regularizer """no""" +262 49 optimizer """adam""" +262 49 training_loop """owa""" +262 49 negative_sampler """basic""" +262 49 evaluator """rankbased""" +262 50 dataset """wn18rr""" +262 50 model """ermlp""" +262 50 loss """nssa""" +262 50 regularizer """no""" +262 50 optimizer """adam""" +262 50 training_loop """owa""" +262 50 negative_sampler """basic""" +262 50 evaluator """rankbased""" +262 51 dataset """wn18rr""" +262 51 model """ermlp""" +262 51 loss """nssa""" +262 51 regularizer """no""" +262 51 optimizer """adam""" +262 51 training_loop """owa""" +262 51 negative_sampler """basic""" +262 51 evaluator """rankbased""" +262 52 dataset """wn18rr""" +262 52 model """ermlp""" +262 52 loss """nssa""" +262 52 regularizer """no""" +262 52 optimizer """adam""" +262 52 training_loop """owa""" +262 52 negative_sampler """basic""" +262 52 evaluator """rankbased""" +262 53 dataset """wn18rr""" +262 53 model """ermlp""" +262 53 loss """nssa""" +262 53 regularizer """no""" +262 53 optimizer """adam""" +262 53 training_loop """owa""" +262 53 negative_sampler """basic""" +262 53 evaluator """rankbased""" +262 54 dataset """wn18rr""" +262 54 model """ermlp""" +262 54 loss """nssa""" +262 54 regularizer """no""" +262 54 optimizer """adam""" +262 
54 training_loop """owa""" +262 54 negative_sampler """basic""" +262 54 evaluator """rankbased""" +262 55 dataset """wn18rr""" +262 55 model """ermlp""" +262 55 loss """nssa""" +262 55 regularizer """no""" +262 55 optimizer """adam""" +262 55 training_loop """owa""" +262 55 negative_sampler """basic""" +262 55 evaluator """rankbased""" +262 56 dataset """wn18rr""" +262 56 model """ermlp""" +262 56 loss """nssa""" +262 56 regularizer """no""" +262 56 optimizer """adam""" +262 56 training_loop """owa""" +262 56 negative_sampler """basic""" +262 56 evaluator """rankbased""" +262 57 dataset """wn18rr""" +262 57 model """ermlp""" +262 57 loss """nssa""" +262 57 regularizer """no""" +262 57 optimizer """adam""" +262 57 training_loop """owa""" +262 57 negative_sampler """basic""" +262 57 evaluator """rankbased""" +262 58 dataset """wn18rr""" +262 58 model """ermlp""" +262 58 loss """nssa""" +262 58 regularizer """no""" +262 58 optimizer """adam""" +262 58 training_loop """owa""" +262 58 negative_sampler """basic""" +262 58 evaluator """rankbased""" +262 59 dataset """wn18rr""" +262 59 model """ermlp""" +262 59 loss """nssa""" +262 59 regularizer """no""" +262 59 optimizer """adam""" +262 59 training_loop """owa""" +262 59 negative_sampler """basic""" +262 59 evaluator """rankbased""" +262 60 dataset """wn18rr""" +262 60 model """ermlp""" +262 60 loss """nssa""" +262 60 regularizer """no""" +262 60 optimizer """adam""" +262 60 training_loop """owa""" +262 60 negative_sampler """basic""" +262 60 evaluator """rankbased""" +262 61 dataset """wn18rr""" +262 61 model """ermlp""" +262 61 loss """nssa""" +262 61 regularizer """no""" +262 61 optimizer """adam""" +262 61 training_loop """owa""" +262 61 negative_sampler """basic""" +262 61 evaluator """rankbased""" +262 62 dataset """wn18rr""" +262 62 model """ermlp""" +262 62 loss """nssa""" +262 62 regularizer """no""" +262 62 optimizer """adam""" +262 62 training_loop """owa""" +262 62 negative_sampler """basic""" +262 62 
evaluator """rankbased""" +262 63 dataset """wn18rr""" +262 63 model """ermlp""" +262 63 loss """nssa""" +262 63 regularizer """no""" +262 63 optimizer """adam""" +262 63 training_loop """owa""" +262 63 negative_sampler """basic""" +262 63 evaluator """rankbased""" +263 1 model.embedding_dim 0.0 +263 1 loss.margin 2.710540595696613 +263 1 loss.adversarial_temperature 0.3429725472862224 +263 1 optimizer.lr 0.004223228214672629 +263 1 negative_sampler.num_negs_per_pos 91.0 +263 1 training.batch_size 2.0 +263 2 model.embedding_dim 2.0 +263 2 loss.margin 4.663442888995027 +263 2 loss.adversarial_temperature 0.33092806477148545 +263 2 optimizer.lr 0.012496064437934534 +263 2 negative_sampler.num_negs_per_pos 70.0 +263 2 training.batch_size 2.0 +263 3 model.embedding_dim 2.0 +263 3 loss.margin 12.296585511347248 +263 3 loss.adversarial_temperature 0.4217035383393962 +263 3 optimizer.lr 0.004367798555717829 +263 3 negative_sampler.num_negs_per_pos 7.0 +263 3 training.batch_size 2.0 +263 4 model.embedding_dim 1.0 +263 4 loss.margin 2.278059394923984 +263 4 loss.adversarial_temperature 0.9699237024098017 +263 4 optimizer.lr 0.002684081674192747 +263 4 negative_sampler.num_negs_per_pos 28.0 +263 4 training.batch_size 1.0 +263 5 model.embedding_dim 0.0 +263 5 loss.margin 28.947244508757773 +263 5 loss.adversarial_temperature 0.3034374227726865 +263 5 optimizer.lr 0.0021584321300290492 +263 5 negative_sampler.num_negs_per_pos 47.0 +263 5 training.batch_size 2.0 +263 6 model.embedding_dim 1.0 +263 6 loss.margin 9.195848566284429 +263 6 loss.adversarial_temperature 0.7085277942839728 +263 6 optimizer.lr 0.0011763775233445686 +263 6 negative_sampler.num_negs_per_pos 50.0 +263 6 training.batch_size 2.0 +263 7 model.embedding_dim 2.0 +263 7 loss.margin 27.965653156882674 +263 7 loss.adversarial_temperature 0.9638436647864508 +263 7 optimizer.lr 0.0013728536722747294 +263 7 negative_sampler.num_negs_per_pos 80.0 +263 7 training.batch_size 2.0 +263 8 model.embedding_dim 2.0 +263 8 
loss.margin 18.26167082502421 +263 8 loss.adversarial_temperature 0.8270074247170566 +263 8 optimizer.lr 0.006005575215976201 +263 8 negative_sampler.num_negs_per_pos 13.0 +263 8 training.batch_size 0.0 +263 9 model.embedding_dim 2.0 +263 9 loss.margin 9.597582292694547 +263 9 loss.adversarial_temperature 0.9042458836714296 +263 9 optimizer.lr 0.01029263966396662 +263 9 negative_sampler.num_negs_per_pos 95.0 +263 9 training.batch_size 2.0 +263 10 model.embedding_dim 1.0 +263 10 loss.margin 28.103976752671667 +263 10 loss.adversarial_temperature 0.4509395557947036 +263 10 optimizer.lr 0.04463007761952735 +263 10 negative_sampler.num_negs_per_pos 30.0 +263 10 training.batch_size 2.0 +263 11 model.embedding_dim 0.0 +263 11 loss.margin 4.144010782170352 +263 11 loss.adversarial_temperature 0.8395063770468264 +263 11 optimizer.lr 0.04476959614376581 +263 11 negative_sampler.num_negs_per_pos 71.0 +263 11 training.batch_size 1.0 +263 12 model.embedding_dim 1.0 +263 12 loss.margin 7.6748325274700635 +263 12 loss.adversarial_temperature 0.4491099922099509 +263 12 optimizer.lr 0.09880195999674872 +263 12 negative_sampler.num_negs_per_pos 20.0 +263 12 training.batch_size 2.0 +263 13 model.embedding_dim 0.0 +263 13 loss.margin 8.183657580333513 +263 13 loss.adversarial_temperature 0.7713506247558526 +263 13 optimizer.lr 0.005166498715151791 +263 13 negative_sampler.num_negs_per_pos 34.0 +263 13 training.batch_size 1.0 +263 14 model.embedding_dim 2.0 +263 14 loss.margin 24.460705422774748 +263 14 loss.adversarial_temperature 0.6137600839733439 +263 14 optimizer.lr 0.00130350960525063 +263 14 negative_sampler.num_negs_per_pos 79.0 +263 14 training.batch_size 1.0 +263 15 model.embedding_dim 2.0 +263 15 loss.margin 9.866442737461572 +263 15 loss.adversarial_temperature 0.4898035755108915 +263 15 optimizer.lr 0.009966599759269048 +263 15 negative_sampler.num_negs_per_pos 85.0 +263 15 training.batch_size 1.0 +263 16 model.embedding_dim 1.0 +263 16 loss.margin 23.644812468953216 +263 
16 loss.adversarial_temperature 0.7689861276773723 +263 16 optimizer.lr 0.01868582619217956 +263 16 negative_sampler.num_negs_per_pos 53.0 +263 16 training.batch_size 1.0 +263 17 model.embedding_dim 2.0 +263 17 loss.margin 7.909574906547383 +263 17 loss.adversarial_temperature 0.6924139805542397 +263 17 optimizer.lr 0.019395759760460055 +263 17 negative_sampler.num_negs_per_pos 57.0 +263 17 training.batch_size 0.0 +263 18 model.embedding_dim 1.0 +263 18 loss.margin 25.71423271972391 +263 18 loss.adversarial_temperature 0.7920642362293839 +263 18 optimizer.lr 0.005210411476420673 +263 18 negative_sampler.num_negs_per_pos 21.0 +263 18 training.batch_size 2.0 +263 19 model.embedding_dim 0.0 +263 19 loss.margin 29.50227176969534 +263 19 loss.adversarial_temperature 0.15020213226216328 +263 19 optimizer.lr 0.04701889736718828 +263 19 negative_sampler.num_negs_per_pos 26.0 +263 19 training.batch_size 0.0 +263 20 model.embedding_dim 1.0 +263 20 loss.margin 23.396394491023067 +263 20 loss.adversarial_temperature 0.6936676458015033 +263 20 optimizer.lr 0.0033802337554371923 +263 20 negative_sampler.num_negs_per_pos 95.0 +263 20 training.batch_size 1.0 +263 21 model.embedding_dim 2.0 +263 21 loss.margin 2.804321894481898 +263 21 loss.adversarial_temperature 0.5360806164716466 +263 21 optimizer.lr 0.05382796546140226 +263 21 negative_sampler.num_negs_per_pos 64.0 +263 21 training.batch_size 0.0 +263 22 model.embedding_dim 0.0 +263 22 loss.margin 28.57663205609546 +263 22 loss.adversarial_temperature 0.6157767713351759 +263 22 optimizer.lr 0.008928846746401188 +263 22 negative_sampler.num_negs_per_pos 48.0 +263 22 training.batch_size 2.0 +263 23 model.embedding_dim 2.0 +263 23 loss.margin 19.44115442973344 +263 23 loss.adversarial_temperature 0.13355946569564797 +263 23 optimizer.lr 0.027082678654732396 +263 23 negative_sampler.num_negs_per_pos 6.0 +263 23 training.batch_size 2.0 +263 24 model.embedding_dim 0.0 +263 24 loss.margin 11.857303939180936 +263 24 
loss.adversarial_temperature 0.406629761456947 +263 24 optimizer.lr 0.002664616202704805 +263 24 negative_sampler.num_negs_per_pos 77.0 +263 24 training.batch_size 1.0 +263 25 model.embedding_dim 1.0 +263 25 loss.margin 8.897117188226877 +263 25 loss.adversarial_temperature 0.9713806822225726 +263 25 optimizer.lr 0.012800545969820953 +263 25 negative_sampler.num_negs_per_pos 11.0 +263 25 training.batch_size 0.0 +263 26 model.embedding_dim 1.0 +263 26 loss.margin 10.813699960116471 +263 26 loss.adversarial_temperature 0.395373123376736 +263 26 optimizer.lr 0.006436352744426742 +263 26 negative_sampler.num_negs_per_pos 40.0 +263 26 training.batch_size 0.0 +263 27 model.embedding_dim 1.0 +263 27 loss.margin 6.763084268145149 +263 27 loss.adversarial_temperature 0.10600187749283864 +263 27 optimizer.lr 0.017498194360438604 +263 27 negative_sampler.num_negs_per_pos 97.0 +263 27 training.batch_size 0.0 +263 28 model.embedding_dim 0.0 +263 28 loss.margin 19.095524254469506 +263 28 loss.adversarial_temperature 0.9845198436538397 +263 28 optimizer.lr 0.002431804727510379 +263 28 negative_sampler.num_negs_per_pos 27.0 +263 28 training.batch_size 1.0 +263 29 model.embedding_dim 0.0 +263 29 loss.margin 7.8123571497264965 +263 29 loss.adversarial_temperature 0.9236731824954285 +263 29 optimizer.lr 0.0031549251805971954 +263 29 negative_sampler.num_negs_per_pos 92.0 +263 29 training.batch_size 0.0 +263 30 model.embedding_dim 0.0 +263 30 loss.margin 15.747985419987607 +263 30 loss.adversarial_temperature 0.9768452286851534 +263 30 optimizer.lr 0.013905366172841726 +263 30 negative_sampler.num_negs_per_pos 35.0 +263 30 training.batch_size 1.0 +263 31 model.embedding_dim 0.0 +263 31 loss.margin 9.711379542019857 +263 31 loss.adversarial_temperature 0.9920098956138388 +263 31 optimizer.lr 0.0014997166641363465 +263 31 negative_sampler.num_negs_per_pos 10.0 +263 31 training.batch_size 0.0 +263 32 model.embedding_dim 0.0 +263 32 loss.margin 23.46871021076805 +263 32 
loss.adversarial_temperature 0.7614814534532783 +263 32 optimizer.lr 0.006564942706235455 +263 32 negative_sampler.num_negs_per_pos 84.0 +263 32 training.batch_size 2.0 +263 33 model.embedding_dim 1.0 +263 33 loss.margin 7.765152362937101 +263 33 loss.adversarial_temperature 0.6023100869129611 +263 33 optimizer.lr 0.004157396679359232 +263 33 negative_sampler.num_negs_per_pos 49.0 +263 33 training.batch_size 2.0 +263 34 model.embedding_dim 2.0 +263 34 loss.margin 17.793886602428337 +263 34 loss.adversarial_temperature 0.6059089310395418 +263 34 optimizer.lr 0.009904218698059082 +263 34 negative_sampler.num_negs_per_pos 61.0 +263 34 training.batch_size 0.0 +263 35 model.embedding_dim 0.0 +263 35 loss.margin 25.588961372252683 +263 35 loss.adversarial_temperature 0.7006114900840165 +263 35 optimizer.lr 0.010238620312683867 +263 35 negative_sampler.num_negs_per_pos 84.0 +263 35 training.batch_size 1.0 +263 36 model.embedding_dim 0.0 +263 36 loss.margin 22.09173283645811 +263 36 loss.adversarial_temperature 0.7982017191982399 +263 36 optimizer.lr 0.03960722948642801 +263 36 negative_sampler.num_negs_per_pos 45.0 +263 36 training.batch_size 0.0 +263 37 model.embedding_dim 0.0 +263 37 loss.margin 22.303470019024637 +263 37 loss.adversarial_temperature 0.14824045852399786 +263 37 optimizer.lr 0.04540830524179899 +263 37 negative_sampler.num_negs_per_pos 47.0 +263 37 training.batch_size 0.0 +263 38 model.embedding_dim 1.0 +263 38 loss.margin 24.243075609853154 +263 38 loss.adversarial_temperature 0.45796632031056145 +263 38 optimizer.lr 0.0011928834733483074 +263 38 negative_sampler.num_negs_per_pos 81.0 +263 38 training.batch_size 0.0 +263 39 model.embedding_dim 1.0 +263 39 loss.margin 24.17701593380899 +263 39 loss.adversarial_temperature 0.6872715969290687 +263 39 optimizer.lr 0.004600113882838185 +263 39 negative_sampler.num_negs_per_pos 44.0 +263 39 training.batch_size 0.0 +263 40 model.embedding_dim 0.0 +263 40 loss.margin 18.232778643963535 +263 40 
loss.adversarial_temperature 0.8852293025155424 +263 40 optimizer.lr 0.04706987606640625 +263 40 negative_sampler.num_negs_per_pos 8.0 +263 40 training.batch_size 0.0 +263 41 model.embedding_dim 0.0 +263 41 loss.margin 25.906502752774273 +263 41 loss.adversarial_temperature 0.8696811722362849 +263 41 optimizer.lr 0.001684348814220854 +263 41 negative_sampler.num_negs_per_pos 9.0 +263 41 training.batch_size 1.0 +263 42 model.embedding_dim 2.0 +263 42 loss.margin 7.478092157756018 +263 42 loss.adversarial_temperature 0.9273328012203959 +263 42 optimizer.lr 0.05393360978604566 +263 42 negative_sampler.num_negs_per_pos 32.0 +263 42 training.batch_size 0.0 +263 43 model.embedding_dim 0.0 +263 43 loss.margin 18.291883464798243 +263 43 loss.adversarial_temperature 0.3476392945586881 +263 43 optimizer.lr 0.0011761221180560737 +263 43 negative_sampler.num_negs_per_pos 99.0 +263 43 training.batch_size 1.0 +263 44 model.embedding_dim 0.0 +263 44 loss.margin 23.99758955413241 +263 44 loss.adversarial_temperature 0.5059741639456182 +263 44 optimizer.lr 0.00410441796518054 +263 44 negative_sampler.num_negs_per_pos 91.0 +263 44 training.batch_size 0.0 +263 45 model.embedding_dim 0.0 +263 45 loss.margin 7.49020965288102 +263 45 loss.adversarial_temperature 0.5812320911246858 +263 45 optimizer.lr 0.018025820750203685 +263 45 negative_sampler.num_negs_per_pos 79.0 +263 45 training.batch_size 1.0 +263 46 model.embedding_dim 1.0 +263 46 loss.margin 16.41348417206118 +263 46 loss.adversarial_temperature 0.9953821877513975 +263 46 optimizer.lr 0.0024797250594948885 +263 46 negative_sampler.num_negs_per_pos 92.0 +263 46 training.batch_size 1.0 +263 47 model.embedding_dim 1.0 +263 47 loss.margin 14.025254374510547 +263 47 loss.adversarial_temperature 0.7833394164428112 +263 47 optimizer.lr 0.02598539995988313 +263 47 negative_sampler.num_negs_per_pos 88.0 +263 47 training.batch_size 2.0 +263 48 model.embedding_dim 2.0 +263 48 loss.margin 23.02103651195039 +263 48 
loss.adversarial_temperature 0.21773736913816977 +263 48 optimizer.lr 0.0012429255240093804 +263 48 negative_sampler.num_negs_per_pos 82.0 +263 48 training.batch_size 0.0 +263 49 model.embedding_dim 2.0 +263 49 loss.margin 22.81045049789036 +263 49 loss.adversarial_temperature 0.1422971377986774 +263 49 optimizer.lr 0.0028183613185531423 +263 49 negative_sampler.num_negs_per_pos 21.0 +263 49 training.batch_size 2.0 +263 50 model.embedding_dim 0.0 +263 50 loss.margin 22.543130951801736 +263 50 loss.adversarial_temperature 0.15344414721314067 +263 50 optimizer.lr 0.0011634329505579887 +263 50 negative_sampler.num_negs_per_pos 20.0 +263 50 training.batch_size 1.0 +263 51 model.embedding_dim 0.0 +263 51 loss.margin 26.467738708301795 +263 51 loss.adversarial_temperature 0.8887233869359726 +263 51 optimizer.lr 0.04378214674942564 +263 51 negative_sampler.num_negs_per_pos 33.0 +263 51 training.batch_size 1.0 +263 52 model.embedding_dim 0.0 +263 52 loss.margin 29.59835414132458 +263 52 loss.adversarial_temperature 0.7557374541840868 +263 52 optimizer.lr 0.007710729182676124 +263 52 negative_sampler.num_negs_per_pos 88.0 +263 52 training.batch_size 1.0 +263 53 model.embedding_dim 0.0 +263 53 loss.margin 22.534975715903723 +263 53 loss.adversarial_temperature 0.2665201988538159 +263 53 optimizer.lr 0.00212678310174601 +263 53 negative_sampler.num_negs_per_pos 29.0 +263 53 training.batch_size 0.0 +263 54 model.embedding_dim 0.0 +263 54 loss.margin 13.436617958471562 +263 54 loss.adversarial_temperature 0.4379449507795703 +263 54 optimizer.lr 0.024326657707729342 +263 54 negative_sampler.num_negs_per_pos 66.0 +263 54 training.batch_size 1.0 +263 55 model.embedding_dim 2.0 +263 55 loss.margin 23.14702874210073 +263 55 loss.adversarial_temperature 0.8676991290170113 +263 55 optimizer.lr 0.003845118359585928 +263 55 negative_sampler.num_negs_per_pos 94.0 +263 55 training.batch_size 2.0 +263 56 model.embedding_dim 2.0 +263 56 loss.margin 5.654853546389563 +263 56 
loss.adversarial_temperature 0.6055256339187015 +263 56 optimizer.lr 0.00634889078573744 +263 56 negative_sampler.num_negs_per_pos 46.0 +263 56 training.batch_size 2.0 +263 57 model.embedding_dim 2.0 +263 57 loss.margin 12.807536659909774 +263 57 loss.adversarial_temperature 0.1890974582479025 +263 57 optimizer.lr 0.06436611546891799 +263 57 negative_sampler.num_negs_per_pos 15.0 +263 57 training.batch_size 0.0 +263 58 model.embedding_dim 1.0 +263 58 loss.margin 20.726382417980062 +263 58 loss.adversarial_temperature 0.34152572692377264 +263 58 optimizer.lr 0.05061203733299209 +263 58 negative_sampler.num_negs_per_pos 31.0 +263 58 training.batch_size 2.0 +263 59 model.embedding_dim 2.0 +263 59 loss.margin 17.923568368524563 +263 59 loss.adversarial_temperature 0.31854797757323067 +263 59 optimizer.lr 0.023219580196989647 +263 59 negative_sampler.num_negs_per_pos 91.0 +263 59 training.batch_size 0.0 +263 60 model.embedding_dim 2.0 +263 60 loss.margin 11.65598348343652 +263 60 loss.adversarial_temperature 0.13814973690801505 +263 60 optimizer.lr 0.002181979296642206 +263 60 negative_sampler.num_negs_per_pos 32.0 +263 60 training.batch_size 2.0 +263 61 model.embedding_dim 2.0 +263 61 loss.margin 21.460938867537514 +263 61 loss.adversarial_temperature 0.3945825403560846 +263 61 optimizer.lr 0.02509358197123944 +263 61 negative_sampler.num_negs_per_pos 56.0 +263 61 training.batch_size 2.0 +263 62 model.embedding_dim 2.0 +263 62 loss.margin 9.000831131743341 +263 62 loss.adversarial_temperature 0.5436246632247287 +263 62 optimizer.lr 0.013376072948265965 +263 62 negative_sampler.num_negs_per_pos 67.0 +263 62 training.batch_size 0.0 +263 63 model.embedding_dim 1.0 +263 63 loss.margin 24.93935056415471 +263 63 loss.adversarial_temperature 0.42427036445366434 +263 63 optimizer.lr 0.008616865537843669 +263 63 negative_sampler.num_negs_per_pos 22.0 +263 63 training.batch_size 1.0 +263 64 model.embedding_dim 1.0 +263 64 loss.margin 26.894592033957878 +263 64 
loss.adversarial_temperature 0.3048838838816067 +263 64 optimizer.lr 0.03708732021508438 +263 64 negative_sampler.num_negs_per_pos 25.0 +263 64 training.batch_size 0.0 +263 65 model.embedding_dim 1.0 +263 65 loss.margin 4.27623985209151 +263 65 loss.adversarial_temperature 0.31838043127865717 +263 65 optimizer.lr 0.06594218136291002 +263 65 negative_sampler.num_negs_per_pos 51.0 +263 65 training.batch_size 1.0 +263 66 model.embedding_dim 0.0 +263 66 loss.margin 1.7967424933390053 +263 66 loss.adversarial_temperature 0.8844965088292264 +263 66 optimizer.lr 0.0026385911334386556 +263 66 negative_sampler.num_negs_per_pos 71.0 +263 66 training.batch_size 2.0 +263 67 model.embedding_dim 0.0 +263 67 loss.margin 29.639435826347817 +263 67 loss.adversarial_temperature 0.9413522029645416 +263 67 optimizer.lr 0.05191295079930753 +263 67 negative_sampler.num_negs_per_pos 1.0 +263 67 training.batch_size 1.0 +263 68 model.embedding_dim 2.0 +263 68 loss.margin 20.70931599428321 +263 68 loss.adversarial_temperature 0.48856426119373303 +263 68 optimizer.lr 0.010470221289579468 +263 68 negative_sampler.num_negs_per_pos 4.0 +263 68 training.batch_size 1.0 +263 69 model.embedding_dim 1.0 +263 69 loss.margin 2.279717108779001 +263 69 loss.adversarial_temperature 0.25649728053369075 +263 69 optimizer.lr 0.010824749275638482 +263 69 negative_sampler.num_negs_per_pos 36.0 +263 69 training.batch_size 1.0 +263 70 model.embedding_dim 0.0 +263 70 loss.margin 5.512894066836043 +263 70 loss.adversarial_temperature 0.9619558720936201 +263 70 optimizer.lr 0.0026204583880272223 +263 70 negative_sampler.num_negs_per_pos 86.0 +263 70 training.batch_size 2.0 +263 71 model.embedding_dim 1.0 +263 71 loss.margin 4.3976402609908725 +263 71 loss.adversarial_temperature 0.6831075104203558 +263 71 optimizer.lr 0.005681086939747745 +263 71 negative_sampler.num_negs_per_pos 96.0 +263 71 training.batch_size 2.0 +263 72 model.embedding_dim 2.0 +263 72 loss.margin 25.520822562626385 +263 72 
loss.adversarial_temperature 0.28128747228692447 +263 72 optimizer.lr 0.006455087611803129 +263 72 negative_sampler.num_negs_per_pos 66.0 +263 72 training.batch_size 0.0 +263 73 model.embedding_dim 1.0 +263 73 loss.margin 10.437976477790103 +263 73 loss.adversarial_temperature 0.33193662774624666 +263 73 optimizer.lr 0.0068934869863593205 +263 73 negative_sampler.num_negs_per_pos 35.0 +263 73 training.batch_size 2.0 +263 74 model.embedding_dim 2.0 +263 74 loss.margin 14.541077213229753 +263 74 loss.adversarial_temperature 0.6833806683758713 +263 74 optimizer.lr 0.021359521090597362 +263 74 negative_sampler.num_negs_per_pos 75.0 +263 74 training.batch_size 2.0 +263 75 model.embedding_dim 1.0 +263 75 loss.margin 28.816584358606036 +263 75 loss.adversarial_temperature 0.9964890206317765 +263 75 optimizer.lr 0.03570144784109247 +263 75 negative_sampler.num_negs_per_pos 65.0 +263 75 training.batch_size 0.0 +263 76 model.embedding_dim 0.0 +263 76 loss.margin 23.676649513493416 +263 76 loss.adversarial_temperature 0.7260687386135793 +263 76 optimizer.lr 0.008233674858794794 +263 76 negative_sampler.num_negs_per_pos 80.0 +263 76 training.batch_size 2.0 +263 77 model.embedding_dim 0.0 +263 77 loss.margin 26.71841808644643 +263 77 loss.adversarial_temperature 0.4823735121279896 +263 77 optimizer.lr 0.017989850895111573 +263 77 negative_sampler.num_negs_per_pos 88.0 +263 77 training.batch_size 2.0 +263 78 model.embedding_dim 2.0 +263 78 loss.margin 5.4004891879879535 +263 78 loss.adversarial_temperature 0.16732683903340628 +263 78 optimizer.lr 0.001300042508669426 +263 78 negative_sampler.num_negs_per_pos 81.0 +263 78 training.batch_size 0.0 +263 79 model.embedding_dim 1.0 +263 79 loss.margin 21.05335155381045 +263 79 loss.adversarial_temperature 0.6881621754412908 +263 79 optimizer.lr 0.0013310409132093037 +263 79 negative_sampler.num_negs_per_pos 83.0 +263 79 training.batch_size 1.0 +263 80 model.embedding_dim 2.0 +263 80 loss.margin 19.712957542048258 +263 80 
loss.adversarial_temperature 0.57340420551831 +263 80 optimizer.lr 0.013349367699466257 +263 80 negative_sampler.num_negs_per_pos 94.0 +263 80 training.batch_size 1.0 +263 81 model.embedding_dim 0.0 +263 81 loss.margin 12.061695100910168 +263 81 loss.adversarial_temperature 0.7978254884428146 +263 81 optimizer.lr 0.0013465758013437884 +263 81 negative_sampler.num_negs_per_pos 22.0 +263 81 training.batch_size 2.0 +263 1 dataset """wn18rr""" +263 1 model """ermlp""" +263 1 loss """nssa""" +263 1 regularizer """no""" +263 1 optimizer """adam""" +263 1 training_loop """owa""" +263 1 negative_sampler """basic""" +263 1 evaluator """rankbased""" +263 2 dataset """wn18rr""" +263 2 model """ermlp""" +263 2 loss """nssa""" +263 2 regularizer """no""" +263 2 optimizer """adam""" +263 2 training_loop """owa""" +263 2 negative_sampler """basic""" +263 2 evaluator """rankbased""" +263 3 dataset """wn18rr""" +263 3 model """ermlp""" +263 3 loss """nssa""" +263 3 regularizer """no""" +263 3 optimizer """adam""" +263 3 training_loop """owa""" +263 3 negative_sampler """basic""" +263 3 evaluator """rankbased""" +263 4 dataset """wn18rr""" +263 4 model """ermlp""" +263 4 loss """nssa""" +263 4 regularizer """no""" +263 4 optimizer """adam""" +263 4 training_loop """owa""" +263 4 negative_sampler """basic""" +263 4 evaluator """rankbased""" +263 5 dataset """wn18rr""" +263 5 model """ermlp""" +263 5 loss """nssa""" +263 5 regularizer """no""" +263 5 optimizer """adam""" +263 5 training_loop """owa""" +263 5 negative_sampler """basic""" +263 5 evaluator """rankbased""" +263 6 dataset """wn18rr""" +263 6 model """ermlp""" +263 6 loss """nssa""" +263 6 regularizer """no""" +263 6 optimizer """adam""" +263 6 training_loop """owa""" +263 6 negative_sampler """basic""" +263 6 evaluator """rankbased""" +263 7 dataset """wn18rr""" +263 7 model """ermlp""" +263 7 loss """nssa""" +263 7 regularizer """no""" +263 7 optimizer """adam""" +263 7 training_loop """owa""" +263 7 negative_sampler 
"""basic""" +263 7 evaluator """rankbased""" +263 8 dataset """wn18rr""" +263 8 model """ermlp""" +263 8 loss """nssa""" +263 8 regularizer """no""" +263 8 optimizer """adam""" +263 8 training_loop """owa""" +263 8 negative_sampler """basic""" +263 8 evaluator """rankbased""" +263 9 dataset """wn18rr""" +263 9 model """ermlp""" +263 9 loss """nssa""" +263 9 regularizer """no""" +263 9 optimizer """adam""" +263 9 training_loop """owa""" +263 9 negative_sampler """basic""" +263 9 evaluator """rankbased""" +263 10 dataset """wn18rr""" +263 10 model """ermlp""" +263 10 loss """nssa""" +263 10 regularizer """no""" +263 10 optimizer """adam""" +263 10 training_loop """owa""" +263 10 negative_sampler """basic""" +263 10 evaluator """rankbased""" +263 11 dataset """wn18rr""" +263 11 model """ermlp""" +263 11 loss """nssa""" +263 11 regularizer """no""" +263 11 optimizer """adam""" +263 11 training_loop """owa""" +263 11 negative_sampler """basic""" +263 11 evaluator """rankbased""" +263 12 dataset """wn18rr""" +263 12 model """ermlp""" +263 12 loss """nssa""" +263 12 regularizer """no""" +263 12 optimizer """adam""" +263 12 training_loop """owa""" +263 12 negative_sampler """basic""" +263 12 evaluator """rankbased""" +263 13 dataset """wn18rr""" +263 13 model """ermlp""" +263 13 loss """nssa""" +263 13 regularizer """no""" +263 13 optimizer """adam""" +263 13 training_loop """owa""" +263 13 negative_sampler """basic""" +263 13 evaluator """rankbased""" +263 14 dataset """wn18rr""" +263 14 model """ermlp""" +263 14 loss """nssa""" +263 14 regularizer """no""" +263 14 optimizer """adam""" +263 14 training_loop """owa""" +263 14 negative_sampler """basic""" +263 14 evaluator """rankbased""" +263 15 dataset """wn18rr""" +263 15 model """ermlp""" +263 15 loss """nssa""" +263 15 regularizer """no""" +263 15 optimizer """adam""" +263 15 training_loop """owa""" +263 15 negative_sampler """basic""" +263 15 evaluator """rankbased""" +263 16 dataset """wn18rr""" +263 16 model 
"""ermlp""" +263 16 loss """nssa""" +263 16 regularizer """no""" +263 16 optimizer """adam""" +263 16 training_loop """owa""" +263 16 negative_sampler """basic""" +263 16 evaluator """rankbased""" +263 17 dataset """wn18rr""" +263 17 model """ermlp""" +263 17 loss """nssa""" +263 17 regularizer """no""" +263 17 optimizer """adam""" +263 17 training_loop """owa""" +263 17 negative_sampler """basic""" +263 17 evaluator """rankbased""" +263 18 dataset """wn18rr""" +263 18 model """ermlp""" +263 18 loss """nssa""" +263 18 regularizer """no""" +263 18 optimizer """adam""" +263 18 training_loop """owa""" +263 18 negative_sampler """basic""" +263 18 evaluator """rankbased""" +263 19 dataset """wn18rr""" +263 19 model """ermlp""" +263 19 loss """nssa""" +263 19 regularizer """no""" +263 19 optimizer """adam""" +263 19 training_loop """owa""" +263 19 negative_sampler """basic""" +263 19 evaluator """rankbased""" +263 20 dataset """wn18rr""" +263 20 model """ermlp""" +263 20 loss """nssa""" +263 20 regularizer """no""" +263 20 optimizer """adam""" +263 20 training_loop """owa""" +263 20 negative_sampler """basic""" +263 20 evaluator """rankbased""" +263 21 dataset """wn18rr""" +263 21 model """ermlp""" +263 21 loss """nssa""" +263 21 regularizer """no""" +263 21 optimizer """adam""" +263 21 training_loop """owa""" +263 21 negative_sampler """basic""" +263 21 evaluator """rankbased""" +263 22 dataset """wn18rr""" +263 22 model """ermlp""" +263 22 loss """nssa""" +263 22 regularizer """no""" +263 22 optimizer """adam""" +263 22 training_loop """owa""" +263 22 negative_sampler """basic""" +263 22 evaluator """rankbased""" +263 23 dataset """wn18rr""" +263 23 model """ermlp""" +263 23 loss """nssa""" +263 23 regularizer """no""" +263 23 optimizer """adam""" +263 23 training_loop """owa""" +263 23 negative_sampler """basic""" +263 23 evaluator """rankbased""" +263 24 dataset """wn18rr""" +263 24 model """ermlp""" +263 24 loss """nssa""" +263 24 regularizer """no""" +263 24 
optimizer """adam""" +263 24 training_loop """owa""" +263 24 negative_sampler """basic""" +263 24 evaluator """rankbased""" +263 25 dataset """wn18rr""" +263 25 model """ermlp""" +263 25 loss """nssa""" +263 25 regularizer """no""" +263 25 optimizer """adam""" +263 25 training_loop """owa""" +263 25 negative_sampler """basic""" +263 25 evaluator """rankbased""" +263 26 dataset """wn18rr""" +263 26 model """ermlp""" +263 26 loss """nssa""" +263 26 regularizer """no""" +263 26 optimizer """adam""" +263 26 training_loop """owa""" +263 26 negative_sampler """basic""" +263 26 evaluator """rankbased""" +263 27 dataset """wn18rr""" +263 27 model """ermlp""" +263 27 loss """nssa""" +263 27 regularizer """no""" +263 27 optimizer """adam""" +263 27 training_loop """owa""" +263 27 negative_sampler """basic""" +263 27 evaluator """rankbased""" +263 28 dataset """wn18rr""" +263 28 model """ermlp""" +263 28 loss """nssa""" +263 28 regularizer """no""" +263 28 optimizer """adam""" +263 28 training_loop """owa""" +263 28 negative_sampler """basic""" +263 28 evaluator """rankbased""" +263 29 dataset """wn18rr""" +263 29 model """ermlp""" +263 29 loss """nssa""" +263 29 regularizer """no""" +263 29 optimizer """adam""" +263 29 training_loop """owa""" +263 29 negative_sampler """basic""" +263 29 evaluator """rankbased""" +263 30 dataset """wn18rr""" +263 30 model """ermlp""" +263 30 loss """nssa""" +263 30 regularizer """no""" +263 30 optimizer """adam""" +263 30 training_loop """owa""" +263 30 negative_sampler """basic""" +263 30 evaluator """rankbased""" +263 31 dataset """wn18rr""" +263 31 model """ermlp""" +263 31 loss """nssa""" +263 31 regularizer """no""" +263 31 optimizer """adam""" +263 31 training_loop """owa""" +263 31 negative_sampler """basic""" +263 31 evaluator """rankbased""" +263 32 dataset """wn18rr""" +263 32 model """ermlp""" +263 32 loss """nssa""" +263 32 regularizer """no""" +263 32 optimizer """adam""" +263 32 training_loop """owa""" +263 32 negative_sampler 
"""basic""" +263 32 evaluator """rankbased""" +263 33 dataset """wn18rr""" +263 33 model """ermlp""" +263 33 loss """nssa""" +263 33 regularizer """no""" +263 33 optimizer """adam""" +263 33 training_loop """owa""" +263 33 negative_sampler """basic""" +263 33 evaluator """rankbased""" +263 34 dataset """wn18rr""" +263 34 model """ermlp""" +263 34 loss """nssa""" +263 34 regularizer """no""" +263 34 optimizer """adam""" +263 34 training_loop """owa""" +263 34 negative_sampler """basic""" +263 34 evaluator """rankbased""" +263 35 dataset """wn18rr""" +263 35 model """ermlp""" +263 35 loss """nssa""" +263 35 regularizer """no""" +263 35 optimizer """adam""" +263 35 training_loop """owa""" +263 35 negative_sampler """basic""" +263 35 evaluator """rankbased""" +263 36 dataset """wn18rr""" +263 36 model """ermlp""" +263 36 loss """nssa""" +263 36 regularizer """no""" +263 36 optimizer """adam""" +263 36 training_loop """owa""" +263 36 negative_sampler """basic""" +263 36 evaluator """rankbased""" +263 37 dataset """wn18rr""" +263 37 model """ermlp""" +263 37 loss """nssa""" +263 37 regularizer """no""" +263 37 optimizer """adam""" +263 37 training_loop """owa""" +263 37 negative_sampler """basic""" +263 37 evaluator """rankbased""" +263 38 dataset """wn18rr""" +263 38 model """ermlp""" +263 38 loss """nssa""" +263 38 regularizer """no""" +263 38 optimizer """adam""" +263 38 training_loop """owa""" +263 38 negative_sampler """basic""" +263 38 evaluator """rankbased""" +263 39 dataset """wn18rr""" +263 39 model """ermlp""" +263 39 loss """nssa""" +263 39 regularizer """no""" +263 39 optimizer """adam""" +263 39 training_loop """owa""" +263 39 negative_sampler """basic""" +263 39 evaluator """rankbased""" +263 40 dataset """wn18rr""" +263 40 model """ermlp""" +263 40 loss """nssa""" +263 40 regularizer """no""" +263 40 optimizer """adam""" +263 40 training_loop """owa""" +263 40 negative_sampler """basic""" +263 40 evaluator """rankbased""" +263 41 dataset """wn18rr""" +263 
41 model """ermlp""" +263 41 loss """nssa""" +263 41 regularizer """no""" +263 41 optimizer """adam""" +263 41 training_loop """owa""" +263 41 negative_sampler """basic""" +263 41 evaluator """rankbased""" +263 42 dataset """wn18rr""" +263 42 model """ermlp""" +263 42 loss """nssa""" +263 42 regularizer """no""" +263 42 optimizer """adam""" +263 42 training_loop """owa""" +263 42 negative_sampler """basic""" +263 42 evaluator """rankbased""" +263 43 dataset """wn18rr""" +263 43 model """ermlp""" +263 43 loss """nssa""" +263 43 regularizer """no""" +263 43 optimizer """adam""" +263 43 training_loop """owa""" +263 43 negative_sampler """basic""" +263 43 evaluator """rankbased""" +263 44 dataset """wn18rr""" +263 44 model """ermlp""" +263 44 loss """nssa""" +263 44 regularizer """no""" +263 44 optimizer """adam""" +263 44 training_loop """owa""" +263 44 negative_sampler """basic""" +263 44 evaluator """rankbased""" +263 45 dataset """wn18rr""" +263 45 model """ermlp""" +263 45 loss """nssa""" +263 45 regularizer """no""" +263 45 optimizer """adam""" +263 45 training_loop """owa""" +263 45 negative_sampler """basic""" +263 45 evaluator """rankbased""" +263 46 dataset """wn18rr""" +263 46 model """ermlp""" +263 46 loss """nssa""" +263 46 regularizer """no""" +263 46 optimizer """adam""" +263 46 training_loop """owa""" +263 46 negative_sampler """basic""" +263 46 evaluator """rankbased""" +263 47 dataset """wn18rr""" +263 47 model """ermlp""" +263 47 loss """nssa""" +263 47 regularizer """no""" +263 47 optimizer """adam""" +263 47 training_loop """owa""" +263 47 negative_sampler """basic""" +263 47 evaluator """rankbased""" +263 48 dataset """wn18rr""" +263 48 model """ermlp""" +263 48 loss """nssa""" +263 48 regularizer """no""" +263 48 optimizer """adam""" +263 48 training_loop """owa""" +263 48 negative_sampler """basic""" +263 48 evaluator """rankbased""" +263 49 dataset """wn18rr""" +263 49 model """ermlp""" +263 49 loss """nssa""" +263 49 regularizer """no""" +263 
49 optimizer """adam""" +263 49 training_loop """owa""" +263 49 negative_sampler """basic""" +263 49 evaluator """rankbased""" +263 50 dataset """wn18rr""" +263 50 model """ermlp""" +263 50 loss """nssa""" +263 50 regularizer """no""" +263 50 optimizer """adam""" +263 50 training_loop """owa""" +263 50 negative_sampler """basic""" +263 50 evaluator """rankbased""" +263 51 dataset """wn18rr""" +263 51 model """ermlp""" +263 51 loss """nssa""" +263 51 regularizer """no""" +263 51 optimizer """adam""" +263 51 training_loop """owa""" +263 51 negative_sampler """basic""" +263 51 evaluator """rankbased""" +263 52 dataset """wn18rr""" +263 52 model """ermlp""" +263 52 loss """nssa""" +263 52 regularizer """no""" +263 52 optimizer """adam""" +263 52 training_loop """owa""" +263 52 negative_sampler """basic""" +263 52 evaluator """rankbased""" +263 53 dataset """wn18rr""" +263 53 model """ermlp""" +263 53 loss """nssa""" +263 53 regularizer """no""" +263 53 optimizer """adam""" +263 53 training_loop """owa""" +263 53 negative_sampler """basic""" +263 53 evaluator """rankbased""" +263 54 dataset """wn18rr""" +263 54 model """ermlp""" +263 54 loss """nssa""" +263 54 regularizer """no""" +263 54 optimizer """adam""" +263 54 training_loop """owa""" +263 54 negative_sampler """basic""" +263 54 evaluator """rankbased""" +263 55 dataset """wn18rr""" +263 55 model """ermlp""" +263 55 loss """nssa""" +263 55 regularizer """no""" +263 55 optimizer """adam""" +263 55 training_loop """owa""" +263 55 negative_sampler """basic""" +263 55 evaluator """rankbased""" +263 56 dataset """wn18rr""" +263 56 model """ermlp""" +263 56 loss """nssa""" +263 56 regularizer """no""" +263 56 optimizer """adam""" +263 56 training_loop """owa""" +263 56 negative_sampler """basic""" +263 56 evaluator """rankbased""" +263 57 dataset """wn18rr""" +263 57 model """ermlp""" +263 57 loss """nssa""" +263 57 regularizer """no""" +263 57 optimizer """adam""" +263 57 training_loop """owa""" +263 57 
negative_sampler """basic""" +263 57 evaluator """rankbased""" +263 58 dataset """wn18rr""" +263 58 model """ermlp""" +263 58 loss """nssa""" +263 58 regularizer """no""" +263 58 optimizer """adam""" +263 58 training_loop """owa""" +263 58 negative_sampler """basic""" +263 58 evaluator """rankbased""" +263 59 dataset """wn18rr""" +263 59 model """ermlp""" +263 59 loss """nssa""" +263 59 regularizer """no""" +263 59 optimizer """adam""" +263 59 training_loop """owa""" +263 59 negative_sampler """basic""" +263 59 evaluator """rankbased""" +263 60 dataset """wn18rr""" +263 60 model """ermlp""" +263 60 loss """nssa""" +263 60 regularizer """no""" +263 60 optimizer """adam""" +263 60 training_loop """owa""" +263 60 negative_sampler """basic""" +263 60 evaluator """rankbased""" +263 61 dataset """wn18rr""" +263 61 model """ermlp""" +263 61 loss """nssa""" +263 61 regularizer """no""" +263 61 optimizer """adam""" +263 61 training_loop """owa""" +263 61 negative_sampler """basic""" +263 61 evaluator """rankbased""" +263 62 dataset """wn18rr""" +263 62 model """ermlp""" +263 62 loss """nssa""" +263 62 regularizer """no""" +263 62 optimizer """adam""" +263 62 training_loop """owa""" +263 62 negative_sampler """basic""" +263 62 evaluator """rankbased""" +263 63 dataset """wn18rr""" +263 63 model """ermlp""" +263 63 loss """nssa""" +263 63 regularizer """no""" +263 63 optimizer """adam""" +263 63 training_loop """owa""" +263 63 negative_sampler """basic""" +263 63 evaluator """rankbased""" +263 64 dataset """wn18rr""" +263 64 model """ermlp""" +263 64 loss """nssa""" +263 64 regularizer """no""" +263 64 optimizer """adam""" +263 64 training_loop """owa""" +263 64 negative_sampler """basic""" +263 64 evaluator """rankbased""" +263 65 dataset """wn18rr""" +263 65 model """ermlp""" +263 65 loss """nssa""" +263 65 regularizer """no""" +263 65 optimizer """adam""" +263 65 training_loop """owa""" +263 65 negative_sampler """basic""" +263 65 evaluator """rankbased""" +263 66 dataset 
"""wn18rr""" +263 66 model """ermlp""" +263 66 loss """nssa""" +263 66 regularizer """no""" +263 66 optimizer """adam""" +263 66 training_loop """owa""" +263 66 negative_sampler """basic""" +263 66 evaluator """rankbased""" +263 67 dataset """wn18rr""" +263 67 model """ermlp""" +263 67 loss """nssa""" +263 67 regularizer """no""" +263 67 optimizer """adam""" +263 67 training_loop """owa""" +263 67 negative_sampler """basic""" +263 67 evaluator """rankbased""" +263 68 dataset """wn18rr""" +263 68 model """ermlp""" +263 68 loss """nssa""" +263 68 regularizer """no""" +263 68 optimizer """adam""" +263 68 training_loop """owa""" +263 68 negative_sampler """basic""" +263 68 evaluator """rankbased""" +263 69 dataset """wn18rr""" +263 69 model """ermlp""" +263 69 loss """nssa""" +263 69 regularizer """no""" +263 69 optimizer """adam""" +263 69 training_loop """owa""" +263 69 negative_sampler """basic""" +263 69 evaluator """rankbased""" +263 70 dataset """wn18rr""" +263 70 model """ermlp""" +263 70 loss """nssa""" +263 70 regularizer """no""" +263 70 optimizer """adam""" +263 70 training_loop """owa""" +263 70 negative_sampler """basic""" +263 70 evaluator """rankbased""" +263 71 dataset """wn18rr""" +263 71 model """ermlp""" +263 71 loss """nssa""" +263 71 regularizer """no""" +263 71 optimizer """adam""" +263 71 training_loop """owa""" +263 71 negative_sampler """basic""" +263 71 evaluator """rankbased""" +263 72 dataset """wn18rr""" +263 72 model """ermlp""" +263 72 loss """nssa""" +263 72 regularizer """no""" +263 72 optimizer """adam""" +263 72 training_loop """owa""" +263 72 negative_sampler """basic""" +263 72 evaluator """rankbased""" +263 73 dataset """wn18rr""" +263 73 model """ermlp""" +263 73 loss """nssa""" +263 73 regularizer """no""" +263 73 optimizer """adam""" +263 73 training_loop """owa""" +263 73 negative_sampler """basic""" +263 73 evaluator """rankbased""" +263 74 dataset """wn18rr""" +263 74 model """ermlp""" +263 74 loss """nssa""" +263 74 
regularizer """no""" +263 74 optimizer """adam""" +263 74 training_loop """owa""" +263 74 negative_sampler """basic""" +263 74 evaluator """rankbased""" +263 75 dataset """wn18rr""" +263 75 model """ermlp""" +263 75 loss """nssa""" +263 75 regularizer """no""" +263 75 optimizer """adam""" +263 75 training_loop """owa""" +263 75 negative_sampler """basic""" +263 75 evaluator """rankbased""" +263 76 dataset """wn18rr""" +263 76 model """ermlp""" +263 76 loss """nssa""" +263 76 regularizer """no""" +263 76 optimizer """adam""" +263 76 training_loop """owa""" +263 76 negative_sampler """basic""" +263 76 evaluator """rankbased""" +263 77 dataset """wn18rr""" +263 77 model """ermlp""" +263 77 loss """nssa""" +263 77 regularizer """no""" +263 77 optimizer """adam""" +263 77 training_loop """owa""" +263 77 negative_sampler """basic""" +263 77 evaluator """rankbased""" +263 78 dataset """wn18rr""" +263 78 model """ermlp""" +263 78 loss """nssa""" +263 78 regularizer """no""" +263 78 optimizer """adam""" +263 78 training_loop """owa""" +263 78 negative_sampler """basic""" +263 78 evaluator """rankbased""" +263 79 dataset """wn18rr""" +263 79 model """ermlp""" +263 79 loss """nssa""" +263 79 regularizer """no""" +263 79 optimizer """adam""" +263 79 training_loop """owa""" +263 79 negative_sampler """basic""" +263 79 evaluator """rankbased""" +263 80 dataset """wn18rr""" +263 80 model """ermlp""" +263 80 loss """nssa""" +263 80 regularizer """no""" +263 80 optimizer """adam""" +263 80 training_loop """owa""" +263 80 negative_sampler """basic""" +263 80 evaluator """rankbased""" +263 81 dataset """wn18rr""" +263 81 model """ermlp""" +263 81 loss """nssa""" +263 81 regularizer """no""" +263 81 optimizer """adam""" +263 81 training_loop """owa""" +263 81 negative_sampler """basic""" +263 81 evaluator """rankbased""" +264 1 model.embedding_dim 2.0 +264 1 optimizer.lr 0.014246611275204208 +264 1 negative_sampler.num_negs_per_pos 14.0 +264 1 training.batch_size 0.0 +264 2 
model.embedding_dim 1.0 +264 2 optimizer.lr 0.009220138633881705 +264 2 negative_sampler.num_negs_per_pos 32.0 +264 2 training.batch_size 2.0 +264 3 model.embedding_dim 1.0 +264 3 optimizer.lr 0.010240992164988378 +264 3 negative_sampler.num_negs_per_pos 57.0 +264 3 training.batch_size 2.0 +264 4 model.embedding_dim 0.0 +264 4 optimizer.lr 0.008721210552733314 +264 4 negative_sampler.num_negs_per_pos 69.0 +264 4 training.batch_size 1.0 +264 5 model.embedding_dim 2.0 +264 5 optimizer.lr 0.0015256868408044955 +264 5 negative_sampler.num_negs_per_pos 46.0 +264 5 training.batch_size 1.0 +264 6 model.embedding_dim 1.0 +264 6 optimizer.lr 0.0013839448997090135 +264 6 negative_sampler.num_negs_per_pos 2.0 +264 6 training.batch_size 2.0 +264 7 model.embedding_dim 0.0 +264 7 optimizer.lr 0.0021329747063123213 +264 7 negative_sampler.num_negs_per_pos 78.0 +264 7 training.batch_size 0.0 +264 8 model.embedding_dim 1.0 +264 8 optimizer.lr 0.015445079191022597 +264 8 negative_sampler.num_negs_per_pos 63.0 +264 8 training.batch_size 0.0 +264 9 model.embedding_dim 1.0 +264 9 optimizer.lr 0.08956332200130201 +264 9 negative_sampler.num_negs_per_pos 45.0 +264 9 training.batch_size 0.0 +264 10 model.embedding_dim 2.0 +264 10 optimizer.lr 0.051804334276714395 +264 10 negative_sampler.num_negs_per_pos 27.0 +264 10 training.batch_size 0.0 +264 11 model.embedding_dim 1.0 +264 11 optimizer.lr 0.055784256630900905 +264 11 negative_sampler.num_negs_per_pos 92.0 +264 11 training.batch_size 1.0 +264 12 model.embedding_dim 2.0 +264 12 optimizer.lr 0.02685028245166627 +264 12 negative_sampler.num_negs_per_pos 36.0 +264 12 training.batch_size 1.0 +264 13 model.embedding_dim 0.0 +264 13 optimizer.lr 0.00798018023937743 +264 13 negative_sampler.num_negs_per_pos 51.0 +264 13 training.batch_size 2.0 +264 14 model.embedding_dim 0.0 +264 14 optimizer.lr 0.004039352563582705 +264 14 negative_sampler.num_negs_per_pos 16.0 +264 14 training.batch_size 1.0 +264 15 model.embedding_dim 2.0 +264 15 
optimizer.lr 0.026767348875260312 +264 15 negative_sampler.num_negs_per_pos 59.0 +264 15 training.batch_size 0.0 +264 16 model.embedding_dim 1.0 +264 16 optimizer.lr 0.019388064657300693 +264 16 negative_sampler.num_negs_per_pos 60.0 +264 16 training.batch_size 0.0 +264 17 model.embedding_dim 1.0 +264 17 optimizer.lr 0.01546440459117831 +264 17 negative_sampler.num_negs_per_pos 42.0 +264 17 training.batch_size 1.0 +264 18 model.embedding_dim 0.0 +264 18 optimizer.lr 0.0014335258216767151 +264 18 negative_sampler.num_negs_per_pos 37.0 +264 18 training.batch_size 0.0 +264 19 model.embedding_dim 1.0 +264 19 optimizer.lr 0.09024384731801585 +264 19 negative_sampler.num_negs_per_pos 56.0 +264 19 training.batch_size 1.0 +264 20 model.embedding_dim 2.0 +264 20 optimizer.lr 0.007850469051317826 +264 20 negative_sampler.num_negs_per_pos 52.0 +264 20 training.batch_size 1.0 +264 21 model.embedding_dim 2.0 +264 21 optimizer.lr 0.04639869802053928 +264 21 negative_sampler.num_negs_per_pos 4.0 +264 21 training.batch_size 1.0 +264 22 model.embedding_dim 0.0 +264 22 optimizer.lr 0.00273010246108428 +264 22 negative_sampler.num_negs_per_pos 28.0 +264 22 training.batch_size 1.0 +264 23 model.embedding_dim 2.0 +264 23 optimizer.lr 0.05324570399357409 +264 23 negative_sampler.num_negs_per_pos 1.0 +264 23 training.batch_size 0.0 +264 24 model.embedding_dim 2.0 +264 24 optimizer.lr 0.04661931342124278 +264 24 negative_sampler.num_negs_per_pos 60.0 +264 24 training.batch_size 2.0 +264 25 model.embedding_dim 0.0 +264 25 optimizer.lr 0.012173258862227622 +264 25 negative_sampler.num_negs_per_pos 80.0 +264 25 training.batch_size 0.0 +264 26 model.embedding_dim 2.0 +264 26 optimizer.lr 0.009407753126481814 +264 26 negative_sampler.num_negs_per_pos 79.0 +264 26 training.batch_size 0.0 +264 27 model.embedding_dim 0.0 +264 27 optimizer.lr 0.0011153940936204224 +264 27 negative_sampler.num_negs_per_pos 76.0 +264 27 training.batch_size 1.0 +264 28 model.embedding_dim 2.0 +264 28 optimizer.lr 
0.002844044760136086 +264 28 negative_sampler.num_negs_per_pos 47.0 +264 28 training.batch_size 0.0 +264 29 model.embedding_dim 1.0 +264 29 optimizer.lr 0.004063361855957762 +264 29 negative_sampler.num_negs_per_pos 88.0 +264 29 training.batch_size 1.0 +264 30 model.embedding_dim 0.0 +264 30 optimizer.lr 0.039355966042685715 +264 30 negative_sampler.num_negs_per_pos 32.0 +264 30 training.batch_size 1.0 +264 31 model.embedding_dim 0.0 +264 31 optimizer.lr 0.033008376787063184 +264 31 negative_sampler.num_negs_per_pos 26.0 +264 31 training.batch_size 0.0 +264 32 model.embedding_dim 2.0 +264 32 optimizer.lr 0.022429145762818473 +264 32 negative_sampler.num_negs_per_pos 25.0 +264 32 training.batch_size 2.0 +264 33 model.embedding_dim 0.0 +264 33 optimizer.lr 0.02400244394713425 +264 33 negative_sampler.num_negs_per_pos 70.0 +264 33 training.batch_size 1.0 +264 34 model.embedding_dim 0.0 +264 34 optimizer.lr 0.0016997275160613623 +264 34 negative_sampler.num_negs_per_pos 56.0 +264 34 training.batch_size 2.0 +264 35 model.embedding_dim 2.0 +264 35 optimizer.lr 0.04054938361508228 +264 35 negative_sampler.num_negs_per_pos 38.0 +264 35 training.batch_size 2.0 +264 36 model.embedding_dim 1.0 +264 36 optimizer.lr 0.0016185608052892535 +264 36 negative_sampler.num_negs_per_pos 33.0 +264 36 training.batch_size 0.0 +264 37 model.embedding_dim 1.0 +264 37 optimizer.lr 0.0059472207297399885 +264 37 negative_sampler.num_negs_per_pos 3.0 +264 37 training.batch_size 2.0 +264 38 model.embedding_dim 0.0 +264 38 optimizer.lr 0.09300893823899693 +264 38 negative_sampler.num_negs_per_pos 75.0 +264 38 training.batch_size 2.0 +264 39 model.embedding_dim 1.0 +264 39 optimizer.lr 0.027825168165710453 +264 39 negative_sampler.num_negs_per_pos 50.0 +264 39 training.batch_size 1.0 +264 40 model.embedding_dim 0.0 +264 40 optimizer.lr 0.022632763166991896 +264 40 negative_sampler.num_negs_per_pos 90.0 +264 40 training.batch_size 2.0 +264 41 model.embedding_dim 0.0 +264 41 optimizer.lr 
0.0011724788974529267 +264 41 negative_sampler.num_negs_per_pos 32.0 +264 41 training.batch_size 0.0 +264 42 model.embedding_dim 1.0 +264 42 optimizer.lr 0.024259955147461362 +264 42 negative_sampler.num_negs_per_pos 61.0 +264 42 training.batch_size 1.0 +264 43 model.embedding_dim 0.0 +264 43 optimizer.lr 0.07262393332708632 +264 43 negative_sampler.num_negs_per_pos 81.0 +264 43 training.batch_size 2.0 +264 44 model.embedding_dim 0.0 +264 44 optimizer.lr 0.03068087532004661 +264 44 negative_sampler.num_negs_per_pos 21.0 +264 44 training.batch_size 2.0 +264 45 model.embedding_dim 2.0 +264 45 optimizer.lr 0.023918826845316467 +264 45 negative_sampler.num_negs_per_pos 71.0 +264 45 training.batch_size 0.0 +264 46 model.embedding_dim 1.0 +264 46 optimizer.lr 0.013105522254022318 +264 46 negative_sampler.num_negs_per_pos 35.0 +264 46 training.batch_size 2.0 +264 47 model.embedding_dim 0.0 +264 47 optimizer.lr 0.009760979108122774 +264 47 negative_sampler.num_negs_per_pos 62.0 +264 47 training.batch_size 2.0 +264 48 model.embedding_dim 1.0 +264 48 optimizer.lr 0.012754350833512507 +264 48 negative_sampler.num_negs_per_pos 16.0 +264 48 training.batch_size 0.0 +264 49 model.embedding_dim 0.0 +264 49 optimizer.lr 0.04396678482571522 +264 49 negative_sampler.num_negs_per_pos 77.0 +264 49 training.batch_size 1.0 +264 50 model.embedding_dim 0.0 +264 50 optimizer.lr 0.007537918089393626 +264 50 negative_sampler.num_negs_per_pos 56.0 +264 50 training.batch_size 0.0 +264 51 model.embedding_dim 0.0 +264 51 optimizer.lr 0.008288175577683863 +264 51 negative_sampler.num_negs_per_pos 34.0 +264 51 training.batch_size 2.0 +264 52 model.embedding_dim 0.0 +264 52 optimizer.lr 0.06790158584395918 +264 52 negative_sampler.num_negs_per_pos 89.0 +264 52 training.batch_size 0.0 +264 53 model.embedding_dim 2.0 +264 53 optimizer.lr 0.00847525706915742 +264 53 negative_sampler.num_negs_per_pos 49.0 +264 53 training.batch_size 2.0 +264 54 model.embedding_dim 0.0 +264 54 optimizer.lr 
0.005368088357036944 +264 54 negative_sampler.num_negs_per_pos 93.0 +264 54 training.batch_size 2.0 +264 55 model.embedding_dim 2.0 +264 55 optimizer.lr 0.0028301327555595194 +264 55 negative_sampler.num_negs_per_pos 97.0 +264 55 training.batch_size 1.0 +264 1 dataset """wn18rr""" +264 1 model """ermlp""" +264 1 loss """bceaftersigmoid""" +264 1 regularizer """no""" +264 1 optimizer """adam""" +264 1 training_loop """owa""" +264 1 negative_sampler """basic""" +264 1 evaluator """rankbased""" +264 2 dataset """wn18rr""" +264 2 model """ermlp""" +264 2 loss """bceaftersigmoid""" +264 2 regularizer """no""" +264 2 optimizer """adam""" +264 2 training_loop """owa""" +264 2 negative_sampler """basic""" +264 2 evaluator """rankbased""" +264 3 dataset """wn18rr""" +264 3 model """ermlp""" +264 3 loss """bceaftersigmoid""" +264 3 regularizer """no""" +264 3 optimizer """adam""" +264 3 training_loop """owa""" +264 3 negative_sampler """basic""" +264 3 evaluator """rankbased""" +264 4 dataset """wn18rr""" +264 4 model """ermlp""" +264 4 loss """bceaftersigmoid""" +264 4 regularizer """no""" +264 4 optimizer """adam""" +264 4 training_loop """owa""" +264 4 negative_sampler """basic""" +264 4 evaluator """rankbased""" +264 5 dataset """wn18rr""" +264 5 model """ermlp""" +264 5 loss """bceaftersigmoid""" +264 5 regularizer """no""" +264 5 optimizer """adam""" +264 5 training_loop """owa""" +264 5 negative_sampler """basic""" +264 5 evaluator """rankbased""" +264 6 dataset """wn18rr""" +264 6 model """ermlp""" +264 6 loss """bceaftersigmoid""" +264 6 regularizer """no""" +264 6 optimizer """adam""" +264 6 training_loop """owa""" +264 6 negative_sampler """basic""" +264 6 evaluator """rankbased""" +264 7 dataset """wn18rr""" +264 7 model """ermlp""" +264 7 loss """bceaftersigmoid""" +264 7 regularizer """no""" +264 7 optimizer """adam""" +264 7 training_loop """owa""" +264 7 negative_sampler """basic""" +264 7 evaluator """rankbased""" +264 8 dataset """wn18rr""" +264 8 model 
"""ermlp""" +264 8 loss """bceaftersigmoid""" +264 8 regularizer """no""" +264 8 optimizer """adam""" +264 8 training_loop """owa""" +264 8 negative_sampler """basic""" +264 8 evaluator """rankbased""" +264 9 dataset """wn18rr""" +264 9 model """ermlp""" +264 9 loss """bceaftersigmoid""" +264 9 regularizer """no""" +264 9 optimizer """adam""" +264 9 training_loop """owa""" +264 9 negative_sampler """basic""" +264 9 evaluator """rankbased""" +264 10 dataset """wn18rr""" +264 10 model """ermlp""" +264 10 loss """bceaftersigmoid""" +264 10 regularizer """no""" +264 10 optimizer """adam""" +264 10 training_loop """owa""" +264 10 negative_sampler """basic""" +264 10 evaluator """rankbased""" +264 11 dataset """wn18rr""" +264 11 model """ermlp""" +264 11 loss """bceaftersigmoid""" +264 11 regularizer """no""" +264 11 optimizer """adam""" +264 11 training_loop """owa""" +264 11 negative_sampler """basic""" +264 11 evaluator """rankbased""" +264 12 dataset """wn18rr""" +264 12 model """ermlp""" +264 12 loss """bceaftersigmoid""" +264 12 regularizer """no""" +264 12 optimizer """adam""" +264 12 training_loop """owa""" +264 12 negative_sampler """basic""" +264 12 evaluator """rankbased""" +264 13 dataset """wn18rr""" +264 13 model """ermlp""" +264 13 loss """bceaftersigmoid""" +264 13 regularizer """no""" +264 13 optimizer """adam""" +264 13 training_loop """owa""" +264 13 negative_sampler """basic""" +264 13 evaluator """rankbased""" +264 14 dataset """wn18rr""" +264 14 model """ermlp""" +264 14 loss """bceaftersigmoid""" +264 14 regularizer """no""" +264 14 optimizer """adam""" +264 14 training_loop """owa""" +264 14 negative_sampler """basic""" +264 14 evaluator """rankbased""" +264 15 dataset """wn18rr""" +264 15 model """ermlp""" +264 15 loss """bceaftersigmoid""" +264 15 regularizer """no""" +264 15 optimizer """adam""" +264 15 training_loop """owa""" +264 15 negative_sampler """basic""" +264 15 evaluator """rankbased""" +264 16 dataset """wn18rr""" +264 16 model 
"""ermlp""" +264 16 loss """bceaftersigmoid""" +264 16 regularizer """no""" +264 16 optimizer """adam""" +264 16 training_loop """owa""" +264 16 negative_sampler """basic""" +264 16 evaluator """rankbased""" +264 17 dataset """wn18rr""" +264 17 model """ermlp""" +264 17 loss """bceaftersigmoid""" +264 17 regularizer """no""" +264 17 optimizer """adam""" +264 17 training_loop """owa""" +264 17 negative_sampler """basic""" +264 17 evaluator """rankbased""" +264 18 dataset """wn18rr""" +264 18 model """ermlp""" +264 18 loss """bceaftersigmoid""" +264 18 regularizer """no""" +264 18 optimizer """adam""" +264 18 training_loop """owa""" +264 18 negative_sampler """basic""" +264 18 evaluator """rankbased""" +264 19 dataset """wn18rr""" +264 19 model """ermlp""" +264 19 loss """bceaftersigmoid""" +264 19 regularizer """no""" +264 19 optimizer """adam""" +264 19 training_loop """owa""" +264 19 negative_sampler """basic""" +264 19 evaluator """rankbased""" +264 20 dataset """wn18rr""" +264 20 model """ermlp""" +264 20 loss """bceaftersigmoid""" +264 20 regularizer """no""" +264 20 optimizer """adam""" +264 20 training_loop """owa""" +264 20 negative_sampler """basic""" +264 20 evaluator """rankbased""" +264 21 dataset """wn18rr""" +264 21 model """ermlp""" +264 21 loss """bceaftersigmoid""" +264 21 regularizer """no""" +264 21 optimizer """adam""" +264 21 training_loop """owa""" +264 21 negative_sampler """basic""" +264 21 evaluator """rankbased""" +264 22 dataset """wn18rr""" +264 22 model """ermlp""" +264 22 loss """bceaftersigmoid""" +264 22 regularizer """no""" +264 22 optimizer """adam""" +264 22 training_loop """owa""" +264 22 negative_sampler """basic""" +264 22 evaluator """rankbased""" +264 23 dataset """wn18rr""" +264 23 model """ermlp""" +264 23 loss """bceaftersigmoid""" +264 23 regularizer """no""" +264 23 optimizer """adam""" +264 23 training_loop """owa""" +264 23 negative_sampler """basic""" +264 23 evaluator """rankbased""" +264 24 dataset """wn18rr""" +264 
24 model """ermlp""" +264 24 loss """bceaftersigmoid""" +264 24 regularizer """no""" +264 24 optimizer """adam""" +264 24 training_loop """owa""" +264 24 negative_sampler """basic""" +264 24 evaluator """rankbased""" +264 25 dataset """wn18rr""" +264 25 model """ermlp""" +264 25 loss """bceaftersigmoid""" +264 25 regularizer """no""" +264 25 optimizer """adam""" +264 25 training_loop """owa""" +264 25 negative_sampler """basic""" +264 25 evaluator """rankbased""" +264 26 dataset """wn18rr""" +264 26 model """ermlp""" +264 26 loss """bceaftersigmoid""" +264 26 regularizer """no""" +264 26 optimizer """adam""" +264 26 training_loop """owa""" +264 26 negative_sampler """basic""" +264 26 evaluator """rankbased""" +264 27 dataset """wn18rr""" +264 27 model """ermlp""" +264 27 loss """bceaftersigmoid""" +264 27 regularizer """no""" +264 27 optimizer """adam""" +264 27 training_loop """owa""" +264 27 negative_sampler """basic""" +264 27 evaluator """rankbased""" +264 28 dataset """wn18rr""" +264 28 model """ermlp""" +264 28 loss """bceaftersigmoid""" +264 28 regularizer """no""" +264 28 optimizer """adam""" +264 28 training_loop """owa""" +264 28 negative_sampler """basic""" +264 28 evaluator """rankbased""" +264 29 dataset """wn18rr""" +264 29 model """ermlp""" +264 29 loss """bceaftersigmoid""" +264 29 regularizer """no""" +264 29 optimizer """adam""" +264 29 training_loop """owa""" +264 29 negative_sampler """basic""" +264 29 evaluator """rankbased""" +264 30 dataset """wn18rr""" +264 30 model """ermlp""" +264 30 loss """bceaftersigmoid""" +264 30 regularizer """no""" +264 30 optimizer """adam""" +264 30 training_loop """owa""" +264 30 negative_sampler """basic""" +264 30 evaluator """rankbased""" +264 31 dataset """wn18rr""" +264 31 model """ermlp""" +264 31 loss """bceaftersigmoid""" +264 31 regularizer """no""" +264 31 optimizer """adam""" +264 31 training_loop """owa""" +264 31 negative_sampler """basic""" +264 31 evaluator """rankbased""" +264 32 dataset 
"""wn18rr""" +264 32 model """ermlp""" +264 32 loss """bceaftersigmoid""" +264 32 regularizer """no""" +264 32 optimizer """adam""" +264 32 training_loop """owa""" +264 32 negative_sampler """basic""" +264 32 evaluator """rankbased""" +264 33 dataset """wn18rr""" +264 33 model """ermlp""" +264 33 loss """bceaftersigmoid""" +264 33 regularizer """no""" +264 33 optimizer """adam""" +264 33 training_loop """owa""" +264 33 negative_sampler """basic""" +264 33 evaluator """rankbased""" +264 34 dataset """wn18rr""" +264 34 model """ermlp""" +264 34 loss """bceaftersigmoid""" +264 34 regularizer """no""" +264 34 optimizer """adam""" +264 34 training_loop """owa""" +264 34 negative_sampler """basic""" +264 34 evaluator """rankbased""" +264 35 dataset """wn18rr""" +264 35 model """ermlp""" +264 35 loss """bceaftersigmoid""" +264 35 regularizer """no""" +264 35 optimizer """adam""" +264 35 training_loop """owa""" +264 35 negative_sampler """basic""" +264 35 evaluator """rankbased""" +264 36 dataset """wn18rr""" +264 36 model """ermlp""" +264 36 loss """bceaftersigmoid""" +264 36 regularizer """no""" +264 36 optimizer """adam""" +264 36 training_loop """owa""" +264 36 negative_sampler """basic""" +264 36 evaluator """rankbased""" +264 37 dataset """wn18rr""" +264 37 model """ermlp""" +264 37 loss """bceaftersigmoid""" +264 37 regularizer """no""" +264 37 optimizer """adam""" +264 37 training_loop """owa""" +264 37 negative_sampler """basic""" +264 37 evaluator """rankbased""" +264 38 dataset """wn18rr""" +264 38 model """ermlp""" +264 38 loss """bceaftersigmoid""" +264 38 regularizer """no""" +264 38 optimizer """adam""" +264 38 training_loop """owa""" +264 38 negative_sampler """basic""" +264 38 evaluator """rankbased""" +264 39 dataset """wn18rr""" +264 39 model """ermlp""" +264 39 loss """bceaftersigmoid""" +264 39 regularizer """no""" +264 39 optimizer """adam""" +264 39 training_loop """owa""" +264 39 negative_sampler """basic""" +264 39 evaluator """rankbased""" +264 40 
dataset """wn18rr""" +264 40 model """ermlp""" +264 40 loss """bceaftersigmoid""" +264 40 regularizer """no""" +264 40 optimizer """adam""" +264 40 training_loop """owa""" +264 40 negative_sampler """basic""" +264 40 evaluator """rankbased""" +264 41 dataset """wn18rr""" +264 41 model """ermlp""" +264 41 loss """bceaftersigmoid""" +264 41 regularizer """no""" +264 41 optimizer """adam""" +264 41 training_loop """owa""" +264 41 negative_sampler """basic""" +264 41 evaluator """rankbased""" +264 42 dataset """wn18rr""" +264 42 model """ermlp""" +264 42 loss """bceaftersigmoid""" +264 42 regularizer """no""" +264 42 optimizer """adam""" +264 42 training_loop """owa""" +264 42 negative_sampler """basic""" +264 42 evaluator """rankbased""" +264 43 dataset """wn18rr""" +264 43 model """ermlp""" +264 43 loss """bceaftersigmoid""" +264 43 regularizer """no""" +264 43 optimizer """adam""" +264 43 training_loop """owa""" +264 43 negative_sampler """basic""" +264 43 evaluator """rankbased""" +264 44 dataset """wn18rr""" +264 44 model """ermlp""" +264 44 loss """bceaftersigmoid""" +264 44 regularizer """no""" +264 44 optimizer """adam""" +264 44 training_loop """owa""" +264 44 negative_sampler """basic""" +264 44 evaluator """rankbased""" +264 45 dataset """wn18rr""" +264 45 model """ermlp""" +264 45 loss """bceaftersigmoid""" +264 45 regularizer """no""" +264 45 optimizer """adam""" +264 45 training_loop """owa""" +264 45 negative_sampler """basic""" +264 45 evaluator """rankbased""" +264 46 dataset """wn18rr""" +264 46 model """ermlp""" +264 46 loss """bceaftersigmoid""" +264 46 regularizer """no""" +264 46 optimizer """adam""" +264 46 training_loop """owa""" +264 46 negative_sampler """basic""" +264 46 evaluator """rankbased""" +264 47 dataset """wn18rr""" +264 47 model """ermlp""" +264 47 loss """bceaftersigmoid""" +264 47 regularizer """no""" +264 47 optimizer """adam""" +264 47 training_loop """owa""" +264 47 negative_sampler """basic""" +264 47 evaluator """rankbased""" 
+264 48 dataset """wn18rr""" +264 48 model """ermlp""" +264 48 loss """bceaftersigmoid""" +264 48 regularizer """no""" +264 48 optimizer """adam""" +264 48 training_loop """owa""" +264 48 negative_sampler """basic""" +264 48 evaluator """rankbased""" +264 49 dataset """wn18rr""" +264 49 model """ermlp""" +264 49 loss """bceaftersigmoid""" +264 49 regularizer """no""" +264 49 optimizer """adam""" +264 49 training_loop """owa""" +264 49 negative_sampler """basic""" +264 49 evaluator """rankbased""" +264 50 dataset """wn18rr""" +264 50 model """ermlp""" +264 50 loss """bceaftersigmoid""" +264 50 regularizer """no""" +264 50 optimizer """adam""" +264 50 training_loop """owa""" +264 50 negative_sampler """basic""" +264 50 evaluator """rankbased""" +264 51 dataset """wn18rr""" +264 51 model """ermlp""" +264 51 loss """bceaftersigmoid""" +264 51 regularizer """no""" +264 51 optimizer """adam""" +264 51 training_loop """owa""" +264 51 negative_sampler """basic""" +264 51 evaluator """rankbased""" +264 52 dataset """wn18rr""" +264 52 model """ermlp""" +264 52 loss """bceaftersigmoid""" +264 52 regularizer """no""" +264 52 optimizer """adam""" +264 52 training_loop """owa""" +264 52 negative_sampler """basic""" +264 52 evaluator """rankbased""" +264 53 dataset """wn18rr""" +264 53 model """ermlp""" +264 53 loss """bceaftersigmoid""" +264 53 regularizer """no""" +264 53 optimizer """adam""" +264 53 training_loop """owa""" +264 53 negative_sampler """basic""" +264 53 evaluator """rankbased""" +264 54 dataset """wn18rr""" +264 54 model """ermlp""" +264 54 loss """bceaftersigmoid""" +264 54 regularizer """no""" +264 54 optimizer """adam""" +264 54 training_loop """owa""" +264 54 negative_sampler """basic""" +264 54 evaluator """rankbased""" +264 55 dataset """wn18rr""" +264 55 model """ermlp""" +264 55 loss """bceaftersigmoid""" +264 55 regularizer """no""" +264 55 optimizer """adam""" +264 55 training_loop """owa""" +264 55 negative_sampler """basic""" +264 55 evaluator 
"""rankbased""" +265 1 model.embedding_dim 1.0 +265 1 optimizer.lr 0.0030320679085200143 +265 1 negative_sampler.num_negs_per_pos 48.0 +265 1 training.batch_size 0.0 +265 2 model.embedding_dim 2.0 +265 2 optimizer.lr 0.003913957755510532 +265 2 negative_sampler.num_negs_per_pos 52.0 +265 2 training.batch_size 2.0 +265 3 model.embedding_dim 0.0 +265 3 optimizer.lr 0.00409460699295981 +265 3 negative_sampler.num_negs_per_pos 32.0 +265 3 training.batch_size 1.0 +265 4 model.embedding_dim 2.0 +265 4 optimizer.lr 0.00239144780078876 +265 4 negative_sampler.num_negs_per_pos 82.0 +265 4 training.batch_size 2.0 +265 5 model.embedding_dim 0.0 +265 5 optimizer.lr 0.00403366532328504 +265 5 negative_sampler.num_negs_per_pos 97.0 +265 5 training.batch_size 0.0 +265 6 model.embedding_dim 0.0 +265 6 optimizer.lr 0.01232948788418028 +265 6 negative_sampler.num_negs_per_pos 42.0 +265 6 training.batch_size 0.0 +265 7 model.embedding_dim 2.0 +265 7 optimizer.lr 0.017135909295341166 +265 7 negative_sampler.num_negs_per_pos 27.0 +265 7 training.batch_size 2.0 +265 8 model.embedding_dim 1.0 +265 8 optimizer.lr 0.0034189176747151423 +265 8 negative_sampler.num_negs_per_pos 23.0 +265 8 training.batch_size 2.0 +265 9 model.embedding_dim 2.0 +265 9 optimizer.lr 0.006369095043648627 +265 9 negative_sampler.num_negs_per_pos 90.0 +265 9 training.batch_size 2.0 +265 10 model.embedding_dim 1.0 +265 10 optimizer.lr 0.08742330096499354 +265 10 negative_sampler.num_negs_per_pos 96.0 +265 10 training.batch_size 2.0 +265 11 model.embedding_dim 2.0 +265 11 optimizer.lr 0.036451160357824974 +265 11 negative_sampler.num_negs_per_pos 13.0 +265 11 training.batch_size 2.0 +265 12 model.embedding_dim 2.0 +265 12 optimizer.lr 0.04422467346975465 +265 12 negative_sampler.num_negs_per_pos 11.0 +265 12 training.batch_size 0.0 +265 13 model.embedding_dim 0.0 +265 13 optimizer.lr 0.0013019587001596908 +265 13 negative_sampler.num_negs_per_pos 33.0 +265 13 training.batch_size 0.0 +265 14 model.embedding_dim 1.0 
+265 14 optimizer.lr 0.0075806264151484495 +265 14 negative_sampler.num_negs_per_pos 76.0 +265 14 training.batch_size 2.0 +265 15 model.embedding_dim 1.0 +265 15 optimizer.lr 0.005831252838367393 +265 15 negative_sampler.num_negs_per_pos 63.0 +265 15 training.batch_size 2.0 +265 16 model.embedding_dim 2.0 +265 16 optimizer.lr 0.0760568325100015 +265 16 negative_sampler.num_negs_per_pos 77.0 +265 16 training.batch_size 0.0 +265 17 model.embedding_dim 2.0 +265 17 optimizer.lr 0.007115152037135865 +265 17 negative_sampler.num_negs_per_pos 21.0 +265 17 training.batch_size 2.0 +265 18 model.embedding_dim 2.0 +265 18 optimizer.lr 0.034642709517057765 +265 18 negative_sampler.num_negs_per_pos 26.0 +265 18 training.batch_size 0.0 +265 19 model.embedding_dim 2.0 +265 19 optimizer.lr 0.001429270717754546 +265 19 negative_sampler.num_negs_per_pos 8.0 +265 19 training.batch_size 1.0 +265 20 model.embedding_dim 0.0 +265 20 optimizer.lr 0.0068113783135821315 +265 20 negative_sampler.num_negs_per_pos 76.0 +265 20 training.batch_size 2.0 +265 21 model.embedding_dim 2.0 +265 21 optimizer.lr 0.07792317428038709 +265 21 negative_sampler.num_negs_per_pos 36.0 +265 21 training.batch_size 1.0 +265 22 model.embedding_dim 2.0 +265 22 optimizer.lr 0.021285134094356478 +265 22 negative_sampler.num_negs_per_pos 14.0 +265 22 training.batch_size 0.0 +265 23 model.embedding_dim 2.0 +265 23 optimizer.lr 0.021508588858670918 +265 23 negative_sampler.num_negs_per_pos 35.0 +265 23 training.batch_size 1.0 +265 24 model.embedding_dim 0.0 +265 24 optimizer.lr 0.001105399865255128 +265 24 negative_sampler.num_negs_per_pos 88.0 +265 24 training.batch_size 1.0 +265 25 model.embedding_dim 2.0 +265 25 optimizer.lr 0.01203831838365533 +265 25 negative_sampler.num_negs_per_pos 25.0 +265 25 training.batch_size 0.0 +265 26 model.embedding_dim 1.0 +265 26 optimizer.lr 0.018361438777119092 +265 26 negative_sampler.num_negs_per_pos 23.0 +265 26 training.batch_size 0.0 +265 27 model.embedding_dim 0.0 +265 27 
optimizer.lr 0.0801348700882321 +265 27 negative_sampler.num_negs_per_pos 55.0 +265 27 training.batch_size 2.0 +265 28 model.embedding_dim 0.0 +265 28 optimizer.lr 0.03609022719865608 +265 28 negative_sampler.num_negs_per_pos 56.0 +265 28 training.batch_size 0.0 +265 29 model.embedding_dim 2.0 +265 29 optimizer.lr 0.0041184464064389584 +265 29 negative_sampler.num_negs_per_pos 43.0 +265 29 training.batch_size 2.0 +265 30 model.embedding_dim 0.0 +265 30 optimizer.lr 0.0929363651337967 +265 30 negative_sampler.num_negs_per_pos 47.0 +265 30 training.batch_size 2.0 +265 31 model.embedding_dim 1.0 +265 31 optimizer.lr 0.08834267995158118 +265 31 negative_sampler.num_negs_per_pos 2.0 +265 31 training.batch_size 2.0 +265 32 model.embedding_dim 2.0 +265 32 optimizer.lr 0.019371006391497476 +265 32 negative_sampler.num_negs_per_pos 53.0 +265 32 training.batch_size 1.0 +265 33 model.embedding_dim 2.0 +265 33 optimizer.lr 0.02537524687067183 +265 33 negative_sampler.num_negs_per_pos 14.0 +265 33 training.batch_size 1.0 +265 34 model.embedding_dim 2.0 +265 34 optimizer.lr 0.019804605528998268 +265 34 negative_sampler.num_negs_per_pos 91.0 +265 34 training.batch_size 0.0 +265 35 model.embedding_dim 0.0 +265 35 optimizer.lr 0.03412042718107897 +265 35 negative_sampler.num_negs_per_pos 28.0 +265 35 training.batch_size 0.0 +265 36 model.embedding_dim 0.0 +265 36 optimizer.lr 0.0012527534542396553 +265 36 negative_sampler.num_negs_per_pos 51.0 +265 36 training.batch_size 2.0 +265 37 model.embedding_dim 0.0 +265 37 optimizer.lr 0.08988633646863425 +265 37 negative_sampler.num_negs_per_pos 70.0 +265 37 training.batch_size 2.0 +265 38 model.embedding_dim 2.0 +265 38 optimizer.lr 0.058859155857465734 +265 38 negative_sampler.num_negs_per_pos 22.0 +265 38 training.batch_size 1.0 +265 39 model.embedding_dim 0.0 +265 39 optimizer.lr 0.0017090393255693942 +265 39 negative_sampler.num_negs_per_pos 10.0 +265 39 training.batch_size 0.0 +265 40 model.embedding_dim 0.0 +265 40 optimizer.lr 
0.049048879887795556 +265 40 negative_sampler.num_negs_per_pos 53.0 +265 40 training.batch_size 1.0 +265 41 model.embedding_dim 0.0 +265 41 optimizer.lr 0.0841628568474348 +265 41 negative_sampler.num_negs_per_pos 46.0 +265 41 training.batch_size 1.0 +265 42 model.embedding_dim 0.0 +265 42 optimizer.lr 0.0017089597664017866 +265 42 negative_sampler.num_negs_per_pos 35.0 +265 42 training.batch_size 0.0 +265 43 model.embedding_dim 1.0 +265 43 optimizer.lr 0.029024091591750702 +265 43 negative_sampler.num_negs_per_pos 2.0 +265 43 training.batch_size 2.0 +265 44 model.embedding_dim 1.0 +265 44 optimizer.lr 0.0045353882670589535 +265 44 negative_sampler.num_negs_per_pos 88.0 +265 44 training.batch_size 0.0 +265 45 model.embedding_dim 2.0 +265 45 optimizer.lr 0.011624104625476257 +265 45 negative_sampler.num_negs_per_pos 9.0 +265 45 training.batch_size 0.0 +265 46 model.embedding_dim 1.0 +265 46 optimizer.lr 0.09990464511569236 +265 46 negative_sampler.num_negs_per_pos 41.0 +265 46 training.batch_size 1.0 +265 47 model.embedding_dim 2.0 +265 47 optimizer.lr 0.015768317000377997 +265 47 negative_sampler.num_negs_per_pos 20.0 +265 47 training.batch_size 1.0 +265 48 model.embedding_dim 2.0 +265 48 optimizer.lr 0.01605664048821402 +265 48 negative_sampler.num_negs_per_pos 2.0 +265 48 training.batch_size 0.0 +265 49 model.embedding_dim 0.0 +265 49 optimizer.lr 0.02466569087511792 +265 49 negative_sampler.num_negs_per_pos 17.0 +265 49 training.batch_size 0.0 +265 50 model.embedding_dim 1.0 +265 50 optimizer.lr 0.0019726538923923605 +265 50 negative_sampler.num_negs_per_pos 33.0 +265 50 training.batch_size 2.0 +265 1 dataset """wn18rr""" +265 1 model """ermlp""" +265 1 loss """softplus""" +265 1 regularizer """no""" +265 1 optimizer """adam""" +265 1 training_loop """owa""" +265 1 negative_sampler """basic""" +265 1 evaluator """rankbased""" +265 2 dataset """wn18rr""" +265 2 model """ermlp""" +265 2 loss """softplus""" +265 2 regularizer """no""" +265 2 optimizer """adam""" 
+265 2 training_loop """owa""" +265 2 negative_sampler """basic""" +265 2 evaluator """rankbased""" +265 3 dataset """wn18rr""" +265 3 model """ermlp""" +265 3 loss """softplus""" +265 3 regularizer """no""" +265 3 optimizer """adam""" +265 3 training_loop """owa""" +265 3 negative_sampler """basic""" +265 3 evaluator """rankbased""" +265 4 dataset """wn18rr""" +265 4 model """ermlp""" +265 4 loss """softplus""" +265 4 regularizer """no""" +265 4 optimizer """adam""" +265 4 training_loop """owa""" +265 4 negative_sampler """basic""" +265 4 evaluator """rankbased""" +265 5 dataset """wn18rr""" +265 5 model """ermlp""" +265 5 loss """softplus""" +265 5 regularizer """no""" +265 5 optimizer """adam""" +265 5 training_loop """owa""" +265 5 negative_sampler """basic""" +265 5 evaluator """rankbased""" +265 6 dataset """wn18rr""" +265 6 model """ermlp""" +265 6 loss """softplus""" +265 6 regularizer """no""" +265 6 optimizer """adam""" +265 6 training_loop """owa""" +265 6 negative_sampler """basic""" +265 6 evaluator """rankbased""" +265 7 dataset """wn18rr""" +265 7 model """ermlp""" +265 7 loss """softplus""" +265 7 regularizer """no""" +265 7 optimizer """adam""" +265 7 training_loop """owa""" +265 7 negative_sampler """basic""" +265 7 evaluator """rankbased""" +265 8 dataset """wn18rr""" +265 8 model """ermlp""" +265 8 loss """softplus""" +265 8 regularizer """no""" +265 8 optimizer """adam""" +265 8 training_loop """owa""" +265 8 negative_sampler """basic""" +265 8 evaluator """rankbased""" +265 9 dataset """wn18rr""" +265 9 model """ermlp""" +265 9 loss """softplus""" +265 9 regularizer """no""" +265 9 optimizer """adam""" +265 9 training_loop """owa""" +265 9 negative_sampler """basic""" +265 9 evaluator """rankbased""" +265 10 dataset """wn18rr""" +265 10 model """ermlp""" +265 10 loss """softplus""" +265 10 regularizer """no""" +265 10 optimizer """adam""" +265 10 training_loop """owa""" +265 10 negative_sampler """basic""" +265 10 evaluator """rankbased""" 
+265 11 dataset """wn18rr""" +265 11 model """ermlp""" +265 11 loss """softplus""" +265 11 regularizer """no""" +265 11 optimizer """adam""" +265 11 training_loop """owa""" +265 11 negative_sampler """basic""" +265 11 evaluator """rankbased""" +265 12 dataset """wn18rr""" +265 12 model """ermlp""" +265 12 loss """softplus""" +265 12 regularizer """no""" +265 12 optimizer """adam""" +265 12 training_loop """owa""" +265 12 negative_sampler """basic""" +265 12 evaluator """rankbased""" +265 13 dataset """wn18rr""" +265 13 model """ermlp""" +265 13 loss """softplus""" +265 13 regularizer """no""" +265 13 optimizer """adam""" +265 13 training_loop """owa""" +265 13 negative_sampler """basic""" +265 13 evaluator """rankbased""" +265 14 dataset """wn18rr""" +265 14 model """ermlp""" +265 14 loss """softplus""" +265 14 regularizer """no""" +265 14 optimizer """adam""" +265 14 training_loop """owa""" +265 14 negative_sampler """basic""" +265 14 evaluator """rankbased""" +265 15 dataset """wn18rr""" +265 15 model """ermlp""" +265 15 loss """softplus""" +265 15 regularizer """no""" +265 15 optimizer """adam""" +265 15 training_loop """owa""" +265 15 negative_sampler """basic""" +265 15 evaluator """rankbased""" +265 16 dataset """wn18rr""" +265 16 model """ermlp""" +265 16 loss """softplus""" +265 16 regularizer """no""" +265 16 optimizer """adam""" +265 16 training_loop """owa""" +265 16 negative_sampler """basic""" +265 16 evaluator """rankbased""" +265 17 dataset """wn18rr""" +265 17 model """ermlp""" +265 17 loss """softplus""" +265 17 regularizer """no""" +265 17 optimizer """adam""" +265 17 training_loop """owa""" +265 17 negative_sampler """basic""" +265 17 evaluator """rankbased""" +265 18 dataset """wn18rr""" +265 18 model """ermlp""" +265 18 loss """softplus""" +265 18 regularizer """no""" +265 18 optimizer """adam""" +265 18 training_loop """owa""" +265 18 negative_sampler """basic""" +265 18 evaluator """rankbased""" +265 19 dataset """wn18rr""" +265 19 model 
"""ermlp""" +265 19 loss """softplus""" +265 19 regularizer """no""" +265 19 optimizer """adam""" +265 19 training_loop """owa""" +265 19 negative_sampler """basic""" +265 19 evaluator """rankbased""" +265 20 dataset """wn18rr""" +265 20 model """ermlp""" +265 20 loss """softplus""" +265 20 regularizer """no""" +265 20 optimizer """adam""" +265 20 training_loop """owa""" +265 20 negative_sampler """basic""" +265 20 evaluator """rankbased""" +265 21 dataset """wn18rr""" +265 21 model """ermlp""" +265 21 loss """softplus""" +265 21 regularizer """no""" +265 21 optimizer """adam""" +265 21 training_loop """owa""" +265 21 negative_sampler """basic""" +265 21 evaluator """rankbased""" +265 22 dataset """wn18rr""" +265 22 model """ermlp""" +265 22 loss """softplus""" +265 22 regularizer """no""" +265 22 optimizer """adam""" +265 22 training_loop """owa""" +265 22 negative_sampler """basic""" +265 22 evaluator """rankbased""" +265 23 dataset """wn18rr""" +265 23 model """ermlp""" +265 23 loss """softplus""" +265 23 regularizer """no""" +265 23 optimizer """adam""" +265 23 training_loop """owa""" +265 23 negative_sampler """basic""" +265 23 evaluator """rankbased""" +265 24 dataset """wn18rr""" +265 24 model """ermlp""" +265 24 loss """softplus""" +265 24 regularizer """no""" +265 24 optimizer """adam""" +265 24 training_loop """owa""" +265 24 negative_sampler """basic""" +265 24 evaluator """rankbased""" +265 25 dataset """wn18rr""" +265 25 model """ermlp""" +265 25 loss """softplus""" +265 25 regularizer """no""" +265 25 optimizer """adam""" +265 25 training_loop """owa""" +265 25 negative_sampler """basic""" +265 25 evaluator """rankbased""" +265 26 dataset """wn18rr""" +265 26 model """ermlp""" +265 26 loss """softplus""" +265 26 regularizer """no""" +265 26 optimizer """adam""" +265 26 training_loop """owa""" +265 26 negative_sampler """basic""" +265 26 evaluator """rankbased""" +265 27 dataset """wn18rr""" +265 27 model """ermlp""" +265 27 loss """softplus""" +265 27 
regularizer """no""" +265 27 optimizer """adam""" +265 27 training_loop """owa""" +265 27 negative_sampler """basic""" +265 27 evaluator """rankbased""" +265 28 dataset """wn18rr""" +265 28 model """ermlp""" +265 28 loss """softplus""" +265 28 regularizer """no""" +265 28 optimizer """adam""" +265 28 training_loop """owa""" +265 28 negative_sampler """basic""" +265 28 evaluator """rankbased""" +265 29 dataset """wn18rr""" +265 29 model """ermlp""" +265 29 loss """softplus""" +265 29 regularizer """no""" +265 29 optimizer """adam""" +265 29 training_loop """owa""" +265 29 negative_sampler """basic""" +265 29 evaluator """rankbased""" +265 30 dataset """wn18rr""" +265 30 model """ermlp""" +265 30 loss """softplus""" +265 30 regularizer """no""" +265 30 optimizer """adam""" +265 30 training_loop """owa""" +265 30 negative_sampler """basic""" +265 30 evaluator """rankbased""" +265 31 dataset """wn18rr""" +265 31 model """ermlp""" +265 31 loss """softplus""" +265 31 regularizer """no""" +265 31 optimizer """adam""" +265 31 training_loop """owa""" +265 31 negative_sampler """basic""" +265 31 evaluator """rankbased""" +265 32 dataset """wn18rr""" +265 32 model """ermlp""" +265 32 loss """softplus""" +265 32 regularizer """no""" +265 32 optimizer """adam""" +265 32 training_loop """owa""" +265 32 negative_sampler """basic""" +265 32 evaluator """rankbased""" +265 33 dataset """wn18rr""" +265 33 model """ermlp""" +265 33 loss """softplus""" +265 33 regularizer """no""" +265 33 optimizer """adam""" +265 33 training_loop """owa""" +265 33 negative_sampler """basic""" +265 33 evaluator """rankbased""" +265 34 dataset """wn18rr""" +265 34 model """ermlp""" +265 34 loss """softplus""" +265 34 regularizer """no""" +265 34 optimizer """adam""" +265 34 training_loop """owa""" +265 34 negative_sampler """basic""" +265 34 evaluator """rankbased""" +265 35 dataset """wn18rr""" +265 35 model """ermlp""" +265 35 loss """softplus""" +265 35 regularizer """no""" +265 35 optimizer 
"""adam""" +265 35 training_loop """owa""" +265 35 negative_sampler """basic""" +265 35 evaluator """rankbased""" +265 36 dataset """wn18rr""" +265 36 model """ermlp""" +265 36 loss """softplus""" +265 36 regularizer """no""" +265 36 optimizer """adam""" +265 36 training_loop """owa""" +265 36 negative_sampler """basic""" +265 36 evaluator """rankbased""" +265 37 dataset """wn18rr""" +265 37 model """ermlp""" +265 37 loss """softplus""" +265 37 regularizer """no""" +265 37 optimizer """adam""" +265 37 training_loop """owa""" +265 37 negative_sampler """basic""" +265 37 evaluator """rankbased""" +265 38 dataset """wn18rr""" +265 38 model """ermlp""" +265 38 loss """softplus""" +265 38 regularizer """no""" +265 38 optimizer """adam""" +265 38 training_loop """owa""" +265 38 negative_sampler """basic""" +265 38 evaluator """rankbased""" +265 39 dataset """wn18rr""" +265 39 model """ermlp""" +265 39 loss """softplus""" +265 39 regularizer """no""" +265 39 optimizer """adam""" +265 39 training_loop """owa""" +265 39 negative_sampler """basic""" +265 39 evaluator """rankbased""" +265 40 dataset """wn18rr""" +265 40 model """ermlp""" +265 40 loss """softplus""" +265 40 regularizer """no""" +265 40 optimizer """adam""" +265 40 training_loop """owa""" +265 40 negative_sampler """basic""" +265 40 evaluator """rankbased""" +265 41 dataset """wn18rr""" +265 41 model """ermlp""" +265 41 loss """softplus""" +265 41 regularizer """no""" +265 41 optimizer """adam""" +265 41 training_loop """owa""" +265 41 negative_sampler """basic""" +265 41 evaluator """rankbased""" +265 42 dataset """wn18rr""" +265 42 model """ermlp""" +265 42 loss """softplus""" +265 42 regularizer """no""" +265 42 optimizer """adam""" +265 42 training_loop """owa""" +265 42 negative_sampler """basic""" +265 42 evaluator """rankbased""" +265 43 dataset """wn18rr""" +265 43 model """ermlp""" +265 43 loss """softplus""" +265 43 regularizer """no""" +265 43 optimizer """adam""" +265 43 training_loop """owa""" +265 
43 negative_sampler """basic""" +265 43 evaluator """rankbased""" +265 44 dataset """wn18rr""" +265 44 model """ermlp""" +265 44 loss """softplus""" +265 44 regularizer """no""" +265 44 optimizer """adam""" +265 44 training_loop """owa""" +265 44 negative_sampler """basic""" +265 44 evaluator """rankbased""" +265 45 dataset """wn18rr""" +265 45 model """ermlp""" +265 45 loss """softplus""" +265 45 regularizer """no""" +265 45 optimizer """adam""" +265 45 training_loop """owa""" +265 45 negative_sampler """basic""" +265 45 evaluator """rankbased""" +265 46 dataset """wn18rr""" +265 46 model """ermlp""" +265 46 loss """softplus""" +265 46 regularizer """no""" +265 46 optimizer """adam""" +265 46 training_loop """owa""" +265 46 negative_sampler """basic""" +265 46 evaluator """rankbased""" +265 47 dataset """wn18rr""" +265 47 model """ermlp""" +265 47 loss """softplus""" +265 47 regularizer """no""" +265 47 optimizer """adam""" +265 47 training_loop """owa""" +265 47 negative_sampler """basic""" +265 47 evaluator """rankbased""" +265 48 dataset """wn18rr""" +265 48 model """ermlp""" +265 48 loss """softplus""" +265 48 regularizer """no""" +265 48 optimizer """adam""" +265 48 training_loop """owa""" +265 48 negative_sampler """basic""" +265 48 evaluator """rankbased""" +265 49 dataset """wn18rr""" +265 49 model """ermlp""" +265 49 loss """softplus""" +265 49 regularizer """no""" +265 49 optimizer """adam""" +265 49 training_loop """owa""" +265 49 negative_sampler """basic""" +265 49 evaluator """rankbased""" +265 50 dataset """wn18rr""" +265 50 model """ermlp""" +265 50 loss """softplus""" +265 50 regularizer """no""" +265 50 optimizer """adam""" +265 50 training_loop """owa""" +265 50 negative_sampler """basic""" +265 50 evaluator """rankbased""" +266 1 model.embedding_dim 1.0 +266 1 optimizer.lr 0.017036065624567012 +266 1 negative_sampler.num_negs_per_pos 71.0 +266 1 training.batch_size 2.0 +266 2 model.embedding_dim 1.0 +266 2 optimizer.lr 0.025126460951758963 +266 
2 negative_sampler.num_negs_per_pos 65.0 +266 2 training.batch_size 2.0 +266 3 model.embedding_dim 2.0 +266 3 optimizer.lr 0.006300147912411428 +266 3 negative_sampler.num_negs_per_pos 69.0 +266 3 training.batch_size 1.0 +266 4 model.embedding_dim 1.0 +266 4 optimizer.lr 0.04241459263111001 +266 4 negative_sampler.num_negs_per_pos 74.0 +266 4 training.batch_size 1.0 +266 5 model.embedding_dim 0.0 +266 5 optimizer.lr 0.011410017668417085 +266 5 negative_sampler.num_negs_per_pos 74.0 +266 5 training.batch_size 1.0 +266 6 model.embedding_dim 0.0 +266 6 optimizer.lr 0.056408050313197504 +266 6 negative_sampler.num_negs_per_pos 8.0 +266 6 training.batch_size 1.0 +266 7 model.embedding_dim 2.0 +266 7 optimizer.lr 0.009818014574242322 +266 7 negative_sampler.num_negs_per_pos 65.0 +266 7 training.batch_size 2.0 +266 8 model.embedding_dim 0.0 +266 8 optimizer.lr 0.004627671029165186 +266 8 negative_sampler.num_negs_per_pos 31.0 +266 8 training.batch_size 0.0 +266 9 model.embedding_dim 2.0 +266 9 optimizer.lr 0.03678637811018764 +266 9 negative_sampler.num_negs_per_pos 26.0 +266 9 training.batch_size 2.0 +266 10 model.embedding_dim 0.0 +266 10 optimizer.lr 0.054862207260320324 +266 10 negative_sampler.num_negs_per_pos 73.0 +266 10 training.batch_size 1.0 +266 11 model.embedding_dim 2.0 +266 11 optimizer.lr 0.08311405079546541 +266 11 negative_sampler.num_negs_per_pos 25.0 +266 11 training.batch_size 0.0 +266 12 model.embedding_dim 0.0 +266 12 optimizer.lr 0.009118898798570583 +266 12 negative_sampler.num_negs_per_pos 22.0 +266 12 training.batch_size 1.0 +266 13 model.embedding_dim 1.0 +266 13 optimizer.lr 0.013965543347605799 +266 13 negative_sampler.num_negs_per_pos 47.0 +266 13 training.batch_size 0.0 +266 14 model.embedding_dim 2.0 +266 14 optimizer.lr 0.011209656185475253 +266 14 negative_sampler.num_negs_per_pos 66.0 +266 14 training.batch_size 2.0 +266 15 model.embedding_dim 2.0 +266 15 optimizer.lr 0.00327800375600071 +266 15 negative_sampler.num_negs_per_pos 90.0 
+266 15 training.batch_size 1.0 +266 16 model.embedding_dim 1.0 +266 16 optimizer.lr 0.003400023072407021 +266 16 negative_sampler.num_negs_per_pos 26.0 +266 16 training.batch_size 2.0 +266 17 model.embedding_dim 2.0 +266 17 optimizer.lr 0.058706018931542675 +266 17 negative_sampler.num_negs_per_pos 18.0 +266 17 training.batch_size 0.0 +266 18 model.embedding_dim 0.0 +266 18 optimizer.lr 0.003052419424946181 +266 18 negative_sampler.num_negs_per_pos 24.0 +266 18 training.batch_size 2.0 +266 19 model.embedding_dim 0.0 +266 19 optimizer.lr 0.01779570162671721 +266 19 negative_sampler.num_negs_per_pos 14.0 +266 19 training.batch_size 1.0 +266 20 model.embedding_dim 0.0 +266 20 optimizer.lr 0.007333711037588021 +266 20 negative_sampler.num_negs_per_pos 53.0 +266 20 training.batch_size 1.0 +266 21 model.embedding_dim 1.0 +266 21 optimizer.lr 0.018258448994721618 +266 21 negative_sampler.num_negs_per_pos 64.0 +266 21 training.batch_size 0.0 +266 22 model.embedding_dim 0.0 +266 22 optimizer.lr 0.022996851130079828 +266 22 negative_sampler.num_negs_per_pos 74.0 +266 22 training.batch_size 0.0 +266 23 model.embedding_dim 1.0 +266 23 optimizer.lr 0.0015794793068185124 +266 23 negative_sampler.num_negs_per_pos 71.0 +266 23 training.batch_size 1.0 +266 24 model.embedding_dim 2.0 +266 24 optimizer.lr 0.0021566627413109342 +266 24 negative_sampler.num_negs_per_pos 28.0 +266 24 training.batch_size 0.0 +266 25 model.embedding_dim 2.0 +266 25 optimizer.lr 0.004506352223017578 +266 25 negative_sampler.num_negs_per_pos 51.0 +266 25 training.batch_size 0.0 +266 26 model.embedding_dim 0.0 +266 26 optimizer.lr 0.002091597694538249 +266 26 negative_sampler.num_negs_per_pos 21.0 +266 26 training.batch_size 1.0 +266 27 model.embedding_dim 0.0 +266 27 optimizer.lr 0.0032700707960677685 +266 27 negative_sampler.num_negs_per_pos 14.0 +266 27 training.batch_size 2.0 +266 28 model.embedding_dim 0.0 +266 28 optimizer.lr 0.004963282677950901 +266 28 negative_sampler.num_negs_per_pos 28.0 +266 28 
training.batch_size 1.0 +266 29 model.embedding_dim 1.0 +266 29 optimizer.lr 0.01739021443721272 +266 29 negative_sampler.num_negs_per_pos 17.0 +266 29 training.batch_size 0.0 +266 30 model.embedding_dim 0.0 +266 30 optimizer.lr 0.012137112995732856 +266 30 negative_sampler.num_negs_per_pos 73.0 +266 30 training.batch_size 1.0 +266 31 model.embedding_dim 1.0 +266 31 optimizer.lr 0.007207908784506968 +266 31 negative_sampler.num_negs_per_pos 10.0 +266 31 training.batch_size 0.0 +266 32 model.embedding_dim 1.0 +266 32 optimizer.lr 0.04810952045351789 +266 32 negative_sampler.num_negs_per_pos 60.0 +266 32 training.batch_size 1.0 +266 33 model.embedding_dim 2.0 +266 33 optimizer.lr 0.022827176297699644 +266 33 negative_sampler.num_negs_per_pos 26.0 +266 33 training.batch_size 1.0 +266 34 model.embedding_dim 1.0 +266 34 optimizer.lr 0.005730467634228764 +266 34 negative_sampler.num_negs_per_pos 53.0 +266 34 training.batch_size 2.0 +266 35 model.embedding_dim 2.0 +266 35 optimizer.lr 0.009802185860443928 +266 35 negative_sampler.num_negs_per_pos 51.0 +266 35 training.batch_size 2.0 +266 36 model.embedding_dim 2.0 +266 36 optimizer.lr 0.0033626499628337165 +266 36 negative_sampler.num_negs_per_pos 85.0 +266 36 training.batch_size 2.0 +266 37 model.embedding_dim 1.0 +266 37 optimizer.lr 0.0015396452612681884 +266 37 negative_sampler.num_negs_per_pos 53.0 +266 37 training.batch_size 1.0 +266 38 model.embedding_dim 0.0 +266 38 optimizer.lr 0.0016426418232620276 +266 38 negative_sampler.num_negs_per_pos 9.0 +266 38 training.batch_size 2.0 +266 39 model.embedding_dim 0.0 +266 39 optimizer.lr 0.06708196674670738 +266 39 negative_sampler.num_negs_per_pos 65.0 +266 39 training.batch_size 2.0 +266 40 model.embedding_dim 0.0 +266 40 optimizer.lr 0.0015301198052447048 +266 40 negative_sampler.num_negs_per_pos 53.0 +266 40 training.batch_size 2.0 +266 41 model.embedding_dim 1.0 +266 41 optimizer.lr 0.06228638620764301 +266 41 negative_sampler.num_negs_per_pos 82.0 +266 41 
training.batch_size 0.0 +266 42 model.embedding_dim 0.0 +266 42 optimizer.lr 0.020910955605419914 +266 42 negative_sampler.num_negs_per_pos 28.0 +266 42 training.batch_size 0.0 +266 43 model.embedding_dim 0.0 +266 43 optimizer.lr 0.008731388653519329 +266 43 negative_sampler.num_negs_per_pos 34.0 +266 43 training.batch_size 2.0 +266 44 model.embedding_dim 0.0 +266 44 optimizer.lr 0.001758972469102033 +266 44 negative_sampler.num_negs_per_pos 93.0 +266 44 training.batch_size 2.0 +266 45 model.embedding_dim 1.0 +266 45 optimizer.lr 0.006966807486929515 +266 45 negative_sampler.num_negs_per_pos 41.0 +266 45 training.batch_size 2.0 +266 46 model.embedding_dim 1.0 +266 46 optimizer.lr 0.008615893506060924 +266 46 negative_sampler.num_negs_per_pos 47.0 +266 46 training.batch_size 2.0 +266 47 model.embedding_dim 2.0 +266 47 optimizer.lr 0.048522258219445814 +266 47 negative_sampler.num_negs_per_pos 94.0 +266 47 training.batch_size 2.0 +266 48 model.embedding_dim 2.0 +266 48 optimizer.lr 0.09155877787816266 +266 48 negative_sampler.num_negs_per_pos 87.0 +266 48 training.batch_size 1.0 +266 49 model.embedding_dim 0.0 +266 49 optimizer.lr 0.0052421520957027755 +266 49 negative_sampler.num_negs_per_pos 75.0 +266 49 training.batch_size 1.0 +266 50 model.embedding_dim 1.0 +266 50 optimizer.lr 0.0015334513709485981 +266 50 negative_sampler.num_negs_per_pos 99.0 +266 50 training.batch_size 0.0 +266 51 model.embedding_dim 1.0 +266 51 optimizer.lr 0.0035631536905822957 +266 51 negative_sampler.num_negs_per_pos 35.0 +266 51 training.batch_size 0.0 +266 52 model.embedding_dim 1.0 +266 52 optimizer.lr 0.003519848661090185 +266 52 negative_sampler.num_negs_per_pos 22.0 +266 52 training.batch_size 2.0 +266 53 model.embedding_dim 0.0 +266 53 optimizer.lr 0.024311462282026908 +266 53 negative_sampler.num_negs_per_pos 40.0 +266 53 training.batch_size 0.0 +266 54 model.embedding_dim 1.0 +266 54 optimizer.lr 0.0028652225594523423 +266 54 negative_sampler.num_negs_per_pos 31.0 +266 54 
training.batch_size 1.0 +266 55 model.embedding_dim 0.0 +266 55 optimizer.lr 0.020089465211021194 +266 55 negative_sampler.num_negs_per_pos 89.0 +266 55 training.batch_size 1.0 +266 56 model.embedding_dim 0.0 +266 56 optimizer.lr 0.006944829525166112 +266 56 negative_sampler.num_negs_per_pos 33.0 +266 56 training.batch_size 2.0 +266 57 model.embedding_dim 0.0 +266 57 optimizer.lr 0.08079601830308589 +266 57 negative_sampler.num_negs_per_pos 96.0 +266 57 training.batch_size 2.0 +266 58 model.embedding_dim 2.0 +266 58 optimizer.lr 0.015731554643275567 +266 58 negative_sampler.num_negs_per_pos 9.0 +266 58 training.batch_size 0.0 +266 59 model.embedding_dim 2.0 +266 59 optimizer.lr 0.003962667352046384 +266 59 negative_sampler.num_negs_per_pos 26.0 +266 59 training.batch_size 1.0 +266 60 model.embedding_dim 0.0 +266 60 optimizer.lr 0.0023918727246269884 +266 60 negative_sampler.num_negs_per_pos 70.0 +266 60 training.batch_size 2.0 +266 61 model.embedding_dim 2.0 +266 61 optimizer.lr 0.04222450100837639 +266 61 negative_sampler.num_negs_per_pos 23.0 +266 61 training.batch_size 0.0 +266 62 model.embedding_dim 2.0 +266 62 optimizer.lr 0.006307737642828532 +266 62 negative_sampler.num_negs_per_pos 2.0 +266 62 training.batch_size 0.0 +266 63 model.embedding_dim 0.0 +266 63 optimizer.lr 0.03749337679034069 +266 63 negative_sampler.num_negs_per_pos 21.0 +266 63 training.batch_size 0.0 +266 64 model.embedding_dim 0.0 +266 64 optimizer.lr 0.00619900166546564 +266 64 negative_sampler.num_negs_per_pos 99.0 +266 64 training.batch_size 2.0 +266 65 model.embedding_dim 1.0 +266 65 optimizer.lr 0.002716794694480531 +266 65 negative_sampler.num_negs_per_pos 64.0 +266 65 training.batch_size 0.0 +266 66 model.embedding_dim 1.0 +266 66 optimizer.lr 0.0013993735813051986 +266 66 negative_sampler.num_negs_per_pos 62.0 +266 66 training.batch_size 0.0 +266 67 model.embedding_dim 1.0 +266 67 optimizer.lr 0.0072837265520171975 +266 67 negative_sampler.num_negs_per_pos 32.0 +266 67 
training.batch_size 1.0 +266 68 model.embedding_dim 0.0 +266 68 optimizer.lr 0.0013731079297694272 +266 68 negative_sampler.num_negs_per_pos 47.0 +266 68 training.batch_size 0.0 +266 69 model.embedding_dim 2.0 +266 69 optimizer.lr 0.004913545795335515 +266 69 negative_sampler.num_negs_per_pos 35.0 +266 69 training.batch_size 1.0 +266 70 model.embedding_dim 2.0 +266 70 optimizer.lr 0.0053139246215885725 +266 70 negative_sampler.num_negs_per_pos 23.0 +266 70 training.batch_size 2.0 +266 71 model.embedding_dim 0.0 +266 71 optimizer.lr 0.006343460317960803 +266 71 negative_sampler.num_negs_per_pos 75.0 +266 71 training.batch_size 1.0 +266 72 model.embedding_dim 1.0 +266 72 optimizer.lr 0.02797148453685712 +266 72 negative_sampler.num_negs_per_pos 54.0 +266 72 training.batch_size 0.0 +266 73 model.embedding_dim 2.0 +266 73 optimizer.lr 0.03802595714737875 +266 73 negative_sampler.num_negs_per_pos 82.0 +266 73 training.batch_size 1.0 +266 74 model.embedding_dim 2.0 +266 74 optimizer.lr 0.037507252204566494 +266 74 negative_sampler.num_negs_per_pos 99.0 +266 74 training.batch_size 2.0 +266 75 model.embedding_dim 2.0 +266 75 optimizer.lr 0.02298104722953096 +266 75 negative_sampler.num_negs_per_pos 91.0 +266 75 training.batch_size 1.0 +266 76 model.embedding_dim 2.0 +266 76 optimizer.lr 0.0018357857496541488 +266 76 negative_sampler.num_negs_per_pos 22.0 +266 76 training.batch_size 1.0 +266 77 model.embedding_dim 1.0 +266 77 optimizer.lr 0.0058401270864835405 +266 77 negative_sampler.num_negs_per_pos 32.0 +266 77 training.batch_size 2.0 +266 78 model.embedding_dim 2.0 +266 78 optimizer.lr 0.09618865074891132 +266 78 negative_sampler.num_negs_per_pos 95.0 +266 78 training.batch_size 2.0 +266 79 model.embedding_dim 1.0 +266 79 optimizer.lr 0.022091235194595417 +266 79 negative_sampler.num_negs_per_pos 10.0 +266 79 training.batch_size 0.0 +266 80 model.embedding_dim 1.0 +266 80 optimizer.lr 0.013814395301240345 +266 80 negative_sampler.num_negs_per_pos 96.0 +266 80 
training.batch_size 0.0 +266 81 model.embedding_dim 0.0 +266 81 optimizer.lr 0.050284235346861085 +266 81 negative_sampler.num_negs_per_pos 46.0 +266 81 training.batch_size 0.0 +266 82 model.embedding_dim 0.0 +266 82 optimizer.lr 0.053597884210828395 +266 82 negative_sampler.num_negs_per_pos 83.0 +266 82 training.batch_size 0.0 +266 1 dataset """wn18rr""" +266 1 model """ermlp""" +266 1 loss """bceaftersigmoid""" +266 1 regularizer """no""" +266 1 optimizer """adam""" +266 1 training_loop """owa""" +266 1 negative_sampler """basic""" +266 1 evaluator """rankbased""" +266 2 dataset """wn18rr""" +266 2 model """ermlp""" +266 2 loss """bceaftersigmoid""" +266 2 regularizer """no""" +266 2 optimizer """adam""" +266 2 training_loop """owa""" +266 2 negative_sampler """basic""" +266 2 evaluator """rankbased""" +266 3 dataset """wn18rr""" +266 3 model """ermlp""" +266 3 loss """bceaftersigmoid""" +266 3 regularizer """no""" +266 3 optimizer """adam""" +266 3 training_loop """owa""" +266 3 negative_sampler """basic""" +266 3 evaluator """rankbased""" +266 4 dataset """wn18rr""" +266 4 model """ermlp""" +266 4 loss """bceaftersigmoid""" +266 4 regularizer """no""" +266 4 optimizer """adam""" +266 4 training_loop """owa""" +266 4 negative_sampler """basic""" +266 4 evaluator """rankbased""" +266 5 dataset """wn18rr""" +266 5 model """ermlp""" +266 5 loss """bceaftersigmoid""" +266 5 regularizer """no""" +266 5 optimizer """adam""" +266 5 training_loop """owa""" +266 5 negative_sampler """basic""" +266 5 evaluator """rankbased""" +266 6 dataset """wn18rr""" +266 6 model """ermlp""" +266 6 loss """bceaftersigmoid""" +266 6 regularizer """no""" +266 6 optimizer """adam""" +266 6 training_loop """owa""" +266 6 negative_sampler """basic""" +266 6 evaluator """rankbased""" +266 7 dataset """wn18rr""" +266 7 model """ermlp""" +266 7 loss """bceaftersigmoid""" +266 7 regularizer """no""" +266 7 optimizer """adam""" +266 7 training_loop """owa""" +266 7 negative_sampler """basic""" 
+266 7 evaluator """rankbased""" +266 8 dataset """wn18rr""" +266 8 model """ermlp""" +266 8 loss """bceaftersigmoid""" +266 8 regularizer """no""" +266 8 optimizer """adam""" +266 8 training_loop """owa""" +266 8 negative_sampler """basic""" +266 8 evaluator """rankbased""" +266 9 dataset """wn18rr""" +266 9 model """ermlp""" +266 9 loss """bceaftersigmoid""" +266 9 regularizer """no""" +266 9 optimizer """adam""" +266 9 training_loop """owa""" +266 9 negative_sampler """basic""" +266 9 evaluator """rankbased""" +266 10 dataset """wn18rr""" +266 10 model """ermlp""" +266 10 loss """bceaftersigmoid""" +266 10 regularizer """no""" +266 10 optimizer """adam""" +266 10 training_loop """owa""" +266 10 negative_sampler """basic""" +266 10 evaluator """rankbased""" +266 11 dataset """wn18rr""" +266 11 model """ermlp""" +266 11 loss """bceaftersigmoid""" +266 11 regularizer """no""" +266 11 optimizer """adam""" +266 11 training_loop """owa""" +266 11 negative_sampler """basic""" +266 11 evaluator """rankbased""" +266 12 dataset """wn18rr""" +266 12 model """ermlp""" +266 12 loss """bceaftersigmoid""" +266 12 regularizer """no""" +266 12 optimizer """adam""" +266 12 training_loop """owa""" +266 12 negative_sampler """basic""" +266 12 evaluator """rankbased""" +266 13 dataset """wn18rr""" +266 13 model """ermlp""" +266 13 loss """bceaftersigmoid""" +266 13 regularizer """no""" +266 13 optimizer """adam""" +266 13 training_loop """owa""" +266 13 negative_sampler """basic""" +266 13 evaluator """rankbased""" +266 14 dataset """wn18rr""" +266 14 model """ermlp""" +266 14 loss """bceaftersigmoid""" +266 14 regularizer """no""" +266 14 optimizer """adam""" +266 14 training_loop """owa""" +266 14 negative_sampler """basic""" +266 14 evaluator """rankbased""" +266 15 dataset """wn18rr""" +266 15 model """ermlp""" +266 15 loss """bceaftersigmoid""" +266 15 regularizer """no""" +266 15 optimizer """adam""" +266 15 training_loop """owa""" +266 15 negative_sampler """basic""" +266 15 
evaluator """rankbased""" +266 16 dataset """wn18rr""" +266 16 model """ermlp""" +266 16 loss """bceaftersigmoid""" +266 16 regularizer """no""" +266 16 optimizer """adam""" +266 16 training_loop """owa""" +266 16 negative_sampler """basic""" +266 16 evaluator """rankbased""" +266 17 dataset """wn18rr""" +266 17 model """ermlp""" +266 17 loss """bceaftersigmoid""" +266 17 regularizer """no""" +266 17 optimizer """adam""" +266 17 training_loop """owa""" +266 17 negative_sampler """basic""" +266 17 evaluator """rankbased""" +266 18 dataset """wn18rr""" +266 18 model """ermlp""" +266 18 loss """bceaftersigmoid""" +266 18 regularizer """no""" +266 18 optimizer """adam""" +266 18 training_loop """owa""" +266 18 negative_sampler """basic""" +266 18 evaluator """rankbased""" +266 19 dataset """wn18rr""" +266 19 model """ermlp""" +266 19 loss """bceaftersigmoid""" +266 19 regularizer """no""" +266 19 optimizer """adam""" +266 19 training_loop """owa""" +266 19 negative_sampler """basic""" +266 19 evaluator """rankbased""" +266 20 dataset """wn18rr""" +266 20 model """ermlp""" +266 20 loss """bceaftersigmoid""" +266 20 regularizer """no""" +266 20 optimizer """adam""" +266 20 training_loop """owa""" +266 20 negative_sampler """basic""" +266 20 evaluator """rankbased""" +266 21 dataset """wn18rr""" +266 21 model """ermlp""" +266 21 loss """bceaftersigmoid""" +266 21 regularizer """no""" +266 21 optimizer """adam""" +266 21 training_loop """owa""" +266 21 negative_sampler """basic""" +266 21 evaluator """rankbased""" +266 22 dataset """wn18rr""" +266 22 model """ermlp""" +266 22 loss """bceaftersigmoid""" +266 22 regularizer """no""" +266 22 optimizer """adam""" +266 22 training_loop """owa""" +266 22 negative_sampler """basic""" +266 22 evaluator """rankbased""" +266 23 dataset """wn18rr""" +266 23 model """ermlp""" +266 23 loss """bceaftersigmoid""" +266 23 regularizer """no""" +266 23 optimizer """adam""" +266 23 training_loop """owa""" +266 23 negative_sampler """basic""" 
+266 23 evaluator """rankbased""" +266 24 dataset """wn18rr""" +266 24 model """ermlp""" +266 24 loss """bceaftersigmoid""" +266 24 regularizer """no""" +266 24 optimizer """adam""" +266 24 training_loop """owa""" +266 24 negative_sampler """basic""" +266 24 evaluator """rankbased""" +266 25 dataset """wn18rr""" +266 25 model """ermlp""" +266 25 loss """bceaftersigmoid""" +266 25 regularizer """no""" +266 25 optimizer """adam""" +266 25 training_loop """owa""" +266 25 negative_sampler """basic""" +266 25 evaluator """rankbased""" +266 26 dataset """wn18rr""" +266 26 model """ermlp""" +266 26 loss """bceaftersigmoid""" +266 26 regularizer """no""" +266 26 optimizer """adam""" +266 26 training_loop """owa""" +266 26 negative_sampler """basic""" +266 26 evaluator """rankbased""" +266 27 dataset """wn18rr""" +266 27 model """ermlp""" +266 27 loss """bceaftersigmoid""" +266 27 regularizer """no""" +266 27 optimizer """adam""" +266 27 training_loop """owa""" +266 27 negative_sampler """basic""" +266 27 evaluator """rankbased""" +266 28 dataset """wn18rr""" +266 28 model """ermlp""" +266 28 loss """bceaftersigmoid""" +266 28 regularizer """no""" +266 28 optimizer """adam""" +266 28 training_loop """owa""" +266 28 negative_sampler """basic""" +266 28 evaluator """rankbased""" +266 29 dataset """wn18rr""" +266 29 model """ermlp""" +266 29 loss """bceaftersigmoid""" +266 29 regularizer """no""" +266 29 optimizer """adam""" +266 29 training_loop """owa""" +266 29 negative_sampler """basic""" +266 29 evaluator """rankbased""" +266 30 dataset """wn18rr""" +266 30 model """ermlp""" +266 30 loss """bceaftersigmoid""" +266 30 regularizer """no""" +266 30 optimizer """adam""" +266 30 training_loop """owa""" +266 30 negative_sampler """basic""" +266 30 evaluator """rankbased""" +266 31 dataset """wn18rr""" +266 31 model """ermlp""" +266 31 loss """bceaftersigmoid""" +266 31 regularizer """no""" +266 31 optimizer """adam""" +266 31 training_loop """owa""" +266 31 negative_sampler 
"""basic""" +266 31 evaluator """rankbased""" +266 32 dataset """wn18rr""" +266 32 model """ermlp""" +266 32 loss """bceaftersigmoid""" +266 32 regularizer """no""" +266 32 optimizer """adam""" +266 32 training_loop """owa""" +266 32 negative_sampler """basic""" +266 32 evaluator """rankbased""" +266 33 dataset """wn18rr""" +266 33 model """ermlp""" +266 33 loss """bceaftersigmoid""" +266 33 regularizer """no""" +266 33 optimizer """adam""" +266 33 training_loop """owa""" +266 33 negative_sampler """basic""" +266 33 evaluator """rankbased""" +266 34 dataset """wn18rr""" +266 34 model """ermlp""" +266 34 loss """bceaftersigmoid""" +266 34 regularizer """no""" +266 34 optimizer """adam""" +266 34 training_loop """owa""" +266 34 negative_sampler """basic""" +266 34 evaluator """rankbased""" +266 35 dataset """wn18rr""" +266 35 model """ermlp""" +266 35 loss """bceaftersigmoid""" +266 35 regularizer """no""" +266 35 optimizer """adam""" +266 35 training_loop """owa""" +266 35 negative_sampler """basic""" +266 35 evaluator """rankbased""" +266 36 dataset """wn18rr""" +266 36 model """ermlp""" +266 36 loss """bceaftersigmoid""" +266 36 regularizer """no""" +266 36 optimizer """adam""" +266 36 training_loop """owa""" +266 36 negative_sampler """basic""" +266 36 evaluator """rankbased""" +266 37 dataset """wn18rr""" +266 37 model """ermlp""" +266 37 loss """bceaftersigmoid""" +266 37 regularizer """no""" +266 37 optimizer """adam""" +266 37 training_loop """owa""" +266 37 negative_sampler """basic""" +266 37 evaluator """rankbased""" +266 38 dataset """wn18rr""" +266 38 model """ermlp""" +266 38 loss """bceaftersigmoid""" +266 38 regularizer """no""" +266 38 optimizer """adam""" +266 38 training_loop """owa""" +266 38 negative_sampler """basic""" +266 38 evaluator """rankbased""" +266 39 dataset """wn18rr""" +266 39 model """ermlp""" +266 39 loss """bceaftersigmoid""" +266 39 regularizer """no""" +266 39 optimizer """adam""" +266 39 training_loop """owa""" +266 39 
negative_sampler """basic""" +266 39 evaluator """rankbased""" +266 40 dataset """wn18rr""" +266 40 model """ermlp""" +266 40 loss """bceaftersigmoid""" +266 40 regularizer """no""" +266 40 optimizer """adam""" +266 40 training_loop """owa""" +266 40 negative_sampler """basic""" +266 40 evaluator """rankbased""" +266 41 dataset """wn18rr""" +266 41 model """ermlp""" +266 41 loss """bceaftersigmoid""" +266 41 regularizer """no""" +266 41 optimizer """adam""" +266 41 training_loop """owa""" +266 41 negative_sampler """basic""" +266 41 evaluator """rankbased""" +266 42 dataset """wn18rr""" +266 42 model """ermlp""" +266 42 loss """bceaftersigmoid""" +266 42 regularizer """no""" +266 42 optimizer """adam""" +266 42 training_loop """owa""" +266 42 negative_sampler """basic""" +266 42 evaluator """rankbased""" +266 43 dataset """wn18rr""" +266 43 model """ermlp""" +266 43 loss """bceaftersigmoid""" +266 43 regularizer """no""" +266 43 optimizer """adam""" +266 43 training_loop """owa""" +266 43 negative_sampler """basic""" +266 43 evaluator """rankbased""" +266 44 dataset """wn18rr""" +266 44 model """ermlp""" +266 44 loss """bceaftersigmoid""" +266 44 regularizer """no""" +266 44 optimizer """adam""" +266 44 training_loop """owa""" +266 44 negative_sampler """basic""" +266 44 evaluator """rankbased""" +266 45 dataset """wn18rr""" +266 45 model """ermlp""" +266 45 loss """bceaftersigmoid""" +266 45 regularizer """no""" +266 45 optimizer """adam""" +266 45 training_loop """owa""" +266 45 negative_sampler """basic""" +266 45 evaluator """rankbased""" +266 46 dataset """wn18rr""" +266 46 model """ermlp""" +266 46 loss """bceaftersigmoid""" +266 46 regularizer """no""" +266 46 optimizer """adam""" +266 46 training_loop """owa""" +266 46 negative_sampler """basic""" +266 46 evaluator """rankbased""" +266 47 dataset """wn18rr""" +266 47 model """ermlp""" +266 47 loss """bceaftersigmoid""" +266 47 regularizer """no""" +266 47 optimizer """adam""" +266 47 training_loop """owa""" 
+266 47 negative_sampler """basic""" +266 47 evaluator """rankbased""" +266 48 dataset """wn18rr""" +266 48 model """ermlp""" +266 48 loss """bceaftersigmoid""" +266 48 regularizer """no""" +266 48 optimizer """adam""" +266 48 training_loop """owa""" +266 48 negative_sampler """basic""" +266 48 evaluator """rankbased""" +266 49 dataset """wn18rr""" +266 49 model """ermlp""" +266 49 loss """bceaftersigmoid""" +266 49 regularizer """no""" +266 49 optimizer """adam""" +266 49 training_loop """owa""" +266 49 negative_sampler """basic""" +266 49 evaluator """rankbased""" +266 50 dataset """wn18rr""" +266 50 model """ermlp""" +266 50 loss """bceaftersigmoid""" +266 50 regularizer """no""" +266 50 optimizer """adam""" +266 50 training_loop """owa""" +266 50 negative_sampler """basic""" +266 50 evaluator """rankbased""" +266 51 dataset """wn18rr""" +266 51 model """ermlp""" +266 51 loss """bceaftersigmoid""" +266 51 regularizer """no""" +266 51 optimizer """adam""" +266 51 training_loop """owa""" +266 51 negative_sampler """basic""" +266 51 evaluator """rankbased""" +266 52 dataset """wn18rr""" +266 52 model """ermlp""" +266 52 loss """bceaftersigmoid""" +266 52 regularizer """no""" +266 52 optimizer """adam""" +266 52 training_loop """owa""" +266 52 negative_sampler """basic""" +266 52 evaluator """rankbased""" +266 53 dataset """wn18rr""" +266 53 model """ermlp""" +266 53 loss """bceaftersigmoid""" +266 53 regularizer """no""" +266 53 optimizer """adam""" +266 53 training_loop """owa""" +266 53 negative_sampler """basic""" +266 53 evaluator """rankbased""" +266 54 dataset """wn18rr""" +266 54 model """ermlp""" +266 54 loss """bceaftersigmoid""" +266 54 regularizer """no""" +266 54 optimizer """adam""" +266 54 training_loop """owa""" +266 54 negative_sampler """basic""" +266 54 evaluator """rankbased""" +266 55 dataset """wn18rr""" +266 55 model """ermlp""" +266 55 loss """bceaftersigmoid""" +266 55 regularizer """no""" +266 55 optimizer """adam""" +266 55 training_loop 
"""owa""" +266 55 negative_sampler """basic""" +266 55 evaluator """rankbased""" +266 56 dataset """wn18rr""" +266 56 model """ermlp""" +266 56 loss """bceaftersigmoid""" +266 56 regularizer """no""" +266 56 optimizer """adam""" +266 56 training_loop """owa""" +266 56 negative_sampler """basic""" +266 56 evaluator """rankbased""" +266 57 dataset """wn18rr""" +266 57 model """ermlp""" +266 57 loss """bceaftersigmoid""" +266 57 regularizer """no""" +266 57 optimizer """adam""" +266 57 training_loop """owa""" +266 57 negative_sampler """basic""" +266 57 evaluator """rankbased""" +266 58 dataset """wn18rr""" +266 58 model """ermlp""" +266 58 loss """bceaftersigmoid""" +266 58 regularizer """no""" +266 58 optimizer """adam""" +266 58 training_loop """owa""" +266 58 negative_sampler """basic""" +266 58 evaluator """rankbased""" +266 59 dataset """wn18rr""" +266 59 model """ermlp""" +266 59 loss """bceaftersigmoid""" +266 59 regularizer """no""" +266 59 optimizer """adam""" +266 59 training_loop """owa""" +266 59 negative_sampler """basic""" +266 59 evaluator """rankbased""" +266 60 dataset """wn18rr""" +266 60 model """ermlp""" +266 60 loss """bceaftersigmoid""" +266 60 regularizer """no""" +266 60 optimizer """adam""" +266 60 training_loop """owa""" +266 60 negative_sampler """basic""" +266 60 evaluator """rankbased""" +266 61 dataset """wn18rr""" +266 61 model """ermlp""" +266 61 loss """bceaftersigmoid""" +266 61 regularizer """no""" +266 61 optimizer """adam""" +266 61 training_loop """owa""" +266 61 negative_sampler """basic""" +266 61 evaluator """rankbased""" +266 62 dataset """wn18rr""" +266 62 model """ermlp""" +266 62 loss """bceaftersigmoid""" +266 62 regularizer """no""" +266 62 optimizer """adam""" +266 62 training_loop """owa""" +266 62 negative_sampler """basic""" +266 62 evaluator """rankbased""" +266 63 dataset """wn18rr""" +266 63 model """ermlp""" +266 63 loss """bceaftersigmoid""" +266 63 regularizer """no""" +266 63 optimizer """adam""" +266 63 
training_loop """owa""" +266 63 negative_sampler """basic""" +266 63 evaluator """rankbased""" +266 64 dataset """wn18rr""" +266 64 model """ermlp""" +266 64 loss """bceaftersigmoid""" +266 64 regularizer """no""" +266 64 optimizer """adam""" +266 64 training_loop """owa""" +266 64 negative_sampler """basic""" +266 64 evaluator """rankbased""" +266 65 dataset """wn18rr""" +266 65 model """ermlp""" +266 65 loss """bceaftersigmoid""" +266 65 regularizer """no""" +266 65 optimizer """adam""" +266 65 training_loop """owa""" +266 65 negative_sampler """basic""" +266 65 evaluator """rankbased""" +266 66 dataset """wn18rr""" +266 66 model """ermlp""" +266 66 loss """bceaftersigmoid""" +266 66 regularizer """no""" +266 66 optimizer """adam""" +266 66 training_loop """owa""" +266 66 negative_sampler """basic""" +266 66 evaluator """rankbased""" +266 67 dataset """wn18rr""" +266 67 model """ermlp""" +266 67 loss """bceaftersigmoid""" +266 67 regularizer """no""" +266 67 optimizer """adam""" +266 67 training_loop """owa""" +266 67 negative_sampler """basic""" +266 67 evaluator """rankbased""" +266 68 dataset """wn18rr""" +266 68 model """ermlp""" +266 68 loss """bceaftersigmoid""" +266 68 regularizer """no""" +266 68 optimizer """adam""" +266 68 training_loop """owa""" +266 68 negative_sampler """basic""" +266 68 evaluator """rankbased""" +266 69 dataset """wn18rr""" +266 69 model """ermlp""" +266 69 loss """bceaftersigmoid""" +266 69 regularizer """no""" +266 69 optimizer """adam""" +266 69 training_loop """owa""" +266 69 negative_sampler """basic""" +266 69 evaluator """rankbased""" +266 70 dataset """wn18rr""" +266 70 model """ermlp""" +266 70 loss """bceaftersigmoid""" +266 70 regularizer """no""" +266 70 optimizer """adam""" +266 70 training_loop """owa""" +266 70 negative_sampler """basic""" +266 70 evaluator """rankbased""" +266 71 dataset """wn18rr""" +266 71 model """ermlp""" +266 71 loss """bceaftersigmoid""" +266 71 regularizer """no""" +266 71 optimizer """adam""" 
+266 71 training_loop """owa""" +266 71 negative_sampler """basic""" +266 71 evaluator """rankbased""" +266 72 dataset """wn18rr""" +266 72 model """ermlp""" +266 72 loss """bceaftersigmoid""" +266 72 regularizer """no""" +266 72 optimizer """adam""" +266 72 training_loop """owa""" +266 72 negative_sampler """basic""" +266 72 evaluator """rankbased""" +266 73 dataset """wn18rr""" +266 73 model """ermlp""" +266 73 loss """bceaftersigmoid""" +266 73 regularizer """no""" +266 73 optimizer """adam""" +266 73 training_loop """owa""" +266 73 negative_sampler """basic""" +266 73 evaluator """rankbased""" +266 74 dataset """wn18rr""" +266 74 model """ermlp""" +266 74 loss """bceaftersigmoid""" +266 74 regularizer """no""" +266 74 optimizer """adam""" +266 74 training_loop """owa""" +266 74 negative_sampler """basic""" +266 74 evaluator """rankbased""" +266 75 dataset """wn18rr""" +266 75 model """ermlp""" +266 75 loss """bceaftersigmoid""" +266 75 regularizer """no""" +266 75 optimizer """adam""" +266 75 training_loop """owa""" +266 75 negative_sampler """basic""" +266 75 evaluator """rankbased""" +266 76 dataset """wn18rr""" +266 76 model """ermlp""" +266 76 loss """bceaftersigmoid""" +266 76 regularizer """no""" +266 76 optimizer """adam""" +266 76 training_loop """owa""" +266 76 negative_sampler """basic""" +266 76 evaluator """rankbased""" +266 77 dataset """wn18rr""" +266 77 model """ermlp""" +266 77 loss """bceaftersigmoid""" +266 77 regularizer """no""" +266 77 optimizer """adam""" +266 77 training_loop """owa""" +266 77 negative_sampler """basic""" +266 77 evaluator """rankbased""" +266 78 dataset """wn18rr""" +266 78 model """ermlp""" +266 78 loss """bceaftersigmoid""" +266 78 regularizer """no""" +266 78 optimizer """adam""" +266 78 training_loop """owa""" +266 78 negative_sampler """basic""" +266 78 evaluator """rankbased""" +266 79 dataset """wn18rr""" +266 79 model """ermlp""" +266 79 loss """bceaftersigmoid""" +266 79 regularizer """no""" +266 79 optimizer 
"""adam""" +266 79 training_loop """owa""" +266 79 negative_sampler """basic""" +266 79 evaluator """rankbased""" +266 80 dataset """wn18rr""" +266 80 model """ermlp""" +266 80 loss """bceaftersigmoid""" +266 80 regularizer """no""" +266 80 optimizer """adam""" +266 80 training_loop """owa""" +266 80 negative_sampler """basic""" +266 80 evaluator """rankbased""" +266 81 dataset """wn18rr""" +266 81 model """ermlp""" +266 81 loss """bceaftersigmoid""" +266 81 regularizer """no""" +266 81 optimizer """adam""" +266 81 training_loop """owa""" +266 81 negative_sampler """basic""" +266 81 evaluator """rankbased""" +266 82 dataset """wn18rr""" +266 82 model """ermlp""" +266 82 loss """bceaftersigmoid""" +266 82 regularizer """no""" +266 82 optimizer """adam""" +266 82 training_loop """owa""" +266 82 negative_sampler """basic""" +266 82 evaluator """rankbased""" +267 1 model.embedding_dim 1.0 +267 1 optimizer.lr 0.0022130895823220423 +267 1 negative_sampler.num_negs_per_pos 60.0 +267 1 training.batch_size 0.0 +267 2 model.embedding_dim 1.0 +267 2 optimizer.lr 0.0033289654766522686 +267 2 negative_sampler.num_negs_per_pos 48.0 +267 2 training.batch_size 1.0 +267 3 model.embedding_dim 0.0 +267 3 optimizer.lr 0.0951344234749166 +267 3 negative_sampler.num_negs_per_pos 66.0 +267 3 training.batch_size 1.0 +267 4 model.embedding_dim 2.0 +267 4 optimizer.lr 0.03632992162970777 +267 4 negative_sampler.num_negs_per_pos 10.0 +267 4 training.batch_size 0.0 +267 5 model.embedding_dim 2.0 +267 5 optimizer.lr 0.009629398483382087 +267 5 negative_sampler.num_negs_per_pos 14.0 +267 5 training.batch_size 0.0 +267 6 model.embedding_dim 0.0 +267 6 optimizer.lr 0.05408651474659266 +267 6 negative_sampler.num_negs_per_pos 83.0 +267 6 training.batch_size 2.0 +267 7 model.embedding_dim 2.0 +267 7 optimizer.lr 0.0016349746381901418 +267 7 negative_sampler.num_negs_per_pos 12.0 +267 7 training.batch_size 2.0 +267 8 model.embedding_dim 1.0 +267 8 optimizer.lr 0.01722185711897924 +267 8 
negative_sampler.num_negs_per_pos 21.0 +267 8 training.batch_size 1.0 +267 9 model.embedding_dim 2.0 +267 9 optimizer.lr 0.0011317182903628781 +267 9 negative_sampler.num_negs_per_pos 24.0 +267 9 training.batch_size 0.0 +267 10 model.embedding_dim 1.0 +267 10 optimizer.lr 0.02237456107220611 +267 10 negative_sampler.num_negs_per_pos 35.0 +267 10 training.batch_size 0.0 +267 11 model.embedding_dim 1.0 +267 11 optimizer.lr 0.02774760745619086 +267 11 negative_sampler.num_negs_per_pos 89.0 +267 11 training.batch_size 0.0 +267 12 model.embedding_dim 0.0 +267 12 optimizer.lr 0.008329316700570485 +267 12 negative_sampler.num_negs_per_pos 53.0 +267 12 training.batch_size 2.0 +267 13 model.embedding_dim 1.0 +267 13 optimizer.lr 0.052221155393548185 +267 13 negative_sampler.num_negs_per_pos 72.0 +267 13 training.batch_size 1.0 +267 14 model.embedding_dim 1.0 +267 14 optimizer.lr 0.01209971928143591 +267 14 negative_sampler.num_negs_per_pos 25.0 +267 14 training.batch_size 2.0 +267 15 model.embedding_dim 1.0 +267 15 optimizer.lr 0.052734351708091015 +267 15 negative_sampler.num_negs_per_pos 61.0 +267 15 training.batch_size 1.0 +267 16 model.embedding_dim 2.0 +267 16 optimizer.lr 0.0017364204871895006 +267 16 negative_sampler.num_negs_per_pos 35.0 +267 16 training.batch_size 0.0 +267 17 model.embedding_dim 0.0 +267 17 optimizer.lr 0.0014089573212082288 +267 17 negative_sampler.num_negs_per_pos 54.0 +267 17 training.batch_size 2.0 +267 18 model.embedding_dim 0.0 +267 18 optimizer.lr 0.004683953662859497 +267 18 negative_sampler.num_negs_per_pos 97.0 +267 18 training.batch_size 2.0 +267 19 model.embedding_dim 2.0 +267 19 optimizer.lr 0.0580768985999687 +267 19 negative_sampler.num_negs_per_pos 50.0 +267 19 training.batch_size 2.0 +267 20 model.embedding_dim 0.0 +267 20 optimizer.lr 0.019082275711718822 +267 20 negative_sampler.num_negs_per_pos 38.0 +267 20 training.batch_size 2.0 +267 21 model.embedding_dim 2.0 +267 21 optimizer.lr 0.03745249314275102 +267 21 
negative_sampler.num_negs_per_pos 62.0 +267 21 training.batch_size 1.0 +267 22 model.embedding_dim 2.0 +267 22 optimizer.lr 0.05643259680571205 +267 22 negative_sampler.num_negs_per_pos 89.0 +267 22 training.batch_size 0.0 +267 23 model.embedding_dim 1.0 +267 23 optimizer.lr 0.011618528372137558 +267 23 negative_sampler.num_negs_per_pos 10.0 +267 23 training.batch_size 0.0 +267 24 model.embedding_dim 2.0 +267 24 optimizer.lr 0.02685413807338831 +267 24 negative_sampler.num_negs_per_pos 96.0 +267 24 training.batch_size 2.0 +267 25 model.embedding_dim 1.0 +267 25 optimizer.lr 0.022862515379650575 +267 25 negative_sampler.num_negs_per_pos 13.0 +267 25 training.batch_size 2.0 +267 26 model.embedding_dim 2.0 +267 26 optimizer.lr 0.0013815087576272234 +267 26 negative_sampler.num_negs_per_pos 56.0 +267 26 training.batch_size 2.0 +267 27 model.embedding_dim 1.0 +267 27 optimizer.lr 0.00173534568764944 +267 27 negative_sampler.num_negs_per_pos 78.0 +267 27 training.batch_size 2.0 +267 28 model.embedding_dim 2.0 +267 28 optimizer.lr 0.0037453134071156564 +267 28 negative_sampler.num_negs_per_pos 62.0 +267 28 training.batch_size 2.0 +267 29 model.embedding_dim 0.0 +267 29 optimizer.lr 0.005537646374184097 +267 29 negative_sampler.num_negs_per_pos 21.0 +267 29 training.batch_size 2.0 +267 30 model.embedding_dim 0.0 +267 30 optimizer.lr 0.03947744920738052 +267 30 negative_sampler.num_negs_per_pos 19.0 +267 30 training.batch_size 1.0 +267 31 model.embedding_dim 1.0 +267 31 optimizer.lr 0.09774883470458993 +267 31 negative_sampler.num_negs_per_pos 32.0 +267 31 training.batch_size 1.0 +267 32 model.embedding_dim 0.0 +267 32 optimizer.lr 0.006273066486866412 +267 32 negative_sampler.num_negs_per_pos 14.0 +267 32 training.batch_size 2.0 +267 33 model.embedding_dim 2.0 +267 33 optimizer.lr 0.004678861211741409 +267 33 negative_sampler.num_negs_per_pos 55.0 +267 33 training.batch_size 2.0 +267 34 model.embedding_dim 0.0 +267 34 optimizer.lr 0.008022270210101623 +267 34 
negative_sampler.num_negs_per_pos 38.0 +267 34 training.batch_size 2.0 +267 35 model.embedding_dim 0.0 +267 35 optimizer.lr 0.0021679123612871136 +267 35 negative_sampler.num_negs_per_pos 81.0 +267 35 training.batch_size 1.0 +267 36 model.embedding_dim 2.0 +267 36 optimizer.lr 0.020082469359897836 +267 36 negative_sampler.num_negs_per_pos 70.0 +267 36 training.batch_size 2.0 +267 37 model.embedding_dim 1.0 +267 37 optimizer.lr 0.010866247360467462 +267 37 negative_sampler.num_negs_per_pos 82.0 +267 37 training.batch_size 0.0 +267 38 model.embedding_dim 0.0 +267 38 optimizer.lr 0.010210735481237555 +267 38 negative_sampler.num_negs_per_pos 89.0 +267 38 training.batch_size 1.0 +267 39 model.embedding_dim 0.0 +267 39 optimizer.lr 0.0016481052596386662 +267 39 negative_sampler.num_negs_per_pos 44.0 +267 39 training.batch_size 1.0 +267 40 model.embedding_dim 0.0 +267 40 optimizer.lr 0.015961618275028382 +267 40 negative_sampler.num_negs_per_pos 45.0 +267 40 training.batch_size 1.0 +267 41 model.embedding_dim 2.0 +267 41 optimizer.lr 0.020021363909113486 +267 41 negative_sampler.num_negs_per_pos 46.0 +267 41 training.batch_size 2.0 +267 42 model.embedding_dim 1.0 +267 42 optimizer.lr 0.053459956064267455 +267 42 negative_sampler.num_negs_per_pos 34.0 +267 42 training.batch_size 2.0 +267 43 model.embedding_dim 1.0 +267 43 optimizer.lr 0.011590075822729863 +267 43 negative_sampler.num_negs_per_pos 32.0 +267 43 training.batch_size 0.0 +267 44 model.embedding_dim 1.0 +267 44 optimizer.lr 0.001788113444029161 +267 44 negative_sampler.num_negs_per_pos 81.0 +267 44 training.batch_size 1.0 +267 45 model.embedding_dim 2.0 +267 45 optimizer.lr 0.01069069011078037 +267 45 negative_sampler.num_negs_per_pos 36.0 +267 45 training.batch_size 2.0 +267 46 model.embedding_dim 1.0 +267 46 optimizer.lr 0.00216336076836531 +267 46 negative_sampler.num_negs_per_pos 67.0 +267 46 training.batch_size 0.0 +267 47 model.embedding_dim 2.0 +267 47 optimizer.lr 0.0012063106844867459 +267 47 
negative_sampler.num_negs_per_pos 28.0 +267 47 training.batch_size 1.0 +267 48 model.embedding_dim 1.0 +267 48 optimizer.lr 0.03840025148262062 +267 48 negative_sampler.num_negs_per_pos 45.0 +267 48 training.batch_size 2.0 +267 49 model.embedding_dim 1.0 +267 49 optimizer.lr 0.09799533909297074 +267 49 negative_sampler.num_negs_per_pos 6.0 +267 49 training.batch_size 2.0 +267 50 model.embedding_dim 1.0 +267 50 optimizer.lr 0.001273769926984742 +267 50 negative_sampler.num_negs_per_pos 63.0 +267 50 training.batch_size 2.0 +267 51 model.embedding_dim 0.0 +267 51 optimizer.lr 0.011846918028592552 +267 51 negative_sampler.num_negs_per_pos 19.0 +267 51 training.batch_size 1.0 +267 52 model.embedding_dim 1.0 +267 52 optimizer.lr 0.0022985549457285024 +267 52 negative_sampler.num_negs_per_pos 70.0 +267 52 training.batch_size 1.0 +267 53 model.embedding_dim 1.0 +267 53 optimizer.lr 0.006733505940561051 +267 53 negative_sampler.num_negs_per_pos 77.0 +267 53 training.batch_size 0.0 +267 54 model.embedding_dim 1.0 +267 54 optimizer.lr 0.015010197179024973 +267 54 negative_sampler.num_negs_per_pos 6.0 +267 54 training.batch_size 0.0 +267 55 model.embedding_dim 1.0 +267 55 optimizer.lr 0.0030115070912668826 +267 55 negative_sampler.num_negs_per_pos 32.0 +267 55 training.batch_size 0.0 +267 56 model.embedding_dim 2.0 +267 56 optimizer.lr 0.002133773058625402 +267 56 negative_sampler.num_negs_per_pos 44.0 +267 56 training.batch_size 1.0 +267 57 model.embedding_dim 1.0 +267 57 optimizer.lr 0.004645235996059716 +267 57 negative_sampler.num_negs_per_pos 33.0 +267 57 training.batch_size 0.0 +267 58 model.embedding_dim 0.0 +267 58 optimizer.lr 0.0014944239508025993 +267 58 negative_sampler.num_negs_per_pos 16.0 +267 58 training.batch_size 1.0 +267 59 model.embedding_dim 1.0 +267 59 optimizer.lr 0.01361026674534951 +267 59 negative_sampler.num_negs_per_pos 45.0 +267 59 training.batch_size 0.0 +267 60 model.embedding_dim 2.0 +267 60 optimizer.lr 0.011026668206596592 +267 60 
negative_sampler.num_negs_per_pos 62.0 +267 60 training.batch_size 2.0 +267 61 model.embedding_dim 2.0 +267 61 optimizer.lr 0.004429451633957097 +267 61 negative_sampler.num_negs_per_pos 31.0 +267 61 training.batch_size 1.0 +267 62 model.embedding_dim 1.0 +267 62 optimizer.lr 0.006316848189270727 +267 62 negative_sampler.num_negs_per_pos 29.0 +267 62 training.batch_size 1.0 +267 63 model.embedding_dim 2.0 +267 63 optimizer.lr 0.044124167362648375 +267 63 negative_sampler.num_negs_per_pos 89.0 +267 63 training.batch_size 2.0 +267 64 model.embedding_dim 2.0 +267 64 optimizer.lr 0.0011511044561292518 +267 64 negative_sampler.num_negs_per_pos 44.0 +267 64 training.batch_size 1.0 +267 65 model.embedding_dim 0.0 +267 65 optimizer.lr 0.05698104507598848 +267 65 negative_sampler.num_negs_per_pos 62.0 +267 65 training.batch_size 1.0 +267 66 model.embedding_dim 0.0 +267 66 optimizer.lr 0.015861697805470146 +267 66 negative_sampler.num_negs_per_pos 2.0 +267 66 training.batch_size 1.0 +267 67 model.embedding_dim 1.0 +267 67 optimizer.lr 0.001465874881371077 +267 67 negative_sampler.num_negs_per_pos 66.0 +267 67 training.batch_size 2.0 +267 68 model.embedding_dim 2.0 +267 68 optimizer.lr 0.055517887581470374 +267 68 negative_sampler.num_negs_per_pos 71.0 +267 68 training.batch_size 1.0 +267 69 model.embedding_dim 1.0 +267 69 optimizer.lr 0.008120519723139728 +267 69 negative_sampler.num_negs_per_pos 7.0 +267 69 training.batch_size 0.0 +267 70 model.embedding_dim 0.0 +267 70 optimizer.lr 0.0029976885660599885 +267 70 negative_sampler.num_negs_per_pos 70.0 +267 70 training.batch_size 0.0 +267 71 model.embedding_dim 2.0 +267 71 optimizer.lr 0.004415292980333968 +267 71 negative_sampler.num_negs_per_pos 27.0 +267 71 training.batch_size 2.0 +267 72 model.embedding_dim 0.0 +267 72 optimizer.lr 0.007991977572440773 +267 72 negative_sampler.num_negs_per_pos 57.0 +267 72 training.batch_size 1.0 +267 73 model.embedding_dim 1.0 +267 73 optimizer.lr 0.004173149416927739 +267 73 
negative_sampler.num_negs_per_pos 45.0 +267 73 training.batch_size 0.0 +267 74 model.embedding_dim 1.0 +267 74 optimizer.lr 0.03299252091628988 +267 74 negative_sampler.num_negs_per_pos 22.0 +267 74 training.batch_size 2.0 +267 75 model.embedding_dim 2.0 +267 75 optimizer.lr 0.07699464832470262 +267 75 negative_sampler.num_negs_per_pos 36.0 +267 75 training.batch_size 2.0 +267 76 model.embedding_dim 0.0 +267 76 optimizer.lr 0.019996162325453464 +267 76 negative_sampler.num_negs_per_pos 41.0 +267 76 training.batch_size 0.0 +267 77 model.embedding_dim 2.0 +267 77 optimizer.lr 0.037331163703426526 +267 77 negative_sampler.num_negs_per_pos 4.0 +267 77 training.batch_size 1.0 +267 78 model.embedding_dim 1.0 +267 78 optimizer.lr 0.0011032534711562531 +267 78 negative_sampler.num_negs_per_pos 68.0 +267 78 training.batch_size 1.0 +267 1 dataset """wn18rr""" +267 1 model """ermlp""" +267 1 loss """softplus""" +267 1 regularizer """no""" +267 1 optimizer """adam""" +267 1 training_loop """owa""" +267 1 negative_sampler """basic""" +267 1 evaluator """rankbased""" +267 2 dataset """wn18rr""" +267 2 model """ermlp""" +267 2 loss """softplus""" +267 2 regularizer """no""" +267 2 optimizer """adam""" +267 2 training_loop """owa""" +267 2 negative_sampler """basic""" +267 2 evaluator """rankbased""" +267 3 dataset """wn18rr""" +267 3 model """ermlp""" +267 3 loss """softplus""" +267 3 regularizer """no""" +267 3 optimizer """adam""" +267 3 training_loop """owa""" +267 3 negative_sampler """basic""" +267 3 evaluator """rankbased""" +267 4 dataset """wn18rr""" +267 4 model """ermlp""" +267 4 loss """softplus""" +267 4 regularizer """no""" +267 4 optimizer """adam""" +267 4 training_loop """owa""" +267 4 negative_sampler """basic""" +267 4 evaluator """rankbased""" +267 5 dataset """wn18rr""" +267 5 model """ermlp""" +267 5 loss """softplus""" +267 5 regularizer """no""" +267 5 optimizer """adam""" +267 5 training_loop """owa""" +267 5 negative_sampler """basic""" +267 5 evaluator 
"""rankbased""" +267 6 dataset """wn18rr""" +267 6 model """ermlp""" +267 6 loss """softplus""" +267 6 regularizer """no""" +267 6 optimizer """adam""" +267 6 training_loop """owa""" +267 6 negative_sampler """basic""" +267 6 evaluator """rankbased""" +267 7 dataset """wn18rr""" +267 7 model """ermlp""" +267 7 loss """softplus""" +267 7 regularizer """no""" +267 7 optimizer """adam""" +267 7 training_loop """owa""" +267 7 negative_sampler """basic""" +267 7 evaluator """rankbased""" +267 8 dataset """wn18rr""" +267 8 model """ermlp""" +267 8 loss """softplus""" +267 8 regularizer """no""" +267 8 optimizer """adam""" +267 8 training_loop """owa""" +267 8 negative_sampler """basic""" +267 8 evaluator """rankbased""" +267 9 dataset """wn18rr""" +267 9 model """ermlp""" +267 9 loss """softplus""" +267 9 regularizer """no""" +267 9 optimizer """adam""" +267 9 training_loop """owa""" +267 9 negative_sampler """basic""" +267 9 evaluator """rankbased""" +267 10 dataset """wn18rr""" +267 10 model """ermlp""" +267 10 loss """softplus""" +267 10 regularizer """no""" +267 10 optimizer """adam""" +267 10 training_loop """owa""" +267 10 negative_sampler """basic""" +267 10 evaluator """rankbased""" +267 11 dataset """wn18rr""" +267 11 model """ermlp""" +267 11 loss """softplus""" +267 11 regularizer """no""" +267 11 optimizer """adam""" +267 11 training_loop """owa""" +267 11 negative_sampler """basic""" +267 11 evaluator """rankbased""" +267 12 dataset """wn18rr""" +267 12 model """ermlp""" +267 12 loss """softplus""" +267 12 regularizer """no""" +267 12 optimizer """adam""" +267 12 training_loop """owa""" +267 12 negative_sampler """basic""" +267 12 evaluator """rankbased""" +267 13 dataset """wn18rr""" +267 13 model """ermlp""" +267 13 loss """softplus""" +267 13 regularizer """no""" +267 13 optimizer """adam""" +267 13 training_loop """owa""" +267 13 negative_sampler """basic""" +267 13 evaluator """rankbased""" +267 14 dataset """wn18rr""" +267 14 model """ermlp""" +267 14 
loss """softplus""" +267 14 regularizer """no""" +267 14 optimizer """adam""" +267 14 training_loop """owa""" +267 14 negative_sampler """basic""" +267 14 evaluator """rankbased""" +267 15 dataset """wn18rr""" +267 15 model """ermlp""" +267 15 loss """softplus""" +267 15 regularizer """no""" +267 15 optimizer """adam""" +267 15 training_loop """owa""" +267 15 negative_sampler """basic""" +267 15 evaluator """rankbased""" +267 16 dataset """wn18rr""" +267 16 model """ermlp""" +267 16 loss """softplus""" +267 16 regularizer """no""" +267 16 optimizer """adam""" +267 16 training_loop """owa""" +267 16 negative_sampler """basic""" +267 16 evaluator """rankbased""" +267 17 dataset """wn18rr""" +267 17 model """ermlp""" +267 17 loss """softplus""" +267 17 regularizer """no""" +267 17 optimizer """adam""" +267 17 training_loop """owa""" +267 17 negative_sampler """basic""" +267 17 evaluator """rankbased""" +267 18 dataset """wn18rr""" +267 18 model """ermlp""" +267 18 loss """softplus""" +267 18 regularizer """no""" +267 18 optimizer """adam""" +267 18 training_loop """owa""" +267 18 negative_sampler """basic""" +267 18 evaluator """rankbased""" +267 19 dataset """wn18rr""" +267 19 model """ermlp""" +267 19 loss """softplus""" +267 19 regularizer """no""" +267 19 optimizer """adam""" +267 19 training_loop """owa""" +267 19 negative_sampler """basic""" +267 19 evaluator """rankbased""" +267 20 dataset """wn18rr""" +267 20 model """ermlp""" +267 20 loss """softplus""" +267 20 regularizer """no""" +267 20 optimizer """adam""" +267 20 training_loop """owa""" +267 20 negative_sampler """basic""" +267 20 evaluator """rankbased""" +267 21 dataset """wn18rr""" +267 21 model """ermlp""" +267 21 loss """softplus""" +267 21 regularizer """no""" +267 21 optimizer """adam""" +267 21 training_loop """owa""" +267 21 negative_sampler """basic""" +267 21 evaluator """rankbased""" +267 22 dataset """wn18rr""" +267 22 model """ermlp""" +267 22 loss """softplus""" +267 22 regularizer 
"""no""" +267 22 optimizer """adam""" +267 22 training_loop """owa""" +267 22 negative_sampler """basic""" +267 22 evaluator """rankbased""" +267 23 dataset """wn18rr""" +267 23 model """ermlp""" +267 23 loss """softplus""" +267 23 regularizer """no""" +267 23 optimizer """adam""" +267 23 training_loop """owa""" +267 23 negative_sampler """basic""" +267 23 evaluator """rankbased""" +267 24 dataset """wn18rr""" +267 24 model """ermlp""" +267 24 loss """softplus""" +267 24 regularizer """no""" +267 24 optimizer """adam""" +267 24 training_loop """owa""" +267 24 negative_sampler """basic""" +267 24 evaluator """rankbased""" +267 25 dataset """wn18rr""" +267 25 model """ermlp""" +267 25 loss """softplus""" +267 25 regularizer """no""" +267 25 optimizer """adam""" +267 25 training_loop """owa""" +267 25 negative_sampler """basic""" +267 25 evaluator """rankbased""" +267 26 dataset """wn18rr""" +267 26 model """ermlp""" +267 26 loss """softplus""" +267 26 regularizer """no""" +267 26 optimizer """adam""" +267 26 training_loop """owa""" +267 26 negative_sampler """basic""" +267 26 evaluator """rankbased""" +267 27 dataset """wn18rr""" +267 27 model """ermlp""" +267 27 loss """softplus""" +267 27 regularizer """no""" +267 27 optimizer """adam""" +267 27 training_loop """owa""" +267 27 negative_sampler """basic""" +267 27 evaluator """rankbased""" +267 28 dataset """wn18rr""" +267 28 model """ermlp""" +267 28 loss """softplus""" +267 28 regularizer """no""" +267 28 optimizer """adam""" +267 28 training_loop """owa""" +267 28 negative_sampler """basic""" +267 28 evaluator """rankbased""" +267 29 dataset """wn18rr""" +267 29 model """ermlp""" +267 29 loss """softplus""" +267 29 regularizer """no""" +267 29 optimizer """adam""" +267 29 training_loop """owa""" +267 29 negative_sampler """basic""" +267 29 evaluator """rankbased""" +267 30 dataset """wn18rr""" +267 30 model """ermlp""" +267 30 loss """softplus""" +267 30 regularizer """no""" +267 30 optimizer """adam""" +267 30 
training_loop """owa""" +267 30 negative_sampler """basic""" +267 30 evaluator """rankbased""" +267 31 dataset """wn18rr""" +267 31 model """ermlp""" +267 31 loss """softplus""" +267 31 regularizer """no""" +267 31 optimizer """adam""" +267 31 training_loop """owa""" +267 31 negative_sampler """basic""" +267 31 evaluator """rankbased""" +267 32 dataset """wn18rr""" +267 32 model """ermlp""" +267 32 loss """softplus""" +267 32 regularizer """no""" +267 32 optimizer """adam""" +267 32 training_loop """owa""" +267 32 negative_sampler """basic""" +267 32 evaluator """rankbased""" +267 33 dataset """wn18rr""" +267 33 model """ermlp""" +267 33 loss """softplus""" +267 33 regularizer """no""" +267 33 optimizer """adam""" +267 33 training_loop """owa""" +267 33 negative_sampler """basic""" +267 33 evaluator """rankbased""" +267 34 dataset """wn18rr""" +267 34 model """ermlp""" +267 34 loss """softplus""" +267 34 regularizer """no""" +267 34 optimizer """adam""" +267 34 training_loop """owa""" +267 34 negative_sampler """basic""" +267 34 evaluator """rankbased""" +267 35 dataset """wn18rr""" +267 35 model """ermlp""" +267 35 loss """softplus""" +267 35 regularizer """no""" +267 35 optimizer """adam""" +267 35 training_loop """owa""" +267 35 negative_sampler """basic""" +267 35 evaluator """rankbased""" +267 36 dataset """wn18rr""" +267 36 model """ermlp""" +267 36 loss """softplus""" +267 36 regularizer """no""" +267 36 optimizer """adam""" +267 36 training_loop """owa""" +267 36 negative_sampler """basic""" +267 36 evaluator """rankbased""" +267 37 dataset """wn18rr""" +267 37 model """ermlp""" +267 37 loss """softplus""" +267 37 regularizer """no""" +267 37 optimizer """adam""" +267 37 training_loop """owa""" +267 37 negative_sampler """basic""" +267 37 evaluator """rankbased""" +267 38 dataset """wn18rr""" +267 38 model """ermlp""" +267 38 loss """softplus""" +267 38 regularizer """no""" +267 38 optimizer """adam""" +267 38 training_loop """owa""" +267 38 
negative_sampler """basic""" +267 38 evaluator """rankbased""" +267 39 dataset """wn18rr""" +267 39 model """ermlp""" +267 39 loss """softplus""" +267 39 regularizer """no""" +267 39 optimizer """adam""" +267 39 training_loop """owa""" +267 39 negative_sampler """basic""" +267 39 evaluator """rankbased""" +267 40 dataset """wn18rr""" +267 40 model """ermlp""" +267 40 loss """softplus""" +267 40 regularizer """no""" +267 40 optimizer """adam""" +267 40 training_loop """owa""" +267 40 negative_sampler """basic""" +267 40 evaluator """rankbased""" +267 41 dataset """wn18rr""" +267 41 model """ermlp""" +267 41 loss """softplus""" +267 41 regularizer """no""" +267 41 optimizer """adam""" +267 41 training_loop """owa""" +267 41 negative_sampler """basic""" +267 41 evaluator """rankbased""" +267 42 dataset """wn18rr""" +267 42 model """ermlp""" +267 42 loss """softplus""" +267 42 regularizer """no""" +267 42 optimizer """adam""" +267 42 training_loop """owa""" +267 42 negative_sampler """basic""" +267 42 evaluator """rankbased""" +267 43 dataset """wn18rr""" +267 43 model """ermlp""" +267 43 loss """softplus""" +267 43 regularizer """no""" +267 43 optimizer """adam""" +267 43 training_loop """owa""" +267 43 negative_sampler """basic""" +267 43 evaluator """rankbased""" +267 44 dataset """wn18rr""" +267 44 model """ermlp""" +267 44 loss """softplus""" +267 44 regularizer """no""" +267 44 optimizer """adam""" +267 44 training_loop """owa""" +267 44 negative_sampler """basic""" +267 44 evaluator """rankbased""" +267 45 dataset """wn18rr""" +267 45 model """ermlp""" +267 45 loss """softplus""" +267 45 regularizer """no""" +267 45 optimizer """adam""" +267 45 training_loop """owa""" +267 45 negative_sampler """basic""" +267 45 evaluator """rankbased""" +267 46 dataset """wn18rr""" +267 46 model """ermlp""" +267 46 loss """softplus""" +267 46 regularizer """no""" +267 46 optimizer """adam""" +267 46 training_loop """owa""" +267 46 negative_sampler """basic""" +267 46 evaluator 
"""rankbased""" +267 47 dataset """wn18rr""" +267 47 model """ermlp""" +267 47 loss """softplus""" +267 47 regularizer """no""" +267 47 optimizer """adam""" +267 47 training_loop """owa""" +267 47 negative_sampler """basic""" +267 47 evaluator """rankbased""" +267 48 dataset """wn18rr""" +267 48 model """ermlp""" +267 48 loss """softplus""" +267 48 regularizer """no""" +267 48 optimizer """adam""" +267 48 training_loop """owa""" +267 48 negative_sampler """basic""" +267 48 evaluator """rankbased""" +267 49 dataset """wn18rr""" +267 49 model """ermlp""" +267 49 loss """softplus""" +267 49 regularizer """no""" +267 49 optimizer """adam""" +267 49 training_loop """owa""" +267 49 negative_sampler """basic""" +267 49 evaluator """rankbased""" +267 50 dataset """wn18rr""" +267 50 model """ermlp""" +267 50 loss """softplus""" +267 50 regularizer """no""" +267 50 optimizer """adam""" +267 50 training_loop """owa""" +267 50 negative_sampler """basic""" +267 50 evaluator """rankbased""" +267 51 dataset """wn18rr""" +267 51 model """ermlp""" +267 51 loss """softplus""" +267 51 regularizer """no""" +267 51 optimizer """adam""" +267 51 training_loop """owa""" +267 51 negative_sampler """basic""" +267 51 evaluator """rankbased""" +267 52 dataset """wn18rr""" +267 52 model """ermlp""" +267 52 loss """softplus""" +267 52 regularizer """no""" +267 52 optimizer """adam""" +267 52 training_loop """owa""" +267 52 negative_sampler """basic""" +267 52 evaluator """rankbased""" +267 53 dataset """wn18rr""" +267 53 model """ermlp""" +267 53 loss """softplus""" +267 53 regularizer """no""" +267 53 optimizer """adam""" +267 53 training_loop """owa""" +267 53 negative_sampler """basic""" +267 53 evaluator """rankbased""" +267 54 dataset """wn18rr""" +267 54 model """ermlp""" +267 54 loss """softplus""" +267 54 regularizer """no""" +267 54 optimizer """adam""" +267 54 training_loop """owa""" +267 54 negative_sampler """basic""" +267 54 evaluator """rankbased""" +267 55 dataset """wn18rr""" 
+267 55 model """ermlp""" +267 55 loss """softplus""" +267 55 regularizer """no""" +267 55 optimizer """adam""" +267 55 training_loop """owa""" +267 55 negative_sampler """basic""" +267 55 evaluator """rankbased""" +267 56 dataset """wn18rr""" +267 56 model """ermlp""" +267 56 loss """softplus""" +267 56 regularizer """no""" +267 56 optimizer """adam""" +267 56 training_loop """owa""" +267 56 negative_sampler """basic""" +267 56 evaluator """rankbased""" +267 57 dataset """wn18rr""" +267 57 model """ermlp""" +267 57 loss """softplus""" +267 57 regularizer """no""" +267 57 optimizer """adam""" +267 57 training_loop """owa""" +267 57 negative_sampler """basic""" +267 57 evaluator """rankbased""" +267 58 dataset """wn18rr""" +267 58 model """ermlp""" +267 58 loss """softplus""" +267 58 regularizer """no""" +267 58 optimizer """adam""" +267 58 training_loop """owa""" +267 58 negative_sampler """basic""" +267 58 evaluator """rankbased""" +267 59 dataset """wn18rr""" +267 59 model """ermlp""" +267 59 loss """softplus""" +267 59 regularizer """no""" +267 59 optimizer """adam""" +267 59 training_loop """owa""" +267 59 negative_sampler """basic""" +267 59 evaluator """rankbased""" +267 60 dataset """wn18rr""" +267 60 model """ermlp""" +267 60 loss """softplus""" +267 60 regularizer """no""" +267 60 optimizer """adam""" +267 60 training_loop """owa""" +267 60 negative_sampler """basic""" +267 60 evaluator """rankbased""" +267 61 dataset """wn18rr""" +267 61 model """ermlp""" +267 61 loss """softplus""" +267 61 regularizer """no""" +267 61 optimizer """adam""" +267 61 training_loop """owa""" +267 61 negative_sampler """basic""" +267 61 evaluator """rankbased""" +267 62 dataset """wn18rr""" +267 62 model """ermlp""" +267 62 loss """softplus""" +267 62 regularizer """no""" +267 62 optimizer """adam""" +267 62 training_loop """owa""" +267 62 negative_sampler """basic""" +267 62 evaluator """rankbased""" +267 63 dataset """wn18rr""" +267 63 model """ermlp""" +267 63 loss 
"""softplus""" +267 63 regularizer """no""" +267 63 optimizer """adam""" +267 63 training_loop """owa""" +267 63 negative_sampler """basic""" +267 63 evaluator """rankbased""" +267 64 dataset """wn18rr""" +267 64 model """ermlp""" +267 64 loss """softplus""" +267 64 regularizer """no""" +267 64 optimizer """adam""" +267 64 training_loop """owa""" +267 64 negative_sampler """basic""" +267 64 evaluator """rankbased""" +267 65 dataset """wn18rr""" +267 65 model """ermlp""" +267 65 loss """softplus""" +267 65 regularizer """no""" +267 65 optimizer """adam""" +267 65 training_loop """owa""" +267 65 negative_sampler """basic""" +267 65 evaluator """rankbased""" +267 66 dataset """wn18rr""" +267 66 model """ermlp""" +267 66 loss """softplus""" +267 66 regularizer """no""" +267 66 optimizer """adam""" +267 66 training_loop """owa""" +267 66 negative_sampler """basic""" +267 66 evaluator """rankbased""" +267 67 dataset """wn18rr""" +267 67 model """ermlp""" +267 67 loss """softplus""" +267 67 regularizer """no""" +267 67 optimizer """adam""" +267 67 training_loop """owa""" +267 67 negative_sampler """basic""" +267 67 evaluator """rankbased""" +267 68 dataset """wn18rr""" +267 68 model """ermlp""" +267 68 loss """softplus""" +267 68 regularizer """no""" +267 68 optimizer """adam""" +267 68 training_loop """owa""" +267 68 negative_sampler """basic""" +267 68 evaluator """rankbased""" +267 69 dataset """wn18rr""" +267 69 model """ermlp""" +267 69 loss """softplus""" +267 69 regularizer """no""" +267 69 optimizer """adam""" +267 69 training_loop """owa""" +267 69 negative_sampler """basic""" +267 69 evaluator """rankbased""" +267 70 dataset """wn18rr""" +267 70 model """ermlp""" +267 70 loss """softplus""" +267 70 regularizer """no""" +267 70 optimizer """adam""" +267 70 training_loop """owa""" +267 70 negative_sampler """basic""" +267 70 evaluator """rankbased""" +267 71 dataset """wn18rr""" +267 71 model """ermlp""" +267 71 loss """softplus""" +267 71 regularizer """no""" 
+267 71 optimizer """adam""" +267 71 training_loop """owa""" +267 71 negative_sampler """basic""" +267 71 evaluator """rankbased""" +267 72 dataset """wn18rr""" +267 72 model """ermlp""" +267 72 loss """softplus""" +267 72 regularizer """no""" +267 72 optimizer """adam""" +267 72 training_loop """owa""" +267 72 negative_sampler """basic""" +267 72 evaluator """rankbased""" +267 73 dataset """wn18rr""" +267 73 model """ermlp""" +267 73 loss """softplus""" +267 73 regularizer """no""" +267 73 optimizer """adam""" +267 73 training_loop """owa""" +267 73 negative_sampler """basic""" +267 73 evaluator """rankbased""" +267 74 dataset """wn18rr""" +267 74 model """ermlp""" +267 74 loss """softplus""" +267 74 regularizer """no""" +267 74 optimizer """adam""" +267 74 training_loop """owa""" +267 74 negative_sampler """basic""" +267 74 evaluator """rankbased""" +267 75 dataset """wn18rr""" +267 75 model """ermlp""" +267 75 loss """softplus""" +267 75 regularizer """no""" +267 75 optimizer """adam""" +267 75 training_loop """owa""" +267 75 negative_sampler """basic""" +267 75 evaluator """rankbased""" +267 76 dataset """wn18rr""" +267 76 model """ermlp""" +267 76 loss """softplus""" +267 76 regularizer """no""" +267 76 optimizer """adam""" +267 76 training_loop """owa""" +267 76 negative_sampler """basic""" +267 76 evaluator """rankbased""" +267 77 dataset """wn18rr""" +267 77 model """ermlp""" +267 77 loss """softplus""" +267 77 regularizer """no""" +267 77 optimizer """adam""" +267 77 training_loop """owa""" +267 77 negative_sampler """basic""" +267 77 evaluator """rankbased""" +267 78 dataset """wn18rr""" +267 78 model """ermlp""" +267 78 loss """softplus""" +267 78 regularizer """no""" +267 78 optimizer """adam""" +267 78 training_loop """owa""" +267 78 negative_sampler """basic""" +267 78 evaluator """rankbased""" +268 1 model.embedding_dim 1.0 +268 1 optimizer.lr 0.014569698187213374 +268 1 training.batch_size 1.0 +268 1 training.label_smoothing 0.016305950964191702 
+268 2 model.embedding_dim 0.0 +268 2 optimizer.lr 0.009446467144799535 +268 2 training.batch_size 0.0 +268 2 training.label_smoothing 0.00258172415647081 +268 3 model.embedding_dim 2.0 +268 3 optimizer.lr 0.0993271612899245 +268 3 training.batch_size 1.0 +268 3 training.label_smoothing 0.2674594096531358 +268 4 model.embedding_dim 2.0 +268 4 optimizer.lr 0.018125275810933365 +268 4 training.batch_size 1.0 +268 4 training.label_smoothing 0.003079305984169882 +268 5 model.embedding_dim 0.0 +268 5 optimizer.lr 0.01341660098755647 +268 5 training.batch_size 2.0 +268 5 training.label_smoothing 0.00902145050819791 +268 6 model.embedding_dim 0.0 +268 6 optimizer.lr 0.03358252599925698 +268 6 training.batch_size 0.0 +268 6 training.label_smoothing 0.8945154720301325 +268 1 dataset """wn18rr""" +268 1 model """ermlp""" +268 1 loss """crossentropy""" +268 1 regularizer """no""" +268 1 optimizer """adam""" +268 1 training_loop """lcwa""" +268 1 evaluator """rankbased""" +268 2 dataset """wn18rr""" +268 2 model """ermlp""" +268 2 loss """crossentropy""" +268 2 regularizer """no""" +268 2 optimizer """adam""" +268 2 training_loop """lcwa""" +268 2 evaluator """rankbased""" +268 3 dataset """wn18rr""" +268 3 model """ermlp""" +268 3 loss """crossentropy""" +268 3 regularizer """no""" +268 3 optimizer """adam""" +268 3 training_loop """lcwa""" +268 3 evaluator """rankbased""" +268 4 dataset """wn18rr""" +268 4 model """ermlp""" +268 4 loss """crossentropy""" +268 4 regularizer """no""" +268 4 optimizer """adam""" +268 4 training_loop """lcwa""" +268 4 evaluator """rankbased""" +268 5 dataset """wn18rr""" +268 5 model """ermlp""" +268 5 loss """crossentropy""" +268 5 regularizer """no""" +268 5 optimizer """adam""" +268 5 training_loop """lcwa""" +268 5 evaluator """rankbased""" +268 6 dataset """wn18rr""" +268 6 model """ermlp""" +268 6 loss """crossentropy""" +268 6 regularizer """no""" +268 6 optimizer """adam""" +268 6 training_loop """lcwa""" +268 6 evaluator """rankbased""" 
+269 1 model.embedding_dim 1.0 +269 1 optimizer.lr 0.004577579558417393 +269 1 training.batch_size 2.0 +269 1 training.label_smoothing 0.27897501888267096 +269 2 model.embedding_dim 2.0 +269 2 optimizer.lr 0.06652503139508521 +269 2 training.batch_size 2.0 +269 2 training.label_smoothing 0.02744951568420038 +269 3 model.embedding_dim 1.0 +269 3 optimizer.lr 0.024625134714366653 +269 3 training.batch_size 1.0 +269 3 training.label_smoothing 0.049651915443920655 +269 4 model.embedding_dim 0.0 +269 4 optimizer.lr 0.031139115412453608 +269 4 training.batch_size 0.0 +269 4 training.label_smoothing 0.011731337712774114 +269 5 model.embedding_dim 2.0 +269 5 optimizer.lr 0.02800381121824729 +269 5 training.batch_size 2.0 +269 5 training.label_smoothing 0.2954190217745879 +269 6 model.embedding_dim 1.0 +269 6 optimizer.lr 0.07292853090884352 +269 6 training.batch_size 1.0 +269 6 training.label_smoothing 0.004568635412649612 +269 7 model.embedding_dim 0.0 +269 7 optimizer.lr 0.017422049096469317 +269 7 training.batch_size 1.0 +269 7 training.label_smoothing 0.07003213904921646 +269 8 model.embedding_dim 2.0 +269 8 optimizer.lr 0.006297391062265945 +269 8 training.batch_size 2.0 +269 8 training.label_smoothing 0.016067270978086943 +269 1 dataset """wn18rr""" +269 1 model """ermlp""" +269 1 loss """crossentropy""" +269 1 regularizer """no""" +269 1 optimizer """adam""" +269 1 training_loop """lcwa""" +269 1 evaluator """rankbased""" +269 2 dataset """wn18rr""" +269 2 model """ermlp""" +269 2 loss """crossentropy""" +269 2 regularizer """no""" +269 2 optimizer """adam""" +269 2 training_loop """lcwa""" +269 2 evaluator """rankbased""" +269 3 dataset """wn18rr""" +269 3 model """ermlp""" +269 3 loss """crossentropy""" +269 3 regularizer """no""" +269 3 optimizer """adam""" +269 3 training_loop """lcwa""" +269 3 evaluator """rankbased""" +269 4 dataset """wn18rr""" +269 4 model """ermlp""" +269 4 loss """crossentropy""" +269 4 regularizer """no""" +269 4 optimizer """adam""" +269 
4 training_loop """lcwa""" +269 4 evaluator """rankbased""" +269 5 dataset """wn18rr""" +269 5 model """ermlp""" +269 5 loss """crossentropy""" +269 5 regularizer """no""" +269 5 optimizer """adam""" +269 5 training_loop """lcwa""" +269 5 evaluator """rankbased""" +269 6 dataset """wn18rr""" +269 6 model """ermlp""" +269 6 loss """crossentropy""" +269 6 regularizer """no""" +269 6 optimizer """adam""" +269 6 training_loop """lcwa""" +269 6 evaluator """rankbased""" +269 7 dataset """wn18rr""" +269 7 model """ermlp""" +269 7 loss """crossentropy""" +269 7 regularizer """no""" +269 7 optimizer """adam""" +269 7 training_loop """lcwa""" +269 7 evaluator """rankbased""" +269 8 dataset """wn18rr""" +269 8 model """ermlp""" +269 8 loss """crossentropy""" +269 8 regularizer """no""" +269 8 optimizer """adam""" +269 8 training_loop """lcwa""" +269 8 evaluator """rankbased""" +270 1 model.embedding_dim 0.0 +270 1 optimizer.lr 0.0012539485363101967 +270 1 training.batch_size 2.0 +270 1 training.label_smoothing 0.002283278769001987 +270 2 model.embedding_dim 0.0 +270 2 optimizer.lr 0.007606230503229974 +270 2 training.batch_size 1.0 +270 2 training.label_smoothing 0.00986936353115151 +270 3 model.embedding_dim 2.0 +270 3 optimizer.lr 0.0033118060673194643 +270 3 training.batch_size 2.0 +270 3 training.label_smoothing 0.7921350028571117 +270 1 dataset """wn18rr""" +270 1 model """ermlp""" +270 1 loss """bceaftersigmoid""" +270 1 regularizer """no""" +270 1 optimizer """adam""" +270 1 training_loop """lcwa""" +270 1 evaluator """rankbased""" +270 2 dataset """wn18rr""" +270 2 model """ermlp""" +270 2 loss """bceaftersigmoid""" +270 2 regularizer """no""" +270 2 optimizer """adam""" +270 2 training_loop """lcwa""" +270 2 evaluator """rankbased""" +270 3 dataset """wn18rr""" +270 3 model """ermlp""" +270 3 loss """bceaftersigmoid""" +270 3 regularizer """no""" +270 3 optimizer """adam""" +270 3 training_loop """lcwa""" +270 3 evaluator """rankbased""" +271 1 model.embedding_dim 
0.0 +271 1 optimizer.lr 0.037099249062318616 +271 1 training.batch_size 2.0 +271 1 training.label_smoothing 0.058781724941697214 +271 2 model.embedding_dim 1.0 +271 2 optimizer.lr 0.018295940556159893 +271 2 training.batch_size 2.0 +271 2 training.label_smoothing 0.663850367387425 +271 3 model.embedding_dim 1.0 +271 3 optimizer.lr 0.06866166063519359 +271 3 training.batch_size 2.0 +271 3 training.label_smoothing 0.03107718651612679 +271 4 model.embedding_dim 0.0 +271 4 optimizer.lr 0.051034081596530174 +271 4 training.batch_size 0.0 +271 4 training.label_smoothing 0.046917825927304986 +271 5 model.embedding_dim 0.0 +271 5 optimizer.lr 0.006349866160667794 +271 5 training.batch_size 0.0 +271 5 training.label_smoothing 0.09775463951486915 +271 6 model.embedding_dim 2.0 +271 6 optimizer.lr 0.003735049765088534 +271 6 training.batch_size 2.0 +271 6 training.label_smoothing 0.005959638033668257 +271 1 dataset """wn18rr""" +271 1 model """ermlp""" +271 1 loss """softplus""" +271 1 regularizer """no""" +271 1 optimizer """adam""" +271 1 training_loop """lcwa""" +271 1 evaluator """rankbased""" +271 2 dataset """wn18rr""" +271 2 model """ermlp""" +271 2 loss """softplus""" +271 2 regularizer """no""" +271 2 optimizer """adam""" +271 2 training_loop """lcwa""" +271 2 evaluator """rankbased""" +271 3 dataset """wn18rr""" +271 3 model """ermlp""" +271 3 loss """softplus""" +271 3 regularizer """no""" +271 3 optimizer """adam""" +271 3 training_loop """lcwa""" +271 3 evaluator """rankbased""" +271 4 dataset """wn18rr""" +271 4 model """ermlp""" +271 4 loss """softplus""" +271 4 regularizer """no""" +271 4 optimizer """adam""" +271 4 training_loop """lcwa""" +271 4 evaluator """rankbased""" +271 5 dataset """wn18rr""" +271 5 model """ermlp""" +271 5 loss """softplus""" +271 5 regularizer """no""" +271 5 optimizer """adam""" +271 5 training_loop """lcwa""" +271 5 evaluator """rankbased""" +271 6 dataset """wn18rr""" +271 6 model """ermlp""" +271 6 loss """softplus""" +271 6 
regularizer """no""" +271 6 optimizer """adam""" +271 6 training_loop """lcwa""" +271 6 evaluator """rankbased""" +272 1 model.embedding_dim 1.0 +272 1 optimizer.lr 0.014107390811937104 +272 1 training.batch_size 0.0 +272 1 training.label_smoothing 0.002667098527180087 +272 2 model.embedding_dim 2.0 +272 2 optimizer.lr 0.09260218730811941 +272 2 training.batch_size 1.0 +272 2 training.label_smoothing 0.16250705275145633 +272 3 model.embedding_dim 0.0 +272 3 optimizer.lr 0.005708598724977155 +272 3 training.batch_size 0.0 +272 3 training.label_smoothing 0.04017132370519101 +272 4 model.embedding_dim 0.0 +272 4 optimizer.lr 0.003799305492188261 +272 4 training.batch_size 1.0 +272 4 training.label_smoothing 0.0017692939448760228 +272 5 model.embedding_dim 2.0 +272 5 optimizer.lr 0.011843155852447744 +272 5 training.batch_size 1.0 +272 5 training.label_smoothing 0.0011732721299899452 +272 6 model.embedding_dim 0.0 +272 6 optimizer.lr 0.009719457807938049 +272 6 training.batch_size 0.0 +272 6 training.label_smoothing 0.009703974974476769 +272 7 model.embedding_dim 1.0 +272 7 optimizer.lr 0.0023741993877939194 +272 7 training.batch_size 1.0 +272 7 training.label_smoothing 0.0962094434902198 +272 8 model.embedding_dim 2.0 +272 8 optimizer.lr 0.03915798139910323 +272 8 training.batch_size 1.0 +272 8 training.label_smoothing 0.0010987664222856515 +272 9 model.embedding_dim 2.0 +272 9 optimizer.lr 0.038163949548545 +272 9 training.batch_size 2.0 +272 9 training.label_smoothing 0.002749187906752056 +272 1 dataset """wn18rr""" +272 1 model """ermlp""" +272 1 loss """bceaftersigmoid""" +272 1 regularizer """no""" +272 1 optimizer """adam""" +272 1 training_loop """lcwa""" +272 1 evaluator """rankbased""" +272 2 dataset """wn18rr""" +272 2 model """ermlp""" +272 2 loss """bceaftersigmoid""" +272 2 regularizer """no""" +272 2 optimizer """adam""" +272 2 training_loop """lcwa""" +272 2 evaluator """rankbased""" +272 3 dataset """wn18rr""" +272 3 model """ermlp""" +272 3 loss 
"""bceaftersigmoid""" +272 3 regularizer """no""" +272 3 optimizer """adam""" +272 3 training_loop """lcwa""" +272 3 evaluator """rankbased""" +272 4 dataset """wn18rr""" +272 4 model """ermlp""" +272 4 loss """bceaftersigmoid""" +272 4 regularizer """no""" +272 4 optimizer """adam""" +272 4 training_loop """lcwa""" +272 4 evaluator """rankbased""" +272 5 dataset """wn18rr""" +272 5 model """ermlp""" +272 5 loss """bceaftersigmoid""" +272 5 regularizer """no""" +272 5 optimizer """adam""" +272 5 training_loop """lcwa""" +272 5 evaluator """rankbased""" +272 6 dataset """wn18rr""" +272 6 model """ermlp""" +272 6 loss """bceaftersigmoid""" +272 6 regularizer """no""" +272 6 optimizer """adam""" +272 6 training_loop """lcwa""" +272 6 evaluator """rankbased""" +272 7 dataset """wn18rr""" +272 7 model """ermlp""" +272 7 loss """bceaftersigmoid""" +272 7 regularizer """no""" +272 7 optimizer """adam""" +272 7 training_loop """lcwa""" +272 7 evaluator """rankbased""" +272 8 dataset """wn18rr""" +272 8 model """ermlp""" +272 8 loss """bceaftersigmoid""" +272 8 regularizer """no""" +272 8 optimizer """adam""" +272 8 training_loop """lcwa""" +272 8 evaluator """rankbased""" +272 9 dataset """wn18rr""" +272 9 model """ermlp""" +272 9 loss """bceaftersigmoid""" +272 9 regularizer """no""" +272 9 optimizer """adam""" +272 9 training_loop """lcwa""" +272 9 evaluator """rankbased""" +273 1 model.embedding_dim 2.0 +273 1 optimizer.lr 0.003602311299299433 +273 1 training.batch_size 1.0 +273 1 training.label_smoothing 0.0072286833707149575 +273 2 model.embedding_dim 2.0 +273 2 optimizer.lr 0.036011992696292995 +273 2 training.batch_size 1.0 +273 2 training.label_smoothing 0.0328653691093115 +273 3 model.embedding_dim 2.0 +273 3 optimizer.lr 0.08915073480052027 +273 3 training.batch_size 2.0 +273 3 training.label_smoothing 0.0014751379361910234 +273 4 model.embedding_dim 0.0 +273 4 optimizer.lr 0.08778183123217835 +273 4 training.batch_size 1.0 +273 4 training.label_smoothing 
0.7578342947853687 +273 5 model.embedding_dim 1.0 +273 5 optimizer.lr 0.027270812533106138 +273 5 training.batch_size 1.0 +273 5 training.label_smoothing 0.00452049491354582 +273 6 model.embedding_dim 2.0 +273 6 optimizer.lr 0.005598319423992623 +273 6 training.batch_size 1.0 +273 6 training.label_smoothing 0.7201613727634275 +273 7 model.embedding_dim 1.0 +273 7 optimizer.lr 0.0014037825189847106 +273 7 training.batch_size 1.0 +273 7 training.label_smoothing 0.04700999921409688 +273 8 model.embedding_dim 0.0 +273 8 optimizer.lr 0.017868564940265484 +273 8 training.batch_size 0.0 +273 8 training.label_smoothing 0.43425128803307106 +273 9 model.embedding_dim 0.0 +273 9 optimizer.lr 0.04884607949313503 +273 9 training.batch_size 0.0 +273 9 training.label_smoothing 0.009282721948839648 +273 10 model.embedding_dim 0.0 +273 10 optimizer.lr 0.041602527098389346 +273 10 training.batch_size 1.0 +273 10 training.label_smoothing 0.3007130084559541 +273 1 dataset """wn18rr""" +273 1 model """ermlp""" +273 1 loss """softplus""" +273 1 regularizer """no""" +273 1 optimizer """adam""" +273 1 training_loop """lcwa""" +273 1 evaluator """rankbased""" +273 2 dataset """wn18rr""" +273 2 model """ermlp""" +273 2 loss """softplus""" +273 2 regularizer """no""" +273 2 optimizer """adam""" +273 2 training_loop """lcwa""" +273 2 evaluator """rankbased""" +273 3 dataset """wn18rr""" +273 3 model """ermlp""" +273 3 loss """softplus""" +273 3 regularizer """no""" +273 3 optimizer """adam""" +273 3 training_loop """lcwa""" +273 3 evaluator """rankbased""" +273 4 dataset """wn18rr""" +273 4 model """ermlp""" +273 4 loss """softplus""" +273 4 regularizer """no""" +273 4 optimizer """adam""" +273 4 training_loop """lcwa""" +273 4 evaluator """rankbased""" +273 5 dataset """wn18rr""" +273 5 model """ermlp""" +273 5 loss """softplus""" +273 5 regularizer """no""" +273 5 optimizer """adam""" +273 5 training_loop """lcwa""" +273 5 evaluator """rankbased""" +273 6 dataset """wn18rr""" +273 6 model 
"""ermlp""" +273 6 loss """softplus""" +273 6 regularizer """no""" +273 6 optimizer """adam""" +273 6 training_loop """lcwa""" +273 6 evaluator """rankbased""" +273 7 dataset """wn18rr""" +273 7 model """ermlp""" +273 7 loss """softplus""" +273 7 regularizer """no""" +273 7 optimizer """adam""" +273 7 training_loop """lcwa""" +273 7 evaluator """rankbased""" +273 8 dataset """wn18rr""" +273 8 model """ermlp""" +273 8 loss """softplus""" +273 8 regularizer """no""" +273 8 optimizer """adam""" +273 8 training_loop """lcwa""" +273 8 evaluator """rankbased""" +273 9 dataset """wn18rr""" +273 9 model """ermlp""" +273 9 loss """softplus""" +273 9 regularizer """no""" +273 9 optimizer """adam""" +273 9 training_loop """lcwa""" +273 9 evaluator """rankbased""" +273 10 dataset """wn18rr""" +273 10 model """ermlp""" +273 10 loss """softplus""" +273 10 regularizer """no""" +273 10 optimizer """adam""" +273 10 training_loop """lcwa""" +273 10 evaluator """rankbased""" +274 1 model.embedding_dim 1.0 +274 1 optimizer.lr 0.007816437513637076 +274 1 training.batch_size 1.0 +274 1 training.label_smoothing 0.012419119260294409 +274 2 model.embedding_dim 1.0 +274 2 optimizer.lr 0.0319778719779333 +274 2 training.batch_size 0.0 +274 2 training.label_smoothing 0.0037406594558845835 +274 3 model.embedding_dim 1.0 +274 3 optimizer.lr 0.0162477366954813 +274 3 training.batch_size 0.0 +274 3 training.label_smoothing 0.0014438065880108494 +274 4 model.embedding_dim 1.0 +274 4 optimizer.lr 0.026879866021604033 +274 4 training.batch_size 0.0 +274 4 training.label_smoothing 0.06685204612718804 +274 5 model.embedding_dim 0.0 +274 5 optimizer.lr 0.006421016239696553 +274 5 training.batch_size 0.0 +274 5 training.label_smoothing 0.004299947606807855 +274 1 dataset """wn18rr""" +274 1 model """ermlp""" +274 1 loss """bceaftersigmoid""" +274 1 regularizer """no""" +274 1 optimizer """adam""" +274 1 training_loop """lcwa""" +274 1 evaluator """rankbased""" +274 2 dataset """wn18rr""" +274 2 model 
"""ermlp""" +274 2 loss """bceaftersigmoid""" +274 2 regularizer """no""" +274 2 optimizer """adam""" +274 2 training_loop """lcwa""" +274 2 evaluator """rankbased""" +274 3 dataset """wn18rr""" +274 3 model """ermlp""" +274 3 loss """bceaftersigmoid""" +274 3 regularizer """no""" +274 3 optimizer """adam""" +274 3 training_loop """lcwa""" +274 3 evaluator """rankbased""" +274 4 dataset """wn18rr""" +274 4 model """ermlp""" +274 4 loss """bceaftersigmoid""" +274 4 regularizer """no""" +274 4 optimizer """adam""" +274 4 training_loop """lcwa""" +274 4 evaluator """rankbased""" +274 5 dataset """wn18rr""" +274 5 model """ermlp""" +274 5 loss """bceaftersigmoid""" +274 5 regularizer """no""" +274 5 optimizer """adam""" +274 5 training_loop """lcwa""" +274 5 evaluator """rankbased""" +275 1 model.embedding_dim 2.0 +275 1 optimizer.lr 0.0082013939077271 +275 1 training.batch_size 0.0 +275 1 training.label_smoothing 0.36150779381659964 +275 2 model.embedding_dim 1.0 +275 2 optimizer.lr 0.0036379086126434685 +275 2 training.batch_size 2.0 +275 2 training.label_smoothing 0.13326899068987516 +275 3 model.embedding_dim 0.0 +275 3 optimizer.lr 0.005792375927778756 +275 3 training.batch_size 1.0 +275 3 training.label_smoothing 0.02996481747810023 +275 1 dataset """wn18rr""" +275 1 model """ermlp""" +275 1 loss """softplus""" +275 1 regularizer """no""" +275 1 optimizer """adam""" +275 1 training_loop """lcwa""" +275 1 evaluator """rankbased""" +275 2 dataset """wn18rr""" +275 2 model """ermlp""" +275 2 loss """softplus""" +275 2 regularizer """no""" +275 2 optimizer """adam""" +275 2 training_loop """lcwa""" +275 2 evaluator """rankbased""" +275 3 dataset """wn18rr""" +275 3 model """ermlp""" +275 3 loss """softplus""" +275 3 regularizer """no""" +275 3 optimizer """adam""" +275 3 training_loop """lcwa""" +275 3 evaluator """rankbased""" +276 1 model.embedding_dim 0.0 +276 1 optimizer.lr 0.001004410452448773 +276 1 training.batch_size 1.0 +276 1 training.label_smoothing 
0.0791476498216973 +276 2 model.embedding_dim 0.0 +276 2 optimizer.lr 0.0013949345158686022 +276 2 training.batch_size 0.0 +276 2 training.label_smoothing 0.2967460252882131 +276 3 model.embedding_dim 1.0 +276 3 optimizer.lr 0.009775722057806692 +276 3 training.batch_size 1.0 +276 3 training.label_smoothing 0.49594746793331135 +276 4 model.embedding_dim 1.0 +276 4 optimizer.lr 0.0029488605506958238 +276 4 training.batch_size 1.0 +276 4 training.label_smoothing 0.020840399333923385 +276 5 model.embedding_dim 2.0 +276 5 optimizer.lr 0.003914441572800252 +276 5 training.batch_size 2.0 +276 5 training.label_smoothing 0.05224738384516465 +276 6 model.embedding_dim 1.0 +276 6 optimizer.lr 0.0010321384343259937 +276 6 training.batch_size 0.0 +276 6 training.label_smoothing 0.04555471085222802 +276 7 model.embedding_dim 2.0 +276 7 optimizer.lr 0.003302514717042705 +276 7 training.batch_size 2.0 +276 7 training.label_smoothing 0.024350211159665982 +276 8 model.embedding_dim 2.0 +276 8 optimizer.lr 0.0027797054241654557 +276 8 training.batch_size 1.0 +276 8 training.label_smoothing 0.010308793734519267 +276 1 dataset """wn18rr""" +276 1 model """ermlp""" +276 1 loss """bceaftersigmoid""" +276 1 regularizer """no""" +276 1 optimizer """adam""" +276 1 training_loop """lcwa""" +276 1 evaluator """rankbased""" +276 2 dataset """wn18rr""" +276 2 model """ermlp""" +276 2 loss """bceaftersigmoid""" +276 2 regularizer """no""" +276 2 optimizer """adam""" +276 2 training_loop """lcwa""" +276 2 evaluator """rankbased""" +276 3 dataset """wn18rr""" +276 3 model """ermlp""" +276 3 loss """bceaftersigmoid""" +276 3 regularizer """no""" +276 3 optimizer """adam""" +276 3 training_loop """lcwa""" +276 3 evaluator """rankbased""" +276 4 dataset """wn18rr""" +276 4 model """ermlp""" +276 4 loss """bceaftersigmoid""" +276 4 regularizer """no""" +276 4 optimizer """adam""" +276 4 training_loop """lcwa""" +276 4 evaluator """rankbased""" +276 5 dataset """wn18rr""" +276 5 model """ermlp""" +276 
5 loss """bceaftersigmoid""" +276 5 regularizer """no""" +276 5 optimizer """adam""" +276 5 training_loop """lcwa""" +276 5 evaluator """rankbased""" +276 6 dataset """wn18rr""" +276 6 model """ermlp""" +276 6 loss """bceaftersigmoid""" +276 6 regularizer """no""" +276 6 optimizer """adam""" +276 6 training_loop """lcwa""" +276 6 evaluator """rankbased""" +276 7 dataset """wn18rr""" +276 7 model """ermlp""" +276 7 loss """bceaftersigmoid""" +276 7 regularizer """no""" +276 7 optimizer """adam""" +276 7 training_loop """lcwa""" +276 7 evaluator """rankbased""" +276 8 dataset """wn18rr""" +276 8 model """ermlp""" +276 8 loss """bceaftersigmoid""" +276 8 regularizer """no""" +276 8 optimizer """adam""" +276 8 training_loop """lcwa""" +276 8 evaluator """rankbased""" +277 1 model.embedding_dim 1.0 +277 1 optimizer.lr 0.036312472806840246 +277 1 training.batch_size 0.0 +277 1 training.label_smoothing 0.0026668814798345613 +277 2 model.embedding_dim 2.0 +277 2 optimizer.lr 0.029143124961263783 +277 2 training.batch_size 1.0 +277 2 training.label_smoothing 0.00369120502505288 +277 1 dataset """wn18rr""" +277 1 model """ermlp""" +277 1 loss """softplus""" +277 1 regularizer """no""" +277 1 optimizer """adam""" +277 1 training_loop """lcwa""" +277 1 evaluator """rankbased""" +277 2 dataset """wn18rr""" +277 2 model """ermlp""" +277 2 loss """softplus""" +277 2 regularizer """no""" +277 2 optimizer """adam""" +277 2 training_loop """lcwa""" +277 2 evaluator """rankbased""" +278 1 model.embedding_dim 1.0 +278 1 loss.margin 2.2674372853015026 +278 1 optimizer.lr 0.02166383230650884 +278 1 negative_sampler.num_negs_per_pos 28.0 +278 1 training.batch_size 0.0 +278 2 model.embedding_dim 1.0 +278 2 loss.margin 4.031354753862483 +278 2 optimizer.lr 0.0542057882690251 +278 2 negative_sampler.num_negs_per_pos 10.0 +278 2 training.batch_size 3.0 +278 3 model.embedding_dim 0.0 +278 3 loss.margin 9.5747364990454 +278 3 optimizer.lr 0.0020507414670746372 +278 3 
negative_sampler.num_negs_per_pos 13.0 +278 3 training.batch_size 0.0 +278 4 model.embedding_dim 2.0 +278 4 loss.margin 8.29674054350361 +278 4 optimizer.lr 0.009440682454115955 +278 4 negative_sampler.num_negs_per_pos 4.0 +278 4 training.batch_size 1.0 +278 5 model.embedding_dim 0.0 +278 5 loss.margin 7.120398757302812 +278 5 optimizer.lr 0.04170792144603148 +278 5 negative_sampler.num_negs_per_pos 48.0 +278 5 training.batch_size 1.0 +278 6 model.embedding_dim 0.0 +278 6 loss.margin 2.9029848597213967 +278 6 optimizer.lr 0.02077543702565964 +278 6 negative_sampler.num_negs_per_pos 33.0 +278 6 training.batch_size 0.0 +278 7 model.embedding_dim 0.0 +278 7 loss.margin 9.933069482450426 +278 7 optimizer.lr 0.0361910496973843 +278 7 negative_sampler.num_negs_per_pos 18.0 +278 7 training.batch_size 0.0 +278 8 model.embedding_dim 1.0 +278 8 loss.margin 2.416776380288025 +278 8 optimizer.lr 0.06570635014175182 +278 8 negative_sampler.num_negs_per_pos 28.0 +278 8 training.batch_size 2.0 +278 9 model.embedding_dim 2.0 +278 9 loss.margin 0.8406603357960964 +278 9 optimizer.lr 0.022976651333881996 +278 9 negative_sampler.num_negs_per_pos 33.0 +278 9 training.batch_size 2.0 +278 10 model.embedding_dim 1.0 +278 10 loss.margin 7.162967294780979 +278 10 optimizer.lr 0.002493042373883894 +278 10 negative_sampler.num_negs_per_pos 14.0 +278 10 training.batch_size 2.0 +278 11 model.embedding_dim 0.0 +278 11 loss.margin 4.766241227750937 +278 11 optimizer.lr 0.0018503614305903694 +278 11 negative_sampler.num_negs_per_pos 11.0 +278 11 training.batch_size 3.0 +278 1 dataset """yago310""" +278 1 model """ermlp""" +278 1 loss """marginranking""" +278 1 regularizer """no""" +278 1 optimizer """adam""" +278 1 training_loop """owa""" +278 1 negative_sampler """basic""" +278 1 evaluator """rankbased""" +278 2 dataset """yago310""" +278 2 model """ermlp""" +278 2 loss """marginranking""" +278 2 regularizer """no""" +278 2 optimizer """adam""" +278 2 training_loop """owa""" +278 2 
negative_sampler """basic""" +278 2 evaluator """rankbased""" +278 3 dataset """yago310""" +278 3 model """ermlp""" +278 3 loss """marginranking""" +278 3 regularizer """no""" +278 3 optimizer """adam""" +278 3 training_loop """owa""" +278 3 negative_sampler """basic""" +278 3 evaluator """rankbased""" +278 4 dataset """yago310""" +278 4 model """ermlp""" +278 4 loss """marginranking""" +278 4 regularizer """no""" +278 4 optimizer """adam""" +278 4 training_loop """owa""" +278 4 negative_sampler """basic""" +278 4 evaluator """rankbased""" +278 5 dataset """yago310""" +278 5 model """ermlp""" +278 5 loss """marginranking""" +278 5 regularizer """no""" +278 5 optimizer """adam""" +278 5 training_loop """owa""" +278 5 negative_sampler """basic""" +278 5 evaluator """rankbased""" +278 6 dataset """yago310""" +278 6 model """ermlp""" +278 6 loss """marginranking""" +278 6 regularizer """no""" +278 6 optimizer """adam""" +278 6 training_loop """owa""" +278 6 negative_sampler """basic""" +278 6 evaluator """rankbased""" +278 7 dataset """yago310""" +278 7 model """ermlp""" +278 7 loss """marginranking""" +278 7 regularizer """no""" +278 7 optimizer """adam""" +278 7 training_loop """owa""" +278 7 negative_sampler """basic""" +278 7 evaluator """rankbased""" +278 8 dataset """yago310""" +278 8 model """ermlp""" +278 8 loss """marginranking""" +278 8 regularizer """no""" +278 8 optimizer """adam""" +278 8 training_loop """owa""" +278 8 negative_sampler """basic""" +278 8 evaluator """rankbased""" +278 9 dataset """yago310""" +278 9 model """ermlp""" +278 9 loss """marginranking""" +278 9 regularizer """no""" +278 9 optimizer """adam""" +278 9 training_loop """owa""" +278 9 negative_sampler """basic""" +278 9 evaluator """rankbased""" +278 10 dataset """yago310""" +278 10 model """ermlp""" +278 10 loss """marginranking""" +278 10 regularizer """no""" +278 10 optimizer """adam""" +278 10 training_loop """owa""" +278 10 negative_sampler """basic""" +278 10 evaluator 
"""rankbased""" +278 11 dataset """yago310""" +278 11 model """ermlp""" +278 11 loss """marginranking""" +278 11 regularizer """no""" +278 11 optimizer """adam""" +278 11 training_loop """owa""" +278 11 negative_sampler """basic""" +278 11 evaluator """rankbased""" +279 1 model.embedding_dim 2.0 +279 1 loss.margin 4.346300222306928 +279 1 optimizer.lr 0.08192009707111295 +279 1 negative_sampler.num_negs_per_pos 37.0 +279 1 training.batch_size 0.0 +279 2 model.embedding_dim 1.0 +279 2 loss.margin 6.228269385051158 +279 2 optimizer.lr 0.015018927810029895 +279 2 negative_sampler.num_negs_per_pos 11.0 +279 2 training.batch_size 2.0 +279 3 model.embedding_dim 0.0 +279 3 loss.margin 5.3096162988958415 +279 3 optimizer.lr 0.008257509063343714 +279 3 negative_sampler.num_negs_per_pos 28.0 +279 3 training.batch_size 2.0 +279 4 model.embedding_dim 1.0 +279 4 loss.margin 8.709643701495972 +279 4 optimizer.lr 0.017210312998797758 +279 4 negative_sampler.num_negs_per_pos 2.0 +279 4 training.batch_size 3.0 +279 5 model.embedding_dim 2.0 +279 5 loss.margin 4.539629985383364 +279 5 optimizer.lr 0.004254661059849077 +279 5 negative_sampler.num_negs_per_pos 34.0 +279 5 training.batch_size 1.0 +279 6 model.embedding_dim 1.0 +279 6 loss.margin 3.4522885991245413 +279 6 optimizer.lr 0.019400460846148316 +279 6 negative_sampler.num_negs_per_pos 19.0 +279 6 training.batch_size 0.0 +279 7 model.embedding_dim 0.0 +279 7 loss.margin 9.38578756238419 +279 7 optimizer.lr 0.019388797209479544 +279 7 negative_sampler.num_negs_per_pos 45.0 +279 7 training.batch_size 2.0 +279 8 model.embedding_dim 1.0 +279 8 loss.margin 8.74050936550902 +279 8 optimizer.lr 0.011576816350163653 +279 8 negative_sampler.num_negs_per_pos 18.0 +279 8 training.batch_size 2.0 +279 9 model.embedding_dim 1.0 +279 9 loss.margin 4.151685134162818 +279 9 optimizer.lr 0.0032412585743629057 +279 9 negative_sampler.num_negs_per_pos 0.0 +279 9 training.batch_size 1.0 +279 10 model.embedding_dim 1.0 +279 10 loss.margin 
8.162691639266553 +279 10 optimizer.lr 0.007633472927073508 +279 10 negative_sampler.num_negs_per_pos 2.0 +279 10 training.batch_size 2.0 +279 11 model.embedding_dim 1.0 +279 11 loss.margin 4.4546612153603675 +279 11 optimizer.lr 0.027704794546970658 +279 11 negative_sampler.num_negs_per_pos 16.0 +279 11 training.batch_size 1.0 +279 12 model.embedding_dim 0.0 +279 12 loss.margin 2.3850419847630686 +279 12 optimizer.lr 0.005310180017189963 +279 12 negative_sampler.num_negs_per_pos 2.0 +279 12 training.batch_size 2.0 +279 13 model.embedding_dim 0.0 +279 13 loss.margin 4.203183982848719 +279 13 optimizer.lr 0.001354442099564594 +279 13 negative_sampler.num_negs_per_pos 22.0 +279 13 training.batch_size 0.0 +279 14 model.embedding_dim 1.0 +279 14 loss.margin 9.687260827643305 +279 14 optimizer.lr 0.0017620815842638985 +279 14 negative_sampler.num_negs_per_pos 30.0 +279 14 training.batch_size 0.0 +279 1 dataset """yago310""" +279 1 model """ermlp""" +279 1 loss """marginranking""" +279 1 regularizer """no""" +279 1 optimizer """adam""" +279 1 training_loop """owa""" +279 1 negative_sampler """basic""" +279 1 evaluator """rankbased""" +279 2 dataset """yago310""" +279 2 model """ermlp""" +279 2 loss """marginranking""" +279 2 regularizer """no""" +279 2 optimizer """adam""" +279 2 training_loop """owa""" +279 2 negative_sampler """basic""" +279 2 evaluator """rankbased""" +279 3 dataset """yago310""" +279 3 model """ermlp""" +279 3 loss """marginranking""" +279 3 regularizer """no""" +279 3 optimizer """adam""" +279 3 training_loop """owa""" +279 3 negative_sampler """basic""" +279 3 evaluator """rankbased""" +279 4 dataset """yago310""" +279 4 model """ermlp""" +279 4 loss """marginranking""" +279 4 regularizer """no""" +279 4 optimizer """adam""" +279 4 training_loop """owa""" +279 4 negative_sampler """basic""" +279 4 evaluator """rankbased""" +279 5 dataset """yago310""" +279 5 model """ermlp""" +279 5 loss """marginranking""" +279 5 regularizer """no""" +279 5 
optimizer """adam""" +279 5 training_loop """owa""" +279 5 negative_sampler """basic""" +279 5 evaluator """rankbased""" +279 6 dataset """yago310""" +279 6 model """ermlp""" +279 6 loss """marginranking""" +279 6 regularizer """no""" +279 6 optimizer """adam""" +279 6 training_loop """owa""" +279 6 negative_sampler """basic""" +279 6 evaluator """rankbased""" +279 7 dataset """yago310""" +279 7 model """ermlp""" +279 7 loss """marginranking""" +279 7 regularizer """no""" +279 7 optimizer """adam""" +279 7 training_loop """owa""" +279 7 negative_sampler """basic""" +279 7 evaluator """rankbased""" +279 8 dataset """yago310""" +279 8 model """ermlp""" +279 8 loss """marginranking""" +279 8 regularizer """no""" +279 8 optimizer """adam""" +279 8 training_loop """owa""" +279 8 negative_sampler """basic""" +279 8 evaluator """rankbased""" +279 9 dataset """yago310""" +279 9 model """ermlp""" +279 9 loss """marginranking""" +279 9 regularizer """no""" +279 9 optimizer """adam""" +279 9 training_loop """owa""" +279 9 negative_sampler """basic""" +279 9 evaluator """rankbased""" +279 10 dataset """yago310""" +279 10 model """ermlp""" +279 10 loss """marginranking""" +279 10 regularizer """no""" +279 10 optimizer """adam""" +279 10 training_loop """owa""" +279 10 negative_sampler """basic""" +279 10 evaluator """rankbased""" +279 11 dataset """yago310""" +279 11 model """ermlp""" +279 11 loss """marginranking""" +279 11 regularizer """no""" +279 11 optimizer """adam""" +279 11 training_loop """owa""" +279 11 negative_sampler """basic""" +279 11 evaluator """rankbased""" +279 12 dataset """yago310""" +279 12 model """ermlp""" +279 12 loss """marginranking""" +279 12 regularizer """no""" +279 12 optimizer """adam""" +279 12 training_loop """owa""" +279 12 negative_sampler """basic""" +279 12 evaluator """rankbased""" +279 13 dataset """yago310""" +279 13 model """ermlp""" +279 13 loss """marginranking""" +279 13 regularizer """no""" +279 13 optimizer """adam""" +279 13 
training_loop """owa""" +279 13 negative_sampler """basic""" +279 13 evaluator """rankbased""" +279 14 dataset """yago310""" +279 14 model """ermlp""" +279 14 loss """marginranking""" +279 14 regularizer """no""" +279 14 optimizer """adam""" +279 14 training_loop """owa""" +279 14 negative_sampler """basic""" +279 14 evaluator """rankbased""" +280 1 model.embedding_dim 2.0 +280 1 optimizer.lr 0.00167361149004918 +280 1 negative_sampler.num_negs_per_pos 49.0 +280 1 training.batch_size 2.0 +280 2 model.embedding_dim 0.0 +280 2 optimizer.lr 0.0010157331410579008 +280 2 negative_sampler.num_negs_per_pos 26.0 +280 2 training.batch_size 3.0 +280 3 model.embedding_dim 2.0 +280 3 optimizer.lr 0.005686524553479544 +280 3 negative_sampler.num_negs_per_pos 34.0 +280 3 training.batch_size 2.0 +280 4 model.embedding_dim 2.0 +280 4 optimizer.lr 0.0010168002171307632 +280 4 negative_sampler.num_negs_per_pos 21.0 +280 4 training.batch_size 3.0 +280 5 model.embedding_dim 1.0 +280 5 optimizer.lr 0.001094076012729922 +280 5 negative_sampler.num_negs_per_pos 5.0 +280 5 training.batch_size 2.0 +280 6 model.embedding_dim 1.0 +280 6 optimizer.lr 0.08775037065775543 +280 6 negative_sampler.num_negs_per_pos 40.0 +280 6 training.batch_size 1.0 +280 7 model.embedding_dim 1.0 +280 7 optimizer.lr 0.0035071125490104983 +280 7 negative_sampler.num_negs_per_pos 36.0 +280 7 training.batch_size 0.0 +280 1 dataset """yago310""" +280 1 model """ermlp""" +280 1 loss """bceaftersigmoid""" +280 1 regularizer """no""" +280 1 optimizer """adam""" +280 1 training_loop """owa""" +280 1 negative_sampler """basic""" +280 1 evaluator """rankbased""" +280 2 dataset """yago310""" +280 2 model """ermlp""" +280 2 loss """bceaftersigmoid""" +280 2 regularizer """no""" +280 2 optimizer """adam""" +280 2 training_loop """owa""" +280 2 negative_sampler """basic""" +280 2 evaluator """rankbased""" +280 3 dataset """yago310""" +280 3 model """ermlp""" +280 3 loss """bceaftersigmoid""" +280 3 regularizer """no""" +280 3 
optimizer """adam""" +280 3 training_loop """owa""" +280 3 negative_sampler """basic""" +280 3 evaluator """rankbased""" +280 4 dataset """yago310""" +280 4 model """ermlp""" +280 4 loss """bceaftersigmoid""" +280 4 regularizer """no""" +280 4 optimizer """adam""" +280 4 training_loop """owa""" +280 4 negative_sampler """basic""" +280 4 evaluator """rankbased""" +280 5 dataset """yago310""" +280 5 model """ermlp""" +280 5 loss """bceaftersigmoid""" +280 5 regularizer """no""" +280 5 optimizer """adam""" +280 5 training_loop """owa""" +280 5 negative_sampler """basic""" +280 5 evaluator """rankbased""" +280 6 dataset """yago310""" +280 6 model """ermlp""" +280 6 loss """bceaftersigmoid""" +280 6 regularizer """no""" +280 6 optimizer """adam""" +280 6 training_loop """owa""" +280 6 negative_sampler """basic""" +280 6 evaluator """rankbased""" +280 7 dataset """yago310""" +280 7 model """ermlp""" +280 7 loss """bceaftersigmoid""" +280 7 regularizer """no""" +280 7 optimizer """adam""" +280 7 training_loop """owa""" +280 7 negative_sampler """basic""" +280 7 evaluator """rankbased""" +281 1 model.embedding_dim 1.0 +281 1 optimizer.lr 0.00947772247714081 +281 1 negative_sampler.num_negs_per_pos 35.0 +281 1 training.batch_size 3.0 +281 2 model.embedding_dim 2.0 +281 2 optimizer.lr 0.006968326563651091 +281 2 negative_sampler.num_negs_per_pos 2.0 +281 2 training.batch_size 0.0 +281 3 model.embedding_dim 0.0 +281 3 optimizer.lr 0.004273358571560813 +281 3 negative_sampler.num_negs_per_pos 15.0 +281 3 training.batch_size 2.0 +281 4 model.embedding_dim 2.0 +281 4 optimizer.lr 0.0190670684585565 +281 4 negative_sampler.num_negs_per_pos 27.0 +281 4 training.batch_size 0.0 +281 5 model.embedding_dim 2.0 +281 5 optimizer.lr 0.002103289148881089 +281 5 negative_sampler.num_negs_per_pos 6.0 +281 5 training.batch_size 1.0 +281 6 model.embedding_dim 1.0 +281 6 optimizer.lr 0.0024233423421468423 +281 6 negative_sampler.num_negs_per_pos 4.0 +281 6 training.batch_size 3.0 +281 7 
model.embedding_dim 2.0 +281 7 optimizer.lr 0.029384044702970857 +281 7 negative_sampler.num_negs_per_pos 27.0 +281 7 training.batch_size 0.0 +281 8 model.embedding_dim 2.0 +281 8 optimizer.lr 0.0010918468004160373 +281 8 negative_sampler.num_negs_per_pos 47.0 +281 8 training.batch_size 2.0 +281 9 model.embedding_dim 1.0 +281 9 optimizer.lr 0.0395488793119134 +281 9 negative_sampler.num_negs_per_pos 29.0 +281 9 training.batch_size 1.0 +281 10 model.embedding_dim 2.0 +281 10 optimizer.lr 0.041480398839306244 +281 10 negative_sampler.num_negs_per_pos 27.0 +281 10 training.batch_size 0.0 +281 11 model.embedding_dim 0.0 +281 11 optimizer.lr 0.03377497991919168 +281 11 negative_sampler.num_negs_per_pos 10.0 +281 11 training.batch_size 0.0 +281 12 model.embedding_dim 2.0 +281 12 optimizer.lr 0.00311333014209865 +281 12 negative_sampler.num_negs_per_pos 47.0 +281 12 training.batch_size 3.0 +281 13 model.embedding_dim 0.0 +281 13 optimizer.lr 0.0029164399424618966 +281 13 negative_sampler.num_negs_per_pos 42.0 +281 13 training.batch_size 0.0 +281 14 model.embedding_dim 1.0 +281 14 optimizer.lr 0.09928277292021961 +281 14 negative_sampler.num_negs_per_pos 16.0 +281 14 training.batch_size 2.0 +281 15 model.embedding_dim 2.0 +281 15 optimizer.lr 0.0035887901985711236 +281 15 negative_sampler.num_negs_per_pos 2.0 +281 15 training.batch_size 2.0 +281 16 model.embedding_dim 2.0 +281 16 optimizer.lr 0.03142298970653618 +281 16 negative_sampler.num_negs_per_pos 17.0 +281 16 training.batch_size 1.0 +281 17 model.embedding_dim 2.0 +281 17 optimizer.lr 0.016669000523708448 +281 17 negative_sampler.num_negs_per_pos 26.0 +281 17 training.batch_size 3.0 +281 18 model.embedding_dim 1.0 +281 18 optimizer.lr 0.0020597506982639536 +281 18 negative_sampler.num_negs_per_pos 48.0 +281 18 training.batch_size 1.0 +281 19 model.embedding_dim 1.0 +281 19 optimizer.lr 0.0012540903936732448 +281 19 negative_sampler.num_negs_per_pos 22.0 +281 19 training.batch_size 0.0 +281 20 model.embedding_dim 0.0 
+281 20 optimizer.lr 0.004619192001023339 +281 20 negative_sampler.num_negs_per_pos 14.0 +281 20 training.batch_size 2.0 +281 21 model.embedding_dim 0.0 +281 21 optimizer.lr 0.0881917860107427 +281 21 negative_sampler.num_negs_per_pos 32.0 +281 21 training.batch_size 2.0 +281 22 model.embedding_dim 0.0 +281 22 optimizer.lr 0.0024817041823741418 +281 22 negative_sampler.num_negs_per_pos 21.0 +281 22 training.batch_size 3.0 +281 1 dataset """yago310""" +281 1 model """ermlp""" +281 1 loss """bceaftersigmoid""" +281 1 regularizer """no""" +281 1 optimizer """adam""" +281 1 training_loop """owa""" +281 1 negative_sampler """basic""" +281 1 evaluator """rankbased""" +281 2 dataset """yago310""" +281 2 model """ermlp""" +281 2 loss """bceaftersigmoid""" +281 2 regularizer """no""" +281 2 optimizer """adam""" +281 2 training_loop """owa""" +281 2 negative_sampler """basic""" +281 2 evaluator """rankbased""" +281 3 dataset """yago310""" +281 3 model """ermlp""" +281 3 loss """bceaftersigmoid""" +281 3 regularizer """no""" +281 3 optimizer """adam""" +281 3 training_loop """owa""" +281 3 negative_sampler """basic""" +281 3 evaluator """rankbased""" +281 4 dataset """yago310""" +281 4 model """ermlp""" +281 4 loss """bceaftersigmoid""" +281 4 regularizer """no""" +281 4 optimizer """adam""" +281 4 training_loop """owa""" +281 4 negative_sampler """basic""" +281 4 evaluator """rankbased""" +281 5 dataset """yago310""" +281 5 model """ermlp""" +281 5 loss """bceaftersigmoid""" +281 5 regularizer """no""" +281 5 optimizer """adam""" +281 5 training_loop """owa""" +281 5 negative_sampler """basic""" +281 5 evaluator """rankbased""" +281 6 dataset """yago310""" +281 6 model """ermlp""" +281 6 loss """bceaftersigmoid""" +281 6 regularizer """no""" +281 6 optimizer """adam""" +281 6 training_loop """owa""" +281 6 negative_sampler """basic""" +281 6 evaluator """rankbased""" +281 7 dataset """yago310""" +281 7 model """ermlp""" +281 7 loss """bceaftersigmoid""" +281 7 regularizer 
"""no""" +281 7 optimizer """adam""" +281 7 training_loop """owa""" +281 7 negative_sampler """basic""" +281 7 evaluator """rankbased""" +281 8 dataset """yago310""" +281 8 model """ermlp""" +281 8 loss """bceaftersigmoid""" +281 8 regularizer """no""" +281 8 optimizer """adam""" +281 8 training_loop """owa""" +281 8 negative_sampler """basic""" +281 8 evaluator """rankbased""" +281 9 dataset """yago310""" +281 9 model """ermlp""" +281 9 loss """bceaftersigmoid""" +281 9 regularizer """no""" +281 9 optimizer """adam""" +281 9 training_loop """owa""" +281 9 negative_sampler """basic""" +281 9 evaluator """rankbased""" +281 10 dataset """yago310""" +281 10 model """ermlp""" +281 10 loss """bceaftersigmoid""" +281 10 regularizer """no""" +281 10 optimizer """adam""" +281 10 training_loop """owa""" +281 10 negative_sampler """basic""" +281 10 evaluator """rankbased""" +281 11 dataset """yago310""" +281 11 model """ermlp""" +281 11 loss """bceaftersigmoid""" +281 11 regularizer """no""" +281 11 optimizer """adam""" +281 11 training_loop """owa""" +281 11 negative_sampler """basic""" +281 11 evaluator """rankbased""" +281 12 dataset """yago310""" +281 12 model """ermlp""" +281 12 loss """bceaftersigmoid""" +281 12 regularizer """no""" +281 12 optimizer """adam""" +281 12 training_loop """owa""" +281 12 negative_sampler """basic""" +281 12 evaluator """rankbased""" +281 13 dataset """yago310""" +281 13 model """ermlp""" +281 13 loss """bceaftersigmoid""" +281 13 regularizer """no""" +281 13 optimizer """adam""" +281 13 training_loop """owa""" +281 13 negative_sampler """basic""" +281 13 evaluator """rankbased""" +281 14 dataset """yago310""" +281 14 model """ermlp""" +281 14 loss """bceaftersigmoid""" +281 14 regularizer """no""" +281 14 optimizer """adam""" +281 14 training_loop """owa""" +281 14 negative_sampler """basic""" +281 14 evaluator """rankbased""" +281 15 dataset """yago310""" +281 15 model """ermlp""" +281 15 loss """bceaftersigmoid""" +281 15 regularizer 
"""no""" +281 15 optimizer """adam""" +281 15 training_loop """owa""" +281 15 negative_sampler """basic""" +281 15 evaluator """rankbased""" +281 16 dataset """yago310""" +281 16 model """ermlp""" +281 16 loss """bceaftersigmoid""" +281 16 regularizer """no""" +281 16 optimizer """adam""" +281 16 training_loop """owa""" +281 16 negative_sampler """basic""" +281 16 evaluator """rankbased""" +281 17 dataset """yago310""" +281 17 model """ermlp""" +281 17 loss """bceaftersigmoid""" +281 17 regularizer """no""" +281 17 optimizer """adam""" +281 17 training_loop """owa""" +281 17 negative_sampler """basic""" +281 17 evaluator """rankbased""" +281 18 dataset """yago310""" +281 18 model """ermlp""" +281 18 loss """bceaftersigmoid""" +281 18 regularizer """no""" +281 18 optimizer """adam""" +281 18 training_loop """owa""" +281 18 negative_sampler """basic""" +281 18 evaluator """rankbased""" +281 19 dataset """yago310""" +281 19 model """ermlp""" +281 19 loss """bceaftersigmoid""" +281 19 regularizer """no""" +281 19 optimizer """adam""" +281 19 training_loop """owa""" +281 19 negative_sampler """basic""" +281 19 evaluator """rankbased""" +281 20 dataset """yago310""" +281 20 model """ermlp""" +281 20 loss """bceaftersigmoid""" +281 20 regularizer """no""" +281 20 optimizer """adam""" +281 20 training_loop """owa""" +281 20 negative_sampler """basic""" +281 20 evaluator """rankbased""" +281 21 dataset """yago310""" +281 21 model """ermlp""" +281 21 loss """bceaftersigmoid""" +281 21 regularizer """no""" +281 21 optimizer """adam""" +281 21 training_loop """owa""" +281 21 negative_sampler """basic""" +281 21 evaluator """rankbased""" +281 22 dataset """yago310""" +281 22 model """ermlp""" +281 22 loss """bceaftersigmoid""" +281 22 regularizer """no""" +281 22 optimizer """adam""" +281 22 training_loop """owa""" +281 22 negative_sampler """basic""" +281 22 evaluator """rankbased""" +282 1 model.embedding_dim 1.0 +282 1 loss.margin 17.248398183944637 +282 1 
loss.adversarial_temperature 0.8420707304913295 +282 1 optimizer.lr 0.0011018852873140076 +282 1 negative_sampler.num_negs_per_pos 15.0 +282 1 training.batch_size 3.0 +282 2 model.embedding_dim 2.0 +282 2 loss.margin 28.596933077998685 +282 2 loss.adversarial_temperature 0.2244329269289671 +282 2 optimizer.lr 0.06368093474345382 +282 2 negative_sampler.num_negs_per_pos 42.0 +282 2 training.batch_size 0.0 +282 3 model.embedding_dim 1.0 +282 3 loss.margin 20.344298195626113 +282 3 loss.adversarial_temperature 0.24840738134468215 +282 3 optimizer.lr 0.004198952989891113 +282 3 negative_sampler.num_negs_per_pos 34.0 +282 3 training.batch_size 1.0 +282 4 model.embedding_dim 1.0 +282 4 loss.margin 29.805862401213755 +282 4 loss.adversarial_temperature 0.16708813648666282 +282 4 optimizer.lr 0.0010670112526023601 +282 4 negative_sampler.num_negs_per_pos 0.0 +282 4 training.batch_size 1.0 +282 5 model.embedding_dim 1.0 +282 5 loss.margin 13.569022773121137 +282 5 loss.adversarial_temperature 0.34230730275670407 +282 5 optimizer.lr 0.007792308827216079 +282 5 negative_sampler.num_negs_per_pos 26.0 +282 5 training.batch_size 3.0 +282 6 model.embedding_dim 1.0 +282 6 loss.margin 13.33037078646031 +282 6 loss.adversarial_temperature 0.2989406583039699 +282 6 optimizer.lr 0.0013554800055280297 +282 6 negative_sampler.num_negs_per_pos 7.0 +282 6 training.batch_size 0.0 +282 7 model.embedding_dim 0.0 +282 7 loss.margin 1.5594847195117012 +282 7 loss.adversarial_temperature 0.5079911823759117 +282 7 optimizer.lr 0.0049716505316530525 +282 7 negative_sampler.num_negs_per_pos 45.0 +282 7 training.batch_size 3.0 +282 8 model.embedding_dim 2.0 +282 8 loss.margin 24.02053873536306 +282 8 loss.adversarial_temperature 0.3906407418943356 +282 8 optimizer.lr 0.008480509953370604 +282 8 negative_sampler.num_negs_per_pos 7.0 +282 8 training.batch_size 2.0 +282 9 model.embedding_dim 0.0 +282 9 loss.margin 17.619960691324874 +282 9 loss.adversarial_temperature 0.8602873034623567 +282 9 
optimizer.lr 0.008672489084118596 +282 9 negative_sampler.num_negs_per_pos 0.0 +282 9 training.batch_size 2.0 +282 10 model.embedding_dim 0.0 +282 10 loss.margin 10.409689307679269 +282 10 loss.adversarial_temperature 0.9061587370774727 +282 10 optimizer.lr 0.03644636424967334 +282 10 negative_sampler.num_negs_per_pos 33.0 +282 10 training.batch_size 2.0 +282 11 model.embedding_dim 2.0 +282 11 loss.margin 15.929491085146319 +282 11 loss.adversarial_temperature 0.12373675908268783 +282 11 optimizer.lr 0.024929078241438855 +282 11 negative_sampler.num_negs_per_pos 42.0 +282 11 training.batch_size 1.0 +282 12 model.embedding_dim 0.0 +282 12 loss.margin 17.53167296019036 +282 12 loss.adversarial_temperature 0.6450711215885674 +282 12 optimizer.lr 0.02919102013501502 +282 12 negative_sampler.num_negs_per_pos 48.0 +282 12 training.batch_size 0.0 +282 13 model.embedding_dim 1.0 +282 13 loss.margin 23.557165170363835 +282 13 loss.adversarial_temperature 0.6027256638407257 +282 13 optimizer.lr 0.005157193673299173 +282 13 negative_sampler.num_negs_per_pos 25.0 +282 13 training.batch_size 1.0 +282 14 model.embedding_dim 0.0 +282 14 loss.margin 6.84175422807083 +282 14 loss.adversarial_temperature 0.5331468494355732 +282 14 optimizer.lr 0.004416189942230177 +282 14 negative_sampler.num_negs_per_pos 40.0 +282 14 training.batch_size 0.0 +282 1 dataset """yago310""" +282 1 model """ermlp""" +282 1 loss """nssa""" +282 1 regularizer """no""" +282 1 optimizer """adam""" +282 1 training_loop """owa""" +282 1 negative_sampler """basic""" +282 1 evaluator """rankbased""" +282 2 dataset """yago310""" +282 2 model """ermlp""" +282 2 loss """nssa""" +282 2 regularizer """no""" +282 2 optimizer """adam""" +282 2 training_loop """owa""" +282 2 negative_sampler """basic""" +282 2 evaluator """rankbased""" +282 3 dataset """yago310""" +282 3 model """ermlp""" +282 3 loss """nssa""" +282 3 regularizer """no""" +282 3 optimizer """adam""" +282 3 training_loop """owa""" +282 3 negative_sampler 
"""basic""" +282 3 evaluator """rankbased""" +282 4 dataset """yago310""" +282 4 model """ermlp""" +282 4 loss """nssa""" +282 4 regularizer """no""" +282 4 optimizer """adam""" +282 4 training_loop """owa""" +282 4 negative_sampler """basic""" +282 4 evaluator """rankbased""" +282 5 dataset """yago310""" +282 5 model """ermlp""" +282 5 loss """nssa""" +282 5 regularizer """no""" +282 5 optimizer """adam""" +282 5 training_loop """owa""" +282 5 negative_sampler """basic""" +282 5 evaluator """rankbased""" +282 6 dataset """yago310""" +282 6 model """ermlp""" +282 6 loss """nssa""" +282 6 regularizer """no""" +282 6 optimizer """adam""" +282 6 training_loop """owa""" +282 6 negative_sampler """basic""" +282 6 evaluator """rankbased""" +282 7 dataset """yago310""" +282 7 model """ermlp""" +282 7 loss """nssa""" +282 7 regularizer """no""" +282 7 optimizer """adam""" +282 7 training_loop """owa""" +282 7 negative_sampler """basic""" +282 7 evaluator """rankbased""" +282 8 dataset """yago310""" +282 8 model """ermlp""" +282 8 loss """nssa""" +282 8 regularizer """no""" +282 8 optimizer """adam""" +282 8 training_loop """owa""" +282 8 negative_sampler """basic""" +282 8 evaluator """rankbased""" +282 9 dataset """yago310""" +282 9 model """ermlp""" +282 9 loss """nssa""" +282 9 regularizer """no""" +282 9 optimizer """adam""" +282 9 training_loop """owa""" +282 9 negative_sampler """basic""" +282 9 evaluator """rankbased""" +282 10 dataset """yago310""" +282 10 model """ermlp""" +282 10 loss """nssa""" +282 10 regularizer """no""" +282 10 optimizer """adam""" +282 10 training_loop """owa""" +282 10 negative_sampler """basic""" +282 10 evaluator """rankbased""" +282 11 dataset """yago310""" +282 11 model """ermlp""" +282 11 loss """nssa""" +282 11 regularizer """no""" +282 11 optimizer """adam""" +282 11 training_loop """owa""" +282 11 negative_sampler """basic""" +282 11 evaluator """rankbased""" +282 12 dataset """yago310""" +282 12 model """ermlp""" +282 12 loss 
"""nssa""" +282 12 regularizer """no""" +282 12 optimizer """adam""" +282 12 training_loop """owa""" +282 12 negative_sampler """basic""" +282 12 evaluator """rankbased""" +282 13 dataset """yago310""" +282 13 model """ermlp""" +282 13 loss """nssa""" +282 13 regularizer """no""" +282 13 optimizer """adam""" +282 13 training_loop """owa""" +282 13 negative_sampler """basic""" +282 13 evaluator """rankbased""" +282 14 dataset """yago310""" +282 14 model """ermlp""" +282 14 loss """nssa""" +282 14 regularizer """no""" +282 14 optimizer """adam""" +282 14 training_loop """owa""" +282 14 negative_sampler """basic""" +282 14 evaluator """rankbased""" +283 1 model.embedding_dim 1.0 +283 1 loss.margin 5.73921609090286 +283 1 loss.adversarial_temperature 0.9783879866248271 +283 1 optimizer.lr 0.005519334119045062 +283 1 negative_sampler.num_negs_per_pos 22.0 +283 1 training.batch_size 2.0 +283 2 model.embedding_dim 0.0 +283 2 loss.margin 13.723843678500158 +283 2 loss.adversarial_temperature 0.7347517921891374 +283 2 optimizer.lr 0.0010311363883800978 +283 2 negative_sampler.num_negs_per_pos 29.0 +283 2 training.batch_size 3.0 +283 3 model.embedding_dim 1.0 +283 3 loss.margin 13.442259789373287 +283 3 loss.adversarial_temperature 0.3863238510987367 +283 3 optimizer.lr 0.08445825724636427 +283 3 negative_sampler.num_negs_per_pos 40.0 +283 3 training.batch_size 1.0 +283 4 model.embedding_dim 1.0 +283 4 loss.margin 5.439636504937229 +283 4 loss.adversarial_temperature 0.5472329606319158 +283 4 optimizer.lr 0.028201371842874242 +283 4 negative_sampler.num_negs_per_pos 44.0 +283 4 training.batch_size 0.0 +283 5 model.embedding_dim 1.0 +283 5 loss.margin 24.480984693398646 +283 5 loss.adversarial_temperature 0.3816637274116451 +283 5 optimizer.lr 0.002832226236923749 +283 5 negative_sampler.num_negs_per_pos 35.0 +283 5 training.batch_size 1.0 +283 6 model.embedding_dim 1.0 +283 6 loss.margin 13.289702307525586 +283 6 loss.adversarial_temperature 0.8239513556459748 +283 6 
optimizer.lr 0.04039249257821543 +283 6 negative_sampler.num_negs_per_pos 23.0 +283 6 training.batch_size 0.0 +283 7 model.embedding_dim 2.0 +283 7 loss.margin 6.587941568126528 +283 7 loss.adversarial_temperature 0.7642326417073408 +283 7 optimizer.lr 0.0016264731446087193 +283 7 negative_sampler.num_negs_per_pos 22.0 +283 7 training.batch_size 1.0 +283 8 model.embedding_dim 0.0 +283 8 loss.margin 5.22338676110273 +283 8 loss.adversarial_temperature 0.3568330638865036 +283 8 optimizer.lr 0.00644333636450842 +283 8 negative_sampler.num_negs_per_pos 13.0 +283 8 training.batch_size 3.0 +283 9 model.embedding_dim 1.0 +283 9 loss.margin 17.409766730969334 +283 9 loss.adversarial_temperature 0.413199713722074 +283 9 optimizer.lr 0.0012555759204178193 +283 9 negative_sampler.num_negs_per_pos 43.0 +283 9 training.batch_size 1.0 +283 10 model.embedding_dim 1.0 +283 10 loss.margin 5.716494953089366 +283 10 loss.adversarial_temperature 0.9803692164937996 +283 10 optimizer.lr 0.009566045540044367 +283 10 negative_sampler.num_negs_per_pos 13.0 +283 10 training.batch_size 0.0 +283 11 model.embedding_dim 0.0 +283 11 loss.margin 13.638330166045325 +283 11 loss.adversarial_temperature 0.3896784782590449 +283 11 optimizer.lr 0.0010573812106858345 +283 11 negative_sampler.num_negs_per_pos 12.0 +283 11 training.batch_size 0.0 +283 12 model.embedding_dim 1.0 +283 12 loss.margin 2.741232575811966 +283 12 loss.adversarial_temperature 0.6340927965738155 +283 12 optimizer.lr 0.0029106191560172175 +283 12 negative_sampler.num_negs_per_pos 0.0 +283 12 training.batch_size 0.0 +283 13 model.embedding_dim 2.0 +283 13 loss.margin 27.306583159673753 +283 13 loss.adversarial_temperature 0.711612877236851 +283 13 optimizer.lr 0.004693132106858303 +283 13 negative_sampler.num_negs_per_pos 26.0 +283 13 training.batch_size 3.0 +283 14 model.embedding_dim 1.0 +283 14 loss.margin 8.202920934871983 +283 14 loss.adversarial_temperature 0.9674478970479745 +283 14 optimizer.lr 0.0017239493000298187 +283 14 
negative_sampler.num_negs_per_pos 11.0 +283 14 training.batch_size 2.0 +283 15 model.embedding_dim 0.0 +283 15 loss.margin 1.9319648116710755 +283 15 loss.adversarial_temperature 0.5111061905198406 +283 15 optimizer.lr 0.0438788406945843 +283 15 negative_sampler.num_negs_per_pos 42.0 +283 15 training.batch_size 0.0 +283 16 model.embedding_dim 2.0 +283 16 loss.margin 24.084636994148887 +283 16 loss.adversarial_temperature 0.4833360688532288 +283 16 optimizer.lr 0.0748898694452157 +283 16 negative_sampler.num_negs_per_pos 15.0 +283 16 training.batch_size 1.0 +283 17 model.embedding_dim 0.0 +283 17 loss.margin 17.271497633118173 +283 17 loss.adversarial_temperature 0.9666579773940103 +283 17 optimizer.lr 0.0026797216910523615 +283 17 negative_sampler.num_negs_per_pos 19.0 +283 17 training.batch_size 1.0 +283 18 model.embedding_dim 2.0 +283 18 loss.margin 2.5885754007929735 +283 18 loss.adversarial_temperature 0.3409126270212377 +283 18 optimizer.lr 0.031231106320915292 +283 18 negative_sampler.num_negs_per_pos 34.0 +283 18 training.batch_size 3.0 +283 19 model.embedding_dim 2.0 +283 19 loss.margin 5.624311435202397 +283 19 loss.adversarial_temperature 0.8428436758567895 +283 19 optimizer.lr 0.04910139016547322 +283 19 negative_sampler.num_negs_per_pos 26.0 +283 19 training.batch_size 0.0 +283 20 model.embedding_dim 1.0 +283 20 loss.margin 10.957847810176368 +283 20 loss.adversarial_temperature 0.49494095289602136 +283 20 optimizer.lr 0.011796072038476275 +283 20 negative_sampler.num_negs_per_pos 18.0 +283 20 training.batch_size 2.0 +283 1 dataset """yago310""" +283 1 model """ermlp""" +283 1 loss """nssa""" +283 1 regularizer """no""" +283 1 optimizer """adam""" +283 1 training_loop """owa""" +283 1 negative_sampler """basic""" +283 1 evaluator """rankbased""" +283 2 dataset """yago310""" +283 2 model """ermlp""" +283 2 loss """nssa""" +283 2 regularizer """no""" +283 2 optimizer """adam""" +283 2 training_loop """owa""" +283 2 negative_sampler """basic""" +283 2 
evaluator """rankbased""" +283 3 dataset """yago310""" +283 3 model """ermlp""" +283 3 loss """nssa""" +283 3 regularizer """no""" +283 3 optimizer """adam""" +283 3 training_loop """owa""" +283 3 negative_sampler """basic""" +283 3 evaluator """rankbased""" +283 4 dataset """yago310""" +283 4 model """ermlp""" +283 4 loss """nssa""" +283 4 regularizer """no""" +283 4 optimizer """adam""" +283 4 training_loop """owa""" +283 4 negative_sampler """basic""" +283 4 evaluator """rankbased""" +283 5 dataset """yago310""" +283 5 model """ermlp""" +283 5 loss """nssa""" +283 5 regularizer """no""" +283 5 optimizer """adam""" +283 5 training_loop """owa""" +283 5 negative_sampler """basic""" +283 5 evaluator """rankbased""" +283 6 dataset """yago310""" +283 6 model """ermlp""" +283 6 loss """nssa""" +283 6 regularizer """no""" +283 6 optimizer """adam""" +283 6 training_loop """owa""" +283 6 negative_sampler """basic""" +283 6 evaluator """rankbased""" +283 7 dataset """yago310""" +283 7 model """ermlp""" +283 7 loss """nssa""" +283 7 regularizer """no""" +283 7 optimizer """adam""" +283 7 training_loop """owa""" +283 7 negative_sampler """basic""" +283 7 evaluator """rankbased""" +283 8 dataset """yago310""" +283 8 model """ermlp""" +283 8 loss """nssa""" +283 8 regularizer """no""" +283 8 optimizer """adam""" +283 8 training_loop """owa""" +283 8 negative_sampler """basic""" +283 8 evaluator """rankbased""" +283 9 dataset """yago310""" +283 9 model """ermlp""" +283 9 loss """nssa""" +283 9 regularizer """no""" +283 9 optimizer """adam""" +283 9 training_loop """owa""" +283 9 negative_sampler """basic""" +283 9 evaluator """rankbased""" +283 10 dataset """yago310""" +283 10 model """ermlp""" +283 10 loss """nssa""" +283 10 regularizer """no""" +283 10 optimizer """adam""" +283 10 training_loop """owa""" +283 10 negative_sampler """basic""" +283 10 evaluator """rankbased""" +283 11 dataset """yago310""" +283 11 model """ermlp""" +283 11 loss """nssa""" +283 11 regularizer 
"""no""" +283 11 optimizer """adam""" +283 11 training_loop """owa""" +283 11 negative_sampler """basic""" +283 11 evaluator """rankbased""" +283 12 dataset """yago310""" +283 12 model """ermlp""" +283 12 loss """nssa""" +283 12 regularizer """no""" +283 12 optimizer """adam""" +283 12 training_loop """owa""" +283 12 negative_sampler """basic""" +283 12 evaluator """rankbased""" +283 13 dataset """yago310""" +283 13 model """ermlp""" +283 13 loss """nssa""" +283 13 regularizer """no""" +283 13 optimizer """adam""" +283 13 training_loop """owa""" +283 13 negative_sampler """basic""" +283 13 evaluator """rankbased""" +283 14 dataset """yago310""" +283 14 model """ermlp""" +283 14 loss """nssa""" +283 14 regularizer """no""" +283 14 optimizer """adam""" +283 14 training_loop """owa""" +283 14 negative_sampler """basic""" +283 14 evaluator """rankbased""" +283 15 dataset """yago310""" +283 15 model """ermlp""" +283 15 loss """nssa""" +283 15 regularizer """no""" +283 15 optimizer """adam""" +283 15 training_loop """owa""" +283 15 negative_sampler """basic""" +283 15 evaluator """rankbased""" +283 16 dataset """yago310""" +283 16 model """ermlp""" +283 16 loss """nssa""" +283 16 regularizer """no""" +283 16 optimizer """adam""" +283 16 training_loop """owa""" +283 16 negative_sampler """basic""" +283 16 evaluator """rankbased""" +283 17 dataset """yago310""" +283 17 model """ermlp""" +283 17 loss """nssa""" +283 17 regularizer """no""" +283 17 optimizer """adam""" +283 17 training_loop """owa""" +283 17 negative_sampler """basic""" +283 17 evaluator """rankbased""" +283 18 dataset """yago310""" +283 18 model """ermlp""" +283 18 loss """nssa""" +283 18 regularizer """no""" +283 18 optimizer """adam""" +283 18 training_loop """owa""" +283 18 negative_sampler """basic""" +283 18 evaluator """rankbased""" +283 19 dataset """yago310""" +283 19 model """ermlp""" +283 19 loss """nssa""" +283 19 regularizer """no""" +283 19 optimizer """adam""" +283 19 training_loop """owa""" 
+283 19 negative_sampler """basic""" +283 19 evaluator """rankbased""" +283 20 dataset """yago310""" +283 20 model """ermlp""" +283 20 loss """nssa""" +283 20 regularizer """no""" +283 20 optimizer """adam""" +283 20 training_loop """owa""" +283 20 negative_sampler """basic""" +283 20 evaluator """rankbased""" +284 1 model.embedding_dim 2.0 +284 1 loss.margin 25.001461183970182 +284 1 loss.adversarial_temperature 0.6681150114253915 +284 1 optimizer.lr 0.0021137668736313244 +284 1 negative_sampler.num_negs_per_pos 75.0 +284 1 training.batch_size 0.0 +284 2 model.embedding_dim 1.0 +284 2 loss.margin 19.467894501227608 +284 2 loss.adversarial_temperature 0.22529083760637614 +284 2 optimizer.lr 0.0017573825951435283 +284 2 negative_sampler.num_negs_per_pos 81.0 +284 2 training.batch_size 1.0 +284 3 model.embedding_dim 0.0 +284 3 loss.margin 24.54453293679026 +284 3 loss.adversarial_temperature 0.3098403832535308 +284 3 optimizer.lr 0.0065617632617218855 +284 3 negative_sampler.num_negs_per_pos 3.0 +284 3 training.batch_size 0.0 +284 4 model.embedding_dim 1.0 +284 4 loss.margin 11.378883947300555 +284 4 loss.adversarial_temperature 0.5187635919132946 +284 4 optimizer.lr 0.012836607719936659 +284 4 negative_sampler.num_negs_per_pos 55.0 +284 4 training.batch_size 2.0 +284 5 model.embedding_dim 0.0 +284 5 loss.margin 16.661958254363448 +284 5 loss.adversarial_temperature 0.8467307803500886 +284 5 optimizer.lr 0.06215146989691601 +284 5 negative_sampler.num_negs_per_pos 2.0 +284 5 training.batch_size 1.0 +284 6 model.embedding_dim 0.0 +284 6 loss.margin 10.125576003060823 +284 6 loss.adversarial_temperature 0.9453705422753317 +284 6 optimizer.lr 0.02555739540663877 +284 6 negative_sampler.num_negs_per_pos 46.0 +284 6 training.batch_size 1.0 +284 7 model.embedding_dim 0.0 +284 7 loss.margin 7.032312578008631 +284 7 loss.adversarial_temperature 0.9065247062351616 +284 7 optimizer.lr 0.018558890109988967 +284 7 negative_sampler.num_negs_per_pos 19.0 +284 7 training.batch_size 
2.0 +284 8 model.embedding_dim 1.0 +284 8 loss.margin 19.581199908897396 +284 8 loss.adversarial_temperature 0.4121990608602931 +284 8 optimizer.lr 0.09037063186809366 +284 8 negative_sampler.num_negs_per_pos 62.0 +284 8 training.batch_size 2.0 +284 9 model.embedding_dim 2.0 +284 9 loss.margin 19.45990409377265 +284 9 loss.adversarial_temperature 0.8158779435564899 +284 9 optimizer.lr 0.027891589737754765 +284 9 negative_sampler.num_negs_per_pos 66.0 +284 9 training.batch_size 2.0 +284 10 model.embedding_dim 1.0 +284 10 loss.margin 1.339819306901844 +284 10 loss.adversarial_temperature 0.16687419233869344 +284 10 optimizer.lr 0.001872096323083437 +284 10 negative_sampler.num_negs_per_pos 98.0 +284 10 training.batch_size 0.0 +284 11 model.embedding_dim 1.0 +284 11 loss.margin 7.39926340430161 +284 11 loss.adversarial_temperature 0.8419422251180233 +284 11 optimizer.lr 0.002612356825759891 +284 11 negative_sampler.num_negs_per_pos 4.0 +284 11 training.batch_size 1.0 +284 12 model.embedding_dim 1.0 +284 12 loss.margin 14.463803072132349 +284 12 loss.adversarial_temperature 0.8311499420363391 +284 12 optimizer.lr 0.03250580508561798 +284 12 negative_sampler.num_negs_per_pos 51.0 +284 12 training.batch_size 0.0 +284 13 model.embedding_dim 2.0 +284 13 loss.margin 9.761718822450517 +284 13 loss.adversarial_temperature 0.45572962822627305 +284 13 optimizer.lr 0.0411830517892844 +284 13 negative_sampler.num_negs_per_pos 57.0 +284 13 training.batch_size 0.0 +284 14 model.embedding_dim 2.0 +284 14 loss.margin 22.8530725012838 +284 14 loss.adversarial_temperature 0.2533375424208652 +284 14 optimizer.lr 0.06496462519250662 +284 14 negative_sampler.num_negs_per_pos 16.0 +284 14 training.batch_size 0.0 +284 15 model.embedding_dim 2.0 +284 15 loss.margin 23.873884647141978 +284 15 loss.adversarial_temperature 0.8505486307066094 +284 15 optimizer.lr 0.020069204634271524 +284 15 negative_sampler.num_negs_per_pos 57.0 +284 15 training.batch_size 0.0 +284 1 dataset """fb15k237""" +284 
1 model """hole""" +284 1 loss """nssa""" +284 1 regularizer """no""" +284 1 optimizer """adam""" +284 1 training_loop """owa""" +284 1 negative_sampler """basic""" +284 1 evaluator """rankbased""" +284 2 dataset """fb15k237""" +284 2 model """hole""" +284 2 loss """nssa""" +284 2 regularizer """no""" +284 2 optimizer """adam""" +284 2 training_loop """owa""" +284 2 negative_sampler """basic""" +284 2 evaluator """rankbased""" +284 3 dataset """fb15k237""" +284 3 model """hole""" +284 3 loss """nssa""" +284 3 regularizer """no""" +284 3 optimizer """adam""" +284 3 training_loop """owa""" +284 3 negative_sampler """basic""" +284 3 evaluator """rankbased""" +284 4 dataset """fb15k237""" +284 4 model """hole""" +284 4 loss """nssa""" +284 4 regularizer """no""" +284 4 optimizer """adam""" +284 4 training_loop """owa""" +284 4 negative_sampler """basic""" +284 4 evaluator """rankbased""" +284 5 dataset """fb15k237""" +284 5 model """hole""" +284 5 loss """nssa""" +284 5 regularizer """no""" +284 5 optimizer """adam""" +284 5 training_loop """owa""" +284 5 negative_sampler """basic""" +284 5 evaluator """rankbased""" +284 6 dataset """fb15k237""" +284 6 model """hole""" +284 6 loss """nssa""" +284 6 regularizer """no""" +284 6 optimizer """adam""" +284 6 training_loop """owa""" +284 6 negative_sampler """basic""" +284 6 evaluator """rankbased""" +284 7 dataset """fb15k237""" +284 7 model """hole""" +284 7 loss """nssa""" +284 7 regularizer """no""" +284 7 optimizer """adam""" +284 7 training_loop """owa""" +284 7 negative_sampler """basic""" +284 7 evaluator """rankbased""" +284 8 dataset """fb15k237""" +284 8 model """hole""" +284 8 loss """nssa""" +284 8 regularizer """no""" +284 8 optimizer """adam""" +284 8 training_loop """owa""" +284 8 negative_sampler """basic""" +284 8 evaluator """rankbased""" +284 9 dataset """fb15k237""" +284 9 model """hole""" +284 9 loss """nssa""" +284 9 regularizer """no""" +284 9 optimizer """adam""" +284 9 training_loop """owa""" +284 9 
negative_sampler """basic""" +284 9 evaluator """rankbased""" +284 10 dataset """fb15k237""" +284 10 model """hole""" +284 10 loss """nssa""" +284 10 regularizer """no""" +284 10 optimizer """adam""" +284 10 training_loop """owa""" +284 10 negative_sampler """basic""" +284 10 evaluator """rankbased""" +284 11 dataset """fb15k237""" +284 11 model """hole""" +284 11 loss """nssa""" +284 11 regularizer """no""" +284 11 optimizer """adam""" +284 11 training_loop """owa""" +284 11 negative_sampler """basic""" +284 11 evaluator """rankbased""" +284 12 dataset """fb15k237""" +284 12 model """hole""" +284 12 loss """nssa""" +284 12 regularizer """no""" +284 12 optimizer """adam""" +284 12 training_loop """owa""" +284 12 negative_sampler """basic""" +284 12 evaluator """rankbased""" +284 13 dataset """fb15k237""" +284 13 model """hole""" +284 13 loss """nssa""" +284 13 regularizer """no""" +284 13 optimizer """adam""" +284 13 training_loop """owa""" +284 13 negative_sampler """basic""" +284 13 evaluator """rankbased""" +284 14 dataset """fb15k237""" +284 14 model """hole""" +284 14 loss """nssa""" +284 14 regularizer """no""" +284 14 optimizer """adam""" +284 14 training_loop """owa""" +284 14 negative_sampler """basic""" +284 14 evaluator """rankbased""" +284 15 dataset """fb15k237""" +284 15 model """hole""" +284 15 loss """nssa""" +284 15 regularizer """no""" +284 15 optimizer """adam""" +284 15 training_loop """owa""" +284 15 negative_sampler """basic""" +284 15 evaluator """rankbased""" +285 1 model.embedding_dim 0.0 +285 1 loss.margin 21.659310812886734 +285 1 loss.adversarial_temperature 0.49612269426400674 +285 1 optimizer.lr 0.03734080261564525 +285 1 negative_sampler.num_negs_per_pos 83.0 +285 1 training.batch_size 1.0 +285 2 model.embedding_dim 2.0 +285 2 loss.margin 2.184842945646779 +285 2 loss.adversarial_temperature 0.822526537255576 +285 2 optimizer.lr 0.004075523339550379 +285 2 negative_sampler.num_negs_per_pos 83.0 +285 2 training.batch_size 0.0 +285 3 
model.embedding_dim 2.0 +285 3 loss.margin 12.995103460606813 +285 3 loss.adversarial_temperature 0.9755715279854382 +285 3 optimizer.lr 0.01040592982730956 +285 3 negative_sampler.num_negs_per_pos 17.0 +285 3 training.batch_size 2.0 +285 4 model.embedding_dim 0.0 +285 4 loss.margin 7.901307170882081 +285 4 loss.adversarial_temperature 0.6838178741639682 +285 4 optimizer.lr 0.01481400948026845 +285 4 negative_sampler.num_negs_per_pos 37.0 +285 4 training.batch_size 0.0 +285 5 model.embedding_dim 1.0 +285 5 loss.margin 13.344572337146868 +285 5 loss.adversarial_temperature 0.6827890483026389 +285 5 optimizer.lr 0.0010721022961978522 +285 5 negative_sampler.num_negs_per_pos 51.0 +285 5 training.batch_size 0.0 +285 6 model.embedding_dim 2.0 +285 6 loss.margin 22.345298339048565 +285 6 loss.adversarial_temperature 0.6260059579035847 +285 6 optimizer.lr 0.020179753188227272 +285 6 negative_sampler.num_negs_per_pos 19.0 +285 6 training.batch_size 0.0 +285 7 model.embedding_dim 2.0 +285 7 loss.margin 10.660059407515657 +285 7 loss.adversarial_temperature 0.7146769453712364 +285 7 optimizer.lr 0.02791301751307037 +285 7 negative_sampler.num_negs_per_pos 86.0 +285 7 training.batch_size 0.0 +285 8 model.embedding_dim 2.0 +285 8 loss.margin 10.195958138582103 +285 8 loss.adversarial_temperature 0.9779026085143531 +285 8 optimizer.lr 0.0016600689480715644 +285 8 negative_sampler.num_negs_per_pos 41.0 +285 8 training.batch_size 2.0 +285 9 model.embedding_dim 1.0 +285 9 loss.margin 8.118708156577817 +285 9 loss.adversarial_temperature 0.21712788109641598 +285 9 optimizer.lr 0.02670365802393188 +285 9 negative_sampler.num_negs_per_pos 63.0 +285 9 training.batch_size 1.0 +285 10 model.embedding_dim 2.0 +285 10 loss.margin 28.799486389367967 +285 10 loss.adversarial_temperature 0.31984122322101544 +285 10 optimizer.lr 0.03253922906556551 +285 10 negative_sampler.num_negs_per_pos 70.0 +285 10 training.batch_size 1.0 +285 11 model.embedding_dim 0.0 +285 11 loss.margin 
24.4986742211785 +285 11 loss.adversarial_temperature 0.6669924019788378 +285 11 optimizer.lr 0.008054566412530965 +285 11 negative_sampler.num_negs_per_pos 56.0 +285 11 training.batch_size 2.0 +285 12 model.embedding_dim 0.0 +285 12 loss.margin 11.2119536797093 +285 12 loss.adversarial_temperature 0.30113998197368663 +285 12 optimizer.lr 0.007333888504710019 +285 12 negative_sampler.num_negs_per_pos 95.0 +285 12 training.batch_size 2.0 +285 13 model.embedding_dim 2.0 +285 13 loss.margin 19.329704609157538 +285 13 loss.adversarial_temperature 0.40885390308906533 +285 13 optimizer.lr 0.004020197288881427 +285 13 negative_sampler.num_negs_per_pos 92.0 +285 13 training.batch_size 2.0 +285 14 model.embedding_dim 1.0 +285 14 loss.margin 17.073559091077307 +285 14 loss.adversarial_temperature 0.8784240993435734 +285 14 optimizer.lr 0.011190190238875235 +285 14 negative_sampler.num_negs_per_pos 62.0 +285 14 training.batch_size 1.0 +285 15 model.embedding_dim 2.0 +285 15 loss.margin 2.691796812972328 +285 15 loss.adversarial_temperature 0.9407134099307788 +285 15 optimizer.lr 0.016780501797543174 +285 15 negative_sampler.num_negs_per_pos 57.0 +285 15 training.batch_size 2.0 +285 16 model.embedding_dim 0.0 +285 16 loss.margin 18.58138346598789 +285 16 loss.adversarial_temperature 0.7229320582311295 +285 16 optimizer.lr 0.029918254658583898 +285 16 negative_sampler.num_negs_per_pos 81.0 +285 16 training.batch_size 0.0 +285 17 model.embedding_dim 0.0 +285 17 loss.margin 27.439154506910562 +285 17 loss.adversarial_temperature 0.8834013721145508 +285 17 optimizer.lr 0.07389206849981834 +285 17 negative_sampler.num_negs_per_pos 50.0 +285 17 training.batch_size 1.0 +285 18 model.embedding_dim 2.0 +285 18 loss.margin 15.525183806414159 +285 18 loss.adversarial_temperature 0.3313434353064238 +285 18 optimizer.lr 0.08703234271295561 +285 18 negative_sampler.num_negs_per_pos 87.0 +285 18 training.batch_size 1.0 +285 19 model.embedding_dim 1.0 +285 19 loss.margin 11.730152746153642 
+285 19 loss.adversarial_temperature 0.3216041069583845 +285 19 optimizer.lr 0.0010897389407425815 +285 19 negative_sampler.num_negs_per_pos 83.0 +285 19 training.batch_size 0.0 +285 20 model.embedding_dim 1.0 +285 20 loss.margin 13.448893454315622 +285 20 loss.adversarial_temperature 0.8746965068065894 +285 20 optimizer.lr 0.04101692600133251 +285 20 negative_sampler.num_negs_per_pos 29.0 +285 20 training.batch_size 2.0 +285 21 model.embedding_dim 2.0 +285 21 loss.margin 3.7768432616159053 +285 21 loss.adversarial_temperature 0.7323993385356147 +285 21 optimizer.lr 0.034011844811274665 +285 21 negative_sampler.num_negs_per_pos 79.0 +285 21 training.batch_size 1.0 +285 22 model.embedding_dim 2.0 +285 22 loss.margin 19.3507197762204 +285 22 loss.adversarial_temperature 0.4355234663813227 +285 22 optimizer.lr 0.04284503931728287 +285 22 negative_sampler.num_negs_per_pos 46.0 +285 22 training.batch_size 2.0 +285 23 model.embedding_dim 2.0 +285 23 loss.margin 7.015676754382314 +285 23 loss.adversarial_temperature 0.7852879116920812 +285 23 optimizer.lr 0.0233839557249086 +285 23 negative_sampler.num_negs_per_pos 6.0 +285 23 training.batch_size 0.0 +285 24 model.embedding_dim 2.0 +285 24 loss.margin 3.6502899221677882 +285 24 loss.adversarial_temperature 0.4813916108747266 +285 24 optimizer.lr 0.08882940984703065 +285 24 negative_sampler.num_negs_per_pos 92.0 +285 24 training.batch_size 2.0 +285 25 model.embedding_dim 2.0 +285 25 loss.margin 22.440046765011946 +285 25 loss.adversarial_temperature 0.6202204311121886 +285 25 optimizer.lr 0.0031309674051416096 +285 25 negative_sampler.num_negs_per_pos 59.0 +285 25 training.batch_size 0.0 +285 1 dataset """fb15k237""" +285 1 model """hole""" +285 1 loss """nssa""" +285 1 regularizer """no""" +285 1 optimizer """adam""" +285 1 training_loop """owa""" +285 1 negative_sampler """basic""" +285 1 evaluator """rankbased""" +285 2 dataset """fb15k237""" +285 2 model """hole""" +285 2 loss """nssa""" +285 2 regularizer """no""" 
+285 2 optimizer """adam""" +285 2 training_loop """owa""" +285 2 negative_sampler """basic""" +285 2 evaluator """rankbased""" +285 3 dataset """fb15k237""" +285 3 model """hole""" +285 3 loss """nssa""" +285 3 regularizer """no""" +285 3 optimizer """adam""" +285 3 training_loop """owa""" +285 3 negative_sampler """basic""" +285 3 evaluator """rankbased""" +285 4 dataset """fb15k237""" +285 4 model """hole""" +285 4 loss """nssa""" +285 4 regularizer """no""" +285 4 optimizer """adam""" +285 4 training_loop """owa""" +285 4 negative_sampler """basic""" +285 4 evaluator """rankbased""" +285 5 dataset """fb15k237""" +285 5 model """hole""" +285 5 loss """nssa""" +285 5 regularizer """no""" +285 5 optimizer """adam""" +285 5 training_loop """owa""" +285 5 negative_sampler """basic""" +285 5 evaluator """rankbased""" +285 6 dataset """fb15k237""" +285 6 model """hole""" +285 6 loss """nssa""" +285 6 regularizer """no""" +285 6 optimizer """adam""" +285 6 training_loop """owa""" +285 6 negative_sampler """basic""" +285 6 evaluator """rankbased""" +285 7 dataset """fb15k237""" +285 7 model """hole""" +285 7 loss """nssa""" +285 7 regularizer """no""" +285 7 optimizer """adam""" +285 7 training_loop """owa""" +285 7 negative_sampler """basic""" +285 7 evaluator """rankbased""" +285 8 dataset """fb15k237""" +285 8 model """hole""" +285 8 loss """nssa""" +285 8 regularizer """no""" +285 8 optimizer """adam""" +285 8 training_loop """owa""" +285 8 negative_sampler """basic""" +285 8 evaluator """rankbased""" +285 9 dataset """fb15k237""" +285 9 model """hole""" +285 9 loss """nssa""" +285 9 regularizer """no""" +285 9 optimizer """adam""" +285 9 training_loop """owa""" +285 9 negative_sampler """basic""" +285 9 evaluator """rankbased""" +285 10 dataset """fb15k237""" +285 10 model """hole""" +285 10 loss """nssa""" +285 10 regularizer """no""" +285 10 optimizer """adam""" +285 10 training_loop """owa""" +285 10 negative_sampler """basic""" +285 10 evaluator """rankbased""" 
+285 11 dataset """fb15k237""" +285 11 model """hole""" +285 11 loss """nssa""" +285 11 regularizer """no""" +285 11 optimizer """adam""" +285 11 training_loop """owa""" +285 11 negative_sampler """basic""" +285 11 evaluator """rankbased""" +285 12 dataset """fb15k237""" +285 12 model """hole""" +285 12 loss """nssa""" +285 12 regularizer """no""" +285 12 optimizer """adam""" +285 12 training_loop """owa""" +285 12 negative_sampler """basic""" +285 12 evaluator """rankbased""" +285 13 dataset """fb15k237""" +285 13 model """hole""" +285 13 loss """nssa""" +285 13 regularizer """no""" +285 13 optimizer """adam""" +285 13 training_loop """owa""" +285 13 negative_sampler """basic""" +285 13 evaluator """rankbased""" +285 14 dataset """fb15k237""" +285 14 model """hole""" +285 14 loss """nssa""" +285 14 regularizer """no""" +285 14 optimizer """adam""" +285 14 training_loop """owa""" +285 14 negative_sampler """basic""" +285 14 evaluator """rankbased""" +285 15 dataset """fb15k237""" +285 15 model """hole""" +285 15 loss """nssa""" +285 15 regularizer """no""" +285 15 optimizer """adam""" +285 15 training_loop """owa""" +285 15 negative_sampler """basic""" +285 15 evaluator """rankbased""" +285 16 dataset """fb15k237""" +285 16 model """hole""" +285 16 loss """nssa""" +285 16 regularizer """no""" +285 16 optimizer """adam""" +285 16 training_loop """owa""" +285 16 negative_sampler """basic""" +285 16 evaluator """rankbased""" +285 17 dataset """fb15k237""" +285 17 model """hole""" +285 17 loss """nssa""" +285 17 regularizer """no""" +285 17 optimizer """adam""" +285 17 training_loop """owa""" +285 17 negative_sampler """basic""" +285 17 evaluator """rankbased""" +285 18 dataset """fb15k237""" +285 18 model """hole""" +285 18 loss """nssa""" +285 18 regularizer """no""" +285 18 optimizer """adam""" +285 18 training_loop """owa""" +285 18 negative_sampler """basic""" +285 18 evaluator """rankbased""" +285 19 dataset """fb15k237""" +285 19 model """hole""" +285 19 loss 
"""nssa""" +285 19 regularizer """no""" +285 19 optimizer """adam""" +285 19 training_loop """owa""" +285 19 negative_sampler """basic""" +285 19 evaluator """rankbased""" +285 20 dataset """fb15k237""" +285 20 model """hole""" +285 20 loss """nssa""" +285 20 regularizer """no""" +285 20 optimizer """adam""" +285 20 training_loop """owa""" +285 20 negative_sampler """basic""" +285 20 evaluator """rankbased""" +285 21 dataset """fb15k237""" +285 21 model """hole""" +285 21 loss """nssa""" +285 21 regularizer """no""" +285 21 optimizer """adam""" +285 21 training_loop """owa""" +285 21 negative_sampler """basic""" +285 21 evaluator """rankbased""" +285 22 dataset """fb15k237""" +285 22 model """hole""" +285 22 loss """nssa""" +285 22 regularizer """no""" +285 22 optimizer """adam""" +285 22 training_loop """owa""" +285 22 negative_sampler """basic""" +285 22 evaluator """rankbased""" +285 23 dataset """fb15k237""" +285 23 model """hole""" +285 23 loss """nssa""" +285 23 regularizer """no""" +285 23 optimizer """adam""" +285 23 training_loop """owa""" +285 23 negative_sampler """basic""" +285 23 evaluator """rankbased""" +285 24 dataset """fb15k237""" +285 24 model """hole""" +285 24 loss """nssa""" +285 24 regularizer """no""" +285 24 optimizer """adam""" +285 24 training_loop """owa""" +285 24 negative_sampler """basic""" +285 24 evaluator """rankbased""" +285 25 dataset """fb15k237""" +285 25 model """hole""" +285 25 loss """nssa""" +285 25 regularizer """no""" +285 25 optimizer """adam""" +285 25 training_loop """owa""" +285 25 negative_sampler """basic""" +285 25 evaluator """rankbased""" +286 1 model.embedding_dim 2.0 +286 1 optimizer.lr 0.0037064411040486664 +286 1 negative_sampler.num_negs_per_pos 10.0 +286 1 training.batch_size 1.0 +286 2 model.embedding_dim 0.0 +286 2 optimizer.lr 0.022778390983330104 +286 2 negative_sampler.num_negs_per_pos 24.0 +286 2 training.batch_size 1.0 +286 3 model.embedding_dim 0.0 +286 3 optimizer.lr 0.006645497164748106 +286 3 
negative_sampler.num_negs_per_pos 5.0 +286 3 training.batch_size 1.0 +286 4 model.embedding_dim 1.0 +286 4 optimizer.lr 0.05015148220172494 +286 4 negative_sampler.num_negs_per_pos 99.0 +286 4 training.batch_size 0.0 +286 5 model.embedding_dim 0.0 +286 5 optimizer.lr 0.017517293641876493 +286 5 negative_sampler.num_negs_per_pos 52.0 +286 5 training.batch_size 0.0 +286 6 model.embedding_dim 0.0 +286 6 optimizer.lr 0.05504474199706162 +286 6 negative_sampler.num_negs_per_pos 8.0 +286 6 training.batch_size 0.0 +286 7 model.embedding_dim 2.0 +286 7 optimizer.lr 0.0016452734337472612 +286 7 negative_sampler.num_negs_per_pos 65.0 +286 7 training.batch_size 1.0 +286 8 model.embedding_dim 1.0 +286 8 optimizer.lr 0.002804016019564752 +286 8 negative_sampler.num_negs_per_pos 45.0 +286 8 training.batch_size 1.0 +286 9 model.embedding_dim 2.0 +286 9 optimizer.lr 0.00559466123402576 +286 9 negative_sampler.num_negs_per_pos 3.0 +286 9 training.batch_size 1.0 +286 10 model.embedding_dim 0.0 +286 10 optimizer.lr 0.0033459670853057603 +286 10 negative_sampler.num_negs_per_pos 39.0 +286 10 training.batch_size 0.0 +286 11 model.embedding_dim 1.0 +286 11 optimizer.lr 0.032376251803794 +286 11 negative_sampler.num_negs_per_pos 45.0 +286 11 training.batch_size 0.0 +286 12 model.embedding_dim 1.0 +286 12 optimizer.lr 0.005932897816190371 +286 12 negative_sampler.num_negs_per_pos 83.0 +286 12 training.batch_size 1.0 +286 13 model.embedding_dim 1.0 +286 13 optimizer.lr 0.08191640799530678 +286 13 negative_sampler.num_negs_per_pos 65.0 +286 13 training.batch_size 0.0 +286 14 model.embedding_dim 2.0 +286 14 optimizer.lr 0.0013425287728226329 +286 14 negative_sampler.num_negs_per_pos 56.0 +286 14 training.batch_size 2.0 +286 15 model.embedding_dim 1.0 +286 15 optimizer.lr 0.01208524392550863 +286 15 negative_sampler.num_negs_per_pos 89.0 +286 15 training.batch_size 1.0 +286 16 model.embedding_dim 0.0 +286 16 optimizer.lr 0.05194866952939228 +286 16 negative_sampler.num_negs_per_pos 52.0 +286 
16 training.batch_size 1.0 +286 17 model.embedding_dim 1.0 +286 17 optimizer.lr 0.006590453951916319 +286 17 negative_sampler.num_negs_per_pos 9.0 +286 17 training.batch_size 2.0 +286 18 model.embedding_dim 1.0 +286 18 optimizer.lr 0.0012315120312942295 +286 18 negative_sampler.num_negs_per_pos 3.0 +286 18 training.batch_size 1.0 +286 1 dataset """fb15k237""" +286 1 model """hole""" +286 1 loss """bceaftersigmoid""" +286 1 regularizer """no""" +286 1 optimizer """adam""" +286 1 training_loop """owa""" +286 1 negative_sampler """basic""" +286 1 evaluator """rankbased""" +286 2 dataset """fb15k237""" +286 2 model """hole""" +286 2 loss """bceaftersigmoid""" +286 2 regularizer """no""" +286 2 optimizer """adam""" +286 2 training_loop """owa""" +286 2 negative_sampler """basic""" +286 2 evaluator """rankbased""" +286 3 dataset """fb15k237""" +286 3 model """hole""" +286 3 loss """bceaftersigmoid""" +286 3 regularizer """no""" +286 3 optimizer """adam""" +286 3 training_loop """owa""" +286 3 negative_sampler """basic""" +286 3 evaluator """rankbased""" +286 4 dataset """fb15k237""" +286 4 model """hole""" +286 4 loss """bceaftersigmoid""" +286 4 regularizer """no""" +286 4 optimizer """adam""" +286 4 training_loop """owa""" +286 4 negative_sampler """basic""" +286 4 evaluator """rankbased""" +286 5 dataset """fb15k237""" +286 5 model """hole""" +286 5 loss """bceaftersigmoid""" +286 5 regularizer """no""" +286 5 optimizer """adam""" +286 5 training_loop """owa""" +286 5 negative_sampler """basic""" +286 5 evaluator """rankbased""" +286 6 dataset """fb15k237""" +286 6 model """hole""" +286 6 loss """bceaftersigmoid""" +286 6 regularizer """no""" +286 6 optimizer """adam""" +286 6 training_loop """owa""" +286 6 negative_sampler """basic""" +286 6 evaluator """rankbased""" +286 7 dataset """fb15k237""" +286 7 model """hole""" +286 7 loss """bceaftersigmoid""" +286 7 regularizer """no""" +286 7 optimizer """adam""" +286 7 training_loop """owa""" +286 7 negative_sampler 
"""basic""" +286 7 evaluator """rankbased""" +286 8 dataset """fb15k237""" +286 8 model """hole""" +286 8 loss """bceaftersigmoid""" +286 8 regularizer """no""" +286 8 optimizer """adam""" +286 8 training_loop """owa""" +286 8 negative_sampler """basic""" +286 8 evaluator """rankbased""" +286 9 dataset """fb15k237""" +286 9 model """hole""" +286 9 loss """bceaftersigmoid""" +286 9 regularizer """no""" +286 9 optimizer """adam""" +286 9 training_loop """owa""" +286 9 negative_sampler """basic""" +286 9 evaluator """rankbased""" +286 10 dataset """fb15k237""" +286 10 model """hole""" +286 10 loss """bceaftersigmoid""" +286 10 regularizer """no""" +286 10 optimizer """adam""" +286 10 training_loop """owa""" +286 10 negative_sampler """basic""" +286 10 evaluator """rankbased""" +286 11 dataset """fb15k237""" +286 11 model """hole""" +286 11 loss """bceaftersigmoid""" +286 11 regularizer """no""" +286 11 optimizer """adam""" +286 11 training_loop """owa""" +286 11 negative_sampler """basic""" +286 11 evaluator """rankbased""" +286 12 dataset """fb15k237""" +286 12 model """hole""" +286 12 loss """bceaftersigmoid""" +286 12 regularizer """no""" +286 12 optimizer """adam""" +286 12 training_loop """owa""" +286 12 negative_sampler """basic""" +286 12 evaluator """rankbased""" +286 13 dataset """fb15k237""" +286 13 model """hole""" +286 13 loss """bceaftersigmoid""" +286 13 regularizer """no""" +286 13 optimizer """adam""" +286 13 training_loop """owa""" +286 13 negative_sampler """basic""" +286 13 evaluator """rankbased""" +286 14 dataset """fb15k237""" +286 14 model """hole""" +286 14 loss """bceaftersigmoid""" +286 14 regularizer """no""" +286 14 optimizer """adam""" +286 14 training_loop """owa""" +286 14 negative_sampler """basic""" +286 14 evaluator """rankbased""" +286 15 dataset """fb15k237""" +286 15 model """hole""" +286 15 loss """bceaftersigmoid""" +286 15 regularizer """no""" +286 15 optimizer """adam""" +286 15 training_loop """owa""" +286 15 negative_sampler 
"""basic""" +286 15 evaluator """rankbased""" +286 16 dataset """fb15k237""" +286 16 model """hole""" +286 16 loss """bceaftersigmoid""" +286 16 regularizer """no""" +286 16 optimizer """adam""" +286 16 training_loop """owa""" +286 16 negative_sampler """basic""" +286 16 evaluator """rankbased""" +286 17 dataset """fb15k237""" +286 17 model """hole""" +286 17 loss """bceaftersigmoid""" +286 17 regularizer """no""" +286 17 optimizer """adam""" +286 17 training_loop """owa""" +286 17 negative_sampler """basic""" +286 17 evaluator """rankbased""" +286 18 dataset """fb15k237""" +286 18 model """hole""" +286 18 loss """bceaftersigmoid""" +286 18 regularizer """no""" +286 18 optimizer """adam""" +286 18 training_loop """owa""" +286 18 negative_sampler """basic""" +286 18 evaluator """rankbased""" +287 1 model.embedding_dim 0.0 +287 1 optimizer.lr 0.03215349120098353 +287 1 negative_sampler.num_negs_per_pos 3.0 +287 1 training.batch_size 1.0 +287 2 model.embedding_dim 0.0 +287 2 optimizer.lr 0.004939777502046291 +287 2 negative_sampler.num_negs_per_pos 3.0 +287 2 training.batch_size 0.0 +287 3 model.embedding_dim 2.0 +287 3 optimizer.lr 0.0058226065289394965 +287 3 negative_sampler.num_negs_per_pos 21.0 +287 3 training.batch_size 0.0 +287 4 model.embedding_dim 0.0 +287 4 optimizer.lr 0.005232844390680475 +287 4 negative_sampler.num_negs_per_pos 92.0 +287 4 training.batch_size 0.0 +287 5 model.embedding_dim 2.0 +287 5 optimizer.lr 0.0015427696112341738 +287 5 negative_sampler.num_negs_per_pos 75.0 +287 5 training.batch_size 2.0 +287 6 model.embedding_dim 0.0 +287 6 optimizer.lr 0.004683224750763389 +287 6 negative_sampler.num_negs_per_pos 36.0 +287 6 training.batch_size 1.0 +287 7 model.embedding_dim 1.0 +287 7 optimizer.lr 0.0024655214678415385 +287 7 negative_sampler.num_negs_per_pos 76.0 +287 7 training.batch_size 2.0 +287 8 model.embedding_dim 2.0 +287 8 optimizer.lr 0.0015777320906429726 +287 8 negative_sampler.num_negs_per_pos 80.0 +287 8 training.batch_size 0.0 +287 
9 model.embedding_dim 1.0 +287 9 optimizer.lr 0.001826050082909983 +287 9 negative_sampler.num_negs_per_pos 21.0 +287 9 training.batch_size 1.0 +287 10 model.embedding_dim 2.0 +287 10 optimizer.lr 0.022792965265121555 +287 10 negative_sampler.num_negs_per_pos 70.0 +287 10 training.batch_size 0.0 +287 11 model.embedding_dim 0.0 +287 11 optimizer.lr 0.001145136844000523 +287 11 negative_sampler.num_negs_per_pos 77.0 +287 11 training.batch_size 0.0 +287 12 model.embedding_dim 0.0 +287 12 optimizer.lr 0.0015111045868124874 +287 12 negative_sampler.num_negs_per_pos 44.0 +287 12 training.batch_size 1.0 +287 1 dataset """fb15k237""" +287 1 model """hole""" +287 1 loss """softplus""" +287 1 regularizer """no""" +287 1 optimizer """adam""" +287 1 training_loop """owa""" +287 1 negative_sampler """basic""" +287 1 evaluator """rankbased""" +287 2 dataset """fb15k237""" +287 2 model """hole""" +287 2 loss """softplus""" +287 2 regularizer """no""" +287 2 optimizer """adam""" +287 2 training_loop """owa""" +287 2 negative_sampler """basic""" +287 2 evaluator """rankbased""" +287 3 dataset """fb15k237""" +287 3 model """hole""" +287 3 loss """softplus""" +287 3 regularizer """no""" +287 3 optimizer """adam""" +287 3 training_loop """owa""" +287 3 negative_sampler """basic""" +287 3 evaluator """rankbased""" +287 4 dataset """fb15k237""" +287 4 model """hole""" +287 4 loss """softplus""" +287 4 regularizer """no""" +287 4 optimizer """adam""" +287 4 training_loop """owa""" +287 4 negative_sampler """basic""" +287 4 evaluator """rankbased""" +287 5 dataset """fb15k237""" +287 5 model """hole""" +287 5 loss """softplus""" +287 5 regularizer """no""" +287 5 optimizer """adam""" +287 5 training_loop """owa""" +287 5 negative_sampler """basic""" +287 5 evaluator """rankbased""" +287 6 dataset """fb15k237""" +287 6 model """hole""" +287 6 loss """softplus""" +287 6 regularizer """no""" +287 6 optimizer """adam""" +287 6 training_loop """owa""" +287 6 negative_sampler """basic""" +287 6 
evaluator """rankbased""" +287 7 dataset """fb15k237""" +287 7 model """hole""" +287 7 loss """softplus""" +287 7 regularizer """no""" +287 7 optimizer """adam""" +287 7 training_loop """owa""" +287 7 negative_sampler """basic""" +287 7 evaluator """rankbased""" +287 8 dataset """fb15k237""" +287 8 model """hole""" +287 8 loss """softplus""" +287 8 regularizer """no""" +287 8 optimizer """adam""" +287 8 training_loop """owa""" +287 8 negative_sampler """basic""" +287 8 evaluator """rankbased""" +287 9 dataset """fb15k237""" +287 9 model """hole""" +287 9 loss """softplus""" +287 9 regularizer """no""" +287 9 optimizer """adam""" +287 9 training_loop """owa""" +287 9 negative_sampler """basic""" +287 9 evaluator """rankbased""" +287 10 dataset """fb15k237""" +287 10 model """hole""" +287 10 loss """softplus""" +287 10 regularizer """no""" +287 10 optimizer """adam""" +287 10 training_loop """owa""" +287 10 negative_sampler """basic""" +287 10 evaluator """rankbased""" +287 11 dataset """fb15k237""" +287 11 model """hole""" +287 11 loss """softplus""" +287 11 regularizer """no""" +287 11 optimizer """adam""" +287 11 training_loop """owa""" +287 11 negative_sampler """basic""" +287 11 evaluator """rankbased""" +287 12 dataset """fb15k237""" +287 12 model """hole""" +287 12 loss """softplus""" +287 12 regularizer """no""" +287 12 optimizer """adam""" +287 12 training_loop """owa""" +287 12 negative_sampler """basic""" +287 12 evaluator """rankbased""" +288 1 model.embedding_dim 0.0 +288 1 optimizer.lr 0.007464542565762488 +288 1 negative_sampler.num_negs_per_pos 2.0 +288 1 training.batch_size 0.0 +288 2 model.embedding_dim 2.0 +288 2 optimizer.lr 0.014548194579592362 +288 2 negative_sampler.num_negs_per_pos 58.0 +288 2 training.batch_size 2.0 +288 3 model.embedding_dim 0.0 +288 3 optimizer.lr 0.06612676557173736 +288 3 negative_sampler.num_negs_per_pos 20.0 +288 3 training.batch_size 2.0 +288 4 model.embedding_dim 2.0 +288 4 optimizer.lr 0.04819803848711848 +288 4 
negative_sampler.num_negs_per_pos 97.0 +288 4 training.batch_size 2.0 +288 5 model.embedding_dim 2.0 +288 5 optimizer.lr 0.036452958375661926 +288 5 negative_sampler.num_negs_per_pos 18.0 +288 5 training.batch_size 2.0 +288 6 model.embedding_dim 2.0 +288 6 optimizer.lr 0.02447160251194074 +288 6 negative_sampler.num_negs_per_pos 48.0 +288 6 training.batch_size 2.0 +288 7 model.embedding_dim 2.0 +288 7 optimizer.lr 0.0712263399340621 +288 7 negative_sampler.num_negs_per_pos 65.0 +288 7 training.batch_size 0.0 +288 8 model.embedding_dim 2.0 +288 8 optimizer.lr 0.05795565646494681 +288 8 negative_sampler.num_negs_per_pos 41.0 +288 8 training.batch_size 2.0 +288 9 model.embedding_dim 0.0 +288 9 optimizer.lr 0.005571574229748454 +288 9 negative_sampler.num_negs_per_pos 74.0 +288 9 training.batch_size 0.0 +288 10 model.embedding_dim 1.0 +288 10 optimizer.lr 0.017632859170337207 +288 10 negative_sampler.num_negs_per_pos 46.0 +288 10 training.batch_size 0.0 +288 11 model.embedding_dim 0.0 +288 11 optimizer.lr 0.056464953736193604 +288 11 negative_sampler.num_negs_per_pos 36.0 +288 11 training.batch_size 0.0 +288 12 model.embedding_dim 1.0 +288 12 optimizer.lr 0.06994108466465032 +288 12 negative_sampler.num_negs_per_pos 13.0 +288 12 training.batch_size 0.0 +288 13 model.embedding_dim 1.0 +288 13 optimizer.lr 0.03963954112789034 +288 13 negative_sampler.num_negs_per_pos 69.0 +288 13 training.batch_size 2.0 +288 14 model.embedding_dim 0.0 +288 14 optimizer.lr 0.002304086012052675 +288 14 negative_sampler.num_negs_per_pos 4.0 +288 14 training.batch_size 1.0 +288 15 model.embedding_dim 0.0 +288 15 optimizer.lr 0.0011116287098519007 +288 15 negative_sampler.num_negs_per_pos 30.0 +288 15 training.batch_size 0.0 +288 16 model.embedding_dim 1.0 +288 16 optimizer.lr 0.043199080251416236 +288 16 negative_sampler.num_negs_per_pos 27.0 +288 16 training.batch_size 1.0 +288 17 model.embedding_dim 2.0 +288 17 optimizer.lr 0.003997792108579514 +288 17 negative_sampler.num_negs_per_pos 
86.0 +288 17 training.batch_size 2.0 +288 18 model.embedding_dim 0.0 +288 18 optimizer.lr 0.00867725003866509 +288 18 negative_sampler.num_negs_per_pos 56.0 +288 18 training.batch_size 2.0 +288 19 model.embedding_dim 1.0 +288 19 optimizer.lr 0.04277498641319492 +288 19 negative_sampler.num_negs_per_pos 54.0 +288 19 training.batch_size 2.0 +288 20 model.embedding_dim 0.0 +288 20 optimizer.lr 0.001037540027508892 +288 20 negative_sampler.num_negs_per_pos 93.0 +288 20 training.batch_size 0.0 +288 21 model.embedding_dim 2.0 +288 21 optimizer.lr 0.01102155397349384 +288 21 negative_sampler.num_negs_per_pos 8.0 +288 21 training.batch_size 1.0 +288 22 model.embedding_dim 1.0 +288 22 optimizer.lr 0.001926399349077412 +288 22 negative_sampler.num_negs_per_pos 91.0 +288 22 training.batch_size 0.0 +288 23 model.embedding_dim 2.0 +288 23 optimizer.lr 0.0016297506578359852 +288 23 negative_sampler.num_negs_per_pos 58.0 +288 23 training.batch_size 2.0 +288 24 model.embedding_dim 0.0 +288 24 optimizer.lr 0.01974883894296077 +288 24 negative_sampler.num_negs_per_pos 42.0 +288 24 training.batch_size 2.0 +288 25 model.embedding_dim 0.0 +288 25 optimizer.lr 0.030340652147918364 +288 25 negative_sampler.num_negs_per_pos 42.0 +288 25 training.batch_size 1.0 +288 26 model.embedding_dim 0.0 +288 26 optimizer.lr 0.018357978807767992 +288 26 negative_sampler.num_negs_per_pos 42.0 +288 26 training.batch_size 0.0 +288 27 model.embedding_dim 1.0 +288 27 optimizer.lr 0.01931813377186649 +288 27 negative_sampler.num_negs_per_pos 96.0 +288 27 training.batch_size 0.0 +288 28 model.embedding_dim 0.0 +288 28 optimizer.lr 0.01604915493158615 +288 28 negative_sampler.num_negs_per_pos 20.0 +288 28 training.batch_size 2.0 +288 29 model.embedding_dim 0.0 +288 29 optimizer.lr 0.01765029715612766 +288 29 negative_sampler.num_negs_per_pos 6.0 +288 29 training.batch_size 0.0 +288 30 model.embedding_dim 0.0 +288 30 optimizer.lr 0.045851852882775775 +288 30 negative_sampler.num_negs_per_pos 57.0 +288 30 
training.batch_size 2.0 +288 31 model.embedding_dim 1.0 +288 31 optimizer.lr 0.006313213091219214 +288 31 negative_sampler.num_negs_per_pos 19.0 +288 31 training.batch_size 1.0 +288 32 model.embedding_dim 1.0 +288 32 optimizer.lr 0.005030993279791298 +288 32 negative_sampler.num_negs_per_pos 76.0 +288 32 training.batch_size 0.0 +288 33 model.embedding_dim 0.0 +288 33 optimizer.lr 0.06631019128105492 +288 33 negative_sampler.num_negs_per_pos 5.0 +288 33 training.batch_size 2.0 +288 34 model.embedding_dim 1.0 +288 34 optimizer.lr 0.003819266709060899 +288 34 negative_sampler.num_negs_per_pos 83.0 +288 34 training.batch_size 1.0 +288 35 model.embedding_dim 1.0 +288 35 optimizer.lr 0.03253564044528215 +288 35 negative_sampler.num_negs_per_pos 49.0 +288 35 training.batch_size 0.0 +288 36 model.embedding_dim 2.0 +288 36 optimizer.lr 0.004607385916377482 +288 36 negative_sampler.num_negs_per_pos 75.0 +288 36 training.batch_size 1.0 +288 37 model.embedding_dim 2.0 +288 37 optimizer.lr 0.027271970585126877 +288 37 negative_sampler.num_negs_per_pos 41.0 +288 37 training.batch_size 1.0 +288 38 model.embedding_dim 1.0 +288 38 optimizer.lr 0.03730737974367112 +288 38 negative_sampler.num_negs_per_pos 25.0 +288 38 training.batch_size 1.0 +288 39 model.embedding_dim 2.0 +288 39 optimizer.lr 0.003507945242347027 +288 39 negative_sampler.num_negs_per_pos 58.0 +288 39 training.batch_size 1.0 +288 1 dataset """fb15k237""" +288 1 model """hole""" +288 1 loss """bceaftersigmoid""" +288 1 regularizer """no""" +288 1 optimizer """adam""" +288 1 training_loop """owa""" +288 1 negative_sampler """basic""" +288 1 evaluator """rankbased""" +288 2 dataset """fb15k237""" +288 2 model """hole""" +288 2 loss """bceaftersigmoid""" +288 2 regularizer """no""" +288 2 optimizer """adam""" +288 2 training_loop """owa""" +288 2 negative_sampler """basic""" +288 2 evaluator """rankbased""" +288 3 dataset """fb15k237""" +288 3 model """hole""" +288 3 loss """bceaftersigmoid""" +288 3 regularizer 
"""no""" +288 3 optimizer """adam""" +288 3 training_loop """owa""" +288 3 negative_sampler """basic""" +288 3 evaluator """rankbased""" +288 4 dataset """fb15k237""" +288 4 model """hole""" +288 4 loss """bceaftersigmoid""" +288 4 regularizer """no""" +288 4 optimizer """adam""" +288 4 training_loop """owa""" +288 4 negative_sampler """basic""" +288 4 evaluator """rankbased""" +288 5 dataset """fb15k237""" +288 5 model """hole""" +288 5 loss """bceaftersigmoid""" +288 5 regularizer """no""" +288 5 optimizer """adam""" +288 5 training_loop """owa""" +288 5 negative_sampler """basic""" +288 5 evaluator """rankbased""" +288 6 dataset """fb15k237""" +288 6 model """hole""" +288 6 loss """bceaftersigmoid""" +288 6 regularizer """no""" +288 6 optimizer """adam""" +288 6 training_loop """owa""" +288 6 negative_sampler """basic""" +288 6 evaluator """rankbased""" +288 7 dataset """fb15k237""" +288 7 model """hole""" +288 7 loss """bceaftersigmoid""" +288 7 regularizer """no""" +288 7 optimizer """adam""" +288 7 training_loop """owa""" +288 7 negative_sampler """basic""" +288 7 evaluator """rankbased""" +288 8 dataset """fb15k237""" +288 8 model """hole""" +288 8 loss """bceaftersigmoid""" +288 8 regularizer """no""" +288 8 optimizer """adam""" +288 8 training_loop """owa""" +288 8 negative_sampler """basic""" +288 8 evaluator """rankbased""" +288 9 dataset """fb15k237""" +288 9 model """hole""" +288 9 loss """bceaftersigmoid""" +288 9 regularizer """no""" +288 9 optimizer """adam""" +288 9 training_loop """owa""" +288 9 negative_sampler """basic""" +288 9 evaluator """rankbased""" +288 10 dataset """fb15k237""" +288 10 model """hole""" +288 10 loss """bceaftersigmoid""" +288 10 regularizer """no""" +288 10 optimizer """adam""" +288 10 training_loop """owa""" +288 10 negative_sampler """basic""" +288 10 evaluator """rankbased""" +288 11 dataset """fb15k237""" +288 11 model """hole""" +288 11 loss """bceaftersigmoid""" +288 11 regularizer """no""" +288 11 optimizer 
"""adam""" +288 11 training_loop """owa""" +288 11 negative_sampler """basic""" +288 11 evaluator """rankbased""" +288 12 dataset """fb15k237""" +288 12 model """hole""" +288 12 loss """bceaftersigmoid""" +288 12 regularizer """no""" +288 12 optimizer """adam""" +288 12 training_loop """owa""" +288 12 negative_sampler """basic""" +288 12 evaluator """rankbased""" +288 13 dataset """fb15k237""" +288 13 model """hole""" +288 13 loss """bceaftersigmoid""" +288 13 regularizer """no""" +288 13 optimizer """adam""" +288 13 training_loop """owa""" +288 13 negative_sampler """basic""" +288 13 evaluator """rankbased""" +288 14 dataset """fb15k237""" +288 14 model """hole""" +288 14 loss """bceaftersigmoid""" +288 14 regularizer """no""" +288 14 optimizer """adam""" +288 14 training_loop """owa""" +288 14 negative_sampler """basic""" +288 14 evaluator """rankbased""" +288 15 dataset """fb15k237""" +288 15 model """hole""" +288 15 loss """bceaftersigmoid""" +288 15 regularizer """no""" +288 15 optimizer """adam""" +288 15 training_loop """owa""" +288 15 negative_sampler """basic""" +288 15 evaluator """rankbased""" +288 16 dataset """fb15k237""" +288 16 model """hole""" +288 16 loss """bceaftersigmoid""" +288 16 regularizer """no""" +288 16 optimizer """adam""" +288 16 training_loop """owa""" +288 16 negative_sampler """basic""" +288 16 evaluator """rankbased""" +288 17 dataset """fb15k237""" +288 17 model """hole""" +288 17 loss """bceaftersigmoid""" +288 17 regularizer """no""" +288 17 optimizer """adam""" +288 17 training_loop """owa""" +288 17 negative_sampler """basic""" +288 17 evaluator """rankbased""" +288 18 dataset """fb15k237""" +288 18 model """hole""" +288 18 loss """bceaftersigmoid""" +288 18 regularizer """no""" +288 18 optimizer """adam""" +288 18 training_loop """owa""" +288 18 negative_sampler """basic""" +288 18 evaluator """rankbased""" +288 19 dataset """fb15k237""" +288 19 model """hole""" +288 19 loss """bceaftersigmoid""" +288 19 regularizer """no""" 
+288 19 optimizer """adam""" +288 19 training_loop """owa""" +288 19 negative_sampler """basic""" +288 19 evaluator """rankbased""" +288 20 dataset """fb15k237""" +288 20 model """hole""" +288 20 loss """bceaftersigmoid""" +288 20 regularizer """no""" +288 20 optimizer """adam""" +288 20 training_loop """owa""" +288 20 negative_sampler """basic""" +288 20 evaluator """rankbased""" +288 21 dataset """fb15k237""" +288 21 model """hole""" +288 21 loss """bceaftersigmoid""" +288 21 regularizer """no""" +288 21 optimizer """adam""" +288 21 training_loop """owa""" +288 21 negative_sampler """basic""" +288 21 evaluator """rankbased""" +288 22 dataset """fb15k237""" +288 22 model """hole""" +288 22 loss """bceaftersigmoid""" +288 22 regularizer """no""" +288 22 optimizer """adam""" +288 22 training_loop """owa""" +288 22 negative_sampler """basic""" +288 22 evaluator """rankbased""" +288 23 dataset """fb15k237""" +288 23 model """hole""" +288 23 loss """bceaftersigmoid""" +288 23 regularizer """no""" +288 23 optimizer """adam""" +288 23 training_loop """owa""" +288 23 negative_sampler """basic""" +288 23 evaluator """rankbased""" +288 24 dataset """fb15k237""" +288 24 model """hole""" +288 24 loss """bceaftersigmoid""" +288 24 regularizer """no""" +288 24 optimizer """adam""" +288 24 training_loop """owa""" +288 24 negative_sampler """basic""" +288 24 evaluator """rankbased""" +288 25 dataset """fb15k237""" +288 25 model """hole""" +288 25 loss """bceaftersigmoid""" +288 25 regularizer """no""" +288 25 optimizer """adam""" +288 25 training_loop """owa""" +288 25 negative_sampler """basic""" +288 25 evaluator """rankbased""" +288 26 dataset """fb15k237""" +288 26 model """hole""" +288 26 loss """bceaftersigmoid""" +288 26 regularizer """no""" +288 26 optimizer """adam""" +288 26 training_loop """owa""" +288 26 negative_sampler """basic""" +288 26 evaluator """rankbased""" +288 27 dataset """fb15k237""" +288 27 model """hole""" +288 27 loss """bceaftersigmoid""" +288 27 
regularizer """no""" +288 27 optimizer """adam""" +288 27 training_loop """owa""" +288 27 negative_sampler """basic""" +288 27 evaluator """rankbased""" +288 28 dataset """fb15k237""" +288 28 model """hole""" +288 28 loss """bceaftersigmoid""" +288 28 regularizer """no""" +288 28 optimizer """adam""" +288 28 training_loop """owa""" +288 28 negative_sampler """basic""" +288 28 evaluator """rankbased""" +288 29 dataset """fb15k237""" +288 29 model """hole""" +288 29 loss """bceaftersigmoid""" +288 29 regularizer """no""" +288 29 optimizer """adam""" +288 29 training_loop """owa""" +288 29 negative_sampler """basic""" +288 29 evaluator """rankbased""" +288 30 dataset """fb15k237""" +288 30 model """hole""" +288 30 loss """bceaftersigmoid""" +288 30 regularizer """no""" +288 30 optimizer """adam""" +288 30 training_loop """owa""" +288 30 negative_sampler """basic""" +288 30 evaluator """rankbased""" +288 31 dataset """fb15k237""" +288 31 model """hole""" +288 31 loss """bceaftersigmoid""" +288 31 regularizer """no""" +288 31 optimizer """adam""" +288 31 training_loop """owa""" +288 31 negative_sampler """basic""" +288 31 evaluator """rankbased""" +288 32 dataset """fb15k237""" +288 32 model """hole""" +288 32 loss """bceaftersigmoid""" +288 32 regularizer """no""" +288 32 optimizer """adam""" +288 32 training_loop """owa""" +288 32 negative_sampler """basic""" +288 32 evaluator """rankbased""" +288 33 dataset """fb15k237""" +288 33 model """hole""" +288 33 loss """bceaftersigmoid""" +288 33 regularizer """no""" +288 33 optimizer """adam""" +288 33 training_loop """owa""" +288 33 negative_sampler """basic""" +288 33 evaluator """rankbased""" +288 34 dataset """fb15k237""" +288 34 model """hole""" +288 34 loss """bceaftersigmoid""" +288 34 regularizer """no""" +288 34 optimizer """adam""" +288 34 training_loop """owa""" +288 34 negative_sampler """basic""" +288 34 evaluator """rankbased""" +288 35 dataset """fb15k237""" +288 35 model """hole""" +288 35 loss 
"""bceaftersigmoid""" +288 35 regularizer """no""" +288 35 optimizer """adam""" +288 35 training_loop """owa""" +288 35 negative_sampler """basic""" +288 35 evaluator """rankbased""" +288 36 dataset """fb15k237""" +288 36 model """hole""" +288 36 loss """bceaftersigmoid""" +288 36 regularizer """no""" +288 36 optimizer """adam""" +288 36 training_loop """owa""" +288 36 negative_sampler """basic""" +288 36 evaluator """rankbased""" +288 37 dataset """fb15k237""" +288 37 model """hole""" +288 37 loss """bceaftersigmoid""" +288 37 regularizer """no""" +288 37 optimizer """adam""" +288 37 training_loop """owa""" +288 37 negative_sampler """basic""" +288 37 evaluator """rankbased""" +288 38 dataset """fb15k237""" +288 38 model """hole""" +288 38 loss """bceaftersigmoid""" +288 38 regularizer """no""" +288 38 optimizer """adam""" +288 38 training_loop """owa""" +288 38 negative_sampler """basic""" +288 38 evaluator """rankbased""" +288 39 dataset """fb15k237""" +288 39 model """hole""" +288 39 loss """bceaftersigmoid""" +288 39 regularizer """no""" +288 39 optimizer """adam""" +288 39 training_loop """owa""" +288 39 negative_sampler """basic""" +288 39 evaluator """rankbased""" +289 1 model.embedding_dim 1.0 +289 1 optimizer.lr 0.0014745207656897258 +289 1 negative_sampler.num_negs_per_pos 62.0 +289 1 training.batch_size 0.0 +289 2 model.embedding_dim 0.0 +289 2 optimizer.lr 0.0012579010368859197 +289 2 negative_sampler.num_negs_per_pos 85.0 +289 2 training.batch_size 1.0 +289 3 model.embedding_dim 1.0 +289 3 optimizer.lr 0.09678218993640997 +289 3 negative_sampler.num_negs_per_pos 12.0 +289 3 training.batch_size 2.0 +289 4 model.embedding_dim 0.0 +289 4 optimizer.lr 0.058802704128383294 +289 4 negative_sampler.num_negs_per_pos 46.0 +289 4 training.batch_size 0.0 +289 5 model.embedding_dim 1.0 +289 5 optimizer.lr 0.0319365689628154 +289 5 negative_sampler.num_negs_per_pos 28.0 +289 5 training.batch_size 1.0 +289 6 model.embedding_dim 0.0 +289 6 optimizer.lr 
0.00329294033516574 +289 6 negative_sampler.num_negs_per_pos 61.0 +289 6 training.batch_size 0.0 +289 7 model.embedding_dim 0.0 +289 7 optimizer.lr 0.044110390854189406 +289 7 negative_sampler.num_negs_per_pos 2.0 +289 7 training.batch_size 1.0 +289 8 model.embedding_dim 1.0 +289 8 optimizer.lr 0.0022079772133200937 +289 8 negative_sampler.num_negs_per_pos 81.0 +289 8 training.batch_size 0.0 +289 9 model.embedding_dim 0.0 +289 9 optimizer.lr 0.037776956282492 +289 9 negative_sampler.num_negs_per_pos 33.0 +289 9 training.batch_size 0.0 +289 10 model.embedding_dim 0.0 +289 10 optimizer.lr 0.0846767358330813 +289 10 negative_sampler.num_negs_per_pos 18.0 +289 10 training.batch_size 0.0 +289 11 model.embedding_dim 1.0 +289 11 optimizer.lr 0.006182950952003412 +289 11 negative_sampler.num_negs_per_pos 85.0 +289 11 training.batch_size 0.0 +289 12 model.embedding_dim 0.0 +289 12 optimizer.lr 0.001977207488319447 +289 12 negative_sampler.num_negs_per_pos 18.0 +289 12 training.batch_size 1.0 +289 13 model.embedding_dim 0.0 +289 13 optimizer.lr 0.005102465278476841 +289 13 negative_sampler.num_negs_per_pos 35.0 +289 13 training.batch_size 1.0 +289 14 model.embedding_dim 2.0 +289 14 optimizer.lr 0.006114441595942585 +289 14 negative_sampler.num_negs_per_pos 33.0 +289 14 training.batch_size 0.0 +289 15 model.embedding_dim 0.0 +289 15 optimizer.lr 0.004543672956925169 +289 15 negative_sampler.num_negs_per_pos 70.0 +289 15 training.batch_size 0.0 +289 16 model.embedding_dim 1.0 +289 16 optimizer.lr 0.009844221641543463 +289 16 negative_sampler.num_negs_per_pos 79.0 +289 16 training.batch_size 0.0 +289 17 model.embedding_dim 1.0 +289 17 optimizer.lr 0.00488146176939171 +289 17 negative_sampler.num_negs_per_pos 5.0 +289 17 training.batch_size 2.0 +289 18 model.embedding_dim 2.0 +289 18 optimizer.lr 0.04599373419013042 +289 18 negative_sampler.num_negs_per_pos 66.0 +289 18 training.batch_size 2.0 +289 19 model.embedding_dim 0.0 +289 19 optimizer.lr 0.013592999898549282 +289 19 
negative_sampler.num_negs_per_pos 87.0 +289 19 training.batch_size 1.0 +289 20 model.embedding_dim 1.0 +289 20 optimizer.lr 0.08263638244957411 +289 20 negative_sampler.num_negs_per_pos 48.0 +289 20 training.batch_size 2.0 +289 21 model.embedding_dim 1.0 +289 21 optimizer.lr 0.01162873170519199 +289 21 negative_sampler.num_negs_per_pos 32.0 +289 21 training.batch_size 0.0 +289 22 model.embedding_dim 1.0 +289 22 optimizer.lr 0.002976508704179578 +289 22 negative_sampler.num_negs_per_pos 80.0 +289 22 training.batch_size 0.0 +289 23 model.embedding_dim 2.0 +289 23 optimizer.lr 0.036645744440366626 +289 23 negative_sampler.num_negs_per_pos 26.0 +289 23 training.batch_size 2.0 +289 24 model.embedding_dim 0.0 +289 24 optimizer.lr 0.06105138850423303 +289 24 negative_sampler.num_negs_per_pos 99.0 +289 24 training.batch_size 0.0 +289 25 model.embedding_dim 1.0 +289 25 optimizer.lr 0.0012892178186934451 +289 25 negative_sampler.num_negs_per_pos 85.0 +289 25 training.batch_size 1.0 +289 26 model.embedding_dim 1.0 +289 26 optimizer.lr 0.09295776204838471 +289 26 negative_sampler.num_negs_per_pos 31.0 +289 26 training.batch_size 0.0 +289 27 model.embedding_dim 0.0 +289 27 optimizer.lr 0.0019339641130130525 +289 27 negative_sampler.num_negs_per_pos 32.0 +289 27 training.batch_size 1.0 +289 28 model.embedding_dim 2.0 +289 28 optimizer.lr 0.08883613073350345 +289 28 negative_sampler.num_negs_per_pos 76.0 +289 28 training.batch_size 2.0 +289 29 model.embedding_dim 1.0 +289 29 optimizer.lr 0.0014275048714891877 +289 29 negative_sampler.num_negs_per_pos 57.0 +289 29 training.batch_size 2.0 +289 30 model.embedding_dim 2.0 +289 30 optimizer.lr 0.02570618251541115 +289 30 negative_sampler.num_negs_per_pos 65.0 +289 30 training.batch_size 0.0 +289 31 model.embedding_dim 0.0 +289 31 optimizer.lr 0.05157895666730736 +289 31 negative_sampler.num_negs_per_pos 5.0 +289 31 training.batch_size 2.0 +289 32 model.embedding_dim 0.0 +289 32 optimizer.lr 0.0024619128202832824 +289 32 
negative_sampler.num_negs_per_pos 89.0 +289 32 training.batch_size 2.0 +289 33 model.embedding_dim 1.0 +289 33 optimizer.lr 0.01461390097661921 +289 33 negative_sampler.num_negs_per_pos 76.0 +289 33 training.batch_size 1.0 +289 34 model.embedding_dim 0.0 +289 34 optimizer.lr 0.004511953978832982 +289 34 negative_sampler.num_negs_per_pos 61.0 +289 34 training.batch_size 1.0 +289 35 model.embedding_dim 2.0 +289 35 optimizer.lr 0.002525228338678919 +289 35 negative_sampler.num_negs_per_pos 62.0 +289 35 training.batch_size 1.0 +289 36 model.embedding_dim 0.0 +289 36 optimizer.lr 0.0028517591499682303 +289 36 negative_sampler.num_negs_per_pos 41.0 +289 36 training.batch_size 1.0 +289 37 model.embedding_dim 1.0 +289 37 optimizer.lr 0.009691480438386206 +289 37 negative_sampler.num_negs_per_pos 2.0 +289 37 training.batch_size 0.0 +289 1 dataset """fb15k237""" +289 1 model """hole""" +289 1 loss """softplus""" +289 1 regularizer """no""" +289 1 optimizer """adam""" +289 1 training_loop """owa""" +289 1 negative_sampler """basic""" +289 1 evaluator """rankbased""" +289 2 dataset """fb15k237""" +289 2 model """hole""" +289 2 loss """softplus""" +289 2 regularizer """no""" +289 2 optimizer """adam""" +289 2 training_loop """owa""" +289 2 negative_sampler """basic""" +289 2 evaluator """rankbased""" +289 3 dataset """fb15k237""" +289 3 model """hole""" +289 3 loss """softplus""" +289 3 regularizer """no""" +289 3 optimizer """adam""" +289 3 training_loop """owa""" +289 3 negative_sampler """basic""" +289 3 evaluator """rankbased""" +289 4 dataset """fb15k237""" +289 4 model """hole""" +289 4 loss """softplus""" +289 4 regularizer """no""" +289 4 optimizer """adam""" +289 4 training_loop """owa""" +289 4 negative_sampler """basic""" +289 4 evaluator """rankbased""" +289 5 dataset """fb15k237""" +289 5 model """hole""" +289 5 loss """softplus""" +289 5 regularizer """no""" +289 5 optimizer """adam""" +289 5 training_loop """owa""" +289 5 negative_sampler """basic""" +289 5 
evaluator """rankbased""" +289 6 dataset """fb15k237""" +289 6 model """hole""" +289 6 loss """softplus""" +289 6 regularizer """no""" +289 6 optimizer """adam""" +289 6 training_loop """owa""" +289 6 negative_sampler """basic""" +289 6 evaluator """rankbased""" +289 7 dataset """fb15k237""" +289 7 model """hole""" +289 7 loss """softplus""" +289 7 regularizer """no""" +289 7 optimizer """adam""" +289 7 training_loop """owa""" +289 7 negative_sampler """basic""" +289 7 evaluator """rankbased""" +289 8 dataset """fb15k237""" +289 8 model """hole""" +289 8 loss """softplus""" +289 8 regularizer """no""" +289 8 optimizer """adam""" +289 8 training_loop """owa""" +289 8 negative_sampler """basic""" +289 8 evaluator """rankbased""" +289 9 dataset """fb15k237""" +289 9 model """hole""" +289 9 loss """softplus""" +289 9 regularizer """no""" +289 9 optimizer """adam""" +289 9 training_loop """owa""" +289 9 negative_sampler """basic""" +289 9 evaluator """rankbased""" +289 10 dataset """fb15k237""" +289 10 model """hole""" +289 10 loss """softplus""" +289 10 regularizer """no""" +289 10 optimizer """adam""" +289 10 training_loop """owa""" +289 10 negative_sampler """basic""" +289 10 evaluator """rankbased""" +289 11 dataset """fb15k237""" +289 11 model """hole""" +289 11 loss """softplus""" +289 11 regularizer """no""" +289 11 optimizer """adam""" +289 11 training_loop """owa""" +289 11 negative_sampler """basic""" +289 11 evaluator """rankbased""" +289 12 dataset """fb15k237""" +289 12 model """hole""" +289 12 loss """softplus""" +289 12 regularizer """no""" +289 12 optimizer """adam""" +289 12 training_loop """owa""" +289 12 negative_sampler """basic""" +289 12 evaluator """rankbased""" +289 13 dataset """fb15k237""" +289 13 model """hole""" +289 13 loss """softplus""" +289 13 regularizer """no""" +289 13 optimizer """adam""" +289 13 training_loop """owa""" +289 13 negative_sampler """basic""" +289 13 evaluator """rankbased""" +289 14 dataset """fb15k237""" +289 14 model 
"""hole""" +289 14 loss """softplus""" +289 14 regularizer """no""" +289 14 optimizer """adam""" +289 14 training_loop """owa""" +289 14 negative_sampler """basic""" +289 14 evaluator """rankbased""" +289 15 dataset """fb15k237""" +289 15 model """hole""" +289 15 loss """softplus""" +289 15 regularizer """no""" +289 15 optimizer """adam""" +289 15 training_loop """owa""" +289 15 negative_sampler """basic""" +289 15 evaluator """rankbased""" +289 16 dataset """fb15k237""" +289 16 model """hole""" +289 16 loss """softplus""" +289 16 regularizer """no""" +289 16 optimizer """adam""" +289 16 training_loop """owa""" +289 16 negative_sampler """basic""" +289 16 evaluator """rankbased""" +289 17 dataset """fb15k237""" +289 17 model """hole""" +289 17 loss """softplus""" +289 17 regularizer """no""" +289 17 optimizer """adam""" +289 17 training_loop """owa""" +289 17 negative_sampler """basic""" +289 17 evaluator """rankbased""" +289 18 dataset """fb15k237""" +289 18 model """hole""" +289 18 loss """softplus""" +289 18 regularizer """no""" +289 18 optimizer """adam""" +289 18 training_loop """owa""" +289 18 negative_sampler """basic""" +289 18 evaluator """rankbased""" +289 19 dataset """fb15k237""" +289 19 model """hole""" +289 19 loss """softplus""" +289 19 regularizer """no""" +289 19 optimizer """adam""" +289 19 training_loop """owa""" +289 19 negative_sampler """basic""" +289 19 evaluator """rankbased""" +289 20 dataset """fb15k237""" +289 20 model """hole""" +289 20 loss """softplus""" +289 20 regularizer """no""" +289 20 optimizer """adam""" +289 20 training_loop """owa""" +289 20 negative_sampler """basic""" +289 20 evaluator """rankbased""" +289 21 dataset """fb15k237""" +289 21 model """hole""" +289 21 loss """softplus""" +289 21 regularizer """no""" +289 21 optimizer """adam""" +289 21 training_loop """owa""" +289 21 negative_sampler """basic""" +289 21 evaluator """rankbased""" +289 22 dataset """fb15k237""" +289 22 model """hole""" +289 22 loss """softplus""" 
+289 22 regularizer """no""" +289 22 optimizer """adam""" +289 22 training_loop """owa""" +289 22 negative_sampler """basic""" +289 22 evaluator """rankbased""" +289 23 dataset """fb15k237""" +289 23 model """hole""" +289 23 loss """softplus""" +289 23 regularizer """no""" +289 23 optimizer """adam""" +289 23 training_loop """owa""" +289 23 negative_sampler """basic""" +289 23 evaluator """rankbased""" +289 24 dataset """fb15k237""" +289 24 model """hole""" +289 24 loss """softplus""" +289 24 regularizer """no""" +289 24 optimizer """adam""" +289 24 training_loop """owa""" +289 24 negative_sampler """basic""" +289 24 evaluator """rankbased""" +289 25 dataset """fb15k237""" +289 25 model """hole""" +289 25 loss """softplus""" +289 25 regularizer """no""" +289 25 optimizer """adam""" +289 25 training_loop """owa""" +289 25 negative_sampler """basic""" +289 25 evaluator """rankbased""" +289 26 dataset """fb15k237""" +289 26 model """hole""" +289 26 loss """softplus""" +289 26 regularizer """no""" +289 26 optimizer """adam""" +289 26 training_loop """owa""" +289 26 negative_sampler """basic""" +289 26 evaluator """rankbased""" +289 27 dataset """fb15k237""" +289 27 model """hole""" +289 27 loss """softplus""" +289 27 regularizer """no""" +289 27 optimizer """adam""" +289 27 training_loop """owa""" +289 27 negative_sampler """basic""" +289 27 evaluator """rankbased""" +289 28 dataset """fb15k237""" +289 28 model """hole""" +289 28 loss """softplus""" +289 28 regularizer """no""" +289 28 optimizer """adam""" +289 28 training_loop """owa""" +289 28 negative_sampler """basic""" +289 28 evaluator """rankbased""" +289 29 dataset """fb15k237""" +289 29 model """hole""" +289 29 loss """softplus""" +289 29 regularizer """no""" +289 29 optimizer """adam""" +289 29 training_loop """owa""" +289 29 negative_sampler """basic""" +289 29 evaluator """rankbased""" +289 30 dataset """fb15k237""" +289 30 model """hole""" +289 30 loss """softplus""" +289 30 regularizer """no""" +289 30 
optimizer """adam""" +289 30 training_loop """owa""" +289 30 negative_sampler """basic""" +289 30 evaluator """rankbased""" +289 31 dataset """fb15k237""" +289 31 model """hole""" +289 31 loss """softplus""" +289 31 regularizer """no""" +289 31 optimizer """adam""" +289 31 training_loop """owa""" +289 31 negative_sampler """basic""" +289 31 evaluator """rankbased""" +289 32 dataset """fb15k237""" +289 32 model """hole""" +289 32 loss """softplus""" +289 32 regularizer """no""" +289 32 optimizer """adam""" +289 32 training_loop """owa""" +289 32 negative_sampler """basic""" +289 32 evaluator """rankbased""" +289 33 dataset """fb15k237""" +289 33 model """hole""" +289 33 loss """softplus""" +289 33 regularizer """no""" +289 33 optimizer """adam""" +289 33 training_loop """owa""" +289 33 negative_sampler """basic""" +289 33 evaluator """rankbased""" +289 34 dataset """fb15k237""" +289 34 model """hole""" +289 34 loss """softplus""" +289 34 regularizer """no""" +289 34 optimizer """adam""" +289 34 training_loop """owa""" +289 34 negative_sampler """basic""" +289 34 evaluator """rankbased""" +289 35 dataset """fb15k237""" +289 35 model """hole""" +289 35 loss """softplus""" +289 35 regularizer """no""" +289 35 optimizer """adam""" +289 35 training_loop """owa""" +289 35 negative_sampler """basic""" +289 35 evaluator """rankbased""" +289 36 dataset """fb15k237""" +289 36 model """hole""" +289 36 loss """softplus""" +289 36 regularizer """no""" +289 36 optimizer """adam""" +289 36 training_loop """owa""" +289 36 negative_sampler """basic""" +289 36 evaluator """rankbased""" +289 37 dataset """fb15k237""" +289 37 model """hole""" +289 37 loss """softplus""" +289 37 regularizer """no""" +289 37 optimizer """adam""" +289 37 training_loop """owa""" +289 37 negative_sampler """basic""" +289 37 evaluator """rankbased""" +290 1 model.embedding_dim 0.0 +290 1 loss.margin 8.007828419277221 +290 1 optimizer.lr 0.08028376856954932 +290 1 negative_sampler.num_negs_per_pos 25.0 +290 1 
training.batch_size 0.0 +290 2 model.embedding_dim 2.0 +290 2 loss.margin 9.007115940600496 +290 2 optimizer.lr 0.052757134207259276 +290 2 negative_sampler.num_negs_per_pos 48.0 +290 2 training.batch_size 0.0 +290 3 model.embedding_dim 0.0 +290 3 loss.margin 9.317682540250635 +290 3 optimizer.lr 0.0524949148068284 +290 3 negative_sampler.num_negs_per_pos 30.0 +290 3 training.batch_size 0.0 +290 4 model.embedding_dim 0.0 +290 4 loss.margin 5.777128828095224 +290 4 optimizer.lr 0.011119306605334412 +290 4 negative_sampler.num_negs_per_pos 10.0 +290 4 training.batch_size 2.0 +290 5 model.embedding_dim 0.0 +290 5 loss.margin 2.5511704239870623 +290 5 optimizer.lr 0.0036575089575487264 +290 5 negative_sampler.num_negs_per_pos 65.0 +290 5 training.batch_size 1.0 +290 6 model.embedding_dim 0.0 +290 6 loss.margin 5.590014116959727 +290 6 optimizer.lr 0.045048786246922 +290 6 negative_sampler.num_negs_per_pos 35.0 +290 6 training.batch_size 0.0 +290 7 model.embedding_dim 0.0 +290 7 loss.margin 3.4209319924647907 +290 7 optimizer.lr 0.024811834005614986 +290 7 negative_sampler.num_negs_per_pos 41.0 +290 7 training.batch_size 1.0 +290 8 model.embedding_dim 0.0 +290 8 loss.margin 2.8927944493621407 +290 8 optimizer.lr 0.007659967589958287 +290 8 negative_sampler.num_negs_per_pos 45.0 +290 8 training.batch_size 2.0 +290 9 model.embedding_dim 0.0 +290 9 loss.margin 5.990678490678485 +290 9 optimizer.lr 0.015006205173132482 +290 9 negative_sampler.num_negs_per_pos 24.0 +290 9 training.batch_size 0.0 +290 10 model.embedding_dim 2.0 +290 10 loss.margin 3.0874770821103077 +290 10 optimizer.lr 0.014131591663396786 +290 10 negative_sampler.num_negs_per_pos 95.0 +290 10 training.batch_size 0.0 +290 11 model.embedding_dim 0.0 +290 11 loss.margin 8.071631813645391 +290 11 optimizer.lr 0.04379268431439251 +290 11 negative_sampler.num_negs_per_pos 58.0 +290 11 training.batch_size 0.0 +290 12 model.embedding_dim 2.0 +290 12 loss.margin 8.26992367317295 +290 12 optimizer.lr 
0.0698183964580696 +290 12 negative_sampler.num_negs_per_pos 65.0 +290 12 training.batch_size 2.0 +290 1 dataset """fb15k237""" +290 1 model """hole""" +290 1 loss """marginranking""" +290 1 regularizer """no""" +290 1 optimizer """adam""" +290 1 training_loop """owa""" +290 1 negative_sampler """basic""" +290 1 evaluator """rankbased""" +290 2 dataset """fb15k237""" +290 2 model """hole""" +290 2 loss """marginranking""" +290 2 regularizer """no""" +290 2 optimizer """adam""" +290 2 training_loop """owa""" +290 2 negative_sampler """basic""" +290 2 evaluator """rankbased""" +290 3 dataset """fb15k237""" +290 3 model """hole""" +290 3 loss """marginranking""" +290 3 regularizer """no""" +290 3 optimizer """adam""" +290 3 training_loop """owa""" +290 3 negative_sampler """basic""" +290 3 evaluator """rankbased""" +290 4 dataset """fb15k237""" +290 4 model """hole""" +290 4 loss """marginranking""" +290 4 regularizer """no""" +290 4 optimizer """adam""" +290 4 training_loop """owa""" +290 4 negative_sampler """basic""" +290 4 evaluator """rankbased""" +290 5 dataset """fb15k237""" +290 5 model """hole""" +290 5 loss """marginranking""" +290 5 regularizer """no""" +290 5 optimizer """adam""" +290 5 training_loop """owa""" +290 5 negative_sampler """basic""" +290 5 evaluator """rankbased""" +290 6 dataset """fb15k237""" +290 6 model """hole""" +290 6 loss """marginranking""" +290 6 regularizer """no""" +290 6 optimizer """adam""" +290 6 training_loop """owa""" +290 6 negative_sampler """basic""" +290 6 evaluator """rankbased""" +290 7 dataset """fb15k237""" +290 7 model """hole""" +290 7 loss """marginranking""" +290 7 regularizer """no""" +290 7 optimizer """adam""" +290 7 training_loop """owa""" +290 7 negative_sampler """basic""" +290 7 evaluator """rankbased""" +290 8 dataset """fb15k237""" +290 8 model """hole""" +290 8 loss """marginranking""" +290 8 regularizer """no""" +290 8 optimizer """adam""" +290 8 training_loop """owa""" +290 8 negative_sampler 
"""basic""" +290 8 evaluator """rankbased""" +290 9 dataset """fb15k237""" +290 9 model """hole""" +290 9 loss """marginranking""" +290 9 regularizer """no""" +290 9 optimizer """adam""" +290 9 training_loop """owa""" +290 9 negative_sampler """basic""" +290 9 evaluator """rankbased""" +290 10 dataset """fb15k237""" +290 10 model """hole""" +290 10 loss """marginranking""" +290 10 regularizer """no""" +290 10 optimizer """adam""" +290 10 training_loop """owa""" +290 10 negative_sampler """basic""" +290 10 evaluator """rankbased""" +290 11 dataset """fb15k237""" +290 11 model """hole""" +290 11 loss """marginranking""" +290 11 regularizer """no""" +290 11 optimizer """adam""" +290 11 training_loop """owa""" +290 11 negative_sampler """basic""" +290 11 evaluator """rankbased""" +290 12 dataset """fb15k237""" +290 12 model """hole""" +290 12 loss """marginranking""" +290 12 regularizer """no""" +290 12 optimizer """adam""" +290 12 training_loop """owa""" +290 12 negative_sampler """basic""" +290 12 evaluator """rankbased""" +291 1 model.embedding_dim 0.0 +291 1 loss.margin 1.4290380036774777 +291 1 optimizer.lr 0.03774705989506999 +291 1 negative_sampler.num_negs_per_pos 55.0 +291 1 training.batch_size 1.0 +291 2 model.embedding_dim 1.0 +291 2 loss.margin 9.709335271501358 +291 2 optimizer.lr 0.04636448897224152 +291 2 negative_sampler.num_negs_per_pos 23.0 +291 2 training.batch_size 0.0 +291 3 model.embedding_dim 1.0 +291 3 loss.margin 0.5533589421214036 +291 3 optimizer.lr 0.04683223768129388 +291 3 negative_sampler.num_negs_per_pos 94.0 +291 3 training.batch_size 1.0 +291 4 model.embedding_dim 0.0 +291 4 loss.margin 5.287944114200199 +291 4 optimizer.lr 0.002760819876999798 +291 4 negative_sampler.num_negs_per_pos 24.0 +291 4 training.batch_size 1.0 +291 5 model.embedding_dim 2.0 +291 5 loss.margin 5.774500494535293 +291 5 optimizer.lr 0.0018934176503118691 +291 5 negative_sampler.num_negs_per_pos 65.0 +291 5 training.batch_size 0.0 +291 6 model.embedding_dim 2.0 
+291 6 loss.margin 6.074736146210705 +291 6 optimizer.lr 0.005362738834060583 +291 6 negative_sampler.num_negs_per_pos 18.0 +291 6 training.batch_size 2.0 +291 7 model.embedding_dim 2.0 +291 7 loss.margin 6.224342864459103 +291 7 optimizer.lr 0.021504586188865207 +291 7 negative_sampler.num_negs_per_pos 80.0 +291 7 training.batch_size 0.0 +291 8 model.embedding_dim 2.0 +291 8 loss.margin 6.320699248877744 +291 8 optimizer.lr 0.010833721314168968 +291 8 negative_sampler.num_negs_per_pos 18.0 +291 8 training.batch_size 0.0 +291 9 model.embedding_dim 1.0 +291 9 loss.margin 3.0181970549580535 +291 9 optimizer.lr 0.020703699361710005 +291 9 negative_sampler.num_negs_per_pos 5.0 +291 9 training.batch_size 2.0 +291 10 model.embedding_dim 1.0 +291 10 loss.margin 8.997945353820027 +291 10 optimizer.lr 0.001055618244959681 +291 10 negative_sampler.num_negs_per_pos 83.0 +291 10 training.batch_size 0.0 +291 11 model.embedding_dim 1.0 +291 11 loss.margin 6.982262919308014 +291 11 optimizer.lr 0.008056159119740242 +291 11 negative_sampler.num_negs_per_pos 70.0 +291 11 training.batch_size 1.0 +291 12 model.embedding_dim 0.0 +291 12 loss.margin 1.3560486762204556 +291 12 optimizer.lr 0.0177495674933154 +291 12 negative_sampler.num_negs_per_pos 12.0 +291 12 training.batch_size 0.0 +291 13 model.embedding_dim 1.0 +291 13 loss.margin 2.6044932829251253 +291 13 optimizer.lr 0.03183218947743613 +291 13 negative_sampler.num_negs_per_pos 17.0 +291 13 training.batch_size 2.0 +291 14 model.embedding_dim 1.0 +291 14 loss.margin 3.69089987596718 +291 14 optimizer.lr 0.036135505908698774 +291 14 negative_sampler.num_negs_per_pos 19.0 +291 14 training.batch_size 1.0 +291 15 model.embedding_dim 2.0 +291 15 loss.margin 8.177807339968801 +291 15 optimizer.lr 0.0972308675201109 +291 15 negative_sampler.num_negs_per_pos 24.0 +291 15 training.batch_size 2.0 +291 16 model.embedding_dim 0.0 +291 16 loss.margin 7.028136843755746 +291 16 optimizer.lr 0.0012478712746143562 +291 16 
negative_sampler.num_negs_per_pos 54.0 +291 16 training.batch_size 2.0 +291 17 model.embedding_dim 0.0 +291 17 loss.margin 9.992449632807915 +291 17 optimizer.lr 0.011938403719960404 +291 17 negative_sampler.num_negs_per_pos 69.0 +291 17 training.batch_size 2.0 +291 18 model.embedding_dim 0.0 +291 18 loss.margin 7.088078969410206 +291 18 optimizer.lr 0.027909464399967176 +291 18 negative_sampler.num_negs_per_pos 86.0 +291 18 training.batch_size 2.0 +291 19 model.embedding_dim 0.0 +291 19 loss.margin 6.9136099989970745 +291 19 optimizer.lr 0.0010969901034833204 +291 19 negative_sampler.num_negs_per_pos 46.0 +291 19 training.batch_size 1.0 +291 20 model.embedding_dim 0.0 +291 20 loss.margin 3.834256083321548 +291 20 optimizer.lr 0.0015019406541463066 +291 20 negative_sampler.num_negs_per_pos 71.0 +291 20 training.batch_size 1.0 +291 21 model.embedding_dim 1.0 +291 21 loss.margin 9.23996661115863 +291 21 optimizer.lr 0.01449570048403421 +291 21 negative_sampler.num_negs_per_pos 42.0 +291 21 training.batch_size 2.0 +291 22 model.embedding_dim 2.0 +291 22 loss.margin 1.3291901041087675 +291 22 optimizer.lr 0.03809778477863526 +291 22 negative_sampler.num_negs_per_pos 50.0 +291 22 training.batch_size 2.0 +291 23 model.embedding_dim 1.0 +291 23 loss.margin 6.4646014837203625 +291 23 optimizer.lr 0.021786344842076477 +291 23 negative_sampler.num_negs_per_pos 17.0 +291 23 training.batch_size 1.0 +291 24 model.embedding_dim 1.0 +291 24 loss.margin 9.418936742845965 +291 24 optimizer.lr 0.05770905544158178 +291 24 negative_sampler.num_negs_per_pos 9.0 +291 24 training.batch_size 2.0 +291 25 model.embedding_dim 0.0 +291 25 loss.margin 3.89473473790728 +291 25 optimizer.lr 0.0030976857180376686 +291 25 negative_sampler.num_negs_per_pos 16.0 +291 25 training.batch_size 0.0 +291 26 model.embedding_dim 0.0 +291 26 loss.margin 2.7434745816039143 +291 26 optimizer.lr 0.0018070960047005414 +291 26 negative_sampler.num_negs_per_pos 65.0 +291 26 training.batch_size 2.0 +291 27 
model.embedding_dim 2.0 +291 27 loss.margin 2.1239089825139095 +291 27 optimizer.lr 0.007551190906803996 +291 27 negative_sampler.num_negs_per_pos 39.0 +291 27 training.batch_size 1.0 +291 28 model.embedding_dim 1.0 +291 28 loss.margin 1.2155288231121393 +291 28 optimizer.lr 0.0022209857802647985 +291 28 negative_sampler.num_negs_per_pos 34.0 +291 28 training.batch_size 0.0 +291 29 model.embedding_dim 2.0 +291 29 loss.margin 9.930103103162896 +291 29 optimizer.lr 0.0010976259823565876 +291 29 negative_sampler.num_negs_per_pos 89.0 +291 29 training.batch_size 2.0 +291 30 model.embedding_dim 0.0 +291 30 loss.margin 7.815134887018301 +291 30 optimizer.lr 0.0035607973532907886 +291 30 negative_sampler.num_negs_per_pos 90.0 +291 30 training.batch_size 0.0 +291 1 dataset """fb15k237""" +291 1 model """hole""" +291 1 loss """marginranking""" +291 1 regularizer """no""" +291 1 optimizer """adam""" +291 1 training_loop """owa""" +291 1 negative_sampler """basic""" +291 1 evaluator """rankbased""" +291 2 dataset """fb15k237""" +291 2 model """hole""" +291 2 loss """marginranking""" +291 2 regularizer """no""" +291 2 optimizer """adam""" +291 2 training_loop """owa""" +291 2 negative_sampler """basic""" +291 2 evaluator """rankbased""" +291 3 dataset """fb15k237""" +291 3 model """hole""" +291 3 loss """marginranking""" +291 3 regularizer """no""" +291 3 optimizer """adam""" +291 3 training_loop """owa""" +291 3 negative_sampler """basic""" +291 3 evaluator """rankbased""" +291 4 dataset """fb15k237""" +291 4 model """hole""" +291 4 loss """marginranking""" +291 4 regularizer """no""" +291 4 optimizer """adam""" +291 4 training_loop """owa""" +291 4 negative_sampler """basic""" +291 4 evaluator """rankbased""" +291 5 dataset """fb15k237""" +291 5 model """hole""" +291 5 loss """marginranking""" +291 5 regularizer """no""" +291 5 optimizer """adam""" +291 5 training_loop """owa""" +291 5 negative_sampler """basic""" +291 5 evaluator """rankbased""" +291 6 dataset 
"""fb15k237""" +291 6 model """hole""" +291 6 loss """marginranking""" +291 6 regularizer """no""" +291 6 optimizer """adam""" +291 6 training_loop """owa""" +291 6 negative_sampler """basic""" +291 6 evaluator """rankbased""" +291 7 dataset """fb15k237""" +291 7 model """hole""" +291 7 loss """marginranking""" +291 7 regularizer """no""" +291 7 optimizer """adam""" +291 7 training_loop """owa""" +291 7 negative_sampler """basic""" +291 7 evaluator """rankbased""" +291 8 dataset """fb15k237""" +291 8 model """hole""" +291 8 loss """marginranking""" +291 8 regularizer """no""" +291 8 optimizer """adam""" +291 8 training_loop """owa""" +291 8 negative_sampler """basic""" +291 8 evaluator """rankbased""" +291 9 dataset """fb15k237""" +291 9 model """hole""" +291 9 loss """marginranking""" +291 9 regularizer """no""" +291 9 optimizer """adam""" +291 9 training_loop """owa""" +291 9 negative_sampler """basic""" +291 9 evaluator """rankbased""" +291 10 dataset """fb15k237""" +291 10 model """hole""" +291 10 loss """marginranking""" +291 10 regularizer """no""" +291 10 optimizer """adam""" +291 10 training_loop """owa""" +291 10 negative_sampler """basic""" +291 10 evaluator """rankbased""" +291 11 dataset """fb15k237""" +291 11 model """hole""" +291 11 loss """marginranking""" +291 11 regularizer """no""" +291 11 optimizer """adam""" +291 11 training_loop """owa""" +291 11 negative_sampler """basic""" +291 11 evaluator """rankbased""" +291 12 dataset """fb15k237""" +291 12 model """hole""" +291 12 loss """marginranking""" +291 12 regularizer """no""" +291 12 optimizer """adam""" +291 12 training_loop """owa""" +291 12 negative_sampler """basic""" +291 12 evaluator """rankbased""" +291 13 dataset """fb15k237""" +291 13 model """hole""" +291 13 loss """marginranking""" +291 13 regularizer """no""" +291 13 optimizer """adam""" +291 13 training_loop """owa""" +291 13 negative_sampler """basic""" +291 13 evaluator """rankbased""" +291 14 dataset """fb15k237""" +291 14 model 
"""hole""" +291 14 loss """marginranking""" +291 14 regularizer """no""" +291 14 optimizer """adam""" +291 14 training_loop """owa""" +291 14 negative_sampler """basic""" +291 14 evaluator """rankbased""" +291 15 dataset """fb15k237""" +291 15 model """hole""" +291 15 loss """marginranking""" +291 15 regularizer """no""" +291 15 optimizer """adam""" +291 15 training_loop """owa""" +291 15 negative_sampler """basic""" +291 15 evaluator """rankbased""" +291 16 dataset """fb15k237""" +291 16 model """hole""" +291 16 loss """marginranking""" +291 16 regularizer """no""" +291 16 optimizer """adam""" +291 16 training_loop """owa""" +291 16 negative_sampler """basic""" +291 16 evaluator """rankbased""" +291 17 dataset """fb15k237""" +291 17 model """hole""" +291 17 loss """marginranking""" +291 17 regularizer """no""" +291 17 optimizer """adam""" +291 17 training_loop """owa""" +291 17 negative_sampler """basic""" +291 17 evaluator """rankbased""" +291 18 dataset """fb15k237""" +291 18 model """hole""" +291 18 loss """marginranking""" +291 18 regularizer """no""" +291 18 optimizer """adam""" +291 18 training_loop """owa""" +291 18 negative_sampler """basic""" +291 18 evaluator """rankbased""" +291 19 dataset """fb15k237""" +291 19 model """hole""" +291 19 loss """marginranking""" +291 19 regularizer """no""" +291 19 optimizer """adam""" +291 19 training_loop """owa""" +291 19 negative_sampler """basic""" +291 19 evaluator """rankbased""" +291 20 dataset """fb15k237""" +291 20 model """hole""" +291 20 loss """marginranking""" +291 20 regularizer """no""" +291 20 optimizer """adam""" +291 20 training_loop """owa""" +291 20 negative_sampler """basic""" +291 20 evaluator """rankbased""" +291 21 dataset """fb15k237""" +291 21 model """hole""" +291 21 loss """marginranking""" +291 21 regularizer """no""" +291 21 optimizer """adam""" +291 21 training_loop """owa""" +291 21 negative_sampler """basic""" +291 21 evaluator """rankbased""" +291 22 dataset """fb15k237""" +291 22 model 
"""hole""" +291 22 loss """marginranking""" +291 22 regularizer """no""" +291 22 optimizer """adam""" +291 22 training_loop """owa""" +291 22 negative_sampler """basic""" +291 22 evaluator """rankbased""" +291 23 dataset """fb15k237""" +291 23 model """hole""" +291 23 loss """marginranking""" +291 23 regularizer """no""" +291 23 optimizer """adam""" +291 23 training_loop """owa""" +291 23 negative_sampler """basic""" +291 23 evaluator """rankbased""" +291 24 dataset """fb15k237""" +291 24 model """hole""" +291 24 loss """marginranking""" +291 24 regularizer """no""" +291 24 optimizer """adam""" +291 24 training_loop """owa""" +291 24 negative_sampler """basic""" +291 24 evaluator """rankbased""" +291 25 dataset """fb15k237""" +291 25 model """hole""" +291 25 loss """marginranking""" +291 25 regularizer """no""" +291 25 optimizer """adam""" +291 25 training_loop """owa""" +291 25 negative_sampler """basic""" +291 25 evaluator """rankbased""" +291 26 dataset """fb15k237""" +291 26 model """hole""" +291 26 loss """marginranking""" +291 26 regularizer """no""" +291 26 optimizer """adam""" +291 26 training_loop """owa""" +291 26 negative_sampler """basic""" +291 26 evaluator """rankbased""" +291 27 dataset """fb15k237""" +291 27 model """hole""" +291 27 loss """marginranking""" +291 27 regularizer """no""" +291 27 optimizer """adam""" +291 27 training_loop """owa""" +291 27 negative_sampler """basic""" +291 27 evaluator """rankbased""" +291 28 dataset """fb15k237""" +291 28 model """hole""" +291 28 loss """marginranking""" +291 28 regularizer """no""" +291 28 optimizer """adam""" +291 28 training_loop """owa""" +291 28 negative_sampler """basic""" +291 28 evaluator """rankbased""" +291 29 dataset """fb15k237""" +291 29 model """hole""" +291 29 loss """marginranking""" +291 29 regularizer """no""" +291 29 optimizer """adam""" +291 29 training_loop """owa""" +291 29 negative_sampler """basic""" +291 29 evaluator """rankbased""" +291 30 dataset """fb15k237""" +291 30 model 
"""hole""" +291 30 loss """marginranking""" +291 30 regularizer """no""" +291 30 optimizer """adam""" +291 30 training_loop """owa""" +291 30 negative_sampler """basic""" +291 30 evaluator """rankbased""" +292 1 model.embedding_dim 2.0 +292 1 optimizer.lr 0.00991037329209612 +292 1 training.batch_size 1.0 +292 1 training.label_smoothing 0.07387132940431616 +292 2 model.embedding_dim 1.0 +292 2 optimizer.lr 0.0010842432780069052 +292 2 training.batch_size 2.0 +292 2 training.label_smoothing 0.0016023264302845763 +292 3 model.embedding_dim 1.0 +292 3 optimizer.lr 0.013865058857394072 +292 3 training.batch_size 2.0 +292 3 training.label_smoothing 0.001960972209668501 +292 4 model.embedding_dim 0.0 +292 4 optimizer.lr 0.0034924794030898893 +292 4 training.batch_size 2.0 +292 4 training.label_smoothing 0.015580136443188602 +292 5 model.embedding_dim 2.0 +292 5 optimizer.lr 0.004773212167180756 +292 5 training.batch_size 1.0 +292 5 training.label_smoothing 0.16503454753931338 +292 6 model.embedding_dim 0.0 +292 6 optimizer.lr 0.0011929217856086584 +292 6 training.batch_size 2.0 +292 6 training.label_smoothing 0.008816697234412935 +292 7 model.embedding_dim 1.0 +292 7 optimizer.lr 0.0036477270384811782 +292 7 training.batch_size 2.0 +292 7 training.label_smoothing 0.0030021654500665 +292 1 dataset """fb15k237""" +292 1 model """hole""" +292 1 loss """crossentropy""" +292 1 regularizer """no""" +292 1 optimizer """adam""" +292 1 training_loop """lcwa""" +292 1 evaluator """rankbased""" +292 2 dataset """fb15k237""" +292 2 model """hole""" +292 2 loss """crossentropy""" +292 2 regularizer """no""" +292 2 optimizer """adam""" +292 2 training_loop """lcwa""" +292 2 evaluator """rankbased""" +292 3 dataset """fb15k237""" +292 3 model """hole""" +292 3 loss """crossentropy""" +292 3 regularizer """no""" +292 3 optimizer """adam""" +292 3 training_loop """lcwa""" +292 3 evaluator """rankbased""" +292 4 dataset """fb15k237""" +292 4 model """hole""" +292 4 loss """crossentropy""" 
+292 4 regularizer """no""" +292 4 optimizer """adam""" +292 4 training_loop """lcwa""" +292 4 evaluator """rankbased""" +292 5 dataset """fb15k237""" +292 5 model """hole""" +292 5 loss """crossentropy""" +292 5 regularizer """no""" +292 5 optimizer """adam""" +292 5 training_loop """lcwa""" +292 5 evaluator """rankbased""" +292 6 dataset """fb15k237""" +292 6 model """hole""" +292 6 loss """crossentropy""" +292 6 regularizer """no""" +292 6 optimizer """adam""" +292 6 training_loop """lcwa""" +292 6 evaluator """rankbased""" +292 7 dataset """fb15k237""" +292 7 model """hole""" +292 7 loss """crossentropy""" +292 7 regularizer """no""" +292 7 optimizer """adam""" +292 7 training_loop """lcwa""" +292 7 evaluator """rankbased""" +293 1 model.embedding_dim 0.0 +293 1 optimizer.lr 0.002989817967585768 +293 1 training.batch_size 2.0 +293 1 training.label_smoothing 0.062084953014929806 +293 2 model.embedding_dim 1.0 +293 2 optimizer.lr 0.004444285201467393 +293 2 training.batch_size 1.0 +293 2 training.label_smoothing 0.3066490466367881 +293 3 model.embedding_dim 1.0 +293 3 optimizer.lr 0.008597425589702437 +293 3 training.batch_size 0.0 +293 3 training.label_smoothing 0.20219374225739842 +293 4 model.embedding_dim 1.0 +293 4 optimizer.lr 0.0185821272877481 +293 4 training.batch_size 1.0 +293 4 training.label_smoothing 0.00181551352283794 +293 5 model.embedding_dim 2.0 +293 5 optimizer.lr 0.002914211571465914 +293 5 training.batch_size 0.0 +293 5 training.label_smoothing 0.017018313406891004 +293 6 model.embedding_dim 1.0 +293 6 optimizer.lr 0.002855207475388118 +293 6 training.batch_size 1.0 +293 6 training.label_smoothing 0.1009326210154804 +293 7 model.embedding_dim 1.0 +293 7 optimizer.lr 0.0010922313851710291 +293 7 training.batch_size 2.0 +293 7 training.label_smoothing 0.12653604664113297 +293 8 model.embedding_dim 2.0 +293 8 optimizer.lr 0.003329479670088775 +293 8 training.batch_size 0.0 +293 8 training.label_smoothing 0.042209010066432705 +293 9 
model.embedding_dim 1.0 +293 9 optimizer.lr 0.04587662519209835 +293 9 training.batch_size 1.0 +293 9 training.label_smoothing 0.02743898838672252 +293 10 model.embedding_dim 1.0 +293 10 optimizer.lr 0.005831650420390804 +293 10 training.batch_size 0.0 +293 10 training.label_smoothing 0.012928278023527478 +293 1 dataset """fb15k237""" +293 1 model """hole""" +293 1 loss """crossentropy""" +293 1 regularizer """no""" +293 1 optimizer """adam""" +293 1 training_loop """lcwa""" +293 1 evaluator """rankbased""" +293 2 dataset """fb15k237""" +293 2 model """hole""" +293 2 loss """crossentropy""" +293 2 regularizer """no""" +293 2 optimizer """adam""" +293 2 training_loop """lcwa""" +293 2 evaluator """rankbased""" +293 3 dataset """fb15k237""" +293 3 model """hole""" +293 3 loss """crossentropy""" +293 3 regularizer """no""" +293 3 optimizer """adam""" +293 3 training_loop """lcwa""" +293 3 evaluator """rankbased""" +293 4 dataset """fb15k237""" +293 4 model """hole""" +293 4 loss """crossentropy""" +293 4 regularizer """no""" +293 4 optimizer """adam""" +293 4 training_loop """lcwa""" +293 4 evaluator """rankbased""" +293 5 dataset """fb15k237""" +293 5 model """hole""" +293 5 loss """crossentropy""" +293 5 regularizer """no""" +293 5 optimizer """adam""" +293 5 training_loop """lcwa""" +293 5 evaluator """rankbased""" +293 6 dataset """fb15k237""" +293 6 model """hole""" +293 6 loss """crossentropy""" +293 6 regularizer """no""" +293 6 optimizer """adam""" +293 6 training_loop """lcwa""" +293 6 evaluator """rankbased""" +293 7 dataset """fb15k237""" +293 7 model """hole""" +293 7 loss """crossentropy""" +293 7 regularizer """no""" +293 7 optimizer """adam""" +293 7 training_loop """lcwa""" +293 7 evaluator """rankbased""" +293 8 dataset """fb15k237""" +293 8 model """hole""" +293 8 loss """crossentropy""" +293 8 regularizer """no""" +293 8 optimizer """adam""" +293 8 training_loop """lcwa""" +293 8 evaluator """rankbased""" +293 9 dataset """fb15k237""" +293 9 model 
"""hole""" +293 9 loss """crossentropy""" +293 9 regularizer """no""" +293 9 optimizer """adam""" +293 9 training_loop """lcwa""" +293 9 evaluator """rankbased""" +293 10 dataset """fb15k237""" +293 10 model """hole""" +293 10 loss """crossentropy""" +293 10 regularizer """no""" +293 10 optimizer """adam""" +293 10 training_loop """lcwa""" +293 10 evaluator """rankbased""" +294 1 model.embedding_dim 1.0 +294 1 optimizer.lr 0.015202498755108672 +294 1 training.batch_size 1.0 +294 1 training.label_smoothing 0.0020922829014758913 +294 2 model.embedding_dim 1.0 +294 2 optimizer.lr 0.005840755122880987 +294 2 training.batch_size 2.0 +294 2 training.label_smoothing 0.019438352032566126 +294 3 model.embedding_dim 1.0 +294 3 optimizer.lr 0.0041343233926303185 +294 3 training.batch_size 0.0 +294 3 training.label_smoothing 0.0014966732298400411 +294 4 model.embedding_dim 1.0 +294 4 optimizer.lr 0.0034042081072029463 +294 4 training.batch_size 2.0 +294 4 training.label_smoothing 0.0022384827016707935 +294 5 model.embedding_dim 1.0 +294 5 optimizer.lr 0.006710051268214137 +294 5 training.batch_size 2.0 +294 5 training.label_smoothing 0.3955858434137402 +294 6 model.embedding_dim 1.0 +294 6 optimizer.lr 0.016628279311444262 +294 6 training.batch_size 0.0 +294 6 training.label_smoothing 0.012764135506223408 +294 7 model.embedding_dim 2.0 +294 7 optimizer.lr 0.09402208134784025 +294 7 training.batch_size 2.0 +294 7 training.label_smoothing 0.001838090786565996 +294 1 dataset """fb15k237""" +294 1 model """hole""" +294 1 loss """softplus""" +294 1 regularizer """no""" +294 1 optimizer """adam""" +294 1 training_loop """lcwa""" +294 1 evaluator """rankbased""" +294 2 dataset """fb15k237""" +294 2 model """hole""" +294 2 loss """softplus""" +294 2 regularizer """no""" +294 2 optimizer """adam""" +294 2 training_loop """lcwa""" +294 2 evaluator """rankbased""" +294 3 dataset """fb15k237""" +294 3 model """hole""" +294 3 loss """softplus""" +294 3 regularizer """no""" +294 3 optimizer 
"""adam""" +294 3 training_loop """lcwa""" +294 3 evaluator """rankbased""" +294 4 dataset """fb15k237""" +294 4 model """hole""" +294 4 loss """softplus""" +294 4 regularizer """no""" +294 4 optimizer """adam""" +294 4 training_loop """lcwa""" +294 4 evaluator """rankbased""" +294 5 dataset """fb15k237""" +294 5 model """hole""" +294 5 loss """softplus""" +294 5 regularizer """no""" +294 5 optimizer """adam""" +294 5 training_loop """lcwa""" +294 5 evaluator """rankbased""" +294 6 dataset """fb15k237""" +294 6 model """hole""" +294 6 loss """softplus""" +294 6 regularizer """no""" +294 6 optimizer """adam""" +294 6 training_loop """lcwa""" +294 6 evaluator """rankbased""" +294 7 dataset """fb15k237""" +294 7 model """hole""" +294 7 loss """softplus""" +294 7 regularizer """no""" +294 7 optimizer """adam""" +294 7 training_loop """lcwa""" +294 7 evaluator """rankbased""" +295 1 model.embedding_dim 0.0 +295 1 optimizer.lr 0.04393325731836351 +295 1 training.batch_size 1.0 +295 1 training.label_smoothing 0.3154379337751726 +295 2 model.embedding_dim 0.0 +295 2 optimizer.lr 0.013624307232933069 +295 2 training.batch_size 1.0 +295 2 training.label_smoothing 0.41247604725529263 +295 3 model.embedding_dim 2.0 +295 3 optimizer.lr 0.0012048886306830226 +295 3 training.batch_size 1.0 +295 3 training.label_smoothing 0.8173176135489397 +295 4 model.embedding_dim 2.0 +295 4 optimizer.lr 0.006117821950588835 +295 4 training.batch_size 1.0 +295 4 training.label_smoothing 0.7321468814928801 +295 5 model.embedding_dim 0.0 +295 5 optimizer.lr 0.001462335432956916 +295 5 training.batch_size 1.0 +295 5 training.label_smoothing 0.00848294343334454 +295 6 model.embedding_dim 2.0 +295 6 optimizer.lr 0.002962675076172044 +295 6 training.batch_size 2.0 +295 6 training.label_smoothing 0.282893032383469 +295 7 model.embedding_dim 0.0 +295 7 optimizer.lr 0.006797715888648721 +295 7 training.batch_size 0.0 +295 7 training.label_smoothing 0.15085322106105414 +295 8 model.embedding_dim 0.0 +295 
8 optimizer.lr 0.002244452196179883 +295 8 training.batch_size 1.0 +295 8 training.label_smoothing 0.27819516630927693 +295 9 model.embedding_dim 2.0 +295 9 optimizer.lr 0.004734630691610721 +295 9 training.batch_size 1.0 +295 9 training.label_smoothing 0.1797584117370991 +295 10 model.embedding_dim 2.0 +295 10 optimizer.lr 0.05518721507202277 +295 10 training.batch_size 2.0 +295 10 training.label_smoothing 0.02231218659884533 +295 11 model.embedding_dim 1.0 +295 11 optimizer.lr 0.0014257978467981543 +295 11 training.batch_size 0.0 +295 11 training.label_smoothing 0.004349325688499903 +295 12 model.embedding_dim 0.0 +295 12 optimizer.lr 0.003763934910055872 +295 12 training.batch_size 0.0 +295 12 training.label_smoothing 0.0062841107124450854 +295 13 model.embedding_dim 2.0 +295 13 optimizer.lr 0.007629460211632393 +295 13 training.batch_size 0.0 +295 13 training.label_smoothing 0.1760195783621003 +295 1 dataset """fb15k237""" +295 1 model """hole""" +295 1 loss """softplus""" +295 1 regularizer """no""" +295 1 optimizer """adam""" +295 1 training_loop """lcwa""" +295 1 evaluator """rankbased""" +295 2 dataset """fb15k237""" +295 2 model """hole""" +295 2 loss """softplus""" +295 2 regularizer """no""" +295 2 optimizer """adam""" +295 2 training_loop """lcwa""" +295 2 evaluator """rankbased""" +295 3 dataset """fb15k237""" +295 3 model """hole""" +295 3 loss """softplus""" +295 3 regularizer """no""" +295 3 optimizer """adam""" +295 3 training_loop """lcwa""" +295 3 evaluator """rankbased""" +295 4 dataset """fb15k237""" +295 4 model """hole""" +295 4 loss """softplus""" +295 4 regularizer """no""" +295 4 optimizer """adam""" +295 4 training_loop """lcwa""" +295 4 evaluator """rankbased""" +295 5 dataset """fb15k237""" +295 5 model """hole""" +295 5 loss """softplus""" +295 5 regularizer """no""" +295 5 optimizer """adam""" +295 5 training_loop """lcwa""" +295 5 evaluator """rankbased""" +295 6 dataset """fb15k237""" +295 6 model """hole""" +295 6 loss 
"""softplus""" +295 6 regularizer """no""" +295 6 optimizer """adam""" +295 6 training_loop """lcwa""" +295 6 evaluator """rankbased""" +295 7 dataset """fb15k237""" +295 7 model """hole""" +295 7 loss """softplus""" +295 7 regularizer """no""" +295 7 optimizer """adam""" +295 7 training_loop """lcwa""" +295 7 evaluator """rankbased""" +295 8 dataset """fb15k237""" +295 8 model """hole""" +295 8 loss """softplus""" +295 8 regularizer """no""" +295 8 optimizer """adam""" +295 8 training_loop """lcwa""" +295 8 evaluator """rankbased""" +295 9 dataset """fb15k237""" +295 9 model """hole""" +295 9 loss """softplus""" +295 9 regularizer """no""" +295 9 optimizer """adam""" +295 9 training_loop """lcwa""" +295 9 evaluator """rankbased""" +295 10 dataset """fb15k237""" +295 10 model """hole""" +295 10 loss """softplus""" +295 10 regularizer """no""" +295 10 optimizer """adam""" +295 10 training_loop """lcwa""" +295 10 evaluator """rankbased""" +295 11 dataset """fb15k237""" +295 11 model """hole""" +295 11 loss """softplus""" +295 11 regularizer """no""" +295 11 optimizer """adam""" +295 11 training_loop """lcwa""" +295 11 evaluator """rankbased""" +295 12 dataset """fb15k237""" +295 12 model """hole""" +295 12 loss """softplus""" +295 12 regularizer """no""" +295 12 optimizer """adam""" +295 12 training_loop """lcwa""" +295 12 evaluator """rankbased""" +295 13 dataset """fb15k237""" +295 13 model """hole""" +295 13 loss """softplus""" +295 13 regularizer """no""" +295 13 optimizer """adam""" +295 13 training_loop """lcwa""" +295 13 evaluator """rankbased""" +296 1 model.embedding_dim 0.0 +296 1 optimizer.lr 0.05577175475938688 +296 1 training.batch_size 1.0 +296 1 training.label_smoothing 0.010698238448765524 +296 2 model.embedding_dim 0.0 +296 2 optimizer.lr 0.058839822622041675 +296 2 training.batch_size 2.0 +296 2 training.label_smoothing 0.15786196159722737 +296 3 model.embedding_dim 0.0 +296 3 optimizer.lr 0.04020733758611871 +296 3 training.batch_size 2.0 +296 3 
training.label_smoothing 0.0018082148181873486 +296 4 model.embedding_dim 2.0 +296 4 optimizer.lr 0.053044223533289823 +296 4 training.batch_size 1.0 +296 4 training.label_smoothing 0.004561309458036133 +296 5 model.embedding_dim 1.0 +296 5 optimizer.lr 0.004395901170761039 +296 5 training.batch_size 2.0 +296 5 training.label_smoothing 0.002169481869823937 +296 6 model.embedding_dim 1.0 +296 6 optimizer.lr 0.015772830343399137 +296 6 training.batch_size 0.0 +296 6 training.label_smoothing 0.0024604211869497794 +296 7 model.embedding_dim 0.0 +296 7 optimizer.lr 0.00840139627590413 +296 7 training.batch_size 2.0 +296 7 training.label_smoothing 0.1458033325499318 +296 8 model.embedding_dim 0.0 +296 8 optimizer.lr 0.01105712095823895 +296 8 training.batch_size 0.0 +296 8 training.label_smoothing 0.09830785247042693 +296 9 model.embedding_dim 1.0 +296 9 optimizer.lr 0.015704909445700948 +296 9 training.batch_size 0.0 +296 9 training.label_smoothing 0.006144628214792297 +296 1 dataset """fb15k237""" +296 1 model """hole""" +296 1 loss """bceaftersigmoid""" +296 1 regularizer """no""" +296 1 optimizer """adam""" +296 1 training_loop """lcwa""" +296 1 evaluator """rankbased""" +296 2 dataset """fb15k237""" +296 2 model """hole""" +296 2 loss """bceaftersigmoid""" +296 2 regularizer """no""" +296 2 optimizer """adam""" +296 2 training_loop """lcwa""" +296 2 evaluator """rankbased""" +296 3 dataset """fb15k237""" +296 3 model """hole""" +296 3 loss """bceaftersigmoid""" +296 3 regularizer """no""" +296 3 optimizer """adam""" +296 3 training_loop """lcwa""" +296 3 evaluator """rankbased""" +296 4 dataset """fb15k237""" +296 4 model """hole""" +296 4 loss """bceaftersigmoid""" +296 4 regularizer """no""" +296 4 optimizer """adam""" +296 4 training_loop """lcwa""" +296 4 evaluator """rankbased""" +296 5 dataset """fb15k237""" +296 5 model """hole""" +296 5 loss """bceaftersigmoid""" +296 5 regularizer """no""" +296 5 optimizer """adam""" +296 5 training_loop """lcwa""" +296 5 
evaluator """rankbased""" +296 6 dataset """fb15k237""" +296 6 model """hole""" +296 6 loss """bceaftersigmoid""" +296 6 regularizer """no""" +296 6 optimizer """adam""" +296 6 training_loop """lcwa""" +296 6 evaluator """rankbased""" +296 7 dataset """fb15k237""" +296 7 model """hole""" +296 7 loss """bceaftersigmoid""" +296 7 regularizer """no""" +296 7 optimizer """adam""" +296 7 training_loop """lcwa""" +296 7 evaluator """rankbased""" +296 8 dataset """fb15k237""" +296 8 model """hole""" +296 8 loss """bceaftersigmoid""" +296 8 regularizer """no""" +296 8 optimizer """adam""" +296 8 training_loop """lcwa""" +296 8 evaluator """rankbased""" +296 9 dataset """fb15k237""" +296 9 model """hole""" +296 9 loss """bceaftersigmoid""" +296 9 regularizer """no""" +296 9 optimizer """adam""" +296 9 training_loop """lcwa""" +296 9 evaluator """rankbased""" +297 1 model.embedding_dim 1.0 +297 1 optimizer.lr 0.00310404623533 +297 1 training.batch_size 1.0 +297 1 training.label_smoothing 0.00381500952357784 +297 2 model.embedding_dim 1.0 +297 2 optimizer.lr 0.002273795138798426 +297 2 training.batch_size 2.0 +297 2 training.label_smoothing 0.03822973340457664 +297 3 model.embedding_dim 2.0 +297 3 optimizer.lr 0.018920881102434114 +297 3 training.batch_size 2.0 +297 3 training.label_smoothing 0.001792531805249406 +297 4 model.embedding_dim 1.0 +297 4 optimizer.lr 0.002395512491279603 +297 4 training.batch_size 1.0 +297 4 training.label_smoothing 0.00116019559047984 +297 5 model.embedding_dim 0.0 +297 5 optimizer.lr 0.0013026438151664543 +297 5 training.batch_size 2.0 +297 5 training.label_smoothing 0.6539737603189947 +297 6 model.embedding_dim 1.0 +297 6 optimizer.lr 0.018804233646499094 +297 6 training.batch_size 2.0 +297 6 training.label_smoothing 0.0020288539889068225 +297 7 model.embedding_dim 0.0 +297 7 optimizer.lr 0.01323608105123401 +297 7 training.batch_size 2.0 +297 7 training.label_smoothing 0.1941476958900328 +297 8 model.embedding_dim 0.0 +297 8 optimizer.lr 
0.0017005255814263479 +297 8 training.batch_size 2.0 +297 8 training.label_smoothing 0.004333661504473916 +297 9 model.embedding_dim 1.0 +297 9 optimizer.lr 0.03918963534974793 +297 9 training.batch_size 0.0 +297 9 training.label_smoothing 0.002353923343953278 +297 10 model.embedding_dim 0.0 +297 10 optimizer.lr 0.06486382219187059 +297 10 training.batch_size 1.0 +297 10 training.label_smoothing 0.2655436726493798 +297 11 model.embedding_dim 1.0 +297 11 optimizer.lr 0.0030189203679844437 +297 11 training.batch_size 0.0 +297 11 training.label_smoothing 0.0043685014034576496 +297 12 model.embedding_dim 0.0 +297 12 optimizer.lr 0.007235743463496828 +297 12 training.batch_size 2.0 +297 12 training.label_smoothing 0.005366969823632496 +297 13 model.embedding_dim 2.0 +297 13 optimizer.lr 0.0036677123164271814 +297 13 training.batch_size 0.0 +297 13 training.label_smoothing 0.058904864579773855 +297 1 dataset """fb15k237""" +297 1 model """hole""" +297 1 loss """bceaftersigmoid""" +297 1 regularizer """no""" +297 1 optimizer """adam""" +297 1 training_loop """lcwa""" +297 1 evaluator """rankbased""" +297 2 dataset """fb15k237""" +297 2 model """hole""" +297 2 loss """bceaftersigmoid""" +297 2 regularizer """no""" +297 2 optimizer """adam""" +297 2 training_loop """lcwa""" +297 2 evaluator """rankbased""" +297 3 dataset """fb15k237""" +297 3 model """hole""" +297 3 loss """bceaftersigmoid""" +297 3 regularizer """no""" +297 3 optimizer """adam""" +297 3 training_loop """lcwa""" +297 3 evaluator """rankbased""" +297 4 dataset """fb15k237""" +297 4 model """hole""" +297 4 loss """bceaftersigmoid""" +297 4 regularizer """no""" +297 4 optimizer """adam""" +297 4 training_loop """lcwa""" +297 4 evaluator """rankbased""" +297 5 dataset """fb15k237""" +297 5 model """hole""" +297 5 loss """bceaftersigmoid""" +297 5 regularizer """no""" +297 5 optimizer """adam""" +297 5 training_loop """lcwa""" +297 5 evaluator """rankbased""" +297 6 dataset """fb15k237""" +297 6 model """hole""" 
+297 6 loss """bceaftersigmoid""" +297 6 regularizer """no""" +297 6 optimizer """adam""" +297 6 training_loop """lcwa""" +297 6 evaluator """rankbased""" +297 7 dataset """fb15k237""" +297 7 model """hole""" +297 7 loss """bceaftersigmoid""" +297 7 regularizer """no""" +297 7 optimizer """adam""" +297 7 training_loop """lcwa""" +297 7 evaluator """rankbased""" +297 8 dataset """fb15k237""" +297 8 model """hole""" +297 8 loss """bceaftersigmoid""" +297 8 regularizer """no""" +297 8 optimizer """adam""" +297 8 training_loop """lcwa""" +297 8 evaluator """rankbased""" +297 9 dataset """fb15k237""" +297 9 model """hole""" +297 9 loss """bceaftersigmoid""" +297 9 regularizer """no""" +297 9 optimizer """adam""" +297 9 training_loop """lcwa""" +297 9 evaluator """rankbased""" +297 10 dataset """fb15k237""" +297 10 model """hole""" +297 10 loss """bceaftersigmoid""" +297 10 regularizer """no""" +297 10 optimizer """adam""" +297 10 training_loop """lcwa""" +297 10 evaluator """rankbased""" +297 11 dataset """fb15k237""" +297 11 model """hole""" +297 11 loss """bceaftersigmoid""" +297 11 regularizer """no""" +297 11 optimizer """adam""" +297 11 training_loop """lcwa""" +297 11 evaluator """rankbased""" +297 12 dataset """fb15k237""" +297 12 model """hole""" +297 12 loss """bceaftersigmoid""" +297 12 regularizer """no""" +297 12 optimizer """adam""" +297 12 training_loop """lcwa""" +297 12 evaluator """rankbased""" +297 13 dataset """fb15k237""" +297 13 model """hole""" +297 13 loss """bceaftersigmoid""" +297 13 regularizer """no""" +297 13 optimizer """adam""" +297 13 training_loop """lcwa""" +297 13 evaluator """rankbased""" +298 1 model.embedding_dim 0.0 +298 1 training.batch_size 1.0 +298 1 training.label_smoothing 0.31030624103054727 +298 2 model.embedding_dim 0.0 +298 2 training.batch_size 1.0 +298 2 training.label_smoothing 0.016158393095001236 +298 3 model.embedding_dim 1.0 +298 3 training.batch_size 2.0 +298 3 training.label_smoothing 0.16491894173089225 +298 4 
model.embedding_dim 2.0 +298 4 training.batch_size 2.0 +298 4 training.label_smoothing 0.1767826640919862 +298 5 model.embedding_dim 2.0 +298 5 training.batch_size 2.0 +298 5 training.label_smoothing 0.002143346021935844 +298 6 model.embedding_dim 1.0 +298 6 training.batch_size 1.0 +298 6 training.label_smoothing 0.45853393391249503 +298 7 model.embedding_dim 0.0 +298 7 training.batch_size 2.0 +298 7 training.label_smoothing 0.0056281740907798555 +298 8 model.embedding_dim 1.0 +298 8 training.batch_size 0.0 +298 8 training.label_smoothing 0.6542790588258838 +298 9 model.embedding_dim 0.0 +298 9 training.batch_size 1.0 +298 9 training.label_smoothing 0.3081514036752773 +298 10 model.embedding_dim 0.0 +298 10 training.batch_size 0.0 +298 10 training.label_smoothing 0.0831289566897687 +298 11 model.embedding_dim 2.0 +298 11 training.batch_size 1.0 +298 11 training.label_smoothing 0.0011871627078956417 +298 12 model.embedding_dim 1.0 +298 12 training.batch_size 1.0 +298 12 training.label_smoothing 0.0019683450257186705 +298 13 model.embedding_dim 1.0 +298 13 training.batch_size 1.0 +298 13 training.label_smoothing 0.03433641453762257 +298 14 model.embedding_dim 1.0 +298 14 training.batch_size 1.0 +298 14 training.label_smoothing 0.006243385910188837 +298 15 model.embedding_dim 2.0 +298 15 training.batch_size 2.0 +298 15 training.label_smoothing 0.0016151018909249376 +298 16 model.embedding_dim 0.0 +298 16 training.batch_size 0.0 +298 16 training.label_smoothing 0.2649910833346283 +298 17 model.embedding_dim 2.0 +298 17 training.batch_size 0.0 +298 17 training.label_smoothing 0.772470554702532 +298 18 model.embedding_dim 1.0 +298 18 training.batch_size 0.0 +298 18 training.label_smoothing 0.005360707501965907 +298 19 model.embedding_dim 2.0 +298 19 training.batch_size 2.0 +298 19 training.label_smoothing 0.44981477822491567 +298 20 model.embedding_dim 0.0 +298 20 training.batch_size 0.0 +298 20 training.label_smoothing 0.001238805392485364 +298 21 model.embedding_dim 
2.0 +298 21 training.batch_size 1.0 +298 21 training.label_smoothing 0.13658437522993938 +298 22 model.embedding_dim 2.0 +298 22 training.batch_size 0.0 +298 22 training.label_smoothing 0.0918527117954603 +298 23 model.embedding_dim 1.0 +298 23 training.batch_size 1.0 +298 23 training.label_smoothing 0.1542517960014655 +298 24 model.embedding_dim 1.0 +298 24 training.batch_size 0.0 +298 24 training.label_smoothing 0.021577197431219077 +298 25 model.embedding_dim 2.0 +298 25 training.batch_size 1.0 +298 25 training.label_smoothing 0.04626248791170292 +298 26 model.embedding_dim 0.0 +298 26 training.batch_size 1.0 +298 26 training.label_smoothing 0.05654572303904055 +298 27 model.embedding_dim 0.0 +298 27 training.batch_size 2.0 +298 27 training.label_smoothing 0.005474582980663964 +298 28 model.embedding_dim 1.0 +298 28 training.batch_size 0.0 +298 28 training.label_smoothing 0.017434017376987285 +298 29 model.embedding_dim 2.0 +298 29 training.batch_size 1.0 +298 29 training.label_smoothing 0.7414659371597455 +298 30 model.embedding_dim 1.0 +298 30 training.batch_size 2.0 +298 30 training.label_smoothing 0.011359370802908867 +298 31 model.embedding_dim 1.0 +298 31 training.batch_size 0.0 +298 31 training.label_smoothing 0.21448021252230087 +298 32 model.embedding_dim 2.0 +298 32 training.batch_size 2.0 +298 32 training.label_smoothing 0.01263266162208087 +298 33 model.embedding_dim 0.0 +298 33 training.batch_size 2.0 +298 33 training.label_smoothing 0.03803896947327357 +298 34 model.embedding_dim 0.0 +298 34 training.batch_size 1.0 +298 34 training.label_smoothing 0.0014649938657596614 +298 35 model.embedding_dim 0.0 +298 35 training.batch_size 2.0 +298 35 training.label_smoothing 0.2824213076690995 +298 36 model.embedding_dim 0.0 +298 36 training.batch_size 2.0 +298 36 training.label_smoothing 0.02359091403480184 +298 37 model.embedding_dim 2.0 +298 37 training.batch_size 1.0 +298 37 training.label_smoothing 0.11544860782471511 +298 38 model.embedding_dim 1.0 +298 
38 training.batch_size 1.0 +298 38 training.label_smoothing 0.27861657492567093 +298 39 model.embedding_dim 1.0 +298 39 training.batch_size 0.0 +298 39 training.label_smoothing 0.01844692283286295 +298 40 model.embedding_dim 0.0 +298 40 training.batch_size 0.0 +298 40 training.label_smoothing 0.025891021536780693 +298 41 model.embedding_dim 1.0 +298 41 training.batch_size 2.0 +298 41 training.label_smoothing 0.08560470710912664 +298 42 model.embedding_dim 1.0 +298 42 training.batch_size 0.0 +298 42 training.label_smoothing 0.0010641404606422547 +298 43 model.embedding_dim 2.0 +298 43 training.batch_size 1.0 +298 43 training.label_smoothing 0.003009965502937374 +298 44 model.embedding_dim 0.0 +298 44 training.batch_size 0.0 +298 44 training.label_smoothing 0.012101501063425883 +298 45 model.embedding_dim 0.0 +298 45 training.batch_size 2.0 +298 45 training.label_smoothing 0.017070642201261334 +298 46 model.embedding_dim 2.0 +298 46 training.batch_size 1.0 +298 46 training.label_smoothing 0.0028000278826271078 +298 47 model.embedding_dim 2.0 +298 47 training.batch_size 2.0 +298 47 training.label_smoothing 0.002096249927993234 +298 48 model.embedding_dim 1.0 +298 48 training.batch_size 2.0 +298 48 training.label_smoothing 0.0011320449489145748 +298 49 model.embedding_dim 1.0 +298 49 training.batch_size 1.0 +298 49 training.label_smoothing 0.018424503483722876 +298 50 model.embedding_dim 1.0 +298 50 training.batch_size 0.0 +298 50 training.label_smoothing 0.02749391777483402 +298 51 model.embedding_dim 2.0 +298 51 training.batch_size 2.0 +298 51 training.label_smoothing 0.2608384074589365 +298 52 model.embedding_dim 2.0 +298 52 training.batch_size 1.0 +298 52 training.label_smoothing 0.006621567010512619 +298 53 model.embedding_dim 2.0 +298 53 training.batch_size 1.0 +298 53 training.label_smoothing 0.0054443130397381695 +298 54 model.embedding_dim 2.0 +298 54 training.batch_size 2.0 +298 54 training.label_smoothing 0.03370786846914253 +298 55 model.embedding_dim 2.0 
+298 55 training.batch_size 0.0 +298 55 training.label_smoothing 0.0967279082566586 +298 56 model.embedding_dim 0.0 +298 56 training.batch_size 2.0 +298 56 training.label_smoothing 0.20652817377902166 +298 57 model.embedding_dim 1.0 +298 57 training.batch_size 0.0 +298 57 training.label_smoothing 0.36518798197957963 +298 58 model.embedding_dim 2.0 +298 58 training.batch_size 0.0 +298 58 training.label_smoothing 0.01325582542161478 +298 59 model.embedding_dim 0.0 +298 59 training.batch_size 1.0 +298 59 training.label_smoothing 0.006393417519908578 +298 60 model.embedding_dim 2.0 +298 60 training.batch_size 0.0 +298 60 training.label_smoothing 0.02988714283793142 +298 61 model.embedding_dim 0.0 +298 61 training.batch_size 0.0 +298 61 training.label_smoothing 0.005729259360418039 +298 62 model.embedding_dim 2.0 +298 62 training.batch_size 2.0 +298 62 training.label_smoothing 0.0017285255989846556 +298 63 model.embedding_dim 1.0 +298 63 training.batch_size 0.0 +298 63 training.label_smoothing 0.023753921087652213 +298 64 model.embedding_dim 1.0 +298 64 training.batch_size 0.0 +298 64 training.label_smoothing 0.0035844478809538755 +298 65 model.embedding_dim 2.0 +298 65 training.batch_size 2.0 +298 65 training.label_smoothing 0.0011550682183984842 +298 66 model.embedding_dim 0.0 +298 66 training.batch_size 2.0 +298 66 training.label_smoothing 0.013534837175424032 +298 67 model.embedding_dim 0.0 +298 67 training.batch_size 1.0 +298 67 training.label_smoothing 0.4356020469292502 +298 68 model.embedding_dim 1.0 +298 68 training.batch_size 0.0 +298 68 training.label_smoothing 0.009388648023898454 +298 69 model.embedding_dim 2.0 +298 69 training.batch_size 2.0 +298 69 training.label_smoothing 0.02828005843342109 +298 70 model.embedding_dim 2.0 +298 70 training.batch_size 0.0 +298 70 training.label_smoothing 0.039450697654743 +298 71 model.embedding_dim 2.0 +298 71 training.batch_size 2.0 +298 71 training.label_smoothing 0.9775579044571829 +298 72 model.embedding_dim 2.0 +298 
72 training.batch_size 1.0 +298 72 training.label_smoothing 0.0017581691736657538 +298 73 model.embedding_dim 2.0 +298 73 training.batch_size 1.0 +298 73 training.label_smoothing 0.01179254305521207 +298 74 model.embedding_dim 1.0 +298 74 training.batch_size 1.0 +298 74 training.label_smoothing 0.013664895913782836 +298 75 model.embedding_dim 0.0 +298 75 training.batch_size 1.0 +298 75 training.label_smoothing 0.0019619578589566704 +298 76 model.embedding_dim 1.0 +298 76 training.batch_size 0.0 +298 76 training.label_smoothing 0.016910669668159167 +298 77 model.embedding_dim 1.0 +298 77 training.batch_size 2.0 +298 77 training.label_smoothing 0.9312005783527693 +298 78 model.embedding_dim 2.0 +298 78 training.batch_size 2.0 +298 78 training.label_smoothing 0.012291154652925169 +298 79 model.embedding_dim 1.0 +298 79 training.batch_size 1.0 +298 79 training.label_smoothing 0.005003418641515286 +298 80 model.embedding_dim 1.0 +298 80 training.batch_size 0.0 +298 80 training.label_smoothing 0.10196972440180789 +298 81 model.embedding_dim 1.0 +298 81 training.batch_size 0.0 +298 81 training.label_smoothing 0.20890753270667925 +298 82 model.embedding_dim 0.0 +298 82 training.batch_size 1.0 +298 82 training.label_smoothing 0.022077993939659726 +298 83 model.embedding_dim 0.0 +298 83 training.batch_size 0.0 +298 83 training.label_smoothing 0.03742689659969904 +298 84 model.embedding_dim 0.0 +298 84 training.batch_size 1.0 +298 84 training.label_smoothing 0.007545702665066923 +298 85 model.embedding_dim 0.0 +298 85 training.batch_size 1.0 +298 85 training.label_smoothing 0.03144448811921558 +298 86 model.embedding_dim 2.0 +298 86 training.batch_size 1.0 +298 86 training.label_smoothing 0.13357408959340164 +298 87 model.embedding_dim 0.0 +298 87 training.batch_size 1.0 +298 87 training.label_smoothing 0.008102858043559747 +298 88 model.embedding_dim 0.0 +298 88 training.batch_size 2.0 +298 88 training.label_smoothing 0.003957834047813938 +298 89 model.embedding_dim 2.0 +298 
89 training.batch_size 1.0 +298 89 training.label_smoothing 0.07846661823380136 +298 90 model.embedding_dim 0.0 +298 90 training.batch_size 0.0 +298 90 training.label_smoothing 0.0013772718903566396 +298 91 model.embedding_dim 1.0 +298 91 training.batch_size 0.0 +298 91 training.label_smoothing 0.06855154025285348 +298 92 model.embedding_dim 0.0 +298 92 training.batch_size 0.0 +298 92 training.label_smoothing 0.32071512223106563 +298 93 model.embedding_dim 1.0 +298 93 training.batch_size 2.0 +298 93 training.label_smoothing 0.013991738867165643 +298 94 model.embedding_dim 0.0 +298 94 training.batch_size 1.0 +298 94 training.label_smoothing 0.010267407041645549 +298 95 model.embedding_dim 1.0 +298 95 training.batch_size 2.0 +298 95 training.label_smoothing 0.40221465957442265 +298 96 model.embedding_dim 1.0 +298 96 training.batch_size 0.0 +298 96 training.label_smoothing 0.018684552172247466 +298 97 model.embedding_dim 0.0 +298 97 training.batch_size 0.0 +298 97 training.label_smoothing 0.13505515378331986 +298 98 model.embedding_dim 2.0 +298 98 training.batch_size 2.0 +298 98 training.label_smoothing 0.0018437324205793687 +298 99 model.embedding_dim 2.0 +298 99 training.batch_size 2.0 +298 99 training.label_smoothing 0.10229700718038627 +298 100 model.embedding_dim 1.0 +298 100 training.batch_size 2.0 +298 100 training.label_smoothing 0.014067971325025144 +298 1 dataset """kinships""" +298 1 model """hole""" +298 1 loss """bceaftersigmoid""" +298 1 regularizer """no""" +298 1 optimizer """adadelta""" +298 1 training_loop """lcwa""" +298 1 evaluator """rankbased""" +298 2 dataset """kinships""" +298 2 model """hole""" +298 2 loss """bceaftersigmoid""" +298 2 regularizer """no""" +298 2 optimizer """adadelta""" +298 2 training_loop """lcwa""" +298 2 evaluator """rankbased""" +298 3 dataset """kinships""" +298 3 model """hole""" +298 3 loss """bceaftersigmoid""" +298 3 regularizer """no""" +298 3 optimizer """adadelta""" +298 3 training_loop """lcwa""" +298 3 
evaluator """rankbased""" +298 4 dataset """kinships""" +298 4 model """hole""" +298 4 loss """bceaftersigmoid""" +298 4 regularizer """no""" +298 4 optimizer """adadelta""" +298 4 training_loop """lcwa""" +298 4 evaluator """rankbased""" +298 5 dataset """kinships""" +298 5 model """hole""" +298 5 loss """bceaftersigmoid""" +298 5 regularizer """no""" +298 5 optimizer """adadelta""" +298 5 training_loop """lcwa""" +298 5 evaluator """rankbased""" +298 6 dataset """kinships""" +298 6 model """hole""" +298 6 loss """bceaftersigmoid""" +298 6 regularizer """no""" +298 6 optimizer """adadelta""" +298 6 training_loop """lcwa""" +298 6 evaluator """rankbased""" +298 7 dataset """kinships""" +298 7 model """hole""" +298 7 loss """bceaftersigmoid""" +298 7 regularizer """no""" +298 7 optimizer """adadelta""" +298 7 training_loop """lcwa""" +298 7 evaluator """rankbased""" +298 8 dataset """kinships""" +298 8 model """hole""" +298 8 loss """bceaftersigmoid""" +298 8 regularizer """no""" +298 8 optimizer """adadelta""" +298 8 training_loop """lcwa""" +298 8 evaluator """rankbased""" +298 9 dataset """kinships""" +298 9 model """hole""" +298 9 loss """bceaftersigmoid""" +298 9 regularizer """no""" +298 9 optimizer """adadelta""" +298 9 training_loop """lcwa""" +298 9 evaluator """rankbased""" +298 10 dataset """kinships""" +298 10 model """hole""" +298 10 loss """bceaftersigmoid""" +298 10 regularizer """no""" +298 10 optimizer """adadelta""" +298 10 training_loop """lcwa""" +298 10 evaluator """rankbased""" +298 11 dataset """kinships""" +298 11 model """hole""" +298 11 loss """bceaftersigmoid""" +298 11 regularizer """no""" +298 11 optimizer """adadelta""" +298 11 training_loop """lcwa""" +298 11 evaluator """rankbased""" +298 12 dataset """kinships""" +298 12 model """hole""" +298 12 loss """bceaftersigmoid""" +298 12 regularizer """no""" +298 12 optimizer """adadelta""" +298 12 training_loop """lcwa""" +298 12 evaluator """rankbased""" +298 13 dataset """kinships""" +298 
13 model """hole""" +298 13 loss """bceaftersigmoid""" +298 13 regularizer """no""" +298 13 optimizer """adadelta""" +298 13 training_loop """lcwa""" +298 13 evaluator """rankbased""" +298 14 dataset """kinships""" +298 14 model """hole""" +298 14 loss """bceaftersigmoid""" +298 14 regularizer """no""" +298 14 optimizer """adadelta""" +298 14 training_loop """lcwa""" +298 14 evaluator """rankbased""" +298 15 dataset """kinships""" +298 15 model """hole""" +298 15 loss """bceaftersigmoid""" +298 15 regularizer """no""" +298 15 optimizer """adadelta""" +298 15 training_loop """lcwa""" +298 15 evaluator """rankbased""" +298 16 dataset """kinships""" +298 16 model """hole""" +298 16 loss """bceaftersigmoid""" +298 16 regularizer """no""" +298 16 optimizer """adadelta""" +298 16 training_loop """lcwa""" +298 16 evaluator """rankbased""" +298 17 dataset """kinships""" +298 17 model """hole""" +298 17 loss """bceaftersigmoid""" +298 17 regularizer """no""" +298 17 optimizer """adadelta""" +298 17 training_loop """lcwa""" +298 17 evaluator """rankbased""" +298 18 dataset """kinships""" +298 18 model """hole""" +298 18 loss """bceaftersigmoid""" +298 18 regularizer """no""" +298 18 optimizer """adadelta""" +298 18 training_loop """lcwa""" +298 18 evaluator """rankbased""" +298 19 dataset """kinships""" +298 19 model """hole""" +298 19 loss """bceaftersigmoid""" +298 19 regularizer """no""" +298 19 optimizer """adadelta""" +298 19 training_loop """lcwa""" +298 19 evaluator """rankbased""" +298 20 dataset """kinships""" +298 20 model """hole""" +298 20 loss """bceaftersigmoid""" +298 20 regularizer """no""" +298 20 optimizer """adadelta""" +298 20 training_loop """lcwa""" +298 20 evaluator """rankbased""" +298 21 dataset """kinships""" +298 21 model """hole""" +298 21 loss """bceaftersigmoid""" +298 21 regularizer """no""" +298 21 optimizer """adadelta""" +298 21 training_loop """lcwa""" +298 21 evaluator """rankbased""" +298 22 dataset """kinships""" +298 22 model """hole""" 
+298 22 loss """bceaftersigmoid""" +298 22 regularizer """no""" +298 22 optimizer """adadelta""" +298 22 training_loop """lcwa""" +298 22 evaluator """rankbased""" +298 23 dataset """kinships""" +298 23 model """hole""" +298 23 loss """bceaftersigmoid""" +298 23 regularizer """no""" +298 23 optimizer """adadelta""" +298 23 training_loop """lcwa""" +298 23 evaluator """rankbased""" +298 24 dataset """kinships""" +298 24 model """hole""" +298 24 loss """bceaftersigmoid""" +298 24 regularizer """no""" +298 24 optimizer """adadelta""" +298 24 training_loop """lcwa""" +298 24 evaluator """rankbased""" +298 25 dataset """kinships""" +298 25 model """hole""" +298 25 loss """bceaftersigmoid""" +298 25 regularizer """no""" +298 25 optimizer """adadelta""" +298 25 training_loop """lcwa""" +298 25 evaluator """rankbased""" +298 26 dataset """kinships""" +298 26 model """hole""" +298 26 loss """bceaftersigmoid""" +298 26 regularizer """no""" +298 26 optimizer """adadelta""" +298 26 training_loop """lcwa""" +298 26 evaluator """rankbased""" +298 27 dataset """kinships""" +298 27 model """hole""" +298 27 loss """bceaftersigmoid""" +298 27 regularizer """no""" +298 27 optimizer """adadelta""" +298 27 training_loop """lcwa""" +298 27 evaluator """rankbased""" +298 28 dataset """kinships""" +298 28 model """hole""" +298 28 loss """bceaftersigmoid""" +298 28 regularizer """no""" +298 28 optimizer """adadelta""" +298 28 training_loop """lcwa""" +298 28 evaluator """rankbased""" +298 29 dataset """kinships""" +298 29 model """hole""" +298 29 loss """bceaftersigmoid""" +298 29 regularizer """no""" +298 29 optimizer """adadelta""" +298 29 training_loop """lcwa""" +298 29 evaluator """rankbased""" +298 30 dataset """kinships""" +298 30 model """hole""" +298 30 loss """bceaftersigmoid""" +298 30 regularizer """no""" +298 30 optimizer """adadelta""" +298 30 training_loop """lcwa""" +298 30 evaluator """rankbased""" +298 31 dataset """kinships""" +298 31 model """hole""" +298 31 loss 
"""bceaftersigmoid""" +298 31 regularizer """no""" +298 31 optimizer """adadelta""" +298 31 training_loop """lcwa""" +298 31 evaluator """rankbased""" +298 32 dataset """kinships""" +298 32 model """hole""" +298 32 loss """bceaftersigmoid""" +298 32 regularizer """no""" +298 32 optimizer """adadelta""" +298 32 training_loop """lcwa""" +298 32 evaluator """rankbased""" +298 33 dataset """kinships""" +298 33 model """hole""" +298 33 loss """bceaftersigmoid""" +298 33 regularizer """no""" +298 33 optimizer """adadelta""" +298 33 training_loop """lcwa""" +298 33 evaluator """rankbased""" +298 34 dataset """kinships""" +298 34 model """hole""" +298 34 loss """bceaftersigmoid""" +298 34 regularizer """no""" +298 34 optimizer """adadelta""" +298 34 training_loop """lcwa""" +298 34 evaluator """rankbased""" +298 35 dataset """kinships""" +298 35 model """hole""" +298 35 loss """bceaftersigmoid""" +298 35 regularizer """no""" +298 35 optimizer """adadelta""" +298 35 training_loop """lcwa""" +298 35 evaluator """rankbased""" +298 36 dataset """kinships""" +298 36 model """hole""" +298 36 loss """bceaftersigmoid""" +298 36 regularizer """no""" +298 36 optimizer """adadelta""" +298 36 training_loop """lcwa""" +298 36 evaluator """rankbased""" +298 37 dataset """kinships""" +298 37 model """hole""" +298 37 loss """bceaftersigmoid""" +298 37 regularizer """no""" +298 37 optimizer """adadelta""" +298 37 training_loop """lcwa""" +298 37 evaluator """rankbased""" +298 38 dataset """kinships""" +298 38 model """hole""" +298 38 loss """bceaftersigmoid""" +298 38 regularizer """no""" +298 38 optimizer """adadelta""" +298 38 training_loop """lcwa""" +298 38 evaluator """rankbased""" +298 39 dataset """kinships""" +298 39 model """hole""" +298 39 loss """bceaftersigmoid""" +298 39 regularizer """no""" +298 39 optimizer """adadelta""" +298 39 training_loop """lcwa""" +298 39 evaluator """rankbased""" +298 40 dataset """kinships""" +298 40 model """hole""" +298 40 loss 
"""bceaftersigmoid""" +298 40 regularizer """no""" +298 40 optimizer """adadelta""" +298 40 training_loop """lcwa""" +298 40 evaluator """rankbased""" +298 41 dataset """kinships""" +298 41 model """hole""" +298 41 loss """bceaftersigmoid""" +298 41 regularizer """no""" +298 41 optimizer """adadelta""" +298 41 training_loop """lcwa""" +298 41 evaluator """rankbased""" +298 42 dataset """kinships""" +298 42 model """hole""" +298 42 loss """bceaftersigmoid""" +298 42 regularizer """no""" +298 42 optimizer """adadelta""" +298 42 training_loop """lcwa""" +298 42 evaluator """rankbased""" +298 43 dataset """kinships""" +298 43 model """hole""" +298 43 loss """bceaftersigmoid""" +298 43 regularizer """no""" +298 43 optimizer """adadelta""" +298 43 training_loop """lcwa""" +298 43 evaluator """rankbased""" +298 44 dataset """kinships""" +298 44 model """hole""" +298 44 loss """bceaftersigmoid""" +298 44 regularizer """no""" +298 44 optimizer """adadelta""" +298 44 training_loop """lcwa""" +298 44 evaluator """rankbased""" +298 45 dataset """kinships""" +298 45 model """hole""" +298 45 loss """bceaftersigmoid""" +298 45 regularizer """no""" +298 45 optimizer """adadelta""" +298 45 training_loop """lcwa""" +298 45 evaluator """rankbased""" +298 46 dataset """kinships""" +298 46 model """hole""" +298 46 loss """bceaftersigmoid""" +298 46 regularizer """no""" +298 46 optimizer """adadelta""" +298 46 training_loop """lcwa""" +298 46 evaluator """rankbased""" +298 47 dataset """kinships""" +298 47 model """hole""" +298 47 loss """bceaftersigmoid""" +298 47 regularizer """no""" +298 47 optimizer """adadelta""" +298 47 training_loop """lcwa""" +298 47 evaluator """rankbased""" +298 48 dataset """kinships""" +298 48 model """hole""" +298 48 loss """bceaftersigmoid""" +298 48 regularizer """no""" +298 48 optimizer """adadelta""" +298 48 training_loop """lcwa""" +298 48 evaluator """rankbased""" +298 49 dataset """kinships""" +298 49 model """hole""" +298 49 loss 
"""bceaftersigmoid""" +298 49 regularizer """no""" +298 49 optimizer """adadelta""" +298 49 training_loop """lcwa""" +298 49 evaluator """rankbased""" +298 50 dataset """kinships""" +298 50 model """hole""" +298 50 loss """bceaftersigmoid""" +298 50 regularizer """no""" +298 50 optimizer """adadelta""" +298 50 training_loop """lcwa""" +298 50 evaluator """rankbased""" +298 51 dataset """kinships""" +298 51 model """hole""" +298 51 loss """bceaftersigmoid""" +298 51 regularizer """no""" +298 51 optimizer """adadelta""" +298 51 training_loop """lcwa""" +298 51 evaluator """rankbased""" +298 52 dataset """kinships""" +298 52 model """hole""" +298 52 loss """bceaftersigmoid""" +298 52 regularizer """no""" +298 52 optimizer """adadelta""" +298 52 training_loop """lcwa""" +298 52 evaluator """rankbased""" +298 53 dataset """kinships""" +298 53 model """hole""" +298 53 loss """bceaftersigmoid""" +298 53 regularizer """no""" +298 53 optimizer """adadelta""" +298 53 training_loop """lcwa""" +298 53 evaluator """rankbased""" +298 54 dataset """kinships""" +298 54 model """hole""" +298 54 loss """bceaftersigmoid""" +298 54 regularizer """no""" +298 54 optimizer """adadelta""" +298 54 training_loop """lcwa""" +298 54 evaluator """rankbased""" +298 55 dataset """kinships""" +298 55 model """hole""" +298 55 loss """bceaftersigmoid""" +298 55 regularizer """no""" +298 55 optimizer """adadelta""" +298 55 training_loop """lcwa""" +298 55 evaluator """rankbased""" +298 56 dataset """kinships""" +298 56 model """hole""" +298 56 loss """bceaftersigmoid""" +298 56 regularizer """no""" +298 56 optimizer """adadelta""" +298 56 training_loop """lcwa""" +298 56 evaluator """rankbased""" +298 57 dataset """kinships""" +298 57 model """hole""" +298 57 loss """bceaftersigmoid""" +298 57 regularizer """no""" +298 57 optimizer """adadelta""" +298 57 training_loop """lcwa""" +298 57 evaluator """rankbased""" +298 58 dataset """kinships""" +298 58 model """hole""" +298 58 loss 
"""bceaftersigmoid""" +298 58 regularizer """no""" +298 58 optimizer """adadelta""" +298 58 training_loop """lcwa""" +298 58 evaluator """rankbased""" +298 59 dataset """kinships""" +298 59 model """hole""" +298 59 loss """bceaftersigmoid""" +298 59 regularizer """no""" +298 59 optimizer """adadelta""" +298 59 training_loop """lcwa""" +298 59 evaluator """rankbased""" +298 60 dataset """kinships""" +298 60 model """hole""" +298 60 loss """bceaftersigmoid""" +298 60 regularizer """no""" +298 60 optimizer """adadelta""" +298 60 training_loop """lcwa""" +298 60 evaluator """rankbased""" +298 61 dataset """kinships""" +298 61 model """hole""" +298 61 loss """bceaftersigmoid""" +298 61 regularizer """no""" +298 61 optimizer """adadelta""" +298 61 training_loop """lcwa""" +298 61 evaluator """rankbased""" +298 62 dataset """kinships""" +298 62 model """hole""" +298 62 loss """bceaftersigmoid""" +298 62 regularizer """no""" +298 62 optimizer """adadelta""" +298 62 training_loop """lcwa""" +298 62 evaluator """rankbased""" +298 63 dataset """kinships""" +298 63 model """hole""" +298 63 loss """bceaftersigmoid""" +298 63 regularizer """no""" +298 63 optimizer """adadelta""" +298 63 training_loop """lcwa""" +298 63 evaluator """rankbased""" +298 64 dataset """kinships""" +298 64 model """hole""" +298 64 loss """bceaftersigmoid""" +298 64 regularizer """no""" +298 64 optimizer """adadelta""" +298 64 training_loop """lcwa""" +298 64 evaluator """rankbased""" +298 65 dataset """kinships""" +298 65 model """hole""" +298 65 loss """bceaftersigmoid""" +298 65 regularizer """no""" +298 65 optimizer """adadelta""" +298 65 training_loop """lcwa""" +298 65 evaluator """rankbased""" +298 66 dataset """kinships""" +298 66 model """hole""" +298 66 loss """bceaftersigmoid""" +298 66 regularizer """no""" +298 66 optimizer """adadelta""" +298 66 training_loop """lcwa""" +298 66 evaluator """rankbased""" +298 67 dataset """kinships""" +298 67 model """hole""" +298 67 loss 
"""bceaftersigmoid""" +298 67 regularizer """no""" +298 67 optimizer """adadelta""" +298 67 training_loop """lcwa""" +298 67 evaluator """rankbased""" +298 68 dataset """kinships""" +298 68 model """hole""" +298 68 loss """bceaftersigmoid""" +298 68 regularizer """no""" +298 68 optimizer """adadelta""" +298 68 training_loop """lcwa""" +298 68 evaluator """rankbased""" +298 69 dataset """kinships""" +298 69 model """hole""" +298 69 loss """bceaftersigmoid""" +298 69 regularizer """no""" +298 69 optimizer """adadelta""" +298 69 training_loop """lcwa""" +298 69 evaluator """rankbased""" +298 70 dataset """kinships""" +298 70 model """hole""" +298 70 loss """bceaftersigmoid""" +298 70 regularizer """no""" +298 70 optimizer """adadelta""" +298 70 training_loop """lcwa""" +298 70 evaluator """rankbased""" +298 71 dataset """kinships""" +298 71 model """hole""" +298 71 loss """bceaftersigmoid""" +298 71 regularizer """no""" +298 71 optimizer """adadelta""" +298 71 training_loop """lcwa""" +298 71 evaluator """rankbased""" +298 72 dataset """kinships""" +298 72 model """hole""" +298 72 loss """bceaftersigmoid""" +298 72 regularizer """no""" +298 72 optimizer """adadelta""" +298 72 training_loop """lcwa""" +298 72 evaluator """rankbased""" +298 73 dataset """kinships""" +298 73 model """hole""" +298 73 loss """bceaftersigmoid""" +298 73 regularizer """no""" +298 73 optimizer """adadelta""" +298 73 training_loop """lcwa""" +298 73 evaluator """rankbased""" +298 74 dataset """kinships""" +298 74 model """hole""" +298 74 loss """bceaftersigmoid""" +298 74 regularizer """no""" +298 74 optimizer """adadelta""" +298 74 training_loop """lcwa""" +298 74 evaluator """rankbased""" +298 75 dataset """kinships""" +298 75 model """hole""" +298 75 loss """bceaftersigmoid""" +298 75 regularizer """no""" +298 75 optimizer """adadelta""" +298 75 training_loop """lcwa""" +298 75 evaluator """rankbased""" +298 76 dataset """kinships""" +298 76 model """hole""" +298 76 loss 
"""bceaftersigmoid""" +298 76 regularizer """no""" +298 76 optimizer """adadelta""" +298 76 training_loop """lcwa""" +298 76 evaluator """rankbased""" +298 77 dataset """kinships""" +298 77 model """hole""" +298 77 loss """bceaftersigmoid""" +298 77 regularizer """no""" +298 77 optimizer """adadelta""" +298 77 training_loop """lcwa""" +298 77 evaluator """rankbased""" +298 78 dataset """kinships""" +298 78 model """hole""" +298 78 loss """bceaftersigmoid""" +298 78 regularizer """no""" +298 78 optimizer """adadelta""" +298 78 training_loop """lcwa""" +298 78 evaluator """rankbased""" +298 79 dataset """kinships""" +298 79 model """hole""" +298 79 loss """bceaftersigmoid""" +298 79 regularizer """no""" +298 79 optimizer """adadelta""" +298 79 training_loop """lcwa""" +298 79 evaluator """rankbased""" +298 80 dataset """kinships""" +298 80 model """hole""" +298 80 loss """bceaftersigmoid""" +298 80 regularizer """no""" +298 80 optimizer """adadelta""" +298 80 training_loop """lcwa""" +298 80 evaluator """rankbased""" +298 81 dataset """kinships""" +298 81 model """hole""" +298 81 loss """bceaftersigmoid""" +298 81 regularizer """no""" +298 81 optimizer """adadelta""" +298 81 training_loop """lcwa""" +298 81 evaluator """rankbased""" +298 82 dataset """kinships""" +298 82 model """hole""" +298 82 loss """bceaftersigmoid""" +298 82 regularizer """no""" +298 82 optimizer """adadelta""" +298 82 training_loop """lcwa""" +298 82 evaluator """rankbased""" +298 83 dataset """kinships""" +298 83 model """hole""" +298 83 loss """bceaftersigmoid""" +298 83 regularizer """no""" +298 83 optimizer """adadelta""" +298 83 training_loop """lcwa""" +298 83 evaluator """rankbased""" +298 84 dataset """kinships""" +298 84 model """hole""" +298 84 loss """bceaftersigmoid""" +298 84 regularizer """no""" +298 84 optimizer """adadelta""" +298 84 training_loop """lcwa""" +298 84 evaluator """rankbased""" +298 85 dataset """kinships""" +298 85 model """hole""" +298 85 loss 
"""bceaftersigmoid""" +298 85 regularizer """no""" +298 85 optimizer """adadelta""" +298 85 training_loop """lcwa""" +298 85 evaluator """rankbased""" +298 86 dataset """kinships""" +298 86 model """hole""" +298 86 loss """bceaftersigmoid""" +298 86 regularizer """no""" +298 86 optimizer """adadelta""" +298 86 training_loop """lcwa""" +298 86 evaluator """rankbased""" +298 87 dataset """kinships""" +298 87 model """hole""" +298 87 loss """bceaftersigmoid""" +298 87 regularizer """no""" +298 87 optimizer """adadelta""" +298 87 training_loop """lcwa""" +298 87 evaluator """rankbased""" +298 88 dataset """kinships""" +298 88 model """hole""" +298 88 loss """bceaftersigmoid""" +298 88 regularizer """no""" +298 88 optimizer """adadelta""" +298 88 training_loop """lcwa""" +298 88 evaluator """rankbased""" +298 89 dataset """kinships""" +298 89 model """hole""" +298 89 loss """bceaftersigmoid""" +298 89 regularizer """no""" +298 89 optimizer """adadelta""" +298 89 training_loop """lcwa""" +298 89 evaluator """rankbased""" +298 90 dataset """kinships""" +298 90 model """hole""" +298 90 loss """bceaftersigmoid""" +298 90 regularizer """no""" +298 90 optimizer """adadelta""" +298 90 training_loop """lcwa""" +298 90 evaluator """rankbased""" +298 91 dataset """kinships""" +298 91 model """hole""" +298 91 loss """bceaftersigmoid""" +298 91 regularizer """no""" +298 91 optimizer """adadelta""" +298 91 training_loop """lcwa""" +298 91 evaluator """rankbased""" +298 92 dataset """kinships""" +298 92 model """hole""" +298 92 loss """bceaftersigmoid""" +298 92 regularizer """no""" +298 92 optimizer """adadelta""" +298 92 training_loop """lcwa""" +298 92 evaluator """rankbased""" +298 93 dataset """kinships""" +298 93 model """hole""" +298 93 loss """bceaftersigmoid""" +298 93 regularizer """no""" +298 93 optimizer """adadelta""" +298 93 training_loop """lcwa""" +298 93 evaluator """rankbased""" +298 94 dataset """kinships""" +298 94 model """hole""" +298 94 loss 
"""bceaftersigmoid""" +298 94 regularizer """no""" +298 94 optimizer """adadelta""" +298 94 training_loop """lcwa""" +298 94 evaluator """rankbased""" +298 95 dataset """kinships""" +298 95 model """hole""" +298 95 loss """bceaftersigmoid""" +298 95 regularizer """no""" +298 95 optimizer """adadelta""" +298 95 training_loop """lcwa""" +298 95 evaluator """rankbased""" +298 96 dataset """kinships""" +298 96 model """hole""" +298 96 loss """bceaftersigmoid""" +298 96 regularizer """no""" +298 96 optimizer """adadelta""" +298 96 training_loop """lcwa""" +298 96 evaluator """rankbased""" +298 97 dataset """kinships""" +298 97 model """hole""" +298 97 loss """bceaftersigmoid""" +298 97 regularizer """no""" +298 97 optimizer """adadelta""" +298 97 training_loop """lcwa""" +298 97 evaluator """rankbased""" +298 98 dataset """kinships""" +298 98 model """hole""" +298 98 loss """bceaftersigmoid""" +298 98 regularizer """no""" +298 98 optimizer """adadelta""" +298 98 training_loop """lcwa""" +298 98 evaluator """rankbased""" +298 99 dataset """kinships""" +298 99 model """hole""" +298 99 loss """bceaftersigmoid""" +298 99 regularizer """no""" +298 99 optimizer """adadelta""" +298 99 training_loop """lcwa""" +298 99 evaluator """rankbased""" +298 100 dataset """kinships""" +298 100 model """hole""" +298 100 loss """bceaftersigmoid""" +298 100 regularizer """no""" +298 100 optimizer """adadelta""" +298 100 training_loop """lcwa""" +298 100 evaluator """rankbased""" +299 1 model.embedding_dim 1.0 +299 1 training.batch_size 2.0 +299 1 training.label_smoothing 0.2552547947002594 +299 2 model.embedding_dim 1.0 +299 2 training.batch_size 1.0 +299 2 training.label_smoothing 0.028869664315413806 +299 3 model.embedding_dim 1.0 +299 3 training.batch_size 0.0 +299 3 training.label_smoothing 0.9897043095894875 +299 4 model.embedding_dim 2.0 +299 4 training.batch_size 2.0 +299 4 training.label_smoothing 0.03607605349359093 +299 5 model.embedding_dim 1.0 +299 5 training.batch_size 2.0 +299 
5 training.label_smoothing 0.8437119570895645 +299 6 model.embedding_dim 1.0 +299 6 training.batch_size 0.0 +299 6 training.label_smoothing 0.0015629605265787998 +299 7 model.embedding_dim 0.0 +299 7 training.batch_size 2.0 +299 7 training.label_smoothing 0.09518720632322786 +299 8 model.embedding_dim 2.0 +299 8 training.batch_size 1.0 +299 8 training.label_smoothing 0.0016114262495208161 +299 9 model.embedding_dim 1.0 +299 9 training.batch_size 2.0 +299 9 training.label_smoothing 0.003948232752319128 +299 10 model.embedding_dim 1.0 +299 10 training.batch_size 0.0 +299 10 training.label_smoothing 0.00749268992404187 +299 11 model.embedding_dim 2.0 +299 11 training.batch_size 2.0 +299 11 training.label_smoothing 0.42385094547813784 +299 12 model.embedding_dim 0.0 +299 12 training.batch_size 1.0 +299 12 training.label_smoothing 0.5228649773608739 +299 13 model.embedding_dim 0.0 +299 13 training.batch_size 0.0 +299 13 training.label_smoothing 0.2926336033440765 +299 14 model.embedding_dim 0.0 +299 14 training.batch_size 0.0 +299 14 training.label_smoothing 0.3982732319203307 +299 15 model.embedding_dim 1.0 +299 15 training.batch_size 1.0 +299 15 training.label_smoothing 0.002253837683563765 +299 16 model.embedding_dim 2.0 +299 16 training.batch_size 2.0 +299 16 training.label_smoothing 0.020503214564323653 +299 17 model.embedding_dim 0.0 +299 17 training.batch_size 2.0 +299 17 training.label_smoothing 0.07822926318823978 +299 18 model.embedding_dim 2.0 +299 18 training.batch_size 2.0 +299 18 training.label_smoothing 0.05686312052451086 +299 19 model.embedding_dim 1.0 +299 19 training.batch_size 2.0 +299 19 training.label_smoothing 0.34078137331404573 +299 20 model.embedding_dim 2.0 +299 20 training.batch_size 0.0 +299 20 training.label_smoothing 0.035758984015337676 +299 21 model.embedding_dim 1.0 +299 21 training.batch_size 0.0 +299 21 training.label_smoothing 0.004310757779442155 +299 22 model.embedding_dim 1.0 +299 22 training.batch_size 0.0 +299 22 
training.label_smoothing 0.0070284608192041995 +299 23 model.embedding_dim 1.0 +299 23 training.batch_size 1.0 +299 23 training.label_smoothing 0.016505470532832286 +299 24 model.embedding_dim 2.0 +299 24 training.batch_size 2.0 +299 24 training.label_smoothing 0.39288453173200605 +299 25 model.embedding_dim 1.0 +299 25 training.batch_size 0.0 +299 25 training.label_smoothing 0.05022050984992218 +299 26 model.embedding_dim 2.0 +299 26 training.batch_size 2.0 +299 26 training.label_smoothing 0.6345148423954354 +299 27 model.embedding_dim 2.0 +299 27 training.batch_size 1.0 +299 27 training.label_smoothing 0.521467884442473 +299 28 model.embedding_dim 2.0 +299 28 training.batch_size 2.0 +299 28 training.label_smoothing 0.3705858211694857 +299 29 model.embedding_dim 1.0 +299 29 training.batch_size 2.0 +299 29 training.label_smoothing 0.19873531869754849 +299 30 model.embedding_dim 1.0 +299 30 training.batch_size 2.0 +299 30 training.label_smoothing 0.06252672349883936 +299 31 model.embedding_dim 0.0 +299 31 training.batch_size 0.0 +299 31 training.label_smoothing 0.030895694867395032 +299 32 model.embedding_dim 1.0 +299 32 training.batch_size 1.0 +299 32 training.label_smoothing 0.04774608267506386 +299 33 model.embedding_dim 2.0 +299 33 training.batch_size 1.0 +299 33 training.label_smoothing 0.05245400313463386 +299 34 model.embedding_dim 2.0 +299 34 training.batch_size 2.0 +299 34 training.label_smoothing 0.9994763718781318 +299 35 model.embedding_dim 2.0 +299 35 training.batch_size 1.0 +299 35 training.label_smoothing 0.23045510514235484 +299 36 model.embedding_dim 0.0 +299 36 training.batch_size 1.0 +299 36 training.label_smoothing 0.0024234610244756277 +299 37 model.embedding_dim 1.0 +299 37 training.batch_size 2.0 +299 37 training.label_smoothing 0.32006146924093326 +299 38 model.embedding_dim 2.0 +299 38 training.batch_size 2.0 +299 38 training.label_smoothing 0.0244122023620625 +299 39 model.embedding_dim 1.0 +299 39 training.batch_size 2.0 +299 39 
training.label_smoothing 0.004211090663345413 +299 40 model.embedding_dim 1.0 +299 40 training.batch_size 1.0 +299 40 training.label_smoothing 0.046287716464077144 +299 41 model.embedding_dim 1.0 +299 41 training.batch_size 0.0 +299 41 training.label_smoothing 0.016637355364103695 +299 42 model.embedding_dim 0.0 +299 42 training.batch_size 2.0 +299 42 training.label_smoothing 0.012816263245707945 +299 43 model.embedding_dim 2.0 +299 43 training.batch_size 1.0 +299 43 training.label_smoothing 0.15409163010812182 +299 44 model.embedding_dim 0.0 +299 44 training.batch_size 2.0 +299 44 training.label_smoothing 0.0030272083255108388 +299 45 model.embedding_dim 1.0 +299 45 training.batch_size 0.0 +299 45 training.label_smoothing 0.001498612514941701 +299 46 model.embedding_dim 1.0 +299 46 training.batch_size 1.0 +299 46 training.label_smoothing 0.006271439155172572 +299 47 model.embedding_dim 2.0 +299 47 training.batch_size 1.0 +299 47 training.label_smoothing 0.7168057613606982 +299 48 model.embedding_dim 0.0 +299 48 training.batch_size 1.0 +299 48 training.label_smoothing 0.15829374070194074 +299 49 model.embedding_dim 1.0 +299 49 training.batch_size 1.0 +299 49 training.label_smoothing 0.6388333666533186 +299 50 model.embedding_dim 0.0 +299 50 training.batch_size 0.0 +299 50 training.label_smoothing 0.02123043094206607 +299 51 model.embedding_dim 1.0 +299 51 training.batch_size 2.0 +299 51 training.label_smoothing 0.0010075430768053985 +299 52 model.embedding_dim 2.0 +299 52 training.batch_size 2.0 +299 52 training.label_smoothing 0.022155757721036128 +299 53 model.embedding_dim 0.0 +299 53 training.batch_size 0.0 +299 53 training.label_smoothing 0.22544778343099883 +299 54 model.embedding_dim 0.0 +299 54 training.batch_size 0.0 +299 54 training.label_smoothing 0.004844620810491657 +299 55 model.embedding_dim 0.0 +299 55 training.batch_size 0.0 +299 55 training.label_smoothing 0.00926457671650639 +299 56 model.embedding_dim 0.0 +299 56 training.batch_size 1.0 +299 56 
training.label_smoothing 0.29782406762143904 +299 57 model.embedding_dim 2.0 +299 57 training.batch_size 2.0 +299 57 training.label_smoothing 0.24643787073062745 +299 58 model.embedding_dim 2.0 +299 58 training.batch_size 1.0 +299 58 training.label_smoothing 0.017831534617723892 +299 59 model.embedding_dim 1.0 +299 59 training.batch_size 2.0 +299 59 training.label_smoothing 0.18846437806223512 +299 60 model.embedding_dim 0.0 +299 60 training.batch_size 2.0 +299 60 training.label_smoothing 0.008314862635744404 +299 61 model.embedding_dim 1.0 +299 61 training.batch_size 1.0 +299 61 training.label_smoothing 0.0011077419794396728 +299 62 model.embedding_dim 2.0 +299 62 training.batch_size 1.0 +299 62 training.label_smoothing 0.1727488327505172 +299 63 model.embedding_dim 2.0 +299 63 training.batch_size 1.0 +299 63 training.label_smoothing 0.0012775149000764986 +299 64 model.embedding_dim 2.0 +299 64 training.batch_size 1.0 +299 64 training.label_smoothing 0.0012044133714435798 +299 65 model.embedding_dim 2.0 +299 65 training.batch_size 2.0 +299 65 training.label_smoothing 0.18416511111309386 +299 66 model.embedding_dim 1.0 +299 66 training.batch_size 2.0 +299 66 training.label_smoothing 0.00460241932171758 +299 67 model.embedding_dim 2.0 +299 67 training.batch_size 2.0 +299 67 training.label_smoothing 0.011905594142426064 +299 68 model.embedding_dim 0.0 +299 68 training.batch_size 2.0 +299 68 training.label_smoothing 0.1034855317540834 +299 69 model.embedding_dim 1.0 +299 69 training.batch_size 0.0 +299 69 training.label_smoothing 0.004074635123079072 +299 70 model.embedding_dim 1.0 +299 70 training.batch_size 1.0 +299 70 training.label_smoothing 0.014850453428043994 +299 71 model.embedding_dim 1.0 +299 71 training.batch_size 0.0 +299 71 training.label_smoothing 0.1752683482457217 +299 72 model.embedding_dim 0.0 +299 72 training.batch_size 1.0 +299 72 training.label_smoothing 0.5967833452058032 +299 73 model.embedding_dim 2.0 +299 73 training.batch_size 1.0 +299 73 
training.label_smoothing 0.013887918352124329 +299 74 model.embedding_dim 2.0 +299 74 training.batch_size 2.0 +299 74 training.label_smoothing 0.5468702763777709 +299 75 model.embedding_dim 1.0 +299 75 training.batch_size 2.0 +299 75 training.label_smoothing 0.0021960053752256505 +299 76 model.embedding_dim 1.0 +299 76 training.batch_size 0.0 +299 76 training.label_smoothing 0.006243116290464714 +299 77 model.embedding_dim 2.0 +299 77 training.batch_size 2.0 +299 77 training.label_smoothing 0.22439736716107778 +299 78 model.embedding_dim 1.0 +299 78 training.batch_size 1.0 +299 78 training.label_smoothing 0.17228901868996715 +299 79 model.embedding_dim 2.0 +299 79 training.batch_size 1.0 +299 79 training.label_smoothing 0.33582402820729046 +299 80 model.embedding_dim 1.0 +299 80 training.batch_size 2.0 +299 80 training.label_smoothing 0.006083838502175053 +299 81 model.embedding_dim 2.0 +299 81 training.batch_size 2.0 +299 81 training.label_smoothing 0.030077395122173833 +299 82 model.embedding_dim 0.0 +299 82 training.batch_size 1.0 +299 82 training.label_smoothing 0.0010790570111666711 +299 83 model.embedding_dim 1.0 +299 83 training.batch_size 1.0 +299 83 training.label_smoothing 0.021576143249289106 +299 84 model.embedding_dim 0.0 +299 84 training.batch_size 2.0 +299 84 training.label_smoothing 0.028079810074196913 +299 85 model.embedding_dim 1.0 +299 85 training.batch_size 0.0 +299 85 training.label_smoothing 0.008523200188963995 +299 86 model.embedding_dim 0.0 +299 86 training.batch_size 1.0 +299 86 training.label_smoothing 0.015662547537959176 +299 87 model.embedding_dim 1.0 +299 87 training.batch_size 0.0 +299 87 training.label_smoothing 0.01466939153772291 +299 88 model.embedding_dim 1.0 +299 88 training.batch_size 0.0 +299 88 training.label_smoothing 0.02348794957156433 +299 89 model.embedding_dim 1.0 +299 89 training.batch_size 2.0 +299 89 training.label_smoothing 0.2377416390967718 +299 90 model.embedding_dim 0.0 +299 90 training.batch_size 0.0 +299 90 
training.label_smoothing 0.0043863342776043895 +299 91 model.embedding_dim 2.0 +299 91 training.batch_size 0.0 +299 91 training.label_smoothing 0.0028362076475517456 +299 92 model.embedding_dim 1.0 +299 92 training.batch_size 0.0 +299 92 training.label_smoothing 0.3187473190439029 +299 93 model.embedding_dim 1.0 +299 93 training.batch_size 2.0 +299 93 training.label_smoothing 0.10886822922522028 +299 94 model.embedding_dim 2.0 +299 94 training.batch_size 0.0 +299 94 training.label_smoothing 0.0040944054253667286 +299 95 model.embedding_dim 1.0 +299 95 training.batch_size 0.0 +299 95 training.label_smoothing 0.0010009734191641895 +299 96 model.embedding_dim 1.0 +299 96 training.batch_size 2.0 +299 96 training.label_smoothing 0.006045033850366011 +299 97 model.embedding_dim 2.0 +299 97 training.batch_size 0.0 +299 97 training.label_smoothing 0.025914661677824295 +299 98 model.embedding_dim 2.0 +299 98 training.batch_size 1.0 +299 98 training.label_smoothing 0.025553419896904298 +299 99 model.embedding_dim 0.0 +299 99 training.batch_size 0.0 +299 99 training.label_smoothing 0.0025668776482996787 +299 100 model.embedding_dim 2.0 +299 100 training.batch_size 0.0 +299 100 training.label_smoothing 0.10284468400337869 +299 1 dataset """kinships""" +299 1 model """hole""" +299 1 loss """softplus""" +299 1 regularizer """no""" +299 1 optimizer """adadelta""" +299 1 training_loop """lcwa""" +299 1 evaluator """rankbased""" +299 2 dataset """kinships""" +299 2 model """hole""" +299 2 loss """softplus""" +299 2 regularizer """no""" +299 2 optimizer """adadelta""" +299 2 training_loop """lcwa""" +299 2 evaluator """rankbased""" +299 3 dataset """kinships""" +299 3 model """hole""" +299 3 loss """softplus""" +299 3 regularizer """no""" +299 3 optimizer """adadelta""" +299 3 training_loop """lcwa""" +299 3 evaluator """rankbased""" +299 4 dataset """kinships""" +299 4 model """hole""" +299 4 loss """softplus""" +299 4 regularizer """no""" +299 4 optimizer """adadelta""" +299 4 
training_loop """lcwa""" +299 4 evaluator """rankbased""" +299 5 dataset """kinships""" +299 5 model """hole""" +299 5 loss """softplus""" +299 5 regularizer """no""" +299 5 optimizer """adadelta""" +299 5 training_loop """lcwa""" +299 5 evaluator """rankbased""" +299 6 dataset """kinships""" +299 6 model """hole""" +299 6 loss """softplus""" +299 6 regularizer """no""" +299 6 optimizer """adadelta""" +299 6 training_loop """lcwa""" +299 6 evaluator """rankbased""" +299 7 dataset """kinships""" +299 7 model """hole""" +299 7 loss """softplus""" +299 7 regularizer """no""" +299 7 optimizer """adadelta""" +299 7 training_loop """lcwa""" +299 7 evaluator """rankbased""" +299 8 dataset """kinships""" +299 8 model """hole""" +299 8 loss """softplus""" +299 8 regularizer """no""" +299 8 optimizer """adadelta""" +299 8 training_loop """lcwa""" +299 8 evaluator """rankbased""" +299 9 dataset """kinships""" +299 9 model """hole""" +299 9 loss """softplus""" +299 9 regularizer """no""" +299 9 optimizer """adadelta""" +299 9 training_loop """lcwa""" +299 9 evaluator """rankbased""" +299 10 dataset """kinships""" +299 10 model """hole""" +299 10 loss """softplus""" +299 10 regularizer """no""" +299 10 optimizer """adadelta""" +299 10 training_loop """lcwa""" +299 10 evaluator """rankbased""" +299 11 dataset """kinships""" +299 11 model """hole""" +299 11 loss """softplus""" +299 11 regularizer """no""" +299 11 optimizer """adadelta""" +299 11 training_loop """lcwa""" +299 11 evaluator """rankbased""" +299 12 dataset """kinships""" +299 12 model """hole""" +299 12 loss """softplus""" +299 12 regularizer """no""" +299 12 optimizer """adadelta""" +299 12 training_loop """lcwa""" +299 12 evaluator """rankbased""" +299 13 dataset """kinships""" +299 13 model """hole""" +299 13 loss """softplus""" +299 13 regularizer """no""" +299 13 optimizer """adadelta""" +299 13 training_loop """lcwa""" +299 13 evaluator """rankbased""" +299 14 dataset """kinships""" +299 14 model """hole""" 
+299 14 loss """softplus""" +299 14 regularizer """no""" +299 14 optimizer """adadelta""" +299 14 training_loop """lcwa""" +299 14 evaluator """rankbased""" +299 15 dataset """kinships""" +299 15 model """hole""" +299 15 loss """softplus""" +299 15 regularizer """no""" +299 15 optimizer """adadelta""" +299 15 training_loop """lcwa""" +299 15 evaluator """rankbased""" +299 16 dataset """kinships""" +299 16 model """hole""" +299 16 loss """softplus""" +299 16 regularizer """no""" +299 16 optimizer """adadelta""" +299 16 training_loop """lcwa""" +299 16 evaluator """rankbased""" +299 17 dataset """kinships""" +299 17 model """hole""" +299 17 loss """softplus""" +299 17 regularizer """no""" +299 17 optimizer """adadelta""" +299 17 training_loop """lcwa""" +299 17 evaluator """rankbased""" +299 18 dataset """kinships""" +299 18 model """hole""" +299 18 loss """softplus""" +299 18 regularizer """no""" +299 18 optimizer """adadelta""" +299 18 training_loop """lcwa""" +299 18 evaluator """rankbased""" +299 19 dataset """kinships""" +299 19 model """hole""" +299 19 loss """softplus""" +299 19 regularizer """no""" +299 19 optimizer """adadelta""" +299 19 training_loop """lcwa""" +299 19 evaluator """rankbased""" +299 20 dataset """kinships""" +299 20 model """hole""" +299 20 loss """softplus""" +299 20 regularizer """no""" +299 20 optimizer """adadelta""" +299 20 training_loop """lcwa""" +299 20 evaluator """rankbased""" +299 21 dataset """kinships""" +299 21 model """hole""" +299 21 loss """softplus""" +299 21 regularizer """no""" +299 21 optimizer """adadelta""" +299 21 training_loop """lcwa""" +299 21 evaluator """rankbased""" +299 22 dataset """kinships""" +299 22 model """hole""" +299 22 loss """softplus""" +299 22 regularizer """no""" +299 22 optimizer """adadelta""" +299 22 training_loop """lcwa""" +299 22 evaluator """rankbased""" +299 23 dataset """kinships""" +299 23 model """hole""" +299 23 loss """softplus""" +299 23 regularizer """no""" +299 23 optimizer 
"""adadelta""" +299 23 training_loop """lcwa""" +299 23 evaluator """rankbased""" +299 24 dataset """kinships""" +299 24 model """hole""" +299 24 loss """softplus""" +299 24 regularizer """no""" +299 24 optimizer """adadelta""" +299 24 training_loop """lcwa""" +299 24 evaluator """rankbased""" +299 25 dataset """kinships""" +299 25 model """hole""" +299 25 loss """softplus""" +299 25 regularizer """no""" +299 25 optimizer """adadelta""" +299 25 training_loop """lcwa""" +299 25 evaluator """rankbased""" +299 26 dataset """kinships""" +299 26 model """hole""" +299 26 loss """softplus""" +299 26 regularizer """no""" +299 26 optimizer """adadelta""" +299 26 training_loop """lcwa""" +299 26 evaluator """rankbased""" +299 27 dataset """kinships""" +299 27 model """hole""" +299 27 loss """softplus""" +299 27 regularizer """no""" +299 27 optimizer """adadelta""" +299 27 training_loop """lcwa""" +299 27 evaluator """rankbased""" +299 28 dataset """kinships""" +299 28 model """hole""" +299 28 loss """softplus""" +299 28 regularizer """no""" +299 28 optimizer """adadelta""" +299 28 training_loop """lcwa""" +299 28 evaluator """rankbased""" +299 29 dataset """kinships""" +299 29 model """hole""" +299 29 loss """softplus""" +299 29 regularizer """no""" +299 29 optimizer """adadelta""" +299 29 training_loop """lcwa""" +299 29 evaluator """rankbased""" +299 30 dataset """kinships""" +299 30 model """hole""" +299 30 loss """softplus""" +299 30 regularizer """no""" +299 30 optimizer """adadelta""" +299 30 training_loop """lcwa""" +299 30 evaluator """rankbased""" +299 31 dataset """kinships""" +299 31 model """hole""" +299 31 loss """softplus""" +299 31 regularizer """no""" +299 31 optimizer """adadelta""" +299 31 training_loop """lcwa""" +299 31 evaluator """rankbased""" +299 32 dataset """kinships""" +299 32 model """hole""" +299 32 loss """softplus""" +299 32 regularizer """no""" +299 32 optimizer """adadelta""" +299 32 training_loop """lcwa""" +299 32 evaluator """rankbased""" 
+299 33 dataset """kinships""" +299 33 model """hole""" +299 33 loss """softplus""" +299 33 regularizer """no""" +299 33 optimizer """adadelta""" +299 33 training_loop """lcwa""" +299 33 evaluator """rankbased""" +299 34 dataset """kinships""" +299 34 model """hole""" +299 34 loss """softplus""" +299 34 regularizer """no""" +299 34 optimizer """adadelta""" +299 34 training_loop """lcwa""" +299 34 evaluator """rankbased""" +299 35 dataset """kinships""" +299 35 model """hole""" +299 35 loss """softplus""" +299 35 regularizer """no""" +299 35 optimizer """adadelta""" +299 35 training_loop """lcwa""" +299 35 evaluator """rankbased""" +299 36 dataset """kinships""" +299 36 model """hole""" +299 36 loss """softplus""" +299 36 regularizer """no""" +299 36 optimizer """adadelta""" +299 36 training_loop """lcwa""" +299 36 evaluator """rankbased""" +299 37 dataset """kinships""" +299 37 model """hole""" +299 37 loss """softplus""" +299 37 regularizer """no""" +299 37 optimizer """adadelta""" +299 37 training_loop """lcwa""" +299 37 evaluator """rankbased""" +299 38 dataset """kinships""" +299 38 model """hole""" +299 38 loss """softplus""" +299 38 regularizer """no""" +299 38 optimizer """adadelta""" +299 38 training_loop """lcwa""" +299 38 evaluator """rankbased""" +299 39 dataset """kinships""" +299 39 model """hole""" +299 39 loss """softplus""" +299 39 regularizer """no""" +299 39 optimizer """adadelta""" +299 39 training_loop """lcwa""" +299 39 evaluator """rankbased""" +299 40 dataset """kinships""" +299 40 model """hole""" +299 40 loss """softplus""" +299 40 regularizer """no""" +299 40 optimizer """adadelta""" +299 40 training_loop """lcwa""" +299 40 evaluator """rankbased""" +299 41 dataset """kinships""" +299 41 model """hole""" +299 41 loss """softplus""" +299 41 regularizer """no""" +299 41 optimizer """adadelta""" +299 41 training_loop """lcwa""" +299 41 evaluator """rankbased""" +299 42 dataset """kinships""" +299 42 model """hole""" +299 42 loss 
"""softplus""" +299 42 regularizer """no""" +299 42 optimizer """adadelta""" +299 42 training_loop """lcwa""" +299 42 evaluator """rankbased""" +299 43 dataset """kinships""" +299 43 model """hole""" +299 43 loss """softplus""" +299 43 regularizer """no""" +299 43 optimizer """adadelta""" +299 43 training_loop """lcwa""" +299 43 evaluator """rankbased""" +299 44 dataset """kinships""" +299 44 model """hole""" +299 44 loss """softplus""" +299 44 regularizer """no""" +299 44 optimizer """adadelta""" +299 44 training_loop """lcwa""" +299 44 evaluator """rankbased""" +299 45 dataset """kinships""" +299 45 model """hole""" +299 45 loss """softplus""" +299 45 regularizer """no""" +299 45 optimizer """adadelta""" +299 45 training_loop """lcwa""" +299 45 evaluator """rankbased""" +299 46 dataset """kinships""" +299 46 model """hole""" +299 46 loss """softplus""" +299 46 regularizer """no""" +299 46 optimizer """adadelta""" +299 46 training_loop """lcwa""" +299 46 evaluator """rankbased""" +299 47 dataset """kinships""" +299 47 model """hole""" +299 47 loss """softplus""" +299 47 regularizer """no""" +299 47 optimizer """adadelta""" +299 47 training_loop """lcwa""" +299 47 evaluator """rankbased""" +299 48 dataset """kinships""" +299 48 model """hole""" +299 48 loss """softplus""" +299 48 regularizer """no""" +299 48 optimizer """adadelta""" +299 48 training_loop """lcwa""" +299 48 evaluator """rankbased""" +299 49 dataset """kinships""" +299 49 model """hole""" +299 49 loss """softplus""" +299 49 regularizer """no""" +299 49 optimizer """adadelta""" +299 49 training_loop """lcwa""" +299 49 evaluator """rankbased""" +299 50 dataset """kinships""" +299 50 model """hole""" +299 50 loss """softplus""" +299 50 regularizer """no""" +299 50 optimizer """adadelta""" +299 50 training_loop """lcwa""" +299 50 evaluator """rankbased""" +299 51 dataset """kinships""" +299 51 model """hole""" +299 51 loss """softplus""" +299 51 regularizer """no""" +299 51 optimizer """adadelta""" +299 
51 training_loop """lcwa""" +299 51 evaluator """rankbased""" +299 52 dataset """kinships""" +299 52 model """hole""" +299 52 loss """softplus""" +299 52 regularizer """no""" +299 52 optimizer """adadelta""" +299 52 training_loop """lcwa""" +299 52 evaluator """rankbased""" +299 53 dataset """kinships""" +299 53 model """hole""" +299 53 loss """softplus""" +299 53 regularizer """no""" +299 53 optimizer """adadelta""" +299 53 training_loop """lcwa""" +299 53 evaluator """rankbased""" +299 54 dataset """kinships""" +299 54 model """hole""" +299 54 loss """softplus""" +299 54 regularizer """no""" +299 54 optimizer """adadelta""" +299 54 training_loop """lcwa""" +299 54 evaluator """rankbased""" +299 55 dataset """kinships""" +299 55 model """hole""" +299 55 loss """softplus""" +299 55 regularizer """no""" +299 55 optimizer """adadelta""" +299 55 training_loop """lcwa""" +299 55 evaluator """rankbased""" +299 56 dataset """kinships""" +299 56 model """hole""" +299 56 loss """softplus""" +299 56 regularizer """no""" +299 56 optimizer """adadelta""" +299 56 training_loop """lcwa""" +299 56 evaluator """rankbased""" +299 57 dataset """kinships""" +299 57 model """hole""" +299 57 loss """softplus""" +299 57 regularizer """no""" +299 57 optimizer """adadelta""" +299 57 training_loop """lcwa""" +299 57 evaluator """rankbased""" +299 58 dataset """kinships""" +299 58 model """hole""" +299 58 loss """softplus""" +299 58 regularizer """no""" +299 58 optimizer """adadelta""" +299 58 training_loop """lcwa""" +299 58 evaluator """rankbased""" +299 59 dataset """kinships""" +299 59 model """hole""" +299 59 loss """softplus""" +299 59 regularizer """no""" +299 59 optimizer """adadelta""" +299 59 training_loop """lcwa""" +299 59 evaluator """rankbased""" +299 60 dataset """kinships""" +299 60 model """hole""" +299 60 loss """softplus""" +299 60 regularizer """no""" +299 60 optimizer """adadelta""" +299 60 training_loop """lcwa""" +299 60 evaluator """rankbased""" +299 61 dataset 
"""kinships""" +299 61 model """hole""" +299 61 loss """softplus""" +299 61 regularizer """no""" +299 61 optimizer """adadelta""" +299 61 training_loop """lcwa""" +299 61 evaluator """rankbased""" +299 62 dataset """kinships""" +299 62 model """hole""" +299 62 loss """softplus""" +299 62 regularizer """no""" +299 62 optimizer """adadelta""" +299 62 training_loop """lcwa""" +299 62 evaluator """rankbased""" +299 63 dataset """kinships""" +299 63 model """hole""" +299 63 loss """softplus""" +299 63 regularizer """no""" +299 63 optimizer """adadelta""" +299 63 training_loop """lcwa""" +299 63 evaluator """rankbased""" +299 64 dataset """kinships""" +299 64 model """hole""" +299 64 loss """softplus""" +299 64 regularizer """no""" +299 64 optimizer """adadelta""" +299 64 training_loop """lcwa""" +299 64 evaluator """rankbased""" +299 65 dataset """kinships""" +299 65 model """hole""" +299 65 loss """softplus""" +299 65 regularizer """no""" +299 65 optimizer """adadelta""" +299 65 training_loop """lcwa""" +299 65 evaluator """rankbased""" +299 66 dataset """kinships""" +299 66 model """hole""" +299 66 loss """softplus""" +299 66 regularizer """no""" +299 66 optimizer """adadelta""" +299 66 training_loop """lcwa""" +299 66 evaluator """rankbased""" +299 67 dataset """kinships""" +299 67 model """hole""" +299 67 loss """softplus""" +299 67 regularizer """no""" +299 67 optimizer """adadelta""" +299 67 training_loop """lcwa""" +299 67 evaluator """rankbased""" +299 68 dataset """kinships""" +299 68 model """hole""" +299 68 loss """softplus""" +299 68 regularizer """no""" +299 68 optimizer """adadelta""" +299 68 training_loop """lcwa""" +299 68 evaluator """rankbased""" +299 69 dataset """kinships""" +299 69 model """hole""" +299 69 loss """softplus""" +299 69 regularizer """no""" +299 69 optimizer """adadelta""" +299 69 training_loop """lcwa""" +299 69 evaluator """rankbased""" +299 70 dataset """kinships""" +299 70 model """hole""" +299 70 loss """softplus""" +299 70 
regularizer """no""" +299 70 optimizer """adadelta""" +299 70 training_loop """lcwa""" +299 70 evaluator """rankbased""" +299 71 dataset """kinships""" +299 71 model """hole""" +299 71 loss """softplus""" +299 71 regularizer """no""" +299 71 optimizer """adadelta""" +299 71 training_loop """lcwa""" +299 71 evaluator """rankbased""" +299 72 dataset """kinships""" +299 72 model """hole""" +299 72 loss """softplus""" +299 72 regularizer """no""" +299 72 optimizer """adadelta""" +299 72 training_loop """lcwa""" +299 72 evaluator """rankbased""" +299 73 dataset """kinships""" +299 73 model """hole""" +299 73 loss """softplus""" +299 73 regularizer """no""" +299 73 optimizer """adadelta""" +299 73 training_loop """lcwa""" +299 73 evaluator """rankbased""" +299 74 dataset """kinships""" +299 74 model """hole""" +299 74 loss """softplus""" +299 74 regularizer """no""" +299 74 optimizer """adadelta""" +299 74 training_loop """lcwa""" +299 74 evaluator """rankbased""" +299 75 dataset """kinships""" +299 75 model """hole""" +299 75 loss """softplus""" +299 75 regularizer """no""" +299 75 optimizer """adadelta""" +299 75 training_loop """lcwa""" +299 75 evaluator """rankbased""" +299 76 dataset """kinships""" +299 76 model """hole""" +299 76 loss """softplus""" +299 76 regularizer """no""" +299 76 optimizer """adadelta""" +299 76 training_loop """lcwa""" +299 76 evaluator """rankbased""" +299 77 dataset """kinships""" +299 77 model """hole""" +299 77 loss """softplus""" +299 77 regularizer """no""" +299 77 optimizer """adadelta""" +299 77 training_loop """lcwa""" +299 77 evaluator """rankbased""" +299 78 dataset """kinships""" +299 78 model """hole""" +299 78 loss """softplus""" +299 78 regularizer """no""" +299 78 optimizer """adadelta""" +299 78 training_loop """lcwa""" +299 78 evaluator """rankbased""" +299 79 dataset """kinships""" +299 79 model """hole""" +299 79 loss """softplus""" +299 79 regularizer """no""" +299 79 optimizer """adadelta""" +299 79 training_loop 
"""lcwa""" +299 79 evaluator """rankbased""" +299 80 dataset """kinships""" +299 80 model """hole""" +299 80 loss """softplus""" +299 80 regularizer """no""" +299 80 optimizer """adadelta""" +299 80 training_loop """lcwa""" +299 80 evaluator """rankbased""" +299 81 dataset """kinships""" +299 81 model """hole""" +299 81 loss """softplus""" +299 81 regularizer """no""" +299 81 optimizer """adadelta""" +299 81 training_loop """lcwa""" +299 81 evaluator """rankbased""" +299 82 dataset """kinships""" +299 82 model """hole""" +299 82 loss """softplus""" +299 82 regularizer """no""" +299 82 optimizer """adadelta""" +299 82 training_loop """lcwa""" +299 82 evaluator """rankbased""" +299 83 dataset """kinships""" +299 83 model """hole""" +299 83 loss """softplus""" +299 83 regularizer """no""" +299 83 optimizer """adadelta""" +299 83 training_loop """lcwa""" +299 83 evaluator """rankbased""" +299 84 dataset """kinships""" +299 84 model """hole""" +299 84 loss """softplus""" +299 84 regularizer """no""" +299 84 optimizer """adadelta""" +299 84 training_loop """lcwa""" +299 84 evaluator """rankbased""" +299 85 dataset """kinships""" +299 85 model """hole""" +299 85 loss """softplus""" +299 85 regularizer """no""" +299 85 optimizer """adadelta""" +299 85 training_loop """lcwa""" +299 85 evaluator """rankbased""" +299 86 dataset """kinships""" +299 86 model """hole""" +299 86 loss """softplus""" +299 86 regularizer """no""" +299 86 optimizer """adadelta""" +299 86 training_loop """lcwa""" +299 86 evaluator """rankbased""" +299 87 dataset """kinships""" +299 87 model """hole""" +299 87 loss """softplus""" +299 87 regularizer """no""" +299 87 optimizer """adadelta""" +299 87 training_loop """lcwa""" +299 87 evaluator """rankbased""" +299 88 dataset """kinships""" +299 88 model """hole""" +299 88 loss """softplus""" +299 88 regularizer """no""" +299 88 optimizer """adadelta""" +299 88 training_loop """lcwa""" +299 88 evaluator """rankbased""" +299 89 dataset """kinships""" +299 
89 model """hole""" +299 89 loss """softplus""" +299 89 regularizer """no""" +299 89 optimizer """adadelta""" +299 89 training_loop """lcwa""" +299 89 evaluator """rankbased""" +299 90 dataset """kinships""" +299 90 model """hole""" +299 90 loss """softplus""" +299 90 regularizer """no""" +299 90 optimizer """adadelta""" +299 90 training_loop """lcwa""" +299 90 evaluator """rankbased""" +299 91 dataset """kinships""" +299 91 model """hole""" +299 91 loss """softplus""" +299 91 regularizer """no""" +299 91 optimizer """adadelta""" +299 91 training_loop """lcwa""" +299 91 evaluator """rankbased""" +299 92 dataset """kinships""" +299 92 model """hole""" +299 92 loss """softplus""" +299 92 regularizer """no""" +299 92 optimizer """adadelta""" +299 92 training_loop """lcwa""" +299 92 evaluator """rankbased""" +299 93 dataset """kinships""" +299 93 model """hole""" +299 93 loss """softplus""" +299 93 regularizer """no""" +299 93 optimizer """adadelta""" +299 93 training_loop """lcwa""" +299 93 evaluator """rankbased""" +299 94 dataset """kinships""" +299 94 model """hole""" +299 94 loss """softplus""" +299 94 regularizer """no""" +299 94 optimizer """adadelta""" +299 94 training_loop """lcwa""" +299 94 evaluator """rankbased""" +299 95 dataset """kinships""" +299 95 model """hole""" +299 95 loss """softplus""" +299 95 regularizer """no""" +299 95 optimizer """adadelta""" +299 95 training_loop """lcwa""" +299 95 evaluator """rankbased""" +299 96 dataset """kinships""" +299 96 model """hole""" +299 96 loss """softplus""" +299 96 regularizer """no""" +299 96 optimizer """adadelta""" +299 96 training_loop """lcwa""" +299 96 evaluator """rankbased""" +299 97 dataset """kinships""" +299 97 model """hole""" +299 97 loss """softplus""" +299 97 regularizer """no""" +299 97 optimizer """adadelta""" +299 97 training_loop """lcwa""" +299 97 evaluator """rankbased""" +299 98 dataset """kinships""" +299 98 model """hole""" +299 98 loss """softplus""" +299 98 regularizer """no""" +299 
98 optimizer """adadelta""" +299 98 training_loop """lcwa""" +299 98 evaluator """rankbased""" +299 99 dataset """kinships""" +299 99 model """hole""" +299 99 loss """softplus""" +299 99 regularizer """no""" +299 99 optimizer """adadelta""" +299 99 training_loop """lcwa""" +299 99 evaluator """rankbased""" +299 100 dataset """kinships""" +299 100 model """hole""" +299 100 loss """softplus""" +299 100 regularizer """no""" +299 100 optimizer """adadelta""" +299 100 training_loop """lcwa""" +299 100 evaluator """rankbased""" +300 1 model.embedding_dim 2.0 +300 1 training.batch_size 2.0 +300 1 training.label_smoothing 0.05344591701139349 +300 2 model.embedding_dim 2.0 +300 2 training.batch_size 1.0 +300 2 training.label_smoothing 0.010675578020329977 +300 3 model.embedding_dim 2.0 +300 3 training.batch_size 1.0 +300 3 training.label_smoothing 0.03315957570641844 +300 4 model.embedding_dim 1.0 +300 4 training.batch_size 2.0 +300 4 training.label_smoothing 0.3382464651540863 +300 5 model.embedding_dim 0.0 +300 5 training.batch_size 1.0 +300 5 training.label_smoothing 0.003666202178703064 +300 6 model.embedding_dim 2.0 +300 6 training.batch_size 1.0 +300 6 training.label_smoothing 0.0012276627054024361 +300 7 model.embedding_dim 2.0 +300 7 training.batch_size 2.0 +300 7 training.label_smoothing 0.00951047716917974 +300 8 model.embedding_dim 2.0 +300 8 training.batch_size 1.0 +300 8 training.label_smoothing 0.005824233890866138 +300 9 model.embedding_dim 2.0 +300 9 training.batch_size 2.0 +300 9 training.label_smoothing 0.10391769161774789 +300 10 model.embedding_dim 2.0 +300 10 training.batch_size 2.0 +300 10 training.label_smoothing 0.003633346517583493 +300 11 model.embedding_dim 1.0 +300 11 training.batch_size 0.0 +300 11 training.label_smoothing 0.14152592458657984 +300 12 model.embedding_dim 2.0 +300 12 training.batch_size 1.0 +300 12 training.label_smoothing 0.04664745939196466 +300 13 model.embedding_dim 0.0 +300 13 training.batch_size 0.0 +300 13 
training.label_smoothing 0.3133600133088974 +300 14 model.embedding_dim 1.0 +300 14 training.batch_size 0.0 +300 14 training.label_smoothing 0.05011617484289032 +300 15 model.embedding_dim 0.0 +300 15 training.batch_size 1.0 +300 15 training.label_smoothing 0.3647342079798311 +300 16 model.embedding_dim 2.0 +300 16 training.batch_size 1.0 +300 16 training.label_smoothing 0.001211768709913095 +300 17 model.embedding_dim 0.0 +300 17 training.batch_size 2.0 +300 17 training.label_smoothing 0.3242260521657374 +300 18 model.embedding_dim 2.0 +300 18 training.batch_size 2.0 +300 18 training.label_smoothing 0.008910517279993282 +300 19 model.embedding_dim 0.0 +300 19 training.batch_size 0.0 +300 19 training.label_smoothing 0.8517302784102255 +300 20 model.embedding_dim 0.0 +300 20 training.batch_size 0.0 +300 20 training.label_smoothing 0.03970052313460466 +300 21 model.embedding_dim 2.0 +300 21 training.batch_size 1.0 +300 21 training.label_smoothing 0.1330350057788739 +300 22 model.embedding_dim 0.0 +300 22 training.batch_size 1.0 +300 22 training.label_smoothing 0.03262638346790424 +300 23 model.embedding_dim 0.0 +300 23 training.batch_size 1.0 +300 23 training.label_smoothing 0.0049515316867308635 +300 24 model.embedding_dim 0.0 +300 24 training.batch_size 1.0 +300 24 training.label_smoothing 0.033349031111755335 +300 25 model.embedding_dim 1.0 +300 25 training.batch_size 0.0 +300 25 training.label_smoothing 0.38498492130209294 +300 26 model.embedding_dim 2.0 +300 26 training.batch_size 1.0 +300 26 training.label_smoothing 0.0033379142963291457 +300 27 model.embedding_dim 0.0 +300 27 training.batch_size 2.0 +300 27 training.label_smoothing 0.0022725052467323847 +300 28 model.embedding_dim 2.0 +300 28 training.batch_size 1.0 +300 28 training.label_smoothing 0.987248871955856 +300 29 model.embedding_dim 0.0 +300 29 training.batch_size 1.0 +300 29 training.label_smoothing 0.004547968892098979 +300 30 model.embedding_dim 0.0 +300 30 training.batch_size 1.0 +300 30 
training.label_smoothing 0.005923976643863518 +300 31 model.embedding_dim 2.0 +300 31 training.batch_size 0.0 +300 31 training.label_smoothing 0.2516521356433795 +300 32 model.embedding_dim 2.0 +300 32 training.batch_size 1.0 +300 32 training.label_smoothing 0.5331599144273325 +300 33 model.embedding_dim 0.0 +300 33 training.batch_size 2.0 +300 33 training.label_smoothing 0.09424309678652296 +300 34 model.embedding_dim 2.0 +300 34 training.batch_size 0.0 +300 34 training.label_smoothing 0.016435823028919397 +300 35 model.embedding_dim 1.0 +300 35 training.batch_size 1.0 +300 35 training.label_smoothing 0.002793407675321217 +300 36 model.embedding_dim 0.0 +300 36 training.batch_size 0.0 +300 36 training.label_smoothing 0.2623808994030663 +300 37 model.embedding_dim 2.0 +300 37 training.batch_size 1.0 +300 37 training.label_smoothing 0.03570090747418051 +300 38 model.embedding_dim 2.0 +300 38 training.batch_size 1.0 +300 38 training.label_smoothing 0.4190159867725843 +300 39 model.embedding_dim 2.0 +300 39 training.batch_size 0.0 +300 39 training.label_smoothing 0.006360601352520656 +300 40 model.embedding_dim 2.0 +300 40 training.batch_size 0.0 +300 40 training.label_smoothing 0.008072459635534511 +300 41 model.embedding_dim 2.0 +300 41 training.batch_size 2.0 +300 41 training.label_smoothing 0.00447787220702672 +300 42 model.embedding_dim 1.0 +300 42 training.batch_size 1.0 +300 42 training.label_smoothing 0.22150431273041402 +300 43 model.embedding_dim 2.0 +300 43 training.batch_size 2.0 +300 43 training.label_smoothing 0.00643168045062695 +300 44 model.embedding_dim 1.0 +300 44 training.batch_size 2.0 +300 44 training.label_smoothing 0.14930770129637996 +300 45 model.embedding_dim 2.0 +300 45 training.batch_size 2.0 +300 45 training.label_smoothing 0.012606082793822023 +300 46 model.embedding_dim 2.0 +300 46 training.batch_size 1.0 +300 46 training.label_smoothing 0.029623044548336214 +300 47 model.embedding_dim 1.0 +300 47 training.batch_size 0.0 +300 47 
training.label_smoothing 0.2053575528394901 +300 48 model.embedding_dim 2.0 +300 48 training.batch_size 1.0 +300 48 training.label_smoothing 0.0021265630061091437 +300 49 model.embedding_dim 0.0 +300 49 training.batch_size 0.0 +300 49 training.label_smoothing 0.03980933119943532 +300 50 model.embedding_dim 2.0 +300 50 training.batch_size 2.0 +300 50 training.label_smoothing 0.03475969335994931 +300 51 model.embedding_dim 0.0 +300 51 training.batch_size 0.0 +300 51 training.label_smoothing 0.007370235186528223 +300 52 model.embedding_dim 2.0 +300 52 training.batch_size 0.0 +300 52 training.label_smoothing 0.0036858139813533398 +300 53 model.embedding_dim 2.0 +300 53 training.batch_size 0.0 +300 53 training.label_smoothing 0.10312606242893785 +300 54 model.embedding_dim 1.0 +300 54 training.batch_size 1.0 +300 54 training.label_smoothing 0.3423925858798691 +300 55 model.embedding_dim 1.0 +300 55 training.batch_size 1.0 +300 55 training.label_smoothing 0.31473736262513174 +300 56 model.embedding_dim 2.0 +300 56 training.batch_size 2.0 +300 56 training.label_smoothing 0.0010621836777025277 +300 57 model.embedding_dim 0.0 +300 57 training.batch_size 0.0 +300 57 training.label_smoothing 0.25877108450877623 +300 58 model.embedding_dim 1.0 +300 58 training.batch_size 0.0 +300 58 training.label_smoothing 0.001165490704418291 +300 59 model.embedding_dim 1.0 +300 59 training.batch_size 2.0 +300 59 training.label_smoothing 0.01877539428562426 +300 60 model.embedding_dim 2.0 +300 60 training.batch_size 2.0 +300 60 training.label_smoothing 0.006510391311043486 +300 61 model.embedding_dim 1.0 +300 61 training.batch_size 2.0 +300 61 training.label_smoothing 0.04557035871896033 +300 62 model.embedding_dim 1.0 +300 62 training.batch_size 2.0 +300 62 training.label_smoothing 0.2929034305320651 +300 63 model.embedding_dim 1.0 +300 63 training.batch_size 1.0 +300 63 training.label_smoothing 0.21673800581315686 +300 64 model.embedding_dim 0.0 +300 64 training.batch_size 2.0 +300 64 
training.label_smoothing 0.15308414967353312 +300 65 model.embedding_dim 2.0 +300 65 training.batch_size 0.0 +300 65 training.label_smoothing 0.013322569294159039 +300 66 model.embedding_dim 0.0 +300 66 training.batch_size 0.0 +300 66 training.label_smoothing 0.0019947270691683897 +300 67 model.embedding_dim 1.0 +300 67 training.batch_size 0.0 +300 67 training.label_smoothing 0.0017668559095330423 +300 68 model.embedding_dim 2.0 +300 68 training.batch_size 1.0 +300 68 training.label_smoothing 0.40329871482465746 +300 69 model.embedding_dim 1.0 +300 69 training.batch_size 2.0 +300 69 training.label_smoothing 0.06313071188994354 +300 70 model.embedding_dim 2.0 +300 70 training.batch_size 1.0 +300 70 training.label_smoothing 0.10915739672821546 +300 71 model.embedding_dim 1.0 +300 71 training.batch_size 2.0 +300 71 training.label_smoothing 0.0027389843047141405 +300 72 model.embedding_dim 0.0 +300 72 training.batch_size 0.0 +300 72 training.label_smoothing 0.5741258312240598 +300 73 model.embedding_dim 2.0 +300 73 training.batch_size 1.0 +300 73 training.label_smoothing 0.002366579123505136 +300 74 model.embedding_dim 2.0 +300 74 training.batch_size 0.0 +300 74 training.label_smoothing 0.010285130794157443 +300 75 model.embedding_dim 2.0 +300 75 training.batch_size 0.0 +300 75 training.label_smoothing 0.009557247799261044 +300 76 model.embedding_dim 0.0 +300 76 training.batch_size 2.0 +300 76 training.label_smoothing 0.002502890978285576 +300 77 model.embedding_dim 2.0 +300 77 training.batch_size 0.0 +300 77 training.label_smoothing 0.0015917155117004182 +300 78 model.embedding_dim 1.0 +300 78 training.batch_size 0.0 +300 78 training.label_smoothing 0.685653968181076 +300 79 model.embedding_dim 1.0 +300 79 training.batch_size 2.0 +300 79 training.label_smoothing 0.0034462549618379276 +300 80 model.embedding_dim 0.0 +300 80 training.batch_size 1.0 +300 80 training.label_smoothing 0.06844727223768725 +300 81 model.embedding_dim 2.0 +300 81 training.batch_size 2.0 +300 
81 training.label_smoothing 0.7440089393448063 +300 82 model.embedding_dim 2.0 +300 82 training.batch_size 0.0 +300 82 training.label_smoothing 0.0230681545521978 +300 83 model.embedding_dim 2.0 +300 83 training.batch_size 1.0 +300 83 training.label_smoothing 0.0010172847929795015 +300 84 model.embedding_dim 1.0 +300 84 training.batch_size 2.0 +300 84 training.label_smoothing 0.003690055328301953 +300 85 model.embedding_dim 0.0 +300 85 training.batch_size 2.0 +300 85 training.label_smoothing 0.26171729440197605 +300 86 model.embedding_dim 2.0 +300 86 training.batch_size 0.0 +300 86 training.label_smoothing 0.0778579934827243 +300 87 model.embedding_dim 0.0 +300 87 training.batch_size 0.0 +300 87 training.label_smoothing 0.03245502289932845 +300 88 model.embedding_dim 0.0 +300 88 training.batch_size 1.0 +300 88 training.label_smoothing 0.051137725738171054 +300 89 model.embedding_dim 1.0 +300 89 training.batch_size 0.0 +300 89 training.label_smoothing 0.025174764303299808 +300 90 model.embedding_dim 2.0 +300 90 training.batch_size 1.0 +300 90 training.label_smoothing 0.030119081130629912 +300 91 model.embedding_dim 2.0 +300 91 training.batch_size 0.0 +300 91 training.label_smoothing 0.11424119797957433 +300 92 model.embedding_dim 2.0 +300 92 training.batch_size 0.0 +300 92 training.label_smoothing 0.0010970108890668397 +300 93 model.embedding_dim 2.0 +300 93 training.batch_size 1.0 +300 93 training.label_smoothing 0.09499760894503208 +300 94 model.embedding_dim 0.0 +300 94 training.batch_size 2.0 +300 94 training.label_smoothing 0.002538663404961205 +300 95 model.embedding_dim 2.0 +300 95 training.batch_size 1.0 +300 95 training.label_smoothing 0.2752338655490277 +300 96 model.embedding_dim 1.0 +300 96 training.batch_size 0.0 +300 96 training.label_smoothing 0.017704823272445658 +300 97 model.embedding_dim 2.0 +300 97 training.batch_size 2.0 +300 97 training.label_smoothing 0.008576003457672099 +300 98 model.embedding_dim 1.0 +300 98 training.batch_size 0.0 +300 98 
training.label_smoothing 0.01935314129363103 +300 99 model.embedding_dim 0.0 +300 99 training.batch_size 2.0 +300 99 training.label_smoothing 0.0011036274837120219 +300 100 model.embedding_dim 2.0 +300 100 training.batch_size 1.0 +300 100 training.label_smoothing 0.006760678709882156 +300 1 dataset """kinships""" +300 1 model """hole""" +300 1 loss """bceaftersigmoid""" +300 1 regularizer """no""" +300 1 optimizer """adadelta""" +300 1 training_loop """lcwa""" +300 1 evaluator """rankbased""" +300 2 dataset """kinships""" +300 2 model """hole""" +300 2 loss """bceaftersigmoid""" +300 2 regularizer """no""" +300 2 optimizer """adadelta""" +300 2 training_loop """lcwa""" +300 2 evaluator """rankbased""" +300 3 dataset """kinships""" +300 3 model """hole""" +300 3 loss """bceaftersigmoid""" +300 3 regularizer """no""" +300 3 optimizer """adadelta""" +300 3 training_loop """lcwa""" +300 3 evaluator """rankbased""" +300 4 dataset """kinships""" +300 4 model """hole""" +300 4 loss """bceaftersigmoid""" +300 4 regularizer """no""" +300 4 optimizer """adadelta""" +300 4 training_loop """lcwa""" +300 4 evaluator """rankbased""" +300 5 dataset """kinships""" +300 5 model """hole""" +300 5 loss """bceaftersigmoid""" +300 5 regularizer """no""" +300 5 optimizer """adadelta""" +300 5 training_loop """lcwa""" +300 5 evaluator """rankbased""" +300 6 dataset """kinships""" +300 6 model """hole""" +300 6 loss """bceaftersigmoid""" +300 6 regularizer """no""" +300 6 optimizer """adadelta""" +300 6 training_loop """lcwa""" +300 6 evaluator """rankbased""" +300 7 dataset """kinships""" +300 7 model """hole""" +300 7 loss """bceaftersigmoid""" +300 7 regularizer """no""" +300 7 optimizer """adadelta""" +300 7 training_loop """lcwa""" +300 7 evaluator """rankbased""" +300 8 dataset """kinships""" +300 8 model """hole""" +300 8 loss """bceaftersigmoid""" +300 8 regularizer """no""" +300 8 optimizer """adadelta""" +300 8 training_loop """lcwa""" +300 8 evaluator """rankbased""" +300 9 
dataset """kinships""" +300 9 model """hole""" +300 9 loss """bceaftersigmoid""" +300 9 regularizer """no""" +300 9 optimizer """adadelta""" +300 9 training_loop """lcwa""" +300 9 evaluator """rankbased""" +300 10 dataset """kinships""" +300 10 model """hole""" +300 10 loss """bceaftersigmoid""" +300 10 regularizer """no""" +300 10 optimizer """adadelta""" +300 10 training_loop """lcwa""" +300 10 evaluator """rankbased""" +300 11 dataset """kinships""" +300 11 model """hole""" +300 11 loss """bceaftersigmoid""" +300 11 regularizer """no""" +300 11 optimizer """adadelta""" +300 11 training_loop """lcwa""" +300 11 evaluator """rankbased""" +300 12 dataset """kinships""" +300 12 model """hole""" +300 12 loss """bceaftersigmoid""" +300 12 regularizer """no""" +300 12 optimizer """adadelta""" +300 12 training_loop """lcwa""" +300 12 evaluator """rankbased""" +300 13 dataset """kinships""" +300 13 model """hole""" +300 13 loss """bceaftersigmoid""" +300 13 regularizer """no""" +300 13 optimizer """adadelta""" +300 13 training_loop """lcwa""" +300 13 evaluator """rankbased""" +300 14 dataset """kinships""" +300 14 model """hole""" +300 14 loss """bceaftersigmoid""" +300 14 regularizer """no""" +300 14 optimizer """adadelta""" +300 14 training_loop """lcwa""" +300 14 evaluator """rankbased""" +300 15 dataset """kinships""" +300 15 model """hole""" +300 15 loss """bceaftersigmoid""" +300 15 regularizer """no""" +300 15 optimizer """adadelta""" +300 15 training_loop """lcwa""" +300 15 evaluator """rankbased""" +300 16 dataset """kinships""" +300 16 model """hole""" +300 16 loss """bceaftersigmoid""" +300 16 regularizer """no""" +300 16 optimizer """adadelta""" +300 16 training_loop """lcwa""" +300 16 evaluator """rankbased""" +300 17 dataset """kinships""" +300 17 model """hole""" +300 17 loss """bceaftersigmoid""" +300 17 regularizer """no""" +300 17 optimizer """adadelta""" +300 17 training_loop """lcwa""" +300 17 evaluator """rankbased""" +300 18 dataset """kinships""" 
+300 18 model """hole""" +300 18 loss """bceaftersigmoid""" +300 18 regularizer """no""" +300 18 optimizer """adadelta""" +300 18 training_loop """lcwa""" +300 18 evaluator """rankbased""" +300 19 dataset """kinships""" +300 19 model """hole""" +300 19 loss """bceaftersigmoid""" +300 19 regularizer """no""" +300 19 optimizer """adadelta""" +300 19 training_loop """lcwa""" +300 19 evaluator """rankbased""" +300 20 dataset """kinships""" +300 20 model """hole""" +300 20 loss """bceaftersigmoid""" +300 20 regularizer """no""" +300 20 optimizer """adadelta""" +300 20 training_loop """lcwa""" +300 20 evaluator """rankbased""" +300 21 dataset """kinships""" +300 21 model """hole""" +300 21 loss """bceaftersigmoid""" +300 21 regularizer """no""" +300 21 optimizer """adadelta""" +300 21 training_loop """lcwa""" +300 21 evaluator """rankbased""" +300 22 dataset """kinships""" +300 22 model """hole""" +300 22 loss """bceaftersigmoid""" +300 22 regularizer """no""" +300 22 optimizer """adadelta""" +300 22 training_loop """lcwa""" +300 22 evaluator """rankbased""" +300 23 dataset """kinships""" +300 23 model """hole""" +300 23 loss """bceaftersigmoid""" +300 23 regularizer """no""" +300 23 optimizer """adadelta""" +300 23 training_loop """lcwa""" +300 23 evaluator """rankbased""" +300 24 dataset """kinships""" +300 24 model """hole""" +300 24 loss """bceaftersigmoid""" +300 24 regularizer """no""" +300 24 optimizer """adadelta""" +300 24 training_loop """lcwa""" +300 24 evaluator """rankbased""" +300 25 dataset """kinships""" +300 25 model """hole""" +300 25 loss """bceaftersigmoid""" +300 25 regularizer """no""" +300 25 optimizer """adadelta""" +300 25 training_loop """lcwa""" +300 25 evaluator """rankbased""" +300 26 dataset """kinships""" +300 26 model """hole""" +300 26 loss """bceaftersigmoid""" +300 26 regularizer """no""" +300 26 optimizer """adadelta""" +300 26 training_loop """lcwa""" +300 26 evaluator """rankbased""" +300 27 dataset """kinships""" +300 27 model 
"""hole""" +300 27 loss """bceaftersigmoid""" +300 27 regularizer """no""" +300 27 optimizer """adadelta""" +300 27 training_loop """lcwa""" +300 27 evaluator """rankbased""" +300 28 dataset """kinships""" +300 28 model """hole""" +300 28 loss """bceaftersigmoid""" +300 28 regularizer """no""" +300 28 optimizer """adadelta""" +300 28 training_loop """lcwa""" +300 28 evaluator """rankbased""" +300 29 dataset """kinships""" +300 29 model """hole""" +300 29 loss """bceaftersigmoid""" +300 29 regularizer """no""" +300 29 optimizer """adadelta""" +300 29 training_loop """lcwa""" +300 29 evaluator """rankbased""" +300 30 dataset """kinships""" +300 30 model """hole""" +300 30 loss """bceaftersigmoid""" +300 30 regularizer """no""" +300 30 optimizer """adadelta""" +300 30 training_loop """lcwa""" +300 30 evaluator """rankbased""" +300 31 dataset """kinships""" +300 31 model """hole""" +300 31 loss """bceaftersigmoid""" +300 31 regularizer """no""" +300 31 optimizer """adadelta""" +300 31 training_loop """lcwa""" +300 31 evaluator """rankbased""" +300 32 dataset """kinships""" +300 32 model """hole""" +300 32 loss """bceaftersigmoid""" +300 32 regularizer """no""" +300 32 optimizer """adadelta""" +300 32 training_loop """lcwa""" +300 32 evaluator """rankbased""" +300 33 dataset """kinships""" +300 33 model """hole""" +300 33 loss """bceaftersigmoid""" +300 33 regularizer """no""" +300 33 optimizer """adadelta""" +300 33 training_loop """lcwa""" +300 33 evaluator """rankbased""" +300 34 dataset """kinships""" +300 34 model """hole""" +300 34 loss """bceaftersigmoid""" +300 34 regularizer """no""" +300 34 optimizer """adadelta""" +300 34 training_loop """lcwa""" +300 34 evaluator """rankbased""" +300 35 dataset """kinships""" +300 35 model """hole""" +300 35 loss """bceaftersigmoid""" +300 35 regularizer """no""" +300 35 optimizer """adadelta""" +300 35 training_loop """lcwa""" +300 35 evaluator """rankbased""" +300 36 dataset """kinships""" +300 36 model """hole""" +300 36 
loss """bceaftersigmoid""" +300 36 regularizer """no""" +300 36 optimizer """adadelta""" +300 36 training_loop """lcwa""" +300 36 evaluator """rankbased""" +300 37 dataset """kinships""" +300 37 model """hole""" +300 37 loss """bceaftersigmoid""" +300 37 regularizer """no""" +300 37 optimizer """adadelta""" +300 37 training_loop """lcwa""" +300 37 evaluator """rankbased""" +300 38 dataset """kinships""" +300 38 model """hole""" +300 38 loss """bceaftersigmoid""" +300 38 regularizer """no""" +300 38 optimizer """adadelta""" +300 38 training_loop """lcwa""" +300 38 evaluator """rankbased""" +300 39 dataset """kinships""" +300 39 model """hole""" +300 39 loss """bceaftersigmoid""" +300 39 regularizer """no""" +300 39 optimizer """adadelta""" +300 39 training_loop """lcwa""" +300 39 evaluator """rankbased""" +300 40 dataset """kinships""" +300 40 model """hole""" +300 40 loss """bceaftersigmoid""" +300 40 regularizer """no""" +300 40 optimizer """adadelta""" +300 40 training_loop """lcwa""" +300 40 evaluator """rankbased""" +300 41 dataset """kinships""" +300 41 model """hole""" +300 41 loss """bceaftersigmoid""" +300 41 regularizer """no""" +300 41 optimizer """adadelta""" +300 41 training_loop """lcwa""" +300 41 evaluator """rankbased""" +300 42 dataset """kinships""" +300 42 model """hole""" +300 42 loss """bceaftersigmoid""" +300 42 regularizer """no""" +300 42 optimizer """adadelta""" +300 42 training_loop """lcwa""" +300 42 evaluator """rankbased""" +300 43 dataset """kinships""" +300 43 model """hole""" +300 43 loss """bceaftersigmoid""" +300 43 regularizer """no""" +300 43 optimizer """adadelta""" +300 43 training_loop """lcwa""" +300 43 evaluator """rankbased""" +300 44 dataset """kinships""" +300 44 model """hole""" +300 44 loss """bceaftersigmoid""" +300 44 regularizer """no""" +300 44 optimizer """adadelta""" +300 44 training_loop """lcwa""" +300 44 evaluator """rankbased""" +300 45 dataset """kinships""" +300 45 model """hole""" +300 45 loss 
"""bceaftersigmoid""" +300 45 regularizer """no""" +300 45 optimizer """adadelta""" +300 45 training_loop """lcwa""" +300 45 evaluator """rankbased""" +300 46 dataset """kinships""" +300 46 model """hole""" +300 46 loss """bceaftersigmoid""" +300 46 regularizer """no""" +300 46 optimizer """adadelta""" +300 46 training_loop """lcwa""" +300 46 evaluator """rankbased""" +300 47 dataset """kinships""" +300 47 model """hole""" +300 47 loss """bceaftersigmoid""" +300 47 regularizer """no""" +300 47 optimizer """adadelta""" +300 47 training_loop """lcwa""" +300 47 evaluator """rankbased""" +300 48 dataset """kinships""" +300 48 model """hole""" +300 48 loss """bceaftersigmoid""" +300 48 regularizer """no""" +300 48 optimizer """adadelta""" +300 48 training_loop """lcwa""" +300 48 evaluator """rankbased""" +300 49 dataset """kinships""" +300 49 model """hole""" +300 49 loss """bceaftersigmoid""" +300 49 regularizer """no""" +300 49 optimizer """adadelta""" +300 49 training_loop """lcwa""" +300 49 evaluator """rankbased""" +300 50 dataset """kinships""" +300 50 model """hole""" +300 50 loss """bceaftersigmoid""" +300 50 regularizer """no""" +300 50 optimizer """adadelta""" +300 50 training_loop """lcwa""" +300 50 evaluator """rankbased""" +300 51 dataset """kinships""" +300 51 model """hole""" +300 51 loss """bceaftersigmoid""" +300 51 regularizer """no""" +300 51 optimizer """adadelta""" +300 51 training_loop """lcwa""" +300 51 evaluator """rankbased""" +300 52 dataset """kinships""" +300 52 model """hole""" +300 52 loss """bceaftersigmoid""" +300 52 regularizer """no""" +300 52 optimizer """adadelta""" +300 52 training_loop """lcwa""" +300 52 evaluator """rankbased""" +300 53 dataset """kinships""" +300 53 model """hole""" +300 53 loss """bceaftersigmoid""" +300 53 regularizer """no""" +300 53 optimizer """adadelta""" +300 53 training_loop """lcwa""" +300 53 evaluator """rankbased""" +300 54 dataset """kinships""" +300 54 model """hole""" +300 54 loss 
"""bceaftersigmoid""" +300 54 regularizer """no""" +300 54 optimizer """adadelta""" +300 54 training_loop """lcwa""" +300 54 evaluator """rankbased""" +300 55 dataset """kinships""" +300 55 model """hole""" +300 55 loss """bceaftersigmoid""" +300 55 regularizer """no""" +300 55 optimizer """adadelta""" +300 55 training_loop """lcwa""" +300 55 evaluator """rankbased""" +300 56 dataset """kinships""" +300 56 model """hole""" +300 56 loss """bceaftersigmoid""" +300 56 regularizer """no""" +300 56 optimizer """adadelta""" +300 56 training_loop """lcwa""" +300 56 evaluator """rankbased""" +300 57 dataset """kinships""" +300 57 model """hole""" +300 57 loss """bceaftersigmoid""" +300 57 regularizer """no""" +300 57 optimizer """adadelta""" +300 57 training_loop """lcwa""" +300 57 evaluator """rankbased""" +300 58 dataset """kinships""" +300 58 model """hole""" +300 58 loss """bceaftersigmoid""" +300 58 regularizer """no""" +300 58 optimizer """adadelta""" +300 58 training_loop """lcwa""" +300 58 evaluator """rankbased""" +300 59 dataset """kinships""" +300 59 model """hole""" +300 59 loss """bceaftersigmoid""" +300 59 regularizer """no""" +300 59 optimizer """adadelta""" +300 59 training_loop """lcwa""" +300 59 evaluator """rankbased""" +300 60 dataset """kinships""" +300 60 model """hole""" +300 60 loss """bceaftersigmoid""" +300 60 regularizer """no""" +300 60 optimizer """adadelta""" +300 60 training_loop """lcwa""" +300 60 evaluator """rankbased""" +300 61 dataset """kinships""" +300 61 model """hole""" +300 61 loss """bceaftersigmoid""" +300 61 regularizer """no""" +300 61 optimizer """adadelta""" +300 61 training_loop """lcwa""" +300 61 evaluator """rankbased""" +300 62 dataset """kinships""" +300 62 model """hole""" +300 62 loss """bceaftersigmoid""" +300 62 regularizer """no""" +300 62 optimizer """adadelta""" +300 62 training_loop """lcwa""" +300 62 evaluator """rankbased""" +300 63 dataset """kinships""" +300 63 model """hole""" +300 63 loss 
"""bceaftersigmoid""" +300 63 regularizer """no""" +300 63 optimizer """adadelta""" +300 63 training_loop """lcwa""" +300 63 evaluator """rankbased""" +300 64 dataset """kinships""" +300 64 model """hole""" +300 64 loss """bceaftersigmoid""" +300 64 regularizer """no""" +300 64 optimizer """adadelta""" +300 64 training_loop """lcwa""" +300 64 evaluator """rankbased""" +300 65 dataset """kinships""" +300 65 model """hole""" +300 65 loss """bceaftersigmoid""" +300 65 regularizer """no""" +300 65 optimizer """adadelta""" +300 65 training_loop """lcwa""" +300 65 evaluator """rankbased""" +300 66 dataset """kinships""" +300 66 model """hole""" +300 66 loss """bceaftersigmoid""" +300 66 regularizer """no""" +300 66 optimizer """adadelta""" +300 66 training_loop """lcwa""" +300 66 evaluator """rankbased""" +300 67 dataset """kinships""" +300 67 model """hole""" +300 67 loss """bceaftersigmoid""" +300 67 regularizer """no""" +300 67 optimizer """adadelta""" +300 67 training_loop """lcwa""" +300 67 evaluator """rankbased""" +300 68 dataset """kinships""" +300 68 model """hole""" +300 68 loss """bceaftersigmoid""" +300 68 regularizer """no""" +300 68 optimizer """adadelta""" +300 68 training_loop """lcwa""" +300 68 evaluator """rankbased""" +300 69 dataset """kinships""" +300 69 model """hole""" +300 69 loss """bceaftersigmoid""" +300 69 regularizer """no""" +300 69 optimizer """adadelta""" +300 69 training_loop """lcwa""" +300 69 evaluator """rankbased""" +300 70 dataset """kinships""" +300 70 model """hole""" +300 70 loss """bceaftersigmoid""" +300 70 regularizer """no""" +300 70 optimizer """adadelta""" +300 70 training_loop """lcwa""" +300 70 evaluator """rankbased""" +300 71 dataset """kinships""" +300 71 model """hole""" +300 71 loss """bceaftersigmoid""" +300 71 regularizer """no""" +300 71 optimizer """adadelta""" +300 71 training_loop """lcwa""" +300 71 evaluator """rankbased""" +300 72 dataset """kinships""" +300 72 model """hole""" +300 72 loss 
"""bceaftersigmoid""" +300 72 regularizer """no""" +300 72 optimizer """adadelta""" +300 72 training_loop """lcwa""" +300 72 evaluator """rankbased""" +300 73 dataset """kinships""" +300 73 model """hole""" +300 73 loss """bceaftersigmoid""" +300 73 regularizer """no""" +300 73 optimizer """adadelta""" +300 73 training_loop """lcwa""" +300 73 evaluator """rankbased""" +300 74 dataset """kinships""" +300 74 model """hole""" +300 74 loss """bceaftersigmoid""" +300 74 regularizer """no""" +300 74 optimizer """adadelta""" +300 74 training_loop """lcwa""" +300 74 evaluator """rankbased""" +300 75 dataset """kinships""" +300 75 model """hole""" +300 75 loss """bceaftersigmoid""" +300 75 regularizer """no""" +300 75 optimizer """adadelta""" +300 75 training_loop """lcwa""" +300 75 evaluator """rankbased""" +300 76 dataset """kinships""" +300 76 model """hole""" +300 76 loss """bceaftersigmoid""" +300 76 regularizer """no""" +300 76 optimizer """adadelta""" +300 76 training_loop """lcwa""" +300 76 evaluator """rankbased""" +300 77 dataset """kinships""" +300 77 model """hole""" +300 77 loss """bceaftersigmoid""" +300 77 regularizer """no""" +300 77 optimizer """adadelta""" +300 77 training_loop """lcwa""" +300 77 evaluator """rankbased""" +300 78 dataset """kinships""" +300 78 model """hole""" +300 78 loss """bceaftersigmoid""" +300 78 regularizer """no""" +300 78 optimizer """adadelta""" +300 78 training_loop """lcwa""" +300 78 evaluator """rankbased""" +300 79 dataset """kinships""" +300 79 model """hole""" +300 79 loss """bceaftersigmoid""" +300 79 regularizer """no""" +300 79 optimizer """adadelta""" +300 79 training_loop """lcwa""" +300 79 evaluator """rankbased""" +300 80 dataset """kinships""" +300 80 model """hole""" +300 80 loss """bceaftersigmoid""" +300 80 regularizer """no""" +300 80 optimizer """adadelta""" +300 80 training_loop """lcwa""" +300 80 evaluator """rankbased""" +300 81 dataset """kinships""" +300 81 model """hole""" +300 81 loss 
"""bceaftersigmoid""" +300 81 regularizer """no""" +300 81 optimizer """adadelta""" +300 81 training_loop """lcwa""" +300 81 evaluator """rankbased""" +300 82 dataset """kinships""" +300 82 model """hole""" +300 82 loss """bceaftersigmoid""" +300 82 regularizer """no""" +300 82 optimizer """adadelta""" +300 82 training_loop """lcwa""" +300 82 evaluator """rankbased""" +300 83 dataset """kinships""" +300 83 model """hole""" +300 83 loss """bceaftersigmoid""" +300 83 regularizer """no""" +300 83 optimizer """adadelta""" +300 83 training_loop """lcwa""" +300 83 evaluator """rankbased""" +300 84 dataset """kinships""" +300 84 model """hole""" +300 84 loss """bceaftersigmoid""" +300 84 regularizer """no""" +300 84 optimizer """adadelta""" +300 84 training_loop """lcwa""" +300 84 evaluator """rankbased""" +300 85 dataset """kinships""" +300 85 model """hole""" +300 85 loss """bceaftersigmoid""" +300 85 regularizer """no""" +300 85 optimizer """adadelta""" +300 85 training_loop """lcwa""" +300 85 evaluator """rankbased""" +300 86 dataset """kinships""" +300 86 model """hole""" +300 86 loss """bceaftersigmoid""" +300 86 regularizer """no""" +300 86 optimizer """adadelta""" +300 86 training_loop """lcwa""" +300 86 evaluator """rankbased""" +300 87 dataset """kinships""" +300 87 model """hole""" +300 87 loss """bceaftersigmoid""" +300 87 regularizer """no""" +300 87 optimizer """adadelta""" +300 87 training_loop """lcwa""" +300 87 evaluator """rankbased""" +300 88 dataset """kinships""" +300 88 model """hole""" +300 88 loss """bceaftersigmoid""" +300 88 regularizer """no""" +300 88 optimizer """adadelta""" +300 88 training_loop """lcwa""" +300 88 evaluator """rankbased""" +300 89 dataset """kinships""" +300 89 model """hole""" +300 89 loss """bceaftersigmoid""" +300 89 regularizer """no""" +300 89 optimizer """adadelta""" +300 89 training_loop """lcwa""" +300 89 evaluator """rankbased""" +300 90 dataset """kinships""" +300 90 model """hole""" +300 90 loss 
"""bceaftersigmoid""" +300 90 regularizer """no""" +300 90 optimizer """adadelta""" +300 90 training_loop """lcwa""" +300 90 evaluator """rankbased""" +300 91 dataset """kinships""" +300 91 model """hole""" +300 91 loss """bceaftersigmoid""" +300 91 regularizer """no""" +300 91 optimizer """adadelta""" +300 91 training_loop """lcwa""" +300 91 evaluator """rankbased""" +300 92 dataset """kinships""" +300 92 model """hole""" +300 92 loss """bceaftersigmoid""" +300 92 regularizer """no""" +300 92 optimizer """adadelta""" +300 92 training_loop """lcwa""" +300 92 evaluator """rankbased""" +300 93 dataset """kinships""" +300 93 model """hole""" +300 93 loss """bceaftersigmoid""" +300 93 regularizer """no""" +300 93 optimizer """adadelta""" +300 93 training_loop """lcwa""" +300 93 evaluator """rankbased""" +300 94 dataset """kinships""" +300 94 model """hole""" +300 94 loss """bceaftersigmoid""" +300 94 regularizer """no""" +300 94 optimizer """adadelta""" +300 94 training_loop """lcwa""" +300 94 evaluator """rankbased""" +300 95 dataset """kinships""" +300 95 model """hole""" +300 95 loss """bceaftersigmoid""" +300 95 regularizer """no""" +300 95 optimizer """adadelta""" +300 95 training_loop """lcwa""" +300 95 evaluator """rankbased""" +300 96 dataset """kinships""" +300 96 model """hole""" +300 96 loss """bceaftersigmoid""" +300 96 regularizer """no""" +300 96 optimizer """adadelta""" +300 96 training_loop """lcwa""" +300 96 evaluator """rankbased""" +300 97 dataset """kinships""" +300 97 model """hole""" +300 97 loss """bceaftersigmoid""" +300 97 regularizer """no""" +300 97 optimizer """adadelta""" +300 97 training_loop """lcwa""" +300 97 evaluator """rankbased""" +300 98 dataset """kinships""" +300 98 model """hole""" +300 98 loss """bceaftersigmoid""" +300 98 regularizer """no""" +300 98 optimizer """adadelta""" +300 98 training_loop """lcwa""" +300 98 evaluator """rankbased""" +300 99 dataset """kinships""" +300 99 model """hole""" +300 99 loss 
"""bceaftersigmoid""" +300 99 regularizer """no""" +300 99 optimizer """adadelta""" +300 99 training_loop """lcwa""" +300 99 evaluator """rankbased""" +300 100 dataset """kinships""" +300 100 model """hole""" +300 100 loss """bceaftersigmoid""" +300 100 regularizer """no""" +300 100 optimizer """adadelta""" +300 100 training_loop """lcwa""" +300 100 evaluator """rankbased""" +301 1 model.embedding_dim 0.0 +301 1 training.batch_size 2.0 +301 1 training.label_smoothing 0.1567259563134438 +301 2 model.embedding_dim 2.0 +301 2 training.batch_size 2.0 +301 2 training.label_smoothing 0.19676591343389427 +301 3 model.embedding_dim 0.0 +301 3 training.batch_size 2.0 +301 3 training.label_smoothing 0.23134269024504292 +301 4 model.embedding_dim 0.0 +301 4 training.batch_size 2.0 +301 4 training.label_smoothing 0.010159859917231834 +301 5 model.embedding_dim 1.0 +301 5 training.batch_size 1.0 +301 5 training.label_smoothing 0.20200373426841647 +301 6 model.embedding_dim 0.0 +301 6 training.batch_size 2.0 +301 6 training.label_smoothing 0.0031231877560698356 +301 7 model.embedding_dim 1.0 +301 7 training.batch_size 0.0 +301 7 training.label_smoothing 0.8627037941801751 +301 8 model.embedding_dim 2.0 +301 8 training.batch_size 1.0 +301 8 training.label_smoothing 0.04476633803573183 +301 9 model.embedding_dim 1.0 +301 9 training.batch_size 0.0 +301 9 training.label_smoothing 0.11910752248709158 +301 10 model.embedding_dim 0.0 +301 10 training.batch_size 2.0 +301 10 training.label_smoothing 0.007433090585424237 +301 11 model.embedding_dim 1.0 +301 11 training.batch_size 1.0 +301 11 training.label_smoothing 0.024520708134076183 +301 12 model.embedding_dim 0.0 +301 12 training.batch_size 0.0 +301 12 training.label_smoothing 0.46930224267186876 +301 13 model.embedding_dim 2.0 +301 13 training.batch_size 0.0 +301 13 training.label_smoothing 0.034568467044461225 +301 14 model.embedding_dim 0.0 +301 14 training.batch_size 2.0 +301 14 training.label_smoothing 0.057903264183538604 +301 
15 model.embedding_dim 1.0 +301 15 training.batch_size 1.0 +301 15 training.label_smoothing 0.026162852137599647 +301 16 model.embedding_dim 1.0 +301 16 training.batch_size 2.0 +301 16 training.label_smoothing 0.027805633207382104 +301 17 model.embedding_dim 0.0 +301 17 training.batch_size 0.0 +301 17 training.label_smoothing 0.0781535467120463 +301 18 model.embedding_dim 1.0 +301 18 training.batch_size 0.0 +301 18 training.label_smoothing 0.0013083189044996025 +301 19 model.embedding_dim 1.0 +301 19 training.batch_size 2.0 +301 19 training.label_smoothing 0.01805900484790214 +301 20 model.embedding_dim 1.0 +301 20 training.batch_size 2.0 +301 20 training.label_smoothing 0.016815566639817328 +301 21 model.embedding_dim 1.0 +301 21 training.batch_size 0.0 +301 21 training.label_smoothing 0.5555716218167905 +301 22 model.embedding_dim 0.0 +301 22 training.batch_size 1.0 +301 22 training.label_smoothing 0.12320210952766368 +301 23 model.embedding_dim 2.0 +301 23 training.batch_size 0.0 +301 23 training.label_smoothing 0.0014440847741806879 +301 24 model.embedding_dim 2.0 +301 24 training.batch_size 1.0 +301 24 training.label_smoothing 0.0017312124793069001 +301 25 model.embedding_dim 2.0 +301 25 training.batch_size 2.0 +301 25 training.label_smoothing 0.26158396328615935 +301 26 model.embedding_dim 2.0 +301 26 training.batch_size 2.0 +301 26 training.label_smoothing 0.14581732217602525 +301 27 model.embedding_dim 2.0 +301 27 training.batch_size 2.0 +301 27 training.label_smoothing 0.07174208211476674 +301 28 model.embedding_dim 1.0 +301 28 training.batch_size 2.0 +301 28 training.label_smoothing 0.691088568416326 +301 29 model.embedding_dim 0.0 +301 29 training.batch_size 2.0 +301 29 training.label_smoothing 0.03781613239503641 +301 30 model.embedding_dim 1.0 +301 30 training.batch_size 2.0 +301 30 training.label_smoothing 0.01105239017772532 +301 31 model.embedding_dim 0.0 +301 31 training.batch_size 2.0 +301 31 training.label_smoothing 0.001564166959195369 +301 32 
model.embedding_dim 1.0 +301 32 training.batch_size 1.0 +301 32 training.label_smoothing 0.21102655269672496 +301 33 model.embedding_dim 1.0 +301 33 training.batch_size 0.0 +301 33 training.label_smoothing 0.976198565492081 +301 34 model.embedding_dim 1.0 +301 34 training.batch_size 0.0 +301 34 training.label_smoothing 0.005046733026175285 +301 35 model.embedding_dim 2.0 +301 35 training.batch_size 2.0 +301 35 training.label_smoothing 0.047623678385574866 +301 36 model.embedding_dim 0.0 +301 36 training.batch_size 0.0 +301 36 training.label_smoothing 0.19704929613402963 +301 37 model.embedding_dim 1.0 +301 37 training.batch_size 1.0 +301 37 training.label_smoothing 0.16883745086486424 +301 38 model.embedding_dim 2.0 +301 38 training.batch_size 1.0 +301 38 training.label_smoothing 0.001596749569915287 +301 39 model.embedding_dim 0.0 +301 39 training.batch_size 0.0 +301 39 training.label_smoothing 0.13334305427766815 +301 40 model.embedding_dim 1.0 +301 40 training.batch_size 2.0 +301 40 training.label_smoothing 0.011400189049438866 +301 41 model.embedding_dim 0.0 +301 41 training.batch_size 2.0 +301 41 training.label_smoothing 0.016999952071960672 +301 42 model.embedding_dim 1.0 +301 42 training.batch_size 2.0 +301 42 training.label_smoothing 0.002596567552426347 +301 43 model.embedding_dim 1.0 +301 43 training.batch_size 0.0 +301 43 training.label_smoothing 0.02423550893588053 +301 44 model.embedding_dim 1.0 +301 44 training.batch_size 2.0 +301 44 training.label_smoothing 0.5120082167846846 +301 45 model.embedding_dim 0.0 +301 45 training.batch_size 2.0 +301 45 training.label_smoothing 0.038893537543580436 +301 46 model.embedding_dim 0.0 +301 46 training.batch_size 0.0 +301 46 training.label_smoothing 0.2598165919852541 +301 47 model.embedding_dim 0.0 +301 47 training.batch_size 0.0 +301 47 training.label_smoothing 0.05742269371072488 +301 48 model.embedding_dim 1.0 +301 48 training.batch_size 1.0 +301 48 training.label_smoothing 0.7316519088404684 +301 49 
model.embedding_dim 1.0 +301 49 training.batch_size 1.0 +301 49 training.label_smoothing 0.005210408843720467 +301 50 model.embedding_dim 0.0 +301 50 training.batch_size 1.0 +301 50 training.label_smoothing 0.002047666496915085 +301 51 model.embedding_dim 2.0 +301 51 training.batch_size 0.0 +301 51 training.label_smoothing 0.005809171165774433 +301 52 model.embedding_dim 2.0 +301 52 training.batch_size 0.0 +301 52 training.label_smoothing 0.2980737374408689 +301 53 model.embedding_dim 1.0 +301 53 training.batch_size 0.0 +301 53 training.label_smoothing 0.04522529257063268 +301 54 model.embedding_dim 2.0 +301 54 training.batch_size 2.0 +301 54 training.label_smoothing 0.12732327439973012 +301 55 model.embedding_dim 1.0 +301 55 training.batch_size 0.0 +301 55 training.label_smoothing 0.008011976958186983 +301 56 model.embedding_dim 1.0 +301 56 training.batch_size 2.0 +301 56 training.label_smoothing 0.045425469763463136 +301 57 model.embedding_dim 0.0 +301 57 training.batch_size 1.0 +301 57 training.label_smoothing 0.025870985666503414 +301 58 model.embedding_dim 1.0 +301 58 training.batch_size 1.0 +301 58 training.label_smoothing 0.17306168693280746 +301 59 model.embedding_dim 2.0 +301 59 training.batch_size 1.0 +301 59 training.label_smoothing 0.027379049235335597 +301 60 model.embedding_dim 0.0 +301 60 training.batch_size 2.0 +301 60 training.label_smoothing 0.006095213276848376 +301 61 model.embedding_dim 0.0 +301 61 training.batch_size 2.0 +301 61 training.label_smoothing 0.29263050855579076 +301 62 model.embedding_dim 2.0 +301 62 training.batch_size 2.0 +301 62 training.label_smoothing 0.757999063666581 +301 63 model.embedding_dim 1.0 +301 63 training.batch_size 0.0 +301 63 training.label_smoothing 0.47087551072378864 +301 64 model.embedding_dim 0.0 +301 64 training.batch_size 0.0 +301 64 training.label_smoothing 0.15127684597330276 +301 65 model.embedding_dim 2.0 +301 65 training.batch_size 2.0 +301 65 training.label_smoothing 0.014785223586973954 +301 66 
model.embedding_dim 0.0 +301 66 training.batch_size 1.0 +301 66 training.label_smoothing 0.0071008746071833315 +301 67 model.embedding_dim 1.0 +301 67 training.batch_size 0.0 +301 67 training.label_smoothing 0.08368402532418578 +301 68 model.embedding_dim 0.0 +301 68 training.batch_size 0.0 +301 68 training.label_smoothing 0.0016206971124623213 +301 69 model.embedding_dim 2.0 +301 69 training.batch_size 2.0 +301 69 training.label_smoothing 0.02184443748363431 +301 70 model.embedding_dim 2.0 +301 70 training.batch_size 1.0 +301 70 training.label_smoothing 0.041301776542631216 +301 71 model.embedding_dim 2.0 +301 71 training.batch_size 0.0 +301 71 training.label_smoothing 0.9335582000486518 +301 72 model.embedding_dim 2.0 +301 72 training.batch_size 1.0 +301 72 training.label_smoothing 0.0015871733608570618 +301 73 model.embedding_dim 0.0 +301 73 training.batch_size 0.0 +301 73 training.label_smoothing 0.008866586767827924 +301 74 model.embedding_dim 1.0 +301 74 training.batch_size 2.0 +301 74 training.label_smoothing 0.0010566842742535604 +301 75 model.embedding_dim 1.0 +301 75 training.batch_size 0.0 +301 75 training.label_smoothing 0.9164244755627872 +301 76 model.embedding_dim 1.0 +301 76 training.batch_size 1.0 +301 76 training.label_smoothing 0.5571841874408628 +301 77 model.embedding_dim 2.0 +301 77 training.batch_size 1.0 +301 77 training.label_smoothing 0.15228468490176095 +301 78 model.embedding_dim 2.0 +301 78 training.batch_size 1.0 +301 78 training.label_smoothing 0.419896668621309 +301 79 model.embedding_dim 2.0 +301 79 training.batch_size 2.0 +301 79 training.label_smoothing 0.01759680348322122 +301 80 model.embedding_dim 0.0 +301 80 training.batch_size 1.0 +301 80 training.label_smoothing 0.2946375215906229 +301 81 model.embedding_dim 1.0 +301 81 training.batch_size 0.0 +301 81 training.label_smoothing 0.0193068439661032 +301 82 model.embedding_dim 2.0 +301 82 training.batch_size 2.0 +301 82 training.label_smoothing 0.0014516288706972124 +301 83 
model.embedding_dim 2.0 +301 83 training.batch_size 1.0 +301 83 training.label_smoothing 0.00604799408689625 +301 84 model.embedding_dim 2.0 +301 84 training.batch_size 1.0 +301 84 training.label_smoothing 0.009641091105161545 +301 85 model.embedding_dim 2.0 +301 85 training.batch_size 2.0 +301 85 training.label_smoothing 0.1207944561965176 +301 86 model.embedding_dim 0.0 +301 86 training.batch_size 2.0 +301 86 training.label_smoothing 0.543448099035072 +301 87 model.embedding_dim 1.0 +301 87 training.batch_size 0.0 +301 87 training.label_smoothing 0.6676088363907926 +301 88 model.embedding_dim 0.0 +301 88 training.batch_size 1.0 +301 88 training.label_smoothing 0.03793301363574288 +301 89 model.embedding_dim 1.0 +301 89 training.batch_size 2.0 +301 89 training.label_smoothing 0.0029295388971390876 +301 90 model.embedding_dim 2.0 +301 90 training.batch_size 1.0 +301 90 training.label_smoothing 0.003809121997840555 +301 91 model.embedding_dim 1.0 +301 91 training.batch_size 0.0 +301 91 training.label_smoothing 0.18102404494032384 +301 92 model.embedding_dim 2.0 +301 92 training.batch_size 0.0 +301 92 training.label_smoothing 0.2891096251130889 +301 93 model.embedding_dim 1.0 +301 93 training.batch_size 0.0 +301 93 training.label_smoothing 0.018433953417570068 +301 94 model.embedding_dim 2.0 +301 94 training.batch_size 2.0 +301 94 training.label_smoothing 0.0027197640596319417 +301 95 model.embedding_dim 1.0 +301 95 training.batch_size 0.0 +301 95 training.label_smoothing 0.6468109763313598 +301 96 model.embedding_dim 0.0 +301 96 training.batch_size 2.0 +301 96 training.label_smoothing 0.3369035563196634 +301 97 model.embedding_dim 0.0 +301 97 training.batch_size 2.0 +301 97 training.label_smoothing 0.014871618432039255 +301 98 model.embedding_dim 2.0 +301 98 training.batch_size 1.0 +301 98 training.label_smoothing 0.001702085850979639 +301 99 model.embedding_dim 0.0 +301 99 training.batch_size 2.0 +301 99 training.label_smoothing 0.04191700464278867 +301 100 
model.embedding_dim 0.0 +301 100 training.batch_size 2.0 +301 100 training.label_smoothing 0.0015134760074156396 +301 1 dataset """kinships""" +301 1 model """hole""" +301 1 loss """softplus""" +301 1 regularizer """no""" +301 1 optimizer """adadelta""" +301 1 training_loop """lcwa""" +301 1 evaluator """rankbased""" +301 2 dataset """kinships""" +301 2 model """hole""" +301 2 loss """softplus""" +301 2 regularizer """no""" +301 2 optimizer """adadelta""" +301 2 training_loop """lcwa""" +301 2 evaluator """rankbased""" +301 3 dataset """kinships""" +301 3 model """hole""" +301 3 loss """softplus""" +301 3 regularizer """no""" +301 3 optimizer """adadelta""" +301 3 training_loop """lcwa""" +301 3 evaluator """rankbased""" +301 4 dataset """kinships""" +301 4 model """hole""" +301 4 loss """softplus""" +301 4 regularizer """no""" +301 4 optimizer """adadelta""" +301 4 training_loop """lcwa""" +301 4 evaluator """rankbased""" +301 5 dataset """kinships""" +301 5 model """hole""" +301 5 loss """softplus""" +301 5 regularizer """no""" +301 5 optimizer """adadelta""" +301 5 training_loop """lcwa""" +301 5 evaluator """rankbased""" +301 6 dataset """kinships""" +301 6 model """hole""" +301 6 loss """softplus""" +301 6 regularizer """no""" +301 6 optimizer """adadelta""" +301 6 training_loop """lcwa""" +301 6 evaluator """rankbased""" +301 7 dataset """kinships""" +301 7 model """hole""" +301 7 loss """softplus""" +301 7 regularizer """no""" +301 7 optimizer """adadelta""" +301 7 training_loop """lcwa""" +301 7 evaluator """rankbased""" +301 8 dataset """kinships""" +301 8 model """hole""" +301 8 loss """softplus""" +301 8 regularizer """no""" +301 8 optimizer """adadelta""" +301 8 training_loop """lcwa""" +301 8 evaluator """rankbased""" +301 9 dataset """kinships""" +301 9 model """hole""" +301 9 loss """softplus""" +301 9 regularizer """no""" +301 9 optimizer """adadelta""" +301 9 training_loop """lcwa""" +301 9 evaluator """rankbased""" +301 10 dataset """kinships""" 
+301 10 model """hole""" +301 10 loss """softplus""" +301 10 regularizer """no""" +301 10 optimizer """adadelta""" +301 10 training_loop """lcwa""" +301 10 evaluator """rankbased""" +301 11 dataset """kinships""" +301 11 model """hole""" +301 11 loss """softplus""" +301 11 regularizer """no""" +301 11 optimizer """adadelta""" +301 11 training_loop """lcwa""" +301 11 evaluator """rankbased""" +301 12 dataset """kinships""" +301 12 model """hole""" +301 12 loss """softplus""" +301 12 regularizer """no""" +301 12 optimizer """adadelta""" +301 12 training_loop """lcwa""" +301 12 evaluator """rankbased""" +301 13 dataset """kinships""" +301 13 model """hole""" +301 13 loss """softplus""" +301 13 regularizer """no""" +301 13 optimizer """adadelta""" +301 13 training_loop """lcwa""" +301 13 evaluator """rankbased""" +301 14 dataset """kinships""" +301 14 model """hole""" +301 14 loss """softplus""" +301 14 regularizer """no""" +301 14 optimizer """adadelta""" +301 14 training_loop """lcwa""" +301 14 evaluator """rankbased""" +301 15 dataset """kinships""" +301 15 model """hole""" +301 15 loss """softplus""" +301 15 regularizer """no""" +301 15 optimizer """adadelta""" +301 15 training_loop """lcwa""" +301 15 evaluator """rankbased""" +301 16 dataset """kinships""" +301 16 model """hole""" +301 16 loss """softplus""" +301 16 regularizer """no""" +301 16 optimizer """adadelta""" +301 16 training_loop """lcwa""" +301 16 evaluator """rankbased""" +301 17 dataset """kinships""" +301 17 model """hole""" +301 17 loss """softplus""" +301 17 regularizer """no""" +301 17 optimizer """adadelta""" +301 17 training_loop """lcwa""" +301 17 evaluator """rankbased""" +301 18 dataset """kinships""" +301 18 model """hole""" +301 18 loss """softplus""" +301 18 regularizer """no""" +301 18 optimizer """adadelta""" +301 18 training_loop """lcwa""" +301 18 evaluator """rankbased""" +301 19 dataset """kinships""" +301 19 model """hole""" +301 19 loss """softplus""" +301 19 regularizer """no""" 
+301 19 optimizer """adadelta""" +301 19 training_loop """lcwa""" +301 19 evaluator """rankbased""" +301 20 dataset """kinships""" +301 20 model """hole""" +301 20 loss """softplus""" +301 20 regularizer """no""" +301 20 optimizer """adadelta""" +301 20 training_loop """lcwa""" +301 20 evaluator """rankbased""" +301 21 dataset """kinships""" +301 21 model """hole""" +301 21 loss """softplus""" +301 21 regularizer """no""" +301 21 optimizer """adadelta""" +301 21 training_loop """lcwa""" +301 21 evaluator """rankbased""" +301 22 dataset """kinships""" +301 22 model """hole""" +301 22 loss """softplus""" +301 22 regularizer """no""" +301 22 optimizer """adadelta""" +301 22 training_loop """lcwa""" +301 22 evaluator """rankbased""" +301 23 dataset """kinships""" +301 23 model """hole""" +301 23 loss """softplus""" +301 23 regularizer """no""" +301 23 optimizer """adadelta""" +301 23 training_loop """lcwa""" +301 23 evaluator """rankbased""" +301 24 dataset """kinships""" +301 24 model """hole""" +301 24 loss """softplus""" +301 24 regularizer """no""" +301 24 optimizer """adadelta""" +301 24 training_loop """lcwa""" +301 24 evaluator """rankbased""" +301 25 dataset """kinships""" +301 25 model """hole""" +301 25 loss """softplus""" +301 25 regularizer """no""" +301 25 optimizer """adadelta""" +301 25 training_loop """lcwa""" +301 25 evaluator """rankbased""" +301 26 dataset """kinships""" +301 26 model """hole""" +301 26 loss """softplus""" +301 26 regularizer """no""" +301 26 optimizer """adadelta""" +301 26 training_loop """lcwa""" +301 26 evaluator """rankbased""" +301 27 dataset """kinships""" +301 27 model """hole""" +301 27 loss """softplus""" +301 27 regularizer """no""" +301 27 optimizer """adadelta""" +301 27 training_loop """lcwa""" +301 27 evaluator """rankbased""" +301 28 dataset """kinships""" +301 28 model """hole""" +301 28 loss """softplus""" +301 28 regularizer """no""" +301 28 optimizer """adadelta""" +301 28 training_loop """lcwa""" +301 28 
evaluator """rankbased""" +301 29 dataset """kinships""" +301 29 model """hole""" +301 29 loss """softplus""" +301 29 regularizer """no""" +301 29 optimizer """adadelta""" +301 29 training_loop """lcwa""" +301 29 evaluator """rankbased""" +301 30 dataset """kinships""" +301 30 model """hole""" +301 30 loss """softplus""" +301 30 regularizer """no""" +301 30 optimizer """adadelta""" +301 30 training_loop """lcwa""" +301 30 evaluator """rankbased""" +301 31 dataset """kinships""" +301 31 model """hole""" +301 31 loss """softplus""" +301 31 regularizer """no""" +301 31 optimizer """adadelta""" +301 31 training_loop """lcwa""" +301 31 evaluator """rankbased""" +301 32 dataset """kinships""" +301 32 model """hole""" +301 32 loss """softplus""" +301 32 regularizer """no""" +301 32 optimizer """adadelta""" +301 32 training_loop """lcwa""" +301 32 evaluator """rankbased""" +301 33 dataset """kinships""" +301 33 model """hole""" +301 33 loss """softplus""" +301 33 regularizer """no""" +301 33 optimizer """adadelta""" +301 33 training_loop """lcwa""" +301 33 evaluator """rankbased""" +301 34 dataset """kinships""" +301 34 model """hole""" +301 34 loss """softplus""" +301 34 regularizer """no""" +301 34 optimizer """adadelta""" +301 34 training_loop """lcwa""" +301 34 evaluator """rankbased""" +301 35 dataset """kinships""" +301 35 model """hole""" +301 35 loss """softplus""" +301 35 regularizer """no""" +301 35 optimizer """adadelta""" +301 35 training_loop """lcwa""" +301 35 evaluator """rankbased""" +301 36 dataset """kinships""" +301 36 model """hole""" +301 36 loss """softplus""" +301 36 regularizer """no""" +301 36 optimizer """adadelta""" +301 36 training_loop """lcwa""" +301 36 evaluator """rankbased""" +301 37 dataset """kinships""" +301 37 model """hole""" +301 37 loss """softplus""" +301 37 regularizer """no""" +301 37 optimizer """adadelta""" +301 37 training_loop """lcwa""" +301 37 evaluator """rankbased""" +301 38 dataset """kinships""" +301 38 model """hole""" 
+301 38 loss """softplus""" +301 38 regularizer """no""" +301 38 optimizer """adadelta""" +301 38 training_loop """lcwa""" +301 38 evaluator """rankbased""" +301 39 dataset """kinships""" +301 39 model """hole""" +301 39 loss """softplus""" +301 39 regularizer """no""" +301 39 optimizer """adadelta""" +301 39 training_loop """lcwa""" +301 39 evaluator """rankbased""" +301 40 dataset """kinships""" +301 40 model """hole""" +301 40 loss """softplus""" +301 40 regularizer """no""" +301 40 optimizer """adadelta""" +301 40 training_loop """lcwa""" +301 40 evaluator """rankbased""" +301 41 dataset """kinships""" +301 41 model """hole""" +301 41 loss """softplus""" +301 41 regularizer """no""" +301 41 optimizer """adadelta""" +301 41 training_loop """lcwa""" +301 41 evaluator """rankbased""" +301 42 dataset """kinships""" +301 42 model """hole""" +301 42 loss """softplus""" +301 42 regularizer """no""" +301 42 optimizer """adadelta""" +301 42 training_loop """lcwa""" +301 42 evaluator """rankbased""" +301 43 dataset """kinships""" +301 43 model """hole""" +301 43 loss """softplus""" +301 43 regularizer """no""" +301 43 optimizer """adadelta""" +301 43 training_loop """lcwa""" +301 43 evaluator """rankbased""" +301 44 dataset """kinships""" +301 44 model """hole""" +301 44 loss """softplus""" +301 44 regularizer """no""" +301 44 optimizer """adadelta""" +301 44 training_loop """lcwa""" +301 44 evaluator """rankbased""" +301 45 dataset """kinships""" +301 45 model """hole""" +301 45 loss """softplus""" +301 45 regularizer """no""" +301 45 optimizer """adadelta""" +301 45 training_loop """lcwa""" +301 45 evaluator """rankbased""" +301 46 dataset """kinships""" +301 46 model """hole""" +301 46 loss """softplus""" +301 46 regularizer """no""" +301 46 optimizer """adadelta""" +301 46 training_loop """lcwa""" +301 46 evaluator """rankbased""" +301 47 dataset """kinships""" +301 47 model """hole""" +301 47 loss """softplus""" +301 47 regularizer """no""" +301 47 optimizer 
"""adadelta""" +301 47 training_loop """lcwa""" +301 47 evaluator """rankbased""" +301 48 dataset """kinships""" +301 48 model """hole""" +301 48 loss """softplus""" +301 48 regularizer """no""" +301 48 optimizer """adadelta""" +301 48 training_loop """lcwa""" +301 48 evaluator """rankbased""" +301 49 dataset """kinships""" +301 49 model """hole""" +301 49 loss """softplus""" +301 49 regularizer """no""" +301 49 optimizer """adadelta""" +301 49 training_loop """lcwa""" +301 49 evaluator """rankbased""" +301 50 dataset """kinships""" +301 50 model """hole""" +301 50 loss """softplus""" +301 50 regularizer """no""" +301 50 optimizer """adadelta""" +301 50 training_loop """lcwa""" +301 50 evaluator """rankbased""" +301 51 dataset """kinships""" +301 51 model """hole""" +301 51 loss """softplus""" +301 51 regularizer """no""" +301 51 optimizer """adadelta""" +301 51 training_loop """lcwa""" +301 51 evaluator """rankbased""" +301 52 dataset """kinships""" +301 52 model """hole""" +301 52 loss """softplus""" +301 52 regularizer """no""" +301 52 optimizer """adadelta""" +301 52 training_loop """lcwa""" +301 52 evaluator """rankbased""" +301 53 dataset """kinships""" +301 53 model """hole""" +301 53 loss """softplus""" +301 53 regularizer """no""" +301 53 optimizer """adadelta""" +301 53 training_loop """lcwa""" +301 53 evaluator """rankbased""" +301 54 dataset """kinships""" +301 54 model """hole""" +301 54 loss """softplus""" +301 54 regularizer """no""" +301 54 optimizer """adadelta""" +301 54 training_loop """lcwa""" +301 54 evaluator """rankbased""" +301 55 dataset """kinships""" +301 55 model """hole""" +301 55 loss """softplus""" +301 55 regularizer """no""" +301 55 optimizer """adadelta""" +301 55 training_loop """lcwa""" +301 55 evaluator """rankbased""" +301 56 dataset """kinships""" +301 56 model """hole""" +301 56 loss """softplus""" +301 56 regularizer """no""" +301 56 optimizer """adadelta""" +301 56 training_loop """lcwa""" +301 56 evaluator """rankbased""" 
+301 57 dataset """kinships""" +301 57 model """hole""" +301 57 loss """softplus""" +301 57 regularizer """no""" +301 57 optimizer """adadelta""" +301 57 training_loop """lcwa""" +301 57 evaluator """rankbased""" +301 58 dataset """kinships""" +301 58 model """hole""" +301 58 loss """softplus""" +301 58 regularizer """no""" +301 58 optimizer """adadelta""" +301 58 training_loop """lcwa""" +301 58 evaluator """rankbased""" +301 59 dataset """kinships""" +301 59 model """hole""" +301 59 loss """softplus""" +301 59 regularizer """no""" +301 59 optimizer """adadelta""" +301 59 training_loop """lcwa""" +301 59 evaluator """rankbased""" +301 60 dataset """kinships""" +301 60 model """hole""" +301 60 loss """softplus""" +301 60 regularizer """no""" +301 60 optimizer """adadelta""" +301 60 training_loop """lcwa""" +301 60 evaluator """rankbased""" +301 61 dataset """kinships""" +301 61 model """hole""" +301 61 loss """softplus""" +301 61 regularizer """no""" +301 61 optimizer """adadelta""" +301 61 training_loop """lcwa""" +301 61 evaluator """rankbased""" +301 62 dataset """kinships""" +301 62 model """hole""" +301 62 loss """softplus""" +301 62 regularizer """no""" +301 62 optimizer """adadelta""" +301 62 training_loop """lcwa""" +301 62 evaluator """rankbased""" +301 63 dataset """kinships""" +301 63 model """hole""" +301 63 loss """softplus""" +301 63 regularizer """no""" +301 63 optimizer """adadelta""" +301 63 training_loop """lcwa""" +301 63 evaluator """rankbased""" +301 64 dataset """kinships""" +301 64 model """hole""" +301 64 loss """softplus""" +301 64 regularizer """no""" +301 64 optimizer """adadelta""" +301 64 training_loop """lcwa""" +301 64 evaluator """rankbased""" +301 65 dataset """kinships""" +301 65 model """hole""" +301 65 loss """softplus""" +301 65 regularizer """no""" +301 65 optimizer """adadelta""" +301 65 training_loop """lcwa""" +301 65 evaluator """rankbased""" +301 66 dataset """kinships""" +301 66 model """hole""" +301 66 loss 
"""softplus""" +301 66 regularizer """no""" +301 66 optimizer """adadelta""" +301 66 training_loop """lcwa""" +301 66 evaluator """rankbased""" +301 67 dataset """kinships""" +301 67 model """hole""" +301 67 loss """softplus""" +301 67 regularizer """no""" +301 67 optimizer """adadelta""" +301 67 training_loop """lcwa""" +301 67 evaluator """rankbased""" +301 68 dataset """kinships""" +301 68 model """hole""" +301 68 loss """softplus""" +301 68 regularizer """no""" +301 68 optimizer """adadelta""" +301 68 training_loop """lcwa""" +301 68 evaluator """rankbased""" +301 69 dataset """kinships""" +301 69 model """hole""" +301 69 loss """softplus""" +301 69 regularizer """no""" +301 69 optimizer """adadelta""" +301 69 training_loop """lcwa""" +301 69 evaluator """rankbased""" +301 70 dataset """kinships""" +301 70 model """hole""" +301 70 loss """softplus""" +301 70 regularizer """no""" +301 70 optimizer """adadelta""" +301 70 training_loop """lcwa""" +301 70 evaluator """rankbased""" +301 71 dataset """kinships""" +301 71 model """hole""" +301 71 loss """softplus""" +301 71 regularizer """no""" +301 71 optimizer """adadelta""" +301 71 training_loop """lcwa""" +301 71 evaluator """rankbased""" +301 72 dataset """kinships""" +301 72 model """hole""" +301 72 loss """softplus""" +301 72 regularizer """no""" +301 72 optimizer """adadelta""" +301 72 training_loop """lcwa""" +301 72 evaluator """rankbased""" +301 73 dataset """kinships""" +301 73 model """hole""" +301 73 loss """softplus""" +301 73 regularizer """no""" +301 73 optimizer """adadelta""" +301 73 training_loop """lcwa""" +301 73 evaluator """rankbased""" +301 74 dataset """kinships""" +301 74 model """hole""" +301 74 loss """softplus""" +301 74 regularizer """no""" +301 74 optimizer """adadelta""" +301 74 training_loop """lcwa""" +301 74 evaluator """rankbased""" +301 75 dataset """kinships""" +301 75 model """hole""" +301 75 loss """softplus""" +301 75 regularizer """no""" +301 75 optimizer """adadelta""" +301 
75 training_loop """lcwa""" +301 75 evaluator """rankbased""" +301 76 dataset """kinships""" +301 76 model """hole""" +301 76 loss """softplus""" +301 76 regularizer """no""" +301 76 optimizer """adadelta""" +301 76 training_loop """lcwa""" +301 76 evaluator """rankbased""" +301 77 dataset """kinships""" +301 77 model """hole""" +301 77 loss """softplus""" +301 77 regularizer """no""" +301 77 optimizer """adadelta""" +301 77 training_loop """lcwa""" +301 77 evaluator """rankbased""" +301 78 dataset """kinships""" +301 78 model """hole""" +301 78 loss """softplus""" +301 78 regularizer """no""" +301 78 optimizer """adadelta""" +301 78 training_loop """lcwa""" +301 78 evaluator """rankbased""" +301 79 dataset """kinships""" +301 79 model """hole""" +301 79 loss """softplus""" +301 79 regularizer """no""" +301 79 optimizer """adadelta""" +301 79 training_loop """lcwa""" +301 79 evaluator """rankbased""" +301 80 dataset """kinships""" +301 80 model """hole""" +301 80 loss """softplus""" +301 80 regularizer """no""" +301 80 optimizer """adadelta""" +301 80 training_loop """lcwa""" +301 80 evaluator """rankbased""" +301 81 dataset """kinships""" +301 81 model """hole""" +301 81 loss """softplus""" +301 81 regularizer """no""" +301 81 optimizer """adadelta""" +301 81 training_loop """lcwa""" +301 81 evaluator """rankbased""" +301 82 dataset """kinships""" +301 82 model """hole""" +301 82 loss """softplus""" +301 82 regularizer """no""" +301 82 optimizer """adadelta""" +301 82 training_loop """lcwa""" +301 82 evaluator """rankbased""" +301 83 dataset """kinships""" +301 83 model """hole""" +301 83 loss """softplus""" +301 83 regularizer """no""" +301 83 optimizer """adadelta""" +301 83 training_loop """lcwa""" +301 83 evaluator """rankbased""" +301 84 dataset """kinships""" +301 84 model """hole""" +301 84 loss """softplus""" +301 84 regularizer """no""" +301 84 optimizer """adadelta""" +301 84 training_loop """lcwa""" +301 84 evaluator """rankbased""" +301 85 dataset 
"""kinships""" +301 85 model """hole""" +301 85 loss """softplus""" +301 85 regularizer """no""" +301 85 optimizer """adadelta""" +301 85 training_loop """lcwa""" +301 85 evaluator """rankbased""" +301 86 dataset """kinships""" +301 86 model """hole""" +301 86 loss """softplus""" +301 86 regularizer """no""" +301 86 optimizer """adadelta""" +301 86 training_loop """lcwa""" +301 86 evaluator """rankbased""" +301 87 dataset """kinships""" +301 87 model """hole""" +301 87 loss """softplus""" +301 87 regularizer """no""" +301 87 optimizer """adadelta""" +301 87 training_loop """lcwa""" +301 87 evaluator """rankbased""" +301 88 dataset """kinships""" +301 88 model """hole""" +301 88 loss """softplus""" +301 88 regularizer """no""" +301 88 optimizer """adadelta""" +301 88 training_loop """lcwa""" +301 88 evaluator """rankbased""" +301 89 dataset """kinships""" +301 89 model """hole""" +301 89 loss """softplus""" +301 89 regularizer """no""" +301 89 optimizer """adadelta""" +301 89 training_loop """lcwa""" +301 89 evaluator """rankbased""" +301 90 dataset """kinships""" +301 90 model """hole""" +301 90 loss """softplus""" +301 90 regularizer """no""" +301 90 optimizer """adadelta""" +301 90 training_loop """lcwa""" +301 90 evaluator """rankbased""" +301 91 dataset """kinships""" +301 91 model """hole""" +301 91 loss """softplus""" +301 91 regularizer """no""" +301 91 optimizer """adadelta""" +301 91 training_loop """lcwa""" +301 91 evaluator """rankbased""" +301 92 dataset """kinships""" +301 92 model """hole""" +301 92 loss """softplus""" +301 92 regularizer """no""" +301 92 optimizer """adadelta""" +301 92 training_loop """lcwa""" +301 92 evaluator """rankbased""" +301 93 dataset """kinships""" +301 93 model """hole""" +301 93 loss """softplus""" +301 93 regularizer """no""" +301 93 optimizer """adadelta""" +301 93 training_loop """lcwa""" +301 93 evaluator """rankbased""" +301 94 dataset """kinships""" +301 94 model """hole""" +301 94 loss """softplus""" +301 94 
regularizer """no""" +301 94 optimizer """adadelta""" +301 94 training_loop """lcwa""" +301 94 evaluator """rankbased""" +301 95 dataset """kinships""" +301 95 model """hole""" +301 95 loss """softplus""" +301 95 regularizer """no""" +301 95 optimizer """adadelta""" +301 95 training_loop """lcwa""" +301 95 evaluator """rankbased""" +301 96 dataset """kinships""" +301 96 model """hole""" +301 96 loss """softplus""" +301 96 regularizer """no""" +301 96 optimizer """adadelta""" +301 96 training_loop """lcwa""" +301 96 evaluator """rankbased""" +301 97 dataset """kinships""" +301 97 model """hole""" +301 97 loss """softplus""" +301 97 regularizer """no""" +301 97 optimizer """adadelta""" +301 97 training_loop """lcwa""" +301 97 evaluator """rankbased""" +301 98 dataset """kinships""" +301 98 model """hole""" +301 98 loss """softplus""" +301 98 regularizer """no""" +301 98 optimizer """adadelta""" +301 98 training_loop """lcwa""" +301 98 evaluator """rankbased""" +301 99 dataset """kinships""" +301 99 model """hole""" +301 99 loss """softplus""" +301 99 regularizer """no""" +301 99 optimizer """adadelta""" +301 99 training_loop """lcwa""" +301 99 evaluator """rankbased""" +301 100 dataset """kinships""" +301 100 model """hole""" +301 100 loss """softplus""" +301 100 regularizer """no""" +301 100 optimizer """adadelta""" +301 100 training_loop """lcwa""" +301 100 evaluator """rankbased""" +302 1 model.embedding_dim 2.0 +302 1 training.batch_size 2.0 +302 1 training.label_smoothing 0.29117908737050624 +302 2 model.embedding_dim 0.0 +302 2 training.batch_size 0.0 +302 2 training.label_smoothing 0.0023769693277758347 +302 3 model.embedding_dim 0.0 +302 3 training.batch_size 2.0 +302 3 training.label_smoothing 0.4189541652787693 +302 4 model.embedding_dim 2.0 +302 4 training.batch_size 1.0 +302 4 training.label_smoothing 0.0012385812832952634 +302 5 model.embedding_dim 1.0 +302 5 training.batch_size 2.0 +302 5 training.label_smoothing 0.030299256599851505 +302 6 
model.embedding_dim 2.0 +302 6 training.batch_size 1.0 +302 6 training.label_smoothing 0.0011965859491226529 +302 7 model.embedding_dim 2.0 +302 7 training.batch_size 2.0 +302 7 training.label_smoothing 0.0014776031034729085 +302 8 model.embedding_dim 0.0 +302 8 training.batch_size 2.0 +302 8 training.label_smoothing 0.014113240265658189 +302 9 model.embedding_dim 0.0 +302 9 training.batch_size 1.0 +302 9 training.label_smoothing 0.016579073868762992 +302 10 model.embedding_dim 1.0 +302 10 training.batch_size 2.0 +302 10 training.label_smoothing 0.08491073006262091 +302 11 model.embedding_dim 2.0 +302 11 training.batch_size 1.0 +302 11 training.label_smoothing 0.0025194152004904863 +302 12 model.embedding_dim 2.0 +302 12 training.batch_size 0.0 +302 12 training.label_smoothing 0.2575572909385943 +302 13 model.embedding_dim 2.0 +302 13 training.batch_size 0.0 +302 13 training.label_smoothing 0.004799514867447024 +302 14 model.embedding_dim 2.0 +302 14 training.batch_size 2.0 +302 14 training.label_smoothing 0.7109267717185038 +302 15 model.embedding_dim 1.0 +302 15 training.batch_size 0.0 +302 15 training.label_smoothing 0.7662481718595066 +302 16 model.embedding_dim 0.0 +302 16 training.batch_size 1.0 +302 16 training.label_smoothing 0.04437616863214666 +302 17 model.embedding_dim 1.0 +302 17 training.batch_size 0.0 +302 17 training.label_smoothing 0.02676326605520772 +302 18 model.embedding_dim 2.0 +302 18 training.batch_size 2.0 +302 18 training.label_smoothing 0.014441108436636417 +302 19 model.embedding_dim 2.0 +302 19 training.batch_size 1.0 +302 19 training.label_smoothing 0.0037876980953076627 +302 20 model.embedding_dim 2.0 +302 20 training.batch_size 0.0 +302 20 training.label_smoothing 0.12413106574578532 +302 21 model.embedding_dim 2.0 +302 21 training.batch_size 1.0 +302 21 training.label_smoothing 0.07418249921048357 +302 22 model.embedding_dim 1.0 +302 22 training.batch_size 1.0 +302 22 training.label_smoothing 0.0016384593573980696 +302 23 
model.embedding_dim 0.0 +302 23 training.batch_size 2.0 +302 23 training.label_smoothing 0.058090756524218246 +302 24 model.embedding_dim 2.0 +302 24 training.batch_size 2.0 +302 24 training.label_smoothing 0.1916877693259822 +302 25 model.embedding_dim 1.0 +302 25 training.batch_size 1.0 +302 25 training.label_smoothing 0.22075956051772846 +302 26 model.embedding_dim 1.0 +302 26 training.batch_size 2.0 +302 26 training.label_smoothing 0.0026790406446228917 +302 27 model.embedding_dim 1.0 +302 27 training.batch_size 2.0 +302 27 training.label_smoothing 0.02515262037716162 +302 28 model.embedding_dim 2.0 +302 28 training.batch_size 0.0 +302 28 training.label_smoothing 0.858099231631176 +302 29 model.embedding_dim 1.0 +302 29 training.batch_size 2.0 +302 29 training.label_smoothing 0.009030713390539511 +302 30 model.embedding_dim 2.0 +302 30 training.batch_size 1.0 +302 30 training.label_smoothing 0.0012997038552765513 +302 31 model.embedding_dim 0.0 +302 31 training.batch_size 0.0 +302 31 training.label_smoothing 0.3137956557265673 +302 32 model.embedding_dim 1.0 +302 32 training.batch_size 1.0 +302 32 training.label_smoothing 0.28590988275047113 +302 33 model.embedding_dim 1.0 +302 33 training.batch_size 0.0 +302 33 training.label_smoothing 0.8443908011942153 +302 34 model.embedding_dim 0.0 +302 34 training.batch_size 1.0 +302 34 training.label_smoothing 0.0011919181542839896 +302 35 model.embedding_dim 2.0 +302 35 training.batch_size 2.0 +302 35 training.label_smoothing 0.03315938858020783 +302 36 model.embedding_dim 2.0 +302 36 training.batch_size 0.0 +302 36 training.label_smoothing 0.015650170633484516 +302 37 model.embedding_dim 1.0 +302 37 training.batch_size 0.0 +302 37 training.label_smoothing 0.004518971410550907 +302 38 model.embedding_dim 1.0 +302 38 training.batch_size 2.0 +302 38 training.label_smoothing 0.10914102056984043 +302 39 model.embedding_dim 1.0 +302 39 training.batch_size 1.0 +302 39 training.label_smoothing 0.11000217307950517 +302 40 
model.embedding_dim 2.0 +302 40 training.batch_size 0.0 +302 40 training.label_smoothing 0.0026550601464359995 +302 41 model.embedding_dim 0.0 +302 41 training.batch_size 2.0 +302 41 training.label_smoothing 0.10060205010769138 +302 42 model.embedding_dim 0.0 +302 42 training.batch_size 2.0 +302 42 training.label_smoothing 0.013042040476233065 +302 43 model.embedding_dim 1.0 +302 43 training.batch_size 1.0 +302 43 training.label_smoothing 0.05057224448061347 +302 44 model.embedding_dim 2.0 +302 44 training.batch_size 0.0 +302 44 training.label_smoothing 0.3198062361665394 +302 45 model.embedding_dim 0.0 +302 45 training.batch_size 1.0 +302 45 training.label_smoothing 0.0033419798182455756 +302 46 model.embedding_dim 1.0 +302 46 training.batch_size 1.0 +302 46 training.label_smoothing 0.07597724013464917 +302 47 model.embedding_dim 2.0 +302 47 training.batch_size 2.0 +302 47 training.label_smoothing 0.017044000965893702 +302 48 model.embedding_dim 2.0 +302 48 training.batch_size 2.0 +302 48 training.label_smoothing 0.0019570420852360933 +302 49 model.embedding_dim 0.0 +302 49 training.batch_size 0.0 +302 49 training.label_smoothing 0.14771505915836342 +302 50 model.embedding_dim 2.0 +302 50 training.batch_size 2.0 +302 50 training.label_smoothing 0.014369987555180812 +302 51 model.embedding_dim 2.0 +302 51 training.batch_size 1.0 +302 51 training.label_smoothing 0.12341613171888487 +302 52 model.embedding_dim 0.0 +302 52 training.batch_size 1.0 +302 52 training.label_smoothing 0.16836568745841873 +302 53 model.embedding_dim 2.0 +302 53 training.batch_size 0.0 +302 53 training.label_smoothing 0.9206156366493478 +302 54 model.embedding_dim 0.0 +302 54 training.batch_size 1.0 +302 54 training.label_smoothing 0.9360173851824526 +302 55 model.embedding_dim 2.0 +302 55 training.batch_size 0.0 +302 55 training.label_smoothing 0.03114496263500117 +302 56 model.embedding_dim 1.0 +302 56 training.batch_size 0.0 +302 56 training.label_smoothing 0.0036166670885439795 +302 57 
model.embedding_dim 1.0 +302 57 training.batch_size 1.0 +302 57 training.label_smoothing 0.10411665710652725 +302 58 model.embedding_dim 0.0 +302 58 training.batch_size 0.0 +302 58 training.label_smoothing 0.00814165007761227 +302 59 model.embedding_dim 2.0 +302 59 training.batch_size 1.0 +302 59 training.label_smoothing 0.038811097215233924 +302 60 model.embedding_dim 1.0 +302 60 training.batch_size 0.0 +302 60 training.label_smoothing 0.0024145284756936997 +302 61 model.embedding_dim 0.0 +302 61 training.batch_size 2.0 +302 61 training.label_smoothing 0.10785527917884288 +302 62 model.embedding_dim 0.0 +302 62 training.batch_size 2.0 +302 62 training.label_smoothing 0.01994251717517755 +302 63 model.embedding_dim 1.0 +302 63 training.batch_size 1.0 +302 63 training.label_smoothing 0.006414244852350843 +302 64 model.embedding_dim 1.0 +302 64 training.batch_size 2.0 +302 64 training.label_smoothing 0.03157863650243077 +302 65 model.embedding_dim 0.0 +302 65 training.batch_size 1.0 +302 65 training.label_smoothing 0.07240450533800981 +302 66 model.embedding_dim 2.0 +302 66 training.batch_size 1.0 +302 66 training.label_smoothing 0.03131440803771735 +302 67 model.embedding_dim 0.0 +302 67 training.batch_size 1.0 +302 67 training.label_smoothing 0.0019104697924750415 +302 68 model.embedding_dim 2.0 +302 68 training.batch_size 0.0 +302 68 training.label_smoothing 0.002523254638181779 +302 69 model.embedding_dim 2.0 +302 69 training.batch_size 1.0 +302 69 training.label_smoothing 0.0021662109864489937 +302 70 model.embedding_dim 2.0 +302 70 training.batch_size 0.0 +302 70 training.label_smoothing 0.11351800397547263 +302 71 model.embedding_dim 0.0 +302 71 training.batch_size 0.0 +302 71 training.label_smoothing 0.03297873566321238 +302 72 model.embedding_dim 0.0 +302 72 training.batch_size 2.0 +302 72 training.label_smoothing 0.017316580619418633 +302 73 model.embedding_dim 2.0 +302 73 training.batch_size 2.0 +302 73 training.label_smoothing 0.5379531724918526 +302 74 
model.embedding_dim 1.0 +302 74 training.batch_size 0.0 +302 74 training.label_smoothing 0.0012921426850110865 +302 75 model.embedding_dim 0.0 +302 75 training.batch_size 0.0 +302 75 training.label_smoothing 0.20846241151675787 +302 76 model.embedding_dim 2.0 +302 76 training.batch_size 1.0 +302 76 training.label_smoothing 0.09346409189148612 +302 77 model.embedding_dim 1.0 +302 77 training.batch_size 0.0 +302 77 training.label_smoothing 0.04394852887098939 +302 78 model.embedding_dim 1.0 +302 78 training.batch_size 2.0 +302 78 training.label_smoothing 0.11272647652811447 +302 79 model.embedding_dim 1.0 +302 79 training.batch_size 1.0 +302 79 training.label_smoothing 0.006610155544140367 +302 80 model.embedding_dim 0.0 +302 80 training.batch_size 2.0 +302 80 training.label_smoothing 0.0023320120640376958 +302 81 model.embedding_dim 0.0 +302 81 training.batch_size 0.0 +302 81 training.label_smoothing 0.001003093080941554 +302 82 model.embedding_dim 2.0 +302 82 training.batch_size 0.0 +302 82 training.label_smoothing 0.2166572352130486 +302 83 model.embedding_dim 1.0 +302 83 training.batch_size 1.0 +302 83 training.label_smoothing 0.667462855991489 +302 84 model.embedding_dim 2.0 +302 84 training.batch_size 0.0 +302 84 training.label_smoothing 0.012727481778999981 +302 85 model.embedding_dim 2.0 +302 85 training.batch_size 2.0 +302 85 training.label_smoothing 0.2900711460404399 +302 86 model.embedding_dim 0.0 +302 86 training.batch_size 2.0 +302 86 training.label_smoothing 0.004892180456079493 +302 87 model.embedding_dim 2.0 +302 87 training.batch_size 1.0 +302 87 training.label_smoothing 0.0025238837900313075 +302 88 model.embedding_dim 2.0 +302 88 training.batch_size 0.0 +302 88 training.label_smoothing 0.012382180385690882 +302 89 model.embedding_dim 2.0 +302 89 training.batch_size 0.0 +302 89 training.label_smoothing 0.0024156712307212692 +302 90 model.embedding_dim 2.0 +302 90 training.batch_size 0.0 +302 90 training.label_smoothing 0.022159067572692447 +302 91 
model.embedding_dim 2.0 +302 91 training.batch_size 2.0 +302 91 training.label_smoothing 0.002341662561419866 +302 92 model.embedding_dim 2.0 +302 92 training.batch_size 1.0 +302 92 training.label_smoothing 0.777989271867802 +302 93 model.embedding_dim 1.0 +302 93 training.batch_size 2.0 +302 93 training.label_smoothing 0.01872765843899807 +302 94 model.embedding_dim 0.0 +302 94 training.batch_size 1.0 +302 94 training.label_smoothing 0.5432818742492348 +302 95 model.embedding_dim 2.0 +302 95 training.batch_size 1.0 +302 95 training.label_smoothing 0.2676807716680466 +302 96 model.embedding_dim 0.0 +302 96 training.batch_size 0.0 +302 96 training.label_smoothing 0.9388435853168853 +302 97 model.embedding_dim 0.0 +302 97 training.batch_size 2.0 +302 97 training.label_smoothing 0.0010095103234015307 +302 98 model.embedding_dim 1.0 +302 98 training.batch_size 1.0 +302 98 training.label_smoothing 0.002291107200609623 +302 99 model.embedding_dim 1.0 +302 99 training.batch_size 0.0 +302 99 training.label_smoothing 0.19901565142846595 +302 100 model.embedding_dim 2.0 +302 100 training.batch_size 2.0 +302 100 training.label_smoothing 0.015849241170300298 +302 1 dataset """kinships""" +302 1 model """hole""" +302 1 loss """crossentropy""" +302 1 regularizer """no""" +302 1 optimizer """adadelta""" +302 1 training_loop """lcwa""" +302 1 evaluator """rankbased""" +302 2 dataset """kinships""" +302 2 model """hole""" +302 2 loss """crossentropy""" +302 2 regularizer """no""" +302 2 optimizer """adadelta""" +302 2 training_loop """lcwa""" +302 2 evaluator """rankbased""" +302 3 dataset """kinships""" +302 3 model """hole""" +302 3 loss """crossentropy""" +302 3 regularizer """no""" +302 3 optimizer """adadelta""" +302 3 training_loop """lcwa""" +302 3 evaluator """rankbased""" +302 4 dataset """kinships""" +302 4 model """hole""" +302 4 loss """crossentropy""" +302 4 regularizer """no""" +302 4 optimizer """adadelta""" +302 4 training_loop """lcwa""" +302 4 evaluator 
"""rankbased""" +302 5 dataset """kinships""" +302 5 model """hole""" +302 5 loss """crossentropy""" +302 5 regularizer """no""" +302 5 optimizer """adadelta""" +302 5 training_loop """lcwa""" +302 5 evaluator """rankbased""" +302 6 dataset """kinships""" +302 6 model """hole""" +302 6 loss """crossentropy""" +302 6 regularizer """no""" +302 6 optimizer """adadelta""" +302 6 training_loop """lcwa""" +302 6 evaluator """rankbased""" +302 7 dataset """kinships""" +302 7 model """hole""" +302 7 loss """crossentropy""" +302 7 regularizer """no""" +302 7 optimizer """adadelta""" +302 7 training_loop """lcwa""" +302 7 evaluator """rankbased""" +302 8 dataset """kinships""" +302 8 model """hole""" +302 8 loss """crossentropy""" +302 8 regularizer """no""" +302 8 optimizer """adadelta""" +302 8 training_loop """lcwa""" +302 8 evaluator """rankbased""" +302 9 dataset """kinships""" +302 9 model """hole""" +302 9 loss """crossentropy""" +302 9 regularizer """no""" +302 9 optimizer """adadelta""" +302 9 training_loop """lcwa""" +302 9 evaluator """rankbased""" +302 10 dataset """kinships""" +302 10 model """hole""" +302 10 loss """crossentropy""" +302 10 regularizer """no""" +302 10 optimizer """adadelta""" +302 10 training_loop """lcwa""" +302 10 evaluator """rankbased""" +302 11 dataset """kinships""" +302 11 model """hole""" +302 11 loss """crossentropy""" +302 11 regularizer """no""" +302 11 optimizer """adadelta""" +302 11 training_loop """lcwa""" +302 11 evaluator """rankbased""" +302 12 dataset """kinships""" +302 12 model """hole""" +302 12 loss """crossentropy""" +302 12 regularizer """no""" +302 12 optimizer """adadelta""" +302 12 training_loop """lcwa""" +302 12 evaluator """rankbased""" +302 13 dataset """kinships""" +302 13 model """hole""" +302 13 loss """crossentropy""" +302 13 regularizer """no""" +302 13 optimizer """adadelta""" +302 13 training_loop """lcwa""" +302 13 evaluator """rankbased""" +302 14 dataset """kinships""" +302 14 model """hole""" +302 14 
loss """crossentropy""" +302 14 regularizer """no""" +302 14 optimizer """adadelta""" +302 14 training_loop """lcwa""" +302 14 evaluator """rankbased""" +302 15 dataset """kinships""" +302 15 model """hole""" +302 15 loss """crossentropy""" +302 15 regularizer """no""" +302 15 optimizer """adadelta""" +302 15 training_loop """lcwa""" +302 15 evaluator """rankbased""" +302 16 dataset """kinships""" +302 16 model """hole""" +302 16 loss """crossentropy""" +302 16 regularizer """no""" +302 16 optimizer """adadelta""" +302 16 training_loop """lcwa""" +302 16 evaluator """rankbased""" +302 17 dataset """kinships""" +302 17 model """hole""" +302 17 loss """crossentropy""" +302 17 regularizer """no""" +302 17 optimizer """adadelta""" +302 17 training_loop """lcwa""" +302 17 evaluator """rankbased""" +302 18 dataset """kinships""" +302 18 model """hole""" +302 18 loss """crossentropy""" +302 18 regularizer """no""" +302 18 optimizer """adadelta""" +302 18 training_loop """lcwa""" +302 18 evaluator """rankbased""" +302 19 dataset """kinships""" +302 19 model """hole""" +302 19 loss """crossentropy""" +302 19 regularizer """no""" +302 19 optimizer """adadelta""" +302 19 training_loop """lcwa""" +302 19 evaluator """rankbased""" +302 20 dataset """kinships""" +302 20 model """hole""" +302 20 loss """crossentropy""" +302 20 regularizer """no""" +302 20 optimizer """adadelta""" +302 20 training_loop """lcwa""" +302 20 evaluator """rankbased""" +302 21 dataset """kinships""" +302 21 model """hole""" +302 21 loss """crossentropy""" +302 21 regularizer """no""" +302 21 optimizer """adadelta""" +302 21 training_loop """lcwa""" +302 21 evaluator """rankbased""" +302 22 dataset """kinships""" +302 22 model """hole""" +302 22 loss """crossentropy""" +302 22 regularizer """no""" +302 22 optimizer """adadelta""" +302 22 training_loop """lcwa""" +302 22 evaluator """rankbased""" +302 23 dataset """kinships""" +302 23 model """hole""" +302 23 loss """crossentropy""" +302 23 regularizer 
"""no""" +302 23 optimizer """adadelta""" +302 23 training_loop """lcwa""" +302 23 evaluator """rankbased""" +302 24 dataset """kinships""" +302 24 model """hole""" +302 24 loss """crossentropy""" +302 24 regularizer """no""" +302 24 optimizer """adadelta""" +302 24 training_loop """lcwa""" +302 24 evaluator """rankbased""" +302 25 dataset """kinships""" +302 25 model """hole""" +302 25 loss """crossentropy""" +302 25 regularizer """no""" +302 25 optimizer """adadelta""" +302 25 training_loop """lcwa""" +302 25 evaluator """rankbased""" +302 26 dataset """kinships""" +302 26 model """hole""" +302 26 loss """crossentropy""" +302 26 regularizer """no""" +302 26 optimizer """adadelta""" +302 26 training_loop """lcwa""" +302 26 evaluator """rankbased""" +302 27 dataset """kinships""" +302 27 model """hole""" +302 27 loss """crossentropy""" +302 27 regularizer """no""" +302 27 optimizer """adadelta""" +302 27 training_loop """lcwa""" +302 27 evaluator """rankbased""" +302 28 dataset """kinships""" +302 28 model """hole""" +302 28 loss """crossentropy""" +302 28 regularizer """no""" +302 28 optimizer """adadelta""" +302 28 training_loop """lcwa""" +302 28 evaluator """rankbased""" +302 29 dataset """kinships""" +302 29 model """hole""" +302 29 loss """crossentropy""" +302 29 regularizer """no""" +302 29 optimizer """adadelta""" +302 29 training_loop """lcwa""" +302 29 evaluator """rankbased""" +302 30 dataset """kinships""" +302 30 model """hole""" +302 30 loss """crossentropy""" +302 30 regularizer """no""" +302 30 optimizer """adadelta""" +302 30 training_loop """lcwa""" +302 30 evaluator """rankbased""" +302 31 dataset """kinships""" +302 31 model """hole""" +302 31 loss """crossentropy""" +302 31 regularizer """no""" +302 31 optimizer """adadelta""" +302 31 training_loop """lcwa""" +302 31 evaluator """rankbased""" +302 32 dataset """kinships""" +302 32 model """hole""" +302 32 loss """crossentropy""" +302 32 regularizer """no""" +302 32 optimizer """adadelta""" +302 
32 training_loop """lcwa""" +302 32 evaluator """rankbased""" +302 33 dataset """kinships""" +302 33 model """hole""" +302 33 loss """crossentropy""" +302 33 regularizer """no""" +302 33 optimizer """adadelta""" +302 33 training_loop """lcwa""" +302 33 evaluator """rankbased""" +302 34 dataset """kinships""" +302 34 model """hole""" +302 34 loss """crossentropy""" +302 34 regularizer """no""" +302 34 optimizer """adadelta""" +302 34 training_loop """lcwa""" +302 34 evaluator """rankbased""" +302 35 dataset """kinships""" +302 35 model """hole""" +302 35 loss """crossentropy""" +302 35 regularizer """no""" +302 35 optimizer """adadelta""" +302 35 training_loop """lcwa""" +302 35 evaluator """rankbased""" +302 36 dataset """kinships""" +302 36 model """hole""" +302 36 loss """crossentropy""" +302 36 regularizer """no""" +302 36 optimizer """adadelta""" +302 36 training_loop """lcwa""" +302 36 evaluator """rankbased""" +302 37 dataset """kinships""" +302 37 model """hole""" +302 37 loss """crossentropy""" +302 37 regularizer """no""" +302 37 optimizer """adadelta""" +302 37 training_loop """lcwa""" +302 37 evaluator """rankbased""" +302 38 dataset """kinships""" +302 38 model """hole""" +302 38 loss """crossentropy""" +302 38 regularizer """no""" +302 38 optimizer """adadelta""" +302 38 training_loop """lcwa""" +302 38 evaluator """rankbased""" +302 39 dataset """kinships""" +302 39 model """hole""" +302 39 loss """crossentropy""" +302 39 regularizer """no""" +302 39 optimizer """adadelta""" +302 39 training_loop """lcwa""" +302 39 evaluator """rankbased""" +302 40 dataset """kinships""" +302 40 model """hole""" +302 40 loss """crossentropy""" +302 40 regularizer """no""" +302 40 optimizer """adadelta""" +302 40 training_loop """lcwa""" +302 40 evaluator """rankbased""" +302 41 dataset """kinships""" +302 41 model """hole""" +302 41 loss """crossentropy""" +302 41 regularizer """no""" +302 41 optimizer """adadelta""" +302 41 training_loop """lcwa""" +302 41 evaluator 
"""rankbased""" +302 42 dataset """kinships""" +302 42 model """hole""" +302 42 loss """crossentropy""" +302 42 regularizer """no""" +302 42 optimizer """adadelta""" +302 42 training_loop """lcwa""" +302 42 evaluator """rankbased""" +302 43 dataset """kinships""" +302 43 model """hole""" +302 43 loss """crossentropy""" +302 43 regularizer """no""" +302 43 optimizer """adadelta""" +302 43 training_loop """lcwa""" +302 43 evaluator """rankbased""" +302 44 dataset """kinships""" +302 44 model """hole""" +302 44 loss """crossentropy""" +302 44 regularizer """no""" +302 44 optimizer """adadelta""" +302 44 training_loop """lcwa""" +302 44 evaluator """rankbased""" +302 45 dataset """kinships""" +302 45 model """hole""" +302 45 loss """crossentropy""" +302 45 regularizer """no""" +302 45 optimizer """adadelta""" +302 45 training_loop """lcwa""" +302 45 evaluator """rankbased""" +302 46 dataset """kinships""" +302 46 model """hole""" +302 46 loss """crossentropy""" +302 46 regularizer """no""" +302 46 optimizer """adadelta""" +302 46 training_loop """lcwa""" +302 46 evaluator """rankbased""" +302 47 dataset """kinships""" +302 47 model """hole""" +302 47 loss """crossentropy""" +302 47 regularizer """no""" +302 47 optimizer """adadelta""" +302 47 training_loop """lcwa""" +302 47 evaluator """rankbased""" +302 48 dataset """kinships""" +302 48 model """hole""" +302 48 loss """crossentropy""" +302 48 regularizer """no""" +302 48 optimizer """adadelta""" +302 48 training_loop """lcwa""" +302 48 evaluator """rankbased""" +302 49 dataset """kinships""" +302 49 model """hole""" +302 49 loss """crossentropy""" +302 49 regularizer """no""" +302 49 optimizer """adadelta""" +302 49 training_loop """lcwa""" +302 49 evaluator """rankbased""" +302 50 dataset """kinships""" +302 50 model """hole""" +302 50 loss """crossentropy""" +302 50 regularizer """no""" +302 50 optimizer """adadelta""" +302 50 training_loop """lcwa""" +302 50 evaluator """rankbased""" +302 51 dataset """kinships""" 
+302 51 model """hole""" +302 51 loss """crossentropy""" +302 51 regularizer """no""" +302 51 optimizer """adadelta""" +302 51 training_loop """lcwa""" +302 51 evaluator """rankbased""" +302 52 dataset """kinships""" +302 52 model """hole""" +302 52 loss """crossentropy""" +302 52 regularizer """no""" +302 52 optimizer """adadelta""" +302 52 training_loop """lcwa""" +302 52 evaluator """rankbased""" +302 53 dataset """kinships""" +302 53 model """hole""" +302 53 loss """crossentropy""" +302 53 regularizer """no""" +302 53 optimizer """adadelta""" +302 53 training_loop """lcwa""" +302 53 evaluator """rankbased""" +302 54 dataset """kinships""" +302 54 model """hole""" +302 54 loss """crossentropy""" +302 54 regularizer """no""" +302 54 optimizer """adadelta""" +302 54 training_loop """lcwa""" +302 54 evaluator """rankbased""" +302 55 dataset """kinships""" +302 55 model """hole""" +302 55 loss """crossentropy""" +302 55 regularizer """no""" +302 55 optimizer """adadelta""" +302 55 training_loop """lcwa""" +302 55 evaluator """rankbased""" +302 56 dataset """kinships""" +302 56 model """hole""" +302 56 loss """crossentropy""" +302 56 regularizer """no""" +302 56 optimizer """adadelta""" +302 56 training_loop """lcwa""" +302 56 evaluator """rankbased""" +302 57 dataset """kinships""" +302 57 model """hole""" +302 57 loss """crossentropy""" +302 57 regularizer """no""" +302 57 optimizer """adadelta""" +302 57 training_loop """lcwa""" +302 57 evaluator """rankbased""" +302 58 dataset """kinships""" +302 58 model """hole""" +302 58 loss """crossentropy""" +302 58 regularizer """no""" +302 58 optimizer """adadelta""" +302 58 training_loop """lcwa""" +302 58 evaluator """rankbased""" +302 59 dataset """kinships""" +302 59 model """hole""" +302 59 loss """crossentropy""" +302 59 regularizer """no""" +302 59 optimizer """adadelta""" +302 59 training_loop """lcwa""" +302 59 evaluator """rankbased""" +302 60 dataset """kinships""" +302 60 model """hole""" +302 60 loss 
"""crossentropy""" +302 60 regularizer """no""" +302 60 optimizer """adadelta""" +302 60 training_loop """lcwa""" +302 60 evaluator """rankbased""" +302 61 dataset """kinships""" +302 61 model """hole""" +302 61 loss """crossentropy""" +302 61 regularizer """no""" +302 61 optimizer """adadelta""" +302 61 training_loop """lcwa""" +302 61 evaluator """rankbased""" +302 62 dataset """kinships""" +302 62 model """hole""" +302 62 loss """crossentropy""" +302 62 regularizer """no""" +302 62 optimizer """adadelta""" +302 62 training_loop """lcwa""" +302 62 evaluator """rankbased""" +302 63 dataset """kinships""" +302 63 model """hole""" +302 63 loss """crossentropy""" +302 63 regularizer """no""" +302 63 optimizer """adadelta""" +302 63 training_loop """lcwa""" +302 63 evaluator """rankbased""" +302 64 dataset """kinships""" +302 64 model """hole""" +302 64 loss """crossentropy""" +302 64 regularizer """no""" +302 64 optimizer """adadelta""" +302 64 training_loop """lcwa""" +302 64 evaluator """rankbased""" +302 65 dataset """kinships""" +302 65 model """hole""" +302 65 loss """crossentropy""" +302 65 regularizer """no""" +302 65 optimizer """adadelta""" +302 65 training_loop """lcwa""" +302 65 evaluator """rankbased""" +302 66 dataset """kinships""" +302 66 model """hole""" +302 66 loss """crossentropy""" +302 66 regularizer """no""" +302 66 optimizer """adadelta""" +302 66 training_loop """lcwa""" +302 66 evaluator """rankbased""" +302 67 dataset """kinships""" +302 67 model """hole""" +302 67 loss """crossentropy""" +302 67 regularizer """no""" +302 67 optimizer """adadelta""" +302 67 training_loop """lcwa""" +302 67 evaluator """rankbased""" +302 68 dataset """kinships""" +302 68 model """hole""" +302 68 loss """crossentropy""" +302 68 regularizer """no""" +302 68 optimizer """adadelta""" +302 68 training_loop """lcwa""" +302 68 evaluator """rankbased""" +302 69 dataset """kinships""" +302 69 model """hole""" +302 69 loss """crossentropy""" +302 69 regularizer 
"""no""" +302 69 optimizer """adadelta""" +302 69 training_loop """lcwa""" +302 69 evaluator """rankbased""" +302 70 dataset """kinships""" +302 70 model """hole""" +302 70 loss """crossentropy""" +302 70 regularizer """no""" +302 70 optimizer """adadelta""" +302 70 training_loop """lcwa""" +302 70 evaluator """rankbased""" +302 71 dataset """kinships""" +302 71 model """hole""" +302 71 loss """crossentropy""" +302 71 regularizer """no""" +302 71 optimizer """adadelta""" +302 71 training_loop """lcwa""" +302 71 evaluator """rankbased""" +302 72 dataset """kinships""" +302 72 model """hole""" +302 72 loss """crossentropy""" +302 72 regularizer """no""" +302 72 optimizer """adadelta""" +302 72 training_loop """lcwa""" +302 72 evaluator """rankbased""" +302 73 dataset """kinships""" +302 73 model """hole""" +302 73 loss """crossentropy""" +302 73 regularizer """no""" +302 73 optimizer """adadelta""" +302 73 training_loop """lcwa""" +302 73 evaluator """rankbased""" +302 74 dataset """kinships""" +302 74 model """hole""" +302 74 loss """crossentropy""" +302 74 regularizer """no""" +302 74 optimizer """adadelta""" +302 74 training_loop """lcwa""" +302 74 evaluator """rankbased""" +302 75 dataset """kinships""" +302 75 model """hole""" +302 75 loss """crossentropy""" +302 75 regularizer """no""" +302 75 optimizer """adadelta""" +302 75 training_loop """lcwa""" +302 75 evaluator """rankbased""" +302 76 dataset """kinships""" +302 76 model """hole""" +302 76 loss """crossentropy""" +302 76 regularizer """no""" +302 76 optimizer """adadelta""" +302 76 training_loop """lcwa""" +302 76 evaluator """rankbased""" +302 77 dataset """kinships""" +302 77 model """hole""" +302 77 loss """crossentropy""" +302 77 regularizer """no""" +302 77 optimizer """adadelta""" +302 77 training_loop """lcwa""" +302 77 evaluator """rankbased""" +302 78 dataset """kinships""" +302 78 model """hole""" +302 78 loss """crossentropy""" +302 78 regularizer """no""" +302 78 optimizer """adadelta""" +302 
78 training_loop """lcwa""" +302 78 evaluator """rankbased""" +302 79 dataset """kinships""" +302 79 model """hole""" +302 79 loss """crossentropy""" +302 79 regularizer """no""" +302 79 optimizer """adadelta""" +302 79 training_loop """lcwa""" +302 79 evaluator """rankbased""" +302 80 dataset """kinships""" +302 80 model """hole""" +302 80 loss """crossentropy""" +302 80 regularizer """no""" +302 80 optimizer """adadelta""" +302 80 training_loop """lcwa""" +302 80 evaluator """rankbased""" +302 81 dataset """kinships""" +302 81 model """hole""" +302 81 loss """crossentropy""" +302 81 regularizer """no""" +302 81 optimizer """adadelta""" +302 81 training_loop """lcwa""" +302 81 evaluator """rankbased""" +302 82 dataset """kinships""" +302 82 model """hole""" +302 82 loss """crossentropy""" +302 82 regularizer """no""" +302 82 optimizer """adadelta""" +302 82 training_loop """lcwa""" +302 82 evaluator """rankbased""" +302 83 dataset """kinships""" +302 83 model """hole""" +302 83 loss """crossentropy""" +302 83 regularizer """no""" +302 83 optimizer """adadelta""" +302 83 training_loop """lcwa""" +302 83 evaluator """rankbased""" +302 84 dataset """kinships""" +302 84 model """hole""" +302 84 loss """crossentropy""" +302 84 regularizer """no""" +302 84 optimizer """adadelta""" +302 84 training_loop """lcwa""" +302 84 evaluator """rankbased""" +302 85 dataset """kinships""" +302 85 model """hole""" +302 85 loss """crossentropy""" +302 85 regularizer """no""" +302 85 optimizer """adadelta""" +302 85 training_loop """lcwa""" +302 85 evaluator """rankbased""" +302 86 dataset """kinships""" +302 86 model """hole""" +302 86 loss """crossentropy""" +302 86 regularizer """no""" +302 86 optimizer """adadelta""" +302 86 training_loop """lcwa""" +302 86 evaluator """rankbased""" +302 87 dataset """kinships""" +302 87 model """hole""" +302 87 loss """crossentropy""" +302 87 regularizer """no""" +302 87 optimizer """adadelta""" +302 87 training_loop """lcwa""" +302 87 evaluator 
"""rankbased""" +302 88 dataset """kinships""" +302 88 model """hole""" +302 88 loss """crossentropy""" +302 88 regularizer """no""" +302 88 optimizer """adadelta""" +302 88 training_loop """lcwa""" +302 88 evaluator """rankbased""" +302 89 dataset """kinships""" +302 89 model """hole""" +302 89 loss """crossentropy""" +302 89 regularizer """no""" +302 89 optimizer """adadelta""" +302 89 training_loop """lcwa""" +302 89 evaluator """rankbased""" +302 90 dataset """kinships""" +302 90 model """hole""" +302 90 loss """crossentropy""" +302 90 regularizer """no""" +302 90 optimizer """adadelta""" +302 90 training_loop """lcwa""" +302 90 evaluator """rankbased""" +302 91 dataset """kinships""" +302 91 model """hole""" +302 91 loss """crossentropy""" +302 91 regularizer """no""" +302 91 optimizer """adadelta""" +302 91 training_loop """lcwa""" +302 91 evaluator """rankbased""" +302 92 dataset """kinships""" +302 92 model """hole""" +302 92 loss """crossentropy""" +302 92 regularizer """no""" +302 92 optimizer """adadelta""" +302 92 training_loop """lcwa""" +302 92 evaluator """rankbased""" +302 93 dataset """kinships""" +302 93 model """hole""" +302 93 loss """crossentropy""" +302 93 regularizer """no""" +302 93 optimizer """adadelta""" +302 93 training_loop """lcwa""" +302 93 evaluator """rankbased""" +302 94 dataset """kinships""" +302 94 model """hole""" +302 94 loss """crossentropy""" +302 94 regularizer """no""" +302 94 optimizer """adadelta""" +302 94 training_loop """lcwa""" +302 94 evaluator """rankbased""" +302 95 dataset """kinships""" +302 95 model """hole""" +302 95 loss """crossentropy""" +302 95 regularizer """no""" +302 95 optimizer """adadelta""" +302 95 training_loop """lcwa""" +302 95 evaluator """rankbased""" +302 96 dataset """kinships""" +302 96 model """hole""" +302 96 loss """crossentropy""" +302 96 regularizer """no""" +302 96 optimizer """adadelta""" +302 96 training_loop """lcwa""" +302 96 evaluator """rankbased""" +302 97 dataset """kinships""" 
+302 97 model """hole""" +302 97 loss """crossentropy""" +302 97 regularizer """no""" +302 97 optimizer """adadelta""" +302 97 training_loop """lcwa""" +302 97 evaluator """rankbased""" +302 98 dataset """kinships""" +302 98 model """hole""" +302 98 loss """crossentropy""" +302 98 regularizer """no""" +302 98 optimizer """adadelta""" +302 98 training_loop """lcwa""" +302 98 evaluator """rankbased""" +302 99 dataset """kinships""" +302 99 model """hole""" +302 99 loss """crossentropy""" +302 99 regularizer """no""" +302 99 optimizer """adadelta""" +302 99 training_loop """lcwa""" +302 99 evaluator """rankbased""" +302 100 dataset """kinships""" +302 100 model """hole""" +302 100 loss """crossentropy""" +302 100 regularizer """no""" +302 100 optimizer """adadelta""" +302 100 training_loop """lcwa""" +302 100 evaluator """rankbased""" +303 1 model.embedding_dim 1.0 +303 1 training.batch_size 1.0 +303 1 training.label_smoothing 0.16600260438222966 +303 2 model.embedding_dim 0.0 +303 2 training.batch_size 0.0 +303 2 training.label_smoothing 0.135134688185542 +303 3 model.embedding_dim 1.0 +303 3 training.batch_size 0.0 +303 3 training.label_smoothing 0.5198406995130694 +303 4 model.embedding_dim 0.0 +303 4 training.batch_size 0.0 +303 4 training.label_smoothing 0.0023742714175100997 +303 5 model.embedding_dim 1.0 +303 5 training.batch_size 1.0 +303 5 training.label_smoothing 0.12513445721598518 +303 6 model.embedding_dim 0.0 +303 6 training.batch_size 0.0 +303 6 training.label_smoothing 0.006941694285408753 +303 7 model.embedding_dim 0.0 +303 7 training.batch_size 0.0 +303 7 training.label_smoothing 0.0027907083925053054 +303 8 model.embedding_dim 1.0 +303 8 training.batch_size 0.0 +303 8 training.label_smoothing 0.001139916122627543 +303 9 model.embedding_dim 1.0 +303 9 training.batch_size 0.0 +303 9 training.label_smoothing 0.0017056780347337835 +303 10 model.embedding_dim 1.0 +303 10 training.batch_size 0.0 +303 10 training.label_smoothing 0.005050349406650789 +303 
11 model.embedding_dim 2.0 +303 11 training.batch_size 0.0 +303 11 training.label_smoothing 0.5967037467298072 +303 12 model.embedding_dim 2.0 +303 12 training.batch_size 1.0 +303 12 training.label_smoothing 0.07152682961735397 +303 13 model.embedding_dim 2.0 +303 13 training.batch_size 1.0 +303 13 training.label_smoothing 0.07938708649535696 +303 14 model.embedding_dim 2.0 +303 14 training.batch_size 0.0 +303 14 training.label_smoothing 0.0026946649955692996 +303 15 model.embedding_dim 2.0 +303 15 training.batch_size 2.0 +303 15 training.label_smoothing 0.005586389472267532 +303 16 model.embedding_dim 0.0 +303 16 training.batch_size 0.0 +303 16 training.label_smoothing 0.06545139716071101 +303 17 model.embedding_dim 2.0 +303 17 training.batch_size 0.0 +303 17 training.label_smoothing 0.01480965369973078 +303 18 model.embedding_dim 0.0 +303 18 training.batch_size 0.0 +303 18 training.label_smoothing 0.015824727280370234 +303 19 model.embedding_dim 0.0 +303 19 training.batch_size 0.0 +303 19 training.label_smoothing 0.001891535372949433 +303 20 model.embedding_dim 2.0 +303 20 training.batch_size 2.0 +303 20 training.label_smoothing 0.0060698530803602775 +303 21 model.embedding_dim 2.0 +303 21 training.batch_size 2.0 +303 21 training.label_smoothing 0.002146296898157734 +303 22 model.embedding_dim 0.0 +303 22 training.batch_size 2.0 +303 22 training.label_smoothing 0.17530003812158731 +303 23 model.embedding_dim 0.0 +303 23 training.batch_size 1.0 +303 23 training.label_smoothing 0.022853367475415335 +303 24 model.embedding_dim 2.0 +303 24 training.batch_size 1.0 +303 24 training.label_smoothing 0.003971630066254991 +303 25 model.embedding_dim 1.0 +303 25 training.batch_size 2.0 +303 25 training.label_smoothing 0.0014382096367077822 +303 26 model.embedding_dim 0.0 +303 26 training.batch_size 2.0 +303 26 training.label_smoothing 0.0071104872134052826 +303 27 model.embedding_dim 0.0 +303 27 training.batch_size 2.0 +303 27 training.label_smoothing 0.3064420940348463 
+303 28 model.embedding_dim 1.0 +303 28 training.batch_size 1.0 +303 28 training.label_smoothing 0.020273492354965606 +303 29 model.embedding_dim 1.0 +303 29 training.batch_size 0.0 +303 29 training.label_smoothing 0.5764869396989328 +303 30 model.embedding_dim 0.0 +303 30 training.batch_size 0.0 +303 30 training.label_smoothing 0.010739066744933126 +303 31 model.embedding_dim 0.0 +303 31 training.batch_size 0.0 +303 31 training.label_smoothing 0.1994657997204496 +303 32 model.embedding_dim 2.0 +303 32 training.batch_size 2.0 +303 32 training.label_smoothing 0.024570301117586752 +303 33 model.embedding_dim 0.0 +303 33 training.batch_size 1.0 +303 33 training.label_smoothing 0.022684817004768917 +303 34 model.embedding_dim 1.0 +303 34 training.batch_size 0.0 +303 34 training.label_smoothing 0.8399340788822209 +303 35 model.embedding_dim 1.0 +303 35 training.batch_size 2.0 +303 35 training.label_smoothing 0.10814018704279507 +303 36 model.embedding_dim 2.0 +303 36 training.batch_size 1.0 +303 36 training.label_smoothing 0.003356904086576775 +303 37 model.embedding_dim 0.0 +303 37 training.batch_size 1.0 +303 37 training.label_smoothing 0.970545675278417 +303 38 model.embedding_dim 2.0 +303 38 training.batch_size 0.0 +303 38 training.label_smoothing 0.0019024569903790612 +303 39 model.embedding_dim 2.0 +303 39 training.batch_size 0.0 +303 39 training.label_smoothing 0.001014957220897631 +303 40 model.embedding_dim 2.0 +303 40 training.batch_size 2.0 +303 40 training.label_smoothing 0.010848816811207918 +303 41 model.embedding_dim 0.0 +303 41 training.batch_size 0.0 +303 41 training.label_smoothing 0.048290169964355166 +303 42 model.embedding_dim 2.0 +303 42 training.batch_size 1.0 +303 42 training.label_smoothing 0.3140402948898821 +303 43 model.embedding_dim 1.0 +303 43 training.batch_size 1.0 +303 43 training.label_smoothing 0.007852906998296444 +303 44 model.embedding_dim 2.0 +303 44 training.batch_size 0.0 +303 44 training.label_smoothing 0.011967965539009702 +303 
45 model.embedding_dim 2.0 +303 45 training.batch_size 1.0 +303 45 training.label_smoothing 0.4823874746219758 +303 46 model.embedding_dim 2.0 +303 46 training.batch_size 1.0 +303 46 training.label_smoothing 0.0718431219108361 +303 47 model.embedding_dim 2.0 +303 47 training.batch_size 1.0 +303 47 training.label_smoothing 0.029386315314743873 +303 48 model.embedding_dim 2.0 +303 48 training.batch_size 0.0 +303 48 training.label_smoothing 0.14848496958135582 +303 49 model.embedding_dim 1.0 +303 49 training.batch_size 1.0 +303 49 training.label_smoothing 0.025094562930343567 +303 50 model.embedding_dim 0.0 +303 50 training.batch_size 1.0 +303 50 training.label_smoothing 0.13177288420678024 +303 51 model.embedding_dim 0.0 +303 51 training.batch_size 2.0 +303 51 training.label_smoothing 0.0020760595801721624 +303 52 model.embedding_dim 0.0 +303 52 training.batch_size 1.0 +303 52 training.label_smoothing 0.021625199089353314 +303 53 model.embedding_dim 2.0 +303 53 training.batch_size 2.0 +303 53 training.label_smoothing 0.22696082605434828 +303 54 model.embedding_dim 1.0 +303 54 training.batch_size 2.0 +303 54 training.label_smoothing 0.002173746641262018 +303 55 model.embedding_dim 2.0 +303 55 training.batch_size 2.0 +303 55 training.label_smoothing 0.0011723414939144717 +303 56 model.embedding_dim 2.0 +303 56 training.batch_size 1.0 +303 56 training.label_smoothing 0.0014518590037217094 +303 57 model.embedding_dim 2.0 +303 57 training.batch_size 2.0 +303 57 training.label_smoothing 0.03866317777550867 +303 58 model.embedding_dim 0.0 +303 58 training.batch_size 1.0 +303 58 training.label_smoothing 0.018595043469892367 +303 59 model.embedding_dim 2.0 +303 59 training.batch_size 1.0 +303 59 training.label_smoothing 0.030253459297979297 +303 60 model.embedding_dim 0.0 +303 60 training.batch_size 0.0 +303 60 training.label_smoothing 0.13667940093800435 +303 61 model.embedding_dim 0.0 +303 61 training.batch_size 2.0 +303 61 training.label_smoothing 0.437718221705269 +303 62 
model.embedding_dim 1.0 +303 62 training.batch_size 2.0 +303 62 training.label_smoothing 0.031895037563693614 +303 63 model.embedding_dim 0.0 +303 63 training.batch_size 2.0 +303 63 training.label_smoothing 0.7712087088902212 +303 64 model.embedding_dim 1.0 +303 64 training.batch_size 1.0 +303 64 training.label_smoothing 0.11097262608887169 +303 65 model.embedding_dim 1.0 +303 65 training.batch_size 0.0 +303 65 training.label_smoothing 0.022159199302404887 +303 66 model.embedding_dim 0.0 +303 66 training.batch_size 2.0 +303 66 training.label_smoothing 0.04969966755220556 +303 67 model.embedding_dim 0.0 +303 67 training.batch_size 0.0 +303 67 training.label_smoothing 0.25084042264325335 +303 68 model.embedding_dim 1.0 +303 68 training.batch_size 2.0 +303 68 training.label_smoothing 0.001981407207257568 +303 69 model.embedding_dim 2.0 +303 69 training.batch_size 1.0 +303 69 training.label_smoothing 0.14136994767018388 +303 70 model.embedding_dim 0.0 +303 70 training.batch_size 2.0 +303 70 training.label_smoothing 0.01012966625780947 +303 71 model.embedding_dim 2.0 +303 71 training.batch_size 2.0 +303 71 training.label_smoothing 0.015275115279496257 +303 72 model.embedding_dim 2.0 +303 72 training.batch_size 2.0 +303 72 training.label_smoothing 0.17529297909890018 +303 73 model.embedding_dim 0.0 +303 73 training.batch_size 2.0 +303 73 training.label_smoothing 0.23642761447292354 +303 74 model.embedding_dim 1.0 +303 74 training.batch_size 1.0 +303 74 training.label_smoothing 0.8060504842564835 +303 75 model.embedding_dim 1.0 +303 75 training.batch_size 2.0 +303 75 training.label_smoothing 0.5800740579587159 +303 76 model.embedding_dim 0.0 +303 76 training.batch_size 1.0 +303 76 training.label_smoothing 0.45584855189818524 +303 77 model.embedding_dim 1.0 +303 77 training.batch_size 1.0 +303 77 training.label_smoothing 0.0017700441719238594 +303 78 model.embedding_dim 1.0 +303 78 training.batch_size 1.0 +303 78 training.label_smoothing 0.3295815446751385 +303 79 
model.embedding_dim 2.0 +303 79 training.batch_size 0.0 +303 79 training.label_smoothing 0.08670496960250283 +303 80 model.embedding_dim 1.0 +303 80 training.batch_size 2.0 +303 80 training.label_smoothing 0.09022897257313009 +303 81 model.embedding_dim 1.0 +303 81 training.batch_size 1.0 +303 81 training.label_smoothing 0.34550579633799444 +303 82 model.embedding_dim 1.0 +303 82 training.batch_size 1.0 +303 82 training.label_smoothing 0.14758704402494266 +303 83 model.embedding_dim 2.0 +303 83 training.batch_size 0.0 +303 83 training.label_smoothing 0.12052970664544592 +303 84 model.embedding_dim 0.0 +303 84 training.batch_size 1.0 +303 84 training.label_smoothing 0.14558322119432404 +303 85 model.embedding_dim 2.0 +303 85 training.batch_size 0.0 +303 85 training.label_smoothing 0.0038191828722438765 +303 86 model.embedding_dim 0.0 +303 86 training.batch_size 2.0 +303 86 training.label_smoothing 0.0016053391571527044 +303 87 model.embedding_dim 0.0 +303 87 training.batch_size 0.0 +303 87 training.label_smoothing 0.169075460431747 +303 88 model.embedding_dim 2.0 +303 88 training.batch_size 1.0 +303 88 training.label_smoothing 0.311344246672833 +303 89 model.embedding_dim 2.0 +303 89 training.batch_size 0.0 +303 89 training.label_smoothing 0.07943504185587533 +303 90 model.embedding_dim 2.0 +303 90 training.batch_size 1.0 +303 90 training.label_smoothing 0.10887115811927874 +303 91 model.embedding_dim 2.0 +303 91 training.batch_size 0.0 +303 91 training.label_smoothing 0.020875569857599176 +303 92 model.embedding_dim 2.0 +303 92 training.batch_size 0.0 +303 92 training.label_smoothing 0.06711050323501047 +303 93 model.embedding_dim 2.0 +303 93 training.batch_size 2.0 +303 93 training.label_smoothing 0.0014210156915839339 +303 94 model.embedding_dim 0.0 +303 94 training.batch_size 2.0 +303 94 training.label_smoothing 0.04314786761761777 +303 95 model.embedding_dim 0.0 +303 95 training.batch_size 1.0 +303 95 training.label_smoothing 0.07653858917939942 +303 96 
model.embedding_dim 0.0 +303 96 training.batch_size 2.0 +303 96 training.label_smoothing 0.014665321799085501 +303 97 model.embedding_dim 0.0 +303 97 training.batch_size 1.0 +303 97 training.label_smoothing 0.03906230706911603 +303 98 model.embedding_dim 0.0 +303 98 training.batch_size 2.0 +303 98 training.label_smoothing 0.3289221708591558 +303 99 model.embedding_dim 1.0 +303 99 training.batch_size 0.0 +303 99 training.label_smoothing 0.019318012146005648 +303 100 model.embedding_dim 0.0 +303 100 training.batch_size 1.0 +303 100 training.label_smoothing 0.014168738028529857 +303 1 dataset """kinships""" +303 1 model """hole""" +303 1 loss """crossentropy""" +303 1 regularizer """no""" +303 1 optimizer """adadelta""" +303 1 training_loop """lcwa""" +303 1 evaluator """rankbased""" +303 2 dataset """kinships""" +303 2 model """hole""" +303 2 loss """crossentropy""" +303 2 regularizer """no""" +303 2 optimizer """adadelta""" +303 2 training_loop """lcwa""" +303 2 evaluator """rankbased""" +303 3 dataset """kinships""" +303 3 model """hole""" +303 3 loss """crossentropy""" +303 3 regularizer """no""" +303 3 optimizer """adadelta""" +303 3 training_loop """lcwa""" +303 3 evaluator """rankbased""" +303 4 dataset """kinships""" +303 4 model """hole""" +303 4 loss """crossentropy""" +303 4 regularizer """no""" +303 4 optimizer """adadelta""" +303 4 training_loop """lcwa""" +303 4 evaluator """rankbased""" +303 5 dataset """kinships""" +303 5 model """hole""" +303 5 loss """crossentropy""" +303 5 regularizer """no""" +303 5 optimizer """adadelta""" +303 5 training_loop """lcwa""" +303 5 evaluator """rankbased""" +303 6 dataset """kinships""" +303 6 model """hole""" +303 6 loss """crossentropy""" +303 6 regularizer """no""" +303 6 optimizer """adadelta""" +303 6 training_loop """lcwa""" +303 6 evaluator """rankbased""" +303 7 dataset """kinships""" +303 7 model """hole""" +303 7 loss """crossentropy""" +303 7 regularizer """no""" +303 7 optimizer """adadelta""" +303 7 
training_loop """lcwa""" +303 7 evaluator """rankbased""" +303 8 dataset """kinships""" +303 8 model """hole""" +303 8 loss """crossentropy""" +303 8 regularizer """no""" +303 8 optimizer """adadelta""" +303 8 training_loop """lcwa""" +303 8 evaluator """rankbased""" +303 9 dataset """kinships""" +303 9 model """hole""" +303 9 loss """crossentropy""" +303 9 regularizer """no""" +303 9 optimizer """adadelta""" +303 9 training_loop """lcwa""" +303 9 evaluator """rankbased""" +303 10 dataset """kinships""" +303 10 model """hole""" +303 10 loss """crossentropy""" +303 10 regularizer """no""" +303 10 optimizer """adadelta""" +303 10 training_loop """lcwa""" +303 10 evaluator """rankbased""" +303 11 dataset """kinships""" +303 11 model """hole""" +303 11 loss """crossentropy""" +303 11 regularizer """no""" +303 11 optimizer """adadelta""" +303 11 training_loop """lcwa""" +303 11 evaluator """rankbased""" +303 12 dataset """kinships""" +303 12 model """hole""" +303 12 loss """crossentropy""" +303 12 regularizer """no""" +303 12 optimizer """adadelta""" +303 12 training_loop """lcwa""" +303 12 evaluator """rankbased""" +303 13 dataset """kinships""" +303 13 model """hole""" +303 13 loss """crossentropy""" +303 13 regularizer """no""" +303 13 optimizer """adadelta""" +303 13 training_loop """lcwa""" +303 13 evaluator """rankbased""" +303 14 dataset """kinships""" +303 14 model """hole""" +303 14 loss """crossentropy""" +303 14 regularizer """no""" +303 14 optimizer """adadelta""" +303 14 training_loop """lcwa""" +303 14 evaluator """rankbased""" +303 15 dataset """kinships""" +303 15 model """hole""" +303 15 loss """crossentropy""" +303 15 regularizer """no""" +303 15 optimizer """adadelta""" +303 15 training_loop """lcwa""" +303 15 evaluator """rankbased""" +303 16 dataset """kinships""" +303 16 model """hole""" +303 16 loss """crossentropy""" +303 16 regularizer """no""" +303 16 optimizer """adadelta""" +303 16 training_loop """lcwa""" +303 16 evaluator """rankbased""" 
+303 17 dataset """kinships""" +303 17 model """hole""" +303 17 loss """crossentropy""" +303 17 regularizer """no""" +303 17 optimizer """adadelta""" +303 17 training_loop """lcwa""" +303 17 evaluator """rankbased""" +303 18 dataset """kinships""" +303 18 model """hole""" +303 18 loss """crossentropy""" +303 18 regularizer """no""" +303 18 optimizer """adadelta""" +303 18 training_loop """lcwa""" +303 18 evaluator """rankbased""" +303 19 dataset """kinships""" +303 19 model """hole""" +303 19 loss """crossentropy""" +303 19 regularizer """no""" +303 19 optimizer """adadelta""" +303 19 training_loop """lcwa""" +303 19 evaluator """rankbased""" +303 20 dataset """kinships""" +303 20 model """hole""" +303 20 loss """crossentropy""" +303 20 regularizer """no""" +303 20 optimizer """adadelta""" +303 20 training_loop """lcwa""" +303 20 evaluator """rankbased""" +303 21 dataset """kinships""" +303 21 model """hole""" +303 21 loss """crossentropy""" +303 21 regularizer """no""" +303 21 optimizer """adadelta""" +303 21 training_loop """lcwa""" +303 21 evaluator """rankbased""" +303 22 dataset """kinships""" +303 22 model """hole""" +303 22 loss """crossentropy""" +303 22 regularizer """no""" +303 22 optimizer """adadelta""" +303 22 training_loop """lcwa""" +303 22 evaluator """rankbased""" +303 23 dataset """kinships""" +303 23 model """hole""" +303 23 loss """crossentropy""" +303 23 regularizer """no""" +303 23 optimizer """adadelta""" +303 23 training_loop """lcwa""" +303 23 evaluator """rankbased""" +303 24 dataset """kinships""" +303 24 model """hole""" +303 24 loss """crossentropy""" +303 24 regularizer """no""" +303 24 optimizer """adadelta""" +303 24 training_loop """lcwa""" +303 24 evaluator """rankbased""" +303 25 dataset """kinships""" +303 25 model """hole""" +303 25 loss """crossentropy""" +303 25 regularizer """no""" +303 25 optimizer """adadelta""" +303 25 training_loop """lcwa""" +303 25 evaluator """rankbased""" +303 26 dataset """kinships""" +303 26 model 
"""hole""" +303 26 loss """crossentropy""" +303 26 regularizer """no""" +303 26 optimizer """adadelta""" +303 26 training_loop """lcwa""" +303 26 evaluator """rankbased""" +303 27 dataset """kinships""" +303 27 model """hole""" +303 27 loss """crossentropy""" +303 27 regularizer """no""" +303 27 optimizer """adadelta""" +303 27 training_loop """lcwa""" +303 27 evaluator """rankbased""" +303 28 dataset """kinships""" +303 28 model """hole""" +303 28 loss """crossentropy""" +303 28 regularizer """no""" +303 28 optimizer """adadelta""" +303 28 training_loop """lcwa""" +303 28 evaluator """rankbased""" +303 29 dataset """kinships""" +303 29 model """hole""" +303 29 loss """crossentropy""" +303 29 regularizer """no""" +303 29 optimizer """adadelta""" +303 29 training_loop """lcwa""" +303 29 evaluator """rankbased""" +303 30 dataset """kinships""" +303 30 model """hole""" +303 30 loss """crossentropy""" +303 30 regularizer """no""" +303 30 optimizer """adadelta""" +303 30 training_loop """lcwa""" +303 30 evaluator """rankbased""" +303 31 dataset """kinships""" +303 31 model """hole""" +303 31 loss """crossentropy""" +303 31 regularizer """no""" +303 31 optimizer """adadelta""" +303 31 training_loop """lcwa""" +303 31 evaluator """rankbased""" +303 32 dataset """kinships""" +303 32 model """hole""" +303 32 loss """crossentropy""" +303 32 regularizer """no""" +303 32 optimizer """adadelta""" +303 32 training_loop """lcwa""" +303 32 evaluator """rankbased""" +303 33 dataset """kinships""" +303 33 model """hole""" +303 33 loss """crossentropy""" +303 33 regularizer """no""" +303 33 optimizer """adadelta""" +303 33 training_loop """lcwa""" +303 33 evaluator """rankbased""" +303 34 dataset """kinships""" +303 34 model """hole""" +303 34 loss """crossentropy""" +303 34 regularizer """no""" +303 34 optimizer """adadelta""" +303 34 training_loop """lcwa""" +303 34 evaluator """rankbased""" +303 35 dataset """kinships""" +303 35 model """hole""" +303 35 loss """crossentropy""" 
+303 35 regularizer """no""" +303 35 optimizer """adadelta""" +303 35 training_loop """lcwa""" +303 35 evaluator """rankbased""" +303 36 dataset """kinships""" +303 36 model """hole""" +303 36 loss """crossentropy""" +303 36 regularizer """no""" +303 36 optimizer """adadelta""" +303 36 training_loop """lcwa""" +303 36 evaluator """rankbased""" +303 37 dataset """kinships""" +303 37 model """hole""" +303 37 loss """crossentropy""" +303 37 regularizer """no""" +303 37 optimizer """adadelta""" +303 37 training_loop """lcwa""" +303 37 evaluator """rankbased""" +303 38 dataset """kinships""" +303 38 model """hole""" +303 38 loss """crossentropy""" +303 38 regularizer """no""" +303 38 optimizer """adadelta""" +303 38 training_loop """lcwa""" +303 38 evaluator """rankbased""" +303 39 dataset """kinships""" +303 39 model """hole""" +303 39 loss """crossentropy""" +303 39 regularizer """no""" +303 39 optimizer """adadelta""" +303 39 training_loop """lcwa""" +303 39 evaluator """rankbased""" +303 40 dataset """kinships""" +303 40 model """hole""" +303 40 loss """crossentropy""" +303 40 regularizer """no""" +303 40 optimizer """adadelta""" +303 40 training_loop """lcwa""" +303 40 evaluator """rankbased""" +303 41 dataset """kinships""" +303 41 model """hole""" +303 41 loss """crossentropy""" +303 41 regularizer """no""" +303 41 optimizer """adadelta""" +303 41 training_loop """lcwa""" +303 41 evaluator """rankbased""" +303 42 dataset """kinships""" +303 42 model """hole""" +303 42 loss """crossentropy""" +303 42 regularizer """no""" +303 42 optimizer """adadelta""" +303 42 training_loop """lcwa""" +303 42 evaluator """rankbased""" +303 43 dataset """kinships""" +303 43 model """hole""" +303 43 loss """crossentropy""" +303 43 regularizer """no""" +303 43 optimizer """adadelta""" +303 43 training_loop """lcwa""" +303 43 evaluator """rankbased""" +303 44 dataset """kinships""" +303 44 model """hole""" +303 44 loss """crossentropy""" +303 44 regularizer """no""" +303 44 optimizer 
"""adadelta""" +303 44 training_loop """lcwa""" +303 44 evaluator """rankbased""" +303 45 dataset """kinships""" +303 45 model """hole""" +303 45 loss """crossentropy""" +303 45 regularizer """no""" +303 45 optimizer """adadelta""" +303 45 training_loop """lcwa""" +303 45 evaluator """rankbased""" +303 46 dataset """kinships""" +303 46 model """hole""" +303 46 loss """crossentropy""" +303 46 regularizer """no""" +303 46 optimizer """adadelta""" +303 46 training_loop """lcwa""" +303 46 evaluator """rankbased""" +303 47 dataset """kinships""" +303 47 model """hole""" +303 47 loss """crossentropy""" +303 47 regularizer """no""" +303 47 optimizer """adadelta""" +303 47 training_loop """lcwa""" +303 47 evaluator """rankbased""" +303 48 dataset """kinships""" +303 48 model """hole""" +303 48 loss """crossentropy""" +303 48 regularizer """no""" +303 48 optimizer """adadelta""" +303 48 training_loop """lcwa""" +303 48 evaluator """rankbased""" +303 49 dataset """kinships""" +303 49 model """hole""" +303 49 loss """crossentropy""" +303 49 regularizer """no""" +303 49 optimizer """adadelta""" +303 49 training_loop """lcwa""" +303 49 evaluator """rankbased""" +303 50 dataset """kinships""" +303 50 model """hole""" +303 50 loss """crossentropy""" +303 50 regularizer """no""" +303 50 optimizer """adadelta""" +303 50 training_loop """lcwa""" +303 50 evaluator """rankbased""" +303 51 dataset """kinships""" +303 51 model """hole""" +303 51 loss """crossentropy""" +303 51 regularizer """no""" +303 51 optimizer """adadelta""" +303 51 training_loop """lcwa""" +303 51 evaluator """rankbased""" +303 52 dataset """kinships""" +303 52 model """hole""" +303 52 loss """crossentropy""" +303 52 regularizer """no""" +303 52 optimizer """adadelta""" +303 52 training_loop """lcwa""" +303 52 evaluator """rankbased""" +303 53 dataset """kinships""" +303 53 model """hole""" +303 53 loss """crossentropy""" +303 53 regularizer """no""" +303 53 optimizer """adadelta""" +303 53 training_loop 
"""lcwa""" +303 53 evaluator """rankbased""" +303 54 dataset """kinships""" +303 54 model """hole""" +303 54 loss """crossentropy""" +303 54 regularizer """no""" +303 54 optimizer """adadelta""" +303 54 training_loop """lcwa""" +303 54 evaluator """rankbased""" +303 55 dataset """kinships""" +303 55 model """hole""" +303 55 loss """crossentropy""" +303 55 regularizer """no""" +303 55 optimizer """adadelta""" +303 55 training_loop """lcwa""" +303 55 evaluator """rankbased""" +303 56 dataset """kinships""" +303 56 model """hole""" +303 56 loss """crossentropy""" +303 56 regularizer """no""" +303 56 optimizer """adadelta""" +303 56 training_loop """lcwa""" +303 56 evaluator """rankbased""" +303 57 dataset """kinships""" +303 57 model """hole""" +303 57 loss """crossentropy""" +303 57 regularizer """no""" +303 57 optimizer """adadelta""" +303 57 training_loop """lcwa""" +303 57 evaluator """rankbased""" +303 58 dataset """kinships""" +303 58 model """hole""" +303 58 loss """crossentropy""" +303 58 regularizer """no""" +303 58 optimizer """adadelta""" +303 58 training_loop """lcwa""" +303 58 evaluator """rankbased""" +303 59 dataset """kinships""" +303 59 model """hole""" +303 59 loss """crossentropy""" +303 59 regularizer """no""" +303 59 optimizer """adadelta""" +303 59 training_loop """lcwa""" +303 59 evaluator """rankbased""" +303 60 dataset """kinships""" +303 60 model """hole""" +303 60 loss """crossentropy""" +303 60 regularizer """no""" +303 60 optimizer """adadelta""" +303 60 training_loop """lcwa""" +303 60 evaluator """rankbased""" +303 61 dataset """kinships""" +303 61 model """hole""" +303 61 loss """crossentropy""" +303 61 regularizer """no""" +303 61 optimizer """adadelta""" +303 61 training_loop """lcwa""" +303 61 evaluator """rankbased""" +303 62 dataset """kinships""" +303 62 model """hole""" +303 62 loss """crossentropy""" +303 62 regularizer """no""" +303 62 optimizer """adadelta""" +303 62 training_loop """lcwa""" +303 62 evaluator """rankbased""" 
+303 63 dataset """kinships""" +303 63 model """hole""" +303 63 loss """crossentropy""" +303 63 regularizer """no""" +303 63 optimizer """adadelta""" +303 63 training_loop """lcwa""" +303 63 evaluator """rankbased""" +303 64 dataset """kinships""" +303 64 model """hole""" +303 64 loss """crossentropy""" +303 64 regularizer """no""" +303 64 optimizer """adadelta""" +303 64 training_loop """lcwa""" +303 64 evaluator """rankbased""" +303 65 dataset """kinships""" +303 65 model """hole""" +303 65 loss """crossentropy""" +303 65 regularizer """no""" +303 65 optimizer """adadelta""" +303 65 training_loop """lcwa""" +303 65 evaluator """rankbased""" +303 66 dataset """kinships""" +303 66 model """hole""" +303 66 loss """crossentropy""" +303 66 regularizer """no""" +303 66 optimizer """adadelta""" +303 66 training_loop """lcwa""" +303 66 evaluator """rankbased""" +303 67 dataset """kinships""" +303 67 model """hole""" +303 67 loss """crossentropy""" +303 67 regularizer """no""" +303 67 optimizer """adadelta""" +303 67 training_loop """lcwa""" +303 67 evaluator """rankbased""" +303 68 dataset """kinships""" +303 68 model """hole""" +303 68 loss """crossentropy""" +303 68 regularizer """no""" +303 68 optimizer """adadelta""" +303 68 training_loop """lcwa""" +303 68 evaluator """rankbased""" +303 69 dataset """kinships""" +303 69 model """hole""" +303 69 loss """crossentropy""" +303 69 regularizer """no""" +303 69 optimizer """adadelta""" +303 69 training_loop """lcwa""" +303 69 evaluator """rankbased""" +303 70 dataset """kinships""" +303 70 model """hole""" +303 70 loss """crossentropy""" +303 70 regularizer """no""" +303 70 optimizer """adadelta""" +303 70 training_loop """lcwa""" +303 70 evaluator """rankbased""" +303 71 dataset """kinships""" +303 71 model """hole""" +303 71 loss """crossentropy""" +303 71 regularizer """no""" +303 71 optimizer """adadelta""" +303 71 training_loop """lcwa""" +303 71 evaluator """rankbased""" +303 72 dataset """kinships""" +303 72 model 
"""hole""" +303 72 loss """crossentropy""" +303 72 regularizer """no""" +303 72 optimizer """adadelta""" +303 72 training_loop """lcwa""" +303 72 evaluator """rankbased""" +303 73 dataset """kinships""" +303 73 model """hole""" +303 73 loss """crossentropy""" +303 73 regularizer """no""" +303 73 optimizer """adadelta""" +303 73 training_loop """lcwa""" +303 73 evaluator """rankbased""" +303 74 dataset """kinships""" +303 74 model """hole""" +303 74 loss """crossentropy""" +303 74 regularizer """no""" +303 74 optimizer """adadelta""" +303 74 training_loop """lcwa""" +303 74 evaluator """rankbased""" +303 75 dataset """kinships""" +303 75 model """hole""" +303 75 loss """crossentropy""" +303 75 regularizer """no""" +303 75 optimizer """adadelta""" +303 75 training_loop """lcwa""" +303 75 evaluator """rankbased""" +303 76 dataset """kinships""" +303 76 model """hole""" +303 76 loss """crossentropy""" +303 76 regularizer """no""" +303 76 optimizer """adadelta""" +303 76 training_loop """lcwa""" +303 76 evaluator """rankbased""" +303 77 dataset """kinships""" +303 77 model """hole""" +303 77 loss """crossentropy""" +303 77 regularizer """no""" +303 77 optimizer """adadelta""" +303 77 training_loop """lcwa""" +303 77 evaluator """rankbased""" +303 78 dataset """kinships""" +303 78 model """hole""" +303 78 loss """crossentropy""" +303 78 regularizer """no""" +303 78 optimizer """adadelta""" +303 78 training_loop """lcwa""" +303 78 evaluator """rankbased""" +303 79 dataset """kinships""" +303 79 model """hole""" +303 79 loss """crossentropy""" +303 79 regularizer """no""" +303 79 optimizer """adadelta""" +303 79 training_loop """lcwa""" +303 79 evaluator """rankbased""" +303 80 dataset """kinships""" +303 80 model """hole""" +303 80 loss """crossentropy""" +303 80 regularizer """no""" +303 80 optimizer """adadelta""" +303 80 training_loop """lcwa""" +303 80 evaluator """rankbased""" +303 81 dataset """kinships""" +303 81 model """hole""" +303 81 loss """crossentropy""" 
+303 81 regularizer """no""" +303 81 optimizer """adadelta""" +303 81 training_loop """lcwa""" +303 81 evaluator """rankbased""" +303 82 dataset """kinships""" +303 82 model """hole""" +303 82 loss """crossentropy""" +303 82 regularizer """no""" +303 82 optimizer """adadelta""" +303 82 training_loop """lcwa""" +303 82 evaluator """rankbased""" +303 83 dataset """kinships""" +303 83 model """hole""" +303 83 loss """crossentropy""" +303 83 regularizer """no""" +303 83 optimizer """adadelta""" +303 83 training_loop """lcwa""" +303 83 evaluator """rankbased""" +303 84 dataset """kinships""" +303 84 model """hole""" +303 84 loss """crossentropy""" +303 84 regularizer """no""" +303 84 optimizer """adadelta""" +303 84 training_loop """lcwa""" +303 84 evaluator """rankbased""" +303 85 dataset """kinships""" +303 85 model """hole""" +303 85 loss """crossentropy""" +303 85 regularizer """no""" +303 85 optimizer """adadelta""" +303 85 training_loop """lcwa""" +303 85 evaluator """rankbased""" +303 86 dataset """kinships""" +303 86 model """hole""" +303 86 loss """crossentropy""" +303 86 regularizer """no""" +303 86 optimizer """adadelta""" +303 86 training_loop """lcwa""" +303 86 evaluator """rankbased""" +303 87 dataset """kinships""" +303 87 model """hole""" +303 87 loss """crossentropy""" +303 87 regularizer """no""" +303 87 optimizer """adadelta""" +303 87 training_loop """lcwa""" +303 87 evaluator """rankbased""" +303 88 dataset """kinships""" +303 88 model """hole""" +303 88 loss """crossentropy""" +303 88 regularizer """no""" +303 88 optimizer """adadelta""" +303 88 training_loop """lcwa""" +303 88 evaluator """rankbased""" +303 89 dataset """kinships""" +303 89 model """hole""" +303 89 loss """crossentropy""" +303 89 regularizer """no""" +303 89 optimizer """adadelta""" +303 89 training_loop """lcwa""" +303 89 evaluator """rankbased""" +303 90 dataset """kinships""" +303 90 model """hole""" +303 90 loss """crossentropy""" +303 90 regularizer """no""" +303 90 optimizer 
"""adadelta""" +303 90 training_loop """lcwa""" +303 90 evaluator """rankbased""" +303 91 dataset """kinships""" +303 91 model """hole""" +303 91 loss """crossentropy""" +303 91 regularizer """no""" +303 91 optimizer """adadelta""" +303 91 training_loop """lcwa""" +303 91 evaluator """rankbased""" +303 92 dataset """kinships""" +303 92 model """hole""" +303 92 loss """crossentropy""" +303 92 regularizer """no""" +303 92 optimizer """adadelta""" +303 92 training_loop """lcwa""" +303 92 evaluator """rankbased""" +303 93 dataset """kinships""" +303 93 model """hole""" +303 93 loss """crossentropy""" +303 93 regularizer """no""" +303 93 optimizer """adadelta""" +303 93 training_loop """lcwa""" +303 93 evaluator """rankbased""" +303 94 dataset """kinships""" +303 94 model """hole""" +303 94 loss """crossentropy""" +303 94 regularizer """no""" +303 94 optimizer """adadelta""" +303 94 training_loop """lcwa""" +303 94 evaluator """rankbased""" +303 95 dataset """kinships""" +303 95 model """hole""" +303 95 loss """crossentropy""" +303 95 regularizer """no""" +303 95 optimizer """adadelta""" +303 95 training_loop """lcwa""" +303 95 evaluator """rankbased""" +303 96 dataset """kinships""" +303 96 model """hole""" +303 96 loss """crossentropy""" +303 96 regularizer """no""" +303 96 optimizer """adadelta""" +303 96 training_loop """lcwa""" +303 96 evaluator """rankbased""" +303 97 dataset """kinships""" +303 97 model """hole""" +303 97 loss """crossentropy""" +303 97 regularizer """no""" +303 97 optimizer """adadelta""" +303 97 training_loop """lcwa""" +303 97 evaluator """rankbased""" +303 98 dataset """kinships""" +303 98 model """hole""" +303 98 loss """crossentropy""" +303 98 regularizer """no""" +303 98 optimizer """adadelta""" +303 98 training_loop """lcwa""" +303 98 evaluator """rankbased""" +303 99 dataset """kinships""" +303 99 model """hole""" +303 99 loss """crossentropy""" +303 99 regularizer """no""" +303 99 optimizer """adadelta""" +303 99 training_loop 
"""lcwa""" +303 99 evaluator """rankbased""" +303 100 dataset """kinships""" +303 100 model """hole""" +303 100 loss """crossentropy""" +303 100 regularizer """no""" +303 100 optimizer """adadelta""" +303 100 training_loop """lcwa""" +303 100 evaluator """rankbased""" +304 1 model.embedding_dim 2.0 +304 1 negative_sampler.num_negs_per_pos 84.0 +304 1 training.batch_size 1.0 +304 2 model.embedding_dim 1.0 +304 2 negative_sampler.num_negs_per_pos 87.0 +304 2 training.batch_size 0.0 +304 3 model.embedding_dim 0.0 +304 3 negative_sampler.num_negs_per_pos 27.0 +304 3 training.batch_size 2.0 +304 4 model.embedding_dim 0.0 +304 4 negative_sampler.num_negs_per_pos 53.0 +304 4 training.batch_size 1.0 +304 5 model.embedding_dim 2.0 +304 5 negative_sampler.num_negs_per_pos 57.0 +304 5 training.batch_size 0.0 +304 6 model.embedding_dim 0.0 +304 6 negative_sampler.num_negs_per_pos 23.0 +304 6 training.batch_size 2.0 +304 7 model.embedding_dim 1.0 +304 7 negative_sampler.num_negs_per_pos 71.0 +304 7 training.batch_size 1.0 +304 8 model.embedding_dim 2.0 +304 8 negative_sampler.num_negs_per_pos 32.0 +304 8 training.batch_size 1.0 +304 9 model.embedding_dim 2.0 +304 9 negative_sampler.num_negs_per_pos 62.0 +304 9 training.batch_size 0.0 +304 10 model.embedding_dim 0.0 +304 10 negative_sampler.num_negs_per_pos 68.0 +304 10 training.batch_size 0.0 +304 11 model.embedding_dim 0.0 +304 11 negative_sampler.num_negs_per_pos 56.0 +304 11 training.batch_size 1.0 +304 12 model.embedding_dim 2.0 +304 12 negative_sampler.num_negs_per_pos 39.0 +304 12 training.batch_size 1.0 +304 13 model.embedding_dim 0.0 +304 13 negative_sampler.num_negs_per_pos 48.0 +304 13 training.batch_size 2.0 +304 14 model.embedding_dim 1.0 +304 14 negative_sampler.num_negs_per_pos 89.0 +304 14 training.batch_size 1.0 +304 15 model.embedding_dim 1.0 +304 15 negative_sampler.num_negs_per_pos 64.0 +304 15 training.batch_size 1.0 +304 16 model.embedding_dim 1.0 +304 16 negative_sampler.num_negs_per_pos 4.0 +304 16 
training.batch_size 2.0 +304 17 model.embedding_dim 1.0 +304 17 negative_sampler.num_negs_per_pos 96.0 +304 17 training.batch_size 1.0 +304 18 model.embedding_dim 0.0 +304 18 negative_sampler.num_negs_per_pos 87.0 +304 18 training.batch_size 2.0 +304 19 model.embedding_dim 0.0 +304 19 negative_sampler.num_negs_per_pos 46.0 +304 19 training.batch_size 1.0 +304 20 model.embedding_dim 0.0 +304 20 negative_sampler.num_negs_per_pos 46.0 +304 20 training.batch_size 1.0 +304 21 model.embedding_dim 1.0 +304 21 negative_sampler.num_negs_per_pos 50.0 +304 21 training.batch_size 0.0 +304 22 model.embedding_dim 2.0 +304 22 negative_sampler.num_negs_per_pos 88.0 +304 22 training.batch_size 0.0 +304 23 model.embedding_dim 1.0 +304 23 negative_sampler.num_negs_per_pos 91.0 +304 23 training.batch_size 2.0 +304 24 model.embedding_dim 2.0 +304 24 negative_sampler.num_negs_per_pos 0.0 +304 24 training.batch_size 1.0 +304 25 model.embedding_dim 1.0 +304 25 negative_sampler.num_negs_per_pos 53.0 +304 25 training.batch_size 0.0 +304 26 model.embedding_dim 2.0 +304 26 negative_sampler.num_negs_per_pos 34.0 +304 26 training.batch_size 2.0 +304 27 model.embedding_dim 2.0 +304 27 negative_sampler.num_negs_per_pos 96.0 +304 27 training.batch_size 1.0 +304 28 model.embedding_dim 1.0 +304 28 negative_sampler.num_negs_per_pos 39.0 +304 28 training.batch_size 0.0 +304 29 model.embedding_dim 2.0 +304 29 negative_sampler.num_negs_per_pos 77.0 +304 29 training.batch_size 1.0 +304 30 model.embedding_dim 2.0 +304 30 negative_sampler.num_negs_per_pos 66.0 +304 30 training.batch_size 0.0 +304 31 model.embedding_dim 1.0 +304 31 negative_sampler.num_negs_per_pos 56.0 +304 31 training.batch_size 2.0 +304 32 model.embedding_dim 1.0 +304 32 negative_sampler.num_negs_per_pos 95.0 +304 32 training.batch_size 0.0 +304 33 model.embedding_dim 0.0 +304 33 negative_sampler.num_negs_per_pos 9.0 +304 33 training.batch_size 0.0 +304 34 model.embedding_dim 2.0 +304 34 negative_sampler.num_negs_per_pos 80.0 +304 34 
training.batch_size 1.0 +304 35 model.embedding_dim 2.0 +304 35 negative_sampler.num_negs_per_pos 79.0 +304 35 training.batch_size 2.0 +304 36 model.embedding_dim 1.0 +304 36 negative_sampler.num_negs_per_pos 93.0 +304 36 training.batch_size 2.0 +304 37 model.embedding_dim 1.0 +304 37 negative_sampler.num_negs_per_pos 15.0 +304 37 training.batch_size 1.0 +304 38 model.embedding_dim 2.0 +304 38 negative_sampler.num_negs_per_pos 82.0 +304 38 training.batch_size 0.0 +304 39 model.embedding_dim 2.0 +304 39 negative_sampler.num_negs_per_pos 47.0 +304 39 training.batch_size 1.0 +304 40 model.embedding_dim 2.0 +304 40 negative_sampler.num_negs_per_pos 27.0 +304 40 training.batch_size 0.0 +304 41 model.embedding_dim 0.0 +304 41 negative_sampler.num_negs_per_pos 58.0 +304 41 training.batch_size 1.0 +304 42 model.embedding_dim 0.0 +304 42 negative_sampler.num_negs_per_pos 8.0 +304 42 training.batch_size 2.0 +304 43 model.embedding_dim 0.0 +304 43 negative_sampler.num_negs_per_pos 99.0 +304 43 training.batch_size 0.0 +304 44 model.embedding_dim 0.0 +304 44 negative_sampler.num_negs_per_pos 21.0 +304 44 training.batch_size 0.0 +304 45 model.embedding_dim 1.0 +304 45 negative_sampler.num_negs_per_pos 44.0 +304 45 training.batch_size 2.0 +304 46 model.embedding_dim 2.0 +304 46 negative_sampler.num_negs_per_pos 45.0 +304 46 training.batch_size 2.0 +304 47 model.embedding_dim 2.0 +304 47 negative_sampler.num_negs_per_pos 36.0 +304 47 training.batch_size 2.0 +304 48 model.embedding_dim 0.0 +304 48 negative_sampler.num_negs_per_pos 2.0 +304 48 training.batch_size 0.0 +304 49 model.embedding_dim 2.0 +304 49 negative_sampler.num_negs_per_pos 96.0 +304 49 training.batch_size 1.0 +304 50 model.embedding_dim 1.0 +304 50 negative_sampler.num_negs_per_pos 99.0 +304 50 training.batch_size 1.0 +304 51 model.embedding_dim 0.0 +304 51 negative_sampler.num_negs_per_pos 63.0 +304 51 training.batch_size 1.0 +304 52 model.embedding_dim 1.0 +304 52 negative_sampler.num_negs_per_pos 0.0 +304 52 
training.batch_size 2.0 +304 53 model.embedding_dim 1.0 +304 53 negative_sampler.num_negs_per_pos 46.0 +304 53 training.batch_size 1.0 +304 54 model.embedding_dim 0.0 +304 54 negative_sampler.num_negs_per_pos 13.0 +304 54 training.batch_size 0.0 +304 55 model.embedding_dim 0.0 +304 55 negative_sampler.num_negs_per_pos 18.0 +304 55 training.batch_size 2.0 +304 56 model.embedding_dim 2.0 +304 56 negative_sampler.num_negs_per_pos 57.0 +304 56 training.batch_size 1.0 +304 57 model.embedding_dim 0.0 +304 57 negative_sampler.num_negs_per_pos 28.0 +304 57 training.batch_size 2.0 +304 58 model.embedding_dim 2.0 +304 58 negative_sampler.num_negs_per_pos 42.0 +304 58 training.batch_size 0.0 +304 59 model.embedding_dim 0.0 +304 59 negative_sampler.num_negs_per_pos 83.0 +304 59 training.batch_size 0.0 +304 60 model.embedding_dim 0.0 +304 60 negative_sampler.num_negs_per_pos 33.0 +304 60 training.batch_size 1.0 +304 61 model.embedding_dim 1.0 +304 61 negative_sampler.num_negs_per_pos 68.0 +304 61 training.batch_size 0.0 +304 62 model.embedding_dim 2.0 +304 62 negative_sampler.num_negs_per_pos 41.0 +304 62 training.batch_size 1.0 +304 63 model.embedding_dim 2.0 +304 63 negative_sampler.num_negs_per_pos 10.0 +304 63 training.batch_size 1.0 +304 64 model.embedding_dim 2.0 +304 64 negative_sampler.num_negs_per_pos 14.0 +304 64 training.batch_size 1.0 +304 65 model.embedding_dim 0.0 +304 65 negative_sampler.num_negs_per_pos 52.0 +304 65 training.batch_size 0.0 +304 66 model.embedding_dim 2.0 +304 66 negative_sampler.num_negs_per_pos 51.0 +304 66 training.batch_size 2.0 +304 67 model.embedding_dim 0.0 +304 67 negative_sampler.num_negs_per_pos 34.0 +304 67 training.batch_size 0.0 +304 68 model.embedding_dim 1.0 +304 68 negative_sampler.num_negs_per_pos 56.0 +304 68 training.batch_size 1.0 +304 69 model.embedding_dim 2.0 +304 69 negative_sampler.num_negs_per_pos 33.0 +304 69 training.batch_size 1.0 +304 70 model.embedding_dim 1.0 +304 70 negative_sampler.num_negs_per_pos 55.0 +304 70 
training.batch_size 2.0 +304 71 model.embedding_dim 0.0 +304 71 negative_sampler.num_negs_per_pos 87.0 +304 71 training.batch_size 2.0 +304 72 model.embedding_dim 2.0 +304 72 negative_sampler.num_negs_per_pos 18.0 +304 72 training.batch_size 1.0 +304 73 model.embedding_dim 1.0 +304 73 negative_sampler.num_negs_per_pos 85.0 +304 73 training.batch_size 2.0 +304 74 model.embedding_dim 1.0 +304 74 negative_sampler.num_negs_per_pos 59.0 +304 74 training.batch_size 1.0 +304 75 model.embedding_dim 0.0 +304 75 negative_sampler.num_negs_per_pos 51.0 +304 75 training.batch_size 1.0 +304 76 model.embedding_dim 1.0 +304 76 negative_sampler.num_negs_per_pos 88.0 +304 76 training.batch_size 2.0 +304 77 model.embedding_dim 0.0 +304 77 negative_sampler.num_negs_per_pos 42.0 +304 77 training.batch_size 2.0 +304 78 model.embedding_dim 2.0 +304 78 negative_sampler.num_negs_per_pos 67.0 +304 78 training.batch_size 2.0 +304 79 model.embedding_dim 2.0 +304 79 negative_sampler.num_negs_per_pos 49.0 +304 79 training.batch_size 2.0 +304 80 model.embedding_dim 2.0 +304 80 negative_sampler.num_negs_per_pos 57.0 +304 80 training.batch_size 2.0 +304 81 model.embedding_dim 1.0 +304 81 negative_sampler.num_negs_per_pos 56.0 +304 81 training.batch_size 2.0 +304 82 model.embedding_dim 2.0 +304 82 negative_sampler.num_negs_per_pos 55.0 +304 82 training.batch_size 0.0 +304 83 model.embedding_dim 2.0 +304 83 negative_sampler.num_negs_per_pos 16.0 +304 83 training.batch_size 2.0 +304 84 model.embedding_dim 2.0 +304 84 negative_sampler.num_negs_per_pos 29.0 +304 84 training.batch_size 1.0 +304 85 model.embedding_dim 2.0 +304 85 negative_sampler.num_negs_per_pos 97.0 +304 85 training.batch_size 1.0 +304 86 model.embedding_dim 0.0 +304 86 negative_sampler.num_negs_per_pos 31.0 +304 86 training.batch_size 2.0 +304 87 model.embedding_dim 0.0 +304 87 negative_sampler.num_negs_per_pos 89.0 +304 87 training.batch_size 2.0 +304 88 model.embedding_dim 1.0 +304 88 negative_sampler.num_negs_per_pos 66.0 +304 88 
training.batch_size 0.0 +304 89 model.embedding_dim 0.0 +304 89 negative_sampler.num_negs_per_pos 86.0 +304 89 training.batch_size 2.0 +304 90 model.embedding_dim 1.0 +304 90 negative_sampler.num_negs_per_pos 72.0 +304 90 training.batch_size 1.0 +304 91 model.embedding_dim 0.0 +304 91 negative_sampler.num_negs_per_pos 79.0 +304 91 training.batch_size 2.0 +304 92 model.embedding_dim 2.0 +304 92 negative_sampler.num_negs_per_pos 27.0 +304 92 training.batch_size 2.0 +304 93 model.embedding_dim 2.0 +304 93 negative_sampler.num_negs_per_pos 81.0 +304 93 training.batch_size 0.0 +304 94 model.embedding_dim 2.0 +304 94 negative_sampler.num_negs_per_pos 16.0 +304 94 training.batch_size 2.0 +304 95 model.embedding_dim 1.0 +304 95 negative_sampler.num_negs_per_pos 69.0 +304 95 training.batch_size 0.0 +304 96 model.embedding_dim 0.0 +304 96 negative_sampler.num_negs_per_pos 91.0 +304 96 training.batch_size 0.0 +304 97 model.embedding_dim 2.0 +304 97 negative_sampler.num_negs_per_pos 48.0 +304 97 training.batch_size 2.0 +304 98 model.embedding_dim 0.0 +304 98 negative_sampler.num_negs_per_pos 31.0 +304 98 training.batch_size 1.0 +304 99 model.embedding_dim 1.0 +304 99 negative_sampler.num_negs_per_pos 45.0 +304 99 training.batch_size 1.0 +304 100 model.embedding_dim 0.0 +304 100 negative_sampler.num_negs_per_pos 1.0 +304 100 training.batch_size 0.0 +304 1 dataset """kinships""" +304 1 model """hole""" +304 1 loss """bceaftersigmoid""" +304 1 regularizer """no""" +304 1 optimizer """adadelta""" +304 1 training_loop """owa""" +304 1 negative_sampler """basic""" +304 1 evaluator """rankbased""" +304 2 dataset """kinships""" +304 2 model """hole""" +304 2 loss """bceaftersigmoid""" +304 2 regularizer """no""" +304 2 optimizer """adadelta""" +304 2 training_loop """owa""" +304 2 negative_sampler """basic""" +304 2 evaluator """rankbased""" +304 3 dataset """kinships""" +304 3 model """hole""" +304 3 loss """bceaftersigmoid""" +304 3 regularizer """no""" +304 3 optimizer 
"""adadelta""" +304 3 training_loop """owa""" +304 3 negative_sampler """basic""" +304 3 evaluator """rankbased""" +304 4 dataset """kinships""" +304 4 model """hole""" +304 4 loss """bceaftersigmoid""" +304 4 regularizer """no""" +304 4 optimizer """adadelta""" +304 4 training_loop """owa""" +304 4 negative_sampler """basic""" +304 4 evaluator """rankbased""" +304 5 dataset """kinships""" +304 5 model """hole""" +304 5 loss """bceaftersigmoid""" +304 5 regularizer """no""" +304 5 optimizer """adadelta""" +304 5 training_loop """owa""" +304 5 negative_sampler """basic""" +304 5 evaluator """rankbased""" +304 6 dataset """kinships""" +304 6 model """hole""" +304 6 loss """bceaftersigmoid""" +304 6 regularizer """no""" +304 6 optimizer """adadelta""" +304 6 training_loop """owa""" +304 6 negative_sampler """basic""" +304 6 evaluator """rankbased""" +304 7 dataset """kinships""" +304 7 model """hole""" +304 7 loss """bceaftersigmoid""" +304 7 regularizer """no""" +304 7 optimizer """adadelta""" +304 7 training_loop """owa""" +304 7 negative_sampler """basic""" +304 7 evaluator """rankbased""" +304 8 dataset """kinships""" +304 8 model """hole""" +304 8 loss """bceaftersigmoid""" +304 8 regularizer """no""" +304 8 optimizer """adadelta""" +304 8 training_loop """owa""" +304 8 negative_sampler """basic""" +304 8 evaluator """rankbased""" +304 9 dataset """kinships""" +304 9 model """hole""" +304 9 loss """bceaftersigmoid""" +304 9 regularizer """no""" +304 9 optimizer """adadelta""" +304 9 training_loop """owa""" +304 9 negative_sampler """basic""" +304 9 evaluator """rankbased""" +304 10 dataset """kinships""" +304 10 model """hole""" +304 10 loss """bceaftersigmoid""" +304 10 regularizer """no""" +304 10 optimizer """adadelta""" +304 10 training_loop """owa""" +304 10 negative_sampler """basic""" +304 10 evaluator """rankbased""" +304 11 dataset """kinships""" +304 11 model """hole""" +304 11 loss """bceaftersigmoid""" +304 11 regularizer """no""" +304 11 optimizer 
"""adadelta""" +304 11 training_loop """owa""" +304 11 negative_sampler """basic""" +304 11 evaluator """rankbased""" +304 12 dataset """kinships""" +304 12 model """hole""" +304 12 loss """bceaftersigmoid""" +304 12 regularizer """no""" +304 12 optimizer """adadelta""" +304 12 training_loop """owa""" +304 12 negative_sampler """basic""" +304 12 evaluator """rankbased""" +304 13 dataset """kinships""" +304 13 model """hole""" +304 13 loss """bceaftersigmoid""" +304 13 regularizer """no""" +304 13 optimizer """adadelta""" +304 13 training_loop """owa""" +304 13 negative_sampler """basic""" +304 13 evaluator """rankbased""" +304 14 dataset """kinships""" +304 14 model """hole""" +304 14 loss """bceaftersigmoid""" +304 14 regularizer """no""" +304 14 optimizer """adadelta""" +304 14 training_loop """owa""" +304 14 negative_sampler """basic""" +304 14 evaluator """rankbased""" +304 15 dataset """kinships""" +304 15 model """hole""" +304 15 loss """bceaftersigmoid""" +304 15 regularizer """no""" +304 15 optimizer """adadelta""" +304 15 training_loop """owa""" +304 15 negative_sampler """basic""" +304 15 evaluator """rankbased""" +304 16 dataset """kinships""" +304 16 model """hole""" +304 16 loss """bceaftersigmoid""" +304 16 regularizer """no""" +304 16 optimizer """adadelta""" +304 16 training_loop """owa""" +304 16 negative_sampler """basic""" +304 16 evaluator """rankbased""" +304 17 dataset """kinships""" +304 17 model """hole""" +304 17 loss """bceaftersigmoid""" +304 17 regularizer """no""" +304 17 optimizer """adadelta""" +304 17 training_loop """owa""" +304 17 negative_sampler """basic""" +304 17 evaluator """rankbased""" +304 18 dataset """kinships""" +304 18 model """hole""" +304 18 loss """bceaftersigmoid""" +304 18 regularizer """no""" +304 18 optimizer """adadelta""" +304 18 training_loop """owa""" +304 18 negative_sampler """basic""" +304 18 evaluator """rankbased""" +304 19 dataset """kinships""" +304 19 model """hole""" +304 19 loss 
"""bceaftersigmoid""" +304 19 regularizer """no""" +304 19 optimizer """adadelta""" +304 19 training_loop """owa""" +304 19 negative_sampler """basic""" +304 19 evaluator """rankbased""" +304 20 dataset """kinships""" +304 20 model """hole""" +304 20 loss """bceaftersigmoid""" +304 20 regularizer """no""" +304 20 optimizer """adadelta""" +304 20 training_loop """owa""" +304 20 negative_sampler """basic""" +304 20 evaluator """rankbased""" +304 21 dataset """kinships""" +304 21 model """hole""" +304 21 loss """bceaftersigmoid""" +304 21 regularizer """no""" +304 21 optimizer """adadelta""" +304 21 training_loop """owa""" +304 21 negative_sampler """basic""" +304 21 evaluator """rankbased""" +304 22 dataset """kinships""" +304 22 model """hole""" +304 22 loss """bceaftersigmoid""" +304 22 regularizer """no""" +304 22 optimizer """adadelta""" +304 22 training_loop """owa""" +304 22 negative_sampler """basic""" +304 22 evaluator """rankbased""" +304 23 dataset """kinships""" +304 23 model """hole""" +304 23 loss """bceaftersigmoid""" +304 23 regularizer """no""" +304 23 optimizer """adadelta""" +304 23 training_loop """owa""" +304 23 negative_sampler """basic""" +304 23 evaluator """rankbased""" +304 24 dataset """kinships""" +304 24 model """hole""" +304 24 loss """bceaftersigmoid""" +304 24 regularizer """no""" +304 24 optimizer """adadelta""" +304 24 training_loop """owa""" +304 24 negative_sampler """basic""" +304 24 evaluator """rankbased""" +304 25 dataset """kinships""" +304 25 model """hole""" +304 25 loss """bceaftersigmoid""" +304 25 regularizer """no""" +304 25 optimizer """adadelta""" +304 25 training_loop """owa""" +304 25 negative_sampler """basic""" +304 25 evaluator """rankbased""" +304 26 dataset """kinships""" +304 26 model """hole""" +304 26 loss """bceaftersigmoid""" +304 26 regularizer """no""" +304 26 optimizer """adadelta""" +304 26 training_loop """owa""" +304 26 negative_sampler """basic""" +304 26 evaluator """rankbased""" +304 27 dataset 
"""kinships""" +304 27 model """hole""" +304 27 loss """bceaftersigmoid""" +304 27 regularizer """no""" +304 27 optimizer """adadelta""" +304 27 training_loop """owa""" +304 27 negative_sampler """basic""" +304 27 evaluator """rankbased""" +304 28 dataset """kinships""" +304 28 model """hole""" +304 28 loss """bceaftersigmoid""" +304 28 regularizer """no""" +304 28 optimizer """adadelta""" +304 28 training_loop """owa""" +304 28 negative_sampler """basic""" +304 28 evaluator """rankbased""" +304 29 dataset """kinships""" +304 29 model """hole""" +304 29 loss """bceaftersigmoid""" +304 29 regularizer """no""" +304 29 optimizer """adadelta""" +304 29 training_loop """owa""" +304 29 negative_sampler """basic""" +304 29 evaluator """rankbased""" +304 30 dataset """kinships""" +304 30 model """hole""" +304 30 loss """bceaftersigmoid""" +304 30 regularizer """no""" +304 30 optimizer """adadelta""" +304 30 training_loop """owa""" +304 30 negative_sampler """basic""" +304 30 evaluator """rankbased""" +304 31 dataset """kinships""" +304 31 model """hole""" +304 31 loss """bceaftersigmoid""" +304 31 regularizer """no""" +304 31 optimizer """adadelta""" +304 31 training_loop """owa""" +304 31 negative_sampler """basic""" +304 31 evaluator """rankbased""" +304 32 dataset """kinships""" +304 32 model """hole""" +304 32 loss """bceaftersigmoid""" +304 32 regularizer """no""" +304 32 optimizer """adadelta""" +304 32 training_loop """owa""" +304 32 negative_sampler """basic""" +304 32 evaluator """rankbased""" +304 33 dataset """kinships""" +304 33 model """hole""" +304 33 loss """bceaftersigmoid""" +304 33 regularizer """no""" +304 33 optimizer """adadelta""" +304 33 training_loop """owa""" +304 33 negative_sampler """basic""" +304 33 evaluator """rankbased""" +304 34 dataset """kinships""" +304 34 model """hole""" +304 34 loss """bceaftersigmoid""" +304 34 regularizer """no""" +304 34 optimizer """adadelta""" +304 34 training_loop """owa""" +304 34 negative_sampler """basic""" 
+304 34 evaluator """rankbased""" +304 35 dataset """kinships""" +304 35 model """hole""" +304 35 loss """bceaftersigmoid""" +304 35 regularizer """no""" +304 35 optimizer """adadelta""" +304 35 training_loop """owa""" +304 35 negative_sampler """basic""" +304 35 evaluator """rankbased""" +304 36 dataset """kinships""" +304 36 model """hole""" +304 36 loss """bceaftersigmoid""" +304 36 regularizer """no""" +304 36 optimizer """adadelta""" +304 36 training_loop """owa""" +304 36 negative_sampler """basic""" +304 36 evaluator """rankbased""" +304 37 dataset """kinships""" +304 37 model """hole""" +304 37 loss """bceaftersigmoid""" +304 37 regularizer """no""" +304 37 optimizer """adadelta""" +304 37 training_loop """owa""" +304 37 negative_sampler """basic""" +304 37 evaluator """rankbased""" +304 38 dataset """kinships""" +304 38 model """hole""" +304 38 loss """bceaftersigmoid""" +304 38 regularizer """no""" +304 38 optimizer """adadelta""" +304 38 training_loop """owa""" +304 38 negative_sampler """basic""" +304 38 evaluator """rankbased""" +304 39 dataset """kinships""" +304 39 model """hole""" +304 39 loss """bceaftersigmoid""" +304 39 regularizer """no""" +304 39 optimizer """adadelta""" +304 39 training_loop """owa""" +304 39 negative_sampler """basic""" +304 39 evaluator """rankbased""" +304 40 dataset """kinships""" +304 40 model """hole""" +304 40 loss """bceaftersigmoid""" +304 40 regularizer """no""" +304 40 optimizer """adadelta""" +304 40 training_loop """owa""" +304 40 negative_sampler """basic""" +304 40 evaluator """rankbased""" +304 41 dataset """kinships""" +304 41 model """hole""" +304 41 loss """bceaftersigmoid""" +304 41 regularizer """no""" +304 41 optimizer """adadelta""" +304 41 training_loop """owa""" +304 41 negative_sampler """basic""" +304 41 evaluator """rankbased""" +304 42 dataset """kinships""" +304 42 model """hole""" +304 42 loss """bceaftersigmoid""" +304 42 regularizer """no""" +304 42 optimizer """adadelta""" +304 42 
training_loop """owa""" +304 42 negative_sampler """basic""" +304 42 evaluator """rankbased""" +304 43 dataset """kinships""" +304 43 model """hole""" +304 43 loss """bceaftersigmoid""" +304 43 regularizer """no""" +304 43 optimizer """adadelta""" +304 43 training_loop """owa""" +304 43 negative_sampler """basic""" +304 43 evaluator """rankbased""" +304 44 dataset """kinships""" +304 44 model """hole""" +304 44 loss """bceaftersigmoid""" +304 44 regularizer """no""" +304 44 optimizer """adadelta""" +304 44 training_loop """owa""" +304 44 negative_sampler """basic""" +304 44 evaluator """rankbased""" +304 45 dataset """kinships""" +304 45 model """hole""" +304 45 loss """bceaftersigmoid""" +304 45 regularizer """no""" +304 45 optimizer """adadelta""" +304 45 training_loop """owa""" +304 45 negative_sampler """basic""" +304 45 evaluator """rankbased""" +304 46 dataset """kinships""" +304 46 model """hole""" +304 46 loss """bceaftersigmoid""" +304 46 regularizer """no""" +304 46 optimizer """adadelta""" +304 46 training_loop """owa""" +304 46 negative_sampler """basic""" +304 46 evaluator """rankbased""" +304 47 dataset """kinships""" +304 47 model """hole""" +304 47 loss """bceaftersigmoid""" +304 47 regularizer """no""" +304 47 optimizer """adadelta""" +304 47 training_loop """owa""" +304 47 negative_sampler """basic""" +304 47 evaluator """rankbased""" +304 48 dataset """kinships""" +304 48 model """hole""" +304 48 loss """bceaftersigmoid""" +304 48 regularizer """no""" +304 48 optimizer """adadelta""" +304 48 training_loop """owa""" +304 48 negative_sampler """basic""" +304 48 evaluator """rankbased""" +304 49 dataset """kinships""" +304 49 model """hole""" +304 49 loss """bceaftersigmoid""" +304 49 regularizer """no""" +304 49 optimizer """adadelta""" +304 49 training_loop """owa""" +304 49 negative_sampler """basic""" +304 49 evaluator """rankbased""" +304 50 dataset """kinships""" +304 50 model """hole""" +304 50 loss """bceaftersigmoid""" +304 50 regularizer 
"""no""" +304 50 optimizer """adadelta""" +304 50 training_loop """owa""" +304 50 negative_sampler """basic""" +304 50 evaluator """rankbased""" +304 51 dataset """kinships""" +304 51 model """hole""" +304 51 loss """bceaftersigmoid""" +304 51 regularizer """no""" +304 51 optimizer """adadelta""" +304 51 training_loop """owa""" +304 51 negative_sampler """basic""" +304 51 evaluator """rankbased""" +304 52 dataset """kinships""" +304 52 model """hole""" +304 52 loss """bceaftersigmoid""" +304 52 regularizer """no""" +304 52 optimizer """adadelta""" +304 52 training_loop """owa""" +304 52 negative_sampler """basic""" +304 52 evaluator """rankbased""" +304 53 dataset """kinships""" +304 53 model """hole""" +304 53 loss """bceaftersigmoid""" +304 53 regularizer """no""" +304 53 optimizer """adadelta""" +304 53 training_loop """owa""" +304 53 negative_sampler """basic""" +304 53 evaluator """rankbased""" +304 54 dataset """kinships""" +304 54 model """hole""" +304 54 loss """bceaftersigmoid""" +304 54 regularizer """no""" +304 54 optimizer """adadelta""" +304 54 training_loop """owa""" +304 54 negative_sampler """basic""" +304 54 evaluator """rankbased""" +304 55 dataset """kinships""" +304 55 model """hole""" +304 55 loss """bceaftersigmoid""" +304 55 regularizer """no""" +304 55 optimizer """adadelta""" +304 55 training_loop """owa""" +304 55 negative_sampler """basic""" +304 55 evaluator """rankbased""" +304 56 dataset """kinships""" +304 56 model """hole""" +304 56 loss """bceaftersigmoid""" +304 56 regularizer """no""" +304 56 optimizer """adadelta""" +304 56 training_loop """owa""" +304 56 negative_sampler """basic""" +304 56 evaluator """rankbased""" +304 57 dataset """kinships""" +304 57 model """hole""" +304 57 loss """bceaftersigmoid""" +304 57 regularizer """no""" +304 57 optimizer """adadelta""" +304 57 training_loop """owa""" +304 57 negative_sampler """basic""" +304 57 evaluator """rankbased""" +304 58 dataset """kinships""" +304 58 model """hole""" +304 
58 loss """bceaftersigmoid""" +304 58 regularizer """no""" +304 58 optimizer """adadelta""" +304 58 training_loop """owa""" +304 58 negative_sampler """basic""" +304 58 evaluator """rankbased""" +304 59 dataset """kinships""" +304 59 model """hole""" +304 59 loss """bceaftersigmoid""" +304 59 regularizer """no""" +304 59 optimizer """adadelta""" +304 59 training_loop """owa""" +304 59 negative_sampler """basic""" +304 59 evaluator """rankbased""" +304 60 dataset """kinships""" +304 60 model """hole""" +304 60 loss """bceaftersigmoid""" +304 60 regularizer """no""" +304 60 optimizer """adadelta""" +304 60 training_loop """owa""" +304 60 negative_sampler """basic""" +304 60 evaluator """rankbased""" +304 61 dataset """kinships""" +304 61 model """hole""" +304 61 loss """bceaftersigmoid""" +304 61 regularizer """no""" +304 61 optimizer """adadelta""" +304 61 training_loop """owa""" +304 61 negative_sampler """basic""" +304 61 evaluator """rankbased""" +304 62 dataset """kinships""" +304 62 model """hole""" +304 62 loss """bceaftersigmoid""" +304 62 regularizer """no""" +304 62 optimizer """adadelta""" +304 62 training_loop """owa""" +304 62 negative_sampler """basic""" +304 62 evaluator """rankbased""" +304 63 dataset """kinships""" +304 63 model """hole""" +304 63 loss """bceaftersigmoid""" +304 63 regularizer """no""" +304 63 optimizer """adadelta""" +304 63 training_loop """owa""" +304 63 negative_sampler """basic""" +304 63 evaluator """rankbased""" +304 64 dataset """kinships""" +304 64 model """hole""" +304 64 loss """bceaftersigmoid""" +304 64 regularizer """no""" +304 64 optimizer """adadelta""" +304 64 training_loop """owa""" +304 64 negative_sampler """basic""" +304 64 evaluator """rankbased""" +304 65 dataset """kinships""" +304 65 model """hole""" +304 65 loss """bceaftersigmoid""" +304 65 regularizer """no""" +304 65 optimizer """adadelta""" +304 65 training_loop """owa""" +304 65 negative_sampler """basic""" +304 65 evaluator """rankbased""" +304 66 
dataset """kinships""" +304 66 model """hole""" +304 66 loss """bceaftersigmoid""" +304 66 regularizer """no""" +304 66 optimizer """adadelta""" +304 66 training_loop """owa""" +304 66 negative_sampler """basic""" +304 66 evaluator """rankbased""" +304 67 dataset """kinships""" +304 67 model """hole""" +304 67 loss """bceaftersigmoid""" +304 67 regularizer """no""" +304 67 optimizer """adadelta""" +304 67 training_loop """owa""" +304 67 negative_sampler """basic""" +304 67 evaluator """rankbased""" +304 68 dataset """kinships""" +304 68 model """hole""" +304 68 loss """bceaftersigmoid""" +304 68 regularizer """no""" +304 68 optimizer """adadelta""" +304 68 training_loop """owa""" +304 68 negative_sampler """basic""" +304 68 evaluator """rankbased""" +304 69 dataset """kinships""" +304 69 model """hole""" +304 69 loss """bceaftersigmoid""" +304 69 regularizer """no""" +304 69 optimizer """adadelta""" +304 69 training_loop """owa""" +304 69 negative_sampler """basic""" +304 69 evaluator """rankbased""" +304 70 dataset """kinships""" +304 70 model """hole""" +304 70 loss """bceaftersigmoid""" +304 70 regularizer """no""" +304 70 optimizer """adadelta""" +304 70 training_loop """owa""" +304 70 negative_sampler """basic""" +304 70 evaluator """rankbased""" +304 71 dataset """kinships""" +304 71 model """hole""" +304 71 loss """bceaftersigmoid""" +304 71 regularizer """no""" +304 71 optimizer """adadelta""" +304 71 training_loop """owa""" +304 71 negative_sampler """basic""" +304 71 evaluator """rankbased""" +304 72 dataset """kinships""" +304 72 model """hole""" +304 72 loss """bceaftersigmoid""" +304 72 regularizer """no""" +304 72 optimizer """adadelta""" +304 72 training_loop """owa""" +304 72 negative_sampler """basic""" +304 72 evaluator """rankbased""" +304 73 dataset """kinships""" +304 73 model """hole""" +304 73 loss """bceaftersigmoid""" +304 73 regularizer """no""" +304 73 optimizer """adadelta""" +304 73 training_loop """owa""" +304 73 negative_sampler 
"""basic""" +304 73 evaluator """rankbased""" +304 74 dataset """kinships""" +304 74 model """hole""" +304 74 loss """bceaftersigmoid""" +304 74 regularizer """no""" +304 74 optimizer """adadelta""" +304 74 training_loop """owa""" +304 74 negative_sampler """basic""" +304 74 evaluator """rankbased""" +304 75 dataset """kinships""" +304 75 model """hole""" +304 75 loss """bceaftersigmoid""" +304 75 regularizer """no""" +304 75 optimizer """adadelta""" +304 75 training_loop """owa""" +304 75 negative_sampler """basic""" +304 75 evaluator """rankbased""" +304 76 dataset """kinships""" +304 76 model """hole""" +304 76 loss """bceaftersigmoid""" +304 76 regularizer """no""" +304 76 optimizer """adadelta""" +304 76 training_loop """owa""" +304 76 negative_sampler """basic""" +304 76 evaluator """rankbased""" +304 77 dataset """kinships""" +304 77 model """hole""" +304 77 loss """bceaftersigmoid""" +304 77 regularizer """no""" +304 77 optimizer """adadelta""" +304 77 training_loop """owa""" +304 77 negative_sampler """basic""" +304 77 evaluator """rankbased""" +304 78 dataset """kinships""" +304 78 model """hole""" +304 78 loss """bceaftersigmoid""" +304 78 regularizer """no""" +304 78 optimizer """adadelta""" +304 78 training_loop """owa""" +304 78 negative_sampler """basic""" +304 78 evaluator """rankbased""" +304 79 dataset """kinships""" +304 79 model """hole""" +304 79 loss """bceaftersigmoid""" +304 79 regularizer """no""" +304 79 optimizer """adadelta""" +304 79 training_loop """owa""" +304 79 negative_sampler """basic""" +304 79 evaluator """rankbased""" +304 80 dataset """kinships""" +304 80 model """hole""" +304 80 loss """bceaftersigmoid""" +304 80 regularizer """no""" +304 80 optimizer """adadelta""" +304 80 training_loop """owa""" +304 80 negative_sampler """basic""" +304 80 evaluator """rankbased""" +304 81 dataset """kinships""" +304 81 model """hole""" +304 81 loss """bceaftersigmoid""" +304 81 regularizer """no""" +304 81 optimizer """adadelta""" +304 81 
training_loop """owa""" +304 81 negative_sampler """basic""" +304 81 evaluator """rankbased""" +304 82 dataset """kinships""" +304 82 model """hole""" +304 82 loss """bceaftersigmoid""" +304 82 regularizer """no""" +304 82 optimizer """adadelta""" +304 82 training_loop """owa""" +304 82 negative_sampler """basic""" +304 82 evaluator """rankbased""" +304 83 dataset """kinships""" +304 83 model """hole""" +304 83 loss """bceaftersigmoid""" +304 83 regularizer """no""" +304 83 optimizer """adadelta""" +304 83 training_loop """owa""" +304 83 negative_sampler """basic""" +304 83 evaluator """rankbased""" +304 84 dataset """kinships""" +304 84 model """hole""" +304 84 loss """bceaftersigmoid""" +304 84 regularizer """no""" +304 84 optimizer """adadelta""" +304 84 training_loop """owa""" +304 84 negative_sampler """basic""" +304 84 evaluator """rankbased""" +304 85 dataset """kinships""" +304 85 model """hole""" +304 85 loss """bceaftersigmoid""" +304 85 regularizer """no""" +304 85 optimizer """adadelta""" +304 85 training_loop """owa""" +304 85 negative_sampler """basic""" +304 85 evaluator """rankbased""" +304 86 dataset """kinships""" +304 86 model """hole""" +304 86 loss """bceaftersigmoid""" +304 86 regularizer """no""" +304 86 optimizer """adadelta""" +304 86 training_loop """owa""" +304 86 negative_sampler """basic""" +304 86 evaluator """rankbased""" +304 87 dataset """kinships""" +304 87 model """hole""" +304 87 loss """bceaftersigmoid""" +304 87 regularizer """no""" +304 87 optimizer """adadelta""" +304 87 training_loop """owa""" +304 87 negative_sampler """basic""" +304 87 evaluator """rankbased""" +304 88 dataset """kinships""" +304 88 model """hole""" +304 88 loss """bceaftersigmoid""" +304 88 regularizer """no""" +304 88 optimizer """adadelta""" +304 88 training_loop """owa""" +304 88 negative_sampler """basic""" +304 88 evaluator """rankbased""" +304 89 dataset """kinships""" +304 89 model """hole""" +304 89 loss """bceaftersigmoid""" +304 89 regularizer 
"""no""" +304 89 optimizer """adadelta""" +304 89 training_loop """owa""" +304 89 negative_sampler """basic""" +304 89 evaluator """rankbased""" +304 90 dataset """kinships""" +304 90 model """hole""" +304 90 loss """bceaftersigmoid""" +304 90 regularizer """no""" +304 90 optimizer """adadelta""" +304 90 training_loop """owa""" +304 90 negative_sampler """basic""" +304 90 evaluator """rankbased""" +304 91 dataset """kinships""" +304 91 model """hole""" +304 91 loss """bceaftersigmoid""" +304 91 regularizer """no""" +304 91 optimizer """adadelta""" +304 91 training_loop """owa""" +304 91 negative_sampler """basic""" +304 91 evaluator """rankbased""" +304 92 dataset """kinships""" +304 92 model """hole""" +304 92 loss """bceaftersigmoid""" +304 92 regularizer """no""" +304 92 optimizer """adadelta""" +304 92 training_loop """owa""" +304 92 negative_sampler """basic""" +304 92 evaluator """rankbased""" +304 93 dataset """kinships""" +304 93 model """hole""" +304 93 loss """bceaftersigmoid""" +304 93 regularizer """no""" +304 93 optimizer """adadelta""" +304 93 training_loop """owa""" +304 93 negative_sampler """basic""" +304 93 evaluator """rankbased""" +304 94 dataset """kinships""" +304 94 model """hole""" +304 94 loss """bceaftersigmoid""" +304 94 regularizer """no""" +304 94 optimizer """adadelta""" +304 94 training_loop """owa""" +304 94 negative_sampler """basic""" +304 94 evaluator """rankbased""" +304 95 dataset """kinships""" +304 95 model """hole""" +304 95 loss """bceaftersigmoid""" +304 95 regularizer """no""" +304 95 optimizer """adadelta""" +304 95 training_loop """owa""" +304 95 negative_sampler """basic""" +304 95 evaluator """rankbased""" +304 96 dataset """kinships""" +304 96 model """hole""" +304 96 loss """bceaftersigmoid""" +304 96 regularizer """no""" +304 96 optimizer """adadelta""" +304 96 training_loop """owa""" +304 96 negative_sampler """basic""" +304 96 evaluator """rankbased""" +304 97 dataset """kinships""" +304 97 model """hole""" +304 
97 loss """bceaftersigmoid""" +304 97 regularizer """no""" +304 97 optimizer """adadelta""" +304 97 training_loop """owa""" +304 97 negative_sampler """basic""" +304 97 evaluator """rankbased""" +304 98 dataset """kinships""" +304 98 model """hole""" +304 98 loss """bceaftersigmoid""" +304 98 regularizer """no""" +304 98 optimizer """adadelta""" +304 98 training_loop """owa""" +304 98 negative_sampler """basic""" +304 98 evaluator """rankbased""" +304 99 dataset """kinships""" +304 99 model """hole""" +304 99 loss """bceaftersigmoid""" +304 99 regularizer """no""" +304 99 optimizer """adadelta""" +304 99 training_loop """owa""" +304 99 negative_sampler """basic""" +304 99 evaluator """rankbased""" +304 100 dataset """kinships""" +304 100 model """hole""" +304 100 loss """bceaftersigmoid""" +304 100 regularizer """no""" +304 100 optimizer """adadelta""" +304 100 training_loop """owa""" +304 100 negative_sampler """basic""" +304 100 evaluator """rankbased""" +305 1 model.embedding_dim 1.0 +305 1 negative_sampler.num_negs_per_pos 72.0 +305 1 training.batch_size 1.0 +305 2 model.embedding_dim 0.0 +305 2 negative_sampler.num_negs_per_pos 79.0 +305 2 training.batch_size 1.0 +305 3 model.embedding_dim 2.0 +305 3 negative_sampler.num_negs_per_pos 88.0 +305 3 training.batch_size 0.0 +305 4 model.embedding_dim 0.0 +305 4 negative_sampler.num_negs_per_pos 43.0 +305 4 training.batch_size 0.0 +305 5 model.embedding_dim 0.0 +305 5 negative_sampler.num_negs_per_pos 74.0 +305 5 training.batch_size 0.0 +305 6 model.embedding_dim 2.0 +305 6 negative_sampler.num_negs_per_pos 73.0 +305 6 training.batch_size 1.0 +305 7 model.embedding_dim 1.0 +305 7 negative_sampler.num_negs_per_pos 74.0 +305 7 training.batch_size 2.0 +305 8 model.embedding_dim 0.0 +305 8 negative_sampler.num_negs_per_pos 67.0 +305 8 training.batch_size 0.0 +305 9 model.embedding_dim 0.0 +305 9 negative_sampler.num_negs_per_pos 58.0 +305 9 training.batch_size 1.0 +305 10 model.embedding_dim 0.0 +305 10 
negative_sampler.num_negs_per_pos 90.0 +305 10 training.batch_size 0.0 +305 11 model.embedding_dim 1.0 +305 11 negative_sampler.num_negs_per_pos 7.0 +305 11 training.batch_size 1.0 +305 12 model.embedding_dim 0.0 +305 12 negative_sampler.num_negs_per_pos 27.0 +305 12 training.batch_size 1.0 +305 13 model.embedding_dim 2.0 +305 13 negative_sampler.num_negs_per_pos 2.0 +305 13 training.batch_size 1.0 +305 14 model.embedding_dim 0.0 +305 14 negative_sampler.num_negs_per_pos 65.0 +305 14 training.batch_size 0.0 +305 15 model.embedding_dim 0.0 +305 15 negative_sampler.num_negs_per_pos 82.0 +305 15 training.batch_size 0.0 +305 16 model.embedding_dim 2.0 +305 16 negative_sampler.num_negs_per_pos 20.0 +305 16 training.batch_size 0.0 +305 17 model.embedding_dim 0.0 +305 17 negative_sampler.num_negs_per_pos 46.0 +305 17 training.batch_size 0.0 +305 18 model.embedding_dim 1.0 +305 18 negative_sampler.num_negs_per_pos 39.0 +305 18 training.batch_size 1.0 +305 19 model.embedding_dim 0.0 +305 19 negative_sampler.num_negs_per_pos 99.0 +305 19 training.batch_size 2.0 +305 20 model.embedding_dim 1.0 +305 20 negative_sampler.num_negs_per_pos 39.0 +305 20 training.batch_size 2.0 +305 21 model.embedding_dim 2.0 +305 21 negative_sampler.num_negs_per_pos 69.0 +305 21 training.batch_size 2.0 +305 22 model.embedding_dim 2.0 +305 22 negative_sampler.num_negs_per_pos 38.0 +305 22 training.batch_size 1.0 +305 23 model.embedding_dim 0.0 +305 23 negative_sampler.num_negs_per_pos 58.0 +305 23 training.batch_size 0.0 +305 24 model.embedding_dim 2.0 +305 24 negative_sampler.num_negs_per_pos 44.0 +305 24 training.batch_size 2.0 +305 25 model.embedding_dim 2.0 +305 25 negative_sampler.num_negs_per_pos 22.0 +305 25 training.batch_size 2.0 +305 26 model.embedding_dim 2.0 +305 26 negative_sampler.num_negs_per_pos 98.0 +305 26 training.batch_size 0.0 +305 27 model.embedding_dim 0.0 +305 27 negative_sampler.num_negs_per_pos 89.0 +305 27 training.batch_size 1.0 +305 28 model.embedding_dim 1.0 +305 28 
negative_sampler.num_negs_per_pos 83.0 +305 28 training.batch_size 0.0 +305 29 model.embedding_dim 2.0 +305 29 negative_sampler.num_negs_per_pos 61.0 +305 29 training.batch_size 0.0 +305 30 model.embedding_dim 2.0 +305 30 negative_sampler.num_negs_per_pos 37.0 +305 30 training.batch_size 1.0 +305 31 model.embedding_dim 0.0 +305 31 negative_sampler.num_negs_per_pos 34.0 +305 31 training.batch_size 2.0 +305 32 model.embedding_dim 1.0 +305 32 negative_sampler.num_negs_per_pos 31.0 +305 32 training.batch_size 1.0 +305 33 model.embedding_dim 2.0 +305 33 negative_sampler.num_negs_per_pos 87.0 +305 33 training.batch_size 0.0 +305 34 model.embedding_dim 0.0 +305 34 negative_sampler.num_negs_per_pos 75.0 +305 34 training.batch_size 0.0 +305 35 model.embedding_dim 0.0 +305 35 negative_sampler.num_negs_per_pos 96.0 +305 35 training.batch_size 1.0 +305 36 model.embedding_dim 0.0 +305 36 negative_sampler.num_negs_per_pos 97.0 +305 36 training.batch_size 0.0 +305 37 model.embedding_dim 1.0 +305 37 negative_sampler.num_negs_per_pos 68.0 +305 37 training.batch_size 0.0 +305 38 model.embedding_dim 0.0 +305 38 negative_sampler.num_negs_per_pos 8.0 +305 38 training.batch_size 1.0 +305 39 model.embedding_dim 1.0 +305 39 negative_sampler.num_negs_per_pos 26.0 +305 39 training.batch_size 1.0 +305 40 model.embedding_dim 2.0 +305 40 negative_sampler.num_negs_per_pos 5.0 +305 40 training.batch_size 2.0 +305 41 model.embedding_dim 1.0 +305 41 negative_sampler.num_negs_per_pos 78.0 +305 41 training.batch_size 1.0 +305 42 model.embedding_dim 2.0 +305 42 negative_sampler.num_negs_per_pos 33.0 +305 42 training.batch_size 2.0 +305 43 model.embedding_dim 2.0 +305 43 negative_sampler.num_negs_per_pos 24.0 +305 43 training.batch_size 2.0 +305 44 model.embedding_dim 2.0 +305 44 negative_sampler.num_negs_per_pos 4.0 +305 44 training.batch_size 0.0 +305 45 model.embedding_dim 0.0 +305 45 negative_sampler.num_negs_per_pos 23.0 +305 45 training.batch_size 1.0 +305 46 model.embedding_dim 2.0 +305 46 
negative_sampler.num_negs_per_pos 65.0 +305 46 training.batch_size 1.0 +305 47 model.embedding_dim 1.0 +305 47 negative_sampler.num_negs_per_pos 27.0 +305 47 training.batch_size 2.0 +305 48 model.embedding_dim 2.0 +305 48 negative_sampler.num_negs_per_pos 26.0 +305 48 training.batch_size 0.0 +305 49 model.embedding_dim 0.0 +305 49 negative_sampler.num_negs_per_pos 13.0 +305 49 training.batch_size 2.0 +305 50 model.embedding_dim 0.0 +305 50 negative_sampler.num_negs_per_pos 76.0 +305 50 training.batch_size 1.0 +305 51 model.embedding_dim 1.0 +305 51 negative_sampler.num_negs_per_pos 49.0 +305 51 training.batch_size 1.0 +305 52 model.embedding_dim 2.0 +305 52 negative_sampler.num_negs_per_pos 39.0 +305 52 training.batch_size 0.0 +305 53 model.embedding_dim 0.0 +305 53 negative_sampler.num_negs_per_pos 63.0 +305 53 training.batch_size 2.0 +305 54 model.embedding_dim 1.0 +305 54 negative_sampler.num_negs_per_pos 14.0 +305 54 training.batch_size 0.0 +305 55 model.embedding_dim 1.0 +305 55 negative_sampler.num_negs_per_pos 0.0 +305 55 training.batch_size 2.0 +305 56 model.embedding_dim 2.0 +305 56 negative_sampler.num_negs_per_pos 84.0 +305 56 training.batch_size 1.0 +305 57 model.embedding_dim 0.0 +305 57 negative_sampler.num_negs_per_pos 65.0 +305 57 training.batch_size 2.0 +305 58 model.embedding_dim 0.0 +305 58 negative_sampler.num_negs_per_pos 43.0 +305 58 training.batch_size 0.0 +305 59 model.embedding_dim 2.0 +305 59 negative_sampler.num_negs_per_pos 54.0 +305 59 training.batch_size 1.0 +305 60 model.embedding_dim 1.0 +305 60 negative_sampler.num_negs_per_pos 41.0 +305 60 training.batch_size 2.0 +305 61 model.embedding_dim 2.0 +305 61 negative_sampler.num_negs_per_pos 39.0 +305 61 training.batch_size 0.0 +305 62 model.embedding_dim 0.0 +305 62 negative_sampler.num_negs_per_pos 2.0 +305 62 training.batch_size 2.0 +305 63 model.embedding_dim 2.0 +305 63 negative_sampler.num_negs_per_pos 85.0 +305 63 training.batch_size 2.0 +305 64 model.embedding_dim 1.0 +305 64 
negative_sampler.num_negs_per_pos 45.0 +305 64 training.batch_size 0.0 +305 65 model.embedding_dim 0.0 +305 65 negative_sampler.num_negs_per_pos 70.0 +305 65 training.batch_size 0.0 +305 66 model.embedding_dim 1.0 +305 66 negative_sampler.num_negs_per_pos 95.0 +305 66 training.batch_size 1.0 +305 67 model.embedding_dim 2.0 +305 67 negative_sampler.num_negs_per_pos 65.0 +305 67 training.batch_size 2.0 +305 68 model.embedding_dim 0.0 +305 68 negative_sampler.num_negs_per_pos 85.0 +305 68 training.batch_size 1.0 +305 69 model.embedding_dim 1.0 +305 69 negative_sampler.num_negs_per_pos 92.0 +305 69 training.batch_size 2.0 +305 70 model.embedding_dim 2.0 +305 70 negative_sampler.num_negs_per_pos 25.0 +305 70 training.batch_size 1.0 +305 71 model.embedding_dim 1.0 +305 71 negative_sampler.num_negs_per_pos 79.0 +305 71 training.batch_size 0.0 +305 72 model.embedding_dim 1.0 +305 72 negative_sampler.num_negs_per_pos 75.0 +305 72 training.batch_size 1.0 +305 73 model.embedding_dim 2.0 +305 73 negative_sampler.num_negs_per_pos 44.0 +305 73 training.batch_size 1.0 +305 74 model.embedding_dim 0.0 +305 74 negative_sampler.num_negs_per_pos 15.0 +305 74 training.batch_size 0.0 +305 75 model.embedding_dim 0.0 +305 75 negative_sampler.num_negs_per_pos 25.0 +305 75 training.batch_size 2.0 +305 76 model.embedding_dim 0.0 +305 76 negative_sampler.num_negs_per_pos 18.0 +305 76 training.batch_size 0.0 +305 77 model.embedding_dim 1.0 +305 77 negative_sampler.num_negs_per_pos 81.0 +305 77 training.batch_size 2.0 +305 78 model.embedding_dim 2.0 +305 78 negative_sampler.num_negs_per_pos 76.0 +305 78 training.batch_size 1.0 +305 79 model.embedding_dim 1.0 +305 79 negative_sampler.num_negs_per_pos 6.0 +305 79 training.batch_size 0.0 +305 80 model.embedding_dim 1.0 +305 80 negative_sampler.num_negs_per_pos 59.0 +305 80 training.batch_size 1.0 +305 81 model.embedding_dim 1.0 +305 81 negative_sampler.num_negs_per_pos 40.0 +305 81 training.batch_size 2.0 +305 82 model.embedding_dim 1.0 +305 82 
negative_sampler.num_negs_per_pos 69.0 +305 82 training.batch_size 2.0 +305 83 model.embedding_dim 0.0 +305 83 negative_sampler.num_negs_per_pos 42.0 +305 83 training.batch_size 0.0 +305 84 model.embedding_dim 2.0 +305 84 negative_sampler.num_negs_per_pos 48.0 +305 84 training.batch_size 2.0 +305 85 model.embedding_dim 1.0 +305 85 negative_sampler.num_negs_per_pos 49.0 +305 85 training.batch_size 2.0 +305 86 model.embedding_dim 0.0 +305 86 negative_sampler.num_negs_per_pos 30.0 +305 86 training.batch_size 1.0 +305 87 model.embedding_dim 1.0 +305 87 negative_sampler.num_negs_per_pos 98.0 +305 87 training.batch_size 1.0 +305 88 model.embedding_dim 0.0 +305 88 negative_sampler.num_negs_per_pos 39.0 +305 88 training.batch_size 0.0 +305 89 model.embedding_dim 0.0 +305 89 negative_sampler.num_negs_per_pos 49.0 +305 89 training.batch_size 2.0 +305 90 model.embedding_dim 0.0 +305 90 negative_sampler.num_negs_per_pos 86.0 +305 90 training.batch_size 2.0 +305 91 model.embedding_dim 1.0 +305 91 negative_sampler.num_negs_per_pos 20.0 +305 91 training.batch_size 0.0 +305 92 model.embedding_dim 2.0 +305 92 negative_sampler.num_negs_per_pos 89.0 +305 92 training.batch_size 0.0 +305 93 model.embedding_dim 2.0 +305 93 negative_sampler.num_negs_per_pos 86.0 +305 93 training.batch_size 0.0 +305 94 model.embedding_dim 1.0 +305 94 negative_sampler.num_negs_per_pos 93.0 +305 94 training.batch_size 2.0 +305 95 model.embedding_dim 1.0 +305 95 negative_sampler.num_negs_per_pos 56.0 +305 95 training.batch_size 1.0 +305 96 model.embedding_dim 0.0 +305 96 negative_sampler.num_negs_per_pos 42.0 +305 96 training.batch_size 1.0 +305 97 model.embedding_dim 2.0 +305 97 negative_sampler.num_negs_per_pos 5.0 +305 97 training.batch_size 0.0 +305 98 model.embedding_dim 2.0 +305 98 negative_sampler.num_negs_per_pos 81.0 +305 98 training.batch_size 0.0 +305 99 model.embedding_dim 0.0 +305 99 negative_sampler.num_negs_per_pos 60.0 +305 99 training.batch_size 0.0 +305 100 model.embedding_dim 2.0 +305 100 
negative_sampler.num_negs_per_pos 78.0 +305 100 training.batch_size 1.0 +305 1 dataset """kinships""" +305 1 model """hole""" +305 1 loss """softplus""" +305 1 regularizer """no""" +305 1 optimizer """adadelta""" +305 1 training_loop """owa""" +305 1 negative_sampler """basic""" +305 1 evaluator """rankbased""" +305 2 dataset """kinships""" +305 2 model """hole""" +305 2 loss """softplus""" +305 2 regularizer """no""" +305 2 optimizer """adadelta""" +305 2 training_loop """owa""" +305 2 negative_sampler """basic""" +305 2 evaluator """rankbased""" +305 3 dataset """kinships""" +305 3 model """hole""" +305 3 loss """softplus""" +305 3 regularizer """no""" +305 3 optimizer """adadelta""" +305 3 training_loop """owa""" +305 3 negative_sampler """basic""" +305 3 evaluator """rankbased""" +305 4 dataset """kinships""" +305 4 model """hole""" +305 4 loss """softplus""" +305 4 regularizer """no""" +305 4 optimizer """adadelta""" +305 4 training_loop """owa""" +305 4 negative_sampler """basic""" +305 4 evaluator """rankbased""" +305 5 dataset """kinships""" +305 5 model """hole""" +305 5 loss """softplus""" +305 5 regularizer """no""" +305 5 optimizer """adadelta""" +305 5 training_loop """owa""" +305 5 negative_sampler """basic""" +305 5 evaluator """rankbased""" +305 6 dataset """kinships""" +305 6 model """hole""" +305 6 loss """softplus""" +305 6 regularizer """no""" +305 6 optimizer """adadelta""" +305 6 training_loop """owa""" +305 6 negative_sampler """basic""" +305 6 evaluator """rankbased""" +305 7 dataset """kinships""" +305 7 model """hole""" +305 7 loss """softplus""" +305 7 regularizer """no""" +305 7 optimizer """adadelta""" +305 7 training_loop """owa""" +305 7 negative_sampler """basic""" +305 7 evaluator """rankbased""" +305 8 dataset """kinships""" +305 8 model """hole""" +305 8 loss """softplus""" +305 8 regularizer """no""" +305 8 optimizer """adadelta""" +305 8 training_loop """owa""" +305 8 negative_sampler """basic""" +305 8 evaluator """rankbased""" 
+305 9 dataset """kinships""" +305 9 model """hole""" +305 9 loss """softplus""" +305 9 regularizer """no""" +305 9 optimizer """adadelta""" +305 9 training_loop """owa""" +305 9 negative_sampler """basic""" +305 9 evaluator """rankbased""" +305 10 dataset """kinships""" +305 10 model """hole""" +305 10 loss """softplus""" +305 10 regularizer """no""" +305 10 optimizer """adadelta""" +305 10 training_loop """owa""" +305 10 negative_sampler """basic""" +305 10 evaluator """rankbased""" +305 11 dataset """kinships""" +305 11 model """hole""" +305 11 loss """softplus""" +305 11 regularizer """no""" +305 11 optimizer """adadelta""" +305 11 training_loop """owa""" +305 11 negative_sampler """basic""" +305 11 evaluator """rankbased""" +305 12 dataset """kinships""" +305 12 model """hole""" +305 12 loss """softplus""" +305 12 regularizer """no""" +305 12 optimizer """adadelta""" +305 12 training_loop """owa""" +305 12 negative_sampler """basic""" +305 12 evaluator """rankbased""" +305 13 dataset """kinships""" +305 13 model """hole""" +305 13 loss """softplus""" +305 13 regularizer """no""" +305 13 optimizer """adadelta""" +305 13 training_loop """owa""" +305 13 negative_sampler """basic""" +305 13 evaluator """rankbased""" +305 14 dataset """kinships""" +305 14 model """hole""" +305 14 loss """softplus""" +305 14 regularizer """no""" +305 14 optimizer """adadelta""" +305 14 training_loop """owa""" +305 14 negative_sampler """basic""" +305 14 evaluator """rankbased""" +305 15 dataset """kinships""" +305 15 model """hole""" +305 15 loss """softplus""" +305 15 regularizer """no""" +305 15 optimizer """adadelta""" +305 15 training_loop """owa""" +305 15 negative_sampler """basic""" +305 15 evaluator """rankbased""" +305 16 dataset """kinships""" +305 16 model """hole""" +305 16 loss """softplus""" +305 16 regularizer """no""" +305 16 optimizer """adadelta""" +305 16 training_loop """owa""" +305 16 negative_sampler """basic""" +305 16 evaluator """rankbased""" +305 17 dataset 
"""kinships""" +305 17 model """hole""" +305 17 loss """softplus""" +305 17 regularizer """no""" +305 17 optimizer """adadelta""" +305 17 training_loop """owa""" +305 17 negative_sampler """basic""" +305 17 evaluator """rankbased""" +305 18 dataset """kinships""" +305 18 model """hole""" +305 18 loss """softplus""" +305 18 regularizer """no""" +305 18 optimizer """adadelta""" +305 18 training_loop """owa""" +305 18 negative_sampler """basic""" +305 18 evaluator """rankbased""" +305 19 dataset """kinships""" +305 19 model """hole""" +305 19 loss """softplus""" +305 19 regularizer """no""" +305 19 optimizer """adadelta""" +305 19 training_loop """owa""" +305 19 negative_sampler """basic""" +305 19 evaluator """rankbased""" +305 20 dataset """kinships""" +305 20 model """hole""" +305 20 loss """softplus""" +305 20 regularizer """no""" +305 20 optimizer """adadelta""" +305 20 training_loop """owa""" +305 20 negative_sampler """basic""" +305 20 evaluator """rankbased""" +305 21 dataset """kinships""" +305 21 model """hole""" +305 21 loss """softplus""" +305 21 regularizer """no""" +305 21 optimizer """adadelta""" +305 21 training_loop """owa""" +305 21 negative_sampler """basic""" +305 21 evaluator """rankbased""" +305 22 dataset """kinships""" +305 22 model """hole""" +305 22 loss """softplus""" +305 22 regularizer """no""" +305 22 optimizer """adadelta""" +305 22 training_loop """owa""" +305 22 negative_sampler """basic""" +305 22 evaluator """rankbased""" +305 23 dataset """kinships""" +305 23 model """hole""" +305 23 loss """softplus""" +305 23 regularizer """no""" +305 23 optimizer """adadelta""" +305 23 training_loop """owa""" +305 23 negative_sampler """basic""" +305 23 evaluator """rankbased""" +305 24 dataset """kinships""" +305 24 model """hole""" +305 24 loss """softplus""" +305 24 regularizer """no""" +305 24 optimizer """adadelta""" +305 24 training_loop """owa""" +305 24 negative_sampler """basic""" +305 24 evaluator """rankbased""" +305 25 dataset 
"""kinships""" +305 25 model """hole""" +305 25 loss """softplus""" +305 25 regularizer """no""" +305 25 optimizer """adadelta""" +305 25 training_loop """owa""" +305 25 negative_sampler """basic""" +305 25 evaluator """rankbased""" +305 26 dataset """kinships""" +305 26 model """hole""" +305 26 loss """softplus""" +305 26 regularizer """no""" +305 26 optimizer """adadelta""" +305 26 training_loop """owa""" +305 26 negative_sampler """basic""" +305 26 evaluator """rankbased""" +305 27 dataset """kinships""" +305 27 model """hole""" +305 27 loss """softplus""" +305 27 regularizer """no""" +305 27 optimizer """adadelta""" +305 27 training_loop """owa""" +305 27 negative_sampler """basic""" +305 27 evaluator """rankbased""" +305 28 dataset """kinships""" +305 28 model """hole""" +305 28 loss """softplus""" +305 28 regularizer """no""" +305 28 optimizer """adadelta""" +305 28 training_loop """owa""" +305 28 negative_sampler """basic""" +305 28 evaluator """rankbased""" +305 29 dataset """kinships""" +305 29 model """hole""" +305 29 loss """softplus""" +305 29 regularizer """no""" +305 29 optimizer """adadelta""" +305 29 training_loop """owa""" +305 29 negative_sampler """basic""" +305 29 evaluator """rankbased""" +305 30 dataset """kinships""" +305 30 model """hole""" +305 30 loss """softplus""" +305 30 regularizer """no""" +305 30 optimizer """adadelta""" +305 30 training_loop """owa""" +305 30 negative_sampler """basic""" +305 30 evaluator """rankbased""" +305 31 dataset """kinships""" +305 31 model """hole""" +305 31 loss """softplus""" +305 31 regularizer """no""" +305 31 optimizer """adadelta""" +305 31 training_loop """owa""" +305 31 negative_sampler """basic""" +305 31 evaluator """rankbased""" +305 32 dataset """kinships""" +305 32 model """hole""" +305 32 loss """softplus""" +305 32 regularizer """no""" +305 32 optimizer """adadelta""" +305 32 training_loop """owa""" +305 32 negative_sampler """basic""" +305 32 evaluator """rankbased""" +305 33 dataset 
"""kinships""" +305 33 model """hole""" +305 33 loss """softplus""" +305 33 regularizer """no""" +305 33 optimizer """adadelta""" +305 33 training_loop """owa""" +305 33 negative_sampler """basic""" +305 33 evaluator """rankbased""" +305 34 dataset """kinships""" +305 34 model """hole""" +305 34 loss """softplus""" +305 34 regularizer """no""" +305 34 optimizer """adadelta""" +305 34 training_loop """owa""" +305 34 negative_sampler """basic""" +305 34 evaluator """rankbased""" +305 35 dataset """kinships""" +305 35 model """hole""" +305 35 loss """softplus""" +305 35 regularizer """no""" +305 35 optimizer """adadelta""" +305 35 training_loop """owa""" +305 35 negative_sampler """basic""" +305 35 evaluator """rankbased""" +305 36 dataset """kinships""" +305 36 model """hole""" +305 36 loss """softplus""" +305 36 regularizer """no""" +305 36 optimizer """adadelta""" +305 36 training_loop """owa""" +305 36 negative_sampler """basic""" +305 36 evaluator """rankbased""" +305 37 dataset """kinships""" +305 37 model """hole""" +305 37 loss """softplus""" +305 37 regularizer """no""" +305 37 optimizer """adadelta""" +305 37 training_loop """owa""" +305 37 negative_sampler """basic""" +305 37 evaluator """rankbased""" +305 38 dataset """kinships""" +305 38 model """hole""" +305 38 loss """softplus""" +305 38 regularizer """no""" +305 38 optimizer """adadelta""" +305 38 training_loop """owa""" +305 38 negative_sampler """basic""" +305 38 evaluator """rankbased""" +305 39 dataset """kinships""" +305 39 model """hole""" +305 39 loss """softplus""" +305 39 regularizer """no""" +305 39 optimizer """adadelta""" +305 39 training_loop """owa""" +305 39 negative_sampler """basic""" +305 39 evaluator """rankbased""" +305 40 dataset """kinships""" +305 40 model """hole""" +305 40 loss """softplus""" +305 40 regularizer """no""" +305 40 optimizer """adadelta""" +305 40 training_loop """owa""" +305 40 negative_sampler """basic""" +305 40 evaluator """rankbased""" +305 41 dataset 
"""kinships""" +305 41 model """hole""" +305 41 loss """softplus""" +305 41 regularizer """no""" +305 41 optimizer """adadelta""" +305 41 training_loop """owa""" +305 41 negative_sampler """basic""" +305 41 evaluator """rankbased""" +305 42 dataset """kinships""" +305 42 model """hole""" +305 42 loss """softplus""" +305 42 regularizer """no""" +305 42 optimizer """adadelta""" +305 42 training_loop """owa""" +305 42 negative_sampler """basic""" +305 42 evaluator """rankbased""" +305 43 dataset """kinships""" +305 43 model """hole""" +305 43 loss """softplus""" +305 43 regularizer """no""" +305 43 optimizer """adadelta""" +305 43 training_loop """owa""" +305 43 negative_sampler """basic""" +305 43 evaluator """rankbased""" +305 44 dataset """kinships""" +305 44 model """hole""" +305 44 loss """softplus""" +305 44 regularizer """no""" +305 44 optimizer """adadelta""" +305 44 training_loop """owa""" +305 44 negative_sampler """basic""" +305 44 evaluator """rankbased""" +305 45 dataset """kinships""" +305 45 model """hole""" +305 45 loss """softplus""" +305 45 regularizer """no""" +305 45 optimizer """adadelta""" +305 45 training_loop """owa""" +305 45 negative_sampler """basic""" +305 45 evaluator """rankbased""" +305 46 dataset """kinships""" +305 46 model """hole""" +305 46 loss """softplus""" +305 46 regularizer """no""" +305 46 optimizer """adadelta""" +305 46 training_loop """owa""" +305 46 negative_sampler """basic""" +305 46 evaluator """rankbased""" +305 47 dataset """kinships""" +305 47 model """hole""" +305 47 loss """softplus""" +305 47 regularizer """no""" +305 47 optimizer """adadelta""" +305 47 training_loop """owa""" +305 47 negative_sampler """basic""" +305 47 evaluator """rankbased""" +305 48 dataset """kinships""" +305 48 model """hole""" +305 48 loss """softplus""" +305 48 regularizer """no""" +305 48 optimizer """adadelta""" +305 48 training_loop """owa""" +305 48 negative_sampler """basic""" +305 48 evaluator """rankbased""" +305 49 dataset 
"""kinships""" +305 49 model """hole""" +305 49 loss """softplus""" +305 49 regularizer """no""" +305 49 optimizer """adadelta""" +305 49 training_loop """owa""" +305 49 negative_sampler """basic""" +305 49 evaluator """rankbased""" +305 50 dataset """kinships""" +305 50 model """hole""" +305 50 loss """softplus""" +305 50 regularizer """no""" +305 50 optimizer """adadelta""" +305 50 training_loop """owa""" +305 50 negative_sampler """basic""" +305 50 evaluator """rankbased""" +305 51 dataset """kinships""" +305 51 model """hole""" +305 51 loss """softplus""" +305 51 regularizer """no""" +305 51 optimizer """adadelta""" +305 51 training_loop """owa""" +305 51 negative_sampler """basic""" +305 51 evaluator """rankbased""" +305 52 dataset """kinships""" +305 52 model """hole""" +305 52 loss """softplus""" +305 52 regularizer """no""" +305 52 optimizer """adadelta""" +305 52 training_loop """owa""" +305 52 negative_sampler """basic""" +305 52 evaluator """rankbased""" +305 53 dataset """kinships""" +305 53 model """hole""" +305 53 loss """softplus""" +305 53 regularizer """no""" +305 53 optimizer """adadelta""" +305 53 training_loop """owa""" +305 53 negative_sampler """basic""" +305 53 evaluator """rankbased""" +305 54 dataset """kinships""" +305 54 model """hole""" +305 54 loss """softplus""" +305 54 regularizer """no""" +305 54 optimizer """adadelta""" +305 54 training_loop """owa""" +305 54 negative_sampler """basic""" +305 54 evaluator """rankbased""" +305 55 dataset """kinships""" +305 55 model """hole""" +305 55 loss """softplus""" +305 55 regularizer """no""" +305 55 optimizer """adadelta""" +305 55 training_loop """owa""" +305 55 negative_sampler """basic""" +305 55 evaluator """rankbased""" +305 56 dataset """kinships""" +305 56 model """hole""" +305 56 loss """softplus""" +305 56 regularizer """no""" +305 56 optimizer """adadelta""" +305 56 training_loop """owa""" +305 56 negative_sampler """basic""" +305 56 evaluator """rankbased""" +305 57 dataset 
"""kinships""" +305 57 model """hole""" +305 57 loss """softplus""" +305 57 regularizer """no""" +305 57 optimizer """adadelta""" +305 57 training_loop """owa""" +305 57 negative_sampler """basic""" +305 57 evaluator """rankbased""" +305 58 dataset """kinships""" +305 58 model """hole""" +305 58 loss """softplus""" +305 58 regularizer """no""" +305 58 optimizer """adadelta""" +305 58 training_loop """owa""" +305 58 negative_sampler """basic""" +305 58 evaluator """rankbased""" +305 59 dataset """kinships""" +305 59 model """hole""" +305 59 loss """softplus""" +305 59 regularizer """no""" +305 59 optimizer """adadelta""" +305 59 training_loop """owa""" +305 59 negative_sampler """basic""" +305 59 evaluator """rankbased""" +305 60 dataset """kinships""" +305 60 model """hole""" +305 60 loss """softplus""" +305 60 regularizer """no""" +305 60 optimizer """adadelta""" +305 60 training_loop """owa""" +305 60 negative_sampler """basic""" +305 60 evaluator """rankbased""" +305 61 dataset """kinships""" +305 61 model """hole""" +305 61 loss """softplus""" +305 61 regularizer """no""" +305 61 optimizer """adadelta""" +305 61 training_loop """owa""" +305 61 negative_sampler """basic""" +305 61 evaluator """rankbased""" +305 62 dataset """kinships""" +305 62 model """hole""" +305 62 loss """softplus""" +305 62 regularizer """no""" +305 62 optimizer """adadelta""" +305 62 training_loop """owa""" +305 62 negative_sampler """basic""" +305 62 evaluator """rankbased""" +305 63 dataset """kinships""" +305 63 model """hole""" +305 63 loss """softplus""" +305 63 regularizer """no""" +305 63 optimizer """adadelta""" +305 63 training_loop """owa""" +305 63 negative_sampler """basic""" +305 63 evaluator """rankbased""" +305 64 dataset """kinships""" +305 64 model """hole""" +305 64 loss """softplus""" +305 64 regularizer """no""" +305 64 optimizer """adadelta""" +305 64 training_loop """owa""" +305 64 negative_sampler """basic""" +305 64 evaluator """rankbased""" +305 65 dataset 
"""kinships""" +305 65 model """hole""" +305 65 loss """softplus""" +305 65 regularizer """no""" +305 65 optimizer """adadelta""" +305 65 training_loop """owa""" +305 65 negative_sampler """basic""" +305 65 evaluator """rankbased""" +305 66 dataset """kinships""" +305 66 model """hole""" +305 66 loss """softplus""" +305 66 regularizer """no""" +305 66 optimizer """adadelta""" +305 66 training_loop """owa""" +305 66 negative_sampler """basic""" +305 66 evaluator """rankbased""" +305 67 dataset """kinships""" +305 67 model """hole""" +305 67 loss """softplus""" +305 67 regularizer """no""" +305 67 optimizer """adadelta""" +305 67 training_loop """owa""" +305 67 negative_sampler """basic""" +305 67 evaluator """rankbased""" +305 68 dataset """kinships""" +305 68 model """hole""" +305 68 loss """softplus""" +305 68 regularizer """no""" +305 68 optimizer """adadelta""" +305 68 training_loop """owa""" +305 68 negative_sampler """basic""" +305 68 evaluator """rankbased""" +305 69 dataset """kinships""" +305 69 model """hole""" +305 69 loss """softplus""" +305 69 regularizer """no""" +305 69 optimizer """adadelta""" +305 69 training_loop """owa""" +305 69 negative_sampler """basic""" +305 69 evaluator """rankbased""" +305 70 dataset """kinships""" +305 70 model """hole""" +305 70 loss """softplus""" +305 70 regularizer """no""" +305 70 optimizer """adadelta""" +305 70 training_loop """owa""" +305 70 negative_sampler """basic""" +305 70 evaluator """rankbased""" +305 71 dataset """kinships""" +305 71 model """hole""" +305 71 loss """softplus""" +305 71 regularizer """no""" +305 71 optimizer """adadelta""" +305 71 training_loop """owa""" +305 71 negative_sampler """basic""" +305 71 evaluator """rankbased""" +305 72 dataset """kinships""" +305 72 model """hole""" +305 72 loss """softplus""" +305 72 regularizer """no""" +305 72 optimizer """adadelta""" +305 72 training_loop """owa""" +305 72 negative_sampler """basic""" +305 72 evaluator """rankbased""" +305 73 dataset 
"""kinships""" +305 73 model """hole""" +305 73 loss """softplus""" +305 73 regularizer """no""" +305 73 optimizer """adadelta""" +305 73 training_loop """owa""" +305 73 negative_sampler """basic""" +305 73 evaluator """rankbased""" +305 74 dataset """kinships""" +305 74 model """hole""" +305 74 loss """softplus""" +305 74 regularizer """no""" +305 74 optimizer """adadelta""" +305 74 training_loop """owa""" +305 74 negative_sampler """basic""" +305 74 evaluator """rankbased""" +305 75 dataset """kinships""" +305 75 model """hole""" +305 75 loss """softplus""" +305 75 regularizer """no""" +305 75 optimizer """adadelta""" +305 75 training_loop """owa""" +305 75 negative_sampler """basic""" +305 75 evaluator """rankbased""" +305 76 dataset """kinships""" +305 76 model """hole""" +305 76 loss """softplus""" +305 76 regularizer """no""" +305 76 optimizer """adadelta""" +305 76 training_loop """owa""" +305 76 negative_sampler """basic""" +305 76 evaluator """rankbased""" +305 77 dataset """kinships""" +305 77 model """hole""" +305 77 loss """softplus""" +305 77 regularizer """no""" +305 77 optimizer """adadelta""" +305 77 training_loop """owa""" +305 77 negative_sampler """basic""" +305 77 evaluator """rankbased""" +305 78 dataset """kinships""" +305 78 model """hole""" +305 78 loss """softplus""" +305 78 regularizer """no""" +305 78 optimizer """adadelta""" +305 78 training_loop """owa""" +305 78 negative_sampler """basic""" +305 78 evaluator """rankbased""" +305 79 dataset """kinships""" +305 79 model """hole""" +305 79 loss """softplus""" +305 79 regularizer """no""" +305 79 optimizer """adadelta""" +305 79 training_loop """owa""" +305 79 negative_sampler """basic""" +305 79 evaluator """rankbased""" +305 80 dataset """kinships""" +305 80 model """hole""" +305 80 loss """softplus""" +305 80 regularizer """no""" +305 80 optimizer """adadelta""" +305 80 training_loop """owa""" +305 80 negative_sampler """basic""" +305 80 evaluator """rankbased""" +305 81 dataset 
"""kinships""" +305 81 model """hole""" +305 81 loss """softplus""" +305 81 regularizer """no""" +305 81 optimizer """adadelta""" +305 81 training_loop """owa""" +305 81 negative_sampler """basic""" +305 81 evaluator """rankbased""" +305 82 dataset """kinships""" +305 82 model """hole""" +305 82 loss """softplus""" +305 82 regularizer """no""" +305 82 optimizer """adadelta""" +305 82 training_loop """owa""" +305 82 negative_sampler """basic""" +305 82 evaluator """rankbased""" +305 83 dataset """kinships""" +305 83 model """hole""" +305 83 loss """softplus""" +305 83 regularizer """no""" +305 83 optimizer """adadelta""" +305 83 training_loop """owa""" +305 83 negative_sampler """basic""" +305 83 evaluator """rankbased""" +305 84 dataset """kinships""" +305 84 model """hole""" +305 84 loss """softplus""" +305 84 regularizer """no""" +305 84 optimizer """adadelta""" +305 84 training_loop """owa""" +305 84 negative_sampler """basic""" +305 84 evaluator """rankbased""" +305 85 dataset """kinships""" +305 85 model """hole""" +305 85 loss """softplus""" +305 85 regularizer """no""" +305 85 optimizer """adadelta""" +305 85 training_loop """owa""" +305 85 negative_sampler """basic""" +305 85 evaluator """rankbased""" +305 86 dataset """kinships""" +305 86 model """hole""" +305 86 loss """softplus""" +305 86 regularizer """no""" +305 86 optimizer """adadelta""" +305 86 training_loop """owa""" +305 86 negative_sampler """basic""" +305 86 evaluator """rankbased""" +305 87 dataset """kinships""" +305 87 model """hole""" +305 87 loss """softplus""" +305 87 regularizer """no""" +305 87 optimizer """adadelta""" +305 87 training_loop """owa""" +305 87 negative_sampler """basic""" +305 87 evaluator """rankbased""" +305 88 dataset """kinships""" +305 88 model """hole""" +305 88 loss """softplus""" +305 88 regularizer """no""" +305 88 optimizer """adadelta""" +305 88 training_loop """owa""" +305 88 negative_sampler """basic""" +305 88 evaluator """rankbased""" +305 89 dataset 
"""kinships""" +305 89 model """hole""" +305 89 loss """softplus""" +305 89 regularizer """no""" +305 89 optimizer """adadelta""" +305 89 training_loop """owa""" +305 89 negative_sampler """basic""" +305 89 evaluator """rankbased""" +305 90 dataset """kinships""" +305 90 model """hole""" +305 90 loss """softplus""" +305 90 regularizer """no""" +305 90 optimizer """adadelta""" +305 90 training_loop """owa""" +305 90 negative_sampler """basic""" +305 90 evaluator """rankbased""" +305 91 dataset """kinships""" +305 91 model """hole""" +305 91 loss """softplus""" +305 91 regularizer """no""" +305 91 optimizer """adadelta""" +305 91 training_loop """owa""" +305 91 negative_sampler """basic""" +305 91 evaluator """rankbased""" +305 92 dataset """kinships""" +305 92 model """hole""" +305 92 loss """softplus""" +305 92 regularizer """no""" +305 92 optimizer """adadelta""" +305 92 training_loop """owa""" +305 92 negative_sampler """basic""" +305 92 evaluator """rankbased""" +305 93 dataset """kinships""" +305 93 model """hole""" +305 93 loss """softplus""" +305 93 regularizer """no""" +305 93 optimizer """adadelta""" +305 93 training_loop """owa""" +305 93 negative_sampler """basic""" +305 93 evaluator """rankbased""" +305 94 dataset """kinships""" +305 94 model """hole""" +305 94 loss """softplus""" +305 94 regularizer """no""" +305 94 optimizer """adadelta""" +305 94 training_loop """owa""" +305 94 negative_sampler """basic""" +305 94 evaluator """rankbased""" +305 95 dataset """kinships""" +305 95 model """hole""" +305 95 loss """softplus""" +305 95 regularizer """no""" +305 95 optimizer """adadelta""" +305 95 training_loop """owa""" +305 95 negative_sampler """basic""" +305 95 evaluator """rankbased""" +305 96 dataset """kinships""" +305 96 model """hole""" +305 96 loss """softplus""" +305 96 regularizer """no""" +305 96 optimizer """adadelta""" +305 96 training_loop """owa""" +305 96 negative_sampler """basic""" +305 96 evaluator """rankbased""" +305 97 dataset 
"""kinships""" +305 97 model """hole""" +305 97 loss """softplus""" +305 97 regularizer """no""" +305 97 optimizer """adadelta""" +305 97 training_loop """owa""" +305 97 negative_sampler """basic""" +305 97 evaluator """rankbased""" +305 98 dataset """kinships""" +305 98 model """hole""" +305 98 loss """softplus""" +305 98 regularizer """no""" +305 98 optimizer """adadelta""" +305 98 training_loop """owa""" +305 98 negative_sampler """basic""" +305 98 evaluator """rankbased""" +305 99 dataset """kinships""" +305 99 model """hole""" +305 99 loss """softplus""" +305 99 regularizer """no""" +305 99 optimizer """adadelta""" +305 99 training_loop """owa""" +305 99 negative_sampler """basic""" +305 99 evaluator """rankbased""" +305 100 dataset """kinships""" +305 100 model """hole""" +305 100 loss """softplus""" +305 100 regularizer """no""" +305 100 optimizer """adadelta""" +305 100 training_loop """owa""" +305 100 negative_sampler """basic""" +305 100 evaluator """rankbased""" +306 1 model.embedding_dim 1.0 +306 1 negative_sampler.num_negs_per_pos 88.0 +306 1 training.batch_size 1.0 +306 2 model.embedding_dim 2.0 +306 2 negative_sampler.num_negs_per_pos 28.0 +306 2 training.batch_size 0.0 +306 3 model.embedding_dim 0.0 +306 3 negative_sampler.num_negs_per_pos 14.0 +306 3 training.batch_size 2.0 +306 4 model.embedding_dim 1.0 +306 4 negative_sampler.num_negs_per_pos 87.0 +306 4 training.batch_size 1.0 +306 5 model.embedding_dim 2.0 +306 5 negative_sampler.num_negs_per_pos 30.0 +306 5 training.batch_size 2.0 +306 6 model.embedding_dim 1.0 +306 6 negative_sampler.num_negs_per_pos 25.0 +306 6 training.batch_size 0.0 +306 7 model.embedding_dim 2.0 +306 7 negative_sampler.num_negs_per_pos 34.0 +306 7 training.batch_size 2.0 +306 8 model.embedding_dim 2.0 +306 8 negative_sampler.num_negs_per_pos 60.0 +306 8 training.batch_size 0.0 +306 9 model.embedding_dim 0.0 +306 9 negative_sampler.num_negs_per_pos 30.0 +306 9 training.batch_size 1.0 +306 10 model.embedding_dim 0.0 +306 10 
negative_sampler.num_negs_per_pos 79.0 +306 10 training.batch_size 1.0 +306 11 model.embedding_dim 0.0 +306 11 negative_sampler.num_negs_per_pos 66.0 +306 11 training.batch_size 1.0 +306 12 model.embedding_dim 1.0 +306 12 negative_sampler.num_negs_per_pos 32.0 +306 12 training.batch_size 1.0 +306 13 model.embedding_dim 2.0 +306 13 negative_sampler.num_negs_per_pos 10.0 +306 13 training.batch_size 2.0 +306 14 model.embedding_dim 0.0 +306 14 negative_sampler.num_negs_per_pos 63.0 +306 14 training.batch_size 2.0 +306 15 model.embedding_dim 1.0 +306 15 negative_sampler.num_negs_per_pos 76.0 +306 15 training.batch_size 0.0 +306 16 model.embedding_dim 0.0 +306 16 negative_sampler.num_negs_per_pos 14.0 +306 16 training.batch_size 2.0 +306 17 model.embedding_dim 2.0 +306 17 negative_sampler.num_negs_per_pos 49.0 +306 17 training.batch_size 1.0 +306 18 model.embedding_dim 2.0 +306 18 negative_sampler.num_negs_per_pos 87.0 +306 18 training.batch_size 1.0 +306 19 model.embedding_dim 1.0 +306 19 negative_sampler.num_negs_per_pos 53.0 +306 19 training.batch_size 0.0 +306 20 model.embedding_dim 0.0 +306 20 negative_sampler.num_negs_per_pos 36.0 +306 20 training.batch_size 1.0 +306 21 model.embedding_dim 1.0 +306 21 negative_sampler.num_negs_per_pos 74.0 +306 21 training.batch_size 0.0 +306 22 model.embedding_dim 2.0 +306 22 negative_sampler.num_negs_per_pos 67.0 +306 22 training.batch_size 0.0 +306 23 model.embedding_dim 2.0 +306 23 negative_sampler.num_negs_per_pos 83.0 +306 23 training.batch_size 2.0 +306 24 model.embedding_dim 0.0 +306 24 negative_sampler.num_negs_per_pos 43.0 +306 24 training.batch_size 2.0 +306 25 model.embedding_dim 2.0 +306 25 negative_sampler.num_negs_per_pos 88.0 +306 25 training.batch_size 1.0 +306 26 model.embedding_dim 0.0 +306 26 negative_sampler.num_negs_per_pos 50.0 +306 26 training.batch_size 0.0 +306 27 model.embedding_dim 1.0 +306 27 negative_sampler.num_negs_per_pos 40.0 +306 27 training.batch_size 2.0 +306 28 model.embedding_dim 1.0 +306 28 
negative_sampler.num_negs_per_pos 81.0 +306 28 training.batch_size 0.0 +306 29 model.embedding_dim 1.0 +306 29 negative_sampler.num_negs_per_pos 64.0 +306 29 training.batch_size 1.0 +306 30 model.embedding_dim 2.0 +306 30 negative_sampler.num_negs_per_pos 75.0 +306 30 training.batch_size 2.0 +306 31 model.embedding_dim 0.0 +306 31 negative_sampler.num_negs_per_pos 20.0 +306 31 training.batch_size 1.0 +306 32 model.embedding_dim 2.0 +306 32 negative_sampler.num_negs_per_pos 9.0 +306 32 training.batch_size 0.0 +306 33 model.embedding_dim 2.0 +306 33 negative_sampler.num_negs_per_pos 24.0 +306 33 training.batch_size 2.0 +306 34 model.embedding_dim 0.0 +306 34 negative_sampler.num_negs_per_pos 70.0 +306 34 training.batch_size 1.0 +306 35 model.embedding_dim 0.0 +306 35 negative_sampler.num_negs_per_pos 49.0 +306 35 training.batch_size 1.0 +306 36 model.embedding_dim 2.0 +306 36 negative_sampler.num_negs_per_pos 84.0 +306 36 training.batch_size 2.0 +306 37 model.embedding_dim 0.0 +306 37 negative_sampler.num_negs_per_pos 21.0 +306 37 training.batch_size 1.0 +306 38 model.embedding_dim 1.0 +306 38 negative_sampler.num_negs_per_pos 87.0 +306 38 training.batch_size 1.0 +306 39 model.embedding_dim 2.0 +306 39 negative_sampler.num_negs_per_pos 32.0 +306 39 training.batch_size 2.0 +306 40 model.embedding_dim 0.0 +306 40 negative_sampler.num_negs_per_pos 58.0 +306 40 training.batch_size 2.0 +306 41 model.embedding_dim 1.0 +306 41 negative_sampler.num_negs_per_pos 51.0 +306 41 training.batch_size 1.0 +306 42 model.embedding_dim 1.0 +306 42 negative_sampler.num_negs_per_pos 45.0 +306 42 training.batch_size 1.0 +306 43 model.embedding_dim 0.0 +306 43 negative_sampler.num_negs_per_pos 49.0 +306 43 training.batch_size 1.0 +306 44 model.embedding_dim 2.0 +306 44 negative_sampler.num_negs_per_pos 29.0 +306 44 training.batch_size 2.0 +306 45 model.embedding_dim 0.0 +306 45 negative_sampler.num_negs_per_pos 39.0 +306 45 training.batch_size 1.0 +306 46 model.embedding_dim 0.0 +306 46 
negative_sampler.num_negs_per_pos 22.0 +306 46 training.batch_size 0.0 +306 47 model.embedding_dim 1.0 +306 47 negative_sampler.num_negs_per_pos 60.0 +306 47 training.batch_size 0.0 +306 48 model.embedding_dim 0.0 +306 48 negative_sampler.num_negs_per_pos 18.0 +306 48 training.batch_size 1.0 +306 49 model.embedding_dim 1.0 +306 49 negative_sampler.num_negs_per_pos 73.0 +306 49 training.batch_size 0.0 +306 50 model.embedding_dim 0.0 +306 50 negative_sampler.num_negs_per_pos 17.0 +306 50 training.batch_size 0.0 +306 51 model.embedding_dim 1.0 +306 51 negative_sampler.num_negs_per_pos 89.0 +306 51 training.batch_size 2.0 +306 52 model.embedding_dim 1.0 +306 52 negative_sampler.num_negs_per_pos 64.0 +306 52 training.batch_size 2.0 +306 53 model.embedding_dim 0.0 +306 53 negative_sampler.num_negs_per_pos 59.0 +306 53 training.batch_size 0.0 +306 54 model.embedding_dim 1.0 +306 54 negative_sampler.num_negs_per_pos 25.0 +306 54 training.batch_size 1.0 +306 55 model.embedding_dim 0.0 +306 55 negative_sampler.num_negs_per_pos 70.0 +306 55 training.batch_size 0.0 +306 56 model.embedding_dim 0.0 +306 56 negative_sampler.num_negs_per_pos 51.0 +306 56 training.batch_size 1.0 +306 57 model.embedding_dim 2.0 +306 57 negative_sampler.num_negs_per_pos 7.0 +306 57 training.batch_size 0.0 +306 58 model.embedding_dim 1.0 +306 58 negative_sampler.num_negs_per_pos 2.0 +306 58 training.batch_size 1.0 +306 59 model.embedding_dim 0.0 +306 59 negative_sampler.num_negs_per_pos 17.0 +306 59 training.batch_size 0.0 +306 60 model.embedding_dim 1.0 +306 60 negative_sampler.num_negs_per_pos 30.0 +306 60 training.batch_size 2.0 +306 61 model.embedding_dim 1.0 +306 61 negative_sampler.num_negs_per_pos 66.0 +306 61 training.batch_size 2.0 +306 62 model.embedding_dim 0.0 +306 62 negative_sampler.num_negs_per_pos 15.0 +306 62 training.batch_size 1.0 +306 63 model.embedding_dim 2.0 +306 63 negative_sampler.num_negs_per_pos 51.0 +306 63 training.batch_size 2.0 +306 64 model.embedding_dim 2.0 +306 64 
negative_sampler.num_negs_per_pos 67.0 +306 64 training.batch_size 2.0 +306 65 model.embedding_dim 0.0 +306 65 negative_sampler.num_negs_per_pos 68.0 +306 65 training.batch_size 1.0 +306 66 model.embedding_dim 0.0 +306 66 negative_sampler.num_negs_per_pos 38.0 +306 66 training.batch_size 1.0 +306 67 model.embedding_dim 0.0 +306 67 negative_sampler.num_negs_per_pos 3.0 +306 67 training.batch_size 2.0 +306 68 model.embedding_dim 0.0 +306 68 negative_sampler.num_negs_per_pos 68.0 +306 68 training.batch_size 2.0 +306 69 model.embedding_dim 0.0 +306 69 negative_sampler.num_negs_per_pos 86.0 +306 69 training.batch_size 2.0 +306 70 model.embedding_dim 2.0 +306 70 negative_sampler.num_negs_per_pos 75.0 +306 70 training.batch_size 1.0 +306 71 model.embedding_dim 1.0 +306 71 negative_sampler.num_negs_per_pos 60.0 +306 71 training.batch_size 1.0 +306 72 model.embedding_dim 2.0 +306 72 negative_sampler.num_negs_per_pos 66.0 +306 72 training.batch_size 2.0 +306 73 model.embedding_dim 2.0 +306 73 negative_sampler.num_negs_per_pos 64.0 +306 73 training.batch_size 1.0 +306 74 model.embedding_dim 1.0 +306 74 negative_sampler.num_negs_per_pos 14.0 +306 74 training.batch_size 1.0 +306 75 model.embedding_dim 2.0 +306 75 negative_sampler.num_negs_per_pos 27.0 +306 75 training.batch_size 0.0 +306 76 model.embedding_dim 1.0 +306 76 negative_sampler.num_negs_per_pos 62.0 +306 76 training.batch_size 0.0 +306 77 model.embedding_dim 0.0 +306 77 negative_sampler.num_negs_per_pos 99.0 +306 77 training.batch_size 2.0 +306 78 model.embedding_dim 2.0 +306 78 negative_sampler.num_negs_per_pos 44.0 +306 78 training.batch_size 2.0 +306 79 model.embedding_dim 0.0 +306 79 negative_sampler.num_negs_per_pos 17.0 +306 79 training.batch_size 0.0 +306 80 model.embedding_dim 0.0 +306 80 negative_sampler.num_negs_per_pos 9.0 +306 80 training.batch_size 0.0 +306 81 model.embedding_dim 0.0 +306 81 negative_sampler.num_negs_per_pos 77.0 +306 81 training.batch_size 2.0 +306 82 model.embedding_dim 2.0 +306 82 
negative_sampler.num_negs_per_pos 0.0 +306 82 training.batch_size 0.0 +306 83 model.embedding_dim 1.0 +306 83 negative_sampler.num_negs_per_pos 1.0 +306 83 training.batch_size 2.0 +306 84 model.embedding_dim 0.0 +306 84 negative_sampler.num_negs_per_pos 74.0 +306 84 training.batch_size 1.0 +306 85 model.embedding_dim 1.0 +306 85 negative_sampler.num_negs_per_pos 96.0 +306 85 training.batch_size 1.0 +306 86 model.embedding_dim 1.0 +306 86 negative_sampler.num_negs_per_pos 24.0 +306 86 training.batch_size 2.0 +306 87 model.embedding_dim 0.0 +306 87 negative_sampler.num_negs_per_pos 11.0 +306 87 training.batch_size 0.0 +306 88 model.embedding_dim 2.0 +306 88 negative_sampler.num_negs_per_pos 89.0 +306 88 training.batch_size 1.0 +306 89 model.embedding_dim 1.0 +306 89 negative_sampler.num_negs_per_pos 58.0 +306 89 training.batch_size 0.0 +306 90 model.embedding_dim 1.0 +306 90 negative_sampler.num_negs_per_pos 45.0 +306 90 training.batch_size 0.0 +306 91 model.embedding_dim 2.0 +306 91 negative_sampler.num_negs_per_pos 20.0 +306 91 training.batch_size 2.0 +306 92 model.embedding_dim 0.0 +306 92 negative_sampler.num_negs_per_pos 43.0 +306 92 training.batch_size 0.0 +306 93 model.embedding_dim 0.0 +306 93 negative_sampler.num_negs_per_pos 95.0 +306 93 training.batch_size 2.0 +306 94 model.embedding_dim 0.0 +306 94 negative_sampler.num_negs_per_pos 61.0 +306 94 training.batch_size 2.0 +306 95 model.embedding_dim 1.0 +306 95 negative_sampler.num_negs_per_pos 0.0 +306 95 training.batch_size 2.0 +306 96 model.embedding_dim 2.0 +306 96 negative_sampler.num_negs_per_pos 96.0 +306 96 training.batch_size 1.0 +306 97 model.embedding_dim 2.0 +306 97 negative_sampler.num_negs_per_pos 61.0 +306 97 training.batch_size 0.0 +306 98 model.embedding_dim 1.0 +306 98 negative_sampler.num_negs_per_pos 30.0 +306 98 training.batch_size 0.0 +306 99 model.embedding_dim 0.0 +306 99 negative_sampler.num_negs_per_pos 98.0 +306 99 training.batch_size 1.0 +306 100 model.embedding_dim 0.0 +306 100 
negative_sampler.num_negs_per_pos 2.0 +306 100 training.batch_size 2.0 +306 1 dataset """kinships""" +306 1 model """hole""" +306 1 loss """bceaftersigmoid""" +306 1 regularizer """no""" +306 1 optimizer """adadelta""" +306 1 training_loop """owa""" +306 1 negative_sampler """basic""" +306 1 evaluator """rankbased""" +306 2 dataset """kinships""" +306 2 model """hole""" +306 2 loss """bceaftersigmoid""" +306 2 regularizer """no""" +306 2 optimizer """adadelta""" +306 2 training_loop """owa""" +306 2 negative_sampler """basic""" +306 2 evaluator """rankbased""" +306 3 dataset """kinships""" +306 3 model """hole""" +306 3 loss """bceaftersigmoid""" +306 3 regularizer """no""" +306 3 optimizer """adadelta""" +306 3 training_loop """owa""" +306 3 negative_sampler """basic""" +306 3 evaluator """rankbased""" +306 4 dataset """kinships""" +306 4 model """hole""" +306 4 loss """bceaftersigmoid""" +306 4 regularizer """no""" +306 4 optimizer """adadelta""" +306 4 training_loop """owa""" +306 4 negative_sampler """basic""" +306 4 evaluator """rankbased""" +306 5 dataset """kinships""" +306 5 model """hole""" +306 5 loss """bceaftersigmoid""" +306 5 regularizer """no""" +306 5 optimizer """adadelta""" +306 5 training_loop """owa""" +306 5 negative_sampler """basic""" +306 5 evaluator """rankbased""" +306 6 dataset """kinships""" +306 6 model """hole""" +306 6 loss """bceaftersigmoid""" +306 6 regularizer """no""" +306 6 optimizer """adadelta""" +306 6 training_loop """owa""" +306 6 negative_sampler """basic""" +306 6 evaluator """rankbased""" +306 7 dataset """kinships""" +306 7 model """hole""" +306 7 loss """bceaftersigmoid""" +306 7 regularizer """no""" +306 7 optimizer """adadelta""" +306 7 training_loop """owa""" +306 7 negative_sampler """basic""" +306 7 evaluator """rankbased""" +306 8 dataset """kinships""" +306 8 model """hole""" +306 8 loss """bceaftersigmoid""" +306 8 regularizer """no""" +306 8 optimizer """adadelta""" +306 8 training_loop """owa""" +306 8 
negative_sampler """basic""" +306 8 evaluator """rankbased""" +306 9 dataset """kinships""" +306 9 model """hole""" +306 9 loss """bceaftersigmoid""" +306 9 regularizer """no""" +306 9 optimizer """adadelta""" +306 9 training_loop """owa""" +306 9 negative_sampler """basic""" +306 9 evaluator """rankbased""" +306 10 dataset """kinships""" +306 10 model """hole""" +306 10 loss """bceaftersigmoid""" +306 10 regularizer """no""" +306 10 optimizer """adadelta""" +306 10 training_loop """owa""" +306 10 negative_sampler """basic""" +306 10 evaluator """rankbased""" +306 11 dataset """kinships""" +306 11 model """hole""" +306 11 loss """bceaftersigmoid""" +306 11 regularizer """no""" +306 11 optimizer """adadelta""" +306 11 training_loop """owa""" +306 11 negative_sampler """basic""" +306 11 evaluator """rankbased""" +306 12 dataset """kinships""" +306 12 model """hole""" +306 12 loss """bceaftersigmoid""" +306 12 regularizer """no""" +306 12 optimizer """adadelta""" +306 12 training_loop """owa""" +306 12 negative_sampler """basic""" +306 12 evaluator """rankbased""" +306 13 dataset """kinships""" +306 13 model """hole""" +306 13 loss """bceaftersigmoid""" +306 13 regularizer """no""" +306 13 optimizer """adadelta""" +306 13 training_loop """owa""" +306 13 negative_sampler """basic""" +306 13 evaluator """rankbased""" +306 14 dataset """kinships""" +306 14 model """hole""" +306 14 loss """bceaftersigmoid""" +306 14 regularizer """no""" +306 14 optimizer """adadelta""" +306 14 training_loop """owa""" +306 14 negative_sampler """basic""" +306 14 evaluator """rankbased""" +306 15 dataset """kinships""" +306 15 model """hole""" +306 15 loss """bceaftersigmoid""" +306 15 regularizer """no""" +306 15 optimizer """adadelta""" +306 15 training_loop """owa""" +306 15 negative_sampler """basic""" +306 15 evaluator """rankbased""" +306 16 dataset """kinships""" +306 16 model """hole""" +306 16 loss """bceaftersigmoid""" +306 16 regularizer """no""" +306 16 optimizer """adadelta""" 
+306 16 training_loop """owa""" +306 16 negative_sampler """basic""" +306 16 evaluator """rankbased""" +306 17 dataset """kinships""" +306 17 model """hole""" +306 17 loss """bceaftersigmoid""" +306 17 regularizer """no""" +306 17 optimizer """adadelta""" +306 17 training_loop """owa""" +306 17 negative_sampler """basic""" +306 17 evaluator """rankbased""" +306 18 dataset """kinships""" +306 18 model """hole""" +306 18 loss """bceaftersigmoid""" +306 18 regularizer """no""" +306 18 optimizer """adadelta""" +306 18 training_loop """owa""" +306 18 negative_sampler """basic""" +306 18 evaluator """rankbased""" +306 19 dataset """kinships""" +306 19 model """hole""" +306 19 loss """bceaftersigmoid""" +306 19 regularizer """no""" +306 19 optimizer """adadelta""" +306 19 training_loop """owa""" +306 19 negative_sampler """basic""" +306 19 evaluator """rankbased""" +306 20 dataset """kinships""" +306 20 model """hole""" +306 20 loss """bceaftersigmoid""" +306 20 regularizer """no""" +306 20 optimizer """adadelta""" +306 20 training_loop """owa""" +306 20 negative_sampler """basic""" +306 20 evaluator """rankbased""" +306 21 dataset """kinships""" +306 21 model """hole""" +306 21 loss """bceaftersigmoid""" +306 21 regularizer """no""" +306 21 optimizer """adadelta""" +306 21 training_loop """owa""" +306 21 negative_sampler """basic""" +306 21 evaluator """rankbased""" +306 22 dataset """kinships""" +306 22 model """hole""" +306 22 loss """bceaftersigmoid""" +306 22 regularizer """no""" +306 22 optimizer """adadelta""" +306 22 training_loop """owa""" +306 22 negative_sampler """basic""" +306 22 evaluator """rankbased""" +306 23 dataset """kinships""" +306 23 model """hole""" +306 23 loss """bceaftersigmoid""" +306 23 regularizer """no""" +306 23 optimizer """adadelta""" +306 23 training_loop """owa""" +306 23 negative_sampler """basic""" +306 23 evaluator """rankbased""" +306 24 dataset """kinships""" +306 24 model """hole""" +306 24 loss """bceaftersigmoid""" +306 24 
regularizer """no""" +306 24 optimizer """adadelta""" +306 24 training_loop """owa""" +306 24 negative_sampler """basic""" +306 24 evaluator """rankbased""" +306 25 dataset """kinships""" +306 25 model """hole""" +306 25 loss """bceaftersigmoid""" +306 25 regularizer """no""" +306 25 optimizer """adadelta""" +306 25 training_loop """owa""" +306 25 negative_sampler """basic""" +306 25 evaluator """rankbased""" +306 26 dataset """kinships""" +306 26 model """hole""" +306 26 loss """bceaftersigmoid""" +306 26 regularizer """no""" +306 26 optimizer """adadelta""" +306 26 training_loop """owa""" +306 26 negative_sampler """basic""" +306 26 evaluator """rankbased""" +306 27 dataset """kinships""" +306 27 model """hole""" +306 27 loss """bceaftersigmoid""" +306 27 regularizer """no""" +306 27 optimizer """adadelta""" +306 27 training_loop """owa""" +306 27 negative_sampler """basic""" +306 27 evaluator """rankbased""" +306 28 dataset """kinships""" +306 28 model """hole""" +306 28 loss """bceaftersigmoid""" +306 28 regularizer """no""" +306 28 optimizer """adadelta""" +306 28 training_loop """owa""" +306 28 negative_sampler """basic""" +306 28 evaluator """rankbased""" +306 29 dataset """kinships""" +306 29 model """hole""" +306 29 loss """bceaftersigmoid""" +306 29 regularizer """no""" +306 29 optimizer """adadelta""" +306 29 training_loop """owa""" +306 29 negative_sampler """basic""" +306 29 evaluator """rankbased""" +306 30 dataset """kinships""" +306 30 model """hole""" +306 30 loss """bceaftersigmoid""" +306 30 regularizer """no""" +306 30 optimizer """adadelta""" +306 30 training_loop """owa""" +306 30 negative_sampler """basic""" +306 30 evaluator """rankbased""" +306 31 dataset """kinships""" +306 31 model """hole""" +306 31 loss """bceaftersigmoid""" +306 31 regularizer """no""" +306 31 optimizer """adadelta""" +306 31 training_loop """owa""" +306 31 negative_sampler """basic""" +306 31 evaluator """rankbased""" +306 32 dataset """kinships""" +306 32 model 
"""hole""" +306 32 loss """bceaftersigmoid""" +306 32 regularizer """no""" +306 32 optimizer """adadelta""" +306 32 training_loop """owa""" +306 32 negative_sampler """basic""" +306 32 evaluator """rankbased""" +306 33 dataset """kinships""" +306 33 model """hole""" +306 33 loss """bceaftersigmoid""" +306 33 regularizer """no""" +306 33 optimizer """adadelta""" +306 33 training_loop """owa""" +306 33 negative_sampler """basic""" +306 33 evaluator """rankbased""" +306 34 dataset """kinships""" +306 34 model """hole""" +306 34 loss """bceaftersigmoid""" +306 34 regularizer """no""" +306 34 optimizer """adadelta""" +306 34 training_loop """owa""" +306 34 negative_sampler """basic""" +306 34 evaluator """rankbased""" +306 35 dataset """kinships""" +306 35 model """hole""" +306 35 loss """bceaftersigmoid""" +306 35 regularizer """no""" +306 35 optimizer """adadelta""" +306 35 training_loop """owa""" +306 35 negative_sampler """basic""" +306 35 evaluator """rankbased""" +306 36 dataset """kinships""" +306 36 model """hole""" +306 36 loss """bceaftersigmoid""" +306 36 regularizer """no""" +306 36 optimizer """adadelta""" +306 36 training_loop """owa""" +306 36 negative_sampler """basic""" +306 36 evaluator """rankbased""" +306 37 dataset """kinships""" +306 37 model """hole""" +306 37 loss """bceaftersigmoid""" +306 37 regularizer """no""" +306 37 optimizer """adadelta""" +306 37 training_loop """owa""" +306 37 negative_sampler """basic""" +306 37 evaluator """rankbased""" +306 38 dataset """kinships""" +306 38 model """hole""" +306 38 loss """bceaftersigmoid""" +306 38 regularizer """no""" +306 38 optimizer """adadelta""" +306 38 training_loop """owa""" +306 38 negative_sampler """basic""" +306 38 evaluator """rankbased""" +306 39 dataset """kinships""" +306 39 model """hole""" +306 39 loss """bceaftersigmoid""" +306 39 regularizer """no""" +306 39 optimizer """adadelta""" +306 39 training_loop """owa""" +306 39 negative_sampler """basic""" +306 39 evaluator 
"""rankbased""" +306 40 dataset """kinships""" +306 40 model """hole""" +306 40 loss """bceaftersigmoid""" +306 40 regularizer """no""" +306 40 optimizer """adadelta""" +306 40 training_loop """owa""" +306 40 negative_sampler """basic""" +306 40 evaluator """rankbased""" +306 41 dataset """kinships""" +306 41 model """hole""" +306 41 loss """bceaftersigmoid""" +306 41 regularizer """no""" +306 41 optimizer """adadelta""" +306 41 training_loop """owa""" +306 41 negative_sampler """basic""" +306 41 evaluator """rankbased""" +306 42 dataset """kinships""" +306 42 model """hole""" +306 42 loss """bceaftersigmoid""" +306 42 regularizer """no""" +306 42 optimizer """adadelta""" +306 42 training_loop """owa""" +306 42 negative_sampler """basic""" +306 42 evaluator """rankbased""" +306 43 dataset """kinships""" +306 43 model """hole""" +306 43 loss """bceaftersigmoid""" +306 43 regularizer """no""" +306 43 optimizer """adadelta""" +306 43 training_loop """owa""" +306 43 negative_sampler """basic""" +306 43 evaluator """rankbased""" +306 44 dataset """kinships""" +306 44 model """hole""" +306 44 loss """bceaftersigmoid""" +306 44 regularizer """no""" +306 44 optimizer """adadelta""" +306 44 training_loop """owa""" +306 44 negative_sampler """basic""" +306 44 evaluator """rankbased""" +306 45 dataset """kinships""" +306 45 model """hole""" +306 45 loss """bceaftersigmoid""" +306 45 regularizer """no""" +306 45 optimizer """adadelta""" +306 45 training_loop """owa""" +306 45 negative_sampler """basic""" +306 45 evaluator """rankbased""" +306 46 dataset """kinships""" +306 46 model """hole""" +306 46 loss """bceaftersigmoid""" +306 46 regularizer """no""" +306 46 optimizer """adadelta""" +306 46 training_loop """owa""" +306 46 negative_sampler """basic""" +306 46 evaluator """rankbased""" +306 47 dataset """kinships""" +306 47 model """hole""" +306 47 loss """bceaftersigmoid""" +306 47 regularizer """no""" +306 47 optimizer """adadelta""" +306 47 training_loop """owa""" +306 
47 negative_sampler """basic""" +306 47 evaluator """rankbased""" +306 48 dataset """kinships""" +306 48 model """hole""" +306 48 loss """bceaftersigmoid""" +306 48 regularizer """no""" +306 48 optimizer """adadelta""" +306 48 training_loop """owa""" +306 48 negative_sampler """basic""" +306 48 evaluator """rankbased""" +306 49 dataset """kinships""" +306 49 model """hole""" +306 49 loss """bceaftersigmoid""" +306 49 regularizer """no""" +306 49 optimizer """adadelta""" +306 49 training_loop """owa""" +306 49 negative_sampler """basic""" +306 49 evaluator """rankbased""" +306 50 dataset """kinships""" +306 50 model """hole""" +306 50 loss """bceaftersigmoid""" +306 50 regularizer """no""" +306 50 optimizer """adadelta""" +306 50 training_loop """owa""" +306 50 negative_sampler """basic""" +306 50 evaluator """rankbased""" +306 51 dataset """kinships""" +306 51 model """hole""" +306 51 loss """bceaftersigmoid""" +306 51 regularizer """no""" +306 51 optimizer """adadelta""" +306 51 training_loop """owa""" +306 51 negative_sampler """basic""" +306 51 evaluator """rankbased""" +306 52 dataset """kinships""" +306 52 model """hole""" +306 52 loss """bceaftersigmoid""" +306 52 regularizer """no""" +306 52 optimizer """adadelta""" +306 52 training_loop """owa""" +306 52 negative_sampler """basic""" +306 52 evaluator """rankbased""" +306 53 dataset """kinships""" +306 53 model """hole""" +306 53 loss """bceaftersigmoid""" +306 53 regularizer """no""" +306 53 optimizer """adadelta""" +306 53 training_loop """owa""" +306 53 negative_sampler """basic""" +306 53 evaluator """rankbased""" +306 54 dataset """kinships""" +306 54 model """hole""" +306 54 loss """bceaftersigmoid""" +306 54 regularizer """no""" +306 54 optimizer """adadelta""" +306 54 training_loop """owa""" +306 54 negative_sampler """basic""" +306 54 evaluator """rankbased""" +306 55 dataset """kinships""" +306 55 model """hole""" +306 55 loss """bceaftersigmoid""" +306 55 regularizer """no""" +306 55 optimizer 
"""adadelta""" +306 55 training_loop """owa""" +306 55 negative_sampler """basic""" +306 55 evaluator """rankbased""" +306 56 dataset """kinships""" +306 56 model """hole""" +306 56 loss """bceaftersigmoid""" +306 56 regularizer """no""" +306 56 optimizer """adadelta""" +306 56 training_loop """owa""" +306 56 negative_sampler """basic""" +306 56 evaluator """rankbased""" +306 57 dataset """kinships""" +306 57 model """hole""" +306 57 loss """bceaftersigmoid""" +306 57 regularizer """no""" +306 57 optimizer """adadelta""" +306 57 training_loop """owa""" +306 57 negative_sampler """basic""" +306 57 evaluator """rankbased""" +306 58 dataset """kinships""" +306 58 model """hole""" +306 58 loss """bceaftersigmoid""" +306 58 regularizer """no""" +306 58 optimizer """adadelta""" +306 58 training_loop """owa""" +306 58 negative_sampler """basic""" +306 58 evaluator """rankbased""" +306 59 dataset """kinships""" +306 59 model """hole""" +306 59 loss """bceaftersigmoid""" +306 59 regularizer """no""" +306 59 optimizer """adadelta""" +306 59 training_loop """owa""" +306 59 negative_sampler """basic""" +306 59 evaluator """rankbased""" +306 60 dataset """kinships""" +306 60 model """hole""" +306 60 loss """bceaftersigmoid""" +306 60 regularizer """no""" +306 60 optimizer """adadelta""" +306 60 training_loop """owa""" +306 60 negative_sampler """basic""" +306 60 evaluator """rankbased""" +306 61 dataset """kinships""" +306 61 model """hole""" +306 61 loss """bceaftersigmoid""" +306 61 regularizer """no""" +306 61 optimizer """adadelta""" +306 61 training_loop """owa""" +306 61 negative_sampler """basic""" +306 61 evaluator """rankbased""" +306 62 dataset """kinships""" +306 62 model """hole""" +306 62 loss """bceaftersigmoid""" +306 62 regularizer """no""" +306 62 optimizer """adadelta""" +306 62 training_loop """owa""" +306 62 negative_sampler """basic""" +306 62 evaluator """rankbased""" +306 63 dataset """kinships""" +306 63 model """hole""" +306 63 loss 
"""bceaftersigmoid""" +306 63 regularizer """no""" +306 63 optimizer """adadelta""" +306 63 training_loop """owa""" +306 63 negative_sampler """basic""" +306 63 evaluator """rankbased""" +306 64 dataset """kinships""" +306 64 model """hole""" +306 64 loss """bceaftersigmoid""" +306 64 regularizer """no""" +306 64 optimizer """adadelta""" +306 64 training_loop """owa""" +306 64 negative_sampler """basic""" +306 64 evaluator """rankbased""" +306 65 dataset """kinships""" +306 65 model """hole""" +306 65 loss """bceaftersigmoid""" +306 65 regularizer """no""" +306 65 optimizer """adadelta""" +306 65 training_loop """owa""" +306 65 negative_sampler """basic""" +306 65 evaluator """rankbased""" +306 66 dataset """kinships""" +306 66 model """hole""" +306 66 loss """bceaftersigmoid""" +306 66 regularizer """no""" +306 66 optimizer """adadelta""" +306 66 training_loop """owa""" +306 66 negative_sampler """basic""" +306 66 evaluator """rankbased""" +306 67 dataset """kinships""" +306 67 model """hole""" +306 67 loss """bceaftersigmoid""" +306 67 regularizer """no""" +306 67 optimizer """adadelta""" +306 67 training_loop """owa""" +306 67 negative_sampler """basic""" +306 67 evaluator """rankbased""" +306 68 dataset """kinships""" +306 68 model """hole""" +306 68 loss """bceaftersigmoid""" +306 68 regularizer """no""" +306 68 optimizer """adadelta""" +306 68 training_loop """owa""" +306 68 negative_sampler """basic""" +306 68 evaluator """rankbased""" +306 69 dataset """kinships""" +306 69 model """hole""" +306 69 loss """bceaftersigmoid""" +306 69 regularizer """no""" +306 69 optimizer """adadelta""" +306 69 training_loop """owa""" +306 69 negative_sampler """basic""" +306 69 evaluator """rankbased""" +306 70 dataset """kinships""" +306 70 model """hole""" +306 70 loss """bceaftersigmoid""" +306 70 regularizer """no""" +306 70 optimizer """adadelta""" +306 70 training_loop """owa""" +306 70 negative_sampler """basic""" +306 70 evaluator """rankbased""" +306 71 dataset 
"""kinships""" +306 71 model """hole""" +306 71 loss """bceaftersigmoid""" +306 71 regularizer """no""" +306 71 optimizer """adadelta""" +306 71 training_loop """owa""" +306 71 negative_sampler """basic""" +306 71 evaluator """rankbased""" +306 72 dataset """kinships""" +306 72 model """hole""" +306 72 loss """bceaftersigmoid""" +306 72 regularizer """no""" +306 72 optimizer """adadelta""" +306 72 training_loop """owa""" +306 72 negative_sampler """basic""" +306 72 evaluator """rankbased""" +306 73 dataset """kinships""" +306 73 model """hole""" +306 73 loss """bceaftersigmoid""" +306 73 regularizer """no""" +306 73 optimizer """adadelta""" +306 73 training_loop """owa""" +306 73 negative_sampler """basic""" +306 73 evaluator """rankbased""" +306 74 dataset """kinships""" +306 74 model """hole""" +306 74 loss """bceaftersigmoid""" +306 74 regularizer """no""" +306 74 optimizer """adadelta""" +306 74 training_loop """owa""" +306 74 negative_sampler """basic""" +306 74 evaluator """rankbased""" +306 75 dataset """kinships""" +306 75 model """hole""" +306 75 loss """bceaftersigmoid""" +306 75 regularizer """no""" +306 75 optimizer """adadelta""" +306 75 training_loop """owa""" +306 75 negative_sampler """basic""" +306 75 evaluator """rankbased""" +306 76 dataset """kinships""" +306 76 model """hole""" +306 76 loss """bceaftersigmoid""" +306 76 regularizer """no""" +306 76 optimizer """adadelta""" +306 76 training_loop """owa""" +306 76 negative_sampler """basic""" +306 76 evaluator """rankbased""" +306 77 dataset """kinships""" +306 77 model """hole""" +306 77 loss """bceaftersigmoid""" +306 77 regularizer """no""" +306 77 optimizer """adadelta""" +306 77 training_loop """owa""" +306 77 negative_sampler """basic""" +306 77 evaluator """rankbased""" +306 78 dataset """kinships""" +306 78 model """hole""" +306 78 loss """bceaftersigmoid""" +306 78 regularizer """no""" +306 78 optimizer """adadelta""" +306 78 training_loop """owa""" +306 78 negative_sampler """basic""" 
+306 78 evaluator """rankbased""" +306 79 dataset """kinships""" +306 79 model """hole""" +306 79 loss """bceaftersigmoid""" +306 79 regularizer """no""" +306 79 optimizer """adadelta""" +306 79 training_loop """owa""" +306 79 negative_sampler """basic""" +306 79 evaluator """rankbased""" +306 80 dataset """kinships""" +306 80 model """hole""" +306 80 loss """bceaftersigmoid""" +306 80 regularizer """no""" +306 80 optimizer """adadelta""" +306 80 training_loop """owa""" +306 80 negative_sampler """basic""" +306 80 evaluator """rankbased""" +306 81 dataset """kinships""" +306 81 model """hole""" +306 81 loss """bceaftersigmoid""" +306 81 regularizer """no""" +306 81 optimizer """adadelta""" +306 81 training_loop """owa""" +306 81 negative_sampler """basic""" +306 81 evaluator """rankbased""" +306 82 dataset """kinships""" +306 82 model """hole""" +306 82 loss """bceaftersigmoid""" +306 82 regularizer """no""" +306 82 optimizer """adadelta""" +306 82 training_loop """owa""" +306 82 negative_sampler """basic""" +306 82 evaluator """rankbased""" +306 83 dataset """kinships""" +306 83 model """hole""" +306 83 loss """bceaftersigmoid""" +306 83 regularizer """no""" +306 83 optimizer """adadelta""" +306 83 training_loop """owa""" +306 83 negative_sampler """basic""" +306 83 evaluator """rankbased""" +306 84 dataset """kinships""" +306 84 model """hole""" +306 84 loss """bceaftersigmoid""" +306 84 regularizer """no""" +306 84 optimizer """adadelta""" +306 84 training_loop """owa""" +306 84 negative_sampler """basic""" +306 84 evaluator """rankbased""" +306 85 dataset """kinships""" +306 85 model """hole""" +306 85 loss """bceaftersigmoid""" +306 85 regularizer """no""" +306 85 optimizer """adadelta""" +306 85 training_loop """owa""" +306 85 negative_sampler """basic""" +306 85 evaluator """rankbased""" +306 86 dataset """kinships""" +306 86 model """hole""" +306 86 loss """bceaftersigmoid""" +306 86 regularizer """no""" +306 86 optimizer """adadelta""" +306 86 
training_loop """owa""" +306 86 negative_sampler """basic""" +306 86 evaluator """rankbased""" +306 87 dataset """kinships""" +306 87 model """hole""" +306 87 loss """bceaftersigmoid""" +306 87 regularizer """no""" +306 87 optimizer """adadelta""" +306 87 training_loop """owa""" +306 87 negative_sampler """basic""" +306 87 evaluator """rankbased""" +306 88 dataset """kinships""" +306 88 model """hole""" +306 88 loss """bceaftersigmoid""" +306 88 regularizer """no""" +306 88 optimizer """adadelta""" +306 88 training_loop """owa""" +306 88 negative_sampler """basic""" +306 88 evaluator """rankbased""" +306 89 dataset """kinships""" +306 89 model """hole""" +306 89 loss """bceaftersigmoid""" +306 89 regularizer """no""" +306 89 optimizer """adadelta""" +306 89 training_loop """owa""" +306 89 negative_sampler """basic""" +306 89 evaluator """rankbased""" +306 90 dataset """kinships""" +306 90 model """hole""" +306 90 loss """bceaftersigmoid""" +306 90 regularizer """no""" +306 90 optimizer """adadelta""" +306 90 training_loop """owa""" +306 90 negative_sampler """basic""" +306 90 evaluator """rankbased""" +306 91 dataset """kinships""" +306 91 model """hole""" +306 91 loss """bceaftersigmoid""" +306 91 regularizer """no""" +306 91 optimizer """adadelta""" +306 91 training_loop """owa""" +306 91 negative_sampler """basic""" +306 91 evaluator """rankbased""" +306 92 dataset """kinships""" +306 92 model """hole""" +306 92 loss """bceaftersigmoid""" +306 92 regularizer """no""" +306 92 optimizer """adadelta""" +306 92 training_loop """owa""" +306 92 negative_sampler """basic""" +306 92 evaluator """rankbased""" +306 93 dataset """kinships""" +306 93 model """hole""" +306 93 loss """bceaftersigmoid""" +306 93 regularizer """no""" +306 93 optimizer """adadelta""" +306 93 training_loop """owa""" +306 93 negative_sampler """basic""" +306 93 evaluator """rankbased""" +306 94 dataset """kinships""" +306 94 model """hole""" +306 94 loss """bceaftersigmoid""" +306 94 regularizer 
"""no""" +306 94 optimizer """adadelta""" +306 94 training_loop """owa""" +306 94 negative_sampler """basic""" +306 94 evaluator """rankbased""" +306 95 dataset """kinships""" +306 95 model """hole""" +306 95 loss """bceaftersigmoid""" +306 95 regularizer """no""" +306 95 optimizer """adadelta""" +306 95 training_loop """owa""" +306 95 negative_sampler """basic""" +306 95 evaluator """rankbased""" +306 96 dataset """kinships""" +306 96 model """hole""" +306 96 loss """bceaftersigmoid""" +306 96 regularizer """no""" +306 96 optimizer """adadelta""" +306 96 training_loop """owa""" +306 96 negative_sampler """basic""" +306 96 evaluator """rankbased""" +306 97 dataset """kinships""" +306 97 model """hole""" +306 97 loss """bceaftersigmoid""" +306 97 regularizer """no""" +306 97 optimizer """adadelta""" +306 97 training_loop """owa""" +306 97 negative_sampler """basic""" +306 97 evaluator """rankbased""" +306 98 dataset """kinships""" +306 98 model """hole""" +306 98 loss """bceaftersigmoid""" +306 98 regularizer """no""" +306 98 optimizer """adadelta""" +306 98 training_loop """owa""" +306 98 negative_sampler """basic""" +306 98 evaluator """rankbased""" +306 99 dataset """kinships""" +306 99 model """hole""" +306 99 loss """bceaftersigmoid""" +306 99 regularizer """no""" +306 99 optimizer """adadelta""" +306 99 training_loop """owa""" +306 99 negative_sampler """basic""" +306 99 evaluator """rankbased""" +306 100 dataset """kinships""" +306 100 model """hole""" +306 100 loss """bceaftersigmoid""" +306 100 regularizer """no""" +306 100 optimizer """adadelta""" +306 100 training_loop """owa""" +306 100 negative_sampler """basic""" +306 100 evaluator """rankbased""" +307 1 model.embedding_dim 0.0 +307 1 negative_sampler.num_negs_per_pos 48.0 +307 1 training.batch_size 1.0 +307 2 model.embedding_dim 2.0 +307 2 negative_sampler.num_negs_per_pos 81.0 +307 2 training.batch_size 0.0 +307 3 model.embedding_dim 1.0 +307 3 negative_sampler.num_negs_per_pos 65.0 +307 3 
training.batch_size 1.0 +307 4 model.embedding_dim 0.0 +307 4 negative_sampler.num_negs_per_pos 14.0 +307 4 training.batch_size 1.0 +307 5 model.embedding_dim 0.0 +307 5 negative_sampler.num_negs_per_pos 1.0 +307 5 training.batch_size 0.0 +307 6 model.embedding_dim 2.0 +307 6 negative_sampler.num_negs_per_pos 8.0 +307 6 training.batch_size 2.0 +307 7 model.embedding_dim 0.0 +307 7 negative_sampler.num_negs_per_pos 77.0 +307 7 training.batch_size 0.0 +307 8 model.embedding_dim 0.0 +307 8 negative_sampler.num_negs_per_pos 46.0 +307 8 training.batch_size 0.0 +307 9 model.embedding_dim 1.0 +307 9 negative_sampler.num_negs_per_pos 86.0 +307 9 training.batch_size 0.0 +307 10 model.embedding_dim 0.0 +307 10 negative_sampler.num_negs_per_pos 33.0 +307 10 training.batch_size 0.0 +307 11 model.embedding_dim 1.0 +307 11 negative_sampler.num_negs_per_pos 7.0 +307 11 training.batch_size 2.0 +307 12 model.embedding_dim 1.0 +307 12 negative_sampler.num_negs_per_pos 67.0 +307 12 training.batch_size 2.0 +307 13 model.embedding_dim 1.0 +307 13 negative_sampler.num_negs_per_pos 66.0 +307 13 training.batch_size 1.0 +307 14 model.embedding_dim 1.0 +307 14 negative_sampler.num_negs_per_pos 91.0 +307 14 training.batch_size 2.0 +307 15 model.embedding_dim 1.0 +307 15 negative_sampler.num_negs_per_pos 75.0 +307 15 training.batch_size 1.0 +307 16 model.embedding_dim 2.0 +307 16 negative_sampler.num_negs_per_pos 88.0 +307 16 training.batch_size 2.0 +307 17 model.embedding_dim 2.0 +307 17 negative_sampler.num_negs_per_pos 87.0 +307 17 training.batch_size 0.0 +307 18 model.embedding_dim 1.0 +307 18 negative_sampler.num_negs_per_pos 10.0 +307 18 training.batch_size 1.0 +307 19 model.embedding_dim 2.0 +307 19 negative_sampler.num_negs_per_pos 7.0 +307 19 training.batch_size 2.0 +307 20 model.embedding_dim 2.0 +307 20 negative_sampler.num_negs_per_pos 77.0 +307 20 training.batch_size 1.0 +307 21 model.embedding_dim 0.0 +307 21 negative_sampler.num_negs_per_pos 46.0 +307 21 training.batch_size 1.0 
+307 22 model.embedding_dim 0.0 +307 22 negative_sampler.num_negs_per_pos 97.0 +307 22 training.batch_size 0.0 +307 23 model.embedding_dim 2.0 +307 23 negative_sampler.num_negs_per_pos 17.0 +307 23 training.batch_size 1.0 +307 24 model.embedding_dim 1.0 +307 24 negative_sampler.num_negs_per_pos 3.0 +307 24 training.batch_size 2.0 +307 25 model.embedding_dim 0.0 +307 25 negative_sampler.num_negs_per_pos 88.0 +307 25 training.batch_size 2.0 +307 26 model.embedding_dim 1.0 +307 26 negative_sampler.num_negs_per_pos 93.0 +307 26 training.batch_size 0.0 +307 27 model.embedding_dim 0.0 +307 27 negative_sampler.num_negs_per_pos 32.0 +307 27 training.batch_size 2.0 +307 28 model.embedding_dim 0.0 +307 28 negative_sampler.num_negs_per_pos 4.0 +307 28 training.batch_size 1.0 +307 29 model.embedding_dim 2.0 +307 29 negative_sampler.num_negs_per_pos 15.0 +307 29 training.batch_size 1.0 +307 30 model.embedding_dim 2.0 +307 30 negative_sampler.num_negs_per_pos 74.0 +307 30 training.batch_size 2.0 +307 31 model.embedding_dim 2.0 +307 31 negative_sampler.num_negs_per_pos 54.0 +307 31 training.batch_size 0.0 +307 32 model.embedding_dim 2.0 +307 32 negative_sampler.num_negs_per_pos 89.0 +307 32 training.batch_size 0.0 +307 33 model.embedding_dim 0.0 +307 33 negative_sampler.num_negs_per_pos 34.0 +307 33 training.batch_size 0.0 +307 34 model.embedding_dim 1.0 +307 34 negative_sampler.num_negs_per_pos 46.0 +307 34 training.batch_size 0.0 +307 35 model.embedding_dim 0.0 +307 35 negative_sampler.num_negs_per_pos 76.0 +307 35 training.batch_size 0.0 +307 36 model.embedding_dim 1.0 +307 36 negative_sampler.num_negs_per_pos 81.0 +307 36 training.batch_size 1.0 +307 37 model.embedding_dim 1.0 +307 37 negative_sampler.num_negs_per_pos 20.0 +307 37 training.batch_size 2.0 +307 38 model.embedding_dim 1.0 +307 38 negative_sampler.num_negs_per_pos 85.0 +307 38 training.batch_size 2.0 +307 39 model.embedding_dim 1.0 +307 39 negative_sampler.num_negs_per_pos 96.0 +307 39 training.batch_size 0.0 
+307 40 model.embedding_dim 0.0 +307 40 negative_sampler.num_negs_per_pos 15.0 +307 40 training.batch_size 0.0 +307 41 model.embedding_dim 1.0 +307 41 negative_sampler.num_negs_per_pos 2.0 +307 41 training.batch_size 2.0 +307 42 model.embedding_dim 2.0 +307 42 negative_sampler.num_negs_per_pos 18.0 +307 42 training.batch_size 1.0 +307 43 model.embedding_dim 0.0 +307 43 negative_sampler.num_negs_per_pos 95.0 +307 43 training.batch_size 2.0 +307 44 model.embedding_dim 2.0 +307 44 negative_sampler.num_negs_per_pos 35.0 +307 44 training.batch_size 2.0 +307 45 model.embedding_dim 1.0 +307 45 negative_sampler.num_negs_per_pos 0.0 +307 45 training.batch_size 0.0 +307 46 model.embedding_dim 0.0 +307 46 negative_sampler.num_negs_per_pos 96.0 +307 46 training.batch_size 2.0 +307 47 model.embedding_dim 2.0 +307 47 negative_sampler.num_negs_per_pos 21.0 +307 47 training.batch_size 0.0 +307 48 model.embedding_dim 1.0 +307 48 negative_sampler.num_negs_per_pos 2.0 +307 48 training.batch_size 1.0 +307 49 model.embedding_dim 1.0 +307 49 negative_sampler.num_negs_per_pos 74.0 +307 49 training.batch_size 1.0 +307 50 model.embedding_dim 2.0 +307 50 negative_sampler.num_negs_per_pos 82.0 +307 50 training.batch_size 0.0 +307 51 model.embedding_dim 0.0 +307 51 negative_sampler.num_negs_per_pos 77.0 +307 51 training.batch_size 0.0 +307 52 model.embedding_dim 0.0 +307 52 negative_sampler.num_negs_per_pos 59.0 +307 52 training.batch_size 1.0 +307 53 model.embedding_dim 1.0 +307 53 negative_sampler.num_negs_per_pos 48.0 +307 53 training.batch_size 0.0 +307 54 model.embedding_dim 0.0 +307 54 negative_sampler.num_negs_per_pos 20.0 +307 54 training.batch_size 0.0 +307 55 model.embedding_dim 1.0 +307 55 negative_sampler.num_negs_per_pos 37.0 +307 55 training.batch_size 2.0 +307 56 model.embedding_dim 0.0 +307 56 negative_sampler.num_negs_per_pos 83.0 +307 56 training.batch_size 1.0 +307 57 model.embedding_dim 0.0 +307 57 negative_sampler.num_negs_per_pos 3.0 +307 57 training.batch_size 1.0 +307 
58 model.embedding_dim 0.0 +307 58 negative_sampler.num_negs_per_pos 1.0 +307 58 training.batch_size 1.0 +307 59 model.embedding_dim 0.0 +307 59 negative_sampler.num_negs_per_pos 13.0 +307 59 training.batch_size 2.0 +307 60 model.embedding_dim 2.0 +307 60 negative_sampler.num_negs_per_pos 54.0 +307 60 training.batch_size 1.0 +307 61 model.embedding_dim 2.0 +307 61 negative_sampler.num_negs_per_pos 57.0 +307 61 training.batch_size 2.0 +307 62 model.embedding_dim 2.0 +307 62 negative_sampler.num_negs_per_pos 33.0 +307 62 training.batch_size 1.0 +307 63 model.embedding_dim 0.0 +307 63 negative_sampler.num_negs_per_pos 43.0 +307 63 training.batch_size 1.0 +307 64 model.embedding_dim 0.0 +307 64 negative_sampler.num_negs_per_pos 33.0 +307 64 training.batch_size 2.0 +307 65 model.embedding_dim 0.0 +307 65 negative_sampler.num_negs_per_pos 88.0 +307 65 training.batch_size 0.0 +307 66 model.embedding_dim 2.0 +307 66 negative_sampler.num_negs_per_pos 23.0 +307 66 training.batch_size 1.0 +307 67 model.embedding_dim 0.0 +307 67 negative_sampler.num_negs_per_pos 61.0 +307 67 training.batch_size 2.0 +307 68 model.embedding_dim 2.0 +307 68 negative_sampler.num_negs_per_pos 37.0 +307 68 training.batch_size 1.0 +307 69 model.embedding_dim 0.0 +307 69 negative_sampler.num_negs_per_pos 20.0 +307 69 training.batch_size 0.0 +307 70 model.embedding_dim 1.0 +307 70 negative_sampler.num_negs_per_pos 62.0 +307 70 training.batch_size 1.0 +307 71 model.embedding_dim 1.0 +307 71 negative_sampler.num_negs_per_pos 93.0 +307 71 training.batch_size 0.0 +307 72 model.embedding_dim 1.0 +307 72 negative_sampler.num_negs_per_pos 85.0 +307 72 training.batch_size 2.0 +307 73 model.embedding_dim 1.0 +307 73 negative_sampler.num_negs_per_pos 47.0 +307 73 training.batch_size 0.0 +307 74 model.embedding_dim 1.0 +307 74 negative_sampler.num_negs_per_pos 39.0 +307 74 training.batch_size 0.0 +307 75 model.embedding_dim 0.0 +307 75 negative_sampler.num_negs_per_pos 66.0 +307 75 training.batch_size 1.0 +307 76 
model.embedding_dim 1.0 +307 76 negative_sampler.num_negs_per_pos 72.0 +307 76 training.batch_size 0.0 +307 77 model.embedding_dim 1.0 +307 77 negative_sampler.num_negs_per_pos 33.0 +307 77 training.batch_size 1.0 +307 78 model.embedding_dim 1.0 +307 78 negative_sampler.num_negs_per_pos 10.0 +307 78 training.batch_size 1.0 +307 79 model.embedding_dim 0.0 +307 79 negative_sampler.num_negs_per_pos 90.0 +307 79 training.batch_size 1.0 +307 80 model.embedding_dim 0.0 +307 80 negative_sampler.num_negs_per_pos 43.0 +307 80 training.batch_size 1.0 +307 81 model.embedding_dim 0.0 +307 81 negative_sampler.num_negs_per_pos 95.0 +307 81 training.batch_size 2.0 +307 82 model.embedding_dim 0.0 +307 82 negative_sampler.num_negs_per_pos 4.0 +307 82 training.batch_size 1.0 +307 83 model.embedding_dim 0.0 +307 83 negative_sampler.num_negs_per_pos 41.0 +307 83 training.batch_size 1.0 +307 84 model.embedding_dim 2.0 +307 84 negative_sampler.num_negs_per_pos 31.0 +307 84 training.batch_size 2.0 +307 85 model.embedding_dim 0.0 +307 85 negative_sampler.num_negs_per_pos 88.0 +307 85 training.batch_size 0.0 +307 86 model.embedding_dim 2.0 +307 86 negative_sampler.num_negs_per_pos 82.0 +307 86 training.batch_size 1.0 +307 87 model.embedding_dim 1.0 +307 87 negative_sampler.num_negs_per_pos 49.0 +307 87 training.batch_size 1.0 +307 88 model.embedding_dim 1.0 +307 88 negative_sampler.num_negs_per_pos 11.0 +307 88 training.batch_size 2.0 +307 89 model.embedding_dim 1.0 +307 89 negative_sampler.num_negs_per_pos 76.0 +307 89 training.batch_size 0.0 +307 90 model.embedding_dim 2.0 +307 90 negative_sampler.num_negs_per_pos 92.0 +307 90 training.batch_size 2.0 +307 91 model.embedding_dim 2.0 +307 91 negative_sampler.num_negs_per_pos 19.0 +307 91 training.batch_size 0.0 +307 92 model.embedding_dim 0.0 +307 92 negative_sampler.num_negs_per_pos 42.0 +307 92 training.batch_size 1.0 +307 93 model.embedding_dim 1.0 +307 93 negative_sampler.num_negs_per_pos 52.0 +307 93 training.batch_size 1.0 +307 94 
model.embedding_dim 2.0 +307 94 negative_sampler.num_negs_per_pos 86.0 +307 94 training.batch_size 0.0 +307 95 model.embedding_dim 1.0 +307 95 negative_sampler.num_negs_per_pos 84.0 +307 95 training.batch_size 0.0 +307 96 model.embedding_dim 2.0 +307 96 negative_sampler.num_negs_per_pos 46.0 +307 96 training.batch_size 0.0 +307 97 model.embedding_dim 2.0 +307 97 negative_sampler.num_negs_per_pos 16.0 +307 97 training.batch_size 0.0 +307 98 model.embedding_dim 0.0 +307 98 negative_sampler.num_negs_per_pos 43.0 +307 98 training.batch_size 0.0 +307 99 model.embedding_dim 1.0 +307 99 negative_sampler.num_negs_per_pos 24.0 +307 99 training.batch_size 2.0 +307 100 model.embedding_dim 2.0 +307 100 negative_sampler.num_negs_per_pos 34.0 +307 100 training.batch_size 1.0 +307 1 dataset """kinships""" +307 1 model """hole""" +307 1 loss """softplus""" +307 1 regularizer """no""" +307 1 optimizer """adadelta""" +307 1 training_loop """owa""" +307 1 negative_sampler """basic""" +307 1 evaluator """rankbased""" +307 2 dataset """kinships""" +307 2 model """hole""" +307 2 loss """softplus""" +307 2 regularizer """no""" +307 2 optimizer """adadelta""" +307 2 training_loop """owa""" +307 2 negative_sampler """basic""" +307 2 evaluator """rankbased""" +307 3 dataset """kinships""" +307 3 model """hole""" +307 3 loss """softplus""" +307 3 regularizer """no""" +307 3 optimizer """adadelta""" +307 3 training_loop """owa""" +307 3 negative_sampler """basic""" +307 3 evaluator """rankbased""" +307 4 dataset """kinships""" +307 4 model """hole""" +307 4 loss """softplus""" +307 4 regularizer """no""" +307 4 optimizer """adadelta""" +307 4 training_loop """owa""" +307 4 negative_sampler """basic""" +307 4 evaluator """rankbased""" +307 5 dataset """kinships""" +307 5 model """hole""" +307 5 loss """softplus""" +307 5 regularizer """no""" +307 5 optimizer """adadelta""" +307 5 training_loop """owa""" +307 5 negative_sampler """basic""" +307 5 evaluator """rankbased""" +307 6 dataset 
"""kinships""" +307 6 model """hole""" +307 6 loss """softplus""" +307 6 regularizer """no""" +307 6 optimizer """adadelta""" +307 6 training_loop """owa""" +307 6 negative_sampler """basic""" +307 6 evaluator """rankbased""" +307 7 dataset """kinships""" +307 7 model """hole""" +307 7 loss """softplus""" +307 7 regularizer """no""" +307 7 optimizer """adadelta""" +307 7 training_loop """owa""" +307 7 negative_sampler """basic""" +307 7 evaluator """rankbased""" +307 8 dataset """kinships""" +307 8 model """hole""" +307 8 loss """softplus""" +307 8 regularizer """no""" +307 8 optimizer """adadelta""" +307 8 training_loop """owa""" +307 8 negative_sampler """basic""" +307 8 evaluator """rankbased""" +307 9 dataset """kinships""" +307 9 model """hole""" +307 9 loss """softplus""" +307 9 regularizer """no""" +307 9 optimizer """adadelta""" +307 9 training_loop """owa""" +307 9 negative_sampler """basic""" +307 9 evaluator """rankbased""" +307 10 dataset """kinships""" +307 10 model """hole""" +307 10 loss """softplus""" +307 10 regularizer """no""" +307 10 optimizer """adadelta""" +307 10 training_loop """owa""" +307 10 negative_sampler """basic""" +307 10 evaluator """rankbased""" +307 11 dataset """kinships""" +307 11 model """hole""" +307 11 loss """softplus""" +307 11 regularizer """no""" +307 11 optimizer """adadelta""" +307 11 training_loop """owa""" +307 11 negative_sampler """basic""" +307 11 evaluator """rankbased""" +307 12 dataset """kinships""" +307 12 model """hole""" +307 12 loss """softplus""" +307 12 regularizer """no""" +307 12 optimizer """adadelta""" +307 12 training_loop """owa""" +307 12 negative_sampler """basic""" +307 12 evaluator """rankbased""" +307 13 dataset """kinships""" +307 13 model """hole""" +307 13 loss """softplus""" +307 13 regularizer """no""" +307 13 optimizer """adadelta""" +307 13 training_loop """owa""" +307 13 negative_sampler """basic""" +307 13 evaluator """rankbased""" +307 14 dataset """kinships""" +307 14 model 
"""hole""" +307 14 loss """softplus""" +307 14 regularizer """no""" +307 14 optimizer """adadelta""" +307 14 training_loop """owa""" +307 14 negative_sampler """basic""" +307 14 evaluator """rankbased""" +307 15 dataset """kinships""" +307 15 model """hole""" +307 15 loss """softplus""" +307 15 regularizer """no""" +307 15 optimizer """adadelta""" +307 15 training_loop """owa""" +307 15 negative_sampler """basic""" +307 15 evaluator """rankbased""" +307 16 dataset """kinships""" +307 16 model """hole""" +307 16 loss """softplus""" +307 16 regularizer """no""" +307 16 optimizer """adadelta""" +307 16 training_loop """owa""" +307 16 negative_sampler """basic""" +307 16 evaluator """rankbased""" +307 17 dataset """kinships""" +307 17 model """hole""" +307 17 loss """softplus""" +307 17 regularizer """no""" +307 17 optimizer """adadelta""" +307 17 training_loop """owa""" +307 17 negative_sampler """basic""" +307 17 evaluator """rankbased""" +307 18 dataset """kinships""" +307 18 model """hole""" +307 18 loss """softplus""" +307 18 regularizer """no""" +307 18 optimizer """adadelta""" +307 18 training_loop """owa""" +307 18 negative_sampler """basic""" +307 18 evaluator """rankbased""" +307 19 dataset """kinships""" +307 19 model """hole""" +307 19 loss """softplus""" +307 19 regularizer """no""" +307 19 optimizer """adadelta""" +307 19 training_loop """owa""" +307 19 negative_sampler """basic""" +307 19 evaluator """rankbased""" +307 20 dataset """kinships""" +307 20 model """hole""" +307 20 loss """softplus""" +307 20 regularizer """no""" +307 20 optimizer """adadelta""" +307 20 training_loop """owa""" +307 20 negative_sampler """basic""" +307 20 evaluator """rankbased""" +307 21 dataset """kinships""" +307 21 model """hole""" +307 21 loss """softplus""" +307 21 regularizer """no""" +307 21 optimizer """adadelta""" +307 21 training_loop """owa""" +307 21 negative_sampler """basic""" +307 21 evaluator """rankbased""" +307 22 dataset """kinships""" +307 22 model 
"""hole""" +307 22 loss """softplus""" +307 22 regularizer """no""" +307 22 optimizer """adadelta""" +307 22 training_loop """owa""" +307 22 negative_sampler """basic""" +307 22 evaluator """rankbased""" +307 23 dataset """kinships""" +307 23 model """hole""" +307 23 loss """softplus""" +307 23 regularizer """no""" +307 23 optimizer """adadelta""" +307 23 training_loop """owa""" +307 23 negative_sampler """basic""" +307 23 evaluator """rankbased""" +307 24 dataset """kinships""" +307 24 model """hole""" +307 24 loss """softplus""" +307 24 regularizer """no""" +307 24 optimizer """adadelta""" +307 24 training_loop """owa""" +307 24 negative_sampler """basic""" +307 24 evaluator """rankbased""" +307 25 dataset """kinships""" +307 25 model """hole""" +307 25 loss """softplus""" +307 25 regularizer """no""" +307 25 optimizer """adadelta""" +307 25 training_loop """owa""" +307 25 negative_sampler """basic""" +307 25 evaluator """rankbased""" +307 26 dataset """kinships""" +307 26 model """hole""" +307 26 loss """softplus""" +307 26 regularizer """no""" +307 26 optimizer """adadelta""" +307 26 training_loop """owa""" +307 26 negative_sampler """basic""" +307 26 evaluator """rankbased""" +307 27 dataset """kinships""" +307 27 model """hole""" +307 27 loss """softplus""" +307 27 regularizer """no""" +307 27 optimizer """adadelta""" +307 27 training_loop """owa""" +307 27 negative_sampler """basic""" +307 27 evaluator """rankbased""" +307 28 dataset """kinships""" +307 28 model """hole""" +307 28 loss """softplus""" +307 28 regularizer """no""" +307 28 optimizer """adadelta""" +307 28 training_loop """owa""" +307 28 negative_sampler """basic""" +307 28 evaluator """rankbased""" +307 29 dataset """kinships""" +307 29 model """hole""" +307 29 loss """softplus""" +307 29 regularizer """no""" +307 29 optimizer """adadelta""" +307 29 training_loop """owa""" +307 29 negative_sampler """basic""" +307 29 evaluator """rankbased""" +307 30 dataset """kinships""" +307 30 model 
"""hole""" +307 30 loss """softplus""" +307 30 regularizer """no""" +307 30 optimizer """adadelta""" +307 30 training_loop """owa""" +307 30 negative_sampler """basic""" +307 30 evaluator """rankbased""" +307 31 dataset """kinships""" +307 31 model """hole""" +307 31 loss """softplus""" +307 31 regularizer """no""" +307 31 optimizer """adadelta""" +307 31 training_loop """owa""" +307 31 negative_sampler """basic""" +307 31 evaluator """rankbased""" +307 32 dataset """kinships""" +307 32 model """hole""" +307 32 loss """softplus""" +307 32 regularizer """no""" +307 32 optimizer """adadelta""" +307 32 training_loop """owa""" +307 32 negative_sampler """basic""" +307 32 evaluator """rankbased""" +307 33 dataset """kinships""" +307 33 model """hole""" +307 33 loss """softplus""" +307 33 regularizer """no""" +307 33 optimizer """adadelta""" +307 33 training_loop """owa""" +307 33 negative_sampler """basic""" +307 33 evaluator """rankbased""" +307 34 dataset """kinships""" +307 34 model """hole""" +307 34 loss """softplus""" +307 34 regularizer """no""" +307 34 optimizer """adadelta""" +307 34 training_loop """owa""" +307 34 negative_sampler """basic""" +307 34 evaluator """rankbased""" +307 35 dataset """kinships""" +307 35 model """hole""" +307 35 loss """softplus""" +307 35 regularizer """no""" +307 35 optimizer """adadelta""" +307 35 training_loop """owa""" +307 35 negative_sampler """basic""" +307 35 evaluator """rankbased""" +307 36 dataset """kinships""" +307 36 model """hole""" +307 36 loss """softplus""" +307 36 regularizer """no""" +307 36 optimizer """adadelta""" +307 36 training_loop """owa""" +307 36 negative_sampler """basic""" +307 36 evaluator """rankbased""" +307 37 dataset """kinships""" +307 37 model """hole""" +307 37 loss """softplus""" +307 37 regularizer """no""" +307 37 optimizer """adadelta""" +307 37 training_loop """owa""" +307 37 negative_sampler """basic""" +307 37 evaluator """rankbased""" +307 38 dataset """kinships""" +307 38 model 
"""hole""" +307 38 loss """softplus""" +307 38 regularizer """no""" +307 38 optimizer """adadelta""" +307 38 training_loop """owa""" +307 38 negative_sampler """basic""" +307 38 evaluator """rankbased""" +307 39 dataset """kinships""" +307 39 model """hole""" +307 39 loss """softplus""" +307 39 regularizer """no""" +307 39 optimizer """adadelta""" +307 39 training_loop """owa""" +307 39 negative_sampler """basic""" +307 39 evaluator """rankbased""" +307 40 dataset """kinships""" +307 40 model """hole""" +307 40 loss """softplus""" +307 40 regularizer """no""" +307 40 optimizer """adadelta""" +307 40 training_loop """owa""" +307 40 negative_sampler """basic""" +307 40 evaluator """rankbased""" +307 41 dataset """kinships""" +307 41 model """hole""" +307 41 loss """softplus""" +307 41 regularizer """no""" +307 41 optimizer """adadelta""" +307 41 training_loop """owa""" +307 41 negative_sampler """basic""" +307 41 evaluator """rankbased""" +307 42 dataset """kinships""" +307 42 model """hole""" +307 42 loss """softplus""" +307 42 regularizer """no""" +307 42 optimizer """adadelta""" +307 42 training_loop """owa""" +307 42 negative_sampler """basic""" +307 42 evaluator """rankbased""" +307 43 dataset """kinships""" +307 43 model """hole""" +307 43 loss """softplus""" +307 43 regularizer """no""" +307 43 optimizer """adadelta""" +307 43 training_loop """owa""" +307 43 negative_sampler """basic""" +307 43 evaluator """rankbased""" +307 44 dataset """kinships""" +307 44 model """hole""" +307 44 loss """softplus""" +307 44 regularizer """no""" +307 44 optimizer """adadelta""" +307 44 training_loop """owa""" +307 44 negative_sampler """basic""" +307 44 evaluator """rankbased""" +307 45 dataset """kinships""" +307 45 model """hole""" +307 45 loss """softplus""" +307 45 regularizer """no""" +307 45 optimizer """adadelta""" +307 45 training_loop """owa""" +307 45 negative_sampler """basic""" +307 45 evaluator """rankbased""" +307 46 dataset """kinships""" +307 46 model 
"""hole""" +307 46 loss """softplus""" +307 46 regularizer """no""" +307 46 optimizer """adadelta""" +307 46 training_loop """owa""" +307 46 negative_sampler """basic""" +307 46 evaluator """rankbased""" +307 47 dataset """kinships""" +307 47 model """hole""" +307 47 loss """softplus""" +307 47 regularizer """no""" +307 47 optimizer """adadelta""" +307 47 training_loop """owa""" +307 47 negative_sampler """basic""" +307 47 evaluator """rankbased""" +307 48 dataset """kinships""" +307 48 model """hole""" +307 48 loss """softplus""" +307 48 regularizer """no""" +307 48 optimizer """adadelta""" +307 48 training_loop """owa""" +307 48 negative_sampler """basic""" +307 48 evaluator """rankbased""" +307 49 dataset """kinships""" +307 49 model """hole""" +307 49 loss """softplus""" +307 49 regularizer """no""" +307 49 optimizer """adadelta""" +307 49 training_loop """owa""" +307 49 negative_sampler """basic""" +307 49 evaluator """rankbased""" +307 50 dataset """kinships""" +307 50 model """hole""" +307 50 loss """softplus""" +307 50 regularizer """no""" +307 50 optimizer """adadelta""" +307 50 training_loop """owa""" +307 50 negative_sampler """basic""" +307 50 evaluator """rankbased""" +307 51 dataset """kinships""" +307 51 model """hole""" +307 51 loss """softplus""" +307 51 regularizer """no""" +307 51 optimizer """adadelta""" +307 51 training_loop """owa""" +307 51 negative_sampler """basic""" +307 51 evaluator """rankbased""" +307 52 dataset """kinships""" +307 52 model """hole""" +307 52 loss """softplus""" +307 52 regularizer """no""" +307 52 optimizer """adadelta""" +307 52 training_loop """owa""" +307 52 negative_sampler """basic""" +307 52 evaluator """rankbased""" +307 53 dataset """kinships""" +307 53 model """hole""" +307 53 loss """softplus""" +307 53 regularizer """no""" +307 53 optimizer """adadelta""" +307 53 training_loop """owa""" +307 53 negative_sampler """basic""" +307 53 evaluator """rankbased""" +307 54 dataset """kinships""" +307 54 model 
"""hole""" +307 54 loss """softplus""" +307 54 regularizer """no""" +307 54 optimizer """adadelta""" +307 54 training_loop """owa""" +307 54 negative_sampler """basic""" +307 54 evaluator """rankbased""" +307 55 dataset """kinships""" +307 55 model """hole""" +307 55 loss """softplus""" +307 55 regularizer """no""" +307 55 optimizer """adadelta""" +307 55 training_loop """owa""" +307 55 negative_sampler """basic""" +307 55 evaluator """rankbased""" +307 56 dataset """kinships""" +307 56 model """hole""" +307 56 loss """softplus""" +307 56 regularizer """no""" +307 56 optimizer """adadelta""" +307 56 training_loop """owa""" +307 56 negative_sampler """basic""" +307 56 evaluator """rankbased""" +307 57 dataset """kinships""" +307 57 model """hole""" +307 57 loss """softplus""" +307 57 regularizer """no""" +307 57 optimizer """adadelta""" +307 57 training_loop """owa""" +307 57 negative_sampler """basic""" +307 57 evaluator """rankbased""" +307 58 dataset """kinships""" +307 58 model """hole""" +307 58 loss """softplus""" +307 58 regularizer """no""" +307 58 optimizer """adadelta""" +307 58 training_loop """owa""" +307 58 negative_sampler """basic""" +307 58 evaluator """rankbased""" +307 59 dataset """kinships""" +307 59 model """hole""" +307 59 loss """softplus""" +307 59 regularizer """no""" +307 59 optimizer """adadelta""" +307 59 training_loop """owa""" +307 59 negative_sampler """basic""" +307 59 evaluator """rankbased""" +307 60 dataset """kinships""" +307 60 model """hole""" +307 60 loss """softplus""" +307 60 regularizer """no""" +307 60 optimizer """adadelta""" +307 60 training_loop """owa""" +307 60 negative_sampler """basic""" +307 60 evaluator """rankbased""" +307 61 dataset """kinships""" +307 61 model """hole""" +307 61 loss """softplus""" +307 61 regularizer """no""" +307 61 optimizer """adadelta""" +307 61 training_loop """owa""" +307 61 negative_sampler """basic""" +307 61 evaluator """rankbased""" +307 62 dataset """kinships""" +307 62 model 
"""hole""" +307 62 loss """softplus""" +307 62 regularizer """no""" +307 62 optimizer """adadelta""" +307 62 training_loop """owa""" +307 62 negative_sampler """basic""" +307 62 evaluator """rankbased""" +307 63 dataset """kinships""" +307 63 model """hole""" +307 63 loss """softplus""" +307 63 regularizer """no""" +307 63 optimizer """adadelta""" +307 63 training_loop """owa""" +307 63 negative_sampler """basic""" +307 63 evaluator """rankbased""" +307 64 dataset """kinships""" +307 64 model """hole""" +307 64 loss """softplus""" +307 64 regularizer """no""" +307 64 optimizer """adadelta""" +307 64 training_loop """owa""" +307 64 negative_sampler """basic""" +307 64 evaluator """rankbased""" +307 65 dataset """kinships""" +307 65 model """hole""" +307 65 loss """softplus""" +307 65 regularizer """no""" +307 65 optimizer """adadelta""" +307 65 training_loop """owa""" +307 65 negative_sampler """basic""" +307 65 evaluator """rankbased""" +307 66 dataset """kinships""" +307 66 model """hole""" +307 66 loss """softplus""" +307 66 regularizer """no""" +307 66 optimizer """adadelta""" +307 66 training_loop """owa""" +307 66 negative_sampler """basic""" +307 66 evaluator """rankbased""" +307 67 dataset """kinships""" +307 67 model """hole""" +307 67 loss """softplus""" +307 67 regularizer """no""" +307 67 optimizer """adadelta""" +307 67 training_loop """owa""" +307 67 negative_sampler """basic""" +307 67 evaluator """rankbased""" +307 68 dataset """kinships""" +307 68 model """hole""" +307 68 loss """softplus""" +307 68 regularizer """no""" +307 68 optimizer """adadelta""" +307 68 training_loop """owa""" +307 68 negative_sampler """basic""" +307 68 evaluator """rankbased""" +307 69 dataset """kinships""" +307 69 model """hole""" +307 69 loss """softplus""" +307 69 regularizer """no""" +307 69 optimizer """adadelta""" +307 69 training_loop """owa""" +307 69 negative_sampler """basic""" +307 69 evaluator """rankbased""" +307 70 dataset """kinships""" +307 70 model 
"""hole""" +307 70 loss """softplus""" +307 70 regularizer """no""" +307 70 optimizer """adadelta""" +307 70 training_loop """owa""" +307 70 negative_sampler """basic""" +307 70 evaluator """rankbased""" +307 71 dataset """kinships""" +307 71 model """hole""" +307 71 loss """softplus""" +307 71 regularizer """no""" +307 71 optimizer """adadelta""" +307 71 training_loop """owa""" +307 71 negative_sampler """basic""" +307 71 evaluator """rankbased""" +307 72 dataset """kinships""" +307 72 model """hole""" +307 72 loss """softplus""" +307 72 regularizer """no""" +307 72 optimizer """adadelta""" +307 72 training_loop """owa""" +307 72 negative_sampler """basic""" +307 72 evaluator """rankbased""" +307 73 dataset """kinships""" +307 73 model """hole""" +307 73 loss """softplus""" +307 73 regularizer """no""" +307 73 optimizer """adadelta""" +307 73 training_loop """owa""" +307 73 negative_sampler """basic""" +307 73 evaluator """rankbased""" +307 74 dataset """kinships""" +307 74 model """hole""" +307 74 loss """softplus""" +307 74 regularizer """no""" +307 74 optimizer """adadelta""" +307 74 training_loop """owa""" +307 74 negative_sampler """basic""" +307 74 evaluator """rankbased""" +307 75 dataset """kinships""" +307 75 model """hole""" +307 75 loss """softplus""" +307 75 regularizer """no""" +307 75 optimizer """adadelta""" +307 75 training_loop """owa""" +307 75 negative_sampler """basic""" +307 75 evaluator """rankbased""" +307 76 dataset """kinships""" +307 76 model """hole""" +307 76 loss """softplus""" +307 76 regularizer """no""" +307 76 optimizer """adadelta""" +307 76 training_loop """owa""" +307 76 negative_sampler """basic""" +307 76 evaluator """rankbased""" +307 77 dataset """kinships""" +307 77 model """hole""" +307 77 loss """softplus""" +307 77 regularizer """no""" +307 77 optimizer """adadelta""" +307 77 training_loop """owa""" +307 77 negative_sampler """basic""" +307 77 evaluator """rankbased""" +307 78 dataset """kinships""" +307 78 model 
"""hole""" +307 78 loss """softplus""" +307 78 regularizer """no""" +307 78 optimizer """adadelta""" +307 78 training_loop """owa""" +307 78 negative_sampler """basic""" +307 78 evaluator """rankbased""" +307 79 dataset """kinships""" +307 79 model """hole""" +307 79 loss """softplus""" +307 79 regularizer """no""" +307 79 optimizer """adadelta""" +307 79 training_loop """owa""" +307 79 negative_sampler """basic""" +307 79 evaluator """rankbased""" +307 80 dataset """kinships""" +307 80 model """hole""" +307 80 loss """softplus""" +307 80 regularizer """no""" +307 80 optimizer """adadelta""" +307 80 training_loop """owa""" +307 80 negative_sampler """basic""" +307 80 evaluator """rankbased""" +307 81 dataset """kinships""" +307 81 model """hole""" +307 81 loss """softplus""" +307 81 regularizer """no""" +307 81 optimizer """adadelta""" +307 81 training_loop """owa""" +307 81 negative_sampler """basic""" +307 81 evaluator """rankbased""" +307 82 dataset """kinships""" +307 82 model """hole""" +307 82 loss """softplus""" +307 82 regularizer """no""" +307 82 optimizer """adadelta""" +307 82 training_loop """owa""" +307 82 negative_sampler """basic""" +307 82 evaluator """rankbased""" +307 83 dataset """kinships""" +307 83 model """hole""" +307 83 loss """softplus""" +307 83 regularizer """no""" +307 83 optimizer """adadelta""" +307 83 training_loop """owa""" +307 83 negative_sampler """basic""" +307 83 evaluator """rankbased""" +307 84 dataset """kinships""" +307 84 model """hole""" +307 84 loss """softplus""" +307 84 regularizer """no""" +307 84 optimizer """adadelta""" +307 84 training_loop """owa""" +307 84 negative_sampler """basic""" +307 84 evaluator """rankbased""" +307 85 dataset """kinships""" +307 85 model """hole""" +307 85 loss """softplus""" +307 85 regularizer """no""" +307 85 optimizer """adadelta""" +307 85 training_loop """owa""" +307 85 negative_sampler """basic""" +307 85 evaluator """rankbased""" +307 86 dataset """kinships""" +307 86 model 
"""hole""" +307 86 loss """softplus""" +307 86 regularizer """no""" +307 86 optimizer """adadelta""" +307 86 training_loop """owa""" +307 86 negative_sampler """basic""" +307 86 evaluator """rankbased""" +307 87 dataset """kinships""" +307 87 model """hole""" +307 87 loss """softplus""" +307 87 regularizer """no""" +307 87 optimizer """adadelta""" +307 87 training_loop """owa""" +307 87 negative_sampler """basic""" +307 87 evaluator """rankbased""" +307 88 dataset """kinships""" +307 88 model """hole""" +307 88 loss """softplus""" +307 88 regularizer """no""" +307 88 optimizer """adadelta""" +307 88 training_loop """owa""" +307 88 negative_sampler """basic""" +307 88 evaluator """rankbased""" +307 89 dataset """kinships""" +307 89 model """hole""" +307 89 loss """softplus""" +307 89 regularizer """no""" +307 89 optimizer """adadelta""" +307 89 training_loop """owa""" +307 89 negative_sampler """basic""" +307 89 evaluator """rankbased""" +307 90 dataset """kinships""" +307 90 model """hole""" +307 90 loss """softplus""" +307 90 regularizer """no""" +307 90 optimizer """adadelta""" +307 90 training_loop """owa""" +307 90 negative_sampler """basic""" +307 90 evaluator """rankbased""" +307 91 dataset """kinships""" +307 91 model """hole""" +307 91 loss """softplus""" +307 91 regularizer """no""" +307 91 optimizer """adadelta""" +307 91 training_loop """owa""" +307 91 negative_sampler """basic""" +307 91 evaluator """rankbased""" +307 92 dataset """kinships""" +307 92 model """hole""" +307 92 loss """softplus""" +307 92 regularizer """no""" +307 92 optimizer """adadelta""" +307 92 training_loop """owa""" +307 92 negative_sampler """basic""" +307 92 evaluator """rankbased""" +307 93 dataset """kinships""" +307 93 model """hole""" +307 93 loss """softplus""" +307 93 regularizer """no""" +307 93 optimizer """adadelta""" +307 93 training_loop """owa""" +307 93 negative_sampler """basic""" +307 93 evaluator """rankbased""" +307 94 dataset """kinships""" +307 94 model 
"""hole""" +307 94 loss """softplus""" +307 94 regularizer """no""" +307 94 optimizer """adadelta""" +307 94 training_loop """owa""" +307 94 negative_sampler """basic""" +307 94 evaluator """rankbased""" +307 95 dataset """kinships""" +307 95 model """hole""" +307 95 loss """softplus""" +307 95 regularizer """no""" +307 95 optimizer """adadelta""" +307 95 training_loop """owa""" +307 95 negative_sampler """basic""" +307 95 evaluator """rankbased""" +307 96 dataset """kinships""" +307 96 model """hole""" +307 96 loss """softplus""" +307 96 regularizer """no""" +307 96 optimizer """adadelta""" +307 96 training_loop """owa""" +307 96 negative_sampler """basic""" +307 96 evaluator """rankbased""" +307 97 dataset """kinships""" +307 97 model """hole""" +307 97 loss """softplus""" +307 97 regularizer """no""" +307 97 optimizer """adadelta""" +307 97 training_loop """owa""" +307 97 negative_sampler """basic""" +307 97 evaluator """rankbased""" +307 98 dataset """kinships""" +307 98 model """hole""" +307 98 loss """softplus""" +307 98 regularizer """no""" +307 98 optimizer """adadelta""" +307 98 training_loop """owa""" +307 98 negative_sampler """basic""" +307 98 evaluator """rankbased""" +307 99 dataset """kinships""" +307 99 model """hole""" +307 99 loss """softplus""" +307 99 regularizer """no""" +307 99 optimizer """adadelta""" +307 99 training_loop """owa""" +307 99 negative_sampler """basic""" +307 99 evaluator """rankbased""" +307 100 dataset """kinships""" +307 100 model """hole""" +307 100 loss """softplus""" +307 100 regularizer """no""" +307 100 optimizer """adadelta""" +307 100 training_loop """owa""" +307 100 negative_sampler """basic""" +307 100 evaluator """rankbased""" +308 1 model.embedding_dim 0.0 +308 1 loss.margin 29.57293075708772 +308 1 loss.adversarial_temperature 0.9023670980976193 +308 1 negative_sampler.num_negs_per_pos 17.0 +308 1 training.batch_size 0.0 +308 2 model.embedding_dim 1.0 +308 2 loss.margin 21.535887120085462 +308 2 
loss.adversarial_temperature 0.31579954245238706 +308 2 negative_sampler.num_negs_per_pos 64.0 +308 2 training.batch_size 1.0 +308 3 model.embedding_dim 0.0 +308 3 loss.margin 23.185226808751093 +308 3 loss.adversarial_temperature 0.9760400393228081 +308 3 negative_sampler.num_negs_per_pos 55.0 +308 3 training.batch_size 1.0 +308 4 model.embedding_dim 0.0 +308 4 loss.margin 2.693190398171918 +308 4 loss.adversarial_temperature 0.9009713836950748 +308 4 negative_sampler.num_negs_per_pos 81.0 +308 4 training.batch_size 1.0 +308 5 model.embedding_dim 0.0 +308 5 loss.margin 22.378027299203985 +308 5 loss.adversarial_temperature 0.8838701198237356 +308 5 negative_sampler.num_negs_per_pos 46.0 +308 5 training.batch_size 0.0 +308 6 model.embedding_dim 2.0 +308 6 loss.margin 29.64647638577116 +308 6 loss.adversarial_temperature 0.2668361820685551 +308 6 negative_sampler.num_negs_per_pos 14.0 +308 6 training.batch_size 0.0 +308 7 model.embedding_dim 1.0 +308 7 loss.margin 22.86810238849737 +308 7 loss.adversarial_temperature 0.9817354411572096 +308 7 negative_sampler.num_negs_per_pos 4.0 +308 7 training.batch_size 1.0 +308 8 model.embedding_dim 0.0 +308 8 loss.margin 28.9857334495008 +308 8 loss.adversarial_temperature 0.3845717882867604 +308 8 negative_sampler.num_negs_per_pos 88.0 +308 8 training.batch_size 0.0 +308 9 model.embedding_dim 1.0 +308 9 loss.margin 3.888315523974108 +308 9 loss.adversarial_temperature 0.8244138938113226 +308 9 negative_sampler.num_negs_per_pos 0.0 +308 9 training.batch_size 1.0 +308 10 model.embedding_dim 0.0 +308 10 loss.margin 1.3084657080820818 +308 10 loss.adversarial_temperature 0.9612959555632665 +308 10 negative_sampler.num_negs_per_pos 83.0 +308 10 training.batch_size 0.0 +308 11 model.embedding_dim 2.0 +308 11 loss.margin 6.7359012068444 +308 11 loss.adversarial_temperature 0.15861671036682368 +308 11 negative_sampler.num_negs_per_pos 24.0 +308 11 training.batch_size 0.0 +308 12 model.embedding_dim 2.0 +308 12 loss.margin 
24.075632841785886 +308 12 loss.adversarial_temperature 0.6979605640023482 +308 12 negative_sampler.num_negs_per_pos 56.0 +308 12 training.batch_size 1.0 +308 13 model.embedding_dim 1.0 +308 13 loss.margin 17.272525042722084 +308 13 loss.adversarial_temperature 0.10632090768858514 +308 13 negative_sampler.num_negs_per_pos 51.0 +308 13 training.batch_size 0.0 +308 14 model.embedding_dim 0.0 +308 14 loss.margin 1.741724822646542 +308 14 loss.adversarial_temperature 0.867970754585075 +308 14 negative_sampler.num_negs_per_pos 98.0 +308 14 training.batch_size 1.0 +308 15 model.embedding_dim 2.0 +308 15 loss.margin 18.186949182322827 +308 15 loss.adversarial_temperature 0.3064796493486829 +308 15 negative_sampler.num_negs_per_pos 29.0 +308 15 training.batch_size 0.0 +308 16 model.embedding_dim 2.0 +308 16 loss.margin 7.326493909696322 +308 16 loss.adversarial_temperature 0.23819932518777262 +308 16 negative_sampler.num_negs_per_pos 89.0 +308 16 training.batch_size 0.0 +308 17 model.embedding_dim 0.0 +308 17 loss.margin 27.70895029572738 +308 17 loss.adversarial_temperature 0.24120795533869138 +308 17 negative_sampler.num_negs_per_pos 1.0 +308 17 training.batch_size 1.0 +308 18 model.embedding_dim 0.0 +308 18 loss.margin 7.236093222426487 +308 18 loss.adversarial_temperature 0.18238362548237144 +308 18 negative_sampler.num_negs_per_pos 16.0 +308 18 training.batch_size 0.0 +308 19 model.embedding_dim 0.0 +308 19 loss.margin 13.499029733476275 +308 19 loss.adversarial_temperature 0.9224741645500961 +308 19 negative_sampler.num_negs_per_pos 40.0 +308 19 training.batch_size 0.0 +308 20 model.embedding_dim 2.0 +308 20 loss.margin 24.58494912235523 +308 20 loss.adversarial_temperature 0.7116967147443778 +308 20 negative_sampler.num_negs_per_pos 46.0 +308 20 training.batch_size 1.0 +308 21 model.embedding_dim 2.0 +308 21 loss.margin 7.927109299186413 +308 21 loss.adversarial_temperature 0.8212513125120131 +308 21 negative_sampler.num_negs_per_pos 19.0 +308 21 training.batch_size 
1.0 +308 22 model.embedding_dim 2.0 +308 22 loss.margin 21.48757565872312 +308 22 loss.adversarial_temperature 0.9803348990036141 +308 22 negative_sampler.num_negs_per_pos 72.0 +308 22 training.batch_size 2.0 +308 23 model.embedding_dim 0.0 +308 23 loss.margin 11.064992471583603 +308 23 loss.adversarial_temperature 0.5315331829636402 +308 23 negative_sampler.num_negs_per_pos 10.0 +308 23 training.batch_size 0.0 +308 24 model.embedding_dim 1.0 +308 24 loss.margin 10.311783982518865 +308 24 loss.adversarial_temperature 0.7296215493694617 +308 24 negative_sampler.num_negs_per_pos 57.0 +308 24 training.batch_size 0.0 +308 25 model.embedding_dim 1.0 +308 25 loss.margin 4.515030831598918 +308 25 loss.adversarial_temperature 0.15443779665244972 +308 25 negative_sampler.num_negs_per_pos 19.0 +308 25 training.batch_size 1.0 +308 26 model.embedding_dim 2.0 +308 26 loss.margin 27.09115117898223 +308 26 loss.adversarial_temperature 0.49840681179083945 +308 26 negative_sampler.num_negs_per_pos 26.0 +308 26 training.batch_size 0.0 +308 27 model.embedding_dim 1.0 +308 27 loss.margin 19.67086400602154 +308 27 loss.adversarial_temperature 0.5179088645650608 +308 27 negative_sampler.num_negs_per_pos 97.0 +308 27 training.batch_size 0.0 +308 28 model.embedding_dim 2.0 +308 28 loss.margin 5.408903123727263 +308 28 loss.adversarial_temperature 0.559448236527728 +308 28 negative_sampler.num_negs_per_pos 39.0 +308 28 training.batch_size 2.0 +308 29 model.embedding_dim 1.0 +308 29 loss.margin 24.888858838079514 +308 29 loss.adversarial_temperature 0.8557135062511458 +308 29 negative_sampler.num_negs_per_pos 29.0 +308 29 training.batch_size 0.0 +308 30 model.embedding_dim 2.0 +308 30 loss.margin 19.556042399697308 +308 30 loss.adversarial_temperature 0.3389052205831658 +308 30 negative_sampler.num_negs_per_pos 27.0 +308 30 training.batch_size 2.0 +308 31 model.embedding_dim 2.0 +308 31 loss.margin 14.06860269045637 +308 31 loss.adversarial_temperature 0.5432719449101874 +308 31 
negative_sampler.num_negs_per_pos 24.0 +308 31 training.batch_size 2.0 +308 32 model.embedding_dim 0.0 +308 32 loss.margin 6.83775846009035 +308 32 loss.adversarial_temperature 0.6296521678656976 +308 32 negative_sampler.num_negs_per_pos 83.0 +308 32 training.batch_size 1.0 +308 33 model.embedding_dim 1.0 +308 33 loss.margin 18.68296828426707 +308 33 loss.adversarial_temperature 0.14949861030966785 +308 33 negative_sampler.num_negs_per_pos 55.0 +308 33 training.batch_size 1.0 +308 34 model.embedding_dim 1.0 +308 34 loss.margin 23.168001758827245 +308 34 loss.adversarial_temperature 0.9721769169416652 +308 34 negative_sampler.num_negs_per_pos 75.0 +308 34 training.batch_size 1.0 +308 35 model.embedding_dim 1.0 +308 35 loss.margin 24.835674992391787 +308 35 loss.adversarial_temperature 0.8944921646797231 +308 35 negative_sampler.num_negs_per_pos 43.0 +308 35 training.batch_size 2.0 +308 36 model.embedding_dim 2.0 +308 36 loss.margin 6.3083817881379165 +308 36 loss.adversarial_temperature 0.7047875374679562 +308 36 negative_sampler.num_negs_per_pos 50.0 +308 36 training.batch_size 0.0 +308 37 model.embedding_dim 0.0 +308 37 loss.margin 15.41297012243846 +308 37 loss.adversarial_temperature 0.487775120229047 +308 37 negative_sampler.num_negs_per_pos 78.0 +308 37 training.batch_size 0.0 +308 38 model.embedding_dim 1.0 +308 38 loss.margin 13.876072914256685 +308 38 loss.adversarial_temperature 0.6619912529783162 +308 38 negative_sampler.num_negs_per_pos 1.0 +308 38 training.batch_size 2.0 +308 39 model.embedding_dim 2.0 +308 39 loss.margin 22.8930432621634 +308 39 loss.adversarial_temperature 0.9857018434227777 +308 39 negative_sampler.num_negs_per_pos 69.0 +308 39 training.batch_size 2.0 +308 40 model.embedding_dim 1.0 +308 40 loss.margin 7.947714065606751 +308 40 loss.adversarial_temperature 0.8077772575956117 +308 40 negative_sampler.num_negs_per_pos 6.0 +308 40 training.batch_size 1.0 +308 41 model.embedding_dim 2.0 +308 41 loss.margin 24.610247694963046 +308 41 
loss.adversarial_temperature 0.2806900238758465 +308 41 negative_sampler.num_negs_per_pos 64.0 +308 41 training.batch_size 2.0 +308 42 model.embedding_dim 1.0 +308 42 loss.margin 28.62257856422309 +308 42 loss.adversarial_temperature 0.7746484120554041 +308 42 negative_sampler.num_negs_per_pos 55.0 +308 42 training.batch_size 1.0 +308 43 model.embedding_dim 1.0 +308 43 loss.margin 20.061288315105728 +308 43 loss.adversarial_temperature 0.1788228644563618 +308 43 negative_sampler.num_negs_per_pos 10.0 +308 43 training.batch_size 1.0 +308 44 model.embedding_dim 0.0 +308 44 loss.margin 5.849044805886622 +308 44 loss.adversarial_temperature 0.9922807996682194 +308 44 negative_sampler.num_negs_per_pos 88.0 +308 44 training.batch_size 2.0 +308 45 model.embedding_dim 2.0 +308 45 loss.margin 28.639478843517153 +308 45 loss.adversarial_temperature 0.42364498647496174 +308 45 negative_sampler.num_negs_per_pos 83.0 +308 45 training.batch_size 2.0 +308 46 model.embedding_dim 0.0 +308 46 loss.margin 27.323562881206104 +308 46 loss.adversarial_temperature 0.22537930084771848 +308 46 negative_sampler.num_negs_per_pos 40.0 +308 46 training.batch_size 1.0 +308 47 model.embedding_dim 0.0 +308 47 loss.margin 11.605954338649248 +308 47 loss.adversarial_temperature 0.6035970871965348 +308 47 negative_sampler.num_negs_per_pos 5.0 +308 47 training.batch_size 2.0 +308 48 model.embedding_dim 2.0 +308 48 loss.margin 3.350278521075781 +308 48 loss.adversarial_temperature 0.8233303833049069 +308 48 negative_sampler.num_negs_per_pos 29.0 +308 48 training.batch_size 1.0 +308 49 model.embedding_dim 1.0 +308 49 loss.margin 28.427898353708454 +308 49 loss.adversarial_temperature 0.7953988931642217 +308 49 negative_sampler.num_negs_per_pos 74.0 +308 49 training.batch_size 0.0 +308 50 model.embedding_dim 2.0 +308 50 loss.margin 8.011374366518499 +308 50 loss.adversarial_temperature 0.7643980356378993 +308 50 negative_sampler.num_negs_per_pos 91.0 +308 50 training.batch_size 2.0 +308 51 
model.embedding_dim 1.0 +308 51 loss.margin 16.168105711496366 +308 51 loss.adversarial_temperature 0.9786451908463272 +308 51 negative_sampler.num_negs_per_pos 49.0 +308 51 training.batch_size 0.0 +308 52 model.embedding_dim 0.0 +308 52 loss.margin 21.709347195117036 +308 52 loss.adversarial_temperature 0.8317580919970669 +308 52 negative_sampler.num_negs_per_pos 21.0 +308 52 training.batch_size 1.0 +308 53 model.embedding_dim 1.0 +308 53 loss.margin 20.24466209935737 +308 53 loss.adversarial_temperature 0.21214306661044052 +308 53 negative_sampler.num_negs_per_pos 84.0 +308 53 training.batch_size 0.0 +308 54 model.embedding_dim 1.0 +308 54 loss.margin 28.245019231432554 +308 54 loss.adversarial_temperature 0.38779295967313354 +308 54 negative_sampler.num_negs_per_pos 75.0 +308 54 training.batch_size 1.0 +308 55 model.embedding_dim 0.0 +308 55 loss.margin 27.284378073052295 +308 55 loss.adversarial_temperature 0.9275267405847818 +308 55 negative_sampler.num_negs_per_pos 5.0 +308 55 training.batch_size 1.0 +308 56 model.embedding_dim 1.0 +308 56 loss.margin 10.063355179472529 +308 56 loss.adversarial_temperature 0.837863263729793 +308 56 negative_sampler.num_negs_per_pos 14.0 +308 56 training.batch_size 0.0 +308 57 model.embedding_dim 2.0 +308 57 loss.margin 2.1198400562779343 +308 57 loss.adversarial_temperature 0.6481697508625534 +308 57 negative_sampler.num_negs_per_pos 43.0 +308 57 training.batch_size 0.0 +308 58 model.embedding_dim 2.0 +308 58 loss.margin 26.67944508079548 +308 58 loss.adversarial_temperature 0.6796484316169548 +308 58 negative_sampler.num_negs_per_pos 78.0 +308 58 training.batch_size 0.0 +308 59 model.embedding_dim 0.0 +308 59 loss.margin 25.54334722680006 +308 59 loss.adversarial_temperature 0.256750876545153 +308 59 negative_sampler.num_negs_per_pos 85.0 +308 59 training.batch_size 2.0 +308 60 model.embedding_dim 1.0 +308 60 loss.margin 16.298982843771167 +308 60 loss.adversarial_temperature 0.378371185597028 +308 60 
negative_sampler.num_negs_per_pos 58.0 +308 60 training.batch_size 0.0 +308 61 model.embedding_dim 0.0 +308 61 loss.margin 6.254531896078403 +308 61 loss.adversarial_temperature 0.5798508515868445 +308 61 negative_sampler.num_negs_per_pos 0.0 +308 61 training.batch_size 0.0 +308 62 model.embedding_dim 1.0 +308 62 loss.margin 9.867809795785838 +308 62 loss.adversarial_temperature 0.8628394028594829 +308 62 negative_sampler.num_negs_per_pos 44.0 +308 62 training.batch_size 0.0 +308 63 model.embedding_dim 2.0 +308 63 loss.margin 14.269886867120588 +308 63 loss.adversarial_temperature 0.7913460610251102 +308 63 negative_sampler.num_negs_per_pos 34.0 +308 63 training.batch_size 2.0 +308 64 model.embedding_dim 2.0 +308 64 loss.margin 26.400093544374855 +308 64 loss.adversarial_temperature 0.5963655191783818 +308 64 negative_sampler.num_negs_per_pos 12.0 +308 64 training.batch_size 1.0 +308 65 model.embedding_dim 2.0 +308 65 loss.margin 12.050464725214571 +308 65 loss.adversarial_temperature 0.6626868879013463 +308 65 negative_sampler.num_negs_per_pos 5.0 +308 65 training.batch_size 2.0 +308 66 model.embedding_dim 0.0 +308 66 loss.margin 22.922609720899953 +308 66 loss.adversarial_temperature 0.16151326445393138 +308 66 negative_sampler.num_negs_per_pos 31.0 +308 66 training.batch_size 0.0 +308 67 model.embedding_dim 0.0 +308 67 loss.margin 12.891533140694856 +308 67 loss.adversarial_temperature 0.9734817873079484 +308 67 negative_sampler.num_negs_per_pos 39.0 +308 67 training.batch_size 1.0 +308 68 model.embedding_dim 2.0 +308 68 loss.margin 20.03975639984115 +308 68 loss.adversarial_temperature 0.2523713621693043 +308 68 negative_sampler.num_negs_per_pos 86.0 +308 68 training.batch_size 1.0 +308 69 model.embedding_dim 2.0 +308 69 loss.margin 16.063749992557668 +308 69 loss.adversarial_temperature 0.7966500792074478 +308 69 negative_sampler.num_negs_per_pos 89.0 +308 69 training.batch_size 2.0 +308 70 model.embedding_dim 1.0 +308 70 loss.margin 22.742209063420198 +308 70 
loss.adversarial_temperature 0.35119911328568987 +308 70 negative_sampler.num_negs_per_pos 50.0 +308 70 training.batch_size 1.0 +308 71 model.embedding_dim 2.0 +308 71 loss.margin 26.79162067427752 +308 71 loss.adversarial_temperature 0.5870064773150925 +308 71 negative_sampler.num_negs_per_pos 13.0 +308 71 training.batch_size 1.0 +308 72 model.embedding_dim 0.0 +308 72 loss.margin 16.34337799999858 +308 72 loss.adversarial_temperature 0.93431225815682 +308 72 negative_sampler.num_negs_per_pos 3.0 +308 72 training.batch_size 2.0 +308 73 model.embedding_dim 0.0 +308 73 loss.margin 24.821626949016412 +308 73 loss.adversarial_temperature 0.5637427747933269 +308 73 negative_sampler.num_negs_per_pos 32.0 +308 73 training.batch_size 1.0 +308 74 model.embedding_dim 1.0 +308 74 loss.margin 19.467300473890738 +308 74 loss.adversarial_temperature 0.3562823776490004 +308 74 negative_sampler.num_negs_per_pos 8.0 +308 74 training.batch_size 2.0 +308 75 model.embedding_dim 1.0 +308 75 loss.margin 7.445099252141661 +308 75 loss.adversarial_temperature 0.7221257238113805 +308 75 negative_sampler.num_negs_per_pos 26.0 +308 75 training.batch_size 0.0 +308 76 model.embedding_dim 1.0 +308 76 loss.margin 17.142507664234685 +308 76 loss.adversarial_temperature 0.5205449159160356 +308 76 negative_sampler.num_negs_per_pos 72.0 +308 76 training.batch_size 0.0 +308 77 model.embedding_dim 0.0 +308 77 loss.margin 12.793770353937276 +308 77 loss.adversarial_temperature 0.1660551425411374 +308 77 negative_sampler.num_negs_per_pos 74.0 +308 77 training.batch_size 0.0 +308 78 model.embedding_dim 0.0 +308 78 loss.margin 11.069163608563699 +308 78 loss.adversarial_temperature 0.9961854462916727 +308 78 negative_sampler.num_negs_per_pos 71.0 +308 78 training.batch_size 1.0 +308 79 model.embedding_dim 1.0 +308 79 loss.margin 25.93151366648963 +308 79 loss.adversarial_temperature 0.46261758680639853 +308 79 negative_sampler.num_negs_per_pos 82.0 +308 79 training.batch_size 2.0 +308 80 
model.embedding_dim 2.0 +308 80 loss.margin 19.8293575671947 +308 80 loss.adversarial_temperature 0.6543131052858203 +308 80 negative_sampler.num_negs_per_pos 39.0 +308 80 training.batch_size 1.0 +308 81 model.embedding_dim 0.0 +308 81 loss.margin 7.826071698869697 +308 81 loss.adversarial_temperature 0.31149537696105073 +308 81 negative_sampler.num_negs_per_pos 31.0 +308 81 training.batch_size 2.0 +308 82 model.embedding_dim 1.0 +308 82 loss.margin 24.68634162339308 +308 82 loss.adversarial_temperature 0.6110947045899527 +308 82 negative_sampler.num_negs_per_pos 72.0 +308 82 training.batch_size 1.0 +308 83 model.embedding_dim 1.0 +308 83 loss.margin 26.698983225151792 +308 83 loss.adversarial_temperature 0.7249400973688958 +308 83 negative_sampler.num_negs_per_pos 89.0 +308 83 training.batch_size 2.0 +308 84 model.embedding_dim 2.0 +308 84 loss.margin 7.354385111742516 +308 84 loss.adversarial_temperature 0.8845992798856586 +308 84 negative_sampler.num_negs_per_pos 47.0 +308 84 training.batch_size 1.0 +308 85 model.embedding_dim 2.0 +308 85 loss.margin 6.469094181096107 +308 85 loss.adversarial_temperature 0.9318646082272193 +308 85 negative_sampler.num_negs_per_pos 96.0 +308 85 training.batch_size 2.0 +308 86 model.embedding_dim 2.0 +308 86 loss.margin 25.807640546675913 +308 86 loss.adversarial_temperature 0.2701897689780621 +308 86 negative_sampler.num_negs_per_pos 18.0 +308 86 training.batch_size 1.0 +308 87 model.embedding_dim 2.0 +308 87 loss.margin 24.401571195271757 +308 87 loss.adversarial_temperature 0.5507847221555542 +308 87 negative_sampler.num_negs_per_pos 42.0 +308 87 training.batch_size 0.0 +308 88 model.embedding_dim 0.0 +308 88 loss.margin 1.6652059783223216 +308 88 loss.adversarial_temperature 0.9334556576499673 +308 88 negative_sampler.num_negs_per_pos 40.0 +308 88 training.batch_size 0.0 +308 89 model.embedding_dim 0.0 +308 89 loss.margin 24.255861203930127 +308 89 loss.adversarial_temperature 0.19604243348867684 +308 89 
negative_sampler.num_negs_per_pos 54.0 +308 89 training.batch_size 2.0 +308 90 model.embedding_dim 0.0 +308 90 loss.margin 22.54649307560614 +308 90 loss.adversarial_temperature 0.6917538580075151 +308 90 negative_sampler.num_negs_per_pos 98.0 +308 90 training.batch_size 2.0 +308 91 model.embedding_dim 1.0 +308 91 loss.margin 18.331021486259207 +308 91 loss.adversarial_temperature 0.8753113148219325 +308 91 negative_sampler.num_negs_per_pos 93.0 +308 91 training.batch_size 2.0 +308 92 model.embedding_dim 1.0 +308 92 loss.margin 14.95856937061248 +308 92 loss.adversarial_temperature 0.8160680321415409 +308 92 negative_sampler.num_negs_per_pos 29.0 +308 92 training.batch_size 1.0 +308 93 model.embedding_dim 0.0 +308 93 loss.margin 21.086015769470567 +308 93 loss.adversarial_temperature 0.7856490596499023 +308 93 negative_sampler.num_negs_per_pos 65.0 +308 93 training.batch_size 0.0 +308 94 model.embedding_dim 0.0 +308 94 loss.margin 22.781484346794922 +308 94 loss.adversarial_temperature 0.825185657336869 +308 94 negative_sampler.num_negs_per_pos 89.0 +308 94 training.batch_size 1.0 +308 95 model.embedding_dim 0.0 +308 95 loss.margin 2.6467939848237667 +308 95 loss.adversarial_temperature 0.17958246940259417 +308 95 negative_sampler.num_negs_per_pos 44.0 +308 95 training.batch_size 2.0 +308 96 model.embedding_dim 2.0 +308 96 loss.margin 21.458540710828675 +308 96 loss.adversarial_temperature 0.11828843222618275 +308 96 negative_sampler.num_negs_per_pos 61.0 +308 96 training.batch_size 0.0 +308 97 model.embedding_dim 2.0 +308 97 loss.margin 28.93476498658747 +308 97 loss.adversarial_temperature 0.3539758561493368 +308 97 negative_sampler.num_negs_per_pos 14.0 +308 97 training.batch_size 2.0 +308 98 model.embedding_dim 0.0 +308 98 loss.margin 27.985288588868773 +308 98 loss.adversarial_temperature 0.2899364154284241 +308 98 negative_sampler.num_negs_per_pos 2.0 +308 98 training.batch_size 2.0 +308 99 model.embedding_dim 0.0 +308 99 loss.margin 16.843047672577207 +308 
99 loss.adversarial_temperature 0.8985629457136994 +308 99 negative_sampler.num_negs_per_pos 19.0 +308 99 training.batch_size 2.0 +308 100 model.embedding_dim 1.0 +308 100 loss.margin 6.313623434638831 +308 100 loss.adversarial_temperature 0.2903607978689657 +308 100 negative_sampler.num_negs_per_pos 59.0 +308 100 training.batch_size 0.0 +308 1 dataset """kinships""" +308 1 model """hole""" +308 1 loss """nssa""" +308 1 regularizer """no""" +308 1 optimizer """adadelta""" +308 1 training_loop """owa""" +308 1 negative_sampler """basic""" +308 1 evaluator """rankbased""" +308 2 dataset """kinships""" +308 2 model """hole""" +308 2 loss """nssa""" +308 2 regularizer """no""" +308 2 optimizer """adadelta""" +308 2 training_loop """owa""" +308 2 negative_sampler """basic""" +308 2 evaluator """rankbased""" +308 3 dataset """kinships""" +308 3 model """hole""" +308 3 loss """nssa""" +308 3 regularizer """no""" +308 3 optimizer """adadelta""" +308 3 training_loop """owa""" +308 3 negative_sampler """basic""" +308 3 evaluator """rankbased""" +308 4 dataset """kinships""" +308 4 model """hole""" +308 4 loss """nssa""" +308 4 regularizer """no""" +308 4 optimizer """adadelta""" +308 4 training_loop """owa""" +308 4 negative_sampler """basic""" +308 4 evaluator """rankbased""" +308 5 dataset """kinships""" +308 5 model """hole""" +308 5 loss """nssa""" +308 5 regularizer """no""" +308 5 optimizer """adadelta""" +308 5 training_loop """owa""" +308 5 negative_sampler """basic""" +308 5 evaluator """rankbased""" +308 6 dataset """kinships""" +308 6 model """hole""" +308 6 loss """nssa""" +308 6 regularizer """no""" +308 6 optimizer """adadelta""" +308 6 training_loop """owa""" +308 6 negative_sampler """basic""" +308 6 evaluator """rankbased""" +308 7 dataset """kinships""" +308 7 model """hole""" +308 7 loss """nssa""" +308 7 regularizer """no""" +308 7 optimizer """adadelta""" +308 7 training_loop """owa""" +308 7 negative_sampler """basic""" +308 7 evaluator """rankbased""" 
+308 8 dataset """kinships""" +308 8 model """hole""" +308 8 loss """nssa""" +308 8 regularizer """no""" +308 8 optimizer """adadelta""" +308 8 training_loop """owa""" +308 8 negative_sampler """basic""" +308 8 evaluator """rankbased""" +308 9 dataset """kinships""" +308 9 model """hole""" +308 9 loss """nssa""" +308 9 regularizer """no""" +308 9 optimizer """adadelta""" +308 9 training_loop """owa""" +308 9 negative_sampler """basic""" +308 9 evaluator """rankbased""" +308 10 dataset """kinships""" +308 10 model """hole""" +308 10 loss """nssa""" +308 10 regularizer """no""" +308 10 optimizer """adadelta""" +308 10 training_loop """owa""" +308 10 negative_sampler """basic""" +308 10 evaluator """rankbased""" +308 11 dataset """kinships""" +308 11 model """hole""" +308 11 loss """nssa""" +308 11 regularizer """no""" +308 11 optimizer """adadelta""" +308 11 training_loop """owa""" +308 11 negative_sampler """basic""" +308 11 evaluator """rankbased""" +308 12 dataset """kinships""" +308 12 model """hole""" +308 12 loss """nssa""" +308 12 regularizer """no""" +308 12 optimizer """adadelta""" +308 12 training_loop """owa""" +308 12 negative_sampler """basic""" +308 12 evaluator """rankbased""" +308 13 dataset """kinships""" +308 13 model """hole""" +308 13 loss """nssa""" +308 13 regularizer """no""" +308 13 optimizer """adadelta""" +308 13 training_loop """owa""" +308 13 negative_sampler """basic""" +308 13 evaluator """rankbased""" +308 14 dataset """kinships""" +308 14 model """hole""" +308 14 loss """nssa""" +308 14 regularizer """no""" +308 14 optimizer """adadelta""" +308 14 training_loop """owa""" +308 14 negative_sampler """basic""" +308 14 evaluator """rankbased""" +308 15 dataset """kinships""" +308 15 model """hole""" +308 15 loss """nssa""" +308 15 regularizer """no""" +308 15 optimizer """adadelta""" +308 15 training_loop """owa""" +308 15 negative_sampler """basic""" +308 15 evaluator """rankbased""" +308 16 dataset """kinships""" +308 16 model """hole""" 
+308 16 loss """nssa""" +308 16 regularizer """no""" +308 16 optimizer """adadelta""" +308 16 training_loop """owa""" +308 16 negative_sampler """basic""" +308 16 evaluator """rankbased""" +308 17 dataset """kinships""" +308 17 model """hole""" +308 17 loss """nssa""" +308 17 regularizer """no""" +308 17 optimizer """adadelta""" +308 17 training_loop """owa""" +308 17 negative_sampler """basic""" +308 17 evaluator """rankbased""" +308 18 dataset """kinships""" +308 18 model """hole""" +308 18 loss """nssa""" +308 18 regularizer """no""" +308 18 optimizer """adadelta""" +308 18 training_loop """owa""" +308 18 negative_sampler """basic""" +308 18 evaluator """rankbased""" +308 19 dataset """kinships""" +308 19 model """hole""" +308 19 loss """nssa""" +308 19 regularizer """no""" +308 19 optimizer """adadelta""" +308 19 training_loop """owa""" +308 19 negative_sampler """basic""" +308 19 evaluator """rankbased""" +308 20 dataset """kinships""" +308 20 model """hole""" +308 20 loss """nssa""" +308 20 regularizer """no""" +308 20 optimizer """adadelta""" +308 20 training_loop """owa""" +308 20 negative_sampler """basic""" +308 20 evaluator """rankbased""" +308 21 dataset """kinships""" +308 21 model """hole""" +308 21 loss """nssa""" +308 21 regularizer """no""" +308 21 optimizer """adadelta""" +308 21 training_loop """owa""" +308 21 negative_sampler """basic""" +308 21 evaluator """rankbased""" +308 22 dataset """kinships""" +308 22 model """hole""" +308 22 loss """nssa""" +308 22 regularizer """no""" +308 22 optimizer """adadelta""" +308 22 training_loop """owa""" +308 22 negative_sampler """basic""" +308 22 evaluator """rankbased""" +308 23 dataset """kinships""" +308 23 model """hole""" +308 23 loss """nssa""" +308 23 regularizer """no""" +308 23 optimizer """adadelta""" +308 23 training_loop """owa""" +308 23 negative_sampler """basic""" +308 23 evaluator """rankbased""" +308 24 dataset """kinships""" +308 24 model """hole""" +308 24 loss """nssa""" +308 24 
regularizer """no""" +308 24 optimizer """adadelta""" +308 24 training_loop """owa""" +308 24 negative_sampler """basic""" +308 24 evaluator """rankbased""" +308 25 dataset """kinships""" +308 25 model """hole""" +308 25 loss """nssa""" +308 25 regularizer """no""" +308 25 optimizer """adadelta""" +308 25 training_loop """owa""" +308 25 negative_sampler """basic""" +308 25 evaluator """rankbased""" +308 26 dataset """kinships""" +308 26 model """hole""" +308 26 loss """nssa""" +308 26 regularizer """no""" +308 26 optimizer """adadelta""" +308 26 training_loop """owa""" +308 26 negative_sampler """basic""" +308 26 evaluator """rankbased""" +308 27 dataset """kinships""" +308 27 model """hole""" +308 27 loss """nssa""" +308 27 regularizer """no""" +308 27 optimizer """adadelta""" +308 27 training_loop """owa""" +308 27 negative_sampler """basic""" +308 27 evaluator """rankbased""" +308 28 dataset """kinships""" +308 28 model """hole""" +308 28 loss """nssa""" +308 28 regularizer """no""" +308 28 optimizer """adadelta""" +308 28 training_loop """owa""" +308 28 negative_sampler """basic""" +308 28 evaluator """rankbased""" +308 29 dataset """kinships""" +308 29 model """hole""" +308 29 loss """nssa""" +308 29 regularizer """no""" +308 29 optimizer """adadelta""" +308 29 training_loop """owa""" +308 29 negative_sampler """basic""" +308 29 evaluator """rankbased""" +308 30 dataset """kinships""" +308 30 model """hole""" +308 30 loss """nssa""" +308 30 regularizer """no""" +308 30 optimizer """adadelta""" +308 30 training_loop """owa""" +308 30 negative_sampler """basic""" +308 30 evaluator """rankbased""" +308 31 dataset """kinships""" +308 31 model """hole""" +308 31 loss """nssa""" +308 31 regularizer """no""" +308 31 optimizer """adadelta""" +308 31 training_loop """owa""" +308 31 negative_sampler """basic""" +308 31 evaluator """rankbased""" +308 32 dataset """kinships""" +308 32 model """hole""" +308 32 loss """nssa""" +308 32 regularizer """no""" +308 32 optimizer 
"""adadelta""" +308 32 training_loop """owa""" +308 32 negative_sampler """basic""" +308 32 evaluator """rankbased""" +308 33 dataset """kinships""" +308 33 model """hole""" +308 33 loss """nssa""" +308 33 regularizer """no""" +308 33 optimizer """adadelta""" +308 33 training_loop """owa""" +308 33 negative_sampler """basic""" +308 33 evaluator """rankbased""" +308 34 dataset """kinships""" +308 34 model """hole""" +308 34 loss """nssa""" +308 34 regularizer """no""" +308 34 optimizer """adadelta""" +308 34 training_loop """owa""" +308 34 negative_sampler """basic""" +308 34 evaluator """rankbased""" +308 35 dataset """kinships""" +308 35 model """hole""" +308 35 loss """nssa""" +308 35 regularizer """no""" +308 35 optimizer """adadelta""" +308 35 training_loop """owa""" +308 35 negative_sampler """basic""" +308 35 evaluator """rankbased""" +308 36 dataset """kinships""" +308 36 model """hole""" +308 36 loss """nssa""" +308 36 regularizer """no""" +308 36 optimizer """adadelta""" +308 36 training_loop """owa""" +308 36 negative_sampler """basic""" +308 36 evaluator """rankbased""" +308 37 dataset """kinships""" +308 37 model """hole""" +308 37 loss """nssa""" +308 37 regularizer """no""" +308 37 optimizer """adadelta""" +308 37 training_loop """owa""" +308 37 negative_sampler """basic""" +308 37 evaluator """rankbased""" +308 38 dataset """kinships""" +308 38 model """hole""" +308 38 loss """nssa""" +308 38 regularizer """no""" +308 38 optimizer """adadelta""" +308 38 training_loop """owa""" +308 38 negative_sampler """basic""" +308 38 evaluator """rankbased""" +308 39 dataset """kinships""" +308 39 model """hole""" +308 39 loss """nssa""" +308 39 regularizer """no""" +308 39 optimizer """adadelta""" +308 39 training_loop """owa""" +308 39 negative_sampler """basic""" +308 39 evaluator """rankbased""" +308 40 dataset """kinships""" +308 40 model """hole""" +308 40 loss """nssa""" +308 40 regularizer """no""" +308 40 optimizer """adadelta""" +308 40 training_loop 
"""owa""" +308 40 negative_sampler """basic""" +308 40 evaluator """rankbased""" +308 41 dataset """kinships""" +308 41 model """hole""" +308 41 loss """nssa""" +308 41 regularizer """no""" +308 41 optimizer """adadelta""" +308 41 training_loop """owa""" +308 41 negative_sampler """basic""" +308 41 evaluator """rankbased""" +308 42 dataset """kinships""" +308 42 model """hole""" +308 42 loss """nssa""" +308 42 regularizer """no""" +308 42 optimizer """adadelta""" +308 42 training_loop """owa""" +308 42 negative_sampler """basic""" +308 42 evaluator """rankbased""" +308 43 dataset """kinships""" +308 43 model """hole""" +308 43 loss """nssa""" +308 43 regularizer """no""" +308 43 optimizer """adadelta""" +308 43 training_loop """owa""" +308 43 negative_sampler """basic""" +308 43 evaluator """rankbased""" +308 44 dataset """kinships""" +308 44 model """hole""" +308 44 loss """nssa""" +308 44 regularizer """no""" +308 44 optimizer """adadelta""" +308 44 training_loop """owa""" +308 44 negative_sampler """basic""" +308 44 evaluator """rankbased""" +308 45 dataset """kinships""" +308 45 model """hole""" +308 45 loss """nssa""" +308 45 regularizer """no""" +308 45 optimizer """adadelta""" +308 45 training_loop """owa""" +308 45 negative_sampler """basic""" +308 45 evaluator """rankbased""" +308 46 dataset """kinships""" +308 46 model """hole""" +308 46 loss """nssa""" +308 46 regularizer """no""" +308 46 optimizer """adadelta""" +308 46 training_loop """owa""" +308 46 negative_sampler """basic""" +308 46 evaluator """rankbased""" +308 47 dataset """kinships""" +308 47 model """hole""" +308 47 loss """nssa""" +308 47 regularizer """no""" +308 47 optimizer """adadelta""" +308 47 training_loop """owa""" +308 47 negative_sampler """basic""" +308 47 evaluator """rankbased""" +308 48 dataset """kinships""" +308 48 model """hole""" +308 48 loss """nssa""" +308 48 regularizer """no""" +308 48 optimizer """adadelta""" +308 48 training_loop """owa""" +308 48 negative_sampler 
"""basic""" +308 48 evaluator """rankbased""" +308 49 dataset """kinships""" +308 49 model """hole""" +308 49 loss """nssa""" +308 49 regularizer """no""" +308 49 optimizer """adadelta""" +308 49 training_loop """owa""" +308 49 negative_sampler """basic""" +308 49 evaluator """rankbased""" +308 50 dataset """kinships""" +308 50 model """hole""" +308 50 loss """nssa""" +308 50 regularizer """no""" +308 50 optimizer """adadelta""" +308 50 training_loop """owa""" +308 50 negative_sampler """basic""" +308 50 evaluator """rankbased""" +308 51 dataset """kinships""" +308 51 model """hole""" +308 51 loss """nssa""" +308 51 regularizer """no""" +308 51 optimizer """adadelta""" +308 51 training_loop """owa""" +308 51 negative_sampler """basic""" +308 51 evaluator """rankbased""" +308 52 dataset """kinships""" +308 52 model """hole""" +308 52 loss """nssa""" +308 52 regularizer """no""" +308 52 optimizer """adadelta""" +308 52 training_loop """owa""" +308 52 negative_sampler """basic""" +308 52 evaluator """rankbased""" +308 53 dataset """kinships""" +308 53 model """hole""" +308 53 loss """nssa""" +308 53 regularizer """no""" +308 53 optimizer """adadelta""" +308 53 training_loop """owa""" +308 53 negative_sampler """basic""" +308 53 evaluator """rankbased""" +308 54 dataset """kinships""" +308 54 model """hole""" +308 54 loss """nssa""" +308 54 regularizer """no""" +308 54 optimizer """adadelta""" +308 54 training_loop """owa""" +308 54 negative_sampler """basic""" +308 54 evaluator """rankbased""" +308 55 dataset """kinships""" +308 55 model """hole""" +308 55 loss """nssa""" +308 55 regularizer """no""" +308 55 optimizer """adadelta""" +308 55 training_loop """owa""" +308 55 negative_sampler """basic""" +308 55 evaluator """rankbased""" +308 56 dataset """kinships""" +308 56 model """hole""" +308 56 loss """nssa""" +308 56 regularizer """no""" +308 56 optimizer """adadelta""" +308 56 training_loop """owa""" +308 56 negative_sampler """basic""" +308 56 evaluator 
"""rankbased""" +308 57 dataset """kinships""" +308 57 model """hole""" +308 57 loss """nssa""" +308 57 regularizer """no""" +308 57 optimizer """adadelta""" +308 57 training_loop """owa""" +308 57 negative_sampler """basic""" +308 57 evaluator """rankbased""" +308 58 dataset """kinships""" +308 58 model """hole""" +308 58 loss """nssa""" +308 58 regularizer """no""" +308 58 optimizer """adadelta""" +308 58 training_loop """owa""" +308 58 negative_sampler """basic""" +308 58 evaluator """rankbased""" +308 59 dataset """kinships""" +308 59 model """hole""" +308 59 loss """nssa""" +308 59 regularizer """no""" +308 59 optimizer """adadelta""" +308 59 training_loop """owa""" +308 59 negative_sampler """basic""" +308 59 evaluator """rankbased""" +308 60 dataset """kinships""" +308 60 model """hole""" +308 60 loss """nssa""" +308 60 regularizer """no""" +308 60 optimizer """adadelta""" +308 60 training_loop """owa""" +308 60 negative_sampler """basic""" +308 60 evaluator """rankbased""" +308 61 dataset """kinships""" +308 61 model """hole""" +308 61 loss """nssa""" +308 61 regularizer """no""" +308 61 optimizer """adadelta""" +308 61 training_loop """owa""" +308 61 negative_sampler """basic""" +308 61 evaluator """rankbased""" +308 62 dataset """kinships""" +308 62 model """hole""" +308 62 loss """nssa""" +308 62 regularizer """no""" +308 62 optimizer """adadelta""" +308 62 training_loop """owa""" +308 62 negative_sampler """basic""" +308 62 evaluator """rankbased""" +308 63 dataset """kinships""" +308 63 model """hole""" +308 63 loss """nssa""" +308 63 regularizer """no""" +308 63 optimizer """adadelta""" +308 63 training_loop """owa""" +308 63 negative_sampler """basic""" +308 63 evaluator """rankbased""" +308 64 dataset """kinships""" +308 64 model """hole""" +308 64 loss """nssa""" +308 64 regularizer """no""" +308 64 optimizer """adadelta""" +308 64 training_loop """owa""" +308 64 negative_sampler """basic""" +308 64 evaluator """rankbased""" +308 65 dataset 
"""kinships""" +308 65 model """hole""" +308 65 loss """nssa""" +308 65 regularizer """no""" +308 65 optimizer """adadelta""" +308 65 training_loop """owa""" +308 65 negative_sampler """basic""" +308 65 evaluator """rankbased""" +308 66 dataset """kinships""" +308 66 model """hole""" +308 66 loss """nssa""" +308 66 regularizer """no""" +308 66 optimizer """adadelta""" +308 66 training_loop """owa""" +308 66 negative_sampler """basic""" +308 66 evaluator """rankbased""" +308 67 dataset """kinships""" +308 67 model """hole""" +308 67 loss """nssa""" +308 67 regularizer """no""" +308 67 optimizer """adadelta""" +308 67 training_loop """owa""" +308 67 negative_sampler """basic""" +308 67 evaluator """rankbased""" +308 68 dataset """kinships""" +308 68 model """hole""" +308 68 loss """nssa""" +308 68 regularizer """no""" +308 68 optimizer """adadelta""" +308 68 training_loop """owa""" +308 68 negative_sampler """basic""" +308 68 evaluator """rankbased""" +308 69 dataset """kinships""" +308 69 model """hole""" +308 69 loss """nssa""" +308 69 regularizer """no""" +308 69 optimizer """adadelta""" +308 69 training_loop """owa""" +308 69 negative_sampler """basic""" +308 69 evaluator """rankbased""" +308 70 dataset """kinships""" +308 70 model """hole""" +308 70 loss """nssa""" +308 70 regularizer """no""" +308 70 optimizer """adadelta""" +308 70 training_loop """owa""" +308 70 negative_sampler """basic""" +308 70 evaluator """rankbased""" +308 71 dataset """kinships""" +308 71 model """hole""" +308 71 loss """nssa""" +308 71 regularizer """no""" +308 71 optimizer """adadelta""" +308 71 training_loop """owa""" +308 71 negative_sampler """basic""" +308 71 evaluator """rankbased""" +308 72 dataset """kinships""" +308 72 model """hole""" +308 72 loss """nssa""" +308 72 regularizer """no""" +308 72 optimizer """adadelta""" +308 72 training_loop """owa""" +308 72 negative_sampler """basic""" +308 72 evaluator """rankbased""" +308 73 dataset """kinships""" +308 73 model """hole""" 
+308 73 loss """nssa""" +308 73 regularizer """no""" +308 73 optimizer """adadelta""" +308 73 training_loop """owa""" +308 73 negative_sampler """basic""" +308 73 evaluator """rankbased""" +308 74 dataset """kinships""" +308 74 model """hole""" +308 74 loss """nssa""" +308 74 regularizer """no""" +308 74 optimizer """adadelta""" +308 74 training_loop """owa""" +308 74 negative_sampler """basic""" +308 74 evaluator """rankbased""" +308 75 dataset """kinships""" +308 75 model """hole""" +308 75 loss """nssa""" +308 75 regularizer """no""" +308 75 optimizer """adadelta""" +308 75 training_loop """owa""" +308 75 negative_sampler """basic""" +308 75 evaluator """rankbased""" +308 76 dataset """kinships""" +308 76 model """hole""" +308 76 loss """nssa""" +308 76 regularizer """no""" +308 76 optimizer """adadelta""" +308 76 training_loop """owa""" +308 76 negative_sampler """basic""" +308 76 evaluator """rankbased""" +308 77 dataset """kinships""" +308 77 model """hole""" +308 77 loss """nssa""" +308 77 regularizer """no""" +308 77 optimizer """adadelta""" +308 77 training_loop """owa""" +308 77 negative_sampler """basic""" +308 77 evaluator """rankbased""" +308 78 dataset """kinships""" +308 78 model """hole""" +308 78 loss """nssa""" +308 78 regularizer """no""" +308 78 optimizer """adadelta""" +308 78 training_loop """owa""" +308 78 negative_sampler """basic""" +308 78 evaluator """rankbased""" +308 79 dataset """kinships""" +308 79 model """hole""" +308 79 loss """nssa""" +308 79 regularizer """no""" +308 79 optimizer """adadelta""" +308 79 training_loop """owa""" +308 79 negative_sampler """basic""" +308 79 evaluator """rankbased""" +308 80 dataset """kinships""" +308 80 model """hole""" +308 80 loss """nssa""" +308 80 regularizer """no""" +308 80 optimizer """adadelta""" +308 80 training_loop """owa""" +308 80 negative_sampler """basic""" +308 80 evaluator """rankbased""" +308 81 dataset """kinships""" +308 81 model """hole""" +308 81 loss """nssa""" +308 81 
regularizer """no""" +308 81 optimizer """adadelta""" +308 81 training_loop """owa""" +308 81 negative_sampler """basic""" +308 81 evaluator """rankbased""" +308 82 dataset """kinships""" +308 82 model """hole""" +308 82 loss """nssa""" +308 82 regularizer """no""" +308 82 optimizer """adadelta""" +308 82 training_loop """owa""" +308 82 negative_sampler """basic""" +308 82 evaluator """rankbased""" +308 83 dataset """kinships""" +308 83 model """hole""" +308 83 loss """nssa""" +308 83 regularizer """no""" +308 83 optimizer """adadelta""" +308 83 training_loop """owa""" +308 83 negative_sampler """basic""" +308 83 evaluator """rankbased""" +308 84 dataset """kinships""" +308 84 model """hole""" +308 84 loss """nssa""" +308 84 regularizer """no""" +308 84 optimizer """adadelta""" +308 84 training_loop """owa""" +308 84 negative_sampler """basic""" +308 84 evaluator """rankbased""" +308 85 dataset """kinships""" +308 85 model """hole""" +308 85 loss """nssa""" +308 85 regularizer """no""" +308 85 optimizer """adadelta""" +308 85 training_loop """owa""" +308 85 negative_sampler """basic""" +308 85 evaluator """rankbased""" +308 86 dataset """kinships""" +308 86 model """hole""" +308 86 loss """nssa""" +308 86 regularizer """no""" +308 86 optimizer """adadelta""" +308 86 training_loop """owa""" +308 86 negative_sampler """basic""" +308 86 evaluator """rankbased""" +308 87 dataset """kinships""" +308 87 model """hole""" +308 87 loss """nssa""" +308 87 regularizer """no""" +308 87 optimizer """adadelta""" +308 87 training_loop """owa""" +308 87 negative_sampler """basic""" +308 87 evaluator """rankbased""" +308 88 dataset """kinships""" +308 88 model """hole""" +308 88 loss """nssa""" +308 88 regularizer """no""" +308 88 optimizer """adadelta""" +308 88 training_loop """owa""" +308 88 negative_sampler """basic""" +308 88 evaluator """rankbased""" +308 89 dataset """kinships""" +308 89 model """hole""" +308 89 loss """nssa""" +308 89 regularizer """no""" +308 89 optimizer 
"""adadelta""" +308 89 training_loop """owa""" +308 89 negative_sampler """basic""" +308 89 evaluator """rankbased""" +308 90 dataset """kinships""" +308 90 model """hole""" +308 90 loss """nssa""" +308 90 regularizer """no""" +308 90 optimizer """adadelta""" +308 90 training_loop """owa""" +308 90 negative_sampler """basic""" +308 90 evaluator """rankbased""" +308 91 dataset """kinships""" +308 91 model """hole""" +308 91 loss """nssa""" +308 91 regularizer """no""" +308 91 optimizer """adadelta""" +308 91 training_loop """owa""" +308 91 negative_sampler """basic""" +308 91 evaluator """rankbased""" +308 92 dataset """kinships""" +308 92 model """hole""" +308 92 loss """nssa""" +308 92 regularizer """no""" +308 92 optimizer """adadelta""" +308 92 training_loop """owa""" +308 92 negative_sampler """basic""" +308 92 evaluator """rankbased""" +308 93 dataset """kinships""" +308 93 model """hole""" +308 93 loss """nssa""" +308 93 regularizer """no""" +308 93 optimizer """adadelta""" +308 93 training_loop """owa""" +308 93 negative_sampler """basic""" +308 93 evaluator """rankbased""" +308 94 dataset """kinships""" +308 94 model """hole""" +308 94 loss """nssa""" +308 94 regularizer """no""" +308 94 optimizer """adadelta""" +308 94 training_loop """owa""" +308 94 negative_sampler """basic""" +308 94 evaluator """rankbased""" +308 95 dataset """kinships""" +308 95 model """hole""" +308 95 loss """nssa""" +308 95 regularizer """no""" +308 95 optimizer """adadelta""" +308 95 training_loop """owa""" +308 95 negative_sampler """basic""" +308 95 evaluator """rankbased""" +308 96 dataset """kinships""" +308 96 model """hole""" +308 96 loss """nssa""" +308 96 regularizer """no""" +308 96 optimizer """adadelta""" +308 96 training_loop """owa""" +308 96 negative_sampler """basic""" +308 96 evaluator """rankbased""" +308 97 dataset """kinships""" +308 97 model """hole""" +308 97 loss """nssa""" +308 97 regularizer """no""" +308 97 optimizer """adadelta""" +308 97 training_loop 
"""owa""" +308 97 negative_sampler """basic""" +308 97 evaluator """rankbased""" +308 98 dataset """kinships""" +308 98 model """hole""" +308 98 loss """nssa""" +308 98 regularizer """no""" +308 98 optimizer """adadelta""" +308 98 training_loop """owa""" +308 98 negative_sampler """basic""" +308 98 evaluator """rankbased""" +308 99 dataset """kinships""" +308 99 model """hole""" +308 99 loss """nssa""" +308 99 regularizer """no""" +308 99 optimizer """adadelta""" +308 99 training_loop """owa""" +308 99 negative_sampler """basic""" +308 99 evaluator """rankbased""" +308 100 dataset """kinships""" +308 100 model """hole""" +308 100 loss """nssa""" +308 100 regularizer """no""" +308 100 optimizer """adadelta""" +308 100 training_loop """owa""" +308 100 negative_sampler """basic""" +308 100 evaluator """rankbased""" +309 1 model.embedding_dim 1.0 +309 1 loss.margin 7.378529038963694 +309 1 loss.adversarial_temperature 0.30336989235629225 +309 1 negative_sampler.num_negs_per_pos 45.0 +309 1 training.batch_size 1.0 +309 2 model.embedding_dim 1.0 +309 2 loss.margin 15.489327101839041 +309 2 loss.adversarial_temperature 0.5036728269169081 +309 2 negative_sampler.num_negs_per_pos 2.0 +309 2 training.batch_size 0.0 +309 3 model.embedding_dim 2.0 +309 3 loss.margin 5.43717284400638 +309 3 loss.adversarial_temperature 0.8838882441384192 +309 3 negative_sampler.num_negs_per_pos 69.0 +309 3 training.batch_size 0.0 +309 4 model.embedding_dim 2.0 +309 4 loss.margin 29.780094405507846 +309 4 loss.adversarial_temperature 0.6560353364716947 +309 4 negative_sampler.num_negs_per_pos 27.0 +309 4 training.batch_size 0.0 +309 5 model.embedding_dim 2.0 +309 5 loss.margin 25.8897591121793 +309 5 loss.adversarial_temperature 0.33924984457953034 +309 5 negative_sampler.num_negs_per_pos 10.0 +309 5 training.batch_size 1.0 +309 6 model.embedding_dim 2.0 +309 6 loss.margin 16.46000640927459 +309 6 loss.adversarial_temperature 0.37127653710696407 +309 6 negative_sampler.num_negs_per_pos 94.0 +309 
6 training.batch_size 1.0 +309 7 model.embedding_dim 0.0 +309 7 loss.margin 23.86737982386193 +309 7 loss.adversarial_temperature 0.9779493837726652 +309 7 negative_sampler.num_negs_per_pos 1.0 +309 7 training.batch_size 0.0 +309 8 model.embedding_dim 2.0 +309 8 loss.margin 22.26742533138869 +309 8 loss.adversarial_temperature 0.7390467207378322 +309 8 negative_sampler.num_negs_per_pos 49.0 +309 8 training.batch_size 1.0 +309 9 model.embedding_dim 0.0 +309 9 loss.margin 26.308599224111447 +309 9 loss.adversarial_temperature 0.24896438478871258 +309 9 negative_sampler.num_negs_per_pos 8.0 +309 9 training.batch_size 2.0 +309 10 model.embedding_dim 0.0 +309 10 loss.margin 18.890256470778482 +309 10 loss.adversarial_temperature 0.4175353664241014 +309 10 negative_sampler.num_negs_per_pos 38.0 +309 10 training.batch_size 2.0 +309 11 model.embedding_dim 0.0 +309 11 loss.margin 26.52382745477074 +309 11 loss.adversarial_temperature 0.4356508084593158 +309 11 negative_sampler.num_negs_per_pos 13.0 +309 11 training.batch_size 2.0 +309 12 model.embedding_dim 2.0 +309 12 loss.margin 22.397446787622993 +309 12 loss.adversarial_temperature 0.5238378196094986 +309 12 negative_sampler.num_negs_per_pos 22.0 +309 12 training.batch_size 0.0 +309 13 model.embedding_dim 1.0 +309 13 loss.margin 12.494896623172242 +309 13 loss.adversarial_temperature 0.3255625853823547 +309 13 negative_sampler.num_negs_per_pos 87.0 +309 13 training.batch_size 1.0 +309 14 model.embedding_dim 2.0 +309 14 loss.margin 14.390190557571975 +309 14 loss.adversarial_temperature 0.7858767405607441 +309 14 negative_sampler.num_negs_per_pos 14.0 +309 14 training.batch_size 1.0 +309 15 model.embedding_dim 1.0 +309 15 loss.margin 23.559140676489104 +309 15 loss.adversarial_temperature 0.12235165395359653 +309 15 negative_sampler.num_negs_per_pos 59.0 +309 15 training.batch_size 2.0 +309 16 model.embedding_dim 1.0 +309 16 loss.margin 19.51616366402686 +309 16 loss.adversarial_temperature 0.9823095519933769 +309 16 
negative_sampler.num_negs_per_pos 97.0 +309 16 training.batch_size 2.0 +309 17 model.embedding_dim 1.0 +309 17 loss.margin 22.334336411145095 +309 17 loss.adversarial_temperature 0.5987563225984027 +309 17 negative_sampler.num_negs_per_pos 23.0 +309 17 training.batch_size 0.0 +309 18 model.embedding_dim 1.0 +309 18 loss.margin 16.64495849937927 +309 18 loss.adversarial_temperature 0.6924544730310795 +309 18 negative_sampler.num_negs_per_pos 92.0 +309 18 training.batch_size 0.0 +309 19 model.embedding_dim 0.0 +309 19 loss.margin 28.234441572276744 +309 19 loss.adversarial_temperature 0.5238231282946989 +309 19 negative_sampler.num_negs_per_pos 98.0 +309 19 training.batch_size 0.0 +309 20 model.embedding_dim 0.0 +309 20 loss.margin 22.236455416710484 +309 20 loss.adversarial_temperature 0.7842077606570701 +309 20 negative_sampler.num_negs_per_pos 14.0 +309 20 training.batch_size 1.0 +309 21 model.embedding_dim 1.0 +309 21 loss.margin 22.256967698193122 +309 21 loss.adversarial_temperature 0.7278668615492303 +309 21 negative_sampler.num_negs_per_pos 42.0 +309 21 training.batch_size 0.0 +309 22 model.embedding_dim 0.0 +309 22 loss.margin 6.04630639838762 +309 22 loss.adversarial_temperature 0.5975415108530612 +309 22 negative_sampler.num_negs_per_pos 30.0 +309 22 training.batch_size 1.0 +309 23 model.embedding_dim 2.0 +309 23 loss.margin 12.784905896503467 +309 23 loss.adversarial_temperature 0.8122379346131717 +309 23 negative_sampler.num_negs_per_pos 87.0 +309 23 training.batch_size 2.0 +309 24 model.embedding_dim 2.0 +309 24 loss.margin 12.84270621654406 +309 24 loss.adversarial_temperature 0.7566259707068379 +309 24 negative_sampler.num_negs_per_pos 62.0 +309 24 training.batch_size 2.0 +309 25 model.embedding_dim 2.0 +309 25 loss.margin 12.270283581411958 +309 25 loss.adversarial_temperature 0.32566650027210964 +309 25 negative_sampler.num_negs_per_pos 69.0 +309 25 training.batch_size 0.0 +309 26 model.embedding_dim 2.0 +309 26 loss.margin 11.743932622859658 +309 
26 loss.adversarial_temperature 0.3161249143889293 +309 26 negative_sampler.num_negs_per_pos 6.0 +309 26 training.batch_size 1.0 +309 27 model.embedding_dim 2.0 +309 27 loss.margin 1.737938236155449 +309 27 loss.adversarial_temperature 0.8919208110770863 +309 27 negative_sampler.num_negs_per_pos 21.0 +309 27 training.batch_size 1.0 +309 28 model.embedding_dim 0.0 +309 28 loss.margin 13.91401064391057 +309 28 loss.adversarial_temperature 0.32972834588335126 +309 28 negative_sampler.num_negs_per_pos 28.0 +309 28 training.batch_size 1.0 +309 29 model.embedding_dim 1.0 +309 29 loss.margin 26.346044672214262 +309 29 loss.adversarial_temperature 0.4003278329245339 +309 29 negative_sampler.num_negs_per_pos 78.0 +309 29 training.batch_size 0.0 +309 30 model.embedding_dim 0.0 +309 30 loss.margin 9.356974457871864 +309 30 loss.adversarial_temperature 0.942572524033826 +309 30 negative_sampler.num_negs_per_pos 73.0 +309 30 training.batch_size 2.0 +309 31 model.embedding_dim 2.0 +309 31 loss.margin 4.438920809274295 +309 31 loss.adversarial_temperature 0.5019967827021963 +309 31 negative_sampler.num_negs_per_pos 9.0 +309 31 training.batch_size 2.0 +309 32 model.embedding_dim 2.0 +309 32 loss.margin 8.969984717606858 +309 32 loss.adversarial_temperature 0.3416210895452507 +309 32 negative_sampler.num_negs_per_pos 78.0 +309 32 training.batch_size 2.0 +309 33 model.embedding_dim 1.0 +309 33 loss.margin 13.290644647929861 +309 33 loss.adversarial_temperature 0.22899765718459972 +309 33 negative_sampler.num_negs_per_pos 81.0 +309 33 training.batch_size 0.0 +309 34 model.embedding_dim 1.0 +309 34 loss.margin 2.6299745705595505 +309 34 loss.adversarial_temperature 0.39138988477938996 +309 34 negative_sampler.num_negs_per_pos 55.0 +309 34 training.batch_size 2.0 +309 35 model.embedding_dim 0.0 +309 35 loss.margin 2.2649705810265597 +309 35 loss.adversarial_temperature 0.5594047704919344 +309 35 negative_sampler.num_negs_per_pos 55.0 +309 35 training.batch_size 2.0 +309 36 
model.embedding_dim 2.0 +309 36 loss.margin 3.9364483441603686 +309 36 loss.adversarial_temperature 0.7707279426239886 +309 36 negative_sampler.num_negs_per_pos 78.0 +309 36 training.batch_size 0.0 +309 37 model.embedding_dim 2.0 +309 37 loss.margin 29.49527374569393 +309 37 loss.adversarial_temperature 0.7659834758087125 +309 37 negative_sampler.num_negs_per_pos 42.0 +309 37 training.batch_size 1.0 +309 38 model.embedding_dim 0.0 +309 38 loss.margin 28.294760470487997 +309 38 loss.adversarial_temperature 0.8386536615795845 +309 38 negative_sampler.num_negs_per_pos 13.0 +309 38 training.batch_size 2.0 +309 39 model.embedding_dim 1.0 +309 39 loss.margin 6.966446094349127 +309 39 loss.adversarial_temperature 0.5040049117063248 +309 39 negative_sampler.num_negs_per_pos 56.0 +309 39 training.batch_size 1.0 +309 40 model.embedding_dim 1.0 +309 40 loss.margin 29.4606256351137 +309 40 loss.adversarial_temperature 0.38586717888822086 +309 40 negative_sampler.num_negs_per_pos 45.0 +309 40 training.batch_size 2.0 +309 41 model.embedding_dim 1.0 +309 41 loss.margin 2.975185588147315 +309 41 loss.adversarial_temperature 0.6791756093738653 +309 41 negative_sampler.num_negs_per_pos 7.0 +309 41 training.batch_size 0.0 +309 42 model.embedding_dim 0.0 +309 42 loss.margin 12.734350417304595 +309 42 loss.adversarial_temperature 0.2589401545843386 +309 42 negative_sampler.num_negs_per_pos 61.0 +309 42 training.batch_size 1.0 +309 43 model.embedding_dim 2.0 +309 43 loss.margin 23.49310532486578 +309 43 loss.adversarial_temperature 0.731993073180553 +309 43 negative_sampler.num_negs_per_pos 57.0 +309 43 training.batch_size 0.0 +309 44 model.embedding_dim 2.0 +309 44 loss.margin 8.538142042118409 +309 44 loss.adversarial_temperature 0.18223539886422363 +309 44 negative_sampler.num_negs_per_pos 53.0 +309 44 training.batch_size 2.0 +309 45 model.embedding_dim 1.0 +309 45 loss.margin 4.369397683201327 +309 45 loss.adversarial_temperature 0.7469620305039169 +309 45 
negative_sampler.num_negs_per_pos 74.0 +309 45 training.batch_size 2.0 +309 46 model.embedding_dim 2.0 +309 46 loss.margin 27.469159203320224 +309 46 loss.adversarial_temperature 0.14505322196527445 +309 46 negative_sampler.num_negs_per_pos 45.0 +309 46 training.batch_size 1.0 +309 47 model.embedding_dim 0.0 +309 47 loss.margin 9.361021774382872 +309 47 loss.adversarial_temperature 0.30949159078635785 +309 47 negative_sampler.num_negs_per_pos 22.0 +309 47 training.batch_size 2.0 +309 48 model.embedding_dim 2.0 +309 48 loss.margin 15.135576811062277 +309 48 loss.adversarial_temperature 0.8614603576056575 +309 48 negative_sampler.num_negs_per_pos 18.0 +309 48 training.batch_size 0.0 +309 49 model.embedding_dim 2.0 +309 49 loss.margin 18.014394759522375 +309 49 loss.adversarial_temperature 0.2945022077022812 +309 49 negative_sampler.num_negs_per_pos 42.0 +309 49 training.batch_size 0.0 +309 50 model.embedding_dim 1.0 +309 50 loss.margin 15.513273138721654 +309 50 loss.adversarial_temperature 0.40851649990637684 +309 50 negative_sampler.num_negs_per_pos 17.0 +309 50 training.batch_size 2.0 +309 51 model.embedding_dim 2.0 +309 51 loss.margin 11.589742118234023 +309 51 loss.adversarial_temperature 0.5912266361555554 +309 51 negative_sampler.num_negs_per_pos 84.0 +309 51 training.batch_size 2.0 +309 52 model.embedding_dim 1.0 +309 52 loss.margin 11.22005685926168 +309 52 loss.adversarial_temperature 0.36209677389052564 +309 52 negative_sampler.num_negs_per_pos 24.0 +309 52 training.batch_size 1.0 +309 53 model.embedding_dim 2.0 +309 53 loss.margin 10.597162845968949 +309 53 loss.adversarial_temperature 0.7155306106614361 +309 53 negative_sampler.num_negs_per_pos 12.0 +309 53 training.batch_size 2.0 +309 54 model.embedding_dim 0.0 +309 54 loss.margin 9.364848141489464 +309 54 loss.adversarial_temperature 0.496088135564124 +309 54 negative_sampler.num_negs_per_pos 4.0 +309 54 training.batch_size 0.0 +309 55 model.embedding_dim 1.0 +309 55 loss.margin 18.163736638408523 +309 
55 loss.adversarial_temperature 0.9061120315354129 +309 55 negative_sampler.num_negs_per_pos 29.0 +309 55 training.batch_size 0.0 +309 56 model.embedding_dim 0.0 +309 56 loss.margin 15.52260966349653 +309 56 loss.adversarial_temperature 0.11631445605231439 +309 56 negative_sampler.num_negs_per_pos 64.0 +309 56 training.batch_size 1.0 +309 57 model.embedding_dim 2.0 +309 57 loss.margin 7.145946124587115 +309 57 loss.adversarial_temperature 0.24802302391796663 +309 57 negative_sampler.num_negs_per_pos 96.0 +309 57 training.batch_size 0.0 +309 58 model.embedding_dim 1.0 +309 58 loss.margin 4.0580741425705575 +309 58 loss.adversarial_temperature 0.5638465162554184 +309 58 negative_sampler.num_negs_per_pos 31.0 +309 58 training.batch_size 0.0 +309 59 model.embedding_dim 1.0 +309 59 loss.margin 11.977119464257129 +309 59 loss.adversarial_temperature 0.16432856623673575 +309 59 negative_sampler.num_negs_per_pos 78.0 +309 59 training.batch_size 0.0 +309 60 model.embedding_dim 0.0 +309 60 loss.margin 14.033007666765664 +309 60 loss.adversarial_temperature 0.2986745954104334 +309 60 negative_sampler.num_negs_per_pos 14.0 +309 60 training.batch_size 2.0 +309 61 model.embedding_dim 0.0 +309 61 loss.margin 25.96460917630632 +309 61 loss.adversarial_temperature 0.1322048902478387 +309 61 negative_sampler.num_negs_per_pos 25.0 +309 61 training.batch_size 0.0 +309 62 model.embedding_dim 0.0 +309 62 loss.margin 16.386253110997785 +309 62 loss.adversarial_temperature 0.44977620493159576 +309 62 negative_sampler.num_negs_per_pos 6.0 +309 62 training.batch_size 2.0 +309 63 model.embedding_dim 0.0 +309 63 loss.margin 8.635329372779125 +309 63 loss.adversarial_temperature 0.35690981945490996 +309 63 negative_sampler.num_negs_per_pos 5.0 +309 63 training.batch_size 1.0 +309 64 model.embedding_dim 0.0 +309 64 loss.margin 3.4331189291828377 +309 64 loss.adversarial_temperature 0.8248235908854509 +309 64 negative_sampler.num_negs_per_pos 15.0 +309 64 training.batch_size 0.0 +309 65 
model.embedding_dim 1.0 +309 65 loss.margin 23.630372557496738 +309 65 loss.adversarial_temperature 0.3400913787045065 +309 65 negative_sampler.num_negs_per_pos 1.0 +309 65 training.batch_size 2.0 +309 66 model.embedding_dim 2.0 +309 66 loss.margin 12.580082308829086 +309 66 loss.adversarial_temperature 0.6493239940240562 +309 66 negative_sampler.num_negs_per_pos 57.0 +309 66 training.batch_size 1.0 +309 67 model.embedding_dim 2.0 +309 67 loss.margin 2.3980886677121744 +309 67 loss.adversarial_temperature 0.25660090838945193 +309 67 negative_sampler.num_negs_per_pos 10.0 +309 67 training.batch_size 1.0 +309 68 model.embedding_dim 0.0 +309 68 loss.margin 22.04687058761985 +309 68 loss.adversarial_temperature 0.29681834650247685 +309 68 negative_sampler.num_negs_per_pos 74.0 +309 68 training.batch_size 0.0 +309 69 model.embedding_dim 2.0 +309 69 loss.margin 21.907796774902014 +309 69 loss.adversarial_temperature 0.2964887176438533 +309 69 negative_sampler.num_negs_per_pos 50.0 +309 69 training.batch_size 2.0 +309 70 model.embedding_dim 2.0 +309 70 loss.margin 9.511849857392683 +309 70 loss.adversarial_temperature 0.8530514415145133 +309 70 negative_sampler.num_negs_per_pos 66.0 +309 70 training.batch_size 0.0 +309 71 model.embedding_dim 2.0 +309 71 loss.margin 29.60618420471818 +309 71 loss.adversarial_temperature 0.2589073726534311 +309 71 negative_sampler.num_negs_per_pos 18.0 +309 71 training.batch_size 1.0 +309 72 model.embedding_dim 0.0 +309 72 loss.margin 23.667058071136438 +309 72 loss.adversarial_temperature 0.7787586146959147 +309 72 negative_sampler.num_negs_per_pos 96.0 +309 72 training.batch_size 1.0 +309 73 model.embedding_dim 1.0 +309 73 loss.margin 18.52802282934337 +309 73 loss.adversarial_temperature 0.9137968783908935 +309 73 negative_sampler.num_negs_per_pos 67.0 +309 73 training.batch_size 1.0 +309 74 model.embedding_dim 0.0 +309 74 loss.margin 19.787655569539297 +309 74 loss.adversarial_temperature 0.8348953628377666 +309 74 
negative_sampler.num_negs_per_pos 9.0 +309 74 training.batch_size 1.0 +309 75 model.embedding_dim 0.0 +309 75 loss.margin 4.895201204856978 +309 75 loss.adversarial_temperature 0.9321948177861619 +309 75 negative_sampler.num_negs_per_pos 12.0 +309 75 training.batch_size 0.0 +309 76 model.embedding_dim 0.0 +309 76 loss.margin 17.410543867873255 +309 76 loss.adversarial_temperature 0.16918685804390426 +309 76 negative_sampler.num_negs_per_pos 76.0 +309 76 training.batch_size 2.0 +309 77 model.embedding_dim 0.0 +309 77 loss.margin 27.039859655636132 +309 77 loss.adversarial_temperature 0.7745664531872312 +309 77 negative_sampler.num_negs_per_pos 86.0 +309 77 training.batch_size 0.0 +309 78 model.embedding_dim 1.0 +309 78 loss.margin 22.77142916248847 +309 78 loss.adversarial_temperature 0.37309588777055747 +309 78 negative_sampler.num_negs_per_pos 57.0 +309 78 training.batch_size 2.0 +309 79 model.embedding_dim 2.0 +309 79 loss.margin 20.23086086959077 +309 79 loss.adversarial_temperature 0.26401369865422875 +309 79 negative_sampler.num_negs_per_pos 16.0 +309 79 training.batch_size 0.0 +309 80 model.embedding_dim 2.0 +309 80 loss.margin 4.696437812217865 +309 80 loss.adversarial_temperature 0.2881946516285599 +309 80 negative_sampler.num_negs_per_pos 62.0 +309 80 training.batch_size 0.0 +309 81 model.embedding_dim 1.0 +309 81 loss.margin 16.05297269802041 +309 81 loss.adversarial_temperature 0.8387525318570498 +309 81 negative_sampler.num_negs_per_pos 40.0 +309 81 training.batch_size 2.0 +309 82 model.embedding_dim 0.0 +309 82 loss.margin 6.467108180415644 +309 82 loss.adversarial_temperature 0.425590380798029 +309 82 negative_sampler.num_negs_per_pos 0.0 +309 82 training.batch_size 2.0 +309 83 model.embedding_dim 2.0 +309 83 loss.margin 22.40491441179113 +309 83 loss.adversarial_temperature 0.8028500966887718 +309 83 negative_sampler.num_negs_per_pos 54.0 +309 83 training.batch_size 2.0 +309 84 model.embedding_dim 1.0 +309 84 loss.margin 2.785152421330361 +309 84 
loss.adversarial_temperature 0.5165357265919273 +309 84 negative_sampler.num_negs_per_pos 7.0 +309 84 training.batch_size 0.0 +309 85 model.embedding_dim 1.0 +309 85 loss.margin 1.9562892994984078 +309 85 loss.adversarial_temperature 0.9891019429844398 +309 85 negative_sampler.num_negs_per_pos 85.0 +309 85 training.batch_size 2.0 +309 86 model.embedding_dim 2.0 +309 86 loss.margin 22.041175997394472 +309 86 loss.adversarial_temperature 0.5860896516056106 +309 86 negative_sampler.num_negs_per_pos 73.0 +309 86 training.batch_size 1.0 +309 87 model.embedding_dim 2.0 +309 87 loss.margin 23.237064251348695 +309 87 loss.adversarial_temperature 0.17936455877053997 +309 87 negative_sampler.num_negs_per_pos 63.0 +309 87 training.batch_size 0.0 +309 88 model.embedding_dim 0.0 +309 88 loss.margin 9.50780417377149 +309 88 loss.adversarial_temperature 0.6370331935347947 +309 88 negative_sampler.num_negs_per_pos 44.0 +309 88 training.batch_size 0.0 +309 89 model.embedding_dim 1.0 +309 89 loss.margin 18.32943932000565 +309 89 loss.adversarial_temperature 0.3795611151076381 +309 89 negative_sampler.num_negs_per_pos 67.0 +309 89 training.batch_size 1.0 +309 90 model.embedding_dim 0.0 +309 90 loss.margin 25.309092260632728 +309 90 loss.adversarial_temperature 0.34523558491199613 +309 90 negative_sampler.num_negs_per_pos 31.0 +309 90 training.batch_size 2.0 +309 91 model.embedding_dim 2.0 +309 91 loss.margin 11.038081298243776 +309 91 loss.adversarial_temperature 0.32147353036738646 +309 91 negative_sampler.num_negs_per_pos 86.0 +309 91 training.batch_size 1.0 +309 92 model.embedding_dim 2.0 +309 92 loss.margin 2.407074172477381 +309 92 loss.adversarial_temperature 0.9817753796846369 +309 92 negative_sampler.num_negs_per_pos 88.0 +309 92 training.batch_size 1.0 +309 93 model.embedding_dim 2.0 +309 93 loss.margin 26.63099240404534 +309 93 loss.adversarial_temperature 0.603657632171886 +309 93 negative_sampler.num_negs_per_pos 54.0 +309 93 training.batch_size 0.0 +309 94 
model.embedding_dim 2.0 +309 94 loss.margin 21.162034264695162 +309 94 loss.adversarial_temperature 0.6499043616735704 +309 94 negative_sampler.num_negs_per_pos 7.0 +309 94 training.batch_size 2.0 +309 95 model.embedding_dim 0.0 +309 95 loss.margin 28.845747604615184 +309 95 loss.adversarial_temperature 0.8940317058403289 +309 95 negative_sampler.num_negs_per_pos 15.0 +309 95 training.batch_size 2.0 +309 96 model.embedding_dim 0.0 +309 96 loss.margin 28.573097523230356 +309 96 loss.adversarial_temperature 0.5173729461136383 +309 96 negative_sampler.num_negs_per_pos 8.0 +309 96 training.batch_size 1.0 +309 97 model.embedding_dim 1.0 +309 97 loss.margin 7.253244203059468 +309 97 loss.adversarial_temperature 0.18056497084540357 +309 97 negative_sampler.num_negs_per_pos 71.0 +309 97 training.batch_size 2.0 +309 98 model.embedding_dim 2.0 +309 98 loss.margin 29.35116155251024 +309 98 loss.adversarial_temperature 0.9549468183584505 +309 98 negative_sampler.num_negs_per_pos 46.0 +309 98 training.batch_size 2.0 +309 99 model.embedding_dim 2.0 +309 99 loss.margin 17.331821259555447 +309 99 loss.adversarial_temperature 0.45191625469400976 +309 99 negative_sampler.num_negs_per_pos 41.0 +309 99 training.batch_size 0.0 +309 100 model.embedding_dim 0.0 +309 100 loss.margin 29.869120297709706 +309 100 loss.adversarial_temperature 0.2997560247461497 +309 100 negative_sampler.num_negs_per_pos 79.0 +309 100 training.batch_size 1.0 +309 1 dataset """kinships""" +309 1 model """hole""" +309 1 loss """nssa""" +309 1 regularizer """no""" +309 1 optimizer """adadelta""" +309 1 training_loop """owa""" +309 1 negative_sampler """basic""" +309 1 evaluator """rankbased""" +309 2 dataset """kinships""" +309 2 model """hole""" +309 2 loss """nssa""" +309 2 regularizer """no""" +309 2 optimizer """adadelta""" +309 2 training_loop """owa""" +309 2 negative_sampler """basic""" +309 2 evaluator """rankbased""" +309 3 dataset """kinships""" +309 3 model """hole""" +309 3 loss """nssa""" +309 3 
regularizer """no""" +309 3 optimizer """adadelta""" +309 3 training_loop """owa""" +309 3 negative_sampler """basic""" +309 3 evaluator """rankbased""" +309 4 dataset """kinships""" +309 4 model """hole""" +309 4 loss """nssa""" +309 4 regularizer """no""" +309 4 optimizer """adadelta""" +309 4 training_loop """owa""" +309 4 negative_sampler """basic""" +309 4 evaluator """rankbased""" +309 5 dataset """kinships""" +309 5 model """hole""" +309 5 loss """nssa""" +309 5 regularizer """no""" +309 5 optimizer """adadelta""" +309 5 training_loop """owa""" +309 5 negative_sampler """basic""" +309 5 evaluator """rankbased""" +309 6 dataset """kinships""" +309 6 model """hole""" +309 6 loss """nssa""" +309 6 regularizer """no""" +309 6 optimizer """adadelta""" +309 6 training_loop """owa""" +309 6 negative_sampler """basic""" +309 6 evaluator """rankbased""" +309 7 dataset """kinships""" +309 7 model """hole""" +309 7 loss """nssa""" +309 7 regularizer """no""" +309 7 optimizer """adadelta""" +309 7 training_loop """owa""" +309 7 negative_sampler """basic""" +309 7 evaluator """rankbased""" +309 8 dataset """kinships""" +309 8 model """hole""" +309 8 loss """nssa""" +309 8 regularizer """no""" +309 8 optimizer """adadelta""" +309 8 training_loop """owa""" +309 8 negative_sampler """basic""" +309 8 evaluator """rankbased""" +309 9 dataset """kinships""" +309 9 model """hole""" +309 9 loss """nssa""" +309 9 regularizer """no""" +309 9 optimizer """adadelta""" +309 9 training_loop """owa""" +309 9 negative_sampler """basic""" +309 9 evaluator """rankbased""" +309 10 dataset """kinships""" +309 10 model """hole""" +309 10 loss """nssa""" +309 10 regularizer """no""" +309 10 optimizer """adadelta""" +309 10 training_loop """owa""" +309 10 negative_sampler """basic""" +309 10 evaluator """rankbased""" +309 11 dataset """kinships""" +309 11 model """hole""" +309 11 loss """nssa""" +309 11 regularizer """no""" +309 11 optimizer """adadelta""" +309 11 training_loop """owa""" +309 
11 negative_sampler """basic""" +309 11 evaluator """rankbased""" +309 12 dataset """kinships""" +309 12 model """hole""" +309 12 loss """nssa""" +309 12 regularizer """no""" +309 12 optimizer """adadelta""" +309 12 training_loop """owa""" +309 12 negative_sampler """basic""" +309 12 evaluator """rankbased""" +309 13 dataset """kinships""" +309 13 model """hole""" +309 13 loss """nssa""" +309 13 regularizer """no""" +309 13 optimizer """adadelta""" +309 13 training_loop """owa""" +309 13 negative_sampler """basic""" +309 13 evaluator """rankbased""" +309 14 dataset """kinships""" +309 14 model """hole""" +309 14 loss """nssa""" +309 14 regularizer """no""" +309 14 optimizer """adadelta""" +309 14 training_loop """owa""" +309 14 negative_sampler """basic""" +309 14 evaluator """rankbased""" +309 15 dataset """kinships""" +309 15 model """hole""" +309 15 loss """nssa""" +309 15 regularizer """no""" +309 15 optimizer """adadelta""" +309 15 training_loop """owa""" +309 15 negative_sampler """basic""" +309 15 evaluator """rankbased""" +309 16 dataset """kinships""" +309 16 model """hole""" +309 16 loss """nssa""" +309 16 regularizer """no""" +309 16 optimizer """adadelta""" +309 16 training_loop """owa""" +309 16 negative_sampler """basic""" +309 16 evaluator """rankbased""" +309 17 dataset """kinships""" +309 17 model """hole""" +309 17 loss """nssa""" +309 17 regularizer """no""" +309 17 optimizer """adadelta""" +309 17 training_loop """owa""" +309 17 negative_sampler """basic""" +309 17 evaluator """rankbased""" +309 18 dataset """kinships""" +309 18 model """hole""" +309 18 loss """nssa""" +309 18 regularizer """no""" +309 18 optimizer """adadelta""" +309 18 training_loop """owa""" +309 18 negative_sampler """basic""" +309 18 evaluator """rankbased""" +309 19 dataset """kinships""" +309 19 model """hole""" +309 19 loss """nssa""" +309 19 regularizer """no""" +309 19 optimizer """adadelta""" +309 19 training_loop """owa""" +309 19 negative_sampler """basic""" +309 19 
evaluator """rankbased""" +309 20 dataset """kinships""" +309 20 model """hole""" +309 20 loss """nssa""" +309 20 regularizer """no""" +309 20 optimizer """adadelta""" +309 20 training_loop """owa""" +309 20 negative_sampler """basic""" +309 20 evaluator """rankbased""" +309 21 dataset """kinships""" +309 21 model """hole""" +309 21 loss """nssa""" +309 21 regularizer """no""" +309 21 optimizer """adadelta""" +309 21 training_loop """owa""" +309 21 negative_sampler """basic""" +309 21 evaluator """rankbased""" +309 22 dataset """kinships""" +309 22 model """hole""" +309 22 loss """nssa""" +309 22 regularizer """no""" +309 22 optimizer """adadelta""" +309 22 training_loop """owa""" +309 22 negative_sampler """basic""" +309 22 evaluator """rankbased""" +309 23 dataset """kinships""" +309 23 model """hole""" +309 23 loss """nssa""" +309 23 regularizer """no""" +309 23 optimizer """adadelta""" +309 23 training_loop """owa""" +309 23 negative_sampler """basic""" +309 23 evaluator """rankbased""" +309 24 dataset """kinships""" +309 24 model """hole""" +309 24 loss """nssa""" +309 24 regularizer """no""" +309 24 optimizer """adadelta""" +309 24 training_loop """owa""" +309 24 negative_sampler """basic""" +309 24 evaluator """rankbased""" +309 25 dataset """kinships""" +309 25 model """hole""" +309 25 loss """nssa""" +309 25 regularizer """no""" +309 25 optimizer """adadelta""" +309 25 training_loop """owa""" +309 25 negative_sampler """basic""" +309 25 evaluator """rankbased""" +309 26 dataset """kinships""" +309 26 model """hole""" +309 26 loss """nssa""" +309 26 regularizer """no""" +309 26 optimizer """adadelta""" +309 26 training_loop """owa""" +309 26 negative_sampler """basic""" +309 26 evaluator """rankbased""" +309 27 dataset """kinships""" +309 27 model """hole""" +309 27 loss """nssa""" +309 27 regularizer """no""" +309 27 optimizer """adadelta""" +309 27 training_loop """owa""" +309 27 negative_sampler """basic""" +309 27 evaluator """rankbased""" +309 28 
dataset """kinships""" +309 28 model """hole""" +309 28 loss """nssa""" +309 28 regularizer """no""" +309 28 optimizer """adadelta""" +309 28 training_loop """owa""" +309 28 negative_sampler """basic""" +309 28 evaluator """rankbased""" +309 29 dataset """kinships""" +309 29 model """hole""" +309 29 loss """nssa""" +309 29 regularizer """no""" +309 29 optimizer """adadelta""" +309 29 training_loop """owa""" +309 29 negative_sampler """basic""" +309 29 evaluator """rankbased""" +309 30 dataset """kinships""" +309 30 model """hole""" +309 30 loss """nssa""" +309 30 regularizer """no""" +309 30 optimizer """adadelta""" +309 30 training_loop """owa""" +309 30 negative_sampler """basic""" +309 30 evaluator """rankbased""" +309 31 dataset """kinships""" +309 31 model """hole""" +309 31 loss """nssa""" +309 31 regularizer """no""" +309 31 optimizer """adadelta""" +309 31 training_loop """owa""" +309 31 negative_sampler """basic""" +309 31 evaluator """rankbased""" +309 32 dataset """kinships""" +309 32 model """hole""" +309 32 loss """nssa""" +309 32 regularizer """no""" +309 32 optimizer """adadelta""" +309 32 training_loop """owa""" +309 32 negative_sampler """basic""" +309 32 evaluator """rankbased""" +309 33 dataset """kinships""" +309 33 model """hole""" +309 33 loss """nssa""" +309 33 regularizer """no""" +309 33 optimizer """adadelta""" +309 33 training_loop """owa""" +309 33 negative_sampler """basic""" +309 33 evaluator """rankbased""" +309 34 dataset """kinships""" +309 34 model """hole""" +309 34 loss """nssa""" +309 34 regularizer """no""" +309 34 optimizer """adadelta""" +309 34 training_loop """owa""" +309 34 negative_sampler """basic""" +309 34 evaluator """rankbased""" +309 35 dataset """kinships""" +309 35 model """hole""" +309 35 loss """nssa""" +309 35 regularizer """no""" +309 35 optimizer """adadelta""" +309 35 training_loop """owa""" +309 35 negative_sampler """basic""" +309 35 evaluator """rankbased""" +309 36 dataset """kinships""" +309 36 model 
"""hole""" +309 36 loss """nssa""" +309 36 regularizer """no""" +309 36 optimizer """adadelta""" +309 36 training_loop """owa""" +309 36 negative_sampler """basic""" +309 36 evaluator """rankbased""" +309 37 dataset """kinships""" +309 37 model """hole""" +309 37 loss """nssa""" +309 37 regularizer """no""" +309 37 optimizer """adadelta""" +309 37 training_loop """owa""" +309 37 negative_sampler """basic""" +309 37 evaluator """rankbased""" +309 38 dataset """kinships""" +309 38 model """hole""" +309 38 loss """nssa""" +309 38 regularizer """no""" +309 38 optimizer """adadelta""" +309 38 training_loop """owa""" +309 38 negative_sampler """basic""" +309 38 evaluator """rankbased""" +309 39 dataset """kinships""" +309 39 model """hole""" +309 39 loss """nssa""" +309 39 regularizer """no""" +309 39 optimizer """adadelta""" +309 39 training_loop """owa""" +309 39 negative_sampler """basic""" +309 39 evaluator """rankbased""" +309 40 dataset """kinships""" +309 40 model """hole""" +309 40 loss """nssa""" +309 40 regularizer """no""" +309 40 optimizer """adadelta""" +309 40 training_loop """owa""" +309 40 negative_sampler """basic""" +309 40 evaluator """rankbased""" +309 41 dataset """kinships""" +309 41 model """hole""" +309 41 loss """nssa""" +309 41 regularizer """no""" +309 41 optimizer """adadelta""" +309 41 training_loop """owa""" +309 41 negative_sampler """basic""" +309 41 evaluator """rankbased""" +309 42 dataset """kinships""" +309 42 model """hole""" +309 42 loss """nssa""" +309 42 regularizer """no""" +309 42 optimizer """adadelta""" +309 42 training_loop """owa""" +309 42 negative_sampler """basic""" +309 42 evaluator """rankbased""" +309 43 dataset """kinships""" +309 43 model """hole""" +309 43 loss """nssa""" +309 43 regularizer """no""" +309 43 optimizer """adadelta""" +309 43 training_loop """owa""" +309 43 negative_sampler """basic""" +309 43 evaluator """rankbased""" +309 44 dataset """kinships""" +309 44 model """hole""" +309 44 loss """nssa""" +309 
44 regularizer """no""" +309 44 optimizer """adadelta""" +309 44 training_loop """owa""" +309 44 negative_sampler """basic""" +309 44 evaluator """rankbased""" +309 45 dataset """kinships""" +309 45 model """hole""" +309 45 loss """nssa""" +309 45 regularizer """no""" +309 45 optimizer """adadelta""" +309 45 training_loop """owa""" +309 45 negative_sampler """basic""" +309 45 evaluator """rankbased""" +309 46 dataset """kinships""" +309 46 model """hole""" +309 46 loss """nssa""" +309 46 regularizer """no""" +309 46 optimizer """adadelta""" +309 46 training_loop """owa""" +309 46 negative_sampler """basic""" +309 46 evaluator """rankbased""" +309 47 dataset """kinships""" +309 47 model """hole""" +309 47 loss """nssa""" +309 47 regularizer """no""" +309 47 optimizer """adadelta""" +309 47 training_loop """owa""" +309 47 negative_sampler """basic""" +309 47 evaluator """rankbased""" +309 48 dataset """kinships""" +309 48 model """hole""" +309 48 loss """nssa""" +309 48 regularizer """no""" +309 48 optimizer """adadelta""" +309 48 training_loop """owa""" +309 48 negative_sampler """basic""" +309 48 evaluator """rankbased""" +309 49 dataset """kinships""" +309 49 model """hole""" +309 49 loss """nssa""" +309 49 regularizer """no""" +309 49 optimizer """adadelta""" +309 49 training_loop """owa""" +309 49 negative_sampler """basic""" +309 49 evaluator """rankbased""" +309 50 dataset """kinships""" +309 50 model """hole""" +309 50 loss """nssa""" +309 50 regularizer """no""" +309 50 optimizer """adadelta""" +309 50 training_loop """owa""" +309 50 negative_sampler """basic""" +309 50 evaluator """rankbased""" +309 51 dataset """kinships""" +309 51 model """hole""" +309 51 loss """nssa""" +309 51 regularizer """no""" +309 51 optimizer """adadelta""" +309 51 training_loop """owa""" +309 51 negative_sampler """basic""" +309 51 evaluator """rankbased""" +309 52 dataset """kinships""" +309 52 model """hole""" +309 52 loss """nssa""" +309 52 regularizer """no""" +309 52 
optimizer """adadelta""" +309 52 training_loop """owa""" +309 52 negative_sampler """basic""" +309 52 evaluator """rankbased""" +309 53 dataset """kinships""" +309 53 model """hole""" +309 53 loss """nssa""" +309 53 regularizer """no""" +309 53 optimizer """adadelta""" +309 53 training_loop """owa""" +309 53 negative_sampler """basic""" +309 53 evaluator """rankbased""" +309 54 dataset """kinships""" +309 54 model """hole""" +309 54 loss """nssa""" +309 54 regularizer """no""" +309 54 optimizer """adadelta""" +309 54 training_loop """owa""" +309 54 negative_sampler """basic""" +309 54 evaluator """rankbased""" +309 55 dataset """kinships""" +309 55 model """hole""" +309 55 loss """nssa""" +309 55 regularizer """no""" +309 55 optimizer """adadelta""" +309 55 training_loop """owa""" +309 55 negative_sampler """basic""" +309 55 evaluator """rankbased""" +309 56 dataset """kinships""" +309 56 model """hole""" +309 56 loss """nssa""" +309 56 regularizer """no""" +309 56 optimizer """adadelta""" +309 56 training_loop """owa""" +309 56 negative_sampler """basic""" +309 56 evaluator """rankbased""" +309 57 dataset """kinships""" +309 57 model """hole""" +309 57 loss """nssa""" +309 57 regularizer """no""" +309 57 optimizer """adadelta""" +309 57 training_loop """owa""" +309 57 negative_sampler """basic""" +309 57 evaluator """rankbased""" +309 58 dataset """kinships""" +309 58 model """hole""" +309 58 loss """nssa""" +309 58 regularizer """no""" +309 58 optimizer """adadelta""" +309 58 training_loop """owa""" +309 58 negative_sampler """basic""" +309 58 evaluator """rankbased""" +309 59 dataset """kinships""" +309 59 model """hole""" +309 59 loss """nssa""" +309 59 regularizer """no""" +309 59 optimizer """adadelta""" +309 59 training_loop """owa""" +309 59 negative_sampler """basic""" +309 59 evaluator """rankbased""" +309 60 dataset """kinships""" +309 60 model """hole""" +309 60 loss """nssa""" +309 60 regularizer """no""" +309 60 optimizer """adadelta""" +309 60 
training_loop """owa""" +309 60 negative_sampler """basic""" +309 60 evaluator """rankbased""" +309 61 dataset """kinships""" +309 61 model """hole""" +309 61 loss """nssa""" +309 61 regularizer """no""" +309 61 optimizer """adadelta""" +309 61 training_loop """owa""" +309 61 negative_sampler """basic""" +309 61 evaluator """rankbased""" +309 62 dataset """kinships""" +309 62 model """hole""" +309 62 loss """nssa""" +309 62 regularizer """no""" +309 62 optimizer """adadelta""" +309 62 training_loop """owa""" +309 62 negative_sampler """basic""" +309 62 evaluator """rankbased""" +309 63 dataset """kinships""" +309 63 model """hole""" +309 63 loss """nssa""" +309 63 regularizer """no""" +309 63 optimizer """adadelta""" +309 63 training_loop """owa""" +309 63 negative_sampler """basic""" +309 63 evaluator """rankbased""" +309 64 dataset """kinships""" +309 64 model """hole""" +309 64 loss """nssa""" +309 64 regularizer """no""" +309 64 optimizer """adadelta""" +309 64 training_loop """owa""" +309 64 negative_sampler """basic""" +309 64 evaluator """rankbased""" +309 65 dataset """kinships""" +309 65 model """hole""" +309 65 loss """nssa""" +309 65 regularizer """no""" +309 65 optimizer """adadelta""" +309 65 training_loop """owa""" +309 65 negative_sampler """basic""" +309 65 evaluator """rankbased""" +309 66 dataset """kinships""" +309 66 model """hole""" +309 66 loss """nssa""" +309 66 regularizer """no""" +309 66 optimizer """adadelta""" +309 66 training_loop """owa""" +309 66 negative_sampler """basic""" +309 66 evaluator """rankbased""" +309 67 dataset """kinships""" +309 67 model """hole""" +309 67 loss """nssa""" +309 67 regularizer """no""" +309 67 optimizer """adadelta""" +309 67 training_loop """owa""" +309 67 negative_sampler """basic""" +309 67 evaluator """rankbased""" +309 68 dataset """kinships""" +309 68 model """hole""" +309 68 loss """nssa""" +309 68 regularizer """no""" +309 68 optimizer """adadelta""" +309 68 training_loop """owa""" +309 68 
negative_sampler """basic""" +309 68 evaluator """rankbased""" +309 69 dataset """kinships""" +309 69 model """hole""" +309 69 loss """nssa""" +309 69 regularizer """no""" +309 69 optimizer """adadelta""" +309 69 training_loop """owa""" +309 69 negative_sampler """basic""" +309 69 evaluator """rankbased""" +309 70 dataset """kinships""" +309 70 model """hole""" +309 70 loss """nssa""" +309 70 regularizer """no""" +309 70 optimizer """adadelta""" +309 70 training_loop """owa""" +309 70 negative_sampler """basic""" +309 70 evaluator """rankbased""" +309 71 dataset """kinships""" +309 71 model """hole""" +309 71 loss """nssa""" +309 71 regularizer """no""" +309 71 optimizer """adadelta""" +309 71 training_loop """owa""" +309 71 negative_sampler """basic""" +309 71 evaluator """rankbased""" +309 72 dataset """kinships""" +309 72 model """hole""" +309 72 loss """nssa""" +309 72 regularizer """no""" +309 72 optimizer """adadelta""" +309 72 training_loop """owa""" +309 72 negative_sampler """basic""" +309 72 evaluator """rankbased""" +309 73 dataset """kinships""" +309 73 model """hole""" +309 73 loss """nssa""" +309 73 regularizer """no""" +309 73 optimizer """adadelta""" +309 73 training_loop """owa""" +309 73 negative_sampler """basic""" +309 73 evaluator """rankbased""" +309 74 dataset """kinships""" +309 74 model """hole""" +309 74 loss """nssa""" +309 74 regularizer """no""" +309 74 optimizer """adadelta""" +309 74 training_loop """owa""" +309 74 negative_sampler """basic""" +309 74 evaluator """rankbased""" +309 75 dataset """kinships""" +309 75 model """hole""" +309 75 loss """nssa""" +309 75 regularizer """no""" +309 75 optimizer """adadelta""" +309 75 training_loop """owa""" +309 75 negative_sampler """basic""" +309 75 evaluator """rankbased""" +309 76 dataset """kinships""" +309 76 model """hole""" +309 76 loss """nssa""" +309 76 regularizer """no""" +309 76 optimizer """adadelta""" +309 76 training_loop """owa""" +309 76 negative_sampler """basic""" +309 76 
evaluator """rankbased""" +309 77 dataset """kinships""" +309 77 model """hole""" +309 77 loss """nssa""" +309 77 regularizer """no""" +309 77 optimizer """adadelta""" +309 77 training_loop """owa""" +309 77 negative_sampler """basic""" +309 77 evaluator """rankbased""" +309 78 dataset """kinships""" +309 78 model """hole""" +309 78 loss """nssa""" +309 78 regularizer """no""" +309 78 optimizer """adadelta""" +309 78 training_loop """owa""" +309 78 negative_sampler """basic""" +309 78 evaluator """rankbased""" +309 79 dataset """kinships""" +309 79 model """hole""" +309 79 loss """nssa""" +309 79 regularizer """no""" +309 79 optimizer """adadelta""" +309 79 training_loop """owa""" +309 79 negative_sampler """basic""" +309 79 evaluator """rankbased""" +309 80 dataset """kinships""" +309 80 model """hole""" +309 80 loss """nssa""" +309 80 regularizer """no""" +309 80 optimizer """adadelta""" +309 80 training_loop """owa""" +309 80 negative_sampler """basic""" +309 80 evaluator """rankbased""" +309 81 dataset """kinships""" +309 81 model """hole""" +309 81 loss """nssa""" +309 81 regularizer """no""" +309 81 optimizer """adadelta""" +309 81 training_loop """owa""" +309 81 negative_sampler """basic""" +309 81 evaluator """rankbased""" +309 82 dataset """kinships""" +309 82 model """hole""" +309 82 loss """nssa""" +309 82 regularizer """no""" +309 82 optimizer """adadelta""" +309 82 training_loop """owa""" +309 82 negative_sampler """basic""" +309 82 evaluator """rankbased""" +309 83 dataset """kinships""" +309 83 model """hole""" +309 83 loss """nssa""" +309 83 regularizer """no""" +309 83 optimizer """adadelta""" +309 83 training_loop """owa""" +309 83 negative_sampler """basic""" +309 83 evaluator """rankbased""" +309 84 dataset """kinships""" +309 84 model """hole""" +309 84 loss """nssa""" +309 84 regularizer """no""" +309 84 optimizer """adadelta""" +309 84 training_loop """owa""" +309 84 negative_sampler """basic""" +309 84 evaluator """rankbased""" +309 85 
dataset """kinships""" +309 85 model """hole""" +309 85 loss """nssa""" +309 85 regularizer """no""" +309 85 optimizer """adadelta""" +309 85 training_loop """owa""" +309 85 negative_sampler """basic""" +309 85 evaluator """rankbased""" +309 86 dataset """kinships""" +309 86 model """hole""" +309 86 loss """nssa""" +309 86 regularizer """no""" +309 86 optimizer """adadelta""" +309 86 training_loop """owa""" +309 86 negative_sampler """basic""" +309 86 evaluator """rankbased""" +309 87 dataset """kinships""" +309 87 model """hole""" +309 87 loss """nssa""" +309 87 regularizer """no""" +309 87 optimizer """adadelta""" +309 87 training_loop """owa""" +309 87 negative_sampler """basic""" +309 87 evaluator """rankbased""" +309 88 dataset """kinships""" +309 88 model """hole""" +309 88 loss """nssa""" +309 88 regularizer """no""" +309 88 optimizer """adadelta""" +309 88 training_loop """owa""" +309 88 negative_sampler """basic""" +309 88 evaluator """rankbased""" +309 89 dataset """kinships""" +309 89 model """hole""" +309 89 loss """nssa""" +309 89 regularizer """no""" +309 89 optimizer """adadelta""" +309 89 training_loop """owa""" +309 89 negative_sampler """basic""" +309 89 evaluator """rankbased""" +309 90 dataset """kinships""" +309 90 model """hole""" +309 90 loss """nssa""" +309 90 regularizer """no""" +309 90 optimizer """adadelta""" +309 90 training_loop """owa""" +309 90 negative_sampler """basic""" +309 90 evaluator """rankbased""" +309 91 dataset """kinships""" +309 91 model """hole""" +309 91 loss """nssa""" +309 91 regularizer """no""" +309 91 optimizer """adadelta""" +309 91 training_loop """owa""" +309 91 negative_sampler """basic""" +309 91 evaluator """rankbased""" +309 92 dataset """kinships""" +309 92 model """hole""" +309 92 loss """nssa""" +309 92 regularizer """no""" +309 92 optimizer """adadelta""" +309 92 training_loop """owa""" +309 92 negative_sampler """basic""" +309 92 evaluator """rankbased""" +309 93 dataset """kinships""" +309 93 model 
"""hole""" +309 93 loss """nssa""" +309 93 regularizer """no""" +309 93 optimizer """adadelta""" +309 93 training_loop """owa""" +309 93 negative_sampler """basic""" +309 93 evaluator """rankbased""" +309 94 dataset """kinships""" +309 94 model """hole""" +309 94 loss """nssa""" +309 94 regularizer """no""" +309 94 optimizer """adadelta""" +309 94 training_loop """owa""" +309 94 negative_sampler """basic""" +309 94 evaluator """rankbased""" +309 95 dataset """kinships""" +309 95 model """hole""" +309 95 loss """nssa""" +309 95 regularizer """no""" +309 95 optimizer """adadelta""" +309 95 training_loop """owa""" +309 95 negative_sampler """basic""" +309 95 evaluator """rankbased""" +309 96 dataset """kinships""" +309 96 model """hole""" +309 96 loss """nssa""" +309 96 regularizer """no""" +309 96 optimizer """adadelta""" +309 96 training_loop """owa""" +309 96 negative_sampler """basic""" +309 96 evaluator """rankbased""" +309 97 dataset """kinships""" +309 97 model """hole""" +309 97 loss """nssa""" +309 97 regularizer """no""" +309 97 optimizer """adadelta""" +309 97 training_loop """owa""" +309 97 negative_sampler """basic""" +309 97 evaluator """rankbased""" +309 98 dataset """kinships""" +309 98 model """hole""" +309 98 loss """nssa""" +309 98 regularizer """no""" +309 98 optimizer """adadelta""" +309 98 training_loop """owa""" +309 98 negative_sampler """basic""" +309 98 evaluator """rankbased""" +309 99 dataset """kinships""" +309 99 model """hole""" +309 99 loss """nssa""" +309 99 regularizer """no""" +309 99 optimizer """adadelta""" +309 99 training_loop """owa""" +309 99 negative_sampler """basic""" +309 99 evaluator """rankbased""" +309 100 dataset """kinships""" +309 100 model """hole""" +309 100 loss """nssa""" +309 100 regularizer """no""" +309 100 optimizer """adadelta""" +309 100 training_loop """owa""" +309 100 negative_sampler """basic""" +309 100 evaluator """rankbased""" +310 1 model.embedding_dim 0.0 +310 1 loss.margin 7.743873047501852 +310 1 
negative_sampler.num_negs_per_pos 61.0 +310 1 training.batch_size 2.0 +310 2 model.embedding_dim 1.0 +310 2 loss.margin 5.76868136891549 +310 2 negative_sampler.num_negs_per_pos 75.0 +310 2 training.batch_size 0.0 +310 3 model.embedding_dim 1.0 +310 3 loss.margin 2.3296383286442626 +310 3 negative_sampler.num_negs_per_pos 60.0 +310 3 training.batch_size 1.0 +310 4 model.embedding_dim 0.0 +310 4 loss.margin 6.118000963747405 +310 4 negative_sampler.num_negs_per_pos 10.0 +310 4 training.batch_size 1.0 +310 5 model.embedding_dim 2.0 +310 5 loss.margin 1.0833239410033717 +310 5 negative_sampler.num_negs_per_pos 66.0 +310 5 training.batch_size 1.0 +310 6 model.embedding_dim 2.0 +310 6 loss.margin 8.003021490483349 +310 6 negative_sampler.num_negs_per_pos 36.0 +310 6 training.batch_size 1.0 +310 7 model.embedding_dim 2.0 +310 7 loss.margin 1.2123669710501346 +310 7 negative_sampler.num_negs_per_pos 70.0 +310 7 training.batch_size 0.0 +310 8 model.embedding_dim 2.0 +310 8 loss.margin 3.706416685423414 +310 8 negative_sampler.num_negs_per_pos 99.0 +310 8 training.batch_size 2.0 +310 9 model.embedding_dim 0.0 +310 9 loss.margin 8.353613453030059 +310 9 negative_sampler.num_negs_per_pos 7.0 +310 9 training.batch_size 2.0 +310 10 model.embedding_dim 0.0 +310 10 loss.margin 9.556100009433077 +310 10 negative_sampler.num_negs_per_pos 59.0 +310 10 training.batch_size 0.0 +310 11 model.embedding_dim 1.0 +310 11 loss.margin 3.875471046852038 +310 11 negative_sampler.num_negs_per_pos 63.0 +310 11 training.batch_size 1.0 +310 12 model.embedding_dim 1.0 +310 12 loss.margin 6.987651295947371 +310 12 negative_sampler.num_negs_per_pos 36.0 +310 12 training.batch_size 0.0 +310 13 model.embedding_dim 1.0 +310 13 loss.margin 8.955388908323426 +310 13 negative_sampler.num_negs_per_pos 65.0 +310 13 training.batch_size 2.0 +310 14 model.embedding_dim 0.0 +310 14 loss.margin 5.764481250440461 +310 14 negative_sampler.num_negs_per_pos 77.0 +310 14 training.batch_size 0.0 +310 15 
model.embedding_dim 0.0 +310 15 loss.margin 7.428168434398377 +310 15 negative_sampler.num_negs_per_pos 31.0 +310 15 training.batch_size 2.0 +310 16 model.embedding_dim 0.0 +310 16 loss.margin 8.76860774530974 +310 16 negative_sampler.num_negs_per_pos 63.0 +310 16 training.batch_size 2.0 +310 17 model.embedding_dim 1.0 +310 17 loss.margin 6.200101650668544 +310 17 negative_sampler.num_negs_per_pos 89.0 +310 17 training.batch_size 1.0 +310 18 model.embedding_dim 1.0 +310 18 loss.margin 0.5617010369013629 +310 18 negative_sampler.num_negs_per_pos 72.0 +310 18 training.batch_size 0.0 +310 19 model.embedding_dim 1.0 +310 19 loss.margin 4.250474469322709 +310 19 negative_sampler.num_negs_per_pos 98.0 +310 19 training.batch_size 1.0 +310 20 model.embedding_dim 0.0 +310 20 loss.margin 3.65220582156163 +310 20 negative_sampler.num_negs_per_pos 73.0 +310 20 training.batch_size 2.0 +310 21 model.embedding_dim 1.0 +310 21 loss.margin 5.402746415220984 +310 21 negative_sampler.num_negs_per_pos 79.0 +310 21 training.batch_size 0.0 +310 22 model.embedding_dim 0.0 +310 22 loss.margin 4.066097966275042 +310 22 negative_sampler.num_negs_per_pos 36.0 +310 22 training.batch_size 1.0 +310 23 model.embedding_dim 0.0 +310 23 loss.margin 6.833840618734057 +310 23 negative_sampler.num_negs_per_pos 54.0 +310 23 training.batch_size 0.0 +310 24 model.embedding_dim 0.0 +310 24 loss.margin 3.024020342623681 +310 24 negative_sampler.num_negs_per_pos 99.0 +310 24 training.batch_size 0.0 +310 25 model.embedding_dim 1.0 +310 25 loss.margin 2.6921379326863892 +310 25 negative_sampler.num_negs_per_pos 49.0 +310 25 training.batch_size 1.0 +310 26 model.embedding_dim 0.0 +310 26 loss.margin 4.1030708197059775 +310 26 negative_sampler.num_negs_per_pos 11.0 +310 26 training.batch_size 2.0 +310 27 model.embedding_dim 0.0 +310 27 loss.margin 1.0411007349013417 +310 27 negative_sampler.num_negs_per_pos 94.0 +310 27 training.batch_size 0.0 +310 28 model.embedding_dim 1.0 +310 28 loss.margin 
8.310203725433965 +310 28 negative_sampler.num_negs_per_pos 68.0 +310 28 training.batch_size 0.0 +310 29 model.embedding_dim 0.0 +310 29 loss.margin 5.78442816661071 +310 29 negative_sampler.num_negs_per_pos 75.0 +310 29 training.batch_size 0.0 +310 30 model.embedding_dim 0.0 +310 30 loss.margin 8.230111029770804 +310 30 negative_sampler.num_negs_per_pos 51.0 +310 30 training.batch_size 0.0 +310 31 model.embedding_dim 1.0 +310 31 loss.margin 3.2843271198459845 +310 31 negative_sampler.num_negs_per_pos 27.0 +310 31 training.batch_size 2.0 +310 32 model.embedding_dim 2.0 +310 32 loss.margin 2.2516968375056665 +310 32 negative_sampler.num_negs_per_pos 48.0 +310 32 training.batch_size 0.0 +310 33 model.embedding_dim 2.0 +310 33 loss.margin 3.6505250317654525 +310 33 negative_sampler.num_negs_per_pos 32.0 +310 33 training.batch_size 0.0 +310 34 model.embedding_dim 1.0 +310 34 loss.margin 8.556857444138805 +310 34 negative_sampler.num_negs_per_pos 64.0 +310 34 training.batch_size 0.0 +310 35 model.embedding_dim 2.0 +310 35 loss.margin 2.585590462304532 +310 35 negative_sampler.num_negs_per_pos 10.0 +310 35 training.batch_size 2.0 +310 36 model.embedding_dim 2.0 +310 36 loss.margin 5.236081407971181 +310 36 negative_sampler.num_negs_per_pos 17.0 +310 36 training.batch_size 1.0 +310 37 model.embedding_dim 2.0 +310 37 loss.margin 6.30377190484188 +310 37 negative_sampler.num_negs_per_pos 98.0 +310 37 training.batch_size 2.0 +310 38 model.embedding_dim 1.0 +310 38 loss.margin 2.6023805645196454 +310 38 negative_sampler.num_negs_per_pos 39.0 +310 38 training.batch_size 1.0 +310 39 model.embedding_dim 2.0 +310 39 loss.margin 6.273069113577463 +310 39 negative_sampler.num_negs_per_pos 33.0 +310 39 training.batch_size 1.0 +310 40 model.embedding_dim 2.0 +310 40 loss.margin 7.160537481131493 +310 40 negative_sampler.num_negs_per_pos 70.0 +310 40 training.batch_size 1.0 +310 41 model.embedding_dim 1.0 +310 41 loss.margin 6.708146582175713 +310 41 negative_sampler.num_negs_per_pos 
51.0 +310 41 training.batch_size 0.0 +310 42 model.embedding_dim 1.0 +310 42 loss.margin 7.849603962343821 +310 42 negative_sampler.num_negs_per_pos 11.0 +310 42 training.batch_size 1.0 +310 43 model.embedding_dim 2.0 +310 43 loss.margin 2.3312401964762586 +310 43 negative_sampler.num_negs_per_pos 62.0 +310 43 training.batch_size 1.0 +310 44 model.embedding_dim 0.0 +310 44 loss.margin 7.796474808801776 +310 44 negative_sampler.num_negs_per_pos 15.0 +310 44 training.batch_size 2.0 +310 45 model.embedding_dim 2.0 +310 45 loss.margin 4.44489054631119 +310 45 negative_sampler.num_negs_per_pos 97.0 +310 45 training.batch_size 2.0 +310 46 model.embedding_dim 0.0 +310 46 loss.margin 2.8621797882932185 +310 46 negative_sampler.num_negs_per_pos 20.0 +310 46 training.batch_size 0.0 +310 47 model.embedding_dim 1.0 +310 47 loss.margin 0.8095872177791934 +310 47 negative_sampler.num_negs_per_pos 3.0 +310 47 training.batch_size 2.0 +310 48 model.embedding_dim 2.0 +310 48 loss.margin 1.8982220221212427 +310 48 negative_sampler.num_negs_per_pos 61.0 +310 48 training.batch_size 0.0 +310 49 model.embedding_dim 0.0 +310 49 loss.margin 7.960129149889315 +310 49 negative_sampler.num_negs_per_pos 91.0 +310 49 training.batch_size 0.0 +310 50 model.embedding_dim 1.0 +310 50 loss.margin 9.439542268814925 +310 50 negative_sampler.num_negs_per_pos 64.0 +310 50 training.batch_size 1.0 +310 51 model.embedding_dim 1.0 +310 51 loss.margin 5.363103899592624 +310 51 negative_sampler.num_negs_per_pos 22.0 +310 51 training.batch_size 1.0 +310 52 model.embedding_dim 2.0 +310 52 loss.margin 1.857450644224072 +310 52 negative_sampler.num_negs_per_pos 48.0 +310 52 training.batch_size 2.0 +310 53 model.embedding_dim 0.0 +310 53 loss.margin 3.869694149141293 +310 53 negative_sampler.num_negs_per_pos 40.0 +310 53 training.batch_size 2.0 +310 54 model.embedding_dim 1.0 +310 54 loss.margin 8.322536168693048 +310 54 negative_sampler.num_negs_per_pos 94.0 +310 54 training.batch_size 1.0 +310 55 
model.embedding_dim 2.0 +310 55 loss.margin 4.2498652200128415 +310 55 negative_sampler.num_negs_per_pos 59.0 +310 55 training.batch_size 2.0 +310 56 model.embedding_dim 1.0 +310 56 loss.margin 9.85365326798265 +310 56 negative_sampler.num_negs_per_pos 28.0 +310 56 training.batch_size 1.0 +310 57 model.embedding_dim 2.0 +310 57 loss.margin 6.239117722167512 +310 57 negative_sampler.num_negs_per_pos 45.0 +310 57 training.batch_size 0.0 +310 58 model.embedding_dim 1.0 +310 58 loss.margin 7.550667489192416 +310 58 negative_sampler.num_negs_per_pos 3.0 +310 58 training.batch_size 1.0 +310 59 model.embedding_dim 2.0 +310 59 loss.margin 1.1494448801123793 +310 59 negative_sampler.num_negs_per_pos 64.0 +310 59 training.batch_size 1.0 +310 60 model.embedding_dim 0.0 +310 60 loss.margin 1.8990009430003971 +310 60 negative_sampler.num_negs_per_pos 34.0 +310 60 training.batch_size 0.0 +310 61 model.embedding_dim 1.0 +310 61 loss.margin 7.55540357145871 +310 61 negative_sampler.num_negs_per_pos 86.0 +310 61 training.batch_size 1.0 +310 62 model.embedding_dim 2.0 +310 62 loss.margin 1.1709629024598465 +310 62 negative_sampler.num_negs_per_pos 63.0 +310 62 training.batch_size 2.0 +310 63 model.embedding_dim 0.0 +310 63 loss.margin 9.895559478336745 +310 63 negative_sampler.num_negs_per_pos 21.0 +310 63 training.batch_size 0.0 +310 64 model.embedding_dim 0.0 +310 64 loss.margin 0.8451137288711619 +310 64 negative_sampler.num_negs_per_pos 53.0 +310 64 training.batch_size 2.0 +310 65 model.embedding_dim 0.0 +310 65 loss.margin 2.2897038318174925 +310 65 negative_sampler.num_negs_per_pos 62.0 +310 65 training.batch_size 1.0 +310 66 model.embedding_dim 0.0 +310 66 loss.margin 3.0493801170242514 +310 66 negative_sampler.num_negs_per_pos 5.0 +310 66 training.batch_size 1.0 +310 67 model.embedding_dim 1.0 +310 67 loss.margin 1.572452562272625 +310 67 negative_sampler.num_negs_per_pos 29.0 +310 67 training.batch_size 0.0 +310 68 model.embedding_dim 2.0 +310 68 loss.margin 
4.855142733432029 +310 68 negative_sampler.num_negs_per_pos 80.0 +310 68 training.batch_size 1.0 +310 69 model.embedding_dim 2.0 +310 69 loss.margin 2.2011373950095026 +310 69 negative_sampler.num_negs_per_pos 37.0 +310 69 training.batch_size 1.0 +310 70 model.embedding_dim 1.0 +310 70 loss.margin 8.178831905573816 +310 70 negative_sampler.num_negs_per_pos 34.0 +310 70 training.batch_size 0.0 +310 71 model.embedding_dim 0.0 +310 71 loss.margin 5.913792049220065 +310 71 negative_sampler.num_negs_per_pos 26.0 +310 71 training.batch_size 0.0 +310 72 model.embedding_dim 2.0 +310 72 loss.margin 9.62254063034599 +310 72 negative_sampler.num_negs_per_pos 78.0 +310 72 training.batch_size 0.0 +310 73 model.embedding_dim 0.0 +310 73 loss.margin 3.1658854039077347 +310 73 negative_sampler.num_negs_per_pos 34.0 +310 73 training.batch_size 0.0 +310 74 model.embedding_dim 0.0 +310 74 loss.margin 5.266805411461983 +310 74 negative_sampler.num_negs_per_pos 39.0 +310 74 training.batch_size 1.0 +310 75 model.embedding_dim 1.0 +310 75 loss.margin 4.612375493939621 +310 75 negative_sampler.num_negs_per_pos 16.0 +310 75 training.batch_size 0.0 +310 76 model.embedding_dim 1.0 +310 76 loss.margin 1.7568649991807352 +310 76 negative_sampler.num_negs_per_pos 98.0 +310 76 training.batch_size 0.0 +310 77 model.embedding_dim 0.0 +310 77 loss.margin 5.495462385175597 +310 77 negative_sampler.num_negs_per_pos 86.0 +310 77 training.batch_size 2.0 +310 78 model.embedding_dim 0.0 +310 78 loss.margin 3.969449118871828 +310 78 negative_sampler.num_negs_per_pos 22.0 +310 78 training.batch_size 2.0 +310 79 model.embedding_dim 2.0 +310 79 loss.margin 3.80780586978159 +310 79 negative_sampler.num_negs_per_pos 26.0 +310 79 training.batch_size 2.0 +310 80 model.embedding_dim 2.0 +310 80 loss.margin 8.217371500697192 +310 80 negative_sampler.num_negs_per_pos 69.0 +310 80 training.batch_size 1.0 +310 81 model.embedding_dim 0.0 +310 81 loss.margin 5.8362540524510615 +310 81 negative_sampler.num_negs_per_pos 
0.0 +310 81 training.batch_size 2.0 +310 82 model.embedding_dim 1.0 +310 82 loss.margin 4.073829039518644 +310 82 negative_sampler.num_negs_per_pos 83.0 +310 82 training.batch_size 2.0 +310 83 model.embedding_dim 2.0 +310 83 loss.margin 4.7604702650875925 +310 83 negative_sampler.num_negs_per_pos 95.0 +310 83 training.batch_size 2.0 +310 84 model.embedding_dim 2.0 +310 84 loss.margin 4.5803999526649 +310 84 negative_sampler.num_negs_per_pos 27.0 +310 84 training.batch_size 0.0 +310 85 model.embedding_dim 2.0 +310 85 loss.margin 9.081440833987774 +310 85 negative_sampler.num_negs_per_pos 12.0 +310 85 training.batch_size 1.0 +310 86 model.embedding_dim 0.0 +310 86 loss.margin 6.927100888291012 +310 86 negative_sampler.num_negs_per_pos 85.0 +310 86 training.batch_size 2.0 +310 87 model.embedding_dim 2.0 +310 87 loss.margin 3.4663355349138594 +310 87 negative_sampler.num_negs_per_pos 84.0 +310 87 training.batch_size 0.0 +310 88 model.embedding_dim 2.0 +310 88 loss.margin 1.5897224034729212 +310 88 negative_sampler.num_negs_per_pos 56.0 +310 88 training.batch_size 0.0 +310 89 model.embedding_dim 1.0 +310 89 loss.margin 5.417333273836687 +310 89 negative_sampler.num_negs_per_pos 65.0 +310 89 training.batch_size 0.0 +310 90 model.embedding_dim 1.0 +310 90 loss.margin 8.328380366869975 +310 90 negative_sampler.num_negs_per_pos 38.0 +310 90 training.batch_size 1.0 +310 91 model.embedding_dim 2.0 +310 91 loss.margin 6.546789445450644 +310 91 negative_sampler.num_negs_per_pos 93.0 +310 91 training.batch_size 0.0 +310 92 model.embedding_dim 2.0 +310 92 loss.margin 2.7557020754411488 +310 92 negative_sampler.num_negs_per_pos 43.0 +310 92 training.batch_size 0.0 +310 93 model.embedding_dim 0.0 +310 93 loss.margin 5.269882152854244 +310 93 negative_sampler.num_negs_per_pos 92.0 +310 93 training.batch_size 2.0 +310 94 model.embedding_dim 0.0 +310 94 loss.margin 0.643922747977745 +310 94 negative_sampler.num_negs_per_pos 1.0 +310 94 training.batch_size 2.0 +310 95 
model.embedding_dim 2.0 +310 95 loss.margin 0.5231809335004203 +310 95 negative_sampler.num_negs_per_pos 15.0 +310 95 training.batch_size 2.0 +310 96 model.embedding_dim 2.0 +310 96 loss.margin 8.264033579288586 +310 96 negative_sampler.num_negs_per_pos 61.0 +310 96 training.batch_size 2.0 +310 97 model.embedding_dim 1.0 +310 97 loss.margin 7.315569983132545 +310 97 negative_sampler.num_negs_per_pos 26.0 +310 97 training.batch_size 1.0 +310 98 model.embedding_dim 1.0 +310 98 loss.margin 1.2005041724446825 +310 98 negative_sampler.num_negs_per_pos 16.0 +310 98 training.batch_size 0.0 +310 99 model.embedding_dim 1.0 +310 99 loss.margin 4.0077072481853975 +310 99 negative_sampler.num_negs_per_pos 56.0 +310 99 training.batch_size 0.0 +310 100 model.embedding_dim 0.0 +310 100 loss.margin 3.9756998596516144 +310 100 negative_sampler.num_negs_per_pos 41.0 +310 100 training.batch_size 2.0 +310 1 dataset """kinships""" +310 1 model """hole""" +310 1 loss """marginranking""" +310 1 regularizer """no""" +310 1 optimizer """adadelta""" +310 1 training_loop """owa""" +310 1 negative_sampler """basic""" +310 1 evaluator """rankbased""" +310 2 dataset """kinships""" +310 2 model """hole""" +310 2 loss """marginranking""" +310 2 regularizer """no""" +310 2 optimizer """adadelta""" +310 2 training_loop """owa""" +310 2 negative_sampler """basic""" +310 2 evaluator """rankbased""" +310 3 dataset """kinships""" +310 3 model """hole""" +310 3 loss """marginranking""" +310 3 regularizer """no""" +310 3 optimizer """adadelta""" +310 3 training_loop """owa""" +310 3 negative_sampler """basic""" +310 3 evaluator """rankbased""" +310 4 dataset """kinships""" +310 4 model """hole""" +310 4 loss """marginranking""" +310 4 regularizer """no""" +310 4 optimizer """adadelta""" +310 4 training_loop """owa""" +310 4 negative_sampler """basic""" +310 4 evaluator """rankbased""" +310 5 dataset """kinships""" +310 5 model """hole""" +310 5 loss """marginranking""" +310 5 regularizer """no""" +310 5 
optimizer """adadelta""" +310 5 training_loop """owa""" +310 5 negative_sampler """basic""" +310 5 evaluator """rankbased""" +310 6 dataset """kinships""" +310 6 model """hole""" +310 6 loss """marginranking""" +310 6 regularizer """no""" +310 6 optimizer """adadelta""" +310 6 training_loop """owa""" +310 6 negative_sampler """basic""" +310 6 evaluator """rankbased""" +310 7 dataset """kinships""" +310 7 model """hole""" +310 7 loss """marginranking""" +310 7 regularizer """no""" +310 7 optimizer """adadelta""" +310 7 training_loop """owa""" +310 7 negative_sampler """basic""" +310 7 evaluator """rankbased""" +310 8 dataset """kinships""" +310 8 model """hole""" +310 8 loss """marginranking""" +310 8 regularizer """no""" +310 8 optimizer """adadelta""" +310 8 training_loop """owa""" +310 8 negative_sampler """basic""" +310 8 evaluator """rankbased""" +310 9 dataset """kinships""" +310 9 model """hole""" +310 9 loss """marginranking""" +310 9 regularizer """no""" +310 9 optimizer """adadelta""" +310 9 training_loop """owa""" +310 9 negative_sampler """basic""" +310 9 evaluator """rankbased""" +310 10 dataset """kinships""" +310 10 model """hole""" +310 10 loss """marginranking""" +310 10 regularizer """no""" +310 10 optimizer """adadelta""" +310 10 training_loop """owa""" +310 10 negative_sampler """basic""" +310 10 evaluator """rankbased""" +310 11 dataset """kinships""" +310 11 model """hole""" +310 11 loss """marginranking""" +310 11 regularizer """no""" +310 11 optimizer """adadelta""" +310 11 training_loop """owa""" +310 11 negative_sampler """basic""" +310 11 evaluator """rankbased""" +310 12 dataset """kinships""" +310 12 model """hole""" +310 12 loss """marginranking""" +310 12 regularizer """no""" +310 12 optimizer """adadelta""" +310 12 training_loop """owa""" +310 12 negative_sampler """basic""" +310 12 evaluator """rankbased""" +310 13 dataset """kinships""" +310 13 model """hole""" +310 13 loss """marginranking""" +310 13 regularizer """no""" +310 13 
optimizer """adadelta""" +310 13 training_loop """owa""" +310 13 negative_sampler """basic""" +310 13 evaluator """rankbased""" +310 14 dataset """kinships""" +310 14 model """hole""" +310 14 loss """marginranking""" +310 14 regularizer """no""" +310 14 optimizer """adadelta""" +310 14 training_loop """owa""" +310 14 negative_sampler """basic""" +310 14 evaluator """rankbased""" +310 15 dataset """kinships""" +310 15 model """hole""" +310 15 loss """marginranking""" +310 15 regularizer """no""" +310 15 optimizer """adadelta""" +310 15 training_loop """owa""" +310 15 negative_sampler """basic""" +310 15 evaluator """rankbased""" +310 16 dataset """kinships""" +310 16 model """hole""" +310 16 loss """marginranking""" +310 16 regularizer """no""" +310 16 optimizer """adadelta""" +310 16 training_loop """owa""" +310 16 negative_sampler """basic""" +310 16 evaluator """rankbased""" +310 17 dataset """kinships""" +310 17 model """hole""" +310 17 loss """marginranking""" +310 17 regularizer """no""" +310 17 optimizer """adadelta""" +310 17 training_loop """owa""" +310 17 negative_sampler """basic""" +310 17 evaluator """rankbased""" +310 18 dataset """kinships""" +310 18 model """hole""" +310 18 loss """marginranking""" +310 18 regularizer """no""" +310 18 optimizer """adadelta""" +310 18 training_loop """owa""" +310 18 negative_sampler """basic""" +310 18 evaluator """rankbased""" +310 19 dataset """kinships""" +310 19 model """hole""" +310 19 loss """marginranking""" +310 19 regularizer """no""" +310 19 optimizer """adadelta""" +310 19 training_loop """owa""" +310 19 negative_sampler """basic""" +310 19 evaluator """rankbased""" +310 20 dataset """kinships""" +310 20 model """hole""" +310 20 loss """marginranking""" +310 20 regularizer """no""" +310 20 optimizer """adadelta""" +310 20 training_loop """owa""" +310 20 negative_sampler """basic""" +310 20 evaluator """rankbased""" +310 21 dataset """kinships""" +310 21 model """hole""" +310 21 loss """marginranking""" +310 
21 regularizer """no""" +310 21 optimizer """adadelta""" +310 21 training_loop """owa""" +310 21 negative_sampler """basic""" +310 21 evaluator """rankbased""" +310 22 dataset """kinships""" +310 22 model """hole""" +310 22 loss """marginranking""" +310 22 regularizer """no""" +310 22 optimizer """adadelta""" +310 22 training_loop """owa""" +310 22 negative_sampler """basic""" +310 22 evaluator """rankbased""" +310 23 dataset """kinships""" +310 23 model """hole""" +310 23 loss """marginranking""" +310 23 regularizer """no""" +310 23 optimizer """adadelta""" +310 23 training_loop """owa""" +310 23 negative_sampler """basic""" +310 23 evaluator """rankbased""" +310 24 dataset """kinships""" +310 24 model """hole""" +310 24 loss """marginranking""" +310 24 regularizer """no""" +310 24 optimizer """adadelta""" +310 24 training_loop """owa""" +310 24 negative_sampler """basic""" +310 24 evaluator """rankbased""" +310 25 dataset """kinships""" +310 25 model """hole""" +310 25 loss """marginranking""" +310 25 regularizer """no""" +310 25 optimizer """adadelta""" +310 25 training_loop """owa""" +310 25 negative_sampler """basic""" +310 25 evaluator """rankbased""" +310 26 dataset """kinships""" +310 26 model """hole""" +310 26 loss """marginranking""" +310 26 regularizer """no""" +310 26 optimizer """adadelta""" +310 26 training_loop """owa""" +310 26 negative_sampler """basic""" +310 26 evaluator """rankbased""" +310 27 dataset """kinships""" +310 27 model """hole""" +310 27 loss """marginranking""" +310 27 regularizer """no""" +310 27 optimizer """adadelta""" +310 27 training_loop """owa""" +310 27 negative_sampler """basic""" +310 27 evaluator """rankbased""" +310 28 dataset """kinships""" +310 28 model """hole""" +310 28 loss """marginranking""" +310 28 regularizer """no""" +310 28 optimizer """adadelta""" +310 28 training_loop """owa""" +310 28 negative_sampler """basic""" +310 28 evaluator """rankbased""" +310 29 dataset """kinships""" +310 29 model """hole""" +310 
29 loss """marginranking""" +310 29 regularizer """no""" +310 29 optimizer """adadelta""" +310 29 training_loop """owa""" +310 29 negative_sampler """basic""" +310 29 evaluator """rankbased""" +310 30 dataset """kinships""" +310 30 model """hole""" +310 30 loss """marginranking""" +310 30 regularizer """no""" +310 30 optimizer """adadelta""" +310 30 training_loop """owa""" +310 30 negative_sampler """basic""" +310 30 evaluator """rankbased""" +310 31 dataset """kinships""" +310 31 model """hole""" +310 31 loss """marginranking""" +310 31 regularizer """no""" +310 31 optimizer """adadelta""" +310 31 training_loop """owa""" +310 31 negative_sampler """basic""" +310 31 evaluator """rankbased""" +310 32 dataset """kinships""" +310 32 model """hole""" +310 32 loss """marginranking""" +310 32 regularizer """no""" +310 32 optimizer """adadelta""" +310 32 training_loop """owa""" +310 32 negative_sampler """basic""" +310 32 evaluator """rankbased""" +310 33 dataset """kinships""" +310 33 model """hole""" +310 33 loss """marginranking""" +310 33 regularizer """no""" +310 33 optimizer """adadelta""" +310 33 training_loop """owa""" +310 33 negative_sampler """basic""" +310 33 evaluator """rankbased""" +310 34 dataset """kinships""" +310 34 model """hole""" +310 34 loss """marginranking""" +310 34 regularizer """no""" +310 34 optimizer """adadelta""" +310 34 training_loop """owa""" +310 34 negative_sampler """basic""" +310 34 evaluator """rankbased""" +310 35 dataset """kinships""" +310 35 model """hole""" +310 35 loss """marginranking""" +310 35 regularizer """no""" +310 35 optimizer """adadelta""" +310 35 training_loop """owa""" +310 35 negative_sampler """basic""" +310 35 evaluator """rankbased""" +310 36 dataset """kinships""" +310 36 model """hole""" +310 36 loss """marginranking""" +310 36 regularizer """no""" +310 36 optimizer """adadelta""" +310 36 training_loop """owa""" +310 36 negative_sampler """basic""" +310 36 evaluator """rankbased""" +310 37 dataset 
"""kinships""" +310 37 model """hole""" +310 37 loss """marginranking""" +310 37 regularizer """no""" +310 37 optimizer """adadelta""" +310 37 training_loop """owa""" +310 37 negative_sampler """basic""" +310 37 evaluator """rankbased""" +310 38 dataset """kinships""" +310 38 model """hole""" +310 38 loss """marginranking""" +310 38 regularizer """no""" +310 38 optimizer """adadelta""" +310 38 training_loop """owa""" +310 38 negative_sampler """basic""" +310 38 evaluator """rankbased""" +310 39 dataset """kinships""" +310 39 model """hole""" +310 39 loss """marginranking""" +310 39 regularizer """no""" +310 39 optimizer """adadelta""" +310 39 training_loop """owa""" +310 39 negative_sampler """basic""" +310 39 evaluator """rankbased""" +310 40 dataset """kinships""" +310 40 model """hole""" +310 40 loss """marginranking""" +310 40 regularizer """no""" +310 40 optimizer """adadelta""" +310 40 training_loop """owa""" +310 40 negative_sampler """basic""" +310 40 evaluator """rankbased""" +310 41 dataset """kinships""" +310 41 model """hole""" +310 41 loss """marginranking""" +310 41 regularizer """no""" +310 41 optimizer """adadelta""" +310 41 training_loop """owa""" +310 41 negative_sampler """basic""" +310 41 evaluator """rankbased""" +310 42 dataset """kinships""" +310 42 model """hole""" +310 42 loss """marginranking""" +310 42 regularizer """no""" +310 42 optimizer """adadelta""" +310 42 training_loop """owa""" +310 42 negative_sampler """basic""" +310 42 evaluator """rankbased""" +310 43 dataset """kinships""" +310 43 model """hole""" +310 43 loss """marginranking""" +310 43 regularizer """no""" +310 43 optimizer """adadelta""" +310 43 training_loop """owa""" +310 43 negative_sampler """basic""" +310 43 evaluator """rankbased""" +310 44 dataset """kinships""" +310 44 model """hole""" +310 44 loss """marginranking""" +310 44 regularizer """no""" +310 44 optimizer """adadelta""" +310 44 training_loop """owa""" +310 44 negative_sampler """basic""" +310 44 evaluator 
"""rankbased""" +310 45 dataset """kinships""" +310 45 model """hole""" +310 45 loss """marginranking""" +310 45 regularizer """no""" +310 45 optimizer """adadelta""" +310 45 training_loop """owa""" +310 45 negative_sampler """basic""" +310 45 evaluator """rankbased""" +310 46 dataset """kinships""" +310 46 model """hole""" +310 46 loss """marginranking""" +310 46 regularizer """no""" +310 46 optimizer """adadelta""" +310 46 training_loop """owa""" +310 46 negative_sampler """basic""" +310 46 evaluator """rankbased""" +310 47 dataset """kinships""" +310 47 model """hole""" +310 47 loss """marginranking""" +310 47 regularizer """no""" +310 47 optimizer """adadelta""" +310 47 training_loop """owa""" +310 47 negative_sampler """basic""" +310 47 evaluator """rankbased""" +310 48 dataset """kinships""" +310 48 model """hole""" +310 48 loss """marginranking""" +310 48 regularizer """no""" +310 48 optimizer """adadelta""" +310 48 training_loop """owa""" +310 48 negative_sampler """basic""" +310 48 evaluator """rankbased""" +310 49 dataset """kinships""" +310 49 model """hole""" +310 49 loss """marginranking""" +310 49 regularizer """no""" +310 49 optimizer """adadelta""" +310 49 training_loop """owa""" +310 49 negative_sampler """basic""" +310 49 evaluator """rankbased""" +310 50 dataset """kinships""" +310 50 model """hole""" +310 50 loss """marginranking""" +310 50 regularizer """no""" +310 50 optimizer """adadelta""" +310 50 training_loop """owa""" +310 50 negative_sampler """basic""" +310 50 evaluator """rankbased""" +310 51 dataset """kinships""" +310 51 model """hole""" +310 51 loss """marginranking""" +310 51 regularizer """no""" +310 51 optimizer """adadelta""" +310 51 training_loop """owa""" +310 51 negative_sampler """basic""" +310 51 evaluator """rankbased""" +310 52 dataset """kinships""" +310 52 model """hole""" +310 52 loss """marginranking""" +310 52 regularizer """no""" +310 52 optimizer """adadelta""" +310 52 training_loop """owa""" +310 52 
negative_sampler """basic""" +310 52 evaluator """rankbased""" +310 53 dataset """kinships""" +310 53 model """hole""" +310 53 loss """marginranking""" +310 53 regularizer """no""" +310 53 optimizer """adadelta""" +310 53 training_loop """owa""" +310 53 negative_sampler """basic""" +310 53 evaluator """rankbased""" +310 54 dataset """kinships""" +310 54 model """hole""" +310 54 loss """marginranking""" +310 54 regularizer """no""" +310 54 optimizer """adadelta""" +310 54 training_loop """owa""" +310 54 negative_sampler """basic""" +310 54 evaluator """rankbased""" +310 55 dataset """kinships""" +310 55 model """hole""" +310 55 loss """marginranking""" +310 55 regularizer """no""" +310 55 optimizer """adadelta""" +310 55 training_loop """owa""" +310 55 negative_sampler """basic""" +310 55 evaluator """rankbased""" +310 56 dataset """kinships""" +310 56 model """hole""" +310 56 loss """marginranking""" +310 56 regularizer """no""" +310 56 optimizer """adadelta""" +310 56 training_loop """owa""" +310 56 negative_sampler """basic""" +310 56 evaluator """rankbased""" +310 57 dataset """kinships""" +310 57 model """hole""" +310 57 loss """marginranking""" +310 57 regularizer """no""" +310 57 optimizer """adadelta""" +310 57 training_loop """owa""" +310 57 negative_sampler """basic""" +310 57 evaluator """rankbased""" +310 58 dataset """kinships""" +310 58 model """hole""" +310 58 loss """marginranking""" +310 58 regularizer """no""" +310 58 optimizer """adadelta""" +310 58 training_loop """owa""" +310 58 negative_sampler """basic""" +310 58 evaluator """rankbased""" +310 59 dataset """kinships""" +310 59 model """hole""" +310 59 loss """marginranking""" +310 59 regularizer """no""" +310 59 optimizer """adadelta""" +310 59 training_loop """owa""" +310 59 negative_sampler """basic""" +310 59 evaluator """rankbased""" +310 60 dataset """kinships""" +310 60 model """hole""" +310 60 loss """marginranking""" +310 60 regularizer """no""" +310 60 optimizer """adadelta""" +310 60 
training_loop """owa""" +310 60 negative_sampler """basic""" +310 60 evaluator """rankbased""" +310 61 dataset """kinships""" +310 61 model """hole""" +310 61 loss """marginranking""" +310 61 regularizer """no""" +310 61 optimizer """adadelta""" +310 61 training_loop """owa""" +310 61 negative_sampler """basic""" +310 61 evaluator """rankbased""" +310 62 dataset """kinships""" +310 62 model """hole""" +310 62 loss """marginranking""" +310 62 regularizer """no""" +310 62 optimizer """adadelta""" +310 62 training_loop """owa""" +310 62 negative_sampler """basic""" +310 62 evaluator """rankbased""" +310 63 dataset """kinships""" +310 63 model """hole""" +310 63 loss """marginranking""" +310 63 regularizer """no""" +310 63 optimizer """adadelta""" +310 63 training_loop """owa""" +310 63 negative_sampler """basic""" +310 63 evaluator """rankbased""" +310 64 dataset """kinships""" +310 64 model """hole""" +310 64 loss """marginranking""" +310 64 regularizer """no""" +310 64 optimizer """adadelta""" +310 64 training_loop """owa""" +310 64 negative_sampler """basic""" +310 64 evaluator """rankbased""" +310 65 dataset """kinships""" +310 65 model """hole""" +310 65 loss """marginranking""" +310 65 regularizer """no""" +310 65 optimizer """adadelta""" +310 65 training_loop """owa""" +310 65 negative_sampler """basic""" +310 65 evaluator """rankbased""" +310 66 dataset """kinships""" +310 66 model """hole""" +310 66 loss """marginranking""" +310 66 regularizer """no""" +310 66 optimizer """adadelta""" +310 66 training_loop """owa""" +310 66 negative_sampler """basic""" +310 66 evaluator """rankbased""" +310 67 dataset """kinships""" +310 67 model """hole""" +310 67 loss """marginranking""" +310 67 regularizer """no""" +310 67 optimizer """adadelta""" +310 67 training_loop """owa""" +310 67 negative_sampler """basic""" +310 67 evaluator """rankbased""" +310 68 dataset """kinships""" +310 68 model """hole""" +310 68 loss """marginranking""" +310 68 regularizer """no""" +310 68 
optimizer """adadelta""" +310 68 training_loop """owa""" +310 68 negative_sampler """basic""" +310 68 evaluator """rankbased""" +310 69 dataset """kinships""" +310 69 model """hole""" +310 69 loss """marginranking""" +310 69 regularizer """no""" +310 69 optimizer """adadelta""" +310 69 training_loop """owa""" +310 69 negative_sampler """basic""" +310 69 evaluator """rankbased""" +310 70 dataset """kinships""" +310 70 model """hole""" +310 70 loss """marginranking""" +310 70 regularizer """no""" +310 70 optimizer """adadelta""" +310 70 training_loop """owa""" +310 70 negative_sampler """basic""" +310 70 evaluator """rankbased""" +310 71 dataset """kinships""" +310 71 model """hole""" +310 71 loss """marginranking""" +310 71 regularizer """no""" +310 71 optimizer """adadelta""" +310 71 training_loop """owa""" +310 71 negative_sampler """basic""" +310 71 evaluator """rankbased""" +310 72 dataset """kinships""" +310 72 model """hole""" +310 72 loss """marginranking""" +310 72 regularizer """no""" +310 72 optimizer """adadelta""" +310 72 training_loop """owa""" +310 72 negative_sampler """basic""" +310 72 evaluator """rankbased""" +310 73 dataset """kinships""" +310 73 model """hole""" +310 73 loss """marginranking""" +310 73 regularizer """no""" +310 73 optimizer """adadelta""" +310 73 training_loop """owa""" +310 73 negative_sampler """basic""" +310 73 evaluator """rankbased""" +310 74 dataset """kinships""" +310 74 model """hole""" +310 74 loss """marginranking""" +310 74 regularizer """no""" +310 74 optimizer """adadelta""" +310 74 training_loop """owa""" +310 74 negative_sampler """basic""" +310 74 evaluator """rankbased""" +310 75 dataset """kinships""" +310 75 model """hole""" +310 75 loss """marginranking""" +310 75 regularizer """no""" +310 75 optimizer """adadelta""" +310 75 training_loop """owa""" +310 75 negative_sampler """basic""" +310 75 evaluator """rankbased""" +310 76 dataset """kinships""" +310 76 model """hole""" +310 76 loss """marginranking""" +310 
76 regularizer """no""" +310 76 optimizer """adadelta""" +310 76 training_loop """owa""" +310 76 negative_sampler """basic""" +310 76 evaluator """rankbased""" +310 77 dataset """kinships""" +310 77 model """hole""" +310 77 loss """marginranking""" +310 77 regularizer """no""" +310 77 optimizer """adadelta""" +310 77 training_loop """owa""" +310 77 negative_sampler """basic""" +310 77 evaluator """rankbased""" +310 78 dataset """kinships""" +310 78 model """hole""" +310 78 loss """marginranking""" +310 78 regularizer """no""" +310 78 optimizer """adadelta""" +310 78 training_loop """owa""" +310 78 negative_sampler """basic""" +310 78 evaluator """rankbased""" +310 79 dataset """kinships""" +310 79 model """hole""" +310 79 loss """marginranking""" +310 79 regularizer """no""" +310 79 optimizer """adadelta""" +310 79 training_loop """owa""" +310 79 negative_sampler """basic""" +310 79 evaluator """rankbased""" +310 80 dataset """kinships""" +310 80 model """hole""" +310 80 loss """marginranking""" +310 80 regularizer """no""" +310 80 optimizer """adadelta""" +310 80 training_loop """owa""" +310 80 negative_sampler """basic""" +310 80 evaluator """rankbased""" +310 81 dataset """kinships""" +310 81 model """hole""" +310 81 loss """marginranking""" +310 81 regularizer """no""" +310 81 optimizer """adadelta""" +310 81 training_loop """owa""" +310 81 negative_sampler """basic""" +310 81 evaluator """rankbased""" +310 82 dataset """kinships""" +310 82 model """hole""" +310 82 loss """marginranking""" +310 82 regularizer """no""" +310 82 optimizer """adadelta""" +310 82 training_loop """owa""" +310 82 negative_sampler """basic""" +310 82 evaluator """rankbased""" +310 83 dataset """kinships""" +310 83 model """hole""" +310 83 loss """marginranking""" +310 83 regularizer """no""" +310 83 optimizer """adadelta""" +310 83 training_loop """owa""" +310 83 negative_sampler """basic""" +310 83 evaluator """rankbased""" +310 84 dataset """kinships""" +310 84 model """hole""" +310 
84 loss """marginranking""" +310 84 regularizer """no""" +310 84 optimizer """adadelta""" +310 84 training_loop """owa""" +310 84 negative_sampler """basic""" +310 84 evaluator """rankbased""" +310 85 dataset """kinships""" +310 85 model """hole""" +310 85 loss """marginranking""" +310 85 regularizer """no""" +310 85 optimizer """adadelta""" +310 85 training_loop """owa""" +310 85 negative_sampler """basic""" +310 85 evaluator """rankbased""" +310 86 dataset """kinships""" +310 86 model """hole""" +310 86 loss """marginranking""" +310 86 regularizer """no""" +310 86 optimizer """adadelta""" +310 86 training_loop """owa""" +310 86 negative_sampler """basic""" +310 86 evaluator """rankbased""" +310 87 dataset """kinships""" +310 87 model """hole""" +310 87 loss """marginranking""" +310 87 regularizer """no""" +310 87 optimizer """adadelta""" +310 87 training_loop """owa""" +310 87 negative_sampler """basic""" +310 87 evaluator """rankbased""" +310 88 dataset """kinships""" +310 88 model """hole""" +310 88 loss """marginranking""" +310 88 regularizer """no""" +310 88 optimizer """adadelta""" +310 88 training_loop """owa""" +310 88 negative_sampler """basic""" +310 88 evaluator """rankbased""" +310 89 dataset """kinships""" +310 89 model """hole""" +310 89 loss """marginranking""" +310 89 regularizer """no""" +310 89 optimizer """adadelta""" +310 89 training_loop """owa""" +310 89 negative_sampler """basic""" +310 89 evaluator """rankbased""" +310 90 dataset """kinships""" +310 90 model """hole""" +310 90 loss """marginranking""" +310 90 regularizer """no""" +310 90 optimizer """adadelta""" +310 90 training_loop """owa""" +310 90 negative_sampler """basic""" +310 90 evaluator """rankbased""" +310 91 dataset """kinships""" +310 91 model """hole""" +310 91 loss """marginranking""" +310 91 regularizer """no""" +310 91 optimizer """adadelta""" +310 91 training_loop """owa""" +310 91 negative_sampler """basic""" +310 91 evaluator """rankbased""" +310 92 dataset 
"""kinships""" +310 92 model """hole""" +310 92 loss """marginranking""" +310 92 regularizer """no""" +310 92 optimizer """adadelta""" +310 92 training_loop """owa""" +310 92 negative_sampler """basic""" +310 92 evaluator """rankbased""" +310 93 dataset """kinships""" +310 93 model """hole""" +310 93 loss """marginranking""" +310 93 regularizer """no""" +310 93 optimizer """adadelta""" +310 93 training_loop """owa""" +310 93 negative_sampler """basic""" +310 93 evaluator """rankbased""" +310 94 dataset """kinships""" +310 94 model """hole""" +310 94 loss """marginranking""" +310 94 regularizer """no""" +310 94 optimizer """adadelta""" +310 94 training_loop """owa""" +310 94 negative_sampler """basic""" +310 94 evaluator """rankbased""" +310 95 dataset """kinships""" +310 95 model """hole""" +310 95 loss """marginranking""" +310 95 regularizer """no""" +310 95 optimizer """adadelta""" +310 95 training_loop """owa""" +310 95 negative_sampler """basic""" +310 95 evaluator """rankbased""" +310 96 dataset """kinships""" +310 96 model """hole""" +310 96 loss """marginranking""" +310 96 regularizer """no""" +310 96 optimizer """adadelta""" +310 96 training_loop """owa""" +310 96 negative_sampler """basic""" +310 96 evaluator """rankbased""" +310 97 dataset """kinships""" +310 97 model """hole""" +310 97 loss """marginranking""" +310 97 regularizer """no""" +310 97 optimizer """adadelta""" +310 97 training_loop """owa""" +310 97 negative_sampler """basic""" +310 97 evaluator """rankbased""" +310 98 dataset """kinships""" +310 98 model """hole""" +310 98 loss """marginranking""" +310 98 regularizer """no""" +310 98 optimizer """adadelta""" +310 98 training_loop """owa""" +310 98 negative_sampler """basic""" +310 98 evaluator """rankbased""" +310 99 dataset """kinships""" +310 99 model """hole""" +310 99 loss """marginranking""" +310 99 regularizer """no""" +310 99 optimizer """adadelta""" +310 99 training_loop """owa""" +310 99 negative_sampler """basic""" +310 99 evaluator 
"""rankbased""" +310 100 dataset """kinships""" +310 100 model """hole""" +310 100 loss """marginranking""" +310 100 regularizer """no""" +310 100 optimizer """adadelta""" +310 100 training_loop """owa""" +310 100 negative_sampler """basic""" +310 100 evaluator """rankbased""" +311 1 model.embedding_dim 0.0 +311 1 loss.margin 9.247233708302149 +311 1 negative_sampler.num_negs_per_pos 41.0 +311 1 training.batch_size 0.0 +311 2 model.embedding_dim 1.0 +311 2 loss.margin 7.92448057290523 +311 2 negative_sampler.num_negs_per_pos 36.0 +311 2 training.batch_size 0.0 +311 3 model.embedding_dim 2.0 +311 3 loss.margin 4.366637104649518 +311 3 negative_sampler.num_negs_per_pos 9.0 +311 3 training.batch_size 0.0 +311 4 model.embedding_dim 1.0 +311 4 loss.margin 8.519118720420163 +311 4 negative_sampler.num_negs_per_pos 81.0 +311 4 training.batch_size 0.0 +311 5 model.embedding_dim 0.0 +311 5 loss.margin 5.173068477454521 +311 5 negative_sampler.num_negs_per_pos 25.0 +311 5 training.batch_size 2.0 +311 6 model.embedding_dim 2.0 +311 6 loss.margin 9.288449826935468 +311 6 negative_sampler.num_negs_per_pos 55.0 +311 6 training.batch_size 1.0 +311 7 model.embedding_dim 0.0 +311 7 loss.margin 0.7202932255527155 +311 7 negative_sampler.num_negs_per_pos 41.0 +311 7 training.batch_size 1.0 +311 8 model.embedding_dim 0.0 +311 8 loss.margin 5.526844646181527 +311 8 negative_sampler.num_negs_per_pos 98.0 +311 8 training.batch_size 2.0 +311 9 model.embedding_dim 2.0 +311 9 loss.margin 2.2661332754498726 +311 9 negative_sampler.num_negs_per_pos 88.0 +311 9 training.batch_size 0.0 +311 10 model.embedding_dim 0.0 +311 10 loss.margin 8.448224311413256 +311 10 negative_sampler.num_negs_per_pos 37.0 +311 10 training.batch_size 2.0 +311 11 model.embedding_dim 0.0 +311 11 loss.margin 4.296593014315057 +311 11 negative_sampler.num_negs_per_pos 75.0 +311 11 training.batch_size 1.0 +311 12 model.embedding_dim 0.0 +311 12 loss.margin 1.7342278514331193 +311 12 negative_sampler.num_negs_per_pos 86.0 
+311 12 training.batch_size 2.0 +311 13 model.embedding_dim 0.0 +311 13 loss.margin 9.622386220285996 +311 13 negative_sampler.num_negs_per_pos 22.0 +311 13 training.batch_size 1.0 +311 14 model.embedding_dim 0.0 +311 14 loss.margin 4.020389340334602 +311 14 negative_sampler.num_negs_per_pos 69.0 +311 14 training.batch_size 0.0 +311 15 model.embedding_dim 0.0 +311 15 loss.margin 4.472213033953869 +311 15 negative_sampler.num_negs_per_pos 93.0 +311 15 training.batch_size 0.0 +311 16 model.embedding_dim 2.0 +311 16 loss.margin 6.407450199261854 +311 16 negative_sampler.num_negs_per_pos 75.0 +311 16 training.batch_size 1.0 +311 17 model.embedding_dim 2.0 +311 17 loss.margin 8.282943620400705 +311 17 negative_sampler.num_negs_per_pos 25.0 +311 17 training.batch_size 2.0 +311 18 model.embedding_dim 2.0 +311 18 loss.margin 3.4926419811604204 +311 18 negative_sampler.num_negs_per_pos 65.0 +311 18 training.batch_size 1.0 +311 19 model.embedding_dim 1.0 +311 19 loss.margin 9.042986402926847 +311 19 negative_sampler.num_negs_per_pos 13.0 +311 19 training.batch_size 2.0 +311 20 model.embedding_dim 0.0 +311 20 loss.margin 9.023659659281753 +311 20 negative_sampler.num_negs_per_pos 31.0 +311 20 training.batch_size 0.0 +311 21 model.embedding_dim 2.0 +311 21 loss.margin 3.859658566081821 +311 21 negative_sampler.num_negs_per_pos 74.0 +311 21 training.batch_size 1.0 +311 22 model.embedding_dim 2.0 +311 22 loss.margin 6.47377506076982 +311 22 negative_sampler.num_negs_per_pos 26.0 +311 22 training.batch_size 2.0 +311 23 model.embedding_dim 2.0 +311 23 loss.margin 3.3861934574720083 +311 23 negative_sampler.num_negs_per_pos 11.0 +311 23 training.batch_size 1.0 +311 24 model.embedding_dim 0.0 +311 24 loss.margin 9.071035040393436 +311 24 negative_sampler.num_negs_per_pos 27.0 +311 24 training.batch_size 0.0 +311 25 model.embedding_dim 2.0 +311 25 loss.margin 9.915517902594402 +311 25 negative_sampler.num_negs_per_pos 85.0 +311 25 training.batch_size 0.0 +311 26 model.embedding_dim 
0.0 +311 26 loss.margin 9.405078161972588 +311 26 negative_sampler.num_negs_per_pos 49.0 +311 26 training.batch_size 2.0 +311 27 model.embedding_dim 2.0 +311 27 loss.margin 7.925574517922649 +311 27 negative_sampler.num_negs_per_pos 4.0 +311 27 training.batch_size 1.0 +311 28 model.embedding_dim 0.0 +311 28 loss.margin 1.6083381437074002 +311 28 negative_sampler.num_negs_per_pos 45.0 +311 28 training.batch_size 1.0 +311 29 model.embedding_dim 0.0 +311 29 loss.margin 4.635165231654203 +311 29 negative_sampler.num_negs_per_pos 39.0 +311 29 training.batch_size 2.0 +311 30 model.embedding_dim 1.0 +311 30 loss.margin 4.9767021682130075 +311 30 negative_sampler.num_negs_per_pos 40.0 +311 30 training.batch_size 2.0 +311 31 model.embedding_dim 1.0 +311 31 loss.margin 9.868906480094621 +311 31 negative_sampler.num_negs_per_pos 36.0 +311 31 training.batch_size 1.0 +311 32 model.embedding_dim 2.0 +311 32 loss.margin 2.504276034341386 +311 32 negative_sampler.num_negs_per_pos 48.0 +311 32 training.batch_size 1.0 +311 33 model.embedding_dim 0.0 +311 33 loss.margin 2.938512982982904 +311 33 negative_sampler.num_negs_per_pos 57.0 +311 33 training.batch_size 1.0 +311 34 model.embedding_dim 1.0 +311 34 loss.margin 5.451608820920213 +311 34 negative_sampler.num_negs_per_pos 74.0 +311 34 training.batch_size 1.0 +311 35 model.embedding_dim 2.0 +311 35 loss.margin 1.5059918589974477 +311 35 negative_sampler.num_negs_per_pos 26.0 +311 35 training.batch_size 2.0 +311 36 model.embedding_dim 0.0 +311 36 loss.margin 9.548934326512569 +311 36 negative_sampler.num_negs_per_pos 31.0 +311 36 training.batch_size 2.0 +311 37 model.embedding_dim 1.0 +311 37 loss.margin 9.546657578635328 +311 37 negative_sampler.num_negs_per_pos 32.0 +311 37 training.batch_size 0.0 +311 38 model.embedding_dim 0.0 +311 38 loss.margin 9.69214894234124 +311 38 negative_sampler.num_negs_per_pos 91.0 +311 38 training.batch_size 0.0 +311 39 model.embedding_dim 1.0 +311 39 loss.margin 3.7475140110959377 +311 39 
negative_sampler.num_negs_per_pos 14.0 +311 39 training.batch_size 1.0 +311 40 model.embedding_dim 0.0 +311 40 loss.margin 2.568867632157588 +311 40 negative_sampler.num_negs_per_pos 79.0 +311 40 training.batch_size 2.0 +311 41 model.embedding_dim 1.0 +311 41 loss.margin 3.491314849452239 +311 41 negative_sampler.num_negs_per_pos 75.0 +311 41 training.batch_size 1.0 +311 42 model.embedding_dim 0.0 +311 42 loss.margin 0.9902252829532754 +311 42 negative_sampler.num_negs_per_pos 85.0 +311 42 training.batch_size 1.0 +311 43 model.embedding_dim 0.0 +311 43 loss.margin 8.158599138787604 +311 43 negative_sampler.num_negs_per_pos 90.0 +311 43 training.batch_size 1.0 +311 44 model.embedding_dim 0.0 +311 44 loss.margin 0.9627177326774479 +311 44 negative_sampler.num_negs_per_pos 83.0 +311 44 training.batch_size 2.0 +311 45 model.embedding_dim 0.0 +311 45 loss.margin 2.067332323096769 +311 45 negative_sampler.num_negs_per_pos 17.0 +311 45 training.batch_size 0.0 +311 46 model.embedding_dim 2.0 +311 46 loss.margin 3.4763020719383673 +311 46 negative_sampler.num_negs_per_pos 67.0 +311 46 training.batch_size 2.0 +311 47 model.embedding_dim 1.0 +311 47 loss.margin 2.156852086066144 +311 47 negative_sampler.num_negs_per_pos 12.0 +311 47 training.batch_size 1.0 +311 48 model.embedding_dim 0.0 +311 48 loss.margin 4.909987766068047 +311 48 negative_sampler.num_negs_per_pos 91.0 +311 48 training.batch_size 1.0 +311 49 model.embedding_dim 0.0 +311 49 loss.margin 3.148099089574604 +311 49 negative_sampler.num_negs_per_pos 20.0 +311 49 training.batch_size 0.0 +311 50 model.embedding_dim 2.0 +311 50 loss.margin 8.60415267790646 +311 50 negative_sampler.num_negs_per_pos 60.0 +311 50 training.batch_size 0.0 +311 51 model.embedding_dim 0.0 +311 51 loss.margin 0.7499576566247678 +311 51 negative_sampler.num_negs_per_pos 52.0 +311 51 training.batch_size 0.0 +311 52 model.embedding_dim 0.0 +311 52 loss.margin 2.023909550615187 +311 52 negative_sampler.num_negs_per_pos 72.0 +311 52 
training.batch_size 0.0 +311 53 model.embedding_dim 0.0 +311 53 loss.margin 6.884593057017318 +311 53 negative_sampler.num_negs_per_pos 95.0 +311 53 training.batch_size 0.0 +311 54 model.embedding_dim 2.0 +311 54 loss.margin 4.994947118688079 +311 54 negative_sampler.num_negs_per_pos 58.0 +311 54 training.batch_size 0.0 +311 55 model.embedding_dim 2.0 +311 55 loss.margin 9.414379082190239 +311 55 negative_sampler.num_negs_per_pos 32.0 +311 55 training.batch_size 0.0 +311 56 model.embedding_dim 0.0 +311 56 loss.margin 8.941530887732137 +311 56 negative_sampler.num_negs_per_pos 60.0 +311 56 training.batch_size 2.0 +311 57 model.embedding_dim 2.0 +311 57 loss.margin 7.743300266476272 +311 57 negative_sampler.num_negs_per_pos 30.0 +311 57 training.batch_size 0.0 +311 58 model.embedding_dim 1.0 +311 58 loss.margin 9.806135399249142 +311 58 negative_sampler.num_negs_per_pos 51.0 +311 58 training.batch_size 0.0 +311 59 model.embedding_dim 2.0 +311 59 loss.margin 5.415074812655799 +311 59 negative_sampler.num_negs_per_pos 72.0 +311 59 training.batch_size 0.0 +311 60 model.embedding_dim 2.0 +311 60 loss.margin 2.5107636032571747 +311 60 negative_sampler.num_negs_per_pos 77.0 +311 60 training.batch_size 2.0 +311 61 model.embedding_dim 0.0 +311 61 loss.margin 3.963555326454616 +311 61 negative_sampler.num_negs_per_pos 31.0 +311 61 training.batch_size 1.0 +311 62 model.embedding_dim 2.0 +311 62 loss.margin 9.754194697283657 +311 62 negative_sampler.num_negs_per_pos 6.0 +311 62 training.batch_size 2.0 +311 63 model.embedding_dim 1.0 +311 63 loss.margin 7.778887428558381 +311 63 negative_sampler.num_negs_per_pos 32.0 +311 63 training.batch_size 0.0 +311 64 model.embedding_dim 1.0 +311 64 loss.margin 5.857826941266579 +311 64 negative_sampler.num_negs_per_pos 93.0 +311 64 training.batch_size 0.0 +311 65 model.embedding_dim 2.0 +311 65 loss.margin 3.779882970900345 +311 65 negative_sampler.num_negs_per_pos 72.0 +311 65 training.batch_size 2.0 +311 66 model.embedding_dim 0.0 +311 
66 loss.margin 8.088762889342462 +311 66 negative_sampler.num_negs_per_pos 13.0 +311 66 training.batch_size 0.0 +311 67 model.embedding_dim 1.0 +311 67 loss.margin 6.540795306694271 +311 67 negative_sampler.num_negs_per_pos 56.0 +311 67 training.batch_size 0.0 +311 68 model.embedding_dim 1.0 +311 68 loss.margin 0.5355666442487923 +311 68 negative_sampler.num_negs_per_pos 24.0 +311 68 training.batch_size 0.0 +311 69 model.embedding_dim 2.0 +311 69 loss.margin 5.135189844065945 +311 69 negative_sampler.num_negs_per_pos 20.0 +311 69 training.batch_size 0.0 +311 70 model.embedding_dim 0.0 +311 70 loss.margin 7.578915404780475 +311 70 negative_sampler.num_negs_per_pos 37.0 +311 70 training.batch_size 1.0 +311 71 model.embedding_dim 0.0 +311 71 loss.margin 4.251017148166854 +311 71 negative_sampler.num_negs_per_pos 91.0 +311 71 training.batch_size 1.0 +311 72 model.embedding_dim 2.0 +311 72 loss.margin 4.1805255655671765 +311 72 negative_sampler.num_negs_per_pos 29.0 +311 72 training.batch_size 0.0 +311 73 model.embedding_dim 1.0 +311 73 loss.margin 3.024279262181146 +311 73 negative_sampler.num_negs_per_pos 86.0 +311 73 training.batch_size 2.0 +311 74 model.embedding_dim 2.0 +311 74 loss.margin 1.4368498893532171 +311 74 negative_sampler.num_negs_per_pos 36.0 +311 74 training.batch_size 2.0 +311 75 model.embedding_dim 2.0 +311 75 loss.margin 3.601909095747021 +311 75 negative_sampler.num_negs_per_pos 24.0 +311 75 training.batch_size 2.0 +311 76 model.embedding_dim 2.0 +311 76 loss.margin 1.2357905459387788 +311 76 negative_sampler.num_negs_per_pos 45.0 +311 76 training.batch_size 2.0 +311 77 model.embedding_dim 1.0 +311 77 loss.margin 1.633049316843867 +311 77 negative_sampler.num_negs_per_pos 53.0 +311 77 training.batch_size 2.0 +311 78 model.embedding_dim 2.0 +311 78 loss.margin 2.778800830020754 +311 78 negative_sampler.num_negs_per_pos 82.0 +311 78 training.batch_size 1.0 +311 79 model.embedding_dim 2.0 +311 79 loss.margin 6.8977318071261395 +311 79 
negative_sampler.num_negs_per_pos 41.0 +311 79 training.batch_size 1.0 +311 80 model.embedding_dim 2.0 +311 80 loss.margin 9.800386806012655 +311 80 negative_sampler.num_negs_per_pos 28.0 +311 80 training.batch_size 0.0 +311 81 model.embedding_dim 2.0 +311 81 loss.margin 2.844991987153034 +311 81 negative_sampler.num_negs_per_pos 66.0 +311 81 training.batch_size 2.0 +311 82 model.embedding_dim 1.0 +311 82 loss.margin 7.566973462365051 +311 82 negative_sampler.num_negs_per_pos 30.0 +311 82 training.batch_size 1.0 +311 83 model.embedding_dim 1.0 +311 83 loss.margin 1.2144866126569545 +311 83 negative_sampler.num_negs_per_pos 47.0 +311 83 training.batch_size 1.0 +311 84 model.embedding_dim 0.0 +311 84 loss.margin 9.93298530594394 +311 84 negative_sampler.num_negs_per_pos 94.0 +311 84 training.batch_size 1.0 +311 85 model.embedding_dim 1.0 +311 85 loss.margin 8.969039022889538 +311 85 negative_sampler.num_negs_per_pos 78.0 +311 85 training.batch_size 2.0 +311 86 model.embedding_dim 0.0 +311 86 loss.margin 8.310059179851642 +311 86 negative_sampler.num_negs_per_pos 96.0 +311 86 training.batch_size 2.0 +311 87 model.embedding_dim 2.0 +311 87 loss.margin 4.860672605695447 +311 87 negative_sampler.num_negs_per_pos 84.0 +311 87 training.batch_size 1.0 +311 88 model.embedding_dim 1.0 +311 88 loss.margin 7.8885270625059265 +311 88 negative_sampler.num_negs_per_pos 70.0 +311 88 training.batch_size 0.0 +311 89 model.embedding_dim 0.0 +311 89 loss.margin 0.7378413461443012 +311 89 negative_sampler.num_negs_per_pos 24.0 +311 89 training.batch_size 2.0 +311 90 model.embedding_dim 2.0 +311 90 loss.margin 9.28135934867808 +311 90 negative_sampler.num_negs_per_pos 2.0 +311 90 training.batch_size 2.0 +311 91 model.embedding_dim 2.0 +311 91 loss.margin 6.798592617366672 +311 91 negative_sampler.num_negs_per_pos 67.0 +311 91 training.batch_size 0.0 +311 92 model.embedding_dim 2.0 +311 92 loss.margin 8.402761774321522 +311 92 negative_sampler.num_negs_per_pos 56.0 +311 92 
training.batch_size 2.0 +311 93 model.embedding_dim 1.0 +311 93 loss.margin 3.582663047014678 +311 93 negative_sampler.num_negs_per_pos 68.0 +311 93 training.batch_size 0.0 +311 94 model.embedding_dim 2.0 +311 94 loss.margin 2.5243411263302376 +311 94 negative_sampler.num_negs_per_pos 74.0 +311 94 training.batch_size 1.0 +311 95 model.embedding_dim 0.0 +311 95 loss.margin 5.833229610694393 +311 95 negative_sampler.num_negs_per_pos 75.0 +311 95 training.batch_size 1.0 +311 96 model.embedding_dim 0.0 +311 96 loss.margin 3.6890310685106984 +311 96 negative_sampler.num_negs_per_pos 27.0 +311 96 training.batch_size 2.0 +311 97 model.embedding_dim 0.0 +311 97 loss.margin 3.4148601849995064 +311 97 negative_sampler.num_negs_per_pos 54.0 +311 97 training.batch_size 2.0 +311 98 model.embedding_dim 0.0 +311 98 loss.margin 2.929565069852948 +311 98 negative_sampler.num_negs_per_pos 82.0 +311 98 training.batch_size 1.0 +311 99 model.embedding_dim 0.0 +311 99 loss.margin 8.70620597128542 +311 99 negative_sampler.num_negs_per_pos 55.0 +311 99 training.batch_size 2.0 +311 100 model.embedding_dim 2.0 +311 100 loss.margin 9.919598039504752 +311 100 negative_sampler.num_negs_per_pos 7.0 +311 100 training.batch_size 2.0 +311 1 dataset """kinships""" +311 1 model """hole""" +311 1 loss """marginranking""" +311 1 regularizer """no""" +311 1 optimizer """adadelta""" +311 1 training_loop """owa""" +311 1 negative_sampler """basic""" +311 1 evaluator """rankbased""" +311 2 dataset """kinships""" +311 2 model """hole""" +311 2 loss """marginranking""" +311 2 regularizer """no""" +311 2 optimizer """adadelta""" +311 2 training_loop """owa""" +311 2 negative_sampler """basic""" +311 2 evaluator """rankbased""" +311 3 dataset """kinships""" +311 3 model """hole""" +311 3 loss """marginranking""" +311 3 regularizer """no""" +311 3 optimizer """adadelta""" +311 3 training_loop """owa""" +311 3 negative_sampler """basic""" +311 3 evaluator """rankbased""" +311 4 dataset """kinships""" +311 4 
model """hole""" +311 4 loss """marginranking""" +311 4 regularizer """no""" +311 4 optimizer """adadelta""" +311 4 training_loop """owa""" +311 4 negative_sampler """basic""" +311 4 evaluator """rankbased""" +311 5 dataset """kinships""" +311 5 model """hole""" +311 5 loss """marginranking""" +311 5 regularizer """no""" +311 5 optimizer """adadelta""" +311 5 training_loop """owa""" +311 5 negative_sampler """basic""" +311 5 evaluator """rankbased""" +311 6 dataset """kinships""" +311 6 model """hole""" +311 6 loss """marginranking""" +311 6 regularizer """no""" +311 6 optimizer """adadelta""" +311 6 training_loop """owa""" +311 6 negative_sampler """basic""" +311 6 evaluator """rankbased""" +311 7 dataset """kinships""" +311 7 model """hole""" +311 7 loss """marginranking""" +311 7 regularizer """no""" +311 7 optimizer """adadelta""" +311 7 training_loop """owa""" +311 7 negative_sampler """basic""" +311 7 evaluator """rankbased""" +311 8 dataset """kinships""" +311 8 model """hole""" +311 8 loss """marginranking""" +311 8 regularizer """no""" +311 8 optimizer """adadelta""" +311 8 training_loop """owa""" +311 8 negative_sampler """basic""" +311 8 evaluator """rankbased""" +311 9 dataset """kinships""" +311 9 model """hole""" +311 9 loss """marginranking""" +311 9 regularizer """no""" +311 9 optimizer """adadelta""" +311 9 training_loop """owa""" +311 9 negative_sampler """basic""" +311 9 evaluator """rankbased""" +311 10 dataset """kinships""" +311 10 model """hole""" +311 10 loss """marginranking""" +311 10 regularizer """no""" +311 10 optimizer """adadelta""" +311 10 training_loop """owa""" +311 10 negative_sampler """basic""" +311 10 evaluator """rankbased""" +311 11 dataset """kinships""" +311 11 model """hole""" +311 11 loss """marginranking""" +311 11 regularizer """no""" +311 11 optimizer """adadelta""" +311 11 training_loop """owa""" +311 11 negative_sampler """basic""" +311 11 evaluator """rankbased""" +311 12 dataset """kinships""" +311 12 model 
"""hole""" +311 12 loss """marginranking""" +311 12 regularizer """no""" +311 12 optimizer """adadelta""" +311 12 training_loop """owa""" +311 12 negative_sampler """basic""" +311 12 evaluator """rankbased""" +311 13 dataset """kinships""" +311 13 model """hole""" +311 13 loss """marginranking""" +311 13 regularizer """no""" +311 13 optimizer """adadelta""" +311 13 training_loop """owa""" +311 13 negative_sampler """basic""" +311 13 evaluator """rankbased""" +311 14 dataset """kinships""" +311 14 model """hole""" +311 14 loss """marginranking""" +311 14 regularizer """no""" +311 14 optimizer """adadelta""" +311 14 training_loop """owa""" +311 14 negative_sampler """basic""" +311 14 evaluator """rankbased""" +311 15 dataset """kinships""" +311 15 model """hole""" +311 15 loss """marginranking""" +311 15 regularizer """no""" +311 15 optimizer """adadelta""" +311 15 training_loop """owa""" +311 15 negative_sampler """basic""" +311 15 evaluator """rankbased""" +311 16 dataset """kinships""" +311 16 model """hole""" +311 16 loss """marginranking""" +311 16 regularizer """no""" +311 16 optimizer """adadelta""" +311 16 training_loop """owa""" +311 16 negative_sampler """basic""" +311 16 evaluator """rankbased""" +311 17 dataset """kinships""" +311 17 model """hole""" +311 17 loss """marginranking""" +311 17 regularizer """no""" +311 17 optimizer """adadelta""" +311 17 training_loop """owa""" +311 17 negative_sampler """basic""" +311 17 evaluator """rankbased""" +311 18 dataset """kinships""" +311 18 model """hole""" +311 18 loss """marginranking""" +311 18 regularizer """no""" +311 18 optimizer """adadelta""" +311 18 training_loop """owa""" +311 18 negative_sampler """basic""" +311 18 evaluator """rankbased""" +311 19 dataset """kinships""" +311 19 model """hole""" +311 19 loss """marginranking""" +311 19 regularizer """no""" +311 19 optimizer """adadelta""" +311 19 training_loop """owa""" +311 19 negative_sampler """basic""" +311 19 evaluator """rankbased""" +311 20 
dataset """kinships""" +311 20 model """hole""" +311 20 loss """marginranking""" +311 20 regularizer """no""" +311 20 optimizer """adadelta""" +311 20 training_loop """owa""" +311 20 negative_sampler """basic""" +311 20 evaluator """rankbased""" +311 21 dataset """kinships""" +311 21 model """hole""" +311 21 loss """marginranking""" +311 21 regularizer """no""" +311 21 optimizer """adadelta""" +311 21 training_loop """owa""" +311 21 negative_sampler """basic""" +311 21 evaluator """rankbased""" +311 22 dataset """kinships""" +311 22 model """hole""" +311 22 loss """marginranking""" +311 22 regularizer """no""" +311 22 optimizer """adadelta""" +311 22 training_loop """owa""" +311 22 negative_sampler """basic""" +311 22 evaluator """rankbased""" +311 23 dataset """kinships""" +311 23 model """hole""" +311 23 loss """marginranking""" +311 23 regularizer """no""" +311 23 optimizer """adadelta""" +311 23 training_loop """owa""" +311 23 negative_sampler """basic""" +311 23 evaluator """rankbased""" +311 24 dataset """kinships""" +311 24 model """hole""" +311 24 loss """marginranking""" +311 24 regularizer """no""" +311 24 optimizer """adadelta""" +311 24 training_loop """owa""" +311 24 negative_sampler """basic""" +311 24 evaluator """rankbased""" +311 25 dataset """kinships""" +311 25 model """hole""" +311 25 loss """marginranking""" +311 25 regularizer """no""" +311 25 optimizer """adadelta""" +311 25 training_loop """owa""" +311 25 negative_sampler """basic""" +311 25 evaluator """rankbased""" +311 26 dataset """kinships""" +311 26 model """hole""" +311 26 loss """marginranking""" +311 26 regularizer """no""" +311 26 optimizer """adadelta""" +311 26 training_loop """owa""" +311 26 negative_sampler """basic""" +311 26 evaluator """rankbased""" +311 27 dataset """kinships""" +311 27 model """hole""" +311 27 loss """marginranking""" +311 27 regularizer """no""" +311 27 optimizer """adadelta""" +311 27 training_loop """owa""" +311 27 negative_sampler """basic""" +311 27 
evaluator """rankbased""" +311 28 dataset """kinships""" +311 28 model """hole""" +311 28 loss """marginranking""" +311 28 regularizer """no""" +311 28 optimizer """adadelta""" +311 28 training_loop """owa""" +311 28 negative_sampler """basic""" +311 28 evaluator """rankbased""" +311 29 dataset """kinships""" +311 29 model """hole""" +311 29 loss """marginranking""" +311 29 regularizer """no""" +311 29 optimizer """adadelta""" +311 29 training_loop """owa""" +311 29 negative_sampler """basic""" +311 29 evaluator """rankbased""" +311 30 dataset """kinships""" +311 30 model """hole""" +311 30 loss """marginranking""" +311 30 regularizer """no""" +311 30 optimizer """adadelta""" +311 30 training_loop """owa""" +311 30 negative_sampler """basic""" +311 30 evaluator """rankbased""" +311 31 dataset """kinships""" +311 31 model """hole""" +311 31 loss """marginranking""" +311 31 regularizer """no""" +311 31 optimizer """adadelta""" +311 31 training_loop """owa""" +311 31 negative_sampler """basic""" +311 31 evaluator """rankbased""" +311 32 dataset """kinships""" +311 32 model """hole""" +311 32 loss """marginranking""" +311 32 regularizer """no""" +311 32 optimizer """adadelta""" +311 32 training_loop """owa""" +311 32 negative_sampler """basic""" +311 32 evaluator """rankbased""" +311 33 dataset """kinships""" +311 33 model """hole""" +311 33 loss """marginranking""" +311 33 regularizer """no""" +311 33 optimizer """adadelta""" +311 33 training_loop """owa""" +311 33 negative_sampler """basic""" +311 33 evaluator """rankbased""" +311 34 dataset """kinships""" +311 34 model """hole""" +311 34 loss """marginranking""" +311 34 regularizer """no""" +311 34 optimizer """adadelta""" +311 34 training_loop """owa""" +311 34 negative_sampler """basic""" +311 34 evaluator """rankbased""" +311 35 dataset """kinships""" +311 35 model """hole""" +311 35 loss """marginranking""" +311 35 regularizer """no""" +311 35 optimizer """adadelta""" +311 35 training_loop """owa""" +311 35 
negative_sampler """basic""" +311 35 evaluator """rankbased""" +311 36 dataset """kinships""" +311 36 model """hole""" +311 36 loss """marginranking""" +311 36 regularizer """no""" +311 36 optimizer """adadelta""" +311 36 training_loop """owa""" +311 36 negative_sampler """basic""" +311 36 evaluator """rankbased""" +311 37 dataset """kinships""" +311 37 model """hole""" +311 37 loss """marginranking""" +311 37 regularizer """no""" +311 37 optimizer """adadelta""" +311 37 training_loop """owa""" +311 37 negative_sampler """basic""" +311 37 evaluator """rankbased""" +311 38 dataset """kinships""" +311 38 model """hole""" +311 38 loss """marginranking""" +311 38 regularizer """no""" +311 38 optimizer """adadelta""" +311 38 training_loop """owa""" +311 38 negative_sampler """basic""" +311 38 evaluator """rankbased""" +311 39 dataset """kinships""" +311 39 model """hole""" +311 39 loss """marginranking""" +311 39 regularizer """no""" +311 39 optimizer """adadelta""" +311 39 training_loop """owa""" +311 39 negative_sampler """basic""" +311 39 evaluator """rankbased""" +311 40 dataset """kinships""" +311 40 model """hole""" +311 40 loss """marginranking""" +311 40 regularizer """no""" +311 40 optimizer """adadelta""" +311 40 training_loop """owa""" +311 40 negative_sampler """basic""" +311 40 evaluator """rankbased""" +311 41 dataset """kinships""" +311 41 model """hole""" +311 41 loss """marginranking""" +311 41 regularizer """no""" +311 41 optimizer """adadelta""" +311 41 training_loop """owa""" +311 41 negative_sampler """basic""" +311 41 evaluator """rankbased""" +311 42 dataset """kinships""" +311 42 model """hole""" +311 42 loss """marginranking""" +311 42 regularizer """no""" +311 42 optimizer """adadelta""" +311 42 training_loop """owa""" +311 42 negative_sampler """basic""" +311 42 evaluator """rankbased""" +311 43 dataset """kinships""" +311 43 model """hole""" +311 43 loss """marginranking""" +311 43 regularizer """no""" +311 43 optimizer """adadelta""" +311 43 
training_loop """owa""" +311 43 negative_sampler """basic""" +311 43 evaluator """rankbased""" +311 44 dataset """kinships""" +311 44 model """hole""" +311 44 loss """marginranking""" +311 44 regularizer """no""" +311 44 optimizer """adadelta""" +311 44 training_loop """owa""" +311 44 negative_sampler """basic""" +311 44 evaluator """rankbased""" +311 45 dataset """kinships""" +311 45 model """hole""" +311 45 loss """marginranking""" +311 45 regularizer """no""" +311 45 optimizer """adadelta""" +311 45 training_loop """owa""" +311 45 negative_sampler """basic""" +311 45 evaluator """rankbased""" +311 46 dataset """kinships""" +311 46 model """hole""" +311 46 loss """marginranking""" +311 46 regularizer """no""" +311 46 optimizer """adadelta""" +311 46 training_loop """owa""" +311 46 negative_sampler """basic""" +311 46 evaluator """rankbased""" +311 47 dataset """kinships""" +311 47 model """hole""" +311 47 loss """marginranking""" +311 47 regularizer """no""" +311 47 optimizer """adadelta""" +311 47 training_loop """owa""" +311 47 negative_sampler """basic""" +311 47 evaluator """rankbased""" +311 48 dataset """kinships""" +311 48 model """hole""" +311 48 loss """marginranking""" +311 48 regularizer """no""" +311 48 optimizer """adadelta""" +311 48 training_loop """owa""" +311 48 negative_sampler """basic""" +311 48 evaluator """rankbased""" +311 49 dataset """kinships""" +311 49 model """hole""" +311 49 loss """marginranking""" +311 49 regularizer """no""" +311 49 optimizer """adadelta""" +311 49 training_loop """owa""" +311 49 negative_sampler """basic""" +311 49 evaluator """rankbased""" +311 50 dataset """kinships""" +311 50 model """hole""" +311 50 loss """marginranking""" +311 50 regularizer """no""" +311 50 optimizer """adadelta""" +311 50 training_loop """owa""" +311 50 negative_sampler """basic""" +311 50 evaluator """rankbased""" +311 51 dataset """kinships""" +311 51 model """hole""" +311 51 loss """marginranking""" +311 51 regularizer """no""" +311 51 
optimizer """adadelta""" +311 51 training_loop """owa""" +311 51 negative_sampler """basic""" +311 51 evaluator """rankbased""" +311 52 dataset """kinships""" +311 52 model """hole""" +311 52 loss """marginranking""" +311 52 regularizer """no""" +311 52 optimizer """adadelta""" +311 52 training_loop """owa""" +311 52 negative_sampler """basic""" +311 52 evaluator """rankbased""" +311 53 dataset """kinships""" +311 53 model """hole""" +311 53 loss """marginranking""" +311 53 regularizer """no""" +311 53 optimizer """adadelta""" +311 53 training_loop """owa""" +311 53 negative_sampler """basic""" +311 53 evaluator """rankbased""" +311 54 dataset """kinships""" +311 54 model """hole""" +311 54 loss """marginranking""" +311 54 regularizer """no""" +311 54 optimizer """adadelta""" +311 54 training_loop """owa""" +311 54 negative_sampler """basic""" +311 54 evaluator """rankbased""" +311 55 dataset """kinships""" +311 55 model """hole""" +311 55 loss """marginranking""" +311 55 regularizer """no""" +311 55 optimizer """adadelta""" +311 55 training_loop """owa""" +311 55 negative_sampler """basic""" +311 55 evaluator """rankbased""" +311 56 dataset """kinships""" +311 56 model """hole""" +311 56 loss """marginranking""" +311 56 regularizer """no""" +311 56 optimizer """adadelta""" +311 56 training_loop """owa""" +311 56 negative_sampler """basic""" +311 56 evaluator """rankbased""" +311 57 dataset """kinships""" +311 57 model """hole""" +311 57 loss """marginranking""" +311 57 regularizer """no""" +311 57 optimizer """adadelta""" +311 57 training_loop """owa""" +311 57 negative_sampler """basic""" +311 57 evaluator """rankbased""" +311 58 dataset """kinships""" +311 58 model """hole""" +311 58 loss """marginranking""" +311 58 regularizer """no""" +311 58 optimizer """adadelta""" +311 58 training_loop """owa""" +311 58 negative_sampler """basic""" +311 58 evaluator """rankbased""" +311 59 dataset """kinships""" +311 59 model """hole""" +311 59 loss """marginranking""" +311 
59 regularizer """no""" +311 59 optimizer """adadelta""" +311 59 training_loop """owa""" +311 59 negative_sampler """basic""" +311 59 evaluator """rankbased""" +311 60 dataset """kinships""" +311 60 model """hole""" +311 60 loss """marginranking""" +311 60 regularizer """no""" +311 60 optimizer """adadelta""" +311 60 training_loop """owa""" +311 60 negative_sampler """basic""" +311 60 evaluator """rankbased""" +311 61 dataset """kinships""" +311 61 model """hole""" +311 61 loss """marginranking""" +311 61 regularizer """no""" +311 61 optimizer """adadelta""" +311 61 training_loop """owa""" +311 61 negative_sampler """basic""" +311 61 evaluator """rankbased""" +311 62 dataset """kinships""" +311 62 model """hole""" +311 62 loss """marginranking""" +311 62 regularizer """no""" +311 62 optimizer """adadelta""" +311 62 training_loop """owa""" +311 62 negative_sampler """basic""" +311 62 evaluator """rankbased""" +311 63 dataset """kinships""" +311 63 model """hole""" +311 63 loss """marginranking""" +311 63 regularizer """no""" +311 63 optimizer """adadelta""" +311 63 training_loop """owa""" +311 63 negative_sampler """basic""" +311 63 evaluator """rankbased""" +311 64 dataset """kinships""" +311 64 model """hole""" +311 64 loss """marginranking""" +311 64 regularizer """no""" +311 64 optimizer """adadelta""" +311 64 training_loop """owa""" +311 64 negative_sampler """basic""" +311 64 evaluator """rankbased""" +311 65 dataset """kinships""" +311 65 model """hole""" +311 65 loss """marginranking""" +311 65 regularizer """no""" +311 65 optimizer """adadelta""" +311 65 training_loop """owa""" +311 65 negative_sampler """basic""" +311 65 evaluator """rankbased""" +311 66 dataset """kinships""" +311 66 model """hole""" +311 66 loss """marginranking""" +311 66 regularizer """no""" +311 66 optimizer """adadelta""" +311 66 training_loop """owa""" +311 66 negative_sampler """basic""" +311 66 evaluator """rankbased""" +311 67 dataset """kinships""" +311 67 model """hole""" +311 
67 loss """marginranking""" +311 67 regularizer """no""" +311 67 optimizer """adadelta""" +311 67 training_loop """owa""" +311 67 negative_sampler """basic""" +311 67 evaluator """rankbased""" +311 68 dataset """kinships""" +311 68 model """hole""" +311 68 loss """marginranking""" +311 68 regularizer """no""" +311 68 optimizer """adadelta""" +311 68 training_loop """owa""" +311 68 negative_sampler """basic""" +311 68 evaluator """rankbased""" +311 69 dataset """kinships""" +311 69 model """hole""" +311 69 loss """marginranking""" +311 69 regularizer """no""" +311 69 optimizer """adadelta""" +311 69 training_loop """owa""" +311 69 negative_sampler """basic""" +311 69 evaluator """rankbased""" +311 70 dataset """kinships""" +311 70 model """hole""" +311 70 loss """marginranking""" +311 70 regularizer """no""" +311 70 optimizer """adadelta""" +311 70 training_loop """owa""" +311 70 negative_sampler """basic""" +311 70 evaluator """rankbased""" +311 71 dataset """kinships""" +311 71 model """hole""" +311 71 loss """marginranking""" +311 71 regularizer """no""" +311 71 optimizer """adadelta""" +311 71 training_loop """owa""" +311 71 negative_sampler """basic""" +311 71 evaluator """rankbased""" +311 72 dataset """kinships""" +311 72 model """hole""" +311 72 loss """marginranking""" +311 72 regularizer """no""" +311 72 optimizer """adadelta""" +311 72 training_loop """owa""" +311 72 negative_sampler """basic""" +311 72 evaluator """rankbased""" +311 73 dataset """kinships""" +311 73 model """hole""" +311 73 loss """marginranking""" +311 73 regularizer """no""" +311 73 optimizer """adadelta""" +311 73 training_loop """owa""" +311 73 negative_sampler """basic""" +311 73 evaluator """rankbased""" +311 74 dataset """kinships""" +311 74 model """hole""" +311 74 loss """marginranking""" +311 74 regularizer """no""" +311 74 optimizer """adadelta""" +311 74 training_loop """owa""" +311 74 negative_sampler """basic""" +311 74 evaluator """rankbased""" +311 75 dataset 
"""kinships""" +311 75 model """hole""" +311 75 loss """marginranking""" +311 75 regularizer """no""" +311 75 optimizer """adadelta""" +311 75 training_loop """owa""" +311 75 negative_sampler """basic""" +311 75 evaluator """rankbased""" +311 76 dataset """kinships""" +311 76 model """hole""" +311 76 loss """marginranking""" +311 76 regularizer """no""" +311 76 optimizer """adadelta""" +311 76 training_loop """owa""" +311 76 negative_sampler """basic""" +311 76 evaluator """rankbased""" +311 77 dataset """kinships""" +311 77 model """hole""" +311 77 loss """marginranking""" +311 77 regularizer """no""" +311 77 optimizer """adadelta""" +311 77 training_loop """owa""" +311 77 negative_sampler """basic""" +311 77 evaluator """rankbased""" +311 78 dataset """kinships""" +311 78 model """hole""" +311 78 loss """marginranking""" +311 78 regularizer """no""" +311 78 optimizer """adadelta""" +311 78 training_loop """owa""" +311 78 negative_sampler """basic""" +311 78 evaluator """rankbased""" +311 79 dataset """kinships""" +311 79 model """hole""" +311 79 loss """marginranking""" +311 79 regularizer """no""" +311 79 optimizer """adadelta""" +311 79 training_loop """owa""" +311 79 negative_sampler """basic""" +311 79 evaluator """rankbased""" +311 80 dataset """kinships""" +311 80 model """hole""" +311 80 loss """marginranking""" +311 80 regularizer """no""" +311 80 optimizer """adadelta""" +311 80 training_loop """owa""" +311 80 negative_sampler """basic""" +311 80 evaluator """rankbased""" +311 81 dataset """kinships""" +311 81 model """hole""" +311 81 loss """marginranking""" +311 81 regularizer """no""" +311 81 optimizer """adadelta""" +311 81 training_loop """owa""" +311 81 negative_sampler """basic""" +311 81 evaluator """rankbased""" +311 82 dataset """kinships""" +311 82 model """hole""" +311 82 loss """marginranking""" +311 82 regularizer """no""" +311 82 optimizer """adadelta""" +311 82 training_loop """owa""" +311 82 negative_sampler """basic""" +311 82 evaluator 
"""rankbased""" +311 83 dataset """kinships""" +311 83 model """hole""" +311 83 loss """marginranking""" +311 83 regularizer """no""" +311 83 optimizer """adadelta""" +311 83 training_loop """owa""" +311 83 negative_sampler """basic""" +311 83 evaluator """rankbased""" +311 84 dataset """kinships""" +311 84 model """hole""" +311 84 loss """marginranking""" +311 84 regularizer """no""" +311 84 optimizer """adadelta""" +311 84 training_loop """owa""" +311 84 negative_sampler """basic""" +311 84 evaluator """rankbased""" +311 85 dataset """kinships""" +311 85 model """hole""" +311 85 loss """marginranking""" +311 85 regularizer """no""" +311 85 optimizer """adadelta""" +311 85 training_loop """owa""" +311 85 negative_sampler """basic""" +311 85 evaluator """rankbased""" +311 86 dataset """kinships""" +311 86 model """hole""" +311 86 loss """marginranking""" +311 86 regularizer """no""" +311 86 optimizer """adadelta""" +311 86 training_loop """owa""" +311 86 negative_sampler """basic""" +311 86 evaluator """rankbased""" +311 87 dataset """kinships""" +311 87 model """hole""" +311 87 loss """marginranking""" +311 87 regularizer """no""" +311 87 optimizer """adadelta""" +311 87 training_loop """owa""" +311 87 negative_sampler """basic""" +311 87 evaluator """rankbased""" +311 88 dataset """kinships""" +311 88 model """hole""" +311 88 loss """marginranking""" +311 88 regularizer """no""" +311 88 optimizer """adadelta""" +311 88 training_loop """owa""" +311 88 negative_sampler """basic""" +311 88 evaluator """rankbased""" +311 89 dataset """kinships""" +311 89 model """hole""" +311 89 loss """marginranking""" +311 89 regularizer """no""" +311 89 optimizer """adadelta""" +311 89 training_loop """owa""" +311 89 negative_sampler """basic""" +311 89 evaluator """rankbased""" +311 90 dataset """kinships""" +311 90 model """hole""" +311 90 loss """marginranking""" +311 90 regularizer """no""" +311 90 optimizer """adadelta""" +311 90 training_loop """owa""" +311 90 
negative_sampler """basic""" +311 90 evaluator """rankbased""" +311 91 dataset """kinships""" +311 91 model """hole""" +311 91 loss """marginranking""" +311 91 regularizer """no""" +311 91 optimizer """adadelta""" +311 91 training_loop """owa""" +311 91 negative_sampler """basic""" +311 91 evaluator """rankbased""" +311 92 dataset """kinships""" +311 92 model """hole""" +311 92 loss """marginranking""" +311 92 regularizer """no""" +311 92 optimizer """adadelta""" +311 92 training_loop """owa""" +311 92 negative_sampler """basic""" +311 92 evaluator """rankbased""" +311 93 dataset """kinships""" +311 93 model """hole""" +311 93 loss """marginranking""" +311 93 regularizer """no""" +311 93 optimizer """adadelta""" +311 93 training_loop """owa""" +311 93 negative_sampler """basic""" +311 93 evaluator """rankbased""" +311 94 dataset """kinships""" +311 94 model """hole""" +311 94 loss """marginranking""" +311 94 regularizer """no""" +311 94 optimizer """adadelta""" +311 94 training_loop """owa""" +311 94 negative_sampler """basic""" +311 94 evaluator """rankbased""" +311 95 dataset """kinships""" +311 95 model """hole""" +311 95 loss """marginranking""" +311 95 regularizer """no""" +311 95 optimizer """adadelta""" +311 95 training_loop """owa""" +311 95 negative_sampler """basic""" +311 95 evaluator """rankbased""" +311 96 dataset """kinships""" +311 96 model """hole""" +311 96 loss """marginranking""" +311 96 regularizer """no""" +311 96 optimizer """adadelta""" +311 96 training_loop """owa""" +311 96 negative_sampler """basic""" +311 96 evaluator """rankbased""" +311 97 dataset """kinships""" +311 97 model """hole""" +311 97 loss """marginranking""" +311 97 regularizer """no""" +311 97 optimizer """adadelta""" +311 97 training_loop """owa""" +311 97 negative_sampler """basic""" +311 97 evaluator """rankbased""" +311 98 dataset """kinships""" +311 98 model """hole""" +311 98 loss """marginranking""" +311 98 regularizer """no""" +311 98 optimizer """adadelta""" +311 98 
training_loop """owa""" +311 98 negative_sampler """basic""" +311 98 evaluator """rankbased""" +311 99 dataset """kinships""" +311 99 model """hole""" +311 99 loss """marginranking""" +311 99 regularizer """no""" +311 99 optimizer """adadelta""" +311 99 training_loop """owa""" +311 99 negative_sampler """basic""" +311 99 evaluator """rankbased""" +311 100 dataset """kinships""" +311 100 model """hole""" +311 100 loss """marginranking""" +311 100 regularizer """no""" +311 100 optimizer """adadelta""" +311 100 training_loop """owa""" +311 100 negative_sampler """basic""" +311 100 evaluator """rankbased""" +312 1 model.embedding_dim 2.0 +312 1 optimizer.lr 0.005449293720278468 +312 1 training.batch_size 2.0 +312 1 training.label_smoothing 0.0018243390434907023 +312 2 model.embedding_dim 1.0 +312 2 optimizer.lr 0.0058818033625789605 +312 2 training.batch_size 1.0 +312 2 training.label_smoothing 0.0017437337822961384 +312 3 model.embedding_dim 0.0 +312 3 optimizer.lr 0.04783001282999461 +312 3 training.batch_size 2.0 +312 3 training.label_smoothing 0.001526784459122344 +312 4 model.embedding_dim 0.0 +312 4 optimizer.lr 0.019959932107182495 +312 4 training.batch_size 0.0 +312 4 training.label_smoothing 0.002686774401482847 +312 5 model.embedding_dim 2.0 +312 5 optimizer.lr 0.01795163444593449 +312 5 training.batch_size 0.0 +312 5 training.label_smoothing 0.4830568869767575 +312 6 model.embedding_dim 2.0 +312 6 optimizer.lr 0.014175595542110406 +312 6 training.batch_size 2.0 +312 6 training.label_smoothing 0.04381248506175812 +312 7 model.embedding_dim 2.0 +312 7 optimizer.lr 0.0013968424839416599 +312 7 training.batch_size 2.0 +312 7 training.label_smoothing 0.06144722548360899 +312 8 model.embedding_dim 0.0 +312 8 optimizer.lr 0.0053602456034131975 +312 8 training.batch_size 1.0 +312 8 training.label_smoothing 0.022080717432175104 +312 9 model.embedding_dim 2.0 +312 9 optimizer.lr 0.005961510665894223 +312 9 training.batch_size 1.0 +312 9 training.label_smoothing 
0.42761124733948647 +312 10 model.embedding_dim 1.0 +312 10 optimizer.lr 0.008820555123023849 +312 10 training.batch_size 1.0 +312 10 training.label_smoothing 0.005951328377381302 +312 11 model.embedding_dim 1.0 +312 11 optimizer.lr 0.021864599552490552 +312 11 training.batch_size 0.0 +312 11 training.label_smoothing 0.023645451820792474 +312 12 model.embedding_dim 0.0 +312 12 optimizer.lr 0.0011141925309850382 +312 12 training.batch_size 1.0 +312 12 training.label_smoothing 0.09149670938522622 +312 13 model.embedding_dim 0.0 +312 13 optimizer.lr 0.023276104010170797 +312 13 training.batch_size 2.0 +312 13 training.label_smoothing 0.002929066069652386 +312 14 model.embedding_dim 2.0 +312 14 optimizer.lr 0.0406615360168502 +312 14 training.batch_size 1.0 +312 14 training.label_smoothing 0.028330968883361896 +312 15 model.embedding_dim 1.0 +312 15 optimizer.lr 0.02279466728774242 +312 15 training.batch_size 1.0 +312 15 training.label_smoothing 0.11826447681750779 +312 16 model.embedding_dim 0.0 +312 16 optimizer.lr 0.005286807942095487 +312 16 training.batch_size 0.0 +312 16 training.label_smoothing 0.00427259279023969 +312 17 model.embedding_dim 1.0 +312 17 optimizer.lr 0.030930039099128823 +312 17 training.batch_size 1.0 +312 17 training.label_smoothing 0.0013169419962737441 +312 18 model.embedding_dim 1.0 +312 18 optimizer.lr 0.0030283966469285993 +312 18 training.batch_size 1.0 +312 18 training.label_smoothing 0.0018054912381103502 +312 19 model.embedding_dim 2.0 +312 19 optimizer.lr 0.09067172102243967 +312 19 training.batch_size 1.0 +312 19 training.label_smoothing 0.30910705924274556 +312 20 model.embedding_dim 0.0 +312 20 optimizer.lr 0.023226782190185393 +312 20 training.batch_size 0.0 +312 20 training.label_smoothing 0.007925893978717105 +312 21 model.embedding_dim 1.0 +312 21 optimizer.lr 0.0016348488488195612 +312 21 training.batch_size 0.0 +312 21 training.label_smoothing 0.002726350028806438 +312 22 model.embedding_dim 2.0 +312 22 optimizer.lr 
0.012961068445788268 +312 22 training.batch_size 1.0 +312 22 training.label_smoothing 0.0037209199052791564 +312 23 model.embedding_dim 0.0 +312 23 optimizer.lr 0.0018893585603348556 +312 23 training.batch_size 0.0 +312 23 training.label_smoothing 0.15685762649251003 +312 24 model.embedding_dim 1.0 +312 24 optimizer.lr 0.02037872933803377 +312 24 training.batch_size 0.0 +312 24 training.label_smoothing 0.10227307544776587 +312 25 model.embedding_dim 0.0 +312 25 optimizer.lr 0.0016702546404062826 +312 25 training.batch_size 2.0 +312 25 training.label_smoothing 0.018812897855328862 +312 26 model.embedding_dim 0.0 +312 26 optimizer.lr 0.0010725409521337584 +312 26 training.batch_size 1.0 +312 26 training.label_smoothing 0.0054725739471785315 +312 27 model.embedding_dim 0.0 +312 27 optimizer.lr 0.07063127241045593 +312 27 training.batch_size 0.0 +312 27 training.label_smoothing 0.7519408173584876 +312 28 model.embedding_dim 0.0 +312 28 optimizer.lr 0.010345793244354357 +312 28 training.batch_size 0.0 +312 28 training.label_smoothing 0.24018756525884918 +312 29 model.embedding_dim 0.0 +312 29 optimizer.lr 0.007511639939079357 +312 29 training.batch_size 2.0 +312 29 training.label_smoothing 0.007417035581465693 +312 30 model.embedding_dim 2.0 +312 30 optimizer.lr 0.017153036254317067 +312 30 training.batch_size 2.0 +312 30 training.label_smoothing 0.07680576900381085 +312 31 model.embedding_dim 0.0 +312 31 optimizer.lr 0.0014985243198303829 +312 31 training.batch_size 2.0 +312 31 training.label_smoothing 0.284743305499156 +312 32 model.embedding_dim 2.0 +312 32 optimizer.lr 0.017993523456170484 +312 32 training.batch_size 2.0 +312 32 training.label_smoothing 0.2065542417690676 +312 33 model.embedding_dim 2.0 +312 33 optimizer.lr 0.05773003441848484 +312 33 training.batch_size 2.0 +312 33 training.label_smoothing 0.003338586008546694 +312 34 model.embedding_dim 1.0 +312 34 optimizer.lr 0.0191802827976044 +312 34 training.batch_size 1.0 +312 34 training.label_smoothing 
0.004435448760637139 +312 35 model.embedding_dim 0.0 +312 35 optimizer.lr 0.007847339292929422 +312 35 training.batch_size 1.0 +312 35 training.label_smoothing 0.01932699862463733 +312 36 model.embedding_dim 2.0 +312 36 optimizer.lr 0.04765686445031863 +312 36 training.batch_size 0.0 +312 36 training.label_smoothing 0.012966739819812126 +312 37 model.embedding_dim 0.0 +312 37 optimizer.lr 0.05165464384256352 +312 37 training.batch_size 2.0 +312 37 training.label_smoothing 0.017891788422237282 +312 38 model.embedding_dim 2.0 +312 38 optimizer.lr 0.03984068558312891 +312 38 training.batch_size 1.0 +312 38 training.label_smoothing 0.23518370310709075 +312 39 model.embedding_dim 1.0 +312 39 optimizer.lr 0.05719273722383488 +312 39 training.batch_size 2.0 +312 39 training.label_smoothing 0.2902896901198017 +312 40 model.embedding_dim 1.0 +312 40 optimizer.lr 0.0010100146288990197 +312 40 training.batch_size 0.0 +312 40 training.label_smoothing 0.16283168112154536 +312 41 model.embedding_dim 1.0 +312 41 optimizer.lr 0.0011940395886085716 +312 41 training.batch_size 0.0 +312 41 training.label_smoothing 0.07483595930191718 +312 42 model.embedding_dim 0.0 +312 42 optimizer.lr 0.00976798465353699 +312 42 training.batch_size 2.0 +312 42 training.label_smoothing 0.06056072583045639 +312 43 model.embedding_dim 2.0 +312 43 optimizer.lr 0.001718045678902133 +312 43 training.batch_size 2.0 +312 43 training.label_smoothing 0.7217244295447519 +312 44 model.embedding_dim 1.0 +312 44 optimizer.lr 0.031830349911542884 +312 44 training.batch_size 0.0 +312 44 training.label_smoothing 0.3395614280130767 +312 45 model.embedding_dim 0.0 +312 45 optimizer.lr 0.026137201179326046 +312 45 training.batch_size 0.0 +312 45 training.label_smoothing 0.3176890291601961 +312 46 model.embedding_dim 0.0 +312 46 optimizer.lr 0.02336839684466369 +312 46 training.batch_size 0.0 +312 46 training.label_smoothing 0.04144315409858653 +312 47 model.embedding_dim 1.0 +312 47 optimizer.lr 0.03925306909761849 
+312 47 training.batch_size 0.0 +312 47 training.label_smoothing 0.15485627981634445 +312 48 model.embedding_dim 1.0 +312 48 optimizer.lr 0.048363480751741295 +312 48 training.batch_size 0.0 +312 48 training.label_smoothing 0.04936555780557133 +312 49 model.embedding_dim 2.0 +312 49 optimizer.lr 0.020573645593484173 +312 49 training.batch_size 0.0 +312 49 training.label_smoothing 0.09510625119641915 +312 50 model.embedding_dim 0.0 +312 50 optimizer.lr 0.02788139272543537 +312 50 training.batch_size 0.0 +312 50 training.label_smoothing 0.06001459193772652 +312 51 model.embedding_dim 2.0 +312 51 optimizer.lr 0.009585834113187482 +312 51 training.batch_size 1.0 +312 51 training.label_smoothing 0.0010285458630868221 +312 52 model.embedding_dim 2.0 +312 52 optimizer.lr 0.04561818423290264 +312 52 training.batch_size 0.0 +312 52 training.label_smoothing 0.004079312133333388 +312 53 model.embedding_dim 0.0 +312 53 optimizer.lr 0.008457023965892943 +312 53 training.batch_size 0.0 +312 53 training.label_smoothing 0.7538686368104833 +312 54 model.embedding_dim 1.0 +312 54 optimizer.lr 0.011663140758184592 +312 54 training.batch_size 2.0 +312 54 training.label_smoothing 0.08665748302630608 +312 55 model.embedding_dim 2.0 +312 55 optimizer.lr 0.005942768156389476 +312 55 training.batch_size 2.0 +312 55 training.label_smoothing 0.05351347436064379 +312 56 model.embedding_dim 0.0 +312 56 optimizer.lr 0.04598770789884832 +312 56 training.batch_size 1.0 +312 56 training.label_smoothing 0.9030044744086876 +312 57 model.embedding_dim 2.0 +312 57 optimizer.lr 0.004135945383647099 +312 57 training.batch_size 0.0 +312 57 training.label_smoothing 0.12398156184877247 +312 58 model.embedding_dim 2.0 +312 58 optimizer.lr 0.0043182968392761235 +312 58 training.batch_size 1.0 +312 58 training.label_smoothing 0.23501971362170093 +312 59 model.embedding_dim 0.0 +312 59 optimizer.lr 0.0025053301050532153 +312 59 training.batch_size 1.0 +312 59 training.label_smoothing 0.0844043137041612 +312 60 
model.embedding_dim 2.0 +312 60 optimizer.lr 0.001745082315922346 +312 60 training.batch_size 0.0 +312 60 training.label_smoothing 0.08329466569792278 +312 61 model.embedding_dim 2.0 +312 61 optimizer.lr 0.004095030039018582 +312 61 training.batch_size 2.0 +312 61 training.label_smoothing 0.5457205255153603 +312 62 model.embedding_dim 0.0 +312 62 optimizer.lr 0.04461417634550186 +312 62 training.batch_size 0.0 +312 62 training.label_smoothing 0.6771705444710744 +312 63 model.embedding_dim 2.0 +312 63 optimizer.lr 0.006351348102454464 +312 63 training.batch_size 1.0 +312 63 training.label_smoothing 0.9887399080052768 +312 64 model.embedding_dim 2.0 +312 64 optimizer.lr 0.008132434775300384 +312 64 training.batch_size 0.0 +312 64 training.label_smoothing 0.06409073519087466 +312 65 model.embedding_dim 1.0 +312 65 optimizer.lr 0.003165760841637512 +312 65 training.batch_size 0.0 +312 65 training.label_smoothing 0.00462502975936042 +312 66 model.embedding_dim 1.0 +312 66 optimizer.lr 0.004954877652552605 +312 66 training.batch_size 0.0 +312 66 training.label_smoothing 0.9430475154436385 +312 67 model.embedding_dim 2.0 +312 67 optimizer.lr 0.0014189996703991696 +312 67 training.batch_size 0.0 +312 67 training.label_smoothing 0.0012851194649870338 +312 68 model.embedding_dim 0.0 +312 68 optimizer.lr 0.07334767530539543 +312 68 training.batch_size 2.0 +312 68 training.label_smoothing 0.04382837373008491 +312 69 model.embedding_dim 0.0 +312 69 optimizer.lr 0.00869449092451277 +312 69 training.batch_size 0.0 +312 69 training.label_smoothing 0.001268723205757431 +312 70 model.embedding_dim 2.0 +312 70 optimizer.lr 0.007219285543492611 +312 70 training.batch_size 2.0 +312 70 training.label_smoothing 0.1964344817086035 +312 71 model.embedding_dim 2.0 +312 71 optimizer.lr 0.0016117326199072315 +312 71 training.batch_size 1.0 +312 71 training.label_smoothing 0.18145204290085976 +312 72 model.embedding_dim 2.0 +312 72 optimizer.lr 0.0056551088117153215 +312 72 training.batch_size 
0.0 +312 72 training.label_smoothing 0.0374094154462013 +312 73 model.embedding_dim 1.0 +312 73 optimizer.lr 0.0330512961626621 +312 73 training.batch_size 1.0 +312 73 training.label_smoothing 0.3612927385883508 +312 74 model.embedding_dim 1.0 +312 74 optimizer.lr 0.04937520417517698 +312 74 training.batch_size 1.0 +312 74 training.label_smoothing 0.11909725893285084 +312 75 model.embedding_dim 1.0 +312 75 optimizer.lr 0.002703788364647137 +312 75 training.batch_size 0.0 +312 75 training.label_smoothing 0.005412094348823897 +312 76 model.embedding_dim 0.0 +312 76 optimizer.lr 0.031607850827759076 +312 76 training.batch_size 2.0 +312 76 training.label_smoothing 0.01915075070480334 +312 77 model.embedding_dim 2.0 +312 77 optimizer.lr 0.031358506808084156 +312 77 training.batch_size 1.0 +312 77 training.label_smoothing 0.0010800863771819364 +312 78 model.embedding_dim 2.0 +312 78 optimizer.lr 0.006017853438114134 +312 78 training.batch_size 0.0 +312 78 training.label_smoothing 0.09408489259338211 +312 79 model.embedding_dim 1.0 +312 79 optimizer.lr 0.04913639779842463 +312 79 training.batch_size 1.0 +312 79 training.label_smoothing 0.001320649520052065 +312 80 model.embedding_dim 1.0 +312 80 optimizer.lr 0.050344697143405476 +312 80 training.batch_size 2.0 +312 80 training.label_smoothing 0.015443847964796259 +312 81 model.embedding_dim 2.0 +312 81 optimizer.lr 0.014329222541182816 +312 81 training.batch_size 2.0 +312 81 training.label_smoothing 0.4443232420266851 +312 82 model.embedding_dim 2.0 +312 82 optimizer.lr 0.0018158149134556102 +312 82 training.batch_size 1.0 +312 82 training.label_smoothing 0.0014983142615405324 +312 83 model.embedding_dim 2.0 +312 83 optimizer.lr 0.010458953528244252 +312 83 training.batch_size 2.0 +312 83 training.label_smoothing 0.1937904446979046 +312 84 model.embedding_dim 0.0 +312 84 optimizer.lr 0.0036303776158204248 +312 84 training.batch_size 0.0 +312 84 training.label_smoothing 0.001268689980246509 +312 85 model.embedding_dim 2.0 
+312 85 optimizer.lr 0.06556629552006615 +312 85 training.batch_size 0.0 +312 85 training.label_smoothing 0.07403813813669177 +312 86 model.embedding_dim 0.0 +312 86 optimizer.lr 0.004685685343856333 +312 86 training.batch_size 0.0 +312 86 training.label_smoothing 0.052760527625702734 +312 87 model.embedding_dim 2.0 +312 87 optimizer.lr 0.015585405135049833 +312 87 training.batch_size 2.0 +312 87 training.label_smoothing 0.01333044680474813 +312 88 model.embedding_dim 0.0 +312 88 optimizer.lr 0.007405017745155089 +312 88 training.batch_size 0.0 +312 88 training.label_smoothing 0.48782356308682817 +312 89 model.embedding_dim 2.0 +312 89 optimizer.lr 0.03267179761303645 +312 89 training.batch_size 1.0 +312 89 training.label_smoothing 0.1530857860996377 +312 90 model.embedding_dim 2.0 +312 90 optimizer.lr 0.0010265611119675238 +312 90 training.batch_size 1.0 +312 90 training.label_smoothing 0.008149098250935233 +312 91 model.embedding_dim 1.0 +312 91 optimizer.lr 0.002326572942221591 +312 91 training.batch_size 0.0 +312 91 training.label_smoothing 0.25516809966290693 +312 92 model.embedding_dim 1.0 +312 92 optimizer.lr 0.08120726848020082 +312 92 training.batch_size 1.0 +312 92 training.label_smoothing 0.03062854221004069 +312 93 model.embedding_dim 0.0 +312 93 optimizer.lr 0.004198420369180308 +312 93 training.batch_size 0.0 +312 93 training.label_smoothing 0.023558060034764166 +312 94 model.embedding_dim 0.0 +312 94 optimizer.lr 0.0019991025000226544 +312 94 training.batch_size 1.0 +312 94 training.label_smoothing 0.028115675364429958 +312 95 model.embedding_dim 1.0 +312 95 optimizer.lr 0.05526234476263272 +312 95 training.batch_size 0.0 +312 95 training.label_smoothing 0.07329367521847582 +312 96 model.embedding_dim 1.0 +312 96 optimizer.lr 0.008313339313849264 +312 96 training.batch_size 0.0 +312 96 training.label_smoothing 0.01182957653301885 +312 97 model.embedding_dim 2.0 +312 97 optimizer.lr 0.0022133677629724026 +312 97 training.batch_size 0.0 +312 97 
training.label_smoothing 0.1113714131971634 +312 98 model.embedding_dim 2.0 +312 98 optimizer.lr 0.0059325827001103695 +312 98 training.batch_size 0.0 +312 98 training.label_smoothing 0.05377777333834199 +312 99 model.embedding_dim 1.0 +312 99 optimizer.lr 0.007845015865611425 +312 99 training.batch_size 1.0 +312 99 training.label_smoothing 0.06988133261207373 +312 100 model.embedding_dim 0.0 +312 100 optimizer.lr 0.020217997372304206 +312 100 training.batch_size 1.0 +312 100 training.label_smoothing 0.06902309308736693 +312 1 dataset """kinships""" +312 1 model """hole""" +312 1 loss """bceaftersigmoid""" +312 1 regularizer """no""" +312 1 optimizer """adam""" +312 1 training_loop """lcwa""" +312 1 evaluator """rankbased""" +312 2 dataset """kinships""" +312 2 model """hole""" +312 2 loss """bceaftersigmoid""" +312 2 regularizer """no""" +312 2 optimizer """adam""" +312 2 training_loop """lcwa""" +312 2 evaluator """rankbased""" +312 3 dataset """kinships""" +312 3 model """hole""" +312 3 loss """bceaftersigmoid""" +312 3 regularizer """no""" +312 3 optimizer """adam""" +312 3 training_loop """lcwa""" +312 3 evaluator """rankbased""" +312 4 dataset """kinships""" +312 4 model """hole""" +312 4 loss """bceaftersigmoid""" +312 4 regularizer """no""" +312 4 optimizer """adam""" +312 4 training_loop """lcwa""" +312 4 evaluator """rankbased""" +312 5 dataset """kinships""" +312 5 model """hole""" +312 5 loss """bceaftersigmoid""" +312 5 regularizer """no""" +312 5 optimizer """adam""" +312 5 training_loop """lcwa""" +312 5 evaluator """rankbased""" +312 6 dataset """kinships""" +312 6 model """hole""" +312 6 loss """bceaftersigmoid""" +312 6 regularizer """no""" +312 6 optimizer """adam""" +312 6 training_loop """lcwa""" +312 6 evaluator """rankbased""" +312 7 dataset """kinships""" +312 7 model """hole""" +312 7 loss """bceaftersigmoid""" +312 7 regularizer """no""" +312 7 optimizer """adam""" +312 7 training_loop """lcwa""" +312 7 evaluator """rankbased""" +312 8 
dataset """kinships""" +312 8 model """hole""" +312 8 loss """bceaftersigmoid""" +312 8 regularizer """no""" +312 8 optimizer """adam""" +312 8 training_loop """lcwa""" +312 8 evaluator """rankbased""" +312 9 dataset """kinships""" +312 9 model """hole""" +312 9 loss """bceaftersigmoid""" +312 9 regularizer """no""" +312 9 optimizer """adam""" +312 9 training_loop """lcwa""" +312 9 evaluator """rankbased""" +312 10 dataset """kinships""" +312 10 model """hole""" +312 10 loss """bceaftersigmoid""" +312 10 regularizer """no""" +312 10 optimizer """adam""" +312 10 training_loop """lcwa""" +312 10 evaluator """rankbased""" +312 11 dataset """kinships""" +312 11 model """hole""" +312 11 loss """bceaftersigmoid""" +312 11 regularizer """no""" +312 11 optimizer """adam""" +312 11 training_loop """lcwa""" +312 11 evaluator """rankbased""" +312 12 dataset """kinships""" +312 12 model """hole""" +312 12 loss """bceaftersigmoid""" +312 12 regularizer """no""" +312 12 optimizer """adam""" +312 12 training_loop """lcwa""" +312 12 evaluator """rankbased""" +312 13 dataset """kinships""" +312 13 model """hole""" +312 13 loss """bceaftersigmoid""" +312 13 regularizer """no""" +312 13 optimizer """adam""" +312 13 training_loop """lcwa""" +312 13 evaluator """rankbased""" +312 14 dataset """kinships""" +312 14 model """hole""" +312 14 loss """bceaftersigmoid""" +312 14 regularizer """no""" +312 14 optimizer """adam""" +312 14 training_loop """lcwa""" +312 14 evaluator """rankbased""" +312 15 dataset """kinships""" +312 15 model """hole""" +312 15 loss """bceaftersigmoid""" +312 15 regularizer """no""" +312 15 optimizer """adam""" +312 15 training_loop """lcwa""" +312 15 evaluator """rankbased""" +312 16 dataset """kinships""" +312 16 model """hole""" +312 16 loss """bceaftersigmoid""" +312 16 regularizer """no""" +312 16 optimizer """adam""" +312 16 training_loop """lcwa""" +312 16 evaluator """rankbased""" +312 17 dataset """kinships""" +312 17 model """hole""" +312 17 loss 
"""bceaftersigmoid""" +312 17 regularizer """no""" +312 17 optimizer """adam""" +312 17 training_loop """lcwa""" +312 17 evaluator """rankbased""" +312 18 dataset """kinships""" +312 18 model """hole""" +312 18 loss """bceaftersigmoid""" +312 18 regularizer """no""" +312 18 optimizer """adam""" +312 18 training_loop """lcwa""" +312 18 evaluator """rankbased""" +312 19 dataset """kinships""" +312 19 model """hole""" +312 19 loss """bceaftersigmoid""" +312 19 regularizer """no""" +312 19 optimizer """adam""" +312 19 training_loop """lcwa""" +312 19 evaluator """rankbased""" +312 20 dataset """kinships""" +312 20 model """hole""" +312 20 loss """bceaftersigmoid""" +312 20 regularizer """no""" +312 20 optimizer """adam""" +312 20 training_loop """lcwa""" +312 20 evaluator """rankbased""" +312 21 dataset """kinships""" +312 21 model """hole""" +312 21 loss """bceaftersigmoid""" +312 21 regularizer """no""" +312 21 optimizer """adam""" +312 21 training_loop """lcwa""" +312 21 evaluator """rankbased""" +312 22 dataset """kinships""" +312 22 model """hole""" +312 22 loss """bceaftersigmoid""" +312 22 regularizer """no""" +312 22 optimizer """adam""" +312 22 training_loop """lcwa""" +312 22 evaluator """rankbased""" +312 23 dataset """kinships""" +312 23 model """hole""" +312 23 loss """bceaftersigmoid""" +312 23 regularizer """no""" +312 23 optimizer """adam""" +312 23 training_loop """lcwa""" +312 23 evaluator """rankbased""" +312 24 dataset """kinships""" +312 24 model """hole""" +312 24 loss """bceaftersigmoid""" +312 24 regularizer """no""" +312 24 optimizer """adam""" +312 24 training_loop """lcwa""" +312 24 evaluator """rankbased""" +312 25 dataset """kinships""" +312 25 model """hole""" +312 25 loss """bceaftersigmoid""" +312 25 regularizer """no""" +312 25 optimizer """adam""" +312 25 training_loop """lcwa""" +312 25 evaluator """rankbased""" +312 26 dataset """kinships""" +312 26 model """hole""" +312 26 loss """bceaftersigmoid""" +312 26 regularizer """no""" +312 
26 optimizer """adam""" +312 26 training_loop """lcwa""" +312 26 evaluator """rankbased""" +312 27 dataset """kinships""" +312 27 model """hole""" +312 27 loss """bceaftersigmoid""" +312 27 regularizer """no""" +312 27 optimizer """adam""" +312 27 training_loop """lcwa""" +312 27 evaluator """rankbased""" +312 28 dataset """kinships""" +312 28 model """hole""" +312 28 loss """bceaftersigmoid""" +312 28 regularizer """no""" +312 28 optimizer """adam""" +312 28 training_loop """lcwa""" +312 28 evaluator """rankbased""" +312 29 dataset """kinships""" +312 29 model """hole""" +312 29 loss """bceaftersigmoid""" +312 29 regularizer """no""" +312 29 optimizer """adam""" +312 29 training_loop """lcwa""" +312 29 evaluator """rankbased""" +312 30 dataset """kinships""" +312 30 model """hole""" +312 30 loss """bceaftersigmoid""" +312 30 regularizer """no""" +312 30 optimizer """adam""" +312 30 training_loop """lcwa""" +312 30 evaluator """rankbased""" +312 31 dataset """kinships""" +312 31 model """hole""" +312 31 loss """bceaftersigmoid""" +312 31 regularizer """no""" +312 31 optimizer """adam""" +312 31 training_loop """lcwa""" +312 31 evaluator """rankbased""" +312 32 dataset """kinships""" +312 32 model """hole""" +312 32 loss """bceaftersigmoid""" +312 32 regularizer """no""" +312 32 optimizer """adam""" +312 32 training_loop """lcwa""" +312 32 evaluator """rankbased""" +312 33 dataset """kinships""" +312 33 model """hole""" +312 33 loss """bceaftersigmoid""" +312 33 regularizer """no""" +312 33 optimizer """adam""" +312 33 training_loop """lcwa""" +312 33 evaluator """rankbased""" +312 34 dataset """kinships""" +312 34 model """hole""" +312 34 loss """bceaftersigmoid""" +312 34 regularizer """no""" +312 34 optimizer """adam""" +312 34 training_loop """lcwa""" +312 34 evaluator """rankbased""" +312 35 dataset """kinships""" +312 35 model """hole""" +312 35 loss """bceaftersigmoid""" +312 35 regularizer """no""" +312 35 optimizer """adam""" +312 35 training_loop 
"""lcwa""" +312 35 evaluator """rankbased""" +312 36 dataset """kinships""" +312 36 model """hole""" +312 36 loss """bceaftersigmoid""" +312 36 regularizer """no""" +312 36 optimizer """adam""" +312 36 training_loop """lcwa""" +312 36 evaluator """rankbased""" +312 37 dataset """kinships""" +312 37 model """hole""" +312 37 loss """bceaftersigmoid""" +312 37 regularizer """no""" +312 37 optimizer """adam""" +312 37 training_loop """lcwa""" +312 37 evaluator """rankbased""" +312 38 dataset """kinships""" +312 38 model """hole""" +312 38 loss """bceaftersigmoid""" +312 38 regularizer """no""" +312 38 optimizer """adam""" +312 38 training_loop """lcwa""" +312 38 evaluator """rankbased""" +312 39 dataset """kinships""" +312 39 model """hole""" +312 39 loss """bceaftersigmoid""" +312 39 regularizer """no""" +312 39 optimizer """adam""" +312 39 training_loop """lcwa""" +312 39 evaluator """rankbased""" +312 40 dataset """kinships""" +312 40 model """hole""" +312 40 loss """bceaftersigmoid""" +312 40 regularizer """no""" +312 40 optimizer """adam""" +312 40 training_loop """lcwa""" +312 40 evaluator """rankbased""" +312 41 dataset """kinships""" +312 41 model """hole""" +312 41 loss """bceaftersigmoid""" +312 41 regularizer """no""" +312 41 optimizer """adam""" +312 41 training_loop """lcwa""" +312 41 evaluator """rankbased""" +312 42 dataset """kinships""" +312 42 model """hole""" +312 42 loss """bceaftersigmoid""" +312 42 regularizer """no""" +312 42 optimizer """adam""" +312 42 training_loop """lcwa""" +312 42 evaluator """rankbased""" +312 43 dataset """kinships""" +312 43 model """hole""" +312 43 loss """bceaftersigmoid""" +312 43 regularizer """no""" +312 43 optimizer """adam""" +312 43 training_loop """lcwa""" +312 43 evaluator """rankbased""" +312 44 dataset """kinships""" +312 44 model """hole""" +312 44 loss """bceaftersigmoid""" +312 44 regularizer """no""" +312 44 optimizer """adam""" +312 44 training_loop """lcwa""" +312 44 evaluator """rankbased""" +312 45 
dataset """kinships""" +312 45 model """hole""" +312 45 loss """bceaftersigmoid""" +312 45 regularizer """no""" +312 45 optimizer """adam""" +312 45 training_loop """lcwa""" +312 45 evaluator """rankbased""" +312 46 dataset """kinships""" +312 46 model """hole""" +312 46 loss """bceaftersigmoid""" +312 46 regularizer """no""" +312 46 optimizer """adam""" +312 46 training_loop """lcwa""" +312 46 evaluator """rankbased""" +312 47 dataset """kinships""" +312 47 model """hole""" +312 47 loss """bceaftersigmoid""" +312 47 regularizer """no""" +312 47 optimizer """adam""" +312 47 training_loop """lcwa""" +312 47 evaluator """rankbased""" +312 48 dataset """kinships""" +312 48 model """hole""" +312 48 loss """bceaftersigmoid""" +312 48 regularizer """no""" +312 48 optimizer """adam""" +312 48 training_loop """lcwa""" +312 48 evaluator """rankbased""" +312 49 dataset """kinships""" +312 49 model """hole""" +312 49 loss """bceaftersigmoid""" +312 49 regularizer """no""" +312 49 optimizer """adam""" +312 49 training_loop """lcwa""" +312 49 evaluator """rankbased""" +312 50 dataset """kinships""" +312 50 model """hole""" +312 50 loss """bceaftersigmoid""" +312 50 regularizer """no""" +312 50 optimizer """adam""" +312 50 training_loop """lcwa""" +312 50 evaluator """rankbased""" +312 51 dataset """kinships""" +312 51 model """hole""" +312 51 loss """bceaftersigmoid""" +312 51 regularizer """no""" +312 51 optimizer """adam""" +312 51 training_loop """lcwa""" +312 51 evaluator """rankbased""" +312 52 dataset """kinships""" +312 52 model """hole""" +312 52 loss """bceaftersigmoid""" +312 52 regularizer """no""" +312 52 optimizer """adam""" +312 52 training_loop """lcwa""" +312 52 evaluator """rankbased""" +312 53 dataset """kinships""" +312 53 model """hole""" +312 53 loss """bceaftersigmoid""" +312 53 regularizer """no""" +312 53 optimizer """adam""" +312 53 training_loop """lcwa""" +312 53 evaluator """rankbased""" +312 54 dataset """kinships""" +312 54 model """hole""" +312 54 
loss """bceaftersigmoid""" +312 54 regularizer """no""" +312 54 optimizer """adam""" +312 54 training_loop """lcwa""" +312 54 evaluator """rankbased""" +312 55 dataset """kinships""" +312 55 model """hole""" +312 55 loss """bceaftersigmoid""" +312 55 regularizer """no""" +312 55 optimizer """adam""" +312 55 training_loop """lcwa""" +312 55 evaluator """rankbased""" +312 56 dataset """kinships""" +312 56 model """hole""" +312 56 loss """bceaftersigmoid""" +312 56 regularizer """no""" +312 56 optimizer """adam""" +312 56 training_loop """lcwa""" +312 56 evaluator """rankbased""" +312 57 dataset """kinships""" +312 57 model """hole""" +312 57 loss """bceaftersigmoid""" +312 57 regularizer """no""" +312 57 optimizer """adam""" +312 57 training_loop """lcwa""" +312 57 evaluator """rankbased""" +312 58 dataset """kinships""" +312 58 model """hole""" +312 58 loss """bceaftersigmoid""" +312 58 regularizer """no""" +312 58 optimizer """adam""" +312 58 training_loop """lcwa""" +312 58 evaluator """rankbased""" +312 59 dataset """kinships""" +312 59 model """hole""" +312 59 loss """bceaftersigmoid""" +312 59 regularizer """no""" +312 59 optimizer """adam""" +312 59 training_loop """lcwa""" +312 59 evaluator """rankbased""" +312 60 dataset """kinships""" +312 60 model """hole""" +312 60 loss """bceaftersigmoid""" +312 60 regularizer """no""" +312 60 optimizer """adam""" +312 60 training_loop """lcwa""" +312 60 evaluator """rankbased""" +312 61 dataset """kinships""" +312 61 model """hole""" +312 61 loss """bceaftersigmoid""" +312 61 regularizer """no""" +312 61 optimizer """adam""" +312 61 training_loop """lcwa""" +312 61 evaluator """rankbased""" +312 62 dataset """kinships""" +312 62 model """hole""" +312 62 loss """bceaftersigmoid""" +312 62 regularizer """no""" +312 62 optimizer """adam""" +312 62 training_loop """lcwa""" +312 62 evaluator """rankbased""" +312 63 dataset """kinships""" +312 63 model """hole""" +312 63 loss """bceaftersigmoid""" +312 63 regularizer """no""" 
+312 63 optimizer """adam""" +312 63 training_loop """lcwa""" +312 63 evaluator """rankbased""" +312 64 dataset """kinships""" +312 64 model """hole""" +312 64 loss """bceaftersigmoid""" +312 64 regularizer """no""" +312 64 optimizer """adam""" +312 64 training_loop """lcwa""" +312 64 evaluator """rankbased""" +312 65 dataset """kinships""" +312 65 model """hole""" +312 65 loss """bceaftersigmoid""" +312 65 regularizer """no""" +312 65 optimizer """adam""" +312 65 training_loop """lcwa""" +312 65 evaluator """rankbased""" +312 66 dataset """kinships""" +312 66 model """hole""" +312 66 loss """bceaftersigmoid""" +312 66 regularizer """no""" +312 66 optimizer """adam""" +312 66 training_loop """lcwa""" +312 66 evaluator """rankbased""" +312 67 dataset """kinships""" +312 67 model """hole""" +312 67 loss """bceaftersigmoid""" +312 67 regularizer """no""" +312 67 optimizer """adam""" +312 67 training_loop """lcwa""" +312 67 evaluator """rankbased""" +312 68 dataset """kinships""" +312 68 model """hole""" +312 68 loss """bceaftersigmoid""" +312 68 regularizer """no""" +312 68 optimizer """adam""" +312 68 training_loop """lcwa""" +312 68 evaluator """rankbased""" +312 69 dataset """kinships""" +312 69 model """hole""" +312 69 loss """bceaftersigmoid""" +312 69 regularizer """no""" +312 69 optimizer """adam""" +312 69 training_loop """lcwa""" +312 69 evaluator """rankbased""" +312 70 dataset """kinships""" +312 70 model """hole""" +312 70 loss """bceaftersigmoid""" +312 70 regularizer """no""" +312 70 optimizer """adam""" +312 70 training_loop """lcwa""" +312 70 evaluator """rankbased""" +312 71 dataset """kinships""" +312 71 model """hole""" +312 71 loss """bceaftersigmoid""" +312 71 regularizer """no""" +312 71 optimizer """adam""" +312 71 training_loop """lcwa""" +312 71 evaluator """rankbased""" +312 72 dataset """kinships""" +312 72 model """hole""" +312 72 loss """bceaftersigmoid""" +312 72 regularizer """no""" +312 72 optimizer """adam""" +312 72 training_loop 
"""lcwa""" +312 72 evaluator """rankbased""" +312 73 dataset """kinships""" +312 73 model """hole""" +312 73 loss """bceaftersigmoid""" +312 73 regularizer """no""" +312 73 optimizer """adam""" +312 73 training_loop """lcwa""" +312 73 evaluator """rankbased""" +312 74 dataset """kinships""" +312 74 model """hole""" +312 74 loss """bceaftersigmoid""" +312 74 regularizer """no""" +312 74 optimizer """adam""" +312 74 training_loop """lcwa""" +312 74 evaluator """rankbased""" +312 75 dataset """kinships""" +312 75 model """hole""" +312 75 loss """bceaftersigmoid""" +312 75 regularizer """no""" +312 75 optimizer """adam""" +312 75 training_loop """lcwa""" +312 75 evaluator """rankbased""" +312 76 dataset """kinships""" +312 76 model """hole""" +312 76 loss """bceaftersigmoid""" +312 76 regularizer """no""" +312 76 optimizer """adam""" +312 76 training_loop """lcwa""" +312 76 evaluator """rankbased""" +312 77 dataset """kinships""" +312 77 model """hole""" +312 77 loss """bceaftersigmoid""" +312 77 regularizer """no""" +312 77 optimizer """adam""" +312 77 training_loop """lcwa""" +312 77 evaluator """rankbased""" +312 78 dataset """kinships""" +312 78 model """hole""" +312 78 loss """bceaftersigmoid""" +312 78 regularizer """no""" +312 78 optimizer """adam""" +312 78 training_loop """lcwa""" +312 78 evaluator """rankbased""" +312 79 dataset """kinships""" +312 79 model """hole""" +312 79 loss """bceaftersigmoid""" +312 79 regularizer """no""" +312 79 optimizer """adam""" +312 79 training_loop """lcwa""" +312 79 evaluator """rankbased""" +312 80 dataset """kinships""" +312 80 model """hole""" +312 80 loss """bceaftersigmoid""" +312 80 regularizer """no""" +312 80 optimizer """adam""" +312 80 training_loop """lcwa""" +312 80 evaluator """rankbased""" +312 81 dataset """kinships""" +312 81 model """hole""" +312 81 loss """bceaftersigmoid""" +312 81 regularizer """no""" +312 81 optimizer """adam""" +312 81 training_loop """lcwa""" +312 81 evaluator """rankbased""" +312 82 
dataset """kinships""" +312 82 model """hole""" +312 82 loss """bceaftersigmoid""" +312 82 regularizer """no""" +312 82 optimizer """adam""" +312 82 training_loop """lcwa""" +312 82 evaluator """rankbased""" +312 83 dataset """kinships""" +312 83 model """hole""" +312 83 loss """bceaftersigmoid""" +312 83 regularizer """no""" +312 83 optimizer """adam""" +312 83 training_loop """lcwa""" +312 83 evaluator """rankbased""" +312 84 dataset """kinships""" +312 84 model """hole""" +312 84 loss """bceaftersigmoid""" +312 84 regularizer """no""" +312 84 optimizer """adam""" +312 84 training_loop """lcwa""" +312 84 evaluator """rankbased""" +312 85 dataset """kinships""" +312 85 model """hole""" +312 85 loss """bceaftersigmoid""" +312 85 regularizer """no""" +312 85 optimizer """adam""" +312 85 training_loop """lcwa""" +312 85 evaluator """rankbased""" +312 86 dataset """kinships""" +312 86 model """hole""" +312 86 loss """bceaftersigmoid""" +312 86 regularizer """no""" +312 86 optimizer """adam""" +312 86 training_loop """lcwa""" +312 86 evaluator """rankbased""" +312 87 dataset """kinships""" +312 87 model """hole""" +312 87 loss """bceaftersigmoid""" +312 87 regularizer """no""" +312 87 optimizer """adam""" +312 87 training_loop """lcwa""" +312 87 evaluator """rankbased""" +312 88 dataset """kinships""" +312 88 model """hole""" +312 88 loss """bceaftersigmoid""" +312 88 regularizer """no""" +312 88 optimizer """adam""" +312 88 training_loop """lcwa""" +312 88 evaluator """rankbased""" +312 89 dataset """kinships""" +312 89 model """hole""" +312 89 loss """bceaftersigmoid""" +312 89 regularizer """no""" +312 89 optimizer """adam""" +312 89 training_loop """lcwa""" +312 89 evaluator """rankbased""" +312 90 dataset """kinships""" +312 90 model """hole""" +312 90 loss """bceaftersigmoid""" +312 90 regularizer """no""" +312 90 optimizer """adam""" +312 90 training_loop """lcwa""" +312 90 evaluator """rankbased""" +312 91 dataset """kinships""" +312 91 model """hole""" +312 91 
loss """bceaftersigmoid""" +312 91 regularizer """no""" +312 91 optimizer """adam""" +312 91 training_loop """lcwa""" +312 91 evaluator """rankbased""" +312 92 dataset """kinships""" +312 92 model """hole""" +312 92 loss """bceaftersigmoid""" +312 92 regularizer """no""" +312 92 optimizer """adam""" +312 92 training_loop """lcwa""" +312 92 evaluator """rankbased""" +312 93 dataset """kinships""" +312 93 model """hole""" +312 93 loss """bceaftersigmoid""" +312 93 regularizer """no""" +312 93 optimizer """adam""" +312 93 training_loop """lcwa""" +312 93 evaluator """rankbased""" +312 94 dataset """kinships""" +312 94 model """hole""" +312 94 loss """bceaftersigmoid""" +312 94 regularizer """no""" +312 94 optimizer """adam""" +312 94 training_loop """lcwa""" +312 94 evaluator """rankbased""" +312 95 dataset """kinships""" +312 95 model """hole""" +312 95 loss """bceaftersigmoid""" +312 95 regularizer """no""" +312 95 optimizer """adam""" +312 95 training_loop """lcwa""" +312 95 evaluator """rankbased""" +312 96 dataset """kinships""" +312 96 model """hole""" +312 96 loss """bceaftersigmoid""" +312 96 regularizer """no""" +312 96 optimizer """adam""" +312 96 training_loop """lcwa""" +312 96 evaluator """rankbased""" +312 97 dataset """kinships""" +312 97 model """hole""" +312 97 loss """bceaftersigmoid""" +312 97 regularizer """no""" +312 97 optimizer """adam""" +312 97 training_loop """lcwa""" +312 97 evaluator """rankbased""" +312 98 dataset """kinships""" +312 98 model """hole""" +312 98 loss """bceaftersigmoid""" +312 98 regularizer """no""" +312 98 optimizer """adam""" +312 98 training_loop """lcwa""" +312 98 evaluator """rankbased""" +312 99 dataset """kinships""" +312 99 model """hole""" +312 99 loss """bceaftersigmoid""" +312 99 regularizer """no""" +312 99 optimizer """adam""" +312 99 training_loop """lcwa""" +312 99 evaluator """rankbased""" +312 100 dataset """kinships""" +312 100 model """hole""" +312 100 loss """bceaftersigmoid""" +312 100 regularizer 
"""no""" +312 100 optimizer """adam""" +312 100 training_loop """lcwa""" +312 100 evaluator """rankbased""" +313 1 model.embedding_dim 0.0 +313 1 optimizer.lr 0.07025048578745188 +313 1 training.batch_size 1.0 +313 1 training.label_smoothing 0.14797647187463234 +313 2 model.embedding_dim 2.0 +313 2 optimizer.lr 0.03279059107587705 +313 2 training.batch_size 2.0 +313 2 training.label_smoothing 0.28452170744224387 +313 3 model.embedding_dim 0.0 +313 3 optimizer.lr 0.016384004870179804 +313 3 training.batch_size 2.0 +313 3 training.label_smoothing 0.44832159967411833 +313 4 model.embedding_dim 2.0 +313 4 optimizer.lr 0.0016173816940543361 +313 4 training.batch_size 2.0 +313 4 training.label_smoothing 0.002327259472403412 +313 5 model.embedding_dim 0.0 +313 5 optimizer.lr 0.03325354757653519 +313 5 training.batch_size 2.0 +313 5 training.label_smoothing 0.0023472579451849604 +313 6 model.embedding_dim 1.0 +313 6 optimizer.lr 0.013142480923536318 +313 6 training.batch_size 0.0 +313 6 training.label_smoothing 0.2267677078786583 +313 7 model.embedding_dim 2.0 +313 7 optimizer.lr 0.001029165907165725 +313 7 training.batch_size 1.0 +313 7 training.label_smoothing 0.8240524499263782 +313 8 model.embedding_dim 2.0 +313 8 optimizer.lr 0.004756585518701827 +313 8 training.batch_size 0.0 +313 8 training.label_smoothing 0.047392230863277394 +313 9 model.embedding_dim 0.0 +313 9 optimizer.lr 0.0048707703171316065 +313 9 training.batch_size 0.0 +313 9 training.label_smoothing 0.001910284431298234 +313 10 model.embedding_dim 0.0 +313 10 optimizer.lr 0.02628512680384192 +313 10 training.batch_size 1.0 +313 10 training.label_smoothing 0.04040083171734753 +313 11 model.embedding_dim 0.0 +313 11 optimizer.lr 0.049023446541530444 +313 11 training.batch_size 0.0 +313 11 training.label_smoothing 0.0860338922711487 +313 12 model.embedding_dim 2.0 +313 12 optimizer.lr 0.0025529205278563235 +313 12 training.batch_size 2.0 +313 12 training.label_smoothing 0.11330217003115657 +313 13 
model.embedding_dim 0.0 +313 13 optimizer.lr 0.002429035010128821 +313 13 training.batch_size 2.0 +313 13 training.label_smoothing 0.0032085483094229494 +313 14 model.embedding_dim 0.0 +313 14 optimizer.lr 0.0010169652175424737 +313 14 training.batch_size 2.0 +313 14 training.label_smoothing 0.11178110009219747 +313 15 model.embedding_dim 0.0 +313 15 optimizer.lr 0.07401632399166726 +313 15 training.batch_size 2.0 +313 15 training.label_smoothing 0.30459600312077756 +313 16 model.embedding_dim 1.0 +313 16 optimizer.lr 0.020770800190858727 +313 16 training.batch_size 1.0 +313 16 training.label_smoothing 0.22287035850844733 +313 17 model.embedding_dim 1.0 +313 17 optimizer.lr 0.0016218968815817144 +313 17 training.batch_size 2.0 +313 17 training.label_smoothing 0.028703249772766254 +313 18 model.embedding_dim 2.0 +313 18 optimizer.lr 0.028560038417576362 +313 18 training.batch_size 1.0 +313 18 training.label_smoothing 0.009755896410767547 +313 19 model.embedding_dim 0.0 +313 19 optimizer.lr 0.0029697947397290527 +313 19 training.batch_size 0.0 +313 19 training.label_smoothing 0.25884740786459765 +313 20 model.embedding_dim 2.0 +313 20 optimizer.lr 0.03161452207990449 +313 20 training.batch_size 2.0 +313 20 training.label_smoothing 0.07411425227409792 +313 21 model.embedding_dim 2.0 +313 21 optimizer.lr 0.09023441378405264 +313 21 training.batch_size 1.0 +313 21 training.label_smoothing 0.003539267564226958 +313 22 model.embedding_dim 2.0 +313 22 optimizer.lr 0.09255843917851303 +313 22 training.batch_size 1.0 +313 22 training.label_smoothing 0.20726066409118946 +313 23 model.embedding_dim 0.0 +313 23 optimizer.lr 0.02608974628304355 +313 23 training.batch_size 0.0 +313 23 training.label_smoothing 0.002228087812178576 +313 24 model.embedding_dim 0.0 +313 24 optimizer.lr 0.0047574637467883985 +313 24 training.batch_size 1.0 +313 24 training.label_smoothing 0.4846332239867213 +313 25 model.embedding_dim 0.0 +313 25 optimizer.lr 0.017906655661645298 +313 25 
training.batch_size 0.0 +313 25 training.label_smoothing 0.006234845285315236 +313 26 model.embedding_dim 0.0 +313 26 optimizer.lr 0.04844572057012925 +313 26 training.batch_size 0.0 +313 26 training.label_smoothing 0.03372211854458082 +313 27 model.embedding_dim 0.0 +313 27 optimizer.lr 0.003029584679326762 +313 27 training.batch_size 1.0 +313 27 training.label_smoothing 0.007692346054393209 +313 28 model.embedding_dim 1.0 +313 28 optimizer.lr 0.0025863193097839226 +313 28 training.batch_size 2.0 +313 28 training.label_smoothing 0.048643010708271296 +313 29 model.embedding_dim 2.0 +313 29 optimizer.lr 0.005506928070796899 +313 29 training.batch_size 1.0 +313 29 training.label_smoothing 0.011699686501092195 +313 30 model.embedding_dim 1.0 +313 30 optimizer.lr 0.0013464671521837678 +313 30 training.batch_size 2.0 +313 30 training.label_smoothing 0.006706967980362257 +313 31 model.embedding_dim 0.0 +313 31 optimizer.lr 0.006350705894142026 +313 31 training.batch_size 0.0 +313 31 training.label_smoothing 0.5964467694656774 +313 32 model.embedding_dim 0.0 +313 32 optimizer.lr 0.005087859472477082 +313 32 training.batch_size 0.0 +313 32 training.label_smoothing 0.1943893358056927 +313 33 model.embedding_dim 2.0 +313 33 optimizer.lr 0.004720639488412024 +313 33 training.batch_size 2.0 +313 33 training.label_smoothing 0.6966765797630707 +313 34 model.embedding_dim 2.0 +313 34 optimizer.lr 0.011403396491245001 +313 34 training.batch_size 2.0 +313 34 training.label_smoothing 0.0065101090774585 +313 35 model.embedding_dim 1.0 +313 35 optimizer.lr 0.0024893473342743554 +313 35 training.batch_size 2.0 +313 35 training.label_smoothing 0.00437986042086617 +313 36 model.embedding_dim 1.0 +313 36 optimizer.lr 0.0039053916088396218 +313 36 training.batch_size 2.0 +313 36 training.label_smoothing 0.06717258776102232 +313 37 model.embedding_dim 1.0 +313 37 optimizer.lr 0.003817728319207629 +313 37 training.batch_size 0.0 +313 37 training.label_smoothing 0.015324394063637647 +313 38 
model.embedding_dim 2.0 +313 38 optimizer.lr 0.03407116901783397 +313 38 training.batch_size 0.0 +313 38 training.label_smoothing 0.001820414578583394 +313 39 model.embedding_dim 1.0 +313 39 optimizer.lr 0.002408993344816103 +313 39 training.batch_size 0.0 +313 39 training.label_smoothing 0.02609514735876722 +313 40 model.embedding_dim 1.0 +313 40 optimizer.lr 0.006286980145680831 +313 40 training.batch_size 0.0 +313 40 training.label_smoothing 0.08348405109184341 +313 41 model.embedding_dim 1.0 +313 41 optimizer.lr 0.018850757058678065 +313 41 training.batch_size 2.0 +313 41 training.label_smoothing 0.014641453439098288 +313 42 model.embedding_dim 2.0 +313 42 optimizer.lr 0.01816373424924256 +313 42 training.batch_size 2.0 +313 42 training.label_smoothing 0.006358843286573186 +313 43 model.embedding_dim 1.0 +313 43 optimizer.lr 0.008864847548037097 +313 43 training.batch_size 0.0 +313 43 training.label_smoothing 0.001257482510218097 +313 44 model.embedding_dim 2.0 +313 44 optimizer.lr 0.022733593015019222 +313 44 training.batch_size 0.0 +313 44 training.label_smoothing 0.6629393847891484 +313 45 model.embedding_dim 2.0 +313 45 optimizer.lr 0.030090556344635894 +313 45 training.batch_size 2.0 +313 45 training.label_smoothing 0.014068829737653759 +313 46 model.embedding_dim 1.0 +313 46 optimizer.lr 0.0019751273022232643 +313 46 training.batch_size 1.0 +313 46 training.label_smoothing 0.8193145796931528 +313 47 model.embedding_dim 1.0 +313 47 optimizer.lr 0.0046123709307150235 +313 47 training.batch_size 1.0 +313 47 training.label_smoothing 0.4407470709870444 +313 48 model.embedding_dim 0.0 +313 48 optimizer.lr 0.0016609340836480225 +313 48 training.batch_size 0.0 +313 48 training.label_smoothing 0.3866356374878988 +313 49 model.embedding_dim 1.0 +313 49 optimizer.lr 0.02169156727414057 +313 49 training.batch_size 1.0 +313 49 training.label_smoothing 0.08836484746212668 +313 50 model.embedding_dim 1.0 +313 50 optimizer.lr 0.010509517126135829 +313 50 
training.batch_size 2.0 +313 50 training.label_smoothing 0.9308151157078215 +313 51 model.embedding_dim 2.0 +313 51 optimizer.lr 0.0018861613350493567 +313 51 training.batch_size 2.0 +313 51 training.label_smoothing 0.245513819495589 +313 52 model.embedding_dim 1.0 +313 52 optimizer.lr 0.04733699106160004 +313 52 training.batch_size 2.0 +313 52 training.label_smoothing 0.4904829776524325 +313 53 model.embedding_dim 2.0 +313 53 optimizer.lr 0.022343117821424514 +313 53 training.batch_size 1.0 +313 53 training.label_smoothing 0.0022594927210326373 +313 54 model.embedding_dim 2.0 +313 54 optimizer.lr 0.030425816631505634 +313 54 training.batch_size 2.0 +313 54 training.label_smoothing 0.0012385053091790422 +313 55 model.embedding_dim 2.0 +313 55 optimizer.lr 0.0027640935998416946 +313 55 training.batch_size 1.0 +313 55 training.label_smoothing 0.009339877888502504 +313 56 model.embedding_dim 1.0 +313 56 optimizer.lr 0.010451626578260055 +313 56 training.batch_size 0.0 +313 56 training.label_smoothing 0.10350389532558804 +313 57 model.embedding_dim 1.0 +313 57 optimizer.lr 0.004634333031980898 +313 57 training.batch_size 2.0 +313 57 training.label_smoothing 0.14345110872053837 +313 58 model.embedding_dim 1.0 +313 58 optimizer.lr 0.0902209164903237 +313 58 training.batch_size 0.0 +313 58 training.label_smoothing 0.006460798400069739 +313 59 model.embedding_dim 0.0 +313 59 optimizer.lr 0.009128501368737593 +313 59 training.batch_size 1.0 +313 59 training.label_smoothing 0.006030030703908313 +313 60 model.embedding_dim 1.0 +313 60 optimizer.lr 0.01878036992459839 +313 60 training.batch_size 0.0 +313 60 training.label_smoothing 0.0010010057812386483 +313 61 model.embedding_dim 0.0 +313 61 optimizer.lr 0.0014989385716464155 +313 61 training.batch_size 1.0 +313 61 training.label_smoothing 0.49152249549094784 +313 62 model.embedding_dim 1.0 +313 62 optimizer.lr 0.015664922320310037 +313 62 training.batch_size 2.0 +313 62 training.label_smoothing 0.0017167233715507022 +313 63 
model.embedding_dim 2.0 +313 63 optimizer.lr 0.005702201101944227 +313 63 training.batch_size 0.0 +313 63 training.label_smoothing 0.05838133176298381 +313 64 model.embedding_dim 0.0 +313 64 optimizer.lr 0.001113261506491676 +313 64 training.batch_size 1.0 +313 64 training.label_smoothing 0.023893741717562755 +313 65 model.embedding_dim 0.0 +313 65 optimizer.lr 0.09075047710301815 +313 65 training.batch_size 0.0 +313 65 training.label_smoothing 0.025004120502668918 +313 66 model.embedding_dim 0.0 +313 66 optimizer.lr 0.009145675558647409 +313 66 training.batch_size 2.0 +313 66 training.label_smoothing 0.008386567029207077 +313 67 model.embedding_dim 0.0 +313 67 optimizer.lr 0.004620988541158377 +313 67 training.batch_size 0.0 +313 67 training.label_smoothing 0.0190788298346912 +313 68 model.embedding_dim 1.0 +313 68 optimizer.lr 0.037124081405910676 +313 68 training.batch_size 1.0 +313 68 training.label_smoothing 0.07963854159348742 +313 69 model.embedding_dim 0.0 +313 69 optimizer.lr 0.00831548773196033 +313 69 training.batch_size 0.0 +313 69 training.label_smoothing 0.020229968547789093 +313 70 model.embedding_dim 0.0 +313 70 optimizer.lr 0.001929497182965959 +313 70 training.batch_size 1.0 +313 70 training.label_smoothing 0.0298918469957395 +313 71 model.embedding_dim 2.0 +313 71 optimizer.lr 0.0017158650058703225 +313 71 training.batch_size 1.0 +313 71 training.label_smoothing 0.8996797752667087 +313 72 model.embedding_dim 0.0 +313 72 optimizer.lr 0.019564683191367154 +313 72 training.batch_size 1.0 +313 72 training.label_smoothing 0.3971460863092994 +313 73 model.embedding_dim 1.0 +313 73 optimizer.lr 0.019157332773948692 +313 73 training.batch_size 2.0 +313 73 training.label_smoothing 0.0014150121716772544 +313 74 model.embedding_dim 2.0 +313 74 optimizer.lr 0.011299731960948007 +313 74 training.batch_size 2.0 +313 74 training.label_smoothing 0.012348768127056889 +313 75 model.embedding_dim 0.0 +313 75 optimizer.lr 0.029129949720101675 +313 75 
training.batch_size 0.0 +313 75 training.label_smoothing 0.6330701743173479 +313 76 model.embedding_dim 2.0 +313 76 optimizer.lr 0.09339395745893188 +313 76 training.batch_size 1.0 +313 76 training.label_smoothing 0.031887396709857865 +313 77 model.embedding_dim 1.0 +313 77 optimizer.lr 0.003529946952249684 +313 77 training.batch_size 1.0 +313 77 training.label_smoothing 0.0016441049227718827 +313 78 model.embedding_dim 0.0 +313 78 optimizer.lr 0.001528521159605343 +313 78 training.batch_size 1.0 +313 78 training.label_smoothing 0.004630667278416684 +313 79 model.embedding_dim 2.0 +313 79 optimizer.lr 0.02110486432123778 +313 79 training.batch_size 0.0 +313 79 training.label_smoothing 0.00446802010652663 +313 80 model.embedding_dim 0.0 +313 80 optimizer.lr 0.022286257509678577 +313 80 training.batch_size 0.0 +313 80 training.label_smoothing 0.009583242974276384 +313 81 model.embedding_dim 1.0 +313 81 optimizer.lr 0.058426047748751014 +313 81 training.batch_size 1.0 +313 81 training.label_smoothing 0.18883488331191955 +313 82 model.embedding_dim 2.0 +313 82 optimizer.lr 0.0012848129508050284 +313 82 training.batch_size 0.0 +313 82 training.label_smoothing 0.00725343850858967 +313 83 model.embedding_dim 0.0 +313 83 optimizer.lr 0.0022334365167132855 +313 83 training.batch_size 0.0 +313 83 training.label_smoothing 0.45466659551817795 +313 84 model.embedding_dim 0.0 +313 84 optimizer.lr 0.02823030459977941 +313 84 training.batch_size 1.0 +313 84 training.label_smoothing 0.01872052122234882 +313 85 model.embedding_dim 2.0 +313 85 optimizer.lr 0.003621845332084091 +313 85 training.batch_size 1.0 +313 85 training.label_smoothing 0.2797023638383016 +313 86 model.embedding_dim 0.0 +313 86 optimizer.lr 0.0026038074273800997 +313 86 training.batch_size 0.0 +313 86 training.label_smoothing 0.04033630692458984 +313 87 model.embedding_dim 2.0 +313 87 optimizer.lr 0.005957582837705573 +313 87 training.batch_size 2.0 +313 87 training.label_smoothing 0.005327527924531808 +313 88 
model.embedding_dim 2.0 +313 88 optimizer.lr 0.0013617246049651482 +313 88 training.batch_size 2.0 +313 88 training.label_smoothing 0.01974226645658765 +313 89 model.embedding_dim 2.0 +313 89 optimizer.lr 0.009130622002118113 +313 89 training.batch_size 2.0 +313 89 training.label_smoothing 0.002488293132874814 +313 90 model.embedding_dim 2.0 +313 90 optimizer.lr 0.08965016696946211 +313 90 training.batch_size 2.0 +313 90 training.label_smoothing 0.06758641822871457 +313 91 model.embedding_dim 2.0 +313 91 optimizer.lr 0.0190754386948827 +313 91 training.batch_size 2.0 +313 91 training.label_smoothing 0.005740684313436607 +313 92 model.embedding_dim 1.0 +313 92 optimizer.lr 0.0031577494862536043 +313 92 training.batch_size 1.0 +313 92 training.label_smoothing 0.27602186657957684 +313 93 model.embedding_dim 1.0 +313 93 optimizer.lr 0.002486502710496688 +313 93 training.batch_size 2.0 +313 93 training.label_smoothing 0.44874977616877565 +313 94 model.embedding_dim 2.0 +313 94 optimizer.lr 0.03780441186784437 +313 94 training.batch_size 1.0 +313 94 training.label_smoothing 0.32542458823198855 +313 95 model.embedding_dim 1.0 +313 95 optimizer.lr 0.0010025557270035295 +313 95 training.batch_size 2.0 +313 95 training.label_smoothing 0.003533598263818038 +313 96 model.embedding_dim 0.0 +313 96 optimizer.lr 0.011086837781868473 +313 96 training.batch_size 1.0 +313 96 training.label_smoothing 0.9022615570063378 +313 97 model.embedding_dim 2.0 +313 97 optimizer.lr 0.009843512622070753 +313 97 training.batch_size 2.0 +313 97 training.label_smoothing 0.05664676268420373 +313 98 model.embedding_dim 2.0 +313 98 optimizer.lr 0.04459450193389048 +313 98 training.batch_size 0.0 +313 98 training.label_smoothing 0.0010519428998509088 +313 99 model.embedding_dim 0.0 +313 99 optimizer.lr 0.02302307106397455 +313 99 training.batch_size 0.0 +313 99 training.label_smoothing 0.0014334507160013658 +313 100 model.embedding_dim 1.0 +313 100 optimizer.lr 0.04914840476777574 +313 100 
training.batch_size 0.0 +313 100 training.label_smoothing 0.0010854843676879344 +313 1 dataset """kinships""" +313 1 model """hole""" +313 1 loss """softplus""" +313 1 regularizer """no""" +313 1 optimizer """adam""" +313 1 training_loop """lcwa""" +313 1 evaluator """rankbased""" +313 2 dataset """kinships""" +313 2 model """hole""" +313 2 loss """softplus""" +313 2 regularizer """no""" +313 2 optimizer """adam""" +313 2 training_loop """lcwa""" +313 2 evaluator """rankbased""" +313 3 dataset """kinships""" +313 3 model """hole""" +313 3 loss """softplus""" +313 3 regularizer """no""" +313 3 optimizer """adam""" +313 3 training_loop """lcwa""" +313 3 evaluator """rankbased""" +313 4 dataset """kinships""" +313 4 model """hole""" +313 4 loss """softplus""" +313 4 regularizer """no""" +313 4 optimizer """adam""" +313 4 training_loop """lcwa""" +313 4 evaluator """rankbased""" +313 5 dataset """kinships""" +313 5 model """hole""" +313 5 loss """softplus""" +313 5 regularizer """no""" +313 5 optimizer """adam""" +313 5 training_loop """lcwa""" +313 5 evaluator """rankbased""" +313 6 dataset """kinships""" +313 6 model """hole""" +313 6 loss """softplus""" +313 6 regularizer """no""" +313 6 optimizer """adam""" +313 6 training_loop """lcwa""" +313 6 evaluator """rankbased""" +313 7 dataset """kinships""" +313 7 model """hole""" +313 7 loss """softplus""" +313 7 regularizer """no""" +313 7 optimizer """adam""" +313 7 training_loop """lcwa""" +313 7 evaluator """rankbased""" +313 8 dataset """kinships""" +313 8 model """hole""" +313 8 loss """softplus""" +313 8 regularizer """no""" +313 8 optimizer """adam""" +313 8 training_loop """lcwa""" +313 8 evaluator """rankbased""" +313 9 dataset """kinships""" +313 9 model """hole""" +313 9 loss """softplus""" +313 9 regularizer """no""" +313 9 optimizer """adam""" +313 9 training_loop """lcwa""" +313 9 evaluator """rankbased""" +313 10 dataset """kinships""" +313 10 model """hole""" +313 10 loss """softplus""" +313 10 
regularizer """no""" +313 10 optimizer """adam""" +313 10 training_loop """lcwa""" +313 10 evaluator """rankbased""" +313 11 dataset """kinships""" +313 11 model """hole""" +313 11 loss """softplus""" +313 11 regularizer """no""" +313 11 optimizer """adam""" +313 11 training_loop """lcwa""" +313 11 evaluator """rankbased""" +313 12 dataset """kinships""" +313 12 model """hole""" +313 12 loss """softplus""" +313 12 regularizer """no""" +313 12 optimizer """adam""" +313 12 training_loop """lcwa""" +313 12 evaluator """rankbased""" +313 13 dataset """kinships""" +313 13 model """hole""" +313 13 loss """softplus""" +313 13 regularizer """no""" +313 13 optimizer """adam""" +313 13 training_loop """lcwa""" +313 13 evaluator """rankbased""" +313 14 dataset """kinships""" +313 14 model """hole""" +313 14 loss """softplus""" +313 14 regularizer """no""" +313 14 optimizer """adam""" +313 14 training_loop """lcwa""" +313 14 evaluator """rankbased""" +313 15 dataset """kinships""" +313 15 model """hole""" +313 15 loss """softplus""" +313 15 regularizer """no""" +313 15 optimizer """adam""" +313 15 training_loop """lcwa""" +313 15 evaluator """rankbased""" +313 16 dataset """kinships""" +313 16 model """hole""" +313 16 loss """softplus""" +313 16 regularizer """no""" +313 16 optimizer """adam""" +313 16 training_loop """lcwa""" +313 16 evaluator """rankbased""" +313 17 dataset """kinships""" +313 17 model """hole""" +313 17 loss """softplus""" +313 17 regularizer """no""" +313 17 optimizer """adam""" +313 17 training_loop """lcwa""" +313 17 evaluator """rankbased""" +313 18 dataset """kinships""" +313 18 model """hole""" +313 18 loss """softplus""" +313 18 regularizer """no""" +313 18 optimizer """adam""" +313 18 training_loop """lcwa""" +313 18 evaluator """rankbased""" +313 19 dataset """kinships""" +313 19 model """hole""" +313 19 loss """softplus""" +313 19 regularizer """no""" +313 19 optimizer """adam""" +313 19 training_loop """lcwa""" +313 19 evaluator """rankbased""" 
+313 20 dataset """kinships""" +313 20 model """hole""" +313 20 loss """softplus""" +313 20 regularizer """no""" +313 20 optimizer """adam""" +313 20 training_loop """lcwa""" +313 20 evaluator """rankbased""" +313 21 dataset """kinships""" +313 21 model """hole""" +313 21 loss """softplus""" +313 21 regularizer """no""" +313 21 optimizer """adam""" +313 21 training_loop """lcwa""" +313 21 evaluator """rankbased""" +313 22 dataset """kinships""" +313 22 model """hole""" +313 22 loss """softplus""" +313 22 regularizer """no""" +313 22 optimizer """adam""" +313 22 training_loop """lcwa""" +313 22 evaluator """rankbased""" +313 23 dataset """kinships""" +313 23 model """hole""" +313 23 loss """softplus""" +313 23 regularizer """no""" +313 23 optimizer """adam""" +313 23 training_loop """lcwa""" +313 23 evaluator """rankbased""" +313 24 dataset """kinships""" +313 24 model """hole""" +313 24 loss """softplus""" +313 24 regularizer """no""" +313 24 optimizer """adam""" +313 24 training_loop """lcwa""" +313 24 evaluator """rankbased""" +313 25 dataset """kinships""" +313 25 model """hole""" +313 25 loss """softplus""" +313 25 regularizer """no""" +313 25 optimizer """adam""" +313 25 training_loop """lcwa""" +313 25 evaluator """rankbased""" +313 26 dataset """kinships""" +313 26 model """hole""" +313 26 loss """softplus""" +313 26 regularizer """no""" +313 26 optimizer """adam""" +313 26 training_loop """lcwa""" +313 26 evaluator """rankbased""" +313 27 dataset """kinships""" +313 27 model """hole""" +313 27 loss """softplus""" +313 27 regularizer """no""" +313 27 optimizer """adam""" +313 27 training_loop """lcwa""" +313 27 evaluator """rankbased""" +313 28 dataset """kinships""" +313 28 model """hole""" +313 28 loss """softplus""" +313 28 regularizer """no""" +313 28 optimizer """adam""" +313 28 training_loop """lcwa""" +313 28 evaluator """rankbased""" +313 29 dataset """kinships""" +313 29 model """hole""" +313 29 loss """softplus""" +313 29 regularizer """no""" +313 
29 optimizer """adam""" +313 29 training_loop """lcwa""" +313 29 evaluator """rankbased""" +313 30 dataset """kinships""" +313 30 model """hole""" +313 30 loss """softplus""" +313 30 regularizer """no""" +313 30 optimizer """adam""" +313 30 training_loop """lcwa""" +313 30 evaluator """rankbased""" +313 31 dataset """kinships""" +313 31 model """hole""" +313 31 loss """softplus""" +313 31 regularizer """no""" +313 31 optimizer """adam""" +313 31 training_loop """lcwa""" +313 31 evaluator """rankbased""" +313 32 dataset """kinships""" +313 32 model """hole""" +313 32 loss """softplus""" +313 32 regularizer """no""" +313 32 optimizer """adam""" +313 32 training_loop """lcwa""" +313 32 evaluator """rankbased""" +313 33 dataset """kinships""" +313 33 model """hole""" +313 33 loss """softplus""" +313 33 regularizer """no""" +313 33 optimizer """adam""" +313 33 training_loop """lcwa""" +313 33 evaluator """rankbased""" +313 34 dataset """kinships""" +313 34 model """hole""" +313 34 loss """softplus""" +313 34 regularizer """no""" +313 34 optimizer """adam""" +313 34 training_loop """lcwa""" +313 34 evaluator """rankbased""" +313 35 dataset """kinships""" +313 35 model """hole""" +313 35 loss """softplus""" +313 35 regularizer """no""" +313 35 optimizer """adam""" +313 35 training_loop """lcwa""" +313 35 evaluator """rankbased""" +313 36 dataset """kinships""" +313 36 model """hole""" +313 36 loss """softplus""" +313 36 regularizer """no""" +313 36 optimizer """adam""" +313 36 training_loop """lcwa""" +313 36 evaluator """rankbased""" +313 37 dataset """kinships""" +313 37 model """hole""" +313 37 loss """softplus""" +313 37 regularizer """no""" +313 37 optimizer """adam""" +313 37 training_loop """lcwa""" +313 37 evaluator """rankbased""" +313 38 dataset """kinships""" +313 38 model """hole""" +313 38 loss """softplus""" +313 38 regularizer """no""" +313 38 optimizer """adam""" +313 38 training_loop """lcwa""" +313 38 evaluator """rankbased""" +313 39 dataset 
"""kinships""" +313 39 model """hole""" +313 39 loss """softplus""" +313 39 regularizer """no""" +313 39 optimizer """adam""" +313 39 training_loop """lcwa""" +313 39 evaluator """rankbased""" +313 40 dataset """kinships""" +313 40 model """hole""" +313 40 loss """softplus""" +313 40 regularizer """no""" +313 40 optimizer """adam""" +313 40 training_loop """lcwa""" +313 40 evaluator """rankbased""" +313 41 dataset """kinships""" +313 41 model """hole""" +313 41 loss """softplus""" +313 41 regularizer """no""" +313 41 optimizer """adam""" +313 41 training_loop """lcwa""" +313 41 evaluator """rankbased""" +313 42 dataset """kinships""" +313 42 model """hole""" +313 42 loss """softplus""" +313 42 regularizer """no""" +313 42 optimizer """adam""" +313 42 training_loop """lcwa""" +313 42 evaluator """rankbased""" +313 43 dataset """kinships""" +313 43 model """hole""" +313 43 loss """softplus""" +313 43 regularizer """no""" +313 43 optimizer """adam""" +313 43 training_loop """lcwa""" +313 43 evaluator """rankbased""" +313 44 dataset """kinships""" +313 44 model """hole""" +313 44 loss """softplus""" +313 44 regularizer """no""" +313 44 optimizer """adam""" +313 44 training_loop """lcwa""" +313 44 evaluator """rankbased""" +313 45 dataset """kinships""" +313 45 model """hole""" +313 45 loss """softplus""" +313 45 regularizer """no""" +313 45 optimizer """adam""" +313 45 training_loop """lcwa""" +313 45 evaluator """rankbased""" +313 46 dataset """kinships""" +313 46 model """hole""" +313 46 loss """softplus""" +313 46 regularizer """no""" +313 46 optimizer """adam""" +313 46 training_loop """lcwa""" +313 46 evaluator """rankbased""" +313 47 dataset """kinships""" +313 47 model """hole""" +313 47 loss """softplus""" +313 47 regularizer """no""" +313 47 optimizer """adam""" +313 47 training_loop """lcwa""" +313 47 evaluator """rankbased""" +313 48 dataset """kinships""" +313 48 model """hole""" +313 48 loss """softplus""" +313 48 regularizer """no""" +313 48 optimizer 
"""adam""" +313 48 training_loop """lcwa""" +313 48 evaluator """rankbased""" +313 49 dataset """kinships""" +313 49 model """hole""" +313 49 loss """softplus""" +313 49 regularizer """no""" +313 49 optimizer """adam""" +313 49 training_loop """lcwa""" +313 49 evaluator """rankbased""" +313 50 dataset """kinships""" +313 50 model """hole""" +313 50 loss """softplus""" +313 50 regularizer """no""" +313 50 optimizer """adam""" +313 50 training_loop """lcwa""" +313 50 evaluator """rankbased""" +313 51 dataset """kinships""" +313 51 model """hole""" +313 51 loss """softplus""" +313 51 regularizer """no""" +313 51 optimizer """adam""" +313 51 training_loop """lcwa""" +313 51 evaluator """rankbased""" +313 52 dataset """kinships""" +313 52 model """hole""" +313 52 loss """softplus""" +313 52 regularizer """no""" +313 52 optimizer """adam""" +313 52 training_loop """lcwa""" +313 52 evaluator """rankbased""" +313 53 dataset """kinships""" +313 53 model """hole""" +313 53 loss """softplus""" +313 53 regularizer """no""" +313 53 optimizer """adam""" +313 53 training_loop """lcwa""" +313 53 evaluator """rankbased""" +313 54 dataset """kinships""" +313 54 model """hole""" +313 54 loss """softplus""" +313 54 regularizer """no""" +313 54 optimizer """adam""" +313 54 training_loop """lcwa""" +313 54 evaluator """rankbased""" +313 55 dataset """kinships""" +313 55 model """hole""" +313 55 loss """softplus""" +313 55 regularizer """no""" +313 55 optimizer """adam""" +313 55 training_loop """lcwa""" +313 55 evaluator """rankbased""" +313 56 dataset """kinships""" +313 56 model """hole""" +313 56 loss """softplus""" +313 56 regularizer """no""" +313 56 optimizer """adam""" +313 56 training_loop """lcwa""" +313 56 evaluator """rankbased""" +313 57 dataset """kinships""" +313 57 model """hole""" +313 57 loss """softplus""" +313 57 regularizer """no""" +313 57 optimizer """adam""" +313 57 training_loop """lcwa""" +313 57 evaluator """rankbased""" +313 58 dataset """kinships""" +313 58 
model """hole""" +313 58 loss """softplus""" +313 58 regularizer """no""" +313 58 optimizer """adam""" +313 58 training_loop """lcwa""" +313 58 evaluator """rankbased""" +313 59 dataset """kinships""" +313 59 model """hole""" +313 59 loss """softplus""" +313 59 regularizer """no""" +313 59 optimizer """adam""" +313 59 training_loop """lcwa""" +313 59 evaluator """rankbased""" +313 60 dataset """kinships""" +313 60 model """hole""" +313 60 loss """softplus""" +313 60 regularizer """no""" +313 60 optimizer """adam""" +313 60 training_loop """lcwa""" +313 60 evaluator """rankbased""" +313 61 dataset """kinships""" +313 61 model """hole""" +313 61 loss """softplus""" +313 61 regularizer """no""" +313 61 optimizer """adam""" +313 61 training_loop """lcwa""" +313 61 evaluator """rankbased""" +313 62 dataset """kinships""" +313 62 model """hole""" +313 62 loss """softplus""" +313 62 regularizer """no""" +313 62 optimizer """adam""" +313 62 training_loop """lcwa""" +313 62 evaluator """rankbased""" +313 63 dataset """kinships""" +313 63 model """hole""" +313 63 loss """softplus""" +313 63 regularizer """no""" +313 63 optimizer """adam""" +313 63 training_loop """lcwa""" +313 63 evaluator """rankbased""" +313 64 dataset """kinships""" +313 64 model """hole""" +313 64 loss """softplus""" +313 64 regularizer """no""" +313 64 optimizer """adam""" +313 64 training_loop """lcwa""" +313 64 evaluator """rankbased""" +313 65 dataset """kinships""" +313 65 model """hole""" +313 65 loss """softplus""" +313 65 regularizer """no""" +313 65 optimizer """adam""" +313 65 training_loop """lcwa""" +313 65 evaluator """rankbased""" +313 66 dataset """kinships""" +313 66 model """hole""" +313 66 loss """softplus""" +313 66 regularizer """no""" +313 66 optimizer """adam""" +313 66 training_loop """lcwa""" +313 66 evaluator """rankbased""" +313 67 dataset """kinships""" +313 67 model """hole""" +313 67 loss """softplus""" +313 67 regularizer """no""" +313 67 optimizer """adam""" +313 67 
training_loop """lcwa""" +313 67 evaluator """rankbased""" +313 68 dataset """kinships""" +313 68 model """hole""" +313 68 loss """softplus""" +313 68 regularizer """no""" +313 68 optimizer """adam""" +313 68 training_loop """lcwa""" +313 68 evaluator """rankbased""" +313 69 dataset """kinships""" +313 69 model """hole""" +313 69 loss """softplus""" +313 69 regularizer """no""" +313 69 optimizer """adam""" +313 69 training_loop """lcwa""" +313 69 evaluator """rankbased""" +313 70 dataset """kinships""" +313 70 model """hole""" +313 70 loss """softplus""" +313 70 regularizer """no""" +313 70 optimizer """adam""" +313 70 training_loop """lcwa""" +313 70 evaluator """rankbased""" +313 71 dataset """kinships""" +313 71 model """hole""" +313 71 loss """softplus""" +313 71 regularizer """no""" +313 71 optimizer """adam""" +313 71 training_loop """lcwa""" +313 71 evaluator """rankbased""" +313 72 dataset """kinships""" +313 72 model """hole""" +313 72 loss """softplus""" +313 72 regularizer """no""" +313 72 optimizer """adam""" +313 72 training_loop """lcwa""" +313 72 evaluator """rankbased""" +313 73 dataset """kinships""" +313 73 model """hole""" +313 73 loss """softplus""" +313 73 regularizer """no""" +313 73 optimizer """adam""" +313 73 training_loop """lcwa""" +313 73 evaluator """rankbased""" +313 74 dataset """kinships""" +313 74 model """hole""" +313 74 loss """softplus""" +313 74 regularizer """no""" +313 74 optimizer """adam""" +313 74 training_loop """lcwa""" +313 74 evaluator """rankbased""" +313 75 dataset """kinships""" +313 75 model """hole""" +313 75 loss """softplus""" +313 75 regularizer """no""" +313 75 optimizer """adam""" +313 75 training_loop """lcwa""" +313 75 evaluator """rankbased""" +313 76 dataset """kinships""" +313 76 model """hole""" +313 76 loss """softplus""" +313 76 regularizer """no""" +313 76 optimizer """adam""" +313 76 training_loop """lcwa""" +313 76 evaluator """rankbased""" +313 77 dataset """kinships""" +313 77 model """hole""" 
+313 77 loss """softplus""" +313 77 regularizer """no""" +313 77 optimizer """adam""" +313 77 training_loop """lcwa""" +313 77 evaluator """rankbased""" +313 78 dataset """kinships""" +313 78 model """hole""" +313 78 loss """softplus""" +313 78 regularizer """no""" +313 78 optimizer """adam""" +313 78 training_loop """lcwa""" +313 78 evaluator """rankbased""" +313 79 dataset """kinships""" +313 79 model """hole""" +313 79 loss """softplus""" +313 79 regularizer """no""" +313 79 optimizer """adam""" +313 79 training_loop """lcwa""" +313 79 evaluator """rankbased""" +313 80 dataset """kinships""" +313 80 model """hole""" +313 80 loss """softplus""" +313 80 regularizer """no""" +313 80 optimizer """adam""" +313 80 training_loop """lcwa""" +313 80 evaluator """rankbased""" +313 81 dataset """kinships""" +313 81 model """hole""" +313 81 loss """softplus""" +313 81 regularizer """no""" +313 81 optimizer """adam""" +313 81 training_loop """lcwa""" +313 81 evaluator """rankbased""" +313 82 dataset """kinships""" +313 82 model """hole""" +313 82 loss """softplus""" +313 82 regularizer """no""" +313 82 optimizer """adam""" +313 82 training_loop """lcwa""" +313 82 evaluator """rankbased""" +313 83 dataset """kinships""" +313 83 model """hole""" +313 83 loss """softplus""" +313 83 regularizer """no""" +313 83 optimizer """adam""" +313 83 training_loop """lcwa""" +313 83 evaluator """rankbased""" +313 84 dataset """kinships""" +313 84 model """hole""" +313 84 loss """softplus""" +313 84 regularizer """no""" +313 84 optimizer """adam""" +313 84 training_loop """lcwa""" +313 84 evaluator """rankbased""" +313 85 dataset """kinships""" +313 85 model """hole""" +313 85 loss """softplus""" +313 85 regularizer """no""" +313 85 optimizer """adam""" +313 85 training_loop """lcwa""" +313 85 evaluator """rankbased""" +313 86 dataset """kinships""" +313 86 model """hole""" +313 86 loss """softplus""" +313 86 regularizer """no""" +313 86 optimizer """adam""" +313 86 training_loop """lcwa""" 
+313 86 evaluator """rankbased""" +313 87 dataset """kinships""" +313 87 model """hole""" +313 87 loss """softplus""" +313 87 regularizer """no""" +313 87 optimizer """adam""" +313 87 training_loop """lcwa""" +313 87 evaluator """rankbased""" +313 88 dataset """kinships""" +313 88 model """hole""" +313 88 loss """softplus""" +313 88 regularizer """no""" +313 88 optimizer """adam""" +313 88 training_loop """lcwa""" +313 88 evaluator """rankbased""" +313 89 dataset """kinships""" +313 89 model """hole""" +313 89 loss """softplus""" +313 89 regularizer """no""" +313 89 optimizer """adam""" +313 89 training_loop """lcwa""" +313 89 evaluator """rankbased""" +313 90 dataset """kinships""" +313 90 model """hole""" +313 90 loss """softplus""" +313 90 regularizer """no""" +313 90 optimizer """adam""" +313 90 training_loop """lcwa""" +313 90 evaluator """rankbased""" +313 91 dataset """kinships""" +313 91 model """hole""" +313 91 loss """softplus""" +313 91 regularizer """no""" +313 91 optimizer """adam""" +313 91 training_loop """lcwa""" +313 91 evaluator """rankbased""" +313 92 dataset """kinships""" +313 92 model """hole""" +313 92 loss """softplus""" +313 92 regularizer """no""" +313 92 optimizer """adam""" +313 92 training_loop """lcwa""" +313 92 evaluator """rankbased""" +313 93 dataset """kinships""" +313 93 model """hole""" +313 93 loss """softplus""" +313 93 regularizer """no""" +313 93 optimizer """adam""" +313 93 training_loop """lcwa""" +313 93 evaluator """rankbased""" +313 94 dataset """kinships""" +313 94 model """hole""" +313 94 loss """softplus""" +313 94 regularizer """no""" +313 94 optimizer """adam""" +313 94 training_loop """lcwa""" +313 94 evaluator """rankbased""" +313 95 dataset """kinships""" +313 95 model """hole""" +313 95 loss """softplus""" +313 95 regularizer """no""" +313 95 optimizer """adam""" +313 95 training_loop """lcwa""" +313 95 evaluator """rankbased""" +313 96 dataset """kinships""" +313 96 model """hole""" +313 96 loss """softplus""" 
+313 96 regularizer """no""" +313 96 optimizer """adam""" +313 96 training_loop """lcwa""" +313 96 evaluator """rankbased""" +313 97 dataset """kinships""" +313 97 model """hole""" +313 97 loss """softplus""" +313 97 regularizer """no""" +313 97 optimizer """adam""" +313 97 training_loop """lcwa""" +313 97 evaluator """rankbased""" +313 98 dataset """kinships""" +313 98 model """hole""" +313 98 loss """softplus""" +313 98 regularizer """no""" +313 98 optimizer """adam""" +313 98 training_loop """lcwa""" +313 98 evaluator """rankbased""" +313 99 dataset """kinships""" +313 99 model """hole""" +313 99 loss """softplus""" +313 99 regularizer """no""" +313 99 optimizer """adam""" +313 99 training_loop """lcwa""" +313 99 evaluator """rankbased""" +313 100 dataset """kinships""" +313 100 model """hole""" +313 100 loss """softplus""" +313 100 regularizer """no""" +313 100 optimizer """adam""" +313 100 training_loop """lcwa""" +313 100 evaluator """rankbased""" +314 1 model.embedding_dim 0.0 +314 1 optimizer.lr 0.05864005974847643 +314 1 training.batch_size 1.0 +314 1 training.label_smoothing 0.1459952701615188 +314 2 model.embedding_dim 2.0 +314 2 optimizer.lr 0.0018662966062642056 +314 2 training.batch_size 2.0 +314 2 training.label_smoothing 0.13919353594453124 +314 3 model.embedding_dim 0.0 +314 3 optimizer.lr 0.00867029345984948 +314 3 training.batch_size 0.0 +314 3 training.label_smoothing 0.015465197622948717 +314 4 model.embedding_dim 1.0 +314 4 optimizer.lr 0.005019545695369865 +314 4 training.batch_size 2.0 +314 4 training.label_smoothing 0.005524244197312896 +314 5 model.embedding_dim 2.0 +314 5 optimizer.lr 0.017276694742593055 +314 5 training.batch_size 0.0 +314 5 training.label_smoothing 0.700833210545354 +314 6 model.embedding_dim 0.0 +314 6 optimizer.lr 0.007192301366531833 +314 6 training.batch_size 0.0 +314 6 training.label_smoothing 0.008831850103469808 +314 7 model.embedding_dim 2.0 +314 7 optimizer.lr 0.002670392945024541 +314 7 training.batch_size 0.0 
+314 7 training.label_smoothing 0.016792754536472568 +314 8 model.embedding_dim 1.0 +314 8 optimizer.lr 0.0027741682498221318 +314 8 training.batch_size 0.0 +314 8 training.label_smoothing 0.004294479446960429 +314 9 model.embedding_dim 2.0 +314 9 optimizer.lr 0.009881184551796384 +314 9 training.batch_size 1.0 +314 9 training.label_smoothing 0.01822227199202305 +314 10 model.embedding_dim 1.0 +314 10 optimizer.lr 0.001356217663063008 +314 10 training.batch_size 1.0 +314 10 training.label_smoothing 0.5361658650575672 +314 11 model.embedding_dim 0.0 +314 11 optimizer.lr 0.07101700708854457 +314 11 training.batch_size 0.0 +314 11 training.label_smoothing 0.02645857685015195 +314 12 model.embedding_dim 0.0 +314 12 optimizer.lr 0.00194269993691462 +314 12 training.batch_size 1.0 +314 12 training.label_smoothing 0.002827514472054332 +314 13 model.embedding_dim 0.0 +314 13 optimizer.lr 0.0019780117553999848 +314 13 training.batch_size 1.0 +314 13 training.label_smoothing 0.010765303067330477 +314 14 model.embedding_dim 0.0 +314 14 optimizer.lr 0.0013728466990505393 +314 14 training.batch_size 0.0 +314 14 training.label_smoothing 0.08730690222092857 +314 15 model.embedding_dim 0.0 +314 15 optimizer.lr 0.05377194338609395 +314 15 training.batch_size 0.0 +314 15 training.label_smoothing 0.003028060542932292 +314 16 model.embedding_dim 1.0 +314 16 optimizer.lr 0.011425608729523534 +314 16 training.batch_size 0.0 +314 16 training.label_smoothing 0.0011443994652847256 +314 17 model.embedding_dim 0.0 +314 17 optimizer.lr 0.03384798988906493 +314 17 training.batch_size 0.0 +314 17 training.label_smoothing 0.012002374023551976 +314 18 model.embedding_dim 1.0 +314 18 optimizer.lr 0.0022183040931390303 +314 18 training.batch_size 0.0 +314 18 training.label_smoothing 0.4550487550079929 +314 19 model.embedding_dim 1.0 +314 19 optimizer.lr 0.014378052138057465 +314 19 training.batch_size 0.0 +314 19 training.label_smoothing 0.02459473289133661 +314 20 model.embedding_dim 1.0 +314 20 
optimizer.lr 0.027632734351285583 +314 20 training.batch_size 0.0 +314 20 training.label_smoothing 0.011327329303056792 +314 21 model.embedding_dim 1.0 +314 21 optimizer.lr 0.003498081824159048 +314 21 training.batch_size 0.0 +314 21 training.label_smoothing 0.0010387771835768467 +314 22 model.embedding_dim 1.0 +314 22 optimizer.lr 0.0032669411693819436 +314 22 training.batch_size 1.0 +314 22 training.label_smoothing 0.0018261687934263192 +314 23 model.embedding_dim 2.0 +314 23 optimizer.lr 0.026424096840978694 +314 23 training.batch_size 0.0 +314 23 training.label_smoothing 0.004419693621379544 +314 24 model.embedding_dim 1.0 +314 24 optimizer.lr 0.006357199977268888 +314 24 training.batch_size 1.0 +314 24 training.label_smoothing 0.10952006452334849 +314 25 model.embedding_dim 0.0 +314 25 optimizer.lr 0.013831542988132257 +314 25 training.batch_size 2.0 +314 25 training.label_smoothing 0.020678565868668414 +314 26 model.embedding_dim 1.0 +314 26 optimizer.lr 0.0026188367493976366 +314 26 training.batch_size 1.0 +314 26 training.label_smoothing 0.1982779600105017 +314 27 model.embedding_dim 1.0 +314 27 optimizer.lr 0.057776778725219184 +314 27 training.batch_size 1.0 +314 27 training.label_smoothing 0.0697760267950342 +314 28 model.embedding_dim 1.0 +314 28 optimizer.lr 0.0012442160846331274 +314 28 training.batch_size 1.0 +314 28 training.label_smoothing 0.011941700314819557 +314 29 model.embedding_dim 2.0 +314 29 optimizer.lr 0.00946887145515501 +314 29 training.batch_size 0.0 +314 29 training.label_smoothing 0.23696105368125986 +314 30 model.embedding_dim 0.0 +314 30 optimizer.lr 0.016049189170474168 +314 30 training.batch_size 1.0 +314 30 training.label_smoothing 0.05075801656532794 +314 31 model.embedding_dim 2.0 +314 31 optimizer.lr 0.0020607703536618944 +314 31 training.batch_size 0.0 +314 31 training.label_smoothing 0.0037094530320673654 +314 32 model.embedding_dim 0.0 +314 32 optimizer.lr 0.011607447534255124 +314 32 training.batch_size 2.0 +314 32 
training.label_smoothing 0.0016893441030929488 +314 33 model.embedding_dim 2.0 +314 33 optimizer.lr 0.04148830507588157 +314 33 training.batch_size 0.0 +314 33 training.label_smoothing 0.2952681640139149 +314 34 model.embedding_dim 2.0 +314 34 optimizer.lr 0.001842498478241363 +314 34 training.batch_size 2.0 +314 34 training.label_smoothing 0.041719665972960354 +314 35 model.embedding_dim 1.0 +314 35 optimizer.lr 0.008463680994840248 +314 35 training.batch_size 0.0 +314 35 training.label_smoothing 0.21075921152876717 +314 36 model.embedding_dim 1.0 +314 36 optimizer.lr 0.005495566320096431 +314 36 training.batch_size 1.0 +314 36 training.label_smoothing 0.16326533696070542 +314 37 model.embedding_dim 0.0 +314 37 optimizer.lr 0.0824927172400093 +314 37 training.batch_size 2.0 +314 37 training.label_smoothing 0.23023844376172328 +314 38 model.embedding_dim 1.0 +314 38 optimizer.lr 0.0011413156677613368 +314 38 training.batch_size 2.0 +314 38 training.label_smoothing 0.3013291568377104 +314 39 model.embedding_dim 1.0 +314 39 optimizer.lr 0.010302545757905901 +314 39 training.batch_size 0.0 +314 39 training.label_smoothing 0.007914294667635349 +314 40 model.embedding_dim 0.0 +314 40 optimizer.lr 0.005456580494439755 +314 40 training.batch_size 0.0 +314 40 training.label_smoothing 0.0047800481181150606 +314 41 model.embedding_dim 2.0 +314 41 optimizer.lr 0.0010434262184498488 +314 41 training.batch_size 1.0 +314 41 training.label_smoothing 0.05422667923398938 +314 42 model.embedding_dim 2.0 +314 42 optimizer.lr 0.002002587626233909 +314 42 training.batch_size 2.0 +314 42 training.label_smoothing 0.0019940050343375836 +314 43 model.embedding_dim 2.0 +314 43 optimizer.lr 0.002546409294678283 +314 43 training.batch_size 0.0 +314 43 training.label_smoothing 0.4931174033317003 +314 44 model.embedding_dim 0.0 +314 44 optimizer.lr 0.026075542825215927 +314 44 training.batch_size 1.0 +314 44 training.label_smoothing 0.014429432082740537 +314 45 model.embedding_dim 0.0 +314 45 
optimizer.lr 0.005335668204984186 +314 45 training.batch_size 1.0 +314 45 training.label_smoothing 0.020717170384618868 +314 46 model.embedding_dim 1.0 +314 46 optimizer.lr 0.0012879342195868853 +314 46 training.batch_size 0.0 +314 46 training.label_smoothing 0.13903038419600533 +314 47 model.embedding_dim 1.0 +314 47 optimizer.lr 0.04955093316694737 +314 47 training.batch_size 2.0 +314 47 training.label_smoothing 0.08752533903386542 +314 48 model.embedding_dim 2.0 +314 48 optimizer.lr 0.012787192504409578 +314 48 training.batch_size 0.0 +314 48 training.label_smoothing 0.4223383935783015 +314 49 model.embedding_dim 2.0 +314 49 optimizer.lr 0.045892677374989435 +314 49 training.batch_size 2.0 +314 49 training.label_smoothing 0.026685909407929708 +314 50 model.embedding_dim 1.0 +314 50 optimizer.lr 0.001565153071731313 +314 50 training.batch_size 1.0 +314 50 training.label_smoothing 0.08734610822003358 +314 51 model.embedding_dim 2.0 +314 51 optimizer.lr 0.0032031895945835763 +314 51 training.batch_size 0.0 +314 51 training.label_smoothing 0.09709062868580812 +314 52 model.embedding_dim 2.0 +314 52 optimizer.lr 0.0015440434488570636 +314 52 training.batch_size 1.0 +314 52 training.label_smoothing 0.8220596050461442 +314 53 model.embedding_dim 2.0 +314 53 optimizer.lr 0.0011891198340504001 +314 53 training.batch_size 0.0 +314 53 training.label_smoothing 0.5295305104700592 +314 54 model.embedding_dim 2.0 +314 54 optimizer.lr 0.04026818779963235 +314 54 training.batch_size 1.0 +314 54 training.label_smoothing 0.3590689977977418 +314 55 model.embedding_dim 0.0 +314 55 optimizer.lr 0.07897586029273378 +314 55 training.batch_size 1.0 +314 55 training.label_smoothing 0.1580622874394316 +314 56 model.embedding_dim 1.0 +314 56 optimizer.lr 0.03460152295698668 +314 56 training.batch_size 2.0 +314 56 training.label_smoothing 0.0012867695304577768 +314 57 model.embedding_dim 2.0 +314 57 optimizer.lr 0.002966443608730099 +314 57 training.batch_size 0.0 +314 57 
training.label_smoothing 0.0070015997964395565 +314 58 model.embedding_dim 1.0 +314 58 optimizer.lr 0.0011893855468246865 +314 58 training.batch_size 2.0 +314 58 training.label_smoothing 0.019312270274849934 +314 59 model.embedding_dim 0.0 +314 59 optimizer.lr 0.0031737194578932964 +314 59 training.batch_size 1.0 +314 59 training.label_smoothing 0.13010949812188605 +314 60 model.embedding_dim 0.0 +314 60 optimizer.lr 0.05034033514436911 +314 60 training.batch_size 2.0 +314 60 training.label_smoothing 0.028636735333271726 +314 61 model.embedding_dim 1.0 +314 61 optimizer.lr 0.001042129480418159 +314 61 training.batch_size 1.0 +314 61 training.label_smoothing 0.009207551340607755 +314 62 model.embedding_dim 1.0 +314 62 optimizer.lr 0.003038187523602402 +314 62 training.batch_size 0.0 +314 62 training.label_smoothing 0.007186105808338076 +314 63 model.embedding_dim 2.0 +314 63 optimizer.lr 0.006541106077044743 +314 63 training.batch_size 1.0 +314 63 training.label_smoothing 0.0016330978529968433 +314 64 model.embedding_dim 0.0 +314 64 optimizer.lr 0.0012421605292792432 +314 64 training.batch_size 0.0 +314 64 training.label_smoothing 0.10307243331113154 +314 65 model.embedding_dim 0.0 +314 65 optimizer.lr 0.018165756011990365 +314 65 training.batch_size 0.0 +314 65 training.label_smoothing 0.005301293660158408 +314 66 model.embedding_dim 2.0 +314 66 optimizer.lr 0.024225242319298006 +314 66 training.batch_size 1.0 +314 66 training.label_smoothing 0.011924959877441833 +314 67 model.embedding_dim 2.0 +314 67 optimizer.lr 0.004104585244451123 +314 67 training.batch_size 0.0 +314 67 training.label_smoothing 0.5687539294766857 +314 68 model.embedding_dim 0.0 +314 68 optimizer.lr 0.0011815494882023245 +314 68 training.batch_size 2.0 +314 68 training.label_smoothing 0.003409925993822879 +314 69 model.embedding_dim 1.0 +314 69 optimizer.lr 0.007660972889339741 +314 69 training.batch_size 1.0 +314 69 training.label_smoothing 0.6849582446762409 +314 70 model.embedding_dim 1.0 
+314 70 optimizer.lr 0.002127997821344083 +314 70 training.batch_size 2.0 +314 70 training.label_smoothing 0.01290466177251803 +314 71 model.embedding_dim 0.0 +314 71 optimizer.lr 0.02271956306759936 +314 71 training.batch_size 2.0 +314 71 training.label_smoothing 0.003098046237297355 +314 72 model.embedding_dim 1.0 +314 72 optimizer.lr 0.026537229234541323 +314 72 training.batch_size 1.0 +314 72 training.label_smoothing 0.03872265531373665 +314 73 model.embedding_dim 1.0 +314 73 optimizer.lr 0.0012053439722859022 +314 73 training.batch_size 1.0 +314 73 training.label_smoothing 0.08539259811413402 +314 74 model.embedding_dim 1.0 +314 74 optimizer.lr 0.012437929571253198 +314 74 training.batch_size 0.0 +314 74 training.label_smoothing 0.08747345120772303 +314 75 model.embedding_dim 0.0 +314 75 optimizer.lr 0.004285365248996374 +314 75 training.batch_size 2.0 +314 75 training.label_smoothing 0.008405039421476568 +314 76 model.embedding_dim 2.0 +314 76 optimizer.lr 0.0015257379548633883 +314 76 training.batch_size 0.0 +314 76 training.label_smoothing 0.004785128667617004 +314 77 model.embedding_dim 1.0 +314 77 optimizer.lr 0.007448786519202368 +314 77 training.batch_size 2.0 +314 77 training.label_smoothing 0.0691627805337302 +314 78 model.embedding_dim 0.0 +314 78 optimizer.lr 0.009054601115389343 +314 78 training.batch_size 1.0 +314 78 training.label_smoothing 0.0981772476064317 +314 79 model.embedding_dim 2.0 +314 79 optimizer.lr 0.001509427674700923 +314 79 training.batch_size 0.0 +314 79 training.label_smoothing 0.08962091091413239 +314 80 model.embedding_dim 2.0 +314 80 optimizer.lr 0.04586835246449395 +314 80 training.batch_size 2.0 +314 80 training.label_smoothing 0.19924860024755697 +314 81 model.embedding_dim 1.0 +314 81 optimizer.lr 0.0025835622048496487 +314 81 training.batch_size 0.0 +314 81 training.label_smoothing 0.014367682852156827 +314 82 model.embedding_dim 1.0 +314 82 optimizer.lr 0.0010327483815918996 +314 82 training.batch_size 0.0 +314 82 
training.label_smoothing 0.03419818941287589 +314 83 model.embedding_dim 1.0 +314 83 optimizer.lr 0.00858011894067463 +314 83 training.batch_size 1.0 +314 83 training.label_smoothing 0.9112102307371015 +314 84 model.embedding_dim 2.0 +314 84 optimizer.lr 0.006008796657949352 +314 84 training.batch_size 0.0 +314 84 training.label_smoothing 0.017201537737593753 +314 85 model.embedding_dim 2.0 +314 85 optimizer.lr 0.02081330773242998 +314 85 training.batch_size 0.0 +314 85 training.label_smoothing 0.014407913173032486 +314 86 model.embedding_dim 2.0 +314 86 optimizer.lr 0.009882505516492243 +314 86 training.batch_size 2.0 +314 86 training.label_smoothing 0.3453259085986992 +314 87 model.embedding_dim 2.0 +314 87 optimizer.lr 0.058202392111916045 +314 87 training.batch_size 1.0 +314 87 training.label_smoothing 0.013295882595236449 +314 88 model.embedding_dim 1.0 +314 88 optimizer.lr 0.08082333378729556 +314 88 training.batch_size 1.0 +314 88 training.label_smoothing 0.3432636618657953 +314 89 model.embedding_dim 1.0 +314 89 optimizer.lr 0.03265116294090104 +314 89 training.batch_size 1.0 +314 89 training.label_smoothing 0.0022968500495745337 +314 90 model.embedding_dim 0.0 +314 90 optimizer.lr 0.004753751056821083 +314 90 training.batch_size 1.0 +314 90 training.label_smoothing 0.3501978693064744 +314 91 model.embedding_dim 2.0 +314 91 optimizer.lr 0.006513005056424248 +314 91 training.batch_size 2.0 +314 91 training.label_smoothing 0.08268342868604261 +314 92 model.embedding_dim 1.0 +314 92 optimizer.lr 0.003707716571651886 +314 92 training.batch_size 2.0 +314 92 training.label_smoothing 0.02949433423568125 +314 93 model.embedding_dim 1.0 +314 93 optimizer.lr 0.013212933947267044 +314 93 training.batch_size 1.0 +314 93 training.label_smoothing 0.034266802104860605 +314 94 model.embedding_dim 1.0 +314 94 optimizer.lr 0.06774887423004422 +314 94 training.batch_size 1.0 +314 94 training.label_smoothing 0.010621491422492116 +314 95 model.embedding_dim 0.0 +314 95 
optimizer.lr 0.0023063363957714408 +314 95 training.batch_size 2.0 +314 95 training.label_smoothing 0.8580300332947933 +314 96 model.embedding_dim 1.0 +314 96 optimizer.lr 0.003155380053486904 +314 96 training.batch_size 1.0 +314 96 training.label_smoothing 0.0010657542970818226 +314 97 model.embedding_dim 1.0 +314 97 optimizer.lr 0.0137582545237895 +314 97 training.batch_size 1.0 +314 97 training.label_smoothing 0.20015652128956335 +314 98 model.embedding_dim 1.0 +314 98 optimizer.lr 0.006661554946788476 +314 98 training.batch_size 0.0 +314 98 training.label_smoothing 0.055445758575806116 +314 99 model.embedding_dim 0.0 +314 99 optimizer.lr 0.06914827228172644 +314 99 training.batch_size 0.0 +314 99 training.label_smoothing 0.7290263497842333 +314 100 model.embedding_dim 1.0 +314 100 optimizer.lr 0.011106458197208733 +314 100 training.batch_size 1.0 +314 100 training.label_smoothing 0.050303830643252445 +314 1 dataset """kinships""" +314 1 model """hole""" +314 1 loss """bceaftersigmoid""" +314 1 regularizer """no""" +314 1 optimizer """adam""" +314 1 training_loop """lcwa""" +314 1 evaluator """rankbased""" +314 2 dataset """kinships""" +314 2 model """hole""" +314 2 loss """bceaftersigmoid""" +314 2 regularizer """no""" +314 2 optimizer """adam""" +314 2 training_loop """lcwa""" +314 2 evaluator """rankbased""" +314 3 dataset """kinships""" +314 3 model """hole""" +314 3 loss """bceaftersigmoid""" +314 3 regularizer """no""" +314 3 optimizer """adam""" +314 3 training_loop """lcwa""" +314 3 evaluator """rankbased""" +314 4 dataset """kinships""" +314 4 model """hole""" +314 4 loss """bceaftersigmoid""" +314 4 regularizer """no""" +314 4 optimizer """adam""" +314 4 training_loop """lcwa""" +314 4 evaluator """rankbased""" +314 5 dataset """kinships""" +314 5 model """hole""" +314 5 loss """bceaftersigmoid""" +314 5 regularizer """no""" +314 5 optimizer """adam""" +314 5 training_loop """lcwa""" +314 5 evaluator """rankbased""" +314 6 dataset """kinships""" +314 6 
model """hole""" +314 6 loss """bceaftersigmoid""" +314 6 regularizer """no""" +314 6 optimizer """adam""" +314 6 training_loop """lcwa""" +314 6 evaluator """rankbased""" +314 7 dataset """kinships""" +314 7 model """hole""" +314 7 loss """bceaftersigmoid""" +314 7 regularizer """no""" +314 7 optimizer """adam""" +314 7 training_loop """lcwa""" +314 7 evaluator """rankbased""" +314 8 dataset """kinships""" +314 8 model """hole""" +314 8 loss """bceaftersigmoid""" +314 8 regularizer """no""" +314 8 optimizer """adam""" +314 8 training_loop """lcwa""" +314 8 evaluator """rankbased""" +314 9 dataset """kinships""" +314 9 model """hole""" +314 9 loss """bceaftersigmoid""" +314 9 regularizer """no""" +314 9 optimizer """adam""" +314 9 training_loop """lcwa""" +314 9 evaluator """rankbased""" +314 10 dataset """kinships""" +314 10 model """hole""" +314 10 loss """bceaftersigmoid""" +314 10 regularizer """no""" +314 10 optimizer """adam""" +314 10 training_loop """lcwa""" +314 10 evaluator """rankbased""" +314 11 dataset """kinships""" +314 11 model """hole""" +314 11 loss """bceaftersigmoid""" +314 11 regularizer """no""" +314 11 optimizer """adam""" +314 11 training_loop """lcwa""" +314 11 evaluator """rankbased""" +314 12 dataset """kinships""" +314 12 model """hole""" +314 12 loss """bceaftersigmoid""" +314 12 regularizer """no""" +314 12 optimizer """adam""" +314 12 training_loop """lcwa""" +314 12 evaluator """rankbased""" +314 13 dataset """kinships""" +314 13 model """hole""" +314 13 loss """bceaftersigmoid""" +314 13 regularizer """no""" +314 13 optimizer """adam""" +314 13 training_loop """lcwa""" +314 13 evaluator """rankbased""" +314 14 dataset """kinships""" +314 14 model """hole""" +314 14 loss """bceaftersigmoid""" +314 14 regularizer """no""" +314 14 optimizer """adam""" +314 14 training_loop """lcwa""" +314 14 evaluator """rankbased""" +314 15 dataset """kinships""" +314 15 model """hole""" +314 15 loss """bceaftersigmoid""" +314 15 regularizer """no""" 
+314 15 optimizer """adam""" +314 15 training_loop """lcwa""" +314 15 evaluator """rankbased""" +314 16 dataset """kinships""" +314 16 model """hole""" +314 16 loss """bceaftersigmoid""" +314 16 regularizer """no""" +314 16 optimizer """adam""" +314 16 training_loop """lcwa""" +314 16 evaluator """rankbased""" +314 17 dataset """kinships""" +314 17 model """hole""" +314 17 loss """bceaftersigmoid""" +314 17 regularizer """no""" +314 17 optimizer """adam""" +314 17 training_loop """lcwa""" +314 17 evaluator """rankbased""" +314 18 dataset """kinships""" +314 18 model """hole""" +314 18 loss """bceaftersigmoid""" +314 18 regularizer """no""" +314 18 optimizer """adam""" +314 18 training_loop """lcwa""" +314 18 evaluator """rankbased""" +314 19 dataset """kinships""" +314 19 model """hole""" +314 19 loss """bceaftersigmoid""" +314 19 regularizer """no""" +314 19 optimizer """adam""" +314 19 training_loop """lcwa""" +314 19 evaluator """rankbased""" +314 20 dataset """kinships""" +314 20 model """hole""" +314 20 loss """bceaftersigmoid""" +314 20 regularizer """no""" +314 20 optimizer """adam""" +314 20 training_loop """lcwa""" +314 20 evaluator """rankbased""" +314 21 dataset """kinships""" +314 21 model """hole""" +314 21 loss """bceaftersigmoid""" +314 21 regularizer """no""" +314 21 optimizer """adam""" +314 21 training_loop """lcwa""" +314 21 evaluator """rankbased""" +314 22 dataset """kinships""" +314 22 model """hole""" +314 22 loss """bceaftersigmoid""" +314 22 regularizer """no""" +314 22 optimizer """adam""" +314 22 training_loop """lcwa""" +314 22 evaluator """rankbased""" +314 23 dataset """kinships""" +314 23 model """hole""" +314 23 loss """bceaftersigmoid""" +314 23 regularizer """no""" +314 23 optimizer """adam""" +314 23 training_loop """lcwa""" +314 23 evaluator """rankbased""" +314 24 dataset """kinships""" +314 24 model """hole""" +314 24 loss """bceaftersigmoid""" +314 24 regularizer """no""" +314 24 optimizer """adam""" +314 24 training_loop 
"""lcwa""" +314 24 evaluator """rankbased""" +314 25 dataset """kinships""" +314 25 model """hole""" +314 25 loss """bceaftersigmoid""" +314 25 regularizer """no""" +314 25 optimizer """adam""" +314 25 training_loop """lcwa""" +314 25 evaluator """rankbased""" +314 26 dataset """kinships""" +314 26 model """hole""" +314 26 loss """bceaftersigmoid""" +314 26 regularizer """no""" +314 26 optimizer """adam""" +314 26 training_loop """lcwa""" +314 26 evaluator """rankbased""" +314 27 dataset """kinships""" +314 27 model """hole""" +314 27 loss """bceaftersigmoid""" +314 27 regularizer """no""" +314 27 optimizer """adam""" +314 27 training_loop """lcwa""" +314 27 evaluator """rankbased""" +314 28 dataset """kinships""" +314 28 model """hole""" +314 28 loss """bceaftersigmoid""" +314 28 regularizer """no""" +314 28 optimizer """adam""" +314 28 training_loop """lcwa""" +314 28 evaluator """rankbased""" +314 29 dataset """kinships""" +314 29 model """hole""" +314 29 loss """bceaftersigmoid""" +314 29 regularizer """no""" +314 29 optimizer """adam""" +314 29 training_loop """lcwa""" +314 29 evaluator """rankbased""" +314 30 dataset """kinships""" +314 30 model """hole""" +314 30 loss """bceaftersigmoid""" +314 30 regularizer """no""" +314 30 optimizer """adam""" +314 30 training_loop """lcwa""" +314 30 evaluator """rankbased""" +314 31 dataset """kinships""" +314 31 model """hole""" +314 31 loss """bceaftersigmoid""" +314 31 regularizer """no""" +314 31 optimizer """adam""" +314 31 training_loop """lcwa""" +314 31 evaluator """rankbased""" +314 32 dataset """kinships""" +314 32 model """hole""" +314 32 loss """bceaftersigmoid""" +314 32 regularizer """no""" +314 32 optimizer """adam""" +314 32 training_loop """lcwa""" +314 32 evaluator """rankbased""" +314 33 dataset """kinships""" +314 33 model """hole""" +314 33 loss """bceaftersigmoid""" +314 33 regularizer """no""" +314 33 optimizer """adam""" +314 33 training_loop """lcwa""" +314 33 evaluator """rankbased""" +314 34 
dataset """kinships""" +314 34 model """hole""" +314 34 loss """bceaftersigmoid""" +314 34 regularizer """no""" +314 34 optimizer """adam""" +314 34 training_loop """lcwa""" +314 34 evaluator """rankbased""" +314 35 dataset """kinships""" +314 35 model """hole""" +314 35 loss """bceaftersigmoid""" +314 35 regularizer """no""" +314 35 optimizer """adam""" +314 35 training_loop """lcwa""" +314 35 evaluator """rankbased""" +314 36 dataset """kinships""" +314 36 model """hole""" +314 36 loss """bceaftersigmoid""" +314 36 regularizer """no""" +314 36 optimizer """adam""" +314 36 training_loop """lcwa""" +314 36 evaluator """rankbased""" +314 37 dataset """kinships""" +314 37 model """hole""" +314 37 loss """bceaftersigmoid""" +314 37 regularizer """no""" +314 37 optimizer """adam""" +314 37 training_loop """lcwa""" +314 37 evaluator """rankbased""" +314 38 dataset """kinships""" +314 38 model """hole""" +314 38 loss """bceaftersigmoid""" +314 38 regularizer """no""" +314 38 optimizer """adam""" +314 38 training_loop """lcwa""" +314 38 evaluator """rankbased""" +314 39 dataset """kinships""" +314 39 model """hole""" +314 39 loss """bceaftersigmoid""" +314 39 regularizer """no""" +314 39 optimizer """adam""" +314 39 training_loop """lcwa""" +314 39 evaluator """rankbased""" +314 40 dataset """kinships""" +314 40 model """hole""" +314 40 loss """bceaftersigmoid""" +314 40 regularizer """no""" +314 40 optimizer """adam""" +314 40 training_loop """lcwa""" +314 40 evaluator """rankbased""" +314 41 dataset """kinships""" +314 41 model """hole""" +314 41 loss """bceaftersigmoid""" +314 41 regularizer """no""" +314 41 optimizer """adam""" +314 41 training_loop """lcwa""" +314 41 evaluator """rankbased""" +314 42 dataset """kinships""" +314 42 model """hole""" +314 42 loss """bceaftersigmoid""" +314 42 regularizer """no""" +314 42 optimizer """adam""" +314 42 training_loop """lcwa""" +314 42 evaluator """rankbased""" +314 43 dataset """kinships""" +314 43 model """hole""" +314 43 
loss """bceaftersigmoid""" +314 43 regularizer """no""" +314 43 optimizer """adam""" +314 43 training_loop """lcwa""" +314 43 evaluator """rankbased""" +314 44 dataset """kinships""" +314 44 model """hole""" +314 44 loss """bceaftersigmoid""" +314 44 regularizer """no""" +314 44 optimizer """adam""" +314 44 training_loop """lcwa""" +314 44 evaluator """rankbased""" +314 45 dataset """kinships""" +314 45 model """hole""" +314 45 loss """bceaftersigmoid""" +314 45 regularizer """no""" +314 45 optimizer """adam""" +314 45 training_loop """lcwa""" +314 45 evaluator """rankbased""" +314 46 dataset """kinships""" +314 46 model """hole""" +314 46 loss """bceaftersigmoid""" +314 46 regularizer """no""" +314 46 optimizer """adam""" +314 46 training_loop """lcwa""" +314 46 evaluator """rankbased""" +314 47 dataset """kinships""" +314 47 model """hole""" +314 47 loss """bceaftersigmoid""" +314 47 regularizer """no""" +314 47 optimizer """adam""" +314 47 training_loop """lcwa""" +314 47 evaluator """rankbased""" +314 48 dataset """kinships""" +314 48 model """hole""" +314 48 loss """bceaftersigmoid""" +314 48 regularizer """no""" +314 48 optimizer """adam""" +314 48 training_loop """lcwa""" +314 48 evaluator """rankbased""" +314 49 dataset """kinships""" +314 49 model """hole""" +314 49 loss """bceaftersigmoid""" +314 49 regularizer """no""" +314 49 optimizer """adam""" +314 49 training_loop """lcwa""" +314 49 evaluator """rankbased""" +314 50 dataset """kinships""" +314 50 model """hole""" +314 50 loss """bceaftersigmoid""" +314 50 regularizer """no""" +314 50 optimizer """adam""" +314 50 training_loop """lcwa""" +314 50 evaluator """rankbased""" +314 51 dataset """kinships""" +314 51 model """hole""" +314 51 loss """bceaftersigmoid""" +314 51 regularizer """no""" +314 51 optimizer """adam""" +314 51 training_loop """lcwa""" +314 51 evaluator """rankbased""" +314 52 dataset """kinships""" +314 52 model """hole""" +314 52 loss """bceaftersigmoid""" +314 52 regularizer """no""" 
+314 52 optimizer """adam""" +314 52 training_loop """lcwa""" +314 52 evaluator """rankbased""" +314 53 dataset """kinships""" +314 53 model """hole""" +314 53 loss """bceaftersigmoid""" +314 53 regularizer """no""" +314 53 optimizer """adam""" +314 53 training_loop """lcwa""" +314 53 evaluator """rankbased""" +314 54 dataset """kinships""" +314 54 model """hole""" +314 54 loss """bceaftersigmoid""" +314 54 regularizer """no""" +314 54 optimizer """adam""" +314 54 training_loop """lcwa""" +314 54 evaluator """rankbased""" +314 55 dataset """kinships""" +314 55 model """hole""" +314 55 loss """bceaftersigmoid""" +314 55 regularizer """no""" +314 55 optimizer """adam""" +314 55 training_loop """lcwa""" +314 55 evaluator """rankbased""" +314 56 dataset """kinships""" +314 56 model """hole""" +314 56 loss """bceaftersigmoid""" +314 56 regularizer """no""" +314 56 optimizer """adam""" +314 56 training_loop """lcwa""" +314 56 evaluator """rankbased""" +314 57 dataset """kinships""" +314 57 model """hole""" +314 57 loss """bceaftersigmoid""" +314 57 regularizer """no""" +314 57 optimizer """adam""" +314 57 training_loop """lcwa""" +314 57 evaluator """rankbased""" +314 58 dataset """kinships""" +314 58 model """hole""" +314 58 loss """bceaftersigmoid""" +314 58 regularizer """no""" +314 58 optimizer """adam""" +314 58 training_loop """lcwa""" +314 58 evaluator """rankbased""" +314 59 dataset """kinships""" +314 59 model """hole""" +314 59 loss """bceaftersigmoid""" +314 59 regularizer """no""" +314 59 optimizer """adam""" +314 59 training_loop """lcwa""" +314 59 evaluator """rankbased""" +314 60 dataset """kinships""" +314 60 model """hole""" +314 60 loss """bceaftersigmoid""" +314 60 regularizer """no""" +314 60 optimizer """adam""" +314 60 training_loop """lcwa""" +314 60 evaluator """rankbased""" +314 61 dataset """kinships""" +314 61 model """hole""" +314 61 loss """bceaftersigmoid""" +314 61 regularizer """no""" +314 61 optimizer """adam""" +314 61 training_loop 
"""lcwa""" +314 61 evaluator """rankbased""" +314 62 dataset """kinships""" +314 62 model """hole""" +314 62 loss """bceaftersigmoid""" +314 62 regularizer """no""" +314 62 optimizer """adam""" +314 62 training_loop """lcwa""" +314 62 evaluator """rankbased""" +314 63 dataset """kinships""" +314 63 model """hole""" +314 63 loss """bceaftersigmoid""" +314 63 regularizer """no""" +314 63 optimizer """adam""" +314 63 training_loop """lcwa""" +314 63 evaluator """rankbased""" +314 64 dataset """kinships""" +314 64 model """hole""" +314 64 loss """bceaftersigmoid""" +314 64 regularizer """no""" +314 64 optimizer """adam""" +314 64 training_loop """lcwa""" +314 64 evaluator """rankbased""" +314 65 dataset """kinships""" +314 65 model """hole""" +314 65 loss """bceaftersigmoid""" +314 65 regularizer """no""" +314 65 optimizer """adam""" +314 65 training_loop """lcwa""" +314 65 evaluator """rankbased""" +314 66 dataset """kinships""" +314 66 model """hole""" +314 66 loss """bceaftersigmoid""" +314 66 regularizer """no""" +314 66 optimizer """adam""" +314 66 training_loop """lcwa""" +314 66 evaluator """rankbased""" +314 67 dataset """kinships""" +314 67 model """hole""" +314 67 loss """bceaftersigmoid""" +314 67 regularizer """no""" +314 67 optimizer """adam""" +314 67 training_loop """lcwa""" +314 67 evaluator """rankbased""" +314 68 dataset """kinships""" +314 68 model """hole""" +314 68 loss """bceaftersigmoid""" +314 68 regularizer """no""" +314 68 optimizer """adam""" +314 68 training_loop """lcwa""" +314 68 evaluator """rankbased""" +314 69 dataset """kinships""" +314 69 model """hole""" +314 69 loss """bceaftersigmoid""" +314 69 regularizer """no""" +314 69 optimizer """adam""" +314 69 training_loop """lcwa""" +314 69 evaluator """rankbased""" +314 70 dataset """kinships""" +314 70 model """hole""" +314 70 loss """bceaftersigmoid""" +314 70 regularizer """no""" +314 70 optimizer """adam""" +314 70 training_loop """lcwa""" +314 70 evaluator """rankbased""" +314 71 
dataset """kinships""" +314 71 model """hole""" +314 71 loss """bceaftersigmoid""" +314 71 regularizer """no""" +314 71 optimizer """adam""" +314 71 training_loop """lcwa""" +314 71 evaluator """rankbased""" +314 72 dataset """kinships""" +314 72 model """hole""" +314 72 loss """bceaftersigmoid""" +314 72 regularizer """no""" +314 72 optimizer """adam""" +314 72 training_loop """lcwa""" +314 72 evaluator """rankbased""" +314 73 dataset """kinships""" +314 73 model """hole""" +314 73 loss """bceaftersigmoid""" +314 73 regularizer """no""" +314 73 optimizer """adam""" +314 73 training_loop """lcwa""" +314 73 evaluator """rankbased""" +314 74 dataset """kinships""" +314 74 model """hole""" +314 74 loss """bceaftersigmoid""" +314 74 regularizer """no""" +314 74 optimizer """adam""" +314 74 training_loop """lcwa""" +314 74 evaluator """rankbased""" +314 75 dataset """kinships""" +314 75 model """hole""" +314 75 loss """bceaftersigmoid""" +314 75 regularizer """no""" +314 75 optimizer """adam""" +314 75 training_loop """lcwa""" +314 75 evaluator """rankbased""" +314 76 dataset """kinships""" +314 76 model """hole""" +314 76 loss """bceaftersigmoid""" +314 76 regularizer """no""" +314 76 optimizer """adam""" +314 76 training_loop """lcwa""" +314 76 evaluator """rankbased""" +314 77 dataset """kinships""" +314 77 model """hole""" +314 77 loss """bceaftersigmoid""" +314 77 regularizer """no""" +314 77 optimizer """adam""" +314 77 training_loop """lcwa""" +314 77 evaluator """rankbased""" +314 78 dataset """kinships""" +314 78 model """hole""" +314 78 loss """bceaftersigmoid""" +314 78 regularizer """no""" +314 78 optimizer """adam""" +314 78 training_loop """lcwa""" +314 78 evaluator """rankbased""" +314 79 dataset """kinships""" +314 79 model """hole""" +314 79 loss """bceaftersigmoid""" +314 79 regularizer """no""" +314 79 optimizer """adam""" +314 79 training_loop """lcwa""" +314 79 evaluator """rankbased""" +314 80 dataset """kinships""" +314 80 model """hole""" +314 80 
loss """bceaftersigmoid""" +314 80 regularizer """no""" +314 80 optimizer """adam""" +314 80 training_loop """lcwa""" +314 80 evaluator """rankbased""" +314 81 dataset """kinships""" +314 81 model """hole""" +314 81 loss """bceaftersigmoid""" +314 81 regularizer """no""" +314 81 optimizer """adam""" +314 81 training_loop """lcwa""" +314 81 evaluator """rankbased""" +314 82 dataset """kinships""" +314 82 model """hole""" +314 82 loss """bceaftersigmoid""" +314 82 regularizer """no""" +314 82 optimizer """adam""" +314 82 training_loop """lcwa""" +314 82 evaluator """rankbased""" +314 83 dataset """kinships""" +314 83 model """hole""" +314 83 loss """bceaftersigmoid""" +314 83 regularizer """no""" +314 83 optimizer """adam""" +314 83 training_loop """lcwa""" +314 83 evaluator """rankbased""" +314 84 dataset """kinships""" +314 84 model """hole""" +314 84 loss """bceaftersigmoid""" +314 84 regularizer """no""" +314 84 optimizer """adam""" +314 84 training_loop """lcwa""" +314 84 evaluator """rankbased""" +314 85 dataset """kinships""" +314 85 model """hole""" +314 85 loss """bceaftersigmoid""" +314 85 regularizer """no""" +314 85 optimizer """adam""" +314 85 training_loop """lcwa""" +314 85 evaluator """rankbased""" +314 86 dataset """kinships""" +314 86 model """hole""" +314 86 loss """bceaftersigmoid""" +314 86 regularizer """no""" +314 86 optimizer """adam""" +314 86 training_loop """lcwa""" +314 86 evaluator """rankbased""" +314 87 dataset """kinships""" +314 87 model """hole""" +314 87 loss """bceaftersigmoid""" +314 87 regularizer """no""" +314 87 optimizer """adam""" +314 87 training_loop """lcwa""" +314 87 evaluator """rankbased""" +314 88 dataset """kinships""" +314 88 model """hole""" +314 88 loss """bceaftersigmoid""" +314 88 regularizer """no""" +314 88 optimizer """adam""" +314 88 training_loop """lcwa""" +314 88 evaluator """rankbased""" +314 89 dataset """kinships""" +314 89 model """hole""" +314 89 loss """bceaftersigmoid""" +314 89 regularizer """no""" 
+314 89 optimizer """adam""" +314 89 training_loop """lcwa""" +314 89 evaluator """rankbased""" +314 90 dataset """kinships""" +314 90 model """hole""" +314 90 loss """bceaftersigmoid""" +314 90 regularizer """no""" +314 90 optimizer """adam""" +314 90 training_loop """lcwa""" +314 90 evaluator """rankbased""" +314 91 dataset """kinships""" +314 91 model """hole""" +314 91 loss """bceaftersigmoid""" +314 91 regularizer """no""" +314 91 optimizer """adam""" +314 91 training_loop """lcwa""" +314 91 evaluator """rankbased""" +314 92 dataset """kinships""" +314 92 model """hole""" +314 92 loss """bceaftersigmoid""" +314 92 regularizer """no""" +314 92 optimizer """adam""" +314 92 training_loop """lcwa""" +314 92 evaluator """rankbased""" +314 93 dataset """kinships""" +314 93 model """hole""" +314 93 loss """bceaftersigmoid""" +314 93 regularizer """no""" +314 93 optimizer """adam""" +314 93 training_loop """lcwa""" +314 93 evaluator """rankbased""" +314 94 dataset """kinships""" +314 94 model """hole""" +314 94 loss """bceaftersigmoid""" +314 94 regularizer """no""" +314 94 optimizer """adam""" +314 94 training_loop """lcwa""" +314 94 evaluator """rankbased""" +314 95 dataset """kinships""" +314 95 model """hole""" +314 95 loss """bceaftersigmoid""" +314 95 regularizer """no""" +314 95 optimizer """adam""" +314 95 training_loop """lcwa""" +314 95 evaluator """rankbased""" +314 96 dataset """kinships""" +314 96 model """hole""" +314 96 loss """bceaftersigmoid""" +314 96 regularizer """no""" +314 96 optimizer """adam""" +314 96 training_loop """lcwa""" +314 96 evaluator """rankbased""" +314 97 dataset """kinships""" +314 97 model """hole""" +314 97 loss """bceaftersigmoid""" +314 97 regularizer """no""" +314 97 optimizer """adam""" +314 97 training_loop """lcwa""" +314 97 evaluator """rankbased""" +314 98 dataset """kinships""" +314 98 model """hole""" +314 98 loss """bceaftersigmoid""" +314 98 regularizer """no""" +314 98 optimizer """adam""" +314 98 training_loop 
"""lcwa""" +314 98 evaluator """rankbased""" +314 99 dataset """kinships""" +314 99 model """hole""" +314 99 loss """bceaftersigmoid""" +314 99 regularizer """no""" +314 99 optimizer """adam""" +314 99 training_loop """lcwa""" +314 99 evaluator """rankbased""" +314 100 dataset """kinships""" +314 100 model """hole""" +314 100 loss """bceaftersigmoid""" +314 100 regularizer """no""" +314 100 optimizer """adam""" +314 100 training_loop """lcwa""" +314 100 evaluator """rankbased""" +315 1 model.embedding_dim 1.0 +315 1 optimizer.lr 0.014856620703281156 +315 1 training.batch_size 2.0 +315 1 training.label_smoothing 0.5274320666517324 +315 2 model.embedding_dim 2.0 +315 2 optimizer.lr 0.04097786840740569 +315 2 training.batch_size 1.0 +315 2 training.label_smoothing 0.03775904322666467 +315 3 model.embedding_dim 1.0 +315 3 optimizer.lr 0.0015383743794422757 +315 3 training.batch_size 2.0 +315 3 training.label_smoothing 0.05810318565728967 +315 4 model.embedding_dim 1.0 +315 4 optimizer.lr 0.09260966574680406 +315 4 training.batch_size 0.0 +315 4 training.label_smoothing 0.14368724480455533 +315 5 model.embedding_dim 1.0 +315 5 optimizer.lr 0.023800153069660702 +315 5 training.batch_size 1.0 +315 5 training.label_smoothing 0.00860209320609262 +315 6 model.embedding_dim 2.0 +315 6 optimizer.lr 0.040818571166905133 +315 6 training.batch_size 1.0 +315 6 training.label_smoothing 0.005789273658247994 +315 7 model.embedding_dim 1.0 +315 7 optimizer.lr 0.00927890529493821 +315 7 training.batch_size 0.0 +315 7 training.label_smoothing 0.0022733324309750836 +315 8 model.embedding_dim 2.0 +315 8 optimizer.lr 0.030763037232808578 +315 8 training.batch_size 1.0 +315 8 training.label_smoothing 0.1331407551177818 +315 9 model.embedding_dim 1.0 +315 9 optimizer.lr 0.014561524297235813 +315 9 training.batch_size 1.0 +315 9 training.label_smoothing 0.029010752711183464 +315 10 model.embedding_dim 0.0 +315 10 optimizer.lr 0.018767006030953094 +315 10 training.batch_size 2.0 +315 10 
training.label_smoothing 0.0030675452111647355 +315 11 model.embedding_dim 1.0 +315 11 optimizer.lr 0.004370666117071074 +315 11 training.batch_size 2.0 +315 11 training.label_smoothing 0.12703697521931032 +315 12 model.embedding_dim 1.0 +315 12 optimizer.lr 0.0012416953744883007 +315 12 training.batch_size 1.0 +315 12 training.label_smoothing 0.02701885452744366 +315 13 model.embedding_dim 1.0 +315 13 optimizer.lr 0.011687550644191707 +315 13 training.batch_size 1.0 +315 13 training.label_smoothing 0.35677615243847505 +315 14 model.embedding_dim 1.0 +315 14 optimizer.lr 0.03912587499294436 +315 14 training.batch_size 1.0 +315 14 training.label_smoothing 0.08977167854739199 +315 15 model.embedding_dim 2.0 +315 15 optimizer.lr 0.006035451106476978 +315 15 training.batch_size 2.0 +315 15 training.label_smoothing 0.0012001589337200895 +315 16 model.embedding_dim 2.0 +315 16 optimizer.lr 0.003794029537327959 +315 16 training.batch_size 1.0 +315 16 training.label_smoothing 0.002000762290106039 +315 17 model.embedding_dim 2.0 +315 17 optimizer.lr 0.014327253323484791 +315 17 training.batch_size 0.0 +315 17 training.label_smoothing 0.06991823034263556 +315 18 model.embedding_dim 1.0 +315 18 optimizer.lr 0.08394826957454851 +315 18 training.batch_size 1.0 +315 18 training.label_smoothing 0.03392053610064902 +315 19 model.embedding_dim 2.0 +315 19 optimizer.lr 0.02356062792595343 +315 19 training.batch_size 0.0 +315 19 training.label_smoothing 0.8241464667920898 +315 20 model.embedding_dim 2.0 +315 20 optimizer.lr 0.013635642927189428 +315 20 training.batch_size 0.0 +315 20 training.label_smoothing 0.001536098796728433 +315 21 model.embedding_dim 2.0 +315 21 optimizer.lr 0.01256471165344174 +315 21 training.batch_size 2.0 +315 21 training.label_smoothing 0.0011765962000375265 +315 22 model.embedding_dim 2.0 +315 22 optimizer.lr 0.02621085320896033 +315 22 training.batch_size 0.0 +315 22 training.label_smoothing 0.09348805482135807 +315 23 model.embedding_dim 0.0 +315 23 
optimizer.lr 0.0018272033172941396 +315 23 training.batch_size 0.0 +315 23 training.label_smoothing 0.007191037037821554 +315 24 model.embedding_dim 2.0 +315 24 optimizer.lr 0.0017351535915403895 +315 24 training.batch_size 1.0 +315 24 training.label_smoothing 0.05527797523842465 +315 25 model.embedding_dim 0.0 +315 25 optimizer.lr 0.0015070701901025046 +315 25 training.batch_size 0.0 +315 25 training.label_smoothing 0.05249716476051989 +315 26 model.embedding_dim 0.0 +315 26 optimizer.lr 0.001009101962875806 +315 26 training.batch_size 1.0 +315 26 training.label_smoothing 0.005578228801810643 +315 27 model.embedding_dim 2.0 +315 27 optimizer.lr 0.006515312782167978 +315 27 training.batch_size 2.0 +315 27 training.label_smoothing 0.010662602853348042 +315 28 model.embedding_dim 1.0 +315 28 optimizer.lr 0.004094515741012464 +315 28 training.batch_size 1.0 +315 28 training.label_smoothing 0.004054406562376456 +315 29 model.embedding_dim 1.0 +315 29 optimizer.lr 0.004018610627607442 +315 29 training.batch_size 1.0 +315 29 training.label_smoothing 0.00906007390283349 +315 30 model.embedding_dim 0.0 +315 30 optimizer.lr 0.0030601678770665833 +315 30 training.batch_size 1.0 +315 30 training.label_smoothing 0.015077989571687614 +315 31 model.embedding_dim 2.0 +315 31 optimizer.lr 0.04532386621673921 +315 31 training.batch_size 0.0 +315 31 training.label_smoothing 0.030783110957988642 +315 32 model.embedding_dim 1.0 +315 32 optimizer.lr 0.015859976293087073 +315 32 training.batch_size 2.0 +315 32 training.label_smoothing 0.04223679894052746 +315 33 model.embedding_dim 1.0 +315 33 optimizer.lr 0.016940539586835494 +315 33 training.batch_size 2.0 +315 33 training.label_smoothing 0.055487137151448755 +315 34 model.embedding_dim 2.0 +315 34 optimizer.lr 0.04404277325885371 +315 34 training.batch_size 1.0 +315 34 training.label_smoothing 0.3760130887510474 +315 35 model.embedding_dim 2.0 +315 35 optimizer.lr 0.010151767543669869 +315 35 training.batch_size 0.0 +315 35 
training.label_smoothing 0.007733842312954785 +315 36 model.embedding_dim 0.0 +315 36 optimizer.lr 0.04331709276581529 +315 36 training.batch_size 2.0 +315 36 training.label_smoothing 0.0020198579434506704 +315 37 model.embedding_dim 0.0 +315 37 optimizer.lr 0.00903126838126652 +315 37 training.batch_size 1.0 +315 37 training.label_smoothing 0.0045304848946817865 +315 38 model.embedding_dim 1.0 +315 38 optimizer.lr 0.0011183740738499866 +315 38 training.batch_size 2.0 +315 38 training.label_smoothing 0.0016858291624833825 +315 39 model.embedding_dim 0.0 +315 39 optimizer.lr 0.05461253327762784 +315 39 training.batch_size 2.0 +315 39 training.label_smoothing 0.012699171443695522 +315 40 model.embedding_dim 0.0 +315 40 optimizer.lr 0.008945269311802898 +315 40 training.batch_size 1.0 +315 40 training.label_smoothing 0.0014362432087567717 +315 41 model.embedding_dim 1.0 +315 41 optimizer.lr 0.009980450819463783 +315 41 training.batch_size 0.0 +315 41 training.label_smoothing 0.0021531514835839874 +315 42 model.embedding_dim 0.0 +315 42 optimizer.lr 0.00386305037045722 +315 42 training.batch_size 1.0 +315 42 training.label_smoothing 0.005424869956078476 +315 43 model.embedding_dim 0.0 +315 43 optimizer.lr 0.0022958077852669465 +315 43 training.batch_size 2.0 +315 43 training.label_smoothing 0.20809387730300402 +315 44 model.embedding_dim 2.0 +315 44 optimizer.lr 0.004191945490626144 +315 44 training.batch_size 1.0 +315 44 training.label_smoothing 0.5788183179899482 +315 45 model.embedding_dim 2.0 +315 45 optimizer.lr 0.0025753071875647643 +315 45 training.batch_size 0.0 +315 45 training.label_smoothing 0.0023688707894134447 +315 46 model.embedding_dim 2.0 +315 46 optimizer.lr 0.0011052071751805543 +315 46 training.batch_size 1.0 +315 46 training.label_smoothing 0.022391219420219925 +315 47 model.embedding_dim 2.0 +315 47 optimizer.lr 0.05303924006338155 +315 47 training.batch_size 1.0 +315 47 training.label_smoothing 0.016570469048469642 +315 48 model.embedding_dim 0.0 
+315 48 optimizer.lr 0.0011137607696714684 +315 48 training.batch_size 2.0 +315 48 training.label_smoothing 0.003218431177893861 +315 49 model.embedding_dim 0.0 +315 49 optimizer.lr 0.001301418816397606 +315 49 training.batch_size 2.0 +315 49 training.label_smoothing 0.0064016875722489675 +315 50 model.embedding_dim 1.0 +315 50 optimizer.lr 0.0019121424081709473 +315 50 training.batch_size 1.0 +315 50 training.label_smoothing 0.0022911306544192846 +315 51 model.embedding_dim 0.0 +315 51 optimizer.lr 0.0017280003162465475 +315 51 training.batch_size 2.0 +315 51 training.label_smoothing 0.2900075796746746 +315 52 model.embedding_dim 2.0 +315 52 optimizer.lr 0.024585821952967598 +315 52 training.batch_size 2.0 +315 52 training.label_smoothing 0.12767186646823883 +315 53 model.embedding_dim 0.0 +315 53 optimizer.lr 0.03576653939773396 +315 53 training.batch_size 0.0 +315 53 training.label_smoothing 0.005889530838221761 +315 54 model.embedding_dim 2.0 +315 54 optimizer.lr 0.0024984245843959235 +315 54 training.batch_size 1.0 +315 54 training.label_smoothing 0.0014386455129642753 +315 55 model.embedding_dim 0.0 +315 55 optimizer.lr 0.025202454540569758 +315 55 training.batch_size 1.0 +315 55 training.label_smoothing 0.002369762594347234 +315 56 model.embedding_dim 1.0 +315 56 optimizer.lr 0.004133002848558067 +315 56 training.batch_size 2.0 +315 56 training.label_smoothing 0.05361075146850665 +315 57 model.embedding_dim 1.0 +315 57 optimizer.lr 0.0016402211397667636 +315 57 training.batch_size 0.0 +315 57 training.label_smoothing 0.005341058859486357 +315 58 model.embedding_dim 1.0 +315 58 optimizer.lr 0.07178571954283558 +315 58 training.batch_size 0.0 +315 58 training.label_smoothing 0.022742641467672433 +315 59 model.embedding_dim 2.0 +315 59 optimizer.lr 0.0027191481295351662 +315 59 training.batch_size 1.0 +315 59 training.label_smoothing 0.001965748405453109 +315 60 model.embedding_dim 1.0 +315 60 optimizer.lr 0.014972479202167568 +315 60 training.batch_size 1.0 
+315 60 training.label_smoothing 0.001390232938195668 +315 61 model.embedding_dim 2.0 +315 61 optimizer.lr 0.07537523044804206 +315 61 training.batch_size 1.0 +315 61 training.label_smoothing 0.0027764536941109856 +315 62 model.embedding_dim 1.0 +315 62 optimizer.lr 0.043832201772730235 +315 62 training.batch_size 1.0 +315 62 training.label_smoothing 0.0010637883694855337 +315 63 model.embedding_dim 2.0 +315 63 optimizer.lr 0.0012085603348228585 +315 63 training.batch_size 1.0 +315 63 training.label_smoothing 0.006438612021779394 +315 64 model.embedding_dim 1.0 +315 64 optimizer.lr 0.014418289610825565 +315 64 training.batch_size 0.0 +315 64 training.label_smoothing 0.04128787438479717 +315 65 model.embedding_dim 2.0 +315 65 optimizer.lr 0.024187037884064247 +315 65 training.batch_size 0.0 +315 65 training.label_smoothing 0.04014654200777737 +315 66 model.embedding_dim 1.0 +315 66 optimizer.lr 0.050880043217017834 +315 66 training.batch_size 2.0 +315 66 training.label_smoothing 0.036742147875422085 +315 67 model.embedding_dim 2.0 +315 67 optimizer.lr 0.014674971759102335 +315 67 training.batch_size 1.0 +315 67 training.label_smoothing 0.26740477996630513 +315 68 model.embedding_dim 2.0 +315 68 optimizer.lr 0.0032481641300129787 +315 68 training.batch_size 0.0 +315 68 training.label_smoothing 0.02286084827800552 +315 69 model.embedding_dim 0.0 +315 69 optimizer.lr 0.014973080133155679 +315 69 training.batch_size 1.0 +315 69 training.label_smoothing 0.018990798031060206 +315 70 model.embedding_dim 2.0 +315 70 optimizer.lr 0.01001818949506577 +315 70 training.batch_size 0.0 +315 70 training.label_smoothing 0.003230331783633544 +315 71 model.embedding_dim 1.0 +315 71 optimizer.lr 0.02932574738511071 +315 71 training.batch_size 2.0 +315 71 training.label_smoothing 0.09055895145058857 +315 72 model.embedding_dim 1.0 +315 72 optimizer.lr 0.01505544927260538 +315 72 training.batch_size 0.0 +315 72 training.label_smoothing 0.629293682206033 +315 73 model.embedding_dim 0.0 
+315 73 optimizer.lr 0.0012419998949244392 +315 73 training.batch_size 1.0 +315 73 training.label_smoothing 0.022012904095182202 +315 74 model.embedding_dim 1.0 +315 74 optimizer.lr 0.004368830355237971 +315 74 training.batch_size 1.0 +315 74 training.label_smoothing 0.4438996428730155 +315 75 model.embedding_dim 2.0 +315 75 optimizer.lr 0.0022991709874917257 +315 75 training.batch_size 2.0 +315 75 training.label_smoothing 0.6032775608077079 +315 76 model.embedding_dim 1.0 +315 76 optimizer.lr 0.060356488602623194 +315 76 training.batch_size 2.0 +315 76 training.label_smoothing 0.012733950628262491 +315 77 model.embedding_dim 2.0 +315 77 optimizer.lr 0.0018352524059993187 +315 77 training.batch_size 0.0 +315 77 training.label_smoothing 0.05899071329545608 +315 78 model.embedding_dim 1.0 +315 78 optimizer.lr 0.006941419459776655 +315 78 training.batch_size 2.0 +315 78 training.label_smoothing 0.14794327437541546 +315 79 model.embedding_dim 1.0 +315 79 optimizer.lr 0.02312171932195381 +315 79 training.batch_size 2.0 +315 79 training.label_smoothing 0.0014069273146003897 +315 80 model.embedding_dim 2.0 +315 80 optimizer.lr 0.001133128466857509 +315 80 training.batch_size 1.0 +315 80 training.label_smoothing 0.017771091485194408 +315 81 model.embedding_dim 0.0 +315 81 optimizer.lr 0.003334749932018308 +315 81 training.batch_size 2.0 +315 81 training.label_smoothing 0.1134919099945077 +315 82 model.embedding_dim 0.0 +315 82 optimizer.lr 0.0701277951869304 +315 82 training.batch_size 0.0 +315 82 training.label_smoothing 0.7695525100114576 +315 83 model.embedding_dim 2.0 +315 83 optimizer.lr 0.01668410832302026 +315 83 training.batch_size 0.0 +315 83 training.label_smoothing 0.3913865677432168 +315 84 model.embedding_dim 1.0 +315 84 optimizer.lr 0.016472386894151356 +315 84 training.batch_size 2.0 +315 84 training.label_smoothing 0.0018661701926242298 +315 85 model.embedding_dim 0.0 +315 85 optimizer.lr 0.008970312934364877 +315 85 training.batch_size 0.0 +315 85 
training.label_smoothing 0.0027881314860210313 +315 86 model.embedding_dim 0.0 +315 86 optimizer.lr 0.0038464181959817966 +315 86 training.batch_size 1.0 +315 86 training.label_smoothing 0.21149343194358827 +315 87 model.embedding_dim 1.0 +315 87 optimizer.lr 0.025664067152474185 +315 87 training.batch_size 0.0 +315 87 training.label_smoothing 0.047398322795317006 +315 88 model.embedding_dim 0.0 +315 88 optimizer.lr 0.009364591997840954 +315 88 training.batch_size 0.0 +315 88 training.label_smoothing 0.03559929941782506 +315 89 model.embedding_dim 2.0 +315 89 optimizer.lr 0.02641180491998293 +315 89 training.batch_size 2.0 +315 89 training.label_smoothing 0.03967009670306694 +315 90 model.embedding_dim 1.0 +315 90 optimizer.lr 0.004669747103072748 +315 90 training.batch_size 0.0 +315 90 training.label_smoothing 0.5042752383914354 +315 91 model.embedding_dim 0.0 +315 91 optimizer.lr 0.005380024302256599 +315 91 training.batch_size 1.0 +315 91 training.label_smoothing 0.005125499140124183 +315 92 model.embedding_dim 1.0 +315 92 optimizer.lr 0.002461061101384222 +315 92 training.batch_size 2.0 +315 92 training.label_smoothing 0.1336028704268509 +315 93 model.embedding_dim 2.0 +315 93 optimizer.lr 0.05462697407533734 +315 93 training.batch_size 2.0 +315 93 training.label_smoothing 0.0011703366509745253 +315 94 model.embedding_dim 1.0 +315 94 optimizer.lr 0.0015035570910776307 +315 94 training.batch_size 0.0 +315 94 training.label_smoothing 0.0025324707560974956 +315 95 model.embedding_dim 0.0 +315 95 optimizer.lr 0.007700754386038106 +315 95 training.batch_size 1.0 +315 95 training.label_smoothing 0.08512802365445173 +315 96 model.embedding_dim 2.0 +315 96 optimizer.lr 0.004203453905253703 +315 96 training.batch_size 2.0 +315 96 training.label_smoothing 0.0057503007663026575 +315 97 model.embedding_dim 2.0 +315 97 optimizer.lr 0.08782060969001272 +315 97 training.batch_size 0.0 +315 97 training.label_smoothing 0.007879501885638553 +315 98 model.embedding_dim 0.0 +315 
98 optimizer.lr 0.005757214572894251 +315 98 training.batch_size 1.0 +315 98 training.label_smoothing 0.004678675579451671 +315 99 model.embedding_dim 0.0 +315 99 optimizer.lr 0.06258096503711252 +315 99 training.batch_size 2.0 +315 99 training.label_smoothing 0.005438449406611305 +315 100 model.embedding_dim 1.0 +315 100 optimizer.lr 0.005899619038710082 +315 100 training.batch_size 2.0 +315 100 training.label_smoothing 0.14996227045900648 +315 1 dataset """kinships""" +315 1 model """hole""" +315 1 loss """softplus""" +315 1 regularizer """no""" +315 1 optimizer """adam""" +315 1 training_loop """lcwa""" +315 1 evaluator """rankbased""" +315 2 dataset """kinships""" +315 2 model """hole""" +315 2 loss """softplus""" +315 2 regularizer """no""" +315 2 optimizer """adam""" +315 2 training_loop """lcwa""" +315 2 evaluator """rankbased""" +315 3 dataset """kinships""" +315 3 model """hole""" +315 3 loss """softplus""" +315 3 regularizer """no""" +315 3 optimizer """adam""" +315 3 training_loop """lcwa""" +315 3 evaluator """rankbased""" +315 4 dataset """kinships""" +315 4 model """hole""" +315 4 loss """softplus""" +315 4 regularizer """no""" +315 4 optimizer """adam""" +315 4 training_loop """lcwa""" +315 4 evaluator """rankbased""" +315 5 dataset """kinships""" +315 5 model """hole""" +315 5 loss """softplus""" +315 5 regularizer """no""" +315 5 optimizer """adam""" +315 5 training_loop """lcwa""" +315 5 evaluator """rankbased""" +315 6 dataset """kinships""" +315 6 model """hole""" +315 6 loss """softplus""" +315 6 regularizer """no""" +315 6 optimizer """adam""" +315 6 training_loop """lcwa""" +315 6 evaluator """rankbased""" +315 7 dataset """kinships""" +315 7 model """hole""" +315 7 loss """softplus""" +315 7 regularizer """no""" +315 7 optimizer """adam""" +315 7 training_loop """lcwa""" +315 7 evaluator """rankbased""" +315 8 dataset """kinships""" +315 8 model """hole""" +315 8 loss """softplus""" +315 8 regularizer """no""" +315 8 optimizer """adam""" 
+315 8 training_loop """lcwa""" +315 8 evaluator """rankbased""" +315 9 dataset """kinships""" +315 9 model """hole""" +315 9 loss """softplus""" +315 9 regularizer """no""" +315 9 optimizer """adam""" +315 9 training_loop """lcwa""" +315 9 evaluator """rankbased""" +315 10 dataset """kinships""" +315 10 model """hole""" +315 10 loss """softplus""" +315 10 regularizer """no""" +315 10 optimizer """adam""" +315 10 training_loop """lcwa""" +315 10 evaluator """rankbased""" +315 11 dataset """kinships""" +315 11 model """hole""" +315 11 loss """softplus""" +315 11 regularizer """no""" +315 11 optimizer """adam""" +315 11 training_loop """lcwa""" +315 11 evaluator """rankbased""" +315 12 dataset """kinships""" +315 12 model """hole""" +315 12 loss """softplus""" +315 12 regularizer """no""" +315 12 optimizer """adam""" +315 12 training_loop """lcwa""" +315 12 evaluator """rankbased""" +315 13 dataset """kinships""" +315 13 model """hole""" +315 13 loss """softplus""" +315 13 regularizer """no""" +315 13 optimizer """adam""" +315 13 training_loop """lcwa""" +315 13 evaluator """rankbased""" +315 14 dataset """kinships""" +315 14 model """hole""" +315 14 loss """softplus""" +315 14 regularizer """no""" +315 14 optimizer """adam""" +315 14 training_loop """lcwa""" +315 14 evaluator """rankbased""" +315 15 dataset """kinships""" +315 15 model """hole""" +315 15 loss """softplus""" +315 15 regularizer """no""" +315 15 optimizer """adam""" +315 15 training_loop """lcwa""" +315 15 evaluator """rankbased""" +315 16 dataset """kinships""" +315 16 model """hole""" +315 16 loss """softplus""" +315 16 regularizer """no""" +315 16 optimizer """adam""" +315 16 training_loop """lcwa""" +315 16 evaluator """rankbased""" +315 17 dataset """kinships""" +315 17 model """hole""" +315 17 loss """softplus""" +315 17 regularizer """no""" +315 17 optimizer """adam""" +315 17 training_loop """lcwa""" +315 17 evaluator """rankbased""" +315 18 dataset """kinships""" +315 18 model """hole""" +315 
18 loss """softplus""" +315 18 regularizer """no""" +315 18 optimizer """adam""" +315 18 training_loop """lcwa""" +315 18 evaluator """rankbased""" +315 19 dataset """kinships""" +315 19 model """hole""" +315 19 loss """softplus""" +315 19 regularizer """no""" +315 19 optimizer """adam""" +315 19 training_loop """lcwa""" +315 19 evaluator """rankbased""" +315 20 dataset """kinships""" +315 20 model """hole""" +315 20 loss """softplus""" +315 20 regularizer """no""" +315 20 optimizer """adam""" +315 20 training_loop """lcwa""" +315 20 evaluator """rankbased""" +315 21 dataset """kinships""" +315 21 model """hole""" +315 21 loss """softplus""" +315 21 regularizer """no""" +315 21 optimizer """adam""" +315 21 training_loop """lcwa""" +315 21 evaluator """rankbased""" +315 22 dataset """kinships""" +315 22 model """hole""" +315 22 loss """softplus""" +315 22 regularizer """no""" +315 22 optimizer """adam""" +315 22 training_loop """lcwa""" +315 22 evaluator """rankbased""" +315 23 dataset """kinships""" +315 23 model """hole""" +315 23 loss """softplus""" +315 23 regularizer """no""" +315 23 optimizer """adam""" +315 23 training_loop """lcwa""" +315 23 evaluator """rankbased""" +315 24 dataset """kinships""" +315 24 model """hole""" +315 24 loss """softplus""" +315 24 regularizer """no""" +315 24 optimizer """adam""" +315 24 training_loop """lcwa""" +315 24 evaluator """rankbased""" +315 25 dataset """kinships""" +315 25 model """hole""" +315 25 loss """softplus""" +315 25 regularizer """no""" +315 25 optimizer """adam""" +315 25 training_loop """lcwa""" +315 25 evaluator """rankbased""" +315 26 dataset """kinships""" +315 26 model """hole""" +315 26 loss """softplus""" +315 26 regularizer """no""" +315 26 optimizer """adam""" +315 26 training_loop """lcwa""" +315 26 evaluator """rankbased""" +315 27 dataset """kinships""" +315 27 model """hole""" +315 27 loss """softplus""" +315 27 regularizer """no""" +315 27 optimizer """adam""" +315 27 training_loop """lcwa""" +315 
27 evaluator """rankbased""" +315 28 dataset """kinships""" +315 28 model """hole""" +315 28 loss """softplus""" +315 28 regularizer """no""" +315 28 optimizer """adam""" +315 28 training_loop """lcwa""" +315 28 evaluator """rankbased""" +315 29 dataset """kinships""" +315 29 model """hole""" +315 29 loss """softplus""" +315 29 regularizer """no""" +315 29 optimizer """adam""" +315 29 training_loop """lcwa""" +315 29 evaluator """rankbased""" +315 30 dataset """kinships""" +315 30 model """hole""" +315 30 loss """softplus""" +315 30 regularizer """no""" +315 30 optimizer """adam""" +315 30 training_loop """lcwa""" +315 30 evaluator """rankbased""" +315 31 dataset """kinships""" +315 31 model """hole""" +315 31 loss """softplus""" +315 31 regularizer """no""" +315 31 optimizer """adam""" +315 31 training_loop """lcwa""" +315 31 evaluator """rankbased""" +315 32 dataset """kinships""" +315 32 model """hole""" +315 32 loss """softplus""" +315 32 regularizer """no""" +315 32 optimizer """adam""" +315 32 training_loop """lcwa""" +315 32 evaluator """rankbased""" +315 33 dataset """kinships""" +315 33 model """hole""" +315 33 loss """softplus""" +315 33 regularizer """no""" +315 33 optimizer """adam""" +315 33 training_loop """lcwa""" +315 33 evaluator """rankbased""" +315 34 dataset """kinships""" +315 34 model """hole""" +315 34 loss """softplus""" +315 34 regularizer """no""" +315 34 optimizer """adam""" +315 34 training_loop """lcwa""" +315 34 evaluator """rankbased""" +315 35 dataset """kinships""" +315 35 model """hole""" +315 35 loss """softplus""" +315 35 regularizer """no""" +315 35 optimizer """adam""" +315 35 training_loop """lcwa""" +315 35 evaluator """rankbased""" +315 36 dataset """kinships""" +315 36 model """hole""" +315 36 loss """softplus""" +315 36 regularizer """no""" +315 36 optimizer """adam""" +315 36 training_loop """lcwa""" +315 36 evaluator """rankbased""" +315 37 dataset """kinships""" +315 37 model """hole""" +315 37 loss """softplus""" +315 
37 regularizer """no""" +315 37 optimizer """adam""" +315 37 training_loop """lcwa""" +315 37 evaluator """rankbased""" +315 38 dataset """kinships""" +315 38 model """hole""" +315 38 loss """softplus""" +315 38 regularizer """no""" +315 38 optimizer """adam""" +315 38 training_loop """lcwa""" +315 38 evaluator """rankbased""" +315 39 dataset """kinships""" +315 39 model """hole""" +315 39 loss """softplus""" +315 39 regularizer """no""" +315 39 optimizer """adam""" +315 39 training_loop """lcwa""" +315 39 evaluator """rankbased""" +315 40 dataset """kinships""" +315 40 model """hole""" +315 40 loss """softplus""" +315 40 regularizer """no""" +315 40 optimizer """adam""" +315 40 training_loop """lcwa""" +315 40 evaluator """rankbased""" +315 41 dataset """kinships""" +315 41 model """hole""" +315 41 loss """softplus""" +315 41 regularizer """no""" +315 41 optimizer """adam""" +315 41 training_loop """lcwa""" +315 41 evaluator """rankbased""" +315 42 dataset """kinships""" +315 42 model """hole""" +315 42 loss """softplus""" +315 42 regularizer """no""" +315 42 optimizer """adam""" +315 42 training_loop """lcwa""" +315 42 evaluator """rankbased""" +315 43 dataset """kinships""" +315 43 model """hole""" +315 43 loss """softplus""" +315 43 regularizer """no""" +315 43 optimizer """adam""" +315 43 training_loop """lcwa""" +315 43 evaluator """rankbased""" +315 44 dataset """kinships""" +315 44 model """hole""" +315 44 loss """softplus""" +315 44 regularizer """no""" +315 44 optimizer """adam""" +315 44 training_loop """lcwa""" +315 44 evaluator """rankbased""" +315 45 dataset """kinships""" +315 45 model """hole""" +315 45 loss """softplus""" +315 45 regularizer """no""" +315 45 optimizer """adam""" +315 45 training_loop """lcwa""" +315 45 evaluator """rankbased""" +315 46 dataset """kinships""" +315 46 model """hole""" +315 46 loss """softplus""" +315 46 regularizer """no""" +315 46 optimizer """adam""" +315 46 training_loop """lcwa""" +315 46 evaluator 
"""rankbased""" +315 47 dataset """kinships""" +315 47 model """hole""" +315 47 loss """softplus""" +315 47 regularizer """no""" +315 47 optimizer """adam""" +315 47 training_loop """lcwa""" +315 47 evaluator """rankbased""" +315 48 dataset """kinships""" +315 48 model """hole""" +315 48 loss """softplus""" +315 48 regularizer """no""" +315 48 optimizer """adam""" +315 48 training_loop """lcwa""" +315 48 evaluator """rankbased""" +315 49 dataset """kinships""" +315 49 model """hole""" +315 49 loss """softplus""" +315 49 regularizer """no""" +315 49 optimizer """adam""" +315 49 training_loop """lcwa""" +315 49 evaluator """rankbased""" +315 50 dataset """kinships""" +315 50 model """hole""" +315 50 loss """softplus""" +315 50 regularizer """no""" +315 50 optimizer """adam""" +315 50 training_loop """lcwa""" +315 50 evaluator """rankbased""" +315 51 dataset """kinships""" +315 51 model """hole""" +315 51 loss """softplus""" +315 51 regularizer """no""" +315 51 optimizer """adam""" +315 51 training_loop """lcwa""" +315 51 evaluator """rankbased""" +315 52 dataset """kinships""" +315 52 model """hole""" +315 52 loss """softplus""" +315 52 regularizer """no""" +315 52 optimizer """adam""" +315 52 training_loop """lcwa""" +315 52 evaluator """rankbased""" +315 53 dataset """kinships""" +315 53 model """hole""" +315 53 loss """softplus""" +315 53 regularizer """no""" +315 53 optimizer """adam""" +315 53 training_loop """lcwa""" +315 53 evaluator """rankbased""" +315 54 dataset """kinships""" +315 54 model """hole""" +315 54 loss """softplus""" +315 54 regularizer """no""" +315 54 optimizer """adam""" +315 54 training_loop """lcwa""" +315 54 evaluator """rankbased""" +315 55 dataset """kinships""" +315 55 model """hole""" +315 55 loss """softplus""" +315 55 regularizer """no""" +315 55 optimizer """adam""" +315 55 training_loop """lcwa""" +315 55 evaluator """rankbased""" +315 56 dataset """kinships""" +315 56 model """hole""" +315 56 loss """softplus""" +315 56 
regularizer """no""" +315 56 optimizer """adam""" +315 56 training_loop """lcwa""" +315 56 evaluator """rankbased""" +315 57 dataset """kinships""" +315 57 model """hole""" +315 57 loss """softplus""" +315 57 regularizer """no""" +315 57 optimizer """adam""" +315 57 training_loop """lcwa""" +315 57 evaluator """rankbased""" +315 58 dataset """kinships""" +315 58 model """hole""" +315 58 loss """softplus""" +315 58 regularizer """no""" +315 58 optimizer """adam""" +315 58 training_loop """lcwa""" +315 58 evaluator """rankbased""" +315 59 dataset """kinships""" +315 59 model """hole""" +315 59 loss """softplus""" +315 59 regularizer """no""" +315 59 optimizer """adam""" +315 59 training_loop """lcwa""" +315 59 evaluator """rankbased""" +315 60 dataset """kinships""" +315 60 model """hole""" +315 60 loss """softplus""" +315 60 regularizer """no""" +315 60 optimizer """adam""" +315 60 training_loop """lcwa""" +315 60 evaluator """rankbased""" +315 61 dataset """kinships""" +315 61 model """hole""" +315 61 loss """softplus""" +315 61 regularizer """no""" +315 61 optimizer """adam""" +315 61 training_loop """lcwa""" +315 61 evaluator """rankbased""" +315 62 dataset """kinships""" +315 62 model """hole""" +315 62 loss """softplus""" +315 62 regularizer """no""" +315 62 optimizer """adam""" +315 62 training_loop """lcwa""" +315 62 evaluator """rankbased""" +315 63 dataset """kinships""" +315 63 model """hole""" +315 63 loss """softplus""" +315 63 regularizer """no""" +315 63 optimizer """adam""" +315 63 training_loop """lcwa""" +315 63 evaluator """rankbased""" +315 64 dataset """kinships""" +315 64 model """hole""" +315 64 loss """softplus""" +315 64 regularizer """no""" +315 64 optimizer """adam""" +315 64 training_loop """lcwa""" +315 64 evaluator """rankbased""" +315 65 dataset """kinships""" +315 65 model """hole""" +315 65 loss """softplus""" +315 65 regularizer """no""" +315 65 optimizer """adam""" +315 65 training_loop """lcwa""" +315 65 evaluator """rankbased""" 
+315 66 dataset """kinships""" +315 66 model """hole""" +315 66 loss """softplus""" +315 66 regularizer """no""" +315 66 optimizer """adam""" +315 66 training_loop """lcwa""" +315 66 evaluator """rankbased""" +315 67 dataset """kinships""" +315 67 model """hole""" +315 67 loss """softplus""" +315 67 regularizer """no""" +315 67 optimizer """adam""" +315 67 training_loop """lcwa""" +315 67 evaluator """rankbased""" +315 68 dataset """kinships""" +315 68 model """hole""" +315 68 loss """softplus""" +315 68 regularizer """no""" +315 68 optimizer """adam""" +315 68 training_loop """lcwa""" +315 68 evaluator """rankbased""" +315 69 dataset """kinships""" +315 69 model """hole""" +315 69 loss """softplus""" +315 69 regularizer """no""" +315 69 optimizer """adam""" +315 69 training_loop """lcwa""" +315 69 evaluator """rankbased""" +315 70 dataset """kinships""" +315 70 model """hole""" +315 70 loss """softplus""" +315 70 regularizer """no""" +315 70 optimizer """adam""" +315 70 training_loop """lcwa""" +315 70 evaluator """rankbased""" +315 71 dataset """kinships""" +315 71 model """hole""" +315 71 loss """softplus""" +315 71 regularizer """no""" +315 71 optimizer """adam""" +315 71 training_loop """lcwa""" +315 71 evaluator """rankbased""" +315 72 dataset """kinships""" +315 72 model """hole""" +315 72 loss """softplus""" +315 72 regularizer """no""" +315 72 optimizer """adam""" +315 72 training_loop """lcwa""" +315 72 evaluator """rankbased""" +315 73 dataset """kinships""" +315 73 model """hole""" +315 73 loss """softplus""" +315 73 regularizer """no""" +315 73 optimizer """adam""" +315 73 training_loop """lcwa""" +315 73 evaluator """rankbased""" +315 74 dataset """kinships""" +315 74 model """hole""" +315 74 loss """softplus""" +315 74 regularizer """no""" +315 74 optimizer """adam""" +315 74 training_loop """lcwa""" +315 74 evaluator """rankbased""" +315 75 dataset """kinships""" +315 75 model """hole""" +315 75 loss """softplus""" +315 75 regularizer """no""" +315 
75 optimizer """adam""" +315 75 training_loop """lcwa""" +315 75 evaluator """rankbased""" +315 76 dataset """kinships""" +315 76 model """hole""" +315 76 loss """softplus""" +315 76 regularizer """no""" +315 76 optimizer """adam""" +315 76 training_loop """lcwa""" +315 76 evaluator """rankbased""" +315 77 dataset """kinships""" +315 77 model """hole""" +315 77 loss """softplus""" +315 77 regularizer """no""" +315 77 optimizer """adam""" +315 77 training_loop """lcwa""" +315 77 evaluator """rankbased""" +315 78 dataset """kinships""" +315 78 model """hole""" +315 78 loss """softplus""" +315 78 regularizer """no""" +315 78 optimizer """adam""" +315 78 training_loop """lcwa""" +315 78 evaluator """rankbased""" +315 79 dataset """kinships""" +315 79 model """hole""" +315 79 loss """softplus""" +315 79 regularizer """no""" +315 79 optimizer """adam""" +315 79 training_loop """lcwa""" +315 79 evaluator """rankbased""" +315 80 dataset """kinships""" +315 80 model """hole""" +315 80 loss """softplus""" +315 80 regularizer """no""" +315 80 optimizer """adam""" +315 80 training_loop """lcwa""" +315 80 evaluator """rankbased""" +315 81 dataset """kinships""" +315 81 model """hole""" +315 81 loss """softplus""" +315 81 regularizer """no""" +315 81 optimizer """adam""" +315 81 training_loop """lcwa""" +315 81 evaluator """rankbased""" +315 82 dataset """kinships""" +315 82 model """hole""" +315 82 loss """softplus""" +315 82 regularizer """no""" +315 82 optimizer """adam""" +315 82 training_loop """lcwa""" +315 82 evaluator """rankbased""" +315 83 dataset """kinships""" +315 83 model """hole""" +315 83 loss """softplus""" +315 83 regularizer """no""" +315 83 optimizer """adam""" +315 83 training_loop """lcwa""" +315 83 evaluator """rankbased""" +315 84 dataset """kinships""" +315 84 model """hole""" +315 84 loss """softplus""" +315 84 regularizer """no""" +315 84 optimizer """adam""" +315 84 training_loop """lcwa""" +315 84 evaluator """rankbased""" +315 85 dataset 
"""kinships""" +315 85 model """hole""" +315 85 loss """softplus""" +315 85 regularizer """no""" +315 85 optimizer """adam""" +315 85 training_loop """lcwa""" +315 85 evaluator """rankbased""" +315 86 dataset """kinships""" +315 86 model """hole""" +315 86 loss """softplus""" +315 86 regularizer """no""" +315 86 optimizer """adam""" +315 86 training_loop """lcwa""" +315 86 evaluator """rankbased""" +315 87 dataset """kinships""" +315 87 model """hole""" +315 87 loss """softplus""" +315 87 regularizer """no""" +315 87 optimizer """adam""" +315 87 training_loop """lcwa""" +315 87 evaluator """rankbased""" +315 88 dataset """kinships""" +315 88 model """hole""" +315 88 loss """softplus""" +315 88 regularizer """no""" +315 88 optimizer """adam""" +315 88 training_loop """lcwa""" +315 88 evaluator """rankbased""" +315 89 dataset """kinships""" +315 89 model """hole""" +315 89 loss """softplus""" +315 89 regularizer """no""" +315 89 optimizer """adam""" +315 89 training_loop """lcwa""" +315 89 evaluator """rankbased""" +315 90 dataset """kinships""" +315 90 model """hole""" +315 90 loss """softplus""" +315 90 regularizer """no""" +315 90 optimizer """adam""" +315 90 training_loop """lcwa""" +315 90 evaluator """rankbased""" +315 91 dataset """kinships""" +315 91 model """hole""" +315 91 loss """softplus""" +315 91 regularizer """no""" +315 91 optimizer """adam""" +315 91 training_loop """lcwa""" +315 91 evaluator """rankbased""" +315 92 dataset """kinships""" +315 92 model """hole""" +315 92 loss """softplus""" +315 92 regularizer """no""" +315 92 optimizer """adam""" +315 92 training_loop """lcwa""" +315 92 evaluator """rankbased""" +315 93 dataset """kinships""" +315 93 model """hole""" +315 93 loss """softplus""" +315 93 regularizer """no""" +315 93 optimizer """adam""" +315 93 training_loop """lcwa""" +315 93 evaluator """rankbased""" +315 94 dataset """kinships""" +315 94 model """hole""" +315 94 loss """softplus""" +315 94 regularizer """no""" +315 94 optimizer 
"""adam""" +315 94 training_loop """lcwa""" +315 94 evaluator """rankbased""" +315 95 dataset """kinships""" +315 95 model """hole""" +315 95 loss """softplus""" +315 95 regularizer """no""" +315 95 optimizer """adam""" +315 95 training_loop """lcwa""" +315 95 evaluator """rankbased""" +315 96 dataset """kinships""" +315 96 model """hole""" +315 96 loss """softplus""" +315 96 regularizer """no""" +315 96 optimizer """adam""" +315 96 training_loop """lcwa""" +315 96 evaluator """rankbased""" +315 97 dataset """kinships""" +315 97 model """hole""" +315 97 loss """softplus""" +315 97 regularizer """no""" +315 97 optimizer """adam""" +315 97 training_loop """lcwa""" +315 97 evaluator """rankbased""" +315 98 dataset """kinships""" +315 98 model """hole""" +315 98 loss """softplus""" +315 98 regularizer """no""" +315 98 optimizer """adam""" +315 98 training_loop """lcwa""" +315 98 evaluator """rankbased""" +315 99 dataset """kinships""" +315 99 model """hole""" +315 99 loss """softplus""" +315 99 regularizer """no""" +315 99 optimizer """adam""" +315 99 training_loop """lcwa""" +315 99 evaluator """rankbased""" +315 100 dataset """kinships""" +315 100 model """hole""" +315 100 loss """softplus""" +315 100 regularizer """no""" +315 100 optimizer """adam""" +315 100 training_loop """lcwa""" +315 100 evaluator """rankbased""" +316 1 model.embedding_dim 1.0 +316 1 optimizer.lr 0.015601323225522674 +316 1 training.batch_size 2.0 +316 1 training.label_smoothing 0.00718134699377138 +316 2 model.embedding_dim 2.0 +316 2 optimizer.lr 0.0019889184025032544 +316 2 training.batch_size 0.0 +316 2 training.label_smoothing 0.376157420833037 +316 3 model.embedding_dim 1.0 +316 3 optimizer.lr 0.05250142349113356 +316 3 training.batch_size 0.0 +316 3 training.label_smoothing 0.024758860518471367 +316 4 model.embedding_dim 2.0 +316 4 optimizer.lr 0.07815682806839908 +316 4 training.batch_size 2.0 +316 4 training.label_smoothing 0.0055487797612402785 +316 5 model.embedding_dim 2.0 +316 5 
optimizer.lr 0.0032953736104920158 +316 5 training.batch_size 2.0 +316 5 training.label_smoothing 0.04359575249328695 +316 6 model.embedding_dim 0.0 +316 6 optimizer.lr 0.004401934267983953 +316 6 training.batch_size 1.0 +316 6 training.label_smoothing 0.003509902555411819 +316 7 model.embedding_dim 2.0 +316 7 optimizer.lr 0.014084845283643675 +316 7 training.batch_size 1.0 +316 7 training.label_smoothing 0.2194931660655311 +316 8 model.embedding_dim 1.0 +316 8 optimizer.lr 0.004987857024191145 +316 8 training.batch_size 0.0 +316 8 training.label_smoothing 0.28591479185934293 +316 9 model.embedding_dim 2.0 +316 9 optimizer.lr 0.06851866435456187 +316 9 training.batch_size 0.0 +316 9 training.label_smoothing 0.052659496819244346 +316 10 model.embedding_dim 2.0 +316 10 optimizer.lr 0.007925584406215129 +316 10 training.batch_size 0.0 +316 10 training.label_smoothing 0.0018673030557910114 +316 11 model.embedding_dim 2.0 +316 11 optimizer.lr 0.025859765834147112 +316 11 training.batch_size 2.0 +316 11 training.label_smoothing 0.01302034810868812 +316 12 model.embedding_dim 2.0 +316 12 optimizer.lr 0.051497418428034604 +316 12 training.batch_size 0.0 +316 12 training.label_smoothing 0.1350862871322109 +316 13 model.embedding_dim 1.0 +316 13 optimizer.lr 0.002067187747298355 +316 13 training.batch_size 1.0 +316 13 training.label_smoothing 0.018888259288813835 +316 14 model.embedding_dim 2.0 +316 14 optimizer.lr 0.003874554348380422 +316 14 training.batch_size 1.0 +316 14 training.label_smoothing 0.02306742956867254 +316 15 model.embedding_dim 2.0 +316 15 optimizer.lr 0.0609755197990225 +316 15 training.batch_size 1.0 +316 15 training.label_smoothing 0.1784858299346805 +316 16 model.embedding_dim 0.0 +316 16 optimizer.lr 0.022676926015820848 +316 16 training.batch_size 2.0 +316 16 training.label_smoothing 0.0010752623718447694 +316 17 model.embedding_dim 1.0 +316 17 optimizer.lr 0.03476343569131521 +316 17 training.batch_size 0.0 +316 17 training.label_smoothing 
0.09769419410418881 +316 18 model.embedding_dim 0.0 +316 18 optimizer.lr 0.002484018886801272 +316 18 training.batch_size 2.0 +316 18 training.label_smoothing 0.028607513797583882 +316 19 model.embedding_dim 2.0 +316 19 optimizer.lr 0.009639529545107564 +316 19 training.batch_size 1.0 +316 19 training.label_smoothing 0.009687784137245506 +316 20 model.embedding_dim 0.0 +316 20 optimizer.lr 0.0018752606467324748 +316 20 training.batch_size 1.0 +316 20 training.label_smoothing 0.0012583157398474176 +316 21 model.embedding_dim 2.0 +316 21 optimizer.lr 0.021731606640934015 +316 21 training.batch_size 2.0 +316 21 training.label_smoothing 0.02750175612537296 +316 22 model.embedding_dim 0.0 +316 22 optimizer.lr 0.03845281869499778 +316 22 training.batch_size 1.0 +316 22 training.label_smoothing 0.0306452898293249 +316 23 model.embedding_dim 2.0 +316 23 optimizer.lr 0.005251583615012697 +316 23 training.batch_size 1.0 +316 23 training.label_smoothing 0.018274948826757467 +316 24 model.embedding_dim 1.0 +316 24 optimizer.lr 0.06977309868094622 +316 24 training.batch_size 2.0 +316 24 training.label_smoothing 0.004048218233945074 +316 25 model.embedding_dim 0.0 +316 25 optimizer.lr 0.012551680266011387 +316 25 training.batch_size 1.0 +316 25 training.label_smoothing 0.08909695387073248 +316 26 model.embedding_dim 0.0 +316 26 optimizer.lr 0.0021785115182395656 +316 26 training.batch_size 2.0 +316 26 training.label_smoothing 0.0028009946425204767 +316 27 model.embedding_dim 1.0 +316 27 optimizer.lr 0.0015349363724382506 +316 27 training.batch_size 1.0 +316 27 training.label_smoothing 0.41659165370605794 +316 28 model.embedding_dim 0.0 +316 28 optimizer.lr 0.0044455416771935225 +316 28 training.batch_size 0.0 +316 28 training.label_smoothing 0.004969384385688947 +316 29 model.embedding_dim 1.0 +316 29 optimizer.lr 0.0974530863523094 +316 29 training.batch_size 2.0 +316 29 training.label_smoothing 0.09044678814152753 +316 30 model.embedding_dim 0.0 +316 30 optimizer.lr 
0.07314229475686033 +316 30 training.batch_size 1.0 +316 30 training.label_smoothing 0.010995296934353749 +316 31 model.embedding_dim 2.0 +316 31 optimizer.lr 0.0014076305463239232 +316 31 training.batch_size 0.0 +316 31 training.label_smoothing 0.0030795044258565404 +316 32 model.embedding_dim 2.0 +316 32 optimizer.lr 0.004959940049193186 +316 32 training.batch_size 0.0 +316 32 training.label_smoothing 0.01008014503099332 +316 33 model.embedding_dim 1.0 +316 33 optimizer.lr 0.0887975958575485 +316 33 training.batch_size 1.0 +316 33 training.label_smoothing 0.00660783103127865 +316 34 model.embedding_dim 0.0 +316 34 optimizer.lr 0.007730308338939898 +316 34 training.batch_size 0.0 +316 34 training.label_smoothing 0.336676434176186 +316 35 model.embedding_dim 1.0 +316 35 optimizer.lr 0.002120585390871615 +316 35 training.batch_size 1.0 +316 35 training.label_smoothing 0.05238332985798443 +316 36 model.embedding_dim 0.0 +316 36 optimizer.lr 0.054153458239739743 +316 36 training.batch_size 2.0 +316 36 training.label_smoothing 0.12297332822219635 +316 37 model.embedding_dim 1.0 +316 37 optimizer.lr 0.035699179092149765 +316 37 training.batch_size 1.0 +316 37 training.label_smoothing 0.024819406507365072 +316 38 model.embedding_dim 1.0 +316 38 optimizer.lr 0.004569763691925522 +316 38 training.batch_size 0.0 +316 38 training.label_smoothing 0.30912416438038887 +316 39 model.embedding_dim 2.0 +316 39 optimizer.lr 0.0051054183840505635 +316 39 training.batch_size 0.0 +316 39 training.label_smoothing 0.0037197879771059326 +316 40 model.embedding_dim 1.0 +316 40 optimizer.lr 0.07353959871429311 +316 40 training.batch_size 2.0 +316 40 training.label_smoothing 0.0024821434622025788 +316 41 model.embedding_dim 2.0 +316 41 optimizer.lr 0.044386360519487585 +316 41 training.batch_size 0.0 +316 41 training.label_smoothing 0.07400601383338488 +316 42 model.embedding_dim 0.0 +316 42 optimizer.lr 0.05482076228611984 +316 42 training.batch_size 2.0 +316 42 training.label_smoothing 
0.004685091842792751 +316 43 model.embedding_dim 0.0 +316 43 optimizer.lr 0.03681989108490524 +316 43 training.batch_size 1.0 +316 43 training.label_smoothing 0.0061588902009077785 +316 44 model.embedding_dim 1.0 +316 44 optimizer.lr 0.0035943606059664936 +316 44 training.batch_size 1.0 +316 44 training.label_smoothing 0.056720677105243075 +316 45 model.embedding_dim 2.0 +316 45 optimizer.lr 0.007642689059745543 +316 45 training.batch_size 0.0 +316 45 training.label_smoothing 0.7669197593190121 +316 46 model.embedding_dim 1.0 +316 46 optimizer.lr 0.04246217926339671 +316 46 training.batch_size 1.0 +316 46 training.label_smoothing 0.47951228324824224 +316 47 model.embedding_dim 0.0 +316 47 optimizer.lr 0.007066104740870168 +316 47 training.batch_size 1.0 +316 47 training.label_smoothing 0.4360648979765045 +316 48 model.embedding_dim 0.0 +316 48 optimizer.lr 0.0016406053674418548 +316 48 training.batch_size 1.0 +316 48 training.label_smoothing 0.0737261391069063 +316 49 model.embedding_dim 2.0 +316 49 optimizer.lr 0.0052271263307989155 +316 49 training.batch_size 2.0 +316 49 training.label_smoothing 0.03521202574149386 +316 50 model.embedding_dim 0.0 +316 50 optimizer.lr 0.0266485032519862 +316 50 training.batch_size 0.0 +316 50 training.label_smoothing 0.019467448148047493 +316 51 model.embedding_dim 0.0 +316 51 optimizer.lr 0.002839973291502111 +316 51 training.batch_size 0.0 +316 51 training.label_smoothing 0.5664108773571174 +316 52 model.embedding_dim 2.0 +316 52 optimizer.lr 0.04678123887779599 +316 52 training.batch_size 1.0 +316 52 training.label_smoothing 0.0034611057221539112 +316 53 model.embedding_dim 0.0 +316 53 optimizer.lr 0.008491123468753994 +316 53 training.batch_size 2.0 +316 53 training.label_smoothing 0.2862961979023349 +316 54 model.embedding_dim 1.0 +316 54 optimizer.lr 0.00464587231274651 +316 54 training.batch_size 1.0 +316 54 training.label_smoothing 0.0037562461480120488 +316 55 model.embedding_dim 0.0 +316 55 optimizer.lr 
0.0065849567652512256 +316 55 training.batch_size 1.0 +316 55 training.label_smoothing 0.36464877820187264 +316 56 model.embedding_dim 0.0 +316 56 optimizer.lr 0.013192622803503378 +316 56 training.batch_size 1.0 +316 56 training.label_smoothing 0.430276396465124 +316 57 model.embedding_dim 0.0 +316 57 optimizer.lr 0.0010714226302601 +316 57 training.batch_size 1.0 +316 57 training.label_smoothing 0.0022124467460629895 +316 58 model.embedding_dim 1.0 +316 58 optimizer.lr 0.09249953713270999 +316 58 training.batch_size 2.0 +316 58 training.label_smoothing 0.9542518483985126 +316 59 model.embedding_dim 2.0 +316 59 optimizer.lr 0.01592791844497236 +316 59 training.batch_size 0.0 +316 59 training.label_smoothing 0.024206745863525034 +316 60 model.embedding_dim 0.0 +316 60 optimizer.lr 0.0012420163811942824 +316 60 training.batch_size 2.0 +316 60 training.label_smoothing 0.0026846455499668883 +316 61 model.embedding_dim 0.0 +316 61 optimizer.lr 0.021316492453078942 +316 61 training.batch_size 2.0 +316 61 training.label_smoothing 0.02178009884894602 +316 62 model.embedding_dim 1.0 +316 62 optimizer.lr 0.008534955291521406 +316 62 training.batch_size 0.0 +316 62 training.label_smoothing 0.03543946641791566 +316 63 model.embedding_dim 0.0 +316 63 optimizer.lr 0.0017764357807555818 +316 63 training.batch_size 2.0 +316 63 training.label_smoothing 0.6141701734322814 +316 64 model.embedding_dim 2.0 +316 64 optimizer.lr 0.020460247733452456 +316 64 training.batch_size 1.0 +316 64 training.label_smoothing 0.006885844311508937 +316 65 model.embedding_dim 2.0 +316 65 optimizer.lr 0.0010016730248080113 +316 65 training.batch_size 2.0 +316 65 training.label_smoothing 0.035239952107431326 +316 66 model.embedding_dim 0.0 +316 66 optimizer.lr 0.016147445272872134 +316 66 training.batch_size 2.0 +316 66 training.label_smoothing 0.7865571788583264 +316 67 model.embedding_dim 0.0 +316 67 optimizer.lr 0.012370496593744125 +316 67 training.batch_size 0.0 +316 67 training.label_smoothing 
0.04042023009280488 +316 68 model.embedding_dim 0.0 +316 68 optimizer.lr 0.05715151532972541 +316 68 training.batch_size 2.0 +316 68 training.label_smoothing 0.04429064557191945 +316 69 model.embedding_dim 2.0 +316 69 optimizer.lr 0.0021475676429070097 +316 69 training.batch_size 2.0 +316 69 training.label_smoothing 0.05879282223402615 +316 70 model.embedding_dim 1.0 +316 70 optimizer.lr 0.00473229490980895 +316 70 training.batch_size 2.0 +316 70 training.label_smoothing 0.0011979053021776238 +316 71 model.embedding_dim 1.0 +316 71 optimizer.lr 0.08329104965673192 +316 71 training.batch_size 0.0 +316 71 training.label_smoothing 0.0492232541591453 +316 72 model.embedding_dim 0.0 +316 72 optimizer.lr 0.012505686902461548 +316 72 training.batch_size 1.0 +316 72 training.label_smoothing 0.17758889458738344 +316 73 model.embedding_dim 2.0 +316 73 optimizer.lr 0.021590720830060462 +316 73 training.batch_size 2.0 +316 73 training.label_smoothing 0.6583972581321954 +316 74 model.embedding_dim 2.0 +316 74 optimizer.lr 0.04171461469900517 +316 74 training.batch_size 1.0 +316 74 training.label_smoothing 0.05754274872017311 +316 75 model.embedding_dim 0.0 +316 75 optimizer.lr 0.0035816575034739347 +316 75 training.batch_size 2.0 +316 75 training.label_smoothing 0.0018524766751742677 +316 76 model.embedding_dim 1.0 +316 76 optimizer.lr 0.001802886703216587 +316 76 training.batch_size 0.0 +316 76 training.label_smoothing 0.0010228939300083876 +316 77 model.embedding_dim 1.0 +316 77 optimizer.lr 0.004958099293302324 +316 77 training.batch_size 1.0 +316 77 training.label_smoothing 0.0011995550738833026 +316 78 model.embedding_dim 0.0 +316 78 optimizer.lr 0.0021804842971994893 +316 78 training.batch_size 2.0 +316 78 training.label_smoothing 0.009829242939947948 +316 79 model.embedding_dim 0.0 +316 79 optimizer.lr 0.005753309707716692 +316 79 training.batch_size 1.0 +316 79 training.label_smoothing 0.023453660329592722 +316 80 model.embedding_dim 0.0 +316 80 optimizer.lr 
0.04356515345917083 +316 80 training.batch_size 1.0 +316 80 training.label_smoothing 0.0209253495642854 +316 81 model.embedding_dim 0.0 +316 81 optimizer.lr 0.09215009130294226 +316 81 training.batch_size 1.0 +316 81 training.label_smoothing 0.09476353175789724 +316 82 model.embedding_dim 0.0 +316 82 optimizer.lr 0.001588656835757231 +316 82 training.batch_size 1.0 +316 82 training.label_smoothing 0.7645746317450122 +316 83 model.embedding_dim 2.0 +316 83 optimizer.lr 0.006787357149996417 +316 83 training.batch_size 0.0 +316 83 training.label_smoothing 0.0026108113440264946 +316 84 model.embedding_dim 1.0 +316 84 optimizer.lr 0.0010437672858324538 +316 84 training.batch_size 1.0 +316 84 training.label_smoothing 0.013432855262648615 +316 85 model.embedding_dim 0.0 +316 85 optimizer.lr 0.0016514863636880725 +316 85 training.batch_size 1.0 +316 85 training.label_smoothing 0.08468176634796838 +316 86 model.embedding_dim 2.0 +316 86 optimizer.lr 0.0011293237469073795 +316 86 training.batch_size 1.0 +316 86 training.label_smoothing 0.004256968381048732 +316 87 model.embedding_dim 0.0 +316 87 optimizer.lr 0.009932087039416847 +316 87 training.batch_size 2.0 +316 87 training.label_smoothing 0.013613823035538592 +316 88 model.embedding_dim 2.0 +316 88 optimizer.lr 0.002575719114611789 +316 88 training.batch_size 1.0 +316 88 training.label_smoothing 0.03225451328281962 +316 89 model.embedding_dim 2.0 +316 89 optimizer.lr 0.0013902683833506816 +316 89 training.batch_size 1.0 +316 89 training.label_smoothing 0.0032145065781312245 +316 90 model.embedding_dim 0.0 +316 90 optimizer.lr 0.06724144322178352 +316 90 training.batch_size 0.0 +316 90 training.label_smoothing 0.12942471739434516 +316 91 model.embedding_dim 2.0 +316 91 optimizer.lr 0.004098558770889188 +316 91 training.batch_size 0.0 +316 91 training.label_smoothing 0.4267669480356783 +316 92 model.embedding_dim 0.0 +316 92 optimizer.lr 0.04964182736932409 +316 92 training.batch_size 1.0 +316 92 training.label_smoothing 
0.005723999357414029 +316 93 model.embedding_dim 2.0 +316 93 optimizer.lr 0.009640868414851523 +316 93 training.batch_size 2.0 +316 93 training.label_smoothing 0.009321594360340111 +316 94 model.embedding_dim 0.0 +316 94 optimizer.lr 0.08771436608454689 +316 94 training.batch_size 1.0 +316 94 training.label_smoothing 0.17411981464699502 +316 95 model.embedding_dim 2.0 +316 95 optimizer.lr 0.055454081163325133 +316 95 training.batch_size 2.0 +316 95 training.label_smoothing 0.09229949072849147 +316 96 model.embedding_dim 1.0 +316 96 optimizer.lr 0.05788543093337415 +316 96 training.batch_size 2.0 +316 96 training.label_smoothing 0.44101107141310725 +316 97 model.embedding_dim 2.0 +316 97 optimizer.lr 0.054083716109176824 +316 97 training.batch_size 1.0 +316 97 training.label_smoothing 0.20321241714038968 +316 98 model.embedding_dim 2.0 +316 98 optimizer.lr 0.0018651642612722615 +316 98 training.batch_size 1.0 +316 98 training.label_smoothing 0.00842449406994791 +316 99 model.embedding_dim 1.0 +316 99 optimizer.lr 0.005564488769452315 +316 99 training.batch_size 1.0 +316 99 training.label_smoothing 0.00378604266595064 +316 100 model.embedding_dim 2.0 +316 100 optimizer.lr 0.06692248102365597 +316 100 training.batch_size 0.0 +316 100 training.label_smoothing 0.0439527686629443 +316 1 dataset """kinships""" +316 1 model """hole""" +316 1 loss """crossentropy""" +316 1 regularizer """no""" +316 1 optimizer """adam""" +316 1 training_loop """lcwa""" +316 1 evaluator """rankbased""" +316 2 dataset """kinships""" +316 2 model """hole""" +316 2 loss """crossentropy""" +316 2 regularizer """no""" +316 2 optimizer """adam""" +316 2 training_loop """lcwa""" +316 2 evaluator """rankbased""" +316 3 dataset """kinships""" +316 3 model """hole""" +316 3 loss """crossentropy""" +316 3 regularizer """no""" +316 3 optimizer """adam""" +316 3 training_loop """lcwa""" +316 3 evaluator """rankbased""" +316 4 dataset """kinships""" +316 4 model """hole""" +316 4 loss """crossentropy""" 
+316 4 regularizer """no""" +316 4 optimizer """adam""" +316 4 training_loop """lcwa""" +316 4 evaluator """rankbased""" +316 5 dataset """kinships""" +316 5 model """hole""" +316 5 loss """crossentropy""" +316 5 regularizer """no""" +316 5 optimizer """adam""" +316 5 training_loop """lcwa""" +316 5 evaluator """rankbased""" +316 6 dataset """kinships""" +316 6 model """hole""" +316 6 loss """crossentropy""" +316 6 regularizer """no""" +316 6 optimizer """adam""" +316 6 training_loop """lcwa""" +316 6 evaluator """rankbased""" +316 7 dataset """kinships""" +316 7 model """hole""" +316 7 loss """crossentropy""" +316 7 regularizer """no""" +316 7 optimizer """adam""" +316 7 training_loop """lcwa""" +316 7 evaluator """rankbased""" +316 8 dataset """kinships""" +316 8 model """hole""" +316 8 loss """crossentropy""" +316 8 regularizer """no""" +316 8 optimizer """adam""" +316 8 training_loop """lcwa""" +316 8 evaluator """rankbased""" +316 9 dataset """kinships""" +316 9 model """hole""" +316 9 loss """crossentropy""" +316 9 regularizer """no""" +316 9 optimizer """adam""" +316 9 training_loop """lcwa""" +316 9 evaluator """rankbased""" +316 10 dataset """kinships""" +316 10 model """hole""" +316 10 loss """crossentropy""" +316 10 regularizer """no""" +316 10 optimizer """adam""" +316 10 training_loop """lcwa""" +316 10 evaluator """rankbased""" +316 11 dataset """kinships""" +316 11 model """hole""" +316 11 loss """crossentropy""" +316 11 regularizer """no""" +316 11 optimizer """adam""" +316 11 training_loop """lcwa""" +316 11 evaluator """rankbased""" +316 12 dataset """kinships""" +316 12 model """hole""" +316 12 loss """crossentropy""" +316 12 regularizer """no""" +316 12 optimizer """adam""" +316 12 training_loop """lcwa""" +316 12 evaluator """rankbased""" +316 13 dataset """kinships""" +316 13 model """hole""" +316 13 loss """crossentropy""" +316 13 regularizer """no""" +316 13 optimizer """adam""" +316 13 training_loop """lcwa""" +316 13 evaluator 
"""rankbased""" +316 14 dataset """kinships""" +316 14 model """hole""" +316 14 loss """crossentropy""" +316 14 regularizer """no""" +316 14 optimizer """adam""" +316 14 training_loop """lcwa""" +316 14 evaluator """rankbased""" +316 15 dataset """kinships""" +316 15 model """hole""" +316 15 loss """crossentropy""" +316 15 regularizer """no""" +316 15 optimizer """adam""" +316 15 training_loop """lcwa""" +316 15 evaluator """rankbased""" +316 16 dataset """kinships""" +316 16 model """hole""" +316 16 loss """crossentropy""" +316 16 regularizer """no""" +316 16 optimizer """adam""" +316 16 training_loop """lcwa""" +316 16 evaluator """rankbased""" +316 17 dataset """kinships""" +316 17 model """hole""" +316 17 loss """crossentropy""" +316 17 regularizer """no""" +316 17 optimizer """adam""" +316 17 training_loop """lcwa""" +316 17 evaluator """rankbased""" +316 18 dataset """kinships""" +316 18 model """hole""" +316 18 loss """crossentropy""" +316 18 regularizer """no""" +316 18 optimizer """adam""" +316 18 training_loop """lcwa""" +316 18 evaluator """rankbased""" +316 19 dataset """kinships""" +316 19 model """hole""" +316 19 loss """crossentropy""" +316 19 regularizer """no""" +316 19 optimizer """adam""" +316 19 training_loop """lcwa""" +316 19 evaluator """rankbased""" +316 20 dataset """kinships""" +316 20 model """hole""" +316 20 loss """crossentropy""" +316 20 regularizer """no""" +316 20 optimizer """adam""" +316 20 training_loop """lcwa""" +316 20 evaluator """rankbased""" +316 21 dataset """kinships""" +316 21 model """hole""" +316 21 loss """crossentropy""" +316 21 regularizer """no""" +316 21 optimizer """adam""" +316 21 training_loop """lcwa""" +316 21 evaluator """rankbased""" +316 22 dataset """kinships""" +316 22 model """hole""" +316 22 loss """crossentropy""" +316 22 regularizer """no""" +316 22 optimizer """adam""" +316 22 training_loop """lcwa""" +316 22 evaluator """rankbased""" +316 23 dataset """kinships""" +316 23 model """hole""" +316 23 
loss """crossentropy""" +316 23 regularizer """no""" +316 23 optimizer """adam""" +316 23 training_loop """lcwa""" +316 23 evaluator """rankbased""" +316 24 dataset """kinships""" +316 24 model """hole""" +316 24 loss """crossentropy""" +316 24 regularizer """no""" +316 24 optimizer """adam""" +316 24 training_loop """lcwa""" +316 24 evaluator """rankbased""" +316 25 dataset """kinships""" +316 25 model """hole""" +316 25 loss """crossentropy""" +316 25 regularizer """no""" +316 25 optimizer """adam""" +316 25 training_loop """lcwa""" +316 25 evaluator """rankbased""" +316 26 dataset """kinships""" +316 26 model """hole""" +316 26 loss """crossentropy""" +316 26 regularizer """no""" +316 26 optimizer """adam""" +316 26 training_loop """lcwa""" +316 26 evaluator """rankbased""" +316 27 dataset """kinships""" +316 27 model """hole""" +316 27 loss """crossentropy""" +316 27 regularizer """no""" +316 27 optimizer """adam""" +316 27 training_loop """lcwa""" +316 27 evaluator """rankbased""" +316 28 dataset """kinships""" +316 28 model """hole""" +316 28 loss """crossentropy""" +316 28 regularizer """no""" +316 28 optimizer """adam""" +316 28 training_loop """lcwa""" +316 28 evaluator """rankbased""" +316 29 dataset """kinships""" +316 29 model """hole""" +316 29 loss """crossentropy""" +316 29 regularizer """no""" +316 29 optimizer """adam""" +316 29 training_loop """lcwa""" +316 29 evaluator """rankbased""" +316 30 dataset """kinships""" +316 30 model """hole""" +316 30 loss """crossentropy""" +316 30 regularizer """no""" +316 30 optimizer """adam""" +316 30 training_loop """lcwa""" +316 30 evaluator """rankbased""" +316 31 dataset """kinships""" +316 31 model """hole""" +316 31 loss """crossentropy""" +316 31 regularizer """no""" +316 31 optimizer """adam""" +316 31 training_loop """lcwa""" +316 31 evaluator """rankbased""" +316 32 dataset """kinships""" +316 32 model """hole""" +316 32 loss """crossentropy""" +316 32 regularizer """no""" +316 32 optimizer """adam""" 
+316 32 training_loop """lcwa""" +316 32 evaluator """rankbased""" +316 33 dataset """kinships""" +316 33 model """hole""" +316 33 loss """crossentropy""" +316 33 regularizer """no""" +316 33 optimizer """adam""" +316 33 training_loop """lcwa""" +316 33 evaluator """rankbased""" +316 34 dataset """kinships""" +316 34 model """hole""" +316 34 loss """crossentropy""" +316 34 regularizer """no""" +316 34 optimizer """adam""" +316 34 training_loop """lcwa""" +316 34 evaluator """rankbased""" +316 35 dataset """kinships""" +316 35 model """hole""" +316 35 loss """crossentropy""" +316 35 regularizer """no""" +316 35 optimizer """adam""" +316 35 training_loop """lcwa""" +316 35 evaluator """rankbased""" +316 36 dataset """kinships""" +316 36 model """hole""" +316 36 loss """crossentropy""" +316 36 regularizer """no""" +316 36 optimizer """adam""" +316 36 training_loop """lcwa""" +316 36 evaluator """rankbased""" +316 37 dataset """kinships""" +316 37 model """hole""" +316 37 loss """crossentropy""" +316 37 regularizer """no""" +316 37 optimizer """adam""" +316 37 training_loop """lcwa""" +316 37 evaluator """rankbased""" +316 38 dataset """kinships""" +316 38 model """hole""" +316 38 loss """crossentropy""" +316 38 regularizer """no""" +316 38 optimizer """adam""" +316 38 training_loop """lcwa""" +316 38 evaluator """rankbased""" +316 39 dataset """kinships""" +316 39 model """hole""" +316 39 loss """crossentropy""" +316 39 regularizer """no""" +316 39 optimizer """adam""" +316 39 training_loop """lcwa""" +316 39 evaluator """rankbased""" +316 40 dataset """kinships""" +316 40 model """hole""" +316 40 loss """crossentropy""" +316 40 regularizer """no""" +316 40 optimizer """adam""" +316 40 training_loop """lcwa""" +316 40 evaluator """rankbased""" +316 41 dataset """kinships""" +316 41 model """hole""" +316 41 loss """crossentropy""" +316 41 regularizer """no""" +316 41 optimizer """adam""" +316 41 training_loop """lcwa""" +316 41 evaluator """rankbased""" +316 42 dataset 
"""kinships""" +316 42 model """hole""" +316 42 loss """crossentropy""" +316 42 regularizer """no""" +316 42 optimizer """adam""" +316 42 training_loop """lcwa""" +316 42 evaluator """rankbased""" +316 43 dataset """kinships""" +316 43 model """hole""" +316 43 loss """crossentropy""" +316 43 regularizer """no""" +316 43 optimizer """adam""" +316 43 training_loop """lcwa""" +316 43 evaluator """rankbased""" +316 44 dataset """kinships""" +316 44 model """hole""" +316 44 loss """crossentropy""" +316 44 regularizer """no""" +316 44 optimizer """adam""" +316 44 training_loop """lcwa""" +316 44 evaluator """rankbased""" +316 45 dataset """kinships""" +316 45 model """hole""" +316 45 loss """crossentropy""" +316 45 regularizer """no""" +316 45 optimizer """adam""" +316 45 training_loop """lcwa""" +316 45 evaluator """rankbased""" +316 46 dataset """kinships""" +316 46 model """hole""" +316 46 loss """crossentropy""" +316 46 regularizer """no""" +316 46 optimizer """adam""" +316 46 training_loop """lcwa""" +316 46 evaluator """rankbased""" +316 47 dataset """kinships""" +316 47 model """hole""" +316 47 loss """crossentropy""" +316 47 regularizer """no""" +316 47 optimizer """adam""" +316 47 training_loop """lcwa""" +316 47 evaluator """rankbased""" +316 48 dataset """kinships""" +316 48 model """hole""" +316 48 loss """crossentropy""" +316 48 regularizer """no""" +316 48 optimizer """adam""" +316 48 training_loop """lcwa""" +316 48 evaluator """rankbased""" +316 49 dataset """kinships""" +316 49 model """hole""" +316 49 loss """crossentropy""" +316 49 regularizer """no""" +316 49 optimizer """adam""" +316 49 training_loop """lcwa""" +316 49 evaluator """rankbased""" +316 50 dataset """kinships""" +316 50 model """hole""" +316 50 loss """crossentropy""" +316 50 regularizer """no""" +316 50 optimizer """adam""" +316 50 training_loop """lcwa""" +316 50 evaluator """rankbased""" +316 51 dataset """kinships""" +316 51 model """hole""" +316 51 loss """crossentropy""" +316 51 
regularizer """no""" +316 51 optimizer """adam""" +316 51 training_loop """lcwa""" +316 51 evaluator """rankbased""" +316 52 dataset """kinships""" +316 52 model """hole""" +316 52 loss """crossentropy""" +316 52 regularizer """no""" +316 52 optimizer """adam""" +316 52 training_loop """lcwa""" +316 52 evaluator """rankbased""" +316 53 dataset """kinships""" +316 53 model """hole""" +316 53 loss """crossentropy""" +316 53 regularizer """no""" +316 53 optimizer """adam""" +316 53 training_loop """lcwa""" +316 53 evaluator """rankbased""" +316 54 dataset """kinships""" +316 54 model """hole""" +316 54 loss """crossentropy""" +316 54 regularizer """no""" +316 54 optimizer """adam""" +316 54 training_loop """lcwa""" +316 54 evaluator """rankbased""" +316 55 dataset """kinships""" +316 55 model """hole""" +316 55 loss """crossentropy""" +316 55 regularizer """no""" +316 55 optimizer """adam""" +316 55 training_loop """lcwa""" +316 55 evaluator """rankbased""" +316 56 dataset """kinships""" +316 56 model """hole""" +316 56 loss """crossentropy""" +316 56 regularizer """no""" +316 56 optimizer """adam""" +316 56 training_loop """lcwa""" +316 56 evaluator """rankbased""" +316 57 dataset """kinships""" +316 57 model """hole""" +316 57 loss """crossentropy""" +316 57 regularizer """no""" +316 57 optimizer """adam""" +316 57 training_loop """lcwa""" +316 57 evaluator """rankbased""" +316 58 dataset """kinships""" +316 58 model """hole""" +316 58 loss """crossentropy""" +316 58 regularizer """no""" +316 58 optimizer """adam""" +316 58 training_loop """lcwa""" +316 58 evaluator """rankbased""" +316 59 dataset """kinships""" +316 59 model """hole""" +316 59 loss """crossentropy""" +316 59 regularizer """no""" +316 59 optimizer """adam""" +316 59 training_loop """lcwa""" +316 59 evaluator """rankbased""" +316 60 dataset """kinships""" +316 60 model """hole""" +316 60 loss """crossentropy""" +316 60 regularizer """no""" +316 60 optimizer """adam""" +316 60 training_loop """lcwa""" 
+316 60 evaluator """rankbased""" +316 61 dataset """kinships""" +316 61 model """hole""" +316 61 loss """crossentropy""" +316 61 regularizer """no""" +316 61 optimizer """adam""" +316 61 training_loop """lcwa""" +316 61 evaluator """rankbased""" +316 62 dataset """kinships""" +316 62 model """hole""" +316 62 loss """crossentropy""" +316 62 regularizer """no""" +316 62 optimizer """adam""" +316 62 training_loop """lcwa""" +316 62 evaluator """rankbased""" +316 63 dataset """kinships""" +316 63 model """hole""" +316 63 loss """crossentropy""" +316 63 regularizer """no""" +316 63 optimizer """adam""" +316 63 training_loop """lcwa""" +316 63 evaluator """rankbased""" +316 64 dataset """kinships""" +316 64 model """hole""" +316 64 loss """crossentropy""" +316 64 regularizer """no""" +316 64 optimizer """adam""" +316 64 training_loop """lcwa""" +316 64 evaluator """rankbased""" +316 65 dataset """kinships""" +316 65 model """hole""" +316 65 loss """crossentropy""" +316 65 regularizer """no""" +316 65 optimizer """adam""" +316 65 training_loop """lcwa""" +316 65 evaluator """rankbased""" +316 66 dataset """kinships""" +316 66 model """hole""" +316 66 loss """crossentropy""" +316 66 regularizer """no""" +316 66 optimizer """adam""" +316 66 training_loop """lcwa""" +316 66 evaluator """rankbased""" +316 67 dataset """kinships""" +316 67 model """hole""" +316 67 loss """crossentropy""" +316 67 regularizer """no""" +316 67 optimizer """adam""" +316 67 training_loop """lcwa""" +316 67 evaluator """rankbased""" +316 68 dataset """kinships""" +316 68 model """hole""" +316 68 loss """crossentropy""" +316 68 regularizer """no""" +316 68 optimizer """adam""" +316 68 training_loop """lcwa""" +316 68 evaluator """rankbased""" +316 69 dataset """kinships""" +316 69 model """hole""" +316 69 loss """crossentropy""" +316 69 regularizer """no""" +316 69 optimizer """adam""" +316 69 training_loop """lcwa""" +316 69 evaluator """rankbased""" +316 70 dataset """kinships""" +316 70 model 
"""hole""" +316 70 loss """crossentropy""" +316 70 regularizer """no""" +316 70 optimizer """adam""" +316 70 training_loop """lcwa""" +316 70 evaluator """rankbased""" +316 71 dataset """kinships""" +316 71 model """hole""" +316 71 loss """crossentropy""" +316 71 regularizer """no""" +316 71 optimizer """adam""" +316 71 training_loop """lcwa""" +316 71 evaluator """rankbased""" +316 72 dataset """kinships""" +316 72 model """hole""" +316 72 loss """crossentropy""" +316 72 regularizer """no""" +316 72 optimizer """adam""" +316 72 training_loop """lcwa""" +316 72 evaluator """rankbased""" +316 73 dataset """kinships""" +316 73 model """hole""" +316 73 loss """crossentropy""" +316 73 regularizer """no""" +316 73 optimizer """adam""" +316 73 training_loop """lcwa""" +316 73 evaluator """rankbased""" +316 74 dataset """kinships""" +316 74 model """hole""" +316 74 loss """crossentropy""" +316 74 regularizer """no""" +316 74 optimizer """adam""" +316 74 training_loop """lcwa""" +316 74 evaluator """rankbased""" +316 75 dataset """kinships""" +316 75 model """hole""" +316 75 loss """crossentropy""" +316 75 regularizer """no""" +316 75 optimizer """adam""" +316 75 training_loop """lcwa""" +316 75 evaluator """rankbased""" +316 76 dataset """kinships""" +316 76 model """hole""" +316 76 loss """crossentropy""" +316 76 regularizer """no""" +316 76 optimizer """adam""" +316 76 training_loop """lcwa""" +316 76 evaluator """rankbased""" +316 77 dataset """kinships""" +316 77 model """hole""" +316 77 loss """crossentropy""" +316 77 regularizer """no""" +316 77 optimizer """adam""" +316 77 training_loop """lcwa""" +316 77 evaluator """rankbased""" +316 78 dataset """kinships""" +316 78 model """hole""" +316 78 loss """crossentropy""" +316 78 regularizer """no""" +316 78 optimizer """adam""" +316 78 training_loop """lcwa""" +316 78 evaluator """rankbased""" +316 79 dataset """kinships""" +316 79 model """hole""" +316 79 loss """crossentropy""" +316 79 regularizer """no""" +316 79 
optimizer """adam""" +316 79 training_loop """lcwa""" +316 79 evaluator """rankbased""" +316 80 dataset """kinships""" +316 80 model """hole""" +316 80 loss """crossentropy""" +316 80 regularizer """no""" +316 80 optimizer """adam""" +316 80 training_loop """lcwa""" +316 80 evaluator """rankbased""" +316 81 dataset """kinships""" +316 81 model """hole""" +316 81 loss """crossentropy""" +316 81 regularizer """no""" +316 81 optimizer """adam""" +316 81 training_loop """lcwa""" +316 81 evaluator """rankbased""" +316 82 dataset """kinships""" +316 82 model """hole""" +316 82 loss """crossentropy""" +316 82 regularizer """no""" +316 82 optimizer """adam""" +316 82 training_loop """lcwa""" +316 82 evaluator """rankbased""" +316 83 dataset """kinships""" +316 83 model """hole""" +316 83 loss """crossentropy""" +316 83 regularizer """no""" +316 83 optimizer """adam""" +316 83 training_loop """lcwa""" +316 83 evaluator """rankbased""" +316 84 dataset """kinships""" +316 84 model """hole""" +316 84 loss """crossentropy""" +316 84 regularizer """no""" +316 84 optimizer """adam""" +316 84 training_loop """lcwa""" +316 84 evaluator """rankbased""" +316 85 dataset """kinships""" +316 85 model """hole""" +316 85 loss """crossentropy""" +316 85 regularizer """no""" +316 85 optimizer """adam""" +316 85 training_loop """lcwa""" +316 85 evaluator """rankbased""" +316 86 dataset """kinships""" +316 86 model """hole""" +316 86 loss """crossentropy""" +316 86 regularizer """no""" +316 86 optimizer """adam""" +316 86 training_loop """lcwa""" +316 86 evaluator """rankbased""" +316 87 dataset """kinships""" +316 87 model """hole""" +316 87 loss """crossentropy""" +316 87 regularizer """no""" +316 87 optimizer """adam""" +316 87 training_loop """lcwa""" +316 87 evaluator """rankbased""" +316 88 dataset """kinships""" +316 88 model """hole""" +316 88 loss """crossentropy""" +316 88 regularizer """no""" +316 88 optimizer """adam""" +316 88 training_loop """lcwa""" +316 88 evaluator 
"""rankbased""" +316 89 dataset """kinships""" +316 89 model """hole""" +316 89 loss """crossentropy""" +316 89 regularizer """no""" +316 89 optimizer """adam""" +316 89 training_loop """lcwa""" +316 89 evaluator """rankbased""" +316 90 dataset """kinships""" +316 90 model """hole""" +316 90 loss """crossentropy""" +316 90 regularizer """no""" +316 90 optimizer """adam""" +316 90 training_loop """lcwa""" +316 90 evaluator """rankbased""" +316 91 dataset """kinships""" +316 91 model """hole""" +316 91 loss """crossentropy""" +316 91 regularizer """no""" +316 91 optimizer """adam""" +316 91 training_loop """lcwa""" +316 91 evaluator """rankbased""" +316 92 dataset """kinships""" +316 92 model """hole""" +316 92 loss """crossentropy""" +316 92 regularizer """no""" +316 92 optimizer """adam""" +316 92 training_loop """lcwa""" +316 92 evaluator """rankbased""" +316 93 dataset """kinships""" +316 93 model """hole""" +316 93 loss """crossentropy""" +316 93 regularizer """no""" +316 93 optimizer """adam""" +316 93 training_loop """lcwa""" +316 93 evaluator """rankbased""" +316 94 dataset """kinships""" +316 94 model """hole""" +316 94 loss """crossentropy""" +316 94 regularizer """no""" +316 94 optimizer """adam""" +316 94 training_loop """lcwa""" +316 94 evaluator """rankbased""" +316 95 dataset """kinships""" +316 95 model """hole""" +316 95 loss """crossentropy""" +316 95 regularizer """no""" +316 95 optimizer """adam""" +316 95 training_loop """lcwa""" +316 95 evaluator """rankbased""" +316 96 dataset """kinships""" +316 96 model """hole""" +316 96 loss """crossentropy""" +316 96 regularizer """no""" +316 96 optimizer """adam""" +316 96 training_loop """lcwa""" +316 96 evaluator """rankbased""" +316 97 dataset """kinships""" +316 97 model """hole""" +316 97 loss """crossentropy""" +316 97 regularizer """no""" +316 97 optimizer """adam""" +316 97 training_loop """lcwa""" +316 97 evaluator """rankbased""" +316 98 dataset """kinships""" +316 98 model """hole""" +316 98 
loss """crossentropy""" +316 98 regularizer """no""" +316 98 optimizer """adam""" +316 98 training_loop """lcwa""" +316 98 evaluator """rankbased""" +316 99 dataset """kinships""" +316 99 model """hole""" +316 99 loss """crossentropy""" +316 99 regularizer """no""" +316 99 optimizer """adam""" +316 99 training_loop """lcwa""" +316 99 evaluator """rankbased""" +316 100 dataset """kinships""" +316 100 model """hole""" +316 100 loss """crossentropy""" +316 100 regularizer """no""" +316 100 optimizer """adam""" +316 100 training_loop """lcwa""" +316 100 evaluator """rankbased""" +317 1 model.embedding_dim 2.0 +317 1 optimizer.lr 0.01977590197183224 +317 1 training.batch_size 1.0 +317 1 training.label_smoothing 0.0035665375615919 +317 2 model.embedding_dim 2.0 +317 2 optimizer.lr 0.03151611258279309 +317 2 training.batch_size 0.0 +317 2 training.label_smoothing 0.0011811342756427423 +317 3 model.embedding_dim 0.0 +317 3 optimizer.lr 0.013069391189535212 +317 3 training.batch_size 2.0 +317 3 training.label_smoothing 0.009986554493471547 +317 4 model.embedding_dim 2.0 +317 4 optimizer.lr 0.07203899607633539 +317 4 training.batch_size 0.0 +317 4 training.label_smoothing 0.05815266343644771 +317 5 model.embedding_dim 0.0 +317 5 optimizer.lr 0.0015960188529198637 +317 5 training.batch_size 1.0 +317 5 training.label_smoothing 0.09477393495994364 +317 6 model.embedding_dim 2.0 +317 6 optimizer.lr 0.028729913554676988 +317 6 training.batch_size 2.0 +317 6 training.label_smoothing 0.01126210863800558 +317 7 model.embedding_dim 2.0 +317 7 optimizer.lr 0.0026477967910073985 +317 7 training.batch_size 0.0 +317 7 training.label_smoothing 0.4175130820466276 +317 8 model.embedding_dim 1.0 +317 8 optimizer.lr 0.015191234372783073 +317 8 training.batch_size 1.0 +317 8 training.label_smoothing 0.0010146827747799694 +317 9 model.embedding_dim 1.0 +317 9 optimizer.lr 0.09007907260775135 +317 9 training.batch_size 1.0 +317 9 training.label_smoothing 0.8507022075315274 +317 10 
model.embedding_dim 2.0 +317 10 optimizer.lr 0.001032809674460297 +317 10 training.batch_size 0.0 +317 10 training.label_smoothing 0.9783615556787579 +317 11 model.embedding_dim 2.0 +317 11 optimizer.lr 0.04194914880015453 +317 11 training.batch_size 1.0 +317 11 training.label_smoothing 0.19276658962025878 +317 12 model.embedding_dim 2.0 +317 12 optimizer.lr 0.04982315126703262 +317 12 training.batch_size 2.0 +317 12 training.label_smoothing 0.11153628685336793 +317 13 model.embedding_dim 0.0 +317 13 optimizer.lr 0.036983876629848804 +317 13 training.batch_size 1.0 +317 13 training.label_smoothing 0.36470535167066576 +317 14 model.embedding_dim 2.0 +317 14 optimizer.lr 0.0018130002471036415 +317 14 training.batch_size 0.0 +317 14 training.label_smoothing 0.7811400828464078 +317 15 model.embedding_dim 0.0 +317 15 optimizer.lr 0.09145379766940108 +317 15 training.batch_size 2.0 +317 15 training.label_smoothing 0.03975208198811822 +317 16 model.embedding_dim 0.0 +317 16 optimizer.lr 0.006664701710910809 +317 16 training.batch_size 2.0 +317 16 training.label_smoothing 0.07655544884377412 +317 17 model.embedding_dim 2.0 +317 17 optimizer.lr 0.09402442255793217 +317 17 training.batch_size 2.0 +317 17 training.label_smoothing 0.04200674891219068 +317 18 model.embedding_dim 1.0 +317 18 optimizer.lr 0.00448854452180794 +317 18 training.batch_size 0.0 +317 18 training.label_smoothing 0.24816185622618903 +317 19 model.embedding_dim 2.0 +317 19 optimizer.lr 0.00283287356277948 +317 19 training.batch_size 1.0 +317 19 training.label_smoothing 0.029170867701814176 +317 20 model.embedding_dim 0.0 +317 20 optimizer.lr 0.043774247933841194 +317 20 training.batch_size 1.0 +317 20 training.label_smoothing 0.0011462034913937303 +317 21 model.embedding_dim 2.0 +317 21 optimizer.lr 0.04452964085149193 +317 21 training.batch_size 2.0 +317 21 training.label_smoothing 0.28454312156620026 +317 22 model.embedding_dim 1.0 +317 22 optimizer.lr 0.001487583202584968 +317 22 training.batch_size 
2.0 +317 22 training.label_smoothing 0.0021674490477729016 +317 23 model.embedding_dim 1.0 +317 23 optimizer.lr 0.009808424588429733 +317 23 training.batch_size 1.0 +317 23 training.label_smoothing 0.012170008559708779 +317 24 model.embedding_dim 1.0 +317 24 optimizer.lr 0.039924013720730366 +317 24 training.batch_size 1.0 +317 24 training.label_smoothing 0.11534232895390192 +317 25 model.embedding_dim 2.0 +317 25 optimizer.lr 0.002729942589717552 +317 25 training.batch_size 2.0 +317 25 training.label_smoothing 0.12387604278679863 +317 26 model.embedding_dim 0.0 +317 26 optimizer.lr 0.008074878107493683 +317 26 training.batch_size 0.0 +317 26 training.label_smoothing 0.0014293734933492552 +317 27 model.embedding_dim 2.0 +317 27 optimizer.lr 0.011461850872847738 +317 27 training.batch_size 1.0 +317 27 training.label_smoothing 0.47773519582057405 +317 28 model.embedding_dim 0.0 +317 28 optimizer.lr 0.027634404880222098 +317 28 training.batch_size 2.0 +317 28 training.label_smoothing 0.03321322703063394 +317 29 model.embedding_dim 1.0 +317 29 optimizer.lr 0.015708495902593298 +317 29 training.batch_size 0.0 +317 29 training.label_smoothing 0.2543599314851916 +317 30 model.embedding_dim 2.0 +317 30 optimizer.lr 0.028830805358861344 +317 30 training.batch_size 2.0 +317 30 training.label_smoothing 0.0027598337449235653 +317 31 model.embedding_dim 1.0 +317 31 optimizer.lr 0.0033539931234618076 +317 31 training.batch_size 0.0 +317 31 training.label_smoothing 0.010611955445768723 +317 32 model.embedding_dim 2.0 +317 32 optimizer.lr 0.014624505813658283 +317 32 training.batch_size 1.0 +317 32 training.label_smoothing 0.37467699630224405 +317 33 model.embedding_dim 2.0 +317 33 optimizer.lr 0.004176307425808848 +317 33 training.batch_size 0.0 +317 33 training.label_smoothing 0.041518656216660454 +317 34 model.embedding_dim 2.0 +317 34 optimizer.lr 0.001663932418123998 +317 34 training.batch_size 2.0 +317 34 training.label_smoothing 0.11531306420316424 +317 35 
model.embedding_dim 2.0 +317 35 optimizer.lr 0.002491407545495381 +317 35 training.batch_size 1.0 +317 35 training.label_smoothing 0.3962762628597668 +317 36 model.embedding_dim 1.0 +317 36 optimizer.lr 0.002108167761209977 +317 36 training.batch_size 1.0 +317 36 training.label_smoothing 0.22992728436900473 +317 37 model.embedding_dim 0.0 +317 37 optimizer.lr 0.008065391887914303 +317 37 training.batch_size 1.0 +317 37 training.label_smoothing 0.0025678450737824277 +317 38 model.embedding_dim 2.0 +317 38 optimizer.lr 0.03277760900175714 +317 38 training.batch_size 2.0 +317 38 training.label_smoothing 0.009292908610960035 +317 39 model.embedding_dim 0.0 +317 39 optimizer.lr 0.04092439435306432 +317 39 training.batch_size 0.0 +317 39 training.label_smoothing 0.20990605785836677 +317 40 model.embedding_dim 1.0 +317 40 optimizer.lr 0.005484524786584322 +317 40 training.batch_size 0.0 +317 40 training.label_smoothing 0.42833004090434507 +317 41 model.embedding_dim 0.0 +317 41 optimizer.lr 0.0012560920958374893 +317 41 training.batch_size 1.0 +317 41 training.label_smoothing 0.004770936987750899 +317 42 model.embedding_dim 2.0 +317 42 optimizer.lr 0.014762036631660883 +317 42 training.batch_size 1.0 +317 42 training.label_smoothing 0.0818609237441284 +317 43 model.embedding_dim 1.0 +317 43 optimizer.lr 0.009654462215032763 +317 43 training.batch_size 1.0 +317 43 training.label_smoothing 0.15502929775764923 +317 44 model.embedding_dim 1.0 +317 44 optimizer.lr 0.0023755314249764344 +317 44 training.batch_size 2.0 +317 44 training.label_smoothing 0.007440345882238678 +317 45 model.embedding_dim 2.0 +317 45 optimizer.lr 0.01903864135915634 +317 45 training.batch_size 1.0 +317 45 training.label_smoothing 0.009960988317734551 +317 46 model.embedding_dim 0.0 +317 46 optimizer.lr 0.0022242192360619633 +317 46 training.batch_size 0.0 +317 46 training.label_smoothing 0.013981804795934015 +317 47 model.embedding_dim 0.0 +317 47 optimizer.lr 0.0017294892035069303 +317 47 
training.batch_size 0.0 +317 47 training.label_smoothing 0.029793296428078486 +317 48 model.embedding_dim 2.0 +317 48 optimizer.lr 0.009985574762995027 +317 48 training.batch_size 2.0 +317 48 training.label_smoothing 0.03679427678991424 +317 49 model.embedding_dim 2.0 +317 49 optimizer.lr 0.0017635876633738466 +317 49 training.batch_size 2.0 +317 49 training.label_smoothing 0.1784660263187958 +317 50 model.embedding_dim 1.0 +317 50 optimizer.lr 0.07104093845460767 +317 50 training.batch_size 1.0 +317 50 training.label_smoothing 0.010680911481688773 +317 51 model.embedding_dim 1.0 +317 51 optimizer.lr 0.001108536753577495 +317 51 training.batch_size 2.0 +317 51 training.label_smoothing 0.004228049611715296 +317 52 model.embedding_dim 0.0 +317 52 optimizer.lr 0.0010285966723738021 +317 52 training.batch_size 0.0 +317 52 training.label_smoothing 0.0012871787527187542 +317 53 model.embedding_dim 0.0 +317 53 optimizer.lr 0.04160463587726322 +317 53 training.batch_size 2.0 +317 53 training.label_smoothing 0.010182514098456706 +317 54 model.embedding_dim 0.0 +317 54 optimizer.lr 0.04987913464990839 +317 54 training.batch_size 2.0 +317 54 training.label_smoothing 0.2816878806152473 +317 55 model.embedding_dim 0.0 +317 55 optimizer.lr 0.036748786894343195 +317 55 training.batch_size 1.0 +317 55 training.label_smoothing 0.011346628140387132 +317 56 model.embedding_dim 0.0 +317 56 optimizer.lr 0.05191958245698468 +317 56 training.batch_size 2.0 +317 56 training.label_smoothing 0.4168986755117475 +317 57 model.embedding_dim 1.0 +317 57 optimizer.lr 0.0505073609142461 +317 57 training.batch_size 0.0 +317 57 training.label_smoothing 0.011497318396971122 +317 58 model.embedding_dim 1.0 +317 58 optimizer.lr 0.03675007713823846 +317 58 training.batch_size 2.0 +317 58 training.label_smoothing 0.29259106626149534 +317 59 model.embedding_dim 2.0 +317 59 optimizer.lr 0.003164517889183872 +317 59 training.batch_size 2.0 +317 59 training.label_smoothing 0.01339547067352453 +317 60 
model.embedding_dim 1.0 +317 60 optimizer.lr 0.0017186120643230595 +317 60 training.batch_size 1.0 +317 60 training.label_smoothing 0.005227309589735602 +317 61 model.embedding_dim 2.0 +317 61 optimizer.lr 0.040945353886069014 +317 61 training.batch_size 2.0 +317 61 training.label_smoothing 0.3006442658899791 +317 62 model.embedding_dim 2.0 +317 62 optimizer.lr 0.0863043486042325 +317 62 training.batch_size 1.0 +317 62 training.label_smoothing 0.015669485558435583 +317 63 model.embedding_dim 1.0 +317 63 optimizer.lr 0.09815771469086154 +317 63 training.batch_size 0.0 +317 63 training.label_smoothing 0.04740285976813428 +317 64 model.embedding_dim 1.0 +317 64 optimizer.lr 0.023090120385576517 +317 64 training.batch_size 0.0 +317 64 training.label_smoothing 0.0014224282581895996 +317 65 model.embedding_dim 2.0 +317 65 optimizer.lr 0.003103052472605562 +317 65 training.batch_size 1.0 +317 65 training.label_smoothing 0.027695365927473407 +317 66 model.embedding_dim 2.0 +317 66 optimizer.lr 0.003140683136773082 +317 66 training.batch_size 2.0 +317 66 training.label_smoothing 0.02167632877693244 +317 67 model.embedding_dim 1.0 +317 67 optimizer.lr 0.004717284763953944 +317 67 training.batch_size 0.0 +317 67 training.label_smoothing 0.08680749006888801 +317 68 model.embedding_dim 0.0 +317 68 optimizer.lr 0.00468656236012586 +317 68 training.batch_size 0.0 +317 68 training.label_smoothing 0.10983003597516919 +317 69 model.embedding_dim 2.0 +317 69 optimizer.lr 0.0013598692618020894 +317 69 training.batch_size 0.0 +317 69 training.label_smoothing 0.007148329454857163 +317 70 model.embedding_dim 1.0 +317 70 optimizer.lr 0.0018214876569576314 +317 70 training.batch_size 1.0 +317 70 training.label_smoothing 0.006866520455019946 +317 71 model.embedding_dim 1.0 +317 71 optimizer.lr 0.02930581807998361 +317 71 training.batch_size 2.0 +317 71 training.label_smoothing 0.08808636686908017 +317 72 model.embedding_dim 2.0 +317 72 optimizer.lr 0.0013186776796247379 +317 72 
training.batch_size 2.0 +317 72 training.label_smoothing 0.0910663754751683 +317 73 model.embedding_dim 1.0 +317 73 optimizer.lr 0.020642180534372695 +317 73 training.batch_size 2.0 +317 73 training.label_smoothing 0.4254230005805439 +317 74 model.embedding_dim 2.0 +317 74 optimizer.lr 0.003802040036237353 +317 74 training.batch_size 1.0 +317 74 training.label_smoothing 0.09415497007614558 +317 75 model.embedding_dim 2.0 +317 75 optimizer.lr 0.004873578203036515 +317 75 training.batch_size 2.0 +317 75 training.label_smoothing 0.0015470998228378132 +317 76 model.embedding_dim 2.0 +317 76 optimizer.lr 0.04541744447129701 +317 76 training.batch_size 1.0 +317 76 training.label_smoothing 0.9663975830932909 +317 77 model.embedding_dim 0.0 +317 77 optimizer.lr 0.00857929636306554 +317 77 training.batch_size 1.0 +317 77 training.label_smoothing 0.2572233088297916 +317 78 model.embedding_dim 2.0 +317 78 optimizer.lr 0.0015948874203988602 +317 78 training.batch_size 0.0 +317 78 training.label_smoothing 0.0017842278535281878 +317 79 model.embedding_dim 1.0 +317 79 optimizer.lr 0.0013301876955958954 +317 79 training.batch_size 1.0 +317 79 training.label_smoothing 0.001159207152834914 +317 80 model.embedding_dim 0.0 +317 80 optimizer.lr 0.001868347999084271 +317 80 training.batch_size 0.0 +317 80 training.label_smoothing 0.004629735436441694 +317 81 model.embedding_dim 0.0 +317 81 optimizer.lr 0.007120219240586602 +317 81 training.batch_size 0.0 +317 81 training.label_smoothing 0.5924952424992951 +317 82 model.embedding_dim 2.0 +317 82 optimizer.lr 0.09207309840109243 +317 82 training.batch_size 2.0 +317 82 training.label_smoothing 0.02038803582123464 +317 83 model.embedding_dim 1.0 +317 83 optimizer.lr 0.006854241994885096 +317 83 training.batch_size 2.0 +317 83 training.label_smoothing 0.043082375922784064 +317 84 model.embedding_dim 2.0 +317 84 optimizer.lr 0.0027818110076113125 +317 84 training.batch_size 1.0 +317 84 training.label_smoothing 0.1518285839806251 +317 85 
model.embedding_dim 0.0 +317 85 optimizer.lr 0.08351782068566602 +317 85 training.batch_size 1.0 +317 85 training.label_smoothing 0.0129672740135875 +317 86 model.embedding_dim 2.0 +317 86 optimizer.lr 0.003009219666547072 +317 86 training.batch_size 0.0 +317 86 training.label_smoothing 0.5430944683441357 +317 87 model.embedding_dim 2.0 +317 87 optimizer.lr 0.0013878369460826013 +317 87 training.batch_size 0.0 +317 87 training.label_smoothing 0.001822902563751923 +317 88 model.embedding_dim 2.0 +317 88 optimizer.lr 0.010789103754050915 +317 88 training.batch_size 1.0 +317 88 training.label_smoothing 0.01809811984805174 +317 89 model.embedding_dim 0.0 +317 89 optimizer.lr 0.003230030852405969 +317 89 training.batch_size 0.0 +317 89 training.label_smoothing 0.016117303382678493 +317 90 model.embedding_dim 0.0 +317 90 optimizer.lr 0.025139042505783873 +317 90 training.batch_size 1.0 +317 90 training.label_smoothing 0.19412309256384644 +317 91 model.embedding_dim 0.0 +317 91 optimizer.lr 0.023555925228639876 +317 91 training.batch_size 1.0 +317 91 training.label_smoothing 0.011087222208151375 +317 92 model.embedding_dim 0.0 +317 92 optimizer.lr 0.022472468712319452 +317 92 training.batch_size 0.0 +317 92 training.label_smoothing 0.006675457562027857 +317 93 model.embedding_dim 0.0 +317 93 optimizer.lr 0.0013577328134431754 +317 93 training.batch_size 2.0 +317 93 training.label_smoothing 0.012627492903224782 +317 94 model.embedding_dim 0.0 +317 94 optimizer.lr 0.03263787360149329 +317 94 training.batch_size 0.0 +317 94 training.label_smoothing 0.019361547393082212 +317 95 model.embedding_dim 1.0 +317 95 optimizer.lr 0.01663756663765646 +317 95 training.batch_size 0.0 +317 95 training.label_smoothing 0.0012624832301692053 +317 96 model.embedding_dim 2.0 +317 96 optimizer.lr 0.015016469952279784 +317 96 training.batch_size 0.0 +317 96 training.label_smoothing 0.004977995652737704 +317 97 model.embedding_dim 2.0 +317 97 optimizer.lr 0.013615964437009529 +317 97 
training.batch_size 1.0 +317 97 training.label_smoothing 0.2818712450824738 +317 98 model.embedding_dim 0.0 +317 98 optimizer.lr 0.006151238998883296 +317 98 training.batch_size 0.0 +317 98 training.label_smoothing 0.01432973131652472 +317 99 model.embedding_dim 0.0 +317 99 optimizer.lr 0.003052738738330853 +317 99 training.batch_size 1.0 +317 99 training.label_smoothing 0.5411345423672124 +317 100 model.embedding_dim 1.0 +317 100 optimizer.lr 0.018052592999121573 +317 100 training.batch_size 0.0 +317 100 training.label_smoothing 0.0772271710183017 +317 1 dataset """kinships""" +317 1 model """hole""" +317 1 loss """crossentropy""" +317 1 regularizer """no""" +317 1 optimizer """adam""" +317 1 training_loop """lcwa""" +317 1 evaluator """rankbased""" +317 2 dataset """kinships""" +317 2 model """hole""" +317 2 loss """crossentropy""" +317 2 regularizer """no""" +317 2 optimizer """adam""" +317 2 training_loop """lcwa""" +317 2 evaluator """rankbased""" +317 3 dataset """kinships""" +317 3 model """hole""" +317 3 loss """crossentropy""" +317 3 regularizer """no""" +317 3 optimizer """adam""" +317 3 training_loop """lcwa""" +317 3 evaluator """rankbased""" +317 4 dataset """kinships""" +317 4 model """hole""" +317 4 loss """crossentropy""" +317 4 regularizer """no""" +317 4 optimizer """adam""" +317 4 training_loop """lcwa""" +317 4 evaluator """rankbased""" +317 5 dataset """kinships""" +317 5 model """hole""" +317 5 loss """crossentropy""" +317 5 regularizer """no""" +317 5 optimizer """adam""" +317 5 training_loop """lcwa""" +317 5 evaluator """rankbased""" +317 6 dataset """kinships""" +317 6 model """hole""" +317 6 loss """crossentropy""" +317 6 regularizer """no""" +317 6 optimizer """adam""" +317 6 training_loop """lcwa""" +317 6 evaluator """rankbased""" +317 7 dataset """kinships""" +317 7 model """hole""" +317 7 loss """crossentropy""" +317 7 regularizer """no""" +317 7 optimizer """adam""" +317 7 training_loop """lcwa""" +317 7 evaluator """rankbased""" 
+317 8 dataset """kinships""" +317 8 model """hole""" +317 8 loss """crossentropy""" +317 8 regularizer """no""" +317 8 optimizer """adam""" +317 8 training_loop """lcwa""" +317 8 evaluator """rankbased""" +317 9 dataset """kinships""" +317 9 model """hole""" +317 9 loss """crossentropy""" +317 9 regularizer """no""" +317 9 optimizer """adam""" +317 9 training_loop """lcwa""" +317 9 evaluator """rankbased""" +317 10 dataset """kinships""" +317 10 model """hole""" +317 10 loss """crossentropy""" +317 10 regularizer """no""" +317 10 optimizer """adam""" +317 10 training_loop """lcwa""" +317 10 evaluator """rankbased""" +317 11 dataset """kinships""" +317 11 model """hole""" +317 11 loss """crossentropy""" +317 11 regularizer """no""" +317 11 optimizer """adam""" +317 11 training_loop """lcwa""" +317 11 evaluator """rankbased""" +317 12 dataset """kinships""" +317 12 model """hole""" +317 12 loss """crossentropy""" +317 12 regularizer """no""" +317 12 optimizer """adam""" +317 12 training_loop """lcwa""" +317 12 evaluator """rankbased""" +317 13 dataset """kinships""" +317 13 model """hole""" +317 13 loss """crossentropy""" +317 13 regularizer """no""" +317 13 optimizer """adam""" +317 13 training_loop """lcwa""" +317 13 evaluator """rankbased""" +317 14 dataset """kinships""" +317 14 model """hole""" +317 14 loss """crossentropy""" +317 14 regularizer """no""" +317 14 optimizer """adam""" +317 14 training_loop """lcwa""" +317 14 evaluator """rankbased""" +317 15 dataset """kinships""" +317 15 model """hole""" +317 15 loss """crossentropy""" +317 15 regularizer """no""" +317 15 optimizer """adam""" +317 15 training_loop """lcwa""" +317 15 evaluator """rankbased""" +317 16 dataset """kinships""" +317 16 model """hole""" +317 16 loss """crossentropy""" +317 16 regularizer """no""" +317 16 optimizer """adam""" +317 16 training_loop """lcwa""" +317 16 evaluator """rankbased""" +317 17 dataset """kinships""" +317 17 model """hole""" +317 17 loss """crossentropy""" +317 17 
regularizer """no""" +317 17 optimizer """adam""" +317 17 training_loop """lcwa""" +317 17 evaluator """rankbased""" +317 18 dataset """kinships""" +317 18 model """hole""" +317 18 loss """crossentropy""" +317 18 regularizer """no""" +317 18 optimizer """adam""" +317 18 training_loop """lcwa""" +317 18 evaluator """rankbased""" +317 19 dataset """kinships""" +317 19 model """hole""" +317 19 loss """crossentropy""" +317 19 regularizer """no""" +317 19 optimizer """adam""" +317 19 training_loop """lcwa""" +317 19 evaluator """rankbased""" +317 20 dataset """kinships""" +317 20 model """hole""" +317 20 loss """crossentropy""" +317 20 regularizer """no""" +317 20 optimizer """adam""" +317 20 training_loop """lcwa""" +317 20 evaluator """rankbased""" +317 21 dataset """kinships""" +317 21 model """hole""" +317 21 loss """crossentropy""" +317 21 regularizer """no""" +317 21 optimizer """adam""" +317 21 training_loop """lcwa""" +317 21 evaluator """rankbased""" +317 22 dataset """kinships""" +317 22 model """hole""" +317 22 loss """crossentropy""" +317 22 regularizer """no""" +317 22 optimizer """adam""" +317 22 training_loop """lcwa""" +317 22 evaluator """rankbased""" +317 23 dataset """kinships""" +317 23 model """hole""" +317 23 loss """crossentropy""" +317 23 regularizer """no""" +317 23 optimizer """adam""" +317 23 training_loop """lcwa""" +317 23 evaluator """rankbased""" +317 24 dataset """kinships""" +317 24 model """hole""" +317 24 loss """crossentropy""" +317 24 regularizer """no""" +317 24 optimizer """adam""" +317 24 training_loop """lcwa""" +317 24 evaluator """rankbased""" +317 25 dataset """kinships""" +317 25 model """hole""" +317 25 loss """crossentropy""" +317 25 regularizer """no""" +317 25 optimizer """adam""" +317 25 training_loop """lcwa""" +317 25 evaluator """rankbased""" +317 26 dataset """kinships""" +317 26 model """hole""" +317 26 loss """crossentropy""" +317 26 regularizer """no""" +317 26 optimizer """adam""" +317 26 training_loop """lcwa""" 
+317 26 evaluator """rankbased""" +317 27 dataset """kinships""" +317 27 model """hole""" +317 27 loss """crossentropy""" +317 27 regularizer """no""" +317 27 optimizer """adam""" +317 27 training_loop """lcwa""" +317 27 evaluator """rankbased""" +317 28 dataset """kinships""" +317 28 model """hole""" +317 28 loss """crossentropy""" +317 28 regularizer """no""" +317 28 optimizer """adam""" +317 28 training_loop """lcwa""" +317 28 evaluator """rankbased""" +317 29 dataset """kinships""" +317 29 model """hole""" +317 29 loss """crossentropy""" +317 29 regularizer """no""" +317 29 optimizer """adam""" +317 29 training_loop """lcwa""" +317 29 evaluator """rankbased""" +317 30 dataset """kinships""" +317 30 model """hole""" +317 30 loss """crossentropy""" +317 30 regularizer """no""" +317 30 optimizer """adam""" +317 30 training_loop """lcwa""" +317 30 evaluator """rankbased""" +317 31 dataset """kinships""" +317 31 model """hole""" +317 31 loss """crossentropy""" +317 31 regularizer """no""" +317 31 optimizer """adam""" +317 31 training_loop """lcwa""" +317 31 evaluator """rankbased""" +317 32 dataset """kinships""" +317 32 model """hole""" +317 32 loss """crossentropy""" +317 32 regularizer """no""" +317 32 optimizer """adam""" +317 32 training_loop """lcwa""" +317 32 evaluator """rankbased""" +317 33 dataset """kinships""" +317 33 model """hole""" +317 33 loss """crossentropy""" +317 33 regularizer """no""" +317 33 optimizer """adam""" +317 33 training_loop """lcwa""" +317 33 evaluator """rankbased""" +317 34 dataset """kinships""" +317 34 model """hole""" +317 34 loss """crossentropy""" +317 34 regularizer """no""" +317 34 optimizer """adam""" +317 34 training_loop """lcwa""" +317 34 evaluator """rankbased""" +317 35 dataset """kinships""" +317 35 model """hole""" +317 35 loss """crossentropy""" +317 35 regularizer """no""" +317 35 optimizer """adam""" +317 35 training_loop """lcwa""" +317 35 evaluator """rankbased""" +317 36 dataset """kinships""" +317 36 model 
"""hole""" +317 36 loss """crossentropy""" +317 36 regularizer """no""" +317 36 optimizer """adam""" +317 36 training_loop """lcwa""" +317 36 evaluator """rankbased""" +317 37 dataset """kinships""" +317 37 model """hole""" +317 37 loss """crossentropy""" +317 37 regularizer """no""" +317 37 optimizer """adam""" +317 37 training_loop """lcwa""" +317 37 evaluator """rankbased""" +317 38 dataset """kinships""" +317 38 model """hole""" +317 38 loss """crossentropy""" +317 38 regularizer """no""" +317 38 optimizer """adam""" +317 38 training_loop """lcwa""" +317 38 evaluator """rankbased""" +317 39 dataset """kinships""" +317 39 model """hole""" +317 39 loss """crossentropy""" +317 39 regularizer """no""" +317 39 optimizer """adam""" +317 39 training_loop """lcwa""" +317 39 evaluator """rankbased""" +317 40 dataset """kinships""" +317 40 model """hole""" +317 40 loss """crossentropy""" +317 40 regularizer """no""" +317 40 optimizer """adam""" +317 40 training_loop """lcwa""" +317 40 evaluator """rankbased""" +317 41 dataset """kinships""" +317 41 model """hole""" +317 41 loss """crossentropy""" +317 41 regularizer """no""" +317 41 optimizer """adam""" +317 41 training_loop """lcwa""" +317 41 evaluator """rankbased""" +317 42 dataset """kinships""" +317 42 model """hole""" +317 42 loss """crossentropy""" +317 42 regularizer """no""" +317 42 optimizer """adam""" +317 42 training_loop """lcwa""" +317 42 evaluator """rankbased""" +317 43 dataset """kinships""" +317 43 model """hole""" +317 43 loss """crossentropy""" +317 43 regularizer """no""" +317 43 optimizer """adam""" +317 43 training_loop """lcwa""" +317 43 evaluator """rankbased""" +317 44 dataset """kinships""" +317 44 model """hole""" +317 44 loss """crossentropy""" +317 44 regularizer """no""" +317 44 optimizer """adam""" +317 44 training_loop """lcwa""" +317 44 evaluator """rankbased""" +317 45 dataset """kinships""" +317 45 model """hole""" +317 45 loss """crossentropy""" +317 45 regularizer """no""" +317 45 
optimizer """adam""" +317 45 training_loop """lcwa""" +317 45 evaluator """rankbased""" +317 46 dataset """kinships""" +317 46 model """hole""" +317 46 loss """crossentropy""" +317 46 regularizer """no""" +317 46 optimizer """adam""" +317 46 training_loop """lcwa""" +317 46 evaluator """rankbased""" +317 47 dataset """kinships""" +317 47 model """hole""" +317 47 loss """crossentropy""" +317 47 regularizer """no""" +317 47 optimizer """adam""" +317 47 training_loop """lcwa""" +317 47 evaluator """rankbased""" +317 48 dataset """kinships""" +317 48 model """hole""" +317 48 loss """crossentropy""" +317 48 regularizer """no""" +317 48 optimizer """adam""" +317 48 training_loop """lcwa""" +317 48 evaluator """rankbased""" +317 49 dataset """kinships""" +317 49 model """hole""" +317 49 loss """crossentropy""" +317 49 regularizer """no""" +317 49 optimizer """adam""" +317 49 training_loop """lcwa""" +317 49 evaluator """rankbased""" +317 50 dataset """kinships""" +317 50 model """hole""" +317 50 loss """crossentropy""" +317 50 regularizer """no""" +317 50 optimizer """adam""" +317 50 training_loop """lcwa""" +317 50 evaluator """rankbased""" +317 51 dataset """kinships""" +317 51 model """hole""" +317 51 loss """crossentropy""" +317 51 regularizer """no""" +317 51 optimizer """adam""" +317 51 training_loop """lcwa""" +317 51 evaluator """rankbased""" +317 52 dataset """kinships""" +317 52 model """hole""" +317 52 loss """crossentropy""" +317 52 regularizer """no""" +317 52 optimizer """adam""" +317 52 training_loop """lcwa""" +317 52 evaluator """rankbased""" +317 53 dataset """kinships""" +317 53 model """hole""" +317 53 loss """crossentropy""" +317 53 regularizer """no""" +317 53 optimizer """adam""" +317 53 training_loop """lcwa""" +317 53 evaluator """rankbased""" +317 54 dataset """kinships""" +317 54 model """hole""" +317 54 loss """crossentropy""" +317 54 regularizer """no""" +317 54 optimizer """adam""" +317 54 training_loop """lcwa""" +317 54 evaluator 
"""rankbased""" +317 55 dataset """kinships""" +317 55 model """hole""" +317 55 loss """crossentropy""" +317 55 regularizer """no""" +317 55 optimizer """adam""" +317 55 training_loop """lcwa""" +317 55 evaluator """rankbased""" +317 56 dataset """kinships""" +317 56 model """hole""" +317 56 loss """crossentropy""" +317 56 regularizer """no""" +317 56 optimizer """adam""" +317 56 training_loop """lcwa""" +317 56 evaluator """rankbased""" +317 57 dataset """kinships""" +317 57 model """hole""" +317 57 loss """crossentropy""" +317 57 regularizer """no""" +317 57 optimizer """adam""" +317 57 training_loop """lcwa""" +317 57 evaluator """rankbased""" +317 58 dataset """kinships""" +317 58 model """hole""" +317 58 loss """crossentropy""" +317 58 regularizer """no""" +317 58 optimizer """adam""" +317 58 training_loop """lcwa""" +317 58 evaluator """rankbased""" +317 59 dataset """kinships""" +317 59 model """hole""" +317 59 loss """crossentropy""" +317 59 regularizer """no""" +317 59 optimizer """adam""" +317 59 training_loop """lcwa""" +317 59 evaluator """rankbased""" +317 60 dataset """kinships""" +317 60 model """hole""" +317 60 loss """crossentropy""" +317 60 regularizer """no""" +317 60 optimizer """adam""" +317 60 training_loop """lcwa""" +317 60 evaluator """rankbased""" +317 61 dataset """kinships""" +317 61 model """hole""" +317 61 loss """crossentropy""" +317 61 regularizer """no""" +317 61 optimizer """adam""" +317 61 training_loop """lcwa""" +317 61 evaluator """rankbased""" +317 62 dataset """kinships""" +317 62 model """hole""" +317 62 loss """crossentropy""" +317 62 regularizer """no""" +317 62 optimizer """adam""" +317 62 training_loop """lcwa""" +317 62 evaluator """rankbased""" +317 63 dataset """kinships""" +317 63 model """hole""" +317 63 loss """crossentropy""" +317 63 regularizer """no""" +317 63 optimizer """adam""" +317 63 training_loop """lcwa""" +317 63 evaluator """rankbased""" +317 64 dataset """kinships""" +317 64 model """hole""" +317 64 
loss """crossentropy""" +317 64 regularizer """no""" +317 64 optimizer """adam""" +317 64 training_loop """lcwa""" +317 64 evaluator """rankbased""" +317 65 dataset """kinships""" +317 65 model """hole""" +317 65 loss """crossentropy""" +317 65 regularizer """no""" +317 65 optimizer """adam""" +317 65 training_loop """lcwa""" +317 65 evaluator """rankbased""" +317 66 dataset """kinships""" +317 66 model """hole""" +317 66 loss """crossentropy""" +317 66 regularizer """no""" +317 66 optimizer """adam""" +317 66 training_loop """lcwa""" +317 66 evaluator """rankbased""" +317 67 dataset """kinships""" +317 67 model """hole""" +317 67 loss """crossentropy""" +317 67 regularizer """no""" +317 67 optimizer """adam""" +317 67 training_loop """lcwa""" +317 67 evaluator """rankbased""" +317 68 dataset """kinships""" +317 68 model """hole""" +317 68 loss """crossentropy""" +317 68 regularizer """no""" +317 68 optimizer """adam""" +317 68 training_loop """lcwa""" +317 68 evaluator """rankbased""" +317 69 dataset """kinships""" +317 69 model """hole""" +317 69 loss """crossentropy""" +317 69 regularizer """no""" +317 69 optimizer """adam""" +317 69 training_loop """lcwa""" +317 69 evaluator """rankbased""" +317 70 dataset """kinships""" +317 70 model """hole""" +317 70 loss """crossentropy""" +317 70 regularizer """no""" +317 70 optimizer """adam""" +317 70 training_loop """lcwa""" +317 70 evaluator """rankbased""" +317 71 dataset """kinships""" +317 71 model """hole""" +317 71 loss """crossentropy""" +317 71 regularizer """no""" +317 71 optimizer """adam""" +317 71 training_loop """lcwa""" +317 71 evaluator """rankbased""" +317 72 dataset """kinships""" +317 72 model """hole""" +317 72 loss """crossentropy""" +317 72 regularizer """no""" +317 72 optimizer """adam""" +317 72 training_loop """lcwa""" +317 72 evaluator """rankbased""" +317 73 dataset """kinships""" +317 73 model """hole""" +317 73 loss """crossentropy""" +317 73 regularizer """no""" +317 73 optimizer """adam""" 
+317 73 training_loop """lcwa""" +317 73 evaluator """rankbased""" +317 74 dataset """kinships""" +317 74 model """hole""" +317 74 loss """crossentropy""" +317 74 regularizer """no""" +317 74 optimizer """adam""" +317 74 training_loop """lcwa""" +317 74 evaluator """rankbased""" +317 75 dataset """kinships""" +317 75 model """hole""" +317 75 loss """crossentropy""" +317 75 regularizer """no""" +317 75 optimizer """adam""" +317 75 training_loop """lcwa""" +317 75 evaluator """rankbased""" +317 76 dataset """kinships""" +317 76 model """hole""" +317 76 loss """crossentropy""" +317 76 regularizer """no""" +317 76 optimizer """adam""" +317 76 training_loop """lcwa""" +317 76 evaluator """rankbased""" +317 77 dataset """kinships""" +317 77 model """hole""" +317 77 loss """crossentropy""" +317 77 regularizer """no""" +317 77 optimizer """adam""" +317 77 training_loop """lcwa""" +317 77 evaluator """rankbased""" +317 78 dataset """kinships""" +317 78 model """hole""" +317 78 loss """crossentropy""" +317 78 regularizer """no""" +317 78 optimizer """adam""" +317 78 training_loop """lcwa""" +317 78 evaluator """rankbased""" +317 79 dataset """kinships""" +317 79 model """hole""" +317 79 loss """crossentropy""" +317 79 regularizer """no""" +317 79 optimizer """adam""" +317 79 training_loop """lcwa""" +317 79 evaluator """rankbased""" +317 80 dataset """kinships""" +317 80 model """hole""" +317 80 loss """crossentropy""" +317 80 regularizer """no""" +317 80 optimizer """adam""" +317 80 training_loop """lcwa""" +317 80 evaluator """rankbased""" +317 81 dataset """kinships""" +317 81 model """hole""" +317 81 loss """crossentropy""" +317 81 regularizer """no""" +317 81 optimizer """adam""" +317 81 training_loop """lcwa""" +317 81 evaluator """rankbased""" +317 82 dataset """kinships""" +317 82 model """hole""" +317 82 loss """crossentropy""" +317 82 regularizer """no""" +317 82 optimizer """adam""" +317 82 training_loop """lcwa""" +317 82 evaluator """rankbased""" +317 83 dataset 
"""kinships""" +317 83 model """hole""" +317 83 loss """crossentropy""" +317 83 regularizer """no""" +317 83 optimizer """adam""" +317 83 training_loop """lcwa""" +317 83 evaluator """rankbased""" +317 84 dataset """kinships""" +317 84 model """hole""" +317 84 loss """crossentropy""" +317 84 regularizer """no""" +317 84 optimizer """adam""" +317 84 training_loop """lcwa""" +317 84 evaluator """rankbased""" +317 85 dataset """kinships""" +317 85 model """hole""" +317 85 loss """crossentropy""" +317 85 regularizer """no""" +317 85 optimizer """adam""" +317 85 training_loop """lcwa""" +317 85 evaluator """rankbased""" +317 86 dataset """kinships""" +317 86 model """hole""" +317 86 loss """crossentropy""" +317 86 regularizer """no""" +317 86 optimizer """adam""" +317 86 training_loop """lcwa""" +317 86 evaluator """rankbased""" +317 87 dataset """kinships""" +317 87 model """hole""" +317 87 loss """crossentropy""" +317 87 regularizer """no""" +317 87 optimizer """adam""" +317 87 training_loop """lcwa""" +317 87 evaluator """rankbased""" +317 88 dataset """kinships""" +317 88 model """hole""" +317 88 loss """crossentropy""" +317 88 regularizer """no""" +317 88 optimizer """adam""" +317 88 training_loop """lcwa""" +317 88 evaluator """rankbased""" +317 89 dataset """kinships""" +317 89 model """hole""" +317 89 loss """crossentropy""" +317 89 regularizer """no""" +317 89 optimizer """adam""" +317 89 training_loop """lcwa""" +317 89 evaluator """rankbased""" +317 90 dataset """kinships""" +317 90 model """hole""" +317 90 loss """crossentropy""" +317 90 regularizer """no""" +317 90 optimizer """adam""" +317 90 training_loop """lcwa""" +317 90 evaluator """rankbased""" +317 91 dataset """kinships""" +317 91 model """hole""" +317 91 loss """crossentropy""" +317 91 regularizer """no""" +317 91 optimizer """adam""" +317 91 training_loop """lcwa""" +317 91 evaluator """rankbased""" +317 92 dataset """kinships""" +317 92 model """hole""" +317 92 loss """crossentropy""" +317 92 
regularizer """no""" +317 92 optimizer """adam""" +317 92 training_loop """lcwa""" +317 92 evaluator """rankbased""" +317 93 dataset """kinships""" +317 93 model """hole""" +317 93 loss """crossentropy""" +317 93 regularizer """no""" +317 93 optimizer """adam""" +317 93 training_loop """lcwa""" +317 93 evaluator """rankbased""" +317 94 dataset """kinships""" +317 94 model """hole""" +317 94 loss """crossentropy""" +317 94 regularizer """no""" +317 94 optimizer """adam""" +317 94 training_loop """lcwa""" +317 94 evaluator """rankbased""" +317 95 dataset """kinships""" +317 95 model """hole""" +317 95 loss """crossentropy""" +317 95 regularizer """no""" +317 95 optimizer """adam""" +317 95 training_loop """lcwa""" +317 95 evaluator """rankbased""" +317 96 dataset """kinships""" +317 96 model """hole""" +317 96 loss """crossentropy""" +317 96 regularizer """no""" +317 96 optimizer """adam""" +317 96 training_loop """lcwa""" +317 96 evaluator """rankbased""" +317 97 dataset """kinships""" +317 97 model """hole""" +317 97 loss """crossentropy""" +317 97 regularizer """no""" +317 97 optimizer """adam""" +317 97 training_loop """lcwa""" +317 97 evaluator """rankbased""" +317 98 dataset """kinships""" +317 98 model """hole""" +317 98 loss """crossentropy""" +317 98 regularizer """no""" +317 98 optimizer """adam""" +317 98 training_loop """lcwa""" +317 98 evaluator """rankbased""" +317 99 dataset """kinships""" +317 99 model """hole""" +317 99 loss """crossentropy""" +317 99 regularizer """no""" +317 99 optimizer """adam""" +317 99 training_loop """lcwa""" +317 99 evaluator """rankbased""" +317 100 dataset """kinships""" +317 100 model """hole""" +317 100 loss """crossentropy""" +317 100 regularizer """no""" +317 100 optimizer """adam""" +317 100 training_loop """lcwa""" +317 100 evaluator """rankbased""" +318 1 model.embedding_dim 1.0 +318 1 optimizer.lr 0.003563852703374653 +318 1 negative_sampler.num_negs_per_pos 72.0 +318 1 training.batch_size 2.0 +318 2 
model.embedding_dim 1.0 +318 2 optimizer.lr 0.04268105253078028 +318 2 negative_sampler.num_negs_per_pos 86.0 +318 2 training.batch_size 1.0 +318 3 model.embedding_dim 2.0 +318 3 optimizer.lr 0.007692767514862787 +318 3 negative_sampler.num_negs_per_pos 24.0 +318 3 training.batch_size 0.0 +318 4 model.embedding_dim 1.0 +318 4 optimizer.lr 0.015537806164282896 +318 4 negative_sampler.num_negs_per_pos 36.0 +318 4 training.batch_size 0.0 +318 5 model.embedding_dim 2.0 +318 5 optimizer.lr 0.002377610160738445 +318 5 negative_sampler.num_negs_per_pos 37.0 +318 5 training.batch_size 2.0 +318 6 model.embedding_dim 0.0 +318 6 optimizer.lr 0.006450690450814219 +318 6 negative_sampler.num_negs_per_pos 34.0 +318 6 training.batch_size 2.0 +318 7 model.embedding_dim 1.0 +318 7 optimizer.lr 0.013621135274693472 +318 7 negative_sampler.num_negs_per_pos 58.0 +318 7 training.batch_size 2.0 +318 8 model.embedding_dim 0.0 +318 8 optimizer.lr 0.005490268553459176 +318 8 negative_sampler.num_negs_per_pos 95.0 +318 8 training.batch_size 2.0 +318 9 model.embedding_dim 1.0 +318 9 optimizer.lr 0.0019126819360218966 +318 9 negative_sampler.num_negs_per_pos 63.0 +318 9 training.batch_size 0.0 +318 10 model.embedding_dim 2.0 +318 10 optimizer.lr 0.019624867961563795 +318 10 negative_sampler.num_negs_per_pos 80.0 +318 10 training.batch_size 2.0 +318 11 model.embedding_dim 1.0 +318 11 optimizer.lr 0.018406990656030704 +318 11 negative_sampler.num_negs_per_pos 49.0 +318 11 training.batch_size 0.0 +318 12 model.embedding_dim 2.0 +318 12 optimizer.lr 0.002661836715801773 +318 12 negative_sampler.num_negs_per_pos 73.0 +318 12 training.batch_size 1.0 +318 13 model.embedding_dim 1.0 +318 13 optimizer.lr 0.005028786725342548 +318 13 negative_sampler.num_negs_per_pos 64.0 +318 13 training.batch_size 1.0 +318 14 model.embedding_dim 2.0 +318 14 optimizer.lr 0.05776002537891509 +318 14 negative_sampler.num_negs_per_pos 98.0 +318 14 training.batch_size 2.0 +318 15 model.embedding_dim 2.0 +318 15 
optimizer.lr 0.009742297106820298 +318 15 negative_sampler.num_negs_per_pos 47.0 +318 15 training.batch_size 2.0 +318 16 model.embedding_dim 1.0 +318 16 optimizer.lr 0.003244760085047664 +318 16 negative_sampler.num_negs_per_pos 59.0 +318 16 training.batch_size 2.0 +318 17 model.embedding_dim 1.0 +318 17 optimizer.lr 0.004350835732808728 +318 17 negative_sampler.num_negs_per_pos 4.0 +318 17 training.batch_size 2.0 +318 18 model.embedding_dim 0.0 +318 18 optimizer.lr 0.0031491197279613623 +318 18 negative_sampler.num_negs_per_pos 72.0 +318 18 training.batch_size 0.0 +318 19 model.embedding_dim 2.0 +318 19 optimizer.lr 0.0028783268169273327 +318 19 negative_sampler.num_negs_per_pos 34.0 +318 19 training.batch_size 0.0 +318 20 model.embedding_dim 2.0 +318 20 optimizer.lr 0.02291238269947543 +318 20 negative_sampler.num_negs_per_pos 52.0 +318 20 training.batch_size 0.0 +318 21 model.embedding_dim 2.0 +318 21 optimizer.lr 0.020203823919526027 +318 21 negative_sampler.num_negs_per_pos 14.0 +318 21 training.batch_size 2.0 +318 22 model.embedding_dim 1.0 +318 22 optimizer.lr 0.0727091237511742 +318 22 negative_sampler.num_negs_per_pos 38.0 +318 22 training.batch_size 1.0 +318 23 model.embedding_dim 1.0 +318 23 optimizer.lr 0.005194328700590711 +318 23 negative_sampler.num_negs_per_pos 18.0 +318 23 training.batch_size 2.0 +318 24 model.embedding_dim 0.0 +318 24 optimizer.lr 0.007067899694908071 +318 24 negative_sampler.num_negs_per_pos 34.0 +318 24 training.batch_size 1.0 +318 25 model.embedding_dim 1.0 +318 25 optimizer.lr 0.011987855539409962 +318 25 negative_sampler.num_negs_per_pos 36.0 +318 25 training.batch_size 2.0 +318 26 model.embedding_dim 1.0 +318 26 optimizer.lr 0.0041314151110332775 +318 26 negative_sampler.num_negs_per_pos 24.0 +318 26 training.batch_size 0.0 +318 27 model.embedding_dim 0.0 +318 27 optimizer.lr 0.09880995004890124 +318 27 negative_sampler.num_negs_per_pos 5.0 +318 27 training.batch_size 1.0 +318 28 model.embedding_dim 1.0 +318 28 optimizer.lr 
0.08215305104340476 +318 28 negative_sampler.num_negs_per_pos 58.0 +318 28 training.batch_size 1.0 +318 29 model.embedding_dim 1.0 +318 29 optimizer.lr 0.015122609929288223 +318 29 negative_sampler.num_negs_per_pos 64.0 +318 29 training.batch_size 0.0 +318 30 model.embedding_dim 1.0 +318 30 optimizer.lr 0.02185629677169631 +318 30 negative_sampler.num_negs_per_pos 39.0 +318 30 training.batch_size 1.0 +318 31 model.embedding_dim 0.0 +318 31 optimizer.lr 0.0015592452861945194 +318 31 negative_sampler.num_negs_per_pos 38.0 +318 31 training.batch_size 2.0 +318 32 model.embedding_dim 1.0 +318 32 optimizer.lr 0.0587249747416973 +318 32 negative_sampler.num_negs_per_pos 92.0 +318 32 training.batch_size 1.0 +318 33 model.embedding_dim 0.0 +318 33 optimizer.lr 0.01912211801686772 +318 33 negative_sampler.num_negs_per_pos 7.0 +318 33 training.batch_size 1.0 +318 34 model.embedding_dim 2.0 +318 34 optimizer.lr 0.051162545499211794 +318 34 negative_sampler.num_negs_per_pos 48.0 +318 34 training.batch_size 1.0 +318 35 model.embedding_dim 1.0 +318 35 optimizer.lr 0.002620396872685545 +318 35 negative_sampler.num_negs_per_pos 68.0 +318 35 training.batch_size 2.0 +318 36 model.embedding_dim 2.0 +318 36 optimizer.lr 0.038510060101427504 +318 36 negative_sampler.num_negs_per_pos 21.0 +318 36 training.batch_size 2.0 +318 37 model.embedding_dim 0.0 +318 37 optimizer.lr 0.0010564521008264924 +318 37 negative_sampler.num_negs_per_pos 72.0 +318 37 training.batch_size 2.0 +318 38 model.embedding_dim 0.0 +318 38 optimizer.lr 0.04572452137603747 +318 38 negative_sampler.num_negs_per_pos 80.0 +318 38 training.batch_size 2.0 +318 39 model.embedding_dim 0.0 +318 39 optimizer.lr 0.028597756185552513 +318 39 negative_sampler.num_negs_per_pos 14.0 +318 39 training.batch_size 0.0 +318 40 model.embedding_dim 0.0 +318 40 optimizer.lr 0.0235152743445721 +318 40 negative_sampler.num_negs_per_pos 87.0 +318 40 training.batch_size 2.0 +318 41 model.embedding_dim 1.0 +318 41 optimizer.lr 
0.0018219005642523963 +318 41 negative_sampler.num_negs_per_pos 66.0 +318 41 training.batch_size 0.0 +318 42 model.embedding_dim 0.0 +318 42 optimizer.lr 0.029551521923943055 +318 42 negative_sampler.num_negs_per_pos 33.0 +318 42 training.batch_size 1.0 +318 43 model.embedding_dim 0.0 +318 43 optimizer.lr 0.004234464622325423 +318 43 negative_sampler.num_negs_per_pos 86.0 +318 43 training.batch_size 1.0 +318 44 model.embedding_dim 0.0 +318 44 optimizer.lr 0.06962194096734771 +318 44 negative_sampler.num_negs_per_pos 6.0 +318 44 training.batch_size 0.0 +318 45 model.embedding_dim 2.0 +318 45 optimizer.lr 0.014294680839215009 +318 45 negative_sampler.num_negs_per_pos 51.0 +318 45 training.batch_size 1.0 +318 46 model.embedding_dim 1.0 +318 46 optimizer.lr 0.059566072060666816 +318 46 negative_sampler.num_negs_per_pos 62.0 +318 46 training.batch_size 2.0 +318 47 model.embedding_dim 0.0 +318 47 optimizer.lr 0.005127619065390536 +318 47 negative_sampler.num_negs_per_pos 61.0 +318 47 training.batch_size 2.0 +318 48 model.embedding_dim 0.0 +318 48 optimizer.lr 0.001228191934569492 +318 48 negative_sampler.num_negs_per_pos 13.0 +318 48 training.batch_size 1.0 +318 49 model.embedding_dim 2.0 +318 49 optimizer.lr 0.001490144891814769 +318 49 negative_sampler.num_negs_per_pos 40.0 +318 49 training.batch_size 0.0 +318 50 model.embedding_dim 0.0 +318 50 optimizer.lr 0.0011711169270374481 +318 50 negative_sampler.num_negs_per_pos 84.0 +318 50 training.batch_size 1.0 +318 51 model.embedding_dim 1.0 +318 51 optimizer.lr 0.008025867409979246 +318 51 negative_sampler.num_negs_per_pos 46.0 +318 51 training.batch_size 0.0 +318 52 model.embedding_dim 2.0 +318 52 optimizer.lr 0.035360911961987325 +318 52 negative_sampler.num_negs_per_pos 76.0 +318 52 training.batch_size 1.0 +318 53 model.embedding_dim 1.0 +318 53 optimizer.lr 0.001128856586854898 +318 53 negative_sampler.num_negs_per_pos 60.0 +318 53 training.batch_size 2.0 +318 54 model.embedding_dim 0.0 +318 54 optimizer.lr 
0.026314553751712808 +318 54 negative_sampler.num_negs_per_pos 72.0 +318 54 training.batch_size 0.0 +318 55 model.embedding_dim 0.0 +318 55 optimizer.lr 0.021929101446025452 +318 55 negative_sampler.num_negs_per_pos 2.0 +318 55 training.batch_size 2.0 +318 56 model.embedding_dim 2.0 +318 56 optimizer.lr 0.002215463459808556 +318 56 negative_sampler.num_negs_per_pos 19.0 +318 56 training.batch_size 0.0 +318 57 model.embedding_dim 1.0 +318 57 optimizer.lr 0.002578821379005328 +318 57 negative_sampler.num_negs_per_pos 78.0 +318 57 training.batch_size 2.0 +318 58 model.embedding_dim 2.0 +318 58 optimizer.lr 0.0026154342625972138 +318 58 negative_sampler.num_negs_per_pos 58.0 +318 58 training.batch_size 2.0 +318 59 model.embedding_dim 1.0 +318 59 optimizer.lr 0.03874076429983155 +318 59 negative_sampler.num_negs_per_pos 35.0 +318 59 training.batch_size 0.0 +318 60 model.embedding_dim 1.0 +318 60 optimizer.lr 0.039194537923451574 +318 60 negative_sampler.num_negs_per_pos 73.0 +318 60 training.batch_size 0.0 +318 61 model.embedding_dim 0.0 +318 61 optimizer.lr 0.0034938660454102394 +318 61 negative_sampler.num_negs_per_pos 62.0 +318 61 training.batch_size 1.0 +318 62 model.embedding_dim 1.0 +318 62 optimizer.lr 0.0019590787888017656 +318 62 negative_sampler.num_negs_per_pos 18.0 +318 62 training.batch_size 0.0 +318 63 model.embedding_dim 2.0 +318 63 optimizer.lr 0.006571401733547729 +318 63 negative_sampler.num_negs_per_pos 73.0 +318 63 training.batch_size 0.0 +318 64 model.embedding_dim 2.0 +318 64 optimizer.lr 0.01594419036012147 +318 64 negative_sampler.num_negs_per_pos 69.0 +318 64 training.batch_size 0.0 +318 65 model.embedding_dim 1.0 +318 65 optimizer.lr 0.00528923738116892 +318 65 negative_sampler.num_negs_per_pos 65.0 +318 65 training.batch_size 0.0 +318 66 model.embedding_dim 0.0 +318 66 optimizer.lr 0.06451358164862318 +318 66 negative_sampler.num_negs_per_pos 46.0 +318 66 training.batch_size 2.0 +318 67 model.embedding_dim 2.0 +318 67 optimizer.lr 
0.009065506207792886 +318 67 negative_sampler.num_negs_per_pos 55.0 +318 67 training.batch_size 2.0 +318 68 model.embedding_dim 1.0 +318 68 optimizer.lr 0.07106689947810654 +318 68 negative_sampler.num_negs_per_pos 62.0 +318 68 training.batch_size 2.0 +318 69 model.embedding_dim 2.0 +318 69 optimizer.lr 0.0167009208212927 +318 69 negative_sampler.num_negs_per_pos 89.0 +318 69 training.batch_size 0.0 +318 70 model.embedding_dim 1.0 +318 70 optimizer.lr 0.012546432247390582 +318 70 negative_sampler.num_negs_per_pos 26.0 +318 70 training.batch_size 1.0 +318 71 model.embedding_dim 2.0 +318 71 optimizer.lr 0.04358027764621014 +318 71 negative_sampler.num_negs_per_pos 29.0 +318 71 training.batch_size 1.0 +318 72 model.embedding_dim 0.0 +318 72 optimizer.lr 0.005159609781629139 +318 72 negative_sampler.num_negs_per_pos 36.0 +318 72 training.batch_size 1.0 +318 73 model.embedding_dim 1.0 +318 73 optimizer.lr 0.0017313126309643338 +318 73 negative_sampler.num_negs_per_pos 68.0 +318 73 training.batch_size 2.0 +318 74 model.embedding_dim 1.0 +318 74 optimizer.lr 0.003370688156571512 +318 74 negative_sampler.num_negs_per_pos 86.0 +318 74 training.batch_size 2.0 +318 75 model.embedding_dim 1.0 +318 75 optimizer.lr 0.007853377870919071 +318 75 negative_sampler.num_negs_per_pos 33.0 +318 75 training.batch_size 0.0 +318 76 model.embedding_dim 1.0 +318 76 optimizer.lr 0.0011261083152749435 +318 76 negative_sampler.num_negs_per_pos 46.0 +318 76 training.batch_size 0.0 +318 77 model.embedding_dim 2.0 +318 77 optimizer.lr 0.06705490435807708 +318 77 negative_sampler.num_negs_per_pos 14.0 +318 77 training.batch_size 1.0 +318 78 model.embedding_dim 1.0 +318 78 optimizer.lr 0.07006350424652748 +318 78 negative_sampler.num_negs_per_pos 52.0 +318 78 training.batch_size 0.0 +318 79 model.embedding_dim 0.0 +318 79 optimizer.lr 0.005485344726388098 +318 79 negative_sampler.num_negs_per_pos 19.0 +318 79 training.batch_size 1.0 +318 80 model.embedding_dim 2.0 +318 80 optimizer.lr 
0.02670086044777809 +318 80 negative_sampler.num_negs_per_pos 88.0 +318 80 training.batch_size 0.0 +318 81 model.embedding_dim 2.0 +318 81 optimizer.lr 0.02220673900985947 +318 81 negative_sampler.num_negs_per_pos 20.0 +318 81 training.batch_size 2.0 +318 82 model.embedding_dim 0.0 +318 82 optimizer.lr 0.025177805160633795 +318 82 negative_sampler.num_negs_per_pos 9.0 +318 82 training.batch_size 0.0 +318 83 model.embedding_dim 2.0 +318 83 optimizer.lr 0.07702390779571755 +318 83 negative_sampler.num_negs_per_pos 29.0 +318 83 training.batch_size 1.0 +318 84 model.embedding_dim 2.0 +318 84 optimizer.lr 0.001797471944200027 +318 84 negative_sampler.num_negs_per_pos 28.0 +318 84 training.batch_size 1.0 +318 85 model.embedding_dim 0.0 +318 85 optimizer.lr 0.003364492366061316 +318 85 negative_sampler.num_negs_per_pos 83.0 +318 85 training.batch_size 0.0 +318 86 model.embedding_dim 1.0 +318 86 optimizer.lr 0.04834464590103561 +318 86 negative_sampler.num_negs_per_pos 70.0 +318 86 training.batch_size 0.0 +318 87 model.embedding_dim 1.0 +318 87 optimizer.lr 0.005896236707888172 +318 87 negative_sampler.num_negs_per_pos 31.0 +318 87 training.batch_size 1.0 +318 88 model.embedding_dim 0.0 +318 88 optimizer.lr 0.004127941372417029 +318 88 negative_sampler.num_negs_per_pos 68.0 +318 88 training.batch_size 1.0 +318 89 model.embedding_dim 0.0 +318 89 optimizer.lr 0.005964498771131311 +318 89 negative_sampler.num_negs_per_pos 17.0 +318 89 training.batch_size 0.0 +318 90 model.embedding_dim 2.0 +318 90 optimizer.lr 0.01462182181215796 +318 90 negative_sampler.num_negs_per_pos 63.0 +318 90 training.batch_size 1.0 +318 91 model.embedding_dim 0.0 +318 91 optimizer.lr 0.032713342904949864 +318 91 negative_sampler.num_negs_per_pos 62.0 +318 91 training.batch_size 1.0 +318 92 model.embedding_dim 0.0 +318 92 optimizer.lr 0.008216981680695316 +318 92 negative_sampler.num_negs_per_pos 87.0 +318 92 training.batch_size 0.0 +318 93 model.embedding_dim 1.0 +318 93 optimizer.lr 
0.0023052282532132207 +318 93 negative_sampler.num_negs_per_pos 3.0 +318 93 training.batch_size 1.0 +318 94 model.embedding_dim 2.0 +318 94 optimizer.lr 0.02808678826943411 +318 94 negative_sampler.num_negs_per_pos 86.0 +318 94 training.batch_size 2.0 +318 95 model.embedding_dim 1.0 +318 95 optimizer.lr 0.013689980192042697 +318 95 negative_sampler.num_negs_per_pos 63.0 +318 95 training.batch_size 0.0 +318 96 model.embedding_dim 2.0 +318 96 optimizer.lr 0.010194269168190969 +318 96 negative_sampler.num_negs_per_pos 91.0 +318 96 training.batch_size 0.0 +318 97 model.embedding_dim 2.0 +318 97 optimizer.lr 0.06465848479839424 +318 97 negative_sampler.num_negs_per_pos 48.0 +318 97 training.batch_size 0.0 +318 98 model.embedding_dim 2.0 +318 98 optimizer.lr 0.0031206914196263724 +318 98 negative_sampler.num_negs_per_pos 46.0 +318 98 training.batch_size 2.0 +318 99 model.embedding_dim 0.0 +318 99 optimizer.lr 0.0020308984101973545 +318 99 negative_sampler.num_negs_per_pos 65.0 +318 99 training.batch_size 0.0 +318 100 model.embedding_dim 0.0 +318 100 optimizer.lr 0.0270148294965798 +318 100 negative_sampler.num_negs_per_pos 66.0 +318 100 training.batch_size 1.0 +318 1 dataset """kinships""" +318 1 model """hole""" +318 1 loss """bceaftersigmoid""" +318 1 regularizer """no""" +318 1 optimizer """adam""" +318 1 training_loop """owa""" +318 1 negative_sampler """basic""" +318 1 evaluator """rankbased""" +318 2 dataset """kinships""" +318 2 model """hole""" +318 2 loss """bceaftersigmoid""" +318 2 regularizer """no""" +318 2 optimizer """adam""" +318 2 training_loop """owa""" +318 2 negative_sampler """basic""" +318 2 evaluator """rankbased""" +318 3 dataset """kinships""" +318 3 model """hole""" +318 3 loss """bceaftersigmoid""" +318 3 regularizer """no""" +318 3 optimizer """adam""" +318 3 training_loop """owa""" +318 3 negative_sampler """basic""" +318 3 evaluator """rankbased""" +318 4 dataset """kinships""" +318 4 model """hole""" +318 4 loss """bceaftersigmoid""" +318 4 
regularizer """no""" +318 4 optimizer """adam""" +318 4 training_loop """owa""" +318 4 negative_sampler """basic""" +318 4 evaluator """rankbased""" +318 5 dataset """kinships""" +318 5 model """hole""" +318 5 loss """bceaftersigmoid""" +318 5 regularizer """no""" +318 5 optimizer """adam""" +318 5 training_loop """owa""" +318 5 negative_sampler """basic""" +318 5 evaluator """rankbased""" +318 6 dataset """kinships""" +318 6 model """hole""" +318 6 loss """bceaftersigmoid""" +318 6 regularizer """no""" +318 6 optimizer """adam""" +318 6 training_loop """owa""" +318 6 negative_sampler """basic""" +318 6 evaluator """rankbased""" +318 7 dataset """kinships""" +318 7 model """hole""" +318 7 loss """bceaftersigmoid""" +318 7 regularizer """no""" +318 7 optimizer """adam""" +318 7 training_loop """owa""" +318 7 negative_sampler """basic""" +318 7 evaluator """rankbased""" +318 8 dataset """kinships""" +318 8 model """hole""" +318 8 loss """bceaftersigmoid""" +318 8 regularizer """no""" +318 8 optimizer """adam""" +318 8 training_loop """owa""" +318 8 negative_sampler """basic""" +318 8 evaluator """rankbased""" +318 9 dataset """kinships""" +318 9 model """hole""" +318 9 loss """bceaftersigmoid""" +318 9 regularizer """no""" +318 9 optimizer """adam""" +318 9 training_loop """owa""" +318 9 negative_sampler """basic""" +318 9 evaluator """rankbased""" +318 10 dataset """kinships""" +318 10 model """hole""" +318 10 loss """bceaftersigmoid""" +318 10 regularizer """no""" +318 10 optimizer """adam""" +318 10 training_loop """owa""" +318 10 negative_sampler """basic""" +318 10 evaluator """rankbased""" +318 11 dataset """kinships""" +318 11 model """hole""" +318 11 loss """bceaftersigmoid""" +318 11 regularizer """no""" +318 11 optimizer """adam""" +318 11 training_loop """owa""" +318 11 negative_sampler """basic""" +318 11 evaluator """rankbased""" +318 12 dataset """kinships""" +318 12 model """hole""" +318 12 loss """bceaftersigmoid""" +318 12 regularizer """no""" +318 
12 optimizer """adam""" +318 12 training_loop """owa""" +318 12 negative_sampler """basic""" +318 12 evaluator """rankbased""" +318 13 dataset """kinships""" +318 13 model """hole""" +318 13 loss """bceaftersigmoid""" +318 13 regularizer """no""" +318 13 optimizer """adam""" +318 13 training_loop """owa""" +318 13 negative_sampler """basic""" +318 13 evaluator """rankbased""" +318 14 dataset """kinships""" +318 14 model """hole""" +318 14 loss """bceaftersigmoid""" +318 14 regularizer """no""" +318 14 optimizer """adam""" +318 14 training_loop """owa""" +318 14 negative_sampler """basic""" +318 14 evaluator """rankbased""" +318 15 dataset """kinships""" +318 15 model """hole""" +318 15 loss """bceaftersigmoid""" +318 15 regularizer """no""" +318 15 optimizer """adam""" +318 15 training_loop """owa""" +318 15 negative_sampler """basic""" +318 15 evaluator """rankbased""" +318 16 dataset """kinships""" +318 16 model """hole""" +318 16 loss """bceaftersigmoid""" +318 16 regularizer """no""" +318 16 optimizer """adam""" +318 16 training_loop """owa""" +318 16 negative_sampler """basic""" +318 16 evaluator """rankbased""" +318 17 dataset """kinships""" +318 17 model """hole""" +318 17 loss """bceaftersigmoid""" +318 17 regularizer """no""" +318 17 optimizer """adam""" +318 17 training_loop """owa""" +318 17 negative_sampler """basic""" +318 17 evaluator """rankbased""" +318 18 dataset """kinships""" +318 18 model """hole""" +318 18 loss """bceaftersigmoid""" +318 18 regularizer """no""" +318 18 optimizer """adam""" +318 18 training_loop """owa""" +318 18 negative_sampler """basic""" +318 18 evaluator """rankbased""" +318 19 dataset """kinships""" +318 19 model """hole""" +318 19 loss """bceaftersigmoid""" +318 19 regularizer """no""" +318 19 optimizer """adam""" +318 19 training_loop """owa""" +318 19 negative_sampler """basic""" +318 19 evaluator """rankbased""" +318 20 dataset """kinships""" +318 20 model """hole""" +318 20 loss """bceaftersigmoid""" +318 20 
regularizer """no""" +318 20 optimizer """adam""" +318 20 training_loop """owa""" +318 20 negative_sampler """basic""" +318 20 evaluator """rankbased""" +318 21 dataset """kinships""" +318 21 model """hole""" +318 21 loss """bceaftersigmoid""" +318 21 regularizer """no""" +318 21 optimizer """adam""" +318 21 training_loop """owa""" +318 21 negative_sampler """basic""" +318 21 evaluator """rankbased""" +318 22 dataset """kinships""" +318 22 model """hole""" +318 22 loss """bceaftersigmoid""" +318 22 regularizer """no""" +318 22 optimizer """adam""" +318 22 training_loop """owa""" +318 22 negative_sampler """basic""" +318 22 evaluator """rankbased""" +318 23 dataset """kinships""" +318 23 model """hole""" +318 23 loss """bceaftersigmoid""" +318 23 regularizer """no""" +318 23 optimizer """adam""" +318 23 training_loop """owa""" +318 23 negative_sampler """basic""" +318 23 evaluator """rankbased""" +318 24 dataset """kinships""" +318 24 model """hole""" +318 24 loss """bceaftersigmoid""" +318 24 regularizer """no""" +318 24 optimizer """adam""" +318 24 training_loop """owa""" +318 24 negative_sampler """basic""" +318 24 evaluator """rankbased""" +318 25 dataset """kinships""" +318 25 model """hole""" +318 25 loss """bceaftersigmoid""" +318 25 regularizer """no""" +318 25 optimizer """adam""" +318 25 training_loop """owa""" +318 25 negative_sampler """basic""" +318 25 evaluator """rankbased""" +318 26 dataset """kinships""" +318 26 model """hole""" +318 26 loss """bceaftersigmoid""" +318 26 regularizer """no""" +318 26 optimizer """adam""" +318 26 training_loop """owa""" +318 26 negative_sampler """basic""" +318 26 evaluator """rankbased""" +318 27 dataset """kinships""" +318 27 model """hole""" +318 27 loss """bceaftersigmoid""" +318 27 regularizer """no""" +318 27 optimizer """adam""" +318 27 training_loop """owa""" +318 27 negative_sampler """basic""" +318 27 evaluator """rankbased""" +318 28 dataset """kinships""" +318 28 model """hole""" +318 28 loss 
"""bceaftersigmoid""" +318 28 regularizer """no""" +318 28 optimizer """adam""" +318 28 training_loop """owa""" +318 28 negative_sampler """basic""" +318 28 evaluator """rankbased""" +318 29 dataset """kinships""" +318 29 model """hole""" +318 29 loss """bceaftersigmoid""" +318 29 regularizer """no""" +318 29 optimizer """adam""" +318 29 training_loop """owa""" +318 29 negative_sampler """basic""" +318 29 evaluator """rankbased""" +318 30 dataset """kinships""" +318 30 model """hole""" +318 30 loss """bceaftersigmoid""" +318 30 regularizer """no""" +318 30 optimizer """adam""" +318 30 training_loop """owa""" +318 30 negative_sampler """basic""" +318 30 evaluator """rankbased""" +318 31 dataset """kinships""" +318 31 model """hole""" +318 31 loss """bceaftersigmoid""" +318 31 regularizer """no""" +318 31 optimizer """adam""" +318 31 training_loop """owa""" +318 31 negative_sampler """basic""" +318 31 evaluator """rankbased""" +318 32 dataset """kinships""" +318 32 model """hole""" +318 32 loss """bceaftersigmoid""" +318 32 regularizer """no""" +318 32 optimizer """adam""" +318 32 training_loop """owa""" +318 32 negative_sampler """basic""" +318 32 evaluator """rankbased""" +318 33 dataset """kinships""" +318 33 model """hole""" +318 33 loss """bceaftersigmoid""" +318 33 regularizer """no""" +318 33 optimizer """adam""" +318 33 training_loop """owa""" +318 33 negative_sampler """basic""" +318 33 evaluator """rankbased""" +318 34 dataset """kinships""" +318 34 model """hole""" +318 34 loss """bceaftersigmoid""" +318 34 regularizer """no""" +318 34 optimizer """adam""" +318 34 training_loop """owa""" +318 34 negative_sampler """basic""" +318 34 evaluator """rankbased""" +318 35 dataset """kinships""" +318 35 model """hole""" +318 35 loss """bceaftersigmoid""" +318 35 regularizer """no""" +318 35 optimizer """adam""" +318 35 training_loop """owa""" +318 35 negative_sampler """basic""" +318 35 evaluator """rankbased""" +318 36 dataset """kinships""" +318 36 model 
"""hole""" +318 36 loss """bceaftersigmoid""" +318 36 regularizer """no""" +318 36 optimizer """adam""" +318 36 training_loop """owa""" +318 36 negative_sampler """basic""" +318 36 evaluator """rankbased""" +318 37 dataset """kinships""" +318 37 model """hole""" +318 37 loss """bceaftersigmoid""" +318 37 regularizer """no""" +318 37 optimizer """adam""" +318 37 training_loop """owa""" +318 37 negative_sampler """basic""" +318 37 evaluator """rankbased""" +318 38 dataset """kinships""" +318 38 model """hole""" +318 38 loss """bceaftersigmoid""" +318 38 regularizer """no""" +318 38 optimizer """adam""" +318 38 training_loop """owa""" +318 38 negative_sampler """basic""" +318 38 evaluator """rankbased""" +318 39 dataset """kinships""" +318 39 model """hole""" +318 39 loss """bceaftersigmoid""" +318 39 regularizer """no""" +318 39 optimizer """adam""" +318 39 training_loop """owa""" +318 39 negative_sampler """basic""" +318 39 evaluator """rankbased""" +318 40 dataset """kinships""" +318 40 model """hole""" +318 40 loss """bceaftersigmoid""" +318 40 regularizer """no""" +318 40 optimizer """adam""" +318 40 training_loop """owa""" +318 40 negative_sampler """basic""" +318 40 evaluator """rankbased""" +318 41 dataset """kinships""" +318 41 model """hole""" +318 41 loss """bceaftersigmoid""" +318 41 regularizer """no""" +318 41 optimizer """adam""" +318 41 training_loop """owa""" +318 41 negative_sampler """basic""" +318 41 evaluator """rankbased""" +318 42 dataset """kinships""" +318 42 model """hole""" +318 42 loss """bceaftersigmoid""" +318 42 regularizer """no""" +318 42 optimizer """adam""" +318 42 training_loop """owa""" +318 42 negative_sampler """basic""" +318 42 evaluator """rankbased""" +318 43 dataset """kinships""" +318 43 model """hole""" +318 43 loss """bceaftersigmoid""" +318 43 regularizer """no""" +318 43 optimizer """adam""" +318 43 training_loop """owa""" +318 43 negative_sampler """basic""" +318 43 evaluator """rankbased""" +318 44 dataset 
"""kinships""" +318 44 model """hole""" +318 44 loss """bceaftersigmoid""" +318 44 regularizer """no""" +318 44 optimizer """adam""" +318 44 training_loop """owa""" +318 44 negative_sampler """basic""" +318 44 evaluator """rankbased""" +318 45 dataset """kinships""" +318 45 model """hole""" +318 45 loss """bceaftersigmoid""" +318 45 regularizer """no""" +318 45 optimizer """adam""" +318 45 training_loop """owa""" +318 45 negative_sampler """basic""" +318 45 evaluator """rankbased""" +318 46 dataset """kinships""" +318 46 model """hole""" +318 46 loss """bceaftersigmoid""" +318 46 regularizer """no""" +318 46 optimizer """adam""" +318 46 training_loop """owa""" +318 46 negative_sampler """basic""" +318 46 evaluator """rankbased""" +318 47 dataset """kinships""" +318 47 model """hole""" +318 47 loss """bceaftersigmoid""" +318 47 regularizer """no""" +318 47 optimizer """adam""" +318 47 training_loop """owa""" +318 47 negative_sampler """basic""" +318 47 evaluator """rankbased""" +318 48 dataset """kinships""" +318 48 model """hole""" +318 48 loss """bceaftersigmoid""" +318 48 regularizer """no""" +318 48 optimizer """adam""" +318 48 training_loop """owa""" +318 48 negative_sampler """basic""" +318 48 evaluator """rankbased""" +318 49 dataset """kinships""" +318 49 model """hole""" +318 49 loss """bceaftersigmoid""" +318 49 regularizer """no""" +318 49 optimizer """adam""" +318 49 training_loop """owa""" +318 49 negative_sampler """basic""" +318 49 evaluator """rankbased""" +318 50 dataset """kinships""" +318 50 model """hole""" +318 50 loss """bceaftersigmoid""" +318 50 regularizer """no""" +318 50 optimizer """adam""" +318 50 training_loop """owa""" +318 50 negative_sampler """basic""" +318 50 evaluator """rankbased""" +318 51 dataset """kinships""" +318 51 model """hole""" +318 51 loss """bceaftersigmoid""" +318 51 regularizer """no""" +318 51 optimizer """adam""" +318 51 training_loop """owa""" +318 51 negative_sampler """basic""" +318 51 evaluator """rankbased""" 
+318 52 dataset """kinships""" +318 52 model """hole""" +318 52 loss """bceaftersigmoid""" +318 52 regularizer """no""" +318 52 optimizer """adam""" +318 52 training_loop """owa""" +318 52 negative_sampler """basic""" +318 52 evaluator """rankbased""" +318 53 dataset """kinships""" +318 53 model """hole""" +318 53 loss """bceaftersigmoid""" +318 53 regularizer """no""" +318 53 optimizer """adam""" +318 53 training_loop """owa""" +318 53 negative_sampler """basic""" +318 53 evaluator """rankbased""" +318 54 dataset """kinships""" +318 54 model """hole""" +318 54 loss """bceaftersigmoid""" +318 54 regularizer """no""" +318 54 optimizer """adam""" +318 54 training_loop """owa""" +318 54 negative_sampler """basic""" +318 54 evaluator """rankbased""" +318 55 dataset """kinships""" +318 55 model """hole""" +318 55 loss """bceaftersigmoid""" +318 55 regularizer """no""" +318 55 optimizer """adam""" +318 55 training_loop """owa""" +318 55 negative_sampler """basic""" +318 55 evaluator """rankbased""" +318 56 dataset """kinships""" +318 56 model """hole""" +318 56 loss """bceaftersigmoid""" +318 56 regularizer """no""" +318 56 optimizer """adam""" +318 56 training_loop """owa""" +318 56 negative_sampler """basic""" +318 56 evaluator """rankbased""" +318 57 dataset """kinships""" +318 57 model """hole""" +318 57 loss """bceaftersigmoid""" +318 57 regularizer """no""" +318 57 optimizer """adam""" +318 57 training_loop """owa""" +318 57 negative_sampler """basic""" +318 57 evaluator """rankbased""" +318 58 dataset """kinships""" +318 58 model """hole""" +318 58 loss """bceaftersigmoid""" +318 58 regularizer """no""" +318 58 optimizer """adam""" +318 58 training_loop """owa""" +318 58 negative_sampler """basic""" +318 58 evaluator """rankbased""" +318 59 dataset """kinships""" +318 59 model """hole""" +318 59 loss """bceaftersigmoid""" +318 59 regularizer """no""" +318 59 optimizer """adam""" +318 59 training_loop """owa""" +318 59 negative_sampler """basic""" +318 59 evaluator 
"""rankbased""" +318 60 dataset """kinships""" +318 60 model """hole""" +318 60 loss """bceaftersigmoid""" +318 60 regularizer """no""" +318 60 optimizer """adam""" +318 60 training_loop """owa""" +318 60 negative_sampler """basic""" +318 60 evaluator """rankbased""" +318 61 dataset """kinships""" +318 61 model """hole""" +318 61 loss """bceaftersigmoid""" +318 61 regularizer """no""" +318 61 optimizer """adam""" +318 61 training_loop """owa""" +318 61 negative_sampler """basic""" +318 61 evaluator """rankbased""" +318 62 dataset """kinships""" +318 62 model """hole""" +318 62 loss """bceaftersigmoid""" +318 62 regularizer """no""" +318 62 optimizer """adam""" +318 62 training_loop """owa""" +318 62 negative_sampler """basic""" +318 62 evaluator """rankbased""" +318 63 dataset """kinships""" +318 63 model """hole""" +318 63 loss """bceaftersigmoid""" +318 63 regularizer """no""" +318 63 optimizer """adam""" +318 63 training_loop """owa""" +318 63 negative_sampler """basic""" +318 63 evaluator """rankbased""" +318 64 dataset """kinships""" +318 64 model """hole""" +318 64 loss """bceaftersigmoid""" +318 64 regularizer """no""" +318 64 optimizer """adam""" +318 64 training_loop """owa""" +318 64 negative_sampler """basic""" +318 64 evaluator """rankbased""" +318 65 dataset """kinships""" +318 65 model """hole""" +318 65 loss """bceaftersigmoid""" +318 65 regularizer """no""" +318 65 optimizer """adam""" +318 65 training_loop """owa""" +318 65 negative_sampler """basic""" +318 65 evaluator """rankbased""" +318 66 dataset """kinships""" +318 66 model """hole""" +318 66 loss """bceaftersigmoid""" +318 66 regularizer """no""" +318 66 optimizer """adam""" +318 66 training_loop """owa""" +318 66 negative_sampler """basic""" +318 66 evaluator """rankbased""" +318 67 dataset """kinships""" +318 67 model """hole""" +318 67 loss """bceaftersigmoid""" +318 67 regularizer """no""" +318 67 optimizer """adam""" +318 67 training_loop """owa""" +318 67 negative_sampler """basic""" 
+318 67 evaluator """rankbased""" +318 68 dataset """kinships""" +318 68 model """hole""" +318 68 loss """bceaftersigmoid""" +318 68 regularizer """no""" +318 68 optimizer """adam""" +318 68 training_loop """owa""" +318 68 negative_sampler """basic""" +318 68 evaluator """rankbased""" +318 69 dataset """kinships""" +318 69 model """hole""" +318 69 loss """bceaftersigmoid""" +318 69 regularizer """no""" +318 69 optimizer """adam""" +318 69 training_loop """owa""" +318 69 negative_sampler """basic""" +318 69 evaluator """rankbased""" +318 70 dataset """kinships""" +318 70 model """hole""" +318 70 loss """bceaftersigmoid""" +318 70 regularizer """no""" +318 70 optimizer """adam""" +318 70 training_loop """owa""" +318 70 negative_sampler """basic""" +318 70 evaluator """rankbased""" +318 71 dataset """kinships""" +318 71 model """hole""" +318 71 loss """bceaftersigmoid""" +318 71 regularizer """no""" +318 71 optimizer """adam""" +318 71 training_loop """owa""" +318 71 negative_sampler """basic""" +318 71 evaluator """rankbased""" +318 72 dataset """kinships""" +318 72 model """hole""" +318 72 loss """bceaftersigmoid""" +318 72 regularizer """no""" +318 72 optimizer """adam""" +318 72 training_loop """owa""" +318 72 negative_sampler """basic""" +318 72 evaluator """rankbased""" +318 73 dataset """kinships""" +318 73 model """hole""" +318 73 loss """bceaftersigmoid""" +318 73 regularizer """no""" +318 73 optimizer """adam""" +318 73 training_loop """owa""" +318 73 negative_sampler """basic""" +318 73 evaluator """rankbased""" +318 74 dataset """kinships""" +318 74 model """hole""" +318 74 loss """bceaftersigmoid""" +318 74 regularizer """no""" +318 74 optimizer """adam""" +318 74 training_loop """owa""" +318 74 negative_sampler """basic""" +318 74 evaluator """rankbased""" +318 75 dataset """kinships""" +318 75 model """hole""" +318 75 loss """bceaftersigmoid""" +318 75 regularizer """no""" +318 75 optimizer """adam""" +318 75 training_loop """owa""" +318 75 
negative_sampler """basic""" +318 75 evaluator """rankbased""" +318 76 dataset """kinships""" +318 76 model """hole""" +318 76 loss """bceaftersigmoid""" +318 76 regularizer """no""" +318 76 optimizer """adam""" +318 76 training_loop """owa""" +318 76 negative_sampler """basic""" +318 76 evaluator """rankbased""" +318 77 dataset """kinships""" +318 77 model """hole""" +318 77 loss """bceaftersigmoid""" +318 77 regularizer """no""" +318 77 optimizer """adam""" +318 77 training_loop """owa""" +318 77 negative_sampler """basic""" +318 77 evaluator """rankbased""" +318 78 dataset """kinships""" +318 78 model """hole""" +318 78 loss """bceaftersigmoid""" +318 78 regularizer """no""" +318 78 optimizer """adam""" +318 78 training_loop """owa""" +318 78 negative_sampler """basic""" +318 78 evaluator """rankbased""" +318 79 dataset """kinships""" +318 79 model """hole""" +318 79 loss """bceaftersigmoid""" +318 79 regularizer """no""" +318 79 optimizer """adam""" +318 79 training_loop """owa""" +318 79 negative_sampler """basic""" +318 79 evaluator """rankbased""" +318 80 dataset """kinships""" +318 80 model """hole""" +318 80 loss """bceaftersigmoid""" +318 80 regularizer """no""" +318 80 optimizer """adam""" +318 80 training_loop """owa""" +318 80 negative_sampler """basic""" +318 80 evaluator """rankbased""" +318 81 dataset """kinships""" +318 81 model """hole""" +318 81 loss """bceaftersigmoid""" +318 81 regularizer """no""" +318 81 optimizer """adam""" +318 81 training_loop """owa""" +318 81 negative_sampler """basic""" +318 81 evaluator """rankbased""" +318 82 dataset """kinships""" +318 82 model """hole""" +318 82 loss """bceaftersigmoid""" +318 82 regularizer """no""" +318 82 optimizer """adam""" +318 82 training_loop """owa""" +318 82 negative_sampler """basic""" +318 82 evaluator """rankbased""" +318 83 dataset """kinships""" +318 83 model """hole""" +318 83 loss """bceaftersigmoid""" +318 83 regularizer """no""" +318 83 optimizer """adam""" +318 83 training_loop 
"""owa""" +318 83 negative_sampler """basic""" +318 83 evaluator """rankbased""" +318 84 dataset """kinships""" +318 84 model """hole""" +318 84 loss """bceaftersigmoid""" +318 84 regularizer """no""" +318 84 optimizer """adam""" +318 84 training_loop """owa""" +318 84 negative_sampler """basic""" +318 84 evaluator """rankbased""" +318 85 dataset """kinships""" +318 85 model """hole""" +318 85 loss """bceaftersigmoid""" +318 85 regularizer """no""" +318 85 optimizer """adam""" +318 85 training_loop """owa""" +318 85 negative_sampler """basic""" +318 85 evaluator """rankbased""" +318 86 dataset """kinships""" +318 86 model """hole""" +318 86 loss """bceaftersigmoid""" +318 86 regularizer """no""" +318 86 optimizer """adam""" +318 86 training_loop """owa""" +318 86 negative_sampler """basic""" +318 86 evaluator """rankbased""" +318 87 dataset """kinships""" +318 87 model """hole""" +318 87 loss """bceaftersigmoid""" +318 87 regularizer """no""" +318 87 optimizer """adam""" +318 87 training_loop """owa""" +318 87 negative_sampler """basic""" +318 87 evaluator """rankbased""" +318 88 dataset """kinships""" +318 88 model """hole""" +318 88 loss """bceaftersigmoid""" +318 88 regularizer """no""" +318 88 optimizer """adam""" +318 88 training_loop """owa""" +318 88 negative_sampler """basic""" +318 88 evaluator """rankbased""" +318 89 dataset """kinships""" +318 89 model """hole""" +318 89 loss """bceaftersigmoid""" +318 89 regularizer """no""" +318 89 optimizer """adam""" +318 89 training_loop """owa""" +318 89 negative_sampler """basic""" +318 89 evaluator """rankbased""" +318 90 dataset """kinships""" +318 90 model """hole""" +318 90 loss """bceaftersigmoid""" +318 90 regularizer """no""" +318 90 optimizer """adam""" +318 90 training_loop """owa""" +318 90 negative_sampler """basic""" +318 90 evaluator """rankbased""" +318 91 dataset """kinships""" +318 91 model """hole""" +318 91 loss """bceaftersigmoid""" +318 91 regularizer """no""" +318 91 optimizer """adam""" +318 
91 training_loop """owa""" +318 91 negative_sampler """basic""" +318 91 evaluator """rankbased""" +318 92 dataset """kinships""" +318 92 model """hole""" +318 92 loss """bceaftersigmoid""" +318 92 regularizer """no""" +318 92 optimizer """adam""" +318 92 training_loop """owa""" +318 92 negative_sampler """basic""" +318 92 evaluator """rankbased""" +318 93 dataset """kinships""" +318 93 model """hole""" +318 93 loss """bceaftersigmoid""" +318 93 regularizer """no""" +318 93 optimizer """adam""" +318 93 training_loop """owa""" +318 93 negative_sampler """basic""" +318 93 evaluator """rankbased""" +318 94 dataset """kinships""" +318 94 model """hole""" +318 94 loss """bceaftersigmoid""" +318 94 regularizer """no""" +318 94 optimizer """adam""" +318 94 training_loop """owa""" +318 94 negative_sampler """basic""" +318 94 evaluator """rankbased""" +318 95 dataset """kinships""" +318 95 model """hole""" +318 95 loss """bceaftersigmoid""" +318 95 regularizer """no""" +318 95 optimizer """adam""" +318 95 training_loop """owa""" +318 95 negative_sampler """basic""" +318 95 evaluator """rankbased""" +318 96 dataset """kinships""" +318 96 model """hole""" +318 96 loss """bceaftersigmoid""" +318 96 regularizer """no""" +318 96 optimizer """adam""" +318 96 training_loop """owa""" +318 96 negative_sampler """basic""" +318 96 evaluator """rankbased""" +318 97 dataset """kinships""" +318 97 model """hole""" +318 97 loss """bceaftersigmoid""" +318 97 regularizer """no""" +318 97 optimizer """adam""" +318 97 training_loop """owa""" +318 97 negative_sampler """basic""" +318 97 evaluator """rankbased""" +318 98 dataset """kinships""" +318 98 model """hole""" +318 98 loss """bceaftersigmoid""" +318 98 regularizer """no""" +318 98 optimizer """adam""" +318 98 training_loop """owa""" +318 98 negative_sampler """basic""" +318 98 evaluator """rankbased""" +318 99 dataset """kinships""" +318 99 model """hole""" +318 99 loss """bceaftersigmoid""" +318 99 regularizer """no""" +318 99 optimizer 
"""adam""" +318 99 training_loop """owa""" +318 99 negative_sampler """basic""" +318 99 evaluator """rankbased""" +318 100 dataset """kinships""" +318 100 model """hole""" +318 100 loss """bceaftersigmoid""" +318 100 regularizer """no""" +318 100 optimizer """adam""" +318 100 training_loop """owa""" +318 100 negative_sampler """basic""" +318 100 evaluator """rankbased""" +319 1 model.embedding_dim 2.0 +319 1 optimizer.lr 0.024906967723129222 +319 1 negative_sampler.num_negs_per_pos 86.0 +319 1 training.batch_size 1.0 +319 2 model.embedding_dim 1.0 +319 2 optimizer.lr 0.0011669756484523658 +319 2 negative_sampler.num_negs_per_pos 21.0 +319 2 training.batch_size 0.0 +319 3 model.embedding_dim 2.0 +319 3 optimizer.lr 0.08540332646710398 +319 3 negative_sampler.num_negs_per_pos 71.0 +319 3 training.batch_size 1.0 +319 4 model.embedding_dim 0.0 +319 4 optimizer.lr 0.0022302761040928656 +319 4 negative_sampler.num_negs_per_pos 68.0 +319 4 training.batch_size 1.0 +319 5 model.embedding_dim 1.0 +319 5 optimizer.lr 0.04177851148413956 +319 5 negative_sampler.num_negs_per_pos 37.0 +319 5 training.batch_size 2.0 +319 6 model.embedding_dim 0.0 +319 6 optimizer.lr 0.09774781832809629 +319 6 negative_sampler.num_negs_per_pos 81.0 +319 6 training.batch_size 1.0 +319 7 model.embedding_dim 1.0 +319 7 optimizer.lr 0.0010120148883564421 +319 7 negative_sampler.num_negs_per_pos 29.0 +319 7 training.batch_size 2.0 +319 8 model.embedding_dim 2.0 +319 8 optimizer.lr 0.02958626239660917 +319 8 negative_sampler.num_negs_per_pos 20.0 +319 8 training.batch_size 2.0 +319 9 model.embedding_dim 1.0 +319 9 optimizer.lr 0.011616168850905658 +319 9 negative_sampler.num_negs_per_pos 58.0 +319 9 training.batch_size 1.0 +319 10 model.embedding_dim 2.0 +319 10 optimizer.lr 0.0013310360382721533 +319 10 negative_sampler.num_negs_per_pos 61.0 +319 10 training.batch_size 2.0 +319 11 model.embedding_dim 2.0 +319 11 optimizer.lr 0.001139891336958579 +319 11 negative_sampler.num_negs_per_pos 38.0 +319 11 
training.batch_size 2.0 +319 12 model.embedding_dim 1.0 +319 12 optimizer.lr 0.08052873278072605 +319 12 negative_sampler.num_negs_per_pos 7.0 +319 12 training.batch_size 1.0 +319 13 model.embedding_dim 2.0 +319 13 optimizer.lr 0.056302435434513874 +319 13 negative_sampler.num_negs_per_pos 73.0 +319 13 training.batch_size 1.0 +319 14 model.embedding_dim 1.0 +319 14 optimizer.lr 0.02223739701723516 +319 14 negative_sampler.num_negs_per_pos 23.0 +319 14 training.batch_size 2.0 +319 15 model.embedding_dim 2.0 +319 15 optimizer.lr 0.00306456519226934 +319 15 negative_sampler.num_negs_per_pos 45.0 +319 15 training.batch_size 0.0 +319 16 model.embedding_dim 2.0 +319 16 optimizer.lr 0.07289273880891155 +319 16 negative_sampler.num_negs_per_pos 21.0 +319 16 training.batch_size 0.0 +319 17 model.embedding_dim 1.0 +319 17 optimizer.lr 0.06952605232342508 +319 17 negative_sampler.num_negs_per_pos 67.0 +319 17 training.batch_size 1.0 +319 18 model.embedding_dim 0.0 +319 18 optimizer.lr 0.002583490378636877 +319 18 negative_sampler.num_negs_per_pos 15.0 +319 18 training.batch_size 2.0 +319 19 model.embedding_dim 1.0 +319 19 optimizer.lr 0.06603817539643694 +319 19 negative_sampler.num_negs_per_pos 7.0 +319 19 training.batch_size 1.0 +319 20 model.embedding_dim 0.0 +319 20 optimizer.lr 0.032790594548016416 +319 20 negative_sampler.num_negs_per_pos 93.0 +319 20 training.batch_size 1.0 +319 21 model.embedding_dim 2.0 +319 21 optimizer.lr 0.01433151470175182 +319 21 negative_sampler.num_negs_per_pos 5.0 +319 21 training.batch_size 0.0 +319 22 model.embedding_dim 0.0 +319 22 optimizer.lr 0.001639048803968811 +319 22 negative_sampler.num_negs_per_pos 15.0 +319 22 training.batch_size 0.0 +319 23 model.embedding_dim 0.0 +319 23 optimizer.lr 0.0795075167875453 +319 23 negative_sampler.num_negs_per_pos 93.0 +319 23 training.batch_size 0.0 +319 24 model.embedding_dim 1.0 +319 24 optimizer.lr 0.07132331428369054 +319 24 negative_sampler.num_negs_per_pos 54.0 +319 24 training.batch_size 0.0 
+319 25 model.embedding_dim 0.0 +319 25 optimizer.lr 0.05535647489724672 +319 25 negative_sampler.num_negs_per_pos 91.0 +319 25 training.batch_size 0.0 +319 26 model.embedding_dim 0.0 +319 26 optimizer.lr 0.007879593613107133 +319 26 negative_sampler.num_negs_per_pos 34.0 +319 26 training.batch_size 1.0 +319 27 model.embedding_dim 1.0 +319 27 optimizer.lr 0.034595196338899535 +319 27 negative_sampler.num_negs_per_pos 78.0 +319 27 training.batch_size 0.0 +319 28 model.embedding_dim 0.0 +319 28 optimizer.lr 0.007899513484522292 +319 28 negative_sampler.num_negs_per_pos 84.0 +319 28 training.batch_size 0.0 +319 29 model.embedding_dim 0.0 +319 29 optimizer.lr 0.002122656609905827 +319 29 negative_sampler.num_negs_per_pos 11.0 +319 29 training.batch_size 2.0 +319 30 model.embedding_dim 0.0 +319 30 optimizer.lr 0.07762180370799372 +319 30 negative_sampler.num_negs_per_pos 37.0 +319 30 training.batch_size 2.0 +319 31 model.embedding_dim 2.0 +319 31 optimizer.lr 0.0021996013171754285 +319 31 negative_sampler.num_negs_per_pos 54.0 +319 31 training.batch_size 2.0 +319 32 model.embedding_dim 0.0 +319 32 optimizer.lr 0.006526938857036627 +319 32 negative_sampler.num_negs_per_pos 84.0 +319 32 training.batch_size 1.0 +319 33 model.embedding_dim 0.0 +319 33 optimizer.lr 0.009531801876126032 +319 33 negative_sampler.num_negs_per_pos 21.0 +319 33 training.batch_size 0.0 +319 34 model.embedding_dim 1.0 +319 34 optimizer.lr 0.022259721067712294 +319 34 negative_sampler.num_negs_per_pos 87.0 +319 34 training.batch_size 2.0 +319 35 model.embedding_dim 1.0 +319 35 optimizer.lr 0.006882343014701559 +319 35 negative_sampler.num_negs_per_pos 6.0 +319 35 training.batch_size 1.0 +319 36 model.embedding_dim 0.0 +319 36 optimizer.lr 0.054596969082220316 +319 36 negative_sampler.num_negs_per_pos 43.0 +319 36 training.batch_size 1.0 +319 37 model.embedding_dim 2.0 +319 37 optimizer.lr 0.04330175453993896 +319 37 negative_sampler.num_negs_per_pos 84.0 +319 37 training.batch_size 0.0 +319 38 
model.embedding_dim 1.0 +319 38 optimizer.lr 0.008583650342820498 +319 38 negative_sampler.num_negs_per_pos 69.0 +319 38 training.batch_size 2.0 +319 39 model.embedding_dim 0.0 +319 39 optimizer.lr 0.007578082889404761 +319 39 negative_sampler.num_negs_per_pos 94.0 +319 39 training.batch_size 0.0 +319 40 model.embedding_dim 2.0 +319 40 optimizer.lr 0.07365476233025584 +319 40 negative_sampler.num_negs_per_pos 6.0 +319 40 training.batch_size 0.0 +319 41 model.embedding_dim 1.0 +319 41 optimizer.lr 0.0024269073436211357 +319 41 negative_sampler.num_negs_per_pos 55.0 +319 41 training.batch_size 2.0 +319 42 model.embedding_dim 2.0 +319 42 optimizer.lr 0.002186166531890477 +319 42 negative_sampler.num_negs_per_pos 12.0 +319 42 training.batch_size 0.0 +319 43 model.embedding_dim 1.0 +319 43 optimizer.lr 0.009254536744335486 +319 43 negative_sampler.num_negs_per_pos 28.0 +319 43 training.batch_size 0.0 +319 44 model.embedding_dim 0.0 +319 44 optimizer.lr 0.004826307085894435 +319 44 negative_sampler.num_negs_per_pos 91.0 +319 44 training.batch_size 2.0 +319 45 model.embedding_dim 2.0 +319 45 optimizer.lr 0.009977330071401584 +319 45 negative_sampler.num_negs_per_pos 1.0 +319 45 training.batch_size 2.0 +319 46 model.embedding_dim 1.0 +319 46 optimizer.lr 0.00268545180382278 +319 46 negative_sampler.num_negs_per_pos 47.0 +319 46 training.batch_size 0.0 +319 47 model.embedding_dim 0.0 +319 47 optimizer.lr 0.0030883181917234076 +319 47 negative_sampler.num_negs_per_pos 51.0 +319 47 training.batch_size 1.0 +319 48 model.embedding_dim 2.0 +319 48 optimizer.lr 0.015080994234956825 +319 48 negative_sampler.num_negs_per_pos 25.0 +319 48 training.batch_size 1.0 +319 49 model.embedding_dim 1.0 +319 49 optimizer.lr 0.003911984317935226 +319 49 negative_sampler.num_negs_per_pos 93.0 +319 49 training.batch_size 0.0 +319 50 model.embedding_dim 2.0 +319 50 optimizer.lr 0.007539390871308831 +319 50 negative_sampler.num_negs_per_pos 38.0 +319 50 training.batch_size 1.0 +319 51 
model.embedding_dim 1.0 +319 51 optimizer.lr 0.005633874464330499 +319 51 negative_sampler.num_negs_per_pos 97.0 +319 51 training.batch_size 1.0 +319 52 model.embedding_dim 0.0 +319 52 optimizer.lr 0.006681041044240055 +319 52 negative_sampler.num_negs_per_pos 13.0 +319 52 training.batch_size 1.0 +319 53 model.embedding_dim 2.0 +319 53 optimizer.lr 0.05692994166734769 +319 53 negative_sampler.num_negs_per_pos 98.0 +319 53 training.batch_size 1.0 +319 54 model.embedding_dim 0.0 +319 54 optimizer.lr 0.010690959864387686 +319 54 negative_sampler.num_negs_per_pos 11.0 +319 54 training.batch_size 0.0 +319 55 model.embedding_dim 1.0 +319 55 optimizer.lr 0.004202873541131428 +319 55 negative_sampler.num_negs_per_pos 13.0 +319 55 training.batch_size 1.0 +319 56 model.embedding_dim 2.0 +319 56 optimizer.lr 0.03697483962702172 +319 56 negative_sampler.num_negs_per_pos 99.0 +319 56 training.batch_size 0.0 +319 57 model.embedding_dim 1.0 +319 57 optimizer.lr 0.004631792285907441 +319 57 negative_sampler.num_negs_per_pos 27.0 +319 57 training.batch_size 2.0 +319 58 model.embedding_dim 1.0 +319 58 optimizer.lr 0.003195671010061647 +319 58 negative_sampler.num_negs_per_pos 34.0 +319 58 training.batch_size 2.0 +319 59 model.embedding_dim 1.0 +319 59 optimizer.lr 0.0199955935473873 +319 59 negative_sampler.num_negs_per_pos 58.0 +319 59 training.batch_size 2.0 +319 60 model.embedding_dim 2.0 +319 60 optimizer.lr 0.0012128874004750708 +319 60 negative_sampler.num_negs_per_pos 10.0 +319 60 training.batch_size 2.0 +319 61 model.embedding_dim 1.0 +319 61 optimizer.lr 0.00422853989188152 +319 61 negative_sampler.num_negs_per_pos 56.0 +319 61 training.batch_size 2.0 +319 62 model.embedding_dim 0.0 +319 62 optimizer.lr 0.08443842255844478 +319 62 negative_sampler.num_negs_per_pos 10.0 +319 62 training.batch_size 1.0 +319 63 model.embedding_dim 2.0 +319 63 optimizer.lr 0.004918391177534512 +319 63 negative_sampler.num_negs_per_pos 79.0 +319 63 training.batch_size 1.0 +319 64 
model.embedding_dim 1.0 +319 64 optimizer.lr 0.0017366451512073988 +319 64 negative_sampler.num_negs_per_pos 96.0 +319 64 training.batch_size 2.0 +319 65 model.embedding_dim 0.0 +319 65 optimizer.lr 0.0030405528958780243 +319 65 negative_sampler.num_negs_per_pos 67.0 +319 65 training.batch_size 0.0 +319 66 model.embedding_dim 1.0 +319 66 optimizer.lr 0.007517588706735713 +319 66 negative_sampler.num_negs_per_pos 61.0 +319 66 training.batch_size 1.0 +319 67 model.embedding_dim 1.0 +319 67 optimizer.lr 0.02134061054895296 +319 67 negative_sampler.num_negs_per_pos 75.0 +319 67 training.batch_size 2.0 +319 68 model.embedding_dim 2.0 +319 68 optimizer.lr 0.028123928333833946 +319 68 negative_sampler.num_negs_per_pos 12.0 +319 68 training.batch_size 2.0 +319 69 model.embedding_dim 0.0 +319 69 optimizer.lr 0.07609271725197002 +319 69 negative_sampler.num_negs_per_pos 99.0 +319 69 training.batch_size 2.0 +319 70 model.embedding_dim 2.0 +319 70 optimizer.lr 0.032718802789324736 +319 70 negative_sampler.num_negs_per_pos 54.0 +319 70 training.batch_size 1.0 +319 71 model.embedding_dim 1.0 +319 71 optimizer.lr 0.0012895284966338014 +319 71 negative_sampler.num_negs_per_pos 5.0 +319 71 training.batch_size 0.0 +319 72 model.embedding_dim 1.0 +319 72 optimizer.lr 0.005810162320661557 +319 72 negative_sampler.num_negs_per_pos 22.0 +319 72 training.batch_size 1.0 +319 73 model.embedding_dim 0.0 +319 73 optimizer.lr 0.001410230048014998 +319 73 negative_sampler.num_negs_per_pos 88.0 +319 73 training.batch_size 1.0 +319 74 model.embedding_dim 0.0 +319 74 optimizer.lr 0.002488031228166633 +319 74 negative_sampler.num_negs_per_pos 65.0 +319 74 training.batch_size 2.0 +319 75 model.embedding_dim 2.0 +319 75 optimizer.lr 0.03983743088102962 +319 75 negative_sampler.num_negs_per_pos 51.0 +319 75 training.batch_size 2.0 +319 76 model.embedding_dim 2.0 +319 76 optimizer.lr 0.007579498273288818 +319 76 negative_sampler.num_negs_per_pos 1.0 +319 76 training.batch_size 0.0 +319 77 
model.embedding_dim 1.0 +319 77 optimizer.lr 0.02389477849163403 +319 77 negative_sampler.num_negs_per_pos 17.0 +319 77 training.batch_size 1.0 +319 78 model.embedding_dim 0.0 +319 78 optimizer.lr 0.04708423716217549 +319 78 negative_sampler.num_negs_per_pos 61.0 +319 78 training.batch_size 0.0 +319 79 model.embedding_dim 0.0 +319 79 optimizer.lr 0.013564478612062175 +319 79 negative_sampler.num_negs_per_pos 43.0 +319 79 training.batch_size 0.0 +319 80 model.embedding_dim 0.0 +319 80 optimizer.lr 0.004602698987705636 +319 80 negative_sampler.num_negs_per_pos 7.0 +319 80 training.batch_size 2.0 +319 81 model.embedding_dim 0.0 +319 81 optimizer.lr 0.014324708793211087 +319 81 negative_sampler.num_negs_per_pos 9.0 +319 81 training.batch_size 1.0 +319 82 model.embedding_dim 2.0 +319 82 optimizer.lr 0.003654877818489738 +319 82 negative_sampler.num_negs_per_pos 56.0 +319 82 training.batch_size 1.0 +319 83 model.embedding_dim 0.0 +319 83 optimizer.lr 0.012536321650957591 +319 83 negative_sampler.num_negs_per_pos 26.0 +319 83 training.batch_size 1.0 +319 84 model.embedding_dim 0.0 +319 84 optimizer.lr 0.0010235684872335364 +319 84 negative_sampler.num_negs_per_pos 74.0 +319 84 training.batch_size 1.0 +319 85 model.embedding_dim 0.0 +319 85 optimizer.lr 0.0817114536658958 +319 85 negative_sampler.num_negs_per_pos 27.0 +319 85 training.batch_size 0.0 +319 86 model.embedding_dim 1.0 +319 86 optimizer.lr 0.06118866710702344 +319 86 negative_sampler.num_negs_per_pos 25.0 +319 86 training.batch_size 2.0 +319 87 model.embedding_dim 2.0 +319 87 optimizer.lr 0.001030995344441387 +319 87 negative_sampler.num_negs_per_pos 59.0 +319 87 training.batch_size 0.0 +319 88 model.embedding_dim 1.0 +319 88 optimizer.lr 0.003821714219010007 +319 88 negative_sampler.num_negs_per_pos 30.0 +319 88 training.batch_size 2.0 +319 89 model.embedding_dim 2.0 +319 89 optimizer.lr 0.051290568205146166 +319 89 negative_sampler.num_negs_per_pos 22.0 +319 89 training.batch_size 1.0 +319 90 
model.embedding_dim 2.0 +319 90 optimizer.lr 0.0011080476793717887 +319 90 negative_sampler.num_negs_per_pos 6.0 +319 90 training.batch_size 2.0 +319 91 model.embedding_dim 0.0 +319 91 optimizer.lr 0.0020672959539024343 +319 91 negative_sampler.num_negs_per_pos 55.0 +319 91 training.batch_size 0.0 +319 92 model.embedding_dim 1.0 +319 92 optimizer.lr 0.0035186184274045578 +319 92 negative_sampler.num_negs_per_pos 18.0 +319 92 training.batch_size 0.0 +319 93 model.embedding_dim 2.0 +319 93 optimizer.lr 0.0038338989407969808 +319 93 negative_sampler.num_negs_per_pos 74.0 +319 93 training.batch_size 0.0 +319 94 model.embedding_dim 0.0 +319 94 optimizer.lr 0.07187928708051329 +319 94 negative_sampler.num_negs_per_pos 73.0 +319 94 training.batch_size 2.0 +319 95 model.embedding_dim 2.0 +319 95 optimizer.lr 0.03790812070715467 +319 95 negative_sampler.num_negs_per_pos 6.0 +319 95 training.batch_size 1.0 +319 96 model.embedding_dim 0.0 +319 96 optimizer.lr 0.0015862202722565608 +319 96 negative_sampler.num_negs_per_pos 46.0 +319 96 training.batch_size 1.0 +319 97 model.embedding_dim 0.0 +319 97 optimizer.lr 0.01290133332428174 +319 97 negative_sampler.num_negs_per_pos 43.0 +319 97 training.batch_size 1.0 +319 98 model.embedding_dim 1.0 +319 98 optimizer.lr 0.03640881342013516 +319 98 negative_sampler.num_negs_per_pos 73.0 +319 98 training.batch_size 0.0 +319 99 model.embedding_dim 0.0 +319 99 optimizer.lr 0.005607271162862205 +319 99 negative_sampler.num_negs_per_pos 68.0 +319 99 training.batch_size 1.0 +319 100 model.embedding_dim 0.0 +319 100 optimizer.lr 0.015044318364826748 +319 100 negative_sampler.num_negs_per_pos 41.0 +319 100 training.batch_size 2.0 +319 1 dataset """kinships""" +319 1 model """hole""" +319 1 loss """softplus""" +319 1 regularizer """no""" +319 1 optimizer """adam""" +319 1 training_loop """owa""" +319 1 negative_sampler """basic""" +319 1 evaluator """rankbased""" +319 2 dataset """kinships""" +319 2 model """hole""" +319 2 loss """softplus""" 
+319 2 regularizer """no""" +319 2 optimizer """adam""" +319 2 training_loop """owa""" +319 2 negative_sampler """basic""" +319 2 evaluator """rankbased""" +319 3 dataset """kinships""" +319 3 model """hole""" +319 3 loss """softplus""" +319 3 regularizer """no""" +319 3 optimizer """adam""" +319 3 training_loop """owa""" +319 3 negative_sampler """basic""" +319 3 evaluator """rankbased""" +319 4 dataset """kinships""" +319 4 model """hole""" +319 4 loss """softplus""" +319 4 regularizer """no""" +319 4 optimizer """adam""" +319 4 training_loop """owa""" +319 4 negative_sampler """basic""" +319 4 evaluator """rankbased""" +319 5 dataset """kinships""" +319 5 model """hole""" +319 5 loss """softplus""" +319 5 regularizer """no""" +319 5 optimizer """adam""" +319 5 training_loop """owa""" +319 5 negative_sampler """basic""" +319 5 evaluator """rankbased""" +319 6 dataset """kinships""" +319 6 model """hole""" +319 6 loss """softplus""" +319 6 regularizer """no""" +319 6 optimizer """adam""" +319 6 training_loop """owa""" +319 6 negative_sampler """basic""" +319 6 evaluator """rankbased""" +319 7 dataset """kinships""" +319 7 model """hole""" +319 7 loss """softplus""" +319 7 regularizer """no""" +319 7 optimizer """adam""" +319 7 training_loop """owa""" +319 7 negative_sampler """basic""" +319 7 evaluator """rankbased""" +319 8 dataset """kinships""" +319 8 model """hole""" +319 8 loss """softplus""" +319 8 regularizer """no""" +319 8 optimizer """adam""" +319 8 training_loop """owa""" +319 8 negative_sampler """basic""" +319 8 evaluator """rankbased""" +319 9 dataset """kinships""" +319 9 model """hole""" +319 9 loss """softplus""" +319 9 regularizer """no""" +319 9 optimizer """adam""" +319 9 training_loop """owa""" +319 9 negative_sampler """basic""" +319 9 evaluator """rankbased""" +319 10 dataset """kinships""" +319 10 model """hole""" +319 10 loss """softplus""" +319 10 regularizer """no""" +319 10 optimizer """adam""" +319 10 training_loop """owa""" +319 10 
negative_sampler """basic""" +319 10 evaluator """rankbased""" +319 11 dataset """kinships""" +319 11 model """hole""" +319 11 loss """softplus""" +319 11 regularizer """no""" +319 11 optimizer """adam""" +319 11 training_loop """owa""" +319 11 negative_sampler """basic""" +319 11 evaluator """rankbased""" +319 12 dataset """kinships""" +319 12 model """hole""" +319 12 loss """softplus""" +319 12 regularizer """no""" +319 12 optimizer """adam""" +319 12 training_loop """owa""" +319 12 negative_sampler """basic""" +319 12 evaluator """rankbased""" +319 13 dataset """kinships""" +319 13 model """hole""" +319 13 loss """softplus""" +319 13 regularizer """no""" +319 13 optimizer """adam""" +319 13 training_loop """owa""" +319 13 negative_sampler """basic""" +319 13 evaluator """rankbased""" +319 14 dataset """kinships""" +319 14 model """hole""" +319 14 loss """softplus""" +319 14 regularizer """no""" +319 14 optimizer """adam""" +319 14 training_loop """owa""" +319 14 negative_sampler """basic""" +319 14 evaluator """rankbased""" +319 15 dataset """kinships""" +319 15 model """hole""" +319 15 loss """softplus""" +319 15 regularizer """no""" +319 15 optimizer """adam""" +319 15 training_loop """owa""" +319 15 negative_sampler """basic""" +319 15 evaluator """rankbased""" +319 16 dataset """kinships""" +319 16 model """hole""" +319 16 loss """softplus""" +319 16 regularizer """no""" +319 16 optimizer """adam""" +319 16 training_loop """owa""" +319 16 negative_sampler """basic""" +319 16 evaluator """rankbased""" +319 17 dataset """kinships""" +319 17 model """hole""" +319 17 loss """softplus""" +319 17 regularizer """no""" +319 17 optimizer """adam""" +319 17 training_loop """owa""" +319 17 negative_sampler """basic""" +319 17 evaluator """rankbased""" +319 18 dataset """kinships""" +319 18 model """hole""" +319 18 loss """softplus""" +319 18 regularizer """no""" +319 18 optimizer """adam""" +319 18 training_loop """owa""" +319 18 negative_sampler """basic""" +319 18 
evaluator """rankbased""" +319 19 dataset """kinships""" +319 19 model """hole""" +319 19 loss """softplus""" +319 19 regularizer """no""" +319 19 optimizer """adam""" +319 19 training_loop """owa""" +319 19 negative_sampler """basic""" +319 19 evaluator """rankbased""" +319 20 dataset """kinships""" +319 20 model """hole""" +319 20 loss """softplus""" +319 20 regularizer """no""" +319 20 optimizer """adam""" +319 20 training_loop """owa""" +319 20 negative_sampler """basic""" +319 20 evaluator """rankbased""" +319 21 dataset """kinships""" +319 21 model """hole""" +319 21 loss """softplus""" +319 21 regularizer """no""" +319 21 optimizer """adam""" +319 21 training_loop """owa""" +319 21 negative_sampler """basic""" +319 21 evaluator """rankbased""" +319 22 dataset """kinships""" +319 22 model """hole""" +319 22 loss """softplus""" +319 22 regularizer """no""" +319 22 optimizer """adam""" +319 22 training_loop """owa""" +319 22 negative_sampler """basic""" +319 22 evaluator """rankbased""" +319 23 dataset """kinships""" +319 23 model """hole""" +319 23 loss """softplus""" +319 23 regularizer """no""" +319 23 optimizer """adam""" +319 23 training_loop """owa""" +319 23 negative_sampler """basic""" +319 23 evaluator """rankbased""" +319 24 dataset """kinships""" +319 24 model """hole""" +319 24 loss """softplus""" +319 24 regularizer """no""" +319 24 optimizer """adam""" +319 24 training_loop """owa""" +319 24 negative_sampler """basic""" +319 24 evaluator """rankbased""" +319 25 dataset """kinships""" +319 25 model """hole""" +319 25 loss """softplus""" +319 25 regularizer """no""" +319 25 optimizer """adam""" +319 25 training_loop """owa""" +319 25 negative_sampler """basic""" +319 25 evaluator """rankbased""" +319 26 dataset """kinships""" +319 26 model """hole""" +319 26 loss """softplus""" +319 26 regularizer """no""" +319 26 optimizer """adam""" +319 26 training_loop """owa""" +319 26 negative_sampler """basic""" +319 26 evaluator """rankbased""" +319 27 
dataset """kinships""" +319 27 model """hole""" +319 27 loss """softplus""" +319 27 regularizer """no""" +319 27 optimizer """adam""" +319 27 training_loop """owa""" +319 27 negative_sampler """basic""" +319 27 evaluator """rankbased""" +319 28 dataset """kinships""" +319 28 model """hole""" +319 28 loss """softplus""" +319 28 regularizer """no""" +319 28 optimizer """adam""" +319 28 training_loop """owa""" +319 28 negative_sampler """basic""" +319 28 evaluator """rankbased""" +319 29 dataset """kinships""" +319 29 model """hole""" +319 29 loss """softplus""" +319 29 regularizer """no""" +319 29 optimizer """adam""" +319 29 training_loop """owa""" +319 29 negative_sampler """basic""" +319 29 evaluator """rankbased""" +319 30 dataset """kinships""" +319 30 model """hole""" +319 30 loss """softplus""" +319 30 regularizer """no""" +319 30 optimizer """adam""" +319 30 training_loop """owa""" +319 30 negative_sampler """basic""" +319 30 evaluator """rankbased""" +319 31 dataset """kinships""" +319 31 model """hole""" +319 31 loss """softplus""" +319 31 regularizer """no""" +319 31 optimizer """adam""" +319 31 training_loop """owa""" +319 31 negative_sampler """basic""" +319 31 evaluator """rankbased""" +319 32 dataset """kinships""" +319 32 model """hole""" +319 32 loss """softplus""" +319 32 regularizer """no""" +319 32 optimizer """adam""" +319 32 training_loop """owa""" +319 32 negative_sampler """basic""" +319 32 evaluator """rankbased""" +319 33 dataset """kinships""" +319 33 model """hole""" +319 33 loss """softplus""" +319 33 regularizer """no""" +319 33 optimizer """adam""" +319 33 training_loop """owa""" +319 33 negative_sampler """basic""" +319 33 evaluator """rankbased""" +319 34 dataset """kinships""" +319 34 model """hole""" +319 34 loss """softplus""" +319 34 regularizer """no""" +319 34 optimizer """adam""" +319 34 training_loop """owa""" +319 34 negative_sampler """basic""" +319 34 evaluator """rankbased""" +319 35 dataset """kinships""" +319 35 model 
"""hole""" +319 35 loss """softplus""" +319 35 regularizer """no""" +319 35 optimizer """adam""" +319 35 training_loop """owa""" +319 35 negative_sampler """basic""" +319 35 evaluator """rankbased""" +319 36 dataset """kinships""" +319 36 model """hole""" +319 36 loss """softplus""" +319 36 regularizer """no""" +319 36 optimizer """adam""" +319 36 training_loop """owa""" +319 36 negative_sampler """basic""" +319 36 evaluator """rankbased""" +319 37 dataset """kinships""" +319 37 model """hole""" +319 37 loss """softplus""" +319 37 regularizer """no""" +319 37 optimizer """adam""" +319 37 training_loop """owa""" +319 37 negative_sampler """basic""" +319 37 evaluator """rankbased""" +319 38 dataset """kinships""" +319 38 model """hole""" +319 38 loss """softplus""" +319 38 regularizer """no""" +319 38 optimizer """adam""" +319 38 training_loop """owa""" +319 38 negative_sampler """basic""" +319 38 evaluator """rankbased""" +319 39 dataset """kinships""" +319 39 model """hole""" +319 39 loss """softplus""" +319 39 regularizer """no""" +319 39 optimizer """adam""" +319 39 training_loop """owa""" +319 39 negative_sampler """basic""" +319 39 evaluator """rankbased""" +319 40 dataset """kinships""" +319 40 model """hole""" +319 40 loss """softplus""" +319 40 regularizer """no""" +319 40 optimizer """adam""" +319 40 training_loop """owa""" +319 40 negative_sampler """basic""" +319 40 evaluator """rankbased""" +319 41 dataset """kinships""" +319 41 model """hole""" +319 41 loss """softplus""" +319 41 regularizer """no""" +319 41 optimizer """adam""" +319 41 training_loop """owa""" +319 41 negative_sampler """basic""" +319 41 evaluator """rankbased""" +319 42 dataset """kinships""" +319 42 model """hole""" +319 42 loss """softplus""" +319 42 regularizer """no""" +319 42 optimizer """adam""" +319 42 training_loop """owa""" +319 42 negative_sampler """basic""" +319 42 evaluator """rankbased""" +319 43 dataset """kinships""" +319 43 model """hole""" +319 43 loss """softplus""" 
+319 43 regularizer """no""" +319 43 optimizer """adam""" +319 43 training_loop """owa""" +319 43 negative_sampler """basic""" +319 43 evaluator """rankbased""" +319 44 dataset """kinships""" +319 44 model """hole""" +319 44 loss """softplus""" +319 44 regularizer """no""" +319 44 optimizer """adam""" +319 44 training_loop """owa""" +319 44 negative_sampler """basic""" +319 44 evaluator """rankbased""" +319 45 dataset """kinships""" +319 45 model """hole""" +319 45 loss """softplus""" +319 45 regularizer """no""" +319 45 optimizer """adam""" +319 45 training_loop """owa""" +319 45 negative_sampler """basic""" +319 45 evaluator """rankbased""" +319 46 dataset """kinships""" +319 46 model """hole""" +319 46 loss """softplus""" +319 46 regularizer """no""" +319 46 optimizer """adam""" +319 46 training_loop """owa""" +319 46 negative_sampler """basic""" +319 46 evaluator """rankbased""" +319 47 dataset """kinships""" +319 47 model """hole""" +319 47 loss """softplus""" +319 47 regularizer """no""" +319 47 optimizer """adam""" +319 47 training_loop """owa""" +319 47 negative_sampler """basic""" +319 47 evaluator """rankbased""" +319 48 dataset """kinships""" +319 48 model """hole""" +319 48 loss """softplus""" +319 48 regularizer """no""" +319 48 optimizer """adam""" +319 48 training_loop """owa""" +319 48 negative_sampler """basic""" +319 48 evaluator """rankbased""" +319 49 dataset """kinships""" +319 49 model """hole""" +319 49 loss """softplus""" +319 49 regularizer """no""" +319 49 optimizer """adam""" +319 49 training_loop """owa""" +319 49 negative_sampler """basic""" +319 49 evaluator """rankbased""" +319 50 dataset """kinships""" +319 50 model """hole""" +319 50 loss """softplus""" +319 50 regularizer """no""" +319 50 optimizer """adam""" +319 50 training_loop """owa""" +319 50 negative_sampler """basic""" +319 50 evaluator """rankbased""" +319 51 dataset """kinships""" +319 51 model """hole""" +319 51 loss """softplus""" +319 51 regularizer """no""" +319 51 
optimizer """adam""" +319 51 training_loop """owa""" +319 51 negative_sampler """basic""" +319 51 evaluator """rankbased""" +319 52 dataset """kinships""" +319 52 model """hole""" +319 52 loss """softplus""" +319 52 regularizer """no""" +319 52 optimizer """adam""" +319 52 training_loop """owa""" +319 52 negative_sampler """basic""" +319 52 evaluator """rankbased""" +319 53 dataset """kinships""" +319 53 model """hole""" +319 53 loss """softplus""" +319 53 regularizer """no""" +319 53 optimizer """adam""" +319 53 training_loop """owa""" +319 53 negative_sampler """basic""" +319 53 evaluator """rankbased""" +319 54 dataset """kinships""" +319 54 model """hole""" +319 54 loss """softplus""" +319 54 regularizer """no""" +319 54 optimizer """adam""" +319 54 training_loop """owa""" +319 54 negative_sampler """basic""" +319 54 evaluator """rankbased""" +319 55 dataset """kinships""" +319 55 model """hole""" +319 55 loss """softplus""" +319 55 regularizer """no""" +319 55 optimizer """adam""" +319 55 training_loop """owa""" +319 55 negative_sampler """basic""" +319 55 evaluator """rankbased""" +319 56 dataset """kinships""" +319 56 model """hole""" +319 56 loss """softplus""" +319 56 regularizer """no""" +319 56 optimizer """adam""" +319 56 training_loop """owa""" +319 56 negative_sampler """basic""" +319 56 evaluator """rankbased""" +319 57 dataset """kinships""" +319 57 model """hole""" +319 57 loss """softplus""" +319 57 regularizer """no""" +319 57 optimizer """adam""" +319 57 training_loop """owa""" +319 57 negative_sampler """basic""" +319 57 evaluator """rankbased""" +319 58 dataset """kinships""" +319 58 model """hole""" +319 58 loss """softplus""" +319 58 regularizer """no""" +319 58 optimizer """adam""" +319 58 training_loop """owa""" +319 58 negative_sampler """basic""" +319 58 evaluator """rankbased""" +319 59 dataset """kinships""" +319 59 model """hole""" +319 59 loss """softplus""" +319 59 regularizer """no""" +319 59 optimizer """adam""" +319 59 
training_loop """owa""" +319 59 negative_sampler """basic""" +319 59 evaluator """rankbased""" +319 60 dataset """kinships""" +319 60 model """hole""" +319 60 loss """softplus""" +319 60 regularizer """no""" +319 60 optimizer """adam""" +319 60 training_loop """owa""" +319 60 negative_sampler """basic""" +319 60 evaluator """rankbased""" +319 61 dataset """kinships""" +319 61 model """hole""" +319 61 loss """softplus""" +319 61 regularizer """no""" +319 61 optimizer """adam""" +319 61 training_loop """owa""" +319 61 negative_sampler """basic""" +319 61 evaluator """rankbased""" +319 62 dataset """kinships""" +319 62 model """hole""" +319 62 loss """softplus""" +319 62 regularizer """no""" +319 62 optimizer """adam""" +319 62 training_loop """owa""" +319 62 negative_sampler """basic""" +319 62 evaluator """rankbased""" +319 63 dataset """kinships""" +319 63 model """hole""" +319 63 loss """softplus""" +319 63 regularizer """no""" +319 63 optimizer """adam""" +319 63 training_loop """owa""" +319 63 negative_sampler """basic""" +319 63 evaluator """rankbased""" +319 64 dataset """kinships""" +319 64 model """hole""" +319 64 loss """softplus""" +319 64 regularizer """no""" +319 64 optimizer """adam""" +319 64 training_loop """owa""" +319 64 negative_sampler """basic""" +319 64 evaluator """rankbased""" +319 65 dataset """kinships""" +319 65 model """hole""" +319 65 loss """softplus""" +319 65 regularizer """no""" +319 65 optimizer """adam""" +319 65 training_loop """owa""" +319 65 negative_sampler """basic""" +319 65 evaluator """rankbased""" +319 66 dataset """kinships""" +319 66 model """hole""" +319 66 loss """softplus""" +319 66 regularizer """no""" +319 66 optimizer """adam""" +319 66 training_loop """owa""" +319 66 negative_sampler """basic""" +319 66 evaluator """rankbased""" +319 67 dataset """kinships""" +319 67 model """hole""" +319 67 loss """softplus""" +319 67 regularizer """no""" +319 67 optimizer """adam""" +319 67 training_loop """owa""" +319 67 
negative_sampler """basic""" +319 67 evaluator """rankbased""" +319 68 dataset """kinships""" +319 68 model """hole""" +319 68 loss """softplus""" +319 68 regularizer """no""" +319 68 optimizer """adam""" +319 68 training_loop """owa""" +319 68 negative_sampler """basic""" +319 68 evaluator """rankbased""" +319 69 dataset """kinships""" +319 69 model """hole""" +319 69 loss """softplus""" +319 69 regularizer """no""" +319 69 optimizer """adam""" +319 69 training_loop """owa""" +319 69 negative_sampler """basic""" +319 69 evaluator """rankbased""" +319 70 dataset """kinships""" +319 70 model """hole""" +319 70 loss """softplus""" +319 70 regularizer """no""" +319 70 optimizer """adam""" +319 70 training_loop """owa""" +319 70 negative_sampler """basic""" +319 70 evaluator """rankbased""" +319 71 dataset """kinships""" +319 71 model """hole""" +319 71 loss """softplus""" +319 71 regularizer """no""" +319 71 optimizer """adam""" +319 71 training_loop """owa""" +319 71 negative_sampler """basic""" +319 71 evaluator """rankbased""" +319 72 dataset """kinships""" +319 72 model """hole""" +319 72 loss """softplus""" +319 72 regularizer """no""" +319 72 optimizer """adam""" +319 72 training_loop """owa""" +319 72 negative_sampler """basic""" +319 72 evaluator """rankbased""" +319 73 dataset """kinships""" +319 73 model """hole""" +319 73 loss """softplus""" +319 73 regularizer """no""" +319 73 optimizer """adam""" +319 73 training_loop """owa""" +319 73 negative_sampler """basic""" +319 73 evaluator """rankbased""" +319 74 dataset """kinships""" +319 74 model """hole""" +319 74 loss """softplus""" +319 74 regularizer """no""" +319 74 optimizer """adam""" +319 74 training_loop """owa""" +319 74 negative_sampler """basic""" +319 74 evaluator """rankbased""" +319 75 dataset """kinships""" +319 75 model """hole""" +319 75 loss """softplus""" +319 75 regularizer """no""" +319 75 optimizer """adam""" +319 75 training_loop """owa""" +319 75 negative_sampler """basic""" +319 75 
evaluator """rankbased""" +319 76 dataset """kinships""" +319 76 model """hole""" +319 76 loss """softplus""" +319 76 regularizer """no""" +319 76 optimizer """adam""" +319 76 training_loop """owa""" +319 76 negative_sampler """basic""" +319 76 evaluator """rankbased""" +319 77 dataset """kinships""" +319 77 model """hole""" +319 77 loss """softplus""" +319 77 regularizer """no""" +319 77 optimizer """adam""" +319 77 training_loop """owa""" +319 77 negative_sampler """basic""" +319 77 evaluator """rankbased""" +319 78 dataset """kinships""" +319 78 model """hole""" +319 78 loss """softplus""" +319 78 regularizer """no""" +319 78 optimizer """adam""" +319 78 training_loop """owa""" +319 78 negative_sampler """basic""" +319 78 evaluator """rankbased""" +319 79 dataset """kinships""" +319 79 model """hole""" +319 79 loss """softplus""" +319 79 regularizer """no""" +319 79 optimizer """adam""" +319 79 training_loop """owa""" +319 79 negative_sampler """basic""" +319 79 evaluator """rankbased""" +319 80 dataset """kinships""" +319 80 model """hole""" +319 80 loss """softplus""" +319 80 regularizer """no""" +319 80 optimizer """adam""" +319 80 training_loop """owa""" +319 80 negative_sampler """basic""" +319 80 evaluator """rankbased""" +319 81 dataset """kinships""" +319 81 model """hole""" +319 81 loss """softplus""" +319 81 regularizer """no""" +319 81 optimizer """adam""" +319 81 training_loop """owa""" +319 81 negative_sampler """basic""" +319 81 evaluator """rankbased""" +319 82 dataset """kinships""" +319 82 model """hole""" +319 82 loss """softplus""" +319 82 regularizer """no""" +319 82 optimizer """adam""" +319 82 training_loop """owa""" +319 82 negative_sampler """basic""" +319 82 evaluator """rankbased""" +319 83 dataset """kinships""" +319 83 model """hole""" +319 83 loss """softplus""" +319 83 regularizer """no""" +319 83 optimizer """adam""" +319 83 training_loop """owa""" +319 83 negative_sampler """basic""" +319 83 evaluator """rankbased""" +319 84 
dataset """kinships""" +319 84 model """hole""" +319 84 loss """softplus""" +319 84 regularizer """no""" +319 84 optimizer """adam""" +319 84 training_loop """owa""" +319 84 negative_sampler """basic""" +319 84 evaluator """rankbased""" +319 85 dataset """kinships""" +319 85 model """hole""" +319 85 loss """softplus""" +319 85 regularizer """no""" +319 85 optimizer """adam""" +319 85 training_loop """owa""" +319 85 negative_sampler """basic""" +319 85 evaluator """rankbased""" +319 86 dataset """kinships""" +319 86 model """hole""" +319 86 loss """softplus""" +319 86 regularizer """no""" +319 86 optimizer """adam""" +319 86 training_loop """owa""" +319 86 negative_sampler """basic""" +319 86 evaluator """rankbased""" +319 87 dataset """kinships""" +319 87 model """hole""" +319 87 loss """softplus""" +319 87 regularizer """no""" +319 87 optimizer """adam""" +319 87 training_loop """owa""" +319 87 negative_sampler """basic""" +319 87 evaluator """rankbased""" +319 88 dataset """kinships""" +319 88 model """hole""" +319 88 loss """softplus""" +319 88 regularizer """no""" +319 88 optimizer """adam""" +319 88 training_loop """owa""" +319 88 negative_sampler """basic""" +319 88 evaluator """rankbased""" +319 89 dataset """kinships""" +319 89 model """hole""" +319 89 loss """softplus""" +319 89 regularizer """no""" +319 89 optimizer """adam""" +319 89 training_loop """owa""" +319 89 negative_sampler """basic""" +319 89 evaluator """rankbased""" +319 90 dataset """kinships""" +319 90 model """hole""" +319 90 loss """softplus""" +319 90 regularizer """no""" +319 90 optimizer """adam""" +319 90 training_loop """owa""" +319 90 negative_sampler """basic""" +319 90 evaluator """rankbased""" +319 91 dataset """kinships""" +319 91 model """hole""" +319 91 loss """softplus""" +319 91 regularizer """no""" +319 91 optimizer """adam""" +319 91 training_loop """owa""" +319 91 negative_sampler """basic""" +319 91 evaluator """rankbased""" +319 92 dataset """kinships""" +319 92 model 
"""hole""" +319 92 loss """softplus""" +319 92 regularizer """no""" +319 92 optimizer """adam""" +319 92 training_loop """owa""" +319 92 negative_sampler """basic""" +319 92 evaluator """rankbased""" +319 93 dataset """kinships""" +319 93 model """hole""" +319 93 loss """softplus""" +319 93 regularizer """no""" +319 93 optimizer """adam""" +319 93 training_loop """owa""" +319 93 negative_sampler """basic""" +319 93 evaluator """rankbased""" +319 94 dataset """kinships""" +319 94 model """hole""" +319 94 loss """softplus""" +319 94 regularizer """no""" +319 94 optimizer """adam""" +319 94 training_loop """owa""" +319 94 negative_sampler """basic""" +319 94 evaluator """rankbased""" +319 95 dataset """kinships""" +319 95 model """hole""" +319 95 loss """softplus""" +319 95 regularizer """no""" +319 95 optimizer """adam""" +319 95 training_loop """owa""" +319 95 negative_sampler """basic""" +319 95 evaluator """rankbased""" +319 96 dataset """kinships""" +319 96 model """hole""" +319 96 loss """softplus""" +319 96 regularizer """no""" +319 96 optimizer """adam""" +319 96 training_loop """owa""" +319 96 negative_sampler """basic""" +319 96 evaluator """rankbased""" +319 97 dataset """kinships""" +319 97 model """hole""" +319 97 loss """softplus""" +319 97 regularizer """no""" +319 97 optimizer """adam""" +319 97 training_loop """owa""" +319 97 negative_sampler """basic""" +319 97 evaluator """rankbased""" +319 98 dataset """kinships""" +319 98 model """hole""" +319 98 loss """softplus""" +319 98 regularizer """no""" +319 98 optimizer """adam""" +319 98 training_loop """owa""" +319 98 negative_sampler """basic""" +319 98 evaluator """rankbased""" +319 99 dataset """kinships""" +319 99 model """hole""" +319 99 loss """softplus""" +319 99 regularizer """no""" +319 99 optimizer """adam""" +319 99 training_loop """owa""" +319 99 negative_sampler """basic""" +319 99 evaluator """rankbased""" +319 100 dataset """kinships""" +319 100 model """hole""" +319 100 loss 
"""softplus""" +319 100 regularizer """no""" +319 100 optimizer """adam""" +319 100 training_loop """owa""" +319 100 negative_sampler """basic""" +319 100 evaluator """rankbased""" +320 1 model.embedding_dim 2.0 +320 1 optimizer.lr 0.023755275635385584 +320 1 negative_sampler.num_negs_per_pos 62.0 +320 1 training.batch_size 2.0 +320 2 model.embedding_dim 2.0 +320 2 optimizer.lr 0.01948218330793269 +320 2 negative_sampler.num_negs_per_pos 19.0 +320 2 training.batch_size 0.0 +320 3 model.embedding_dim 1.0 +320 3 optimizer.lr 0.013461419559095801 +320 3 negative_sampler.num_negs_per_pos 59.0 +320 3 training.batch_size 2.0 +320 4 model.embedding_dim 0.0 +320 4 optimizer.lr 0.02814460735809489 +320 4 negative_sampler.num_negs_per_pos 1.0 +320 4 training.batch_size 1.0 +320 5 model.embedding_dim 0.0 +320 5 optimizer.lr 0.07335805884178723 +320 5 negative_sampler.num_negs_per_pos 0.0 +320 5 training.batch_size 1.0 +320 6 model.embedding_dim 2.0 +320 6 optimizer.lr 0.005370425489734611 +320 6 negative_sampler.num_negs_per_pos 23.0 +320 6 training.batch_size 0.0 +320 7 model.embedding_dim 2.0 +320 7 optimizer.lr 0.0895046147786704 +320 7 negative_sampler.num_negs_per_pos 7.0 +320 7 training.batch_size 1.0 +320 8 model.embedding_dim 0.0 +320 8 optimizer.lr 0.0022112105102116064 +320 8 negative_sampler.num_negs_per_pos 6.0 +320 8 training.batch_size 1.0 +320 9 model.embedding_dim 0.0 +320 9 optimizer.lr 0.04118294031599887 +320 9 negative_sampler.num_negs_per_pos 36.0 +320 9 training.batch_size 0.0 +320 10 model.embedding_dim 0.0 +320 10 optimizer.lr 0.0024839701948784192 +320 10 negative_sampler.num_negs_per_pos 35.0 +320 10 training.batch_size 2.0 +320 11 model.embedding_dim 1.0 +320 11 optimizer.lr 0.001088302988134532 +320 11 negative_sampler.num_negs_per_pos 59.0 +320 11 training.batch_size 0.0 +320 12 model.embedding_dim 0.0 +320 12 optimizer.lr 0.019278406936373633 +320 12 negative_sampler.num_negs_per_pos 95.0 +320 12 training.batch_size 1.0 +320 13 
model.embedding_dim 1.0 +320 13 optimizer.lr 0.002208541553478843 +320 13 negative_sampler.num_negs_per_pos 81.0 +320 13 training.batch_size 0.0 +320 14 model.embedding_dim 2.0 +320 14 optimizer.lr 0.009981749861265338 +320 14 negative_sampler.num_negs_per_pos 16.0 +320 14 training.batch_size 1.0 +320 15 model.embedding_dim 2.0 +320 15 optimizer.lr 0.01126266303485747 +320 15 negative_sampler.num_negs_per_pos 75.0 +320 15 training.batch_size 1.0 +320 16 model.embedding_dim 0.0 +320 16 optimizer.lr 0.027298870536815924 +320 16 negative_sampler.num_negs_per_pos 28.0 +320 16 training.batch_size 0.0 +320 17 model.embedding_dim 0.0 +320 17 optimizer.lr 0.004033903775345477 +320 17 negative_sampler.num_negs_per_pos 6.0 +320 17 training.batch_size 2.0 +320 18 model.embedding_dim 2.0 +320 18 optimizer.lr 0.09347038217657493 +320 18 negative_sampler.num_negs_per_pos 72.0 +320 18 training.batch_size 0.0 +320 19 model.embedding_dim 0.0 +320 19 optimizer.lr 0.021784263425719402 +320 19 negative_sampler.num_negs_per_pos 88.0 +320 19 training.batch_size 0.0 +320 20 model.embedding_dim 1.0 +320 20 optimizer.lr 0.002687512278671736 +320 20 negative_sampler.num_negs_per_pos 22.0 +320 20 training.batch_size 1.0 +320 21 model.embedding_dim 0.0 +320 21 optimizer.lr 0.0018352295403106923 +320 21 negative_sampler.num_negs_per_pos 63.0 +320 21 training.batch_size 1.0 +320 22 model.embedding_dim 0.0 +320 22 optimizer.lr 0.030963969490798802 +320 22 negative_sampler.num_negs_per_pos 97.0 +320 22 training.batch_size 1.0 +320 23 model.embedding_dim 1.0 +320 23 optimizer.lr 0.0015501521836690198 +320 23 negative_sampler.num_negs_per_pos 97.0 +320 23 training.batch_size 1.0 +320 24 model.embedding_dim 0.0 +320 24 optimizer.lr 0.09156215835082347 +320 24 negative_sampler.num_negs_per_pos 62.0 +320 24 training.batch_size 2.0 +320 25 model.embedding_dim 2.0 +320 25 optimizer.lr 0.011313777685295935 +320 25 negative_sampler.num_negs_per_pos 7.0 +320 25 training.batch_size 0.0 +320 26 
model.embedding_dim 0.0 +320 26 optimizer.lr 0.01390166229414616 +320 26 negative_sampler.num_negs_per_pos 26.0 +320 26 training.batch_size 0.0 +320 27 model.embedding_dim 0.0 +320 27 optimizer.lr 0.005859598945613773 +320 27 negative_sampler.num_negs_per_pos 98.0 +320 27 training.batch_size 2.0 +320 28 model.embedding_dim 2.0 +320 28 optimizer.lr 0.012586565698855547 +320 28 negative_sampler.num_negs_per_pos 40.0 +320 28 training.batch_size 2.0 +320 29 model.embedding_dim 2.0 +320 29 optimizer.lr 0.002381660229578034 +320 29 negative_sampler.num_negs_per_pos 34.0 +320 29 training.batch_size 0.0 +320 30 model.embedding_dim 0.0 +320 30 optimizer.lr 0.07480280935810044 +320 30 negative_sampler.num_negs_per_pos 68.0 +320 30 training.batch_size 1.0 +320 31 model.embedding_dim 1.0 +320 31 optimizer.lr 0.002359219689375189 +320 31 negative_sampler.num_negs_per_pos 76.0 +320 31 training.batch_size 2.0 +320 32 model.embedding_dim 1.0 +320 32 optimizer.lr 0.0013158337525605716 +320 32 negative_sampler.num_negs_per_pos 4.0 +320 32 training.batch_size 1.0 +320 33 model.embedding_dim 1.0 +320 33 optimizer.lr 0.005534598380556846 +320 33 negative_sampler.num_negs_per_pos 21.0 +320 33 training.batch_size 1.0 +320 34 model.embedding_dim 0.0 +320 34 optimizer.lr 0.01172369244411496 +320 34 negative_sampler.num_negs_per_pos 24.0 +320 34 training.batch_size 1.0 +320 35 model.embedding_dim 1.0 +320 35 optimizer.lr 0.00680103698325626 +320 35 negative_sampler.num_negs_per_pos 41.0 +320 35 training.batch_size 1.0 +320 36 model.embedding_dim 1.0 +320 36 optimizer.lr 0.06305589847299248 +320 36 negative_sampler.num_negs_per_pos 12.0 +320 36 training.batch_size 0.0 +320 37 model.embedding_dim 2.0 +320 37 optimizer.lr 0.006997637423716089 +320 37 negative_sampler.num_negs_per_pos 32.0 +320 37 training.batch_size 1.0 +320 38 model.embedding_dim 0.0 +320 38 optimizer.lr 0.002306294890549247 +320 38 negative_sampler.num_negs_per_pos 36.0 +320 38 training.batch_size 2.0 +320 39 
model.embedding_dim 0.0 +320 39 optimizer.lr 0.0192749941648947 +320 39 negative_sampler.num_negs_per_pos 13.0 +320 39 training.batch_size 2.0 +320 40 model.embedding_dim 1.0 +320 40 optimizer.lr 0.008861574117485527 +320 40 negative_sampler.num_negs_per_pos 43.0 +320 40 training.batch_size 1.0 +320 41 model.embedding_dim 0.0 +320 41 optimizer.lr 0.009815629304536133 +320 41 negative_sampler.num_negs_per_pos 3.0 +320 41 training.batch_size 1.0 +320 42 model.embedding_dim 1.0 +320 42 optimizer.lr 0.07112498837161606 +320 42 negative_sampler.num_negs_per_pos 51.0 +320 42 training.batch_size 1.0 +320 43 model.embedding_dim 0.0 +320 43 optimizer.lr 0.008030952739867422 +320 43 negative_sampler.num_negs_per_pos 94.0 +320 43 training.batch_size 0.0 +320 44 model.embedding_dim 1.0 +320 44 optimizer.lr 0.06985337508268268 +320 44 negative_sampler.num_negs_per_pos 20.0 +320 44 training.batch_size 1.0 +320 45 model.embedding_dim 2.0 +320 45 optimizer.lr 0.0303387114810345 +320 45 negative_sampler.num_negs_per_pos 98.0 +320 45 training.batch_size 1.0 +320 46 model.embedding_dim 2.0 +320 46 optimizer.lr 0.02304715185940602 +320 46 negative_sampler.num_negs_per_pos 57.0 +320 46 training.batch_size 2.0 +320 47 model.embedding_dim 0.0 +320 47 optimizer.lr 0.08273136101490351 +320 47 negative_sampler.num_negs_per_pos 1.0 +320 47 training.batch_size 2.0 +320 48 model.embedding_dim 1.0 +320 48 optimizer.lr 0.001116937664364081 +320 48 negative_sampler.num_negs_per_pos 24.0 +320 48 training.batch_size 1.0 +320 49 model.embedding_dim 1.0 +320 49 optimizer.lr 0.04312580017890479 +320 49 negative_sampler.num_negs_per_pos 48.0 +320 49 training.batch_size 2.0 +320 50 model.embedding_dim 1.0 +320 50 optimizer.lr 0.007920955021253156 +320 50 negative_sampler.num_negs_per_pos 78.0 +320 50 training.batch_size 0.0 +320 51 model.embedding_dim 0.0 +320 51 optimizer.lr 0.0013810780062639579 +320 51 negative_sampler.num_negs_per_pos 50.0 +320 51 training.batch_size 1.0 +320 52 model.embedding_dim 
0.0 +320 52 optimizer.lr 0.019164607577145557 +320 52 negative_sampler.num_negs_per_pos 20.0 +320 52 training.batch_size 1.0 +320 53 model.embedding_dim 1.0 +320 53 optimizer.lr 0.004947917771046533 +320 53 negative_sampler.num_negs_per_pos 53.0 +320 53 training.batch_size 0.0 +320 54 model.embedding_dim 0.0 +320 54 optimizer.lr 0.0045346050679876794 +320 54 negative_sampler.num_negs_per_pos 45.0 +320 54 training.batch_size 1.0 +320 55 model.embedding_dim 1.0 +320 55 optimizer.lr 0.007170555685113202 +320 55 negative_sampler.num_negs_per_pos 89.0 +320 55 training.batch_size 2.0 +320 56 model.embedding_dim 1.0 +320 56 optimizer.lr 0.05750593831088282 +320 56 negative_sampler.num_negs_per_pos 31.0 +320 56 training.batch_size 2.0 +320 57 model.embedding_dim 0.0 +320 57 optimizer.lr 0.05695007977069503 +320 57 negative_sampler.num_negs_per_pos 71.0 +320 57 training.batch_size 2.0 +320 58 model.embedding_dim 2.0 +320 58 optimizer.lr 0.02751091718018492 +320 58 negative_sampler.num_negs_per_pos 18.0 +320 58 training.batch_size 1.0 +320 59 model.embedding_dim 0.0 +320 59 optimizer.lr 0.0077502692449144235 +320 59 negative_sampler.num_negs_per_pos 79.0 +320 59 training.batch_size 1.0 +320 60 model.embedding_dim 1.0 +320 60 optimizer.lr 0.05896244122587268 +320 60 negative_sampler.num_negs_per_pos 55.0 +320 60 training.batch_size 0.0 +320 61 model.embedding_dim 0.0 +320 61 optimizer.lr 0.06217800482060002 +320 61 negative_sampler.num_negs_per_pos 60.0 +320 61 training.batch_size 0.0 +320 62 model.embedding_dim 1.0 +320 62 optimizer.lr 0.001331631261512199 +320 62 negative_sampler.num_negs_per_pos 40.0 +320 62 training.batch_size 1.0 +320 63 model.embedding_dim 1.0 +320 63 optimizer.lr 0.02636839003257996 +320 63 negative_sampler.num_negs_per_pos 26.0 +320 63 training.batch_size 2.0 +320 64 model.embedding_dim 1.0 +320 64 optimizer.lr 0.016979956488315528 +320 64 negative_sampler.num_negs_per_pos 9.0 +320 64 training.batch_size 2.0 +320 65 model.embedding_dim 0.0 +320 65 
optimizer.lr 0.01918889880909252 +320 65 negative_sampler.num_negs_per_pos 61.0 +320 65 training.batch_size 0.0 +320 66 model.embedding_dim 1.0 +320 66 optimizer.lr 0.09181433063931375 +320 66 negative_sampler.num_negs_per_pos 29.0 +320 66 training.batch_size 0.0 +320 67 model.embedding_dim 0.0 +320 67 optimizer.lr 0.006640648137724582 +320 67 negative_sampler.num_negs_per_pos 62.0 +320 67 training.batch_size 0.0 +320 68 model.embedding_dim 0.0 +320 68 optimizer.lr 0.001860698745771461 +320 68 negative_sampler.num_negs_per_pos 78.0 +320 68 training.batch_size 1.0 +320 69 model.embedding_dim 1.0 +320 69 optimizer.lr 0.0029422188749796993 +320 69 negative_sampler.num_negs_per_pos 51.0 +320 69 training.batch_size 2.0 +320 70 model.embedding_dim 2.0 +320 70 optimizer.lr 0.0025048765694307053 +320 70 negative_sampler.num_negs_per_pos 10.0 +320 70 training.batch_size 0.0 +320 71 model.embedding_dim 0.0 +320 71 optimizer.lr 0.010032614623200398 +320 71 negative_sampler.num_negs_per_pos 56.0 +320 71 training.batch_size 2.0 +320 72 model.embedding_dim 2.0 +320 72 optimizer.lr 0.092030924820419 +320 72 negative_sampler.num_negs_per_pos 2.0 +320 72 training.batch_size 1.0 +320 73 model.embedding_dim 2.0 +320 73 optimizer.lr 0.0020109670770633364 +320 73 negative_sampler.num_negs_per_pos 84.0 +320 73 training.batch_size 0.0 +320 74 model.embedding_dim 2.0 +320 74 optimizer.lr 0.0040201185970978525 +320 74 negative_sampler.num_negs_per_pos 94.0 +320 74 training.batch_size 2.0 +320 75 model.embedding_dim 2.0 +320 75 optimizer.lr 0.0033713512751743046 +320 75 negative_sampler.num_negs_per_pos 84.0 +320 75 training.batch_size 2.0 +320 76 model.embedding_dim 0.0 +320 76 optimizer.lr 0.05882418276178101 +320 76 negative_sampler.num_negs_per_pos 38.0 +320 76 training.batch_size 2.0 +320 77 model.embedding_dim 2.0 +320 77 optimizer.lr 0.012926650985449128 +320 77 negative_sampler.num_negs_per_pos 22.0 +320 77 training.batch_size 2.0 +320 78 model.embedding_dim 2.0 +320 78 optimizer.lr 
0.003737069517130978 +320 78 negative_sampler.num_negs_per_pos 32.0 +320 78 training.batch_size 2.0 +320 79 model.embedding_dim 1.0 +320 79 optimizer.lr 0.0017638924480273188 +320 79 negative_sampler.num_negs_per_pos 61.0 +320 79 training.batch_size 0.0 +320 80 model.embedding_dim 1.0 +320 80 optimizer.lr 0.06265687805082391 +320 80 negative_sampler.num_negs_per_pos 85.0 +320 80 training.batch_size 2.0 +320 81 model.embedding_dim 2.0 +320 81 optimizer.lr 0.02937486638413246 +320 81 negative_sampler.num_negs_per_pos 33.0 +320 81 training.batch_size 2.0 +320 82 model.embedding_dim 1.0 +320 82 optimizer.lr 0.05007682003520231 +320 82 negative_sampler.num_negs_per_pos 94.0 +320 82 training.batch_size 1.0 +320 83 model.embedding_dim 0.0 +320 83 optimizer.lr 0.012768102938816958 +320 83 negative_sampler.num_negs_per_pos 43.0 +320 83 training.batch_size 0.0 +320 84 model.embedding_dim 2.0 +320 84 optimizer.lr 0.005484355921017607 +320 84 negative_sampler.num_negs_per_pos 82.0 +320 84 training.batch_size 2.0 +320 85 model.embedding_dim 2.0 +320 85 optimizer.lr 0.0055356618580840525 +320 85 negative_sampler.num_negs_per_pos 62.0 +320 85 training.batch_size 2.0 +320 86 model.embedding_dim 0.0 +320 86 optimizer.lr 0.0015575298416456544 +320 86 negative_sampler.num_negs_per_pos 27.0 +320 86 training.batch_size 1.0 +320 87 model.embedding_dim 2.0 +320 87 optimizer.lr 0.0010748537486975959 +320 87 negative_sampler.num_negs_per_pos 13.0 +320 87 training.batch_size 1.0 +320 88 model.embedding_dim 1.0 +320 88 optimizer.lr 0.05909126479923148 +320 88 negative_sampler.num_negs_per_pos 61.0 +320 88 training.batch_size 2.0 +320 89 model.embedding_dim 0.0 +320 89 optimizer.lr 0.005422658453027775 +320 89 negative_sampler.num_negs_per_pos 1.0 +320 89 training.batch_size 1.0 +320 90 model.embedding_dim 0.0 +320 90 optimizer.lr 0.0034278830415185252 +320 90 negative_sampler.num_negs_per_pos 66.0 +320 90 training.batch_size 2.0 +320 91 model.embedding_dim 2.0 +320 91 optimizer.lr 
0.006098600175169456 +320 91 negative_sampler.num_negs_per_pos 34.0 +320 91 training.batch_size 0.0 +320 92 model.embedding_dim 0.0 +320 92 optimizer.lr 0.04797230518532274 +320 92 negative_sampler.num_negs_per_pos 72.0 +320 92 training.batch_size 1.0 +320 93 model.embedding_dim 2.0 +320 93 optimizer.lr 0.016901975628353293 +320 93 negative_sampler.num_negs_per_pos 46.0 +320 93 training.batch_size 1.0 +320 94 model.embedding_dim 2.0 +320 94 optimizer.lr 0.02386243097723434 +320 94 negative_sampler.num_negs_per_pos 67.0 +320 94 training.batch_size 2.0 +320 95 model.embedding_dim 1.0 +320 95 optimizer.lr 0.029633900366930387 +320 95 negative_sampler.num_negs_per_pos 90.0 +320 95 training.batch_size 0.0 +320 96 model.embedding_dim 0.0 +320 96 optimizer.lr 0.0023979116344161806 +320 96 negative_sampler.num_negs_per_pos 47.0 +320 96 training.batch_size 1.0 +320 97 model.embedding_dim 2.0 +320 97 optimizer.lr 0.09101141210438571 +320 97 negative_sampler.num_negs_per_pos 32.0 +320 97 training.batch_size 0.0 +320 98 model.embedding_dim 1.0 +320 98 optimizer.lr 0.003837741997489426 +320 98 negative_sampler.num_negs_per_pos 89.0 +320 98 training.batch_size 1.0 +320 99 model.embedding_dim 1.0 +320 99 optimizer.lr 0.001776570344525159 +320 99 negative_sampler.num_negs_per_pos 14.0 +320 99 training.batch_size 0.0 +320 100 model.embedding_dim 0.0 +320 100 optimizer.lr 0.003052579250672331 +320 100 negative_sampler.num_negs_per_pos 59.0 +320 100 training.batch_size 1.0 +320 1 dataset """kinships""" +320 1 model """hole""" +320 1 loss """bceaftersigmoid""" +320 1 regularizer """no""" +320 1 optimizer """adam""" +320 1 training_loop """owa""" +320 1 negative_sampler """basic""" +320 1 evaluator """rankbased""" +320 2 dataset """kinships""" +320 2 model """hole""" +320 2 loss """bceaftersigmoid""" +320 2 regularizer """no""" +320 2 optimizer """adam""" +320 2 training_loop """owa""" +320 2 negative_sampler """basic""" +320 2 evaluator """rankbased""" +320 3 dataset """kinships""" 
+320 3 model """hole""" +320 3 loss """bceaftersigmoid""" +320 3 regularizer """no""" +320 3 optimizer """adam""" +320 3 training_loop """owa""" +320 3 negative_sampler """basic""" +320 3 evaluator """rankbased""" +320 4 dataset """kinships""" +320 4 model """hole""" +320 4 loss """bceaftersigmoid""" +320 4 regularizer """no""" +320 4 optimizer """adam""" +320 4 training_loop """owa""" +320 4 negative_sampler """basic""" +320 4 evaluator """rankbased""" +320 5 dataset """kinships""" +320 5 model """hole""" +320 5 loss """bceaftersigmoid""" +320 5 regularizer """no""" +320 5 optimizer """adam""" +320 5 training_loop """owa""" +320 5 negative_sampler """basic""" +320 5 evaluator """rankbased""" +320 6 dataset """kinships""" +320 6 model """hole""" +320 6 loss """bceaftersigmoid""" +320 6 regularizer """no""" +320 6 optimizer """adam""" +320 6 training_loop """owa""" +320 6 negative_sampler """basic""" +320 6 evaluator """rankbased""" +320 7 dataset """kinships""" +320 7 model """hole""" +320 7 loss """bceaftersigmoid""" +320 7 regularizer """no""" +320 7 optimizer """adam""" +320 7 training_loop """owa""" +320 7 negative_sampler """basic""" +320 7 evaluator """rankbased""" +320 8 dataset """kinships""" +320 8 model """hole""" +320 8 loss """bceaftersigmoid""" +320 8 regularizer """no""" +320 8 optimizer """adam""" +320 8 training_loop """owa""" +320 8 negative_sampler """basic""" +320 8 evaluator """rankbased""" +320 9 dataset """kinships""" +320 9 model """hole""" +320 9 loss """bceaftersigmoid""" +320 9 regularizer """no""" +320 9 optimizer """adam""" +320 9 training_loop """owa""" +320 9 negative_sampler """basic""" +320 9 evaluator """rankbased""" +320 10 dataset """kinships""" +320 10 model """hole""" +320 10 loss """bceaftersigmoid""" +320 10 regularizer """no""" +320 10 optimizer """adam""" +320 10 training_loop """owa""" +320 10 negative_sampler """basic""" +320 10 evaluator """rankbased""" +320 11 dataset """kinships""" +320 11 model """hole""" +320 11 loss 
"""bceaftersigmoid""" +320 11 regularizer """no""" +320 11 optimizer """adam""" +320 11 training_loop """owa""" +320 11 negative_sampler """basic""" +320 11 evaluator """rankbased""" +320 12 dataset """kinships""" +320 12 model """hole""" +320 12 loss """bceaftersigmoid""" +320 12 regularizer """no""" +320 12 optimizer """adam""" +320 12 training_loop """owa""" +320 12 negative_sampler """basic""" +320 12 evaluator """rankbased""" +320 13 dataset """kinships""" +320 13 model """hole""" +320 13 loss """bceaftersigmoid""" +320 13 regularizer """no""" +320 13 optimizer """adam""" +320 13 training_loop """owa""" +320 13 negative_sampler """basic""" +320 13 evaluator """rankbased""" +320 14 dataset """kinships""" +320 14 model """hole""" +320 14 loss """bceaftersigmoid""" +320 14 regularizer """no""" +320 14 optimizer """adam""" +320 14 training_loop """owa""" +320 14 negative_sampler """basic""" +320 14 evaluator """rankbased""" +320 15 dataset """kinships""" +320 15 model """hole""" +320 15 loss """bceaftersigmoid""" +320 15 regularizer """no""" +320 15 optimizer """adam""" +320 15 training_loop """owa""" +320 15 negative_sampler """basic""" +320 15 evaluator """rankbased""" +320 16 dataset """kinships""" +320 16 model """hole""" +320 16 loss """bceaftersigmoid""" +320 16 regularizer """no""" +320 16 optimizer """adam""" +320 16 training_loop """owa""" +320 16 negative_sampler """basic""" +320 16 evaluator """rankbased""" +320 17 dataset """kinships""" +320 17 model """hole""" +320 17 loss """bceaftersigmoid""" +320 17 regularizer """no""" +320 17 optimizer """adam""" +320 17 training_loop """owa""" +320 17 negative_sampler """basic""" +320 17 evaluator """rankbased""" +320 18 dataset """kinships""" +320 18 model """hole""" +320 18 loss """bceaftersigmoid""" +320 18 regularizer """no""" +320 18 optimizer """adam""" +320 18 training_loop """owa""" +320 18 negative_sampler """basic""" +320 18 evaluator """rankbased""" +320 19 dataset """kinships""" +320 19 model 
"""hole""" +320 19 loss """bceaftersigmoid""" +320 19 regularizer """no""" +320 19 optimizer """adam""" +320 19 training_loop """owa""" +320 19 negative_sampler """basic""" +320 19 evaluator """rankbased""" +320 20 dataset """kinships""" +320 20 model """hole""" +320 20 loss """bceaftersigmoid""" +320 20 regularizer """no""" +320 20 optimizer """adam""" +320 20 training_loop """owa""" +320 20 negative_sampler """basic""" +320 20 evaluator """rankbased""" +320 21 dataset """kinships""" +320 21 model """hole""" +320 21 loss """bceaftersigmoid""" +320 21 regularizer """no""" +320 21 optimizer """adam""" +320 21 training_loop """owa""" +320 21 negative_sampler """basic""" +320 21 evaluator """rankbased""" +320 22 dataset """kinships""" +320 22 model """hole""" +320 22 loss """bceaftersigmoid""" +320 22 regularizer """no""" +320 22 optimizer """adam""" +320 22 training_loop """owa""" +320 22 negative_sampler """basic""" +320 22 evaluator """rankbased""" +320 23 dataset """kinships""" +320 23 model """hole""" +320 23 loss """bceaftersigmoid""" +320 23 regularizer """no""" +320 23 optimizer """adam""" +320 23 training_loop """owa""" +320 23 negative_sampler """basic""" +320 23 evaluator """rankbased""" +320 24 dataset """kinships""" +320 24 model """hole""" +320 24 loss """bceaftersigmoid""" +320 24 regularizer """no""" +320 24 optimizer """adam""" +320 24 training_loop """owa""" +320 24 negative_sampler """basic""" +320 24 evaluator """rankbased""" +320 25 dataset """kinships""" +320 25 model """hole""" +320 25 loss """bceaftersigmoid""" +320 25 regularizer """no""" +320 25 optimizer """adam""" +320 25 training_loop """owa""" +320 25 negative_sampler """basic""" +320 25 evaluator """rankbased""" +320 26 dataset """kinships""" +320 26 model """hole""" +320 26 loss """bceaftersigmoid""" +320 26 regularizer """no""" +320 26 optimizer """adam""" +320 26 training_loop """owa""" +320 26 negative_sampler """basic""" +320 26 evaluator """rankbased""" +320 27 dataset 
"""kinships""" +320 27 model """hole""" +320 27 loss """bceaftersigmoid""" +320 27 regularizer """no""" +320 27 optimizer """adam""" +320 27 training_loop """owa""" +320 27 negative_sampler """basic""" +320 27 evaluator """rankbased""" +320 28 dataset """kinships""" +320 28 model """hole""" +320 28 loss """bceaftersigmoid""" +320 28 regularizer """no""" +320 28 optimizer """adam""" +320 28 training_loop """owa""" +320 28 negative_sampler """basic""" +320 28 evaluator """rankbased""" +320 29 dataset """kinships""" +320 29 model """hole""" +320 29 loss """bceaftersigmoid""" +320 29 regularizer """no""" +320 29 optimizer """adam""" +320 29 training_loop """owa""" +320 29 negative_sampler """basic""" +320 29 evaluator """rankbased""" +320 30 dataset """kinships""" +320 30 model """hole""" +320 30 loss """bceaftersigmoid""" +320 30 regularizer """no""" +320 30 optimizer """adam""" +320 30 training_loop """owa""" +320 30 negative_sampler """basic""" +320 30 evaluator """rankbased""" +320 31 dataset """kinships""" +320 31 model """hole""" +320 31 loss """bceaftersigmoid""" +320 31 regularizer """no""" +320 31 optimizer """adam""" +320 31 training_loop """owa""" +320 31 negative_sampler """basic""" +320 31 evaluator """rankbased""" +320 32 dataset """kinships""" +320 32 model """hole""" +320 32 loss """bceaftersigmoid""" +320 32 regularizer """no""" +320 32 optimizer """adam""" +320 32 training_loop """owa""" +320 32 negative_sampler """basic""" +320 32 evaluator """rankbased""" +320 33 dataset """kinships""" +320 33 model """hole""" +320 33 loss """bceaftersigmoid""" +320 33 regularizer """no""" +320 33 optimizer """adam""" +320 33 training_loop """owa""" +320 33 negative_sampler """basic""" +320 33 evaluator """rankbased""" +320 34 dataset """kinships""" +320 34 model """hole""" +320 34 loss """bceaftersigmoid""" +320 34 regularizer """no""" +320 34 optimizer """adam""" +320 34 training_loop """owa""" +320 34 negative_sampler """basic""" +320 34 evaluator """rankbased""" 
+320 35 dataset """kinships""" +320 35 model """hole""" +320 35 loss """bceaftersigmoid""" +320 35 regularizer """no""" +320 35 optimizer """adam""" +320 35 training_loop """owa""" +320 35 negative_sampler """basic""" +320 35 evaluator """rankbased""" +320 36 dataset """kinships""" +320 36 model """hole""" +320 36 loss """bceaftersigmoid""" +320 36 regularizer """no""" +320 36 optimizer """adam""" +320 36 training_loop """owa""" +320 36 negative_sampler """basic""" +320 36 evaluator """rankbased""" +320 37 dataset """kinships""" +320 37 model """hole""" +320 37 loss """bceaftersigmoid""" +320 37 regularizer """no""" +320 37 optimizer """adam""" +320 37 training_loop """owa""" +320 37 negative_sampler """basic""" +320 37 evaluator """rankbased""" +320 38 dataset """kinships""" +320 38 model """hole""" +320 38 loss """bceaftersigmoid""" +320 38 regularizer """no""" +320 38 optimizer """adam""" +320 38 training_loop """owa""" +320 38 negative_sampler """basic""" +320 38 evaluator """rankbased""" +320 39 dataset """kinships""" +320 39 model """hole""" +320 39 loss """bceaftersigmoid""" +320 39 regularizer """no""" +320 39 optimizer """adam""" +320 39 training_loop """owa""" +320 39 negative_sampler """basic""" +320 39 evaluator """rankbased""" +320 40 dataset """kinships""" +320 40 model """hole""" +320 40 loss """bceaftersigmoid""" +320 40 regularizer """no""" +320 40 optimizer """adam""" +320 40 training_loop """owa""" +320 40 negative_sampler """basic""" +320 40 evaluator """rankbased""" +320 41 dataset """kinships""" +320 41 model """hole""" +320 41 loss """bceaftersigmoid""" +320 41 regularizer """no""" +320 41 optimizer """adam""" +320 41 training_loop """owa""" +320 41 negative_sampler """basic""" +320 41 evaluator """rankbased""" +320 42 dataset """kinships""" +320 42 model """hole""" +320 42 loss """bceaftersigmoid""" +320 42 regularizer """no""" +320 42 optimizer """adam""" +320 42 training_loop """owa""" +320 42 negative_sampler """basic""" +320 42 evaluator 
"""rankbased""" +320 43 dataset """kinships""" +320 43 model """hole""" +320 43 loss """bceaftersigmoid""" +320 43 regularizer """no""" +320 43 optimizer """adam""" +320 43 training_loop """owa""" +320 43 negative_sampler """basic""" +320 43 evaluator """rankbased""" +320 44 dataset """kinships""" +320 44 model """hole""" +320 44 loss """bceaftersigmoid""" +320 44 regularizer """no""" +320 44 optimizer """adam""" +320 44 training_loop """owa""" +320 44 negative_sampler """basic""" +320 44 evaluator """rankbased""" +320 45 dataset """kinships""" +320 45 model """hole""" +320 45 loss """bceaftersigmoid""" +320 45 regularizer """no""" +320 45 optimizer """adam""" +320 45 training_loop """owa""" +320 45 negative_sampler """basic""" +320 45 evaluator """rankbased""" +320 46 dataset """kinships""" +320 46 model """hole""" +320 46 loss """bceaftersigmoid""" +320 46 regularizer """no""" +320 46 optimizer """adam""" +320 46 training_loop """owa""" +320 46 negative_sampler """basic""" +320 46 evaluator """rankbased""" +320 47 dataset """kinships""" +320 47 model """hole""" +320 47 loss """bceaftersigmoid""" +320 47 regularizer """no""" +320 47 optimizer """adam""" +320 47 training_loop """owa""" +320 47 negative_sampler """basic""" +320 47 evaluator """rankbased""" +320 48 dataset """kinships""" +320 48 model """hole""" +320 48 loss """bceaftersigmoid""" +320 48 regularizer """no""" +320 48 optimizer """adam""" +320 48 training_loop """owa""" +320 48 negative_sampler """basic""" +320 48 evaluator """rankbased""" +320 49 dataset """kinships""" +320 49 model """hole""" +320 49 loss """bceaftersigmoid""" +320 49 regularizer """no""" +320 49 optimizer """adam""" +320 49 training_loop """owa""" +320 49 negative_sampler """basic""" +320 49 evaluator """rankbased""" +320 50 dataset """kinships""" +320 50 model """hole""" +320 50 loss """bceaftersigmoid""" +320 50 regularizer """no""" +320 50 optimizer """adam""" +320 50 training_loop """owa""" +320 50 negative_sampler """basic""" 
+320 50 evaluator """rankbased""" +320 51 dataset """kinships""" +320 51 model """hole""" +320 51 loss """bceaftersigmoid""" +320 51 regularizer """no""" +320 51 optimizer """adam""" +320 51 training_loop """owa""" +320 51 negative_sampler """basic""" +320 51 evaluator """rankbased""" +320 52 dataset """kinships""" +320 52 model """hole""" +320 52 loss """bceaftersigmoid""" +320 52 regularizer """no""" +320 52 optimizer """adam""" +320 52 training_loop """owa""" +320 52 negative_sampler """basic""" +320 52 evaluator """rankbased""" +320 53 dataset """kinships""" +320 53 model """hole""" +320 53 loss """bceaftersigmoid""" +320 53 regularizer """no""" +320 53 optimizer """adam""" +320 53 training_loop """owa""" +320 53 negative_sampler """basic""" +320 53 evaluator """rankbased""" +320 54 dataset """kinships""" +320 54 model """hole""" +320 54 loss """bceaftersigmoid""" +320 54 regularizer """no""" +320 54 optimizer """adam""" +320 54 training_loop """owa""" +320 54 negative_sampler """basic""" +320 54 evaluator """rankbased""" +320 55 dataset """kinships""" +320 55 model """hole""" +320 55 loss """bceaftersigmoid""" +320 55 regularizer """no""" +320 55 optimizer """adam""" +320 55 training_loop """owa""" +320 55 negative_sampler """basic""" +320 55 evaluator """rankbased""" +320 56 dataset """kinships""" +320 56 model """hole""" +320 56 loss """bceaftersigmoid""" +320 56 regularizer """no""" +320 56 optimizer """adam""" +320 56 training_loop """owa""" +320 56 negative_sampler """basic""" +320 56 evaluator """rankbased""" +320 57 dataset """kinships""" +320 57 model """hole""" +320 57 loss """bceaftersigmoid""" +320 57 regularizer """no""" +320 57 optimizer """adam""" +320 57 training_loop """owa""" +320 57 negative_sampler """basic""" +320 57 evaluator """rankbased""" +320 58 dataset """kinships""" +320 58 model """hole""" +320 58 loss """bceaftersigmoid""" +320 58 regularizer """no""" +320 58 optimizer """adam""" +320 58 training_loop """owa""" +320 58 
negative_sampler """basic""" +320 58 evaluator """rankbased""" +320 59 dataset """kinships""" +320 59 model """hole""" +320 59 loss """bceaftersigmoid""" +320 59 regularizer """no""" +320 59 optimizer """adam""" +320 59 training_loop """owa""" +320 59 negative_sampler """basic""" +320 59 evaluator """rankbased""" +320 60 dataset """kinships""" +320 60 model """hole""" +320 60 loss """bceaftersigmoid""" +320 60 regularizer """no""" +320 60 optimizer """adam""" +320 60 training_loop """owa""" +320 60 negative_sampler """basic""" +320 60 evaluator """rankbased""" +320 61 dataset """kinships""" +320 61 model """hole""" +320 61 loss """bceaftersigmoid""" +320 61 regularizer """no""" +320 61 optimizer """adam""" +320 61 training_loop """owa""" +320 61 negative_sampler """basic""" +320 61 evaluator """rankbased""" +320 62 dataset """kinships""" +320 62 model """hole""" +320 62 loss """bceaftersigmoid""" +320 62 regularizer """no""" +320 62 optimizer """adam""" +320 62 training_loop """owa""" +320 62 negative_sampler """basic""" +320 62 evaluator """rankbased""" +320 63 dataset """kinships""" +320 63 model """hole""" +320 63 loss """bceaftersigmoid""" +320 63 regularizer """no""" +320 63 optimizer """adam""" +320 63 training_loop """owa""" +320 63 negative_sampler """basic""" +320 63 evaluator """rankbased""" +320 64 dataset """kinships""" +320 64 model """hole""" +320 64 loss """bceaftersigmoid""" +320 64 regularizer """no""" +320 64 optimizer """adam""" +320 64 training_loop """owa""" +320 64 negative_sampler """basic""" +320 64 evaluator """rankbased""" +320 65 dataset """kinships""" +320 65 model """hole""" +320 65 loss """bceaftersigmoid""" +320 65 regularizer """no""" +320 65 optimizer """adam""" +320 65 training_loop """owa""" +320 65 negative_sampler """basic""" +320 65 evaluator """rankbased""" +320 66 dataset """kinships""" +320 66 model """hole""" +320 66 loss """bceaftersigmoid""" +320 66 regularizer """no""" +320 66 optimizer """adam""" +320 66 training_loop 
"""owa""" +320 66 negative_sampler """basic""" +320 66 evaluator """rankbased""" +320 67 dataset """kinships""" +320 67 model """hole""" +320 67 loss """bceaftersigmoid""" +320 67 regularizer """no""" +320 67 optimizer """adam""" +320 67 training_loop """owa""" +320 67 negative_sampler """basic""" +320 67 evaluator """rankbased""" +320 68 dataset """kinships""" +320 68 model """hole""" +320 68 loss """bceaftersigmoid""" +320 68 regularizer """no""" +320 68 optimizer """adam""" +320 68 training_loop """owa""" +320 68 negative_sampler """basic""" +320 68 evaluator """rankbased""" +320 69 dataset """kinships""" +320 69 model """hole""" +320 69 loss """bceaftersigmoid""" +320 69 regularizer """no""" +320 69 optimizer """adam""" +320 69 training_loop """owa""" +320 69 negative_sampler """basic""" +320 69 evaluator """rankbased""" +320 70 dataset """kinships""" +320 70 model """hole""" +320 70 loss """bceaftersigmoid""" +320 70 regularizer """no""" +320 70 optimizer """adam""" +320 70 training_loop """owa""" +320 70 negative_sampler """basic""" +320 70 evaluator """rankbased""" +320 71 dataset """kinships""" +320 71 model """hole""" +320 71 loss """bceaftersigmoid""" +320 71 regularizer """no""" +320 71 optimizer """adam""" +320 71 training_loop """owa""" +320 71 negative_sampler """basic""" +320 71 evaluator """rankbased""" +320 72 dataset """kinships""" +320 72 model """hole""" +320 72 loss """bceaftersigmoid""" +320 72 regularizer """no""" +320 72 optimizer """adam""" +320 72 training_loop """owa""" +320 72 negative_sampler """basic""" +320 72 evaluator """rankbased""" +320 73 dataset """kinships""" +320 73 model """hole""" +320 73 loss """bceaftersigmoid""" +320 73 regularizer """no""" +320 73 optimizer """adam""" +320 73 training_loop """owa""" +320 73 negative_sampler """basic""" +320 73 evaluator """rankbased""" +320 74 dataset """kinships""" +320 74 model """hole""" +320 74 loss """bceaftersigmoid""" +320 74 regularizer """no""" +320 74 optimizer """adam""" +320 
74 training_loop """owa""" +320 74 negative_sampler """basic""" +320 74 evaluator """rankbased""" +320 75 dataset """kinships""" +320 75 model """hole""" +320 75 loss """bceaftersigmoid""" +320 75 regularizer """no""" +320 75 optimizer """adam""" +320 75 training_loop """owa""" +320 75 negative_sampler """basic""" +320 75 evaluator """rankbased""" +320 76 dataset """kinships""" +320 76 model """hole""" +320 76 loss """bceaftersigmoid""" +320 76 regularizer """no""" +320 76 optimizer """adam""" +320 76 training_loop """owa""" +320 76 negative_sampler """basic""" +320 76 evaluator """rankbased""" +320 77 dataset """kinships""" +320 77 model """hole""" +320 77 loss """bceaftersigmoid""" +320 77 regularizer """no""" +320 77 optimizer """adam""" +320 77 training_loop """owa""" +320 77 negative_sampler """basic""" +320 77 evaluator """rankbased""" +320 78 dataset """kinships""" +320 78 model """hole""" +320 78 loss """bceaftersigmoid""" +320 78 regularizer """no""" +320 78 optimizer """adam""" +320 78 training_loop """owa""" +320 78 negative_sampler """basic""" +320 78 evaluator """rankbased""" +320 79 dataset """kinships""" +320 79 model """hole""" +320 79 loss """bceaftersigmoid""" +320 79 regularizer """no""" +320 79 optimizer """adam""" +320 79 training_loop """owa""" +320 79 negative_sampler """basic""" +320 79 evaluator """rankbased""" +320 80 dataset """kinships""" +320 80 model """hole""" +320 80 loss """bceaftersigmoid""" +320 80 regularizer """no""" +320 80 optimizer """adam""" +320 80 training_loop """owa""" +320 80 negative_sampler """basic""" +320 80 evaluator """rankbased""" +320 81 dataset """kinships""" +320 81 model """hole""" +320 81 loss """bceaftersigmoid""" +320 81 regularizer """no""" +320 81 optimizer """adam""" +320 81 training_loop """owa""" +320 81 negative_sampler """basic""" +320 81 evaluator """rankbased""" +320 82 dataset """kinships""" +320 82 model """hole""" +320 82 loss """bceaftersigmoid""" +320 82 regularizer """no""" +320 82 optimizer 
"""adam""" +320 82 training_loop """owa""" +320 82 negative_sampler """basic""" +320 82 evaluator """rankbased""" +320 83 dataset """kinships""" +320 83 model """hole""" +320 83 loss """bceaftersigmoid""" +320 83 regularizer """no""" +320 83 optimizer """adam""" +320 83 training_loop """owa""" +320 83 negative_sampler """basic""" +320 83 evaluator """rankbased""" +320 84 dataset """kinships""" +320 84 model """hole""" +320 84 loss """bceaftersigmoid""" +320 84 regularizer """no""" +320 84 optimizer """adam""" +320 84 training_loop """owa""" +320 84 negative_sampler """basic""" +320 84 evaluator """rankbased""" +320 85 dataset """kinships""" +320 85 model """hole""" +320 85 loss """bceaftersigmoid""" +320 85 regularizer """no""" +320 85 optimizer """adam""" +320 85 training_loop """owa""" +320 85 negative_sampler """basic""" +320 85 evaluator """rankbased""" +320 86 dataset """kinships""" +320 86 model """hole""" +320 86 loss """bceaftersigmoid""" +320 86 regularizer """no""" +320 86 optimizer """adam""" +320 86 training_loop """owa""" +320 86 negative_sampler """basic""" +320 86 evaluator """rankbased""" +320 87 dataset """kinships""" +320 87 model """hole""" +320 87 loss """bceaftersigmoid""" +320 87 regularizer """no""" +320 87 optimizer """adam""" +320 87 training_loop """owa""" +320 87 negative_sampler """basic""" +320 87 evaluator """rankbased""" +320 88 dataset """kinships""" +320 88 model """hole""" +320 88 loss """bceaftersigmoid""" +320 88 regularizer """no""" +320 88 optimizer """adam""" +320 88 training_loop """owa""" +320 88 negative_sampler """basic""" +320 88 evaluator """rankbased""" +320 89 dataset """kinships""" +320 89 model """hole""" +320 89 loss """bceaftersigmoid""" +320 89 regularizer """no""" +320 89 optimizer """adam""" +320 89 training_loop """owa""" +320 89 negative_sampler """basic""" +320 89 evaluator """rankbased""" +320 90 dataset """kinships""" +320 90 model """hole""" +320 90 loss """bceaftersigmoid""" +320 90 regularizer """no""" 
+320 90 optimizer """adam""" +320 90 training_loop """owa""" +320 90 negative_sampler """basic""" +320 90 evaluator """rankbased""" +320 91 dataset """kinships""" +320 91 model """hole""" +320 91 loss """bceaftersigmoid""" +320 91 regularizer """no""" +320 91 optimizer """adam""" +320 91 training_loop """owa""" +320 91 negative_sampler """basic""" +320 91 evaluator """rankbased""" +320 92 dataset """kinships""" +320 92 model """hole""" +320 92 loss """bceaftersigmoid""" +320 92 regularizer """no""" +320 92 optimizer """adam""" +320 92 training_loop """owa""" +320 92 negative_sampler """basic""" +320 92 evaluator """rankbased""" +320 93 dataset """kinships""" +320 93 model """hole""" +320 93 loss """bceaftersigmoid""" +320 93 regularizer """no""" +320 93 optimizer """adam""" +320 93 training_loop """owa""" +320 93 negative_sampler """basic""" +320 93 evaluator """rankbased""" +320 94 dataset """kinships""" +320 94 model """hole""" +320 94 loss """bceaftersigmoid""" +320 94 regularizer """no""" +320 94 optimizer """adam""" +320 94 training_loop """owa""" +320 94 negative_sampler """basic""" +320 94 evaluator """rankbased""" +320 95 dataset """kinships""" +320 95 model """hole""" +320 95 loss """bceaftersigmoid""" +320 95 regularizer """no""" +320 95 optimizer """adam""" +320 95 training_loop """owa""" +320 95 negative_sampler """basic""" +320 95 evaluator """rankbased""" +320 96 dataset """kinships""" +320 96 model """hole""" +320 96 loss """bceaftersigmoid""" +320 96 regularizer """no""" +320 96 optimizer """adam""" +320 96 training_loop """owa""" +320 96 negative_sampler """basic""" +320 96 evaluator """rankbased""" +320 97 dataset """kinships""" +320 97 model """hole""" +320 97 loss """bceaftersigmoid""" +320 97 regularizer """no""" +320 97 optimizer """adam""" +320 97 training_loop """owa""" +320 97 negative_sampler """basic""" +320 97 evaluator """rankbased""" +320 98 dataset """kinships""" +320 98 model """hole""" +320 98 loss """bceaftersigmoid""" +320 98 
regularizer """no""" +320 98 optimizer """adam""" +320 98 training_loop """owa""" +320 98 negative_sampler """basic""" +320 98 evaluator """rankbased""" +320 99 dataset """kinships""" +320 99 model """hole""" +320 99 loss """bceaftersigmoid""" +320 99 regularizer """no""" +320 99 optimizer """adam""" +320 99 training_loop """owa""" +320 99 negative_sampler """basic""" +320 99 evaluator """rankbased""" +320 100 dataset """kinships""" +320 100 model """hole""" +320 100 loss """bceaftersigmoid""" +320 100 regularizer """no""" +320 100 optimizer """adam""" +320 100 training_loop """owa""" +320 100 negative_sampler """basic""" +320 100 evaluator """rankbased""" +321 1 model.embedding_dim 1.0 +321 1 optimizer.lr 0.0016337932606867145 +321 1 negative_sampler.num_negs_per_pos 21.0 +321 1 training.batch_size 2.0 +321 2 model.embedding_dim 2.0 +321 2 optimizer.lr 0.04150575188306939 +321 2 negative_sampler.num_negs_per_pos 89.0 +321 2 training.batch_size 2.0 +321 3 model.embedding_dim 1.0 +321 3 optimizer.lr 0.05693739586746095 +321 3 negative_sampler.num_negs_per_pos 95.0 +321 3 training.batch_size 1.0 +321 4 model.embedding_dim 2.0 +321 4 optimizer.lr 0.0012991761865746077 +321 4 negative_sampler.num_negs_per_pos 85.0 +321 4 training.batch_size 2.0 +321 5 model.embedding_dim 0.0 +321 5 optimizer.lr 0.0017158710993792341 +321 5 negative_sampler.num_negs_per_pos 96.0 +321 5 training.batch_size 0.0 +321 6 model.embedding_dim 1.0 +321 6 optimizer.lr 0.002943969019222486 +321 6 negative_sampler.num_negs_per_pos 5.0 +321 6 training.batch_size 2.0 +321 7 model.embedding_dim 2.0 +321 7 optimizer.lr 0.07299760086431695 +321 7 negative_sampler.num_negs_per_pos 52.0 +321 7 training.batch_size 0.0 +321 8 model.embedding_dim 2.0 +321 8 optimizer.lr 0.005740505895860601 +321 8 negative_sampler.num_negs_per_pos 19.0 +321 8 training.batch_size 0.0 +321 9 model.embedding_dim 1.0 +321 9 optimizer.lr 0.0022573116212586514 +321 9 negative_sampler.num_negs_per_pos 72.0 +321 9 
training.batch_size 0.0 +321 10 model.embedding_dim 1.0 +321 10 optimizer.lr 0.08180212412473827 +321 10 negative_sampler.num_negs_per_pos 45.0 +321 10 training.batch_size 2.0 +321 11 model.embedding_dim 1.0 +321 11 optimizer.lr 0.002035803411069424 +321 11 negative_sampler.num_negs_per_pos 27.0 +321 11 training.batch_size 0.0 +321 12 model.embedding_dim 2.0 +321 12 optimizer.lr 0.07458967571249837 +321 12 negative_sampler.num_negs_per_pos 62.0 +321 12 training.batch_size 2.0 +321 13 model.embedding_dim 0.0 +321 13 optimizer.lr 0.044293176036182734 +321 13 negative_sampler.num_negs_per_pos 19.0 +321 13 training.batch_size 2.0 +321 14 model.embedding_dim 1.0 +321 14 optimizer.lr 0.0035699820743232452 +321 14 negative_sampler.num_negs_per_pos 14.0 +321 14 training.batch_size 0.0 +321 15 model.embedding_dim 0.0 +321 15 optimizer.lr 0.009941023217603841 +321 15 negative_sampler.num_negs_per_pos 58.0 +321 15 training.batch_size 1.0 +321 16 model.embedding_dim 1.0 +321 16 optimizer.lr 0.007339649738455068 +321 16 negative_sampler.num_negs_per_pos 5.0 +321 16 training.batch_size 2.0 +321 17 model.embedding_dim 1.0 +321 17 optimizer.lr 0.0012463855866512794 +321 17 negative_sampler.num_negs_per_pos 34.0 +321 17 training.batch_size 1.0 +321 18 model.embedding_dim 0.0 +321 18 optimizer.lr 0.0015397700045752768 +321 18 negative_sampler.num_negs_per_pos 37.0 +321 18 training.batch_size 2.0 +321 19 model.embedding_dim 0.0 +321 19 optimizer.lr 0.001264371927916522 +321 19 negative_sampler.num_negs_per_pos 52.0 +321 19 training.batch_size 0.0 +321 20 model.embedding_dim 2.0 +321 20 optimizer.lr 0.01648496306466747 +321 20 negative_sampler.num_negs_per_pos 59.0 +321 20 training.batch_size 0.0 +321 21 model.embedding_dim 0.0 +321 21 optimizer.lr 0.006420348953180394 +321 21 negative_sampler.num_negs_per_pos 39.0 +321 21 training.batch_size 1.0 +321 22 model.embedding_dim 2.0 +321 22 optimizer.lr 0.05489753298693723 +321 22 negative_sampler.num_negs_per_pos 9.0 +321 22 
training.batch_size 2.0 +321 23 model.embedding_dim 0.0 +321 23 optimizer.lr 0.026501393491960196 +321 23 negative_sampler.num_negs_per_pos 23.0 +321 23 training.batch_size 1.0 +321 24 model.embedding_dim 1.0 +321 24 optimizer.lr 0.004908791421786808 +321 24 negative_sampler.num_negs_per_pos 77.0 +321 24 training.batch_size 1.0 +321 25 model.embedding_dim 1.0 +321 25 optimizer.lr 0.06405518451665354 +321 25 negative_sampler.num_negs_per_pos 18.0 +321 25 training.batch_size 0.0 +321 26 model.embedding_dim 0.0 +321 26 optimizer.lr 0.09959295100139547 +321 26 negative_sampler.num_negs_per_pos 3.0 +321 26 training.batch_size 0.0 +321 27 model.embedding_dim 0.0 +321 27 optimizer.lr 0.0014045634188815566 +321 27 negative_sampler.num_negs_per_pos 73.0 +321 27 training.batch_size 2.0 +321 28 model.embedding_dim 1.0 +321 28 optimizer.lr 0.022157479603857246 +321 28 negative_sampler.num_negs_per_pos 1.0 +321 28 training.batch_size 0.0 +321 29 model.embedding_dim 2.0 +321 29 optimizer.lr 0.02982607583829133 +321 29 negative_sampler.num_negs_per_pos 94.0 +321 29 training.batch_size 2.0 +321 30 model.embedding_dim 0.0 +321 30 optimizer.lr 0.0026796785014317633 +321 30 negative_sampler.num_negs_per_pos 59.0 +321 30 training.batch_size 2.0 +321 31 model.embedding_dim 2.0 +321 31 optimizer.lr 0.006886680810684015 +321 31 negative_sampler.num_negs_per_pos 46.0 +321 31 training.batch_size 1.0 +321 32 model.embedding_dim 2.0 +321 32 optimizer.lr 0.07982683984593958 +321 32 negative_sampler.num_negs_per_pos 4.0 +321 32 training.batch_size 0.0 +321 33 model.embedding_dim 2.0 +321 33 optimizer.lr 0.0031684559727561943 +321 33 negative_sampler.num_negs_per_pos 27.0 +321 33 training.batch_size 2.0 +321 34 model.embedding_dim 1.0 +321 34 optimizer.lr 0.005575594653717998 +321 34 negative_sampler.num_negs_per_pos 3.0 +321 34 training.batch_size 2.0 +321 35 model.embedding_dim 2.0 +321 35 optimizer.lr 0.06034049983510374 +321 35 negative_sampler.num_negs_per_pos 57.0 +321 35 
training.batch_size 1.0 +321 36 model.embedding_dim 2.0 +321 36 optimizer.lr 0.0055730838420506345 +321 36 negative_sampler.num_negs_per_pos 60.0 +321 36 training.batch_size 1.0 +321 37 model.embedding_dim 0.0 +321 37 optimizer.lr 0.009176949205156094 +321 37 negative_sampler.num_negs_per_pos 55.0 +321 37 training.batch_size 1.0 +321 38 model.embedding_dim 1.0 +321 38 optimizer.lr 0.08795924253318962 +321 38 negative_sampler.num_negs_per_pos 26.0 +321 38 training.batch_size 2.0 +321 39 model.embedding_dim 2.0 +321 39 optimizer.lr 0.0011547974385612923 +321 39 negative_sampler.num_negs_per_pos 42.0 +321 39 training.batch_size 1.0 +321 40 model.embedding_dim 2.0 +321 40 optimizer.lr 0.00903290316896652 +321 40 negative_sampler.num_negs_per_pos 91.0 +321 40 training.batch_size 0.0 +321 41 model.embedding_dim 1.0 +321 41 optimizer.lr 0.01272428983091955 +321 41 negative_sampler.num_negs_per_pos 97.0 +321 41 training.batch_size 2.0 +321 42 model.embedding_dim 0.0 +321 42 optimizer.lr 0.002348840518659957 +321 42 negative_sampler.num_negs_per_pos 49.0 +321 42 training.batch_size 0.0 +321 43 model.embedding_dim 2.0 +321 43 optimizer.lr 0.08329834184395242 +321 43 negative_sampler.num_negs_per_pos 82.0 +321 43 training.batch_size 2.0 +321 44 model.embedding_dim 0.0 +321 44 optimizer.lr 0.09714653747308256 +321 44 negative_sampler.num_negs_per_pos 5.0 +321 44 training.batch_size 0.0 +321 45 model.embedding_dim 2.0 +321 45 optimizer.lr 0.010264412446750464 +321 45 negative_sampler.num_negs_per_pos 23.0 +321 45 training.batch_size 0.0 +321 46 model.embedding_dim 1.0 +321 46 optimizer.lr 0.0012378795820260833 +321 46 negative_sampler.num_negs_per_pos 20.0 +321 46 training.batch_size 1.0 +321 47 model.embedding_dim 2.0 +321 47 optimizer.lr 0.007961988278678115 +321 47 negative_sampler.num_negs_per_pos 0.0 +321 47 training.batch_size 0.0 +321 48 model.embedding_dim 2.0 +321 48 optimizer.lr 0.013954972520458816 +321 48 negative_sampler.num_negs_per_pos 50.0 +321 48 
training.batch_size 0.0 +321 49 model.embedding_dim 1.0 +321 49 optimizer.lr 0.007115035159520771 +321 49 negative_sampler.num_negs_per_pos 14.0 +321 49 training.batch_size 0.0 +321 50 model.embedding_dim 0.0 +321 50 optimizer.lr 0.002347210011932413 +321 50 negative_sampler.num_negs_per_pos 95.0 +321 50 training.batch_size 2.0 +321 51 model.embedding_dim 1.0 +321 51 optimizer.lr 0.002687930390377183 +321 51 negative_sampler.num_negs_per_pos 21.0 +321 51 training.batch_size 2.0 +321 52 model.embedding_dim 2.0 +321 52 optimizer.lr 0.004265603265386456 +321 52 negative_sampler.num_negs_per_pos 17.0 +321 52 training.batch_size 2.0 +321 53 model.embedding_dim 2.0 +321 53 optimizer.lr 0.001974039666179683 +321 53 negative_sampler.num_negs_per_pos 60.0 +321 53 training.batch_size 0.0 +321 54 model.embedding_dim 2.0 +321 54 optimizer.lr 0.046882529318774305 +321 54 negative_sampler.num_negs_per_pos 77.0 +321 54 training.batch_size 1.0 +321 55 model.embedding_dim 2.0 +321 55 optimizer.lr 0.024247434433304786 +321 55 negative_sampler.num_negs_per_pos 36.0 +321 55 training.batch_size 1.0 +321 56 model.embedding_dim 0.0 +321 56 optimizer.lr 0.008236758649538335 +321 56 negative_sampler.num_negs_per_pos 38.0 +321 56 training.batch_size 2.0 +321 57 model.embedding_dim 0.0 +321 57 optimizer.lr 0.0030188756725912694 +321 57 negative_sampler.num_negs_per_pos 18.0 +321 57 training.batch_size 0.0 +321 58 model.embedding_dim 0.0 +321 58 optimizer.lr 0.003264300466017168 +321 58 negative_sampler.num_negs_per_pos 39.0 +321 58 training.batch_size 2.0 +321 59 model.embedding_dim 1.0 +321 59 optimizer.lr 0.006440970426956418 +321 59 negative_sampler.num_negs_per_pos 23.0 +321 59 training.batch_size 2.0 +321 60 model.embedding_dim 2.0 +321 60 optimizer.lr 0.026952621167450647 +321 60 negative_sampler.num_negs_per_pos 78.0 +321 60 training.batch_size 0.0 +321 61 model.embedding_dim 2.0 +321 61 optimizer.lr 0.06400178799021543 +321 61 negative_sampler.num_negs_per_pos 62.0 +321 61 
training.batch_size 1.0 +321 62 model.embedding_dim 1.0 +321 62 optimizer.lr 0.03671286083835159 +321 62 negative_sampler.num_negs_per_pos 76.0 +321 62 training.batch_size 2.0 +321 63 model.embedding_dim 2.0 +321 63 optimizer.lr 0.0010628533417330476 +321 63 negative_sampler.num_negs_per_pos 77.0 +321 63 training.batch_size 2.0 +321 64 model.embedding_dim 2.0 +321 64 optimizer.lr 0.006004651372004656 +321 64 negative_sampler.num_negs_per_pos 86.0 +321 64 training.batch_size 0.0 +321 65 model.embedding_dim 2.0 +321 65 optimizer.lr 0.00397835138570511 +321 65 negative_sampler.num_negs_per_pos 87.0 +321 65 training.batch_size 0.0 +321 66 model.embedding_dim 0.0 +321 66 optimizer.lr 0.007523301744390658 +321 66 negative_sampler.num_negs_per_pos 13.0 +321 66 training.batch_size 1.0 +321 67 model.embedding_dim 1.0 +321 67 optimizer.lr 0.006199715093855283 +321 67 negative_sampler.num_negs_per_pos 92.0 +321 67 training.batch_size 1.0 +321 68 model.embedding_dim 2.0 +321 68 optimizer.lr 0.0703029193951903 +321 68 negative_sampler.num_negs_per_pos 15.0 +321 68 training.batch_size 0.0 +321 69 model.embedding_dim 1.0 +321 69 optimizer.lr 0.004688701188214449 +321 69 negative_sampler.num_negs_per_pos 93.0 +321 69 training.batch_size 1.0 +321 70 model.embedding_dim 2.0 +321 70 optimizer.lr 0.009938525801627384 +321 70 negative_sampler.num_negs_per_pos 28.0 +321 70 training.batch_size 0.0 +321 71 model.embedding_dim 0.0 +321 71 optimizer.lr 0.017237338097983335 +321 71 negative_sampler.num_negs_per_pos 32.0 +321 71 training.batch_size 2.0 +321 72 model.embedding_dim 0.0 +321 72 optimizer.lr 0.03271140235344885 +321 72 negative_sampler.num_negs_per_pos 90.0 +321 72 training.batch_size 1.0 +321 73 model.embedding_dim 2.0 +321 73 optimizer.lr 0.08770357349054123 +321 73 negative_sampler.num_negs_per_pos 78.0 +321 73 training.batch_size 0.0 +321 74 model.embedding_dim 0.0 +321 74 optimizer.lr 0.0014495407232307854 +321 74 negative_sampler.num_negs_per_pos 19.0 +321 74 
training.batch_size 0.0 +321 75 model.embedding_dim 1.0 +321 75 optimizer.lr 0.001589505151076931 +321 75 negative_sampler.num_negs_per_pos 10.0 +321 75 training.batch_size 0.0 +321 76 model.embedding_dim 0.0 +321 76 optimizer.lr 0.02876143587179624 +321 76 negative_sampler.num_negs_per_pos 90.0 +321 76 training.batch_size 1.0 +321 77 model.embedding_dim 2.0 +321 77 optimizer.lr 0.015776929757016715 +321 77 negative_sampler.num_negs_per_pos 4.0 +321 77 training.batch_size 0.0 +321 78 model.embedding_dim 2.0 +321 78 optimizer.lr 0.0011090923141133264 +321 78 negative_sampler.num_negs_per_pos 52.0 +321 78 training.batch_size 2.0 +321 79 model.embedding_dim 1.0 +321 79 optimizer.lr 0.0950877583128553 +321 79 negative_sampler.num_negs_per_pos 12.0 +321 79 training.batch_size 0.0 +321 80 model.embedding_dim 0.0 +321 80 optimizer.lr 0.032305177001317 +321 80 negative_sampler.num_negs_per_pos 79.0 +321 80 training.batch_size 1.0 +321 81 model.embedding_dim 2.0 +321 81 optimizer.lr 0.02668309288816895 +321 81 negative_sampler.num_negs_per_pos 5.0 +321 81 training.batch_size 0.0 +321 82 model.embedding_dim 2.0 +321 82 optimizer.lr 0.0055631870318386765 +321 82 negative_sampler.num_negs_per_pos 21.0 +321 82 training.batch_size 0.0 +321 83 model.embedding_dim 2.0 +321 83 optimizer.lr 0.019186040889193096 +321 83 negative_sampler.num_negs_per_pos 68.0 +321 83 training.batch_size 0.0 +321 84 model.embedding_dim 0.0 +321 84 optimizer.lr 0.001284676457696422 +321 84 negative_sampler.num_negs_per_pos 93.0 +321 84 training.batch_size 2.0 +321 85 model.embedding_dim 0.0 +321 85 optimizer.lr 0.0835723938650717 +321 85 negative_sampler.num_negs_per_pos 57.0 +321 85 training.batch_size 0.0 +321 86 model.embedding_dim 1.0 +321 86 optimizer.lr 0.010055623942287538 +321 86 negative_sampler.num_negs_per_pos 53.0 +321 86 training.batch_size 1.0 +321 87 model.embedding_dim 1.0 +321 87 optimizer.lr 0.007790788564111407 +321 87 negative_sampler.num_negs_per_pos 32.0 +321 87 training.batch_size 
2.0 +321 88 model.embedding_dim 2.0 +321 88 optimizer.lr 0.0012177572242999704 +321 88 negative_sampler.num_negs_per_pos 45.0 +321 88 training.batch_size 2.0 +321 89 model.embedding_dim 2.0 +321 89 optimizer.lr 0.03243043537219116 +321 89 negative_sampler.num_negs_per_pos 24.0 +321 89 training.batch_size 2.0 +321 90 model.embedding_dim 1.0 +321 90 optimizer.lr 0.03338352711912462 +321 90 negative_sampler.num_negs_per_pos 37.0 +321 90 training.batch_size 1.0 +321 91 model.embedding_dim 1.0 +321 91 optimizer.lr 0.017223241701606478 +321 91 negative_sampler.num_negs_per_pos 53.0 +321 91 training.batch_size 0.0 +321 92 model.embedding_dim 1.0 +321 92 optimizer.lr 0.017890111847101292 +321 92 negative_sampler.num_negs_per_pos 94.0 +321 92 training.batch_size 2.0 +321 93 model.embedding_dim 0.0 +321 93 optimizer.lr 0.015926199562651767 +321 93 negative_sampler.num_negs_per_pos 95.0 +321 93 training.batch_size 2.0 +321 94 model.embedding_dim 1.0 +321 94 optimizer.lr 0.004808351268547463 +321 94 negative_sampler.num_negs_per_pos 42.0 +321 94 training.batch_size 0.0 +321 95 model.embedding_dim 0.0 +321 95 optimizer.lr 0.07736855080754196 +321 95 negative_sampler.num_negs_per_pos 95.0 +321 95 training.batch_size 2.0 +321 96 model.embedding_dim 0.0 +321 96 optimizer.lr 0.00130854094814172 +321 96 negative_sampler.num_negs_per_pos 39.0 +321 96 training.batch_size 2.0 +321 97 model.embedding_dim 1.0 +321 97 optimizer.lr 0.05135343368339501 +321 97 negative_sampler.num_negs_per_pos 6.0 +321 97 training.batch_size 2.0 +321 98 model.embedding_dim 0.0 +321 98 optimizer.lr 0.019414374974219308 +321 98 negative_sampler.num_negs_per_pos 55.0 +321 98 training.batch_size 2.0 +321 99 model.embedding_dim 0.0 +321 99 optimizer.lr 0.06464105009181476 +321 99 negative_sampler.num_negs_per_pos 73.0 +321 99 training.batch_size 0.0 +321 100 model.embedding_dim 1.0 +321 100 optimizer.lr 0.0010086864027170543 +321 100 negative_sampler.num_negs_per_pos 34.0 +321 100 training.batch_size 2.0 +321 1 
dataset """kinships""" +321 1 model """hole""" +321 1 loss """softplus""" +321 1 regularizer """no""" +321 1 optimizer """adam""" +321 1 training_loop """owa""" +321 1 negative_sampler """basic""" +321 1 evaluator """rankbased""" +321 2 dataset """kinships""" +321 2 model """hole""" +321 2 loss """softplus""" +321 2 regularizer """no""" +321 2 optimizer """adam""" +321 2 training_loop """owa""" +321 2 negative_sampler """basic""" +321 2 evaluator """rankbased""" +321 3 dataset """kinships""" +321 3 model """hole""" +321 3 loss """softplus""" +321 3 regularizer """no""" +321 3 optimizer """adam""" +321 3 training_loop """owa""" +321 3 negative_sampler """basic""" +321 3 evaluator """rankbased""" +321 4 dataset """kinships""" +321 4 model """hole""" +321 4 loss """softplus""" +321 4 regularizer """no""" +321 4 optimizer """adam""" +321 4 training_loop """owa""" +321 4 negative_sampler """basic""" +321 4 evaluator """rankbased""" +321 5 dataset """kinships""" +321 5 model """hole""" +321 5 loss """softplus""" +321 5 regularizer """no""" +321 5 optimizer """adam""" +321 5 training_loop """owa""" +321 5 negative_sampler """basic""" +321 5 evaluator """rankbased""" +321 6 dataset """kinships""" +321 6 model """hole""" +321 6 loss """softplus""" +321 6 regularizer """no""" +321 6 optimizer """adam""" +321 6 training_loop """owa""" +321 6 negative_sampler """basic""" +321 6 evaluator """rankbased""" +321 7 dataset """kinships""" +321 7 model """hole""" +321 7 loss """softplus""" +321 7 regularizer """no""" +321 7 optimizer """adam""" +321 7 training_loop """owa""" +321 7 negative_sampler """basic""" +321 7 evaluator """rankbased""" +321 8 dataset """kinships""" +321 8 model """hole""" +321 8 loss """softplus""" +321 8 regularizer """no""" +321 8 optimizer """adam""" +321 8 training_loop """owa""" +321 8 negative_sampler """basic""" +321 8 evaluator """rankbased""" +321 9 dataset """kinships""" +321 9 model """hole""" +321 9 loss """softplus""" +321 9 regularizer """no""" 
+321 9 optimizer """adam""" +321 9 training_loop """owa""" +321 9 negative_sampler """basic""" +321 9 evaluator """rankbased""" +321 10 dataset """kinships""" +321 10 model """hole""" +321 10 loss """softplus""" +321 10 regularizer """no""" +321 10 optimizer """adam""" +321 10 training_loop """owa""" +321 10 negative_sampler """basic""" +321 10 evaluator """rankbased""" +321 11 dataset """kinships""" +321 11 model """hole""" +321 11 loss """softplus""" +321 11 regularizer """no""" +321 11 optimizer """adam""" +321 11 training_loop """owa""" +321 11 negative_sampler """basic""" +321 11 evaluator """rankbased""" +321 12 dataset """kinships""" +321 12 model """hole""" +321 12 loss """softplus""" +321 12 regularizer """no""" +321 12 optimizer """adam""" +321 12 training_loop """owa""" +321 12 negative_sampler """basic""" +321 12 evaluator """rankbased""" +321 13 dataset """kinships""" +321 13 model """hole""" +321 13 loss """softplus""" +321 13 regularizer """no""" +321 13 optimizer """adam""" +321 13 training_loop """owa""" +321 13 negative_sampler """basic""" +321 13 evaluator """rankbased""" +321 14 dataset """kinships""" +321 14 model """hole""" +321 14 loss """softplus""" +321 14 regularizer """no""" +321 14 optimizer """adam""" +321 14 training_loop """owa""" +321 14 negative_sampler """basic""" +321 14 evaluator """rankbased""" +321 15 dataset """kinships""" +321 15 model """hole""" +321 15 loss """softplus""" +321 15 regularizer """no""" +321 15 optimizer """adam""" +321 15 training_loop """owa""" +321 15 negative_sampler """basic""" +321 15 evaluator """rankbased""" +321 16 dataset """kinships""" +321 16 model """hole""" +321 16 loss """softplus""" +321 16 regularizer """no""" +321 16 optimizer """adam""" +321 16 training_loop """owa""" +321 16 negative_sampler """basic""" +321 16 evaluator """rankbased""" +321 17 dataset """kinships""" +321 17 model """hole""" +321 17 loss """softplus""" +321 17 regularizer """no""" +321 17 optimizer """adam""" +321 17 
training_loop """owa""" +321 17 negative_sampler """basic""" +321 17 evaluator """rankbased""" +321 18 dataset """kinships""" +321 18 model """hole""" +321 18 loss """softplus""" +321 18 regularizer """no""" +321 18 optimizer """adam""" +321 18 training_loop """owa""" +321 18 negative_sampler """basic""" +321 18 evaluator """rankbased""" +321 19 dataset """kinships""" +321 19 model """hole""" +321 19 loss """softplus""" +321 19 regularizer """no""" +321 19 optimizer """adam""" +321 19 training_loop """owa""" +321 19 negative_sampler """basic""" +321 19 evaluator """rankbased""" +321 20 dataset """kinships""" +321 20 model """hole""" +321 20 loss """softplus""" +321 20 regularizer """no""" +321 20 optimizer """adam""" +321 20 training_loop """owa""" +321 20 negative_sampler """basic""" +321 20 evaluator """rankbased""" +321 21 dataset """kinships""" +321 21 model """hole""" +321 21 loss """softplus""" +321 21 regularizer """no""" +321 21 optimizer """adam""" +321 21 training_loop """owa""" +321 21 negative_sampler """basic""" +321 21 evaluator """rankbased""" +321 22 dataset """kinships""" +321 22 model """hole""" +321 22 loss """softplus""" +321 22 regularizer """no""" +321 22 optimizer """adam""" +321 22 training_loop """owa""" +321 22 negative_sampler """basic""" +321 22 evaluator """rankbased""" +321 23 dataset """kinships""" +321 23 model """hole""" +321 23 loss """softplus""" +321 23 regularizer """no""" +321 23 optimizer """adam""" +321 23 training_loop """owa""" +321 23 negative_sampler """basic""" +321 23 evaluator """rankbased""" +321 24 dataset """kinships""" +321 24 model """hole""" +321 24 loss """softplus""" +321 24 regularizer """no""" +321 24 optimizer """adam""" +321 24 training_loop """owa""" +321 24 negative_sampler """basic""" +321 24 evaluator """rankbased""" +321 25 dataset """kinships""" +321 25 model """hole""" +321 25 loss """softplus""" +321 25 regularizer """no""" +321 25 optimizer """adam""" +321 25 training_loop """owa""" +321 25 
negative_sampler """basic""" +321 25 evaluator """rankbased""" +321 26 dataset """kinships""" +321 26 model """hole""" +321 26 loss """softplus""" +321 26 regularizer """no""" +321 26 optimizer """adam""" +321 26 training_loop """owa""" +321 26 negative_sampler """basic""" +321 26 evaluator """rankbased""" +321 27 dataset """kinships""" +321 27 model """hole""" +321 27 loss """softplus""" +321 27 regularizer """no""" +321 27 optimizer """adam""" +321 27 training_loop """owa""" +321 27 negative_sampler """basic""" +321 27 evaluator """rankbased""" +321 28 dataset """kinships""" +321 28 model """hole""" +321 28 loss """softplus""" +321 28 regularizer """no""" +321 28 optimizer """adam""" +321 28 training_loop """owa""" +321 28 negative_sampler """basic""" +321 28 evaluator """rankbased""" +321 29 dataset """kinships""" +321 29 model """hole""" +321 29 loss """softplus""" +321 29 regularizer """no""" +321 29 optimizer """adam""" +321 29 training_loop """owa""" +321 29 negative_sampler """basic""" +321 29 evaluator """rankbased""" +321 30 dataset """kinships""" +321 30 model """hole""" +321 30 loss """softplus""" +321 30 regularizer """no""" +321 30 optimizer """adam""" +321 30 training_loop """owa""" +321 30 negative_sampler """basic""" +321 30 evaluator """rankbased""" +321 31 dataset """kinships""" +321 31 model """hole""" +321 31 loss """softplus""" +321 31 regularizer """no""" +321 31 optimizer """adam""" +321 31 training_loop """owa""" +321 31 negative_sampler """basic""" +321 31 evaluator """rankbased""" +321 32 dataset """kinships""" +321 32 model """hole""" +321 32 loss """softplus""" +321 32 regularizer """no""" +321 32 optimizer """adam""" +321 32 training_loop """owa""" +321 32 negative_sampler """basic""" +321 32 evaluator """rankbased""" +321 33 dataset """kinships""" +321 33 model """hole""" +321 33 loss """softplus""" +321 33 regularizer """no""" +321 33 optimizer """adam""" +321 33 training_loop """owa""" +321 33 negative_sampler """basic""" +321 33 
evaluator """rankbased""" +321 34 dataset """kinships""" +321 34 model """hole""" +321 34 loss """softplus""" +321 34 regularizer """no""" +321 34 optimizer """adam""" +321 34 training_loop """owa""" +321 34 negative_sampler """basic""" +321 34 evaluator """rankbased""" +321 35 dataset """kinships""" +321 35 model """hole""" +321 35 loss """softplus""" +321 35 regularizer """no""" +321 35 optimizer """adam""" +321 35 training_loop """owa""" +321 35 negative_sampler """basic""" +321 35 evaluator """rankbased""" +321 36 dataset """kinships""" +321 36 model """hole""" +321 36 loss """softplus""" +321 36 regularizer """no""" +321 36 optimizer """adam""" +321 36 training_loop """owa""" +321 36 negative_sampler """basic""" +321 36 evaluator """rankbased""" +321 37 dataset """kinships""" +321 37 model """hole""" +321 37 loss """softplus""" +321 37 regularizer """no""" +321 37 optimizer """adam""" +321 37 training_loop """owa""" +321 37 negative_sampler """basic""" +321 37 evaluator """rankbased""" +321 38 dataset """kinships""" +321 38 model """hole""" +321 38 loss """softplus""" +321 38 regularizer """no""" +321 38 optimizer """adam""" +321 38 training_loop """owa""" +321 38 negative_sampler """basic""" +321 38 evaluator """rankbased""" +321 39 dataset """kinships""" +321 39 model """hole""" +321 39 loss """softplus""" +321 39 regularizer """no""" +321 39 optimizer """adam""" +321 39 training_loop """owa""" +321 39 negative_sampler """basic""" +321 39 evaluator """rankbased""" +321 40 dataset """kinships""" +321 40 model """hole""" +321 40 loss """softplus""" +321 40 regularizer """no""" +321 40 optimizer """adam""" +321 40 training_loop """owa""" +321 40 negative_sampler """basic""" +321 40 evaluator """rankbased""" +321 41 dataset """kinships""" +321 41 model """hole""" +321 41 loss """softplus""" +321 41 regularizer """no""" +321 41 optimizer """adam""" +321 41 training_loop """owa""" +321 41 negative_sampler """basic""" +321 41 evaluator """rankbased""" +321 42 
dataset """kinships""" +321 42 model """hole""" +321 42 loss """softplus""" +321 42 regularizer """no""" +321 42 optimizer """adam""" +321 42 training_loop """owa""" +321 42 negative_sampler """basic""" +321 42 evaluator """rankbased""" +321 43 dataset """kinships""" +321 43 model """hole""" +321 43 loss """softplus""" +321 43 regularizer """no""" +321 43 optimizer """adam""" +321 43 training_loop """owa""" +321 43 negative_sampler """basic""" +321 43 evaluator """rankbased""" +321 44 dataset """kinships""" +321 44 model """hole""" +321 44 loss """softplus""" +321 44 regularizer """no""" +321 44 optimizer """adam""" +321 44 training_loop """owa""" +321 44 negative_sampler """basic""" +321 44 evaluator """rankbased""" +321 45 dataset """kinships""" +321 45 model """hole""" +321 45 loss """softplus""" +321 45 regularizer """no""" +321 45 optimizer """adam""" +321 45 training_loop """owa""" +321 45 negative_sampler """basic""" +321 45 evaluator """rankbased""" +321 46 dataset """kinships""" +321 46 model """hole""" +321 46 loss """softplus""" +321 46 regularizer """no""" +321 46 optimizer """adam""" +321 46 training_loop """owa""" +321 46 negative_sampler """basic""" +321 46 evaluator """rankbased""" +321 47 dataset """kinships""" +321 47 model """hole""" +321 47 loss """softplus""" +321 47 regularizer """no""" +321 47 optimizer """adam""" +321 47 training_loop """owa""" +321 47 negative_sampler """basic""" +321 47 evaluator """rankbased""" +321 48 dataset """kinships""" +321 48 model """hole""" +321 48 loss """softplus""" +321 48 regularizer """no""" +321 48 optimizer """adam""" +321 48 training_loop """owa""" +321 48 negative_sampler """basic""" +321 48 evaluator """rankbased""" +321 49 dataset """kinships""" +321 49 model """hole""" +321 49 loss """softplus""" +321 49 regularizer """no""" +321 49 optimizer """adam""" +321 49 training_loop """owa""" +321 49 negative_sampler """basic""" +321 49 evaluator """rankbased""" +321 50 dataset """kinships""" +321 50 model 
"""hole""" +321 50 loss """softplus""" +321 50 regularizer """no""" +321 50 optimizer """adam""" +321 50 training_loop """owa""" +321 50 negative_sampler """basic""" +321 50 evaluator """rankbased""" +321 51 dataset """kinships""" +321 51 model """hole""" +321 51 loss """softplus""" +321 51 regularizer """no""" +321 51 optimizer """adam""" +321 51 training_loop """owa""" +321 51 negative_sampler """basic""" +321 51 evaluator """rankbased""" +321 52 dataset """kinships""" +321 52 model """hole""" +321 52 loss """softplus""" +321 52 regularizer """no""" +321 52 optimizer """adam""" +321 52 training_loop """owa""" +321 52 negative_sampler """basic""" +321 52 evaluator """rankbased""" +321 53 dataset """kinships""" +321 53 model """hole""" +321 53 loss """softplus""" +321 53 regularizer """no""" +321 53 optimizer """adam""" +321 53 training_loop """owa""" +321 53 negative_sampler """basic""" +321 53 evaluator """rankbased""" +321 54 dataset """kinships""" +321 54 model """hole""" +321 54 loss """softplus""" +321 54 regularizer """no""" +321 54 optimizer """adam""" +321 54 training_loop """owa""" +321 54 negative_sampler """basic""" +321 54 evaluator """rankbased""" +321 55 dataset """kinships""" +321 55 model """hole""" +321 55 loss """softplus""" +321 55 regularizer """no""" +321 55 optimizer """adam""" +321 55 training_loop """owa""" +321 55 negative_sampler """basic""" +321 55 evaluator """rankbased""" +321 56 dataset """kinships""" +321 56 model """hole""" +321 56 loss """softplus""" +321 56 regularizer """no""" +321 56 optimizer """adam""" +321 56 training_loop """owa""" +321 56 negative_sampler """basic""" +321 56 evaluator """rankbased""" +321 57 dataset """kinships""" +321 57 model """hole""" +321 57 loss """softplus""" +321 57 regularizer """no""" +321 57 optimizer """adam""" +321 57 training_loop """owa""" +321 57 negative_sampler """basic""" +321 57 evaluator """rankbased""" +321 58 dataset """kinships""" +321 58 model """hole""" +321 58 loss """softplus""" 
+321 58 regularizer """no""" +321 58 optimizer """adam""" +321 58 training_loop """owa""" +321 58 negative_sampler """basic""" +321 58 evaluator """rankbased""" +321 59 dataset """kinships""" +321 59 model """hole""" +321 59 loss """softplus""" +321 59 regularizer """no""" +321 59 optimizer """adam""" +321 59 training_loop """owa""" +321 59 negative_sampler """basic""" +321 59 evaluator """rankbased""" +321 60 dataset """kinships""" +321 60 model """hole""" +321 60 loss """softplus""" +321 60 regularizer """no""" +321 60 optimizer """adam""" +321 60 training_loop """owa""" +321 60 negative_sampler """basic""" +321 60 evaluator """rankbased""" +321 61 dataset """kinships""" +321 61 model """hole""" +321 61 loss """softplus""" +321 61 regularizer """no""" +321 61 optimizer """adam""" +321 61 training_loop """owa""" +321 61 negative_sampler """basic""" +321 61 evaluator """rankbased""" +321 62 dataset """kinships""" +321 62 model """hole""" +321 62 loss """softplus""" +321 62 regularizer """no""" +321 62 optimizer """adam""" +321 62 training_loop """owa""" +321 62 negative_sampler """basic""" +321 62 evaluator """rankbased""" +321 63 dataset """kinships""" +321 63 model """hole""" +321 63 loss """softplus""" +321 63 regularizer """no""" +321 63 optimizer """adam""" +321 63 training_loop """owa""" +321 63 negative_sampler """basic""" +321 63 evaluator """rankbased""" +321 64 dataset """kinships""" +321 64 model """hole""" +321 64 loss """softplus""" +321 64 regularizer """no""" +321 64 optimizer """adam""" +321 64 training_loop """owa""" +321 64 negative_sampler """basic""" +321 64 evaluator """rankbased""" +321 65 dataset """kinships""" +321 65 model """hole""" +321 65 loss """softplus""" +321 65 regularizer """no""" +321 65 optimizer """adam""" +321 65 training_loop """owa""" +321 65 negative_sampler """basic""" +321 65 evaluator """rankbased""" +321 66 dataset """kinships""" +321 66 model """hole""" +321 66 loss """softplus""" +321 66 regularizer """no""" +321 66 
optimizer """adam""" +321 66 training_loop """owa""" +321 66 negative_sampler """basic""" +321 66 evaluator """rankbased""" +321 67 dataset """kinships""" +321 67 model """hole""" +321 67 loss """softplus""" +321 67 regularizer """no""" +321 67 optimizer """adam""" +321 67 training_loop """owa""" +321 67 negative_sampler """basic""" +321 67 evaluator """rankbased""" +321 68 dataset """kinships""" +321 68 model """hole""" +321 68 loss """softplus""" +321 68 regularizer """no""" +321 68 optimizer """adam""" +321 68 training_loop """owa""" +321 68 negative_sampler """basic""" +321 68 evaluator """rankbased""" +321 69 dataset """kinships""" +321 69 model """hole""" +321 69 loss """softplus""" +321 69 regularizer """no""" +321 69 optimizer """adam""" +321 69 training_loop """owa""" +321 69 negative_sampler """basic""" +321 69 evaluator """rankbased""" +321 70 dataset """kinships""" +321 70 model """hole""" +321 70 loss """softplus""" +321 70 regularizer """no""" +321 70 optimizer """adam""" +321 70 training_loop """owa""" +321 70 negative_sampler """basic""" +321 70 evaluator """rankbased""" +321 71 dataset """kinships""" +321 71 model """hole""" +321 71 loss """softplus""" +321 71 regularizer """no""" +321 71 optimizer """adam""" +321 71 training_loop """owa""" +321 71 negative_sampler """basic""" +321 71 evaluator """rankbased""" +321 72 dataset """kinships""" +321 72 model """hole""" +321 72 loss """softplus""" +321 72 regularizer """no""" +321 72 optimizer """adam""" +321 72 training_loop """owa""" +321 72 negative_sampler """basic""" +321 72 evaluator """rankbased""" +321 73 dataset """kinships""" +321 73 model """hole""" +321 73 loss """softplus""" +321 73 regularizer """no""" +321 73 optimizer """adam""" +321 73 training_loop """owa""" +321 73 negative_sampler """basic""" +321 73 evaluator """rankbased""" +321 74 dataset """kinships""" +321 74 model """hole""" +321 74 loss """softplus""" +321 74 regularizer """no""" +321 74 optimizer """adam""" +321 74 
training_loop """owa""" +321 74 negative_sampler """basic""" +321 74 evaluator """rankbased""" +321 75 dataset """kinships""" +321 75 model """hole""" +321 75 loss """softplus""" +321 75 regularizer """no""" +321 75 optimizer """adam""" +321 75 training_loop """owa""" +321 75 negative_sampler """basic""" +321 75 evaluator """rankbased""" +321 76 dataset """kinships""" +321 76 model """hole""" +321 76 loss """softplus""" +321 76 regularizer """no""" +321 76 optimizer """adam""" +321 76 training_loop """owa""" +321 76 negative_sampler """basic""" +321 76 evaluator """rankbased""" +321 77 dataset """kinships""" +321 77 model """hole""" +321 77 loss """softplus""" +321 77 regularizer """no""" +321 77 optimizer """adam""" +321 77 training_loop """owa""" +321 77 negative_sampler """basic""" +321 77 evaluator """rankbased""" +321 78 dataset """kinships""" +321 78 model """hole""" +321 78 loss """softplus""" +321 78 regularizer """no""" +321 78 optimizer """adam""" +321 78 training_loop """owa""" +321 78 negative_sampler """basic""" +321 78 evaluator """rankbased""" +321 79 dataset """kinships""" +321 79 model """hole""" +321 79 loss """softplus""" +321 79 regularizer """no""" +321 79 optimizer """adam""" +321 79 training_loop """owa""" +321 79 negative_sampler """basic""" +321 79 evaluator """rankbased""" +321 80 dataset """kinships""" +321 80 model """hole""" +321 80 loss """softplus""" +321 80 regularizer """no""" +321 80 optimizer """adam""" +321 80 training_loop """owa""" +321 80 negative_sampler """basic""" +321 80 evaluator """rankbased""" +321 81 dataset """kinships""" +321 81 model """hole""" +321 81 loss """softplus""" +321 81 regularizer """no""" +321 81 optimizer """adam""" +321 81 training_loop """owa""" +321 81 negative_sampler """basic""" +321 81 evaluator """rankbased""" +321 82 dataset """kinships""" +321 82 model """hole""" +321 82 loss """softplus""" +321 82 regularizer """no""" +321 82 optimizer """adam""" +321 82 training_loop """owa""" +321 82 
negative_sampler """basic""" +321 82 evaluator """rankbased""" +321 83 dataset """kinships""" +321 83 model """hole""" +321 83 loss """softplus""" +321 83 regularizer """no""" +321 83 optimizer """adam""" +321 83 training_loop """owa""" +321 83 negative_sampler """basic""" +321 83 evaluator """rankbased""" +321 84 dataset """kinships""" +321 84 model """hole""" +321 84 loss """softplus""" +321 84 regularizer """no""" +321 84 optimizer """adam""" +321 84 training_loop """owa""" +321 84 negative_sampler """basic""" +321 84 evaluator """rankbased""" +321 85 dataset """kinships""" +321 85 model """hole""" +321 85 loss """softplus""" +321 85 regularizer """no""" +321 85 optimizer """adam""" +321 85 training_loop """owa""" +321 85 negative_sampler """basic""" +321 85 evaluator """rankbased""" +321 86 dataset """kinships""" +321 86 model """hole""" +321 86 loss """softplus""" +321 86 regularizer """no""" +321 86 optimizer """adam""" +321 86 training_loop """owa""" +321 86 negative_sampler """basic""" +321 86 evaluator """rankbased""" +321 87 dataset """kinships""" +321 87 model """hole""" +321 87 loss """softplus""" +321 87 regularizer """no""" +321 87 optimizer """adam""" +321 87 training_loop """owa""" +321 87 negative_sampler """basic""" +321 87 evaluator """rankbased""" +321 88 dataset """kinships""" +321 88 model """hole""" +321 88 loss """softplus""" +321 88 regularizer """no""" +321 88 optimizer """adam""" +321 88 training_loop """owa""" +321 88 negative_sampler """basic""" +321 88 evaluator """rankbased""" +321 89 dataset """kinships""" +321 89 model """hole""" +321 89 loss """softplus""" +321 89 regularizer """no""" +321 89 optimizer """adam""" +321 89 training_loop """owa""" +321 89 negative_sampler """basic""" +321 89 evaluator """rankbased""" +321 90 dataset """kinships""" +321 90 model """hole""" +321 90 loss """softplus""" +321 90 regularizer """no""" +321 90 optimizer """adam""" +321 90 training_loop """owa""" +321 90 negative_sampler """basic""" +321 90 
evaluator """rankbased""" +321 91 dataset """kinships""" +321 91 model """hole""" +321 91 loss """softplus""" +321 91 regularizer """no""" +321 91 optimizer """adam""" +321 91 training_loop """owa""" +321 91 negative_sampler """basic""" +321 91 evaluator """rankbased""" +321 92 dataset """kinships""" +321 92 model """hole""" +321 92 loss """softplus""" +321 92 regularizer """no""" +321 92 optimizer """adam""" +321 92 training_loop """owa""" +321 92 negative_sampler """basic""" +321 92 evaluator """rankbased""" +321 93 dataset """kinships""" +321 93 model """hole""" +321 93 loss """softplus""" +321 93 regularizer """no""" +321 93 optimizer """adam""" +321 93 training_loop """owa""" +321 93 negative_sampler """basic""" +321 93 evaluator """rankbased""" +321 94 dataset """kinships""" +321 94 model """hole""" +321 94 loss """softplus""" +321 94 regularizer """no""" +321 94 optimizer """adam""" +321 94 training_loop """owa""" +321 94 negative_sampler """basic""" +321 94 evaluator """rankbased""" +321 95 dataset """kinships""" +321 95 model """hole""" +321 95 loss """softplus""" +321 95 regularizer """no""" +321 95 optimizer """adam""" +321 95 training_loop """owa""" +321 95 negative_sampler """basic""" +321 95 evaluator """rankbased""" +321 96 dataset """kinships""" +321 96 model """hole""" +321 96 loss """softplus""" +321 96 regularizer """no""" +321 96 optimizer """adam""" +321 96 training_loop """owa""" +321 96 negative_sampler """basic""" +321 96 evaluator """rankbased""" +321 97 dataset """kinships""" +321 97 model """hole""" +321 97 loss """softplus""" +321 97 regularizer """no""" +321 97 optimizer """adam""" +321 97 training_loop """owa""" +321 97 negative_sampler """basic""" +321 97 evaluator """rankbased""" +321 98 dataset """kinships""" +321 98 model """hole""" +321 98 loss """softplus""" +321 98 regularizer """no""" +321 98 optimizer """adam""" +321 98 training_loop """owa""" +321 98 negative_sampler """basic""" +321 98 evaluator """rankbased""" +321 99 
dataset """kinships""" +321 99 model """hole""" +321 99 loss """softplus""" +321 99 regularizer """no""" +321 99 optimizer """adam""" +321 99 training_loop """owa""" +321 99 negative_sampler """basic""" +321 99 evaluator """rankbased""" +321 100 dataset """kinships""" +321 100 model """hole""" +321 100 loss """softplus""" +321 100 regularizer """no""" +321 100 optimizer """adam""" +321 100 training_loop """owa""" +321 100 negative_sampler """basic""" +321 100 evaluator """rankbased""" +322 1 model.embedding_dim 1.0 +322 1 loss.margin 0.8908146913434322 +322 1 optimizer.lr 0.06831757686817903 +322 1 negative_sampler.num_negs_per_pos 75.0 +322 1 training.batch_size 2.0 +322 2 model.embedding_dim 0.0 +322 2 loss.margin 4.554484975982342 +322 2 optimizer.lr 0.013391052504825984 +322 2 negative_sampler.num_negs_per_pos 12.0 +322 2 training.batch_size 2.0 +322 3 model.embedding_dim 1.0 +322 3 loss.margin 2.294423353000809 +322 3 optimizer.lr 0.011284426484726627 +322 3 negative_sampler.num_negs_per_pos 54.0 +322 3 training.batch_size 2.0 +322 4 model.embedding_dim 0.0 +322 4 loss.margin 9.298815362659369 +322 4 optimizer.lr 0.012970592733269958 +322 4 negative_sampler.num_negs_per_pos 90.0 +322 4 training.batch_size 0.0 +322 5 model.embedding_dim 0.0 +322 5 loss.margin 1.6001592876999478 +322 5 optimizer.lr 0.0022419470128798163 +322 5 negative_sampler.num_negs_per_pos 44.0 +322 5 training.batch_size 0.0 +322 6 model.embedding_dim 1.0 +322 6 loss.margin 1.2924527496034024 +322 6 optimizer.lr 0.004892957917013643 +322 6 negative_sampler.num_negs_per_pos 58.0 +322 6 training.batch_size 0.0 +322 7 model.embedding_dim 2.0 +322 7 loss.margin 6.8197493021944 +322 7 optimizer.lr 0.004959446088124088 +322 7 negative_sampler.num_negs_per_pos 10.0 +322 7 training.batch_size 2.0 +322 8 model.embedding_dim 2.0 +322 8 loss.margin 3.9249521001597443 +322 8 optimizer.lr 0.0020941691553451294 +322 8 negative_sampler.num_negs_per_pos 26.0 +322 8 training.batch_size 0.0 +322 9 
model.embedding_dim 2.0 +322 9 loss.margin 1.487517028862957 +322 9 optimizer.lr 0.0035833205420927384 +322 9 negative_sampler.num_negs_per_pos 72.0 +322 9 training.batch_size 1.0 +322 10 model.embedding_dim 0.0 +322 10 loss.margin 9.859535864955008 +322 10 optimizer.lr 0.07302982125523089 +322 10 negative_sampler.num_negs_per_pos 53.0 +322 10 training.batch_size 0.0 +322 11 model.embedding_dim 0.0 +322 11 loss.margin 0.7889204510929305 +322 11 optimizer.lr 0.002829424814649026 +322 11 negative_sampler.num_negs_per_pos 18.0 +322 11 training.batch_size 1.0 +322 12 model.embedding_dim 1.0 +322 12 loss.margin 2.239443863879851 +322 12 optimizer.lr 0.02141158988942337 +322 12 negative_sampler.num_negs_per_pos 0.0 +322 12 training.batch_size 1.0 +322 13 model.embedding_dim 2.0 +322 13 loss.margin 9.65691815560157 +322 13 optimizer.lr 0.06536339961320799 +322 13 negative_sampler.num_negs_per_pos 0.0 +322 13 training.batch_size 2.0 +322 14 model.embedding_dim 1.0 +322 14 loss.margin 4.495392132907204 +322 14 optimizer.lr 0.0018194125057233662 +322 14 negative_sampler.num_negs_per_pos 43.0 +322 14 training.batch_size 2.0 +322 15 model.embedding_dim 1.0 +322 15 loss.margin 2.8689656467560622 +322 15 optimizer.lr 0.0624020546574854 +322 15 negative_sampler.num_negs_per_pos 7.0 +322 15 training.batch_size 1.0 +322 16 model.embedding_dim 1.0 +322 16 loss.margin 5.520429822307001 +322 16 optimizer.lr 0.019054054644346365 +322 16 negative_sampler.num_negs_per_pos 43.0 +322 16 training.batch_size 2.0 +322 17 model.embedding_dim 1.0 +322 17 loss.margin 5.740962042104138 +322 17 optimizer.lr 0.06418091191131574 +322 17 negative_sampler.num_negs_per_pos 81.0 +322 17 training.batch_size 0.0 +322 18 model.embedding_dim 0.0 +322 18 loss.margin 1.8595311608270886 +322 18 optimizer.lr 0.0011554032445146524 +322 18 negative_sampler.num_negs_per_pos 44.0 +322 18 training.batch_size 2.0 +322 19 model.embedding_dim 1.0 +322 19 loss.margin 1.4191141401598844 +322 19 optimizer.lr 
0.006853808912409369 +322 19 negative_sampler.num_negs_per_pos 88.0 +322 19 training.batch_size 2.0 +322 20 model.embedding_dim 2.0 +322 20 loss.margin 5.651746942392268 +322 20 optimizer.lr 0.025781846847573245 +322 20 negative_sampler.num_negs_per_pos 80.0 +322 20 training.batch_size 2.0 +322 21 model.embedding_dim 2.0 +322 21 loss.margin 8.338928965640171 +322 21 optimizer.lr 0.03225888394594041 +322 21 negative_sampler.num_negs_per_pos 53.0 +322 21 training.batch_size 2.0 +322 22 model.embedding_dim 1.0 +322 22 loss.margin 6.499172104315298 +322 22 optimizer.lr 0.040081338303781223 +322 22 negative_sampler.num_negs_per_pos 97.0 +322 22 training.batch_size 2.0 +322 23 model.embedding_dim 2.0 +322 23 loss.margin 1.3726900755847469 +322 23 optimizer.lr 0.08129953674752657 +322 23 negative_sampler.num_negs_per_pos 67.0 +322 23 training.batch_size 0.0 +322 24 model.embedding_dim 1.0 +322 24 loss.margin 8.461075788696974 +322 24 optimizer.lr 0.0011682749094543167 +322 24 negative_sampler.num_negs_per_pos 73.0 +322 24 training.batch_size 2.0 +322 25 model.embedding_dim 0.0 +322 25 loss.margin 5.193151254329321 +322 25 optimizer.lr 0.03465265515219201 +322 25 negative_sampler.num_negs_per_pos 96.0 +322 25 training.batch_size 2.0 +322 26 model.embedding_dim 2.0 +322 26 loss.margin 6.004167821191798 +322 26 optimizer.lr 0.005178131896087755 +322 26 negative_sampler.num_negs_per_pos 74.0 +322 26 training.batch_size 1.0 +322 27 model.embedding_dim 0.0 +322 27 loss.margin 2.8962363831755242 +322 27 optimizer.lr 0.040511226033572136 +322 27 negative_sampler.num_negs_per_pos 59.0 +322 27 training.batch_size 1.0 +322 28 model.embedding_dim 2.0 +322 28 loss.margin 6.29460915795418 +322 28 optimizer.lr 0.04557675030510165 +322 28 negative_sampler.num_negs_per_pos 88.0 +322 28 training.batch_size 2.0 +322 29 model.embedding_dim 0.0 +322 29 loss.margin 8.810152399697078 +322 29 optimizer.lr 0.008596293881629349 +322 29 negative_sampler.num_negs_per_pos 81.0 +322 29 
training.batch_size 0.0 +322 30 model.embedding_dim 0.0 +322 30 loss.margin 9.7161138475793 +322 30 optimizer.lr 0.00904087309102136 +322 30 negative_sampler.num_negs_per_pos 92.0 +322 30 training.batch_size 0.0 +322 31 model.embedding_dim 0.0 +322 31 loss.margin 8.058842656061294 +322 31 optimizer.lr 0.016695877092187495 +322 31 negative_sampler.num_negs_per_pos 10.0 +322 31 training.batch_size 2.0 +322 32 model.embedding_dim 2.0 +322 32 loss.margin 9.758998850202277 +322 32 optimizer.lr 0.004333353487748389 +322 32 negative_sampler.num_negs_per_pos 35.0 +322 32 training.batch_size 2.0 +322 33 model.embedding_dim 1.0 +322 33 loss.margin 4.955585287166624 +322 33 optimizer.lr 0.0011649149369875743 +322 33 negative_sampler.num_negs_per_pos 4.0 +322 33 training.batch_size 2.0 +322 34 model.embedding_dim 1.0 +322 34 loss.margin 3.634907971278676 +322 34 optimizer.lr 0.09090417420568427 +322 34 negative_sampler.num_negs_per_pos 82.0 +322 34 training.batch_size 0.0 +322 35 model.embedding_dim 2.0 +322 35 loss.margin 9.022439576824894 +322 35 optimizer.lr 0.00277428444160494 +322 35 negative_sampler.num_negs_per_pos 96.0 +322 35 training.batch_size 2.0 +322 36 model.embedding_dim 1.0 +322 36 loss.margin 8.593071696989027 +322 36 optimizer.lr 0.0018655068338784043 +322 36 negative_sampler.num_negs_per_pos 65.0 +322 36 training.batch_size 1.0 +322 37 model.embedding_dim 0.0 +322 37 loss.margin 8.928553434319818 +322 37 optimizer.lr 0.07517723190328679 +322 37 negative_sampler.num_negs_per_pos 75.0 +322 37 training.batch_size 0.0 +322 38 model.embedding_dim 0.0 +322 38 loss.margin 4.928722951668835 +322 38 optimizer.lr 0.0014925931092714686 +322 38 negative_sampler.num_negs_per_pos 76.0 +322 38 training.batch_size 1.0 +322 39 model.embedding_dim 2.0 +322 39 loss.margin 6.276941852126326 +322 39 optimizer.lr 0.0027900815690265605 +322 39 negative_sampler.num_negs_per_pos 63.0 +322 39 training.batch_size 1.0 +322 40 model.embedding_dim 2.0 +322 40 loss.margin 
7.694485715562939 +322 40 optimizer.lr 0.005457645529250847 +322 40 negative_sampler.num_negs_per_pos 53.0 +322 40 training.batch_size 1.0 +322 41 model.embedding_dim 2.0 +322 41 loss.margin 8.85892217231406 +322 41 optimizer.lr 0.0013259791485328606 +322 41 negative_sampler.num_negs_per_pos 95.0 +322 41 training.batch_size 1.0 +322 42 model.embedding_dim 2.0 +322 42 loss.margin 6.60970969666579 +322 42 optimizer.lr 0.05216705625856393 +322 42 negative_sampler.num_negs_per_pos 69.0 +322 42 training.batch_size 2.0 +322 43 model.embedding_dim 1.0 +322 43 loss.margin 9.880327484643482 +322 43 optimizer.lr 0.0023193246163607647 +322 43 negative_sampler.num_negs_per_pos 87.0 +322 43 training.batch_size 2.0 +322 44 model.embedding_dim 0.0 +322 44 loss.margin 6.321225859504986 +322 44 optimizer.lr 0.010594166774379314 +322 44 negative_sampler.num_negs_per_pos 57.0 +322 44 training.batch_size 0.0 +322 45 model.embedding_dim 1.0 +322 45 loss.margin 0.9624921565221526 +322 45 optimizer.lr 0.09278083167493677 +322 45 negative_sampler.num_negs_per_pos 27.0 +322 45 training.batch_size 0.0 +322 46 model.embedding_dim 1.0 +322 46 loss.margin 0.9376790757878106 +322 46 optimizer.lr 0.011309513345679768 +322 46 negative_sampler.num_negs_per_pos 2.0 +322 46 training.batch_size 0.0 +322 47 model.embedding_dim 0.0 +322 47 loss.margin 9.769773160427647 +322 47 optimizer.lr 0.005713076773873417 +322 47 negative_sampler.num_negs_per_pos 25.0 +322 47 training.batch_size 2.0 +322 48 model.embedding_dim 1.0 +322 48 loss.margin 1.4179535082197379 +322 48 optimizer.lr 0.08960318108643342 +322 48 negative_sampler.num_negs_per_pos 95.0 +322 48 training.batch_size 1.0 +322 49 model.embedding_dim 0.0 +322 49 loss.margin 0.9797552282548216 +322 49 optimizer.lr 0.0020287110374496192 +322 49 negative_sampler.num_negs_per_pos 75.0 +322 49 training.batch_size 1.0 +322 50 model.embedding_dim 2.0 +322 50 loss.margin 6.617155133869458 +322 50 optimizer.lr 0.0019447980137322803 +322 50 
negative_sampler.num_negs_per_pos 91.0 +322 50 training.batch_size 1.0 +322 51 model.embedding_dim 2.0 +322 51 loss.margin 5.641257521901077 +322 51 optimizer.lr 0.010897361798914875 +322 51 negative_sampler.num_negs_per_pos 21.0 +322 51 training.batch_size 1.0 +322 52 model.embedding_dim 1.0 +322 52 loss.margin 2.5825934630007255 +322 52 optimizer.lr 0.035273436594310745 +322 52 negative_sampler.num_negs_per_pos 38.0 +322 52 training.batch_size 2.0 +322 53 model.embedding_dim 0.0 +322 53 loss.margin 3.5977625679282763 +322 53 optimizer.lr 0.0021131011535153493 +322 53 negative_sampler.num_negs_per_pos 87.0 +322 53 training.batch_size 0.0 +322 54 model.embedding_dim 0.0 +322 54 loss.margin 2.7594631376250147 +322 54 optimizer.lr 0.002791606721475015 +322 54 negative_sampler.num_negs_per_pos 37.0 +322 54 training.batch_size 1.0 +322 55 model.embedding_dim 0.0 +322 55 loss.margin 9.156219828119024 +322 55 optimizer.lr 0.028479965051795873 +322 55 negative_sampler.num_negs_per_pos 50.0 +322 55 training.batch_size 2.0 +322 56 model.embedding_dim 0.0 +322 56 loss.margin 9.44698938992913 +322 56 optimizer.lr 0.001869497842939085 +322 56 negative_sampler.num_negs_per_pos 70.0 +322 56 training.batch_size 0.0 +322 57 model.embedding_dim 2.0 +322 57 loss.margin 8.877358652305102 +322 57 optimizer.lr 0.003945321618476923 +322 57 negative_sampler.num_negs_per_pos 64.0 +322 57 training.batch_size 2.0 +322 58 model.embedding_dim 1.0 +322 58 loss.margin 9.302459966724706 +322 58 optimizer.lr 0.0026873917428445193 +322 58 negative_sampler.num_negs_per_pos 0.0 +322 58 training.batch_size 1.0 +322 59 model.embedding_dim 2.0 +322 59 loss.margin 9.626321161582533 +322 59 optimizer.lr 0.003309981709400045 +322 59 negative_sampler.num_negs_per_pos 38.0 +322 59 training.batch_size 0.0 +322 60 model.embedding_dim 1.0 +322 60 loss.margin 7.013795403195362 +322 60 optimizer.lr 0.03962242906896504 +322 60 negative_sampler.num_negs_per_pos 32.0 +322 60 training.batch_size 2.0 +322 61 
model.embedding_dim 0.0 +322 61 loss.margin 2.3093247436412647 +322 61 optimizer.lr 0.003923011360455001 +322 61 negative_sampler.num_negs_per_pos 20.0 +322 61 training.batch_size 1.0 +322 62 model.embedding_dim 0.0 +322 62 loss.margin 1.2445400037500733 +322 62 optimizer.lr 0.05861884352926236 +322 62 negative_sampler.num_negs_per_pos 4.0 +322 62 training.batch_size 1.0 +322 63 model.embedding_dim 1.0 +322 63 loss.margin 0.5315226977305585 +322 63 optimizer.lr 0.01636976051286965 +322 63 negative_sampler.num_negs_per_pos 99.0 +322 63 training.batch_size 2.0 +322 64 model.embedding_dim 2.0 +322 64 loss.margin 2.9445973126361635 +322 64 optimizer.lr 0.007078286543807294 +322 64 negative_sampler.num_negs_per_pos 13.0 +322 64 training.batch_size 2.0 +322 65 model.embedding_dim 0.0 +322 65 loss.margin 5.500258452414163 +322 65 optimizer.lr 0.008144862903374052 +322 65 negative_sampler.num_negs_per_pos 87.0 +322 65 training.batch_size 0.0 +322 66 model.embedding_dim 2.0 +322 66 loss.margin 3.903557977407205 +322 66 optimizer.lr 0.002192378129234608 +322 66 negative_sampler.num_negs_per_pos 51.0 +322 66 training.batch_size 1.0 +322 67 model.embedding_dim 2.0 +322 67 loss.margin 3.1176282182571153 +322 67 optimizer.lr 0.03880433746295237 +322 67 negative_sampler.num_negs_per_pos 14.0 +322 67 training.batch_size 0.0 +322 68 model.embedding_dim 1.0 +322 68 loss.margin 5.638817731128771 +322 68 optimizer.lr 0.0034890426426877376 +322 68 negative_sampler.num_negs_per_pos 23.0 +322 68 training.batch_size 0.0 +322 69 model.embedding_dim 0.0 +322 69 loss.margin 7.189140280553737 +322 69 optimizer.lr 0.005509173171628343 +322 69 negative_sampler.num_negs_per_pos 59.0 +322 69 training.batch_size 1.0 +322 70 model.embedding_dim 2.0 +322 70 loss.margin 6.2121507309757495 +322 70 optimizer.lr 0.002811037284534815 +322 70 negative_sampler.num_negs_per_pos 30.0 +322 70 training.batch_size 0.0 +322 71 model.embedding_dim 0.0 +322 71 loss.margin 8.023345003469647 +322 71 optimizer.lr 
0.0021120121132579394 +322 71 negative_sampler.num_negs_per_pos 67.0 +322 71 training.batch_size 0.0 +322 72 model.embedding_dim 0.0 +322 72 loss.margin 1.8000223738295666 +322 72 optimizer.lr 0.007023772402350412 +322 72 negative_sampler.num_negs_per_pos 58.0 +322 72 training.batch_size 2.0 +322 73 model.embedding_dim 0.0 +322 73 loss.margin 4.668616407342584 +322 73 optimizer.lr 0.0862533137619232 +322 73 negative_sampler.num_negs_per_pos 70.0 +322 73 training.batch_size 0.0 +322 74 model.embedding_dim 2.0 +322 74 loss.margin 0.5302487499495095 +322 74 optimizer.lr 0.08332819422570184 +322 74 negative_sampler.num_negs_per_pos 52.0 +322 74 training.batch_size 0.0 +322 75 model.embedding_dim 0.0 +322 75 loss.margin 7.195049505628785 +322 75 optimizer.lr 0.00496945760161315 +322 75 negative_sampler.num_negs_per_pos 22.0 +322 75 training.batch_size 2.0 +322 76 model.embedding_dim 2.0 +322 76 loss.margin 1.0494911323916694 +322 76 optimizer.lr 0.05142662830673179 +322 76 negative_sampler.num_negs_per_pos 66.0 +322 76 training.batch_size 1.0 +322 77 model.embedding_dim 0.0 +322 77 loss.margin 1.589441340676917 +322 77 optimizer.lr 0.001051551884722503 +322 77 negative_sampler.num_negs_per_pos 79.0 +322 77 training.batch_size 1.0 +322 78 model.embedding_dim 0.0 +322 78 loss.margin 2.941155670081237 +322 78 optimizer.lr 0.00253053281000007 +322 78 negative_sampler.num_negs_per_pos 62.0 +322 78 training.batch_size 0.0 +322 79 model.embedding_dim 2.0 +322 79 loss.margin 7.749213219658964 +322 79 optimizer.lr 0.003013912854241621 +322 79 negative_sampler.num_negs_per_pos 40.0 +322 79 training.batch_size 0.0 +322 80 model.embedding_dim 0.0 +322 80 loss.margin 4.667624680615518 +322 80 optimizer.lr 0.002502553176527628 +322 80 negative_sampler.num_negs_per_pos 1.0 +322 80 training.batch_size 0.0 +322 81 model.embedding_dim 0.0 +322 81 loss.margin 8.586227354505773 +322 81 optimizer.lr 0.0038037947963036082 +322 81 negative_sampler.num_negs_per_pos 21.0 +322 81 
training.batch_size 2.0 +322 82 model.embedding_dim 2.0 +322 82 loss.margin 1.7086585520709576 +322 82 optimizer.lr 0.07323717820903189 +322 82 negative_sampler.num_negs_per_pos 77.0 +322 82 training.batch_size 2.0 +322 83 model.embedding_dim 0.0 +322 83 loss.margin 7.915856180853845 +322 83 optimizer.lr 0.006892179715927729 +322 83 negative_sampler.num_negs_per_pos 91.0 +322 83 training.batch_size 1.0 +322 84 model.embedding_dim 0.0 +322 84 loss.margin 4.284875089790138 +322 84 optimizer.lr 0.019888072421431507 +322 84 negative_sampler.num_negs_per_pos 73.0 +322 84 training.batch_size 2.0 +322 85 model.embedding_dim 0.0 +322 85 loss.margin 3.9200052096381612 +322 85 optimizer.lr 0.0022824958004582785 +322 85 negative_sampler.num_negs_per_pos 90.0 +322 85 training.batch_size 2.0 +322 86 model.embedding_dim 1.0 +322 86 loss.margin 4.8080897899801425 +322 86 optimizer.lr 0.00865701270040236 +322 86 negative_sampler.num_negs_per_pos 40.0 +322 86 training.batch_size 1.0 +322 87 model.embedding_dim 2.0 +322 87 loss.margin 1.0301103746537794 +322 87 optimizer.lr 0.003830108241301436 +322 87 negative_sampler.num_negs_per_pos 15.0 +322 87 training.batch_size 2.0 +322 88 model.embedding_dim 0.0 +322 88 loss.margin 8.480449165017362 +322 88 optimizer.lr 0.017143807520488757 +322 88 negative_sampler.num_negs_per_pos 58.0 +322 88 training.batch_size 0.0 +322 89 model.embedding_dim 2.0 +322 89 loss.margin 4.815260145313054 +322 89 optimizer.lr 0.05735672404383741 +322 89 negative_sampler.num_negs_per_pos 69.0 +322 89 training.batch_size 1.0 +322 90 model.embedding_dim 0.0 +322 90 loss.margin 5.921074555213858 +322 90 optimizer.lr 0.0016302465923029505 +322 90 negative_sampler.num_negs_per_pos 10.0 +322 90 training.batch_size 2.0 +322 91 model.embedding_dim 0.0 +322 91 loss.margin 7.410608110376637 +322 91 optimizer.lr 0.032412729736077166 +322 91 negative_sampler.num_negs_per_pos 74.0 +322 91 training.batch_size 1.0 +322 92 model.embedding_dim 1.0 +322 92 loss.margin 
9.327501085214896 +322 92 optimizer.lr 0.005344782993849434 +322 92 negative_sampler.num_negs_per_pos 49.0 +322 92 training.batch_size 2.0 +322 93 model.embedding_dim 2.0 +322 93 loss.margin 2.2212027158322605 +322 93 optimizer.lr 0.034930379779141214 +322 93 negative_sampler.num_negs_per_pos 69.0 +322 93 training.batch_size 1.0 +322 94 model.embedding_dim 2.0 +322 94 loss.margin 4.679550045498879 +322 94 optimizer.lr 0.007504346312016438 +322 94 negative_sampler.num_negs_per_pos 10.0 +322 94 training.batch_size 2.0 +322 95 model.embedding_dim 0.0 +322 95 loss.margin 3.162948375709773 +322 95 optimizer.lr 0.020922811664405162 +322 95 negative_sampler.num_negs_per_pos 73.0 +322 95 training.batch_size 2.0 +322 96 model.embedding_dim 1.0 +322 96 loss.margin 1.9474769911741532 +322 96 optimizer.lr 0.08044693271934376 +322 96 negative_sampler.num_negs_per_pos 39.0 +322 96 training.batch_size 2.0 +322 97 model.embedding_dim 0.0 +322 97 loss.margin 4.435120996528909 +322 97 optimizer.lr 0.004459015341836763 +322 97 negative_sampler.num_negs_per_pos 75.0 +322 97 training.batch_size 2.0 +322 98 model.embedding_dim 0.0 +322 98 loss.margin 9.964233268858967 +322 98 optimizer.lr 0.001277307157354542 +322 98 negative_sampler.num_negs_per_pos 70.0 +322 98 training.batch_size 2.0 +322 99 model.embedding_dim 2.0 +322 99 loss.margin 1.0642173036402982 +322 99 optimizer.lr 0.03932712459209568 +322 99 negative_sampler.num_negs_per_pos 71.0 +322 99 training.batch_size 1.0 +322 100 model.embedding_dim 2.0 +322 100 loss.margin 8.218529635705892 +322 100 optimizer.lr 0.002674120737285562 +322 100 negative_sampler.num_negs_per_pos 81.0 +322 100 training.batch_size 0.0 +322 1 dataset """kinships""" +322 1 model """hole""" +322 1 loss """marginranking""" +322 1 regularizer """no""" +322 1 optimizer """adam""" +322 1 training_loop """owa""" +322 1 negative_sampler """basic""" +322 1 evaluator """rankbased""" +322 2 dataset """kinships""" +322 2 model """hole""" +322 2 loss 
"""marginranking""" +322 2 regularizer """no""" +322 2 optimizer """adam""" +322 2 training_loop """owa""" +322 2 negative_sampler """basic""" +322 2 evaluator """rankbased""" +322 3 dataset """kinships""" +322 3 model """hole""" +322 3 loss """marginranking""" +322 3 regularizer """no""" +322 3 optimizer """adam""" +322 3 training_loop """owa""" +322 3 negative_sampler """basic""" +322 3 evaluator """rankbased""" +322 4 dataset """kinships""" +322 4 model """hole""" +322 4 loss """marginranking""" +322 4 regularizer """no""" +322 4 optimizer """adam""" +322 4 training_loop """owa""" +322 4 negative_sampler """basic""" +322 4 evaluator """rankbased""" +322 5 dataset """kinships""" +322 5 model """hole""" +322 5 loss """marginranking""" +322 5 regularizer """no""" +322 5 optimizer """adam""" +322 5 training_loop """owa""" +322 5 negative_sampler """basic""" +322 5 evaluator """rankbased""" +322 6 dataset """kinships""" +322 6 model """hole""" +322 6 loss """marginranking""" +322 6 regularizer """no""" +322 6 optimizer """adam""" +322 6 training_loop """owa""" +322 6 negative_sampler """basic""" +322 6 evaluator """rankbased""" +322 7 dataset """kinships""" +322 7 model """hole""" +322 7 loss """marginranking""" +322 7 regularizer """no""" +322 7 optimizer """adam""" +322 7 training_loop """owa""" +322 7 negative_sampler """basic""" +322 7 evaluator """rankbased""" +322 8 dataset """kinships""" +322 8 model """hole""" +322 8 loss """marginranking""" +322 8 regularizer """no""" +322 8 optimizer """adam""" +322 8 training_loop """owa""" +322 8 negative_sampler """basic""" +322 8 evaluator """rankbased""" +322 9 dataset """kinships""" +322 9 model """hole""" +322 9 loss """marginranking""" +322 9 regularizer """no""" +322 9 optimizer """adam""" +322 9 training_loop """owa""" +322 9 negative_sampler """basic""" +322 9 evaluator """rankbased""" +322 10 dataset """kinships""" +322 10 model """hole""" +322 10 loss """marginranking""" +322 10 regularizer """no""" +322 10 
optimizer """adam""" +322 10 training_loop """owa""" +322 10 negative_sampler """basic""" +322 10 evaluator """rankbased""" +322 11 dataset """kinships""" +322 11 model """hole""" +322 11 loss """marginranking""" +322 11 regularizer """no""" +322 11 optimizer """adam""" +322 11 training_loop """owa""" +322 11 negative_sampler """basic""" +322 11 evaluator """rankbased""" +322 12 dataset """kinships""" +322 12 model """hole""" +322 12 loss """marginranking""" +322 12 regularizer """no""" +322 12 optimizer """adam""" +322 12 training_loop """owa""" +322 12 negative_sampler """basic""" +322 12 evaluator """rankbased""" +322 13 dataset """kinships""" +322 13 model """hole""" +322 13 loss """marginranking""" +322 13 regularizer """no""" +322 13 optimizer """adam""" +322 13 training_loop """owa""" +322 13 negative_sampler """basic""" +322 13 evaluator """rankbased""" +322 14 dataset """kinships""" +322 14 model """hole""" +322 14 loss """marginranking""" +322 14 regularizer """no""" +322 14 optimizer """adam""" +322 14 training_loop """owa""" +322 14 negative_sampler """basic""" +322 14 evaluator """rankbased""" +322 15 dataset """kinships""" +322 15 model """hole""" +322 15 loss """marginranking""" +322 15 regularizer """no""" +322 15 optimizer """adam""" +322 15 training_loop """owa""" +322 15 negative_sampler """basic""" +322 15 evaluator """rankbased""" +322 16 dataset """kinships""" +322 16 model """hole""" +322 16 loss """marginranking""" +322 16 regularizer """no""" +322 16 optimizer """adam""" +322 16 training_loop """owa""" +322 16 negative_sampler """basic""" +322 16 evaluator """rankbased""" +322 17 dataset """kinships""" +322 17 model """hole""" +322 17 loss """marginranking""" +322 17 regularizer """no""" +322 17 optimizer """adam""" +322 17 training_loop """owa""" +322 17 negative_sampler """basic""" +322 17 evaluator """rankbased""" +322 18 dataset """kinships""" +322 18 model """hole""" +322 18 loss """marginranking""" +322 18 regularizer """no""" +322 18 
optimizer """adam""" +322 18 training_loop """owa""" +322 18 negative_sampler """basic""" +322 18 evaluator """rankbased""" +322 19 dataset """kinships""" +322 19 model """hole""" +322 19 loss """marginranking""" +322 19 regularizer """no""" +322 19 optimizer """adam""" +322 19 training_loop """owa""" +322 19 negative_sampler """basic""" +322 19 evaluator """rankbased""" +322 20 dataset """kinships""" +322 20 model """hole""" +322 20 loss """marginranking""" +322 20 regularizer """no""" +322 20 optimizer """adam""" +322 20 training_loop """owa""" +322 20 negative_sampler """basic""" +322 20 evaluator """rankbased""" +322 21 dataset """kinships""" +322 21 model """hole""" +322 21 loss """marginranking""" +322 21 regularizer """no""" +322 21 optimizer """adam""" +322 21 training_loop """owa""" +322 21 negative_sampler """basic""" +322 21 evaluator """rankbased""" +322 22 dataset """kinships""" +322 22 model """hole""" +322 22 loss """marginranking""" +322 22 regularizer """no""" +322 22 optimizer """adam""" +322 22 training_loop """owa""" +322 22 negative_sampler """basic""" +322 22 evaluator """rankbased""" +322 23 dataset """kinships""" +322 23 model """hole""" +322 23 loss """marginranking""" +322 23 regularizer """no""" +322 23 optimizer """adam""" +322 23 training_loop """owa""" +322 23 negative_sampler """basic""" +322 23 evaluator """rankbased""" +322 24 dataset """kinships""" +322 24 model """hole""" +322 24 loss """marginranking""" +322 24 regularizer """no""" +322 24 optimizer """adam""" +322 24 training_loop """owa""" +322 24 negative_sampler """basic""" +322 24 evaluator """rankbased""" +322 25 dataset """kinships""" +322 25 model """hole""" +322 25 loss """marginranking""" +322 25 regularizer """no""" +322 25 optimizer """adam""" +322 25 training_loop """owa""" +322 25 negative_sampler """basic""" +322 25 evaluator """rankbased""" +322 26 dataset """kinships""" +322 26 model """hole""" +322 26 loss """marginranking""" +322 26 regularizer """no""" +322 26 
optimizer """adam""" +322 26 training_loop """owa""" +322 26 negative_sampler """basic""" +322 26 evaluator """rankbased""" +322 27 dataset """kinships""" +322 27 model """hole""" +322 27 loss """marginranking""" +322 27 regularizer """no""" +322 27 optimizer """adam""" +322 27 training_loop """owa""" +322 27 negative_sampler """basic""" +322 27 evaluator """rankbased""" +322 28 dataset """kinships""" +322 28 model """hole""" +322 28 loss """marginranking""" +322 28 regularizer """no""" +322 28 optimizer """adam""" +322 28 training_loop """owa""" +322 28 negative_sampler """basic""" +322 28 evaluator """rankbased""" +322 29 dataset """kinships""" +322 29 model """hole""" +322 29 loss """marginranking""" +322 29 regularizer """no""" +322 29 optimizer """adam""" +322 29 training_loop """owa""" +322 29 negative_sampler """basic""" +322 29 evaluator """rankbased""" +322 30 dataset """kinships""" +322 30 model """hole""" +322 30 loss """marginranking""" +322 30 regularizer """no""" +322 30 optimizer """adam""" +322 30 training_loop """owa""" +322 30 negative_sampler """basic""" +322 30 evaluator """rankbased""" +322 31 dataset """kinships""" +322 31 model """hole""" +322 31 loss """marginranking""" +322 31 regularizer """no""" +322 31 optimizer """adam""" +322 31 training_loop """owa""" +322 31 negative_sampler """basic""" +322 31 evaluator """rankbased""" +322 32 dataset """kinships""" +322 32 model """hole""" +322 32 loss """marginranking""" +322 32 regularizer """no""" +322 32 optimizer """adam""" +322 32 training_loop """owa""" +322 32 negative_sampler """basic""" +322 32 evaluator """rankbased""" +322 33 dataset """kinships""" +322 33 model """hole""" +322 33 loss """marginranking""" +322 33 regularizer """no""" +322 33 optimizer """adam""" +322 33 training_loop """owa""" +322 33 negative_sampler """basic""" +322 33 evaluator """rankbased""" +322 34 dataset """kinships""" +322 34 model """hole""" +322 34 loss """marginranking""" +322 34 regularizer """no""" +322 34 
optimizer """adam""" +322 34 training_loop """owa""" +322 34 negative_sampler """basic""" +322 34 evaluator """rankbased""" +322 35 dataset """kinships""" +322 35 model """hole""" +322 35 loss """marginranking""" +322 35 regularizer """no""" +322 35 optimizer """adam""" +322 35 training_loop """owa""" +322 35 negative_sampler """basic""" +322 35 evaluator """rankbased""" +322 36 dataset """kinships""" +322 36 model """hole""" +322 36 loss """marginranking""" +322 36 regularizer """no""" +322 36 optimizer """adam""" +322 36 training_loop """owa""" +322 36 negative_sampler """basic""" +322 36 evaluator """rankbased""" +322 37 dataset """kinships""" +322 37 model """hole""" +322 37 loss """marginranking""" +322 37 regularizer """no""" +322 37 optimizer """adam""" +322 37 training_loop """owa""" +322 37 negative_sampler """basic""" +322 37 evaluator """rankbased""" +322 38 dataset """kinships""" +322 38 model """hole""" +322 38 loss """marginranking""" +322 38 regularizer """no""" +322 38 optimizer """adam""" +322 38 training_loop """owa""" +322 38 negative_sampler """basic""" +322 38 evaluator """rankbased""" +322 39 dataset """kinships""" +322 39 model """hole""" +322 39 loss """marginranking""" +322 39 regularizer """no""" +322 39 optimizer """adam""" +322 39 training_loop """owa""" +322 39 negative_sampler """basic""" +322 39 evaluator """rankbased""" +322 40 dataset """kinships""" +322 40 model """hole""" +322 40 loss """marginranking""" +322 40 regularizer """no""" +322 40 optimizer """adam""" +322 40 training_loop """owa""" +322 40 negative_sampler """basic""" +322 40 evaluator """rankbased""" +322 41 dataset """kinships""" +322 41 model """hole""" +322 41 loss """marginranking""" +322 41 regularizer """no""" +322 41 optimizer """adam""" +322 41 training_loop """owa""" +322 41 negative_sampler """basic""" +322 41 evaluator """rankbased""" +322 42 dataset """kinships""" +322 42 model """hole""" +322 42 loss """marginranking""" +322 42 regularizer """no""" +322 42 
optimizer """adam""" +322 42 training_loop """owa""" +322 42 negative_sampler """basic""" +322 42 evaluator """rankbased""" +322 43 dataset """kinships""" +322 43 model """hole""" +322 43 loss """marginranking""" +322 43 regularizer """no""" +322 43 optimizer """adam""" +322 43 training_loop """owa""" +322 43 negative_sampler """basic""" +322 43 evaluator """rankbased""" +322 44 dataset """kinships""" +322 44 model """hole""" +322 44 loss """marginranking""" +322 44 regularizer """no""" +322 44 optimizer """adam""" +322 44 training_loop """owa""" +322 44 negative_sampler """basic""" +322 44 evaluator """rankbased""" +322 45 dataset """kinships""" +322 45 model """hole""" +322 45 loss """marginranking""" +322 45 regularizer """no""" +322 45 optimizer """adam""" +322 45 training_loop """owa""" +322 45 negative_sampler """basic""" +322 45 evaluator """rankbased""" +322 46 dataset """kinships""" +322 46 model """hole""" +322 46 loss """marginranking""" +322 46 regularizer """no""" +322 46 optimizer """adam""" +322 46 training_loop """owa""" +322 46 negative_sampler """basic""" +322 46 evaluator """rankbased""" +322 47 dataset """kinships""" +322 47 model """hole""" +322 47 loss """marginranking""" +322 47 regularizer """no""" +322 47 optimizer """adam""" +322 47 training_loop """owa""" +322 47 negative_sampler """basic""" +322 47 evaluator """rankbased""" +322 48 dataset """kinships""" +322 48 model """hole""" +322 48 loss """marginranking""" +322 48 regularizer """no""" +322 48 optimizer """adam""" +322 48 training_loop """owa""" +322 48 negative_sampler """basic""" +322 48 evaluator """rankbased""" +322 49 dataset """kinships""" +322 49 model """hole""" +322 49 loss """marginranking""" +322 49 regularizer """no""" +322 49 optimizer """adam""" +322 49 training_loop """owa""" +322 49 negative_sampler """basic""" +322 49 evaluator """rankbased""" +322 50 dataset """kinships""" +322 50 model """hole""" +322 50 loss """marginranking""" +322 50 regularizer """no""" +322 50 
optimizer """adam""" +322 50 training_loop """owa""" +322 50 negative_sampler """basic""" +322 50 evaluator """rankbased""" +322 51 dataset """kinships""" +322 51 model """hole""" +322 51 loss """marginranking""" +322 51 regularizer """no""" +322 51 optimizer """adam""" +322 51 training_loop """owa""" +322 51 negative_sampler """basic""" +322 51 evaluator """rankbased""" +322 52 dataset """kinships""" +322 52 model """hole""" +322 52 loss """marginranking""" +322 52 regularizer """no""" +322 52 optimizer """adam""" +322 52 training_loop """owa""" +322 52 negative_sampler """basic""" +322 52 evaluator """rankbased""" +322 53 dataset """kinships""" +322 53 model """hole""" +322 53 loss """marginranking""" +322 53 regularizer """no""" +322 53 optimizer """adam""" +322 53 training_loop """owa""" +322 53 negative_sampler """basic""" +322 53 evaluator """rankbased""" +322 54 dataset """kinships""" +322 54 model """hole""" +322 54 loss """marginranking""" +322 54 regularizer """no""" +322 54 optimizer """adam""" +322 54 training_loop """owa""" +322 54 negative_sampler """basic""" +322 54 evaluator """rankbased""" +322 55 dataset """kinships""" +322 55 model """hole""" +322 55 loss """marginranking""" +322 55 regularizer """no""" +322 55 optimizer """adam""" +322 55 training_loop """owa""" +322 55 negative_sampler """basic""" +322 55 evaluator """rankbased""" +322 56 dataset """kinships""" +322 56 model """hole""" +322 56 loss """marginranking""" +322 56 regularizer """no""" +322 56 optimizer """adam""" +322 56 training_loop """owa""" +322 56 negative_sampler """basic""" +322 56 evaluator """rankbased""" +322 57 dataset """kinships""" +322 57 model """hole""" +322 57 loss """marginranking""" +322 57 regularizer """no""" +322 57 optimizer """adam""" +322 57 training_loop """owa""" +322 57 negative_sampler """basic""" +322 57 evaluator """rankbased""" +322 58 dataset """kinships""" +322 58 model """hole""" +322 58 loss """marginranking""" +322 58 regularizer """no""" +322 58 
optimizer """adam""" +322 58 training_loop """owa""" +322 58 negative_sampler """basic""" +322 58 evaluator """rankbased""" +322 59 dataset """kinships""" +322 59 model """hole""" +322 59 loss """marginranking""" +322 59 regularizer """no""" +322 59 optimizer """adam""" +322 59 training_loop """owa""" +322 59 negative_sampler """basic""" +322 59 evaluator """rankbased""" +322 60 dataset """kinships""" +322 60 model """hole""" +322 60 loss """marginranking""" +322 60 regularizer """no""" +322 60 optimizer """adam""" +322 60 training_loop """owa""" +322 60 negative_sampler """basic""" +322 60 evaluator """rankbased""" +322 61 dataset """kinships""" +322 61 model """hole""" +322 61 loss """marginranking""" +322 61 regularizer """no""" +322 61 optimizer """adam""" +322 61 training_loop """owa""" +322 61 negative_sampler """basic""" +322 61 evaluator """rankbased""" +322 62 dataset """kinships""" +322 62 model """hole""" +322 62 loss """marginranking""" +322 62 regularizer """no""" +322 62 optimizer """adam""" +322 62 training_loop """owa""" +322 62 negative_sampler """basic""" +322 62 evaluator """rankbased""" +322 63 dataset """kinships""" +322 63 model """hole""" +322 63 loss """marginranking""" +322 63 regularizer """no""" +322 63 optimizer """adam""" +322 63 training_loop """owa""" +322 63 negative_sampler """basic""" +322 63 evaluator """rankbased""" +322 64 dataset """kinships""" +322 64 model """hole""" +322 64 loss """marginranking""" +322 64 regularizer """no""" +322 64 optimizer """adam""" +322 64 training_loop """owa""" +322 64 negative_sampler """basic""" +322 64 evaluator """rankbased""" +322 65 dataset """kinships""" +322 65 model """hole""" +322 65 loss """marginranking""" +322 65 regularizer """no""" +322 65 optimizer """adam""" +322 65 training_loop """owa""" +322 65 negative_sampler """basic""" +322 65 evaluator """rankbased""" +322 66 dataset """kinships""" +322 66 model """hole""" +322 66 loss """marginranking""" +322 66 regularizer """no""" +322 66 
optimizer """adam""" +322 66 training_loop """owa""" +322 66 negative_sampler """basic""" +322 66 evaluator """rankbased""" +322 67 dataset """kinships""" +322 67 model """hole""" +322 67 loss """marginranking""" +322 67 regularizer """no""" +322 67 optimizer """adam""" +322 67 training_loop """owa""" +322 67 negative_sampler """basic""" +322 67 evaluator """rankbased""" +322 68 dataset """kinships""" +322 68 model """hole""" +322 68 loss """marginranking""" +322 68 regularizer """no""" +322 68 optimizer """adam""" +322 68 training_loop """owa""" +322 68 negative_sampler """basic""" +322 68 evaluator """rankbased""" +322 69 dataset """kinships""" +322 69 model """hole""" +322 69 loss """marginranking""" +322 69 regularizer """no""" +322 69 optimizer """adam""" +322 69 training_loop """owa""" +322 69 negative_sampler """basic""" +322 69 evaluator """rankbased""" +322 70 dataset """kinships""" +322 70 model """hole""" +322 70 loss """marginranking""" +322 70 regularizer """no""" +322 70 optimizer """adam""" +322 70 training_loop """owa""" +322 70 negative_sampler """basic""" +322 70 evaluator """rankbased""" +322 71 dataset """kinships""" +322 71 model """hole""" +322 71 loss """marginranking""" +322 71 regularizer """no""" +322 71 optimizer """adam""" +322 71 training_loop """owa""" +322 71 negative_sampler """basic""" +322 71 evaluator """rankbased""" +322 72 dataset """kinships""" +322 72 model """hole""" +322 72 loss """marginranking""" +322 72 regularizer """no""" +322 72 optimizer """adam""" +322 72 training_loop """owa""" +322 72 negative_sampler """basic""" +322 72 evaluator """rankbased""" +322 73 dataset """kinships""" +322 73 model """hole""" +322 73 loss """marginranking""" +322 73 regularizer """no""" +322 73 optimizer """adam""" +322 73 training_loop """owa""" +322 73 negative_sampler """basic""" +322 73 evaluator """rankbased""" +322 74 dataset """kinships""" +322 74 model """hole""" +322 74 loss """marginranking""" +322 74 regularizer """no""" +322 74 
optimizer """adam""" +322 74 training_loop """owa""" +322 74 negative_sampler """basic""" +322 74 evaluator """rankbased""" +322 75 dataset """kinships""" +322 75 model """hole""" +322 75 loss """marginranking""" +322 75 regularizer """no""" +322 75 optimizer """adam""" +322 75 training_loop """owa""" +322 75 negative_sampler """basic""" +322 75 evaluator """rankbased""" +322 76 dataset """kinships""" +322 76 model """hole""" +322 76 loss """marginranking""" +322 76 regularizer """no""" +322 76 optimizer """adam""" +322 76 training_loop """owa""" +322 76 negative_sampler """basic""" +322 76 evaluator """rankbased""" +322 77 dataset """kinships""" +322 77 model """hole""" +322 77 loss """marginranking""" +322 77 regularizer """no""" +322 77 optimizer """adam""" +322 77 training_loop """owa""" +322 77 negative_sampler """basic""" +322 77 evaluator """rankbased""" +322 78 dataset """kinships""" +322 78 model """hole""" +322 78 loss """marginranking""" +322 78 regularizer """no""" +322 78 optimizer """adam""" +322 78 training_loop """owa""" +322 78 negative_sampler """basic""" +322 78 evaluator """rankbased""" +322 79 dataset """kinships""" +322 79 model """hole""" +322 79 loss """marginranking""" +322 79 regularizer """no""" +322 79 optimizer """adam""" +322 79 training_loop """owa""" +322 79 negative_sampler """basic""" +322 79 evaluator """rankbased""" +322 80 dataset """kinships""" +322 80 model """hole""" +322 80 loss """marginranking""" +322 80 regularizer """no""" +322 80 optimizer """adam""" +322 80 training_loop """owa""" +322 80 negative_sampler """basic""" +322 80 evaluator """rankbased""" +322 81 dataset """kinships""" +322 81 model """hole""" +322 81 loss """marginranking""" +322 81 regularizer """no""" +322 81 optimizer """adam""" +322 81 training_loop """owa""" +322 81 negative_sampler """basic""" +322 81 evaluator """rankbased""" +322 82 dataset """kinships""" +322 82 model """hole""" +322 82 loss """marginranking""" +322 82 regularizer """no""" +322 82 
optimizer """adam""" +322 82 training_loop """owa""" +322 82 negative_sampler """basic""" +322 82 evaluator """rankbased""" +322 83 dataset """kinships""" +322 83 model """hole""" +322 83 loss """marginranking""" +322 83 regularizer """no""" +322 83 optimizer """adam""" +322 83 training_loop """owa""" +322 83 negative_sampler """basic""" +322 83 evaluator """rankbased""" +322 84 dataset """kinships""" +322 84 model """hole""" +322 84 loss """marginranking""" +322 84 regularizer """no""" +322 84 optimizer """adam""" +322 84 training_loop """owa""" +322 84 negative_sampler """basic""" +322 84 evaluator """rankbased""" +322 85 dataset """kinships""" +322 85 model """hole""" +322 85 loss """marginranking""" +322 85 regularizer """no""" +322 85 optimizer """adam""" +322 85 training_loop """owa""" +322 85 negative_sampler """basic""" +322 85 evaluator """rankbased""" +322 86 dataset """kinships""" +322 86 model """hole""" +322 86 loss """marginranking""" +322 86 regularizer """no""" +322 86 optimizer """adam""" +322 86 training_loop """owa""" +322 86 negative_sampler """basic""" +322 86 evaluator """rankbased""" +322 87 dataset """kinships""" +322 87 model """hole""" +322 87 loss """marginranking""" +322 87 regularizer """no""" +322 87 optimizer """adam""" +322 87 training_loop """owa""" +322 87 negative_sampler """basic""" +322 87 evaluator """rankbased""" +322 88 dataset """kinships""" +322 88 model """hole""" +322 88 loss """marginranking""" +322 88 regularizer """no""" +322 88 optimizer """adam""" +322 88 training_loop """owa""" +322 88 negative_sampler """basic""" +322 88 evaluator """rankbased""" +322 89 dataset """kinships""" +322 89 model """hole""" +322 89 loss """marginranking""" +322 89 regularizer """no""" +322 89 optimizer """adam""" +322 89 training_loop """owa""" +322 89 negative_sampler """basic""" +322 89 evaluator """rankbased""" +322 90 dataset """kinships""" +322 90 model """hole""" +322 90 loss """marginranking""" +322 90 regularizer """no""" +322 90 
optimizer """adam""" +322 90 training_loop """owa""" +322 90 negative_sampler """basic""" +322 90 evaluator """rankbased""" +322 91 dataset """kinships""" +322 91 model """hole""" +322 91 loss """marginranking""" +322 91 regularizer """no""" +322 91 optimizer """adam""" +322 91 training_loop """owa""" +322 91 negative_sampler """basic""" +322 91 evaluator """rankbased""" +322 92 dataset """kinships""" +322 92 model """hole""" +322 92 loss """marginranking""" +322 92 regularizer """no""" +322 92 optimizer """adam""" +322 92 training_loop """owa""" +322 92 negative_sampler """basic""" +322 92 evaluator """rankbased""" +322 93 dataset """kinships""" +322 93 model """hole""" +322 93 loss """marginranking""" +322 93 regularizer """no""" +322 93 optimizer """adam""" +322 93 training_loop """owa""" +322 93 negative_sampler """basic""" +322 93 evaluator """rankbased""" +322 94 dataset """kinships""" +322 94 model """hole""" +322 94 loss """marginranking""" +322 94 regularizer """no""" +322 94 optimizer """adam""" +322 94 training_loop """owa""" +322 94 negative_sampler """basic""" +322 94 evaluator """rankbased""" +322 95 dataset """kinships""" +322 95 model """hole""" +322 95 loss """marginranking""" +322 95 regularizer """no""" +322 95 optimizer """adam""" +322 95 training_loop """owa""" +322 95 negative_sampler """basic""" +322 95 evaluator """rankbased""" +322 96 dataset """kinships""" +322 96 model """hole""" +322 96 loss """marginranking""" +322 96 regularizer """no""" +322 96 optimizer """adam""" +322 96 training_loop """owa""" +322 96 negative_sampler """basic""" +322 96 evaluator """rankbased""" +322 97 dataset """kinships""" +322 97 model """hole""" +322 97 loss """marginranking""" +322 97 regularizer """no""" +322 97 optimizer """adam""" +322 97 training_loop """owa""" +322 97 negative_sampler """basic""" +322 97 evaluator """rankbased""" +322 98 dataset """kinships""" +322 98 model """hole""" +322 98 loss """marginranking""" +322 98 regularizer """no""" +322 98 
optimizer """adam""" +322 98 training_loop """owa""" +322 98 negative_sampler """basic""" +322 98 evaluator """rankbased""" +322 99 dataset """kinships""" +322 99 model """hole""" +322 99 loss """marginranking""" +322 99 regularizer """no""" +322 99 optimizer """adam""" +322 99 training_loop """owa""" +322 99 negative_sampler """basic""" +322 99 evaluator """rankbased""" +322 100 dataset """kinships""" +322 100 model """hole""" +322 100 loss """marginranking""" +322 100 regularizer """no""" +322 100 optimizer """adam""" +322 100 training_loop """owa""" +322 100 negative_sampler """basic""" +322 100 evaluator """rankbased""" +323 1 model.embedding_dim 2.0 +323 1 loss.margin 5.1650829735282136 +323 1 optimizer.lr 0.006129955086826441 +323 1 negative_sampler.num_negs_per_pos 47.0 +323 1 training.batch_size 2.0 +323 2 model.embedding_dim 0.0 +323 2 loss.margin 5.058838851341746 +323 2 optimizer.lr 0.0014896181572794066 +323 2 negative_sampler.num_negs_per_pos 79.0 +323 2 training.batch_size 0.0 +323 3 model.embedding_dim 1.0 +323 3 loss.margin 9.64172118800342 +323 3 optimizer.lr 0.003090457859344287 +323 3 negative_sampler.num_negs_per_pos 67.0 +323 3 training.batch_size 1.0 +323 4 model.embedding_dim 1.0 +323 4 loss.margin 3.32909438776143 +323 4 optimizer.lr 0.01868290801871699 +323 4 negative_sampler.num_negs_per_pos 63.0 +323 4 training.batch_size 0.0 +323 5 model.embedding_dim 0.0 +323 5 loss.margin 1.1464824390563184 +323 5 optimizer.lr 0.0224825121805155 +323 5 negative_sampler.num_negs_per_pos 6.0 +323 5 training.batch_size 0.0 +323 6 model.embedding_dim 2.0 +323 6 loss.margin 7.872418266223728 +323 6 optimizer.lr 0.014936997481273818 +323 6 negative_sampler.num_negs_per_pos 62.0 +323 6 training.batch_size 0.0 +323 7 model.embedding_dim 2.0 +323 7 loss.margin 3.9750044389943118 +323 7 optimizer.lr 0.029786238737079546 +323 7 negative_sampler.num_negs_per_pos 26.0 +323 7 training.batch_size 0.0 +323 8 model.embedding_dim 2.0 +323 8 loss.margin 
1.1492278916937928 +323 8 optimizer.lr 0.0028539977572506533 +323 8 negative_sampler.num_negs_per_pos 17.0 +323 8 training.batch_size 1.0 +323 9 model.embedding_dim 1.0 +323 9 loss.margin 9.901916393579722 +323 9 optimizer.lr 0.024108444847556542 +323 9 negative_sampler.num_negs_per_pos 85.0 +323 9 training.batch_size 1.0 +323 10 model.embedding_dim 2.0 +323 10 loss.margin 6.748594816980672 +323 10 optimizer.lr 0.003645754060398956 +323 10 negative_sampler.num_negs_per_pos 41.0 +323 10 training.batch_size 2.0 +323 11 model.embedding_dim 2.0 +323 11 loss.margin 6.685036335095514 +323 11 optimizer.lr 0.06206096422167874 +323 11 negative_sampler.num_negs_per_pos 82.0 +323 11 training.batch_size 2.0 +323 12 model.embedding_dim 0.0 +323 12 loss.margin 6.400792948106176 +323 12 optimizer.lr 0.002504194873640464 +323 12 negative_sampler.num_negs_per_pos 15.0 +323 12 training.batch_size 0.0 +323 13 model.embedding_dim 1.0 +323 13 loss.margin 6.620790028566743 +323 13 optimizer.lr 0.012754830161894368 +323 13 negative_sampler.num_negs_per_pos 90.0 +323 13 training.batch_size 2.0 +323 14 model.embedding_dim 1.0 +323 14 loss.margin 4.992316603364652 +323 14 optimizer.lr 0.07049278160649397 +323 14 negative_sampler.num_negs_per_pos 69.0 +323 14 training.batch_size 1.0 +323 15 model.embedding_dim 0.0 +323 15 loss.margin 2.6778833438958043 +323 15 optimizer.lr 0.011605883991828195 +323 15 negative_sampler.num_negs_per_pos 84.0 +323 15 training.batch_size 0.0 +323 16 model.embedding_dim 2.0 +323 16 loss.margin 7.840844633489672 +323 16 optimizer.lr 0.001441550279624915 +323 16 negative_sampler.num_negs_per_pos 45.0 +323 16 training.batch_size 2.0 +323 17 model.embedding_dim 1.0 +323 17 loss.margin 1.5107804295380005 +323 17 optimizer.lr 0.02499460413123424 +323 17 negative_sampler.num_negs_per_pos 74.0 +323 17 training.batch_size 0.0 +323 18 model.embedding_dim 1.0 +323 18 loss.margin 3.921585395841327 +323 18 optimizer.lr 0.006543950357355145 +323 18 
negative_sampler.num_negs_per_pos 0.0 +323 18 training.batch_size 0.0 +323 19 model.embedding_dim 0.0 +323 19 loss.margin 1.395916134427341 +323 19 optimizer.lr 0.001004486212653147 +323 19 negative_sampler.num_negs_per_pos 53.0 +323 19 training.batch_size 1.0 +323 20 model.embedding_dim 2.0 +323 20 loss.margin 2.617221878258539 +323 20 optimizer.lr 0.011879663806167187 +323 20 negative_sampler.num_negs_per_pos 18.0 +323 20 training.batch_size 2.0 +323 21 model.embedding_dim 1.0 +323 21 loss.margin 8.477049089236097 +323 21 optimizer.lr 0.002227681793102669 +323 21 negative_sampler.num_negs_per_pos 52.0 +323 21 training.batch_size 0.0 +323 22 model.embedding_dim 0.0 +323 22 loss.margin 4.0063281659334535 +323 22 optimizer.lr 0.07207010911385237 +323 22 negative_sampler.num_negs_per_pos 40.0 +323 22 training.batch_size 0.0 +323 23 model.embedding_dim 1.0 +323 23 loss.margin 7.333383088262269 +323 23 optimizer.lr 0.013821198640498931 +323 23 negative_sampler.num_negs_per_pos 5.0 +323 23 training.batch_size 1.0 +323 24 model.embedding_dim 1.0 +323 24 loss.margin 3.5744830553100067 +323 24 optimizer.lr 0.009132562491946383 +323 24 negative_sampler.num_negs_per_pos 0.0 +323 24 training.batch_size 1.0 +323 25 model.embedding_dim 2.0 +323 25 loss.margin 2.3262657593124763 +323 25 optimizer.lr 0.007521530041552892 +323 25 negative_sampler.num_negs_per_pos 14.0 +323 25 training.batch_size 0.0 +323 26 model.embedding_dim 0.0 +323 26 loss.margin 9.413830284715292 +323 26 optimizer.lr 0.003099897741492595 +323 26 negative_sampler.num_negs_per_pos 80.0 +323 26 training.batch_size 0.0 +323 27 model.embedding_dim 1.0 +323 27 loss.margin 9.59475599549551 +323 27 optimizer.lr 0.09181709518197889 +323 27 negative_sampler.num_negs_per_pos 68.0 +323 27 training.batch_size 1.0 +323 28 model.embedding_dim 0.0 +323 28 loss.margin 2.327753774982973 +323 28 optimizer.lr 0.02361669167569255 +323 28 negative_sampler.num_negs_per_pos 23.0 +323 28 training.batch_size 1.0 +323 29 
model.embedding_dim 1.0 +323 29 loss.margin 6.403412758690736 +323 29 optimizer.lr 0.021783413652106648 +323 29 negative_sampler.num_negs_per_pos 82.0 +323 29 training.batch_size 2.0 +323 30 model.embedding_dim 2.0 +323 30 loss.margin 4.611256172640403 +323 30 optimizer.lr 0.005247363972278115 +323 30 negative_sampler.num_negs_per_pos 28.0 +323 30 training.batch_size 2.0 +323 31 model.embedding_dim 1.0 +323 31 loss.margin 5.054228976702296 +323 31 optimizer.lr 0.0010355416164859533 +323 31 negative_sampler.num_negs_per_pos 15.0 +323 31 training.batch_size 1.0 +323 32 model.embedding_dim 2.0 +323 32 loss.margin 1.7634702089761536 +323 32 optimizer.lr 0.01554270341369452 +323 32 negative_sampler.num_negs_per_pos 60.0 +323 32 training.batch_size 0.0 +323 33 model.embedding_dim 0.0 +323 33 loss.margin 7.0140351486808905 +323 33 optimizer.lr 0.012678706842311606 +323 33 negative_sampler.num_negs_per_pos 2.0 +323 33 training.batch_size 2.0 +323 34 model.embedding_dim 2.0 +323 34 loss.margin 1.7508771092971394 +323 34 optimizer.lr 0.09203102926876965 +323 34 negative_sampler.num_negs_per_pos 1.0 +323 34 training.batch_size 1.0 +323 35 model.embedding_dim 1.0 +323 35 loss.margin 6.174528214271241 +323 35 optimizer.lr 0.009882953990311178 +323 35 negative_sampler.num_negs_per_pos 82.0 +323 35 training.batch_size 2.0 +323 36 model.embedding_dim 1.0 +323 36 loss.margin 3.1349975640655803 +323 36 optimizer.lr 0.003400069063818544 +323 36 negative_sampler.num_negs_per_pos 87.0 +323 36 training.batch_size 2.0 +323 37 model.embedding_dim 1.0 +323 37 loss.margin 7.764344748148476 +323 37 optimizer.lr 0.00294486481005958 +323 37 negative_sampler.num_negs_per_pos 48.0 +323 37 training.batch_size 2.0 +323 38 model.embedding_dim 1.0 +323 38 loss.margin 1.343240006113461 +323 38 optimizer.lr 0.005684905215553501 +323 38 negative_sampler.num_negs_per_pos 1.0 +323 38 training.batch_size 1.0 +323 39 model.embedding_dim 2.0 +323 39 loss.margin 9.67580196049307 +323 39 optimizer.lr 
0.028783795389361266 +323 39 negative_sampler.num_negs_per_pos 43.0 +323 39 training.batch_size 1.0 +323 40 model.embedding_dim 0.0 +323 40 loss.margin 4.665160283078167 +323 40 optimizer.lr 0.023472154453028648 +323 40 negative_sampler.num_negs_per_pos 43.0 +323 40 training.batch_size 2.0 +323 41 model.embedding_dim 1.0 +323 41 loss.margin 6.592180627574104 +323 41 optimizer.lr 0.0012891587910773344 +323 41 negative_sampler.num_negs_per_pos 74.0 +323 41 training.batch_size 0.0 +323 42 model.embedding_dim 0.0 +323 42 loss.margin 4.368482330714365 +323 42 optimizer.lr 0.04138071391260251 +323 42 negative_sampler.num_negs_per_pos 21.0 +323 42 training.batch_size 0.0 +323 43 model.embedding_dim 1.0 +323 43 loss.margin 9.49389010986843 +323 43 optimizer.lr 0.0036706910542196407 +323 43 negative_sampler.num_negs_per_pos 60.0 +323 43 training.batch_size 0.0 +323 44 model.embedding_dim 1.0 +323 44 loss.margin 8.436333282849223 +323 44 optimizer.lr 0.07406325216867315 +323 44 negative_sampler.num_negs_per_pos 78.0 +323 44 training.batch_size 2.0 +323 45 model.embedding_dim 1.0 +323 45 loss.margin 1.7034468237487606 +323 45 optimizer.lr 0.017504716915627182 +323 45 negative_sampler.num_negs_per_pos 96.0 +323 45 training.batch_size 1.0 +323 46 model.embedding_dim 0.0 +323 46 loss.margin 8.576372254053878 +323 46 optimizer.lr 0.003058592047738224 +323 46 negative_sampler.num_negs_per_pos 53.0 +323 46 training.batch_size 0.0 +323 47 model.embedding_dim 0.0 +323 47 loss.margin 9.052008329805021 +323 47 optimizer.lr 0.009629541778725138 +323 47 negative_sampler.num_negs_per_pos 97.0 +323 47 training.batch_size 1.0 +323 48 model.embedding_dim 2.0 +323 48 loss.margin 3.1575171152316264 +323 48 optimizer.lr 0.08042539197801042 +323 48 negative_sampler.num_negs_per_pos 65.0 +323 48 training.batch_size 0.0 +323 49 model.embedding_dim 0.0 +323 49 loss.margin 5.470478274266869 +323 49 optimizer.lr 0.00834602910412913 +323 49 negative_sampler.num_negs_per_pos 94.0 +323 49 
training.batch_size 0.0 +323 50 model.embedding_dim 0.0 +323 50 loss.margin 2.1429301780751815 +323 50 optimizer.lr 0.0019463999074570558 +323 50 negative_sampler.num_negs_per_pos 57.0 +323 50 training.batch_size 2.0 +323 51 model.embedding_dim 1.0 +323 51 loss.margin 1.692577929834708 +323 51 optimizer.lr 0.03503789905308975 +323 51 negative_sampler.num_negs_per_pos 83.0 +323 51 training.batch_size 1.0 +323 52 model.embedding_dim 2.0 +323 52 loss.margin 9.241444923478841 +323 52 optimizer.lr 0.0678148179045187 +323 52 negative_sampler.num_negs_per_pos 66.0 +323 52 training.batch_size 2.0 +323 53 model.embedding_dim 0.0 +323 53 loss.margin 4.890788016186096 +323 53 optimizer.lr 0.062386791735465866 +323 53 negative_sampler.num_negs_per_pos 74.0 +323 53 training.batch_size 0.0 +323 54 model.embedding_dim 2.0 +323 54 loss.margin 4.319303002233564 +323 54 optimizer.lr 0.0011999180589753761 +323 54 negative_sampler.num_negs_per_pos 6.0 +323 54 training.batch_size 2.0 +323 55 model.embedding_dim 1.0 +323 55 loss.margin 3.1933313919265744 +323 55 optimizer.lr 0.001972958066180446 +323 55 negative_sampler.num_negs_per_pos 65.0 +323 55 training.batch_size 2.0 +323 56 model.embedding_dim 0.0 +323 56 loss.margin 5.552731762407244 +323 56 optimizer.lr 0.0024101735421235677 +323 56 negative_sampler.num_negs_per_pos 37.0 +323 56 training.batch_size 0.0 +323 57 model.embedding_dim 0.0 +323 57 loss.margin 2.0137613680090003 +323 57 optimizer.lr 0.023895838331369195 +323 57 negative_sampler.num_negs_per_pos 69.0 +323 57 training.batch_size 2.0 +323 58 model.embedding_dim 0.0 +323 58 loss.margin 1.363362822683043 +323 58 optimizer.lr 0.0023024219026042078 +323 58 negative_sampler.num_negs_per_pos 6.0 +323 58 training.batch_size 1.0 +323 59 model.embedding_dim 0.0 +323 59 loss.margin 0.6961831446586165 +323 59 optimizer.lr 0.004019246385802703 +323 59 negative_sampler.num_negs_per_pos 93.0 +323 59 training.batch_size 1.0 +323 60 model.embedding_dim 1.0 +323 60 loss.margin 
5.559604461593644 +323 60 optimizer.lr 0.0332282986237465 +323 60 negative_sampler.num_negs_per_pos 52.0 +323 60 training.batch_size 0.0 +323 61 model.embedding_dim 2.0 +323 61 loss.margin 2.065844127076967 +323 61 optimizer.lr 0.01118343064156514 +323 61 negative_sampler.num_negs_per_pos 83.0 +323 61 training.batch_size 2.0 +323 62 model.embedding_dim 0.0 +323 62 loss.margin 7.401464518509101 +323 62 optimizer.lr 0.019853978679212358 +323 62 negative_sampler.num_negs_per_pos 15.0 +323 62 training.batch_size 2.0 +323 63 model.embedding_dim 2.0 +323 63 loss.margin 6.355196703597863 +323 63 optimizer.lr 0.00121713141622557 +323 63 negative_sampler.num_negs_per_pos 94.0 +323 63 training.batch_size 0.0 +323 64 model.embedding_dim 1.0 +323 64 loss.margin 6.451397504659775 +323 64 optimizer.lr 0.01425130656971561 +323 64 negative_sampler.num_negs_per_pos 81.0 +323 64 training.batch_size 0.0 +323 65 model.embedding_dim 0.0 +323 65 loss.margin 7.245724632919351 +323 65 optimizer.lr 0.012207312119502264 +323 65 negative_sampler.num_negs_per_pos 20.0 +323 65 training.batch_size 0.0 +323 66 model.embedding_dim 1.0 +323 66 loss.margin 4.485276854030393 +323 66 optimizer.lr 0.0011211510793477132 +323 66 negative_sampler.num_negs_per_pos 43.0 +323 66 training.batch_size 0.0 +323 67 model.embedding_dim 2.0 +323 67 loss.margin 9.182119907429785 +323 67 optimizer.lr 0.039558364215885496 +323 67 negative_sampler.num_negs_per_pos 86.0 +323 67 training.batch_size 0.0 +323 68 model.embedding_dim 2.0 +323 68 loss.margin 5.255862120701566 +323 68 optimizer.lr 0.0023562034439377696 +323 68 negative_sampler.num_negs_per_pos 30.0 +323 68 training.batch_size 1.0 +323 69 model.embedding_dim 1.0 +323 69 loss.margin 0.8113655445053869 +323 69 optimizer.lr 0.03825791227548259 +323 69 negative_sampler.num_negs_per_pos 8.0 +323 69 training.batch_size 1.0 +323 70 model.embedding_dim 1.0 +323 70 loss.margin 4.446483828729826 +323 70 optimizer.lr 0.007556194947989792 +323 70 
negative_sampler.num_negs_per_pos 5.0 +323 70 training.batch_size 2.0 +323 71 model.embedding_dim 1.0 +323 71 loss.margin 9.526350872144807 +323 71 optimizer.lr 0.00176761949159108 +323 71 negative_sampler.num_negs_per_pos 62.0 +323 71 training.batch_size 2.0 +323 72 model.embedding_dim 2.0 +323 72 loss.margin 6.681629857149677 +323 72 optimizer.lr 0.004135142273370579 +323 72 negative_sampler.num_negs_per_pos 10.0 +323 72 training.batch_size 1.0 +323 73 model.embedding_dim 2.0 +323 73 loss.margin 5.784173128535532 +323 73 optimizer.lr 0.07781080890723233 +323 73 negative_sampler.num_negs_per_pos 79.0 +323 73 training.batch_size 0.0 +323 74 model.embedding_dim 1.0 +323 74 loss.margin 5.771877509467954 +323 74 optimizer.lr 0.013652172342242473 +323 74 negative_sampler.num_negs_per_pos 25.0 +323 74 training.batch_size 1.0 +323 75 model.embedding_dim 2.0 +323 75 loss.margin 4.199412580712178 +323 75 optimizer.lr 0.06472063606953816 +323 75 negative_sampler.num_negs_per_pos 2.0 +323 75 training.batch_size 1.0 +323 76 model.embedding_dim 1.0 +323 76 loss.margin 7.062612045757937 +323 76 optimizer.lr 0.03537239726527017 +323 76 negative_sampler.num_negs_per_pos 60.0 +323 76 training.batch_size 1.0 +323 77 model.embedding_dim 0.0 +323 77 loss.margin 8.718447192945389 +323 77 optimizer.lr 0.09297822253250773 +323 77 negative_sampler.num_negs_per_pos 95.0 +323 77 training.batch_size 2.0 +323 78 model.embedding_dim 2.0 +323 78 loss.margin 6.201468115271689 +323 78 optimizer.lr 0.029548953653478142 +323 78 negative_sampler.num_negs_per_pos 20.0 +323 78 training.batch_size 2.0 +323 79 model.embedding_dim 1.0 +323 79 loss.margin 9.67436067338506 +323 79 optimizer.lr 0.029129344359196715 +323 79 negative_sampler.num_negs_per_pos 28.0 +323 79 training.batch_size 2.0 +323 80 model.embedding_dim 0.0 +323 80 loss.margin 3.1640196569584984 +323 80 optimizer.lr 0.007997616618747157 +323 80 negative_sampler.num_negs_per_pos 64.0 +323 80 training.batch_size 2.0 +323 81 
model.embedding_dim 1.0 +323 81 loss.margin 2.5773661398226384 +323 81 optimizer.lr 0.0028750225263688175 +323 81 negative_sampler.num_negs_per_pos 23.0 +323 81 training.batch_size 0.0 +323 82 model.embedding_dim 1.0 +323 82 loss.margin 4.492868810198894 +323 82 optimizer.lr 0.003056663328992146 +323 82 negative_sampler.num_negs_per_pos 33.0 +323 82 training.batch_size 1.0 +323 83 model.embedding_dim 0.0 +323 83 loss.margin 6.947567182259406 +323 83 optimizer.lr 0.0032609153450518626 +323 83 negative_sampler.num_negs_per_pos 4.0 +323 83 training.batch_size 1.0 +323 84 model.embedding_dim 2.0 +323 84 loss.margin 2.3204212289722097 +323 84 optimizer.lr 0.002158989917812652 +323 84 negative_sampler.num_negs_per_pos 25.0 +323 84 training.batch_size 0.0 +323 85 model.embedding_dim 1.0 +323 85 loss.margin 7.457600948859341 +323 85 optimizer.lr 0.010689454981007663 +323 85 negative_sampler.num_negs_per_pos 89.0 +323 85 training.batch_size 2.0 +323 86 model.embedding_dim 2.0 +323 86 loss.margin 3.3681373020444796 +323 86 optimizer.lr 0.009771566890724373 +323 86 negative_sampler.num_negs_per_pos 87.0 +323 86 training.batch_size 0.0 +323 87 model.embedding_dim 0.0 +323 87 loss.margin 8.625739959737807 +323 87 optimizer.lr 0.051697970425093505 +323 87 negative_sampler.num_negs_per_pos 43.0 +323 87 training.batch_size 0.0 +323 88 model.embedding_dim 0.0 +323 88 loss.margin 9.40766888539395 +323 88 optimizer.lr 0.005700667038469964 +323 88 negative_sampler.num_negs_per_pos 82.0 +323 88 training.batch_size 1.0 +323 89 model.embedding_dim 1.0 +323 89 loss.margin 8.857118939337905 +323 89 optimizer.lr 0.005521455855920222 +323 89 negative_sampler.num_negs_per_pos 84.0 +323 89 training.batch_size 0.0 +323 90 model.embedding_dim 2.0 +323 90 loss.margin 6.550508084938843 +323 90 optimizer.lr 0.0013972137396700596 +323 90 negative_sampler.num_negs_per_pos 32.0 +323 90 training.batch_size 0.0 +323 91 model.embedding_dim 2.0 +323 91 loss.margin 1.602536209484627 +323 91 optimizer.lr 
0.001231086445201504 +323 91 negative_sampler.num_negs_per_pos 35.0 +323 91 training.batch_size 1.0 +323 92 model.embedding_dim 2.0 +323 92 loss.margin 5.57359825287272 +323 92 optimizer.lr 0.010012167570469641 +323 92 negative_sampler.num_negs_per_pos 28.0 +323 92 training.batch_size 2.0 +323 93 model.embedding_dim 1.0 +323 93 loss.margin 8.888982494929866 +323 93 optimizer.lr 0.05103289602406586 +323 93 negative_sampler.num_negs_per_pos 22.0 +323 93 training.batch_size 2.0 +323 94 model.embedding_dim 1.0 +323 94 loss.margin 1.1308005006211361 +323 94 optimizer.lr 0.005071941151765444 +323 94 negative_sampler.num_negs_per_pos 33.0 +323 94 training.batch_size 1.0 +323 95 model.embedding_dim 1.0 +323 95 loss.margin 6.442043396368856 +323 95 optimizer.lr 0.009078649431598824 +323 95 negative_sampler.num_negs_per_pos 49.0 +323 95 training.batch_size 2.0 +323 96 model.embedding_dim 2.0 +323 96 loss.margin 5.078473141073574 +323 96 optimizer.lr 0.016817640340593465 +323 96 negative_sampler.num_negs_per_pos 26.0 +323 96 training.batch_size 2.0 +323 97 model.embedding_dim 0.0 +323 97 loss.margin 8.880289344502847 +323 97 optimizer.lr 0.0019458678059111679 +323 97 negative_sampler.num_negs_per_pos 41.0 +323 97 training.batch_size 1.0 +323 98 model.embedding_dim 0.0 +323 98 loss.margin 1.679575551857488 +323 98 optimizer.lr 0.0028243083227842947 +323 98 negative_sampler.num_negs_per_pos 95.0 +323 98 training.batch_size 0.0 +323 99 model.embedding_dim 2.0 +323 99 loss.margin 9.086830186717469 +323 99 optimizer.lr 0.014120241294029388 +323 99 negative_sampler.num_negs_per_pos 65.0 +323 99 training.batch_size 0.0 +323 100 model.embedding_dim 1.0 +323 100 loss.margin 8.226529676040752 +323 100 optimizer.lr 0.0011964933187745008 +323 100 negative_sampler.num_negs_per_pos 20.0 +323 100 training.batch_size 0.0 +323 1 dataset """kinships""" +323 1 model """hole""" +323 1 loss """marginranking""" +323 1 regularizer """no""" +323 1 optimizer """adam""" +323 1 training_loop """owa""" 
+323 1 negative_sampler """basic""" +323 1 evaluator """rankbased""" +323 2 dataset """kinships""" +323 2 model """hole""" +323 2 loss """marginranking""" +323 2 regularizer """no""" +323 2 optimizer """adam""" +323 2 training_loop """owa""" +323 2 negative_sampler """basic""" +323 2 evaluator """rankbased""" +323 3 dataset """kinships""" +323 3 model """hole""" +323 3 loss """marginranking""" +323 3 regularizer """no""" +323 3 optimizer """adam""" +323 3 training_loop """owa""" +323 3 negative_sampler """basic""" +323 3 evaluator """rankbased""" +323 4 dataset """kinships""" +323 4 model """hole""" +323 4 loss """marginranking""" +323 4 regularizer """no""" +323 4 optimizer """adam""" +323 4 training_loop """owa""" +323 4 negative_sampler """basic""" +323 4 evaluator """rankbased""" +323 5 dataset """kinships""" +323 5 model """hole""" +323 5 loss """marginranking""" +323 5 regularizer """no""" +323 5 optimizer """adam""" +323 5 training_loop """owa""" +323 5 negative_sampler """basic""" +323 5 evaluator """rankbased""" +323 6 dataset """kinships""" +323 6 model """hole""" +323 6 loss """marginranking""" +323 6 regularizer """no""" +323 6 optimizer """adam""" +323 6 training_loop """owa""" +323 6 negative_sampler """basic""" +323 6 evaluator """rankbased""" +323 7 dataset """kinships""" +323 7 model """hole""" +323 7 loss """marginranking""" +323 7 regularizer """no""" +323 7 optimizer """adam""" +323 7 training_loop """owa""" +323 7 negative_sampler """basic""" +323 7 evaluator """rankbased""" +323 8 dataset """kinships""" +323 8 model """hole""" +323 8 loss """marginranking""" +323 8 regularizer """no""" +323 8 optimizer """adam""" +323 8 training_loop """owa""" +323 8 negative_sampler """basic""" +323 8 evaluator """rankbased""" +323 9 dataset """kinships""" +323 9 model """hole""" +323 9 loss """marginranking""" +323 9 regularizer """no""" +323 9 optimizer """adam""" +323 9 training_loop """owa""" +323 9 negative_sampler """basic""" +323 9 evaluator 
"""rankbased""" +323 10 dataset """kinships""" +323 10 model """hole""" +323 10 loss """marginranking""" +323 10 regularizer """no""" +323 10 optimizer """adam""" +323 10 training_loop """owa""" +323 10 negative_sampler """basic""" +323 10 evaluator """rankbased""" +323 11 dataset """kinships""" +323 11 model """hole""" +323 11 loss """marginranking""" +323 11 regularizer """no""" +323 11 optimizer """adam""" +323 11 training_loop """owa""" +323 11 negative_sampler """basic""" +323 11 evaluator """rankbased""" +323 12 dataset """kinships""" +323 12 model """hole""" +323 12 loss """marginranking""" +323 12 regularizer """no""" +323 12 optimizer """adam""" +323 12 training_loop """owa""" +323 12 negative_sampler """basic""" +323 12 evaluator """rankbased""" +323 13 dataset """kinships""" +323 13 model """hole""" +323 13 loss """marginranking""" +323 13 regularizer """no""" +323 13 optimizer """adam""" +323 13 training_loop """owa""" +323 13 negative_sampler """basic""" +323 13 evaluator """rankbased""" +323 14 dataset """kinships""" +323 14 model """hole""" +323 14 loss """marginranking""" +323 14 regularizer """no""" +323 14 optimizer """adam""" +323 14 training_loop """owa""" +323 14 negative_sampler """basic""" +323 14 evaluator """rankbased""" +323 15 dataset """kinships""" +323 15 model """hole""" +323 15 loss """marginranking""" +323 15 regularizer """no""" +323 15 optimizer """adam""" +323 15 training_loop """owa""" +323 15 negative_sampler """basic""" +323 15 evaluator """rankbased""" +323 16 dataset """kinships""" +323 16 model """hole""" +323 16 loss """marginranking""" +323 16 regularizer """no""" +323 16 optimizer """adam""" +323 16 training_loop """owa""" +323 16 negative_sampler """basic""" +323 16 evaluator """rankbased""" +323 17 dataset """kinships""" +323 17 model """hole""" +323 17 loss """marginranking""" +323 17 regularizer """no""" +323 17 optimizer """adam""" +323 17 training_loop """owa""" +323 17 negative_sampler """basic""" +323 17 evaluator 
"""rankbased""" +323 18 dataset """kinships""" +323 18 model """hole""" +323 18 loss """marginranking""" +323 18 regularizer """no""" +323 18 optimizer """adam""" +323 18 training_loop """owa""" +323 18 negative_sampler """basic""" +323 18 evaluator """rankbased""" +323 19 dataset """kinships""" +323 19 model """hole""" +323 19 loss """marginranking""" +323 19 regularizer """no""" +323 19 optimizer """adam""" +323 19 training_loop """owa""" +323 19 negative_sampler """basic""" +323 19 evaluator """rankbased""" +323 20 dataset """kinships""" +323 20 model """hole""" +323 20 loss """marginranking""" +323 20 regularizer """no""" +323 20 optimizer """adam""" +323 20 training_loop """owa""" +323 20 negative_sampler """basic""" +323 20 evaluator """rankbased""" +323 21 dataset """kinships""" +323 21 model """hole""" +323 21 loss """marginranking""" +323 21 regularizer """no""" +323 21 optimizer """adam""" +323 21 training_loop """owa""" +323 21 negative_sampler """basic""" +323 21 evaluator """rankbased""" +323 22 dataset """kinships""" +323 22 model """hole""" +323 22 loss """marginranking""" +323 22 regularizer """no""" +323 22 optimizer """adam""" +323 22 training_loop """owa""" +323 22 negative_sampler """basic""" +323 22 evaluator """rankbased""" +323 23 dataset """kinships""" +323 23 model """hole""" +323 23 loss """marginranking""" +323 23 regularizer """no""" +323 23 optimizer """adam""" +323 23 training_loop """owa""" +323 23 negative_sampler """basic""" +323 23 evaluator """rankbased""" +323 24 dataset """kinships""" +323 24 model """hole""" +323 24 loss """marginranking""" +323 24 regularizer """no""" +323 24 optimizer """adam""" +323 24 training_loop """owa""" +323 24 negative_sampler """basic""" +323 24 evaluator """rankbased""" +323 25 dataset """kinships""" +323 25 model """hole""" +323 25 loss """marginranking""" +323 25 regularizer """no""" +323 25 optimizer """adam""" +323 25 training_loop """owa""" +323 25 negative_sampler """basic""" +323 25 evaluator 
"""rankbased""" +323 26 dataset """kinships""" +323 26 model """hole""" +323 26 loss """marginranking""" +323 26 regularizer """no""" +323 26 optimizer """adam""" +323 26 training_loop """owa""" +323 26 negative_sampler """basic""" +323 26 evaluator """rankbased""" +323 27 dataset """kinships""" +323 27 model """hole""" +323 27 loss """marginranking""" +323 27 regularizer """no""" +323 27 optimizer """adam""" +323 27 training_loop """owa""" +323 27 negative_sampler """basic""" +323 27 evaluator """rankbased""" +323 28 dataset """kinships""" +323 28 model """hole""" +323 28 loss """marginranking""" +323 28 regularizer """no""" +323 28 optimizer """adam""" +323 28 training_loop """owa""" +323 28 negative_sampler """basic""" +323 28 evaluator """rankbased""" +323 29 dataset """kinships""" +323 29 model """hole""" +323 29 loss """marginranking""" +323 29 regularizer """no""" +323 29 optimizer """adam""" +323 29 training_loop """owa""" +323 29 negative_sampler """basic""" +323 29 evaluator """rankbased""" +323 30 dataset """kinships""" +323 30 model """hole""" +323 30 loss """marginranking""" +323 30 regularizer """no""" +323 30 optimizer """adam""" +323 30 training_loop """owa""" +323 30 negative_sampler """basic""" +323 30 evaluator """rankbased""" +323 31 dataset """kinships""" +323 31 model """hole""" +323 31 loss """marginranking""" +323 31 regularizer """no""" +323 31 optimizer """adam""" +323 31 training_loop """owa""" +323 31 negative_sampler """basic""" +323 31 evaluator """rankbased""" +323 32 dataset """kinships""" +323 32 model """hole""" +323 32 loss """marginranking""" +323 32 regularizer """no""" +323 32 optimizer """adam""" +323 32 training_loop """owa""" +323 32 negative_sampler """basic""" +323 32 evaluator """rankbased""" +323 33 dataset """kinships""" +323 33 model """hole""" +323 33 loss """marginranking""" +323 33 regularizer """no""" +323 33 optimizer """adam""" +323 33 training_loop """owa""" +323 33 negative_sampler """basic""" +323 33 evaluator 
"""rankbased""" +323 34 dataset """kinships""" +323 34 model """hole""" +323 34 loss """marginranking""" +323 34 regularizer """no""" +323 34 optimizer """adam""" +323 34 training_loop """owa""" +323 34 negative_sampler """basic""" +323 34 evaluator """rankbased""" +323 35 dataset """kinships""" +323 35 model """hole""" +323 35 loss """marginranking""" +323 35 regularizer """no""" +323 35 optimizer """adam""" +323 35 training_loop """owa""" +323 35 negative_sampler """basic""" +323 35 evaluator """rankbased""" +323 36 dataset """kinships""" +323 36 model """hole""" +323 36 loss """marginranking""" +323 36 regularizer """no""" +323 36 optimizer """adam""" +323 36 training_loop """owa""" +323 36 negative_sampler """basic""" +323 36 evaluator """rankbased""" +323 37 dataset """kinships""" +323 37 model """hole""" +323 37 loss """marginranking""" +323 37 regularizer """no""" +323 37 optimizer """adam""" +323 37 training_loop """owa""" +323 37 negative_sampler """basic""" +323 37 evaluator """rankbased""" +323 38 dataset """kinships""" +323 38 model """hole""" +323 38 loss """marginranking""" +323 38 regularizer """no""" +323 38 optimizer """adam""" +323 38 training_loop """owa""" +323 38 negative_sampler """basic""" +323 38 evaluator """rankbased""" +323 39 dataset """kinships""" +323 39 model """hole""" +323 39 loss """marginranking""" +323 39 regularizer """no""" +323 39 optimizer """adam""" +323 39 training_loop """owa""" +323 39 negative_sampler """basic""" +323 39 evaluator """rankbased""" +323 40 dataset """kinships""" +323 40 model """hole""" +323 40 loss """marginranking""" +323 40 regularizer """no""" +323 40 optimizer """adam""" +323 40 training_loop """owa""" +323 40 negative_sampler """basic""" +323 40 evaluator """rankbased""" +323 41 dataset """kinships""" +323 41 model """hole""" +323 41 loss """marginranking""" +323 41 regularizer """no""" +323 41 optimizer """adam""" +323 41 training_loop """owa""" +323 41 negative_sampler """basic""" +323 41 evaluator 
"""rankbased""" +323 42 dataset """kinships""" +323 42 model """hole""" +323 42 loss """marginranking""" +323 42 regularizer """no""" +323 42 optimizer """adam""" +323 42 training_loop """owa""" +323 42 negative_sampler """basic""" +323 42 evaluator """rankbased""" +323 43 dataset """kinships""" +323 43 model """hole""" +323 43 loss """marginranking""" +323 43 regularizer """no""" +323 43 optimizer """adam""" +323 43 training_loop """owa""" +323 43 negative_sampler """basic""" +323 43 evaluator """rankbased""" +323 44 dataset """kinships""" +323 44 model """hole""" +323 44 loss """marginranking""" +323 44 regularizer """no""" +323 44 optimizer """adam""" +323 44 training_loop """owa""" +323 44 negative_sampler """basic""" +323 44 evaluator """rankbased""" +323 45 dataset """kinships""" +323 45 model """hole""" +323 45 loss """marginranking""" +323 45 regularizer """no""" +323 45 optimizer """adam""" +323 45 training_loop """owa""" +323 45 negative_sampler """basic""" +323 45 evaluator """rankbased""" +323 46 dataset """kinships""" +323 46 model """hole""" +323 46 loss """marginranking""" +323 46 regularizer """no""" +323 46 optimizer """adam""" +323 46 training_loop """owa""" +323 46 negative_sampler """basic""" +323 46 evaluator """rankbased""" +323 47 dataset """kinships""" +323 47 model """hole""" +323 47 loss """marginranking""" +323 47 regularizer """no""" +323 47 optimizer """adam""" +323 47 training_loop """owa""" +323 47 negative_sampler """basic""" +323 47 evaluator """rankbased""" +323 48 dataset """kinships""" +323 48 model """hole""" +323 48 loss """marginranking""" +323 48 regularizer """no""" +323 48 optimizer """adam""" +323 48 training_loop """owa""" +323 48 negative_sampler """basic""" +323 48 evaluator """rankbased""" +323 49 dataset """kinships""" +323 49 model """hole""" +323 49 loss """marginranking""" +323 49 regularizer """no""" +323 49 optimizer """adam""" +323 49 training_loop """owa""" +323 49 negative_sampler """basic""" +323 49 evaluator 
"""rankbased""" +323 50 dataset """kinships""" +323 50 model """hole""" +323 50 loss """marginranking""" +323 50 regularizer """no""" +323 50 optimizer """adam""" +323 50 training_loop """owa""" +323 50 negative_sampler """basic""" +323 50 evaluator """rankbased""" +323 51 dataset """kinships""" +323 51 model """hole""" +323 51 loss """marginranking""" +323 51 regularizer """no""" +323 51 optimizer """adam""" +323 51 training_loop """owa""" +323 51 negative_sampler """basic""" +323 51 evaluator """rankbased""" +323 52 dataset """kinships""" +323 52 model """hole""" +323 52 loss """marginranking""" +323 52 regularizer """no""" +323 52 optimizer """adam""" +323 52 training_loop """owa""" +323 52 negative_sampler """basic""" +323 52 evaluator """rankbased""" +323 53 dataset """kinships""" +323 53 model """hole""" +323 53 loss """marginranking""" +323 53 regularizer """no""" +323 53 optimizer """adam""" +323 53 training_loop """owa""" +323 53 negative_sampler """basic""" +323 53 evaluator """rankbased""" +323 54 dataset """kinships""" +323 54 model """hole""" +323 54 loss """marginranking""" +323 54 regularizer """no""" +323 54 optimizer """adam""" +323 54 training_loop """owa""" +323 54 negative_sampler """basic""" +323 54 evaluator """rankbased""" +323 55 dataset """kinships""" +323 55 model """hole""" +323 55 loss """marginranking""" +323 55 regularizer """no""" +323 55 optimizer """adam""" +323 55 training_loop """owa""" +323 55 negative_sampler """basic""" +323 55 evaluator """rankbased""" +323 56 dataset """kinships""" +323 56 model """hole""" +323 56 loss """marginranking""" +323 56 regularizer """no""" +323 56 optimizer """adam""" +323 56 training_loop """owa""" +323 56 negative_sampler """basic""" +323 56 evaluator """rankbased""" +323 57 dataset """kinships""" +323 57 model """hole""" +323 57 loss """marginranking""" +323 57 regularizer """no""" +323 57 optimizer """adam""" +323 57 training_loop """owa""" +323 57 negative_sampler """basic""" +323 57 evaluator 
"""rankbased""" +323 58 dataset """kinships""" +323 58 model """hole""" +323 58 loss """marginranking""" +323 58 regularizer """no""" +323 58 optimizer """adam""" +323 58 training_loop """owa""" +323 58 negative_sampler """basic""" +323 58 evaluator """rankbased""" +323 59 dataset """kinships""" +323 59 model """hole""" +323 59 loss """marginranking""" +323 59 regularizer """no""" +323 59 optimizer """adam""" +323 59 training_loop """owa""" +323 59 negative_sampler """basic""" +323 59 evaluator """rankbased""" +323 60 dataset """kinships""" +323 60 model """hole""" +323 60 loss """marginranking""" +323 60 regularizer """no""" +323 60 optimizer """adam""" +323 60 training_loop """owa""" +323 60 negative_sampler """basic""" +323 60 evaluator """rankbased""" +323 61 dataset """kinships""" +323 61 model """hole""" +323 61 loss """marginranking""" +323 61 regularizer """no""" +323 61 optimizer """adam""" +323 61 training_loop """owa""" +323 61 negative_sampler """basic""" +323 61 evaluator """rankbased""" +323 62 dataset """kinships""" +323 62 model """hole""" +323 62 loss """marginranking""" +323 62 regularizer """no""" +323 62 optimizer """adam""" +323 62 training_loop """owa""" +323 62 negative_sampler """basic""" +323 62 evaluator """rankbased""" +323 63 dataset """kinships""" +323 63 model """hole""" +323 63 loss """marginranking""" +323 63 regularizer """no""" +323 63 optimizer """adam""" +323 63 training_loop """owa""" +323 63 negative_sampler """basic""" +323 63 evaluator """rankbased""" +323 64 dataset """kinships""" +323 64 model """hole""" +323 64 loss """marginranking""" +323 64 regularizer """no""" +323 64 optimizer """adam""" +323 64 training_loop """owa""" +323 64 negative_sampler """basic""" +323 64 evaluator """rankbased""" +323 65 dataset """kinships""" +323 65 model """hole""" +323 65 loss """marginranking""" +323 65 regularizer """no""" +323 65 optimizer """adam""" +323 65 training_loop """owa""" +323 65 negative_sampler """basic""" +323 65 evaluator 
"""rankbased""" +323 66 dataset """kinships""" +323 66 model """hole""" +323 66 loss """marginranking""" +323 66 regularizer """no""" +323 66 optimizer """adam""" +323 66 training_loop """owa""" +323 66 negative_sampler """basic""" +323 66 evaluator """rankbased""" +323 67 dataset """kinships""" +323 67 model """hole""" +323 67 loss """marginranking""" +323 67 regularizer """no""" +323 67 optimizer """adam""" +323 67 training_loop """owa""" +323 67 negative_sampler """basic""" +323 67 evaluator """rankbased""" +323 68 dataset """kinships""" +323 68 model """hole""" +323 68 loss """marginranking""" +323 68 regularizer """no""" +323 68 optimizer """adam""" +323 68 training_loop """owa""" +323 68 negative_sampler """basic""" +323 68 evaluator """rankbased""" +323 69 dataset """kinships""" +323 69 model """hole""" +323 69 loss """marginranking""" +323 69 regularizer """no""" +323 69 optimizer """adam""" +323 69 training_loop """owa""" +323 69 negative_sampler """basic""" +323 69 evaluator """rankbased""" +323 70 dataset """kinships""" +323 70 model """hole""" +323 70 loss """marginranking""" +323 70 regularizer """no""" +323 70 optimizer """adam""" +323 70 training_loop """owa""" +323 70 negative_sampler """basic""" +323 70 evaluator """rankbased""" +323 71 dataset """kinships""" +323 71 model """hole""" +323 71 loss """marginranking""" +323 71 regularizer """no""" +323 71 optimizer """adam""" +323 71 training_loop """owa""" +323 71 negative_sampler """basic""" +323 71 evaluator """rankbased""" +323 72 dataset """kinships""" +323 72 model """hole""" +323 72 loss """marginranking""" +323 72 regularizer """no""" +323 72 optimizer """adam""" +323 72 training_loop """owa""" +323 72 negative_sampler """basic""" +323 72 evaluator """rankbased""" +323 73 dataset """kinships""" +323 73 model """hole""" +323 73 loss """marginranking""" +323 73 regularizer """no""" +323 73 optimizer """adam""" +323 73 training_loop """owa""" +323 73 negative_sampler """basic""" +323 73 evaluator 
"""rankbased""" +323 74 dataset """kinships""" +323 74 model """hole""" +323 74 loss """marginranking""" +323 74 regularizer """no""" +323 74 optimizer """adam""" +323 74 training_loop """owa""" +323 74 negative_sampler """basic""" +323 74 evaluator """rankbased""" +323 75 dataset """kinships""" +323 75 model """hole""" +323 75 loss """marginranking""" +323 75 regularizer """no""" +323 75 optimizer """adam""" +323 75 training_loop """owa""" +323 75 negative_sampler """basic""" +323 75 evaluator """rankbased""" +323 76 dataset """kinships""" +323 76 model """hole""" +323 76 loss """marginranking""" +323 76 regularizer """no""" +323 76 optimizer """adam""" +323 76 training_loop """owa""" +323 76 negative_sampler """basic""" +323 76 evaluator """rankbased""" +323 77 dataset """kinships""" +323 77 model """hole""" +323 77 loss """marginranking""" +323 77 regularizer """no""" +323 77 optimizer """adam""" +323 77 training_loop """owa""" +323 77 negative_sampler """basic""" +323 77 evaluator """rankbased""" +323 78 dataset """kinships""" +323 78 model """hole""" +323 78 loss """marginranking""" +323 78 regularizer """no""" +323 78 optimizer """adam""" +323 78 training_loop """owa""" +323 78 negative_sampler """basic""" +323 78 evaluator """rankbased""" +323 79 dataset """kinships""" +323 79 model """hole""" +323 79 loss """marginranking""" +323 79 regularizer """no""" +323 79 optimizer """adam""" +323 79 training_loop """owa""" +323 79 negative_sampler """basic""" +323 79 evaluator """rankbased""" +323 80 dataset """kinships""" +323 80 model """hole""" +323 80 loss """marginranking""" +323 80 regularizer """no""" +323 80 optimizer """adam""" +323 80 training_loop """owa""" +323 80 negative_sampler """basic""" +323 80 evaluator """rankbased""" +323 81 dataset """kinships""" +323 81 model """hole""" +323 81 loss """marginranking""" +323 81 regularizer """no""" +323 81 optimizer """adam""" +323 81 training_loop """owa""" +323 81 negative_sampler """basic""" +323 81 evaluator 
"""rankbased""" +323 82 dataset """kinships""" +323 82 model """hole""" +323 82 loss """marginranking""" +323 82 regularizer """no""" +323 82 optimizer """adam""" +323 82 training_loop """owa""" +323 82 negative_sampler """basic""" +323 82 evaluator """rankbased""" +323 83 dataset """kinships""" +323 83 model """hole""" +323 83 loss """marginranking""" +323 83 regularizer """no""" +323 83 optimizer """adam""" +323 83 training_loop """owa""" +323 83 negative_sampler """basic""" +323 83 evaluator """rankbased""" +323 84 dataset """kinships""" +323 84 model """hole""" +323 84 loss """marginranking""" +323 84 regularizer """no""" +323 84 optimizer """adam""" +323 84 training_loop """owa""" +323 84 negative_sampler """basic""" +323 84 evaluator """rankbased""" +323 85 dataset """kinships""" +323 85 model """hole""" +323 85 loss """marginranking""" +323 85 regularizer """no""" +323 85 optimizer """adam""" +323 85 training_loop """owa""" +323 85 negative_sampler """basic""" +323 85 evaluator """rankbased""" +323 86 dataset """kinships""" +323 86 model """hole""" +323 86 loss """marginranking""" +323 86 regularizer """no""" +323 86 optimizer """adam""" +323 86 training_loop """owa""" +323 86 negative_sampler """basic""" +323 86 evaluator """rankbased""" +323 87 dataset """kinships""" +323 87 model """hole""" +323 87 loss """marginranking""" +323 87 regularizer """no""" +323 87 optimizer """adam""" +323 87 training_loop """owa""" +323 87 negative_sampler """basic""" +323 87 evaluator """rankbased""" +323 88 dataset """kinships""" +323 88 model """hole""" +323 88 loss """marginranking""" +323 88 regularizer """no""" +323 88 optimizer """adam""" +323 88 training_loop """owa""" +323 88 negative_sampler """basic""" +323 88 evaluator """rankbased""" +323 89 dataset """kinships""" +323 89 model """hole""" +323 89 loss """marginranking""" +323 89 regularizer """no""" +323 89 optimizer """adam""" +323 89 training_loop """owa""" +323 89 negative_sampler """basic""" +323 89 evaluator 
"""rankbased""" +323 90 dataset """kinships""" +323 90 model """hole""" +323 90 loss """marginranking""" +323 90 regularizer """no""" +323 90 optimizer """adam""" +323 90 training_loop """owa""" +323 90 negative_sampler """basic""" +323 90 evaluator """rankbased""" +323 91 dataset """kinships""" +323 91 model """hole""" +323 91 loss """marginranking""" +323 91 regularizer """no""" +323 91 optimizer """adam""" +323 91 training_loop """owa""" +323 91 negative_sampler """basic""" +323 91 evaluator """rankbased""" +323 92 dataset """kinships""" +323 92 model """hole""" +323 92 loss """marginranking""" +323 92 regularizer """no""" +323 92 optimizer """adam""" +323 92 training_loop """owa""" +323 92 negative_sampler """basic""" +323 92 evaluator """rankbased""" +323 93 dataset """kinships""" +323 93 model """hole""" +323 93 loss """marginranking""" +323 93 regularizer """no""" +323 93 optimizer """adam""" +323 93 training_loop """owa""" +323 93 negative_sampler """basic""" +323 93 evaluator """rankbased""" +323 94 dataset """kinships""" +323 94 model """hole""" +323 94 loss """marginranking""" +323 94 regularizer """no""" +323 94 optimizer """adam""" +323 94 training_loop """owa""" +323 94 negative_sampler """basic""" +323 94 evaluator """rankbased""" +323 95 dataset """kinships""" +323 95 model """hole""" +323 95 loss """marginranking""" +323 95 regularizer """no""" +323 95 optimizer """adam""" +323 95 training_loop """owa""" +323 95 negative_sampler """basic""" +323 95 evaluator """rankbased""" +323 96 dataset """kinships""" +323 96 model """hole""" +323 96 loss """marginranking""" +323 96 regularizer """no""" +323 96 optimizer """adam""" +323 96 training_loop """owa""" +323 96 negative_sampler """basic""" +323 96 evaluator """rankbased""" +323 97 dataset """kinships""" +323 97 model """hole""" +323 97 loss """marginranking""" +323 97 regularizer """no""" +323 97 optimizer """adam""" +323 97 training_loop """owa""" +323 97 negative_sampler """basic""" +323 97 evaluator 
"""rankbased""" +323 98 dataset """kinships""" +323 98 model """hole""" +323 98 loss """marginranking""" +323 98 regularizer """no""" +323 98 optimizer """adam""" +323 98 training_loop """owa""" +323 98 negative_sampler """basic""" +323 98 evaluator """rankbased""" +323 99 dataset """kinships""" +323 99 model """hole""" +323 99 loss """marginranking""" +323 99 regularizer """no""" +323 99 optimizer """adam""" +323 99 training_loop """owa""" +323 99 negative_sampler """basic""" +323 99 evaluator """rankbased""" +323 100 dataset """kinships""" +323 100 model """hole""" +323 100 loss """marginranking""" +323 100 regularizer """no""" +323 100 optimizer """adam""" +323 100 training_loop """owa""" +323 100 negative_sampler """basic""" +323 100 evaluator """rankbased""" +324 1 model.embedding_dim 1.0 +324 1 loss.margin 8.792299549537853 +324 1 loss.adversarial_temperature 0.9589465824617729 +324 1 optimizer.lr 0.00247010543184085 +324 1 negative_sampler.num_negs_per_pos 87.0 +324 1 training.batch_size 2.0 +324 2 model.embedding_dim 1.0 +324 2 loss.margin 27.27112817472061 +324 2 loss.adversarial_temperature 0.4043907953459649 +324 2 optimizer.lr 0.0033026836506397134 +324 2 negative_sampler.num_negs_per_pos 25.0 +324 2 training.batch_size 2.0 +324 3 model.embedding_dim 2.0 +324 3 loss.margin 3.64886253506096 +324 3 loss.adversarial_temperature 0.7227844016804169 +324 3 optimizer.lr 0.003982466209343801 +324 3 negative_sampler.num_negs_per_pos 33.0 +324 3 training.batch_size 1.0 +324 4 model.embedding_dim 0.0 +324 4 loss.margin 1.721505915178659 +324 4 loss.adversarial_temperature 0.4385424199301171 +324 4 optimizer.lr 0.022284298972036452 +324 4 negative_sampler.num_negs_per_pos 72.0 +324 4 training.batch_size 0.0 +324 5 model.embedding_dim 0.0 +324 5 loss.margin 2.4203223960271476 +324 5 loss.adversarial_temperature 0.6541000897730385 +324 5 optimizer.lr 0.0013944131376632464 +324 5 negative_sampler.num_negs_per_pos 79.0 +324 5 training.batch_size 1.0 +324 6 
model.embedding_dim 1.0 +324 6 loss.margin 16.952478388490725 +324 6 loss.adversarial_temperature 0.41383626165230514 +324 6 optimizer.lr 0.0010914192571776617 +324 6 negative_sampler.num_negs_per_pos 72.0 +324 6 training.batch_size 2.0 +324 7 model.embedding_dim 1.0 +324 7 loss.margin 20.222819746892323 +324 7 loss.adversarial_temperature 0.24423061920850153 +324 7 optimizer.lr 0.0012349616565654946 +324 7 negative_sampler.num_negs_per_pos 71.0 +324 7 training.batch_size 2.0 +324 8 model.embedding_dim 0.0 +324 8 loss.margin 21.443132551774493 +324 8 loss.adversarial_temperature 0.5873415063194982 +324 8 optimizer.lr 0.051460328975703816 +324 8 negative_sampler.num_negs_per_pos 18.0 +324 8 training.batch_size 2.0 +324 9 model.embedding_dim 1.0 +324 9 loss.margin 8.258637191452538 +324 9 loss.adversarial_temperature 0.40644055687814173 +324 9 optimizer.lr 0.015538264032978399 +324 9 negative_sampler.num_negs_per_pos 6.0 +324 9 training.batch_size 1.0 +324 10 model.embedding_dim 1.0 +324 10 loss.margin 26.70639994788247 +324 10 loss.adversarial_temperature 0.5040863667818203 +324 10 optimizer.lr 0.0026047566689857775 +324 10 negative_sampler.num_negs_per_pos 76.0 +324 10 training.batch_size 1.0 +324 11 model.embedding_dim 2.0 +324 11 loss.margin 3.2523164187887366 +324 11 loss.adversarial_temperature 0.6848290043257352 +324 11 optimizer.lr 0.0012969564238748477 +324 11 negative_sampler.num_negs_per_pos 97.0 +324 11 training.batch_size 2.0 +324 12 model.embedding_dim 2.0 +324 12 loss.margin 11.243129565636142 +324 12 loss.adversarial_temperature 0.8796955480870293 +324 12 optimizer.lr 0.0031597019856520058 +324 12 negative_sampler.num_negs_per_pos 11.0 +324 12 training.batch_size 0.0 +324 13 model.embedding_dim 2.0 +324 13 loss.margin 22.736783045947035 +324 13 loss.adversarial_temperature 0.6425616181815477 +324 13 optimizer.lr 0.05019351434589878 +324 13 negative_sampler.num_negs_per_pos 78.0 +324 13 training.batch_size 0.0 +324 14 model.embedding_dim 1.0 +324 14 
loss.margin 5.865924549715418 +324 14 loss.adversarial_temperature 0.7942261172047358 +324 14 optimizer.lr 0.004534866134046859 +324 14 negative_sampler.num_negs_per_pos 70.0 +324 14 training.batch_size 2.0 +324 15 model.embedding_dim 2.0 +324 15 loss.margin 7.232953969040342 +324 15 loss.adversarial_temperature 0.12841057844032172 +324 15 optimizer.lr 0.0024332880556443594 +324 15 negative_sampler.num_negs_per_pos 3.0 +324 15 training.batch_size 0.0 +324 16 model.embedding_dim 2.0 +324 16 loss.margin 16.387370321899862 +324 16 loss.adversarial_temperature 0.4505203369259517 +324 16 optimizer.lr 0.012171679176178488 +324 16 negative_sampler.num_negs_per_pos 36.0 +324 16 training.batch_size 1.0 +324 17 model.embedding_dim 0.0 +324 17 loss.margin 3.2897850611436983 +324 17 loss.adversarial_temperature 0.5704761772799294 +324 17 optimizer.lr 0.005071160813421079 +324 17 negative_sampler.num_negs_per_pos 48.0 +324 17 training.batch_size 2.0 +324 18 model.embedding_dim 2.0 +324 18 loss.margin 7.874186975158171 +324 18 loss.adversarial_temperature 0.7439861224280756 +324 18 optimizer.lr 0.01112825007697113 +324 18 negative_sampler.num_negs_per_pos 98.0 +324 18 training.batch_size 2.0 +324 19 model.embedding_dim 0.0 +324 19 loss.margin 12.066880670895586 +324 19 loss.adversarial_temperature 0.24710207263700298 +324 19 optimizer.lr 0.0024943077552970283 +324 19 negative_sampler.num_negs_per_pos 79.0 +324 19 training.batch_size 1.0 +324 20 model.embedding_dim 1.0 +324 20 loss.margin 8.201586345602486 +324 20 loss.adversarial_temperature 0.3991387916786665 +324 20 optimizer.lr 0.0010499364256685206 +324 20 negative_sampler.num_negs_per_pos 84.0 +324 20 training.batch_size 0.0 +324 21 model.embedding_dim 0.0 +324 21 loss.margin 21.28734434887799 +324 21 loss.adversarial_temperature 0.11747858177945561 +324 21 optimizer.lr 0.0330805140051095 +324 21 negative_sampler.num_negs_per_pos 9.0 +324 21 training.batch_size 0.0 +324 22 model.embedding_dim 2.0 +324 22 loss.margin 
7.378254096535144 +324 22 loss.adversarial_temperature 0.4424846630843623 +324 22 optimizer.lr 0.01715304219418627 +324 22 negative_sampler.num_negs_per_pos 84.0 +324 22 training.batch_size 0.0 +324 23 model.embedding_dim 2.0 +324 23 loss.margin 20.05266264767451 +324 23 loss.adversarial_temperature 0.8539999131414359 +324 23 optimizer.lr 0.04193221530344384 +324 23 negative_sampler.num_negs_per_pos 16.0 +324 23 training.batch_size 0.0 +324 24 model.embedding_dim 0.0 +324 24 loss.margin 6.328498713428326 +324 24 loss.adversarial_temperature 0.30739575427173293 +324 24 optimizer.lr 0.00510229059615684 +324 24 negative_sampler.num_negs_per_pos 47.0 +324 24 training.batch_size 1.0 +324 25 model.embedding_dim 0.0 +324 25 loss.margin 9.930420288271527 +324 25 loss.adversarial_temperature 0.7927592578188346 +324 25 optimizer.lr 0.022473769548706213 +324 25 negative_sampler.num_negs_per_pos 23.0 +324 25 training.batch_size 2.0 +324 26 model.embedding_dim 2.0 +324 26 loss.margin 10.451054120590225 +324 26 loss.adversarial_temperature 0.17995738539603362 +324 26 optimizer.lr 0.008058413224493943 +324 26 negative_sampler.num_negs_per_pos 17.0 +324 26 training.batch_size 1.0 +324 27 model.embedding_dim 1.0 +324 27 loss.margin 6.489145840886001 +324 27 loss.adversarial_temperature 0.7636192284358714 +324 27 optimizer.lr 0.01048872879870285 +324 27 negative_sampler.num_negs_per_pos 29.0 +324 27 training.batch_size 2.0 +324 28 model.embedding_dim 1.0 +324 28 loss.margin 16.001682425024033 +324 28 loss.adversarial_temperature 0.8232275448652042 +324 28 optimizer.lr 0.023074409423471165 +324 28 negative_sampler.num_negs_per_pos 80.0 +324 28 training.batch_size 0.0 +324 29 model.embedding_dim 2.0 +324 29 loss.margin 14.335490675335505 +324 29 loss.adversarial_temperature 0.1658919248972755 +324 29 optimizer.lr 0.006039116666705196 +324 29 negative_sampler.num_negs_per_pos 47.0 +324 29 training.batch_size 0.0 +324 30 model.embedding_dim 0.0 +324 30 loss.margin 12.376894624146848 
+324 30 loss.adversarial_temperature 0.36455957132051653 +324 30 optimizer.lr 0.035446463726234834 +324 30 negative_sampler.num_negs_per_pos 70.0 +324 30 training.batch_size 0.0 +324 31 model.embedding_dim 1.0 +324 31 loss.margin 16.55812644368934 +324 31 loss.adversarial_temperature 0.30691800371129824 +324 31 optimizer.lr 0.0519266819900387 +324 31 negative_sampler.num_negs_per_pos 67.0 +324 31 training.batch_size 0.0 +324 32 model.embedding_dim 2.0 +324 32 loss.margin 16.321038415550234 +324 32 loss.adversarial_temperature 0.8853038514952933 +324 32 optimizer.lr 0.002406948082188256 +324 32 negative_sampler.num_negs_per_pos 76.0 +324 32 training.batch_size 2.0 +324 33 model.embedding_dim 2.0 +324 33 loss.margin 1.9681255029645417 +324 33 loss.adversarial_temperature 0.6793362101260993 +324 33 optimizer.lr 0.002267163003243155 +324 33 negative_sampler.num_negs_per_pos 9.0 +324 33 training.batch_size 1.0 +324 34 model.embedding_dim 2.0 +324 34 loss.margin 18.223545131751614 +324 34 loss.adversarial_temperature 0.8002280835918475 +324 34 optimizer.lr 0.0054608579413672 +324 34 negative_sampler.num_negs_per_pos 86.0 +324 34 training.batch_size 0.0 +324 35 model.embedding_dim 0.0 +324 35 loss.margin 19.671425228841397 +324 35 loss.adversarial_temperature 0.9391069874709332 +324 35 optimizer.lr 0.007582737961250356 +324 35 negative_sampler.num_negs_per_pos 0.0 +324 35 training.batch_size 0.0 +324 36 model.embedding_dim 1.0 +324 36 loss.margin 13.19581609485878 +324 36 loss.adversarial_temperature 0.12504261697114935 +324 36 optimizer.lr 0.07929789178717087 +324 36 negative_sampler.num_negs_per_pos 30.0 +324 36 training.batch_size 1.0 +324 37 model.embedding_dim 2.0 +324 37 loss.margin 11.364366556189095 +324 37 loss.adversarial_temperature 0.3605619726238428 +324 37 optimizer.lr 0.0013758578302572252 +324 37 negative_sampler.num_negs_per_pos 23.0 +324 37 training.batch_size 2.0 +324 38 model.embedding_dim 2.0 +324 38 loss.margin 20.86237379789626 +324 38 
loss.adversarial_temperature 0.7837177941003638 +324 38 optimizer.lr 0.002365995126894593 +324 38 negative_sampler.num_negs_per_pos 68.0 +324 38 training.batch_size 1.0 +324 39 model.embedding_dim 1.0 +324 39 loss.margin 6.123357677283922 +324 39 loss.adversarial_temperature 0.8459920321485319 +324 39 optimizer.lr 0.005457493316000817 +324 39 negative_sampler.num_negs_per_pos 0.0 +324 39 training.batch_size 2.0 +324 40 model.embedding_dim 1.0 +324 40 loss.margin 12.322773730364563 +324 40 loss.adversarial_temperature 0.8997189320250825 +324 40 optimizer.lr 0.0013592258588973022 +324 40 negative_sampler.num_negs_per_pos 56.0 +324 40 training.batch_size 0.0 +324 41 model.embedding_dim 1.0 +324 41 loss.margin 26.692934924835196 +324 41 loss.adversarial_temperature 0.3725614022941841 +324 41 optimizer.lr 0.0018101243208245865 +324 41 negative_sampler.num_negs_per_pos 94.0 +324 41 training.batch_size 2.0 +324 42 model.embedding_dim 0.0 +324 42 loss.margin 20.642977006311916 +324 42 loss.adversarial_temperature 0.645538488103535 +324 42 optimizer.lr 0.008084958166688326 +324 42 negative_sampler.num_negs_per_pos 56.0 +324 42 training.batch_size 1.0 +324 43 model.embedding_dim 0.0 +324 43 loss.margin 13.89615710592888 +324 43 loss.adversarial_temperature 0.6034255016418706 +324 43 optimizer.lr 0.008837110292361398 +324 43 negative_sampler.num_negs_per_pos 45.0 +324 43 training.batch_size 1.0 +324 44 model.embedding_dim 2.0 +324 44 loss.margin 10.342119675700921 +324 44 loss.adversarial_temperature 0.32425580528770126 +324 44 optimizer.lr 0.021090660521321563 +324 44 negative_sampler.num_negs_per_pos 27.0 +324 44 training.batch_size 2.0 +324 45 model.embedding_dim 2.0 +324 45 loss.margin 12.735762198281662 +324 45 loss.adversarial_temperature 0.8772819390215979 +324 45 optimizer.lr 0.061606908751795274 +324 45 negative_sampler.num_negs_per_pos 52.0 +324 45 training.batch_size 1.0 +324 46 model.embedding_dim 2.0 +324 46 loss.margin 12.860858406065649 +324 46 
loss.adversarial_temperature 0.10280050397869284 +324 46 optimizer.lr 0.004321776855604718 +324 46 negative_sampler.num_negs_per_pos 63.0 +324 46 training.batch_size 2.0 +324 47 model.embedding_dim 1.0 +324 47 loss.margin 27.04315510888293 +324 47 loss.adversarial_temperature 0.39405985285522005 +324 47 optimizer.lr 0.010094951970447373 +324 47 negative_sampler.num_negs_per_pos 75.0 +324 47 training.batch_size 1.0 +324 48 model.embedding_dim 0.0 +324 48 loss.margin 25.96057532147164 +324 48 loss.adversarial_temperature 0.5491237417589305 +324 48 optimizer.lr 0.0011805613515794336 +324 48 negative_sampler.num_negs_per_pos 14.0 +324 48 training.batch_size 0.0 +324 49 model.embedding_dim 0.0 +324 49 loss.margin 21.410120590949525 +324 49 loss.adversarial_temperature 0.6686360719708286 +324 49 optimizer.lr 0.0062964613746721455 +324 49 negative_sampler.num_negs_per_pos 29.0 +324 49 training.batch_size 2.0 +324 50 model.embedding_dim 0.0 +324 50 loss.margin 29.507383172389233 +324 50 loss.adversarial_temperature 0.3598235933608237 +324 50 optimizer.lr 0.05950231878631087 +324 50 negative_sampler.num_negs_per_pos 45.0 +324 50 training.batch_size 2.0 +324 51 model.embedding_dim 2.0 +324 51 loss.margin 1.4584182748730703 +324 51 loss.adversarial_temperature 0.22600927524390524 +324 51 optimizer.lr 0.012927480015534832 +324 51 negative_sampler.num_negs_per_pos 90.0 +324 51 training.batch_size 1.0 +324 52 model.embedding_dim 2.0 +324 52 loss.margin 13.2842332544679 +324 52 loss.adversarial_temperature 0.7114280109910396 +324 52 optimizer.lr 0.006913816291309072 +324 52 negative_sampler.num_negs_per_pos 91.0 +324 52 training.batch_size 1.0 +324 53 model.embedding_dim 2.0 +324 53 loss.margin 11.84178522688938 +324 53 loss.adversarial_temperature 0.8888358470599426 +324 53 optimizer.lr 0.04476607567507288 +324 53 negative_sampler.num_negs_per_pos 25.0 +324 53 training.batch_size 0.0 +324 54 model.embedding_dim 0.0 +324 54 loss.margin 21.3647185129363 +324 54 
loss.adversarial_temperature 0.8304463407982694 +324 54 optimizer.lr 0.013875029914846836 +324 54 negative_sampler.num_negs_per_pos 63.0 +324 54 training.batch_size 2.0 +324 55 model.embedding_dim 1.0 +324 55 loss.margin 15.467436293934233 +324 55 loss.adversarial_temperature 0.6669774393567256 +324 55 optimizer.lr 0.02905772003479809 +324 55 negative_sampler.num_negs_per_pos 96.0 +324 55 training.batch_size 0.0 +324 56 model.embedding_dim 0.0 +324 56 loss.margin 14.838455752649537 +324 56 loss.adversarial_temperature 0.4353891319240174 +324 56 optimizer.lr 0.0783542874502504 +324 56 negative_sampler.num_negs_per_pos 21.0 +324 56 training.batch_size 2.0 +324 57 model.embedding_dim 0.0 +324 57 loss.margin 12.116068442838456 +324 57 loss.adversarial_temperature 0.8554616568080875 +324 57 optimizer.lr 0.05683981828913192 +324 57 negative_sampler.num_negs_per_pos 29.0 +324 57 training.batch_size 0.0 +324 58 model.embedding_dim 1.0 +324 58 loss.margin 28.907267362034037 +324 58 loss.adversarial_temperature 0.7440415492989352 +324 58 optimizer.lr 0.02710358231733899 +324 58 negative_sampler.num_negs_per_pos 2.0 +324 58 training.batch_size 0.0 +324 59 model.embedding_dim 2.0 +324 59 loss.margin 14.888046356810612 +324 59 loss.adversarial_temperature 0.12381612215560378 +324 59 optimizer.lr 0.002677960014420488 +324 59 negative_sampler.num_negs_per_pos 66.0 +324 59 training.batch_size 2.0 +324 60 model.embedding_dim 0.0 +324 60 loss.margin 1.505244588451495 +324 60 loss.adversarial_temperature 0.4780205409972281 +324 60 optimizer.lr 0.03668115810222388 +324 60 negative_sampler.num_negs_per_pos 22.0 +324 60 training.batch_size 2.0 +324 61 model.embedding_dim 2.0 +324 61 loss.margin 13.522947047558091 +324 61 loss.adversarial_temperature 0.11479253918360345 +324 61 optimizer.lr 0.0013100882027833613 +324 61 negative_sampler.num_negs_per_pos 2.0 +324 61 training.batch_size 1.0 +324 62 model.embedding_dim 0.0 +324 62 loss.margin 14.536173621835738 +324 62 
loss.adversarial_temperature 0.9357656897657118 +324 62 optimizer.lr 0.017785966793506757 +324 62 negative_sampler.num_negs_per_pos 39.0 +324 62 training.batch_size 0.0 +324 63 model.embedding_dim 0.0 +324 63 loss.margin 21.184438028655936 +324 63 loss.adversarial_temperature 0.9308133737994506 +324 63 optimizer.lr 0.03411323827690686 +324 63 negative_sampler.num_negs_per_pos 24.0 +324 63 training.batch_size 1.0 +324 64 model.embedding_dim 2.0 +324 64 loss.margin 24.934059331459395 +324 64 loss.adversarial_temperature 0.7654246097559165 +324 64 optimizer.lr 0.029648687765718166 +324 64 negative_sampler.num_negs_per_pos 8.0 +324 64 training.batch_size 1.0 +324 65 model.embedding_dim 0.0 +324 65 loss.margin 16.151499276178857 +324 65 loss.adversarial_temperature 0.9173656507630563 +324 65 optimizer.lr 0.05681817076365341 +324 65 negative_sampler.num_negs_per_pos 16.0 +324 65 training.batch_size 2.0 +324 66 model.embedding_dim 0.0 +324 66 loss.margin 10.839021827681364 +324 66 loss.adversarial_temperature 0.7674255646806627 +324 66 optimizer.lr 0.00195093083122799 +324 66 negative_sampler.num_negs_per_pos 54.0 +324 66 training.batch_size 2.0 +324 67 model.embedding_dim 0.0 +324 67 loss.margin 5.306318546799888 +324 67 loss.adversarial_temperature 0.12336528297199463 +324 67 optimizer.lr 0.06135411237513288 +324 67 negative_sampler.num_negs_per_pos 50.0 +324 67 training.batch_size 1.0 +324 68 model.embedding_dim 2.0 +324 68 loss.margin 21.28547965882551 +324 68 loss.adversarial_temperature 0.7021676705833556 +324 68 optimizer.lr 0.010344594321943587 +324 68 negative_sampler.num_negs_per_pos 5.0 +324 68 training.batch_size 2.0 +324 69 model.embedding_dim 0.0 +324 69 loss.margin 3.1442361155113767 +324 69 loss.adversarial_temperature 0.7542931799228368 +324 69 optimizer.lr 0.0013359835091673465 +324 69 negative_sampler.num_negs_per_pos 79.0 +324 69 training.batch_size 2.0 +324 70 model.embedding_dim 2.0 +324 70 loss.margin 21.450573388409406 +324 70 
loss.adversarial_temperature 0.48179606810715314 +324 70 optimizer.lr 0.05003445911900406 +324 70 negative_sampler.num_negs_per_pos 64.0 +324 70 training.batch_size 2.0 +324 71 model.embedding_dim 2.0 +324 71 loss.margin 8.531441676178112 +324 71 loss.adversarial_temperature 0.29330314980790584 +324 71 optimizer.lr 0.0010450627399224777 +324 71 negative_sampler.num_negs_per_pos 2.0 +324 71 training.batch_size 0.0 +324 72 model.embedding_dim 1.0 +324 72 loss.margin 29.615178042356558 +324 72 loss.adversarial_temperature 0.22375426867924494 +324 72 optimizer.lr 0.004437370478091243 +324 72 negative_sampler.num_negs_per_pos 86.0 +324 72 training.batch_size 0.0 +324 73 model.embedding_dim 1.0 +324 73 loss.margin 29.79802666153832 +324 73 loss.adversarial_temperature 0.42924747064189994 +324 73 optimizer.lr 0.001816960279473161 +324 73 negative_sampler.num_negs_per_pos 80.0 +324 73 training.batch_size 0.0 +324 74 model.embedding_dim 0.0 +324 74 loss.margin 8.023192944319998 +324 74 loss.adversarial_temperature 0.5703536865038538 +324 74 optimizer.lr 0.003028460861033169 +324 74 negative_sampler.num_negs_per_pos 35.0 +324 74 training.batch_size 1.0 +324 75 model.embedding_dim 1.0 +324 75 loss.margin 13.395864839039252 +324 75 loss.adversarial_temperature 0.5911966050887437 +324 75 optimizer.lr 0.07405171751632408 +324 75 negative_sampler.num_negs_per_pos 23.0 +324 75 training.batch_size 2.0 +324 76 model.embedding_dim 2.0 +324 76 loss.margin 27.753933367261308 +324 76 loss.adversarial_temperature 0.10245163246374665 +324 76 optimizer.lr 0.05550787814919265 +324 76 negative_sampler.num_negs_per_pos 89.0 +324 76 training.batch_size 1.0 +324 77 model.embedding_dim 1.0 +324 77 loss.margin 4.233471592272734 +324 77 loss.adversarial_temperature 0.7884085288939445 +324 77 optimizer.lr 0.015549115236393313 +324 77 negative_sampler.num_negs_per_pos 54.0 +324 77 training.batch_size 1.0 +324 78 model.embedding_dim 1.0 +324 78 loss.margin 23.64930305925984 +324 78 
loss.adversarial_temperature 0.26837980876761675 +324 78 optimizer.lr 0.0020484694496879477 +324 78 negative_sampler.num_negs_per_pos 58.0 +324 78 training.batch_size 2.0 +324 79 model.embedding_dim 1.0 +324 79 loss.margin 1.3250565416035096 +324 79 loss.adversarial_temperature 0.7338232228723384 +324 79 optimizer.lr 0.0013821053837332897 +324 79 negative_sampler.num_negs_per_pos 47.0 +324 79 training.batch_size 0.0 +324 80 model.embedding_dim 0.0 +324 80 loss.margin 11.913092787026832 +324 80 loss.adversarial_temperature 0.5743030291926309 +324 80 optimizer.lr 0.0302239753543301 +324 80 negative_sampler.num_negs_per_pos 0.0 +324 80 training.batch_size 0.0 +324 81 model.embedding_dim 0.0 +324 81 loss.margin 13.917744105272046 +324 81 loss.adversarial_temperature 0.17485179419720664 +324 81 optimizer.lr 0.0010385922786926144 +324 81 negative_sampler.num_negs_per_pos 31.0 +324 81 training.batch_size 1.0 +324 82 model.embedding_dim 0.0 +324 82 loss.margin 4.306553550509019 +324 82 loss.adversarial_temperature 0.4588014135878099 +324 82 optimizer.lr 0.005823341114490902 +324 82 negative_sampler.num_negs_per_pos 60.0 +324 82 training.batch_size 0.0 +324 83 model.embedding_dim 2.0 +324 83 loss.margin 16.404903217143644 +324 83 loss.adversarial_temperature 0.37446504794526936 +324 83 optimizer.lr 0.01347078488527182 +324 83 negative_sampler.num_negs_per_pos 7.0 +324 83 training.batch_size 2.0 +324 84 model.embedding_dim 0.0 +324 84 loss.margin 10.018892226405404 +324 84 loss.adversarial_temperature 0.17868772302044095 +324 84 optimizer.lr 0.013084337628045502 +324 84 negative_sampler.num_negs_per_pos 9.0 +324 84 training.batch_size 2.0 +324 85 model.embedding_dim 2.0 +324 85 loss.margin 6.575069594909311 +324 85 loss.adversarial_temperature 0.5542372632024717 +324 85 optimizer.lr 0.013944782460501197 +324 85 negative_sampler.num_negs_per_pos 22.0 +324 85 training.batch_size 0.0 +324 86 model.embedding_dim 0.0 +324 86 loss.margin 11.8941552084731 +324 86 
loss.adversarial_temperature 0.398007591323872 +324 86 optimizer.lr 0.006625531397505639 +324 86 negative_sampler.num_negs_per_pos 43.0 +324 86 training.batch_size 2.0 +324 87 model.embedding_dim 1.0 +324 87 loss.margin 8.753421473013848 +324 87 loss.adversarial_temperature 0.5262712509263326 +324 87 optimizer.lr 0.010873977585141432 +324 87 negative_sampler.num_negs_per_pos 59.0 +324 87 training.batch_size 1.0 +324 88 model.embedding_dim 1.0 +324 88 loss.margin 24.301826307653947 +324 88 loss.adversarial_temperature 0.6349593620608074 +324 88 optimizer.lr 0.020579914487925764 +324 88 negative_sampler.num_negs_per_pos 32.0 +324 88 training.batch_size 0.0 +324 89 model.embedding_dim 1.0 +324 89 loss.margin 27.810470075697925 +324 89 loss.adversarial_temperature 0.6300745612351336 +324 89 optimizer.lr 0.008925636641163515 +324 89 negative_sampler.num_negs_per_pos 64.0 +324 89 training.batch_size 1.0 +324 90 model.embedding_dim 1.0 +324 90 loss.margin 29.40887535331716 +324 90 loss.adversarial_temperature 0.8149709152709791 +324 90 optimizer.lr 0.01166901134417733 +324 90 negative_sampler.num_negs_per_pos 3.0 +324 90 training.batch_size 1.0 +324 91 model.embedding_dim 0.0 +324 91 loss.margin 16.866314887003078 +324 91 loss.adversarial_temperature 0.6944262055045476 +324 91 optimizer.lr 0.0012888744139069837 +324 91 negative_sampler.num_negs_per_pos 92.0 +324 91 training.batch_size 2.0 +324 92 model.embedding_dim 2.0 +324 92 loss.margin 18.714649300614102 +324 92 loss.adversarial_temperature 0.19060887389574224 +324 92 optimizer.lr 0.02641817561863181 +324 92 negative_sampler.num_negs_per_pos 93.0 +324 92 training.batch_size 0.0 +324 93 model.embedding_dim 0.0 +324 93 loss.margin 13.055647741766323 +324 93 loss.adversarial_temperature 0.5155543079016064 +324 93 optimizer.lr 0.015949239902768978 +324 93 negative_sampler.num_negs_per_pos 63.0 +324 93 training.batch_size 2.0 +324 94 model.embedding_dim 1.0 +324 94 loss.margin 13.742986382030947 +324 94 
loss.adversarial_temperature 0.7257023511060027 +324 94 optimizer.lr 0.018167277981212825 +324 94 negative_sampler.num_negs_per_pos 9.0 +324 94 training.batch_size 1.0 +324 95 model.embedding_dim 1.0 +324 95 loss.margin 3.0748261682122324 +324 95 loss.adversarial_temperature 0.8103483907231622 +324 95 optimizer.lr 0.019060791946018306 +324 95 negative_sampler.num_negs_per_pos 66.0 +324 95 training.batch_size 1.0 +324 96 model.embedding_dim 0.0 +324 96 loss.margin 16.782485506591282 +324 96 loss.adversarial_temperature 0.3313000488024284 +324 96 optimizer.lr 0.002719590925440291 +324 96 negative_sampler.num_negs_per_pos 59.0 +324 96 training.batch_size 0.0 +324 97 model.embedding_dim 1.0 +324 97 loss.margin 8.103639863238518 +324 97 loss.adversarial_temperature 0.464496474483587 +324 97 optimizer.lr 0.028841989031858695 +324 97 negative_sampler.num_negs_per_pos 2.0 +324 97 training.batch_size 1.0 +324 98 model.embedding_dim 2.0 +324 98 loss.margin 25.540350661564016 +324 98 loss.adversarial_temperature 0.477345462948073 +324 98 optimizer.lr 0.03261300861424131 +324 98 negative_sampler.num_negs_per_pos 68.0 +324 98 training.batch_size 1.0 +324 99 model.embedding_dim 0.0 +324 99 loss.margin 20.175017090778354 +324 99 loss.adversarial_temperature 0.6832841462675812 +324 99 optimizer.lr 0.04733408182424117 +324 99 negative_sampler.num_negs_per_pos 63.0 +324 99 training.batch_size 2.0 +324 100 model.embedding_dim 2.0 +324 100 loss.margin 10.860705178661192 +324 100 loss.adversarial_temperature 0.6030697714892189 +324 100 optimizer.lr 0.05863140548802294 +324 100 negative_sampler.num_negs_per_pos 16.0 +324 100 training.batch_size 0.0 +324 1 dataset """kinships""" +324 1 model """hole""" +324 1 loss """nssa""" +324 1 regularizer """no""" +324 1 optimizer """adam""" +324 1 training_loop """owa""" +324 1 negative_sampler """basic""" +324 1 evaluator """rankbased""" +324 2 dataset """kinships""" +324 2 model """hole""" +324 2 loss """nssa""" +324 2 regularizer """no""" +324 2 
optimizer """adam""" +324 2 training_loop """owa""" +324 2 negative_sampler """basic""" +324 2 evaluator """rankbased""" +324 3 dataset """kinships""" +324 3 model """hole""" +324 3 loss """nssa""" +324 3 regularizer """no""" +324 3 optimizer """adam""" +324 3 training_loop """owa""" +324 3 negative_sampler """basic""" +324 3 evaluator """rankbased""" +324 4 dataset """kinships""" +324 4 model """hole""" +324 4 loss """nssa""" +324 4 regularizer """no""" +324 4 optimizer """adam""" +324 4 training_loop """owa""" +324 4 negative_sampler """basic""" +324 4 evaluator """rankbased""" +324 5 dataset """kinships""" +324 5 model """hole""" +324 5 loss """nssa""" +324 5 regularizer """no""" +324 5 optimizer """adam""" +324 5 training_loop """owa""" +324 5 negative_sampler """basic""" +324 5 evaluator """rankbased""" +324 6 dataset """kinships""" +324 6 model """hole""" +324 6 loss """nssa""" +324 6 regularizer """no""" +324 6 optimizer """adam""" +324 6 training_loop """owa""" +324 6 negative_sampler """basic""" +324 6 evaluator """rankbased""" +324 7 dataset """kinships""" +324 7 model """hole""" +324 7 loss """nssa""" +324 7 regularizer """no""" +324 7 optimizer """adam""" +324 7 training_loop """owa""" +324 7 negative_sampler """basic""" +324 7 evaluator """rankbased""" +324 8 dataset """kinships""" +324 8 model """hole""" +324 8 loss """nssa""" +324 8 regularizer """no""" +324 8 optimizer """adam""" +324 8 training_loop """owa""" +324 8 negative_sampler """basic""" +324 8 evaluator """rankbased""" +324 9 dataset """kinships""" +324 9 model """hole""" +324 9 loss """nssa""" +324 9 regularizer """no""" +324 9 optimizer """adam""" +324 9 training_loop """owa""" +324 9 negative_sampler """basic""" +324 9 evaluator """rankbased""" +324 10 dataset """kinships""" +324 10 model """hole""" +324 10 loss """nssa""" +324 10 regularizer """no""" +324 10 optimizer """adam""" +324 10 training_loop """owa""" +324 10 negative_sampler """basic""" +324 10 evaluator """rankbased""" +324 
11 dataset """kinships""" +324 11 model """hole""" +324 11 loss """nssa""" +324 11 regularizer """no""" +324 11 optimizer """adam""" +324 11 training_loop """owa""" +324 11 negative_sampler """basic""" +324 11 evaluator """rankbased""" +324 12 dataset """kinships""" +324 12 model """hole""" +324 12 loss """nssa""" +324 12 regularizer """no""" +324 12 optimizer """adam""" +324 12 training_loop """owa""" +324 12 negative_sampler """basic""" +324 12 evaluator """rankbased""" +324 13 dataset """kinships""" +324 13 model """hole""" +324 13 loss """nssa""" +324 13 regularizer """no""" +324 13 optimizer """adam""" +324 13 training_loop """owa""" +324 13 negative_sampler """basic""" +324 13 evaluator """rankbased""" +324 14 dataset """kinships""" +324 14 model """hole""" +324 14 loss """nssa""" +324 14 regularizer """no""" +324 14 optimizer """adam""" +324 14 training_loop """owa""" +324 14 negative_sampler """basic""" +324 14 evaluator """rankbased""" +324 15 dataset """kinships""" +324 15 model """hole""" +324 15 loss """nssa""" +324 15 regularizer """no""" +324 15 optimizer """adam""" +324 15 training_loop """owa""" +324 15 negative_sampler """basic""" +324 15 evaluator """rankbased""" +324 16 dataset """kinships""" +324 16 model """hole""" +324 16 loss """nssa""" +324 16 regularizer """no""" +324 16 optimizer """adam""" +324 16 training_loop """owa""" +324 16 negative_sampler """basic""" +324 16 evaluator """rankbased""" +324 17 dataset """kinships""" +324 17 model """hole""" +324 17 loss """nssa""" +324 17 regularizer """no""" +324 17 optimizer """adam""" +324 17 training_loop """owa""" +324 17 negative_sampler """basic""" +324 17 evaluator """rankbased""" +324 18 dataset """kinships""" +324 18 model """hole""" +324 18 loss """nssa""" +324 18 regularizer """no""" +324 18 optimizer """adam""" +324 18 training_loop """owa""" +324 18 negative_sampler """basic""" +324 18 evaluator """rankbased""" +324 19 dataset """kinships""" +324 19 model """hole""" +324 19 loss 
"""nssa""" +324 19 regularizer """no""" +324 19 optimizer """adam""" +324 19 training_loop """owa""" +324 19 negative_sampler """basic""" +324 19 evaluator """rankbased""" +324 20 dataset """kinships""" +324 20 model """hole""" +324 20 loss """nssa""" +324 20 regularizer """no""" +324 20 optimizer """adam""" +324 20 training_loop """owa""" +324 20 negative_sampler """basic""" +324 20 evaluator """rankbased""" +324 21 dataset """kinships""" +324 21 model """hole""" +324 21 loss """nssa""" +324 21 regularizer """no""" +324 21 optimizer """adam""" +324 21 training_loop """owa""" +324 21 negative_sampler """basic""" +324 21 evaluator """rankbased""" +324 22 dataset """kinships""" +324 22 model """hole""" +324 22 loss """nssa""" +324 22 regularizer """no""" +324 22 optimizer """adam""" +324 22 training_loop """owa""" +324 22 negative_sampler """basic""" +324 22 evaluator """rankbased""" +324 23 dataset """kinships""" +324 23 model """hole""" +324 23 loss """nssa""" +324 23 regularizer """no""" +324 23 optimizer """adam""" +324 23 training_loop """owa""" +324 23 negative_sampler """basic""" +324 23 evaluator """rankbased""" +324 24 dataset """kinships""" +324 24 model """hole""" +324 24 loss """nssa""" +324 24 regularizer """no""" +324 24 optimizer """adam""" +324 24 training_loop """owa""" +324 24 negative_sampler """basic""" +324 24 evaluator """rankbased""" +324 25 dataset """kinships""" +324 25 model """hole""" +324 25 loss """nssa""" +324 25 regularizer """no""" +324 25 optimizer """adam""" +324 25 training_loop """owa""" +324 25 negative_sampler """basic""" +324 25 evaluator """rankbased""" +324 26 dataset """kinships""" +324 26 model """hole""" +324 26 loss """nssa""" +324 26 regularizer """no""" +324 26 optimizer """adam""" +324 26 training_loop """owa""" +324 26 negative_sampler """basic""" +324 26 evaluator """rankbased""" +324 27 dataset """kinships""" +324 27 model """hole""" +324 27 loss """nssa""" +324 27 regularizer """no""" +324 27 optimizer """adam""" 
+324 27 training_loop """owa""" +324 27 negative_sampler """basic""" +324 27 evaluator """rankbased""" +324 28 dataset """kinships""" +324 28 model """hole""" +324 28 loss """nssa""" +324 28 regularizer """no""" +324 28 optimizer """adam""" +324 28 training_loop """owa""" +324 28 negative_sampler """basic""" +324 28 evaluator """rankbased""" +324 29 dataset """kinships""" +324 29 model """hole""" +324 29 loss """nssa""" +324 29 regularizer """no""" +324 29 optimizer """adam""" +324 29 training_loop """owa""" +324 29 negative_sampler """basic""" +324 29 evaluator """rankbased""" +324 30 dataset """kinships""" +324 30 model """hole""" +324 30 loss """nssa""" +324 30 regularizer """no""" +324 30 optimizer """adam""" +324 30 training_loop """owa""" +324 30 negative_sampler """basic""" +324 30 evaluator """rankbased""" +324 31 dataset """kinships""" +324 31 model """hole""" +324 31 loss """nssa""" +324 31 regularizer """no""" +324 31 optimizer """adam""" +324 31 training_loop """owa""" +324 31 negative_sampler """basic""" +324 31 evaluator """rankbased""" +324 32 dataset """kinships""" +324 32 model """hole""" +324 32 loss """nssa""" +324 32 regularizer """no""" +324 32 optimizer """adam""" +324 32 training_loop """owa""" +324 32 negative_sampler """basic""" +324 32 evaluator """rankbased""" +324 33 dataset """kinships""" +324 33 model """hole""" +324 33 loss """nssa""" +324 33 regularizer """no""" +324 33 optimizer """adam""" +324 33 training_loop """owa""" +324 33 negative_sampler """basic""" +324 33 evaluator """rankbased""" +324 34 dataset """kinships""" +324 34 model """hole""" +324 34 loss """nssa""" +324 34 regularizer """no""" +324 34 optimizer """adam""" +324 34 training_loop """owa""" +324 34 negative_sampler """basic""" +324 34 evaluator """rankbased""" +324 35 dataset """kinships""" +324 35 model """hole""" +324 35 loss """nssa""" +324 35 regularizer """no""" +324 35 optimizer """adam""" +324 35 training_loop """owa""" +324 35 negative_sampler """basic""" 
+324 35 evaluator """rankbased""" +324 36 dataset """kinships""" +324 36 model """hole""" +324 36 loss """nssa""" +324 36 regularizer """no""" +324 36 optimizer """adam""" +324 36 training_loop """owa""" +324 36 negative_sampler """basic""" +324 36 evaluator """rankbased""" +324 37 dataset """kinships""" +324 37 model """hole""" +324 37 loss """nssa""" +324 37 regularizer """no""" +324 37 optimizer """adam""" +324 37 training_loop """owa""" +324 37 negative_sampler """basic""" +324 37 evaluator """rankbased""" +324 38 dataset """kinships""" +324 38 model """hole""" +324 38 loss """nssa""" +324 38 regularizer """no""" +324 38 optimizer """adam""" +324 38 training_loop """owa""" +324 38 negative_sampler """basic""" +324 38 evaluator """rankbased""" +324 39 dataset """kinships""" +324 39 model """hole""" +324 39 loss """nssa""" +324 39 regularizer """no""" +324 39 optimizer """adam""" +324 39 training_loop """owa""" +324 39 negative_sampler """basic""" +324 39 evaluator """rankbased""" +324 40 dataset """kinships""" +324 40 model """hole""" +324 40 loss """nssa""" +324 40 regularizer """no""" +324 40 optimizer """adam""" +324 40 training_loop """owa""" +324 40 negative_sampler """basic""" +324 40 evaluator """rankbased""" +324 41 dataset """kinships""" +324 41 model """hole""" +324 41 loss """nssa""" +324 41 regularizer """no""" +324 41 optimizer """adam""" +324 41 training_loop """owa""" +324 41 negative_sampler """basic""" +324 41 evaluator """rankbased""" +324 42 dataset """kinships""" +324 42 model """hole""" +324 42 loss """nssa""" +324 42 regularizer """no""" +324 42 optimizer """adam""" +324 42 training_loop """owa""" +324 42 negative_sampler """basic""" +324 42 evaluator """rankbased""" +324 43 dataset """kinships""" +324 43 model """hole""" +324 43 loss """nssa""" +324 43 regularizer """no""" +324 43 optimizer """adam""" +324 43 training_loop """owa""" +324 43 negative_sampler """basic""" +324 43 evaluator """rankbased""" +324 44 dataset """kinships""" +324 
44 model """hole""" +324 44 loss """nssa""" +324 44 regularizer """no""" +324 44 optimizer """adam""" +324 44 training_loop """owa""" +324 44 negative_sampler """basic""" +324 44 evaluator """rankbased""" +324 45 dataset """kinships""" +324 45 model """hole""" +324 45 loss """nssa""" +324 45 regularizer """no""" +324 45 optimizer """adam""" +324 45 training_loop """owa""" +324 45 negative_sampler """basic""" +324 45 evaluator """rankbased""" +324 46 dataset """kinships""" +324 46 model """hole""" +324 46 loss """nssa""" +324 46 regularizer """no""" +324 46 optimizer """adam""" +324 46 training_loop """owa""" +324 46 negative_sampler """basic""" +324 46 evaluator """rankbased""" +324 47 dataset """kinships""" +324 47 model """hole""" +324 47 loss """nssa""" +324 47 regularizer """no""" +324 47 optimizer """adam""" +324 47 training_loop """owa""" +324 47 negative_sampler """basic""" +324 47 evaluator """rankbased""" +324 48 dataset """kinships""" +324 48 model """hole""" +324 48 loss """nssa""" +324 48 regularizer """no""" +324 48 optimizer """adam""" +324 48 training_loop """owa""" +324 48 negative_sampler """basic""" +324 48 evaluator """rankbased""" +324 49 dataset """kinships""" +324 49 model """hole""" +324 49 loss """nssa""" +324 49 regularizer """no""" +324 49 optimizer """adam""" +324 49 training_loop """owa""" +324 49 negative_sampler """basic""" +324 49 evaluator """rankbased""" +324 50 dataset """kinships""" +324 50 model """hole""" +324 50 loss """nssa""" +324 50 regularizer """no""" +324 50 optimizer """adam""" +324 50 training_loop """owa""" +324 50 negative_sampler """basic""" +324 50 evaluator """rankbased""" +324 51 dataset """kinships""" +324 51 model """hole""" +324 51 loss """nssa""" +324 51 regularizer """no""" +324 51 optimizer """adam""" +324 51 training_loop """owa""" +324 51 negative_sampler """basic""" +324 51 evaluator """rankbased""" +324 52 dataset """kinships""" +324 52 model """hole""" +324 52 loss """nssa""" +324 52 regularizer 
"""no""" +324 52 optimizer """adam""" +324 52 training_loop """owa""" +324 52 negative_sampler """basic""" +324 52 evaluator """rankbased""" +324 53 dataset """kinships""" +324 53 model """hole""" +324 53 loss """nssa""" +324 53 regularizer """no""" +324 53 optimizer """adam""" +324 53 training_loop """owa""" +324 53 negative_sampler """basic""" +324 53 evaluator """rankbased""" +324 54 dataset """kinships""" +324 54 model """hole""" +324 54 loss """nssa""" +324 54 regularizer """no""" +324 54 optimizer """adam""" +324 54 training_loop """owa""" +324 54 negative_sampler """basic""" +324 54 evaluator """rankbased""" +324 55 dataset """kinships""" +324 55 model """hole""" +324 55 loss """nssa""" +324 55 regularizer """no""" +324 55 optimizer """adam""" +324 55 training_loop """owa""" +324 55 negative_sampler """basic""" +324 55 evaluator """rankbased""" +324 56 dataset """kinships""" +324 56 model """hole""" +324 56 loss """nssa""" +324 56 regularizer """no""" +324 56 optimizer """adam""" +324 56 training_loop """owa""" +324 56 negative_sampler """basic""" +324 56 evaluator """rankbased""" +324 57 dataset """kinships""" +324 57 model """hole""" +324 57 loss """nssa""" +324 57 regularizer """no""" +324 57 optimizer """adam""" +324 57 training_loop """owa""" +324 57 negative_sampler """basic""" +324 57 evaluator """rankbased""" +324 58 dataset """kinships""" +324 58 model """hole""" +324 58 loss """nssa""" +324 58 regularizer """no""" +324 58 optimizer """adam""" +324 58 training_loop """owa""" +324 58 negative_sampler """basic""" +324 58 evaluator """rankbased""" +324 59 dataset """kinships""" +324 59 model """hole""" +324 59 loss """nssa""" +324 59 regularizer """no""" +324 59 optimizer """adam""" +324 59 training_loop """owa""" +324 59 negative_sampler """basic""" +324 59 evaluator """rankbased""" +324 60 dataset """kinships""" +324 60 model """hole""" +324 60 loss """nssa""" +324 60 regularizer """no""" +324 60 optimizer """adam""" +324 60 training_loop """owa""" 
+324 60 negative_sampler """basic""" +324 60 evaluator """rankbased""" +324 61 dataset """kinships""" +324 61 model """hole""" +324 61 loss """nssa""" +324 61 regularizer """no""" +324 61 optimizer """adam""" +324 61 training_loop """owa""" +324 61 negative_sampler """basic""" +324 61 evaluator """rankbased""" +324 62 dataset """kinships""" +324 62 model """hole""" +324 62 loss """nssa""" +324 62 regularizer """no""" +324 62 optimizer """adam""" +324 62 training_loop """owa""" +324 62 negative_sampler """basic""" +324 62 evaluator """rankbased""" +324 63 dataset """kinships""" +324 63 model """hole""" +324 63 loss """nssa""" +324 63 regularizer """no""" +324 63 optimizer """adam""" +324 63 training_loop """owa""" +324 63 negative_sampler """basic""" +324 63 evaluator """rankbased""" +324 64 dataset """kinships""" +324 64 model """hole""" +324 64 loss """nssa""" +324 64 regularizer """no""" +324 64 optimizer """adam""" +324 64 training_loop """owa""" +324 64 negative_sampler """basic""" +324 64 evaluator """rankbased""" +324 65 dataset """kinships""" +324 65 model """hole""" +324 65 loss """nssa""" +324 65 regularizer """no""" +324 65 optimizer """adam""" +324 65 training_loop """owa""" +324 65 negative_sampler """basic""" +324 65 evaluator """rankbased""" +324 66 dataset """kinships""" +324 66 model """hole""" +324 66 loss """nssa""" +324 66 regularizer """no""" +324 66 optimizer """adam""" +324 66 training_loop """owa""" +324 66 negative_sampler """basic""" +324 66 evaluator """rankbased""" +324 67 dataset """kinships""" +324 67 model """hole""" +324 67 loss """nssa""" +324 67 regularizer """no""" +324 67 optimizer """adam""" +324 67 training_loop """owa""" +324 67 negative_sampler """basic""" +324 67 evaluator """rankbased""" +324 68 dataset """kinships""" +324 68 model """hole""" +324 68 loss """nssa""" +324 68 regularizer """no""" +324 68 optimizer """adam""" +324 68 training_loop """owa""" +324 68 negative_sampler """basic""" +324 68 evaluator """rankbased""" 
+324 69 dataset """kinships""" +324 69 model """hole""" +324 69 loss """nssa""" +324 69 regularizer """no""" +324 69 optimizer """adam""" +324 69 training_loop """owa""" +324 69 negative_sampler """basic""" +324 69 evaluator """rankbased""" +324 70 dataset """kinships""" +324 70 model """hole""" +324 70 loss """nssa""" +324 70 regularizer """no""" +324 70 optimizer """adam""" +324 70 training_loop """owa""" +324 70 negative_sampler """basic""" +324 70 evaluator """rankbased""" +324 71 dataset """kinships""" +324 71 model """hole""" +324 71 loss """nssa""" +324 71 regularizer """no""" +324 71 optimizer """adam""" +324 71 training_loop """owa""" +324 71 negative_sampler """basic""" +324 71 evaluator """rankbased""" +324 72 dataset """kinships""" +324 72 model """hole""" +324 72 loss """nssa""" +324 72 regularizer """no""" +324 72 optimizer """adam""" +324 72 training_loop """owa""" +324 72 negative_sampler """basic""" +324 72 evaluator """rankbased""" +324 73 dataset """kinships""" +324 73 model """hole""" +324 73 loss """nssa""" +324 73 regularizer """no""" +324 73 optimizer """adam""" +324 73 training_loop """owa""" +324 73 negative_sampler """basic""" +324 73 evaluator """rankbased""" +324 74 dataset """kinships""" +324 74 model """hole""" +324 74 loss """nssa""" +324 74 regularizer """no""" +324 74 optimizer """adam""" +324 74 training_loop """owa""" +324 74 negative_sampler """basic""" +324 74 evaluator """rankbased""" +324 75 dataset """kinships""" +324 75 model """hole""" +324 75 loss """nssa""" +324 75 regularizer """no""" +324 75 optimizer """adam""" +324 75 training_loop """owa""" +324 75 negative_sampler """basic""" +324 75 evaluator """rankbased""" +324 76 dataset """kinships""" +324 76 model """hole""" +324 76 loss """nssa""" +324 76 regularizer """no""" +324 76 optimizer """adam""" +324 76 training_loop """owa""" +324 76 negative_sampler """basic""" +324 76 evaluator """rankbased""" +324 77 dataset """kinships""" +324 77 model """hole""" +324 77 loss 
"""nssa""" +324 77 regularizer """no""" +324 77 optimizer """adam""" +324 77 training_loop """owa""" +324 77 negative_sampler """basic""" +324 77 evaluator """rankbased""" +324 78 dataset """kinships""" +324 78 model """hole""" +324 78 loss """nssa""" +324 78 regularizer """no""" +324 78 optimizer """adam""" +324 78 training_loop """owa""" +324 78 negative_sampler """basic""" +324 78 evaluator """rankbased""" +324 79 dataset """kinships""" +324 79 model """hole""" +324 79 loss """nssa""" +324 79 regularizer """no""" +324 79 optimizer """adam""" +324 79 training_loop """owa""" +324 79 negative_sampler """basic""" +324 79 evaluator """rankbased""" +324 80 dataset """kinships""" +324 80 model """hole""" +324 80 loss """nssa""" +324 80 regularizer """no""" +324 80 optimizer """adam""" +324 80 training_loop """owa""" +324 80 negative_sampler """basic""" +324 80 evaluator """rankbased""" +324 81 dataset """kinships""" +324 81 model """hole""" +324 81 loss """nssa""" +324 81 regularizer """no""" +324 81 optimizer """adam""" +324 81 training_loop """owa""" +324 81 negative_sampler """basic""" +324 81 evaluator """rankbased""" +324 82 dataset """kinships""" +324 82 model """hole""" +324 82 loss """nssa""" +324 82 regularizer """no""" +324 82 optimizer """adam""" +324 82 training_loop """owa""" +324 82 negative_sampler """basic""" +324 82 evaluator """rankbased""" +324 83 dataset """kinships""" +324 83 model """hole""" +324 83 loss """nssa""" +324 83 regularizer """no""" +324 83 optimizer """adam""" +324 83 training_loop """owa""" +324 83 negative_sampler """basic""" +324 83 evaluator """rankbased""" +324 84 dataset """kinships""" +324 84 model """hole""" +324 84 loss """nssa""" +324 84 regularizer """no""" +324 84 optimizer """adam""" +324 84 training_loop """owa""" +324 84 negative_sampler """basic""" +324 84 evaluator """rankbased""" +324 85 dataset """kinships""" +324 85 model """hole""" +324 85 loss """nssa""" +324 85 regularizer """no""" +324 85 optimizer """adam""" 
+324 85 training_loop """owa""" +324 85 negative_sampler """basic""" +324 85 evaluator """rankbased""" +324 86 dataset """kinships""" +324 86 model """hole""" +324 86 loss """nssa""" +324 86 regularizer """no""" +324 86 optimizer """adam""" +324 86 training_loop """owa""" +324 86 negative_sampler """basic""" +324 86 evaluator """rankbased""" +324 87 dataset """kinships""" +324 87 model """hole""" +324 87 loss """nssa""" +324 87 regularizer """no""" +324 87 optimizer """adam""" +324 87 training_loop """owa""" +324 87 negative_sampler """basic""" +324 87 evaluator """rankbased""" +324 88 dataset """kinships""" +324 88 model """hole""" +324 88 loss """nssa""" +324 88 regularizer """no""" +324 88 optimizer """adam""" +324 88 training_loop """owa""" +324 88 negative_sampler """basic""" +324 88 evaluator """rankbased""" +324 89 dataset """kinships""" +324 89 model """hole""" +324 89 loss """nssa""" +324 89 regularizer """no""" +324 89 optimizer """adam""" +324 89 training_loop """owa""" +324 89 negative_sampler """basic""" +324 89 evaluator """rankbased""" +324 90 dataset """kinships""" +324 90 model """hole""" +324 90 loss """nssa""" +324 90 regularizer """no""" +324 90 optimizer """adam""" +324 90 training_loop """owa""" +324 90 negative_sampler """basic""" +324 90 evaluator """rankbased""" +324 91 dataset """kinships""" +324 91 model """hole""" +324 91 loss """nssa""" +324 91 regularizer """no""" +324 91 optimizer """adam""" +324 91 training_loop """owa""" +324 91 negative_sampler """basic""" +324 91 evaluator """rankbased""" +324 92 dataset """kinships""" +324 92 model """hole""" +324 92 loss """nssa""" +324 92 regularizer """no""" +324 92 optimizer """adam""" +324 92 training_loop """owa""" +324 92 negative_sampler """basic""" +324 92 evaluator """rankbased""" +324 93 dataset """kinships""" +324 93 model """hole""" +324 93 loss """nssa""" +324 93 regularizer """no""" +324 93 optimizer """adam""" +324 93 training_loop """owa""" +324 93 negative_sampler """basic""" 
+324 93 evaluator """rankbased""" +324 94 dataset """kinships""" +324 94 model """hole""" +324 94 loss """nssa""" +324 94 regularizer """no""" +324 94 optimizer """adam""" +324 94 training_loop """owa""" +324 94 negative_sampler """basic""" +324 94 evaluator """rankbased""" +324 95 dataset """kinships""" +324 95 model """hole""" +324 95 loss """nssa""" +324 95 regularizer """no""" +324 95 optimizer """adam""" +324 95 training_loop """owa""" +324 95 negative_sampler """basic""" +324 95 evaluator """rankbased""" +324 96 dataset """kinships""" +324 96 model """hole""" +324 96 loss """nssa""" +324 96 regularizer """no""" +324 96 optimizer """adam""" +324 96 training_loop """owa""" +324 96 negative_sampler """basic""" +324 96 evaluator """rankbased""" +324 97 dataset """kinships""" +324 97 model """hole""" +324 97 loss """nssa""" +324 97 regularizer """no""" +324 97 optimizer """adam""" +324 97 training_loop """owa""" +324 97 negative_sampler """basic""" +324 97 evaluator """rankbased""" +324 98 dataset """kinships""" +324 98 model """hole""" +324 98 loss """nssa""" +324 98 regularizer """no""" +324 98 optimizer """adam""" +324 98 training_loop """owa""" +324 98 negative_sampler """basic""" +324 98 evaluator """rankbased""" +324 99 dataset """kinships""" +324 99 model """hole""" +324 99 loss """nssa""" +324 99 regularizer """no""" +324 99 optimizer """adam""" +324 99 training_loop """owa""" +324 99 negative_sampler """basic""" +324 99 evaluator """rankbased""" +324 100 dataset """kinships""" +324 100 model """hole""" +324 100 loss """nssa""" +324 100 regularizer """no""" +324 100 optimizer """adam""" +324 100 training_loop """owa""" +324 100 negative_sampler """basic""" +324 100 evaluator """rankbased""" +325 1 model.embedding_dim 2.0 +325 1 loss.margin 5.796342164969061 +325 1 loss.adversarial_temperature 0.6317270932542228 +325 1 optimizer.lr 0.01178479236405712 +325 1 negative_sampler.num_negs_per_pos 45.0 +325 1 training.batch_size 0.0 +325 2 model.embedding_dim 1.0 
+325 2 loss.margin 2.824056657303286 +325 2 loss.adversarial_temperature 0.5032174528681492 +325 2 optimizer.lr 0.028374844455533836 +325 2 negative_sampler.num_negs_per_pos 50.0 +325 2 training.batch_size 1.0 +325 3 model.embedding_dim 1.0 +325 3 loss.margin 23.11975128201705 +325 3 loss.adversarial_temperature 0.4747751959762341 +325 3 optimizer.lr 0.013007503283182105 +325 3 negative_sampler.num_negs_per_pos 87.0 +325 3 training.batch_size 0.0 +325 4 model.embedding_dim 2.0 +325 4 loss.margin 21.885035074652517 +325 4 loss.adversarial_temperature 0.18661639705888206 +325 4 optimizer.lr 0.09146704429369713 +325 4 negative_sampler.num_negs_per_pos 63.0 +325 4 training.batch_size 2.0 +325 5 model.embedding_dim 0.0 +325 5 loss.margin 5.1419954623536155 +325 5 loss.adversarial_temperature 0.7949030756588333 +325 5 optimizer.lr 0.013792579073293685 +325 5 negative_sampler.num_negs_per_pos 57.0 +325 5 training.batch_size 1.0 +325 6 model.embedding_dim 0.0 +325 6 loss.margin 14.205716968545044 +325 6 loss.adversarial_temperature 0.8881412163363867 +325 6 optimizer.lr 0.03946808196858093 +325 6 negative_sampler.num_negs_per_pos 51.0 +325 6 training.batch_size 2.0 +325 7 model.embedding_dim 1.0 +325 7 loss.margin 12.230016294562258 +325 7 loss.adversarial_temperature 0.33209714015761016 +325 7 optimizer.lr 0.0036768154521407696 +325 7 negative_sampler.num_negs_per_pos 61.0 +325 7 training.batch_size 2.0 +325 8 model.embedding_dim 0.0 +325 8 loss.margin 16.237487103571972 +325 8 loss.adversarial_temperature 0.514750169747784 +325 8 optimizer.lr 0.002879479381965779 +325 8 negative_sampler.num_negs_per_pos 45.0 +325 8 training.batch_size 1.0 +325 9 model.embedding_dim 2.0 +325 9 loss.margin 21.63826967582683 +325 9 loss.adversarial_temperature 0.7338486084474369 +325 9 optimizer.lr 0.005068326957744439 +325 9 negative_sampler.num_negs_per_pos 66.0 +325 9 training.batch_size 2.0 +325 10 model.embedding_dim 1.0 +325 10 loss.margin 12.03363640113132 +325 10 
loss.adversarial_temperature 0.20310110440424245 +325 10 optimizer.lr 0.0032221735862302645 +325 10 negative_sampler.num_negs_per_pos 3.0 +325 10 training.batch_size 1.0 +325 11 model.embedding_dim 2.0 +325 11 loss.margin 9.812923018158315 +325 11 loss.adversarial_temperature 0.2747814489938047 +325 11 optimizer.lr 0.0015599047207273792 +325 11 negative_sampler.num_negs_per_pos 68.0 +325 11 training.batch_size 2.0 +325 12 model.embedding_dim 1.0 +325 12 loss.margin 8.508235741114401 +325 12 loss.adversarial_temperature 0.9017099452926357 +325 12 optimizer.lr 0.05021608811303724 +325 12 negative_sampler.num_negs_per_pos 71.0 +325 12 training.batch_size 2.0 +325 13 model.embedding_dim 2.0 +325 13 loss.margin 5.288581174127195 +325 13 loss.adversarial_temperature 0.4414636468389521 +325 13 optimizer.lr 0.0010549390723091565 +325 13 negative_sampler.num_negs_per_pos 86.0 +325 13 training.batch_size 0.0 +325 14 model.embedding_dim 1.0 +325 14 loss.margin 22.154394553586698 +325 14 loss.adversarial_temperature 0.7723745040567476 +325 14 optimizer.lr 0.01859884632124285 +325 14 negative_sampler.num_negs_per_pos 27.0 +325 14 training.batch_size 1.0 +325 15 model.embedding_dim 2.0 +325 15 loss.margin 9.69404625827915 +325 15 loss.adversarial_temperature 0.6584265885014323 +325 15 optimizer.lr 0.003705876306332746 +325 15 negative_sampler.num_negs_per_pos 96.0 +325 15 training.batch_size 1.0 +325 16 model.embedding_dim 0.0 +325 16 loss.margin 6.096525224364493 +325 16 loss.adversarial_temperature 0.5195777021848788 +325 16 optimizer.lr 0.002402074475419574 +325 16 negative_sampler.num_negs_per_pos 47.0 +325 16 training.batch_size 0.0 +325 17 model.embedding_dim 0.0 +325 17 loss.margin 24.871690868672573 +325 17 loss.adversarial_temperature 0.7572232977813079 +325 17 optimizer.lr 0.002223156618322421 +325 17 negative_sampler.num_negs_per_pos 71.0 +325 17 training.batch_size 1.0 +325 18 model.embedding_dim 2.0 +325 18 loss.margin 3.2015558093986107 +325 18 
loss.adversarial_temperature 0.8774241509601786 +325 18 optimizer.lr 0.002494482777218974 +325 18 negative_sampler.num_negs_per_pos 96.0 +325 18 training.batch_size 1.0 +325 19 model.embedding_dim 0.0 +325 19 loss.margin 24.520443193364645 +325 19 loss.adversarial_temperature 0.949045023948389 +325 19 optimizer.lr 0.003849905687190005 +325 19 negative_sampler.num_negs_per_pos 85.0 +325 19 training.batch_size 1.0 +325 20 model.embedding_dim 2.0 +325 20 loss.margin 21.67331693961936 +325 20 loss.adversarial_temperature 0.3781834284561496 +325 20 optimizer.lr 0.042514149388141305 +325 20 negative_sampler.num_negs_per_pos 69.0 +325 20 training.batch_size 2.0 +325 21 model.embedding_dim 2.0 +325 21 loss.margin 5.067650539393808 +325 21 loss.adversarial_temperature 0.7650162030903699 +325 21 optimizer.lr 0.0015472647042928474 +325 21 negative_sampler.num_negs_per_pos 23.0 +325 21 training.batch_size 0.0 +325 22 model.embedding_dim 2.0 +325 22 loss.margin 20.524944065735603 +325 22 loss.adversarial_temperature 0.7845812536111968 +325 22 optimizer.lr 0.05753633214170592 +325 22 negative_sampler.num_negs_per_pos 58.0 +325 22 training.batch_size 2.0 +325 23 model.embedding_dim 2.0 +325 23 loss.margin 17.65351099877607 +325 23 loss.adversarial_temperature 0.8327722524321792 +325 23 optimizer.lr 0.006903516312060826 +325 23 negative_sampler.num_negs_per_pos 43.0 +325 23 training.batch_size 0.0 +325 24 model.embedding_dim 0.0 +325 24 loss.margin 12.748379140220713 +325 24 loss.adversarial_temperature 0.1272305024279324 +325 24 optimizer.lr 0.08513058519067594 +325 24 negative_sampler.num_negs_per_pos 34.0 +325 24 training.batch_size 0.0 +325 25 model.embedding_dim 2.0 +325 25 loss.margin 27.884238330587273 +325 25 loss.adversarial_temperature 0.957487263765273 +325 25 optimizer.lr 0.01800040426612431 +325 25 negative_sampler.num_negs_per_pos 87.0 +325 25 training.batch_size 1.0 +325 26 model.embedding_dim 2.0 +325 26 loss.margin 23.555853459084037 +325 26 
loss.adversarial_temperature 0.4069952736452117 +325 26 optimizer.lr 0.002213873524426463 +325 26 negative_sampler.num_negs_per_pos 16.0 +325 26 training.batch_size 1.0 +325 27 model.embedding_dim 2.0 +325 27 loss.margin 28.94923355115479 +325 27 loss.adversarial_temperature 0.8433153042866768 +325 27 optimizer.lr 0.0027405968922049133 +325 27 negative_sampler.num_negs_per_pos 20.0 +325 27 training.batch_size 2.0 +325 28 model.embedding_dim 0.0 +325 28 loss.margin 22.431451707222777 +325 28 loss.adversarial_temperature 0.6819061774741472 +325 28 optimizer.lr 0.04991920974012578 +325 28 negative_sampler.num_negs_per_pos 94.0 +325 28 training.batch_size 2.0 +325 29 model.embedding_dim 1.0 +325 29 loss.margin 15.932609585766823 +325 29 loss.adversarial_temperature 0.4308587921559196 +325 29 optimizer.lr 0.004497720040120512 +325 29 negative_sampler.num_negs_per_pos 73.0 +325 29 training.batch_size 2.0 +325 30 model.embedding_dim 0.0 +325 30 loss.margin 6.687463747018974 +325 30 loss.adversarial_temperature 0.38056420118400935 +325 30 optimizer.lr 0.0012105917817006103 +325 30 negative_sampler.num_negs_per_pos 70.0 +325 30 training.batch_size 1.0 +325 31 model.embedding_dim 1.0 +325 31 loss.margin 28.133655303335324 +325 31 loss.adversarial_temperature 0.21732943048574843 +325 31 optimizer.lr 0.017828246417573447 +325 31 negative_sampler.num_negs_per_pos 59.0 +325 31 training.batch_size 2.0 +325 32 model.embedding_dim 1.0 +325 32 loss.margin 3.8979196980869517 +325 32 loss.adversarial_temperature 0.41797880267243204 +325 32 optimizer.lr 0.03548138413024986 +325 32 negative_sampler.num_negs_per_pos 96.0 +325 32 training.batch_size 2.0 +325 33 model.embedding_dim 0.0 +325 33 loss.margin 15.289079072061266 +325 33 loss.adversarial_temperature 0.6689949294403161 +325 33 optimizer.lr 0.0011582694109854572 +325 33 negative_sampler.num_negs_per_pos 40.0 +325 33 training.batch_size 2.0 +325 34 model.embedding_dim 2.0 +325 34 loss.margin 7.520354232312173 +325 34 
loss.adversarial_temperature 0.5940728673726563 +325 34 optimizer.lr 0.05263539012577181 +325 34 negative_sampler.num_negs_per_pos 75.0 +325 34 training.batch_size 2.0 +325 35 model.embedding_dim 1.0 +325 35 loss.margin 4.506689432125077 +325 35 loss.adversarial_temperature 0.5267702454542438 +325 35 optimizer.lr 0.002189745846697864 +325 35 negative_sampler.num_negs_per_pos 68.0 +325 35 training.batch_size 2.0 +325 36 model.embedding_dim 2.0 +325 36 loss.margin 24.792900273739743 +325 36 loss.adversarial_temperature 0.7610569029410017 +325 36 optimizer.lr 0.03731473470341755 +325 36 negative_sampler.num_negs_per_pos 33.0 +325 36 training.batch_size 1.0 +325 37 model.embedding_dim 2.0 +325 37 loss.margin 9.417453736925017 +325 37 loss.adversarial_temperature 0.5451983750973883 +325 37 optimizer.lr 0.011505653362668445 +325 37 negative_sampler.num_negs_per_pos 44.0 +325 37 training.batch_size 0.0 +325 38 model.embedding_dim 0.0 +325 38 loss.margin 7.568746684575205 +325 38 loss.adversarial_temperature 0.853425135396674 +325 38 optimizer.lr 0.014375549408449515 +325 38 negative_sampler.num_negs_per_pos 91.0 +325 38 training.batch_size 1.0 +325 39 model.embedding_dim 1.0 +325 39 loss.margin 23.132533475003807 +325 39 loss.adversarial_temperature 0.7855116393727253 +325 39 optimizer.lr 0.08443707721254771 +325 39 negative_sampler.num_negs_per_pos 45.0 +325 39 training.batch_size 2.0 +325 40 model.embedding_dim 2.0 +325 40 loss.margin 6.70365842402502 +325 40 loss.adversarial_temperature 0.23757383126435375 +325 40 optimizer.lr 0.026537902584328206 +325 40 negative_sampler.num_negs_per_pos 81.0 +325 40 training.batch_size 0.0 +325 41 model.embedding_dim 0.0 +325 41 loss.margin 4.566226577184184 +325 41 loss.adversarial_temperature 0.9047269701344395 +325 41 optimizer.lr 0.029357113447501734 +325 41 negative_sampler.num_negs_per_pos 53.0 +325 41 training.batch_size 1.0 +325 42 model.embedding_dim 1.0 +325 42 loss.margin 2.4102015388129656 +325 42 
loss.adversarial_temperature 0.4426691778858888 +325 42 optimizer.lr 0.004955844135794481 +325 42 negative_sampler.num_negs_per_pos 51.0 +325 42 training.batch_size 1.0 +325 43 model.embedding_dim 1.0 +325 43 loss.margin 10.114428464910546 +325 43 loss.adversarial_temperature 0.7124993282691338 +325 43 optimizer.lr 0.014546885254893695 +325 43 negative_sampler.num_negs_per_pos 68.0 +325 43 training.batch_size 2.0 +325 44 model.embedding_dim 0.0 +325 44 loss.margin 15.185001488329178 +325 44 loss.adversarial_temperature 0.39861296322405737 +325 44 optimizer.lr 0.007398570677272238 +325 44 negative_sampler.num_negs_per_pos 51.0 +325 44 training.batch_size 0.0 +325 45 model.embedding_dim 2.0 +325 45 loss.margin 9.545799225082032 +325 45 loss.adversarial_temperature 0.7690787213906787 +325 45 optimizer.lr 0.0072012110159684305 +325 45 negative_sampler.num_negs_per_pos 55.0 +325 45 training.batch_size 2.0 +325 46 model.embedding_dim 1.0 +325 46 loss.margin 12.747335137695753 +325 46 loss.adversarial_temperature 0.5785367058193076 +325 46 optimizer.lr 0.00717789498618456 +325 46 negative_sampler.num_negs_per_pos 67.0 +325 46 training.batch_size 2.0 +325 47 model.embedding_dim 1.0 +325 47 loss.margin 22.387821425526546 +325 47 loss.adversarial_temperature 0.5506084420906822 +325 47 optimizer.lr 0.05297638646651693 +325 47 negative_sampler.num_negs_per_pos 71.0 +325 47 training.batch_size 0.0 +325 48 model.embedding_dim 1.0 +325 48 loss.margin 10.464586242406138 +325 48 loss.adversarial_temperature 0.7434884032130051 +325 48 optimizer.lr 0.005297102293065569 +325 48 negative_sampler.num_negs_per_pos 25.0 +325 48 training.batch_size 2.0 +325 49 model.embedding_dim 2.0 +325 49 loss.margin 9.315934864673867 +325 49 loss.adversarial_temperature 0.8712779793652755 +325 49 optimizer.lr 0.07137370330743989 +325 49 negative_sampler.num_negs_per_pos 95.0 +325 49 training.batch_size 1.0 +325 50 model.embedding_dim 2.0 +325 50 loss.margin 24.32079625950869 +325 50 
loss.adversarial_temperature 0.2633874454819407 +325 50 optimizer.lr 0.0046530827058948645 +325 50 negative_sampler.num_negs_per_pos 51.0 +325 50 training.batch_size 1.0 +325 51 model.embedding_dim 0.0 +325 51 loss.margin 14.850575331765526 +325 51 loss.adversarial_temperature 0.32243545694327 +325 51 optimizer.lr 0.07339580560649923 +325 51 negative_sampler.num_negs_per_pos 36.0 +325 51 training.batch_size 2.0 +325 52 model.embedding_dim 2.0 +325 52 loss.margin 13.93152085467836 +325 52 loss.adversarial_temperature 0.7966570398778765 +325 52 optimizer.lr 0.026628035892235846 +325 52 negative_sampler.num_negs_per_pos 97.0 +325 52 training.batch_size 1.0 +325 53 model.embedding_dim 0.0 +325 53 loss.margin 14.713666227577356 +325 53 loss.adversarial_temperature 0.9601000051742992 +325 53 optimizer.lr 0.006600169219012035 +325 53 negative_sampler.num_negs_per_pos 40.0 +325 53 training.batch_size 1.0 +325 54 model.embedding_dim 2.0 +325 54 loss.margin 17.86014320934844 +325 54 loss.adversarial_temperature 0.8859398530736343 +325 54 optimizer.lr 0.050700512508333656 +325 54 negative_sampler.num_negs_per_pos 14.0 +325 54 training.batch_size 0.0 +325 55 model.embedding_dim 2.0 +325 55 loss.margin 2.6093586752281825 +325 55 loss.adversarial_temperature 0.9473581477003079 +325 55 optimizer.lr 0.03745662956517375 +325 55 negative_sampler.num_negs_per_pos 17.0 +325 55 training.batch_size 2.0 +325 56 model.embedding_dim 0.0 +325 56 loss.margin 1.1378934115296002 +325 56 loss.adversarial_temperature 0.12398703537547012 +325 56 optimizer.lr 0.05228312914361707 +325 56 negative_sampler.num_negs_per_pos 14.0 +325 56 training.batch_size 0.0 +325 57 model.embedding_dim 2.0 +325 57 loss.margin 28.625678986258333 +325 57 loss.adversarial_temperature 0.666760306496091 +325 57 optimizer.lr 0.001546419664321709 +325 57 negative_sampler.num_negs_per_pos 84.0 +325 57 training.batch_size 1.0 +325 58 model.embedding_dim 1.0 +325 58 loss.margin 4.3768520542298495 +325 58 
loss.adversarial_temperature 0.6874508189454538 +325 58 optimizer.lr 0.008758724589995083 +325 58 negative_sampler.num_negs_per_pos 78.0 +325 58 training.batch_size 2.0 +325 59 model.embedding_dim 2.0 +325 59 loss.margin 20.55748366813189 +325 59 loss.adversarial_temperature 0.4158344208801258 +325 59 optimizer.lr 0.021260524706326417 +325 59 negative_sampler.num_negs_per_pos 49.0 +325 59 training.batch_size 2.0 +325 60 model.embedding_dim 2.0 +325 60 loss.margin 14.713728166533755 +325 60 loss.adversarial_temperature 0.7520157242787185 +325 60 optimizer.lr 0.004573386003691657 +325 60 negative_sampler.num_negs_per_pos 75.0 +325 60 training.batch_size 1.0 +325 61 model.embedding_dim 1.0 +325 61 loss.margin 22.284912560156297 +325 61 loss.adversarial_temperature 0.25220345437752956 +325 61 optimizer.lr 0.007114204822755887 +325 61 negative_sampler.num_negs_per_pos 43.0 +325 61 training.batch_size 1.0 +325 62 model.embedding_dim 1.0 +325 62 loss.margin 11.367696546347613 +325 62 loss.adversarial_temperature 0.5684973943944522 +325 62 optimizer.lr 0.04598042748117226 +325 62 negative_sampler.num_negs_per_pos 98.0 +325 62 training.batch_size 0.0 +325 63 model.embedding_dim 0.0 +325 63 loss.margin 21.040170485886346 +325 63 loss.adversarial_temperature 0.7298666292009227 +325 63 optimizer.lr 0.021190347036993993 +325 63 negative_sampler.num_negs_per_pos 5.0 +325 63 training.batch_size 2.0 +325 64 model.embedding_dim 2.0 +325 64 loss.margin 4.71806659171326 +325 64 loss.adversarial_temperature 0.34848220816355785 +325 64 optimizer.lr 0.004319610110629907 +325 64 negative_sampler.num_negs_per_pos 28.0 +325 64 training.batch_size 1.0 +325 65 model.embedding_dim 1.0 +325 65 loss.margin 3.9053139998637896 +325 65 loss.adversarial_temperature 0.3930038367980718 +325 65 optimizer.lr 0.001284725479168593 +325 65 negative_sampler.num_negs_per_pos 21.0 +325 65 training.batch_size 2.0 +325 66 model.embedding_dim 1.0 +325 66 loss.margin 21.7373515284874 +325 66 
loss.adversarial_temperature 0.41902801757255637 +325 66 optimizer.lr 0.029660305505049032 +325 66 negative_sampler.num_negs_per_pos 67.0 +325 66 training.batch_size 0.0 +325 67 model.embedding_dim 2.0 +325 67 loss.margin 21.418212164732097 +325 67 loss.adversarial_temperature 0.7666685470125534 +325 67 optimizer.lr 0.045627240049996146 +325 67 negative_sampler.num_negs_per_pos 49.0 +325 67 training.batch_size 2.0 +325 68 model.embedding_dim 2.0 +325 68 loss.margin 1.722555951312168 +325 68 loss.adversarial_temperature 0.8656020188353969 +325 68 optimizer.lr 0.04143253652196406 +325 68 negative_sampler.num_negs_per_pos 84.0 +325 68 training.batch_size 1.0 +325 69 model.embedding_dim 2.0 +325 69 loss.margin 8.182330378332654 +325 69 loss.adversarial_temperature 0.24182852744081293 +325 69 optimizer.lr 0.03201114504230359 +325 69 negative_sampler.num_negs_per_pos 40.0 +325 69 training.batch_size 2.0 +325 70 model.embedding_dim 0.0 +325 70 loss.margin 6.92251934505093 +325 70 loss.adversarial_temperature 0.4703609336657516 +325 70 optimizer.lr 0.0049962493148253415 +325 70 negative_sampler.num_negs_per_pos 69.0 +325 70 training.batch_size 1.0 +325 71 model.embedding_dim 2.0 +325 71 loss.margin 3.903361544045493 +325 71 loss.adversarial_temperature 0.3861732841551554 +325 71 optimizer.lr 0.009051557008512167 +325 71 negative_sampler.num_negs_per_pos 42.0 +325 71 training.batch_size 1.0 +325 72 model.embedding_dim 0.0 +325 72 loss.margin 11.625073744380794 +325 72 loss.adversarial_temperature 0.27196431366102014 +325 72 optimizer.lr 0.07257916707522387 +325 72 negative_sampler.num_negs_per_pos 81.0 +325 72 training.batch_size 0.0 +325 73 model.embedding_dim 0.0 +325 73 loss.margin 27.293662238143504 +325 73 loss.adversarial_temperature 0.5889425901168113 +325 73 optimizer.lr 0.09921165456510482 +325 73 negative_sampler.num_negs_per_pos 49.0 +325 73 training.batch_size 2.0 +325 74 model.embedding_dim 1.0 +325 74 loss.margin 19.912514847126484 +325 74 
loss.adversarial_temperature 0.36109182617921065 +325 74 optimizer.lr 0.07559830894726616 +325 74 negative_sampler.num_negs_per_pos 89.0 +325 74 training.batch_size 1.0 +325 75 model.embedding_dim 0.0 +325 75 loss.margin 26.372674717227014 +325 75 loss.adversarial_temperature 0.9467899819849439 +325 75 optimizer.lr 0.013939935355381627 +325 75 negative_sampler.num_negs_per_pos 35.0 +325 75 training.batch_size 2.0 +325 76 model.embedding_dim 2.0 +325 76 loss.margin 14.450817642675126 +325 76 loss.adversarial_temperature 0.3249925611250435 +325 76 optimizer.lr 0.008275665010218251 +325 76 negative_sampler.num_negs_per_pos 0.0 +325 76 training.batch_size 1.0 +325 77 model.embedding_dim 0.0 +325 77 loss.margin 7.919821758210951 +325 77 loss.adversarial_temperature 0.5950511642773891 +325 77 optimizer.lr 0.0017454079508358354 +325 77 negative_sampler.num_negs_per_pos 98.0 +325 77 training.batch_size 0.0 +325 78 model.embedding_dim 0.0 +325 78 loss.margin 25.116792715210536 +325 78 loss.adversarial_temperature 0.4158271841073885 +325 78 optimizer.lr 0.06723365617994669 +325 78 negative_sampler.num_negs_per_pos 50.0 +325 78 training.batch_size 0.0 +325 79 model.embedding_dim 2.0 +325 79 loss.margin 21.205864957730768 +325 79 loss.adversarial_temperature 0.32259537703174834 +325 79 optimizer.lr 0.002713437913026803 +325 79 negative_sampler.num_negs_per_pos 3.0 +325 79 training.batch_size 2.0 +325 80 model.embedding_dim 0.0 +325 80 loss.margin 17.715469225737834 +325 80 loss.adversarial_temperature 0.9594517909035399 +325 80 optimizer.lr 0.08059723972545536 +325 80 negative_sampler.num_negs_per_pos 39.0 +325 80 training.batch_size 2.0 +325 81 model.embedding_dim 0.0 +325 81 loss.margin 4.2309697755281395 +325 81 loss.adversarial_temperature 0.9233043495552554 +325 81 optimizer.lr 0.011891230597237474 +325 81 negative_sampler.num_negs_per_pos 93.0 +325 81 training.batch_size 1.0 +325 82 model.embedding_dim 0.0 +325 82 loss.margin 16.41075623330517 +325 82 
loss.adversarial_temperature 0.9744769089581802 +325 82 optimizer.lr 0.020390430752843938 +325 82 negative_sampler.num_negs_per_pos 71.0 +325 82 training.batch_size 0.0 +325 83 model.embedding_dim 1.0 +325 83 loss.margin 2.4431094617381963 +325 83 loss.adversarial_temperature 0.6731478940652809 +325 83 optimizer.lr 0.0010790141768958514 +325 83 negative_sampler.num_negs_per_pos 16.0 +325 83 training.batch_size 0.0 +325 84 model.embedding_dim 0.0 +325 84 loss.margin 16.174004658997518 +325 84 loss.adversarial_temperature 0.289419534798083 +325 84 optimizer.lr 0.002812474102124574 +325 84 negative_sampler.num_negs_per_pos 39.0 +325 84 training.batch_size 0.0 +325 85 model.embedding_dim 2.0 +325 85 loss.margin 27.73439707890351 +325 85 loss.adversarial_temperature 0.44088702238045563 +325 85 optimizer.lr 0.051021688936946516 +325 85 negative_sampler.num_negs_per_pos 61.0 +325 85 training.batch_size 2.0 +325 86 model.embedding_dim 2.0 +325 86 loss.margin 23.22570306855254 +325 86 loss.adversarial_temperature 0.25206706970120807 +325 86 optimizer.lr 0.026690088254221783 +325 86 negative_sampler.num_negs_per_pos 54.0 +325 86 training.batch_size 2.0 +325 87 model.embedding_dim 0.0 +325 87 loss.margin 6.4182213101845536 +325 87 loss.adversarial_temperature 0.2882622334188367 +325 87 optimizer.lr 0.06911583174008522 +325 87 negative_sampler.num_negs_per_pos 37.0 +325 87 training.batch_size 0.0 +325 88 model.embedding_dim 0.0 +325 88 loss.margin 28.118171941919574 +325 88 loss.adversarial_temperature 0.9737744796228803 +325 88 optimizer.lr 0.0017367552315832727 +325 88 negative_sampler.num_negs_per_pos 2.0 +325 88 training.batch_size 0.0 +325 89 model.embedding_dim 2.0 +325 89 loss.margin 25.14929241233889 +325 89 loss.adversarial_temperature 0.9350105635972412 +325 89 optimizer.lr 0.0061176246011444314 +325 89 negative_sampler.num_negs_per_pos 43.0 +325 89 training.batch_size 2.0 +325 90 model.embedding_dim 0.0 +325 90 loss.margin 16.509097410334775 +325 90 
loss.adversarial_temperature 0.20789954263742993 +325 90 optimizer.lr 0.06164404351394709 +325 90 negative_sampler.num_negs_per_pos 19.0 +325 90 training.batch_size 0.0 +325 91 model.embedding_dim 1.0 +325 91 loss.margin 10.717394144962558 +325 91 loss.adversarial_temperature 0.1749648226357915 +325 91 optimizer.lr 0.00216251704189358 +325 91 negative_sampler.num_negs_per_pos 5.0 +325 91 training.batch_size 2.0 +325 92 model.embedding_dim 2.0 +325 92 loss.margin 24.01961695742675 +325 92 loss.adversarial_temperature 0.15955764878784745 +325 92 optimizer.lr 0.0013728535557433056 +325 92 negative_sampler.num_negs_per_pos 82.0 +325 92 training.batch_size 1.0 +325 93 model.embedding_dim 2.0 +325 93 loss.margin 20.566946559026725 +325 93 loss.adversarial_temperature 0.736341363062391 +325 93 optimizer.lr 0.0011711593973325006 +325 93 negative_sampler.num_negs_per_pos 27.0 +325 93 training.batch_size 2.0 +325 94 model.embedding_dim 1.0 +325 94 loss.margin 15.623226923159972 +325 94 loss.adversarial_temperature 0.928475828399253 +325 94 optimizer.lr 0.0012123947484833483 +325 94 negative_sampler.num_negs_per_pos 13.0 +325 94 training.batch_size 0.0 +325 95 model.embedding_dim 0.0 +325 95 loss.margin 9.797635471139227 +325 95 loss.adversarial_temperature 0.9945672651828404 +325 95 optimizer.lr 0.012523868550618612 +325 95 negative_sampler.num_negs_per_pos 22.0 +325 95 training.batch_size 0.0 +325 96 model.embedding_dim 0.0 +325 96 loss.margin 5.26280864687368 +325 96 loss.adversarial_temperature 0.7438870382992653 +325 96 optimizer.lr 0.0022609824648151234 +325 96 negative_sampler.num_negs_per_pos 44.0 +325 96 training.batch_size 0.0 +325 97 model.embedding_dim 0.0 +325 97 loss.margin 25.048165697295005 +325 97 loss.adversarial_temperature 0.9743502168169077 +325 97 optimizer.lr 0.061422350165166395 +325 97 negative_sampler.num_negs_per_pos 55.0 +325 97 training.batch_size 0.0 +325 98 model.embedding_dim 0.0 +325 98 loss.margin 4.181332738417626 +325 98 
loss.adversarial_temperature 0.8570570477905651 +325 98 optimizer.lr 0.02957927151847647 +325 98 negative_sampler.num_negs_per_pos 89.0 +325 98 training.batch_size 2.0 +325 99 model.embedding_dim 2.0 +325 99 loss.margin 10.331885345198243 +325 99 loss.adversarial_temperature 0.8513701617838666 +325 99 optimizer.lr 0.0018364152503689349 +325 99 negative_sampler.num_negs_per_pos 64.0 +325 99 training.batch_size 0.0 +325 100 model.embedding_dim 2.0 +325 100 loss.margin 11.98205315870935 +325 100 loss.adversarial_temperature 0.7213232395869489 +325 100 optimizer.lr 0.012538118861365946 +325 100 negative_sampler.num_negs_per_pos 3.0 +325 100 training.batch_size 2.0 +325 1 dataset """kinships""" +325 1 model """hole""" +325 1 loss """nssa""" +325 1 regularizer """no""" +325 1 optimizer """adam""" +325 1 training_loop """owa""" +325 1 negative_sampler """basic""" +325 1 evaluator """rankbased""" +325 2 dataset """kinships""" +325 2 model """hole""" +325 2 loss """nssa""" +325 2 regularizer """no""" +325 2 optimizer """adam""" +325 2 training_loop """owa""" +325 2 negative_sampler """basic""" +325 2 evaluator """rankbased""" +325 3 dataset """kinships""" +325 3 model """hole""" +325 3 loss """nssa""" +325 3 regularizer """no""" +325 3 optimizer """adam""" +325 3 training_loop """owa""" +325 3 negative_sampler """basic""" +325 3 evaluator """rankbased""" +325 4 dataset """kinships""" +325 4 model """hole""" +325 4 loss """nssa""" +325 4 regularizer """no""" +325 4 optimizer """adam""" +325 4 training_loop """owa""" +325 4 negative_sampler """basic""" +325 4 evaluator """rankbased""" +325 5 dataset """kinships""" +325 5 model """hole""" +325 5 loss """nssa""" +325 5 regularizer """no""" +325 5 optimizer """adam""" +325 5 training_loop """owa""" +325 5 negative_sampler """basic""" +325 5 evaluator """rankbased""" +325 6 dataset """kinships""" +325 6 model """hole""" +325 6 loss """nssa""" +325 6 regularizer """no""" +325 6 optimizer """adam""" +325 6 training_loop """owa""" 
+325 6 negative_sampler """basic""" +325 6 evaluator """rankbased""" +325 7 dataset """kinships""" +325 7 model """hole""" +325 7 loss """nssa""" +325 7 regularizer """no""" +325 7 optimizer """adam""" +325 7 training_loop """owa""" +325 7 negative_sampler """basic""" +325 7 evaluator """rankbased""" +325 8 dataset """kinships""" +325 8 model """hole""" +325 8 loss """nssa""" +325 8 regularizer """no""" +325 8 optimizer """adam""" +325 8 training_loop """owa""" +325 8 negative_sampler """basic""" +325 8 evaluator """rankbased""" +325 9 dataset """kinships""" +325 9 model """hole""" +325 9 loss """nssa""" +325 9 regularizer """no""" +325 9 optimizer """adam""" +325 9 training_loop """owa""" +325 9 negative_sampler """basic""" +325 9 evaluator """rankbased""" +325 10 dataset """kinships""" +325 10 model """hole""" +325 10 loss """nssa""" +325 10 regularizer """no""" +325 10 optimizer """adam""" +325 10 training_loop """owa""" +325 10 negative_sampler """basic""" +325 10 evaluator """rankbased""" +325 11 dataset """kinships""" +325 11 model """hole""" +325 11 loss """nssa""" +325 11 regularizer """no""" +325 11 optimizer """adam""" +325 11 training_loop """owa""" +325 11 negative_sampler """basic""" +325 11 evaluator """rankbased""" +325 12 dataset """kinships""" +325 12 model """hole""" +325 12 loss """nssa""" +325 12 regularizer """no""" +325 12 optimizer """adam""" +325 12 training_loop """owa""" +325 12 negative_sampler """basic""" +325 12 evaluator """rankbased""" +325 13 dataset """kinships""" +325 13 model """hole""" +325 13 loss """nssa""" +325 13 regularizer """no""" +325 13 optimizer """adam""" +325 13 training_loop """owa""" +325 13 negative_sampler """basic""" +325 13 evaluator """rankbased""" +325 14 dataset """kinships""" +325 14 model """hole""" +325 14 loss """nssa""" +325 14 regularizer """no""" +325 14 optimizer """adam""" +325 14 training_loop """owa""" +325 14 negative_sampler """basic""" +325 14 evaluator """rankbased""" +325 15 dataset 
"""kinships""" +325 15 model """hole""" +325 15 loss """nssa""" +325 15 regularizer """no""" +325 15 optimizer """adam""" +325 15 training_loop """owa""" +325 15 negative_sampler """basic""" +325 15 evaluator """rankbased""" +325 16 dataset """kinships""" +325 16 model """hole""" +325 16 loss """nssa""" +325 16 regularizer """no""" +325 16 optimizer """adam""" +325 16 training_loop """owa""" +325 16 negative_sampler """basic""" +325 16 evaluator """rankbased""" +325 17 dataset """kinships""" +325 17 model """hole""" +325 17 loss """nssa""" +325 17 regularizer """no""" +325 17 optimizer """adam""" +325 17 training_loop """owa""" +325 17 negative_sampler """basic""" +325 17 evaluator """rankbased""" +325 18 dataset """kinships""" +325 18 model """hole""" +325 18 loss """nssa""" +325 18 regularizer """no""" +325 18 optimizer """adam""" +325 18 training_loop """owa""" +325 18 negative_sampler """basic""" +325 18 evaluator """rankbased""" +325 19 dataset """kinships""" +325 19 model """hole""" +325 19 loss """nssa""" +325 19 regularizer """no""" +325 19 optimizer """adam""" +325 19 training_loop """owa""" +325 19 negative_sampler """basic""" +325 19 evaluator """rankbased""" +325 20 dataset """kinships""" +325 20 model """hole""" +325 20 loss """nssa""" +325 20 regularizer """no""" +325 20 optimizer """adam""" +325 20 training_loop """owa""" +325 20 negative_sampler """basic""" +325 20 evaluator """rankbased""" +325 21 dataset """kinships""" +325 21 model """hole""" +325 21 loss """nssa""" +325 21 regularizer """no""" +325 21 optimizer """adam""" +325 21 training_loop """owa""" +325 21 negative_sampler """basic""" +325 21 evaluator """rankbased""" +325 22 dataset """kinships""" +325 22 model """hole""" +325 22 loss """nssa""" +325 22 regularizer """no""" +325 22 optimizer """adam""" +325 22 training_loop """owa""" +325 22 negative_sampler """basic""" +325 22 evaluator """rankbased""" +325 23 dataset """kinships""" +325 23 model """hole""" +325 23 loss """nssa""" +325 23 
regularizer """no""" +325 23 optimizer """adam""" +325 23 training_loop """owa""" +325 23 negative_sampler """basic""" +325 23 evaluator """rankbased""" +325 24 dataset """kinships""" +325 24 model """hole""" +325 24 loss """nssa""" +325 24 regularizer """no""" +325 24 optimizer """adam""" +325 24 training_loop """owa""" +325 24 negative_sampler """basic""" +325 24 evaluator """rankbased""" +325 25 dataset """kinships""" +325 25 model """hole""" +325 25 loss """nssa""" +325 25 regularizer """no""" +325 25 optimizer """adam""" +325 25 training_loop """owa""" +325 25 negative_sampler """basic""" +325 25 evaluator """rankbased""" +325 26 dataset """kinships""" +325 26 model """hole""" +325 26 loss """nssa""" +325 26 regularizer """no""" +325 26 optimizer """adam""" +325 26 training_loop """owa""" +325 26 negative_sampler """basic""" +325 26 evaluator """rankbased""" +325 27 dataset """kinships""" +325 27 model """hole""" +325 27 loss """nssa""" +325 27 regularizer """no""" +325 27 optimizer """adam""" +325 27 training_loop """owa""" +325 27 negative_sampler """basic""" +325 27 evaluator """rankbased""" +325 28 dataset """kinships""" +325 28 model """hole""" +325 28 loss """nssa""" +325 28 regularizer """no""" +325 28 optimizer """adam""" +325 28 training_loop """owa""" +325 28 negative_sampler """basic""" +325 28 evaluator """rankbased""" +325 29 dataset """kinships""" +325 29 model """hole""" +325 29 loss """nssa""" +325 29 regularizer """no""" +325 29 optimizer """adam""" +325 29 training_loop """owa""" +325 29 negative_sampler """basic""" +325 29 evaluator """rankbased""" +325 30 dataset """kinships""" +325 30 model """hole""" +325 30 loss """nssa""" +325 30 regularizer """no""" +325 30 optimizer """adam""" +325 30 training_loop """owa""" +325 30 negative_sampler """basic""" +325 30 evaluator """rankbased""" +325 31 dataset """kinships""" +325 31 model """hole""" +325 31 loss """nssa""" +325 31 regularizer """no""" +325 31 optimizer """adam""" +325 31 training_loop 
"""owa""" +325 31 negative_sampler """basic""" +325 31 evaluator """rankbased""" +325 32 dataset """kinships""" +325 32 model """hole""" +325 32 loss """nssa""" +325 32 regularizer """no""" +325 32 optimizer """adam""" +325 32 training_loop """owa""" +325 32 negative_sampler """basic""" +325 32 evaluator """rankbased""" +325 33 dataset """kinships""" +325 33 model """hole""" +325 33 loss """nssa""" +325 33 regularizer """no""" +325 33 optimizer """adam""" +325 33 training_loop """owa""" +325 33 negative_sampler """basic""" +325 33 evaluator """rankbased""" +325 34 dataset """kinships""" +325 34 model """hole""" +325 34 loss """nssa""" +325 34 regularizer """no""" +325 34 optimizer """adam""" +325 34 training_loop """owa""" +325 34 negative_sampler """basic""" +325 34 evaluator """rankbased""" +325 35 dataset """kinships""" +325 35 model """hole""" +325 35 loss """nssa""" +325 35 regularizer """no""" +325 35 optimizer """adam""" +325 35 training_loop """owa""" +325 35 negative_sampler """basic""" +325 35 evaluator """rankbased""" +325 36 dataset """kinships""" +325 36 model """hole""" +325 36 loss """nssa""" +325 36 regularizer """no""" +325 36 optimizer """adam""" +325 36 training_loop """owa""" +325 36 negative_sampler """basic""" +325 36 evaluator """rankbased""" +325 37 dataset """kinships""" +325 37 model """hole""" +325 37 loss """nssa""" +325 37 regularizer """no""" +325 37 optimizer """adam""" +325 37 training_loop """owa""" +325 37 negative_sampler """basic""" +325 37 evaluator """rankbased""" +325 38 dataset """kinships""" +325 38 model """hole""" +325 38 loss """nssa""" +325 38 regularizer """no""" +325 38 optimizer """adam""" +325 38 training_loop """owa""" +325 38 negative_sampler """basic""" +325 38 evaluator """rankbased""" +325 39 dataset """kinships""" +325 39 model """hole""" +325 39 loss """nssa""" +325 39 regularizer """no""" +325 39 optimizer """adam""" +325 39 training_loop """owa""" +325 39 negative_sampler """basic""" +325 39 evaluator 
"""rankbased""" +325 40 dataset """kinships""" +325 40 model """hole""" +325 40 loss """nssa""" +325 40 regularizer """no""" +325 40 optimizer """adam""" +325 40 training_loop """owa""" +325 40 negative_sampler """basic""" +325 40 evaluator """rankbased""" +325 41 dataset """kinships""" +325 41 model """hole""" +325 41 loss """nssa""" +325 41 regularizer """no""" +325 41 optimizer """adam""" +325 41 training_loop """owa""" +325 41 negative_sampler """basic""" +325 41 evaluator """rankbased""" +325 42 dataset """kinships""" +325 42 model """hole""" +325 42 loss """nssa""" +325 42 regularizer """no""" +325 42 optimizer """adam""" +325 42 training_loop """owa""" +325 42 negative_sampler """basic""" +325 42 evaluator """rankbased""" +325 43 dataset """kinships""" +325 43 model """hole""" +325 43 loss """nssa""" +325 43 regularizer """no""" +325 43 optimizer """adam""" +325 43 training_loop """owa""" +325 43 negative_sampler """basic""" +325 43 evaluator """rankbased""" +325 44 dataset """kinships""" +325 44 model """hole""" +325 44 loss """nssa""" +325 44 regularizer """no""" +325 44 optimizer """adam""" +325 44 training_loop """owa""" +325 44 negative_sampler """basic""" +325 44 evaluator """rankbased""" +325 45 dataset """kinships""" +325 45 model """hole""" +325 45 loss """nssa""" +325 45 regularizer """no""" +325 45 optimizer """adam""" +325 45 training_loop """owa""" +325 45 negative_sampler """basic""" +325 45 evaluator """rankbased""" +325 46 dataset """kinships""" +325 46 model """hole""" +325 46 loss """nssa""" +325 46 regularizer """no""" +325 46 optimizer """adam""" +325 46 training_loop """owa""" +325 46 negative_sampler """basic""" +325 46 evaluator """rankbased""" +325 47 dataset """kinships""" +325 47 model """hole""" +325 47 loss """nssa""" +325 47 regularizer """no""" +325 47 optimizer """adam""" +325 47 training_loop """owa""" +325 47 negative_sampler """basic""" +325 47 evaluator """rankbased""" +325 48 dataset """kinships""" +325 48 model """hole""" 
+325 48 loss """nssa""" +325 48 regularizer """no""" +325 48 optimizer """adam""" +325 48 training_loop """owa""" +325 48 negative_sampler """basic""" +325 48 evaluator """rankbased""" +325 49 dataset """kinships""" +325 49 model """hole""" +325 49 loss """nssa""" +325 49 regularizer """no""" +325 49 optimizer """adam""" +325 49 training_loop """owa""" +325 49 negative_sampler """basic""" +325 49 evaluator """rankbased""" +325 50 dataset """kinships""" +325 50 model """hole""" +325 50 loss """nssa""" +325 50 regularizer """no""" +325 50 optimizer """adam""" +325 50 training_loop """owa""" +325 50 negative_sampler """basic""" +325 50 evaluator """rankbased""" +325 51 dataset """kinships""" +325 51 model """hole""" +325 51 loss """nssa""" +325 51 regularizer """no""" +325 51 optimizer """adam""" +325 51 training_loop """owa""" +325 51 negative_sampler """basic""" +325 51 evaluator """rankbased""" +325 52 dataset """kinships""" +325 52 model """hole""" +325 52 loss """nssa""" +325 52 regularizer """no""" +325 52 optimizer """adam""" +325 52 training_loop """owa""" +325 52 negative_sampler """basic""" +325 52 evaluator """rankbased""" +325 53 dataset """kinships""" +325 53 model """hole""" +325 53 loss """nssa""" +325 53 regularizer """no""" +325 53 optimizer """adam""" +325 53 training_loop """owa""" +325 53 negative_sampler """basic""" +325 53 evaluator """rankbased""" +325 54 dataset """kinships""" +325 54 model """hole""" +325 54 loss """nssa""" +325 54 regularizer """no""" +325 54 optimizer """adam""" +325 54 training_loop """owa""" +325 54 negative_sampler """basic""" +325 54 evaluator """rankbased""" +325 55 dataset """kinships""" +325 55 model """hole""" +325 55 loss """nssa""" +325 55 regularizer """no""" +325 55 optimizer """adam""" +325 55 training_loop """owa""" +325 55 negative_sampler """basic""" +325 55 evaluator """rankbased""" +325 56 dataset """kinships""" +325 56 model """hole""" +325 56 loss """nssa""" +325 56 regularizer """no""" +325 56 optimizer 
"""adam""" +325 56 training_loop """owa""" +325 56 negative_sampler """basic""" +325 56 evaluator """rankbased""" +325 57 dataset """kinships""" +325 57 model """hole""" +325 57 loss """nssa""" +325 57 regularizer """no""" +325 57 optimizer """adam""" +325 57 training_loop """owa""" +325 57 negative_sampler """basic""" +325 57 evaluator """rankbased""" +325 58 dataset """kinships""" +325 58 model """hole""" +325 58 loss """nssa""" +325 58 regularizer """no""" +325 58 optimizer """adam""" +325 58 training_loop """owa""" +325 58 negative_sampler """basic""" +325 58 evaluator """rankbased""" +325 59 dataset """kinships""" +325 59 model """hole""" +325 59 loss """nssa""" +325 59 regularizer """no""" +325 59 optimizer """adam""" +325 59 training_loop """owa""" +325 59 negative_sampler """basic""" +325 59 evaluator """rankbased""" +325 60 dataset """kinships""" +325 60 model """hole""" +325 60 loss """nssa""" +325 60 regularizer """no""" +325 60 optimizer """adam""" +325 60 training_loop """owa""" +325 60 negative_sampler """basic""" +325 60 evaluator """rankbased""" +325 61 dataset """kinships""" +325 61 model """hole""" +325 61 loss """nssa""" +325 61 regularizer """no""" +325 61 optimizer """adam""" +325 61 training_loop """owa""" +325 61 negative_sampler """basic""" +325 61 evaluator """rankbased""" +325 62 dataset """kinships""" +325 62 model """hole""" +325 62 loss """nssa""" +325 62 regularizer """no""" +325 62 optimizer """adam""" +325 62 training_loop """owa""" +325 62 negative_sampler """basic""" +325 62 evaluator """rankbased""" +325 63 dataset """kinships""" +325 63 model """hole""" +325 63 loss """nssa""" +325 63 regularizer """no""" +325 63 optimizer """adam""" +325 63 training_loop """owa""" +325 63 negative_sampler """basic""" +325 63 evaluator """rankbased""" +325 64 dataset """kinships""" +325 64 model """hole""" +325 64 loss """nssa""" +325 64 regularizer """no""" +325 64 optimizer """adam""" +325 64 training_loop """owa""" +325 64 negative_sampler 
"""basic""" +325 64 evaluator """rankbased""" +325 65 dataset """kinships""" +325 65 model """hole""" +325 65 loss """nssa""" +325 65 regularizer """no""" +325 65 optimizer """adam""" +325 65 training_loop """owa""" +325 65 negative_sampler """basic""" +325 65 evaluator """rankbased""" +325 66 dataset """kinships""" +325 66 model """hole""" +325 66 loss """nssa""" +325 66 regularizer """no""" +325 66 optimizer """adam""" +325 66 training_loop """owa""" +325 66 negative_sampler """basic""" +325 66 evaluator """rankbased""" +325 67 dataset """kinships""" +325 67 model """hole""" +325 67 loss """nssa""" +325 67 regularizer """no""" +325 67 optimizer """adam""" +325 67 training_loop """owa""" +325 67 negative_sampler """basic""" +325 67 evaluator """rankbased""" +325 68 dataset """kinships""" +325 68 model """hole""" +325 68 loss """nssa""" +325 68 regularizer """no""" +325 68 optimizer """adam""" +325 68 training_loop """owa""" +325 68 negative_sampler """basic""" +325 68 evaluator """rankbased""" +325 69 dataset """kinships""" +325 69 model """hole""" +325 69 loss """nssa""" +325 69 regularizer """no""" +325 69 optimizer """adam""" +325 69 training_loop """owa""" +325 69 negative_sampler """basic""" +325 69 evaluator """rankbased""" +325 70 dataset """kinships""" +325 70 model """hole""" +325 70 loss """nssa""" +325 70 regularizer """no""" +325 70 optimizer """adam""" +325 70 training_loop """owa""" +325 70 negative_sampler """basic""" +325 70 evaluator """rankbased""" +325 71 dataset """kinships""" +325 71 model """hole""" +325 71 loss """nssa""" +325 71 regularizer """no""" +325 71 optimizer """adam""" +325 71 training_loop """owa""" +325 71 negative_sampler """basic""" +325 71 evaluator """rankbased""" +325 72 dataset """kinships""" +325 72 model """hole""" +325 72 loss """nssa""" +325 72 regularizer """no""" +325 72 optimizer """adam""" +325 72 training_loop """owa""" +325 72 negative_sampler """basic""" +325 72 evaluator """rankbased""" +325 73 dataset 
"""kinships""" +325 73 model """hole""" +325 73 loss """nssa""" +325 73 regularizer """no""" +325 73 optimizer """adam""" +325 73 training_loop """owa""" +325 73 negative_sampler """basic""" +325 73 evaluator """rankbased""" +325 74 dataset """kinships""" +325 74 model """hole""" +325 74 loss """nssa""" +325 74 regularizer """no""" +325 74 optimizer """adam""" +325 74 training_loop """owa""" +325 74 negative_sampler """basic""" +325 74 evaluator """rankbased""" +325 75 dataset """kinships""" +325 75 model """hole""" +325 75 loss """nssa""" +325 75 regularizer """no""" +325 75 optimizer """adam""" +325 75 training_loop """owa""" +325 75 negative_sampler """basic""" +325 75 evaluator """rankbased""" +325 76 dataset """kinships""" +325 76 model """hole""" +325 76 loss """nssa""" +325 76 regularizer """no""" +325 76 optimizer """adam""" +325 76 training_loop """owa""" +325 76 negative_sampler """basic""" +325 76 evaluator """rankbased""" +325 77 dataset """kinships""" +325 77 model """hole""" +325 77 loss """nssa""" +325 77 regularizer """no""" +325 77 optimizer """adam""" +325 77 training_loop """owa""" +325 77 negative_sampler """basic""" +325 77 evaluator """rankbased""" +325 78 dataset """kinships""" +325 78 model """hole""" +325 78 loss """nssa""" +325 78 regularizer """no""" +325 78 optimizer """adam""" +325 78 training_loop """owa""" +325 78 negative_sampler """basic""" +325 78 evaluator """rankbased""" +325 79 dataset """kinships""" +325 79 model """hole""" +325 79 loss """nssa""" +325 79 regularizer """no""" +325 79 optimizer """adam""" +325 79 training_loop """owa""" +325 79 negative_sampler """basic""" +325 79 evaluator """rankbased""" +325 80 dataset """kinships""" +325 80 model """hole""" +325 80 loss """nssa""" +325 80 regularizer """no""" +325 80 optimizer """adam""" +325 80 training_loop """owa""" +325 80 negative_sampler """basic""" +325 80 evaluator """rankbased""" +325 81 dataset """kinships""" +325 81 model """hole""" +325 81 loss """nssa""" +325 81 
regularizer """no""" +325 81 optimizer """adam""" +325 81 training_loop """owa""" +325 81 negative_sampler """basic""" +325 81 evaluator """rankbased""" +325 82 dataset """kinships""" +325 82 model """hole""" +325 82 loss """nssa""" +325 82 regularizer """no""" +325 82 optimizer """adam""" +325 82 training_loop """owa""" +325 82 negative_sampler """basic""" +325 82 evaluator """rankbased""" +325 83 dataset """kinships""" +325 83 model """hole""" +325 83 loss """nssa""" +325 83 regularizer """no""" +325 83 optimizer """adam""" +325 83 training_loop """owa""" +325 83 negative_sampler """basic""" +325 83 evaluator """rankbased""" +325 84 dataset """kinships""" +325 84 model """hole""" +325 84 loss """nssa""" +325 84 regularizer """no""" +325 84 optimizer """adam""" +325 84 training_loop """owa""" +325 84 negative_sampler """basic""" +325 84 evaluator """rankbased""" +325 85 dataset """kinships""" +325 85 model """hole""" +325 85 loss """nssa""" +325 85 regularizer """no""" +325 85 optimizer """adam""" +325 85 training_loop """owa""" +325 85 negative_sampler """basic""" +325 85 evaluator """rankbased""" +325 86 dataset """kinships""" +325 86 model """hole""" +325 86 loss """nssa""" +325 86 regularizer """no""" +325 86 optimizer """adam""" +325 86 training_loop """owa""" +325 86 negative_sampler """basic""" +325 86 evaluator """rankbased""" +325 87 dataset """kinships""" +325 87 model """hole""" +325 87 loss """nssa""" +325 87 regularizer """no""" +325 87 optimizer """adam""" +325 87 training_loop """owa""" +325 87 negative_sampler """basic""" +325 87 evaluator """rankbased""" +325 88 dataset """kinships""" +325 88 model """hole""" +325 88 loss """nssa""" +325 88 regularizer """no""" +325 88 optimizer """adam""" +325 88 training_loop """owa""" +325 88 negative_sampler """basic""" +325 88 evaluator """rankbased""" +325 89 dataset """kinships""" +325 89 model """hole""" +325 89 loss """nssa""" +325 89 regularizer """no""" +325 89 optimizer """adam""" +325 89 training_loop 
"""owa""" +325 89 negative_sampler """basic""" +325 89 evaluator """rankbased""" +325 90 dataset """kinships""" +325 90 model """hole""" +325 90 loss """nssa""" +325 90 regularizer """no""" +325 90 optimizer """adam""" +325 90 training_loop """owa""" +325 90 negative_sampler """basic""" +325 90 evaluator """rankbased""" +325 91 dataset """kinships""" +325 91 model """hole""" +325 91 loss """nssa""" +325 91 regularizer """no""" +325 91 optimizer """adam""" +325 91 training_loop """owa""" +325 91 negative_sampler """basic""" +325 91 evaluator """rankbased""" +325 92 dataset """kinships""" +325 92 model """hole""" +325 92 loss """nssa""" +325 92 regularizer """no""" +325 92 optimizer """adam""" +325 92 training_loop """owa""" +325 92 negative_sampler """basic""" +325 92 evaluator """rankbased""" +325 93 dataset """kinships""" +325 93 model """hole""" +325 93 loss """nssa""" +325 93 regularizer """no""" +325 93 optimizer """adam""" +325 93 training_loop """owa""" +325 93 negative_sampler """basic""" +325 93 evaluator """rankbased""" +325 94 dataset """kinships""" +325 94 model """hole""" +325 94 loss """nssa""" +325 94 regularizer """no""" +325 94 optimizer """adam""" +325 94 training_loop """owa""" +325 94 negative_sampler """basic""" +325 94 evaluator """rankbased""" +325 95 dataset """kinships""" +325 95 model """hole""" +325 95 loss """nssa""" +325 95 regularizer """no""" +325 95 optimizer """adam""" +325 95 training_loop """owa""" +325 95 negative_sampler """basic""" +325 95 evaluator """rankbased""" +325 96 dataset """kinships""" +325 96 model """hole""" +325 96 loss """nssa""" +325 96 regularizer """no""" +325 96 optimizer """adam""" +325 96 training_loop """owa""" +325 96 negative_sampler """basic""" +325 96 evaluator """rankbased""" +325 97 dataset """kinships""" +325 97 model """hole""" +325 97 loss """nssa""" +325 97 regularizer """no""" +325 97 optimizer """adam""" +325 97 training_loop """owa""" +325 97 negative_sampler """basic""" +325 97 evaluator 
"""rankbased""" +325 98 dataset """kinships""" +325 98 model """hole""" +325 98 loss """nssa""" +325 98 regularizer """no""" +325 98 optimizer """adam""" +325 98 training_loop """owa""" +325 98 negative_sampler """basic""" +325 98 evaluator """rankbased""" +325 99 dataset """kinships""" +325 99 model """hole""" +325 99 loss """nssa""" +325 99 regularizer """no""" +325 99 optimizer """adam""" +325 99 training_loop """owa""" +325 99 negative_sampler """basic""" +325 99 evaluator """rankbased""" +325 100 dataset """kinships""" +325 100 model """hole""" +325 100 loss """nssa""" +325 100 regularizer """no""" +325 100 optimizer """adam""" +325 100 training_loop """owa""" +325 100 negative_sampler """basic""" +325 100 evaluator """rankbased""" +326 1 model.embedding_dim 1.0 +326 1 optimizer.lr 0.023131951035571807 +326 1 negative_sampler.num_negs_per_pos 80.0 +326 1 training.batch_size 1.0 +326 2 model.embedding_dim 1.0 +326 2 optimizer.lr 0.004483588663838649 +326 2 negative_sampler.num_negs_per_pos 27.0 +326 2 training.batch_size 2.0 +326 3 model.embedding_dim 2.0 +326 3 optimizer.lr 0.003975358876728633 +326 3 negative_sampler.num_negs_per_pos 69.0 +326 3 training.batch_size 1.0 +326 4 model.embedding_dim 1.0 +326 4 optimizer.lr 0.0010060524072901882 +326 4 negative_sampler.num_negs_per_pos 96.0 +326 4 training.batch_size 0.0 +326 5 model.embedding_dim 2.0 +326 5 optimizer.lr 0.01323040367664363 +326 5 negative_sampler.num_negs_per_pos 87.0 +326 5 training.batch_size 2.0 +326 6 model.embedding_dim 1.0 +326 6 optimizer.lr 0.07019587824374582 +326 6 negative_sampler.num_negs_per_pos 85.0 +326 6 training.batch_size 0.0 +326 7 model.embedding_dim 0.0 +326 7 optimizer.lr 0.021775898237463946 +326 7 negative_sampler.num_negs_per_pos 26.0 +326 7 training.batch_size 0.0 +326 8 model.embedding_dim 2.0 +326 8 optimizer.lr 0.0011751987259181304 +326 8 negative_sampler.num_negs_per_pos 43.0 +326 8 training.batch_size 0.0 +326 9 model.embedding_dim 2.0 +326 9 optimizer.lr 
0.009572242468086307 +326 9 negative_sampler.num_negs_per_pos 38.0 +326 9 training.batch_size 2.0 +326 10 model.embedding_dim 1.0 +326 10 optimizer.lr 0.00500549167464769 +326 10 negative_sampler.num_negs_per_pos 56.0 +326 10 training.batch_size 0.0 +326 11 model.embedding_dim 1.0 +326 11 optimizer.lr 0.06177075674052032 +326 11 negative_sampler.num_negs_per_pos 33.0 +326 11 training.batch_size 1.0 +326 12 model.embedding_dim 0.0 +326 12 optimizer.lr 0.007215599417984434 +326 12 negative_sampler.num_negs_per_pos 65.0 +326 12 training.batch_size 1.0 +326 13 model.embedding_dim 1.0 +326 13 optimizer.lr 0.001881613501372472 +326 13 negative_sampler.num_negs_per_pos 82.0 +326 13 training.batch_size 2.0 +326 14 model.embedding_dim 2.0 +326 14 optimizer.lr 0.015085249202067842 +326 14 negative_sampler.num_negs_per_pos 17.0 +326 14 training.batch_size 2.0 +326 15 model.embedding_dim 1.0 +326 15 optimizer.lr 0.028137403379456586 +326 15 negative_sampler.num_negs_per_pos 3.0 +326 15 training.batch_size 0.0 +326 16 model.embedding_dim 1.0 +326 16 optimizer.lr 0.0179208355223779 +326 16 negative_sampler.num_negs_per_pos 66.0 +326 16 training.batch_size 0.0 +326 17 model.embedding_dim 2.0 +326 17 optimizer.lr 0.002735324226346478 +326 17 negative_sampler.num_negs_per_pos 7.0 +326 17 training.batch_size 0.0 +326 18 model.embedding_dim 0.0 +326 18 optimizer.lr 0.03736040760980281 +326 18 negative_sampler.num_negs_per_pos 33.0 +326 18 training.batch_size 0.0 +326 19 model.embedding_dim 2.0 +326 19 optimizer.lr 0.004802539357714064 +326 19 negative_sampler.num_negs_per_pos 74.0 +326 19 training.batch_size 1.0 +326 20 model.embedding_dim 2.0 +326 20 optimizer.lr 0.016301568248977442 +326 20 negative_sampler.num_negs_per_pos 21.0 +326 20 training.batch_size 1.0 +326 21 model.embedding_dim 0.0 +326 21 optimizer.lr 0.02753165758592706 +326 21 negative_sampler.num_negs_per_pos 77.0 +326 21 training.batch_size 2.0 +326 22 model.embedding_dim 1.0 +326 22 optimizer.lr 0.03539147947094287 
+326 22 negative_sampler.num_negs_per_pos 50.0 +326 22 training.batch_size 1.0 +326 23 model.embedding_dim 1.0 +326 23 optimizer.lr 0.0012283603521630552 +326 23 negative_sampler.num_negs_per_pos 35.0 +326 23 training.batch_size 0.0 +326 24 model.embedding_dim 1.0 +326 24 optimizer.lr 0.002251993423218099 +326 24 negative_sampler.num_negs_per_pos 15.0 +326 24 training.batch_size 2.0 +326 25 model.embedding_dim 2.0 +326 25 optimizer.lr 0.0028575252499695733 +326 25 negative_sampler.num_negs_per_pos 42.0 +326 25 training.batch_size 0.0 +326 26 model.embedding_dim 0.0 +326 26 optimizer.lr 0.00864281740775258 +326 26 negative_sampler.num_negs_per_pos 17.0 +326 26 training.batch_size 1.0 +326 27 model.embedding_dim 1.0 +326 27 optimizer.lr 0.0026886757118320635 +326 27 negative_sampler.num_negs_per_pos 92.0 +326 27 training.batch_size 0.0 +326 28 model.embedding_dim 0.0 +326 28 optimizer.lr 0.07608085777789092 +326 28 negative_sampler.num_negs_per_pos 99.0 +326 28 training.batch_size 2.0 +326 29 model.embedding_dim 2.0 +326 29 optimizer.lr 0.002562438645364263 +326 29 negative_sampler.num_negs_per_pos 71.0 +326 29 training.batch_size 2.0 +326 30 model.embedding_dim 0.0 +326 30 optimizer.lr 0.0019159386676992768 +326 30 negative_sampler.num_negs_per_pos 1.0 +326 30 training.batch_size 2.0 +326 31 model.embedding_dim 1.0 +326 31 optimizer.lr 0.0377704040734926 +326 31 negative_sampler.num_negs_per_pos 41.0 +326 31 training.batch_size 0.0 +326 32 model.embedding_dim 2.0 +326 32 optimizer.lr 0.004549685935319802 +326 32 negative_sampler.num_negs_per_pos 14.0 +326 32 training.batch_size 1.0 +326 33 model.embedding_dim 0.0 +326 33 optimizer.lr 0.01031469469451656 +326 33 negative_sampler.num_negs_per_pos 3.0 +326 33 training.batch_size 0.0 +326 34 model.embedding_dim 1.0 +326 34 optimizer.lr 0.02547080336398095 +326 34 negative_sampler.num_negs_per_pos 31.0 +326 34 training.batch_size 0.0 +326 35 model.embedding_dim 1.0 +326 35 optimizer.lr 0.0012310779109624987 +326 35 
negative_sampler.num_negs_per_pos 63.0 +326 35 training.batch_size 2.0 +326 36 model.embedding_dim 2.0 +326 36 optimizer.lr 0.053105787471394376 +326 36 negative_sampler.num_negs_per_pos 44.0 +326 36 training.batch_size 2.0 +326 1 dataset """wn18rr""" +326 1 model """hole""" +326 1 loss """bceaftersigmoid""" +326 1 regularizer """no""" +326 1 optimizer """adam""" +326 1 training_loop """owa""" +326 1 negative_sampler """basic""" +326 1 evaluator """rankbased""" +326 2 dataset """wn18rr""" +326 2 model """hole""" +326 2 loss """bceaftersigmoid""" +326 2 regularizer """no""" +326 2 optimizer """adam""" +326 2 training_loop """owa""" +326 2 negative_sampler """basic""" +326 2 evaluator """rankbased""" +326 3 dataset """wn18rr""" +326 3 model """hole""" +326 3 loss """bceaftersigmoid""" +326 3 regularizer """no""" +326 3 optimizer """adam""" +326 3 training_loop """owa""" +326 3 negative_sampler """basic""" +326 3 evaluator """rankbased""" +326 4 dataset """wn18rr""" +326 4 model """hole""" +326 4 loss """bceaftersigmoid""" +326 4 regularizer """no""" +326 4 optimizer """adam""" +326 4 training_loop """owa""" +326 4 negative_sampler """basic""" +326 4 evaluator """rankbased""" +326 5 dataset """wn18rr""" +326 5 model """hole""" +326 5 loss """bceaftersigmoid""" +326 5 regularizer """no""" +326 5 optimizer """adam""" +326 5 training_loop """owa""" +326 5 negative_sampler """basic""" +326 5 evaluator """rankbased""" +326 6 dataset """wn18rr""" +326 6 model """hole""" +326 6 loss """bceaftersigmoid""" +326 6 regularizer """no""" +326 6 optimizer """adam""" +326 6 training_loop """owa""" +326 6 negative_sampler """basic""" +326 6 evaluator """rankbased""" +326 7 dataset """wn18rr""" +326 7 model """hole""" +326 7 loss """bceaftersigmoid""" +326 7 regularizer """no""" +326 7 optimizer """adam""" +326 7 training_loop """owa""" +326 7 negative_sampler """basic""" +326 7 evaluator """rankbased""" +326 8 dataset """wn18rr""" +326 8 model """hole""" +326 8 loss 
"""bceaftersigmoid""" +326 8 regularizer """no""" +326 8 optimizer """adam""" +326 8 training_loop """owa""" +326 8 negative_sampler """basic""" +326 8 evaluator """rankbased""" +326 9 dataset """wn18rr""" +326 9 model """hole""" +326 9 loss """bceaftersigmoid""" +326 9 regularizer """no""" +326 9 optimizer """adam""" +326 9 training_loop """owa""" +326 9 negative_sampler """basic""" +326 9 evaluator """rankbased""" +326 10 dataset """wn18rr""" +326 10 model """hole""" +326 10 loss """bceaftersigmoid""" +326 10 regularizer """no""" +326 10 optimizer """adam""" +326 10 training_loop """owa""" +326 10 negative_sampler """basic""" +326 10 evaluator """rankbased""" +326 11 dataset """wn18rr""" +326 11 model """hole""" +326 11 loss """bceaftersigmoid""" +326 11 regularizer """no""" +326 11 optimizer """adam""" +326 11 training_loop """owa""" +326 11 negative_sampler """basic""" +326 11 evaluator """rankbased""" +326 12 dataset """wn18rr""" +326 12 model """hole""" +326 12 loss """bceaftersigmoid""" +326 12 regularizer """no""" +326 12 optimizer """adam""" +326 12 training_loop """owa""" +326 12 negative_sampler """basic""" +326 12 evaluator """rankbased""" +326 13 dataset """wn18rr""" +326 13 model """hole""" +326 13 loss """bceaftersigmoid""" +326 13 regularizer """no""" +326 13 optimizer """adam""" +326 13 training_loop """owa""" +326 13 negative_sampler """basic""" +326 13 evaluator """rankbased""" +326 14 dataset """wn18rr""" +326 14 model """hole""" +326 14 loss """bceaftersigmoid""" +326 14 regularizer """no""" +326 14 optimizer """adam""" +326 14 training_loop """owa""" +326 14 negative_sampler """basic""" +326 14 evaluator """rankbased""" +326 15 dataset """wn18rr""" +326 15 model """hole""" +326 15 loss """bceaftersigmoid""" +326 15 regularizer """no""" +326 15 optimizer """adam""" +326 15 training_loop """owa""" +326 15 negative_sampler """basic""" +326 15 evaluator """rankbased""" +326 16 dataset """wn18rr""" +326 16 model """hole""" +326 16 loss 
"""bceaftersigmoid""" +326 16 regularizer """no""" +326 16 optimizer """adam""" +326 16 training_loop """owa""" +326 16 negative_sampler """basic""" +326 16 evaluator """rankbased""" +326 17 dataset """wn18rr""" +326 17 model """hole""" +326 17 loss """bceaftersigmoid""" +326 17 regularizer """no""" +326 17 optimizer """adam""" +326 17 training_loop """owa""" +326 17 negative_sampler """basic""" +326 17 evaluator """rankbased""" +326 18 dataset """wn18rr""" +326 18 model """hole""" +326 18 loss """bceaftersigmoid""" +326 18 regularizer """no""" +326 18 optimizer """adam""" +326 18 training_loop """owa""" +326 18 negative_sampler """basic""" +326 18 evaluator """rankbased""" +326 19 dataset """wn18rr""" +326 19 model """hole""" +326 19 loss """bceaftersigmoid""" +326 19 regularizer """no""" +326 19 optimizer """adam""" +326 19 training_loop """owa""" +326 19 negative_sampler """basic""" +326 19 evaluator """rankbased""" +326 20 dataset """wn18rr""" +326 20 model """hole""" +326 20 loss """bceaftersigmoid""" +326 20 regularizer """no""" +326 20 optimizer """adam""" +326 20 training_loop """owa""" +326 20 negative_sampler """basic""" +326 20 evaluator """rankbased""" +326 21 dataset """wn18rr""" +326 21 model """hole""" +326 21 loss """bceaftersigmoid""" +326 21 regularizer """no""" +326 21 optimizer """adam""" +326 21 training_loop """owa""" +326 21 negative_sampler """basic""" +326 21 evaluator """rankbased""" +326 22 dataset """wn18rr""" +326 22 model """hole""" +326 22 loss """bceaftersigmoid""" +326 22 regularizer """no""" +326 22 optimizer """adam""" +326 22 training_loop """owa""" +326 22 negative_sampler """basic""" +326 22 evaluator """rankbased""" +326 23 dataset """wn18rr""" +326 23 model """hole""" +326 23 loss """bceaftersigmoid""" +326 23 regularizer """no""" +326 23 optimizer """adam""" +326 23 training_loop """owa""" +326 23 negative_sampler """basic""" +326 23 evaluator """rankbased""" +326 24 dataset """wn18rr""" +326 24 model """hole""" +326 24 loss 
"""bceaftersigmoid""" +326 24 regularizer """no""" +326 24 optimizer """adam""" +326 24 training_loop """owa""" +326 24 negative_sampler """basic""" +326 24 evaluator """rankbased""" +326 25 dataset """wn18rr""" +326 25 model """hole""" +326 25 loss """bceaftersigmoid""" +326 25 regularizer """no""" +326 25 optimizer """adam""" +326 25 training_loop """owa""" +326 25 negative_sampler """basic""" +326 25 evaluator """rankbased""" +326 26 dataset """wn18rr""" +326 26 model """hole""" +326 26 loss """bceaftersigmoid""" +326 26 regularizer """no""" +326 26 optimizer """adam""" +326 26 training_loop """owa""" +326 26 negative_sampler """basic""" +326 26 evaluator """rankbased""" +326 27 dataset """wn18rr""" +326 27 model """hole""" +326 27 loss """bceaftersigmoid""" +326 27 regularizer """no""" +326 27 optimizer """adam""" +326 27 training_loop """owa""" +326 27 negative_sampler """basic""" +326 27 evaluator """rankbased""" +326 28 dataset """wn18rr""" +326 28 model """hole""" +326 28 loss """bceaftersigmoid""" +326 28 regularizer """no""" +326 28 optimizer """adam""" +326 28 training_loop """owa""" +326 28 negative_sampler """basic""" +326 28 evaluator """rankbased""" +326 29 dataset """wn18rr""" +326 29 model """hole""" +326 29 loss """bceaftersigmoid""" +326 29 regularizer """no""" +326 29 optimizer """adam""" +326 29 training_loop """owa""" +326 29 negative_sampler """basic""" +326 29 evaluator """rankbased""" +326 30 dataset """wn18rr""" +326 30 model """hole""" +326 30 loss """bceaftersigmoid""" +326 30 regularizer """no""" +326 30 optimizer """adam""" +326 30 training_loop """owa""" +326 30 negative_sampler """basic""" +326 30 evaluator """rankbased""" +326 31 dataset """wn18rr""" +326 31 model """hole""" +326 31 loss """bceaftersigmoid""" +326 31 regularizer """no""" +326 31 optimizer """adam""" +326 31 training_loop """owa""" +326 31 negative_sampler """basic""" +326 31 evaluator """rankbased""" +326 32 dataset """wn18rr""" +326 32 model """hole""" +326 32 loss 
"""bceaftersigmoid""" +326 32 regularizer """no""" +326 32 optimizer """adam""" +326 32 training_loop """owa""" +326 32 negative_sampler """basic""" +326 32 evaluator """rankbased""" +326 33 dataset """wn18rr""" +326 33 model """hole""" +326 33 loss """bceaftersigmoid""" +326 33 regularizer """no""" +326 33 optimizer """adam""" +326 33 training_loop """owa""" +326 33 negative_sampler """basic""" +326 33 evaluator """rankbased""" +326 34 dataset """wn18rr""" +326 34 model """hole""" +326 34 loss """bceaftersigmoid""" +326 34 regularizer """no""" +326 34 optimizer """adam""" +326 34 training_loop """owa""" +326 34 negative_sampler """basic""" +326 34 evaluator """rankbased""" +326 35 dataset """wn18rr""" +326 35 model """hole""" +326 35 loss """bceaftersigmoid""" +326 35 regularizer """no""" +326 35 optimizer """adam""" +326 35 training_loop """owa""" +326 35 negative_sampler """basic""" +326 35 evaluator """rankbased""" +326 36 dataset """wn18rr""" +326 36 model """hole""" +326 36 loss """bceaftersigmoid""" +326 36 regularizer """no""" +326 36 optimizer """adam""" +326 36 training_loop """owa""" +326 36 negative_sampler """basic""" +326 36 evaluator """rankbased""" +327 1 model.embedding_dim 2.0 +327 1 optimizer.lr 0.004668619498401407 +327 1 negative_sampler.num_negs_per_pos 80.0 +327 1 training.batch_size 1.0 +327 2 model.embedding_dim 2.0 +327 2 optimizer.lr 0.0017135709022073362 +327 2 negative_sampler.num_negs_per_pos 54.0 +327 2 training.batch_size 1.0 +327 3 model.embedding_dim 1.0 +327 3 optimizer.lr 0.09352154307422475 +327 3 negative_sampler.num_negs_per_pos 45.0 +327 3 training.batch_size 0.0 +327 4 model.embedding_dim 2.0 +327 4 optimizer.lr 0.0013434348942663198 +327 4 negative_sampler.num_negs_per_pos 50.0 +327 4 training.batch_size 2.0 +327 5 model.embedding_dim 1.0 +327 5 optimizer.lr 0.07911308166438048 +327 5 negative_sampler.num_negs_per_pos 81.0 +327 5 training.batch_size 0.0 +327 6 model.embedding_dim 2.0 +327 6 optimizer.lr 0.002512494296631454 
+327 6 negative_sampler.num_negs_per_pos 74.0 +327 6 training.batch_size 0.0 +327 7 model.embedding_dim 0.0 +327 7 optimizer.lr 0.006934335331075061 +327 7 negative_sampler.num_negs_per_pos 3.0 +327 7 training.batch_size 1.0 +327 8 model.embedding_dim 2.0 +327 8 optimizer.lr 0.007146488570101722 +327 8 negative_sampler.num_negs_per_pos 75.0 +327 8 training.batch_size 2.0 +327 9 model.embedding_dim 0.0 +327 9 optimizer.lr 0.03478014506162564 +327 9 negative_sampler.num_negs_per_pos 29.0 +327 9 training.batch_size 2.0 +327 10 model.embedding_dim 1.0 +327 10 optimizer.lr 0.002560521034448797 +327 10 negative_sampler.num_negs_per_pos 88.0 +327 10 training.batch_size 1.0 +327 11 model.embedding_dim 1.0 +327 11 optimizer.lr 0.08986080289532548 +327 11 negative_sampler.num_negs_per_pos 48.0 +327 11 training.batch_size 0.0 +327 12 model.embedding_dim 0.0 +327 12 optimizer.lr 0.03270486193944681 +327 12 negative_sampler.num_negs_per_pos 30.0 +327 12 training.batch_size 0.0 +327 13 model.embedding_dim 1.0 +327 13 optimizer.lr 0.0010047944637202552 +327 13 negative_sampler.num_negs_per_pos 29.0 +327 13 training.batch_size 1.0 +327 14 model.embedding_dim 0.0 +327 14 optimizer.lr 0.02413423724134727 +327 14 negative_sampler.num_negs_per_pos 42.0 +327 14 training.batch_size 0.0 +327 15 model.embedding_dim 1.0 +327 15 optimizer.lr 0.0010046794502435743 +327 15 negative_sampler.num_negs_per_pos 10.0 +327 15 training.batch_size 2.0 +327 16 model.embedding_dim 1.0 +327 16 optimizer.lr 0.06630692754437187 +327 16 negative_sampler.num_negs_per_pos 69.0 +327 16 training.batch_size 1.0 +327 17 model.embedding_dim 2.0 +327 17 optimizer.lr 0.002967344659697979 +327 17 negative_sampler.num_negs_per_pos 80.0 +327 17 training.batch_size 0.0 +327 18 model.embedding_dim 2.0 +327 18 optimizer.lr 0.028156028684272517 +327 18 negative_sampler.num_negs_per_pos 8.0 +327 18 training.batch_size 2.0 +327 19 model.embedding_dim 2.0 +327 19 optimizer.lr 0.0013756373768980604 +327 19 
negative_sampler.num_negs_per_pos 33.0 +327 19 training.batch_size 2.0 +327 20 model.embedding_dim 1.0 +327 20 optimizer.lr 0.012437767098091542 +327 20 negative_sampler.num_negs_per_pos 73.0 +327 20 training.batch_size 2.0 +327 21 model.embedding_dim 2.0 +327 21 optimizer.lr 0.008803765510030119 +327 21 negative_sampler.num_negs_per_pos 15.0 +327 21 training.batch_size 1.0 +327 22 model.embedding_dim 2.0 +327 22 optimizer.lr 0.010721613242762495 +327 22 negative_sampler.num_negs_per_pos 96.0 +327 22 training.batch_size 0.0 +327 23 model.embedding_dim 2.0 +327 23 optimizer.lr 0.0038807704936779652 +327 23 negative_sampler.num_negs_per_pos 98.0 +327 23 training.batch_size 2.0 +327 24 model.embedding_dim 2.0 +327 24 optimizer.lr 0.0042106194834616775 +327 24 negative_sampler.num_negs_per_pos 97.0 +327 24 training.batch_size 0.0 +327 25 model.embedding_dim 2.0 +327 25 optimizer.lr 0.0010844285441765755 +327 25 negative_sampler.num_negs_per_pos 3.0 +327 25 training.batch_size 0.0 +327 26 model.embedding_dim 0.0 +327 26 optimizer.lr 0.016402812934532746 +327 26 negative_sampler.num_negs_per_pos 73.0 +327 26 training.batch_size 2.0 +327 27 model.embedding_dim 2.0 +327 27 optimizer.lr 0.0018126137131190315 +327 27 negative_sampler.num_negs_per_pos 65.0 +327 27 training.batch_size 0.0 +327 28 model.embedding_dim 0.0 +327 28 optimizer.lr 0.007326663842840461 +327 28 negative_sampler.num_negs_per_pos 83.0 +327 28 training.batch_size 1.0 +327 29 model.embedding_dim 1.0 +327 29 optimizer.lr 0.0033294255443802132 +327 29 negative_sampler.num_negs_per_pos 38.0 +327 29 training.batch_size 1.0 +327 30 model.embedding_dim 1.0 +327 30 optimizer.lr 0.006807148085951415 +327 30 negative_sampler.num_negs_per_pos 84.0 +327 30 training.batch_size 0.0 +327 31 model.embedding_dim 2.0 +327 31 optimizer.lr 0.0038353072280603805 +327 31 negative_sampler.num_negs_per_pos 93.0 +327 31 training.batch_size 1.0 +327 1 dataset """wn18rr""" +327 1 model """hole""" +327 1 loss """softplus""" +327 1 
regularizer """no""" +327 1 optimizer """adam""" +327 1 training_loop """owa""" +327 1 negative_sampler """basic""" +327 1 evaluator """rankbased""" +327 2 dataset """wn18rr""" +327 2 model """hole""" +327 2 loss """softplus""" +327 2 regularizer """no""" +327 2 optimizer """adam""" +327 2 training_loop """owa""" +327 2 negative_sampler """basic""" +327 2 evaluator """rankbased""" +327 3 dataset """wn18rr""" +327 3 model """hole""" +327 3 loss """softplus""" +327 3 regularizer """no""" +327 3 optimizer """adam""" +327 3 training_loop """owa""" +327 3 negative_sampler """basic""" +327 3 evaluator """rankbased""" +327 4 dataset """wn18rr""" +327 4 model """hole""" +327 4 loss """softplus""" +327 4 regularizer """no""" +327 4 optimizer """adam""" +327 4 training_loop """owa""" +327 4 negative_sampler """basic""" +327 4 evaluator """rankbased""" +327 5 dataset """wn18rr""" +327 5 model """hole""" +327 5 loss """softplus""" +327 5 regularizer """no""" +327 5 optimizer """adam""" +327 5 training_loop """owa""" +327 5 negative_sampler """basic""" +327 5 evaluator """rankbased""" +327 6 dataset """wn18rr""" +327 6 model """hole""" +327 6 loss """softplus""" +327 6 regularizer """no""" +327 6 optimizer """adam""" +327 6 training_loop """owa""" +327 6 negative_sampler """basic""" +327 6 evaluator """rankbased""" +327 7 dataset """wn18rr""" +327 7 model """hole""" +327 7 loss """softplus""" +327 7 regularizer """no""" +327 7 optimizer """adam""" +327 7 training_loop """owa""" +327 7 negative_sampler """basic""" +327 7 evaluator """rankbased""" +327 8 dataset """wn18rr""" +327 8 model """hole""" +327 8 loss """softplus""" +327 8 regularizer """no""" +327 8 optimizer """adam""" +327 8 training_loop """owa""" +327 8 negative_sampler """basic""" +327 8 evaluator """rankbased""" +327 9 dataset """wn18rr""" +327 9 model """hole""" +327 9 loss """softplus""" +327 9 regularizer """no""" +327 9 optimizer """adam""" +327 9 training_loop """owa""" +327 9 negative_sampler """basic""" 
+327 9 evaluator """rankbased""" +327 10 dataset """wn18rr""" +327 10 model """hole""" +327 10 loss """softplus""" +327 10 regularizer """no""" +327 10 optimizer """adam""" +327 10 training_loop """owa""" +327 10 negative_sampler """basic""" +327 10 evaluator """rankbased""" +327 11 dataset """wn18rr""" +327 11 model """hole""" +327 11 loss """softplus""" +327 11 regularizer """no""" +327 11 optimizer """adam""" +327 11 training_loop """owa""" +327 11 negative_sampler """basic""" +327 11 evaluator """rankbased""" +327 12 dataset """wn18rr""" +327 12 model """hole""" +327 12 loss """softplus""" +327 12 regularizer """no""" +327 12 optimizer """adam""" +327 12 training_loop """owa""" +327 12 negative_sampler """basic""" +327 12 evaluator """rankbased""" +327 13 dataset """wn18rr""" +327 13 model """hole""" +327 13 loss """softplus""" +327 13 regularizer """no""" +327 13 optimizer """adam""" +327 13 training_loop """owa""" +327 13 negative_sampler """basic""" +327 13 evaluator """rankbased""" +327 14 dataset """wn18rr""" +327 14 model """hole""" +327 14 loss """softplus""" +327 14 regularizer """no""" +327 14 optimizer """adam""" +327 14 training_loop """owa""" +327 14 negative_sampler """basic""" +327 14 evaluator """rankbased""" +327 15 dataset """wn18rr""" +327 15 model """hole""" +327 15 loss """softplus""" +327 15 regularizer """no""" +327 15 optimizer """adam""" +327 15 training_loop """owa""" +327 15 negative_sampler """basic""" +327 15 evaluator """rankbased""" +327 16 dataset """wn18rr""" +327 16 model """hole""" +327 16 loss """softplus""" +327 16 regularizer """no""" +327 16 optimizer """adam""" +327 16 training_loop """owa""" +327 16 negative_sampler """basic""" +327 16 evaluator """rankbased""" +327 17 dataset """wn18rr""" +327 17 model """hole""" +327 17 loss """softplus""" +327 17 regularizer """no""" +327 17 optimizer """adam""" +327 17 training_loop """owa""" +327 17 negative_sampler """basic""" +327 17 evaluator """rankbased""" +327 18 dataset 
"""wn18rr""" +327 18 model """hole""" +327 18 loss """softplus""" +327 18 regularizer """no""" +327 18 optimizer """adam""" +327 18 training_loop """owa""" +327 18 negative_sampler """basic""" +327 18 evaluator """rankbased""" +327 19 dataset """wn18rr""" +327 19 model """hole""" +327 19 loss """softplus""" +327 19 regularizer """no""" +327 19 optimizer """adam""" +327 19 training_loop """owa""" +327 19 negative_sampler """basic""" +327 19 evaluator """rankbased""" +327 20 dataset """wn18rr""" +327 20 model """hole""" +327 20 loss """softplus""" +327 20 regularizer """no""" +327 20 optimizer """adam""" +327 20 training_loop """owa""" +327 20 negative_sampler """basic""" +327 20 evaluator """rankbased""" +327 21 dataset """wn18rr""" +327 21 model """hole""" +327 21 loss """softplus""" +327 21 regularizer """no""" +327 21 optimizer """adam""" +327 21 training_loop """owa""" +327 21 negative_sampler """basic""" +327 21 evaluator """rankbased""" +327 22 dataset """wn18rr""" +327 22 model """hole""" +327 22 loss """softplus""" +327 22 regularizer """no""" +327 22 optimizer """adam""" +327 22 training_loop """owa""" +327 22 negative_sampler """basic""" +327 22 evaluator """rankbased""" +327 23 dataset """wn18rr""" +327 23 model """hole""" +327 23 loss """softplus""" +327 23 regularizer """no""" +327 23 optimizer """adam""" +327 23 training_loop """owa""" +327 23 negative_sampler """basic""" +327 23 evaluator """rankbased""" +327 24 dataset """wn18rr""" +327 24 model """hole""" +327 24 loss """softplus""" +327 24 regularizer """no""" +327 24 optimizer """adam""" +327 24 training_loop """owa""" +327 24 negative_sampler """basic""" +327 24 evaluator """rankbased""" +327 25 dataset """wn18rr""" +327 25 model """hole""" +327 25 loss """softplus""" +327 25 regularizer """no""" +327 25 optimizer """adam""" +327 25 training_loop """owa""" +327 25 negative_sampler """basic""" +327 25 evaluator """rankbased""" +327 26 dataset """wn18rr""" +327 26 model """hole""" +327 26 loss 
"""softplus""" +327 26 regularizer """no""" +327 26 optimizer """adam""" +327 26 training_loop """owa""" +327 26 negative_sampler """basic""" +327 26 evaluator """rankbased""" +327 27 dataset """wn18rr""" +327 27 model """hole""" +327 27 loss """softplus""" +327 27 regularizer """no""" +327 27 optimizer """adam""" +327 27 training_loop """owa""" +327 27 negative_sampler """basic""" +327 27 evaluator """rankbased""" +327 28 dataset """wn18rr""" +327 28 model """hole""" +327 28 loss """softplus""" +327 28 regularizer """no""" +327 28 optimizer """adam""" +327 28 training_loop """owa""" +327 28 negative_sampler """basic""" +327 28 evaluator """rankbased""" +327 29 dataset """wn18rr""" +327 29 model """hole""" +327 29 loss """softplus""" +327 29 regularizer """no""" +327 29 optimizer """adam""" +327 29 training_loop """owa""" +327 29 negative_sampler """basic""" +327 29 evaluator """rankbased""" +327 30 dataset """wn18rr""" +327 30 model """hole""" +327 30 loss """softplus""" +327 30 regularizer """no""" +327 30 optimizer """adam""" +327 30 training_loop """owa""" +327 30 negative_sampler """basic""" +327 30 evaluator """rankbased""" +327 31 dataset """wn18rr""" +327 31 model """hole""" +327 31 loss """softplus""" +327 31 regularizer """no""" +327 31 optimizer """adam""" +327 31 training_loop """owa""" +327 31 negative_sampler """basic""" +327 31 evaluator """rankbased""" +328 1 model.embedding_dim 1.0 +328 1 optimizer.lr 0.005448100598234247 +328 1 negative_sampler.num_negs_per_pos 16.0 +328 1 training.batch_size 0.0 +328 2 model.embedding_dim 0.0 +328 2 optimizer.lr 0.0422614007004378 +328 2 negative_sampler.num_negs_per_pos 25.0 +328 2 training.batch_size 0.0 +328 3 model.embedding_dim 1.0 +328 3 optimizer.lr 0.07402607090533987 +328 3 negative_sampler.num_negs_per_pos 53.0 +328 3 training.batch_size 2.0 +328 4 model.embedding_dim 1.0 +328 4 optimizer.lr 0.03037075857596651 +328 4 negative_sampler.num_negs_per_pos 74.0 +328 4 training.batch_size 2.0 +328 5 
model.embedding_dim 1.0 +328 5 optimizer.lr 0.005305427654248553 +328 5 negative_sampler.num_negs_per_pos 22.0 +328 5 training.batch_size 2.0 +328 6 model.embedding_dim 1.0 +328 6 optimizer.lr 0.09838540048338196 +328 6 negative_sampler.num_negs_per_pos 57.0 +328 6 training.batch_size 2.0 +328 7 model.embedding_dim 0.0 +328 7 optimizer.lr 0.00406487844049876 +328 7 negative_sampler.num_negs_per_pos 30.0 +328 7 training.batch_size 2.0 +328 8 model.embedding_dim 1.0 +328 8 optimizer.lr 0.022854079760737313 +328 8 negative_sampler.num_negs_per_pos 93.0 +328 8 training.batch_size 2.0 +328 9 model.embedding_dim 1.0 +328 9 optimizer.lr 0.00992801051952774 +328 9 negative_sampler.num_negs_per_pos 28.0 +328 9 training.batch_size 2.0 +328 10 model.embedding_dim 2.0 +328 10 optimizer.lr 0.0017504714931972473 +328 10 negative_sampler.num_negs_per_pos 17.0 +328 10 training.batch_size 1.0 +328 11 model.embedding_dim 1.0 +328 11 optimizer.lr 0.001568111493461273 +328 11 negative_sampler.num_negs_per_pos 84.0 +328 11 training.batch_size 0.0 +328 12 model.embedding_dim 0.0 +328 12 optimizer.lr 0.002768508696815087 +328 12 negative_sampler.num_negs_per_pos 54.0 +328 12 training.batch_size 2.0 +328 13 model.embedding_dim 2.0 +328 13 optimizer.lr 0.0139256945386794 +328 13 negative_sampler.num_negs_per_pos 64.0 +328 13 training.batch_size 0.0 +328 14 model.embedding_dim 2.0 +328 14 optimizer.lr 0.046426705250670226 +328 14 negative_sampler.num_negs_per_pos 50.0 +328 14 training.batch_size 0.0 +328 15 model.embedding_dim 1.0 +328 15 optimizer.lr 0.014026095937378724 +328 15 negative_sampler.num_negs_per_pos 30.0 +328 15 training.batch_size 2.0 +328 16 model.embedding_dim 0.0 +328 16 optimizer.lr 0.01391368454358342 +328 16 negative_sampler.num_negs_per_pos 22.0 +328 16 training.batch_size 2.0 +328 17 model.embedding_dim 0.0 +328 17 optimizer.lr 0.005822081004588313 +328 17 negative_sampler.num_negs_per_pos 95.0 +328 17 training.batch_size 1.0 +328 18 model.embedding_dim 1.0 +328 18 
optimizer.lr 0.028094219238723187 +328 18 negative_sampler.num_negs_per_pos 95.0 +328 18 training.batch_size 1.0 +328 19 model.embedding_dim 2.0 +328 19 optimizer.lr 0.018428515434059187 +328 19 negative_sampler.num_negs_per_pos 52.0 +328 19 training.batch_size 1.0 +328 20 model.embedding_dim 0.0 +328 20 optimizer.lr 0.03725551538755254 +328 20 negative_sampler.num_negs_per_pos 34.0 +328 20 training.batch_size 1.0 +328 21 model.embedding_dim 2.0 +328 21 optimizer.lr 0.0016604611047613392 +328 21 negative_sampler.num_negs_per_pos 90.0 +328 21 training.batch_size 0.0 +328 22 model.embedding_dim 2.0 +328 22 optimizer.lr 0.033475708989110724 +328 22 negative_sampler.num_negs_per_pos 17.0 +328 22 training.batch_size 1.0 +328 23 model.embedding_dim 1.0 +328 23 optimizer.lr 0.009915882414698337 +328 23 negative_sampler.num_negs_per_pos 47.0 +328 23 training.batch_size 0.0 +328 24 model.embedding_dim 0.0 +328 24 optimizer.lr 0.013087841366454543 +328 24 negative_sampler.num_negs_per_pos 7.0 +328 24 training.batch_size 1.0 +328 25 model.embedding_dim 1.0 +328 25 optimizer.lr 0.020887307612421732 +328 25 negative_sampler.num_negs_per_pos 60.0 +328 25 training.batch_size 2.0 +328 26 model.embedding_dim 1.0 +328 26 optimizer.lr 0.0031583355812837568 +328 26 negative_sampler.num_negs_per_pos 34.0 +328 26 training.batch_size 0.0 +328 27 model.embedding_dim 0.0 +328 27 optimizer.lr 0.014334660440182355 +328 27 negative_sampler.num_negs_per_pos 91.0 +328 27 training.batch_size 0.0 +328 28 model.embedding_dim 1.0 +328 28 optimizer.lr 0.0013294166523278013 +328 28 negative_sampler.num_negs_per_pos 86.0 +328 28 training.batch_size 0.0 +328 29 model.embedding_dim 2.0 +328 29 optimizer.lr 0.03439142159401056 +328 29 negative_sampler.num_negs_per_pos 39.0 +328 29 training.batch_size 1.0 +328 30 model.embedding_dim 0.0 +328 30 optimizer.lr 0.0018607188238328025 +328 30 negative_sampler.num_negs_per_pos 86.0 +328 30 training.batch_size 2.0 +328 31 model.embedding_dim 0.0 +328 31 
optimizer.lr 0.002084480204426802 +328 31 negative_sampler.num_negs_per_pos 84.0 +328 31 training.batch_size 1.0 +328 32 model.embedding_dim 2.0 +328 32 optimizer.lr 0.0010676450496931212 +328 32 negative_sampler.num_negs_per_pos 69.0 +328 32 training.batch_size 0.0 +328 33 model.embedding_dim 1.0 +328 33 optimizer.lr 0.010819781668934615 +328 33 negative_sampler.num_negs_per_pos 19.0 +328 33 training.batch_size 2.0 +328 34 model.embedding_dim 0.0 +328 34 optimizer.lr 0.002399811170845812 +328 34 negative_sampler.num_negs_per_pos 30.0 +328 34 training.batch_size 0.0 +328 35 model.embedding_dim 2.0 +328 35 optimizer.lr 0.0011496669184391656 +328 35 negative_sampler.num_negs_per_pos 64.0 +328 35 training.batch_size 1.0 +328 36 model.embedding_dim 0.0 +328 36 optimizer.lr 0.008218446541104553 +328 36 negative_sampler.num_negs_per_pos 7.0 +328 36 training.batch_size 2.0 +328 37 model.embedding_dim 0.0 +328 37 optimizer.lr 0.008443882503001357 +328 37 negative_sampler.num_negs_per_pos 20.0 +328 37 training.batch_size 2.0 +328 38 model.embedding_dim 1.0 +328 38 optimizer.lr 0.007669473931787307 +328 38 negative_sampler.num_negs_per_pos 25.0 +328 38 training.batch_size 0.0 +328 39 model.embedding_dim 2.0 +328 39 optimizer.lr 0.023367126490070313 +328 39 negative_sampler.num_negs_per_pos 38.0 +328 39 training.batch_size 1.0 +328 40 model.embedding_dim 1.0 +328 40 optimizer.lr 0.008536429512930013 +328 40 negative_sampler.num_negs_per_pos 3.0 +328 40 training.batch_size 2.0 +328 41 model.embedding_dim 2.0 +328 41 optimizer.lr 0.0022934668704396527 +328 41 negative_sampler.num_negs_per_pos 78.0 +328 41 training.batch_size 1.0 +328 42 model.embedding_dim 0.0 +328 42 optimizer.lr 0.003822543859247423 +328 42 negative_sampler.num_negs_per_pos 51.0 +328 42 training.batch_size 1.0 +328 43 model.embedding_dim 0.0 +328 43 optimizer.lr 0.03846153532209667 +328 43 negative_sampler.num_negs_per_pos 26.0 +328 43 training.batch_size 0.0 +328 44 model.embedding_dim 1.0 +328 44 
optimizer.lr 0.01031071751594517 +328 44 negative_sampler.num_negs_per_pos 54.0 +328 44 training.batch_size 2.0 +328 45 model.embedding_dim 1.0 +328 45 optimizer.lr 0.005817393742553762 +328 45 negative_sampler.num_negs_per_pos 61.0 +328 45 training.batch_size 2.0 +328 46 model.embedding_dim 1.0 +328 46 optimizer.lr 0.06556315180667036 +328 46 negative_sampler.num_negs_per_pos 80.0 +328 46 training.batch_size 2.0 +328 47 model.embedding_dim 2.0 +328 47 optimizer.lr 0.012328333224813899 +328 47 negative_sampler.num_negs_per_pos 6.0 +328 47 training.batch_size 1.0 +328 48 model.embedding_dim 0.0 +328 48 optimizer.lr 0.002294031838319683 +328 48 negative_sampler.num_negs_per_pos 18.0 +328 48 training.batch_size 1.0 +328 49 model.embedding_dim 0.0 +328 49 optimizer.lr 0.0026347308473363205 +328 49 negative_sampler.num_negs_per_pos 7.0 +328 49 training.batch_size 0.0 +328 50 model.embedding_dim 2.0 +328 50 optimizer.lr 0.0057852883818837455 +328 50 negative_sampler.num_negs_per_pos 79.0 +328 50 training.batch_size 1.0 +328 51 model.embedding_dim 2.0 +328 51 optimizer.lr 0.0028928337917916353 +328 51 negative_sampler.num_negs_per_pos 56.0 +328 51 training.batch_size 0.0 +328 52 model.embedding_dim 0.0 +328 52 optimizer.lr 0.016995875786266795 +328 52 negative_sampler.num_negs_per_pos 30.0 +328 52 training.batch_size 1.0 +328 53 model.embedding_dim 0.0 +328 53 optimizer.lr 0.0023462440470163183 +328 53 negative_sampler.num_negs_per_pos 90.0 +328 53 training.batch_size 2.0 +328 54 model.embedding_dim 1.0 +328 54 optimizer.lr 0.016608720940819104 +328 54 negative_sampler.num_negs_per_pos 10.0 +328 54 training.batch_size 0.0 +328 55 model.embedding_dim 0.0 +328 55 optimizer.lr 0.09868651486596747 +328 55 negative_sampler.num_negs_per_pos 89.0 +328 55 training.batch_size 2.0 +328 56 model.embedding_dim 2.0 +328 56 optimizer.lr 0.0030157606168838198 +328 56 negative_sampler.num_negs_per_pos 42.0 +328 56 training.batch_size 0.0 +328 57 model.embedding_dim 1.0 +328 57 
optimizer.lr 0.002083293331023416 +328 57 negative_sampler.num_negs_per_pos 33.0 +328 57 training.batch_size 0.0 +328 58 model.embedding_dim 2.0 +328 58 optimizer.lr 0.0018269201886003616 +328 58 negative_sampler.num_negs_per_pos 97.0 +328 58 training.batch_size 1.0 +328 59 model.embedding_dim 0.0 +328 59 optimizer.lr 0.0018368270088444999 +328 59 negative_sampler.num_negs_per_pos 32.0 +328 59 training.batch_size 0.0 +328 60 model.embedding_dim 0.0 +328 60 optimizer.lr 0.029833304531871618 +328 60 negative_sampler.num_negs_per_pos 0.0 +328 60 training.batch_size 2.0 +328 61 model.embedding_dim 2.0 +328 61 optimizer.lr 0.005124535292106092 +328 61 negative_sampler.num_negs_per_pos 42.0 +328 61 training.batch_size 0.0 +328 62 model.embedding_dim 0.0 +328 62 optimizer.lr 0.002193457325281631 +328 62 negative_sampler.num_negs_per_pos 79.0 +328 62 training.batch_size 0.0 +328 63 model.embedding_dim 2.0 +328 63 optimizer.lr 0.023342040676932525 +328 63 negative_sampler.num_negs_per_pos 66.0 +328 63 training.batch_size 2.0 +328 64 model.embedding_dim 2.0 +328 64 optimizer.lr 0.03247974085305863 +328 64 negative_sampler.num_negs_per_pos 3.0 +328 64 training.batch_size 2.0 +328 65 model.embedding_dim 0.0 +328 65 optimizer.lr 0.020688127711979826 +328 65 negative_sampler.num_negs_per_pos 10.0 +328 65 training.batch_size 1.0 +328 66 model.embedding_dim 1.0 +328 66 optimizer.lr 0.009722612788228705 +328 66 negative_sampler.num_negs_per_pos 23.0 +328 66 training.batch_size 1.0 +328 67 model.embedding_dim 2.0 +328 67 optimizer.lr 0.001768142101510379 +328 67 negative_sampler.num_negs_per_pos 71.0 +328 67 training.batch_size 2.0 +328 1 dataset """wn18rr""" +328 1 model """hole""" +328 1 loss """bceaftersigmoid""" +328 1 regularizer """no""" +328 1 optimizer """adam""" +328 1 training_loop """owa""" +328 1 negative_sampler """basic""" +328 1 evaluator """rankbased""" +328 2 dataset """wn18rr""" +328 2 model """hole""" +328 2 loss """bceaftersigmoid""" +328 2 regularizer """no""" 
+328 2 optimizer """adam""" +328 2 training_loop """owa""" +328 2 negative_sampler """basic""" +328 2 evaluator """rankbased""" +328 3 dataset """wn18rr""" +328 3 model """hole""" +328 3 loss """bceaftersigmoid""" +328 3 regularizer """no""" +328 3 optimizer """adam""" +328 3 training_loop """owa""" +328 3 negative_sampler """basic""" +328 3 evaluator """rankbased""" +328 4 dataset """wn18rr""" +328 4 model """hole""" +328 4 loss """bceaftersigmoid""" +328 4 regularizer """no""" +328 4 optimizer """adam""" +328 4 training_loop """owa""" +328 4 negative_sampler """basic""" +328 4 evaluator """rankbased""" +328 5 dataset """wn18rr""" +328 5 model """hole""" +328 5 loss """bceaftersigmoid""" +328 5 regularizer """no""" +328 5 optimizer """adam""" +328 5 training_loop """owa""" +328 5 negative_sampler """basic""" +328 5 evaluator """rankbased""" +328 6 dataset """wn18rr""" +328 6 model """hole""" +328 6 loss """bceaftersigmoid""" +328 6 regularizer """no""" +328 6 optimizer """adam""" +328 6 training_loop """owa""" +328 6 negative_sampler """basic""" +328 6 evaluator """rankbased""" +328 7 dataset """wn18rr""" +328 7 model """hole""" +328 7 loss """bceaftersigmoid""" +328 7 regularizer """no""" +328 7 optimizer """adam""" +328 7 training_loop """owa""" +328 7 negative_sampler """basic""" +328 7 evaluator """rankbased""" +328 8 dataset """wn18rr""" +328 8 model """hole""" +328 8 loss """bceaftersigmoid""" +328 8 regularizer """no""" +328 8 optimizer """adam""" +328 8 training_loop """owa""" +328 8 negative_sampler """basic""" +328 8 evaluator """rankbased""" +328 9 dataset """wn18rr""" +328 9 model """hole""" +328 9 loss """bceaftersigmoid""" +328 9 regularizer """no""" +328 9 optimizer """adam""" +328 9 training_loop """owa""" +328 9 negative_sampler """basic""" +328 9 evaluator """rankbased""" +328 10 dataset """wn18rr""" +328 10 model """hole""" +328 10 loss """bceaftersigmoid""" +328 10 regularizer """no""" +328 10 optimizer """adam""" +328 10 training_loop 
"""owa""" +328 10 negative_sampler """basic""" +328 10 evaluator """rankbased""" +328 11 dataset """wn18rr""" +328 11 model """hole""" +328 11 loss """bceaftersigmoid""" +328 11 regularizer """no""" +328 11 optimizer """adam""" +328 11 training_loop """owa""" +328 11 negative_sampler """basic""" +328 11 evaluator """rankbased""" +328 12 dataset """wn18rr""" +328 12 model """hole""" +328 12 loss """bceaftersigmoid""" +328 12 regularizer """no""" +328 12 optimizer """adam""" +328 12 training_loop """owa""" +328 12 negative_sampler """basic""" +328 12 evaluator """rankbased""" +328 13 dataset """wn18rr""" +328 13 model """hole""" +328 13 loss """bceaftersigmoid""" +328 13 regularizer """no""" +328 13 optimizer """adam""" +328 13 training_loop """owa""" +328 13 negative_sampler """basic""" +328 13 evaluator """rankbased""" +328 14 dataset """wn18rr""" +328 14 model """hole""" +328 14 loss """bceaftersigmoid""" +328 14 regularizer """no""" +328 14 optimizer """adam""" +328 14 training_loop """owa""" +328 14 negative_sampler """basic""" +328 14 evaluator """rankbased""" +328 15 dataset """wn18rr""" +328 15 model """hole""" +328 15 loss """bceaftersigmoid""" +328 15 regularizer """no""" +328 15 optimizer """adam""" +328 15 training_loop """owa""" +328 15 negative_sampler """basic""" +328 15 evaluator """rankbased""" +328 16 dataset """wn18rr""" +328 16 model """hole""" +328 16 loss """bceaftersigmoid""" +328 16 regularizer """no""" +328 16 optimizer """adam""" +328 16 training_loop """owa""" +328 16 negative_sampler """basic""" +328 16 evaluator """rankbased""" +328 17 dataset """wn18rr""" +328 17 model """hole""" +328 17 loss """bceaftersigmoid""" +328 17 regularizer """no""" +328 17 optimizer """adam""" +328 17 training_loop """owa""" +328 17 negative_sampler """basic""" +328 17 evaluator """rankbased""" +328 18 dataset """wn18rr""" +328 18 model """hole""" +328 18 loss """bceaftersigmoid""" +328 18 regularizer """no""" +328 18 optimizer """adam""" +328 18 training_loop 
"""owa""" +328 18 negative_sampler """basic""" +328 18 evaluator """rankbased""" +328 19 dataset """wn18rr""" +328 19 model """hole""" +328 19 loss """bceaftersigmoid""" +328 19 regularizer """no""" +328 19 optimizer """adam""" +328 19 training_loop """owa""" +328 19 negative_sampler """basic""" +328 19 evaluator """rankbased""" +328 20 dataset """wn18rr""" +328 20 model """hole""" +328 20 loss """bceaftersigmoid""" +328 20 regularizer """no""" +328 20 optimizer """adam""" +328 20 training_loop """owa""" +328 20 negative_sampler """basic""" +328 20 evaluator """rankbased""" +328 21 dataset """wn18rr""" +328 21 model """hole""" +328 21 loss """bceaftersigmoid""" +328 21 regularizer """no""" +328 21 optimizer """adam""" +328 21 training_loop """owa""" +328 21 negative_sampler """basic""" +328 21 evaluator """rankbased""" +328 22 dataset """wn18rr""" +328 22 model """hole""" +328 22 loss """bceaftersigmoid""" +328 22 regularizer """no""" +328 22 optimizer """adam""" +328 22 training_loop """owa""" +328 22 negative_sampler """basic""" +328 22 evaluator """rankbased""" +328 23 dataset """wn18rr""" +328 23 model """hole""" +328 23 loss """bceaftersigmoid""" +328 23 regularizer """no""" +328 23 optimizer """adam""" +328 23 training_loop """owa""" +328 23 negative_sampler """basic""" +328 23 evaluator """rankbased""" +328 24 dataset """wn18rr""" +328 24 model """hole""" +328 24 loss """bceaftersigmoid""" +328 24 regularizer """no""" +328 24 optimizer """adam""" +328 24 training_loop """owa""" +328 24 negative_sampler """basic""" +328 24 evaluator """rankbased""" +328 25 dataset """wn18rr""" +328 25 model """hole""" +328 25 loss """bceaftersigmoid""" +328 25 regularizer """no""" +328 25 optimizer """adam""" +328 25 training_loop """owa""" +328 25 negative_sampler """basic""" +328 25 evaluator """rankbased""" +328 26 dataset """wn18rr""" +328 26 model """hole""" +328 26 loss """bceaftersigmoid""" +328 26 regularizer """no""" +328 26 optimizer """adam""" +328 26 training_loop 
"""owa""" +328 26 negative_sampler """basic""" +328 26 evaluator """rankbased""" +328 27 dataset """wn18rr""" +328 27 model """hole""" +328 27 loss """bceaftersigmoid""" +328 27 regularizer """no""" +328 27 optimizer """adam""" +328 27 training_loop """owa""" +328 27 negative_sampler """basic""" +328 27 evaluator """rankbased""" +328 28 dataset """wn18rr""" +328 28 model """hole""" +328 28 loss """bceaftersigmoid""" +328 28 regularizer """no""" +328 28 optimizer """adam""" +328 28 training_loop """owa""" +328 28 negative_sampler """basic""" +328 28 evaluator """rankbased""" +328 29 dataset """wn18rr""" +328 29 model """hole""" +328 29 loss """bceaftersigmoid""" +328 29 regularizer """no""" +328 29 optimizer """adam""" +328 29 training_loop """owa""" +328 29 negative_sampler """basic""" +328 29 evaluator """rankbased""" +328 30 dataset """wn18rr""" +328 30 model """hole""" +328 30 loss """bceaftersigmoid""" +328 30 regularizer """no""" +328 30 optimizer """adam""" +328 30 training_loop """owa""" +328 30 negative_sampler """basic""" +328 30 evaluator """rankbased""" +328 31 dataset """wn18rr""" +328 31 model """hole""" +328 31 loss """bceaftersigmoid""" +328 31 regularizer """no""" +328 31 optimizer """adam""" +328 31 training_loop """owa""" +328 31 negative_sampler """basic""" +328 31 evaluator """rankbased""" +328 32 dataset """wn18rr""" +328 32 model """hole""" +328 32 loss """bceaftersigmoid""" +328 32 regularizer """no""" +328 32 optimizer """adam""" +328 32 training_loop """owa""" +328 32 negative_sampler """basic""" +328 32 evaluator """rankbased""" +328 33 dataset """wn18rr""" +328 33 model """hole""" +328 33 loss """bceaftersigmoid""" +328 33 regularizer """no""" +328 33 optimizer """adam""" +328 33 training_loop """owa""" +328 33 negative_sampler """basic""" +328 33 evaluator """rankbased""" +328 34 dataset """wn18rr""" +328 34 model """hole""" +328 34 loss """bceaftersigmoid""" +328 34 regularizer """no""" +328 34 optimizer """adam""" +328 34 training_loop 
"""owa""" +328 34 negative_sampler """basic""" +328 34 evaluator """rankbased""" +328 35 dataset """wn18rr""" +328 35 model """hole""" +328 35 loss """bceaftersigmoid""" +328 35 regularizer """no""" +328 35 optimizer """adam""" +328 35 training_loop """owa""" +328 35 negative_sampler """basic""" +328 35 evaluator """rankbased""" +328 36 dataset """wn18rr""" +328 36 model """hole""" +328 36 loss """bceaftersigmoid""" +328 36 regularizer """no""" +328 36 optimizer """adam""" +328 36 training_loop """owa""" +328 36 negative_sampler """basic""" +328 36 evaluator """rankbased""" +328 37 dataset """wn18rr""" +328 37 model """hole""" +328 37 loss """bceaftersigmoid""" +328 37 regularizer """no""" +328 37 optimizer """adam""" +328 37 training_loop """owa""" +328 37 negative_sampler """basic""" +328 37 evaluator """rankbased""" +328 38 dataset """wn18rr""" +328 38 model """hole""" +328 38 loss """bceaftersigmoid""" +328 38 regularizer """no""" +328 38 optimizer """adam""" +328 38 training_loop """owa""" +328 38 negative_sampler """basic""" +328 38 evaluator """rankbased""" +328 39 dataset """wn18rr""" +328 39 model """hole""" +328 39 loss """bceaftersigmoid""" +328 39 regularizer """no""" +328 39 optimizer """adam""" +328 39 training_loop """owa""" +328 39 negative_sampler """basic""" +328 39 evaluator """rankbased""" +328 40 dataset """wn18rr""" +328 40 model """hole""" +328 40 loss """bceaftersigmoid""" +328 40 regularizer """no""" +328 40 optimizer """adam""" +328 40 training_loop """owa""" +328 40 negative_sampler """basic""" +328 40 evaluator """rankbased""" +328 41 dataset """wn18rr""" +328 41 model """hole""" +328 41 loss """bceaftersigmoid""" +328 41 regularizer """no""" +328 41 optimizer """adam""" +328 41 training_loop """owa""" +328 41 negative_sampler """basic""" +328 41 evaluator """rankbased""" +328 42 dataset """wn18rr""" +328 42 model """hole""" +328 42 loss """bceaftersigmoid""" +328 42 regularizer """no""" +328 42 optimizer """adam""" +328 42 training_loop 
"""owa""" +328 42 negative_sampler """basic""" +328 42 evaluator """rankbased""" +328 43 dataset """wn18rr""" +328 43 model """hole""" +328 43 loss """bceaftersigmoid""" +328 43 regularizer """no""" +328 43 optimizer """adam""" +328 43 training_loop """owa""" +328 43 negative_sampler """basic""" +328 43 evaluator """rankbased""" +328 44 dataset """wn18rr""" +328 44 model """hole""" +328 44 loss """bceaftersigmoid""" +328 44 regularizer """no""" +328 44 optimizer """adam""" +328 44 training_loop """owa""" +328 44 negative_sampler """basic""" +328 44 evaluator """rankbased""" +328 45 dataset """wn18rr""" +328 45 model """hole""" +328 45 loss """bceaftersigmoid""" +328 45 regularizer """no""" +328 45 optimizer """adam""" +328 45 training_loop """owa""" +328 45 negative_sampler """basic""" +328 45 evaluator """rankbased""" +328 46 dataset """wn18rr""" +328 46 model """hole""" +328 46 loss """bceaftersigmoid""" +328 46 regularizer """no""" +328 46 optimizer """adam""" +328 46 training_loop """owa""" +328 46 negative_sampler """basic""" +328 46 evaluator """rankbased""" +328 47 dataset """wn18rr""" +328 47 model """hole""" +328 47 loss """bceaftersigmoid""" +328 47 regularizer """no""" +328 47 optimizer """adam""" +328 47 training_loop """owa""" +328 47 negative_sampler """basic""" +328 47 evaluator """rankbased""" +328 48 dataset """wn18rr""" +328 48 model """hole""" +328 48 loss """bceaftersigmoid""" +328 48 regularizer """no""" +328 48 optimizer """adam""" +328 48 training_loop """owa""" +328 48 negative_sampler """basic""" +328 48 evaluator """rankbased""" +328 49 dataset """wn18rr""" +328 49 model """hole""" +328 49 loss """bceaftersigmoid""" +328 49 regularizer """no""" +328 49 optimizer """adam""" +328 49 training_loop """owa""" +328 49 negative_sampler """basic""" +328 49 evaluator """rankbased""" +328 50 dataset """wn18rr""" +328 50 model """hole""" +328 50 loss """bceaftersigmoid""" +328 50 regularizer """no""" +328 50 optimizer """adam""" +328 50 training_loop 
"""owa""" +328 50 negative_sampler """basic""" +328 50 evaluator """rankbased""" +328 51 dataset """wn18rr""" +328 51 model """hole""" +328 51 loss """bceaftersigmoid""" +328 51 regularizer """no""" +328 51 optimizer """adam""" +328 51 training_loop """owa""" +328 51 negative_sampler """basic""" +328 51 evaluator """rankbased""" +328 52 dataset """wn18rr""" +328 52 model """hole""" +328 52 loss """bceaftersigmoid""" +328 52 regularizer """no""" +328 52 optimizer """adam""" +328 52 training_loop """owa""" +328 52 negative_sampler """basic""" +328 52 evaluator """rankbased""" +328 53 dataset """wn18rr""" +328 53 model """hole""" +328 53 loss """bceaftersigmoid""" +328 53 regularizer """no""" +328 53 optimizer """adam""" +328 53 training_loop """owa""" +328 53 negative_sampler """basic""" +328 53 evaluator """rankbased""" +328 54 dataset """wn18rr""" +328 54 model """hole""" +328 54 loss """bceaftersigmoid""" +328 54 regularizer """no""" +328 54 optimizer """adam""" +328 54 training_loop """owa""" +328 54 negative_sampler """basic""" +328 54 evaluator """rankbased""" +328 55 dataset """wn18rr""" +328 55 model """hole""" +328 55 loss """bceaftersigmoid""" +328 55 regularizer """no""" +328 55 optimizer """adam""" +328 55 training_loop """owa""" +328 55 negative_sampler """basic""" +328 55 evaluator """rankbased""" +328 56 dataset """wn18rr""" +328 56 model """hole""" +328 56 loss """bceaftersigmoid""" +328 56 regularizer """no""" +328 56 optimizer """adam""" +328 56 training_loop """owa""" +328 56 negative_sampler """basic""" +328 56 evaluator """rankbased""" +328 57 dataset """wn18rr""" +328 57 model """hole""" +328 57 loss """bceaftersigmoid""" +328 57 regularizer """no""" +328 57 optimizer """adam""" +328 57 training_loop """owa""" +328 57 negative_sampler """basic""" +328 57 evaluator """rankbased""" +328 58 dataset """wn18rr""" +328 58 model """hole""" +328 58 loss """bceaftersigmoid""" +328 58 regularizer """no""" +328 58 optimizer """adam""" +328 58 training_loop 
"""owa""" +328 58 negative_sampler """basic""" +328 58 evaluator """rankbased""" +328 59 dataset """wn18rr""" +328 59 model """hole""" +328 59 loss """bceaftersigmoid""" +328 59 regularizer """no""" +328 59 optimizer """adam""" +328 59 training_loop """owa""" +328 59 negative_sampler """basic""" +328 59 evaluator """rankbased""" +328 60 dataset """wn18rr""" +328 60 model """hole""" +328 60 loss """bceaftersigmoid""" +328 60 regularizer """no""" +328 60 optimizer """adam""" +328 60 training_loop """owa""" +328 60 negative_sampler """basic""" +328 60 evaluator """rankbased""" +328 61 dataset """wn18rr""" +328 61 model """hole""" +328 61 loss """bceaftersigmoid""" +328 61 regularizer """no""" +328 61 optimizer """adam""" +328 61 training_loop """owa""" +328 61 negative_sampler """basic""" +328 61 evaluator """rankbased""" +328 62 dataset """wn18rr""" +328 62 model """hole""" +328 62 loss """bceaftersigmoid""" +328 62 regularizer """no""" +328 62 optimizer """adam""" +328 62 training_loop """owa""" +328 62 negative_sampler """basic""" +328 62 evaluator """rankbased""" +328 63 dataset """wn18rr""" +328 63 model """hole""" +328 63 loss """bceaftersigmoid""" +328 63 regularizer """no""" +328 63 optimizer """adam""" +328 63 training_loop """owa""" +328 63 negative_sampler """basic""" +328 63 evaluator """rankbased""" +328 64 dataset """wn18rr""" +328 64 model """hole""" +328 64 loss """bceaftersigmoid""" +328 64 regularizer """no""" +328 64 optimizer """adam""" +328 64 training_loop """owa""" +328 64 negative_sampler """basic""" +328 64 evaluator """rankbased""" +328 65 dataset """wn18rr""" +328 65 model """hole""" +328 65 loss """bceaftersigmoid""" +328 65 regularizer """no""" +328 65 optimizer """adam""" +328 65 training_loop """owa""" +328 65 negative_sampler """basic""" +328 65 evaluator """rankbased""" +328 66 dataset """wn18rr""" +328 66 model """hole""" +328 66 loss """bceaftersigmoid""" +328 66 regularizer """no""" +328 66 optimizer """adam""" +328 66 training_loop 
"""owa""" +328 66 negative_sampler """basic""" +328 66 evaluator """rankbased""" +328 67 dataset """wn18rr""" +328 67 model """hole""" +328 67 loss """bceaftersigmoid""" +328 67 regularizer """no""" +328 67 optimizer """adam""" +328 67 training_loop """owa""" +328 67 negative_sampler """basic""" +328 67 evaluator """rankbased""" +329 1 model.embedding_dim 1.0 +329 1 optimizer.lr 0.0039090420783621715 +329 1 negative_sampler.num_negs_per_pos 12.0 +329 1 training.batch_size 1.0 +329 2 model.embedding_dim 0.0 +329 2 optimizer.lr 0.006567928010741169 +329 2 negative_sampler.num_negs_per_pos 85.0 +329 2 training.batch_size 1.0 +329 3 model.embedding_dim 0.0 +329 3 optimizer.lr 0.02965290333124637 +329 3 negative_sampler.num_negs_per_pos 35.0 +329 3 training.batch_size 0.0 +329 4 model.embedding_dim 0.0 +329 4 optimizer.lr 0.02300703881422366 +329 4 negative_sampler.num_negs_per_pos 92.0 +329 4 training.batch_size 2.0 +329 5 model.embedding_dim 2.0 +329 5 optimizer.lr 0.04571790276203354 +329 5 negative_sampler.num_negs_per_pos 97.0 +329 5 training.batch_size 2.0 +329 6 model.embedding_dim 2.0 +329 6 optimizer.lr 0.00441164550511504 +329 6 negative_sampler.num_negs_per_pos 54.0 +329 6 training.batch_size 2.0 +329 7 model.embedding_dim 0.0 +329 7 optimizer.lr 0.009129149115540554 +329 7 negative_sampler.num_negs_per_pos 75.0 +329 7 training.batch_size 2.0 +329 8 model.embedding_dim 2.0 +329 8 optimizer.lr 0.054429225612610106 +329 8 negative_sampler.num_negs_per_pos 35.0 +329 8 training.batch_size 2.0 +329 9 model.embedding_dim 1.0 +329 9 optimizer.lr 0.0021438187823066886 +329 9 negative_sampler.num_negs_per_pos 73.0 +329 9 training.batch_size 0.0 +329 10 model.embedding_dim 0.0 +329 10 optimizer.lr 0.0895973918109105 +329 10 negative_sampler.num_negs_per_pos 9.0 +329 10 training.batch_size 0.0 +329 11 model.embedding_dim 0.0 +329 11 optimizer.lr 0.00417967098322383 +329 11 negative_sampler.num_negs_per_pos 56.0 +329 11 training.batch_size 0.0 +329 12 model.embedding_dim 
1.0 +329 12 optimizer.lr 0.0019405009653146188 +329 12 negative_sampler.num_negs_per_pos 72.0 +329 12 training.batch_size 0.0 +329 13 model.embedding_dim 2.0 +329 13 optimizer.lr 0.004656586293383152 +329 13 negative_sampler.num_negs_per_pos 83.0 +329 13 training.batch_size 2.0 +329 14 model.embedding_dim 0.0 +329 14 optimizer.lr 0.006413940713823321 +329 14 negative_sampler.num_negs_per_pos 4.0 +329 14 training.batch_size 2.0 +329 15 model.embedding_dim 0.0 +329 15 optimizer.lr 0.0572401915543885 +329 15 negative_sampler.num_negs_per_pos 50.0 +329 15 training.batch_size 2.0 +329 16 model.embedding_dim 2.0 +329 16 optimizer.lr 0.05833627927986318 +329 16 negative_sampler.num_negs_per_pos 48.0 +329 16 training.batch_size 0.0 +329 17 model.embedding_dim 2.0 +329 17 optimizer.lr 0.024948360204619685 +329 17 negative_sampler.num_negs_per_pos 37.0 +329 17 training.batch_size 2.0 +329 18 model.embedding_dim 1.0 +329 18 optimizer.lr 0.004950573907628829 +329 18 negative_sampler.num_negs_per_pos 51.0 +329 18 training.batch_size 2.0 +329 19 model.embedding_dim 2.0 +329 19 optimizer.lr 0.001677254144224557 +329 19 negative_sampler.num_negs_per_pos 24.0 +329 19 training.batch_size 2.0 +329 20 model.embedding_dim 2.0 +329 20 optimizer.lr 0.028421055552689155 +329 20 negative_sampler.num_negs_per_pos 71.0 +329 20 training.batch_size 1.0 +329 21 model.embedding_dim 0.0 +329 21 optimizer.lr 0.0026700910096205117 +329 21 negative_sampler.num_negs_per_pos 85.0 +329 21 training.batch_size 1.0 +329 22 model.embedding_dim 0.0 +329 22 optimizer.lr 0.0033399891960470074 +329 22 negative_sampler.num_negs_per_pos 96.0 +329 22 training.batch_size 0.0 +329 23 model.embedding_dim 2.0 +329 23 optimizer.lr 0.03332038385420747 +329 23 negative_sampler.num_negs_per_pos 9.0 +329 23 training.batch_size 0.0 +329 24 model.embedding_dim 2.0 +329 24 optimizer.lr 0.0018869879725812598 +329 24 negative_sampler.num_negs_per_pos 18.0 +329 24 training.batch_size 1.0 +329 25 model.embedding_dim 2.0 +329 25 
optimizer.lr 0.07866456010195477 +329 25 negative_sampler.num_negs_per_pos 20.0 +329 25 training.batch_size 1.0 +329 26 model.embedding_dim 1.0 +329 26 optimizer.lr 0.033767854776989514 +329 26 negative_sampler.num_negs_per_pos 85.0 +329 26 training.batch_size 1.0 +329 27 model.embedding_dim 1.0 +329 27 optimizer.lr 0.045047689017253256 +329 27 negative_sampler.num_negs_per_pos 98.0 +329 27 training.batch_size 0.0 +329 28 model.embedding_dim 0.0 +329 28 optimizer.lr 0.002108418335997651 +329 28 negative_sampler.num_negs_per_pos 76.0 +329 28 training.batch_size 1.0 +329 29 model.embedding_dim 1.0 +329 29 optimizer.lr 0.014249449993833128 +329 29 negative_sampler.num_negs_per_pos 90.0 +329 29 training.batch_size 0.0 +329 30 model.embedding_dim 0.0 +329 30 optimizer.lr 0.001066825672694062 +329 30 negative_sampler.num_negs_per_pos 53.0 +329 30 training.batch_size 0.0 +329 31 model.embedding_dim 1.0 +329 31 optimizer.lr 0.03266405965721729 +329 31 negative_sampler.num_negs_per_pos 99.0 +329 31 training.batch_size 1.0 +329 32 model.embedding_dim 0.0 +329 32 optimizer.lr 0.006271734989880226 +329 32 negative_sampler.num_negs_per_pos 57.0 +329 32 training.batch_size 1.0 +329 33 model.embedding_dim 2.0 +329 33 optimizer.lr 0.0021611576197014737 +329 33 negative_sampler.num_negs_per_pos 25.0 +329 33 training.batch_size 0.0 +329 34 model.embedding_dim 1.0 +329 34 optimizer.lr 0.0011840552736839245 +329 34 negative_sampler.num_negs_per_pos 67.0 +329 34 training.batch_size 0.0 +329 35 model.embedding_dim 0.0 +329 35 optimizer.lr 0.052241563289382516 +329 35 negative_sampler.num_negs_per_pos 8.0 +329 35 training.batch_size 2.0 +329 36 model.embedding_dim 2.0 +329 36 optimizer.lr 0.09722448390862891 +329 36 negative_sampler.num_negs_per_pos 11.0 +329 36 training.batch_size 1.0 +329 37 model.embedding_dim 0.0 +329 37 optimizer.lr 0.001253462841252032 +329 37 negative_sampler.num_negs_per_pos 82.0 +329 37 training.batch_size 2.0 +329 38 model.embedding_dim 0.0 +329 38 optimizer.lr 
0.019654947959894708 +329 38 negative_sampler.num_negs_per_pos 26.0 +329 38 training.batch_size 2.0 +329 39 model.embedding_dim 1.0 +329 39 optimizer.lr 0.04883912427255491 +329 39 negative_sampler.num_negs_per_pos 42.0 +329 39 training.batch_size 1.0 +329 40 model.embedding_dim 0.0 +329 40 optimizer.lr 0.007951363561662286 +329 40 negative_sampler.num_negs_per_pos 31.0 +329 40 training.batch_size 0.0 +329 41 model.embedding_dim 0.0 +329 41 optimizer.lr 0.06765424187906373 +329 41 negative_sampler.num_negs_per_pos 70.0 +329 41 training.batch_size 1.0 +329 42 model.embedding_dim 2.0 +329 42 optimizer.lr 0.01483258516003193 +329 42 negative_sampler.num_negs_per_pos 27.0 +329 42 training.batch_size 1.0 +329 43 model.embedding_dim 1.0 +329 43 optimizer.lr 0.001832986137011284 +329 43 negative_sampler.num_negs_per_pos 3.0 +329 43 training.batch_size 0.0 +329 44 model.embedding_dim 1.0 +329 44 optimizer.lr 0.012806711828733514 +329 44 negative_sampler.num_negs_per_pos 10.0 +329 44 training.batch_size 1.0 +329 45 model.embedding_dim 1.0 +329 45 optimizer.lr 0.03369080505351591 +329 45 negative_sampler.num_negs_per_pos 32.0 +329 45 training.batch_size 2.0 +329 46 model.embedding_dim 0.0 +329 46 optimizer.lr 0.014367749347220942 +329 46 negative_sampler.num_negs_per_pos 41.0 +329 46 training.batch_size 0.0 +329 47 model.embedding_dim 0.0 +329 47 optimizer.lr 0.006860819603953479 +329 47 negative_sampler.num_negs_per_pos 39.0 +329 47 training.batch_size 2.0 +329 48 model.embedding_dim 0.0 +329 48 optimizer.lr 0.037315865325364184 +329 48 negative_sampler.num_negs_per_pos 86.0 +329 48 training.batch_size 2.0 +329 49 model.embedding_dim 1.0 +329 49 optimizer.lr 0.0017789988206529336 +329 49 negative_sampler.num_negs_per_pos 97.0 +329 49 training.batch_size 1.0 +329 50 model.embedding_dim 2.0 +329 50 optimizer.lr 0.07100336490163617 +329 50 negative_sampler.num_negs_per_pos 78.0 +329 50 training.batch_size 0.0 +329 51 model.embedding_dim 2.0 +329 51 optimizer.lr 
0.005482511661319112 +329 51 negative_sampler.num_negs_per_pos 22.0 +329 51 training.batch_size 0.0 +329 52 model.embedding_dim 1.0 +329 52 optimizer.lr 0.027285279297265787 +329 52 negative_sampler.num_negs_per_pos 50.0 +329 52 training.batch_size 0.0 +329 53 model.embedding_dim 0.0 +329 53 optimizer.lr 0.04344001787043711 +329 53 negative_sampler.num_negs_per_pos 86.0 +329 53 training.batch_size 2.0 +329 54 model.embedding_dim 0.0 +329 54 optimizer.lr 0.09794056978458603 +329 54 negative_sampler.num_negs_per_pos 5.0 +329 54 training.batch_size 2.0 +329 55 model.embedding_dim 2.0 +329 55 optimizer.lr 0.0021656535265336604 +329 55 negative_sampler.num_negs_per_pos 30.0 +329 55 training.batch_size 1.0 +329 56 model.embedding_dim 1.0 +329 56 optimizer.lr 0.006434748711977175 +329 56 negative_sampler.num_negs_per_pos 57.0 +329 56 training.batch_size 0.0 +329 57 model.embedding_dim 1.0 +329 57 optimizer.lr 0.004972104119583405 +329 57 negative_sampler.num_negs_per_pos 28.0 +329 57 training.batch_size 0.0 +329 58 model.embedding_dim 1.0 +329 58 optimizer.lr 0.007555323007143727 +329 58 negative_sampler.num_negs_per_pos 5.0 +329 58 training.batch_size 0.0 +329 59 model.embedding_dim 1.0 +329 59 optimizer.lr 0.0596281274499872 +329 59 negative_sampler.num_negs_per_pos 10.0 +329 59 training.batch_size 1.0 +329 60 model.embedding_dim 2.0 +329 60 optimizer.lr 0.0012396606360861825 +329 60 negative_sampler.num_negs_per_pos 80.0 +329 60 training.batch_size 1.0 +329 61 model.embedding_dim 2.0 +329 61 optimizer.lr 0.004399769803709346 +329 61 negative_sampler.num_negs_per_pos 75.0 +329 61 training.batch_size 0.0 +329 62 model.embedding_dim 1.0 +329 62 optimizer.lr 0.0019598646698988885 +329 62 negative_sampler.num_negs_per_pos 20.0 +329 62 training.batch_size 1.0 +329 63 model.embedding_dim 0.0 +329 63 optimizer.lr 0.01569019672852352 +329 63 negative_sampler.num_negs_per_pos 90.0 +329 63 training.batch_size 0.0 +329 64 model.embedding_dim 0.0 +329 64 optimizer.lr 
0.0019664531061361155 +329 64 negative_sampler.num_negs_per_pos 48.0 +329 64 training.batch_size 1.0 +329 65 model.embedding_dim 0.0 +329 65 optimizer.lr 0.09846643819069273 +329 65 negative_sampler.num_negs_per_pos 28.0 +329 65 training.batch_size 2.0 +329 1 dataset """wn18rr""" +329 1 model """hole""" +329 1 loss """softplus""" +329 1 regularizer """no""" +329 1 optimizer """adam""" +329 1 training_loop """owa""" +329 1 negative_sampler """basic""" +329 1 evaluator """rankbased""" +329 2 dataset """wn18rr""" +329 2 model """hole""" +329 2 loss """softplus""" +329 2 regularizer """no""" +329 2 optimizer """adam""" +329 2 training_loop """owa""" +329 2 negative_sampler """basic""" +329 2 evaluator """rankbased""" +329 3 dataset """wn18rr""" +329 3 model """hole""" +329 3 loss """softplus""" +329 3 regularizer """no""" +329 3 optimizer """adam""" +329 3 training_loop """owa""" +329 3 negative_sampler """basic""" +329 3 evaluator """rankbased""" +329 4 dataset """wn18rr""" +329 4 model """hole""" +329 4 loss """softplus""" +329 4 regularizer """no""" +329 4 optimizer """adam""" +329 4 training_loop """owa""" +329 4 negative_sampler """basic""" +329 4 evaluator """rankbased""" +329 5 dataset """wn18rr""" +329 5 model """hole""" +329 5 loss """softplus""" +329 5 regularizer """no""" +329 5 optimizer """adam""" +329 5 training_loop """owa""" +329 5 negative_sampler """basic""" +329 5 evaluator """rankbased""" +329 6 dataset """wn18rr""" +329 6 model """hole""" +329 6 loss """softplus""" +329 6 regularizer """no""" +329 6 optimizer """adam""" +329 6 training_loop """owa""" +329 6 negative_sampler """basic""" +329 6 evaluator """rankbased""" +329 7 dataset """wn18rr""" +329 7 model """hole""" +329 7 loss """softplus""" +329 7 regularizer """no""" +329 7 optimizer """adam""" +329 7 training_loop """owa""" +329 7 negative_sampler """basic""" +329 7 evaluator """rankbased""" +329 8 dataset """wn18rr""" +329 8 model """hole""" +329 8 loss """softplus""" +329 8 regularizer 
"""no""" +329 8 optimizer """adam""" +329 8 training_loop """owa""" +329 8 negative_sampler """basic""" +329 8 evaluator """rankbased""" +329 9 dataset """wn18rr""" +329 9 model """hole""" +329 9 loss """softplus""" +329 9 regularizer """no""" +329 9 optimizer """adam""" +329 9 training_loop """owa""" +329 9 negative_sampler """basic""" +329 9 evaluator """rankbased""" +329 10 dataset """wn18rr""" +329 10 model """hole""" +329 10 loss """softplus""" +329 10 regularizer """no""" +329 10 optimizer """adam""" +329 10 training_loop """owa""" +329 10 negative_sampler """basic""" +329 10 evaluator """rankbased""" +329 11 dataset """wn18rr""" +329 11 model """hole""" +329 11 loss """softplus""" +329 11 regularizer """no""" +329 11 optimizer """adam""" +329 11 training_loop """owa""" +329 11 negative_sampler """basic""" +329 11 evaluator """rankbased""" +329 12 dataset """wn18rr""" +329 12 model """hole""" +329 12 loss """softplus""" +329 12 regularizer """no""" +329 12 optimizer """adam""" +329 12 training_loop """owa""" +329 12 negative_sampler """basic""" +329 12 evaluator """rankbased""" +329 13 dataset """wn18rr""" +329 13 model """hole""" +329 13 loss """softplus""" +329 13 regularizer """no""" +329 13 optimizer """adam""" +329 13 training_loop """owa""" +329 13 negative_sampler """basic""" +329 13 evaluator """rankbased""" +329 14 dataset """wn18rr""" +329 14 model """hole""" +329 14 loss """softplus""" +329 14 regularizer """no""" +329 14 optimizer """adam""" +329 14 training_loop """owa""" +329 14 negative_sampler """basic""" +329 14 evaluator """rankbased""" +329 15 dataset """wn18rr""" +329 15 model """hole""" +329 15 loss """softplus""" +329 15 regularizer """no""" +329 15 optimizer """adam""" +329 15 training_loop """owa""" +329 15 negative_sampler """basic""" +329 15 evaluator """rankbased""" +329 16 dataset """wn18rr""" +329 16 model """hole""" +329 16 loss """softplus""" +329 16 regularizer """no""" +329 16 optimizer """adam""" +329 16 training_loop 
"""owa""" +329 16 negative_sampler """basic""" +329 16 evaluator """rankbased""" +329 17 dataset """wn18rr""" +329 17 model """hole""" +329 17 loss """softplus""" +329 17 regularizer """no""" +329 17 optimizer """adam""" +329 17 training_loop """owa""" +329 17 negative_sampler """basic""" +329 17 evaluator """rankbased""" +329 18 dataset """wn18rr""" +329 18 model """hole""" +329 18 loss """softplus""" +329 18 regularizer """no""" +329 18 optimizer """adam""" +329 18 training_loop """owa""" +329 18 negative_sampler """basic""" +329 18 evaluator """rankbased""" +329 19 dataset """wn18rr""" +329 19 model """hole""" +329 19 loss """softplus""" +329 19 regularizer """no""" +329 19 optimizer """adam""" +329 19 training_loop """owa""" +329 19 negative_sampler """basic""" +329 19 evaluator """rankbased""" +329 20 dataset """wn18rr""" +329 20 model """hole""" +329 20 loss """softplus""" +329 20 regularizer """no""" +329 20 optimizer """adam""" +329 20 training_loop """owa""" +329 20 negative_sampler """basic""" +329 20 evaluator """rankbased""" +329 21 dataset """wn18rr""" +329 21 model """hole""" +329 21 loss """softplus""" +329 21 regularizer """no""" +329 21 optimizer """adam""" +329 21 training_loop """owa""" +329 21 negative_sampler """basic""" +329 21 evaluator """rankbased""" +329 22 dataset """wn18rr""" +329 22 model """hole""" +329 22 loss """softplus""" +329 22 regularizer """no""" +329 22 optimizer """adam""" +329 22 training_loop """owa""" +329 22 negative_sampler """basic""" +329 22 evaluator """rankbased""" +329 23 dataset """wn18rr""" +329 23 model """hole""" +329 23 loss """softplus""" +329 23 regularizer """no""" +329 23 optimizer """adam""" +329 23 training_loop """owa""" +329 23 negative_sampler """basic""" +329 23 evaluator """rankbased""" +329 24 dataset """wn18rr""" +329 24 model """hole""" +329 24 loss """softplus""" +329 24 regularizer """no""" +329 24 optimizer """adam""" +329 24 training_loop """owa""" +329 24 negative_sampler """basic""" +329 24 
evaluator """rankbased""" +329 25 dataset """wn18rr""" +329 25 model """hole""" +329 25 loss """softplus""" +329 25 regularizer """no""" +329 25 optimizer """adam""" +329 25 training_loop """owa""" +329 25 negative_sampler """basic""" +329 25 evaluator """rankbased""" +329 26 dataset """wn18rr""" +329 26 model """hole""" +329 26 loss """softplus""" +329 26 regularizer """no""" +329 26 optimizer """adam""" +329 26 training_loop """owa""" +329 26 negative_sampler """basic""" +329 26 evaluator """rankbased""" +329 27 dataset """wn18rr""" +329 27 model """hole""" +329 27 loss """softplus""" +329 27 regularizer """no""" +329 27 optimizer """adam""" +329 27 training_loop """owa""" +329 27 negative_sampler """basic""" +329 27 evaluator """rankbased""" +329 28 dataset """wn18rr""" +329 28 model """hole""" +329 28 loss """softplus""" +329 28 regularizer """no""" +329 28 optimizer """adam""" +329 28 training_loop """owa""" +329 28 negative_sampler """basic""" +329 28 evaluator """rankbased""" +329 29 dataset """wn18rr""" +329 29 model """hole""" +329 29 loss """softplus""" +329 29 regularizer """no""" +329 29 optimizer """adam""" +329 29 training_loop """owa""" +329 29 negative_sampler """basic""" +329 29 evaluator """rankbased""" +329 30 dataset """wn18rr""" +329 30 model """hole""" +329 30 loss """softplus""" +329 30 regularizer """no""" +329 30 optimizer """adam""" +329 30 training_loop """owa""" +329 30 negative_sampler """basic""" +329 30 evaluator """rankbased""" +329 31 dataset """wn18rr""" +329 31 model """hole""" +329 31 loss """softplus""" +329 31 regularizer """no""" +329 31 optimizer """adam""" +329 31 training_loop """owa""" +329 31 negative_sampler """basic""" +329 31 evaluator """rankbased""" +329 32 dataset """wn18rr""" +329 32 model """hole""" +329 32 loss """softplus""" +329 32 regularizer """no""" +329 32 optimizer """adam""" +329 32 training_loop """owa""" +329 32 negative_sampler """basic""" +329 32 evaluator """rankbased""" +329 33 dataset """wn18rr""" 
+329 33 model """hole""" +329 33 loss """softplus""" +329 33 regularizer """no""" +329 33 optimizer """adam""" +329 33 training_loop """owa""" +329 33 negative_sampler """basic""" +329 33 evaluator """rankbased""" +329 34 dataset """wn18rr""" +329 34 model """hole""" +329 34 loss """softplus""" +329 34 regularizer """no""" +329 34 optimizer """adam""" +329 34 training_loop """owa""" +329 34 negative_sampler """basic""" +329 34 evaluator """rankbased""" +329 35 dataset """wn18rr""" +329 35 model """hole""" +329 35 loss """softplus""" +329 35 regularizer """no""" +329 35 optimizer """adam""" +329 35 training_loop """owa""" +329 35 negative_sampler """basic""" +329 35 evaluator """rankbased""" +329 36 dataset """wn18rr""" +329 36 model """hole""" +329 36 loss """softplus""" +329 36 regularizer """no""" +329 36 optimizer """adam""" +329 36 training_loop """owa""" +329 36 negative_sampler """basic""" +329 36 evaluator """rankbased""" +329 37 dataset """wn18rr""" +329 37 model """hole""" +329 37 loss """softplus""" +329 37 regularizer """no""" +329 37 optimizer """adam""" +329 37 training_loop """owa""" +329 37 negative_sampler """basic""" +329 37 evaluator """rankbased""" +329 38 dataset """wn18rr""" +329 38 model """hole""" +329 38 loss """softplus""" +329 38 regularizer """no""" +329 38 optimizer """adam""" +329 38 training_loop """owa""" +329 38 negative_sampler """basic""" +329 38 evaluator """rankbased""" +329 39 dataset """wn18rr""" +329 39 model """hole""" +329 39 loss """softplus""" +329 39 regularizer """no""" +329 39 optimizer """adam""" +329 39 training_loop """owa""" +329 39 negative_sampler """basic""" +329 39 evaluator """rankbased""" +329 40 dataset """wn18rr""" +329 40 model """hole""" +329 40 loss """softplus""" +329 40 regularizer """no""" +329 40 optimizer """adam""" +329 40 training_loop """owa""" +329 40 negative_sampler """basic""" +329 40 evaluator """rankbased""" +329 41 dataset """wn18rr""" +329 41 model """hole""" +329 41 loss """softplus""" 
+329 41 regularizer """no""" +329 41 optimizer """adam""" +329 41 training_loop """owa""" +329 41 negative_sampler """basic""" +329 41 evaluator """rankbased""" +329 42 dataset """wn18rr""" +329 42 model """hole""" +329 42 loss """softplus""" +329 42 regularizer """no""" +329 42 optimizer """adam""" +329 42 training_loop """owa""" +329 42 negative_sampler """basic""" +329 42 evaluator """rankbased""" +329 43 dataset """wn18rr""" +329 43 model """hole""" +329 43 loss """softplus""" +329 43 regularizer """no""" +329 43 optimizer """adam""" +329 43 training_loop """owa""" +329 43 negative_sampler """basic""" +329 43 evaluator """rankbased""" +329 44 dataset """wn18rr""" +329 44 model """hole""" +329 44 loss """softplus""" +329 44 regularizer """no""" +329 44 optimizer """adam""" +329 44 training_loop """owa""" +329 44 negative_sampler """basic""" +329 44 evaluator """rankbased""" +329 45 dataset """wn18rr""" +329 45 model """hole""" +329 45 loss """softplus""" +329 45 regularizer """no""" +329 45 optimizer """adam""" +329 45 training_loop """owa""" +329 45 negative_sampler """basic""" +329 45 evaluator """rankbased""" +329 46 dataset """wn18rr""" +329 46 model """hole""" +329 46 loss """softplus""" +329 46 regularizer """no""" +329 46 optimizer """adam""" +329 46 training_loop """owa""" +329 46 negative_sampler """basic""" +329 46 evaluator """rankbased""" +329 47 dataset """wn18rr""" +329 47 model """hole""" +329 47 loss """softplus""" +329 47 regularizer """no""" +329 47 optimizer """adam""" +329 47 training_loop """owa""" +329 47 negative_sampler """basic""" +329 47 evaluator """rankbased""" +329 48 dataset """wn18rr""" +329 48 model """hole""" +329 48 loss """softplus""" +329 48 regularizer """no""" +329 48 optimizer """adam""" +329 48 training_loop """owa""" +329 48 negative_sampler """basic""" +329 48 evaluator """rankbased""" +329 49 dataset """wn18rr""" +329 49 model """hole""" +329 49 loss """softplus""" +329 49 regularizer """no""" +329 49 optimizer 
"""adam""" +329 49 training_loop """owa""" +329 49 negative_sampler """basic""" +329 49 evaluator """rankbased""" +329 50 dataset """wn18rr""" +329 50 model """hole""" +329 50 loss """softplus""" +329 50 regularizer """no""" +329 50 optimizer """adam""" +329 50 training_loop """owa""" +329 50 negative_sampler """basic""" +329 50 evaluator """rankbased""" +329 51 dataset """wn18rr""" +329 51 model """hole""" +329 51 loss """softplus""" +329 51 regularizer """no""" +329 51 optimizer """adam""" +329 51 training_loop """owa""" +329 51 negative_sampler """basic""" +329 51 evaluator """rankbased""" +329 52 dataset """wn18rr""" +329 52 model """hole""" +329 52 loss """softplus""" +329 52 regularizer """no""" +329 52 optimizer """adam""" +329 52 training_loop """owa""" +329 52 negative_sampler """basic""" +329 52 evaluator """rankbased""" +329 53 dataset """wn18rr""" +329 53 model """hole""" +329 53 loss """softplus""" +329 53 regularizer """no""" +329 53 optimizer """adam""" +329 53 training_loop """owa""" +329 53 negative_sampler """basic""" +329 53 evaluator """rankbased""" +329 54 dataset """wn18rr""" +329 54 model """hole""" +329 54 loss """softplus""" +329 54 regularizer """no""" +329 54 optimizer """adam""" +329 54 training_loop """owa""" +329 54 negative_sampler """basic""" +329 54 evaluator """rankbased""" +329 55 dataset """wn18rr""" +329 55 model """hole""" +329 55 loss """softplus""" +329 55 regularizer """no""" +329 55 optimizer """adam""" +329 55 training_loop """owa""" +329 55 negative_sampler """basic""" +329 55 evaluator """rankbased""" +329 56 dataset """wn18rr""" +329 56 model """hole""" +329 56 loss """softplus""" +329 56 regularizer """no""" +329 56 optimizer """adam""" +329 56 training_loop """owa""" +329 56 negative_sampler """basic""" +329 56 evaluator """rankbased""" +329 57 dataset """wn18rr""" +329 57 model """hole""" +329 57 loss """softplus""" +329 57 regularizer """no""" +329 57 optimizer """adam""" +329 57 training_loop """owa""" +329 57 
negative_sampler """basic""" +329 57 evaluator """rankbased""" +329 58 dataset """wn18rr""" +329 58 model """hole""" +329 58 loss """softplus""" +329 58 regularizer """no""" +329 58 optimizer """adam""" +329 58 training_loop """owa""" +329 58 negative_sampler """basic""" +329 58 evaluator """rankbased""" +329 59 dataset """wn18rr""" +329 59 model """hole""" +329 59 loss """softplus""" +329 59 regularizer """no""" +329 59 optimizer """adam""" +329 59 training_loop """owa""" +329 59 negative_sampler """basic""" +329 59 evaluator """rankbased""" +329 60 dataset """wn18rr""" +329 60 model """hole""" +329 60 loss """softplus""" +329 60 regularizer """no""" +329 60 optimizer """adam""" +329 60 training_loop """owa""" +329 60 negative_sampler """basic""" +329 60 evaluator """rankbased""" +329 61 dataset """wn18rr""" +329 61 model """hole""" +329 61 loss """softplus""" +329 61 regularizer """no""" +329 61 optimizer """adam""" +329 61 training_loop """owa""" +329 61 negative_sampler """basic""" +329 61 evaluator """rankbased""" +329 62 dataset """wn18rr""" +329 62 model """hole""" +329 62 loss """softplus""" +329 62 regularizer """no""" +329 62 optimizer """adam""" +329 62 training_loop """owa""" +329 62 negative_sampler """basic""" +329 62 evaluator """rankbased""" +329 63 dataset """wn18rr""" +329 63 model """hole""" +329 63 loss """softplus""" +329 63 regularizer """no""" +329 63 optimizer """adam""" +329 63 training_loop """owa""" +329 63 negative_sampler """basic""" +329 63 evaluator """rankbased""" +329 64 dataset """wn18rr""" +329 64 model """hole""" +329 64 loss """softplus""" +329 64 regularizer """no""" +329 64 optimizer """adam""" +329 64 training_loop """owa""" +329 64 negative_sampler """basic""" +329 64 evaluator """rankbased""" +329 65 dataset """wn18rr""" +329 65 model """hole""" +329 65 loss """softplus""" +329 65 regularizer """no""" +329 65 optimizer """adam""" +329 65 training_loop """owa""" +329 65 negative_sampler """basic""" +329 65 evaluator 
"""rankbased""" +330 1 model.embedding_dim 2.0 +330 1 optimizer.lr 0.006446629467179357 +330 1 training.batch_size 0.0 +330 1 training.label_smoothing 0.0037877831290459695 +330 2 model.embedding_dim 2.0 +330 2 optimizer.lr 0.050561400447269554 +330 2 training.batch_size 2.0 +330 2 training.label_smoothing 0.027021580410525634 +330 1 dataset """wn18rr""" +330 1 model """hole""" +330 1 loss """crossentropy""" +330 1 regularizer """no""" +330 1 optimizer """adam""" +330 1 training_loop """lcwa""" +330 1 evaluator """rankbased""" +330 2 dataset """wn18rr""" +330 2 model """hole""" +330 2 loss """crossentropy""" +330 2 regularizer """no""" +330 2 optimizer """adam""" +330 2 training_loop """lcwa""" +330 2 evaluator """rankbased""" +331 1 model.embedding_dim 1.0 +331 1 optimizer.lr 0.007639090336841558 +331 1 training.batch_size 0.0 +331 1 training.label_smoothing 0.12038567120304977 +331 2 model.embedding_dim 2.0 +331 2 optimizer.lr 0.010557856303946754 +331 2 training.batch_size 0.0 +331 2 training.label_smoothing 0.3217009542639881 +331 3 model.embedding_dim 0.0 +331 3 optimizer.lr 0.004762759395718602 +331 3 training.batch_size 1.0 +331 3 training.label_smoothing 0.0030099055295378816 +331 4 model.embedding_dim 1.0 +331 4 optimizer.lr 0.04128543502892083 +331 4 training.batch_size 0.0 +331 4 training.label_smoothing 0.8089520646048276 +331 5 model.embedding_dim 0.0 +331 5 optimizer.lr 0.013119282124500945 +331 5 training.batch_size 0.0 +331 5 training.label_smoothing 0.4535142174988899 +331 6 model.embedding_dim 2.0 +331 6 optimizer.lr 0.002051341740204166 +331 6 training.batch_size 1.0 +331 6 training.label_smoothing 0.15861500741905965 +331 1 dataset """wn18rr""" +331 1 model """hole""" +331 1 loss """crossentropy""" +331 1 regularizer """no""" +331 1 optimizer """adam""" +331 1 training_loop """lcwa""" +331 1 evaluator """rankbased""" +331 2 dataset """wn18rr""" +331 2 model """hole""" +331 2 loss """crossentropy""" +331 2 regularizer """no""" +331 2 optimizer 
"""adam""" +331 2 training_loop """lcwa""" +331 2 evaluator """rankbased""" +331 3 dataset """wn18rr""" +331 3 model """hole""" +331 3 loss """crossentropy""" +331 3 regularizer """no""" +331 3 optimizer """adam""" +331 3 training_loop """lcwa""" +331 3 evaluator """rankbased""" +331 4 dataset """wn18rr""" +331 4 model """hole""" +331 4 loss """crossentropy""" +331 4 regularizer """no""" +331 4 optimizer """adam""" +331 4 training_loop """lcwa""" +331 4 evaluator """rankbased""" +331 5 dataset """wn18rr""" +331 5 model """hole""" +331 5 loss """crossentropy""" +331 5 regularizer """no""" +331 5 optimizer """adam""" +331 5 training_loop """lcwa""" +331 5 evaluator """rankbased""" +331 6 dataset """wn18rr""" +331 6 model """hole""" +331 6 loss """crossentropy""" +331 6 regularizer """no""" +331 6 optimizer """adam""" +331 6 training_loop """lcwa""" +331 6 evaluator """rankbased""" +332 1 model.embedding_dim 2.0 +332 1 loss.margin 4.463351673806529 +332 1 optimizer.lr 0.09331362199318921 +332 1 negative_sampler.num_negs_per_pos 84.0 +332 1 training.batch_size 0.0 +332 2 model.embedding_dim 0.0 +332 2 loss.margin 7.366317150233155 +332 2 optimizer.lr 0.071713764770172 +332 2 negative_sampler.num_negs_per_pos 68.0 +332 2 training.batch_size 0.0 +332 3 model.embedding_dim 1.0 +332 3 loss.margin 4.407554147347163 +332 3 optimizer.lr 0.003931789626977441 +332 3 negative_sampler.num_negs_per_pos 86.0 +332 3 training.batch_size 2.0 +332 4 model.embedding_dim 2.0 +332 4 loss.margin 9.544672805558289 +332 4 optimizer.lr 0.029446379312677087 +332 4 negative_sampler.num_negs_per_pos 25.0 +332 4 training.batch_size 0.0 +332 5 model.embedding_dim 0.0 +332 5 loss.margin 9.045076448457193 +332 5 optimizer.lr 0.0021040423230898942 +332 5 negative_sampler.num_negs_per_pos 26.0 +332 5 training.batch_size 0.0 +332 6 model.embedding_dim 2.0 +332 6 loss.margin 8.419158280322979 +332 6 optimizer.lr 0.006242283637194862 +332 6 negative_sampler.num_negs_per_pos 29.0 +332 6 
training.batch_size 0.0 +332 7 model.embedding_dim 1.0 +332 7 loss.margin 4.271690080430108 +332 7 optimizer.lr 0.019358306593421807 +332 7 negative_sampler.num_negs_per_pos 32.0 +332 7 training.batch_size 1.0 +332 8 model.embedding_dim 2.0 +332 8 loss.margin 3.79813536178056 +332 8 optimizer.lr 0.0025947612914097792 +332 8 negative_sampler.num_negs_per_pos 3.0 +332 8 training.batch_size 1.0 +332 9 model.embedding_dim 1.0 +332 9 loss.margin 1.8467462591883268 +332 9 optimizer.lr 0.017203929327794583 +332 9 negative_sampler.num_negs_per_pos 81.0 +332 9 training.batch_size 0.0 +332 10 model.embedding_dim 0.0 +332 10 loss.margin 7.3449072417972765 +332 10 optimizer.lr 0.011797217942294064 +332 10 negative_sampler.num_negs_per_pos 34.0 +332 10 training.batch_size 1.0 +332 11 model.embedding_dim 2.0 +332 11 loss.margin 2.38356459918449 +332 11 optimizer.lr 0.03261038039864571 +332 11 negative_sampler.num_negs_per_pos 80.0 +332 11 training.batch_size 0.0 +332 12 model.embedding_dim 1.0 +332 12 loss.margin 5.0276058280018345 +332 12 optimizer.lr 0.0027645091830426887 +332 12 negative_sampler.num_negs_per_pos 93.0 +332 12 training.batch_size 0.0 +332 13 model.embedding_dim 2.0 +332 13 loss.margin 2.172500674889012 +332 13 optimizer.lr 0.001618543864555634 +332 13 negative_sampler.num_negs_per_pos 42.0 +332 13 training.batch_size 0.0 +332 14 model.embedding_dim 1.0 +332 14 loss.margin 2.2871552205143866 +332 14 optimizer.lr 0.041546334479080335 +332 14 negative_sampler.num_negs_per_pos 50.0 +332 14 training.batch_size 0.0 +332 15 model.embedding_dim 2.0 +332 15 loss.margin 6.267958837952656 +332 15 optimizer.lr 0.0574013190115236 +332 15 negative_sampler.num_negs_per_pos 21.0 +332 15 training.batch_size 0.0 +332 16 model.embedding_dim 1.0 +332 16 loss.margin 5.586650129763241 +332 16 optimizer.lr 0.05154496486975566 +332 16 negative_sampler.num_negs_per_pos 22.0 +332 16 training.batch_size 1.0 +332 17 model.embedding_dim 0.0 +332 17 loss.margin 3.097284960829133 +332 17 
optimizer.lr 0.002297967911819726 +332 17 negative_sampler.num_negs_per_pos 62.0 +332 17 training.batch_size 2.0 +332 18 model.embedding_dim 1.0 +332 18 loss.margin 5.684275685888249 +332 18 optimizer.lr 0.022651665000234817 +332 18 negative_sampler.num_negs_per_pos 47.0 +332 18 training.batch_size 1.0 +332 19 model.embedding_dim 1.0 +332 19 loss.margin 5.884150410996407 +332 19 optimizer.lr 0.00427710834265635 +332 19 negative_sampler.num_negs_per_pos 66.0 +332 19 training.batch_size 1.0 +332 20 model.embedding_dim 0.0 +332 20 loss.margin 0.8382076178024594 +332 20 optimizer.lr 0.001417663173890976 +332 20 negative_sampler.num_negs_per_pos 69.0 +332 20 training.batch_size 1.0 +332 21 model.embedding_dim 2.0 +332 21 loss.margin 4.559711151163788 +332 21 optimizer.lr 0.008657465414077206 +332 21 negative_sampler.num_negs_per_pos 90.0 +332 21 training.batch_size 2.0 +332 22 model.embedding_dim 2.0 +332 22 loss.margin 9.42154097176253 +332 22 optimizer.lr 0.07478399294178018 +332 22 negative_sampler.num_negs_per_pos 82.0 +332 22 training.batch_size 2.0 +332 23 model.embedding_dim 2.0 +332 23 loss.margin 7.457270455517331 +332 23 optimizer.lr 0.02235367272795279 +332 23 negative_sampler.num_negs_per_pos 49.0 +332 23 training.batch_size 0.0 +332 1 dataset """wn18rr""" +332 1 model """hole""" +332 1 loss """marginranking""" +332 1 regularizer """no""" +332 1 optimizer """adam""" +332 1 training_loop """owa""" +332 1 negative_sampler """basic""" +332 1 evaluator """rankbased""" +332 2 dataset """wn18rr""" +332 2 model """hole""" +332 2 loss """marginranking""" +332 2 regularizer """no""" +332 2 optimizer """adam""" +332 2 training_loop """owa""" +332 2 negative_sampler """basic""" +332 2 evaluator """rankbased""" +332 3 dataset """wn18rr""" +332 3 model """hole""" +332 3 loss """marginranking""" +332 3 regularizer """no""" +332 3 optimizer """adam""" +332 3 training_loop """owa""" +332 3 negative_sampler """basic""" +332 3 evaluator """rankbased""" +332 4 dataset 
"""wn18rr""" +332 4 model """hole""" +332 4 loss """marginranking""" +332 4 regularizer """no""" +332 4 optimizer """adam""" +332 4 training_loop """owa""" +332 4 negative_sampler """basic""" +332 4 evaluator """rankbased""" +332 5 dataset """wn18rr""" +332 5 model """hole""" +332 5 loss """marginranking""" +332 5 regularizer """no""" +332 5 optimizer """adam""" +332 5 training_loop """owa""" +332 5 negative_sampler """basic""" +332 5 evaluator """rankbased""" +332 6 dataset """wn18rr""" +332 6 model """hole""" +332 6 loss """marginranking""" +332 6 regularizer """no""" +332 6 optimizer """adam""" +332 6 training_loop """owa""" +332 6 negative_sampler """basic""" +332 6 evaluator """rankbased""" +332 7 dataset """wn18rr""" +332 7 model """hole""" +332 7 loss """marginranking""" +332 7 regularizer """no""" +332 7 optimizer """adam""" +332 7 training_loop """owa""" +332 7 negative_sampler """basic""" +332 7 evaluator """rankbased""" +332 8 dataset """wn18rr""" +332 8 model """hole""" +332 8 loss """marginranking""" +332 8 regularizer """no""" +332 8 optimizer """adam""" +332 8 training_loop """owa""" +332 8 negative_sampler """basic""" +332 8 evaluator """rankbased""" +332 9 dataset """wn18rr""" +332 9 model """hole""" +332 9 loss """marginranking""" +332 9 regularizer """no""" +332 9 optimizer """adam""" +332 9 training_loop """owa""" +332 9 negative_sampler """basic""" +332 9 evaluator """rankbased""" +332 10 dataset """wn18rr""" +332 10 model """hole""" +332 10 loss """marginranking""" +332 10 regularizer """no""" +332 10 optimizer """adam""" +332 10 training_loop """owa""" +332 10 negative_sampler """basic""" +332 10 evaluator """rankbased""" +332 11 dataset """wn18rr""" +332 11 model """hole""" +332 11 loss """marginranking""" +332 11 regularizer """no""" +332 11 optimizer """adam""" +332 11 training_loop """owa""" +332 11 negative_sampler """basic""" +332 11 evaluator """rankbased""" +332 12 dataset """wn18rr""" +332 12 model """hole""" +332 12 loss 
"""marginranking""" +332 12 regularizer """no""" +332 12 optimizer """adam""" +332 12 training_loop """owa""" +332 12 negative_sampler """basic""" +332 12 evaluator """rankbased""" +332 13 dataset """wn18rr""" +332 13 model """hole""" +332 13 loss """marginranking""" +332 13 regularizer """no""" +332 13 optimizer """adam""" +332 13 training_loop """owa""" +332 13 negative_sampler """basic""" +332 13 evaluator """rankbased""" +332 14 dataset """wn18rr""" +332 14 model """hole""" +332 14 loss """marginranking""" +332 14 regularizer """no""" +332 14 optimizer """adam""" +332 14 training_loop """owa""" +332 14 negative_sampler """basic""" +332 14 evaluator """rankbased""" +332 15 dataset """wn18rr""" +332 15 model """hole""" +332 15 loss """marginranking""" +332 15 regularizer """no""" +332 15 optimizer """adam""" +332 15 training_loop """owa""" +332 15 negative_sampler """basic""" +332 15 evaluator """rankbased""" +332 16 dataset """wn18rr""" +332 16 model """hole""" +332 16 loss """marginranking""" +332 16 regularizer """no""" +332 16 optimizer """adam""" +332 16 training_loop """owa""" +332 16 negative_sampler """basic""" +332 16 evaluator """rankbased""" +332 17 dataset """wn18rr""" +332 17 model """hole""" +332 17 loss """marginranking""" +332 17 regularizer """no""" +332 17 optimizer """adam""" +332 17 training_loop """owa""" +332 17 negative_sampler """basic""" +332 17 evaluator """rankbased""" +332 18 dataset """wn18rr""" +332 18 model """hole""" +332 18 loss """marginranking""" +332 18 regularizer """no""" +332 18 optimizer """adam""" +332 18 training_loop """owa""" +332 18 negative_sampler """basic""" +332 18 evaluator """rankbased""" +332 19 dataset """wn18rr""" +332 19 model """hole""" +332 19 loss """marginranking""" +332 19 regularizer """no""" +332 19 optimizer """adam""" +332 19 training_loop """owa""" +332 19 negative_sampler """basic""" +332 19 evaluator """rankbased""" +332 20 dataset """wn18rr""" +332 20 model """hole""" +332 20 loss 
"""marginranking""" +332 20 regularizer """no""" +332 20 optimizer """adam""" +332 20 training_loop """owa""" +332 20 negative_sampler """basic""" +332 20 evaluator """rankbased""" +332 21 dataset """wn18rr""" +332 21 model """hole""" +332 21 loss """marginranking""" +332 21 regularizer """no""" +332 21 optimizer """adam""" +332 21 training_loop """owa""" +332 21 negative_sampler """basic""" +332 21 evaluator """rankbased""" +332 22 dataset """wn18rr""" +332 22 model """hole""" +332 22 loss """marginranking""" +332 22 regularizer """no""" +332 22 optimizer """adam""" +332 22 training_loop """owa""" +332 22 negative_sampler """basic""" +332 22 evaluator """rankbased""" +332 23 dataset """wn18rr""" +332 23 model """hole""" +332 23 loss """marginranking""" +332 23 regularizer """no""" +332 23 optimizer """adam""" +332 23 training_loop """owa""" +332 23 negative_sampler """basic""" +332 23 evaluator """rankbased""" +333 1 model.embedding_dim 0.0 +333 1 loss.margin 1.036040667094538 +333 1 optimizer.lr 0.0017543502584726578 +333 1 negative_sampler.num_negs_per_pos 39.0 +333 1 training.batch_size 2.0 +333 2 model.embedding_dim 0.0 +333 2 loss.margin 3.727405500311979 +333 2 optimizer.lr 0.010484434093495277 +333 2 negative_sampler.num_negs_per_pos 77.0 +333 2 training.batch_size 0.0 +333 3 model.embedding_dim 2.0 +333 3 loss.margin 7.241573830345192 +333 3 optimizer.lr 0.009956143549103682 +333 3 negative_sampler.num_negs_per_pos 22.0 +333 3 training.batch_size 2.0 +333 4 model.embedding_dim 0.0 +333 4 loss.margin 2.255970597571477 +333 4 optimizer.lr 0.09185519623260638 +333 4 negative_sampler.num_negs_per_pos 55.0 +333 4 training.batch_size 2.0 +333 5 model.embedding_dim 1.0 +333 5 loss.margin 4.9302871342667585 +333 5 optimizer.lr 0.03138578884971499 +333 5 negative_sampler.num_negs_per_pos 91.0 +333 5 training.batch_size 0.0 +333 6 model.embedding_dim 0.0 +333 6 loss.margin 7.65936777536258 +333 6 optimizer.lr 0.002786691443931173 +333 6 
negative_sampler.num_negs_per_pos 48.0 +333 6 training.batch_size 0.0 +333 7 model.embedding_dim 0.0 +333 7 loss.margin 6.556389646780337 +333 7 optimizer.lr 0.09724403256309541 +333 7 negative_sampler.num_negs_per_pos 86.0 +333 7 training.batch_size 2.0 +333 8 model.embedding_dim 1.0 +333 8 loss.margin 3.0350349632333073 +333 8 optimizer.lr 0.06211049795612743 +333 8 negative_sampler.num_negs_per_pos 62.0 +333 8 training.batch_size 2.0 +333 9 model.embedding_dim 0.0 +333 9 loss.margin 2.266674673025642 +333 9 optimizer.lr 0.0029689167563624838 +333 9 negative_sampler.num_negs_per_pos 33.0 +333 9 training.batch_size 1.0 +333 10 model.embedding_dim 0.0 +333 10 loss.margin 4.57082232895709 +333 10 optimizer.lr 0.08962795606781584 +333 10 negative_sampler.num_negs_per_pos 61.0 +333 10 training.batch_size 1.0 +333 11 model.embedding_dim 1.0 +333 11 loss.margin 5.545287203112949 +333 11 optimizer.lr 0.00520231945345363 +333 11 negative_sampler.num_negs_per_pos 19.0 +333 11 training.batch_size 1.0 +333 12 model.embedding_dim 2.0 +333 12 loss.margin 8.258160109379379 +333 12 optimizer.lr 0.0039274541860696175 +333 12 negative_sampler.num_negs_per_pos 95.0 +333 12 training.batch_size 1.0 +333 13 model.embedding_dim 1.0 +333 13 loss.margin 9.342697393671124 +333 13 optimizer.lr 0.01395885208105792 +333 13 negative_sampler.num_negs_per_pos 44.0 +333 13 training.batch_size 1.0 +333 14 model.embedding_dim 0.0 +333 14 loss.margin 4.3098761048216945 +333 14 optimizer.lr 0.001160223295934961 +333 14 negative_sampler.num_negs_per_pos 53.0 +333 14 training.batch_size 0.0 +333 15 model.embedding_dim 1.0 +333 15 loss.margin 4.121915596428903 +333 15 optimizer.lr 0.001222667810561431 +333 15 negative_sampler.num_negs_per_pos 13.0 +333 15 training.batch_size 0.0 +333 16 model.embedding_dim 1.0 +333 16 loss.margin 0.8224404728084275 +333 16 optimizer.lr 0.0013823375584060666 +333 16 negative_sampler.num_negs_per_pos 45.0 +333 16 training.batch_size 1.0 +333 17 model.embedding_dim 0.0 
+333 17 loss.margin 7.921529349140046 +333 17 optimizer.lr 0.020513167601021953 +333 17 negative_sampler.num_negs_per_pos 0.0 +333 17 training.batch_size 2.0 +333 18 model.embedding_dim 2.0 +333 18 loss.margin 1.5614646156434842 +333 18 optimizer.lr 0.0469181567510122 +333 18 negative_sampler.num_negs_per_pos 77.0 +333 18 training.batch_size 0.0 +333 19 model.embedding_dim 2.0 +333 19 loss.margin 5.881138552556728 +333 19 optimizer.lr 0.0026907994120672915 +333 19 negative_sampler.num_negs_per_pos 34.0 +333 19 training.batch_size 1.0 +333 20 model.embedding_dim 0.0 +333 20 loss.margin 6.23653186596193 +333 20 optimizer.lr 0.0030073712178168436 +333 20 negative_sampler.num_negs_per_pos 98.0 +333 20 training.batch_size 2.0 +333 21 model.embedding_dim 1.0 +333 21 loss.margin 3.96517639550241 +333 21 optimizer.lr 0.004631527810470226 +333 21 negative_sampler.num_negs_per_pos 45.0 +333 21 training.batch_size 2.0 +333 22 model.embedding_dim 0.0 +333 22 loss.margin 2.2183997341548096 +333 22 optimizer.lr 0.057042849278670295 +333 22 negative_sampler.num_negs_per_pos 1.0 +333 22 training.batch_size 0.0 +333 23 model.embedding_dim 2.0 +333 23 loss.margin 6.003144909668543 +333 23 optimizer.lr 0.0014031060990333931 +333 23 negative_sampler.num_negs_per_pos 30.0 +333 23 training.batch_size 2.0 +333 24 model.embedding_dim 0.0 +333 24 loss.margin 7.430747077351357 +333 24 optimizer.lr 0.03201102316853047 +333 24 negative_sampler.num_negs_per_pos 39.0 +333 24 training.batch_size 1.0 +333 25 model.embedding_dim 2.0 +333 25 loss.margin 3.2096819226494433 +333 25 optimizer.lr 0.003376795038593479 +333 25 negative_sampler.num_negs_per_pos 54.0 +333 25 training.batch_size 0.0 +333 26 model.embedding_dim 0.0 +333 26 loss.margin 2.4603516811964146 +333 26 optimizer.lr 0.010847284765238278 +333 26 negative_sampler.num_negs_per_pos 22.0 +333 26 training.batch_size 0.0 +333 27 model.embedding_dim 1.0 +333 27 loss.margin 0.9527985826809364 +333 27 optimizer.lr 0.001334197288538692 +333 27 
negative_sampler.num_negs_per_pos 55.0 +333 27 training.batch_size 2.0 +333 28 model.embedding_dim 0.0 +333 28 loss.margin 5.6238804022135485 +333 28 optimizer.lr 0.031681004898532386 +333 28 negative_sampler.num_negs_per_pos 9.0 +333 28 training.batch_size 1.0 +333 29 model.embedding_dim 2.0 +333 29 loss.margin 5.579450439633264 +333 29 optimizer.lr 0.006086443589862491 +333 29 negative_sampler.num_negs_per_pos 22.0 +333 29 training.batch_size 2.0 +333 30 model.embedding_dim 1.0 +333 30 loss.margin 8.130395183416494 +333 30 optimizer.lr 0.001015870499440008 +333 30 negative_sampler.num_negs_per_pos 14.0 +333 30 training.batch_size 1.0 +333 31 model.embedding_dim 0.0 +333 31 loss.margin 2.3521041216375376 +333 31 optimizer.lr 0.031123942047532587 +333 31 negative_sampler.num_negs_per_pos 20.0 +333 31 training.batch_size 1.0 +333 32 model.embedding_dim 0.0 +333 32 loss.margin 2.9637234639003838 +333 32 optimizer.lr 0.0016658569942769798 +333 32 negative_sampler.num_negs_per_pos 37.0 +333 32 training.batch_size 0.0 +333 33 model.embedding_dim 0.0 +333 33 loss.margin 1.169554861026824 +333 33 optimizer.lr 0.0017678158866818575 +333 33 negative_sampler.num_negs_per_pos 48.0 +333 33 training.batch_size 0.0 +333 34 model.embedding_dim 1.0 +333 34 loss.margin 5.974345532046843 +333 34 optimizer.lr 0.0015899111059071332 +333 34 negative_sampler.num_negs_per_pos 63.0 +333 34 training.batch_size 1.0 +333 35 model.embedding_dim 2.0 +333 35 loss.margin 4.424145047868827 +333 35 optimizer.lr 0.01662573512860716 +333 35 negative_sampler.num_negs_per_pos 79.0 +333 35 training.batch_size 2.0 +333 36 model.embedding_dim 0.0 +333 36 loss.margin 8.454010597468827 +333 36 optimizer.lr 0.07818302253702848 +333 36 negative_sampler.num_negs_per_pos 24.0 +333 36 training.batch_size 2.0 +333 37 model.embedding_dim 1.0 +333 37 loss.margin 1.8044412537469245 +333 37 optimizer.lr 0.07193060340667501 +333 37 negative_sampler.num_negs_per_pos 14.0 +333 37 training.batch_size 1.0 +333 38 
model.embedding_dim 1.0 +333 38 loss.margin 3.894012109328775 +333 38 optimizer.lr 0.003957703560913003 +333 38 negative_sampler.num_negs_per_pos 58.0 +333 38 training.batch_size 1.0 +333 39 model.embedding_dim 0.0 +333 39 loss.margin 6.4623203185866185 +333 39 optimizer.lr 0.006947613559663061 +333 39 negative_sampler.num_negs_per_pos 25.0 +333 39 training.batch_size 0.0 +333 40 model.embedding_dim 2.0 +333 40 loss.margin 4.631775004425929 +333 40 optimizer.lr 0.003157935112625671 +333 40 negative_sampler.num_negs_per_pos 64.0 +333 40 training.batch_size 1.0 +333 41 model.embedding_dim 2.0 +333 41 loss.margin 7.68972719614339 +333 41 optimizer.lr 0.0018468841354229288 +333 41 negative_sampler.num_negs_per_pos 74.0 +333 41 training.batch_size 1.0 +333 42 model.embedding_dim 0.0 +333 42 loss.margin 6.327474953856362 +333 42 optimizer.lr 0.014661417056895875 +333 42 negative_sampler.num_negs_per_pos 97.0 +333 42 training.batch_size 1.0 +333 43 model.embedding_dim 2.0 +333 43 loss.margin 3.415403057137695 +333 43 optimizer.lr 0.030135675480889672 +333 43 negative_sampler.num_negs_per_pos 74.0 +333 43 training.batch_size 2.0 +333 44 model.embedding_dim 0.0 +333 44 loss.margin 3.10933717038559 +333 44 optimizer.lr 0.001065119276714804 +333 44 negative_sampler.num_negs_per_pos 96.0 +333 44 training.batch_size 0.0 +333 45 model.embedding_dim 1.0 +333 45 loss.margin 5.038147210075035 +333 45 optimizer.lr 0.009236282374194498 +333 45 negative_sampler.num_negs_per_pos 47.0 +333 45 training.batch_size 0.0 +333 46 model.embedding_dim 1.0 +333 46 loss.margin 2.4100542231698854 +333 46 optimizer.lr 0.006481393132328457 +333 46 negative_sampler.num_negs_per_pos 77.0 +333 46 training.batch_size 1.0 +333 47 model.embedding_dim 0.0 +333 47 loss.margin 7.171626466943303 +333 47 optimizer.lr 0.018016819946264626 +333 47 negative_sampler.num_negs_per_pos 12.0 +333 47 training.batch_size 0.0 +333 48 model.embedding_dim 2.0 +333 48 loss.margin 3.9515597282201442 +333 48 optimizer.lr 
0.0017932517722824243 +333 48 negative_sampler.num_negs_per_pos 74.0 +333 48 training.batch_size 0.0 +333 49 model.embedding_dim 0.0 +333 49 loss.margin 1.5168123509971956 +333 49 optimizer.lr 0.0339800262846934 +333 49 negative_sampler.num_negs_per_pos 36.0 +333 49 training.batch_size 0.0 +333 50 model.embedding_dim 0.0 +333 50 loss.margin 8.085926438754987 +333 50 optimizer.lr 0.09564689792247805 +333 50 negative_sampler.num_negs_per_pos 94.0 +333 50 training.batch_size 2.0 +333 51 model.embedding_dim 0.0 +333 51 loss.margin 6.224986629933966 +333 51 optimizer.lr 0.03849781349140384 +333 51 negative_sampler.num_negs_per_pos 84.0 +333 51 training.batch_size 1.0 +333 52 model.embedding_dim 0.0 +333 52 loss.margin 3.038107259348503 +333 52 optimizer.lr 0.009832946516898132 +333 52 negative_sampler.num_negs_per_pos 60.0 +333 52 training.batch_size 0.0 +333 53 model.embedding_dim 2.0 +333 53 loss.margin 7.14565793963043 +333 53 optimizer.lr 0.004583916660941791 +333 53 negative_sampler.num_negs_per_pos 37.0 +333 53 training.batch_size 1.0 +333 54 model.embedding_dim 2.0 +333 54 loss.margin 2.344302800356972 +333 54 optimizer.lr 0.0093985859752671 +333 54 negative_sampler.num_negs_per_pos 0.0 +333 54 training.batch_size 1.0 +333 55 model.embedding_dim 0.0 +333 55 loss.margin 6.7624996125007 +333 55 optimizer.lr 0.010974967995029295 +333 55 negative_sampler.num_negs_per_pos 60.0 +333 55 training.batch_size 1.0 +333 56 model.embedding_dim 0.0 +333 56 loss.margin 6.870216637001777 +333 56 optimizer.lr 0.005800868961540196 +333 56 negative_sampler.num_negs_per_pos 66.0 +333 56 training.batch_size 1.0 +333 57 model.embedding_dim 2.0 +333 57 loss.margin 6.049174491503337 +333 57 optimizer.lr 0.05826568908881175 +333 57 negative_sampler.num_negs_per_pos 94.0 +333 57 training.batch_size 0.0 +333 58 model.embedding_dim 0.0 +333 58 loss.margin 0.5063099102558675 +333 58 optimizer.lr 0.008339375931262407 +333 58 negative_sampler.num_negs_per_pos 70.0 +333 58 training.batch_size 
2.0 +333 59 model.embedding_dim 1.0 +333 59 loss.margin 5.0688223203690415 +333 59 optimizer.lr 0.001218026146960183 +333 59 negative_sampler.num_negs_per_pos 71.0 +333 59 training.batch_size 2.0 +333 60 model.embedding_dim 0.0 +333 60 loss.margin 9.152783016752727 +333 60 optimizer.lr 0.008474701096220212 +333 60 negative_sampler.num_negs_per_pos 98.0 +333 60 training.batch_size 0.0 +333 1 dataset """wn18rr""" +333 1 model """hole""" +333 1 loss """marginranking""" +333 1 regularizer """no""" +333 1 optimizer """adam""" +333 1 training_loop """owa""" +333 1 negative_sampler """basic""" +333 1 evaluator """rankbased""" +333 2 dataset """wn18rr""" +333 2 model """hole""" +333 2 loss """marginranking""" +333 2 regularizer """no""" +333 2 optimizer """adam""" +333 2 training_loop """owa""" +333 2 negative_sampler """basic""" +333 2 evaluator """rankbased""" +333 3 dataset """wn18rr""" +333 3 model """hole""" +333 3 loss """marginranking""" +333 3 regularizer """no""" +333 3 optimizer """adam""" +333 3 training_loop """owa""" +333 3 negative_sampler """basic""" +333 3 evaluator """rankbased""" +333 4 dataset """wn18rr""" +333 4 model """hole""" +333 4 loss """marginranking""" +333 4 regularizer """no""" +333 4 optimizer """adam""" +333 4 training_loop """owa""" +333 4 negative_sampler """basic""" +333 4 evaluator """rankbased""" +333 5 dataset """wn18rr""" +333 5 model """hole""" +333 5 loss """marginranking""" +333 5 regularizer """no""" +333 5 optimizer """adam""" +333 5 training_loop """owa""" +333 5 negative_sampler """basic""" +333 5 evaluator """rankbased""" +333 6 dataset """wn18rr""" +333 6 model """hole""" +333 6 loss """marginranking""" +333 6 regularizer """no""" +333 6 optimizer """adam""" +333 6 training_loop """owa""" +333 6 negative_sampler """basic""" +333 6 evaluator """rankbased""" +333 7 dataset """wn18rr""" +333 7 model """hole""" +333 7 loss """marginranking""" +333 7 regularizer """no""" +333 7 optimizer """adam""" +333 7 training_loop """owa""" 
+333 7 negative_sampler """basic""" +333 7 evaluator """rankbased""" +333 8 dataset """wn18rr""" +333 8 model """hole""" +333 8 loss """marginranking""" +333 8 regularizer """no""" +333 8 optimizer """adam""" +333 8 training_loop """owa""" +333 8 negative_sampler """basic""" +333 8 evaluator """rankbased""" +333 9 dataset """wn18rr""" +333 9 model """hole""" +333 9 loss """marginranking""" +333 9 regularizer """no""" +333 9 optimizer """adam""" +333 9 training_loop """owa""" +333 9 negative_sampler """basic""" +333 9 evaluator """rankbased""" +333 10 dataset """wn18rr""" +333 10 model """hole""" +333 10 loss """marginranking""" +333 10 regularizer """no""" +333 10 optimizer """adam""" +333 10 training_loop """owa""" +333 10 negative_sampler """basic""" +333 10 evaluator """rankbased""" +333 11 dataset """wn18rr""" +333 11 model """hole""" +333 11 loss """marginranking""" +333 11 regularizer """no""" +333 11 optimizer """adam""" +333 11 training_loop """owa""" +333 11 negative_sampler """basic""" +333 11 evaluator """rankbased""" +333 12 dataset """wn18rr""" +333 12 model """hole""" +333 12 loss """marginranking""" +333 12 regularizer """no""" +333 12 optimizer """adam""" +333 12 training_loop """owa""" +333 12 negative_sampler """basic""" +333 12 evaluator """rankbased""" +333 13 dataset """wn18rr""" +333 13 model """hole""" +333 13 loss """marginranking""" +333 13 regularizer """no""" +333 13 optimizer """adam""" +333 13 training_loop """owa""" +333 13 negative_sampler """basic""" +333 13 evaluator """rankbased""" +333 14 dataset """wn18rr""" +333 14 model """hole""" +333 14 loss """marginranking""" +333 14 regularizer """no""" +333 14 optimizer """adam""" +333 14 training_loop """owa""" +333 14 negative_sampler """basic""" +333 14 evaluator """rankbased""" +333 15 dataset """wn18rr""" +333 15 model """hole""" +333 15 loss """marginranking""" +333 15 regularizer """no""" +333 15 optimizer """adam""" +333 15 training_loop """owa""" +333 15 negative_sampler 
"""basic""" +333 15 evaluator """rankbased""" +333 16 dataset """wn18rr""" +333 16 model """hole""" +333 16 loss """marginranking""" +333 16 regularizer """no""" +333 16 optimizer """adam""" +333 16 training_loop """owa""" +333 16 negative_sampler """basic""" +333 16 evaluator """rankbased""" +333 17 dataset """wn18rr""" +333 17 model """hole""" +333 17 loss """marginranking""" +333 17 regularizer """no""" +333 17 optimizer """adam""" +333 17 training_loop """owa""" +333 17 negative_sampler """basic""" +333 17 evaluator """rankbased""" +333 18 dataset """wn18rr""" +333 18 model """hole""" +333 18 loss """marginranking""" +333 18 regularizer """no""" +333 18 optimizer """adam""" +333 18 training_loop """owa""" +333 18 negative_sampler """basic""" +333 18 evaluator """rankbased""" +333 19 dataset """wn18rr""" +333 19 model """hole""" +333 19 loss """marginranking""" +333 19 regularizer """no""" +333 19 optimizer """adam""" +333 19 training_loop """owa""" +333 19 negative_sampler """basic""" +333 19 evaluator """rankbased""" +333 20 dataset """wn18rr""" +333 20 model """hole""" +333 20 loss """marginranking""" +333 20 regularizer """no""" +333 20 optimizer """adam""" +333 20 training_loop """owa""" +333 20 negative_sampler """basic""" +333 20 evaluator """rankbased""" +333 21 dataset """wn18rr""" +333 21 model """hole""" +333 21 loss """marginranking""" +333 21 regularizer """no""" +333 21 optimizer """adam""" +333 21 training_loop """owa""" +333 21 negative_sampler """basic""" +333 21 evaluator """rankbased""" +333 22 dataset """wn18rr""" +333 22 model """hole""" +333 22 loss """marginranking""" +333 22 regularizer """no""" +333 22 optimizer """adam""" +333 22 training_loop """owa""" +333 22 negative_sampler """basic""" +333 22 evaluator """rankbased""" +333 23 dataset """wn18rr""" +333 23 model """hole""" +333 23 loss """marginranking""" +333 23 regularizer """no""" +333 23 optimizer """adam""" +333 23 training_loop """owa""" +333 23 negative_sampler """basic""" 
+333 23 evaluator """rankbased""" +333 24 dataset """wn18rr""" +333 24 model """hole""" +333 24 loss """marginranking""" +333 24 regularizer """no""" +333 24 optimizer """adam""" +333 24 training_loop """owa""" +333 24 negative_sampler """basic""" +333 24 evaluator """rankbased""" +333 25 dataset """wn18rr""" +333 25 model """hole""" +333 25 loss """marginranking""" +333 25 regularizer """no""" +333 25 optimizer """adam""" +333 25 training_loop """owa""" +333 25 negative_sampler """basic""" +333 25 evaluator """rankbased""" +333 26 dataset """wn18rr""" +333 26 model """hole""" +333 26 loss """marginranking""" +333 26 regularizer """no""" +333 26 optimizer """adam""" +333 26 training_loop """owa""" +333 26 negative_sampler """basic""" +333 26 evaluator """rankbased""" +333 27 dataset """wn18rr""" +333 27 model """hole""" +333 27 loss """marginranking""" +333 27 regularizer """no""" +333 27 optimizer """adam""" +333 27 training_loop """owa""" +333 27 negative_sampler """basic""" +333 27 evaluator """rankbased""" +333 28 dataset """wn18rr""" +333 28 model """hole""" +333 28 loss """marginranking""" +333 28 regularizer """no""" +333 28 optimizer """adam""" +333 28 training_loop """owa""" +333 28 negative_sampler """basic""" +333 28 evaluator """rankbased""" +333 29 dataset """wn18rr""" +333 29 model """hole""" +333 29 loss """marginranking""" +333 29 regularizer """no""" +333 29 optimizer """adam""" +333 29 training_loop """owa""" +333 29 negative_sampler """basic""" +333 29 evaluator """rankbased""" +333 30 dataset """wn18rr""" +333 30 model """hole""" +333 30 loss """marginranking""" +333 30 regularizer """no""" +333 30 optimizer """adam""" +333 30 training_loop """owa""" +333 30 negative_sampler """basic""" +333 30 evaluator """rankbased""" +333 31 dataset """wn18rr""" +333 31 model """hole""" +333 31 loss """marginranking""" +333 31 regularizer """no""" +333 31 optimizer """adam""" +333 31 training_loop """owa""" +333 31 negative_sampler """basic""" +333 31 
evaluator """rankbased""" +333 32 dataset """wn18rr""" +333 32 model """hole""" +333 32 loss """marginranking""" +333 32 regularizer """no""" +333 32 optimizer """adam""" +333 32 training_loop """owa""" +333 32 negative_sampler """basic""" +333 32 evaluator """rankbased""" +333 33 dataset """wn18rr""" +333 33 model """hole""" +333 33 loss """marginranking""" +333 33 regularizer """no""" +333 33 optimizer """adam""" +333 33 training_loop """owa""" +333 33 negative_sampler """basic""" +333 33 evaluator """rankbased""" +333 34 dataset """wn18rr""" +333 34 model """hole""" +333 34 loss """marginranking""" +333 34 regularizer """no""" +333 34 optimizer """adam""" +333 34 training_loop """owa""" +333 34 negative_sampler """basic""" +333 34 evaluator """rankbased""" +333 35 dataset """wn18rr""" +333 35 model """hole""" +333 35 loss """marginranking""" +333 35 regularizer """no""" +333 35 optimizer """adam""" +333 35 training_loop """owa""" +333 35 negative_sampler """basic""" +333 35 evaluator """rankbased""" +333 36 dataset """wn18rr""" +333 36 model """hole""" +333 36 loss """marginranking""" +333 36 regularizer """no""" +333 36 optimizer """adam""" +333 36 training_loop """owa""" +333 36 negative_sampler """basic""" +333 36 evaluator """rankbased""" +333 37 dataset """wn18rr""" +333 37 model """hole""" +333 37 loss """marginranking""" +333 37 regularizer """no""" +333 37 optimizer """adam""" +333 37 training_loop """owa""" +333 37 negative_sampler """basic""" +333 37 evaluator """rankbased""" +333 38 dataset """wn18rr""" +333 38 model """hole""" +333 38 loss """marginranking""" +333 38 regularizer """no""" +333 38 optimizer """adam""" +333 38 training_loop """owa""" +333 38 negative_sampler """basic""" +333 38 evaluator """rankbased""" +333 39 dataset """wn18rr""" +333 39 model """hole""" +333 39 loss """marginranking""" +333 39 regularizer """no""" +333 39 optimizer """adam""" +333 39 training_loop """owa""" +333 39 negative_sampler """basic""" +333 39 evaluator 
"""rankbased""" +333 40 dataset """wn18rr""" +333 40 model """hole""" +333 40 loss """marginranking""" +333 40 regularizer """no""" +333 40 optimizer """adam""" +333 40 training_loop """owa""" +333 40 negative_sampler """basic""" +333 40 evaluator """rankbased""" +333 41 dataset """wn18rr""" +333 41 model """hole""" +333 41 loss """marginranking""" +333 41 regularizer """no""" +333 41 optimizer """adam""" +333 41 training_loop """owa""" +333 41 negative_sampler """basic""" +333 41 evaluator """rankbased""" +333 42 dataset """wn18rr""" +333 42 model """hole""" +333 42 loss """marginranking""" +333 42 regularizer """no""" +333 42 optimizer """adam""" +333 42 training_loop """owa""" +333 42 negative_sampler """basic""" +333 42 evaluator """rankbased""" +333 43 dataset """wn18rr""" +333 43 model """hole""" +333 43 loss """marginranking""" +333 43 regularizer """no""" +333 43 optimizer """adam""" +333 43 training_loop """owa""" +333 43 negative_sampler """basic""" +333 43 evaluator """rankbased""" +333 44 dataset """wn18rr""" +333 44 model """hole""" +333 44 loss """marginranking""" +333 44 regularizer """no""" +333 44 optimizer """adam""" +333 44 training_loop """owa""" +333 44 negative_sampler """basic""" +333 44 evaluator """rankbased""" +333 45 dataset """wn18rr""" +333 45 model """hole""" +333 45 loss """marginranking""" +333 45 regularizer """no""" +333 45 optimizer """adam""" +333 45 training_loop """owa""" +333 45 negative_sampler """basic""" +333 45 evaluator """rankbased""" +333 46 dataset """wn18rr""" +333 46 model """hole""" +333 46 loss """marginranking""" +333 46 regularizer """no""" +333 46 optimizer """adam""" +333 46 training_loop """owa""" +333 46 negative_sampler """basic""" +333 46 evaluator """rankbased""" +333 47 dataset """wn18rr""" +333 47 model """hole""" +333 47 loss """marginranking""" +333 47 regularizer """no""" +333 47 optimizer """adam""" +333 47 training_loop """owa""" +333 47 negative_sampler """basic""" +333 47 evaluator """rankbased""" 
+333 48 dataset """wn18rr""" +333 48 model """hole""" +333 48 loss """marginranking""" +333 48 regularizer """no""" +333 48 optimizer """adam""" +333 48 training_loop """owa""" +333 48 negative_sampler """basic""" +333 48 evaluator """rankbased""" +333 49 dataset """wn18rr""" +333 49 model """hole""" +333 49 loss """marginranking""" +333 49 regularizer """no""" +333 49 optimizer """adam""" +333 49 training_loop """owa""" +333 49 negative_sampler """basic""" +333 49 evaluator """rankbased""" +333 50 dataset """wn18rr""" +333 50 model """hole""" +333 50 loss """marginranking""" +333 50 regularizer """no""" +333 50 optimizer """adam""" +333 50 training_loop """owa""" +333 50 negative_sampler """basic""" +333 50 evaluator """rankbased""" +333 51 dataset """wn18rr""" +333 51 model """hole""" +333 51 loss """marginranking""" +333 51 regularizer """no""" +333 51 optimizer """adam""" +333 51 training_loop """owa""" +333 51 negative_sampler """basic""" +333 51 evaluator """rankbased""" +333 52 dataset """wn18rr""" +333 52 model """hole""" +333 52 loss """marginranking""" +333 52 regularizer """no""" +333 52 optimizer """adam""" +333 52 training_loop """owa""" +333 52 negative_sampler """basic""" +333 52 evaluator """rankbased""" +333 53 dataset """wn18rr""" +333 53 model """hole""" +333 53 loss """marginranking""" +333 53 regularizer """no""" +333 53 optimizer """adam""" +333 53 training_loop """owa""" +333 53 negative_sampler """basic""" +333 53 evaluator """rankbased""" +333 54 dataset """wn18rr""" +333 54 model """hole""" +333 54 loss """marginranking""" +333 54 regularizer """no""" +333 54 optimizer """adam""" +333 54 training_loop """owa""" +333 54 negative_sampler """basic""" +333 54 evaluator """rankbased""" +333 55 dataset """wn18rr""" +333 55 model """hole""" +333 55 loss """marginranking""" +333 55 regularizer """no""" +333 55 optimizer """adam""" +333 55 training_loop """owa""" +333 55 negative_sampler """basic""" +333 55 evaluator """rankbased""" +333 56 dataset 
"""wn18rr""" +333 56 model """hole""" +333 56 loss """marginranking""" +333 56 regularizer """no""" +333 56 optimizer """adam""" +333 56 training_loop """owa""" +333 56 negative_sampler """basic""" +333 56 evaluator """rankbased""" +333 57 dataset """wn18rr""" +333 57 model """hole""" +333 57 loss """marginranking""" +333 57 regularizer """no""" +333 57 optimizer """adam""" +333 57 training_loop """owa""" +333 57 negative_sampler """basic""" +333 57 evaluator """rankbased""" +333 58 dataset """wn18rr""" +333 58 model """hole""" +333 58 loss """marginranking""" +333 58 regularizer """no""" +333 58 optimizer """adam""" +333 58 training_loop """owa""" +333 58 negative_sampler """basic""" +333 58 evaluator """rankbased""" +333 59 dataset """wn18rr""" +333 59 model """hole""" +333 59 loss """marginranking""" +333 59 regularizer """no""" +333 59 optimizer """adam""" +333 59 training_loop """owa""" +333 59 negative_sampler """basic""" +333 59 evaluator """rankbased""" +333 60 dataset """wn18rr""" +333 60 model """hole""" +333 60 loss """marginranking""" +333 60 regularizer """no""" +333 60 optimizer """adam""" +333 60 training_loop """owa""" +333 60 negative_sampler """basic""" +333 60 evaluator """rankbased""" +334 1 model.embedding_dim 0.0 +334 1 loss.margin 6.216766533377296 +334 1 loss.adversarial_temperature 0.14180515504183597 +334 1 optimizer.lr 0.0035286427158614047 +334 1 negative_sampler.num_negs_per_pos 49.0 +334 1 training.batch_size 1.0 +334 2 model.embedding_dim 2.0 +334 2 loss.margin 27.951262015065033 +334 2 loss.adversarial_temperature 0.18837611861320838 +334 2 optimizer.lr 0.015957620337759004 +334 2 negative_sampler.num_negs_per_pos 54.0 +334 2 training.batch_size 0.0 +334 3 model.embedding_dim 2.0 +334 3 loss.margin 24.99998507335283 +334 3 loss.adversarial_temperature 0.2033576233520809 +334 3 optimizer.lr 0.011583658805570149 +334 3 negative_sampler.num_negs_per_pos 34.0 +334 3 training.batch_size 0.0 +334 4 model.embedding_dim 2.0 +334 4 
loss.margin 22.00644967080616 +334 4 loss.adversarial_temperature 0.44288994383368213 +334 4 optimizer.lr 0.005618780810288169 +334 4 negative_sampler.num_negs_per_pos 86.0 +334 4 training.batch_size 1.0 +334 5 model.embedding_dim 1.0 +334 5 loss.margin 29.5510058483882 +334 5 loss.adversarial_temperature 0.20789220941282738 +334 5 optimizer.lr 0.08651542206570086 +334 5 negative_sampler.num_negs_per_pos 0.0 +334 5 training.batch_size 1.0 +334 6 model.embedding_dim 0.0 +334 6 loss.margin 3.3470235141142712 +334 6 loss.adversarial_temperature 0.7950998582162025 +334 6 optimizer.lr 0.016063998535566094 +334 6 negative_sampler.num_negs_per_pos 90.0 +334 6 training.batch_size 2.0 +334 7 model.embedding_dim 1.0 +334 7 loss.margin 29.108942198069347 +334 7 loss.adversarial_temperature 0.46945742924980033 +334 7 optimizer.lr 0.006058545965126544 +334 7 negative_sampler.num_negs_per_pos 59.0 +334 7 training.batch_size 0.0 +334 8 model.embedding_dim 0.0 +334 8 loss.margin 28.309690367324222 +334 8 loss.adversarial_temperature 0.7886193263837682 +334 8 optimizer.lr 0.045585275919391544 +334 8 negative_sampler.num_negs_per_pos 96.0 +334 8 training.batch_size 2.0 +334 9 model.embedding_dim 1.0 +334 9 loss.margin 25.954013016344575 +334 9 loss.adversarial_temperature 0.6714515697736999 +334 9 optimizer.lr 0.032099836784424345 +334 9 negative_sampler.num_negs_per_pos 6.0 +334 9 training.batch_size 0.0 +334 10 model.embedding_dim 2.0 +334 10 loss.margin 2.143266707596873 +334 10 loss.adversarial_temperature 0.851861129918517 +334 10 optimizer.lr 0.005610822998015278 +334 10 negative_sampler.num_negs_per_pos 61.0 +334 10 training.batch_size 2.0 +334 11 model.embedding_dim 0.0 +334 11 loss.margin 27.50142737533441 +334 11 loss.adversarial_temperature 0.6767181220857038 +334 11 optimizer.lr 0.011646188941552798 +334 11 negative_sampler.num_negs_per_pos 41.0 +334 11 training.batch_size 1.0 +334 12 model.embedding_dim 1.0 +334 12 loss.margin 1.9744418375562418 +334 12 
loss.adversarial_temperature 0.8355050520067181 +334 12 optimizer.lr 0.022859471922059227 +334 12 negative_sampler.num_negs_per_pos 14.0 +334 12 training.batch_size 1.0 +334 13 model.embedding_dim 1.0 +334 13 loss.margin 10.77166342998043 +334 13 loss.adversarial_temperature 0.9006383614171151 +334 13 optimizer.lr 0.00886590178319497 +334 13 negative_sampler.num_negs_per_pos 37.0 +334 13 training.batch_size 2.0 +334 14 model.embedding_dim 0.0 +334 14 loss.margin 2.534001914300859 +334 14 loss.adversarial_temperature 0.9232555313255717 +334 14 optimizer.lr 0.0024226931124754145 +334 14 negative_sampler.num_negs_per_pos 14.0 +334 14 training.batch_size 1.0 +334 15 model.embedding_dim 2.0 +334 15 loss.margin 14.760150943242888 +334 15 loss.adversarial_temperature 0.6096195850326208 +334 15 optimizer.lr 0.03394907587566075 +334 15 negative_sampler.num_negs_per_pos 86.0 +334 15 training.batch_size 2.0 +334 16 model.embedding_dim 1.0 +334 16 loss.margin 1.3548021066891154 +334 16 loss.adversarial_temperature 0.5861006078074573 +334 16 optimizer.lr 0.08543746284028274 +334 16 negative_sampler.num_negs_per_pos 4.0 +334 16 training.batch_size 2.0 +334 17 model.embedding_dim 0.0 +334 17 loss.margin 23.5014787491673 +334 17 loss.adversarial_temperature 0.26932886411111356 +334 17 optimizer.lr 0.04001687668701412 +334 17 negative_sampler.num_negs_per_pos 7.0 +334 17 training.batch_size 2.0 +334 18 model.embedding_dim 1.0 +334 18 loss.margin 15.447078973142284 +334 18 loss.adversarial_temperature 0.3711246890998823 +334 18 optimizer.lr 0.011965762937341435 +334 18 negative_sampler.num_negs_per_pos 61.0 +334 18 training.batch_size 1.0 +334 19 model.embedding_dim 2.0 +334 19 loss.margin 8.686602121036726 +334 19 loss.adversarial_temperature 0.7417096893080528 +334 19 optimizer.lr 0.015480738260136682 +334 19 negative_sampler.num_negs_per_pos 35.0 +334 19 training.batch_size 1.0 +334 20 model.embedding_dim 0.0 +334 20 loss.margin 26.59706589903385 +334 20 
loss.adversarial_temperature 0.4092438776271678 +334 20 optimizer.lr 0.01356608324858632 +334 20 negative_sampler.num_negs_per_pos 10.0 +334 20 training.batch_size 1.0 +334 21 model.embedding_dim 2.0 +334 21 loss.margin 23.33384669361798 +334 21 loss.adversarial_temperature 0.3611878541878623 +334 21 optimizer.lr 0.0032508400090971844 +334 21 negative_sampler.num_negs_per_pos 6.0 +334 21 training.batch_size 0.0 +334 22 model.embedding_dim 1.0 +334 22 loss.margin 2.4831141469878837 +334 22 loss.adversarial_temperature 0.2822515354183864 +334 22 optimizer.lr 0.001835353614690669 +334 22 negative_sampler.num_negs_per_pos 68.0 +334 22 training.batch_size 0.0 +334 23 model.embedding_dim 1.0 +334 23 loss.margin 13.631864565360551 +334 23 loss.adversarial_temperature 0.6897815715730231 +334 23 optimizer.lr 0.00883877545642517 +334 23 negative_sampler.num_negs_per_pos 9.0 +334 23 training.batch_size 2.0 +334 24 model.embedding_dim 2.0 +334 24 loss.margin 23.78260272271143 +334 24 loss.adversarial_temperature 0.21203881142349507 +334 24 optimizer.lr 0.022454251145714924 +334 24 negative_sampler.num_negs_per_pos 24.0 +334 24 training.batch_size 0.0 +334 25 model.embedding_dim 2.0 +334 25 loss.margin 13.558041162284947 +334 25 loss.adversarial_temperature 0.2938975691309 +334 25 optimizer.lr 0.010741409723867727 +334 25 negative_sampler.num_negs_per_pos 28.0 +334 25 training.batch_size 0.0 +334 26 model.embedding_dim 2.0 +334 26 loss.margin 20.94029976619162 +334 26 loss.adversarial_temperature 0.12960037119511253 +334 26 optimizer.lr 0.06514879062934452 +334 26 negative_sampler.num_negs_per_pos 24.0 +334 26 training.batch_size 0.0 +334 27 model.embedding_dim 1.0 +334 27 loss.margin 5.758600310897892 +334 27 loss.adversarial_temperature 0.361443556006462 +334 27 optimizer.lr 0.0038002511680843495 +334 27 negative_sampler.num_negs_per_pos 22.0 +334 27 training.batch_size 1.0 +334 28 model.embedding_dim 1.0 +334 28 loss.margin 13.55011726347054 +334 28 
loss.adversarial_temperature 0.32261231291277304 +334 28 optimizer.lr 0.028321763777988198 +334 28 negative_sampler.num_negs_per_pos 20.0 +334 28 training.batch_size 1.0 +334 29 model.embedding_dim 0.0 +334 29 loss.margin 23.287494942873398 +334 29 loss.adversarial_temperature 0.6546930030232851 +334 29 optimizer.lr 0.00661280246268333 +334 29 negative_sampler.num_negs_per_pos 69.0 +334 29 training.batch_size 1.0 +334 30 model.embedding_dim 0.0 +334 30 loss.margin 8.072959227429022 +334 30 loss.adversarial_temperature 0.2450639948351222 +334 30 optimizer.lr 0.0014112931321597917 +334 30 negative_sampler.num_negs_per_pos 98.0 +334 30 training.batch_size 0.0 +334 31 model.embedding_dim 1.0 +334 31 loss.margin 27.295343276208513 +334 31 loss.adversarial_temperature 0.5177888783193869 +334 31 optimizer.lr 0.007195922943733208 +334 31 negative_sampler.num_negs_per_pos 5.0 +334 31 training.batch_size 1.0 +334 32 model.embedding_dim 0.0 +334 32 loss.margin 9.689624234052616 +334 32 loss.adversarial_temperature 0.5260014586081955 +334 32 optimizer.lr 0.0021308146321584853 +334 32 negative_sampler.num_negs_per_pos 98.0 +334 32 training.batch_size 1.0 +334 33 model.embedding_dim 2.0 +334 33 loss.margin 13.975854790184256 +334 33 loss.adversarial_temperature 0.15963523010887784 +334 33 optimizer.lr 0.017982362330502065 +334 33 negative_sampler.num_negs_per_pos 7.0 +334 33 training.batch_size 2.0 +334 34 model.embedding_dim 0.0 +334 34 loss.margin 23.239916660923733 +334 34 loss.adversarial_temperature 0.990977301968069 +334 34 optimizer.lr 0.06818548626686598 +334 34 negative_sampler.num_negs_per_pos 86.0 +334 34 training.batch_size 0.0 +334 35 model.embedding_dim 2.0 +334 35 loss.margin 18.244072462597725 +334 35 loss.adversarial_temperature 0.5901529026619753 +334 35 optimizer.lr 0.005354512810519652 +334 35 negative_sampler.num_negs_per_pos 64.0 +334 35 training.batch_size 2.0 +334 36 model.embedding_dim 0.0 +334 36 loss.margin 25.95107544655203 +334 36 
loss.adversarial_temperature 0.7455229298761273 +334 36 optimizer.lr 0.0012470358455289761 +334 36 negative_sampler.num_negs_per_pos 72.0 +334 36 training.batch_size 0.0 +334 1 dataset """wn18rr""" +334 1 model """hole""" +334 1 loss """nssa""" +334 1 regularizer """no""" +334 1 optimizer """adam""" +334 1 training_loop """owa""" +334 1 negative_sampler """basic""" +334 1 evaluator """rankbased""" +334 2 dataset """wn18rr""" +334 2 model """hole""" +334 2 loss """nssa""" +334 2 regularizer """no""" +334 2 optimizer """adam""" +334 2 training_loop """owa""" +334 2 negative_sampler """basic""" +334 2 evaluator """rankbased""" +334 3 dataset """wn18rr""" +334 3 model """hole""" +334 3 loss """nssa""" +334 3 regularizer """no""" +334 3 optimizer """adam""" +334 3 training_loop """owa""" +334 3 negative_sampler """basic""" +334 3 evaluator """rankbased""" +334 4 dataset """wn18rr""" +334 4 model """hole""" +334 4 loss """nssa""" +334 4 regularizer """no""" +334 4 optimizer """adam""" +334 4 training_loop """owa""" +334 4 negative_sampler """basic""" +334 4 evaluator """rankbased""" +334 5 dataset """wn18rr""" +334 5 model """hole""" +334 5 loss """nssa""" +334 5 regularizer """no""" +334 5 optimizer """adam""" +334 5 training_loop """owa""" +334 5 negative_sampler """basic""" +334 5 evaluator """rankbased""" +334 6 dataset """wn18rr""" +334 6 model """hole""" +334 6 loss """nssa""" +334 6 regularizer """no""" +334 6 optimizer """adam""" +334 6 training_loop """owa""" +334 6 negative_sampler """basic""" +334 6 evaluator """rankbased""" +334 7 dataset """wn18rr""" +334 7 model """hole""" +334 7 loss """nssa""" +334 7 regularizer """no""" +334 7 optimizer """adam""" +334 7 training_loop """owa""" +334 7 negative_sampler """basic""" +334 7 evaluator """rankbased""" +334 8 dataset """wn18rr""" +334 8 model """hole""" +334 8 loss """nssa""" +334 8 regularizer """no""" +334 8 optimizer """adam""" +334 8 training_loop """owa""" +334 8 negative_sampler """basic""" +334 8 
evaluator """rankbased""" +334 9 dataset """wn18rr""" +334 9 model """hole""" +334 9 loss """nssa""" +334 9 regularizer """no""" +334 9 optimizer """adam""" +334 9 training_loop """owa""" +334 9 negative_sampler """basic""" +334 9 evaluator """rankbased""" +334 10 dataset """wn18rr""" +334 10 model """hole""" +334 10 loss """nssa""" +334 10 regularizer """no""" +334 10 optimizer """adam""" +334 10 training_loop """owa""" +334 10 negative_sampler """basic""" +334 10 evaluator """rankbased""" +334 11 dataset """wn18rr""" +334 11 model """hole""" +334 11 loss """nssa""" +334 11 regularizer """no""" +334 11 optimizer """adam""" +334 11 training_loop """owa""" +334 11 negative_sampler """basic""" +334 11 evaluator """rankbased""" +334 12 dataset """wn18rr""" +334 12 model """hole""" +334 12 loss """nssa""" +334 12 regularizer """no""" +334 12 optimizer """adam""" +334 12 training_loop """owa""" +334 12 negative_sampler """basic""" +334 12 evaluator """rankbased""" +334 13 dataset """wn18rr""" +334 13 model """hole""" +334 13 loss """nssa""" +334 13 regularizer """no""" +334 13 optimizer """adam""" +334 13 training_loop """owa""" +334 13 negative_sampler """basic""" +334 13 evaluator """rankbased""" +334 14 dataset """wn18rr""" +334 14 model """hole""" +334 14 loss """nssa""" +334 14 regularizer """no""" +334 14 optimizer """adam""" +334 14 training_loop """owa""" +334 14 negative_sampler """basic""" +334 14 evaluator """rankbased""" +334 15 dataset """wn18rr""" +334 15 model """hole""" +334 15 loss """nssa""" +334 15 regularizer """no""" +334 15 optimizer """adam""" +334 15 training_loop """owa""" +334 15 negative_sampler """basic""" +334 15 evaluator """rankbased""" +334 16 dataset """wn18rr""" +334 16 model """hole""" +334 16 loss """nssa""" +334 16 regularizer """no""" +334 16 optimizer """adam""" +334 16 training_loop """owa""" +334 16 negative_sampler """basic""" +334 16 evaluator """rankbased""" +334 17 dataset """wn18rr""" +334 17 model """hole""" +334 17 loss 
"""nssa""" +334 17 regularizer """no""" +334 17 optimizer """adam""" +334 17 training_loop """owa""" +334 17 negative_sampler """basic""" +334 17 evaluator """rankbased""" +334 18 dataset """wn18rr""" +334 18 model """hole""" +334 18 loss """nssa""" +334 18 regularizer """no""" +334 18 optimizer """adam""" +334 18 training_loop """owa""" +334 18 negative_sampler """basic""" +334 18 evaluator """rankbased""" +334 19 dataset """wn18rr""" +334 19 model """hole""" +334 19 loss """nssa""" +334 19 regularizer """no""" +334 19 optimizer """adam""" +334 19 training_loop """owa""" +334 19 negative_sampler """basic""" +334 19 evaluator """rankbased""" +334 20 dataset """wn18rr""" +334 20 model """hole""" +334 20 loss """nssa""" +334 20 regularizer """no""" +334 20 optimizer """adam""" +334 20 training_loop """owa""" +334 20 negative_sampler """basic""" +334 20 evaluator """rankbased""" +334 21 dataset """wn18rr""" +334 21 model """hole""" +334 21 loss """nssa""" +334 21 regularizer """no""" +334 21 optimizer """adam""" +334 21 training_loop """owa""" +334 21 negative_sampler """basic""" +334 21 evaluator """rankbased""" +334 22 dataset """wn18rr""" +334 22 model """hole""" +334 22 loss """nssa""" +334 22 regularizer """no""" +334 22 optimizer """adam""" +334 22 training_loop """owa""" +334 22 negative_sampler """basic""" +334 22 evaluator """rankbased""" +334 23 dataset """wn18rr""" +334 23 model """hole""" +334 23 loss """nssa""" +334 23 regularizer """no""" +334 23 optimizer """adam""" +334 23 training_loop """owa""" +334 23 negative_sampler """basic""" +334 23 evaluator """rankbased""" +334 24 dataset """wn18rr""" +334 24 model """hole""" +334 24 loss """nssa""" +334 24 regularizer """no""" +334 24 optimizer """adam""" +334 24 training_loop """owa""" +334 24 negative_sampler """basic""" +334 24 evaluator """rankbased""" +334 25 dataset """wn18rr""" +334 25 model """hole""" +334 25 loss """nssa""" +334 25 regularizer """no""" +334 25 optimizer """adam""" +334 25 
training_loop """owa""" +334 25 negative_sampler """basic""" +334 25 evaluator """rankbased""" +334 26 dataset """wn18rr""" +334 26 model """hole""" +334 26 loss """nssa""" +334 26 regularizer """no""" +334 26 optimizer """adam""" +334 26 training_loop """owa""" +334 26 negative_sampler """basic""" +334 26 evaluator """rankbased""" +334 27 dataset """wn18rr""" +334 27 model """hole""" +334 27 loss """nssa""" +334 27 regularizer """no""" +334 27 optimizer """adam""" +334 27 training_loop """owa""" +334 27 negative_sampler """basic""" +334 27 evaluator """rankbased""" +334 28 dataset """wn18rr""" +334 28 model """hole""" +334 28 loss """nssa""" +334 28 regularizer """no""" +334 28 optimizer """adam""" +334 28 training_loop """owa""" +334 28 negative_sampler """basic""" +334 28 evaluator """rankbased""" +334 29 dataset """wn18rr""" +334 29 model """hole""" +334 29 loss """nssa""" +334 29 regularizer """no""" +334 29 optimizer """adam""" +334 29 training_loop """owa""" +334 29 negative_sampler """basic""" +334 29 evaluator """rankbased""" +334 30 dataset """wn18rr""" +334 30 model """hole""" +334 30 loss """nssa""" +334 30 regularizer """no""" +334 30 optimizer """adam""" +334 30 training_loop """owa""" +334 30 negative_sampler """basic""" +334 30 evaluator """rankbased""" +334 31 dataset """wn18rr""" +334 31 model """hole""" +334 31 loss """nssa""" +334 31 regularizer """no""" +334 31 optimizer """adam""" +334 31 training_loop """owa""" +334 31 negative_sampler """basic""" +334 31 evaluator """rankbased""" +334 32 dataset """wn18rr""" +334 32 model """hole""" +334 32 loss """nssa""" +334 32 regularizer """no""" +334 32 optimizer """adam""" +334 32 training_loop """owa""" +334 32 negative_sampler """basic""" +334 32 evaluator """rankbased""" +334 33 dataset """wn18rr""" +334 33 model """hole""" +334 33 loss """nssa""" +334 33 regularizer """no""" +334 33 optimizer """adam""" +334 33 training_loop """owa""" +334 33 negative_sampler """basic""" +334 33 evaluator 
"""rankbased""" +334 34 dataset """wn18rr""" +334 34 model """hole""" +334 34 loss """nssa""" +334 34 regularizer """no""" +334 34 optimizer """adam""" +334 34 training_loop """owa""" +334 34 negative_sampler """basic""" +334 34 evaluator """rankbased""" +334 35 dataset """wn18rr""" +334 35 model """hole""" +334 35 loss """nssa""" +334 35 regularizer """no""" +334 35 optimizer """adam""" +334 35 training_loop """owa""" +334 35 negative_sampler """basic""" +334 35 evaluator """rankbased""" +334 36 dataset """wn18rr""" +334 36 model """hole""" +334 36 loss """nssa""" +334 36 regularizer """no""" +334 36 optimizer """adam""" +334 36 training_loop """owa""" +334 36 negative_sampler """basic""" +334 36 evaluator """rankbased""" +335 1 model.embedding_dim 2.0 +335 1 loss.margin 6.602217279313757 +335 1 loss.adversarial_temperature 0.6746499108405809 +335 1 optimizer.lr 0.007769150550758568 +335 1 negative_sampler.num_negs_per_pos 62.0 +335 1 training.batch_size 1.0 +335 2 model.embedding_dim 2.0 +335 2 loss.margin 11.766376571001238 +335 2 loss.adversarial_temperature 0.9624663540848192 +335 2 optimizer.lr 0.002600340991466881 +335 2 negative_sampler.num_negs_per_pos 41.0 +335 2 training.batch_size 2.0 +335 3 model.embedding_dim 1.0 +335 3 loss.margin 24.35308798509084 +335 3 loss.adversarial_temperature 0.46668038345375246 +335 3 optimizer.lr 0.016722219740011895 +335 3 negative_sampler.num_negs_per_pos 64.0 +335 3 training.batch_size 2.0 +335 4 model.embedding_dim 2.0 +335 4 loss.margin 12.791579281578443 +335 4 loss.adversarial_temperature 0.781395985234022 +335 4 optimizer.lr 0.02266373602780999 +335 4 negative_sampler.num_negs_per_pos 38.0 +335 4 training.batch_size 0.0 +335 5 model.embedding_dim 1.0 +335 5 loss.margin 28.498660979405344 +335 5 loss.adversarial_temperature 0.7313053712023787 +335 5 optimizer.lr 0.0018006020231192755 +335 5 negative_sampler.num_negs_per_pos 90.0 +335 5 training.batch_size 2.0 +335 6 model.embedding_dim 0.0 +335 6 loss.margin 
26.63591709718686 +335 6 loss.adversarial_temperature 0.6818156917376231 +335 6 optimizer.lr 0.02605157752739349 +335 6 negative_sampler.num_negs_per_pos 53.0 +335 6 training.batch_size 1.0 +335 7 model.embedding_dim 1.0 +335 7 loss.margin 7.946060366412018 +335 7 loss.adversarial_temperature 0.22443814013916247 +335 7 optimizer.lr 0.05652986553411066 +335 7 negative_sampler.num_negs_per_pos 81.0 +335 7 training.batch_size 1.0 +335 8 model.embedding_dim 2.0 +335 8 loss.margin 10.825635946422489 +335 8 loss.adversarial_temperature 0.8361996437125923 +335 8 optimizer.lr 0.0014994615829164333 +335 8 negative_sampler.num_negs_per_pos 24.0 +335 8 training.batch_size 1.0 +335 9 model.embedding_dim 0.0 +335 9 loss.margin 1.4628394507615254 +335 9 loss.adversarial_temperature 0.6075459243526266 +335 9 optimizer.lr 0.020946489817727808 +335 9 negative_sampler.num_negs_per_pos 11.0 +335 9 training.batch_size 1.0 +335 10 model.embedding_dim 0.0 +335 10 loss.margin 18.43746532232103 +335 10 loss.adversarial_temperature 0.812870703828113 +335 10 optimizer.lr 0.002531380278831066 +335 10 negative_sampler.num_negs_per_pos 56.0 +335 10 training.batch_size 2.0 +335 11 model.embedding_dim 2.0 +335 11 loss.margin 10.339053246375077 +335 11 loss.adversarial_temperature 0.8159223685071185 +335 11 optimizer.lr 0.05659496640231772 +335 11 negative_sampler.num_negs_per_pos 67.0 +335 11 training.batch_size 0.0 +335 12 model.embedding_dim 2.0 +335 12 loss.margin 11.98904183388206 +335 12 loss.adversarial_temperature 0.25639235373465374 +335 12 optimizer.lr 0.012735267795036692 +335 12 negative_sampler.num_negs_per_pos 17.0 +335 12 training.batch_size 2.0 +335 13 model.embedding_dim 2.0 +335 13 loss.margin 4.350817576665023 +335 13 loss.adversarial_temperature 0.957702005798269 +335 13 optimizer.lr 0.028642227324761366 +335 13 negative_sampler.num_negs_per_pos 40.0 +335 13 training.batch_size 1.0 +335 14 model.embedding_dim 2.0 +335 14 loss.margin 16.063976705242247 +335 14 
loss.adversarial_temperature 0.9506960487691143 +335 14 optimizer.lr 0.01965655394974371 +335 14 negative_sampler.num_negs_per_pos 41.0 +335 14 training.batch_size 2.0 +335 15 model.embedding_dim 0.0 +335 15 loss.margin 23.529151557887417 +335 15 loss.adversarial_temperature 0.505382864466664 +335 15 optimizer.lr 0.023677814475977783 +335 15 negative_sampler.num_negs_per_pos 75.0 +335 15 training.batch_size 0.0 +335 16 model.embedding_dim 0.0 +335 16 loss.margin 13.299907555133629 +335 16 loss.adversarial_temperature 0.6364528180428127 +335 16 optimizer.lr 0.05150994970993871 +335 16 negative_sampler.num_negs_per_pos 94.0 +335 16 training.batch_size 0.0 +335 17 model.embedding_dim 2.0 +335 17 loss.margin 19.282768494202394 +335 17 loss.adversarial_temperature 0.7191631056946546 +335 17 optimizer.lr 0.002789687671064677 +335 17 negative_sampler.num_negs_per_pos 21.0 +335 17 training.batch_size 0.0 +335 18 model.embedding_dim 2.0 +335 18 loss.margin 13.144801547160602 +335 18 loss.adversarial_temperature 0.12803518446166456 +335 18 optimizer.lr 0.001685821403906106 +335 18 negative_sampler.num_negs_per_pos 12.0 +335 18 training.batch_size 2.0 +335 19 model.embedding_dim 1.0 +335 19 loss.margin 13.418208232358271 +335 19 loss.adversarial_temperature 0.637076011941917 +335 19 optimizer.lr 0.0017216911776838893 +335 19 negative_sampler.num_negs_per_pos 73.0 +335 19 training.batch_size 2.0 +335 20 model.embedding_dim 0.0 +335 20 loss.margin 27.967509590177457 +335 20 loss.adversarial_temperature 0.1636006617547239 +335 20 optimizer.lr 0.0022712478075858685 +335 20 negative_sampler.num_negs_per_pos 46.0 +335 20 training.batch_size 0.0 +335 21 model.embedding_dim 0.0 +335 21 loss.margin 13.968307267107553 +335 21 loss.adversarial_temperature 0.34447667825953643 +335 21 optimizer.lr 0.02120641277274313 +335 21 negative_sampler.num_negs_per_pos 22.0 +335 21 training.batch_size 1.0 +335 22 model.embedding_dim 1.0 +335 22 loss.margin 6.434757182190523 +335 22 
loss.adversarial_temperature 0.39239911524439364 +335 22 optimizer.lr 0.00533445682956854 +335 22 negative_sampler.num_negs_per_pos 30.0 +335 22 training.batch_size 0.0 +335 23 model.embedding_dim 0.0 +335 23 loss.margin 27.300073728713407 +335 23 loss.adversarial_temperature 0.9864650121413112 +335 23 optimizer.lr 0.022414363672890753 +335 23 negative_sampler.num_negs_per_pos 89.0 +335 23 training.batch_size 0.0 +335 24 model.embedding_dim 0.0 +335 24 loss.margin 20.54194838930851 +335 24 loss.adversarial_temperature 0.24184757643320978 +335 24 optimizer.lr 0.01746028618557593 +335 24 negative_sampler.num_negs_per_pos 47.0 +335 24 training.batch_size 2.0 +335 25 model.embedding_dim 0.0 +335 25 loss.margin 15.77773249313101 +335 25 loss.adversarial_temperature 0.5938799691362215 +335 25 optimizer.lr 0.0029885295237361445 +335 25 negative_sampler.num_negs_per_pos 18.0 +335 25 training.batch_size 2.0 +335 26 model.embedding_dim 1.0 +335 26 loss.margin 21.641925431992192 +335 26 loss.adversarial_temperature 0.24768393728590357 +335 26 optimizer.lr 0.003571555099435942 +335 26 negative_sampler.num_negs_per_pos 16.0 +335 26 training.batch_size 1.0 +335 27 model.embedding_dim 0.0 +335 27 loss.margin 21.821143096049987 +335 27 loss.adversarial_temperature 0.3320195063956585 +335 27 optimizer.lr 0.046596453801781575 +335 27 negative_sampler.num_negs_per_pos 69.0 +335 27 training.batch_size 2.0 +335 28 model.embedding_dim 0.0 +335 28 loss.margin 24.735568589510883 +335 28 loss.adversarial_temperature 0.8859812211022107 +335 28 optimizer.lr 0.003002544224507847 +335 28 negative_sampler.num_negs_per_pos 56.0 +335 28 training.batch_size 0.0 +335 29 model.embedding_dim 1.0 +335 29 loss.margin 23.678700898695382 +335 29 loss.adversarial_temperature 0.712052951959271 +335 29 optimizer.lr 0.005329873091736712 +335 29 negative_sampler.num_negs_per_pos 22.0 +335 29 training.batch_size 0.0 +335 30 model.embedding_dim 0.0 +335 30 loss.margin 3.991026843062394 +335 30 
loss.adversarial_temperature 0.9406973125536515 +335 30 optimizer.lr 0.0030716997895343825 +335 30 negative_sampler.num_negs_per_pos 78.0 +335 30 training.batch_size 2.0 +335 31 model.embedding_dim 1.0 +335 31 loss.margin 10.822438671720874 +335 31 loss.adversarial_temperature 0.3612382185771331 +335 31 optimizer.lr 0.031691886729901544 +335 31 negative_sampler.num_negs_per_pos 24.0 +335 31 training.batch_size 2.0 +335 32 model.embedding_dim 2.0 +335 32 loss.margin 19.425484873033955 +335 32 loss.adversarial_temperature 0.45634098774931076 +335 32 optimizer.lr 0.005461830945685079 +335 32 negative_sampler.num_negs_per_pos 1.0 +335 32 training.batch_size 1.0 +335 33 model.embedding_dim 1.0 +335 33 loss.margin 21.5224340892395 +335 33 loss.adversarial_temperature 0.33123670508843933 +335 33 optimizer.lr 0.08796620398248751 +335 33 negative_sampler.num_negs_per_pos 84.0 +335 33 training.batch_size 2.0 +335 34 model.embedding_dim 0.0 +335 34 loss.margin 4.581426328465341 +335 34 loss.adversarial_temperature 0.9040924336302434 +335 34 optimizer.lr 0.002268164772245073 +335 34 negative_sampler.num_negs_per_pos 84.0 +335 34 training.batch_size 1.0 +335 35 model.embedding_dim 1.0 +335 35 loss.margin 9.762924013004994 +335 35 loss.adversarial_temperature 0.6261713590164769 +335 35 optimizer.lr 0.002844464590462018 +335 35 negative_sampler.num_negs_per_pos 84.0 +335 35 training.batch_size 0.0 +335 36 model.embedding_dim 0.0 +335 36 loss.margin 9.24062038535569 +335 36 loss.adversarial_temperature 0.6437242224883909 +335 36 optimizer.lr 0.02252036743345903 +335 36 negative_sampler.num_negs_per_pos 27.0 +335 36 training.batch_size 1.0 +335 37 model.embedding_dim 0.0 +335 37 loss.margin 8.30138008588385 +335 37 loss.adversarial_temperature 0.3365588378410605 +335 37 optimizer.lr 0.014934488400525709 +335 37 negative_sampler.num_negs_per_pos 12.0 +335 37 training.batch_size 2.0 +335 38 model.embedding_dim 0.0 +335 38 loss.margin 6.676468662584159 +335 38 
loss.adversarial_temperature 0.4010431055322702 +335 38 optimizer.lr 0.001889930245079335 +335 38 negative_sampler.num_negs_per_pos 67.0 +335 38 training.batch_size 1.0 +335 39 model.embedding_dim 0.0 +335 39 loss.margin 19.775537353061132 +335 39 loss.adversarial_temperature 0.3157655403622158 +335 39 optimizer.lr 0.022014084337334434 +335 39 negative_sampler.num_negs_per_pos 97.0 +335 39 training.batch_size 2.0 +335 40 model.embedding_dim 1.0 +335 40 loss.margin 2.286415831183992 +335 40 loss.adversarial_temperature 0.45092005879614894 +335 40 optimizer.lr 0.014748081553405413 +335 40 negative_sampler.num_negs_per_pos 43.0 +335 40 training.batch_size 1.0 +335 41 model.embedding_dim 0.0 +335 41 loss.margin 16.488250103207164 +335 41 loss.adversarial_temperature 0.7007960630785137 +335 41 optimizer.lr 0.09720657368185964 +335 41 negative_sampler.num_negs_per_pos 77.0 +335 41 training.batch_size 2.0 +335 42 model.embedding_dim 2.0 +335 42 loss.margin 25.98598292891716 +335 42 loss.adversarial_temperature 0.11131358273781698 +335 42 optimizer.lr 0.004081919203183124 +335 42 negative_sampler.num_negs_per_pos 84.0 +335 42 training.batch_size 2.0 +335 43 model.embedding_dim 1.0 +335 43 loss.margin 23.637127140887173 +335 43 loss.adversarial_temperature 0.8719836074124907 +335 43 optimizer.lr 0.02665481895699945 +335 43 negative_sampler.num_negs_per_pos 70.0 +335 43 training.batch_size 1.0 +335 44 model.embedding_dim 0.0 +335 44 loss.margin 3.6184340280894953 +335 44 loss.adversarial_temperature 0.8231954785533397 +335 44 optimizer.lr 0.043951240021735886 +335 44 negative_sampler.num_negs_per_pos 9.0 +335 44 training.batch_size 1.0 +335 45 model.embedding_dim 1.0 +335 45 loss.margin 16.187158362573744 +335 45 loss.adversarial_temperature 0.8898154420761898 +335 45 optimizer.lr 0.005351695910513112 +335 45 negative_sampler.num_negs_per_pos 83.0 +335 45 training.batch_size 2.0 +335 46 model.embedding_dim 2.0 +335 46 loss.margin 28.241232652668252 +335 46 
loss.adversarial_temperature 0.7272409713949315 +335 46 optimizer.lr 0.004625117079887488 +335 46 negative_sampler.num_negs_per_pos 57.0 +335 46 training.batch_size 0.0 +335 47 model.embedding_dim 2.0 +335 47 loss.margin 24.352799559726005 +335 47 loss.adversarial_temperature 0.49711790278708157 +335 47 optimizer.lr 0.024610607955483893 +335 47 negative_sampler.num_negs_per_pos 95.0 +335 47 training.batch_size 1.0 +335 48 model.embedding_dim 1.0 +335 48 loss.margin 20.81894553876189 +335 48 loss.adversarial_temperature 0.12371158840262819 +335 48 optimizer.lr 0.002167361432342505 +335 48 negative_sampler.num_negs_per_pos 52.0 +335 48 training.batch_size 1.0 +335 49 model.embedding_dim 2.0 +335 49 loss.margin 5.359360898696713 +335 49 loss.adversarial_temperature 0.47181219859266327 +335 49 optimizer.lr 0.007715102174228906 +335 49 negative_sampler.num_negs_per_pos 32.0 +335 49 training.batch_size 2.0 +335 50 model.embedding_dim 1.0 +335 50 loss.margin 14.805697092967991 +335 50 loss.adversarial_temperature 0.4306643685889118 +335 50 optimizer.lr 0.001796995340967607 +335 50 negative_sampler.num_negs_per_pos 17.0 +335 50 training.batch_size 2.0 +335 51 model.embedding_dim 1.0 +335 51 loss.margin 28.841501529568895 +335 51 loss.adversarial_temperature 0.9285747645047912 +335 51 optimizer.lr 0.049174509917095176 +335 51 negative_sampler.num_negs_per_pos 85.0 +335 51 training.batch_size 1.0 +335 52 model.embedding_dim 0.0 +335 52 loss.margin 27.93781122188904 +335 52 loss.adversarial_temperature 0.6538617589402703 +335 52 optimizer.lr 0.001448076004501078 +335 52 negative_sampler.num_negs_per_pos 3.0 +335 52 training.batch_size 2.0 +335 53 model.embedding_dim 2.0 +335 53 loss.margin 22.412019958036417 +335 53 loss.adversarial_temperature 0.7210550722062674 +335 53 optimizer.lr 0.01731836937668281 +335 53 negative_sampler.num_negs_per_pos 58.0 +335 53 training.batch_size 2.0 +335 1 dataset """wn18rr""" +335 1 model """hole""" +335 1 loss """nssa""" +335 1 regularizer 
"""no""" +335 1 optimizer """adam""" +335 1 training_loop """owa""" +335 1 negative_sampler """basic""" +335 1 evaluator """rankbased""" +335 2 dataset """wn18rr""" +335 2 model """hole""" +335 2 loss """nssa""" +335 2 regularizer """no""" +335 2 optimizer """adam""" +335 2 training_loop """owa""" +335 2 negative_sampler """basic""" +335 2 evaluator """rankbased""" +335 3 dataset """wn18rr""" +335 3 model """hole""" +335 3 loss """nssa""" +335 3 regularizer """no""" +335 3 optimizer """adam""" +335 3 training_loop """owa""" +335 3 negative_sampler """basic""" +335 3 evaluator """rankbased""" +335 4 dataset """wn18rr""" +335 4 model """hole""" +335 4 loss """nssa""" +335 4 regularizer """no""" +335 4 optimizer """adam""" +335 4 training_loop """owa""" +335 4 negative_sampler """basic""" +335 4 evaluator """rankbased""" +335 5 dataset """wn18rr""" +335 5 model """hole""" +335 5 loss """nssa""" +335 5 regularizer """no""" +335 5 optimizer """adam""" +335 5 training_loop """owa""" +335 5 negative_sampler """basic""" +335 5 evaluator """rankbased""" +335 6 dataset """wn18rr""" +335 6 model """hole""" +335 6 loss """nssa""" +335 6 regularizer """no""" +335 6 optimizer """adam""" +335 6 training_loop """owa""" +335 6 negative_sampler """basic""" +335 6 evaluator """rankbased""" +335 7 dataset """wn18rr""" +335 7 model """hole""" +335 7 loss """nssa""" +335 7 regularizer """no""" +335 7 optimizer """adam""" +335 7 training_loop """owa""" +335 7 negative_sampler """basic""" +335 7 evaluator """rankbased""" +335 8 dataset """wn18rr""" +335 8 model """hole""" +335 8 loss """nssa""" +335 8 regularizer """no""" +335 8 optimizer """adam""" +335 8 training_loop """owa""" +335 8 negative_sampler """basic""" +335 8 evaluator """rankbased""" +335 9 dataset """wn18rr""" +335 9 model """hole""" +335 9 loss """nssa""" +335 9 regularizer """no""" +335 9 optimizer """adam""" +335 9 training_loop """owa""" +335 9 negative_sampler """basic""" +335 9 evaluator """rankbased""" +335 10 
dataset """wn18rr""" +335 10 model """hole""" +335 10 loss """nssa""" +335 10 regularizer """no""" +335 10 optimizer """adam""" +335 10 training_loop """owa""" +335 10 negative_sampler """basic""" +335 10 evaluator """rankbased""" +335 11 dataset """wn18rr""" +335 11 model """hole""" +335 11 loss """nssa""" +335 11 regularizer """no""" +335 11 optimizer """adam""" +335 11 training_loop """owa""" +335 11 negative_sampler """basic""" +335 11 evaluator """rankbased""" +335 12 dataset """wn18rr""" +335 12 model """hole""" +335 12 loss """nssa""" +335 12 regularizer """no""" +335 12 optimizer """adam""" +335 12 training_loop """owa""" +335 12 negative_sampler """basic""" +335 12 evaluator """rankbased""" +335 13 dataset """wn18rr""" +335 13 model """hole""" +335 13 loss """nssa""" +335 13 regularizer """no""" +335 13 optimizer """adam""" +335 13 training_loop """owa""" +335 13 negative_sampler """basic""" +335 13 evaluator """rankbased""" +335 14 dataset """wn18rr""" +335 14 model """hole""" +335 14 loss """nssa""" +335 14 regularizer """no""" +335 14 optimizer """adam""" +335 14 training_loop """owa""" +335 14 negative_sampler """basic""" +335 14 evaluator """rankbased""" +335 15 dataset """wn18rr""" +335 15 model """hole""" +335 15 loss """nssa""" +335 15 regularizer """no""" +335 15 optimizer """adam""" +335 15 training_loop """owa""" +335 15 negative_sampler """basic""" +335 15 evaluator """rankbased""" +335 16 dataset """wn18rr""" +335 16 model """hole""" +335 16 loss """nssa""" +335 16 regularizer """no""" +335 16 optimizer """adam""" +335 16 training_loop """owa""" +335 16 negative_sampler """basic""" +335 16 evaluator """rankbased""" +335 17 dataset """wn18rr""" +335 17 model """hole""" +335 17 loss """nssa""" +335 17 regularizer """no""" +335 17 optimizer """adam""" +335 17 training_loop """owa""" +335 17 negative_sampler """basic""" +335 17 evaluator """rankbased""" +335 18 dataset """wn18rr""" +335 18 model """hole""" +335 18 loss """nssa""" +335 18 
regularizer """no""" +335 18 optimizer """adam""" +335 18 training_loop """owa""" +335 18 negative_sampler """basic""" +335 18 evaluator """rankbased""" +335 19 dataset """wn18rr""" +335 19 model """hole""" +335 19 loss """nssa""" +335 19 regularizer """no""" +335 19 optimizer """adam""" +335 19 training_loop """owa""" +335 19 negative_sampler """basic""" +335 19 evaluator """rankbased""" +335 20 dataset """wn18rr""" +335 20 model """hole""" +335 20 loss """nssa""" +335 20 regularizer """no""" +335 20 optimizer """adam""" +335 20 training_loop """owa""" +335 20 negative_sampler """basic""" +335 20 evaluator """rankbased""" +335 21 dataset """wn18rr""" +335 21 model """hole""" +335 21 loss """nssa""" +335 21 regularizer """no""" +335 21 optimizer """adam""" +335 21 training_loop """owa""" +335 21 negative_sampler """basic""" +335 21 evaluator """rankbased""" +335 22 dataset """wn18rr""" +335 22 model """hole""" +335 22 loss """nssa""" +335 22 regularizer """no""" +335 22 optimizer """adam""" +335 22 training_loop """owa""" +335 22 negative_sampler """basic""" +335 22 evaluator """rankbased""" +335 23 dataset """wn18rr""" +335 23 model """hole""" +335 23 loss """nssa""" +335 23 regularizer """no""" +335 23 optimizer """adam""" +335 23 training_loop """owa""" +335 23 negative_sampler """basic""" +335 23 evaluator """rankbased""" +335 24 dataset """wn18rr""" +335 24 model """hole""" +335 24 loss """nssa""" +335 24 regularizer """no""" +335 24 optimizer """adam""" +335 24 training_loop """owa""" +335 24 negative_sampler """basic""" +335 24 evaluator """rankbased""" +335 25 dataset """wn18rr""" +335 25 model """hole""" +335 25 loss """nssa""" +335 25 regularizer """no""" +335 25 optimizer """adam""" +335 25 training_loop """owa""" +335 25 negative_sampler """basic""" +335 25 evaluator """rankbased""" +335 26 dataset """wn18rr""" +335 26 model """hole""" +335 26 loss """nssa""" +335 26 regularizer """no""" +335 26 optimizer """adam""" +335 26 training_loop """owa""" +335 
26 negative_sampler """basic""" +335 26 evaluator """rankbased""" +335 27 dataset """wn18rr""" +335 27 model """hole""" +335 27 loss """nssa""" +335 27 regularizer """no""" +335 27 optimizer """adam""" +335 27 training_loop """owa""" +335 27 negative_sampler """basic""" +335 27 evaluator """rankbased""" +335 28 dataset """wn18rr""" +335 28 model """hole""" +335 28 loss """nssa""" +335 28 regularizer """no""" +335 28 optimizer """adam""" +335 28 training_loop """owa""" +335 28 negative_sampler """basic""" +335 28 evaluator """rankbased""" +335 29 dataset """wn18rr""" +335 29 model """hole""" +335 29 loss """nssa""" +335 29 regularizer """no""" +335 29 optimizer """adam""" +335 29 training_loop """owa""" +335 29 negative_sampler """basic""" +335 29 evaluator """rankbased""" +335 30 dataset """wn18rr""" +335 30 model """hole""" +335 30 loss """nssa""" +335 30 regularizer """no""" +335 30 optimizer """adam""" +335 30 training_loop """owa""" +335 30 negative_sampler """basic""" +335 30 evaluator """rankbased""" +335 31 dataset """wn18rr""" +335 31 model """hole""" +335 31 loss """nssa""" +335 31 regularizer """no""" +335 31 optimizer """adam""" +335 31 training_loop """owa""" +335 31 negative_sampler """basic""" +335 31 evaluator """rankbased""" +335 32 dataset """wn18rr""" +335 32 model """hole""" +335 32 loss """nssa""" +335 32 regularizer """no""" +335 32 optimizer """adam""" +335 32 training_loop """owa""" +335 32 negative_sampler """basic""" +335 32 evaluator """rankbased""" +335 33 dataset """wn18rr""" +335 33 model """hole""" +335 33 loss """nssa""" +335 33 regularizer """no""" +335 33 optimizer """adam""" +335 33 training_loop """owa""" +335 33 negative_sampler """basic""" +335 33 evaluator """rankbased""" +335 34 dataset """wn18rr""" +335 34 model """hole""" +335 34 loss """nssa""" +335 34 regularizer """no""" +335 34 optimizer """adam""" +335 34 training_loop """owa""" +335 34 negative_sampler """basic""" +335 34 evaluator """rankbased""" +335 35 dataset 
"""wn18rr""" +335 35 model """hole""" +335 35 loss """nssa""" +335 35 regularizer """no""" +335 35 optimizer """adam""" +335 35 training_loop """owa""" +335 35 negative_sampler """basic""" +335 35 evaluator """rankbased""" +335 36 dataset """wn18rr""" +335 36 model """hole""" +335 36 loss """nssa""" +335 36 regularizer """no""" +335 36 optimizer """adam""" +335 36 training_loop """owa""" +335 36 negative_sampler """basic""" +335 36 evaluator """rankbased""" +335 37 dataset """wn18rr""" +335 37 model """hole""" +335 37 loss """nssa""" +335 37 regularizer """no""" +335 37 optimizer """adam""" +335 37 training_loop """owa""" +335 37 negative_sampler """basic""" +335 37 evaluator """rankbased""" +335 38 dataset """wn18rr""" +335 38 model """hole""" +335 38 loss """nssa""" +335 38 regularizer """no""" +335 38 optimizer """adam""" +335 38 training_loop """owa""" +335 38 negative_sampler """basic""" +335 38 evaluator """rankbased""" +335 39 dataset """wn18rr""" +335 39 model """hole""" +335 39 loss """nssa""" +335 39 regularizer """no""" +335 39 optimizer """adam""" +335 39 training_loop """owa""" +335 39 negative_sampler """basic""" +335 39 evaluator """rankbased""" +335 40 dataset """wn18rr""" +335 40 model """hole""" +335 40 loss """nssa""" +335 40 regularizer """no""" +335 40 optimizer """adam""" +335 40 training_loop """owa""" +335 40 negative_sampler """basic""" +335 40 evaluator """rankbased""" +335 41 dataset """wn18rr""" +335 41 model """hole""" +335 41 loss """nssa""" +335 41 regularizer """no""" +335 41 optimizer """adam""" +335 41 training_loop """owa""" +335 41 negative_sampler """basic""" +335 41 evaluator """rankbased""" +335 42 dataset """wn18rr""" +335 42 model """hole""" +335 42 loss """nssa""" +335 42 regularizer """no""" +335 42 optimizer """adam""" +335 42 training_loop """owa""" +335 42 negative_sampler """basic""" +335 42 evaluator """rankbased""" +335 43 dataset """wn18rr""" +335 43 model """hole""" +335 43 loss """nssa""" +335 43 regularizer 
"""no""" +335 43 optimizer """adam""" +335 43 training_loop """owa""" +335 43 negative_sampler """basic""" +335 43 evaluator """rankbased""" +335 44 dataset """wn18rr""" +335 44 model """hole""" +335 44 loss """nssa""" +335 44 regularizer """no""" +335 44 optimizer """adam""" +335 44 training_loop """owa""" +335 44 negative_sampler """basic""" +335 44 evaluator """rankbased""" +335 45 dataset """wn18rr""" +335 45 model """hole""" +335 45 loss """nssa""" +335 45 regularizer """no""" +335 45 optimizer """adam""" +335 45 training_loop """owa""" +335 45 negative_sampler """basic""" +335 45 evaluator """rankbased""" +335 46 dataset """wn18rr""" +335 46 model """hole""" +335 46 loss """nssa""" +335 46 regularizer """no""" +335 46 optimizer """adam""" +335 46 training_loop """owa""" +335 46 negative_sampler """basic""" +335 46 evaluator """rankbased""" +335 47 dataset """wn18rr""" +335 47 model """hole""" +335 47 loss """nssa""" +335 47 regularizer """no""" +335 47 optimizer """adam""" +335 47 training_loop """owa""" +335 47 negative_sampler """basic""" +335 47 evaluator """rankbased""" +335 48 dataset """wn18rr""" +335 48 model """hole""" +335 48 loss """nssa""" +335 48 regularizer """no""" +335 48 optimizer """adam""" +335 48 training_loop """owa""" +335 48 negative_sampler """basic""" +335 48 evaluator """rankbased""" +335 49 dataset """wn18rr""" +335 49 model """hole""" +335 49 loss """nssa""" +335 49 regularizer """no""" +335 49 optimizer """adam""" +335 49 training_loop """owa""" +335 49 negative_sampler """basic""" +335 49 evaluator """rankbased""" +335 50 dataset """wn18rr""" +335 50 model """hole""" +335 50 loss """nssa""" +335 50 regularizer """no""" +335 50 optimizer """adam""" +335 50 training_loop """owa""" +335 50 negative_sampler """basic""" +335 50 evaluator """rankbased""" +335 51 dataset """wn18rr""" +335 51 model """hole""" +335 51 loss """nssa""" +335 51 regularizer """no""" +335 51 optimizer """adam""" +335 51 training_loop """owa""" +335 51 
negative_sampler """basic""" +335 51 evaluator """rankbased""" +335 52 dataset """wn18rr""" +335 52 model """hole""" +335 52 loss """nssa""" +335 52 regularizer """no""" +335 52 optimizer """adam""" +335 52 training_loop """owa""" +335 52 negative_sampler """basic""" +335 52 evaluator """rankbased""" +335 53 dataset """wn18rr""" +335 53 model """hole""" +335 53 loss """nssa""" +335 53 regularizer """no""" +335 53 optimizer """adam""" +335 53 training_loop """owa""" +335 53 negative_sampler """basic""" +335 53 evaluator """rankbased""" +336 1 model.embedding_dim 2.0 +336 1 optimizer.lr 0.04088023752634611 +336 1 training.batch_size 1.0 +336 1 training.label_smoothing 0.005635418249314759 +336 2 model.embedding_dim 2.0 +336 2 optimizer.lr 0.001274344401586451 +336 2 training.batch_size 2.0 +336 2 training.label_smoothing 0.0012713011623118832 +336 1 dataset """wn18rr""" +336 1 model """hole""" +336 1 loss """bceaftersigmoid""" +336 1 regularizer """no""" +336 1 optimizer """adam""" +336 1 training_loop """lcwa""" +336 1 evaluator """rankbased""" +336 2 dataset """wn18rr""" +336 2 model """hole""" +336 2 loss """bceaftersigmoid""" +336 2 regularizer """no""" +336 2 optimizer """adam""" +336 2 training_loop """lcwa""" +336 2 evaluator """rankbased""" +337 1 model.embedding_dim 0.0 +337 1 optimizer.lr 0.09649864045738102 +337 1 training.batch_size 2.0 +337 1 training.label_smoothing 0.12049605098068446 +337 2 model.embedding_dim 0.0 +337 2 optimizer.lr 0.012746886373613635 +337 2 training.batch_size 1.0 +337 2 training.label_smoothing 0.13810707960999727 +337 3 model.embedding_dim 0.0 +337 3 optimizer.lr 0.03962921664833801 +337 3 training.batch_size 1.0 +337 3 training.label_smoothing 0.5571641978178214 +337 4 model.embedding_dim 1.0 +337 4 optimizer.lr 0.0011257073922642344 +337 4 training.batch_size 1.0 +337 4 training.label_smoothing 0.16116014866833595 +337 5 model.embedding_dim 2.0 +337 5 optimizer.lr 0.08005231817669688 +337 5 training.batch_size 0.0 +337 5 
training.label_smoothing 0.08532695170107303 +337 6 model.embedding_dim 2.0 +337 6 optimizer.lr 0.011816815217381524 +337 6 training.batch_size 0.0 +337 6 training.label_smoothing 0.19480589702436443 +337 7 model.embedding_dim 1.0 +337 7 optimizer.lr 0.08177987412520787 +337 7 training.batch_size 1.0 +337 7 training.label_smoothing 0.05964722485995399 +337 8 model.embedding_dim 1.0 +337 8 optimizer.lr 0.004254245774435597 +337 8 training.batch_size 2.0 +337 8 training.label_smoothing 0.06720933769302247 +337 9 model.embedding_dim 0.0 +337 9 optimizer.lr 0.01236764463371673 +337 9 training.batch_size 0.0 +337 9 training.label_smoothing 0.015273227658135786 +337 10 model.embedding_dim 1.0 +337 10 optimizer.lr 0.0017748029942143996 +337 10 training.batch_size 2.0 +337 10 training.label_smoothing 0.2688721250522606 +337 11 model.embedding_dim 0.0 +337 11 optimizer.lr 0.01548360702602415 +337 11 training.batch_size 0.0 +337 11 training.label_smoothing 0.08759833496954281 +337 1 dataset """wn18rr""" +337 1 model """hole""" +337 1 loss """bceaftersigmoid""" +337 1 regularizer """no""" +337 1 optimizer """adam""" +337 1 training_loop """lcwa""" +337 1 evaluator """rankbased""" +337 2 dataset """wn18rr""" +337 2 model """hole""" +337 2 loss """bceaftersigmoid""" +337 2 regularizer """no""" +337 2 optimizer """adam""" +337 2 training_loop """lcwa""" +337 2 evaluator """rankbased""" +337 3 dataset """wn18rr""" +337 3 model """hole""" +337 3 loss """bceaftersigmoid""" +337 3 regularizer """no""" +337 3 optimizer """adam""" +337 3 training_loop """lcwa""" +337 3 evaluator """rankbased""" +337 4 dataset """wn18rr""" +337 4 model """hole""" +337 4 loss """bceaftersigmoid""" +337 4 regularizer """no""" +337 4 optimizer """adam""" +337 4 training_loop """lcwa""" +337 4 evaluator """rankbased""" +337 5 dataset """wn18rr""" +337 5 model """hole""" +337 5 loss """bceaftersigmoid""" +337 5 regularizer """no""" +337 5 optimizer """adam""" +337 5 training_loop """lcwa""" +337 5 evaluator 
"""rankbased""" +337 6 dataset """wn18rr""" +337 6 model """hole""" +337 6 loss """bceaftersigmoid""" +337 6 regularizer """no""" +337 6 optimizer """adam""" +337 6 training_loop """lcwa""" +337 6 evaluator """rankbased""" +337 7 dataset """wn18rr""" +337 7 model """hole""" +337 7 loss """bceaftersigmoid""" +337 7 regularizer """no""" +337 7 optimizer """adam""" +337 7 training_loop """lcwa""" +337 7 evaluator """rankbased""" +337 8 dataset """wn18rr""" +337 8 model """hole""" +337 8 loss """bceaftersigmoid""" +337 8 regularizer """no""" +337 8 optimizer """adam""" +337 8 training_loop """lcwa""" +337 8 evaluator """rankbased""" +337 9 dataset """wn18rr""" +337 9 model """hole""" +337 9 loss """bceaftersigmoid""" +337 9 regularizer """no""" +337 9 optimizer """adam""" +337 9 training_loop """lcwa""" +337 9 evaluator """rankbased""" +337 10 dataset """wn18rr""" +337 10 model """hole""" +337 10 loss """bceaftersigmoid""" +337 10 regularizer """no""" +337 10 optimizer """adam""" +337 10 training_loop """lcwa""" +337 10 evaluator """rankbased""" +337 11 dataset """wn18rr""" +337 11 model """hole""" +337 11 loss """bceaftersigmoid""" +337 11 regularizer """no""" +337 11 optimizer """adam""" +337 11 training_loop """lcwa""" +337 11 evaluator """rankbased""" +338 1 model.embedding_dim 0.0 +338 1 optimizer.lr 0.009274871065636956 +338 1 training.batch_size 2.0 +338 1 training.label_smoothing 0.024055976966306326 +338 2 model.embedding_dim 0.0 +338 2 optimizer.lr 0.05588186070962774 +338 2 training.batch_size 1.0 +338 2 training.label_smoothing 0.08619865663531756 +338 3 model.embedding_dim 1.0 +338 3 optimizer.lr 0.013130176448692925 +338 3 training.batch_size 0.0 +338 3 training.label_smoothing 0.003489757939130746 +338 4 model.embedding_dim 1.0 +338 4 optimizer.lr 0.015566034131974962 +338 4 training.batch_size 1.0 +338 4 training.label_smoothing 0.46622184708190906 +338 5 model.embedding_dim 2.0 +338 5 optimizer.lr 0.006854801191396065 +338 5 training.batch_size 1.0 
+338 5 training.label_smoothing 0.12084125716401757 +338 1 dataset """wn18rr""" +338 1 model """hole""" +338 1 loss """softplus""" +338 1 regularizer """no""" +338 1 optimizer """adam""" +338 1 training_loop """lcwa""" +338 1 evaluator """rankbased""" +338 2 dataset """wn18rr""" +338 2 model """hole""" +338 2 loss """softplus""" +338 2 regularizer """no""" +338 2 optimizer """adam""" +338 2 training_loop """lcwa""" +338 2 evaluator """rankbased""" +338 3 dataset """wn18rr""" +338 3 model """hole""" +338 3 loss """softplus""" +338 3 regularizer """no""" +338 3 optimizer """adam""" +338 3 training_loop """lcwa""" +338 3 evaluator """rankbased""" +338 4 dataset """wn18rr""" +338 4 model """hole""" +338 4 loss """softplus""" +338 4 regularizer """no""" +338 4 optimizer """adam""" +338 4 training_loop """lcwa""" +338 4 evaluator """rankbased""" +338 5 dataset """wn18rr""" +338 5 model """hole""" +338 5 loss """softplus""" +338 5 regularizer """no""" +338 5 optimizer """adam""" +338 5 training_loop """lcwa""" +338 5 evaluator """rankbased""" +339 1 model.embedding_dim 1.0 +339 1 optimizer.lr 0.007254966782397902 +339 1 training.batch_size 2.0 +339 1 training.label_smoothing 0.04858832227355605 +339 2 model.embedding_dim 0.0 +339 2 optimizer.lr 0.045526481698206736 +339 2 training.batch_size 1.0 +339 2 training.label_smoothing 0.49821326509541647 +339 3 model.embedding_dim 1.0 +339 3 optimizer.lr 0.0033554534761470528 +339 3 training.batch_size 1.0 +339 3 training.label_smoothing 0.745761706385047 +339 4 model.embedding_dim 0.0 +339 4 optimizer.lr 0.002159447005331957 +339 4 training.batch_size 1.0 +339 4 training.label_smoothing 0.10556570592913543 +339 5 model.embedding_dim 2.0 +339 5 optimizer.lr 0.03811988460788739 +339 5 training.batch_size 2.0 +339 5 training.label_smoothing 0.0019688799412368872 +339 6 model.embedding_dim 1.0 +339 6 optimizer.lr 0.020573095775888468 +339 6 training.batch_size 0.0 +339 6 training.label_smoothing 0.7565142004585812 +339 7 
model.embedding_dim 1.0 +339 7 optimizer.lr 0.07047087977124193 +339 7 training.batch_size 2.0 +339 7 training.label_smoothing 0.003506443891481544 +339 8 model.embedding_dim 0.0 +339 8 optimizer.lr 0.05910846717636209 +339 8 training.batch_size 1.0 +339 8 training.label_smoothing 0.003589853175286986 +339 1 dataset """wn18rr""" +339 1 model """hole""" +339 1 loss """softplus""" +339 1 regularizer """no""" +339 1 optimizer """adam""" +339 1 training_loop """lcwa""" +339 1 evaluator """rankbased""" +339 2 dataset """wn18rr""" +339 2 model """hole""" +339 2 loss """softplus""" +339 2 regularizer """no""" +339 2 optimizer """adam""" +339 2 training_loop """lcwa""" +339 2 evaluator """rankbased""" +339 3 dataset """wn18rr""" +339 3 model """hole""" +339 3 loss """softplus""" +339 3 regularizer """no""" +339 3 optimizer """adam""" +339 3 training_loop """lcwa""" +339 3 evaluator """rankbased""" +339 4 dataset """wn18rr""" +339 4 model """hole""" +339 4 loss """softplus""" +339 4 regularizer """no""" +339 4 optimizer """adam""" +339 4 training_loop """lcwa""" +339 4 evaluator """rankbased""" +339 5 dataset """wn18rr""" +339 5 model """hole""" +339 5 loss """softplus""" +339 5 regularizer """no""" +339 5 optimizer """adam""" +339 5 training_loop """lcwa""" +339 5 evaluator """rankbased""" +339 6 dataset """wn18rr""" +339 6 model """hole""" +339 6 loss """softplus""" +339 6 regularizer """no""" +339 6 optimizer """adam""" +339 6 training_loop """lcwa""" +339 6 evaluator """rankbased""" +339 7 dataset """wn18rr""" +339 7 model """hole""" +339 7 loss """softplus""" +339 7 regularizer """no""" +339 7 optimizer """adam""" +339 7 training_loop """lcwa""" +339 7 evaluator """rankbased""" +339 8 dataset """wn18rr""" +339 8 model """hole""" +339 8 loss """softplus""" +339 8 regularizer """no""" +339 8 optimizer """adam""" +339 8 training_loop """lcwa""" +339 8 evaluator """rankbased""" +340 1 model.embedding_dim 1.0 +340 1 loss.margin 16.431839719594095 +340 1 
loss.adversarial_temperature 0.16650550393678615 +340 1 optimizer.lr 0.005818100346722075 +340 1 negative_sampler.num_negs_per_pos 46.0 +340 1 training.batch_size 0.0 +340 2 model.embedding_dim 2.0 +340 2 loss.margin 13.37444828386858 +340 2 loss.adversarial_temperature 0.6115846252531847 +340 2 optimizer.lr 0.01781379202260077 +340 2 negative_sampler.num_negs_per_pos 35.0 +340 2 training.batch_size 3.0 +340 3 model.embedding_dim 2.0 +340 3 loss.margin 22.57994459106711 +340 3 loss.adversarial_temperature 0.43328958620158853 +340 3 optimizer.lr 0.0016809585187514493 +340 3 negative_sampler.num_negs_per_pos 23.0 +340 3 training.batch_size 1.0 +340 4 model.embedding_dim 2.0 +340 4 loss.margin 26.201858780707205 +340 4 loss.adversarial_temperature 0.394647642212971 +340 4 optimizer.lr 0.023973017897912396 +340 4 negative_sampler.num_negs_per_pos 29.0 +340 4 training.batch_size 3.0 +340 5 model.embedding_dim 0.0 +340 5 loss.margin 23.45912800884442 +340 5 loss.adversarial_temperature 0.12398770023017712 +340 5 optimizer.lr 0.010253066089779326 +340 5 negative_sampler.num_negs_per_pos 14.0 +340 5 training.batch_size 2.0 +340 6 model.embedding_dim 1.0 +340 6 loss.margin 14.617405646831857 +340 6 loss.adversarial_temperature 0.7761607487573438 +340 6 optimizer.lr 0.022108845812231034 +340 6 negative_sampler.num_negs_per_pos 7.0 +340 6 training.batch_size 1.0 +340 7 model.embedding_dim 2.0 +340 7 loss.margin 12.450569408107908 +340 7 loss.adversarial_temperature 0.7258299846749464 +340 7 optimizer.lr 0.0019208651993082673 +340 7 negative_sampler.num_negs_per_pos 23.0 +340 7 training.batch_size 0.0 +340 8 model.embedding_dim 0.0 +340 8 loss.margin 21.797145975836393 +340 8 loss.adversarial_temperature 0.9158898165507637 +340 8 optimizer.lr 0.0011412298038206985 +340 8 negative_sampler.num_negs_per_pos 14.0 +340 8 training.batch_size 3.0 +340 9 model.embedding_dim 1.0 +340 9 loss.margin 19.779586304360432 +340 9 loss.adversarial_temperature 0.9245273652112944 +340 9 
optimizer.lr 0.002357568771143915 +340 9 negative_sampler.num_negs_per_pos 12.0 +340 9 training.batch_size 0.0 +340 10 model.embedding_dim 1.0 +340 10 loss.margin 4.582762895928143 +340 10 loss.adversarial_temperature 0.24913426438364766 +340 10 optimizer.lr 0.0020533512351580737 +340 10 negative_sampler.num_negs_per_pos 14.0 +340 10 training.batch_size 2.0 +340 11 model.embedding_dim 0.0 +340 11 loss.margin 21.510263148243535 +340 11 loss.adversarial_temperature 0.6917356026424492 +340 11 optimizer.lr 0.004929694428647926 +340 11 negative_sampler.num_negs_per_pos 49.0 +340 11 training.batch_size 0.0 +340 12 model.embedding_dim 0.0 +340 12 loss.margin 6.148428523173585 +340 12 loss.adversarial_temperature 0.3705364808319165 +340 12 optimizer.lr 0.0021345507586791905 +340 12 negative_sampler.num_negs_per_pos 48.0 +340 12 training.batch_size 0.0 +340 13 model.embedding_dim 0.0 +340 13 loss.margin 7.944130075907955 +340 13 loss.adversarial_temperature 0.12776711092669468 +340 13 optimizer.lr 0.003946717187045782 +340 13 negative_sampler.num_negs_per_pos 31.0 +340 13 training.batch_size 3.0 +340 1 dataset """yago310""" +340 1 model """hole""" +340 1 loss """nssa""" +340 1 regularizer """no""" +340 1 optimizer """adam""" +340 1 training_loop """owa""" +340 1 negative_sampler """basic""" +340 1 evaluator """rankbased""" +340 2 dataset """yago310""" +340 2 model """hole""" +340 2 loss """nssa""" +340 2 regularizer """no""" +340 2 optimizer """adam""" +340 2 training_loop """owa""" +340 2 negative_sampler """basic""" +340 2 evaluator """rankbased""" +340 3 dataset """yago310""" +340 3 model """hole""" +340 3 loss """nssa""" +340 3 regularizer """no""" +340 3 optimizer """adam""" +340 3 training_loop """owa""" +340 3 negative_sampler """basic""" +340 3 evaluator """rankbased""" +340 4 dataset """yago310""" +340 4 model """hole""" +340 4 loss """nssa""" +340 4 regularizer """no""" +340 4 optimizer """adam""" +340 4 training_loop """owa""" +340 4 negative_sampler """basic""" 
+340 4 evaluator """rankbased""" +340 5 dataset """yago310""" +340 5 model """hole""" +340 5 loss """nssa""" +340 5 regularizer """no""" +340 5 optimizer """adam""" +340 5 training_loop """owa""" +340 5 negative_sampler """basic""" +340 5 evaluator """rankbased""" +340 6 dataset """yago310""" +340 6 model """hole""" +340 6 loss """nssa""" +340 6 regularizer """no""" +340 6 optimizer """adam""" +340 6 training_loop """owa""" +340 6 negative_sampler """basic""" +340 6 evaluator """rankbased""" +340 7 dataset """yago310""" +340 7 model """hole""" +340 7 loss """nssa""" +340 7 regularizer """no""" +340 7 optimizer """adam""" +340 7 training_loop """owa""" +340 7 negative_sampler """basic""" +340 7 evaluator """rankbased""" +340 8 dataset """yago310""" +340 8 model """hole""" +340 8 loss """nssa""" +340 8 regularizer """no""" +340 8 optimizer """adam""" +340 8 training_loop """owa""" +340 8 negative_sampler """basic""" +340 8 evaluator """rankbased""" +340 9 dataset """yago310""" +340 9 model """hole""" +340 9 loss """nssa""" +340 9 regularizer """no""" +340 9 optimizer """adam""" +340 9 training_loop """owa""" +340 9 negative_sampler """basic""" +340 9 evaluator """rankbased""" +340 10 dataset """yago310""" +340 10 model """hole""" +340 10 loss """nssa""" +340 10 regularizer """no""" +340 10 optimizer """adam""" +340 10 training_loop """owa""" +340 10 negative_sampler """basic""" +340 10 evaluator """rankbased""" +340 11 dataset """yago310""" +340 11 model """hole""" +340 11 loss """nssa""" +340 11 regularizer """no""" +340 11 optimizer """adam""" +340 11 training_loop """owa""" +340 11 negative_sampler """basic""" +340 11 evaluator """rankbased""" +340 12 dataset """yago310""" +340 12 model """hole""" +340 12 loss """nssa""" +340 12 regularizer """no""" +340 12 optimizer """adam""" +340 12 training_loop """owa""" +340 12 negative_sampler """basic""" +340 12 evaluator """rankbased""" +340 13 dataset """yago310""" +340 13 model """hole""" +340 13 loss """nssa""" +340 13 
regularizer """no""" +340 13 optimizer """adam""" +340 13 training_loop """owa""" +340 13 negative_sampler """basic""" +340 13 evaluator """rankbased""" +341 1 model.embedding_dim 1.0 +341 1 optimizer.lr 0.0018775539188749788 +341 1 negative_sampler.num_negs_per_pos 4.0 +341 1 training.batch_size 2.0 +341 2 model.embedding_dim 2.0 +341 2 optimizer.lr 0.011531892210665985 +341 2 negative_sampler.num_negs_per_pos 0.0 +341 2 training.batch_size 0.0 +341 3 model.embedding_dim 0.0 +341 3 optimizer.lr 0.0037383419758425076 +341 3 negative_sampler.num_negs_per_pos 48.0 +341 3 training.batch_size 1.0 +341 4 model.embedding_dim 0.0 +341 4 optimizer.lr 0.006717105745870171 +341 4 negative_sampler.num_negs_per_pos 4.0 +341 4 training.batch_size 1.0 +341 5 model.embedding_dim 2.0 +341 5 optimizer.lr 0.01193928395382625 +341 5 negative_sampler.num_negs_per_pos 17.0 +341 5 training.batch_size 0.0 +341 6 model.embedding_dim 0.0 +341 6 optimizer.lr 0.011593219662942123 +341 6 negative_sampler.num_negs_per_pos 10.0 +341 6 training.batch_size 0.0 +341 7 model.embedding_dim 2.0 +341 7 optimizer.lr 0.011823894518799718 +341 7 negative_sampler.num_negs_per_pos 32.0 +341 7 training.batch_size 1.0 +341 8 model.embedding_dim 2.0 +341 8 optimizer.lr 0.007061990350059508 +341 8 negative_sampler.num_negs_per_pos 49.0 +341 8 training.batch_size 1.0 +341 9 model.embedding_dim 0.0 +341 9 optimizer.lr 0.0027559278097648585 +341 9 negative_sampler.num_negs_per_pos 40.0 +341 9 training.batch_size 3.0 +341 10 model.embedding_dim 1.0 +341 10 optimizer.lr 0.013137917272721428 +341 10 negative_sampler.num_negs_per_pos 3.0 +341 10 training.batch_size 2.0 +341 11 model.embedding_dim 2.0 +341 11 optimizer.lr 0.002476830159492117 +341 11 negative_sampler.num_negs_per_pos 29.0 +341 11 training.batch_size 3.0 +341 1 dataset """yago310""" +341 1 model """hole""" +341 1 loss """bceaftersigmoid""" +341 1 regularizer """no""" +341 1 optimizer """adam""" +341 1 training_loop """owa""" +341 1 negative_sampler 
"""basic""" +341 1 evaluator """rankbased""" +341 2 dataset """yago310""" +341 2 model """hole""" +341 2 loss """bceaftersigmoid""" +341 2 regularizer """no""" +341 2 optimizer """adam""" +341 2 training_loop """owa""" +341 2 negative_sampler """basic""" +341 2 evaluator """rankbased""" +341 3 dataset """yago310""" +341 3 model """hole""" +341 3 loss """bceaftersigmoid""" +341 3 regularizer """no""" +341 3 optimizer """adam""" +341 3 training_loop """owa""" +341 3 negative_sampler """basic""" +341 3 evaluator """rankbased""" +341 4 dataset """yago310""" +341 4 model """hole""" +341 4 loss """bceaftersigmoid""" +341 4 regularizer """no""" +341 4 optimizer """adam""" +341 4 training_loop """owa""" +341 4 negative_sampler """basic""" +341 4 evaluator """rankbased""" +341 5 dataset """yago310""" +341 5 model """hole""" +341 5 loss """bceaftersigmoid""" +341 5 regularizer """no""" +341 5 optimizer """adam""" +341 5 training_loop """owa""" +341 5 negative_sampler """basic""" +341 5 evaluator """rankbased""" +341 6 dataset """yago310""" +341 6 model """hole""" +341 6 loss """bceaftersigmoid""" +341 6 regularizer """no""" +341 6 optimizer """adam""" +341 6 training_loop """owa""" +341 6 negative_sampler """basic""" +341 6 evaluator """rankbased""" +341 7 dataset """yago310""" +341 7 model """hole""" +341 7 loss """bceaftersigmoid""" +341 7 regularizer """no""" +341 7 optimizer """adam""" +341 7 training_loop """owa""" +341 7 negative_sampler """basic""" +341 7 evaluator """rankbased""" +341 8 dataset """yago310""" +341 8 model """hole""" +341 8 loss """bceaftersigmoid""" +341 8 regularizer """no""" +341 8 optimizer """adam""" +341 8 training_loop """owa""" +341 8 negative_sampler """basic""" +341 8 evaluator """rankbased""" +341 9 dataset """yago310""" +341 9 model """hole""" +341 9 loss """bceaftersigmoid""" +341 9 regularizer """no""" +341 9 optimizer """adam""" +341 9 training_loop """owa""" +341 9 negative_sampler """basic""" +341 9 evaluator """rankbased""" +341 10 
dataset """yago310""" +341 10 model """hole""" +341 10 loss """bceaftersigmoid""" +341 10 regularizer """no""" +341 10 optimizer """adam""" +341 10 training_loop """owa""" +341 10 negative_sampler """basic""" +341 10 evaluator """rankbased""" +341 11 dataset """yago310""" +341 11 model """hole""" +341 11 loss """bceaftersigmoid""" +341 11 regularizer """no""" +341 11 optimizer """adam""" +341 11 training_loop """owa""" +341 11 negative_sampler """basic""" +341 11 evaluator """rankbased""" +342 1 model.embedding_dim 2.0 +342 1 optimizer.lr 0.0029215970519474396 +342 1 negative_sampler.num_negs_per_pos 31.0 +342 1 training.batch_size 1.0 +342 2 model.embedding_dim 0.0 +342 2 optimizer.lr 0.06779087651966818 +342 2 negative_sampler.num_negs_per_pos 10.0 +342 2 training.batch_size 2.0 +342 3 model.embedding_dim 2.0 +342 3 optimizer.lr 0.0030227418488553075 +342 3 negative_sampler.num_negs_per_pos 30.0 +342 3 training.batch_size 3.0 +342 4 model.embedding_dim 1.0 +342 4 optimizer.lr 0.0026852270044640716 +342 4 negative_sampler.num_negs_per_pos 8.0 +342 4 training.batch_size 3.0 +342 5 model.embedding_dim 1.0 +342 5 optimizer.lr 0.010363709058989623 +342 5 negative_sampler.num_negs_per_pos 34.0 +342 5 training.batch_size 0.0 +342 6 model.embedding_dim 1.0 +342 6 optimizer.lr 0.011986283844622128 +342 6 negative_sampler.num_negs_per_pos 11.0 +342 6 training.batch_size 2.0 +342 7 model.embedding_dim 0.0 +342 7 optimizer.lr 0.008290562927829748 +342 7 negative_sampler.num_negs_per_pos 21.0 +342 7 training.batch_size 0.0 +342 8 model.embedding_dim 0.0 +342 8 optimizer.lr 0.002297477769967503 +342 8 negative_sampler.num_negs_per_pos 40.0 +342 8 training.batch_size 2.0 +342 9 model.embedding_dim 2.0 +342 9 optimizer.lr 0.007969841341780227 +342 9 negative_sampler.num_negs_per_pos 28.0 +342 9 training.batch_size 2.0 +342 10 model.embedding_dim 2.0 +342 10 optimizer.lr 0.015385824519234486 +342 10 negative_sampler.num_negs_per_pos 39.0 +342 10 training.batch_size 1.0 +342 11 
model.embedding_dim 2.0 +342 11 optimizer.lr 0.007745834663593003 +342 11 negative_sampler.num_negs_per_pos 42.0 +342 11 training.batch_size 3.0 +342 12 model.embedding_dim 1.0 +342 12 optimizer.lr 0.0011668347667987764 +342 12 negative_sampler.num_negs_per_pos 46.0 +342 12 training.batch_size 2.0 +342 13 model.embedding_dim 2.0 +342 13 optimizer.lr 0.02395761236086182 +342 13 negative_sampler.num_negs_per_pos 48.0 +342 13 training.batch_size 0.0 +342 14 model.embedding_dim 2.0 +342 14 optimizer.lr 0.07744443459080039 +342 14 negative_sampler.num_negs_per_pos 34.0 +342 14 training.batch_size 0.0 +342 15 model.embedding_dim 2.0 +342 15 optimizer.lr 0.044442491253347795 +342 15 negative_sampler.num_negs_per_pos 34.0 +342 15 training.batch_size 2.0 +342 16 model.embedding_dim 2.0 +342 16 optimizer.lr 0.011544898081581208 +342 16 negative_sampler.num_negs_per_pos 35.0 +342 16 training.batch_size 0.0 +342 17 model.embedding_dim 2.0 +342 17 optimizer.lr 0.00602527820744625 +342 17 negative_sampler.num_negs_per_pos 36.0 +342 17 training.batch_size 1.0 +342 1 dataset """yago310""" +342 1 model """hole""" +342 1 loss """bceaftersigmoid""" +342 1 regularizer """no""" +342 1 optimizer """adam""" +342 1 training_loop """owa""" +342 1 negative_sampler """basic""" +342 1 evaluator """rankbased""" +342 2 dataset """yago310""" +342 2 model """hole""" +342 2 loss """bceaftersigmoid""" +342 2 regularizer """no""" +342 2 optimizer """adam""" +342 2 training_loop """owa""" +342 2 negative_sampler """basic""" +342 2 evaluator """rankbased""" +342 3 dataset """yago310""" +342 3 model """hole""" +342 3 loss """bceaftersigmoid""" +342 3 regularizer """no""" +342 3 optimizer """adam""" +342 3 training_loop """owa""" +342 3 negative_sampler """basic""" +342 3 evaluator """rankbased""" +342 4 dataset """yago310""" +342 4 model """hole""" +342 4 loss """bceaftersigmoid""" +342 4 regularizer """no""" +342 4 optimizer """adam""" +342 4 training_loop """owa""" +342 4 negative_sampler """basic""" 
+342 4 evaluator """rankbased""" +342 5 dataset """yago310""" +342 5 model """hole""" +342 5 loss """bceaftersigmoid""" +342 5 regularizer """no""" +342 5 optimizer """adam""" +342 5 training_loop """owa""" +342 5 negative_sampler """basic""" +342 5 evaluator """rankbased""" +342 6 dataset """yago310""" +342 6 model """hole""" +342 6 loss """bceaftersigmoid""" +342 6 regularizer """no""" +342 6 optimizer """adam""" +342 6 training_loop """owa""" +342 6 negative_sampler """basic""" +342 6 evaluator """rankbased""" +342 7 dataset """yago310""" +342 7 model """hole""" +342 7 loss """bceaftersigmoid""" +342 7 regularizer """no""" +342 7 optimizer """adam""" +342 7 training_loop """owa""" +342 7 negative_sampler """basic""" +342 7 evaluator """rankbased""" +342 8 dataset """yago310""" +342 8 model """hole""" +342 8 loss """bceaftersigmoid""" +342 8 regularizer """no""" +342 8 optimizer """adam""" +342 8 training_loop """owa""" +342 8 negative_sampler """basic""" +342 8 evaluator """rankbased""" +342 9 dataset """yago310""" +342 9 model """hole""" +342 9 loss """bceaftersigmoid""" +342 9 regularizer """no""" +342 9 optimizer """adam""" +342 9 training_loop """owa""" +342 9 negative_sampler """basic""" +342 9 evaluator """rankbased""" +342 10 dataset """yago310""" +342 10 model """hole""" +342 10 loss """bceaftersigmoid""" +342 10 regularizer """no""" +342 10 optimizer """adam""" +342 10 training_loop """owa""" +342 10 negative_sampler """basic""" +342 10 evaluator """rankbased""" +342 11 dataset """yago310""" +342 11 model """hole""" +342 11 loss """bceaftersigmoid""" +342 11 regularizer """no""" +342 11 optimizer """adam""" +342 11 training_loop """owa""" +342 11 negative_sampler """basic""" +342 11 evaluator """rankbased""" +342 12 dataset """yago310""" +342 12 model """hole""" +342 12 loss """bceaftersigmoid""" +342 12 regularizer """no""" +342 12 optimizer """adam""" +342 12 training_loop """owa""" +342 12 negative_sampler """basic""" +342 12 evaluator 
"""rankbased""" +342 13 dataset """yago310""" +342 13 model """hole""" +342 13 loss """bceaftersigmoid""" +342 13 regularizer """no""" +342 13 optimizer """adam""" +342 13 training_loop """owa""" +342 13 negative_sampler """basic""" +342 13 evaluator """rankbased""" +342 14 dataset """yago310""" +342 14 model """hole""" +342 14 loss """bceaftersigmoid""" +342 14 regularizer """no""" +342 14 optimizer """adam""" +342 14 training_loop """owa""" +342 14 negative_sampler """basic""" +342 14 evaluator """rankbased""" +342 15 dataset """yago310""" +342 15 model """hole""" +342 15 loss """bceaftersigmoid""" +342 15 regularizer """no""" +342 15 optimizer """adam""" +342 15 training_loop """owa""" +342 15 negative_sampler """basic""" +342 15 evaluator """rankbased""" +342 16 dataset """yago310""" +342 16 model """hole""" +342 16 loss """bceaftersigmoid""" +342 16 regularizer """no""" +342 16 optimizer """adam""" +342 16 training_loop """owa""" +342 16 negative_sampler """basic""" +342 16 evaluator """rankbased""" +342 17 dataset """yago310""" +342 17 model """hole""" +342 17 loss """bceaftersigmoid""" +342 17 regularizer """no""" +342 17 optimizer """adam""" +342 17 training_loop """owa""" +342 17 negative_sampler """basic""" +342 17 evaluator """rankbased""" +343 1 model.embedding_dim 1.0 +343 1 loss.margin 3.4920057098598623 +343 1 optimizer.lr 0.0012262550185239281 +343 1 negative_sampler.num_negs_per_pos 28.0 +343 1 training.batch_size 1.0 +343 2 model.embedding_dim 1.0 +343 2 loss.margin 5.529045124629193 +343 2 optimizer.lr 0.005404106772372098 +343 2 negative_sampler.num_negs_per_pos 1.0 +343 2 training.batch_size 1.0 +343 3 model.embedding_dim 1.0 +343 3 loss.margin 6.293207882573419 +343 3 optimizer.lr 0.007401559494246273 +343 3 negative_sampler.num_negs_per_pos 46.0 +343 3 training.batch_size 3.0 +343 4 model.embedding_dim 2.0 +343 4 loss.margin 7.9697054609808315 +343 4 optimizer.lr 0.0012926494993989732 +343 4 negative_sampler.num_negs_per_pos 23.0 +343 4 
training.batch_size 3.0 +343 5 model.embedding_dim 2.0 +343 5 loss.margin 1.1641439736045904 +343 5 optimizer.lr 0.07701129803012623 +343 5 negative_sampler.num_negs_per_pos 0.0 +343 5 training.batch_size 2.0 +343 6 model.embedding_dim 2.0 +343 6 loss.margin 1.9618698365618195 +343 6 optimizer.lr 0.004554015951440488 +343 6 negative_sampler.num_negs_per_pos 40.0 +343 6 training.batch_size 3.0 +343 7 model.embedding_dim 0.0 +343 7 loss.margin 9.252949603325314 +343 7 optimizer.lr 0.0019215800382463698 +343 7 negative_sampler.num_negs_per_pos 13.0 +343 7 training.batch_size 0.0 +343 8 model.embedding_dim 0.0 +343 8 loss.margin 7.051679196853012 +343 8 optimizer.lr 0.016068928121274792 +343 8 negative_sampler.num_negs_per_pos 34.0 +343 8 training.batch_size 3.0 +343 9 model.embedding_dim 0.0 +343 9 loss.margin 6.469901374282426 +343 9 optimizer.lr 0.029190742081930173 +343 9 negative_sampler.num_negs_per_pos 40.0 +343 9 training.batch_size 1.0 +343 1 dataset """yago310""" +343 1 model """hole""" +343 1 loss """marginranking""" +343 1 regularizer """no""" +343 1 optimizer """adam""" +343 1 training_loop """owa""" +343 1 negative_sampler """basic""" +343 1 evaluator """rankbased""" +343 2 dataset """yago310""" +343 2 model """hole""" +343 2 loss """marginranking""" +343 2 regularizer """no""" +343 2 optimizer """adam""" +343 2 training_loop """owa""" +343 2 negative_sampler """basic""" +343 2 evaluator """rankbased""" +343 3 dataset """yago310""" +343 3 model """hole""" +343 3 loss """marginranking""" +343 3 regularizer """no""" +343 3 optimizer """adam""" +343 3 training_loop """owa""" +343 3 negative_sampler """basic""" +343 3 evaluator """rankbased""" +343 4 dataset """yago310""" +343 4 model """hole""" +343 4 loss """marginranking""" +343 4 regularizer """no""" +343 4 optimizer """adam""" +343 4 training_loop """owa""" +343 4 negative_sampler """basic""" +343 4 evaluator """rankbased""" +343 5 dataset """yago310""" +343 5 model """hole""" +343 5 loss 
"""marginranking""" +343 5 regularizer """no""" +343 5 optimizer """adam""" +343 5 training_loop """owa""" +343 5 negative_sampler """basic""" +343 5 evaluator """rankbased""" +343 6 dataset """yago310""" +343 6 model """hole""" +343 6 loss """marginranking""" +343 6 regularizer """no""" +343 6 optimizer """adam""" +343 6 training_loop """owa""" +343 6 negative_sampler """basic""" +343 6 evaluator """rankbased""" +343 7 dataset """yago310""" +343 7 model """hole""" +343 7 loss """marginranking""" +343 7 regularizer """no""" +343 7 optimizer """adam""" +343 7 training_loop """owa""" +343 7 negative_sampler """basic""" +343 7 evaluator """rankbased""" +343 8 dataset """yago310""" +343 8 model """hole""" +343 8 loss """marginranking""" +343 8 regularizer """no""" +343 8 optimizer """adam""" +343 8 training_loop """owa""" +343 8 negative_sampler """basic""" +343 8 evaluator """rankbased""" +343 9 dataset """yago310""" +343 9 model """hole""" +343 9 loss """marginranking""" +343 9 regularizer """no""" +343 9 optimizer """adam""" +343 9 training_loop """owa""" +343 9 negative_sampler """basic""" +343 9 evaluator """rankbased""" +344 1 model.embedding_dim 2.0 +344 1 loss.margin 9.570269985818694 +344 1 optimizer.lr 0.001427409365037469 +344 1 negative_sampler.num_negs_per_pos 38.0 +344 1 training.batch_size 1.0 +344 2 model.embedding_dim 2.0 +344 2 loss.margin 8.904949924942494 +344 2 optimizer.lr 0.005588757434955028 +344 2 negative_sampler.num_negs_per_pos 33.0 +344 2 training.batch_size 3.0 +344 3 model.embedding_dim 2.0 +344 3 loss.margin 6.249997198991587 +344 3 optimizer.lr 0.013212173279863033 +344 3 negative_sampler.num_negs_per_pos 37.0 +344 3 training.batch_size 1.0 +344 4 model.embedding_dim 1.0 +344 4 loss.margin 5.899089407447418 +344 4 optimizer.lr 0.029253638501455542 +344 4 negative_sampler.num_negs_per_pos 8.0 +344 4 training.batch_size 3.0 +344 5 model.embedding_dim 2.0 +344 5 loss.margin 1.9778137849987738 +344 5 optimizer.lr 0.06759423235417057 +344 5 
negative_sampler.num_negs_per_pos 22.0 +344 5 training.batch_size 2.0 +344 6 model.embedding_dim 0.0 +344 6 loss.margin 3.9171615182114357 +344 6 optimizer.lr 0.005534836926493487 +344 6 negative_sampler.num_negs_per_pos 46.0 +344 6 training.batch_size 0.0 +344 7 model.embedding_dim 2.0 +344 7 loss.margin 4.062688512186904 +344 7 optimizer.lr 0.06385186244362744 +344 7 negative_sampler.num_negs_per_pos 35.0 +344 7 training.batch_size 0.0 +344 8 model.embedding_dim 1.0 +344 8 loss.margin 8.595940712356956 +344 8 optimizer.lr 0.001850521864030194 +344 8 negative_sampler.num_negs_per_pos 40.0 +344 8 training.batch_size 2.0 +344 9 model.embedding_dim 2.0 +344 9 loss.margin 1.8517334739885913 +344 9 optimizer.lr 0.0043287601259363135 +344 9 negative_sampler.num_negs_per_pos 38.0 +344 9 training.batch_size 0.0 +344 10 model.embedding_dim 0.0 +344 10 loss.margin 3.3801010097088446 +344 10 optimizer.lr 0.018544110016910555 +344 10 negative_sampler.num_negs_per_pos 41.0 +344 10 training.batch_size 1.0 +344 11 model.embedding_dim 1.0 +344 11 loss.margin 9.09087283513036 +344 11 optimizer.lr 0.0026949145502254686 +344 11 negative_sampler.num_negs_per_pos 13.0 +344 11 training.batch_size 3.0 +344 12 model.embedding_dim 1.0 +344 12 loss.margin 3.913027848010884 +344 12 optimizer.lr 0.041070781296839796 +344 12 negative_sampler.num_negs_per_pos 47.0 +344 12 training.batch_size 3.0 +344 13 model.embedding_dim 2.0 +344 13 loss.margin 6.614029994449835 +344 13 optimizer.lr 0.042806146238743556 +344 13 negative_sampler.num_negs_per_pos 30.0 +344 13 training.batch_size 0.0 +344 1 dataset """yago310""" +344 1 model """hole""" +344 1 loss """marginranking""" +344 1 regularizer """no""" +344 1 optimizer """adam""" +344 1 training_loop """owa""" +344 1 negative_sampler """basic""" +344 1 evaluator """rankbased""" +344 2 dataset """yago310""" +344 2 model """hole""" +344 2 loss """marginranking""" +344 2 regularizer """no""" +344 2 optimizer """adam""" +344 2 training_loop """owa""" +344 
2 negative_sampler """basic""" +344 2 evaluator """rankbased""" +344 3 dataset """yago310""" +344 3 model """hole""" +344 3 loss """marginranking""" +344 3 regularizer """no""" +344 3 optimizer """adam""" +344 3 training_loop """owa""" +344 3 negative_sampler """basic""" +344 3 evaluator """rankbased""" +344 4 dataset """yago310""" +344 4 model """hole""" +344 4 loss """marginranking""" +344 4 regularizer """no""" +344 4 optimizer """adam""" +344 4 training_loop """owa""" +344 4 negative_sampler """basic""" +344 4 evaluator """rankbased""" +344 5 dataset """yago310""" +344 5 model """hole""" +344 5 loss """marginranking""" +344 5 regularizer """no""" +344 5 optimizer """adam""" +344 5 training_loop """owa""" +344 5 negative_sampler """basic""" +344 5 evaluator """rankbased""" +344 6 dataset """yago310""" +344 6 model """hole""" +344 6 loss """marginranking""" +344 6 regularizer """no""" +344 6 optimizer """adam""" +344 6 training_loop """owa""" +344 6 negative_sampler """basic""" +344 6 evaluator """rankbased""" +344 7 dataset """yago310""" +344 7 model """hole""" +344 7 loss """marginranking""" +344 7 regularizer """no""" +344 7 optimizer """adam""" +344 7 training_loop """owa""" +344 7 negative_sampler """basic""" +344 7 evaluator """rankbased""" +344 8 dataset """yago310""" +344 8 model """hole""" +344 8 loss """marginranking""" +344 8 regularizer """no""" +344 8 optimizer """adam""" +344 8 training_loop """owa""" +344 8 negative_sampler """basic""" +344 8 evaluator """rankbased""" +344 9 dataset """yago310""" +344 9 model """hole""" +344 9 loss """marginranking""" +344 9 regularizer """no""" +344 9 optimizer """adam""" +344 9 training_loop """owa""" +344 9 negative_sampler """basic""" +344 9 evaluator """rankbased""" +344 10 dataset """yago310""" +344 10 model """hole""" +344 10 loss """marginranking""" +344 10 regularizer """no""" +344 10 optimizer """adam""" +344 10 training_loop """owa""" +344 10 negative_sampler """basic""" +344 10 evaluator """rankbased""" 
+344 11 dataset """yago310""" +344 11 model """hole""" +344 11 loss """marginranking""" +344 11 regularizer """no""" +344 11 optimizer """adam""" +344 11 training_loop """owa""" +344 11 negative_sampler """basic""" +344 11 evaluator """rankbased""" +344 12 dataset """yago310""" +344 12 model """hole""" +344 12 loss """marginranking""" +344 12 regularizer """no""" +344 12 optimizer """adam""" +344 12 training_loop """owa""" +344 12 negative_sampler """basic""" +344 12 evaluator """rankbased""" +344 13 dataset """yago310""" +344 13 model """hole""" +344 13 loss """marginranking""" +344 13 regularizer """no""" +344 13 optimizer """adam""" +344 13 training_loop """owa""" +344 13 negative_sampler """basic""" +344 13 evaluator """rankbased""" +345 1 model.embedding_dim 1.0 +345 1 optimizer.lr 0.06643016844930362 +345 1 negative_sampler.num_negs_per_pos 19.0 +345 1 training.batch_size 0.0 +345 2 model.embedding_dim 2.0 +345 2 optimizer.lr 0.005419382623112439 +345 2 negative_sampler.num_negs_per_pos 22.0 +345 2 training.batch_size 3.0 +345 3 model.embedding_dim 0.0 +345 3 optimizer.lr 0.01091043190787882 +345 3 negative_sampler.num_negs_per_pos 44.0 +345 3 training.batch_size 2.0 +345 4 model.embedding_dim 0.0 +345 4 optimizer.lr 0.00370606374109701 +345 4 negative_sampler.num_negs_per_pos 25.0 +345 4 training.batch_size 3.0 +345 5 model.embedding_dim 2.0 +345 5 optimizer.lr 0.0031986006689835814 +345 5 negative_sampler.num_negs_per_pos 49.0 +345 5 training.batch_size 2.0 +345 6 model.embedding_dim 0.0 +345 6 optimizer.lr 0.0017946510889233502 +345 6 negative_sampler.num_negs_per_pos 29.0 +345 6 training.batch_size 0.0 +345 7 model.embedding_dim 2.0 +345 7 optimizer.lr 0.05512232272075986 +345 7 negative_sampler.num_negs_per_pos 27.0 +345 7 training.batch_size 0.0 +345 8 model.embedding_dim 2.0 +345 8 optimizer.lr 0.0038712343279594585 +345 8 negative_sampler.num_negs_per_pos 22.0 +345 8 training.batch_size 2.0 +345 9 model.embedding_dim 2.0 +345 9 optimizer.lr 
0.006088010732490043 +345 9 negative_sampler.num_negs_per_pos 7.0 +345 9 training.batch_size 3.0 +345 10 model.embedding_dim 1.0 +345 10 optimizer.lr 0.0011066885819803294 +345 10 negative_sampler.num_negs_per_pos 24.0 +345 10 training.batch_size 0.0 +345 1 dataset """yago310""" +345 1 model """hole""" +345 1 loss """softplus""" +345 1 regularizer """no""" +345 1 optimizer """adam""" +345 1 training_loop """owa""" +345 1 negative_sampler """basic""" +345 1 evaluator """rankbased""" +345 2 dataset """yago310""" +345 2 model """hole""" +345 2 loss """softplus""" +345 2 regularizer """no""" +345 2 optimizer """adam""" +345 2 training_loop """owa""" +345 2 negative_sampler """basic""" +345 2 evaluator """rankbased""" +345 3 dataset """yago310""" +345 3 model """hole""" +345 3 loss """softplus""" +345 3 regularizer """no""" +345 3 optimizer """adam""" +345 3 training_loop """owa""" +345 3 negative_sampler """basic""" +345 3 evaluator """rankbased""" +345 4 dataset """yago310""" +345 4 model """hole""" +345 4 loss """softplus""" +345 4 regularizer """no""" +345 4 optimizer """adam""" +345 4 training_loop """owa""" +345 4 negative_sampler """basic""" +345 4 evaluator """rankbased""" +345 5 dataset """yago310""" +345 5 model """hole""" +345 5 loss """softplus""" +345 5 regularizer """no""" +345 5 optimizer """adam""" +345 5 training_loop """owa""" +345 5 negative_sampler """basic""" +345 5 evaluator """rankbased""" +345 6 dataset """yago310""" +345 6 model """hole""" +345 6 loss """softplus""" +345 6 regularizer """no""" +345 6 optimizer """adam""" +345 6 training_loop """owa""" +345 6 negative_sampler """basic""" +345 6 evaluator """rankbased""" +345 7 dataset """yago310""" +345 7 model """hole""" +345 7 loss """softplus""" +345 7 regularizer """no""" +345 7 optimizer """adam""" +345 7 training_loop """owa""" +345 7 negative_sampler """basic""" +345 7 evaluator """rankbased""" +345 8 dataset """yago310""" +345 8 model """hole""" +345 8 loss """softplus""" +345 8 
regularizer """no""" +345 8 optimizer """adam""" +345 8 training_loop """owa""" +345 8 negative_sampler """basic""" +345 8 evaluator """rankbased""" +345 9 dataset """yago310""" +345 9 model """hole""" +345 9 loss """softplus""" +345 9 regularizer """no""" +345 9 optimizer """adam""" +345 9 training_loop """owa""" +345 9 negative_sampler """basic""" +345 9 evaluator """rankbased""" +345 10 dataset """yago310""" +345 10 model """hole""" +345 10 loss """softplus""" +345 10 regularizer """no""" +345 10 optimizer """adam""" +345 10 training_loop """owa""" +345 10 negative_sampler """basic""" +345 10 evaluator """rankbased""" +346 1 model.embedding_dim 2.0 +346 1 optimizer.lr 0.06609216390766953 +346 1 negative_sampler.num_negs_per_pos 3.0 +346 1 training.batch_size 2.0 +346 2 model.embedding_dim 2.0 +346 2 optimizer.lr 0.03890429539251766 +346 2 negative_sampler.num_negs_per_pos 14.0 +346 2 training.batch_size 3.0 +346 3 model.embedding_dim 1.0 +346 3 optimizer.lr 0.003872675023497385 +346 3 negative_sampler.num_negs_per_pos 10.0 +346 3 training.batch_size 1.0 +346 4 model.embedding_dim 0.0 +346 4 optimizer.lr 0.009847877733615977 +346 4 negative_sampler.num_negs_per_pos 16.0 +346 4 training.batch_size 3.0 +346 5 model.embedding_dim 0.0 +346 5 optimizer.lr 0.08783560453167352 +346 5 negative_sampler.num_negs_per_pos 29.0 +346 5 training.batch_size 3.0 +346 6 model.embedding_dim 0.0 +346 6 optimizer.lr 0.0020596684504313734 +346 6 negative_sampler.num_negs_per_pos 36.0 +346 6 training.batch_size 2.0 +346 7 model.embedding_dim 2.0 +346 7 optimizer.lr 0.00308670424890856 +346 7 negative_sampler.num_negs_per_pos 11.0 +346 7 training.batch_size 0.0 +346 8 model.embedding_dim 0.0 +346 8 optimizer.lr 0.03776645438711098 +346 8 negative_sampler.num_negs_per_pos 35.0 +346 8 training.batch_size 2.0 +346 9 model.embedding_dim 0.0 +346 9 optimizer.lr 0.0011603912652235547 +346 9 negative_sampler.num_negs_per_pos 22.0 +346 9 training.batch_size 0.0 +346 10 model.embedding_dim 1.0 
+346 10 optimizer.lr 0.0020640119953545476 +346 10 negative_sampler.num_negs_per_pos 33.0 +346 10 training.batch_size 2.0 +346 11 model.embedding_dim 0.0 +346 11 optimizer.lr 0.026524328291483195 +346 11 negative_sampler.num_negs_per_pos 11.0 +346 11 training.batch_size 2.0 +346 12 model.embedding_dim 0.0 +346 12 optimizer.lr 0.006402586019605727 +346 12 negative_sampler.num_negs_per_pos 31.0 +346 12 training.batch_size 0.0 +346 13 model.embedding_dim 1.0 +346 13 optimizer.lr 0.0013829971172587034 +346 13 negative_sampler.num_negs_per_pos 41.0 +346 13 training.batch_size 1.0 +346 14 model.embedding_dim 0.0 +346 14 optimizer.lr 0.02475801534586058 +346 14 negative_sampler.num_negs_per_pos 2.0 +346 14 training.batch_size 3.0 +346 15 model.embedding_dim 0.0 +346 15 optimizer.lr 0.00283345665444339 +346 15 negative_sampler.num_negs_per_pos 6.0 +346 15 training.batch_size 0.0 +346 16 model.embedding_dim 2.0 +346 16 optimizer.lr 0.07048940715499684 +346 16 negative_sampler.num_negs_per_pos 28.0 +346 16 training.batch_size 1.0 +346 17 model.embedding_dim 1.0 +346 17 optimizer.lr 0.02914748348980122 +346 17 negative_sampler.num_negs_per_pos 44.0 +346 17 training.batch_size 3.0 +346 18 model.embedding_dim 2.0 +346 18 optimizer.lr 0.04031004486672567 +346 18 negative_sampler.num_negs_per_pos 40.0 +346 18 training.batch_size 1.0 +346 19 model.embedding_dim 0.0 +346 19 optimizer.lr 0.03389395104484116 +346 19 negative_sampler.num_negs_per_pos 12.0 +346 19 training.batch_size 3.0 +346 20 model.embedding_dim 1.0 +346 20 optimizer.lr 0.0549197901877224 +346 20 negative_sampler.num_negs_per_pos 42.0 +346 20 training.batch_size 2.0 +346 21 model.embedding_dim 2.0 +346 21 optimizer.lr 0.05516059184360302 +346 21 negative_sampler.num_negs_per_pos 40.0 +346 21 training.batch_size 3.0 +346 22 model.embedding_dim 1.0 +346 22 optimizer.lr 0.031039715652867457 +346 22 negative_sampler.num_negs_per_pos 45.0 +346 22 training.batch_size 3.0 +346 23 model.embedding_dim 0.0 +346 23 
optimizer.lr 0.0051962996411814705 +346 23 negative_sampler.num_negs_per_pos 21.0 +346 23 training.batch_size 2.0 +346 1 dataset """yago310""" +346 1 model """hole""" +346 1 loss """softplus""" +346 1 regularizer """no""" +346 1 optimizer """adam""" +346 1 training_loop """owa""" +346 1 negative_sampler """basic""" +346 1 evaluator """rankbased""" +346 2 dataset """yago310""" +346 2 model """hole""" +346 2 loss """softplus""" +346 2 regularizer """no""" +346 2 optimizer """adam""" +346 2 training_loop """owa""" +346 2 negative_sampler """basic""" +346 2 evaluator """rankbased""" +346 3 dataset """yago310""" +346 3 model """hole""" +346 3 loss """softplus""" +346 3 regularizer """no""" +346 3 optimizer """adam""" +346 3 training_loop """owa""" +346 3 negative_sampler """basic""" +346 3 evaluator """rankbased""" +346 4 dataset """yago310""" +346 4 model """hole""" +346 4 loss """softplus""" +346 4 regularizer """no""" +346 4 optimizer """adam""" +346 4 training_loop """owa""" +346 4 negative_sampler """basic""" +346 4 evaluator """rankbased""" +346 5 dataset """yago310""" +346 5 model """hole""" +346 5 loss """softplus""" +346 5 regularizer """no""" +346 5 optimizer """adam""" +346 5 training_loop """owa""" +346 5 negative_sampler """basic""" +346 5 evaluator """rankbased""" +346 6 dataset """yago310""" +346 6 model """hole""" +346 6 loss """softplus""" +346 6 regularizer """no""" +346 6 optimizer """adam""" +346 6 training_loop """owa""" +346 6 negative_sampler """basic""" +346 6 evaluator """rankbased""" +346 7 dataset """yago310""" +346 7 model """hole""" +346 7 loss """softplus""" +346 7 regularizer """no""" +346 7 optimizer """adam""" +346 7 training_loop """owa""" +346 7 negative_sampler """basic""" +346 7 evaluator """rankbased""" +346 8 dataset """yago310""" +346 8 model """hole""" +346 8 loss """softplus""" +346 8 regularizer """no""" +346 8 optimizer """adam""" +346 8 training_loop """owa""" +346 8 negative_sampler """basic""" +346 8 evaluator 
"""rankbased""" +346 9 dataset """yago310""" +346 9 model """hole""" +346 9 loss """softplus""" +346 9 regularizer """no""" +346 9 optimizer """adam""" +346 9 training_loop """owa""" +346 9 negative_sampler """basic""" +346 9 evaluator """rankbased""" +346 10 dataset """yago310""" +346 10 model """hole""" +346 10 loss """softplus""" +346 10 regularizer """no""" +346 10 optimizer """adam""" +346 10 training_loop """owa""" +346 10 negative_sampler """basic""" +346 10 evaluator """rankbased""" +346 11 dataset """yago310""" +346 11 model """hole""" +346 11 loss """softplus""" +346 11 regularizer """no""" +346 11 optimizer """adam""" +346 11 training_loop """owa""" +346 11 negative_sampler """basic""" +346 11 evaluator """rankbased""" +346 12 dataset """yago310""" +346 12 model """hole""" +346 12 loss """softplus""" +346 12 regularizer """no""" +346 12 optimizer """adam""" +346 12 training_loop """owa""" +346 12 negative_sampler """basic""" +346 12 evaluator """rankbased""" +346 13 dataset """yago310""" +346 13 model """hole""" +346 13 loss """softplus""" +346 13 regularizer """no""" +346 13 optimizer """adam""" +346 13 training_loop """owa""" +346 13 negative_sampler """basic""" +346 13 evaluator """rankbased""" +346 14 dataset """yago310""" +346 14 model """hole""" +346 14 loss """softplus""" +346 14 regularizer """no""" +346 14 optimizer """adam""" +346 14 training_loop """owa""" +346 14 negative_sampler """basic""" +346 14 evaluator """rankbased""" +346 15 dataset """yago310""" +346 15 model """hole""" +346 15 loss """softplus""" +346 15 regularizer """no""" +346 15 optimizer """adam""" +346 15 training_loop """owa""" +346 15 negative_sampler """basic""" +346 15 evaluator """rankbased""" +346 16 dataset """yago310""" +346 16 model """hole""" +346 16 loss """softplus""" +346 16 regularizer """no""" +346 16 optimizer """adam""" +346 16 training_loop """owa""" +346 16 negative_sampler """basic""" +346 16 evaluator """rankbased""" +346 17 dataset """yago310""" +346 17 
model """hole""" +346 17 loss """softplus""" +346 17 regularizer """no""" +346 17 optimizer """adam""" +346 17 training_loop """owa""" +346 17 negative_sampler """basic""" +346 17 evaluator """rankbased""" +346 18 dataset """yago310""" +346 18 model """hole""" +346 18 loss """softplus""" +346 18 regularizer """no""" +346 18 optimizer """adam""" +346 18 training_loop """owa""" +346 18 negative_sampler """basic""" +346 18 evaluator """rankbased""" +346 19 dataset """yago310""" +346 19 model """hole""" +346 19 loss """softplus""" +346 19 regularizer """no""" +346 19 optimizer """adam""" +346 19 training_loop """owa""" +346 19 negative_sampler """basic""" +346 19 evaluator """rankbased""" +346 20 dataset """yago310""" +346 20 model """hole""" +346 20 loss """softplus""" +346 20 regularizer """no""" +346 20 optimizer """adam""" +346 20 training_loop """owa""" +346 20 negative_sampler """basic""" +346 20 evaluator """rankbased""" +346 21 dataset """yago310""" +346 21 model """hole""" +346 21 loss """softplus""" +346 21 regularizer """no""" +346 21 optimizer """adam""" +346 21 training_loop """owa""" +346 21 negative_sampler """basic""" +346 21 evaluator """rankbased""" +346 22 dataset """yago310""" +346 22 model """hole""" +346 22 loss """softplus""" +346 22 regularizer """no""" +346 22 optimizer """adam""" +346 22 training_loop """owa""" +346 22 negative_sampler """basic""" +346 22 evaluator """rankbased""" +346 23 dataset """yago310""" +346 23 model """hole""" +346 23 loss """softplus""" +346 23 regularizer """no""" +346 23 optimizer """adam""" +346 23 training_loop """owa""" +346 23 negative_sampler """basic""" +346 23 evaluator """rankbased""" +347 1 model.embedding_dim 0.0 +347 1 model.c_min 0.04642999560085234 +347 1 model.c_max 6.350614255968342 +347 1 loss.margin 6.239315902952493 +347 1 optimizer.lr 0.0073833316585865715 +347 1 negative_sampler.num_negs_per_pos 59.0 +347 1 training.batch_size 2.0 +347 2 model.embedding_dim 1.0 +347 2 model.c_min 
0.011635515372375133 +347 2 model.c_max 8.337636041812972 +347 2 loss.margin 5.176777120372758 +347 2 optimizer.lr 0.04676978747711673 +347 2 negative_sampler.num_negs_per_pos 21.0 +347 2 training.batch_size 2.0 +347 3 model.embedding_dim 0.0 +347 3 model.c_min 0.026491309771904663 +347 3 model.c_max 2.8160512098746184 +347 3 loss.margin 5.143433410734116 +347 3 optimizer.lr 0.018407080747570845 +347 3 negative_sampler.num_negs_per_pos 93.0 +347 3 training.batch_size 0.0 +347 4 model.embedding_dim 2.0 +347 4 model.c_min 0.038675903938728294 +347 4 model.c_max 7.438472971334988 +347 4 loss.margin 3.0318593687702267 +347 4 optimizer.lr 0.0049369871675440085 +347 4 negative_sampler.num_negs_per_pos 42.0 +347 4 training.batch_size 0.0 +347 5 model.embedding_dim 2.0 +347 5 model.c_min 0.013194321825860795 +347 5 model.c_max 5.694429697003921 +347 5 loss.margin 7.322921724991736 +347 5 optimizer.lr 0.004325293283796132 +347 5 negative_sampler.num_negs_per_pos 85.0 +347 5 training.batch_size 1.0 +347 6 model.embedding_dim 1.0 +347 6 model.c_min 0.09798043021673111 +347 6 model.c_max 6.621559062823424 +347 6 loss.margin 4.754323185485432 +347 6 optimizer.lr 0.01455701290126856 +347 6 negative_sampler.num_negs_per_pos 80.0 +347 6 training.batch_size 2.0 +347 7 model.embedding_dim 0.0 +347 7 model.c_min 0.011826079011413033 +347 7 model.c_max 6.339367547476646 +347 7 loss.margin 8.512044819059707 +347 7 optimizer.lr 0.04768430763373313 +347 7 negative_sampler.num_negs_per_pos 48.0 +347 7 training.batch_size 2.0 +347 8 model.embedding_dim 2.0 +347 8 model.c_min 0.010287074635872792 +347 8 model.c_max 8.758464168972322 +347 8 loss.margin 3.785163725797571 +347 8 optimizer.lr 0.07153254870849969 +347 8 negative_sampler.num_negs_per_pos 74.0 +347 8 training.batch_size 0.0 +347 9 model.embedding_dim 1.0 +347 9 model.c_min 0.06107530976788639 +347 9 model.c_max 3.6380005871938255 +347 9 loss.margin 4.456775486215119 +347 9 optimizer.lr 0.0037159046171570302 +347 9 
negative_sampler.num_negs_per_pos 9.0 +347 9 training.batch_size 2.0 +347 10 model.embedding_dim 0.0 +347 10 model.c_min 0.06206565459990826 +347 10 model.c_max 1.8267753419366528 +347 10 loss.margin 4.79541739485771 +347 10 optimizer.lr 0.05719076686279704 +347 10 negative_sampler.num_negs_per_pos 9.0 +347 10 training.batch_size 1.0 +347 11 model.embedding_dim 0.0 +347 11 model.c_min 0.015701246032016024 +347 11 model.c_max 7.217632681197612 +347 11 loss.margin 6.2625956345289016 +347 11 optimizer.lr 0.0024367855739995956 +347 11 negative_sampler.num_negs_per_pos 56.0 +347 11 training.batch_size 0.0 +347 12 model.embedding_dim 0.0 +347 12 model.c_min 0.06214829268937218 +347 12 model.c_max 3.4593987826910864 +347 12 loss.margin 8.002628123175237 +347 12 optimizer.lr 0.008660819060774982 +347 12 negative_sampler.num_negs_per_pos 70.0 +347 12 training.batch_size 0.0 +347 13 model.embedding_dim 0.0 +347 13 model.c_min 0.03654730916920324 +347 13 model.c_max 5.547670063870763 +347 13 loss.margin 2.5846451502031966 +347 13 optimizer.lr 0.017138986146983445 +347 13 negative_sampler.num_negs_per_pos 86.0 +347 13 training.batch_size 0.0 +347 14 model.embedding_dim 0.0 +347 14 model.c_min 0.010471724283499993 +347 14 model.c_max 8.492101265508014 +347 14 loss.margin 2.5742004434464265 +347 14 optimizer.lr 0.003954404682599126 +347 14 negative_sampler.num_negs_per_pos 34.0 +347 14 training.batch_size 1.0 +347 15 model.embedding_dim 2.0 +347 15 model.c_min 0.011347880207594124 +347 15 model.c_max 7.1878259595717475 +347 15 loss.margin 3.255844109316878 +347 15 optimizer.lr 0.02445182945479773 +347 15 negative_sampler.num_negs_per_pos 99.0 +347 15 training.batch_size 0.0 +347 1 dataset """fb15k237""" +347 1 model """kg2e""" +347 1 loss """marginranking""" +347 1 regularizer """no""" +347 1 optimizer """adam""" +347 1 training_loop """owa""" +347 1 negative_sampler """basic""" +347 1 evaluator """rankbased""" +347 2 dataset """fb15k237""" +347 2 model """kg2e""" +347 2 loss 
"""marginranking""" +347 2 regularizer """no""" +347 2 optimizer """adam""" +347 2 training_loop """owa""" +347 2 negative_sampler """basic""" +347 2 evaluator """rankbased""" +347 3 dataset """fb15k237""" +347 3 model """kg2e""" +347 3 loss """marginranking""" +347 3 regularizer """no""" +347 3 optimizer """adam""" +347 3 training_loop """owa""" +347 3 negative_sampler """basic""" +347 3 evaluator """rankbased""" +347 4 dataset """fb15k237""" +347 4 model """kg2e""" +347 4 loss """marginranking""" +347 4 regularizer """no""" +347 4 optimizer """adam""" +347 4 training_loop """owa""" +347 4 negative_sampler """basic""" +347 4 evaluator """rankbased""" +347 5 dataset """fb15k237""" +347 5 model """kg2e""" +347 5 loss """marginranking""" +347 5 regularizer """no""" +347 5 optimizer """adam""" +347 5 training_loop """owa""" +347 5 negative_sampler """basic""" +347 5 evaluator """rankbased""" +347 6 dataset """fb15k237""" +347 6 model """kg2e""" +347 6 loss """marginranking""" +347 6 regularizer """no""" +347 6 optimizer """adam""" +347 6 training_loop """owa""" +347 6 negative_sampler """basic""" +347 6 evaluator """rankbased""" +347 7 dataset """fb15k237""" +347 7 model """kg2e""" +347 7 loss """marginranking""" +347 7 regularizer """no""" +347 7 optimizer """adam""" +347 7 training_loop """owa""" +347 7 negative_sampler """basic""" +347 7 evaluator """rankbased""" +347 8 dataset """fb15k237""" +347 8 model """kg2e""" +347 8 loss """marginranking""" +347 8 regularizer """no""" +347 8 optimizer """adam""" +347 8 training_loop """owa""" +347 8 negative_sampler """basic""" +347 8 evaluator """rankbased""" +347 9 dataset """fb15k237""" +347 9 model """kg2e""" +347 9 loss """marginranking""" +347 9 regularizer """no""" +347 9 optimizer """adam""" +347 9 training_loop """owa""" +347 9 negative_sampler """basic""" +347 9 evaluator """rankbased""" +347 10 dataset """fb15k237""" +347 10 model """kg2e""" +347 10 loss """marginranking""" +347 10 regularizer """no""" +347 10 
optimizer """adam""" +347 10 training_loop """owa""" +347 10 negative_sampler """basic""" +347 10 evaluator """rankbased""" +347 11 dataset """fb15k237""" +347 11 model """kg2e""" +347 11 loss """marginranking""" +347 11 regularizer """no""" +347 11 optimizer """adam""" +347 11 training_loop """owa""" +347 11 negative_sampler """basic""" +347 11 evaluator """rankbased""" +347 12 dataset """fb15k237""" +347 12 model """kg2e""" +347 12 loss """marginranking""" +347 12 regularizer """no""" +347 12 optimizer """adam""" +347 12 training_loop """owa""" +347 12 negative_sampler """basic""" +347 12 evaluator """rankbased""" +347 13 dataset """fb15k237""" +347 13 model """kg2e""" +347 13 loss """marginranking""" +347 13 regularizer """no""" +347 13 optimizer """adam""" +347 13 training_loop """owa""" +347 13 negative_sampler """basic""" +347 13 evaluator """rankbased""" +347 14 dataset """fb15k237""" +347 14 model """kg2e""" +347 14 loss """marginranking""" +347 14 regularizer """no""" +347 14 optimizer """adam""" +347 14 training_loop """owa""" +347 14 negative_sampler """basic""" +347 14 evaluator """rankbased""" +347 15 dataset """fb15k237""" +347 15 model """kg2e""" +347 15 loss """marginranking""" +347 15 regularizer """no""" +347 15 optimizer """adam""" +347 15 training_loop """owa""" +347 15 negative_sampler """basic""" +347 15 evaluator """rankbased""" +348 1 model.embedding_dim 2.0 +348 1 model.c_min 0.07416216649921505 +348 1 model.c_max 5.824390018429144 +348 1 loss.margin 7.711742755295368 +348 1 optimizer.lr 0.006030657878809431 +348 1 negative_sampler.num_negs_per_pos 96.0 +348 1 training.batch_size 1.0 +348 2 model.embedding_dim 1.0 +348 2 model.c_min 0.08714410529595074 +348 2 model.c_max 7.715299231785801 +348 2 loss.margin 7.489467987633208 +348 2 optimizer.lr 0.06038777251263735 +348 2 negative_sampler.num_negs_per_pos 57.0 +348 2 training.batch_size 2.0 +348 3 model.embedding_dim 1.0 +348 3 model.c_min 0.06348429508287248 +348 3 model.c_max 
8.422598128881411 +348 3 loss.margin 2.6094752594114614 +348 3 optimizer.lr 0.01176121730078006 +348 3 negative_sampler.num_negs_per_pos 89.0 +348 3 training.batch_size 1.0 +348 4 model.embedding_dim 1.0 +348 4 model.c_min 0.09014696379663326 +348 4 model.c_max 8.589167633536714 +348 4 loss.margin 2.022458487995722 +348 4 optimizer.lr 0.008810756507524557 +348 4 negative_sampler.num_negs_per_pos 40.0 +348 4 training.batch_size 1.0 +348 5 model.embedding_dim 1.0 +348 5 model.c_min 0.026692488140028302 +348 5 model.c_max 1.786456250312301 +348 5 loss.margin 7.455116934075625 +348 5 optimizer.lr 0.054122485842416815 +348 5 negative_sampler.num_negs_per_pos 93.0 +348 5 training.batch_size 1.0 +348 6 model.embedding_dim 2.0 +348 6 model.c_min 0.04771298617571411 +348 6 model.c_max 8.731853291276074 +348 6 loss.margin 2.7242264704837056 +348 6 optimizer.lr 0.001113644046033776 +348 6 negative_sampler.num_negs_per_pos 28.0 +348 6 training.batch_size 0.0 +348 7 model.embedding_dim 0.0 +348 7 model.c_min 0.0114620386956542 +348 7 model.c_max 3.7477801111207265 +348 7 loss.margin 1.8182673819532384 +348 7 optimizer.lr 0.006445435452854688 +348 7 negative_sampler.num_negs_per_pos 82.0 +348 7 training.batch_size 0.0 +348 8 model.embedding_dim 0.0 +348 8 model.c_min 0.01020502308002458 +348 8 model.c_max 8.4445108587267 +348 8 loss.margin 8.270352855100187 +348 8 optimizer.lr 0.010271884990046297 +348 8 negative_sampler.num_negs_per_pos 9.0 +348 8 training.batch_size 0.0 +348 9 model.embedding_dim 1.0 +348 9 model.c_min 0.0220049032490353 +348 9 model.c_max 5.569459058559205 +348 9 loss.margin 9.216594452275718 +348 9 optimizer.lr 0.013717807436507221 +348 9 negative_sampler.num_negs_per_pos 24.0 +348 9 training.batch_size 0.0 +348 10 model.embedding_dim 2.0 +348 10 model.c_min 0.015048719033267998 +348 10 model.c_max 6.745068618156776 +348 10 loss.margin 3.9014057220904537 +348 10 optimizer.lr 0.009020139817578163 +348 10 negative_sampler.num_negs_per_pos 51.0 +348 10 
training.batch_size 1.0 +348 11 model.embedding_dim 1.0 +348 11 model.c_min 0.07449450351796229 +348 11 model.c_max 4.06593404512086 +348 11 loss.margin 1.254092300462952 +348 11 optimizer.lr 0.009339285773840227 +348 11 negative_sampler.num_negs_per_pos 0.0 +348 11 training.batch_size 2.0 +348 12 model.embedding_dim 2.0 +348 12 model.c_min 0.03813779951216647 +348 12 model.c_max 8.40115121137293 +348 12 loss.margin 9.462035830039945 +348 12 optimizer.lr 0.05787574843196118 +348 12 negative_sampler.num_negs_per_pos 97.0 +348 12 training.batch_size 2.0 +348 13 model.embedding_dim 2.0 +348 13 model.c_min 0.013276648826400386 +348 13 model.c_max 6.963380063010626 +348 13 loss.margin 7.127810027013052 +348 13 optimizer.lr 0.05444172922179236 +348 13 negative_sampler.num_negs_per_pos 70.0 +348 13 training.batch_size 2.0 +348 14 model.embedding_dim 1.0 +348 14 model.c_min 0.011456318244103187 +348 14 model.c_max 9.740582068769827 +348 14 loss.margin 7.7330324770467564 +348 14 optimizer.lr 0.010658319870434944 +348 14 negative_sampler.num_negs_per_pos 51.0 +348 14 training.batch_size 1.0 +348 15 model.embedding_dim 2.0 +348 15 model.c_min 0.012697039916544823 +348 15 model.c_max 5.386945253272276 +348 15 loss.margin 6.9242247817285225 +348 15 optimizer.lr 0.001039296473153652 +348 15 negative_sampler.num_negs_per_pos 82.0 +348 15 training.batch_size 2.0 +348 16 model.embedding_dim 0.0 +348 16 model.c_min 0.023386563079175193 +348 16 model.c_max 8.606869721815773 +348 16 loss.margin 5.805178916159285 +348 16 optimizer.lr 0.006908902842781274 +348 16 negative_sampler.num_negs_per_pos 12.0 +348 16 training.batch_size 0.0 +348 17 model.embedding_dim 1.0 +348 17 model.c_min 0.027748067277999673 +348 17 model.c_max 3.329760608872091 +348 17 loss.margin 7.3544385616505545 +348 17 optimizer.lr 0.0022871234299890976 +348 17 negative_sampler.num_negs_per_pos 37.0 +348 17 training.batch_size 1.0 +348 18 model.embedding_dim 1.0 +348 18 model.c_min 0.012988951577384191 +348 18 
model.c_max 7.937701653384503 +348 18 loss.margin 6.683893476460998 +348 18 optimizer.lr 0.0028580626284561072 +348 18 negative_sampler.num_negs_per_pos 66.0 +348 18 training.batch_size 0.0 +348 19 model.embedding_dim 1.0 +348 19 model.c_min 0.035202603432958786 +348 19 model.c_max 2.688665271346492 +348 19 loss.margin 2.3750367879588676 +348 19 optimizer.lr 0.04514140504560013 +348 19 negative_sampler.num_negs_per_pos 49.0 +348 19 training.batch_size 0.0 +348 20 model.embedding_dim 0.0 +348 20 model.c_min 0.06145788351768657 +348 20 model.c_max 5.294724730285012 +348 20 loss.margin 3.9811222331063227 +348 20 optimizer.lr 0.0032053261837962396 +348 20 negative_sampler.num_negs_per_pos 27.0 +348 20 training.batch_size 2.0 +348 21 model.embedding_dim 1.0 +348 21 model.c_min 0.06491032452202379 +348 21 model.c_max 5.455947080591863 +348 21 loss.margin 3.3060183762800293 +348 21 optimizer.lr 0.02070411090293528 +348 21 negative_sampler.num_negs_per_pos 29.0 +348 21 training.batch_size 0.0 +348 22 model.embedding_dim 1.0 +348 22 model.c_min 0.05203536995662125 +348 22 model.c_max 7.516185890864454 +348 22 loss.margin 4.843838517603452 +348 22 optimizer.lr 0.018865701012219075 +348 22 negative_sampler.num_negs_per_pos 23.0 +348 22 training.batch_size 2.0 +348 23 model.embedding_dim 0.0 +348 23 model.c_min 0.02898418281466011 +348 23 model.c_max 8.351397151967904 +348 23 loss.margin 9.667675486782727 +348 23 optimizer.lr 0.07828006664200228 +348 23 negative_sampler.num_negs_per_pos 10.0 +348 23 training.batch_size 1.0 +348 24 model.embedding_dim 2.0 +348 24 model.c_min 0.023385961914479336 +348 24 model.c_max 4.189609509913776 +348 24 loss.margin 9.538422710913364 +348 24 optimizer.lr 0.007708801261989254 +348 24 negative_sampler.num_negs_per_pos 43.0 +348 24 training.batch_size 0.0 +348 25 model.embedding_dim 2.0 +348 25 model.c_min 0.020740025135570304 +348 25 model.c_max 5.9880424189176775 +348 25 loss.margin 0.9843046950697537 +348 25 optimizer.lr 
0.0021239679304216547 +348 25 negative_sampler.num_negs_per_pos 11.0 +348 25 training.batch_size 2.0 +348 26 model.embedding_dim 1.0 +348 26 model.c_min 0.023076319491993676 +348 26 model.c_max 7.595468116701276 +348 26 loss.margin 3.4588476730511712 +348 26 optimizer.lr 0.04970290989804747 +348 26 negative_sampler.num_negs_per_pos 58.0 +348 26 training.batch_size 0.0 +348 27 model.embedding_dim 1.0 +348 27 model.c_min 0.08717596866615072 +348 27 model.c_max 7.8104997465027095 +348 27 loss.margin 3.8515069697457944 +348 27 optimizer.lr 0.027172009651191577 +348 27 negative_sampler.num_negs_per_pos 40.0 +348 27 training.batch_size 2.0 +348 28 model.embedding_dim 1.0 +348 28 model.c_min 0.025754944988739436 +348 28 model.c_max 5.880787449090279 +348 28 loss.margin 5.952364843346829 +348 28 optimizer.lr 0.005434614969431698 +348 28 negative_sampler.num_negs_per_pos 31.0 +348 28 training.batch_size 2.0 +348 29 model.embedding_dim 2.0 +348 29 model.c_min 0.013078279329547795 +348 29 model.c_max 2.3726635309721353 +348 29 loss.margin 8.238281137321923 +348 29 optimizer.lr 0.002795615742935917 +348 29 negative_sampler.num_negs_per_pos 26.0 +348 29 training.batch_size 1.0 +348 30 model.embedding_dim 0.0 +348 30 model.c_min 0.017108903704089115 +348 30 model.c_max 4.501366755384702 +348 30 loss.margin 8.41245485284115 +348 30 optimizer.lr 0.0021501921804593115 +348 30 negative_sampler.num_negs_per_pos 34.0 +348 30 training.batch_size 1.0 +348 31 model.embedding_dim 2.0 +348 31 model.c_min 0.019664938666759106 +348 31 model.c_max 8.148897017437852 +348 31 loss.margin 6.146448734967414 +348 31 optimizer.lr 0.01876867942241976 +348 31 negative_sampler.num_negs_per_pos 49.0 +348 31 training.batch_size 2.0 +348 32 model.embedding_dim 1.0 +348 32 model.c_min 0.03485533142555043 +348 32 model.c_max 6.354973411959413 +348 32 loss.margin 6.31665091883684 +348 32 optimizer.lr 0.011513040265943436 +348 32 negative_sampler.num_negs_per_pos 83.0 +348 32 training.batch_size 1.0 +348 1 
dataset """fb15k237""" +348 1 model """kg2e""" +348 1 loss """marginranking""" +348 1 regularizer """no""" +348 1 optimizer """adam""" +348 1 training_loop """owa""" +348 1 negative_sampler """basic""" +348 1 evaluator """rankbased""" +348 2 dataset """fb15k237""" +348 2 model """kg2e""" +348 2 loss """marginranking""" +348 2 regularizer """no""" +348 2 optimizer """adam""" +348 2 training_loop """owa""" +348 2 negative_sampler """basic""" +348 2 evaluator """rankbased""" +348 3 dataset """fb15k237""" +348 3 model """kg2e""" +348 3 loss """marginranking""" +348 3 regularizer """no""" +348 3 optimizer """adam""" +348 3 training_loop """owa""" +348 3 negative_sampler """basic""" +348 3 evaluator """rankbased""" +348 4 dataset """fb15k237""" +348 4 model """kg2e""" +348 4 loss """marginranking""" +348 4 regularizer """no""" +348 4 optimizer """adam""" +348 4 training_loop """owa""" +348 4 negative_sampler """basic""" +348 4 evaluator """rankbased""" +348 5 dataset """fb15k237""" +348 5 model """kg2e""" +348 5 loss """marginranking""" +348 5 regularizer """no""" +348 5 optimizer """adam""" +348 5 training_loop """owa""" +348 5 negative_sampler """basic""" +348 5 evaluator """rankbased""" +348 6 dataset """fb15k237""" +348 6 model """kg2e""" +348 6 loss """marginranking""" +348 6 regularizer """no""" +348 6 optimizer """adam""" +348 6 training_loop """owa""" +348 6 negative_sampler """basic""" +348 6 evaluator """rankbased""" +348 7 dataset """fb15k237""" +348 7 model """kg2e""" +348 7 loss """marginranking""" +348 7 regularizer """no""" +348 7 optimizer """adam""" +348 7 training_loop """owa""" +348 7 negative_sampler """basic""" +348 7 evaluator """rankbased""" +348 8 dataset """fb15k237""" +348 8 model """kg2e""" +348 8 loss """marginranking""" +348 8 regularizer """no""" +348 8 optimizer """adam""" +348 8 training_loop """owa""" +348 8 negative_sampler """basic""" +348 8 evaluator """rankbased""" +348 9 dataset """fb15k237""" +348 9 model """kg2e""" +348 9 loss 
"""marginranking""" +348 9 regularizer """no""" +348 9 optimizer """adam""" +348 9 training_loop """owa""" +348 9 negative_sampler """basic""" +348 9 evaluator """rankbased""" +348 10 dataset """fb15k237""" +348 10 model """kg2e""" +348 10 loss """marginranking""" +348 10 regularizer """no""" +348 10 optimizer """adam""" +348 10 training_loop """owa""" +348 10 negative_sampler """basic""" +348 10 evaluator """rankbased""" +348 11 dataset """fb15k237""" +348 11 model """kg2e""" +348 11 loss """marginranking""" +348 11 regularizer """no""" +348 11 optimizer """adam""" +348 11 training_loop """owa""" +348 11 negative_sampler """basic""" +348 11 evaluator """rankbased""" +348 12 dataset """fb15k237""" +348 12 model """kg2e""" +348 12 loss """marginranking""" +348 12 regularizer """no""" +348 12 optimizer """adam""" +348 12 training_loop """owa""" +348 12 negative_sampler """basic""" +348 12 evaluator """rankbased""" +348 13 dataset """fb15k237""" +348 13 model """kg2e""" +348 13 loss """marginranking""" +348 13 regularizer """no""" +348 13 optimizer """adam""" +348 13 training_loop """owa""" +348 13 negative_sampler """basic""" +348 13 evaluator """rankbased""" +348 14 dataset """fb15k237""" +348 14 model """kg2e""" +348 14 loss """marginranking""" +348 14 regularizer """no""" +348 14 optimizer """adam""" +348 14 training_loop """owa""" +348 14 negative_sampler """basic""" +348 14 evaluator """rankbased""" +348 15 dataset """fb15k237""" +348 15 model """kg2e""" +348 15 loss """marginranking""" +348 15 regularizer """no""" +348 15 optimizer """adam""" +348 15 training_loop """owa""" +348 15 negative_sampler """basic""" +348 15 evaluator """rankbased""" +348 16 dataset """fb15k237""" +348 16 model """kg2e""" +348 16 loss """marginranking""" +348 16 regularizer """no""" +348 16 optimizer """adam""" +348 16 training_loop """owa""" +348 16 negative_sampler """basic""" +348 16 evaluator """rankbased""" +348 17 dataset """fb15k237""" +348 17 model """kg2e""" +348 17 loss 
"""marginranking""" +348 17 regularizer """no""" +348 17 optimizer """adam""" +348 17 training_loop """owa""" +348 17 negative_sampler """basic""" +348 17 evaluator """rankbased""" +348 18 dataset """fb15k237""" +348 18 model """kg2e""" +348 18 loss """marginranking""" +348 18 regularizer """no""" +348 18 optimizer """adam""" +348 18 training_loop """owa""" +348 18 negative_sampler """basic""" +348 18 evaluator """rankbased""" +348 19 dataset """fb15k237""" +348 19 model """kg2e""" +348 19 loss """marginranking""" +348 19 regularizer """no""" +348 19 optimizer """adam""" +348 19 training_loop """owa""" +348 19 negative_sampler """basic""" +348 19 evaluator """rankbased""" +348 20 dataset """fb15k237""" +348 20 model """kg2e""" +348 20 loss """marginranking""" +348 20 regularizer """no""" +348 20 optimizer """adam""" +348 20 training_loop """owa""" +348 20 negative_sampler """basic""" +348 20 evaluator """rankbased""" +348 21 dataset """fb15k237""" +348 21 model """kg2e""" +348 21 loss """marginranking""" +348 21 regularizer """no""" +348 21 optimizer """adam""" +348 21 training_loop """owa""" +348 21 negative_sampler """basic""" +348 21 evaluator """rankbased""" +348 22 dataset """fb15k237""" +348 22 model """kg2e""" +348 22 loss """marginranking""" +348 22 regularizer """no""" +348 22 optimizer """adam""" +348 22 training_loop """owa""" +348 22 negative_sampler """basic""" +348 22 evaluator """rankbased""" +348 23 dataset """fb15k237""" +348 23 model """kg2e""" +348 23 loss """marginranking""" +348 23 regularizer """no""" +348 23 optimizer """adam""" +348 23 training_loop """owa""" +348 23 negative_sampler """basic""" +348 23 evaluator """rankbased""" +348 24 dataset """fb15k237""" +348 24 model """kg2e""" +348 24 loss """marginranking""" +348 24 regularizer """no""" +348 24 optimizer """adam""" +348 24 training_loop """owa""" +348 24 negative_sampler """basic""" +348 24 evaluator """rankbased""" +348 25 dataset """fb15k237""" +348 25 model """kg2e""" +348 25 loss 
"""marginranking""" +348 25 regularizer """no""" +348 25 optimizer """adam""" +348 25 training_loop """owa""" +348 25 negative_sampler """basic""" +348 25 evaluator """rankbased""" +348 26 dataset """fb15k237""" +348 26 model """kg2e""" +348 26 loss """marginranking""" +348 26 regularizer """no""" +348 26 optimizer """adam""" +348 26 training_loop """owa""" +348 26 negative_sampler """basic""" +348 26 evaluator """rankbased""" +348 27 dataset """fb15k237""" +348 27 model """kg2e""" +348 27 loss """marginranking""" +348 27 regularizer """no""" +348 27 optimizer """adam""" +348 27 training_loop """owa""" +348 27 negative_sampler """basic""" +348 27 evaluator """rankbased""" +348 28 dataset """fb15k237""" +348 28 model """kg2e""" +348 28 loss """marginranking""" +348 28 regularizer """no""" +348 28 optimizer """adam""" +348 28 training_loop """owa""" +348 28 negative_sampler """basic""" +348 28 evaluator """rankbased""" +348 29 dataset """fb15k237""" +348 29 model """kg2e""" +348 29 loss """marginranking""" +348 29 regularizer """no""" +348 29 optimizer """adam""" +348 29 training_loop """owa""" +348 29 negative_sampler """basic""" +348 29 evaluator """rankbased""" +348 30 dataset """fb15k237""" +348 30 model """kg2e""" +348 30 loss """marginranking""" +348 30 regularizer """no""" +348 30 optimizer """adam""" +348 30 training_loop """owa""" +348 30 negative_sampler """basic""" +348 30 evaluator """rankbased""" +348 31 dataset """fb15k237""" +348 31 model """kg2e""" +348 31 loss """marginranking""" +348 31 regularizer """no""" +348 31 optimizer """adam""" +348 31 training_loop """owa""" +348 31 negative_sampler """basic""" +348 31 evaluator """rankbased""" +348 32 dataset """fb15k237""" +348 32 model """kg2e""" +348 32 loss """marginranking""" +348 32 regularizer """no""" +348 32 optimizer """adam""" +348 32 training_loop """owa""" +348 32 negative_sampler """basic""" +348 32 evaluator """rankbased""" +349 1 model.embedding_dim 0.0 +349 1 model.c_min 
0.010420621196388676 +349 1 model.c_max 2.4873239256838304 +349 1 optimizer.lr 0.03454623798617101 +349 1 negative_sampler.num_negs_per_pos 83.0 +349 1 training.batch_size 2.0 +349 2 model.embedding_dim 0.0 +349 2 model.c_min 0.09182189547277572 +349 2 model.c_max 3.0061272047117225 +349 2 optimizer.lr 0.08407132621552123 +349 2 negative_sampler.num_negs_per_pos 24.0 +349 2 training.batch_size 0.0 +349 3 model.embedding_dim 0.0 +349 3 model.c_min 0.058585059531753775 +349 3 model.c_max 4.330883220116079 +349 3 optimizer.lr 0.003233672295246268 +349 3 negative_sampler.num_negs_per_pos 4.0 +349 3 training.batch_size 1.0 +349 4 model.embedding_dim 0.0 +349 4 model.c_min 0.01744673904254706 +349 4 model.c_max 1.2304713454216012 +349 4 optimizer.lr 0.02617679705162595 +349 4 negative_sampler.num_negs_per_pos 40.0 +349 4 training.batch_size 2.0 +349 5 model.embedding_dim 0.0 +349 5 model.c_min 0.013486221581995056 +349 5 model.c_max 4.929228538827969 +349 5 optimizer.lr 0.00778473507209013 +349 5 negative_sampler.num_negs_per_pos 58.0 +349 5 training.batch_size 2.0 +349 6 model.embedding_dim 0.0 +349 6 model.c_min 0.05687622580412062 +349 6 model.c_max 2.28720268498528 +349 6 optimizer.lr 0.009641827013362726 +349 6 negative_sampler.num_negs_per_pos 15.0 +349 6 training.batch_size 0.0 +349 7 model.embedding_dim 2.0 +349 7 model.c_min 0.033227741074013835 +349 7 model.c_max 3.428047850089202 +349 7 optimizer.lr 0.03235765624116021 +349 7 negative_sampler.num_negs_per_pos 76.0 +349 7 training.batch_size 2.0 +349 8 model.embedding_dim 2.0 +349 8 model.c_min 0.03371052056903262 +349 8 model.c_max 3.7441003337979506 +349 8 optimizer.lr 0.0021818191204213164 +349 8 negative_sampler.num_negs_per_pos 75.0 +349 8 training.batch_size 1.0 +349 9 model.embedding_dim 0.0 +349 9 model.c_min 0.010301969053328816 +349 9 model.c_max 5.95006778480129 +349 9 optimizer.lr 0.0012724840179107186 +349 9 negative_sampler.num_negs_per_pos 91.0 +349 9 training.batch_size 0.0 +349 10 
model.embedding_dim 1.0 +349 10 model.c_min 0.02056355608595416 +349 10 model.c_max 5.391548323033313 +349 10 optimizer.lr 0.0030597765891482295 +349 10 negative_sampler.num_negs_per_pos 58.0 +349 10 training.batch_size 2.0 +349 11 model.embedding_dim 1.0 +349 11 model.c_min 0.019511094456502894 +349 11 model.c_max 3.3859364761383137 +349 11 optimizer.lr 0.05715285437677418 +349 11 negative_sampler.num_negs_per_pos 82.0 +349 11 training.batch_size 0.0 +349 12 model.embedding_dim 0.0 +349 12 model.c_min 0.01713897015901696 +349 12 model.c_max 2.22674653543032 +349 12 optimizer.lr 0.05715956845023894 +349 12 negative_sampler.num_negs_per_pos 28.0 +349 12 training.batch_size 0.0 +349 13 model.embedding_dim 2.0 +349 13 model.c_min 0.016173095785026688 +349 13 model.c_max 7.130041201876607 +349 13 optimizer.lr 0.019264418219738948 +349 13 negative_sampler.num_negs_per_pos 81.0 +349 13 training.batch_size 2.0 +349 14 model.embedding_dim 1.0 +349 14 model.c_min 0.01731880105444916 +349 14 model.c_max 5.425236906517848 +349 14 optimizer.lr 0.003330851422354031 +349 14 negative_sampler.num_negs_per_pos 90.0 +349 14 training.batch_size 0.0 +349 15 model.embedding_dim 0.0 +349 15 model.c_min 0.07761317220747377 +349 15 model.c_max 8.40715176762407 +349 15 optimizer.lr 0.00130050583536153 +349 15 negative_sampler.num_negs_per_pos 79.0 +349 15 training.batch_size 1.0 +349 16 model.embedding_dim 1.0 +349 16 model.c_min 0.09125336212709552 +349 16 model.c_max 6.136950778974636 +349 16 optimizer.lr 0.05661304528660218 +349 16 negative_sampler.num_negs_per_pos 89.0 +349 16 training.batch_size 2.0 +349 17 model.embedding_dim 1.0 +349 17 model.c_min 0.018195368300737935 +349 17 model.c_max 1.317603063187482 +349 17 optimizer.lr 0.0018135536474227884 +349 17 negative_sampler.num_negs_per_pos 96.0 +349 17 training.batch_size 1.0 +349 18 model.embedding_dim 2.0 +349 18 model.c_min 0.037137999302404315 +349 18 model.c_max 7.786195764230848 +349 18 optimizer.lr 0.0019229688313105817 +349 
18 negative_sampler.num_negs_per_pos 63.0 +349 18 training.batch_size 0.0 +349 19 model.embedding_dim 0.0 +349 19 model.c_min 0.038548695592434894 +349 19 model.c_max 4.601797480375357 +349 19 optimizer.lr 0.0240235801657506 +349 19 negative_sampler.num_negs_per_pos 95.0 +349 19 training.batch_size 2.0 +349 20 model.embedding_dim 2.0 +349 20 model.c_min 0.053799589139943205 +349 20 model.c_max 9.075868323970981 +349 20 optimizer.lr 0.09875333430499704 +349 20 negative_sampler.num_negs_per_pos 59.0 +349 20 training.batch_size 2.0 +349 21 model.embedding_dim 2.0 +349 21 model.c_min 0.08787166156232937 +349 21 model.c_max 3.433350293758073 +349 21 optimizer.lr 0.001276759496584044 +349 21 negative_sampler.num_negs_per_pos 38.0 +349 21 training.batch_size 0.0 +349 22 model.embedding_dim 0.0 +349 22 model.c_min 0.09981856302266465 +349 22 model.c_max 4.490647688846007 +349 22 optimizer.lr 0.040505261427848195 +349 22 negative_sampler.num_negs_per_pos 18.0 +349 22 training.batch_size 2.0 +349 23 model.embedding_dim 0.0 +349 23 model.c_min 0.023785794166235 +349 23 model.c_max 1.7404816025572907 +349 23 optimizer.lr 0.06716081780227284 +349 23 negative_sampler.num_negs_per_pos 95.0 +349 23 training.batch_size 1.0 +349 24 model.embedding_dim 1.0 +349 24 model.c_min 0.04253199752952558 +349 24 model.c_max 6.111277293193632 +349 24 optimizer.lr 0.01252591271516615 +349 24 negative_sampler.num_negs_per_pos 85.0 +349 24 training.batch_size 0.0 +349 25 model.embedding_dim 2.0 +349 25 model.c_min 0.05085429563919236 +349 25 model.c_max 2.9199632041187416 +349 25 optimizer.lr 0.0010479257421835565 +349 25 negative_sampler.num_negs_per_pos 31.0 +349 25 training.batch_size 1.0 +349 26 model.embedding_dim 1.0 +349 26 model.c_min 0.08689669126704892 +349 26 model.c_max 9.797332063138473 +349 26 optimizer.lr 0.0012260952081617777 +349 26 negative_sampler.num_negs_per_pos 59.0 +349 26 training.batch_size 1.0 +349 27 model.embedding_dim 1.0 +349 27 model.c_min 0.0743329124469807 +349 27 
model.c_max 3.318660085505364 +349 27 optimizer.lr 0.022652644542772576 +349 27 negative_sampler.num_negs_per_pos 80.0 +349 27 training.batch_size 0.0 +349 28 model.embedding_dim 0.0 +349 28 model.c_min 0.0415016921998923 +349 28 model.c_max 4.976107760306986 +349 28 optimizer.lr 0.08215326263842612 +349 28 negative_sampler.num_negs_per_pos 43.0 +349 28 training.batch_size 2.0 +349 29 model.embedding_dim 2.0 +349 29 model.c_min 0.034211059968539 +349 29 model.c_max 6.079540033204845 +349 29 optimizer.lr 0.0032001948409818176 +349 29 negative_sampler.num_negs_per_pos 50.0 +349 29 training.batch_size 2.0 +349 30 model.embedding_dim 0.0 +349 30 model.c_min 0.09162205058322036 +349 30 model.c_max 7.619141697573715 +349 30 optimizer.lr 0.007086540000961738 +349 30 negative_sampler.num_negs_per_pos 9.0 +349 30 training.batch_size 1.0 +349 31 model.embedding_dim 0.0 +349 31 model.c_min 0.03801459777662329 +349 31 model.c_max 5.95796411950987 +349 31 optimizer.lr 0.015229318686896238 +349 31 negative_sampler.num_negs_per_pos 18.0 +349 31 training.batch_size 2.0 +349 32 model.embedding_dim 1.0 +349 32 model.c_min 0.0796712412768976 +349 32 model.c_max 4.99648444002142 +349 32 optimizer.lr 0.028084427965946946 +349 32 negative_sampler.num_negs_per_pos 34.0 +349 32 training.batch_size 2.0 +349 1 dataset """fb15k237""" +349 1 model """kg2e""" +349 1 loss """bceaftersigmoid""" +349 1 regularizer """no""" +349 1 optimizer """adam""" +349 1 training_loop """owa""" +349 1 negative_sampler """basic""" +349 1 evaluator """rankbased""" +349 2 dataset """fb15k237""" +349 2 model """kg2e""" +349 2 loss """bceaftersigmoid""" +349 2 regularizer """no""" +349 2 optimizer """adam""" +349 2 training_loop """owa""" +349 2 negative_sampler """basic""" +349 2 evaluator """rankbased""" +349 3 dataset """fb15k237""" +349 3 model """kg2e""" +349 3 loss """bceaftersigmoid""" +349 3 regularizer """no""" +349 3 optimizer """adam""" +349 3 training_loop """owa""" +349 3 negative_sampler """basic""" 
+349 3 evaluator """rankbased""" +349 4 dataset """fb15k237""" +349 4 model """kg2e""" +349 4 loss """bceaftersigmoid""" +349 4 regularizer """no""" +349 4 optimizer """adam""" +349 4 training_loop """owa""" +349 4 negative_sampler """basic""" +349 4 evaluator """rankbased""" +349 5 dataset """fb15k237""" +349 5 model """kg2e""" +349 5 loss """bceaftersigmoid""" +349 5 regularizer """no""" +349 5 optimizer """adam""" +349 5 training_loop """owa""" +349 5 negative_sampler """basic""" +349 5 evaluator """rankbased""" +349 6 dataset """fb15k237""" +349 6 model """kg2e""" +349 6 loss """bceaftersigmoid""" +349 6 regularizer """no""" +349 6 optimizer """adam""" +349 6 training_loop """owa""" +349 6 negative_sampler """basic""" +349 6 evaluator """rankbased""" +349 7 dataset """fb15k237""" +349 7 model """kg2e""" +349 7 loss """bceaftersigmoid""" +349 7 regularizer """no""" +349 7 optimizer """adam""" +349 7 training_loop """owa""" +349 7 negative_sampler """basic""" +349 7 evaluator """rankbased""" +349 8 dataset """fb15k237""" +349 8 model """kg2e""" +349 8 loss """bceaftersigmoid""" +349 8 regularizer """no""" +349 8 optimizer """adam""" +349 8 training_loop """owa""" +349 8 negative_sampler """basic""" +349 8 evaluator """rankbased""" +349 9 dataset """fb15k237""" +349 9 model """kg2e""" +349 9 loss """bceaftersigmoid""" +349 9 regularizer """no""" +349 9 optimizer """adam""" +349 9 training_loop """owa""" +349 9 negative_sampler """basic""" +349 9 evaluator """rankbased""" +349 10 dataset """fb15k237""" +349 10 model """kg2e""" +349 10 loss """bceaftersigmoid""" +349 10 regularizer """no""" +349 10 optimizer """adam""" +349 10 training_loop """owa""" +349 10 negative_sampler """basic""" +349 10 evaluator """rankbased""" +349 11 dataset """fb15k237""" +349 11 model """kg2e""" +349 11 loss """bceaftersigmoid""" +349 11 regularizer """no""" +349 11 optimizer """adam""" +349 11 training_loop """owa""" +349 11 negative_sampler """basic""" +349 11 evaluator 
"""rankbased""" +349 12 dataset """fb15k237""" +349 12 model """kg2e""" +349 12 loss """bceaftersigmoid""" +349 12 regularizer """no""" +349 12 optimizer """adam""" +349 12 training_loop """owa""" +349 12 negative_sampler """basic""" +349 12 evaluator """rankbased""" +349 13 dataset """fb15k237""" +349 13 model """kg2e""" +349 13 loss """bceaftersigmoid""" +349 13 regularizer """no""" +349 13 optimizer """adam""" +349 13 training_loop """owa""" +349 13 negative_sampler """basic""" +349 13 evaluator """rankbased""" +349 14 dataset """fb15k237""" +349 14 model """kg2e""" +349 14 loss """bceaftersigmoid""" +349 14 regularizer """no""" +349 14 optimizer """adam""" +349 14 training_loop """owa""" +349 14 negative_sampler """basic""" +349 14 evaluator """rankbased""" +349 15 dataset """fb15k237""" +349 15 model """kg2e""" +349 15 loss """bceaftersigmoid""" +349 15 regularizer """no""" +349 15 optimizer """adam""" +349 15 training_loop """owa""" +349 15 negative_sampler """basic""" +349 15 evaluator """rankbased""" +349 16 dataset """fb15k237""" +349 16 model """kg2e""" +349 16 loss """bceaftersigmoid""" +349 16 regularizer """no""" +349 16 optimizer """adam""" +349 16 training_loop """owa""" +349 16 negative_sampler """basic""" +349 16 evaluator """rankbased""" +349 17 dataset """fb15k237""" +349 17 model """kg2e""" +349 17 loss """bceaftersigmoid""" +349 17 regularizer """no""" +349 17 optimizer """adam""" +349 17 training_loop """owa""" +349 17 negative_sampler """basic""" +349 17 evaluator """rankbased""" +349 18 dataset """fb15k237""" +349 18 model """kg2e""" +349 18 loss """bceaftersigmoid""" +349 18 regularizer """no""" +349 18 optimizer """adam""" +349 18 training_loop """owa""" +349 18 negative_sampler """basic""" +349 18 evaluator """rankbased""" +349 19 dataset """fb15k237""" +349 19 model """kg2e""" +349 19 loss """bceaftersigmoid""" +349 19 regularizer """no""" +349 19 optimizer """adam""" +349 19 training_loop """owa""" +349 19 negative_sampler """basic""" 
+349 19 evaluator """rankbased""" +349 20 dataset """fb15k237""" +349 20 model """kg2e""" +349 20 loss """bceaftersigmoid""" +349 20 regularizer """no""" +349 20 optimizer """adam""" +349 20 training_loop """owa""" +349 20 negative_sampler """basic""" +349 20 evaluator """rankbased""" +349 21 dataset """fb15k237""" +349 21 model """kg2e""" +349 21 loss """bceaftersigmoid""" +349 21 regularizer """no""" +349 21 optimizer """adam""" +349 21 training_loop """owa""" +349 21 negative_sampler """basic""" +349 21 evaluator """rankbased""" +349 22 dataset """fb15k237""" +349 22 model """kg2e""" +349 22 loss """bceaftersigmoid""" +349 22 regularizer """no""" +349 22 optimizer """adam""" +349 22 training_loop """owa""" +349 22 negative_sampler """basic""" +349 22 evaluator """rankbased""" +349 23 dataset """fb15k237""" +349 23 model """kg2e""" +349 23 loss """bceaftersigmoid""" +349 23 regularizer """no""" +349 23 optimizer """adam""" +349 23 training_loop """owa""" +349 23 negative_sampler """basic""" +349 23 evaluator """rankbased""" +349 24 dataset """fb15k237""" +349 24 model """kg2e""" +349 24 loss """bceaftersigmoid""" +349 24 regularizer """no""" +349 24 optimizer """adam""" +349 24 training_loop """owa""" +349 24 negative_sampler """basic""" +349 24 evaluator """rankbased""" +349 25 dataset """fb15k237""" +349 25 model """kg2e""" +349 25 loss """bceaftersigmoid""" +349 25 regularizer """no""" +349 25 optimizer """adam""" +349 25 training_loop """owa""" +349 25 negative_sampler """basic""" +349 25 evaluator """rankbased""" +349 26 dataset """fb15k237""" +349 26 model """kg2e""" +349 26 loss """bceaftersigmoid""" +349 26 regularizer """no""" +349 26 optimizer """adam""" +349 26 training_loop """owa""" +349 26 negative_sampler """basic""" +349 26 evaluator """rankbased""" +349 27 dataset """fb15k237""" +349 27 model """kg2e""" +349 27 loss """bceaftersigmoid""" +349 27 regularizer """no""" +349 27 optimizer """adam""" +349 27 training_loop """owa""" +349 27 
negative_sampler """basic""" +349 27 evaluator """rankbased""" +349 28 dataset """fb15k237""" +349 28 model """kg2e""" +349 28 loss """bceaftersigmoid""" +349 28 regularizer """no""" +349 28 optimizer """adam""" +349 28 training_loop """owa""" +349 28 negative_sampler """basic""" +349 28 evaluator """rankbased""" +349 29 dataset """fb15k237""" +349 29 model """kg2e""" +349 29 loss """bceaftersigmoid""" +349 29 regularizer """no""" +349 29 optimizer """adam""" +349 29 training_loop """owa""" +349 29 negative_sampler """basic""" +349 29 evaluator """rankbased""" +349 30 dataset """fb15k237""" +349 30 model """kg2e""" +349 30 loss """bceaftersigmoid""" +349 30 regularizer """no""" +349 30 optimizer """adam""" +349 30 training_loop """owa""" +349 30 negative_sampler """basic""" +349 30 evaluator """rankbased""" +349 31 dataset """fb15k237""" +349 31 model """kg2e""" +349 31 loss """bceaftersigmoid""" +349 31 regularizer """no""" +349 31 optimizer """adam""" +349 31 training_loop """owa""" +349 31 negative_sampler """basic""" +349 31 evaluator """rankbased""" +349 32 dataset """fb15k237""" +349 32 model """kg2e""" +349 32 loss """bceaftersigmoid""" +349 32 regularizer """no""" +349 32 optimizer """adam""" +349 32 training_loop """owa""" +349 32 negative_sampler """basic""" +349 32 evaluator """rankbased""" +350 1 model.embedding_dim 0.0 +350 1 model.c_min 0.01100257632527315 +350 1 model.c_max 8.86220307867033 +350 1 optimizer.lr 0.005125545710297675 +350 1 negative_sampler.num_negs_per_pos 49.0 +350 1 training.batch_size 2.0 +350 2 model.embedding_dim 1.0 +350 2 model.c_min 0.02186391338589885 +350 2 model.c_max 9.834938463013929 +350 2 optimizer.lr 0.04953604900081334 +350 2 negative_sampler.num_negs_per_pos 72.0 +350 2 training.batch_size 0.0 +350 3 model.embedding_dim 0.0 +350 3 model.c_min 0.01556767598987384 +350 3 model.c_max 4.185229681273604 +350 3 optimizer.lr 0.006153401755905561 +350 3 negative_sampler.num_negs_per_pos 66.0 +350 3 training.batch_size 1.0 
+350 4 model.embedding_dim 0.0 +350 4 model.c_min 0.013152417129587748 +350 4 model.c_max 9.878026857025294 +350 4 optimizer.lr 0.0010445952668255555 +350 4 negative_sampler.num_negs_per_pos 10.0 +350 4 training.batch_size 2.0 +350 5 model.embedding_dim 1.0 +350 5 model.c_min 0.04549029938648689 +350 5 model.c_max 2.572328030308313 +350 5 optimizer.lr 0.020381731989493786 +350 5 negative_sampler.num_negs_per_pos 38.0 +350 5 training.batch_size 1.0 +350 6 model.embedding_dim 1.0 +350 6 model.c_min 0.010963979203239545 +350 6 model.c_max 6.769460378057102 +350 6 optimizer.lr 0.04144296364893264 +350 6 negative_sampler.num_negs_per_pos 45.0 +350 6 training.batch_size 2.0 +350 7 model.embedding_dim 0.0 +350 7 model.c_min 0.03489675203971347 +350 7 model.c_max 4.654665613072741 +350 7 optimizer.lr 0.09641109742635526 +350 7 negative_sampler.num_negs_per_pos 81.0 +350 7 training.batch_size 0.0 +350 8 model.embedding_dim 2.0 +350 8 model.c_min 0.02757723538262592 +350 8 model.c_max 8.422798468020696 +350 8 optimizer.lr 0.03571316475082679 +350 8 negative_sampler.num_negs_per_pos 47.0 +350 8 training.batch_size 2.0 +350 9 model.embedding_dim 1.0 +350 9 model.c_min 0.07146190857911373 +350 9 model.c_max 5.485196368221447 +350 9 optimizer.lr 0.003434099907535766 +350 9 negative_sampler.num_negs_per_pos 16.0 +350 9 training.batch_size 2.0 +350 10 model.embedding_dim 2.0 +350 10 model.c_min 0.01497546059368483 +350 10 model.c_max 9.343022412837584 +350 10 optimizer.lr 0.004136832158793098 +350 10 negative_sampler.num_negs_per_pos 59.0 +350 10 training.batch_size 2.0 +350 11 model.embedding_dim 0.0 +350 11 model.c_min 0.02475441190206167 +350 11 model.c_max 2.854859229006633 +350 11 optimizer.lr 0.0015534861578399485 +350 11 negative_sampler.num_negs_per_pos 73.0 +350 11 training.batch_size 0.0 +350 12 model.embedding_dim 1.0 +350 12 model.c_min 0.028027550447458107 +350 12 model.c_max 1.8902519311379735 +350 12 optimizer.lr 0.01968762465870462 +350 12 
negative_sampler.num_negs_per_pos 42.0 +350 12 training.batch_size 0.0 +350 13 model.embedding_dim 2.0 +350 13 model.c_min 0.017769911103122017 +350 13 model.c_max 2.7786356595507806 +350 13 optimizer.lr 0.003828129946996331 +350 13 negative_sampler.num_negs_per_pos 85.0 +350 13 training.batch_size 2.0 +350 14 model.embedding_dim 0.0 +350 14 model.c_min 0.05720458622721907 +350 14 model.c_max 8.81536706953621 +350 14 optimizer.lr 0.006720663348302485 +350 14 negative_sampler.num_negs_per_pos 8.0 +350 14 training.batch_size 2.0 +350 15 model.embedding_dim 0.0 +350 15 model.c_min 0.05302302844590794 +350 15 model.c_max 4.146227163074853 +350 15 optimizer.lr 0.001574768364670881 +350 15 negative_sampler.num_negs_per_pos 10.0 +350 15 training.batch_size 2.0 +350 16 model.embedding_dim 2.0 +350 16 model.c_min 0.019353792452093833 +350 16 model.c_max 9.349321979397873 +350 16 optimizer.lr 0.00506153010205509 +350 16 negative_sampler.num_negs_per_pos 65.0 +350 16 training.batch_size 1.0 +350 17 model.embedding_dim 0.0 +350 17 model.c_min 0.021120302566216224 +350 17 model.c_max 2.3693065173786425 +350 17 optimizer.lr 0.0029509529703996536 +350 17 negative_sampler.num_negs_per_pos 64.0 +350 17 training.batch_size 1.0 +350 18 model.embedding_dim 1.0 +350 18 model.c_min 0.045783230829400376 +350 18 model.c_max 9.055218632546378 +350 18 optimizer.lr 0.0027107131689023383 +350 18 negative_sampler.num_negs_per_pos 25.0 +350 18 training.batch_size 2.0 +350 19 model.embedding_dim 0.0 +350 19 model.c_min 0.03245508212522413 +350 19 model.c_max 4.590053235549893 +350 19 optimizer.lr 0.0025448767708259683 +350 19 negative_sampler.num_negs_per_pos 30.0 +350 19 training.batch_size 1.0 +350 20 model.embedding_dim 0.0 +350 20 model.c_min 0.018443992441494368 +350 20 model.c_max 7.5937700504099785 +350 20 optimizer.lr 0.010713442199671592 +350 20 negative_sampler.num_negs_per_pos 93.0 +350 20 training.batch_size 1.0 +350 21 model.embedding_dim 1.0 +350 21 model.c_min 0.06334605374347387 
+350 21 model.c_max 6.757585374110591 +350 21 optimizer.lr 0.04896974185381962 +350 21 negative_sampler.num_negs_per_pos 66.0 +350 21 training.batch_size 1.0 +350 1 dataset """fb15k237""" +350 1 model """kg2e""" +350 1 loss """softplus""" +350 1 regularizer """no""" +350 1 optimizer """adam""" +350 1 training_loop """owa""" +350 1 negative_sampler """basic""" +350 1 evaluator """rankbased""" +350 2 dataset """fb15k237""" +350 2 model """kg2e""" +350 2 loss """softplus""" +350 2 regularizer """no""" +350 2 optimizer """adam""" +350 2 training_loop """owa""" +350 2 negative_sampler """basic""" +350 2 evaluator """rankbased""" +350 3 dataset """fb15k237""" +350 3 model """kg2e""" +350 3 loss """softplus""" +350 3 regularizer """no""" +350 3 optimizer """adam""" +350 3 training_loop """owa""" +350 3 negative_sampler """basic""" +350 3 evaluator """rankbased""" +350 4 dataset """fb15k237""" +350 4 model """kg2e""" +350 4 loss """softplus""" +350 4 regularizer """no""" +350 4 optimizer """adam""" +350 4 training_loop """owa""" +350 4 negative_sampler """basic""" +350 4 evaluator """rankbased""" +350 5 dataset """fb15k237""" +350 5 model """kg2e""" +350 5 loss """softplus""" +350 5 regularizer """no""" +350 5 optimizer """adam""" +350 5 training_loop """owa""" +350 5 negative_sampler """basic""" +350 5 evaluator """rankbased""" +350 6 dataset """fb15k237""" +350 6 model """kg2e""" +350 6 loss """softplus""" +350 6 regularizer """no""" +350 6 optimizer """adam""" +350 6 training_loop """owa""" +350 6 negative_sampler """basic""" +350 6 evaluator """rankbased""" +350 7 dataset """fb15k237""" +350 7 model """kg2e""" +350 7 loss """softplus""" +350 7 regularizer """no""" +350 7 optimizer """adam""" +350 7 training_loop """owa""" +350 7 negative_sampler """basic""" +350 7 evaluator """rankbased""" +350 8 dataset """fb15k237""" +350 8 model """kg2e""" +350 8 loss """softplus""" +350 8 regularizer """no""" +350 8 optimizer """adam""" +350 8 training_loop """owa""" +350 8 
negative_sampler """basic""" +350 8 evaluator """rankbased""" +350 9 dataset """fb15k237""" +350 9 model """kg2e""" +350 9 loss """softplus""" +350 9 regularizer """no""" +350 9 optimizer """adam""" +350 9 training_loop """owa""" +350 9 negative_sampler """basic""" +350 9 evaluator """rankbased""" +350 10 dataset """fb15k237""" +350 10 model """kg2e""" +350 10 loss """softplus""" +350 10 regularizer """no""" +350 10 optimizer """adam""" +350 10 training_loop """owa""" +350 10 negative_sampler """basic""" +350 10 evaluator """rankbased""" +350 11 dataset """fb15k237""" +350 11 model """kg2e""" +350 11 loss """softplus""" +350 11 regularizer """no""" +350 11 optimizer """adam""" +350 11 training_loop """owa""" +350 11 negative_sampler """basic""" +350 11 evaluator """rankbased""" +350 12 dataset """fb15k237""" +350 12 model """kg2e""" +350 12 loss """softplus""" +350 12 regularizer """no""" +350 12 optimizer """adam""" +350 12 training_loop """owa""" +350 12 negative_sampler """basic""" +350 12 evaluator """rankbased""" +350 13 dataset """fb15k237""" +350 13 model """kg2e""" +350 13 loss """softplus""" +350 13 regularizer """no""" +350 13 optimizer """adam""" +350 13 training_loop """owa""" +350 13 negative_sampler """basic""" +350 13 evaluator """rankbased""" +350 14 dataset """fb15k237""" +350 14 model """kg2e""" +350 14 loss """softplus""" +350 14 regularizer """no""" +350 14 optimizer """adam""" +350 14 training_loop """owa""" +350 14 negative_sampler """basic""" +350 14 evaluator """rankbased""" +350 15 dataset """fb15k237""" +350 15 model """kg2e""" +350 15 loss """softplus""" +350 15 regularizer """no""" +350 15 optimizer """adam""" +350 15 training_loop """owa""" +350 15 negative_sampler """basic""" +350 15 evaluator """rankbased""" +350 16 dataset """fb15k237""" +350 16 model """kg2e""" +350 16 loss """softplus""" +350 16 regularizer """no""" +350 16 optimizer """adam""" +350 16 training_loop """owa""" +350 16 negative_sampler """basic""" +350 16 evaluator 
"""rankbased""" +350 17 dataset """fb15k237""" +350 17 model """kg2e""" +350 17 loss """softplus""" +350 17 regularizer """no""" +350 17 optimizer """adam""" +350 17 training_loop """owa""" +350 17 negative_sampler """basic""" +350 17 evaluator """rankbased""" +350 18 dataset """fb15k237""" +350 18 model """kg2e""" +350 18 loss """softplus""" +350 18 regularizer """no""" +350 18 optimizer """adam""" +350 18 training_loop """owa""" +350 18 negative_sampler """basic""" +350 18 evaluator """rankbased""" +350 19 dataset """fb15k237""" +350 19 model """kg2e""" +350 19 loss """softplus""" +350 19 regularizer """no""" +350 19 optimizer """adam""" +350 19 training_loop """owa""" +350 19 negative_sampler """basic""" +350 19 evaluator """rankbased""" +350 20 dataset """fb15k237""" +350 20 model """kg2e""" +350 20 loss """softplus""" +350 20 regularizer """no""" +350 20 optimizer """adam""" +350 20 training_loop """owa""" +350 20 negative_sampler """basic""" +350 20 evaluator """rankbased""" +350 21 dataset """fb15k237""" +350 21 model """kg2e""" +350 21 loss """softplus""" +350 21 regularizer """no""" +350 21 optimizer """adam""" +350 21 training_loop """owa""" +350 21 negative_sampler """basic""" +350 21 evaluator """rankbased""" +351 1 model.embedding_dim 0.0 +351 1 model.c_min 0.016224800407371124 +351 1 model.c_max 1.4837096669177288 +351 1 optimizer.lr 0.005192162205733605 +351 1 negative_sampler.num_negs_per_pos 42.0 +351 1 training.batch_size 0.0 +351 2 model.embedding_dim 1.0 +351 2 model.c_min 0.015957764083295998 +351 2 model.c_max 9.506411860561423 +351 2 optimizer.lr 0.0011830833074739165 +351 2 negative_sampler.num_negs_per_pos 20.0 +351 2 training.batch_size 1.0 +351 3 model.embedding_dim 1.0 +351 3 model.c_min 0.035642492659601184 +351 3 model.c_max 1.4727590834605704 +351 3 optimizer.lr 0.09629651800884685 +351 3 negative_sampler.num_negs_per_pos 35.0 +351 3 training.batch_size 2.0 +351 4 model.embedding_dim 1.0 +351 4 model.c_min 0.010970712044752173 +351 4 
model.c_max 7.032741287901079 +351 4 optimizer.lr 0.0014707722421851335 +351 4 negative_sampler.num_negs_per_pos 81.0 +351 4 training.batch_size 1.0 +351 5 model.embedding_dim 0.0 +351 5 model.c_min 0.08774843357879536 +351 5 model.c_max 1.8215290290364599 +351 5 optimizer.lr 0.06060164360027902 +351 5 negative_sampler.num_negs_per_pos 49.0 +351 5 training.batch_size 0.0 +351 6 model.embedding_dim 0.0 +351 6 model.c_min 0.01115387788386489 +351 6 model.c_max 3.9062546870638517 +351 6 optimizer.lr 0.001527117830934177 +351 6 negative_sampler.num_negs_per_pos 42.0 +351 6 training.batch_size 0.0 +351 7 model.embedding_dim 1.0 +351 7 model.c_min 0.03302512895088677 +351 7 model.c_max 3.3314528665666003 +351 7 optimizer.lr 0.043104080457602896 +351 7 negative_sampler.num_negs_per_pos 68.0 +351 7 training.batch_size 0.0 +351 8 model.embedding_dim 1.0 +351 8 model.c_min 0.011224603267189343 +351 8 model.c_max 7.797446407192562 +351 8 optimizer.lr 0.006149065897962801 +351 8 negative_sampler.num_negs_per_pos 97.0 +351 8 training.batch_size 1.0 +351 9 model.embedding_dim 0.0 +351 9 model.c_min 0.024196184680753942 +351 9 model.c_max 3.3848819479989656 +351 9 optimizer.lr 0.0010572838363905872 +351 9 negative_sampler.num_negs_per_pos 1.0 +351 9 training.batch_size 1.0 +351 10 model.embedding_dim 0.0 +351 10 model.c_min 0.06839591185259329 +351 10 model.c_max 7.206410385214754 +351 10 optimizer.lr 0.05918852964440216 +351 10 negative_sampler.num_negs_per_pos 62.0 +351 10 training.batch_size 2.0 +351 11 model.embedding_dim 2.0 +351 11 model.c_min 0.03768576533352667 +351 11 model.c_max 8.122396367135016 +351 11 optimizer.lr 0.05172976579129198 +351 11 negative_sampler.num_negs_per_pos 7.0 +351 11 training.batch_size 1.0 +351 12 model.embedding_dim 1.0 +351 12 model.c_min 0.011817763023944597 +351 12 model.c_max 5.358966780385922 +351 12 optimizer.lr 0.0034925321640313705 +351 12 negative_sampler.num_negs_per_pos 51.0 +351 12 training.batch_size 2.0 +351 13 model.embedding_dim 
1.0 +351 13 model.c_min 0.03637941786652854 +351 13 model.c_max 5.428987507106855 +351 13 optimizer.lr 0.051356340874165546 +351 13 negative_sampler.num_negs_per_pos 80.0 +351 13 training.batch_size 0.0 +351 14 model.embedding_dim 0.0 +351 14 model.c_min 0.029622961784652287 +351 14 model.c_max 8.228962954757066 +351 14 optimizer.lr 0.0842323951319827 +351 14 negative_sampler.num_negs_per_pos 4.0 +351 14 training.batch_size 1.0 +351 15 model.embedding_dim 1.0 +351 15 model.c_min 0.013802907034847798 +351 15 model.c_max 2.7648617136578277 +351 15 optimizer.lr 0.020701793247827796 +351 15 negative_sampler.num_negs_per_pos 87.0 +351 15 training.batch_size 0.0 +351 16 model.embedding_dim 1.0 +351 16 model.c_min 0.016412376689810045 +351 16 model.c_max 6.779678905863239 +351 16 optimizer.lr 0.012469947907752752 +351 16 negative_sampler.num_negs_per_pos 78.0 +351 16 training.batch_size 0.0 +351 17 model.embedding_dim 2.0 +351 17 model.c_min 0.020216482376811135 +351 17 model.c_max 8.74699592368454 +351 17 optimizer.lr 0.010209191845892347 +351 17 negative_sampler.num_negs_per_pos 66.0 +351 17 training.batch_size 2.0 +351 18 model.embedding_dim 1.0 +351 18 model.c_min 0.012926174553265612 +351 18 model.c_max 9.979772458543687 +351 18 optimizer.lr 0.017610297356698317 +351 18 negative_sampler.num_negs_per_pos 96.0 +351 18 training.batch_size 2.0 +351 19 model.embedding_dim 2.0 +351 19 model.c_min 0.05170208296314598 +351 19 model.c_max 2.0493519881517352 +351 19 optimizer.lr 0.06460872883479331 +351 19 negative_sampler.num_negs_per_pos 95.0 +351 19 training.batch_size 2.0 +351 20 model.embedding_dim 1.0 +351 20 model.c_min 0.04778862995985156 +351 20 model.c_max 4.172458192239596 +351 20 optimizer.lr 0.008705879617548356 +351 20 negative_sampler.num_negs_per_pos 23.0 +351 20 training.batch_size 1.0 +351 21 model.embedding_dim 1.0 +351 21 model.c_min 0.04532370693326779 +351 21 model.c_max 4.351613430904588 +351 21 optimizer.lr 0.0060483791966284 +351 21 
negative_sampler.num_negs_per_pos 86.0 +351 21 training.batch_size 2.0 +351 22 model.embedding_dim 2.0 +351 22 model.c_min 0.08637597537272278 +351 22 model.c_max 9.858871460733416 +351 22 optimizer.lr 0.0016279169832636479 +351 22 negative_sampler.num_negs_per_pos 38.0 +351 22 training.batch_size 0.0 +351 23 model.embedding_dim 2.0 +351 23 model.c_min 0.01571177674803832 +351 23 model.c_max 5.416270967575874 +351 23 optimizer.lr 0.05143831794791958 +351 23 negative_sampler.num_negs_per_pos 97.0 +351 23 training.batch_size 0.0 +351 24 model.embedding_dim 1.0 +351 24 model.c_min 0.0962557718559715 +351 24 model.c_max 4.140473419414988 +351 24 optimizer.lr 0.0018758137523698993 +351 24 negative_sampler.num_negs_per_pos 81.0 +351 24 training.batch_size 0.0 +351 25 model.embedding_dim 1.0 +351 25 model.c_min 0.028844552055332384 +351 25 model.c_max 5.735522629913843 +351 25 optimizer.lr 0.0012970390340278687 +351 25 negative_sampler.num_negs_per_pos 51.0 +351 25 training.batch_size 0.0 +351 26 model.embedding_dim 1.0 +351 26 model.c_min 0.015101500219000913 +351 26 model.c_max 7.712942235362599 +351 26 optimizer.lr 0.008568397177069139 +351 26 negative_sampler.num_negs_per_pos 57.0 +351 26 training.batch_size 0.0 +351 27 model.embedding_dim 0.0 +351 27 model.c_min 0.06686506269595144 +351 27 model.c_max 2.1179296752601378 +351 27 optimizer.lr 0.005379094907137091 +351 27 negative_sampler.num_negs_per_pos 87.0 +351 27 training.batch_size 1.0 +351 28 model.embedding_dim 0.0 +351 28 model.c_min 0.017689342741591854 +351 28 model.c_max 8.619664259305333 +351 28 optimizer.lr 0.010989539977882597 +351 28 negative_sampler.num_negs_per_pos 61.0 +351 28 training.batch_size 2.0 +351 29 model.embedding_dim 0.0 +351 29 model.c_min 0.03722555648281155 +351 29 model.c_max 1.1739629440693937 +351 29 optimizer.lr 0.01909509987518396 +351 29 negative_sampler.num_negs_per_pos 90.0 +351 29 training.batch_size 2.0 +351 30 model.embedding_dim 0.0 +351 30 model.c_min 0.013201438042883987 
+351 30 model.c_max 6.045330474948243 +351 30 optimizer.lr 0.006389892413844507 +351 30 negative_sampler.num_negs_per_pos 1.0 +351 30 training.batch_size 0.0 +351 31 model.embedding_dim 1.0 +351 31 model.c_min 0.011392382449369453 +351 31 model.c_max 1.636730287515805 +351 31 optimizer.lr 0.001799872636703768 +351 31 negative_sampler.num_negs_per_pos 89.0 +351 31 training.batch_size 0.0 +351 32 model.embedding_dim 2.0 +351 32 model.c_min 0.012534171721328265 +351 32 model.c_max 6.646932786946448 +351 32 optimizer.lr 0.017666960977051057 +351 32 negative_sampler.num_negs_per_pos 91.0 +351 32 training.batch_size 2.0 +351 33 model.embedding_dim 2.0 +351 33 model.c_min 0.01405612425063195 +351 33 model.c_max 8.03204692933751 +351 33 optimizer.lr 0.008314294424180855 +351 33 negative_sampler.num_negs_per_pos 2.0 +351 33 training.batch_size 2.0 +351 34 model.embedding_dim 2.0 +351 34 model.c_min 0.035360474156656864 +351 34 model.c_max 6.756396632085831 +351 34 optimizer.lr 0.008444605435303017 +351 34 negative_sampler.num_negs_per_pos 53.0 +351 34 training.batch_size 2.0 +351 35 model.embedding_dim 1.0 +351 35 model.c_min 0.014106321331548042 +351 35 model.c_max 5.312245725440605 +351 35 optimizer.lr 0.0353112646232436 +351 35 negative_sampler.num_negs_per_pos 22.0 +351 35 training.batch_size 0.0 +351 36 model.embedding_dim 0.0 +351 36 model.c_min 0.0417299506501517 +351 36 model.c_max 2.423537449761847 +351 36 optimizer.lr 0.00496356133625515 +351 36 negative_sampler.num_negs_per_pos 97.0 +351 36 training.batch_size 1.0 +351 37 model.embedding_dim 0.0 +351 37 model.c_min 0.024489257651530226 +351 37 model.c_max 3.906138820414311 +351 37 optimizer.lr 0.004981845830179856 +351 37 negative_sampler.num_negs_per_pos 75.0 +351 37 training.batch_size 0.0 +351 38 model.embedding_dim 2.0 +351 38 model.c_min 0.013144290077356456 +351 38 model.c_max 6.933981809162269 +351 38 optimizer.lr 0.05003646300148358 +351 38 negative_sampler.num_negs_per_pos 2.0 +351 38 training.batch_size 
2.0 +351 39 model.embedding_dim 1.0 +351 39 model.c_min 0.030502587391829268 +351 39 model.c_max 1.4058533193939926 +351 39 optimizer.lr 0.005113715559598589 +351 39 negative_sampler.num_negs_per_pos 76.0 +351 39 training.batch_size 2.0 +351 40 model.embedding_dim 0.0 +351 40 model.c_min 0.08624895412258912 +351 40 model.c_max 3.6796110266285917 +351 40 optimizer.lr 0.0018067216751933225 +351 40 negative_sampler.num_negs_per_pos 98.0 +351 40 training.batch_size 0.0 +351 41 model.embedding_dim 1.0 +351 41 model.c_min 0.018037696058637386 +351 41 model.c_max 5.808726410897648 +351 41 optimizer.lr 0.001723027634982407 +351 41 negative_sampler.num_negs_per_pos 26.0 +351 41 training.batch_size 1.0 +351 42 model.embedding_dim 1.0 +351 42 model.c_min 0.07972144756559862 +351 42 model.c_max 5.49173688700963 +351 42 optimizer.lr 0.05860244581213016 +351 42 negative_sampler.num_negs_per_pos 97.0 +351 42 training.batch_size 0.0 +351 43 model.embedding_dim 0.0 +351 43 model.c_min 0.02831403418126102 +351 43 model.c_max 3.5216293930631113 +351 43 optimizer.lr 0.004501035044265163 +351 43 negative_sampler.num_negs_per_pos 65.0 +351 43 training.batch_size 0.0 +351 44 model.embedding_dim 2.0 +351 44 model.c_min 0.062249906222238 +351 44 model.c_max 9.091878989556703 +351 44 optimizer.lr 0.01658335363711238 +351 44 negative_sampler.num_negs_per_pos 33.0 +351 44 training.batch_size 2.0 +351 45 model.embedding_dim 1.0 +351 45 model.c_min 0.04223800839752641 +351 45 model.c_max 9.437021087692909 +351 45 optimizer.lr 0.09427056898536476 +351 45 negative_sampler.num_negs_per_pos 41.0 +351 45 training.batch_size 1.0 +351 46 model.embedding_dim 0.0 +351 46 model.c_min 0.021997434585701765 +351 46 model.c_max 3.4806989711124903 +351 46 optimizer.lr 0.08600595846363689 +351 46 negative_sampler.num_negs_per_pos 31.0 +351 46 training.batch_size 1.0 +351 47 model.embedding_dim 1.0 +351 47 model.c_min 0.05865605866067203 +351 47 model.c_max 7.100624974333614 +351 47 optimizer.lr 
0.055147203332662556 +351 47 negative_sampler.num_negs_per_pos 51.0 +351 47 training.batch_size 0.0 +351 48 model.embedding_dim 0.0 +351 48 model.c_min 0.016337372462565215 +351 48 model.c_max 8.52403516063032 +351 48 optimizer.lr 0.052269024039138426 +351 48 negative_sampler.num_negs_per_pos 18.0 +351 48 training.batch_size 0.0 +351 49 model.embedding_dim 0.0 +351 49 model.c_min 0.09678749521302434 +351 49 model.c_max 7.940807148054228 +351 49 optimizer.lr 0.02719671438091243 +351 49 negative_sampler.num_negs_per_pos 0.0 +351 49 training.batch_size 2.0 +351 50 model.embedding_dim 2.0 +351 50 model.c_min 0.013281758499808142 +351 50 model.c_max 7.690045890338015 +351 50 optimizer.lr 0.04290009778722552 +351 50 negative_sampler.num_negs_per_pos 52.0 +351 50 training.batch_size 2.0 +351 51 model.embedding_dim 0.0 +351 51 model.c_min 0.09098155066781027 +351 51 model.c_max 5.1200407444991685 +351 51 optimizer.lr 0.005828282482552224 +351 51 negative_sampler.num_negs_per_pos 63.0 +351 51 training.batch_size 1.0 +351 52 model.embedding_dim 2.0 +351 52 model.c_min 0.07101434708755877 +351 52 model.c_max 5.738168007652068 +351 52 optimizer.lr 0.08273633686482136 +351 52 negative_sampler.num_negs_per_pos 12.0 +351 52 training.batch_size 0.0 +351 53 model.embedding_dim 0.0 +351 53 model.c_min 0.046764372454555524 +351 53 model.c_max 2.539758845469745 +351 53 optimizer.lr 0.04357154288815392 +351 53 negative_sampler.num_negs_per_pos 35.0 +351 53 training.batch_size 0.0 +351 54 model.embedding_dim 0.0 +351 54 model.c_min 0.05531351945393108 +351 54 model.c_max 5.1049509249573095 +351 54 optimizer.lr 0.002947526724113802 +351 54 negative_sampler.num_negs_per_pos 52.0 +351 54 training.batch_size 1.0 +351 55 model.embedding_dim 2.0 +351 55 model.c_min 0.05580880058538194 +351 55 model.c_max 7.036594988945595 +351 55 optimizer.lr 0.0812627374119751 +351 55 negative_sampler.num_negs_per_pos 55.0 +351 55 training.batch_size 1.0 +351 56 model.embedding_dim 0.0 +351 56 model.c_min 
0.08935565799946382 +351 56 model.c_max 8.236106127684437 +351 56 optimizer.lr 0.04289027816546971 +351 56 negative_sampler.num_negs_per_pos 64.0 +351 56 training.batch_size 2.0 +351 57 model.embedding_dim 2.0 +351 57 model.c_min 0.05137136054967902 +351 57 model.c_max 2.3697046693120116 +351 57 optimizer.lr 0.001965115121611345 +351 57 negative_sampler.num_negs_per_pos 39.0 +351 57 training.batch_size 1.0 +351 58 model.embedding_dim 0.0 +351 58 model.c_min 0.04317408760487008 +351 58 model.c_max 7.185432608501852 +351 58 optimizer.lr 0.04050125330205617 +351 58 negative_sampler.num_negs_per_pos 89.0 +351 58 training.batch_size 1.0 +351 59 model.embedding_dim 0.0 +351 59 model.c_min 0.09879129314210755 +351 59 model.c_max 1.9064380669076657 +351 59 optimizer.lr 0.026349382935569942 +351 59 negative_sampler.num_negs_per_pos 9.0 +351 59 training.batch_size 2.0 +351 1 dataset """fb15k237""" +351 1 model """kg2e""" +351 1 loss """bceaftersigmoid""" +351 1 regularizer """no""" +351 1 optimizer """adam""" +351 1 training_loop """owa""" +351 1 negative_sampler """basic""" +351 1 evaluator """rankbased""" +351 2 dataset """fb15k237""" +351 2 model """kg2e""" +351 2 loss """bceaftersigmoid""" +351 2 regularizer """no""" +351 2 optimizer """adam""" +351 2 training_loop """owa""" +351 2 negative_sampler """basic""" +351 2 evaluator """rankbased""" +351 3 dataset """fb15k237""" +351 3 model """kg2e""" +351 3 loss """bceaftersigmoid""" +351 3 regularizer """no""" +351 3 optimizer """adam""" +351 3 training_loop """owa""" +351 3 negative_sampler """basic""" +351 3 evaluator """rankbased""" +351 4 dataset """fb15k237""" +351 4 model """kg2e""" +351 4 loss """bceaftersigmoid""" +351 4 regularizer """no""" +351 4 optimizer """adam""" +351 4 training_loop """owa""" +351 4 negative_sampler """basic""" +351 4 evaluator """rankbased""" +351 5 dataset """fb15k237""" +351 5 model """kg2e""" +351 5 loss """bceaftersigmoid""" +351 5 regularizer """no""" +351 5 optimizer """adam""" +351 5 
training_loop """owa""" +351 5 negative_sampler """basic""" +351 5 evaluator """rankbased""" +351 6 dataset """fb15k237""" +351 6 model """kg2e""" +351 6 loss """bceaftersigmoid""" +351 6 regularizer """no""" +351 6 optimizer """adam""" +351 6 training_loop """owa""" +351 6 negative_sampler """basic""" +351 6 evaluator """rankbased""" +351 7 dataset """fb15k237""" +351 7 model """kg2e""" +351 7 loss """bceaftersigmoid""" +351 7 regularizer """no""" +351 7 optimizer """adam""" +351 7 training_loop """owa""" +351 7 negative_sampler """basic""" +351 7 evaluator """rankbased""" +351 8 dataset """fb15k237""" +351 8 model """kg2e""" +351 8 loss """bceaftersigmoid""" +351 8 regularizer """no""" +351 8 optimizer """adam""" +351 8 training_loop """owa""" +351 8 negative_sampler """basic""" +351 8 evaluator """rankbased""" +351 9 dataset """fb15k237""" +351 9 model """kg2e""" +351 9 loss """bceaftersigmoid""" +351 9 regularizer """no""" +351 9 optimizer """adam""" +351 9 training_loop """owa""" +351 9 negative_sampler """basic""" +351 9 evaluator """rankbased""" +351 10 dataset """fb15k237""" +351 10 model """kg2e""" +351 10 loss """bceaftersigmoid""" +351 10 regularizer """no""" +351 10 optimizer """adam""" +351 10 training_loop """owa""" +351 10 negative_sampler """basic""" +351 10 evaluator """rankbased""" +351 11 dataset """fb15k237""" +351 11 model """kg2e""" +351 11 loss """bceaftersigmoid""" +351 11 regularizer """no""" +351 11 optimizer """adam""" +351 11 training_loop """owa""" +351 11 negative_sampler """basic""" +351 11 evaluator """rankbased""" +351 12 dataset """fb15k237""" +351 12 model """kg2e""" +351 12 loss """bceaftersigmoid""" +351 12 regularizer """no""" +351 12 optimizer """adam""" +351 12 training_loop """owa""" +351 12 negative_sampler """basic""" +351 12 evaluator """rankbased""" +351 13 dataset """fb15k237""" +351 13 model """kg2e""" +351 13 loss """bceaftersigmoid""" +351 13 regularizer """no""" +351 13 optimizer """adam""" +351 13 training_loop 
"""owa""" +351 13 negative_sampler """basic""" +351 13 evaluator """rankbased""" +351 14 dataset """fb15k237""" +351 14 model """kg2e""" +351 14 loss """bceaftersigmoid""" +351 14 regularizer """no""" +351 14 optimizer """adam""" +351 14 training_loop """owa""" +351 14 negative_sampler """basic""" +351 14 evaluator """rankbased""" +351 15 dataset """fb15k237""" +351 15 model """kg2e""" +351 15 loss """bceaftersigmoid""" +351 15 regularizer """no""" +351 15 optimizer """adam""" +351 15 training_loop """owa""" +351 15 negative_sampler """basic""" +351 15 evaluator """rankbased""" +351 16 dataset """fb15k237""" +351 16 model """kg2e""" +351 16 loss """bceaftersigmoid""" +351 16 regularizer """no""" +351 16 optimizer """adam""" +351 16 training_loop """owa""" +351 16 negative_sampler """basic""" +351 16 evaluator """rankbased""" +351 17 dataset """fb15k237""" +351 17 model """kg2e""" +351 17 loss """bceaftersigmoid""" +351 17 regularizer """no""" +351 17 optimizer """adam""" +351 17 training_loop """owa""" +351 17 negative_sampler """basic""" +351 17 evaluator """rankbased""" +351 18 dataset """fb15k237""" +351 18 model """kg2e""" +351 18 loss """bceaftersigmoid""" +351 18 regularizer """no""" +351 18 optimizer """adam""" +351 18 training_loop """owa""" +351 18 negative_sampler """basic""" +351 18 evaluator """rankbased""" +351 19 dataset """fb15k237""" +351 19 model """kg2e""" +351 19 loss """bceaftersigmoid""" +351 19 regularizer """no""" +351 19 optimizer """adam""" +351 19 training_loop """owa""" +351 19 negative_sampler """basic""" +351 19 evaluator """rankbased""" +351 20 dataset """fb15k237""" +351 20 model """kg2e""" +351 20 loss """bceaftersigmoid""" +351 20 regularizer """no""" +351 20 optimizer """adam""" +351 20 training_loop """owa""" +351 20 negative_sampler """basic""" +351 20 evaluator """rankbased""" +351 21 dataset """fb15k237""" +351 21 model """kg2e""" +351 21 loss """bceaftersigmoid""" +351 21 regularizer """no""" +351 21 optimizer """adam""" +351 
21 training_loop """owa""" +351 21 negative_sampler """basic""" +351 21 evaluator """rankbased""" +351 22 dataset """fb15k237""" +351 22 model """kg2e""" +351 22 loss """bceaftersigmoid""" +351 22 regularizer """no""" +351 22 optimizer """adam""" +351 22 training_loop """owa""" +351 22 negative_sampler """basic""" +351 22 evaluator """rankbased""" +351 23 dataset """fb15k237""" +351 23 model """kg2e""" +351 23 loss """bceaftersigmoid""" +351 23 regularizer """no""" +351 23 optimizer """adam""" +351 23 training_loop """owa""" +351 23 negative_sampler """basic""" +351 23 evaluator """rankbased""" +351 24 dataset """fb15k237""" +351 24 model """kg2e""" +351 24 loss """bceaftersigmoid""" +351 24 regularizer """no""" +351 24 optimizer """adam""" +351 24 training_loop """owa""" +351 24 negative_sampler """basic""" +351 24 evaluator """rankbased""" +351 25 dataset """fb15k237""" +351 25 model """kg2e""" +351 25 loss """bceaftersigmoid""" +351 25 regularizer """no""" +351 25 optimizer """adam""" +351 25 training_loop """owa""" +351 25 negative_sampler """basic""" +351 25 evaluator """rankbased""" +351 26 dataset """fb15k237""" +351 26 model """kg2e""" +351 26 loss """bceaftersigmoid""" +351 26 regularizer """no""" +351 26 optimizer """adam""" +351 26 training_loop """owa""" +351 26 negative_sampler """basic""" +351 26 evaluator """rankbased""" +351 27 dataset """fb15k237""" +351 27 model """kg2e""" +351 27 loss """bceaftersigmoid""" +351 27 regularizer """no""" +351 27 optimizer """adam""" +351 27 training_loop """owa""" +351 27 negative_sampler """basic""" +351 27 evaluator """rankbased""" +351 28 dataset """fb15k237""" +351 28 model """kg2e""" +351 28 loss """bceaftersigmoid""" +351 28 regularizer """no""" +351 28 optimizer """adam""" +351 28 training_loop """owa""" +351 28 negative_sampler """basic""" +351 28 evaluator """rankbased""" +351 29 dataset """fb15k237""" +351 29 model """kg2e""" +351 29 loss """bceaftersigmoid""" +351 29 regularizer """no""" +351 29 optimizer 
"""adam""" +351 29 training_loop """owa""" +351 29 negative_sampler """basic""" +351 29 evaluator """rankbased""" +351 30 dataset """fb15k237""" +351 30 model """kg2e""" +351 30 loss """bceaftersigmoid""" +351 30 regularizer """no""" +351 30 optimizer """adam""" +351 30 training_loop """owa""" +351 30 negative_sampler """basic""" +351 30 evaluator """rankbased""" +351 31 dataset """fb15k237""" +351 31 model """kg2e""" +351 31 loss """bceaftersigmoid""" +351 31 regularizer """no""" +351 31 optimizer """adam""" +351 31 training_loop """owa""" +351 31 negative_sampler """basic""" +351 31 evaluator """rankbased""" +351 32 dataset """fb15k237""" +351 32 model """kg2e""" +351 32 loss """bceaftersigmoid""" +351 32 regularizer """no""" +351 32 optimizer """adam""" +351 32 training_loop """owa""" +351 32 negative_sampler """basic""" +351 32 evaluator """rankbased""" +351 33 dataset """fb15k237""" +351 33 model """kg2e""" +351 33 loss """bceaftersigmoid""" +351 33 regularizer """no""" +351 33 optimizer """adam""" +351 33 training_loop """owa""" +351 33 negative_sampler """basic""" +351 33 evaluator """rankbased""" +351 34 dataset """fb15k237""" +351 34 model """kg2e""" +351 34 loss """bceaftersigmoid""" +351 34 regularizer """no""" +351 34 optimizer """adam""" +351 34 training_loop """owa""" +351 34 negative_sampler """basic""" +351 34 evaluator """rankbased""" +351 35 dataset """fb15k237""" +351 35 model """kg2e""" +351 35 loss """bceaftersigmoid""" +351 35 regularizer """no""" +351 35 optimizer """adam""" +351 35 training_loop """owa""" +351 35 negative_sampler """basic""" +351 35 evaluator """rankbased""" +351 36 dataset """fb15k237""" +351 36 model """kg2e""" +351 36 loss """bceaftersigmoid""" +351 36 regularizer """no""" +351 36 optimizer """adam""" +351 36 training_loop """owa""" +351 36 negative_sampler """basic""" +351 36 evaluator """rankbased""" +351 37 dataset """fb15k237""" +351 37 model """kg2e""" +351 37 loss """bceaftersigmoid""" +351 37 regularizer """no""" 
+351 37 optimizer """adam""" +351 37 training_loop """owa""" +351 37 negative_sampler """basic""" +351 37 evaluator """rankbased""" +351 38 dataset """fb15k237""" +351 38 model """kg2e""" +351 38 loss """bceaftersigmoid""" +351 38 regularizer """no""" +351 38 optimizer """adam""" +351 38 training_loop """owa""" +351 38 negative_sampler """basic""" +351 38 evaluator """rankbased""" +351 39 dataset """fb15k237""" +351 39 model """kg2e""" +351 39 loss """bceaftersigmoid""" +351 39 regularizer """no""" +351 39 optimizer """adam""" +351 39 training_loop """owa""" +351 39 negative_sampler """basic""" +351 39 evaluator """rankbased""" +351 40 dataset """fb15k237""" +351 40 model """kg2e""" +351 40 loss """bceaftersigmoid""" +351 40 regularizer """no""" +351 40 optimizer """adam""" +351 40 training_loop """owa""" +351 40 negative_sampler """basic""" +351 40 evaluator """rankbased""" +351 41 dataset """fb15k237""" +351 41 model """kg2e""" +351 41 loss """bceaftersigmoid""" +351 41 regularizer """no""" +351 41 optimizer """adam""" +351 41 training_loop """owa""" +351 41 negative_sampler """basic""" +351 41 evaluator """rankbased""" +351 42 dataset """fb15k237""" +351 42 model """kg2e""" +351 42 loss """bceaftersigmoid""" +351 42 regularizer """no""" +351 42 optimizer """adam""" +351 42 training_loop """owa""" +351 42 negative_sampler """basic""" +351 42 evaluator """rankbased""" +351 43 dataset """fb15k237""" +351 43 model """kg2e""" +351 43 loss """bceaftersigmoid""" +351 43 regularizer """no""" +351 43 optimizer """adam""" +351 43 training_loop """owa""" +351 43 negative_sampler """basic""" +351 43 evaluator """rankbased""" +351 44 dataset """fb15k237""" +351 44 model """kg2e""" +351 44 loss """bceaftersigmoid""" +351 44 regularizer """no""" +351 44 optimizer """adam""" +351 44 training_loop """owa""" +351 44 negative_sampler """basic""" +351 44 evaluator """rankbased""" +351 45 dataset """fb15k237""" +351 45 model """kg2e""" +351 45 loss """bceaftersigmoid""" +351 45 
regularizer """no""" +351 45 optimizer """adam""" +351 45 training_loop """owa""" +351 45 negative_sampler """basic""" +351 45 evaluator """rankbased""" +351 46 dataset """fb15k237""" +351 46 model """kg2e""" +351 46 loss """bceaftersigmoid""" +351 46 regularizer """no""" +351 46 optimizer """adam""" +351 46 training_loop """owa""" +351 46 negative_sampler """basic""" +351 46 evaluator """rankbased""" +351 47 dataset """fb15k237""" +351 47 model """kg2e""" +351 47 loss """bceaftersigmoid""" +351 47 regularizer """no""" +351 47 optimizer """adam""" +351 47 training_loop """owa""" +351 47 negative_sampler """basic""" +351 47 evaluator """rankbased""" +351 48 dataset """fb15k237""" +351 48 model """kg2e""" +351 48 loss """bceaftersigmoid""" +351 48 regularizer """no""" +351 48 optimizer """adam""" +351 48 training_loop """owa""" +351 48 negative_sampler """basic""" +351 48 evaluator """rankbased""" +351 49 dataset """fb15k237""" +351 49 model """kg2e""" +351 49 loss """bceaftersigmoid""" +351 49 regularizer """no""" +351 49 optimizer """adam""" +351 49 training_loop """owa""" +351 49 negative_sampler """basic""" +351 49 evaluator """rankbased""" +351 50 dataset """fb15k237""" +351 50 model """kg2e""" +351 50 loss """bceaftersigmoid""" +351 50 regularizer """no""" +351 50 optimizer """adam""" +351 50 training_loop """owa""" +351 50 negative_sampler """basic""" +351 50 evaluator """rankbased""" +351 51 dataset """fb15k237""" +351 51 model """kg2e""" +351 51 loss """bceaftersigmoid""" +351 51 regularizer """no""" +351 51 optimizer """adam""" +351 51 training_loop """owa""" +351 51 negative_sampler """basic""" +351 51 evaluator """rankbased""" +351 52 dataset """fb15k237""" +351 52 model """kg2e""" +351 52 loss """bceaftersigmoid""" +351 52 regularizer """no""" +351 52 optimizer """adam""" +351 52 training_loop """owa""" +351 52 negative_sampler """basic""" +351 52 evaluator """rankbased""" +351 53 dataset """fb15k237""" +351 53 model """kg2e""" +351 53 loss 
"""bceaftersigmoid""" +351 53 regularizer """no""" +351 53 optimizer """adam""" +351 53 training_loop """owa""" +351 53 negative_sampler """basic""" +351 53 evaluator """rankbased""" +351 54 dataset """fb15k237""" +351 54 model """kg2e""" +351 54 loss """bceaftersigmoid""" +351 54 regularizer """no""" +351 54 optimizer """adam""" +351 54 training_loop """owa""" +351 54 negative_sampler """basic""" +351 54 evaluator """rankbased""" +351 55 dataset """fb15k237""" +351 55 model """kg2e""" +351 55 loss """bceaftersigmoid""" +351 55 regularizer """no""" +351 55 optimizer """adam""" +351 55 training_loop """owa""" +351 55 negative_sampler """basic""" +351 55 evaluator """rankbased""" +351 56 dataset """fb15k237""" +351 56 model """kg2e""" +351 56 loss """bceaftersigmoid""" +351 56 regularizer """no""" +351 56 optimizer """adam""" +351 56 training_loop """owa""" +351 56 negative_sampler """basic""" +351 56 evaluator """rankbased""" +351 57 dataset """fb15k237""" +351 57 model """kg2e""" +351 57 loss """bceaftersigmoid""" +351 57 regularizer """no""" +351 57 optimizer """adam""" +351 57 training_loop """owa""" +351 57 negative_sampler """basic""" +351 57 evaluator """rankbased""" +351 58 dataset """fb15k237""" +351 58 model """kg2e""" +351 58 loss """bceaftersigmoid""" +351 58 regularizer """no""" +351 58 optimizer """adam""" +351 58 training_loop """owa""" +351 58 negative_sampler """basic""" +351 58 evaluator """rankbased""" +351 59 dataset """fb15k237""" +351 59 model """kg2e""" +351 59 loss """bceaftersigmoid""" +351 59 regularizer """no""" +351 59 optimizer """adam""" +351 59 training_loop """owa""" +351 59 negative_sampler """basic""" +351 59 evaluator """rankbased""" +352 1 model.embedding_dim 0.0 +352 1 model.c_min 0.03697416665773456 +352 1 model.c_max 3.2638690306489506 +352 1 optimizer.lr 0.008755050582782873 +352 1 negative_sampler.num_negs_per_pos 79.0 +352 1 training.batch_size 1.0 +352 2 model.embedding_dim 1.0 +352 2 model.c_min 0.0388554504592952 +352 2 
model.c_max 3.2218896895269444 +352 2 optimizer.lr 0.009744299262561245 +352 2 negative_sampler.num_negs_per_pos 94.0 +352 2 training.batch_size 1.0 +352 3 model.embedding_dim 0.0 +352 3 model.c_min 0.01998318274149128 +352 3 model.c_max 6.090578617328982 +352 3 optimizer.lr 0.012693655955226138 +352 3 negative_sampler.num_negs_per_pos 34.0 +352 3 training.batch_size 0.0 +352 4 model.embedding_dim 2.0 +352 4 model.c_min 0.02162975467124414 +352 4 model.c_max 5.077879325158932 +352 4 optimizer.lr 0.006273407056426491 +352 4 negative_sampler.num_negs_per_pos 19.0 +352 4 training.batch_size 2.0 +352 5 model.embedding_dim 1.0 +352 5 model.c_min 0.09625899336130657 +352 5 model.c_max 6.437202412028222 +352 5 optimizer.lr 0.01057634040079219 +352 5 negative_sampler.num_negs_per_pos 94.0 +352 5 training.batch_size 2.0 +352 6 model.embedding_dim 0.0 +352 6 model.c_min 0.010371855635700412 +352 6 model.c_max 2.3023086512401956 +352 6 optimizer.lr 0.012365230038242478 +352 6 negative_sampler.num_negs_per_pos 78.0 +352 6 training.batch_size 2.0 +352 7 model.embedding_dim 2.0 +352 7 model.c_min 0.02870306504008502 +352 7 model.c_max 4.249404097576775 +352 7 optimizer.lr 0.08765355959055504 +352 7 negative_sampler.num_negs_per_pos 68.0 +352 7 training.batch_size 2.0 +352 8 model.embedding_dim 1.0 +352 8 model.c_min 0.03092398418921146 +352 8 model.c_max 5.805561725003311 +352 8 optimizer.lr 0.06108826407514028 +352 8 negative_sampler.num_negs_per_pos 20.0 +352 8 training.batch_size 2.0 +352 9 model.embedding_dim 2.0 +352 9 model.c_min 0.09143648921453426 +352 9 model.c_max 8.020718943416306 +352 9 optimizer.lr 0.0057792621190016595 +352 9 negative_sampler.num_negs_per_pos 8.0 +352 9 training.batch_size 0.0 +352 10 model.embedding_dim 1.0 +352 10 model.c_min 0.06684888969595906 +352 10 model.c_max 3.0865770414102194 +352 10 optimizer.lr 0.09282305007696053 +352 10 negative_sampler.num_negs_per_pos 1.0 +352 10 training.batch_size 0.0 +352 11 model.embedding_dim 0.0 +352 11 
model.c_min 0.0843400192613418 +352 11 model.c_max 5.187525118632182 +352 11 optimizer.lr 0.0062623775392581064 +352 11 negative_sampler.num_negs_per_pos 51.0 +352 11 training.batch_size 1.0 +352 12 model.embedding_dim 0.0 +352 12 model.c_min 0.013594842357781417 +352 12 model.c_max 5.167722066109084 +352 12 optimizer.lr 0.03800630702482301 +352 12 negative_sampler.num_negs_per_pos 72.0 +352 12 training.batch_size 2.0 +352 13 model.embedding_dim 0.0 +352 13 model.c_min 0.02496380924051027 +352 13 model.c_max 5.851326131057781 +352 13 optimizer.lr 0.022821790935120857 +352 13 negative_sampler.num_negs_per_pos 85.0 +352 13 training.batch_size 0.0 +352 14 model.embedding_dim 2.0 +352 14 model.c_min 0.015458550179132167 +352 14 model.c_max 8.881099042251686 +352 14 optimizer.lr 0.008415523112347771 +352 14 negative_sampler.num_negs_per_pos 1.0 +352 14 training.batch_size 0.0 +352 15 model.embedding_dim 1.0 +352 15 model.c_min 0.08871063713836261 +352 15 model.c_max 7.659186714209626 +352 15 optimizer.lr 0.0014006740253487738 +352 15 negative_sampler.num_negs_per_pos 15.0 +352 15 training.batch_size 1.0 +352 16 model.embedding_dim 2.0 +352 16 model.c_min 0.029196877309568887 +352 16 model.c_max 9.520920568075084 +352 16 optimizer.lr 0.08873542205548098 +352 16 negative_sampler.num_negs_per_pos 52.0 +352 16 training.batch_size 2.0 +352 17 model.embedding_dim 0.0 +352 17 model.c_min 0.01459426042780997 +352 17 model.c_max 1.3103880269352814 +352 17 optimizer.lr 0.016377323681360113 +352 17 negative_sampler.num_negs_per_pos 80.0 +352 17 training.batch_size 2.0 +352 18 model.embedding_dim 0.0 +352 18 model.c_min 0.03938668754175468 +352 18 model.c_max 8.616051576398931 +352 18 optimizer.lr 0.006183543834250649 +352 18 negative_sampler.num_negs_per_pos 20.0 +352 18 training.batch_size 1.0 +352 19 model.embedding_dim 0.0 +352 19 model.c_min 0.08197180338787946 +352 19 model.c_max 7.528661638696326 +352 19 optimizer.lr 0.003071706272409099 +352 19 
negative_sampler.num_negs_per_pos 40.0 +352 19 training.batch_size 1.0 +352 20 model.embedding_dim 0.0 +352 20 model.c_min 0.07861214428249771 +352 20 model.c_max 9.293175845763084 +352 20 optimizer.lr 0.02893297196019176 +352 20 negative_sampler.num_negs_per_pos 4.0 +352 20 training.batch_size 2.0 +352 21 model.embedding_dim 2.0 +352 21 model.c_min 0.07690207230778413 +352 21 model.c_max 3.1552085318103744 +352 21 optimizer.lr 0.03469257234952671 +352 21 negative_sampler.num_negs_per_pos 36.0 +352 21 training.batch_size 1.0 +352 22 model.embedding_dim 0.0 +352 22 model.c_min 0.05936471159109919 +352 22 model.c_max 2.1247657154786337 +352 22 optimizer.lr 0.01984339186877314 +352 22 negative_sampler.num_negs_per_pos 45.0 +352 22 training.batch_size 1.0 +352 23 model.embedding_dim 2.0 +352 23 model.c_min 0.025111328405432825 +352 23 model.c_max 8.145692635395994 +352 23 optimizer.lr 0.0252473421703774 +352 23 negative_sampler.num_negs_per_pos 99.0 +352 23 training.batch_size 1.0 +352 24 model.embedding_dim 1.0 +352 24 model.c_min 0.022682651426182844 +352 24 model.c_max 2.6148247239829363 +352 24 optimizer.lr 0.0017267095737632041 +352 24 negative_sampler.num_negs_per_pos 64.0 +352 24 training.batch_size 1.0 +352 25 model.embedding_dim 1.0 +352 25 model.c_min 0.0659420357449707 +352 25 model.c_max 8.200303664684247 +352 25 optimizer.lr 0.0011033103316370084 +352 25 negative_sampler.num_negs_per_pos 95.0 +352 25 training.batch_size 2.0 +352 26 model.embedding_dim 2.0 +352 26 model.c_min 0.017132630937134928 +352 26 model.c_max 5.614885223408174 +352 26 optimizer.lr 0.004926367576000186 +352 26 negative_sampler.num_negs_per_pos 56.0 +352 26 training.batch_size 2.0 +352 27 model.embedding_dim 2.0 +352 27 model.c_min 0.013722894480703334 +352 27 model.c_max 4.05146169866749 +352 27 optimizer.lr 0.003100005196130843 +352 27 negative_sampler.num_negs_per_pos 29.0 +352 27 training.batch_size 1.0 +352 28 model.embedding_dim 2.0 +352 28 model.c_min 0.04916334372423852 +352 28 
model.c_max 7.931888695981364 +352 28 optimizer.lr 0.009816688926846893 +352 28 negative_sampler.num_negs_per_pos 87.0 +352 28 training.batch_size 1.0 +352 29 model.embedding_dim 0.0 +352 29 model.c_min 0.020676687977398082 +352 29 model.c_max 5.065143484199583 +352 29 optimizer.lr 0.018922195758556144 +352 29 negative_sampler.num_negs_per_pos 75.0 +352 29 training.batch_size 2.0 +352 30 model.embedding_dim 1.0 +352 30 model.c_min 0.018619646090220644 +352 30 model.c_max 6.189243166857306 +352 30 optimizer.lr 0.02106564077007262 +352 30 negative_sampler.num_negs_per_pos 34.0 +352 30 training.batch_size 2.0 +352 31 model.embedding_dim 0.0 +352 31 model.c_min 0.07022315335991985 +352 31 model.c_max 5.330367953533649 +352 31 optimizer.lr 0.02558663901310968 +352 31 negative_sampler.num_negs_per_pos 99.0 +352 31 training.batch_size 0.0 +352 32 model.embedding_dim 0.0 +352 32 model.c_min 0.02636397773599945 +352 32 model.c_max 9.209756230607441 +352 32 optimizer.lr 0.030813285184639383 +352 32 negative_sampler.num_negs_per_pos 65.0 +352 32 training.batch_size 0.0 +352 33 model.embedding_dim 0.0 +352 33 model.c_min 0.0253586368759215 +352 33 model.c_max 5.003001272638466 +352 33 optimizer.lr 0.005256525439874708 +352 33 negative_sampler.num_negs_per_pos 75.0 +352 33 training.batch_size 0.0 +352 34 model.embedding_dim 0.0 +352 34 model.c_min 0.08695540088858032 +352 34 model.c_max 8.686881364805814 +352 34 optimizer.lr 0.006828510444814733 +352 34 negative_sampler.num_negs_per_pos 49.0 +352 34 training.batch_size 0.0 +352 1 dataset """fb15k237""" +352 1 model """kg2e""" +352 1 loss """softplus""" +352 1 regularizer """no""" +352 1 optimizer """adam""" +352 1 training_loop """owa""" +352 1 negative_sampler """basic""" +352 1 evaluator """rankbased""" +352 2 dataset """fb15k237""" +352 2 model """kg2e""" +352 2 loss """softplus""" +352 2 regularizer """no""" +352 2 optimizer """adam""" +352 2 training_loop """owa""" +352 2 negative_sampler """basic""" +352 2 evaluator 
"""rankbased""" +352 3 dataset """fb15k237""" +352 3 model """kg2e""" +352 3 loss """softplus""" +352 3 regularizer """no""" +352 3 optimizer """adam""" +352 3 training_loop """owa""" +352 3 negative_sampler """basic""" +352 3 evaluator """rankbased""" +352 4 dataset """fb15k237""" +352 4 model """kg2e""" +352 4 loss """softplus""" +352 4 regularizer """no""" +352 4 optimizer """adam""" +352 4 training_loop """owa""" +352 4 negative_sampler """basic""" +352 4 evaluator """rankbased""" +352 5 dataset """fb15k237""" +352 5 model """kg2e""" +352 5 loss """softplus""" +352 5 regularizer """no""" +352 5 optimizer """adam""" +352 5 training_loop """owa""" +352 5 negative_sampler """basic""" +352 5 evaluator """rankbased""" +352 6 dataset """fb15k237""" +352 6 model """kg2e""" +352 6 loss """softplus""" +352 6 regularizer """no""" +352 6 optimizer """adam""" +352 6 training_loop """owa""" +352 6 negative_sampler """basic""" +352 6 evaluator """rankbased""" +352 7 dataset """fb15k237""" +352 7 model """kg2e""" +352 7 loss """softplus""" +352 7 regularizer """no""" +352 7 optimizer """adam""" +352 7 training_loop """owa""" +352 7 negative_sampler """basic""" +352 7 evaluator """rankbased""" +352 8 dataset """fb15k237""" +352 8 model """kg2e""" +352 8 loss """softplus""" +352 8 regularizer """no""" +352 8 optimizer """adam""" +352 8 training_loop """owa""" +352 8 negative_sampler """basic""" +352 8 evaluator """rankbased""" +352 9 dataset """fb15k237""" +352 9 model """kg2e""" +352 9 loss """softplus""" +352 9 regularizer """no""" +352 9 optimizer """adam""" +352 9 training_loop """owa""" +352 9 negative_sampler """basic""" +352 9 evaluator """rankbased""" +352 10 dataset """fb15k237""" +352 10 model """kg2e""" +352 10 loss """softplus""" +352 10 regularizer """no""" +352 10 optimizer """adam""" +352 10 training_loop """owa""" +352 10 negative_sampler """basic""" +352 10 evaluator """rankbased""" +352 11 dataset """fb15k237""" +352 11 model """kg2e""" +352 11 loss 
"""softplus""" +352 11 regularizer """no""" +352 11 optimizer """adam""" +352 11 training_loop """owa""" +352 11 negative_sampler """basic""" +352 11 evaluator """rankbased""" +352 12 dataset """fb15k237""" +352 12 model """kg2e""" +352 12 loss """softplus""" +352 12 regularizer """no""" +352 12 optimizer """adam""" +352 12 training_loop """owa""" +352 12 negative_sampler """basic""" +352 12 evaluator """rankbased""" +352 13 dataset """fb15k237""" +352 13 model """kg2e""" +352 13 loss """softplus""" +352 13 regularizer """no""" +352 13 optimizer """adam""" +352 13 training_loop """owa""" +352 13 negative_sampler """basic""" +352 13 evaluator """rankbased""" +352 14 dataset """fb15k237""" +352 14 model """kg2e""" +352 14 loss """softplus""" +352 14 regularizer """no""" +352 14 optimizer """adam""" +352 14 training_loop """owa""" +352 14 negative_sampler """basic""" +352 14 evaluator """rankbased""" +352 15 dataset """fb15k237""" +352 15 model """kg2e""" +352 15 loss """softplus""" +352 15 regularizer """no""" +352 15 optimizer """adam""" +352 15 training_loop """owa""" +352 15 negative_sampler """basic""" +352 15 evaluator """rankbased""" +352 16 dataset """fb15k237""" +352 16 model """kg2e""" +352 16 loss """softplus""" +352 16 regularizer """no""" +352 16 optimizer """adam""" +352 16 training_loop """owa""" +352 16 negative_sampler """basic""" +352 16 evaluator """rankbased""" +352 17 dataset """fb15k237""" +352 17 model """kg2e""" +352 17 loss """softplus""" +352 17 regularizer """no""" +352 17 optimizer """adam""" +352 17 training_loop """owa""" +352 17 negative_sampler """basic""" +352 17 evaluator """rankbased""" +352 18 dataset """fb15k237""" +352 18 model """kg2e""" +352 18 loss """softplus""" +352 18 regularizer """no""" +352 18 optimizer """adam""" +352 18 training_loop """owa""" +352 18 negative_sampler """basic""" +352 18 evaluator """rankbased""" +352 19 dataset """fb15k237""" +352 19 model """kg2e""" +352 19 loss """softplus""" +352 19 regularizer 
"""no""" +352 19 optimizer """adam""" +352 19 training_loop """owa""" +352 19 negative_sampler """basic""" +352 19 evaluator """rankbased""" +352 20 dataset """fb15k237""" +352 20 model """kg2e""" +352 20 loss """softplus""" +352 20 regularizer """no""" +352 20 optimizer """adam""" +352 20 training_loop """owa""" +352 20 negative_sampler """basic""" +352 20 evaluator """rankbased""" +352 21 dataset """fb15k237""" +352 21 model """kg2e""" +352 21 loss """softplus""" +352 21 regularizer """no""" +352 21 optimizer """adam""" +352 21 training_loop """owa""" +352 21 negative_sampler """basic""" +352 21 evaluator """rankbased""" +352 22 dataset """fb15k237""" +352 22 model """kg2e""" +352 22 loss """softplus""" +352 22 regularizer """no""" +352 22 optimizer """adam""" +352 22 training_loop """owa""" +352 22 negative_sampler """basic""" +352 22 evaluator """rankbased""" +352 23 dataset """fb15k237""" +352 23 model """kg2e""" +352 23 loss """softplus""" +352 23 regularizer """no""" +352 23 optimizer """adam""" +352 23 training_loop """owa""" +352 23 negative_sampler """basic""" +352 23 evaluator """rankbased""" +352 24 dataset """fb15k237""" +352 24 model """kg2e""" +352 24 loss """softplus""" +352 24 regularizer """no""" +352 24 optimizer """adam""" +352 24 training_loop """owa""" +352 24 negative_sampler """basic""" +352 24 evaluator """rankbased""" +352 25 dataset """fb15k237""" +352 25 model """kg2e""" +352 25 loss """softplus""" +352 25 regularizer """no""" +352 25 optimizer """adam""" +352 25 training_loop """owa""" +352 25 negative_sampler """basic""" +352 25 evaluator """rankbased""" +352 26 dataset """fb15k237""" +352 26 model """kg2e""" +352 26 loss """softplus""" +352 26 regularizer """no""" +352 26 optimizer """adam""" +352 26 training_loop """owa""" +352 26 negative_sampler """basic""" +352 26 evaluator """rankbased""" +352 27 dataset """fb15k237""" +352 27 model """kg2e""" +352 27 loss """softplus""" +352 27 regularizer """no""" +352 27 optimizer """adam""" 
+352 27 training_loop """owa""" +352 27 negative_sampler """basic""" +352 27 evaluator """rankbased""" +352 28 dataset """fb15k237""" +352 28 model """kg2e""" +352 28 loss """softplus""" +352 28 regularizer """no""" +352 28 optimizer """adam""" +352 28 training_loop """owa""" +352 28 negative_sampler """basic""" +352 28 evaluator """rankbased""" +352 29 dataset """fb15k237""" +352 29 model """kg2e""" +352 29 loss """softplus""" +352 29 regularizer """no""" +352 29 optimizer """adam""" +352 29 training_loop """owa""" +352 29 negative_sampler """basic""" +352 29 evaluator """rankbased""" +352 30 dataset """fb15k237""" +352 30 model """kg2e""" +352 30 loss """softplus""" +352 30 regularizer """no""" +352 30 optimizer """adam""" +352 30 training_loop """owa""" +352 30 negative_sampler """basic""" +352 30 evaluator """rankbased""" +352 31 dataset """fb15k237""" +352 31 model """kg2e""" +352 31 loss """softplus""" +352 31 regularizer """no""" +352 31 optimizer """adam""" +352 31 training_loop """owa""" +352 31 negative_sampler """basic""" +352 31 evaluator """rankbased""" +352 32 dataset """fb15k237""" +352 32 model """kg2e""" +352 32 loss """softplus""" +352 32 regularizer """no""" +352 32 optimizer """adam""" +352 32 training_loop """owa""" +352 32 negative_sampler """basic""" +352 32 evaluator """rankbased""" +352 33 dataset """fb15k237""" +352 33 model """kg2e""" +352 33 loss """softplus""" +352 33 regularizer """no""" +352 33 optimizer """adam""" +352 33 training_loop """owa""" +352 33 negative_sampler """basic""" +352 33 evaluator """rankbased""" +352 34 dataset """fb15k237""" +352 34 model """kg2e""" +352 34 loss """softplus""" +352 34 regularizer """no""" +352 34 optimizer """adam""" +352 34 training_loop """owa""" +352 34 negative_sampler """basic""" +352 34 evaluator """rankbased""" +353 1 model.embedding_dim 0.0 +353 1 model.c_min 0.02015491613462065 +353 1 model.c_max 2.4309575190842923 +353 1 loss.margin 26.393117023098434 +353 1 loss.adversarial_temperature 
0.8303908089523526 +353 1 optimizer.lr 0.03538041276572711 +353 1 negative_sampler.num_negs_per_pos 72.0 +353 1 training.batch_size 1.0 +353 2 model.embedding_dim 1.0 +353 2 model.c_min 0.05399192921426798 +353 2 model.c_max 3.7395817403480924 +353 2 loss.margin 13.367218146360145 +353 2 loss.adversarial_temperature 0.13333119741480903 +353 2 optimizer.lr 0.03767223506657671 +353 2 negative_sampler.num_negs_per_pos 28.0 +353 2 training.batch_size 1.0 +353 3 model.embedding_dim 1.0 +353 3 model.c_min 0.031779167782551306 +353 3 model.c_max 7.221089611608886 +353 3 loss.margin 28.83348823720549 +353 3 loss.adversarial_temperature 0.4002135330887413 +353 3 optimizer.lr 0.01282813395245305 +353 3 negative_sampler.num_negs_per_pos 65.0 +353 3 training.batch_size 2.0 +353 4 model.embedding_dim 2.0 +353 4 model.c_min 0.041285439768823436 +353 4 model.c_max 2.1700992156640293 +353 4 loss.margin 3.105922273057298 +353 4 loss.adversarial_temperature 0.29132080333934884 +353 4 optimizer.lr 0.02085317820147767 +353 4 negative_sampler.num_negs_per_pos 59.0 +353 4 training.batch_size 1.0 +353 5 model.embedding_dim 1.0 +353 5 model.c_min 0.01175142574264574 +353 5 model.c_max 1.343876133684227 +353 5 loss.margin 7.672757664436479 +353 5 loss.adversarial_temperature 0.4662471723591264 +353 5 optimizer.lr 0.015197767540252717 +353 5 negative_sampler.num_negs_per_pos 20.0 +353 5 training.batch_size 1.0 +353 6 model.embedding_dim 1.0 +353 6 model.c_min 0.02049162669164612 +353 6 model.c_max 9.57386720545542 +353 6 loss.margin 19.05872803553801 +353 6 loss.adversarial_temperature 0.8298866556086628 +353 6 optimizer.lr 0.04920193782225971 +353 6 negative_sampler.num_negs_per_pos 71.0 +353 6 training.batch_size 1.0 +353 7 model.embedding_dim 1.0 +353 7 model.c_min 0.0197186537852451 +353 7 model.c_max 4.985259455150379 +353 7 loss.margin 25.178937157183803 +353 7 loss.adversarial_temperature 0.8333182029817787 +353 7 optimizer.lr 0.008317845732657023 +353 7 
negative_sampler.num_negs_per_pos 85.0 +353 7 training.batch_size 2.0 +353 8 model.embedding_dim 0.0 +353 8 model.c_min 0.026784100697285488 +353 8 model.c_max 1.2188335050196843 +353 8 loss.margin 11.907183662499355 +353 8 loss.adversarial_temperature 0.1333503275882415 +353 8 optimizer.lr 0.003323481835830372 +353 8 negative_sampler.num_negs_per_pos 63.0 +353 8 training.batch_size 0.0 +353 9 model.embedding_dim 2.0 +353 9 model.c_min 0.06419950631191822 +353 9 model.c_max 2.2300616332963648 +353 9 loss.margin 11.61255479794722 +353 9 loss.adversarial_temperature 0.8274146683184193 +353 9 optimizer.lr 0.0020638769578266063 +353 9 negative_sampler.num_negs_per_pos 27.0 +353 9 training.batch_size 0.0 +353 10 model.embedding_dim 0.0 +353 10 model.c_min 0.0881491250750393 +353 10 model.c_max 4.464420123555607 +353 10 loss.margin 19.185561299681446 +353 10 loss.adversarial_temperature 0.23799471039451528 +353 10 optimizer.lr 0.04688428379918573 +353 10 negative_sampler.num_negs_per_pos 81.0 +353 10 training.batch_size 0.0 +353 11 model.embedding_dim 0.0 +353 11 model.c_min 0.07262050439529803 +353 11 model.c_max 6.544771026014144 +353 11 loss.margin 27.9379454058954 +353 11 loss.adversarial_temperature 0.1925536982880488 +353 11 optimizer.lr 0.09613957305107475 +353 11 negative_sampler.num_negs_per_pos 25.0 +353 11 training.batch_size 2.0 +353 12 model.embedding_dim 1.0 +353 12 model.c_min 0.01413258623330266 +353 12 model.c_max 5.8250826927776105 +353 12 loss.margin 28.095594296174326 +353 12 loss.adversarial_temperature 0.8359600857997808 +353 12 optimizer.lr 0.022475529957259845 +353 12 negative_sampler.num_negs_per_pos 38.0 +353 12 training.batch_size 1.0 +353 13 model.embedding_dim 2.0 +353 13 model.c_min 0.011179231706960852 +353 13 model.c_max 8.065724577137312 +353 13 loss.margin 24.522349372235634 +353 13 loss.adversarial_temperature 0.9578938113327234 +353 13 optimizer.lr 0.00753313747254003 +353 13 negative_sampler.num_negs_per_pos 61.0 +353 13 
training.batch_size 1.0 +353 14 model.embedding_dim 0.0 +353 14 model.c_min 0.05027337523974515 +353 14 model.c_max 5.2954720073707975 +353 14 loss.margin 28.629379243990687 +353 14 loss.adversarial_temperature 0.8108737921156576 +353 14 optimizer.lr 0.014491218535915265 +353 14 negative_sampler.num_negs_per_pos 97.0 +353 14 training.batch_size 2.0 +353 15 model.embedding_dim 0.0 +353 15 model.c_min 0.02319489729902877 +353 15 model.c_max 2.6375369473711645 +353 15 loss.margin 9.527333129394123 +353 15 loss.adversarial_temperature 0.28396660718240885 +353 15 optimizer.lr 0.013959018744045714 +353 15 negative_sampler.num_negs_per_pos 62.0 +353 15 training.batch_size 0.0 +353 16 model.embedding_dim 2.0 +353 16 model.c_min 0.010329122505128168 +353 16 model.c_max 5.9599138204222974 +353 16 loss.margin 18.86142888622143 +353 16 loss.adversarial_temperature 0.710441507334998 +353 16 optimizer.lr 0.09892852613898423 +353 16 negative_sampler.num_negs_per_pos 66.0 +353 16 training.batch_size 2.0 +353 17 model.embedding_dim 0.0 +353 17 model.c_min 0.041632364036885916 +353 17 model.c_max 3.3993865413932536 +353 17 loss.margin 15.243227284323545 +353 17 loss.adversarial_temperature 0.9748675830281421 +353 17 optimizer.lr 0.0017586665322191937 +353 17 negative_sampler.num_negs_per_pos 76.0 +353 17 training.batch_size 0.0 +353 18 model.embedding_dim 2.0 +353 18 model.c_min 0.040207768745041665 +353 18 model.c_max 1.6503262468682585 +353 18 loss.margin 9.014711127745146 +353 18 loss.adversarial_temperature 0.3362431597081035 +353 18 optimizer.lr 0.017539768864604885 +353 18 negative_sampler.num_negs_per_pos 56.0 +353 18 training.batch_size 0.0 +353 1 dataset """fb15k237""" +353 1 model """kg2e""" +353 1 loss """nssa""" +353 1 regularizer """no""" +353 1 optimizer """adam""" +353 1 training_loop """owa""" +353 1 negative_sampler """basic""" +353 1 evaluator """rankbased""" +353 2 dataset """fb15k237""" +353 2 model """kg2e""" +353 2 loss """nssa""" +353 2 regularizer """no""" 
+353 2 optimizer """adam""" +353 2 training_loop """owa""" +353 2 negative_sampler """basic""" +353 2 evaluator """rankbased""" +353 3 dataset """fb15k237""" +353 3 model """kg2e""" +353 3 loss """nssa""" +353 3 regularizer """no""" +353 3 optimizer """adam""" +353 3 training_loop """owa""" +353 3 negative_sampler """basic""" +353 3 evaluator """rankbased""" +353 4 dataset """fb15k237""" +353 4 model """kg2e""" +353 4 loss """nssa""" +353 4 regularizer """no""" +353 4 optimizer """adam""" +353 4 training_loop """owa""" +353 4 negative_sampler """basic""" +353 4 evaluator """rankbased""" +353 5 dataset """fb15k237""" +353 5 model """kg2e""" +353 5 loss """nssa""" +353 5 regularizer """no""" +353 5 optimizer """adam""" +353 5 training_loop """owa""" +353 5 negative_sampler """basic""" +353 5 evaluator """rankbased""" +353 6 dataset """fb15k237""" +353 6 model """kg2e""" +353 6 loss """nssa""" +353 6 regularizer """no""" +353 6 optimizer """adam""" +353 6 training_loop """owa""" +353 6 negative_sampler """basic""" +353 6 evaluator """rankbased""" +353 7 dataset """fb15k237""" +353 7 model """kg2e""" +353 7 loss """nssa""" +353 7 regularizer """no""" +353 7 optimizer """adam""" +353 7 training_loop """owa""" +353 7 negative_sampler """basic""" +353 7 evaluator """rankbased""" +353 8 dataset """fb15k237""" +353 8 model """kg2e""" +353 8 loss """nssa""" +353 8 regularizer """no""" +353 8 optimizer """adam""" +353 8 training_loop """owa""" +353 8 negative_sampler """basic""" +353 8 evaluator """rankbased""" +353 9 dataset """fb15k237""" +353 9 model """kg2e""" +353 9 loss """nssa""" +353 9 regularizer """no""" +353 9 optimizer """adam""" +353 9 training_loop """owa""" +353 9 negative_sampler """basic""" +353 9 evaluator """rankbased""" +353 10 dataset """fb15k237""" +353 10 model """kg2e""" +353 10 loss """nssa""" +353 10 regularizer """no""" +353 10 optimizer """adam""" +353 10 training_loop """owa""" +353 10 negative_sampler """basic""" +353 10 evaluator """rankbased""" 
+353 11 dataset """fb15k237""" +353 11 model """kg2e""" +353 11 loss """nssa""" +353 11 regularizer """no""" +353 11 optimizer """adam""" +353 11 training_loop """owa""" +353 11 negative_sampler """basic""" +353 11 evaluator """rankbased""" +353 12 dataset """fb15k237""" +353 12 model """kg2e""" +353 12 loss """nssa""" +353 12 regularizer """no""" +353 12 optimizer """adam""" +353 12 training_loop """owa""" +353 12 negative_sampler """basic""" +353 12 evaluator """rankbased""" +353 13 dataset """fb15k237""" +353 13 model """kg2e""" +353 13 loss """nssa""" +353 13 regularizer """no""" +353 13 optimizer """adam""" +353 13 training_loop """owa""" +353 13 negative_sampler """basic""" +353 13 evaluator """rankbased""" +353 14 dataset """fb15k237""" +353 14 model """kg2e""" +353 14 loss """nssa""" +353 14 regularizer """no""" +353 14 optimizer """adam""" +353 14 training_loop """owa""" +353 14 negative_sampler """basic""" +353 14 evaluator """rankbased""" +353 15 dataset """fb15k237""" +353 15 model """kg2e""" +353 15 loss """nssa""" +353 15 regularizer """no""" +353 15 optimizer """adam""" +353 15 training_loop """owa""" +353 15 negative_sampler """basic""" +353 15 evaluator """rankbased""" +353 16 dataset """fb15k237""" +353 16 model """kg2e""" +353 16 loss """nssa""" +353 16 regularizer """no""" +353 16 optimizer """adam""" +353 16 training_loop """owa""" +353 16 negative_sampler """basic""" +353 16 evaluator """rankbased""" +353 17 dataset """fb15k237""" +353 17 model """kg2e""" +353 17 loss """nssa""" +353 17 regularizer """no""" +353 17 optimizer """adam""" +353 17 training_loop """owa""" +353 17 negative_sampler """basic""" +353 17 evaluator """rankbased""" +353 18 dataset """fb15k237""" +353 18 model """kg2e""" +353 18 loss """nssa""" +353 18 regularizer """no""" +353 18 optimizer """adam""" +353 18 training_loop """owa""" +353 18 negative_sampler """basic""" +353 18 evaluator """rankbased""" +354 1 model.embedding_dim 0.0 +354 1 model.c_min 0.0301871767452839 
+354 1 model.c_max 1.2180631474074932 +354 1 loss.margin 27.679545166766 +354 1 loss.adversarial_temperature 0.34267029819335915 +354 1 optimizer.lr 0.027498950478066594 +354 1 negative_sampler.num_negs_per_pos 49.0 +354 1 training.batch_size 1.0 +354 2 model.embedding_dim 2.0 +354 2 model.c_min 0.06589712679121391 +354 2 model.c_max 2.7114839206771464 +354 2 loss.margin 21.43657516606825 +354 2 loss.adversarial_temperature 0.8318323401137997 +354 2 optimizer.lr 0.006816044510692029 +354 2 negative_sampler.num_negs_per_pos 57.0 +354 2 training.batch_size 1.0 +354 3 model.embedding_dim 1.0 +354 3 model.c_min 0.05243930631525614 +354 3 model.c_max 9.36118652734664 +354 3 loss.margin 19.2270016142241 +354 3 loss.adversarial_temperature 0.4647520076331279 +354 3 optimizer.lr 0.07990057290097893 +354 3 negative_sampler.num_negs_per_pos 12.0 +354 3 training.batch_size 2.0 +354 4 model.embedding_dim 1.0 +354 4 model.c_min 0.043105592213303054 +354 4 model.c_max 3.415532985821544 +354 4 loss.margin 23.550625226120804 +354 4 loss.adversarial_temperature 0.4184354454955935 +354 4 optimizer.lr 0.0024130429554769955 +354 4 negative_sampler.num_negs_per_pos 49.0 +354 4 training.batch_size 1.0 +354 5 model.embedding_dim 2.0 +354 5 model.c_min 0.05607652521208217 +354 5 model.c_max 3.563845836757311 +354 5 loss.margin 10.091884793130806 +354 5 loss.adversarial_temperature 0.36788212528307795 +354 5 optimizer.lr 0.0015410992353058533 +354 5 negative_sampler.num_negs_per_pos 31.0 +354 5 training.batch_size 0.0 +354 6 model.embedding_dim 1.0 +354 6 model.c_min 0.012516602442089467 +354 6 model.c_max 5.386328652922247 +354 6 loss.margin 9.630754596128206 +354 6 loss.adversarial_temperature 0.6694708868079327 +354 6 optimizer.lr 0.020942377428062202 +354 6 negative_sampler.num_negs_per_pos 23.0 +354 6 training.batch_size 2.0 +354 7 model.embedding_dim 0.0 +354 7 model.c_min 0.062431027308868264 +354 7 model.c_max 5.740190442514058 +354 7 loss.margin 14.590612462094331 +354 7 
loss.adversarial_temperature 0.12822292647609068 +354 7 optimizer.lr 0.007169861210926641 +354 7 negative_sampler.num_negs_per_pos 3.0 +354 7 training.batch_size 2.0 +354 8 model.embedding_dim 0.0 +354 8 model.c_min 0.04304974494204384 +354 8 model.c_max 7.296650477956922 +354 8 loss.margin 10.931582141276118 +354 8 loss.adversarial_temperature 0.37694158070910494 +354 8 optimizer.lr 0.0020984424010452693 +354 8 negative_sampler.num_negs_per_pos 38.0 +354 8 training.batch_size 1.0 +354 9 model.embedding_dim 2.0 +354 9 model.c_min 0.010781660803052694 +354 9 model.c_max 3.5733419806983884 +354 9 loss.margin 23.172833789565907 +354 9 loss.adversarial_temperature 0.8150232616337422 +354 9 optimizer.lr 0.0232359126698233 +354 9 negative_sampler.num_negs_per_pos 1.0 +354 9 training.batch_size 2.0 +354 10 model.embedding_dim 1.0 +354 10 model.c_min 0.014573152620540092 +354 10 model.c_max 4.535546374232921 +354 10 loss.margin 23.6474500115055 +354 10 loss.adversarial_temperature 0.1627594601025776 +354 10 optimizer.lr 0.06168290715028797 +354 10 negative_sampler.num_negs_per_pos 0.0 +354 10 training.batch_size 2.0 +354 11 model.embedding_dim 2.0 +354 11 model.c_min 0.028798041496129072 +354 11 model.c_max 1.3303861257436544 +354 11 loss.margin 15.813671319383854 +354 11 loss.adversarial_temperature 0.5083889830321205 +354 11 optimizer.lr 0.0013798038718290148 +354 11 negative_sampler.num_negs_per_pos 8.0 +354 11 training.batch_size 2.0 +354 12 model.embedding_dim 0.0 +354 12 model.c_min 0.01680477248597104 +354 12 model.c_max 3.0533639333761085 +354 12 loss.margin 15.222548811306732 +354 12 loss.adversarial_temperature 0.9397073883818591 +354 12 optimizer.lr 0.011367124113324063 +354 12 negative_sampler.num_negs_per_pos 7.0 +354 12 training.batch_size 2.0 +354 13 model.embedding_dim 0.0 +354 13 model.c_min 0.013251381969798073 +354 13 model.c_max 2.921725943698317 +354 13 loss.margin 19.557450895716748 +354 13 loss.adversarial_temperature 0.9517059298573511 +354 13 
optimizer.lr 0.0013107630805958307 +354 13 negative_sampler.num_negs_per_pos 19.0 +354 13 training.batch_size 2.0 +354 14 model.embedding_dim 0.0 +354 14 model.c_min 0.036483122251877366 +354 14 model.c_max 3.1379870929067994 +354 14 loss.margin 6.259308879972151 +354 14 loss.adversarial_temperature 0.5887306133872173 +354 14 optimizer.lr 0.001022683378369957 +354 14 negative_sampler.num_negs_per_pos 28.0 +354 14 training.batch_size 0.0 +354 15 model.embedding_dim 0.0 +354 15 model.c_min 0.01055764166057955 +354 15 model.c_max 8.891141600638443 +354 15 loss.margin 18.60304180103812 +354 15 loss.adversarial_temperature 0.28101330313899714 +354 15 optimizer.lr 0.02140398502281483 +354 15 negative_sampler.num_negs_per_pos 65.0 +354 15 training.batch_size 2.0 +354 16 model.embedding_dim 0.0 +354 16 model.c_min 0.03220811457982133 +354 16 model.c_max 1.797373874970908 +354 16 loss.margin 17.33544120758942 +354 16 loss.adversarial_temperature 0.9763952559412665 +354 16 optimizer.lr 0.04104196318337892 +354 16 negative_sampler.num_negs_per_pos 74.0 +354 16 training.batch_size 1.0 +354 17 model.embedding_dim 2.0 +354 17 model.c_min 0.06475935637569219 +354 17 model.c_max 1.613353156826017 +354 17 loss.margin 24.894855989880057 +354 17 loss.adversarial_temperature 0.3499578128335516 +354 17 optimizer.lr 0.010102476864876754 +354 17 negative_sampler.num_negs_per_pos 94.0 +354 17 training.batch_size 2.0 +354 18 model.embedding_dim 0.0 +354 18 model.c_min 0.02829098764420225 +354 18 model.c_max 5.007691103619346 +354 18 loss.margin 24.04784454582647 +354 18 loss.adversarial_temperature 0.7672471329114994 +354 18 optimizer.lr 0.02579231789642803 +354 18 negative_sampler.num_negs_per_pos 3.0 +354 18 training.batch_size 2.0 +354 19 model.embedding_dim 2.0 +354 19 model.c_min 0.03880363334417818 +354 19 model.c_max 7.155759845351393 +354 19 loss.margin 14.785935089485523 +354 19 loss.adversarial_temperature 0.10832114423807684 +354 19 optimizer.lr 0.023936881314567577 +354 19 
negative_sampler.num_negs_per_pos 35.0 +354 19 training.batch_size 0.0 +354 20 model.embedding_dim 0.0 +354 20 model.c_min 0.08956750714811439 +354 20 model.c_max 2.4320361532262567 +354 20 loss.margin 7.97919162857517 +354 20 loss.adversarial_temperature 0.5086210217992253 +354 20 optimizer.lr 0.004790240466574032 +354 20 negative_sampler.num_negs_per_pos 36.0 +354 20 training.batch_size 0.0 +354 21 model.embedding_dim 1.0 +354 21 model.c_min 0.026333241699297707 +354 21 model.c_max 7.887770049910048 +354 21 loss.margin 2.591080492101148 +354 21 loss.adversarial_temperature 0.28524909674257665 +354 21 optimizer.lr 0.0018501135151343397 +354 21 negative_sampler.num_negs_per_pos 41.0 +354 21 training.batch_size 0.0 +354 22 model.embedding_dim 1.0 +354 22 model.c_min 0.03839750933188231 +354 22 model.c_max 9.017005877764428 +354 22 loss.margin 6.460434372697851 +354 22 loss.adversarial_temperature 0.24173228128737337 +354 22 optimizer.lr 0.0026159608824557327 +354 22 negative_sampler.num_negs_per_pos 0.0 +354 22 training.batch_size 1.0 +354 23 model.embedding_dim 1.0 +354 23 model.c_min 0.01681681121004099 +354 23 model.c_max 5.232716164678703 +354 23 loss.margin 29.6541015835543 +354 23 loss.adversarial_temperature 0.4689065586995787 +354 23 optimizer.lr 0.0029837709346302137 +354 23 negative_sampler.num_negs_per_pos 93.0 +354 23 training.batch_size 1.0 +354 24 model.embedding_dim 2.0 +354 24 model.c_min 0.026969071655326877 +354 24 model.c_max 1.7626833806496496 +354 24 loss.margin 6.481918436040086 +354 24 loss.adversarial_temperature 0.1076111424836163 +354 24 optimizer.lr 0.0015270626133483372 +354 24 negative_sampler.num_negs_per_pos 91.0 +354 24 training.batch_size 0.0 +354 25 model.embedding_dim 0.0 +354 25 model.c_min 0.027649267781598656 +354 25 model.c_max 7.674107909025234 +354 25 loss.margin 12.040545672503706 +354 25 loss.adversarial_temperature 0.15190962719475068 +354 25 optimizer.lr 0.0014919626358516794 +354 25 negative_sampler.num_negs_per_pos 22.0 
+354 25 training.batch_size 2.0 +354 26 model.embedding_dim 0.0 +354 26 model.c_min 0.03403867054634622 +354 26 model.c_max 3.6561363654319554 +354 26 loss.margin 2.4191742996142733 +354 26 loss.adversarial_temperature 0.14545460755239661 +354 26 optimizer.lr 0.006795237524488791 +354 26 negative_sampler.num_negs_per_pos 38.0 +354 26 training.batch_size 2.0 +354 27 model.embedding_dim 0.0 +354 27 model.c_min 0.016888988481591396 +354 27 model.c_max 6.045416788090452 +354 27 loss.margin 6.351797803087166 +354 27 loss.adversarial_temperature 0.45489408775558815 +354 27 optimizer.lr 0.026352322729432734 +354 27 negative_sampler.num_negs_per_pos 19.0 +354 27 training.batch_size 0.0 +354 28 model.embedding_dim 1.0 +354 28 model.c_min 0.029766344122557634 +354 28 model.c_max 5.304695172986252 +354 28 loss.margin 11.856521217839394 +354 28 loss.adversarial_temperature 0.21615814837678976 +354 28 optimizer.lr 0.06476034970070935 +354 28 negative_sampler.num_negs_per_pos 32.0 +354 28 training.batch_size 2.0 +354 29 model.embedding_dim 0.0 +354 29 model.c_min 0.04152186615618869 +354 29 model.c_max 5.8837245264851274 +354 29 loss.margin 15.218939992961445 +354 29 loss.adversarial_temperature 0.17280863643736297 +354 29 optimizer.lr 0.02989288628455112 +354 29 negative_sampler.num_negs_per_pos 54.0 +354 29 training.batch_size 0.0 +354 30 model.embedding_dim 0.0 +354 30 model.c_min 0.01197148212679739 +354 30 model.c_max 4.749349537608575 +354 30 loss.margin 27.767455760833034 +354 30 loss.adversarial_temperature 0.3496525118258929 +354 30 optimizer.lr 0.0025792154973926736 +354 30 negative_sampler.num_negs_per_pos 93.0 +354 30 training.batch_size 0.0 +354 31 model.embedding_dim 2.0 +354 31 model.c_min 0.03401678082654056 +354 31 model.c_max 2.2516269318553537 +354 31 loss.margin 19.5236605378661 +354 31 loss.adversarial_temperature 0.23581386967937837 +354 31 optimizer.lr 0.002755778612432171 +354 31 negative_sampler.num_negs_per_pos 42.0 +354 31 training.batch_size 1.0 +354 
32 model.embedding_dim 1.0 +354 32 model.c_min 0.0973376491525887 +354 32 model.c_max 2.595231352945625 +354 32 loss.margin 29.49012357188752 +354 32 loss.adversarial_temperature 0.5283193860592155 +354 32 optimizer.lr 0.0286346367824357 +354 32 negative_sampler.num_negs_per_pos 94.0 +354 32 training.batch_size 1.0 +354 33 model.embedding_dim 2.0 +354 33 model.c_min 0.09713098046855653 +354 33 model.c_max 7.035625490723749 +354 33 loss.margin 14.8525802820289 +354 33 loss.adversarial_temperature 0.6755969962277248 +354 33 optimizer.lr 0.0015445791811021746 +354 33 negative_sampler.num_negs_per_pos 32.0 +354 33 training.batch_size 0.0 +354 34 model.embedding_dim 2.0 +354 34 model.c_min 0.025621330053863654 +354 34 model.c_max 5.281974872095133 +354 34 loss.margin 10.852316701368098 +354 34 loss.adversarial_temperature 0.18582511054127746 +354 34 optimizer.lr 0.0045464109568382925 +354 34 negative_sampler.num_negs_per_pos 5.0 +354 34 training.batch_size 2.0 +354 35 model.embedding_dim 0.0 +354 35 model.c_min 0.02455115908216323 +354 35 model.c_max 7.511557489252076 +354 35 loss.margin 15.626161747340717 +354 35 loss.adversarial_temperature 0.22620137437825663 +354 35 optimizer.lr 0.011868105615270484 +354 35 negative_sampler.num_negs_per_pos 83.0 +354 35 training.batch_size 2.0 +354 36 model.embedding_dim 1.0 +354 36 model.c_min 0.054373945521624854 +354 36 model.c_max 8.650122741659247 +354 36 loss.margin 9.798346091474645 +354 36 loss.adversarial_temperature 0.8449697437932938 +354 36 optimizer.lr 0.0010422317172489024 +354 36 negative_sampler.num_negs_per_pos 49.0 +354 36 training.batch_size 1.0 +354 1 dataset """fb15k237""" +354 1 model """kg2e""" +354 1 loss """nssa""" +354 1 regularizer """no""" +354 1 optimizer """adam""" +354 1 training_loop """owa""" +354 1 negative_sampler """basic""" +354 1 evaluator """rankbased""" +354 2 dataset """fb15k237""" +354 2 model """kg2e""" +354 2 loss """nssa""" +354 2 regularizer """no""" +354 2 optimizer """adam""" +354 2 
training_loop """owa""" +354 2 negative_sampler """basic""" +354 2 evaluator """rankbased""" +354 3 dataset """fb15k237""" +354 3 model """kg2e""" +354 3 loss """nssa""" +354 3 regularizer """no""" +354 3 optimizer """adam""" +354 3 training_loop """owa""" +354 3 negative_sampler """basic""" +354 3 evaluator """rankbased""" +354 4 dataset """fb15k237""" +354 4 model """kg2e""" +354 4 loss """nssa""" +354 4 regularizer """no""" +354 4 optimizer """adam""" +354 4 training_loop """owa""" +354 4 negative_sampler """basic""" +354 4 evaluator """rankbased""" +354 5 dataset """fb15k237""" +354 5 model """kg2e""" +354 5 loss """nssa""" +354 5 regularizer """no""" +354 5 optimizer """adam""" +354 5 training_loop """owa""" +354 5 negative_sampler """basic""" +354 5 evaluator """rankbased""" +354 6 dataset """fb15k237""" +354 6 model """kg2e""" +354 6 loss """nssa""" +354 6 regularizer """no""" +354 6 optimizer """adam""" +354 6 training_loop """owa""" +354 6 negative_sampler """basic""" +354 6 evaluator """rankbased""" +354 7 dataset """fb15k237""" +354 7 model """kg2e""" +354 7 loss """nssa""" +354 7 regularizer """no""" +354 7 optimizer """adam""" +354 7 training_loop """owa""" +354 7 negative_sampler """basic""" +354 7 evaluator """rankbased""" +354 8 dataset """fb15k237""" +354 8 model """kg2e""" +354 8 loss """nssa""" +354 8 regularizer """no""" +354 8 optimizer """adam""" +354 8 training_loop """owa""" +354 8 negative_sampler """basic""" +354 8 evaluator """rankbased""" +354 9 dataset """fb15k237""" +354 9 model """kg2e""" +354 9 loss """nssa""" +354 9 regularizer """no""" +354 9 optimizer """adam""" +354 9 training_loop """owa""" +354 9 negative_sampler """basic""" +354 9 evaluator """rankbased""" +354 10 dataset """fb15k237""" +354 10 model """kg2e""" +354 10 loss """nssa""" +354 10 regularizer """no""" +354 10 optimizer """adam""" +354 10 training_loop """owa""" +354 10 negative_sampler """basic""" +354 10 evaluator """rankbased""" +354 11 dataset """fb15k237""" 
+354 11 model """kg2e""" +354 11 loss """nssa""" +354 11 regularizer """no""" +354 11 optimizer """adam""" +354 11 training_loop """owa""" +354 11 negative_sampler """basic""" +354 11 evaluator """rankbased""" +354 12 dataset """fb15k237""" +354 12 model """kg2e""" +354 12 loss """nssa""" +354 12 regularizer """no""" +354 12 optimizer """adam""" +354 12 training_loop """owa""" +354 12 negative_sampler """basic""" +354 12 evaluator """rankbased""" +354 13 dataset """fb15k237""" +354 13 model """kg2e""" +354 13 loss """nssa""" +354 13 regularizer """no""" +354 13 optimizer """adam""" +354 13 training_loop """owa""" +354 13 negative_sampler """basic""" +354 13 evaluator """rankbased""" +354 14 dataset """fb15k237""" +354 14 model """kg2e""" +354 14 loss """nssa""" +354 14 regularizer """no""" +354 14 optimizer """adam""" +354 14 training_loop """owa""" +354 14 negative_sampler """basic""" +354 14 evaluator """rankbased""" +354 15 dataset """fb15k237""" +354 15 model """kg2e""" +354 15 loss """nssa""" +354 15 regularizer """no""" +354 15 optimizer """adam""" +354 15 training_loop """owa""" +354 15 negative_sampler """basic""" +354 15 evaluator """rankbased""" +354 16 dataset """fb15k237""" +354 16 model """kg2e""" +354 16 loss """nssa""" +354 16 regularizer """no""" +354 16 optimizer """adam""" +354 16 training_loop """owa""" +354 16 negative_sampler """basic""" +354 16 evaluator """rankbased""" +354 17 dataset """fb15k237""" +354 17 model """kg2e""" +354 17 loss """nssa""" +354 17 regularizer """no""" +354 17 optimizer """adam""" +354 17 training_loop """owa""" +354 17 negative_sampler """basic""" +354 17 evaluator """rankbased""" +354 18 dataset """fb15k237""" +354 18 model """kg2e""" +354 18 loss """nssa""" +354 18 regularizer """no""" +354 18 optimizer """adam""" +354 18 training_loop """owa""" +354 18 negative_sampler """basic""" +354 18 evaluator """rankbased""" +354 19 dataset """fb15k237""" +354 19 model """kg2e""" +354 19 loss """nssa""" +354 19 regularizer 
"""no""" +354 19 optimizer """adam""" +354 19 training_loop """owa""" +354 19 negative_sampler """basic""" +354 19 evaluator """rankbased""" +354 20 dataset """fb15k237""" +354 20 model """kg2e""" +354 20 loss """nssa""" +354 20 regularizer """no""" +354 20 optimizer """adam""" +354 20 training_loop """owa""" +354 20 negative_sampler """basic""" +354 20 evaluator """rankbased""" +354 21 dataset """fb15k237""" +354 21 model """kg2e""" +354 21 loss """nssa""" +354 21 regularizer """no""" +354 21 optimizer """adam""" +354 21 training_loop """owa""" +354 21 negative_sampler """basic""" +354 21 evaluator """rankbased""" +354 22 dataset """fb15k237""" +354 22 model """kg2e""" +354 22 loss """nssa""" +354 22 regularizer """no""" +354 22 optimizer """adam""" +354 22 training_loop """owa""" +354 22 negative_sampler """basic""" +354 22 evaluator """rankbased""" +354 23 dataset """fb15k237""" +354 23 model """kg2e""" +354 23 loss """nssa""" +354 23 regularizer """no""" +354 23 optimizer """adam""" +354 23 training_loop """owa""" +354 23 negative_sampler """basic""" +354 23 evaluator """rankbased""" +354 24 dataset """fb15k237""" +354 24 model """kg2e""" +354 24 loss """nssa""" +354 24 regularizer """no""" +354 24 optimizer """adam""" +354 24 training_loop """owa""" +354 24 negative_sampler """basic""" +354 24 evaluator """rankbased""" +354 25 dataset """fb15k237""" +354 25 model """kg2e""" +354 25 loss """nssa""" +354 25 regularizer """no""" +354 25 optimizer """adam""" +354 25 training_loop """owa""" +354 25 negative_sampler """basic""" +354 25 evaluator """rankbased""" +354 26 dataset """fb15k237""" +354 26 model """kg2e""" +354 26 loss """nssa""" +354 26 regularizer """no""" +354 26 optimizer """adam""" +354 26 training_loop """owa""" +354 26 negative_sampler """basic""" +354 26 evaluator """rankbased""" +354 27 dataset """fb15k237""" +354 27 model """kg2e""" +354 27 loss """nssa""" +354 27 regularizer """no""" +354 27 optimizer """adam""" +354 27 training_loop """owa""" 
+354 27 negative_sampler """basic""" +354 27 evaluator """rankbased""" +354 28 dataset """fb15k237""" +354 28 model """kg2e""" +354 28 loss """nssa""" +354 28 regularizer """no""" +354 28 optimizer """adam""" +354 28 training_loop """owa""" +354 28 negative_sampler """basic""" +354 28 evaluator """rankbased""" +354 29 dataset """fb15k237""" +354 29 model """kg2e""" +354 29 loss """nssa""" +354 29 regularizer """no""" +354 29 optimizer """adam""" +354 29 training_loop """owa""" +354 29 negative_sampler """basic""" +354 29 evaluator """rankbased""" +354 30 dataset """fb15k237""" +354 30 model """kg2e""" +354 30 loss """nssa""" +354 30 regularizer """no""" +354 30 optimizer """adam""" +354 30 training_loop """owa""" +354 30 negative_sampler """basic""" +354 30 evaluator """rankbased""" +354 31 dataset """fb15k237""" +354 31 model """kg2e""" +354 31 loss """nssa""" +354 31 regularizer """no""" +354 31 optimizer """adam""" +354 31 training_loop """owa""" +354 31 negative_sampler """basic""" +354 31 evaluator """rankbased""" +354 32 dataset """fb15k237""" +354 32 model """kg2e""" +354 32 loss """nssa""" +354 32 regularizer """no""" +354 32 optimizer """adam""" +354 32 training_loop """owa""" +354 32 negative_sampler """basic""" +354 32 evaluator """rankbased""" +354 33 dataset """fb15k237""" +354 33 model """kg2e""" +354 33 loss """nssa""" +354 33 regularizer """no""" +354 33 optimizer """adam""" +354 33 training_loop """owa""" +354 33 negative_sampler """basic""" +354 33 evaluator """rankbased""" +354 34 dataset """fb15k237""" +354 34 model """kg2e""" +354 34 loss """nssa""" +354 34 regularizer """no""" +354 34 optimizer """adam""" +354 34 training_loop """owa""" +354 34 negative_sampler """basic""" +354 34 evaluator """rankbased""" +354 35 dataset """fb15k237""" +354 35 model """kg2e""" +354 35 loss """nssa""" +354 35 regularizer """no""" +354 35 optimizer """adam""" +354 35 training_loop """owa""" +354 35 negative_sampler """basic""" +354 35 evaluator """rankbased""" 
+354 36 dataset """fb15k237""" +354 36 model """kg2e""" +354 36 loss """nssa""" +354 36 regularizer """no""" +354 36 optimizer """adam""" +354 36 training_loop """owa""" +354 36 negative_sampler """basic""" +354 36 evaluator """rankbased""" +355 1 model.embedding_dim 1.0 +355 1 model.c_min 0.09497071793347357 +355 1 model.c_max 7.484611688620057 +355 1 optimizer.lr 0.003730130967095799 +355 1 training.batch_size 2.0 +355 1 training.label_smoothing 0.17249001628841007 +355 2 model.embedding_dim 1.0 +355 2 model.c_min 0.07745991560269244 +355 2 model.c_max 8.52053608019844 +355 2 optimizer.lr 0.0038818457433092628 +355 2 training.batch_size 2.0 +355 2 training.label_smoothing 0.43503498386113504 +355 3 model.embedding_dim 1.0 +355 3 model.c_min 0.024406079735815888 +355 3 model.c_max 4.56654185007833 +355 3 optimizer.lr 0.006015748409468486 +355 3 training.batch_size 0.0 +355 3 training.label_smoothing 0.0018428219916198872 +355 4 model.embedding_dim 0.0 +355 4 model.c_min 0.020386983683713635 +355 4 model.c_max 5.047243225054988 +355 4 optimizer.lr 0.012525331940361628 +355 4 training.batch_size 0.0 +355 4 training.label_smoothing 0.22092231476299554 +355 5 model.embedding_dim 0.0 +355 5 model.c_min 0.014103972121183675 +355 5 model.c_max 6.01066692867239 +355 5 optimizer.lr 0.0067625470226271205 +355 5 training.batch_size 0.0 +355 5 training.label_smoothing 0.2964221202560865 +355 6 model.embedding_dim 0.0 +355 6 model.c_min 0.09850648616881487 +355 6 model.c_max 8.55281200660062 +355 6 optimizer.lr 0.011096523158325914 +355 6 training.batch_size 1.0 +355 6 training.label_smoothing 0.10935621696913911 +355 7 model.embedding_dim 0.0 +355 7 model.c_min 0.032892428571627434 +355 7 model.c_max 2.2103454568623024 +355 7 optimizer.lr 0.0011370030299452482 +355 7 training.batch_size 1.0 +355 7 training.label_smoothing 0.19277016874614955 +355 8 model.embedding_dim 0.0 +355 8 model.c_min 0.047672493371037385 +355 8 model.c_max 7.846219699108328 +355 8 optimizer.lr 
0.0027008925239187776 +355 8 training.batch_size 2.0 +355 8 training.label_smoothing 0.09028312946934891 +355 9 model.embedding_dim 0.0 +355 9 model.c_min 0.025636310750001074 +355 9 model.c_max 6.332981389860753 +355 9 optimizer.lr 0.002515741033297663 +355 9 training.batch_size 1.0 +355 9 training.label_smoothing 0.007852022370549785 +355 10 model.embedding_dim 0.0 +355 10 model.c_min 0.02665226746226207 +355 10 model.c_max 1.1926115181702963 +355 10 optimizer.lr 0.004136047217640242 +355 10 training.batch_size 2.0 +355 10 training.label_smoothing 0.07784930274732157 +355 11 model.embedding_dim 1.0 +355 11 model.c_min 0.011321625514352146 +355 11 model.c_max 9.768809960672622 +355 11 optimizer.lr 0.01792127149873695 +355 11 training.batch_size 2.0 +355 11 training.label_smoothing 0.364310369901632 +355 12 model.embedding_dim 0.0 +355 12 model.c_min 0.01032305482377047 +355 12 model.c_max 9.103009857928189 +355 12 optimizer.lr 0.025920027729833546 +355 12 training.batch_size 2.0 +355 12 training.label_smoothing 0.1272427001055464 +355 13 model.embedding_dim 0.0 +355 13 model.c_min 0.013253645798387306 +355 13 model.c_max 2.683231774418246 +355 13 optimizer.lr 0.03644250744601631 +355 13 training.batch_size 1.0 +355 13 training.label_smoothing 0.00250002588333274 +355 14 model.embedding_dim 2.0 +355 14 model.c_min 0.012758175362824191 +355 14 model.c_max 3.3997125299760023 +355 14 optimizer.lr 0.002089926091475503 +355 14 training.batch_size 0.0 +355 14 training.label_smoothing 0.026698537049532838 +355 1 dataset """fb15k237""" +355 1 model """kg2e""" +355 1 loss """bceaftersigmoid""" +355 1 regularizer """no""" +355 1 optimizer """adam""" +355 1 training_loop """lcwa""" +355 1 evaluator """rankbased""" +355 2 dataset """fb15k237""" +355 2 model """kg2e""" +355 2 loss """bceaftersigmoid""" +355 2 regularizer """no""" +355 2 optimizer """adam""" +355 2 training_loop """lcwa""" +355 2 evaluator """rankbased""" +355 3 dataset """fb15k237""" +355 3 model """kg2e""" 
+355 3 loss """bceaftersigmoid""" +355 3 regularizer """no""" +355 3 optimizer """adam""" +355 3 training_loop """lcwa""" +355 3 evaluator """rankbased""" +355 4 dataset """fb15k237""" +355 4 model """kg2e""" +355 4 loss """bceaftersigmoid""" +355 4 regularizer """no""" +355 4 optimizer """adam""" +355 4 training_loop """lcwa""" +355 4 evaluator """rankbased""" +355 5 dataset """fb15k237""" +355 5 model """kg2e""" +355 5 loss """bceaftersigmoid""" +355 5 regularizer """no""" +355 5 optimizer """adam""" +355 5 training_loop """lcwa""" +355 5 evaluator """rankbased""" +355 6 dataset """fb15k237""" +355 6 model """kg2e""" +355 6 loss """bceaftersigmoid""" +355 6 regularizer """no""" +355 6 optimizer """adam""" +355 6 training_loop """lcwa""" +355 6 evaluator """rankbased""" +355 7 dataset """fb15k237""" +355 7 model """kg2e""" +355 7 loss """bceaftersigmoid""" +355 7 regularizer """no""" +355 7 optimizer """adam""" +355 7 training_loop """lcwa""" +355 7 evaluator """rankbased""" +355 8 dataset """fb15k237""" +355 8 model """kg2e""" +355 8 loss """bceaftersigmoid""" +355 8 regularizer """no""" +355 8 optimizer """adam""" +355 8 training_loop """lcwa""" +355 8 evaluator """rankbased""" +355 9 dataset """fb15k237""" +355 9 model """kg2e""" +355 9 loss """bceaftersigmoid""" +355 9 regularizer """no""" +355 9 optimizer """adam""" +355 9 training_loop """lcwa""" +355 9 evaluator """rankbased""" +355 10 dataset """fb15k237""" +355 10 model """kg2e""" +355 10 loss """bceaftersigmoid""" +355 10 regularizer """no""" +355 10 optimizer """adam""" +355 10 training_loop """lcwa""" +355 10 evaluator """rankbased""" +355 11 dataset """fb15k237""" +355 11 model """kg2e""" +355 11 loss """bceaftersigmoid""" +355 11 regularizer """no""" +355 11 optimizer """adam""" +355 11 training_loop """lcwa""" +355 11 evaluator """rankbased""" +355 12 dataset """fb15k237""" +355 12 model """kg2e""" +355 12 loss """bceaftersigmoid""" +355 12 regularizer """no""" +355 12 optimizer """adam""" +355 12 
training_loop """lcwa""" +355 12 evaluator """rankbased""" +355 13 dataset """fb15k237""" +355 13 model """kg2e""" +355 13 loss """bceaftersigmoid""" +355 13 regularizer """no""" +355 13 optimizer """adam""" +355 13 training_loop """lcwa""" +355 13 evaluator """rankbased""" +355 14 dataset """fb15k237""" +355 14 model """kg2e""" +355 14 loss """bceaftersigmoid""" +355 14 regularizer """no""" +355 14 optimizer """adam""" +355 14 training_loop """lcwa""" +355 14 evaluator """rankbased""" +356 1 model.embedding_dim 0.0 +356 1 model.c_min 0.06060163222240221 +356 1 model.c_max 3.6561352251662695 +356 1 optimizer.lr 0.07721822949303248 +356 1 training.batch_size 2.0 +356 1 training.label_smoothing 0.11541442858694051 +356 2 model.embedding_dim 2.0 +356 2 model.c_min 0.06259373760405966 +356 2 model.c_max 6.476357002881225 +356 2 optimizer.lr 0.025549356261481415 +356 2 training.batch_size 0.0 +356 2 training.label_smoothing 0.002318347655049966 +356 3 model.embedding_dim 1.0 +356 3 model.c_min 0.010554939770801184 +356 3 model.c_max 8.50041382030783 +356 3 optimizer.lr 0.004512130523500356 +356 3 training.batch_size 2.0 +356 3 training.label_smoothing 0.0026026557196724706 +356 4 model.embedding_dim 1.0 +356 4 model.c_min 0.027058444549196595 +356 4 model.c_max 6.605818707733078 +356 4 optimizer.lr 0.0016411086760090346 +356 4 training.batch_size 2.0 +356 4 training.label_smoothing 0.020990557347295248 +356 1 dataset """fb15k237""" +356 1 model """kg2e""" +356 1 loss """softplus""" +356 1 regularizer """no""" +356 1 optimizer """adam""" +356 1 training_loop """lcwa""" +356 1 evaluator """rankbased""" +356 2 dataset """fb15k237""" +356 2 model """kg2e""" +356 2 loss """softplus""" +356 2 regularizer """no""" +356 2 optimizer """adam""" +356 2 training_loop """lcwa""" +356 2 evaluator """rankbased""" +356 3 dataset """fb15k237""" +356 3 model """kg2e""" +356 3 loss """softplus""" +356 3 regularizer """no""" +356 3 optimizer """adam""" +356 3 training_loop """lcwa""" +356 
3 evaluator """rankbased""" +356 4 dataset """fb15k237""" +356 4 model """kg2e""" +356 4 loss """softplus""" +356 4 regularizer """no""" +356 4 optimizer """adam""" +356 4 training_loop """lcwa""" +356 4 evaluator """rankbased""" +357 1 model.embedding_dim 2.0 +357 1 model.c_min 0.06736055580703233 +357 1 model.c_max 7.262974154344439 +357 1 optimizer.lr 0.006306231369746588 +357 1 training.batch_size 1.0 +357 1 training.label_smoothing 0.0029101933286137254 +357 2 model.embedding_dim 1.0 +357 2 model.c_min 0.012584429597804356 +357 2 model.c_max 7.653103362026086 +357 2 optimizer.lr 0.0016376896043218234 +357 2 training.batch_size 0.0 +357 2 training.label_smoothing 0.029252930254890824 +357 3 model.embedding_dim 2.0 +357 3 model.c_min 0.07592088742998686 +357 3 model.c_max 9.21933406309583 +357 3 optimizer.lr 0.005335844308742847 +357 3 training.batch_size 1.0 +357 3 training.label_smoothing 0.004843202807668815 +357 4 model.embedding_dim 0.0 +357 4 model.c_min 0.021725403269538025 +357 4 model.c_max 5.894489716149437 +357 4 optimizer.lr 0.09132210093522523 +357 4 training.batch_size 0.0 +357 4 training.label_smoothing 0.1676714057451165 +357 5 model.embedding_dim 1.0 +357 5 model.c_min 0.011789878271091826 +357 5 model.c_max 4.6752439487752975 +357 5 optimizer.lr 0.0020434835205002592 +357 5 training.batch_size 2.0 +357 5 training.label_smoothing 0.0013464368466342869 +357 6 model.embedding_dim 1.0 +357 6 model.c_min 0.05882712799308996 +357 6 model.c_max 5.447811875883918 +357 6 optimizer.lr 0.008597320306539247 +357 6 training.batch_size 2.0 +357 6 training.label_smoothing 0.057067923221737586 +357 7 model.embedding_dim 2.0 +357 7 model.c_min 0.06681969079279916 +357 7 model.c_max 9.364058449773037 +357 7 optimizer.lr 0.0038110580825388577 +357 7 training.batch_size 0.0 +357 7 training.label_smoothing 0.029372435138166727 +357 8 model.embedding_dim 2.0 +357 8 model.c_min 0.01045532138097919 +357 8 model.c_max 2.8691884460314854 +357 8 optimizer.lr 
0.019689984227416735 +357 8 training.batch_size 1.0 +357 8 training.label_smoothing 0.37627050171126275 +357 9 model.embedding_dim 1.0 +357 9 model.c_min 0.01209875340068695 +357 9 model.c_max 2.7794356764650416 +357 9 optimizer.lr 0.01665273138535723 +357 9 training.batch_size 2.0 +357 9 training.label_smoothing 0.022074109047971042 +357 10 model.embedding_dim 1.0 +357 10 model.c_min 0.024201336394357856 +357 10 model.c_max 9.120644455943347 +357 10 optimizer.lr 0.004727584705567662 +357 10 training.batch_size 0.0 +357 10 training.label_smoothing 0.271178709770119 +357 11 model.embedding_dim 2.0 +357 11 model.c_min 0.018043719650316894 +357 11 model.c_max 9.961115811367577 +357 11 optimizer.lr 0.009396263529270532 +357 11 training.batch_size 0.0 +357 11 training.label_smoothing 0.0019541497376960094 +357 1 dataset """fb15k237""" +357 1 model """kg2e""" +357 1 loss """bceaftersigmoid""" +357 1 regularizer """no""" +357 1 optimizer """adam""" +357 1 training_loop """lcwa""" +357 1 evaluator """rankbased""" +357 2 dataset """fb15k237""" +357 2 model """kg2e""" +357 2 loss """bceaftersigmoid""" +357 2 regularizer """no""" +357 2 optimizer """adam""" +357 2 training_loop """lcwa""" +357 2 evaluator """rankbased""" +357 3 dataset """fb15k237""" +357 3 model """kg2e""" +357 3 loss """bceaftersigmoid""" +357 3 regularizer """no""" +357 3 optimizer """adam""" +357 3 training_loop """lcwa""" +357 3 evaluator """rankbased""" +357 4 dataset """fb15k237""" +357 4 model """kg2e""" +357 4 loss """bceaftersigmoid""" +357 4 regularizer """no""" +357 4 optimizer """adam""" +357 4 training_loop """lcwa""" +357 4 evaluator """rankbased""" +357 5 dataset """fb15k237""" +357 5 model """kg2e""" +357 5 loss """bceaftersigmoid""" +357 5 regularizer """no""" +357 5 optimizer """adam""" +357 5 training_loop """lcwa""" +357 5 evaluator """rankbased""" +357 6 dataset """fb15k237""" +357 6 model """kg2e""" +357 6 loss """bceaftersigmoid""" +357 6 regularizer """no""" +357 6 optimizer 
"""adam""" +357 6 training_loop """lcwa""" +357 6 evaluator """rankbased""" +357 7 dataset """fb15k237""" +357 7 model """kg2e""" +357 7 loss """bceaftersigmoid""" +357 7 regularizer """no""" +357 7 optimizer """adam""" +357 7 training_loop """lcwa""" +357 7 evaluator """rankbased""" +357 8 dataset """fb15k237""" +357 8 model """kg2e""" +357 8 loss """bceaftersigmoid""" +357 8 regularizer """no""" +357 8 optimizer """adam""" +357 8 training_loop """lcwa""" +357 8 evaluator """rankbased""" +357 9 dataset """fb15k237""" +357 9 model """kg2e""" +357 9 loss """bceaftersigmoid""" +357 9 regularizer """no""" +357 9 optimizer """adam""" +357 9 training_loop """lcwa""" +357 9 evaluator """rankbased""" +357 10 dataset """fb15k237""" +357 10 model """kg2e""" +357 10 loss """bceaftersigmoid""" +357 10 regularizer """no""" +357 10 optimizer """adam""" +357 10 training_loop """lcwa""" +357 10 evaluator """rankbased""" +357 11 dataset """fb15k237""" +357 11 model """kg2e""" +357 11 loss """bceaftersigmoid""" +357 11 regularizer """no""" +357 11 optimizer """adam""" +357 11 training_loop """lcwa""" +357 11 evaluator """rankbased""" +358 1 model.embedding_dim 1.0 +358 1 model.c_min 0.06621836109726402 +358 1 model.c_max 9.62824210446229 +358 1 optimizer.lr 0.019773023195625412 +358 1 training.batch_size 2.0 +358 1 training.label_smoothing 0.01115883006178316 +358 2 model.embedding_dim 2.0 +358 2 model.c_min 0.03200333769372327 +358 2 model.c_max 4.773622906669411 +358 2 optimizer.lr 0.0022136331304963923 +358 2 training.batch_size 2.0 +358 2 training.label_smoothing 0.7979629186713668 +358 3 model.embedding_dim 1.0 +358 3 model.c_min 0.024274220622653945 +358 3 model.c_max 1.236995090879296 +358 3 optimizer.lr 0.04828534243501362 +358 3 training.batch_size 0.0 +358 3 training.label_smoothing 0.1791268193960119 +358 4 model.embedding_dim 2.0 +358 4 model.c_min 0.015056629137630326 +358 4 model.c_max 3.4060963013838492 +358 4 optimizer.lr 0.08496758874022836 +358 4 
training.batch_size 2.0 +358 4 training.label_smoothing 0.2304376535853541 +358 5 model.embedding_dim 2.0 +358 5 model.c_min 0.07585778748285253 +358 5 model.c_max 2.821581261425264 +358 5 optimizer.lr 0.0612886146646933 +358 5 training.batch_size 1.0 +358 5 training.label_smoothing 0.309029207377489 +358 1 dataset """fb15k237""" +358 1 model """kg2e""" +358 1 loss """softplus""" +358 1 regularizer """no""" +358 1 optimizer """adam""" +358 1 training_loop """lcwa""" +358 1 evaluator """rankbased""" +358 2 dataset """fb15k237""" +358 2 model """kg2e""" +358 2 loss """softplus""" +358 2 regularizer """no""" +358 2 optimizer """adam""" +358 2 training_loop """lcwa""" +358 2 evaluator """rankbased""" +358 3 dataset """fb15k237""" +358 3 model """kg2e""" +358 3 loss """softplus""" +358 3 regularizer """no""" +358 3 optimizer """adam""" +358 3 training_loop """lcwa""" +358 3 evaluator """rankbased""" +358 4 dataset """fb15k237""" +358 4 model """kg2e""" +358 4 loss """softplus""" +358 4 regularizer """no""" +358 4 optimizer """adam""" +358 4 training_loop """lcwa""" +358 4 evaluator """rankbased""" +358 5 dataset """fb15k237""" +358 5 model """kg2e""" +358 5 loss """softplus""" +358 5 regularizer """no""" +358 5 optimizer """adam""" +358 5 training_loop """lcwa""" +358 5 evaluator """rankbased""" +359 1 model.embedding_dim 2.0 +359 1 model.c_min 0.058848830563162806 +359 1 model.c_max 9.0804462265742 +359 1 optimizer.lr 0.05630169982185258 +359 1 training.batch_size 1.0 +359 1 training.label_smoothing 0.9931667262505758 +359 2 model.embedding_dim 1.0 +359 2 model.c_min 0.020375116694181083 +359 2 model.c_max 6.269607087569727 +359 2 optimizer.lr 0.0018499517748655735 +359 2 training.batch_size 1.0 +359 2 training.label_smoothing 0.060808341712215636 +359 3 model.embedding_dim 2.0 +359 3 model.c_min 0.060957234929107595 +359 3 model.c_max 9.044282646264083 +359 3 optimizer.lr 0.003502237994575488 +359 3 training.batch_size 1.0 +359 3 training.label_smoothing 
0.04717981777228424 +359 1 dataset """fb15k237""" +359 1 model """kg2e""" +359 1 loss """crossentropy""" +359 1 regularizer """no""" +359 1 optimizer """adam""" +359 1 training_loop """lcwa""" +359 1 evaluator """rankbased""" +359 2 dataset """fb15k237""" +359 2 model """kg2e""" +359 2 loss """crossentropy""" +359 2 regularizer """no""" +359 2 optimizer """adam""" +359 2 training_loop """lcwa""" +359 2 evaluator """rankbased""" +359 3 dataset """fb15k237""" +359 3 model """kg2e""" +359 3 loss """crossentropy""" +359 3 regularizer """no""" +359 3 optimizer """adam""" +359 3 training_loop """lcwa""" +359 3 evaluator """rankbased""" +360 1 model.embedding_dim 0.0 +360 1 model.c_min 0.031459383308083656 +360 1 model.c_max 8.24940799562891 +360 1 optimizer.lr 0.03495973538302128 +360 1 training.batch_size 2.0 +360 1 training.label_smoothing 0.02291008741352003 +360 2 model.embedding_dim 1.0 +360 2 model.c_min 0.015757312742549327 +360 2 model.c_max 3.830259831542925 +360 2 optimizer.lr 0.018317066206063704 +360 2 training.batch_size 1.0 +360 2 training.label_smoothing 0.0010924614740808782 +360 3 model.embedding_dim 2.0 +360 3 model.c_min 0.012762636653029447 +360 3 model.c_max 7.66978409240801 +360 3 optimizer.lr 0.0032157149601986013 +360 3 training.batch_size 0.0 +360 3 training.label_smoothing 0.34190924336037837 +360 4 model.embedding_dim 2.0 +360 4 model.c_min 0.01037969020857825 +360 4 model.c_max 7.800977145635644 +360 4 optimizer.lr 0.0700358455469361 +360 4 training.batch_size 2.0 +360 4 training.label_smoothing 0.012190114491741856 +360 5 model.embedding_dim 1.0 +360 5 model.c_min 0.010922774547452064 +360 5 model.c_max 7.455399071577645 +360 5 optimizer.lr 0.00498869116272894 +360 5 training.batch_size 0.0 +360 5 training.label_smoothing 0.029484874911373176 +360 6 model.embedding_dim 1.0 +360 6 model.c_min 0.06034853823533426 +360 6 model.c_max 3.611700925063076 +360 6 optimizer.lr 0.025519006176788175 +360 6 training.batch_size 0.0 +360 6 
training.label_smoothing 0.00257388419338917 +360 1 dataset """fb15k237""" +360 1 model """kg2e""" +360 1 loss """crossentropy""" +360 1 regularizer """no""" +360 1 optimizer """adam""" +360 1 training_loop """lcwa""" +360 1 evaluator """rankbased""" +360 2 dataset """fb15k237""" +360 2 model """kg2e""" +360 2 loss """crossentropy""" +360 2 regularizer """no""" +360 2 optimizer """adam""" +360 2 training_loop """lcwa""" +360 2 evaluator """rankbased""" +360 3 dataset """fb15k237""" +360 3 model """kg2e""" +360 3 loss """crossentropy""" +360 3 regularizer """no""" +360 3 optimizer """adam""" +360 3 training_loop """lcwa""" +360 3 evaluator """rankbased""" +360 4 dataset """fb15k237""" +360 4 model """kg2e""" +360 4 loss """crossentropy""" +360 4 regularizer """no""" +360 4 optimizer """adam""" +360 4 training_loop """lcwa""" +360 4 evaluator """rankbased""" +360 5 dataset """fb15k237""" +360 5 model """kg2e""" +360 5 loss """crossentropy""" +360 5 regularizer """no""" +360 5 optimizer """adam""" +360 5 training_loop """lcwa""" +360 5 evaluator """rankbased""" +360 6 dataset """fb15k237""" +360 6 model """kg2e""" +360 6 loss """crossentropy""" +360 6 regularizer """no""" +360 6 optimizer """adam""" +360 6 training_loop """lcwa""" +360 6 evaluator """rankbased""" +361 1 model.embedding_dim 0.0 +361 1 model.c_min 0.022949752988140063 +361 1 model.c_max 7.372066397539312 +361 1 training.batch_size 1.0 +361 1 training.label_smoothing 0.18965400575757976 +361 2 model.embedding_dim 0.0 +361 2 model.c_min 0.02015349336580418 +361 2 model.c_max 7.8664082729176705 +361 2 training.batch_size 0.0 +361 2 training.label_smoothing 0.6352084075191291 +361 3 model.embedding_dim 2.0 +361 3 model.c_min 0.010086529123016632 +361 3 model.c_max 9.221987642820263 +361 3 training.batch_size 1.0 +361 3 training.label_smoothing 0.58777658758191 +361 4 model.embedding_dim 1.0 +361 4 model.c_min 0.054773813501172865 +361 4 model.c_max 9.263003588868944 +361 4 training.batch_size 1.0 +361 4 
training.label_smoothing 0.33692210180964455 +361 5 model.embedding_dim 0.0 +361 5 model.c_min 0.016802724577308786 +361 5 model.c_max 4.77005452184086 +361 5 training.batch_size 1.0 +361 5 training.label_smoothing 0.041420567380069696 +361 6 model.embedding_dim 2.0 +361 6 model.c_min 0.058476868744355516 +361 6 model.c_max 9.163993809171835 +361 6 training.batch_size 2.0 +361 6 training.label_smoothing 0.6815809365084858 +361 7 model.embedding_dim 1.0 +361 7 model.c_min 0.05718152526777564 +361 7 model.c_max 5.170233064343286 +361 7 training.batch_size 0.0 +361 7 training.label_smoothing 0.12864931141466043 +361 8 model.embedding_dim 1.0 +361 8 model.c_min 0.014245375838785915 +361 8 model.c_max 4.592375629290119 +361 8 training.batch_size 0.0 +361 8 training.label_smoothing 0.0032551327446502442 +361 9 model.embedding_dim 2.0 +361 9 model.c_min 0.03259824338426227 +361 9 model.c_max 3.2483309012765944 +361 9 training.batch_size 0.0 +361 9 training.label_smoothing 0.07714397368931311 +361 10 model.embedding_dim 1.0 +361 10 model.c_min 0.01136798321212865 +361 10 model.c_max 8.226529902208405 +361 10 training.batch_size 0.0 +361 10 training.label_smoothing 0.0025629088496773623 +361 11 model.embedding_dim 0.0 +361 11 model.c_min 0.022295822092225637 +361 11 model.c_max 3.343539469373998 +361 11 training.batch_size 0.0 +361 11 training.label_smoothing 0.016801054585117717 +361 12 model.embedding_dim 2.0 +361 12 model.c_min 0.016496688349926462 +361 12 model.c_max 3.0776537470918517 +361 12 training.batch_size 0.0 +361 12 training.label_smoothing 0.017075134794408968 +361 13 model.embedding_dim 2.0 +361 13 model.c_min 0.044006404032310736 +361 13 model.c_max 2.9654450680559092 +361 13 training.batch_size 1.0 +361 13 training.label_smoothing 0.0021159157439735532 +361 14 model.embedding_dim 1.0 +361 14 model.c_min 0.0419078263253751 +361 14 model.c_max 9.195521599048273 +361 14 training.batch_size 2.0 +361 14 training.label_smoothing 0.7240025742276442 +361 15 
model.embedding_dim 0.0 +361 15 model.c_min 0.02622348797669687 +361 15 model.c_max 9.253251760116138 +361 15 training.batch_size 1.0 +361 15 training.label_smoothing 0.105602980813869 +361 16 model.embedding_dim 0.0 +361 16 model.c_min 0.0688965879777596 +361 16 model.c_max 3.2032724989784227 +361 16 training.batch_size 0.0 +361 16 training.label_smoothing 0.6263992439247915 +361 17 model.embedding_dim 0.0 +361 17 model.c_min 0.010886628678070099 +361 17 model.c_max 4.199327213204355 +361 17 training.batch_size 0.0 +361 17 training.label_smoothing 0.06714583710651129 +361 18 model.embedding_dim 0.0 +361 18 model.c_min 0.021526897247696318 +361 18 model.c_max 4.035400067152098 +361 18 training.batch_size 0.0 +361 18 training.label_smoothing 0.002373684609099527 +361 19 model.embedding_dim 0.0 +361 19 model.c_min 0.0625575389592033 +361 19 model.c_max 1.1686955135781072 +361 19 training.batch_size 2.0 +361 19 training.label_smoothing 0.020074177168611828 +361 20 model.embedding_dim 0.0 +361 20 model.c_min 0.012271669694721342 +361 20 model.c_max 1.0513806056178376 +361 20 training.batch_size 1.0 +361 20 training.label_smoothing 0.02249594017938793 +361 21 model.embedding_dim 1.0 +361 21 model.c_min 0.08179031989908703 +361 21 model.c_max 3.5834405507980396 +361 21 training.batch_size 1.0 +361 21 training.label_smoothing 0.06910150324460085 +361 22 model.embedding_dim 2.0 +361 22 model.c_min 0.03475715277897146 +361 22 model.c_max 1.3998809791754474 +361 22 training.batch_size 2.0 +361 22 training.label_smoothing 0.1633776219429333 +361 23 model.embedding_dim 0.0 +361 23 model.c_min 0.022022276427785713 +361 23 model.c_max 6.01628876108999 +361 23 training.batch_size 1.0 +361 23 training.label_smoothing 0.2914715464883257 +361 24 model.embedding_dim 1.0 +361 24 model.c_min 0.025528696679391647 +361 24 model.c_max 6.222954747471781 +361 24 training.batch_size 1.0 +361 24 training.label_smoothing 0.5334234514917604 +361 25 model.embedding_dim 2.0 +361 25 model.c_min 
0.013264876546855911 +361 25 model.c_max 3.139513698342405 +361 25 training.batch_size 2.0 +361 25 training.label_smoothing 0.003111336578594617 +361 26 model.embedding_dim 1.0 +361 26 model.c_min 0.08979281813439452 +361 26 model.c_max 4.313888172466895 +361 26 training.batch_size 0.0 +361 26 training.label_smoothing 0.0758528741254482 +361 27 model.embedding_dim 0.0 +361 27 model.c_min 0.08641036947559821 +361 27 model.c_max 9.684749601900508 +361 27 training.batch_size 0.0 +361 27 training.label_smoothing 0.08724288754532461 +361 28 model.embedding_dim 1.0 +361 28 model.c_min 0.049158649686502215 +361 28 model.c_max 7.035311622329599 +361 28 training.batch_size 1.0 +361 28 training.label_smoothing 0.03391421970701226 +361 29 model.embedding_dim 0.0 +361 29 model.c_min 0.040614075907457914 +361 29 model.c_max 7.45310757449655 +361 29 training.batch_size 0.0 +361 29 training.label_smoothing 0.0022815524690744317 +361 30 model.embedding_dim 1.0 +361 30 model.c_min 0.04580052327446304 +361 30 model.c_max 7.643136846934095 +361 30 training.batch_size 1.0 +361 30 training.label_smoothing 0.7490815464022125 +361 31 model.embedding_dim 0.0 +361 31 model.c_min 0.017023282638761666 +361 31 model.c_max 4.657496682288301 +361 31 training.batch_size 1.0 +361 31 training.label_smoothing 0.09307918883694047 +361 32 model.embedding_dim 2.0 +361 32 model.c_min 0.08796254281516255 +361 32 model.c_max 1.8257635291833005 +361 32 training.batch_size 0.0 +361 32 training.label_smoothing 0.006876637878510649 +361 33 model.embedding_dim 1.0 +361 33 model.c_min 0.013649466340771252 +361 33 model.c_max 8.803313654159846 +361 33 training.batch_size 2.0 +361 33 training.label_smoothing 0.6683326758711421 +361 34 model.embedding_dim 1.0 +361 34 model.c_min 0.051205928700088665 +361 34 model.c_max 5.161017151260608 +361 34 training.batch_size 2.0 +361 34 training.label_smoothing 0.0021311462227577155 +361 35 model.embedding_dim 0.0 +361 35 model.c_min 0.02029558424996511 +361 35 model.c_max 
5.540921486999489 +361 35 training.batch_size 0.0 +361 35 training.label_smoothing 0.018175175326101682 +361 36 model.embedding_dim 2.0 +361 36 model.c_min 0.08881714775766769 +361 36 model.c_max 8.466867712668495 +361 36 training.batch_size 0.0 +361 36 training.label_smoothing 0.2395221989385714 +361 37 model.embedding_dim 0.0 +361 37 model.c_min 0.07832076236479008 +361 37 model.c_max 8.030027493483715 +361 37 training.batch_size 0.0 +361 37 training.label_smoothing 0.012024636797140944 +361 38 model.embedding_dim 1.0 +361 38 model.c_min 0.017523147610124426 +361 38 model.c_max 5.801361507061389 +361 38 training.batch_size 1.0 +361 38 training.label_smoothing 0.01095591665288183 +361 39 model.embedding_dim 0.0 +361 39 model.c_min 0.0214410126932453 +361 39 model.c_max 9.429195293180708 +361 39 training.batch_size 2.0 +361 39 training.label_smoothing 0.012153016836794938 +361 40 model.embedding_dim 0.0 +361 40 model.c_min 0.01399899232302716 +361 40 model.c_max 5.025958602253284 +361 40 training.batch_size 0.0 +361 40 training.label_smoothing 0.11799606929036571 +361 41 model.embedding_dim 2.0 +361 41 model.c_min 0.013861407956192163 +361 41 model.c_max 8.877395219082437 +361 41 training.batch_size 2.0 +361 41 training.label_smoothing 0.7371907737454001 +361 42 model.embedding_dim 1.0 +361 42 model.c_min 0.01087227998618141 +361 42 model.c_max 3.3411845263802875 +361 42 training.batch_size 2.0 +361 42 training.label_smoothing 0.3947787299593829 +361 43 model.embedding_dim 0.0 +361 43 model.c_min 0.03400288547952671 +361 43 model.c_max 9.659350009963148 +361 43 training.batch_size 2.0 +361 43 training.label_smoothing 0.797361554386198 +361 44 model.embedding_dim 1.0 +361 44 model.c_min 0.016555954646402023 +361 44 model.c_max 3.1671441397578635 +361 44 training.batch_size 0.0 +361 44 training.label_smoothing 0.3790332589589352 +361 45 model.embedding_dim 2.0 +361 45 model.c_min 0.0787390534309463 +361 45 model.c_max 6.902989648302007 +361 45 training.batch_size 2.0 
+361 45 training.label_smoothing 0.00471959100112336 +361 46 model.embedding_dim 2.0 +361 46 model.c_min 0.049370323030167335 +361 46 model.c_max 8.49716454515547 +361 46 training.batch_size 1.0 +361 46 training.label_smoothing 0.20394453596914186 +361 47 model.embedding_dim 1.0 +361 47 model.c_min 0.02775052182064618 +361 47 model.c_max 3.367119626775937 +361 47 training.batch_size 2.0 +361 47 training.label_smoothing 0.17749235073331018 +361 48 model.embedding_dim 0.0 +361 48 model.c_min 0.08142319916630554 +361 48 model.c_max 4.357952394934793 +361 48 training.batch_size 0.0 +361 48 training.label_smoothing 0.11059021775761063 +361 49 model.embedding_dim 0.0 +361 49 model.c_min 0.012909413677248288 +361 49 model.c_max 8.302048878097695 +361 49 training.batch_size 2.0 +361 49 training.label_smoothing 0.13497434424618485 +361 50 model.embedding_dim 2.0 +361 50 model.c_min 0.0177185344555783 +361 50 model.c_max 1.0850112175487292 +361 50 training.batch_size 2.0 +361 50 training.label_smoothing 0.0067911341825937985 +361 51 model.embedding_dim 2.0 +361 51 model.c_min 0.012651901257888997 +361 51 model.c_max 5.500189707354424 +361 51 training.batch_size 0.0 +361 51 training.label_smoothing 0.01959033325824006 +361 52 model.embedding_dim 2.0 +361 52 model.c_min 0.03398020352192117 +361 52 model.c_max 4.6279846248220675 +361 52 training.batch_size 0.0 +361 52 training.label_smoothing 0.22012404349543205 +361 53 model.embedding_dim 2.0 +361 53 model.c_min 0.022247033351012736 +361 53 model.c_max 2.0216046005032036 +361 53 training.batch_size 1.0 +361 53 training.label_smoothing 0.04395292580300898 +361 54 model.embedding_dim 1.0 +361 54 model.c_min 0.0647738250143673 +361 54 model.c_max 3.166772208622528 +361 54 training.batch_size 1.0 +361 54 training.label_smoothing 0.5117822176413767 +361 55 model.embedding_dim 0.0 +361 55 model.c_min 0.08199934568394268 +361 55 model.c_max 7.136435509947237 +361 55 training.batch_size 0.0 +361 55 training.label_smoothing 
0.006679160568540321 +361 56 model.embedding_dim 1.0 +361 56 model.c_min 0.09890765094792295 +361 56 model.c_max 2.483102033404372 +361 56 training.batch_size 1.0 +361 56 training.label_smoothing 0.0344935066281057 +361 57 model.embedding_dim 1.0 +361 57 model.c_min 0.07957778998007331 +361 57 model.c_max 4.884223537900294 +361 57 training.batch_size 2.0 +361 57 training.label_smoothing 0.001225101942107435 +361 58 model.embedding_dim 1.0 +361 58 model.c_min 0.011637823194995373 +361 58 model.c_max 8.898080419733425 +361 58 training.batch_size 0.0 +361 58 training.label_smoothing 0.1291842670930831 +361 59 model.embedding_dim 0.0 +361 59 model.c_min 0.04375664762683137 +361 59 model.c_max 3.6200715921472364 +361 59 training.batch_size 1.0 +361 59 training.label_smoothing 0.42652498517537324 +361 60 model.embedding_dim 2.0 +361 60 model.c_min 0.020320316282297197 +361 60 model.c_max 3.293287440963629 +361 60 training.batch_size 1.0 +361 60 training.label_smoothing 0.002771524561310398 +361 61 model.embedding_dim 2.0 +361 61 model.c_min 0.015357420791716925 +361 61 model.c_max 5.792828581972295 +361 61 training.batch_size 1.0 +361 61 training.label_smoothing 0.04828280931498195 +361 62 model.embedding_dim 2.0 +361 62 model.c_min 0.04873962236524069 +361 62 model.c_max 9.102469636810646 +361 62 training.batch_size 0.0 +361 62 training.label_smoothing 0.07287494844359144 +361 63 model.embedding_dim 2.0 +361 63 model.c_min 0.04662461553036682 +361 63 model.c_max 8.015935545398015 +361 63 training.batch_size 2.0 +361 63 training.label_smoothing 0.0074342044209271055 +361 64 model.embedding_dim 1.0 +361 64 model.c_min 0.025446733134644062 +361 64 model.c_max 6.709288556770128 +361 64 training.batch_size 0.0 +361 64 training.label_smoothing 0.43063666302283826 +361 65 model.embedding_dim 0.0 +361 65 model.c_min 0.031591136083759234 +361 65 model.c_max 1.5371839754160803 +361 65 training.batch_size 2.0 +361 65 training.label_smoothing 0.20066155806448074 +361 66 
model.embedding_dim 0.0 +361 66 model.c_min 0.021482769017694095 +361 66 model.c_max 2.1168671532144185 +361 66 training.batch_size 2.0 +361 66 training.label_smoothing 0.22700966641143758 +361 67 model.embedding_dim 1.0 +361 67 model.c_min 0.01684393856372325 +361 67 model.c_max 2.08378760590428 +361 67 training.batch_size 0.0 +361 67 training.label_smoothing 0.021421355335830956 +361 68 model.embedding_dim 2.0 +361 68 model.c_min 0.02037123645138654 +361 68 model.c_max 7.5380817400294315 +361 68 training.batch_size 2.0 +361 68 training.label_smoothing 0.0010531928811075033 +361 69 model.embedding_dim 2.0 +361 69 model.c_min 0.02886358698981604 +361 69 model.c_max 4.215981617018185 +361 69 training.batch_size 0.0 +361 69 training.label_smoothing 0.0370773107699897 +361 70 model.embedding_dim 0.0 +361 70 model.c_min 0.06757815268636677 +361 70 model.c_max 6.9216157824374065 +361 70 training.batch_size 1.0 +361 70 training.label_smoothing 0.1933053282038096 +361 71 model.embedding_dim 0.0 +361 71 model.c_min 0.07285824443445109 +361 71 model.c_max 4.61878833990475 +361 71 training.batch_size 0.0 +361 71 training.label_smoothing 0.038350240756130624 +361 72 model.embedding_dim 1.0 +361 72 model.c_min 0.04208619323388957 +361 72 model.c_max 6.349552415732015 +361 72 training.batch_size 0.0 +361 72 training.label_smoothing 0.04390115415551358 +361 73 model.embedding_dim 0.0 +361 73 model.c_min 0.05685138593281083 +361 73 model.c_max 6.042697682852987 +361 73 training.batch_size 1.0 +361 73 training.label_smoothing 0.0022515563639834437 +361 74 model.embedding_dim 0.0 +361 74 model.c_min 0.05035852140054394 +361 74 model.c_max 5.327479634056883 +361 74 training.batch_size 2.0 +361 74 training.label_smoothing 0.0115897287227415 +361 75 model.embedding_dim 2.0 +361 75 model.c_min 0.010039767716539211 +361 75 model.c_max 4.051530586446334 +361 75 training.batch_size 2.0 +361 75 training.label_smoothing 0.04989409138386715 +361 76 model.embedding_dim 1.0 +361 76 model.c_min 
0.011056747401768905 +361 76 model.c_max 6.97566474624635 +361 76 training.batch_size 1.0 +361 76 training.label_smoothing 0.013918395481590242 +361 77 model.embedding_dim 1.0 +361 77 model.c_min 0.07265917295938866 +361 77 model.c_max 3.752291572094651 +361 77 training.batch_size 2.0 +361 77 training.label_smoothing 0.791557478113513 +361 78 model.embedding_dim 0.0 +361 78 model.c_min 0.028929493845850043 +361 78 model.c_max 2.8023193801312796 +361 78 training.batch_size 1.0 +361 78 training.label_smoothing 0.008368093702729293 +361 79 model.embedding_dim 0.0 +361 79 model.c_min 0.010319269117758505 +361 79 model.c_max 6.541257097747526 +361 79 training.batch_size 0.0 +361 79 training.label_smoothing 0.4709007971945835 +361 80 model.embedding_dim 0.0 +361 80 model.c_min 0.041681688573893094 +361 80 model.c_max 2.806636066717981 +361 80 training.batch_size 0.0 +361 80 training.label_smoothing 0.06792167420334015 +361 81 model.embedding_dim 0.0 +361 81 model.c_min 0.01821443735004847 +361 81 model.c_max 3.224240809069819 +361 81 training.batch_size 2.0 +361 81 training.label_smoothing 0.04064844091589717 +361 82 model.embedding_dim 1.0 +361 82 model.c_min 0.025626724767415485 +361 82 model.c_max 5.254663466252855 +361 82 training.batch_size 1.0 +361 82 training.label_smoothing 0.1981261190375527 +361 83 model.embedding_dim 1.0 +361 83 model.c_min 0.011035843455691018 +361 83 model.c_max 6.700355956905229 +361 83 training.batch_size 1.0 +361 83 training.label_smoothing 0.398605099099481 +361 84 model.embedding_dim 2.0 +361 84 model.c_min 0.039832780742668064 +361 84 model.c_max 2.565990674547618 +361 84 training.batch_size 2.0 +361 84 training.label_smoothing 0.041252599593796874 +361 85 model.embedding_dim 1.0 +361 85 model.c_min 0.029020237866465144 +361 85 model.c_max 5.07859896547234 +361 85 training.batch_size 2.0 +361 85 training.label_smoothing 0.026538804086861798 +361 86 model.embedding_dim 2.0 +361 86 model.c_min 0.016742066871852732 +361 86 model.c_max 
9.26023366961285 +361 86 training.batch_size 1.0 +361 86 training.label_smoothing 0.002176417951886582 +361 87 model.embedding_dim 0.0 +361 87 model.c_min 0.039349610693469456 +361 87 model.c_max 5.61696487527676 +361 87 training.batch_size 1.0 +361 87 training.label_smoothing 0.004492135938957628 +361 88 model.embedding_dim 2.0 +361 88 model.c_min 0.017796372225434343 +361 88 model.c_max 9.24413057177155 +361 88 training.batch_size 1.0 +361 88 training.label_smoothing 0.15899321457732124 +361 89 model.embedding_dim 2.0 +361 89 model.c_min 0.01697471947277926 +361 89 model.c_max 5.59407239597085 +361 89 training.batch_size 2.0 +361 89 training.label_smoothing 0.013697808647668433 +361 90 model.embedding_dim 0.0 +361 90 model.c_min 0.022980627960755975 +361 90 model.c_max 4.29057770813268 +361 90 training.batch_size 2.0 +361 90 training.label_smoothing 0.043054343856895 +361 91 model.embedding_dim 2.0 +361 91 model.c_min 0.07164698931201063 +361 91 model.c_max 3.0689786509490813 +361 91 training.batch_size 0.0 +361 91 training.label_smoothing 0.029599814998967945 +361 92 model.embedding_dim 2.0 +361 92 model.c_min 0.022334384934767238 +361 92 model.c_max 2.606667514089953 +361 92 training.batch_size 0.0 +361 92 training.label_smoothing 0.055012463437598386 +361 93 model.embedding_dim 0.0 +361 93 model.c_min 0.05712342560833652 +361 93 model.c_max 2.2999298542171003 +361 93 training.batch_size 1.0 +361 93 training.label_smoothing 0.023483918225355515 +361 94 model.embedding_dim 0.0 +361 94 model.c_min 0.02006651712430684 +361 94 model.c_max 9.358150398854008 +361 94 training.batch_size 0.0 +361 94 training.label_smoothing 0.007399610123381672 +361 95 model.embedding_dim 0.0 +361 95 model.c_min 0.024924858020446562 +361 95 model.c_max 7.3794698114919335 +361 95 training.batch_size 2.0 +361 95 training.label_smoothing 0.027695503802868632 +361 96 model.embedding_dim 0.0 +361 96 model.c_min 0.01803297158139146 +361 96 model.c_max 8.849541861287582 +361 96 
training.batch_size 2.0 +361 96 training.label_smoothing 0.3371333968436076 +361 97 model.embedding_dim 2.0 +361 97 model.c_min 0.04159892004994701 +361 97 model.c_max 8.007029615007058 +361 97 training.batch_size 2.0 +361 97 training.label_smoothing 0.4421929346566754 +361 98 model.embedding_dim 2.0 +361 98 model.c_min 0.01352621372591591 +361 98 model.c_max 8.953119524036012 +361 98 training.batch_size 1.0 +361 98 training.label_smoothing 0.0055191247205736696 +361 99 model.embedding_dim 1.0 +361 99 model.c_min 0.02238903705180136 +361 99 model.c_max 2.499267678178087 +361 99 training.batch_size 2.0 +361 99 training.label_smoothing 0.816404296206589 +361 100 model.embedding_dim 2.0 +361 100 model.c_min 0.010270677318124423 +361 100 model.c_max 8.444003567124405 +361 100 training.batch_size 2.0 +361 100 training.label_smoothing 0.0014323010044204204 +361 1 dataset """kinships""" +361 1 model """kg2e""" +361 1 loss """bceaftersigmoid""" +361 1 regularizer """no""" +361 1 optimizer """adadelta""" +361 1 training_loop """lcwa""" +361 1 evaluator """rankbased""" +361 2 dataset """kinships""" +361 2 model """kg2e""" +361 2 loss """bceaftersigmoid""" +361 2 regularizer """no""" +361 2 optimizer """adadelta""" +361 2 training_loop """lcwa""" +361 2 evaluator """rankbased""" +361 3 dataset """kinships""" +361 3 model """kg2e""" +361 3 loss """bceaftersigmoid""" +361 3 regularizer """no""" +361 3 optimizer """adadelta""" +361 3 training_loop """lcwa""" +361 3 evaluator """rankbased""" +361 4 dataset """kinships""" +361 4 model """kg2e""" +361 4 loss """bceaftersigmoid""" +361 4 regularizer """no""" +361 4 optimizer """adadelta""" +361 4 training_loop """lcwa""" +361 4 evaluator """rankbased""" +361 5 dataset """kinships""" +361 5 model """kg2e""" +361 5 loss """bceaftersigmoid""" +361 5 regularizer """no""" +361 5 optimizer """adadelta""" +361 5 training_loop """lcwa""" +361 5 evaluator """rankbased""" +361 6 dataset """kinships""" +361 6 model """kg2e""" +361 6 loss 
"""bceaftersigmoid""" +361 6 regularizer """no""" +361 6 optimizer """adadelta""" +361 6 training_loop """lcwa""" +361 6 evaluator """rankbased""" +361 7 dataset """kinships""" +361 7 model """kg2e""" +361 7 loss """bceaftersigmoid""" +361 7 regularizer """no""" +361 7 optimizer """adadelta""" +361 7 training_loop """lcwa""" +361 7 evaluator """rankbased""" +361 8 dataset """kinships""" +361 8 model """kg2e""" +361 8 loss """bceaftersigmoid""" +361 8 regularizer """no""" +361 8 optimizer """adadelta""" +361 8 training_loop """lcwa""" +361 8 evaluator """rankbased""" +361 9 dataset """kinships""" +361 9 model """kg2e""" +361 9 loss """bceaftersigmoid""" +361 9 regularizer """no""" +361 9 optimizer """adadelta""" +361 9 training_loop """lcwa""" +361 9 evaluator """rankbased""" +361 10 dataset """kinships""" +361 10 model """kg2e""" +361 10 loss """bceaftersigmoid""" +361 10 regularizer """no""" +361 10 optimizer """adadelta""" +361 10 training_loop """lcwa""" +361 10 evaluator """rankbased""" +361 11 dataset """kinships""" +361 11 model """kg2e""" +361 11 loss """bceaftersigmoid""" +361 11 regularizer """no""" +361 11 optimizer """adadelta""" +361 11 training_loop """lcwa""" +361 11 evaluator """rankbased""" +361 12 dataset """kinships""" +361 12 model """kg2e""" +361 12 loss """bceaftersigmoid""" +361 12 regularizer """no""" +361 12 optimizer """adadelta""" +361 12 training_loop """lcwa""" +361 12 evaluator """rankbased""" +361 13 dataset """kinships""" +361 13 model """kg2e""" +361 13 loss """bceaftersigmoid""" +361 13 regularizer """no""" +361 13 optimizer """adadelta""" +361 13 training_loop """lcwa""" +361 13 evaluator """rankbased""" +361 14 dataset """kinships""" +361 14 model """kg2e""" +361 14 loss """bceaftersigmoid""" +361 14 regularizer """no""" +361 14 optimizer """adadelta""" +361 14 training_loop """lcwa""" +361 14 evaluator """rankbased""" +361 15 dataset """kinships""" +361 15 model """kg2e""" +361 15 loss """bceaftersigmoid""" +361 15 regularizer 
"""no""" +361 15 optimizer """adadelta""" +361 15 training_loop """lcwa""" +361 15 evaluator """rankbased""" +361 16 dataset """kinships""" +361 16 model """kg2e""" +361 16 loss """bceaftersigmoid""" +361 16 regularizer """no""" +361 16 optimizer """adadelta""" +361 16 training_loop """lcwa""" +361 16 evaluator """rankbased""" +361 17 dataset """kinships""" +361 17 model """kg2e""" +361 17 loss """bceaftersigmoid""" +361 17 regularizer """no""" +361 17 optimizer """adadelta""" +361 17 training_loop """lcwa""" +361 17 evaluator """rankbased""" +361 18 dataset """kinships""" +361 18 model """kg2e""" +361 18 loss """bceaftersigmoid""" +361 18 regularizer """no""" +361 18 optimizer """adadelta""" +361 18 training_loop """lcwa""" +361 18 evaluator """rankbased""" +361 19 dataset """kinships""" +361 19 model """kg2e""" +361 19 loss """bceaftersigmoid""" +361 19 regularizer """no""" +361 19 optimizer """adadelta""" +361 19 training_loop """lcwa""" +361 19 evaluator """rankbased""" +361 20 dataset """kinships""" +361 20 model """kg2e""" +361 20 loss """bceaftersigmoid""" +361 20 regularizer """no""" +361 20 optimizer """adadelta""" +361 20 training_loop """lcwa""" +361 20 evaluator """rankbased""" +361 21 dataset """kinships""" +361 21 model """kg2e""" +361 21 loss """bceaftersigmoid""" +361 21 regularizer """no""" +361 21 optimizer """adadelta""" +361 21 training_loop """lcwa""" +361 21 evaluator """rankbased""" +361 22 dataset """kinships""" +361 22 model """kg2e""" +361 22 loss """bceaftersigmoid""" +361 22 regularizer """no""" +361 22 optimizer """adadelta""" +361 22 training_loop """lcwa""" +361 22 evaluator """rankbased""" +361 23 dataset """kinships""" +361 23 model """kg2e""" +361 23 loss """bceaftersigmoid""" +361 23 regularizer """no""" +361 23 optimizer """adadelta""" +361 23 training_loop """lcwa""" +361 23 evaluator """rankbased""" +361 24 dataset """kinships""" +361 24 model """kg2e""" +361 24 loss """bceaftersigmoid""" +361 24 regularizer """no""" +361 24 
optimizer """adadelta""" +361 24 training_loop """lcwa""" +361 24 evaluator """rankbased""" +361 25 dataset """kinships""" +361 25 model """kg2e""" +361 25 loss """bceaftersigmoid""" +361 25 regularizer """no""" +361 25 optimizer """adadelta""" +361 25 training_loop """lcwa""" +361 25 evaluator """rankbased""" +361 26 dataset """kinships""" +361 26 model """kg2e""" +361 26 loss """bceaftersigmoid""" +361 26 regularizer """no""" +361 26 optimizer """adadelta""" +361 26 training_loop """lcwa""" +361 26 evaluator """rankbased""" +361 27 dataset """kinships""" +361 27 model """kg2e""" +361 27 loss """bceaftersigmoid""" +361 27 regularizer """no""" +361 27 optimizer """adadelta""" +361 27 training_loop """lcwa""" +361 27 evaluator """rankbased""" +361 28 dataset """kinships""" +361 28 model """kg2e""" +361 28 loss """bceaftersigmoid""" +361 28 regularizer """no""" +361 28 optimizer """adadelta""" +361 28 training_loop """lcwa""" +361 28 evaluator """rankbased""" +361 29 dataset """kinships""" +361 29 model """kg2e""" +361 29 loss """bceaftersigmoid""" +361 29 regularizer """no""" +361 29 optimizer """adadelta""" +361 29 training_loop """lcwa""" +361 29 evaluator """rankbased""" +361 30 dataset """kinships""" +361 30 model """kg2e""" +361 30 loss """bceaftersigmoid""" +361 30 regularizer """no""" +361 30 optimizer """adadelta""" +361 30 training_loop """lcwa""" +361 30 evaluator """rankbased""" +361 31 dataset """kinships""" +361 31 model """kg2e""" +361 31 loss """bceaftersigmoid""" +361 31 regularizer """no""" +361 31 optimizer """adadelta""" +361 31 training_loop """lcwa""" +361 31 evaluator """rankbased""" +361 32 dataset """kinships""" +361 32 model """kg2e""" +361 32 loss """bceaftersigmoid""" +361 32 regularizer """no""" +361 32 optimizer """adadelta""" +361 32 training_loop """lcwa""" +361 32 evaluator """rankbased""" +361 33 dataset """kinships""" +361 33 model """kg2e""" +361 33 loss """bceaftersigmoid""" +361 33 regularizer """no""" +361 33 optimizer 
"""adadelta""" +361 33 training_loop """lcwa""" +361 33 evaluator """rankbased""" +361 34 dataset """kinships""" +361 34 model """kg2e""" +361 34 loss """bceaftersigmoid""" +361 34 regularizer """no""" +361 34 optimizer """adadelta""" +361 34 training_loop """lcwa""" +361 34 evaluator """rankbased""" +361 35 dataset """kinships""" +361 35 model """kg2e""" +361 35 loss """bceaftersigmoid""" +361 35 regularizer """no""" +361 35 optimizer """adadelta""" +361 35 training_loop """lcwa""" +361 35 evaluator """rankbased""" +361 36 dataset """kinships""" +361 36 model """kg2e""" +361 36 loss """bceaftersigmoid""" +361 36 regularizer """no""" +361 36 optimizer """adadelta""" +361 36 training_loop """lcwa""" +361 36 evaluator """rankbased""" +361 37 dataset """kinships""" +361 37 model """kg2e""" +361 37 loss """bceaftersigmoid""" +361 37 regularizer """no""" +361 37 optimizer """adadelta""" +361 37 training_loop """lcwa""" +361 37 evaluator """rankbased""" +361 38 dataset """kinships""" +361 38 model """kg2e""" +361 38 loss """bceaftersigmoid""" +361 38 regularizer """no""" +361 38 optimizer """adadelta""" +361 38 training_loop """lcwa""" +361 38 evaluator """rankbased""" +361 39 dataset """kinships""" +361 39 model """kg2e""" +361 39 loss """bceaftersigmoid""" +361 39 regularizer """no""" +361 39 optimizer """adadelta""" +361 39 training_loop """lcwa""" +361 39 evaluator """rankbased""" +361 40 dataset """kinships""" +361 40 model """kg2e""" +361 40 loss """bceaftersigmoid""" +361 40 regularizer """no""" +361 40 optimizer """adadelta""" +361 40 training_loop """lcwa""" +361 40 evaluator """rankbased""" +361 41 dataset """kinships""" +361 41 model """kg2e""" +361 41 loss """bceaftersigmoid""" +361 41 regularizer """no""" +361 41 optimizer """adadelta""" +361 41 training_loop """lcwa""" +361 41 evaluator """rankbased""" +361 42 dataset """kinships""" +361 42 model """kg2e""" +361 42 loss """bceaftersigmoid""" +361 42 regularizer """no""" +361 42 optimizer """adadelta""" +361 
42 training_loop """lcwa""" +361 42 evaluator """rankbased""" +361 43 dataset """kinships""" +361 43 model """kg2e""" +361 43 loss """bceaftersigmoid""" +361 43 regularizer """no""" +361 43 optimizer """adadelta""" +361 43 training_loop """lcwa""" +361 43 evaluator """rankbased""" +361 44 dataset """kinships""" +361 44 model """kg2e""" +361 44 loss """bceaftersigmoid""" +361 44 regularizer """no""" +361 44 optimizer """adadelta""" +361 44 training_loop """lcwa""" +361 44 evaluator """rankbased""" +361 45 dataset """kinships""" +361 45 model """kg2e""" +361 45 loss """bceaftersigmoid""" +361 45 regularizer """no""" +361 45 optimizer """adadelta""" +361 45 training_loop """lcwa""" +361 45 evaluator """rankbased""" +361 46 dataset """kinships""" +361 46 model """kg2e""" +361 46 loss """bceaftersigmoid""" +361 46 regularizer """no""" +361 46 optimizer """adadelta""" +361 46 training_loop """lcwa""" +361 46 evaluator """rankbased""" +361 47 dataset """kinships""" +361 47 model """kg2e""" +361 47 loss """bceaftersigmoid""" +361 47 regularizer """no""" +361 47 optimizer """adadelta""" +361 47 training_loop """lcwa""" +361 47 evaluator """rankbased""" +361 48 dataset """kinships""" +361 48 model """kg2e""" +361 48 loss """bceaftersigmoid""" +361 48 regularizer """no""" +361 48 optimizer """adadelta""" +361 48 training_loop """lcwa""" +361 48 evaluator """rankbased""" +361 49 dataset """kinships""" +361 49 model """kg2e""" +361 49 loss """bceaftersigmoid""" +361 49 regularizer """no""" +361 49 optimizer """adadelta""" +361 49 training_loop """lcwa""" +361 49 evaluator """rankbased""" +361 50 dataset """kinships""" +361 50 model """kg2e""" +361 50 loss """bceaftersigmoid""" +361 50 regularizer """no""" +361 50 optimizer """adadelta""" +361 50 training_loop """lcwa""" +361 50 evaluator """rankbased""" +361 51 dataset """kinships""" +361 51 model """kg2e""" +361 51 loss """bceaftersigmoid""" +361 51 regularizer """no""" +361 51 optimizer """adadelta""" +361 51 training_loop 
"""lcwa""" +361 51 evaluator """rankbased""" +361 52 dataset """kinships""" +361 52 model """kg2e""" +361 52 loss """bceaftersigmoid""" +361 52 regularizer """no""" +361 52 optimizer """adadelta""" +361 52 training_loop """lcwa""" +361 52 evaluator """rankbased""" +361 53 dataset """kinships""" +361 53 model """kg2e""" +361 53 loss """bceaftersigmoid""" +361 53 regularizer """no""" +361 53 optimizer """adadelta""" +361 53 training_loop """lcwa""" +361 53 evaluator """rankbased""" +361 54 dataset """kinships""" +361 54 model """kg2e""" +361 54 loss """bceaftersigmoid""" +361 54 regularizer """no""" +361 54 optimizer """adadelta""" +361 54 training_loop """lcwa""" +361 54 evaluator """rankbased""" +361 55 dataset """kinships""" +361 55 model """kg2e""" +361 55 loss """bceaftersigmoid""" +361 55 regularizer """no""" +361 55 optimizer """adadelta""" +361 55 training_loop """lcwa""" +361 55 evaluator """rankbased""" +361 56 dataset """kinships""" +361 56 model """kg2e""" +361 56 loss """bceaftersigmoid""" +361 56 regularizer """no""" +361 56 optimizer """adadelta""" +361 56 training_loop """lcwa""" +361 56 evaluator """rankbased""" +361 57 dataset """kinships""" +361 57 model """kg2e""" +361 57 loss """bceaftersigmoid""" +361 57 regularizer """no""" +361 57 optimizer """adadelta""" +361 57 training_loop """lcwa""" +361 57 evaluator """rankbased""" +361 58 dataset """kinships""" +361 58 model """kg2e""" +361 58 loss """bceaftersigmoid""" +361 58 regularizer """no""" +361 58 optimizer """adadelta""" +361 58 training_loop """lcwa""" +361 58 evaluator """rankbased""" +361 59 dataset """kinships""" +361 59 model """kg2e""" +361 59 loss """bceaftersigmoid""" +361 59 regularizer """no""" +361 59 optimizer """adadelta""" +361 59 training_loop """lcwa""" +361 59 evaluator """rankbased""" +361 60 dataset """kinships""" +361 60 model """kg2e""" +361 60 loss """bceaftersigmoid""" +361 60 regularizer """no""" +361 60 optimizer """adadelta""" +361 60 training_loop """lcwa""" +361 60 
evaluator """rankbased""" +361 61 dataset """kinships""" +361 61 model """kg2e""" +361 61 loss """bceaftersigmoid""" +361 61 regularizer """no""" +361 61 optimizer """adadelta""" +361 61 training_loop """lcwa""" +361 61 evaluator """rankbased""" +361 62 dataset """kinships""" +361 62 model """kg2e""" +361 62 loss """bceaftersigmoid""" +361 62 regularizer """no""" +361 62 optimizer """adadelta""" +361 62 training_loop """lcwa""" +361 62 evaluator """rankbased""" +361 63 dataset """kinships""" +361 63 model """kg2e""" +361 63 loss """bceaftersigmoid""" +361 63 regularizer """no""" +361 63 optimizer """adadelta""" +361 63 training_loop """lcwa""" +361 63 evaluator """rankbased""" +361 64 dataset """kinships""" +361 64 model """kg2e""" +361 64 loss """bceaftersigmoid""" +361 64 regularizer """no""" +361 64 optimizer """adadelta""" +361 64 training_loop """lcwa""" +361 64 evaluator """rankbased""" +361 65 dataset """kinships""" +361 65 model """kg2e""" +361 65 loss """bceaftersigmoid""" +361 65 regularizer """no""" +361 65 optimizer """adadelta""" +361 65 training_loop """lcwa""" +361 65 evaluator """rankbased""" +361 66 dataset """kinships""" +361 66 model """kg2e""" +361 66 loss """bceaftersigmoid""" +361 66 regularizer """no""" +361 66 optimizer """adadelta""" +361 66 training_loop """lcwa""" +361 66 evaluator """rankbased""" +361 67 dataset """kinships""" +361 67 model """kg2e""" +361 67 loss """bceaftersigmoid""" +361 67 regularizer """no""" +361 67 optimizer """adadelta""" +361 67 training_loop """lcwa""" +361 67 evaluator """rankbased""" +361 68 dataset """kinships""" +361 68 model """kg2e""" +361 68 loss """bceaftersigmoid""" +361 68 regularizer """no""" +361 68 optimizer """adadelta""" +361 68 training_loop """lcwa""" +361 68 evaluator """rankbased""" +361 69 dataset """kinships""" +361 69 model """kg2e""" +361 69 loss """bceaftersigmoid""" +361 69 regularizer """no""" +361 69 optimizer """adadelta""" +361 69 training_loop """lcwa""" +361 69 evaluator 
"""rankbased""" +361 70 dataset """kinships""" +361 70 model """kg2e""" +361 70 loss """bceaftersigmoid""" +361 70 regularizer """no""" +361 70 optimizer """adadelta""" +361 70 training_loop """lcwa""" +361 70 evaluator """rankbased""" +361 71 dataset """kinships""" +361 71 model """kg2e""" +361 71 loss """bceaftersigmoid""" +361 71 regularizer """no""" +361 71 optimizer """adadelta""" +361 71 training_loop """lcwa""" +361 71 evaluator """rankbased""" +361 72 dataset """kinships""" +361 72 model """kg2e""" +361 72 loss """bceaftersigmoid""" +361 72 regularizer """no""" +361 72 optimizer """adadelta""" +361 72 training_loop """lcwa""" +361 72 evaluator """rankbased""" +361 73 dataset """kinships""" +361 73 model """kg2e""" +361 73 loss """bceaftersigmoid""" +361 73 regularizer """no""" +361 73 optimizer """adadelta""" +361 73 training_loop """lcwa""" +361 73 evaluator """rankbased""" +361 74 dataset """kinships""" +361 74 model """kg2e""" +361 74 loss """bceaftersigmoid""" +361 74 regularizer """no""" +361 74 optimizer """adadelta""" +361 74 training_loop """lcwa""" +361 74 evaluator """rankbased""" +361 75 dataset """kinships""" +361 75 model """kg2e""" +361 75 loss """bceaftersigmoid""" +361 75 regularizer """no""" +361 75 optimizer """adadelta""" +361 75 training_loop """lcwa""" +361 75 evaluator """rankbased""" +361 76 dataset """kinships""" +361 76 model """kg2e""" +361 76 loss """bceaftersigmoid""" +361 76 regularizer """no""" +361 76 optimizer """adadelta""" +361 76 training_loop """lcwa""" +361 76 evaluator """rankbased""" +361 77 dataset """kinships""" +361 77 model """kg2e""" +361 77 loss """bceaftersigmoid""" +361 77 regularizer """no""" +361 77 optimizer """adadelta""" +361 77 training_loop """lcwa""" +361 77 evaluator """rankbased""" +361 78 dataset """kinships""" +361 78 model """kg2e""" +361 78 loss """bceaftersigmoid""" +361 78 regularizer """no""" +361 78 optimizer """adadelta""" +361 78 training_loop """lcwa""" +361 78 evaluator """rankbased""" 
+361 79 dataset """kinships""" +361 79 model """kg2e""" +361 79 loss """bceaftersigmoid""" +361 79 regularizer """no""" +361 79 optimizer """adadelta""" +361 79 training_loop """lcwa""" +361 79 evaluator """rankbased""" +361 80 dataset """kinships""" +361 80 model """kg2e""" +361 80 loss """bceaftersigmoid""" +361 80 regularizer """no""" +361 80 optimizer """adadelta""" +361 80 training_loop """lcwa""" +361 80 evaluator """rankbased""" +361 81 dataset """kinships""" +361 81 model """kg2e""" +361 81 loss """bceaftersigmoid""" +361 81 regularizer """no""" +361 81 optimizer """adadelta""" +361 81 training_loop """lcwa""" +361 81 evaluator """rankbased""" +361 82 dataset """kinships""" +361 82 model """kg2e""" +361 82 loss """bceaftersigmoid""" +361 82 regularizer """no""" +361 82 optimizer """adadelta""" +361 82 training_loop """lcwa""" +361 82 evaluator """rankbased""" +361 83 dataset """kinships""" +361 83 model """kg2e""" +361 83 loss """bceaftersigmoid""" +361 83 regularizer """no""" +361 83 optimizer """adadelta""" +361 83 training_loop """lcwa""" +361 83 evaluator """rankbased""" +361 84 dataset """kinships""" +361 84 model """kg2e""" +361 84 loss """bceaftersigmoid""" +361 84 regularizer """no""" +361 84 optimizer """adadelta""" +361 84 training_loop """lcwa""" +361 84 evaluator """rankbased""" +361 85 dataset """kinships""" +361 85 model """kg2e""" +361 85 loss """bceaftersigmoid""" +361 85 regularizer """no""" +361 85 optimizer """adadelta""" +361 85 training_loop """lcwa""" +361 85 evaluator """rankbased""" +361 86 dataset """kinships""" +361 86 model """kg2e""" +361 86 loss """bceaftersigmoid""" +361 86 regularizer """no""" +361 86 optimizer """adadelta""" +361 86 training_loop """lcwa""" +361 86 evaluator """rankbased""" +361 87 dataset """kinships""" +361 87 model """kg2e""" +361 87 loss """bceaftersigmoid""" +361 87 regularizer """no""" +361 87 optimizer """adadelta""" +361 87 training_loop """lcwa""" +361 87 evaluator """rankbased""" +361 88 dataset 
"""kinships""" +361 88 model """kg2e""" +361 88 loss """bceaftersigmoid""" +361 88 regularizer """no""" +361 88 optimizer """adadelta""" +361 88 training_loop """lcwa""" +361 88 evaluator """rankbased""" +361 89 dataset """kinships""" +361 89 model """kg2e""" +361 89 loss """bceaftersigmoid""" +361 89 regularizer """no""" +361 89 optimizer """adadelta""" +361 89 training_loop """lcwa""" +361 89 evaluator """rankbased""" +361 90 dataset """kinships""" +361 90 model """kg2e""" +361 90 loss """bceaftersigmoid""" +361 90 regularizer """no""" +361 90 optimizer """adadelta""" +361 90 training_loop """lcwa""" +361 90 evaluator """rankbased""" +361 91 dataset """kinships""" +361 91 model """kg2e""" +361 91 loss """bceaftersigmoid""" +361 91 regularizer """no""" +361 91 optimizer """adadelta""" +361 91 training_loop """lcwa""" +361 91 evaluator """rankbased""" +361 92 dataset """kinships""" +361 92 model """kg2e""" +361 92 loss """bceaftersigmoid""" +361 92 regularizer """no""" +361 92 optimizer """adadelta""" +361 92 training_loop """lcwa""" +361 92 evaluator """rankbased""" +361 93 dataset """kinships""" +361 93 model """kg2e""" +361 93 loss """bceaftersigmoid""" +361 93 regularizer """no""" +361 93 optimizer """adadelta""" +361 93 training_loop """lcwa""" +361 93 evaluator """rankbased""" +361 94 dataset """kinships""" +361 94 model """kg2e""" +361 94 loss """bceaftersigmoid""" +361 94 regularizer """no""" +361 94 optimizer """adadelta""" +361 94 training_loop """lcwa""" +361 94 evaluator """rankbased""" +361 95 dataset """kinships""" +361 95 model """kg2e""" +361 95 loss """bceaftersigmoid""" +361 95 regularizer """no""" +361 95 optimizer """adadelta""" +361 95 training_loop """lcwa""" +361 95 evaluator """rankbased""" +361 96 dataset """kinships""" +361 96 model """kg2e""" +361 96 loss """bceaftersigmoid""" +361 96 regularizer """no""" +361 96 optimizer """adadelta""" +361 96 training_loop """lcwa""" +361 96 evaluator """rankbased""" +361 97 dataset """kinships""" +361 
97 model """kg2e""" +361 97 loss """bceaftersigmoid""" +361 97 regularizer """no""" +361 97 optimizer """adadelta""" +361 97 training_loop """lcwa""" +361 97 evaluator """rankbased""" +361 98 dataset """kinships""" +361 98 model """kg2e""" +361 98 loss """bceaftersigmoid""" +361 98 regularizer """no""" +361 98 optimizer """adadelta""" +361 98 training_loop """lcwa""" +361 98 evaluator """rankbased""" +361 99 dataset """kinships""" +361 99 model """kg2e""" +361 99 loss """bceaftersigmoid""" +361 99 regularizer """no""" +361 99 optimizer """adadelta""" +361 99 training_loop """lcwa""" +361 99 evaluator """rankbased""" +361 100 dataset """kinships""" +361 100 model """kg2e""" +361 100 loss """bceaftersigmoid""" +361 100 regularizer """no""" +361 100 optimizer """adadelta""" +361 100 training_loop """lcwa""" +361 100 evaluator """rankbased""" +362 1 model.embedding_dim 0.0 +362 1 model.c_min 0.031142810485135626 +362 1 model.c_max 6.193982558648862 +362 1 training.batch_size 2.0 +362 1 training.label_smoothing 0.4164730241880885 +362 2 model.embedding_dim 0.0 +362 2 model.c_min 0.06992172423402264 +362 2 model.c_max 9.354632159380397 +362 2 training.batch_size 1.0 +362 2 training.label_smoothing 0.0409457316771101 +362 3 model.embedding_dim 1.0 +362 3 model.c_min 0.09144372975796052 +362 3 model.c_max 6.561474401331688 +362 3 training.batch_size 1.0 +362 3 training.label_smoothing 0.006754866701840304 +362 4 model.embedding_dim 2.0 +362 4 model.c_min 0.03909254291576662 +362 4 model.c_max 3.3288645180719607 +362 4 training.batch_size 0.0 +362 4 training.label_smoothing 0.031249371735226315 +362 5 model.embedding_dim 2.0 +362 5 model.c_min 0.03280310017168448 +362 5 model.c_max 4.853340520513781 +362 5 training.batch_size 2.0 +362 5 training.label_smoothing 0.032640526960068 +362 6 model.embedding_dim 0.0 +362 6 model.c_min 0.08574408331484666 +362 6 model.c_max 2.1410422707066568 +362 6 training.batch_size 0.0 +362 6 training.label_smoothing 0.010452459658326764 +362 7 
model.embedding_dim 1.0 +362 7 model.c_min 0.03630043895645244 +362 7 model.c_max 2.9905106862836575 +362 7 training.batch_size 2.0 +362 7 training.label_smoothing 0.043576187093848924 +362 8 model.embedding_dim 0.0 +362 8 model.c_min 0.0933266788135254 +362 8 model.c_max 4.71875250776519 +362 8 training.batch_size 0.0 +362 8 training.label_smoothing 0.16656575697814305 +362 9 model.embedding_dim 2.0 +362 9 model.c_min 0.07424647952614696 +362 9 model.c_max 6.634650751030405 +362 9 training.batch_size 2.0 +362 9 training.label_smoothing 0.004079214498600187 +362 10 model.embedding_dim 2.0 +362 10 model.c_min 0.044500079271363374 +362 10 model.c_max 9.422925676269001 +362 10 training.batch_size 2.0 +362 10 training.label_smoothing 0.031788222528388546 +362 11 model.embedding_dim 1.0 +362 11 model.c_min 0.05544888901119162 +362 11 model.c_max 4.818740015992135 +362 11 training.batch_size 2.0 +362 11 training.label_smoothing 0.8317231694067241 +362 12 model.embedding_dim 2.0 +362 12 model.c_min 0.09745380142298034 +362 12 model.c_max 3.204881071315104 +362 12 training.batch_size 2.0 +362 12 training.label_smoothing 0.019073728538222035 +362 13 model.embedding_dim 0.0 +362 13 model.c_min 0.0746296960814224 +362 13 model.c_max 8.055249962101163 +362 13 training.batch_size 2.0 +362 13 training.label_smoothing 0.006169857446891059 +362 14 model.embedding_dim 2.0 +362 14 model.c_min 0.07357743790616197 +362 14 model.c_max 8.672149721013458 +362 14 training.batch_size 0.0 +362 14 training.label_smoothing 0.01751022887086621 +362 15 model.embedding_dim 1.0 +362 15 model.c_min 0.07339386842505144 +362 15 model.c_max 9.921892864500464 +362 15 training.batch_size 0.0 +362 15 training.label_smoothing 0.010322751174035678 +362 16 model.embedding_dim 0.0 +362 16 model.c_min 0.05343286340781176 +362 16 model.c_max 1.43401721449972 +362 16 training.batch_size 0.0 +362 16 training.label_smoothing 0.02513689031443225 +362 17 model.embedding_dim 2.0 +362 17 model.c_min 
0.014401953678501585 +362 17 model.c_max 7.077104046307756 +362 17 training.batch_size 0.0 +362 17 training.label_smoothing 0.17789271310560492 +362 18 model.embedding_dim 1.0 +362 18 model.c_min 0.010791592611387747 +362 18 model.c_max 6.970535811472499 +362 18 training.batch_size 0.0 +362 18 training.label_smoothing 0.0838514685570064 +362 19 model.embedding_dim 1.0 +362 19 model.c_min 0.09532233665631303 +362 19 model.c_max 5.3349670027977485 +362 19 training.batch_size 2.0 +362 19 training.label_smoothing 0.14427706250283956 +362 20 model.embedding_dim 0.0 +362 20 model.c_min 0.013104520342648564 +362 20 model.c_max 3.8356237486667157 +362 20 training.batch_size 1.0 +362 20 training.label_smoothing 0.001348955128166526 +362 21 model.embedding_dim 0.0 +362 21 model.c_min 0.06837892646101967 +362 21 model.c_max 4.865060081364787 +362 21 training.batch_size 2.0 +362 21 training.label_smoothing 0.009746374344323329 +362 22 model.embedding_dim 0.0 +362 22 model.c_min 0.031104595508318653 +362 22 model.c_max 3.1332815419851325 +362 22 training.batch_size 0.0 +362 22 training.label_smoothing 0.7015649105860893 +362 23 model.embedding_dim 0.0 +362 23 model.c_min 0.03122140581024809 +362 23 model.c_max 3.9608873599288392 +362 23 training.batch_size 1.0 +362 23 training.label_smoothing 0.08126830706075082 +362 24 model.embedding_dim 2.0 +362 24 model.c_min 0.011769425135462105 +362 24 model.c_max 9.589358393970505 +362 24 training.batch_size 2.0 +362 24 training.label_smoothing 0.0031101193135371144 +362 25 model.embedding_dim 1.0 +362 25 model.c_min 0.013138654802135134 +362 25 model.c_max 4.236961007656921 +362 25 training.batch_size 2.0 +362 25 training.label_smoothing 0.22352317169697622 +362 26 model.embedding_dim 2.0 +362 26 model.c_min 0.02050104052633913 +362 26 model.c_max 4.732288178369013 +362 26 training.batch_size 2.0 +362 26 training.label_smoothing 0.664473099987324 +362 27 model.embedding_dim 0.0 +362 27 model.c_min 0.015399299656216343 +362 27 
model.c_max 8.89224236261585 +362 27 training.batch_size 1.0 +362 27 training.label_smoothing 0.19421608235436028 +362 28 model.embedding_dim 0.0 +362 28 model.c_min 0.016992894837607526 +362 28 model.c_max 7.74595246402659 +362 28 training.batch_size 0.0 +362 28 training.label_smoothing 0.005820932827238361 +362 29 model.embedding_dim 2.0 +362 29 model.c_min 0.0632077637349745 +362 29 model.c_max 9.536868381825585 +362 29 training.batch_size 1.0 +362 29 training.label_smoothing 0.055094451355971095 +362 30 model.embedding_dim 2.0 +362 30 model.c_min 0.03370182154920479 +362 30 model.c_max 2.579433205857599 +362 30 training.batch_size 2.0 +362 30 training.label_smoothing 0.0024032342259597984 +362 31 model.embedding_dim 0.0 +362 31 model.c_min 0.04959029589170239 +362 31 model.c_max 9.870231242613691 +362 31 training.batch_size 0.0 +362 31 training.label_smoothing 0.29109620131392633 +362 32 model.embedding_dim 0.0 +362 32 model.c_min 0.05091164288728136 +362 32 model.c_max 7.951014455979053 +362 32 training.batch_size 0.0 +362 32 training.label_smoothing 0.6140080317467904 +362 33 model.embedding_dim 0.0 +362 33 model.c_min 0.08355101207261573 +362 33 model.c_max 1.7788922059665342 +362 33 training.batch_size 0.0 +362 33 training.label_smoothing 0.026819486912367962 +362 34 model.embedding_dim 2.0 +362 34 model.c_min 0.01095560688385652 +362 34 model.c_max 7.757493002679039 +362 34 training.batch_size 1.0 +362 34 training.label_smoothing 0.3085792936946983 +362 35 model.embedding_dim 1.0 +362 35 model.c_min 0.019687026884620103 +362 35 model.c_max 3.537563479184309 +362 35 training.batch_size 0.0 +362 35 training.label_smoothing 0.005967800060229255 +362 36 model.embedding_dim 2.0 +362 36 model.c_min 0.02785270256172663 +362 36 model.c_max 3.9809811000597497 +362 36 training.batch_size 2.0 +362 36 training.label_smoothing 0.0025058547889278086 +362 37 model.embedding_dim 2.0 +362 37 model.c_min 0.024897016749545486 +362 37 model.c_max 7.058499538259692 +362 37 
training.batch_size 1.0 +362 37 training.label_smoothing 0.010674290460148343 +362 38 model.embedding_dim 2.0 +362 38 model.c_min 0.030141668812285305 +362 38 model.c_max 5.526754807358044 +362 38 training.batch_size 0.0 +362 38 training.label_smoothing 0.03904845212593957 +362 39 model.embedding_dim 2.0 +362 39 model.c_min 0.07490808479448094 +362 39 model.c_max 3.8856714731817146 +362 39 training.batch_size 0.0 +362 39 training.label_smoothing 0.14935328852035512 +362 40 model.embedding_dim 2.0 +362 40 model.c_min 0.029667699530444982 +362 40 model.c_max 7.496984790248801 +362 40 training.batch_size 1.0 +362 40 training.label_smoothing 0.00934151149329487 +362 41 model.embedding_dim 2.0 +362 41 model.c_min 0.012122532054571991 +362 41 model.c_max 7.843037437601118 +362 41 training.batch_size 0.0 +362 41 training.label_smoothing 0.0036318442591946683 +362 42 model.embedding_dim 2.0 +362 42 model.c_min 0.019169954462230434 +362 42 model.c_max 8.362535261149972 +362 42 training.batch_size 1.0 +362 42 training.label_smoothing 0.01757032500248696 +362 43 model.embedding_dim 0.0 +362 43 model.c_min 0.02002190991808127 +362 43 model.c_max 6.21154780924965 +362 43 training.batch_size 0.0 +362 43 training.label_smoothing 0.002300336996827126 +362 44 model.embedding_dim 0.0 +362 44 model.c_min 0.023449105510158254 +362 44 model.c_max 1.4060795671549327 +362 44 training.batch_size 1.0 +362 44 training.label_smoothing 0.14343715273896038 +362 45 model.embedding_dim 1.0 +362 45 model.c_min 0.021219160699682382 +362 45 model.c_max 3.427018607710406 +362 45 training.batch_size 0.0 +362 45 training.label_smoothing 0.05716736997223906 +362 46 model.embedding_dim 0.0 +362 46 model.c_min 0.048842950543769914 +362 46 model.c_max 9.789058712247515 +362 46 training.batch_size 1.0 +362 46 training.label_smoothing 0.12220302440194838 +362 47 model.embedding_dim 2.0 +362 47 model.c_min 0.047598201135914724 +362 47 model.c_max 8.729498057971393 +362 47 training.batch_size 1.0 +362 47 
training.label_smoothing 0.015419711096801795 +362 48 model.embedding_dim 0.0 +362 48 model.c_min 0.029842560296397872 +362 48 model.c_max 3.7907520308653835 +362 48 training.batch_size 0.0 +362 48 training.label_smoothing 0.06639384645897645 +362 49 model.embedding_dim 1.0 +362 49 model.c_min 0.0692312064478795 +362 49 model.c_max 2.58259985866093 +362 49 training.batch_size 1.0 +362 49 training.label_smoothing 0.42621995272285157 +362 50 model.embedding_dim 0.0 +362 50 model.c_min 0.04415857134106089 +362 50 model.c_max 9.630098041672841 +362 50 training.batch_size 2.0 +362 50 training.label_smoothing 0.012569195655812297 +362 51 model.embedding_dim 1.0 +362 51 model.c_min 0.022378764743235864 +362 51 model.c_max 1.5426494273233777 +362 51 training.batch_size 2.0 +362 51 training.label_smoothing 0.007912011330806051 +362 52 model.embedding_dim 2.0 +362 52 model.c_min 0.03759393914275944 +362 52 model.c_max 4.344931235062194 +362 52 training.batch_size 1.0 +362 52 training.label_smoothing 0.013125708809897073 +362 53 model.embedding_dim 1.0 +362 53 model.c_min 0.014249711366392966 +362 53 model.c_max 1.3319389160524255 +362 53 training.batch_size 2.0 +362 53 training.label_smoothing 0.0019726173658080703 +362 54 model.embedding_dim 0.0 +362 54 model.c_min 0.04109474496371008 +362 54 model.c_max 7.2464644727268155 +362 54 training.batch_size 0.0 +362 54 training.label_smoothing 0.03460409395529531 +362 55 model.embedding_dim 0.0 +362 55 model.c_min 0.07876494823111443 +362 55 model.c_max 1.9499572183867158 +362 55 training.batch_size 1.0 +362 55 training.label_smoothing 0.1695721893077821 +362 56 model.embedding_dim 1.0 +362 56 model.c_min 0.05528541842034388 +362 56 model.c_max 6.215272668564214 +362 56 training.batch_size 1.0 +362 56 training.label_smoothing 0.005102351117195431 +362 57 model.embedding_dim 2.0 +362 57 model.c_min 0.01237495855757615 +362 57 model.c_max 5.7186013440665455 +362 57 training.batch_size 1.0 +362 57 training.label_smoothing 
0.006160252021923354 +362 58 model.embedding_dim 2.0 +362 58 model.c_min 0.029003569500474922 +362 58 model.c_max 5.039030324467257 +362 58 training.batch_size 1.0 +362 58 training.label_smoothing 0.3987544665868272 +362 59 model.embedding_dim 2.0 +362 59 model.c_min 0.012458738865153229 +362 59 model.c_max 9.175671700979798 +362 59 training.batch_size 1.0 +362 59 training.label_smoothing 0.0042069254983335405 +362 60 model.embedding_dim 2.0 +362 60 model.c_min 0.0221222992793716 +362 60 model.c_max 2.700146410286914 +362 60 training.batch_size 0.0 +362 60 training.label_smoothing 0.0069867525777338076 +362 61 model.embedding_dim 2.0 +362 61 model.c_min 0.019446815152112107 +362 61 model.c_max 3.0396184690502417 +362 61 training.batch_size 0.0 +362 61 training.label_smoothing 0.005206702452166248 +362 62 model.embedding_dim 0.0 +362 62 model.c_min 0.03733590489541344 +362 62 model.c_max 9.361945481525073 +362 62 training.batch_size 0.0 +362 62 training.label_smoothing 0.020696213197447275 +362 63 model.embedding_dim 2.0 +362 63 model.c_min 0.04898314819358045 +362 63 model.c_max 7.303430918088551 +362 63 training.batch_size 1.0 +362 63 training.label_smoothing 0.02890526508684826 +362 64 model.embedding_dim 2.0 +362 64 model.c_min 0.010681332992010371 +362 64 model.c_max 9.066314889512197 +362 64 training.batch_size 0.0 +362 64 training.label_smoothing 0.006131894711658835 +362 65 model.embedding_dim 0.0 +362 65 model.c_min 0.07522966282948561 +362 65 model.c_max 8.349034384761845 +362 65 training.batch_size 1.0 +362 65 training.label_smoothing 0.26053026155890874 +362 66 model.embedding_dim 0.0 +362 66 model.c_min 0.06647153403490058 +362 66 model.c_max 6.607530123522587 +362 66 training.batch_size 1.0 +362 66 training.label_smoothing 0.34241020316057247 +362 67 model.embedding_dim 2.0 +362 67 model.c_min 0.02996971213530099 +362 67 model.c_max 3.6143672308612027 +362 67 training.batch_size 2.0 +362 67 training.label_smoothing 0.05234908766532077 +362 68 
model.embedding_dim 0.0 +362 68 model.c_min 0.029961477063777758 +362 68 model.c_max 6.7572342374802075 +362 68 training.batch_size 0.0 +362 68 training.label_smoothing 0.08564095437472852 +362 69 model.embedding_dim 0.0 +362 69 model.c_min 0.06824660795016829 +362 69 model.c_max 9.58642900570769 +362 69 training.batch_size 0.0 +362 69 training.label_smoothing 0.16138211764659122 +362 70 model.embedding_dim 0.0 +362 70 model.c_min 0.031066950125248287 +362 70 model.c_max 8.75340017216637 +362 70 training.batch_size 2.0 +362 70 training.label_smoothing 0.027537361107634713 +362 71 model.embedding_dim 2.0 +362 71 model.c_min 0.025974235966528654 +362 71 model.c_max 3.555542272725713 +362 71 training.batch_size 1.0 +362 71 training.label_smoothing 0.0033047008277313325 +362 72 model.embedding_dim 0.0 +362 72 model.c_min 0.03894007551119165 +362 72 model.c_max 1.1108653580471937 +362 72 training.batch_size 1.0 +362 72 training.label_smoothing 0.009769400235898453 +362 73 model.embedding_dim 0.0 +362 73 model.c_min 0.0213917972723476 +362 73 model.c_max 2.9250302407798863 +362 73 training.batch_size 1.0 +362 73 training.label_smoothing 0.24904523787390995 +362 74 model.embedding_dim 1.0 +362 74 model.c_min 0.030045947078313668 +362 74 model.c_max 7.672573735549221 +362 74 training.batch_size 1.0 +362 74 training.label_smoothing 0.005992289408970683 +362 75 model.embedding_dim 2.0 +362 75 model.c_min 0.02912157924613076 +362 75 model.c_max 3.1760368316252285 +362 75 training.batch_size 0.0 +362 75 training.label_smoothing 0.11022627557026243 +362 76 model.embedding_dim 0.0 +362 76 model.c_min 0.014733828877227935 +362 76 model.c_max 9.86982938171258 +362 76 training.batch_size 0.0 +362 76 training.label_smoothing 0.3095673177748429 +362 77 model.embedding_dim 2.0 +362 77 model.c_min 0.09399610156979792 +362 77 model.c_max 7.016808935492569 +362 77 training.batch_size 2.0 +362 77 training.label_smoothing 0.04356227324767241 +362 78 model.embedding_dim 1.0 +362 78 
model.c_min 0.01001158908406663 +362 78 model.c_max 8.567520901704782 +362 78 training.batch_size 2.0 +362 78 training.label_smoothing 0.0010253227967083646 +362 79 model.embedding_dim 2.0 +362 79 model.c_min 0.01931864478525004 +362 79 model.c_max 9.039302460194643 +362 79 training.batch_size 1.0 +362 79 training.label_smoothing 0.08888656966535602 +362 80 model.embedding_dim 2.0 +362 80 model.c_min 0.015233903589928202 +362 80 model.c_max 7.674064200324082 +362 80 training.batch_size 2.0 +362 80 training.label_smoothing 0.0017518650305748216 +362 81 model.embedding_dim 2.0 +362 81 model.c_min 0.06409577349201724 +362 81 model.c_max 2.9943215722976317 +362 81 training.batch_size 2.0 +362 81 training.label_smoothing 0.07343567002211909 +362 82 model.embedding_dim 2.0 +362 82 model.c_min 0.024802013470382463 +362 82 model.c_max 5.387097949656665 +362 82 training.batch_size 2.0 +362 82 training.label_smoothing 0.049538343185511285 +362 83 model.embedding_dim 2.0 +362 83 model.c_min 0.011488463611062481 +362 83 model.c_max 7.598030947626494 +362 83 training.batch_size 2.0 +362 83 training.label_smoothing 0.0019129781242226416 +362 84 model.embedding_dim 1.0 +362 84 model.c_min 0.06860151713644196 +362 84 model.c_max 1.775342589362387 +362 84 training.batch_size 2.0 +362 84 training.label_smoothing 0.9719883039296159 +362 85 model.embedding_dim 1.0 +362 85 model.c_min 0.01575663236600594 +362 85 model.c_max 5.451808588089862 +362 85 training.batch_size 1.0 +362 85 training.label_smoothing 0.0076299443147167245 +362 86 model.embedding_dim 2.0 +362 86 model.c_min 0.02101307329425739 +362 86 model.c_max 8.562466391503758 +362 86 training.batch_size 2.0 +362 86 training.label_smoothing 0.07782820462147501 +362 87 model.embedding_dim 0.0 +362 87 model.c_min 0.01678518084804179 +362 87 model.c_max 7.799267913844107 +362 87 training.batch_size 1.0 +362 87 training.label_smoothing 0.16397320311175828 +362 88 model.embedding_dim 2.0 +362 88 model.c_min 0.05948794921427396 +362 
88 model.c_max 4.062962893703819 +362 88 training.batch_size 2.0 +362 88 training.label_smoothing 0.026036252463225842 +362 89 model.embedding_dim 1.0 +362 89 model.c_min 0.04244727324474234 +362 89 model.c_max 4.825410808559224 +362 89 training.batch_size 1.0 +362 89 training.label_smoothing 0.021215247602888242 +362 90 model.embedding_dim 1.0 +362 90 model.c_min 0.02076463929288932 +362 90 model.c_max 9.01189198329377 +362 90 training.batch_size 2.0 +362 90 training.label_smoothing 0.0011320511801776833 +362 91 model.embedding_dim 2.0 +362 91 model.c_min 0.0375699157217272 +362 91 model.c_max 6.319217355944215 +362 91 training.batch_size 1.0 +362 91 training.label_smoothing 0.13880020995064243 +362 92 model.embedding_dim 2.0 +362 92 model.c_min 0.048485177334049535 +362 92 model.c_max 2.3924240788960014 +362 92 training.batch_size 0.0 +362 92 training.label_smoothing 0.5427848925712734 +362 93 model.embedding_dim 1.0 +362 93 model.c_min 0.022989462573606116 +362 93 model.c_max 6.210559237381421 +362 93 training.batch_size 1.0 +362 93 training.label_smoothing 0.8048542855366936 +362 94 model.embedding_dim 1.0 +362 94 model.c_min 0.012152361488998878 +362 94 model.c_max 7.288244363874052 +362 94 training.batch_size 0.0 +362 94 training.label_smoothing 0.14196039739606123 +362 95 model.embedding_dim 1.0 +362 95 model.c_min 0.016859226246904647 +362 95 model.c_max 5.088408568985302 +362 95 training.batch_size 2.0 +362 95 training.label_smoothing 0.5711117061366746 +362 96 model.embedding_dim 0.0 +362 96 model.c_min 0.0806747590094196 +362 96 model.c_max 9.345228056942833 +362 96 training.batch_size 2.0 +362 96 training.label_smoothing 0.22655833480044013 +362 97 model.embedding_dim 1.0 +362 97 model.c_min 0.045994262592193975 +362 97 model.c_max 5.870848846449111 +362 97 training.batch_size 0.0 +362 97 training.label_smoothing 0.002225823094939078 +362 98 model.embedding_dim 2.0 +362 98 model.c_min 0.061820271819158584 +362 98 model.c_max 4.9221060780090475 +362 98 
training.batch_size 0.0 +362 98 training.label_smoothing 0.6135474132044284 +362 99 model.embedding_dim 2.0 +362 99 model.c_min 0.01009854486435734 +362 99 model.c_max 8.952562973298976 +362 99 training.batch_size 0.0 +362 99 training.label_smoothing 0.0023579733171160532 +362 100 model.embedding_dim 0.0 +362 100 model.c_min 0.0993244974694387 +362 100 model.c_max 9.079463487008294 +362 100 training.batch_size 1.0 +362 100 training.label_smoothing 0.007334439201036649 +362 1 dataset """kinships""" +362 1 model """kg2e""" +362 1 loss """softplus""" +362 1 regularizer """no""" +362 1 optimizer """adadelta""" +362 1 training_loop """lcwa""" +362 1 evaluator """rankbased""" +362 2 dataset """kinships""" +362 2 model """kg2e""" +362 2 loss """softplus""" +362 2 regularizer """no""" +362 2 optimizer """adadelta""" +362 2 training_loop """lcwa""" +362 2 evaluator """rankbased""" +362 3 dataset """kinships""" +362 3 model """kg2e""" +362 3 loss """softplus""" +362 3 regularizer """no""" +362 3 optimizer """adadelta""" +362 3 training_loop """lcwa""" +362 3 evaluator """rankbased""" +362 4 dataset """kinships""" +362 4 model """kg2e""" +362 4 loss """softplus""" +362 4 regularizer """no""" +362 4 optimizer """adadelta""" +362 4 training_loop """lcwa""" +362 4 evaluator """rankbased""" +362 5 dataset """kinships""" +362 5 model """kg2e""" +362 5 loss """softplus""" +362 5 regularizer """no""" +362 5 optimizer """adadelta""" +362 5 training_loop """lcwa""" +362 5 evaluator """rankbased""" +362 6 dataset """kinships""" +362 6 model """kg2e""" +362 6 loss """softplus""" +362 6 regularizer """no""" +362 6 optimizer """adadelta""" +362 6 training_loop """lcwa""" +362 6 evaluator """rankbased""" +362 7 dataset """kinships""" +362 7 model """kg2e""" +362 7 loss """softplus""" +362 7 regularizer """no""" +362 7 optimizer """adadelta""" +362 7 training_loop """lcwa""" +362 7 evaluator """rankbased""" +362 8 dataset """kinships""" +362 8 model """kg2e""" +362 8 loss """softplus""" 
+362 8 regularizer """no""" +362 8 optimizer """adadelta""" +362 8 training_loop """lcwa""" +362 8 evaluator """rankbased""" +362 9 dataset """kinships""" +362 9 model """kg2e""" +362 9 loss """softplus""" +362 9 regularizer """no""" +362 9 optimizer """adadelta""" +362 9 training_loop """lcwa""" +362 9 evaluator """rankbased""" +362 10 dataset """kinships""" +362 10 model """kg2e""" +362 10 loss """softplus""" +362 10 regularizer """no""" +362 10 optimizer """adadelta""" +362 10 training_loop """lcwa""" +362 10 evaluator """rankbased""" +362 11 dataset """kinships""" +362 11 model """kg2e""" +362 11 loss """softplus""" +362 11 regularizer """no""" +362 11 optimizer """adadelta""" +362 11 training_loop """lcwa""" +362 11 evaluator """rankbased""" +362 12 dataset """kinships""" +362 12 model """kg2e""" +362 12 loss """softplus""" +362 12 regularizer """no""" +362 12 optimizer """adadelta""" +362 12 training_loop """lcwa""" +362 12 evaluator """rankbased""" +362 13 dataset """kinships""" +362 13 model """kg2e""" +362 13 loss """softplus""" +362 13 regularizer """no""" +362 13 optimizer """adadelta""" +362 13 training_loop """lcwa""" +362 13 evaluator """rankbased""" +362 14 dataset """kinships""" +362 14 model """kg2e""" +362 14 loss """softplus""" +362 14 regularizer """no""" +362 14 optimizer """adadelta""" +362 14 training_loop """lcwa""" +362 14 evaluator """rankbased""" +362 15 dataset """kinships""" +362 15 model """kg2e""" +362 15 loss """softplus""" +362 15 regularizer """no""" +362 15 optimizer """adadelta""" +362 15 training_loop """lcwa""" +362 15 evaluator """rankbased""" +362 16 dataset """kinships""" +362 16 model """kg2e""" +362 16 loss """softplus""" +362 16 regularizer """no""" +362 16 optimizer """adadelta""" +362 16 training_loop """lcwa""" +362 16 evaluator """rankbased""" +362 17 dataset """kinships""" +362 17 model """kg2e""" +362 17 loss """softplus""" +362 17 regularizer """no""" +362 17 optimizer """adadelta""" +362 17 training_loop 
"""lcwa""" +362 17 evaluator """rankbased""" +362 18 dataset """kinships""" +362 18 model """kg2e""" +362 18 loss """softplus""" +362 18 regularizer """no""" +362 18 optimizer """adadelta""" +362 18 training_loop """lcwa""" +362 18 evaluator """rankbased""" +362 19 dataset """kinships""" +362 19 model """kg2e""" +362 19 loss """softplus""" +362 19 regularizer """no""" +362 19 optimizer """adadelta""" +362 19 training_loop """lcwa""" +362 19 evaluator """rankbased""" +362 20 dataset """kinships""" +362 20 model """kg2e""" +362 20 loss """softplus""" +362 20 regularizer """no""" +362 20 optimizer """adadelta""" +362 20 training_loop """lcwa""" +362 20 evaluator """rankbased""" +362 21 dataset """kinships""" +362 21 model """kg2e""" +362 21 loss """softplus""" +362 21 regularizer """no""" +362 21 optimizer """adadelta""" +362 21 training_loop """lcwa""" +362 21 evaluator """rankbased""" +362 22 dataset """kinships""" +362 22 model """kg2e""" +362 22 loss """softplus""" +362 22 regularizer """no""" +362 22 optimizer """adadelta""" +362 22 training_loop """lcwa""" +362 22 evaluator """rankbased""" +362 23 dataset """kinships""" +362 23 model """kg2e""" +362 23 loss """softplus""" +362 23 regularizer """no""" +362 23 optimizer """adadelta""" +362 23 training_loop """lcwa""" +362 23 evaluator """rankbased""" +362 24 dataset """kinships""" +362 24 model """kg2e""" +362 24 loss """softplus""" +362 24 regularizer """no""" +362 24 optimizer """adadelta""" +362 24 training_loop """lcwa""" +362 24 evaluator """rankbased""" +362 25 dataset """kinships""" +362 25 model """kg2e""" +362 25 loss """softplus""" +362 25 regularizer """no""" +362 25 optimizer """adadelta""" +362 25 training_loop """lcwa""" +362 25 evaluator """rankbased""" +362 26 dataset """kinships""" +362 26 model """kg2e""" +362 26 loss """softplus""" +362 26 regularizer """no""" +362 26 optimizer """adadelta""" +362 26 training_loop """lcwa""" +362 26 evaluator """rankbased""" +362 27 dataset """kinships""" +362 
27 model """kg2e""" +362 27 loss """softplus""" +362 27 regularizer """no""" +362 27 optimizer """adadelta""" +362 27 training_loop """lcwa""" +362 27 evaluator """rankbased""" +362 28 dataset """kinships""" +362 28 model """kg2e""" +362 28 loss """softplus""" +362 28 regularizer """no""" +362 28 optimizer """adadelta""" +362 28 training_loop """lcwa""" +362 28 evaluator """rankbased""" +362 29 dataset """kinships""" +362 29 model """kg2e""" +362 29 loss """softplus""" +362 29 regularizer """no""" +362 29 optimizer """adadelta""" +362 29 training_loop """lcwa""" +362 29 evaluator """rankbased""" +362 30 dataset """kinships""" +362 30 model """kg2e""" +362 30 loss """softplus""" +362 30 regularizer """no""" +362 30 optimizer """adadelta""" +362 30 training_loop """lcwa""" +362 30 evaluator """rankbased""" +362 31 dataset """kinships""" +362 31 model """kg2e""" +362 31 loss """softplus""" +362 31 regularizer """no""" +362 31 optimizer """adadelta""" +362 31 training_loop """lcwa""" +362 31 evaluator """rankbased""" +362 32 dataset """kinships""" +362 32 model """kg2e""" +362 32 loss """softplus""" +362 32 regularizer """no""" +362 32 optimizer """adadelta""" +362 32 training_loop """lcwa""" +362 32 evaluator """rankbased""" +362 33 dataset """kinships""" +362 33 model """kg2e""" +362 33 loss """softplus""" +362 33 regularizer """no""" +362 33 optimizer """adadelta""" +362 33 training_loop """lcwa""" +362 33 evaluator """rankbased""" +362 34 dataset """kinships""" +362 34 model """kg2e""" +362 34 loss """softplus""" +362 34 regularizer """no""" +362 34 optimizer """adadelta""" +362 34 training_loop """lcwa""" +362 34 evaluator """rankbased""" +362 35 dataset """kinships""" +362 35 model """kg2e""" +362 35 loss """softplus""" +362 35 regularizer """no""" +362 35 optimizer """adadelta""" +362 35 training_loop """lcwa""" +362 35 evaluator """rankbased""" +362 36 dataset """kinships""" +362 36 model """kg2e""" +362 36 loss """softplus""" +362 36 regularizer """no""" +362 
36 optimizer """adadelta""" +362 36 training_loop """lcwa""" +362 36 evaluator """rankbased""" +362 37 dataset """kinships""" +362 37 model """kg2e""" +362 37 loss """softplus""" +362 37 regularizer """no""" +362 37 optimizer """adadelta""" +362 37 training_loop """lcwa""" +362 37 evaluator """rankbased""" +362 38 dataset """kinships""" +362 38 model """kg2e""" +362 38 loss """softplus""" +362 38 regularizer """no""" +362 38 optimizer """adadelta""" +362 38 training_loop """lcwa""" +362 38 evaluator """rankbased""" +362 39 dataset """kinships""" +362 39 model """kg2e""" +362 39 loss """softplus""" +362 39 regularizer """no""" +362 39 optimizer """adadelta""" +362 39 training_loop """lcwa""" +362 39 evaluator """rankbased""" +362 40 dataset """kinships""" +362 40 model """kg2e""" +362 40 loss """softplus""" +362 40 regularizer """no""" +362 40 optimizer """adadelta""" +362 40 training_loop """lcwa""" +362 40 evaluator """rankbased""" +362 41 dataset """kinships""" +362 41 model """kg2e""" +362 41 loss """softplus""" +362 41 regularizer """no""" +362 41 optimizer """adadelta""" +362 41 training_loop """lcwa""" +362 41 evaluator """rankbased""" +362 42 dataset """kinships""" +362 42 model """kg2e""" +362 42 loss """softplus""" +362 42 regularizer """no""" +362 42 optimizer """adadelta""" +362 42 training_loop """lcwa""" +362 42 evaluator """rankbased""" +362 43 dataset """kinships""" +362 43 model """kg2e""" +362 43 loss """softplus""" +362 43 regularizer """no""" +362 43 optimizer """adadelta""" +362 43 training_loop """lcwa""" +362 43 evaluator """rankbased""" +362 44 dataset """kinships""" +362 44 model """kg2e""" +362 44 loss """softplus""" +362 44 regularizer """no""" +362 44 optimizer """adadelta""" +362 44 training_loop """lcwa""" +362 44 evaluator """rankbased""" +362 45 dataset """kinships""" +362 45 model """kg2e""" +362 45 loss """softplus""" +362 45 regularizer """no""" +362 45 optimizer """adadelta""" +362 45 training_loop """lcwa""" +362 45 evaluator 
"""rankbased""" +362 46 dataset """kinships""" +362 46 model """kg2e""" +362 46 loss """softplus""" +362 46 regularizer """no""" +362 46 optimizer """adadelta""" +362 46 training_loop """lcwa""" +362 46 evaluator """rankbased""" +362 47 dataset """kinships""" +362 47 model """kg2e""" +362 47 loss """softplus""" +362 47 regularizer """no""" +362 47 optimizer """adadelta""" +362 47 training_loop """lcwa""" +362 47 evaluator """rankbased""" +362 48 dataset """kinships""" +362 48 model """kg2e""" +362 48 loss """softplus""" +362 48 regularizer """no""" +362 48 optimizer """adadelta""" +362 48 training_loop """lcwa""" +362 48 evaluator """rankbased""" +362 49 dataset """kinships""" +362 49 model """kg2e""" +362 49 loss """softplus""" +362 49 regularizer """no""" +362 49 optimizer """adadelta""" +362 49 training_loop """lcwa""" +362 49 evaluator """rankbased""" +362 50 dataset """kinships""" +362 50 model """kg2e""" +362 50 loss """softplus""" +362 50 regularizer """no""" +362 50 optimizer """adadelta""" +362 50 training_loop """lcwa""" +362 50 evaluator """rankbased""" +362 51 dataset """kinships""" +362 51 model """kg2e""" +362 51 loss """softplus""" +362 51 regularizer """no""" +362 51 optimizer """adadelta""" +362 51 training_loop """lcwa""" +362 51 evaluator """rankbased""" +362 52 dataset """kinships""" +362 52 model """kg2e""" +362 52 loss """softplus""" +362 52 regularizer """no""" +362 52 optimizer """adadelta""" +362 52 training_loop """lcwa""" +362 52 evaluator """rankbased""" +362 53 dataset """kinships""" +362 53 model """kg2e""" +362 53 loss """softplus""" +362 53 regularizer """no""" +362 53 optimizer """adadelta""" +362 53 training_loop """lcwa""" +362 53 evaluator """rankbased""" +362 54 dataset """kinships""" +362 54 model """kg2e""" +362 54 loss """softplus""" +362 54 regularizer """no""" +362 54 optimizer """adadelta""" +362 54 training_loop """lcwa""" +362 54 evaluator """rankbased""" +362 55 dataset """kinships""" +362 55 model """kg2e""" +362 55 
loss """softplus""" +362 55 regularizer """no""" +362 55 optimizer """adadelta""" +362 55 training_loop """lcwa""" +362 55 evaluator """rankbased""" +362 56 dataset """kinships""" +362 56 model """kg2e""" +362 56 loss """softplus""" +362 56 regularizer """no""" +362 56 optimizer """adadelta""" +362 56 training_loop """lcwa""" +362 56 evaluator """rankbased""" +362 57 dataset """kinships""" +362 57 model """kg2e""" +362 57 loss """softplus""" +362 57 regularizer """no""" +362 57 optimizer """adadelta""" +362 57 training_loop """lcwa""" +362 57 evaluator """rankbased""" +362 58 dataset """kinships""" +362 58 model """kg2e""" +362 58 loss """softplus""" +362 58 regularizer """no""" +362 58 optimizer """adadelta""" +362 58 training_loop """lcwa""" +362 58 evaluator """rankbased""" +362 59 dataset """kinships""" +362 59 model """kg2e""" +362 59 loss """softplus""" +362 59 regularizer """no""" +362 59 optimizer """adadelta""" +362 59 training_loop """lcwa""" +362 59 evaluator """rankbased""" +362 60 dataset """kinships""" +362 60 model """kg2e""" +362 60 loss """softplus""" +362 60 regularizer """no""" +362 60 optimizer """adadelta""" +362 60 training_loop """lcwa""" +362 60 evaluator """rankbased""" +362 61 dataset """kinships""" +362 61 model """kg2e""" +362 61 loss """softplus""" +362 61 regularizer """no""" +362 61 optimizer """adadelta""" +362 61 training_loop """lcwa""" +362 61 evaluator """rankbased""" +362 62 dataset """kinships""" +362 62 model """kg2e""" +362 62 loss """softplus""" +362 62 regularizer """no""" +362 62 optimizer """adadelta""" +362 62 training_loop """lcwa""" +362 62 evaluator """rankbased""" +362 63 dataset """kinships""" +362 63 model """kg2e""" +362 63 loss """softplus""" +362 63 regularizer """no""" +362 63 optimizer """adadelta""" +362 63 training_loop """lcwa""" +362 63 evaluator """rankbased""" +362 64 dataset """kinships""" +362 64 model """kg2e""" +362 64 loss """softplus""" +362 64 regularizer """no""" +362 64 optimizer """adadelta""" 
+362 64 training_loop """lcwa""" +362 64 evaluator """rankbased""" +362 65 dataset """kinships""" +362 65 model """kg2e""" +362 65 loss """softplus""" +362 65 regularizer """no""" +362 65 optimizer """adadelta""" +362 65 training_loop """lcwa""" +362 65 evaluator """rankbased""" +362 66 dataset """kinships""" +362 66 model """kg2e""" +362 66 loss """softplus""" +362 66 regularizer """no""" +362 66 optimizer """adadelta""" +362 66 training_loop """lcwa""" +362 66 evaluator """rankbased""" +362 67 dataset """kinships""" +362 67 model """kg2e""" +362 67 loss """softplus""" +362 67 regularizer """no""" +362 67 optimizer """adadelta""" +362 67 training_loop """lcwa""" +362 67 evaluator """rankbased""" +362 68 dataset """kinships""" +362 68 model """kg2e""" +362 68 loss """softplus""" +362 68 regularizer """no""" +362 68 optimizer """adadelta""" +362 68 training_loop """lcwa""" +362 68 evaluator """rankbased""" +362 69 dataset """kinships""" +362 69 model """kg2e""" +362 69 loss """softplus""" +362 69 regularizer """no""" +362 69 optimizer """adadelta""" +362 69 training_loop """lcwa""" +362 69 evaluator """rankbased""" +362 70 dataset """kinships""" +362 70 model """kg2e""" +362 70 loss """softplus""" +362 70 regularizer """no""" +362 70 optimizer """adadelta""" +362 70 training_loop """lcwa""" +362 70 evaluator """rankbased""" +362 71 dataset """kinships""" +362 71 model """kg2e""" +362 71 loss """softplus""" +362 71 regularizer """no""" +362 71 optimizer """adadelta""" +362 71 training_loop """lcwa""" +362 71 evaluator """rankbased""" +362 72 dataset """kinships""" +362 72 model """kg2e""" +362 72 loss """softplus""" +362 72 regularizer """no""" +362 72 optimizer """adadelta""" +362 72 training_loop """lcwa""" +362 72 evaluator """rankbased""" +362 73 dataset """kinships""" +362 73 model """kg2e""" +362 73 loss """softplus""" +362 73 regularizer """no""" +362 73 optimizer """adadelta""" +362 73 training_loop """lcwa""" +362 73 evaluator """rankbased""" +362 74 dataset 
"""kinships""" +362 74 model """kg2e""" +362 74 loss """softplus""" +362 74 regularizer """no""" +362 74 optimizer """adadelta""" +362 74 training_loop """lcwa""" +362 74 evaluator """rankbased""" +362 75 dataset """kinships""" +362 75 model """kg2e""" +362 75 loss """softplus""" +362 75 regularizer """no""" +362 75 optimizer """adadelta""" +362 75 training_loop """lcwa""" +362 75 evaluator """rankbased""" +362 76 dataset """kinships""" +362 76 model """kg2e""" +362 76 loss """softplus""" +362 76 regularizer """no""" +362 76 optimizer """adadelta""" +362 76 training_loop """lcwa""" +362 76 evaluator """rankbased""" +362 77 dataset """kinships""" +362 77 model """kg2e""" +362 77 loss """softplus""" +362 77 regularizer """no""" +362 77 optimizer """adadelta""" +362 77 training_loop """lcwa""" +362 77 evaluator """rankbased""" +362 78 dataset """kinships""" +362 78 model """kg2e""" +362 78 loss """softplus""" +362 78 regularizer """no""" +362 78 optimizer """adadelta""" +362 78 training_loop """lcwa""" +362 78 evaluator """rankbased""" +362 79 dataset """kinships""" +362 79 model """kg2e""" +362 79 loss """softplus""" +362 79 regularizer """no""" +362 79 optimizer """adadelta""" +362 79 training_loop """lcwa""" +362 79 evaluator """rankbased""" +362 80 dataset """kinships""" +362 80 model """kg2e""" +362 80 loss """softplus""" +362 80 regularizer """no""" +362 80 optimizer """adadelta""" +362 80 training_loop """lcwa""" +362 80 evaluator """rankbased""" +362 81 dataset """kinships""" +362 81 model """kg2e""" +362 81 loss """softplus""" +362 81 regularizer """no""" +362 81 optimizer """adadelta""" +362 81 training_loop """lcwa""" +362 81 evaluator """rankbased""" +362 82 dataset """kinships""" +362 82 model """kg2e""" +362 82 loss """softplus""" +362 82 regularizer """no""" +362 82 optimizer """adadelta""" +362 82 training_loop """lcwa""" +362 82 evaluator """rankbased""" +362 83 dataset """kinships""" +362 83 model """kg2e""" +362 83 loss """softplus""" +362 83 
regularizer """no""" +362 83 optimizer """adadelta""" +362 83 training_loop """lcwa""" +362 83 evaluator """rankbased""" +362 84 dataset """kinships""" +362 84 model """kg2e""" +362 84 loss """softplus""" +362 84 regularizer """no""" +362 84 optimizer """adadelta""" +362 84 training_loop """lcwa""" +362 84 evaluator """rankbased""" +362 85 dataset """kinships""" +362 85 model """kg2e""" +362 85 loss """softplus""" +362 85 regularizer """no""" +362 85 optimizer """adadelta""" +362 85 training_loop """lcwa""" +362 85 evaluator """rankbased""" +362 86 dataset """kinships""" +362 86 model """kg2e""" +362 86 loss """softplus""" +362 86 regularizer """no""" +362 86 optimizer """adadelta""" +362 86 training_loop """lcwa""" +362 86 evaluator """rankbased""" +362 87 dataset """kinships""" +362 87 model """kg2e""" +362 87 loss """softplus""" +362 87 regularizer """no""" +362 87 optimizer """adadelta""" +362 87 training_loop """lcwa""" +362 87 evaluator """rankbased""" +362 88 dataset """kinships""" +362 88 model """kg2e""" +362 88 loss """softplus""" +362 88 regularizer """no""" +362 88 optimizer """adadelta""" +362 88 training_loop """lcwa""" +362 88 evaluator """rankbased""" +362 89 dataset """kinships""" +362 89 model """kg2e""" +362 89 loss """softplus""" +362 89 regularizer """no""" +362 89 optimizer """adadelta""" +362 89 training_loop """lcwa""" +362 89 evaluator """rankbased""" +362 90 dataset """kinships""" +362 90 model """kg2e""" +362 90 loss """softplus""" +362 90 regularizer """no""" +362 90 optimizer """adadelta""" +362 90 training_loop """lcwa""" +362 90 evaluator """rankbased""" +362 91 dataset """kinships""" +362 91 model """kg2e""" +362 91 loss """softplus""" +362 91 regularizer """no""" +362 91 optimizer """adadelta""" +362 91 training_loop """lcwa""" +362 91 evaluator """rankbased""" +362 92 dataset """kinships""" +362 92 model """kg2e""" +362 92 loss """softplus""" +362 92 regularizer """no""" +362 92 optimizer """adadelta""" +362 92 training_loop 
"""lcwa""" +362 92 evaluator """rankbased""" +362 93 dataset """kinships""" +362 93 model """kg2e""" +362 93 loss """softplus""" +362 93 regularizer """no""" +362 93 optimizer """adadelta""" +362 93 training_loop """lcwa""" +362 93 evaluator """rankbased""" +362 94 dataset """kinships""" +362 94 model """kg2e""" +362 94 loss """softplus""" +362 94 regularizer """no""" +362 94 optimizer """adadelta""" +362 94 training_loop """lcwa""" +362 94 evaluator """rankbased""" +362 95 dataset """kinships""" +362 95 model """kg2e""" +362 95 loss """softplus""" +362 95 regularizer """no""" +362 95 optimizer """adadelta""" +362 95 training_loop """lcwa""" +362 95 evaluator """rankbased""" +362 96 dataset """kinships""" +362 96 model """kg2e""" +362 96 loss """softplus""" +362 96 regularizer """no""" +362 96 optimizer """adadelta""" +362 96 training_loop """lcwa""" +362 96 evaluator """rankbased""" +362 97 dataset """kinships""" +362 97 model """kg2e""" +362 97 loss """softplus""" +362 97 regularizer """no""" +362 97 optimizer """adadelta""" +362 97 training_loop """lcwa""" +362 97 evaluator """rankbased""" +362 98 dataset """kinships""" +362 98 model """kg2e""" +362 98 loss """softplus""" +362 98 regularizer """no""" +362 98 optimizer """adadelta""" +362 98 training_loop """lcwa""" +362 98 evaluator """rankbased""" +362 99 dataset """kinships""" +362 99 model """kg2e""" +362 99 loss """softplus""" +362 99 regularizer """no""" +362 99 optimizer """adadelta""" +362 99 training_loop """lcwa""" +362 99 evaluator """rankbased""" +362 100 dataset """kinships""" +362 100 model """kg2e""" +362 100 loss """softplus""" +362 100 regularizer """no""" +362 100 optimizer """adadelta""" +362 100 training_loop """lcwa""" +362 100 evaluator """rankbased""" +363 1 model.embedding_dim 2.0 +363 1 model.c_min 0.09933499056604647 +363 1 model.c_max 2.3448972038959215 +363 1 training.batch_size 2.0 +363 1 training.label_smoothing 0.003038215362938959 +363 2 model.embedding_dim 1.0 +363 2 model.c_min 
0.05802036961572837 +363 2 model.c_max 5.510901971150034 +363 2 training.batch_size 1.0 +363 2 training.label_smoothing 0.653882226643606 +363 3 model.embedding_dim 1.0 +363 3 model.c_min 0.024705539024704497 +363 3 model.c_max 9.203091899812406 +363 3 training.batch_size 2.0 +363 3 training.label_smoothing 0.7026285504564244 +363 4 model.embedding_dim 1.0 +363 4 model.c_min 0.03491424289208667 +363 4 model.c_max 5.923846723407323 +363 4 training.batch_size 0.0 +363 4 training.label_smoothing 0.2680499096000988 +363 5 model.embedding_dim 0.0 +363 5 model.c_min 0.011014622307315806 +363 5 model.c_max 9.683170950442152 +363 5 training.batch_size 1.0 +363 5 training.label_smoothing 0.042027246007076746 +363 6 model.embedding_dim 1.0 +363 6 model.c_min 0.01237583204041964 +363 6 model.c_max 2.271820242915975 +363 6 training.batch_size 2.0 +363 6 training.label_smoothing 0.0019192357101209102 +363 7 model.embedding_dim 0.0 +363 7 model.c_min 0.012286238588162808 +363 7 model.c_max 5.11321888777133 +363 7 training.batch_size 0.0 +363 7 training.label_smoothing 0.04140641701511417 +363 8 model.embedding_dim 0.0 +363 8 model.c_min 0.03768527435786215 +363 8 model.c_max 7.347642367552066 +363 8 training.batch_size 0.0 +363 8 training.label_smoothing 0.9109603636704505 +363 9 model.embedding_dim 1.0 +363 9 model.c_min 0.015430918782376754 +363 9 model.c_max 4.6609201685317885 +363 9 training.batch_size 2.0 +363 9 training.label_smoothing 0.001836755768337657 +363 10 model.embedding_dim 0.0 +363 10 model.c_min 0.09018511105456434 +363 10 model.c_max 5.939918053544518 +363 10 training.batch_size 2.0 +363 10 training.label_smoothing 0.008743048823626632 +363 11 model.embedding_dim 0.0 +363 11 model.c_min 0.018299889813851854 +363 11 model.c_max 7.766139765693623 +363 11 training.batch_size 2.0 +363 11 training.label_smoothing 0.0023232791371638833 +363 12 model.embedding_dim 0.0 +363 12 model.c_min 0.011917219209405095 +363 12 model.c_max 4.959315126587629 +363 12 
training.batch_size 2.0 +363 12 training.label_smoothing 0.2946141034333271 +363 13 model.embedding_dim 0.0 +363 13 model.c_min 0.042417231902241656 +363 13 model.c_max 5.919264191066647 +363 13 training.batch_size 0.0 +363 13 training.label_smoothing 0.005393602195300572 +363 14 model.embedding_dim 0.0 +363 14 model.c_min 0.015533692021790632 +363 14 model.c_max 5.2902749104487 +363 14 training.batch_size 0.0 +363 14 training.label_smoothing 0.010479679419087678 +363 15 model.embedding_dim 2.0 +363 15 model.c_min 0.024009905135256 +363 15 model.c_max 1.7492308160928396 +363 15 training.batch_size 2.0 +363 15 training.label_smoothing 0.001371538920562379 +363 16 model.embedding_dim 1.0 +363 16 model.c_min 0.03949070666942442 +363 16 model.c_max 5.011047666579911 +363 16 training.batch_size 0.0 +363 16 training.label_smoothing 0.8838953623323569 +363 17 model.embedding_dim 0.0 +363 17 model.c_min 0.012015199282164879 +363 17 model.c_max 3.4398214128350904 +363 17 training.batch_size 2.0 +363 17 training.label_smoothing 0.04993776844327942 +363 18 model.embedding_dim 0.0 +363 18 model.c_min 0.04154374608855788 +363 18 model.c_max 9.214647519663652 +363 18 training.batch_size 1.0 +363 18 training.label_smoothing 0.3571305422509984 +363 19 model.embedding_dim 1.0 +363 19 model.c_min 0.09580466748411881 +363 19 model.c_max 1.657381709071867 +363 19 training.batch_size 0.0 +363 19 training.label_smoothing 0.0011240181563353339 +363 20 model.embedding_dim 2.0 +363 20 model.c_min 0.07557006880228835 +363 20 model.c_max 7.322127124070847 +363 20 training.batch_size 1.0 +363 20 training.label_smoothing 0.8543486782791381 +363 21 model.embedding_dim 0.0 +363 21 model.c_min 0.022546482529236748 +363 21 model.c_max 8.842848549943286 +363 21 training.batch_size 0.0 +363 21 training.label_smoothing 0.0736388196776472 +363 22 model.embedding_dim 0.0 +363 22 model.c_min 0.05300812874515776 +363 22 model.c_max 5.078925666607222 +363 22 training.batch_size 0.0 +363 22 
training.label_smoothing 0.21432868809328112 +363 23 model.embedding_dim 0.0 +363 23 model.c_min 0.01642063614520029 +363 23 model.c_max 2.7998927597514554 +363 23 training.batch_size 2.0 +363 23 training.label_smoothing 0.0010728483514493802 +363 24 model.embedding_dim 1.0 +363 24 model.c_min 0.015053249786929034 +363 24 model.c_max 1.6130341943716862 +363 24 training.batch_size 1.0 +363 24 training.label_smoothing 0.625851466869295 +363 25 model.embedding_dim 1.0 +363 25 model.c_min 0.01795545248680085 +363 25 model.c_max 1.8215003199242712 +363 25 training.batch_size 1.0 +363 25 training.label_smoothing 0.004208699020392019 +363 26 model.embedding_dim 1.0 +363 26 model.c_min 0.04592499735755287 +363 26 model.c_max 7.711861928685073 +363 26 training.batch_size 1.0 +363 26 training.label_smoothing 0.17009231967594568 +363 27 model.embedding_dim 1.0 +363 27 model.c_min 0.08662714029753596 +363 27 model.c_max 5.611911498357778 +363 27 training.batch_size 2.0 +363 27 training.label_smoothing 0.00758214808896388 +363 28 model.embedding_dim 0.0 +363 28 model.c_min 0.01277451764081367 +363 28 model.c_max 3.6059552904441756 +363 28 training.batch_size 2.0 +363 28 training.label_smoothing 0.02473568359095663 +363 29 model.embedding_dim 0.0 +363 29 model.c_min 0.055233048687026075 +363 29 model.c_max 3.4745695963242005 +363 29 training.batch_size 2.0 +363 29 training.label_smoothing 0.008725528697526075 +363 30 model.embedding_dim 2.0 +363 30 model.c_min 0.08230703073768361 +363 30 model.c_max 3.7053179625952133 +363 30 training.batch_size 1.0 +363 30 training.label_smoothing 0.06463508766708893 +363 31 model.embedding_dim 0.0 +363 31 model.c_min 0.019367088343847462 +363 31 model.c_max 6.278448162435193 +363 31 training.batch_size 0.0 +363 31 training.label_smoothing 0.006750783662917943 +363 32 model.embedding_dim 2.0 +363 32 model.c_min 0.01585848481628745 +363 32 model.c_max 5.050661916481801 +363 32 training.batch_size 1.0 +363 32 training.label_smoothing 
0.0016184933902983418 +363 33 model.embedding_dim 2.0 +363 33 model.c_min 0.028022197071810386 +363 33 model.c_max 3.433454794148202 +363 33 training.batch_size 2.0 +363 33 training.label_smoothing 0.361913249175983 +363 34 model.embedding_dim 2.0 +363 34 model.c_min 0.019000311725335107 +363 34 model.c_max 3.0230793065288486 +363 34 training.batch_size 1.0 +363 34 training.label_smoothing 0.0014301399226006068 +363 35 model.embedding_dim 0.0 +363 35 model.c_min 0.04890179604200926 +363 35 model.c_max 4.417480734400948 +363 35 training.batch_size 2.0 +363 35 training.label_smoothing 0.04579160779297806 +363 36 model.embedding_dim 0.0 +363 36 model.c_min 0.030949987064858586 +363 36 model.c_max 9.883517487037482 +363 36 training.batch_size 2.0 +363 36 training.label_smoothing 0.01398728056254228 +363 37 model.embedding_dim 2.0 +363 37 model.c_min 0.01560587698841857 +363 37 model.c_max 9.664066652018718 +363 37 training.batch_size 2.0 +363 37 training.label_smoothing 0.0036500633298697555 +363 38 model.embedding_dim 2.0 +363 38 model.c_min 0.016217014124387155 +363 38 model.c_max 1.3196988464180441 +363 38 training.batch_size 2.0 +363 38 training.label_smoothing 0.0030268780158308637 +363 39 model.embedding_dim 0.0 +363 39 model.c_min 0.031463599983423546 +363 39 model.c_max 5.6103989798736915 +363 39 training.batch_size 0.0 +363 39 training.label_smoothing 0.0022620167090347375 +363 40 model.embedding_dim 0.0 +363 40 model.c_min 0.012586705272197492 +363 40 model.c_max 8.647826368711637 +363 40 training.batch_size 0.0 +363 40 training.label_smoothing 0.0010524773816427834 +363 41 model.embedding_dim 2.0 +363 41 model.c_min 0.06528036969824656 +363 41 model.c_max 5.257838177379717 +363 41 training.batch_size 2.0 +363 41 training.label_smoothing 0.004455221942742107 +363 42 model.embedding_dim 2.0 +363 42 model.c_min 0.01852005504588883 +363 42 model.c_max 6.040971652076402 +363 42 training.batch_size 1.0 +363 42 training.label_smoothing 0.0025569945197058265 +363 43 
model.embedding_dim 1.0 +363 43 model.c_min 0.046055211002899825 +363 43 model.c_max 2.46397301060915 +363 43 training.batch_size 1.0 +363 43 training.label_smoothing 0.0022200242673604733 +363 44 model.embedding_dim 2.0 +363 44 model.c_min 0.025713517762901435 +363 44 model.c_max 7.451502150953164 +363 44 training.batch_size 2.0 +363 44 training.label_smoothing 0.10407740766495319 +363 45 model.embedding_dim 2.0 +363 45 model.c_min 0.07824341615232185 +363 45 model.c_max 5.575404702672121 +363 45 training.batch_size 0.0 +363 45 training.label_smoothing 0.03674461360252331 +363 46 model.embedding_dim 1.0 +363 46 model.c_min 0.013703015017308487 +363 46 model.c_max 5.194805122458909 +363 46 training.batch_size 1.0 +363 46 training.label_smoothing 0.018248617696377235 +363 47 model.embedding_dim 0.0 +363 47 model.c_min 0.033925793808654126 +363 47 model.c_max 3.9247186634804825 +363 47 training.batch_size 1.0 +363 47 training.label_smoothing 0.0011034513241261444 +363 48 model.embedding_dim 2.0 +363 48 model.c_min 0.02422165082811812 +363 48 model.c_max 6.615997484169418 +363 48 training.batch_size 1.0 +363 48 training.label_smoothing 0.0028681417735151146 +363 49 model.embedding_dim 1.0 +363 49 model.c_min 0.012415736529167304 +363 49 model.c_max 1.7538220940563747 +363 49 training.batch_size 2.0 +363 49 training.label_smoothing 0.0071188004026376765 +363 50 model.embedding_dim 1.0 +363 50 model.c_min 0.020164585488730045 +363 50 model.c_max 8.006445811715786 +363 50 training.batch_size 2.0 +363 50 training.label_smoothing 0.03356915677912884 +363 51 model.embedding_dim 1.0 +363 51 model.c_min 0.029385320889203644 +363 51 model.c_max 4.847858962845435 +363 51 training.batch_size 1.0 +363 51 training.label_smoothing 0.5098204939819977 +363 52 model.embedding_dim 2.0 +363 52 model.c_min 0.012279580536374951 +363 52 model.c_max 2.5172710520557775 +363 52 training.batch_size 1.0 +363 52 training.label_smoothing 0.010548412225916525 +363 53 model.embedding_dim 0.0 +363 
53 model.c_min 0.04811288864940664 +363 53 model.c_max 2.756376552389157 +363 53 training.batch_size 0.0 +363 53 training.label_smoothing 0.6962241705594565 +363 54 model.embedding_dim 2.0 +363 54 model.c_min 0.01837093993330161 +363 54 model.c_max 6.349781261712923 +363 54 training.batch_size 2.0 +363 54 training.label_smoothing 0.7047351549425886 +363 55 model.embedding_dim 1.0 +363 55 model.c_min 0.012068964094878978 +363 55 model.c_max 7.061775389581053 +363 55 training.batch_size 2.0 +363 55 training.label_smoothing 0.38742632328072063 +363 56 model.embedding_dim 2.0 +363 56 model.c_min 0.07037266106582642 +363 56 model.c_max 4.971525908539299 +363 56 training.batch_size 1.0 +363 56 training.label_smoothing 0.06370067196655818 +363 57 model.embedding_dim 0.0 +363 57 model.c_min 0.04493425974054442 +363 57 model.c_max 2.6727021081976754 +363 57 training.batch_size 0.0 +363 57 training.label_smoothing 0.009043899236614895 +363 58 model.embedding_dim 0.0 +363 58 model.c_min 0.08303156247304988 +363 58 model.c_max 8.335049073952678 +363 58 training.batch_size 1.0 +363 58 training.label_smoothing 0.876542236112241 +363 59 model.embedding_dim 2.0 +363 59 model.c_min 0.035037579759813746 +363 59 model.c_max 3.7775691138029215 +363 59 training.batch_size 0.0 +363 59 training.label_smoothing 0.567292528631332 +363 60 model.embedding_dim 0.0 +363 60 model.c_min 0.029036871728992122 +363 60 model.c_max 7.562931674044977 +363 60 training.batch_size 0.0 +363 60 training.label_smoothing 0.03139377656351768 +363 61 model.embedding_dim 0.0 +363 61 model.c_min 0.012338761642350171 +363 61 model.c_max 3.4135251312175647 +363 61 training.batch_size 2.0 +363 61 training.label_smoothing 0.41511303787093046 +363 62 model.embedding_dim 1.0 +363 62 model.c_min 0.027197309725553363 +363 62 model.c_max 8.818796506130976 +363 62 training.batch_size 2.0 +363 62 training.label_smoothing 0.0024305228264244554 +363 63 model.embedding_dim 1.0 +363 63 model.c_min 0.09290388840021803 +363 63 
model.c_max 8.581194146796562 +363 63 training.batch_size 1.0 +363 63 training.label_smoothing 0.011579982902986377 +363 64 model.embedding_dim 0.0 +363 64 model.c_min 0.014722942796315745 +363 64 model.c_max 9.262095383555843 +363 64 training.batch_size 0.0 +363 64 training.label_smoothing 0.003975977803526345 +363 65 model.embedding_dim 1.0 +363 65 model.c_min 0.09396313606064745 +363 65 model.c_max 7.64777338226354 +363 65 training.batch_size 1.0 +363 65 training.label_smoothing 0.005589941282600794 +363 66 model.embedding_dim 1.0 +363 66 model.c_min 0.02889076602248415 +363 66 model.c_max 3.4134195584882034 +363 66 training.batch_size 2.0 +363 66 training.label_smoothing 0.15814076511617042 +363 67 model.embedding_dim 1.0 +363 67 model.c_min 0.07093991875229275 +363 67 model.c_max 7.7664620847293975 +363 67 training.batch_size 1.0 +363 67 training.label_smoothing 0.0014461801892297372 +363 68 model.embedding_dim 0.0 +363 68 model.c_min 0.06583105481942439 +363 68 model.c_max 3.0153378362937993 +363 68 training.batch_size 1.0 +363 68 training.label_smoothing 0.41807991215667273 +363 69 model.embedding_dim 1.0 +363 69 model.c_min 0.015393964743032937 +363 69 model.c_max 6.0426836883467345 +363 69 training.batch_size 0.0 +363 69 training.label_smoothing 0.5743411812713036 +363 70 model.embedding_dim 2.0 +363 70 model.c_min 0.038128175573609036 +363 70 model.c_max 4.037998765279221 +363 70 training.batch_size 0.0 +363 70 training.label_smoothing 0.06909834539761613 +363 71 model.embedding_dim 2.0 +363 71 model.c_min 0.06506059007759347 +363 71 model.c_max 4.046198908734767 +363 71 training.batch_size 2.0 +363 71 training.label_smoothing 0.07186453375633277 +363 72 model.embedding_dim 2.0 +363 72 model.c_min 0.01714100094084647 +363 72 model.c_max 2.622072448679771 +363 72 training.batch_size 1.0 +363 72 training.label_smoothing 0.029443264061747635 +363 73 model.embedding_dim 1.0 +363 73 model.c_min 0.025846406431386686 +363 73 model.c_max 3.828153664461177 +363 73 
training.batch_size 2.0 +363 73 training.label_smoothing 0.0028118965348137193 +363 74 model.embedding_dim 1.0 +363 74 model.c_min 0.03026717109236388 +363 74 model.c_max 5.180320546338222 +363 74 training.batch_size 0.0 +363 74 training.label_smoothing 0.0022824047879924027 +363 75 model.embedding_dim 1.0 +363 75 model.c_min 0.06790115601493744 +363 75 model.c_max 7.25957652491679 +363 75 training.batch_size 1.0 +363 75 training.label_smoothing 0.006264625805223588 +363 76 model.embedding_dim 1.0 +363 76 model.c_min 0.06487001342087777 +363 76 model.c_max 3.4124453275019837 +363 76 training.batch_size 1.0 +363 76 training.label_smoothing 0.5165420439049976 +363 77 model.embedding_dim 0.0 +363 77 model.c_min 0.015151681893731976 +363 77 model.c_max 9.139074111955715 +363 77 training.batch_size 2.0 +363 77 training.label_smoothing 0.9843207338658505 +363 78 model.embedding_dim 0.0 +363 78 model.c_min 0.02228077551017337 +363 78 model.c_max 4.348982799927445 +363 78 training.batch_size 1.0 +363 78 training.label_smoothing 0.19711306061570402 +363 79 model.embedding_dim 0.0 +363 79 model.c_min 0.025311744180985706 +363 79 model.c_max 9.077140308219807 +363 79 training.batch_size 2.0 +363 79 training.label_smoothing 0.0025300074617691497 +363 80 model.embedding_dim 1.0 +363 80 model.c_min 0.0608692685078659 +363 80 model.c_max 8.914009592194049 +363 80 training.batch_size 2.0 +363 80 training.label_smoothing 0.05506220995562133 +363 81 model.embedding_dim 1.0 +363 81 model.c_min 0.015822901662427915 +363 81 model.c_max 4.822290630780251 +363 81 training.batch_size 1.0 +363 81 training.label_smoothing 0.07797007163277486 +363 82 model.embedding_dim 1.0 +363 82 model.c_min 0.01934086942393405 +363 82 model.c_max 6.242138845948428 +363 82 training.batch_size 1.0 +363 82 training.label_smoothing 0.24561400260237937 +363 83 model.embedding_dim 1.0 +363 83 model.c_min 0.02269042501897977 +363 83 model.c_max 7.21311365306549 +363 83 training.batch_size 1.0 +363 83 
training.label_smoothing 0.009093625896955225 +363 84 model.embedding_dim 2.0 +363 84 model.c_min 0.05122509147768248 +363 84 model.c_max 2.7080005791098913 +363 84 training.batch_size 2.0 +363 84 training.label_smoothing 0.002661648271216168 +363 85 model.embedding_dim 1.0 +363 85 model.c_min 0.09340196875402429 +363 85 model.c_max 5.802969587400995 +363 85 training.batch_size 2.0 +363 85 training.label_smoothing 0.0017925385556558913 +363 86 model.embedding_dim 1.0 +363 86 model.c_min 0.0118124641793641 +363 86 model.c_max 5.197455311180278 +363 86 training.batch_size 1.0 +363 86 training.label_smoothing 0.04140495426694059 +363 87 model.embedding_dim 1.0 +363 87 model.c_min 0.06659683698730885 +363 87 model.c_max 5.197888304107337 +363 87 training.batch_size 1.0 +363 87 training.label_smoothing 0.6076327858932163 +363 88 model.embedding_dim 0.0 +363 88 model.c_min 0.08559866566124733 +363 88 model.c_max 7.956705045242834 +363 88 training.batch_size 1.0 +363 88 training.label_smoothing 0.653111783555866 +363 89 model.embedding_dim 1.0 +363 89 model.c_min 0.02260304958915538 +363 89 model.c_max 2.4357653883411787 +363 89 training.batch_size 2.0 +363 89 training.label_smoothing 0.007022423478248057 +363 90 model.embedding_dim 2.0 +363 90 model.c_min 0.0731346922056502 +363 90 model.c_max 2.7319267466016024 +363 90 training.batch_size 0.0 +363 90 training.label_smoothing 0.06639037396521617 +363 91 model.embedding_dim 1.0 +363 91 model.c_min 0.049834731049764096 +363 91 model.c_max 2.164916367704196 +363 91 training.batch_size 0.0 +363 91 training.label_smoothing 0.8511181455339758 +363 92 model.embedding_dim 1.0 +363 92 model.c_min 0.012968066265753041 +363 92 model.c_max 6.953810167327186 +363 92 training.batch_size 2.0 +363 92 training.label_smoothing 0.019912251400102925 +363 93 model.embedding_dim 1.0 +363 93 model.c_min 0.08402644332473033 +363 93 model.c_max 6.556327846770335 +363 93 training.batch_size 0.0 +363 93 training.label_smoothing 0.01433514576457133 
+363 94 model.embedding_dim 1.0 +363 94 model.c_min 0.01959593705676274 +363 94 model.c_max 4.137722448092282 +363 94 training.batch_size 0.0 +363 94 training.label_smoothing 0.004517541935342008 +363 95 model.embedding_dim 2.0 +363 95 model.c_min 0.02596302762422766 +363 95 model.c_max 7.727006462732362 +363 95 training.batch_size 1.0 +363 95 training.label_smoothing 0.0016993053770083134 +363 96 model.embedding_dim 2.0 +363 96 model.c_min 0.019486644949544586 +363 96 model.c_max 3.204370800076617 +363 96 training.batch_size 2.0 +363 96 training.label_smoothing 0.008022888842836352 +363 97 model.embedding_dim 0.0 +363 97 model.c_min 0.04187464694721252 +363 97 model.c_max 3.782401595546073 +363 97 training.batch_size 2.0 +363 97 training.label_smoothing 0.021462954442000387 +363 98 model.embedding_dim 0.0 +363 98 model.c_min 0.03958948327836477 +363 98 model.c_max 2.4300714014261904 +363 98 training.batch_size 0.0 +363 98 training.label_smoothing 0.1661147965309233 +363 99 model.embedding_dim 2.0 +363 99 model.c_min 0.06765802642720507 +363 99 model.c_max 1.0198826699148296 +363 99 training.batch_size 0.0 +363 99 training.label_smoothing 0.0012658232017604622 +363 100 model.embedding_dim 0.0 +363 100 model.c_min 0.04104567793401712 +363 100 model.c_max 7.235835748728997 +363 100 training.batch_size 0.0 +363 100 training.label_smoothing 0.0014226717887775684 +363 1 dataset """kinships""" +363 1 model """kg2e""" +363 1 loss """bceaftersigmoid""" +363 1 regularizer """no""" +363 1 optimizer """adadelta""" +363 1 training_loop """lcwa""" +363 1 evaluator """rankbased""" +363 2 dataset """kinships""" +363 2 model """kg2e""" +363 2 loss """bceaftersigmoid""" +363 2 regularizer """no""" +363 2 optimizer """adadelta""" +363 2 training_loop """lcwa""" +363 2 evaluator """rankbased""" +363 3 dataset """kinships""" +363 3 model """kg2e""" +363 3 loss """bceaftersigmoid""" +363 3 regularizer """no""" +363 3 optimizer """adadelta""" +363 3 training_loop """lcwa""" +363 3 
evaluator """rankbased""" +363 4 dataset """kinships""" +363 4 model """kg2e""" +363 4 loss """bceaftersigmoid""" +363 4 regularizer """no""" +363 4 optimizer """adadelta""" +363 4 training_loop """lcwa""" +363 4 evaluator """rankbased""" +363 5 dataset """kinships""" +363 5 model """kg2e""" +363 5 loss """bceaftersigmoid""" +363 5 regularizer """no""" +363 5 optimizer """adadelta""" +363 5 training_loop """lcwa""" +363 5 evaluator """rankbased""" +363 6 dataset """kinships""" +363 6 model """kg2e""" +363 6 loss """bceaftersigmoid""" +363 6 regularizer """no""" +363 6 optimizer """adadelta""" +363 6 training_loop """lcwa""" +363 6 evaluator """rankbased""" +363 7 dataset """kinships""" +363 7 model """kg2e""" +363 7 loss """bceaftersigmoid""" +363 7 regularizer """no""" +363 7 optimizer """adadelta""" +363 7 training_loop """lcwa""" +363 7 evaluator """rankbased""" +363 8 dataset """kinships""" +363 8 model """kg2e""" +363 8 loss """bceaftersigmoid""" +363 8 regularizer """no""" +363 8 optimizer """adadelta""" +363 8 training_loop """lcwa""" +363 8 evaluator """rankbased""" +363 9 dataset """kinships""" +363 9 model """kg2e""" +363 9 loss """bceaftersigmoid""" +363 9 regularizer """no""" +363 9 optimizer """adadelta""" +363 9 training_loop """lcwa""" +363 9 evaluator """rankbased""" +363 10 dataset """kinships""" +363 10 model """kg2e""" +363 10 loss """bceaftersigmoid""" +363 10 regularizer """no""" +363 10 optimizer """adadelta""" +363 10 training_loop """lcwa""" +363 10 evaluator """rankbased""" +363 11 dataset """kinships""" +363 11 model """kg2e""" +363 11 loss """bceaftersigmoid""" +363 11 regularizer """no""" +363 11 optimizer """adadelta""" +363 11 training_loop """lcwa""" +363 11 evaluator """rankbased""" +363 12 dataset """kinships""" +363 12 model """kg2e""" +363 12 loss """bceaftersigmoid""" +363 12 regularizer """no""" +363 12 optimizer """adadelta""" +363 12 training_loop """lcwa""" +363 12 evaluator """rankbased""" +363 13 dataset """kinships""" +363 
13 model """kg2e""" +363 13 loss """bceaftersigmoid""" +363 13 regularizer """no""" +363 13 optimizer """adadelta""" +363 13 training_loop """lcwa""" +363 13 evaluator """rankbased""" +363 14 dataset """kinships""" +363 14 model """kg2e""" +363 14 loss """bceaftersigmoid""" +363 14 regularizer """no""" +363 14 optimizer """adadelta""" +363 14 training_loop """lcwa""" +363 14 evaluator """rankbased""" +363 15 dataset """kinships""" +363 15 model """kg2e""" +363 15 loss """bceaftersigmoid""" +363 15 regularizer """no""" +363 15 optimizer """adadelta""" +363 15 training_loop """lcwa""" +363 15 evaluator """rankbased""" +363 16 dataset """kinships""" +363 16 model """kg2e""" +363 16 loss """bceaftersigmoid""" +363 16 regularizer """no""" +363 16 optimizer """adadelta""" +363 16 training_loop """lcwa""" +363 16 evaluator """rankbased""" +363 17 dataset """kinships""" +363 17 model """kg2e""" +363 17 loss """bceaftersigmoid""" +363 17 regularizer """no""" +363 17 optimizer """adadelta""" +363 17 training_loop """lcwa""" +363 17 evaluator """rankbased""" +363 18 dataset """kinships""" +363 18 model """kg2e""" +363 18 loss """bceaftersigmoid""" +363 18 regularizer """no""" +363 18 optimizer """adadelta""" +363 18 training_loop """lcwa""" +363 18 evaluator """rankbased""" +363 19 dataset """kinships""" +363 19 model """kg2e""" +363 19 loss """bceaftersigmoid""" +363 19 regularizer """no""" +363 19 optimizer """adadelta""" +363 19 training_loop """lcwa""" +363 19 evaluator """rankbased""" +363 20 dataset """kinships""" +363 20 model """kg2e""" +363 20 loss """bceaftersigmoid""" +363 20 regularizer """no""" +363 20 optimizer """adadelta""" +363 20 training_loop """lcwa""" +363 20 evaluator """rankbased""" +363 21 dataset """kinships""" +363 21 model """kg2e""" +363 21 loss """bceaftersigmoid""" +363 21 regularizer """no""" +363 21 optimizer """adadelta""" +363 21 training_loop """lcwa""" +363 21 evaluator """rankbased""" +363 22 dataset """kinships""" +363 22 model """kg2e""" 
+363 22 loss """bceaftersigmoid""" +363 22 regularizer """no""" +363 22 optimizer """adadelta""" +363 22 training_loop """lcwa""" +363 22 evaluator """rankbased""" +363 23 dataset """kinships""" +363 23 model """kg2e""" +363 23 loss """bceaftersigmoid""" +363 23 regularizer """no""" +363 23 optimizer """adadelta""" +363 23 training_loop """lcwa""" +363 23 evaluator """rankbased""" +363 24 dataset """kinships""" +363 24 model """kg2e""" +363 24 loss """bceaftersigmoid""" +363 24 regularizer """no""" +363 24 optimizer """adadelta""" +363 24 training_loop """lcwa""" +363 24 evaluator """rankbased""" +363 25 dataset """kinships""" +363 25 model """kg2e""" +363 25 loss """bceaftersigmoid""" +363 25 regularizer """no""" +363 25 optimizer """adadelta""" +363 25 training_loop """lcwa""" +363 25 evaluator """rankbased""" +363 26 dataset """kinships""" +363 26 model """kg2e""" +363 26 loss """bceaftersigmoid""" +363 26 regularizer """no""" +363 26 optimizer """adadelta""" +363 26 training_loop """lcwa""" +363 26 evaluator """rankbased""" +363 27 dataset """kinships""" +363 27 model """kg2e""" +363 27 loss """bceaftersigmoid""" +363 27 regularizer """no""" +363 27 optimizer """adadelta""" +363 27 training_loop """lcwa""" +363 27 evaluator """rankbased""" +363 28 dataset """kinships""" +363 28 model """kg2e""" +363 28 loss """bceaftersigmoid""" +363 28 regularizer """no""" +363 28 optimizer """adadelta""" +363 28 training_loop """lcwa""" +363 28 evaluator """rankbased""" +363 29 dataset """kinships""" +363 29 model """kg2e""" +363 29 loss """bceaftersigmoid""" +363 29 regularizer """no""" +363 29 optimizer """adadelta""" +363 29 training_loop """lcwa""" +363 29 evaluator """rankbased""" +363 30 dataset """kinships""" +363 30 model """kg2e""" +363 30 loss """bceaftersigmoid""" +363 30 regularizer """no""" +363 30 optimizer """adadelta""" +363 30 training_loop """lcwa""" +363 30 evaluator """rankbased""" +363 31 dataset """kinships""" +363 31 model """kg2e""" +363 31 loss 
"""bceaftersigmoid""" +363 31 regularizer """no""" +363 31 optimizer """adadelta""" +363 31 training_loop """lcwa""" +363 31 evaluator """rankbased""" +363 32 dataset """kinships""" +363 32 model """kg2e""" +363 32 loss """bceaftersigmoid""" +363 32 regularizer """no""" +363 32 optimizer """adadelta""" +363 32 training_loop """lcwa""" +363 32 evaluator """rankbased""" +363 33 dataset """kinships""" +363 33 model """kg2e""" +363 33 loss """bceaftersigmoid""" +363 33 regularizer """no""" +363 33 optimizer """adadelta""" +363 33 training_loop """lcwa""" +363 33 evaluator """rankbased""" +363 34 dataset """kinships""" +363 34 model """kg2e""" +363 34 loss """bceaftersigmoid""" +363 34 regularizer """no""" +363 34 optimizer """adadelta""" +363 34 training_loop """lcwa""" +363 34 evaluator """rankbased""" +363 35 dataset """kinships""" +363 35 model """kg2e""" +363 35 loss """bceaftersigmoid""" +363 35 regularizer """no""" +363 35 optimizer """adadelta""" +363 35 training_loop """lcwa""" +363 35 evaluator """rankbased""" +363 36 dataset """kinships""" +363 36 model """kg2e""" +363 36 loss """bceaftersigmoid""" +363 36 regularizer """no""" +363 36 optimizer """adadelta""" +363 36 training_loop """lcwa""" +363 36 evaluator """rankbased""" +363 37 dataset """kinships""" +363 37 model """kg2e""" +363 37 loss """bceaftersigmoid""" +363 37 regularizer """no""" +363 37 optimizer """adadelta""" +363 37 training_loop """lcwa""" +363 37 evaluator """rankbased""" +363 38 dataset """kinships""" +363 38 model """kg2e""" +363 38 loss """bceaftersigmoid""" +363 38 regularizer """no""" +363 38 optimizer """adadelta""" +363 38 training_loop """lcwa""" +363 38 evaluator """rankbased""" +363 39 dataset """kinships""" +363 39 model """kg2e""" +363 39 loss """bceaftersigmoid""" +363 39 regularizer """no""" +363 39 optimizer """adadelta""" +363 39 training_loop """lcwa""" +363 39 evaluator """rankbased""" +363 40 dataset """kinships""" +363 40 model """kg2e""" +363 40 loss 
"""bceaftersigmoid""" +363 40 regularizer """no""" +363 40 optimizer """adadelta""" +363 40 training_loop """lcwa""" +363 40 evaluator """rankbased""" +363 41 dataset """kinships""" +363 41 model """kg2e""" +363 41 loss """bceaftersigmoid""" +363 41 regularizer """no""" +363 41 optimizer """adadelta""" +363 41 training_loop """lcwa""" +363 41 evaluator """rankbased""" +363 42 dataset """kinships""" +363 42 model """kg2e""" +363 42 loss """bceaftersigmoid""" +363 42 regularizer """no""" +363 42 optimizer """adadelta""" +363 42 training_loop """lcwa""" +363 42 evaluator """rankbased""" +363 43 dataset """kinships""" +363 43 model """kg2e""" +363 43 loss """bceaftersigmoid""" +363 43 regularizer """no""" +363 43 optimizer """adadelta""" +363 43 training_loop """lcwa""" +363 43 evaluator """rankbased""" +363 44 dataset """kinships""" +363 44 model """kg2e""" +363 44 loss """bceaftersigmoid""" +363 44 regularizer """no""" +363 44 optimizer """adadelta""" +363 44 training_loop """lcwa""" +363 44 evaluator """rankbased""" +363 45 dataset """kinships""" +363 45 model """kg2e""" +363 45 loss """bceaftersigmoid""" +363 45 regularizer """no""" +363 45 optimizer """adadelta""" +363 45 training_loop """lcwa""" +363 45 evaluator """rankbased""" +363 46 dataset """kinships""" +363 46 model """kg2e""" +363 46 loss """bceaftersigmoid""" +363 46 regularizer """no""" +363 46 optimizer """adadelta""" +363 46 training_loop """lcwa""" +363 46 evaluator """rankbased""" +363 47 dataset """kinships""" +363 47 model """kg2e""" +363 47 loss """bceaftersigmoid""" +363 47 regularizer """no""" +363 47 optimizer """adadelta""" +363 47 training_loop """lcwa""" +363 47 evaluator """rankbased""" +363 48 dataset """kinships""" +363 48 model """kg2e""" +363 48 loss """bceaftersigmoid""" +363 48 regularizer """no""" +363 48 optimizer """adadelta""" +363 48 training_loop """lcwa""" +363 48 evaluator """rankbased""" +363 49 dataset """kinships""" +363 49 model """kg2e""" +363 49 loss 
"""bceaftersigmoid""" +363 49 regularizer """no""" +363 49 optimizer """adadelta""" +363 49 training_loop """lcwa""" +363 49 evaluator """rankbased""" +363 50 dataset """kinships""" +363 50 model """kg2e""" +363 50 loss """bceaftersigmoid""" +363 50 regularizer """no""" +363 50 optimizer """adadelta""" +363 50 training_loop """lcwa""" +363 50 evaluator """rankbased""" +363 51 dataset """kinships""" +363 51 model """kg2e""" +363 51 loss """bceaftersigmoid""" +363 51 regularizer """no""" +363 51 optimizer """adadelta""" +363 51 training_loop """lcwa""" +363 51 evaluator """rankbased""" +363 52 dataset """kinships""" +363 52 model """kg2e""" +363 52 loss """bceaftersigmoid""" +363 52 regularizer """no""" +363 52 optimizer """adadelta""" +363 52 training_loop """lcwa""" +363 52 evaluator """rankbased""" +363 53 dataset """kinships""" +363 53 model """kg2e""" +363 53 loss """bceaftersigmoid""" +363 53 regularizer """no""" +363 53 optimizer """adadelta""" +363 53 training_loop """lcwa""" +363 53 evaluator """rankbased""" +363 54 dataset """kinships""" +363 54 model """kg2e""" +363 54 loss """bceaftersigmoid""" +363 54 regularizer """no""" +363 54 optimizer """adadelta""" +363 54 training_loop """lcwa""" +363 54 evaluator """rankbased""" +363 55 dataset """kinships""" +363 55 model """kg2e""" +363 55 loss """bceaftersigmoid""" +363 55 regularizer """no""" +363 55 optimizer """adadelta""" +363 55 training_loop """lcwa""" +363 55 evaluator """rankbased""" +363 56 dataset """kinships""" +363 56 model """kg2e""" +363 56 loss """bceaftersigmoid""" +363 56 regularizer """no""" +363 56 optimizer """adadelta""" +363 56 training_loop """lcwa""" +363 56 evaluator """rankbased""" +363 57 dataset """kinships""" +363 57 model """kg2e""" +363 57 loss """bceaftersigmoid""" +363 57 regularizer """no""" +363 57 optimizer """adadelta""" +363 57 training_loop """lcwa""" +363 57 evaluator """rankbased""" +363 58 dataset """kinships""" +363 58 model """kg2e""" +363 58 loss 
"""bceaftersigmoid""" +363 58 regularizer """no""" +363 58 optimizer """adadelta""" +363 58 training_loop """lcwa""" +363 58 evaluator """rankbased""" +363 59 dataset """kinships""" +363 59 model """kg2e""" +363 59 loss """bceaftersigmoid""" +363 59 regularizer """no""" +363 59 optimizer """adadelta""" +363 59 training_loop """lcwa""" +363 59 evaluator """rankbased""" +363 60 dataset """kinships""" +363 60 model """kg2e""" +363 60 loss """bceaftersigmoid""" +363 60 regularizer """no""" +363 60 optimizer """adadelta""" +363 60 training_loop """lcwa""" +363 60 evaluator """rankbased""" +363 61 dataset """kinships""" +363 61 model """kg2e""" +363 61 loss """bceaftersigmoid""" +363 61 regularizer """no""" +363 61 optimizer """adadelta""" +363 61 training_loop """lcwa""" +363 61 evaluator """rankbased""" +363 62 dataset """kinships""" +363 62 model """kg2e""" +363 62 loss """bceaftersigmoid""" +363 62 regularizer """no""" +363 62 optimizer """adadelta""" +363 62 training_loop """lcwa""" +363 62 evaluator """rankbased""" +363 63 dataset """kinships""" +363 63 model """kg2e""" +363 63 loss """bceaftersigmoid""" +363 63 regularizer """no""" +363 63 optimizer """adadelta""" +363 63 training_loop """lcwa""" +363 63 evaluator """rankbased""" +363 64 dataset """kinships""" +363 64 model """kg2e""" +363 64 loss """bceaftersigmoid""" +363 64 regularizer """no""" +363 64 optimizer """adadelta""" +363 64 training_loop """lcwa""" +363 64 evaluator """rankbased""" +363 65 dataset """kinships""" +363 65 model """kg2e""" +363 65 loss """bceaftersigmoid""" +363 65 regularizer """no""" +363 65 optimizer """adadelta""" +363 65 training_loop """lcwa""" +363 65 evaluator """rankbased""" +363 66 dataset """kinships""" +363 66 model """kg2e""" +363 66 loss """bceaftersigmoid""" +363 66 regularizer """no""" +363 66 optimizer """adadelta""" +363 66 training_loop """lcwa""" +363 66 evaluator """rankbased""" +363 67 dataset """kinships""" +363 67 model """kg2e""" +363 67 loss 
"""bceaftersigmoid""" +363 67 regularizer """no""" +363 67 optimizer """adadelta""" +363 67 training_loop """lcwa""" +363 67 evaluator """rankbased""" +363 68 dataset """kinships""" +363 68 model """kg2e""" +363 68 loss """bceaftersigmoid""" +363 68 regularizer """no""" +363 68 optimizer """adadelta""" +363 68 training_loop """lcwa""" +363 68 evaluator """rankbased""" +363 69 dataset """kinships""" +363 69 model """kg2e""" +363 69 loss """bceaftersigmoid""" +363 69 regularizer """no""" +363 69 optimizer """adadelta""" +363 69 training_loop """lcwa""" +363 69 evaluator """rankbased""" +363 70 dataset """kinships""" +363 70 model """kg2e""" +363 70 loss """bceaftersigmoid""" +363 70 regularizer """no""" +363 70 optimizer """adadelta""" +363 70 training_loop """lcwa""" +363 70 evaluator """rankbased""" +363 71 dataset """kinships""" +363 71 model """kg2e""" +363 71 loss """bceaftersigmoid""" +363 71 regularizer """no""" +363 71 optimizer """adadelta""" +363 71 training_loop """lcwa""" +363 71 evaluator """rankbased""" +363 72 dataset """kinships""" +363 72 model """kg2e""" +363 72 loss """bceaftersigmoid""" +363 72 regularizer """no""" +363 72 optimizer """adadelta""" +363 72 training_loop """lcwa""" +363 72 evaluator """rankbased""" +363 73 dataset """kinships""" +363 73 model """kg2e""" +363 73 loss """bceaftersigmoid""" +363 73 regularizer """no""" +363 73 optimizer """adadelta""" +363 73 training_loop """lcwa""" +363 73 evaluator """rankbased""" +363 74 dataset """kinships""" +363 74 model """kg2e""" +363 74 loss """bceaftersigmoid""" +363 74 regularizer """no""" +363 74 optimizer """adadelta""" +363 74 training_loop """lcwa""" +363 74 evaluator """rankbased""" +363 75 dataset """kinships""" +363 75 model """kg2e""" +363 75 loss """bceaftersigmoid""" +363 75 regularizer """no""" +363 75 optimizer """adadelta""" +363 75 training_loop """lcwa""" +363 75 evaluator """rankbased""" +363 76 dataset """kinships""" +363 76 model """kg2e""" +363 76 loss 
"""bceaftersigmoid""" +363 76 regularizer """no""" +363 76 optimizer """adadelta""" +363 76 training_loop """lcwa""" +363 76 evaluator """rankbased""" +363 77 dataset """kinships""" +363 77 model """kg2e""" +363 77 loss """bceaftersigmoid""" +363 77 regularizer """no""" +363 77 optimizer """adadelta""" +363 77 training_loop """lcwa""" +363 77 evaluator """rankbased""" +363 78 dataset """kinships""" +363 78 model """kg2e""" +363 78 loss """bceaftersigmoid""" +363 78 regularizer """no""" +363 78 optimizer """adadelta""" +363 78 training_loop """lcwa""" +363 78 evaluator """rankbased""" +363 79 dataset """kinships""" +363 79 model """kg2e""" +363 79 loss """bceaftersigmoid""" +363 79 regularizer """no""" +363 79 optimizer """adadelta""" +363 79 training_loop """lcwa""" +363 79 evaluator """rankbased""" +363 80 dataset """kinships""" +363 80 model """kg2e""" +363 80 loss """bceaftersigmoid""" +363 80 regularizer """no""" +363 80 optimizer """adadelta""" +363 80 training_loop """lcwa""" +363 80 evaluator """rankbased""" +363 81 dataset """kinships""" +363 81 model """kg2e""" +363 81 loss """bceaftersigmoid""" +363 81 regularizer """no""" +363 81 optimizer """adadelta""" +363 81 training_loop """lcwa""" +363 81 evaluator """rankbased""" +363 82 dataset """kinships""" +363 82 model """kg2e""" +363 82 loss """bceaftersigmoid""" +363 82 regularizer """no""" +363 82 optimizer """adadelta""" +363 82 training_loop """lcwa""" +363 82 evaluator """rankbased""" +363 83 dataset """kinships""" +363 83 model """kg2e""" +363 83 loss """bceaftersigmoid""" +363 83 regularizer """no""" +363 83 optimizer """adadelta""" +363 83 training_loop """lcwa""" +363 83 evaluator """rankbased""" +363 84 dataset """kinships""" +363 84 model """kg2e""" +363 84 loss """bceaftersigmoid""" +363 84 regularizer """no""" +363 84 optimizer """adadelta""" +363 84 training_loop """lcwa""" +363 84 evaluator """rankbased""" +363 85 dataset """kinships""" +363 85 model """kg2e""" +363 85 loss 
"""bceaftersigmoid""" +363 85 regularizer """no""" +363 85 optimizer """adadelta""" +363 85 training_loop """lcwa""" +363 85 evaluator """rankbased""" +363 86 dataset """kinships""" +363 86 model """kg2e""" +363 86 loss """bceaftersigmoid""" +363 86 regularizer """no""" +363 86 optimizer """adadelta""" +363 86 training_loop """lcwa""" +363 86 evaluator """rankbased""" +363 87 dataset """kinships""" +363 87 model """kg2e""" +363 87 loss """bceaftersigmoid""" +363 87 regularizer """no""" +363 87 optimizer """adadelta""" +363 87 training_loop """lcwa""" +363 87 evaluator """rankbased""" +363 88 dataset """kinships""" +363 88 model """kg2e""" +363 88 loss """bceaftersigmoid""" +363 88 regularizer """no""" +363 88 optimizer """adadelta""" +363 88 training_loop """lcwa""" +363 88 evaluator """rankbased""" +363 89 dataset """kinships""" +363 89 model """kg2e""" +363 89 loss """bceaftersigmoid""" +363 89 regularizer """no""" +363 89 optimizer """adadelta""" +363 89 training_loop """lcwa""" +363 89 evaluator """rankbased""" +363 90 dataset """kinships""" +363 90 model """kg2e""" +363 90 loss """bceaftersigmoid""" +363 90 regularizer """no""" +363 90 optimizer """adadelta""" +363 90 training_loop """lcwa""" +363 90 evaluator """rankbased""" +363 91 dataset """kinships""" +363 91 model """kg2e""" +363 91 loss """bceaftersigmoid""" +363 91 regularizer """no""" +363 91 optimizer """adadelta""" +363 91 training_loop """lcwa""" +363 91 evaluator """rankbased""" +363 92 dataset """kinships""" +363 92 model """kg2e""" +363 92 loss """bceaftersigmoid""" +363 92 regularizer """no""" +363 92 optimizer """adadelta""" +363 92 training_loop """lcwa""" +363 92 evaluator """rankbased""" +363 93 dataset """kinships""" +363 93 model """kg2e""" +363 93 loss """bceaftersigmoid""" +363 93 regularizer """no""" +363 93 optimizer """adadelta""" +363 93 training_loop """lcwa""" +363 93 evaluator """rankbased""" +363 94 dataset """kinships""" +363 94 model """kg2e""" +363 94 loss 
"""bceaftersigmoid""" +363 94 regularizer """no""" +363 94 optimizer """adadelta""" +363 94 training_loop """lcwa""" +363 94 evaluator """rankbased""" +363 95 dataset """kinships""" +363 95 model """kg2e""" +363 95 loss """bceaftersigmoid""" +363 95 regularizer """no""" +363 95 optimizer """adadelta""" +363 95 training_loop """lcwa""" +363 95 evaluator """rankbased""" +363 96 dataset """kinships""" +363 96 model """kg2e""" +363 96 loss """bceaftersigmoid""" +363 96 regularizer """no""" +363 96 optimizer """adadelta""" +363 96 training_loop """lcwa""" +363 96 evaluator """rankbased""" +363 97 dataset """kinships""" +363 97 model """kg2e""" +363 97 loss """bceaftersigmoid""" +363 97 regularizer """no""" +363 97 optimizer """adadelta""" +363 97 training_loop """lcwa""" +363 97 evaluator """rankbased""" +363 98 dataset """kinships""" +363 98 model """kg2e""" +363 98 loss """bceaftersigmoid""" +363 98 regularizer """no""" +363 98 optimizer """adadelta""" +363 98 training_loop """lcwa""" +363 98 evaluator """rankbased""" +363 99 dataset """kinships""" +363 99 model """kg2e""" +363 99 loss """bceaftersigmoid""" +363 99 regularizer """no""" +363 99 optimizer """adadelta""" +363 99 training_loop """lcwa""" +363 99 evaluator """rankbased""" +363 100 dataset """kinships""" +363 100 model """kg2e""" +363 100 loss """bceaftersigmoid""" +363 100 regularizer """no""" +363 100 optimizer """adadelta""" +363 100 training_loop """lcwa""" +363 100 evaluator """rankbased""" +364 1 model.embedding_dim 1.0 +364 1 model.c_min 0.08640722166792744 +364 1 model.c_max 6.638687397515823 +364 1 training.batch_size 0.0 +364 1 training.label_smoothing 0.24704671226107916 +364 2 model.embedding_dim 2.0 +364 2 model.c_min 0.09687879818939844 +364 2 model.c_max 6.501284228032481 +364 2 training.batch_size 2.0 +364 2 training.label_smoothing 0.004761259745097078 +364 3 model.embedding_dim 1.0 +364 3 model.c_min 0.029236065022229703 +364 3 model.c_max 3.4478160700696376 +364 3 training.batch_size 2.0 
+364 3 training.label_smoothing 0.05837952962436684 +364 4 model.embedding_dim 2.0 +364 4 model.c_min 0.021101344407715904 +364 4 model.c_max 9.189921760561358 +364 4 training.batch_size 1.0 +364 4 training.label_smoothing 0.8194573527842536 +364 5 model.embedding_dim 0.0 +364 5 model.c_min 0.032202914158302114 +364 5 model.c_max 1.8819223932063514 +364 5 training.batch_size 2.0 +364 5 training.label_smoothing 0.007342748812191454 +364 6 model.embedding_dim 0.0 +364 6 model.c_min 0.017804147251825206 +364 6 model.c_max 8.22729411804973 +364 6 training.batch_size 2.0 +364 6 training.label_smoothing 0.32556386417546 +364 7 model.embedding_dim 1.0 +364 7 model.c_min 0.09785194704024075 +364 7 model.c_max 7.30174735118078 +364 7 training.batch_size 0.0 +364 7 training.label_smoothing 0.001638410531590959 +364 8 model.embedding_dim 1.0 +364 8 model.c_min 0.07238225819291973 +364 8 model.c_max 7.308614683847428 +364 8 training.batch_size 0.0 +364 8 training.label_smoothing 0.009848698070232295 +364 9 model.embedding_dim 0.0 +364 9 model.c_min 0.03701042776326455 +364 9 model.c_max 9.260184865603591 +364 9 training.batch_size 1.0 +364 9 training.label_smoothing 0.003921755687084242 +364 10 model.embedding_dim 1.0 +364 10 model.c_min 0.06888113039034337 +364 10 model.c_max 1.1826226005605882 +364 10 training.batch_size 1.0 +364 10 training.label_smoothing 0.008943734073013871 +364 11 model.embedding_dim 2.0 +364 11 model.c_min 0.030754121605557454 +364 11 model.c_max 9.894219022405702 +364 11 training.batch_size 2.0 +364 11 training.label_smoothing 0.004423125713276038 +364 12 model.embedding_dim 1.0 +364 12 model.c_min 0.04152958337758014 +364 12 model.c_max 4.949346997825125 +364 12 training.batch_size 2.0 +364 12 training.label_smoothing 0.14263347095808537 +364 13 model.embedding_dim 1.0 +364 13 model.c_min 0.027656434551212413 +364 13 model.c_max 2.2277340673295427 +364 13 training.batch_size 0.0 +364 13 training.label_smoothing 0.0910237254724869 +364 14 
model.embedding_dim 0.0 +364 14 model.c_min 0.035861923334555394 +364 14 model.c_max 8.726206995062672 +364 14 training.batch_size 1.0 +364 14 training.label_smoothing 0.014086586455857638 +364 15 model.embedding_dim 0.0 +364 15 model.c_min 0.035335016151448996 +364 15 model.c_max 4.59460482076711 +364 15 training.batch_size 2.0 +364 15 training.label_smoothing 0.025812726477491135 +364 16 model.embedding_dim 0.0 +364 16 model.c_min 0.027730705737782083 +364 16 model.c_max 1.9504578798532535 +364 16 training.batch_size 0.0 +364 16 training.label_smoothing 0.06149564815935384 +364 17 model.embedding_dim 2.0 +364 17 model.c_min 0.034602656666969454 +364 17 model.c_max 7.31168713668667 +364 17 training.batch_size 1.0 +364 17 training.label_smoothing 0.2577693742972557 +364 18 model.embedding_dim 1.0 +364 18 model.c_min 0.01574534581885868 +364 18 model.c_max 8.730843777603576 +364 18 training.batch_size 1.0 +364 18 training.label_smoothing 0.07292410496806984 +364 19 model.embedding_dim 0.0 +364 19 model.c_min 0.05708334206712572 +364 19 model.c_max 4.347012530153966 +364 19 training.batch_size 1.0 +364 19 training.label_smoothing 0.8509541443047302 +364 20 model.embedding_dim 1.0 +364 20 model.c_min 0.04039723835275029 +364 20 model.c_max 6.405341267818482 +364 20 training.batch_size 1.0 +364 20 training.label_smoothing 0.0222248027714023 +364 21 model.embedding_dim 0.0 +364 21 model.c_min 0.023341601032134417 +364 21 model.c_max 3.3066720682101645 +364 21 training.batch_size 2.0 +364 21 training.label_smoothing 0.004489597764630146 +364 22 model.embedding_dim 0.0 +364 22 model.c_min 0.05699455409636393 +364 22 model.c_max 7.829543720339116 +364 22 training.batch_size 1.0 +364 22 training.label_smoothing 0.02122900502162775 +364 23 model.embedding_dim 1.0 +364 23 model.c_min 0.010583848153939684 +364 23 model.c_max 6.42170279931494 +364 23 training.batch_size 0.0 +364 23 training.label_smoothing 0.004113488164870591 +364 24 model.embedding_dim 2.0 +364 24 model.c_min 
0.04884801647567288 +364 24 model.c_max 8.821617027885058 +364 24 training.batch_size 2.0 +364 24 training.label_smoothing 0.0011008374074915194 +364 25 model.embedding_dim 1.0 +364 25 model.c_min 0.022563184483554453 +364 25 model.c_max 8.40889925930677 +364 25 training.batch_size 1.0 +364 25 training.label_smoothing 0.45411728052763767 +364 26 model.embedding_dim 2.0 +364 26 model.c_min 0.019076171912670872 +364 26 model.c_max 9.491461232156588 +364 26 training.batch_size 0.0 +364 26 training.label_smoothing 0.06628824540191071 +364 27 model.embedding_dim 1.0 +364 27 model.c_min 0.04189485578742892 +364 27 model.c_max 2.9192213509619096 +364 27 training.batch_size 2.0 +364 27 training.label_smoothing 0.12401679932459561 +364 28 model.embedding_dim 0.0 +364 28 model.c_min 0.056452345974771306 +364 28 model.c_max 2.4973904686812984 +364 28 training.batch_size 1.0 +364 28 training.label_smoothing 0.016390548272438923 +364 29 model.embedding_dim 2.0 +364 29 model.c_min 0.020300731655909933 +364 29 model.c_max 8.741953038480922 +364 29 training.batch_size 2.0 +364 29 training.label_smoothing 0.0026364626568008975 +364 30 model.embedding_dim 2.0 +364 30 model.c_min 0.04013504422904811 +364 30 model.c_max 9.201747302334542 +364 30 training.batch_size 1.0 +364 30 training.label_smoothing 0.03064749108626703 +364 31 model.embedding_dim 2.0 +364 31 model.c_min 0.03098264464500282 +364 31 model.c_max 1.0796079229969493 +364 31 training.batch_size 2.0 +364 31 training.label_smoothing 0.14153590434133936 +364 32 model.embedding_dim 1.0 +364 32 model.c_min 0.015376761738364676 +364 32 model.c_max 7.200137369195964 +364 32 training.batch_size 2.0 +364 32 training.label_smoothing 0.2617440686875657 +364 33 model.embedding_dim 1.0 +364 33 model.c_min 0.019948372756639167 +364 33 model.c_max 7.482516484578908 +364 33 training.batch_size 0.0 +364 33 training.label_smoothing 0.06201099205544809 +364 34 model.embedding_dim 2.0 +364 34 model.c_min 0.07962799704564932 +364 34 
model.c_max 8.85270032678806 +364 34 training.batch_size 2.0 +364 34 training.label_smoothing 0.26271837862799857 +364 35 model.embedding_dim 1.0 +364 35 model.c_min 0.08456312400409104 +364 35 model.c_max 4.746007320306035 +364 35 training.batch_size 1.0 +364 35 training.label_smoothing 0.0011224842725436465 +364 36 model.embedding_dim 2.0 +364 36 model.c_min 0.013132466984867775 +364 36 model.c_max 7.5346269650226265 +364 36 training.batch_size 1.0 +364 36 training.label_smoothing 0.042201940319472726 +364 37 model.embedding_dim 0.0 +364 37 model.c_min 0.03257380524606163 +364 37 model.c_max 8.170072707319743 +364 37 training.batch_size 1.0 +364 37 training.label_smoothing 0.005674677312625964 +364 38 model.embedding_dim 1.0 +364 38 model.c_min 0.07881342066978587 +364 38 model.c_max 3.7002774683573887 +364 38 training.batch_size 2.0 +364 38 training.label_smoothing 0.007034866733948484 +364 39 model.embedding_dim 2.0 +364 39 model.c_min 0.09672559320631141 +364 39 model.c_max 4.540179794193676 +364 39 training.batch_size 1.0 +364 39 training.label_smoothing 0.004935065692317493 +364 40 model.embedding_dim 2.0 +364 40 model.c_min 0.052745860839689386 +364 40 model.c_max 6.039210981191089 +364 40 training.batch_size 0.0 +364 40 training.label_smoothing 0.0029140971721496045 +364 41 model.embedding_dim 0.0 +364 41 model.c_min 0.02691396939885695 +364 41 model.c_max 4.438447397517679 +364 41 training.batch_size 0.0 +364 41 training.label_smoothing 0.4166853988250199 +364 42 model.embedding_dim 2.0 +364 42 model.c_min 0.042998971990447436 +364 42 model.c_max 8.808443475348163 +364 42 training.batch_size 1.0 +364 42 training.label_smoothing 0.6122082862462911 +364 43 model.embedding_dim 1.0 +364 43 model.c_min 0.02414597813557351 +364 43 model.c_max 9.500774919311915 +364 43 training.batch_size 0.0 +364 43 training.label_smoothing 0.7962805454328431 +364 44 model.embedding_dim 1.0 +364 44 model.c_min 0.02722505488347424 +364 44 model.c_max 4.495888322963153 +364 44 
training.batch_size 0.0 +364 44 training.label_smoothing 0.22224392840342966 +364 45 model.embedding_dim 2.0 +364 45 model.c_min 0.010831195869078188 +364 45 model.c_max 8.801343501688757 +364 45 training.batch_size 1.0 +364 45 training.label_smoothing 0.00971802424212379 +364 46 model.embedding_dim 2.0 +364 46 model.c_min 0.05886166353299768 +364 46 model.c_max 4.359915897514502 +364 46 training.batch_size 1.0 +364 46 training.label_smoothing 0.08634064151199107 +364 47 model.embedding_dim 2.0 +364 47 model.c_min 0.023773406011237026 +364 47 model.c_max 7.6174057667856205 +364 47 training.batch_size 0.0 +364 47 training.label_smoothing 0.0012403558943494897 +364 48 model.embedding_dim 1.0 +364 48 model.c_min 0.045465151000177575 +364 48 model.c_max 2.757695169887352 +364 48 training.batch_size 2.0 +364 48 training.label_smoothing 0.09024974491294487 +364 49 model.embedding_dim 2.0 +364 49 model.c_min 0.016922872815040905 +364 49 model.c_max 1.1344110397610025 +364 49 training.batch_size 1.0 +364 49 training.label_smoothing 0.007247738974996039 +364 50 model.embedding_dim 1.0 +364 50 model.c_min 0.02460604529816296 +364 50 model.c_max 3.5978168360863725 +364 50 training.batch_size 0.0 +364 50 training.label_smoothing 0.2882616729804309 +364 51 model.embedding_dim 0.0 +364 51 model.c_min 0.031062712027417203 +364 51 model.c_max 5.90384396016971 +364 51 training.batch_size 2.0 +364 51 training.label_smoothing 0.013671413775931809 +364 52 model.embedding_dim 0.0 +364 52 model.c_min 0.010491657947569143 +364 52 model.c_max 6.752898254900534 +364 52 training.batch_size 1.0 +364 52 training.label_smoothing 0.0038966664108084576 +364 53 model.embedding_dim 1.0 +364 53 model.c_min 0.015556661542406284 +364 53 model.c_max 5.330655667466596 +364 53 training.batch_size 2.0 +364 53 training.label_smoothing 0.030221832940734276 +364 54 model.embedding_dim 2.0 +364 54 model.c_min 0.026481619409515884 +364 54 model.c_max 3.446296603219861 +364 54 training.batch_size 0.0 +364 54 
training.label_smoothing 0.037334490135073374 +364 55 model.embedding_dim 1.0 +364 55 model.c_min 0.07177893591960288 +364 55 model.c_max 8.856420246708087 +364 55 training.batch_size 1.0 +364 55 training.label_smoothing 0.005274895091242855 +364 56 model.embedding_dim 2.0 +364 56 model.c_min 0.08530562074779613 +364 56 model.c_max 4.4435758076597285 +364 56 training.batch_size 2.0 +364 56 training.label_smoothing 0.0016882465174235506 +364 57 model.embedding_dim 0.0 +364 57 model.c_min 0.05292740692307897 +364 57 model.c_max 1.5994768107870587 +364 57 training.batch_size 0.0 +364 57 training.label_smoothing 0.09906835649499131 +364 58 model.embedding_dim 2.0 +364 58 model.c_min 0.010243642678899 +364 58 model.c_max 6.875012129546866 +364 58 training.batch_size 0.0 +364 58 training.label_smoothing 0.7284833354721723 +364 59 model.embedding_dim 0.0 +364 59 model.c_min 0.04175382868630809 +364 59 model.c_max 4.872911947613948 +364 59 training.batch_size 2.0 +364 59 training.label_smoothing 0.015763889281621285 +364 60 model.embedding_dim 1.0 +364 60 model.c_min 0.06079770973562331 +364 60 model.c_max 9.30647576831491 +364 60 training.batch_size 2.0 +364 60 training.label_smoothing 0.027701333826332677 +364 61 model.embedding_dim 0.0 +364 61 model.c_min 0.059041838488754064 +364 61 model.c_max 1.4835707968535887 +364 61 training.batch_size 0.0 +364 61 training.label_smoothing 0.5776093544101532 +364 62 model.embedding_dim 2.0 +364 62 model.c_min 0.021535794637157237 +364 62 model.c_max 1.6851436044721309 +364 62 training.batch_size 1.0 +364 62 training.label_smoothing 0.07547346317645243 +364 63 model.embedding_dim 0.0 +364 63 model.c_min 0.0866595257199191 +364 63 model.c_max 5.014023467729973 +364 63 training.batch_size 0.0 +364 63 training.label_smoothing 0.6567616168142375 +364 64 model.embedding_dim 0.0 +364 64 model.c_min 0.03881735778627205 +364 64 model.c_max 4.54156350776048 +364 64 training.batch_size 2.0 +364 64 training.label_smoothing 0.19520457442931685 
+364 65 model.embedding_dim 1.0 +364 65 model.c_min 0.01730046204175436 +364 65 model.c_max 3.9971396674776587 +364 65 training.batch_size 0.0 +364 65 training.label_smoothing 0.06992585288245945 +364 66 model.embedding_dim 1.0 +364 66 model.c_min 0.019487175350713475 +364 66 model.c_max 4.34131194795235 +364 66 training.batch_size 1.0 +364 66 training.label_smoothing 0.7518036366057249 +364 67 model.embedding_dim 1.0 +364 67 model.c_min 0.015939719819514177 +364 67 model.c_max 9.41046897218745 +364 67 training.batch_size 2.0 +364 67 training.label_smoothing 0.005747135309577173 +364 68 model.embedding_dim 1.0 +364 68 model.c_min 0.027352710614027297 +364 68 model.c_max 1.9420831982605202 +364 68 training.batch_size 0.0 +364 68 training.label_smoothing 0.009886605504644 +364 69 model.embedding_dim 2.0 +364 69 model.c_min 0.02651321188289042 +364 69 model.c_max 6.9504763705700165 +364 69 training.batch_size 2.0 +364 69 training.label_smoothing 0.16279273170715847 +364 70 model.embedding_dim 2.0 +364 70 model.c_min 0.01986951312870146 +364 70 model.c_max 5.173941279323946 +364 70 training.batch_size 2.0 +364 70 training.label_smoothing 0.11459876484942519 +364 71 model.embedding_dim 1.0 +364 71 model.c_min 0.022718798946968898 +364 71 model.c_max 3.59363486432735 +364 71 training.batch_size 2.0 +364 71 training.label_smoothing 0.023921792247960216 +364 72 model.embedding_dim 2.0 +364 72 model.c_min 0.013357477727311955 +364 72 model.c_max 1.910415266345233 +364 72 training.batch_size 1.0 +364 72 training.label_smoothing 0.47808555504673145 +364 73 model.embedding_dim 0.0 +364 73 model.c_min 0.054001769265822425 +364 73 model.c_max 4.503926946423177 +364 73 training.batch_size 0.0 +364 73 training.label_smoothing 0.0036437545073157857 +364 74 model.embedding_dim 0.0 +364 74 model.c_min 0.07097991707958577 +364 74 model.c_max 7.261607915942813 +364 74 training.batch_size 0.0 +364 74 training.label_smoothing 0.10868337334922035 +364 75 model.embedding_dim 0.0 +364 75 
model.c_min 0.05744089636950836 +364 75 model.c_max 4.438323636568252 +364 75 training.batch_size 0.0 +364 75 training.label_smoothing 0.0017653289786473622 +364 76 model.embedding_dim 2.0 +364 76 model.c_min 0.031713260034328936 +364 76 model.c_max 6.8451797276505415 +364 76 training.batch_size 2.0 +364 76 training.label_smoothing 0.008348328918576033 +364 77 model.embedding_dim 2.0 +364 77 model.c_min 0.08238944751411109 +364 77 model.c_max 9.413576418112084 +364 77 training.batch_size 1.0 +364 77 training.label_smoothing 0.003157715532444026 +364 78 model.embedding_dim 0.0 +364 78 model.c_min 0.02552147470821312 +364 78 model.c_max 3.2128692605684916 +364 78 training.batch_size 2.0 +364 78 training.label_smoothing 0.005608530721709419 +364 79 model.embedding_dim 2.0 +364 79 model.c_min 0.019849307023396774 +364 79 model.c_max 2.0678249120281205 +364 79 training.batch_size 2.0 +364 79 training.label_smoothing 0.5923206581140956 +364 80 model.embedding_dim 2.0 +364 80 model.c_min 0.016012780482138486 +364 80 model.c_max 3.48937287555297 +364 80 training.batch_size 2.0 +364 80 training.label_smoothing 0.03947534413190828 +364 81 model.embedding_dim 0.0 +364 81 model.c_min 0.012363673712258017 +364 81 model.c_max 9.35734101145947 +364 81 training.batch_size 2.0 +364 81 training.label_smoothing 0.0032707149580430465 +364 82 model.embedding_dim 2.0 +364 82 model.c_min 0.0248773636122789 +364 82 model.c_max 1.8889296341442665 +364 82 training.batch_size 2.0 +364 82 training.label_smoothing 0.0020780524829821867 +364 83 model.embedding_dim 2.0 +364 83 model.c_min 0.027768861893882286 +364 83 model.c_max 3.6062197408858943 +364 83 training.batch_size 1.0 +364 83 training.label_smoothing 0.12022255020775531 +364 84 model.embedding_dim 2.0 +364 84 model.c_min 0.028157811859057358 +364 84 model.c_max 7.594595396755179 +364 84 training.batch_size 2.0 +364 84 training.label_smoothing 0.06404563510945438 +364 85 model.embedding_dim 0.0 +364 85 model.c_min 0.021959996725670925 
+364 85 model.c_max 8.464072749744503 +364 85 training.batch_size 2.0 +364 85 training.label_smoothing 0.11641880054938991 +364 86 model.embedding_dim 1.0 +364 86 model.c_min 0.02766306310087534 +364 86 model.c_max 3.1314022935344505 +364 86 training.batch_size 2.0 +364 86 training.label_smoothing 0.001370101836957546 +364 87 model.embedding_dim 0.0 +364 87 model.c_min 0.044120197867305616 +364 87 model.c_max 5.707052334481473 +364 87 training.batch_size 2.0 +364 87 training.label_smoothing 0.013040959488385323 +364 88 model.embedding_dim 0.0 +364 88 model.c_min 0.09945952018663537 +364 88 model.c_max 8.462848782805803 +364 88 training.batch_size 0.0 +364 88 training.label_smoothing 0.05623456604668926 +364 89 model.embedding_dim 1.0 +364 89 model.c_min 0.07364964637211174 +364 89 model.c_max 3.589885533892305 +364 89 training.batch_size 0.0 +364 89 training.label_smoothing 0.0074669015578201214 +364 90 model.embedding_dim 2.0 +364 90 model.c_min 0.0209500279826476 +364 90 model.c_max 8.625847513881572 +364 90 training.batch_size 1.0 +364 90 training.label_smoothing 0.04110232551907571 +364 91 model.embedding_dim 2.0 +364 91 model.c_min 0.019456484588282617 +364 91 model.c_max 8.07556192191657 +364 91 training.batch_size 2.0 +364 91 training.label_smoothing 0.15605630387622274 +364 92 model.embedding_dim 2.0 +364 92 model.c_min 0.09301522483277495 +364 92 model.c_max 3.4075251511759967 +364 92 training.batch_size 1.0 +364 92 training.label_smoothing 0.01955333161730292 +364 93 model.embedding_dim 0.0 +364 93 model.c_min 0.06214323114477733 +364 93 model.c_max 9.07378065467866 +364 93 training.batch_size 1.0 +364 93 training.label_smoothing 0.0016967980949497985 +364 94 model.embedding_dim 0.0 +364 94 model.c_min 0.016363084233766395 +364 94 model.c_max 1.745244884059312 +364 94 training.batch_size 2.0 +364 94 training.label_smoothing 0.13742419200880804 +364 95 model.embedding_dim 0.0 +364 95 model.c_min 0.025861737850563 +364 95 model.c_max 2.4865793566113386 +364 
95 training.batch_size 0.0 +364 95 training.label_smoothing 0.11353844768988898 +364 96 model.embedding_dim 0.0 +364 96 model.c_min 0.030720802035699147 +364 96 model.c_max 1.025167202254289 +364 96 training.batch_size 2.0 +364 96 training.label_smoothing 0.24588114210501033 +364 97 model.embedding_dim 0.0 +364 97 model.c_min 0.06307571204901526 +364 97 model.c_max 9.79815688291962 +364 97 training.batch_size 0.0 +364 97 training.label_smoothing 0.19474903173758112 +364 98 model.embedding_dim 1.0 +364 98 model.c_min 0.0583029682000464 +364 98 model.c_max 4.307065696091667 +364 98 training.batch_size 1.0 +364 98 training.label_smoothing 0.04995520779700257 +364 99 model.embedding_dim 0.0 +364 99 model.c_min 0.03333205241380151 +364 99 model.c_max 9.796625877225052 +364 99 training.batch_size 1.0 +364 99 training.label_smoothing 0.08956737420788087 +364 100 model.embedding_dim 0.0 +364 100 model.c_min 0.020075970583704708 +364 100 model.c_max 9.444039426118199 +364 100 training.batch_size 0.0 +364 100 training.label_smoothing 0.025937070101919164 +364 1 dataset """kinships""" +364 1 model """kg2e""" +364 1 loss """softplus""" +364 1 regularizer """no""" +364 1 optimizer """adadelta""" +364 1 training_loop """lcwa""" +364 1 evaluator """rankbased""" +364 2 dataset """kinships""" +364 2 model """kg2e""" +364 2 loss """softplus""" +364 2 regularizer """no""" +364 2 optimizer """adadelta""" +364 2 training_loop """lcwa""" +364 2 evaluator """rankbased""" +364 3 dataset """kinships""" +364 3 model """kg2e""" +364 3 loss """softplus""" +364 3 regularizer """no""" +364 3 optimizer """adadelta""" +364 3 training_loop """lcwa""" +364 3 evaluator """rankbased""" +364 4 dataset """kinships""" +364 4 model """kg2e""" +364 4 loss """softplus""" +364 4 regularizer """no""" +364 4 optimizer """adadelta""" +364 4 training_loop """lcwa""" +364 4 evaluator """rankbased""" +364 5 dataset """kinships""" +364 5 model """kg2e""" +364 5 loss """softplus""" +364 5 regularizer """no""" +364 
5 optimizer """adadelta""" +364 5 training_loop """lcwa""" +364 5 evaluator """rankbased""" +364 6 dataset """kinships""" +364 6 model """kg2e""" +364 6 loss """softplus""" +364 6 regularizer """no""" +364 6 optimizer """adadelta""" +364 6 training_loop """lcwa""" +364 6 evaluator """rankbased""" +364 7 dataset """kinships""" +364 7 model """kg2e""" +364 7 loss """softplus""" +364 7 regularizer """no""" +364 7 optimizer """adadelta""" +364 7 training_loop """lcwa""" +364 7 evaluator """rankbased""" +364 8 dataset """kinships""" +364 8 model """kg2e""" +364 8 loss """softplus""" +364 8 regularizer """no""" +364 8 optimizer """adadelta""" +364 8 training_loop """lcwa""" +364 8 evaluator """rankbased""" +364 9 dataset """kinships""" +364 9 model """kg2e""" +364 9 loss """softplus""" +364 9 regularizer """no""" +364 9 optimizer """adadelta""" +364 9 training_loop """lcwa""" +364 9 evaluator """rankbased""" +364 10 dataset """kinships""" +364 10 model """kg2e""" +364 10 loss """softplus""" +364 10 regularizer """no""" +364 10 optimizer """adadelta""" +364 10 training_loop """lcwa""" +364 10 evaluator """rankbased""" +364 11 dataset """kinships""" +364 11 model """kg2e""" +364 11 loss """softplus""" +364 11 regularizer """no""" +364 11 optimizer """adadelta""" +364 11 training_loop """lcwa""" +364 11 evaluator """rankbased""" +364 12 dataset """kinships""" +364 12 model """kg2e""" +364 12 loss """softplus""" +364 12 regularizer """no""" +364 12 optimizer """adadelta""" +364 12 training_loop """lcwa""" +364 12 evaluator """rankbased""" +364 13 dataset """kinships""" +364 13 model """kg2e""" +364 13 loss """softplus""" +364 13 regularizer """no""" +364 13 optimizer """adadelta""" +364 13 training_loop """lcwa""" +364 13 evaluator """rankbased""" +364 14 dataset """kinships""" +364 14 model """kg2e""" +364 14 loss """softplus""" +364 14 regularizer """no""" +364 14 optimizer """adadelta""" +364 14 training_loop """lcwa""" +364 14 evaluator """rankbased""" +364 15 dataset 
"""kinships""" +364 15 model """kg2e""" +364 15 loss """softplus""" +364 15 regularizer """no""" +364 15 optimizer """adadelta""" +364 15 training_loop """lcwa""" +364 15 evaluator """rankbased""" +364 16 dataset """kinships""" +364 16 model """kg2e""" +364 16 loss """softplus""" +364 16 regularizer """no""" +364 16 optimizer """adadelta""" +364 16 training_loop """lcwa""" +364 16 evaluator """rankbased""" +364 17 dataset """kinships""" +364 17 model """kg2e""" +364 17 loss """softplus""" +364 17 regularizer """no""" +364 17 optimizer """adadelta""" +364 17 training_loop """lcwa""" +364 17 evaluator """rankbased""" +364 18 dataset """kinships""" +364 18 model """kg2e""" +364 18 loss """softplus""" +364 18 regularizer """no""" +364 18 optimizer """adadelta""" +364 18 training_loop """lcwa""" +364 18 evaluator """rankbased""" +364 19 dataset """kinships""" +364 19 model """kg2e""" +364 19 loss """softplus""" +364 19 regularizer """no""" +364 19 optimizer """adadelta""" +364 19 training_loop """lcwa""" +364 19 evaluator """rankbased""" +364 20 dataset """kinships""" +364 20 model """kg2e""" +364 20 loss """softplus""" +364 20 regularizer """no""" +364 20 optimizer """adadelta""" +364 20 training_loop """lcwa""" +364 20 evaluator """rankbased""" +364 21 dataset """kinships""" +364 21 model """kg2e""" +364 21 loss """softplus""" +364 21 regularizer """no""" +364 21 optimizer """adadelta""" +364 21 training_loop """lcwa""" +364 21 evaluator """rankbased""" +364 22 dataset """kinships""" +364 22 model """kg2e""" +364 22 loss """softplus""" +364 22 regularizer """no""" +364 22 optimizer """adadelta""" +364 22 training_loop """lcwa""" +364 22 evaluator """rankbased""" +364 23 dataset """kinships""" +364 23 model """kg2e""" +364 23 loss """softplus""" +364 23 regularizer """no""" +364 23 optimizer """adadelta""" +364 23 training_loop """lcwa""" +364 23 evaluator """rankbased""" +364 24 dataset """kinships""" +364 24 model """kg2e""" +364 24 loss """softplus""" +364 24 
regularizer """no""" +364 24 optimizer """adadelta""" +364 24 training_loop """lcwa""" +364 24 evaluator """rankbased""" +364 25 dataset """kinships""" +364 25 model """kg2e""" +364 25 loss """softplus""" +364 25 regularizer """no""" +364 25 optimizer """adadelta""" +364 25 training_loop """lcwa""" +364 25 evaluator """rankbased""" +364 26 dataset """kinships""" +364 26 model """kg2e""" +364 26 loss """softplus""" +364 26 regularizer """no""" +364 26 optimizer """adadelta""" +364 26 training_loop """lcwa""" +364 26 evaluator """rankbased""" +364 27 dataset """kinships""" +364 27 model """kg2e""" +364 27 loss """softplus""" +364 27 regularizer """no""" +364 27 optimizer """adadelta""" +364 27 training_loop """lcwa""" +364 27 evaluator """rankbased""" +364 28 dataset """kinships""" +364 28 model """kg2e""" +364 28 loss """softplus""" +364 28 regularizer """no""" +364 28 optimizer """adadelta""" +364 28 training_loop """lcwa""" +364 28 evaluator """rankbased""" +364 29 dataset """kinships""" +364 29 model """kg2e""" +364 29 loss """softplus""" +364 29 regularizer """no""" +364 29 optimizer """adadelta""" +364 29 training_loop """lcwa""" +364 29 evaluator """rankbased""" +364 30 dataset """kinships""" +364 30 model """kg2e""" +364 30 loss """softplus""" +364 30 regularizer """no""" +364 30 optimizer """adadelta""" +364 30 training_loop """lcwa""" +364 30 evaluator """rankbased""" +364 31 dataset """kinships""" +364 31 model """kg2e""" +364 31 loss """softplus""" +364 31 regularizer """no""" +364 31 optimizer """adadelta""" +364 31 training_loop """lcwa""" +364 31 evaluator """rankbased""" +364 32 dataset """kinships""" +364 32 model """kg2e""" +364 32 loss """softplus""" +364 32 regularizer """no""" +364 32 optimizer """adadelta""" +364 32 training_loop """lcwa""" +364 32 evaluator """rankbased""" +364 33 dataset """kinships""" +364 33 model """kg2e""" +364 33 loss """softplus""" +364 33 regularizer """no""" +364 33 optimizer """adadelta""" +364 33 training_loop 
"""lcwa""" +364 33 evaluator """rankbased""" +364 34 dataset """kinships""" +364 34 model """kg2e""" +364 34 loss """softplus""" +364 34 regularizer """no""" +364 34 optimizer """adadelta""" +364 34 training_loop """lcwa""" +364 34 evaluator """rankbased""" +364 35 dataset """kinships""" +364 35 model """kg2e""" +364 35 loss """softplus""" +364 35 regularizer """no""" +364 35 optimizer """adadelta""" +364 35 training_loop """lcwa""" +364 35 evaluator """rankbased""" +364 36 dataset """kinships""" +364 36 model """kg2e""" +364 36 loss """softplus""" +364 36 regularizer """no""" +364 36 optimizer """adadelta""" +364 36 training_loop """lcwa""" +364 36 evaluator """rankbased""" +364 37 dataset """kinships""" +364 37 model """kg2e""" +364 37 loss """softplus""" +364 37 regularizer """no""" +364 37 optimizer """adadelta""" +364 37 training_loop """lcwa""" +364 37 evaluator """rankbased""" +364 38 dataset """kinships""" +364 38 model """kg2e""" +364 38 loss """softplus""" +364 38 regularizer """no""" +364 38 optimizer """adadelta""" +364 38 training_loop """lcwa""" +364 38 evaluator """rankbased""" +364 39 dataset """kinships""" +364 39 model """kg2e""" +364 39 loss """softplus""" +364 39 regularizer """no""" +364 39 optimizer """adadelta""" +364 39 training_loop """lcwa""" +364 39 evaluator """rankbased""" +364 40 dataset """kinships""" +364 40 model """kg2e""" +364 40 loss """softplus""" +364 40 regularizer """no""" +364 40 optimizer """adadelta""" +364 40 training_loop """lcwa""" +364 40 evaluator """rankbased""" +364 41 dataset """kinships""" +364 41 model """kg2e""" +364 41 loss """softplus""" +364 41 regularizer """no""" +364 41 optimizer """adadelta""" +364 41 training_loop """lcwa""" +364 41 evaluator """rankbased""" +364 42 dataset """kinships""" +364 42 model """kg2e""" +364 42 loss """softplus""" +364 42 regularizer """no""" +364 42 optimizer """adadelta""" +364 42 training_loop """lcwa""" +364 42 evaluator """rankbased""" +364 43 dataset """kinships""" +364 
43 model """kg2e""" +364 43 loss """softplus""" +364 43 regularizer """no""" +364 43 optimizer """adadelta""" +364 43 training_loop """lcwa""" +364 43 evaluator """rankbased""" +364 44 dataset """kinships""" +364 44 model """kg2e""" +364 44 loss """softplus""" +364 44 regularizer """no""" +364 44 optimizer """adadelta""" +364 44 training_loop """lcwa""" +364 44 evaluator """rankbased""" +364 45 dataset """kinships""" +364 45 model """kg2e""" +364 45 loss """softplus""" +364 45 regularizer """no""" +364 45 optimizer """adadelta""" +364 45 training_loop """lcwa""" +364 45 evaluator """rankbased""" +364 46 dataset """kinships""" +364 46 model """kg2e""" +364 46 loss """softplus""" +364 46 regularizer """no""" +364 46 optimizer """adadelta""" +364 46 training_loop """lcwa""" +364 46 evaluator """rankbased""" +364 47 dataset """kinships""" +364 47 model """kg2e""" +364 47 loss """softplus""" +364 47 regularizer """no""" +364 47 optimizer """adadelta""" +364 47 training_loop """lcwa""" +364 47 evaluator """rankbased""" +364 48 dataset """kinships""" +364 48 model """kg2e""" +364 48 loss """softplus""" +364 48 regularizer """no""" +364 48 optimizer """adadelta""" +364 48 training_loop """lcwa""" +364 48 evaluator """rankbased""" +364 49 dataset """kinships""" +364 49 model """kg2e""" +364 49 loss """softplus""" +364 49 regularizer """no""" +364 49 optimizer """adadelta""" +364 49 training_loop """lcwa""" +364 49 evaluator """rankbased""" +364 50 dataset """kinships""" +364 50 model """kg2e""" +364 50 loss """softplus""" +364 50 regularizer """no""" +364 50 optimizer """adadelta""" +364 50 training_loop """lcwa""" +364 50 evaluator """rankbased""" +364 51 dataset """kinships""" +364 51 model """kg2e""" +364 51 loss """softplus""" +364 51 regularizer """no""" +364 51 optimizer """adadelta""" +364 51 training_loop """lcwa""" +364 51 evaluator """rankbased""" +364 52 dataset """kinships""" +364 52 model """kg2e""" +364 52 loss """softplus""" +364 52 regularizer """no""" +364 
52 optimizer """adadelta""" +364 52 training_loop """lcwa""" +364 52 evaluator """rankbased""" +364 53 dataset """kinships""" +364 53 model """kg2e""" +364 53 loss """softplus""" +364 53 regularizer """no""" +364 53 optimizer """adadelta""" +364 53 training_loop """lcwa""" +364 53 evaluator """rankbased""" +364 54 dataset """kinships""" +364 54 model """kg2e""" +364 54 loss """softplus""" +364 54 regularizer """no""" +364 54 optimizer """adadelta""" +364 54 training_loop """lcwa""" +364 54 evaluator """rankbased""" +364 55 dataset """kinships""" +364 55 model """kg2e""" +364 55 loss """softplus""" +364 55 regularizer """no""" +364 55 optimizer """adadelta""" +364 55 training_loop """lcwa""" +364 55 evaluator """rankbased""" +364 56 dataset """kinships""" +364 56 model """kg2e""" +364 56 loss """softplus""" +364 56 regularizer """no""" +364 56 optimizer """adadelta""" +364 56 training_loop """lcwa""" +364 56 evaluator """rankbased""" +364 57 dataset """kinships""" +364 57 model """kg2e""" +364 57 loss """softplus""" +364 57 regularizer """no""" +364 57 optimizer """adadelta""" +364 57 training_loop """lcwa""" +364 57 evaluator """rankbased""" +364 58 dataset """kinships""" +364 58 model """kg2e""" +364 58 loss """softplus""" +364 58 regularizer """no""" +364 58 optimizer """adadelta""" +364 58 training_loop """lcwa""" +364 58 evaluator """rankbased""" +364 59 dataset """kinships""" +364 59 model """kg2e""" +364 59 loss """softplus""" +364 59 regularizer """no""" +364 59 optimizer """adadelta""" +364 59 training_loop """lcwa""" +364 59 evaluator """rankbased""" +364 60 dataset """kinships""" +364 60 model """kg2e""" +364 60 loss """softplus""" +364 60 regularizer """no""" +364 60 optimizer """adadelta""" +364 60 training_loop """lcwa""" +364 60 evaluator """rankbased""" +364 61 dataset """kinships""" +364 61 model """kg2e""" +364 61 loss """softplus""" +364 61 regularizer """no""" +364 61 optimizer """adadelta""" +364 61 training_loop """lcwa""" +364 61 evaluator 
"""rankbased""" +364 62 dataset """kinships""" +364 62 model """kg2e""" +364 62 loss """softplus""" +364 62 regularizer """no""" +364 62 optimizer """adadelta""" +364 62 training_loop """lcwa""" +364 62 evaluator """rankbased""" +364 63 dataset """kinships""" +364 63 model """kg2e""" +364 63 loss """softplus""" +364 63 regularizer """no""" +364 63 optimizer """adadelta""" +364 63 training_loop """lcwa""" +364 63 evaluator """rankbased""" +364 64 dataset """kinships""" +364 64 model """kg2e""" +364 64 loss """softplus""" +364 64 regularizer """no""" +364 64 optimizer """adadelta""" +364 64 training_loop """lcwa""" +364 64 evaluator """rankbased""" +364 65 dataset """kinships""" +364 65 model """kg2e""" +364 65 loss """softplus""" +364 65 regularizer """no""" +364 65 optimizer """adadelta""" +364 65 training_loop """lcwa""" +364 65 evaluator """rankbased""" +364 66 dataset """kinships""" +364 66 model """kg2e""" +364 66 loss """softplus""" +364 66 regularizer """no""" +364 66 optimizer """adadelta""" +364 66 training_loop """lcwa""" +364 66 evaluator """rankbased""" +364 67 dataset """kinships""" +364 67 model """kg2e""" +364 67 loss """softplus""" +364 67 regularizer """no""" +364 67 optimizer """adadelta""" +364 67 training_loop """lcwa""" +364 67 evaluator """rankbased""" +364 68 dataset """kinships""" +364 68 model """kg2e""" +364 68 loss """softplus""" +364 68 regularizer """no""" +364 68 optimizer """adadelta""" +364 68 training_loop """lcwa""" +364 68 evaluator """rankbased""" +364 69 dataset """kinships""" +364 69 model """kg2e""" +364 69 loss """softplus""" +364 69 regularizer """no""" +364 69 optimizer """adadelta""" +364 69 training_loop """lcwa""" +364 69 evaluator """rankbased""" +364 70 dataset """kinships""" +364 70 model """kg2e""" +364 70 loss """softplus""" +364 70 regularizer """no""" +364 70 optimizer """adadelta""" +364 70 training_loop """lcwa""" +364 70 evaluator """rankbased""" +364 71 dataset """kinships""" +364 71 model """kg2e""" +364 71 
loss """softplus""" +364 71 regularizer """no""" +364 71 optimizer """adadelta""" +364 71 training_loop """lcwa""" +364 71 evaluator """rankbased""" +364 72 dataset """kinships""" +364 72 model """kg2e""" +364 72 loss """softplus""" +364 72 regularizer """no""" +364 72 optimizer """adadelta""" +364 72 training_loop """lcwa""" +364 72 evaluator """rankbased""" +364 73 dataset """kinships""" +364 73 model """kg2e""" +364 73 loss """softplus""" +364 73 regularizer """no""" +364 73 optimizer """adadelta""" +364 73 training_loop """lcwa""" +364 73 evaluator """rankbased""" +364 74 dataset """kinships""" +364 74 model """kg2e""" +364 74 loss """softplus""" +364 74 regularizer """no""" +364 74 optimizer """adadelta""" +364 74 training_loop """lcwa""" +364 74 evaluator """rankbased""" +364 75 dataset """kinships""" +364 75 model """kg2e""" +364 75 loss """softplus""" +364 75 regularizer """no""" +364 75 optimizer """adadelta""" +364 75 training_loop """lcwa""" +364 75 evaluator """rankbased""" +364 76 dataset """kinships""" +364 76 model """kg2e""" +364 76 loss """softplus""" +364 76 regularizer """no""" +364 76 optimizer """adadelta""" +364 76 training_loop """lcwa""" +364 76 evaluator """rankbased""" +364 77 dataset """kinships""" +364 77 model """kg2e""" +364 77 loss """softplus""" +364 77 regularizer """no""" +364 77 optimizer """adadelta""" +364 77 training_loop """lcwa""" +364 77 evaluator """rankbased""" +364 78 dataset """kinships""" +364 78 model """kg2e""" +364 78 loss """softplus""" +364 78 regularizer """no""" +364 78 optimizer """adadelta""" +364 78 training_loop """lcwa""" +364 78 evaluator """rankbased""" +364 79 dataset """kinships""" +364 79 model """kg2e""" +364 79 loss """softplus""" +364 79 regularizer """no""" +364 79 optimizer """adadelta""" +364 79 training_loop """lcwa""" +364 79 evaluator """rankbased""" +364 80 dataset """kinships""" +364 80 model """kg2e""" +364 80 loss """softplus""" +364 80 regularizer """no""" +364 80 optimizer """adadelta""" 
+364 80 training_loop """lcwa""" +364 80 evaluator """rankbased""" +364 81 dataset """kinships""" +364 81 model """kg2e""" +364 81 loss """softplus""" +364 81 regularizer """no""" +364 81 optimizer """adadelta""" +364 81 training_loop """lcwa""" +364 81 evaluator """rankbased""" +364 82 dataset """kinships""" +364 82 model """kg2e""" +364 82 loss """softplus""" +364 82 regularizer """no""" +364 82 optimizer """adadelta""" +364 82 training_loop """lcwa""" +364 82 evaluator """rankbased""" +364 83 dataset """kinships""" +364 83 model """kg2e""" +364 83 loss """softplus""" +364 83 regularizer """no""" +364 83 optimizer """adadelta""" +364 83 training_loop """lcwa""" +364 83 evaluator """rankbased""" +364 84 dataset """kinships""" +364 84 model """kg2e""" +364 84 loss """softplus""" +364 84 regularizer """no""" +364 84 optimizer """adadelta""" +364 84 training_loop """lcwa""" +364 84 evaluator """rankbased""" +364 85 dataset """kinships""" +364 85 model """kg2e""" +364 85 loss """softplus""" +364 85 regularizer """no""" +364 85 optimizer """adadelta""" +364 85 training_loop """lcwa""" +364 85 evaluator """rankbased""" +364 86 dataset """kinships""" +364 86 model """kg2e""" +364 86 loss """softplus""" +364 86 regularizer """no""" +364 86 optimizer """adadelta""" +364 86 training_loop """lcwa""" +364 86 evaluator """rankbased""" +364 87 dataset """kinships""" +364 87 model """kg2e""" +364 87 loss """softplus""" +364 87 regularizer """no""" +364 87 optimizer """adadelta""" +364 87 training_loop """lcwa""" +364 87 evaluator """rankbased""" +364 88 dataset """kinships""" +364 88 model """kg2e""" +364 88 loss """softplus""" +364 88 regularizer """no""" +364 88 optimizer """adadelta""" +364 88 training_loop """lcwa""" +364 88 evaluator """rankbased""" +364 89 dataset """kinships""" +364 89 model """kg2e""" +364 89 loss """softplus""" +364 89 regularizer """no""" +364 89 optimizer """adadelta""" +364 89 training_loop """lcwa""" +364 89 evaluator """rankbased""" +364 90 dataset 
"""kinships""" +364 90 model """kg2e""" +364 90 loss """softplus""" +364 90 regularizer """no""" +364 90 optimizer """adadelta""" +364 90 training_loop """lcwa""" +364 90 evaluator """rankbased""" +364 91 dataset """kinships""" +364 91 model """kg2e""" +364 91 loss """softplus""" +364 91 regularizer """no""" +364 91 optimizer """adadelta""" +364 91 training_loop """lcwa""" +364 91 evaluator """rankbased""" +364 92 dataset """kinships""" +364 92 model """kg2e""" +364 92 loss """softplus""" +364 92 regularizer """no""" +364 92 optimizer """adadelta""" +364 92 training_loop """lcwa""" +364 92 evaluator """rankbased""" +364 93 dataset """kinships""" +364 93 model """kg2e""" +364 93 loss """softplus""" +364 93 regularizer """no""" +364 93 optimizer """adadelta""" +364 93 training_loop """lcwa""" +364 93 evaluator """rankbased""" +364 94 dataset """kinships""" +364 94 model """kg2e""" +364 94 loss """softplus""" +364 94 regularizer """no""" +364 94 optimizer """adadelta""" +364 94 training_loop """lcwa""" +364 94 evaluator """rankbased""" +364 95 dataset """kinships""" +364 95 model """kg2e""" +364 95 loss """softplus""" +364 95 regularizer """no""" +364 95 optimizer """adadelta""" +364 95 training_loop """lcwa""" +364 95 evaluator """rankbased""" +364 96 dataset """kinships""" +364 96 model """kg2e""" +364 96 loss """softplus""" +364 96 regularizer """no""" +364 96 optimizer """adadelta""" +364 96 training_loop """lcwa""" +364 96 evaluator """rankbased""" +364 97 dataset """kinships""" +364 97 model """kg2e""" +364 97 loss """softplus""" +364 97 regularizer """no""" +364 97 optimizer """adadelta""" +364 97 training_loop """lcwa""" +364 97 evaluator """rankbased""" +364 98 dataset """kinships""" +364 98 model """kg2e""" +364 98 loss """softplus""" +364 98 regularizer """no""" +364 98 optimizer """adadelta""" +364 98 training_loop """lcwa""" +364 98 evaluator """rankbased""" +364 99 dataset """kinships""" +364 99 model """kg2e""" +364 99 loss """softplus""" +364 99 
regularizer """no""" +364 99 optimizer """adadelta""" +364 99 training_loop """lcwa""" +364 99 evaluator """rankbased""" +364 100 dataset """kinships""" +364 100 model """kg2e""" +364 100 loss """softplus""" +364 100 regularizer """no""" +364 100 optimizer """adadelta""" +364 100 training_loop """lcwa""" +364 100 evaluator """rankbased""" +365 1 model.embedding_dim 0.0 +365 1 model.c_min 0.025631968191417755 +365 1 model.c_max 4.478430714799534 +365 1 training.batch_size 2.0 +365 1 training.label_smoothing 0.0035920309039115627 +365 2 model.embedding_dim 1.0 +365 2 model.c_min 0.04105147170086741 +365 2 model.c_max 4.293570005248451 +365 2 training.batch_size 2.0 +365 2 training.label_smoothing 0.022048982694783 +365 3 model.embedding_dim 1.0 +365 3 model.c_min 0.07645991287860805 +365 3 model.c_max 3.3600948129947614 +365 3 training.batch_size 2.0 +365 3 training.label_smoothing 0.38722800599683443 +365 4 model.embedding_dim 1.0 +365 4 model.c_min 0.021828868232661006 +365 4 model.c_max 4.017466227565328 +365 4 training.batch_size 2.0 +365 4 training.label_smoothing 0.05644077555466511 +365 5 model.embedding_dim 2.0 +365 5 model.c_min 0.023352647562975296 +365 5 model.c_max 4.161315525547256 +365 5 training.batch_size 2.0 +365 5 training.label_smoothing 0.0067200748395226375 +365 6 model.embedding_dim 0.0 +365 6 model.c_min 0.05701463333124021 +365 6 model.c_max 7.453369662004263 +365 6 training.batch_size 2.0 +365 6 training.label_smoothing 0.005752586465349975 +365 7 model.embedding_dim 1.0 +365 7 model.c_min 0.014439010141271868 +365 7 model.c_max 6.733444549944724 +365 7 training.batch_size 0.0 +365 7 training.label_smoothing 0.059184386061356405 +365 8 model.embedding_dim 0.0 +365 8 model.c_min 0.04158729323680422 +365 8 model.c_max 6.363514684558899 +365 8 training.batch_size 1.0 +365 8 training.label_smoothing 0.001509547704665005 +365 9 model.embedding_dim 2.0 +365 9 model.c_min 0.03274621266544715 +365 9 model.c_max 2.3452018938976944 +365 9 
training.batch_size 0.0 +365 9 training.label_smoothing 0.12118577773557908 +365 10 model.embedding_dim 2.0 +365 10 model.c_min 0.015109716598836177 +365 10 model.c_max 1.2217545804870764 +365 10 training.batch_size 1.0 +365 10 training.label_smoothing 0.033467339699786464 +365 11 model.embedding_dim 2.0 +365 11 model.c_min 0.06901908389912734 +365 11 model.c_max 8.196959702036619 +365 11 training.batch_size 0.0 +365 11 training.label_smoothing 0.3024888081099405 +365 12 model.embedding_dim 0.0 +365 12 model.c_min 0.043594974186890734 +365 12 model.c_max 9.951561409655724 +365 12 training.batch_size 2.0 +365 12 training.label_smoothing 0.02484539322360439 +365 13 model.embedding_dim 0.0 +365 13 model.c_min 0.09155412878545338 +365 13 model.c_max 5.603741669048933 +365 13 training.batch_size 2.0 +365 13 training.label_smoothing 0.020975213543649965 +365 14 model.embedding_dim 2.0 +365 14 model.c_min 0.0402968226909912 +365 14 model.c_max 6.2810137524278975 +365 14 training.batch_size 1.0 +365 14 training.label_smoothing 0.045653069484142154 +365 15 model.embedding_dim 1.0 +365 15 model.c_min 0.06403739565463525 +365 15 model.c_max 9.717849628435248 +365 15 training.batch_size 0.0 +365 15 training.label_smoothing 0.07396347636755962 +365 16 model.embedding_dim 0.0 +365 16 model.c_min 0.012387188115628816 +365 16 model.c_max 4.772034246185612 +365 16 training.batch_size 2.0 +365 16 training.label_smoothing 0.601027332556705 +365 17 model.embedding_dim 2.0 +365 17 model.c_min 0.05019304565153915 +365 17 model.c_max 6.0479583292234125 +365 17 training.batch_size 0.0 +365 17 training.label_smoothing 0.001634920212577194 +365 18 model.embedding_dim 2.0 +365 18 model.c_min 0.061362123103388344 +365 18 model.c_max 7.110177190256984 +365 18 training.batch_size 2.0 +365 18 training.label_smoothing 0.011922650074344975 +365 19 model.embedding_dim 2.0 +365 19 model.c_min 0.012357765177175028 +365 19 model.c_max 8.789853675191695 +365 19 training.batch_size 1.0 +365 19 
training.label_smoothing 0.07715964665212956 +365 20 model.embedding_dim 2.0 +365 20 model.c_min 0.04099832290266799 +365 20 model.c_max 8.120587933884247 +365 20 training.batch_size 0.0 +365 20 training.label_smoothing 0.007804311952290859 +365 21 model.embedding_dim 1.0 +365 21 model.c_min 0.032244091582933086 +365 21 model.c_max 1.271983846560845 +365 21 training.batch_size 2.0 +365 21 training.label_smoothing 0.002752831247295796 +365 22 model.embedding_dim 0.0 +365 22 model.c_min 0.010692233111846484 +365 22 model.c_max 4.224640028559641 +365 22 training.batch_size 1.0 +365 22 training.label_smoothing 0.009755001296177077 +365 23 model.embedding_dim 1.0 +365 23 model.c_min 0.07328416248248495 +365 23 model.c_max 3.031779622846184 +365 23 training.batch_size 2.0 +365 23 training.label_smoothing 0.049900539896084624 +365 24 model.embedding_dim 0.0 +365 24 model.c_min 0.05223126542548478 +365 24 model.c_max 4.986919719422667 +365 24 training.batch_size 0.0 +365 24 training.label_smoothing 0.05216094444327884 +365 25 model.embedding_dim 2.0 +365 25 model.c_min 0.028340309396256192 +365 25 model.c_max 2.12215919609987 +365 25 training.batch_size 0.0 +365 25 training.label_smoothing 0.06514637383894116 +365 26 model.embedding_dim 2.0 +365 26 model.c_min 0.08703699958238217 +365 26 model.c_max 4.9560307307332705 +365 26 training.batch_size 2.0 +365 26 training.label_smoothing 0.004172664858199555 +365 27 model.embedding_dim 2.0 +365 27 model.c_min 0.09161947622524517 +365 27 model.c_max 7.541025790475387 +365 27 training.batch_size 1.0 +365 27 training.label_smoothing 0.07597723602808505 +365 28 model.embedding_dim 2.0 +365 28 model.c_min 0.04727902131459576 +365 28 model.c_max 6.450747213698901 +365 28 training.batch_size 1.0 +365 28 training.label_smoothing 0.0011341889146070452 +365 29 model.embedding_dim 1.0 +365 29 model.c_min 0.015748391967804285 +365 29 model.c_max 7.042645530574787 +365 29 training.batch_size 2.0 +365 29 training.label_smoothing 
0.17762739920431023 +365 30 model.embedding_dim 2.0 +365 30 model.c_min 0.01804436326970431 +365 30 model.c_max 2.15865025162457 +365 30 training.batch_size 2.0 +365 30 training.label_smoothing 0.18541906494441865 +365 31 model.embedding_dim 1.0 +365 31 model.c_min 0.020799364708646725 +365 31 model.c_max 7.56509238956242 +365 31 training.batch_size 2.0 +365 31 training.label_smoothing 0.010002083655378772 +365 32 model.embedding_dim 0.0 +365 32 model.c_min 0.032887774786633205 +365 32 model.c_max 1.1405870136883887 +365 32 training.batch_size 0.0 +365 32 training.label_smoothing 0.014975462094927602 +365 33 model.embedding_dim 2.0 +365 33 model.c_min 0.05974068507984579 +365 33 model.c_max 5.904412177205831 +365 33 training.batch_size 1.0 +365 33 training.label_smoothing 0.018594530262260253 +365 34 model.embedding_dim 2.0 +365 34 model.c_min 0.022811296427789066 +365 34 model.c_max 8.251887510446638 +365 34 training.batch_size 0.0 +365 34 training.label_smoothing 0.014345116966291772 +365 35 model.embedding_dim 1.0 +365 35 model.c_min 0.08954145060486059 +365 35 model.c_max 1.95335053778395 +365 35 training.batch_size 1.0 +365 35 training.label_smoothing 0.005738421130840016 +365 36 model.embedding_dim 0.0 +365 36 model.c_min 0.04883728393499059 +365 36 model.c_max 9.094617625696655 +365 36 training.batch_size 2.0 +365 36 training.label_smoothing 0.08155003591760263 +365 37 model.embedding_dim 0.0 +365 37 model.c_min 0.010968919227684937 +365 37 model.c_max 5.179292588633362 +365 37 training.batch_size 0.0 +365 37 training.label_smoothing 0.226527711203113 +365 38 model.embedding_dim 0.0 +365 38 model.c_min 0.08104012308500949 +365 38 model.c_max 4.077855032106303 +365 38 training.batch_size 1.0 +365 38 training.label_smoothing 0.03030261899215318 +365 39 model.embedding_dim 0.0 +365 39 model.c_min 0.021518795964865108 +365 39 model.c_max 8.975063827177191 +365 39 training.batch_size 1.0 +365 39 training.label_smoothing 0.15321255657626076 +365 40 
model.embedding_dim 0.0 +365 40 model.c_min 0.010662790059758167 +365 40 model.c_max 6.3905373344720084 +365 40 training.batch_size 0.0 +365 40 training.label_smoothing 0.002130393326574325 +365 41 model.embedding_dim 1.0 +365 41 model.c_min 0.012374563037102672 +365 41 model.c_max 8.52078219329798 +365 41 training.batch_size 1.0 +365 41 training.label_smoothing 0.07884647776854663 +365 42 model.embedding_dim 2.0 +365 42 model.c_min 0.02339917972185185 +365 42 model.c_max 5.428553883865195 +365 42 training.batch_size 0.0 +365 42 training.label_smoothing 0.013775239215853441 +365 43 model.embedding_dim 2.0 +365 43 model.c_min 0.08698091373013235 +365 43 model.c_max 8.596239260244957 +365 43 training.batch_size 2.0 +365 43 training.label_smoothing 0.040870983999532616 +365 44 model.embedding_dim 2.0 +365 44 model.c_min 0.04110355033870532 +365 44 model.c_max 3.1626698880334096 +365 44 training.batch_size 1.0 +365 44 training.label_smoothing 0.21720971449408313 +365 45 model.embedding_dim 2.0 +365 45 model.c_min 0.07963534881071545 +365 45 model.c_max 9.579621254200019 +365 45 training.batch_size 2.0 +365 45 training.label_smoothing 0.0190529980449217 +365 46 model.embedding_dim 0.0 +365 46 model.c_min 0.015878069669334416 +365 46 model.c_max 8.308552735046558 +365 46 training.batch_size 1.0 +365 46 training.label_smoothing 0.003715265896434616 +365 47 model.embedding_dim 1.0 +365 47 model.c_min 0.07955236397100533 +365 47 model.c_max 7.321337154527933 +365 47 training.batch_size 2.0 +365 47 training.label_smoothing 0.0011517713913782757 +365 48 model.embedding_dim 2.0 +365 48 model.c_min 0.021882024799387084 +365 48 model.c_max 2.0983374245295945 +365 48 training.batch_size 2.0 +365 48 training.label_smoothing 0.006027926323585507 +365 49 model.embedding_dim 0.0 +365 49 model.c_min 0.026545568403909613 +365 49 model.c_max 1.2888001846592982 +365 49 training.batch_size 2.0 +365 49 training.label_smoothing 0.015974628306871297 +365 50 model.embedding_dim 2.0 +365 50 
model.c_min 0.07983484950766728 +365 50 model.c_max 1.4773121678570562 +365 50 training.batch_size 1.0 +365 50 training.label_smoothing 0.0038313380230987925 +365 51 model.embedding_dim 0.0 +365 51 model.c_min 0.031190050308739094 +365 51 model.c_max 2.5169530070226958 +365 51 training.batch_size 2.0 +365 51 training.label_smoothing 0.0149695928442045 +365 52 model.embedding_dim 0.0 +365 52 model.c_min 0.07965232100427033 +365 52 model.c_max 3.3227095163326004 +365 52 training.batch_size 1.0 +365 52 training.label_smoothing 0.0260502177364643 +365 53 model.embedding_dim 1.0 +365 53 model.c_min 0.03004442916082536 +365 53 model.c_max 6.803819308246547 +365 53 training.batch_size 1.0 +365 53 training.label_smoothing 0.0013991115017894461 +365 54 model.embedding_dim 2.0 +365 54 model.c_min 0.09813772956760732 +365 54 model.c_max 2.270430721158691 +365 54 training.batch_size 0.0 +365 54 training.label_smoothing 0.8095761075624965 +365 55 model.embedding_dim 1.0 +365 55 model.c_min 0.01772751886817846 +365 55 model.c_max 9.338659167731828 +365 55 training.batch_size 1.0 +365 55 training.label_smoothing 0.009768137195786316 +365 56 model.embedding_dim 0.0 +365 56 model.c_min 0.0913563858566351 +365 56 model.c_max 3.9371654083192498 +365 56 training.batch_size 0.0 +365 56 training.label_smoothing 0.18672150695075165 +365 57 model.embedding_dim 1.0 +365 57 model.c_min 0.04335649713555754 +365 57 model.c_max 3.177528871987217 +365 57 training.batch_size 0.0 +365 57 training.label_smoothing 0.03957184451195495 +365 58 model.embedding_dim 2.0 +365 58 model.c_min 0.047851850784855626 +365 58 model.c_max 2.3554280353518555 +365 58 training.batch_size 1.0 +365 58 training.label_smoothing 0.34918047038133243 +365 59 model.embedding_dim 1.0 +365 59 model.c_min 0.025359808155817228 +365 59 model.c_max 8.077770905829151 +365 59 training.batch_size 1.0 +365 59 training.label_smoothing 0.43409991639247275 +365 60 model.embedding_dim 2.0 +365 60 model.c_min 0.07626239124595875 +365 60 
model.c_max 4.8337591878070665 +365 60 training.batch_size 2.0 +365 60 training.label_smoothing 0.021846616676633228 +365 61 model.embedding_dim 0.0 +365 61 model.c_min 0.095812307720183 +365 61 model.c_max 6.6796737178987495 +365 61 training.batch_size 1.0 +365 61 training.label_smoothing 0.0035951959708140405 +365 62 model.embedding_dim 1.0 +365 62 model.c_min 0.04999797633101631 +365 62 model.c_max 1.204245037605069 +365 62 training.batch_size 0.0 +365 62 training.label_smoothing 0.0012095135925534572 +365 63 model.embedding_dim 1.0 +365 63 model.c_min 0.027610531250634843 +365 63 model.c_max 6.0083912195198055 +365 63 training.batch_size 0.0 +365 63 training.label_smoothing 0.014752966355239193 +365 64 model.embedding_dim 2.0 +365 64 model.c_min 0.020319322067624546 +365 64 model.c_max 2.5503020207995273 +365 64 training.batch_size 2.0 +365 64 training.label_smoothing 0.017247171705465726 +365 65 model.embedding_dim 1.0 +365 65 model.c_min 0.04337939331848898 +365 65 model.c_max 4.053459327987907 +365 65 training.batch_size 2.0 +365 65 training.label_smoothing 0.0033480303548437335 +365 66 model.embedding_dim 1.0 +365 66 model.c_min 0.03421425246544 +365 66 model.c_max 4.56875513184055 +365 66 training.batch_size 1.0 +365 66 training.label_smoothing 0.018173759735865613 +365 67 model.embedding_dim 0.0 +365 67 model.c_min 0.05461650459650247 +365 67 model.c_max 2.4361321860668315 +365 67 training.batch_size 0.0 +365 67 training.label_smoothing 0.41885562559913053 +365 68 model.embedding_dim 0.0 +365 68 model.c_min 0.04243275581398151 +365 68 model.c_max 8.184934040852028 +365 68 training.batch_size 0.0 +365 68 training.label_smoothing 0.0016316476695667074 +365 69 model.embedding_dim 1.0 +365 69 model.c_min 0.016432393614058644 +365 69 model.c_max 7.252172229467977 +365 69 training.batch_size 1.0 +365 69 training.label_smoothing 0.12442856719927244 +365 70 model.embedding_dim 1.0 +365 70 model.c_min 0.03286025574134058 +365 70 model.c_max 9.637881958435063 +365 
70 training.batch_size 0.0 +365 70 training.label_smoothing 0.002294864413972399 +365 71 model.embedding_dim 1.0 +365 71 model.c_min 0.03693053895103211 +365 71 model.c_max 2.6132940704660066 +365 71 training.batch_size 2.0 +365 71 training.label_smoothing 0.11940886123198002 +365 72 model.embedding_dim 1.0 +365 72 model.c_min 0.049014028026456186 +365 72 model.c_max 8.827737595176266 +365 72 training.batch_size 2.0 +365 72 training.label_smoothing 0.050339759221637685 +365 73 model.embedding_dim 0.0 +365 73 model.c_min 0.029621663933850906 +365 73 model.c_max 8.131677633428737 +365 73 training.batch_size 2.0 +365 73 training.label_smoothing 0.19114021776329107 +365 74 model.embedding_dim 0.0 +365 74 model.c_min 0.05530088178145157 +365 74 model.c_max 7.798426298902619 +365 74 training.batch_size 0.0 +365 74 training.label_smoothing 0.008812204018124313 +365 75 model.embedding_dim 2.0 +365 75 model.c_min 0.011100798616445029 +365 75 model.c_max 4.027000825030703 +365 75 training.batch_size 1.0 +365 75 training.label_smoothing 0.0014216610988520863 +365 76 model.embedding_dim 1.0 +365 76 model.c_min 0.03468235203209647 +365 76 model.c_max 7.3745420041631755 +365 76 training.batch_size 0.0 +365 76 training.label_smoothing 0.13164848646284294 +365 77 model.embedding_dim 2.0 +365 77 model.c_min 0.015989731266882055 +365 77 model.c_max 9.799779011291285 +365 77 training.batch_size 2.0 +365 77 training.label_smoothing 0.08071361306448951 +365 78 model.embedding_dim 1.0 +365 78 model.c_min 0.05807945728500184 +365 78 model.c_max 9.89193737380948 +365 78 training.batch_size 0.0 +365 78 training.label_smoothing 0.09673369294687569 +365 79 model.embedding_dim 2.0 +365 79 model.c_min 0.01840892600346444 +365 79 model.c_max 2.72387065651124 +365 79 training.batch_size 0.0 +365 79 training.label_smoothing 0.0036675970304704884 +365 80 model.embedding_dim 1.0 +365 80 model.c_min 0.06462321744543373 +365 80 model.c_max 3.9009006376522786 +365 80 training.batch_size 2.0 +365 80 
training.label_smoothing 0.008818833014690873 +365 81 model.embedding_dim 1.0 +365 81 model.c_min 0.017612065608621157 +365 81 model.c_max 3.862961452279948 +365 81 training.batch_size 1.0 +365 81 training.label_smoothing 0.01928656557554357 +365 82 model.embedding_dim 2.0 +365 82 model.c_min 0.01433440913478373 +365 82 model.c_max 3.8340875867539577 +365 82 training.batch_size 0.0 +365 82 training.label_smoothing 0.07502590074394225 +365 83 model.embedding_dim 1.0 +365 83 model.c_min 0.05498056105795591 +365 83 model.c_max 4.887353793820266 +365 83 training.batch_size 0.0 +365 83 training.label_smoothing 0.01024052710105219 +365 84 model.embedding_dim 2.0 +365 84 model.c_min 0.015496578648553715 +365 84 model.c_max 5.070229548925416 +365 84 training.batch_size 0.0 +365 84 training.label_smoothing 0.0029831800603783956 +365 85 model.embedding_dim 0.0 +365 85 model.c_min 0.02128434395496127 +365 85 model.c_max 4.809465683377564 +365 85 training.batch_size 1.0 +365 85 training.label_smoothing 0.005684037770285771 +365 86 model.embedding_dim 0.0 +365 86 model.c_min 0.060770537474073474 +365 86 model.c_max 4.482398098408492 +365 86 training.batch_size 0.0 +365 86 training.label_smoothing 0.17013611616401156 +365 87 model.embedding_dim 2.0 +365 87 model.c_min 0.04503948813042908 +365 87 model.c_max 6.8756528389390486 +365 87 training.batch_size 1.0 +365 87 training.label_smoothing 0.01779946402012921 +365 88 model.embedding_dim 2.0 +365 88 model.c_min 0.015520769496945842 +365 88 model.c_max 5.946678007762166 +365 88 training.batch_size 0.0 +365 88 training.label_smoothing 0.7676045280145711 +365 89 model.embedding_dim 0.0 +365 89 model.c_min 0.0926860011267662 +365 89 model.c_max 9.438795795606792 +365 89 training.batch_size 1.0 +365 89 training.label_smoothing 0.019210803855663764 +365 90 model.embedding_dim 1.0 +365 90 model.c_min 0.0532848302654939 +365 90 model.c_max 5.4252404652827115 +365 90 training.batch_size 1.0 +365 90 training.label_smoothing 
0.6447753392808058 +365 91 model.embedding_dim 1.0 +365 91 model.c_min 0.032308794339791676 +365 91 model.c_max 6.280382870197531 +365 91 training.batch_size 1.0 +365 91 training.label_smoothing 0.0024815897215449355 +365 92 model.embedding_dim 2.0 +365 92 model.c_min 0.08478464055792531 +365 92 model.c_max 5.966328705652663 +365 92 training.batch_size 1.0 +365 92 training.label_smoothing 0.008672843584491274 +365 93 model.embedding_dim 1.0 +365 93 model.c_min 0.022459230262925782 +365 93 model.c_max 3.993752121884828 +365 93 training.batch_size 2.0 +365 93 training.label_smoothing 0.016873557520653105 +365 94 model.embedding_dim 1.0 +365 94 model.c_min 0.07747543078028342 +365 94 model.c_max 1.6136493998471249 +365 94 training.batch_size 1.0 +365 94 training.label_smoothing 0.008154331946066693 +365 95 model.embedding_dim 0.0 +365 95 model.c_min 0.09407403828102383 +365 95 model.c_max 4.22201532452541 +365 95 training.batch_size 1.0 +365 95 training.label_smoothing 0.01677522964381641 +365 96 model.embedding_dim 0.0 +365 96 model.c_min 0.037483167138198804 +365 96 model.c_max 5.283897632147934 +365 96 training.batch_size 2.0 +365 96 training.label_smoothing 0.004518974955191645 +365 97 model.embedding_dim 0.0 +365 97 model.c_min 0.011743906689316517 +365 97 model.c_max 8.432071128658427 +365 97 training.batch_size 1.0 +365 97 training.label_smoothing 0.2635155030728624 +365 98 model.embedding_dim 0.0 +365 98 model.c_min 0.03242804625172892 +365 98 model.c_max 8.390207331716804 +365 98 training.batch_size 1.0 +365 98 training.label_smoothing 0.45649612435808373 +365 99 model.embedding_dim 2.0 +365 99 model.c_min 0.01487412191695975 +365 99 model.c_max 5.623894938211443 +365 99 training.batch_size 1.0 +365 99 training.label_smoothing 0.2070785218656711 +365 100 model.embedding_dim 2.0 +365 100 model.c_min 0.033364955978403624 +365 100 model.c_max 2.301055572033329 +365 100 training.batch_size 1.0 +365 100 training.label_smoothing 0.024801239496576394 +365 1 dataset 
"""kinships""" +365 1 model """kg2e""" +365 1 loss """crossentropy""" +365 1 regularizer """no""" +365 1 optimizer """adadelta""" +365 1 training_loop """lcwa""" +365 1 evaluator """rankbased""" +365 2 dataset """kinships""" +365 2 model """kg2e""" +365 2 loss """crossentropy""" +365 2 regularizer """no""" +365 2 optimizer """adadelta""" +365 2 training_loop """lcwa""" +365 2 evaluator """rankbased""" +365 3 dataset """kinships""" +365 3 model """kg2e""" +365 3 loss """crossentropy""" +365 3 regularizer """no""" +365 3 optimizer """adadelta""" +365 3 training_loop """lcwa""" +365 3 evaluator """rankbased""" +365 4 dataset """kinships""" +365 4 model """kg2e""" +365 4 loss """crossentropy""" +365 4 regularizer """no""" +365 4 optimizer """adadelta""" +365 4 training_loop """lcwa""" +365 4 evaluator """rankbased""" +365 5 dataset """kinships""" +365 5 model """kg2e""" +365 5 loss """crossentropy""" +365 5 regularizer """no""" +365 5 optimizer """adadelta""" +365 5 training_loop """lcwa""" +365 5 evaluator """rankbased""" +365 6 dataset """kinships""" +365 6 model """kg2e""" +365 6 loss """crossentropy""" +365 6 regularizer """no""" +365 6 optimizer """adadelta""" +365 6 training_loop """lcwa""" +365 6 evaluator """rankbased""" +365 7 dataset """kinships""" +365 7 model """kg2e""" +365 7 loss """crossentropy""" +365 7 regularizer """no""" +365 7 optimizer """adadelta""" +365 7 training_loop """lcwa""" +365 7 evaluator """rankbased""" +365 8 dataset """kinships""" +365 8 model """kg2e""" +365 8 loss """crossentropy""" +365 8 regularizer """no""" +365 8 optimizer """adadelta""" +365 8 training_loop """lcwa""" +365 8 evaluator """rankbased""" +365 9 dataset """kinships""" +365 9 model """kg2e""" +365 9 loss """crossentropy""" +365 9 regularizer """no""" +365 9 optimizer """adadelta""" +365 9 training_loop """lcwa""" +365 9 evaluator """rankbased""" +365 10 dataset """kinships""" +365 10 model """kg2e""" +365 10 loss """crossentropy""" +365 10 regularizer """no""" +365 10 
optimizer """adadelta""" +365 10 training_loop """lcwa""" +365 10 evaluator """rankbased""" +365 11 dataset """kinships""" +365 11 model """kg2e""" +365 11 loss """crossentropy""" +365 11 regularizer """no""" +365 11 optimizer """adadelta""" +365 11 training_loop """lcwa""" +365 11 evaluator """rankbased""" +365 12 dataset """kinships""" +365 12 model """kg2e""" +365 12 loss """crossentropy""" +365 12 regularizer """no""" +365 12 optimizer """adadelta""" +365 12 training_loop """lcwa""" +365 12 evaluator """rankbased""" +365 13 dataset """kinships""" +365 13 model """kg2e""" +365 13 loss """crossentropy""" +365 13 regularizer """no""" +365 13 optimizer """adadelta""" +365 13 training_loop """lcwa""" +365 13 evaluator """rankbased""" +365 14 dataset """kinships""" +365 14 model """kg2e""" +365 14 loss """crossentropy""" +365 14 regularizer """no""" +365 14 optimizer """adadelta""" +365 14 training_loop """lcwa""" +365 14 evaluator """rankbased""" +365 15 dataset """kinships""" +365 15 model """kg2e""" +365 15 loss """crossentropy""" +365 15 regularizer """no""" +365 15 optimizer """adadelta""" +365 15 training_loop """lcwa""" +365 15 evaluator """rankbased""" +365 16 dataset """kinships""" +365 16 model """kg2e""" +365 16 loss """crossentropy""" +365 16 regularizer """no""" +365 16 optimizer """adadelta""" +365 16 training_loop """lcwa""" +365 16 evaluator """rankbased""" +365 17 dataset """kinships""" +365 17 model """kg2e""" +365 17 loss """crossentropy""" +365 17 regularizer """no""" +365 17 optimizer """adadelta""" +365 17 training_loop """lcwa""" +365 17 evaluator """rankbased""" +365 18 dataset """kinships""" +365 18 model """kg2e""" +365 18 loss """crossentropy""" +365 18 regularizer """no""" +365 18 optimizer """adadelta""" +365 18 training_loop """lcwa""" +365 18 evaluator """rankbased""" +365 19 dataset """kinships""" +365 19 model """kg2e""" +365 19 loss """crossentropy""" +365 19 regularizer """no""" +365 19 optimizer """adadelta""" +365 19 training_loop 
"""lcwa""" +365 19 evaluator """rankbased""" +365 20 dataset """kinships""" +365 20 model """kg2e""" +365 20 loss """crossentropy""" +365 20 regularizer """no""" +365 20 optimizer """adadelta""" +365 20 training_loop """lcwa""" +365 20 evaluator """rankbased""" +365 21 dataset """kinships""" +365 21 model """kg2e""" +365 21 loss """crossentropy""" +365 21 regularizer """no""" +365 21 optimizer """adadelta""" +365 21 training_loop """lcwa""" +365 21 evaluator """rankbased""" +365 22 dataset """kinships""" +365 22 model """kg2e""" +365 22 loss """crossentropy""" +365 22 regularizer """no""" +365 22 optimizer """adadelta""" +365 22 training_loop """lcwa""" +365 22 evaluator """rankbased""" +365 23 dataset """kinships""" +365 23 model """kg2e""" +365 23 loss """crossentropy""" +365 23 regularizer """no""" +365 23 optimizer """adadelta""" +365 23 training_loop """lcwa""" +365 23 evaluator """rankbased""" +365 24 dataset """kinships""" +365 24 model """kg2e""" +365 24 loss """crossentropy""" +365 24 regularizer """no""" +365 24 optimizer """adadelta""" +365 24 training_loop """lcwa""" +365 24 evaluator """rankbased""" +365 25 dataset """kinships""" +365 25 model """kg2e""" +365 25 loss """crossentropy""" +365 25 regularizer """no""" +365 25 optimizer """adadelta""" +365 25 training_loop """lcwa""" +365 25 evaluator """rankbased""" +365 26 dataset """kinships""" +365 26 model """kg2e""" +365 26 loss """crossentropy""" +365 26 regularizer """no""" +365 26 optimizer """adadelta""" +365 26 training_loop """lcwa""" +365 26 evaluator """rankbased""" +365 27 dataset """kinships""" +365 27 model """kg2e""" +365 27 loss """crossentropy""" +365 27 regularizer """no""" +365 27 optimizer """adadelta""" +365 27 training_loop """lcwa""" +365 27 evaluator """rankbased""" +365 28 dataset """kinships""" +365 28 model """kg2e""" +365 28 loss """crossentropy""" +365 28 regularizer """no""" +365 28 optimizer """adadelta""" +365 28 training_loop """lcwa""" +365 28 evaluator """rankbased""" 
+365 29 dataset """kinships""" +365 29 model """kg2e""" +365 29 loss """crossentropy""" +365 29 regularizer """no""" +365 29 optimizer """adadelta""" +365 29 training_loop """lcwa""" +365 29 evaluator """rankbased""" +365 30 dataset """kinships""" +365 30 model """kg2e""" +365 30 loss """crossentropy""" +365 30 regularizer """no""" +365 30 optimizer """adadelta""" +365 30 training_loop """lcwa""" +365 30 evaluator """rankbased""" +365 31 dataset """kinships""" +365 31 model """kg2e""" +365 31 loss """crossentropy""" +365 31 regularizer """no""" +365 31 optimizer """adadelta""" +365 31 training_loop """lcwa""" +365 31 evaluator """rankbased""" +365 32 dataset """kinships""" +365 32 model """kg2e""" +365 32 loss """crossentropy""" +365 32 regularizer """no""" +365 32 optimizer """adadelta""" +365 32 training_loop """lcwa""" +365 32 evaluator """rankbased""" +365 33 dataset """kinships""" +365 33 model """kg2e""" +365 33 loss """crossentropy""" +365 33 regularizer """no""" +365 33 optimizer """adadelta""" +365 33 training_loop """lcwa""" +365 33 evaluator """rankbased""" +365 34 dataset """kinships""" +365 34 model """kg2e""" +365 34 loss """crossentropy""" +365 34 regularizer """no""" +365 34 optimizer """adadelta""" +365 34 training_loop """lcwa""" +365 34 evaluator """rankbased""" +365 35 dataset """kinships""" +365 35 model """kg2e""" +365 35 loss """crossentropy""" +365 35 regularizer """no""" +365 35 optimizer """adadelta""" +365 35 training_loop """lcwa""" +365 35 evaluator """rankbased""" +365 36 dataset """kinships""" +365 36 model """kg2e""" +365 36 loss """crossentropy""" +365 36 regularizer """no""" +365 36 optimizer """adadelta""" +365 36 training_loop """lcwa""" +365 36 evaluator """rankbased""" +365 37 dataset """kinships""" +365 37 model """kg2e""" +365 37 loss """crossentropy""" +365 37 regularizer """no""" +365 37 optimizer """adadelta""" +365 37 training_loop """lcwa""" +365 37 evaluator """rankbased""" +365 38 dataset """kinships""" +365 38 model 
"""kg2e""" +365 38 loss """crossentropy""" +365 38 regularizer """no""" +365 38 optimizer """adadelta""" +365 38 training_loop """lcwa""" +365 38 evaluator """rankbased""" +365 39 dataset """kinships""" +365 39 model """kg2e""" +365 39 loss """crossentropy""" +365 39 regularizer """no""" +365 39 optimizer """adadelta""" +365 39 training_loop """lcwa""" +365 39 evaluator """rankbased""" +365 40 dataset """kinships""" +365 40 model """kg2e""" +365 40 loss """crossentropy""" +365 40 regularizer """no""" +365 40 optimizer """adadelta""" +365 40 training_loop """lcwa""" +365 40 evaluator """rankbased""" +365 41 dataset """kinships""" +365 41 model """kg2e""" +365 41 loss """crossentropy""" +365 41 regularizer """no""" +365 41 optimizer """adadelta""" +365 41 training_loop """lcwa""" +365 41 evaluator """rankbased""" +365 42 dataset """kinships""" +365 42 model """kg2e""" +365 42 loss """crossentropy""" +365 42 regularizer """no""" +365 42 optimizer """adadelta""" +365 42 training_loop """lcwa""" +365 42 evaluator """rankbased""" +365 43 dataset """kinships""" +365 43 model """kg2e""" +365 43 loss """crossentropy""" +365 43 regularizer """no""" +365 43 optimizer """adadelta""" +365 43 training_loop """lcwa""" +365 43 evaluator """rankbased""" +365 44 dataset """kinships""" +365 44 model """kg2e""" +365 44 loss """crossentropy""" +365 44 regularizer """no""" +365 44 optimizer """adadelta""" +365 44 training_loop """lcwa""" +365 44 evaluator """rankbased""" +365 45 dataset """kinships""" +365 45 model """kg2e""" +365 45 loss """crossentropy""" +365 45 regularizer """no""" +365 45 optimizer """adadelta""" +365 45 training_loop """lcwa""" +365 45 evaluator """rankbased""" +365 46 dataset """kinships""" +365 46 model """kg2e""" +365 46 loss """crossentropy""" +365 46 regularizer """no""" +365 46 optimizer """adadelta""" +365 46 training_loop """lcwa""" +365 46 evaluator """rankbased""" +365 47 dataset """kinships""" +365 47 model """kg2e""" +365 47 loss """crossentropy""" 
+365 47 regularizer """no""" +365 47 optimizer """adadelta""" +365 47 training_loop """lcwa""" +365 47 evaluator """rankbased""" +365 48 dataset """kinships""" +365 48 model """kg2e""" +365 48 loss """crossentropy""" +365 48 regularizer """no""" +365 48 optimizer """adadelta""" +365 48 training_loop """lcwa""" +365 48 evaluator """rankbased""" +365 49 dataset """kinships""" +365 49 model """kg2e""" +365 49 loss """crossentropy""" +365 49 regularizer """no""" +365 49 optimizer """adadelta""" +365 49 training_loop """lcwa""" +365 49 evaluator """rankbased""" +365 50 dataset """kinships""" +365 50 model """kg2e""" +365 50 loss """crossentropy""" +365 50 regularizer """no""" +365 50 optimizer """adadelta""" +365 50 training_loop """lcwa""" +365 50 evaluator """rankbased""" +365 51 dataset """kinships""" +365 51 model """kg2e""" +365 51 loss """crossentropy""" +365 51 regularizer """no""" +365 51 optimizer """adadelta""" +365 51 training_loop """lcwa""" +365 51 evaluator """rankbased""" +365 52 dataset """kinships""" +365 52 model """kg2e""" +365 52 loss """crossentropy""" +365 52 regularizer """no""" +365 52 optimizer """adadelta""" +365 52 training_loop """lcwa""" +365 52 evaluator """rankbased""" +365 53 dataset """kinships""" +365 53 model """kg2e""" +365 53 loss """crossentropy""" +365 53 regularizer """no""" +365 53 optimizer """adadelta""" +365 53 training_loop """lcwa""" +365 53 evaluator """rankbased""" +365 54 dataset """kinships""" +365 54 model """kg2e""" +365 54 loss """crossentropy""" +365 54 regularizer """no""" +365 54 optimizer """adadelta""" +365 54 training_loop """lcwa""" +365 54 evaluator """rankbased""" +365 55 dataset """kinships""" +365 55 model """kg2e""" +365 55 loss """crossentropy""" +365 55 regularizer """no""" +365 55 optimizer """adadelta""" +365 55 training_loop """lcwa""" +365 55 evaluator """rankbased""" +365 56 dataset """kinships""" +365 56 model """kg2e""" +365 56 loss """crossentropy""" +365 56 regularizer """no""" +365 56 optimizer 
"""adadelta""" +365 56 training_loop """lcwa""" +365 56 evaluator """rankbased""" +365 57 dataset """kinships""" +365 57 model """kg2e""" +365 57 loss """crossentropy""" +365 57 regularizer """no""" +365 57 optimizer """adadelta""" +365 57 training_loop """lcwa""" +365 57 evaluator """rankbased""" +365 58 dataset """kinships""" +365 58 model """kg2e""" +365 58 loss """crossentropy""" +365 58 regularizer """no""" +365 58 optimizer """adadelta""" +365 58 training_loop """lcwa""" +365 58 evaluator """rankbased""" +365 59 dataset """kinships""" +365 59 model """kg2e""" +365 59 loss """crossentropy""" +365 59 regularizer """no""" +365 59 optimizer """adadelta""" +365 59 training_loop """lcwa""" +365 59 evaluator """rankbased""" +365 60 dataset """kinships""" +365 60 model """kg2e""" +365 60 loss """crossentropy""" +365 60 regularizer """no""" +365 60 optimizer """adadelta""" +365 60 training_loop """lcwa""" +365 60 evaluator """rankbased""" +365 61 dataset """kinships""" +365 61 model """kg2e""" +365 61 loss """crossentropy""" +365 61 regularizer """no""" +365 61 optimizer """adadelta""" +365 61 training_loop """lcwa""" +365 61 evaluator """rankbased""" +365 62 dataset """kinships""" +365 62 model """kg2e""" +365 62 loss """crossentropy""" +365 62 regularizer """no""" +365 62 optimizer """adadelta""" +365 62 training_loop """lcwa""" +365 62 evaluator """rankbased""" +365 63 dataset """kinships""" +365 63 model """kg2e""" +365 63 loss """crossentropy""" +365 63 regularizer """no""" +365 63 optimizer """adadelta""" +365 63 training_loop """lcwa""" +365 63 evaluator """rankbased""" +365 64 dataset """kinships""" +365 64 model """kg2e""" +365 64 loss """crossentropy""" +365 64 regularizer """no""" +365 64 optimizer """adadelta""" +365 64 training_loop """lcwa""" +365 64 evaluator """rankbased""" +365 65 dataset """kinships""" +365 65 model """kg2e""" +365 65 loss """crossentropy""" +365 65 regularizer """no""" +365 65 optimizer """adadelta""" +365 65 training_loop 
"""lcwa""" +365 65 evaluator """rankbased""" +365 66 dataset """kinships""" +365 66 model """kg2e""" +365 66 loss """crossentropy""" +365 66 regularizer """no""" +365 66 optimizer """adadelta""" +365 66 training_loop """lcwa""" +365 66 evaluator """rankbased""" +365 67 dataset """kinships""" +365 67 model """kg2e""" +365 67 loss """crossentropy""" +365 67 regularizer """no""" +365 67 optimizer """adadelta""" +365 67 training_loop """lcwa""" +365 67 evaluator """rankbased""" +365 68 dataset """kinships""" +365 68 model """kg2e""" +365 68 loss """crossentropy""" +365 68 regularizer """no""" +365 68 optimizer """adadelta""" +365 68 training_loop """lcwa""" +365 68 evaluator """rankbased""" +365 69 dataset """kinships""" +365 69 model """kg2e""" +365 69 loss """crossentropy""" +365 69 regularizer """no""" +365 69 optimizer """adadelta""" +365 69 training_loop """lcwa""" +365 69 evaluator """rankbased""" +365 70 dataset """kinships""" +365 70 model """kg2e""" +365 70 loss """crossentropy""" +365 70 regularizer """no""" +365 70 optimizer """adadelta""" +365 70 training_loop """lcwa""" +365 70 evaluator """rankbased""" +365 71 dataset """kinships""" +365 71 model """kg2e""" +365 71 loss """crossentropy""" +365 71 regularizer """no""" +365 71 optimizer """adadelta""" +365 71 training_loop """lcwa""" +365 71 evaluator """rankbased""" +365 72 dataset """kinships""" +365 72 model """kg2e""" +365 72 loss """crossentropy""" +365 72 regularizer """no""" +365 72 optimizer """adadelta""" +365 72 training_loop """lcwa""" +365 72 evaluator """rankbased""" +365 73 dataset """kinships""" +365 73 model """kg2e""" +365 73 loss """crossentropy""" +365 73 regularizer """no""" +365 73 optimizer """adadelta""" +365 73 training_loop """lcwa""" +365 73 evaluator """rankbased""" +365 74 dataset """kinships""" +365 74 model """kg2e""" +365 74 loss """crossentropy""" +365 74 regularizer """no""" +365 74 optimizer """adadelta""" +365 74 training_loop """lcwa""" +365 74 evaluator """rankbased""" 
+365 75 dataset """kinships""" +365 75 model """kg2e""" +365 75 loss """crossentropy""" +365 75 regularizer """no""" +365 75 optimizer """adadelta""" +365 75 training_loop """lcwa""" +365 75 evaluator """rankbased""" +365 76 dataset """kinships""" +365 76 model """kg2e""" +365 76 loss """crossentropy""" +365 76 regularizer """no""" +365 76 optimizer """adadelta""" +365 76 training_loop """lcwa""" +365 76 evaluator """rankbased""" +365 77 dataset """kinships""" +365 77 model """kg2e""" +365 77 loss """crossentropy""" +365 77 regularizer """no""" +365 77 optimizer """adadelta""" +365 77 training_loop """lcwa""" +365 77 evaluator """rankbased""" +365 78 dataset """kinships""" +365 78 model """kg2e""" +365 78 loss """crossentropy""" +365 78 regularizer """no""" +365 78 optimizer """adadelta""" +365 78 training_loop """lcwa""" +365 78 evaluator """rankbased""" +365 79 dataset """kinships""" +365 79 model """kg2e""" +365 79 loss """crossentropy""" +365 79 regularizer """no""" +365 79 optimizer """adadelta""" +365 79 training_loop """lcwa""" +365 79 evaluator """rankbased""" +365 80 dataset """kinships""" +365 80 model """kg2e""" +365 80 loss """crossentropy""" +365 80 regularizer """no""" +365 80 optimizer """adadelta""" +365 80 training_loop """lcwa""" +365 80 evaluator """rankbased""" +365 81 dataset """kinships""" +365 81 model """kg2e""" +365 81 loss """crossentropy""" +365 81 regularizer """no""" +365 81 optimizer """adadelta""" +365 81 training_loop """lcwa""" +365 81 evaluator """rankbased""" +365 82 dataset """kinships""" +365 82 model """kg2e""" +365 82 loss """crossentropy""" +365 82 regularizer """no""" +365 82 optimizer """adadelta""" +365 82 training_loop """lcwa""" +365 82 evaluator """rankbased""" +365 83 dataset """kinships""" +365 83 model """kg2e""" +365 83 loss """crossentropy""" +365 83 regularizer """no""" +365 83 optimizer """adadelta""" +365 83 training_loop """lcwa""" +365 83 evaluator """rankbased""" +365 84 dataset """kinships""" +365 84 model 
"""kg2e""" +365 84 loss """crossentropy""" +365 84 regularizer """no""" +365 84 optimizer """adadelta""" +365 84 training_loop """lcwa""" +365 84 evaluator """rankbased""" +365 85 dataset """kinships""" +365 85 model """kg2e""" +365 85 loss """crossentropy""" +365 85 regularizer """no""" +365 85 optimizer """adadelta""" +365 85 training_loop """lcwa""" +365 85 evaluator """rankbased""" +365 86 dataset """kinships""" +365 86 model """kg2e""" +365 86 loss """crossentropy""" +365 86 regularizer """no""" +365 86 optimizer """adadelta""" +365 86 training_loop """lcwa""" +365 86 evaluator """rankbased""" +365 87 dataset """kinships""" +365 87 model """kg2e""" +365 87 loss """crossentropy""" +365 87 regularizer """no""" +365 87 optimizer """adadelta""" +365 87 training_loop """lcwa""" +365 87 evaluator """rankbased""" +365 88 dataset """kinships""" +365 88 model """kg2e""" +365 88 loss """crossentropy""" +365 88 regularizer """no""" +365 88 optimizer """adadelta""" +365 88 training_loop """lcwa""" +365 88 evaluator """rankbased""" +365 89 dataset """kinships""" +365 89 model """kg2e""" +365 89 loss """crossentropy""" +365 89 regularizer """no""" +365 89 optimizer """adadelta""" +365 89 training_loop """lcwa""" +365 89 evaluator """rankbased""" +365 90 dataset """kinships""" +365 90 model """kg2e""" +365 90 loss """crossentropy""" +365 90 regularizer """no""" +365 90 optimizer """adadelta""" +365 90 training_loop """lcwa""" +365 90 evaluator """rankbased""" +365 91 dataset """kinships""" +365 91 model """kg2e""" +365 91 loss """crossentropy""" +365 91 regularizer """no""" +365 91 optimizer """adadelta""" +365 91 training_loop """lcwa""" +365 91 evaluator """rankbased""" +365 92 dataset """kinships""" +365 92 model """kg2e""" +365 92 loss """crossentropy""" +365 92 regularizer """no""" +365 92 optimizer """adadelta""" +365 92 training_loop """lcwa""" +365 92 evaluator """rankbased""" +365 93 dataset """kinships""" +365 93 model """kg2e""" +365 93 loss """crossentropy""" 
+365 93 regularizer """no""" +365 93 optimizer """adadelta""" +365 93 training_loop """lcwa""" +365 93 evaluator """rankbased""" +365 94 dataset """kinships""" +365 94 model """kg2e""" +365 94 loss """crossentropy""" +365 94 regularizer """no""" +365 94 optimizer """adadelta""" +365 94 training_loop """lcwa""" +365 94 evaluator """rankbased""" +365 95 dataset """kinships""" +365 95 model """kg2e""" +365 95 loss """crossentropy""" +365 95 regularizer """no""" +365 95 optimizer """adadelta""" +365 95 training_loop """lcwa""" +365 95 evaluator """rankbased""" +365 96 dataset """kinships""" +365 96 model """kg2e""" +365 96 loss """crossentropy""" +365 96 regularizer """no""" +365 96 optimizer """adadelta""" +365 96 training_loop """lcwa""" +365 96 evaluator """rankbased""" +365 97 dataset """kinships""" +365 97 model """kg2e""" +365 97 loss """crossentropy""" +365 97 regularizer """no""" +365 97 optimizer """adadelta""" +365 97 training_loop """lcwa""" +365 97 evaluator """rankbased""" +365 98 dataset """kinships""" +365 98 model """kg2e""" +365 98 loss """crossentropy""" +365 98 regularizer """no""" +365 98 optimizer """adadelta""" +365 98 training_loop """lcwa""" +365 98 evaluator """rankbased""" +365 99 dataset """kinships""" +365 99 model """kg2e""" +365 99 loss """crossentropy""" +365 99 regularizer """no""" +365 99 optimizer """adadelta""" +365 99 training_loop """lcwa""" +365 99 evaluator """rankbased""" +365 100 dataset """kinships""" +365 100 model """kg2e""" +365 100 loss """crossentropy""" +365 100 regularizer """no""" +365 100 optimizer """adadelta""" +365 100 training_loop """lcwa""" +365 100 evaluator """rankbased""" +366 1 model.embedding_dim 1.0 +366 1 model.c_min 0.061708101577920026 +366 1 model.c_max 4.8532341204988 +366 1 training.batch_size 2.0 +366 1 training.label_smoothing 0.6168136985926151 +366 2 model.embedding_dim 0.0 +366 2 model.c_min 0.012439471578244921 +366 2 model.c_max 9.470095350136319 +366 2 training.batch_size 0.0 +366 2 
training.label_smoothing 0.018425749213161473 +366 3 model.embedding_dim 2.0 +366 3 model.c_min 0.013155960427964545 +366 3 model.c_max 1.8584828555845454 +366 3 training.batch_size 2.0 +366 3 training.label_smoothing 0.0012505066918238343 +366 4 model.embedding_dim 2.0 +366 4 model.c_min 0.03152414487715276 +366 4 model.c_max 9.772632160652279 +366 4 training.batch_size 2.0 +366 4 training.label_smoothing 0.18649040819112514 +366 5 model.embedding_dim 2.0 +366 5 model.c_min 0.01414245602787741 +366 5 model.c_max 9.664031502616176 +366 5 training.batch_size 0.0 +366 5 training.label_smoothing 0.024947648244001155 +366 6 model.embedding_dim 1.0 +366 6 model.c_min 0.014181688171006781 +366 6 model.c_max 3.144610699725871 +366 6 training.batch_size 0.0 +366 6 training.label_smoothing 0.0571730080689896 +366 7 model.embedding_dim 0.0 +366 7 model.c_min 0.03672069348846495 +366 7 model.c_max 6.524056803280531 +366 7 training.batch_size 1.0 +366 7 training.label_smoothing 0.06446040054942 +366 8 model.embedding_dim 2.0 +366 8 model.c_min 0.021732055519121978 +366 8 model.c_max 7.624628049322408 +366 8 training.batch_size 2.0 +366 8 training.label_smoothing 0.062804513963411 +366 9 model.embedding_dim 0.0 +366 9 model.c_min 0.02589503916660959 +366 9 model.c_max 7.540370169181338 +366 9 training.batch_size 0.0 +366 9 training.label_smoothing 0.19261033919622936 +366 10 model.embedding_dim 1.0 +366 10 model.c_min 0.012570737099326202 +366 10 model.c_max 1.0257289015475965 +366 10 training.batch_size 2.0 +366 10 training.label_smoothing 0.0025554584332159553 +366 11 model.embedding_dim 1.0 +366 11 model.c_min 0.02833170093667421 +366 11 model.c_max 4.074218163303012 +366 11 training.batch_size 1.0 +366 11 training.label_smoothing 0.5964774775571748 +366 12 model.embedding_dim 1.0 +366 12 model.c_min 0.03460402903916266 +366 12 model.c_max 6.176130415717166 +366 12 training.batch_size 1.0 +366 12 training.label_smoothing 0.5987464219610439 +366 13 model.embedding_dim 1.0 
+366 13 model.c_min 0.05456292978669767 +366 13 model.c_max 7.130178750703255 +366 13 training.batch_size 2.0 +366 13 training.label_smoothing 0.17984407247355225 +366 14 model.embedding_dim 2.0 +366 14 model.c_min 0.0652662777792894 +366 14 model.c_max 3.4607758308829695 +366 14 training.batch_size 2.0 +366 14 training.label_smoothing 0.011938885651494523 +366 15 model.embedding_dim 0.0 +366 15 model.c_min 0.06151188999406968 +366 15 model.c_max 5.673546636036591 +366 15 training.batch_size 2.0 +366 15 training.label_smoothing 0.6813742692940375 +366 16 model.embedding_dim 2.0 +366 16 model.c_min 0.02449007872986553 +366 16 model.c_max 3.321422751877759 +366 16 training.batch_size 1.0 +366 16 training.label_smoothing 0.8457347390887987 +366 17 model.embedding_dim 0.0 +366 17 model.c_min 0.03870240489489572 +366 17 model.c_max 4.762353847448848 +366 17 training.batch_size 1.0 +366 17 training.label_smoothing 0.025449265196137383 +366 18 model.embedding_dim 2.0 +366 18 model.c_min 0.01337204820418567 +366 18 model.c_max 7.35951420508567 +366 18 training.batch_size 2.0 +366 18 training.label_smoothing 0.25847665758981686 +366 19 model.embedding_dim 2.0 +366 19 model.c_min 0.023962657213529717 +366 19 model.c_max 4.054376780055371 +366 19 training.batch_size 1.0 +366 19 training.label_smoothing 0.056305037365832714 +366 20 model.embedding_dim 0.0 +366 20 model.c_min 0.034002798805864515 +366 20 model.c_max 6.0364408982267825 +366 20 training.batch_size 0.0 +366 20 training.label_smoothing 0.057162324007219274 +366 21 model.embedding_dim 0.0 +366 21 model.c_min 0.017938140555746277 +366 21 model.c_max 4.612830491115467 +366 21 training.batch_size 0.0 +366 21 training.label_smoothing 0.033261227568988604 +366 22 model.embedding_dim 0.0 +366 22 model.c_min 0.012522362726501396 +366 22 model.c_max 4.525665150181182 +366 22 training.batch_size 0.0 +366 22 training.label_smoothing 0.1756019462225914 +366 23 model.embedding_dim 2.0 +366 23 model.c_min 0.03387114609565602 
+366 23 model.c_max 1.311314180372857 +366 23 training.batch_size 2.0 +366 23 training.label_smoothing 0.04961878086838956 +366 24 model.embedding_dim 0.0 +366 24 model.c_min 0.012406619305240856 +366 24 model.c_max 6.298016899356312 +366 24 training.batch_size 2.0 +366 24 training.label_smoothing 0.0045213151352458 +366 25 model.embedding_dim 0.0 +366 25 model.c_min 0.015991974810352704 +366 25 model.c_max 9.545097283983477 +366 25 training.batch_size 1.0 +366 25 training.label_smoothing 0.4739049728345816 +366 26 model.embedding_dim 0.0 +366 26 model.c_min 0.03178033967603739 +366 26 model.c_max 7.231944948225571 +366 26 training.batch_size 1.0 +366 26 training.label_smoothing 0.006798599130689447 +366 27 model.embedding_dim 0.0 +366 27 model.c_min 0.022581886053659198 +366 27 model.c_max 9.99750263385988 +366 27 training.batch_size 0.0 +366 27 training.label_smoothing 0.3759116990420349 +366 28 model.embedding_dim 1.0 +366 28 model.c_min 0.010862178888374683 +366 28 model.c_max 8.81495048681601 +366 28 training.batch_size 1.0 +366 28 training.label_smoothing 0.5964654839936024 +366 29 model.embedding_dim 0.0 +366 29 model.c_min 0.013021717574828449 +366 29 model.c_max 6.6083744507059325 +366 29 training.batch_size 2.0 +366 29 training.label_smoothing 0.0016104223546540516 +366 30 model.embedding_dim 0.0 +366 30 model.c_min 0.012258215790100545 +366 30 model.c_max 4.918384442563614 +366 30 training.batch_size 2.0 +366 30 training.label_smoothing 0.002923144903460276 +366 31 model.embedding_dim 2.0 +366 31 model.c_min 0.022237155722196957 +366 31 model.c_max 2.416987374069775 +366 31 training.batch_size 2.0 +366 31 training.label_smoothing 0.0029264412678063796 +366 32 model.embedding_dim 2.0 +366 32 model.c_min 0.038263765590135786 +366 32 model.c_max 8.212987959382186 +366 32 training.batch_size 2.0 +366 32 training.label_smoothing 0.001324653699659776 +366 33 model.embedding_dim 0.0 +366 33 model.c_min 0.023348557295333697 +366 33 model.c_max 5.173209623512119 
+366 33 training.batch_size 0.0 +366 33 training.label_smoothing 0.0016517957427097697 +366 34 model.embedding_dim 0.0 +366 34 model.c_min 0.034174709722089205 +366 34 model.c_max 6.173843430310163 +366 34 training.batch_size 2.0 +366 34 training.label_smoothing 0.23358357115953646 +366 35 model.embedding_dim 1.0 +366 35 model.c_min 0.011159237580176893 +366 35 model.c_max 7.20609964885624 +366 35 training.batch_size 0.0 +366 35 training.label_smoothing 0.09484386164985728 +366 36 model.embedding_dim 1.0 +366 36 model.c_min 0.052364230874275296 +366 36 model.c_max 3.550116971415789 +366 36 training.batch_size 0.0 +366 36 training.label_smoothing 0.5922298487304377 +366 37 model.embedding_dim 1.0 +366 37 model.c_min 0.013468101228128868 +366 37 model.c_max 4.53192483547676 +366 37 training.batch_size 2.0 +366 37 training.label_smoothing 0.5001236595821529 +366 38 model.embedding_dim 2.0 +366 38 model.c_min 0.01213931260477873 +366 38 model.c_max 7.7607731388146055 +366 38 training.batch_size 1.0 +366 38 training.label_smoothing 0.010302922207804266 +366 39 model.embedding_dim 2.0 +366 39 model.c_min 0.07706160135431016 +366 39 model.c_max 2.0386461169772105 +366 39 training.batch_size 0.0 +366 39 training.label_smoothing 0.3505631441019519 +366 40 model.embedding_dim 1.0 +366 40 model.c_min 0.010518670357728497 +366 40 model.c_max 1.337440049071971 +366 40 training.batch_size 2.0 +366 40 training.label_smoothing 0.012643112270322573 +366 41 model.embedding_dim 2.0 +366 41 model.c_min 0.04919536890357528 +366 41 model.c_max 5.4430724390237755 +366 41 training.batch_size 1.0 +366 41 training.label_smoothing 0.1750758279845103 +366 42 model.embedding_dim 2.0 +366 42 model.c_min 0.07952110780996845 +366 42 model.c_max 8.856892606665797 +366 42 training.batch_size 1.0 +366 42 training.label_smoothing 0.11050455244436183 +366 43 model.embedding_dim 1.0 +366 43 model.c_min 0.09659184372976544 +366 43 model.c_max 6.7849999822659415 +366 43 training.batch_size 1.0 +366 43 
training.label_smoothing 0.00158593107173529 +366 44 model.embedding_dim 1.0 +366 44 model.c_min 0.06474416317334979 +366 44 model.c_max 5.0108880435123435 +366 44 training.batch_size 2.0 +366 44 training.label_smoothing 0.004339086402480538 +366 45 model.embedding_dim 0.0 +366 45 model.c_min 0.031563078167717175 +366 45 model.c_max 8.180161543433844 +366 45 training.batch_size 2.0 +366 45 training.label_smoothing 0.033464631545854766 +366 46 model.embedding_dim 1.0 +366 46 model.c_min 0.031675582463447455 +366 46 model.c_max 1.1346384295118002 +366 46 training.batch_size 2.0 +366 46 training.label_smoothing 0.0030982778848764763 +366 47 model.embedding_dim 2.0 +366 47 model.c_min 0.08795940965667555 +366 47 model.c_max 4.064174516386984 +366 47 training.batch_size 0.0 +366 47 training.label_smoothing 0.0059336476979642725 +366 48 model.embedding_dim 0.0 +366 48 model.c_min 0.02072652833861371 +366 48 model.c_max 5.831821792390714 +366 48 training.batch_size 1.0 +366 48 training.label_smoothing 0.6016875561005774 +366 49 model.embedding_dim 1.0 +366 49 model.c_min 0.01534910930828005 +366 49 model.c_max 1.1955528402320776 +366 49 training.batch_size 1.0 +366 49 training.label_smoothing 0.0019740531849639737 +366 50 model.embedding_dim 1.0 +366 50 model.c_min 0.02512573327688748 +366 50 model.c_max 3.466114288250342 +366 50 training.batch_size 2.0 +366 50 training.label_smoothing 0.001505760569961697 +366 51 model.embedding_dim 0.0 +366 51 model.c_min 0.012269945722776683 +366 51 model.c_max 4.197590085192469 +366 51 training.batch_size 2.0 +366 51 training.label_smoothing 0.8483203889607931 +366 52 model.embedding_dim 2.0 +366 52 model.c_min 0.08238373170092458 +366 52 model.c_max 9.791879847301328 +366 52 training.batch_size 1.0 +366 52 training.label_smoothing 0.04485800992740824 +366 53 model.embedding_dim 2.0 +366 53 model.c_min 0.01682612077321514 +366 53 model.c_max 7.3583576178806345 +366 53 training.batch_size 1.0 +366 53 training.label_smoothing 
0.0453318393022067 +366 54 model.embedding_dim 1.0 +366 54 model.c_min 0.011091630399602872 +366 54 model.c_max 5.883355426208733 +366 54 training.batch_size 0.0 +366 54 training.label_smoothing 0.012183520193953214 +366 55 model.embedding_dim 2.0 +366 55 model.c_min 0.01049533392154975 +366 55 model.c_max 8.78356075341242 +366 55 training.batch_size 2.0 +366 55 training.label_smoothing 0.004682188173515412 +366 56 model.embedding_dim 2.0 +366 56 model.c_min 0.09043326275934288 +366 56 model.c_max 6.111313242837352 +366 56 training.batch_size 1.0 +366 56 training.label_smoothing 0.08262974645673678 +366 57 model.embedding_dim 0.0 +366 57 model.c_min 0.047842998560612755 +366 57 model.c_max 3.5477922006367155 +366 57 training.batch_size 1.0 +366 57 training.label_smoothing 0.011735480649369075 +366 58 model.embedding_dim 1.0 +366 58 model.c_min 0.026585278602047756 +366 58 model.c_max 7.283953564296679 +366 58 training.batch_size 2.0 +366 58 training.label_smoothing 0.0028355555576739344 +366 59 model.embedding_dim 0.0 +366 59 model.c_min 0.01778467126982123 +366 59 model.c_max 9.000431979482698 +366 59 training.batch_size 1.0 +366 59 training.label_smoothing 0.08479428414183886 +366 60 model.embedding_dim 1.0 +366 60 model.c_min 0.08741575133531021 +366 60 model.c_max 9.345181484985652 +366 60 training.batch_size 2.0 +366 60 training.label_smoothing 0.003564524531033036 +366 61 model.embedding_dim 1.0 +366 61 model.c_min 0.016187495045667664 +366 61 model.c_max 4.013063858527081 +366 61 training.batch_size 1.0 +366 61 training.label_smoothing 0.925845440022975 +366 62 model.embedding_dim 1.0 +366 62 model.c_min 0.010362019657548753 +366 62 model.c_max 9.19040848811472 +366 62 training.batch_size 1.0 +366 62 training.label_smoothing 0.0012609345592513076 +366 63 model.embedding_dim 1.0 +366 63 model.c_min 0.018603262219111544 +366 63 model.c_max 9.737811227718261 +366 63 training.batch_size 0.0 +366 63 training.label_smoothing 0.023992739925535543 +366 64 
model.embedding_dim 2.0 +366 64 model.c_min 0.014464932745880545 +366 64 model.c_max 8.643524325521067 +366 64 training.batch_size 2.0 +366 64 training.label_smoothing 0.3156557260010154 +366 65 model.embedding_dim 2.0 +366 65 model.c_min 0.036411615382686474 +366 65 model.c_max 1.6243799789851168 +366 65 training.batch_size 1.0 +366 65 training.label_smoothing 0.05418586416728914 +366 66 model.embedding_dim 0.0 +366 66 model.c_min 0.049094441286929115 +366 66 model.c_max 3.4551118082375076 +366 66 training.batch_size 1.0 +366 66 training.label_smoothing 0.8381874645430354 +366 67 model.embedding_dim 0.0 +366 67 model.c_min 0.043036495880673195 +366 67 model.c_max 7.604655894182311 +366 67 training.batch_size 1.0 +366 67 training.label_smoothing 0.015117157981621731 +366 68 model.embedding_dim 2.0 +366 68 model.c_min 0.035699679945321014 +366 68 model.c_max 5.5990567847513315 +366 68 training.batch_size 0.0 +366 68 training.label_smoothing 0.6443720983048873 +366 69 model.embedding_dim 2.0 +366 69 model.c_min 0.06201431994459616 +366 69 model.c_max 1.0291967491420335 +366 69 training.batch_size 2.0 +366 69 training.label_smoothing 0.7685381967268865 +366 70 model.embedding_dim 1.0 +366 70 model.c_min 0.0943979230812436 +366 70 model.c_max 6.754519545999238 +366 70 training.batch_size 1.0 +366 70 training.label_smoothing 0.9814041554886926 +366 71 model.embedding_dim 0.0 +366 71 model.c_min 0.014610688827033924 +366 71 model.c_max 3.005513867026526 +366 71 training.batch_size 1.0 +366 71 training.label_smoothing 0.002019320557328964 +366 72 model.embedding_dim 2.0 +366 72 model.c_min 0.08096586776475788 +366 72 model.c_max 4.321334031689615 +366 72 training.batch_size 1.0 +366 72 training.label_smoothing 0.010969106042336309 +366 73 model.embedding_dim 2.0 +366 73 model.c_min 0.0431598908837439 +366 73 model.c_max 8.703169262641255 +366 73 training.batch_size 2.0 +366 73 training.label_smoothing 0.21166661783515717 +366 74 model.embedding_dim 0.0 +366 74 model.c_min 
0.023320352581912195 +366 74 model.c_max 4.400072251397969 +366 74 training.batch_size 0.0 +366 74 training.label_smoothing 0.2845845878616535 +366 75 model.embedding_dim 1.0 +366 75 model.c_min 0.012205926243995462 +366 75 model.c_max 1.0150304090283786 +366 75 training.batch_size 1.0 +366 75 training.label_smoothing 0.020154342513296154 +366 76 model.embedding_dim 1.0 +366 76 model.c_min 0.014726712034015392 +366 76 model.c_max 1.4776909438407064 +366 76 training.batch_size 1.0 +366 76 training.label_smoothing 0.0029286170150299665 +366 77 model.embedding_dim 1.0 +366 77 model.c_min 0.010813699709263767 +366 77 model.c_max 2.127234330386286 +366 77 training.batch_size 1.0 +366 77 training.label_smoothing 0.004418517129156809 +366 78 model.embedding_dim 2.0 +366 78 model.c_min 0.011994875273205004 +366 78 model.c_max 7.995602479361529 +366 78 training.batch_size 0.0 +366 78 training.label_smoothing 0.24910736096209024 +366 79 model.embedding_dim 0.0 +366 79 model.c_min 0.020391526499161376 +366 79 model.c_max 6.817248939880894 +366 79 training.batch_size 0.0 +366 79 training.label_smoothing 0.039034873320226746 +366 80 model.embedding_dim 2.0 +366 80 model.c_min 0.022526947791624088 +366 80 model.c_max 3.3026238507892014 +366 80 training.batch_size 0.0 +366 80 training.label_smoothing 0.10652512588810747 +366 81 model.embedding_dim 0.0 +366 81 model.c_min 0.0275076905427874 +366 81 model.c_max 4.0811863624160125 +366 81 training.batch_size 0.0 +366 81 training.label_smoothing 0.020383466483795232 +366 82 model.embedding_dim 0.0 +366 82 model.c_min 0.06626874473311477 +366 82 model.c_max 1.9365918820764414 +366 82 training.batch_size 1.0 +366 82 training.label_smoothing 0.008437703053615927 +366 83 model.embedding_dim 1.0 +366 83 model.c_min 0.011081298520342255 +366 83 model.c_max 3.92943220772062 +366 83 training.batch_size 0.0 +366 83 training.label_smoothing 0.03843927566214654 +366 84 model.embedding_dim 2.0 +366 84 model.c_min 0.048002572449907695 +366 84 
model.c_max 9.43308744595119 +366 84 training.batch_size 0.0 +366 84 training.label_smoothing 0.1808868698642975 +366 85 model.embedding_dim 2.0 +366 85 model.c_min 0.04557414285253971 +366 85 model.c_max 5.223303521378345 +366 85 training.batch_size 0.0 +366 85 training.label_smoothing 0.14213558364098303 +366 86 model.embedding_dim 0.0 +366 86 model.c_min 0.010624519237926535 +366 86 model.c_max 7.4539607508942 +366 86 training.batch_size 1.0 +366 86 training.label_smoothing 0.5404569582169042 +366 87 model.embedding_dim 0.0 +366 87 model.c_min 0.08103147298318407 +366 87 model.c_max 7.717309139587401 +366 87 training.batch_size 1.0 +366 87 training.label_smoothing 0.005710591088279674 +366 88 model.embedding_dim 0.0 +366 88 model.c_min 0.08474232964952572 +366 88 model.c_max 1.4463658901384404 +366 88 training.batch_size 1.0 +366 88 training.label_smoothing 0.04397421682546606 +366 89 model.embedding_dim 0.0 +366 89 model.c_min 0.021634334553780964 +366 89 model.c_max 9.135922544806437 +366 89 training.batch_size 2.0 +366 89 training.label_smoothing 0.2561795530771259 +366 90 model.embedding_dim 2.0 +366 90 model.c_min 0.012334986335922334 +366 90 model.c_max 6.754949402660225 +366 90 training.batch_size 1.0 +366 90 training.label_smoothing 0.002303177316086114 +366 91 model.embedding_dim 0.0 +366 91 model.c_min 0.0213201081501022 +366 91 model.c_max 5.935994955823924 +366 91 training.batch_size 2.0 +366 91 training.label_smoothing 0.0026158910531761254 +366 92 model.embedding_dim 2.0 +366 92 model.c_min 0.03000833041101855 +366 92 model.c_max 7.677103271688794 +366 92 training.batch_size 1.0 +366 92 training.label_smoothing 0.03085448766226381 +366 93 model.embedding_dim 0.0 +366 93 model.c_min 0.011991395835913984 +366 93 model.c_max 5.105760989642471 +366 93 training.batch_size 0.0 +366 93 training.label_smoothing 0.07672795613198637 +366 94 model.embedding_dim 0.0 +366 94 model.c_min 0.015044493892346236 +366 94 model.c_max 5.302380536458964 +366 94 
training.batch_size 0.0 +366 94 training.label_smoothing 0.05325439118027222 +366 95 model.embedding_dim 1.0 +366 95 model.c_min 0.03432535095566337 +366 95 model.c_max 7.10000074974219 +366 95 training.batch_size 1.0 +366 95 training.label_smoothing 0.004828192722108224 +366 96 model.embedding_dim 1.0 +366 96 model.c_min 0.014092363477882909 +366 96 model.c_max 3.8966696805598624 +366 96 training.batch_size 1.0 +366 96 training.label_smoothing 0.05046134105532835 +366 97 model.embedding_dim 1.0 +366 97 model.c_min 0.02387088963190031 +366 97 model.c_max 4.362261342445248 +366 97 training.batch_size 2.0 +366 97 training.label_smoothing 0.002301008802451522 +366 98 model.embedding_dim 0.0 +366 98 model.c_min 0.03971741522190991 +366 98 model.c_max 9.340092782706726 +366 98 training.batch_size 2.0 +366 98 training.label_smoothing 0.007133395270588024 +366 99 model.embedding_dim 1.0 +366 99 model.c_min 0.018256816973294485 +366 99 model.c_max 8.038922110592864 +366 99 training.batch_size 2.0 +366 99 training.label_smoothing 0.0021537487708150056 +366 100 model.embedding_dim 1.0 +366 100 model.c_min 0.010291547880984268 +366 100 model.c_max 1.1976169689860259 +366 100 training.batch_size 1.0 +366 100 training.label_smoothing 0.0015772556331968897 +366 1 dataset """kinships""" +366 1 model """kg2e""" +366 1 loss """crossentropy""" +366 1 regularizer """no""" +366 1 optimizer """adadelta""" +366 1 training_loop """lcwa""" +366 1 evaluator """rankbased""" +366 2 dataset """kinships""" +366 2 model """kg2e""" +366 2 loss """crossentropy""" +366 2 regularizer """no""" +366 2 optimizer """adadelta""" +366 2 training_loop """lcwa""" +366 2 evaluator """rankbased""" +366 3 dataset """kinships""" +366 3 model """kg2e""" +366 3 loss """crossentropy""" +366 3 regularizer """no""" +366 3 optimizer """adadelta""" +366 3 training_loop """lcwa""" +366 3 evaluator """rankbased""" +366 4 dataset """kinships""" +366 4 model """kg2e""" +366 4 loss """crossentropy""" +366 4 regularizer 
"""no""" +366 4 optimizer """adadelta""" +366 4 training_loop """lcwa""" +366 4 evaluator """rankbased""" +366 5 dataset """kinships""" +366 5 model """kg2e""" +366 5 loss """crossentropy""" +366 5 regularizer """no""" +366 5 optimizer """adadelta""" +366 5 training_loop """lcwa""" +366 5 evaluator """rankbased""" +366 6 dataset """kinships""" +366 6 model """kg2e""" +366 6 loss """crossentropy""" +366 6 regularizer """no""" +366 6 optimizer """adadelta""" +366 6 training_loop """lcwa""" +366 6 evaluator """rankbased""" +366 7 dataset """kinships""" +366 7 model """kg2e""" +366 7 loss """crossentropy""" +366 7 regularizer """no""" +366 7 optimizer """adadelta""" +366 7 training_loop """lcwa""" +366 7 evaluator """rankbased""" +366 8 dataset """kinships""" +366 8 model """kg2e""" +366 8 loss """crossentropy""" +366 8 regularizer """no""" +366 8 optimizer """adadelta""" +366 8 training_loop """lcwa""" +366 8 evaluator """rankbased""" +366 9 dataset """kinships""" +366 9 model """kg2e""" +366 9 loss """crossentropy""" +366 9 regularizer """no""" +366 9 optimizer """adadelta""" +366 9 training_loop """lcwa""" +366 9 evaluator """rankbased""" +366 10 dataset """kinships""" +366 10 model """kg2e""" +366 10 loss """crossentropy""" +366 10 regularizer """no""" +366 10 optimizer """adadelta""" +366 10 training_loop """lcwa""" +366 10 evaluator """rankbased""" +366 11 dataset """kinships""" +366 11 model """kg2e""" +366 11 loss """crossentropy""" +366 11 regularizer """no""" +366 11 optimizer """adadelta""" +366 11 training_loop """lcwa""" +366 11 evaluator """rankbased""" +366 12 dataset """kinships""" +366 12 model """kg2e""" +366 12 loss """crossentropy""" +366 12 regularizer """no""" +366 12 optimizer """adadelta""" +366 12 training_loop """lcwa""" +366 12 evaluator """rankbased""" +366 13 dataset """kinships""" +366 13 model """kg2e""" +366 13 loss """crossentropy""" +366 13 regularizer """no""" +366 13 optimizer """adadelta""" +366 13 training_loop """lcwa""" +366 13 
evaluator """rankbased""" +366 14 dataset """kinships""" +366 14 model """kg2e""" +366 14 loss """crossentropy""" +366 14 regularizer """no""" +366 14 optimizer """adadelta""" +366 14 training_loop """lcwa""" +366 14 evaluator """rankbased""" +366 15 dataset """kinships""" +366 15 model """kg2e""" +366 15 loss """crossentropy""" +366 15 regularizer """no""" +366 15 optimizer """adadelta""" +366 15 training_loop """lcwa""" +366 15 evaluator """rankbased""" +366 16 dataset """kinships""" +366 16 model """kg2e""" +366 16 loss """crossentropy""" +366 16 regularizer """no""" +366 16 optimizer """adadelta""" +366 16 training_loop """lcwa""" +366 16 evaluator """rankbased""" +366 17 dataset """kinships""" +366 17 model """kg2e""" +366 17 loss """crossentropy""" +366 17 regularizer """no""" +366 17 optimizer """adadelta""" +366 17 training_loop """lcwa""" +366 17 evaluator """rankbased""" +366 18 dataset """kinships""" +366 18 model """kg2e""" +366 18 loss """crossentropy""" +366 18 regularizer """no""" +366 18 optimizer """adadelta""" +366 18 training_loop """lcwa""" +366 18 evaluator """rankbased""" +366 19 dataset """kinships""" +366 19 model """kg2e""" +366 19 loss """crossentropy""" +366 19 regularizer """no""" +366 19 optimizer """adadelta""" +366 19 training_loop """lcwa""" +366 19 evaluator """rankbased""" +366 20 dataset """kinships""" +366 20 model """kg2e""" +366 20 loss """crossentropy""" +366 20 regularizer """no""" +366 20 optimizer """adadelta""" +366 20 training_loop """lcwa""" +366 20 evaluator """rankbased""" +366 21 dataset """kinships""" +366 21 model """kg2e""" +366 21 loss """crossentropy""" +366 21 regularizer """no""" +366 21 optimizer """adadelta""" +366 21 training_loop """lcwa""" +366 21 evaluator """rankbased""" +366 22 dataset """kinships""" +366 22 model """kg2e""" +366 22 loss """crossentropy""" +366 22 regularizer """no""" +366 22 optimizer """adadelta""" +366 22 training_loop """lcwa""" +366 22 evaluator """rankbased""" +366 23 dataset 
"""kinships""" +366 23 model """kg2e""" +366 23 loss """crossentropy""" +366 23 regularizer """no""" +366 23 optimizer """adadelta""" +366 23 training_loop """lcwa""" +366 23 evaluator """rankbased""" +366 24 dataset """kinships""" +366 24 model """kg2e""" +366 24 loss """crossentropy""" +366 24 regularizer """no""" +366 24 optimizer """adadelta""" +366 24 training_loop """lcwa""" +366 24 evaluator """rankbased""" +366 25 dataset """kinships""" +366 25 model """kg2e""" +366 25 loss """crossentropy""" +366 25 regularizer """no""" +366 25 optimizer """adadelta""" +366 25 training_loop """lcwa""" +366 25 evaluator """rankbased""" +366 26 dataset """kinships""" +366 26 model """kg2e""" +366 26 loss """crossentropy""" +366 26 regularizer """no""" +366 26 optimizer """adadelta""" +366 26 training_loop """lcwa""" +366 26 evaluator """rankbased""" +366 27 dataset """kinships""" +366 27 model """kg2e""" +366 27 loss """crossentropy""" +366 27 regularizer """no""" +366 27 optimizer """adadelta""" +366 27 training_loop """lcwa""" +366 27 evaluator """rankbased""" +366 28 dataset """kinships""" +366 28 model """kg2e""" +366 28 loss """crossentropy""" +366 28 regularizer """no""" +366 28 optimizer """adadelta""" +366 28 training_loop """lcwa""" +366 28 evaluator """rankbased""" +366 29 dataset """kinships""" +366 29 model """kg2e""" +366 29 loss """crossentropy""" +366 29 regularizer """no""" +366 29 optimizer """adadelta""" +366 29 training_loop """lcwa""" +366 29 evaluator """rankbased""" +366 30 dataset """kinships""" +366 30 model """kg2e""" +366 30 loss """crossentropy""" +366 30 regularizer """no""" +366 30 optimizer """adadelta""" +366 30 training_loop """lcwa""" +366 30 evaluator """rankbased""" +366 31 dataset """kinships""" +366 31 model """kg2e""" +366 31 loss """crossentropy""" +366 31 regularizer """no""" +366 31 optimizer """adadelta""" +366 31 training_loop """lcwa""" +366 31 evaluator """rankbased""" +366 32 dataset """kinships""" +366 32 model """kg2e""" +366 
32 loss """crossentropy""" +366 32 regularizer """no""" +366 32 optimizer """adadelta""" +366 32 training_loop """lcwa""" +366 32 evaluator """rankbased""" +366 33 dataset """kinships""" +366 33 model """kg2e""" +366 33 loss """crossentropy""" +366 33 regularizer """no""" +366 33 optimizer """adadelta""" +366 33 training_loop """lcwa""" +366 33 evaluator """rankbased""" +366 34 dataset """kinships""" +366 34 model """kg2e""" +366 34 loss """crossentropy""" +366 34 regularizer """no""" +366 34 optimizer """adadelta""" +366 34 training_loop """lcwa""" +366 34 evaluator """rankbased""" +366 35 dataset """kinships""" +366 35 model """kg2e""" +366 35 loss """crossentropy""" +366 35 regularizer """no""" +366 35 optimizer """adadelta""" +366 35 training_loop """lcwa""" +366 35 evaluator """rankbased""" +366 36 dataset """kinships""" +366 36 model """kg2e""" +366 36 loss """crossentropy""" +366 36 regularizer """no""" +366 36 optimizer """adadelta""" +366 36 training_loop """lcwa""" +366 36 evaluator """rankbased""" +366 37 dataset """kinships""" +366 37 model """kg2e""" +366 37 loss """crossentropy""" +366 37 regularizer """no""" +366 37 optimizer """adadelta""" +366 37 training_loop """lcwa""" +366 37 evaluator """rankbased""" +366 38 dataset """kinships""" +366 38 model """kg2e""" +366 38 loss """crossentropy""" +366 38 regularizer """no""" +366 38 optimizer """adadelta""" +366 38 training_loop """lcwa""" +366 38 evaluator """rankbased""" +366 39 dataset """kinships""" +366 39 model """kg2e""" +366 39 loss """crossentropy""" +366 39 regularizer """no""" +366 39 optimizer """adadelta""" +366 39 training_loop """lcwa""" +366 39 evaluator """rankbased""" +366 40 dataset """kinships""" +366 40 model """kg2e""" +366 40 loss """crossentropy""" +366 40 regularizer """no""" +366 40 optimizer """adadelta""" +366 40 training_loop """lcwa""" +366 40 evaluator """rankbased""" +366 41 dataset """kinships""" +366 41 model """kg2e""" +366 41 loss """crossentropy""" +366 41 regularizer 
"""no""" +366 41 optimizer """adadelta""" +366 41 training_loop """lcwa""" +366 41 evaluator """rankbased""" +366 42 dataset """kinships""" +366 42 model """kg2e""" +366 42 loss """crossentropy""" +366 42 regularizer """no""" +366 42 optimizer """adadelta""" +366 42 training_loop """lcwa""" +366 42 evaluator """rankbased""" +366 43 dataset """kinships""" +366 43 model """kg2e""" +366 43 loss """crossentropy""" +366 43 regularizer """no""" +366 43 optimizer """adadelta""" +366 43 training_loop """lcwa""" +366 43 evaluator """rankbased""" +366 44 dataset """kinships""" +366 44 model """kg2e""" +366 44 loss """crossentropy""" +366 44 regularizer """no""" +366 44 optimizer """adadelta""" +366 44 training_loop """lcwa""" +366 44 evaluator """rankbased""" +366 45 dataset """kinships""" +366 45 model """kg2e""" +366 45 loss """crossentropy""" +366 45 regularizer """no""" +366 45 optimizer """adadelta""" +366 45 training_loop """lcwa""" +366 45 evaluator """rankbased""" +366 46 dataset """kinships""" +366 46 model """kg2e""" +366 46 loss """crossentropy""" +366 46 regularizer """no""" +366 46 optimizer """adadelta""" +366 46 training_loop """lcwa""" +366 46 evaluator """rankbased""" +366 47 dataset """kinships""" +366 47 model """kg2e""" +366 47 loss """crossentropy""" +366 47 regularizer """no""" +366 47 optimizer """adadelta""" +366 47 training_loop """lcwa""" +366 47 evaluator """rankbased""" +366 48 dataset """kinships""" +366 48 model """kg2e""" +366 48 loss """crossentropy""" +366 48 regularizer """no""" +366 48 optimizer """adadelta""" +366 48 training_loop """lcwa""" +366 48 evaluator """rankbased""" +366 49 dataset """kinships""" +366 49 model """kg2e""" +366 49 loss """crossentropy""" +366 49 regularizer """no""" +366 49 optimizer """adadelta""" +366 49 training_loop """lcwa""" +366 49 evaluator """rankbased""" +366 50 dataset """kinships""" +366 50 model """kg2e""" +366 50 loss """crossentropy""" +366 50 regularizer """no""" +366 50 optimizer """adadelta""" +366 
50 training_loop """lcwa""" +366 50 evaluator """rankbased""" +366 51 dataset """kinships""" +366 51 model """kg2e""" +366 51 loss """crossentropy""" +366 51 regularizer """no""" +366 51 optimizer """adadelta""" +366 51 training_loop """lcwa""" +366 51 evaluator """rankbased""" +366 52 dataset """kinships""" +366 52 model """kg2e""" +366 52 loss """crossentropy""" +366 52 regularizer """no""" +366 52 optimizer """adadelta""" +366 52 training_loop """lcwa""" +366 52 evaluator """rankbased""" +366 53 dataset """kinships""" +366 53 model """kg2e""" +366 53 loss """crossentropy""" +366 53 regularizer """no""" +366 53 optimizer """adadelta""" +366 53 training_loop """lcwa""" +366 53 evaluator """rankbased""" +366 54 dataset """kinships""" +366 54 model """kg2e""" +366 54 loss """crossentropy""" +366 54 regularizer """no""" +366 54 optimizer """adadelta""" +366 54 training_loop """lcwa""" +366 54 evaluator """rankbased""" +366 55 dataset """kinships""" +366 55 model """kg2e""" +366 55 loss """crossentropy""" +366 55 regularizer """no""" +366 55 optimizer """adadelta""" +366 55 training_loop """lcwa""" +366 55 evaluator """rankbased""" +366 56 dataset """kinships""" +366 56 model """kg2e""" +366 56 loss """crossentropy""" +366 56 regularizer """no""" +366 56 optimizer """adadelta""" +366 56 training_loop """lcwa""" +366 56 evaluator """rankbased""" +366 57 dataset """kinships""" +366 57 model """kg2e""" +366 57 loss """crossentropy""" +366 57 regularizer """no""" +366 57 optimizer """adadelta""" +366 57 training_loop """lcwa""" +366 57 evaluator """rankbased""" +366 58 dataset """kinships""" +366 58 model """kg2e""" +366 58 loss """crossentropy""" +366 58 regularizer """no""" +366 58 optimizer """adadelta""" +366 58 training_loop """lcwa""" +366 58 evaluator """rankbased""" +366 59 dataset """kinships""" +366 59 model """kg2e""" +366 59 loss """crossentropy""" +366 59 regularizer """no""" +366 59 optimizer """adadelta""" +366 59 training_loop """lcwa""" +366 59 evaluator 
"""rankbased""" +366 60 dataset """kinships""" +366 60 model """kg2e""" +366 60 loss """crossentropy""" +366 60 regularizer """no""" +366 60 optimizer """adadelta""" +366 60 training_loop """lcwa""" +366 60 evaluator """rankbased""" +366 61 dataset """kinships""" +366 61 model """kg2e""" +366 61 loss """crossentropy""" +366 61 regularizer """no""" +366 61 optimizer """adadelta""" +366 61 training_loop """lcwa""" +366 61 evaluator """rankbased""" +366 62 dataset """kinships""" +366 62 model """kg2e""" +366 62 loss """crossentropy""" +366 62 regularizer """no""" +366 62 optimizer """adadelta""" +366 62 training_loop """lcwa""" +366 62 evaluator """rankbased""" +366 63 dataset """kinships""" +366 63 model """kg2e""" +366 63 loss """crossentropy""" +366 63 regularizer """no""" +366 63 optimizer """adadelta""" +366 63 training_loop """lcwa""" +366 63 evaluator """rankbased""" +366 64 dataset """kinships""" +366 64 model """kg2e""" +366 64 loss """crossentropy""" +366 64 regularizer """no""" +366 64 optimizer """adadelta""" +366 64 training_loop """lcwa""" +366 64 evaluator """rankbased""" +366 65 dataset """kinships""" +366 65 model """kg2e""" +366 65 loss """crossentropy""" +366 65 regularizer """no""" +366 65 optimizer """adadelta""" +366 65 training_loop """lcwa""" +366 65 evaluator """rankbased""" +366 66 dataset """kinships""" +366 66 model """kg2e""" +366 66 loss """crossentropy""" +366 66 regularizer """no""" +366 66 optimizer """adadelta""" +366 66 training_loop """lcwa""" +366 66 evaluator """rankbased""" +366 67 dataset """kinships""" +366 67 model """kg2e""" +366 67 loss """crossentropy""" +366 67 regularizer """no""" +366 67 optimizer """adadelta""" +366 67 training_loop """lcwa""" +366 67 evaluator """rankbased""" +366 68 dataset """kinships""" +366 68 model """kg2e""" +366 68 loss """crossentropy""" +366 68 regularizer """no""" +366 68 optimizer """adadelta""" +366 68 training_loop """lcwa""" +366 68 evaluator """rankbased""" +366 69 dataset """kinships""" 
+366 69 model """kg2e""" +366 69 loss """crossentropy""" +366 69 regularizer """no""" +366 69 optimizer """adadelta""" +366 69 training_loop """lcwa""" +366 69 evaluator """rankbased""" +366 70 dataset """kinships""" +366 70 model """kg2e""" +366 70 loss """crossentropy""" +366 70 regularizer """no""" +366 70 optimizer """adadelta""" +366 70 training_loop """lcwa""" +366 70 evaluator """rankbased""" +366 71 dataset """kinships""" +366 71 model """kg2e""" +366 71 loss """crossentropy""" +366 71 regularizer """no""" +366 71 optimizer """adadelta""" +366 71 training_loop """lcwa""" +366 71 evaluator """rankbased""" +366 72 dataset """kinships""" +366 72 model """kg2e""" +366 72 loss """crossentropy""" +366 72 regularizer """no""" +366 72 optimizer """adadelta""" +366 72 training_loop """lcwa""" +366 72 evaluator """rankbased""" +366 73 dataset """kinships""" +366 73 model """kg2e""" +366 73 loss """crossentropy""" +366 73 regularizer """no""" +366 73 optimizer """adadelta""" +366 73 training_loop """lcwa""" +366 73 evaluator """rankbased""" +366 74 dataset """kinships""" +366 74 model """kg2e""" +366 74 loss """crossentropy""" +366 74 regularizer """no""" +366 74 optimizer """adadelta""" +366 74 training_loop """lcwa""" +366 74 evaluator """rankbased""" +366 75 dataset """kinships""" +366 75 model """kg2e""" +366 75 loss """crossentropy""" +366 75 regularizer """no""" +366 75 optimizer """adadelta""" +366 75 training_loop """lcwa""" +366 75 evaluator """rankbased""" +366 76 dataset """kinships""" +366 76 model """kg2e""" +366 76 loss """crossentropy""" +366 76 regularizer """no""" +366 76 optimizer """adadelta""" +366 76 training_loop """lcwa""" +366 76 evaluator """rankbased""" +366 77 dataset """kinships""" +366 77 model """kg2e""" +366 77 loss """crossentropy""" +366 77 regularizer """no""" +366 77 optimizer """adadelta""" +366 77 training_loop """lcwa""" +366 77 evaluator """rankbased""" +366 78 dataset """kinships""" +366 78 model """kg2e""" +366 78 loss 
"""crossentropy""" +366 78 regularizer """no""" +366 78 optimizer """adadelta""" +366 78 training_loop """lcwa""" +366 78 evaluator """rankbased""" +366 79 dataset """kinships""" +366 79 model """kg2e""" +366 79 loss """crossentropy""" +366 79 regularizer """no""" +366 79 optimizer """adadelta""" +366 79 training_loop """lcwa""" +366 79 evaluator """rankbased""" +366 80 dataset """kinships""" +366 80 model """kg2e""" +366 80 loss """crossentropy""" +366 80 regularizer """no""" +366 80 optimizer """adadelta""" +366 80 training_loop """lcwa""" +366 80 evaluator """rankbased""" +366 81 dataset """kinships""" +366 81 model """kg2e""" +366 81 loss """crossentropy""" +366 81 regularizer """no""" +366 81 optimizer """adadelta""" +366 81 training_loop """lcwa""" +366 81 evaluator """rankbased""" +366 82 dataset """kinships""" +366 82 model """kg2e""" +366 82 loss """crossentropy""" +366 82 regularizer """no""" +366 82 optimizer """adadelta""" +366 82 training_loop """lcwa""" +366 82 evaluator """rankbased""" +366 83 dataset """kinships""" +366 83 model """kg2e""" +366 83 loss """crossentropy""" +366 83 regularizer """no""" +366 83 optimizer """adadelta""" +366 83 training_loop """lcwa""" +366 83 evaluator """rankbased""" +366 84 dataset """kinships""" +366 84 model """kg2e""" +366 84 loss """crossentropy""" +366 84 regularizer """no""" +366 84 optimizer """adadelta""" +366 84 training_loop """lcwa""" +366 84 evaluator """rankbased""" +366 85 dataset """kinships""" +366 85 model """kg2e""" +366 85 loss """crossentropy""" +366 85 regularizer """no""" +366 85 optimizer """adadelta""" +366 85 training_loop """lcwa""" +366 85 evaluator """rankbased""" +366 86 dataset """kinships""" +366 86 model """kg2e""" +366 86 loss """crossentropy""" +366 86 regularizer """no""" +366 86 optimizer """adadelta""" +366 86 training_loop """lcwa""" +366 86 evaluator """rankbased""" +366 87 dataset """kinships""" +366 87 model """kg2e""" +366 87 loss """crossentropy""" +366 87 regularizer 
"""no""" +366 87 optimizer """adadelta""" +366 87 training_loop """lcwa""" +366 87 evaluator """rankbased""" +366 88 dataset """kinships""" +366 88 model """kg2e""" +366 88 loss """crossentropy""" +366 88 regularizer """no""" +366 88 optimizer """adadelta""" +366 88 training_loop """lcwa""" +366 88 evaluator """rankbased""" +366 89 dataset """kinships""" +366 89 model """kg2e""" +366 89 loss """crossentropy""" +366 89 regularizer """no""" +366 89 optimizer """adadelta""" +366 89 training_loop """lcwa""" +366 89 evaluator """rankbased""" +366 90 dataset """kinships""" +366 90 model """kg2e""" +366 90 loss """crossentropy""" +366 90 regularizer """no""" +366 90 optimizer """adadelta""" +366 90 training_loop """lcwa""" +366 90 evaluator """rankbased""" +366 91 dataset """kinships""" +366 91 model """kg2e""" +366 91 loss """crossentropy""" +366 91 regularizer """no""" +366 91 optimizer """adadelta""" +366 91 training_loop """lcwa""" +366 91 evaluator """rankbased""" +366 92 dataset """kinships""" +366 92 model """kg2e""" +366 92 loss """crossentropy""" +366 92 regularizer """no""" +366 92 optimizer """adadelta""" +366 92 training_loop """lcwa""" +366 92 evaluator """rankbased""" +366 93 dataset """kinships""" +366 93 model """kg2e""" +366 93 loss """crossentropy""" +366 93 regularizer """no""" +366 93 optimizer """adadelta""" +366 93 training_loop """lcwa""" +366 93 evaluator """rankbased""" +366 94 dataset """kinships""" +366 94 model """kg2e""" +366 94 loss """crossentropy""" +366 94 regularizer """no""" +366 94 optimizer """adadelta""" +366 94 training_loop """lcwa""" +366 94 evaluator """rankbased""" +366 95 dataset """kinships""" +366 95 model """kg2e""" +366 95 loss """crossentropy""" +366 95 regularizer """no""" +366 95 optimizer """adadelta""" +366 95 training_loop """lcwa""" +366 95 evaluator """rankbased""" +366 96 dataset """kinships""" +366 96 model """kg2e""" +366 96 loss """crossentropy""" +366 96 regularizer """no""" +366 96 optimizer """adadelta""" +366 
96 training_loop """lcwa""" +366 96 evaluator """rankbased""" +366 97 dataset """kinships""" +366 97 model """kg2e""" +366 97 loss """crossentropy""" +366 97 regularizer """no""" +366 97 optimizer """adadelta""" +366 97 training_loop """lcwa""" +366 97 evaluator """rankbased""" +366 98 dataset """kinships""" +366 98 model """kg2e""" +366 98 loss """crossentropy""" +366 98 regularizer """no""" +366 98 optimizer """adadelta""" +366 98 training_loop """lcwa""" +366 98 evaluator """rankbased""" +366 99 dataset """kinships""" +366 99 model """kg2e""" +366 99 loss """crossentropy""" +366 99 regularizer """no""" +366 99 optimizer """adadelta""" +366 99 training_loop """lcwa""" +366 99 evaluator """rankbased""" +366 100 dataset """kinships""" +366 100 model """kg2e""" +366 100 loss """crossentropy""" +366 100 regularizer """no""" +366 100 optimizer """adadelta""" +366 100 training_loop """lcwa""" +366 100 evaluator """rankbased""" +367 1 model.embedding_dim 0.0 +367 1 model.c_min 0.0271590993516987 +367 1 model.c_max 8.655895546863178 +367 1 negative_sampler.num_negs_per_pos 98.0 +367 1 training.batch_size 0.0 +367 2 model.embedding_dim 1.0 +367 2 model.c_min 0.014000447584337664 +367 2 model.c_max 3.905050098642594 +367 2 negative_sampler.num_negs_per_pos 66.0 +367 2 training.batch_size 0.0 +367 3 model.embedding_dim 1.0 +367 3 model.c_min 0.03049727939991056 +367 3 model.c_max 2.6374727989515225 +367 3 negative_sampler.num_negs_per_pos 81.0 +367 3 training.batch_size 1.0 +367 4 model.embedding_dim 1.0 +367 4 model.c_min 0.016004929924653973 +367 4 model.c_max 9.4719793519565 +367 4 negative_sampler.num_negs_per_pos 7.0 +367 4 training.batch_size 2.0 +367 5 model.embedding_dim 0.0 +367 5 model.c_min 0.04756336097604475 +367 5 model.c_max 4.118409991333035 +367 5 negative_sampler.num_negs_per_pos 73.0 +367 5 training.batch_size 2.0 +367 6 model.embedding_dim 1.0 +367 6 model.c_min 0.014680581029861126 +367 6 model.c_max 1.2929966062166027 +367 6 
negative_sampler.num_negs_per_pos 39.0 +367 6 training.batch_size 1.0 +367 7 model.embedding_dim 0.0 +367 7 model.c_min 0.028121582435392842 +367 7 model.c_max 6.474322300527238 +367 7 negative_sampler.num_negs_per_pos 51.0 +367 7 training.batch_size 0.0 +367 8 model.embedding_dim 0.0 +367 8 model.c_min 0.015021614329917561 +367 8 model.c_max 1.2992259136870588 +367 8 negative_sampler.num_negs_per_pos 87.0 +367 8 training.batch_size 1.0 +367 9 model.embedding_dim 0.0 +367 9 model.c_min 0.017539219345722494 +367 9 model.c_max 5.251502426407097 +367 9 negative_sampler.num_negs_per_pos 58.0 +367 9 training.batch_size 1.0 +367 10 model.embedding_dim 0.0 +367 10 model.c_min 0.07062488771976361 +367 10 model.c_max 1.6577317133955818 +367 10 negative_sampler.num_negs_per_pos 64.0 +367 10 training.batch_size 0.0 +367 11 model.embedding_dim 2.0 +367 11 model.c_min 0.09155536508054266 +367 11 model.c_max 3.9727107237769346 +367 11 negative_sampler.num_negs_per_pos 14.0 +367 11 training.batch_size 2.0 +367 12 model.embedding_dim 0.0 +367 12 model.c_min 0.037283578366053546 +367 12 model.c_max 1.9607572691783473 +367 12 negative_sampler.num_negs_per_pos 28.0 +367 12 training.batch_size 0.0 +367 13 model.embedding_dim 0.0 +367 13 model.c_min 0.019618941743187603 +367 13 model.c_max 1.8583491344205576 +367 13 negative_sampler.num_negs_per_pos 71.0 +367 13 training.batch_size 0.0 +367 14 model.embedding_dim 1.0 +367 14 model.c_min 0.010074164703882303 +367 14 model.c_max 6.984639601447269 +367 14 negative_sampler.num_negs_per_pos 54.0 +367 14 training.batch_size 2.0 +367 15 model.embedding_dim 1.0 +367 15 model.c_min 0.013105284713403812 +367 15 model.c_max 9.868035726383392 +367 15 negative_sampler.num_negs_per_pos 3.0 +367 15 training.batch_size 2.0 +367 16 model.embedding_dim 1.0 +367 16 model.c_min 0.01817074878989966 +367 16 model.c_max 7.986816602890192 +367 16 negative_sampler.num_negs_per_pos 80.0 +367 16 training.batch_size 2.0 +367 17 model.embedding_dim 2.0 +367 17 
model.c_min 0.01681703442011948 +367 17 model.c_max 6.793637455111223 +367 17 negative_sampler.num_negs_per_pos 49.0 +367 17 training.batch_size 0.0 +367 18 model.embedding_dim 1.0 +367 18 model.c_min 0.06944099270698711 +367 18 model.c_max 9.773374834457028 +367 18 negative_sampler.num_negs_per_pos 17.0 +367 18 training.batch_size 2.0 +367 19 model.embedding_dim 1.0 +367 19 model.c_min 0.04876360471272694 +367 19 model.c_max 3.388624795768986 +367 19 negative_sampler.num_negs_per_pos 67.0 +367 19 training.batch_size 2.0 +367 20 model.embedding_dim 2.0 +367 20 model.c_min 0.013463055053821124 +367 20 model.c_max 4.9337910892612165 +367 20 negative_sampler.num_negs_per_pos 22.0 +367 20 training.batch_size 1.0 +367 21 model.embedding_dim 0.0 +367 21 model.c_min 0.021543101575135497 +367 21 model.c_max 2.8613755423017597 +367 21 negative_sampler.num_negs_per_pos 70.0 +367 21 training.batch_size 2.0 +367 22 model.embedding_dim 0.0 +367 22 model.c_min 0.0707460182731923 +367 22 model.c_max 2.2786342280058416 +367 22 negative_sampler.num_negs_per_pos 62.0 +367 22 training.batch_size 1.0 +367 23 model.embedding_dim 0.0 +367 23 model.c_min 0.0700726273443163 +367 23 model.c_max 3.003928268964423 +367 23 negative_sampler.num_negs_per_pos 40.0 +367 23 training.batch_size 0.0 +367 24 model.embedding_dim 2.0 +367 24 model.c_min 0.015282303430598094 +367 24 model.c_max 5.560022815336993 +367 24 negative_sampler.num_negs_per_pos 43.0 +367 24 training.batch_size 0.0 +367 25 model.embedding_dim 0.0 +367 25 model.c_min 0.01785889990749107 +367 25 model.c_max 9.438474248597656 +367 25 negative_sampler.num_negs_per_pos 19.0 +367 25 training.batch_size 1.0 +367 26 model.embedding_dim 2.0 +367 26 model.c_min 0.032359510890089614 +367 26 model.c_max 8.44930566561888 +367 26 negative_sampler.num_negs_per_pos 91.0 +367 26 training.batch_size 1.0 +367 27 model.embedding_dim 1.0 +367 27 model.c_min 0.014075282297636767 +367 27 model.c_max 3.728339008013468 +367 27 
negative_sampler.num_negs_per_pos 4.0 +367 27 training.batch_size 1.0 +367 28 model.embedding_dim 2.0 +367 28 model.c_min 0.04133670029589385 +367 28 model.c_max 5.76756611379495 +367 28 negative_sampler.num_negs_per_pos 12.0 +367 28 training.batch_size 1.0 +367 29 model.embedding_dim 2.0 +367 29 model.c_min 0.023841709525528294 +367 29 model.c_max 1.2796985261678835 +367 29 negative_sampler.num_negs_per_pos 4.0 +367 29 training.batch_size 1.0 +367 30 model.embedding_dim 1.0 +367 30 model.c_min 0.012699173806483373 +367 30 model.c_max 2.72806391279258 +367 30 negative_sampler.num_negs_per_pos 97.0 +367 30 training.batch_size 1.0 +367 31 model.embedding_dim 1.0 +367 31 model.c_min 0.021565953694143183 +367 31 model.c_max 3.210972010619403 +367 31 negative_sampler.num_negs_per_pos 76.0 +367 31 training.batch_size 2.0 +367 32 model.embedding_dim 1.0 +367 32 model.c_min 0.01692239386594395 +367 32 model.c_max 7.744937448765341 +367 32 negative_sampler.num_negs_per_pos 37.0 +367 32 training.batch_size 1.0 +367 33 model.embedding_dim 1.0 +367 33 model.c_min 0.0210567008066584 +367 33 model.c_max 1.27473369330029 +367 33 negative_sampler.num_negs_per_pos 66.0 +367 33 training.batch_size 0.0 +367 34 model.embedding_dim 2.0 +367 34 model.c_min 0.049974810772253214 +367 34 model.c_max 6.3775464912419215 +367 34 negative_sampler.num_negs_per_pos 44.0 +367 34 training.batch_size 1.0 +367 35 model.embedding_dim 2.0 +367 35 model.c_min 0.020687961683673477 +367 35 model.c_max 6.69702805748328 +367 35 negative_sampler.num_negs_per_pos 69.0 +367 35 training.batch_size 0.0 +367 36 model.embedding_dim 1.0 +367 36 model.c_min 0.01249045237680721 +367 36 model.c_max 9.158369266072269 +367 36 negative_sampler.num_negs_per_pos 93.0 +367 36 training.batch_size 2.0 +367 37 model.embedding_dim 0.0 +367 37 model.c_min 0.04467300939740077 +367 37 model.c_max 9.33336362203267 +367 37 negative_sampler.num_negs_per_pos 87.0 +367 37 training.batch_size 2.0 +367 38 model.embedding_dim 0.0 +367 38 
model.c_min 0.07868242541233054 +367 38 model.c_max 7.912549032117244 +367 38 negative_sampler.num_negs_per_pos 87.0 +367 38 training.batch_size 2.0 +367 39 model.embedding_dim 0.0 +367 39 model.c_min 0.032860547812866917 +367 39 model.c_max 8.107126043072997 +367 39 negative_sampler.num_negs_per_pos 17.0 +367 39 training.batch_size 1.0 +367 40 model.embedding_dim 1.0 +367 40 model.c_min 0.05921770159263338 +367 40 model.c_max 3.093208563126651 +367 40 negative_sampler.num_negs_per_pos 32.0 +367 40 training.batch_size 0.0 +367 41 model.embedding_dim 1.0 +367 41 model.c_min 0.056886926931361555 +367 41 model.c_max 1.8831595268110495 +367 41 negative_sampler.num_negs_per_pos 89.0 +367 41 training.batch_size 0.0 +367 42 model.embedding_dim 0.0 +367 42 model.c_min 0.020573699099657613 +367 42 model.c_max 2.632095126356919 +367 42 negative_sampler.num_negs_per_pos 99.0 +367 42 training.batch_size 0.0 +367 43 model.embedding_dim 1.0 +367 43 model.c_min 0.0444879228656703 +367 43 model.c_max 8.911199601991623 +367 43 negative_sampler.num_negs_per_pos 34.0 +367 43 training.batch_size 2.0 +367 44 model.embedding_dim 1.0 +367 44 model.c_min 0.07260178599935764 +367 44 model.c_max 4.8769128214718 +367 44 negative_sampler.num_negs_per_pos 77.0 +367 44 training.batch_size 0.0 +367 45 model.embedding_dim 0.0 +367 45 model.c_min 0.02165800501881079 +367 45 model.c_max 3.684026044867437 +367 45 negative_sampler.num_negs_per_pos 94.0 +367 45 training.batch_size 0.0 +367 46 model.embedding_dim 0.0 +367 46 model.c_min 0.08149636441669347 +367 46 model.c_max 2.8696603168552595 +367 46 negative_sampler.num_negs_per_pos 49.0 +367 46 training.batch_size 2.0 +367 47 model.embedding_dim 1.0 +367 47 model.c_min 0.02257852139812676 +367 47 model.c_max 4.949457217298603 +367 47 negative_sampler.num_negs_per_pos 77.0 +367 47 training.batch_size 0.0 +367 48 model.embedding_dim 2.0 +367 48 model.c_min 0.013710835366303851 +367 48 model.c_max 7.9844627374225805 +367 48 
negative_sampler.num_negs_per_pos 60.0 +367 48 training.batch_size 2.0 +367 49 model.embedding_dim 0.0 +367 49 model.c_min 0.01988681959738232 +367 49 model.c_max 4.149009274067743 +367 49 negative_sampler.num_negs_per_pos 85.0 +367 49 training.batch_size 2.0 +367 50 model.embedding_dim 1.0 +367 50 model.c_min 0.01017540690833311 +367 50 model.c_max 9.952600397034251 +367 50 negative_sampler.num_negs_per_pos 72.0 +367 50 training.batch_size 1.0 +367 51 model.embedding_dim 1.0 +367 51 model.c_min 0.030902444440365134 +367 51 model.c_max 4.100012906089738 +367 51 negative_sampler.num_negs_per_pos 37.0 +367 51 training.batch_size 0.0 +367 52 model.embedding_dim 0.0 +367 52 model.c_min 0.08076453529731704 +367 52 model.c_max 8.505900453965882 +367 52 negative_sampler.num_negs_per_pos 38.0 +367 52 training.batch_size 1.0 +367 53 model.embedding_dim 2.0 +367 53 model.c_min 0.014703520874153058 +367 53 model.c_max 2.2700438680586172 +367 53 negative_sampler.num_negs_per_pos 17.0 +367 53 training.batch_size 2.0 +367 54 model.embedding_dim 2.0 +367 54 model.c_min 0.018618511626051323 +367 54 model.c_max 4.933639794845837 +367 54 negative_sampler.num_negs_per_pos 24.0 +367 54 training.batch_size 1.0 +367 55 model.embedding_dim 0.0 +367 55 model.c_min 0.09682377600253537 +367 55 model.c_max 2.8405803400221132 +367 55 negative_sampler.num_negs_per_pos 93.0 +367 55 training.batch_size 2.0 +367 56 model.embedding_dim 0.0 +367 56 model.c_min 0.023923150040750567 +367 56 model.c_max 6.067960563613626 +367 56 negative_sampler.num_negs_per_pos 10.0 +367 56 training.batch_size 2.0 +367 57 model.embedding_dim 1.0 +367 57 model.c_min 0.026776725746666893 +367 57 model.c_max 6.083479605341607 +367 57 negative_sampler.num_negs_per_pos 78.0 +367 57 training.batch_size 0.0 +367 58 model.embedding_dim 0.0 +367 58 model.c_min 0.027740492146146243 +367 58 model.c_max 6.212171466097335 +367 58 negative_sampler.num_negs_per_pos 61.0 +367 58 training.batch_size 0.0 +367 59 model.embedding_dim 
2.0 +367 59 model.c_min 0.011638126860050071 +367 59 model.c_max 3.922921783665472 +367 59 negative_sampler.num_negs_per_pos 39.0 +367 59 training.batch_size 1.0 +367 60 model.embedding_dim 1.0 +367 60 model.c_min 0.02714611088927178 +367 60 model.c_max 8.014894108175039 +367 60 negative_sampler.num_negs_per_pos 99.0 +367 60 training.batch_size 1.0 +367 61 model.embedding_dim 1.0 +367 61 model.c_min 0.04311597582691477 +367 61 model.c_max 6.033166197719959 +367 61 negative_sampler.num_negs_per_pos 31.0 +367 61 training.batch_size 0.0 +367 62 model.embedding_dim 1.0 +367 62 model.c_min 0.015314357856931447 +367 62 model.c_max 7.571628737331374 +367 62 negative_sampler.num_negs_per_pos 31.0 +367 62 training.batch_size 0.0 +367 63 model.embedding_dim 2.0 +367 63 model.c_min 0.014669846513798759 +367 63 model.c_max 9.921661485382902 +367 63 negative_sampler.num_negs_per_pos 66.0 +367 63 training.batch_size 2.0 +367 64 model.embedding_dim 0.0 +367 64 model.c_min 0.020874253634605704 +367 64 model.c_max 5.9743945279044794 +367 64 negative_sampler.num_negs_per_pos 18.0 +367 64 training.batch_size 0.0 +367 65 model.embedding_dim 2.0 +367 65 model.c_min 0.022342535073723038 +367 65 model.c_max 7.29428853227827 +367 65 negative_sampler.num_negs_per_pos 61.0 +367 65 training.batch_size 1.0 +367 66 model.embedding_dim 1.0 +367 66 model.c_min 0.028294405815543544 +367 66 model.c_max 9.314577022173557 +367 66 negative_sampler.num_negs_per_pos 77.0 +367 66 training.batch_size 1.0 +367 67 model.embedding_dim 1.0 +367 67 model.c_min 0.012873192267157623 +367 67 model.c_max 6.88665293887992 +367 67 negative_sampler.num_negs_per_pos 15.0 +367 67 training.batch_size 1.0 +367 68 model.embedding_dim 2.0 +367 68 model.c_min 0.06422208184432732 +367 68 model.c_max 6.570506080435235 +367 68 negative_sampler.num_negs_per_pos 67.0 +367 68 training.batch_size 1.0 +367 69 model.embedding_dim 2.0 +367 69 model.c_min 0.028536832852048104 +367 69 model.c_max 8.38165247716653 +367 69 
negative_sampler.num_negs_per_pos 83.0 +367 69 training.batch_size 1.0 +367 70 model.embedding_dim 2.0 +367 70 model.c_min 0.09104413757999764 +367 70 model.c_max 3.6606472038512687 +367 70 negative_sampler.num_negs_per_pos 82.0 +367 70 training.batch_size 2.0 +367 71 model.embedding_dim 2.0 +367 71 model.c_min 0.01754645831513666 +367 71 model.c_max 3.0219373914729695 +367 71 negative_sampler.num_negs_per_pos 75.0 +367 71 training.batch_size 1.0 +367 72 model.embedding_dim 0.0 +367 72 model.c_min 0.01668785313014276 +367 72 model.c_max 3.278920713107804 +367 72 negative_sampler.num_negs_per_pos 65.0 +367 72 training.batch_size 0.0 +367 73 model.embedding_dim 1.0 +367 73 model.c_min 0.09520275262186487 +367 73 model.c_max 3.2318331695700464 +367 73 negative_sampler.num_negs_per_pos 89.0 +367 73 training.batch_size 2.0 +367 74 model.embedding_dim 2.0 +367 74 model.c_min 0.0365906432275994 +367 74 model.c_max 6.46058442449423 +367 74 negative_sampler.num_negs_per_pos 81.0 +367 74 training.batch_size 1.0 +367 75 model.embedding_dim 0.0 +367 75 model.c_min 0.021058652344867358 +367 75 model.c_max 7.165478032962347 +367 75 negative_sampler.num_negs_per_pos 18.0 +367 75 training.batch_size 2.0 +367 76 model.embedding_dim 1.0 +367 76 model.c_min 0.013401528888303196 +367 76 model.c_max 1.6345120238704853 +367 76 negative_sampler.num_negs_per_pos 54.0 +367 76 training.batch_size 2.0 +367 77 model.embedding_dim 2.0 +367 77 model.c_min 0.010216374242377552 +367 77 model.c_max 3.654178630714207 +367 77 negative_sampler.num_negs_per_pos 97.0 +367 77 training.batch_size 2.0 +367 78 model.embedding_dim 2.0 +367 78 model.c_min 0.07788309370130016 +367 78 model.c_max 9.495254194009997 +367 78 negative_sampler.num_negs_per_pos 35.0 +367 78 training.batch_size 1.0 +367 79 model.embedding_dim 1.0 +367 79 model.c_min 0.01951950101814245 +367 79 model.c_max 6.918084662950317 +367 79 negative_sampler.num_negs_per_pos 33.0 +367 79 training.batch_size 0.0 +367 80 model.embedding_dim 2.0 
+367 80 model.c_min 0.019001114543277364 +367 80 model.c_max 2.5417233430032593 +367 80 negative_sampler.num_negs_per_pos 95.0 +367 80 training.batch_size 1.0 +367 81 model.embedding_dim 0.0 +367 81 model.c_min 0.04168120280118536 +367 81 model.c_max 4.578573578329712 +367 81 negative_sampler.num_negs_per_pos 60.0 +367 81 training.batch_size 2.0 +367 82 model.embedding_dim 2.0 +367 82 model.c_min 0.013732530824696144 +367 82 model.c_max 8.159234163707021 +367 82 negative_sampler.num_negs_per_pos 91.0 +367 82 training.batch_size 2.0 +367 83 model.embedding_dim 2.0 +367 83 model.c_min 0.01024303384948726 +367 83 model.c_max 2.9659932442960955 +367 83 negative_sampler.num_negs_per_pos 66.0 +367 83 training.batch_size 2.0 +367 84 model.embedding_dim 1.0 +367 84 model.c_min 0.015535583129813289 +367 84 model.c_max 5.274711361874314 +367 84 negative_sampler.num_negs_per_pos 89.0 +367 84 training.batch_size 2.0 +367 85 model.embedding_dim 0.0 +367 85 model.c_min 0.055365370047656924 +367 85 model.c_max 1.9064477020024229 +367 85 negative_sampler.num_negs_per_pos 71.0 +367 85 training.batch_size 1.0 +367 86 model.embedding_dim 1.0 +367 86 model.c_min 0.03513292236320501 +367 86 model.c_max 8.714011446837423 +367 86 negative_sampler.num_negs_per_pos 41.0 +367 86 training.batch_size 2.0 +367 87 model.embedding_dim 1.0 +367 87 model.c_min 0.06409441579608811 +367 87 model.c_max 4.160474267465119 +367 87 negative_sampler.num_negs_per_pos 92.0 +367 87 training.batch_size 0.0 +367 88 model.embedding_dim 0.0 +367 88 model.c_min 0.02905464655082616 +367 88 model.c_max 2.619388034746061 +367 88 negative_sampler.num_negs_per_pos 92.0 +367 88 training.batch_size 2.0 +367 89 model.embedding_dim 1.0 +367 89 model.c_min 0.017581970678934303 +367 89 model.c_max 5.473292373008305 +367 89 negative_sampler.num_negs_per_pos 82.0 +367 89 training.batch_size 0.0 +367 90 model.embedding_dim 1.0 +367 90 model.c_min 0.06001410837145998 +367 90 model.c_max 1.1989072985258258 +367 90 
negative_sampler.num_negs_per_pos 90.0 +367 90 training.batch_size 0.0 +367 91 model.embedding_dim 2.0 +367 91 model.c_min 0.030107721103211978 +367 91 model.c_max 4.78628550139378 +367 91 negative_sampler.num_negs_per_pos 22.0 +367 91 training.batch_size 1.0 +367 92 model.embedding_dim 0.0 +367 92 model.c_min 0.09742113733031012 +367 92 model.c_max 7.754222373532528 +367 92 negative_sampler.num_negs_per_pos 13.0 +367 92 training.batch_size 0.0 +367 93 model.embedding_dim 0.0 +367 93 model.c_min 0.05422176567208991 +367 93 model.c_max 4.7241793095274875 +367 93 negative_sampler.num_negs_per_pos 4.0 +367 93 training.batch_size 1.0 +367 94 model.embedding_dim 2.0 +367 94 model.c_min 0.034335525593145344 +367 94 model.c_max 4.272491885285319 +367 94 negative_sampler.num_negs_per_pos 94.0 +367 94 training.batch_size 2.0 +367 95 model.embedding_dim 2.0 +367 95 model.c_min 0.03126068796308006 +367 95 model.c_max 7.070543571665239 +367 95 negative_sampler.num_negs_per_pos 65.0 +367 95 training.batch_size 0.0 +367 96 model.embedding_dim 0.0 +367 96 model.c_min 0.018361328113982008 +367 96 model.c_max 8.758353599252537 +367 96 negative_sampler.num_negs_per_pos 87.0 +367 96 training.batch_size 2.0 +367 97 model.embedding_dim 1.0 +367 97 model.c_min 0.04383234533056686 +367 97 model.c_max 8.342023722802704 +367 97 negative_sampler.num_negs_per_pos 81.0 +367 97 training.batch_size 0.0 +367 98 model.embedding_dim 2.0 +367 98 model.c_min 0.015049263429730021 +367 98 model.c_max 9.28733493283534 +367 98 negative_sampler.num_negs_per_pos 35.0 +367 98 training.batch_size 0.0 +367 99 model.embedding_dim 2.0 +367 99 model.c_min 0.053578009731640196 +367 99 model.c_max 4.139937016828948 +367 99 negative_sampler.num_negs_per_pos 34.0 +367 99 training.batch_size 2.0 +367 100 model.embedding_dim 2.0 +367 100 model.c_min 0.011583344030703277 +367 100 model.c_max 6.380641546051875 +367 100 negative_sampler.num_negs_per_pos 96.0 +367 100 training.batch_size 1.0 +367 1 dataset """kinships""" 
+367 1 model """kg2e""" +367 1 loss """bceaftersigmoid""" +367 1 regularizer """no""" +367 1 optimizer """adadelta""" +367 1 training_loop """owa""" +367 1 negative_sampler """basic""" +367 1 evaluator """rankbased""" +367 2 dataset """kinships""" +367 2 model """kg2e""" +367 2 loss """bceaftersigmoid""" +367 2 regularizer """no""" +367 2 optimizer """adadelta""" +367 2 training_loop """owa""" +367 2 negative_sampler """basic""" +367 2 evaluator """rankbased""" +367 3 dataset """kinships""" +367 3 model """kg2e""" +367 3 loss """bceaftersigmoid""" +367 3 regularizer """no""" +367 3 optimizer """adadelta""" +367 3 training_loop """owa""" +367 3 negative_sampler """basic""" +367 3 evaluator """rankbased""" +367 4 dataset """kinships""" +367 4 model """kg2e""" +367 4 loss """bceaftersigmoid""" +367 4 regularizer """no""" +367 4 optimizer """adadelta""" +367 4 training_loop """owa""" +367 4 negative_sampler """basic""" +367 4 evaluator """rankbased""" +367 5 dataset """kinships""" +367 5 model """kg2e""" +367 5 loss """bceaftersigmoid""" +367 5 regularizer """no""" +367 5 optimizer """adadelta""" +367 5 training_loop """owa""" +367 5 negative_sampler """basic""" +367 5 evaluator """rankbased""" +367 6 dataset """kinships""" +367 6 model """kg2e""" +367 6 loss """bceaftersigmoid""" +367 6 regularizer """no""" +367 6 optimizer """adadelta""" +367 6 training_loop """owa""" +367 6 negative_sampler """basic""" +367 6 evaluator """rankbased""" +367 7 dataset """kinships""" +367 7 model """kg2e""" +367 7 loss """bceaftersigmoid""" +367 7 regularizer """no""" +367 7 optimizer """adadelta""" +367 7 training_loop """owa""" +367 7 negative_sampler """basic""" +367 7 evaluator """rankbased""" +367 8 dataset """kinships""" +367 8 model """kg2e""" +367 8 loss """bceaftersigmoid""" +367 8 regularizer """no""" +367 8 optimizer """adadelta""" +367 8 training_loop """owa""" +367 8 negative_sampler """basic""" +367 8 evaluator """rankbased""" +367 9 dataset """kinships""" +367 9 model 
"""kg2e""" +367 9 loss """bceaftersigmoid""" +367 9 regularizer """no""" +367 9 optimizer """adadelta""" +367 9 training_loop """owa""" +367 9 negative_sampler """basic""" +367 9 evaluator """rankbased""" +367 10 dataset """kinships""" +367 10 model """kg2e""" +367 10 loss """bceaftersigmoid""" +367 10 regularizer """no""" +367 10 optimizer """adadelta""" +367 10 training_loop """owa""" +367 10 negative_sampler """basic""" +367 10 evaluator """rankbased""" +367 11 dataset """kinships""" +367 11 model """kg2e""" +367 11 loss """bceaftersigmoid""" +367 11 regularizer """no""" +367 11 optimizer """adadelta""" +367 11 training_loop """owa""" +367 11 negative_sampler """basic""" +367 11 evaluator """rankbased""" +367 12 dataset """kinships""" +367 12 model """kg2e""" +367 12 loss """bceaftersigmoid""" +367 12 regularizer """no""" +367 12 optimizer """adadelta""" +367 12 training_loop """owa""" +367 12 negative_sampler """basic""" +367 12 evaluator """rankbased""" +367 13 dataset """kinships""" +367 13 model """kg2e""" +367 13 loss """bceaftersigmoid""" +367 13 regularizer """no""" +367 13 optimizer """adadelta""" +367 13 training_loop """owa""" +367 13 negative_sampler """basic""" +367 13 evaluator """rankbased""" +367 14 dataset """kinships""" +367 14 model """kg2e""" +367 14 loss """bceaftersigmoid""" +367 14 regularizer """no""" +367 14 optimizer """adadelta""" +367 14 training_loop """owa""" +367 14 negative_sampler """basic""" +367 14 evaluator """rankbased""" +367 15 dataset """kinships""" +367 15 model """kg2e""" +367 15 loss """bceaftersigmoid""" +367 15 regularizer """no""" +367 15 optimizer """adadelta""" +367 15 training_loop """owa""" +367 15 negative_sampler """basic""" +367 15 evaluator """rankbased""" +367 16 dataset """kinships""" +367 16 model """kg2e""" +367 16 loss """bceaftersigmoid""" +367 16 regularizer """no""" +367 16 optimizer """adadelta""" +367 16 training_loop """owa""" +367 16 negative_sampler """basic""" +367 16 evaluator """rankbased""" 
+367 17 dataset """kinships""" +367 17 model """kg2e""" +367 17 loss """bceaftersigmoid""" +367 17 regularizer """no""" +367 17 optimizer """adadelta""" +367 17 training_loop """owa""" +367 17 negative_sampler """basic""" +367 17 evaluator """rankbased""" +367 18 dataset """kinships""" +367 18 model """kg2e""" +367 18 loss """bceaftersigmoid""" +367 18 regularizer """no""" +367 18 optimizer """adadelta""" +367 18 training_loop """owa""" +367 18 negative_sampler """basic""" +367 18 evaluator """rankbased""" +367 19 dataset """kinships""" +367 19 model """kg2e""" +367 19 loss """bceaftersigmoid""" +367 19 regularizer """no""" +367 19 optimizer """adadelta""" +367 19 training_loop """owa""" +367 19 negative_sampler """basic""" +367 19 evaluator """rankbased""" +367 20 dataset """kinships""" +367 20 model """kg2e""" +367 20 loss """bceaftersigmoid""" +367 20 regularizer """no""" +367 20 optimizer """adadelta""" +367 20 training_loop """owa""" +367 20 negative_sampler """basic""" +367 20 evaluator """rankbased""" +367 21 dataset """kinships""" +367 21 model """kg2e""" +367 21 loss """bceaftersigmoid""" +367 21 regularizer """no""" +367 21 optimizer """adadelta""" +367 21 training_loop """owa""" +367 21 negative_sampler """basic""" +367 21 evaluator """rankbased""" +367 22 dataset """kinships""" +367 22 model """kg2e""" +367 22 loss """bceaftersigmoid""" +367 22 regularizer """no""" +367 22 optimizer """adadelta""" +367 22 training_loop """owa""" +367 22 negative_sampler """basic""" +367 22 evaluator """rankbased""" +367 23 dataset """kinships""" +367 23 model """kg2e""" +367 23 loss """bceaftersigmoid""" +367 23 regularizer """no""" +367 23 optimizer """adadelta""" +367 23 training_loop """owa""" +367 23 negative_sampler """basic""" +367 23 evaluator """rankbased""" +367 24 dataset """kinships""" +367 24 model """kg2e""" +367 24 loss """bceaftersigmoid""" +367 24 regularizer """no""" +367 24 optimizer """adadelta""" +367 24 training_loop """owa""" +367 24 
negative_sampler """basic""" +367 24 evaluator """rankbased""" +367 25 dataset """kinships""" +367 25 model """kg2e""" +367 25 loss """bceaftersigmoid""" +367 25 regularizer """no""" +367 25 optimizer """adadelta""" +367 25 training_loop """owa""" +367 25 negative_sampler """basic""" +367 25 evaluator """rankbased""" +367 26 dataset """kinships""" +367 26 model """kg2e""" +367 26 loss """bceaftersigmoid""" +367 26 regularizer """no""" +367 26 optimizer """adadelta""" +367 26 training_loop """owa""" +367 26 negative_sampler """basic""" +367 26 evaluator """rankbased""" +367 27 dataset """kinships""" +367 27 model """kg2e""" +367 27 loss """bceaftersigmoid""" +367 27 regularizer """no""" +367 27 optimizer """adadelta""" +367 27 training_loop """owa""" +367 27 negative_sampler """basic""" +367 27 evaluator """rankbased""" +367 28 dataset """kinships""" +367 28 model """kg2e""" +367 28 loss """bceaftersigmoid""" +367 28 regularizer """no""" +367 28 optimizer """adadelta""" +367 28 training_loop """owa""" +367 28 negative_sampler """basic""" +367 28 evaluator """rankbased""" +367 29 dataset """kinships""" +367 29 model """kg2e""" +367 29 loss """bceaftersigmoid""" +367 29 regularizer """no""" +367 29 optimizer """adadelta""" +367 29 training_loop """owa""" +367 29 negative_sampler """basic""" +367 29 evaluator """rankbased""" +367 30 dataset """kinships""" +367 30 model """kg2e""" +367 30 loss """bceaftersigmoid""" +367 30 regularizer """no""" +367 30 optimizer """adadelta""" +367 30 training_loop """owa""" +367 30 negative_sampler """basic""" +367 30 evaluator """rankbased""" +367 31 dataset """kinships""" +367 31 model """kg2e""" +367 31 loss """bceaftersigmoid""" +367 31 regularizer """no""" +367 31 optimizer """adadelta""" +367 31 training_loop """owa""" +367 31 negative_sampler """basic""" +367 31 evaluator """rankbased""" +367 32 dataset """kinships""" +367 32 model """kg2e""" +367 32 loss """bceaftersigmoid""" +367 32 regularizer """no""" +367 32 optimizer 
"""adadelta""" +367 32 training_loop """owa""" +367 32 negative_sampler """basic""" +367 32 evaluator """rankbased""" +367 33 dataset """kinships""" +367 33 model """kg2e""" +367 33 loss """bceaftersigmoid""" +367 33 regularizer """no""" +367 33 optimizer """adadelta""" +367 33 training_loop """owa""" +367 33 negative_sampler """basic""" +367 33 evaluator """rankbased""" +367 34 dataset """kinships""" +367 34 model """kg2e""" +367 34 loss """bceaftersigmoid""" +367 34 regularizer """no""" +367 34 optimizer """adadelta""" +367 34 training_loop """owa""" +367 34 negative_sampler """basic""" +367 34 evaluator """rankbased""" +367 35 dataset """kinships""" +367 35 model """kg2e""" +367 35 loss """bceaftersigmoid""" +367 35 regularizer """no""" +367 35 optimizer """adadelta""" +367 35 training_loop """owa""" +367 35 negative_sampler """basic""" +367 35 evaluator """rankbased""" +367 36 dataset """kinships""" +367 36 model """kg2e""" +367 36 loss """bceaftersigmoid""" +367 36 regularizer """no""" +367 36 optimizer """adadelta""" +367 36 training_loop """owa""" +367 36 negative_sampler """basic""" +367 36 evaluator """rankbased""" +367 37 dataset """kinships""" +367 37 model """kg2e""" +367 37 loss """bceaftersigmoid""" +367 37 regularizer """no""" +367 37 optimizer """adadelta""" +367 37 training_loop """owa""" +367 37 negative_sampler """basic""" +367 37 evaluator """rankbased""" +367 38 dataset """kinships""" +367 38 model """kg2e""" +367 38 loss """bceaftersigmoid""" +367 38 regularizer """no""" +367 38 optimizer """adadelta""" +367 38 training_loop """owa""" +367 38 negative_sampler """basic""" +367 38 evaluator """rankbased""" +367 39 dataset """kinships""" +367 39 model """kg2e""" +367 39 loss """bceaftersigmoid""" +367 39 regularizer """no""" +367 39 optimizer """adadelta""" +367 39 training_loop """owa""" +367 39 negative_sampler """basic""" +367 39 evaluator """rankbased""" +367 40 dataset """kinships""" +367 40 model """kg2e""" +367 40 loss 
"""bceaftersigmoid""" +367 40 regularizer """no""" +367 40 optimizer """adadelta""" +367 40 training_loop """owa""" +367 40 negative_sampler """basic""" +367 40 evaluator """rankbased""" +367 41 dataset """kinships""" +367 41 model """kg2e""" +367 41 loss """bceaftersigmoid""" +367 41 regularizer """no""" +367 41 optimizer """adadelta""" +367 41 training_loop """owa""" +367 41 negative_sampler """basic""" +367 41 evaluator """rankbased""" +367 42 dataset """kinships""" +367 42 model """kg2e""" +367 42 loss """bceaftersigmoid""" +367 42 regularizer """no""" +367 42 optimizer """adadelta""" +367 42 training_loop """owa""" +367 42 negative_sampler """basic""" +367 42 evaluator """rankbased""" +367 43 dataset """kinships""" +367 43 model """kg2e""" +367 43 loss """bceaftersigmoid""" +367 43 regularizer """no""" +367 43 optimizer """adadelta""" +367 43 training_loop """owa""" +367 43 negative_sampler """basic""" +367 43 evaluator """rankbased""" +367 44 dataset """kinships""" +367 44 model """kg2e""" +367 44 loss """bceaftersigmoid""" +367 44 regularizer """no""" +367 44 optimizer """adadelta""" +367 44 training_loop """owa""" +367 44 negative_sampler """basic""" +367 44 evaluator """rankbased""" +367 45 dataset """kinships""" +367 45 model """kg2e""" +367 45 loss """bceaftersigmoid""" +367 45 regularizer """no""" +367 45 optimizer """adadelta""" +367 45 training_loop """owa""" +367 45 negative_sampler """basic""" +367 45 evaluator """rankbased""" +367 46 dataset """kinships""" +367 46 model """kg2e""" +367 46 loss """bceaftersigmoid""" +367 46 regularizer """no""" +367 46 optimizer """adadelta""" +367 46 training_loop """owa""" +367 46 negative_sampler """basic""" +367 46 evaluator """rankbased""" +367 47 dataset """kinships""" +367 47 model """kg2e""" +367 47 loss """bceaftersigmoid""" +367 47 regularizer """no""" +367 47 optimizer """adadelta""" +367 47 training_loop """owa""" +367 47 negative_sampler """basic""" +367 47 evaluator """rankbased""" +367 48 dataset 
"""kinships""" +367 48 model """kg2e""" +367 48 loss """bceaftersigmoid""" +367 48 regularizer """no""" +367 48 optimizer """adadelta""" +367 48 training_loop """owa""" +367 48 negative_sampler """basic""" +367 48 evaluator """rankbased""" +367 49 dataset """kinships""" +367 49 model """kg2e""" +367 49 loss """bceaftersigmoid""" +367 49 regularizer """no""" +367 49 optimizer """adadelta""" +367 49 training_loop """owa""" +367 49 negative_sampler """basic""" +367 49 evaluator """rankbased""" +367 50 dataset """kinships""" +367 50 model """kg2e""" +367 50 loss """bceaftersigmoid""" +367 50 regularizer """no""" +367 50 optimizer """adadelta""" +367 50 training_loop """owa""" +367 50 negative_sampler """basic""" +367 50 evaluator """rankbased""" +367 51 dataset """kinships""" +367 51 model """kg2e""" +367 51 loss """bceaftersigmoid""" +367 51 regularizer """no""" +367 51 optimizer """adadelta""" +367 51 training_loop """owa""" +367 51 negative_sampler """basic""" +367 51 evaluator """rankbased""" +367 52 dataset """kinships""" +367 52 model """kg2e""" +367 52 loss """bceaftersigmoid""" +367 52 regularizer """no""" +367 52 optimizer """adadelta""" +367 52 training_loop """owa""" +367 52 negative_sampler """basic""" +367 52 evaluator """rankbased""" +367 53 dataset """kinships""" +367 53 model """kg2e""" +367 53 loss """bceaftersigmoid""" +367 53 regularizer """no""" +367 53 optimizer """adadelta""" +367 53 training_loop """owa""" +367 53 negative_sampler """basic""" +367 53 evaluator """rankbased""" +367 54 dataset """kinships""" +367 54 model """kg2e""" +367 54 loss """bceaftersigmoid""" +367 54 regularizer """no""" +367 54 optimizer """adadelta""" +367 54 training_loop """owa""" +367 54 negative_sampler """basic""" +367 54 evaluator """rankbased""" +367 55 dataset """kinships""" +367 55 model """kg2e""" +367 55 loss """bceaftersigmoid""" +367 55 regularizer """no""" +367 55 optimizer """adadelta""" +367 55 training_loop """owa""" +367 55 negative_sampler """basic""" 
+367 55 evaluator """rankbased""" +367 56 dataset """kinships""" +367 56 model """kg2e""" +367 56 loss """bceaftersigmoid""" +367 56 regularizer """no""" +367 56 optimizer """adadelta""" +367 56 training_loop """owa""" +367 56 negative_sampler """basic""" +367 56 evaluator """rankbased""" +367 57 dataset """kinships""" +367 57 model """kg2e""" +367 57 loss """bceaftersigmoid""" +367 57 regularizer """no""" +367 57 optimizer """adadelta""" +367 57 training_loop """owa""" +367 57 negative_sampler """basic""" +367 57 evaluator """rankbased""" +367 58 dataset """kinships""" +367 58 model """kg2e""" +367 58 loss """bceaftersigmoid""" +367 58 regularizer """no""" +367 58 optimizer """adadelta""" +367 58 training_loop """owa""" +367 58 negative_sampler """basic""" +367 58 evaluator """rankbased""" +367 59 dataset """kinships""" +367 59 model """kg2e""" +367 59 loss """bceaftersigmoid""" +367 59 regularizer """no""" +367 59 optimizer """adadelta""" +367 59 training_loop """owa""" +367 59 negative_sampler """basic""" +367 59 evaluator """rankbased""" +367 60 dataset """kinships""" +367 60 model """kg2e""" +367 60 loss """bceaftersigmoid""" +367 60 regularizer """no""" +367 60 optimizer """adadelta""" +367 60 training_loop """owa""" +367 60 negative_sampler """basic""" +367 60 evaluator """rankbased""" +367 61 dataset """kinships""" +367 61 model """kg2e""" +367 61 loss """bceaftersigmoid""" +367 61 regularizer """no""" +367 61 optimizer """adadelta""" +367 61 training_loop """owa""" +367 61 negative_sampler """basic""" +367 61 evaluator """rankbased""" +367 62 dataset """kinships""" +367 62 model """kg2e""" +367 62 loss """bceaftersigmoid""" +367 62 regularizer """no""" +367 62 optimizer """adadelta""" +367 62 training_loop """owa""" +367 62 negative_sampler """basic""" +367 62 evaluator """rankbased""" +367 63 dataset """kinships""" +367 63 model """kg2e""" +367 63 loss """bceaftersigmoid""" +367 63 regularizer """no""" +367 63 optimizer """adadelta""" +367 63 
training_loop """owa""" +367 63 negative_sampler """basic""" +367 63 evaluator """rankbased""" +367 64 dataset """kinships""" +367 64 model """kg2e""" +367 64 loss """bceaftersigmoid""" +367 64 regularizer """no""" +367 64 optimizer """adadelta""" +367 64 training_loop """owa""" +367 64 negative_sampler """basic""" +367 64 evaluator """rankbased""" +367 65 dataset """kinships""" +367 65 model """kg2e""" +367 65 loss """bceaftersigmoid""" +367 65 regularizer """no""" +367 65 optimizer """adadelta""" +367 65 training_loop """owa""" +367 65 negative_sampler """basic""" +367 65 evaluator """rankbased""" +367 66 dataset """kinships""" +367 66 model """kg2e""" +367 66 loss """bceaftersigmoid""" +367 66 regularizer """no""" +367 66 optimizer """adadelta""" +367 66 training_loop """owa""" +367 66 negative_sampler """basic""" +367 66 evaluator """rankbased""" +367 67 dataset """kinships""" +367 67 model """kg2e""" +367 67 loss """bceaftersigmoid""" +367 67 regularizer """no""" +367 67 optimizer """adadelta""" +367 67 training_loop """owa""" +367 67 negative_sampler """basic""" +367 67 evaluator """rankbased""" +367 68 dataset """kinships""" +367 68 model """kg2e""" +367 68 loss """bceaftersigmoid""" +367 68 regularizer """no""" +367 68 optimizer """adadelta""" +367 68 training_loop """owa""" +367 68 negative_sampler """basic""" +367 68 evaluator """rankbased""" +367 69 dataset """kinships""" +367 69 model """kg2e""" +367 69 loss """bceaftersigmoid""" +367 69 regularizer """no""" +367 69 optimizer """adadelta""" +367 69 training_loop """owa""" +367 69 negative_sampler """basic""" +367 69 evaluator """rankbased""" +367 70 dataset """kinships""" +367 70 model """kg2e""" +367 70 loss """bceaftersigmoid""" +367 70 regularizer """no""" +367 70 optimizer """adadelta""" +367 70 training_loop """owa""" +367 70 negative_sampler """basic""" +367 70 evaluator """rankbased""" +367 71 dataset """kinships""" +367 71 model """kg2e""" +367 71 loss """bceaftersigmoid""" +367 71 regularizer 
"""no""" +367 71 optimizer """adadelta""" +367 71 training_loop """owa""" +367 71 negative_sampler """basic""" +367 71 evaluator """rankbased""" +367 72 dataset """kinships""" +367 72 model """kg2e""" +367 72 loss """bceaftersigmoid""" +367 72 regularizer """no""" +367 72 optimizer """adadelta""" +367 72 training_loop """owa""" +367 72 negative_sampler """basic""" +367 72 evaluator """rankbased""" +367 73 dataset """kinships""" +367 73 model """kg2e""" +367 73 loss """bceaftersigmoid""" +367 73 regularizer """no""" +367 73 optimizer """adadelta""" +367 73 training_loop """owa""" +367 73 negative_sampler """basic""" +367 73 evaluator """rankbased""" +367 74 dataset """kinships""" +367 74 model """kg2e""" +367 74 loss """bceaftersigmoid""" +367 74 regularizer """no""" +367 74 optimizer """adadelta""" +367 74 training_loop """owa""" +367 74 negative_sampler """basic""" +367 74 evaluator """rankbased""" +367 75 dataset """kinships""" +367 75 model """kg2e""" +367 75 loss """bceaftersigmoid""" +367 75 regularizer """no""" +367 75 optimizer """adadelta""" +367 75 training_loop """owa""" +367 75 negative_sampler """basic""" +367 75 evaluator """rankbased""" +367 76 dataset """kinships""" +367 76 model """kg2e""" +367 76 loss """bceaftersigmoid""" +367 76 regularizer """no""" +367 76 optimizer """adadelta""" +367 76 training_loop """owa""" +367 76 negative_sampler """basic""" +367 76 evaluator """rankbased""" +367 77 dataset """kinships""" +367 77 model """kg2e""" +367 77 loss """bceaftersigmoid""" +367 77 regularizer """no""" +367 77 optimizer """adadelta""" +367 77 training_loop """owa""" +367 77 negative_sampler """basic""" +367 77 evaluator """rankbased""" +367 78 dataset """kinships""" +367 78 model """kg2e""" +367 78 loss """bceaftersigmoid""" +367 78 regularizer """no""" +367 78 optimizer """adadelta""" +367 78 training_loop """owa""" +367 78 negative_sampler """basic""" +367 78 evaluator """rankbased""" +367 79 dataset """kinships""" +367 79 model """kg2e""" +367 
79 loss """bceaftersigmoid""" +367 79 regularizer """no""" +367 79 optimizer """adadelta""" +367 79 training_loop """owa""" +367 79 negative_sampler """basic""" +367 79 evaluator """rankbased""" +367 80 dataset """kinships""" +367 80 model """kg2e""" +367 80 loss """bceaftersigmoid""" +367 80 regularizer """no""" +367 80 optimizer """adadelta""" +367 80 training_loop """owa""" +367 80 negative_sampler """basic""" +367 80 evaluator """rankbased""" +367 81 dataset """kinships""" +367 81 model """kg2e""" +367 81 loss """bceaftersigmoid""" +367 81 regularizer """no""" +367 81 optimizer """adadelta""" +367 81 training_loop """owa""" +367 81 negative_sampler """basic""" +367 81 evaluator """rankbased""" +367 82 dataset """kinships""" +367 82 model """kg2e""" +367 82 loss """bceaftersigmoid""" +367 82 regularizer """no""" +367 82 optimizer """adadelta""" +367 82 training_loop """owa""" +367 82 negative_sampler """basic""" +367 82 evaluator """rankbased""" +367 83 dataset """kinships""" +367 83 model """kg2e""" +367 83 loss """bceaftersigmoid""" +367 83 regularizer """no""" +367 83 optimizer """adadelta""" +367 83 training_loop """owa""" +367 83 negative_sampler """basic""" +367 83 evaluator """rankbased""" +367 84 dataset """kinships""" +367 84 model """kg2e""" +367 84 loss """bceaftersigmoid""" +367 84 regularizer """no""" +367 84 optimizer """adadelta""" +367 84 training_loop """owa""" +367 84 negative_sampler """basic""" +367 84 evaluator """rankbased""" +367 85 dataset """kinships""" +367 85 model """kg2e""" +367 85 loss """bceaftersigmoid""" +367 85 regularizer """no""" +367 85 optimizer """adadelta""" +367 85 training_loop """owa""" +367 85 negative_sampler """basic""" +367 85 evaluator """rankbased""" +367 86 dataset """kinships""" +367 86 model """kg2e""" +367 86 loss """bceaftersigmoid""" +367 86 regularizer """no""" +367 86 optimizer """adadelta""" +367 86 training_loop """owa""" +367 86 negative_sampler """basic""" +367 86 evaluator """rankbased""" +367 87 
dataset """kinships""" +367 87 model """kg2e""" +367 87 loss """bceaftersigmoid""" +367 87 regularizer """no""" +367 87 optimizer """adadelta""" +367 87 training_loop """owa""" +367 87 negative_sampler """basic""" +367 87 evaluator """rankbased""" +367 88 dataset """kinships""" +367 88 model """kg2e""" +367 88 loss """bceaftersigmoid""" +367 88 regularizer """no""" +367 88 optimizer """adadelta""" +367 88 training_loop """owa""" +367 88 negative_sampler """basic""" +367 88 evaluator """rankbased""" +367 89 dataset """kinships""" +367 89 model """kg2e""" +367 89 loss """bceaftersigmoid""" +367 89 regularizer """no""" +367 89 optimizer """adadelta""" +367 89 training_loop """owa""" +367 89 negative_sampler """basic""" +367 89 evaluator """rankbased""" +367 90 dataset """kinships""" +367 90 model """kg2e""" +367 90 loss """bceaftersigmoid""" +367 90 regularizer """no""" +367 90 optimizer """adadelta""" +367 90 training_loop """owa""" +367 90 negative_sampler """basic""" +367 90 evaluator """rankbased""" +367 91 dataset """kinships""" +367 91 model """kg2e""" +367 91 loss """bceaftersigmoid""" +367 91 regularizer """no""" +367 91 optimizer """adadelta""" +367 91 training_loop """owa""" +367 91 negative_sampler """basic""" +367 91 evaluator """rankbased""" +367 92 dataset """kinships""" +367 92 model """kg2e""" +367 92 loss """bceaftersigmoid""" +367 92 regularizer """no""" +367 92 optimizer """adadelta""" +367 92 training_loop """owa""" +367 92 negative_sampler """basic""" +367 92 evaluator """rankbased""" +367 93 dataset """kinships""" +367 93 model """kg2e""" +367 93 loss """bceaftersigmoid""" +367 93 regularizer """no""" +367 93 optimizer """adadelta""" +367 93 training_loop """owa""" +367 93 negative_sampler """basic""" +367 93 evaluator """rankbased""" +367 94 dataset """kinships""" +367 94 model """kg2e""" +367 94 loss """bceaftersigmoid""" +367 94 regularizer """no""" +367 94 optimizer """adadelta""" +367 94 training_loop """owa""" +367 94 negative_sampler 
"""basic""" +367 94 evaluator """rankbased""" +367 95 dataset """kinships""" +367 95 model """kg2e""" +367 95 loss """bceaftersigmoid""" +367 95 regularizer """no""" +367 95 optimizer """adadelta""" +367 95 training_loop """owa""" +367 95 negative_sampler """basic""" +367 95 evaluator """rankbased""" +367 96 dataset """kinships""" +367 96 model """kg2e""" +367 96 loss """bceaftersigmoid""" +367 96 regularizer """no""" +367 96 optimizer """adadelta""" +367 96 training_loop """owa""" +367 96 negative_sampler """basic""" +367 96 evaluator """rankbased""" +367 97 dataset """kinships""" +367 97 model """kg2e""" +367 97 loss """bceaftersigmoid""" +367 97 regularizer """no""" +367 97 optimizer """adadelta""" +367 97 training_loop """owa""" +367 97 negative_sampler """basic""" +367 97 evaluator """rankbased""" +367 98 dataset """kinships""" +367 98 model """kg2e""" +367 98 loss """bceaftersigmoid""" +367 98 regularizer """no""" +367 98 optimizer """adadelta""" +367 98 training_loop """owa""" +367 98 negative_sampler """basic""" +367 98 evaluator """rankbased""" +367 99 dataset """kinships""" +367 99 model """kg2e""" +367 99 loss """bceaftersigmoid""" +367 99 regularizer """no""" +367 99 optimizer """adadelta""" +367 99 training_loop """owa""" +367 99 negative_sampler """basic""" +367 99 evaluator """rankbased""" +367 100 dataset """kinships""" +367 100 model """kg2e""" +367 100 loss """bceaftersigmoid""" +367 100 regularizer """no""" +367 100 optimizer """adadelta""" +367 100 training_loop """owa""" +367 100 negative_sampler """basic""" +367 100 evaluator """rankbased""" +368 1 model.embedding_dim 0.0 +368 1 model.c_min 0.01671379456311593 +368 1 model.c_max 9.404359720091083 +368 1 negative_sampler.num_negs_per_pos 15.0 +368 1 training.batch_size 1.0 +368 2 model.embedding_dim 2.0 +368 2 model.c_min 0.012455507671327665 +368 2 model.c_max 2.3268783122321053 +368 2 negative_sampler.num_negs_per_pos 3.0 +368 2 training.batch_size 0.0 +368 3 model.embedding_dim 0.0 +368 3 
model.c_min 0.05472158143663378 +368 3 model.c_max 5.117906927566025 +368 3 negative_sampler.num_negs_per_pos 34.0 +368 3 training.batch_size 1.0 +368 4 model.embedding_dim 2.0 +368 4 model.c_min 0.0404040894450288 +368 4 model.c_max 8.86720068931466 +368 4 negative_sampler.num_negs_per_pos 54.0 +368 4 training.batch_size 0.0 +368 5 model.embedding_dim 1.0 +368 5 model.c_min 0.05375704350882583 +368 5 model.c_max 2.727087034683627 +368 5 negative_sampler.num_negs_per_pos 54.0 +368 5 training.batch_size 1.0 +368 6 model.embedding_dim 0.0 +368 6 model.c_min 0.03928959248842312 +368 6 model.c_max 5.756154857005871 +368 6 negative_sampler.num_negs_per_pos 19.0 +368 6 training.batch_size 1.0 +368 7 model.embedding_dim 1.0 +368 7 model.c_min 0.05268641328242427 +368 7 model.c_max 5.736020972780226 +368 7 negative_sampler.num_negs_per_pos 36.0 +368 7 training.batch_size 0.0 +368 8 model.embedding_dim 1.0 +368 8 model.c_min 0.014111352813150358 +368 8 model.c_max 8.529981260469363 +368 8 negative_sampler.num_negs_per_pos 58.0 +368 8 training.batch_size 0.0 +368 9 model.embedding_dim 0.0 +368 9 model.c_min 0.01544509232380096 +368 9 model.c_max 2.6174499956784367 +368 9 negative_sampler.num_negs_per_pos 3.0 +368 9 training.batch_size 2.0 +368 10 model.embedding_dim 1.0 +368 10 model.c_min 0.04773743190188024 +368 10 model.c_max 2.775153031382015 +368 10 negative_sampler.num_negs_per_pos 79.0 +368 10 training.batch_size 0.0 +368 11 model.embedding_dim 1.0 +368 11 model.c_min 0.03201031680884863 +368 11 model.c_max 5.467500857404422 +368 11 negative_sampler.num_negs_per_pos 89.0 +368 11 training.batch_size 1.0 +368 12 model.embedding_dim 0.0 +368 12 model.c_min 0.011296078499826005 +368 12 model.c_max 7.417049760342908 +368 12 negative_sampler.num_negs_per_pos 90.0 +368 12 training.batch_size 0.0 +368 13 model.embedding_dim 2.0 +368 13 model.c_min 0.020628227358364417 +368 13 model.c_max 3.2763283230777507 +368 13 negative_sampler.num_negs_per_pos 71.0 +368 13 
training.batch_size 0.0 +368 14 model.embedding_dim 0.0 +368 14 model.c_min 0.03261338016751637 +368 14 model.c_max 2.9759150954332103 +368 14 negative_sampler.num_negs_per_pos 33.0 +368 14 training.batch_size 0.0 +368 15 model.embedding_dim 0.0 +368 15 model.c_min 0.07351586688507776 +368 15 model.c_max 4.578713650264467 +368 15 negative_sampler.num_negs_per_pos 56.0 +368 15 training.batch_size 2.0 +368 16 model.embedding_dim 0.0 +368 16 model.c_min 0.03855701550680584 +368 16 model.c_max 5.317459985501745 +368 16 negative_sampler.num_negs_per_pos 87.0 +368 16 training.batch_size 1.0 +368 17 model.embedding_dim 0.0 +368 17 model.c_min 0.06874908208340506 +368 17 model.c_max 7.681400589921103 +368 17 negative_sampler.num_negs_per_pos 72.0 +368 17 training.batch_size 1.0 +368 18 model.embedding_dim 1.0 +368 18 model.c_min 0.0316412436133564 +368 18 model.c_max 6.880971300186448 +368 18 negative_sampler.num_negs_per_pos 68.0 +368 18 training.batch_size 2.0 +368 19 model.embedding_dim 0.0 +368 19 model.c_min 0.026656388669428037 +368 19 model.c_max 9.817274968157381 +368 19 negative_sampler.num_negs_per_pos 78.0 +368 19 training.batch_size 0.0 +368 20 model.embedding_dim 1.0 +368 20 model.c_min 0.06472911022919985 +368 20 model.c_max 1.296127861589399 +368 20 negative_sampler.num_negs_per_pos 52.0 +368 20 training.batch_size 2.0 +368 21 model.embedding_dim 2.0 +368 21 model.c_min 0.06797544643286155 +368 21 model.c_max 9.020307291682888 +368 21 negative_sampler.num_negs_per_pos 51.0 +368 21 training.batch_size 2.0 +368 22 model.embedding_dim 0.0 +368 22 model.c_min 0.034230363183811896 +368 22 model.c_max 4.472500681765798 +368 22 negative_sampler.num_negs_per_pos 9.0 +368 22 training.batch_size 0.0 +368 23 model.embedding_dim 1.0 +368 23 model.c_min 0.09957502882478139 +368 23 model.c_max 2.4881804767565403 +368 23 negative_sampler.num_negs_per_pos 11.0 +368 23 training.batch_size 0.0 +368 24 model.embedding_dim 2.0 +368 24 model.c_min 0.0948525057164213 +368 24 
model.c_max 5.415332745236077 +368 24 negative_sampler.num_negs_per_pos 72.0 +368 24 training.batch_size 0.0 +368 25 model.embedding_dim 2.0 +368 25 model.c_min 0.051386035419817754 +368 25 model.c_max 3.349641672338187 +368 25 negative_sampler.num_negs_per_pos 52.0 +368 25 training.batch_size 2.0 +368 26 model.embedding_dim 0.0 +368 26 model.c_min 0.02930018472871608 +368 26 model.c_max 5.159140892627244 +368 26 negative_sampler.num_negs_per_pos 22.0 +368 26 training.batch_size 0.0 +368 27 model.embedding_dim 0.0 +368 27 model.c_min 0.04202014908939362 +368 27 model.c_max 9.360132257233806 +368 27 negative_sampler.num_negs_per_pos 56.0 +368 27 training.batch_size 0.0 +368 28 model.embedding_dim 1.0 +368 28 model.c_min 0.02327894956920069 +368 28 model.c_max 3.3981354950474643 +368 28 negative_sampler.num_negs_per_pos 22.0 +368 28 training.batch_size 0.0 +368 29 model.embedding_dim 0.0 +368 29 model.c_min 0.014043099952496526 +368 29 model.c_max 3.191848869092273 +368 29 negative_sampler.num_negs_per_pos 0.0 +368 29 training.batch_size 2.0 +368 30 model.embedding_dim 1.0 +368 30 model.c_min 0.016436798430194733 +368 30 model.c_max 2.7309268770343578 +368 30 negative_sampler.num_negs_per_pos 7.0 +368 30 training.batch_size 2.0 +368 31 model.embedding_dim 1.0 +368 31 model.c_min 0.018365295226380755 +368 31 model.c_max 2.5758698081461104 +368 31 negative_sampler.num_negs_per_pos 66.0 +368 31 training.batch_size 1.0 +368 32 model.embedding_dim 2.0 +368 32 model.c_min 0.06002275086064768 +368 32 model.c_max 9.724621666297613 +368 32 negative_sampler.num_negs_per_pos 3.0 +368 32 training.batch_size 0.0 +368 33 model.embedding_dim 1.0 +368 33 model.c_min 0.0266333170627113 +368 33 model.c_max 6.101282353312553 +368 33 negative_sampler.num_negs_per_pos 52.0 +368 33 training.batch_size 0.0 +368 34 model.embedding_dim 2.0 +368 34 model.c_min 0.07922515466859895 +368 34 model.c_max 2.8482485989492305 +368 34 negative_sampler.num_negs_per_pos 94.0 +368 34 training.batch_size 
1.0 +368 35 model.embedding_dim 2.0 +368 35 model.c_min 0.06378831719086929 +368 35 model.c_max 8.92568139699184 +368 35 negative_sampler.num_negs_per_pos 62.0 +368 35 training.batch_size 1.0 +368 36 model.embedding_dim 1.0 +368 36 model.c_min 0.024079405917410115 +368 36 model.c_max 2.9106624083312145 +368 36 negative_sampler.num_negs_per_pos 90.0 +368 36 training.batch_size 0.0 +368 37 model.embedding_dim 0.0 +368 37 model.c_min 0.015908307532549956 +368 37 model.c_max 2.823498129847527 +368 37 negative_sampler.num_negs_per_pos 38.0 +368 37 training.batch_size 0.0 +368 38 model.embedding_dim 2.0 +368 38 model.c_min 0.014587676943268865 +368 38 model.c_max 5.925605250795148 +368 38 negative_sampler.num_negs_per_pos 54.0 +368 38 training.batch_size 2.0 +368 39 model.embedding_dim 1.0 +368 39 model.c_min 0.05342029819173458 +368 39 model.c_max 8.016846480541247 +368 39 negative_sampler.num_negs_per_pos 37.0 +368 39 training.batch_size 0.0 +368 40 model.embedding_dim 0.0 +368 40 model.c_min 0.03826321872321687 +368 40 model.c_max 7.049060476352239 +368 40 negative_sampler.num_negs_per_pos 82.0 +368 40 training.batch_size 0.0 +368 41 model.embedding_dim 0.0 +368 41 model.c_min 0.03315017681346301 +368 41 model.c_max 4.890673567013791 +368 41 negative_sampler.num_negs_per_pos 88.0 +368 41 training.batch_size 2.0 +368 42 model.embedding_dim 2.0 +368 42 model.c_min 0.03166014960314389 +368 42 model.c_max 8.684731844218863 +368 42 negative_sampler.num_negs_per_pos 88.0 +368 42 training.batch_size 2.0 +368 43 model.embedding_dim 1.0 +368 43 model.c_min 0.024902792483348653 +368 43 model.c_max 5.26365736983341 +368 43 negative_sampler.num_negs_per_pos 49.0 +368 43 training.batch_size 1.0 +368 44 model.embedding_dim 0.0 +368 44 model.c_min 0.019004406875203717 +368 44 model.c_max 1.9655637412083258 +368 44 negative_sampler.num_negs_per_pos 87.0 +368 44 training.batch_size 2.0 +368 45 model.embedding_dim 0.0 +368 45 model.c_min 0.029491216343386695 +368 45 model.c_max 
6.576128088445488 +368 45 negative_sampler.num_negs_per_pos 26.0 +368 45 training.batch_size 2.0 +368 46 model.embedding_dim 1.0 +368 46 model.c_min 0.022794558088887234 +368 46 model.c_max 9.820068459867493 +368 46 negative_sampler.num_negs_per_pos 94.0 +368 46 training.batch_size 2.0 +368 47 model.embedding_dim 0.0 +368 47 model.c_min 0.07070139875211319 +368 47 model.c_max 2.543683546823619 +368 47 negative_sampler.num_negs_per_pos 57.0 +368 47 training.batch_size 2.0 +368 48 model.embedding_dim 0.0 +368 48 model.c_min 0.09814785194581073 +368 48 model.c_max 3.777810357887501 +368 48 negative_sampler.num_negs_per_pos 70.0 +368 48 training.batch_size 1.0 +368 49 model.embedding_dim 1.0 +368 49 model.c_min 0.08916730305138473 +368 49 model.c_max 5.663865121775852 +368 49 negative_sampler.num_negs_per_pos 15.0 +368 49 training.batch_size 0.0 +368 50 model.embedding_dim 0.0 +368 50 model.c_min 0.04102823039333081 +368 50 model.c_max 3.671800921924784 +368 50 negative_sampler.num_negs_per_pos 60.0 +368 50 training.batch_size 1.0 +368 51 model.embedding_dim 2.0 +368 51 model.c_min 0.01046686480776234 +368 51 model.c_max 9.906176862263896 +368 51 negative_sampler.num_negs_per_pos 39.0 +368 51 training.batch_size 2.0 +368 52 model.embedding_dim 0.0 +368 52 model.c_min 0.05317843184721687 +368 52 model.c_max 4.138793207054909 +368 52 negative_sampler.num_negs_per_pos 54.0 +368 52 training.batch_size 2.0 +368 53 model.embedding_dim 2.0 +368 53 model.c_min 0.027206853390626777 +368 53 model.c_max 8.176174716074321 +368 53 negative_sampler.num_negs_per_pos 20.0 +368 53 training.batch_size 0.0 +368 54 model.embedding_dim 0.0 +368 54 model.c_min 0.07574348324535414 +368 54 model.c_max 1.2497282565885013 +368 54 negative_sampler.num_negs_per_pos 87.0 +368 54 training.batch_size 1.0 +368 55 model.embedding_dim 0.0 +368 55 model.c_min 0.03381293441820573 +368 55 model.c_max 5.575339388516622 +368 55 negative_sampler.num_negs_per_pos 55.0 +368 55 training.batch_size 0.0 +368 56 
model.embedding_dim 1.0 +368 56 model.c_min 0.055033115288011254 +368 56 model.c_max 4.05218028451918 +368 56 negative_sampler.num_negs_per_pos 97.0 +368 56 training.batch_size 2.0 +368 57 model.embedding_dim 0.0 +368 57 model.c_min 0.01654752224064683 +368 57 model.c_max 4.734114037588826 +368 57 negative_sampler.num_negs_per_pos 28.0 +368 57 training.batch_size 1.0 +368 58 model.embedding_dim 1.0 +368 58 model.c_min 0.028396317353761722 +368 58 model.c_max 4.429958867563487 +368 58 negative_sampler.num_negs_per_pos 31.0 +368 58 training.batch_size 0.0 +368 59 model.embedding_dim 0.0 +368 59 model.c_min 0.06972991244049019 +368 59 model.c_max 8.959714976396793 +368 59 negative_sampler.num_negs_per_pos 24.0 +368 59 training.batch_size 1.0 +368 60 model.embedding_dim 1.0 +368 60 model.c_min 0.019195017227400298 +368 60 model.c_max 1.0553925439301237 +368 60 negative_sampler.num_negs_per_pos 51.0 +368 60 training.batch_size 0.0 +368 61 model.embedding_dim 1.0 +368 61 model.c_min 0.026383640544620005 +368 61 model.c_max 3.8117869577439842 +368 61 negative_sampler.num_negs_per_pos 61.0 +368 61 training.batch_size 0.0 +368 62 model.embedding_dim 1.0 +368 62 model.c_min 0.04199804907701475 +368 62 model.c_max 8.83775428419725 +368 62 negative_sampler.num_negs_per_pos 69.0 +368 62 training.batch_size 0.0 +368 63 model.embedding_dim 1.0 +368 63 model.c_min 0.029327352782934246 +368 63 model.c_max 9.966287165789456 +368 63 negative_sampler.num_negs_per_pos 64.0 +368 63 training.batch_size 0.0 +368 64 model.embedding_dim 0.0 +368 64 model.c_min 0.01883911819225633 +368 64 model.c_max 6.015476469209186 +368 64 negative_sampler.num_negs_per_pos 7.0 +368 64 training.batch_size 1.0 +368 65 model.embedding_dim 2.0 +368 65 model.c_min 0.013518805233074202 +368 65 model.c_max 2.7646165047594478 +368 65 negative_sampler.num_negs_per_pos 9.0 +368 65 training.batch_size 0.0 +368 66 model.embedding_dim 2.0 +368 66 model.c_min 0.018189513858425214 +368 66 model.c_max 5.734793141401041 
+368 66 negative_sampler.num_negs_per_pos 29.0 +368 66 training.batch_size 1.0 +368 67 model.embedding_dim 2.0 +368 67 model.c_min 0.08115905917412376 +368 67 model.c_max 8.562528542473892 +368 67 negative_sampler.num_negs_per_pos 80.0 +368 67 training.batch_size 1.0 +368 68 model.embedding_dim 2.0 +368 68 model.c_min 0.0590559953719188 +368 68 model.c_max 4.68911639724049 +368 68 negative_sampler.num_negs_per_pos 95.0 +368 68 training.batch_size 2.0 +368 69 model.embedding_dim 2.0 +368 69 model.c_min 0.03343778470748314 +368 69 model.c_max 9.530516315495273 +368 69 negative_sampler.num_negs_per_pos 97.0 +368 69 training.batch_size 2.0 +368 70 model.embedding_dim 2.0 +368 70 model.c_min 0.04105104075124047 +368 70 model.c_max 8.56403073938408 +368 70 negative_sampler.num_negs_per_pos 58.0 +368 70 training.batch_size 0.0 +368 71 model.embedding_dim 2.0 +368 71 model.c_min 0.029903720285748364 +368 71 model.c_max 6.547548741632526 +368 71 negative_sampler.num_negs_per_pos 74.0 +368 71 training.batch_size 0.0 +368 72 model.embedding_dim 2.0 +368 72 model.c_min 0.01480201310479726 +368 72 model.c_max 9.128042005060347 +368 72 negative_sampler.num_negs_per_pos 84.0 +368 72 training.batch_size 0.0 +368 73 model.embedding_dim 0.0 +368 73 model.c_min 0.06972066074306306 +368 73 model.c_max 2.5007768185974775 +368 73 negative_sampler.num_negs_per_pos 90.0 +368 73 training.batch_size 1.0 +368 74 model.embedding_dim 0.0 +368 74 model.c_min 0.036610546178263564 +368 74 model.c_max 9.388133315138973 +368 74 negative_sampler.num_negs_per_pos 8.0 +368 74 training.batch_size 0.0 +368 75 model.embedding_dim 1.0 +368 75 model.c_min 0.069822003065331 +368 75 model.c_max 1.4701515025333514 +368 75 negative_sampler.num_negs_per_pos 67.0 +368 75 training.batch_size 2.0 +368 76 model.embedding_dim 0.0 +368 76 model.c_min 0.017767340748845457 +368 76 model.c_max 4.076336673141052 +368 76 negative_sampler.num_negs_per_pos 98.0 +368 76 training.batch_size 0.0 +368 77 model.embedding_dim 0.0 
+368 77 model.c_min 0.020332765528760594 +368 77 model.c_max 3.829063749617321 +368 77 negative_sampler.num_negs_per_pos 0.0 +368 77 training.batch_size 0.0 +368 78 model.embedding_dim 0.0 +368 78 model.c_min 0.05610672402179117 +368 78 model.c_max 8.902021533467266 +368 78 negative_sampler.num_negs_per_pos 94.0 +368 78 training.batch_size 1.0 +368 79 model.embedding_dim 0.0 +368 79 model.c_min 0.014078969362532093 +368 79 model.c_max 6.375748898173601 +368 79 negative_sampler.num_negs_per_pos 15.0 +368 79 training.batch_size 2.0 +368 80 model.embedding_dim 2.0 +368 80 model.c_min 0.06093899458405102 +368 80 model.c_max 3.7021333151066314 +368 80 negative_sampler.num_negs_per_pos 76.0 +368 80 training.batch_size 1.0 +368 81 model.embedding_dim 2.0 +368 81 model.c_min 0.05998012148546227 +368 81 model.c_max 2.9346044495675945 +368 81 negative_sampler.num_negs_per_pos 59.0 +368 81 training.batch_size 2.0 +368 82 model.embedding_dim 1.0 +368 82 model.c_min 0.04865225181589539 +368 82 model.c_max 1.5143388987120745 +368 82 negative_sampler.num_negs_per_pos 66.0 +368 82 training.batch_size 0.0 +368 83 model.embedding_dim 2.0 +368 83 model.c_min 0.03704102588562664 +368 83 model.c_max 7.554399870042505 +368 83 negative_sampler.num_negs_per_pos 41.0 +368 83 training.batch_size 1.0 +368 84 model.embedding_dim 1.0 +368 84 model.c_min 0.012751698684768557 +368 84 model.c_max 4.329284232419757 +368 84 negative_sampler.num_negs_per_pos 66.0 +368 84 training.batch_size 2.0 +368 85 model.embedding_dim 0.0 +368 85 model.c_min 0.02239816912175184 +368 85 model.c_max 9.611200174635472 +368 85 negative_sampler.num_negs_per_pos 34.0 +368 85 training.batch_size 2.0 +368 86 model.embedding_dim 1.0 +368 86 model.c_min 0.027424431675995606 +368 86 model.c_max 2.219403029493739 +368 86 negative_sampler.num_negs_per_pos 94.0 +368 86 training.batch_size 1.0 +368 87 model.embedding_dim 1.0 +368 87 model.c_min 0.01804242334898431 +368 87 model.c_max 2.4609482739079134 +368 87 
negative_sampler.num_negs_per_pos 19.0 +368 87 training.batch_size 1.0 +368 88 model.embedding_dim 1.0 +368 88 model.c_min 0.0274159175821753 +368 88 model.c_max 5.934973637585847 +368 88 negative_sampler.num_negs_per_pos 53.0 +368 88 training.batch_size 2.0 +368 89 model.embedding_dim 0.0 +368 89 model.c_min 0.05908455416157679 +368 89 model.c_max 2.837857062739573 +368 89 negative_sampler.num_negs_per_pos 74.0 +368 89 training.batch_size 1.0 +368 90 model.embedding_dim 0.0 +368 90 model.c_min 0.03210081870012136 +368 90 model.c_max 3.538824601187862 +368 90 negative_sampler.num_negs_per_pos 65.0 +368 90 training.batch_size 2.0 +368 91 model.embedding_dim 0.0 +368 91 model.c_min 0.06485637571767117 +368 91 model.c_max 2.6557240939788387 +368 91 negative_sampler.num_negs_per_pos 30.0 +368 91 training.batch_size 1.0 +368 92 model.embedding_dim 2.0 +368 92 model.c_min 0.012272076308247056 +368 92 model.c_max 3.4483435332987282 +368 92 negative_sampler.num_negs_per_pos 87.0 +368 92 training.batch_size 0.0 +368 93 model.embedding_dim 1.0 +368 93 model.c_min 0.010818653688232588 +368 93 model.c_max 8.515494644099126 +368 93 negative_sampler.num_negs_per_pos 1.0 +368 93 training.batch_size 2.0 +368 94 model.embedding_dim 2.0 +368 94 model.c_min 0.05941680869123036 +368 94 model.c_max 3.275475215572294 +368 94 negative_sampler.num_negs_per_pos 46.0 +368 94 training.batch_size 0.0 +368 95 model.embedding_dim 0.0 +368 95 model.c_min 0.030184472976875754 +368 95 model.c_max 8.99101890857016 +368 95 negative_sampler.num_negs_per_pos 91.0 +368 95 training.batch_size 0.0 +368 96 model.embedding_dim 0.0 +368 96 model.c_min 0.017251098231460163 +368 96 model.c_max 7.4545057806317745 +368 96 negative_sampler.num_negs_per_pos 65.0 +368 96 training.batch_size 0.0 +368 97 model.embedding_dim 2.0 +368 97 model.c_min 0.0930380036538017 +368 97 model.c_max 8.138189176884346 +368 97 negative_sampler.num_negs_per_pos 60.0 +368 97 training.batch_size 1.0 +368 98 model.embedding_dim 1.0 
+368 98 model.c_min 0.07180185347285152 +368 98 model.c_max 9.027958224574355 +368 98 negative_sampler.num_negs_per_pos 79.0 +368 98 training.batch_size 2.0 +368 99 model.embedding_dim 1.0 +368 99 model.c_min 0.09695953568337284 +368 99 model.c_max 7.963441276778694 +368 99 negative_sampler.num_negs_per_pos 92.0 +368 99 training.batch_size 1.0 +368 100 model.embedding_dim 0.0 +368 100 model.c_min 0.09657473806393896 +368 100 model.c_max 7.684620447636925 +368 100 negative_sampler.num_negs_per_pos 80.0 +368 100 training.batch_size 1.0 +368 1 dataset """kinships""" +368 1 model """kg2e""" +368 1 loss """softplus""" +368 1 regularizer """no""" +368 1 optimizer """adadelta""" +368 1 training_loop """owa""" +368 1 negative_sampler """basic""" +368 1 evaluator """rankbased""" +368 2 dataset """kinships""" +368 2 model """kg2e""" +368 2 loss """softplus""" +368 2 regularizer """no""" +368 2 optimizer """adadelta""" +368 2 training_loop """owa""" +368 2 negative_sampler """basic""" +368 2 evaluator """rankbased""" +368 3 dataset """kinships""" +368 3 model """kg2e""" +368 3 loss """softplus""" +368 3 regularizer """no""" +368 3 optimizer """adadelta""" +368 3 training_loop """owa""" +368 3 negative_sampler """basic""" +368 3 evaluator """rankbased""" +368 4 dataset """kinships""" +368 4 model """kg2e""" +368 4 loss """softplus""" +368 4 regularizer """no""" +368 4 optimizer """adadelta""" +368 4 training_loop """owa""" +368 4 negative_sampler """basic""" +368 4 evaluator """rankbased""" +368 5 dataset """kinships""" +368 5 model """kg2e""" +368 5 loss """softplus""" +368 5 regularizer """no""" +368 5 optimizer """adadelta""" +368 5 training_loop """owa""" +368 5 negative_sampler """basic""" +368 5 evaluator """rankbased""" +368 6 dataset """kinships""" +368 6 model """kg2e""" +368 6 loss """softplus""" +368 6 regularizer """no""" +368 6 optimizer """adadelta""" +368 6 training_loop """owa""" +368 6 negative_sampler """basic""" +368 6 evaluator """rankbased""" +368 7 
dataset """kinships""" +368 7 model """kg2e""" +368 7 loss """softplus""" +368 7 regularizer """no""" +368 7 optimizer """adadelta""" +368 7 training_loop """owa""" +368 7 negative_sampler """basic""" +368 7 evaluator """rankbased""" +368 8 dataset """kinships""" +368 8 model """kg2e""" +368 8 loss """softplus""" +368 8 regularizer """no""" +368 8 optimizer """adadelta""" +368 8 training_loop """owa""" +368 8 negative_sampler """basic""" +368 8 evaluator """rankbased""" +368 9 dataset """kinships""" +368 9 model """kg2e""" +368 9 loss """softplus""" +368 9 regularizer """no""" +368 9 optimizer """adadelta""" +368 9 training_loop """owa""" +368 9 negative_sampler """basic""" +368 9 evaluator """rankbased""" +368 10 dataset """kinships""" +368 10 model """kg2e""" +368 10 loss """softplus""" +368 10 regularizer """no""" +368 10 optimizer """adadelta""" +368 10 training_loop """owa""" +368 10 negative_sampler """basic""" +368 10 evaluator """rankbased""" +368 11 dataset """kinships""" +368 11 model """kg2e""" +368 11 loss """softplus""" +368 11 regularizer """no""" +368 11 optimizer """adadelta""" +368 11 training_loop """owa""" +368 11 negative_sampler """basic""" +368 11 evaluator """rankbased""" +368 12 dataset """kinships""" +368 12 model """kg2e""" +368 12 loss """softplus""" +368 12 regularizer """no""" +368 12 optimizer """adadelta""" +368 12 training_loop """owa""" +368 12 negative_sampler """basic""" +368 12 evaluator """rankbased""" +368 13 dataset """kinships""" +368 13 model """kg2e""" +368 13 loss """softplus""" +368 13 regularizer """no""" +368 13 optimizer """adadelta""" +368 13 training_loop """owa""" +368 13 negative_sampler """basic""" +368 13 evaluator """rankbased""" +368 14 dataset """kinships""" +368 14 model """kg2e""" +368 14 loss """softplus""" +368 14 regularizer """no""" +368 14 optimizer """adadelta""" +368 14 training_loop """owa""" +368 14 negative_sampler """basic""" +368 14 evaluator """rankbased""" +368 15 dataset """kinships""" +368 15 
model """kg2e""" +368 15 loss """softplus""" +368 15 regularizer """no""" +368 15 optimizer """adadelta""" +368 15 training_loop """owa""" +368 15 negative_sampler """basic""" +368 15 evaluator """rankbased""" +368 16 dataset """kinships""" +368 16 model """kg2e""" +368 16 loss """softplus""" +368 16 regularizer """no""" +368 16 optimizer """adadelta""" +368 16 training_loop """owa""" +368 16 negative_sampler """basic""" +368 16 evaluator """rankbased""" +368 17 dataset """kinships""" +368 17 model """kg2e""" +368 17 loss """softplus""" +368 17 regularizer """no""" +368 17 optimizer """adadelta""" +368 17 training_loop """owa""" +368 17 negative_sampler """basic""" +368 17 evaluator """rankbased""" +368 18 dataset """kinships""" +368 18 model """kg2e""" +368 18 loss """softplus""" +368 18 regularizer """no""" +368 18 optimizer """adadelta""" +368 18 training_loop """owa""" +368 18 negative_sampler """basic""" +368 18 evaluator """rankbased""" +368 19 dataset """kinships""" +368 19 model """kg2e""" +368 19 loss """softplus""" +368 19 regularizer """no""" +368 19 optimizer """adadelta""" +368 19 training_loop """owa""" +368 19 negative_sampler """basic""" +368 19 evaluator """rankbased""" +368 20 dataset """kinships""" +368 20 model """kg2e""" +368 20 loss """softplus""" +368 20 regularizer """no""" +368 20 optimizer """adadelta""" +368 20 training_loop """owa""" +368 20 negative_sampler """basic""" +368 20 evaluator """rankbased""" +368 21 dataset """kinships""" +368 21 model """kg2e""" +368 21 loss """softplus""" +368 21 regularizer """no""" +368 21 optimizer """adadelta""" +368 21 training_loop """owa""" +368 21 negative_sampler """basic""" +368 21 evaluator """rankbased""" +368 22 dataset """kinships""" +368 22 model """kg2e""" +368 22 loss """softplus""" +368 22 regularizer """no""" +368 22 optimizer """adadelta""" +368 22 training_loop """owa""" +368 22 negative_sampler """basic""" +368 22 evaluator """rankbased""" +368 23 dataset """kinships""" +368 23 model 
"""kg2e""" +368 23 loss """softplus""" +368 23 regularizer """no""" +368 23 optimizer """adadelta""" +368 23 training_loop """owa""" +368 23 negative_sampler """basic""" +368 23 evaluator """rankbased""" +368 24 dataset """kinships""" +368 24 model """kg2e""" +368 24 loss """softplus""" +368 24 regularizer """no""" +368 24 optimizer """adadelta""" +368 24 training_loop """owa""" +368 24 negative_sampler """basic""" +368 24 evaluator """rankbased""" +368 25 dataset """kinships""" +368 25 model """kg2e""" +368 25 loss """softplus""" +368 25 regularizer """no""" +368 25 optimizer """adadelta""" +368 25 training_loop """owa""" +368 25 negative_sampler """basic""" +368 25 evaluator """rankbased""" +368 26 dataset """kinships""" +368 26 model """kg2e""" +368 26 loss """softplus""" +368 26 regularizer """no""" +368 26 optimizer """adadelta""" +368 26 training_loop """owa""" +368 26 negative_sampler """basic""" +368 26 evaluator """rankbased""" +368 27 dataset """kinships""" +368 27 model """kg2e""" +368 27 loss """softplus""" +368 27 regularizer """no""" +368 27 optimizer """adadelta""" +368 27 training_loop """owa""" +368 27 negative_sampler """basic""" +368 27 evaluator """rankbased""" +368 28 dataset """kinships""" +368 28 model """kg2e""" +368 28 loss """softplus""" +368 28 regularizer """no""" +368 28 optimizer """adadelta""" +368 28 training_loop """owa""" +368 28 negative_sampler """basic""" +368 28 evaluator """rankbased""" +368 29 dataset """kinships""" +368 29 model """kg2e""" +368 29 loss """softplus""" +368 29 regularizer """no""" +368 29 optimizer """adadelta""" +368 29 training_loop """owa""" +368 29 negative_sampler """basic""" +368 29 evaluator """rankbased""" +368 30 dataset """kinships""" +368 30 model """kg2e""" +368 30 loss """softplus""" +368 30 regularizer """no""" +368 30 optimizer """adadelta""" +368 30 training_loop """owa""" +368 30 negative_sampler """basic""" +368 30 evaluator """rankbased""" +368 31 dataset """kinships""" +368 31 model 
"""kg2e""" +368 31 loss """softplus""" +368 31 regularizer """no""" +368 31 optimizer """adadelta""" +368 31 training_loop """owa""" +368 31 negative_sampler """basic""" +368 31 evaluator """rankbased""" +368 32 dataset """kinships""" +368 32 model """kg2e""" +368 32 loss """softplus""" +368 32 regularizer """no""" +368 32 optimizer """adadelta""" +368 32 training_loop """owa""" +368 32 negative_sampler """basic""" +368 32 evaluator """rankbased""" +368 33 dataset """kinships""" +368 33 model """kg2e""" +368 33 loss """softplus""" +368 33 regularizer """no""" +368 33 optimizer """adadelta""" +368 33 training_loop """owa""" +368 33 negative_sampler """basic""" +368 33 evaluator """rankbased""" +368 34 dataset """kinships""" +368 34 model """kg2e""" +368 34 loss """softplus""" +368 34 regularizer """no""" +368 34 optimizer """adadelta""" +368 34 training_loop """owa""" +368 34 negative_sampler """basic""" +368 34 evaluator """rankbased""" +368 35 dataset """kinships""" +368 35 model """kg2e""" +368 35 loss """softplus""" +368 35 regularizer """no""" +368 35 optimizer """adadelta""" +368 35 training_loop """owa""" +368 35 negative_sampler """basic""" +368 35 evaluator """rankbased""" +368 36 dataset """kinships""" +368 36 model """kg2e""" +368 36 loss """softplus""" +368 36 regularizer """no""" +368 36 optimizer """adadelta""" +368 36 training_loop """owa""" +368 36 negative_sampler """basic""" +368 36 evaluator """rankbased""" +368 37 dataset """kinships""" +368 37 model """kg2e""" +368 37 loss """softplus""" +368 37 regularizer """no""" +368 37 optimizer """adadelta""" +368 37 training_loop """owa""" +368 37 negative_sampler """basic""" +368 37 evaluator """rankbased""" +368 38 dataset """kinships""" +368 38 model """kg2e""" +368 38 loss """softplus""" +368 38 regularizer """no""" +368 38 optimizer """adadelta""" +368 38 training_loop """owa""" +368 38 negative_sampler """basic""" +368 38 evaluator """rankbased""" +368 39 dataset """kinships""" +368 39 model 
"""kg2e""" +368 39 loss """softplus""" +368 39 regularizer """no""" +368 39 optimizer """adadelta""" +368 39 training_loop """owa""" +368 39 negative_sampler """basic""" +368 39 evaluator """rankbased""" +368 40 dataset """kinships""" +368 40 model """kg2e""" +368 40 loss """softplus""" +368 40 regularizer """no""" +368 40 optimizer """adadelta""" +368 40 training_loop """owa""" +368 40 negative_sampler """basic""" +368 40 evaluator """rankbased""" +368 41 dataset """kinships""" +368 41 model """kg2e""" +368 41 loss """softplus""" +368 41 regularizer """no""" +368 41 optimizer """adadelta""" +368 41 training_loop """owa""" +368 41 negative_sampler """basic""" +368 41 evaluator """rankbased""" +368 42 dataset """kinships""" +368 42 model """kg2e""" +368 42 loss """softplus""" +368 42 regularizer """no""" +368 42 optimizer """adadelta""" +368 42 training_loop """owa""" +368 42 negative_sampler """basic""" +368 42 evaluator """rankbased""" +368 43 dataset """kinships""" +368 43 model """kg2e""" +368 43 loss """softplus""" +368 43 regularizer """no""" +368 43 optimizer """adadelta""" +368 43 training_loop """owa""" +368 43 negative_sampler """basic""" +368 43 evaluator """rankbased""" +368 44 dataset """kinships""" +368 44 model """kg2e""" +368 44 loss """softplus""" +368 44 regularizer """no""" +368 44 optimizer """adadelta""" +368 44 training_loop """owa""" +368 44 negative_sampler """basic""" +368 44 evaluator """rankbased""" +368 45 dataset """kinships""" +368 45 model """kg2e""" +368 45 loss """softplus""" +368 45 regularizer """no""" +368 45 optimizer """adadelta""" +368 45 training_loop """owa""" +368 45 negative_sampler """basic""" +368 45 evaluator """rankbased""" +368 46 dataset """kinships""" +368 46 model """kg2e""" +368 46 loss """softplus""" +368 46 regularizer """no""" +368 46 optimizer """adadelta""" +368 46 training_loop """owa""" +368 46 negative_sampler """basic""" +368 46 evaluator """rankbased""" +368 47 dataset """kinships""" +368 47 model 
"""kg2e""" +368 47 loss """softplus""" +368 47 regularizer """no""" +368 47 optimizer """adadelta""" +368 47 training_loop """owa""" +368 47 negative_sampler """basic""" +368 47 evaluator """rankbased""" +368 48 dataset """kinships""" +368 48 model """kg2e""" +368 48 loss """softplus""" +368 48 regularizer """no""" +368 48 optimizer """adadelta""" +368 48 training_loop """owa""" +368 48 negative_sampler """basic""" +368 48 evaluator """rankbased""" +368 49 dataset """kinships""" +368 49 model """kg2e""" +368 49 loss """softplus""" +368 49 regularizer """no""" +368 49 optimizer """adadelta""" +368 49 training_loop """owa""" +368 49 negative_sampler """basic""" +368 49 evaluator """rankbased""" +368 50 dataset """kinships""" +368 50 model """kg2e""" +368 50 loss """softplus""" +368 50 regularizer """no""" +368 50 optimizer """adadelta""" +368 50 training_loop """owa""" +368 50 negative_sampler """basic""" +368 50 evaluator """rankbased""" +368 51 dataset """kinships""" +368 51 model """kg2e""" +368 51 loss """softplus""" +368 51 regularizer """no""" +368 51 optimizer """adadelta""" +368 51 training_loop """owa""" +368 51 negative_sampler """basic""" +368 51 evaluator """rankbased""" +368 52 dataset """kinships""" +368 52 model """kg2e""" +368 52 loss """softplus""" +368 52 regularizer """no""" +368 52 optimizer """adadelta""" +368 52 training_loop """owa""" +368 52 negative_sampler """basic""" +368 52 evaluator """rankbased""" +368 53 dataset """kinships""" +368 53 model """kg2e""" +368 53 loss """softplus""" +368 53 regularizer """no""" +368 53 optimizer """adadelta""" +368 53 training_loop """owa""" +368 53 negative_sampler """basic""" +368 53 evaluator """rankbased""" +368 54 dataset """kinships""" +368 54 model """kg2e""" +368 54 loss """softplus""" +368 54 regularizer """no""" +368 54 optimizer """adadelta""" +368 54 training_loop """owa""" +368 54 negative_sampler """basic""" +368 54 evaluator """rankbased""" +368 55 dataset """kinships""" +368 55 model 
"""kg2e""" +368 55 loss """softplus""" +368 55 regularizer """no""" +368 55 optimizer """adadelta""" +368 55 training_loop """owa""" +368 55 negative_sampler """basic""" +368 55 evaluator """rankbased""" +368 56 dataset """kinships""" +368 56 model """kg2e""" +368 56 loss """softplus""" +368 56 regularizer """no""" +368 56 optimizer """adadelta""" +368 56 training_loop """owa""" +368 56 negative_sampler """basic""" +368 56 evaluator """rankbased""" +368 57 dataset """kinships""" +368 57 model """kg2e""" +368 57 loss """softplus""" +368 57 regularizer """no""" +368 57 optimizer """adadelta""" +368 57 training_loop """owa""" +368 57 negative_sampler """basic""" +368 57 evaluator """rankbased""" +368 58 dataset """kinships""" +368 58 model """kg2e""" +368 58 loss """softplus""" +368 58 regularizer """no""" +368 58 optimizer """adadelta""" +368 58 training_loop """owa""" +368 58 negative_sampler """basic""" +368 58 evaluator """rankbased""" +368 59 dataset """kinships""" +368 59 model """kg2e""" +368 59 loss """softplus""" +368 59 regularizer """no""" +368 59 optimizer """adadelta""" +368 59 training_loop """owa""" +368 59 negative_sampler """basic""" +368 59 evaluator """rankbased""" +368 60 dataset """kinships""" +368 60 model """kg2e""" +368 60 loss """softplus""" +368 60 regularizer """no""" +368 60 optimizer """adadelta""" +368 60 training_loop """owa""" +368 60 negative_sampler """basic""" +368 60 evaluator """rankbased""" +368 61 dataset """kinships""" +368 61 model """kg2e""" +368 61 loss """softplus""" +368 61 regularizer """no""" +368 61 optimizer """adadelta""" +368 61 training_loop """owa""" +368 61 negative_sampler """basic""" +368 61 evaluator """rankbased""" +368 62 dataset """kinships""" +368 62 model """kg2e""" +368 62 loss """softplus""" +368 62 regularizer """no""" +368 62 optimizer """adadelta""" +368 62 training_loop """owa""" +368 62 negative_sampler """basic""" +368 62 evaluator """rankbased""" +368 63 dataset """kinships""" +368 63 model 
"""kg2e""" +368 63 loss """softplus""" +368 63 regularizer """no""" +368 63 optimizer """adadelta""" +368 63 training_loop """owa""" +368 63 negative_sampler """basic""" +368 63 evaluator """rankbased""" +368 64 dataset """kinships""" +368 64 model """kg2e""" +368 64 loss """softplus""" +368 64 regularizer """no""" +368 64 optimizer """adadelta""" +368 64 training_loop """owa""" +368 64 negative_sampler """basic""" +368 64 evaluator """rankbased""" +368 65 dataset """kinships""" +368 65 model """kg2e""" +368 65 loss """softplus""" +368 65 regularizer """no""" +368 65 optimizer """adadelta""" +368 65 training_loop """owa""" +368 65 negative_sampler """basic""" +368 65 evaluator """rankbased""" +368 66 dataset """kinships""" +368 66 model """kg2e""" +368 66 loss """softplus""" +368 66 regularizer """no""" +368 66 optimizer """adadelta""" +368 66 training_loop """owa""" +368 66 negative_sampler """basic""" +368 66 evaluator """rankbased""" +368 67 dataset """kinships""" +368 67 model """kg2e""" +368 67 loss """softplus""" +368 67 regularizer """no""" +368 67 optimizer """adadelta""" +368 67 training_loop """owa""" +368 67 negative_sampler """basic""" +368 67 evaluator """rankbased""" +368 68 dataset """kinships""" +368 68 model """kg2e""" +368 68 loss """softplus""" +368 68 regularizer """no""" +368 68 optimizer """adadelta""" +368 68 training_loop """owa""" +368 68 negative_sampler """basic""" +368 68 evaluator """rankbased""" +368 69 dataset """kinships""" +368 69 model """kg2e""" +368 69 loss """softplus""" +368 69 regularizer """no""" +368 69 optimizer """adadelta""" +368 69 training_loop """owa""" +368 69 negative_sampler """basic""" +368 69 evaluator """rankbased""" +368 70 dataset """kinships""" +368 70 model """kg2e""" +368 70 loss """softplus""" +368 70 regularizer """no""" +368 70 optimizer """adadelta""" +368 70 training_loop """owa""" +368 70 negative_sampler """basic""" +368 70 evaluator """rankbased""" +368 71 dataset """kinships""" +368 71 model 
"""kg2e""" +368 71 loss """softplus""" +368 71 regularizer """no""" +368 71 optimizer """adadelta""" +368 71 training_loop """owa""" +368 71 negative_sampler """basic""" +368 71 evaluator """rankbased""" +368 72 dataset """kinships""" +368 72 model """kg2e""" +368 72 loss """softplus""" +368 72 regularizer """no""" +368 72 optimizer """adadelta""" +368 72 training_loop """owa""" +368 72 negative_sampler """basic""" +368 72 evaluator """rankbased""" +368 73 dataset """kinships""" +368 73 model """kg2e""" +368 73 loss """softplus""" +368 73 regularizer """no""" +368 73 optimizer """adadelta""" +368 73 training_loop """owa""" +368 73 negative_sampler """basic""" +368 73 evaluator """rankbased""" +368 74 dataset """kinships""" +368 74 model """kg2e""" +368 74 loss """softplus""" +368 74 regularizer """no""" +368 74 optimizer """adadelta""" +368 74 training_loop """owa""" +368 74 negative_sampler """basic""" +368 74 evaluator """rankbased""" +368 75 dataset """kinships""" +368 75 model """kg2e""" +368 75 loss """softplus""" +368 75 regularizer """no""" +368 75 optimizer """adadelta""" +368 75 training_loop """owa""" +368 75 negative_sampler """basic""" +368 75 evaluator """rankbased""" +368 76 dataset """kinships""" +368 76 model """kg2e""" +368 76 loss """softplus""" +368 76 regularizer """no""" +368 76 optimizer """adadelta""" +368 76 training_loop """owa""" +368 76 negative_sampler """basic""" +368 76 evaluator """rankbased""" +368 77 dataset """kinships""" +368 77 model """kg2e""" +368 77 loss """softplus""" +368 77 regularizer """no""" +368 77 optimizer """adadelta""" +368 77 training_loop """owa""" +368 77 negative_sampler """basic""" +368 77 evaluator """rankbased""" +368 78 dataset """kinships""" +368 78 model """kg2e""" +368 78 loss """softplus""" +368 78 regularizer """no""" +368 78 optimizer """adadelta""" +368 78 training_loop """owa""" +368 78 negative_sampler """basic""" +368 78 evaluator """rankbased""" +368 79 dataset """kinships""" +368 79 model 
"""kg2e""" +368 79 loss """softplus""" +368 79 regularizer """no""" +368 79 optimizer """adadelta""" +368 79 training_loop """owa""" +368 79 negative_sampler """basic""" +368 79 evaluator """rankbased""" +368 80 dataset """kinships""" +368 80 model """kg2e""" +368 80 loss """softplus""" +368 80 regularizer """no""" +368 80 optimizer """adadelta""" +368 80 training_loop """owa""" +368 80 negative_sampler """basic""" +368 80 evaluator """rankbased""" +368 81 dataset """kinships""" +368 81 model """kg2e""" +368 81 loss """softplus""" +368 81 regularizer """no""" +368 81 optimizer """adadelta""" +368 81 training_loop """owa""" +368 81 negative_sampler """basic""" +368 81 evaluator """rankbased""" +368 82 dataset """kinships""" +368 82 model """kg2e""" +368 82 loss """softplus""" +368 82 regularizer """no""" +368 82 optimizer """adadelta""" +368 82 training_loop """owa""" +368 82 negative_sampler """basic""" +368 82 evaluator """rankbased""" +368 83 dataset """kinships""" +368 83 model """kg2e""" +368 83 loss """softplus""" +368 83 regularizer """no""" +368 83 optimizer """adadelta""" +368 83 training_loop """owa""" +368 83 negative_sampler """basic""" +368 83 evaluator """rankbased""" +368 84 dataset """kinships""" +368 84 model """kg2e""" +368 84 loss """softplus""" +368 84 regularizer """no""" +368 84 optimizer """adadelta""" +368 84 training_loop """owa""" +368 84 negative_sampler """basic""" +368 84 evaluator """rankbased""" +368 85 dataset """kinships""" +368 85 model """kg2e""" +368 85 loss """softplus""" +368 85 regularizer """no""" +368 85 optimizer """adadelta""" +368 85 training_loop """owa""" +368 85 negative_sampler """basic""" +368 85 evaluator """rankbased""" +368 86 dataset """kinships""" +368 86 model """kg2e""" +368 86 loss """softplus""" +368 86 regularizer """no""" +368 86 optimizer """adadelta""" +368 86 training_loop """owa""" +368 86 negative_sampler """basic""" +368 86 evaluator """rankbased""" +368 87 dataset """kinships""" +368 87 model 
"""kg2e""" +368 87 loss """softplus""" +368 87 regularizer """no""" +368 87 optimizer """adadelta""" +368 87 training_loop """owa""" +368 87 negative_sampler """basic""" +368 87 evaluator """rankbased""" +368 88 dataset """kinships""" +368 88 model """kg2e""" +368 88 loss """softplus""" +368 88 regularizer """no""" +368 88 optimizer """adadelta""" +368 88 training_loop """owa""" +368 88 negative_sampler """basic""" +368 88 evaluator """rankbased""" +368 89 dataset """kinships""" +368 89 model """kg2e""" +368 89 loss """softplus""" +368 89 regularizer """no""" +368 89 optimizer """adadelta""" +368 89 training_loop """owa""" +368 89 negative_sampler """basic""" +368 89 evaluator """rankbased""" +368 90 dataset """kinships""" +368 90 model """kg2e""" +368 90 loss """softplus""" +368 90 regularizer """no""" +368 90 optimizer """adadelta""" +368 90 training_loop """owa""" +368 90 negative_sampler """basic""" +368 90 evaluator """rankbased""" +368 91 dataset """kinships""" +368 91 model """kg2e""" +368 91 loss """softplus""" +368 91 regularizer """no""" +368 91 optimizer """adadelta""" +368 91 training_loop """owa""" +368 91 negative_sampler """basic""" +368 91 evaluator """rankbased""" +368 92 dataset """kinships""" +368 92 model """kg2e""" +368 92 loss """softplus""" +368 92 regularizer """no""" +368 92 optimizer """adadelta""" +368 92 training_loop """owa""" +368 92 negative_sampler """basic""" +368 92 evaluator """rankbased""" +368 93 dataset """kinships""" +368 93 model """kg2e""" +368 93 loss """softplus""" +368 93 regularizer """no""" +368 93 optimizer """adadelta""" +368 93 training_loop """owa""" +368 93 negative_sampler """basic""" +368 93 evaluator """rankbased""" +368 94 dataset """kinships""" +368 94 model """kg2e""" +368 94 loss """softplus""" +368 94 regularizer """no""" +368 94 optimizer """adadelta""" +368 94 training_loop """owa""" +368 94 negative_sampler """basic""" +368 94 evaluator """rankbased""" +368 95 dataset """kinships""" +368 95 model 
"""kg2e""" +368 95 loss """softplus""" +368 95 regularizer """no""" +368 95 optimizer """adadelta""" +368 95 training_loop """owa""" +368 95 negative_sampler """basic""" +368 95 evaluator """rankbased""" +368 96 dataset """kinships""" +368 96 model """kg2e""" +368 96 loss """softplus""" +368 96 regularizer """no""" +368 96 optimizer """adadelta""" +368 96 training_loop """owa""" +368 96 negative_sampler """basic""" +368 96 evaluator """rankbased""" +368 97 dataset """kinships""" +368 97 model """kg2e""" +368 97 loss """softplus""" +368 97 regularizer """no""" +368 97 optimizer """adadelta""" +368 97 training_loop """owa""" +368 97 negative_sampler """basic""" +368 97 evaluator """rankbased""" +368 98 dataset """kinships""" +368 98 model """kg2e""" +368 98 loss """softplus""" +368 98 regularizer """no""" +368 98 optimizer """adadelta""" +368 98 training_loop """owa""" +368 98 negative_sampler """basic""" +368 98 evaluator """rankbased""" +368 99 dataset """kinships""" +368 99 model """kg2e""" +368 99 loss """softplus""" +368 99 regularizer """no""" +368 99 optimizer """adadelta""" +368 99 training_loop """owa""" +368 99 negative_sampler """basic""" +368 99 evaluator """rankbased""" +368 100 dataset """kinships""" +368 100 model """kg2e""" +368 100 loss """softplus""" +368 100 regularizer """no""" +368 100 optimizer """adadelta""" +368 100 training_loop """owa""" +368 100 negative_sampler """basic""" +368 100 evaluator """rankbased""" +369 1 model.embedding_dim 1.0 +369 1 model.c_min 0.04020298563319081 +369 1 model.c_max 9.662115431098087 +369 1 negative_sampler.num_negs_per_pos 56.0 +369 1 training.batch_size 1.0 +369 2 model.embedding_dim 2.0 +369 2 model.c_min 0.03720623946609314 +369 2 model.c_max 4.78827616523613 +369 2 negative_sampler.num_negs_per_pos 19.0 +369 2 training.batch_size 1.0 +369 3 model.embedding_dim 0.0 +369 3 model.c_min 0.029025257390556586 +369 3 model.c_max 5.26993226489585 +369 3 negative_sampler.num_negs_per_pos 77.0 +369 3 
training.batch_size 1.0 +369 4 model.embedding_dim 2.0 +369 4 model.c_min 0.012524658585840053 +369 4 model.c_max 7.310376707570215 +369 4 negative_sampler.num_negs_per_pos 80.0 +369 4 training.batch_size 0.0 +369 5 model.embedding_dim 1.0 +369 5 model.c_min 0.048998880306483396 +369 5 model.c_max 1.275540676864762 +369 5 negative_sampler.num_negs_per_pos 33.0 +369 5 training.batch_size 2.0 +369 6 model.embedding_dim 2.0 +369 6 model.c_min 0.024870207959335207 +369 6 model.c_max 2.799504852847588 +369 6 negative_sampler.num_negs_per_pos 20.0 +369 6 training.batch_size 0.0 +369 7 model.embedding_dim 1.0 +369 7 model.c_min 0.099987056323414 +369 7 model.c_max 5.885775488149141 +369 7 negative_sampler.num_negs_per_pos 18.0 +369 7 training.batch_size 0.0 +369 8 model.embedding_dim 0.0 +369 8 model.c_min 0.07386444620129907 +369 8 model.c_max 7.5446025734908515 +369 8 negative_sampler.num_negs_per_pos 91.0 +369 8 training.batch_size 1.0 +369 9 model.embedding_dim 1.0 +369 9 model.c_min 0.011607992939164398 +369 9 model.c_max 3.430474851694936 +369 9 negative_sampler.num_negs_per_pos 97.0 +369 9 training.batch_size 2.0 +369 10 model.embedding_dim 2.0 +369 10 model.c_min 0.011017054957619345 +369 10 model.c_max 9.010970893968103 +369 10 negative_sampler.num_negs_per_pos 67.0 +369 10 training.batch_size 0.0 +369 11 model.embedding_dim 2.0 +369 11 model.c_min 0.010673204236517157 +369 11 model.c_max 1.5329901521123084 +369 11 negative_sampler.num_negs_per_pos 98.0 +369 11 training.batch_size 0.0 +369 12 model.embedding_dim 2.0 +369 12 model.c_min 0.0869088511224316 +369 12 model.c_max 1.467155125291458 +369 12 negative_sampler.num_negs_per_pos 83.0 +369 12 training.batch_size 0.0 +369 13 model.embedding_dim 0.0 +369 13 model.c_min 0.06893923492327433 +369 13 model.c_max 4.544639516461169 +369 13 negative_sampler.num_negs_per_pos 0.0 +369 13 training.batch_size 2.0 +369 14 model.embedding_dim 0.0 +369 14 model.c_min 0.021748070243543003 +369 14 model.c_max 3.3016436330664773 
+369 14 negative_sampler.num_negs_per_pos 91.0 +369 14 training.batch_size 0.0 +369 15 model.embedding_dim 2.0 +369 15 model.c_min 0.014231381838650893 +369 15 model.c_max 2.642111529179761 +369 15 negative_sampler.num_negs_per_pos 66.0 +369 15 training.batch_size 0.0 +369 16 model.embedding_dim 2.0 +369 16 model.c_min 0.043126574607612114 +369 16 model.c_max 7.962634546522748 +369 16 negative_sampler.num_negs_per_pos 59.0 +369 16 training.batch_size 0.0 +369 17 model.embedding_dim 1.0 +369 17 model.c_min 0.014118363508606175 +369 17 model.c_max 4.768161352012548 +369 17 negative_sampler.num_negs_per_pos 45.0 +369 17 training.batch_size 1.0 +369 18 model.embedding_dim 1.0 +369 18 model.c_min 0.012409890667173937 +369 18 model.c_max 1.8464777261674565 +369 18 negative_sampler.num_negs_per_pos 32.0 +369 18 training.batch_size 2.0 +369 19 model.embedding_dim 2.0 +369 19 model.c_min 0.010573620821832408 +369 19 model.c_max 5.482026157089125 +369 19 negative_sampler.num_negs_per_pos 39.0 +369 19 training.batch_size 1.0 +369 20 model.embedding_dim 1.0 +369 20 model.c_min 0.013663924251648956 +369 20 model.c_max 4.354215077763378 +369 20 negative_sampler.num_negs_per_pos 19.0 +369 20 training.batch_size 1.0 +369 21 model.embedding_dim 1.0 +369 21 model.c_min 0.030901773636507576 +369 21 model.c_max 6.84057196057877 +369 21 negative_sampler.num_negs_per_pos 2.0 +369 21 training.batch_size 1.0 +369 22 model.embedding_dim 1.0 +369 22 model.c_min 0.09097628930491487 +369 22 model.c_max 9.031222930187937 +369 22 negative_sampler.num_negs_per_pos 26.0 +369 22 training.batch_size 2.0 +369 23 model.embedding_dim 1.0 +369 23 model.c_min 0.021501577093824047 +369 23 model.c_max 6.48960414515304 +369 23 negative_sampler.num_negs_per_pos 80.0 +369 23 training.batch_size 2.0 +369 24 model.embedding_dim 2.0 +369 24 model.c_min 0.03879189077655735 +369 24 model.c_max 9.58372153031602 +369 24 negative_sampler.num_negs_per_pos 90.0 +369 24 training.batch_size 0.0 +369 25 
model.embedding_dim 1.0 +369 25 model.c_min 0.017755204878238635 +369 25 model.c_max 3.8111799183641573 +369 25 negative_sampler.num_negs_per_pos 3.0 +369 25 training.batch_size 0.0 +369 26 model.embedding_dim 1.0 +369 26 model.c_min 0.01489129061444985 +369 26 model.c_max 7.524233915442612 +369 26 negative_sampler.num_negs_per_pos 40.0 +369 26 training.batch_size 2.0 +369 27 model.embedding_dim 1.0 +369 27 model.c_min 0.06694241844699643 +369 27 model.c_max 9.553477680149802 +369 27 negative_sampler.num_negs_per_pos 63.0 +369 27 training.batch_size 0.0 +369 28 model.embedding_dim 1.0 +369 28 model.c_min 0.05602553493770145 +369 28 model.c_max 1.3029534979804815 +369 28 negative_sampler.num_negs_per_pos 63.0 +369 28 training.batch_size 1.0 +369 29 model.embedding_dim 2.0 +369 29 model.c_min 0.04691227309200111 +369 29 model.c_max 3.1693851771346693 +369 29 negative_sampler.num_negs_per_pos 26.0 +369 29 training.batch_size 1.0 +369 30 model.embedding_dim 0.0 +369 30 model.c_min 0.08560505837407238 +369 30 model.c_max 3.3618102029951866 +369 30 negative_sampler.num_negs_per_pos 23.0 +369 30 training.batch_size 1.0 +369 31 model.embedding_dim 0.0 +369 31 model.c_min 0.011232056580508383 +369 31 model.c_max 3.0204501890365236 +369 31 negative_sampler.num_negs_per_pos 73.0 +369 31 training.batch_size 1.0 +369 32 model.embedding_dim 0.0 +369 32 model.c_min 0.08090967952121526 +369 32 model.c_max 2.1924490038536666 +369 32 negative_sampler.num_negs_per_pos 51.0 +369 32 training.batch_size 2.0 +369 33 model.embedding_dim 0.0 +369 33 model.c_min 0.07448620319338019 +369 33 model.c_max 2.715145997536172 +369 33 negative_sampler.num_negs_per_pos 30.0 +369 33 training.batch_size 2.0 +369 34 model.embedding_dim 2.0 +369 34 model.c_min 0.014069097554592522 +369 34 model.c_max 5.6886834624649865 +369 34 negative_sampler.num_negs_per_pos 63.0 +369 34 training.batch_size 1.0 +369 35 model.embedding_dim 0.0 +369 35 model.c_min 0.011946109789442343 +369 35 model.c_max 
1.827291396804638 +369 35 negative_sampler.num_negs_per_pos 0.0 +369 35 training.batch_size 0.0 +369 36 model.embedding_dim 2.0 +369 36 model.c_min 0.08886417879507486 +369 36 model.c_max 9.227497221092221 +369 36 negative_sampler.num_negs_per_pos 96.0 +369 36 training.batch_size 0.0 +369 37 model.embedding_dim 2.0 +369 37 model.c_min 0.08913595183132901 +369 37 model.c_max 3.9704601692340216 +369 37 negative_sampler.num_negs_per_pos 76.0 +369 37 training.batch_size 2.0 +369 38 model.embedding_dim 1.0 +369 38 model.c_min 0.05710435471885577 +369 38 model.c_max 5.953485705108551 +369 38 negative_sampler.num_negs_per_pos 6.0 +369 38 training.batch_size 0.0 +369 39 model.embedding_dim 1.0 +369 39 model.c_min 0.044761651279242594 +369 39 model.c_max 2.5047962020594032 +369 39 negative_sampler.num_negs_per_pos 49.0 +369 39 training.batch_size 1.0 +369 40 model.embedding_dim 2.0 +369 40 model.c_min 0.08457702590480722 +369 40 model.c_max 4.423932770239722 +369 40 negative_sampler.num_negs_per_pos 36.0 +369 40 training.batch_size 0.0 +369 41 model.embedding_dim 1.0 +369 41 model.c_min 0.07969335297316153 +369 41 model.c_max 2.07446085233658 +369 41 negative_sampler.num_negs_per_pos 73.0 +369 41 training.batch_size 2.0 +369 42 model.embedding_dim 1.0 +369 42 model.c_min 0.08036479554287372 +369 42 model.c_max 7.281707716919955 +369 42 negative_sampler.num_negs_per_pos 39.0 +369 42 training.batch_size 2.0 +369 43 model.embedding_dim 2.0 +369 43 model.c_min 0.02069769290871405 +369 43 model.c_max 2.0373050351159794 +369 43 negative_sampler.num_negs_per_pos 30.0 +369 43 training.batch_size 0.0 +369 44 model.embedding_dim 0.0 +369 44 model.c_min 0.021744542192515925 +369 44 model.c_max 2.9724115893298646 +369 44 negative_sampler.num_negs_per_pos 28.0 +369 44 training.batch_size 1.0 +369 45 model.embedding_dim 0.0 +369 45 model.c_min 0.02584371574488421 +369 45 model.c_max 9.540606245005604 +369 45 negative_sampler.num_negs_per_pos 22.0 +369 45 training.batch_size 2.0 +369 46 
model.embedding_dim 0.0 +369 46 model.c_min 0.05331490694785266 +369 46 model.c_max 8.326315966587607 +369 46 negative_sampler.num_negs_per_pos 55.0 +369 46 training.batch_size 2.0 +369 47 model.embedding_dim 1.0 +369 47 model.c_min 0.023155811424861387 +369 47 model.c_max 8.327791726276217 +369 47 negative_sampler.num_negs_per_pos 51.0 +369 47 training.batch_size 2.0 +369 48 model.embedding_dim 2.0 +369 48 model.c_min 0.04215112873110971 +369 48 model.c_max 8.888057828099914 +369 48 negative_sampler.num_negs_per_pos 30.0 +369 48 training.batch_size 0.0 +369 49 model.embedding_dim 1.0 +369 49 model.c_min 0.08970126418717472 +369 49 model.c_max 5.897672563238474 +369 49 negative_sampler.num_negs_per_pos 19.0 +369 49 training.batch_size 1.0 +369 50 model.embedding_dim 1.0 +369 50 model.c_min 0.06829882618668157 +369 50 model.c_max 3.1688290662404563 +369 50 negative_sampler.num_negs_per_pos 43.0 +369 50 training.batch_size 1.0 +369 51 model.embedding_dim 0.0 +369 51 model.c_min 0.017974557516300324 +369 51 model.c_max 5.97501641670449 +369 51 negative_sampler.num_negs_per_pos 47.0 +369 51 training.batch_size 2.0 +369 52 model.embedding_dim 2.0 +369 52 model.c_min 0.05104982014453404 +369 52 model.c_max 9.609486525739086 +369 52 negative_sampler.num_negs_per_pos 53.0 +369 52 training.batch_size 1.0 +369 53 model.embedding_dim 1.0 +369 53 model.c_min 0.052715179461985714 +369 53 model.c_max 2.928197569592414 +369 53 negative_sampler.num_negs_per_pos 26.0 +369 53 training.batch_size 2.0 +369 54 model.embedding_dim 2.0 +369 54 model.c_min 0.038630917335685126 +369 54 model.c_max 7.808267129877655 +369 54 negative_sampler.num_negs_per_pos 48.0 +369 54 training.batch_size 0.0 +369 55 model.embedding_dim 2.0 +369 55 model.c_min 0.01122168249267592 +369 55 model.c_max 5.3317810689823615 +369 55 negative_sampler.num_negs_per_pos 30.0 +369 55 training.batch_size 2.0 +369 56 model.embedding_dim 2.0 +369 56 model.c_min 0.056056946973416566 +369 56 model.c_max 7.658874087140256 
+369 56 negative_sampler.num_negs_per_pos 74.0 +369 56 training.batch_size 0.0 +369 57 model.embedding_dim 2.0 +369 57 model.c_min 0.010337017483999289 +369 57 model.c_max 2.889310186715907 +369 57 negative_sampler.num_negs_per_pos 21.0 +369 57 training.batch_size 0.0 +369 58 model.embedding_dim 0.0 +369 58 model.c_min 0.02491768844000025 +369 58 model.c_max 9.072913082147947 +369 58 negative_sampler.num_negs_per_pos 88.0 +369 58 training.batch_size 2.0 +369 59 model.embedding_dim 2.0 +369 59 model.c_min 0.033226893320012975 +369 59 model.c_max 3.4115374994397554 +369 59 negative_sampler.num_negs_per_pos 82.0 +369 59 training.batch_size 0.0 +369 60 model.embedding_dim 1.0 +369 60 model.c_min 0.050689875781499784 +369 60 model.c_max 6.115381216405522 +369 60 negative_sampler.num_negs_per_pos 14.0 +369 60 training.batch_size 1.0 +369 61 model.embedding_dim 1.0 +369 61 model.c_min 0.016525360887007824 +369 61 model.c_max 1.2953216863677985 +369 61 negative_sampler.num_negs_per_pos 85.0 +369 61 training.batch_size 0.0 +369 62 model.embedding_dim 2.0 +369 62 model.c_min 0.07739505574061692 +369 62 model.c_max 2.319974401932215 +369 62 negative_sampler.num_negs_per_pos 8.0 +369 62 training.batch_size 2.0 +369 63 model.embedding_dim 2.0 +369 63 model.c_min 0.03265323344946029 +369 63 model.c_max 8.726517172225725 +369 63 negative_sampler.num_negs_per_pos 32.0 +369 63 training.batch_size 1.0 +369 64 model.embedding_dim 0.0 +369 64 model.c_min 0.010138431096514336 +369 64 model.c_max 3.0617457043408334 +369 64 negative_sampler.num_negs_per_pos 49.0 +369 64 training.batch_size 0.0 +369 65 model.embedding_dim 0.0 +369 65 model.c_min 0.022128105072731333 +369 65 model.c_max 6.361283305190937 +369 65 negative_sampler.num_negs_per_pos 58.0 +369 65 training.batch_size 1.0 +369 66 model.embedding_dim 1.0 +369 66 model.c_min 0.023754502801987838 +369 66 model.c_max 3.3680544169266384 +369 66 negative_sampler.num_negs_per_pos 38.0 +369 66 training.batch_size 2.0 +369 67 
model.embedding_dim 0.0 +369 67 model.c_min 0.010153821293563516 +369 67 model.c_max 7.061229482069312 +369 67 negative_sampler.num_negs_per_pos 3.0 +369 67 training.batch_size 2.0 +369 68 model.embedding_dim 2.0 +369 68 model.c_min 0.07747045630160661 +369 68 model.c_max 7.189672402130963 +369 68 negative_sampler.num_negs_per_pos 37.0 +369 68 training.batch_size 2.0 +369 69 model.embedding_dim 1.0 +369 69 model.c_min 0.01611482865672499 +369 69 model.c_max 8.79867012928539 +369 69 negative_sampler.num_negs_per_pos 33.0 +369 69 training.batch_size 2.0 +369 70 model.embedding_dim 2.0 +369 70 model.c_min 0.011217282740911277 +369 70 model.c_max 1.9890966922057485 +369 70 negative_sampler.num_negs_per_pos 29.0 +369 70 training.batch_size 2.0 +369 71 model.embedding_dim 1.0 +369 71 model.c_min 0.05588126034154211 +369 71 model.c_max 3.8559327836033788 +369 71 negative_sampler.num_negs_per_pos 63.0 +369 71 training.batch_size 0.0 +369 72 model.embedding_dim 2.0 +369 72 model.c_min 0.02215530644625504 +369 72 model.c_max 5.664216009452618 +369 72 negative_sampler.num_negs_per_pos 54.0 +369 72 training.batch_size 1.0 +369 73 model.embedding_dim 0.0 +369 73 model.c_min 0.01862233913027721 +369 73 model.c_max 6.419913469813823 +369 73 negative_sampler.num_negs_per_pos 9.0 +369 73 training.batch_size 0.0 +369 74 model.embedding_dim 2.0 +369 74 model.c_min 0.01790363409513289 +369 74 model.c_max 8.431875146074987 +369 74 negative_sampler.num_negs_per_pos 38.0 +369 74 training.batch_size 1.0 +369 75 model.embedding_dim 1.0 +369 75 model.c_min 0.025041411613831728 +369 75 model.c_max 7.426969272300854 +369 75 negative_sampler.num_negs_per_pos 32.0 +369 75 training.batch_size 2.0 +369 76 model.embedding_dim 2.0 +369 76 model.c_min 0.01931915946788978 +369 76 model.c_max 3.2403249554244224 +369 76 negative_sampler.num_negs_per_pos 21.0 +369 76 training.batch_size 0.0 +369 77 model.embedding_dim 1.0 +369 77 model.c_min 0.05153840424180092 +369 77 model.c_max 4.210082653142074 +369 
77 negative_sampler.num_negs_per_pos 39.0 +369 77 training.batch_size 2.0 +369 78 model.embedding_dim 1.0 +369 78 model.c_min 0.010623786709014753 +369 78 model.c_max 7.804237737701797 +369 78 negative_sampler.num_negs_per_pos 0.0 +369 78 training.batch_size 0.0 +369 79 model.embedding_dim 0.0 +369 79 model.c_min 0.051649717400698285 +369 79 model.c_max 4.094629552929289 +369 79 negative_sampler.num_negs_per_pos 4.0 +369 79 training.batch_size 2.0 +369 80 model.embedding_dim 0.0 +369 80 model.c_min 0.04969396795765227 +369 80 model.c_max 9.519168287026078 +369 80 negative_sampler.num_negs_per_pos 74.0 +369 80 training.batch_size 2.0 +369 81 model.embedding_dim 1.0 +369 81 model.c_min 0.02674121227253782 +369 81 model.c_max 7.628074819798789 +369 81 negative_sampler.num_negs_per_pos 24.0 +369 81 training.batch_size 1.0 +369 82 model.embedding_dim 1.0 +369 82 model.c_min 0.029767397178431652 +369 82 model.c_max 8.998066926866914 +369 82 negative_sampler.num_negs_per_pos 63.0 +369 82 training.batch_size 1.0 +369 83 model.embedding_dim 0.0 +369 83 model.c_min 0.023210307060264158 +369 83 model.c_max 8.129432854839001 +369 83 negative_sampler.num_negs_per_pos 44.0 +369 83 training.batch_size 2.0 +369 84 model.embedding_dim 1.0 +369 84 model.c_min 0.023413200875907605 +369 84 model.c_max 3.935944642041738 +369 84 negative_sampler.num_negs_per_pos 58.0 +369 84 training.batch_size 2.0 +369 85 model.embedding_dim 0.0 +369 85 model.c_min 0.012018836323765422 +369 85 model.c_max 8.9129409730473 +369 85 negative_sampler.num_negs_per_pos 54.0 +369 85 training.batch_size 2.0 +369 86 model.embedding_dim 1.0 +369 86 model.c_min 0.011826457669249562 +369 86 model.c_max 9.875093538619177 +369 86 negative_sampler.num_negs_per_pos 45.0 +369 86 training.batch_size 2.0 +369 87 model.embedding_dim 2.0 +369 87 model.c_min 0.012520003190233988 +369 87 model.c_max 9.237586828770016 +369 87 negative_sampler.num_negs_per_pos 88.0 +369 87 training.batch_size 1.0 +369 88 model.embedding_dim 2.0 
+369 88 model.c_min 0.05685879532871755 +369 88 model.c_max 2.139562327100579 +369 88 negative_sampler.num_negs_per_pos 4.0 +369 88 training.batch_size 2.0 +369 89 model.embedding_dim 1.0 +369 89 model.c_min 0.07545269126862493 +369 89 model.c_max 1.4533108721907166 +369 89 negative_sampler.num_negs_per_pos 26.0 +369 89 training.batch_size 1.0 +369 90 model.embedding_dim 1.0 +369 90 model.c_min 0.013992002995297398 +369 90 model.c_max 7.162776824509536 +369 90 negative_sampler.num_negs_per_pos 45.0 +369 90 training.batch_size 1.0 +369 91 model.embedding_dim 0.0 +369 91 model.c_min 0.09686411772092107 +369 91 model.c_max 8.79833713446434 +369 91 negative_sampler.num_negs_per_pos 66.0 +369 91 training.batch_size 0.0 +369 92 model.embedding_dim 0.0 +369 92 model.c_min 0.07433031493986872 +369 92 model.c_max 3.6747019325936874 +369 92 negative_sampler.num_negs_per_pos 68.0 +369 92 training.batch_size 2.0 +369 93 model.embedding_dim 1.0 +369 93 model.c_min 0.017970033716509997 +369 93 model.c_max 3.671019726548482 +369 93 negative_sampler.num_negs_per_pos 29.0 +369 93 training.batch_size 2.0 +369 94 model.embedding_dim 0.0 +369 94 model.c_min 0.08434446979382085 +369 94 model.c_max 6.073293865582722 +369 94 negative_sampler.num_negs_per_pos 8.0 +369 94 training.batch_size 0.0 +369 95 model.embedding_dim 2.0 +369 95 model.c_min 0.013288892821701715 +369 95 model.c_max 4.920980407925541 +369 95 negative_sampler.num_negs_per_pos 22.0 +369 95 training.batch_size 1.0 +369 96 model.embedding_dim 0.0 +369 96 model.c_min 0.08281087896482525 +369 96 model.c_max 7.939661242931474 +369 96 negative_sampler.num_negs_per_pos 21.0 +369 96 training.batch_size 1.0 +369 97 model.embedding_dim 2.0 +369 97 model.c_min 0.06849830365675422 +369 97 model.c_max 5.229323703523956 +369 97 negative_sampler.num_negs_per_pos 12.0 +369 97 training.batch_size 1.0 +369 98 model.embedding_dim 2.0 +369 98 model.c_min 0.07526801535612583 +369 98 model.c_max 5.379362815269918 +369 98 
negative_sampler.num_negs_per_pos 61.0 +369 98 training.batch_size 0.0 +369 99 model.embedding_dim 0.0 +369 99 model.c_min 0.031032727835204246 +369 99 model.c_max 8.764113018387215 +369 99 negative_sampler.num_negs_per_pos 73.0 +369 99 training.batch_size 0.0 +369 100 model.embedding_dim 2.0 +369 100 model.c_min 0.08412118082561763 +369 100 model.c_max 7.712724827073584 +369 100 negative_sampler.num_negs_per_pos 98.0 +369 100 training.batch_size 0.0 +369 1 dataset """kinships""" +369 1 model """kg2e""" +369 1 loss """bceaftersigmoid""" +369 1 regularizer """no""" +369 1 optimizer """adadelta""" +369 1 training_loop """owa""" +369 1 negative_sampler """basic""" +369 1 evaluator """rankbased""" +369 2 dataset """kinships""" +369 2 model """kg2e""" +369 2 loss """bceaftersigmoid""" +369 2 regularizer """no""" +369 2 optimizer """adadelta""" +369 2 training_loop """owa""" +369 2 negative_sampler """basic""" +369 2 evaluator """rankbased""" +369 3 dataset """kinships""" +369 3 model """kg2e""" +369 3 loss """bceaftersigmoid""" +369 3 regularizer """no""" +369 3 optimizer """adadelta""" +369 3 training_loop """owa""" +369 3 negative_sampler """basic""" +369 3 evaluator """rankbased""" +369 4 dataset """kinships""" +369 4 model """kg2e""" +369 4 loss """bceaftersigmoid""" +369 4 regularizer """no""" +369 4 optimizer """adadelta""" +369 4 training_loop """owa""" +369 4 negative_sampler """basic""" +369 4 evaluator """rankbased""" +369 5 dataset """kinships""" +369 5 model """kg2e""" +369 5 loss """bceaftersigmoid""" +369 5 regularizer """no""" +369 5 optimizer """adadelta""" +369 5 training_loop """owa""" +369 5 negative_sampler """basic""" +369 5 evaluator """rankbased""" +369 6 dataset """kinships""" +369 6 model """kg2e""" +369 6 loss """bceaftersigmoid""" +369 6 regularizer """no""" +369 6 optimizer """adadelta""" +369 6 training_loop """owa""" +369 6 negative_sampler """basic""" +369 6 evaluator """rankbased""" +369 7 dataset """kinships""" +369 7 model """kg2e""" 
+369 7 loss """bceaftersigmoid""" +369 7 regularizer """no""" +369 7 optimizer """adadelta""" +369 7 training_loop """owa""" +369 7 negative_sampler """basic""" +369 7 evaluator """rankbased""" +369 8 dataset """kinships""" +369 8 model """kg2e""" +369 8 loss """bceaftersigmoid""" +369 8 regularizer """no""" +369 8 optimizer """adadelta""" +369 8 training_loop """owa""" +369 8 negative_sampler """basic""" +369 8 evaluator """rankbased""" +369 9 dataset """kinships""" +369 9 model """kg2e""" +369 9 loss """bceaftersigmoid""" +369 9 regularizer """no""" +369 9 optimizer """adadelta""" +369 9 training_loop """owa""" +369 9 negative_sampler """basic""" +369 9 evaluator """rankbased""" +369 10 dataset """kinships""" +369 10 model """kg2e""" +369 10 loss """bceaftersigmoid""" +369 10 regularizer """no""" +369 10 optimizer """adadelta""" +369 10 training_loop """owa""" +369 10 negative_sampler """basic""" +369 10 evaluator """rankbased""" +369 11 dataset """kinships""" +369 11 model """kg2e""" +369 11 loss """bceaftersigmoid""" +369 11 regularizer """no""" +369 11 optimizer """adadelta""" +369 11 training_loop """owa""" +369 11 negative_sampler """basic""" +369 11 evaluator """rankbased""" +369 12 dataset """kinships""" +369 12 model """kg2e""" +369 12 loss """bceaftersigmoid""" +369 12 regularizer """no""" +369 12 optimizer """adadelta""" +369 12 training_loop """owa""" +369 12 negative_sampler """basic""" +369 12 evaluator """rankbased""" +369 13 dataset """kinships""" +369 13 model """kg2e""" +369 13 loss """bceaftersigmoid""" +369 13 regularizer """no""" +369 13 optimizer """adadelta""" +369 13 training_loop """owa""" +369 13 negative_sampler """basic""" +369 13 evaluator """rankbased""" +369 14 dataset """kinships""" +369 14 model """kg2e""" +369 14 loss """bceaftersigmoid""" +369 14 regularizer """no""" +369 14 optimizer """adadelta""" +369 14 training_loop """owa""" +369 14 negative_sampler """basic""" +369 14 evaluator """rankbased""" +369 15 dataset 
"""kinships""" +369 15 model """kg2e""" +369 15 loss """bceaftersigmoid""" +369 15 regularizer """no""" +369 15 optimizer """adadelta""" +369 15 training_loop """owa""" +369 15 negative_sampler """basic""" +369 15 evaluator """rankbased""" +369 16 dataset """kinships""" +369 16 model """kg2e""" +369 16 loss """bceaftersigmoid""" +369 16 regularizer """no""" +369 16 optimizer """adadelta""" +369 16 training_loop """owa""" +369 16 negative_sampler """basic""" +369 16 evaluator """rankbased""" +369 17 dataset """kinships""" +369 17 model """kg2e""" +369 17 loss """bceaftersigmoid""" +369 17 regularizer """no""" +369 17 optimizer """adadelta""" +369 17 training_loop """owa""" +369 17 negative_sampler """basic""" +369 17 evaluator """rankbased""" +369 18 dataset """kinships""" +369 18 model """kg2e""" +369 18 loss """bceaftersigmoid""" +369 18 regularizer """no""" +369 18 optimizer """adadelta""" +369 18 training_loop """owa""" +369 18 negative_sampler """basic""" +369 18 evaluator """rankbased""" +369 19 dataset """kinships""" +369 19 model """kg2e""" +369 19 loss """bceaftersigmoid""" +369 19 regularizer """no""" +369 19 optimizer """adadelta""" +369 19 training_loop """owa""" +369 19 negative_sampler """basic""" +369 19 evaluator """rankbased""" +369 20 dataset """kinships""" +369 20 model """kg2e""" +369 20 loss """bceaftersigmoid""" +369 20 regularizer """no""" +369 20 optimizer """adadelta""" +369 20 training_loop """owa""" +369 20 negative_sampler """basic""" +369 20 evaluator """rankbased""" +369 21 dataset """kinships""" +369 21 model """kg2e""" +369 21 loss """bceaftersigmoid""" +369 21 regularizer """no""" +369 21 optimizer """adadelta""" +369 21 training_loop """owa""" +369 21 negative_sampler """basic""" +369 21 evaluator """rankbased""" +369 22 dataset """kinships""" +369 22 model """kg2e""" +369 22 loss """bceaftersigmoid""" +369 22 regularizer """no""" +369 22 optimizer """adadelta""" +369 22 training_loop """owa""" +369 22 negative_sampler """basic""" 
+369 22 evaluator """rankbased""" +369 23 dataset """kinships""" +369 23 model """kg2e""" +369 23 loss """bceaftersigmoid""" +369 23 regularizer """no""" +369 23 optimizer """adadelta""" +369 23 training_loop """owa""" +369 23 negative_sampler """basic""" +369 23 evaluator """rankbased""" +369 24 dataset """kinships""" +369 24 model """kg2e""" +369 24 loss """bceaftersigmoid""" +369 24 regularizer """no""" +369 24 optimizer """adadelta""" +369 24 training_loop """owa""" +369 24 negative_sampler """basic""" +369 24 evaluator """rankbased""" +369 25 dataset """kinships""" +369 25 model """kg2e""" +369 25 loss """bceaftersigmoid""" +369 25 regularizer """no""" +369 25 optimizer """adadelta""" +369 25 training_loop """owa""" +369 25 negative_sampler """basic""" +369 25 evaluator """rankbased""" +369 26 dataset """kinships""" +369 26 model """kg2e""" +369 26 loss """bceaftersigmoid""" +369 26 regularizer """no""" +369 26 optimizer """adadelta""" +369 26 training_loop """owa""" +369 26 negative_sampler """basic""" +369 26 evaluator """rankbased""" +369 27 dataset """kinships""" +369 27 model """kg2e""" +369 27 loss """bceaftersigmoid""" +369 27 regularizer """no""" +369 27 optimizer """adadelta""" +369 27 training_loop """owa""" +369 27 negative_sampler """basic""" +369 27 evaluator """rankbased""" +369 28 dataset """kinships""" +369 28 model """kg2e""" +369 28 loss """bceaftersigmoid""" +369 28 regularizer """no""" +369 28 optimizer """adadelta""" +369 28 training_loop """owa""" +369 28 negative_sampler """basic""" +369 28 evaluator """rankbased""" +369 29 dataset """kinships""" +369 29 model """kg2e""" +369 29 loss """bceaftersigmoid""" +369 29 regularizer """no""" +369 29 optimizer """adadelta""" +369 29 training_loop """owa""" +369 29 negative_sampler """basic""" +369 29 evaluator """rankbased""" +369 30 dataset """kinships""" +369 30 model """kg2e""" +369 30 loss """bceaftersigmoid""" +369 30 regularizer """no""" +369 30 optimizer """adadelta""" +369 30 
training_loop """owa""" +369 30 negative_sampler """basic""" +369 30 evaluator """rankbased""" +369 31 dataset """kinships""" +369 31 model """kg2e""" +369 31 loss """bceaftersigmoid""" +369 31 regularizer """no""" +369 31 optimizer """adadelta""" +369 31 training_loop """owa""" +369 31 negative_sampler """basic""" +369 31 evaluator """rankbased""" +369 32 dataset """kinships""" +369 32 model """kg2e""" +369 32 loss """bceaftersigmoid""" +369 32 regularizer """no""" +369 32 optimizer """adadelta""" +369 32 training_loop """owa""" +369 32 negative_sampler """basic""" +369 32 evaluator """rankbased""" +369 33 dataset """kinships""" +369 33 model """kg2e""" +369 33 loss """bceaftersigmoid""" +369 33 regularizer """no""" +369 33 optimizer """adadelta""" +369 33 training_loop """owa""" +369 33 negative_sampler """basic""" +369 33 evaluator """rankbased""" +369 34 dataset """kinships""" +369 34 model """kg2e""" +369 34 loss """bceaftersigmoid""" +369 34 regularizer """no""" +369 34 optimizer """adadelta""" +369 34 training_loop """owa""" +369 34 negative_sampler """basic""" +369 34 evaluator """rankbased""" +369 35 dataset """kinships""" +369 35 model """kg2e""" +369 35 loss """bceaftersigmoid""" +369 35 regularizer """no""" +369 35 optimizer """adadelta""" +369 35 training_loop """owa""" +369 35 negative_sampler """basic""" +369 35 evaluator """rankbased""" +369 36 dataset """kinships""" +369 36 model """kg2e""" +369 36 loss """bceaftersigmoid""" +369 36 regularizer """no""" +369 36 optimizer """adadelta""" +369 36 training_loop """owa""" +369 36 negative_sampler """basic""" +369 36 evaluator """rankbased""" +369 37 dataset """kinships""" +369 37 model """kg2e""" +369 37 loss """bceaftersigmoid""" +369 37 regularizer """no""" +369 37 optimizer """adadelta""" +369 37 training_loop """owa""" +369 37 negative_sampler """basic""" +369 37 evaluator """rankbased""" +369 38 dataset """kinships""" +369 38 model """kg2e""" +369 38 loss """bceaftersigmoid""" +369 38 regularizer 
"""no""" +369 38 optimizer """adadelta""" +369 38 training_loop """owa""" +369 38 negative_sampler """basic""" +369 38 evaluator """rankbased""" +369 39 dataset """kinships""" +369 39 model """kg2e""" +369 39 loss """bceaftersigmoid""" +369 39 regularizer """no""" +369 39 optimizer """adadelta""" +369 39 training_loop """owa""" +369 39 negative_sampler """basic""" +369 39 evaluator """rankbased""" +369 40 dataset """kinships""" +369 40 model """kg2e""" +369 40 loss """bceaftersigmoid""" +369 40 regularizer """no""" +369 40 optimizer """adadelta""" +369 40 training_loop """owa""" +369 40 negative_sampler """basic""" +369 40 evaluator """rankbased""" +369 41 dataset """kinships""" +369 41 model """kg2e""" +369 41 loss """bceaftersigmoid""" +369 41 regularizer """no""" +369 41 optimizer """adadelta""" +369 41 training_loop """owa""" +369 41 negative_sampler """basic""" +369 41 evaluator """rankbased""" +369 42 dataset """kinships""" +369 42 model """kg2e""" +369 42 loss """bceaftersigmoid""" +369 42 regularizer """no""" +369 42 optimizer """adadelta""" +369 42 training_loop """owa""" +369 42 negative_sampler """basic""" +369 42 evaluator """rankbased""" +369 43 dataset """kinships""" +369 43 model """kg2e""" +369 43 loss """bceaftersigmoid""" +369 43 regularizer """no""" +369 43 optimizer """adadelta""" +369 43 training_loop """owa""" +369 43 negative_sampler """basic""" +369 43 evaluator """rankbased""" +369 44 dataset """kinships""" +369 44 model """kg2e""" +369 44 loss """bceaftersigmoid""" +369 44 regularizer """no""" +369 44 optimizer """adadelta""" +369 44 training_loop """owa""" +369 44 negative_sampler """basic""" +369 44 evaluator """rankbased""" +369 45 dataset """kinships""" +369 45 model """kg2e""" +369 45 loss """bceaftersigmoid""" +369 45 regularizer """no""" +369 45 optimizer """adadelta""" +369 45 training_loop """owa""" +369 45 negative_sampler """basic""" +369 45 evaluator """rankbased""" +369 46 dataset """kinships""" +369 46 model """kg2e""" +369 
46 loss """bceaftersigmoid""" +369 46 regularizer """no""" +369 46 optimizer """adadelta""" +369 46 training_loop """owa""" +369 46 negative_sampler """basic""" +369 46 evaluator """rankbased""" +369 47 dataset """kinships""" +369 47 model """kg2e""" +369 47 loss """bceaftersigmoid""" +369 47 regularizer """no""" +369 47 optimizer """adadelta""" +369 47 training_loop """owa""" +369 47 negative_sampler """basic""" +369 47 evaluator """rankbased""" +369 48 dataset """kinships""" +369 48 model """kg2e""" +369 48 loss """bceaftersigmoid""" +369 48 regularizer """no""" +369 48 optimizer """adadelta""" +369 48 training_loop """owa""" +369 48 negative_sampler """basic""" +369 48 evaluator """rankbased""" +369 49 dataset """kinships""" +369 49 model """kg2e""" +369 49 loss """bceaftersigmoid""" +369 49 regularizer """no""" +369 49 optimizer """adadelta""" +369 49 training_loop """owa""" +369 49 negative_sampler """basic""" +369 49 evaluator """rankbased""" +369 50 dataset """kinships""" +369 50 model """kg2e""" +369 50 loss """bceaftersigmoid""" +369 50 regularizer """no""" +369 50 optimizer """adadelta""" +369 50 training_loop """owa""" +369 50 negative_sampler """basic""" +369 50 evaluator """rankbased""" +369 51 dataset """kinships""" +369 51 model """kg2e""" +369 51 loss """bceaftersigmoid""" +369 51 regularizer """no""" +369 51 optimizer """adadelta""" +369 51 training_loop """owa""" +369 51 negative_sampler """basic""" +369 51 evaluator """rankbased""" +369 52 dataset """kinships""" +369 52 model """kg2e""" +369 52 loss """bceaftersigmoid""" +369 52 regularizer """no""" +369 52 optimizer """adadelta""" +369 52 training_loop """owa""" +369 52 negative_sampler """basic""" +369 52 evaluator """rankbased""" +369 53 dataset """kinships""" +369 53 model """kg2e""" +369 53 loss """bceaftersigmoid""" +369 53 regularizer """no""" +369 53 optimizer """adadelta""" +369 53 training_loop """owa""" +369 53 negative_sampler """basic""" +369 53 evaluator """rankbased""" +369 54 
dataset """kinships""" +369 54 model """kg2e""" +369 54 loss """bceaftersigmoid""" +369 54 regularizer """no""" +369 54 optimizer """adadelta""" +369 54 training_loop """owa""" +369 54 negative_sampler """basic""" +369 54 evaluator """rankbased""" +369 55 dataset """kinships""" +369 55 model """kg2e""" +369 55 loss """bceaftersigmoid""" +369 55 regularizer """no""" +369 55 optimizer """adadelta""" +369 55 training_loop """owa""" +369 55 negative_sampler """basic""" +369 55 evaluator """rankbased""" +369 56 dataset """kinships""" +369 56 model """kg2e""" +369 56 loss """bceaftersigmoid""" +369 56 regularizer """no""" +369 56 optimizer """adadelta""" +369 56 training_loop """owa""" +369 56 negative_sampler """basic""" +369 56 evaluator """rankbased""" +369 57 dataset """kinships""" +369 57 model """kg2e""" +369 57 loss """bceaftersigmoid""" +369 57 regularizer """no""" +369 57 optimizer """adadelta""" +369 57 training_loop """owa""" +369 57 negative_sampler """basic""" +369 57 evaluator """rankbased""" +369 58 dataset """kinships""" +369 58 model """kg2e""" +369 58 loss """bceaftersigmoid""" +369 58 regularizer """no""" +369 58 optimizer """adadelta""" +369 58 training_loop """owa""" +369 58 negative_sampler """basic""" +369 58 evaluator """rankbased""" +369 59 dataset """kinships""" +369 59 model """kg2e""" +369 59 loss """bceaftersigmoid""" +369 59 regularizer """no""" +369 59 optimizer """adadelta""" +369 59 training_loop """owa""" +369 59 negative_sampler """basic""" +369 59 evaluator """rankbased""" +369 60 dataset """kinships""" +369 60 model """kg2e""" +369 60 loss """bceaftersigmoid""" +369 60 regularizer """no""" +369 60 optimizer """adadelta""" +369 60 training_loop """owa""" +369 60 negative_sampler """basic""" +369 60 evaluator """rankbased""" +369 61 dataset """kinships""" +369 61 model """kg2e""" +369 61 loss """bceaftersigmoid""" +369 61 regularizer """no""" +369 61 optimizer """adadelta""" +369 61 training_loop """owa""" +369 61 negative_sampler 
"""basic""" +369 61 evaluator """rankbased""" +369 62 dataset """kinships""" +369 62 model """kg2e""" +369 62 loss """bceaftersigmoid""" +369 62 regularizer """no""" +369 62 optimizer """adadelta""" +369 62 training_loop """owa""" +369 62 negative_sampler """basic""" +369 62 evaluator """rankbased""" +369 63 dataset """kinships""" +369 63 model """kg2e""" +369 63 loss """bceaftersigmoid""" +369 63 regularizer """no""" +369 63 optimizer """adadelta""" +369 63 training_loop """owa""" +369 63 negative_sampler """basic""" +369 63 evaluator """rankbased""" +369 64 dataset """kinships""" +369 64 model """kg2e""" +369 64 loss """bceaftersigmoid""" +369 64 regularizer """no""" +369 64 optimizer """adadelta""" +369 64 training_loop """owa""" +369 64 negative_sampler """basic""" +369 64 evaluator """rankbased""" +369 65 dataset """kinships""" +369 65 model """kg2e""" +369 65 loss """bceaftersigmoid""" +369 65 regularizer """no""" +369 65 optimizer """adadelta""" +369 65 training_loop """owa""" +369 65 negative_sampler """basic""" +369 65 evaluator """rankbased""" +369 66 dataset """kinships""" +369 66 model """kg2e""" +369 66 loss """bceaftersigmoid""" +369 66 regularizer """no""" +369 66 optimizer """adadelta""" +369 66 training_loop """owa""" +369 66 negative_sampler """basic""" +369 66 evaluator """rankbased""" +369 67 dataset """kinships""" +369 67 model """kg2e""" +369 67 loss """bceaftersigmoid""" +369 67 regularizer """no""" +369 67 optimizer """adadelta""" +369 67 training_loop """owa""" +369 67 negative_sampler """basic""" +369 67 evaluator """rankbased""" +369 68 dataset """kinships""" +369 68 model """kg2e""" +369 68 loss """bceaftersigmoid""" +369 68 regularizer """no""" +369 68 optimizer """adadelta""" +369 68 training_loop """owa""" +369 68 negative_sampler """basic""" +369 68 evaluator """rankbased""" +369 69 dataset """kinships""" +369 69 model """kg2e""" +369 69 loss """bceaftersigmoid""" +369 69 regularizer """no""" +369 69 optimizer """adadelta""" +369 69 
training_loop """owa""" +369 69 negative_sampler """basic""" +369 69 evaluator """rankbased""" +369 70 dataset """kinships""" +369 70 model """kg2e""" +369 70 loss """bceaftersigmoid""" +369 70 regularizer """no""" +369 70 optimizer """adadelta""" +369 70 training_loop """owa""" +369 70 negative_sampler """basic""" +369 70 evaluator """rankbased""" +369 71 dataset """kinships""" +369 71 model """kg2e""" +369 71 loss """bceaftersigmoid""" +369 71 regularizer """no""" +369 71 optimizer """adadelta""" +369 71 training_loop """owa""" +369 71 negative_sampler """basic""" +369 71 evaluator """rankbased""" +369 72 dataset """kinships""" +369 72 model """kg2e""" +369 72 loss """bceaftersigmoid""" +369 72 regularizer """no""" +369 72 optimizer """adadelta""" +369 72 training_loop """owa""" +369 72 negative_sampler """basic""" +369 72 evaluator """rankbased""" +369 73 dataset """kinships""" +369 73 model """kg2e""" +369 73 loss """bceaftersigmoid""" +369 73 regularizer """no""" +369 73 optimizer """adadelta""" +369 73 training_loop """owa""" +369 73 negative_sampler """basic""" +369 73 evaluator """rankbased""" +369 74 dataset """kinships""" +369 74 model """kg2e""" +369 74 loss """bceaftersigmoid""" +369 74 regularizer """no""" +369 74 optimizer """adadelta""" +369 74 training_loop """owa""" +369 74 negative_sampler """basic""" +369 74 evaluator """rankbased""" +369 75 dataset """kinships""" +369 75 model """kg2e""" +369 75 loss """bceaftersigmoid""" +369 75 regularizer """no""" +369 75 optimizer """adadelta""" +369 75 training_loop """owa""" +369 75 negative_sampler """basic""" +369 75 evaluator """rankbased""" +369 76 dataset """kinships""" +369 76 model """kg2e""" +369 76 loss """bceaftersigmoid""" +369 76 regularizer """no""" +369 76 optimizer """adadelta""" +369 76 training_loop """owa""" +369 76 negative_sampler """basic""" +369 76 evaluator """rankbased""" +369 77 dataset """kinships""" +369 77 model """kg2e""" +369 77 loss """bceaftersigmoid""" +369 77 regularizer 
"""no""" +369 77 optimizer """adadelta""" +369 77 training_loop """owa""" +369 77 negative_sampler """basic""" +369 77 evaluator """rankbased""" +369 78 dataset """kinships""" +369 78 model """kg2e""" +369 78 loss """bceaftersigmoid""" +369 78 regularizer """no""" +369 78 optimizer """adadelta""" +369 78 training_loop """owa""" +369 78 negative_sampler """basic""" +369 78 evaluator """rankbased""" +369 79 dataset """kinships""" +369 79 model """kg2e""" +369 79 loss """bceaftersigmoid""" +369 79 regularizer """no""" +369 79 optimizer """adadelta""" +369 79 training_loop """owa""" +369 79 negative_sampler """basic""" +369 79 evaluator """rankbased""" +369 80 dataset """kinships""" +369 80 model """kg2e""" +369 80 loss """bceaftersigmoid""" +369 80 regularizer """no""" +369 80 optimizer """adadelta""" +369 80 training_loop """owa""" +369 80 negative_sampler """basic""" +369 80 evaluator """rankbased""" +369 81 dataset """kinships""" +369 81 model """kg2e""" +369 81 loss """bceaftersigmoid""" +369 81 regularizer """no""" +369 81 optimizer """adadelta""" +369 81 training_loop """owa""" +369 81 negative_sampler """basic""" +369 81 evaluator """rankbased""" +369 82 dataset """kinships""" +369 82 model """kg2e""" +369 82 loss """bceaftersigmoid""" +369 82 regularizer """no""" +369 82 optimizer """adadelta""" +369 82 training_loop """owa""" +369 82 negative_sampler """basic""" +369 82 evaluator """rankbased""" +369 83 dataset """kinships""" +369 83 model """kg2e""" +369 83 loss """bceaftersigmoid""" +369 83 regularizer """no""" +369 83 optimizer """adadelta""" +369 83 training_loop """owa""" +369 83 negative_sampler """basic""" +369 83 evaluator """rankbased""" +369 84 dataset """kinships""" +369 84 model """kg2e""" +369 84 loss """bceaftersigmoid""" +369 84 regularizer """no""" +369 84 optimizer """adadelta""" +369 84 training_loop """owa""" +369 84 negative_sampler """basic""" +369 84 evaluator """rankbased""" +369 85 dataset """kinships""" +369 85 model """kg2e""" +369 
85 loss """bceaftersigmoid""" +369 85 regularizer """no""" +369 85 optimizer """adadelta""" +369 85 training_loop """owa""" +369 85 negative_sampler """basic""" +369 85 evaluator """rankbased""" +369 86 dataset """kinships""" +369 86 model """kg2e""" +369 86 loss """bceaftersigmoid""" +369 86 regularizer """no""" +369 86 optimizer """adadelta""" +369 86 training_loop """owa""" +369 86 negative_sampler """basic""" +369 86 evaluator """rankbased""" +369 87 dataset """kinships""" +369 87 model """kg2e""" +369 87 loss """bceaftersigmoid""" +369 87 regularizer """no""" +369 87 optimizer """adadelta""" +369 87 training_loop """owa""" +369 87 negative_sampler """basic""" +369 87 evaluator """rankbased""" +369 88 dataset """kinships""" +369 88 model """kg2e""" +369 88 loss """bceaftersigmoid""" +369 88 regularizer """no""" +369 88 optimizer """adadelta""" +369 88 training_loop """owa""" +369 88 negative_sampler """basic""" +369 88 evaluator """rankbased""" +369 89 dataset """kinships""" +369 89 model """kg2e""" +369 89 loss """bceaftersigmoid""" +369 89 regularizer """no""" +369 89 optimizer """adadelta""" +369 89 training_loop """owa""" +369 89 negative_sampler """basic""" +369 89 evaluator """rankbased""" +369 90 dataset """kinships""" +369 90 model """kg2e""" +369 90 loss """bceaftersigmoid""" +369 90 regularizer """no""" +369 90 optimizer """adadelta""" +369 90 training_loop """owa""" +369 90 negative_sampler """basic""" +369 90 evaluator """rankbased""" +369 91 dataset """kinships""" +369 91 model """kg2e""" +369 91 loss """bceaftersigmoid""" +369 91 regularizer """no""" +369 91 optimizer """adadelta""" +369 91 training_loop """owa""" +369 91 negative_sampler """basic""" +369 91 evaluator """rankbased""" +369 92 dataset """kinships""" +369 92 model """kg2e""" +369 92 loss """bceaftersigmoid""" +369 92 regularizer """no""" +369 92 optimizer """adadelta""" +369 92 training_loop """owa""" +369 92 negative_sampler """basic""" +369 92 evaluator """rankbased""" +369 93 
dataset """kinships""" +369 93 model """kg2e""" +369 93 loss """bceaftersigmoid""" +369 93 regularizer """no""" +369 93 optimizer """adadelta""" +369 93 training_loop """owa""" +369 93 negative_sampler """basic""" +369 93 evaluator """rankbased""" +369 94 dataset """kinships""" +369 94 model """kg2e""" +369 94 loss """bceaftersigmoid""" +369 94 regularizer """no""" +369 94 optimizer """adadelta""" +369 94 training_loop """owa""" +369 94 negative_sampler """basic""" +369 94 evaluator """rankbased""" +369 95 dataset """kinships""" +369 95 model """kg2e""" +369 95 loss """bceaftersigmoid""" +369 95 regularizer """no""" +369 95 optimizer """adadelta""" +369 95 training_loop """owa""" +369 95 negative_sampler """basic""" +369 95 evaluator """rankbased""" +369 96 dataset """kinships""" +369 96 model """kg2e""" +369 96 loss """bceaftersigmoid""" +369 96 regularizer """no""" +369 96 optimizer """adadelta""" +369 96 training_loop """owa""" +369 96 negative_sampler """basic""" +369 96 evaluator """rankbased""" +369 97 dataset """kinships""" +369 97 model """kg2e""" +369 97 loss """bceaftersigmoid""" +369 97 regularizer """no""" +369 97 optimizer """adadelta""" +369 97 training_loop """owa""" +369 97 negative_sampler """basic""" +369 97 evaluator """rankbased""" +369 98 dataset """kinships""" +369 98 model """kg2e""" +369 98 loss """bceaftersigmoid""" +369 98 regularizer """no""" +369 98 optimizer """adadelta""" +369 98 training_loop """owa""" +369 98 negative_sampler """basic""" +369 98 evaluator """rankbased""" +369 99 dataset """kinships""" +369 99 model """kg2e""" +369 99 loss """bceaftersigmoid""" +369 99 regularizer """no""" +369 99 optimizer """adadelta""" +369 99 training_loop """owa""" +369 99 negative_sampler """basic""" +369 99 evaluator """rankbased""" +369 100 dataset """kinships""" +369 100 model """kg2e""" +369 100 loss """bceaftersigmoid""" +369 100 regularizer """no""" +369 100 optimizer """adadelta""" +369 100 training_loop """owa""" +369 100 
negative_sampler """basic""" +369 100 evaluator """rankbased""" +370 1 model.embedding_dim 2.0 +370 1 model.c_min 0.08931295024905486 +370 1 model.c_max 8.05312928655147 +370 1 negative_sampler.num_negs_per_pos 93.0 +370 1 training.batch_size 0.0 +370 2 model.embedding_dim 2.0 +370 2 model.c_min 0.019693838437606054 +370 2 model.c_max 1.9939932466382393 +370 2 negative_sampler.num_negs_per_pos 17.0 +370 2 training.batch_size 0.0 +370 3 model.embedding_dim 2.0 +370 3 model.c_min 0.021005998926362995 +370 3 model.c_max 1.3490884619323895 +370 3 negative_sampler.num_negs_per_pos 84.0 +370 3 training.batch_size 2.0 +370 4 model.embedding_dim 1.0 +370 4 model.c_min 0.011282640167880479 +370 4 model.c_max 7.803062801055078 +370 4 negative_sampler.num_negs_per_pos 55.0 +370 4 training.batch_size 2.0 +370 5 model.embedding_dim 1.0 +370 5 model.c_min 0.029022047268652852 +370 5 model.c_max 1.6846679466387449 +370 5 negative_sampler.num_negs_per_pos 91.0 +370 5 training.batch_size 1.0 +370 6 model.embedding_dim 2.0 +370 6 model.c_min 0.026774932468183167 +370 6 model.c_max 4.0340293181446825 +370 6 negative_sampler.num_negs_per_pos 27.0 +370 6 training.batch_size 0.0 +370 7 model.embedding_dim 0.0 +370 7 model.c_min 0.012848657575070031 +370 7 model.c_max 8.693502998787531 +370 7 negative_sampler.num_negs_per_pos 99.0 +370 7 training.batch_size 1.0 +370 8 model.embedding_dim 2.0 +370 8 model.c_min 0.08334005089018968 +370 8 model.c_max 3.7962750867252417 +370 8 negative_sampler.num_negs_per_pos 44.0 +370 8 training.batch_size 1.0 +370 9 model.embedding_dim 1.0 +370 9 model.c_min 0.023304626374962542 +370 9 model.c_max 4.209345455941173 +370 9 negative_sampler.num_negs_per_pos 21.0 +370 9 training.batch_size 1.0 +370 10 model.embedding_dim 2.0 +370 10 model.c_min 0.038975585496168294 +370 10 model.c_max 9.848727999544504 +370 10 negative_sampler.num_negs_per_pos 17.0 +370 10 training.batch_size 0.0 +370 11 model.embedding_dim 1.0 +370 11 model.c_min 0.017363280615572414 +370 
11 model.c_max 7.911382143442484 +370 11 negative_sampler.num_negs_per_pos 64.0 +370 11 training.batch_size 0.0 +370 12 model.embedding_dim 0.0 +370 12 model.c_min 0.047484678835272055 +370 12 model.c_max 2.7626211693598735 +370 12 negative_sampler.num_negs_per_pos 69.0 +370 12 training.batch_size 1.0 +370 13 model.embedding_dim 2.0 +370 13 model.c_min 0.013302440789011327 +370 13 model.c_max 2.7748673331792446 +370 13 negative_sampler.num_negs_per_pos 15.0 +370 13 training.batch_size 1.0 +370 14 model.embedding_dim 1.0 +370 14 model.c_min 0.011739416470634147 +370 14 model.c_max 1.7225983511984353 +370 14 negative_sampler.num_negs_per_pos 79.0 +370 14 training.batch_size 2.0 +370 15 model.embedding_dim 2.0 +370 15 model.c_min 0.046735856936958226 +370 15 model.c_max 3.3636625008557437 +370 15 negative_sampler.num_negs_per_pos 81.0 +370 15 training.batch_size 1.0 +370 16 model.embedding_dim 1.0 +370 16 model.c_min 0.0683674441396006 +370 16 model.c_max 6.607526930012991 +370 16 negative_sampler.num_negs_per_pos 11.0 +370 16 training.batch_size 0.0 +370 17 model.embedding_dim 0.0 +370 17 model.c_min 0.02920613811237342 +370 17 model.c_max 1.3138626350170033 +370 17 negative_sampler.num_negs_per_pos 83.0 +370 17 training.batch_size 1.0 +370 18 model.embedding_dim 0.0 +370 18 model.c_min 0.013247516838260906 +370 18 model.c_max 1.7241154737590767 +370 18 negative_sampler.num_negs_per_pos 73.0 +370 18 training.batch_size 2.0 +370 19 model.embedding_dim 2.0 +370 19 model.c_min 0.0954175932237391 +370 19 model.c_max 6.480757582354189 +370 19 negative_sampler.num_negs_per_pos 8.0 +370 19 training.batch_size 0.0 +370 20 model.embedding_dim 2.0 +370 20 model.c_min 0.021560171574978092 +370 20 model.c_max 2.3934140083918454 +370 20 negative_sampler.num_negs_per_pos 59.0 +370 20 training.batch_size 2.0 +370 21 model.embedding_dim 0.0 +370 21 model.c_min 0.0338912513695222 +370 21 model.c_max 4.8505591655406315 +370 21 negative_sampler.num_negs_per_pos 85.0 +370 21 
training.batch_size 1.0 +370 22 model.embedding_dim 1.0 +370 22 model.c_min 0.08858769367021872 +370 22 model.c_max 4.871391065696601 +370 22 negative_sampler.num_negs_per_pos 41.0 +370 22 training.batch_size 1.0 +370 23 model.embedding_dim 0.0 +370 23 model.c_min 0.030911117407355682 +370 23 model.c_max 9.1372948334096 +370 23 negative_sampler.num_negs_per_pos 38.0 +370 23 training.batch_size 0.0 +370 24 model.embedding_dim 2.0 +370 24 model.c_min 0.02069522204013034 +370 24 model.c_max 9.752278008747979 +370 24 negative_sampler.num_negs_per_pos 50.0 +370 24 training.batch_size 0.0 +370 25 model.embedding_dim 2.0 +370 25 model.c_min 0.027275242316412363 +370 25 model.c_max 5.609379207117844 +370 25 negative_sampler.num_negs_per_pos 60.0 +370 25 training.batch_size 1.0 +370 26 model.embedding_dim 0.0 +370 26 model.c_min 0.09422925529388816 +370 26 model.c_max 5.459990303102727 +370 26 negative_sampler.num_negs_per_pos 58.0 +370 26 training.batch_size 0.0 +370 27 model.embedding_dim 2.0 +370 27 model.c_min 0.012640920088713736 +370 27 model.c_max 5.444224444378182 +370 27 negative_sampler.num_negs_per_pos 13.0 +370 27 training.batch_size 0.0 +370 28 model.embedding_dim 0.0 +370 28 model.c_min 0.0983988252981112 +370 28 model.c_max 5.8759480267387785 +370 28 negative_sampler.num_negs_per_pos 6.0 +370 28 training.batch_size 1.0 +370 29 model.embedding_dim 0.0 +370 29 model.c_min 0.08348671586463953 +370 29 model.c_max 9.401875434564964 +370 29 negative_sampler.num_negs_per_pos 78.0 +370 29 training.batch_size 0.0 +370 30 model.embedding_dim 1.0 +370 30 model.c_min 0.06771750522463024 +370 30 model.c_max 6.194076526972635 +370 30 negative_sampler.num_negs_per_pos 7.0 +370 30 training.batch_size 1.0 +370 31 model.embedding_dim 2.0 +370 31 model.c_min 0.024726998356867275 +370 31 model.c_max 4.492530187704396 +370 31 negative_sampler.num_negs_per_pos 98.0 +370 31 training.batch_size 0.0 +370 32 model.embedding_dim 0.0 +370 32 model.c_min 0.05562733925366465 +370 32 
model.c_max 8.097353681936875 +370 32 negative_sampler.num_negs_per_pos 6.0 +370 32 training.batch_size 1.0 +370 33 model.embedding_dim 1.0 +370 33 model.c_min 0.019979134787972287 +370 33 model.c_max 8.787383019895792 +370 33 negative_sampler.num_negs_per_pos 81.0 +370 33 training.batch_size 2.0 +370 34 model.embedding_dim 2.0 +370 34 model.c_min 0.021526011364257906 +370 34 model.c_max 8.61729063472787 +370 34 negative_sampler.num_negs_per_pos 15.0 +370 34 training.batch_size 0.0 +370 35 model.embedding_dim 2.0 +370 35 model.c_min 0.011662294886132457 +370 35 model.c_max 9.059865546797658 +370 35 negative_sampler.num_negs_per_pos 55.0 +370 35 training.batch_size 2.0 +370 36 model.embedding_dim 2.0 +370 36 model.c_min 0.011409887992329025 +370 36 model.c_max 9.039710165505072 +370 36 negative_sampler.num_negs_per_pos 90.0 +370 36 training.batch_size 0.0 +370 37 model.embedding_dim 2.0 +370 37 model.c_min 0.06078941297723806 +370 37 model.c_max 8.657950426195153 +370 37 negative_sampler.num_negs_per_pos 2.0 +370 37 training.batch_size 1.0 +370 38 model.embedding_dim 0.0 +370 38 model.c_min 0.012904948704393923 +370 38 model.c_max 3.8915756273077142 +370 38 negative_sampler.num_negs_per_pos 6.0 +370 38 training.batch_size 2.0 +370 39 model.embedding_dim 2.0 +370 39 model.c_min 0.07179227591872943 +370 39 model.c_max 8.326137365274938 +370 39 negative_sampler.num_negs_per_pos 85.0 +370 39 training.batch_size 1.0 +370 40 model.embedding_dim 0.0 +370 40 model.c_min 0.0889567781641173 +370 40 model.c_max 5.060350809850367 +370 40 negative_sampler.num_negs_per_pos 47.0 +370 40 training.batch_size 2.0 +370 41 model.embedding_dim 2.0 +370 41 model.c_min 0.021097366151715428 +370 41 model.c_max 6.087492797969338 +370 41 negative_sampler.num_negs_per_pos 69.0 +370 41 training.batch_size 1.0 +370 42 model.embedding_dim 1.0 +370 42 model.c_min 0.03776955296377765 +370 42 model.c_max 4.902140618993008 +370 42 negative_sampler.num_negs_per_pos 39.0 +370 42 training.batch_size 
1.0 +370 43 model.embedding_dim 2.0 +370 43 model.c_min 0.07348901886431689 +370 43 model.c_max 6.545247724052226 +370 43 negative_sampler.num_negs_per_pos 82.0 +370 43 training.batch_size 0.0 +370 44 model.embedding_dim 0.0 +370 44 model.c_min 0.03775349708877471 +370 44 model.c_max 9.53000263911032 +370 44 negative_sampler.num_negs_per_pos 85.0 +370 44 training.batch_size 2.0 +370 45 model.embedding_dim 2.0 +370 45 model.c_min 0.03665467691682142 +370 45 model.c_max 4.717430350284995 +370 45 negative_sampler.num_negs_per_pos 75.0 +370 45 training.batch_size 0.0 +370 46 model.embedding_dim 2.0 +370 46 model.c_min 0.04497833082846381 +370 46 model.c_max 2.0435314276215677 +370 46 negative_sampler.num_negs_per_pos 47.0 +370 46 training.batch_size 2.0 +370 47 model.embedding_dim 2.0 +370 47 model.c_min 0.012731133834051243 +370 47 model.c_max 8.762505092278465 +370 47 negative_sampler.num_negs_per_pos 94.0 +370 47 training.batch_size 0.0 +370 48 model.embedding_dim 2.0 +370 48 model.c_min 0.024044375457426527 +370 48 model.c_max 2.0190570699938166 +370 48 negative_sampler.num_negs_per_pos 40.0 +370 48 training.batch_size 1.0 +370 49 model.embedding_dim 1.0 +370 49 model.c_min 0.017621320390762706 +370 49 model.c_max 4.499532939663941 +370 49 negative_sampler.num_negs_per_pos 26.0 +370 49 training.batch_size 0.0 +370 50 model.embedding_dim 1.0 +370 50 model.c_min 0.030647889145358646 +370 50 model.c_max 6.409175509669364 +370 50 negative_sampler.num_negs_per_pos 12.0 +370 50 training.batch_size 1.0 +370 51 model.embedding_dim 0.0 +370 51 model.c_min 0.04044514744803428 +370 51 model.c_max 4.032795360295719 +370 51 negative_sampler.num_negs_per_pos 34.0 +370 51 training.batch_size 1.0 +370 52 model.embedding_dim 0.0 +370 52 model.c_min 0.011047014679065237 +370 52 model.c_max 1.2659778486276547 +370 52 negative_sampler.num_negs_per_pos 90.0 +370 52 training.batch_size 0.0 +370 53 model.embedding_dim 0.0 +370 53 model.c_min 0.03036447007492884 +370 53 model.c_max 
8.145752050468282 +370 53 negative_sampler.num_negs_per_pos 12.0 +370 53 training.batch_size 1.0 +370 54 model.embedding_dim 1.0 +370 54 model.c_min 0.02660822291481753 +370 54 model.c_max 3.4232926844825675 +370 54 negative_sampler.num_negs_per_pos 3.0 +370 54 training.batch_size 2.0 +370 55 model.embedding_dim 2.0 +370 55 model.c_min 0.03304527403781712 +370 55 model.c_max 8.694533585423436 +370 55 negative_sampler.num_negs_per_pos 59.0 +370 55 training.batch_size 2.0 +370 56 model.embedding_dim 0.0 +370 56 model.c_min 0.08856711917834394 +370 56 model.c_max 5.0178849605671765 +370 56 negative_sampler.num_negs_per_pos 98.0 +370 56 training.batch_size 1.0 +370 57 model.embedding_dim 1.0 +370 57 model.c_min 0.01582653621253482 +370 57 model.c_max 2.4854516608933253 +370 57 negative_sampler.num_negs_per_pos 55.0 +370 57 training.batch_size 0.0 +370 58 model.embedding_dim 2.0 +370 58 model.c_min 0.07406382753852896 +370 58 model.c_max 6.230571511543802 +370 58 negative_sampler.num_negs_per_pos 36.0 +370 58 training.batch_size 2.0 +370 59 model.embedding_dim 2.0 +370 59 model.c_min 0.08159220178212778 +370 59 model.c_max 8.714180093916678 +370 59 negative_sampler.num_negs_per_pos 15.0 +370 59 training.batch_size 0.0 +370 60 model.embedding_dim 2.0 +370 60 model.c_min 0.013666326955125254 +370 60 model.c_max 9.66968261810189 +370 60 negative_sampler.num_negs_per_pos 65.0 +370 60 training.batch_size 0.0 +370 61 model.embedding_dim 0.0 +370 61 model.c_min 0.013234990488403795 +370 61 model.c_max 7.481594066545241 +370 61 negative_sampler.num_negs_per_pos 67.0 +370 61 training.batch_size 0.0 +370 62 model.embedding_dim 2.0 +370 62 model.c_min 0.020054190411799715 +370 62 model.c_max 6.457963192846018 +370 62 negative_sampler.num_negs_per_pos 16.0 +370 62 training.batch_size 2.0 +370 63 model.embedding_dim 0.0 +370 63 model.c_min 0.04753631010237155 +370 63 model.c_max 6.931059152546709 +370 63 negative_sampler.num_negs_per_pos 77.0 +370 63 training.batch_size 1.0 +370 64 
model.embedding_dim 1.0 +370 64 model.c_min 0.014500992637686002 +370 64 model.c_max 7.489600131612664 +370 64 negative_sampler.num_negs_per_pos 6.0 +370 64 training.batch_size 1.0 +370 65 model.embedding_dim 1.0 +370 65 model.c_min 0.0232997839118686 +370 65 model.c_max 7.29745240176285 +370 65 negative_sampler.num_negs_per_pos 91.0 +370 65 training.batch_size 0.0 +370 66 model.embedding_dim 1.0 +370 66 model.c_min 0.05528902192305489 +370 66 model.c_max 9.969886754976297 +370 66 negative_sampler.num_negs_per_pos 63.0 +370 66 training.batch_size 1.0 +370 67 model.embedding_dim 1.0 +370 67 model.c_min 0.01166123363244565 +370 67 model.c_max 3.008275661034864 +370 67 negative_sampler.num_negs_per_pos 65.0 +370 67 training.batch_size 0.0 +370 68 model.embedding_dim 0.0 +370 68 model.c_min 0.01785946931233742 +370 68 model.c_max 9.080963056657655 +370 68 negative_sampler.num_negs_per_pos 85.0 +370 68 training.batch_size 2.0 +370 69 model.embedding_dim 1.0 +370 69 model.c_min 0.09551087845107757 +370 69 model.c_max 6.050338103580023 +370 69 negative_sampler.num_negs_per_pos 19.0 +370 69 training.batch_size 2.0 +370 70 model.embedding_dim 1.0 +370 70 model.c_min 0.0467122779620428 +370 70 model.c_max 7.7373023650638775 +370 70 negative_sampler.num_negs_per_pos 11.0 +370 70 training.batch_size 0.0 +370 71 model.embedding_dim 1.0 +370 71 model.c_min 0.015699938668671743 +370 71 model.c_max 9.131450408472123 +370 71 negative_sampler.num_negs_per_pos 66.0 +370 71 training.batch_size 0.0 +370 72 model.embedding_dim 0.0 +370 72 model.c_min 0.042795844976024656 +370 72 model.c_max 8.06330789965686 +370 72 negative_sampler.num_negs_per_pos 49.0 +370 72 training.batch_size 0.0 +370 73 model.embedding_dim 1.0 +370 73 model.c_min 0.05772806824607495 +370 73 model.c_max 2.5026066275639485 +370 73 negative_sampler.num_negs_per_pos 24.0 +370 73 training.batch_size 0.0 +370 74 model.embedding_dim 2.0 +370 74 model.c_min 0.06146955487308594 +370 74 model.c_max 9.8961262021572 +370 74 
negative_sampler.num_negs_per_pos 50.0 +370 74 training.batch_size 2.0 +370 75 model.embedding_dim 2.0 +370 75 model.c_min 0.04464911100670233 +370 75 model.c_max 8.990458874424522 +370 75 negative_sampler.num_negs_per_pos 90.0 +370 75 training.batch_size 2.0 +370 76 model.embedding_dim 0.0 +370 76 model.c_min 0.03072002056698731 +370 76 model.c_max 5.281756813913701 +370 76 negative_sampler.num_negs_per_pos 46.0 +370 76 training.batch_size 1.0 +370 77 model.embedding_dim 0.0 +370 77 model.c_min 0.011921341746806223 +370 77 model.c_max 3.047817898325141 +370 77 negative_sampler.num_negs_per_pos 99.0 +370 77 training.batch_size 0.0 +370 78 model.embedding_dim 2.0 +370 78 model.c_min 0.01497000040704587 +370 78 model.c_max 6.612633519789159 +370 78 negative_sampler.num_negs_per_pos 14.0 +370 78 training.batch_size 2.0 +370 79 model.embedding_dim 1.0 +370 79 model.c_min 0.030323629559813704 +370 79 model.c_max 8.109475501792001 +370 79 negative_sampler.num_negs_per_pos 68.0 +370 79 training.batch_size 0.0 +370 80 model.embedding_dim 0.0 +370 80 model.c_min 0.06881204567634051 +370 80 model.c_max 4.17830295217986 +370 80 negative_sampler.num_negs_per_pos 75.0 +370 80 training.batch_size 2.0 +370 81 model.embedding_dim 1.0 +370 81 model.c_min 0.021654140807368095 +370 81 model.c_max 1.3459978311974101 +370 81 negative_sampler.num_negs_per_pos 92.0 +370 81 training.batch_size 0.0 +370 82 model.embedding_dim 1.0 +370 82 model.c_min 0.08311301402235018 +370 82 model.c_max 9.689231659151131 +370 82 negative_sampler.num_negs_per_pos 54.0 +370 82 training.batch_size 1.0 +370 83 model.embedding_dim 1.0 +370 83 model.c_min 0.04512942820254569 +370 83 model.c_max 5.382308823001392 +370 83 negative_sampler.num_negs_per_pos 91.0 +370 83 training.batch_size 2.0 +370 84 model.embedding_dim 1.0 +370 84 model.c_min 0.0334173520394488 +370 84 model.c_max 7.48481323841364 +370 84 negative_sampler.num_negs_per_pos 95.0 +370 84 training.batch_size 2.0 +370 85 model.embedding_dim 2.0 +370 
85 model.c_min 0.08981865566699901 +370 85 model.c_max 2.8208458815922874 +370 85 negative_sampler.num_negs_per_pos 98.0 +370 85 training.batch_size 1.0 +370 86 model.embedding_dim 0.0 +370 86 model.c_min 0.015071819023075701 +370 86 model.c_max 7.586575922572837 +370 86 negative_sampler.num_negs_per_pos 75.0 +370 86 training.batch_size 2.0 +370 87 model.embedding_dim 1.0 +370 87 model.c_min 0.024338381792936778 +370 87 model.c_max 4.642541980187025 +370 87 negative_sampler.num_negs_per_pos 97.0 +370 87 training.batch_size 0.0 +370 88 model.embedding_dim 2.0 +370 88 model.c_min 0.012874237662212759 +370 88 model.c_max 1.9434176287278768 +370 88 negative_sampler.num_negs_per_pos 61.0 +370 88 training.batch_size 1.0 +370 89 model.embedding_dim 2.0 +370 89 model.c_min 0.047260282815770056 +370 89 model.c_max 4.244400706734095 +370 89 negative_sampler.num_negs_per_pos 14.0 +370 89 training.batch_size 0.0 +370 90 model.embedding_dim 2.0 +370 90 model.c_min 0.053451372497132 +370 90 model.c_max 8.213189781871113 +370 90 negative_sampler.num_negs_per_pos 29.0 +370 90 training.batch_size 1.0 +370 91 model.embedding_dim 0.0 +370 91 model.c_min 0.012046194983563643 +370 91 model.c_max 1.076270836741835 +370 91 negative_sampler.num_negs_per_pos 8.0 +370 91 training.batch_size 1.0 +370 92 model.embedding_dim 2.0 +370 92 model.c_min 0.07144847340731289 +370 92 model.c_max 1.0739781230897418 +370 92 negative_sampler.num_negs_per_pos 6.0 +370 92 training.batch_size 1.0 +370 93 model.embedding_dim 2.0 +370 93 model.c_min 0.016769626499759095 +370 93 model.c_max 9.324867739289871 +370 93 negative_sampler.num_negs_per_pos 11.0 +370 93 training.batch_size 2.0 +370 94 model.embedding_dim 0.0 +370 94 model.c_min 0.04204373364553664 +370 94 model.c_max 1.0810726860839819 +370 94 negative_sampler.num_negs_per_pos 6.0 +370 94 training.batch_size 1.0 +370 95 model.embedding_dim 1.0 +370 95 model.c_min 0.0946411577705431 +370 95 model.c_max 8.673444141046854 +370 95 
negative_sampler.num_negs_per_pos 22.0 +370 95 training.batch_size 2.0 +370 96 model.embedding_dim 1.0 +370 96 model.c_min 0.014090954236363671 +370 96 model.c_max 9.16000463128518 +370 96 negative_sampler.num_negs_per_pos 21.0 +370 96 training.batch_size 2.0 +370 97 model.embedding_dim 1.0 +370 97 model.c_min 0.04040198513780993 +370 97 model.c_max 5.669812029880775 +370 97 negative_sampler.num_negs_per_pos 49.0 +370 97 training.batch_size 2.0 +370 98 model.embedding_dim 1.0 +370 98 model.c_min 0.07076393507005059 +370 98 model.c_max 7.996989177979499 +370 98 negative_sampler.num_negs_per_pos 45.0 +370 98 training.batch_size 0.0 +370 99 model.embedding_dim 1.0 +370 99 model.c_min 0.014238190760142965 +370 99 model.c_max 5.514579718418943 +370 99 negative_sampler.num_negs_per_pos 19.0 +370 99 training.batch_size 1.0 +370 100 model.embedding_dim 0.0 +370 100 model.c_min 0.08781726277791853 +370 100 model.c_max 2.4767572402628666 +370 100 negative_sampler.num_negs_per_pos 92.0 +370 100 training.batch_size 2.0 +370 1 dataset """kinships""" +370 1 model """kg2e""" +370 1 loss """softplus""" +370 1 regularizer """no""" +370 1 optimizer """adadelta""" +370 1 training_loop """owa""" +370 1 negative_sampler """basic""" +370 1 evaluator """rankbased""" +370 2 dataset """kinships""" +370 2 model """kg2e""" +370 2 loss """softplus""" +370 2 regularizer """no""" +370 2 optimizer """adadelta""" +370 2 training_loop """owa""" +370 2 negative_sampler """basic""" +370 2 evaluator """rankbased""" +370 3 dataset """kinships""" +370 3 model """kg2e""" +370 3 loss """softplus""" +370 3 regularizer """no""" +370 3 optimizer """adadelta""" +370 3 training_loop """owa""" +370 3 negative_sampler """basic""" +370 3 evaluator """rankbased""" +370 4 dataset """kinships""" +370 4 model """kg2e""" +370 4 loss """softplus""" +370 4 regularizer """no""" +370 4 optimizer """adadelta""" +370 4 training_loop """owa""" +370 4 negative_sampler """basic""" +370 4 evaluator """rankbased""" +370 5 
dataset """kinships""" +370 5 model """kg2e""" +370 5 loss """softplus""" +370 5 regularizer """no""" +370 5 optimizer """adadelta""" +370 5 training_loop """owa""" +370 5 negative_sampler """basic""" +370 5 evaluator """rankbased""" +370 6 dataset """kinships""" +370 6 model """kg2e""" +370 6 loss """softplus""" +370 6 regularizer """no""" +370 6 optimizer """adadelta""" +370 6 training_loop """owa""" +370 6 negative_sampler """basic""" +370 6 evaluator """rankbased""" +370 7 dataset """kinships""" +370 7 model """kg2e""" +370 7 loss """softplus""" +370 7 regularizer """no""" +370 7 optimizer """adadelta""" +370 7 training_loop """owa""" +370 7 negative_sampler """basic""" +370 7 evaluator """rankbased""" +370 8 dataset """kinships""" +370 8 model """kg2e""" +370 8 loss """softplus""" +370 8 regularizer """no""" +370 8 optimizer """adadelta""" +370 8 training_loop """owa""" +370 8 negative_sampler """basic""" +370 8 evaluator """rankbased""" +370 9 dataset """kinships""" +370 9 model """kg2e""" +370 9 loss """softplus""" +370 9 regularizer """no""" +370 9 optimizer """adadelta""" +370 9 training_loop """owa""" +370 9 negative_sampler """basic""" +370 9 evaluator """rankbased""" +370 10 dataset """kinships""" +370 10 model """kg2e""" +370 10 loss """softplus""" +370 10 regularizer """no""" +370 10 optimizer """adadelta""" +370 10 training_loop """owa""" +370 10 negative_sampler """basic""" +370 10 evaluator """rankbased""" +370 11 dataset """kinships""" +370 11 model """kg2e""" +370 11 loss """softplus""" +370 11 regularizer """no""" +370 11 optimizer """adadelta""" +370 11 training_loop """owa""" +370 11 negative_sampler """basic""" +370 11 evaluator """rankbased""" +370 12 dataset """kinships""" +370 12 model """kg2e""" +370 12 loss """softplus""" +370 12 regularizer """no""" +370 12 optimizer """adadelta""" +370 12 training_loop """owa""" +370 12 negative_sampler """basic""" +370 12 evaluator """rankbased""" +370 13 dataset """kinships""" +370 13 model 
"""kg2e""" +370 13 loss """softplus""" +370 13 regularizer """no""" +370 13 optimizer """adadelta""" +370 13 training_loop """owa""" +370 13 negative_sampler """basic""" +370 13 evaluator """rankbased""" +370 14 dataset """kinships""" +370 14 model """kg2e""" +370 14 loss """softplus""" +370 14 regularizer """no""" +370 14 optimizer """adadelta""" +370 14 training_loop """owa""" +370 14 negative_sampler """basic""" +370 14 evaluator """rankbased""" +370 15 dataset """kinships""" +370 15 model """kg2e""" +370 15 loss """softplus""" +370 15 regularizer """no""" +370 15 optimizer """adadelta""" +370 15 training_loop """owa""" +370 15 negative_sampler """basic""" +370 15 evaluator """rankbased""" +370 16 dataset """kinships""" +370 16 model """kg2e""" +370 16 loss """softplus""" +370 16 regularizer """no""" +370 16 optimizer """adadelta""" +370 16 training_loop """owa""" +370 16 negative_sampler """basic""" +370 16 evaluator """rankbased""" +370 17 dataset """kinships""" +370 17 model """kg2e""" +370 17 loss """softplus""" +370 17 regularizer """no""" +370 17 optimizer """adadelta""" +370 17 training_loop """owa""" +370 17 negative_sampler """basic""" +370 17 evaluator """rankbased""" +370 18 dataset """kinships""" +370 18 model """kg2e""" +370 18 loss """softplus""" +370 18 regularizer """no""" +370 18 optimizer """adadelta""" +370 18 training_loop """owa""" +370 18 negative_sampler """basic""" +370 18 evaluator """rankbased""" +370 19 dataset """kinships""" +370 19 model """kg2e""" +370 19 loss """softplus""" +370 19 regularizer """no""" +370 19 optimizer """adadelta""" +370 19 training_loop """owa""" +370 19 negative_sampler """basic""" +370 19 evaluator """rankbased""" +370 20 dataset """kinships""" +370 20 model """kg2e""" +370 20 loss """softplus""" +370 20 regularizer """no""" +370 20 optimizer """adadelta""" +370 20 training_loop """owa""" +370 20 negative_sampler """basic""" +370 20 evaluator """rankbased""" +370 21 dataset """kinships""" +370 21 model 
"""kg2e""" +370 21 loss """softplus""" +370 21 regularizer """no""" +370 21 optimizer """adadelta""" +370 21 training_loop """owa""" +370 21 negative_sampler """basic""" +370 21 evaluator """rankbased""" +370 22 dataset """kinships""" +370 22 model """kg2e""" +370 22 loss """softplus""" +370 22 regularizer """no""" +370 22 optimizer """adadelta""" +370 22 training_loop """owa""" +370 22 negative_sampler """basic""" +370 22 evaluator """rankbased""" +370 23 dataset """kinships""" +370 23 model """kg2e""" +370 23 loss """softplus""" +370 23 regularizer """no""" +370 23 optimizer """adadelta""" +370 23 training_loop """owa""" +370 23 negative_sampler """basic""" +370 23 evaluator """rankbased""" +370 24 dataset """kinships""" +370 24 model """kg2e""" +370 24 loss """softplus""" +370 24 regularizer """no""" +370 24 optimizer """adadelta""" +370 24 training_loop """owa""" +370 24 negative_sampler """basic""" +370 24 evaluator """rankbased""" +370 25 dataset """kinships""" +370 25 model """kg2e""" +370 25 loss """softplus""" +370 25 regularizer """no""" +370 25 optimizer """adadelta""" +370 25 training_loop """owa""" +370 25 negative_sampler """basic""" +370 25 evaluator """rankbased""" +370 26 dataset """kinships""" +370 26 model """kg2e""" +370 26 loss """softplus""" +370 26 regularizer """no""" +370 26 optimizer """adadelta""" +370 26 training_loop """owa""" +370 26 negative_sampler """basic""" +370 26 evaluator """rankbased""" +370 27 dataset """kinships""" +370 27 model """kg2e""" +370 27 loss """softplus""" +370 27 regularizer """no""" +370 27 optimizer """adadelta""" +370 27 training_loop """owa""" +370 27 negative_sampler """basic""" +370 27 evaluator """rankbased""" +370 28 dataset """kinships""" +370 28 model """kg2e""" +370 28 loss """softplus""" +370 28 regularizer """no""" +370 28 optimizer """adadelta""" +370 28 training_loop """owa""" +370 28 negative_sampler """basic""" +370 28 evaluator """rankbased""" +370 29 dataset """kinships""" +370 29 model 
"""kg2e""" +370 29 loss """softplus""" +370 29 regularizer """no""" +370 29 optimizer """adadelta""" +370 29 training_loop """owa""" +370 29 negative_sampler """basic""" +370 29 evaluator """rankbased""" +370 30 dataset """kinships""" +370 30 model """kg2e""" +370 30 loss """softplus""" +370 30 regularizer """no""" +370 30 optimizer """adadelta""" +370 30 training_loop """owa""" +370 30 negative_sampler """basic""" +370 30 evaluator """rankbased""" +370 31 dataset """kinships""" +370 31 model """kg2e""" +370 31 loss """softplus""" +370 31 regularizer """no""" +370 31 optimizer """adadelta""" +370 31 training_loop """owa""" +370 31 negative_sampler """basic""" +370 31 evaluator """rankbased""" +370 32 dataset """kinships""" +370 32 model """kg2e""" +370 32 loss """softplus""" +370 32 regularizer """no""" +370 32 optimizer """adadelta""" +370 32 training_loop """owa""" +370 32 negative_sampler """basic""" +370 32 evaluator """rankbased""" +370 33 dataset """kinships""" +370 33 model """kg2e""" +370 33 loss """softplus""" +370 33 regularizer """no""" +370 33 optimizer """adadelta""" +370 33 training_loop """owa""" +370 33 negative_sampler """basic""" +370 33 evaluator """rankbased""" +370 34 dataset """kinships""" +370 34 model """kg2e""" +370 34 loss """softplus""" +370 34 regularizer """no""" +370 34 optimizer """adadelta""" +370 34 training_loop """owa""" +370 34 negative_sampler """basic""" +370 34 evaluator """rankbased""" +370 35 dataset """kinships""" +370 35 model """kg2e""" +370 35 loss """softplus""" +370 35 regularizer """no""" +370 35 optimizer """adadelta""" +370 35 training_loop """owa""" +370 35 negative_sampler """basic""" +370 35 evaluator """rankbased""" +370 36 dataset """kinships""" +370 36 model """kg2e""" +370 36 loss """softplus""" +370 36 regularizer """no""" +370 36 optimizer """adadelta""" +370 36 training_loop """owa""" +370 36 negative_sampler """basic""" +370 36 evaluator """rankbased""" +370 37 dataset """kinships""" +370 37 model 
"""kg2e""" +370 37 loss """softplus""" +370 37 regularizer """no""" +370 37 optimizer """adadelta""" +370 37 training_loop """owa""" +370 37 negative_sampler """basic""" +370 37 evaluator """rankbased""" +370 38 dataset """kinships""" +370 38 model """kg2e""" +370 38 loss """softplus""" +370 38 regularizer """no""" +370 38 optimizer """adadelta""" +370 38 training_loop """owa""" +370 38 negative_sampler """basic""" +370 38 evaluator """rankbased""" +370 39 dataset """kinships""" +370 39 model """kg2e""" +370 39 loss """softplus""" +370 39 regularizer """no""" +370 39 optimizer """adadelta""" +370 39 training_loop """owa""" +370 39 negative_sampler """basic""" +370 39 evaluator """rankbased""" +370 40 dataset """kinships""" +370 40 model """kg2e""" +370 40 loss """softplus""" +370 40 regularizer """no""" +370 40 optimizer """adadelta""" +370 40 training_loop """owa""" +370 40 negative_sampler """basic""" +370 40 evaluator """rankbased""" +370 41 dataset """kinships""" +370 41 model """kg2e""" +370 41 loss """softplus""" +370 41 regularizer """no""" +370 41 optimizer """adadelta""" +370 41 training_loop """owa""" +370 41 negative_sampler """basic""" +370 41 evaluator """rankbased""" +370 42 dataset """kinships""" +370 42 model """kg2e""" +370 42 loss """softplus""" +370 42 regularizer """no""" +370 42 optimizer """adadelta""" +370 42 training_loop """owa""" +370 42 negative_sampler """basic""" +370 42 evaluator """rankbased""" +370 43 dataset """kinships""" +370 43 model """kg2e""" +370 43 loss """softplus""" +370 43 regularizer """no""" +370 43 optimizer """adadelta""" +370 43 training_loop """owa""" +370 43 negative_sampler """basic""" +370 43 evaluator """rankbased""" +370 44 dataset """kinships""" +370 44 model """kg2e""" +370 44 loss """softplus""" +370 44 regularizer """no""" +370 44 optimizer """adadelta""" +370 44 training_loop """owa""" +370 44 negative_sampler """basic""" +370 44 evaluator """rankbased""" +370 45 dataset """kinships""" +370 45 model 
"""kg2e""" +370 45 loss """softplus""" +370 45 regularizer """no""" +370 45 optimizer """adadelta""" +370 45 training_loop """owa""" +370 45 negative_sampler """basic""" +370 45 evaluator """rankbased""" +370 46 dataset """kinships""" +370 46 model """kg2e""" +370 46 loss """softplus""" +370 46 regularizer """no""" +370 46 optimizer """adadelta""" +370 46 training_loop """owa""" +370 46 negative_sampler """basic""" +370 46 evaluator """rankbased""" +370 47 dataset """kinships""" +370 47 model """kg2e""" +370 47 loss """softplus""" +370 47 regularizer """no""" +370 47 optimizer """adadelta""" +370 47 training_loop """owa""" +370 47 negative_sampler """basic""" +370 47 evaluator """rankbased""" +370 48 dataset """kinships""" +370 48 model """kg2e""" +370 48 loss """softplus""" +370 48 regularizer """no""" +370 48 optimizer """adadelta""" +370 48 training_loop """owa""" +370 48 negative_sampler """basic""" +370 48 evaluator """rankbased""" +370 49 dataset """kinships""" +370 49 model """kg2e""" +370 49 loss """softplus""" +370 49 regularizer """no""" +370 49 optimizer """adadelta""" +370 49 training_loop """owa""" +370 49 negative_sampler """basic""" +370 49 evaluator """rankbased""" +370 50 dataset """kinships""" +370 50 model """kg2e""" +370 50 loss """softplus""" +370 50 regularizer """no""" +370 50 optimizer """adadelta""" +370 50 training_loop """owa""" +370 50 negative_sampler """basic""" +370 50 evaluator """rankbased""" +370 51 dataset """kinships""" +370 51 model """kg2e""" +370 51 loss """softplus""" +370 51 regularizer """no""" +370 51 optimizer """adadelta""" +370 51 training_loop """owa""" +370 51 negative_sampler """basic""" +370 51 evaluator """rankbased""" +370 52 dataset """kinships""" +370 52 model """kg2e""" +370 52 loss """softplus""" +370 52 regularizer """no""" +370 52 optimizer """adadelta""" +370 52 training_loop """owa""" +370 52 negative_sampler """basic""" +370 52 evaluator """rankbased""" +370 53 dataset """kinships""" +370 53 model 
"""kg2e""" +370 53 loss """softplus""" +370 53 regularizer """no""" +370 53 optimizer """adadelta""" +370 53 training_loop """owa""" +370 53 negative_sampler """basic""" +370 53 evaluator """rankbased""" +370 54 dataset """kinships""" +370 54 model """kg2e""" +370 54 loss """softplus""" +370 54 regularizer """no""" +370 54 optimizer """adadelta""" +370 54 training_loop """owa""" +370 54 negative_sampler """basic""" +370 54 evaluator """rankbased""" +370 55 dataset """kinships""" +370 55 model """kg2e""" +370 55 loss """softplus""" +370 55 regularizer """no""" +370 55 optimizer """adadelta""" +370 55 training_loop """owa""" +370 55 negative_sampler """basic""" +370 55 evaluator """rankbased""" +370 56 dataset """kinships""" +370 56 model """kg2e""" +370 56 loss """softplus""" +370 56 regularizer """no""" +370 56 optimizer """adadelta""" +370 56 training_loop """owa""" +370 56 negative_sampler """basic""" +370 56 evaluator """rankbased""" +370 57 dataset """kinships""" +370 57 model """kg2e""" +370 57 loss """softplus""" +370 57 regularizer """no""" +370 57 optimizer """adadelta""" +370 57 training_loop """owa""" +370 57 negative_sampler """basic""" +370 57 evaluator """rankbased""" +370 58 dataset """kinships""" +370 58 model """kg2e""" +370 58 loss """softplus""" +370 58 regularizer """no""" +370 58 optimizer """adadelta""" +370 58 training_loop """owa""" +370 58 negative_sampler """basic""" +370 58 evaluator """rankbased""" +370 59 dataset """kinships""" +370 59 model """kg2e""" +370 59 loss """softplus""" +370 59 regularizer """no""" +370 59 optimizer """adadelta""" +370 59 training_loop """owa""" +370 59 negative_sampler """basic""" +370 59 evaluator """rankbased""" +370 60 dataset """kinships""" +370 60 model """kg2e""" +370 60 loss """softplus""" +370 60 regularizer """no""" +370 60 optimizer """adadelta""" +370 60 training_loop """owa""" +370 60 negative_sampler """basic""" +370 60 evaluator """rankbased""" +370 61 dataset """kinships""" +370 61 model 
"""kg2e""" +370 61 loss """softplus""" +370 61 regularizer """no""" +370 61 optimizer """adadelta""" +370 61 training_loop """owa""" +370 61 negative_sampler """basic""" +370 61 evaluator """rankbased""" +370 62 dataset """kinships""" +370 62 model """kg2e""" +370 62 loss """softplus""" +370 62 regularizer """no""" +370 62 optimizer """adadelta""" +370 62 training_loop """owa""" +370 62 negative_sampler """basic""" +370 62 evaluator """rankbased""" +370 63 dataset """kinships""" +370 63 model """kg2e""" +370 63 loss """softplus""" +370 63 regularizer """no""" +370 63 optimizer """adadelta""" +370 63 training_loop """owa""" +370 63 negative_sampler """basic""" +370 63 evaluator """rankbased""" +370 64 dataset """kinships""" +370 64 model """kg2e""" +370 64 loss """softplus""" +370 64 regularizer """no""" +370 64 optimizer """adadelta""" +370 64 training_loop """owa""" +370 64 negative_sampler """basic""" +370 64 evaluator """rankbased""" +370 65 dataset """kinships""" +370 65 model """kg2e""" +370 65 loss """softplus""" +370 65 regularizer """no""" +370 65 optimizer """adadelta""" +370 65 training_loop """owa""" +370 65 negative_sampler """basic""" +370 65 evaluator """rankbased""" +370 66 dataset """kinships""" +370 66 model """kg2e""" +370 66 loss """softplus""" +370 66 regularizer """no""" +370 66 optimizer """adadelta""" +370 66 training_loop """owa""" +370 66 negative_sampler """basic""" +370 66 evaluator """rankbased""" +370 67 dataset """kinships""" +370 67 model """kg2e""" +370 67 loss """softplus""" +370 67 regularizer """no""" +370 67 optimizer """adadelta""" +370 67 training_loop """owa""" +370 67 negative_sampler """basic""" +370 67 evaluator """rankbased""" +370 68 dataset """kinships""" +370 68 model """kg2e""" +370 68 loss """softplus""" +370 68 regularizer """no""" +370 68 optimizer """adadelta""" +370 68 training_loop """owa""" +370 68 negative_sampler """basic""" +370 68 evaluator """rankbased""" +370 69 dataset """kinships""" +370 69 model 
"""kg2e""" +370 69 loss """softplus""" +370 69 regularizer """no""" +370 69 optimizer """adadelta""" +370 69 training_loop """owa""" +370 69 negative_sampler """basic""" +370 69 evaluator """rankbased""" +370 70 dataset """kinships""" +370 70 model """kg2e""" +370 70 loss """softplus""" +370 70 regularizer """no""" +370 70 optimizer """adadelta""" +370 70 training_loop """owa""" +370 70 negative_sampler """basic""" +370 70 evaluator """rankbased""" +370 71 dataset """kinships""" +370 71 model """kg2e""" +370 71 loss """softplus""" +370 71 regularizer """no""" +370 71 optimizer """adadelta""" +370 71 training_loop """owa""" +370 71 negative_sampler """basic""" +370 71 evaluator """rankbased""" +370 72 dataset """kinships""" +370 72 model """kg2e""" +370 72 loss """softplus""" +370 72 regularizer """no""" +370 72 optimizer """adadelta""" +370 72 training_loop """owa""" +370 72 negative_sampler """basic""" +370 72 evaluator """rankbased""" +370 73 dataset """kinships""" +370 73 model """kg2e""" +370 73 loss """softplus""" +370 73 regularizer """no""" +370 73 optimizer """adadelta""" +370 73 training_loop """owa""" +370 73 negative_sampler """basic""" +370 73 evaluator """rankbased""" +370 74 dataset """kinships""" +370 74 model """kg2e""" +370 74 loss """softplus""" +370 74 regularizer """no""" +370 74 optimizer """adadelta""" +370 74 training_loop """owa""" +370 74 negative_sampler """basic""" +370 74 evaluator """rankbased""" +370 75 dataset """kinships""" +370 75 model """kg2e""" +370 75 loss """softplus""" +370 75 regularizer """no""" +370 75 optimizer """adadelta""" +370 75 training_loop """owa""" +370 75 negative_sampler """basic""" +370 75 evaluator """rankbased""" +370 76 dataset """kinships""" +370 76 model """kg2e""" +370 76 loss """softplus""" +370 76 regularizer """no""" +370 76 optimizer """adadelta""" +370 76 training_loop """owa""" +370 76 negative_sampler """basic""" +370 76 evaluator """rankbased""" +370 77 dataset """kinships""" +370 77 model 
"""kg2e""" +370 77 loss """softplus""" +370 77 regularizer """no""" +370 77 optimizer """adadelta""" +370 77 training_loop """owa""" +370 77 negative_sampler """basic""" +370 77 evaluator """rankbased""" +370 78 dataset """kinships""" +370 78 model """kg2e""" +370 78 loss """softplus""" +370 78 regularizer """no""" +370 78 optimizer """adadelta""" +370 78 training_loop """owa""" +370 78 negative_sampler """basic""" +370 78 evaluator """rankbased""" +370 79 dataset """kinships""" +370 79 model """kg2e""" +370 79 loss """softplus""" +370 79 regularizer """no""" +370 79 optimizer """adadelta""" +370 79 training_loop """owa""" +370 79 negative_sampler """basic""" +370 79 evaluator """rankbased""" +370 80 dataset """kinships""" +370 80 model """kg2e""" +370 80 loss """softplus""" +370 80 regularizer """no""" +370 80 optimizer """adadelta""" +370 80 training_loop """owa""" +370 80 negative_sampler """basic""" +370 80 evaluator """rankbased""" +370 81 dataset """kinships""" +370 81 model """kg2e""" +370 81 loss """softplus""" +370 81 regularizer """no""" +370 81 optimizer """adadelta""" +370 81 training_loop """owa""" +370 81 negative_sampler """basic""" +370 81 evaluator """rankbased""" +370 82 dataset """kinships""" +370 82 model """kg2e""" +370 82 loss """softplus""" +370 82 regularizer """no""" +370 82 optimizer """adadelta""" +370 82 training_loop """owa""" +370 82 negative_sampler """basic""" +370 82 evaluator """rankbased""" +370 83 dataset """kinships""" +370 83 model """kg2e""" +370 83 loss """softplus""" +370 83 regularizer """no""" +370 83 optimizer """adadelta""" +370 83 training_loop """owa""" +370 83 negative_sampler """basic""" +370 83 evaluator """rankbased""" +370 84 dataset """kinships""" +370 84 model """kg2e""" +370 84 loss """softplus""" +370 84 regularizer """no""" +370 84 optimizer """adadelta""" +370 84 training_loop """owa""" +370 84 negative_sampler """basic""" +370 84 evaluator """rankbased""" +370 85 dataset """kinships""" +370 85 model 
"""kg2e""" +370 85 loss """softplus""" +370 85 regularizer """no""" +370 85 optimizer """adadelta""" +370 85 training_loop """owa""" +370 85 negative_sampler """basic""" +370 85 evaluator """rankbased""" +370 86 dataset """kinships""" +370 86 model """kg2e""" +370 86 loss """softplus""" +370 86 regularizer """no""" +370 86 optimizer """adadelta""" +370 86 training_loop """owa""" +370 86 negative_sampler """basic""" +370 86 evaluator """rankbased""" +370 87 dataset """kinships""" +370 87 model """kg2e""" +370 87 loss """softplus""" +370 87 regularizer """no""" +370 87 optimizer """adadelta""" +370 87 training_loop """owa""" +370 87 negative_sampler """basic""" +370 87 evaluator """rankbased""" +370 88 dataset """kinships""" +370 88 model """kg2e""" +370 88 loss """softplus""" +370 88 regularizer """no""" +370 88 optimizer """adadelta""" +370 88 training_loop """owa""" +370 88 negative_sampler """basic""" +370 88 evaluator """rankbased""" +370 89 dataset """kinships""" +370 89 model """kg2e""" +370 89 loss """softplus""" +370 89 regularizer """no""" +370 89 optimizer """adadelta""" +370 89 training_loop """owa""" +370 89 negative_sampler """basic""" +370 89 evaluator """rankbased""" +370 90 dataset """kinships""" +370 90 model """kg2e""" +370 90 loss """softplus""" +370 90 regularizer """no""" +370 90 optimizer """adadelta""" +370 90 training_loop """owa""" +370 90 negative_sampler """basic""" +370 90 evaluator """rankbased""" +370 91 dataset """kinships""" +370 91 model """kg2e""" +370 91 loss """softplus""" +370 91 regularizer """no""" +370 91 optimizer """adadelta""" +370 91 training_loop """owa""" +370 91 negative_sampler """basic""" +370 91 evaluator """rankbased""" +370 92 dataset """kinships""" +370 92 model """kg2e""" +370 92 loss """softplus""" +370 92 regularizer """no""" +370 92 optimizer """adadelta""" +370 92 training_loop """owa""" +370 92 negative_sampler """basic""" +370 92 evaluator """rankbased""" +370 93 dataset """kinships""" +370 93 model 
"""kg2e""" +370 93 loss """softplus""" +370 93 regularizer """no""" +370 93 optimizer """adadelta""" +370 93 training_loop """owa""" +370 93 negative_sampler """basic""" +370 93 evaluator """rankbased""" +370 94 dataset """kinships""" +370 94 model """kg2e""" +370 94 loss """softplus""" +370 94 regularizer """no""" +370 94 optimizer """adadelta""" +370 94 training_loop """owa""" +370 94 negative_sampler """basic""" +370 94 evaluator """rankbased""" +370 95 dataset """kinships""" +370 95 model """kg2e""" +370 95 loss """softplus""" +370 95 regularizer """no""" +370 95 optimizer """adadelta""" +370 95 training_loop """owa""" +370 95 negative_sampler """basic""" +370 95 evaluator """rankbased""" +370 96 dataset """kinships""" +370 96 model """kg2e""" +370 96 loss """softplus""" +370 96 regularizer """no""" +370 96 optimizer """adadelta""" +370 96 training_loop """owa""" +370 96 negative_sampler """basic""" +370 96 evaluator """rankbased""" +370 97 dataset """kinships""" +370 97 model """kg2e""" +370 97 loss """softplus""" +370 97 regularizer """no""" +370 97 optimizer """adadelta""" +370 97 training_loop """owa""" +370 97 negative_sampler """basic""" +370 97 evaluator """rankbased""" +370 98 dataset """kinships""" +370 98 model """kg2e""" +370 98 loss """softplus""" +370 98 regularizer """no""" +370 98 optimizer """adadelta""" +370 98 training_loop """owa""" +370 98 negative_sampler """basic""" +370 98 evaluator """rankbased""" +370 99 dataset """kinships""" +370 99 model """kg2e""" +370 99 loss """softplus""" +370 99 regularizer """no""" +370 99 optimizer """adadelta""" +370 99 training_loop """owa""" +370 99 negative_sampler """basic""" +370 99 evaluator """rankbased""" +370 100 dataset """kinships""" +370 100 model """kg2e""" +370 100 loss """softplus""" +370 100 regularizer """no""" +370 100 optimizer """adadelta""" +370 100 training_loop """owa""" +370 100 negative_sampler """basic""" +370 100 evaluator """rankbased""" +371 1 model.embedding_dim 2.0 +371 1 
model.c_min 0.041971478824956714 +371 1 model.c_max 6.238787340171753 +371 1 loss.margin 0.6118962032365273 +371 1 negative_sampler.num_negs_per_pos 62.0 +371 1 training.batch_size 0.0 +371 2 model.embedding_dim 0.0 +371 2 model.c_min 0.07431594653034963 +371 2 model.c_max 8.354543680588325 +371 2 loss.margin 7.588069376911229 +371 2 negative_sampler.num_negs_per_pos 26.0 +371 2 training.batch_size 0.0 +371 3 model.embedding_dim 0.0 +371 3 model.c_min 0.05329388485081637 +371 3 model.c_max 3.4802659417476054 +371 3 loss.margin 7.065000578931393 +371 3 negative_sampler.num_negs_per_pos 21.0 +371 3 training.batch_size 2.0 +371 4 model.embedding_dim 1.0 +371 4 model.c_min 0.03867188500277344 +371 4 model.c_max 8.068177558497403 +371 4 loss.margin 8.619264014368316 +371 4 negative_sampler.num_negs_per_pos 37.0 +371 4 training.batch_size 2.0 +371 5 model.embedding_dim 2.0 +371 5 model.c_min 0.054896916732306754 +371 5 model.c_max 1.831638551542135 +371 5 loss.margin 4.663070442430396 +371 5 negative_sampler.num_negs_per_pos 35.0 +371 5 training.batch_size 1.0 +371 6 model.embedding_dim 1.0 +371 6 model.c_min 0.045779033694021004 +371 6 model.c_max 5.378894886099362 +371 6 loss.margin 0.5615440103647715 +371 6 negative_sampler.num_negs_per_pos 67.0 +371 6 training.batch_size 0.0 +371 7 model.embedding_dim 0.0 +371 7 model.c_min 0.054382528638370634 +371 7 model.c_max 9.108639155133487 +371 7 loss.margin 5.210175167115972 +371 7 negative_sampler.num_negs_per_pos 42.0 +371 7 training.batch_size 2.0 +371 8 model.embedding_dim 0.0 +371 8 model.c_min 0.03174664143180637 +371 8 model.c_max 7.386627214824235 +371 8 loss.margin 6.0386739336389414 +371 8 negative_sampler.num_negs_per_pos 50.0 +371 8 training.batch_size 2.0 +371 9 model.embedding_dim 0.0 +371 9 model.c_min 0.011927624692251747 +371 9 model.c_max 8.050434283900959 +371 9 loss.margin 2.94604599023147 +371 9 negative_sampler.num_negs_per_pos 17.0 +371 9 training.batch_size 0.0 +371 10 model.embedding_dim 1.0 +371 10 
model.c_min 0.0853451371369525 +371 10 model.c_max 2.9545947481298955 +371 10 loss.margin 9.20389732355274 +371 10 negative_sampler.num_negs_per_pos 61.0 +371 10 training.batch_size 0.0 +371 11 model.embedding_dim 2.0 +371 11 model.c_min 0.03671857970299486 +371 11 model.c_max 8.60196006974244 +371 11 loss.margin 8.260533884321598 +371 11 negative_sampler.num_negs_per_pos 86.0 +371 11 training.batch_size 0.0 +371 12 model.embedding_dim 2.0 +371 12 model.c_min 0.037463391089863296 +371 12 model.c_max 3.772336144906735 +371 12 loss.margin 9.889458850300173 +371 12 negative_sampler.num_negs_per_pos 82.0 +371 12 training.batch_size 1.0 +371 13 model.embedding_dim 2.0 +371 13 model.c_min 0.06262195056544803 +371 13 model.c_max 9.124823621248265 +371 13 loss.margin 4.456633514803357 +371 13 negative_sampler.num_negs_per_pos 79.0 +371 13 training.batch_size 1.0 +371 14 model.embedding_dim 1.0 +371 14 model.c_min 0.02142508185424322 +371 14 model.c_max 6.693987742468186 +371 14 loss.margin 5.512930102521045 +371 14 negative_sampler.num_negs_per_pos 2.0 +371 14 training.batch_size 1.0 +371 15 model.embedding_dim 0.0 +371 15 model.c_min 0.026916083261693723 +371 15 model.c_max 3.464984168407245 +371 15 loss.margin 9.896700295180338 +371 15 negative_sampler.num_negs_per_pos 16.0 +371 15 training.batch_size 0.0 +371 16 model.embedding_dim 2.0 +371 16 model.c_min 0.03418618328551915 +371 16 model.c_max 3.1972125798641833 +371 16 loss.margin 3.212825287973012 +371 16 negative_sampler.num_negs_per_pos 67.0 +371 16 training.batch_size 0.0 +371 17 model.embedding_dim 0.0 +371 17 model.c_min 0.05981858524249347 +371 17 model.c_max 8.669403147915585 +371 17 loss.margin 9.640425211725505 +371 17 negative_sampler.num_negs_per_pos 37.0 +371 17 training.batch_size 0.0 +371 18 model.embedding_dim 1.0 +371 18 model.c_min 0.0583583080823719 +371 18 model.c_max 7.480021078396089 +371 18 loss.margin 1.5237786715911477 +371 18 negative_sampler.num_negs_per_pos 49.0 +371 18 training.batch_size 
0.0 +371 19 model.embedding_dim 2.0 +371 19 model.c_min 0.08000170556832932 +371 19 model.c_max 8.964227969242476 +371 19 loss.margin 3.8836832243177337 +371 19 negative_sampler.num_negs_per_pos 98.0 +371 19 training.batch_size 0.0 +371 20 model.embedding_dim 0.0 +371 20 model.c_min 0.022338061232308932 +371 20 model.c_max 9.731361518531257 +371 20 loss.margin 5.760839966258186 +371 20 negative_sampler.num_negs_per_pos 90.0 +371 20 training.batch_size 0.0 +371 21 model.embedding_dim 1.0 +371 21 model.c_min 0.09430521766171752 +371 21 model.c_max 3.9230053904139965 +371 21 loss.margin 2.218434361874631 +371 21 negative_sampler.num_negs_per_pos 15.0 +371 21 training.batch_size 0.0 +371 22 model.embedding_dim 2.0 +371 22 model.c_min 0.06125680304849629 +371 22 model.c_max 7.796963126743114 +371 22 loss.margin 4.2032008735956845 +371 22 negative_sampler.num_negs_per_pos 9.0 +371 22 training.batch_size 2.0 +371 23 model.embedding_dim 2.0 +371 23 model.c_min 0.08174331606476473 +371 23 model.c_max 8.540154359756324 +371 23 loss.margin 9.956346232605057 +371 23 negative_sampler.num_negs_per_pos 1.0 +371 23 training.batch_size 1.0 +371 24 model.embedding_dim 0.0 +371 24 model.c_min 0.029409405671951175 +371 24 model.c_max 2.6665415512897424 +371 24 loss.margin 6.163556637196979 +371 24 negative_sampler.num_negs_per_pos 8.0 +371 24 training.batch_size 0.0 +371 25 model.embedding_dim 2.0 +371 25 model.c_min 0.01392312644277322 +371 25 model.c_max 3.239837407298188 +371 25 loss.margin 6.910552731269815 +371 25 negative_sampler.num_negs_per_pos 1.0 +371 25 training.batch_size 2.0 +371 26 model.embedding_dim 0.0 +371 26 model.c_min 0.01738400987737337 +371 26 model.c_max 2.1187705272679973 +371 26 loss.margin 4.963041497581492 +371 26 negative_sampler.num_negs_per_pos 14.0 +371 26 training.batch_size 1.0 +371 27 model.embedding_dim 2.0 +371 27 model.c_min 0.018679986664979602 +371 27 model.c_max 9.112775705569362 +371 27 loss.margin 2.062510834226963 +371 27 
negative_sampler.num_negs_per_pos 7.0 +371 27 training.batch_size 2.0 +371 28 model.embedding_dim 0.0 +371 28 model.c_min 0.011243545559253276 +371 28 model.c_max 7.668884735763923 +371 28 loss.margin 6.522865864539498 +371 28 negative_sampler.num_negs_per_pos 73.0 +371 28 training.batch_size 0.0 +371 29 model.embedding_dim 0.0 +371 29 model.c_min 0.010624659550711573 +371 29 model.c_max 2.331529507361923 +371 29 loss.margin 1.4992838145466492 +371 29 negative_sampler.num_negs_per_pos 32.0 +371 29 training.batch_size 1.0 +371 30 model.embedding_dim 0.0 +371 30 model.c_min 0.010132918727279519 +371 30 model.c_max 1.0721069394144709 +371 30 loss.margin 2.9063615392852395 +371 30 negative_sampler.num_negs_per_pos 71.0 +371 30 training.batch_size 0.0 +371 31 model.embedding_dim 0.0 +371 31 model.c_min 0.03821687184673935 +371 31 model.c_max 3.873814077727053 +371 31 loss.margin 5.038987589876775 +371 31 negative_sampler.num_negs_per_pos 35.0 +371 31 training.batch_size 2.0 +371 32 model.embedding_dim 1.0 +371 32 model.c_min 0.01831615733942022 +371 32 model.c_max 8.496481144507406 +371 32 loss.margin 9.975899098606517 +371 32 negative_sampler.num_negs_per_pos 20.0 +371 32 training.batch_size 0.0 +371 33 model.embedding_dim 2.0 +371 33 model.c_min 0.014985411055994546 +371 33 model.c_max 4.043584974480739 +371 33 loss.margin 6.650610775890078 +371 33 negative_sampler.num_negs_per_pos 56.0 +371 33 training.batch_size 0.0 +371 34 model.embedding_dim 1.0 +371 34 model.c_min 0.012733113698082017 +371 34 model.c_max 2.7092867738599216 +371 34 loss.margin 4.376579254135355 +371 34 negative_sampler.num_negs_per_pos 98.0 +371 34 training.batch_size 0.0 +371 35 model.embedding_dim 0.0 +371 35 model.c_min 0.0314133082489672 +371 35 model.c_max 3.0562621191810164 +371 35 loss.margin 6.067658579404816 +371 35 negative_sampler.num_negs_per_pos 38.0 +371 35 training.batch_size 0.0 +371 36 model.embedding_dim 0.0 +371 36 model.c_min 0.012644428340463355 +371 36 model.c_max 
9.600984685640956 +371 36 loss.margin 5.9747896485087155 +371 36 negative_sampler.num_negs_per_pos 52.0 +371 36 training.batch_size 0.0 +371 37 model.embedding_dim 2.0 +371 37 model.c_min 0.01403269307686941 +371 37 model.c_max 6.858310374858269 +371 37 loss.margin 8.41912035040585 +371 37 negative_sampler.num_negs_per_pos 94.0 +371 37 training.batch_size 0.0 +371 38 model.embedding_dim 2.0 +371 38 model.c_min 0.013778890664026794 +371 38 model.c_max 5.782421677765591 +371 38 loss.margin 7.860186588985087 +371 38 negative_sampler.num_negs_per_pos 85.0 +371 38 training.batch_size 2.0 +371 39 model.embedding_dim 0.0 +371 39 model.c_min 0.03126624384478118 +371 39 model.c_max 8.0991891802176 +371 39 loss.margin 9.174609564378944 +371 39 negative_sampler.num_negs_per_pos 31.0 +371 39 training.batch_size 2.0 +371 40 model.embedding_dim 2.0 +371 40 model.c_min 0.05832310302084786 +371 40 model.c_max 8.857141774305088 +371 40 loss.margin 2.0149421538169943 +371 40 negative_sampler.num_negs_per_pos 95.0 +371 40 training.batch_size 0.0 +371 41 model.embedding_dim 1.0 +371 41 model.c_min 0.05254656882440157 +371 41 model.c_max 7.6043350434100185 +371 41 loss.margin 5.292787183302361 +371 41 negative_sampler.num_negs_per_pos 28.0 +371 41 training.batch_size 1.0 +371 42 model.embedding_dim 2.0 +371 42 model.c_min 0.025273135504545706 +371 42 model.c_max 3.1208989111236045 +371 42 loss.margin 5.527137176205888 +371 42 negative_sampler.num_negs_per_pos 8.0 +371 42 training.batch_size 1.0 +371 43 model.embedding_dim 0.0 +371 43 model.c_min 0.07453106739198821 +371 43 model.c_max 5.7994114837813395 +371 43 loss.margin 2.029595752288879 +371 43 negative_sampler.num_negs_per_pos 50.0 +371 43 training.batch_size 2.0 +371 44 model.embedding_dim 1.0 +371 44 model.c_min 0.0690580922115193 +371 44 model.c_max 6.733319283751834 +371 44 loss.margin 9.282104506147716 +371 44 negative_sampler.num_negs_per_pos 75.0 +371 44 training.batch_size 0.0 +371 45 model.embedding_dim 1.0 +371 45 
model.c_min 0.021938141163431794 +371 45 model.c_max 3.116939066427764 +371 45 loss.margin 1.9737713756917188 +371 45 negative_sampler.num_negs_per_pos 46.0 +371 45 training.batch_size 2.0 +371 46 model.embedding_dim 2.0 +371 46 model.c_min 0.015415780557729694 +371 46 model.c_max 4.906976514090377 +371 46 loss.margin 8.419343441092357 +371 46 negative_sampler.num_negs_per_pos 65.0 +371 46 training.batch_size 0.0 +371 47 model.embedding_dim 2.0 +371 47 model.c_min 0.027734829211572484 +371 47 model.c_max 9.475510943892287 +371 47 loss.margin 5.57298471403435 +371 47 negative_sampler.num_negs_per_pos 0.0 +371 47 training.batch_size 1.0 +371 48 model.embedding_dim 1.0 +371 48 model.c_min 0.02804561198453298 +371 48 model.c_max 4.232427547790246 +371 48 loss.margin 8.318624774932786 +371 48 negative_sampler.num_negs_per_pos 62.0 +371 48 training.batch_size 1.0 +371 49 model.embedding_dim 2.0 +371 49 model.c_min 0.07709043133358232 +371 49 model.c_max 2.9548141399317123 +371 49 loss.margin 6.381822153326215 +371 49 negative_sampler.num_negs_per_pos 36.0 +371 49 training.batch_size 2.0 +371 50 model.embedding_dim 2.0 +371 50 model.c_min 0.05129560714718404 +371 50 model.c_max 9.85637791921108 +371 50 loss.margin 8.723373710765424 +371 50 negative_sampler.num_negs_per_pos 3.0 +371 50 training.batch_size 1.0 +371 51 model.embedding_dim 0.0 +371 51 model.c_min 0.02048756350859157 +371 51 model.c_max 9.154445514558512 +371 51 loss.margin 2.3045737912233344 +371 51 negative_sampler.num_negs_per_pos 5.0 +371 51 training.batch_size 0.0 +371 52 model.embedding_dim 1.0 +371 52 model.c_min 0.033690059455527534 +371 52 model.c_max 3.623948258988248 +371 52 loss.margin 1.2079375797849408 +371 52 negative_sampler.num_negs_per_pos 74.0 +371 52 training.batch_size 0.0 +371 53 model.embedding_dim 0.0 +371 53 model.c_min 0.034878342432985376 +371 53 model.c_max 1.5650633066864275 +371 53 loss.margin 2.721962797536826 +371 53 negative_sampler.num_negs_per_pos 81.0 +371 53 
training.batch_size 0.0 +371 54 model.embedding_dim 0.0 +371 54 model.c_min 0.010916989712636084 +371 54 model.c_max 6.7223659074667 +371 54 loss.margin 3.7355678791021996 +371 54 negative_sampler.num_negs_per_pos 48.0 +371 54 training.batch_size 1.0 +371 55 model.embedding_dim 0.0 +371 55 model.c_min 0.01484696089718461 +371 55 model.c_max 5.931903829124793 +371 55 loss.margin 4.12095714319817 +371 55 negative_sampler.num_negs_per_pos 11.0 +371 55 training.batch_size 2.0 +371 56 model.embedding_dim 1.0 +371 56 model.c_min 0.015261549615797024 +371 56 model.c_max 4.451846953356393 +371 56 loss.margin 9.718120031289363 +371 56 negative_sampler.num_negs_per_pos 19.0 +371 56 training.batch_size 2.0 +371 57 model.embedding_dim 2.0 +371 57 model.c_min 0.0668185637730761 +371 57 model.c_max 9.93629610530724 +371 57 loss.margin 9.502154917233405 +371 57 negative_sampler.num_negs_per_pos 11.0 +371 57 training.batch_size 2.0 +371 58 model.embedding_dim 1.0 +371 58 model.c_min 0.09501228046064253 +371 58 model.c_max 9.292470029864841 +371 58 loss.margin 2.0578157879206884 +371 58 negative_sampler.num_negs_per_pos 67.0 +371 58 training.batch_size 2.0 +371 59 model.embedding_dim 0.0 +371 59 model.c_min 0.012419729844305215 +371 59 model.c_max 7.581357500985632 +371 59 loss.margin 0.7467308128321968 +371 59 negative_sampler.num_negs_per_pos 46.0 +371 59 training.batch_size 2.0 +371 60 model.embedding_dim 1.0 +371 60 model.c_min 0.08899428595787436 +371 60 model.c_max 6.989322503704633 +371 60 loss.margin 2.455204510653172 +371 60 negative_sampler.num_negs_per_pos 72.0 +371 60 training.batch_size 2.0 +371 61 model.embedding_dim 1.0 +371 61 model.c_min 0.028333609285476447 +371 61 model.c_max 7.382394287522754 +371 61 loss.margin 1.9892988486232674 +371 61 negative_sampler.num_negs_per_pos 17.0 +371 61 training.batch_size 0.0 +371 62 model.embedding_dim 0.0 +371 62 model.c_min 0.031227884972741062 +371 62 model.c_max 4.635762732561894 +371 62 loss.margin 0.7266087425181599 +371 
62 negative_sampler.num_negs_per_pos 56.0 +371 62 training.batch_size 1.0 +371 63 model.embedding_dim 1.0 +371 63 model.c_min 0.038367745236193636 +371 63 model.c_max 7.435294055524928 +371 63 loss.margin 7.316568875025728 +371 63 negative_sampler.num_negs_per_pos 13.0 +371 63 training.batch_size 2.0 +371 64 model.embedding_dim 2.0 +371 64 model.c_min 0.045358845038970634 +371 64 model.c_max 8.316140120278755 +371 64 loss.margin 3.120270284023311 +371 64 negative_sampler.num_negs_per_pos 14.0 +371 64 training.batch_size 1.0 +371 65 model.embedding_dim 2.0 +371 65 model.c_min 0.02217869014671555 +371 65 model.c_max 5.191554236975081 +371 65 loss.margin 2.2350819780899065 +371 65 negative_sampler.num_negs_per_pos 9.0 +371 65 training.batch_size 1.0 +371 66 model.embedding_dim 0.0 +371 66 model.c_min 0.0709083809434905 +371 66 model.c_max 7.690068288308518 +371 66 loss.margin 7.2299922275833675 +371 66 negative_sampler.num_negs_per_pos 81.0 +371 66 training.batch_size 0.0 +371 67 model.embedding_dim 2.0 +371 67 model.c_min 0.010341142578938133 +371 67 model.c_max 1.9303999494055155 +371 67 loss.margin 7.877069617545345 +371 67 negative_sampler.num_negs_per_pos 86.0 +371 67 training.batch_size 0.0 +371 68 model.embedding_dim 2.0 +371 68 model.c_min 0.012968857549936577 +371 68 model.c_max 4.1426886523174975 +371 68 loss.margin 4.907663755198843 +371 68 negative_sampler.num_negs_per_pos 63.0 +371 68 training.batch_size 0.0 +371 69 model.embedding_dim 2.0 +371 69 model.c_min 0.022611008698001304 +371 69 model.c_max 5.482335895827288 +371 69 loss.margin 7.7555024137659005 +371 69 negative_sampler.num_negs_per_pos 59.0 +371 69 training.batch_size 2.0 +371 70 model.embedding_dim 1.0 +371 70 model.c_min 0.0831666673401278 +371 70 model.c_max 7.14461835546147 +371 70 loss.margin 6.214913855470795 +371 70 negative_sampler.num_negs_per_pos 10.0 +371 70 training.batch_size 1.0 +371 71 model.embedding_dim 2.0 +371 71 model.c_min 0.012208627442654458 +371 71 model.c_max 
2.794852069468884 +371 71 loss.margin 7.499073959543757 +371 71 negative_sampler.num_negs_per_pos 71.0 +371 71 training.batch_size 2.0 +371 72 model.embedding_dim 2.0 +371 72 model.c_min 0.02889079877157315 +371 72 model.c_max 2.7126180152039954 +371 72 loss.margin 2.4662462576450412 +371 72 negative_sampler.num_negs_per_pos 7.0 +371 72 training.batch_size 0.0 +371 73 model.embedding_dim 0.0 +371 73 model.c_min 0.026493723530355773 +371 73 model.c_max 5.766409503138977 +371 73 loss.margin 7.7755853193375195 +371 73 negative_sampler.num_negs_per_pos 10.0 +371 73 training.batch_size 2.0 +371 74 model.embedding_dim 0.0 +371 74 model.c_min 0.012029926544963274 +371 74 model.c_max 7.457798802874413 +371 74 loss.margin 9.042370133612543 +371 74 negative_sampler.num_negs_per_pos 93.0 +371 74 training.batch_size 0.0 +371 75 model.embedding_dim 1.0 +371 75 model.c_min 0.05420625952756552 +371 75 model.c_max 9.163825079526287 +371 75 loss.margin 9.013852026793245 +371 75 negative_sampler.num_negs_per_pos 17.0 +371 75 training.batch_size 2.0 +371 76 model.embedding_dim 1.0 +371 76 model.c_min 0.010035421600699286 +371 76 model.c_max 1.371890951896442 +371 76 loss.margin 9.641615408454413 +371 76 negative_sampler.num_negs_per_pos 20.0 +371 76 training.batch_size 2.0 +371 77 model.embedding_dim 0.0 +371 77 model.c_min 0.06139341421104253 +371 77 model.c_max 5.087440935382784 +371 77 loss.margin 0.6469326433194179 +371 77 negative_sampler.num_negs_per_pos 8.0 +371 77 training.batch_size 1.0 +371 78 model.embedding_dim 1.0 +371 78 model.c_min 0.032972148625101574 +371 78 model.c_max 4.18080809052767 +371 78 loss.margin 5.163991204791591 +371 78 negative_sampler.num_negs_per_pos 28.0 +371 78 training.batch_size 2.0 +371 79 model.embedding_dim 0.0 +371 79 model.c_min 0.040799445528168804 +371 79 model.c_max 2.109605267296493 +371 79 loss.margin 9.830374362248888 +371 79 negative_sampler.num_negs_per_pos 87.0 +371 79 training.batch_size 0.0 +371 80 model.embedding_dim 2.0 +371 80 
model.c_min 0.0136787221657945 +371 80 model.c_max 3.090775699393446 +371 80 loss.margin 9.807365513998592 +371 80 negative_sampler.num_negs_per_pos 59.0 +371 80 training.batch_size 0.0 +371 81 model.embedding_dim 0.0 +371 81 model.c_min 0.016086176548422943 +371 81 model.c_max 9.555293711921049 +371 81 loss.margin 4.273814970303835 +371 81 negative_sampler.num_negs_per_pos 64.0 +371 81 training.batch_size 1.0 +371 82 model.embedding_dim 0.0 +371 82 model.c_min 0.08384397596891635 +371 82 model.c_max 7.476758109975783 +371 82 loss.margin 7.425983792625094 +371 82 negative_sampler.num_negs_per_pos 67.0 +371 82 training.batch_size 2.0 +371 83 model.embedding_dim 2.0 +371 83 model.c_min 0.06701283705217799 +371 83 model.c_max 4.666416955980138 +371 83 loss.margin 3.013516938171848 +371 83 negative_sampler.num_negs_per_pos 7.0 +371 83 training.batch_size 0.0 +371 84 model.embedding_dim 1.0 +371 84 model.c_min 0.01902466365448352 +371 84 model.c_max 6.264064814327719 +371 84 loss.margin 2.653685989708812 +371 84 negative_sampler.num_negs_per_pos 73.0 +371 84 training.batch_size 2.0 +371 85 model.embedding_dim 1.0 +371 85 model.c_min 0.05650088866378448 +371 85 model.c_max 6.220348263243391 +371 85 loss.margin 2.6076178512530914 +371 85 negative_sampler.num_negs_per_pos 6.0 +371 85 training.batch_size 2.0 +371 86 model.embedding_dim 2.0 +371 86 model.c_min 0.010425132209378896 +371 86 model.c_max 3.3959399069741787 +371 86 loss.margin 9.36305288973923 +371 86 negative_sampler.num_negs_per_pos 13.0 +371 86 training.batch_size 1.0 +371 87 model.embedding_dim 0.0 +371 87 model.c_min 0.03670721522182645 +371 87 model.c_max 3.56477894432277 +371 87 loss.margin 6.657383505761199 +371 87 negative_sampler.num_negs_per_pos 77.0 +371 87 training.batch_size 1.0 +371 88 model.embedding_dim 2.0 +371 88 model.c_min 0.011017950137569735 +371 88 model.c_max 2.8231664096822304 +371 88 loss.margin 6.769761688872537 +371 88 negative_sampler.num_negs_per_pos 54.0 +371 88 training.batch_size 
1.0 +371 89 model.embedding_dim 2.0 +371 89 model.c_min 0.03870777126420646 +371 89 model.c_max 7.187478123518177 +371 89 loss.margin 5.341518266359803 +371 89 negative_sampler.num_negs_per_pos 13.0 +371 89 training.batch_size 1.0 +371 90 model.embedding_dim 0.0 +371 90 model.c_min 0.07070788128119275 +371 90 model.c_max 7.859459207394714 +371 90 loss.margin 9.878743188282282 +371 90 negative_sampler.num_negs_per_pos 67.0 +371 90 training.batch_size 2.0 +371 91 model.embedding_dim 2.0 +371 91 model.c_min 0.05036735813956917 +371 91 model.c_max 9.011598205087825 +371 91 loss.margin 7.275574721658979 +371 91 negative_sampler.num_negs_per_pos 34.0 +371 91 training.batch_size 0.0 +371 92 model.embedding_dim 0.0 +371 92 model.c_min 0.012811214176092961 +371 92 model.c_max 7.150025475700904 +371 92 loss.margin 7.259568327172309 +371 92 negative_sampler.num_negs_per_pos 82.0 +371 92 training.batch_size 1.0 +371 93 model.embedding_dim 2.0 +371 93 model.c_min 0.02872841540485459 +371 93 model.c_max 7.987197849453635 +371 93 loss.margin 9.871447351749294 +371 93 negative_sampler.num_negs_per_pos 99.0 +371 93 training.batch_size 1.0 +371 94 model.embedding_dim 0.0 +371 94 model.c_min 0.054058774335906425 +371 94 model.c_max 9.829985284474136 +371 94 loss.margin 2.4579746975122747 +371 94 negative_sampler.num_negs_per_pos 92.0 +371 94 training.batch_size 2.0 +371 95 model.embedding_dim 1.0 +371 95 model.c_min 0.04310129901236553 +371 95 model.c_max 2.875517409357775 +371 95 loss.margin 3.255876778742675 +371 95 negative_sampler.num_negs_per_pos 18.0 +371 95 training.batch_size 2.0 +371 96 model.embedding_dim 1.0 +371 96 model.c_min 0.057103986337270395 +371 96 model.c_max 2.6336959864090588 +371 96 loss.margin 7.606371169318167 +371 96 negative_sampler.num_negs_per_pos 63.0 +371 96 training.batch_size 1.0 +371 97 model.embedding_dim 0.0 +371 97 model.c_min 0.029886732533460643 +371 97 model.c_max 8.825506364691261 +371 97 loss.margin 7.9917341038262775 +371 97 
negative_sampler.num_negs_per_pos 56.0 +371 97 training.batch_size 2.0 +371 98 model.embedding_dim 1.0 +371 98 model.c_min 0.09846910685032556 +371 98 model.c_max 2.017885685195651 +371 98 loss.margin 3.6897277079562305 +371 98 negative_sampler.num_negs_per_pos 6.0 +371 98 training.batch_size 2.0 +371 99 model.embedding_dim 2.0 +371 99 model.c_min 0.07678267705705875 +371 99 model.c_max 3.1637959875600106 +371 99 loss.margin 8.5960892209849 +371 99 negative_sampler.num_negs_per_pos 82.0 +371 99 training.batch_size 2.0 +371 100 model.embedding_dim 0.0 +371 100 model.c_min 0.09176244377570522 +371 100 model.c_max 2.2112304602127923 +371 100 loss.margin 1.5972590793685213 +371 100 negative_sampler.num_negs_per_pos 95.0 +371 100 training.batch_size 1.0 +371 1 dataset """kinships""" +371 1 model """kg2e""" +371 1 loss """marginranking""" +371 1 regularizer """no""" +371 1 optimizer """adadelta""" +371 1 training_loop """owa""" +371 1 negative_sampler """basic""" +371 1 evaluator """rankbased""" +371 2 dataset """kinships""" +371 2 model """kg2e""" +371 2 loss """marginranking""" +371 2 regularizer """no""" +371 2 optimizer """adadelta""" +371 2 training_loop """owa""" +371 2 negative_sampler """basic""" +371 2 evaluator """rankbased""" +371 3 dataset """kinships""" +371 3 model """kg2e""" +371 3 loss """marginranking""" +371 3 regularizer """no""" +371 3 optimizer """adadelta""" +371 3 training_loop """owa""" +371 3 negative_sampler """basic""" +371 3 evaluator """rankbased""" +371 4 dataset """kinships""" +371 4 model """kg2e""" +371 4 loss """marginranking""" +371 4 regularizer """no""" +371 4 optimizer """adadelta""" +371 4 training_loop """owa""" +371 4 negative_sampler """basic""" +371 4 evaluator """rankbased""" +371 5 dataset """kinships""" +371 5 model """kg2e""" +371 5 loss """marginranking""" +371 5 regularizer """no""" +371 5 optimizer """adadelta""" +371 5 training_loop """owa""" +371 5 negative_sampler """basic""" +371 5 evaluator """rankbased""" +371 6 
dataset """kinships""" +371 6 model """kg2e""" +371 6 loss """marginranking""" +371 6 regularizer """no""" +371 6 optimizer """adadelta""" +371 6 training_loop """owa""" +371 6 negative_sampler """basic""" +371 6 evaluator """rankbased""" +371 7 dataset """kinships""" +371 7 model """kg2e""" +371 7 loss """marginranking""" +371 7 regularizer """no""" +371 7 optimizer """adadelta""" +371 7 training_loop """owa""" +371 7 negative_sampler """basic""" +371 7 evaluator """rankbased""" +371 8 dataset """kinships""" +371 8 model """kg2e""" +371 8 loss """marginranking""" +371 8 regularizer """no""" +371 8 optimizer """adadelta""" +371 8 training_loop """owa""" +371 8 negative_sampler """basic""" +371 8 evaluator """rankbased""" +371 9 dataset """kinships""" +371 9 model """kg2e""" +371 9 loss """marginranking""" +371 9 regularizer """no""" +371 9 optimizer """adadelta""" +371 9 training_loop """owa""" +371 9 negative_sampler """basic""" +371 9 evaluator """rankbased""" +371 10 dataset """kinships""" +371 10 model """kg2e""" +371 10 loss """marginranking""" +371 10 regularizer """no""" +371 10 optimizer """adadelta""" +371 10 training_loop """owa""" +371 10 negative_sampler """basic""" +371 10 evaluator """rankbased""" +371 11 dataset """kinships""" +371 11 model """kg2e""" +371 11 loss """marginranking""" +371 11 regularizer """no""" +371 11 optimizer """adadelta""" +371 11 training_loop """owa""" +371 11 negative_sampler """basic""" +371 11 evaluator """rankbased""" +371 12 dataset """kinships""" +371 12 model """kg2e""" +371 12 loss """marginranking""" +371 12 regularizer """no""" +371 12 optimizer """adadelta""" +371 12 training_loop """owa""" +371 12 negative_sampler """basic""" +371 12 evaluator """rankbased""" +371 13 dataset """kinships""" +371 13 model """kg2e""" +371 13 loss """marginranking""" +371 13 regularizer """no""" +371 13 optimizer """adadelta""" +371 13 training_loop """owa""" +371 13 negative_sampler """basic""" +371 13 evaluator """rankbased""" +371 
14 dataset """kinships""" +371 14 model """kg2e""" +371 14 loss """marginranking""" +371 14 regularizer """no""" +371 14 optimizer """adadelta""" +371 14 training_loop """owa""" +371 14 negative_sampler """basic""" +371 14 evaluator """rankbased""" +371 15 dataset """kinships""" +371 15 model """kg2e""" +371 15 loss """marginranking""" +371 15 regularizer """no""" +371 15 optimizer """adadelta""" +371 15 training_loop """owa""" +371 15 negative_sampler """basic""" +371 15 evaluator """rankbased""" +371 16 dataset """kinships""" +371 16 model """kg2e""" +371 16 loss """marginranking""" +371 16 regularizer """no""" +371 16 optimizer """adadelta""" +371 16 training_loop """owa""" +371 16 negative_sampler """basic""" +371 16 evaluator """rankbased""" +371 17 dataset """kinships""" +371 17 model """kg2e""" +371 17 loss """marginranking""" +371 17 regularizer """no""" +371 17 optimizer """adadelta""" +371 17 training_loop """owa""" +371 17 negative_sampler """basic""" +371 17 evaluator """rankbased""" +371 18 dataset """kinships""" +371 18 model """kg2e""" +371 18 loss """marginranking""" +371 18 regularizer """no""" +371 18 optimizer """adadelta""" +371 18 training_loop """owa""" +371 18 negative_sampler """basic""" +371 18 evaluator """rankbased""" +371 19 dataset """kinships""" +371 19 model """kg2e""" +371 19 loss """marginranking""" +371 19 regularizer """no""" +371 19 optimizer """adadelta""" +371 19 training_loop """owa""" +371 19 negative_sampler """basic""" +371 19 evaluator """rankbased""" +371 20 dataset """kinships""" +371 20 model """kg2e""" +371 20 loss """marginranking""" +371 20 regularizer """no""" +371 20 optimizer """adadelta""" +371 20 training_loop """owa""" +371 20 negative_sampler """basic""" +371 20 evaluator """rankbased""" +371 21 dataset """kinships""" +371 21 model """kg2e""" +371 21 loss """marginranking""" +371 21 regularizer """no""" +371 21 optimizer """adadelta""" +371 21 training_loop """owa""" +371 21 negative_sampler """basic""" +371 
21 evaluator """rankbased""" +371 22 dataset """kinships""" +371 22 model """kg2e""" +371 22 loss """marginranking""" +371 22 regularizer """no""" +371 22 optimizer """adadelta""" +371 22 training_loop """owa""" +371 22 negative_sampler """basic""" +371 22 evaluator """rankbased""" +371 23 dataset """kinships""" +371 23 model """kg2e""" +371 23 loss """marginranking""" +371 23 regularizer """no""" +371 23 optimizer """adadelta""" +371 23 training_loop """owa""" +371 23 negative_sampler """basic""" +371 23 evaluator """rankbased""" +371 24 dataset """kinships""" +371 24 model """kg2e""" +371 24 loss """marginranking""" +371 24 regularizer """no""" +371 24 optimizer """adadelta""" +371 24 training_loop """owa""" +371 24 negative_sampler """basic""" +371 24 evaluator """rankbased""" +371 25 dataset """kinships""" +371 25 model """kg2e""" +371 25 loss """marginranking""" +371 25 regularizer """no""" +371 25 optimizer """adadelta""" +371 25 training_loop """owa""" +371 25 negative_sampler """basic""" +371 25 evaluator """rankbased""" +371 26 dataset """kinships""" +371 26 model """kg2e""" +371 26 loss """marginranking""" +371 26 regularizer """no""" +371 26 optimizer """adadelta""" +371 26 training_loop """owa""" +371 26 negative_sampler """basic""" +371 26 evaluator """rankbased""" +371 27 dataset """kinships""" +371 27 model """kg2e""" +371 27 loss """marginranking""" +371 27 regularizer """no""" +371 27 optimizer """adadelta""" +371 27 training_loop """owa""" +371 27 negative_sampler """basic""" +371 27 evaluator """rankbased""" +371 28 dataset """kinships""" +371 28 model """kg2e""" +371 28 loss """marginranking""" +371 28 regularizer """no""" +371 28 optimizer """adadelta""" +371 28 training_loop """owa""" +371 28 negative_sampler """basic""" +371 28 evaluator """rankbased""" +371 29 dataset """kinships""" +371 29 model """kg2e""" +371 29 loss """marginranking""" +371 29 regularizer """no""" +371 29 optimizer """adadelta""" +371 29 training_loop """owa""" +371 29 
negative_sampler """basic""" +371 29 evaluator """rankbased""" +371 30 dataset """kinships""" +371 30 model """kg2e""" +371 30 loss """marginranking""" +371 30 regularizer """no""" +371 30 optimizer """adadelta""" +371 30 training_loop """owa""" +371 30 negative_sampler """basic""" +371 30 evaluator """rankbased""" +371 31 dataset """kinships""" +371 31 model """kg2e""" +371 31 loss """marginranking""" +371 31 regularizer """no""" +371 31 optimizer """adadelta""" +371 31 training_loop """owa""" +371 31 negative_sampler """basic""" +371 31 evaluator """rankbased""" +371 32 dataset """kinships""" +371 32 model """kg2e""" +371 32 loss """marginranking""" +371 32 regularizer """no""" +371 32 optimizer """adadelta""" +371 32 training_loop """owa""" +371 32 negative_sampler """basic""" +371 32 evaluator """rankbased""" +371 33 dataset """kinships""" +371 33 model """kg2e""" +371 33 loss """marginranking""" +371 33 regularizer """no""" +371 33 optimizer """adadelta""" +371 33 training_loop """owa""" +371 33 negative_sampler """basic""" +371 33 evaluator """rankbased""" +371 34 dataset """kinships""" +371 34 model """kg2e""" +371 34 loss """marginranking""" +371 34 regularizer """no""" +371 34 optimizer """adadelta""" +371 34 training_loop """owa""" +371 34 negative_sampler """basic""" +371 34 evaluator """rankbased""" +371 35 dataset """kinships""" +371 35 model """kg2e""" +371 35 loss """marginranking""" +371 35 regularizer """no""" +371 35 optimizer """adadelta""" +371 35 training_loop """owa""" +371 35 negative_sampler """basic""" +371 35 evaluator """rankbased""" +371 36 dataset """kinships""" +371 36 model """kg2e""" +371 36 loss """marginranking""" +371 36 regularizer """no""" +371 36 optimizer """adadelta""" +371 36 training_loop """owa""" +371 36 negative_sampler """basic""" +371 36 evaluator """rankbased""" +371 37 dataset """kinships""" +371 37 model """kg2e""" +371 37 loss """marginranking""" +371 37 regularizer """no""" +371 37 optimizer """adadelta""" +371 37 
training_loop """owa""" +371 37 negative_sampler """basic""" +371 37 evaluator """rankbased""" +371 38 dataset """kinships""" +371 38 model """kg2e""" +371 38 loss """marginranking""" +371 38 regularizer """no""" +371 38 optimizer """adadelta""" +371 38 training_loop """owa""" +371 38 negative_sampler """basic""" +371 38 evaluator """rankbased""" +371 39 dataset """kinships""" +371 39 model """kg2e""" +371 39 loss """marginranking""" +371 39 regularizer """no""" +371 39 optimizer """adadelta""" +371 39 training_loop """owa""" +371 39 negative_sampler """basic""" +371 39 evaluator """rankbased""" +371 40 dataset """kinships""" +371 40 model """kg2e""" +371 40 loss """marginranking""" +371 40 regularizer """no""" +371 40 optimizer """adadelta""" +371 40 training_loop """owa""" +371 40 negative_sampler """basic""" +371 40 evaluator """rankbased""" +371 41 dataset """kinships""" +371 41 model """kg2e""" +371 41 loss """marginranking""" +371 41 regularizer """no""" +371 41 optimizer """adadelta""" +371 41 training_loop """owa""" +371 41 negative_sampler """basic""" +371 41 evaluator """rankbased""" +371 42 dataset """kinships""" +371 42 model """kg2e""" +371 42 loss """marginranking""" +371 42 regularizer """no""" +371 42 optimizer """adadelta""" +371 42 training_loop """owa""" +371 42 negative_sampler """basic""" +371 42 evaluator """rankbased""" +371 43 dataset """kinships""" +371 43 model """kg2e""" +371 43 loss """marginranking""" +371 43 regularizer """no""" +371 43 optimizer """adadelta""" +371 43 training_loop """owa""" +371 43 negative_sampler """basic""" +371 43 evaluator """rankbased""" +371 44 dataset """kinships""" +371 44 model """kg2e""" +371 44 loss """marginranking""" +371 44 regularizer """no""" +371 44 optimizer """adadelta""" +371 44 training_loop """owa""" +371 44 negative_sampler """basic""" +371 44 evaluator """rankbased""" +371 45 dataset """kinships""" +371 45 model """kg2e""" +371 45 loss """marginranking""" +371 45 regularizer """no""" +371 45 
optimizer """adadelta""" +371 45 training_loop """owa""" +371 45 negative_sampler """basic""" +371 45 evaluator """rankbased""" +371 46 dataset """kinships""" +371 46 model """kg2e""" +371 46 loss """marginranking""" +371 46 regularizer """no""" +371 46 optimizer """adadelta""" +371 46 training_loop """owa""" +371 46 negative_sampler """basic""" +371 46 evaluator """rankbased""" +371 47 dataset """kinships""" +371 47 model """kg2e""" +371 47 loss """marginranking""" +371 47 regularizer """no""" +371 47 optimizer """adadelta""" +371 47 training_loop """owa""" +371 47 negative_sampler """basic""" +371 47 evaluator """rankbased""" +371 48 dataset """kinships""" +371 48 model """kg2e""" +371 48 loss """marginranking""" +371 48 regularizer """no""" +371 48 optimizer """adadelta""" +371 48 training_loop """owa""" +371 48 negative_sampler """basic""" +371 48 evaluator """rankbased""" +371 49 dataset """kinships""" +371 49 model """kg2e""" +371 49 loss """marginranking""" +371 49 regularizer """no""" +371 49 optimizer """adadelta""" +371 49 training_loop """owa""" +371 49 negative_sampler """basic""" +371 49 evaluator """rankbased""" +371 50 dataset """kinships""" +371 50 model """kg2e""" +371 50 loss """marginranking""" +371 50 regularizer """no""" +371 50 optimizer """adadelta""" +371 50 training_loop """owa""" +371 50 negative_sampler """basic""" +371 50 evaluator """rankbased""" +371 51 dataset """kinships""" +371 51 model """kg2e""" +371 51 loss """marginranking""" +371 51 regularizer """no""" +371 51 optimizer """adadelta""" +371 51 training_loop """owa""" +371 51 negative_sampler """basic""" +371 51 evaluator """rankbased""" +371 52 dataset """kinships""" +371 52 model """kg2e""" +371 52 loss """marginranking""" +371 52 regularizer """no""" +371 52 optimizer """adadelta""" +371 52 training_loop """owa""" +371 52 negative_sampler """basic""" +371 52 evaluator """rankbased""" +371 53 dataset """kinships""" +371 53 model """kg2e""" +371 53 loss """marginranking""" +371 
53 regularizer """no""" +371 53 optimizer """adadelta""" +371 53 training_loop """owa""" +371 53 negative_sampler """basic""" +371 53 evaluator """rankbased""" +371 54 dataset """kinships""" +371 54 model """kg2e""" +371 54 loss """marginranking""" +371 54 regularizer """no""" +371 54 optimizer """adadelta""" +371 54 training_loop """owa""" +371 54 negative_sampler """basic""" +371 54 evaluator """rankbased""" +371 55 dataset """kinships""" +371 55 model """kg2e""" +371 55 loss """marginranking""" +371 55 regularizer """no""" +371 55 optimizer """adadelta""" +371 55 training_loop """owa""" +371 55 negative_sampler """basic""" +371 55 evaluator """rankbased""" +371 56 dataset """kinships""" +371 56 model """kg2e""" +371 56 loss """marginranking""" +371 56 regularizer """no""" +371 56 optimizer """adadelta""" +371 56 training_loop """owa""" +371 56 negative_sampler """basic""" +371 56 evaluator """rankbased""" +371 57 dataset """kinships""" +371 57 model """kg2e""" +371 57 loss """marginranking""" +371 57 regularizer """no""" +371 57 optimizer """adadelta""" +371 57 training_loop """owa""" +371 57 negative_sampler """basic""" +371 57 evaluator """rankbased""" +371 58 dataset """kinships""" +371 58 model """kg2e""" +371 58 loss """marginranking""" +371 58 regularizer """no""" +371 58 optimizer """adadelta""" +371 58 training_loop """owa""" +371 58 negative_sampler """basic""" +371 58 evaluator """rankbased""" +371 59 dataset """kinships""" +371 59 model """kg2e""" +371 59 loss """marginranking""" +371 59 regularizer """no""" +371 59 optimizer """adadelta""" +371 59 training_loop """owa""" +371 59 negative_sampler """basic""" +371 59 evaluator """rankbased""" +371 60 dataset """kinships""" +371 60 model """kg2e""" +371 60 loss """marginranking""" +371 60 regularizer """no""" +371 60 optimizer """adadelta""" +371 60 training_loop """owa""" +371 60 negative_sampler """basic""" +371 60 evaluator """rankbased""" +371 61 dataset """kinships""" +371 61 model """kg2e""" +371 
61 loss """marginranking""" +371 61 regularizer """no""" +371 61 optimizer """adadelta""" +371 61 training_loop """owa""" +371 61 negative_sampler """basic""" +371 61 evaluator """rankbased""" +371 62 dataset """kinships""" +371 62 model """kg2e""" +371 62 loss """marginranking""" +371 62 regularizer """no""" +371 62 optimizer """adadelta""" +371 62 training_loop """owa""" +371 62 negative_sampler """basic""" +371 62 evaluator """rankbased""" +371 63 dataset """kinships""" +371 63 model """kg2e""" +371 63 loss """marginranking""" +371 63 regularizer """no""" +371 63 optimizer """adadelta""" +371 63 training_loop """owa""" +371 63 negative_sampler """basic""" +371 63 evaluator """rankbased""" +371 64 dataset """kinships""" +371 64 model """kg2e""" +371 64 loss """marginranking""" +371 64 regularizer """no""" +371 64 optimizer """adadelta""" +371 64 training_loop """owa""" +371 64 negative_sampler """basic""" +371 64 evaluator """rankbased""" +371 65 dataset """kinships""" +371 65 model """kg2e""" +371 65 loss """marginranking""" +371 65 regularizer """no""" +371 65 optimizer """adadelta""" +371 65 training_loop """owa""" +371 65 negative_sampler """basic""" +371 65 evaluator """rankbased""" +371 66 dataset """kinships""" +371 66 model """kg2e""" +371 66 loss """marginranking""" +371 66 regularizer """no""" +371 66 optimizer """adadelta""" +371 66 training_loop """owa""" +371 66 negative_sampler """basic""" +371 66 evaluator """rankbased""" +371 67 dataset """kinships""" +371 67 model """kg2e""" +371 67 loss """marginranking""" +371 67 regularizer """no""" +371 67 optimizer """adadelta""" +371 67 training_loop """owa""" +371 67 negative_sampler """basic""" +371 67 evaluator """rankbased""" +371 68 dataset """kinships""" +371 68 model """kg2e""" +371 68 loss """marginranking""" +371 68 regularizer """no""" +371 68 optimizer """adadelta""" +371 68 training_loop """owa""" +371 68 negative_sampler """basic""" +371 68 evaluator """rankbased""" +371 69 dataset 
"""kinships""" +371 69 model """kg2e""" +371 69 loss """marginranking""" +371 69 regularizer """no""" +371 69 optimizer """adadelta""" +371 69 training_loop """owa""" +371 69 negative_sampler """basic""" +371 69 evaluator """rankbased""" +371 70 dataset """kinships""" +371 70 model """kg2e""" +371 70 loss """marginranking""" +371 70 regularizer """no""" +371 70 optimizer """adadelta""" +371 70 training_loop """owa""" +371 70 negative_sampler """basic""" +371 70 evaluator """rankbased""" +371 71 dataset """kinships""" +371 71 model """kg2e""" +371 71 loss """marginranking""" +371 71 regularizer """no""" +371 71 optimizer """adadelta""" +371 71 training_loop """owa""" +371 71 negative_sampler """basic""" +371 71 evaluator """rankbased""" +371 72 dataset """kinships""" +371 72 model """kg2e""" +371 72 loss """marginranking""" +371 72 regularizer """no""" +371 72 optimizer """adadelta""" +371 72 training_loop """owa""" +371 72 negative_sampler """basic""" +371 72 evaluator """rankbased""" +371 73 dataset """kinships""" +371 73 model """kg2e""" +371 73 loss """marginranking""" +371 73 regularizer """no""" +371 73 optimizer """adadelta""" +371 73 training_loop """owa""" +371 73 negative_sampler """basic""" +371 73 evaluator """rankbased""" +371 74 dataset """kinships""" +371 74 model """kg2e""" +371 74 loss """marginranking""" +371 74 regularizer """no""" +371 74 optimizer """adadelta""" +371 74 training_loop """owa""" +371 74 negative_sampler """basic""" +371 74 evaluator """rankbased""" +371 75 dataset """kinships""" +371 75 model """kg2e""" +371 75 loss """marginranking""" +371 75 regularizer """no""" +371 75 optimizer """adadelta""" +371 75 training_loop """owa""" +371 75 negative_sampler """basic""" +371 75 evaluator """rankbased""" +371 76 dataset """kinships""" +371 76 model """kg2e""" +371 76 loss """marginranking""" +371 76 regularizer """no""" +371 76 optimizer """adadelta""" +371 76 training_loop """owa""" +371 76 negative_sampler """basic""" +371 76 evaluator 
"""rankbased""" +371 77 dataset """kinships""" +371 77 model """kg2e""" +371 77 loss """marginranking""" +371 77 regularizer """no""" +371 77 optimizer """adadelta""" +371 77 training_loop """owa""" +371 77 negative_sampler """basic""" +371 77 evaluator """rankbased""" +371 78 dataset """kinships""" +371 78 model """kg2e""" +371 78 loss """marginranking""" +371 78 regularizer """no""" +371 78 optimizer """adadelta""" +371 78 training_loop """owa""" +371 78 negative_sampler """basic""" +371 78 evaluator """rankbased""" +371 79 dataset """kinships""" +371 79 model """kg2e""" +371 79 loss """marginranking""" +371 79 regularizer """no""" +371 79 optimizer """adadelta""" +371 79 training_loop """owa""" +371 79 negative_sampler """basic""" +371 79 evaluator """rankbased""" +371 80 dataset """kinships""" +371 80 model """kg2e""" +371 80 loss """marginranking""" +371 80 regularizer """no""" +371 80 optimizer """adadelta""" +371 80 training_loop """owa""" +371 80 negative_sampler """basic""" +371 80 evaluator """rankbased""" +371 81 dataset """kinships""" +371 81 model """kg2e""" +371 81 loss """marginranking""" +371 81 regularizer """no""" +371 81 optimizer """adadelta""" +371 81 training_loop """owa""" +371 81 negative_sampler """basic""" +371 81 evaluator """rankbased""" +371 82 dataset """kinships""" +371 82 model """kg2e""" +371 82 loss """marginranking""" +371 82 regularizer """no""" +371 82 optimizer """adadelta""" +371 82 training_loop """owa""" +371 82 negative_sampler """basic""" +371 82 evaluator """rankbased""" +371 83 dataset """kinships""" +371 83 model """kg2e""" +371 83 loss """marginranking""" +371 83 regularizer """no""" +371 83 optimizer """adadelta""" +371 83 training_loop """owa""" +371 83 negative_sampler """basic""" +371 83 evaluator """rankbased""" +371 84 dataset """kinships""" +371 84 model """kg2e""" +371 84 loss """marginranking""" +371 84 regularizer """no""" +371 84 optimizer """adadelta""" +371 84 training_loop """owa""" +371 84 
negative_sampler """basic""" +371 84 evaluator """rankbased""" +371 85 dataset """kinships""" +371 85 model """kg2e""" +371 85 loss """marginranking""" +371 85 regularizer """no""" +371 85 optimizer """adadelta""" +371 85 training_loop """owa""" +371 85 negative_sampler """basic""" +371 85 evaluator """rankbased""" +371 86 dataset """kinships""" +371 86 model """kg2e""" +371 86 loss """marginranking""" +371 86 regularizer """no""" +371 86 optimizer """adadelta""" +371 86 training_loop """owa""" +371 86 negative_sampler """basic""" +371 86 evaluator """rankbased""" +371 87 dataset """kinships""" +371 87 model """kg2e""" +371 87 loss """marginranking""" +371 87 regularizer """no""" +371 87 optimizer """adadelta""" +371 87 training_loop """owa""" +371 87 negative_sampler """basic""" +371 87 evaluator """rankbased""" +371 88 dataset """kinships""" +371 88 model """kg2e""" +371 88 loss """marginranking""" +371 88 regularizer """no""" +371 88 optimizer """adadelta""" +371 88 training_loop """owa""" +371 88 negative_sampler """basic""" +371 88 evaluator """rankbased""" +371 89 dataset """kinships""" +371 89 model """kg2e""" +371 89 loss """marginranking""" +371 89 regularizer """no""" +371 89 optimizer """adadelta""" +371 89 training_loop """owa""" +371 89 negative_sampler """basic""" +371 89 evaluator """rankbased""" +371 90 dataset """kinships""" +371 90 model """kg2e""" +371 90 loss """marginranking""" +371 90 regularizer """no""" +371 90 optimizer """adadelta""" +371 90 training_loop """owa""" +371 90 negative_sampler """basic""" +371 90 evaluator """rankbased""" +371 91 dataset """kinships""" +371 91 model """kg2e""" +371 91 loss """marginranking""" +371 91 regularizer """no""" +371 91 optimizer """adadelta""" +371 91 training_loop """owa""" +371 91 negative_sampler """basic""" +371 91 evaluator """rankbased""" +371 92 dataset """kinships""" +371 92 model """kg2e""" +371 92 loss """marginranking""" +371 92 regularizer """no""" +371 92 optimizer """adadelta""" +371 92 
training_loop """owa""" +371 92 negative_sampler """basic""" +371 92 evaluator """rankbased""" +371 93 dataset """kinships""" +371 93 model """kg2e""" +371 93 loss """marginranking""" +371 93 regularizer """no""" +371 93 optimizer """adadelta""" +371 93 training_loop """owa""" +371 93 negative_sampler """basic""" +371 93 evaluator """rankbased""" +371 94 dataset """kinships""" +371 94 model """kg2e""" +371 94 loss """marginranking""" +371 94 regularizer """no""" +371 94 optimizer """adadelta""" +371 94 training_loop """owa""" +371 94 negative_sampler """basic""" +371 94 evaluator """rankbased""" +371 95 dataset """kinships""" +371 95 model """kg2e""" +371 95 loss """marginranking""" +371 95 regularizer """no""" +371 95 optimizer """adadelta""" +371 95 training_loop """owa""" +371 95 negative_sampler """basic""" +371 95 evaluator """rankbased""" +371 96 dataset """kinships""" +371 96 model """kg2e""" +371 96 loss """marginranking""" +371 96 regularizer """no""" +371 96 optimizer """adadelta""" +371 96 training_loop """owa""" +371 96 negative_sampler """basic""" +371 96 evaluator """rankbased""" +371 97 dataset """kinships""" +371 97 model """kg2e""" +371 97 loss """marginranking""" +371 97 regularizer """no""" +371 97 optimizer """adadelta""" +371 97 training_loop """owa""" +371 97 negative_sampler """basic""" +371 97 evaluator """rankbased""" +371 98 dataset """kinships""" +371 98 model """kg2e""" +371 98 loss """marginranking""" +371 98 regularizer """no""" +371 98 optimizer """adadelta""" +371 98 training_loop """owa""" +371 98 negative_sampler """basic""" +371 98 evaluator """rankbased""" +371 99 dataset """kinships""" +371 99 model """kg2e""" +371 99 loss """marginranking""" +371 99 regularizer """no""" +371 99 optimizer """adadelta""" +371 99 training_loop """owa""" +371 99 negative_sampler """basic""" +371 99 evaluator """rankbased""" +371 100 dataset """kinships""" +371 100 model """kg2e""" +371 100 loss """marginranking""" +371 100 regularizer """no""" +371 
100 optimizer """adadelta""" +371 100 training_loop """owa""" +371 100 negative_sampler """basic""" +371 100 evaluator """rankbased""" +372 1 model.embedding_dim 1.0 +372 1 model.c_min 0.056762956328563555 +372 1 model.c_max 1.7376936275046713 +372 1 loss.margin 9.732544229819624 +372 1 negative_sampler.num_negs_per_pos 39.0 +372 1 training.batch_size 2.0 +372 2 model.embedding_dim 2.0 +372 2 model.c_min 0.01368836825989826 +372 2 model.c_max 9.249819913112614 +372 2 loss.margin 4.480559544464045 +372 2 negative_sampler.num_negs_per_pos 94.0 +372 2 training.batch_size 1.0 +372 3 model.embedding_dim 0.0 +372 3 model.c_min 0.09475876157249152 +372 3 model.c_max 1.0955766518039973 +372 3 loss.margin 1.0241537143637247 +372 3 negative_sampler.num_negs_per_pos 57.0 +372 3 training.batch_size 1.0 +372 4 model.embedding_dim 2.0 +372 4 model.c_min 0.04395871730275312 +372 4 model.c_max 9.20144765222327 +372 4 loss.margin 5.131551494361998 +372 4 negative_sampler.num_negs_per_pos 50.0 +372 4 training.batch_size 2.0 +372 5 model.embedding_dim 2.0 +372 5 model.c_min 0.08834846301629065 +372 5 model.c_max 9.565200713483748 +372 5 loss.margin 7.175942350066829 +372 5 negative_sampler.num_negs_per_pos 34.0 +372 5 training.batch_size 1.0 +372 6 model.embedding_dim 2.0 +372 6 model.c_min 0.027469892652003332 +372 6 model.c_max 7.220859842157667 +372 6 loss.margin 5.137733054010111 +372 6 negative_sampler.num_negs_per_pos 6.0 +372 6 training.batch_size 0.0 +372 7 model.embedding_dim 0.0 +372 7 model.c_min 0.013837958907756968 +372 7 model.c_max 8.10088671314151 +372 7 loss.margin 7.248875654449164 +372 7 negative_sampler.num_negs_per_pos 24.0 +372 7 training.batch_size 1.0 +372 8 model.embedding_dim 2.0 +372 8 model.c_min 0.04580306972033041 +372 8 model.c_max 3.9645010050216545 +372 8 loss.margin 7.042851128572001 +372 8 negative_sampler.num_negs_per_pos 88.0 +372 8 training.batch_size 1.0 +372 9 model.embedding_dim 2.0 +372 9 model.c_min 0.0717212715641651 +372 9 model.c_max 
6.06086276369934 +372 9 loss.margin 4.148582205872346 +372 9 negative_sampler.num_negs_per_pos 7.0 +372 9 training.batch_size 2.0 +372 10 model.embedding_dim 1.0 +372 10 model.c_min 0.04947146719732976 +372 10 model.c_max 1.2780532020503939 +372 10 loss.margin 5.229186544513805 +372 10 negative_sampler.num_negs_per_pos 31.0 +372 10 training.batch_size 2.0 +372 11 model.embedding_dim 0.0 +372 11 model.c_min 0.07553927459817467 +372 11 model.c_max 1.5959062671465267 +372 11 loss.margin 2.734334678015899 +372 11 negative_sampler.num_negs_per_pos 4.0 +372 11 training.batch_size 2.0 +372 12 model.embedding_dim 1.0 +372 12 model.c_min 0.01393459855316165 +372 12 model.c_max 4.62676817201536 +372 12 loss.margin 1.3471829990785273 +372 12 negative_sampler.num_negs_per_pos 80.0 +372 12 training.batch_size 1.0 +372 13 model.embedding_dim 1.0 +372 13 model.c_min 0.010617450533883781 +372 13 model.c_max 4.446438172978845 +372 13 loss.margin 8.583535729766202 +372 13 negative_sampler.num_negs_per_pos 68.0 +372 13 training.batch_size 0.0 +372 14 model.embedding_dim 2.0 +372 14 model.c_min 0.02051654583321927 +372 14 model.c_max 7.441878156474813 +372 14 loss.margin 2.511576771965153 +372 14 negative_sampler.num_negs_per_pos 55.0 +372 14 training.batch_size 2.0 +372 15 model.embedding_dim 1.0 +372 15 model.c_min 0.02578830280180853 +372 15 model.c_max 7.375285427794654 +372 15 loss.margin 0.9033116064752245 +372 15 negative_sampler.num_negs_per_pos 10.0 +372 15 training.batch_size 0.0 +372 16 model.embedding_dim 0.0 +372 16 model.c_min 0.03946665298074501 +372 16 model.c_max 2.603464014648944 +372 16 loss.margin 6.651319379036143 +372 16 negative_sampler.num_negs_per_pos 15.0 +372 16 training.batch_size 0.0 +372 17 model.embedding_dim 0.0 +372 17 model.c_min 0.08392145413721577 +372 17 model.c_max 6.975425388864682 +372 17 loss.margin 7.6231013922760145 +372 17 negative_sampler.num_negs_per_pos 47.0 +372 17 training.batch_size 0.0 +372 18 model.embedding_dim 0.0 +372 18 
model.c_min 0.08013400672198227 +372 18 model.c_max 2.3401302601106817 +372 18 loss.margin 9.57744758955891 +372 18 negative_sampler.num_negs_per_pos 86.0 +372 18 training.batch_size 0.0 +372 19 model.embedding_dim 1.0 +372 19 model.c_min 0.019153225298339942 +372 19 model.c_max 1.474449999685976 +372 19 loss.margin 8.787896278268548 +372 19 negative_sampler.num_negs_per_pos 55.0 +372 19 training.batch_size 0.0 +372 20 model.embedding_dim 2.0 +372 20 model.c_min 0.06214078346878737 +372 20 model.c_max 6.17059462325309 +372 20 loss.margin 5.455907787028738 +372 20 negative_sampler.num_negs_per_pos 49.0 +372 20 training.batch_size 0.0 +372 21 model.embedding_dim 2.0 +372 21 model.c_min 0.01921286415356959 +372 21 model.c_max 6.224728815175206 +372 21 loss.margin 6.899925436059709 +372 21 negative_sampler.num_negs_per_pos 24.0 +372 21 training.batch_size 0.0 +372 22 model.embedding_dim 2.0 +372 22 model.c_min 0.019618391532480933 +372 22 model.c_max 2.7891545254280197 +372 22 loss.margin 1.8381231591095393 +372 22 negative_sampler.num_negs_per_pos 69.0 +372 22 training.batch_size 0.0 +372 23 model.embedding_dim 1.0 +372 23 model.c_min 0.016874724599567587 +372 23 model.c_max 2.30161262846179 +372 23 loss.margin 6.1316077257408 +372 23 negative_sampler.num_negs_per_pos 71.0 +372 23 training.batch_size 2.0 +372 24 model.embedding_dim 2.0 +372 24 model.c_min 0.04387717685202148 +372 24 model.c_max 3.327974897459467 +372 24 loss.margin 4.1206082122573875 +372 24 negative_sampler.num_negs_per_pos 78.0 +372 24 training.batch_size 2.0 +372 25 model.embedding_dim 2.0 +372 25 model.c_min 0.014161342979054881 +372 25 model.c_max 6.888613405734656 +372 25 loss.margin 3.020490644388279 +372 25 negative_sampler.num_negs_per_pos 71.0 +372 25 training.batch_size 2.0 +372 26 model.embedding_dim 0.0 +372 26 model.c_min 0.03104358159732802 +372 26 model.c_max 3.8538100971479077 +372 26 loss.margin 9.155981698974077 +372 26 negative_sampler.num_negs_per_pos 96.0 +372 26 
training.batch_size 1.0 +372 27 model.embedding_dim 1.0 +372 27 model.c_min 0.013317083155910684 +372 27 model.c_max 9.819724647463426 +372 27 loss.margin 7.875606214398661 +372 27 negative_sampler.num_negs_per_pos 71.0 +372 27 training.batch_size 1.0 +372 28 model.embedding_dim 2.0 +372 28 model.c_min 0.02365117921133626 +372 28 model.c_max 4.552794018389638 +372 28 loss.margin 1.091435934900899 +372 28 negative_sampler.num_negs_per_pos 11.0 +372 28 training.batch_size 1.0 +372 29 model.embedding_dim 1.0 +372 29 model.c_min 0.04053207579954264 +372 29 model.c_max 9.824818673189299 +372 29 loss.margin 5.907936523179641 +372 29 negative_sampler.num_negs_per_pos 39.0 +372 29 training.batch_size 2.0 +372 30 model.embedding_dim 0.0 +372 30 model.c_min 0.06597517856846899 +372 30 model.c_max 9.878430075400244 +372 30 loss.margin 8.280755835849272 +372 30 negative_sampler.num_negs_per_pos 74.0 +372 30 training.batch_size 0.0 +372 31 model.embedding_dim 1.0 +372 31 model.c_min 0.03900481047056963 +372 31 model.c_max 1.4666003322835768 +372 31 loss.margin 7.3426734608607775 +372 31 negative_sampler.num_negs_per_pos 75.0 +372 31 training.batch_size 0.0 +372 32 model.embedding_dim 0.0 +372 32 model.c_min 0.04427684343254661 +372 32 model.c_max 6.599418031209805 +372 32 loss.margin 7.2472889918107795 +372 32 negative_sampler.num_negs_per_pos 68.0 +372 32 training.batch_size 1.0 +372 33 model.embedding_dim 1.0 +372 33 model.c_min 0.015203384184552752 +372 33 model.c_max 3.76314777282879 +372 33 loss.margin 1.1881432392236204 +372 33 negative_sampler.num_negs_per_pos 77.0 +372 33 training.batch_size 2.0 +372 34 model.embedding_dim 2.0 +372 34 model.c_min 0.01361825517588184 +372 34 model.c_max 4.720461586840453 +372 34 loss.margin 2.14213309009277 +372 34 negative_sampler.num_negs_per_pos 84.0 +372 34 training.batch_size 0.0 +372 35 model.embedding_dim 0.0 +372 35 model.c_min 0.014804428897269944 +372 35 model.c_max 7.424678420985782 +372 35 loss.margin 4.949967983816474 +372 
35 negative_sampler.num_negs_per_pos 3.0 +372 35 training.batch_size 0.0 +372 36 model.embedding_dim 0.0 +372 36 model.c_min 0.015277920011972469 +372 36 model.c_max 9.230907578129049 +372 36 loss.margin 0.9850943039037319 +372 36 negative_sampler.num_negs_per_pos 69.0 +372 36 training.batch_size 2.0 +372 37 model.embedding_dim 2.0 +372 37 model.c_min 0.013299305946551385 +372 37 model.c_max 1.923637567685693 +372 37 loss.margin 8.879685977083616 +372 37 negative_sampler.num_negs_per_pos 21.0 +372 37 training.batch_size 2.0 +372 38 model.embedding_dim 0.0 +372 38 model.c_min 0.018063724382590136 +372 38 model.c_max 3.284902129340178 +372 38 loss.margin 3.48215062148018 +372 38 negative_sampler.num_negs_per_pos 43.0 +372 38 training.batch_size 0.0 +372 39 model.embedding_dim 2.0 +372 39 model.c_min 0.05142287474824989 +372 39 model.c_max 2.5599790492949484 +372 39 loss.margin 6.234559403344749 +372 39 negative_sampler.num_negs_per_pos 74.0 +372 39 training.batch_size 1.0 +372 40 model.embedding_dim 2.0 +372 40 model.c_min 0.024863142456869262 +372 40 model.c_max 5.7013636139819415 +372 40 loss.margin 7.416378839402599 +372 40 negative_sampler.num_negs_per_pos 84.0 +372 40 training.batch_size 0.0 +372 41 model.embedding_dim 2.0 +372 41 model.c_min 0.017343807007645543 +372 41 model.c_max 8.508238800794283 +372 41 loss.margin 5.6809222109501105 +372 41 negative_sampler.num_negs_per_pos 93.0 +372 41 training.batch_size 1.0 +372 42 model.embedding_dim 2.0 +372 42 model.c_min 0.021241568656462925 +372 42 model.c_max 4.6636020264877 +372 42 loss.margin 5.241334533318438 +372 42 negative_sampler.num_negs_per_pos 70.0 +372 42 training.batch_size 1.0 +372 43 model.embedding_dim 2.0 +372 43 model.c_min 0.05403343072787022 +372 43 model.c_max 3.755660694489661 +372 43 loss.margin 6.038596580897968 +372 43 negative_sampler.num_negs_per_pos 11.0 +372 43 training.batch_size 0.0 +372 44 model.embedding_dim 0.0 +372 44 model.c_min 0.019967741548742465 +372 44 model.c_max 
7.206312627675205 +372 44 loss.margin 7.233868227303185 +372 44 negative_sampler.num_negs_per_pos 19.0 +372 44 training.batch_size 2.0 +372 45 model.embedding_dim 2.0 +372 45 model.c_min 0.0944209616239949 +372 45 model.c_max 3.3732280883366883 +372 45 loss.margin 2.538615594152712 +372 45 negative_sampler.num_negs_per_pos 21.0 +372 45 training.batch_size 0.0 +372 46 model.embedding_dim 0.0 +372 46 model.c_min 0.0569075426557582 +372 46 model.c_max 7.757580751012599 +372 46 loss.margin 4.248777031437408 +372 46 negative_sampler.num_negs_per_pos 29.0 +372 46 training.batch_size 0.0 +372 47 model.embedding_dim 1.0 +372 47 model.c_min 0.017881742801754856 +372 47 model.c_max 9.4069797845245 +372 47 loss.margin 7.997528154182457 +372 47 negative_sampler.num_negs_per_pos 87.0 +372 47 training.batch_size 2.0 +372 48 model.embedding_dim 2.0 +372 48 model.c_min 0.0825967555969012 +372 48 model.c_max 6.647555867496061 +372 48 loss.margin 7.763552249589842 +372 48 negative_sampler.num_negs_per_pos 27.0 +372 48 training.batch_size 0.0 +372 49 model.embedding_dim 2.0 +372 49 model.c_min 0.038987775169331926 +372 49 model.c_max 9.414705758058636 +372 49 loss.margin 0.974648683469304 +372 49 negative_sampler.num_negs_per_pos 72.0 +372 49 training.batch_size 0.0 +372 50 model.embedding_dim 2.0 +372 50 model.c_min 0.08300710660105798 +372 50 model.c_max 1.8299669456979593 +372 50 loss.margin 9.422198015163605 +372 50 negative_sampler.num_negs_per_pos 78.0 +372 50 training.batch_size 0.0 +372 51 model.embedding_dim 2.0 +372 51 model.c_min 0.01933024908816772 +372 51 model.c_max 8.963633462375434 +372 51 loss.margin 9.830384499004376 +372 51 negative_sampler.num_negs_per_pos 76.0 +372 51 training.batch_size 0.0 +372 52 model.embedding_dim 2.0 +372 52 model.c_min 0.012080270509807516 +372 52 model.c_max 8.389635770208372 +372 52 loss.margin 9.578012050356618 +372 52 negative_sampler.num_negs_per_pos 66.0 +372 52 training.batch_size 0.0 +372 53 model.embedding_dim 2.0 +372 53 
model.c_min 0.010044259745764016 +372 53 model.c_max 4.995867372202243 +372 53 loss.margin 8.223883888146563 +372 53 negative_sampler.num_negs_per_pos 78.0 +372 53 training.batch_size 0.0 +372 54 model.embedding_dim 0.0 +372 54 model.c_min 0.05418806441728437 +372 54 model.c_max 1.3183635401201603 +372 54 loss.margin 6.911493562195602 +372 54 negative_sampler.num_negs_per_pos 57.0 +372 54 training.batch_size 0.0 +372 55 model.embedding_dim 2.0 +372 55 model.c_min 0.0454693487361093 +372 55 model.c_max 9.98813915325216 +372 55 loss.margin 2.4092827384493765 +372 55 negative_sampler.num_negs_per_pos 95.0 +372 55 training.batch_size 1.0 +372 56 model.embedding_dim 2.0 +372 56 model.c_min 0.02955512025131634 +372 56 model.c_max 9.365298155026679 +372 56 loss.margin 2.4280866295894885 +372 56 negative_sampler.num_negs_per_pos 11.0 +372 56 training.batch_size 2.0 +372 57 model.embedding_dim 1.0 +372 57 model.c_min 0.07436450100819102 +372 57 model.c_max 6.194173646817833 +372 57 loss.margin 0.7871911502305486 +372 57 negative_sampler.num_negs_per_pos 82.0 +372 57 training.batch_size 2.0 +372 58 model.embedding_dim 2.0 +372 58 model.c_min 0.09252695143719133 +372 58 model.c_max 7.933004911004045 +372 58 loss.margin 6.977959548366553 +372 58 negative_sampler.num_negs_per_pos 41.0 +372 58 training.batch_size 1.0 +372 59 model.embedding_dim 1.0 +372 59 model.c_min 0.06578243551545865 +372 59 model.c_max 2.179615708589833 +372 59 loss.margin 6.902138766772979 +372 59 negative_sampler.num_negs_per_pos 19.0 +372 59 training.batch_size 0.0 +372 60 model.embedding_dim 2.0 +372 60 model.c_min 0.03638858936539935 +372 60 model.c_max 7.9308542284746135 +372 60 loss.margin 1.6884874949839217 +372 60 negative_sampler.num_negs_per_pos 21.0 +372 60 training.batch_size 0.0 +372 61 model.embedding_dim 1.0 +372 61 model.c_min 0.04550831592835046 +372 61 model.c_max 4.310844253529051 +372 61 loss.margin 9.90606657102642 +372 61 negative_sampler.num_negs_per_pos 65.0 +372 61 
training.batch_size 2.0 +372 62 model.embedding_dim 1.0 +372 62 model.c_min 0.019085555425363727 +372 62 model.c_max 5.987927513532035 +372 62 loss.margin 7.51135609375647 +372 62 negative_sampler.num_negs_per_pos 21.0 +372 62 training.batch_size 1.0 +372 63 model.embedding_dim 2.0 +372 63 model.c_min 0.033982878460961576 +372 63 model.c_max 5.5385482285792715 +372 63 loss.margin 8.407179609571527 +372 63 negative_sampler.num_negs_per_pos 57.0 +372 63 training.batch_size 0.0 +372 64 model.embedding_dim 1.0 +372 64 model.c_min 0.03815368333316243 +372 64 model.c_max 9.290094046350685 +372 64 loss.margin 6.380072893365269 +372 64 negative_sampler.num_negs_per_pos 2.0 +372 64 training.batch_size 0.0 +372 65 model.embedding_dim 2.0 +372 65 model.c_min 0.013303566307621245 +372 65 model.c_max 6.945096391807172 +372 65 loss.margin 8.228608420965594 +372 65 negative_sampler.num_negs_per_pos 8.0 +372 65 training.batch_size 2.0 +372 66 model.embedding_dim 1.0 +372 66 model.c_min 0.07698809928120795 +372 66 model.c_max 4.564936154909816 +372 66 loss.margin 2.8605175802700846 +372 66 negative_sampler.num_negs_per_pos 86.0 +372 66 training.batch_size 2.0 +372 67 model.embedding_dim 2.0 +372 67 model.c_min 0.04379767536144964 +372 67 model.c_max 3.957559015721932 +372 67 loss.margin 8.883078776394557 +372 67 negative_sampler.num_negs_per_pos 40.0 +372 67 training.batch_size 1.0 +372 68 model.embedding_dim 1.0 +372 68 model.c_min 0.07708866253282047 +372 68 model.c_max 5.215166417676546 +372 68 loss.margin 5.222236843531409 +372 68 negative_sampler.num_negs_per_pos 55.0 +372 68 training.batch_size 1.0 +372 69 model.embedding_dim 0.0 +372 69 model.c_min 0.07281294991517476 +372 69 model.c_max 5.955093492856163 +372 69 loss.margin 3.6745576170183036 +372 69 negative_sampler.num_negs_per_pos 83.0 +372 69 training.batch_size 2.0 +372 70 model.embedding_dim 2.0 +372 70 model.c_min 0.017015290360956246 +372 70 model.c_max 7.8139774629242 +372 70 loss.margin 8.777194187368826 +372 70 
negative_sampler.num_negs_per_pos 37.0 +372 70 training.batch_size 1.0 +372 71 model.embedding_dim 1.0 +372 71 model.c_min 0.08594199160708076 +372 71 model.c_max 4.897800689007304 +372 71 loss.margin 6.553027963448259 +372 71 negative_sampler.num_negs_per_pos 6.0 +372 71 training.batch_size 2.0 +372 72 model.embedding_dim 2.0 +372 72 model.c_min 0.09195690486307238 +372 72 model.c_max 5.6391842676800765 +372 72 loss.margin 1.05686722438789 +372 72 negative_sampler.num_negs_per_pos 94.0 +372 72 training.batch_size 1.0 +372 73 model.embedding_dim 1.0 +372 73 model.c_min 0.0570593393970984 +372 73 model.c_max 4.462798176578995 +372 73 loss.margin 7.720366271971749 +372 73 negative_sampler.num_negs_per_pos 53.0 +372 73 training.batch_size 1.0 +372 74 model.embedding_dim 1.0 +372 74 model.c_min 0.01993599276568813 +372 74 model.c_max 8.378007565569504 +372 74 loss.margin 1.9110622452340733 +372 74 negative_sampler.num_negs_per_pos 60.0 +372 74 training.batch_size 0.0 +372 75 model.embedding_dim 2.0 +372 75 model.c_min 0.020258401672324053 +372 75 model.c_max 2.6497280935227803 +372 75 loss.margin 8.56710045709979 +372 75 negative_sampler.num_negs_per_pos 65.0 +372 75 training.batch_size 0.0 +372 76 model.embedding_dim 0.0 +372 76 model.c_min 0.01223927029405082 +372 76 model.c_max 3.577585336736821 +372 76 loss.margin 8.996265603107517 +372 76 negative_sampler.num_negs_per_pos 74.0 +372 76 training.batch_size 1.0 +372 77 model.embedding_dim 0.0 +372 77 model.c_min 0.07558341536553904 +372 77 model.c_max 2.655102202073577 +372 77 loss.margin 4.146084869004076 +372 77 negative_sampler.num_negs_per_pos 53.0 +372 77 training.batch_size 0.0 +372 78 model.embedding_dim 2.0 +372 78 model.c_min 0.047853015070253586 +372 78 model.c_max 4.706226124176905 +372 78 loss.margin 9.519943539125354 +372 78 negative_sampler.num_negs_per_pos 25.0 +372 78 training.batch_size 0.0 +372 79 model.embedding_dim 2.0 +372 79 model.c_min 0.022668787615024288 +372 79 model.c_max 9.145227907857793 
+372 79 loss.margin 1.8344980918253553 +372 79 negative_sampler.num_negs_per_pos 59.0 +372 79 training.batch_size 0.0 +372 80 model.embedding_dim 1.0 +372 80 model.c_min 0.014102397859108766 +372 80 model.c_max 2.1016242131524048 +372 80 loss.margin 3.4414827329591695 +372 80 negative_sampler.num_negs_per_pos 54.0 +372 80 training.batch_size 2.0 +372 81 model.embedding_dim 0.0 +372 81 model.c_min 0.01469933958634542 +372 81 model.c_max 3.6489526227526916 +372 81 loss.margin 5.50427602514842 +372 81 negative_sampler.num_negs_per_pos 6.0 +372 81 training.batch_size 0.0 +372 82 model.embedding_dim 2.0 +372 82 model.c_min 0.07146156121496974 +372 82 model.c_max 5.331371064726272 +372 82 loss.margin 8.504546585944057 +372 82 negative_sampler.num_negs_per_pos 84.0 +372 82 training.batch_size 0.0 +372 83 model.embedding_dim 1.0 +372 83 model.c_min 0.05019424960578609 +372 83 model.c_max 1.2346496007473027 +372 83 loss.margin 6.366766067699839 +372 83 negative_sampler.num_negs_per_pos 15.0 +372 83 training.batch_size 2.0 +372 84 model.embedding_dim 1.0 +372 84 model.c_min 0.06666193003604472 +372 84 model.c_max 7.9092034304224335 +372 84 loss.margin 6.032417766279242 +372 84 negative_sampler.num_negs_per_pos 21.0 +372 84 training.batch_size 2.0 +372 85 model.embedding_dim 2.0 +372 85 model.c_min 0.019737244833434493 +372 85 model.c_max 3.929563385947229 +372 85 loss.margin 7.814885832954309 +372 85 negative_sampler.num_negs_per_pos 89.0 +372 85 training.batch_size 2.0 +372 86 model.embedding_dim 2.0 +372 86 model.c_min 0.05238978952660238 +372 86 model.c_max 1.271555593873403 +372 86 loss.margin 7.352567030856728 +372 86 negative_sampler.num_negs_per_pos 43.0 +372 86 training.batch_size 1.0 +372 87 model.embedding_dim 2.0 +372 87 model.c_min 0.05204862848841495 +372 87 model.c_max 9.41657804829952 +372 87 loss.margin 8.610150359251463 +372 87 negative_sampler.num_negs_per_pos 25.0 +372 87 training.batch_size 1.0 +372 88 model.embedding_dim 2.0 +372 88 model.c_min 
0.015959964120843637 +372 88 model.c_max 1.6909874194313597 +372 88 loss.margin 6.201792872214906 +372 88 negative_sampler.num_negs_per_pos 40.0 +372 88 training.batch_size 0.0 +372 89 model.embedding_dim 2.0 +372 89 model.c_min 0.02256744709845399 +372 89 model.c_max 2.692690959170748 +372 89 loss.margin 5.606474819871646 +372 89 negative_sampler.num_negs_per_pos 24.0 +372 89 training.batch_size 2.0 +372 90 model.embedding_dim 1.0 +372 90 model.c_min 0.09296268161415054 +372 90 model.c_max 3.7437241042244134 +372 90 loss.margin 7.947229686781259 +372 90 negative_sampler.num_negs_per_pos 87.0 +372 90 training.batch_size 0.0 +372 91 model.embedding_dim 0.0 +372 91 model.c_min 0.02741929331173976 +372 91 model.c_max 1.902691571089319 +372 91 loss.margin 0.7445396010260337 +372 91 negative_sampler.num_negs_per_pos 62.0 +372 91 training.batch_size 1.0 +372 92 model.embedding_dim 2.0 +372 92 model.c_min 0.06864302678242072 +372 92 model.c_max 8.360292029330747 +372 92 loss.margin 9.18829964495977 +372 92 negative_sampler.num_negs_per_pos 72.0 +372 92 training.batch_size 1.0 +372 93 model.embedding_dim 0.0 +372 93 model.c_min 0.06262726396169195 +372 93 model.c_max 8.532795050320848 +372 93 loss.margin 6.734642500252582 +372 93 negative_sampler.num_negs_per_pos 34.0 +372 93 training.batch_size 0.0 +372 94 model.embedding_dim 2.0 +372 94 model.c_min 0.02989637743017165 +372 94 model.c_max 3.2319797272834565 +372 94 loss.margin 6.377133529188873 +372 94 negative_sampler.num_negs_per_pos 76.0 +372 94 training.batch_size 0.0 +372 95 model.embedding_dim 2.0 +372 95 model.c_min 0.03724942518239308 +372 95 model.c_max 1.923526034639741 +372 95 loss.margin 0.989384431194956 +372 95 negative_sampler.num_negs_per_pos 69.0 +372 95 training.batch_size 1.0 +372 96 model.embedding_dim 2.0 +372 96 model.c_min 0.028816335602219394 +372 96 model.c_max 8.017787413100669 +372 96 loss.margin 8.786079865220122 +372 96 negative_sampler.num_negs_per_pos 97.0 +372 96 training.batch_size 1.0 
+372 97 model.embedding_dim 2.0 +372 97 model.c_min 0.07740576888616485 +372 97 model.c_max 1.9379561461754922 +372 97 loss.margin 6.572739951868726 +372 97 negative_sampler.num_negs_per_pos 11.0 +372 97 training.batch_size 1.0 +372 98 model.embedding_dim 1.0 +372 98 model.c_min 0.08025347026242809 +372 98 model.c_max 5.541882317636299 +372 98 loss.margin 0.7452129708934337 +372 98 negative_sampler.num_negs_per_pos 25.0 +372 98 training.batch_size 0.0 +372 99 model.embedding_dim 0.0 +372 99 model.c_min 0.01445875439402932 +372 99 model.c_max 9.45061730350892 +372 99 loss.margin 7.524645622679815 +372 99 negative_sampler.num_negs_per_pos 92.0 +372 99 training.batch_size 1.0 +372 100 model.embedding_dim 0.0 +372 100 model.c_min 0.08745988384595224 +372 100 model.c_max 4.762474176637269 +372 100 loss.margin 6.910684624859138 +372 100 negative_sampler.num_negs_per_pos 18.0 +372 100 training.batch_size 2.0 +372 1 dataset """kinships""" +372 1 model """kg2e""" +372 1 loss """marginranking""" +372 1 regularizer """no""" +372 1 optimizer """adadelta""" +372 1 training_loop """owa""" +372 1 negative_sampler """basic""" +372 1 evaluator """rankbased""" +372 2 dataset """kinships""" +372 2 model """kg2e""" +372 2 loss """marginranking""" +372 2 regularizer """no""" +372 2 optimizer """adadelta""" +372 2 training_loop """owa""" +372 2 negative_sampler """basic""" +372 2 evaluator """rankbased""" +372 3 dataset """kinships""" +372 3 model """kg2e""" +372 3 loss """marginranking""" +372 3 regularizer """no""" +372 3 optimizer """adadelta""" +372 3 training_loop """owa""" +372 3 negative_sampler """basic""" +372 3 evaluator """rankbased""" +372 4 dataset """kinships""" +372 4 model """kg2e""" +372 4 loss """marginranking""" +372 4 regularizer """no""" +372 4 optimizer """adadelta""" +372 4 training_loop """owa""" +372 4 negative_sampler """basic""" +372 4 evaluator """rankbased""" +372 5 dataset """kinships""" +372 5 model """kg2e""" +372 5 loss """marginranking""" +372 5 
regularizer """no""" +372 5 optimizer """adadelta""" +372 5 training_loop """owa""" +372 5 negative_sampler """basic""" +372 5 evaluator """rankbased""" +372 6 dataset """kinships""" +372 6 model """kg2e""" +372 6 loss """marginranking""" +372 6 regularizer """no""" +372 6 optimizer """adadelta""" +372 6 training_loop """owa""" +372 6 negative_sampler """basic""" +372 6 evaluator """rankbased""" +372 7 dataset """kinships""" +372 7 model """kg2e""" +372 7 loss """marginranking""" +372 7 regularizer """no""" +372 7 optimizer """adadelta""" +372 7 training_loop """owa""" +372 7 negative_sampler """basic""" +372 7 evaluator """rankbased""" +372 8 dataset """kinships""" +372 8 model """kg2e""" +372 8 loss """marginranking""" +372 8 regularizer """no""" +372 8 optimizer """adadelta""" +372 8 training_loop """owa""" +372 8 negative_sampler """basic""" +372 8 evaluator """rankbased""" +372 9 dataset """kinships""" +372 9 model """kg2e""" +372 9 loss """marginranking""" +372 9 regularizer """no""" +372 9 optimizer """adadelta""" +372 9 training_loop """owa""" +372 9 negative_sampler """basic""" +372 9 evaluator """rankbased""" +372 10 dataset """kinships""" +372 10 model """kg2e""" +372 10 loss """marginranking""" +372 10 regularizer """no""" +372 10 optimizer """adadelta""" +372 10 training_loop """owa""" +372 10 negative_sampler """basic""" +372 10 evaluator """rankbased""" +372 11 dataset """kinships""" +372 11 model """kg2e""" +372 11 loss """marginranking""" +372 11 regularizer """no""" +372 11 optimizer """adadelta""" +372 11 training_loop """owa""" +372 11 negative_sampler """basic""" +372 11 evaluator """rankbased""" +372 12 dataset """kinships""" +372 12 model """kg2e""" +372 12 loss """marginranking""" +372 12 regularizer """no""" +372 12 optimizer """adadelta""" +372 12 training_loop """owa""" +372 12 negative_sampler """basic""" +372 12 evaluator """rankbased""" +372 13 dataset """kinships""" +372 13 model """kg2e""" +372 13 loss """marginranking""" +372 13 
regularizer """no""" +372 13 optimizer """adadelta""" +372 13 training_loop """owa""" +372 13 negative_sampler """basic""" +372 13 evaluator """rankbased""" +372 14 dataset """kinships""" +372 14 model """kg2e""" +372 14 loss """marginranking""" +372 14 regularizer """no""" +372 14 optimizer """adadelta""" +372 14 training_loop """owa""" +372 14 negative_sampler """basic""" +372 14 evaluator """rankbased""" +372 15 dataset """kinships""" +372 15 model """kg2e""" +372 15 loss """marginranking""" +372 15 regularizer """no""" +372 15 optimizer """adadelta""" +372 15 training_loop """owa""" +372 15 negative_sampler """basic""" +372 15 evaluator """rankbased""" +372 16 dataset """kinships""" +372 16 model """kg2e""" +372 16 loss """marginranking""" +372 16 regularizer """no""" +372 16 optimizer """adadelta""" +372 16 training_loop """owa""" +372 16 negative_sampler """basic""" +372 16 evaluator """rankbased""" +372 17 dataset """kinships""" +372 17 model """kg2e""" +372 17 loss """marginranking""" +372 17 regularizer """no""" +372 17 optimizer """adadelta""" +372 17 training_loop """owa""" +372 17 negative_sampler """basic""" +372 17 evaluator """rankbased""" +372 18 dataset """kinships""" +372 18 model """kg2e""" +372 18 loss """marginranking""" +372 18 regularizer """no""" +372 18 optimizer """adadelta""" +372 18 training_loop """owa""" +372 18 negative_sampler """basic""" +372 18 evaluator """rankbased""" +372 19 dataset """kinships""" +372 19 model """kg2e""" +372 19 loss """marginranking""" +372 19 regularizer """no""" +372 19 optimizer """adadelta""" +372 19 training_loop """owa""" +372 19 negative_sampler """basic""" +372 19 evaluator """rankbased""" +372 20 dataset """kinships""" +372 20 model """kg2e""" +372 20 loss """marginranking""" +372 20 regularizer """no""" +372 20 optimizer """adadelta""" +372 20 training_loop """owa""" +372 20 negative_sampler """basic""" +372 20 evaluator """rankbased""" +372 21 dataset """kinships""" +372 21 model """kg2e""" +372 21 
loss """marginranking""" +372 21 regularizer """no""" +372 21 optimizer """adadelta""" +372 21 training_loop """owa""" +372 21 negative_sampler """basic""" +372 21 evaluator """rankbased""" +372 22 dataset """kinships""" +372 22 model """kg2e""" +372 22 loss """marginranking""" +372 22 regularizer """no""" +372 22 optimizer """adadelta""" +372 22 training_loop """owa""" +372 22 negative_sampler """basic""" +372 22 evaluator """rankbased""" +372 23 dataset """kinships""" +372 23 model """kg2e""" +372 23 loss """marginranking""" +372 23 regularizer """no""" +372 23 optimizer """adadelta""" +372 23 training_loop """owa""" +372 23 negative_sampler """basic""" +372 23 evaluator """rankbased""" +372 24 dataset """kinships""" +372 24 model """kg2e""" +372 24 loss """marginranking""" +372 24 regularizer """no""" +372 24 optimizer """adadelta""" +372 24 training_loop """owa""" +372 24 negative_sampler """basic""" +372 24 evaluator """rankbased""" +372 25 dataset """kinships""" +372 25 model """kg2e""" +372 25 loss """marginranking""" +372 25 regularizer """no""" +372 25 optimizer """adadelta""" +372 25 training_loop """owa""" +372 25 negative_sampler """basic""" +372 25 evaluator """rankbased""" +372 26 dataset """kinships""" +372 26 model """kg2e""" +372 26 loss """marginranking""" +372 26 regularizer """no""" +372 26 optimizer """adadelta""" +372 26 training_loop """owa""" +372 26 negative_sampler """basic""" +372 26 evaluator """rankbased""" +372 27 dataset """kinships""" +372 27 model """kg2e""" +372 27 loss """marginranking""" +372 27 regularizer """no""" +372 27 optimizer """adadelta""" +372 27 training_loop """owa""" +372 27 negative_sampler """basic""" +372 27 evaluator """rankbased""" +372 28 dataset """kinships""" +372 28 model """kg2e""" +372 28 loss """marginranking""" +372 28 regularizer """no""" +372 28 optimizer """adadelta""" +372 28 training_loop """owa""" +372 28 negative_sampler """basic""" +372 28 evaluator """rankbased""" +372 29 dataset """kinships""" 
+372 29 model """kg2e""" +372 29 loss """marginranking""" +372 29 regularizer """no""" +372 29 optimizer """adadelta""" +372 29 training_loop """owa""" +372 29 negative_sampler """basic""" +372 29 evaluator """rankbased""" +372 30 dataset """kinships""" +372 30 model """kg2e""" +372 30 loss """marginranking""" +372 30 regularizer """no""" +372 30 optimizer """adadelta""" +372 30 training_loop """owa""" +372 30 negative_sampler """basic""" +372 30 evaluator """rankbased""" +372 31 dataset """kinships""" +372 31 model """kg2e""" +372 31 loss """marginranking""" +372 31 regularizer """no""" +372 31 optimizer """adadelta""" +372 31 training_loop """owa""" +372 31 negative_sampler """basic""" +372 31 evaluator """rankbased""" +372 32 dataset """kinships""" +372 32 model """kg2e""" +372 32 loss """marginranking""" +372 32 regularizer """no""" +372 32 optimizer """adadelta""" +372 32 training_loop """owa""" +372 32 negative_sampler """basic""" +372 32 evaluator """rankbased""" +372 33 dataset """kinships""" +372 33 model """kg2e""" +372 33 loss """marginranking""" +372 33 regularizer """no""" +372 33 optimizer """adadelta""" +372 33 training_loop """owa""" +372 33 negative_sampler """basic""" +372 33 evaluator """rankbased""" +372 34 dataset """kinships""" +372 34 model """kg2e""" +372 34 loss """marginranking""" +372 34 regularizer """no""" +372 34 optimizer """adadelta""" +372 34 training_loop """owa""" +372 34 negative_sampler """basic""" +372 34 evaluator """rankbased""" +372 35 dataset """kinships""" +372 35 model """kg2e""" +372 35 loss """marginranking""" +372 35 regularizer """no""" +372 35 optimizer """adadelta""" +372 35 training_loop """owa""" +372 35 negative_sampler """basic""" +372 35 evaluator """rankbased""" +372 36 dataset """kinships""" +372 36 model """kg2e""" +372 36 loss """marginranking""" +372 36 regularizer """no""" +372 36 optimizer """adadelta""" +372 36 training_loop """owa""" +372 36 negative_sampler """basic""" +372 36 evaluator 
"""rankbased""" +372 37 dataset """kinships""" +372 37 model """kg2e""" +372 37 loss """marginranking""" +372 37 regularizer """no""" +372 37 optimizer """adadelta""" +372 37 training_loop """owa""" +372 37 negative_sampler """basic""" +372 37 evaluator """rankbased""" +372 38 dataset """kinships""" +372 38 model """kg2e""" +372 38 loss """marginranking""" +372 38 regularizer """no""" +372 38 optimizer """adadelta""" +372 38 training_loop """owa""" +372 38 negative_sampler """basic""" +372 38 evaluator """rankbased""" +372 39 dataset """kinships""" +372 39 model """kg2e""" +372 39 loss """marginranking""" +372 39 regularizer """no""" +372 39 optimizer """adadelta""" +372 39 training_loop """owa""" +372 39 negative_sampler """basic""" +372 39 evaluator """rankbased""" +372 40 dataset """kinships""" +372 40 model """kg2e""" +372 40 loss """marginranking""" +372 40 regularizer """no""" +372 40 optimizer """adadelta""" +372 40 training_loop """owa""" +372 40 negative_sampler """basic""" +372 40 evaluator """rankbased""" +372 41 dataset """kinships""" +372 41 model """kg2e""" +372 41 loss """marginranking""" +372 41 regularizer """no""" +372 41 optimizer """adadelta""" +372 41 training_loop """owa""" +372 41 negative_sampler """basic""" +372 41 evaluator """rankbased""" +372 42 dataset """kinships""" +372 42 model """kg2e""" +372 42 loss """marginranking""" +372 42 regularizer """no""" +372 42 optimizer """adadelta""" +372 42 training_loop """owa""" +372 42 negative_sampler """basic""" +372 42 evaluator """rankbased""" +372 43 dataset """kinships""" +372 43 model """kg2e""" +372 43 loss """marginranking""" +372 43 regularizer """no""" +372 43 optimizer """adadelta""" +372 43 training_loop """owa""" +372 43 negative_sampler """basic""" +372 43 evaluator """rankbased""" +372 44 dataset """kinships""" +372 44 model """kg2e""" +372 44 loss """marginranking""" +372 44 regularizer """no""" +372 44 optimizer """adadelta""" +372 44 training_loop """owa""" +372 44 
negative_sampler """basic""" +372 44 evaluator """rankbased""" +372 45 dataset """kinships""" +372 45 model """kg2e""" +372 45 loss """marginranking""" +372 45 regularizer """no""" +372 45 optimizer """adadelta""" +372 45 training_loop """owa""" +372 45 negative_sampler """basic""" +372 45 evaluator """rankbased""" +372 46 dataset """kinships""" +372 46 model """kg2e""" +372 46 loss """marginranking""" +372 46 regularizer """no""" +372 46 optimizer """adadelta""" +372 46 training_loop """owa""" +372 46 negative_sampler """basic""" +372 46 evaluator """rankbased""" +372 47 dataset """kinships""" +372 47 model """kg2e""" +372 47 loss """marginranking""" +372 47 regularizer """no""" +372 47 optimizer """adadelta""" +372 47 training_loop """owa""" +372 47 negative_sampler """basic""" +372 47 evaluator """rankbased""" +372 48 dataset """kinships""" +372 48 model """kg2e""" +372 48 loss """marginranking""" +372 48 regularizer """no""" +372 48 optimizer """adadelta""" +372 48 training_loop """owa""" +372 48 negative_sampler """basic""" +372 48 evaluator """rankbased""" +372 49 dataset """kinships""" +372 49 model """kg2e""" +372 49 loss """marginranking""" +372 49 regularizer """no""" +372 49 optimizer """adadelta""" +372 49 training_loop """owa""" +372 49 negative_sampler """basic""" +372 49 evaluator """rankbased""" +372 50 dataset """kinships""" +372 50 model """kg2e""" +372 50 loss """marginranking""" +372 50 regularizer """no""" +372 50 optimizer """adadelta""" +372 50 training_loop """owa""" +372 50 negative_sampler """basic""" +372 50 evaluator """rankbased""" +372 51 dataset """kinships""" +372 51 model """kg2e""" +372 51 loss """marginranking""" +372 51 regularizer """no""" +372 51 optimizer """adadelta""" +372 51 training_loop """owa""" +372 51 negative_sampler """basic""" +372 51 evaluator """rankbased""" +372 52 dataset """kinships""" +372 52 model """kg2e""" +372 52 loss """marginranking""" +372 52 regularizer """no""" +372 52 optimizer """adadelta""" +372 52 
training_loop """owa""" +372 52 negative_sampler """basic""" +372 52 evaluator """rankbased""" +372 53 dataset """kinships""" +372 53 model """kg2e""" +372 53 loss """marginranking""" +372 53 regularizer """no""" +372 53 optimizer """adadelta""" +372 53 training_loop """owa""" +372 53 negative_sampler """basic""" +372 53 evaluator """rankbased""" +372 54 dataset """kinships""" +372 54 model """kg2e""" +372 54 loss """marginranking""" +372 54 regularizer """no""" +372 54 optimizer """adadelta""" +372 54 training_loop """owa""" +372 54 negative_sampler """basic""" +372 54 evaluator """rankbased""" +372 55 dataset """kinships""" +372 55 model """kg2e""" +372 55 loss """marginranking""" +372 55 regularizer """no""" +372 55 optimizer """adadelta""" +372 55 training_loop """owa""" +372 55 negative_sampler """basic""" +372 55 evaluator """rankbased""" +372 56 dataset """kinships""" +372 56 model """kg2e""" +372 56 loss """marginranking""" +372 56 regularizer """no""" +372 56 optimizer """adadelta""" +372 56 training_loop """owa""" +372 56 negative_sampler """basic""" +372 56 evaluator """rankbased""" +372 57 dataset """kinships""" +372 57 model """kg2e""" +372 57 loss """marginranking""" +372 57 regularizer """no""" +372 57 optimizer """adadelta""" +372 57 training_loop """owa""" +372 57 negative_sampler """basic""" +372 57 evaluator """rankbased""" +372 58 dataset """kinships""" +372 58 model """kg2e""" +372 58 loss """marginranking""" +372 58 regularizer """no""" +372 58 optimizer """adadelta""" +372 58 training_loop """owa""" +372 58 negative_sampler """basic""" +372 58 evaluator """rankbased""" +372 59 dataset """kinships""" +372 59 model """kg2e""" +372 59 loss """marginranking""" +372 59 regularizer """no""" +372 59 optimizer """adadelta""" +372 59 training_loop """owa""" +372 59 negative_sampler """basic""" +372 59 evaluator """rankbased""" +372 60 dataset """kinships""" +372 60 model """kg2e""" +372 60 loss """marginranking""" +372 60 regularizer """no""" +372 60 
optimizer """adadelta""" +372 60 training_loop """owa""" +372 60 negative_sampler """basic""" +372 60 evaluator """rankbased""" +372 61 dataset """kinships""" +372 61 model """kg2e""" +372 61 loss """marginranking""" +372 61 regularizer """no""" +372 61 optimizer """adadelta""" +372 61 training_loop """owa""" +372 61 negative_sampler """basic""" +372 61 evaluator """rankbased""" +372 62 dataset """kinships""" +372 62 model """kg2e""" +372 62 loss """marginranking""" +372 62 regularizer """no""" +372 62 optimizer """adadelta""" +372 62 training_loop """owa""" +372 62 negative_sampler """basic""" +372 62 evaluator """rankbased""" +372 63 dataset """kinships""" +372 63 model """kg2e""" +372 63 loss """marginranking""" +372 63 regularizer """no""" +372 63 optimizer """adadelta""" +372 63 training_loop """owa""" +372 63 negative_sampler """basic""" +372 63 evaluator """rankbased""" +372 64 dataset """kinships""" +372 64 model """kg2e""" +372 64 loss """marginranking""" +372 64 regularizer """no""" +372 64 optimizer """adadelta""" +372 64 training_loop """owa""" +372 64 negative_sampler """basic""" +372 64 evaluator """rankbased""" +372 65 dataset """kinships""" +372 65 model """kg2e""" +372 65 loss """marginranking""" +372 65 regularizer """no""" +372 65 optimizer """adadelta""" +372 65 training_loop """owa""" +372 65 negative_sampler """basic""" +372 65 evaluator """rankbased""" +372 66 dataset """kinships""" +372 66 model """kg2e""" +372 66 loss """marginranking""" +372 66 regularizer """no""" +372 66 optimizer """adadelta""" +372 66 training_loop """owa""" +372 66 negative_sampler """basic""" +372 66 evaluator """rankbased""" +372 67 dataset """kinships""" +372 67 model """kg2e""" +372 67 loss """marginranking""" +372 67 regularizer """no""" +372 67 optimizer """adadelta""" +372 67 training_loop """owa""" +372 67 negative_sampler """basic""" +372 67 evaluator """rankbased""" +372 68 dataset """kinships""" +372 68 model """kg2e""" +372 68 loss """marginranking""" +372 
68 regularizer """no""" +372 68 optimizer """adadelta""" +372 68 training_loop """owa""" +372 68 negative_sampler """basic""" +372 68 evaluator """rankbased""" +372 69 dataset """kinships""" +372 69 model """kg2e""" +372 69 loss """marginranking""" +372 69 regularizer """no""" +372 69 optimizer """adadelta""" +372 69 training_loop """owa""" +372 69 negative_sampler """basic""" +372 69 evaluator """rankbased""" +372 70 dataset """kinships""" +372 70 model """kg2e""" +372 70 loss """marginranking""" +372 70 regularizer """no""" +372 70 optimizer """adadelta""" +372 70 training_loop """owa""" +372 70 negative_sampler """basic""" +372 70 evaluator """rankbased""" +372 71 dataset """kinships""" +372 71 model """kg2e""" +372 71 loss """marginranking""" +372 71 regularizer """no""" +372 71 optimizer """adadelta""" +372 71 training_loop """owa""" +372 71 negative_sampler """basic""" +372 71 evaluator """rankbased""" +372 72 dataset """kinships""" +372 72 model """kg2e""" +372 72 loss """marginranking""" +372 72 regularizer """no""" +372 72 optimizer """adadelta""" +372 72 training_loop """owa""" +372 72 negative_sampler """basic""" +372 72 evaluator """rankbased""" +372 73 dataset """kinships""" +372 73 model """kg2e""" +372 73 loss """marginranking""" +372 73 regularizer """no""" +372 73 optimizer """adadelta""" +372 73 training_loop """owa""" +372 73 negative_sampler """basic""" +372 73 evaluator """rankbased""" +372 74 dataset """kinships""" +372 74 model """kg2e""" +372 74 loss """marginranking""" +372 74 regularizer """no""" +372 74 optimizer """adadelta""" +372 74 training_loop """owa""" +372 74 negative_sampler """basic""" +372 74 evaluator """rankbased""" +372 75 dataset """kinships""" +372 75 model """kg2e""" +372 75 loss """marginranking""" +372 75 regularizer """no""" +372 75 optimizer """adadelta""" +372 75 training_loop """owa""" +372 75 negative_sampler """basic""" +372 75 evaluator """rankbased""" +372 76 dataset """kinships""" +372 76 model """kg2e""" +372 
76 loss """marginranking""" +372 76 regularizer """no""" +372 76 optimizer """adadelta""" +372 76 training_loop """owa""" +372 76 negative_sampler """basic""" +372 76 evaluator """rankbased""" +372 77 dataset """kinships""" +372 77 model """kg2e""" +372 77 loss """marginranking""" +372 77 regularizer """no""" +372 77 optimizer """adadelta""" +372 77 training_loop """owa""" +372 77 negative_sampler """basic""" +372 77 evaluator """rankbased""" +372 78 dataset """kinships""" +372 78 model """kg2e""" +372 78 loss """marginranking""" +372 78 regularizer """no""" +372 78 optimizer """adadelta""" +372 78 training_loop """owa""" +372 78 negative_sampler """basic""" +372 78 evaluator """rankbased""" +372 79 dataset """kinships""" +372 79 model """kg2e""" +372 79 loss """marginranking""" +372 79 regularizer """no""" +372 79 optimizer """adadelta""" +372 79 training_loop """owa""" +372 79 negative_sampler """basic""" +372 79 evaluator """rankbased""" +372 80 dataset """kinships""" +372 80 model """kg2e""" +372 80 loss """marginranking""" +372 80 regularizer """no""" +372 80 optimizer """adadelta""" +372 80 training_loop """owa""" +372 80 negative_sampler """basic""" +372 80 evaluator """rankbased""" +372 81 dataset """kinships""" +372 81 model """kg2e""" +372 81 loss """marginranking""" +372 81 regularizer """no""" +372 81 optimizer """adadelta""" +372 81 training_loop """owa""" +372 81 negative_sampler """basic""" +372 81 evaluator """rankbased""" +372 82 dataset """kinships""" +372 82 model """kg2e""" +372 82 loss """marginranking""" +372 82 regularizer """no""" +372 82 optimizer """adadelta""" +372 82 training_loop """owa""" +372 82 negative_sampler """basic""" +372 82 evaluator """rankbased""" +372 83 dataset """kinships""" +372 83 model """kg2e""" +372 83 loss """marginranking""" +372 83 regularizer """no""" +372 83 optimizer """adadelta""" +372 83 training_loop """owa""" +372 83 negative_sampler """basic""" +372 83 evaluator """rankbased""" +372 84 dataset 
"""kinships""" +372 84 model """kg2e""" +372 84 loss """marginranking""" +372 84 regularizer """no""" +372 84 optimizer """adadelta""" +372 84 training_loop """owa""" +372 84 negative_sampler """basic""" +372 84 evaluator """rankbased""" +372 85 dataset """kinships""" +372 85 model """kg2e""" +372 85 loss """marginranking""" +372 85 regularizer """no""" +372 85 optimizer """adadelta""" +372 85 training_loop """owa""" +372 85 negative_sampler """basic""" +372 85 evaluator """rankbased""" +372 86 dataset """kinships""" +372 86 model """kg2e""" +372 86 loss """marginranking""" +372 86 regularizer """no""" +372 86 optimizer """adadelta""" +372 86 training_loop """owa""" +372 86 negative_sampler """basic""" +372 86 evaluator """rankbased""" +372 87 dataset """kinships""" +372 87 model """kg2e""" +372 87 loss """marginranking""" +372 87 regularizer """no""" +372 87 optimizer """adadelta""" +372 87 training_loop """owa""" +372 87 negative_sampler """basic""" +372 87 evaluator """rankbased""" +372 88 dataset """kinships""" +372 88 model """kg2e""" +372 88 loss """marginranking""" +372 88 regularizer """no""" +372 88 optimizer """adadelta""" +372 88 training_loop """owa""" +372 88 negative_sampler """basic""" +372 88 evaluator """rankbased""" +372 89 dataset """kinships""" +372 89 model """kg2e""" +372 89 loss """marginranking""" +372 89 regularizer """no""" +372 89 optimizer """adadelta""" +372 89 training_loop """owa""" +372 89 negative_sampler """basic""" +372 89 evaluator """rankbased""" +372 90 dataset """kinships""" +372 90 model """kg2e""" +372 90 loss """marginranking""" +372 90 regularizer """no""" +372 90 optimizer """adadelta""" +372 90 training_loop """owa""" +372 90 negative_sampler """basic""" +372 90 evaluator """rankbased""" +372 91 dataset """kinships""" +372 91 model """kg2e""" +372 91 loss """marginranking""" +372 91 regularizer """no""" +372 91 optimizer """adadelta""" +372 91 training_loop """owa""" +372 91 negative_sampler """basic""" +372 91 evaluator 
"""rankbased""" +372 92 dataset """kinships""" +372 92 model """kg2e""" +372 92 loss """marginranking""" +372 92 regularizer """no""" +372 92 optimizer """adadelta""" +372 92 training_loop """owa""" +372 92 negative_sampler """basic""" +372 92 evaluator """rankbased""" +372 93 dataset """kinships""" +372 93 model """kg2e""" +372 93 loss """marginranking""" +372 93 regularizer """no""" +372 93 optimizer """adadelta""" +372 93 training_loop """owa""" +372 93 negative_sampler """basic""" +372 93 evaluator """rankbased""" +372 94 dataset """kinships""" +372 94 model """kg2e""" +372 94 loss """marginranking""" +372 94 regularizer """no""" +372 94 optimizer """adadelta""" +372 94 training_loop """owa""" +372 94 negative_sampler """basic""" +372 94 evaluator """rankbased""" +372 95 dataset """kinships""" +372 95 model """kg2e""" +372 95 loss """marginranking""" +372 95 regularizer """no""" +372 95 optimizer """adadelta""" +372 95 training_loop """owa""" +372 95 negative_sampler """basic""" +372 95 evaluator """rankbased""" +372 96 dataset """kinships""" +372 96 model """kg2e""" +372 96 loss """marginranking""" +372 96 regularizer """no""" +372 96 optimizer """adadelta""" +372 96 training_loop """owa""" +372 96 negative_sampler """basic""" +372 96 evaluator """rankbased""" +372 97 dataset """kinships""" +372 97 model """kg2e""" +372 97 loss """marginranking""" +372 97 regularizer """no""" +372 97 optimizer """adadelta""" +372 97 training_loop """owa""" +372 97 negative_sampler """basic""" +372 97 evaluator """rankbased""" +372 98 dataset """kinships""" +372 98 model """kg2e""" +372 98 loss """marginranking""" +372 98 regularizer """no""" +372 98 optimizer """adadelta""" +372 98 training_loop """owa""" +372 98 negative_sampler """basic""" +372 98 evaluator """rankbased""" +372 99 dataset """kinships""" +372 99 model """kg2e""" +372 99 loss """marginranking""" +372 99 regularizer """no""" +372 99 optimizer """adadelta""" +372 99 training_loop """owa""" +372 99 
negative_sampler """basic""" +372 99 evaluator """rankbased""" +372 100 dataset """kinships""" +372 100 model """kg2e""" +372 100 loss """marginranking""" +372 100 regularizer """no""" +372 100 optimizer """adadelta""" +372 100 training_loop """owa""" +372 100 negative_sampler """basic""" +372 100 evaluator """rankbased""" +373 1 model.embedding_dim 2.0 +373 1 model.c_min 0.012378251360145304 +373 1 model.c_max 1.3446590841109471 +373 1 loss.margin 14.255824013903036 +373 1 loss.adversarial_temperature 0.3901035603336972 +373 1 negative_sampler.num_negs_per_pos 53.0 +373 1 training.batch_size 1.0 +373 2 model.embedding_dim 1.0 +373 2 model.c_min 0.05142267847704674 +373 2 model.c_max 3.272453694312997 +373 2 loss.margin 25.567514749866543 +373 2 loss.adversarial_temperature 0.7950246078198833 +373 2 negative_sampler.num_negs_per_pos 56.0 +373 2 training.batch_size 0.0 +373 3 model.embedding_dim 1.0 +373 3 model.c_min 0.02160472636347603 +373 3 model.c_max 1.0592208640127776 +373 3 loss.margin 6.349975786879496 +373 3 loss.adversarial_temperature 0.1494787758893216 +373 3 negative_sampler.num_negs_per_pos 43.0 +373 3 training.batch_size 2.0 +373 4 model.embedding_dim 0.0 +373 4 model.c_min 0.025786926098365477 +373 4 model.c_max 3.329472471053192 +373 4 loss.margin 11.086711446650929 +373 4 loss.adversarial_temperature 0.73338355841498 +373 4 negative_sampler.num_negs_per_pos 8.0 +373 4 training.batch_size 0.0 +373 5 model.embedding_dim 0.0 +373 5 model.c_min 0.02516188538908204 +373 5 model.c_max 5.848083183489889 +373 5 loss.margin 10.421175449313688 +373 5 loss.adversarial_temperature 0.2733966675542989 +373 5 negative_sampler.num_negs_per_pos 84.0 +373 5 training.batch_size 0.0 +373 6 model.embedding_dim 1.0 +373 6 model.c_min 0.019561998409867304 +373 6 model.c_max 7.010522201937642 +373 6 loss.margin 23.61945908122607 +373 6 loss.adversarial_temperature 0.22623388182954754 +373 6 negative_sampler.num_negs_per_pos 71.0 +373 6 training.batch_size 1.0 +373 7 
model.embedding_dim 1.0 +373 7 model.c_min 0.027034597084170383 +373 7 model.c_max 9.215668210024104 +373 7 loss.margin 14.548744918334808 +373 7 loss.adversarial_temperature 0.6284360339476797 +373 7 negative_sampler.num_negs_per_pos 33.0 +373 7 training.batch_size 1.0 +373 8 model.embedding_dim 1.0 +373 8 model.c_min 0.08232379404461064 +373 8 model.c_max 9.398149811296216 +373 8 loss.margin 11.29269094198235 +373 8 loss.adversarial_temperature 0.6572403408068728 +373 8 negative_sampler.num_negs_per_pos 90.0 +373 8 training.batch_size 0.0 +373 9 model.embedding_dim 1.0 +373 9 model.c_min 0.08279764267940695 +373 9 model.c_max 2.031143451881928 +373 9 loss.margin 5.898404339345385 +373 9 loss.adversarial_temperature 0.692709308730744 +373 9 negative_sampler.num_negs_per_pos 77.0 +373 9 training.batch_size 1.0 +373 10 model.embedding_dim 1.0 +373 10 model.c_min 0.07643303795847137 +373 10 model.c_max 5.448038062608636 +373 10 loss.margin 8.958794513003088 +373 10 loss.adversarial_temperature 0.8116843744150768 +373 10 negative_sampler.num_negs_per_pos 93.0 +373 10 training.batch_size 1.0 +373 11 model.embedding_dim 1.0 +373 11 model.c_min 0.016901782189713565 +373 11 model.c_max 7.455830244027193 +373 11 loss.margin 27.72229020136331 +373 11 loss.adversarial_temperature 0.19936867277057946 +373 11 negative_sampler.num_negs_per_pos 70.0 +373 11 training.batch_size 1.0 +373 12 model.embedding_dim 0.0 +373 12 model.c_min 0.03491690239578894 +373 12 model.c_max 1.677978741773788 +373 12 loss.margin 23.375427310215557 +373 12 loss.adversarial_temperature 0.21448071152044457 +373 12 negative_sampler.num_negs_per_pos 60.0 +373 12 training.batch_size 1.0 +373 13 model.embedding_dim 2.0 +373 13 model.c_min 0.013723177433852417 +373 13 model.c_max 8.18002337865341 +373 13 loss.margin 19.534292377875445 +373 13 loss.adversarial_temperature 0.6655726993464978 +373 13 negative_sampler.num_negs_per_pos 63.0 +373 13 training.batch_size 1.0 +373 14 model.embedding_dim 2.0 +373 14 
model.c_min 0.018545141237364806 +373 14 model.c_max 4.665700938817873 +373 14 loss.margin 29.731084633078677 +373 14 loss.adversarial_temperature 0.873785007764613 +373 14 negative_sampler.num_negs_per_pos 25.0 +373 14 training.batch_size 1.0 +373 15 model.embedding_dim 1.0 +373 15 model.c_min 0.08521323631092749 +373 15 model.c_max 7.797315837055674 +373 15 loss.margin 25.91328420473964 +373 15 loss.adversarial_temperature 0.6097675168895224 +373 15 negative_sampler.num_negs_per_pos 37.0 +373 15 training.batch_size 1.0 +373 16 model.embedding_dim 1.0 +373 16 model.c_min 0.05229972795324117 +373 16 model.c_max 2.4527161775695143 +373 16 loss.margin 1.6487722325734495 +373 16 loss.adversarial_temperature 0.9520870685927109 +373 16 negative_sampler.num_negs_per_pos 25.0 +373 16 training.batch_size 2.0 +373 17 model.embedding_dim 2.0 +373 17 model.c_min 0.010636947826440238 +373 17 model.c_max 7.849832301398373 +373 17 loss.margin 25.798640982876982 +373 17 loss.adversarial_temperature 0.4847571472312784 +373 17 negative_sampler.num_negs_per_pos 63.0 +373 17 training.batch_size 2.0 +373 18 model.embedding_dim 0.0 +373 18 model.c_min 0.09809289006804871 +373 18 model.c_max 1.3309793230166687 +373 18 loss.margin 12.114761624899614 +373 18 loss.adversarial_temperature 0.19842384931834944 +373 18 negative_sampler.num_negs_per_pos 1.0 +373 18 training.batch_size 1.0 +373 19 model.embedding_dim 1.0 +373 19 model.c_min 0.012129924644721308 +373 19 model.c_max 6.15272489127451 +373 19 loss.margin 16.993197103349644 +373 19 loss.adversarial_temperature 0.6154266138951155 +373 19 negative_sampler.num_negs_per_pos 22.0 +373 19 training.batch_size 1.0 +373 20 model.embedding_dim 1.0 +373 20 model.c_min 0.021517859822270127 +373 20 model.c_max 8.252426180125935 +373 20 loss.margin 9.119503031126051 +373 20 loss.adversarial_temperature 0.4055925858841434 +373 20 negative_sampler.num_negs_per_pos 51.0 +373 20 training.batch_size 0.0 +373 21 model.embedding_dim 1.0 +373 21 
model.c_min 0.017218584549499687 +373 21 model.c_max 9.674239732464164 +373 21 loss.margin 25.627366591103396 +373 21 loss.adversarial_temperature 0.49254042180992474 +373 21 negative_sampler.num_negs_per_pos 88.0 +373 21 training.batch_size 2.0 +373 22 model.embedding_dim 2.0 +373 22 model.c_min 0.0929815082796359 +373 22 model.c_max 1.3802337162153424 +373 22 loss.margin 24.448193486999738 +373 22 loss.adversarial_temperature 0.48135454052391813 +373 22 negative_sampler.num_negs_per_pos 65.0 +373 22 training.batch_size 2.0 +373 23 model.embedding_dim 0.0 +373 23 model.c_min 0.05871636955163917 +373 23 model.c_max 7.133496611669636 +373 23 loss.margin 11.559382233216976 +373 23 loss.adversarial_temperature 0.9312451812245864 +373 23 negative_sampler.num_negs_per_pos 41.0 +373 23 training.batch_size 0.0 +373 24 model.embedding_dim 1.0 +373 24 model.c_min 0.09218649462782869 +373 24 model.c_max 3.089341951640587 +373 24 loss.margin 15.153064713468693 +373 24 loss.adversarial_temperature 0.2140608277463406 +373 24 negative_sampler.num_negs_per_pos 32.0 +373 24 training.batch_size 2.0 +373 25 model.embedding_dim 1.0 +373 25 model.c_min 0.03602350602704542 +373 25 model.c_max 5.9289860956549845 +373 25 loss.margin 21.794667063838286 +373 25 loss.adversarial_temperature 0.26890406563975605 +373 25 negative_sampler.num_negs_per_pos 51.0 +373 25 training.batch_size 1.0 +373 26 model.embedding_dim 1.0 +373 26 model.c_min 0.036559691801830566 +373 26 model.c_max 5.480371884023139 +373 26 loss.margin 13.942159662146327 +373 26 loss.adversarial_temperature 0.6657473280966975 +373 26 negative_sampler.num_negs_per_pos 38.0 +373 26 training.batch_size 1.0 +373 27 model.embedding_dim 2.0 +373 27 model.c_min 0.06481987990312234 +373 27 model.c_max 1.0723357668038358 +373 27 loss.margin 17.300979221413954 +373 27 loss.adversarial_temperature 0.17315878588732858 +373 27 negative_sampler.num_negs_per_pos 78.0 +373 27 training.batch_size 1.0 +373 28 model.embedding_dim 1.0 +373 28 
model.c_min 0.06370241249908123 +373 28 model.c_max 6.323557263572511 +373 28 loss.margin 25.181528604566886 +373 28 loss.adversarial_temperature 0.33332293760855297 +373 28 negative_sampler.num_negs_per_pos 40.0 +373 28 training.batch_size 1.0 +373 29 model.embedding_dim 1.0 +373 29 model.c_min 0.012028703947477792 +373 29 model.c_max 8.356432271294732 +373 29 loss.margin 4.703411572624707 +373 29 loss.adversarial_temperature 0.3851164195928488 +373 29 negative_sampler.num_negs_per_pos 70.0 +373 29 training.batch_size 1.0 +373 30 model.embedding_dim 1.0 +373 30 model.c_min 0.08127697012689436 +373 30 model.c_max 5.172761997981563 +373 30 loss.margin 19.779286467359572 +373 30 loss.adversarial_temperature 0.17008064643872273 +373 30 negative_sampler.num_negs_per_pos 77.0 +373 30 training.batch_size 2.0 +373 31 model.embedding_dim 2.0 +373 31 model.c_min 0.05900051322802636 +373 31 model.c_max 2.1837005090318224 +373 31 loss.margin 8.696302182320654 +373 31 loss.adversarial_temperature 0.2324983692594947 +373 31 negative_sampler.num_negs_per_pos 81.0 +373 31 training.batch_size 0.0 +373 32 model.embedding_dim 0.0 +373 32 model.c_min 0.06463184417133623 +373 32 model.c_max 5.4978552884886 +373 32 loss.margin 2.053213673774747 +373 32 loss.adversarial_temperature 0.45383373561548845 +373 32 negative_sampler.num_negs_per_pos 17.0 +373 32 training.batch_size 1.0 +373 33 model.embedding_dim 0.0 +373 33 model.c_min 0.017873091195156662 +373 33 model.c_max 6.300429125439785 +373 33 loss.margin 22.65211167672194 +373 33 loss.adversarial_temperature 0.7507330925112732 +373 33 negative_sampler.num_negs_per_pos 30.0 +373 33 training.batch_size 2.0 +373 34 model.embedding_dim 2.0 +373 34 model.c_min 0.020108481798434896 +373 34 model.c_max 8.375092062950632 +373 34 loss.margin 28.704046434958382 +373 34 loss.adversarial_temperature 0.38023073786760286 +373 34 negative_sampler.num_negs_per_pos 36.0 +373 34 training.batch_size 0.0 +373 35 model.embedding_dim 0.0 +373 35 
model.c_min 0.044350614084155277 +373 35 model.c_max 8.810032952247902 +373 35 loss.margin 8.895712176785842 +373 35 loss.adversarial_temperature 0.9867369870729817 +373 35 negative_sampler.num_negs_per_pos 80.0 +373 35 training.batch_size 2.0 +373 36 model.embedding_dim 0.0 +373 36 model.c_min 0.019476052903505948 +373 36 model.c_max 7.902072051635117 +373 36 loss.margin 28.28591104925362 +373 36 loss.adversarial_temperature 0.6282521508940497 +373 36 negative_sampler.num_negs_per_pos 85.0 +373 36 training.batch_size 1.0 +373 37 model.embedding_dim 1.0 +373 37 model.c_min 0.012934720960194697 +373 37 model.c_max 6.147829256893888 +373 37 loss.margin 14.86351545778638 +373 37 loss.adversarial_temperature 0.7351156862718826 +373 37 negative_sampler.num_negs_per_pos 50.0 +373 37 training.batch_size 2.0 +373 38 model.embedding_dim 2.0 +373 38 model.c_min 0.0214663791407339 +373 38 model.c_max 8.057717440798317 +373 38 loss.margin 22.63486818305587 +373 38 loss.adversarial_temperature 0.885530603162068 +373 38 negative_sampler.num_negs_per_pos 90.0 +373 38 training.batch_size 0.0 +373 39 model.embedding_dim 1.0 +373 39 model.c_min 0.024937733729720142 +373 39 model.c_max 3.353589768182216 +373 39 loss.margin 4.666740939525598 +373 39 loss.adversarial_temperature 0.13189423284179336 +373 39 negative_sampler.num_negs_per_pos 98.0 +373 39 training.batch_size 1.0 +373 40 model.embedding_dim 2.0 +373 40 model.c_min 0.038931803599981205 +373 40 model.c_max 8.048576191771417 +373 40 loss.margin 26.42984467416413 +373 40 loss.adversarial_temperature 0.27830514162197517 +373 40 negative_sampler.num_negs_per_pos 10.0 +373 40 training.batch_size 0.0 +373 41 model.embedding_dim 2.0 +373 41 model.c_min 0.0519009050616306 +373 41 model.c_max 8.577078499087488 +373 41 loss.margin 28.24230369248563 +373 41 loss.adversarial_temperature 0.9372854351576655 +373 41 negative_sampler.num_negs_per_pos 54.0 +373 41 training.batch_size 0.0 +373 42 model.embedding_dim 2.0 +373 42 model.c_min 
0.014502164775412479 +373 42 model.c_max 4.23593314424953 +373 42 loss.margin 2.904137065089566 +373 42 loss.adversarial_temperature 0.995611935988801 +373 42 negative_sampler.num_negs_per_pos 10.0 +373 42 training.batch_size 0.0 +373 43 model.embedding_dim 1.0 +373 43 model.c_min 0.03359401466342691 +373 43 model.c_max 6.366985360611938 +373 43 loss.margin 28.270722324901655 +373 43 loss.adversarial_temperature 0.5383926639477374 +373 43 negative_sampler.num_negs_per_pos 67.0 +373 43 training.batch_size 0.0 +373 44 model.embedding_dim 2.0 +373 44 model.c_min 0.011442811234477815 +373 44 model.c_max 4.688222587423464 +373 44 loss.margin 19.308737183774273 +373 44 loss.adversarial_temperature 0.5250457379841349 +373 44 negative_sampler.num_negs_per_pos 29.0 +373 44 training.batch_size 2.0 +373 45 model.embedding_dim 1.0 +373 45 model.c_min 0.03676338724566947 +373 45 model.c_max 6.317412725924215 +373 45 loss.margin 1.1633237215689163 +373 45 loss.adversarial_temperature 0.9442254975697502 +373 45 negative_sampler.num_negs_per_pos 11.0 +373 45 training.batch_size 0.0 +373 46 model.embedding_dim 1.0 +373 46 model.c_min 0.01194886553798171 +373 46 model.c_max 4.309985893718121 +373 46 loss.margin 10.574636197095762 +373 46 loss.adversarial_temperature 0.3151524388950432 +373 46 negative_sampler.num_negs_per_pos 94.0 +373 46 training.batch_size 0.0 +373 47 model.embedding_dim 0.0 +373 47 model.c_min 0.034758579663758934 +373 47 model.c_max 1.9335934197554938 +373 47 loss.margin 3.1643175975179387 +373 47 loss.adversarial_temperature 0.8641970692117121 +373 47 negative_sampler.num_negs_per_pos 83.0 +373 47 training.batch_size 2.0 +373 48 model.embedding_dim 1.0 +373 48 model.c_min 0.028924292501246066 +373 48 model.c_max 6.791114500063185 +373 48 loss.margin 16.50953159742498 +373 48 loss.adversarial_temperature 0.9576586465848203 +373 48 negative_sampler.num_negs_per_pos 46.0 +373 48 training.batch_size 2.0 +373 49 model.embedding_dim 1.0 +373 49 model.c_min 
0.03852908136556263 +373 49 model.c_max 3.4864166602998434 +373 49 loss.margin 27.499143689710237 +373 49 loss.adversarial_temperature 0.29665644433698846 +373 49 negative_sampler.num_negs_per_pos 48.0 +373 49 training.batch_size 0.0 +373 50 model.embedding_dim 1.0 +373 50 model.c_min 0.023446110589613572 +373 50 model.c_max 2.1000007540051664 +373 50 loss.margin 8.327661214295418 +373 50 loss.adversarial_temperature 0.9699747746764834 +373 50 negative_sampler.num_negs_per_pos 82.0 +373 50 training.batch_size 0.0 +373 51 model.embedding_dim 2.0 +373 51 model.c_min 0.010944083384502411 +373 51 model.c_max 6.708856254119226 +373 51 loss.margin 21.891780852017117 +373 51 loss.adversarial_temperature 0.1749369367797438 +373 51 negative_sampler.num_negs_per_pos 53.0 +373 51 training.batch_size 0.0 +373 52 model.embedding_dim 0.0 +373 52 model.c_min 0.08059107451329749 +373 52 model.c_max 9.315296869253528 +373 52 loss.margin 22.904481442850834 +373 52 loss.adversarial_temperature 0.28137332659432407 +373 52 negative_sampler.num_negs_per_pos 22.0 +373 52 training.batch_size 1.0 +373 53 model.embedding_dim 1.0 +373 53 model.c_min 0.018067976252154484 +373 53 model.c_max 1.7758002175604521 +373 53 loss.margin 22.389487805480517 +373 53 loss.adversarial_temperature 0.5245273719854312 +373 53 negative_sampler.num_negs_per_pos 45.0 +373 53 training.batch_size 1.0 +373 54 model.embedding_dim 2.0 +373 54 model.c_min 0.020700167095410497 +373 54 model.c_max 8.219864242466539 +373 54 loss.margin 25.39426905222479 +373 54 loss.adversarial_temperature 0.5970215238322487 +373 54 negative_sampler.num_negs_per_pos 33.0 +373 54 training.batch_size 1.0 +373 55 model.embedding_dim 2.0 +373 55 model.c_min 0.026456357487053113 +373 55 model.c_max 9.925200273183926 +373 55 loss.margin 26.695018692964194 +373 55 loss.adversarial_temperature 0.4791181608511773 +373 55 negative_sampler.num_negs_per_pos 73.0 +373 55 training.batch_size 2.0 +373 56 model.embedding_dim 1.0 +373 56 model.c_min 
0.027461738890030812 +373 56 model.c_max 1.14530022560565 +373 56 loss.margin 26.004101076619527 +373 56 loss.adversarial_temperature 0.40479742854540535 +373 56 negative_sampler.num_negs_per_pos 1.0 +373 56 training.batch_size 0.0 +373 57 model.embedding_dim 1.0 +373 57 model.c_min 0.027574078232298307 +373 57 model.c_max 2.6672654031274057 +373 57 loss.margin 29.26841352707199 +373 57 loss.adversarial_temperature 0.21686227569001731 +373 57 negative_sampler.num_negs_per_pos 62.0 +373 57 training.batch_size 0.0 +373 58 model.embedding_dim 0.0 +373 58 model.c_min 0.031145642882845298 +373 58 model.c_max 6.76151364079083 +373 58 loss.margin 28.439122532897635 +373 58 loss.adversarial_temperature 0.26169866055678026 +373 58 negative_sampler.num_negs_per_pos 67.0 +373 58 training.batch_size 1.0 +373 59 model.embedding_dim 1.0 +373 59 model.c_min 0.047610851871403605 +373 59 model.c_max 9.523780425252749 +373 59 loss.margin 19.92468990087161 +373 59 loss.adversarial_temperature 0.1429768853414835 +373 59 negative_sampler.num_negs_per_pos 58.0 +373 59 training.batch_size 0.0 +373 60 model.embedding_dim 2.0 +373 60 model.c_min 0.018109788441580902 +373 60 model.c_max 2.0671603438006567 +373 60 loss.margin 6.926328175634822 +373 60 loss.adversarial_temperature 0.6355274302425546 +373 60 negative_sampler.num_negs_per_pos 53.0 +373 60 training.batch_size 2.0 +373 61 model.embedding_dim 1.0 +373 61 model.c_min 0.0797104064132607 +373 61 model.c_max 5.132226809811727 +373 61 loss.margin 12.198467053907441 +373 61 loss.adversarial_temperature 0.709960107687955 +373 61 negative_sampler.num_negs_per_pos 64.0 +373 61 training.batch_size 0.0 +373 62 model.embedding_dim 1.0 +373 62 model.c_min 0.030755554626344647 +373 62 model.c_max 7.478297069472751 +373 62 loss.margin 9.299982653793588 +373 62 loss.adversarial_temperature 0.9070816300159241 +373 62 negative_sampler.num_negs_per_pos 20.0 +373 62 training.batch_size 2.0 +373 63 model.embedding_dim 2.0 +373 63 model.c_min 
0.038983362672870836 +373 63 model.c_max 5.885890411174642 +373 63 loss.margin 8.44215279598339 +373 63 loss.adversarial_temperature 0.5603462434083535 +373 63 negative_sampler.num_negs_per_pos 0.0 +373 63 training.batch_size 0.0 +373 64 model.embedding_dim 0.0 +373 64 model.c_min 0.03700294645985572 +373 64 model.c_max 7.324849499326802 +373 64 loss.margin 8.587610849532393 +373 64 loss.adversarial_temperature 0.2314929296116076 +373 64 negative_sampler.num_negs_per_pos 59.0 +373 64 training.batch_size 2.0 +373 65 model.embedding_dim 2.0 +373 65 model.c_min 0.03926254342343736 +373 65 model.c_max 1.0458418950062875 +373 65 loss.margin 27.332330142428862 +373 65 loss.adversarial_temperature 0.6737812452718124 +373 65 negative_sampler.num_negs_per_pos 35.0 +373 65 training.batch_size 1.0 +373 66 model.embedding_dim 0.0 +373 66 model.c_min 0.08323041265934579 +373 66 model.c_max 4.4715310837691735 +373 66 loss.margin 9.94038296384459 +373 66 loss.adversarial_temperature 0.768804036897147 +373 66 negative_sampler.num_negs_per_pos 37.0 +373 66 training.batch_size 1.0 +373 67 model.embedding_dim 2.0 +373 67 model.c_min 0.0393343743433989 +373 67 model.c_max 1.5029968678952204 +373 67 loss.margin 24.559906143615947 +373 67 loss.adversarial_temperature 0.5108501222385149 +373 67 negative_sampler.num_negs_per_pos 84.0 +373 67 training.batch_size 1.0 +373 68 model.embedding_dim 1.0 +373 68 model.c_min 0.012435175499646015 +373 68 model.c_max 5.595790020016128 +373 68 loss.margin 17.170917900592023 +373 68 loss.adversarial_temperature 0.4728552397343717 +373 68 negative_sampler.num_negs_per_pos 66.0 +373 68 training.batch_size 2.0 +373 69 model.embedding_dim 1.0 +373 69 model.c_min 0.041697093768818284 +373 69 model.c_max 7.870938505452944 +373 69 loss.margin 24.17561928073047 +373 69 loss.adversarial_temperature 0.7724503520402475 +373 69 negative_sampler.num_negs_per_pos 45.0 +373 69 training.batch_size 1.0 +373 70 model.embedding_dim 0.0 +373 70 model.c_min 
0.02084942052752731 +373 70 model.c_max 1.6050460968749858 +373 70 loss.margin 27.685624849434422 +373 70 loss.adversarial_temperature 0.8159905073455058 +373 70 negative_sampler.num_negs_per_pos 88.0 +373 70 training.batch_size 1.0 +373 71 model.embedding_dim 1.0 +373 71 model.c_min 0.023330621816229278 +373 71 model.c_max 7.840083054494941 +373 71 loss.margin 26.259461456528076 +373 71 loss.adversarial_temperature 0.1461167550552404 +373 71 negative_sampler.num_negs_per_pos 2.0 +373 71 training.batch_size 1.0 +373 72 model.embedding_dim 0.0 +373 72 model.c_min 0.01126663561106691 +373 72 model.c_max 5.397368271236493 +373 72 loss.margin 18.218773811377257 +373 72 loss.adversarial_temperature 0.1185289905131398 +373 72 negative_sampler.num_negs_per_pos 69.0 +373 72 training.batch_size 0.0 +373 73 model.embedding_dim 0.0 +373 73 model.c_min 0.018645382108066936 +373 73 model.c_max 6.552024315059241 +373 73 loss.margin 20.235559013783316 +373 73 loss.adversarial_temperature 0.835793148846888 +373 73 negative_sampler.num_negs_per_pos 1.0 +373 73 training.batch_size 0.0 +373 74 model.embedding_dim 2.0 +373 74 model.c_min 0.013194713718963776 +373 74 model.c_max 7.176417586976086 +373 74 loss.margin 26.14067694952648 +373 74 loss.adversarial_temperature 0.597461220086821 +373 74 negative_sampler.num_negs_per_pos 38.0 +373 74 training.batch_size 0.0 +373 75 model.embedding_dim 1.0 +373 75 model.c_min 0.030703517536342664 +373 75 model.c_max 9.317538959490687 +373 75 loss.margin 9.696506344800994 +373 75 loss.adversarial_temperature 0.7817737670636176 +373 75 negative_sampler.num_negs_per_pos 73.0 +373 75 training.batch_size 2.0 +373 76 model.embedding_dim 1.0 +373 76 model.c_min 0.019143995468955163 +373 76 model.c_max 4.802621933437557 +373 76 loss.margin 2.4485529758551032 +373 76 loss.adversarial_temperature 0.43811889647885627 +373 76 negative_sampler.num_negs_per_pos 27.0 +373 76 training.batch_size 1.0 +373 77 model.embedding_dim 1.0 +373 77 model.c_min 
0.07513212123848856 +373 77 model.c_max 6.776512850109282 +373 77 loss.margin 11.653817641241965 +373 77 loss.adversarial_temperature 0.25593313947047486 +373 77 negative_sampler.num_negs_per_pos 28.0 +373 77 training.batch_size 2.0 +373 78 model.embedding_dim 1.0 +373 78 model.c_min 0.048406867847417503 +373 78 model.c_max 6.2704480735979775 +373 78 loss.margin 19.038486592975627 +373 78 loss.adversarial_temperature 0.17470982050716616 +373 78 negative_sampler.num_negs_per_pos 8.0 +373 78 training.batch_size 1.0 +373 79 model.embedding_dim 1.0 +373 79 model.c_min 0.015016218941285683 +373 79 model.c_max 5.239383549507393 +373 79 loss.margin 18.31837761582702 +373 79 loss.adversarial_temperature 0.22774278205920015 +373 79 negative_sampler.num_negs_per_pos 79.0 +373 79 training.batch_size 2.0 +373 80 model.embedding_dim 1.0 +373 80 model.c_min 0.014241452400295573 +373 80 model.c_max 4.75196928468149 +373 80 loss.margin 14.110239593432148 +373 80 loss.adversarial_temperature 0.9255769097176124 +373 80 negative_sampler.num_negs_per_pos 89.0 +373 80 training.batch_size 0.0 +373 81 model.embedding_dim 0.0 +373 81 model.c_min 0.04280319835343239 +373 81 model.c_max 9.045227345002008 +373 81 loss.margin 5.205010088159668 +373 81 loss.adversarial_temperature 0.8205982992638362 +373 81 negative_sampler.num_negs_per_pos 43.0 +373 81 training.batch_size 0.0 +373 82 model.embedding_dim 0.0 +373 82 model.c_min 0.07015946476496235 +373 82 model.c_max 2.6003543727368124 +373 82 loss.margin 24.54411270053916 +373 82 loss.adversarial_temperature 0.7012434576110609 +373 82 negative_sampler.num_negs_per_pos 32.0 +373 82 training.batch_size 2.0 +373 83 model.embedding_dim 0.0 +373 83 model.c_min 0.05911469680517686 +373 83 model.c_max 9.48684713906219 +373 83 loss.margin 5.382678090578296 +373 83 loss.adversarial_temperature 0.4020036271488442 +373 83 negative_sampler.num_negs_per_pos 3.0 +373 83 training.batch_size 0.0 +373 84 model.embedding_dim 2.0 +373 84 model.c_min 
0.02629619006645933 +373 84 model.c_max 8.44736838408228 +373 84 loss.margin 1.1521530425394848 +373 84 loss.adversarial_temperature 0.715302573820128 +373 84 negative_sampler.num_negs_per_pos 10.0 +373 84 training.batch_size 0.0 +373 85 model.embedding_dim 1.0 +373 85 model.c_min 0.06724419331524333 +373 85 model.c_max 7.156746626281428 +373 85 loss.margin 4.229355951573306 +373 85 loss.adversarial_temperature 0.15972334187643156 +373 85 negative_sampler.num_negs_per_pos 79.0 +373 85 training.batch_size 1.0 +373 86 model.embedding_dim 0.0 +373 86 model.c_min 0.0334242909750982 +373 86 model.c_max 7.624330790545237 +373 86 loss.margin 29.957912014006755 +373 86 loss.adversarial_temperature 0.24946986193250037 +373 86 negative_sampler.num_negs_per_pos 70.0 +373 86 training.batch_size 2.0 +373 87 model.embedding_dim 1.0 +373 87 model.c_min 0.01392118723189353 +373 87 model.c_max 7.358426832203019 +373 87 loss.margin 26.210907199153723 +373 87 loss.adversarial_temperature 0.971443060648173 +373 87 negative_sampler.num_negs_per_pos 75.0 +373 87 training.batch_size 1.0 +373 88 model.embedding_dim 0.0 +373 88 model.c_min 0.03506560518049537 +373 88 model.c_max 3.2386670377387663 +373 88 loss.margin 22.489022163726972 +373 88 loss.adversarial_temperature 0.7654088866429198 +373 88 negative_sampler.num_negs_per_pos 27.0 +373 88 training.batch_size 0.0 +373 89 model.embedding_dim 0.0 +373 89 model.c_min 0.05568351038602225 +373 89 model.c_max 7.292196091002381 +373 89 loss.margin 13.491860432748744 +373 89 loss.adversarial_temperature 0.3811021964469383 +373 89 negative_sampler.num_negs_per_pos 45.0 +373 89 training.batch_size 2.0 +373 90 model.embedding_dim 2.0 +373 90 model.c_min 0.07415709156967895 +373 90 model.c_max 7.42764062063869 +373 90 loss.margin 4.081791066225449 +373 90 loss.adversarial_temperature 0.19943455466976717 +373 90 negative_sampler.num_negs_per_pos 26.0 +373 90 training.batch_size 2.0 +373 91 model.embedding_dim 2.0 +373 91 model.c_min 
0.026927663748821246 +373 91 model.c_max 1.6705282838949043 +373 91 loss.margin 18.125016678996587 +373 91 loss.adversarial_temperature 0.12920169183600985 +373 91 negative_sampler.num_negs_per_pos 3.0 +373 91 training.batch_size 2.0 +373 92 model.embedding_dim 2.0 +373 92 model.c_min 0.052365405579339754 +373 92 model.c_max 2.1893762137878774 +373 92 loss.margin 24.1749010910439 +373 92 loss.adversarial_temperature 0.5887052823506413 +373 92 negative_sampler.num_negs_per_pos 11.0 +373 92 training.batch_size 2.0 +373 93 model.embedding_dim 0.0 +373 93 model.c_min 0.04907293990838624 +373 93 model.c_max 4.127147785725296 +373 93 loss.margin 26.32905219322205 +373 93 loss.adversarial_temperature 0.8005691248981099 +373 93 negative_sampler.num_negs_per_pos 39.0 +373 93 training.batch_size 2.0 +373 94 model.embedding_dim 1.0 +373 94 model.c_min 0.013084225335042688 +373 94 model.c_max 1.4142947041104055 +373 94 loss.margin 18.73424590832152 +373 94 loss.adversarial_temperature 0.187898948811066 +373 94 negative_sampler.num_negs_per_pos 10.0 +373 94 training.batch_size 1.0 +373 95 model.embedding_dim 0.0 +373 95 model.c_min 0.01933671465747168 +373 95 model.c_max 7.201331264544411 +373 95 loss.margin 21.444389730099633 +373 95 loss.adversarial_temperature 0.9461595998734532 +373 95 negative_sampler.num_negs_per_pos 30.0 +373 95 training.batch_size 0.0 +373 96 model.embedding_dim 1.0 +373 96 model.c_min 0.016011918883051916 +373 96 model.c_max 8.968776733933572 +373 96 loss.margin 7.119225942644753 +373 96 loss.adversarial_temperature 0.17634231826855873 +373 96 negative_sampler.num_negs_per_pos 59.0 +373 96 training.batch_size 0.0 +373 97 model.embedding_dim 1.0 +373 97 model.c_min 0.04990719315405833 +373 97 model.c_max 4.42172086692666 +373 97 loss.margin 8.798339897094657 +373 97 loss.adversarial_temperature 0.8677553474861714 +373 97 negative_sampler.num_negs_per_pos 59.0 +373 97 training.batch_size 2.0 +373 98 model.embedding_dim 0.0 +373 98 model.c_min 
0.05244917487990054 +373 98 model.c_max 5.460587499002498 +373 98 loss.margin 9.08515571526371 +373 98 loss.adversarial_temperature 0.7271714128556356 +373 98 negative_sampler.num_negs_per_pos 41.0 +373 98 training.batch_size 0.0 +373 99 model.embedding_dim 0.0 +373 99 model.c_min 0.09783899605388295 +373 99 model.c_max 7.48720904812213 +373 99 loss.margin 6.653021325276496 +373 99 loss.adversarial_temperature 0.8592027070380213 +373 99 negative_sampler.num_negs_per_pos 82.0 +373 99 training.batch_size 1.0 +373 100 model.embedding_dim 2.0 +373 100 model.c_min 0.020511957230972235 +373 100 model.c_max 3.454427667633788 +373 100 loss.margin 5.2223858383277335 +373 100 loss.adversarial_temperature 0.13825787221650737 +373 100 negative_sampler.num_negs_per_pos 2.0 +373 100 training.batch_size 2.0 +373 1 dataset """kinships""" +373 1 model """kg2e""" +373 1 loss """nssa""" +373 1 regularizer """no""" +373 1 optimizer """adadelta""" +373 1 training_loop """owa""" +373 1 negative_sampler """basic""" +373 1 evaluator """rankbased""" +373 2 dataset """kinships""" +373 2 model """kg2e""" +373 2 loss """nssa""" +373 2 regularizer """no""" +373 2 optimizer """adadelta""" +373 2 training_loop """owa""" +373 2 negative_sampler """basic""" +373 2 evaluator """rankbased""" +373 3 dataset """kinships""" +373 3 model """kg2e""" +373 3 loss """nssa""" +373 3 regularizer """no""" +373 3 optimizer """adadelta""" +373 3 training_loop """owa""" +373 3 negative_sampler """basic""" +373 3 evaluator """rankbased""" +373 4 dataset """kinships""" +373 4 model """kg2e""" +373 4 loss """nssa""" +373 4 regularizer """no""" +373 4 optimizer """adadelta""" +373 4 training_loop """owa""" +373 4 negative_sampler """basic""" +373 4 evaluator """rankbased""" +373 5 dataset """kinships""" +373 5 model """kg2e""" +373 5 loss """nssa""" +373 5 regularizer """no""" +373 5 optimizer """adadelta""" +373 5 training_loop """owa""" +373 5 negative_sampler """basic""" +373 5 evaluator """rankbased""" +373 6 
dataset """kinships""" +373 6 model """kg2e""" +373 6 loss """nssa""" +373 6 regularizer """no""" +373 6 optimizer """adadelta""" +373 6 training_loop """owa""" +373 6 negative_sampler """basic""" +373 6 evaluator """rankbased""" +373 7 dataset """kinships""" +373 7 model """kg2e""" +373 7 loss """nssa""" +373 7 regularizer """no""" +373 7 optimizer """adadelta""" +373 7 training_loop """owa""" +373 7 negative_sampler """basic""" +373 7 evaluator """rankbased""" +373 8 dataset """kinships""" +373 8 model """kg2e""" +373 8 loss """nssa""" +373 8 regularizer """no""" +373 8 optimizer """adadelta""" +373 8 training_loop """owa""" +373 8 negative_sampler """basic""" +373 8 evaluator """rankbased""" +373 9 dataset """kinships""" +373 9 model """kg2e""" +373 9 loss """nssa""" +373 9 regularizer """no""" +373 9 optimizer """adadelta""" +373 9 training_loop """owa""" +373 9 negative_sampler """basic""" +373 9 evaluator """rankbased""" +373 10 dataset """kinships""" +373 10 model """kg2e""" +373 10 loss """nssa""" +373 10 regularizer """no""" +373 10 optimizer """adadelta""" +373 10 training_loop """owa""" +373 10 negative_sampler """basic""" +373 10 evaluator """rankbased""" +373 11 dataset """kinships""" +373 11 model """kg2e""" +373 11 loss """nssa""" +373 11 regularizer """no""" +373 11 optimizer """adadelta""" +373 11 training_loop """owa""" +373 11 negative_sampler """basic""" +373 11 evaluator """rankbased""" +373 12 dataset """kinships""" +373 12 model """kg2e""" +373 12 loss """nssa""" +373 12 regularizer """no""" +373 12 optimizer """adadelta""" +373 12 training_loop """owa""" +373 12 negative_sampler """basic""" +373 12 evaluator """rankbased""" +373 13 dataset """kinships""" +373 13 model """kg2e""" +373 13 loss """nssa""" +373 13 regularizer """no""" +373 13 optimizer """adadelta""" +373 13 training_loop """owa""" +373 13 negative_sampler """basic""" +373 13 evaluator """rankbased""" +373 14 dataset """kinships""" +373 14 model """kg2e""" +373 14 loss 
"""nssa""" +373 14 regularizer """no""" +373 14 optimizer """adadelta""" +373 14 training_loop """owa""" +373 14 negative_sampler """basic""" +373 14 evaluator """rankbased""" +373 15 dataset """kinships""" +373 15 model """kg2e""" +373 15 loss """nssa""" +373 15 regularizer """no""" +373 15 optimizer """adadelta""" +373 15 training_loop """owa""" +373 15 negative_sampler """basic""" +373 15 evaluator """rankbased""" +373 16 dataset """kinships""" +373 16 model """kg2e""" +373 16 loss """nssa""" +373 16 regularizer """no""" +373 16 optimizer """adadelta""" +373 16 training_loop """owa""" +373 16 negative_sampler """basic""" +373 16 evaluator """rankbased""" +373 17 dataset """kinships""" +373 17 model """kg2e""" +373 17 loss """nssa""" +373 17 regularizer """no""" +373 17 optimizer """adadelta""" +373 17 training_loop """owa""" +373 17 negative_sampler """basic""" +373 17 evaluator """rankbased""" +373 18 dataset """kinships""" +373 18 model """kg2e""" +373 18 loss """nssa""" +373 18 regularizer """no""" +373 18 optimizer """adadelta""" +373 18 training_loop """owa""" +373 18 negative_sampler """basic""" +373 18 evaluator """rankbased""" +373 19 dataset """kinships""" +373 19 model """kg2e""" +373 19 loss """nssa""" +373 19 regularizer """no""" +373 19 optimizer """adadelta""" +373 19 training_loop """owa""" +373 19 negative_sampler """basic""" +373 19 evaluator """rankbased""" +373 20 dataset """kinships""" +373 20 model """kg2e""" +373 20 loss """nssa""" +373 20 regularizer """no""" +373 20 optimizer """adadelta""" +373 20 training_loop """owa""" +373 20 negative_sampler """basic""" +373 20 evaluator """rankbased""" +373 21 dataset """kinships""" +373 21 model """kg2e""" +373 21 loss """nssa""" +373 21 regularizer """no""" +373 21 optimizer """adadelta""" +373 21 training_loop """owa""" +373 21 negative_sampler """basic""" +373 21 evaluator """rankbased""" +373 22 dataset """kinships""" +373 22 model """kg2e""" +373 22 loss """nssa""" +373 22 regularizer """no""" 
+373 22 optimizer """adadelta""" +373 22 training_loop """owa""" +373 22 negative_sampler """basic""" +373 22 evaluator """rankbased""" +373 23 dataset """kinships""" +373 23 model """kg2e""" +373 23 loss """nssa""" +373 23 regularizer """no""" +373 23 optimizer """adadelta""" +373 23 training_loop """owa""" +373 23 negative_sampler """basic""" +373 23 evaluator """rankbased""" +373 24 dataset """kinships""" +373 24 model """kg2e""" +373 24 loss """nssa""" +373 24 regularizer """no""" +373 24 optimizer """adadelta""" +373 24 training_loop """owa""" +373 24 negative_sampler """basic""" +373 24 evaluator """rankbased""" +373 25 dataset """kinships""" +373 25 model """kg2e""" +373 25 loss """nssa""" +373 25 regularizer """no""" +373 25 optimizer """adadelta""" +373 25 training_loop """owa""" +373 25 negative_sampler """basic""" +373 25 evaluator """rankbased""" +373 26 dataset """kinships""" +373 26 model """kg2e""" +373 26 loss """nssa""" +373 26 regularizer """no""" +373 26 optimizer """adadelta""" +373 26 training_loop """owa""" +373 26 negative_sampler """basic""" +373 26 evaluator """rankbased""" +373 27 dataset """kinships""" +373 27 model """kg2e""" +373 27 loss """nssa""" +373 27 regularizer """no""" +373 27 optimizer """adadelta""" +373 27 training_loop """owa""" +373 27 negative_sampler """basic""" +373 27 evaluator """rankbased""" +373 28 dataset """kinships""" +373 28 model """kg2e""" +373 28 loss """nssa""" +373 28 regularizer """no""" +373 28 optimizer """adadelta""" +373 28 training_loop """owa""" +373 28 negative_sampler """basic""" +373 28 evaluator """rankbased""" +373 29 dataset """kinships""" +373 29 model """kg2e""" +373 29 loss """nssa""" +373 29 regularizer """no""" +373 29 optimizer """adadelta""" +373 29 training_loop """owa""" +373 29 negative_sampler """basic""" +373 29 evaluator """rankbased""" +373 30 dataset """kinships""" +373 30 model """kg2e""" +373 30 loss """nssa""" +373 30 regularizer """no""" +373 30 optimizer """adadelta""" +373 
30 training_loop """owa""" +373 30 negative_sampler """basic""" +373 30 evaluator """rankbased""" +373 31 dataset """kinships""" +373 31 model """kg2e""" +373 31 loss """nssa""" +373 31 regularizer """no""" +373 31 optimizer """adadelta""" +373 31 training_loop """owa""" +373 31 negative_sampler """basic""" +373 31 evaluator """rankbased""" +373 32 dataset """kinships""" +373 32 model """kg2e""" +373 32 loss """nssa""" +373 32 regularizer """no""" +373 32 optimizer """adadelta""" +373 32 training_loop """owa""" +373 32 negative_sampler """basic""" +373 32 evaluator """rankbased""" +373 33 dataset """kinships""" +373 33 model """kg2e""" +373 33 loss """nssa""" +373 33 regularizer """no""" +373 33 optimizer """adadelta""" +373 33 training_loop """owa""" +373 33 negative_sampler """basic""" +373 33 evaluator """rankbased""" +373 34 dataset """kinships""" +373 34 model """kg2e""" +373 34 loss """nssa""" +373 34 regularizer """no""" +373 34 optimizer """adadelta""" +373 34 training_loop """owa""" +373 34 negative_sampler """basic""" +373 34 evaluator """rankbased""" +373 35 dataset """kinships""" +373 35 model """kg2e""" +373 35 loss """nssa""" +373 35 regularizer """no""" +373 35 optimizer """adadelta""" +373 35 training_loop """owa""" +373 35 negative_sampler """basic""" +373 35 evaluator """rankbased""" +373 36 dataset """kinships""" +373 36 model """kg2e""" +373 36 loss """nssa""" +373 36 regularizer """no""" +373 36 optimizer """adadelta""" +373 36 training_loop """owa""" +373 36 negative_sampler """basic""" +373 36 evaluator """rankbased""" +373 37 dataset """kinships""" +373 37 model """kg2e""" +373 37 loss """nssa""" +373 37 regularizer """no""" +373 37 optimizer """adadelta""" +373 37 training_loop """owa""" +373 37 negative_sampler """basic""" +373 37 evaluator """rankbased""" +373 38 dataset """kinships""" +373 38 model """kg2e""" +373 38 loss """nssa""" +373 38 regularizer """no""" +373 38 optimizer """adadelta""" +373 38 training_loop """owa""" +373 38 
negative_sampler """basic""" +373 38 evaluator """rankbased""" +373 39 dataset """kinships""" +373 39 model """kg2e""" +373 39 loss """nssa""" +373 39 regularizer """no""" +373 39 optimizer """adadelta""" +373 39 training_loop """owa""" +373 39 negative_sampler """basic""" +373 39 evaluator """rankbased""" +373 40 dataset """kinships""" +373 40 model """kg2e""" +373 40 loss """nssa""" +373 40 regularizer """no""" +373 40 optimizer """adadelta""" +373 40 training_loop """owa""" +373 40 negative_sampler """basic""" +373 40 evaluator """rankbased""" +373 41 dataset """kinships""" +373 41 model """kg2e""" +373 41 loss """nssa""" +373 41 regularizer """no""" +373 41 optimizer """adadelta""" +373 41 training_loop """owa""" +373 41 negative_sampler """basic""" +373 41 evaluator """rankbased""" +373 42 dataset """kinships""" +373 42 model """kg2e""" +373 42 loss """nssa""" +373 42 regularizer """no""" +373 42 optimizer """adadelta""" +373 42 training_loop """owa""" +373 42 negative_sampler """basic""" +373 42 evaluator """rankbased""" +373 43 dataset """kinships""" +373 43 model """kg2e""" +373 43 loss """nssa""" +373 43 regularizer """no""" +373 43 optimizer """adadelta""" +373 43 training_loop """owa""" +373 43 negative_sampler """basic""" +373 43 evaluator """rankbased""" +373 44 dataset """kinships""" +373 44 model """kg2e""" +373 44 loss """nssa""" +373 44 regularizer """no""" +373 44 optimizer """adadelta""" +373 44 training_loop """owa""" +373 44 negative_sampler """basic""" +373 44 evaluator """rankbased""" +373 45 dataset """kinships""" +373 45 model """kg2e""" +373 45 loss """nssa""" +373 45 regularizer """no""" +373 45 optimizer """adadelta""" +373 45 training_loop """owa""" +373 45 negative_sampler """basic""" +373 45 evaluator """rankbased""" +373 46 dataset """kinships""" +373 46 model """kg2e""" +373 46 loss """nssa""" +373 46 regularizer """no""" +373 46 optimizer """adadelta""" +373 46 training_loop """owa""" +373 46 negative_sampler """basic""" +373 46 
evaluator """rankbased""" +373 47 dataset """kinships""" +373 47 model """kg2e""" +373 47 loss """nssa""" +373 47 regularizer """no""" +373 47 optimizer """adadelta""" +373 47 training_loop """owa""" +373 47 negative_sampler """basic""" +373 47 evaluator """rankbased""" +373 48 dataset """kinships""" +373 48 model """kg2e""" +373 48 loss """nssa""" +373 48 regularizer """no""" +373 48 optimizer """adadelta""" +373 48 training_loop """owa""" +373 48 negative_sampler """basic""" +373 48 evaluator """rankbased""" +373 49 dataset """kinships""" +373 49 model """kg2e""" +373 49 loss """nssa""" +373 49 regularizer """no""" +373 49 optimizer """adadelta""" +373 49 training_loop """owa""" +373 49 negative_sampler """basic""" +373 49 evaluator """rankbased""" +373 50 dataset """kinships""" +373 50 model """kg2e""" +373 50 loss """nssa""" +373 50 regularizer """no""" +373 50 optimizer """adadelta""" +373 50 training_loop """owa""" +373 50 negative_sampler """basic""" +373 50 evaluator """rankbased""" +373 51 dataset """kinships""" +373 51 model """kg2e""" +373 51 loss """nssa""" +373 51 regularizer """no""" +373 51 optimizer """adadelta""" +373 51 training_loop """owa""" +373 51 negative_sampler """basic""" +373 51 evaluator """rankbased""" +373 52 dataset """kinships""" +373 52 model """kg2e""" +373 52 loss """nssa""" +373 52 regularizer """no""" +373 52 optimizer """adadelta""" +373 52 training_loop """owa""" +373 52 negative_sampler """basic""" +373 52 evaluator """rankbased""" +373 53 dataset """kinships""" +373 53 model """kg2e""" +373 53 loss """nssa""" +373 53 regularizer """no""" +373 53 optimizer """adadelta""" +373 53 training_loop """owa""" +373 53 negative_sampler """basic""" +373 53 evaluator """rankbased""" +373 54 dataset """kinships""" +373 54 model """kg2e""" +373 54 loss """nssa""" +373 54 regularizer """no""" +373 54 optimizer """adadelta""" +373 54 training_loop """owa""" +373 54 negative_sampler """basic""" +373 54 evaluator """rankbased""" +373 55 
dataset """kinships""" +373 55 model """kg2e""" +373 55 loss """nssa""" +373 55 regularizer """no""" +373 55 optimizer """adadelta""" +373 55 training_loop """owa""" +373 55 negative_sampler """basic""" +373 55 evaluator """rankbased""" +373 56 dataset """kinships""" +373 56 model """kg2e""" +373 56 loss """nssa""" +373 56 regularizer """no""" +373 56 optimizer """adadelta""" +373 56 training_loop """owa""" +373 56 negative_sampler """basic""" +373 56 evaluator """rankbased""" +373 57 dataset """kinships""" +373 57 model """kg2e""" +373 57 loss """nssa""" +373 57 regularizer """no""" +373 57 optimizer """adadelta""" +373 57 training_loop """owa""" +373 57 negative_sampler """basic""" +373 57 evaluator """rankbased""" +373 58 dataset """kinships""" +373 58 model """kg2e""" +373 58 loss """nssa""" +373 58 regularizer """no""" +373 58 optimizer """adadelta""" +373 58 training_loop """owa""" +373 58 negative_sampler """basic""" +373 58 evaluator """rankbased""" +373 59 dataset """kinships""" +373 59 model """kg2e""" +373 59 loss """nssa""" +373 59 regularizer """no""" +373 59 optimizer """adadelta""" +373 59 training_loop """owa""" +373 59 negative_sampler """basic""" +373 59 evaluator """rankbased""" +373 60 dataset """kinships""" +373 60 model """kg2e""" +373 60 loss """nssa""" +373 60 regularizer """no""" +373 60 optimizer """adadelta""" +373 60 training_loop """owa""" +373 60 negative_sampler """basic""" +373 60 evaluator """rankbased""" +373 61 dataset """kinships""" +373 61 model """kg2e""" +373 61 loss """nssa""" +373 61 regularizer """no""" +373 61 optimizer """adadelta""" +373 61 training_loop """owa""" +373 61 negative_sampler """basic""" +373 61 evaluator """rankbased""" +373 62 dataset """kinships""" +373 62 model """kg2e""" +373 62 loss """nssa""" +373 62 regularizer """no""" +373 62 optimizer """adadelta""" +373 62 training_loop """owa""" +373 62 negative_sampler """basic""" +373 62 evaluator """rankbased""" +373 63 dataset """kinships""" +373 63 model 
"""kg2e""" +373 63 loss """nssa""" +373 63 regularizer """no""" +373 63 optimizer """adadelta""" +373 63 training_loop """owa""" +373 63 negative_sampler """basic""" +373 63 evaluator """rankbased""" +373 64 dataset """kinships""" +373 64 model """kg2e""" +373 64 loss """nssa""" +373 64 regularizer """no""" +373 64 optimizer """adadelta""" +373 64 training_loop """owa""" +373 64 negative_sampler """basic""" +373 64 evaluator """rankbased""" +373 65 dataset """kinships""" +373 65 model """kg2e""" +373 65 loss """nssa""" +373 65 regularizer """no""" +373 65 optimizer """adadelta""" +373 65 training_loop """owa""" +373 65 negative_sampler """basic""" +373 65 evaluator """rankbased""" +373 66 dataset """kinships""" +373 66 model """kg2e""" +373 66 loss """nssa""" +373 66 regularizer """no""" +373 66 optimizer """adadelta""" +373 66 training_loop """owa""" +373 66 negative_sampler """basic""" +373 66 evaluator """rankbased""" +373 67 dataset """kinships""" +373 67 model """kg2e""" +373 67 loss """nssa""" +373 67 regularizer """no""" +373 67 optimizer """adadelta""" +373 67 training_loop """owa""" +373 67 negative_sampler """basic""" +373 67 evaluator """rankbased""" +373 68 dataset """kinships""" +373 68 model """kg2e""" +373 68 loss """nssa""" +373 68 regularizer """no""" +373 68 optimizer """adadelta""" +373 68 training_loop """owa""" +373 68 negative_sampler """basic""" +373 68 evaluator """rankbased""" +373 69 dataset """kinships""" +373 69 model """kg2e""" +373 69 loss """nssa""" +373 69 regularizer """no""" +373 69 optimizer """adadelta""" +373 69 training_loop """owa""" +373 69 negative_sampler """basic""" +373 69 evaluator """rankbased""" +373 70 dataset """kinships""" +373 70 model """kg2e""" +373 70 loss """nssa""" +373 70 regularizer """no""" +373 70 optimizer """adadelta""" +373 70 training_loop """owa""" +373 70 negative_sampler """basic""" +373 70 evaluator """rankbased""" +373 71 dataset """kinships""" +373 71 model """kg2e""" +373 71 loss """nssa""" +373 
71 regularizer """no""" +373 71 optimizer """adadelta""" +373 71 training_loop """owa""" +373 71 negative_sampler """basic""" +373 71 evaluator """rankbased""" +373 72 dataset """kinships""" +373 72 model """kg2e""" +373 72 loss """nssa""" +373 72 regularizer """no""" +373 72 optimizer """adadelta""" +373 72 training_loop """owa""" +373 72 negative_sampler """basic""" +373 72 evaluator """rankbased""" +373 73 dataset """kinships""" +373 73 model """kg2e""" +373 73 loss """nssa""" +373 73 regularizer """no""" +373 73 optimizer """adadelta""" +373 73 training_loop """owa""" +373 73 negative_sampler """basic""" +373 73 evaluator """rankbased""" +373 74 dataset """kinships""" +373 74 model """kg2e""" +373 74 loss """nssa""" +373 74 regularizer """no""" +373 74 optimizer """adadelta""" +373 74 training_loop """owa""" +373 74 negative_sampler """basic""" +373 74 evaluator """rankbased""" +373 75 dataset """kinships""" +373 75 model """kg2e""" +373 75 loss """nssa""" +373 75 regularizer """no""" +373 75 optimizer """adadelta""" +373 75 training_loop """owa""" +373 75 negative_sampler """basic""" +373 75 evaluator """rankbased""" +373 76 dataset """kinships""" +373 76 model """kg2e""" +373 76 loss """nssa""" +373 76 regularizer """no""" +373 76 optimizer """adadelta""" +373 76 training_loop """owa""" +373 76 negative_sampler """basic""" +373 76 evaluator """rankbased""" +373 77 dataset """kinships""" +373 77 model """kg2e""" +373 77 loss """nssa""" +373 77 regularizer """no""" +373 77 optimizer """adadelta""" +373 77 training_loop """owa""" +373 77 negative_sampler """basic""" +373 77 evaluator """rankbased""" +373 78 dataset """kinships""" +373 78 model """kg2e""" +373 78 loss """nssa""" +373 78 regularizer """no""" +373 78 optimizer """adadelta""" +373 78 training_loop """owa""" +373 78 negative_sampler """basic""" +373 78 evaluator """rankbased""" +373 79 dataset """kinships""" +373 79 model """kg2e""" +373 79 loss """nssa""" +373 79 regularizer """no""" +373 79 
optimizer """adadelta""" +373 79 training_loop """owa""" +373 79 negative_sampler """basic""" +373 79 evaluator """rankbased""" +373 80 dataset """kinships""" +373 80 model """kg2e""" +373 80 loss """nssa""" +373 80 regularizer """no""" +373 80 optimizer """adadelta""" +373 80 training_loop """owa""" +373 80 negative_sampler """basic""" +373 80 evaluator """rankbased""" +373 81 dataset """kinships""" +373 81 model """kg2e""" +373 81 loss """nssa""" +373 81 regularizer """no""" +373 81 optimizer """adadelta""" +373 81 training_loop """owa""" +373 81 negative_sampler """basic""" +373 81 evaluator """rankbased""" +373 82 dataset """kinships""" +373 82 model """kg2e""" +373 82 loss """nssa""" +373 82 regularizer """no""" +373 82 optimizer """adadelta""" +373 82 training_loop """owa""" +373 82 negative_sampler """basic""" +373 82 evaluator """rankbased""" +373 83 dataset """kinships""" +373 83 model """kg2e""" +373 83 loss """nssa""" +373 83 regularizer """no""" +373 83 optimizer """adadelta""" +373 83 training_loop """owa""" +373 83 negative_sampler """basic""" +373 83 evaluator """rankbased""" +373 84 dataset """kinships""" +373 84 model """kg2e""" +373 84 loss """nssa""" +373 84 regularizer """no""" +373 84 optimizer """adadelta""" +373 84 training_loop """owa""" +373 84 negative_sampler """basic""" +373 84 evaluator """rankbased""" +373 85 dataset """kinships""" +373 85 model """kg2e""" +373 85 loss """nssa""" +373 85 regularizer """no""" +373 85 optimizer """adadelta""" +373 85 training_loop """owa""" +373 85 negative_sampler """basic""" +373 85 evaluator """rankbased""" +373 86 dataset """kinships""" +373 86 model """kg2e""" +373 86 loss """nssa""" +373 86 regularizer """no""" +373 86 optimizer """adadelta""" +373 86 training_loop """owa""" +373 86 negative_sampler """basic""" +373 86 evaluator """rankbased""" +373 87 dataset """kinships""" +373 87 model """kg2e""" +373 87 loss """nssa""" +373 87 regularizer """no""" +373 87 optimizer """adadelta""" +373 87 
training_loop """owa""" +373 87 negative_sampler """basic""" +373 87 evaluator """rankbased""" +373 88 dataset """kinships""" +373 88 model """kg2e""" +373 88 loss """nssa""" +373 88 regularizer """no""" +373 88 optimizer """adadelta""" +373 88 training_loop """owa""" +373 88 negative_sampler """basic""" +373 88 evaluator """rankbased""" +373 89 dataset """kinships""" +373 89 model """kg2e""" +373 89 loss """nssa""" +373 89 regularizer """no""" +373 89 optimizer """adadelta""" +373 89 training_loop """owa""" +373 89 negative_sampler """basic""" +373 89 evaluator """rankbased""" +373 90 dataset """kinships""" +373 90 model """kg2e""" +373 90 loss """nssa""" +373 90 regularizer """no""" +373 90 optimizer """adadelta""" +373 90 training_loop """owa""" +373 90 negative_sampler """basic""" +373 90 evaluator """rankbased""" +373 91 dataset """kinships""" +373 91 model """kg2e""" +373 91 loss """nssa""" +373 91 regularizer """no""" +373 91 optimizer """adadelta""" +373 91 training_loop """owa""" +373 91 negative_sampler """basic""" +373 91 evaluator """rankbased""" +373 92 dataset """kinships""" +373 92 model """kg2e""" +373 92 loss """nssa""" +373 92 regularizer """no""" +373 92 optimizer """adadelta""" +373 92 training_loop """owa""" +373 92 negative_sampler """basic""" +373 92 evaluator """rankbased""" +373 93 dataset """kinships""" +373 93 model """kg2e""" +373 93 loss """nssa""" +373 93 regularizer """no""" +373 93 optimizer """adadelta""" +373 93 training_loop """owa""" +373 93 negative_sampler """basic""" +373 93 evaluator """rankbased""" +373 94 dataset """kinships""" +373 94 model """kg2e""" +373 94 loss """nssa""" +373 94 regularizer """no""" +373 94 optimizer """adadelta""" +373 94 training_loop """owa""" +373 94 negative_sampler """basic""" +373 94 evaluator """rankbased""" +373 95 dataset """kinships""" +373 95 model """kg2e""" +373 95 loss """nssa""" +373 95 regularizer """no""" +373 95 optimizer """adadelta""" +373 95 training_loop """owa""" +373 95 
negative_sampler """basic""" +373 95 evaluator """rankbased""" +373 96 dataset """kinships""" +373 96 model """kg2e""" +373 96 loss """nssa""" +373 96 regularizer """no""" +373 96 optimizer """adadelta""" +373 96 training_loop """owa""" +373 96 negative_sampler """basic""" +373 96 evaluator """rankbased""" +373 97 dataset """kinships""" +373 97 model """kg2e""" +373 97 loss """nssa""" +373 97 regularizer """no""" +373 97 optimizer """adadelta""" +373 97 training_loop """owa""" +373 97 negative_sampler """basic""" +373 97 evaluator """rankbased""" +373 98 dataset """kinships""" +373 98 model """kg2e""" +373 98 loss """nssa""" +373 98 regularizer """no""" +373 98 optimizer """adadelta""" +373 98 training_loop """owa""" +373 98 negative_sampler """basic""" +373 98 evaluator """rankbased""" +373 99 dataset """kinships""" +373 99 model """kg2e""" +373 99 loss """nssa""" +373 99 regularizer """no""" +373 99 optimizer """adadelta""" +373 99 training_loop """owa""" +373 99 negative_sampler """basic""" +373 99 evaluator """rankbased""" +373 100 dataset """kinships""" +373 100 model """kg2e""" +373 100 loss """nssa""" +373 100 regularizer """no""" +373 100 optimizer """adadelta""" +373 100 training_loop """owa""" +373 100 negative_sampler """basic""" +373 100 evaluator """rankbased""" +374 1 model.embedding_dim 2.0 +374 1 model.c_min 0.0481086009757267 +374 1 model.c_max 2.738225157277058 +374 1 loss.margin 26.683038732551715 +374 1 loss.adversarial_temperature 0.4925307764247605 +374 1 negative_sampler.num_negs_per_pos 73.0 +374 1 training.batch_size 0.0 +374 2 model.embedding_dim 0.0 +374 2 model.c_min 0.05228745695856388 +374 2 model.c_max 1.237792939254273 +374 2 loss.margin 20.250358994142164 +374 2 loss.adversarial_temperature 0.6094363677808545 +374 2 negative_sampler.num_negs_per_pos 35.0 +374 2 training.batch_size 2.0 +374 3 model.embedding_dim 0.0 +374 3 model.c_min 0.08903899923996307 +374 3 model.c_max 4.317573140089086 +374 3 loss.margin 28.68400213939058 +374 3 
loss.adversarial_temperature 0.6737625030710251 +374 3 negative_sampler.num_negs_per_pos 31.0 +374 3 training.batch_size 1.0 +374 4 model.embedding_dim 0.0 +374 4 model.c_min 0.06363290435746888 +374 4 model.c_max 5.481122220297154 +374 4 loss.margin 9.851342372607947 +374 4 loss.adversarial_temperature 0.804642763359381 +374 4 negative_sampler.num_negs_per_pos 94.0 +374 4 training.batch_size 1.0 +374 5 model.embedding_dim 2.0 +374 5 model.c_min 0.07009514550590157 +374 5 model.c_max 2.3901272260376754 +374 5 loss.margin 29.37750020532434 +374 5 loss.adversarial_temperature 0.5143084326618474 +374 5 negative_sampler.num_negs_per_pos 43.0 +374 5 training.batch_size 0.0 +374 6 model.embedding_dim 2.0 +374 6 model.c_min 0.013563128542265808 +374 6 model.c_max 5.020350973890681 +374 6 loss.margin 28.750163655302476 +374 6 loss.adversarial_temperature 0.9878891980421111 +374 6 negative_sampler.num_negs_per_pos 24.0 +374 6 training.batch_size 1.0 +374 7 model.embedding_dim 1.0 +374 7 model.c_min 0.02609830505523266 +374 7 model.c_max 7.418401223870509 +374 7 loss.margin 25.95541474739895 +374 7 loss.adversarial_temperature 0.30862279230334944 +374 7 negative_sampler.num_negs_per_pos 55.0 +374 7 training.batch_size 2.0 +374 8 model.embedding_dim 0.0 +374 8 model.c_min 0.023562749707435708 +374 8 model.c_max 6.3305658203739945 +374 8 loss.margin 5.619436126923852 +374 8 loss.adversarial_temperature 0.7098251522313687 +374 8 negative_sampler.num_negs_per_pos 51.0 +374 8 training.batch_size 1.0 +374 9 model.embedding_dim 2.0 +374 9 model.c_min 0.019052871896229572 +374 9 model.c_max 9.298587288239759 +374 9 loss.margin 26.801437351835997 +374 9 loss.adversarial_temperature 0.7243217442880672 +374 9 negative_sampler.num_negs_per_pos 2.0 +374 9 training.batch_size 2.0 +374 10 model.embedding_dim 0.0 +374 10 model.c_min 0.04126438577775495 +374 10 model.c_max 9.327798675421548 +374 10 loss.margin 3.6825913577674005 +374 10 loss.adversarial_temperature 0.7281442053394237 +374 10 
negative_sampler.num_negs_per_pos 55.0 +374 10 training.batch_size 0.0 +374 11 model.embedding_dim 0.0 +374 11 model.c_min 0.06608025484171144 +374 11 model.c_max 7.964715914280697 +374 11 loss.margin 2.207277754931478 +374 11 loss.adversarial_temperature 0.15998003983402034 +374 11 negative_sampler.num_negs_per_pos 6.0 +374 11 training.batch_size 1.0 +374 12 model.embedding_dim 0.0 +374 12 model.c_min 0.030779394997362187 +374 12 model.c_max 4.451266000170875 +374 12 loss.margin 1.8031057055519275 +374 12 loss.adversarial_temperature 0.2185389204642787 +374 12 negative_sampler.num_negs_per_pos 46.0 +374 12 training.batch_size 0.0 +374 13 model.embedding_dim 1.0 +374 13 model.c_min 0.09578256047190657 +374 13 model.c_max 5.258476060917323 +374 13 loss.margin 10.38344497116306 +374 13 loss.adversarial_temperature 0.32262664116409434 +374 13 negative_sampler.num_negs_per_pos 40.0 +374 13 training.batch_size 0.0 +374 14 model.embedding_dim 0.0 +374 14 model.c_min 0.01797966846301125 +374 14 model.c_max 9.659283400956465 +374 14 loss.margin 3.007092910083269 +374 14 loss.adversarial_temperature 0.11217067742751788 +374 14 negative_sampler.num_negs_per_pos 97.0 +374 14 training.batch_size 0.0 +374 15 model.embedding_dim 0.0 +374 15 model.c_min 0.016262151387504652 +374 15 model.c_max 5.896219132993896 +374 15 loss.margin 2.651168471161157 +374 15 loss.adversarial_temperature 0.8625455063706204 +374 15 negative_sampler.num_negs_per_pos 18.0 +374 15 training.batch_size 1.0 +374 16 model.embedding_dim 1.0 +374 16 model.c_min 0.018245776526369717 +374 16 model.c_max 3.5178184981714016 +374 16 loss.margin 7.71923691821077 +374 16 loss.adversarial_temperature 0.8988793344709003 +374 16 negative_sampler.num_negs_per_pos 47.0 +374 16 training.batch_size 2.0 +374 17 model.embedding_dim 1.0 +374 17 model.c_min 0.019366390338554983 +374 17 model.c_max 8.497565815033147 +374 17 loss.margin 22.68511698755215 +374 17 loss.adversarial_temperature 0.2929463300260746 +374 17 
negative_sampler.num_negs_per_pos 65.0 +374 17 training.batch_size 2.0 +374 18 model.embedding_dim 1.0 +374 18 model.c_min 0.09339088801010154 +374 18 model.c_max 4.146425543999621 +374 18 loss.margin 10.1413593813488 +374 18 loss.adversarial_temperature 0.29368409500089515 +374 18 negative_sampler.num_negs_per_pos 82.0 +374 18 training.batch_size 2.0 +374 19 model.embedding_dim 1.0 +374 19 model.c_min 0.062115920109854235 +374 19 model.c_max 4.592852658441679 +374 19 loss.margin 17.113152886482244 +374 19 loss.adversarial_temperature 0.8783775951206199 +374 19 negative_sampler.num_negs_per_pos 2.0 +374 19 training.batch_size 1.0 +374 20 model.embedding_dim 0.0 +374 20 model.c_min 0.08672737039795711 +374 20 model.c_max 5.6623245779469205 +374 20 loss.margin 4.5660537133529875 +374 20 loss.adversarial_temperature 0.8914598691866507 +374 20 negative_sampler.num_negs_per_pos 26.0 +374 20 training.batch_size 1.0 +374 21 model.embedding_dim 2.0 +374 21 model.c_min 0.050069876701419214 +374 21 model.c_max 6.689438101065893 +374 21 loss.margin 9.544149000634912 +374 21 loss.adversarial_temperature 0.8910670502093724 +374 21 negative_sampler.num_negs_per_pos 2.0 +374 21 training.batch_size 0.0 +374 22 model.embedding_dim 2.0 +374 22 model.c_min 0.062194100248493304 +374 22 model.c_max 6.348607893270044 +374 22 loss.margin 26.73696691235057 +374 22 loss.adversarial_temperature 0.31890628964334666 +374 22 negative_sampler.num_negs_per_pos 49.0 +374 22 training.batch_size 1.0 +374 23 model.embedding_dim 1.0 +374 23 model.c_min 0.018963659374933713 +374 23 model.c_max 7.539258873324633 +374 23 loss.margin 10.72138666065213 +374 23 loss.adversarial_temperature 0.2867515245603697 +374 23 negative_sampler.num_negs_per_pos 73.0 +374 23 training.batch_size 0.0 +374 24 model.embedding_dim 2.0 +374 24 model.c_min 0.010327577628935273 +374 24 model.c_max 2.477841127586962 +374 24 loss.margin 8.706648537939644 +374 24 loss.adversarial_temperature 0.3391448399845466 +374 24 
negative_sampler.num_negs_per_pos 92.0 +374 24 training.batch_size 2.0 +374 25 model.embedding_dim 2.0 +374 25 model.c_min 0.01978870887059524 +374 25 model.c_max 8.26033333231659 +374 25 loss.margin 2.8986852256907447 +374 25 loss.adversarial_temperature 0.5365553028672604 +374 25 negative_sampler.num_negs_per_pos 30.0 +374 25 training.batch_size 0.0 +374 26 model.embedding_dim 1.0 +374 26 model.c_min 0.06583472727422128 +374 26 model.c_max 1.530316909913921 +374 26 loss.margin 8.097906267650576 +374 26 loss.adversarial_temperature 0.5249956757129789 +374 26 negative_sampler.num_negs_per_pos 7.0 +374 26 training.batch_size 2.0 +374 27 model.embedding_dim 0.0 +374 27 model.c_min 0.04038684269732882 +374 27 model.c_max 2.2276359281688687 +374 27 loss.margin 8.710178003068776 +374 27 loss.adversarial_temperature 0.31612881001309934 +374 27 negative_sampler.num_negs_per_pos 95.0 +374 27 training.batch_size 0.0 +374 28 model.embedding_dim 1.0 +374 28 model.c_min 0.018000589217045625 +374 28 model.c_max 8.706402142381261 +374 28 loss.margin 9.500241159958048 +374 28 loss.adversarial_temperature 0.7307053361194362 +374 28 negative_sampler.num_negs_per_pos 67.0 +374 28 training.batch_size 0.0 +374 29 model.embedding_dim 2.0 +374 29 model.c_min 0.012956282191364563 +374 29 model.c_max 4.438618045264226 +374 29 loss.margin 26.68718585748366 +374 29 loss.adversarial_temperature 0.7359004275638962 +374 29 negative_sampler.num_negs_per_pos 29.0 +374 29 training.batch_size 1.0 +374 30 model.embedding_dim 0.0 +374 30 model.c_min 0.09122129011492253 +374 30 model.c_max 4.103325440021578 +374 30 loss.margin 18.51241652359406 +374 30 loss.adversarial_temperature 0.703083371747938 +374 30 negative_sampler.num_negs_per_pos 98.0 +374 30 training.batch_size 0.0 +374 31 model.embedding_dim 1.0 +374 31 model.c_min 0.06396512500300698 +374 31 model.c_max 9.715060032603434 +374 31 loss.margin 8.969471014763144 +374 31 loss.adversarial_temperature 0.37595755381051765 +374 31 
negative_sampler.num_negs_per_pos 55.0 +374 31 training.batch_size 0.0 +374 32 model.embedding_dim 2.0 +374 32 model.c_min 0.05818597622662786 +374 32 model.c_max 7.161750694569316 +374 32 loss.margin 5.749220416902187 +374 32 loss.adversarial_temperature 0.6798328382938692 +374 32 negative_sampler.num_negs_per_pos 87.0 +374 32 training.batch_size 2.0 +374 33 model.embedding_dim 2.0 +374 33 model.c_min 0.03146332840643567 +374 33 model.c_max 4.178088551764268 +374 33 loss.margin 4.22577517213643 +374 33 loss.adversarial_temperature 0.17837278251170396 +374 33 negative_sampler.num_negs_per_pos 52.0 +374 33 training.batch_size 2.0 +374 34 model.embedding_dim 2.0 +374 34 model.c_min 0.025673407299361302 +374 34 model.c_max 6.281887180402382 +374 34 loss.margin 6.290639778652145 +374 34 loss.adversarial_temperature 0.7920824590015083 +374 34 negative_sampler.num_negs_per_pos 31.0 +374 34 training.batch_size 1.0 +374 35 model.embedding_dim 2.0 +374 35 model.c_min 0.03498427078025551 +374 35 model.c_max 1.876995649072076 +374 35 loss.margin 20.684015808535833 +374 35 loss.adversarial_temperature 0.2565605074096804 +374 35 negative_sampler.num_negs_per_pos 89.0 +374 35 training.batch_size 2.0 +374 36 model.embedding_dim 1.0 +374 36 model.c_min 0.022277286473984522 +374 36 model.c_max 8.203815883296421 +374 36 loss.margin 18.76074044554083 +374 36 loss.adversarial_temperature 0.1416952909453482 +374 36 negative_sampler.num_negs_per_pos 0.0 +374 36 training.batch_size 0.0 +374 37 model.embedding_dim 0.0 +374 37 model.c_min 0.06317645869215116 +374 37 model.c_max 8.370683578600337 +374 37 loss.margin 20.60747853521388 +374 37 loss.adversarial_temperature 0.7375436449842863 +374 37 negative_sampler.num_negs_per_pos 76.0 +374 37 training.batch_size 2.0 +374 38 model.embedding_dim 0.0 +374 38 model.c_min 0.02734997965164152 +374 38 model.c_max 5.179563559371311 +374 38 loss.margin 9.614280862608082 +374 38 loss.adversarial_temperature 0.2569190730808619 +374 38 
negative_sampler.num_negs_per_pos 14.0 +374 38 training.batch_size 1.0 +374 39 model.embedding_dim 2.0 +374 39 model.c_min 0.08260538070869736 +374 39 model.c_max 7.008311205614893 +374 39 loss.margin 13.677719534310055 +374 39 loss.adversarial_temperature 0.6360650225678939 +374 39 negative_sampler.num_negs_per_pos 51.0 +374 39 training.batch_size 0.0 +374 40 model.embedding_dim 2.0 +374 40 model.c_min 0.09161604109593469 +374 40 model.c_max 5.718052804329363 +374 40 loss.margin 1.5223130221407075 +374 40 loss.adversarial_temperature 0.9269236138384267 +374 40 negative_sampler.num_negs_per_pos 23.0 +374 40 training.batch_size 1.0 +374 41 model.embedding_dim 1.0 +374 41 model.c_min 0.029612776836531265 +374 41 model.c_max 6.235973635192353 +374 41 loss.margin 17.45238964809481 +374 41 loss.adversarial_temperature 0.5426013717812379 +374 41 negative_sampler.num_negs_per_pos 90.0 +374 41 training.batch_size 0.0 +374 42 model.embedding_dim 1.0 +374 42 model.c_min 0.03538616168110088 +374 42 model.c_max 9.676988470714784 +374 42 loss.margin 15.4930457023717 +374 42 loss.adversarial_temperature 0.9647903968471608 +374 42 negative_sampler.num_negs_per_pos 6.0 +374 42 training.batch_size 2.0 +374 43 model.embedding_dim 1.0 +374 43 model.c_min 0.01581583026426916 +374 43 model.c_max 1.9708289910797947 +374 43 loss.margin 19.078866780012024 +374 43 loss.adversarial_temperature 0.5516687813931739 +374 43 negative_sampler.num_negs_per_pos 3.0 +374 43 training.batch_size 1.0 +374 44 model.embedding_dim 2.0 +374 44 model.c_min 0.08051373760660636 +374 44 model.c_max 8.202193374504755 +374 44 loss.margin 11.630056500232069 +374 44 loss.adversarial_temperature 0.183377757147073 +374 44 negative_sampler.num_negs_per_pos 90.0 +374 44 training.batch_size 0.0 +374 45 model.embedding_dim 2.0 +374 45 model.c_min 0.025571610126453693 +374 45 model.c_max 4.512277131696849 +374 45 loss.margin 8.58288953178348 +374 45 loss.adversarial_temperature 0.991074341348439 +374 45 
negative_sampler.num_negs_per_pos 2.0 +374 45 training.batch_size 2.0 +374 46 model.embedding_dim 2.0 +374 46 model.c_min 0.013724585307034798 +374 46 model.c_max 9.192824772199922 +374 46 loss.margin 21.31207209227534 +374 46 loss.adversarial_temperature 0.9142292368427827 +374 46 negative_sampler.num_negs_per_pos 75.0 +374 46 training.batch_size 0.0 +374 47 model.embedding_dim 1.0 +374 47 model.c_min 0.04711320686617318 +374 47 model.c_max 4.132355798080196 +374 47 loss.margin 14.908431967801837 +374 47 loss.adversarial_temperature 0.4225341214383169 +374 47 negative_sampler.num_negs_per_pos 75.0 +374 47 training.batch_size 1.0 +374 48 model.embedding_dim 1.0 +374 48 model.c_min 0.021959451427710656 +374 48 model.c_max 6.458752233611179 +374 48 loss.margin 25.352260228066253 +374 48 loss.adversarial_temperature 0.754963241924175 +374 48 negative_sampler.num_negs_per_pos 91.0 +374 48 training.batch_size 0.0 +374 49 model.embedding_dim 1.0 +374 49 model.c_min 0.013958304768431592 +374 49 model.c_max 4.458342973905878 +374 49 loss.margin 21.173029302043425 +374 49 loss.adversarial_temperature 0.9074968099429002 +374 49 negative_sampler.num_negs_per_pos 22.0 +374 49 training.batch_size 0.0 +374 50 model.embedding_dim 1.0 +374 50 model.c_min 0.012581506815094148 +374 50 model.c_max 9.59655088384127 +374 50 loss.margin 20.37052259082267 +374 50 loss.adversarial_temperature 0.9776017127289813 +374 50 negative_sampler.num_negs_per_pos 63.0 +374 50 training.batch_size 0.0 +374 51 model.embedding_dim 1.0 +374 51 model.c_min 0.04232693356743906 +374 51 model.c_max 1.6294978486660463 +374 51 loss.margin 15.530061696986575 +374 51 loss.adversarial_temperature 0.6817814069399463 +374 51 negative_sampler.num_negs_per_pos 50.0 +374 51 training.batch_size 0.0 +374 52 model.embedding_dim 0.0 +374 52 model.c_min 0.01150232718387008 +374 52 model.c_max 5.803454384658094 +374 52 loss.margin 1.3145756758580938 +374 52 loss.adversarial_temperature 0.5272449101226422 +374 52 
negative_sampler.num_negs_per_pos 3.0 +374 52 training.batch_size 0.0 +374 53 model.embedding_dim 1.0 +374 53 model.c_min 0.01012895636203417 +374 53 model.c_max 6.750087777377001 +374 53 loss.margin 28.11599273735635 +374 53 loss.adversarial_temperature 0.9892318375398053 +374 53 negative_sampler.num_negs_per_pos 27.0 +374 53 training.batch_size 1.0 +374 54 model.embedding_dim 0.0 +374 54 model.c_min 0.024043434880557765 +374 54 model.c_max 7.533322575495989 +374 54 loss.margin 28.697722861543124 +374 54 loss.adversarial_temperature 0.976882114125266 +374 54 negative_sampler.num_negs_per_pos 52.0 +374 54 training.batch_size 0.0 +374 55 model.embedding_dim 0.0 +374 55 model.c_min 0.012293726144176233 +374 55 model.c_max 6.943951612244483 +374 55 loss.margin 8.349008085307075 +374 55 loss.adversarial_temperature 0.8377395844375705 +374 55 negative_sampler.num_negs_per_pos 30.0 +374 55 training.batch_size 2.0 +374 56 model.embedding_dim 0.0 +374 56 model.c_min 0.08101912588469734 +374 56 model.c_max 2.2362154309327025 +374 56 loss.margin 23.15511985243631 +374 56 loss.adversarial_temperature 0.9387519426713926 +374 56 negative_sampler.num_negs_per_pos 39.0 +374 56 training.batch_size 0.0 +374 57 model.embedding_dim 0.0 +374 57 model.c_min 0.04207447333157586 +374 57 model.c_max 6.416648889119921 +374 57 loss.margin 10.481303501662728 +374 57 loss.adversarial_temperature 0.605803234882027 +374 57 negative_sampler.num_negs_per_pos 36.0 +374 57 training.batch_size 2.0 +374 58 model.embedding_dim 2.0 +374 58 model.c_min 0.054280837087726956 +374 58 model.c_max 8.2947086110672 +374 58 loss.margin 1.0857152668269525 +374 58 loss.adversarial_temperature 0.2413323205868686 +374 58 negative_sampler.num_negs_per_pos 68.0 +374 58 training.batch_size 2.0 +374 59 model.embedding_dim 0.0 +374 59 model.c_min 0.02747595848797159 +374 59 model.c_max 7.1529007023941205 +374 59 loss.margin 28.12685755740992 +374 59 loss.adversarial_temperature 0.5583619343041065 +374 59 
negative_sampler.num_negs_per_pos 87.0 +374 59 training.batch_size 0.0 +374 60 model.embedding_dim 1.0 +374 60 model.c_min 0.05412828108971487 +374 60 model.c_max 9.93975581100224 +374 60 loss.margin 19.111331110886763 +374 60 loss.adversarial_temperature 0.6978214847675772 +374 60 negative_sampler.num_negs_per_pos 25.0 +374 60 training.batch_size 1.0 +374 61 model.embedding_dim 0.0 +374 61 model.c_min 0.045268281343991586 +374 61 model.c_max 3.85493921827715 +374 61 loss.margin 19.028240490851417 +374 61 loss.adversarial_temperature 0.8237385278701048 +374 61 negative_sampler.num_negs_per_pos 53.0 +374 61 training.batch_size 2.0 +374 62 model.embedding_dim 0.0 +374 62 model.c_min 0.0451727949479246 +374 62 model.c_max 3.258753824390282 +374 62 loss.margin 19.74142956726622 +374 62 loss.adversarial_temperature 0.6688326347901559 +374 62 negative_sampler.num_negs_per_pos 35.0 +374 62 training.batch_size 1.0 +374 63 model.embedding_dim 1.0 +374 63 model.c_min 0.05380061726690098 +374 63 model.c_max 4.265861751441424 +374 63 loss.margin 18.12161545299752 +374 63 loss.adversarial_temperature 0.710825869430614 +374 63 negative_sampler.num_negs_per_pos 87.0 +374 63 training.batch_size 1.0 +374 64 model.embedding_dim 1.0 +374 64 model.c_min 0.03541673348262529 +374 64 model.c_max 5.052137941590666 +374 64 loss.margin 12.52426449716665 +374 64 loss.adversarial_temperature 0.9521679484136075 +374 64 negative_sampler.num_negs_per_pos 71.0 +374 64 training.batch_size 2.0 +374 65 model.embedding_dim 2.0 +374 65 model.c_min 0.02723784373488747 +374 65 model.c_max 2.571547842623575 +374 65 loss.margin 13.390274428454642 +374 65 loss.adversarial_temperature 0.9971454604028485 +374 65 negative_sampler.num_negs_per_pos 64.0 +374 65 training.batch_size 1.0 +374 66 model.embedding_dim 2.0 +374 66 model.c_min 0.011675839508021152 +374 66 model.c_max 5.133922634787533 +374 66 loss.margin 20.63477005299192 +374 66 loss.adversarial_temperature 0.7223904945604547 +374 66 
negative_sampler.num_negs_per_pos 82.0 +374 66 training.batch_size 0.0 +374 67 model.embedding_dim 0.0 +374 67 model.c_min 0.04323424812153463 +374 67 model.c_max 8.700319733549062 +374 67 loss.margin 21.257517697526865 +374 67 loss.adversarial_temperature 0.25055559382155734 +374 67 negative_sampler.num_negs_per_pos 86.0 +374 67 training.batch_size 1.0 +374 68 model.embedding_dim 1.0 +374 68 model.c_min 0.028868682493451283 +374 68 model.c_max 4.913636624454421 +374 68 loss.margin 3.775026029060324 +374 68 loss.adversarial_temperature 0.3641006029792131 +374 68 negative_sampler.num_negs_per_pos 69.0 +374 68 training.batch_size 1.0 +374 69 model.embedding_dim 0.0 +374 69 model.c_min 0.08000356458691903 +374 69 model.c_max 9.02452656644332 +374 69 loss.margin 7.724392277289843 +374 69 loss.adversarial_temperature 0.9341918396308776 +374 69 negative_sampler.num_negs_per_pos 34.0 +374 69 training.batch_size 0.0 +374 70 model.embedding_dim 0.0 +374 70 model.c_min 0.011352421265335515 +374 70 model.c_max 7.340328663265213 +374 70 loss.margin 1.8843699200734414 +374 70 loss.adversarial_temperature 0.795457247267912 +374 70 negative_sampler.num_negs_per_pos 33.0 +374 70 training.batch_size 2.0 +374 71 model.embedding_dim 0.0 +374 71 model.c_min 0.014054316048717614 +374 71 model.c_max 6.956499631175273 +374 71 loss.margin 17.91051800655179 +374 71 loss.adversarial_temperature 0.8188350411840888 +374 71 negative_sampler.num_negs_per_pos 62.0 +374 71 training.batch_size 2.0 +374 72 model.embedding_dim 0.0 +374 72 model.c_min 0.09048504384885749 +374 72 model.c_max 2.1375858425706618 +374 72 loss.margin 6.575802709993864 +374 72 loss.adversarial_temperature 0.22902855673249878 +374 72 negative_sampler.num_negs_per_pos 83.0 +374 72 training.batch_size 2.0 +374 73 model.embedding_dim 0.0 +374 73 model.c_min 0.042873291029133835 +374 73 model.c_max 4.7428454498442205 +374 73 loss.margin 21.611639970765705 +374 73 loss.adversarial_temperature 0.28706876565735023 +374 73 
negative_sampler.num_negs_per_pos 11.0 +374 73 training.batch_size 2.0 +374 74 model.embedding_dim 2.0 +374 74 model.c_min 0.0690699043208503 +374 74 model.c_max 2.9834881748978335 +374 74 loss.margin 13.57301321902178 +374 74 loss.adversarial_temperature 0.19231331733273815 +374 74 negative_sampler.num_negs_per_pos 42.0 +374 74 training.batch_size 2.0 +374 75 model.embedding_dim 1.0 +374 75 model.c_min 0.013272926874449802 +374 75 model.c_max 6.478130595035556 +374 75 loss.margin 18.624106824099616 +374 75 loss.adversarial_temperature 0.5801972805204303 +374 75 negative_sampler.num_negs_per_pos 51.0 +374 75 training.batch_size 1.0 +374 76 model.embedding_dim 2.0 +374 76 model.c_min 0.016988504221187528 +374 76 model.c_max 2.6202261761460512 +374 76 loss.margin 8.048328014965142 +374 76 loss.adversarial_temperature 0.8952156624406983 +374 76 negative_sampler.num_negs_per_pos 37.0 +374 76 training.batch_size 1.0 +374 77 model.embedding_dim 1.0 +374 77 model.c_min 0.02772256846264449 +374 77 model.c_max 2.270059764910946 +374 77 loss.margin 23.470524932638046 +374 77 loss.adversarial_temperature 0.6804305556276633 +374 77 negative_sampler.num_negs_per_pos 21.0 +374 77 training.batch_size 2.0 +374 78 model.embedding_dim 2.0 +374 78 model.c_min 0.03571978981260329 +374 78 model.c_max 7.251723340772418 +374 78 loss.margin 28.91266226932864 +374 78 loss.adversarial_temperature 0.5228675260158739 +374 78 negative_sampler.num_negs_per_pos 53.0 +374 78 training.batch_size 1.0 +374 79 model.embedding_dim 1.0 +374 79 model.c_min 0.05381419644931027 +374 79 model.c_max 5.434511558199744 +374 79 loss.margin 29.75473903702262 +374 79 loss.adversarial_temperature 0.17650981464965457 +374 79 negative_sampler.num_negs_per_pos 86.0 +374 79 training.batch_size 0.0 +374 80 model.embedding_dim 1.0 +374 80 model.c_min 0.023855178905091125 +374 80 model.c_max 5.298902973490406 +374 80 loss.margin 21.540739622258897 +374 80 loss.adversarial_temperature 0.9539003702569268 +374 80 
negative_sampler.num_negs_per_pos 99.0 +374 80 training.batch_size 2.0 +374 81 model.embedding_dim 1.0 +374 81 model.c_min 0.034185817055526556 +374 81 model.c_max 6.053717642255912 +374 81 loss.margin 5.9669765188222 +374 81 loss.adversarial_temperature 0.7687417432293331 +374 81 negative_sampler.num_negs_per_pos 8.0 +374 81 training.batch_size 2.0 +374 82 model.embedding_dim 0.0 +374 82 model.c_min 0.07866247087596223 +374 82 model.c_max 7.120211755873825 +374 82 loss.margin 21.568269155192013 +374 82 loss.adversarial_temperature 0.6153342344896837 +374 82 negative_sampler.num_negs_per_pos 50.0 +374 82 training.batch_size 2.0 +374 83 model.embedding_dim 2.0 +374 83 model.c_min 0.05247243342675412 +374 83 model.c_max 2.4365530460653786 +374 83 loss.margin 3.8761412400786783 +374 83 loss.adversarial_temperature 0.19284915113034512 +374 83 negative_sampler.num_negs_per_pos 67.0 +374 83 training.batch_size 2.0 +374 84 model.embedding_dim 2.0 +374 84 model.c_min 0.020766698617267144 +374 84 model.c_max 3.0522717083027846 +374 84 loss.margin 27.6102239253582 +374 84 loss.adversarial_temperature 0.18673530913313477 +374 84 negative_sampler.num_negs_per_pos 52.0 +374 84 training.batch_size 1.0 +374 85 model.embedding_dim 0.0 +374 85 model.c_min 0.024509479335388933 +374 85 model.c_max 9.199392881221774 +374 85 loss.margin 8.141078085639077 +374 85 loss.adversarial_temperature 0.9435266694563802 +374 85 negative_sampler.num_negs_per_pos 64.0 +374 85 training.batch_size 2.0 +374 86 model.embedding_dim 2.0 +374 86 model.c_min 0.04522196981964239 +374 86 model.c_max 4.62473658004874 +374 86 loss.margin 17.117368528256176 +374 86 loss.adversarial_temperature 0.34523081521164467 +374 86 negative_sampler.num_negs_per_pos 32.0 +374 86 training.batch_size 2.0 +374 87 model.embedding_dim 2.0 +374 87 model.c_min 0.050022905456931284 +374 87 model.c_max 2.0551989317056383 +374 87 loss.margin 17.003667731161563 +374 87 loss.adversarial_temperature 0.8768820448046268 +374 87 
negative_sampler.num_negs_per_pos 33.0 +374 87 training.batch_size 1.0 +374 88 model.embedding_dim 2.0 +374 88 model.c_min 0.04898928232348078 +374 88 model.c_max 7.381677381060329 +374 88 loss.margin 21.625949651340168 +374 88 loss.adversarial_temperature 0.8943282842166006 +374 88 negative_sampler.num_negs_per_pos 38.0 +374 88 training.batch_size 0.0 +374 89 model.embedding_dim 0.0 +374 89 model.c_min 0.062351380980070314 +374 89 model.c_max 9.893551035412193 +374 89 loss.margin 27.494115196778118 +374 89 loss.adversarial_temperature 0.5718102787511622 +374 89 negative_sampler.num_negs_per_pos 70.0 +374 89 training.batch_size 0.0 +374 90 model.embedding_dim 0.0 +374 90 model.c_min 0.042852917048386355 +374 90 model.c_max 8.884983536308672 +374 90 loss.margin 9.340400844585387 +374 90 loss.adversarial_temperature 0.992963885391568 +374 90 negative_sampler.num_negs_per_pos 18.0 +374 90 training.batch_size 0.0 +374 91 model.embedding_dim 2.0 +374 91 model.c_min 0.017462388118060456 +374 91 model.c_max 2.809585653550625 +374 91 loss.margin 17.248300132408176 +374 91 loss.adversarial_temperature 0.36911732635832006 +374 91 negative_sampler.num_negs_per_pos 38.0 +374 91 training.batch_size 1.0 +374 92 model.embedding_dim 0.0 +374 92 model.c_min 0.04127094115081165 +374 92 model.c_max 9.67232409420129 +374 92 loss.margin 1.728556555964597 +374 92 loss.adversarial_temperature 0.2696778008348543 +374 92 negative_sampler.num_negs_per_pos 41.0 +374 92 training.batch_size 0.0 +374 93 model.embedding_dim 0.0 +374 93 model.c_min 0.03687769297721272 +374 93 model.c_max 8.614995600055874 +374 93 loss.margin 3.1482085494464416 +374 93 loss.adversarial_temperature 0.8476434775419337 +374 93 negative_sampler.num_negs_per_pos 62.0 +374 93 training.batch_size 2.0 +374 94 model.embedding_dim 1.0 +374 94 model.c_min 0.01231580206978394 +374 94 model.c_max 5.793169746184768 +374 94 loss.margin 19.003304049675258 +374 94 loss.adversarial_temperature 0.2799150586850913 +374 94 
negative_sampler.num_negs_per_pos 75.0 +374 94 training.batch_size 0.0 +374 95 model.embedding_dim 0.0 +374 95 model.c_min 0.08081158969469814 +374 95 model.c_max 7.666565584930268 +374 95 loss.margin 4.299300988969484 +374 95 loss.adversarial_temperature 0.24572404538944603 +374 95 negative_sampler.num_negs_per_pos 95.0 +374 95 training.batch_size 1.0 +374 96 model.embedding_dim 0.0 +374 96 model.c_min 0.031209566062654053 +374 96 model.c_max 2.8592206912820166 +374 96 loss.margin 14.949029982747273 +374 96 loss.adversarial_temperature 0.43646299276084666 +374 96 negative_sampler.num_negs_per_pos 9.0 +374 96 training.batch_size 2.0 +374 97 model.embedding_dim 1.0 +374 97 model.c_min 0.036502080018671036 +374 97 model.c_max 1.461263837006095 +374 97 loss.margin 22.33335849976246 +374 97 loss.adversarial_temperature 0.861140728181697 +374 97 negative_sampler.num_negs_per_pos 17.0 +374 97 training.batch_size 2.0 +374 98 model.embedding_dim 0.0 +374 98 model.c_min 0.08275730138335986 +374 98 model.c_max 2.1298139928961684 +374 98 loss.margin 25.3055405786327 +374 98 loss.adversarial_temperature 0.7367419203399018 +374 98 negative_sampler.num_negs_per_pos 36.0 +374 98 training.batch_size 2.0 +374 99 model.embedding_dim 2.0 +374 99 model.c_min 0.08258059042272983 +374 99 model.c_max 1.1434180984864288 +374 99 loss.margin 8.944377484067203 +374 99 loss.adversarial_temperature 0.2955308034798061 +374 99 negative_sampler.num_negs_per_pos 23.0 +374 99 training.batch_size 2.0 +374 100 model.embedding_dim 2.0 +374 100 model.c_min 0.05713591206166952 +374 100 model.c_max 6.121810733739116 +374 100 loss.margin 18.667615341674974 +374 100 loss.adversarial_temperature 0.7791028835349044 +374 100 negative_sampler.num_negs_per_pos 34.0 +374 100 training.batch_size 1.0 +374 1 dataset """kinships""" +374 1 model """kg2e""" +374 1 loss """nssa""" +374 1 regularizer """no""" +374 1 optimizer """adadelta""" +374 1 training_loop """owa""" +374 1 negative_sampler """basic""" +374 1 
evaluator """rankbased""" +374 2 dataset """kinships""" +374 2 model """kg2e""" +374 2 loss """nssa""" +374 2 regularizer """no""" +374 2 optimizer """adadelta""" +374 2 training_loop """owa""" +374 2 negative_sampler """basic""" +374 2 evaluator """rankbased""" +374 3 dataset """kinships""" +374 3 model """kg2e""" +374 3 loss """nssa""" +374 3 regularizer """no""" +374 3 optimizer """adadelta""" +374 3 training_loop """owa""" +374 3 negative_sampler """basic""" +374 3 evaluator """rankbased""" +374 4 dataset """kinships""" +374 4 model """kg2e""" +374 4 loss """nssa""" +374 4 regularizer """no""" +374 4 optimizer """adadelta""" +374 4 training_loop """owa""" +374 4 negative_sampler """basic""" +374 4 evaluator """rankbased""" +374 5 dataset """kinships""" +374 5 model """kg2e""" +374 5 loss """nssa""" +374 5 regularizer """no""" +374 5 optimizer """adadelta""" +374 5 training_loop """owa""" +374 5 negative_sampler """basic""" +374 5 evaluator """rankbased""" +374 6 dataset """kinships""" +374 6 model """kg2e""" +374 6 loss """nssa""" +374 6 regularizer """no""" +374 6 optimizer """adadelta""" +374 6 training_loop """owa""" +374 6 negative_sampler """basic""" +374 6 evaluator """rankbased""" +374 7 dataset """kinships""" +374 7 model """kg2e""" +374 7 loss """nssa""" +374 7 regularizer """no""" +374 7 optimizer """adadelta""" +374 7 training_loop """owa""" +374 7 negative_sampler """basic""" +374 7 evaluator """rankbased""" +374 8 dataset """kinships""" +374 8 model """kg2e""" +374 8 loss """nssa""" +374 8 regularizer """no""" +374 8 optimizer """adadelta""" +374 8 training_loop """owa""" +374 8 negative_sampler """basic""" +374 8 evaluator """rankbased""" +374 9 dataset """kinships""" +374 9 model """kg2e""" +374 9 loss """nssa""" +374 9 regularizer """no""" +374 9 optimizer """adadelta""" +374 9 training_loop """owa""" +374 9 negative_sampler """basic""" +374 9 evaluator """rankbased""" +374 10 dataset """kinships""" +374 10 model """kg2e""" +374 10 loss 
"""nssa""" +374 10 regularizer """no""" +374 10 optimizer """adadelta""" +374 10 training_loop """owa""" +374 10 negative_sampler """basic""" +374 10 evaluator """rankbased""" +374 11 dataset """kinships""" +374 11 model """kg2e""" +374 11 loss """nssa""" +374 11 regularizer """no""" +374 11 optimizer """adadelta""" +374 11 training_loop """owa""" +374 11 negative_sampler """basic""" +374 11 evaluator """rankbased""" +374 12 dataset """kinships""" +374 12 model """kg2e""" +374 12 loss """nssa""" +374 12 regularizer """no""" +374 12 optimizer """adadelta""" +374 12 training_loop """owa""" +374 12 negative_sampler """basic""" +374 12 evaluator """rankbased""" +374 13 dataset """kinships""" +374 13 model """kg2e""" +374 13 loss """nssa""" +374 13 regularizer """no""" +374 13 optimizer """adadelta""" +374 13 training_loop """owa""" +374 13 negative_sampler """basic""" +374 13 evaluator """rankbased""" +374 14 dataset """kinships""" +374 14 model """kg2e""" +374 14 loss """nssa""" +374 14 regularizer """no""" +374 14 optimizer """adadelta""" +374 14 training_loop """owa""" +374 14 negative_sampler """basic""" +374 14 evaluator """rankbased""" +374 15 dataset """kinships""" +374 15 model """kg2e""" +374 15 loss """nssa""" +374 15 regularizer """no""" +374 15 optimizer """adadelta""" +374 15 training_loop """owa""" +374 15 negative_sampler """basic""" +374 15 evaluator """rankbased""" +374 16 dataset """kinships""" +374 16 model """kg2e""" +374 16 loss """nssa""" +374 16 regularizer """no""" +374 16 optimizer """adadelta""" +374 16 training_loop """owa""" +374 16 negative_sampler """basic""" +374 16 evaluator """rankbased""" +374 17 dataset """kinships""" +374 17 model """kg2e""" +374 17 loss """nssa""" +374 17 regularizer """no""" +374 17 optimizer """adadelta""" +374 17 training_loop """owa""" +374 17 negative_sampler """basic""" +374 17 evaluator """rankbased""" +374 18 dataset """kinships""" +374 18 model """kg2e""" +374 18 loss """nssa""" +374 18 regularizer """no""" 
+374 18 optimizer """adadelta""" +374 18 training_loop """owa""" +374 18 negative_sampler """basic""" +374 18 evaluator """rankbased""" +374 19 dataset """kinships""" +374 19 model """kg2e""" +374 19 loss """nssa""" +374 19 regularizer """no""" +374 19 optimizer """adadelta""" +374 19 training_loop """owa""" +374 19 negative_sampler """basic""" +374 19 evaluator """rankbased""" +374 20 dataset """kinships""" +374 20 model """kg2e""" +374 20 loss """nssa""" +374 20 regularizer """no""" +374 20 optimizer """adadelta""" +374 20 training_loop """owa""" +374 20 negative_sampler """basic""" +374 20 evaluator """rankbased""" +374 21 dataset """kinships""" +374 21 model """kg2e""" +374 21 loss """nssa""" +374 21 regularizer """no""" +374 21 optimizer """adadelta""" +374 21 training_loop """owa""" +374 21 negative_sampler """basic""" +374 21 evaluator """rankbased""" +374 22 dataset """kinships""" +374 22 model """kg2e""" +374 22 loss """nssa""" +374 22 regularizer """no""" +374 22 optimizer """adadelta""" +374 22 training_loop """owa""" +374 22 negative_sampler """basic""" +374 22 evaluator """rankbased""" +374 23 dataset """kinships""" +374 23 model """kg2e""" +374 23 loss """nssa""" +374 23 regularizer """no""" +374 23 optimizer """adadelta""" +374 23 training_loop """owa""" +374 23 negative_sampler """basic""" +374 23 evaluator """rankbased""" +374 24 dataset """kinships""" +374 24 model """kg2e""" +374 24 loss """nssa""" +374 24 regularizer """no""" +374 24 optimizer """adadelta""" +374 24 training_loop """owa""" +374 24 negative_sampler """basic""" +374 24 evaluator """rankbased""" +374 25 dataset """kinships""" +374 25 model """kg2e""" +374 25 loss """nssa""" +374 25 regularizer """no""" +374 25 optimizer """adadelta""" +374 25 training_loop """owa""" +374 25 negative_sampler """basic""" +374 25 evaluator """rankbased""" +374 26 dataset """kinships""" +374 26 model """kg2e""" +374 26 loss """nssa""" +374 26 regularizer """no""" +374 26 optimizer """adadelta""" +374 
26 training_loop """owa""" +374 26 negative_sampler """basic""" +374 26 evaluator """rankbased""" +374 27 dataset """kinships""" +374 27 model """kg2e""" +374 27 loss """nssa""" +374 27 regularizer """no""" +374 27 optimizer """adadelta""" +374 27 training_loop """owa""" +374 27 negative_sampler """basic""" +374 27 evaluator """rankbased""" +374 28 dataset """kinships""" +374 28 model """kg2e""" +374 28 loss """nssa""" +374 28 regularizer """no""" +374 28 optimizer """adadelta""" +374 28 training_loop """owa""" +374 28 negative_sampler """basic""" +374 28 evaluator """rankbased""" +374 29 dataset """kinships""" +374 29 model """kg2e""" +374 29 loss """nssa""" +374 29 regularizer """no""" +374 29 optimizer """adadelta""" +374 29 training_loop """owa""" +374 29 negative_sampler """basic""" +374 29 evaluator """rankbased""" +374 30 dataset """kinships""" +374 30 model """kg2e""" +374 30 loss """nssa""" +374 30 regularizer """no""" +374 30 optimizer """adadelta""" +374 30 training_loop """owa""" +374 30 negative_sampler """basic""" +374 30 evaluator """rankbased""" +374 31 dataset """kinships""" +374 31 model """kg2e""" +374 31 loss """nssa""" +374 31 regularizer """no""" +374 31 optimizer """adadelta""" +374 31 training_loop """owa""" +374 31 negative_sampler """basic""" +374 31 evaluator """rankbased""" +374 32 dataset """kinships""" +374 32 model """kg2e""" +374 32 loss """nssa""" +374 32 regularizer """no""" +374 32 optimizer """adadelta""" +374 32 training_loop """owa""" +374 32 negative_sampler """basic""" +374 32 evaluator """rankbased""" +374 33 dataset """kinships""" +374 33 model """kg2e""" +374 33 loss """nssa""" +374 33 regularizer """no""" +374 33 optimizer """adadelta""" +374 33 training_loop """owa""" +374 33 negative_sampler """basic""" +374 33 evaluator """rankbased""" +374 34 dataset """kinships""" +374 34 model """kg2e""" +374 34 loss """nssa""" +374 34 regularizer """no""" +374 34 optimizer """adadelta""" +374 34 training_loop """owa""" +374 34 
negative_sampler """basic""" +374 34 evaluator """rankbased""" +374 35 dataset """kinships""" +374 35 model """kg2e""" +374 35 loss """nssa""" +374 35 regularizer """no""" +374 35 optimizer """adadelta""" +374 35 training_loop """owa""" +374 35 negative_sampler """basic""" +374 35 evaluator """rankbased""" +374 36 dataset """kinships""" +374 36 model """kg2e""" +374 36 loss """nssa""" +374 36 regularizer """no""" +374 36 optimizer """adadelta""" +374 36 training_loop """owa""" +374 36 negative_sampler """basic""" +374 36 evaluator """rankbased""" +374 37 dataset """kinships""" +374 37 model """kg2e""" +374 37 loss """nssa""" +374 37 regularizer """no""" +374 37 optimizer """adadelta""" +374 37 training_loop """owa""" +374 37 negative_sampler """basic""" +374 37 evaluator """rankbased""" +374 38 dataset """kinships""" +374 38 model """kg2e""" +374 38 loss """nssa""" +374 38 regularizer """no""" +374 38 optimizer """adadelta""" +374 38 training_loop """owa""" +374 38 negative_sampler """basic""" +374 38 evaluator """rankbased""" +374 39 dataset """kinships""" +374 39 model """kg2e""" +374 39 loss """nssa""" +374 39 regularizer """no""" +374 39 optimizer """adadelta""" +374 39 training_loop """owa""" +374 39 negative_sampler """basic""" +374 39 evaluator """rankbased""" +374 40 dataset """kinships""" +374 40 model """kg2e""" +374 40 loss """nssa""" +374 40 regularizer """no""" +374 40 optimizer """adadelta""" +374 40 training_loop """owa""" +374 40 negative_sampler """basic""" +374 40 evaluator """rankbased""" +374 41 dataset """kinships""" +374 41 model """kg2e""" +374 41 loss """nssa""" +374 41 regularizer """no""" +374 41 optimizer """adadelta""" +374 41 training_loop """owa""" +374 41 negative_sampler """basic""" +374 41 evaluator """rankbased""" +374 42 dataset """kinships""" +374 42 model """kg2e""" +374 42 loss """nssa""" +374 42 regularizer """no""" +374 42 optimizer """adadelta""" +374 42 training_loop """owa""" +374 42 negative_sampler """basic""" +374 42 
evaluator """rankbased""" +374 43 dataset """kinships""" +374 43 model """kg2e""" +374 43 loss """nssa""" +374 43 regularizer """no""" +374 43 optimizer """adadelta""" +374 43 training_loop """owa""" +374 43 negative_sampler """basic""" +374 43 evaluator """rankbased""" +374 44 dataset """kinships""" +374 44 model """kg2e""" +374 44 loss """nssa""" +374 44 regularizer """no""" +374 44 optimizer """adadelta""" +374 44 training_loop """owa""" +374 44 negative_sampler """basic""" +374 44 evaluator """rankbased""" +374 45 dataset """kinships""" +374 45 model """kg2e""" +374 45 loss """nssa""" +374 45 regularizer """no""" +374 45 optimizer """adadelta""" +374 45 training_loop """owa""" +374 45 negative_sampler """basic""" +374 45 evaluator """rankbased""" +374 46 dataset """kinships""" +374 46 model """kg2e""" +374 46 loss """nssa""" +374 46 regularizer """no""" +374 46 optimizer """adadelta""" +374 46 training_loop """owa""" +374 46 negative_sampler """basic""" +374 46 evaluator """rankbased""" +374 47 dataset """kinships""" +374 47 model """kg2e""" +374 47 loss """nssa""" +374 47 regularizer """no""" +374 47 optimizer """adadelta""" +374 47 training_loop """owa""" +374 47 negative_sampler """basic""" +374 47 evaluator """rankbased""" +374 48 dataset """kinships""" +374 48 model """kg2e""" +374 48 loss """nssa""" +374 48 regularizer """no""" +374 48 optimizer """adadelta""" +374 48 training_loop """owa""" +374 48 negative_sampler """basic""" +374 48 evaluator """rankbased""" +374 49 dataset """kinships""" +374 49 model """kg2e""" +374 49 loss """nssa""" +374 49 regularizer """no""" +374 49 optimizer """adadelta""" +374 49 training_loop """owa""" +374 49 negative_sampler """basic""" +374 49 evaluator """rankbased""" +374 50 dataset """kinships""" +374 50 model """kg2e""" +374 50 loss """nssa""" +374 50 regularizer """no""" +374 50 optimizer """adadelta""" +374 50 training_loop """owa""" +374 50 negative_sampler """basic""" +374 50 evaluator """rankbased""" +374 51 
dataset """kinships""" +374 51 model """kg2e""" +374 51 loss """nssa""" +374 51 regularizer """no""" +374 51 optimizer """adadelta""" +374 51 training_loop """owa""" +374 51 negative_sampler """basic""" +374 51 evaluator """rankbased""" +374 52 dataset """kinships""" +374 52 model """kg2e""" +374 52 loss """nssa""" +374 52 regularizer """no""" +374 52 optimizer """adadelta""" +374 52 training_loop """owa""" +374 52 negative_sampler """basic""" +374 52 evaluator """rankbased""" +374 53 dataset """kinships""" +374 53 model """kg2e""" +374 53 loss """nssa""" +374 53 regularizer """no""" +374 53 optimizer """adadelta""" +374 53 training_loop """owa""" +374 53 negative_sampler """basic""" +374 53 evaluator """rankbased""" +374 54 dataset """kinships""" +374 54 model """kg2e""" +374 54 loss """nssa""" +374 54 regularizer """no""" +374 54 optimizer """adadelta""" +374 54 training_loop """owa""" +374 54 negative_sampler """basic""" +374 54 evaluator """rankbased""" +374 55 dataset """kinships""" +374 55 model """kg2e""" +374 55 loss """nssa""" +374 55 regularizer """no""" +374 55 optimizer """adadelta""" +374 55 training_loop """owa""" +374 55 negative_sampler """basic""" +374 55 evaluator """rankbased""" +374 56 dataset """kinships""" +374 56 model """kg2e""" +374 56 loss """nssa""" +374 56 regularizer """no""" +374 56 optimizer """adadelta""" +374 56 training_loop """owa""" +374 56 negative_sampler """basic""" +374 56 evaluator """rankbased""" +374 57 dataset """kinships""" +374 57 model """kg2e""" +374 57 loss """nssa""" +374 57 regularizer """no""" +374 57 optimizer """adadelta""" +374 57 training_loop """owa""" +374 57 negative_sampler """basic""" +374 57 evaluator """rankbased""" +374 58 dataset """kinships""" +374 58 model """kg2e""" +374 58 loss """nssa""" +374 58 regularizer """no""" +374 58 optimizer """adadelta""" +374 58 training_loop """owa""" +374 58 negative_sampler """basic""" +374 58 evaluator """rankbased""" +374 59 dataset """kinships""" +374 59 model 
"""kg2e""" +374 59 loss """nssa""" +374 59 regularizer """no""" +374 59 optimizer """adadelta""" +374 59 training_loop """owa""" +374 59 negative_sampler """basic""" +374 59 evaluator """rankbased""" +374 60 dataset """kinships""" +374 60 model """kg2e""" +374 60 loss """nssa""" +374 60 regularizer """no""" +374 60 optimizer """adadelta""" +374 60 training_loop """owa""" +374 60 negative_sampler """basic""" +374 60 evaluator """rankbased""" +374 61 dataset """kinships""" +374 61 model """kg2e""" +374 61 loss """nssa""" +374 61 regularizer """no""" +374 61 optimizer """adadelta""" +374 61 training_loop """owa""" +374 61 negative_sampler """basic""" +374 61 evaluator """rankbased""" +374 62 dataset """kinships""" +374 62 model """kg2e""" +374 62 loss """nssa""" +374 62 regularizer """no""" +374 62 optimizer """adadelta""" +374 62 training_loop """owa""" +374 62 negative_sampler """basic""" +374 62 evaluator """rankbased""" +374 63 dataset """kinships""" +374 63 model """kg2e""" +374 63 loss """nssa""" +374 63 regularizer """no""" +374 63 optimizer """adadelta""" +374 63 training_loop """owa""" +374 63 negative_sampler """basic""" +374 63 evaluator """rankbased""" +374 64 dataset """kinships""" +374 64 model """kg2e""" +374 64 loss """nssa""" +374 64 regularizer """no""" +374 64 optimizer """adadelta""" +374 64 training_loop """owa""" +374 64 negative_sampler """basic""" +374 64 evaluator """rankbased""" +374 65 dataset """kinships""" +374 65 model """kg2e""" +374 65 loss """nssa""" +374 65 regularizer """no""" +374 65 optimizer """adadelta""" +374 65 training_loop """owa""" +374 65 negative_sampler """basic""" +374 65 evaluator """rankbased""" +374 66 dataset """kinships""" +374 66 model """kg2e""" +374 66 loss """nssa""" +374 66 regularizer """no""" +374 66 optimizer """adadelta""" +374 66 training_loop """owa""" +374 66 negative_sampler """basic""" +374 66 evaluator """rankbased""" +374 67 dataset """kinships""" +374 67 model """kg2e""" +374 67 loss """nssa""" +374 
67 regularizer """no""" +374 67 optimizer """adadelta""" +374 67 training_loop """owa""" +374 67 negative_sampler """basic""" +374 67 evaluator """rankbased""" +374 68 dataset """kinships""" +374 68 model """kg2e""" +374 68 loss """nssa""" +374 68 regularizer """no""" +374 68 optimizer """adadelta""" +374 68 training_loop """owa""" +374 68 negative_sampler """basic""" +374 68 evaluator """rankbased""" +374 69 dataset """kinships""" +374 69 model """kg2e""" +374 69 loss """nssa""" +374 69 regularizer """no""" +374 69 optimizer """adadelta""" +374 69 training_loop """owa""" +374 69 negative_sampler """basic""" +374 69 evaluator """rankbased""" +374 70 dataset """kinships""" +374 70 model """kg2e""" +374 70 loss """nssa""" +374 70 regularizer """no""" +374 70 optimizer """adadelta""" +374 70 training_loop """owa""" +374 70 negative_sampler """basic""" +374 70 evaluator """rankbased""" +374 71 dataset """kinships""" +374 71 model """kg2e""" +374 71 loss """nssa""" +374 71 regularizer """no""" +374 71 optimizer """adadelta""" +374 71 training_loop """owa""" +374 71 negative_sampler """basic""" +374 71 evaluator """rankbased""" +374 72 dataset """kinships""" +374 72 model """kg2e""" +374 72 loss """nssa""" +374 72 regularizer """no""" +374 72 optimizer """adadelta""" +374 72 training_loop """owa""" +374 72 negative_sampler """basic""" +374 72 evaluator """rankbased""" +374 73 dataset """kinships""" +374 73 model """kg2e""" +374 73 loss """nssa""" +374 73 regularizer """no""" +374 73 optimizer """adadelta""" +374 73 training_loop """owa""" +374 73 negative_sampler """basic""" +374 73 evaluator """rankbased""" +374 74 dataset """kinships""" +374 74 model """kg2e""" +374 74 loss """nssa""" +374 74 regularizer """no""" +374 74 optimizer """adadelta""" +374 74 training_loop """owa""" +374 74 negative_sampler """basic""" +374 74 evaluator """rankbased""" +374 75 dataset """kinships""" +374 75 model """kg2e""" +374 75 loss """nssa""" +374 75 regularizer """no""" +374 75 
optimizer """adadelta""" +374 75 training_loop """owa""" +374 75 negative_sampler """basic""" +374 75 evaluator """rankbased""" +374 76 dataset """kinships""" +374 76 model """kg2e""" +374 76 loss """nssa""" +374 76 regularizer """no""" +374 76 optimizer """adadelta""" +374 76 training_loop """owa""" +374 76 negative_sampler """basic""" +374 76 evaluator """rankbased""" +374 77 dataset """kinships""" +374 77 model """kg2e""" +374 77 loss """nssa""" +374 77 regularizer """no""" +374 77 optimizer """adadelta""" +374 77 training_loop """owa""" +374 77 negative_sampler """basic""" +374 77 evaluator """rankbased""" +374 78 dataset """kinships""" +374 78 model """kg2e""" +374 78 loss """nssa""" +374 78 regularizer """no""" +374 78 optimizer """adadelta""" +374 78 training_loop """owa""" +374 78 negative_sampler """basic""" +374 78 evaluator """rankbased""" +374 79 dataset """kinships""" +374 79 model """kg2e""" +374 79 loss """nssa""" +374 79 regularizer """no""" +374 79 optimizer """adadelta""" +374 79 training_loop """owa""" +374 79 negative_sampler """basic""" +374 79 evaluator """rankbased""" +374 80 dataset """kinships""" +374 80 model """kg2e""" +374 80 loss """nssa""" +374 80 regularizer """no""" +374 80 optimizer """adadelta""" +374 80 training_loop """owa""" +374 80 negative_sampler """basic""" +374 80 evaluator """rankbased""" +374 81 dataset """kinships""" +374 81 model """kg2e""" +374 81 loss """nssa""" +374 81 regularizer """no""" +374 81 optimizer """adadelta""" +374 81 training_loop """owa""" +374 81 negative_sampler """basic""" +374 81 evaluator """rankbased""" +374 82 dataset """kinships""" +374 82 model """kg2e""" +374 82 loss """nssa""" +374 82 regularizer """no""" +374 82 optimizer """adadelta""" +374 82 training_loop """owa""" +374 82 negative_sampler """basic""" +374 82 evaluator """rankbased""" +374 83 dataset """kinships""" +374 83 model """kg2e""" +374 83 loss """nssa""" +374 83 regularizer """no""" +374 83 optimizer """adadelta""" +374 83 
training_loop """owa""" +374 83 negative_sampler """basic""" +374 83 evaluator """rankbased""" +374 84 dataset """kinships""" +374 84 model """kg2e""" +374 84 loss """nssa""" +374 84 regularizer """no""" +374 84 optimizer """adadelta""" +374 84 training_loop """owa""" +374 84 negative_sampler """basic""" +374 84 evaluator """rankbased""" +374 85 dataset """kinships""" +374 85 model """kg2e""" +374 85 loss """nssa""" +374 85 regularizer """no""" +374 85 optimizer """adadelta""" +374 85 training_loop """owa""" +374 85 negative_sampler """basic""" +374 85 evaluator """rankbased""" +374 86 dataset """kinships""" +374 86 model """kg2e""" +374 86 loss """nssa""" +374 86 regularizer """no""" +374 86 optimizer """adadelta""" +374 86 training_loop """owa""" +374 86 negative_sampler """basic""" +374 86 evaluator """rankbased""" +374 87 dataset """kinships""" +374 87 model """kg2e""" +374 87 loss """nssa""" +374 87 regularizer """no""" +374 87 optimizer """adadelta""" +374 87 training_loop """owa""" +374 87 negative_sampler """basic""" +374 87 evaluator """rankbased""" +374 88 dataset """kinships""" +374 88 model """kg2e""" +374 88 loss """nssa""" +374 88 regularizer """no""" +374 88 optimizer """adadelta""" +374 88 training_loop """owa""" +374 88 negative_sampler """basic""" +374 88 evaluator """rankbased""" +374 89 dataset """kinships""" +374 89 model """kg2e""" +374 89 loss """nssa""" +374 89 regularizer """no""" +374 89 optimizer """adadelta""" +374 89 training_loop """owa""" +374 89 negative_sampler """basic""" +374 89 evaluator """rankbased""" +374 90 dataset """kinships""" +374 90 model """kg2e""" +374 90 loss """nssa""" +374 90 regularizer """no""" +374 90 optimizer """adadelta""" +374 90 training_loop """owa""" +374 90 negative_sampler """basic""" +374 90 evaluator """rankbased""" +374 91 dataset """kinships""" +374 91 model """kg2e""" +374 91 loss """nssa""" +374 91 regularizer """no""" +374 91 optimizer """adadelta""" +374 91 training_loop """owa""" +374 91 
negative_sampler """basic""" +374 91 evaluator """rankbased""" +374 92 dataset """kinships""" +374 92 model """kg2e""" +374 92 loss """nssa""" +374 92 regularizer """no""" +374 92 optimizer """adadelta""" +374 92 training_loop """owa""" +374 92 negative_sampler """basic""" +374 92 evaluator """rankbased""" +374 93 dataset """kinships""" +374 93 model """kg2e""" +374 93 loss """nssa""" +374 93 regularizer """no""" +374 93 optimizer """adadelta""" +374 93 training_loop """owa""" +374 93 negative_sampler """basic""" +374 93 evaluator """rankbased""" +374 94 dataset """kinships""" +374 94 model """kg2e""" +374 94 loss """nssa""" +374 94 regularizer """no""" +374 94 optimizer """adadelta""" +374 94 training_loop """owa""" +374 94 negative_sampler """basic""" +374 94 evaluator """rankbased""" +374 95 dataset """kinships""" +374 95 model """kg2e""" +374 95 loss """nssa""" +374 95 regularizer """no""" +374 95 optimizer """adadelta""" +374 95 training_loop """owa""" +374 95 negative_sampler """basic""" +374 95 evaluator """rankbased""" +374 96 dataset """kinships""" +374 96 model """kg2e""" +374 96 loss """nssa""" +374 96 regularizer """no""" +374 96 optimizer """adadelta""" +374 96 training_loop """owa""" +374 96 negative_sampler """basic""" +374 96 evaluator """rankbased""" +374 97 dataset """kinships""" +374 97 model """kg2e""" +374 97 loss """nssa""" +374 97 regularizer """no""" +374 97 optimizer """adadelta""" +374 97 training_loop """owa""" +374 97 negative_sampler """basic""" +374 97 evaluator """rankbased""" +374 98 dataset """kinships""" +374 98 model """kg2e""" +374 98 loss """nssa""" +374 98 regularizer """no""" +374 98 optimizer """adadelta""" +374 98 training_loop """owa""" +374 98 negative_sampler """basic""" +374 98 evaluator """rankbased""" +374 99 dataset """kinships""" +374 99 model """kg2e""" +374 99 loss """nssa""" +374 99 regularizer """no""" +374 99 optimizer """adadelta""" +374 99 training_loop """owa""" +374 99 negative_sampler """basic""" +374 99 
evaluator """rankbased""" +374 100 dataset """kinships""" +374 100 model """kg2e""" +374 100 loss """nssa""" +374 100 regularizer """no""" +374 100 optimizer """adadelta""" +374 100 training_loop """owa""" +374 100 negative_sampler """basic""" +374 100 evaluator """rankbased""" +375 1 model.embedding_dim 1.0 +375 1 model.c_min 0.03706564102353102 +375 1 model.c_max 1.71388589991622 +375 1 optimizer.lr 0.0626892838483624 +375 1 training.batch_size 2.0 +375 1 training.label_smoothing 0.006157479089411243 +375 2 model.embedding_dim 2.0 +375 2 model.c_min 0.08585646985855896 +375 2 model.c_max 1.3501335437720665 +375 2 optimizer.lr 0.0012726867621921459 +375 2 training.batch_size 2.0 +375 2 training.label_smoothing 0.41327355196334875 +375 3 model.embedding_dim 0.0 +375 3 model.c_min 0.06773906358151482 +375 3 model.c_max 6.61362433516436 +375 3 optimizer.lr 0.00709597713996951 +375 3 training.batch_size 2.0 +375 3 training.label_smoothing 0.009321231244204097 +375 4 model.embedding_dim 2.0 +375 4 model.c_min 0.018387236830737053 +375 4 model.c_max 2.65832756750418 +375 4 optimizer.lr 0.015055530411715213 +375 4 training.batch_size 0.0 +375 4 training.label_smoothing 0.005069786047943044 +375 5 model.embedding_dim 0.0 +375 5 model.c_min 0.06346993508252935 +375 5 model.c_max 6.593359222475202 +375 5 optimizer.lr 0.019359668964869747 +375 5 training.batch_size 0.0 +375 5 training.label_smoothing 0.015240312539660591 +375 6 model.embedding_dim 2.0 +375 6 model.c_min 0.06955490896636675 +375 6 model.c_max 5.155891599542553 +375 6 optimizer.lr 0.0032472382981974975 +375 6 training.batch_size 2.0 +375 6 training.label_smoothing 0.014238426983778547 +375 7 model.embedding_dim 2.0 +375 7 model.c_min 0.07501675145499456 +375 7 model.c_max 5.723810887245166 +375 7 optimizer.lr 0.0016030506619495158 +375 7 training.batch_size 1.0 +375 7 training.label_smoothing 0.002958494629553278 +375 8 model.embedding_dim 0.0 +375 8 model.c_min 0.05005253202534818 +375 8 model.c_max 
5.45085828564025 +375 8 optimizer.lr 0.008649807883903586 +375 8 training.batch_size 1.0 +375 8 training.label_smoothing 0.0070352522124517685 +375 9 model.embedding_dim 1.0 +375 9 model.c_min 0.016453834767011538 +375 9 model.c_max 9.283070761985693 +375 9 optimizer.lr 0.06276514975715114 +375 9 training.batch_size 1.0 +375 9 training.label_smoothing 0.0036078981436187918 +375 10 model.embedding_dim 1.0 +375 10 model.c_min 0.07028925850120935 +375 10 model.c_max 5.102631610414788 +375 10 optimizer.lr 0.09219775041607889 +375 10 training.batch_size 2.0 +375 10 training.label_smoothing 0.1312275043265895 +375 11 model.embedding_dim 0.0 +375 11 model.c_min 0.025605549649634893 +375 11 model.c_max 6.262003890127126 +375 11 optimizer.lr 0.008464662462867874 +375 11 training.batch_size 0.0 +375 11 training.label_smoothing 0.4879066595231394 +375 12 model.embedding_dim 0.0 +375 12 model.c_min 0.04149407112306543 +375 12 model.c_max 8.905718012779456 +375 12 optimizer.lr 0.005520452333359794 +375 12 training.batch_size 2.0 +375 12 training.label_smoothing 0.004268095893300119 +375 13 model.embedding_dim 2.0 +375 13 model.c_min 0.014465763621163419 +375 13 model.c_max 1.7757196985923662 +375 13 optimizer.lr 0.0049226597438714005 +375 13 training.batch_size 2.0 +375 13 training.label_smoothing 0.0033133349965769514 +375 14 model.embedding_dim 2.0 +375 14 model.c_min 0.07395275568084984 +375 14 model.c_max 4.809257649288787 +375 14 optimizer.lr 0.046865320977950986 +375 14 training.batch_size 1.0 +375 14 training.label_smoothing 0.021975509146108695 +375 15 model.embedding_dim 1.0 +375 15 model.c_min 0.038817598407403404 +375 15 model.c_max 6.798969175513755 +375 15 optimizer.lr 0.014127305490156488 +375 15 training.batch_size 1.0 +375 15 training.label_smoothing 0.21752601704951868 +375 16 model.embedding_dim 2.0 +375 16 model.c_min 0.011228649023585157 +375 16 model.c_max 8.462373426557361 +375 16 optimizer.lr 0.0010329492539403141 +375 16 training.batch_size 1.0 +375 16 
training.label_smoothing 0.0543286919358379 +375 17 model.embedding_dim 2.0 +375 17 model.c_min 0.06525928828896184 +375 17 model.c_max 8.570609503391573 +375 17 optimizer.lr 0.008592154852386801 +375 17 training.batch_size 0.0 +375 17 training.label_smoothing 0.012926480070577255 +375 18 model.embedding_dim 0.0 +375 18 model.c_min 0.028551567423265554 +375 18 model.c_max 9.734758953082743 +375 18 optimizer.lr 0.01027582549471797 +375 18 training.batch_size 2.0 +375 18 training.label_smoothing 0.12864966889128085 +375 19 model.embedding_dim 1.0 +375 19 model.c_min 0.040733755483087296 +375 19 model.c_max 8.20083980216847 +375 19 optimizer.lr 0.04350051141364523 +375 19 training.batch_size 2.0 +375 19 training.label_smoothing 0.012315206531715943 +375 20 model.embedding_dim 1.0 +375 20 model.c_min 0.010395631288193439 +375 20 model.c_max 2.8636578654696008 +375 20 optimizer.lr 0.00363725909839362 +375 20 training.batch_size 2.0 +375 20 training.label_smoothing 0.003692641275235835 +375 21 model.embedding_dim 1.0 +375 21 model.c_min 0.01724219488802739 +375 21 model.c_max 3.730912106483407 +375 21 optimizer.lr 0.010966420345514998 +375 21 training.batch_size 2.0 +375 21 training.label_smoothing 0.015312007730672983 +375 22 model.embedding_dim 2.0 +375 22 model.c_min 0.011494358594016981 +375 22 model.c_max 2.5158087619668943 +375 22 optimizer.lr 0.00480827301230598 +375 22 training.batch_size 1.0 +375 22 training.label_smoothing 0.18701954532386003 +375 23 model.embedding_dim 2.0 +375 23 model.c_min 0.04199174988267502 +375 23 model.c_max 3.8920035582974917 +375 23 optimizer.lr 0.010698475645917563 +375 23 training.batch_size 2.0 +375 23 training.label_smoothing 0.8058015978376136 +375 24 model.embedding_dim 0.0 +375 24 model.c_min 0.052151790791756164 +375 24 model.c_max 6.855963221314448 +375 24 optimizer.lr 0.005931069718879009 +375 24 training.batch_size 0.0 +375 24 training.label_smoothing 0.012442487760376411 +375 25 model.embedding_dim 0.0 +375 25 model.c_min 
0.044896207192946055 +375 25 model.c_max 7.685702275673395 +375 25 optimizer.lr 0.0048913239665701715 +375 25 training.batch_size 2.0 +375 25 training.label_smoothing 0.105282498504856 +375 26 model.embedding_dim 0.0 +375 26 model.c_min 0.013568846134402828 +375 26 model.c_max 5.498191889538463 +375 26 optimizer.lr 0.0035650112819385553 +375 26 training.batch_size 0.0 +375 26 training.label_smoothing 0.2562069183347519 +375 27 model.embedding_dim 2.0 +375 27 model.c_min 0.02753952542938474 +375 27 model.c_max 6.9868898361148055 +375 27 optimizer.lr 0.011350776970457818 +375 27 training.batch_size 2.0 +375 27 training.label_smoothing 0.07658192245969399 +375 28 model.embedding_dim 0.0 +375 28 model.c_min 0.0686141543257124 +375 28 model.c_max 4.646255809902285 +375 28 optimizer.lr 0.019374001921973624 +375 28 training.batch_size 2.0 +375 28 training.label_smoothing 0.06631721192367075 +375 29 model.embedding_dim 2.0 +375 29 model.c_min 0.0442872844511489 +375 29 model.c_max 8.530739966059365 +375 29 optimizer.lr 0.06576940814652173 +375 29 training.batch_size 1.0 +375 29 training.label_smoothing 0.0031507533858029205 +375 30 model.embedding_dim 0.0 +375 30 model.c_min 0.044396159001937455 +375 30 model.c_max 2.6503676704472587 +375 30 optimizer.lr 0.05798434289617501 +375 30 training.batch_size 2.0 +375 30 training.label_smoothing 0.0026644129110569466 +375 31 model.embedding_dim 1.0 +375 31 model.c_min 0.01630588901961611 +375 31 model.c_max 4.272447076090407 +375 31 optimizer.lr 0.03124670226563448 +375 31 training.batch_size 2.0 +375 31 training.label_smoothing 0.10847130044906711 +375 32 model.embedding_dim 2.0 +375 32 model.c_min 0.017647083696008568 +375 32 model.c_max 5.388507638463301 +375 32 optimizer.lr 0.004819332497606329 +375 32 training.batch_size 1.0 +375 32 training.label_smoothing 0.0037271680658185233 +375 33 model.embedding_dim 0.0 +375 33 model.c_min 0.011254164481696045 +375 33 model.c_max 5.35673392018026 +375 33 optimizer.lr 
0.0018266752714415052 +375 33 training.batch_size 0.0 +375 33 training.label_smoothing 0.002724068881394043 +375 34 model.embedding_dim 2.0 +375 34 model.c_min 0.013248120899664678 +375 34 model.c_max 6.073062166274784 +375 34 optimizer.lr 0.0016301877758999144 +375 34 training.batch_size 0.0 +375 34 training.label_smoothing 0.5135401022759212 +375 35 model.embedding_dim 1.0 +375 35 model.c_min 0.06828065052089942 +375 35 model.c_max 4.591920717019526 +375 35 optimizer.lr 0.008599305636712858 +375 35 training.batch_size 2.0 +375 35 training.label_smoothing 0.0010490402940392083 +375 36 model.embedding_dim 2.0 +375 36 model.c_min 0.05441076844781556 +375 36 model.c_max 7.2283015184153605 +375 36 optimizer.lr 0.058958190276978344 +375 36 training.batch_size 1.0 +375 36 training.label_smoothing 0.6364108152859892 +375 37 model.embedding_dim 2.0 +375 37 model.c_min 0.09630317695154615 +375 37 model.c_max 3.8342311897088717 +375 37 optimizer.lr 0.0028204688469366085 +375 37 training.batch_size 2.0 +375 37 training.label_smoothing 0.00942337488527229 +375 38 model.embedding_dim 1.0 +375 38 model.c_min 0.023258501051380515 +375 38 model.c_max 5.839329595702071 +375 38 optimizer.lr 0.011547733433264247 +375 38 training.batch_size 2.0 +375 38 training.label_smoothing 0.018357713975159548 +375 39 model.embedding_dim 0.0 +375 39 model.c_min 0.016153912876033714 +375 39 model.c_max 2.039304790926044 +375 39 optimizer.lr 0.07159966027838241 +375 39 training.batch_size 2.0 +375 39 training.label_smoothing 0.13081882798491612 +375 40 model.embedding_dim 1.0 +375 40 model.c_min 0.09387503229312509 +375 40 model.c_max 2.1704592590702987 +375 40 optimizer.lr 0.001129183232400583 +375 40 training.batch_size 1.0 +375 40 training.label_smoothing 0.007901192791273519 +375 41 model.embedding_dim 1.0 +375 41 model.c_min 0.09767113899226457 +375 41 model.c_max 5.034939103958113 +375 41 optimizer.lr 0.0016457038350525708 +375 41 training.batch_size 2.0 +375 41 training.label_smoothing 
0.018600836023582736 +375 42 model.embedding_dim 0.0 +375 42 model.c_min 0.059839125307113605 +375 42 model.c_max 1.2450826051801513 +375 42 optimizer.lr 0.027722407324136444 +375 42 training.batch_size 2.0 +375 42 training.label_smoothing 0.023005807744358453 +375 43 model.embedding_dim 2.0 +375 43 model.c_min 0.010166547979889915 +375 43 model.c_max 5.981064750070158 +375 43 optimizer.lr 0.0020395585902205455 +375 43 training.batch_size 0.0 +375 43 training.label_smoothing 0.8905289511191254 +375 44 model.embedding_dim 1.0 +375 44 model.c_min 0.019739553977433853 +375 44 model.c_max 8.91606153688862 +375 44 optimizer.lr 0.05394879077606556 +375 44 training.batch_size 0.0 +375 44 training.label_smoothing 0.006187810252345695 +375 45 model.embedding_dim 0.0 +375 45 model.c_min 0.031118332968620425 +375 45 model.c_max 1.3938146314870858 +375 45 optimizer.lr 0.006041825798640354 +375 45 training.batch_size 1.0 +375 45 training.label_smoothing 0.041015406169571424 +375 46 model.embedding_dim 2.0 +375 46 model.c_min 0.07545459719313338 +375 46 model.c_max 2.1506837395169733 +375 46 optimizer.lr 0.01288640704738051 +375 46 training.batch_size 1.0 +375 46 training.label_smoothing 0.010097918205272933 +375 47 model.embedding_dim 2.0 +375 47 model.c_min 0.07867085321707197 +375 47 model.c_max 6.968356562573149 +375 47 optimizer.lr 0.011341408209823621 +375 47 training.batch_size 1.0 +375 47 training.label_smoothing 0.14847310185804669 +375 48 model.embedding_dim 1.0 +375 48 model.c_min 0.0796141673668493 +375 48 model.c_max 3.6318559797337957 +375 48 optimizer.lr 0.008589016304765815 +375 48 training.batch_size 1.0 +375 48 training.label_smoothing 0.3729952227275969 +375 49 model.embedding_dim 2.0 +375 49 model.c_min 0.042288810277649434 +375 49 model.c_max 1.1931951518096464 +375 49 optimizer.lr 0.005954386966309028 +375 49 training.batch_size 0.0 +375 49 training.label_smoothing 0.0015774013681924398 +375 50 model.embedding_dim 1.0 +375 50 model.c_min 
0.020748233798066933 +375 50 model.c_max 8.522105737395727 +375 50 optimizer.lr 0.003928413961955403 +375 50 training.batch_size 2.0 +375 50 training.label_smoothing 0.08765904476486769 +375 51 model.embedding_dim 2.0 +375 51 model.c_min 0.017545974569722903 +375 51 model.c_max 5.267978319714314 +375 51 optimizer.lr 0.03374591150378962 +375 51 training.batch_size 2.0 +375 51 training.label_smoothing 0.002341977133691444 +375 52 model.embedding_dim 1.0 +375 52 model.c_min 0.08478557173084907 +375 52 model.c_max 4.46834309516481 +375 52 optimizer.lr 0.012388952084874238 +375 52 training.batch_size 2.0 +375 52 training.label_smoothing 0.9353556104902191 +375 53 model.embedding_dim 2.0 +375 53 model.c_min 0.01099455450090382 +375 53 model.c_max 1.774154084642418 +375 53 optimizer.lr 0.04825240934887244 +375 53 training.batch_size 1.0 +375 53 training.label_smoothing 0.1313796743838993 +375 54 model.embedding_dim 0.0 +375 54 model.c_min 0.039193348646146695 +375 54 model.c_max 2.321963607937995 +375 54 optimizer.lr 0.026806738170493966 +375 54 training.batch_size 0.0 +375 54 training.label_smoothing 0.14415467026090908 +375 55 model.embedding_dim 1.0 +375 55 model.c_min 0.04554782649879282 +375 55 model.c_max 3.974320602214922 +375 55 optimizer.lr 0.0011521615729872367 +375 55 training.batch_size 2.0 +375 55 training.label_smoothing 0.06991657405908296 +375 56 model.embedding_dim 0.0 +375 56 model.c_min 0.024432357512077042 +375 56 model.c_max 8.959463307162752 +375 56 optimizer.lr 0.002452748328744458 +375 56 training.batch_size 1.0 +375 56 training.label_smoothing 0.44574251754443517 +375 57 model.embedding_dim 0.0 +375 57 model.c_min 0.021697049120244602 +375 57 model.c_max 4.844074820175679 +375 57 optimizer.lr 0.01405058051920725 +375 57 training.batch_size 1.0 +375 57 training.label_smoothing 0.0018284956445650334 +375 58 model.embedding_dim 1.0 +375 58 model.c_min 0.06243063584892083 +375 58 model.c_max 4.699967374565292 +375 58 optimizer.lr 0.027167708694420856 
+375 58 training.batch_size 1.0 +375 58 training.label_smoothing 0.9110665126276927 +375 59 model.embedding_dim 2.0 +375 59 model.c_min 0.03935792173332665 +375 59 model.c_max 7.246633466028849 +375 59 optimizer.lr 0.029349090249527247 +375 59 training.batch_size 0.0 +375 59 training.label_smoothing 0.008214141274367327 +375 60 model.embedding_dim 2.0 +375 60 model.c_min 0.01107948873267896 +375 60 model.c_max 2.285650107276332 +375 60 optimizer.lr 0.0015959829293446743 +375 60 training.batch_size 1.0 +375 60 training.label_smoothing 0.8226811353782655 +375 61 model.embedding_dim 2.0 +375 61 model.c_min 0.01362681004470379 +375 61 model.c_max 8.45674874132149 +375 61 optimizer.lr 0.041827903078187095 +375 61 training.batch_size 1.0 +375 61 training.label_smoothing 0.5802311516538218 +375 62 model.embedding_dim 2.0 +375 62 model.c_min 0.05393132577802187 +375 62 model.c_max 5.689896898612338 +375 62 optimizer.lr 0.050238483174460065 +375 62 training.batch_size 0.0 +375 62 training.label_smoothing 0.013515824220405618 +375 63 model.embedding_dim 2.0 +375 63 model.c_min 0.02492836866382998 +375 63 model.c_max 7.431344476425461 +375 63 optimizer.lr 0.04502000912853225 +375 63 training.batch_size 0.0 +375 63 training.label_smoothing 0.01204001152638213 +375 64 model.embedding_dim 1.0 +375 64 model.c_min 0.07113164584309141 +375 64 model.c_max 6.063499730428491 +375 64 optimizer.lr 0.001887801026095789 +375 64 training.batch_size 2.0 +375 64 training.label_smoothing 0.037368155076685124 +375 65 model.embedding_dim 1.0 +375 65 model.c_min 0.02257031782989676 +375 65 model.c_max 2.8247210171579726 +375 65 optimizer.lr 0.011144235330684663 +375 65 training.batch_size 2.0 +375 65 training.label_smoothing 0.002600642978938515 +375 66 model.embedding_dim 1.0 +375 66 model.c_min 0.022311866810229442 +375 66 model.c_max 7.395484343585479 +375 66 optimizer.lr 0.009151963252994527 +375 66 training.batch_size 2.0 +375 66 training.label_smoothing 0.04520619613423347 +375 67 
model.embedding_dim 0.0 +375 67 model.c_min 0.02857398780726809 +375 67 model.c_max 6.25655439176122 +375 67 optimizer.lr 0.0016680485286568196 +375 67 training.batch_size 0.0 +375 67 training.label_smoothing 0.0031673852171522905 +375 68 model.embedding_dim 0.0 +375 68 model.c_min 0.03941288331148991 +375 68 model.c_max 5.19871508924718 +375 68 optimizer.lr 0.0015164806570301325 +375 68 training.batch_size 1.0 +375 68 training.label_smoothing 0.4481043420151877 +375 69 model.embedding_dim 2.0 +375 69 model.c_min 0.0918598581454824 +375 69 model.c_max 4.099313075926596 +375 69 optimizer.lr 0.006478902853829026 +375 69 training.batch_size 1.0 +375 69 training.label_smoothing 0.1848866527976867 +375 70 model.embedding_dim 0.0 +375 70 model.c_min 0.05942620038962063 +375 70 model.c_max 6.173028967928687 +375 70 optimizer.lr 0.0027964367282071797 +375 70 training.batch_size 1.0 +375 70 training.label_smoothing 0.720351138985215 +375 71 model.embedding_dim 1.0 +375 71 model.c_min 0.09099310933249263 +375 71 model.c_max 9.930774765162043 +375 71 optimizer.lr 0.011567054853299914 +375 71 training.batch_size 2.0 +375 71 training.label_smoothing 0.1184337988015512 +375 72 model.embedding_dim 2.0 +375 72 model.c_min 0.06855465230134165 +375 72 model.c_max 9.182621263304323 +375 72 optimizer.lr 0.0016794025900762587 +375 72 training.batch_size 2.0 +375 72 training.label_smoothing 0.0026317070604195973 +375 73 model.embedding_dim 2.0 +375 73 model.c_min 0.037654678924435935 +375 73 model.c_max 2.6655867495285883 +375 73 optimizer.lr 0.0050362196363686555 +375 73 training.batch_size 1.0 +375 73 training.label_smoothing 0.1563882070878142 +375 74 model.embedding_dim 2.0 +375 74 model.c_min 0.03615858336463223 +375 74 model.c_max 5.511757496074894 +375 74 optimizer.lr 0.003269362649845378 +375 74 training.batch_size 1.0 +375 74 training.label_smoothing 0.8114605790515526 +375 75 model.embedding_dim 0.0 +375 75 model.c_min 0.024931176613149624 +375 75 model.c_max 8.869528403771756 
+375 75 optimizer.lr 0.004265186371339358 +375 75 training.batch_size 1.0 +375 75 training.label_smoothing 0.8855671693550599 +375 76 model.embedding_dim 1.0 +375 76 model.c_min 0.024482167040464232 +375 76 model.c_max 2.4222584976405153 +375 76 optimizer.lr 0.05314700199514847 +375 76 training.batch_size 0.0 +375 76 training.label_smoothing 0.07847612287245455 +375 77 model.embedding_dim 2.0 +375 77 model.c_min 0.013817148938452341 +375 77 model.c_max 7.800228985868347 +375 77 optimizer.lr 0.0019904348881808077 +375 77 training.batch_size 1.0 +375 77 training.label_smoothing 0.31779851146223564 +375 78 model.embedding_dim 0.0 +375 78 model.c_min 0.08502416246748647 +375 78 model.c_max 5.7071275437892774 +375 78 optimizer.lr 0.09303677115489087 +375 78 training.batch_size 0.0 +375 78 training.label_smoothing 0.0050379254649547565 +375 79 model.embedding_dim 1.0 +375 79 model.c_min 0.012880798277630397 +375 79 model.c_max 4.6997942062715925 +375 79 optimizer.lr 0.0025615298747658534 +375 79 training.batch_size 2.0 +375 79 training.label_smoothing 0.010008376954584284 +375 80 model.embedding_dim 1.0 +375 80 model.c_min 0.012917162190251807 +375 80 model.c_max 7.555419385150289 +375 80 optimizer.lr 0.00162799011874196 +375 80 training.batch_size 1.0 +375 80 training.label_smoothing 0.0024725048685567677 +375 81 model.embedding_dim 2.0 +375 81 model.c_min 0.06494825162940586 +375 81 model.c_max 3.6688030738636823 +375 81 optimizer.lr 0.016066799834285845 +375 81 training.batch_size 0.0 +375 81 training.label_smoothing 0.0047171233563494544 +375 82 model.embedding_dim 2.0 +375 82 model.c_min 0.049172441267250275 +375 82 model.c_max 4.688871517119482 +375 82 optimizer.lr 0.005612202811900844 +375 82 training.batch_size 1.0 +375 82 training.label_smoothing 0.07755547769092168 +375 83 model.embedding_dim 2.0 +375 83 model.c_min 0.04591558016017376 +375 83 model.c_max 7.71099762395488 +375 83 optimizer.lr 0.023962038395649213 +375 83 training.batch_size 0.0 +375 83 
training.label_smoothing 0.07482966842046715 +375 84 model.embedding_dim 2.0 +375 84 model.c_min 0.03780346838468574 +375 84 model.c_max 5.5037217942464345 +375 84 optimizer.lr 0.004894587533534002 +375 84 training.batch_size 0.0 +375 84 training.label_smoothing 0.01833348453215028 +375 85 model.embedding_dim 1.0 +375 85 model.c_min 0.018452048057882572 +375 85 model.c_max 1.052203890964524 +375 85 optimizer.lr 0.008587738891551613 +375 85 training.batch_size 1.0 +375 85 training.label_smoothing 0.016739728200305885 +375 86 model.embedding_dim 1.0 +375 86 model.c_min 0.020776798568761634 +375 86 model.c_max 9.525504368209585 +375 86 optimizer.lr 0.005570351307086707 +375 86 training.batch_size 0.0 +375 86 training.label_smoothing 0.20472897275513424 +375 87 model.embedding_dim 1.0 +375 87 model.c_min 0.08688788462507034 +375 87 model.c_max 7.016375433985958 +375 87 optimizer.lr 0.03440034718569014 +375 87 training.batch_size 1.0 +375 87 training.label_smoothing 0.023397844402141883 +375 88 model.embedding_dim 2.0 +375 88 model.c_min 0.01851413402334359 +375 88 model.c_max 9.792310215777675 +375 88 optimizer.lr 0.009024548120146874 +375 88 training.batch_size 0.0 +375 88 training.label_smoothing 0.0012672103880044475 +375 89 model.embedding_dim 2.0 +375 89 model.c_min 0.019505398207222083 +375 89 model.c_max 5.487137523415676 +375 89 optimizer.lr 0.0010757178340140897 +375 89 training.batch_size 1.0 +375 89 training.label_smoothing 0.5108082752524703 +375 90 model.embedding_dim 2.0 +375 90 model.c_min 0.010910931643598005 +375 90 model.c_max 1.5285748795411107 +375 90 optimizer.lr 0.09343375957319115 +375 90 training.batch_size 2.0 +375 90 training.label_smoothing 0.0026312082281744805 +375 91 model.embedding_dim 2.0 +375 91 model.c_min 0.018765860332540116 +375 91 model.c_max 6.522824251505516 +375 91 optimizer.lr 0.01722495271304918 +375 91 training.batch_size 1.0 +375 91 training.label_smoothing 0.02125787934496019 +375 92 model.embedding_dim 2.0 +375 92 
model.c_min 0.07138917967357064 +375 92 model.c_max 7.306912870423331 +375 92 optimizer.lr 0.004371385690160139 +375 92 training.batch_size 0.0 +375 92 training.label_smoothing 0.001524308840289611 +375 93 model.embedding_dim 2.0 +375 93 model.c_min 0.07149176661889994 +375 93 model.c_max 3.3200256316437047 +375 93 optimizer.lr 0.06968006072476199 +375 93 training.batch_size 1.0 +375 93 training.label_smoothing 0.06560865977003554 +375 94 model.embedding_dim 0.0 +375 94 model.c_min 0.03273054089479555 +375 94 model.c_max 8.170380073832309 +375 94 optimizer.lr 0.008929811552851748 +375 94 training.batch_size 2.0 +375 94 training.label_smoothing 0.01735528642311188 +375 95 model.embedding_dim 2.0 +375 95 model.c_min 0.05759156122704277 +375 95 model.c_max 4.2545330114055435 +375 95 optimizer.lr 0.0011702383363968634 +375 95 training.batch_size 2.0 +375 95 training.label_smoothing 0.014731496570936772 +375 96 model.embedding_dim 1.0 +375 96 model.c_min 0.011587954728903815 +375 96 model.c_max 7.558799857215403 +375 96 optimizer.lr 0.006160808263803667 +375 96 training.batch_size 2.0 +375 96 training.label_smoothing 0.004402726699374033 +375 97 model.embedding_dim 0.0 +375 97 model.c_min 0.03859684418309964 +375 97 model.c_max 6.965539411397254 +375 97 optimizer.lr 0.03213444264435673 +375 97 training.batch_size 1.0 +375 97 training.label_smoothing 0.14190712443650202 +375 98 model.embedding_dim 1.0 +375 98 model.c_min 0.057326511870013835 +375 98 model.c_max 3.8219628906035625 +375 98 optimizer.lr 0.003972679863215339 +375 98 training.batch_size 1.0 +375 98 training.label_smoothing 0.0014463614652477437 +375 99 model.embedding_dim 0.0 +375 99 model.c_min 0.019927551883476204 +375 99 model.c_max 2.781076151815091 +375 99 optimizer.lr 0.008348050700016494 +375 99 training.batch_size 2.0 +375 99 training.label_smoothing 0.32802163035409154 +375 100 model.embedding_dim 2.0 +375 100 model.c_min 0.028353746144740136 +375 100 model.c_max 3.3046908778023885 +375 100 
optimizer.lr 0.050350540872223785 +375 100 training.batch_size 0.0 +375 100 training.label_smoothing 0.3605035915617448 +375 1 dataset """kinships""" +375 1 model """kg2e""" +375 1 loss """bceaftersigmoid""" +375 1 regularizer """no""" +375 1 optimizer """adam""" +375 1 training_loop """lcwa""" +375 1 evaluator """rankbased""" +375 2 dataset """kinships""" +375 2 model """kg2e""" +375 2 loss """bceaftersigmoid""" +375 2 regularizer """no""" +375 2 optimizer """adam""" +375 2 training_loop """lcwa""" +375 2 evaluator """rankbased""" +375 3 dataset """kinships""" +375 3 model """kg2e""" +375 3 loss """bceaftersigmoid""" +375 3 regularizer """no""" +375 3 optimizer """adam""" +375 3 training_loop """lcwa""" +375 3 evaluator """rankbased""" +375 4 dataset """kinships""" +375 4 model """kg2e""" +375 4 loss """bceaftersigmoid""" +375 4 regularizer """no""" +375 4 optimizer """adam""" +375 4 training_loop """lcwa""" +375 4 evaluator """rankbased""" +375 5 dataset """kinships""" +375 5 model """kg2e""" +375 5 loss """bceaftersigmoid""" +375 5 regularizer """no""" +375 5 optimizer """adam""" +375 5 training_loop """lcwa""" +375 5 evaluator """rankbased""" +375 6 dataset """kinships""" +375 6 model """kg2e""" +375 6 loss """bceaftersigmoid""" +375 6 regularizer """no""" +375 6 optimizer """adam""" +375 6 training_loop """lcwa""" +375 6 evaluator """rankbased""" +375 7 dataset """kinships""" +375 7 model """kg2e""" +375 7 loss """bceaftersigmoid""" +375 7 regularizer """no""" +375 7 optimizer """adam""" +375 7 training_loop """lcwa""" +375 7 evaluator """rankbased""" +375 8 dataset """kinships""" +375 8 model """kg2e""" +375 8 loss """bceaftersigmoid""" +375 8 regularizer """no""" +375 8 optimizer """adam""" +375 8 training_loop """lcwa""" +375 8 evaluator """rankbased""" +375 9 dataset """kinships""" +375 9 model """kg2e""" +375 9 loss """bceaftersigmoid""" +375 9 regularizer """no""" +375 9 optimizer """adam""" +375 9 training_loop """lcwa""" +375 9 evaluator 
"""rankbased""" +375 10 dataset """kinships""" +375 10 model """kg2e""" +375 10 loss """bceaftersigmoid""" +375 10 regularizer """no""" +375 10 optimizer """adam""" +375 10 training_loop """lcwa""" +375 10 evaluator """rankbased""" +375 11 dataset """kinships""" +375 11 model """kg2e""" +375 11 loss """bceaftersigmoid""" +375 11 regularizer """no""" +375 11 optimizer """adam""" +375 11 training_loop """lcwa""" +375 11 evaluator """rankbased""" +375 12 dataset """kinships""" +375 12 model """kg2e""" +375 12 loss """bceaftersigmoid""" +375 12 regularizer """no""" +375 12 optimizer """adam""" +375 12 training_loop """lcwa""" +375 12 evaluator """rankbased""" +375 13 dataset """kinships""" +375 13 model """kg2e""" +375 13 loss """bceaftersigmoid""" +375 13 regularizer """no""" +375 13 optimizer """adam""" +375 13 training_loop """lcwa""" +375 13 evaluator """rankbased""" +375 14 dataset """kinships""" +375 14 model """kg2e""" +375 14 loss """bceaftersigmoid""" +375 14 regularizer """no""" +375 14 optimizer """adam""" +375 14 training_loop """lcwa""" +375 14 evaluator """rankbased""" +375 15 dataset """kinships""" +375 15 model """kg2e""" +375 15 loss """bceaftersigmoid""" +375 15 regularizer """no""" +375 15 optimizer """adam""" +375 15 training_loop """lcwa""" +375 15 evaluator """rankbased""" +375 16 dataset """kinships""" +375 16 model """kg2e""" +375 16 loss """bceaftersigmoid""" +375 16 regularizer """no""" +375 16 optimizer """adam""" +375 16 training_loop """lcwa""" +375 16 evaluator """rankbased""" +375 17 dataset """kinships""" +375 17 model """kg2e""" +375 17 loss """bceaftersigmoid""" +375 17 regularizer """no""" +375 17 optimizer """adam""" +375 17 training_loop """lcwa""" +375 17 evaluator """rankbased""" +375 18 dataset """kinships""" +375 18 model """kg2e""" +375 18 loss """bceaftersigmoid""" +375 18 regularizer """no""" +375 18 optimizer """adam""" +375 18 training_loop """lcwa""" +375 18 evaluator """rankbased""" +375 19 dataset """kinships""" +375 19 
model """kg2e""" +375 19 loss """bceaftersigmoid""" +375 19 regularizer """no""" +375 19 optimizer """adam""" +375 19 training_loop """lcwa""" +375 19 evaluator """rankbased""" +375 20 dataset """kinships""" +375 20 model """kg2e""" +375 20 loss """bceaftersigmoid""" +375 20 regularizer """no""" +375 20 optimizer """adam""" +375 20 training_loop """lcwa""" +375 20 evaluator """rankbased""" +375 21 dataset """kinships""" +375 21 model """kg2e""" +375 21 loss """bceaftersigmoid""" +375 21 regularizer """no""" +375 21 optimizer """adam""" +375 21 training_loop """lcwa""" +375 21 evaluator """rankbased""" +375 22 dataset """kinships""" +375 22 model """kg2e""" +375 22 loss """bceaftersigmoid""" +375 22 regularizer """no""" +375 22 optimizer """adam""" +375 22 training_loop """lcwa""" +375 22 evaluator """rankbased""" +375 23 dataset """kinships""" +375 23 model """kg2e""" +375 23 loss """bceaftersigmoid""" +375 23 regularizer """no""" +375 23 optimizer """adam""" +375 23 training_loop """lcwa""" +375 23 evaluator """rankbased""" +375 24 dataset """kinships""" +375 24 model """kg2e""" +375 24 loss """bceaftersigmoid""" +375 24 regularizer """no""" +375 24 optimizer """adam""" +375 24 training_loop """lcwa""" +375 24 evaluator """rankbased""" +375 25 dataset """kinships""" +375 25 model """kg2e""" +375 25 loss """bceaftersigmoid""" +375 25 regularizer """no""" +375 25 optimizer """adam""" +375 25 training_loop """lcwa""" +375 25 evaluator """rankbased""" +375 26 dataset """kinships""" +375 26 model """kg2e""" +375 26 loss """bceaftersigmoid""" +375 26 regularizer """no""" +375 26 optimizer """adam""" +375 26 training_loop """lcwa""" +375 26 evaluator """rankbased""" +375 27 dataset """kinships""" +375 27 model """kg2e""" +375 27 loss """bceaftersigmoid""" +375 27 regularizer """no""" +375 27 optimizer """adam""" +375 27 training_loop """lcwa""" +375 27 evaluator """rankbased""" +375 28 dataset """kinships""" +375 28 model """kg2e""" +375 28 loss """bceaftersigmoid""" 
+375 28 regularizer """no""" +375 28 optimizer """adam""" +375 28 training_loop """lcwa""" +375 28 evaluator """rankbased""" +375 29 dataset """kinships""" +375 29 model """kg2e""" +375 29 loss """bceaftersigmoid""" +375 29 regularizer """no""" +375 29 optimizer """adam""" +375 29 training_loop """lcwa""" +375 29 evaluator """rankbased""" +375 30 dataset """kinships""" +375 30 model """kg2e""" +375 30 loss """bceaftersigmoid""" +375 30 regularizer """no""" +375 30 optimizer """adam""" +375 30 training_loop """lcwa""" +375 30 evaluator """rankbased""" +375 31 dataset """kinships""" +375 31 model """kg2e""" +375 31 loss """bceaftersigmoid""" +375 31 regularizer """no""" +375 31 optimizer """adam""" +375 31 training_loop """lcwa""" +375 31 evaluator """rankbased""" +375 32 dataset """kinships""" +375 32 model """kg2e""" +375 32 loss """bceaftersigmoid""" +375 32 regularizer """no""" +375 32 optimizer """adam""" +375 32 training_loop """lcwa""" +375 32 evaluator """rankbased""" +375 33 dataset """kinships""" +375 33 model """kg2e""" +375 33 loss """bceaftersigmoid""" +375 33 regularizer """no""" +375 33 optimizer """adam""" +375 33 training_loop """lcwa""" +375 33 evaluator """rankbased""" +375 34 dataset """kinships""" +375 34 model """kg2e""" +375 34 loss """bceaftersigmoid""" +375 34 regularizer """no""" +375 34 optimizer """adam""" +375 34 training_loop """lcwa""" +375 34 evaluator """rankbased""" +375 35 dataset """kinships""" +375 35 model """kg2e""" +375 35 loss """bceaftersigmoid""" +375 35 regularizer """no""" +375 35 optimizer """adam""" +375 35 training_loop """lcwa""" +375 35 evaluator """rankbased""" +375 36 dataset """kinships""" +375 36 model """kg2e""" +375 36 loss """bceaftersigmoid""" +375 36 regularizer """no""" +375 36 optimizer """adam""" +375 36 training_loop """lcwa""" +375 36 evaluator """rankbased""" +375 37 dataset """kinships""" +375 37 model """kg2e""" +375 37 loss """bceaftersigmoid""" +375 37 regularizer """no""" +375 37 optimizer 
"""adam""" +375 37 training_loop """lcwa""" +375 37 evaluator """rankbased""" +375 38 dataset """kinships""" +375 38 model """kg2e""" +375 38 loss """bceaftersigmoid""" +375 38 regularizer """no""" +375 38 optimizer """adam""" +375 38 training_loop """lcwa""" +375 38 evaluator """rankbased""" +375 39 dataset """kinships""" +375 39 model """kg2e""" +375 39 loss """bceaftersigmoid""" +375 39 regularizer """no""" +375 39 optimizer """adam""" +375 39 training_loop """lcwa""" +375 39 evaluator """rankbased""" +375 40 dataset """kinships""" +375 40 model """kg2e""" +375 40 loss """bceaftersigmoid""" +375 40 regularizer """no""" +375 40 optimizer """adam""" +375 40 training_loop """lcwa""" +375 40 evaluator """rankbased""" +375 41 dataset """kinships""" +375 41 model """kg2e""" +375 41 loss """bceaftersigmoid""" +375 41 regularizer """no""" +375 41 optimizer """adam""" +375 41 training_loop """lcwa""" +375 41 evaluator """rankbased""" +375 42 dataset """kinships""" +375 42 model """kg2e""" +375 42 loss """bceaftersigmoid""" +375 42 regularizer """no""" +375 42 optimizer """adam""" +375 42 training_loop """lcwa""" +375 42 evaluator """rankbased""" +375 43 dataset """kinships""" +375 43 model """kg2e""" +375 43 loss """bceaftersigmoid""" +375 43 regularizer """no""" +375 43 optimizer """adam""" +375 43 training_loop """lcwa""" +375 43 evaluator """rankbased""" +375 44 dataset """kinships""" +375 44 model """kg2e""" +375 44 loss """bceaftersigmoid""" +375 44 regularizer """no""" +375 44 optimizer """adam""" +375 44 training_loop """lcwa""" +375 44 evaluator """rankbased""" +375 45 dataset """kinships""" +375 45 model """kg2e""" +375 45 loss """bceaftersigmoid""" +375 45 regularizer """no""" +375 45 optimizer """adam""" +375 45 training_loop """lcwa""" +375 45 evaluator """rankbased""" +375 46 dataset """kinships""" +375 46 model """kg2e""" +375 46 loss """bceaftersigmoid""" +375 46 regularizer """no""" +375 46 optimizer """adam""" +375 46 training_loop """lcwa""" +375 46 
evaluator """rankbased""" +375 47 dataset """kinships""" +375 47 model """kg2e""" +375 47 loss """bceaftersigmoid""" +375 47 regularizer """no""" +375 47 optimizer """adam""" +375 47 training_loop """lcwa""" +375 47 evaluator """rankbased""" +375 48 dataset """kinships""" +375 48 model """kg2e""" +375 48 loss """bceaftersigmoid""" +375 48 regularizer """no""" +375 48 optimizer """adam""" +375 48 training_loop """lcwa""" +375 48 evaluator """rankbased""" +375 49 dataset """kinships""" +375 49 model """kg2e""" +375 49 loss """bceaftersigmoid""" +375 49 regularizer """no""" +375 49 optimizer """adam""" +375 49 training_loop """lcwa""" +375 49 evaluator """rankbased""" +375 50 dataset """kinships""" +375 50 model """kg2e""" +375 50 loss """bceaftersigmoid""" +375 50 regularizer """no""" +375 50 optimizer """adam""" +375 50 training_loop """lcwa""" +375 50 evaluator """rankbased""" +375 51 dataset """kinships""" +375 51 model """kg2e""" +375 51 loss """bceaftersigmoid""" +375 51 regularizer """no""" +375 51 optimizer """adam""" +375 51 training_loop """lcwa""" +375 51 evaluator """rankbased""" +375 52 dataset """kinships""" +375 52 model """kg2e""" +375 52 loss """bceaftersigmoid""" +375 52 regularizer """no""" +375 52 optimizer """adam""" +375 52 training_loop """lcwa""" +375 52 evaluator """rankbased""" +375 53 dataset """kinships""" +375 53 model """kg2e""" +375 53 loss """bceaftersigmoid""" +375 53 regularizer """no""" +375 53 optimizer """adam""" +375 53 training_loop """lcwa""" +375 53 evaluator """rankbased""" +375 54 dataset """kinships""" +375 54 model """kg2e""" +375 54 loss """bceaftersigmoid""" +375 54 regularizer """no""" +375 54 optimizer """adam""" +375 54 training_loop """lcwa""" +375 54 evaluator """rankbased""" +375 55 dataset """kinships""" +375 55 model """kg2e""" +375 55 loss """bceaftersigmoid""" +375 55 regularizer """no""" +375 55 optimizer """adam""" +375 55 training_loop """lcwa""" +375 55 evaluator """rankbased""" +375 56 dataset 
"""kinships""" +375 56 model """kg2e""" +375 56 loss """bceaftersigmoid""" +375 56 regularizer """no""" +375 56 optimizer """adam""" +375 56 training_loop """lcwa""" +375 56 evaluator """rankbased""" +375 57 dataset """kinships""" +375 57 model """kg2e""" +375 57 loss """bceaftersigmoid""" +375 57 regularizer """no""" +375 57 optimizer """adam""" +375 57 training_loop """lcwa""" +375 57 evaluator """rankbased""" +375 58 dataset """kinships""" +375 58 model """kg2e""" +375 58 loss """bceaftersigmoid""" +375 58 regularizer """no""" +375 58 optimizer """adam""" +375 58 training_loop """lcwa""" +375 58 evaluator """rankbased""" +375 59 dataset """kinships""" +375 59 model """kg2e""" +375 59 loss """bceaftersigmoid""" +375 59 regularizer """no""" +375 59 optimizer """adam""" +375 59 training_loop """lcwa""" +375 59 evaluator """rankbased""" +375 60 dataset """kinships""" +375 60 model """kg2e""" +375 60 loss """bceaftersigmoid""" +375 60 regularizer """no""" +375 60 optimizer """adam""" +375 60 training_loop """lcwa""" +375 60 evaluator """rankbased""" +375 61 dataset """kinships""" +375 61 model """kg2e""" +375 61 loss """bceaftersigmoid""" +375 61 regularizer """no""" +375 61 optimizer """adam""" +375 61 training_loop """lcwa""" +375 61 evaluator """rankbased""" +375 62 dataset """kinships""" +375 62 model """kg2e""" +375 62 loss """bceaftersigmoid""" +375 62 regularizer """no""" +375 62 optimizer """adam""" +375 62 training_loop """lcwa""" +375 62 evaluator """rankbased""" +375 63 dataset """kinships""" +375 63 model """kg2e""" +375 63 loss """bceaftersigmoid""" +375 63 regularizer """no""" +375 63 optimizer """adam""" +375 63 training_loop """lcwa""" +375 63 evaluator """rankbased""" +375 64 dataset """kinships""" +375 64 model """kg2e""" +375 64 loss """bceaftersigmoid""" +375 64 regularizer """no""" +375 64 optimizer """adam""" +375 64 training_loop """lcwa""" +375 64 evaluator """rankbased""" +375 65 dataset """kinships""" +375 65 model """kg2e""" +375 65 loss 
"""bceaftersigmoid""" +375 65 regularizer """no""" +375 65 optimizer """adam""" +375 65 training_loop """lcwa""" +375 65 evaluator """rankbased""" +375 66 dataset """kinships""" +375 66 model """kg2e""" +375 66 loss """bceaftersigmoid""" +375 66 regularizer """no""" +375 66 optimizer """adam""" +375 66 training_loop """lcwa""" +375 66 evaluator """rankbased""" +375 67 dataset """kinships""" +375 67 model """kg2e""" +375 67 loss """bceaftersigmoid""" +375 67 regularizer """no""" +375 67 optimizer """adam""" +375 67 training_loop """lcwa""" +375 67 evaluator """rankbased""" +375 68 dataset """kinships""" +375 68 model """kg2e""" +375 68 loss """bceaftersigmoid""" +375 68 regularizer """no""" +375 68 optimizer """adam""" +375 68 training_loop """lcwa""" +375 68 evaluator """rankbased""" +375 69 dataset """kinships""" +375 69 model """kg2e""" +375 69 loss """bceaftersigmoid""" +375 69 regularizer """no""" +375 69 optimizer """adam""" +375 69 training_loop """lcwa""" +375 69 evaluator """rankbased""" +375 70 dataset """kinships""" +375 70 model """kg2e""" +375 70 loss """bceaftersigmoid""" +375 70 regularizer """no""" +375 70 optimizer """adam""" +375 70 training_loop """lcwa""" +375 70 evaluator """rankbased""" +375 71 dataset """kinships""" +375 71 model """kg2e""" +375 71 loss """bceaftersigmoid""" +375 71 regularizer """no""" +375 71 optimizer """adam""" +375 71 training_loop """lcwa""" +375 71 evaluator """rankbased""" +375 72 dataset """kinships""" +375 72 model """kg2e""" +375 72 loss """bceaftersigmoid""" +375 72 regularizer """no""" +375 72 optimizer """adam""" +375 72 training_loop """lcwa""" +375 72 evaluator """rankbased""" +375 73 dataset """kinships""" +375 73 model """kg2e""" +375 73 loss """bceaftersigmoid""" +375 73 regularizer """no""" +375 73 optimizer """adam""" +375 73 training_loop """lcwa""" +375 73 evaluator """rankbased""" +375 74 dataset """kinships""" +375 74 model """kg2e""" +375 74 loss """bceaftersigmoid""" +375 74 regularizer """no""" +375 
74 optimizer """adam""" +375 74 training_loop """lcwa""" +375 74 evaluator """rankbased""" +375 75 dataset """kinships""" +375 75 model """kg2e""" +375 75 loss """bceaftersigmoid""" +375 75 regularizer """no""" +375 75 optimizer """adam""" +375 75 training_loop """lcwa""" +375 75 evaluator """rankbased""" +375 76 dataset """kinships""" +375 76 model """kg2e""" +375 76 loss """bceaftersigmoid""" +375 76 regularizer """no""" +375 76 optimizer """adam""" +375 76 training_loop """lcwa""" +375 76 evaluator """rankbased""" +375 77 dataset """kinships""" +375 77 model """kg2e""" +375 77 loss """bceaftersigmoid""" +375 77 regularizer """no""" +375 77 optimizer """adam""" +375 77 training_loop """lcwa""" +375 77 evaluator """rankbased""" +375 78 dataset """kinships""" +375 78 model """kg2e""" +375 78 loss """bceaftersigmoid""" +375 78 regularizer """no""" +375 78 optimizer """adam""" +375 78 training_loop """lcwa""" +375 78 evaluator """rankbased""" +375 79 dataset """kinships""" +375 79 model """kg2e""" +375 79 loss """bceaftersigmoid""" +375 79 regularizer """no""" +375 79 optimizer """adam""" +375 79 training_loop """lcwa""" +375 79 evaluator """rankbased""" +375 80 dataset """kinships""" +375 80 model """kg2e""" +375 80 loss """bceaftersigmoid""" +375 80 regularizer """no""" +375 80 optimizer """adam""" +375 80 training_loop """lcwa""" +375 80 evaluator """rankbased""" +375 81 dataset """kinships""" +375 81 model """kg2e""" +375 81 loss """bceaftersigmoid""" +375 81 regularizer """no""" +375 81 optimizer """adam""" +375 81 training_loop """lcwa""" +375 81 evaluator """rankbased""" +375 82 dataset """kinships""" +375 82 model """kg2e""" +375 82 loss """bceaftersigmoid""" +375 82 regularizer """no""" +375 82 optimizer """adam""" +375 82 training_loop """lcwa""" +375 82 evaluator """rankbased""" +375 83 dataset """kinships""" +375 83 model """kg2e""" +375 83 loss """bceaftersigmoid""" +375 83 regularizer """no""" +375 83 optimizer """adam""" +375 83 training_loop 
"""lcwa""" +375 83 evaluator """rankbased""" +375 84 dataset """kinships""" +375 84 model """kg2e""" +375 84 loss """bceaftersigmoid""" +375 84 regularizer """no""" +375 84 optimizer """adam""" +375 84 training_loop """lcwa""" +375 84 evaluator """rankbased""" +375 85 dataset """kinships""" +375 85 model """kg2e""" +375 85 loss """bceaftersigmoid""" +375 85 regularizer """no""" +375 85 optimizer """adam""" +375 85 training_loop """lcwa""" +375 85 evaluator """rankbased""" +375 86 dataset """kinships""" +375 86 model """kg2e""" +375 86 loss """bceaftersigmoid""" +375 86 regularizer """no""" +375 86 optimizer """adam""" +375 86 training_loop """lcwa""" +375 86 evaluator """rankbased""" +375 87 dataset """kinships""" +375 87 model """kg2e""" +375 87 loss """bceaftersigmoid""" +375 87 regularizer """no""" +375 87 optimizer """adam""" +375 87 training_loop """lcwa""" +375 87 evaluator """rankbased""" +375 88 dataset """kinships""" +375 88 model """kg2e""" +375 88 loss """bceaftersigmoid""" +375 88 regularizer """no""" +375 88 optimizer """adam""" +375 88 training_loop """lcwa""" +375 88 evaluator """rankbased""" +375 89 dataset """kinships""" +375 89 model """kg2e""" +375 89 loss """bceaftersigmoid""" +375 89 regularizer """no""" +375 89 optimizer """adam""" +375 89 training_loop """lcwa""" +375 89 evaluator """rankbased""" +375 90 dataset """kinships""" +375 90 model """kg2e""" +375 90 loss """bceaftersigmoid""" +375 90 regularizer """no""" +375 90 optimizer """adam""" +375 90 training_loop """lcwa""" +375 90 evaluator """rankbased""" +375 91 dataset """kinships""" +375 91 model """kg2e""" +375 91 loss """bceaftersigmoid""" +375 91 regularizer """no""" +375 91 optimizer """adam""" +375 91 training_loop """lcwa""" +375 91 evaluator """rankbased""" +375 92 dataset """kinships""" +375 92 model """kg2e""" +375 92 loss """bceaftersigmoid""" +375 92 regularizer """no""" +375 92 optimizer """adam""" +375 92 training_loop """lcwa""" +375 92 evaluator """rankbased""" +375 93 
dataset """kinships""" +375 93 model """kg2e""" +375 93 loss """bceaftersigmoid""" +375 93 regularizer """no""" +375 93 optimizer """adam""" +375 93 training_loop """lcwa""" +375 93 evaluator """rankbased""" +375 94 dataset """kinships""" +375 94 model """kg2e""" +375 94 loss """bceaftersigmoid""" +375 94 regularizer """no""" +375 94 optimizer """adam""" +375 94 training_loop """lcwa""" +375 94 evaluator """rankbased""" +375 95 dataset """kinships""" +375 95 model """kg2e""" +375 95 loss """bceaftersigmoid""" +375 95 regularizer """no""" +375 95 optimizer """adam""" +375 95 training_loop """lcwa""" +375 95 evaluator """rankbased""" +375 96 dataset """kinships""" +375 96 model """kg2e""" +375 96 loss """bceaftersigmoid""" +375 96 regularizer """no""" +375 96 optimizer """adam""" +375 96 training_loop """lcwa""" +375 96 evaluator """rankbased""" +375 97 dataset """kinships""" +375 97 model """kg2e""" +375 97 loss """bceaftersigmoid""" +375 97 regularizer """no""" +375 97 optimizer """adam""" +375 97 training_loop """lcwa""" +375 97 evaluator """rankbased""" +375 98 dataset """kinships""" +375 98 model """kg2e""" +375 98 loss """bceaftersigmoid""" +375 98 regularizer """no""" +375 98 optimizer """adam""" +375 98 training_loop """lcwa""" +375 98 evaluator """rankbased""" +375 99 dataset """kinships""" +375 99 model """kg2e""" +375 99 loss """bceaftersigmoid""" +375 99 regularizer """no""" +375 99 optimizer """adam""" +375 99 training_loop """lcwa""" +375 99 evaluator """rankbased""" +375 100 dataset """kinships""" +375 100 model """kg2e""" +375 100 loss """bceaftersigmoid""" +375 100 regularizer """no""" +375 100 optimizer """adam""" +375 100 training_loop """lcwa""" +375 100 evaluator """rankbased""" +376 1 model.embedding_dim 0.0 +376 1 model.c_min 0.042859534042232084 +376 1 model.c_max 8.280008170057494 +376 1 optimizer.lr 0.0863987325906029 +376 1 training.batch_size 0.0 +376 1 training.label_smoothing 0.46392527102362047 +376 2 model.embedding_dim 2.0 +376 2 
model.c_min 0.05262209563112746 +376 2 model.c_max 5.557755626800963 +376 2 optimizer.lr 0.0010041419589070287 +376 2 training.batch_size 1.0 +376 2 training.label_smoothing 0.13434063997785714 +376 3 model.embedding_dim 2.0 +376 3 model.c_min 0.042265165201298145 +376 3 model.c_max 6.563171152729893 +376 3 optimizer.lr 0.014400485717586635 +376 3 training.batch_size 2.0 +376 3 training.label_smoothing 0.20904382493338175 +376 4 model.embedding_dim 2.0 +376 4 model.c_min 0.015629887679043754 +376 4 model.c_max 5.197016684983086 +376 4 optimizer.lr 0.00490877978337054 +376 4 training.batch_size 2.0 +376 4 training.label_smoothing 0.9512286671518828 +376 5 model.embedding_dim 1.0 +376 5 model.c_min 0.030620786682889846 +376 5 model.c_max 6.898288878507302 +376 5 optimizer.lr 0.0014160578774483695 +376 5 training.batch_size 2.0 +376 5 training.label_smoothing 0.09352348012751098 +376 6 model.embedding_dim 2.0 +376 6 model.c_min 0.021941896997270574 +376 6 model.c_max 5.673537975469966 +376 6 optimizer.lr 0.05048689763997314 +376 6 training.batch_size 1.0 +376 6 training.label_smoothing 0.10098005785782219 +376 7 model.embedding_dim 1.0 +376 7 model.c_min 0.010072935986516601 +376 7 model.c_max 8.740604703595952 +376 7 optimizer.lr 0.0021156850463209323 +376 7 training.batch_size 1.0 +376 7 training.label_smoothing 0.03477341524965938 +376 8 model.embedding_dim 2.0 +376 8 model.c_min 0.06411246326693144 +376 8 model.c_max 8.272316795079117 +376 8 optimizer.lr 0.028624931376787722 +376 8 training.batch_size 1.0 +376 8 training.label_smoothing 0.08145998198720909 +376 9 model.embedding_dim 1.0 +376 9 model.c_min 0.07997384887044784 +376 9 model.c_max 6.350121143064717 +376 9 optimizer.lr 0.006259431389295222 +376 9 training.batch_size 1.0 +376 9 training.label_smoothing 0.003961229222750591 +376 10 model.embedding_dim 2.0 +376 10 model.c_min 0.01336685831090055 +376 10 model.c_max 3.9332291143166787 +376 10 optimizer.lr 0.002283498958580025 +376 10 training.batch_size 
1.0 +376 10 training.label_smoothing 0.011152909583483191 +376 11 model.embedding_dim 1.0 +376 11 model.c_min 0.010712701316381619 +376 11 model.c_max 8.65972448067436 +376 11 optimizer.lr 0.09825910606086087 +376 11 training.batch_size 0.0 +376 11 training.label_smoothing 0.0013839820288394122 +376 12 model.embedding_dim 2.0 +376 12 model.c_min 0.05980857231232516 +376 12 model.c_max 4.72593921978488 +376 12 optimizer.lr 0.0024297623212758985 +376 12 training.batch_size 2.0 +376 12 training.label_smoothing 0.030575689443244736 +376 13 model.embedding_dim 1.0 +376 13 model.c_min 0.019400834453315856 +376 13 model.c_max 1.0487120312691462 +376 13 optimizer.lr 0.0018398564337451786 +376 13 training.batch_size 0.0 +376 13 training.label_smoothing 0.5152980887072672 +376 14 model.embedding_dim 2.0 +376 14 model.c_min 0.05570078932324677 +376 14 model.c_max 7.570916533818868 +376 14 optimizer.lr 0.0011458286705277986 +376 14 training.batch_size 2.0 +376 14 training.label_smoothing 0.002472075160717633 +376 15 model.embedding_dim 1.0 +376 15 model.c_min 0.05083397952507033 +376 15 model.c_max 6.443123974556053 +376 15 optimizer.lr 0.04052106849655849 +376 15 training.batch_size 2.0 +376 15 training.label_smoothing 0.005966981368043889 +376 16 model.embedding_dim 2.0 +376 16 model.c_min 0.07257129446894028 +376 16 model.c_max 3.973929432937046 +376 16 optimizer.lr 0.04504808511907873 +376 16 training.batch_size 2.0 +376 16 training.label_smoothing 0.778260570987912 +376 17 model.embedding_dim 1.0 +376 17 model.c_min 0.018909756408964477 +376 17 model.c_max 4.9405828368918305 +376 17 optimizer.lr 0.0047708674510490556 +376 17 training.batch_size 1.0 +376 17 training.label_smoothing 0.003999260724941148 +376 18 model.embedding_dim 1.0 +376 18 model.c_min 0.021189337974786268 +376 18 model.c_max 2.304331988104057 +376 18 optimizer.lr 0.07772073539015438 +376 18 training.batch_size 2.0 +376 18 training.label_smoothing 0.006284448506617326 +376 19 model.embedding_dim 0.0 +376 
19 model.c_min 0.010457797972028384 +376 19 model.c_max 5.025962362904559 +376 19 optimizer.lr 0.0325837131012672 +376 19 training.batch_size 1.0 +376 19 training.label_smoothing 0.07813648694372571 +376 20 model.embedding_dim 0.0 +376 20 model.c_min 0.040414652543073366 +376 20 model.c_max 3.0287498583852903 +376 20 optimizer.lr 0.0018589298503505552 +376 20 training.batch_size 2.0 +376 20 training.label_smoothing 0.117801437762347 +376 21 model.embedding_dim 0.0 +376 21 model.c_min 0.013008951128179257 +376 21 model.c_max 6.18726059509846 +376 21 optimizer.lr 0.0016642704667539736 +376 21 training.batch_size 2.0 +376 21 training.label_smoothing 0.003527316207713343 +376 22 model.embedding_dim 2.0 +376 22 model.c_min 0.05643236293942963 +376 22 model.c_max 3.386111720947253 +376 22 optimizer.lr 0.0010018046737844226 +376 22 training.batch_size 2.0 +376 22 training.label_smoothing 0.011396699054217951 +376 23 model.embedding_dim 0.0 +376 23 model.c_min 0.06240980310382613 +376 23 model.c_max 4.795678080177167 +376 23 optimizer.lr 0.0015424897923854714 +376 23 training.batch_size 2.0 +376 23 training.label_smoothing 0.033549430395041074 +376 24 model.embedding_dim 1.0 +376 24 model.c_min 0.010259446240406435 +376 24 model.c_max 5.234310612741147 +376 24 optimizer.lr 0.0029618620706538274 +376 24 training.batch_size 2.0 +376 24 training.label_smoothing 0.0015064134720304775 +376 25 model.embedding_dim 2.0 +376 25 model.c_min 0.012450347666559511 +376 25 model.c_max 4.957607359282226 +376 25 optimizer.lr 0.04184412239452126 +376 25 training.batch_size 1.0 +376 25 training.label_smoothing 0.006766937535429566 +376 26 model.embedding_dim 2.0 +376 26 model.c_min 0.034957222170337525 +376 26 model.c_max 9.33500481199189 +376 26 optimizer.lr 0.004968710574998545 +376 26 training.batch_size 2.0 +376 26 training.label_smoothing 0.6675595891014747 +376 27 model.embedding_dim 2.0 +376 27 model.c_min 0.031946276061227755 +376 27 model.c_max 6.352057483866078 +376 27 
optimizer.lr 0.0058521697499541924 +376 27 training.batch_size 1.0 +376 27 training.label_smoothing 0.05142846169955212 +376 28 model.embedding_dim 0.0 +376 28 model.c_min 0.025660856110365594 +376 28 model.c_max 3.099741448468224 +376 28 optimizer.lr 0.08041523204754286 +376 28 training.batch_size 2.0 +376 28 training.label_smoothing 0.0014751823725901625 +376 29 model.embedding_dim 2.0 +376 29 model.c_min 0.023009482314983187 +376 29 model.c_max 1.4240569028044723 +376 29 optimizer.lr 0.008968497725057346 +376 29 training.batch_size 1.0 +376 29 training.label_smoothing 0.04394723153609886 +376 30 model.embedding_dim 1.0 +376 30 model.c_min 0.02121587325536669 +376 30 model.c_max 6.386132717742001 +376 30 optimizer.lr 0.056518177523516185 +376 30 training.batch_size 2.0 +376 30 training.label_smoothing 0.49178278307054857 +376 31 model.embedding_dim 1.0 +376 31 model.c_min 0.061379802018457344 +376 31 model.c_max 6.863819892855417 +376 31 optimizer.lr 0.016470066821711673 +376 31 training.batch_size 2.0 +376 31 training.label_smoothing 0.007135762460594229 +376 32 model.embedding_dim 1.0 +376 32 model.c_min 0.05604747213216538 +376 32 model.c_max 1.4824002302428805 +376 32 optimizer.lr 0.02109123730554472 +376 32 training.batch_size 2.0 +376 32 training.label_smoothing 0.008800949501556642 +376 33 model.embedding_dim 1.0 +376 33 model.c_min 0.02047663433813944 +376 33 model.c_max 8.021876816803081 +376 33 optimizer.lr 0.029816344919423362 +376 33 training.batch_size 1.0 +376 33 training.label_smoothing 0.04924266903149865 +376 34 model.embedding_dim 0.0 +376 34 model.c_min 0.03869572892832318 +376 34 model.c_max 9.75954240635242 +376 34 optimizer.lr 0.030000676120125382 +376 34 training.batch_size 2.0 +376 34 training.label_smoothing 0.03704403951731948 +376 35 model.embedding_dim 2.0 +376 35 model.c_min 0.0206874312054892 +376 35 model.c_max 5.281654292094015 +376 35 optimizer.lr 0.028355934140161427 +376 35 training.batch_size 2.0 +376 35 
training.label_smoothing 0.0718291663547882 +376 36 model.embedding_dim 2.0 +376 36 model.c_min 0.027429080692935315 +376 36 model.c_max 5.7394828758538905 +376 36 optimizer.lr 0.013931254372139336 +376 36 training.batch_size 2.0 +376 36 training.label_smoothing 0.012657714810187454 +376 37 model.embedding_dim 2.0 +376 37 model.c_min 0.026622733573320553 +376 37 model.c_max 8.158603914953707 +376 37 optimizer.lr 0.0030033469054021517 +376 37 training.batch_size 2.0 +376 37 training.label_smoothing 0.06065055706168023 +376 38 model.embedding_dim 1.0 +376 38 model.c_min 0.07993756479017483 +376 38 model.c_max 6.28446056840389 +376 38 optimizer.lr 0.024892583192571878 +376 38 training.batch_size 1.0 +376 38 training.label_smoothing 0.14488526941150312 +376 39 model.embedding_dim 0.0 +376 39 model.c_min 0.08449473690738049 +376 39 model.c_max 5.336770782563784 +376 39 optimizer.lr 0.033756025132800925 +376 39 training.batch_size 2.0 +376 39 training.label_smoothing 0.0721427713156391 +376 40 model.embedding_dim 0.0 +376 40 model.c_min 0.030051929030853356 +376 40 model.c_max 7.41406699285305 +376 40 optimizer.lr 0.0036619645516210715 +376 40 training.batch_size 1.0 +376 40 training.label_smoothing 0.464264205365258 +376 41 model.embedding_dim 0.0 +376 41 model.c_min 0.01636405812993898 +376 41 model.c_max 6.585219720800866 +376 41 optimizer.lr 0.0024595378053484007 +376 41 training.batch_size 0.0 +376 41 training.label_smoothing 0.14031834939756277 +376 42 model.embedding_dim 1.0 +376 42 model.c_min 0.03227409138840883 +376 42 model.c_max 1.9503293613655446 +376 42 optimizer.lr 0.026955760653744117 +376 42 training.batch_size 2.0 +376 42 training.label_smoothing 0.003125008503889348 +376 43 model.embedding_dim 2.0 +376 43 model.c_min 0.022190639123867085 +376 43 model.c_max 1.445960694088967 +376 43 optimizer.lr 0.0330637573140413 +376 43 training.batch_size 0.0 +376 43 training.label_smoothing 0.02056303664468837 +376 44 model.embedding_dim 1.0 +376 44 model.c_min 
0.024282458878710483 +376 44 model.c_max 9.381247657127977 +376 44 optimizer.lr 0.007158500594018929 +376 44 training.batch_size 2.0 +376 44 training.label_smoothing 0.25870195820155234 +376 45 model.embedding_dim 1.0 +376 45 model.c_min 0.05026263325326569 +376 45 model.c_max 9.70551789138488 +376 45 optimizer.lr 0.08560898433561757 +376 45 training.batch_size 0.0 +376 45 training.label_smoothing 0.7536919186851729 +376 46 model.embedding_dim 2.0 +376 46 model.c_min 0.08584279021735974 +376 46 model.c_max 2.652534234424285 +376 46 optimizer.lr 0.0010522321008820418 +376 46 training.batch_size 2.0 +376 46 training.label_smoothing 0.3331629611774892 +376 47 model.embedding_dim 1.0 +376 47 model.c_min 0.06616585665494086 +376 47 model.c_max 6.483739190645996 +376 47 optimizer.lr 0.0056565093783811785 +376 47 training.batch_size 0.0 +376 47 training.label_smoothing 0.011926609079230958 +376 48 model.embedding_dim 2.0 +376 48 model.c_min 0.022559762636127537 +376 48 model.c_max 9.342374605859076 +376 48 optimizer.lr 0.003658026420849815 +376 48 training.batch_size 1.0 +376 48 training.label_smoothing 0.008235948777853618 +376 49 model.embedding_dim 2.0 +376 49 model.c_min 0.011796605275953942 +376 49 model.c_max 2.2498106900821266 +376 49 optimizer.lr 0.07694599411818548 +376 49 training.batch_size 1.0 +376 49 training.label_smoothing 0.008085958537074505 +376 50 model.embedding_dim 1.0 +376 50 model.c_min 0.09647381864102784 +376 50 model.c_max 7.476035069562308 +376 50 optimizer.lr 0.0013243464647380299 +376 50 training.batch_size 1.0 +376 50 training.label_smoothing 0.04995390403479144 +376 51 model.embedding_dim 0.0 +376 51 model.c_min 0.0336607899457048 +376 51 model.c_max 3.269129530471822 +376 51 optimizer.lr 0.0018801995140841283 +376 51 training.batch_size 1.0 +376 51 training.label_smoothing 0.07161354193752505 +376 52 model.embedding_dim 0.0 +376 52 model.c_min 0.01843726020931145 +376 52 model.c_max 6.501758812018544 +376 52 optimizer.lr 
0.023121807126211955 +376 52 training.batch_size 2.0 +376 52 training.label_smoothing 0.13568290608996825 +376 53 model.embedding_dim 2.0 +376 53 model.c_min 0.029836563873822552 +376 53 model.c_max 2.476800285482784 +376 53 optimizer.lr 0.08375494726791151 +376 53 training.batch_size 2.0 +376 53 training.label_smoothing 0.28771931255795 +376 54 model.embedding_dim 2.0 +376 54 model.c_min 0.012300948882571345 +376 54 model.c_max 3.766675135812108 +376 54 optimizer.lr 0.004230416871003421 +376 54 training.batch_size 0.0 +376 54 training.label_smoothing 0.039572761252715784 +376 55 model.embedding_dim 0.0 +376 55 model.c_min 0.012096767527686224 +376 55 model.c_max 2.0574621760798086 +376 55 optimizer.lr 0.023659576501563216 +376 55 training.batch_size 0.0 +376 55 training.label_smoothing 0.020665801482113506 +376 56 model.embedding_dim 0.0 +376 56 model.c_min 0.06096866345315457 +376 56 model.c_max 5.643388636438483 +376 56 optimizer.lr 0.03414819947940238 +376 56 training.batch_size 2.0 +376 56 training.label_smoothing 0.09225751613991699 +376 57 model.embedding_dim 0.0 +376 57 model.c_min 0.039289124196283096 +376 57 model.c_max 1.3624869529494816 +376 57 optimizer.lr 0.0035732713244398134 +376 57 training.batch_size 0.0 +376 57 training.label_smoothing 0.28893952692322455 +376 58 model.embedding_dim 2.0 +376 58 model.c_min 0.06116074430775997 +376 58 model.c_max 7.2199659502309395 +376 58 optimizer.lr 0.001823493054040443 +376 58 training.batch_size 1.0 +376 58 training.label_smoothing 0.00476227391884082 +376 59 model.embedding_dim 1.0 +376 59 model.c_min 0.05567495351956107 +376 59 model.c_max 9.16171941282015 +376 59 optimizer.lr 0.005266089611144845 +376 59 training.batch_size 2.0 +376 59 training.label_smoothing 0.0017101076959898669 +376 60 model.embedding_dim 1.0 +376 60 model.c_min 0.010052980708964813 +376 60 model.c_max 4.23240417865183 +376 60 optimizer.lr 0.008331876766020345 +376 60 training.batch_size 1.0 +376 60 training.label_smoothing 
0.002051469250226334 +376 61 model.embedding_dim 2.0 +376 61 model.c_min 0.040186928404471904 +376 61 model.c_max 7.215826218937806 +376 61 optimizer.lr 0.002728015804928604 +376 61 training.batch_size 1.0 +376 61 training.label_smoothing 0.011232457382601512 +376 62 model.embedding_dim 1.0 +376 62 model.c_min 0.021904152089888225 +376 62 model.c_max 1.5488184328135357 +376 62 optimizer.lr 0.08509824233461262 +376 62 training.batch_size 1.0 +376 62 training.label_smoothing 0.03226983915125891 +376 63 model.embedding_dim 0.0 +376 63 model.c_min 0.01630938120756062 +376 63 model.c_max 1.105083235788692 +376 63 optimizer.lr 0.01261639413251529 +376 63 training.batch_size 1.0 +376 63 training.label_smoothing 0.007839997513012216 +376 64 model.embedding_dim 0.0 +376 64 model.c_min 0.08995519594948677 +376 64 model.c_max 3.982597731339331 +376 64 optimizer.lr 0.03163829586355343 +376 64 training.batch_size 1.0 +376 64 training.label_smoothing 0.004324618032955952 +376 65 model.embedding_dim 0.0 +376 65 model.c_min 0.09277125727854155 +376 65 model.c_max 3.7256590346379674 +376 65 optimizer.lr 0.04048351406907917 +376 65 training.batch_size 1.0 +376 65 training.label_smoothing 0.008463910197279097 +376 66 model.embedding_dim 1.0 +376 66 model.c_min 0.08917588233196945 +376 66 model.c_max 1.219619398595196 +376 66 optimizer.lr 0.0016814572864512509 +376 66 training.batch_size 0.0 +376 66 training.label_smoothing 0.3592445376917684 +376 67 model.embedding_dim 0.0 +376 67 model.c_min 0.03752845500376516 +376 67 model.c_max 4.652735639071494 +376 67 optimizer.lr 0.019734919112734624 +376 67 training.batch_size 2.0 +376 67 training.label_smoothing 0.269334140302402 +376 68 model.embedding_dim 0.0 +376 68 model.c_min 0.024564898916774786 +376 68 model.c_max 6.691135360110284 +376 68 optimizer.lr 0.0025833919724113615 +376 68 training.batch_size 2.0 +376 68 training.label_smoothing 0.0017114902513327014 +376 69 model.embedding_dim 2.0 +376 69 model.c_min 0.03288095060398647 +376 
69 model.c_max 8.379905968303321 +376 69 optimizer.lr 0.002100482741943367 +376 69 training.batch_size 0.0 +376 69 training.label_smoothing 0.7953560121838217 +376 70 model.embedding_dim 2.0 +376 70 model.c_min 0.010145047214165563 +376 70 model.c_max 4.634103641168138 +376 70 optimizer.lr 0.01268041975226965 +376 70 training.batch_size 0.0 +376 70 training.label_smoothing 0.057724550231943283 +376 71 model.embedding_dim 2.0 +376 71 model.c_min 0.09066246352958847 +376 71 model.c_max 2.052973722718831 +376 71 optimizer.lr 0.010659499236187147 +376 71 training.batch_size 1.0 +376 71 training.label_smoothing 0.10116303150322027 +376 72 model.embedding_dim 1.0 +376 72 model.c_min 0.06248371552587819 +376 72 model.c_max 5.173866476623853 +376 72 optimizer.lr 0.002059662089443047 +376 72 training.batch_size 0.0 +376 72 training.label_smoothing 0.700991759409651 +376 73 model.embedding_dim 0.0 +376 73 model.c_min 0.022624837079734523 +376 73 model.c_max 3.9926971102333386 +376 73 optimizer.lr 0.004257426583832161 +376 73 training.batch_size 1.0 +376 73 training.label_smoothing 0.003652269089466274 +376 74 model.embedding_dim 0.0 +376 74 model.c_min 0.049018257262129626 +376 74 model.c_max 5.094697478083705 +376 74 optimizer.lr 0.04932583365871568 +376 74 training.batch_size 2.0 +376 74 training.label_smoothing 0.0218594830689157 +376 75 model.embedding_dim 1.0 +376 75 model.c_min 0.03702408808096547 +376 75 model.c_max 1.0058373502221842 +376 75 optimizer.lr 0.005260087628308908 +376 75 training.batch_size 0.0 +376 75 training.label_smoothing 0.02718353807876814 +376 76 model.embedding_dim 1.0 +376 76 model.c_min 0.01801223163137476 +376 76 model.c_max 3.5285160041522046 +376 76 optimizer.lr 0.0022137081217903235 +376 76 training.batch_size 0.0 +376 76 training.label_smoothing 0.9758965212593569 +376 77 model.embedding_dim 0.0 +376 77 model.c_min 0.024020059054881474 +376 77 model.c_max 8.508544748783642 +376 77 optimizer.lr 0.056056240725522 +376 77 training.batch_size 
0.0 +376 77 training.label_smoothing 0.0023412124316582895 +376 78 model.embedding_dim 2.0 +376 78 model.c_min 0.031078470976924177 +376 78 model.c_max 4.788146668990493 +376 78 optimizer.lr 0.008547231291848156 +376 78 training.batch_size 1.0 +376 78 training.label_smoothing 0.006527900654286385 +376 79 model.embedding_dim 1.0 +376 79 model.c_min 0.019059448507452073 +376 79 model.c_max 9.171479201370003 +376 79 optimizer.lr 0.0066749702120287606 +376 79 training.batch_size 2.0 +376 79 training.label_smoothing 0.005255308626038426 +376 80 model.embedding_dim 2.0 +376 80 model.c_min 0.023971945039151323 +376 80 model.c_max 8.424745575340207 +376 80 optimizer.lr 0.003711153106534908 +376 80 training.batch_size 2.0 +376 80 training.label_smoothing 0.8023158173747158 +376 81 model.embedding_dim 1.0 +376 81 model.c_min 0.024801478723690405 +376 81 model.c_max 6.073261002114096 +376 81 optimizer.lr 0.017572496674239855 +376 81 training.batch_size 1.0 +376 81 training.label_smoothing 0.006990988011833637 +376 82 model.embedding_dim 1.0 +376 82 model.c_min 0.01208844528063067 +376 82 model.c_max 2.013570564907744 +376 82 optimizer.lr 0.0010077218649872897 +376 82 training.batch_size 0.0 +376 82 training.label_smoothing 0.0012123887789912539 +376 83 model.embedding_dim 0.0 +376 83 model.c_min 0.04995498353468768 +376 83 model.c_max 3.445019701871345 +376 83 optimizer.lr 0.03731398579482096 +376 83 training.batch_size 2.0 +376 83 training.label_smoothing 0.4945873338261316 +376 84 model.embedding_dim 1.0 +376 84 model.c_min 0.03831142616910865 +376 84 model.c_max 7.266406335728265 +376 84 optimizer.lr 0.029293401032095327 +376 84 training.batch_size 0.0 +376 84 training.label_smoothing 0.31383314324936684 +376 85 model.embedding_dim 0.0 +376 85 model.c_min 0.03149999805281298 +376 85 model.c_max 9.59842747414889 +376 85 optimizer.lr 0.0012615117788075776 +376 85 training.batch_size 1.0 +376 85 training.label_smoothing 0.457912755831805 +376 86 model.embedding_dim 2.0 +376 
86 model.c_min 0.04325516934066314 +376 86 model.c_max 8.110935012936086 +376 86 optimizer.lr 0.005095050270933997 +376 86 training.batch_size 1.0 +376 86 training.label_smoothing 0.008405788219978382 +376 87 model.embedding_dim 2.0 +376 87 model.c_min 0.022843115041585045 +376 87 model.c_max 2.570983386282079 +376 87 optimizer.lr 0.005704030491733393 +376 87 training.batch_size 1.0 +376 87 training.label_smoothing 0.1597486718645716 +376 88 model.embedding_dim 0.0 +376 88 model.c_min 0.08438149345283974 +376 88 model.c_max 4.5770295994693555 +376 88 optimizer.lr 0.005146562565457746 +376 88 training.batch_size 2.0 +376 88 training.label_smoothing 0.005462013297460102 +376 89 model.embedding_dim 1.0 +376 89 model.c_min 0.01100388737297937 +376 89 model.c_max 8.80482227790581 +376 89 optimizer.lr 0.04690806317648568 +376 89 training.batch_size 1.0 +376 89 training.label_smoothing 0.0012586077264824104 +376 90 model.embedding_dim 0.0 +376 90 model.c_min 0.01807530749454082 +376 90 model.c_max 8.123900281668906 +376 90 optimizer.lr 0.007490493356990756 +376 90 training.batch_size 2.0 +376 90 training.label_smoothing 0.009079257854736287 +376 91 model.embedding_dim 2.0 +376 91 model.c_min 0.06252428126655282 +376 91 model.c_max 4.282383926888857 +376 91 optimizer.lr 0.001437336006543593 +376 91 training.batch_size 2.0 +376 91 training.label_smoothing 0.0011617316111921863 +376 92 model.embedding_dim 2.0 +376 92 model.c_min 0.011672340988880675 +376 92 model.c_max 8.503847622111214 +376 92 optimizer.lr 0.002523386870830486 +376 92 training.batch_size 1.0 +376 92 training.label_smoothing 0.0010633070791023756 +376 93 model.embedding_dim 2.0 +376 93 model.c_min 0.02641872552240338 +376 93 model.c_max 5.408557679284252 +376 93 optimizer.lr 0.0014108513702812736 +376 93 training.batch_size 0.0 +376 93 training.label_smoothing 0.10304562552188465 +376 94 model.embedding_dim 2.0 +376 94 model.c_min 0.019007868989175256 +376 94 model.c_max 2.1109584144806144 +376 94 
optimizer.lr 0.07963796368712642 +376 94 training.batch_size 2.0 +376 94 training.label_smoothing 0.015540701897080983 +376 95 model.embedding_dim 2.0 +376 95 model.c_min 0.010485017890268625 +376 95 model.c_max 8.9296888844152 +376 95 optimizer.lr 0.0740109825744274 +376 95 training.batch_size 2.0 +376 95 training.label_smoothing 0.0024866773599000647 +376 96 model.embedding_dim 0.0 +376 96 model.c_min 0.07551396394043615 +376 96 model.c_max 1.8831931116364 +376 96 optimizer.lr 0.015897634253196378 +376 96 training.batch_size 1.0 +376 96 training.label_smoothing 0.003134668491970374 +376 97 model.embedding_dim 0.0 +376 97 model.c_min 0.034301751941366816 +376 97 model.c_max 6.855098702289282 +376 97 optimizer.lr 0.016448450962599302 +376 97 training.batch_size 2.0 +376 97 training.label_smoothing 0.0024348046026212533 +376 98 model.embedding_dim 0.0 +376 98 model.c_min 0.01961074500885601 +376 98 model.c_max 8.193442543664244 +376 98 optimizer.lr 0.08853204502863585 +376 98 training.batch_size 0.0 +376 98 training.label_smoothing 0.07414208878852027 +376 99 model.embedding_dim 2.0 +376 99 model.c_min 0.01053264189006622 +376 99 model.c_max 6.5532843000653385 +376 99 optimizer.lr 0.02958023183233269 +376 99 training.batch_size 0.0 +376 99 training.label_smoothing 0.20449143376988885 +376 100 model.embedding_dim 2.0 +376 100 model.c_min 0.041543692494757395 +376 100 model.c_max 5.730223027077891 +376 100 optimizer.lr 0.009313906221032743 +376 100 training.batch_size 0.0 +376 100 training.label_smoothing 0.005820649857359853 +376 1 dataset """kinships""" +376 1 model """kg2e""" +376 1 loss """softplus""" +376 1 regularizer """no""" +376 1 optimizer """adam""" +376 1 training_loop """lcwa""" +376 1 evaluator """rankbased""" +376 2 dataset """kinships""" +376 2 model """kg2e""" +376 2 loss """softplus""" +376 2 regularizer """no""" +376 2 optimizer """adam""" +376 2 training_loop """lcwa""" +376 2 evaluator """rankbased""" +376 3 dataset """kinships""" +376 3 model 
"""kg2e""" +376 3 loss """softplus""" +376 3 regularizer """no""" +376 3 optimizer """adam""" +376 3 training_loop """lcwa""" +376 3 evaluator """rankbased""" +376 4 dataset """kinships""" +376 4 model """kg2e""" +376 4 loss """softplus""" +376 4 regularizer """no""" +376 4 optimizer """adam""" +376 4 training_loop """lcwa""" +376 4 evaluator """rankbased""" +376 5 dataset """kinships""" +376 5 model """kg2e""" +376 5 loss """softplus""" +376 5 regularizer """no""" +376 5 optimizer """adam""" +376 5 training_loop """lcwa""" +376 5 evaluator """rankbased""" +376 6 dataset """kinships""" +376 6 model """kg2e""" +376 6 loss """softplus""" +376 6 regularizer """no""" +376 6 optimizer """adam""" +376 6 training_loop """lcwa""" +376 6 evaluator """rankbased""" +376 7 dataset """kinships""" +376 7 model """kg2e""" +376 7 loss """softplus""" +376 7 regularizer """no""" +376 7 optimizer """adam""" +376 7 training_loop """lcwa""" +376 7 evaluator """rankbased""" +376 8 dataset """kinships""" +376 8 model """kg2e""" +376 8 loss """softplus""" +376 8 regularizer """no""" +376 8 optimizer """adam""" +376 8 training_loop """lcwa""" +376 8 evaluator """rankbased""" +376 9 dataset """kinships""" +376 9 model """kg2e""" +376 9 loss """softplus""" +376 9 regularizer """no""" +376 9 optimizer """adam""" +376 9 training_loop """lcwa""" +376 9 evaluator """rankbased""" +376 10 dataset """kinships""" +376 10 model """kg2e""" +376 10 loss """softplus""" +376 10 regularizer """no""" +376 10 optimizer """adam""" +376 10 training_loop """lcwa""" +376 10 evaluator """rankbased""" +376 11 dataset """kinships""" +376 11 model """kg2e""" +376 11 loss """softplus""" +376 11 regularizer """no""" +376 11 optimizer """adam""" +376 11 training_loop """lcwa""" +376 11 evaluator """rankbased""" +376 12 dataset """kinships""" +376 12 model """kg2e""" +376 12 loss """softplus""" +376 12 regularizer """no""" +376 12 optimizer """adam""" +376 12 training_loop """lcwa""" +376 12 evaluator """rankbased""" 
+376 13 dataset """kinships""" +376 13 model """kg2e""" +376 13 loss """softplus""" +376 13 regularizer """no""" +376 13 optimizer """adam""" +376 13 training_loop """lcwa""" +376 13 evaluator """rankbased""" +376 14 dataset """kinships""" +376 14 model """kg2e""" +376 14 loss """softplus""" +376 14 regularizer """no""" +376 14 optimizer """adam""" +376 14 training_loop """lcwa""" +376 14 evaluator """rankbased""" +376 15 dataset """kinships""" +376 15 model """kg2e""" +376 15 loss """softplus""" +376 15 regularizer """no""" +376 15 optimizer """adam""" +376 15 training_loop """lcwa""" +376 15 evaluator """rankbased""" +376 16 dataset """kinships""" +376 16 model """kg2e""" +376 16 loss """softplus""" +376 16 regularizer """no""" +376 16 optimizer """adam""" +376 16 training_loop """lcwa""" +376 16 evaluator """rankbased""" +376 17 dataset """kinships""" +376 17 model """kg2e""" +376 17 loss """softplus""" +376 17 regularizer """no""" +376 17 optimizer """adam""" +376 17 training_loop """lcwa""" +376 17 evaluator """rankbased""" +376 18 dataset """kinships""" +376 18 model """kg2e""" +376 18 loss """softplus""" +376 18 regularizer """no""" +376 18 optimizer """adam""" +376 18 training_loop """lcwa""" +376 18 evaluator """rankbased""" +376 19 dataset """kinships""" +376 19 model """kg2e""" +376 19 loss """softplus""" +376 19 regularizer """no""" +376 19 optimizer """adam""" +376 19 training_loop """lcwa""" +376 19 evaluator """rankbased""" +376 20 dataset """kinships""" +376 20 model """kg2e""" +376 20 loss """softplus""" +376 20 regularizer """no""" +376 20 optimizer """adam""" +376 20 training_loop """lcwa""" +376 20 evaluator """rankbased""" +376 21 dataset """kinships""" +376 21 model """kg2e""" +376 21 loss """softplus""" +376 21 regularizer """no""" +376 21 optimizer """adam""" +376 21 training_loop """lcwa""" +376 21 evaluator """rankbased""" +376 22 dataset """kinships""" +376 22 model """kg2e""" +376 22 loss """softplus""" +376 22 regularizer """no""" +376 
22 optimizer """adam""" +376 22 training_loop """lcwa""" +376 22 evaluator """rankbased""" +376 23 dataset """kinships""" +376 23 model """kg2e""" +376 23 loss """softplus""" +376 23 regularizer """no""" +376 23 optimizer """adam""" +376 23 training_loop """lcwa""" +376 23 evaluator """rankbased""" +376 24 dataset """kinships""" +376 24 model """kg2e""" +376 24 loss """softplus""" +376 24 regularizer """no""" +376 24 optimizer """adam""" +376 24 training_loop """lcwa""" +376 24 evaluator """rankbased""" +376 25 dataset """kinships""" +376 25 model """kg2e""" +376 25 loss """softplus""" +376 25 regularizer """no""" +376 25 optimizer """adam""" +376 25 training_loop """lcwa""" +376 25 evaluator """rankbased""" +376 26 dataset """kinships""" +376 26 model """kg2e""" +376 26 loss """softplus""" +376 26 regularizer """no""" +376 26 optimizer """adam""" +376 26 training_loop """lcwa""" +376 26 evaluator """rankbased""" +376 27 dataset """kinships""" +376 27 model """kg2e""" +376 27 loss """softplus""" +376 27 regularizer """no""" +376 27 optimizer """adam""" +376 27 training_loop """lcwa""" +376 27 evaluator """rankbased""" +376 28 dataset """kinships""" +376 28 model """kg2e""" +376 28 loss """softplus""" +376 28 regularizer """no""" +376 28 optimizer """adam""" +376 28 training_loop """lcwa""" +376 28 evaluator """rankbased""" +376 29 dataset """kinships""" +376 29 model """kg2e""" +376 29 loss """softplus""" +376 29 regularizer """no""" +376 29 optimizer """adam""" +376 29 training_loop """lcwa""" +376 29 evaluator """rankbased""" +376 30 dataset """kinships""" +376 30 model """kg2e""" +376 30 loss """softplus""" +376 30 regularizer """no""" +376 30 optimizer """adam""" +376 30 training_loop """lcwa""" +376 30 evaluator """rankbased""" +376 31 dataset """kinships""" +376 31 model """kg2e""" +376 31 loss """softplus""" +376 31 regularizer """no""" +376 31 optimizer """adam""" +376 31 training_loop """lcwa""" +376 31 evaluator """rankbased""" +376 32 dataset 
"""kinships""" +376 32 model """kg2e""" +376 32 loss """softplus""" +376 32 regularizer """no""" +376 32 optimizer """adam""" +376 32 training_loop """lcwa""" +376 32 evaluator """rankbased""" +376 33 dataset """kinships""" +376 33 model """kg2e""" +376 33 loss """softplus""" +376 33 regularizer """no""" +376 33 optimizer """adam""" +376 33 training_loop """lcwa""" +376 33 evaluator """rankbased""" +376 34 dataset """kinships""" +376 34 model """kg2e""" +376 34 loss """softplus""" +376 34 regularizer """no""" +376 34 optimizer """adam""" +376 34 training_loop """lcwa""" +376 34 evaluator """rankbased""" +376 35 dataset """kinships""" +376 35 model """kg2e""" +376 35 loss """softplus""" +376 35 regularizer """no""" +376 35 optimizer """adam""" +376 35 training_loop """lcwa""" +376 35 evaluator """rankbased""" +376 36 dataset """kinships""" +376 36 model """kg2e""" +376 36 loss """softplus""" +376 36 regularizer """no""" +376 36 optimizer """adam""" +376 36 training_loop """lcwa""" +376 36 evaluator """rankbased""" +376 37 dataset """kinships""" +376 37 model """kg2e""" +376 37 loss """softplus""" +376 37 regularizer """no""" +376 37 optimizer """adam""" +376 37 training_loop """lcwa""" +376 37 evaluator """rankbased""" +376 38 dataset """kinships""" +376 38 model """kg2e""" +376 38 loss """softplus""" +376 38 regularizer """no""" +376 38 optimizer """adam""" +376 38 training_loop """lcwa""" +376 38 evaluator """rankbased""" +376 39 dataset """kinships""" +376 39 model """kg2e""" +376 39 loss """softplus""" +376 39 regularizer """no""" +376 39 optimizer """adam""" +376 39 training_loop """lcwa""" +376 39 evaluator """rankbased""" +376 40 dataset """kinships""" +376 40 model """kg2e""" +376 40 loss """softplus""" +376 40 regularizer """no""" +376 40 optimizer """adam""" +376 40 training_loop """lcwa""" +376 40 evaluator """rankbased""" +376 41 dataset """kinships""" +376 41 model """kg2e""" +376 41 loss """softplus""" +376 41 regularizer """no""" +376 41 optimizer 
"""adam""" +376 41 training_loop """lcwa""" +376 41 evaluator """rankbased""" +376 42 dataset """kinships""" +376 42 model """kg2e""" +376 42 loss """softplus""" +376 42 regularizer """no""" +376 42 optimizer """adam""" +376 42 training_loop """lcwa""" +376 42 evaluator """rankbased""" +376 43 dataset """kinships""" +376 43 model """kg2e""" +376 43 loss """softplus""" +376 43 regularizer """no""" +376 43 optimizer """adam""" +376 43 training_loop """lcwa""" +376 43 evaluator """rankbased""" +376 44 dataset """kinships""" +376 44 model """kg2e""" +376 44 loss """softplus""" +376 44 regularizer """no""" +376 44 optimizer """adam""" +376 44 training_loop """lcwa""" +376 44 evaluator """rankbased""" +376 45 dataset """kinships""" +376 45 model """kg2e""" +376 45 loss """softplus""" +376 45 regularizer """no""" +376 45 optimizer """adam""" +376 45 training_loop """lcwa""" +376 45 evaluator """rankbased""" +376 46 dataset """kinships""" +376 46 model """kg2e""" +376 46 loss """softplus""" +376 46 regularizer """no""" +376 46 optimizer """adam""" +376 46 training_loop """lcwa""" +376 46 evaluator """rankbased""" +376 47 dataset """kinships""" +376 47 model """kg2e""" +376 47 loss """softplus""" +376 47 regularizer """no""" +376 47 optimizer """adam""" +376 47 training_loop """lcwa""" +376 47 evaluator """rankbased""" +376 48 dataset """kinships""" +376 48 model """kg2e""" +376 48 loss """softplus""" +376 48 regularizer """no""" +376 48 optimizer """adam""" +376 48 training_loop """lcwa""" +376 48 evaluator """rankbased""" +376 49 dataset """kinships""" +376 49 model """kg2e""" +376 49 loss """softplus""" +376 49 regularizer """no""" +376 49 optimizer """adam""" +376 49 training_loop """lcwa""" +376 49 evaluator """rankbased""" +376 50 dataset """kinships""" +376 50 model """kg2e""" +376 50 loss """softplus""" +376 50 regularizer """no""" +376 50 optimizer """adam""" +376 50 training_loop """lcwa""" +376 50 evaluator """rankbased""" +376 51 dataset """kinships""" +376 51 
model """kg2e""" +376 51 loss """softplus""" +376 51 regularizer """no""" +376 51 optimizer """adam""" +376 51 training_loop """lcwa""" +376 51 evaluator """rankbased""" +376 52 dataset """kinships""" +376 52 model """kg2e""" +376 52 loss """softplus""" +376 52 regularizer """no""" +376 52 optimizer """adam""" +376 52 training_loop """lcwa""" +376 52 evaluator """rankbased""" +376 53 dataset """kinships""" +376 53 model """kg2e""" +376 53 loss """softplus""" +376 53 regularizer """no""" +376 53 optimizer """adam""" +376 53 training_loop """lcwa""" +376 53 evaluator """rankbased""" +376 54 dataset """kinships""" +376 54 model """kg2e""" +376 54 loss """softplus""" +376 54 regularizer """no""" +376 54 optimizer """adam""" +376 54 training_loop """lcwa""" +376 54 evaluator """rankbased""" +376 55 dataset """kinships""" +376 55 model """kg2e""" +376 55 loss """softplus""" +376 55 regularizer """no""" +376 55 optimizer """adam""" +376 55 training_loop """lcwa""" +376 55 evaluator """rankbased""" +376 56 dataset """kinships""" +376 56 model """kg2e""" +376 56 loss """softplus""" +376 56 regularizer """no""" +376 56 optimizer """adam""" +376 56 training_loop """lcwa""" +376 56 evaluator """rankbased""" +376 57 dataset """kinships""" +376 57 model """kg2e""" +376 57 loss """softplus""" +376 57 regularizer """no""" +376 57 optimizer """adam""" +376 57 training_loop """lcwa""" +376 57 evaluator """rankbased""" +376 58 dataset """kinships""" +376 58 model """kg2e""" +376 58 loss """softplus""" +376 58 regularizer """no""" +376 58 optimizer """adam""" +376 58 training_loop """lcwa""" +376 58 evaluator """rankbased""" +376 59 dataset """kinships""" +376 59 model """kg2e""" +376 59 loss """softplus""" +376 59 regularizer """no""" +376 59 optimizer """adam""" +376 59 training_loop """lcwa""" +376 59 evaluator """rankbased""" +376 60 dataset """kinships""" +376 60 model """kg2e""" +376 60 loss """softplus""" +376 60 regularizer """no""" +376 60 optimizer """adam""" +376 60 
training_loop """lcwa""" +376 60 evaluator """rankbased""" +376 61 dataset """kinships""" +376 61 model """kg2e""" +376 61 loss """softplus""" +376 61 regularizer """no""" +376 61 optimizer """adam""" +376 61 training_loop """lcwa""" +376 61 evaluator """rankbased""" +376 62 dataset """kinships""" +376 62 model """kg2e""" +376 62 loss """softplus""" +376 62 regularizer """no""" +376 62 optimizer """adam""" +376 62 training_loop """lcwa""" +376 62 evaluator """rankbased""" +376 63 dataset """kinships""" +376 63 model """kg2e""" +376 63 loss """softplus""" +376 63 regularizer """no""" +376 63 optimizer """adam""" +376 63 training_loop """lcwa""" +376 63 evaluator """rankbased""" +376 64 dataset """kinships""" +376 64 model """kg2e""" +376 64 loss """softplus""" +376 64 regularizer """no""" +376 64 optimizer """adam""" +376 64 training_loop """lcwa""" +376 64 evaluator """rankbased""" +376 65 dataset """kinships""" +376 65 model """kg2e""" +376 65 loss """softplus""" +376 65 regularizer """no""" +376 65 optimizer """adam""" +376 65 training_loop """lcwa""" +376 65 evaluator """rankbased""" +376 66 dataset """kinships""" +376 66 model """kg2e""" +376 66 loss """softplus""" +376 66 regularizer """no""" +376 66 optimizer """adam""" +376 66 training_loop """lcwa""" +376 66 evaluator """rankbased""" +376 67 dataset """kinships""" +376 67 model """kg2e""" +376 67 loss """softplus""" +376 67 regularizer """no""" +376 67 optimizer """adam""" +376 67 training_loop """lcwa""" +376 67 evaluator """rankbased""" +376 68 dataset """kinships""" +376 68 model """kg2e""" +376 68 loss """softplus""" +376 68 regularizer """no""" +376 68 optimizer """adam""" +376 68 training_loop """lcwa""" +376 68 evaluator """rankbased""" +376 69 dataset """kinships""" +376 69 model """kg2e""" +376 69 loss """softplus""" +376 69 regularizer """no""" +376 69 optimizer """adam""" +376 69 training_loop """lcwa""" +376 69 evaluator """rankbased""" +376 70 dataset """kinships""" +376 70 model """kg2e""" 
+376 70 loss """softplus""" +376 70 regularizer """no""" +376 70 optimizer """adam""" +376 70 training_loop """lcwa""" +376 70 evaluator """rankbased""" +376 71 dataset """kinships""" +376 71 model """kg2e""" +376 71 loss """softplus""" +376 71 regularizer """no""" +376 71 optimizer """adam""" +376 71 training_loop """lcwa""" +376 71 evaluator """rankbased""" +376 72 dataset """kinships""" +376 72 model """kg2e""" +376 72 loss """softplus""" +376 72 regularizer """no""" +376 72 optimizer """adam""" +376 72 training_loop """lcwa""" +376 72 evaluator """rankbased""" +376 73 dataset """kinships""" +376 73 model """kg2e""" +376 73 loss """softplus""" +376 73 regularizer """no""" +376 73 optimizer """adam""" +376 73 training_loop """lcwa""" +376 73 evaluator """rankbased""" +376 74 dataset """kinships""" +376 74 model """kg2e""" +376 74 loss """softplus""" +376 74 regularizer """no""" +376 74 optimizer """adam""" +376 74 training_loop """lcwa""" +376 74 evaluator """rankbased""" +376 75 dataset """kinships""" +376 75 model """kg2e""" +376 75 loss """softplus""" +376 75 regularizer """no""" +376 75 optimizer """adam""" +376 75 training_loop """lcwa""" +376 75 evaluator """rankbased""" +376 76 dataset """kinships""" +376 76 model """kg2e""" +376 76 loss """softplus""" +376 76 regularizer """no""" +376 76 optimizer """adam""" +376 76 training_loop """lcwa""" +376 76 evaluator """rankbased""" +376 77 dataset """kinships""" +376 77 model """kg2e""" +376 77 loss """softplus""" +376 77 regularizer """no""" +376 77 optimizer """adam""" +376 77 training_loop """lcwa""" +376 77 evaluator """rankbased""" +376 78 dataset """kinships""" +376 78 model """kg2e""" +376 78 loss """softplus""" +376 78 regularizer """no""" +376 78 optimizer """adam""" +376 78 training_loop """lcwa""" +376 78 evaluator """rankbased""" +376 79 dataset """kinships""" +376 79 model """kg2e""" +376 79 loss """softplus""" +376 79 regularizer """no""" +376 79 optimizer """adam""" +376 79 training_loop """lcwa""" 
+376 79 evaluator """rankbased""" +376 80 dataset """kinships""" +376 80 model """kg2e""" +376 80 loss """softplus""" +376 80 regularizer """no""" +376 80 optimizer """adam""" +376 80 training_loop """lcwa""" +376 80 evaluator """rankbased""" +376 81 dataset """kinships""" +376 81 model """kg2e""" +376 81 loss """softplus""" +376 81 regularizer """no""" +376 81 optimizer """adam""" +376 81 training_loop """lcwa""" +376 81 evaluator """rankbased""" +376 82 dataset """kinships""" +376 82 model """kg2e""" +376 82 loss """softplus""" +376 82 regularizer """no""" +376 82 optimizer """adam""" +376 82 training_loop """lcwa""" +376 82 evaluator """rankbased""" +376 83 dataset """kinships""" +376 83 model """kg2e""" +376 83 loss """softplus""" +376 83 regularizer """no""" +376 83 optimizer """adam""" +376 83 training_loop """lcwa""" +376 83 evaluator """rankbased""" +376 84 dataset """kinships""" +376 84 model """kg2e""" +376 84 loss """softplus""" +376 84 regularizer """no""" +376 84 optimizer """adam""" +376 84 training_loop """lcwa""" +376 84 evaluator """rankbased""" +376 85 dataset """kinships""" +376 85 model """kg2e""" +376 85 loss """softplus""" +376 85 regularizer """no""" +376 85 optimizer """adam""" +376 85 training_loop """lcwa""" +376 85 evaluator """rankbased""" +376 86 dataset """kinships""" +376 86 model """kg2e""" +376 86 loss """softplus""" +376 86 regularizer """no""" +376 86 optimizer """adam""" +376 86 training_loop """lcwa""" +376 86 evaluator """rankbased""" +376 87 dataset """kinships""" +376 87 model """kg2e""" +376 87 loss """softplus""" +376 87 regularizer """no""" +376 87 optimizer """adam""" +376 87 training_loop """lcwa""" +376 87 evaluator """rankbased""" +376 88 dataset """kinships""" +376 88 model """kg2e""" +376 88 loss """softplus""" +376 88 regularizer """no""" +376 88 optimizer """adam""" +376 88 training_loop """lcwa""" +376 88 evaluator """rankbased""" +376 89 dataset """kinships""" +376 89 model """kg2e""" +376 89 loss """softplus""" 
+376 89 regularizer """no""" +376 89 optimizer """adam""" +376 89 training_loop """lcwa""" +376 89 evaluator """rankbased""" +376 90 dataset """kinships""" +376 90 model """kg2e""" +376 90 loss """softplus""" +376 90 regularizer """no""" +376 90 optimizer """adam""" +376 90 training_loop """lcwa""" +376 90 evaluator """rankbased""" +376 91 dataset """kinships""" +376 91 model """kg2e""" +376 91 loss """softplus""" +376 91 regularizer """no""" +376 91 optimizer """adam""" +376 91 training_loop """lcwa""" +376 91 evaluator """rankbased""" +376 92 dataset """kinships""" +376 92 model """kg2e""" +376 92 loss """softplus""" +376 92 regularizer """no""" +376 92 optimizer """adam""" +376 92 training_loop """lcwa""" +376 92 evaluator """rankbased""" +376 93 dataset """kinships""" +376 93 model """kg2e""" +376 93 loss """softplus""" +376 93 regularizer """no""" +376 93 optimizer """adam""" +376 93 training_loop """lcwa""" +376 93 evaluator """rankbased""" +376 94 dataset """kinships""" +376 94 model """kg2e""" +376 94 loss """softplus""" +376 94 regularizer """no""" +376 94 optimizer """adam""" +376 94 training_loop """lcwa""" +376 94 evaluator """rankbased""" +376 95 dataset """kinships""" +376 95 model """kg2e""" +376 95 loss """softplus""" +376 95 regularizer """no""" +376 95 optimizer """adam""" +376 95 training_loop """lcwa""" +376 95 evaluator """rankbased""" +376 96 dataset """kinships""" +376 96 model """kg2e""" +376 96 loss """softplus""" +376 96 regularizer """no""" +376 96 optimizer """adam""" +376 96 training_loop """lcwa""" +376 96 evaluator """rankbased""" +376 97 dataset """kinships""" +376 97 model """kg2e""" +376 97 loss """softplus""" +376 97 regularizer """no""" +376 97 optimizer """adam""" +376 97 training_loop """lcwa""" +376 97 evaluator """rankbased""" +376 98 dataset """kinships""" +376 98 model """kg2e""" +376 98 loss """softplus""" +376 98 regularizer """no""" +376 98 optimizer """adam""" +376 98 training_loop """lcwa""" +376 98 evaluator 
"""rankbased""" +376 99 dataset """kinships""" +376 99 model """kg2e""" +376 99 loss """softplus""" +376 99 regularizer """no""" +376 99 optimizer """adam""" +376 99 training_loop """lcwa""" +376 99 evaluator """rankbased""" +376 100 dataset """kinships""" +376 100 model """kg2e""" +376 100 loss """softplus""" +376 100 regularizer """no""" +376 100 optimizer """adam""" +376 100 training_loop """lcwa""" +376 100 evaluator """rankbased""" +377 1 model.embedding_dim 1.0 +377 1 model.c_min 0.017051020808708604 +377 1 model.c_max 2.1211941971975223 +377 1 optimizer.lr 0.025056299448927413 +377 1 training.batch_size 2.0 +377 1 training.label_smoothing 0.09816201715441517 +377 2 model.embedding_dim 0.0 +377 2 model.c_min 0.019459656477986927 +377 2 model.c_max 3.8876568150055624 +377 2 optimizer.lr 0.08060505010129386 +377 2 training.batch_size 1.0 +377 2 training.label_smoothing 0.1347091534753513 +377 3 model.embedding_dim 1.0 +377 3 model.c_min 0.029822337608241413 +377 3 model.c_max 3.435634853967493 +377 3 optimizer.lr 0.007328315290213486 +377 3 training.batch_size 0.0 +377 3 training.label_smoothing 0.00790201341462916 +377 4 model.embedding_dim 2.0 +377 4 model.c_min 0.08691089002400629 +377 4 model.c_max 8.483928817638887 +377 4 optimizer.lr 0.039575830221408585 +377 4 training.batch_size 1.0 +377 4 training.label_smoothing 0.006061854485176463 +377 5 model.embedding_dim 0.0 +377 5 model.c_min 0.04691279297352125 +377 5 model.c_max 2.916551423889784 +377 5 optimizer.lr 0.008026249978831225 +377 5 training.batch_size 1.0 +377 5 training.label_smoothing 0.04232856186612737 +377 6 model.embedding_dim 2.0 +377 6 model.c_min 0.08722317572241743 +377 6 model.c_max 2.6939336161875875 +377 6 optimizer.lr 0.011044578313736687 +377 6 training.batch_size 1.0 +377 6 training.label_smoothing 0.10510936627090932 +377 7 model.embedding_dim 1.0 +377 7 model.c_min 0.02096721924871873 +377 7 model.c_max 9.58497609825774 +377 7 optimizer.lr 0.00586760687669838 +377 7 
training.batch_size 1.0 +377 7 training.label_smoothing 0.01056377663746291 +377 8 model.embedding_dim 1.0 +377 8 model.c_min 0.04295757775904627 +377 8 model.c_max 3.244911677570622 +377 8 optimizer.lr 0.014532373850815404 +377 8 training.batch_size 0.0 +377 8 training.label_smoothing 0.11431070634505014 +377 9 model.embedding_dim 2.0 +377 9 model.c_min 0.040802329330755074 +377 9 model.c_max 6.955243467817528 +377 9 optimizer.lr 0.0013677138294326781 +377 9 training.batch_size 1.0 +377 9 training.label_smoothing 0.00896897051344615 +377 10 model.embedding_dim 1.0 +377 10 model.c_min 0.02659505147047219 +377 10 model.c_max 8.237153097354675 +377 10 optimizer.lr 0.0014099941950672069 +377 10 training.batch_size 1.0 +377 10 training.label_smoothing 0.00786545352303231 +377 11 model.embedding_dim 1.0 +377 11 model.c_min 0.05416953211057316 +377 11 model.c_max 2.076353918399257 +377 11 optimizer.lr 0.0786794816894838 +377 11 training.batch_size 1.0 +377 11 training.label_smoothing 0.42240603774728475 +377 12 model.embedding_dim 2.0 +377 12 model.c_min 0.055780716725524405 +377 12 model.c_max 9.993612383645225 +377 12 optimizer.lr 0.0032601753506809853 +377 12 training.batch_size 2.0 +377 12 training.label_smoothing 0.8009480507813346 +377 13 model.embedding_dim 1.0 +377 13 model.c_min 0.07246508508189194 +377 13 model.c_max 4.459322425390225 +377 13 optimizer.lr 0.00772055920711602 +377 13 training.batch_size 2.0 +377 13 training.label_smoothing 0.051067527662191345 +377 14 model.embedding_dim 1.0 +377 14 model.c_min 0.0542068234824772 +377 14 model.c_max 1.5823425058890273 +377 14 optimizer.lr 0.026279694782295988 +377 14 training.batch_size 1.0 +377 14 training.label_smoothing 0.3792242095383526 +377 15 model.embedding_dim 2.0 +377 15 model.c_min 0.030128442618915657 +377 15 model.c_max 6.839022132204922 +377 15 optimizer.lr 0.0034173575288346273 +377 15 training.batch_size 2.0 +377 15 training.label_smoothing 0.017475774210538052 +377 16 model.embedding_dim 2.0 
+377 16 model.c_min 0.02709587882819689 +377 16 model.c_max 2.855923808375799 +377 16 optimizer.lr 0.012655232106739916 +377 16 training.batch_size 2.0 +377 16 training.label_smoothing 0.001647038183507004 +377 17 model.embedding_dim 2.0 +377 17 model.c_min 0.0289930115450294 +377 17 model.c_max 2.638013769416815 +377 17 optimizer.lr 0.09213214268471902 +377 17 training.batch_size 2.0 +377 17 training.label_smoothing 0.39381269117689105 +377 18 model.embedding_dim 2.0 +377 18 model.c_min 0.014741732991222735 +377 18 model.c_max 5.766054378226538 +377 18 optimizer.lr 0.023749021904126855 +377 18 training.batch_size 2.0 +377 18 training.label_smoothing 0.3306256942626528 +377 19 model.embedding_dim 0.0 +377 19 model.c_min 0.01417274033705974 +377 19 model.c_max 8.934929417848075 +377 19 optimizer.lr 0.011827391081962027 +377 19 training.batch_size 1.0 +377 19 training.label_smoothing 0.0017166366790638007 +377 20 model.embedding_dim 2.0 +377 20 model.c_min 0.022575606133280125 +377 20 model.c_max 5.89016397851341 +377 20 optimizer.lr 0.002661826366819985 +377 20 training.batch_size 1.0 +377 20 training.label_smoothing 0.22689659261002576 +377 21 model.embedding_dim 1.0 +377 21 model.c_min 0.03304206773452779 +377 21 model.c_max 1.3565681300548862 +377 21 optimizer.lr 0.09090195311775184 +377 21 training.batch_size 0.0 +377 21 training.label_smoothing 0.9122506065974912 +377 22 model.embedding_dim 2.0 +377 22 model.c_min 0.06497248631431646 +377 22 model.c_max 4.634194518129469 +377 22 optimizer.lr 0.002309917319056327 +377 22 training.batch_size 1.0 +377 22 training.label_smoothing 0.06391653600045855 +377 23 model.embedding_dim 2.0 +377 23 model.c_min 0.016376976210226157 +377 23 model.c_max 9.909629433643829 +377 23 optimizer.lr 0.0018129890333513413 +377 23 training.batch_size 1.0 +377 23 training.label_smoothing 0.12954307259174108 +377 24 model.embedding_dim 2.0 +377 24 model.c_min 0.05475082228239642 +377 24 model.c_max 6.025544732532164 +377 24 optimizer.lr 
0.0024485896342436807 +377 24 training.batch_size 0.0 +377 24 training.label_smoothing 0.0841797676472349 +377 25 model.embedding_dim 1.0 +377 25 model.c_min 0.08641336606802467 +377 25 model.c_max 9.38572574848354 +377 25 optimizer.lr 0.0032469129005722767 +377 25 training.batch_size 0.0 +377 25 training.label_smoothing 0.0035722316877486865 +377 26 model.embedding_dim 0.0 +377 26 model.c_min 0.014348080597467965 +377 26 model.c_max 9.66079174604658 +377 26 optimizer.lr 0.03342146860517 +377 26 training.batch_size 2.0 +377 26 training.label_smoothing 0.10342038140119872 +377 27 model.embedding_dim 1.0 +377 27 model.c_min 0.03951929580318524 +377 27 model.c_max 7.818875535601471 +377 27 optimizer.lr 0.03902705475945273 +377 27 training.batch_size 2.0 +377 27 training.label_smoothing 0.035988412101555574 +377 28 model.embedding_dim 1.0 +377 28 model.c_min 0.06021770756865846 +377 28 model.c_max 5.2802598414021835 +377 28 optimizer.lr 0.07766120073750456 +377 28 training.batch_size 2.0 +377 28 training.label_smoothing 0.004313262108667276 +377 29 model.embedding_dim 0.0 +377 29 model.c_min 0.030136866422032133 +377 29 model.c_max 7.60017692129724 +377 29 optimizer.lr 0.005366066130202204 +377 29 training.batch_size 0.0 +377 29 training.label_smoothing 0.0023686876923894396 +377 30 model.embedding_dim 0.0 +377 30 model.c_min 0.017934981858868528 +377 30 model.c_max 8.948838817014796 +377 30 optimizer.lr 0.04174157987065756 +377 30 training.batch_size 2.0 +377 30 training.label_smoothing 0.011762758134944814 +377 31 model.embedding_dim 2.0 +377 31 model.c_min 0.015264220811375748 +377 31 model.c_max 6.294757784972833 +377 31 optimizer.lr 0.0016323870414600367 +377 31 training.batch_size 1.0 +377 31 training.label_smoothing 0.023178376730696726 +377 32 model.embedding_dim 1.0 +377 32 model.c_min 0.04728618467086524 +377 32 model.c_max 3.6262110250486197 +377 32 optimizer.lr 0.0027246406777434286 +377 32 training.batch_size 2.0 +377 32 training.label_smoothing 
0.00980204048851277 +377 33 model.embedding_dim 2.0 +377 33 model.c_min 0.0712471535201238 +377 33 model.c_max 7.273435544087217 +377 33 optimizer.lr 0.004641740942988815 +377 33 training.batch_size 2.0 +377 33 training.label_smoothing 0.7476718542608283 +377 34 model.embedding_dim 0.0 +377 34 model.c_min 0.020250225758118356 +377 34 model.c_max 2.4245688238066463 +377 34 optimizer.lr 0.05151607468755485 +377 34 training.batch_size 0.0 +377 34 training.label_smoothing 0.09534826972360096 +377 35 model.embedding_dim 1.0 +377 35 model.c_min 0.0239060819254202 +377 35 model.c_max 9.69923861360515 +377 35 optimizer.lr 0.010633167127011877 +377 35 training.batch_size 2.0 +377 35 training.label_smoothing 0.16537847879722445 +377 36 model.embedding_dim 0.0 +377 36 model.c_min 0.02362673171653478 +377 36 model.c_max 1.8189157901246018 +377 36 optimizer.lr 0.001154122410324864 +377 36 training.batch_size 0.0 +377 36 training.label_smoothing 0.0011637658035131035 +377 37 model.embedding_dim 0.0 +377 37 model.c_min 0.027873052125276184 +377 37 model.c_max 3.8301096317818426 +377 37 optimizer.lr 0.030301003027947433 +377 37 training.batch_size 0.0 +377 37 training.label_smoothing 0.04274632093300396 +377 38 model.embedding_dim 0.0 +377 38 model.c_min 0.047466023545673516 +377 38 model.c_max 4.8824851266860065 +377 38 optimizer.lr 0.027009465706695997 +377 38 training.batch_size 0.0 +377 38 training.label_smoothing 0.005427091502455569 +377 39 model.embedding_dim 2.0 +377 39 model.c_min 0.02622584221547025 +377 39 model.c_max 7.766690406243274 +377 39 optimizer.lr 0.0023075432374038162 +377 39 training.batch_size 2.0 +377 39 training.label_smoothing 0.17944149615960586 +377 40 model.embedding_dim 2.0 +377 40 model.c_min 0.03397687770733786 +377 40 model.c_max 1.4787277236409464 +377 40 optimizer.lr 0.002616868982673992 +377 40 training.batch_size 1.0 +377 40 training.label_smoothing 0.3228426429950792 +377 41 model.embedding_dim 2.0 +377 41 model.c_min 0.037893523711100996 +377 
41 model.c_max 3.0588606430120433 +377 41 optimizer.lr 0.002503878508785803 +377 41 training.batch_size 0.0 +377 41 training.label_smoothing 0.12629066010228293 +377 42 model.embedding_dim 1.0 +377 42 model.c_min 0.09565411133939129 +377 42 model.c_max 8.991500306097128 +377 42 optimizer.lr 0.07733599270093659 +377 42 training.batch_size 2.0 +377 42 training.label_smoothing 0.031774066370550136 +377 43 model.embedding_dim 0.0 +377 43 model.c_min 0.01996872424688872 +377 43 model.c_max 3.5440511212260457 +377 43 optimizer.lr 0.053538014409879646 +377 43 training.batch_size 2.0 +377 43 training.label_smoothing 0.052550526688370194 +377 44 model.embedding_dim 1.0 +377 44 model.c_min 0.020565959559663918 +377 44 model.c_max 4.947932645215818 +377 44 optimizer.lr 0.0015176818795497057 +377 44 training.batch_size 2.0 +377 44 training.label_smoothing 0.1476363273098571 +377 45 model.embedding_dim 2.0 +377 45 model.c_min 0.027945614342025027 +377 45 model.c_max 1.4544541630480907 +377 45 optimizer.lr 0.0012990241745301427 +377 45 training.batch_size 2.0 +377 45 training.label_smoothing 0.028412151204543535 +377 46 model.embedding_dim 1.0 +377 46 model.c_min 0.013957483513283632 +377 46 model.c_max 7.581180463092699 +377 46 optimizer.lr 0.02958615222946717 +377 46 training.batch_size 1.0 +377 46 training.label_smoothing 0.06648894900246739 +377 47 model.embedding_dim 0.0 +377 47 model.c_min 0.08013023029703524 +377 47 model.c_max 1.066476119378295 +377 47 optimizer.lr 0.03804082165800942 +377 47 training.batch_size 0.0 +377 47 training.label_smoothing 0.027573819657444692 +377 48 model.embedding_dim 2.0 +377 48 model.c_min 0.020584957722088952 +377 48 model.c_max 7.4856947332815835 +377 48 optimizer.lr 0.002030873019803712 +377 48 training.batch_size 1.0 +377 48 training.label_smoothing 0.005688422369469851 +377 49 model.embedding_dim 2.0 +377 49 model.c_min 0.04756848190397432 +377 49 model.c_max 6.906264360268407 +377 49 optimizer.lr 0.004585734103347158 +377 49 
training.batch_size 2.0 +377 49 training.label_smoothing 0.9044603370386487 +377 50 model.embedding_dim 1.0 +377 50 model.c_min 0.08971178976449128 +377 50 model.c_max 9.4795131055673 +377 50 optimizer.lr 0.009818563820815421 +377 50 training.batch_size 1.0 +377 50 training.label_smoothing 0.08659260139538076 +377 51 model.embedding_dim 2.0 +377 51 model.c_min 0.018308809515303823 +377 51 model.c_max 2.4959225396135043 +377 51 optimizer.lr 0.010408198074673332 +377 51 training.batch_size 2.0 +377 51 training.label_smoothing 0.2830946580126654 +377 52 model.embedding_dim 0.0 +377 52 model.c_min 0.03193296083472017 +377 52 model.c_max 8.521307900062972 +377 52 optimizer.lr 0.0017063681662503826 +377 52 training.batch_size 1.0 +377 52 training.label_smoothing 0.03486009067525482 +377 53 model.embedding_dim 0.0 +377 53 model.c_min 0.022964218617520012 +377 53 model.c_max 4.933299631878454 +377 53 optimizer.lr 0.05757186054062417 +377 53 training.batch_size 1.0 +377 53 training.label_smoothing 0.0062539562054824615 +377 54 model.embedding_dim 1.0 +377 54 model.c_min 0.05868137379178419 +377 54 model.c_max 7.749675231980317 +377 54 optimizer.lr 0.0031865706850035874 +377 54 training.batch_size 0.0 +377 54 training.label_smoothing 0.04478903159053835 +377 55 model.embedding_dim 1.0 +377 55 model.c_min 0.01621504897944057 +377 55 model.c_max 2.5466118445414 +377 55 optimizer.lr 0.0011632934938864234 +377 55 training.batch_size 0.0 +377 55 training.label_smoothing 0.25745438548235666 +377 56 model.embedding_dim 2.0 +377 56 model.c_min 0.026269342213073858 +377 56 model.c_max 5.271170196670909 +377 56 optimizer.lr 0.00647298957258747 +377 56 training.batch_size 0.0 +377 56 training.label_smoothing 0.006322698037127731 +377 57 model.embedding_dim 1.0 +377 57 model.c_min 0.014220980457563162 +377 57 model.c_max 2.6786853695039796 +377 57 optimizer.lr 0.01306037525024472 +377 57 training.batch_size 0.0 +377 57 training.label_smoothing 0.02109855534965593 +377 58 
model.embedding_dim 0.0 +377 58 model.c_min 0.0478332321321264 +377 58 model.c_max 6.430185135460982 +377 58 optimizer.lr 0.02240383824416612 +377 58 training.batch_size 2.0 +377 58 training.label_smoothing 0.0024647440789675877 +377 59 model.embedding_dim 1.0 +377 59 model.c_min 0.09545337455730718 +377 59 model.c_max 1.1601610677626173 +377 59 optimizer.lr 0.002225470595696665 +377 59 training.batch_size 2.0 +377 59 training.label_smoothing 0.0056456261389849546 +377 60 model.embedding_dim 2.0 +377 60 model.c_min 0.020981774070250144 +377 60 model.c_max 9.268072582199325 +377 60 optimizer.lr 0.07128981543570878 +377 60 training.batch_size 1.0 +377 60 training.label_smoothing 0.0012542914253231819 +377 61 model.embedding_dim 1.0 +377 61 model.c_min 0.019220489103834164 +377 61 model.c_max 2.7294667122942373 +377 61 optimizer.lr 0.008580083973740897 +377 61 training.batch_size 2.0 +377 61 training.label_smoothing 0.0011378966506961873 +377 62 model.embedding_dim 2.0 +377 62 model.c_min 0.03470627228011464 +377 62 model.c_max 3.94862856304459 +377 62 optimizer.lr 0.005420074231457407 +377 62 training.batch_size 1.0 +377 62 training.label_smoothing 0.09112743239904926 +377 63 model.embedding_dim 1.0 +377 63 model.c_min 0.01042781567786881 +377 63 model.c_max 9.009191552250085 +377 63 optimizer.lr 0.08205182789511717 +377 63 training.batch_size 1.0 +377 63 training.label_smoothing 0.0011867303244622008 +377 64 model.embedding_dim 2.0 +377 64 model.c_min 0.08574628154093038 +377 64 model.c_max 1.2580303864638147 +377 64 optimizer.lr 0.07828361219223723 +377 64 training.batch_size 2.0 +377 64 training.label_smoothing 0.053665186290081775 +377 65 model.embedding_dim 1.0 +377 65 model.c_min 0.0773449112316829 +377 65 model.c_max 1.8330069533874722 +377 65 optimizer.lr 0.004207581017836914 +377 65 training.batch_size 2.0 +377 65 training.label_smoothing 0.01103367065768455 +377 66 model.embedding_dim 2.0 +377 66 model.c_min 0.08094551976294712 +377 66 model.c_max 
2.594041107256368 +377 66 optimizer.lr 0.0024580740977670247 +377 66 training.batch_size 0.0 +377 66 training.label_smoothing 0.2846325441847062 +377 67 model.embedding_dim 0.0 +377 67 model.c_min 0.045455292556990344 +377 67 model.c_max 5.104159037447272 +377 67 optimizer.lr 0.007776834317218562 +377 67 training.batch_size 2.0 +377 67 training.label_smoothing 0.132876446719381 +377 68 model.embedding_dim 2.0 +377 68 model.c_min 0.07851215006432798 +377 68 model.c_max 6.044215103747963 +377 68 optimizer.lr 0.005340919564972302 +377 68 training.batch_size 1.0 +377 68 training.label_smoothing 0.006286941186286887 +377 69 model.embedding_dim 0.0 +377 69 model.c_min 0.014051798673310708 +377 69 model.c_max 5.521492059651893 +377 69 optimizer.lr 0.020863255366867315 +377 69 training.batch_size 0.0 +377 69 training.label_smoothing 0.005389916013920592 +377 70 model.embedding_dim 2.0 +377 70 model.c_min 0.051545464707083884 +377 70 model.c_max 6.429375405901819 +377 70 optimizer.lr 0.004229227005743717 +377 70 training.batch_size 0.0 +377 70 training.label_smoothing 0.0024973671098975046 +377 71 model.embedding_dim 2.0 +377 71 model.c_min 0.010884592812869874 +377 71 model.c_max 6.195399542949432 +377 71 optimizer.lr 0.011808232951383356 +377 71 training.batch_size 2.0 +377 71 training.label_smoothing 0.9513861153000962 +377 72 model.embedding_dim 0.0 +377 72 model.c_min 0.06303237107502802 +377 72 model.c_max 4.457439851858805 +377 72 optimizer.lr 0.0022372902456186446 +377 72 training.batch_size 0.0 +377 72 training.label_smoothing 0.007406224605595461 +377 73 model.embedding_dim 0.0 +377 73 model.c_min 0.02015324925038243 +377 73 model.c_max 8.791340017274932 +377 73 optimizer.lr 0.05660852423386861 +377 73 training.batch_size 2.0 +377 73 training.label_smoothing 0.18745050999831744 +377 74 model.embedding_dim 2.0 +377 74 model.c_min 0.021775878234809632 +377 74 model.c_max 1.2902368545910656 +377 74 optimizer.lr 0.06394467604195274 +377 74 training.batch_size 0.0 +377 
74 training.label_smoothing 0.003925635981404114 +377 75 model.embedding_dim 1.0 +377 75 model.c_min 0.026318224263779118 +377 75 model.c_max 5.321921997592231 +377 75 optimizer.lr 0.005124104425413352 +377 75 training.batch_size 2.0 +377 75 training.label_smoothing 0.006403879999129705 +377 76 model.embedding_dim 1.0 +377 76 model.c_min 0.08328691219294761 +377 76 model.c_max 5.7912330300457056 +377 76 optimizer.lr 0.0018285406722721163 +377 76 training.batch_size 1.0 +377 76 training.label_smoothing 0.01564644008192848 +377 77 model.embedding_dim 2.0 +377 77 model.c_min 0.03448660061610744 +377 77 model.c_max 6.223268230782968 +377 77 optimizer.lr 0.007858707781246477 +377 77 training.batch_size 2.0 +377 77 training.label_smoothing 0.13068095614167402 +377 78 model.embedding_dim 2.0 +377 78 model.c_min 0.06442625658940769 +377 78 model.c_max 7.560293045499566 +377 78 optimizer.lr 0.0434676680832321 +377 78 training.batch_size 1.0 +377 78 training.label_smoothing 0.0012406378079197718 +377 79 model.embedding_dim 1.0 +377 79 model.c_min 0.04758450515838265 +377 79 model.c_max 5.161534133545349 +377 79 optimizer.lr 0.04592360661284401 +377 79 training.batch_size 1.0 +377 79 training.label_smoothing 0.8324253447181458 +377 80 model.embedding_dim 1.0 +377 80 model.c_min 0.08334514991954246 +377 80 model.c_max 1.7746896050045589 +377 80 optimizer.lr 0.008080215026642648 +377 80 training.batch_size 1.0 +377 80 training.label_smoothing 0.1562855418815596 +377 81 model.embedding_dim 0.0 +377 81 model.c_min 0.0243617862610092 +377 81 model.c_max 5.579084960659451 +377 81 optimizer.lr 0.0010275473638464347 +377 81 training.batch_size 1.0 +377 81 training.label_smoothing 0.09629989211026212 +377 82 model.embedding_dim 0.0 +377 82 model.c_min 0.02877744571757525 +377 82 model.c_max 3.5317240523213425 +377 82 optimizer.lr 0.046357212415217154 +377 82 training.batch_size 2.0 +377 82 training.label_smoothing 0.0037338210623384005 +377 83 model.embedding_dim 2.0 +377 83 
model.c_min 0.010210659615576148 +377 83 model.c_max 1.9451395282843733 +377 83 optimizer.lr 0.005353626159534125 +377 83 training.batch_size 2.0 +377 83 training.label_smoothing 0.773582888548831 +377 84 model.embedding_dim 0.0 +377 84 model.c_min 0.06775498301308597 +377 84 model.c_max 1.9483742427959065 +377 84 optimizer.lr 0.0011668941962193957 +377 84 training.batch_size 0.0 +377 84 training.label_smoothing 0.007615752405001138 +377 85 model.embedding_dim 2.0 +377 85 model.c_min 0.09205722443267238 +377 85 model.c_max 5.389015281307281 +377 85 optimizer.lr 0.010467621239907294 +377 85 training.batch_size 0.0 +377 85 training.label_smoothing 0.21828305382578192 +377 86 model.embedding_dim 0.0 +377 86 model.c_min 0.05478637068831204 +377 86 model.c_max 6.244171542781816 +377 86 optimizer.lr 0.0186114164879345 +377 86 training.batch_size 2.0 +377 86 training.label_smoothing 0.5592181377161766 +377 87 model.embedding_dim 0.0 +377 87 model.c_min 0.027392674756181602 +377 87 model.c_max 5.829577644637218 +377 87 optimizer.lr 0.00995657673051732 +377 87 training.batch_size 2.0 +377 87 training.label_smoothing 0.004154037106232572 +377 88 model.embedding_dim 2.0 +377 88 model.c_min 0.08524854212053996 +377 88 model.c_max 5.468940003784904 +377 88 optimizer.lr 0.001004002501694982 +377 88 training.batch_size 0.0 +377 88 training.label_smoothing 0.3143196428976494 +377 89 model.embedding_dim 2.0 +377 89 model.c_min 0.016885809864026068 +377 89 model.c_max 7.542666546991192 +377 89 optimizer.lr 0.003858503174269689 +377 89 training.batch_size 2.0 +377 89 training.label_smoothing 0.018568148445106576 +377 90 model.embedding_dim 2.0 +377 90 model.c_min 0.014149777563521366 +377 90 model.c_max 1.241364345999099 +377 90 optimizer.lr 0.004602547022669145 +377 90 training.batch_size 1.0 +377 90 training.label_smoothing 0.0070419267826697 +377 91 model.embedding_dim 0.0 +377 91 model.c_min 0.022956030074189254 +377 91 model.c_max 3.888210932204548 +377 91 optimizer.lr 
0.0050620546559389966 +377 91 training.batch_size 1.0 +377 91 training.label_smoothing 0.3642192602373958 +377 92 model.embedding_dim 2.0 +377 92 model.c_min 0.05190911292833567 +377 92 model.c_max 3.5321684461732676 +377 92 optimizer.lr 0.02637424084396273 +377 92 training.batch_size 2.0 +377 92 training.label_smoothing 0.04071498442058251 +377 93 model.embedding_dim 1.0 +377 93 model.c_min 0.016333163812442444 +377 93 model.c_max 2.0175035015911993 +377 93 optimizer.lr 0.013270348664856655 +377 93 training.batch_size 1.0 +377 93 training.label_smoothing 0.0025268788072312872 +377 94 model.embedding_dim 2.0 +377 94 model.c_min 0.02854408038888127 +377 94 model.c_max 4.336116426678421 +377 94 optimizer.lr 0.004433589614576994 +377 94 training.batch_size 0.0 +377 94 training.label_smoothing 0.043434825801448904 +377 95 model.embedding_dim 2.0 +377 95 model.c_min 0.010558111292856892 +377 95 model.c_max 3.2219255273038767 +377 95 optimizer.lr 0.0010926881505441332 +377 95 training.batch_size 2.0 +377 95 training.label_smoothing 0.1717368847530727 +377 96 model.embedding_dim 1.0 +377 96 model.c_min 0.05867536389675462 +377 96 model.c_max 7.484288070143348 +377 96 optimizer.lr 0.015587476996142127 +377 96 training.batch_size 1.0 +377 96 training.label_smoothing 0.035550501883460525 +377 97 model.embedding_dim 1.0 +377 97 model.c_min 0.06412978598114401 +377 97 model.c_max 2.6028527576198024 +377 97 optimizer.lr 0.0019585287557355425 +377 97 training.batch_size 2.0 +377 97 training.label_smoothing 0.021686029675609363 +377 98 model.embedding_dim 0.0 +377 98 model.c_min 0.054496425117278105 +377 98 model.c_max 7.202333932474711 +377 98 optimizer.lr 0.00530056757212528 +377 98 training.batch_size 2.0 +377 98 training.label_smoothing 0.0028951257069391514 +377 99 model.embedding_dim 2.0 +377 99 model.c_min 0.027136166312083584 +377 99 model.c_max 4.722352157160829 +377 99 optimizer.lr 0.027462626963189265 +377 99 training.batch_size 2.0 +377 99 training.label_smoothing 
0.001478410254862302 +377 100 model.embedding_dim 1.0 +377 100 model.c_min 0.04382132379227811 +377 100 model.c_max 2.917227165775504 +377 100 optimizer.lr 0.014271430134507442 +377 100 training.batch_size 1.0 +377 100 training.label_smoothing 0.005725024475728584 +377 1 dataset """kinships""" +377 1 model """kg2e""" +377 1 loss """bceaftersigmoid""" +377 1 regularizer """no""" +377 1 optimizer """adam""" +377 1 training_loop """lcwa""" +377 1 evaluator """rankbased""" +377 2 dataset """kinships""" +377 2 model """kg2e""" +377 2 loss """bceaftersigmoid""" +377 2 regularizer """no""" +377 2 optimizer """adam""" +377 2 training_loop """lcwa""" +377 2 evaluator """rankbased""" +377 3 dataset """kinships""" +377 3 model """kg2e""" +377 3 loss """bceaftersigmoid""" +377 3 regularizer """no""" +377 3 optimizer """adam""" +377 3 training_loop """lcwa""" +377 3 evaluator """rankbased""" +377 4 dataset """kinships""" +377 4 model """kg2e""" +377 4 loss """bceaftersigmoid""" +377 4 regularizer """no""" +377 4 optimizer """adam""" +377 4 training_loop """lcwa""" +377 4 evaluator """rankbased""" +377 5 dataset """kinships""" +377 5 model """kg2e""" +377 5 loss """bceaftersigmoid""" +377 5 regularizer """no""" +377 5 optimizer """adam""" +377 5 training_loop """lcwa""" +377 5 evaluator """rankbased""" +377 6 dataset """kinships""" +377 6 model """kg2e""" +377 6 loss """bceaftersigmoid""" +377 6 regularizer """no""" +377 6 optimizer """adam""" +377 6 training_loop """lcwa""" +377 6 evaluator """rankbased""" +377 7 dataset """kinships""" +377 7 model """kg2e""" +377 7 loss """bceaftersigmoid""" +377 7 regularizer """no""" +377 7 optimizer """adam""" +377 7 training_loop """lcwa""" +377 7 evaluator """rankbased""" +377 8 dataset """kinships""" +377 8 model """kg2e""" +377 8 loss """bceaftersigmoid""" +377 8 regularizer """no""" +377 8 optimizer """adam""" +377 8 training_loop """lcwa""" +377 8 evaluator """rankbased""" +377 9 dataset """kinships""" +377 9 model """kg2e""" +377 9 
loss """bceaftersigmoid""" +377 9 regularizer """no""" +377 9 optimizer """adam""" +377 9 training_loop """lcwa""" +377 9 evaluator """rankbased""" +377 10 dataset """kinships""" +377 10 model """kg2e""" +377 10 loss """bceaftersigmoid""" +377 10 regularizer """no""" +377 10 optimizer """adam""" +377 10 training_loop """lcwa""" +377 10 evaluator """rankbased""" +377 11 dataset """kinships""" +377 11 model """kg2e""" +377 11 loss """bceaftersigmoid""" +377 11 regularizer """no""" +377 11 optimizer """adam""" +377 11 training_loop """lcwa""" +377 11 evaluator """rankbased""" +377 12 dataset """kinships""" +377 12 model """kg2e""" +377 12 loss """bceaftersigmoid""" +377 12 regularizer """no""" +377 12 optimizer """adam""" +377 12 training_loop """lcwa""" +377 12 evaluator """rankbased""" +377 13 dataset """kinships""" +377 13 model """kg2e""" +377 13 loss """bceaftersigmoid""" +377 13 regularizer """no""" +377 13 optimizer """adam""" +377 13 training_loop """lcwa""" +377 13 evaluator """rankbased""" +377 14 dataset """kinships""" +377 14 model """kg2e""" +377 14 loss """bceaftersigmoid""" +377 14 regularizer """no""" +377 14 optimizer """adam""" +377 14 training_loop """lcwa""" +377 14 evaluator """rankbased""" +377 15 dataset """kinships""" +377 15 model """kg2e""" +377 15 loss """bceaftersigmoid""" +377 15 regularizer """no""" +377 15 optimizer """adam""" +377 15 training_loop """lcwa""" +377 15 evaluator """rankbased""" +377 16 dataset """kinships""" +377 16 model """kg2e""" +377 16 loss """bceaftersigmoid""" +377 16 regularizer """no""" +377 16 optimizer """adam""" +377 16 training_loop """lcwa""" +377 16 evaluator """rankbased""" +377 17 dataset """kinships""" +377 17 model """kg2e""" +377 17 loss """bceaftersigmoid""" +377 17 regularizer """no""" +377 17 optimizer """adam""" +377 17 training_loop """lcwa""" +377 17 evaluator """rankbased""" +377 18 dataset """kinships""" +377 18 model """kg2e""" +377 18 loss """bceaftersigmoid""" +377 18 regularizer """no""" 
+377 18 optimizer """adam""" +377 18 training_loop """lcwa""" +377 18 evaluator """rankbased""" +377 19 dataset """kinships""" +377 19 model """kg2e""" +377 19 loss """bceaftersigmoid""" +377 19 regularizer """no""" +377 19 optimizer """adam""" +377 19 training_loop """lcwa""" +377 19 evaluator """rankbased""" +377 20 dataset """kinships""" +377 20 model """kg2e""" +377 20 loss """bceaftersigmoid""" +377 20 regularizer """no""" +377 20 optimizer """adam""" +377 20 training_loop """lcwa""" +377 20 evaluator """rankbased""" +377 21 dataset """kinships""" +377 21 model """kg2e""" +377 21 loss """bceaftersigmoid""" +377 21 regularizer """no""" +377 21 optimizer """adam""" +377 21 training_loop """lcwa""" +377 21 evaluator """rankbased""" +377 22 dataset """kinships""" +377 22 model """kg2e""" +377 22 loss """bceaftersigmoid""" +377 22 regularizer """no""" +377 22 optimizer """adam""" +377 22 training_loop """lcwa""" +377 22 evaluator """rankbased""" +377 23 dataset """kinships""" +377 23 model """kg2e""" +377 23 loss """bceaftersigmoid""" +377 23 regularizer """no""" +377 23 optimizer """adam""" +377 23 training_loop """lcwa""" +377 23 evaluator """rankbased""" +377 24 dataset """kinships""" +377 24 model """kg2e""" +377 24 loss """bceaftersigmoid""" +377 24 regularizer """no""" +377 24 optimizer """adam""" +377 24 training_loop """lcwa""" +377 24 evaluator """rankbased""" +377 25 dataset """kinships""" +377 25 model """kg2e""" +377 25 loss """bceaftersigmoid""" +377 25 regularizer """no""" +377 25 optimizer """adam""" +377 25 training_loop """lcwa""" +377 25 evaluator """rankbased""" +377 26 dataset """kinships""" +377 26 model """kg2e""" +377 26 loss """bceaftersigmoid""" +377 26 regularizer """no""" +377 26 optimizer """adam""" +377 26 training_loop """lcwa""" +377 26 evaluator """rankbased""" +377 27 dataset """kinships""" +377 27 model """kg2e""" +377 27 loss """bceaftersigmoid""" +377 27 regularizer """no""" +377 27 optimizer """adam""" +377 27 training_loop 
"""lcwa""" +377 27 evaluator """rankbased""" +377 28 dataset """kinships""" +377 28 model """kg2e""" +377 28 loss """bceaftersigmoid""" +377 28 regularizer """no""" +377 28 optimizer """adam""" +377 28 training_loop """lcwa""" +377 28 evaluator """rankbased""" +377 29 dataset """kinships""" +377 29 model """kg2e""" +377 29 loss """bceaftersigmoid""" +377 29 regularizer """no""" +377 29 optimizer """adam""" +377 29 training_loop """lcwa""" +377 29 evaluator """rankbased""" +377 30 dataset """kinships""" +377 30 model """kg2e""" +377 30 loss """bceaftersigmoid""" +377 30 regularizer """no""" +377 30 optimizer """adam""" +377 30 training_loop """lcwa""" +377 30 evaluator """rankbased""" +377 31 dataset """kinships""" +377 31 model """kg2e""" +377 31 loss """bceaftersigmoid""" +377 31 regularizer """no""" +377 31 optimizer """adam""" +377 31 training_loop """lcwa""" +377 31 evaluator """rankbased""" +377 32 dataset """kinships""" +377 32 model """kg2e""" +377 32 loss """bceaftersigmoid""" +377 32 regularizer """no""" +377 32 optimizer """adam""" +377 32 training_loop """lcwa""" +377 32 evaluator """rankbased""" +377 33 dataset """kinships""" +377 33 model """kg2e""" +377 33 loss """bceaftersigmoid""" +377 33 regularizer """no""" +377 33 optimizer """adam""" +377 33 training_loop """lcwa""" +377 33 evaluator """rankbased""" +377 34 dataset """kinships""" +377 34 model """kg2e""" +377 34 loss """bceaftersigmoid""" +377 34 regularizer """no""" +377 34 optimizer """adam""" +377 34 training_loop """lcwa""" +377 34 evaluator """rankbased""" +377 35 dataset """kinships""" +377 35 model """kg2e""" +377 35 loss """bceaftersigmoid""" +377 35 regularizer """no""" +377 35 optimizer """adam""" +377 35 training_loop """lcwa""" +377 35 evaluator """rankbased""" +377 36 dataset """kinships""" +377 36 model """kg2e""" +377 36 loss """bceaftersigmoid""" +377 36 regularizer """no""" +377 36 optimizer """adam""" +377 36 training_loop """lcwa""" +377 36 evaluator """rankbased""" +377 37 
dataset """kinships""" +377 37 model """kg2e""" +377 37 loss """bceaftersigmoid""" +377 37 regularizer """no""" +377 37 optimizer """adam""" +377 37 training_loop """lcwa""" +377 37 evaluator """rankbased""" +377 38 dataset """kinships""" +377 38 model """kg2e""" +377 38 loss """bceaftersigmoid""" +377 38 regularizer """no""" +377 38 optimizer """adam""" +377 38 training_loop """lcwa""" +377 38 evaluator """rankbased""" +377 39 dataset """kinships""" +377 39 model """kg2e""" +377 39 loss """bceaftersigmoid""" +377 39 regularizer """no""" +377 39 optimizer """adam""" +377 39 training_loop """lcwa""" +377 39 evaluator """rankbased""" +377 40 dataset """kinships""" +377 40 model """kg2e""" +377 40 loss """bceaftersigmoid""" +377 40 regularizer """no""" +377 40 optimizer """adam""" +377 40 training_loop """lcwa""" +377 40 evaluator """rankbased""" +377 41 dataset """kinships""" +377 41 model """kg2e""" +377 41 loss """bceaftersigmoid""" +377 41 regularizer """no""" +377 41 optimizer """adam""" +377 41 training_loop """lcwa""" +377 41 evaluator """rankbased""" +377 42 dataset """kinships""" +377 42 model """kg2e""" +377 42 loss """bceaftersigmoid""" +377 42 regularizer """no""" +377 42 optimizer """adam""" +377 42 training_loop """lcwa""" +377 42 evaluator """rankbased""" +377 43 dataset """kinships""" +377 43 model """kg2e""" +377 43 loss """bceaftersigmoid""" +377 43 regularizer """no""" +377 43 optimizer """adam""" +377 43 training_loop """lcwa""" +377 43 evaluator """rankbased""" +377 44 dataset """kinships""" +377 44 model """kg2e""" +377 44 loss """bceaftersigmoid""" +377 44 regularizer """no""" +377 44 optimizer """adam""" +377 44 training_loop """lcwa""" +377 44 evaluator """rankbased""" +377 45 dataset """kinships""" +377 45 model """kg2e""" +377 45 loss """bceaftersigmoid""" +377 45 regularizer """no""" +377 45 optimizer """adam""" +377 45 training_loop """lcwa""" +377 45 evaluator """rankbased""" +377 46 dataset """kinships""" +377 46 model """kg2e""" +377 46 
loss """bceaftersigmoid""" +377 46 regularizer """no""" +377 46 optimizer """adam""" +377 46 training_loop """lcwa""" +377 46 evaluator """rankbased""" +377 47 dataset """kinships""" +377 47 model """kg2e""" +377 47 loss """bceaftersigmoid""" +377 47 regularizer """no""" +377 47 optimizer """adam""" +377 47 training_loop """lcwa""" +377 47 evaluator """rankbased""" +377 48 dataset """kinships""" +377 48 model """kg2e""" +377 48 loss """bceaftersigmoid""" +377 48 regularizer """no""" +377 48 optimizer """adam""" +377 48 training_loop """lcwa""" +377 48 evaluator """rankbased""" +377 49 dataset """kinships""" +377 49 model """kg2e""" +377 49 loss """bceaftersigmoid""" +377 49 regularizer """no""" +377 49 optimizer """adam""" +377 49 training_loop """lcwa""" +377 49 evaluator """rankbased""" +377 50 dataset """kinships""" +377 50 model """kg2e""" +377 50 loss """bceaftersigmoid""" +377 50 regularizer """no""" +377 50 optimizer """adam""" +377 50 training_loop """lcwa""" +377 50 evaluator """rankbased""" +377 51 dataset """kinships""" +377 51 model """kg2e""" +377 51 loss """bceaftersigmoid""" +377 51 regularizer """no""" +377 51 optimizer """adam""" +377 51 training_loop """lcwa""" +377 51 evaluator """rankbased""" +377 52 dataset """kinships""" +377 52 model """kg2e""" +377 52 loss """bceaftersigmoid""" +377 52 regularizer """no""" +377 52 optimizer """adam""" +377 52 training_loop """lcwa""" +377 52 evaluator """rankbased""" +377 53 dataset """kinships""" +377 53 model """kg2e""" +377 53 loss """bceaftersigmoid""" +377 53 regularizer """no""" +377 53 optimizer """adam""" +377 53 training_loop """lcwa""" +377 53 evaluator """rankbased""" +377 54 dataset """kinships""" +377 54 model """kg2e""" +377 54 loss """bceaftersigmoid""" +377 54 regularizer """no""" +377 54 optimizer """adam""" +377 54 training_loop """lcwa""" +377 54 evaluator """rankbased""" +377 55 dataset """kinships""" +377 55 model """kg2e""" +377 55 loss """bceaftersigmoid""" +377 55 regularizer """no""" 
+377 55 optimizer """adam""" +377 55 training_loop """lcwa""" +377 55 evaluator """rankbased""" +377 56 dataset """kinships""" +377 56 model """kg2e""" +377 56 loss """bceaftersigmoid""" +377 56 regularizer """no""" +377 56 optimizer """adam""" +377 56 training_loop """lcwa""" +377 56 evaluator """rankbased""" +377 57 dataset """kinships""" +377 57 model """kg2e""" +377 57 loss """bceaftersigmoid""" +377 57 regularizer """no""" +377 57 optimizer """adam""" +377 57 training_loop """lcwa""" +377 57 evaluator """rankbased""" +377 58 dataset """kinships""" +377 58 model """kg2e""" +377 58 loss """bceaftersigmoid""" +377 58 regularizer """no""" +377 58 optimizer """adam""" +377 58 training_loop """lcwa""" +377 58 evaluator """rankbased""" +377 59 dataset """kinships""" +377 59 model """kg2e""" +377 59 loss """bceaftersigmoid""" +377 59 regularizer """no""" +377 59 optimizer """adam""" +377 59 training_loop """lcwa""" +377 59 evaluator """rankbased""" +377 60 dataset """kinships""" +377 60 model """kg2e""" +377 60 loss """bceaftersigmoid""" +377 60 regularizer """no""" +377 60 optimizer """adam""" +377 60 training_loop """lcwa""" +377 60 evaluator """rankbased""" +377 61 dataset """kinships""" +377 61 model """kg2e""" +377 61 loss """bceaftersigmoid""" +377 61 regularizer """no""" +377 61 optimizer """adam""" +377 61 training_loop """lcwa""" +377 61 evaluator """rankbased""" +377 62 dataset """kinships""" +377 62 model """kg2e""" +377 62 loss """bceaftersigmoid""" +377 62 regularizer """no""" +377 62 optimizer """adam""" +377 62 training_loop """lcwa""" +377 62 evaluator """rankbased""" +377 63 dataset """kinships""" +377 63 model """kg2e""" +377 63 loss """bceaftersigmoid""" +377 63 regularizer """no""" +377 63 optimizer """adam""" +377 63 training_loop """lcwa""" +377 63 evaluator """rankbased""" +377 64 dataset """kinships""" +377 64 model """kg2e""" +377 64 loss """bceaftersigmoid""" +377 64 regularizer """no""" +377 64 optimizer """adam""" +377 64 training_loop 
"""lcwa""" +377 64 evaluator """rankbased""" +377 65 dataset """kinships""" +377 65 model """kg2e""" +377 65 loss """bceaftersigmoid""" +377 65 regularizer """no""" +377 65 optimizer """adam""" +377 65 training_loop """lcwa""" +377 65 evaluator """rankbased""" +377 66 dataset """kinships""" +377 66 model """kg2e""" +377 66 loss """bceaftersigmoid""" +377 66 regularizer """no""" +377 66 optimizer """adam""" +377 66 training_loop """lcwa""" +377 66 evaluator """rankbased""" +377 67 dataset """kinships""" +377 67 model """kg2e""" +377 67 loss """bceaftersigmoid""" +377 67 regularizer """no""" +377 67 optimizer """adam""" +377 67 training_loop """lcwa""" +377 67 evaluator """rankbased""" +377 68 dataset """kinships""" +377 68 model """kg2e""" +377 68 loss """bceaftersigmoid""" +377 68 regularizer """no""" +377 68 optimizer """adam""" +377 68 training_loop """lcwa""" +377 68 evaluator """rankbased""" +377 69 dataset """kinships""" +377 69 model """kg2e""" +377 69 loss """bceaftersigmoid""" +377 69 regularizer """no""" +377 69 optimizer """adam""" +377 69 training_loop """lcwa""" +377 69 evaluator """rankbased""" +377 70 dataset """kinships""" +377 70 model """kg2e""" +377 70 loss """bceaftersigmoid""" +377 70 regularizer """no""" +377 70 optimizer """adam""" +377 70 training_loop """lcwa""" +377 70 evaluator """rankbased""" +377 71 dataset """kinships""" +377 71 model """kg2e""" +377 71 loss """bceaftersigmoid""" +377 71 regularizer """no""" +377 71 optimizer """adam""" +377 71 training_loop """lcwa""" +377 71 evaluator """rankbased""" +377 72 dataset """kinships""" +377 72 model """kg2e""" +377 72 loss """bceaftersigmoid""" +377 72 regularizer """no""" +377 72 optimizer """adam""" +377 72 training_loop """lcwa""" +377 72 evaluator """rankbased""" +377 73 dataset """kinships""" +377 73 model """kg2e""" +377 73 loss """bceaftersigmoid""" +377 73 regularizer """no""" +377 73 optimizer """adam""" +377 73 training_loop """lcwa""" +377 73 evaluator """rankbased""" +377 74 
dataset """kinships""" +377 74 model """kg2e""" +377 74 loss """bceaftersigmoid""" +377 74 regularizer """no""" +377 74 optimizer """adam""" +377 74 training_loop """lcwa""" +377 74 evaluator """rankbased""" +377 75 dataset """kinships""" +377 75 model """kg2e""" +377 75 loss """bceaftersigmoid""" +377 75 regularizer """no""" +377 75 optimizer """adam""" +377 75 training_loop """lcwa""" +377 75 evaluator """rankbased""" +377 76 dataset """kinships""" +377 76 model """kg2e""" +377 76 loss """bceaftersigmoid""" +377 76 regularizer """no""" +377 76 optimizer """adam""" +377 76 training_loop """lcwa""" +377 76 evaluator """rankbased""" +377 77 dataset """kinships""" +377 77 model """kg2e""" +377 77 loss """bceaftersigmoid""" +377 77 regularizer """no""" +377 77 optimizer """adam""" +377 77 training_loop """lcwa""" +377 77 evaluator """rankbased""" +377 78 dataset """kinships""" +377 78 model """kg2e""" +377 78 loss """bceaftersigmoid""" +377 78 regularizer """no""" +377 78 optimizer """adam""" +377 78 training_loop """lcwa""" +377 78 evaluator """rankbased""" +377 79 dataset """kinships""" +377 79 model """kg2e""" +377 79 loss """bceaftersigmoid""" +377 79 regularizer """no""" +377 79 optimizer """adam""" +377 79 training_loop """lcwa""" +377 79 evaluator """rankbased""" +377 80 dataset """kinships""" +377 80 model """kg2e""" +377 80 loss """bceaftersigmoid""" +377 80 regularizer """no""" +377 80 optimizer """adam""" +377 80 training_loop """lcwa""" +377 80 evaluator """rankbased""" +377 81 dataset """kinships""" +377 81 model """kg2e""" +377 81 loss """bceaftersigmoid""" +377 81 regularizer """no""" +377 81 optimizer """adam""" +377 81 training_loop """lcwa""" +377 81 evaluator """rankbased""" +377 82 dataset """kinships""" +377 82 model """kg2e""" +377 82 loss """bceaftersigmoid""" +377 82 regularizer """no""" +377 82 optimizer """adam""" +377 82 training_loop """lcwa""" +377 82 evaluator """rankbased""" +377 83 dataset """kinships""" +377 83 model """kg2e""" +377 83 
loss """bceaftersigmoid""" +377 83 regularizer """no""" +377 83 optimizer """adam""" +377 83 training_loop """lcwa""" +377 83 evaluator """rankbased""" +377 84 dataset """kinships""" +377 84 model """kg2e""" +377 84 loss """bceaftersigmoid""" +377 84 regularizer """no""" +377 84 optimizer """adam""" +377 84 training_loop """lcwa""" +377 84 evaluator """rankbased""" +377 85 dataset """kinships""" +377 85 model """kg2e""" +377 85 loss """bceaftersigmoid""" +377 85 regularizer """no""" +377 85 optimizer """adam""" +377 85 training_loop """lcwa""" +377 85 evaluator """rankbased""" +377 86 dataset """kinships""" +377 86 model """kg2e""" +377 86 loss """bceaftersigmoid""" +377 86 regularizer """no""" +377 86 optimizer """adam""" +377 86 training_loop """lcwa""" +377 86 evaluator """rankbased""" +377 87 dataset """kinships""" +377 87 model """kg2e""" +377 87 loss """bceaftersigmoid""" +377 87 regularizer """no""" +377 87 optimizer """adam""" +377 87 training_loop """lcwa""" +377 87 evaluator """rankbased""" +377 88 dataset """kinships""" +377 88 model """kg2e""" +377 88 loss """bceaftersigmoid""" +377 88 regularizer """no""" +377 88 optimizer """adam""" +377 88 training_loop """lcwa""" +377 88 evaluator """rankbased""" +377 89 dataset """kinships""" +377 89 model """kg2e""" +377 89 loss """bceaftersigmoid""" +377 89 regularizer """no""" +377 89 optimizer """adam""" +377 89 training_loop """lcwa""" +377 89 evaluator """rankbased""" +377 90 dataset """kinships""" +377 90 model """kg2e""" +377 90 loss """bceaftersigmoid""" +377 90 regularizer """no""" +377 90 optimizer """adam""" +377 90 training_loop """lcwa""" +377 90 evaluator """rankbased""" +377 91 dataset """kinships""" +377 91 model """kg2e""" +377 91 loss """bceaftersigmoid""" +377 91 regularizer """no""" +377 91 optimizer """adam""" +377 91 training_loop """lcwa""" +377 91 evaluator """rankbased""" +377 92 dataset """kinships""" +377 92 model """kg2e""" +377 92 loss """bceaftersigmoid""" +377 92 regularizer """no""" 
+377 92 optimizer """adam""" +377 92 training_loop """lcwa""" +377 92 evaluator """rankbased""" +377 93 dataset """kinships""" +377 93 model """kg2e""" +377 93 loss """bceaftersigmoid""" +377 93 regularizer """no""" +377 93 optimizer """adam""" +377 93 training_loop """lcwa""" +377 93 evaluator """rankbased""" +377 94 dataset """kinships""" +377 94 model """kg2e""" +377 94 loss """bceaftersigmoid""" +377 94 regularizer """no""" +377 94 optimizer """adam""" +377 94 training_loop """lcwa""" +377 94 evaluator """rankbased""" +377 95 dataset """kinships""" +377 95 model """kg2e""" +377 95 loss """bceaftersigmoid""" +377 95 regularizer """no""" +377 95 optimizer """adam""" +377 95 training_loop """lcwa""" +377 95 evaluator """rankbased""" +377 96 dataset """kinships""" +377 96 model """kg2e""" +377 96 loss """bceaftersigmoid""" +377 96 regularizer """no""" +377 96 optimizer """adam""" +377 96 training_loop """lcwa""" +377 96 evaluator """rankbased""" +377 97 dataset """kinships""" +377 97 model """kg2e""" +377 97 loss """bceaftersigmoid""" +377 97 regularizer """no""" +377 97 optimizer """adam""" +377 97 training_loop """lcwa""" +377 97 evaluator """rankbased""" +377 98 dataset """kinships""" +377 98 model """kg2e""" +377 98 loss """bceaftersigmoid""" +377 98 regularizer """no""" +377 98 optimizer """adam""" +377 98 training_loop """lcwa""" +377 98 evaluator """rankbased""" +377 99 dataset """kinships""" +377 99 model """kg2e""" +377 99 loss """bceaftersigmoid""" +377 99 regularizer """no""" +377 99 optimizer """adam""" +377 99 training_loop """lcwa""" +377 99 evaluator """rankbased""" +377 100 dataset """kinships""" +377 100 model """kg2e""" +377 100 loss """bceaftersigmoid""" +377 100 regularizer """no""" +377 100 optimizer """adam""" +377 100 training_loop """lcwa""" +377 100 evaluator """rankbased""" +378 1 model.embedding_dim 2.0 +378 1 model.c_min 0.015173918845961289 +378 1 model.c_max 8.462174981431108 +378 1 optimizer.lr 0.03849712834710581 +378 1 
training.batch_size 1.0 +378 1 training.label_smoothing 0.0030638026174041864 +378 2 model.embedding_dim 1.0 +378 2 model.c_min 0.01948582912010334 +378 2 model.c_max 3.0765164286168636 +378 2 optimizer.lr 0.04648565467248279 +378 2 training.batch_size 0.0 +378 2 training.label_smoothing 0.007741393041632635 +378 3 model.embedding_dim 0.0 +378 3 model.c_min 0.040239444990328194 +378 3 model.c_max 5.768544614635729 +378 3 optimizer.lr 0.0010393975205489664 +378 3 training.batch_size 1.0 +378 3 training.label_smoothing 0.014326470859995236 +378 4 model.embedding_dim 1.0 +378 4 model.c_min 0.03333318897352931 +378 4 model.c_max 2.2213418459009278 +378 4 optimizer.lr 0.06694334577645905 +378 4 training.batch_size 0.0 +378 4 training.label_smoothing 0.005233634943924376 +378 5 model.embedding_dim 2.0 +378 5 model.c_min 0.020969698538109 +378 5 model.c_max 7.6623789028368146 +378 5 optimizer.lr 0.0038485810248243494 +378 5 training.batch_size 2.0 +378 5 training.label_smoothing 0.08497176765571553 +378 6 model.embedding_dim 0.0 +378 6 model.c_min 0.011160650460840279 +378 6 model.c_max 8.519141122545234 +378 6 optimizer.lr 0.005523732626551452 +378 6 training.batch_size 0.0 +378 6 training.label_smoothing 0.02660756963452708 +378 7 model.embedding_dim 1.0 +378 7 model.c_min 0.017646885612602508 +378 7 model.c_max 7.70559693466848 +378 7 optimizer.lr 0.038292692265695295 +378 7 training.batch_size 2.0 +378 7 training.label_smoothing 0.097738907379768 +378 8 model.embedding_dim 2.0 +378 8 model.c_min 0.08450713174466491 +378 8 model.c_max 6.748587135010238 +378 8 optimizer.lr 0.011115707654938297 +378 8 training.batch_size 1.0 +378 8 training.label_smoothing 0.8032624600389214 +378 9 model.embedding_dim 2.0 +378 9 model.c_min 0.05773924884876066 +378 9 model.c_max 2.983579757651232 +378 9 optimizer.lr 0.0039741394668385 +378 9 training.batch_size 1.0 +378 9 training.label_smoothing 0.009418728262793342 +378 10 model.embedding_dim 0.0 +378 10 model.c_min 
0.011483597183900787 +378 10 model.c_max 1.9794619491607635 +378 10 optimizer.lr 0.04319818144318976 +378 10 training.batch_size 1.0 +378 10 training.label_smoothing 0.016794836047349694 +378 11 model.embedding_dim 2.0 +378 11 model.c_min 0.044785391028870164 +378 11 model.c_max 3.236890752236628 +378 11 optimizer.lr 0.004579074174567346 +378 11 training.batch_size 0.0 +378 11 training.label_smoothing 0.7301228474444689 +378 12 model.embedding_dim 2.0 +378 12 model.c_min 0.08140544256350248 +378 12 model.c_max 9.825229253947708 +378 12 optimizer.lr 0.03562579088318194 +378 12 training.batch_size 1.0 +378 12 training.label_smoothing 0.14529031936835032 +378 13 model.embedding_dim 1.0 +378 13 model.c_min 0.05797420773407579 +378 13 model.c_max 3.220028338433567 +378 13 optimizer.lr 0.042666064913558 +378 13 training.batch_size 1.0 +378 13 training.label_smoothing 0.406562292694561 +378 14 model.embedding_dim 1.0 +378 14 model.c_min 0.046081991895949644 +378 14 model.c_max 9.638442056393348 +378 14 optimizer.lr 0.03524994590702612 +378 14 training.batch_size 2.0 +378 14 training.label_smoothing 0.1636344140895413 +378 15 model.embedding_dim 2.0 +378 15 model.c_min 0.02055559278477347 +378 15 model.c_max 1.5519823973574765 +378 15 optimizer.lr 0.07760692091676628 +378 15 training.batch_size 2.0 +378 15 training.label_smoothing 0.0038293849726495255 +378 16 model.embedding_dim 1.0 +378 16 model.c_min 0.05885431011226018 +378 16 model.c_max 8.998231956766311 +378 16 optimizer.lr 0.07390879836493276 +378 16 training.batch_size 2.0 +378 16 training.label_smoothing 0.0010727751030772039 +378 17 model.embedding_dim 0.0 +378 17 model.c_min 0.0953106875956337 +378 17 model.c_max 9.485737431677173 +378 17 optimizer.lr 0.0021089119424685765 +378 17 training.batch_size 2.0 +378 17 training.label_smoothing 0.0011001153331788856 +378 18 model.embedding_dim 2.0 +378 18 model.c_min 0.024824350378272138 +378 18 model.c_max 8.865372700384764 +378 18 optimizer.lr 0.019311130955400796 
+378 18 training.batch_size 2.0 +378 18 training.label_smoothing 0.003458002418008237 +378 19 model.embedding_dim 0.0 +378 19 model.c_min 0.037425706444153585 +378 19 model.c_max 7.485552890827189 +378 19 optimizer.lr 0.007827753183912342 +378 19 training.batch_size 1.0 +378 19 training.label_smoothing 0.2452964086320344 +378 20 model.embedding_dim 0.0 +378 20 model.c_min 0.01391345864871561 +378 20 model.c_max 1.1051441600565575 +378 20 optimizer.lr 0.0015731510334096943 +378 20 training.batch_size 0.0 +378 20 training.label_smoothing 0.18820921202631247 +378 21 model.embedding_dim 0.0 +378 21 model.c_min 0.03989218641424091 +378 21 model.c_max 5.596728525112469 +378 21 optimizer.lr 0.0013739363608058673 +378 21 training.batch_size 1.0 +378 21 training.label_smoothing 0.37618914617983634 +378 22 model.embedding_dim 1.0 +378 22 model.c_min 0.019809671786337378 +378 22 model.c_max 3.948617501845874 +378 22 optimizer.lr 0.03828217745724033 +378 22 training.batch_size 0.0 +378 22 training.label_smoothing 0.01571587429259106 +378 23 model.embedding_dim 2.0 +378 23 model.c_min 0.010468248670584535 +378 23 model.c_max 1.0261259190870686 +378 23 optimizer.lr 0.003718773725960901 +378 23 training.batch_size 1.0 +378 23 training.label_smoothing 0.269234056173386 +378 24 model.embedding_dim 2.0 +378 24 model.c_min 0.03450087859127328 +378 24 model.c_max 2.5724470285489076 +378 24 optimizer.lr 0.009723762339117903 +378 24 training.batch_size 2.0 +378 24 training.label_smoothing 0.003407925382687725 +378 25 model.embedding_dim 0.0 +378 25 model.c_min 0.08039511873082483 +378 25 model.c_max 6.801257864179419 +378 25 optimizer.lr 0.002553535873771214 +378 25 training.batch_size 0.0 +378 25 training.label_smoothing 0.02135055512152325 +378 26 model.embedding_dim 1.0 +378 26 model.c_min 0.07224660398757972 +378 26 model.c_max 4.620437288954454 +378 26 optimizer.lr 0.0010135344835475557 +378 26 training.batch_size 1.0 +378 26 training.label_smoothing 0.007271155440842819 +378 27 
model.embedding_dim 1.0 +378 27 model.c_min 0.016351471735431646 +378 27 model.c_max 2.771041246270071 +378 27 optimizer.lr 0.0022328868342657775 +378 27 training.batch_size 1.0 +378 27 training.label_smoothing 0.017759711963460722 +378 28 model.embedding_dim 2.0 +378 28 model.c_min 0.05104712729647387 +378 28 model.c_max 5.8953262367187085 +378 28 optimizer.lr 0.0617303761579215 +378 28 training.batch_size 0.0 +378 28 training.label_smoothing 0.20392382898581554 +378 29 model.embedding_dim 2.0 +378 29 model.c_min 0.015228397894243495 +378 29 model.c_max 6.059419690771375 +378 29 optimizer.lr 0.016806217125652938 +378 29 training.batch_size 2.0 +378 29 training.label_smoothing 0.5574475934929481 +378 30 model.embedding_dim 0.0 +378 30 model.c_min 0.04365344211791499 +378 30 model.c_max 6.673891056902009 +378 30 optimizer.lr 0.01853341730629838 +378 30 training.batch_size 0.0 +378 30 training.label_smoothing 0.19338070035763774 +378 31 model.embedding_dim 0.0 +378 31 model.c_min 0.031039236952709385 +378 31 model.c_max 3.713953726613134 +378 31 optimizer.lr 0.03730186692148394 +378 31 training.batch_size 1.0 +378 31 training.label_smoothing 0.010281006088630891 +378 32 model.embedding_dim 0.0 +378 32 model.c_min 0.0212578307113176 +378 32 model.c_max 1.5048748104410137 +378 32 optimizer.lr 0.002262111439254117 +378 32 training.batch_size 2.0 +378 32 training.label_smoothing 0.16390429733833203 +378 33 model.embedding_dim 2.0 +378 33 model.c_min 0.04721897884603894 +378 33 model.c_max 2.1277756943443653 +378 33 optimizer.lr 0.010509357751535992 +378 33 training.batch_size 0.0 +378 33 training.label_smoothing 0.08639944600127106 +378 34 model.embedding_dim 1.0 +378 34 model.c_min 0.053236065423869626 +378 34 model.c_max 6.193129610533002 +378 34 optimizer.lr 0.0020511545372462 +378 34 training.batch_size 0.0 +378 34 training.label_smoothing 0.24194457449555845 +378 35 model.embedding_dim 0.0 +378 35 model.c_min 0.04235739850035129 +378 35 model.c_max 8.522464645809897 
+378 35 optimizer.lr 0.0010119143950020446 +378 35 training.batch_size 0.0 +378 35 training.label_smoothing 0.005191302833358455 +378 36 model.embedding_dim 2.0 +378 36 model.c_min 0.02186839610956289 +378 36 model.c_max 3.4061815695032593 +378 36 optimizer.lr 0.0037859856584636915 +378 36 training.batch_size 1.0 +378 36 training.label_smoothing 0.015037799151761578 +378 37 model.embedding_dim 2.0 +378 37 model.c_min 0.04097686034190221 +378 37 model.c_max 1.5166773145672243 +378 37 optimizer.lr 0.09837513708121917 +378 37 training.batch_size 2.0 +378 37 training.label_smoothing 0.5256168301886849 +378 38 model.embedding_dim 0.0 +378 38 model.c_min 0.095635949549619 +378 38 model.c_max 1.4256723588224713 +378 38 optimizer.lr 0.0016075062263565614 +378 38 training.batch_size 0.0 +378 38 training.label_smoothing 0.01289384292560526 +378 39 model.embedding_dim 2.0 +378 39 model.c_min 0.014561534942692703 +378 39 model.c_max 5.001973406813127 +378 39 optimizer.lr 0.00189088491735736 +378 39 training.batch_size 1.0 +378 39 training.label_smoothing 0.029950607293276955 +378 40 model.embedding_dim 1.0 +378 40 model.c_min 0.062424195836846184 +378 40 model.c_max 9.91900232481056 +378 40 optimizer.lr 0.02387789280624954 +378 40 training.batch_size 2.0 +378 40 training.label_smoothing 0.0028891538354532936 +378 41 model.embedding_dim 1.0 +378 41 model.c_min 0.06305500613775863 +378 41 model.c_max 4.200552004627248 +378 41 optimizer.lr 0.09034212138473331 +378 41 training.batch_size 0.0 +378 41 training.label_smoothing 0.004957324779972995 +378 42 model.embedding_dim 0.0 +378 42 model.c_min 0.018076598269563 +378 42 model.c_max 9.90551227481078 +378 42 optimizer.lr 0.01137279292569238 +378 42 training.batch_size 0.0 +378 42 training.label_smoothing 0.003142677704011587 +378 43 model.embedding_dim 0.0 +378 43 model.c_min 0.024276125308041047 +378 43 model.c_max 5.026826025611756 +378 43 optimizer.lr 0.01290608891766505 +378 43 training.batch_size 1.0 +378 43 
training.label_smoothing 0.14819998759041092 +378 44 model.embedding_dim 1.0 +378 44 model.c_min 0.014763690550426693 +378 44 model.c_max 5.499722450621672 +378 44 optimizer.lr 0.0010017521368786905 +378 44 training.batch_size 0.0 +378 44 training.label_smoothing 0.0029589080014769376 +378 45 model.embedding_dim 2.0 +378 45 model.c_min 0.04432457058886392 +378 45 model.c_max 4.131711113209217 +378 45 optimizer.lr 0.014965979802866963 +378 45 training.batch_size 0.0 +378 45 training.label_smoothing 0.0014045116278620059 +378 46 model.embedding_dim 2.0 +378 46 model.c_min 0.029467988864509742 +378 46 model.c_max 4.435811517080269 +378 46 optimizer.lr 0.00672391902545794 +378 46 training.batch_size 1.0 +378 46 training.label_smoothing 0.03902702365207912 +378 47 model.embedding_dim 2.0 +378 47 model.c_min 0.013426939823732869 +378 47 model.c_max 8.654298937700533 +378 47 optimizer.lr 0.016113546375361 +378 47 training.batch_size 2.0 +378 47 training.label_smoothing 0.27785287766251543 +378 48 model.embedding_dim 0.0 +378 48 model.c_min 0.04753426403110526 +378 48 model.c_max 8.731150268634053 +378 48 optimizer.lr 0.0010111331242395264 +378 48 training.batch_size 1.0 +378 48 training.label_smoothing 0.0020389858701695533 +378 49 model.embedding_dim 0.0 +378 49 model.c_min 0.08125388550175068 +378 49 model.c_max 2.8467315031676845 +378 49 optimizer.lr 0.001718727730913652 +378 49 training.batch_size 0.0 +378 49 training.label_smoothing 0.0015370327976785938 +378 50 model.embedding_dim 2.0 +378 50 model.c_min 0.043142204431898167 +378 50 model.c_max 3.940966280152891 +378 50 optimizer.lr 0.007329056703944059 +378 50 training.batch_size 1.0 +378 50 training.label_smoothing 0.0011537959961669898 +378 51 model.embedding_dim 1.0 +378 51 model.c_min 0.06413628767838726 +378 51 model.c_max 5.647886064299407 +378 51 optimizer.lr 0.0010232244605442907 +378 51 training.batch_size 2.0 +378 51 training.label_smoothing 0.079292587237617 +378 52 model.embedding_dim 1.0 +378 52 
model.c_min 0.09322384927557921 +378 52 model.c_max 3.9348954258878095 +378 52 optimizer.lr 0.001964211551140437 +378 52 training.batch_size 2.0 +378 52 training.label_smoothing 0.003373486305472171 +378 53 model.embedding_dim 0.0 +378 53 model.c_min 0.07701159486702028 +378 53 model.c_max 3.132370360824531 +378 53 optimizer.lr 0.033054952125712266 +378 53 training.batch_size 0.0 +378 53 training.label_smoothing 0.38833813213839363 +378 54 model.embedding_dim 2.0 +378 54 model.c_min 0.021232799084019226 +378 54 model.c_max 6.714513723499101 +378 54 optimizer.lr 0.011088610286574388 +378 54 training.batch_size 2.0 +378 54 training.label_smoothing 0.004505366109204214 +378 55 model.embedding_dim 1.0 +378 55 model.c_min 0.019863878200053645 +378 55 model.c_max 2.084031752873131 +378 55 optimizer.lr 0.002289918837666437 +378 55 training.batch_size 1.0 +378 55 training.label_smoothing 0.6629954162964347 +378 56 model.embedding_dim 0.0 +378 56 model.c_min 0.04291640829078334 +378 56 model.c_max 8.621267799691509 +378 56 optimizer.lr 0.07824888180877491 +378 56 training.batch_size 1.0 +378 56 training.label_smoothing 0.0033597808197067505 +378 57 model.embedding_dim 1.0 +378 57 model.c_min 0.03359040828498185 +378 57 model.c_max 9.916340632436784 +378 57 optimizer.lr 0.04155013034382202 +378 57 training.batch_size 0.0 +378 57 training.label_smoothing 0.002207833960683378 +378 58 model.embedding_dim 0.0 +378 58 model.c_min 0.0678631156860977 +378 58 model.c_max 9.271700847265011 +378 58 optimizer.lr 0.0793398702811332 +378 58 training.batch_size 0.0 +378 58 training.label_smoothing 0.0654413090660282 +378 59 model.embedding_dim 0.0 +378 59 model.c_min 0.010127298727931841 +378 59 model.c_max 5.9006550471666035 +378 59 optimizer.lr 0.07514013637529209 +378 59 training.batch_size 1.0 +378 59 training.label_smoothing 0.014949880704626025 +378 60 model.embedding_dim 1.0 +378 60 model.c_min 0.041076511675962636 +378 60 model.c_max 3.0064184709185993 +378 60 optimizer.lr 
0.028549161063523707 +378 60 training.batch_size 1.0 +378 60 training.label_smoothing 0.8694643776465892 +378 61 model.embedding_dim 0.0 +378 61 model.c_min 0.014438671093959694 +378 61 model.c_max 1.977288053264251 +378 61 optimizer.lr 0.002373820867047378 +378 61 training.batch_size 0.0 +378 61 training.label_smoothing 0.24911566784557115 +378 62 model.embedding_dim 0.0 +378 62 model.c_min 0.014691683144842163 +378 62 model.c_max 9.792983152029652 +378 62 optimizer.lr 0.002677415512457591 +378 62 training.batch_size 2.0 +378 62 training.label_smoothing 0.07854151331848662 +378 63 model.embedding_dim 2.0 +378 63 model.c_min 0.07549049995633625 +378 63 model.c_max 2.6094732765036777 +378 63 optimizer.lr 0.039553183979299036 +378 63 training.batch_size 0.0 +378 63 training.label_smoothing 0.003195451305468689 +378 64 model.embedding_dim 0.0 +378 64 model.c_min 0.016135831297995177 +378 64 model.c_max 8.41378494906236 +378 64 optimizer.lr 0.05618230008165763 +378 64 training.batch_size 1.0 +378 64 training.label_smoothing 0.0019913683371049332 +378 65 model.embedding_dim 0.0 +378 65 model.c_min 0.014158570497148894 +378 65 model.c_max 5.536658368105893 +378 65 optimizer.lr 0.0018276790175458003 +378 65 training.batch_size 2.0 +378 65 training.label_smoothing 0.08045636746669299 +378 66 model.embedding_dim 1.0 +378 66 model.c_min 0.054724091585272076 +378 66 model.c_max 3.4342218366076547 +378 66 optimizer.lr 0.007155580689980464 +378 66 training.batch_size 2.0 +378 66 training.label_smoothing 0.9414203586985488 +378 67 model.embedding_dim 2.0 +378 67 model.c_min 0.02397233595456225 +378 67 model.c_max 4.80941210027939 +378 67 optimizer.lr 0.00406388400637377 +378 67 training.batch_size 2.0 +378 67 training.label_smoothing 0.007137164855960024 +378 68 model.embedding_dim 0.0 +378 68 model.c_min 0.05777104769006972 +378 68 model.c_max 8.073637961374011 +378 68 optimizer.lr 0.01133019741393347 +378 68 training.batch_size 2.0 +378 68 training.label_smoothing 
0.013426722029627762 +378 69 model.embedding_dim 2.0 +378 69 model.c_min 0.04141062970921704 +378 69 model.c_max 7.131810980566836 +378 69 optimizer.lr 0.001009830515123158 +378 69 training.batch_size 2.0 +378 69 training.label_smoothing 0.02121221564929125 +378 70 model.embedding_dim 0.0 +378 70 model.c_min 0.02564758885296938 +378 70 model.c_max 4.322302966811499 +378 70 optimizer.lr 0.002257365643170378 +378 70 training.batch_size 0.0 +378 70 training.label_smoothing 0.0035713269588655942 +378 71 model.embedding_dim 2.0 +378 71 model.c_min 0.014606072339556247 +378 71 model.c_max 5.086784748340933 +378 71 optimizer.lr 0.003744860617554688 +378 71 training.batch_size 0.0 +378 71 training.label_smoothing 0.07977877450767216 +378 72 model.embedding_dim 1.0 +378 72 model.c_min 0.02670187890569772 +378 72 model.c_max 5.170172753729023 +378 72 optimizer.lr 0.06651796013854494 +378 72 training.batch_size 2.0 +378 72 training.label_smoothing 0.00801097328547037 +378 73 model.embedding_dim 2.0 +378 73 model.c_min 0.09592279495363891 +378 73 model.c_max 7.974928483735149 +378 73 optimizer.lr 0.0016826162169122244 +378 73 training.batch_size 2.0 +378 73 training.label_smoothing 0.00123112201818479 +378 74 model.embedding_dim 2.0 +378 74 model.c_min 0.013551055238192222 +378 74 model.c_max 9.656453116787393 +378 74 optimizer.lr 0.004169644651705222 +378 74 training.batch_size 0.0 +378 74 training.label_smoothing 0.5094363870902683 +378 75 model.embedding_dim 0.0 +378 75 model.c_min 0.0780402858959896 +378 75 model.c_max 6.360802329324479 +378 75 optimizer.lr 0.0015320155804564872 +378 75 training.batch_size 2.0 +378 75 training.label_smoothing 0.018780611823514943 +378 76 model.embedding_dim 2.0 +378 76 model.c_min 0.09223289832693253 +378 76 model.c_max 3.128074596995288 +378 76 optimizer.lr 0.020783200722884212 +378 76 training.batch_size 0.0 +378 76 training.label_smoothing 0.014631443741851982 +378 77 model.embedding_dim 1.0 +378 77 model.c_min 0.01305132690709059 +378 
77 model.c_max 7.622418399575249 +378 77 optimizer.lr 0.0038607235353163316 +378 77 training.batch_size 0.0 +378 77 training.label_smoothing 0.0028509657467312073 +378 78 model.embedding_dim 1.0 +378 78 model.c_min 0.06291738888655553 +378 78 model.c_max 4.900846730469697 +378 78 optimizer.lr 0.06570565203938683 +378 78 training.batch_size 0.0 +378 78 training.label_smoothing 0.014255450718204787 +378 79 model.embedding_dim 1.0 +378 79 model.c_min 0.034094868619395174 +378 79 model.c_max 7.0230431008360465 +378 79 optimizer.lr 0.0012548807563239949 +378 79 training.batch_size 0.0 +378 79 training.label_smoothing 0.0012038037184772211 +378 80 model.embedding_dim 0.0 +378 80 model.c_min 0.010416830805472245 +378 80 model.c_max 3.5510742831674875 +378 80 optimizer.lr 0.022597012722127524 +378 80 training.batch_size 1.0 +378 80 training.label_smoothing 0.00666763935185789 +378 81 model.embedding_dim 2.0 +378 81 model.c_min 0.04159784288378331 +378 81 model.c_max 6.892588428009179 +378 81 optimizer.lr 0.0028528366749774456 +378 81 training.batch_size 0.0 +378 81 training.label_smoothing 0.030832372454476575 +378 82 model.embedding_dim 1.0 +378 82 model.c_min 0.013524149202498455 +378 82 model.c_max 4.53313730924922 +378 82 optimizer.lr 0.07653138713398155 +378 82 training.batch_size 0.0 +378 82 training.label_smoothing 0.7937374942457875 +378 83 model.embedding_dim 2.0 +378 83 model.c_min 0.06788204925139787 +378 83 model.c_max 6.358118715969686 +378 83 optimizer.lr 0.0011384884580391257 +378 83 training.batch_size 2.0 +378 83 training.label_smoothing 0.04838894258615191 +378 84 model.embedding_dim 1.0 +378 84 model.c_min 0.058222276584986604 +378 84 model.c_max 6.011595165659389 +378 84 optimizer.lr 0.006770761296897971 +378 84 training.batch_size 0.0 +378 84 training.label_smoothing 0.16064926309158023 +378 85 model.embedding_dim 2.0 +378 85 model.c_min 0.015757360037407988 +378 85 model.c_max 3.9126512998221377 +378 85 optimizer.lr 0.021590253391821634 +378 85 
training.batch_size 1.0 +378 85 training.label_smoothing 0.024854146336581492 +378 86 model.embedding_dim 2.0 +378 86 model.c_min 0.04362450428986915 +378 86 model.c_max 8.485876492070119 +378 86 optimizer.lr 0.027310468079686918 +378 86 training.batch_size 2.0 +378 86 training.label_smoothing 0.051935993580654975 +378 87 model.embedding_dim 0.0 +378 87 model.c_min 0.04911247056720026 +378 87 model.c_max 4.839579482253568 +378 87 optimizer.lr 0.0032455737906269236 +378 87 training.batch_size 0.0 +378 87 training.label_smoothing 0.0021744464376691813 +378 88 model.embedding_dim 2.0 +378 88 model.c_min 0.08597430171604574 +378 88 model.c_max 4.231308925899775 +378 88 optimizer.lr 0.0071993183789788254 +378 88 training.batch_size 0.0 +378 88 training.label_smoothing 0.05394932292065815 +378 89 model.embedding_dim 2.0 +378 89 model.c_min 0.018283912413210313 +378 89 model.c_max 3.340811187595574 +378 89 optimizer.lr 0.04384480678404347 +378 89 training.batch_size 2.0 +378 89 training.label_smoothing 0.024410249958279145 +378 90 model.embedding_dim 0.0 +378 90 model.c_min 0.014373519315088451 +378 90 model.c_max 8.08777496638222 +378 90 optimizer.lr 0.07350570444416134 +378 90 training.batch_size 0.0 +378 90 training.label_smoothing 0.2567016928928457 +378 91 model.embedding_dim 1.0 +378 91 model.c_min 0.015507337817807184 +378 91 model.c_max 4.015554258967418 +378 91 optimizer.lr 0.053323874257648805 +378 91 training.batch_size 2.0 +378 91 training.label_smoothing 0.004836210541361912 +378 92 model.embedding_dim 1.0 +378 92 model.c_min 0.010493381342025654 +378 92 model.c_max 9.150058374602846 +378 92 optimizer.lr 0.007607278628638635 +378 92 training.batch_size 1.0 +378 92 training.label_smoothing 0.0271982090070403 +378 93 model.embedding_dim 1.0 +378 93 model.c_min 0.06722050182766615 +378 93 model.c_max 7.493719281924354 +378 93 optimizer.lr 0.01694632772364126 +378 93 training.batch_size 0.0 +378 93 training.label_smoothing 0.006838606339378543 +378 94 
model.embedding_dim 2.0 +378 94 model.c_min 0.08668280354970245 +378 94 model.c_max 9.97095735265812 +378 94 optimizer.lr 0.004089842104186627 +378 94 training.batch_size 1.0 +378 94 training.label_smoothing 0.004283188333490387 +378 95 model.embedding_dim 0.0 +378 95 model.c_min 0.010037182899753251 +378 95 model.c_max 9.498535886494837 +378 95 optimizer.lr 0.0019131939047204477 +378 95 training.batch_size 0.0 +378 95 training.label_smoothing 0.014366546891768453 +378 96 model.embedding_dim 1.0 +378 96 model.c_min 0.06868910108212671 +378 96 model.c_max 7.798897909523057 +378 96 optimizer.lr 0.018774781076216447 +378 96 training.batch_size 1.0 +378 96 training.label_smoothing 0.0012572517833393955 +378 97 model.embedding_dim 0.0 +378 97 model.c_min 0.05172654519100866 +378 97 model.c_max 1.6786356991737952 +378 97 optimizer.lr 0.06385524096323511 +378 97 training.batch_size 1.0 +378 97 training.label_smoothing 0.11935184431562888 +378 98 model.embedding_dim 0.0 +378 98 model.c_min 0.07915233466513784 +378 98 model.c_max 9.708615606743246 +378 98 optimizer.lr 0.0014636652066783344 +378 98 training.batch_size 0.0 +378 98 training.label_smoothing 0.011430466586988496 +378 99 model.embedding_dim 0.0 +378 99 model.c_min 0.02672054502305909 +378 99 model.c_max 8.313164402969978 +378 99 optimizer.lr 0.003643708381088981 +378 99 training.batch_size 0.0 +378 99 training.label_smoothing 0.0021466932702651972 +378 100 model.embedding_dim 2.0 +378 100 model.c_min 0.08890107348096013 +378 100 model.c_max 5.506919966767323 +378 100 optimizer.lr 0.01683189937884299 +378 100 training.batch_size 0.0 +378 100 training.label_smoothing 0.06929056132998765 +378 1 dataset """kinships""" +378 1 model """kg2e""" +378 1 loss """softplus""" +378 1 regularizer """no""" +378 1 optimizer """adam""" +378 1 training_loop """lcwa""" +378 1 evaluator """rankbased""" +378 2 dataset """kinships""" +378 2 model """kg2e""" +378 2 loss """softplus""" +378 2 regularizer """no""" +378 2 optimizer 
"""adam""" +378 2 training_loop """lcwa""" +378 2 evaluator """rankbased""" +378 3 dataset """kinships""" +378 3 model """kg2e""" +378 3 loss """softplus""" +378 3 regularizer """no""" +378 3 optimizer """adam""" +378 3 training_loop """lcwa""" +378 3 evaluator """rankbased""" +378 4 dataset """kinships""" +378 4 model """kg2e""" +378 4 loss """softplus""" +378 4 regularizer """no""" +378 4 optimizer """adam""" +378 4 training_loop """lcwa""" +378 4 evaluator """rankbased""" +378 5 dataset """kinships""" +378 5 model """kg2e""" +378 5 loss """softplus""" +378 5 regularizer """no""" +378 5 optimizer """adam""" +378 5 training_loop """lcwa""" +378 5 evaluator """rankbased""" +378 6 dataset """kinships""" +378 6 model """kg2e""" +378 6 loss """softplus""" +378 6 regularizer """no""" +378 6 optimizer """adam""" +378 6 training_loop """lcwa""" +378 6 evaluator """rankbased""" +378 7 dataset """kinships""" +378 7 model """kg2e""" +378 7 loss """softplus""" +378 7 regularizer """no""" +378 7 optimizer """adam""" +378 7 training_loop """lcwa""" +378 7 evaluator """rankbased""" +378 8 dataset """kinships""" +378 8 model """kg2e""" +378 8 loss """softplus""" +378 8 regularizer """no""" +378 8 optimizer """adam""" +378 8 training_loop """lcwa""" +378 8 evaluator """rankbased""" +378 9 dataset """kinships""" +378 9 model """kg2e""" +378 9 loss """softplus""" +378 9 regularizer """no""" +378 9 optimizer """adam""" +378 9 training_loop """lcwa""" +378 9 evaluator """rankbased""" +378 10 dataset """kinships""" +378 10 model """kg2e""" +378 10 loss """softplus""" +378 10 regularizer """no""" +378 10 optimizer """adam""" +378 10 training_loop """lcwa""" +378 10 evaluator """rankbased""" +378 11 dataset """kinships""" +378 11 model """kg2e""" +378 11 loss """softplus""" +378 11 regularizer """no""" +378 11 optimizer """adam""" +378 11 training_loop """lcwa""" +378 11 evaluator """rankbased""" +378 12 dataset """kinships""" +378 12 model """kg2e""" +378 12 loss """softplus""" +378 12 
regularizer """no""" +378 12 optimizer """adam""" +378 12 training_loop """lcwa""" +378 12 evaluator """rankbased""" +378 13 dataset """kinships""" +378 13 model """kg2e""" +378 13 loss """softplus""" +378 13 regularizer """no""" +378 13 optimizer """adam""" +378 13 training_loop """lcwa""" +378 13 evaluator """rankbased""" +378 14 dataset """kinships""" +378 14 model """kg2e""" +378 14 loss """softplus""" +378 14 regularizer """no""" +378 14 optimizer """adam""" +378 14 training_loop """lcwa""" +378 14 evaluator """rankbased""" +378 15 dataset """kinships""" +378 15 model """kg2e""" +378 15 loss """softplus""" +378 15 regularizer """no""" +378 15 optimizer """adam""" +378 15 training_loop """lcwa""" +378 15 evaluator """rankbased""" +378 16 dataset """kinships""" +378 16 model """kg2e""" +378 16 loss """softplus""" +378 16 regularizer """no""" +378 16 optimizer """adam""" +378 16 training_loop """lcwa""" +378 16 evaluator """rankbased""" +378 17 dataset """kinships""" +378 17 model """kg2e""" +378 17 loss """softplus""" +378 17 regularizer """no""" +378 17 optimizer """adam""" +378 17 training_loop """lcwa""" +378 17 evaluator """rankbased""" +378 18 dataset """kinships""" +378 18 model """kg2e""" +378 18 loss """softplus""" +378 18 regularizer """no""" +378 18 optimizer """adam""" +378 18 training_loop """lcwa""" +378 18 evaluator """rankbased""" +378 19 dataset """kinships""" +378 19 model """kg2e""" +378 19 loss """softplus""" +378 19 regularizer """no""" +378 19 optimizer """adam""" +378 19 training_loop """lcwa""" +378 19 evaluator """rankbased""" +378 20 dataset """kinships""" +378 20 model """kg2e""" +378 20 loss """softplus""" +378 20 regularizer """no""" +378 20 optimizer """adam""" +378 20 training_loop """lcwa""" +378 20 evaluator """rankbased""" +378 21 dataset """kinships""" +378 21 model """kg2e""" +378 21 loss """softplus""" +378 21 regularizer """no""" +378 21 optimizer """adam""" +378 21 training_loop """lcwa""" +378 21 evaluator """rankbased""" 
+378 22 dataset """kinships""" +378 22 model """kg2e""" +378 22 loss """softplus""" +378 22 regularizer """no""" +378 22 optimizer """adam""" +378 22 training_loop """lcwa""" +378 22 evaluator """rankbased""" +378 23 dataset """kinships""" +378 23 model """kg2e""" +378 23 loss """softplus""" +378 23 regularizer """no""" +378 23 optimizer """adam""" +378 23 training_loop """lcwa""" +378 23 evaluator """rankbased""" +378 24 dataset """kinships""" +378 24 model """kg2e""" +378 24 loss """softplus""" +378 24 regularizer """no""" +378 24 optimizer """adam""" +378 24 training_loop """lcwa""" +378 24 evaluator """rankbased""" +378 25 dataset """kinships""" +378 25 model """kg2e""" +378 25 loss """softplus""" +378 25 regularizer """no""" +378 25 optimizer """adam""" +378 25 training_loop """lcwa""" +378 25 evaluator """rankbased""" +378 26 dataset """kinships""" +378 26 model """kg2e""" +378 26 loss """softplus""" +378 26 regularizer """no""" +378 26 optimizer """adam""" +378 26 training_loop """lcwa""" +378 26 evaluator """rankbased""" +378 27 dataset """kinships""" +378 27 model """kg2e""" +378 27 loss """softplus""" +378 27 regularizer """no""" +378 27 optimizer """adam""" +378 27 training_loop """lcwa""" +378 27 evaluator """rankbased""" +378 28 dataset """kinships""" +378 28 model """kg2e""" +378 28 loss """softplus""" +378 28 regularizer """no""" +378 28 optimizer """adam""" +378 28 training_loop """lcwa""" +378 28 evaluator """rankbased""" +378 29 dataset """kinships""" +378 29 model """kg2e""" +378 29 loss """softplus""" +378 29 regularizer """no""" +378 29 optimizer """adam""" +378 29 training_loop """lcwa""" +378 29 evaluator """rankbased""" +378 30 dataset """kinships""" +378 30 model """kg2e""" +378 30 loss """softplus""" +378 30 regularizer """no""" +378 30 optimizer """adam""" +378 30 training_loop """lcwa""" +378 30 evaluator """rankbased""" +378 31 dataset """kinships""" +378 31 model """kg2e""" +378 31 loss """softplus""" +378 31 regularizer """no""" +378 
31 optimizer """adam""" +378 31 training_loop """lcwa""" +378 31 evaluator """rankbased""" +378 32 dataset """kinships""" +378 32 model """kg2e""" +378 32 loss """softplus""" +378 32 regularizer """no""" +378 32 optimizer """adam""" +378 32 training_loop """lcwa""" +378 32 evaluator """rankbased""" +378 33 dataset """kinships""" +378 33 model """kg2e""" +378 33 loss """softplus""" +378 33 regularizer """no""" +378 33 optimizer """adam""" +378 33 training_loop """lcwa""" +378 33 evaluator """rankbased""" +378 34 dataset """kinships""" +378 34 model """kg2e""" +378 34 loss """softplus""" +378 34 regularizer """no""" +378 34 optimizer """adam""" +378 34 training_loop """lcwa""" +378 34 evaluator """rankbased""" +378 35 dataset """kinships""" +378 35 model """kg2e""" +378 35 loss """softplus""" +378 35 regularizer """no""" +378 35 optimizer """adam""" +378 35 training_loop """lcwa""" +378 35 evaluator """rankbased""" +378 36 dataset """kinships""" +378 36 model """kg2e""" +378 36 loss """softplus""" +378 36 regularizer """no""" +378 36 optimizer """adam""" +378 36 training_loop """lcwa""" +378 36 evaluator """rankbased""" +378 37 dataset """kinships""" +378 37 model """kg2e""" +378 37 loss """softplus""" +378 37 regularizer """no""" +378 37 optimizer """adam""" +378 37 training_loop """lcwa""" +378 37 evaluator """rankbased""" +378 38 dataset """kinships""" +378 38 model """kg2e""" +378 38 loss """softplus""" +378 38 regularizer """no""" +378 38 optimizer """adam""" +378 38 training_loop """lcwa""" +378 38 evaluator """rankbased""" +378 39 dataset """kinships""" +378 39 model """kg2e""" +378 39 loss """softplus""" +378 39 regularizer """no""" +378 39 optimizer """adam""" +378 39 training_loop """lcwa""" +378 39 evaluator """rankbased""" +378 40 dataset """kinships""" +378 40 model """kg2e""" +378 40 loss """softplus""" +378 40 regularizer """no""" +378 40 optimizer """adam""" +378 40 training_loop """lcwa""" +378 40 evaluator """rankbased""" +378 41 dataset 
"""kinships""" +378 41 model """kg2e""" +378 41 loss """softplus""" +378 41 regularizer """no""" +378 41 optimizer """adam""" +378 41 training_loop """lcwa""" +378 41 evaluator """rankbased""" +378 42 dataset """kinships""" +378 42 model """kg2e""" +378 42 loss """softplus""" +378 42 regularizer """no""" +378 42 optimizer """adam""" +378 42 training_loop """lcwa""" +378 42 evaluator """rankbased""" +378 43 dataset """kinships""" +378 43 model """kg2e""" +378 43 loss """softplus""" +378 43 regularizer """no""" +378 43 optimizer """adam""" +378 43 training_loop """lcwa""" +378 43 evaluator """rankbased""" +378 44 dataset """kinships""" +378 44 model """kg2e""" +378 44 loss """softplus""" +378 44 regularizer """no""" +378 44 optimizer """adam""" +378 44 training_loop """lcwa""" +378 44 evaluator """rankbased""" +378 45 dataset """kinships""" +378 45 model """kg2e""" +378 45 loss """softplus""" +378 45 regularizer """no""" +378 45 optimizer """adam""" +378 45 training_loop """lcwa""" +378 45 evaluator """rankbased""" +378 46 dataset """kinships""" +378 46 model """kg2e""" +378 46 loss """softplus""" +378 46 regularizer """no""" +378 46 optimizer """adam""" +378 46 training_loop """lcwa""" +378 46 evaluator """rankbased""" +378 47 dataset """kinships""" +378 47 model """kg2e""" +378 47 loss """softplus""" +378 47 regularizer """no""" +378 47 optimizer """adam""" +378 47 training_loop """lcwa""" +378 47 evaluator """rankbased""" +378 48 dataset """kinships""" +378 48 model """kg2e""" +378 48 loss """softplus""" +378 48 regularizer """no""" +378 48 optimizer """adam""" +378 48 training_loop """lcwa""" +378 48 evaluator """rankbased""" +378 49 dataset """kinships""" +378 49 model """kg2e""" +378 49 loss """softplus""" +378 49 regularizer """no""" +378 49 optimizer """adam""" +378 49 training_loop """lcwa""" +378 49 evaluator """rankbased""" +378 50 dataset """kinships""" +378 50 model """kg2e""" +378 50 loss """softplus""" +378 50 regularizer """no""" +378 50 optimizer 
"""adam""" +378 50 training_loop """lcwa""" +378 50 evaluator """rankbased""" +378 51 dataset """kinships""" +378 51 model """kg2e""" +378 51 loss """softplus""" +378 51 regularizer """no""" +378 51 optimizer """adam""" +378 51 training_loop """lcwa""" +378 51 evaluator """rankbased""" +378 52 dataset """kinships""" +378 52 model """kg2e""" +378 52 loss """softplus""" +378 52 regularizer """no""" +378 52 optimizer """adam""" +378 52 training_loop """lcwa""" +378 52 evaluator """rankbased""" +378 53 dataset """kinships""" +378 53 model """kg2e""" +378 53 loss """softplus""" +378 53 regularizer """no""" +378 53 optimizer """adam""" +378 53 training_loop """lcwa""" +378 53 evaluator """rankbased""" +378 54 dataset """kinships""" +378 54 model """kg2e""" +378 54 loss """softplus""" +378 54 regularizer """no""" +378 54 optimizer """adam""" +378 54 training_loop """lcwa""" +378 54 evaluator """rankbased""" +378 55 dataset """kinships""" +378 55 model """kg2e""" +378 55 loss """softplus""" +378 55 regularizer """no""" +378 55 optimizer """adam""" +378 55 training_loop """lcwa""" +378 55 evaluator """rankbased""" +378 56 dataset """kinships""" +378 56 model """kg2e""" +378 56 loss """softplus""" +378 56 regularizer """no""" +378 56 optimizer """adam""" +378 56 training_loop """lcwa""" +378 56 evaluator """rankbased""" +378 57 dataset """kinships""" +378 57 model """kg2e""" +378 57 loss """softplus""" +378 57 regularizer """no""" +378 57 optimizer """adam""" +378 57 training_loop """lcwa""" +378 57 evaluator """rankbased""" +378 58 dataset """kinships""" +378 58 model """kg2e""" +378 58 loss """softplus""" +378 58 regularizer """no""" +378 58 optimizer """adam""" +378 58 training_loop """lcwa""" +378 58 evaluator """rankbased""" +378 59 dataset """kinships""" +378 59 model """kg2e""" +378 59 loss """softplus""" +378 59 regularizer """no""" +378 59 optimizer """adam""" +378 59 training_loop """lcwa""" +378 59 evaluator """rankbased""" +378 60 dataset """kinships""" +378 60 
model """kg2e""" +378 60 loss """softplus""" +378 60 regularizer """no""" +378 60 optimizer """adam""" +378 60 training_loop """lcwa""" +378 60 evaluator """rankbased""" +378 61 dataset """kinships""" +378 61 model """kg2e""" +378 61 loss """softplus""" +378 61 regularizer """no""" +378 61 optimizer """adam""" +378 61 training_loop """lcwa""" +378 61 evaluator """rankbased""" +378 62 dataset """kinships""" +378 62 model """kg2e""" +378 62 loss """softplus""" +378 62 regularizer """no""" +378 62 optimizer """adam""" +378 62 training_loop """lcwa""" +378 62 evaluator """rankbased""" +378 63 dataset """kinships""" +378 63 model """kg2e""" +378 63 loss """softplus""" +378 63 regularizer """no""" +378 63 optimizer """adam""" +378 63 training_loop """lcwa""" +378 63 evaluator """rankbased""" +378 64 dataset """kinships""" +378 64 model """kg2e""" +378 64 loss """softplus""" +378 64 regularizer """no""" +378 64 optimizer """adam""" +378 64 training_loop """lcwa""" +378 64 evaluator """rankbased""" +378 65 dataset """kinships""" +378 65 model """kg2e""" +378 65 loss """softplus""" +378 65 regularizer """no""" +378 65 optimizer """adam""" +378 65 training_loop """lcwa""" +378 65 evaluator """rankbased""" +378 66 dataset """kinships""" +378 66 model """kg2e""" +378 66 loss """softplus""" +378 66 regularizer """no""" +378 66 optimizer """adam""" +378 66 training_loop """lcwa""" +378 66 evaluator """rankbased""" +378 67 dataset """kinships""" +378 67 model """kg2e""" +378 67 loss """softplus""" +378 67 regularizer """no""" +378 67 optimizer """adam""" +378 67 training_loop """lcwa""" +378 67 evaluator """rankbased""" +378 68 dataset """kinships""" +378 68 model """kg2e""" +378 68 loss """softplus""" +378 68 regularizer """no""" +378 68 optimizer """adam""" +378 68 training_loop """lcwa""" +378 68 evaluator """rankbased""" +378 69 dataset """kinships""" +378 69 model """kg2e""" +378 69 loss """softplus""" +378 69 regularizer """no""" +378 69 optimizer """adam""" +378 69 
training_loop """lcwa""" +378 69 evaluator """rankbased""" +378 70 dataset """kinships""" +378 70 model """kg2e""" +378 70 loss """softplus""" +378 70 regularizer """no""" +378 70 optimizer """adam""" +378 70 training_loop """lcwa""" +378 70 evaluator """rankbased""" +378 71 dataset """kinships""" +378 71 model """kg2e""" +378 71 loss """softplus""" +378 71 regularizer """no""" +378 71 optimizer """adam""" +378 71 training_loop """lcwa""" +378 71 evaluator """rankbased""" +378 72 dataset """kinships""" +378 72 model """kg2e""" +378 72 loss """softplus""" +378 72 regularizer """no""" +378 72 optimizer """adam""" +378 72 training_loop """lcwa""" +378 72 evaluator """rankbased""" +378 73 dataset """kinships""" +378 73 model """kg2e""" +378 73 loss """softplus""" +378 73 regularizer """no""" +378 73 optimizer """adam""" +378 73 training_loop """lcwa""" +378 73 evaluator """rankbased""" +378 74 dataset """kinships""" +378 74 model """kg2e""" +378 74 loss """softplus""" +378 74 regularizer """no""" +378 74 optimizer """adam""" +378 74 training_loop """lcwa""" +378 74 evaluator """rankbased""" +378 75 dataset """kinships""" +378 75 model """kg2e""" +378 75 loss """softplus""" +378 75 regularizer """no""" +378 75 optimizer """adam""" +378 75 training_loop """lcwa""" +378 75 evaluator """rankbased""" +378 76 dataset """kinships""" +378 76 model """kg2e""" +378 76 loss """softplus""" +378 76 regularizer """no""" +378 76 optimizer """adam""" +378 76 training_loop """lcwa""" +378 76 evaluator """rankbased""" +378 77 dataset """kinships""" +378 77 model """kg2e""" +378 77 loss """softplus""" +378 77 regularizer """no""" +378 77 optimizer """adam""" +378 77 training_loop """lcwa""" +378 77 evaluator """rankbased""" +378 78 dataset """kinships""" +378 78 model """kg2e""" +378 78 loss """softplus""" +378 78 regularizer """no""" +378 78 optimizer """adam""" +378 78 training_loop """lcwa""" +378 78 evaluator """rankbased""" +378 79 dataset """kinships""" +378 79 model """kg2e""" 
+378 79 loss """softplus""" +378 79 regularizer """no""" +378 79 optimizer """adam""" +378 79 training_loop """lcwa""" +378 79 evaluator """rankbased""" +378 80 dataset """kinships""" +378 80 model """kg2e""" +378 80 loss """softplus""" +378 80 regularizer """no""" +378 80 optimizer """adam""" +378 80 training_loop """lcwa""" +378 80 evaluator """rankbased""" +378 81 dataset """kinships""" +378 81 model """kg2e""" +378 81 loss """softplus""" +378 81 regularizer """no""" +378 81 optimizer """adam""" +378 81 training_loop """lcwa""" +378 81 evaluator """rankbased""" +378 82 dataset """kinships""" +378 82 model """kg2e""" +378 82 loss """softplus""" +378 82 regularizer """no""" +378 82 optimizer """adam""" +378 82 training_loop """lcwa""" +378 82 evaluator """rankbased""" +378 83 dataset """kinships""" +378 83 model """kg2e""" +378 83 loss """softplus""" +378 83 regularizer """no""" +378 83 optimizer """adam""" +378 83 training_loop """lcwa""" +378 83 evaluator """rankbased""" +378 84 dataset """kinships""" +378 84 model """kg2e""" +378 84 loss """softplus""" +378 84 regularizer """no""" +378 84 optimizer """adam""" +378 84 training_loop """lcwa""" +378 84 evaluator """rankbased""" +378 85 dataset """kinships""" +378 85 model """kg2e""" +378 85 loss """softplus""" +378 85 regularizer """no""" +378 85 optimizer """adam""" +378 85 training_loop """lcwa""" +378 85 evaluator """rankbased""" +378 86 dataset """kinships""" +378 86 model """kg2e""" +378 86 loss """softplus""" +378 86 regularizer """no""" +378 86 optimizer """adam""" +378 86 training_loop """lcwa""" +378 86 evaluator """rankbased""" +378 87 dataset """kinships""" +378 87 model """kg2e""" +378 87 loss """softplus""" +378 87 regularizer """no""" +378 87 optimizer """adam""" +378 87 training_loop """lcwa""" +378 87 evaluator """rankbased""" +378 88 dataset """kinships""" +378 88 model """kg2e""" +378 88 loss """softplus""" +378 88 regularizer """no""" +378 88 optimizer """adam""" +378 88 training_loop """lcwa""" 
+378 88 evaluator """rankbased""" +378 89 dataset """kinships""" +378 89 model """kg2e""" +378 89 loss """softplus""" +378 89 regularizer """no""" +378 89 optimizer """adam""" +378 89 training_loop """lcwa""" +378 89 evaluator """rankbased""" +378 90 dataset """kinships""" +378 90 model """kg2e""" +378 90 loss """softplus""" +378 90 regularizer """no""" +378 90 optimizer """adam""" +378 90 training_loop """lcwa""" +378 90 evaluator """rankbased""" +378 91 dataset """kinships""" +378 91 model """kg2e""" +378 91 loss """softplus""" +378 91 regularizer """no""" +378 91 optimizer """adam""" +378 91 training_loop """lcwa""" +378 91 evaluator """rankbased""" +378 92 dataset """kinships""" +378 92 model """kg2e""" +378 92 loss """softplus""" +378 92 regularizer """no""" +378 92 optimizer """adam""" +378 92 training_loop """lcwa""" +378 92 evaluator """rankbased""" +378 93 dataset """kinships""" +378 93 model """kg2e""" +378 93 loss """softplus""" +378 93 regularizer """no""" +378 93 optimizer """adam""" +378 93 training_loop """lcwa""" +378 93 evaluator """rankbased""" +378 94 dataset """kinships""" +378 94 model """kg2e""" +378 94 loss """softplus""" +378 94 regularizer """no""" +378 94 optimizer """adam""" +378 94 training_loop """lcwa""" +378 94 evaluator """rankbased""" +378 95 dataset """kinships""" +378 95 model """kg2e""" +378 95 loss """softplus""" +378 95 regularizer """no""" +378 95 optimizer """adam""" +378 95 training_loop """lcwa""" +378 95 evaluator """rankbased""" +378 96 dataset """kinships""" +378 96 model """kg2e""" +378 96 loss """softplus""" +378 96 regularizer """no""" +378 96 optimizer """adam""" +378 96 training_loop """lcwa""" +378 96 evaluator """rankbased""" +378 97 dataset """kinships""" +378 97 model """kg2e""" +378 97 loss """softplus""" +378 97 regularizer """no""" +378 97 optimizer """adam""" +378 97 training_loop """lcwa""" +378 97 evaluator """rankbased""" +378 98 dataset """kinships""" +378 98 model """kg2e""" +378 98 loss """softplus""" 
+378 98 regularizer """no""" +378 98 optimizer """adam""" +378 98 training_loop """lcwa""" +378 98 evaluator """rankbased""" +378 99 dataset """kinships""" +378 99 model """kg2e""" +378 99 loss """softplus""" +378 99 regularizer """no""" +378 99 optimizer """adam""" +378 99 training_loop """lcwa""" +378 99 evaluator """rankbased""" +378 100 dataset """kinships""" +378 100 model """kg2e""" +378 100 loss """softplus""" +378 100 regularizer """no""" +378 100 optimizer """adam""" +378 100 training_loop """lcwa""" +378 100 evaluator """rankbased""" +379 1 model.embedding_dim 2.0 +379 1 model.c_min 0.07285249643885332 +379 1 model.c_max 4.54093920666044 +379 1 optimizer.lr 0.009920853753294124 +379 1 training.batch_size 1.0 +379 1 training.label_smoothing 0.011826875777512019 +379 2 model.embedding_dim 2.0 +379 2 model.c_min 0.09201101435825047 +379 2 model.c_max 6.803761001161876 +379 2 optimizer.lr 0.0583095916655196 +379 2 training.batch_size 1.0 +379 2 training.label_smoothing 0.003333417409387551 +379 3 model.embedding_dim 1.0 +379 3 model.c_min 0.014031777343907922 +379 3 model.c_max 5.333728741564917 +379 3 optimizer.lr 0.001959726748345914 +379 3 training.batch_size 0.0 +379 3 training.label_smoothing 0.23708218922536003 +379 4 model.embedding_dim 0.0 +379 4 model.c_min 0.034324676555399385 +379 4 model.c_max 1.533869626917772 +379 4 optimizer.lr 0.0370352372522161 +379 4 training.batch_size 1.0 +379 4 training.label_smoothing 0.009013485214273182 +379 5 model.embedding_dim 1.0 +379 5 model.c_min 0.03191085401953239 +379 5 model.c_max 2.6501976141754975 +379 5 optimizer.lr 0.08985412254431503 +379 5 training.batch_size 1.0 +379 5 training.label_smoothing 0.456491127878546 +379 6 model.embedding_dim 1.0 +379 6 model.c_min 0.01674375062327636 +379 6 model.c_max 2.4622978151090353 +379 6 optimizer.lr 0.013912711777754588 +379 6 training.batch_size 0.0 +379 6 training.label_smoothing 0.19079073006406733 +379 7 model.embedding_dim 0.0 +379 7 model.c_min 
0.08390342957678465 +379 7 model.c_max 1.5049146459516778 +379 7 optimizer.lr 0.021537037296143688 +379 7 training.batch_size 0.0 +379 7 training.label_smoothing 0.19394520242254937 +379 8 model.embedding_dim 1.0 +379 8 model.c_min 0.012326944944213923 +379 8 model.c_max 7.482413928778065 +379 8 optimizer.lr 0.023435574044626695 +379 8 training.batch_size 1.0 +379 8 training.label_smoothing 0.00307472675140994 +379 9 model.embedding_dim 0.0 +379 9 model.c_min 0.05898088435922477 +379 9 model.c_max 4.765309544236867 +379 9 optimizer.lr 0.09253389643499235 +379 9 training.batch_size 1.0 +379 9 training.label_smoothing 0.0028820574378135857 +379 10 model.embedding_dim 1.0 +379 10 model.c_min 0.06010118870151172 +379 10 model.c_max 7.6612218484806025 +379 10 optimizer.lr 0.014415814087050092 +379 10 training.batch_size 0.0 +379 10 training.label_smoothing 0.002236724856324115 +379 11 model.embedding_dim 2.0 +379 11 model.c_min 0.036719956701608936 +379 11 model.c_max 9.54472576757735 +379 11 optimizer.lr 0.015659322658397482 +379 11 training.batch_size 2.0 +379 11 training.label_smoothing 0.599223485821903 +379 12 model.embedding_dim 2.0 +379 12 model.c_min 0.07079076573073907 +379 12 model.c_max 2.006163991241597 +379 12 optimizer.lr 0.036695090092099224 +379 12 training.batch_size 0.0 +379 12 training.label_smoothing 0.054486322534017126 +379 13 model.embedding_dim 1.0 +379 13 model.c_min 0.010549218422496634 +379 13 model.c_max 4.724945011135162 +379 13 optimizer.lr 0.004480515729797706 +379 13 training.batch_size 0.0 +379 13 training.label_smoothing 0.0014216047117840114 +379 14 model.embedding_dim 1.0 +379 14 model.c_min 0.030135634677459753 +379 14 model.c_max 3.336601870340839 +379 14 optimizer.lr 0.0014889775749746845 +379 14 training.batch_size 1.0 +379 14 training.label_smoothing 0.6014258160008096 +379 15 model.embedding_dim 2.0 +379 15 model.c_min 0.0923185489513798 +379 15 model.c_max 6.80746619388696 +379 15 optimizer.lr 0.0012708756948459808 +379 15 
training.batch_size 0.0 +379 15 training.label_smoothing 0.021482622624995975 +379 16 model.embedding_dim 1.0 +379 16 model.c_min 0.022285319706197123 +379 16 model.c_max 5.815696087541355 +379 16 optimizer.lr 0.001954537298277489 +379 16 training.batch_size 1.0 +379 16 training.label_smoothing 0.040943298301897 +379 17 model.embedding_dim 0.0 +379 17 model.c_min 0.08169368546017004 +379 17 model.c_max 2.7415409525745926 +379 17 optimizer.lr 0.030449426868978034 +379 17 training.batch_size 0.0 +379 17 training.label_smoothing 0.011841831575077648 +379 18 model.embedding_dim 0.0 +379 18 model.c_min 0.014958893785799416 +379 18 model.c_max 6.399789818245324 +379 18 optimizer.lr 0.018649322216285385 +379 18 training.batch_size 0.0 +379 18 training.label_smoothing 0.4619998964077373 +379 19 model.embedding_dim 0.0 +379 19 model.c_min 0.013573564272953937 +379 19 model.c_max 5.582483745450242 +379 19 optimizer.lr 0.017785979305459835 +379 19 training.batch_size 0.0 +379 19 training.label_smoothing 0.3910471143743956 +379 20 model.embedding_dim 0.0 +379 20 model.c_min 0.013377728330988437 +379 20 model.c_max 8.957470975076806 +379 20 optimizer.lr 0.004743263903295275 +379 20 training.batch_size 0.0 +379 20 training.label_smoothing 0.0025956333070614873 +379 21 model.embedding_dim 1.0 +379 21 model.c_min 0.019982555250200883 +379 21 model.c_max 2.9300935810255826 +379 21 optimizer.lr 0.0013613089120803 +379 21 training.batch_size 2.0 +379 21 training.label_smoothing 0.11155294908840621 +379 22 model.embedding_dim 1.0 +379 22 model.c_min 0.012898049198658808 +379 22 model.c_max 3.166126017456814 +379 22 optimizer.lr 0.016708440952525806 +379 22 training.batch_size 2.0 +379 22 training.label_smoothing 0.003541122448451316 +379 23 model.embedding_dim 2.0 +379 23 model.c_min 0.04199403832233554 +379 23 model.c_max 7.666075365310559 +379 23 optimizer.lr 0.036906055406672515 +379 23 training.batch_size 2.0 +379 23 training.label_smoothing 0.025733742495644137 +379 24 
model.embedding_dim 0.0 +379 24 model.c_min 0.029158673139316294 +379 24 model.c_max 7.268923005568389 +379 24 optimizer.lr 0.012430009502371147 +379 24 training.batch_size 1.0 +379 24 training.label_smoothing 0.11040658144410222 +379 25 model.embedding_dim 1.0 +379 25 model.c_min 0.020175263765807486 +379 25 model.c_max 5.52164561341253 +379 25 optimizer.lr 0.0016817752214333729 +379 25 training.batch_size 2.0 +379 25 training.label_smoothing 0.028022189925747506 +379 26 model.embedding_dim 0.0 +379 26 model.c_min 0.011064189836806192 +379 26 model.c_max 9.788674575940394 +379 26 optimizer.lr 0.01122502040130545 +379 26 training.batch_size 0.0 +379 26 training.label_smoothing 0.1405486431941372 +379 27 model.embedding_dim 2.0 +379 27 model.c_min 0.08796086949796571 +379 27 model.c_max 1.5838292699609187 +379 27 optimizer.lr 0.018381936802051847 +379 27 training.batch_size 2.0 +379 27 training.label_smoothing 0.010915053390422296 +379 28 model.embedding_dim 2.0 +379 28 model.c_min 0.028552719406767162 +379 28 model.c_max 5.87009457454646 +379 28 optimizer.lr 0.08139998725007001 +379 28 training.batch_size 1.0 +379 28 training.label_smoothing 0.02398280221640114 +379 29 model.embedding_dim 1.0 +379 29 model.c_min 0.05159100518934074 +379 29 model.c_max 4.795301880015396 +379 29 optimizer.lr 0.022864294203562386 +379 29 training.batch_size 1.0 +379 29 training.label_smoothing 0.06684557309086955 +379 30 model.embedding_dim 2.0 +379 30 model.c_min 0.04223527375497055 +379 30 model.c_max 3.502962544105142 +379 30 optimizer.lr 0.0011536873262540253 +379 30 training.batch_size 2.0 +379 30 training.label_smoothing 0.23028582023566857 +379 31 model.embedding_dim 2.0 +379 31 model.c_min 0.027454023562069618 +379 31 model.c_max 9.631360607040751 +379 31 optimizer.lr 0.06548014322683587 +379 31 training.batch_size 1.0 +379 31 training.label_smoothing 0.006786353315240183 +379 32 model.embedding_dim 0.0 +379 32 model.c_min 0.059375839991250644 +379 32 model.c_max 
8.63143675473583 +379 32 optimizer.lr 0.06943635597528967 +379 32 training.batch_size 1.0 +379 32 training.label_smoothing 0.20311676742241935 +379 33 model.embedding_dim 1.0 +379 33 model.c_min 0.033879277204495656 +379 33 model.c_max 3.357480677965693 +379 33 optimizer.lr 0.03231666706267927 +379 33 training.batch_size 0.0 +379 33 training.label_smoothing 0.0015730057957891574 +379 34 model.embedding_dim 1.0 +379 34 model.c_min 0.07104642111365427 +379 34 model.c_max 5.227300409983215 +379 34 optimizer.lr 0.054772381223572975 +379 34 training.batch_size 1.0 +379 34 training.label_smoothing 0.4042194738635521 +379 35 model.embedding_dim 0.0 +379 35 model.c_min 0.07489393752712696 +379 35 model.c_max 8.981869999138762 +379 35 optimizer.lr 0.018879907492741887 +379 35 training.batch_size 2.0 +379 35 training.label_smoothing 0.014133667072261482 +379 36 model.embedding_dim 2.0 +379 36 model.c_min 0.010247217393643971 +379 36 model.c_max 4.534788334021131 +379 36 optimizer.lr 0.06237160783588082 +379 36 training.batch_size 1.0 +379 36 training.label_smoothing 0.03615054112209801 +379 37 model.embedding_dim 0.0 +379 37 model.c_min 0.012756894770498326 +379 37 model.c_max 6.742077622474582 +379 37 optimizer.lr 0.002189121806023039 +379 37 training.batch_size 0.0 +379 37 training.label_smoothing 0.005250514874576194 +379 38 model.embedding_dim 1.0 +379 38 model.c_min 0.095150443616944 +379 38 model.c_max 2.1643950705961443 +379 38 optimizer.lr 0.02336868023597653 +379 38 training.batch_size 2.0 +379 38 training.label_smoothing 0.029795601790211068 +379 39 model.embedding_dim 1.0 +379 39 model.c_min 0.046828901909123206 +379 39 model.c_max 8.212906283576409 +379 39 optimizer.lr 0.013726565785584219 +379 39 training.batch_size 2.0 +379 39 training.label_smoothing 0.06942154566726241 +379 40 model.embedding_dim 0.0 +379 40 model.c_min 0.08371991877052598 +379 40 model.c_max 5.151057650402078 +379 40 optimizer.lr 0.0013418937503246473 +379 40 training.batch_size 2.0 +379 40 
training.label_smoothing 0.07131404518637002 +379 41 model.embedding_dim 2.0 +379 41 model.c_min 0.020629339384663138 +379 41 model.c_max 9.333396589366671 +379 41 optimizer.lr 0.020053674430082682 +379 41 training.batch_size 1.0 +379 41 training.label_smoothing 0.020174162420482835 +379 42 model.embedding_dim 1.0 +379 42 model.c_min 0.04723905232477966 +379 42 model.c_max 5.043536695272755 +379 42 optimizer.lr 0.08257235349165717 +379 42 training.batch_size 1.0 +379 42 training.label_smoothing 0.007464378038031372 +379 43 model.embedding_dim 0.0 +379 43 model.c_min 0.055901846627591734 +379 43 model.c_max 2.964898968901919 +379 43 optimizer.lr 0.02216804603757685 +379 43 training.batch_size 1.0 +379 43 training.label_smoothing 0.057429864734772525 +379 44 model.embedding_dim 0.0 +379 44 model.c_min 0.015411579268332093 +379 44 model.c_max 4.087542819606623 +379 44 optimizer.lr 0.005513452590816401 +379 44 training.batch_size 0.0 +379 44 training.label_smoothing 0.03834429285169362 +379 45 model.embedding_dim 0.0 +379 45 model.c_min 0.0351138562486706 +379 45 model.c_max 3.193461847644032 +379 45 optimizer.lr 0.042818353185875305 +379 45 training.batch_size 1.0 +379 45 training.label_smoothing 0.12442079304558297 +379 46 model.embedding_dim 1.0 +379 46 model.c_min 0.04190426849229742 +379 46 model.c_max 8.89039925976807 +379 46 optimizer.lr 0.005984320319046288 +379 46 training.batch_size 0.0 +379 46 training.label_smoothing 0.6120796937484727 +379 47 model.embedding_dim 0.0 +379 47 model.c_min 0.019632994316011544 +379 47 model.c_max 4.741777914311012 +379 47 optimizer.lr 0.0019202500311687302 +379 47 training.batch_size 0.0 +379 47 training.label_smoothing 0.003204900952237838 +379 48 model.embedding_dim 1.0 +379 48 model.c_min 0.05825083771498051 +379 48 model.c_max 7.377791919054128 +379 48 optimizer.lr 0.02986713402265384 +379 48 training.batch_size 2.0 +379 48 training.label_smoothing 0.5408565377411022 +379 49 model.embedding_dim 1.0 +379 49 model.c_min 
0.018017738662700667 +379 49 model.c_max 4.1900525579379835 +379 49 optimizer.lr 0.012437968164846085 +379 49 training.batch_size 2.0 +379 49 training.label_smoothing 0.11994070916902157 +379 50 model.embedding_dim 1.0 +379 50 model.c_min 0.020173250299781623 +379 50 model.c_max 7.231785319994575 +379 50 optimizer.lr 0.0015044502234929247 +379 50 training.batch_size 0.0 +379 50 training.label_smoothing 0.20014509381444825 +379 51 model.embedding_dim 1.0 +379 51 model.c_min 0.013992586845809653 +379 51 model.c_max 8.0936989103405 +379 51 optimizer.lr 0.033878649774767206 +379 51 training.batch_size 2.0 +379 51 training.label_smoothing 0.022032332375349937 +379 52 model.embedding_dim 1.0 +379 52 model.c_min 0.04577653252995904 +379 52 model.c_max 4.28375989736428 +379 52 optimizer.lr 0.003336178757491212 +379 52 training.batch_size 0.0 +379 52 training.label_smoothing 0.014273895858482929 +379 53 model.embedding_dim 1.0 +379 53 model.c_min 0.012287373564621496 +379 53 model.c_max 3.3199353655065584 +379 53 optimizer.lr 0.08297848792387348 +379 53 training.batch_size 1.0 +379 53 training.label_smoothing 0.6446856499113116 +379 54 model.embedding_dim 0.0 +379 54 model.c_min 0.053105759383686724 +379 54 model.c_max 3.431541133296467 +379 54 optimizer.lr 0.017985932239728185 +379 54 training.batch_size 0.0 +379 54 training.label_smoothing 0.07454674240041972 +379 55 model.embedding_dim 0.0 +379 55 model.c_min 0.018659840013518916 +379 55 model.c_max 9.408618509052461 +379 55 optimizer.lr 0.0012668065022747466 +379 55 training.batch_size 2.0 +379 55 training.label_smoothing 0.3727271355296999 +379 56 model.embedding_dim 0.0 +379 56 model.c_min 0.09012032715713088 +379 56 model.c_max 8.312535926461543 +379 56 optimizer.lr 0.022410133795317852 +379 56 training.batch_size 0.0 +379 56 training.label_smoothing 0.003970092667608035 +379 57 model.embedding_dim 0.0 +379 57 model.c_min 0.015734561739290432 +379 57 model.c_max 1.9929589515496557 +379 57 optimizer.lr 
0.017600715258434957 +379 57 training.batch_size 0.0 +379 57 training.label_smoothing 0.8387593300835126 +379 58 model.embedding_dim 2.0 +379 58 model.c_min 0.012067933715437752 +379 58 model.c_max 7.875264131978477 +379 58 optimizer.lr 0.0012130164412347544 +379 58 training.batch_size 2.0 +379 58 training.label_smoothing 0.0431449246708821 +379 59 model.embedding_dim 2.0 +379 59 model.c_min 0.02435149634447675 +379 59 model.c_max 1.0953955890821425 +379 59 optimizer.lr 0.0027497196033796183 +379 59 training.batch_size 2.0 +379 59 training.label_smoothing 0.5228624004280998 +379 60 model.embedding_dim 1.0 +379 60 model.c_min 0.07998753131460333 +379 60 model.c_max 1.9271250458687463 +379 60 optimizer.lr 0.017835002651503045 +379 60 training.batch_size 2.0 +379 60 training.label_smoothing 0.04154277346585252 +379 61 model.embedding_dim 2.0 +379 61 model.c_min 0.08678550330345328 +379 61 model.c_max 8.583322545459776 +379 61 optimizer.lr 0.02666686012991599 +379 61 training.batch_size 0.0 +379 61 training.label_smoothing 0.02429974780332648 +379 62 model.embedding_dim 2.0 +379 62 model.c_min 0.048049802467767265 +379 62 model.c_max 7.982244390790923 +379 62 optimizer.lr 0.014709939660155418 +379 62 training.batch_size 0.0 +379 62 training.label_smoothing 0.6976577092871744 +379 63 model.embedding_dim 1.0 +379 63 model.c_min 0.045519146981178815 +379 63 model.c_max 8.66326286663243 +379 63 optimizer.lr 0.032003820697903015 +379 63 training.batch_size 0.0 +379 63 training.label_smoothing 0.0023588159128314573 +379 64 model.embedding_dim 1.0 +379 64 model.c_min 0.01033378356374021 +379 64 model.c_max 3.819345124826154 +379 64 optimizer.lr 0.044145243723763494 +379 64 training.batch_size 0.0 +379 64 training.label_smoothing 0.1442724660084721 +379 65 model.embedding_dim 1.0 +379 65 model.c_min 0.04179420750104958 +379 65 model.c_max 7.550322547482037 +379 65 optimizer.lr 0.0027671071512794304 +379 65 training.batch_size 1.0 +379 65 training.label_smoothing 
0.4007062102235793 +379 66 model.embedding_dim 0.0 +379 66 model.c_min 0.031182593226213407 +379 66 model.c_max 2.9644614987008095 +379 66 optimizer.lr 0.0016394890255377337 +379 66 training.batch_size 1.0 +379 66 training.label_smoothing 0.0018808585909729553 +379 67 model.embedding_dim 1.0 +379 67 model.c_min 0.020005946498829048 +379 67 model.c_max 6.139270234202034 +379 67 optimizer.lr 0.0018693086074227266 +379 67 training.batch_size 0.0 +379 67 training.label_smoothing 0.07248195789639157 +379 68 model.embedding_dim 1.0 +379 68 model.c_min 0.03623046956997253 +379 68 model.c_max 5.512025017267946 +379 68 optimizer.lr 0.034898164736065426 +379 68 training.batch_size 0.0 +379 68 training.label_smoothing 0.0057304035842590355 +379 69 model.embedding_dim 1.0 +379 69 model.c_min 0.03242797461866485 +379 69 model.c_max 1.384189653680573 +379 69 optimizer.lr 0.013263123753168533 +379 69 training.batch_size 2.0 +379 69 training.label_smoothing 0.24043658504880427 +379 70 model.embedding_dim 1.0 +379 70 model.c_min 0.07587638330923478 +379 70 model.c_max 9.80219750756121 +379 70 optimizer.lr 0.04913147163894897 +379 70 training.batch_size 2.0 +379 70 training.label_smoothing 0.06823008199907145 +379 71 model.embedding_dim 2.0 +379 71 model.c_min 0.07438312459356143 +379 71 model.c_max 9.094540881047845 +379 71 optimizer.lr 0.06005233418774821 +379 71 training.batch_size 0.0 +379 71 training.label_smoothing 0.0018597294461923245 +379 72 model.embedding_dim 2.0 +379 72 model.c_min 0.015276921805848913 +379 72 model.c_max 3.1994877013676932 +379 72 optimizer.lr 0.00751415084489001 +379 72 training.batch_size 2.0 +379 72 training.label_smoothing 0.0891647853906505 +379 73 model.embedding_dim 2.0 +379 73 model.c_min 0.052899159903971336 +379 73 model.c_max 2.1295149221232106 +379 73 optimizer.lr 0.003551342847407427 +379 73 training.batch_size 1.0 +379 73 training.label_smoothing 0.0048035255103291786 +379 74 model.embedding_dim 0.0 +379 74 model.c_min 0.018531028902525427 
+379 74 model.c_max 3.210438433474699 +379 74 optimizer.lr 0.022216496064664224 +379 74 training.batch_size 1.0 +379 74 training.label_smoothing 0.016989332829423946 +379 75 model.embedding_dim 2.0 +379 75 model.c_min 0.011886142321473317 +379 75 model.c_max 4.617575838065 +379 75 optimizer.lr 0.0654913254979469 +379 75 training.batch_size 1.0 +379 75 training.label_smoothing 0.010728355807712538 +379 76 model.embedding_dim 2.0 +379 76 model.c_min 0.01570353663396077 +379 76 model.c_max 1.3350804631048834 +379 76 optimizer.lr 0.011276342790856882 +379 76 training.batch_size 2.0 +379 76 training.label_smoothing 0.013428625171234766 +379 77 model.embedding_dim 1.0 +379 77 model.c_min 0.03266765046807697 +379 77 model.c_max 4.5971541491298 +379 77 optimizer.lr 0.07382593006535915 +379 77 training.batch_size 0.0 +379 77 training.label_smoothing 0.44655528798448596 +379 78 model.embedding_dim 0.0 +379 78 model.c_min 0.09243014000417861 +379 78 model.c_max 6.463319477489207 +379 78 optimizer.lr 0.011309733158262827 +379 78 training.batch_size 1.0 +379 78 training.label_smoothing 0.0016201978139233552 +379 79 model.embedding_dim 2.0 +379 79 model.c_min 0.046518387583916236 +379 79 model.c_max 8.314102175547413 +379 79 optimizer.lr 0.002078129310651653 +379 79 training.batch_size 1.0 +379 79 training.label_smoothing 0.23222503723481253 +379 80 model.embedding_dim 2.0 +379 80 model.c_min 0.021444679297117505 +379 80 model.c_max 1.0480945883957926 +379 80 optimizer.lr 0.008693782933079857 +379 80 training.batch_size 0.0 +379 80 training.label_smoothing 0.001515550011555995 +379 81 model.embedding_dim 2.0 +379 81 model.c_min 0.0145716762435213 +379 81 model.c_max 3.5981834519961278 +379 81 optimizer.lr 0.0020189938550922576 +379 81 training.batch_size 1.0 +379 81 training.label_smoothing 0.0010969150800641636 +379 82 model.embedding_dim 1.0 +379 82 model.c_min 0.018918590802403146 +379 82 model.c_max 9.309819701667323 +379 82 optimizer.lr 0.011542096999724345 +379 82 
training.batch_size 0.0 +379 82 training.label_smoothing 0.03172831469983755 +379 83 model.embedding_dim 0.0 +379 83 model.c_min 0.08496080383341703 +379 83 model.c_max 4.933405729453723 +379 83 optimizer.lr 0.0013071832043745223 +379 83 training.batch_size 0.0 +379 83 training.label_smoothing 0.22488310072269294 +379 84 model.embedding_dim 1.0 +379 84 model.c_min 0.052686150078880466 +379 84 model.c_max 3.4599598265972222 +379 84 optimizer.lr 0.0019491995718079575 +379 84 training.batch_size 0.0 +379 84 training.label_smoothing 0.022331064124450066 +379 85 model.embedding_dim 1.0 +379 85 model.c_min 0.022502548942920156 +379 85 model.c_max 6.8599138381813995 +379 85 optimizer.lr 0.017669417685635513 +379 85 training.batch_size 0.0 +379 85 training.label_smoothing 0.41594584858196704 +379 86 model.embedding_dim 0.0 +379 86 model.c_min 0.03942436021886528 +379 86 model.c_max 6.5601507458135195 +379 86 optimizer.lr 0.04750339412676708 +379 86 training.batch_size 1.0 +379 86 training.label_smoothing 0.030158644456108236 +379 87 model.embedding_dim 0.0 +379 87 model.c_min 0.03683333309808561 +379 87 model.c_max 5.491426248892728 +379 87 optimizer.lr 0.0012371006470741181 +379 87 training.batch_size 0.0 +379 87 training.label_smoothing 0.31291594299205383 +379 88 model.embedding_dim 0.0 +379 88 model.c_min 0.020231518751806652 +379 88 model.c_max 1.5480576220256919 +379 88 optimizer.lr 0.08352708296694027 +379 88 training.batch_size 0.0 +379 88 training.label_smoothing 0.254433519269208 +379 89 model.embedding_dim 0.0 +379 89 model.c_min 0.010667704110052599 +379 89 model.c_max 7.121081750804316 +379 89 optimizer.lr 0.0012347845362065644 +379 89 training.batch_size 0.0 +379 89 training.label_smoothing 0.011523639555989404 +379 90 model.embedding_dim 2.0 +379 90 model.c_min 0.06602980678909291 +379 90 model.c_max 8.705415728928417 +379 90 optimizer.lr 0.012677722143434775 +379 90 training.batch_size 1.0 +379 90 training.label_smoothing 0.0014685656355927184 +379 91 
model.embedding_dim 1.0 +379 91 model.c_min 0.06831094589202474 +379 91 model.c_max 1.3553067197180426 +379 91 optimizer.lr 0.002042018346919474 +379 91 training.batch_size 1.0 +379 91 training.label_smoothing 0.0013771141925105913 +379 92 model.embedding_dim 2.0 +379 92 model.c_min 0.012656798745103157 +379 92 model.c_max 5.44611735777441 +379 92 optimizer.lr 0.01822661306058047 +379 92 training.batch_size 1.0 +379 92 training.label_smoothing 0.0015489744728552728 +379 93 model.embedding_dim 0.0 +379 93 model.c_min 0.06802559916929755 +379 93 model.c_max 8.51768406344722 +379 93 optimizer.lr 0.010745573075803963 +379 93 training.batch_size 0.0 +379 93 training.label_smoothing 0.13953059861326705 +379 94 model.embedding_dim 2.0 +379 94 model.c_min 0.04960710562907007 +379 94 model.c_max 5.5760337208863655 +379 94 optimizer.lr 0.08420329921728043 +379 94 training.batch_size 2.0 +379 94 training.label_smoothing 0.02351717754100886 +379 95 model.embedding_dim 0.0 +379 95 model.c_min 0.014195385206305793 +379 95 model.c_max 4.505418888668542 +379 95 optimizer.lr 0.003409054480006751 +379 95 training.batch_size 1.0 +379 95 training.label_smoothing 0.034578061490419046 +379 96 model.embedding_dim 1.0 +379 96 model.c_min 0.043322104765669965 +379 96 model.c_max 2.724680433664927 +379 96 optimizer.lr 0.033072209999887234 +379 96 training.batch_size 2.0 +379 96 training.label_smoothing 0.17817883044598867 +379 97 model.embedding_dim 1.0 +379 97 model.c_min 0.020387537393916976 +379 97 model.c_max 4.678932286681565 +379 97 optimizer.lr 0.00940913904141332 +379 97 training.batch_size 2.0 +379 97 training.label_smoothing 0.001275608466486543 +379 98 model.embedding_dim 1.0 +379 98 model.c_min 0.0516573940501006 +379 98 model.c_max 9.656675703403028 +379 98 optimizer.lr 0.00391667243390144 +379 98 training.batch_size 0.0 +379 98 training.label_smoothing 0.08993599170262224 +379 99 model.embedding_dim 2.0 +379 99 model.c_min 0.0371191909192602 +379 99 model.c_max 
6.1861190138196935 +379 99 optimizer.lr 0.013318615728919736 +379 99 training.batch_size 0.0 +379 99 training.label_smoothing 0.11918257275941989 +379 100 model.embedding_dim 1.0 +379 100 model.c_min 0.027566137621050823 +379 100 model.c_max 5.554528137424068 +379 100 optimizer.lr 0.06075115759090637 +379 100 training.batch_size 0.0 +379 100 training.label_smoothing 0.006918779920710815 +379 1 dataset """kinships""" +379 1 model """kg2e""" +379 1 loss """crossentropy""" +379 1 regularizer """no""" +379 1 optimizer """adam""" +379 1 training_loop """lcwa""" +379 1 evaluator """rankbased""" +379 2 dataset """kinships""" +379 2 model """kg2e""" +379 2 loss """crossentropy""" +379 2 regularizer """no""" +379 2 optimizer """adam""" +379 2 training_loop """lcwa""" +379 2 evaluator """rankbased""" +379 3 dataset """kinships""" +379 3 model """kg2e""" +379 3 loss """crossentropy""" +379 3 regularizer """no""" +379 3 optimizer """adam""" +379 3 training_loop """lcwa""" +379 3 evaluator """rankbased""" +379 4 dataset """kinships""" +379 4 model """kg2e""" +379 4 loss """crossentropy""" +379 4 regularizer """no""" +379 4 optimizer """adam""" +379 4 training_loop """lcwa""" +379 4 evaluator """rankbased""" +379 5 dataset """kinships""" +379 5 model """kg2e""" +379 5 loss """crossentropy""" +379 5 regularizer """no""" +379 5 optimizer """adam""" +379 5 training_loop """lcwa""" +379 5 evaluator """rankbased""" +379 6 dataset """kinships""" +379 6 model """kg2e""" +379 6 loss """crossentropy""" +379 6 regularizer """no""" +379 6 optimizer """adam""" +379 6 training_loop """lcwa""" +379 6 evaluator """rankbased""" +379 7 dataset """kinships""" +379 7 model """kg2e""" +379 7 loss """crossentropy""" +379 7 regularizer """no""" +379 7 optimizer """adam""" +379 7 training_loop """lcwa""" +379 7 evaluator """rankbased""" +379 8 dataset """kinships""" +379 8 model """kg2e""" +379 8 loss """crossentropy""" +379 8 regularizer """no""" +379 8 optimizer """adam""" +379 8 training_loop 
"""lcwa""" +379 8 evaluator """rankbased""" +379 9 dataset """kinships""" +379 9 model """kg2e""" +379 9 loss """crossentropy""" +379 9 regularizer """no""" +379 9 optimizer """adam""" +379 9 training_loop """lcwa""" +379 9 evaluator """rankbased""" +379 10 dataset """kinships""" +379 10 model """kg2e""" +379 10 loss """crossentropy""" +379 10 regularizer """no""" +379 10 optimizer """adam""" +379 10 training_loop """lcwa""" +379 10 evaluator """rankbased""" +379 11 dataset """kinships""" +379 11 model """kg2e""" +379 11 loss """crossentropy""" +379 11 regularizer """no""" +379 11 optimizer """adam""" +379 11 training_loop """lcwa""" +379 11 evaluator """rankbased""" +379 12 dataset """kinships""" +379 12 model """kg2e""" +379 12 loss """crossentropy""" +379 12 regularizer """no""" +379 12 optimizer """adam""" +379 12 training_loop """lcwa""" +379 12 evaluator """rankbased""" +379 13 dataset """kinships""" +379 13 model """kg2e""" +379 13 loss """crossentropy""" +379 13 regularizer """no""" +379 13 optimizer """adam""" +379 13 training_loop """lcwa""" +379 13 evaluator """rankbased""" +379 14 dataset """kinships""" +379 14 model """kg2e""" +379 14 loss """crossentropy""" +379 14 regularizer """no""" +379 14 optimizer """adam""" +379 14 training_loop """lcwa""" +379 14 evaluator """rankbased""" +379 15 dataset """kinships""" +379 15 model """kg2e""" +379 15 loss """crossentropy""" +379 15 regularizer """no""" +379 15 optimizer """adam""" +379 15 training_loop """lcwa""" +379 15 evaluator """rankbased""" +379 16 dataset """kinships""" +379 16 model """kg2e""" +379 16 loss """crossentropy""" +379 16 regularizer """no""" +379 16 optimizer """adam""" +379 16 training_loop """lcwa""" +379 16 evaluator """rankbased""" +379 17 dataset """kinships""" +379 17 model """kg2e""" +379 17 loss """crossentropy""" +379 17 regularizer """no""" +379 17 optimizer """adam""" +379 17 training_loop """lcwa""" +379 17 evaluator """rankbased""" +379 18 dataset """kinships""" +379 18 model 
"""kg2e""" +379 18 loss """crossentropy""" +379 18 regularizer """no""" +379 18 optimizer """adam""" +379 18 training_loop """lcwa""" +379 18 evaluator """rankbased""" +379 19 dataset """kinships""" +379 19 model """kg2e""" +379 19 loss """crossentropy""" +379 19 regularizer """no""" +379 19 optimizer """adam""" +379 19 training_loop """lcwa""" +379 19 evaluator """rankbased""" +379 20 dataset """kinships""" +379 20 model """kg2e""" +379 20 loss """crossentropy""" +379 20 regularizer """no""" +379 20 optimizer """adam""" +379 20 training_loop """lcwa""" +379 20 evaluator """rankbased""" +379 21 dataset """kinships""" +379 21 model """kg2e""" +379 21 loss """crossentropy""" +379 21 regularizer """no""" +379 21 optimizer """adam""" +379 21 training_loop """lcwa""" +379 21 evaluator """rankbased""" +379 22 dataset """kinships""" +379 22 model """kg2e""" +379 22 loss """crossentropy""" +379 22 regularizer """no""" +379 22 optimizer """adam""" +379 22 training_loop """lcwa""" +379 22 evaluator """rankbased""" +379 23 dataset """kinships""" +379 23 model """kg2e""" +379 23 loss """crossentropy""" +379 23 regularizer """no""" +379 23 optimizer """adam""" +379 23 training_loop """lcwa""" +379 23 evaluator """rankbased""" +379 24 dataset """kinships""" +379 24 model """kg2e""" +379 24 loss """crossentropy""" +379 24 regularizer """no""" +379 24 optimizer """adam""" +379 24 training_loop """lcwa""" +379 24 evaluator """rankbased""" +379 25 dataset """kinships""" +379 25 model """kg2e""" +379 25 loss """crossentropy""" +379 25 regularizer """no""" +379 25 optimizer """adam""" +379 25 training_loop """lcwa""" +379 25 evaluator """rankbased""" +379 26 dataset """kinships""" +379 26 model """kg2e""" +379 26 loss """crossentropy""" +379 26 regularizer """no""" +379 26 optimizer """adam""" +379 26 training_loop """lcwa""" +379 26 evaluator """rankbased""" +379 27 dataset """kinships""" +379 27 model """kg2e""" +379 27 loss """crossentropy""" +379 27 regularizer """no""" +379 27 
optimizer """adam""" +379 27 training_loop """lcwa""" +379 27 evaluator """rankbased""" +379 28 dataset """kinships""" +379 28 model """kg2e""" +379 28 loss """crossentropy""" +379 28 regularizer """no""" +379 28 optimizer """adam""" +379 28 training_loop """lcwa""" +379 28 evaluator """rankbased""" +379 29 dataset """kinships""" +379 29 model """kg2e""" +379 29 loss """crossentropy""" +379 29 regularizer """no""" +379 29 optimizer """adam""" +379 29 training_loop """lcwa""" +379 29 evaluator """rankbased""" +379 30 dataset """kinships""" +379 30 model """kg2e""" +379 30 loss """crossentropy""" +379 30 regularizer """no""" +379 30 optimizer """adam""" +379 30 training_loop """lcwa""" +379 30 evaluator """rankbased""" +379 31 dataset """kinships""" +379 31 model """kg2e""" +379 31 loss """crossentropy""" +379 31 regularizer """no""" +379 31 optimizer """adam""" +379 31 training_loop """lcwa""" +379 31 evaluator """rankbased""" +379 32 dataset """kinships""" +379 32 model """kg2e""" +379 32 loss """crossentropy""" +379 32 regularizer """no""" +379 32 optimizer """adam""" +379 32 training_loop """lcwa""" +379 32 evaluator """rankbased""" +379 33 dataset """kinships""" +379 33 model """kg2e""" +379 33 loss """crossentropy""" +379 33 regularizer """no""" +379 33 optimizer """adam""" +379 33 training_loop """lcwa""" +379 33 evaluator """rankbased""" +379 34 dataset """kinships""" +379 34 model """kg2e""" +379 34 loss """crossentropy""" +379 34 regularizer """no""" +379 34 optimizer """adam""" +379 34 training_loop """lcwa""" +379 34 evaluator """rankbased""" +379 35 dataset """kinships""" +379 35 model """kg2e""" +379 35 loss """crossentropy""" +379 35 regularizer """no""" +379 35 optimizer """adam""" +379 35 training_loop """lcwa""" +379 35 evaluator """rankbased""" +379 36 dataset """kinships""" +379 36 model """kg2e""" +379 36 loss """crossentropy""" +379 36 regularizer """no""" +379 36 optimizer """adam""" +379 36 training_loop """lcwa""" +379 36 evaluator 
"""rankbased""" +379 37 dataset """kinships""" +379 37 model """kg2e""" +379 37 loss """crossentropy""" +379 37 regularizer """no""" +379 37 optimizer """adam""" +379 37 training_loop """lcwa""" +379 37 evaluator """rankbased""" +379 38 dataset """kinships""" +379 38 model """kg2e""" +379 38 loss """crossentropy""" +379 38 regularizer """no""" +379 38 optimizer """adam""" +379 38 training_loop """lcwa""" +379 38 evaluator """rankbased""" +379 39 dataset """kinships""" +379 39 model """kg2e""" +379 39 loss """crossentropy""" +379 39 regularizer """no""" +379 39 optimizer """adam""" +379 39 training_loop """lcwa""" +379 39 evaluator """rankbased""" +379 40 dataset """kinships""" +379 40 model """kg2e""" +379 40 loss """crossentropy""" +379 40 regularizer """no""" +379 40 optimizer """adam""" +379 40 training_loop """lcwa""" +379 40 evaluator """rankbased""" +379 41 dataset """kinships""" +379 41 model """kg2e""" +379 41 loss """crossentropy""" +379 41 regularizer """no""" +379 41 optimizer """adam""" +379 41 training_loop """lcwa""" +379 41 evaluator """rankbased""" +379 42 dataset """kinships""" +379 42 model """kg2e""" +379 42 loss """crossentropy""" +379 42 regularizer """no""" +379 42 optimizer """adam""" +379 42 training_loop """lcwa""" +379 42 evaluator """rankbased""" +379 43 dataset """kinships""" +379 43 model """kg2e""" +379 43 loss """crossentropy""" +379 43 regularizer """no""" +379 43 optimizer """adam""" +379 43 training_loop """lcwa""" +379 43 evaluator """rankbased""" +379 44 dataset """kinships""" +379 44 model """kg2e""" +379 44 loss """crossentropy""" +379 44 regularizer """no""" +379 44 optimizer """adam""" +379 44 training_loop """lcwa""" +379 44 evaluator """rankbased""" +379 45 dataset """kinships""" +379 45 model """kg2e""" +379 45 loss """crossentropy""" +379 45 regularizer """no""" +379 45 optimizer """adam""" +379 45 training_loop """lcwa""" +379 45 evaluator """rankbased""" +379 46 dataset """kinships""" +379 46 model """kg2e""" +379 46 
loss """crossentropy""" +379 46 regularizer """no""" +379 46 optimizer """adam""" +379 46 training_loop """lcwa""" +379 46 evaluator """rankbased""" +379 47 dataset """kinships""" +379 47 model """kg2e""" +379 47 loss """crossentropy""" +379 47 regularizer """no""" +379 47 optimizer """adam""" +379 47 training_loop """lcwa""" +379 47 evaluator """rankbased""" +379 48 dataset """kinships""" +379 48 model """kg2e""" +379 48 loss """crossentropy""" +379 48 regularizer """no""" +379 48 optimizer """adam""" +379 48 training_loop """lcwa""" +379 48 evaluator """rankbased""" +379 49 dataset """kinships""" +379 49 model """kg2e""" +379 49 loss """crossentropy""" +379 49 regularizer """no""" +379 49 optimizer """adam""" +379 49 training_loop """lcwa""" +379 49 evaluator """rankbased""" +379 50 dataset """kinships""" +379 50 model """kg2e""" +379 50 loss """crossentropy""" +379 50 regularizer """no""" +379 50 optimizer """adam""" +379 50 training_loop """lcwa""" +379 50 evaluator """rankbased""" +379 51 dataset """kinships""" +379 51 model """kg2e""" +379 51 loss """crossentropy""" +379 51 regularizer """no""" +379 51 optimizer """adam""" +379 51 training_loop """lcwa""" +379 51 evaluator """rankbased""" +379 52 dataset """kinships""" +379 52 model """kg2e""" +379 52 loss """crossentropy""" +379 52 regularizer """no""" +379 52 optimizer """adam""" +379 52 training_loop """lcwa""" +379 52 evaluator """rankbased""" +379 53 dataset """kinships""" +379 53 model """kg2e""" +379 53 loss """crossentropy""" +379 53 regularizer """no""" +379 53 optimizer """adam""" +379 53 training_loop """lcwa""" +379 53 evaluator """rankbased""" +379 54 dataset """kinships""" +379 54 model """kg2e""" +379 54 loss """crossentropy""" +379 54 regularizer """no""" +379 54 optimizer """adam""" +379 54 training_loop """lcwa""" +379 54 evaluator """rankbased""" +379 55 dataset """kinships""" +379 55 model """kg2e""" +379 55 loss """crossentropy""" +379 55 regularizer """no""" +379 55 optimizer """adam""" 
+379 55 training_loop """lcwa""" +379 55 evaluator """rankbased""" +379 56 dataset """kinships""" +379 56 model """kg2e""" +379 56 loss """crossentropy""" +379 56 regularizer """no""" +379 56 optimizer """adam""" +379 56 training_loop """lcwa""" +379 56 evaluator """rankbased""" +379 57 dataset """kinships""" +379 57 model """kg2e""" +379 57 loss """crossentropy""" +379 57 regularizer """no""" +379 57 optimizer """adam""" +379 57 training_loop """lcwa""" +379 57 evaluator """rankbased""" +379 58 dataset """kinships""" +379 58 model """kg2e""" +379 58 loss """crossentropy""" +379 58 regularizer """no""" +379 58 optimizer """adam""" +379 58 training_loop """lcwa""" +379 58 evaluator """rankbased""" +379 59 dataset """kinships""" +379 59 model """kg2e""" +379 59 loss """crossentropy""" +379 59 regularizer """no""" +379 59 optimizer """adam""" +379 59 training_loop """lcwa""" +379 59 evaluator """rankbased""" +379 60 dataset """kinships""" +379 60 model """kg2e""" +379 60 loss """crossentropy""" +379 60 regularizer """no""" +379 60 optimizer """adam""" +379 60 training_loop """lcwa""" +379 60 evaluator """rankbased""" +379 61 dataset """kinships""" +379 61 model """kg2e""" +379 61 loss """crossentropy""" +379 61 regularizer """no""" +379 61 optimizer """adam""" +379 61 training_loop """lcwa""" +379 61 evaluator """rankbased""" +379 62 dataset """kinships""" +379 62 model """kg2e""" +379 62 loss """crossentropy""" +379 62 regularizer """no""" +379 62 optimizer """adam""" +379 62 training_loop """lcwa""" +379 62 evaluator """rankbased""" +379 63 dataset """kinships""" +379 63 model """kg2e""" +379 63 loss """crossentropy""" +379 63 regularizer """no""" +379 63 optimizer """adam""" +379 63 training_loop """lcwa""" +379 63 evaluator """rankbased""" +379 64 dataset """kinships""" +379 64 model """kg2e""" +379 64 loss """crossentropy""" +379 64 regularizer """no""" +379 64 optimizer """adam""" +379 64 training_loop """lcwa""" +379 64 evaluator """rankbased""" +379 65 dataset 
"""kinships""" +379 65 model """kg2e""" +379 65 loss """crossentropy""" +379 65 regularizer """no""" +379 65 optimizer """adam""" +379 65 training_loop """lcwa""" +379 65 evaluator """rankbased""" +379 66 dataset """kinships""" +379 66 model """kg2e""" +379 66 loss """crossentropy""" +379 66 regularizer """no""" +379 66 optimizer """adam""" +379 66 training_loop """lcwa""" +379 66 evaluator """rankbased""" +379 67 dataset """kinships""" +379 67 model """kg2e""" +379 67 loss """crossentropy""" +379 67 regularizer """no""" +379 67 optimizer """adam""" +379 67 training_loop """lcwa""" +379 67 evaluator """rankbased""" +379 68 dataset """kinships""" +379 68 model """kg2e""" +379 68 loss """crossentropy""" +379 68 regularizer """no""" +379 68 optimizer """adam""" +379 68 training_loop """lcwa""" +379 68 evaluator """rankbased""" +379 69 dataset """kinships""" +379 69 model """kg2e""" +379 69 loss """crossentropy""" +379 69 regularizer """no""" +379 69 optimizer """adam""" +379 69 training_loop """lcwa""" +379 69 evaluator """rankbased""" +379 70 dataset """kinships""" +379 70 model """kg2e""" +379 70 loss """crossentropy""" +379 70 regularizer """no""" +379 70 optimizer """adam""" +379 70 training_loop """lcwa""" +379 70 evaluator """rankbased""" +379 71 dataset """kinships""" +379 71 model """kg2e""" +379 71 loss """crossentropy""" +379 71 regularizer """no""" +379 71 optimizer """adam""" +379 71 training_loop """lcwa""" +379 71 evaluator """rankbased""" +379 72 dataset """kinships""" +379 72 model """kg2e""" +379 72 loss """crossentropy""" +379 72 regularizer """no""" +379 72 optimizer """adam""" +379 72 training_loop """lcwa""" +379 72 evaluator """rankbased""" +379 73 dataset """kinships""" +379 73 model """kg2e""" +379 73 loss """crossentropy""" +379 73 regularizer """no""" +379 73 optimizer """adam""" +379 73 training_loop """lcwa""" +379 73 evaluator """rankbased""" +379 74 dataset """kinships""" +379 74 model """kg2e""" +379 74 loss """crossentropy""" +379 74 
regularizer """no""" +379 74 optimizer """adam""" +379 74 training_loop """lcwa""" +379 74 evaluator """rankbased""" +379 75 dataset """kinships""" +379 75 model """kg2e""" +379 75 loss """crossentropy""" +379 75 regularizer """no""" +379 75 optimizer """adam""" +379 75 training_loop """lcwa""" +379 75 evaluator """rankbased""" +379 76 dataset """kinships""" +379 76 model """kg2e""" +379 76 loss """crossentropy""" +379 76 regularizer """no""" +379 76 optimizer """adam""" +379 76 training_loop """lcwa""" +379 76 evaluator """rankbased""" +379 77 dataset """kinships""" +379 77 model """kg2e""" +379 77 loss """crossentropy""" +379 77 regularizer """no""" +379 77 optimizer """adam""" +379 77 training_loop """lcwa""" +379 77 evaluator """rankbased""" +379 78 dataset """kinships""" +379 78 model """kg2e""" +379 78 loss """crossentropy""" +379 78 regularizer """no""" +379 78 optimizer """adam""" +379 78 training_loop """lcwa""" +379 78 evaluator """rankbased""" +379 79 dataset """kinships""" +379 79 model """kg2e""" +379 79 loss """crossentropy""" +379 79 regularizer """no""" +379 79 optimizer """adam""" +379 79 training_loop """lcwa""" +379 79 evaluator """rankbased""" +379 80 dataset """kinships""" +379 80 model """kg2e""" +379 80 loss """crossentropy""" +379 80 regularizer """no""" +379 80 optimizer """adam""" +379 80 training_loop """lcwa""" +379 80 evaluator """rankbased""" +379 81 dataset """kinships""" +379 81 model """kg2e""" +379 81 loss """crossentropy""" +379 81 regularizer """no""" +379 81 optimizer """adam""" +379 81 training_loop """lcwa""" +379 81 evaluator """rankbased""" +379 82 dataset """kinships""" +379 82 model """kg2e""" +379 82 loss """crossentropy""" +379 82 regularizer """no""" +379 82 optimizer """adam""" +379 82 training_loop """lcwa""" +379 82 evaluator """rankbased""" +379 83 dataset """kinships""" +379 83 model """kg2e""" +379 83 loss """crossentropy""" +379 83 regularizer """no""" +379 83 optimizer """adam""" +379 83 training_loop """lcwa""" 
+379 83 evaluator """rankbased""" +379 84 dataset """kinships""" +379 84 model """kg2e""" +379 84 loss """crossentropy""" +379 84 regularizer """no""" +379 84 optimizer """adam""" +379 84 training_loop """lcwa""" +379 84 evaluator """rankbased""" +379 85 dataset """kinships""" +379 85 model """kg2e""" +379 85 loss """crossentropy""" +379 85 regularizer """no""" +379 85 optimizer """adam""" +379 85 training_loop """lcwa""" +379 85 evaluator """rankbased""" +379 86 dataset """kinships""" +379 86 model """kg2e""" +379 86 loss """crossentropy""" +379 86 regularizer """no""" +379 86 optimizer """adam""" +379 86 training_loop """lcwa""" +379 86 evaluator """rankbased""" +379 87 dataset """kinships""" +379 87 model """kg2e""" +379 87 loss """crossentropy""" +379 87 regularizer """no""" +379 87 optimizer """adam""" +379 87 training_loop """lcwa""" +379 87 evaluator """rankbased""" +379 88 dataset """kinships""" +379 88 model """kg2e""" +379 88 loss """crossentropy""" +379 88 regularizer """no""" +379 88 optimizer """adam""" +379 88 training_loop """lcwa""" +379 88 evaluator """rankbased""" +379 89 dataset """kinships""" +379 89 model """kg2e""" +379 89 loss """crossentropy""" +379 89 regularizer """no""" +379 89 optimizer """adam""" +379 89 training_loop """lcwa""" +379 89 evaluator """rankbased""" +379 90 dataset """kinships""" +379 90 model """kg2e""" +379 90 loss """crossentropy""" +379 90 regularizer """no""" +379 90 optimizer """adam""" +379 90 training_loop """lcwa""" +379 90 evaluator """rankbased""" +379 91 dataset """kinships""" +379 91 model """kg2e""" +379 91 loss """crossentropy""" +379 91 regularizer """no""" +379 91 optimizer """adam""" +379 91 training_loop """lcwa""" +379 91 evaluator """rankbased""" +379 92 dataset """kinships""" +379 92 model """kg2e""" +379 92 loss """crossentropy""" +379 92 regularizer """no""" +379 92 optimizer """adam""" +379 92 training_loop """lcwa""" +379 92 evaluator """rankbased""" +379 93 dataset """kinships""" +379 93 model 
"""kg2e""" +379 93 loss """crossentropy""" +379 93 regularizer """no""" +379 93 optimizer """adam""" +379 93 training_loop """lcwa""" +379 93 evaluator """rankbased""" +379 94 dataset """kinships""" +379 94 model """kg2e""" +379 94 loss """crossentropy""" +379 94 regularizer """no""" +379 94 optimizer """adam""" +379 94 training_loop """lcwa""" +379 94 evaluator """rankbased""" +379 95 dataset """kinships""" +379 95 model """kg2e""" +379 95 loss """crossentropy""" +379 95 regularizer """no""" +379 95 optimizer """adam""" +379 95 training_loop """lcwa""" +379 95 evaluator """rankbased""" +379 96 dataset """kinships""" +379 96 model """kg2e""" +379 96 loss """crossentropy""" +379 96 regularizer """no""" +379 96 optimizer """adam""" +379 96 training_loop """lcwa""" +379 96 evaluator """rankbased""" +379 97 dataset """kinships""" +379 97 model """kg2e""" +379 97 loss """crossentropy""" +379 97 regularizer """no""" +379 97 optimizer """adam""" +379 97 training_loop """lcwa""" +379 97 evaluator """rankbased""" +379 98 dataset """kinships""" +379 98 model """kg2e""" +379 98 loss """crossentropy""" +379 98 regularizer """no""" +379 98 optimizer """adam""" +379 98 training_loop """lcwa""" +379 98 evaluator """rankbased""" +379 99 dataset """kinships""" +379 99 model """kg2e""" +379 99 loss """crossentropy""" +379 99 regularizer """no""" +379 99 optimizer """adam""" +379 99 training_loop """lcwa""" +379 99 evaluator """rankbased""" +379 100 dataset """kinships""" +379 100 model """kg2e""" +379 100 loss """crossentropy""" +379 100 regularizer """no""" +379 100 optimizer """adam""" +379 100 training_loop """lcwa""" +379 100 evaluator """rankbased""" +380 1 model.embedding_dim 0.0 +380 1 model.c_min 0.02183184511892855 +380 1 model.c_max 3.8268997340139634 +380 1 optimizer.lr 0.0023657694817484 +380 1 training.batch_size 2.0 +380 1 training.label_smoothing 0.003499151513606791 +380 2 model.embedding_dim 1.0 +380 2 model.c_min 0.023937738818389873 +380 2 model.c_max 
6.213444270926961 +380 2 optimizer.lr 0.027298634648878682 +380 2 training.batch_size 1.0 +380 2 training.label_smoothing 0.11830793167420375 +380 3 model.embedding_dim 0.0 +380 3 model.c_min 0.03607043709561846 +380 3 model.c_max 8.948878383176286 +380 3 optimizer.lr 0.0026731748445145576 +380 3 training.batch_size 1.0 +380 3 training.label_smoothing 0.006567796138801292 +380 4 model.embedding_dim 0.0 +380 4 model.c_min 0.04659174648587082 +380 4 model.c_max 4.263638164712628 +380 4 optimizer.lr 0.0013663872361361525 +380 4 training.batch_size 1.0 +380 4 training.label_smoothing 0.04895066245680027 +380 5 model.embedding_dim 2.0 +380 5 model.c_min 0.020475766452326297 +380 5 model.c_max 1.831931462643476 +380 5 optimizer.lr 0.023159655302292272 +380 5 training.batch_size 0.0 +380 5 training.label_smoothing 0.03703997642270045 +380 6 model.embedding_dim 1.0 +380 6 model.c_min 0.01357501473403585 +380 6 model.c_max 9.19470261696285 +380 6 optimizer.lr 0.007502613390741352 +380 6 training.batch_size 2.0 +380 6 training.label_smoothing 0.0011771581797586196 +380 7 model.embedding_dim 1.0 +380 7 model.c_min 0.015870267667296937 +380 7 model.c_max 6.417635545008718 +380 7 optimizer.lr 0.018554293368361837 +380 7 training.batch_size 1.0 +380 7 training.label_smoothing 0.07519145173632677 +380 8 model.embedding_dim 1.0 +380 8 model.c_min 0.022638090382063696 +380 8 model.c_max 1.254286756483446 +380 8 optimizer.lr 0.0012977645415830112 +380 8 training.batch_size 1.0 +380 8 training.label_smoothing 0.039116963853641405 +380 9 model.embedding_dim 2.0 +380 9 model.c_min 0.024659023062664598 +380 9 model.c_max 7.497176620165913 +380 9 optimizer.lr 0.07446851267485025 +380 9 training.batch_size 1.0 +380 9 training.label_smoothing 0.04327228800893464 +380 10 model.embedding_dim 1.0 +380 10 model.c_min 0.013096190450188467 +380 10 model.c_max 1.0989653568417719 +380 10 optimizer.lr 0.08931275471553189 +380 10 training.batch_size 2.0 +380 10 training.label_smoothing 
0.02621459542088962 +380 11 model.embedding_dim 2.0 +380 11 model.c_min 0.056348058968725925 +380 11 model.c_max 8.457718842093806 +380 11 optimizer.lr 0.021789211324566807 +380 11 training.batch_size 2.0 +380 11 training.label_smoothing 0.0010428171763619833 +380 12 model.embedding_dim 2.0 +380 12 model.c_min 0.015150721399316893 +380 12 model.c_max 6.856311019551957 +380 12 optimizer.lr 0.006176643520779148 +380 12 training.batch_size 2.0 +380 12 training.label_smoothing 0.002944426463993451 +380 13 model.embedding_dim 0.0 +380 13 model.c_min 0.032360787359993703 +380 13 model.c_max 7.1753436066097755 +380 13 optimizer.lr 0.05931033719535482 +380 13 training.batch_size 0.0 +380 13 training.label_smoothing 0.001088372267604778 +380 14 model.embedding_dim 0.0 +380 14 model.c_min 0.07396237601516177 +380 14 model.c_max 3.2748525114429134 +380 14 optimizer.lr 0.0015190849621632278 +380 14 training.batch_size 2.0 +380 14 training.label_smoothing 0.09821982485729228 +380 15 model.embedding_dim 1.0 +380 15 model.c_min 0.07114126524364646 +380 15 model.c_max 7.695803954347739 +380 15 optimizer.lr 0.018885334785809532 +380 15 training.batch_size 2.0 +380 15 training.label_smoothing 0.0029263494181771916 +380 16 model.embedding_dim 0.0 +380 16 model.c_min 0.018608964560741383 +380 16 model.c_max 6.282512813068051 +380 16 optimizer.lr 0.06394157177432121 +380 16 training.batch_size 2.0 +380 16 training.label_smoothing 0.012777236211445768 +380 17 model.embedding_dim 0.0 +380 17 model.c_min 0.03483273618297121 +380 17 model.c_max 4.342210644367563 +380 17 optimizer.lr 0.004292282669540271 +380 17 training.batch_size 0.0 +380 17 training.label_smoothing 0.00170925521855539 +380 18 model.embedding_dim 0.0 +380 18 model.c_min 0.010276160371649428 +380 18 model.c_max 9.960673800825555 +380 18 optimizer.lr 0.03173406929140264 +380 18 training.batch_size 2.0 +380 18 training.label_smoothing 0.04201425680782908 +380 19 model.embedding_dim 2.0 +380 19 model.c_min 
0.012114218769057118 +380 19 model.c_max 3.762346673609666 +380 19 optimizer.lr 0.013067879117263868 +380 19 training.batch_size 0.0 +380 19 training.label_smoothing 0.2203314843603975 +380 20 model.embedding_dim 0.0 +380 20 model.c_min 0.011348730992832385 +380 20 model.c_max 5.416815118933527 +380 20 optimizer.lr 0.016511185145950184 +380 20 training.batch_size 0.0 +380 20 training.label_smoothing 0.17574494934421556 +380 21 model.embedding_dim 0.0 +380 21 model.c_min 0.012213519184686078 +380 21 model.c_max 1.5850478780230266 +380 21 optimizer.lr 0.0028279083245205073 +380 21 training.batch_size 0.0 +380 21 training.label_smoothing 0.05884813319864322 +380 22 model.embedding_dim 2.0 +380 22 model.c_min 0.018503534651575972 +380 22 model.c_max 9.906169222475135 +380 22 optimizer.lr 0.04594103816878211 +380 22 training.batch_size 0.0 +380 22 training.label_smoothing 0.001339277671382948 +380 23 model.embedding_dim 0.0 +380 23 model.c_min 0.012195231934340347 +380 23 model.c_max 4.410259292211908 +380 23 optimizer.lr 0.001258071548153939 +380 23 training.batch_size 1.0 +380 23 training.label_smoothing 0.008726476261674285 +380 24 model.embedding_dim 0.0 +380 24 model.c_min 0.02358482427904224 +380 24 model.c_max 2.600586721572511 +380 24 optimizer.lr 0.010849174878041812 +380 24 training.batch_size 1.0 +380 24 training.label_smoothing 0.08648895110963305 +380 25 model.embedding_dim 2.0 +380 25 model.c_min 0.012043526812968717 +380 25 model.c_max 8.608467170445904 +380 25 optimizer.lr 0.04992377794038689 +380 25 training.batch_size 1.0 +380 25 training.label_smoothing 0.049429852105563034 +380 26 model.embedding_dim 1.0 +380 26 model.c_min 0.01701282574937401 +380 26 model.c_max 9.447690038329112 +380 26 optimizer.lr 0.07195090373340002 +380 26 training.batch_size 2.0 +380 26 training.label_smoothing 0.04653161788741474 +380 27 model.embedding_dim 1.0 +380 27 model.c_min 0.019272385383886034 +380 27 model.c_max 8.787143971481417 +380 27 optimizer.lr 
0.07392563695815194 +380 27 training.batch_size 1.0 +380 27 training.label_smoothing 0.4397052007799383 +380 28 model.embedding_dim 2.0 +380 28 model.c_min 0.06453384358618275 +380 28 model.c_max 9.623670444937563 +380 28 optimizer.lr 0.002005532675951774 +380 28 training.batch_size 0.0 +380 28 training.label_smoothing 0.367123966301907 +380 29 model.embedding_dim 1.0 +380 29 model.c_min 0.017476588941747376 +380 29 model.c_max 2.998154907141075 +380 29 optimizer.lr 0.002490048760762051 +380 29 training.batch_size 2.0 +380 29 training.label_smoothing 0.010252597704657536 +380 30 model.embedding_dim 1.0 +380 30 model.c_min 0.020433244606175217 +380 30 model.c_max 1.4005734665174425 +380 30 optimizer.lr 0.015343758437998482 +380 30 training.batch_size 0.0 +380 30 training.label_smoothing 0.44774359409243014 +380 31 model.embedding_dim 1.0 +380 31 model.c_min 0.07853939870576239 +380 31 model.c_max 3.7163921207034827 +380 31 optimizer.lr 0.014291427765305084 +380 31 training.batch_size 2.0 +380 31 training.label_smoothing 0.026660873837816276 +380 32 model.embedding_dim 2.0 +380 32 model.c_min 0.023679266195077528 +380 32 model.c_max 6.607291288205228 +380 32 optimizer.lr 0.0023654170726068384 +380 32 training.batch_size 0.0 +380 32 training.label_smoothing 0.7063388487884406 +380 33 model.embedding_dim 0.0 +380 33 model.c_min 0.01323074100748206 +380 33 model.c_max 1.2302449562782543 +380 33 optimizer.lr 0.009839581074802378 +380 33 training.batch_size 2.0 +380 33 training.label_smoothing 0.25458334574029867 +380 34 model.embedding_dim 1.0 +380 34 model.c_min 0.0133830411371051 +380 34 model.c_max 9.8869683731554 +380 34 optimizer.lr 0.00391653242468634 +380 34 training.batch_size 0.0 +380 34 training.label_smoothing 0.006726887533763125 +380 35 model.embedding_dim 2.0 +380 35 model.c_min 0.010121664857667412 +380 35 model.c_max 2.047895074025941 +380 35 optimizer.lr 0.08473131284411758 +380 35 training.batch_size 1.0 +380 35 training.label_smoothing 
0.33835878251255813 +380 36 model.embedding_dim 2.0 +380 36 model.c_min 0.01163316990705707 +380 36 model.c_max 5.306383329226937 +380 36 optimizer.lr 0.007435631717582881 +380 36 training.batch_size 0.0 +380 36 training.label_smoothing 0.03018977602735732 +380 37 model.embedding_dim 0.0 +380 37 model.c_min 0.030693166425350794 +380 37 model.c_max 8.988423509798341 +380 37 optimizer.lr 0.0027533687137364362 +380 37 training.batch_size 1.0 +380 37 training.label_smoothing 0.004255885679575649 +380 38 model.embedding_dim 1.0 +380 38 model.c_min 0.015664861699742644 +380 38 model.c_max 1.1616162491594384 +380 38 optimizer.lr 0.034810951170121514 +380 38 training.batch_size 2.0 +380 38 training.label_smoothing 0.0016512809074255474 +380 39 model.embedding_dim 0.0 +380 39 model.c_min 0.02255669866095595 +380 39 model.c_max 9.069614544337483 +380 39 optimizer.lr 0.022027982228115046 +380 39 training.batch_size 1.0 +380 39 training.label_smoothing 0.4602980928849539 +380 40 model.embedding_dim 1.0 +380 40 model.c_min 0.020218490713849933 +380 40 model.c_max 2.2185603802349068 +380 40 optimizer.lr 0.01567176604688193 +380 40 training.batch_size 0.0 +380 40 training.label_smoothing 0.012451003773602937 +380 41 model.embedding_dim 2.0 +380 41 model.c_min 0.03766906290614182 +380 41 model.c_max 4.177005699086489 +380 41 optimizer.lr 0.0015473897249452192 +380 41 training.batch_size 1.0 +380 41 training.label_smoothing 0.21237669482114274 +380 42 model.embedding_dim 2.0 +380 42 model.c_min 0.012046265072294608 +380 42 model.c_max 6.813479138110266 +380 42 optimizer.lr 0.005356102599490892 +380 42 training.batch_size 2.0 +380 42 training.label_smoothing 0.29949661022874 +380 43 model.embedding_dim 0.0 +380 43 model.c_min 0.01042167447153754 +380 43 model.c_max 4.274597794544507 +380 43 optimizer.lr 0.0019488934066858425 +380 43 training.batch_size 1.0 +380 43 training.label_smoothing 0.01486334416398056 +380 44 model.embedding_dim 2.0 +380 44 model.c_min 0.015208730747246517 
+380 44 model.c_max 1.9795326008253193 +380 44 optimizer.lr 0.04434193602820593 +380 44 training.batch_size 2.0 +380 44 training.label_smoothing 0.17322629798930247 +380 45 model.embedding_dim 0.0 +380 45 model.c_min 0.0473929603271929 +380 45 model.c_max 4.398420046159169 +380 45 optimizer.lr 0.06096927682213461 +380 45 training.batch_size 1.0 +380 45 training.label_smoothing 0.7188201395406412 +380 46 model.embedding_dim 2.0 +380 46 model.c_min 0.056497753962295366 +380 46 model.c_max 8.963090631827399 +380 46 optimizer.lr 0.06976936236051685 +380 46 training.batch_size 0.0 +380 46 training.label_smoothing 0.16024348326575333 +380 47 model.embedding_dim 0.0 +380 47 model.c_min 0.04475094619764137 +380 47 model.c_max 1.4329743770560994 +380 47 optimizer.lr 0.002581621793469079 +380 47 training.batch_size 2.0 +380 47 training.label_smoothing 0.00457603171964438 +380 48 model.embedding_dim 0.0 +380 48 model.c_min 0.05072106640527108 +380 48 model.c_max 2.045128980931151 +380 48 optimizer.lr 0.015037176226476103 +380 48 training.batch_size 1.0 +380 48 training.label_smoothing 0.09980084686574381 +380 49 model.embedding_dim 0.0 +380 49 model.c_min 0.049206683065122825 +380 49 model.c_max 1.150649010966084 +380 49 optimizer.lr 0.025163763147954203 +380 49 training.batch_size 0.0 +380 49 training.label_smoothing 0.01816726292387328 +380 50 model.embedding_dim 1.0 +380 50 model.c_min 0.014251372906729077 +380 50 model.c_max 2.4372994510821053 +380 50 optimizer.lr 0.002105561974549307 +380 50 training.batch_size 1.0 +380 50 training.label_smoothing 0.018013179728398828 +380 51 model.embedding_dim 1.0 +380 51 model.c_min 0.03759402552021357 +380 51 model.c_max 1.8423565305381011 +380 51 optimizer.lr 0.04108897404689896 +380 51 training.batch_size 2.0 +380 51 training.label_smoothing 0.027596153382164156 +380 52 model.embedding_dim 1.0 +380 52 model.c_min 0.018769736531670588 +380 52 model.c_max 3.8176078125226653 +380 52 optimizer.lr 0.0018936732090128977 +380 52 
training.batch_size 1.0 +380 52 training.label_smoothing 0.0015724338764700628 +380 53 model.embedding_dim 1.0 +380 53 model.c_min 0.014835321444257046 +380 53 model.c_max 7.215060142776122 +380 53 optimizer.lr 0.0016989077026365639 +380 53 training.batch_size 2.0 +380 53 training.label_smoothing 0.005689697504872042 +380 54 model.embedding_dim 0.0 +380 54 model.c_min 0.026446715138882353 +380 54 model.c_max 8.554568557117971 +380 54 optimizer.lr 0.03910753848316484 +380 54 training.batch_size 0.0 +380 54 training.label_smoothing 0.22960177987943495 +380 55 model.embedding_dim 1.0 +380 55 model.c_min 0.06249327185492781 +380 55 model.c_max 8.671075849699683 +380 55 optimizer.lr 0.008344123779225714 +380 55 training.batch_size 1.0 +380 55 training.label_smoothing 0.002845796425726643 +380 56 model.embedding_dim 2.0 +380 56 model.c_min 0.032334686369958884 +380 56 model.c_max 3.651938539082865 +380 56 optimizer.lr 0.016814606927277386 +380 56 training.batch_size 0.0 +380 56 training.label_smoothing 0.007324449929381887 +380 57 model.embedding_dim 0.0 +380 57 model.c_min 0.01011914909558334 +380 57 model.c_max 9.732070751033374 +380 57 optimizer.lr 0.0025877153650102768 +380 57 training.batch_size 1.0 +380 57 training.label_smoothing 0.0024820427727202487 +380 58 model.embedding_dim 0.0 +380 58 model.c_min 0.025327125357402093 +380 58 model.c_max 8.273866471140003 +380 58 optimizer.lr 0.06960112767437569 +380 58 training.batch_size 2.0 +380 58 training.label_smoothing 0.013719857156156834 +380 59 model.embedding_dim 2.0 +380 59 model.c_min 0.010795049331201862 +380 59 model.c_max 4.729251051780294 +380 59 optimizer.lr 0.013394837047768109 +380 59 training.batch_size 1.0 +380 59 training.label_smoothing 0.21620358936230683 +380 60 model.embedding_dim 1.0 +380 60 model.c_min 0.045891635665230024 +380 60 model.c_max 3.9183814522945406 +380 60 optimizer.lr 0.008923222155907413 +380 60 training.batch_size 1.0 +380 60 training.label_smoothing 0.08666286037798313 +380 61 
model.embedding_dim 0.0 +380 61 model.c_min 0.03686919746123623 +380 61 model.c_max 6.015340649341908 +380 61 optimizer.lr 0.004787193373048117 +380 61 training.batch_size 1.0 +380 61 training.label_smoothing 0.15928641169242183 +380 62 model.embedding_dim 0.0 +380 62 model.c_min 0.025123404305236217 +380 62 model.c_max 7.022176524338732 +380 62 optimizer.lr 0.01234304053574208 +380 62 training.batch_size 2.0 +380 62 training.label_smoothing 0.0010403858494802622 +380 63 model.embedding_dim 0.0 +380 63 model.c_min 0.0671670705653791 +380 63 model.c_max 2.8375978955318413 +380 63 optimizer.lr 0.05316985567042781 +380 63 training.batch_size 2.0 +380 63 training.label_smoothing 0.7264699804744766 +380 64 model.embedding_dim 2.0 +380 64 model.c_min 0.03383128995184923 +380 64 model.c_max 9.996965693722942 +380 64 optimizer.lr 0.09414381629823447 +380 64 training.batch_size 1.0 +380 64 training.label_smoothing 0.05654726171341245 +380 65 model.embedding_dim 2.0 +380 65 model.c_min 0.0690235368279575 +380 65 model.c_max 9.983248052178272 +380 65 optimizer.lr 0.011152227549479518 +380 65 training.batch_size 0.0 +380 65 training.label_smoothing 0.08835760560374856 +380 66 model.embedding_dim 2.0 +380 66 model.c_min 0.01155600694875245 +380 66 model.c_max 2.404781450367705 +380 66 optimizer.lr 0.0010239092385951637 +380 66 training.batch_size 0.0 +380 66 training.label_smoothing 0.2515816008427633 +380 67 model.embedding_dim 2.0 +380 67 model.c_min 0.012807923676346552 +380 67 model.c_max 6.567479961397965 +380 67 optimizer.lr 0.004816577270752406 +380 67 training.batch_size 2.0 +380 67 training.label_smoothing 0.12229934142286977 +380 68 model.embedding_dim 2.0 +380 68 model.c_min 0.013994678948376608 +380 68 model.c_max 3.9608128423458533 +380 68 optimizer.lr 0.0017485364488648782 +380 68 training.batch_size 0.0 +380 68 training.label_smoothing 0.01706071647848452 +380 69 model.embedding_dim 2.0 +380 69 model.c_min 0.05131752102433944 +380 69 model.c_max 5.335040831827688 
+380 69 optimizer.lr 0.005661260956401845 +380 69 training.batch_size 2.0 +380 69 training.label_smoothing 0.002964880195429419 +380 70 model.embedding_dim 1.0 +380 70 model.c_min 0.027686592185608937 +380 70 model.c_max 1.209313768935492 +380 70 optimizer.lr 0.008088488100646473 +380 70 training.batch_size 0.0 +380 70 training.label_smoothing 0.0030593888634080236 +380 71 model.embedding_dim 0.0 +380 71 model.c_min 0.06347309425333186 +380 71 model.c_max 7.764910743741216 +380 71 optimizer.lr 0.09362983674610177 +380 71 training.batch_size 2.0 +380 71 training.label_smoothing 0.08260995592130142 +380 72 model.embedding_dim 1.0 +380 72 model.c_min 0.012939841312326445 +380 72 model.c_max 9.706359138797557 +380 72 optimizer.lr 0.002860340452305586 +380 72 training.batch_size 1.0 +380 72 training.label_smoothing 0.6795817678584676 +380 73 model.embedding_dim 2.0 +380 73 model.c_min 0.02115237933627544 +380 73 model.c_max 7.009344049539937 +380 73 optimizer.lr 0.017571802371210258 +380 73 training.batch_size 1.0 +380 73 training.label_smoothing 0.06571990507834087 +380 74 model.embedding_dim 2.0 +380 74 model.c_min 0.01155227031821275 +380 74 model.c_max 9.996076553473333 +380 74 optimizer.lr 0.07332871868874428 +380 74 training.batch_size 1.0 +380 74 training.label_smoothing 0.025289070103386838 +380 75 model.embedding_dim 1.0 +380 75 model.c_min 0.02872967000920585 +380 75 model.c_max 2.1424684378593595 +380 75 optimizer.lr 0.001242461178034848 +380 75 training.batch_size 2.0 +380 75 training.label_smoothing 0.2742616509542931 +380 76 model.embedding_dim 2.0 +380 76 model.c_min 0.061965542546073456 +380 76 model.c_max 6.8973306999232635 +380 76 optimizer.lr 0.02706966244656906 +380 76 training.batch_size 1.0 +380 76 training.label_smoothing 0.046431892685568965 +380 77 model.embedding_dim 0.0 +380 77 model.c_min 0.017089395070424437 +380 77 model.c_max 2.7158127714790856 +380 77 optimizer.lr 0.006128013138573376 +380 77 training.batch_size 2.0 +380 77 
training.label_smoothing 0.06766174856326707 +380 78 model.embedding_dim 2.0 +380 78 model.c_min 0.03058368877486605 +380 78 model.c_max 1.8104871834881962 +380 78 optimizer.lr 0.011033570395218822 +380 78 training.batch_size 1.0 +380 78 training.label_smoothing 0.0038283235318572195 +380 79 model.embedding_dim 2.0 +380 79 model.c_min 0.0168088052530703 +380 79 model.c_max 6.182146609547821 +380 79 optimizer.lr 0.014415762717695657 +380 79 training.batch_size 0.0 +380 79 training.label_smoothing 0.05916035903397868 +380 80 model.embedding_dim 1.0 +380 80 model.c_min 0.0646989289386818 +380 80 model.c_max 5.011393335167022 +380 80 optimizer.lr 0.0023019221896491407 +380 80 training.batch_size 2.0 +380 80 training.label_smoothing 0.001047387819188667 +380 81 model.embedding_dim 1.0 +380 81 model.c_min 0.010594444595916575 +380 81 model.c_max 2.2128890590264843 +380 81 optimizer.lr 0.0011559554923136481 +380 81 training.batch_size 1.0 +380 81 training.label_smoothing 0.026555725672987248 +380 82 model.embedding_dim 1.0 +380 82 model.c_min 0.03249844812904343 +380 82 model.c_max 9.279909918889729 +380 82 optimizer.lr 0.01729000466754248 +380 82 training.batch_size 1.0 +380 82 training.label_smoothing 0.02760296644504577 +380 83 model.embedding_dim 2.0 +380 83 model.c_min 0.06755251427051069 +380 83 model.c_max 8.160533735778596 +380 83 optimizer.lr 0.0931886719780769 +380 83 training.batch_size 1.0 +380 83 training.label_smoothing 0.043238775994347854 +380 84 model.embedding_dim 0.0 +380 84 model.c_min 0.011179536253153412 +380 84 model.c_max 7.675812832838 +380 84 optimizer.lr 0.004192184119500288 +380 84 training.batch_size 1.0 +380 84 training.label_smoothing 0.07493486308571613 +380 85 model.embedding_dim 2.0 +380 85 model.c_min 0.010271274856281855 +380 85 model.c_max 7.7258180480549905 +380 85 optimizer.lr 0.005198418158529726 +380 85 training.batch_size 1.0 +380 85 training.label_smoothing 0.015372874370236031 +380 86 model.embedding_dim 0.0 +380 86 model.c_min 
0.06591740418475323 +380 86 model.c_max 7.28138600248298 +380 86 optimizer.lr 0.014033715171211427 +380 86 training.batch_size 2.0 +380 86 training.label_smoothing 0.48330723606263015 +380 87 model.embedding_dim 1.0 +380 87 model.c_min 0.01686915369355204 +380 87 model.c_max 6.85453205578465 +380 87 optimizer.lr 0.016766364403558817 +380 87 training.batch_size 1.0 +380 87 training.label_smoothing 0.01934240468947495 +380 88 model.embedding_dim 0.0 +380 88 model.c_min 0.01859899901647398 +380 88 model.c_max 5.464948340730462 +380 88 optimizer.lr 0.042153646781034726 +380 88 training.batch_size 0.0 +380 88 training.label_smoothing 0.0022082419924167153 +380 89 model.embedding_dim 1.0 +380 89 model.c_min 0.035798776874773466 +380 89 model.c_max 6.530410845924102 +380 89 optimizer.lr 0.005329511038979187 +380 89 training.batch_size 2.0 +380 89 training.label_smoothing 0.0024320085291324144 +380 90 model.embedding_dim 2.0 +380 90 model.c_min 0.025863004404441427 +380 90 model.c_max 2.87742667532199 +380 90 optimizer.lr 0.0012073861913181703 +380 90 training.batch_size 0.0 +380 90 training.label_smoothing 0.028651095758320814 +380 91 model.embedding_dim 1.0 +380 91 model.c_min 0.035675156754448094 +380 91 model.c_max 2.8340638633117794 +380 91 optimizer.lr 0.011724792342385899 +380 91 training.batch_size 0.0 +380 91 training.label_smoothing 0.04368844247963826 +380 92 model.embedding_dim 2.0 +380 92 model.c_min 0.03669726437289858 +380 92 model.c_max 8.183509024500498 +380 92 optimizer.lr 0.0059904657761723585 +380 92 training.batch_size 0.0 +380 92 training.label_smoothing 0.468546563890934 +380 93 model.embedding_dim 1.0 +380 93 model.c_min 0.013149642733467526 +380 93 model.c_max 7.901638574492452 +380 93 optimizer.lr 0.06815413261469386 +380 93 training.batch_size 1.0 +380 93 training.label_smoothing 0.6934476072146116 +380 94 model.embedding_dim 0.0 +380 94 model.c_min 0.053144795171073005 +380 94 model.c_max 9.364158798174063 +380 94 optimizer.lr 
0.001655265741861096 +380 94 training.batch_size 1.0 +380 94 training.label_smoothing 0.28054359686921315 +380 95 model.embedding_dim 0.0 +380 95 model.c_min 0.020435017805320237 +380 95 model.c_max 3.3474062031102623 +380 95 optimizer.lr 0.0014830448070898233 +380 95 training.batch_size 1.0 +380 95 training.label_smoothing 0.0024570331435753556 +380 96 model.embedding_dim 2.0 +380 96 model.c_min 0.051372907226138985 +380 96 model.c_max 3.159825398880875 +380 96 optimizer.lr 0.0035533959357720688 +380 96 training.batch_size 1.0 +380 96 training.label_smoothing 0.07866380545744825 +380 97 model.embedding_dim 2.0 +380 97 model.c_min 0.0799191187077259 +380 97 model.c_max 4.460381470480437 +380 97 optimizer.lr 0.002224073385635973 +380 97 training.batch_size 0.0 +380 97 training.label_smoothing 0.19740633715649689 +380 98 model.embedding_dim 0.0 +380 98 model.c_min 0.09290513175518918 +380 98 model.c_max 5.271740599605639 +380 98 optimizer.lr 0.08830466969799973 +380 98 training.batch_size 0.0 +380 98 training.label_smoothing 0.0011971173518751047 +380 99 model.embedding_dim 1.0 +380 99 model.c_min 0.022941025341030513 +380 99 model.c_max 3.003210929066207 +380 99 optimizer.lr 0.0024754623920238794 +380 99 training.batch_size 1.0 +380 99 training.label_smoothing 0.006803745264531067 +380 100 model.embedding_dim 0.0 +380 100 model.c_min 0.034605116224571325 +380 100 model.c_max 8.65738855695694 +380 100 optimizer.lr 0.0012049953152593173 +380 100 training.batch_size 2.0 +380 100 training.label_smoothing 0.14991857832314204 +380 1 dataset """kinships""" +380 1 model """kg2e""" +380 1 loss """crossentropy""" +380 1 regularizer """no""" +380 1 optimizer """adam""" +380 1 training_loop """lcwa""" +380 1 evaluator """rankbased""" +380 2 dataset """kinships""" +380 2 model """kg2e""" +380 2 loss """crossentropy""" +380 2 regularizer """no""" +380 2 optimizer """adam""" +380 2 training_loop """lcwa""" +380 2 evaluator """rankbased""" +380 3 dataset """kinships""" +380 3 model 
"""kg2e""" +380 3 loss """crossentropy""" +380 3 regularizer """no""" +380 3 optimizer """adam""" +380 3 training_loop """lcwa""" +380 3 evaluator """rankbased""" +380 4 dataset """kinships""" +380 4 model """kg2e""" +380 4 loss """crossentropy""" +380 4 regularizer """no""" +380 4 optimizer """adam""" +380 4 training_loop """lcwa""" +380 4 evaluator """rankbased""" +380 5 dataset """kinships""" +380 5 model """kg2e""" +380 5 loss """crossentropy""" +380 5 regularizer """no""" +380 5 optimizer """adam""" +380 5 training_loop """lcwa""" +380 5 evaluator """rankbased""" +380 6 dataset """kinships""" +380 6 model """kg2e""" +380 6 loss """crossentropy""" +380 6 regularizer """no""" +380 6 optimizer """adam""" +380 6 training_loop """lcwa""" +380 6 evaluator """rankbased""" +380 7 dataset """kinships""" +380 7 model """kg2e""" +380 7 loss """crossentropy""" +380 7 regularizer """no""" +380 7 optimizer """adam""" +380 7 training_loop """lcwa""" +380 7 evaluator """rankbased""" +380 8 dataset """kinships""" +380 8 model """kg2e""" +380 8 loss """crossentropy""" +380 8 regularizer """no""" +380 8 optimizer """adam""" +380 8 training_loop """lcwa""" +380 8 evaluator """rankbased""" +380 9 dataset """kinships""" +380 9 model """kg2e""" +380 9 loss """crossentropy""" +380 9 regularizer """no""" +380 9 optimizer """adam""" +380 9 training_loop """lcwa""" +380 9 evaluator """rankbased""" +380 10 dataset """kinships""" +380 10 model """kg2e""" +380 10 loss """crossentropy""" +380 10 regularizer """no""" +380 10 optimizer """adam""" +380 10 training_loop """lcwa""" +380 10 evaluator """rankbased""" +380 11 dataset """kinships""" +380 11 model """kg2e""" +380 11 loss """crossentropy""" +380 11 regularizer """no""" +380 11 optimizer """adam""" +380 11 training_loop """lcwa""" +380 11 evaluator """rankbased""" +380 12 dataset """kinships""" +380 12 model """kg2e""" +380 12 loss """crossentropy""" +380 12 regularizer """no""" +380 12 optimizer """adam""" +380 12 training_loop 
"""lcwa""" +380 12 evaluator """rankbased""" +380 13 dataset """kinships""" +380 13 model """kg2e""" +380 13 loss """crossentropy""" +380 13 regularizer """no""" +380 13 optimizer """adam""" +380 13 training_loop """lcwa""" +380 13 evaluator """rankbased""" +380 14 dataset """kinships""" +380 14 model """kg2e""" +380 14 loss """crossentropy""" +380 14 regularizer """no""" +380 14 optimizer """adam""" +380 14 training_loop """lcwa""" +380 14 evaluator """rankbased""" +380 15 dataset """kinships""" +380 15 model """kg2e""" +380 15 loss """crossentropy""" +380 15 regularizer """no""" +380 15 optimizer """adam""" +380 15 training_loop """lcwa""" +380 15 evaluator """rankbased""" +380 16 dataset """kinships""" +380 16 model """kg2e""" +380 16 loss """crossentropy""" +380 16 regularizer """no""" +380 16 optimizer """adam""" +380 16 training_loop """lcwa""" +380 16 evaluator """rankbased""" +380 17 dataset """kinships""" +380 17 model """kg2e""" +380 17 loss """crossentropy""" +380 17 regularizer """no""" +380 17 optimizer """adam""" +380 17 training_loop """lcwa""" +380 17 evaluator """rankbased""" +380 18 dataset """kinships""" +380 18 model """kg2e""" +380 18 loss """crossentropy""" +380 18 regularizer """no""" +380 18 optimizer """adam""" +380 18 training_loop """lcwa""" +380 18 evaluator """rankbased""" +380 19 dataset """kinships""" +380 19 model """kg2e""" +380 19 loss """crossentropy""" +380 19 regularizer """no""" +380 19 optimizer """adam""" +380 19 training_loop """lcwa""" +380 19 evaluator """rankbased""" +380 20 dataset """kinships""" +380 20 model """kg2e""" +380 20 loss """crossentropy""" +380 20 regularizer """no""" +380 20 optimizer """adam""" +380 20 training_loop """lcwa""" +380 20 evaluator """rankbased""" +380 21 dataset """kinships""" +380 21 model """kg2e""" +380 21 loss """crossentropy""" +380 21 regularizer """no""" +380 21 optimizer """adam""" +380 21 training_loop """lcwa""" +380 21 evaluator """rankbased""" +380 22 dataset """kinships""" +380 
22 model """kg2e""" +380 22 loss """crossentropy""" +380 22 regularizer """no""" +380 22 optimizer """adam""" +380 22 training_loop """lcwa""" +380 22 evaluator """rankbased""" +380 23 dataset """kinships""" +380 23 model """kg2e""" +380 23 loss """crossentropy""" +380 23 regularizer """no""" +380 23 optimizer """adam""" +380 23 training_loop """lcwa""" +380 23 evaluator """rankbased""" +380 24 dataset """kinships""" +380 24 model """kg2e""" +380 24 loss """crossentropy""" +380 24 regularizer """no""" +380 24 optimizer """adam""" +380 24 training_loop """lcwa""" +380 24 evaluator """rankbased""" +380 25 dataset """kinships""" +380 25 model """kg2e""" +380 25 loss """crossentropy""" +380 25 regularizer """no""" +380 25 optimizer """adam""" +380 25 training_loop """lcwa""" +380 25 evaluator """rankbased""" +380 26 dataset """kinships""" +380 26 model """kg2e""" +380 26 loss """crossentropy""" +380 26 regularizer """no""" +380 26 optimizer """adam""" +380 26 training_loop """lcwa""" +380 26 evaluator """rankbased""" +380 27 dataset """kinships""" +380 27 model """kg2e""" +380 27 loss """crossentropy""" +380 27 regularizer """no""" +380 27 optimizer """adam""" +380 27 training_loop """lcwa""" +380 27 evaluator """rankbased""" +380 28 dataset """kinships""" +380 28 model """kg2e""" +380 28 loss """crossentropy""" +380 28 regularizer """no""" +380 28 optimizer """adam""" +380 28 training_loop """lcwa""" +380 28 evaluator """rankbased""" +380 29 dataset """kinships""" +380 29 model """kg2e""" +380 29 loss """crossentropy""" +380 29 regularizer """no""" +380 29 optimizer """adam""" +380 29 training_loop """lcwa""" +380 29 evaluator """rankbased""" +380 30 dataset """kinships""" +380 30 model """kg2e""" +380 30 loss """crossentropy""" +380 30 regularizer """no""" +380 30 optimizer """adam""" +380 30 training_loop """lcwa""" +380 30 evaluator """rankbased""" +380 31 dataset """kinships""" +380 31 model """kg2e""" +380 31 loss """crossentropy""" +380 31 regularizer """no""" 
+380 31 optimizer """adam""" +380 31 training_loop """lcwa""" +380 31 evaluator """rankbased""" +380 32 dataset """kinships""" +380 32 model """kg2e""" +380 32 loss """crossentropy""" +380 32 regularizer """no""" +380 32 optimizer """adam""" +380 32 training_loop """lcwa""" +380 32 evaluator """rankbased""" +380 33 dataset """kinships""" +380 33 model """kg2e""" +380 33 loss """crossentropy""" +380 33 regularizer """no""" +380 33 optimizer """adam""" +380 33 training_loop """lcwa""" +380 33 evaluator """rankbased""" +380 34 dataset """kinships""" +380 34 model """kg2e""" +380 34 loss """crossentropy""" +380 34 regularizer """no""" +380 34 optimizer """adam""" +380 34 training_loop """lcwa""" +380 34 evaluator """rankbased""" +380 35 dataset """kinships""" +380 35 model """kg2e""" +380 35 loss """crossentropy""" +380 35 regularizer """no""" +380 35 optimizer """adam""" +380 35 training_loop """lcwa""" +380 35 evaluator """rankbased""" +380 36 dataset """kinships""" +380 36 model """kg2e""" +380 36 loss """crossentropy""" +380 36 regularizer """no""" +380 36 optimizer """adam""" +380 36 training_loop """lcwa""" +380 36 evaluator """rankbased""" +380 37 dataset """kinships""" +380 37 model """kg2e""" +380 37 loss """crossentropy""" +380 37 regularizer """no""" +380 37 optimizer """adam""" +380 37 training_loop """lcwa""" +380 37 evaluator """rankbased""" +380 38 dataset """kinships""" +380 38 model """kg2e""" +380 38 loss """crossentropy""" +380 38 regularizer """no""" +380 38 optimizer """adam""" +380 38 training_loop """lcwa""" +380 38 evaluator """rankbased""" +380 39 dataset """kinships""" +380 39 model """kg2e""" +380 39 loss """crossentropy""" +380 39 regularizer """no""" +380 39 optimizer """adam""" +380 39 training_loop """lcwa""" +380 39 evaluator """rankbased""" +380 40 dataset """kinships""" +380 40 model """kg2e""" +380 40 loss """crossentropy""" +380 40 regularizer """no""" +380 40 optimizer """adam""" +380 40 training_loop """lcwa""" +380 40 evaluator 
"""rankbased""" +380 41 dataset """kinships""" +380 41 model """kg2e""" +380 41 loss """crossentropy""" +380 41 regularizer """no""" +380 41 optimizer """adam""" +380 41 training_loop """lcwa""" +380 41 evaluator """rankbased""" +380 42 dataset """kinships""" +380 42 model """kg2e""" +380 42 loss """crossentropy""" +380 42 regularizer """no""" +380 42 optimizer """adam""" +380 42 training_loop """lcwa""" +380 42 evaluator """rankbased""" +380 43 dataset """kinships""" +380 43 model """kg2e""" +380 43 loss """crossentropy""" +380 43 regularizer """no""" +380 43 optimizer """adam""" +380 43 training_loop """lcwa""" +380 43 evaluator """rankbased""" +380 44 dataset """kinships""" +380 44 model """kg2e""" +380 44 loss """crossentropy""" +380 44 regularizer """no""" +380 44 optimizer """adam""" +380 44 training_loop """lcwa""" +380 44 evaluator """rankbased""" +380 45 dataset """kinships""" +380 45 model """kg2e""" +380 45 loss """crossentropy""" +380 45 regularizer """no""" +380 45 optimizer """adam""" +380 45 training_loop """lcwa""" +380 45 evaluator """rankbased""" +380 46 dataset """kinships""" +380 46 model """kg2e""" +380 46 loss """crossentropy""" +380 46 regularizer """no""" +380 46 optimizer """adam""" +380 46 training_loop """lcwa""" +380 46 evaluator """rankbased""" +380 47 dataset """kinships""" +380 47 model """kg2e""" +380 47 loss """crossentropy""" +380 47 regularizer """no""" +380 47 optimizer """adam""" +380 47 training_loop """lcwa""" +380 47 evaluator """rankbased""" +380 48 dataset """kinships""" +380 48 model """kg2e""" +380 48 loss """crossentropy""" +380 48 regularizer """no""" +380 48 optimizer """adam""" +380 48 training_loop """lcwa""" +380 48 evaluator """rankbased""" +380 49 dataset """kinships""" +380 49 model """kg2e""" +380 49 loss """crossentropy""" +380 49 regularizer """no""" +380 49 optimizer """adam""" +380 49 training_loop """lcwa""" +380 49 evaluator """rankbased""" +380 50 dataset """kinships""" +380 50 model """kg2e""" +380 50 
loss """crossentropy""" +380 50 regularizer """no""" +380 50 optimizer """adam""" +380 50 training_loop """lcwa""" +380 50 evaluator """rankbased""" +380 51 dataset """kinships""" +380 51 model """kg2e""" +380 51 loss """crossentropy""" +380 51 regularizer """no""" +380 51 optimizer """adam""" +380 51 training_loop """lcwa""" +380 51 evaluator """rankbased""" +380 52 dataset """kinships""" +380 52 model """kg2e""" +380 52 loss """crossentropy""" +380 52 regularizer """no""" +380 52 optimizer """adam""" +380 52 training_loop """lcwa""" +380 52 evaluator """rankbased""" +380 53 dataset """kinships""" +380 53 model """kg2e""" +380 53 loss """crossentropy""" +380 53 regularizer """no""" +380 53 optimizer """adam""" +380 53 training_loop """lcwa""" +380 53 evaluator """rankbased""" +380 54 dataset """kinships""" +380 54 model """kg2e""" +380 54 loss """crossentropy""" +380 54 regularizer """no""" +380 54 optimizer """adam""" +380 54 training_loop """lcwa""" +380 54 evaluator """rankbased""" +380 55 dataset """kinships""" +380 55 model """kg2e""" +380 55 loss """crossentropy""" +380 55 regularizer """no""" +380 55 optimizer """adam""" +380 55 training_loop """lcwa""" +380 55 evaluator """rankbased""" +380 56 dataset """kinships""" +380 56 model """kg2e""" +380 56 loss """crossentropy""" +380 56 regularizer """no""" +380 56 optimizer """adam""" +380 56 training_loop """lcwa""" +380 56 evaluator """rankbased""" +380 57 dataset """kinships""" +380 57 model """kg2e""" +380 57 loss """crossentropy""" +380 57 regularizer """no""" +380 57 optimizer """adam""" +380 57 training_loop """lcwa""" +380 57 evaluator """rankbased""" +380 58 dataset """kinships""" +380 58 model """kg2e""" +380 58 loss """crossentropy""" +380 58 regularizer """no""" +380 58 optimizer """adam""" +380 58 training_loop """lcwa""" +380 58 evaluator """rankbased""" +380 59 dataset """kinships""" +380 59 model """kg2e""" +380 59 loss """crossentropy""" +380 59 regularizer """no""" +380 59 optimizer """adam""" 
+380 59 training_loop """lcwa""" +380 59 evaluator """rankbased""" +380 60 dataset """kinships""" +380 60 model """kg2e""" +380 60 loss """crossentropy""" +380 60 regularizer """no""" +380 60 optimizer """adam""" +380 60 training_loop """lcwa""" +380 60 evaluator """rankbased""" +380 61 dataset """kinships""" +380 61 model """kg2e""" +380 61 loss """crossentropy""" +380 61 regularizer """no""" +380 61 optimizer """adam""" +380 61 training_loop """lcwa""" +380 61 evaluator """rankbased""" +380 62 dataset """kinships""" +380 62 model """kg2e""" +380 62 loss """crossentropy""" +380 62 regularizer """no""" +380 62 optimizer """adam""" +380 62 training_loop """lcwa""" +380 62 evaluator """rankbased""" +380 63 dataset """kinships""" +380 63 model """kg2e""" +380 63 loss """crossentropy""" +380 63 regularizer """no""" +380 63 optimizer """adam""" +380 63 training_loop """lcwa""" +380 63 evaluator """rankbased""" +380 64 dataset """kinships""" +380 64 model """kg2e""" +380 64 loss """crossentropy""" +380 64 regularizer """no""" +380 64 optimizer """adam""" +380 64 training_loop """lcwa""" +380 64 evaluator """rankbased""" +380 65 dataset """kinships""" +380 65 model """kg2e""" +380 65 loss """crossentropy""" +380 65 regularizer """no""" +380 65 optimizer """adam""" +380 65 training_loop """lcwa""" +380 65 evaluator """rankbased""" +380 66 dataset """kinships""" +380 66 model """kg2e""" +380 66 loss """crossentropy""" +380 66 regularizer """no""" +380 66 optimizer """adam""" +380 66 training_loop """lcwa""" +380 66 evaluator """rankbased""" +380 67 dataset """kinships""" +380 67 model """kg2e""" +380 67 loss """crossentropy""" +380 67 regularizer """no""" +380 67 optimizer """adam""" +380 67 training_loop """lcwa""" +380 67 evaluator """rankbased""" +380 68 dataset """kinships""" +380 68 model """kg2e""" +380 68 loss """crossentropy""" +380 68 regularizer """no""" +380 68 optimizer """adam""" +380 68 training_loop """lcwa""" +380 68 evaluator """rankbased""" +380 69 dataset 
"""kinships""" +380 69 model """kg2e""" +380 69 loss """crossentropy""" +380 69 regularizer """no""" +380 69 optimizer """adam""" +380 69 training_loop """lcwa""" +380 69 evaluator """rankbased""" +380 70 dataset """kinships""" +380 70 model """kg2e""" +380 70 loss """crossentropy""" +380 70 regularizer """no""" +380 70 optimizer """adam""" +380 70 training_loop """lcwa""" +380 70 evaluator """rankbased""" +380 71 dataset """kinships""" +380 71 model """kg2e""" +380 71 loss """crossentropy""" +380 71 regularizer """no""" +380 71 optimizer """adam""" +380 71 training_loop """lcwa""" +380 71 evaluator """rankbased""" +380 72 dataset """kinships""" +380 72 model """kg2e""" +380 72 loss """crossentropy""" +380 72 regularizer """no""" +380 72 optimizer """adam""" +380 72 training_loop """lcwa""" +380 72 evaluator """rankbased""" +380 73 dataset """kinships""" +380 73 model """kg2e""" +380 73 loss """crossentropy""" +380 73 regularizer """no""" +380 73 optimizer """adam""" +380 73 training_loop """lcwa""" +380 73 evaluator """rankbased""" +380 74 dataset """kinships""" +380 74 model """kg2e""" +380 74 loss """crossentropy""" +380 74 regularizer """no""" +380 74 optimizer """adam""" +380 74 training_loop """lcwa""" +380 74 evaluator """rankbased""" +380 75 dataset """kinships""" +380 75 model """kg2e""" +380 75 loss """crossentropy""" +380 75 regularizer """no""" +380 75 optimizer """adam""" +380 75 training_loop """lcwa""" +380 75 evaluator """rankbased""" +380 76 dataset """kinships""" +380 76 model """kg2e""" +380 76 loss """crossentropy""" +380 76 regularizer """no""" +380 76 optimizer """adam""" +380 76 training_loop """lcwa""" +380 76 evaluator """rankbased""" +380 77 dataset """kinships""" +380 77 model """kg2e""" +380 77 loss """crossentropy""" +380 77 regularizer """no""" +380 77 optimizer """adam""" +380 77 training_loop """lcwa""" +380 77 evaluator """rankbased""" +380 78 dataset """kinships""" +380 78 model """kg2e""" +380 78 loss """crossentropy""" +380 78 
regularizer """no""" +380 78 optimizer """adam""" +380 78 training_loop """lcwa""" +380 78 evaluator """rankbased""" +380 79 dataset """kinships""" +380 79 model """kg2e""" +380 79 loss """crossentropy""" +380 79 regularizer """no""" +380 79 optimizer """adam""" +380 79 training_loop """lcwa""" +380 79 evaluator """rankbased""" +380 80 dataset """kinships""" +380 80 model """kg2e""" +380 80 loss """crossentropy""" +380 80 regularizer """no""" +380 80 optimizer """adam""" +380 80 training_loop """lcwa""" +380 80 evaluator """rankbased""" +380 81 dataset """kinships""" +380 81 model """kg2e""" +380 81 loss """crossentropy""" +380 81 regularizer """no""" +380 81 optimizer """adam""" +380 81 training_loop """lcwa""" +380 81 evaluator """rankbased""" +380 82 dataset """kinships""" +380 82 model """kg2e""" +380 82 loss """crossentropy""" +380 82 regularizer """no""" +380 82 optimizer """adam""" +380 82 training_loop """lcwa""" +380 82 evaluator """rankbased""" +380 83 dataset """kinships""" +380 83 model """kg2e""" +380 83 loss """crossentropy""" +380 83 regularizer """no""" +380 83 optimizer """adam""" +380 83 training_loop """lcwa""" +380 83 evaluator """rankbased""" +380 84 dataset """kinships""" +380 84 model """kg2e""" +380 84 loss """crossentropy""" +380 84 regularizer """no""" +380 84 optimizer """adam""" +380 84 training_loop """lcwa""" +380 84 evaluator """rankbased""" +380 85 dataset """kinships""" +380 85 model """kg2e""" +380 85 loss """crossentropy""" +380 85 regularizer """no""" +380 85 optimizer """adam""" +380 85 training_loop """lcwa""" +380 85 evaluator """rankbased""" +380 86 dataset """kinships""" +380 86 model """kg2e""" +380 86 loss """crossentropy""" +380 86 regularizer """no""" +380 86 optimizer """adam""" +380 86 training_loop """lcwa""" +380 86 evaluator """rankbased""" +380 87 dataset """kinships""" +380 87 model """kg2e""" +380 87 loss """crossentropy""" +380 87 regularizer """no""" +380 87 optimizer """adam""" +380 87 training_loop """lcwa""" 
+380 87 evaluator """rankbased""" +380 88 dataset """kinships""" +380 88 model """kg2e""" +380 88 loss """crossentropy""" +380 88 regularizer """no""" +380 88 optimizer """adam""" +380 88 training_loop """lcwa""" +380 88 evaluator """rankbased""" +380 89 dataset """kinships""" +380 89 model """kg2e""" +380 89 loss """crossentropy""" +380 89 regularizer """no""" +380 89 optimizer """adam""" +380 89 training_loop """lcwa""" +380 89 evaluator """rankbased""" +380 90 dataset """kinships""" +380 90 model """kg2e""" +380 90 loss """crossentropy""" +380 90 regularizer """no""" +380 90 optimizer """adam""" +380 90 training_loop """lcwa""" +380 90 evaluator """rankbased""" +380 91 dataset """kinships""" +380 91 model """kg2e""" +380 91 loss """crossentropy""" +380 91 regularizer """no""" +380 91 optimizer """adam""" +380 91 training_loop """lcwa""" +380 91 evaluator """rankbased""" +380 92 dataset """kinships""" +380 92 model """kg2e""" +380 92 loss """crossentropy""" +380 92 regularizer """no""" +380 92 optimizer """adam""" +380 92 training_loop """lcwa""" +380 92 evaluator """rankbased""" +380 93 dataset """kinships""" +380 93 model """kg2e""" +380 93 loss """crossentropy""" +380 93 regularizer """no""" +380 93 optimizer """adam""" +380 93 training_loop """lcwa""" +380 93 evaluator """rankbased""" +380 94 dataset """kinships""" +380 94 model """kg2e""" +380 94 loss """crossentropy""" +380 94 regularizer """no""" +380 94 optimizer """adam""" +380 94 training_loop """lcwa""" +380 94 evaluator """rankbased""" +380 95 dataset """kinships""" +380 95 model """kg2e""" +380 95 loss """crossentropy""" +380 95 regularizer """no""" +380 95 optimizer """adam""" +380 95 training_loop """lcwa""" +380 95 evaluator """rankbased""" +380 96 dataset """kinships""" +380 96 model """kg2e""" +380 96 loss """crossentropy""" +380 96 regularizer """no""" +380 96 optimizer """adam""" +380 96 training_loop """lcwa""" +380 96 evaluator """rankbased""" +380 97 dataset """kinships""" +380 97 model 
"""kg2e""" +380 97 loss """crossentropy""" +380 97 regularizer """no""" +380 97 optimizer """adam""" +380 97 training_loop """lcwa""" +380 97 evaluator """rankbased""" +380 98 dataset """kinships""" +380 98 model """kg2e""" +380 98 loss """crossentropy""" +380 98 regularizer """no""" +380 98 optimizer """adam""" +380 98 training_loop """lcwa""" +380 98 evaluator """rankbased""" +380 99 dataset """kinships""" +380 99 model """kg2e""" +380 99 loss """crossentropy""" +380 99 regularizer """no""" +380 99 optimizer """adam""" +380 99 training_loop """lcwa""" +380 99 evaluator """rankbased""" +380 100 dataset """kinships""" +380 100 model """kg2e""" +380 100 loss """crossentropy""" +380 100 regularizer """no""" +380 100 optimizer """adam""" +380 100 training_loop """lcwa""" +380 100 evaluator """rankbased""" +381 1 model.embedding_dim 0.0 +381 1 model.c_min 0.05369130346104058 +381 1 model.c_max 9.12512348564619 +381 1 optimizer.lr 0.03432448749544329 +381 1 negative_sampler.num_negs_per_pos 53.0 +381 1 training.batch_size 2.0 +381 2 model.embedding_dim 1.0 +381 2 model.c_min 0.01044312602252699 +381 2 model.c_max 5.662414274916323 +381 2 optimizer.lr 0.006946007984100746 +381 2 negative_sampler.num_negs_per_pos 33.0 +381 2 training.batch_size 0.0 +381 3 model.embedding_dim 1.0 +381 3 model.c_min 0.012777961312396224 +381 3 model.c_max 9.143927197273028 +381 3 optimizer.lr 0.008818989581540662 +381 3 negative_sampler.num_negs_per_pos 36.0 +381 3 training.batch_size 2.0 +381 4 model.embedding_dim 0.0 +381 4 model.c_min 0.06162732583157439 +381 4 model.c_max 9.62528723837393 +381 4 optimizer.lr 0.0022635811243401694 +381 4 negative_sampler.num_negs_per_pos 70.0 +381 4 training.batch_size 2.0 +381 5 model.embedding_dim 0.0 +381 5 model.c_min 0.023643777654516834 +381 5 model.c_max 2.9336317800823783 +381 5 optimizer.lr 0.005396055003566641 +381 5 negative_sampler.num_negs_per_pos 64.0 +381 5 training.batch_size 2.0 +381 6 model.embedding_dim 2.0 +381 6 model.c_min 
0.04151732306350915 +381 6 model.c_max 6.520389909259141 +381 6 optimizer.lr 0.006984549049713772 +381 6 negative_sampler.num_negs_per_pos 3.0 +381 6 training.batch_size 0.0 +381 7 model.embedding_dim 0.0 +381 7 model.c_min 0.02806390107304135 +381 7 model.c_max 7.037032695644651 +381 7 optimizer.lr 0.04817830939775263 +381 7 negative_sampler.num_negs_per_pos 56.0 +381 7 training.batch_size 0.0 +381 8 model.embedding_dim 1.0 +381 8 model.c_min 0.036079956504593065 +381 8 model.c_max 9.280550435714746 +381 8 optimizer.lr 0.005697154504199374 +381 8 negative_sampler.num_negs_per_pos 50.0 +381 8 training.batch_size 1.0 +381 9 model.embedding_dim 1.0 +381 9 model.c_min 0.05051406700149562 +381 9 model.c_max 8.606432314833977 +381 9 optimizer.lr 0.00702551728646067 +381 9 negative_sampler.num_negs_per_pos 43.0 +381 9 training.batch_size 2.0 +381 10 model.embedding_dim 2.0 +381 10 model.c_min 0.03661513493818632 +381 10 model.c_max 8.928618856139302 +381 10 optimizer.lr 0.006327603074563024 +381 10 negative_sampler.num_negs_per_pos 26.0 +381 10 training.batch_size 2.0 +381 11 model.embedding_dim 1.0 +381 11 model.c_min 0.01707858810567268 +381 11 model.c_max 9.832330603216125 +381 11 optimizer.lr 0.0062583845412642536 +381 11 negative_sampler.num_negs_per_pos 10.0 +381 11 training.batch_size 2.0 +381 12 model.embedding_dim 2.0 +381 12 model.c_min 0.01569518659533645 +381 12 model.c_max 9.253276601647855 +381 12 optimizer.lr 0.004108163892506708 +381 12 negative_sampler.num_negs_per_pos 69.0 +381 12 training.batch_size 1.0 +381 13 model.embedding_dim 1.0 +381 13 model.c_min 0.03320440803565544 +381 13 model.c_max 8.667701883590588 +381 13 optimizer.lr 0.06073901005380944 +381 13 negative_sampler.num_negs_per_pos 68.0 +381 13 training.batch_size 2.0 +381 14 model.embedding_dim 0.0 +381 14 model.c_min 0.01634117578370308 +381 14 model.c_max 7.752202733861921 +381 14 optimizer.lr 0.0015142144998918014 +381 14 negative_sampler.num_negs_per_pos 28.0 +381 14 training.batch_size 
2.0 +381 15 model.embedding_dim 0.0 +381 15 model.c_min 0.06891196523949919 +381 15 model.c_max 4.238337500717215 +381 15 optimizer.lr 0.008931462748964976 +381 15 negative_sampler.num_negs_per_pos 99.0 +381 15 training.batch_size 0.0 +381 16 model.embedding_dim 2.0 +381 16 model.c_min 0.03494278707392039 +381 16 model.c_max 8.73993176978658 +381 16 optimizer.lr 0.0014962380240816622 +381 16 negative_sampler.num_negs_per_pos 5.0 +381 16 training.batch_size 0.0 +381 17 model.embedding_dim 1.0 +381 17 model.c_min 0.022504474409146536 +381 17 model.c_max 1.4951622365674826 +381 17 optimizer.lr 0.0014029330738827804 +381 17 negative_sampler.num_negs_per_pos 86.0 +381 17 training.batch_size 1.0 +381 18 model.embedding_dim 0.0 +381 18 model.c_min 0.0698849858393767 +381 18 model.c_max 5.0212490221935155 +381 18 optimizer.lr 0.02830721886638635 +381 18 negative_sampler.num_negs_per_pos 5.0 +381 18 training.batch_size 2.0 +381 19 model.embedding_dim 2.0 +381 19 model.c_min 0.06864923568881334 +381 19 model.c_max 9.05343257784354 +381 19 optimizer.lr 0.02113472693806921 +381 19 negative_sampler.num_negs_per_pos 19.0 +381 19 training.batch_size 0.0 +381 20 model.embedding_dim 0.0 +381 20 model.c_min 0.04907715372474234 +381 20 model.c_max 4.869798065934427 +381 20 optimizer.lr 0.003463490396754712 +381 20 negative_sampler.num_negs_per_pos 23.0 +381 20 training.batch_size 2.0 +381 21 model.embedding_dim 1.0 +381 21 model.c_min 0.055493415517817966 +381 21 model.c_max 1.5021606319002454 +381 21 optimizer.lr 0.004019252566978897 +381 21 negative_sampler.num_negs_per_pos 72.0 +381 21 training.batch_size 1.0 +381 22 model.embedding_dim 1.0 +381 22 model.c_min 0.012306459997805351 +381 22 model.c_max 6.457345661338204 +381 22 optimizer.lr 0.004363322707462008 +381 22 negative_sampler.num_negs_per_pos 24.0 +381 22 training.batch_size 2.0 +381 23 model.embedding_dim 1.0 +381 23 model.c_min 0.03485370132581877 +381 23 model.c_max 4.7650972761859025 +381 23 optimizer.lr 
0.004047103422461684 +381 23 negative_sampler.num_negs_per_pos 44.0 +381 23 training.batch_size 0.0 +381 24 model.embedding_dim 2.0 +381 24 model.c_min 0.019013526579214898 +381 24 model.c_max 3.8361152022231852 +381 24 optimizer.lr 0.0075689278892431735 +381 24 negative_sampler.num_negs_per_pos 78.0 +381 24 training.batch_size 1.0 +381 25 model.embedding_dim 2.0 +381 25 model.c_min 0.05913248450221201 +381 25 model.c_max 6.210886838023337 +381 25 optimizer.lr 0.03960636397477594 +381 25 negative_sampler.num_negs_per_pos 90.0 +381 25 training.batch_size 2.0 +381 26 model.embedding_dim 2.0 +381 26 model.c_min 0.012239015702526407 +381 26 model.c_max 8.598280552610685 +381 26 optimizer.lr 0.01476201537028642 +381 26 negative_sampler.num_negs_per_pos 23.0 +381 26 training.batch_size 0.0 +381 27 model.embedding_dim 2.0 +381 27 model.c_min 0.057690369159042645 +381 27 model.c_max 3.0015078181998653 +381 27 optimizer.lr 0.00509824140781016 +381 27 negative_sampler.num_negs_per_pos 64.0 +381 27 training.batch_size 0.0 +381 28 model.embedding_dim 1.0 +381 28 model.c_min 0.05620766996697071 +381 28 model.c_max 7.080496380742947 +381 28 optimizer.lr 0.07775469725582414 +381 28 negative_sampler.num_negs_per_pos 64.0 +381 28 training.batch_size 1.0 +381 29 model.embedding_dim 1.0 +381 29 model.c_min 0.07906448945516809 +381 29 model.c_max 4.185682301041706 +381 29 optimizer.lr 0.002511208374878345 +381 29 negative_sampler.num_negs_per_pos 62.0 +381 29 training.batch_size 2.0 +381 30 model.embedding_dim 1.0 +381 30 model.c_min 0.03810728326727857 +381 30 model.c_max 5.603516393283729 +381 30 optimizer.lr 0.0010453947852759797 +381 30 negative_sampler.num_negs_per_pos 99.0 +381 30 training.batch_size 1.0 +381 31 model.embedding_dim 2.0 +381 31 model.c_min 0.01597414835217032 +381 31 model.c_max 9.612170597209465 +381 31 optimizer.lr 0.051498917662747126 +381 31 negative_sampler.num_negs_per_pos 24.0 +381 31 training.batch_size 0.0 +381 32 model.embedding_dim 1.0 +381 32 
model.c_min 0.08934590939379995 +381 32 model.c_max 5.807318584085296 +381 32 optimizer.lr 0.006327527614819528 +381 32 negative_sampler.num_negs_per_pos 18.0 +381 32 training.batch_size 0.0 +381 33 model.embedding_dim 1.0 +381 33 model.c_min 0.020831397344270412 +381 33 model.c_max 1.22176663296148 +381 33 optimizer.lr 0.04515874974625678 +381 33 negative_sampler.num_negs_per_pos 58.0 +381 33 training.batch_size 1.0 +381 34 model.embedding_dim 1.0 +381 34 model.c_min 0.014555377211287857 +381 34 model.c_max 2.866736884372833 +381 34 optimizer.lr 0.026012370510220387 +381 34 negative_sampler.num_negs_per_pos 29.0 +381 34 training.batch_size 1.0 +381 35 model.embedding_dim 0.0 +381 35 model.c_min 0.013230586306535933 +381 35 model.c_max 7.477324453984258 +381 35 optimizer.lr 0.002877967752373208 +381 35 negative_sampler.num_negs_per_pos 98.0 +381 35 training.batch_size 2.0 +381 36 model.embedding_dim 2.0 +381 36 model.c_min 0.02098703094725364 +381 36 model.c_max 8.652518148817224 +381 36 optimizer.lr 0.002147362612478049 +381 36 negative_sampler.num_negs_per_pos 92.0 +381 36 training.batch_size 2.0 +381 37 model.embedding_dim 0.0 +381 37 model.c_min 0.017995505275799836 +381 37 model.c_max 3.39141552598482 +381 37 optimizer.lr 0.0027871494998578357 +381 37 negative_sampler.num_negs_per_pos 53.0 +381 37 training.batch_size 0.0 +381 38 model.embedding_dim 0.0 +381 38 model.c_min 0.02029263349536168 +381 38 model.c_max 8.742949801630488 +381 38 optimizer.lr 0.017521894779464926 +381 38 negative_sampler.num_negs_per_pos 68.0 +381 38 training.batch_size 2.0 +381 39 model.embedding_dim 1.0 +381 39 model.c_min 0.044593200191427665 +381 39 model.c_max 1.4154181243306059 +381 39 optimizer.lr 0.0059104823180254895 +381 39 negative_sampler.num_negs_per_pos 38.0 +381 39 training.batch_size 1.0 +381 40 model.embedding_dim 1.0 +381 40 model.c_min 0.020457862725415735 +381 40 model.c_max 8.48666948894717 +381 40 optimizer.lr 0.0017130172868722567 +381 40 
negative_sampler.num_negs_per_pos 51.0 +381 40 training.batch_size 0.0 +381 41 model.embedding_dim 0.0 +381 41 model.c_min 0.014017435545683321 +381 41 model.c_max 2.6695940364744892 +381 41 optimizer.lr 0.017922416817430717 +381 41 negative_sampler.num_negs_per_pos 99.0 +381 41 training.batch_size 1.0 +381 42 model.embedding_dim 2.0 +381 42 model.c_min 0.04257000089217337 +381 42 model.c_max 4.490029231110235 +381 42 optimizer.lr 0.0025815026994742054 +381 42 negative_sampler.num_negs_per_pos 44.0 +381 42 training.batch_size 2.0 +381 43 model.embedding_dim 0.0 +381 43 model.c_min 0.07861932014932285 +381 43 model.c_max 4.534552367484567 +381 43 optimizer.lr 0.00370321928780251 +381 43 negative_sampler.num_negs_per_pos 56.0 +381 43 training.batch_size 0.0 +381 44 model.embedding_dim 1.0 +381 44 model.c_min 0.012711138655185998 +381 44 model.c_max 5.986085579534272 +381 44 optimizer.lr 0.0023494641803814844 +381 44 negative_sampler.num_negs_per_pos 54.0 +381 44 training.batch_size 1.0 +381 45 model.embedding_dim 0.0 +381 45 model.c_min 0.015104137742489898 +381 45 model.c_max 5.2206946566645325 +381 45 optimizer.lr 0.0017585386945647484 +381 45 negative_sampler.num_negs_per_pos 45.0 +381 45 training.batch_size 2.0 +381 46 model.embedding_dim 1.0 +381 46 model.c_min 0.09152158301981159 +381 46 model.c_max 7.777343164091857 +381 46 optimizer.lr 0.0037970906179864276 +381 46 negative_sampler.num_negs_per_pos 60.0 +381 46 training.batch_size 2.0 +381 47 model.embedding_dim 0.0 +381 47 model.c_min 0.056857700131316906 +381 47 model.c_max 5.435888116347886 +381 47 optimizer.lr 0.0017187932062999224 +381 47 negative_sampler.num_negs_per_pos 97.0 +381 47 training.batch_size 0.0 +381 48 model.embedding_dim 0.0 +381 48 model.c_min 0.015591446019423428 +381 48 model.c_max 9.561195964685787 +381 48 optimizer.lr 0.04519217507237733 +381 48 negative_sampler.num_negs_per_pos 87.0 +381 48 training.batch_size 0.0 +381 49 model.embedding_dim 0.0 +381 49 model.c_min 
0.013955046388062399 +381 49 model.c_max 3.8997929242725755 +381 49 optimizer.lr 0.07851691835380929 +381 49 negative_sampler.num_negs_per_pos 53.0 +381 49 training.batch_size 0.0 +381 50 model.embedding_dim 0.0 +381 50 model.c_min 0.07710395952135514 +381 50 model.c_max 3.7531005604941576 +381 50 optimizer.lr 0.007223628494262555 +381 50 negative_sampler.num_negs_per_pos 95.0 +381 50 training.batch_size 2.0 +381 51 model.embedding_dim 0.0 +381 51 model.c_min 0.035064467282605226 +381 51 model.c_max 5.987944648574297 +381 51 optimizer.lr 0.011689352525642079 +381 51 negative_sampler.num_negs_per_pos 87.0 +381 51 training.batch_size 0.0 +381 52 model.embedding_dim 1.0 +381 52 model.c_min 0.014834112677613956 +381 52 model.c_max 7.786259609084575 +381 52 optimizer.lr 0.0036503509665648083 +381 52 negative_sampler.num_negs_per_pos 99.0 +381 52 training.batch_size 1.0 +381 53 model.embedding_dim 2.0 +381 53 model.c_min 0.018598293019133006 +381 53 model.c_max 2.6623668920791195 +381 53 optimizer.lr 0.007899587320608144 +381 53 negative_sampler.num_negs_per_pos 6.0 +381 53 training.batch_size 2.0 +381 54 model.embedding_dim 0.0 +381 54 model.c_min 0.08905358825724931 +381 54 model.c_max 7.200975891779279 +381 54 optimizer.lr 0.00961950603582863 +381 54 negative_sampler.num_negs_per_pos 18.0 +381 54 training.batch_size 2.0 +381 55 model.embedding_dim 0.0 +381 55 model.c_min 0.09426630088934508 +381 55 model.c_max 8.869721763070103 +381 55 optimizer.lr 0.021237738132252035 +381 55 negative_sampler.num_negs_per_pos 31.0 +381 55 training.batch_size 1.0 +381 56 model.embedding_dim 0.0 +381 56 model.c_min 0.049167452800275666 +381 56 model.c_max 9.918161600472029 +381 56 optimizer.lr 0.0019178443359174355 +381 56 negative_sampler.num_negs_per_pos 99.0 +381 56 training.batch_size 2.0 +381 57 model.embedding_dim 0.0 +381 57 model.c_min 0.010253434882139737 +381 57 model.c_max 2.5355310947305667 +381 57 optimizer.lr 0.0300507445022789 +381 57 negative_sampler.num_negs_per_pos 
1.0 +381 57 training.batch_size 0.0 +381 58 model.embedding_dim 0.0 +381 58 model.c_min 0.07972737143707961 +381 58 model.c_max 1.9870738103386811 +381 58 optimizer.lr 0.029981987556552507 +381 58 negative_sampler.num_negs_per_pos 1.0 +381 58 training.batch_size 2.0 +381 59 model.embedding_dim 0.0 +381 59 model.c_min 0.040299472495274144 +381 59 model.c_max 3.65794698676769 +381 59 optimizer.lr 0.07288939879878902 +381 59 negative_sampler.num_negs_per_pos 39.0 +381 59 training.batch_size 0.0 +381 60 model.embedding_dim 1.0 +381 60 model.c_min 0.02963614810219833 +381 60 model.c_max 7.151369753491392 +381 60 optimizer.lr 0.003221056628034745 +381 60 negative_sampler.num_negs_per_pos 8.0 +381 60 training.batch_size 0.0 +381 61 model.embedding_dim 1.0 +381 61 model.c_min 0.04917168104526769 +381 61 model.c_max 4.358259373372197 +381 61 optimizer.lr 0.04037942256190164 +381 61 negative_sampler.num_negs_per_pos 2.0 +381 61 training.batch_size 2.0 +381 62 model.embedding_dim 1.0 +381 62 model.c_min 0.024297572115988493 +381 62 model.c_max 6.260303927072515 +381 62 optimizer.lr 0.01876119981281594 +381 62 negative_sampler.num_negs_per_pos 71.0 +381 62 training.batch_size 0.0 +381 63 model.embedding_dim 1.0 +381 63 model.c_min 0.050421222614663 +381 63 model.c_max 7.70052143956825 +381 63 optimizer.lr 0.00571950679709032 +381 63 negative_sampler.num_negs_per_pos 34.0 +381 63 training.batch_size 0.0 +381 64 model.embedding_dim 2.0 +381 64 model.c_min 0.015007303762162964 +381 64 model.c_max 4.38856550421514 +381 64 optimizer.lr 0.0031308819088491724 +381 64 negative_sampler.num_negs_per_pos 0.0 +381 64 training.batch_size 2.0 +381 65 model.embedding_dim 0.0 +381 65 model.c_min 0.04840202575627143 +381 65 model.c_max 5.182812027675621 +381 65 optimizer.lr 0.005090109917323391 +381 65 negative_sampler.num_negs_per_pos 77.0 +381 65 training.batch_size 1.0 +381 66 model.embedding_dim 1.0 +381 66 model.c_min 0.08506472004081368 +381 66 model.c_max 2.0274566695514746 +381 66 
optimizer.lr 0.006866447286666252 +381 66 negative_sampler.num_negs_per_pos 57.0 +381 66 training.batch_size 0.0 +381 67 model.embedding_dim 1.0 +381 67 model.c_min 0.010219407021972944 +381 67 model.c_max 5.780732681097198 +381 67 optimizer.lr 0.0023914300134942 +381 67 negative_sampler.num_negs_per_pos 25.0 +381 67 training.batch_size 1.0 +381 68 model.embedding_dim 1.0 +381 68 model.c_min 0.06512881185550894 +381 68 model.c_max 6.034345423748859 +381 68 optimizer.lr 0.059077836843376905 +381 68 negative_sampler.num_negs_per_pos 64.0 +381 68 training.batch_size 0.0 +381 69 model.embedding_dim 1.0 +381 69 model.c_min 0.040931024577915524 +381 69 model.c_max 9.230486052900428 +381 69 optimizer.lr 0.0015774757639956203 +381 69 negative_sampler.num_negs_per_pos 53.0 +381 69 training.batch_size 1.0 +381 70 model.embedding_dim 1.0 +381 70 model.c_min 0.027743290396820715 +381 70 model.c_max 8.525351569304412 +381 70 optimizer.lr 0.004315266020543904 +381 70 negative_sampler.num_negs_per_pos 51.0 +381 70 training.batch_size 2.0 +381 71 model.embedding_dim 1.0 +381 71 model.c_min 0.05171145054637776 +381 71 model.c_max 1.5395209012878956 +381 71 optimizer.lr 0.009459124789484493 +381 71 negative_sampler.num_negs_per_pos 53.0 +381 71 training.batch_size 2.0 +381 72 model.embedding_dim 0.0 +381 72 model.c_min 0.013317471659238193 +381 72 model.c_max 3.6119639231465857 +381 72 optimizer.lr 0.012386384588380279 +381 72 negative_sampler.num_negs_per_pos 90.0 +381 72 training.batch_size 1.0 +381 73 model.embedding_dim 0.0 +381 73 model.c_min 0.010939460265664582 +381 73 model.c_max 4.036577448718006 +381 73 optimizer.lr 0.0031508260402367486 +381 73 negative_sampler.num_negs_per_pos 86.0 +381 73 training.batch_size 1.0 +381 74 model.embedding_dim 1.0 +381 74 model.c_min 0.050673724786826205 +381 74 model.c_max 1.0067967595723055 +381 74 optimizer.lr 0.06556896119648711 +381 74 negative_sampler.num_negs_per_pos 1.0 +381 74 training.batch_size 1.0 +381 75 model.embedding_dim 1.0 
+381 75 model.c_min 0.04586104033489359 +381 75 model.c_max 3.4607380092666604 +381 75 optimizer.lr 0.0012313235311527846 +381 75 negative_sampler.num_negs_per_pos 34.0 +381 75 training.batch_size 0.0 +381 76 model.embedding_dim 1.0 +381 76 model.c_min 0.03897136012392149 +381 76 model.c_max 6.703168551297302 +381 76 optimizer.lr 0.004643179642491487 +381 76 negative_sampler.num_negs_per_pos 58.0 +381 76 training.batch_size 1.0 +381 77 model.embedding_dim 0.0 +381 77 model.c_min 0.05530765244300419 +381 77 model.c_max 3.4806022361785933 +381 77 optimizer.lr 0.0011737285041287637 +381 77 negative_sampler.num_negs_per_pos 40.0 +381 77 training.batch_size 1.0 +381 78 model.embedding_dim 1.0 +381 78 model.c_min 0.046024344375762825 +381 78 model.c_max 9.021128045331249 +381 78 optimizer.lr 0.003898675888252797 +381 78 negative_sampler.num_negs_per_pos 88.0 +381 78 training.batch_size 1.0 +381 79 model.embedding_dim 0.0 +381 79 model.c_min 0.027910392894555276 +381 79 model.c_max 1.6145068681187187 +381 79 optimizer.lr 0.05251410001229623 +381 79 negative_sampler.num_negs_per_pos 28.0 +381 79 training.batch_size 0.0 +381 80 model.embedding_dim 0.0 +381 80 model.c_min 0.024162252300756797 +381 80 model.c_max 3.1722519928123845 +381 80 optimizer.lr 0.05770588578511583 +381 80 negative_sampler.num_negs_per_pos 87.0 +381 80 training.batch_size 0.0 +381 81 model.embedding_dim 1.0 +381 81 model.c_min 0.07533053164723645 +381 81 model.c_max 7.479990873642823 +381 81 optimizer.lr 0.0013289707862031888 +381 81 negative_sampler.num_negs_per_pos 38.0 +381 81 training.batch_size 1.0 +381 82 model.embedding_dim 2.0 +381 82 model.c_min 0.06067540030318975 +381 82 model.c_max 2.3038751729604527 +381 82 optimizer.lr 0.0017688328138701283 +381 82 negative_sampler.num_negs_per_pos 73.0 +381 82 training.batch_size 1.0 +381 83 model.embedding_dim 0.0 +381 83 model.c_min 0.037652590320464016 +381 83 model.c_max 8.23392236467208 +381 83 optimizer.lr 0.08609233405346267 +381 83 
negative_sampler.num_negs_per_pos 28.0 +381 83 training.batch_size 0.0 +381 84 model.embedding_dim 0.0 +381 84 model.c_min 0.029378884747198773 +381 84 model.c_max 8.219244339775095 +381 84 optimizer.lr 0.009593930560349394 +381 84 negative_sampler.num_negs_per_pos 38.0 +381 84 training.batch_size 0.0 +381 85 model.embedding_dim 0.0 +381 85 model.c_min 0.027870129192968184 +381 85 model.c_max 5.266831687405839 +381 85 optimizer.lr 0.018566644248847 +381 85 negative_sampler.num_negs_per_pos 75.0 +381 85 training.batch_size 0.0 +381 86 model.embedding_dim 0.0 +381 86 model.c_min 0.07574454691314676 +381 86 model.c_max 8.180268998152137 +381 86 optimizer.lr 0.008979890600521305 +381 86 negative_sampler.num_negs_per_pos 48.0 +381 86 training.batch_size 1.0 +381 87 model.embedding_dim 1.0 +381 87 model.c_min 0.01813988947617766 +381 87 model.c_max 1.772684254730207 +381 87 optimizer.lr 0.020968992656480095 +381 87 negative_sampler.num_negs_per_pos 32.0 +381 87 training.batch_size 2.0 +381 88 model.embedding_dim 2.0 +381 88 model.c_min 0.02301267697106775 +381 88 model.c_max 4.687417517431016 +381 88 optimizer.lr 0.0027234621779521477 +381 88 negative_sampler.num_negs_per_pos 33.0 +381 88 training.batch_size 2.0 +381 89 model.embedding_dim 2.0 +381 89 model.c_min 0.04159242115041774 +381 89 model.c_max 3.3284858541136484 +381 89 optimizer.lr 0.0032905622077585017 +381 89 negative_sampler.num_negs_per_pos 5.0 +381 89 training.batch_size 1.0 +381 90 model.embedding_dim 0.0 +381 90 model.c_min 0.033526365683739845 +381 90 model.c_max 1.8653799596398166 +381 90 optimizer.lr 0.009422469453679333 +381 90 negative_sampler.num_negs_per_pos 22.0 +381 90 training.batch_size 0.0 +381 91 model.embedding_dim 2.0 +381 91 model.c_min 0.012984392373895239 +381 91 model.c_max 8.04366086254003 +381 91 optimizer.lr 0.009050234331071957 +381 91 negative_sampler.num_negs_per_pos 2.0 +381 91 training.batch_size 2.0 +381 92 model.embedding_dim 1.0 +381 92 model.c_min 0.06318881240932157 +381 
92 model.c_max 1.1203597251193598 +381 92 optimizer.lr 0.001268488587374536 +381 92 negative_sampler.num_negs_per_pos 84.0 +381 92 training.batch_size 0.0 +381 93 model.embedding_dim 0.0 +381 93 model.c_min 0.042741462184773676 +381 93 model.c_max 4.941634959148832 +381 93 optimizer.lr 0.003240996432294777 +381 93 negative_sampler.num_negs_per_pos 76.0 +381 93 training.batch_size 0.0 +381 94 model.embedding_dim 1.0 +381 94 model.c_min 0.07923406888153176 +381 94 model.c_max 5.604626270647769 +381 94 optimizer.lr 0.020222397999971047 +381 94 negative_sampler.num_negs_per_pos 2.0 +381 94 training.batch_size 0.0 +381 95 model.embedding_dim 1.0 +381 95 model.c_min 0.03260204211646564 +381 95 model.c_max 4.601535420438388 +381 95 optimizer.lr 0.009975286364181017 +381 95 negative_sampler.num_negs_per_pos 65.0 +381 95 training.batch_size 2.0 +381 96 model.embedding_dim 0.0 +381 96 model.c_min 0.01078645952572612 +381 96 model.c_max 9.409194350641538 +381 96 optimizer.lr 0.02600253648166432 +381 96 negative_sampler.num_negs_per_pos 97.0 +381 96 training.batch_size 0.0 +381 97 model.embedding_dim 0.0 +381 97 model.c_min 0.017279553794904284 +381 97 model.c_max 2.9625781329641616 +381 97 optimizer.lr 0.0059305160426593164 +381 97 negative_sampler.num_negs_per_pos 82.0 +381 97 training.batch_size 1.0 +381 98 model.embedding_dim 2.0 +381 98 model.c_min 0.012320510544046658 +381 98 model.c_max 1.1820280473614206 +381 98 optimizer.lr 0.08222176035175648 +381 98 negative_sampler.num_negs_per_pos 58.0 +381 98 training.batch_size 2.0 +381 99 model.embedding_dim 2.0 +381 99 model.c_min 0.019491312517572718 +381 99 model.c_max 4.591378909072425 +381 99 optimizer.lr 0.0030731326305090912 +381 99 negative_sampler.num_negs_per_pos 68.0 +381 99 training.batch_size 0.0 +381 100 model.embedding_dim 0.0 +381 100 model.c_min 0.027877387434549615 +381 100 model.c_max 3.293073230402301 +381 100 optimizer.lr 0.02842243268663325 +381 100 negative_sampler.num_negs_per_pos 42.0 +381 100 
training.batch_size 2.0 +381 1 dataset """kinships""" +381 1 model """kg2e""" +381 1 loss """bceaftersigmoid""" +381 1 regularizer """no""" +381 1 optimizer """adam""" +381 1 training_loop """owa""" +381 1 negative_sampler """basic""" +381 1 evaluator """rankbased""" +381 2 dataset """kinships""" +381 2 model """kg2e""" +381 2 loss """bceaftersigmoid""" +381 2 regularizer """no""" +381 2 optimizer """adam""" +381 2 training_loop """owa""" +381 2 negative_sampler """basic""" +381 2 evaluator """rankbased""" +381 3 dataset """kinships""" +381 3 model """kg2e""" +381 3 loss """bceaftersigmoid""" +381 3 regularizer """no""" +381 3 optimizer """adam""" +381 3 training_loop """owa""" +381 3 negative_sampler """basic""" +381 3 evaluator """rankbased""" +381 4 dataset """kinships""" +381 4 model """kg2e""" +381 4 loss """bceaftersigmoid""" +381 4 regularizer """no""" +381 4 optimizer """adam""" +381 4 training_loop """owa""" +381 4 negative_sampler """basic""" +381 4 evaluator """rankbased""" +381 5 dataset """kinships""" +381 5 model """kg2e""" +381 5 loss """bceaftersigmoid""" +381 5 regularizer """no""" +381 5 optimizer """adam""" +381 5 training_loop """owa""" +381 5 negative_sampler """basic""" +381 5 evaluator """rankbased""" +381 6 dataset """kinships""" +381 6 model """kg2e""" +381 6 loss """bceaftersigmoid""" +381 6 regularizer """no""" +381 6 optimizer """adam""" +381 6 training_loop """owa""" +381 6 negative_sampler """basic""" +381 6 evaluator """rankbased""" +381 7 dataset """kinships""" +381 7 model """kg2e""" +381 7 loss """bceaftersigmoid""" +381 7 regularizer """no""" +381 7 optimizer """adam""" +381 7 training_loop """owa""" +381 7 negative_sampler """basic""" +381 7 evaluator """rankbased""" +381 8 dataset """kinships""" +381 8 model """kg2e""" +381 8 loss """bceaftersigmoid""" +381 8 regularizer """no""" +381 8 optimizer """adam""" +381 8 training_loop """owa""" +381 8 negative_sampler """basic""" +381 8 evaluator """rankbased""" +381 9 dataset 
"""kinships""" +381 9 model """kg2e""" +381 9 loss """bceaftersigmoid""" +381 9 regularizer """no""" +381 9 optimizer """adam""" +381 9 training_loop """owa""" +381 9 negative_sampler """basic""" +381 9 evaluator """rankbased""" +381 10 dataset """kinships""" +381 10 model """kg2e""" +381 10 loss """bceaftersigmoid""" +381 10 regularizer """no""" +381 10 optimizer """adam""" +381 10 training_loop """owa""" +381 10 negative_sampler """basic""" +381 10 evaluator """rankbased""" +381 11 dataset """kinships""" +381 11 model """kg2e""" +381 11 loss """bceaftersigmoid""" +381 11 regularizer """no""" +381 11 optimizer """adam""" +381 11 training_loop """owa""" +381 11 negative_sampler """basic""" +381 11 evaluator """rankbased""" +381 12 dataset """kinships""" +381 12 model """kg2e""" +381 12 loss """bceaftersigmoid""" +381 12 regularizer """no""" +381 12 optimizer """adam""" +381 12 training_loop """owa""" +381 12 negative_sampler """basic""" +381 12 evaluator """rankbased""" +381 13 dataset """kinships""" +381 13 model """kg2e""" +381 13 loss """bceaftersigmoid""" +381 13 regularizer """no""" +381 13 optimizer """adam""" +381 13 training_loop """owa""" +381 13 negative_sampler """basic""" +381 13 evaluator """rankbased""" +381 14 dataset """kinships""" +381 14 model """kg2e""" +381 14 loss """bceaftersigmoid""" +381 14 regularizer """no""" +381 14 optimizer """adam""" +381 14 training_loop """owa""" +381 14 negative_sampler """basic""" +381 14 evaluator """rankbased""" +381 15 dataset """kinships""" +381 15 model """kg2e""" +381 15 loss """bceaftersigmoid""" +381 15 regularizer """no""" +381 15 optimizer """adam""" +381 15 training_loop """owa""" +381 15 negative_sampler """basic""" +381 15 evaluator """rankbased""" +381 16 dataset """kinships""" +381 16 model """kg2e""" +381 16 loss """bceaftersigmoid""" +381 16 regularizer """no""" +381 16 optimizer """adam""" +381 16 training_loop """owa""" +381 16 negative_sampler """basic""" +381 16 evaluator """rankbased""" +381 
17 dataset """kinships""" +381 17 model """kg2e""" +381 17 loss """bceaftersigmoid""" +381 17 regularizer """no""" +381 17 optimizer """adam""" +381 17 training_loop """owa""" +381 17 negative_sampler """basic""" +381 17 evaluator """rankbased""" +381 18 dataset """kinships""" +381 18 model """kg2e""" +381 18 loss """bceaftersigmoid""" +381 18 regularizer """no""" +381 18 optimizer """adam""" +381 18 training_loop """owa""" +381 18 negative_sampler """basic""" +381 18 evaluator """rankbased""" +381 19 dataset """kinships""" +381 19 model """kg2e""" +381 19 loss """bceaftersigmoid""" +381 19 regularizer """no""" +381 19 optimizer """adam""" +381 19 training_loop """owa""" +381 19 negative_sampler """basic""" +381 19 evaluator """rankbased""" +381 20 dataset """kinships""" +381 20 model """kg2e""" +381 20 loss """bceaftersigmoid""" +381 20 regularizer """no""" +381 20 optimizer """adam""" +381 20 training_loop """owa""" +381 20 negative_sampler """basic""" +381 20 evaluator """rankbased""" +381 21 dataset """kinships""" +381 21 model """kg2e""" +381 21 loss """bceaftersigmoid""" +381 21 regularizer """no""" +381 21 optimizer """adam""" +381 21 training_loop """owa""" +381 21 negative_sampler """basic""" +381 21 evaluator """rankbased""" +381 22 dataset """kinships""" +381 22 model """kg2e""" +381 22 loss """bceaftersigmoid""" +381 22 regularizer """no""" +381 22 optimizer """adam""" +381 22 training_loop """owa""" +381 22 negative_sampler """basic""" +381 22 evaluator """rankbased""" +381 23 dataset """kinships""" +381 23 model """kg2e""" +381 23 loss """bceaftersigmoid""" +381 23 regularizer """no""" +381 23 optimizer """adam""" +381 23 training_loop """owa""" +381 23 negative_sampler """basic""" +381 23 evaluator """rankbased""" +381 24 dataset """kinships""" +381 24 model """kg2e""" +381 24 loss """bceaftersigmoid""" +381 24 regularizer """no""" +381 24 optimizer """adam""" +381 24 training_loop """owa""" +381 24 negative_sampler """basic""" +381 24 evaluator 
"""rankbased""" +381 25 dataset """kinships""" +381 25 model """kg2e""" +381 25 loss """bceaftersigmoid""" +381 25 regularizer """no""" +381 25 optimizer """adam""" +381 25 training_loop """owa""" +381 25 negative_sampler """basic""" +381 25 evaluator """rankbased""" +381 26 dataset """kinships""" +381 26 model """kg2e""" +381 26 loss """bceaftersigmoid""" +381 26 regularizer """no""" +381 26 optimizer """adam""" +381 26 training_loop """owa""" +381 26 negative_sampler """basic""" +381 26 evaluator """rankbased""" +381 27 dataset """kinships""" +381 27 model """kg2e""" +381 27 loss """bceaftersigmoid""" +381 27 regularizer """no""" +381 27 optimizer """adam""" +381 27 training_loop """owa""" +381 27 negative_sampler """basic""" +381 27 evaluator """rankbased""" +381 28 dataset """kinships""" +381 28 model """kg2e""" +381 28 loss """bceaftersigmoid""" +381 28 regularizer """no""" +381 28 optimizer """adam""" +381 28 training_loop """owa""" +381 28 negative_sampler """basic""" +381 28 evaluator """rankbased""" +381 29 dataset """kinships""" +381 29 model """kg2e""" +381 29 loss """bceaftersigmoid""" +381 29 regularizer """no""" +381 29 optimizer """adam""" +381 29 training_loop """owa""" +381 29 negative_sampler """basic""" +381 29 evaluator """rankbased""" +381 30 dataset """kinships""" +381 30 model """kg2e""" +381 30 loss """bceaftersigmoid""" +381 30 regularizer """no""" +381 30 optimizer """adam""" +381 30 training_loop """owa""" +381 30 negative_sampler """basic""" +381 30 evaluator """rankbased""" +381 31 dataset """kinships""" +381 31 model """kg2e""" +381 31 loss """bceaftersigmoid""" +381 31 regularizer """no""" +381 31 optimizer """adam""" +381 31 training_loop """owa""" +381 31 negative_sampler """basic""" +381 31 evaluator """rankbased""" +381 32 dataset """kinships""" +381 32 model """kg2e""" +381 32 loss """bceaftersigmoid""" +381 32 regularizer """no""" +381 32 optimizer """adam""" +381 32 training_loop """owa""" +381 32 negative_sampler """basic""" 
+381 32 evaluator """rankbased""" +381 33 dataset """kinships""" +381 33 model """kg2e""" +381 33 loss """bceaftersigmoid""" +381 33 regularizer """no""" +381 33 optimizer """adam""" +381 33 training_loop """owa""" +381 33 negative_sampler """basic""" +381 33 evaluator """rankbased""" +381 34 dataset """kinships""" +381 34 model """kg2e""" +381 34 loss """bceaftersigmoid""" +381 34 regularizer """no""" +381 34 optimizer """adam""" +381 34 training_loop """owa""" +381 34 negative_sampler """basic""" +381 34 evaluator """rankbased""" +381 35 dataset """kinships""" +381 35 model """kg2e""" +381 35 loss """bceaftersigmoid""" +381 35 regularizer """no""" +381 35 optimizer """adam""" +381 35 training_loop """owa""" +381 35 negative_sampler """basic""" +381 35 evaluator """rankbased""" +381 36 dataset """kinships""" +381 36 model """kg2e""" +381 36 loss """bceaftersigmoid""" +381 36 regularizer """no""" +381 36 optimizer """adam""" +381 36 training_loop """owa""" +381 36 negative_sampler """basic""" +381 36 evaluator """rankbased""" +381 37 dataset """kinships""" +381 37 model """kg2e""" +381 37 loss """bceaftersigmoid""" +381 37 regularizer """no""" +381 37 optimizer """adam""" +381 37 training_loop """owa""" +381 37 negative_sampler """basic""" +381 37 evaluator """rankbased""" +381 38 dataset """kinships""" +381 38 model """kg2e""" +381 38 loss """bceaftersigmoid""" +381 38 regularizer """no""" +381 38 optimizer """adam""" +381 38 training_loop """owa""" +381 38 negative_sampler """basic""" +381 38 evaluator """rankbased""" +381 39 dataset """kinships""" +381 39 model """kg2e""" +381 39 loss """bceaftersigmoid""" +381 39 regularizer """no""" +381 39 optimizer """adam""" +381 39 training_loop """owa""" +381 39 negative_sampler """basic""" +381 39 evaluator """rankbased""" +381 40 dataset """kinships""" +381 40 model """kg2e""" +381 40 loss """bceaftersigmoid""" +381 40 regularizer """no""" +381 40 optimizer """adam""" +381 40 training_loop """owa""" +381 40 
negative_sampler """basic""" +381 40 evaluator """rankbased""" +381 41 dataset """kinships""" +381 41 model """kg2e""" +381 41 loss """bceaftersigmoid""" +381 41 regularizer """no""" +381 41 optimizer """adam""" +381 41 training_loop """owa""" +381 41 negative_sampler """basic""" +381 41 evaluator """rankbased""" +381 42 dataset """kinships""" +381 42 model """kg2e""" +381 42 loss """bceaftersigmoid""" +381 42 regularizer """no""" +381 42 optimizer """adam""" +381 42 training_loop """owa""" +381 42 negative_sampler """basic""" +381 42 evaluator """rankbased""" +381 43 dataset """kinships""" +381 43 model """kg2e""" +381 43 loss """bceaftersigmoid""" +381 43 regularizer """no""" +381 43 optimizer """adam""" +381 43 training_loop """owa""" +381 43 negative_sampler """basic""" +381 43 evaluator """rankbased""" +381 44 dataset """kinships""" +381 44 model """kg2e""" +381 44 loss """bceaftersigmoid""" +381 44 regularizer """no""" +381 44 optimizer """adam""" +381 44 training_loop """owa""" +381 44 negative_sampler """basic""" +381 44 evaluator """rankbased""" +381 45 dataset """kinships""" +381 45 model """kg2e""" +381 45 loss """bceaftersigmoid""" +381 45 regularizer """no""" +381 45 optimizer """adam""" +381 45 training_loop """owa""" +381 45 negative_sampler """basic""" +381 45 evaluator """rankbased""" +381 46 dataset """kinships""" +381 46 model """kg2e""" +381 46 loss """bceaftersigmoid""" +381 46 regularizer """no""" +381 46 optimizer """adam""" +381 46 training_loop """owa""" +381 46 negative_sampler """basic""" +381 46 evaluator """rankbased""" +381 47 dataset """kinships""" +381 47 model """kg2e""" +381 47 loss """bceaftersigmoid""" +381 47 regularizer """no""" +381 47 optimizer """adam""" +381 47 training_loop """owa""" +381 47 negative_sampler """basic""" +381 47 evaluator """rankbased""" +381 48 dataset """kinships""" +381 48 model """kg2e""" +381 48 loss """bceaftersigmoid""" +381 48 regularizer """no""" +381 48 optimizer """adam""" +381 48 training_loop 
"""owa""" +381 48 negative_sampler """basic""" +381 48 evaluator """rankbased""" +381 49 dataset """kinships""" +381 49 model """kg2e""" +381 49 loss """bceaftersigmoid""" +381 49 regularizer """no""" +381 49 optimizer """adam""" +381 49 training_loop """owa""" +381 49 negative_sampler """basic""" +381 49 evaluator """rankbased""" +381 50 dataset """kinships""" +381 50 model """kg2e""" +381 50 loss """bceaftersigmoid""" +381 50 regularizer """no""" +381 50 optimizer """adam""" +381 50 training_loop """owa""" +381 50 negative_sampler """basic""" +381 50 evaluator """rankbased""" +381 51 dataset """kinships""" +381 51 model """kg2e""" +381 51 loss """bceaftersigmoid""" +381 51 regularizer """no""" +381 51 optimizer """adam""" +381 51 training_loop """owa""" +381 51 negative_sampler """basic""" +381 51 evaluator """rankbased""" +381 52 dataset """kinships""" +381 52 model """kg2e""" +381 52 loss """bceaftersigmoid""" +381 52 regularizer """no""" +381 52 optimizer """adam""" +381 52 training_loop """owa""" +381 52 negative_sampler """basic""" +381 52 evaluator """rankbased""" +381 53 dataset """kinships""" +381 53 model """kg2e""" +381 53 loss """bceaftersigmoid""" +381 53 regularizer """no""" +381 53 optimizer """adam""" +381 53 training_loop """owa""" +381 53 negative_sampler """basic""" +381 53 evaluator """rankbased""" +381 54 dataset """kinships""" +381 54 model """kg2e""" +381 54 loss """bceaftersigmoid""" +381 54 regularizer """no""" +381 54 optimizer """adam""" +381 54 training_loop """owa""" +381 54 negative_sampler """basic""" +381 54 evaluator """rankbased""" +381 55 dataset """kinships""" +381 55 model """kg2e""" +381 55 loss """bceaftersigmoid""" +381 55 regularizer """no""" +381 55 optimizer """adam""" +381 55 training_loop """owa""" +381 55 negative_sampler """basic""" +381 55 evaluator """rankbased""" +381 56 dataset """kinships""" +381 56 model """kg2e""" +381 56 loss """bceaftersigmoid""" +381 56 regularizer """no""" +381 56 optimizer """adam""" +381 
56 training_loop """owa""" +381 56 negative_sampler """basic""" +381 56 evaluator """rankbased""" +381 57 dataset """kinships""" +381 57 model """kg2e""" +381 57 loss """bceaftersigmoid""" +381 57 regularizer """no""" +381 57 optimizer """adam""" +381 57 training_loop """owa""" +381 57 negative_sampler """basic""" +381 57 evaluator """rankbased""" +381 58 dataset """kinships""" +381 58 model """kg2e""" +381 58 loss """bceaftersigmoid""" +381 58 regularizer """no""" +381 58 optimizer """adam""" +381 58 training_loop """owa""" +381 58 negative_sampler """basic""" +381 58 evaluator """rankbased""" +381 59 dataset """kinships""" +381 59 model """kg2e""" +381 59 loss """bceaftersigmoid""" +381 59 regularizer """no""" +381 59 optimizer """adam""" +381 59 training_loop """owa""" +381 59 negative_sampler """basic""" +381 59 evaluator """rankbased""" +381 60 dataset """kinships""" +381 60 model """kg2e""" +381 60 loss """bceaftersigmoid""" +381 60 regularizer """no""" +381 60 optimizer """adam""" +381 60 training_loop """owa""" +381 60 negative_sampler """basic""" +381 60 evaluator """rankbased""" +381 61 dataset """kinships""" +381 61 model """kg2e""" +381 61 loss """bceaftersigmoid""" +381 61 regularizer """no""" +381 61 optimizer """adam""" +381 61 training_loop """owa""" +381 61 negative_sampler """basic""" +381 61 evaluator """rankbased""" +381 62 dataset """kinships""" +381 62 model """kg2e""" +381 62 loss """bceaftersigmoid""" +381 62 regularizer """no""" +381 62 optimizer """adam""" +381 62 training_loop """owa""" +381 62 negative_sampler """basic""" +381 62 evaluator """rankbased""" +381 63 dataset """kinships""" +381 63 model """kg2e""" +381 63 loss """bceaftersigmoid""" +381 63 regularizer """no""" +381 63 optimizer """adam""" +381 63 training_loop """owa""" +381 63 negative_sampler """basic""" +381 63 evaluator """rankbased""" +381 64 dataset """kinships""" +381 64 model """kg2e""" +381 64 loss """bceaftersigmoid""" +381 64 regularizer """no""" +381 64 optimizer 
"""adam""" +381 64 training_loop """owa""" +381 64 negative_sampler """basic""" +381 64 evaluator """rankbased""" +381 65 dataset """kinships""" +381 65 model """kg2e""" +381 65 loss """bceaftersigmoid""" +381 65 regularizer """no""" +381 65 optimizer """adam""" +381 65 training_loop """owa""" +381 65 negative_sampler """basic""" +381 65 evaluator """rankbased""" +381 66 dataset """kinships""" +381 66 model """kg2e""" +381 66 loss """bceaftersigmoid""" +381 66 regularizer """no""" +381 66 optimizer """adam""" +381 66 training_loop """owa""" +381 66 negative_sampler """basic""" +381 66 evaluator """rankbased""" +381 67 dataset """kinships""" +381 67 model """kg2e""" +381 67 loss """bceaftersigmoid""" +381 67 regularizer """no""" +381 67 optimizer """adam""" +381 67 training_loop """owa""" +381 67 negative_sampler """basic""" +381 67 evaluator """rankbased""" +381 68 dataset """kinships""" +381 68 model """kg2e""" +381 68 loss """bceaftersigmoid""" +381 68 regularizer """no""" +381 68 optimizer """adam""" +381 68 training_loop """owa""" +381 68 negative_sampler """basic""" +381 68 evaluator """rankbased""" +381 69 dataset """kinships""" +381 69 model """kg2e""" +381 69 loss """bceaftersigmoid""" +381 69 regularizer """no""" +381 69 optimizer """adam""" +381 69 training_loop """owa""" +381 69 negative_sampler """basic""" +381 69 evaluator """rankbased""" +381 70 dataset """kinships""" +381 70 model """kg2e""" +381 70 loss """bceaftersigmoid""" +381 70 regularizer """no""" +381 70 optimizer """adam""" +381 70 training_loop """owa""" +381 70 negative_sampler """basic""" +381 70 evaluator """rankbased""" +381 71 dataset """kinships""" +381 71 model """kg2e""" +381 71 loss """bceaftersigmoid""" +381 71 regularizer """no""" +381 71 optimizer """adam""" +381 71 training_loop """owa""" +381 71 negative_sampler """basic""" +381 71 evaluator """rankbased""" +381 72 dataset """kinships""" +381 72 model """kg2e""" +381 72 loss """bceaftersigmoid""" +381 72 regularizer """no""" 
+381 72 optimizer """adam""" +381 72 training_loop """owa""" +381 72 negative_sampler """basic""" +381 72 evaluator """rankbased""" +381 73 dataset """kinships""" +381 73 model """kg2e""" +381 73 loss """bceaftersigmoid""" +381 73 regularizer """no""" +381 73 optimizer """adam""" +381 73 training_loop """owa""" +381 73 negative_sampler """basic""" +381 73 evaluator """rankbased""" +381 74 dataset """kinships""" +381 74 model """kg2e""" +381 74 loss """bceaftersigmoid""" +381 74 regularizer """no""" +381 74 optimizer """adam""" +381 74 training_loop """owa""" +381 74 negative_sampler """basic""" +381 74 evaluator """rankbased""" +381 75 dataset """kinships""" +381 75 model """kg2e""" +381 75 loss """bceaftersigmoid""" +381 75 regularizer """no""" +381 75 optimizer """adam""" +381 75 training_loop """owa""" +381 75 negative_sampler """basic""" +381 75 evaluator """rankbased""" +381 76 dataset """kinships""" +381 76 model """kg2e""" +381 76 loss """bceaftersigmoid""" +381 76 regularizer """no""" +381 76 optimizer """adam""" +381 76 training_loop """owa""" +381 76 negative_sampler """basic""" +381 76 evaluator """rankbased""" +381 77 dataset """kinships""" +381 77 model """kg2e""" +381 77 loss """bceaftersigmoid""" +381 77 regularizer """no""" +381 77 optimizer """adam""" +381 77 training_loop """owa""" +381 77 negative_sampler """basic""" +381 77 evaluator """rankbased""" +381 78 dataset """kinships""" +381 78 model """kg2e""" +381 78 loss """bceaftersigmoid""" +381 78 regularizer """no""" +381 78 optimizer """adam""" +381 78 training_loop """owa""" +381 78 negative_sampler """basic""" +381 78 evaluator """rankbased""" +381 79 dataset """kinships""" +381 79 model """kg2e""" +381 79 loss """bceaftersigmoid""" +381 79 regularizer """no""" +381 79 optimizer """adam""" +381 79 training_loop """owa""" +381 79 negative_sampler """basic""" +381 79 evaluator """rankbased""" +381 80 dataset """kinships""" +381 80 model """kg2e""" +381 80 loss """bceaftersigmoid""" +381 80 
regularizer """no""" +381 80 optimizer """adam""" +381 80 training_loop """owa""" +381 80 negative_sampler """basic""" +381 80 evaluator """rankbased""" +381 81 dataset """kinships""" +381 81 model """kg2e""" +381 81 loss """bceaftersigmoid""" +381 81 regularizer """no""" +381 81 optimizer """adam""" +381 81 training_loop """owa""" +381 81 negative_sampler """basic""" +381 81 evaluator """rankbased""" +381 82 dataset """kinships""" +381 82 model """kg2e""" +381 82 loss """bceaftersigmoid""" +381 82 regularizer """no""" +381 82 optimizer """adam""" +381 82 training_loop """owa""" +381 82 negative_sampler """basic""" +381 82 evaluator """rankbased""" +381 83 dataset """kinships""" +381 83 model """kg2e""" +381 83 loss """bceaftersigmoid""" +381 83 regularizer """no""" +381 83 optimizer """adam""" +381 83 training_loop """owa""" +381 83 negative_sampler """basic""" +381 83 evaluator """rankbased""" +381 84 dataset """kinships""" +381 84 model """kg2e""" +381 84 loss """bceaftersigmoid""" +381 84 regularizer """no""" +381 84 optimizer """adam""" +381 84 training_loop """owa""" +381 84 negative_sampler """basic""" +381 84 evaluator """rankbased""" +381 85 dataset """kinships""" +381 85 model """kg2e""" +381 85 loss """bceaftersigmoid""" +381 85 regularizer """no""" +381 85 optimizer """adam""" +381 85 training_loop """owa""" +381 85 negative_sampler """basic""" +381 85 evaluator """rankbased""" +381 86 dataset """kinships""" +381 86 model """kg2e""" +381 86 loss """bceaftersigmoid""" +381 86 regularizer """no""" +381 86 optimizer """adam""" +381 86 training_loop """owa""" +381 86 negative_sampler """basic""" +381 86 evaluator """rankbased""" +381 87 dataset """kinships""" +381 87 model """kg2e""" +381 87 loss """bceaftersigmoid""" +381 87 regularizer """no""" +381 87 optimizer """adam""" +381 87 training_loop """owa""" +381 87 negative_sampler """basic""" +381 87 evaluator """rankbased""" +381 88 dataset """kinships""" +381 88 model """kg2e""" +381 88 loss 
"""bceaftersigmoid""" +381 88 regularizer """no""" +381 88 optimizer """adam""" +381 88 training_loop """owa""" +381 88 negative_sampler """basic""" +381 88 evaluator """rankbased""" +381 89 dataset """kinships""" +381 89 model """kg2e""" +381 89 loss """bceaftersigmoid""" +381 89 regularizer """no""" +381 89 optimizer """adam""" +381 89 training_loop """owa""" +381 89 negative_sampler """basic""" +381 89 evaluator """rankbased""" +381 90 dataset """kinships""" +381 90 model """kg2e""" +381 90 loss """bceaftersigmoid""" +381 90 regularizer """no""" +381 90 optimizer """adam""" +381 90 training_loop """owa""" +381 90 negative_sampler """basic""" +381 90 evaluator """rankbased""" +381 91 dataset """kinships""" +381 91 model """kg2e""" +381 91 loss """bceaftersigmoid""" +381 91 regularizer """no""" +381 91 optimizer """adam""" +381 91 training_loop """owa""" +381 91 negative_sampler """basic""" +381 91 evaluator """rankbased""" +381 92 dataset """kinships""" +381 92 model """kg2e""" +381 92 loss """bceaftersigmoid""" +381 92 regularizer """no""" +381 92 optimizer """adam""" +381 92 training_loop """owa""" +381 92 negative_sampler """basic""" +381 92 evaluator """rankbased""" +381 93 dataset """kinships""" +381 93 model """kg2e""" +381 93 loss """bceaftersigmoid""" +381 93 regularizer """no""" +381 93 optimizer """adam""" +381 93 training_loop """owa""" +381 93 negative_sampler """basic""" +381 93 evaluator """rankbased""" +381 94 dataset """kinships""" +381 94 model """kg2e""" +381 94 loss """bceaftersigmoid""" +381 94 regularizer """no""" +381 94 optimizer """adam""" +381 94 training_loop """owa""" +381 94 negative_sampler """basic""" +381 94 evaluator """rankbased""" +381 95 dataset """kinships""" +381 95 model """kg2e""" +381 95 loss """bceaftersigmoid""" +381 95 regularizer """no""" +381 95 optimizer """adam""" +381 95 training_loop """owa""" +381 95 negative_sampler """basic""" +381 95 evaluator """rankbased""" +381 96 dataset """kinships""" +381 96 model 
"""kg2e""" +381 96 loss """bceaftersigmoid""" +381 96 regularizer """no""" +381 96 optimizer """adam""" +381 96 training_loop """owa""" +381 96 negative_sampler """basic""" +381 96 evaluator """rankbased""" +381 97 dataset """kinships""" +381 97 model """kg2e""" +381 97 loss """bceaftersigmoid""" +381 97 regularizer """no""" +381 97 optimizer """adam""" +381 97 training_loop """owa""" +381 97 negative_sampler """basic""" +381 97 evaluator """rankbased""" +381 98 dataset """kinships""" +381 98 model """kg2e""" +381 98 loss """bceaftersigmoid""" +381 98 regularizer """no""" +381 98 optimizer """adam""" +381 98 training_loop """owa""" +381 98 negative_sampler """basic""" +381 98 evaluator """rankbased""" +381 99 dataset """kinships""" +381 99 model """kg2e""" +381 99 loss """bceaftersigmoid""" +381 99 regularizer """no""" +381 99 optimizer """adam""" +381 99 training_loop """owa""" +381 99 negative_sampler """basic""" +381 99 evaluator """rankbased""" +381 100 dataset """kinships""" +381 100 model """kg2e""" +381 100 loss """bceaftersigmoid""" +381 100 regularizer """no""" +381 100 optimizer """adam""" +381 100 training_loop """owa""" +381 100 negative_sampler """basic""" +381 100 evaluator """rankbased""" +382 1 model.embedding_dim 0.0 +382 1 model.c_min 0.08775042402729485 +382 1 model.c_max 8.086393500867032 +382 1 optimizer.lr 0.04766460165806003 +382 1 negative_sampler.num_negs_per_pos 24.0 +382 1 training.batch_size 1.0 +382 2 model.embedding_dim 1.0 +382 2 model.c_min 0.058856412410707076 +382 2 model.c_max 3.6862376866045308 +382 2 optimizer.lr 0.07398557666667156 +382 2 negative_sampler.num_negs_per_pos 43.0 +382 2 training.batch_size 0.0 +382 3 model.embedding_dim 2.0 +382 3 model.c_min 0.07976480472953161 +382 3 model.c_max 1.653459433298175 +382 3 optimizer.lr 0.0010974464109714576 +382 3 negative_sampler.num_negs_per_pos 51.0 +382 3 training.batch_size 2.0 +382 4 model.embedding_dim 0.0 +382 4 model.c_min 0.01913322875230123 +382 4 model.c_max 
7.240068682095138 +382 4 optimizer.lr 0.037577378877536795 +382 4 negative_sampler.num_negs_per_pos 95.0 +382 4 training.batch_size 0.0 +382 5 model.embedding_dim 1.0 +382 5 model.c_min 0.07310022671220874 +382 5 model.c_max 7.728824443039892 +382 5 optimizer.lr 0.011460491277362697 +382 5 negative_sampler.num_negs_per_pos 49.0 +382 5 training.batch_size 0.0 +382 6 model.embedding_dim 1.0 +382 6 model.c_min 0.09744481769302994 +382 6 model.c_max 5.250883855662977 +382 6 optimizer.lr 0.0010296395586811107 +382 6 negative_sampler.num_negs_per_pos 22.0 +382 6 training.batch_size 0.0 +382 7 model.embedding_dim 0.0 +382 7 model.c_min 0.022641628228812133 +382 7 model.c_max 8.436035568648473 +382 7 optimizer.lr 0.09591310197893155 +382 7 negative_sampler.num_negs_per_pos 90.0 +382 7 training.batch_size 2.0 +382 8 model.embedding_dim 0.0 +382 8 model.c_min 0.04099328991516338 +382 8 model.c_max 7.36127839598338 +382 8 optimizer.lr 0.013358775593598637 +382 8 negative_sampler.num_negs_per_pos 40.0 +382 8 training.batch_size 0.0 +382 9 model.embedding_dim 0.0 +382 9 model.c_min 0.01832914317394446 +382 9 model.c_max 2.3386273265415918 +382 9 optimizer.lr 0.024818769167633533 +382 9 negative_sampler.num_negs_per_pos 95.0 +382 9 training.batch_size 1.0 +382 10 model.embedding_dim 0.0 +382 10 model.c_min 0.07011598441714673 +382 10 model.c_max 6.214267830883954 +382 10 optimizer.lr 0.057935176514823154 +382 10 negative_sampler.num_negs_per_pos 78.0 +382 10 training.batch_size 1.0 +382 11 model.embedding_dim 1.0 +382 11 model.c_min 0.016653365277336032 +382 11 model.c_max 4.198855433925983 +382 11 optimizer.lr 0.03296679697493843 +382 11 negative_sampler.num_negs_per_pos 95.0 +382 11 training.batch_size 1.0 +382 12 model.embedding_dim 0.0 +382 12 model.c_min 0.056889977440749896 +382 12 model.c_max 8.201472112019356 +382 12 optimizer.lr 0.0024930655662653892 +382 12 negative_sampler.num_negs_per_pos 27.0 +382 12 training.batch_size 2.0 +382 13 model.embedding_dim 1.0 +382 13 
model.c_min 0.05801845195876702 +382 13 model.c_max 6.995836257059869 +382 13 optimizer.lr 0.007675679092181919 +382 13 negative_sampler.num_negs_per_pos 48.0 +382 13 training.batch_size 0.0 +382 14 model.embedding_dim 0.0 +382 14 model.c_min 0.01737278664998839 +382 14 model.c_max 7.346378574140287 +382 14 optimizer.lr 0.004052882635332459 +382 14 negative_sampler.num_negs_per_pos 76.0 +382 14 training.batch_size 2.0 +382 15 model.embedding_dim 0.0 +382 15 model.c_min 0.06708634411686844 +382 15 model.c_max 8.133412110248122 +382 15 optimizer.lr 0.04096577747717416 +382 15 negative_sampler.num_negs_per_pos 34.0 +382 15 training.batch_size 2.0 +382 16 model.embedding_dim 1.0 +382 16 model.c_min 0.06775349709468281 +382 16 model.c_max 4.832769024755926 +382 16 optimizer.lr 0.043959854693306634 +382 16 negative_sampler.num_negs_per_pos 2.0 +382 16 training.batch_size 0.0 +382 17 model.embedding_dim 1.0 +382 17 model.c_min 0.015502304719855142 +382 17 model.c_max 5.375235154323412 +382 17 optimizer.lr 0.07470592414708944 +382 17 negative_sampler.num_negs_per_pos 13.0 +382 17 training.batch_size 2.0 +382 18 model.embedding_dim 2.0 +382 18 model.c_min 0.01587205966051191 +382 18 model.c_max 3.3303597535268707 +382 18 optimizer.lr 0.002140090934201587 +382 18 negative_sampler.num_negs_per_pos 28.0 +382 18 training.batch_size 2.0 +382 19 model.embedding_dim 0.0 +382 19 model.c_min 0.014307320784673519 +382 19 model.c_max 6.914579734883923 +382 19 optimizer.lr 0.006282610533165598 +382 19 negative_sampler.num_negs_per_pos 34.0 +382 19 training.batch_size 1.0 +382 20 model.embedding_dim 0.0 +382 20 model.c_min 0.0607655246871081 +382 20 model.c_max 6.236582770693623 +382 20 optimizer.lr 0.09202309699750344 +382 20 negative_sampler.num_negs_per_pos 17.0 +382 20 training.batch_size 1.0 +382 21 model.embedding_dim 1.0 +382 21 model.c_min 0.0134033684185494 +382 21 model.c_max 3.9356587372189598 +382 21 optimizer.lr 0.0026333464004964858 +382 21 
negative_sampler.num_negs_per_pos 77.0 +382 21 training.batch_size 0.0 +382 22 model.embedding_dim 0.0 +382 22 model.c_min 0.06262731891453541 +382 22 model.c_max 8.495565491054657 +382 22 optimizer.lr 0.01174125904526739 +382 22 negative_sampler.num_negs_per_pos 86.0 +382 22 training.batch_size 2.0 +382 23 model.embedding_dim 2.0 +382 23 model.c_min 0.01681420190536487 +382 23 model.c_max 9.910562841695903 +382 23 optimizer.lr 0.04179634362226974 +382 23 negative_sampler.num_negs_per_pos 73.0 +382 23 training.batch_size 2.0 +382 24 model.embedding_dim 1.0 +382 24 model.c_min 0.021235646429457085 +382 24 model.c_max 8.610068718591958 +382 24 optimizer.lr 0.04120043664223143 +382 24 negative_sampler.num_negs_per_pos 88.0 +382 24 training.batch_size 0.0 +382 25 model.embedding_dim 2.0 +382 25 model.c_min 0.031312207366334305 +382 25 model.c_max 8.461723386283147 +382 25 optimizer.lr 0.003563788214968806 +382 25 negative_sampler.num_negs_per_pos 98.0 +382 25 training.batch_size 1.0 +382 26 model.embedding_dim 2.0 +382 26 model.c_min 0.011424967118926665 +382 26 model.c_max 6.928235099912157 +382 26 optimizer.lr 0.016939380430346273 +382 26 negative_sampler.num_negs_per_pos 3.0 +382 26 training.batch_size 1.0 +382 27 model.embedding_dim 1.0 +382 27 model.c_min 0.01445673904243813 +382 27 model.c_max 9.847665139506113 +382 27 optimizer.lr 0.00626372300256714 +382 27 negative_sampler.num_negs_per_pos 20.0 +382 27 training.batch_size 1.0 +382 28 model.embedding_dim 2.0 +382 28 model.c_min 0.033073859070364864 +382 28 model.c_max 6.903220861729343 +382 28 optimizer.lr 0.008962507984090345 +382 28 negative_sampler.num_negs_per_pos 92.0 +382 28 training.batch_size 0.0 +382 29 model.embedding_dim 0.0 +382 29 model.c_min 0.02057564265462932 +382 29 model.c_max 5.4921461912405665 +382 29 optimizer.lr 0.014709644627053563 +382 29 negative_sampler.num_negs_per_pos 66.0 +382 29 training.batch_size 2.0 +382 30 model.embedding_dim 1.0 +382 30 model.c_min 0.04051492830047979 +382 30 
model.c_max 7.652152668914146 +382 30 optimizer.lr 0.0033174440353377355 +382 30 negative_sampler.num_negs_per_pos 59.0 +382 30 training.batch_size 2.0 +382 31 model.embedding_dim 1.0 +382 31 model.c_min 0.0362699320331195 +382 31 model.c_max 4.106518107673942 +382 31 optimizer.lr 0.031610970728590836 +382 31 negative_sampler.num_negs_per_pos 37.0 +382 31 training.batch_size 2.0 +382 32 model.embedding_dim 0.0 +382 32 model.c_min 0.09291253227872906 +382 32 model.c_max 5.750846884459641 +382 32 optimizer.lr 0.022441041687906147 +382 32 negative_sampler.num_negs_per_pos 17.0 +382 32 training.batch_size 1.0 +382 33 model.embedding_dim 0.0 +382 33 model.c_min 0.05295238029578232 +382 33 model.c_max 1.00512010288409 +382 33 optimizer.lr 0.026421948437632407 +382 33 negative_sampler.num_negs_per_pos 28.0 +382 33 training.batch_size 1.0 +382 34 model.embedding_dim 1.0 +382 34 model.c_min 0.023033477218797505 +382 34 model.c_max 2.377594479441493 +382 34 optimizer.lr 0.08136775108606939 +382 34 negative_sampler.num_negs_per_pos 83.0 +382 34 training.batch_size 1.0 +382 35 model.embedding_dim 1.0 +382 35 model.c_min 0.07290826900307747 +382 35 model.c_max 3.4399470734221014 +382 35 optimizer.lr 0.0269112541248821 +382 35 negative_sampler.num_negs_per_pos 6.0 +382 35 training.batch_size 1.0 +382 36 model.embedding_dim 0.0 +382 36 model.c_min 0.0850842127398118 +382 36 model.c_max 7.049637873906596 +382 36 optimizer.lr 0.0013684392492812876 +382 36 negative_sampler.num_negs_per_pos 30.0 +382 36 training.batch_size 2.0 +382 37 model.embedding_dim 0.0 +382 37 model.c_min 0.08637891477697494 +382 37 model.c_max 6.875977123346252 +382 37 optimizer.lr 0.03385804967408647 +382 37 negative_sampler.num_negs_per_pos 69.0 +382 37 training.batch_size 2.0 +382 38 model.embedding_dim 0.0 +382 38 model.c_min 0.03240079961073594 +382 38 model.c_max 2.012478919875964 +382 38 optimizer.lr 0.027586313792605354 +382 38 negative_sampler.num_negs_per_pos 43.0 +382 38 training.batch_size 1.0 +382 
39 model.embedding_dim 2.0 +382 39 model.c_min 0.017012339037880773 +382 39 model.c_max 9.836410773109646 +382 39 optimizer.lr 0.0036054966135525943 +382 39 negative_sampler.num_negs_per_pos 31.0 +382 39 training.batch_size 2.0 +382 40 model.embedding_dim 1.0 +382 40 model.c_min 0.062410466234767564 +382 40 model.c_max 4.316438321937245 +382 40 optimizer.lr 0.011003068658829683 +382 40 negative_sampler.num_negs_per_pos 52.0 +382 40 training.batch_size 2.0 +382 41 model.embedding_dim 0.0 +382 41 model.c_min 0.04607694497691552 +382 41 model.c_max 5.675329040001768 +382 41 optimizer.lr 0.0014816888066990807 +382 41 negative_sampler.num_negs_per_pos 24.0 +382 41 training.batch_size 0.0 +382 42 model.embedding_dim 1.0 +382 42 model.c_min 0.06146522504986878 +382 42 model.c_max 7.748195537645698 +382 42 optimizer.lr 0.0011506340953297241 +382 42 negative_sampler.num_negs_per_pos 1.0 +382 42 training.batch_size 0.0 +382 43 model.embedding_dim 0.0 +382 43 model.c_min 0.011546537202887244 +382 43 model.c_max 7.3371676309519005 +382 43 optimizer.lr 0.038087937224765285 +382 43 negative_sampler.num_negs_per_pos 17.0 +382 43 training.batch_size 2.0 +382 44 model.embedding_dim 0.0 +382 44 model.c_min 0.04261679498699657 +382 44 model.c_max 8.144582122883623 +382 44 optimizer.lr 0.0035278204902499735 +382 44 negative_sampler.num_negs_per_pos 28.0 +382 44 training.batch_size 1.0 +382 45 model.embedding_dim 2.0 +382 45 model.c_min 0.062488803129072075 +382 45 model.c_max 8.47005776849829 +382 45 optimizer.lr 0.0055839349806057976 +382 45 negative_sampler.num_negs_per_pos 18.0 +382 45 training.batch_size 0.0 +382 46 model.embedding_dim 2.0 +382 46 model.c_min 0.025377392761018597 +382 46 model.c_max 3.390473046947413 +382 46 optimizer.lr 0.0012597617760542268 +382 46 negative_sampler.num_negs_per_pos 16.0 +382 46 training.batch_size 0.0 +382 47 model.embedding_dim 0.0 +382 47 model.c_min 0.07729860470456273 +382 47 model.c_max 1.3535127273190355 +382 47 optimizer.lr 
0.012681881762535336 +382 47 negative_sampler.num_negs_per_pos 99.0 +382 47 training.batch_size 1.0 +382 48 model.embedding_dim 2.0 +382 48 model.c_min 0.05401902898607946 +382 48 model.c_max 2.728924710589235 +382 48 optimizer.lr 0.035959102147621644 +382 48 negative_sampler.num_negs_per_pos 99.0 +382 48 training.batch_size 1.0 +382 49 model.embedding_dim 2.0 +382 49 model.c_min 0.025763828025570843 +382 49 model.c_max 8.517286823271197 +382 49 optimizer.lr 0.011985674650115812 +382 49 negative_sampler.num_negs_per_pos 19.0 +382 49 training.batch_size 2.0 +382 50 model.embedding_dim 0.0 +382 50 model.c_min 0.03641219158395997 +382 50 model.c_max 9.592884303717964 +382 50 optimizer.lr 0.0017198544254120845 +382 50 negative_sampler.num_negs_per_pos 83.0 +382 50 training.batch_size 0.0 +382 51 model.embedding_dim 0.0 +382 51 model.c_min 0.023867347040814085 +382 51 model.c_max 7.888753407058042 +382 51 optimizer.lr 0.008421635161610394 +382 51 negative_sampler.num_negs_per_pos 85.0 +382 51 training.batch_size 1.0 +382 52 model.embedding_dim 2.0 +382 52 model.c_min 0.052525050591573155 +382 52 model.c_max 7.592705328879844 +382 52 optimizer.lr 0.0067967741525340915 +382 52 negative_sampler.num_negs_per_pos 58.0 +382 52 training.batch_size 1.0 +382 53 model.embedding_dim 2.0 +382 53 model.c_min 0.01585712896620879 +382 53 model.c_max 2.635328149971127 +382 53 optimizer.lr 0.06770392854528161 +382 53 negative_sampler.num_negs_per_pos 67.0 +382 53 training.batch_size 1.0 +382 54 model.embedding_dim 2.0 +382 54 model.c_min 0.0402131959873096 +382 54 model.c_max 6.356100570544339 +382 54 optimizer.lr 0.04846588501323942 +382 54 negative_sampler.num_negs_per_pos 0.0 +382 54 training.batch_size 2.0 +382 55 model.embedding_dim 0.0 +382 55 model.c_min 0.028119652800798284 +382 55 model.c_max 1.7216641517673406 +382 55 optimizer.lr 0.00260177179644562 +382 55 negative_sampler.num_negs_per_pos 9.0 +382 55 training.batch_size 2.0 +382 56 model.embedding_dim 0.0 +382 56 
model.c_min 0.02355827915938665 +382 56 model.c_max 3.6959628085526375 +382 56 optimizer.lr 0.0021009506546724992 +382 56 negative_sampler.num_negs_per_pos 48.0 +382 56 training.batch_size 0.0 +382 57 model.embedding_dim 1.0 +382 57 model.c_min 0.08021031610123222 +382 57 model.c_max 1.6663516464343417 +382 57 optimizer.lr 0.02096285931771237 +382 57 negative_sampler.num_negs_per_pos 55.0 +382 57 training.batch_size 0.0 +382 58 model.embedding_dim 0.0 +382 58 model.c_min 0.035110569232436675 +382 58 model.c_max 7.513235288927466 +382 58 optimizer.lr 0.09605171050136875 +382 58 negative_sampler.num_negs_per_pos 82.0 +382 58 training.batch_size 2.0 +382 59 model.embedding_dim 2.0 +382 59 model.c_min 0.056472156417924024 +382 59 model.c_max 6.894817980951594 +382 59 optimizer.lr 0.001966273213385754 +382 59 negative_sampler.num_negs_per_pos 43.0 +382 59 training.batch_size 0.0 +382 60 model.embedding_dim 2.0 +382 60 model.c_min 0.06177892673504042 +382 60 model.c_max 6.668124837590685 +382 60 optimizer.lr 0.013676189468857635 +382 60 negative_sampler.num_negs_per_pos 51.0 +382 60 training.batch_size 0.0 +382 61 model.embedding_dim 1.0 +382 61 model.c_min 0.017568558943409935 +382 61 model.c_max 5.9713874128886575 +382 61 optimizer.lr 0.007920287318698363 +382 61 negative_sampler.num_negs_per_pos 3.0 +382 61 training.batch_size 0.0 +382 62 model.embedding_dim 0.0 +382 62 model.c_min 0.027473644971069563 +382 62 model.c_max 4.352994200094149 +382 62 optimizer.lr 0.06682132076756361 +382 62 negative_sampler.num_negs_per_pos 95.0 +382 62 training.batch_size 0.0 +382 63 model.embedding_dim 2.0 +382 63 model.c_min 0.03810638617308844 +382 63 model.c_max 2.8542390033070593 +382 63 optimizer.lr 0.02140892079342595 +382 63 negative_sampler.num_negs_per_pos 81.0 +382 63 training.batch_size 1.0 +382 64 model.embedding_dim 2.0 +382 64 model.c_min 0.03468482971040848 +382 64 model.c_max 5.833845192483089 +382 64 optimizer.lr 0.04990315089899313 +382 64 
negative_sampler.num_negs_per_pos 5.0 +382 64 training.batch_size 1.0 +382 65 model.embedding_dim 1.0 +382 65 model.c_min 0.032250969556727356 +382 65 model.c_max 3.5162535829475354 +382 65 optimizer.lr 0.030778924287351017 +382 65 negative_sampler.num_negs_per_pos 59.0 +382 65 training.batch_size 1.0 +382 66 model.embedding_dim 2.0 +382 66 model.c_min 0.014914260066580483 +382 66 model.c_max 4.507136658832238 +382 66 optimizer.lr 0.008165435079933918 +382 66 negative_sampler.num_negs_per_pos 84.0 +382 66 training.batch_size 1.0 +382 67 model.embedding_dim 2.0 +382 67 model.c_min 0.045193790895170914 +382 67 model.c_max 2.9046446783695274 +382 67 optimizer.lr 0.012859323853199824 +382 67 negative_sampler.num_negs_per_pos 60.0 +382 67 training.batch_size 1.0 +382 68 model.embedding_dim 0.0 +382 68 model.c_min 0.022001966837264197 +382 68 model.c_max 1.9342026749260666 +382 68 optimizer.lr 0.021601632005332994 +382 68 negative_sampler.num_negs_per_pos 35.0 +382 68 training.batch_size 0.0 +382 69 model.embedding_dim 2.0 +382 69 model.c_min 0.013647415483626617 +382 69 model.c_max 4.68627978411625 +382 69 optimizer.lr 0.02146710400099423 +382 69 negative_sampler.num_negs_per_pos 22.0 +382 69 training.batch_size 2.0 +382 70 model.embedding_dim 1.0 +382 70 model.c_min 0.011651365955303566 +382 70 model.c_max 9.8411861190342 +382 70 optimizer.lr 0.07947099715407785 +382 70 negative_sampler.num_negs_per_pos 83.0 +382 70 training.batch_size 0.0 +382 71 model.embedding_dim 2.0 +382 71 model.c_min 0.010566849275848578 +382 71 model.c_max 5.034254554480659 +382 71 optimizer.lr 0.0031474328304832394 +382 71 negative_sampler.num_negs_per_pos 65.0 +382 71 training.batch_size 2.0 +382 72 model.embedding_dim 2.0 +382 72 model.c_min 0.08249043939361118 +382 72 model.c_max 4.040517587243517 +382 72 optimizer.lr 0.05799896088760221 +382 72 negative_sampler.num_negs_per_pos 71.0 +382 72 training.batch_size 1.0 +382 73 model.embedding_dim 0.0 +382 73 model.c_min 0.044495148577850634 
+382 73 model.c_max 7.033634890852797 +382 73 optimizer.lr 0.0012126874807904641 +382 73 negative_sampler.num_negs_per_pos 68.0 +382 73 training.batch_size 2.0 +382 74 model.embedding_dim 0.0 +382 74 model.c_min 0.02166318190799958 +382 74 model.c_max 3.719143096248726 +382 74 optimizer.lr 0.005853194430730126 +382 74 negative_sampler.num_negs_per_pos 85.0 +382 74 training.batch_size 2.0 +382 75 model.embedding_dim 2.0 +382 75 model.c_min 0.01483012109235398 +382 75 model.c_max 8.303781947206549 +382 75 optimizer.lr 0.0046472730992872385 +382 75 negative_sampler.num_negs_per_pos 19.0 +382 75 training.batch_size 2.0 +382 76 model.embedding_dim 2.0 +382 76 model.c_min 0.01328299410807688 +382 76 model.c_max 7.1063433159150655 +382 76 optimizer.lr 0.017850804459476836 +382 76 negative_sampler.num_negs_per_pos 75.0 +382 76 training.batch_size 0.0 +382 77 model.embedding_dim 2.0 +382 77 model.c_min 0.07107266988125654 +382 77 model.c_max 5.661791933112567 +382 77 optimizer.lr 0.001834418264493418 +382 77 negative_sampler.num_negs_per_pos 44.0 +382 77 training.batch_size 2.0 +382 78 model.embedding_dim 0.0 +382 78 model.c_min 0.018726715930164037 +382 78 model.c_max 9.803006308057881 +382 78 optimizer.lr 0.01406208155446986 +382 78 negative_sampler.num_negs_per_pos 15.0 +382 78 training.batch_size 2.0 +382 79 model.embedding_dim 2.0 +382 79 model.c_min 0.013383355926616863 +382 79 model.c_max 5.005420118262345 +382 79 optimizer.lr 0.010284392155559253 +382 79 negative_sampler.num_negs_per_pos 65.0 +382 79 training.batch_size 2.0 +382 80 model.embedding_dim 2.0 +382 80 model.c_min 0.015573007248074816 +382 80 model.c_max 6.296111825530793 +382 80 optimizer.lr 0.001695251540819732 +382 80 negative_sampler.num_negs_per_pos 83.0 +382 80 training.batch_size 2.0 +382 81 model.embedding_dim 1.0 +382 81 model.c_min 0.09132658356706737 +382 81 model.c_max 2.927253751131842 +382 81 optimizer.lr 0.09342432758276031 +382 81 negative_sampler.num_negs_per_pos 97.0 +382 81 
training.batch_size 2.0 +382 82 model.embedding_dim 2.0 +382 82 model.c_min 0.053726662116495016 +382 82 model.c_max 7.784496814992355 +382 82 optimizer.lr 0.019520938312931804 +382 82 negative_sampler.num_negs_per_pos 60.0 +382 82 training.batch_size 0.0 +382 83 model.embedding_dim 0.0 +382 83 model.c_min 0.07463113172153757 +382 83 model.c_max 4.008855670641153 +382 83 optimizer.lr 0.018955999205812275 +382 83 negative_sampler.num_negs_per_pos 25.0 +382 83 training.batch_size 1.0 +382 84 model.embedding_dim 1.0 +382 84 model.c_min 0.011556545012399048 +382 84 model.c_max 5.684489576261415 +382 84 optimizer.lr 0.013931751584227858 +382 84 negative_sampler.num_negs_per_pos 15.0 +382 84 training.batch_size 2.0 +382 85 model.embedding_dim 2.0 +382 85 model.c_min 0.026056996466003016 +382 85 model.c_max 2.7445125889719018 +382 85 optimizer.lr 0.010182237452212839 +382 85 negative_sampler.num_negs_per_pos 27.0 +382 85 training.batch_size 2.0 +382 86 model.embedding_dim 0.0 +382 86 model.c_min 0.010510889445817087 +382 86 model.c_max 8.582178063078775 +382 86 optimizer.lr 0.0037793843792811987 +382 86 negative_sampler.num_negs_per_pos 10.0 +382 86 training.batch_size 0.0 +382 87 model.embedding_dim 0.0 +382 87 model.c_min 0.07765088144751925 +382 87 model.c_max 7.282430236335044 +382 87 optimizer.lr 0.025900174133201698 +382 87 negative_sampler.num_negs_per_pos 54.0 +382 87 training.batch_size 2.0 +382 88 model.embedding_dim 1.0 +382 88 model.c_min 0.014875642922559891 +382 88 model.c_max 8.390748024495718 +382 88 optimizer.lr 0.0021056066675997484 +382 88 negative_sampler.num_negs_per_pos 35.0 +382 88 training.batch_size 2.0 +382 89 model.embedding_dim 0.0 +382 89 model.c_min 0.012828849981106501 +382 89 model.c_max 5.190487396111531 +382 89 optimizer.lr 0.04582038832221661 +382 89 negative_sampler.num_negs_per_pos 71.0 +382 89 training.batch_size 2.0 +382 90 model.embedding_dim 2.0 +382 90 model.c_min 0.01308472357996352 +382 90 model.c_max 7.0101373285584625 +382 90 
optimizer.lr 0.08972667548911128 +382 90 negative_sampler.num_negs_per_pos 80.0 +382 90 training.batch_size 2.0 +382 91 model.embedding_dim 0.0 +382 91 model.c_min 0.017678709217152304 +382 91 model.c_max 8.878993575539837 +382 91 optimizer.lr 0.002387087730107316 +382 91 negative_sampler.num_negs_per_pos 8.0 +382 91 training.batch_size 2.0 +382 92 model.embedding_dim 0.0 +382 92 model.c_min 0.022642367060137815 +382 92 model.c_max 4.864982072041945 +382 92 optimizer.lr 0.003095939177573903 +382 92 negative_sampler.num_negs_per_pos 39.0 +382 92 training.batch_size 2.0 +382 93 model.embedding_dim 2.0 +382 93 model.c_min 0.021337165315020033 +382 93 model.c_max 9.974998371766956 +382 93 optimizer.lr 0.010905402414859832 +382 93 negative_sampler.num_negs_per_pos 45.0 +382 93 training.batch_size 1.0 +382 94 model.embedding_dim 1.0 +382 94 model.c_min 0.0691035042953131 +382 94 model.c_max 7.053585892699072 +382 94 optimizer.lr 0.0089282328183491 +382 94 negative_sampler.num_negs_per_pos 42.0 +382 94 training.batch_size 1.0 +382 95 model.embedding_dim 1.0 +382 95 model.c_min 0.0417863629905096 +382 95 model.c_max 4.646635275605346 +382 95 optimizer.lr 0.0026307920413965753 +382 95 negative_sampler.num_negs_per_pos 48.0 +382 95 training.batch_size 0.0 +382 96 model.embedding_dim 2.0 +382 96 model.c_min 0.04024893342316305 +382 96 model.c_max 4.145935070356856 +382 96 optimizer.lr 0.004087703958870243 +382 96 negative_sampler.num_negs_per_pos 90.0 +382 96 training.batch_size 2.0 +382 97 model.embedding_dim 2.0 +382 97 model.c_min 0.013518231758351994 +382 97 model.c_max 2.4391683852532116 +382 97 optimizer.lr 0.09213733813875144 +382 97 negative_sampler.num_negs_per_pos 91.0 +382 97 training.batch_size 0.0 +382 98 model.embedding_dim 0.0 +382 98 model.c_min 0.01790957650050043 +382 98 model.c_max 4.053956755444545 +382 98 optimizer.lr 0.024243895819105448 +382 98 negative_sampler.num_negs_per_pos 32.0 +382 98 training.batch_size 1.0 +382 99 model.embedding_dim 0.0 +382 99 
model.c_min 0.049600657228416445 +382 99 model.c_max 4.491663987034844 +382 99 optimizer.lr 0.012171455885360115 +382 99 negative_sampler.num_negs_per_pos 78.0 +382 99 training.batch_size 1.0 +382 100 model.embedding_dim 2.0 +382 100 model.c_min 0.020192106919786054 +382 100 model.c_max 5.435745992160457 +382 100 optimizer.lr 0.07785667044678458 +382 100 negative_sampler.num_negs_per_pos 54.0 +382 100 training.batch_size 1.0 +382 1 dataset """kinships""" +382 1 model """kg2e""" +382 1 loss """softplus""" +382 1 regularizer """no""" +382 1 optimizer """adam""" +382 1 training_loop """owa""" +382 1 negative_sampler """basic""" +382 1 evaluator """rankbased""" +382 2 dataset """kinships""" +382 2 model """kg2e""" +382 2 loss """softplus""" +382 2 regularizer """no""" +382 2 optimizer """adam""" +382 2 training_loop """owa""" +382 2 negative_sampler """basic""" +382 2 evaluator """rankbased""" +382 3 dataset """kinships""" +382 3 model """kg2e""" +382 3 loss """softplus""" +382 3 regularizer """no""" +382 3 optimizer """adam""" +382 3 training_loop """owa""" +382 3 negative_sampler """basic""" +382 3 evaluator """rankbased""" +382 4 dataset """kinships""" +382 4 model """kg2e""" +382 4 loss """softplus""" +382 4 regularizer """no""" +382 4 optimizer """adam""" +382 4 training_loop """owa""" +382 4 negative_sampler """basic""" +382 4 evaluator """rankbased""" +382 5 dataset """kinships""" +382 5 model """kg2e""" +382 5 loss """softplus""" +382 5 regularizer """no""" +382 5 optimizer """adam""" +382 5 training_loop """owa""" +382 5 negative_sampler """basic""" +382 5 evaluator """rankbased""" +382 6 dataset """kinships""" +382 6 model """kg2e""" +382 6 loss """softplus""" +382 6 regularizer """no""" +382 6 optimizer """adam""" +382 6 training_loop """owa""" +382 6 negative_sampler """basic""" +382 6 evaluator """rankbased""" +382 7 dataset """kinships""" +382 7 model """kg2e""" +382 7 loss """softplus""" +382 7 regularizer """no""" +382 7 optimizer """adam""" +382 7 
training_loop """owa""" +382 7 negative_sampler """basic""" +382 7 evaluator """rankbased""" +382 8 dataset """kinships""" +382 8 model """kg2e""" +382 8 loss """softplus""" +382 8 regularizer """no""" +382 8 optimizer """adam""" +382 8 training_loop """owa""" +382 8 negative_sampler """basic""" +382 8 evaluator """rankbased""" +382 9 dataset """kinships""" +382 9 model """kg2e""" +382 9 loss """softplus""" +382 9 regularizer """no""" +382 9 optimizer """adam""" +382 9 training_loop """owa""" +382 9 negative_sampler """basic""" +382 9 evaluator """rankbased""" +382 10 dataset """kinships""" +382 10 model """kg2e""" +382 10 loss """softplus""" +382 10 regularizer """no""" +382 10 optimizer """adam""" +382 10 training_loop """owa""" +382 10 negative_sampler """basic""" +382 10 evaluator """rankbased""" +382 11 dataset """kinships""" +382 11 model """kg2e""" +382 11 loss """softplus""" +382 11 regularizer """no""" +382 11 optimizer """adam""" +382 11 training_loop """owa""" +382 11 negative_sampler """basic""" +382 11 evaluator """rankbased""" +382 12 dataset """kinships""" +382 12 model """kg2e""" +382 12 loss """softplus""" +382 12 regularizer """no""" +382 12 optimizer """adam""" +382 12 training_loop """owa""" +382 12 negative_sampler """basic""" +382 12 evaluator """rankbased""" +382 13 dataset """kinships""" +382 13 model """kg2e""" +382 13 loss """softplus""" +382 13 regularizer """no""" +382 13 optimizer """adam""" +382 13 training_loop """owa""" +382 13 negative_sampler """basic""" +382 13 evaluator """rankbased""" +382 14 dataset """kinships""" +382 14 model """kg2e""" +382 14 loss """softplus""" +382 14 regularizer """no""" +382 14 optimizer """adam""" +382 14 training_loop """owa""" +382 14 negative_sampler """basic""" +382 14 evaluator """rankbased""" +382 15 dataset """kinships""" +382 15 model """kg2e""" +382 15 loss """softplus""" +382 15 regularizer """no""" +382 15 optimizer """adam""" +382 15 training_loop """owa""" +382 15 negative_sampler 
"""basic""" +382 15 evaluator """rankbased""" +382 16 dataset """kinships""" +382 16 model """kg2e""" +382 16 loss """softplus""" +382 16 regularizer """no""" +382 16 optimizer """adam""" +382 16 training_loop """owa""" +382 16 negative_sampler """basic""" +382 16 evaluator """rankbased""" +382 17 dataset """kinships""" +382 17 model """kg2e""" +382 17 loss """softplus""" +382 17 regularizer """no""" +382 17 optimizer """adam""" +382 17 training_loop """owa""" +382 17 negative_sampler """basic""" +382 17 evaluator """rankbased""" +382 18 dataset """kinships""" +382 18 model """kg2e""" +382 18 loss """softplus""" +382 18 regularizer """no""" +382 18 optimizer """adam""" +382 18 training_loop """owa""" +382 18 negative_sampler """basic""" +382 18 evaluator """rankbased""" +382 19 dataset """kinships""" +382 19 model """kg2e""" +382 19 loss """softplus""" +382 19 regularizer """no""" +382 19 optimizer """adam""" +382 19 training_loop """owa""" +382 19 negative_sampler """basic""" +382 19 evaluator """rankbased""" +382 20 dataset """kinships""" +382 20 model """kg2e""" +382 20 loss """softplus""" +382 20 regularizer """no""" +382 20 optimizer """adam""" +382 20 training_loop """owa""" +382 20 negative_sampler """basic""" +382 20 evaluator """rankbased""" +382 21 dataset """kinships""" +382 21 model """kg2e""" +382 21 loss """softplus""" +382 21 regularizer """no""" +382 21 optimizer """adam""" +382 21 training_loop """owa""" +382 21 negative_sampler """basic""" +382 21 evaluator """rankbased""" +382 22 dataset """kinships""" +382 22 model """kg2e""" +382 22 loss """softplus""" +382 22 regularizer """no""" +382 22 optimizer """adam""" +382 22 training_loop """owa""" +382 22 negative_sampler """basic""" +382 22 evaluator """rankbased""" +382 23 dataset """kinships""" +382 23 model """kg2e""" +382 23 loss """softplus""" +382 23 regularizer """no""" +382 23 optimizer """adam""" +382 23 training_loop """owa""" +382 23 negative_sampler """basic""" +382 23 evaluator 
"""rankbased""" +382 24 dataset """kinships""" +382 24 model """kg2e""" +382 24 loss """softplus""" +382 24 regularizer """no""" +382 24 optimizer """adam""" +382 24 training_loop """owa""" +382 24 negative_sampler """basic""" +382 24 evaluator """rankbased""" +382 25 dataset """kinships""" +382 25 model """kg2e""" +382 25 loss """softplus""" +382 25 regularizer """no""" +382 25 optimizer """adam""" +382 25 training_loop """owa""" +382 25 negative_sampler """basic""" +382 25 evaluator """rankbased""" +382 26 dataset """kinships""" +382 26 model """kg2e""" +382 26 loss """softplus""" +382 26 regularizer """no""" +382 26 optimizer """adam""" +382 26 training_loop """owa""" +382 26 negative_sampler """basic""" +382 26 evaluator """rankbased""" +382 27 dataset """kinships""" +382 27 model """kg2e""" +382 27 loss """softplus""" +382 27 regularizer """no""" +382 27 optimizer """adam""" +382 27 training_loop """owa""" +382 27 negative_sampler """basic""" +382 27 evaluator """rankbased""" +382 28 dataset """kinships""" +382 28 model """kg2e""" +382 28 loss """softplus""" +382 28 regularizer """no""" +382 28 optimizer """adam""" +382 28 training_loop """owa""" +382 28 negative_sampler """basic""" +382 28 evaluator """rankbased""" +382 29 dataset """kinships""" +382 29 model """kg2e""" +382 29 loss """softplus""" +382 29 regularizer """no""" +382 29 optimizer """adam""" +382 29 training_loop """owa""" +382 29 negative_sampler """basic""" +382 29 evaluator """rankbased""" +382 30 dataset """kinships""" +382 30 model """kg2e""" +382 30 loss """softplus""" +382 30 regularizer """no""" +382 30 optimizer """adam""" +382 30 training_loop """owa""" +382 30 negative_sampler """basic""" +382 30 evaluator """rankbased""" +382 31 dataset """kinships""" +382 31 model """kg2e""" +382 31 loss """softplus""" +382 31 regularizer """no""" +382 31 optimizer """adam""" +382 31 training_loop """owa""" +382 31 negative_sampler """basic""" +382 31 evaluator """rankbased""" +382 32 dataset 
"""kinships""" +382 32 model """kg2e""" +382 32 loss """softplus""" +382 32 regularizer """no""" +382 32 optimizer """adam""" +382 32 training_loop """owa""" +382 32 negative_sampler """basic""" +382 32 evaluator """rankbased""" +382 33 dataset """kinships""" +382 33 model """kg2e""" +382 33 loss """softplus""" +382 33 regularizer """no""" +382 33 optimizer """adam""" +382 33 training_loop """owa""" +382 33 negative_sampler """basic""" +382 33 evaluator """rankbased""" +382 34 dataset """kinships""" +382 34 model """kg2e""" +382 34 loss """softplus""" +382 34 regularizer """no""" +382 34 optimizer """adam""" +382 34 training_loop """owa""" +382 34 negative_sampler """basic""" +382 34 evaluator """rankbased""" +382 35 dataset """kinships""" +382 35 model """kg2e""" +382 35 loss """softplus""" +382 35 regularizer """no""" +382 35 optimizer """adam""" +382 35 training_loop """owa""" +382 35 negative_sampler """basic""" +382 35 evaluator """rankbased""" +382 36 dataset """kinships""" +382 36 model """kg2e""" +382 36 loss """softplus""" +382 36 regularizer """no""" +382 36 optimizer """adam""" +382 36 training_loop """owa""" +382 36 negative_sampler """basic""" +382 36 evaluator """rankbased""" +382 37 dataset """kinships""" +382 37 model """kg2e""" +382 37 loss """softplus""" +382 37 regularizer """no""" +382 37 optimizer """adam""" +382 37 training_loop """owa""" +382 37 negative_sampler """basic""" +382 37 evaluator """rankbased""" +382 38 dataset """kinships""" +382 38 model """kg2e""" +382 38 loss """softplus""" +382 38 regularizer """no""" +382 38 optimizer """adam""" +382 38 training_loop """owa""" +382 38 negative_sampler """basic""" +382 38 evaluator """rankbased""" +382 39 dataset """kinships""" +382 39 model """kg2e""" +382 39 loss """softplus""" +382 39 regularizer """no""" +382 39 optimizer """adam""" +382 39 training_loop """owa""" +382 39 negative_sampler """basic""" +382 39 evaluator """rankbased""" +382 40 dataset """kinships""" +382 40 model """kg2e""" 
+382 40 loss """softplus""" +382 40 regularizer """no""" +382 40 optimizer """adam""" +382 40 training_loop """owa""" +382 40 negative_sampler """basic""" +382 40 evaluator """rankbased""" +382 41 dataset """kinships""" +382 41 model """kg2e""" +382 41 loss """softplus""" +382 41 regularizer """no""" +382 41 optimizer """adam""" +382 41 training_loop """owa""" +382 41 negative_sampler """basic""" +382 41 evaluator """rankbased""" +382 42 dataset """kinships""" +382 42 model """kg2e""" +382 42 loss """softplus""" +382 42 regularizer """no""" +382 42 optimizer """adam""" +382 42 training_loop """owa""" +382 42 negative_sampler """basic""" +382 42 evaluator """rankbased""" +382 43 dataset """kinships""" +382 43 model """kg2e""" +382 43 loss """softplus""" +382 43 regularizer """no""" +382 43 optimizer """adam""" +382 43 training_loop """owa""" +382 43 negative_sampler """basic""" +382 43 evaluator """rankbased""" +382 44 dataset """kinships""" +382 44 model """kg2e""" +382 44 loss """softplus""" +382 44 regularizer """no""" +382 44 optimizer """adam""" +382 44 training_loop """owa""" +382 44 negative_sampler """basic""" +382 44 evaluator """rankbased""" +382 45 dataset """kinships""" +382 45 model """kg2e""" +382 45 loss """softplus""" +382 45 regularizer """no""" +382 45 optimizer """adam""" +382 45 training_loop """owa""" +382 45 negative_sampler """basic""" +382 45 evaluator """rankbased""" +382 46 dataset """kinships""" +382 46 model """kg2e""" +382 46 loss """softplus""" +382 46 regularizer """no""" +382 46 optimizer """adam""" +382 46 training_loop """owa""" +382 46 negative_sampler """basic""" +382 46 evaluator """rankbased""" +382 47 dataset """kinships""" +382 47 model """kg2e""" +382 47 loss """softplus""" +382 47 regularizer """no""" +382 47 optimizer """adam""" +382 47 training_loop """owa""" +382 47 negative_sampler """basic""" +382 47 evaluator """rankbased""" +382 48 dataset """kinships""" +382 48 model """kg2e""" +382 48 loss """softplus""" +382 48 
regularizer """no""" +382 48 optimizer """adam""" +382 48 training_loop """owa""" +382 48 negative_sampler """basic""" +382 48 evaluator """rankbased""" +382 49 dataset """kinships""" +382 49 model """kg2e""" +382 49 loss """softplus""" +382 49 regularizer """no""" +382 49 optimizer """adam""" +382 49 training_loop """owa""" +382 49 negative_sampler """basic""" +382 49 evaluator """rankbased""" +382 50 dataset """kinships""" +382 50 model """kg2e""" +382 50 loss """softplus""" +382 50 regularizer """no""" +382 50 optimizer """adam""" +382 50 training_loop """owa""" +382 50 negative_sampler """basic""" +382 50 evaluator """rankbased""" +382 51 dataset """kinships""" +382 51 model """kg2e""" +382 51 loss """softplus""" +382 51 regularizer """no""" +382 51 optimizer """adam""" +382 51 training_loop """owa""" +382 51 negative_sampler """basic""" +382 51 evaluator """rankbased""" +382 52 dataset """kinships""" +382 52 model """kg2e""" +382 52 loss """softplus""" +382 52 regularizer """no""" +382 52 optimizer """adam""" +382 52 training_loop """owa""" +382 52 negative_sampler """basic""" +382 52 evaluator """rankbased""" +382 53 dataset """kinships""" +382 53 model """kg2e""" +382 53 loss """softplus""" +382 53 regularizer """no""" +382 53 optimizer """adam""" +382 53 training_loop """owa""" +382 53 negative_sampler """basic""" +382 53 evaluator """rankbased""" +382 54 dataset """kinships""" +382 54 model """kg2e""" +382 54 loss """softplus""" +382 54 regularizer """no""" +382 54 optimizer """adam""" +382 54 training_loop """owa""" +382 54 negative_sampler """basic""" +382 54 evaluator """rankbased""" +382 55 dataset """kinships""" +382 55 model """kg2e""" +382 55 loss """softplus""" +382 55 regularizer """no""" +382 55 optimizer """adam""" +382 55 training_loop """owa""" +382 55 negative_sampler """basic""" +382 55 evaluator """rankbased""" +382 56 dataset """kinships""" +382 56 model """kg2e""" +382 56 loss """softplus""" +382 56 regularizer """no""" +382 56 optimizer 
"""adam""" +382 56 training_loop """owa""" +382 56 negative_sampler """basic""" +382 56 evaluator """rankbased""" +382 57 dataset """kinships""" +382 57 model """kg2e""" +382 57 loss """softplus""" +382 57 regularizer """no""" +382 57 optimizer """adam""" +382 57 training_loop """owa""" +382 57 negative_sampler """basic""" +382 57 evaluator """rankbased""" +382 58 dataset """kinships""" +382 58 model """kg2e""" +382 58 loss """softplus""" +382 58 regularizer """no""" +382 58 optimizer """adam""" +382 58 training_loop """owa""" +382 58 negative_sampler """basic""" +382 58 evaluator """rankbased""" +382 59 dataset """kinships""" +382 59 model """kg2e""" +382 59 loss """softplus""" +382 59 regularizer """no""" +382 59 optimizer """adam""" +382 59 training_loop """owa""" +382 59 negative_sampler """basic""" +382 59 evaluator """rankbased""" +382 60 dataset """kinships""" +382 60 model """kg2e""" +382 60 loss """softplus""" +382 60 regularizer """no""" +382 60 optimizer """adam""" +382 60 training_loop """owa""" +382 60 negative_sampler """basic""" +382 60 evaluator """rankbased""" +382 61 dataset """kinships""" +382 61 model """kg2e""" +382 61 loss """softplus""" +382 61 regularizer """no""" +382 61 optimizer """adam""" +382 61 training_loop """owa""" +382 61 negative_sampler """basic""" +382 61 evaluator """rankbased""" +382 62 dataset """kinships""" +382 62 model """kg2e""" +382 62 loss """softplus""" +382 62 regularizer """no""" +382 62 optimizer """adam""" +382 62 training_loop """owa""" +382 62 negative_sampler """basic""" +382 62 evaluator """rankbased""" +382 63 dataset """kinships""" +382 63 model """kg2e""" +382 63 loss """softplus""" +382 63 regularizer """no""" +382 63 optimizer """adam""" +382 63 training_loop """owa""" +382 63 negative_sampler """basic""" +382 63 evaluator """rankbased""" +382 64 dataset """kinships""" +382 64 model """kg2e""" +382 64 loss """softplus""" +382 64 regularizer """no""" +382 64 optimizer """adam""" +382 64 training_loop 
"""owa""" +382 64 negative_sampler """basic""" +382 64 evaluator """rankbased""" +382 65 dataset """kinships""" +382 65 model """kg2e""" +382 65 loss """softplus""" +382 65 regularizer """no""" +382 65 optimizer """adam""" +382 65 training_loop """owa""" +382 65 negative_sampler """basic""" +382 65 evaluator """rankbased""" +382 66 dataset """kinships""" +382 66 model """kg2e""" +382 66 loss """softplus""" +382 66 regularizer """no""" +382 66 optimizer """adam""" +382 66 training_loop """owa""" +382 66 negative_sampler """basic""" +382 66 evaluator """rankbased""" +382 67 dataset """kinships""" +382 67 model """kg2e""" +382 67 loss """softplus""" +382 67 regularizer """no""" +382 67 optimizer """adam""" +382 67 training_loop """owa""" +382 67 negative_sampler """basic""" +382 67 evaluator """rankbased""" +382 68 dataset """kinships""" +382 68 model """kg2e""" +382 68 loss """softplus""" +382 68 regularizer """no""" +382 68 optimizer """adam""" +382 68 training_loop """owa""" +382 68 negative_sampler """basic""" +382 68 evaluator """rankbased""" +382 69 dataset """kinships""" +382 69 model """kg2e""" +382 69 loss """softplus""" +382 69 regularizer """no""" +382 69 optimizer """adam""" +382 69 training_loop """owa""" +382 69 negative_sampler """basic""" +382 69 evaluator """rankbased""" +382 70 dataset """kinships""" +382 70 model """kg2e""" +382 70 loss """softplus""" +382 70 regularizer """no""" +382 70 optimizer """adam""" +382 70 training_loop """owa""" +382 70 negative_sampler """basic""" +382 70 evaluator """rankbased""" +382 71 dataset """kinships""" +382 71 model """kg2e""" +382 71 loss """softplus""" +382 71 regularizer """no""" +382 71 optimizer """adam""" +382 71 training_loop """owa""" +382 71 negative_sampler """basic""" +382 71 evaluator """rankbased""" +382 72 dataset """kinships""" +382 72 model """kg2e""" +382 72 loss """softplus""" +382 72 regularizer """no""" +382 72 optimizer """adam""" +382 72 training_loop """owa""" +382 72 negative_sampler 
"""basic""" +382 72 evaluator """rankbased""" +382 73 dataset """kinships""" +382 73 model """kg2e""" +382 73 loss """softplus""" +382 73 regularizer """no""" +382 73 optimizer """adam""" +382 73 training_loop """owa""" +382 73 negative_sampler """basic""" +382 73 evaluator """rankbased""" +382 74 dataset """kinships""" +382 74 model """kg2e""" +382 74 loss """softplus""" +382 74 regularizer """no""" +382 74 optimizer """adam""" +382 74 training_loop """owa""" +382 74 negative_sampler """basic""" +382 74 evaluator """rankbased""" +382 75 dataset """kinships""" +382 75 model """kg2e""" +382 75 loss """softplus""" +382 75 regularizer """no""" +382 75 optimizer """adam""" +382 75 training_loop """owa""" +382 75 negative_sampler """basic""" +382 75 evaluator """rankbased""" +382 76 dataset """kinships""" +382 76 model """kg2e""" +382 76 loss """softplus""" +382 76 regularizer """no""" +382 76 optimizer """adam""" +382 76 training_loop """owa""" +382 76 negative_sampler """basic""" +382 76 evaluator """rankbased""" +382 77 dataset """kinships""" +382 77 model """kg2e""" +382 77 loss """softplus""" +382 77 regularizer """no""" +382 77 optimizer """adam""" +382 77 training_loop """owa""" +382 77 negative_sampler """basic""" +382 77 evaluator """rankbased""" +382 78 dataset """kinships""" +382 78 model """kg2e""" +382 78 loss """softplus""" +382 78 regularizer """no""" +382 78 optimizer """adam""" +382 78 training_loop """owa""" +382 78 negative_sampler """basic""" +382 78 evaluator """rankbased""" +382 79 dataset """kinships""" +382 79 model """kg2e""" +382 79 loss """softplus""" +382 79 regularizer """no""" +382 79 optimizer """adam""" +382 79 training_loop """owa""" +382 79 negative_sampler """basic""" +382 79 evaluator """rankbased""" +382 80 dataset """kinships""" +382 80 model """kg2e""" +382 80 loss """softplus""" +382 80 regularizer """no""" +382 80 optimizer """adam""" +382 80 training_loop """owa""" +382 80 negative_sampler """basic""" +382 80 evaluator 
"""rankbased""" +382 81 dataset """kinships""" +382 81 model """kg2e""" +382 81 loss """softplus""" +382 81 regularizer """no""" +382 81 optimizer """adam""" +382 81 training_loop """owa""" +382 81 negative_sampler """basic""" +382 81 evaluator """rankbased""" +382 82 dataset """kinships""" +382 82 model """kg2e""" +382 82 loss """softplus""" +382 82 regularizer """no""" +382 82 optimizer """adam""" +382 82 training_loop """owa""" +382 82 negative_sampler """basic""" +382 82 evaluator """rankbased""" +382 83 dataset """kinships""" +382 83 model """kg2e""" +382 83 loss """softplus""" +382 83 regularizer """no""" +382 83 optimizer """adam""" +382 83 training_loop """owa""" +382 83 negative_sampler """basic""" +382 83 evaluator """rankbased""" +382 84 dataset """kinships""" +382 84 model """kg2e""" +382 84 loss """softplus""" +382 84 regularizer """no""" +382 84 optimizer """adam""" +382 84 training_loop """owa""" +382 84 negative_sampler """basic""" +382 84 evaluator """rankbased""" +382 85 dataset """kinships""" +382 85 model """kg2e""" +382 85 loss """softplus""" +382 85 regularizer """no""" +382 85 optimizer """adam""" +382 85 training_loop """owa""" +382 85 negative_sampler """basic""" +382 85 evaluator """rankbased""" +382 86 dataset """kinships""" +382 86 model """kg2e""" +382 86 loss """softplus""" +382 86 regularizer """no""" +382 86 optimizer """adam""" +382 86 training_loop """owa""" +382 86 negative_sampler """basic""" +382 86 evaluator """rankbased""" +382 87 dataset """kinships""" +382 87 model """kg2e""" +382 87 loss """softplus""" +382 87 regularizer """no""" +382 87 optimizer """adam""" +382 87 training_loop """owa""" +382 87 negative_sampler """basic""" +382 87 evaluator """rankbased""" +382 88 dataset """kinships""" +382 88 model """kg2e""" +382 88 loss """softplus""" +382 88 regularizer """no""" +382 88 optimizer """adam""" +382 88 training_loop """owa""" +382 88 negative_sampler """basic""" +382 88 evaluator """rankbased""" +382 89 dataset 
"""kinships""" +382 89 model """kg2e""" +382 89 loss """softplus""" +382 89 regularizer """no""" +382 89 optimizer """adam""" +382 89 training_loop """owa""" +382 89 negative_sampler """basic""" +382 89 evaluator """rankbased""" +382 90 dataset """kinships""" +382 90 model """kg2e""" +382 90 loss """softplus""" +382 90 regularizer """no""" +382 90 optimizer """adam""" +382 90 training_loop """owa""" +382 90 negative_sampler """basic""" +382 90 evaluator """rankbased""" +382 91 dataset """kinships""" +382 91 model """kg2e""" +382 91 loss """softplus""" +382 91 regularizer """no""" +382 91 optimizer """adam""" +382 91 training_loop """owa""" +382 91 negative_sampler """basic""" +382 91 evaluator """rankbased""" +382 92 dataset """kinships""" +382 92 model """kg2e""" +382 92 loss """softplus""" +382 92 regularizer """no""" +382 92 optimizer """adam""" +382 92 training_loop """owa""" +382 92 negative_sampler """basic""" +382 92 evaluator """rankbased""" +382 93 dataset """kinships""" +382 93 model """kg2e""" +382 93 loss """softplus""" +382 93 regularizer """no""" +382 93 optimizer """adam""" +382 93 training_loop """owa""" +382 93 negative_sampler """basic""" +382 93 evaluator """rankbased""" +382 94 dataset """kinships""" +382 94 model """kg2e""" +382 94 loss """softplus""" +382 94 regularizer """no""" +382 94 optimizer """adam""" +382 94 training_loop """owa""" +382 94 negative_sampler """basic""" +382 94 evaluator """rankbased""" +382 95 dataset """kinships""" +382 95 model """kg2e""" +382 95 loss """softplus""" +382 95 regularizer """no""" +382 95 optimizer """adam""" +382 95 training_loop """owa""" +382 95 negative_sampler """basic""" +382 95 evaluator """rankbased""" +382 96 dataset """kinships""" +382 96 model """kg2e""" +382 96 loss """softplus""" +382 96 regularizer """no""" +382 96 optimizer """adam""" +382 96 training_loop """owa""" +382 96 negative_sampler """basic""" +382 96 evaluator """rankbased""" +382 97 dataset """kinships""" +382 97 model """kg2e""" 
+382 97 loss """softplus""" +382 97 regularizer """no""" +382 97 optimizer """adam""" +382 97 training_loop """owa""" +382 97 negative_sampler """basic""" +382 97 evaluator """rankbased""" +382 98 dataset """kinships""" +382 98 model """kg2e""" +382 98 loss """softplus""" +382 98 regularizer """no""" +382 98 optimizer """adam""" +382 98 training_loop """owa""" +382 98 negative_sampler """basic""" +382 98 evaluator """rankbased""" +382 99 dataset """kinships""" +382 99 model """kg2e""" +382 99 loss """softplus""" +382 99 regularizer """no""" +382 99 optimizer """adam""" +382 99 training_loop """owa""" +382 99 negative_sampler """basic""" +382 99 evaluator """rankbased""" +382 100 dataset """kinships""" +382 100 model """kg2e""" +382 100 loss """softplus""" +382 100 regularizer """no""" +382 100 optimizer """adam""" +382 100 training_loop """owa""" +382 100 negative_sampler """basic""" +382 100 evaluator """rankbased""" +383 1 model.embedding_dim 1.0 +383 1 model.c_min 0.06036517310884643 +383 1 model.c_max 8.104734125571019 +383 1 optimizer.lr 0.05811009731536848 +383 1 negative_sampler.num_negs_per_pos 16.0 +383 1 training.batch_size 2.0 +383 2 model.embedding_dim 0.0 +383 2 model.c_min 0.07052407963155259 +383 2 model.c_max 2.290193164723944 +383 2 optimizer.lr 0.08592203638635594 +383 2 negative_sampler.num_negs_per_pos 36.0 +383 2 training.batch_size 1.0 +383 3 model.embedding_dim 2.0 +383 3 model.c_min 0.028448940597045752 +383 3 model.c_max 8.617606458890299 +383 3 optimizer.lr 0.02013266333278856 +383 3 negative_sampler.num_negs_per_pos 58.0 +383 3 training.batch_size 2.0 +383 4 model.embedding_dim 2.0 +383 4 model.c_min 0.014737103447603017 +383 4 model.c_max 4.441125377437812 +383 4 optimizer.lr 0.014655426630654096 +383 4 negative_sampler.num_negs_per_pos 33.0 +383 4 training.batch_size 0.0 +383 5 model.embedding_dim 0.0 +383 5 model.c_min 0.03709769186448081 +383 5 model.c_max 2.9578069486816734 +383 5 optimizer.lr 0.004065181569992549 +383 5 
negative_sampler.num_negs_per_pos 30.0 +383 5 training.batch_size 1.0 +383 6 model.embedding_dim 0.0 +383 6 model.c_min 0.06497896440284268 +383 6 model.c_max 8.513360145413277 +383 6 optimizer.lr 0.043114785010361825 +383 6 negative_sampler.num_negs_per_pos 28.0 +383 6 training.batch_size 1.0 +383 7 model.embedding_dim 1.0 +383 7 model.c_min 0.047115938221216895 +383 7 model.c_max 8.905837854902671 +383 7 optimizer.lr 0.005755820994645461 +383 7 negative_sampler.num_negs_per_pos 37.0 +383 7 training.batch_size 1.0 +383 8 model.embedding_dim 1.0 +383 8 model.c_min 0.05736903833632688 +383 8 model.c_max 3.752916879025924 +383 8 optimizer.lr 0.04801020134077871 +383 8 negative_sampler.num_negs_per_pos 99.0 +383 8 training.batch_size 1.0 +383 9 model.embedding_dim 2.0 +383 9 model.c_min 0.03703061762378647 +383 9 model.c_max 5.98177923790936 +383 9 optimizer.lr 0.0018994879649978568 +383 9 negative_sampler.num_negs_per_pos 81.0 +383 9 training.batch_size 1.0 +383 10 model.embedding_dim 2.0 +383 10 model.c_min 0.06570241664496332 +383 10 model.c_max 1.641635912372878 +383 10 optimizer.lr 0.003037266005152995 +383 10 negative_sampler.num_negs_per_pos 69.0 +383 10 training.batch_size 0.0 +383 11 model.embedding_dim 1.0 +383 11 model.c_min 0.06038940449871377 +383 11 model.c_max 5.316164769424317 +383 11 optimizer.lr 0.011037052316505244 +383 11 negative_sampler.num_negs_per_pos 0.0 +383 11 training.batch_size 2.0 +383 12 model.embedding_dim 1.0 +383 12 model.c_min 0.016861478576459882 +383 12 model.c_max 3.692176074126233 +383 12 optimizer.lr 0.004131789080665336 +383 12 negative_sampler.num_negs_per_pos 52.0 +383 12 training.batch_size 1.0 +383 13 model.embedding_dim 1.0 +383 13 model.c_min 0.010083087192424236 +383 13 model.c_max 9.725577163048689 +383 13 optimizer.lr 0.04411952072279281 +383 13 negative_sampler.num_negs_per_pos 98.0 +383 13 training.batch_size 2.0 +383 14 model.embedding_dim 0.0 +383 14 model.c_min 0.010830067837566612 +383 14 model.c_max 
8.657224595896414 +383 14 optimizer.lr 0.015823039296674037 +383 14 negative_sampler.num_negs_per_pos 11.0 +383 14 training.batch_size 1.0 +383 15 model.embedding_dim 1.0 +383 15 model.c_min 0.05938500904532605 +383 15 model.c_max 9.872673269313905 +383 15 optimizer.lr 0.029892733141840124 +383 15 negative_sampler.num_negs_per_pos 85.0 +383 15 training.batch_size 0.0 +383 16 model.embedding_dim 2.0 +383 16 model.c_min 0.010699304566994661 +383 16 model.c_max 1.485258489813079 +383 16 optimizer.lr 0.043580270207093326 +383 16 negative_sampler.num_negs_per_pos 63.0 +383 16 training.batch_size 1.0 +383 17 model.embedding_dim 2.0 +383 17 model.c_min 0.07891988918827691 +383 17 model.c_max 1.9499603923480775 +383 17 optimizer.lr 0.0029274565797289087 +383 17 negative_sampler.num_negs_per_pos 65.0 +383 17 training.batch_size 1.0 +383 18 model.embedding_dim 2.0 +383 18 model.c_min 0.011048128288976217 +383 18 model.c_max 7.319431164810863 +383 18 optimizer.lr 0.018501263532075072 +383 18 negative_sampler.num_negs_per_pos 51.0 +383 18 training.batch_size 0.0 +383 19 model.embedding_dim 1.0 +383 19 model.c_min 0.04440723077048726 +383 19 model.c_max 7.343044918120112 +383 19 optimizer.lr 0.026028979217558007 +383 19 negative_sampler.num_negs_per_pos 99.0 +383 19 training.batch_size 0.0 +383 20 model.embedding_dim 0.0 +383 20 model.c_min 0.04557514015779495 +383 20 model.c_max 6.022640528810692 +383 20 optimizer.lr 0.001248515800791925 +383 20 negative_sampler.num_negs_per_pos 66.0 +383 20 training.batch_size 2.0 +383 21 model.embedding_dim 0.0 +383 21 model.c_min 0.01893188196280919 +383 21 model.c_max 2.4481048886545236 +383 21 optimizer.lr 0.0018936733298860204 +383 21 negative_sampler.num_negs_per_pos 55.0 +383 21 training.batch_size 0.0 +383 22 model.embedding_dim 0.0 +383 22 model.c_min 0.06654148366125134 +383 22 model.c_max 5.873873460784422 +383 22 optimizer.lr 0.0276572167620139 +383 22 negative_sampler.num_negs_per_pos 35.0 +383 22 training.batch_size 0.0 +383 23 
model.embedding_dim 1.0 +383 23 model.c_min 0.07121530585612615 +383 23 model.c_max 6.865590822657667 +383 23 optimizer.lr 0.029465538928764492 +383 23 negative_sampler.num_negs_per_pos 31.0 +383 23 training.batch_size 2.0 +383 24 model.embedding_dim 1.0 +383 24 model.c_min 0.034324873077124306 +383 24 model.c_max 7.5763946475535615 +383 24 optimizer.lr 0.007584087139567431 +383 24 negative_sampler.num_negs_per_pos 64.0 +383 24 training.batch_size 2.0 +383 25 model.embedding_dim 0.0 +383 25 model.c_min 0.07021471046374593 +383 25 model.c_max 9.423605740494926 +383 25 optimizer.lr 0.0012679169761230372 +383 25 negative_sampler.num_negs_per_pos 81.0 +383 25 training.batch_size 2.0 +383 26 model.embedding_dim 2.0 +383 26 model.c_min 0.018876731899679332 +383 26 model.c_max 5.7711848456987385 +383 26 optimizer.lr 0.06451110650462999 +383 26 negative_sampler.num_negs_per_pos 60.0 +383 26 training.batch_size 2.0 +383 27 model.embedding_dim 0.0 +383 27 model.c_min 0.0596701546428035 +383 27 model.c_max 1.829778621734362 +383 27 optimizer.lr 0.022034583883956946 +383 27 negative_sampler.num_negs_per_pos 43.0 +383 27 training.batch_size 0.0 +383 28 model.embedding_dim 1.0 +383 28 model.c_min 0.016583412046054263 +383 28 model.c_max 6.923814113478606 +383 28 optimizer.lr 0.0010131918446874596 +383 28 negative_sampler.num_negs_per_pos 15.0 +383 28 training.batch_size 1.0 +383 29 model.embedding_dim 0.0 +383 29 model.c_min 0.022167082575123637 +383 29 model.c_max 6.728215650175249 +383 29 optimizer.lr 0.0033575154721204804 +383 29 negative_sampler.num_negs_per_pos 80.0 +383 29 training.batch_size 1.0 +383 30 model.embedding_dim 2.0 +383 30 model.c_min 0.05189388569869368 +383 30 model.c_max 1.2794605766506306 +383 30 optimizer.lr 0.005226272105124343 +383 30 negative_sampler.num_negs_per_pos 2.0 +383 30 training.batch_size 0.0 +383 31 model.embedding_dim 2.0 +383 31 model.c_min 0.08796217933331617 +383 31 model.c_max 6.579506333480067 +383 31 optimizer.lr 0.01715889951195735 
+383 31 negative_sampler.num_negs_per_pos 92.0 +383 31 training.batch_size 0.0 +383 32 model.embedding_dim 1.0 +383 32 model.c_min 0.04153471671870604 +383 32 model.c_max 5.241605799153725 +383 32 optimizer.lr 0.010800782765104239 +383 32 negative_sampler.num_negs_per_pos 41.0 +383 32 training.batch_size 2.0 +383 33 model.embedding_dim 2.0 +383 33 model.c_min 0.050890250398797174 +383 33 model.c_max 6.766974350014466 +383 33 optimizer.lr 0.0024393938014577133 +383 33 negative_sampler.num_negs_per_pos 94.0 +383 33 training.batch_size 1.0 +383 34 model.embedding_dim 1.0 +383 34 model.c_min 0.0196457000113921 +383 34 model.c_max 1.7104161629555457 +383 34 optimizer.lr 0.005683794298762859 +383 34 negative_sampler.num_negs_per_pos 57.0 +383 34 training.batch_size 0.0 +383 35 model.embedding_dim 0.0 +383 35 model.c_min 0.01871282889836038 +383 35 model.c_max 4.677143970189066 +383 35 optimizer.lr 0.002509107268491748 +383 35 negative_sampler.num_negs_per_pos 23.0 +383 35 training.batch_size 2.0 +383 36 model.embedding_dim 1.0 +383 36 model.c_min 0.01833169230534794 +383 36 model.c_max 8.232352829634998 +383 36 optimizer.lr 0.05443252479174574 +383 36 negative_sampler.num_negs_per_pos 41.0 +383 36 training.batch_size 1.0 +383 37 model.embedding_dim 0.0 +383 37 model.c_min 0.09456618904881726 +383 37 model.c_max 3.5079554687779817 +383 37 optimizer.lr 0.006584215221564049 +383 37 negative_sampler.num_negs_per_pos 26.0 +383 37 training.batch_size 0.0 +383 38 model.embedding_dim 1.0 +383 38 model.c_min 0.049654834333932564 +383 38 model.c_max 2.750619052558026 +383 38 optimizer.lr 0.08225360367029866 +383 38 negative_sampler.num_negs_per_pos 31.0 +383 38 training.batch_size 0.0 +383 39 model.embedding_dim 1.0 +383 39 model.c_min 0.04854089044100866 +383 39 model.c_max 4.527016434004027 +383 39 optimizer.lr 0.0017823254912880406 +383 39 negative_sampler.num_negs_per_pos 94.0 +383 39 training.batch_size 1.0 +383 40 model.embedding_dim 2.0 +383 40 model.c_min 
0.025280151500233314 +383 40 model.c_max 2.051937170198512 +383 40 optimizer.lr 0.006501333128418033 +383 40 negative_sampler.num_negs_per_pos 4.0 +383 40 training.batch_size 2.0 +383 41 model.embedding_dim 0.0 +383 41 model.c_min 0.028627024952216723 +383 41 model.c_max 6.352903428173097 +383 41 optimizer.lr 0.007799857666573135 +383 41 negative_sampler.num_negs_per_pos 98.0 +383 41 training.batch_size 0.0 +383 42 model.embedding_dim 2.0 +383 42 model.c_min 0.010249480641574427 +383 42 model.c_max 6.626834701344394 +383 42 optimizer.lr 0.0972328241280127 +383 42 negative_sampler.num_negs_per_pos 68.0 +383 42 training.batch_size 2.0 +383 43 model.embedding_dim 1.0 +383 43 model.c_min 0.034583370513588146 +383 43 model.c_max 1.4792661007861225 +383 43 optimizer.lr 0.028671835649000686 +383 43 negative_sampler.num_negs_per_pos 30.0 +383 43 training.batch_size 0.0 +383 44 model.embedding_dim 1.0 +383 44 model.c_min 0.02182175214288952 +383 44 model.c_max 1.8378150057493912 +383 44 optimizer.lr 0.0010272180320956804 +383 44 negative_sampler.num_negs_per_pos 31.0 +383 44 training.batch_size 0.0 +383 45 model.embedding_dim 2.0 +383 45 model.c_min 0.02786252234140325 +383 45 model.c_max 8.996485863548159 +383 45 optimizer.lr 0.007948574029985382 +383 45 negative_sampler.num_negs_per_pos 88.0 +383 45 training.batch_size 1.0 +383 46 model.embedding_dim 0.0 +383 46 model.c_min 0.012638415666054496 +383 46 model.c_max 3.2291856949621733 +383 46 optimizer.lr 0.05483375946955054 +383 46 negative_sampler.num_negs_per_pos 40.0 +383 46 training.batch_size 1.0 +383 47 model.embedding_dim 0.0 +383 47 model.c_min 0.019723140340570433 +383 47 model.c_max 5.537672141289973 +383 47 optimizer.lr 0.008380309393831975 +383 47 negative_sampler.num_negs_per_pos 15.0 +383 47 training.batch_size 1.0 +383 48 model.embedding_dim 0.0 +383 48 model.c_min 0.03566188461001958 +383 48 model.c_max 1.3481881789049541 +383 48 optimizer.lr 0.013559529990032775 +383 48 negative_sampler.num_negs_per_pos 
62.0 +383 48 training.batch_size 1.0 +383 49 model.embedding_dim 2.0 +383 49 model.c_min 0.04528303554315525 +383 49 model.c_max 2.371677787690049 +383 49 optimizer.lr 0.0034362648123085677 +383 49 negative_sampler.num_negs_per_pos 49.0 +383 49 training.batch_size 0.0 +383 50 model.embedding_dim 0.0 +383 50 model.c_min 0.029885135718370832 +383 50 model.c_max 2.986454058791505 +383 50 optimizer.lr 0.0010166390127038128 +383 50 negative_sampler.num_negs_per_pos 28.0 +383 50 training.batch_size 0.0 +383 51 model.embedding_dim 0.0 +383 51 model.c_min 0.03264018328504361 +383 51 model.c_max 5.873485404478827 +383 51 optimizer.lr 0.040358776128357594 +383 51 negative_sampler.num_negs_per_pos 33.0 +383 51 training.batch_size 0.0 +383 52 model.embedding_dim 1.0 +383 52 model.c_min 0.05443412564385529 +383 52 model.c_max 1.0581068771871367 +383 52 optimizer.lr 0.08283012759406005 +383 52 negative_sampler.num_negs_per_pos 73.0 +383 52 training.batch_size 2.0 +383 53 model.embedding_dim 2.0 +383 53 model.c_min 0.02655775039713938 +383 53 model.c_max 3.795615758658624 +383 53 optimizer.lr 0.0030847724570625848 +383 53 negative_sampler.num_negs_per_pos 91.0 +383 53 training.batch_size 2.0 +383 54 model.embedding_dim 2.0 +383 54 model.c_min 0.011692617021283441 +383 54 model.c_max 2.7510976833945104 +383 54 optimizer.lr 0.02662053917792015 +383 54 negative_sampler.num_negs_per_pos 16.0 +383 54 training.batch_size 1.0 +383 55 model.embedding_dim 1.0 +383 55 model.c_min 0.01382762044566609 +383 55 model.c_max 2.7010586958843783 +383 55 optimizer.lr 0.022799540543912228 +383 55 negative_sampler.num_negs_per_pos 72.0 +383 55 training.batch_size 1.0 +383 56 model.embedding_dim 0.0 +383 56 model.c_min 0.0549693403589213 +383 56 model.c_max 2.76124867056088 +383 56 optimizer.lr 0.03486118913782036 +383 56 negative_sampler.num_negs_per_pos 39.0 +383 56 training.batch_size 1.0 +383 57 model.embedding_dim 0.0 +383 57 model.c_min 0.030908830255757476 +383 57 model.c_max 1.9696610351581227 
+383 57 optimizer.lr 0.012963335718158312 +383 57 negative_sampler.num_negs_per_pos 93.0 +383 57 training.batch_size 2.0 +383 58 model.embedding_dim 1.0 +383 58 model.c_min 0.011597954781806879 +383 58 model.c_max 3.2330505647346417 +383 58 optimizer.lr 0.0041384789362957876 +383 58 negative_sampler.num_negs_per_pos 13.0 +383 58 training.batch_size 2.0 +383 59 model.embedding_dim 0.0 +383 59 model.c_min 0.03953750798502328 +383 59 model.c_max 9.834476769279835 +383 59 optimizer.lr 0.0012260719070808605 +383 59 negative_sampler.num_negs_per_pos 71.0 +383 59 training.batch_size 2.0 +383 60 model.embedding_dim 0.0 +383 60 model.c_min 0.024067161296758906 +383 60 model.c_max 1.1641088197695466 +383 60 optimizer.lr 0.001399011914612312 +383 60 negative_sampler.num_negs_per_pos 75.0 +383 60 training.batch_size 2.0 +383 61 model.embedding_dim 2.0 +383 61 model.c_min 0.014349581495339528 +383 61 model.c_max 9.332598182679398 +383 61 optimizer.lr 0.02075170591587495 +383 61 negative_sampler.num_negs_per_pos 71.0 +383 61 training.batch_size 2.0 +383 62 model.embedding_dim 1.0 +383 62 model.c_min 0.08143623687135099 +383 62 model.c_max 6.675428047718316 +383 62 optimizer.lr 0.035655178117588025 +383 62 negative_sampler.num_negs_per_pos 28.0 +383 62 training.batch_size 0.0 +383 63 model.embedding_dim 1.0 +383 63 model.c_min 0.022581852111507192 +383 63 model.c_max 3.8270702978670625 +383 63 optimizer.lr 0.049131180647911256 +383 63 negative_sampler.num_negs_per_pos 69.0 +383 63 training.batch_size 0.0 +383 64 model.embedding_dim 0.0 +383 64 model.c_min 0.011845229650878778 +383 64 model.c_max 3.2445125182559758 +383 64 optimizer.lr 0.0973468604252872 +383 64 negative_sampler.num_negs_per_pos 26.0 +383 64 training.batch_size 1.0 +383 65 model.embedding_dim 0.0 +383 65 model.c_min 0.030538526029551264 +383 65 model.c_max 3.8367175347685087 +383 65 optimizer.lr 0.009609058723659691 +383 65 negative_sampler.num_negs_per_pos 1.0 +383 65 training.batch_size 1.0 +383 66 
model.embedding_dim 0.0 +383 66 model.c_min 0.09939356169596053 +383 66 model.c_max 4.045754726318345 +383 66 optimizer.lr 0.01879685762322809 +383 66 negative_sampler.num_negs_per_pos 39.0 +383 66 training.batch_size 1.0 +383 67 model.embedding_dim 0.0 +383 67 model.c_min 0.02670429097034996 +383 67 model.c_max 1.2235820538457134 +383 67 optimizer.lr 0.08074532807198205 +383 67 negative_sampler.num_negs_per_pos 63.0 +383 67 training.batch_size 2.0 +383 68 model.embedding_dim 1.0 +383 68 model.c_min 0.018430876856743522 +383 68 model.c_max 4.257119602901254 +383 68 optimizer.lr 0.0020535976442332005 +383 68 negative_sampler.num_negs_per_pos 6.0 +383 68 training.batch_size 1.0 +383 69 model.embedding_dim 1.0 +383 69 model.c_min 0.030111778131272443 +383 69 model.c_max 5.82713623513565 +383 69 optimizer.lr 0.0017198886728233005 +383 69 negative_sampler.num_negs_per_pos 87.0 +383 69 training.batch_size 2.0 +383 70 model.embedding_dim 2.0 +383 70 model.c_min 0.01040376637664863 +383 70 model.c_max 6.34972713099982 +383 70 optimizer.lr 0.01116295614468438 +383 70 negative_sampler.num_negs_per_pos 12.0 +383 70 training.batch_size 0.0 +383 71 model.embedding_dim 0.0 +383 71 model.c_min 0.041598822166758934 +383 71 model.c_max 5.41093742943419 +383 71 optimizer.lr 0.002243432389576645 +383 71 negative_sampler.num_negs_per_pos 7.0 +383 71 training.batch_size 1.0 +383 72 model.embedding_dim 1.0 +383 72 model.c_min 0.05699116367000077 +383 72 model.c_max 7.780690662883904 +383 72 optimizer.lr 0.011437475046832964 +383 72 negative_sampler.num_negs_per_pos 2.0 +383 72 training.batch_size 1.0 +383 73 model.embedding_dim 1.0 +383 73 model.c_min 0.06341001658734827 +383 73 model.c_max 3.7244114289002175 +383 73 optimizer.lr 0.0376011428711715 +383 73 negative_sampler.num_negs_per_pos 59.0 +383 73 training.batch_size 1.0 +383 74 model.embedding_dim 2.0 +383 74 model.c_min 0.016033497012817814 +383 74 model.c_max 1.059322187590173 +383 74 optimizer.lr 0.0025283276976152637 +383 74 
negative_sampler.num_negs_per_pos 21.0 +383 74 training.batch_size 0.0 +383 75 model.embedding_dim 1.0 +383 75 model.c_min 0.03004834840319144 +383 75 model.c_max 8.774394888164695 +383 75 optimizer.lr 0.004277720153120411 +383 75 negative_sampler.num_negs_per_pos 4.0 +383 75 training.batch_size 2.0 +383 76 model.embedding_dim 1.0 +383 76 model.c_min 0.06190864733320891 +383 76 model.c_max 9.52769452724736 +383 76 optimizer.lr 0.07033717077539636 +383 76 negative_sampler.num_negs_per_pos 70.0 +383 76 training.batch_size 0.0 +383 77 model.embedding_dim 0.0 +383 77 model.c_min 0.020737812223681654 +383 77 model.c_max 6.305498952780285 +383 77 optimizer.lr 0.06682332621840749 +383 77 negative_sampler.num_negs_per_pos 30.0 +383 77 training.batch_size 1.0 +383 78 model.embedding_dim 1.0 +383 78 model.c_min 0.06403274346692525 +383 78 model.c_max 2.9923283064697346 +383 78 optimizer.lr 0.0375352660033727 +383 78 negative_sampler.num_negs_per_pos 37.0 +383 78 training.batch_size 1.0 +383 79 model.embedding_dim 1.0 +383 79 model.c_min 0.04018379618773885 +383 79 model.c_max 5.102959646290134 +383 79 optimizer.lr 0.09066065316673692 +383 79 negative_sampler.num_negs_per_pos 61.0 +383 79 training.batch_size 1.0 +383 80 model.embedding_dim 2.0 +383 80 model.c_min 0.09505702122470704 +383 80 model.c_max 8.337659675506076 +383 80 optimizer.lr 0.0015752264365493384 +383 80 negative_sampler.num_negs_per_pos 89.0 +383 80 training.batch_size 0.0 +383 81 model.embedding_dim 1.0 +383 81 model.c_min 0.07018253154829257 +383 81 model.c_max 1.0542676925925818 +383 81 optimizer.lr 0.03243991695985102 +383 81 negative_sampler.num_negs_per_pos 18.0 +383 81 training.batch_size 0.0 +383 82 model.embedding_dim 2.0 +383 82 model.c_min 0.052131629020628095 +383 82 model.c_max 6.154436708252111 +383 82 optimizer.lr 0.007005547224310499 +383 82 negative_sampler.num_negs_per_pos 33.0 +383 82 training.batch_size 1.0 +383 83 model.embedding_dim 2.0 +383 83 model.c_min 0.028673510242423834 +383 83 
model.c_max 5.098026195178713 +383 83 optimizer.lr 0.0017786321434980791 +383 83 negative_sampler.num_negs_per_pos 8.0 +383 83 training.batch_size 0.0 +383 84 model.embedding_dim 0.0 +383 84 model.c_min 0.05205967484610378 +383 84 model.c_max 4.58741387039695 +383 84 optimizer.lr 0.0056702773223620585 +383 84 negative_sampler.num_negs_per_pos 93.0 +383 84 training.batch_size 1.0 +383 85 model.embedding_dim 0.0 +383 85 model.c_min 0.09403357146067984 +383 85 model.c_max 6.635431058175827 +383 85 optimizer.lr 0.0021146323891767867 +383 85 negative_sampler.num_negs_per_pos 43.0 +383 85 training.batch_size 2.0 +383 86 model.embedding_dim 0.0 +383 86 model.c_min 0.014273266807567495 +383 86 model.c_max 1.2356385992745302 +383 86 optimizer.lr 0.004997514948416434 +383 86 negative_sampler.num_negs_per_pos 53.0 +383 86 training.batch_size 1.0 +383 87 model.embedding_dim 2.0 +383 87 model.c_min 0.01263673518578161 +383 87 model.c_max 5.381178608269221 +383 87 optimizer.lr 0.02832691294142059 +383 87 negative_sampler.num_negs_per_pos 96.0 +383 87 training.batch_size 2.0 +383 88 model.embedding_dim 2.0 +383 88 model.c_min 0.011579412909427827 +383 88 model.c_max 3.2947939800259873 +383 88 optimizer.lr 0.01093103909930857 +383 88 negative_sampler.num_negs_per_pos 79.0 +383 88 training.batch_size 2.0 +383 89 model.embedding_dim 0.0 +383 89 model.c_min 0.07951296741123262 +383 89 model.c_max 4.673773616550884 +383 89 optimizer.lr 0.03754844512602135 +383 89 negative_sampler.num_negs_per_pos 96.0 +383 89 training.batch_size 2.0 +383 90 model.embedding_dim 2.0 +383 90 model.c_min 0.010694403173931541 +383 90 model.c_max 9.851689216107513 +383 90 optimizer.lr 0.005271392329303855 +383 90 negative_sampler.num_negs_per_pos 91.0 +383 90 training.batch_size 1.0 +383 91 model.embedding_dim 2.0 +383 91 model.c_min 0.037065686640947636 +383 91 model.c_max 9.44402235083156 +383 91 optimizer.lr 0.029081112621722097 +383 91 negative_sampler.num_negs_per_pos 97.0 +383 91 training.batch_size 
2.0 +383 92 model.embedding_dim 0.0 +383 92 model.c_min 0.013008137143921223 +383 92 model.c_max 8.986491466501011 +383 92 optimizer.lr 0.00564721715832362 +383 92 negative_sampler.num_negs_per_pos 66.0 +383 92 training.batch_size 0.0 +383 93 model.embedding_dim 1.0 +383 93 model.c_min 0.01085301207291396 +383 93 model.c_max 1.0159286045906004 +383 93 optimizer.lr 0.005304581330457781 +383 93 negative_sampler.num_negs_per_pos 25.0 +383 93 training.batch_size 1.0 +383 94 model.embedding_dim 2.0 +383 94 model.c_min 0.021851606525839003 +383 94 model.c_max 5.104852305545867 +383 94 optimizer.lr 0.0014520328731839176 +383 94 negative_sampler.num_negs_per_pos 12.0 +383 94 training.batch_size 2.0 +383 95 model.embedding_dim 2.0 +383 95 model.c_min 0.011852768066858648 +383 95 model.c_max 9.336816404919233 +383 95 optimizer.lr 0.004568746225405924 +383 95 negative_sampler.num_negs_per_pos 12.0 +383 95 training.batch_size 1.0 +383 96 model.embedding_dim 0.0 +383 96 model.c_min 0.014546930966382075 +383 96 model.c_max 2.6516698245711128 +383 96 optimizer.lr 0.025062292102239068 +383 96 negative_sampler.num_negs_per_pos 0.0 +383 96 training.batch_size 0.0 +383 97 model.embedding_dim 2.0 +383 97 model.c_min 0.012120009235702589 +383 97 model.c_max 1.540740241665509 +383 97 optimizer.lr 0.021820026063529938 +383 97 negative_sampler.num_negs_per_pos 61.0 +383 97 training.batch_size 2.0 +383 98 model.embedding_dim 2.0 +383 98 model.c_min 0.024413421894237747 +383 98 model.c_max 4.572001488032988 +383 98 optimizer.lr 0.008577534373964931 +383 98 negative_sampler.num_negs_per_pos 57.0 +383 98 training.batch_size 1.0 +383 99 model.embedding_dim 0.0 +383 99 model.c_min 0.0442817467817048 +383 99 model.c_max 3.795369499327581 +383 99 optimizer.lr 0.001306234803233537 +383 99 negative_sampler.num_negs_per_pos 64.0 +383 99 training.batch_size 0.0 +383 100 model.embedding_dim 0.0 +383 100 model.c_min 0.07595704826656188 +383 100 model.c_max 4.268358970908421 +383 100 optimizer.lr 
0.0013913774627955023 +383 100 negative_sampler.num_negs_per_pos 34.0 +383 100 training.batch_size 1.0 +383 1 dataset """kinships""" +383 1 model """kg2e""" +383 1 loss """bceaftersigmoid""" +383 1 regularizer """no""" +383 1 optimizer """adam""" +383 1 training_loop """owa""" +383 1 negative_sampler """basic""" +383 1 evaluator """rankbased""" +383 2 dataset """kinships""" +383 2 model """kg2e""" +383 2 loss """bceaftersigmoid""" +383 2 regularizer """no""" +383 2 optimizer """adam""" +383 2 training_loop """owa""" +383 2 negative_sampler """basic""" +383 2 evaluator """rankbased""" +383 3 dataset """kinships""" +383 3 model """kg2e""" +383 3 loss """bceaftersigmoid""" +383 3 regularizer """no""" +383 3 optimizer """adam""" +383 3 training_loop """owa""" +383 3 negative_sampler """basic""" +383 3 evaluator """rankbased""" +383 4 dataset """kinships""" +383 4 model """kg2e""" +383 4 loss """bceaftersigmoid""" +383 4 regularizer """no""" +383 4 optimizer """adam""" +383 4 training_loop """owa""" +383 4 negative_sampler """basic""" +383 4 evaluator """rankbased""" +383 5 dataset """kinships""" +383 5 model """kg2e""" +383 5 loss """bceaftersigmoid""" +383 5 regularizer """no""" +383 5 optimizer """adam""" +383 5 training_loop """owa""" +383 5 negative_sampler """basic""" +383 5 evaluator """rankbased""" +383 6 dataset """kinships""" +383 6 model """kg2e""" +383 6 loss """bceaftersigmoid""" +383 6 regularizer """no""" +383 6 optimizer """adam""" +383 6 training_loop """owa""" +383 6 negative_sampler """basic""" +383 6 evaluator """rankbased""" +383 7 dataset """kinships""" +383 7 model """kg2e""" +383 7 loss """bceaftersigmoid""" +383 7 regularizer """no""" +383 7 optimizer """adam""" +383 7 training_loop """owa""" +383 7 negative_sampler """basic""" +383 7 evaluator """rankbased""" +383 8 dataset """kinships""" +383 8 model """kg2e""" +383 8 loss """bceaftersigmoid""" +383 8 regularizer """no""" +383 8 optimizer """adam""" +383 8 training_loop """owa""" +383 8 
negative_sampler """basic""" +383 8 evaluator """rankbased""" +383 9 dataset """kinships""" +383 9 model """kg2e""" +383 9 loss """bceaftersigmoid""" +383 9 regularizer """no""" +383 9 optimizer """adam""" +383 9 training_loop """owa""" +383 9 negative_sampler """basic""" +383 9 evaluator """rankbased""" +383 10 dataset """kinships""" +383 10 model """kg2e""" +383 10 loss """bceaftersigmoid""" +383 10 regularizer """no""" +383 10 optimizer """adam""" +383 10 training_loop """owa""" +383 10 negative_sampler """basic""" +383 10 evaluator """rankbased""" +383 11 dataset """kinships""" +383 11 model """kg2e""" +383 11 loss """bceaftersigmoid""" +383 11 regularizer """no""" +383 11 optimizer """adam""" +383 11 training_loop """owa""" +383 11 negative_sampler """basic""" +383 11 evaluator """rankbased""" +383 12 dataset """kinships""" +383 12 model """kg2e""" +383 12 loss """bceaftersigmoid""" +383 12 regularizer """no""" +383 12 optimizer """adam""" +383 12 training_loop """owa""" +383 12 negative_sampler """basic""" +383 12 evaluator """rankbased""" +383 13 dataset """kinships""" +383 13 model """kg2e""" +383 13 loss """bceaftersigmoid""" +383 13 regularizer """no""" +383 13 optimizer """adam""" +383 13 training_loop """owa""" +383 13 negative_sampler """basic""" +383 13 evaluator """rankbased""" +383 14 dataset """kinships""" +383 14 model """kg2e""" +383 14 loss """bceaftersigmoid""" +383 14 regularizer """no""" +383 14 optimizer """adam""" +383 14 training_loop """owa""" +383 14 negative_sampler """basic""" +383 14 evaluator """rankbased""" +383 15 dataset """kinships""" +383 15 model """kg2e""" +383 15 loss """bceaftersigmoid""" +383 15 regularizer """no""" +383 15 optimizer """adam""" +383 15 training_loop """owa""" +383 15 negative_sampler """basic""" +383 15 evaluator """rankbased""" +383 16 dataset """kinships""" +383 16 model """kg2e""" +383 16 loss """bceaftersigmoid""" +383 16 regularizer """no""" +383 16 optimizer """adam""" +383 16 training_loop """owa""" 
+383 16 negative_sampler """basic""" +383 16 evaluator """rankbased""" +383 17 dataset """kinships""" +383 17 model """kg2e""" +383 17 loss """bceaftersigmoid""" +383 17 regularizer """no""" +383 17 optimizer """adam""" +383 17 training_loop """owa""" +383 17 negative_sampler """basic""" +383 17 evaluator """rankbased""" +383 18 dataset """kinships""" +383 18 model """kg2e""" +383 18 loss """bceaftersigmoid""" +383 18 regularizer """no""" +383 18 optimizer """adam""" +383 18 training_loop """owa""" +383 18 negative_sampler """basic""" +383 18 evaluator """rankbased""" +383 19 dataset """kinships""" +383 19 model """kg2e""" +383 19 loss """bceaftersigmoid""" +383 19 regularizer """no""" +383 19 optimizer """adam""" +383 19 training_loop """owa""" +383 19 negative_sampler """basic""" +383 19 evaluator """rankbased""" +383 20 dataset """kinships""" +383 20 model """kg2e""" +383 20 loss """bceaftersigmoid""" +383 20 regularizer """no""" +383 20 optimizer """adam""" +383 20 training_loop """owa""" +383 20 negative_sampler """basic""" +383 20 evaluator """rankbased""" +383 21 dataset """kinships""" +383 21 model """kg2e""" +383 21 loss """bceaftersigmoid""" +383 21 regularizer """no""" +383 21 optimizer """adam""" +383 21 training_loop """owa""" +383 21 negative_sampler """basic""" +383 21 evaluator """rankbased""" +383 22 dataset """kinships""" +383 22 model """kg2e""" +383 22 loss """bceaftersigmoid""" +383 22 regularizer """no""" +383 22 optimizer """adam""" +383 22 training_loop """owa""" +383 22 negative_sampler """basic""" +383 22 evaluator """rankbased""" +383 23 dataset """kinships""" +383 23 model """kg2e""" +383 23 loss """bceaftersigmoid""" +383 23 regularizer """no""" +383 23 optimizer """adam""" +383 23 training_loop """owa""" +383 23 negative_sampler """basic""" +383 23 evaluator """rankbased""" +383 24 dataset """kinships""" +383 24 model """kg2e""" +383 24 loss """bceaftersigmoid""" +383 24 regularizer """no""" +383 24 optimizer """adam""" +383 24 
training_loop """owa""" +383 24 negative_sampler """basic""" +383 24 evaluator """rankbased""" +383 25 dataset """kinships""" +383 25 model """kg2e""" +383 25 loss """bceaftersigmoid""" +383 25 regularizer """no""" +383 25 optimizer """adam""" +383 25 training_loop """owa""" +383 25 negative_sampler """basic""" +383 25 evaluator """rankbased""" +383 26 dataset """kinships""" +383 26 model """kg2e""" +383 26 loss """bceaftersigmoid""" +383 26 regularizer """no""" +383 26 optimizer """adam""" +383 26 training_loop """owa""" +383 26 negative_sampler """basic""" +383 26 evaluator """rankbased""" +383 27 dataset """kinships""" +383 27 model """kg2e""" +383 27 loss """bceaftersigmoid""" +383 27 regularizer """no""" +383 27 optimizer """adam""" +383 27 training_loop """owa""" +383 27 negative_sampler """basic""" +383 27 evaluator """rankbased""" +383 28 dataset """kinships""" +383 28 model """kg2e""" +383 28 loss """bceaftersigmoid""" +383 28 regularizer """no""" +383 28 optimizer """adam""" +383 28 training_loop """owa""" +383 28 negative_sampler """basic""" +383 28 evaluator """rankbased""" +383 29 dataset """kinships""" +383 29 model """kg2e""" +383 29 loss """bceaftersigmoid""" +383 29 regularizer """no""" +383 29 optimizer """adam""" +383 29 training_loop """owa""" +383 29 negative_sampler """basic""" +383 29 evaluator """rankbased""" +383 30 dataset """kinships""" +383 30 model """kg2e""" +383 30 loss """bceaftersigmoid""" +383 30 regularizer """no""" +383 30 optimizer """adam""" +383 30 training_loop """owa""" +383 30 negative_sampler """basic""" +383 30 evaluator """rankbased""" +383 31 dataset """kinships""" +383 31 model """kg2e""" +383 31 loss """bceaftersigmoid""" +383 31 regularizer """no""" +383 31 optimizer """adam""" +383 31 training_loop """owa""" +383 31 negative_sampler """basic""" +383 31 evaluator """rankbased""" +383 32 dataset """kinships""" +383 32 model """kg2e""" +383 32 loss """bceaftersigmoid""" +383 32 regularizer """no""" +383 32 optimizer 
"""adam""" +383 32 training_loop """owa""" +383 32 negative_sampler """basic""" +383 32 evaluator """rankbased""" +383 33 dataset """kinships""" +383 33 model """kg2e""" +383 33 loss """bceaftersigmoid""" +383 33 regularizer """no""" +383 33 optimizer """adam""" +383 33 training_loop """owa""" +383 33 negative_sampler """basic""" +383 33 evaluator """rankbased""" +383 34 dataset """kinships""" +383 34 model """kg2e""" +383 34 loss """bceaftersigmoid""" +383 34 regularizer """no""" +383 34 optimizer """adam""" +383 34 training_loop """owa""" +383 34 negative_sampler """basic""" +383 34 evaluator """rankbased""" +383 35 dataset """kinships""" +383 35 model """kg2e""" +383 35 loss """bceaftersigmoid""" +383 35 regularizer """no""" +383 35 optimizer """adam""" +383 35 training_loop """owa""" +383 35 negative_sampler """basic""" +383 35 evaluator """rankbased""" +383 36 dataset """kinships""" +383 36 model """kg2e""" +383 36 loss """bceaftersigmoid""" +383 36 regularizer """no""" +383 36 optimizer """adam""" +383 36 training_loop """owa""" +383 36 negative_sampler """basic""" +383 36 evaluator """rankbased""" +383 37 dataset """kinships""" +383 37 model """kg2e""" +383 37 loss """bceaftersigmoid""" +383 37 regularizer """no""" +383 37 optimizer """adam""" +383 37 training_loop """owa""" +383 37 negative_sampler """basic""" +383 37 evaluator """rankbased""" +383 38 dataset """kinships""" +383 38 model """kg2e""" +383 38 loss """bceaftersigmoid""" +383 38 regularizer """no""" +383 38 optimizer """adam""" +383 38 training_loop """owa""" +383 38 negative_sampler """basic""" +383 38 evaluator """rankbased""" +383 39 dataset """kinships""" +383 39 model """kg2e""" +383 39 loss """bceaftersigmoid""" +383 39 regularizer """no""" +383 39 optimizer """adam""" +383 39 training_loop """owa""" +383 39 negative_sampler """basic""" +383 39 evaluator """rankbased""" +383 40 dataset """kinships""" +383 40 model """kg2e""" +383 40 loss """bceaftersigmoid""" +383 40 regularizer """no""" 
+383 40 optimizer """adam""" +383 40 training_loop """owa""" +383 40 negative_sampler """basic""" +383 40 evaluator """rankbased""" +383 41 dataset """kinships""" +383 41 model """kg2e""" +383 41 loss """bceaftersigmoid""" +383 41 regularizer """no""" +383 41 optimizer """adam""" +383 41 training_loop """owa""" +383 41 negative_sampler """basic""" +383 41 evaluator """rankbased""" +383 42 dataset """kinships""" +383 42 model """kg2e""" +383 42 loss """bceaftersigmoid""" +383 42 regularizer """no""" +383 42 optimizer """adam""" +383 42 training_loop """owa""" +383 42 negative_sampler """basic""" +383 42 evaluator """rankbased""" +383 43 dataset """kinships""" +383 43 model """kg2e""" +383 43 loss """bceaftersigmoid""" +383 43 regularizer """no""" +383 43 optimizer """adam""" +383 43 training_loop """owa""" +383 43 negative_sampler """basic""" +383 43 evaluator """rankbased""" +383 44 dataset """kinships""" +383 44 model """kg2e""" +383 44 loss """bceaftersigmoid""" +383 44 regularizer """no""" +383 44 optimizer """adam""" +383 44 training_loop """owa""" +383 44 negative_sampler """basic""" +383 44 evaluator """rankbased""" +383 45 dataset """kinships""" +383 45 model """kg2e""" +383 45 loss """bceaftersigmoid""" +383 45 regularizer """no""" +383 45 optimizer """adam""" +383 45 training_loop """owa""" +383 45 negative_sampler """basic""" +383 45 evaluator """rankbased""" +383 46 dataset """kinships""" +383 46 model """kg2e""" +383 46 loss """bceaftersigmoid""" +383 46 regularizer """no""" +383 46 optimizer """adam""" +383 46 training_loop """owa""" +383 46 negative_sampler """basic""" +383 46 evaluator """rankbased""" +383 47 dataset """kinships""" +383 47 model """kg2e""" +383 47 loss """bceaftersigmoid""" +383 47 regularizer """no""" +383 47 optimizer """adam""" +383 47 training_loop """owa""" +383 47 negative_sampler """basic""" +383 47 evaluator """rankbased""" +383 48 dataset """kinships""" +383 48 model """kg2e""" +383 48 loss """bceaftersigmoid""" +383 48 
regularizer """no""" +383 48 optimizer """adam""" +383 48 training_loop """owa""" +383 48 negative_sampler """basic""" +383 48 evaluator """rankbased""" +383 49 dataset """kinships""" +383 49 model """kg2e""" +383 49 loss """bceaftersigmoid""" +383 49 regularizer """no""" +383 49 optimizer """adam""" +383 49 training_loop """owa""" +383 49 negative_sampler """basic""" +383 49 evaluator """rankbased""" +383 50 dataset """kinships""" +383 50 model """kg2e""" +383 50 loss """bceaftersigmoid""" +383 50 regularizer """no""" +383 50 optimizer """adam""" +383 50 training_loop """owa""" +383 50 negative_sampler """basic""" +383 50 evaluator """rankbased""" +383 51 dataset """kinships""" +383 51 model """kg2e""" +383 51 loss """bceaftersigmoid""" +383 51 regularizer """no""" +383 51 optimizer """adam""" +383 51 training_loop """owa""" +383 51 negative_sampler """basic""" +383 51 evaluator """rankbased""" +383 52 dataset """kinships""" +383 52 model """kg2e""" +383 52 loss """bceaftersigmoid""" +383 52 regularizer """no""" +383 52 optimizer """adam""" +383 52 training_loop """owa""" +383 52 negative_sampler """basic""" +383 52 evaluator """rankbased""" +383 53 dataset """kinships""" +383 53 model """kg2e""" +383 53 loss """bceaftersigmoid""" +383 53 regularizer """no""" +383 53 optimizer """adam""" +383 53 training_loop """owa""" +383 53 negative_sampler """basic""" +383 53 evaluator """rankbased""" +383 54 dataset """kinships""" +383 54 model """kg2e""" +383 54 loss """bceaftersigmoid""" +383 54 regularizer """no""" +383 54 optimizer """adam""" +383 54 training_loop """owa""" +383 54 negative_sampler """basic""" +383 54 evaluator """rankbased""" +383 55 dataset """kinships""" +383 55 model """kg2e""" +383 55 loss """bceaftersigmoid""" +383 55 regularizer """no""" +383 55 optimizer """adam""" +383 55 training_loop """owa""" +383 55 negative_sampler """basic""" +383 55 evaluator """rankbased""" +383 56 dataset """kinships""" +383 56 model """kg2e""" +383 56 loss 
"""bceaftersigmoid""" +383 56 regularizer """no""" +383 56 optimizer """adam""" +383 56 training_loop """owa""" +383 56 negative_sampler """basic""" +383 56 evaluator """rankbased""" +383 57 dataset """kinships""" +383 57 model """kg2e""" +383 57 loss """bceaftersigmoid""" +383 57 regularizer """no""" +383 57 optimizer """adam""" +383 57 training_loop """owa""" +383 57 negative_sampler """basic""" +383 57 evaluator """rankbased""" +383 58 dataset """kinships""" +383 58 model """kg2e""" +383 58 loss """bceaftersigmoid""" +383 58 regularizer """no""" +383 58 optimizer """adam""" +383 58 training_loop """owa""" +383 58 negative_sampler """basic""" +383 58 evaluator """rankbased""" +383 59 dataset """kinships""" +383 59 model """kg2e""" +383 59 loss """bceaftersigmoid""" +383 59 regularizer """no""" +383 59 optimizer """adam""" +383 59 training_loop """owa""" +383 59 negative_sampler """basic""" +383 59 evaluator """rankbased""" +383 60 dataset """kinships""" +383 60 model """kg2e""" +383 60 loss """bceaftersigmoid""" +383 60 regularizer """no""" +383 60 optimizer """adam""" +383 60 training_loop """owa""" +383 60 negative_sampler """basic""" +383 60 evaluator """rankbased""" +383 61 dataset """kinships""" +383 61 model """kg2e""" +383 61 loss """bceaftersigmoid""" +383 61 regularizer """no""" +383 61 optimizer """adam""" +383 61 training_loop """owa""" +383 61 negative_sampler """basic""" +383 61 evaluator """rankbased""" +383 62 dataset """kinships""" +383 62 model """kg2e""" +383 62 loss """bceaftersigmoid""" +383 62 regularizer """no""" +383 62 optimizer """adam""" +383 62 training_loop """owa""" +383 62 negative_sampler """basic""" +383 62 evaluator """rankbased""" +383 63 dataset """kinships""" +383 63 model """kg2e""" +383 63 loss """bceaftersigmoid""" +383 63 regularizer """no""" +383 63 optimizer """adam""" +383 63 training_loop """owa""" +383 63 negative_sampler """basic""" +383 63 evaluator """rankbased""" +383 64 dataset """kinships""" +383 64 model 
"""kg2e""" +383 64 loss """bceaftersigmoid""" +383 64 regularizer """no""" +383 64 optimizer """adam""" +383 64 training_loop """owa""" +383 64 negative_sampler """basic""" +383 64 evaluator """rankbased""" +383 65 dataset """kinships""" +383 65 model """kg2e""" +383 65 loss """bceaftersigmoid""" +383 65 regularizer """no""" +383 65 optimizer """adam""" +383 65 training_loop """owa""" +383 65 negative_sampler """basic""" +383 65 evaluator """rankbased""" +383 66 dataset """kinships""" +383 66 model """kg2e""" +383 66 loss """bceaftersigmoid""" +383 66 regularizer """no""" +383 66 optimizer """adam""" +383 66 training_loop """owa""" +383 66 negative_sampler """basic""" +383 66 evaluator """rankbased""" +383 67 dataset """kinships""" +383 67 model """kg2e""" +383 67 loss """bceaftersigmoid""" +383 67 regularizer """no""" +383 67 optimizer """adam""" +383 67 training_loop """owa""" +383 67 negative_sampler """basic""" +383 67 evaluator """rankbased""" +383 68 dataset """kinships""" +383 68 model """kg2e""" +383 68 loss """bceaftersigmoid""" +383 68 regularizer """no""" +383 68 optimizer """adam""" +383 68 training_loop """owa""" +383 68 negative_sampler """basic""" +383 68 evaluator """rankbased""" +383 69 dataset """kinships""" +383 69 model """kg2e""" +383 69 loss """bceaftersigmoid""" +383 69 regularizer """no""" +383 69 optimizer """adam""" +383 69 training_loop """owa""" +383 69 negative_sampler """basic""" +383 69 evaluator """rankbased""" +383 70 dataset """kinships""" +383 70 model """kg2e""" +383 70 loss """bceaftersigmoid""" +383 70 regularizer """no""" +383 70 optimizer """adam""" +383 70 training_loop """owa""" +383 70 negative_sampler """basic""" +383 70 evaluator """rankbased""" +383 71 dataset """kinships""" +383 71 model """kg2e""" +383 71 loss """bceaftersigmoid""" +383 71 regularizer """no""" +383 71 optimizer """adam""" +383 71 training_loop """owa""" +383 71 negative_sampler """basic""" +383 71 evaluator """rankbased""" +383 72 dataset 
"""kinships""" +383 72 model """kg2e""" +383 72 loss """bceaftersigmoid""" +383 72 regularizer """no""" +383 72 optimizer """adam""" +383 72 training_loop """owa""" +383 72 negative_sampler """basic""" +383 72 evaluator """rankbased""" +383 73 dataset """kinships""" +383 73 model """kg2e""" +383 73 loss """bceaftersigmoid""" +383 73 regularizer """no""" +383 73 optimizer """adam""" +383 73 training_loop """owa""" +383 73 negative_sampler """basic""" +383 73 evaluator """rankbased""" +383 74 dataset """kinships""" +383 74 model """kg2e""" +383 74 loss """bceaftersigmoid""" +383 74 regularizer """no""" +383 74 optimizer """adam""" +383 74 training_loop """owa""" +383 74 negative_sampler """basic""" +383 74 evaluator """rankbased""" +383 75 dataset """kinships""" +383 75 model """kg2e""" +383 75 loss """bceaftersigmoid""" +383 75 regularizer """no""" +383 75 optimizer """adam""" +383 75 training_loop """owa""" +383 75 negative_sampler """basic""" +383 75 evaluator """rankbased""" +383 76 dataset """kinships""" +383 76 model """kg2e""" +383 76 loss """bceaftersigmoid""" +383 76 regularizer """no""" +383 76 optimizer """adam""" +383 76 training_loop """owa""" +383 76 negative_sampler """basic""" +383 76 evaluator """rankbased""" +383 77 dataset """kinships""" +383 77 model """kg2e""" +383 77 loss """bceaftersigmoid""" +383 77 regularizer """no""" +383 77 optimizer """adam""" +383 77 training_loop """owa""" +383 77 negative_sampler """basic""" +383 77 evaluator """rankbased""" +383 78 dataset """kinships""" +383 78 model """kg2e""" +383 78 loss """bceaftersigmoid""" +383 78 regularizer """no""" +383 78 optimizer """adam""" +383 78 training_loop """owa""" +383 78 negative_sampler """basic""" +383 78 evaluator """rankbased""" +383 79 dataset """kinships""" +383 79 model """kg2e""" +383 79 loss """bceaftersigmoid""" +383 79 regularizer """no""" +383 79 optimizer """adam""" +383 79 training_loop """owa""" +383 79 negative_sampler """basic""" +383 79 evaluator """rankbased""" 
+383 80 dataset """kinships""" +383 80 model """kg2e""" +383 80 loss """bceaftersigmoid""" +383 80 regularizer """no""" +383 80 optimizer """adam""" +383 80 training_loop """owa""" +383 80 negative_sampler """basic""" +383 80 evaluator """rankbased""" +383 81 dataset """kinships""" +383 81 model """kg2e""" +383 81 loss """bceaftersigmoid""" +383 81 regularizer """no""" +383 81 optimizer """adam""" +383 81 training_loop """owa""" +383 81 negative_sampler """basic""" +383 81 evaluator """rankbased""" +383 82 dataset """kinships""" +383 82 model """kg2e""" +383 82 loss """bceaftersigmoid""" +383 82 regularizer """no""" +383 82 optimizer """adam""" +383 82 training_loop """owa""" +383 82 negative_sampler """basic""" +383 82 evaluator """rankbased""" +383 83 dataset """kinships""" +383 83 model """kg2e""" +383 83 loss """bceaftersigmoid""" +383 83 regularizer """no""" +383 83 optimizer """adam""" +383 83 training_loop """owa""" +383 83 negative_sampler """basic""" +383 83 evaluator """rankbased""" +383 84 dataset """kinships""" +383 84 model """kg2e""" +383 84 loss """bceaftersigmoid""" +383 84 regularizer """no""" +383 84 optimizer """adam""" +383 84 training_loop """owa""" +383 84 negative_sampler """basic""" +383 84 evaluator """rankbased""" +383 85 dataset """kinships""" +383 85 model """kg2e""" +383 85 loss """bceaftersigmoid""" +383 85 regularizer """no""" +383 85 optimizer """adam""" +383 85 training_loop """owa""" +383 85 negative_sampler """basic""" +383 85 evaluator """rankbased""" +383 86 dataset """kinships""" +383 86 model """kg2e""" +383 86 loss """bceaftersigmoid""" +383 86 regularizer """no""" +383 86 optimizer """adam""" +383 86 training_loop """owa""" +383 86 negative_sampler """basic""" +383 86 evaluator """rankbased""" +383 87 dataset """kinships""" +383 87 model """kg2e""" +383 87 loss """bceaftersigmoid""" +383 87 regularizer """no""" +383 87 optimizer """adam""" +383 87 training_loop """owa""" +383 87 negative_sampler """basic""" +383 87 evaluator 
"""rankbased""" +383 88 dataset """kinships""" +383 88 model """kg2e""" +383 88 loss """bceaftersigmoid""" +383 88 regularizer """no""" +383 88 optimizer """adam""" +383 88 training_loop """owa""" +383 88 negative_sampler """basic""" +383 88 evaluator """rankbased""" +383 89 dataset """kinships""" +383 89 model """kg2e""" +383 89 loss """bceaftersigmoid""" +383 89 regularizer """no""" +383 89 optimizer """adam""" +383 89 training_loop """owa""" +383 89 negative_sampler """basic""" +383 89 evaluator """rankbased""" +383 90 dataset """kinships""" +383 90 model """kg2e""" +383 90 loss """bceaftersigmoid""" +383 90 regularizer """no""" +383 90 optimizer """adam""" +383 90 training_loop """owa""" +383 90 negative_sampler """basic""" +383 90 evaluator """rankbased""" +383 91 dataset """kinships""" +383 91 model """kg2e""" +383 91 loss """bceaftersigmoid""" +383 91 regularizer """no""" +383 91 optimizer """adam""" +383 91 training_loop """owa""" +383 91 negative_sampler """basic""" +383 91 evaluator """rankbased""" +383 92 dataset """kinships""" +383 92 model """kg2e""" +383 92 loss """bceaftersigmoid""" +383 92 regularizer """no""" +383 92 optimizer """adam""" +383 92 training_loop """owa""" +383 92 negative_sampler """basic""" +383 92 evaluator """rankbased""" +383 93 dataset """kinships""" +383 93 model """kg2e""" +383 93 loss """bceaftersigmoid""" +383 93 regularizer """no""" +383 93 optimizer """adam""" +383 93 training_loop """owa""" +383 93 negative_sampler """basic""" +383 93 evaluator """rankbased""" +383 94 dataset """kinships""" +383 94 model """kg2e""" +383 94 loss """bceaftersigmoid""" +383 94 regularizer """no""" +383 94 optimizer """adam""" +383 94 training_loop """owa""" +383 94 negative_sampler """basic""" +383 94 evaluator """rankbased""" +383 95 dataset """kinships""" +383 95 model """kg2e""" +383 95 loss """bceaftersigmoid""" +383 95 regularizer """no""" +383 95 optimizer """adam""" +383 95 training_loop """owa""" +383 95 negative_sampler """basic""" 
+383 95 evaluator """rankbased""" +383 96 dataset """kinships""" +383 96 model """kg2e""" +383 96 loss """bceaftersigmoid""" +383 96 regularizer """no""" +383 96 optimizer """adam""" +383 96 training_loop """owa""" +383 96 negative_sampler """basic""" +383 96 evaluator """rankbased""" +383 97 dataset """kinships""" +383 97 model """kg2e""" +383 97 loss """bceaftersigmoid""" +383 97 regularizer """no""" +383 97 optimizer """adam""" +383 97 training_loop """owa""" +383 97 negative_sampler """basic""" +383 97 evaluator """rankbased""" +383 98 dataset """kinships""" +383 98 model """kg2e""" +383 98 loss """bceaftersigmoid""" +383 98 regularizer """no""" +383 98 optimizer """adam""" +383 98 training_loop """owa""" +383 98 negative_sampler """basic""" +383 98 evaluator """rankbased""" +383 99 dataset """kinships""" +383 99 model """kg2e""" +383 99 loss """bceaftersigmoid""" +383 99 regularizer """no""" +383 99 optimizer """adam""" +383 99 training_loop """owa""" +383 99 negative_sampler """basic""" +383 99 evaluator """rankbased""" +383 100 dataset """kinships""" +383 100 model """kg2e""" +383 100 loss """bceaftersigmoid""" +383 100 regularizer """no""" +383 100 optimizer """adam""" +383 100 training_loop """owa""" +383 100 negative_sampler """basic""" +383 100 evaluator """rankbased""" +384 1 model.embedding_dim 0.0 +384 1 model.c_min 0.01366348440659691 +384 1 model.c_max 2.643812607989785 +384 1 optimizer.lr 0.004418739519253422 +384 1 negative_sampler.num_negs_per_pos 21.0 +384 1 training.batch_size 0.0 +384 2 model.embedding_dim 2.0 +384 2 model.c_min 0.01521248358738597 +384 2 model.c_max 8.33836332625505 +384 2 optimizer.lr 0.0922786715574951 +384 2 negative_sampler.num_negs_per_pos 68.0 +384 2 training.batch_size 2.0 +384 3 model.embedding_dim 1.0 +384 3 model.c_min 0.08724131198891963 +384 3 model.c_max 4.305573624096512 +384 3 optimizer.lr 0.07964572280233513 +384 3 negative_sampler.num_negs_per_pos 35.0 +384 3 training.batch_size 1.0 +384 4 model.embedding_dim 
1.0 +384 4 model.c_min 0.023335259622579145 +384 4 model.c_max 3.6180608277677173 +384 4 optimizer.lr 0.053829687916416485 +384 4 negative_sampler.num_negs_per_pos 68.0 +384 4 training.batch_size 0.0 +384 5 model.embedding_dim 2.0 +384 5 model.c_min 0.028751064440549964 +384 5 model.c_max 5.827476765691595 +384 5 optimizer.lr 0.01576335444399404 +384 5 negative_sampler.num_negs_per_pos 43.0 +384 5 training.batch_size 2.0 +384 6 model.embedding_dim 0.0 +384 6 model.c_min 0.011821578709073712 +384 6 model.c_max 4.187105536595542 +384 6 optimizer.lr 0.06332046618574347 +384 6 negative_sampler.num_negs_per_pos 35.0 +384 6 training.batch_size 1.0 +384 7 model.embedding_dim 2.0 +384 7 model.c_min 0.02824796255103701 +384 7 model.c_max 1.066498652555492 +384 7 optimizer.lr 0.033229509114473844 +384 7 negative_sampler.num_negs_per_pos 1.0 +384 7 training.batch_size 0.0 +384 8 model.embedding_dim 2.0 +384 8 model.c_min 0.04050316622132096 +384 8 model.c_max 3.794164349205135 +384 8 optimizer.lr 0.006070353260335947 +384 8 negative_sampler.num_negs_per_pos 69.0 +384 8 training.batch_size 2.0 +384 9 model.embedding_dim 2.0 +384 9 model.c_min 0.01131868598415122 +384 9 model.c_max 7.710113963759005 +384 9 optimizer.lr 0.009173622565317494 +384 9 negative_sampler.num_negs_per_pos 15.0 +384 9 training.batch_size 2.0 +384 10 model.embedding_dim 0.0 +384 10 model.c_min 0.03723502286588974 +384 10 model.c_max 4.031269921880277 +384 10 optimizer.lr 0.007137267931960966 +384 10 negative_sampler.num_negs_per_pos 35.0 +384 10 training.batch_size 0.0 +384 11 model.embedding_dim 2.0 +384 11 model.c_min 0.053085147482256644 +384 11 model.c_max 1.6723583002672866 +384 11 optimizer.lr 0.002507067162190185 +384 11 negative_sampler.num_negs_per_pos 95.0 +384 11 training.batch_size 2.0 +384 12 model.embedding_dim 0.0 +384 12 model.c_min 0.030880303611695716 +384 12 model.c_max 3.314642030573795 +384 12 optimizer.lr 0.006114978966254008 +384 12 negative_sampler.num_negs_per_pos 38.0 +384 12 
training.batch_size 1.0 +384 13 model.embedding_dim 1.0 +384 13 model.c_min 0.03230851650569122 +384 13 model.c_max 1.2107938647103358 +384 13 optimizer.lr 0.014989758703817522 +384 13 negative_sampler.num_negs_per_pos 3.0 +384 13 training.batch_size 0.0 +384 14 model.embedding_dim 0.0 +384 14 model.c_min 0.0866847846176272 +384 14 model.c_max 9.885817124871584 +384 14 optimizer.lr 0.003245295605989296 +384 14 negative_sampler.num_negs_per_pos 20.0 +384 14 training.batch_size 1.0 +384 15 model.embedding_dim 0.0 +384 15 model.c_min 0.04158990737811761 +384 15 model.c_max 8.36631367307735 +384 15 optimizer.lr 0.0794387501774518 +384 15 negative_sampler.num_negs_per_pos 41.0 +384 15 training.batch_size 2.0 +384 16 model.embedding_dim 0.0 +384 16 model.c_min 0.08990843253019169 +384 16 model.c_max 6.203618005961818 +384 16 optimizer.lr 0.008670331537650003 +384 16 negative_sampler.num_negs_per_pos 15.0 +384 16 training.batch_size 2.0 +384 17 model.embedding_dim 0.0 +384 17 model.c_min 0.023826271368030887 +384 17 model.c_max 5.656424932756461 +384 17 optimizer.lr 0.0029627377181369106 +384 17 negative_sampler.num_negs_per_pos 99.0 +384 17 training.batch_size 0.0 +384 18 model.embedding_dim 2.0 +384 18 model.c_min 0.026414173412199007 +384 18 model.c_max 1.0909480112649068 +384 18 optimizer.lr 0.04286637046055769 +384 18 negative_sampler.num_negs_per_pos 40.0 +384 18 training.batch_size 1.0 +384 19 model.embedding_dim 1.0 +384 19 model.c_min 0.059203434493882975 +384 19 model.c_max 8.473994978473032 +384 19 optimizer.lr 0.002639110357028866 +384 19 negative_sampler.num_negs_per_pos 45.0 +384 19 training.batch_size 2.0 +384 20 model.embedding_dim 1.0 +384 20 model.c_min 0.017891205900563045 +384 20 model.c_max 9.962200372374099 +384 20 optimizer.lr 0.004133424815468791 +384 20 negative_sampler.num_negs_per_pos 36.0 +384 20 training.batch_size 0.0 +384 21 model.embedding_dim 2.0 +384 21 model.c_min 0.010919918371080339 +384 21 model.c_max 1.0448516556922183 +384 21 
optimizer.lr 0.005948062347795785 +384 21 negative_sampler.num_negs_per_pos 6.0 +384 21 training.batch_size 1.0 +384 22 model.embedding_dim 0.0 +384 22 model.c_min 0.05942380886253812 +384 22 model.c_max 7.811042615227308 +384 22 optimizer.lr 0.0041751876954258214 +384 22 negative_sampler.num_negs_per_pos 10.0 +384 22 training.batch_size 0.0 +384 23 model.embedding_dim 1.0 +384 23 model.c_min 0.03155214941070173 +384 23 model.c_max 2.2975407453276158 +384 23 optimizer.lr 0.002684195763672028 +384 23 negative_sampler.num_negs_per_pos 78.0 +384 23 training.batch_size 2.0 +384 24 model.embedding_dim 2.0 +384 24 model.c_min 0.03069933779957903 +384 24 model.c_max 9.668461514564967 +384 24 optimizer.lr 0.014609543559528784 +384 24 negative_sampler.num_negs_per_pos 48.0 +384 24 training.batch_size 2.0 +384 25 model.embedding_dim 2.0 +384 25 model.c_min 0.046632906338202475 +384 25 model.c_max 6.9225652985306985 +384 25 optimizer.lr 0.01466973077350808 +384 25 negative_sampler.num_negs_per_pos 11.0 +384 25 training.batch_size 1.0 +384 26 model.embedding_dim 0.0 +384 26 model.c_min 0.07032272975690551 +384 26 model.c_max 3.1086959234029137 +384 26 optimizer.lr 0.024148624123780008 +384 26 negative_sampler.num_negs_per_pos 81.0 +384 26 training.batch_size 2.0 +384 27 model.embedding_dim 1.0 +384 27 model.c_min 0.01270344327303403 +384 27 model.c_max 7.481047475461437 +384 27 optimizer.lr 0.0023952013130254376 +384 27 negative_sampler.num_negs_per_pos 43.0 +384 27 training.batch_size 0.0 +384 28 model.embedding_dim 0.0 +384 28 model.c_min 0.05754200777393129 +384 28 model.c_max 1.2181131952963886 +384 28 optimizer.lr 0.0030192703309008463 +384 28 negative_sampler.num_negs_per_pos 59.0 +384 28 training.batch_size 0.0 +384 29 model.embedding_dim 2.0 +384 29 model.c_min 0.022096468811489516 +384 29 model.c_max 3.522234559700881 +384 29 optimizer.lr 0.012400881206775079 +384 29 negative_sampler.num_negs_per_pos 74.0 +384 29 training.batch_size 2.0 +384 30 model.embedding_dim 2.0 
+384 30 model.c_min 0.030275732824157988 +384 30 model.c_max 5.7480003478079755 +384 30 optimizer.lr 0.011963991278909158 +384 30 negative_sampler.num_negs_per_pos 31.0 +384 30 training.batch_size 0.0 +384 31 model.embedding_dim 2.0 +384 31 model.c_min 0.013546280538098268 +384 31 model.c_max 1.018649857141755 +384 31 optimizer.lr 0.07254897954991361 +384 31 negative_sampler.num_negs_per_pos 66.0 +384 31 training.batch_size 2.0 +384 32 model.embedding_dim 2.0 +384 32 model.c_min 0.03611994229094502 +384 32 model.c_max 4.164686665211777 +384 32 optimizer.lr 0.0013312325266545656 +384 32 negative_sampler.num_negs_per_pos 62.0 +384 32 training.batch_size 1.0 +384 33 model.embedding_dim 1.0 +384 33 model.c_min 0.038625798706840554 +384 33 model.c_max 2.5651674356364356 +384 33 optimizer.lr 0.038763685149621765 +384 33 negative_sampler.num_negs_per_pos 66.0 +384 33 training.batch_size 1.0 +384 34 model.embedding_dim 0.0 +384 34 model.c_min 0.05995920753211827 +384 34 model.c_max 2.605749733768687 +384 34 optimizer.lr 0.014610974403953644 +384 34 negative_sampler.num_negs_per_pos 32.0 +384 34 training.batch_size 1.0 +384 35 model.embedding_dim 0.0 +384 35 model.c_min 0.054477254812630396 +384 35 model.c_max 6.989572486252681 +384 35 optimizer.lr 0.004341293311781166 +384 35 negative_sampler.num_negs_per_pos 31.0 +384 35 training.batch_size 0.0 +384 36 model.embedding_dim 2.0 +384 36 model.c_min 0.038368029677646694 +384 36 model.c_max 6.394219937804694 +384 36 optimizer.lr 0.004534064867301269 +384 36 negative_sampler.num_negs_per_pos 90.0 +384 36 training.batch_size 1.0 +384 37 model.embedding_dim 0.0 +384 37 model.c_min 0.09266991213701843 +384 37 model.c_max 3.430596777282517 +384 37 optimizer.lr 0.0032569297209675773 +384 37 negative_sampler.num_negs_per_pos 60.0 +384 37 training.batch_size 1.0 +384 38 model.embedding_dim 2.0 +384 38 model.c_min 0.03627361015699864 +384 38 model.c_max 8.218142567443302 +384 38 optimizer.lr 0.0026508970614901976 +384 38 
negative_sampler.num_negs_per_pos 41.0 +384 38 training.batch_size 1.0 +384 39 model.embedding_dim 2.0 +384 39 model.c_min 0.02900410957396064 +384 39 model.c_max 9.401475295898067 +384 39 optimizer.lr 0.00575643915597967 +384 39 negative_sampler.num_negs_per_pos 94.0 +384 39 training.batch_size 2.0 +384 40 model.embedding_dim 2.0 +384 40 model.c_min 0.06136048480298887 +384 40 model.c_max 2.9963959498329933 +384 40 optimizer.lr 0.0033883264195207602 +384 40 negative_sampler.num_negs_per_pos 29.0 +384 40 training.batch_size 2.0 +384 41 model.embedding_dim 0.0 +384 41 model.c_min 0.01954555797793292 +384 41 model.c_max 3.5725341379161515 +384 41 optimizer.lr 0.008396728333665704 +384 41 negative_sampler.num_negs_per_pos 5.0 +384 41 training.batch_size 2.0 +384 42 model.embedding_dim 1.0 +384 42 model.c_min 0.010793686867827309 +384 42 model.c_max 9.805242292776123 +384 42 optimizer.lr 0.006034391053953575 +384 42 negative_sampler.num_negs_per_pos 28.0 +384 42 training.batch_size 1.0 +384 43 model.embedding_dim 2.0 +384 43 model.c_min 0.024807368870942696 +384 43 model.c_max 8.698777924472099 +384 43 optimizer.lr 0.0015879606061764882 +384 43 negative_sampler.num_negs_per_pos 51.0 +384 43 training.batch_size 0.0 +384 44 model.embedding_dim 2.0 +384 44 model.c_min 0.013889555733922554 +384 44 model.c_max 9.448375176773188 +384 44 optimizer.lr 0.014570105011635822 +384 44 negative_sampler.num_negs_per_pos 14.0 +384 44 training.batch_size 0.0 +384 45 model.embedding_dim 1.0 +384 45 model.c_min 0.028371682422496602 +384 45 model.c_max 7.627779469282567 +384 45 optimizer.lr 0.09207278810305297 +384 45 negative_sampler.num_negs_per_pos 99.0 +384 45 training.batch_size 1.0 +384 46 model.embedding_dim 1.0 +384 46 model.c_min 0.02111566764647328 +384 46 model.c_max 2.8392701273910457 +384 46 optimizer.lr 0.002248831932309448 +384 46 negative_sampler.num_negs_per_pos 36.0 +384 46 training.batch_size 2.0 +384 47 model.embedding_dim 0.0 +384 47 model.c_min 0.09773874585069102 
+384 47 model.c_max 6.896800446538126 +384 47 optimizer.lr 0.002677674460346105 +384 47 negative_sampler.num_negs_per_pos 94.0 +384 47 training.batch_size 2.0 +384 48 model.embedding_dim 2.0 +384 48 model.c_min 0.021012585218581264 +384 48 model.c_max 4.430210043256745 +384 48 optimizer.lr 0.0015175012715027572 +384 48 negative_sampler.num_negs_per_pos 96.0 +384 48 training.batch_size 0.0 +384 49 model.embedding_dim 1.0 +384 49 model.c_min 0.027185497247543367 +384 49 model.c_max 7.901084024470454 +384 49 optimizer.lr 0.0024602109696021516 +384 49 negative_sampler.num_negs_per_pos 64.0 +384 49 training.batch_size 2.0 +384 50 model.embedding_dim 0.0 +384 50 model.c_min 0.013289705915258722 +384 50 model.c_max 2.0349438983148467 +384 50 optimizer.lr 0.0024258295249036037 +384 50 negative_sampler.num_negs_per_pos 91.0 +384 50 training.batch_size 1.0 +384 51 model.embedding_dim 1.0 +384 51 model.c_min 0.05563384402426699 +384 51 model.c_max 8.807934534336427 +384 51 optimizer.lr 0.04445266912361613 +384 51 negative_sampler.num_negs_per_pos 80.0 +384 51 training.batch_size 1.0 +384 52 model.embedding_dim 1.0 +384 52 model.c_min 0.049179675113069 +384 52 model.c_max 3.6651066694565984 +384 52 optimizer.lr 0.039254882198778346 +384 52 negative_sampler.num_negs_per_pos 23.0 +384 52 training.batch_size 1.0 +384 53 model.embedding_dim 2.0 +384 53 model.c_min 0.010207386156540292 +384 53 model.c_max 6.1159250817159645 +384 53 optimizer.lr 0.004905452745849801 +384 53 negative_sampler.num_negs_per_pos 37.0 +384 53 training.batch_size 1.0 +384 54 model.embedding_dim 0.0 +384 54 model.c_min 0.06555926432271496 +384 54 model.c_max 7.51238907637447 +384 54 optimizer.lr 0.023518139155593858 +384 54 negative_sampler.num_negs_per_pos 36.0 +384 54 training.batch_size 0.0 +384 55 model.embedding_dim 1.0 +384 55 model.c_min 0.021256577962719327 +384 55 model.c_max 9.464011888456405 +384 55 optimizer.lr 0.045972197844042266 +384 55 negative_sampler.num_negs_per_pos 77.0 +384 55 
training.batch_size 1.0 +384 56 model.embedding_dim 2.0 +384 56 model.c_min 0.05673786927949378 +384 56 model.c_max 1.702441624175008 +384 56 optimizer.lr 0.016181271251089806 +384 56 negative_sampler.num_negs_per_pos 67.0 +384 56 training.batch_size 2.0 +384 57 model.embedding_dim 0.0 +384 57 model.c_min 0.056260678174104094 +384 57 model.c_max 5.478368643519857 +384 57 optimizer.lr 0.002118408223016586 +384 57 negative_sampler.num_negs_per_pos 17.0 +384 57 training.batch_size 2.0 +384 58 model.embedding_dim 1.0 +384 58 model.c_min 0.059099106100532456 +384 58 model.c_max 3.4191584032802536 +384 58 optimizer.lr 0.0011457674143802118 +384 58 negative_sampler.num_negs_per_pos 59.0 +384 58 training.batch_size 2.0 +384 59 model.embedding_dim 2.0 +384 59 model.c_min 0.067449135792406 +384 59 model.c_max 7.937800595204886 +384 59 optimizer.lr 0.008553707956195152 +384 59 negative_sampler.num_negs_per_pos 25.0 +384 59 training.batch_size 0.0 +384 60 model.embedding_dim 0.0 +384 60 model.c_min 0.017782631769482634 +384 60 model.c_max 1.262945689830075 +384 60 optimizer.lr 0.003969968067425946 +384 60 negative_sampler.num_negs_per_pos 66.0 +384 60 training.batch_size 1.0 +384 61 model.embedding_dim 1.0 +384 61 model.c_min 0.023989241041707143 +384 61 model.c_max 1.2089440774829923 +384 61 optimizer.lr 0.09148597344009128 +384 61 negative_sampler.num_negs_per_pos 67.0 +384 61 training.batch_size 0.0 +384 62 model.embedding_dim 1.0 +384 62 model.c_min 0.06208053788813529 +384 62 model.c_max 4.311377850299134 +384 62 optimizer.lr 0.00862235076415171 +384 62 negative_sampler.num_negs_per_pos 98.0 +384 62 training.batch_size 1.0 +384 63 model.embedding_dim 2.0 +384 63 model.c_min 0.033110639297605444 +384 63 model.c_max 3.286771859685234 +384 63 optimizer.lr 0.08010852673228433 +384 63 negative_sampler.num_negs_per_pos 29.0 +384 63 training.batch_size 2.0 +384 64 model.embedding_dim 1.0 +384 64 model.c_min 0.041657319348134846 +384 64 model.c_max 8.06385057044441 +384 64 
optimizer.lr 0.0019101729538529171 +384 64 negative_sampler.num_negs_per_pos 68.0 +384 64 training.batch_size 1.0 +384 65 model.embedding_dim 0.0 +384 65 model.c_min 0.05832367417716996 +384 65 model.c_max 6.2995339717082866 +384 65 optimizer.lr 0.045956585605420285 +384 65 negative_sampler.num_negs_per_pos 55.0 +384 65 training.batch_size 0.0 +384 66 model.embedding_dim 1.0 +384 66 model.c_min 0.020536064194337383 +384 66 model.c_max 1.2471858356631873 +384 66 optimizer.lr 0.08059774087416997 +384 66 negative_sampler.num_negs_per_pos 85.0 +384 66 training.batch_size 2.0 +384 67 model.embedding_dim 1.0 +384 67 model.c_min 0.0352362386291703 +384 67 model.c_max 1.7281662815628 +384 67 optimizer.lr 0.06535327936059332 +384 67 negative_sampler.num_negs_per_pos 50.0 +384 67 training.batch_size 2.0 +384 68 model.embedding_dim 0.0 +384 68 model.c_min 0.0186167679271542 +384 68 model.c_max 2.602651374709294 +384 68 optimizer.lr 0.025252233660430973 +384 68 negative_sampler.num_negs_per_pos 94.0 +384 68 training.batch_size 0.0 +384 69 model.embedding_dim 2.0 +384 69 model.c_min 0.02836927458495097 +384 69 model.c_max 1.6626621828416632 +384 69 optimizer.lr 0.009024789338200596 +384 69 negative_sampler.num_negs_per_pos 71.0 +384 69 training.batch_size 2.0 +384 70 model.embedding_dim 1.0 +384 70 model.c_min 0.04496164358109068 +384 70 model.c_max 7.826909476402884 +384 70 optimizer.lr 0.0018291259215262904 +384 70 negative_sampler.num_negs_per_pos 83.0 +384 70 training.batch_size 1.0 +384 71 model.embedding_dim 2.0 +384 71 model.c_min 0.0554355098002004 +384 71 model.c_max 3.029952012521124 +384 71 optimizer.lr 0.04563997403815042 +384 71 negative_sampler.num_negs_per_pos 78.0 +384 71 training.batch_size 1.0 +384 72 model.embedding_dim 2.0 +384 72 model.c_min 0.09066080545537436 +384 72 model.c_max 4.991480613183926 +384 72 optimizer.lr 0.02209969867388456 +384 72 negative_sampler.num_negs_per_pos 35.0 +384 72 training.batch_size 2.0 +384 73 model.embedding_dim 1.0 +384 73 
model.c_min 0.0873908566788123 +384 73 model.c_max 5.480186798141778 +384 73 optimizer.lr 0.006951509969125404 +384 73 negative_sampler.num_negs_per_pos 70.0 +384 73 training.batch_size 2.0 +384 74 model.embedding_dim 2.0 +384 74 model.c_min 0.042684717686115044 +384 74 model.c_max 8.737029801612776 +384 74 optimizer.lr 0.0020607474383939717 +384 74 negative_sampler.num_negs_per_pos 76.0 +384 74 training.batch_size 0.0 +384 75 model.embedding_dim 2.0 +384 75 model.c_min 0.013728403282259027 +384 75 model.c_max 9.687571872268705 +384 75 optimizer.lr 0.0016956721443306681 +384 75 negative_sampler.num_negs_per_pos 14.0 +384 75 training.batch_size 1.0 +384 76 model.embedding_dim 2.0 +384 76 model.c_min 0.04426878398541707 +384 76 model.c_max 5.298705333443913 +384 76 optimizer.lr 0.0025863315064972936 +384 76 negative_sampler.num_negs_per_pos 63.0 +384 76 training.batch_size 0.0 +384 77 model.embedding_dim 1.0 +384 77 model.c_min 0.03152339028821924 +384 77 model.c_max 4.3454070899524595 +384 77 optimizer.lr 0.032714694468314674 +384 77 negative_sampler.num_negs_per_pos 23.0 +384 77 training.batch_size 2.0 +384 78 model.embedding_dim 0.0 +384 78 model.c_min 0.09584574540694828 +384 78 model.c_max 7.827178229799356 +384 78 optimizer.lr 0.065710258311285 +384 78 negative_sampler.num_negs_per_pos 80.0 +384 78 training.batch_size 0.0 +384 79 model.embedding_dim 1.0 +384 79 model.c_min 0.010746649743432094 +384 79 model.c_max 7.311962035815386 +384 79 optimizer.lr 0.05985941762521337 +384 79 negative_sampler.num_negs_per_pos 71.0 +384 79 training.batch_size 0.0 +384 80 model.embedding_dim 0.0 +384 80 model.c_min 0.01777784643480949 +384 80 model.c_max 3.330940048229918 +384 80 optimizer.lr 0.006063655359305295 +384 80 negative_sampler.num_negs_per_pos 39.0 +384 80 training.batch_size 1.0 +384 81 model.embedding_dim 1.0 +384 81 model.c_min 0.047850747331202856 +384 81 model.c_max 5.1113160638165915 +384 81 optimizer.lr 0.0015494033750116667 +384 81 
negative_sampler.num_negs_per_pos 59.0 +384 81 training.batch_size 2.0 +384 82 model.embedding_dim 0.0 +384 82 model.c_min 0.01988365081935896 +384 82 model.c_max 4.486953154148472 +384 82 optimizer.lr 0.02001987782589889 +384 82 negative_sampler.num_negs_per_pos 14.0 +384 82 training.batch_size 1.0 +384 83 model.embedding_dim 2.0 +384 83 model.c_min 0.01201887171537015 +384 83 model.c_max 2.2420857707507857 +384 83 optimizer.lr 0.007055644955954724 +384 83 negative_sampler.num_negs_per_pos 44.0 +384 83 training.batch_size 2.0 +384 84 model.embedding_dim 0.0 +384 84 model.c_min 0.049509187285831396 +384 84 model.c_max 9.195616422566495 +384 84 optimizer.lr 0.011902179037426693 +384 84 negative_sampler.num_negs_per_pos 96.0 +384 84 training.batch_size 2.0 +384 85 model.embedding_dim 2.0 +384 85 model.c_min 0.014449617301074913 +384 85 model.c_max 3.931641102155595 +384 85 optimizer.lr 0.00745034568234479 +384 85 negative_sampler.num_negs_per_pos 96.0 +384 85 training.batch_size 0.0 +384 86 model.embedding_dim 2.0 +384 86 model.c_min 0.015335851642667195 +384 86 model.c_max 3.8954596990390447 +384 86 optimizer.lr 0.0042304368684950265 +384 86 negative_sampler.num_negs_per_pos 9.0 +384 86 training.batch_size 1.0 +384 87 model.embedding_dim 0.0 +384 87 model.c_min 0.052081772365726406 +384 87 model.c_max 6.591063110653882 +384 87 optimizer.lr 0.011961117468373784 +384 87 negative_sampler.num_negs_per_pos 16.0 +384 87 training.batch_size 1.0 +384 88 model.embedding_dim 2.0 +384 88 model.c_min 0.010399329715553626 +384 88 model.c_max 6.2927565282269375 +384 88 optimizer.lr 0.0011994050954515821 +384 88 negative_sampler.num_negs_per_pos 64.0 +384 88 training.batch_size 0.0 +384 89 model.embedding_dim 1.0 +384 89 model.c_min 0.09286441234081996 +384 89 model.c_max 6.748953825400385 +384 89 optimizer.lr 0.07934325461380402 +384 89 negative_sampler.num_negs_per_pos 85.0 +384 89 training.batch_size 2.0 +384 90 model.embedding_dim 2.0 +384 90 model.c_min 0.030173088942467678 
+384 90 model.c_max 6.285091085675232 +384 90 optimizer.lr 0.010490556563527794 +384 90 negative_sampler.num_negs_per_pos 0.0 +384 90 training.batch_size 2.0 +384 91 model.embedding_dim 2.0 +384 91 model.c_min 0.03972125497227274 +384 91 model.c_max 3.4844503407527654 +384 91 optimizer.lr 0.06896791731258249 +384 91 negative_sampler.num_negs_per_pos 15.0 +384 91 training.batch_size 0.0 +384 92 model.embedding_dim 1.0 +384 92 model.c_min 0.026209718430577893 +384 92 model.c_max 4.030780116133418 +384 92 optimizer.lr 0.002943335034623179 +384 92 negative_sampler.num_negs_per_pos 43.0 +384 92 training.batch_size 2.0 +384 93 model.embedding_dim 0.0 +384 93 model.c_min 0.03812493750941361 +384 93 model.c_max 1.6754745310664978 +384 93 optimizer.lr 0.04261443435728103 +384 93 negative_sampler.num_negs_per_pos 31.0 +384 93 training.batch_size 2.0 +384 94 model.embedding_dim 1.0 +384 94 model.c_min 0.05810284618483678 +384 94 model.c_max 2.6483118509484784 +384 94 optimizer.lr 0.008329955006607529 +384 94 negative_sampler.num_negs_per_pos 68.0 +384 94 training.batch_size 2.0 +384 95 model.embedding_dim 2.0 +384 95 model.c_min 0.030550834387972528 +384 95 model.c_max 6.531799985363248 +384 95 optimizer.lr 0.0012467234499524396 +384 95 negative_sampler.num_negs_per_pos 47.0 +384 95 training.batch_size 2.0 +384 96 model.embedding_dim 2.0 +384 96 model.c_min 0.035212386695996405 +384 96 model.c_max 8.495145402104185 +384 96 optimizer.lr 0.00675789055640449 +384 96 negative_sampler.num_negs_per_pos 63.0 +384 96 training.batch_size 1.0 +384 97 model.embedding_dim 2.0 +384 97 model.c_min 0.08109279997387811 +384 97 model.c_max 1.603715137333058 +384 97 optimizer.lr 0.0010723681496768156 +384 97 negative_sampler.num_negs_per_pos 28.0 +384 97 training.batch_size 2.0 +384 98 model.embedding_dim 2.0 +384 98 model.c_min 0.023556185637490158 +384 98 model.c_max 6.472540953633924 +384 98 optimizer.lr 0.0020488699196645104 +384 98 negative_sampler.num_negs_per_pos 31.0 +384 98 
training.batch_size 0.0 +384 99 model.embedding_dim 0.0 +384 99 model.c_min 0.01976597834937886 +384 99 model.c_max 5.736876636600857 +384 99 optimizer.lr 0.00831549299677187 +384 99 negative_sampler.num_negs_per_pos 66.0 +384 99 training.batch_size 2.0 +384 100 model.embedding_dim 2.0 +384 100 model.c_min 0.054530671763389466 +384 100 model.c_max 2.718839956217294 +384 100 optimizer.lr 0.07020623603569263 +384 100 negative_sampler.num_negs_per_pos 75.0 +384 100 training.batch_size 2.0 +384 1 dataset """kinships""" +384 1 model """kg2e""" +384 1 loss """softplus""" +384 1 regularizer """no""" +384 1 optimizer """adam""" +384 1 training_loop """owa""" +384 1 negative_sampler """basic""" +384 1 evaluator """rankbased""" +384 2 dataset """kinships""" +384 2 model """kg2e""" +384 2 loss """softplus""" +384 2 regularizer """no""" +384 2 optimizer """adam""" +384 2 training_loop """owa""" +384 2 negative_sampler """basic""" +384 2 evaluator """rankbased""" +384 3 dataset """kinships""" +384 3 model """kg2e""" +384 3 loss """softplus""" +384 3 regularizer """no""" +384 3 optimizer """adam""" +384 3 training_loop """owa""" +384 3 negative_sampler """basic""" +384 3 evaluator """rankbased""" +384 4 dataset """kinships""" +384 4 model """kg2e""" +384 4 loss """softplus""" +384 4 regularizer """no""" +384 4 optimizer """adam""" +384 4 training_loop """owa""" +384 4 negative_sampler """basic""" +384 4 evaluator """rankbased""" +384 5 dataset """kinships""" +384 5 model """kg2e""" +384 5 loss """softplus""" +384 5 regularizer """no""" +384 5 optimizer """adam""" +384 5 training_loop """owa""" +384 5 negative_sampler """basic""" +384 5 evaluator """rankbased""" +384 6 dataset """kinships""" +384 6 model """kg2e""" +384 6 loss """softplus""" +384 6 regularizer """no""" +384 6 optimizer """adam""" +384 6 training_loop """owa""" +384 6 negative_sampler """basic""" +384 6 evaluator """rankbased""" +384 7 dataset """kinships""" +384 7 model """kg2e""" +384 7 loss """softplus""" +384 
7 regularizer """no""" +384 7 optimizer """adam""" +384 7 training_loop """owa""" +384 7 negative_sampler """basic""" +384 7 evaluator """rankbased""" +384 8 dataset """kinships""" +384 8 model """kg2e""" +384 8 loss """softplus""" +384 8 regularizer """no""" +384 8 optimizer """adam""" +384 8 training_loop """owa""" +384 8 negative_sampler """basic""" +384 8 evaluator """rankbased""" +384 9 dataset """kinships""" +384 9 model """kg2e""" +384 9 loss """softplus""" +384 9 regularizer """no""" +384 9 optimizer """adam""" +384 9 training_loop """owa""" +384 9 negative_sampler """basic""" +384 9 evaluator """rankbased""" +384 10 dataset """kinships""" +384 10 model """kg2e""" +384 10 loss """softplus""" +384 10 regularizer """no""" +384 10 optimizer """adam""" +384 10 training_loop """owa""" +384 10 negative_sampler """basic""" +384 10 evaluator """rankbased""" +384 11 dataset """kinships""" +384 11 model """kg2e""" +384 11 loss """softplus""" +384 11 regularizer """no""" +384 11 optimizer """adam""" +384 11 training_loop """owa""" +384 11 negative_sampler """basic""" +384 11 evaluator """rankbased""" +384 12 dataset """kinships""" +384 12 model """kg2e""" +384 12 loss """softplus""" +384 12 regularizer """no""" +384 12 optimizer """adam""" +384 12 training_loop """owa""" +384 12 negative_sampler """basic""" +384 12 evaluator """rankbased""" +384 13 dataset """kinships""" +384 13 model """kg2e""" +384 13 loss """softplus""" +384 13 regularizer """no""" +384 13 optimizer """adam""" +384 13 training_loop """owa""" +384 13 negative_sampler """basic""" +384 13 evaluator """rankbased""" +384 14 dataset """kinships""" +384 14 model """kg2e""" +384 14 loss """softplus""" +384 14 regularizer """no""" +384 14 optimizer """adam""" +384 14 training_loop """owa""" +384 14 negative_sampler """basic""" +384 14 evaluator """rankbased""" +384 15 dataset """kinships""" +384 15 model """kg2e""" +384 15 loss """softplus""" +384 15 regularizer """no""" +384 15 optimizer """adam""" +384 15 
training_loop """owa""" +384 15 negative_sampler """basic""" +384 15 evaluator """rankbased""" +384 16 dataset """kinships""" +384 16 model """kg2e""" +384 16 loss """softplus""" +384 16 regularizer """no""" +384 16 optimizer """adam""" +384 16 training_loop """owa""" +384 16 negative_sampler """basic""" +384 16 evaluator """rankbased""" +384 17 dataset """kinships""" +384 17 model """kg2e""" +384 17 loss """softplus""" +384 17 regularizer """no""" +384 17 optimizer """adam""" +384 17 training_loop """owa""" +384 17 negative_sampler """basic""" +384 17 evaluator """rankbased""" +384 18 dataset """kinships""" +384 18 model """kg2e""" +384 18 loss """softplus""" +384 18 regularizer """no""" +384 18 optimizer """adam""" +384 18 training_loop """owa""" +384 18 negative_sampler """basic""" +384 18 evaluator """rankbased""" +384 19 dataset """kinships""" +384 19 model """kg2e""" +384 19 loss """softplus""" +384 19 regularizer """no""" +384 19 optimizer """adam""" +384 19 training_loop """owa""" +384 19 negative_sampler """basic""" +384 19 evaluator """rankbased""" +384 20 dataset """kinships""" +384 20 model """kg2e""" +384 20 loss """softplus""" +384 20 regularizer """no""" +384 20 optimizer """adam""" +384 20 training_loop """owa""" +384 20 negative_sampler """basic""" +384 20 evaluator """rankbased""" +384 21 dataset """kinships""" +384 21 model """kg2e""" +384 21 loss """softplus""" +384 21 regularizer """no""" +384 21 optimizer """adam""" +384 21 training_loop """owa""" +384 21 negative_sampler """basic""" +384 21 evaluator """rankbased""" +384 22 dataset """kinships""" +384 22 model """kg2e""" +384 22 loss """softplus""" +384 22 regularizer """no""" +384 22 optimizer """adam""" +384 22 training_loop """owa""" +384 22 negative_sampler """basic""" +384 22 evaluator """rankbased""" +384 23 dataset """kinships""" +384 23 model """kg2e""" +384 23 loss """softplus""" +384 23 regularizer """no""" +384 23 optimizer """adam""" +384 23 training_loop """owa""" +384 23 
negative_sampler """basic""" +384 23 evaluator """rankbased""" +384 24 dataset """kinships""" +384 24 model """kg2e""" +384 24 loss """softplus""" +384 24 regularizer """no""" +384 24 optimizer """adam""" +384 24 training_loop """owa""" +384 24 negative_sampler """basic""" +384 24 evaluator """rankbased""" +384 25 dataset """kinships""" +384 25 model """kg2e""" +384 25 loss """softplus""" +384 25 regularizer """no""" +384 25 optimizer """adam""" +384 25 training_loop """owa""" +384 25 negative_sampler """basic""" +384 25 evaluator """rankbased""" +384 26 dataset """kinships""" +384 26 model """kg2e""" +384 26 loss """softplus""" +384 26 regularizer """no""" +384 26 optimizer """adam""" +384 26 training_loop """owa""" +384 26 negative_sampler """basic""" +384 26 evaluator """rankbased""" +384 27 dataset """kinships""" +384 27 model """kg2e""" +384 27 loss """softplus""" +384 27 regularizer """no""" +384 27 optimizer """adam""" +384 27 training_loop """owa""" +384 27 negative_sampler """basic""" +384 27 evaluator """rankbased""" +384 28 dataset """kinships""" +384 28 model """kg2e""" +384 28 loss """softplus""" +384 28 regularizer """no""" +384 28 optimizer """adam""" +384 28 training_loop """owa""" +384 28 negative_sampler """basic""" +384 28 evaluator """rankbased""" +384 29 dataset """kinships""" +384 29 model """kg2e""" +384 29 loss """softplus""" +384 29 regularizer """no""" +384 29 optimizer """adam""" +384 29 training_loop """owa""" +384 29 negative_sampler """basic""" +384 29 evaluator """rankbased""" +384 30 dataset """kinships""" +384 30 model """kg2e""" +384 30 loss """softplus""" +384 30 regularizer """no""" +384 30 optimizer """adam""" +384 30 training_loop """owa""" +384 30 negative_sampler """basic""" +384 30 evaluator """rankbased""" +384 31 dataset """kinships""" +384 31 model """kg2e""" +384 31 loss """softplus""" +384 31 regularizer """no""" +384 31 optimizer """adam""" +384 31 training_loop """owa""" +384 31 negative_sampler """basic""" +384 31 
evaluator """rankbased""" +384 32 dataset """kinships""" +384 32 model """kg2e""" +384 32 loss """softplus""" +384 32 regularizer """no""" +384 32 optimizer """adam""" +384 32 training_loop """owa""" +384 32 negative_sampler """basic""" +384 32 evaluator """rankbased""" +384 33 dataset """kinships""" +384 33 model """kg2e""" +384 33 loss """softplus""" +384 33 regularizer """no""" +384 33 optimizer """adam""" +384 33 training_loop """owa""" +384 33 negative_sampler """basic""" +384 33 evaluator """rankbased""" +384 34 dataset """kinships""" +384 34 model """kg2e""" +384 34 loss """softplus""" +384 34 regularizer """no""" +384 34 optimizer """adam""" +384 34 training_loop """owa""" +384 34 negative_sampler """basic""" +384 34 evaluator """rankbased""" +384 35 dataset """kinships""" +384 35 model """kg2e""" +384 35 loss """softplus""" +384 35 regularizer """no""" +384 35 optimizer """adam""" +384 35 training_loop """owa""" +384 35 negative_sampler """basic""" +384 35 evaluator """rankbased""" +384 36 dataset """kinships""" +384 36 model """kg2e""" +384 36 loss """softplus""" +384 36 regularizer """no""" +384 36 optimizer """adam""" +384 36 training_loop """owa""" +384 36 negative_sampler """basic""" +384 36 evaluator """rankbased""" +384 37 dataset """kinships""" +384 37 model """kg2e""" +384 37 loss """softplus""" +384 37 regularizer """no""" +384 37 optimizer """adam""" +384 37 training_loop """owa""" +384 37 negative_sampler """basic""" +384 37 evaluator """rankbased""" +384 38 dataset """kinships""" +384 38 model """kg2e""" +384 38 loss """softplus""" +384 38 regularizer """no""" +384 38 optimizer """adam""" +384 38 training_loop """owa""" +384 38 negative_sampler """basic""" +384 38 evaluator """rankbased""" +384 39 dataset """kinships""" +384 39 model """kg2e""" +384 39 loss """softplus""" +384 39 regularizer """no""" +384 39 optimizer """adam""" +384 39 training_loop """owa""" +384 39 negative_sampler """basic""" +384 39 evaluator """rankbased""" +384 40 
dataset """kinships""" +384 40 model """kg2e""" +384 40 loss """softplus""" +384 40 regularizer """no""" +384 40 optimizer """adam""" +384 40 training_loop """owa""" +384 40 negative_sampler """basic""" +384 40 evaluator """rankbased""" +384 41 dataset """kinships""" +384 41 model """kg2e""" +384 41 loss """softplus""" +384 41 regularizer """no""" +384 41 optimizer """adam""" +384 41 training_loop """owa""" +384 41 negative_sampler """basic""" +384 41 evaluator """rankbased""" +384 42 dataset """kinships""" +384 42 model """kg2e""" +384 42 loss """softplus""" +384 42 regularizer """no""" +384 42 optimizer """adam""" +384 42 training_loop """owa""" +384 42 negative_sampler """basic""" +384 42 evaluator """rankbased""" +384 43 dataset """kinships""" +384 43 model """kg2e""" +384 43 loss """softplus""" +384 43 regularizer """no""" +384 43 optimizer """adam""" +384 43 training_loop """owa""" +384 43 negative_sampler """basic""" +384 43 evaluator """rankbased""" +384 44 dataset """kinships""" +384 44 model """kg2e""" +384 44 loss """softplus""" +384 44 regularizer """no""" +384 44 optimizer """adam""" +384 44 training_loop """owa""" +384 44 negative_sampler """basic""" +384 44 evaluator """rankbased""" +384 45 dataset """kinships""" +384 45 model """kg2e""" +384 45 loss """softplus""" +384 45 regularizer """no""" +384 45 optimizer """adam""" +384 45 training_loop """owa""" +384 45 negative_sampler """basic""" +384 45 evaluator """rankbased""" +384 46 dataset """kinships""" +384 46 model """kg2e""" +384 46 loss """softplus""" +384 46 regularizer """no""" +384 46 optimizer """adam""" +384 46 training_loop """owa""" +384 46 negative_sampler """basic""" +384 46 evaluator """rankbased""" +384 47 dataset """kinships""" +384 47 model """kg2e""" +384 47 loss """softplus""" +384 47 regularizer """no""" +384 47 optimizer """adam""" +384 47 training_loop """owa""" +384 47 negative_sampler """basic""" +384 47 evaluator """rankbased""" +384 48 dataset """kinships""" +384 48 model 
"""kg2e""" +384 48 loss """softplus""" +384 48 regularizer """no""" +384 48 optimizer """adam""" +384 48 training_loop """owa""" +384 48 negative_sampler """basic""" +384 48 evaluator """rankbased""" +384 49 dataset """kinships""" +384 49 model """kg2e""" +384 49 loss """softplus""" +384 49 regularizer """no""" +384 49 optimizer """adam""" +384 49 training_loop """owa""" +384 49 negative_sampler """basic""" +384 49 evaluator """rankbased""" +384 50 dataset """kinships""" +384 50 model """kg2e""" +384 50 loss """softplus""" +384 50 regularizer """no""" +384 50 optimizer """adam""" +384 50 training_loop """owa""" +384 50 negative_sampler """basic""" +384 50 evaluator """rankbased""" +384 51 dataset """kinships""" +384 51 model """kg2e""" +384 51 loss """softplus""" +384 51 regularizer """no""" +384 51 optimizer """adam""" +384 51 training_loop """owa""" +384 51 negative_sampler """basic""" +384 51 evaluator """rankbased""" +384 52 dataset """kinships""" +384 52 model """kg2e""" +384 52 loss """softplus""" +384 52 regularizer """no""" +384 52 optimizer """adam""" +384 52 training_loop """owa""" +384 52 negative_sampler """basic""" +384 52 evaluator """rankbased""" +384 53 dataset """kinships""" +384 53 model """kg2e""" +384 53 loss """softplus""" +384 53 regularizer """no""" +384 53 optimizer """adam""" +384 53 training_loop """owa""" +384 53 negative_sampler """basic""" +384 53 evaluator """rankbased""" +384 54 dataset """kinships""" +384 54 model """kg2e""" +384 54 loss """softplus""" +384 54 regularizer """no""" +384 54 optimizer """adam""" +384 54 training_loop """owa""" +384 54 negative_sampler """basic""" +384 54 evaluator """rankbased""" +384 55 dataset """kinships""" +384 55 model """kg2e""" +384 55 loss """softplus""" +384 55 regularizer """no""" +384 55 optimizer """adam""" +384 55 training_loop """owa""" +384 55 negative_sampler """basic""" +384 55 evaluator """rankbased""" +384 56 dataset """kinships""" +384 56 model """kg2e""" +384 56 loss """softplus""" 
+384 56 regularizer """no""" +384 56 optimizer """adam""" +384 56 training_loop """owa""" +384 56 negative_sampler """basic""" +384 56 evaluator """rankbased""" +384 57 dataset """kinships""" +384 57 model """kg2e""" +384 57 loss """softplus""" +384 57 regularizer """no""" +384 57 optimizer """adam""" +384 57 training_loop """owa""" +384 57 negative_sampler """basic""" +384 57 evaluator """rankbased""" +384 58 dataset """kinships""" +384 58 model """kg2e""" +384 58 loss """softplus""" +384 58 regularizer """no""" +384 58 optimizer """adam""" +384 58 training_loop """owa""" +384 58 negative_sampler """basic""" +384 58 evaluator """rankbased""" +384 59 dataset """kinships""" +384 59 model """kg2e""" +384 59 loss """softplus""" +384 59 regularizer """no""" +384 59 optimizer """adam""" +384 59 training_loop """owa""" +384 59 negative_sampler """basic""" +384 59 evaluator """rankbased""" +384 60 dataset """kinships""" +384 60 model """kg2e""" +384 60 loss """softplus""" +384 60 regularizer """no""" +384 60 optimizer """adam""" +384 60 training_loop """owa""" +384 60 negative_sampler """basic""" +384 60 evaluator """rankbased""" +384 61 dataset """kinships""" +384 61 model """kg2e""" +384 61 loss """softplus""" +384 61 regularizer """no""" +384 61 optimizer """adam""" +384 61 training_loop """owa""" +384 61 negative_sampler """basic""" +384 61 evaluator """rankbased""" +384 62 dataset """kinships""" +384 62 model """kg2e""" +384 62 loss """softplus""" +384 62 regularizer """no""" +384 62 optimizer """adam""" +384 62 training_loop """owa""" +384 62 negative_sampler """basic""" +384 62 evaluator """rankbased""" +384 63 dataset """kinships""" +384 63 model """kg2e""" +384 63 loss """softplus""" +384 63 regularizer """no""" +384 63 optimizer """adam""" +384 63 training_loop """owa""" +384 63 negative_sampler """basic""" +384 63 evaluator """rankbased""" +384 64 dataset """kinships""" +384 64 model """kg2e""" +384 64 loss """softplus""" +384 64 regularizer """no""" +384 64 
optimizer """adam""" +384 64 training_loop """owa""" +384 64 negative_sampler """basic""" +384 64 evaluator """rankbased""" +384 65 dataset """kinships""" +384 65 model """kg2e""" +384 65 loss """softplus""" +384 65 regularizer """no""" +384 65 optimizer """adam""" +384 65 training_loop """owa""" +384 65 negative_sampler """basic""" +384 65 evaluator """rankbased""" +384 66 dataset """kinships""" +384 66 model """kg2e""" +384 66 loss """softplus""" +384 66 regularizer """no""" +384 66 optimizer """adam""" +384 66 training_loop """owa""" +384 66 negative_sampler """basic""" +384 66 evaluator """rankbased""" +384 67 dataset """kinships""" +384 67 model """kg2e""" +384 67 loss """softplus""" +384 67 regularizer """no""" +384 67 optimizer """adam""" +384 67 training_loop """owa""" +384 67 negative_sampler """basic""" +384 67 evaluator """rankbased""" +384 68 dataset """kinships""" +384 68 model """kg2e""" +384 68 loss """softplus""" +384 68 regularizer """no""" +384 68 optimizer """adam""" +384 68 training_loop """owa""" +384 68 negative_sampler """basic""" +384 68 evaluator """rankbased""" +384 69 dataset """kinships""" +384 69 model """kg2e""" +384 69 loss """softplus""" +384 69 regularizer """no""" +384 69 optimizer """adam""" +384 69 training_loop """owa""" +384 69 negative_sampler """basic""" +384 69 evaluator """rankbased""" +384 70 dataset """kinships""" +384 70 model """kg2e""" +384 70 loss """softplus""" +384 70 regularizer """no""" +384 70 optimizer """adam""" +384 70 training_loop """owa""" +384 70 negative_sampler """basic""" +384 70 evaluator """rankbased""" +384 71 dataset """kinships""" +384 71 model """kg2e""" +384 71 loss """softplus""" +384 71 regularizer """no""" +384 71 optimizer """adam""" +384 71 training_loop """owa""" +384 71 negative_sampler """basic""" +384 71 evaluator """rankbased""" +384 72 dataset """kinships""" +384 72 model """kg2e""" +384 72 loss """softplus""" +384 72 regularizer """no""" +384 72 optimizer """adam""" +384 72 
training_loop """owa""" +384 72 negative_sampler """basic""" +384 72 evaluator """rankbased""" +384 73 dataset """kinships""" +384 73 model """kg2e""" +384 73 loss """softplus""" +384 73 regularizer """no""" +384 73 optimizer """adam""" +384 73 training_loop """owa""" +384 73 negative_sampler """basic""" +384 73 evaluator """rankbased""" +384 74 dataset """kinships""" +384 74 model """kg2e""" +384 74 loss """softplus""" +384 74 regularizer """no""" +384 74 optimizer """adam""" +384 74 training_loop """owa""" +384 74 negative_sampler """basic""" +384 74 evaluator """rankbased""" +384 75 dataset """kinships""" +384 75 model """kg2e""" +384 75 loss """softplus""" +384 75 regularizer """no""" +384 75 optimizer """adam""" +384 75 training_loop """owa""" +384 75 negative_sampler """basic""" +384 75 evaluator """rankbased""" +384 76 dataset """kinships""" +384 76 model """kg2e""" +384 76 loss """softplus""" +384 76 regularizer """no""" +384 76 optimizer """adam""" +384 76 training_loop """owa""" +384 76 negative_sampler """basic""" +384 76 evaluator """rankbased""" +384 77 dataset """kinships""" +384 77 model """kg2e""" +384 77 loss """softplus""" +384 77 regularizer """no""" +384 77 optimizer """adam""" +384 77 training_loop """owa""" +384 77 negative_sampler """basic""" +384 77 evaluator """rankbased""" +384 78 dataset """kinships""" +384 78 model """kg2e""" +384 78 loss """softplus""" +384 78 regularizer """no""" +384 78 optimizer """adam""" +384 78 training_loop """owa""" +384 78 negative_sampler """basic""" +384 78 evaluator """rankbased""" +384 79 dataset """kinships""" +384 79 model """kg2e""" +384 79 loss """softplus""" +384 79 regularizer """no""" +384 79 optimizer """adam""" +384 79 training_loop """owa""" +384 79 negative_sampler """basic""" +384 79 evaluator """rankbased""" +384 80 dataset """kinships""" +384 80 model """kg2e""" +384 80 loss """softplus""" +384 80 regularizer """no""" +384 80 optimizer """adam""" +384 80 training_loop """owa""" +384 80 
negative_sampler """basic""" +384 80 evaluator """rankbased""" +384 81 dataset """kinships""" +384 81 model """kg2e""" +384 81 loss """softplus""" +384 81 regularizer """no""" +384 81 optimizer """adam""" +384 81 training_loop """owa""" +384 81 negative_sampler """basic""" +384 81 evaluator """rankbased""" +384 82 dataset """kinships""" +384 82 model """kg2e""" +384 82 loss """softplus""" +384 82 regularizer """no""" +384 82 optimizer """adam""" +384 82 training_loop """owa""" +384 82 negative_sampler """basic""" +384 82 evaluator """rankbased""" +384 83 dataset """kinships""" +384 83 model """kg2e""" +384 83 loss """softplus""" +384 83 regularizer """no""" +384 83 optimizer """adam""" +384 83 training_loop """owa""" +384 83 negative_sampler """basic""" +384 83 evaluator """rankbased""" +384 84 dataset """kinships""" +384 84 model """kg2e""" +384 84 loss """softplus""" +384 84 regularizer """no""" +384 84 optimizer """adam""" +384 84 training_loop """owa""" +384 84 negative_sampler """basic""" +384 84 evaluator """rankbased""" +384 85 dataset """kinships""" +384 85 model """kg2e""" +384 85 loss """softplus""" +384 85 regularizer """no""" +384 85 optimizer """adam""" +384 85 training_loop """owa""" +384 85 negative_sampler """basic""" +384 85 evaluator """rankbased""" +384 86 dataset """kinships""" +384 86 model """kg2e""" +384 86 loss """softplus""" +384 86 regularizer """no""" +384 86 optimizer """adam""" +384 86 training_loop """owa""" +384 86 negative_sampler """basic""" +384 86 evaluator """rankbased""" +384 87 dataset """kinships""" +384 87 model """kg2e""" +384 87 loss """softplus""" +384 87 regularizer """no""" +384 87 optimizer """adam""" +384 87 training_loop """owa""" +384 87 negative_sampler """basic""" +384 87 evaluator """rankbased""" +384 88 dataset """kinships""" +384 88 model """kg2e""" +384 88 loss """softplus""" +384 88 regularizer """no""" +384 88 optimizer """adam""" +384 88 training_loop """owa""" +384 88 negative_sampler """basic""" +384 88 
evaluator """rankbased""" +384 89 dataset """kinships""" +384 89 model """kg2e""" +384 89 loss """softplus""" +384 89 regularizer """no""" +384 89 optimizer """adam""" +384 89 training_loop """owa""" +384 89 negative_sampler """basic""" +384 89 evaluator """rankbased""" +384 90 dataset """kinships""" +384 90 model """kg2e""" +384 90 loss """softplus""" +384 90 regularizer """no""" +384 90 optimizer """adam""" +384 90 training_loop """owa""" +384 90 negative_sampler """basic""" +384 90 evaluator """rankbased""" +384 91 dataset """kinships""" +384 91 model """kg2e""" +384 91 loss """softplus""" +384 91 regularizer """no""" +384 91 optimizer """adam""" +384 91 training_loop """owa""" +384 91 negative_sampler """basic""" +384 91 evaluator """rankbased""" +384 92 dataset """kinships""" +384 92 model """kg2e""" +384 92 loss """softplus""" +384 92 regularizer """no""" +384 92 optimizer """adam""" +384 92 training_loop """owa""" +384 92 negative_sampler """basic""" +384 92 evaluator """rankbased""" +384 93 dataset """kinships""" +384 93 model """kg2e""" +384 93 loss """softplus""" +384 93 regularizer """no""" +384 93 optimizer """adam""" +384 93 training_loop """owa""" +384 93 negative_sampler """basic""" +384 93 evaluator """rankbased""" +384 94 dataset """kinships""" +384 94 model """kg2e""" +384 94 loss """softplus""" +384 94 regularizer """no""" +384 94 optimizer """adam""" +384 94 training_loop """owa""" +384 94 negative_sampler """basic""" +384 94 evaluator """rankbased""" +384 95 dataset """kinships""" +384 95 model """kg2e""" +384 95 loss """softplus""" +384 95 regularizer """no""" +384 95 optimizer """adam""" +384 95 training_loop """owa""" +384 95 negative_sampler """basic""" +384 95 evaluator """rankbased""" +384 96 dataset """kinships""" +384 96 model """kg2e""" +384 96 loss """softplus""" +384 96 regularizer """no""" +384 96 optimizer """adam""" +384 96 training_loop """owa""" +384 96 negative_sampler """basic""" +384 96 evaluator """rankbased""" +384 97 
dataset """kinships""" +384 97 model """kg2e""" +384 97 loss """softplus""" +384 97 regularizer """no""" +384 97 optimizer """adam""" +384 97 training_loop """owa""" +384 97 negative_sampler """basic""" +384 97 evaluator """rankbased""" +384 98 dataset """kinships""" +384 98 model """kg2e""" +384 98 loss """softplus""" +384 98 regularizer """no""" +384 98 optimizer """adam""" +384 98 training_loop """owa""" +384 98 negative_sampler """basic""" +384 98 evaluator """rankbased""" +384 99 dataset """kinships""" +384 99 model """kg2e""" +384 99 loss """softplus""" +384 99 regularizer """no""" +384 99 optimizer """adam""" +384 99 training_loop """owa""" +384 99 negative_sampler """basic""" +384 99 evaluator """rankbased""" +384 100 dataset """kinships""" +384 100 model """kg2e""" +384 100 loss """softplus""" +384 100 regularizer """no""" +384 100 optimizer """adam""" +384 100 training_loop """owa""" +384 100 negative_sampler """basic""" +384 100 evaluator """rankbased""" +385 1 model.embedding_dim 1.0 +385 1 model.c_min 0.012512596671185932 +385 1 model.c_max 1.7681003758419696 +385 1 loss.margin 1.7837047023251895 +385 1 optimizer.lr 0.010482942912971333 +385 1 negative_sampler.num_negs_per_pos 14.0 +385 1 training.batch_size 1.0 +385 2 model.embedding_dim 0.0 +385 2 model.c_min 0.02385050752100298 +385 2 model.c_max 4.171308068369255 +385 2 loss.margin 9.704877267113366 +385 2 optimizer.lr 0.06321577362149182 +385 2 negative_sampler.num_negs_per_pos 30.0 +385 2 training.batch_size 0.0 +385 3 model.embedding_dim 0.0 +385 3 model.c_min 0.02771952443276281 +385 3 model.c_max 3.1818801751110453 +385 3 loss.margin 4.233076299593922 +385 3 optimizer.lr 0.0011267053987467463 +385 3 negative_sampler.num_negs_per_pos 13.0 +385 3 training.batch_size 1.0 +385 4 model.embedding_dim 1.0 +385 4 model.c_min 0.03569105647210646 +385 4 model.c_max 4.161051614095053 +385 4 loss.margin 2.3078217465072477 +385 4 optimizer.lr 0.0015253530879151137 +385 4 negative_sampler.num_negs_per_pos 
8.0 +385 4 training.batch_size 1.0 +385 5 model.embedding_dim 1.0 +385 5 model.c_min 0.013885904165837596 +385 5 model.c_max 9.988340364053842 +385 5 loss.margin 3.780824987767213 +385 5 optimizer.lr 0.010082093343127369 +385 5 negative_sampler.num_negs_per_pos 14.0 +385 5 training.batch_size 2.0 +385 6 model.embedding_dim 1.0 +385 6 model.c_min 0.035331512988347163 +385 6 model.c_max 7.422324154215469 +385 6 loss.margin 3.7195974280280617 +385 6 optimizer.lr 0.07585209357086428 +385 6 negative_sampler.num_negs_per_pos 27.0 +385 6 training.batch_size 2.0 +385 7 model.embedding_dim 0.0 +385 7 model.c_min 0.0755942145407348 +385 7 model.c_max 1.921046986936699 +385 7 loss.margin 1.0727763919047155 +385 7 optimizer.lr 0.007010146987434352 +385 7 negative_sampler.num_negs_per_pos 43.0 +385 7 training.batch_size 2.0 +385 8 model.embedding_dim 1.0 +385 8 model.c_min 0.022215202460304718 +385 8 model.c_max 5.4754485285809205 +385 8 loss.margin 5.406785724581832 +385 8 optimizer.lr 0.053805639047594 +385 8 negative_sampler.num_negs_per_pos 47.0 +385 8 training.batch_size 1.0 +385 9 model.embedding_dim 0.0 +385 9 model.c_min 0.08919337824187006 +385 9 model.c_max 7.767028172798029 +385 9 loss.margin 2.929348376571208 +385 9 optimizer.lr 0.0701279576281892 +385 9 negative_sampler.num_negs_per_pos 56.0 +385 9 training.batch_size 1.0 +385 10 model.embedding_dim 2.0 +385 10 model.c_min 0.022600807522614813 +385 10 model.c_max 7.9388303843554375 +385 10 loss.margin 7.128603763540615 +385 10 optimizer.lr 0.005157021191576122 +385 10 negative_sampler.num_negs_per_pos 5.0 +385 10 training.batch_size 2.0 +385 11 model.embedding_dim 1.0 +385 11 model.c_min 0.06303572232973106 +385 11 model.c_max 3.5987811766758724 +385 11 loss.margin 9.58900781831658 +385 11 optimizer.lr 0.0018309775503530983 +385 11 negative_sampler.num_negs_per_pos 40.0 +385 11 training.batch_size 0.0 +385 12 model.embedding_dim 1.0 +385 12 model.c_min 0.028835453620453047 +385 12 model.c_max 7.960331121284631 +385 
12 loss.margin 4.826374423442985 +385 12 optimizer.lr 0.03871948078086633 +385 12 negative_sampler.num_negs_per_pos 25.0 +385 12 training.batch_size 2.0 +385 13 model.embedding_dim 1.0 +385 13 model.c_min 0.07390220877712333 +385 13 model.c_max 9.097936049302374 +385 13 loss.margin 5.996470563899514 +385 13 optimizer.lr 0.001600519856138677 +385 13 negative_sampler.num_negs_per_pos 0.0 +385 13 training.batch_size 1.0 +385 14 model.embedding_dim 1.0 +385 14 model.c_min 0.02375737233018761 +385 14 model.c_max 2.4792463054187146 +385 14 loss.margin 6.49126168739162 +385 14 optimizer.lr 0.018285010543285615 +385 14 negative_sampler.num_negs_per_pos 79.0 +385 14 training.batch_size 2.0 +385 15 model.embedding_dim 2.0 +385 15 model.c_min 0.024599617600123493 +385 15 model.c_max 1.1710725477518331 +385 15 loss.margin 6.3604299317875 +385 15 optimizer.lr 0.006735996868714868 +385 15 negative_sampler.num_negs_per_pos 1.0 +385 15 training.batch_size 1.0 +385 16 model.embedding_dim 1.0 +385 16 model.c_min 0.04552307031247459 +385 16 model.c_max 8.638548420174512 +385 16 loss.margin 3.3456344965930973 +385 16 optimizer.lr 0.007370890159304839 +385 16 negative_sampler.num_negs_per_pos 34.0 +385 16 training.batch_size 1.0 +385 17 model.embedding_dim 2.0 +385 17 model.c_min 0.04503260953658256 +385 17 model.c_max 2.282629807566956 +385 17 loss.margin 8.988896240292979 +385 17 optimizer.lr 0.007524518942928517 +385 17 negative_sampler.num_negs_per_pos 52.0 +385 17 training.batch_size 1.0 +385 18 model.embedding_dim 1.0 +385 18 model.c_min 0.028237577397400834 +385 18 model.c_max 7.79176739244309 +385 18 loss.margin 5.531409599594171 +385 18 optimizer.lr 0.06384598777543612 +385 18 negative_sampler.num_negs_per_pos 88.0 +385 18 training.batch_size 0.0 +385 19 model.embedding_dim 0.0 +385 19 model.c_min 0.07820963343971285 +385 19 model.c_max 6.366656367721676 +385 19 loss.margin 3.0154552073736367 +385 19 optimizer.lr 0.002382356702392835 +385 19 negative_sampler.num_negs_per_pos 
10.0 +385 19 training.batch_size 2.0 +385 20 model.embedding_dim 2.0 +385 20 model.c_min 0.02245736433270458 +385 20 model.c_max 1.7749993727284905 +385 20 loss.margin 0.773616965737792 +385 20 optimizer.lr 0.009861930977738945 +385 20 negative_sampler.num_negs_per_pos 46.0 +385 20 training.batch_size 0.0 +385 21 model.embedding_dim 2.0 +385 21 model.c_min 0.03181314157856665 +385 21 model.c_max 5.126051410923252 +385 21 loss.margin 4.129676220987381 +385 21 optimizer.lr 0.0027763788295651964 +385 21 negative_sampler.num_negs_per_pos 72.0 +385 21 training.batch_size 2.0 +385 22 model.embedding_dim 1.0 +385 22 model.c_min 0.02129590376034814 +385 22 model.c_max 8.886058363051003 +385 22 loss.margin 4.885589137409245 +385 22 optimizer.lr 0.016750578036385255 +385 22 negative_sampler.num_negs_per_pos 46.0 +385 22 training.batch_size 2.0 +385 23 model.embedding_dim 1.0 +385 23 model.c_min 0.05353464630627301 +385 23 model.c_max 9.1184603021549 +385 23 loss.margin 4.912491573257087 +385 23 optimizer.lr 0.05657589857799268 +385 23 negative_sampler.num_negs_per_pos 44.0 +385 23 training.batch_size 1.0 +385 24 model.embedding_dim 2.0 +385 24 model.c_min 0.015329966673857392 +385 24 model.c_max 5.769380906976529 +385 24 loss.margin 2.4975606671873782 +385 24 optimizer.lr 0.0020066483306583546 +385 24 negative_sampler.num_negs_per_pos 65.0 +385 24 training.batch_size 2.0 +385 25 model.embedding_dim 1.0 +385 25 model.c_min 0.015401437560001577 +385 25 model.c_max 5.210229203099352 +385 25 loss.margin 6.197277790360925 +385 25 optimizer.lr 0.0025204625381610507 +385 25 negative_sampler.num_negs_per_pos 29.0 +385 25 training.batch_size 2.0 +385 26 model.embedding_dim 0.0 +385 26 model.c_min 0.09268386240882838 +385 26 model.c_max 4.975470381518341 +385 26 loss.margin 2.4943721564999626 +385 26 optimizer.lr 0.019906269337272124 +385 26 negative_sampler.num_negs_per_pos 85.0 +385 26 training.batch_size 0.0 +385 27 model.embedding_dim 1.0 +385 27 model.c_min 0.09805567989958489 
+385 27 model.c_max 7.985169528975557 +385 27 loss.margin 5.383648377916961 +385 27 optimizer.lr 0.022961851299561714 +385 27 negative_sampler.num_negs_per_pos 81.0 +385 27 training.batch_size 0.0 +385 28 model.embedding_dim 1.0 +385 28 model.c_min 0.011509504319846953 +385 28 model.c_max 9.715630218575264 +385 28 loss.margin 5.548244501343731 +385 28 optimizer.lr 0.003005006908783918 +385 28 negative_sampler.num_negs_per_pos 66.0 +385 28 training.batch_size 2.0 +385 29 model.embedding_dim 2.0 +385 29 model.c_min 0.013886683750882832 +385 29 model.c_max 3.889903116000326 +385 29 loss.margin 3.618160665692318 +385 29 optimizer.lr 0.07029422993673133 +385 29 negative_sampler.num_negs_per_pos 87.0 +385 29 training.batch_size 1.0 +385 30 model.embedding_dim 0.0 +385 30 model.c_min 0.02748002950383592 +385 30 model.c_max 2.369597072757751 +385 30 loss.margin 7.540850868520135 +385 30 optimizer.lr 0.003497199622586059 +385 30 negative_sampler.num_negs_per_pos 28.0 +385 30 training.batch_size 1.0 +385 31 model.embedding_dim 2.0 +385 31 model.c_min 0.07040796661233331 +385 31 model.c_max 5.492713895628501 +385 31 loss.margin 1.1063621819466898 +385 31 optimizer.lr 0.004318767880742 +385 31 negative_sampler.num_negs_per_pos 9.0 +385 31 training.batch_size 2.0 +385 32 model.embedding_dim 0.0 +385 32 model.c_min 0.010145088498926855 +385 32 model.c_max 8.557044091405658 +385 32 loss.margin 6.5235347780723885 +385 32 optimizer.lr 0.008378897431476734 +385 32 negative_sampler.num_negs_per_pos 4.0 +385 32 training.batch_size 2.0 +385 33 model.embedding_dim 2.0 +385 33 model.c_min 0.08971016573350556 +385 33 model.c_max 2.178214996259299 +385 33 loss.margin 0.9438956335381707 +385 33 optimizer.lr 0.06930705388549209 +385 33 negative_sampler.num_negs_per_pos 21.0 +385 33 training.batch_size 1.0 +385 34 model.embedding_dim 1.0 +385 34 model.c_min 0.014249273507096738 +385 34 model.c_max 4.155146957098507 +385 34 loss.margin 6.458964498258662 +385 34 optimizer.lr 
0.002625118381415634 +385 34 negative_sampler.num_negs_per_pos 75.0 +385 34 training.batch_size 1.0 +385 35 model.embedding_dim 1.0 +385 35 model.c_min 0.01761946521861021 +385 35 model.c_max 7.563179010831702 +385 35 loss.margin 8.917366303831045 +385 35 optimizer.lr 0.004989346944923916 +385 35 negative_sampler.num_negs_per_pos 75.0 +385 35 training.batch_size 0.0 +385 36 model.embedding_dim 2.0 +385 36 model.c_min 0.05023962914702324 +385 36 model.c_max 5.827734000542761 +385 36 loss.margin 5.067545116076815 +385 36 optimizer.lr 0.012970663487517893 +385 36 negative_sampler.num_negs_per_pos 84.0 +385 36 training.batch_size 2.0 +385 37 model.embedding_dim 1.0 +385 37 model.c_min 0.013200324182568385 +385 37 model.c_max 4.8455975779451546 +385 37 loss.margin 8.532583152888769 +385 37 optimizer.lr 0.0019972580178986956 +385 37 negative_sampler.num_negs_per_pos 46.0 +385 37 training.batch_size 1.0 +385 38 model.embedding_dim 2.0 +385 38 model.c_min 0.013974349325226527 +385 38 model.c_max 7.4629825169141375 +385 38 loss.margin 4.761752262428292 +385 38 optimizer.lr 0.003165129777568371 +385 38 negative_sampler.num_negs_per_pos 50.0 +385 38 training.batch_size 0.0 +385 39 model.embedding_dim 2.0 +385 39 model.c_min 0.012889756768408285 +385 39 model.c_max 1.2718329266512187 +385 39 loss.margin 6.874614126643834 +385 39 optimizer.lr 0.01563980027124693 +385 39 negative_sampler.num_negs_per_pos 72.0 +385 39 training.batch_size 2.0 +385 40 model.embedding_dim 0.0 +385 40 model.c_min 0.04185160543205686 +385 40 model.c_max 2.908442865502294 +385 40 loss.margin 4.411753360670594 +385 40 optimizer.lr 0.029518018873807613 +385 40 negative_sampler.num_negs_per_pos 4.0 +385 40 training.batch_size 0.0 +385 41 model.embedding_dim 2.0 +385 41 model.c_min 0.05264494400487541 +385 41 model.c_max 1.1957377647783916 +385 41 loss.margin 3.2600493818246026 +385 41 optimizer.lr 0.04155778696885082 +385 41 negative_sampler.num_negs_per_pos 45.0 +385 41 training.batch_size 0.0 +385 42 
model.embedding_dim 2.0 +385 42 model.c_min 0.06755270274945732 +385 42 model.c_max 3.3031444472742244 +385 42 loss.margin 2.202183124191003 +385 42 optimizer.lr 0.0011413394055196364 +385 42 negative_sampler.num_negs_per_pos 46.0 +385 42 training.batch_size 0.0 +385 43 model.embedding_dim 2.0 +385 43 model.c_min 0.06961141690292276 +385 43 model.c_max 2.0397731490116335 +385 43 loss.margin 7.919956652042596 +385 43 optimizer.lr 0.009481613318364963 +385 43 negative_sampler.num_negs_per_pos 13.0 +385 43 training.batch_size 0.0 +385 44 model.embedding_dim 0.0 +385 44 model.c_min 0.03882662386646646 +385 44 model.c_max 8.002812098033441 +385 44 loss.margin 7.544159619202205 +385 44 optimizer.lr 0.023298118081943566 +385 44 negative_sampler.num_negs_per_pos 92.0 +385 44 training.batch_size 2.0 +385 45 model.embedding_dim 0.0 +385 45 model.c_min 0.04970326010677687 +385 45 model.c_max 9.636229377658102 +385 45 loss.margin 0.8222718842331084 +385 45 optimizer.lr 0.0014910303173093311 +385 45 negative_sampler.num_negs_per_pos 82.0 +385 45 training.batch_size 1.0 +385 46 model.embedding_dim 0.0 +385 46 model.c_min 0.02674294999642836 +385 46 model.c_max 6.038867957578407 +385 46 loss.margin 2.747140474430706 +385 46 optimizer.lr 0.029138431241072026 +385 46 negative_sampler.num_negs_per_pos 54.0 +385 46 training.batch_size 1.0 +385 47 model.embedding_dim 2.0 +385 47 model.c_min 0.07644971366428319 +385 47 model.c_max 1.7276316226408344 +385 47 loss.margin 1.0665972963510844 +385 47 optimizer.lr 0.00400408161592504 +385 47 negative_sampler.num_negs_per_pos 23.0 +385 47 training.batch_size 2.0 +385 48 model.embedding_dim 2.0 +385 48 model.c_min 0.013432856793549598 +385 48 model.c_max 6.38352896420357 +385 48 loss.margin 1.0256193990587588 +385 48 optimizer.lr 0.03980166006235916 +385 48 negative_sampler.num_negs_per_pos 87.0 +385 48 training.batch_size 0.0 +385 49 model.embedding_dim 2.0 +385 49 model.c_min 0.061654637745150634 +385 49 model.c_max 9.756837853035348 +385 49 
loss.margin 1.5510031556937274 +385 49 optimizer.lr 0.0018488224877721304 +385 49 negative_sampler.num_negs_per_pos 48.0 +385 49 training.batch_size 0.0 +385 50 model.embedding_dim 2.0 +385 50 model.c_min 0.014541204192052855 +385 50 model.c_max 2.953499153892385 +385 50 loss.margin 0.616351197555554 +385 50 optimizer.lr 0.01092841371027493 +385 50 negative_sampler.num_negs_per_pos 33.0 +385 50 training.batch_size 1.0 +385 51 model.embedding_dim 2.0 +385 51 model.c_min 0.013995723622275106 +385 51 model.c_max 8.712616350266906 +385 51 loss.margin 6.989289055142017 +385 51 optimizer.lr 0.05000266268503755 +385 51 negative_sampler.num_negs_per_pos 4.0 +385 51 training.batch_size 0.0 +385 52 model.embedding_dim 2.0 +385 52 model.c_min 0.05827937813416744 +385 52 model.c_max 4.019743799435503 +385 52 loss.margin 8.046697146941142 +385 52 optimizer.lr 0.006072759149507711 +385 52 negative_sampler.num_negs_per_pos 63.0 +385 52 training.batch_size 0.0 +385 53 model.embedding_dim 2.0 +385 53 model.c_min 0.06714598537829905 +385 53 model.c_max 9.732106242646068 +385 53 loss.margin 6.995995498917993 +385 53 optimizer.lr 0.046743585484495316 +385 53 negative_sampler.num_negs_per_pos 41.0 +385 53 training.batch_size 1.0 +385 54 model.embedding_dim 1.0 +385 54 model.c_min 0.08041537923375651 +385 54 model.c_max 9.955336497106043 +385 54 loss.margin 7.547163376267559 +385 54 optimizer.lr 0.06137064131852612 +385 54 negative_sampler.num_negs_per_pos 28.0 +385 54 training.batch_size 2.0 +385 55 model.embedding_dim 0.0 +385 55 model.c_min 0.09696218157125389 +385 55 model.c_max 3.443379956702687 +385 55 loss.margin 8.9409352511145 +385 55 optimizer.lr 0.004103529005923625 +385 55 negative_sampler.num_negs_per_pos 25.0 +385 55 training.batch_size 1.0 +385 56 model.embedding_dim 1.0 +385 56 model.c_min 0.018746940177156578 +385 56 model.c_max 2.7466972214932404 +385 56 loss.margin 7.954428055206446 +385 56 optimizer.lr 0.03132799548265062 +385 56 negative_sampler.num_negs_per_pos 
86.0 +385 56 training.batch_size 1.0 +385 57 model.embedding_dim 1.0 +385 57 model.c_min 0.0506504145273696 +385 57 model.c_max 3.3936434925196752 +385 57 loss.margin 0.5772529187369874 +385 57 optimizer.lr 0.09006230943695581 +385 57 negative_sampler.num_negs_per_pos 32.0 +385 57 training.batch_size 0.0 +385 58 model.embedding_dim 1.0 +385 58 model.c_min 0.06272441436189853 +385 58 model.c_max 5.8528762919065915 +385 58 loss.margin 3.024141107918128 +385 58 optimizer.lr 0.0350187701552531 +385 58 negative_sampler.num_negs_per_pos 30.0 +385 58 training.batch_size 1.0 +385 59 model.embedding_dim 2.0 +385 59 model.c_min 0.07981078897118411 +385 59 model.c_max 3.5794933629514296 +385 59 loss.margin 1.9925057158084194 +385 59 optimizer.lr 0.0011563253085131783 +385 59 negative_sampler.num_negs_per_pos 5.0 +385 59 training.batch_size 0.0 +385 60 model.embedding_dim 2.0 +385 60 model.c_min 0.05262847642116022 +385 60 model.c_max 1.1194141066940937 +385 60 loss.margin 7.981429781075897 +385 60 optimizer.lr 0.0012560719523688857 +385 60 negative_sampler.num_negs_per_pos 16.0 +385 60 training.batch_size 1.0 +385 61 model.embedding_dim 1.0 +385 61 model.c_min 0.03993918622607872 +385 61 model.c_max 4.648281209750474 +385 61 loss.margin 7.677646988101084 +385 61 optimizer.lr 0.02214461889628983 +385 61 negative_sampler.num_negs_per_pos 45.0 +385 61 training.batch_size 0.0 +385 62 model.embedding_dim 0.0 +385 62 model.c_min 0.06360181709567336 +385 62 model.c_max 2.189770847478046 +385 62 loss.margin 1.6297368425063141 +385 62 optimizer.lr 0.008456623269529059 +385 62 negative_sampler.num_negs_per_pos 20.0 +385 62 training.batch_size 1.0 +385 63 model.embedding_dim 0.0 +385 63 model.c_min 0.05229211861714848 +385 63 model.c_max 1.63308046985316 +385 63 loss.margin 6.650274872367737 +385 63 optimizer.lr 0.002697227782158272 +385 63 negative_sampler.num_negs_per_pos 23.0 +385 63 training.batch_size 0.0 +385 64 model.embedding_dim 2.0 +385 64 model.c_min 0.021133814812130676 +385 
64 model.c_max 5.060695700864597 +385 64 loss.margin 8.885522974097821 +385 64 optimizer.lr 0.07917838173039828 +385 64 negative_sampler.num_negs_per_pos 42.0 +385 64 training.batch_size 0.0 +385 65 model.embedding_dim 0.0 +385 65 model.c_min 0.09456586464158989 +385 65 model.c_max 7.750393644692236 +385 65 loss.margin 0.8004594079178353 +385 65 optimizer.lr 0.04004760571491903 +385 65 negative_sampler.num_negs_per_pos 53.0 +385 65 training.batch_size 0.0 +385 66 model.embedding_dim 0.0 +385 66 model.c_min 0.02598142866268526 +385 66 model.c_max 2.1700774423901525 +385 66 loss.margin 0.5544990804161136 +385 66 optimizer.lr 0.06175970387973235 +385 66 negative_sampler.num_negs_per_pos 0.0 +385 66 training.batch_size 0.0 +385 67 model.embedding_dim 2.0 +385 67 model.c_min 0.04779297099330661 +385 67 model.c_max 6.298321258867693 +385 67 loss.margin 3.704168878653128 +385 67 optimizer.lr 0.015057975776721045 +385 67 negative_sampler.num_negs_per_pos 17.0 +385 67 training.batch_size 0.0 +385 68 model.embedding_dim 1.0 +385 68 model.c_min 0.020966715980636045 +385 68 model.c_max 1.576250073871501 +385 68 loss.margin 4.956443579105545 +385 68 optimizer.lr 0.0946254592736979 +385 68 negative_sampler.num_negs_per_pos 23.0 +385 68 training.batch_size 2.0 +385 69 model.embedding_dim 2.0 +385 69 model.c_min 0.02551306184042186 +385 69 model.c_max 7.580919468837992 +385 69 loss.margin 3.739958974235511 +385 69 optimizer.lr 0.04892238656004097 +385 69 negative_sampler.num_negs_per_pos 78.0 +385 69 training.batch_size 1.0 +385 70 model.embedding_dim 1.0 +385 70 model.c_min 0.0199070725517585 +385 70 model.c_max 5.6875986247427 +385 70 loss.margin 5.105180703559695 +385 70 optimizer.lr 0.06018971192274822 +385 70 negative_sampler.num_negs_per_pos 3.0 +385 70 training.batch_size 1.0 +385 71 model.embedding_dim 1.0 +385 71 model.c_min 0.030088046990007434 +385 71 model.c_max 7.182876698835072 +385 71 loss.margin 7.412115043142003 +385 71 optimizer.lr 0.06318017947840547 +385 71 
negative_sampler.num_negs_per_pos 54.0 +385 71 training.batch_size 2.0 +385 72 model.embedding_dim 1.0 +385 72 model.c_min 0.03496629512614342 +385 72 model.c_max 6.337395547907114 +385 72 loss.margin 2.133149996613142 +385 72 optimizer.lr 0.0049688881812311745 +385 72 negative_sampler.num_negs_per_pos 47.0 +385 72 training.batch_size 0.0 +385 73 model.embedding_dim 2.0 +385 73 model.c_min 0.019050969761526475 +385 73 model.c_max 9.396200326599317 +385 73 loss.margin 6.582322246367637 +385 73 optimizer.lr 0.0018542687871625876 +385 73 negative_sampler.num_negs_per_pos 58.0 +385 73 training.batch_size 1.0 +385 74 model.embedding_dim 1.0 +385 74 model.c_min 0.09626049563340405 +385 74 model.c_max 9.629483364967243 +385 74 loss.margin 8.67761785961212 +385 74 optimizer.lr 0.0030274499038744633 +385 74 negative_sampler.num_negs_per_pos 59.0 +385 74 training.batch_size 1.0 +385 75 model.embedding_dim 2.0 +385 75 model.c_min 0.03557235769179148 +385 75 model.c_max 5.775791350585664 +385 75 loss.margin 7.069343657338788 +385 75 optimizer.lr 0.001583789351108498 +385 75 negative_sampler.num_negs_per_pos 34.0 +385 75 training.batch_size 1.0 +385 76 model.embedding_dim 2.0 +385 76 model.c_min 0.09958285812943926 +385 76 model.c_max 7.261936156217395 +385 76 loss.margin 9.109828607225092 +385 76 optimizer.lr 0.011705295183151243 +385 76 negative_sampler.num_negs_per_pos 91.0 +385 76 training.batch_size 0.0 +385 77 model.embedding_dim 1.0 +385 77 model.c_min 0.08151600140267933 +385 77 model.c_max 6.8068253971798525 +385 77 loss.margin 2.050418177476951 +385 77 optimizer.lr 0.018801488044486576 +385 77 negative_sampler.num_negs_per_pos 70.0 +385 77 training.batch_size 0.0 +385 78 model.embedding_dim 2.0 +385 78 model.c_min 0.06502271963516792 +385 78 model.c_max 1.1237667332983143 +385 78 loss.margin 2.969517512831401 +385 78 optimizer.lr 0.00302915676864544 +385 78 negative_sampler.num_negs_per_pos 33.0 +385 78 training.batch_size 1.0 +385 79 model.embedding_dim 0.0 +385 79 
model.c_min 0.01914461060590784 +385 79 model.c_max 4.342733830201611 +385 79 loss.margin 5.645566451273857 +385 79 optimizer.lr 0.01802136701994926 +385 79 negative_sampler.num_negs_per_pos 44.0 +385 79 training.batch_size 2.0 +385 80 model.embedding_dim 2.0 +385 80 model.c_min 0.025298671510985988 +385 80 model.c_max 7.159124815404145 +385 80 loss.margin 3.0424296938539284 +385 80 optimizer.lr 0.03356639038548959 +385 80 negative_sampler.num_negs_per_pos 39.0 +385 80 training.batch_size 2.0 +385 81 model.embedding_dim 1.0 +385 81 model.c_min 0.04417443439872346 +385 81 model.c_max 7.244855732321475 +385 81 loss.margin 0.617633757133729 +385 81 optimizer.lr 0.005468792579585931 +385 81 negative_sampler.num_negs_per_pos 95.0 +385 81 training.batch_size 0.0 +385 82 model.embedding_dim 0.0 +385 82 model.c_min 0.07760725729734322 +385 82 model.c_max 3.7117294029181185 +385 82 loss.margin 6.310916351640286 +385 82 optimizer.lr 0.029794086780547617 +385 82 negative_sampler.num_negs_per_pos 47.0 +385 82 training.batch_size 1.0 +385 83 model.embedding_dim 2.0 +385 83 model.c_min 0.021559413699222582 +385 83 model.c_max 3.858571628052733 +385 83 loss.margin 1.1329199665670837 +385 83 optimizer.lr 0.0035176419670883367 +385 83 negative_sampler.num_negs_per_pos 36.0 +385 83 training.batch_size 2.0 +385 84 model.embedding_dim 0.0 +385 84 model.c_min 0.015326166037466215 +385 84 model.c_max 5.744198322754029 +385 84 loss.margin 2.5773833863095934 +385 84 optimizer.lr 0.032126691443944096 +385 84 negative_sampler.num_negs_per_pos 81.0 +385 84 training.batch_size 0.0 +385 85 model.embedding_dim 1.0 +385 85 model.c_min 0.010122760394600536 +385 85 model.c_max 1.3904818245236492 +385 85 loss.margin 6.00837191677916 +385 85 optimizer.lr 0.017169084877642148 +385 85 negative_sampler.num_negs_per_pos 69.0 +385 85 training.batch_size 2.0 +385 86 model.embedding_dim 0.0 +385 86 model.c_min 0.037720312493284947 +385 86 model.c_max 2.929802885965807 +385 86 loss.margin 9.188668755683 
+385 86 optimizer.lr 0.04533291285041036 +385 86 negative_sampler.num_negs_per_pos 25.0 +385 86 training.batch_size 0.0 +385 87 model.embedding_dim 0.0 +385 87 model.c_min 0.09253299821169404 +385 87 model.c_max 7.0703647075755 +385 87 loss.margin 6.465504149960007 +385 87 optimizer.lr 0.004857452588887466 +385 87 negative_sampler.num_negs_per_pos 92.0 +385 87 training.batch_size 0.0 +385 88 model.embedding_dim 1.0 +385 88 model.c_min 0.0127871937371803 +385 88 model.c_max 4.038527923766287 +385 88 loss.margin 4.852505200686242 +385 88 optimizer.lr 0.035641672518720774 +385 88 negative_sampler.num_negs_per_pos 90.0 +385 88 training.batch_size 1.0 +385 89 model.embedding_dim 0.0 +385 89 model.c_min 0.02743884975089345 +385 89 model.c_max 8.965725034595328 +385 89 loss.margin 8.307609973148294 +385 89 optimizer.lr 0.0016049385862471528 +385 89 negative_sampler.num_negs_per_pos 28.0 +385 89 training.batch_size 2.0 +385 90 model.embedding_dim 1.0 +385 90 model.c_min 0.011927356658202968 +385 90 model.c_max 3.459547700631825 +385 90 loss.margin 4.698081326686706 +385 90 optimizer.lr 0.009225655457046492 +385 90 negative_sampler.num_negs_per_pos 78.0 +385 90 training.batch_size 2.0 +385 91 model.embedding_dim 1.0 +385 91 model.c_min 0.055544767221738754 +385 91 model.c_max 4.066712429706174 +385 91 loss.margin 3.3861077090089733 +385 91 optimizer.lr 0.006791573700970208 +385 91 negative_sampler.num_negs_per_pos 8.0 +385 91 training.batch_size 2.0 +385 92 model.embedding_dim 2.0 +385 92 model.c_min 0.02941471624444428 +385 92 model.c_max 9.451635374130987 +385 92 loss.margin 4.769221570624395 +385 92 optimizer.lr 0.025390513097814586 +385 92 negative_sampler.num_negs_per_pos 26.0 +385 92 training.batch_size 0.0 +385 93 model.embedding_dim 0.0 +385 93 model.c_min 0.015746826234070276 +385 93 model.c_max 4.804673962374789 +385 93 loss.margin 6.291009419796193 +385 93 optimizer.lr 0.0028270875907522694 +385 93 negative_sampler.num_negs_per_pos 78.0 +385 93 
training.batch_size 2.0 +385 94 model.embedding_dim 0.0 +385 94 model.c_min 0.0918854364201431 +385 94 model.c_max 7.547917996860214 +385 94 loss.margin 8.448328750067112 +385 94 optimizer.lr 0.001138728605971541 +385 94 negative_sampler.num_negs_per_pos 1.0 +385 94 training.batch_size 1.0 +385 95 model.embedding_dim 2.0 +385 95 model.c_min 0.02093289506153356 +385 95 model.c_max 3.813541615442998 +385 95 loss.margin 5.606222838957293 +385 95 optimizer.lr 0.036436174435974154 +385 95 negative_sampler.num_negs_per_pos 97.0 +385 95 training.batch_size 2.0 +385 96 model.embedding_dim 1.0 +385 96 model.c_min 0.013880808155597262 +385 96 model.c_max 4.533662863643179 +385 96 loss.margin 6.384948491110012 +385 96 optimizer.lr 0.022910655628428034 +385 96 negative_sampler.num_negs_per_pos 77.0 +385 96 training.batch_size 0.0 +385 97 model.embedding_dim 1.0 +385 97 model.c_min 0.069482509817456 +385 97 model.c_max 5.578895805340073 +385 97 loss.margin 9.271314357140435 +385 97 optimizer.lr 0.005215450380793165 +385 97 negative_sampler.num_negs_per_pos 91.0 +385 97 training.batch_size 2.0 +385 98 model.embedding_dim 2.0 +385 98 model.c_min 0.046561876724765826 +385 98 model.c_max 3.1943749043048384 +385 98 loss.margin 6.702294918590093 +385 98 optimizer.lr 0.0015813534002890328 +385 98 negative_sampler.num_negs_per_pos 27.0 +385 98 training.batch_size 1.0 +385 99 model.embedding_dim 2.0 +385 99 model.c_min 0.014857196093361636 +385 99 model.c_max 4.166564741976472 +385 99 loss.margin 2.5809251884367206 +385 99 optimizer.lr 0.0011471320672596867 +385 99 negative_sampler.num_negs_per_pos 23.0 +385 99 training.batch_size 0.0 +385 100 model.embedding_dim 2.0 +385 100 model.c_min 0.04647323416557813 +385 100 model.c_max 8.851939267225514 +385 100 loss.margin 2.2326145606296555 +385 100 optimizer.lr 0.0018782980730236373 +385 100 negative_sampler.num_negs_per_pos 82.0 +385 100 training.batch_size 1.0 +385 1 dataset """kinships""" +385 1 model """kg2e""" +385 1 loss 
"""marginranking""" +385 1 regularizer """no""" +385 1 optimizer """adam""" +385 1 training_loop """owa""" +385 1 negative_sampler """basic""" +385 1 evaluator """rankbased""" +385 2 dataset """kinships""" +385 2 model """kg2e""" +385 2 loss """marginranking""" +385 2 regularizer """no""" +385 2 optimizer """adam""" +385 2 training_loop """owa""" +385 2 negative_sampler """basic""" +385 2 evaluator """rankbased""" +385 3 dataset """kinships""" +385 3 model """kg2e""" +385 3 loss """marginranking""" +385 3 regularizer """no""" +385 3 optimizer """adam""" +385 3 training_loop """owa""" +385 3 negative_sampler """basic""" +385 3 evaluator """rankbased""" +385 4 dataset """kinships""" +385 4 model """kg2e""" +385 4 loss """marginranking""" +385 4 regularizer """no""" +385 4 optimizer """adam""" +385 4 training_loop """owa""" +385 4 negative_sampler """basic""" +385 4 evaluator """rankbased""" +385 5 dataset """kinships""" +385 5 model """kg2e""" +385 5 loss """marginranking""" +385 5 regularizer """no""" +385 5 optimizer """adam""" +385 5 training_loop """owa""" +385 5 negative_sampler """basic""" +385 5 evaluator """rankbased""" +385 6 dataset """kinships""" +385 6 model """kg2e""" +385 6 loss """marginranking""" +385 6 regularizer """no""" +385 6 optimizer """adam""" +385 6 training_loop """owa""" +385 6 negative_sampler """basic""" +385 6 evaluator """rankbased""" +385 7 dataset """kinships""" +385 7 model """kg2e""" +385 7 loss """marginranking""" +385 7 regularizer """no""" +385 7 optimizer """adam""" +385 7 training_loop """owa""" +385 7 negative_sampler """basic""" +385 7 evaluator """rankbased""" +385 8 dataset """kinships""" +385 8 model """kg2e""" +385 8 loss """marginranking""" +385 8 regularizer """no""" +385 8 optimizer """adam""" +385 8 training_loop """owa""" +385 8 negative_sampler """basic""" +385 8 evaluator """rankbased""" +385 9 dataset """kinships""" +385 9 model """kg2e""" +385 9 loss """marginranking""" +385 9 regularizer """no""" +385 9 
optimizer """adam""" +385 9 training_loop """owa""" +385 9 negative_sampler """basic""" +385 9 evaluator """rankbased""" +385 10 dataset """kinships""" +385 10 model """kg2e""" +385 10 loss """marginranking""" +385 10 regularizer """no""" +385 10 optimizer """adam""" +385 10 training_loop """owa""" +385 10 negative_sampler """basic""" +385 10 evaluator """rankbased""" +385 11 dataset """kinships""" +385 11 model """kg2e""" +385 11 loss """marginranking""" +385 11 regularizer """no""" +385 11 optimizer """adam""" +385 11 training_loop """owa""" +385 11 negative_sampler """basic""" +385 11 evaluator """rankbased""" +385 12 dataset """kinships""" +385 12 model """kg2e""" +385 12 loss """marginranking""" +385 12 regularizer """no""" +385 12 optimizer """adam""" +385 12 training_loop """owa""" +385 12 negative_sampler """basic""" +385 12 evaluator """rankbased""" +385 13 dataset """kinships""" +385 13 model """kg2e""" +385 13 loss """marginranking""" +385 13 regularizer """no""" +385 13 optimizer """adam""" +385 13 training_loop """owa""" +385 13 negative_sampler """basic""" +385 13 evaluator """rankbased""" +385 14 dataset """kinships""" +385 14 model """kg2e""" +385 14 loss """marginranking""" +385 14 regularizer """no""" +385 14 optimizer """adam""" +385 14 training_loop """owa""" +385 14 negative_sampler """basic""" +385 14 evaluator """rankbased""" +385 15 dataset """kinships""" +385 15 model """kg2e""" +385 15 loss """marginranking""" +385 15 regularizer """no""" +385 15 optimizer """adam""" +385 15 training_loop """owa""" +385 15 negative_sampler """basic""" +385 15 evaluator """rankbased""" +385 16 dataset """kinships""" +385 16 model """kg2e""" +385 16 loss """marginranking""" +385 16 regularizer """no""" +385 16 optimizer """adam""" +385 16 training_loop """owa""" +385 16 negative_sampler """basic""" +385 16 evaluator """rankbased""" +385 17 dataset """kinships""" +385 17 model """kg2e""" +385 17 loss """marginranking""" +385 17 regularizer """no""" +385 17 
optimizer """adam""" +385 17 training_loop """owa""" +385 17 negative_sampler """basic""" +385 17 evaluator """rankbased""" +385 18 dataset """kinships""" +385 18 model """kg2e""" +385 18 loss """marginranking""" +385 18 regularizer """no""" +385 18 optimizer """adam""" +385 18 training_loop """owa""" +385 18 negative_sampler """basic""" +385 18 evaluator """rankbased""" +385 19 dataset """kinships""" +385 19 model """kg2e""" +385 19 loss """marginranking""" +385 19 regularizer """no""" +385 19 optimizer """adam""" +385 19 training_loop """owa""" +385 19 negative_sampler """basic""" +385 19 evaluator """rankbased""" +385 20 dataset """kinships""" +385 20 model """kg2e""" +385 20 loss """marginranking""" +385 20 regularizer """no""" +385 20 optimizer """adam""" +385 20 training_loop """owa""" +385 20 negative_sampler """basic""" +385 20 evaluator """rankbased""" +385 21 dataset """kinships""" +385 21 model """kg2e""" +385 21 loss """marginranking""" +385 21 regularizer """no""" +385 21 optimizer """adam""" +385 21 training_loop """owa""" +385 21 negative_sampler """basic""" +385 21 evaluator """rankbased""" +385 22 dataset """kinships""" +385 22 model """kg2e""" +385 22 loss """marginranking""" +385 22 regularizer """no""" +385 22 optimizer """adam""" +385 22 training_loop """owa""" +385 22 negative_sampler """basic""" +385 22 evaluator """rankbased""" +385 23 dataset """kinships""" +385 23 model """kg2e""" +385 23 loss """marginranking""" +385 23 regularizer """no""" +385 23 optimizer """adam""" +385 23 training_loop """owa""" +385 23 negative_sampler """basic""" +385 23 evaluator """rankbased""" +385 24 dataset """kinships""" +385 24 model """kg2e""" +385 24 loss """marginranking""" +385 24 regularizer """no""" +385 24 optimizer """adam""" +385 24 training_loop """owa""" +385 24 negative_sampler """basic""" +385 24 evaluator """rankbased""" +385 25 dataset """kinships""" +385 25 model """kg2e""" +385 25 loss """marginranking""" +385 25 regularizer """no""" +385 25 
optimizer """adam""" +385 25 training_loop """owa""" +385 25 negative_sampler """basic""" +385 25 evaluator """rankbased""" +385 26 dataset """kinships""" +385 26 model """kg2e""" +385 26 loss """marginranking""" +385 26 regularizer """no""" +385 26 optimizer """adam""" +385 26 training_loop """owa""" +385 26 negative_sampler """basic""" +385 26 evaluator """rankbased""" +385 27 dataset """kinships""" +385 27 model """kg2e""" +385 27 loss """marginranking""" +385 27 regularizer """no""" +385 27 optimizer """adam""" +385 27 training_loop """owa""" +385 27 negative_sampler """basic""" +385 27 evaluator """rankbased""" +385 28 dataset """kinships""" +385 28 model """kg2e""" +385 28 loss """marginranking""" +385 28 regularizer """no""" +385 28 optimizer """adam""" +385 28 training_loop """owa""" +385 28 negative_sampler """basic""" +385 28 evaluator """rankbased""" +385 29 dataset """kinships""" +385 29 model """kg2e""" +385 29 loss """marginranking""" +385 29 regularizer """no""" +385 29 optimizer """adam""" +385 29 training_loop """owa""" +385 29 negative_sampler """basic""" +385 29 evaluator """rankbased""" +385 30 dataset """kinships""" +385 30 model """kg2e""" +385 30 loss """marginranking""" +385 30 regularizer """no""" +385 30 optimizer """adam""" +385 30 training_loop """owa""" +385 30 negative_sampler """basic""" +385 30 evaluator """rankbased""" +385 31 dataset """kinships""" +385 31 model """kg2e""" +385 31 loss """marginranking""" +385 31 regularizer """no""" +385 31 optimizer """adam""" +385 31 training_loop """owa""" +385 31 negative_sampler """basic""" +385 31 evaluator """rankbased""" +385 32 dataset """kinships""" +385 32 model """kg2e""" +385 32 loss """marginranking""" +385 32 regularizer """no""" +385 32 optimizer """adam""" +385 32 training_loop """owa""" +385 32 negative_sampler """basic""" +385 32 evaluator """rankbased""" +385 33 dataset """kinships""" +385 33 model """kg2e""" +385 33 loss """marginranking""" +385 33 regularizer """no""" +385 33 
optimizer """adam""" +385 33 training_loop """owa""" +385 33 negative_sampler """basic""" +385 33 evaluator """rankbased""" +385 34 dataset """kinships""" +385 34 model """kg2e""" +385 34 loss """marginranking""" +385 34 regularizer """no""" +385 34 optimizer """adam""" +385 34 training_loop """owa""" +385 34 negative_sampler """basic""" +385 34 evaluator """rankbased""" +385 35 dataset """kinships""" +385 35 model """kg2e""" +385 35 loss """marginranking""" +385 35 regularizer """no""" +385 35 optimizer """adam""" +385 35 training_loop """owa""" +385 35 negative_sampler """basic""" +385 35 evaluator """rankbased""" +385 36 dataset """kinships""" +385 36 model """kg2e""" +385 36 loss """marginranking""" +385 36 regularizer """no""" +385 36 optimizer """adam""" +385 36 training_loop """owa""" +385 36 negative_sampler """basic""" +385 36 evaluator """rankbased""" +385 37 dataset """kinships""" +385 37 model """kg2e""" +385 37 loss """marginranking""" +385 37 regularizer """no""" +385 37 optimizer """adam""" +385 37 training_loop """owa""" +385 37 negative_sampler """basic""" +385 37 evaluator """rankbased""" +385 38 dataset """kinships""" +385 38 model """kg2e""" +385 38 loss """marginranking""" +385 38 regularizer """no""" +385 38 optimizer """adam""" +385 38 training_loop """owa""" +385 38 negative_sampler """basic""" +385 38 evaluator """rankbased""" +385 39 dataset """kinships""" +385 39 model """kg2e""" +385 39 loss """marginranking""" +385 39 regularizer """no""" +385 39 optimizer """adam""" +385 39 training_loop """owa""" +385 39 negative_sampler """basic""" +385 39 evaluator """rankbased""" +385 40 dataset """kinships""" +385 40 model """kg2e""" +385 40 loss """marginranking""" +385 40 regularizer """no""" +385 40 optimizer """adam""" +385 40 training_loop """owa""" +385 40 negative_sampler """basic""" +385 40 evaluator """rankbased""" +385 41 dataset """kinships""" +385 41 model """kg2e""" +385 41 loss """marginranking""" +385 41 regularizer """no""" +385 41 
optimizer """adam""" +385 41 training_loop """owa""" +385 41 negative_sampler """basic""" +385 41 evaluator """rankbased""" +385 42 dataset """kinships""" +385 42 model """kg2e""" +385 42 loss """marginranking""" +385 42 regularizer """no""" +385 42 optimizer """adam""" +385 42 training_loop """owa""" +385 42 negative_sampler """basic""" +385 42 evaluator """rankbased""" +385 43 dataset """kinships""" +385 43 model """kg2e""" +385 43 loss """marginranking""" +385 43 regularizer """no""" +385 43 optimizer """adam""" +385 43 training_loop """owa""" +385 43 negative_sampler """basic""" +385 43 evaluator """rankbased""" +385 44 dataset """kinships""" +385 44 model """kg2e""" +385 44 loss """marginranking""" +385 44 regularizer """no""" +385 44 optimizer """adam""" +385 44 training_loop """owa""" +385 44 negative_sampler """basic""" +385 44 evaluator """rankbased""" +385 45 dataset """kinships""" +385 45 model """kg2e""" +385 45 loss """marginranking""" +385 45 regularizer """no""" +385 45 optimizer """adam""" +385 45 training_loop """owa""" +385 45 negative_sampler """basic""" +385 45 evaluator """rankbased""" +385 46 dataset """kinships""" +385 46 model """kg2e""" +385 46 loss """marginranking""" +385 46 regularizer """no""" +385 46 optimizer """adam""" +385 46 training_loop """owa""" +385 46 negative_sampler """basic""" +385 46 evaluator """rankbased""" +385 47 dataset """kinships""" +385 47 model """kg2e""" +385 47 loss """marginranking""" +385 47 regularizer """no""" +385 47 optimizer """adam""" +385 47 training_loop """owa""" +385 47 negative_sampler """basic""" +385 47 evaluator """rankbased""" +385 48 dataset """kinships""" +385 48 model """kg2e""" +385 48 loss """marginranking""" +385 48 regularizer """no""" +385 48 optimizer """adam""" +385 48 training_loop """owa""" +385 48 negative_sampler """basic""" +385 48 evaluator """rankbased""" +385 49 dataset """kinships""" +385 49 model """kg2e""" +385 49 loss """marginranking""" +385 49 regularizer """no""" +385 49 
optimizer """adam""" +385 49 training_loop """owa""" +385 49 negative_sampler """basic""" +385 49 evaluator """rankbased""" +385 50 dataset """kinships""" +385 50 model """kg2e""" +385 50 loss """marginranking""" +385 50 regularizer """no""" +385 50 optimizer """adam""" +385 50 training_loop """owa""" +385 50 negative_sampler """basic""" +385 50 evaluator """rankbased""" +385 51 dataset """kinships""" +385 51 model """kg2e""" +385 51 loss """marginranking""" +385 51 regularizer """no""" +385 51 optimizer """adam""" +385 51 training_loop """owa""" +385 51 negative_sampler """basic""" +385 51 evaluator """rankbased""" +385 52 dataset """kinships""" +385 52 model """kg2e""" +385 52 loss """marginranking""" +385 52 regularizer """no""" +385 52 optimizer """adam""" +385 52 training_loop """owa""" +385 52 negative_sampler """basic""" +385 52 evaluator """rankbased""" +385 53 dataset """kinships""" +385 53 model """kg2e""" +385 53 loss """marginranking""" +385 53 regularizer """no""" +385 53 optimizer """adam""" +385 53 training_loop """owa""" +385 53 negative_sampler """basic""" +385 53 evaluator """rankbased""" +385 54 dataset """kinships""" +385 54 model """kg2e""" +385 54 loss """marginranking""" +385 54 regularizer """no""" +385 54 optimizer """adam""" +385 54 training_loop """owa""" +385 54 negative_sampler """basic""" +385 54 evaluator """rankbased""" +385 55 dataset """kinships""" +385 55 model """kg2e""" +385 55 loss """marginranking""" +385 55 regularizer """no""" +385 55 optimizer """adam""" +385 55 training_loop """owa""" +385 55 negative_sampler """basic""" +385 55 evaluator """rankbased""" +385 56 dataset """kinships""" +385 56 model """kg2e""" +385 56 loss """marginranking""" +385 56 regularizer """no""" +385 56 optimizer """adam""" +385 56 training_loop """owa""" +385 56 negative_sampler """basic""" +385 56 evaluator """rankbased""" +385 57 dataset """kinships""" +385 57 model """kg2e""" +385 57 loss """marginranking""" +385 57 regularizer """no""" +385 57 
optimizer """adam""" +385 57 training_loop """owa""" +385 57 negative_sampler """basic""" +385 57 evaluator """rankbased""" +385 58 dataset """kinships""" +385 58 model """kg2e""" +385 58 loss """marginranking""" +385 58 regularizer """no""" +385 58 optimizer """adam""" +385 58 training_loop """owa""" +385 58 negative_sampler """basic""" +385 58 evaluator """rankbased""" +385 59 dataset """kinships""" +385 59 model """kg2e""" +385 59 loss """marginranking""" +385 59 regularizer """no""" +385 59 optimizer """adam""" +385 59 training_loop """owa""" +385 59 negative_sampler """basic""" +385 59 evaluator """rankbased""" +385 60 dataset """kinships""" +385 60 model """kg2e""" +385 60 loss """marginranking""" +385 60 regularizer """no""" +385 60 optimizer """adam""" +385 60 training_loop """owa""" +385 60 negative_sampler """basic""" +385 60 evaluator """rankbased""" +385 61 dataset """kinships""" +385 61 model """kg2e""" +385 61 loss """marginranking""" +385 61 regularizer """no""" +385 61 optimizer """adam""" +385 61 training_loop """owa""" +385 61 negative_sampler """basic""" +385 61 evaluator """rankbased""" +385 62 dataset """kinships""" +385 62 model """kg2e""" +385 62 loss """marginranking""" +385 62 regularizer """no""" +385 62 optimizer """adam""" +385 62 training_loop """owa""" +385 62 negative_sampler """basic""" +385 62 evaluator """rankbased""" +385 63 dataset """kinships""" +385 63 model """kg2e""" +385 63 loss """marginranking""" +385 63 regularizer """no""" +385 63 optimizer """adam""" +385 63 training_loop """owa""" +385 63 negative_sampler """basic""" +385 63 evaluator """rankbased""" +385 64 dataset """kinships""" +385 64 model """kg2e""" +385 64 loss """marginranking""" +385 64 regularizer """no""" +385 64 optimizer """adam""" +385 64 training_loop """owa""" +385 64 negative_sampler """basic""" +385 64 evaluator """rankbased""" +385 65 dataset """kinships""" +385 65 model """kg2e""" +385 65 loss """marginranking""" +385 65 regularizer """no""" +385 65 
optimizer """adam""" +385 65 training_loop """owa""" +385 65 negative_sampler """basic""" +385 65 evaluator """rankbased""" +385 66 dataset """kinships""" +385 66 model """kg2e""" +385 66 loss """marginranking""" +385 66 regularizer """no""" +385 66 optimizer """adam""" +385 66 training_loop """owa""" +385 66 negative_sampler """basic""" +385 66 evaluator """rankbased""" +385 67 dataset """kinships""" +385 67 model """kg2e""" +385 67 loss """marginranking""" +385 67 regularizer """no""" +385 67 optimizer """adam""" +385 67 training_loop """owa""" +385 67 negative_sampler """basic""" +385 67 evaluator """rankbased""" +385 68 dataset """kinships""" +385 68 model """kg2e""" +385 68 loss """marginranking""" +385 68 regularizer """no""" +385 68 optimizer """adam""" +385 68 training_loop """owa""" +385 68 negative_sampler """basic""" +385 68 evaluator """rankbased""" +385 69 dataset """kinships""" +385 69 model """kg2e""" +385 69 loss """marginranking""" +385 69 regularizer """no""" +385 69 optimizer """adam""" +385 69 training_loop """owa""" +385 69 negative_sampler """basic""" +385 69 evaluator """rankbased""" +385 70 dataset """kinships""" +385 70 model """kg2e""" +385 70 loss """marginranking""" +385 70 regularizer """no""" +385 70 optimizer """adam""" +385 70 training_loop """owa""" +385 70 negative_sampler """basic""" +385 70 evaluator """rankbased""" +385 71 dataset """kinships""" +385 71 model """kg2e""" +385 71 loss """marginranking""" +385 71 regularizer """no""" +385 71 optimizer """adam""" +385 71 training_loop """owa""" +385 71 negative_sampler """basic""" +385 71 evaluator """rankbased""" +385 72 dataset """kinships""" +385 72 model """kg2e""" +385 72 loss """marginranking""" +385 72 regularizer """no""" +385 72 optimizer """adam""" +385 72 training_loop """owa""" +385 72 negative_sampler """basic""" +385 72 evaluator """rankbased""" +385 73 dataset """kinships""" +385 73 model """kg2e""" +385 73 loss """marginranking""" +385 73 regularizer """no""" +385 73 
optimizer """adam""" +385 73 training_loop """owa""" +385 73 negative_sampler """basic""" +385 73 evaluator """rankbased""" +385 74 dataset """kinships""" +385 74 model """kg2e""" +385 74 loss """marginranking""" +385 74 regularizer """no""" +385 74 optimizer """adam""" +385 74 training_loop """owa""" +385 74 negative_sampler """basic""" +385 74 evaluator """rankbased""" +385 75 dataset """kinships""" +385 75 model """kg2e""" +385 75 loss """marginranking""" +385 75 regularizer """no""" +385 75 optimizer """adam""" +385 75 training_loop """owa""" +385 75 negative_sampler """basic""" +385 75 evaluator """rankbased""" +385 76 dataset """kinships""" +385 76 model """kg2e""" +385 76 loss """marginranking""" +385 76 regularizer """no""" +385 76 optimizer """adam""" +385 76 training_loop """owa""" +385 76 negative_sampler """basic""" +385 76 evaluator """rankbased""" +385 77 dataset """kinships""" +385 77 model """kg2e""" +385 77 loss """marginranking""" +385 77 regularizer """no""" +385 77 optimizer """adam""" +385 77 training_loop """owa""" +385 77 negative_sampler """basic""" +385 77 evaluator """rankbased""" +385 78 dataset """kinships""" +385 78 model """kg2e""" +385 78 loss """marginranking""" +385 78 regularizer """no""" +385 78 optimizer """adam""" +385 78 training_loop """owa""" +385 78 negative_sampler """basic""" +385 78 evaluator """rankbased""" +385 79 dataset """kinships""" +385 79 model """kg2e""" +385 79 loss """marginranking""" +385 79 regularizer """no""" +385 79 optimizer """adam""" +385 79 training_loop """owa""" +385 79 negative_sampler """basic""" +385 79 evaluator """rankbased""" +385 80 dataset """kinships""" +385 80 model """kg2e""" +385 80 loss """marginranking""" +385 80 regularizer """no""" +385 80 optimizer """adam""" +385 80 training_loop """owa""" +385 80 negative_sampler """basic""" +385 80 evaluator """rankbased""" +385 81 dataset """kinships""" +385 81 model """kg2e""" +385 81 loss """marginranking""" +385 81 regularizer """no""" +385 81 
optimizer """adam""" +385 81 training_loop """owa""" +385 81 negative_sampler """basic""" +385 81 evaluator """rankbased""" +385 82 dataset """kinships""" +385 82 model """kg2e""" +385 82 loss """marginranking""" +385 82 regularizer """no""" +385 82 optimizer """adam""" +385 82 training_loop """owa""" +385 82 negative_sampler """basic""" +385 82 evaluator """rankbased""" +385 83 dataset """kinships""" +385 83 model """kg2e""" +385 83 loss """marginranking""" +385 83 regularizer """no""" +385 83 optimizer """adam""" +385 83 training_loop """owa""" +385 83 negative_sampler """basic""" +385 83 evaluator """rankbased""" +385 84 dataset """kinships""" +385 84 model """kg2e""" +385 84 loss """marginranking""" +385 84 regularizer """no""" +385 84 optimizer """adam""" +385 84 training_loop """owa""" +385 84 negative_sampler """basic""" +385 84 evaluator """rankbased""" +385 85 dataset """kinships""" +385 85 model """kg2e""" +385 85 loss """marginranking""" +385 85 regularizer """no""" +385 85 optimizer """adam""" +385 85 training_loop """owa""" +385 85 negative_sampler """basic""" +385 85 evaluator """rankbased""" +385 86 dataset """kinships""" +385 86 model """kg2e""" +385 86 loss """marginranking""" +385 86 regularizer """no""" +385 86 optimizer """adam""" +385 86 training_loop """owa""" +385 86 negative_sampler """basic""" +385 86 evaluator """rankbased""" +385 87 dataset """kinships""" +385 87 model """kg2e""" +385 87 loss """marginranking""" +385 87 regularizer """no""" +385 87 optimizer """adam""" +385 87 training_loop """owa""" +385 87 negative_sampler """basic""" +385 87 evaluator """rankbased""" +385 88 dataset """kinships""" +385 88 model """kg2e""" +385 88 loss """marginranking""" +385 88 regularizer """no""" +385 88 optimizer """adam""" +385 88 training_loop """owa""" +385 88 negative_sampler """basic""" +385 88 evaluator """rankbased""" +385 89 dataset """kinships""" +385 89 model """kg2e""" +385 89 loss """marginranking""" +385 89 regularizer """no""" +385 89 
optimizer """adam""" +385 89 training_loop """owa""" +385 89 negative_sampler """basic""" +385 89 evaluator """rankbased""" +385 90 dataset """kinships""" +385 90 model """kg2e""" +385 90 loss """marginranking""" +385 90 regularizer """no""" +385 90 optimizer """adam""" +385 90 training_loop """owa""" +385 90 negative_sampler """basic""" +385 90 evaluator """rankbased""" +385 91 dataset """kinships""" +385 91 model """kg2e""" +385 91 loss """marginranking""" +385 91 regularizer """no""" +385 91 optimizer """adam""" +385 91 training_loop """owa""" +385 91 negative_sampler """basic""" +385 91 evaluator """rankbased""" +385 92 dataset """kinships""" +385 92 model """kg2e""" +385 92 loss """marginranking""" +385 92 regularizer """no""" +385 92 optimizer """adam""" +385 92 training_loop """owa""" +385 92 negative_sampler """basic""" +385 92 evaluator """rankbased""" +385 93 dataset """kinships""" +385 93 model """kg2e""" +385 93 loss """marginranking""" +385 93 regularizer """no""" +385 93 optimizer """adam""" +385 93 training_loop """owa""" +385 93 negative_sampler """basic""" +385 93 evaluator """rankbased""" +385 94 dataset """kinships""" +385 94 model """kg2e""" +385 94 loss """marginranking""" +385 94 regularizer """no""" +385 94 optimizer """adam""" +385 94 training_loop """owa""" +385 94 negative_sampler """basic""" +385 94 evaluator """rankbased""" +385 95 dataset """kinships""" +385 95 model """kg2e""" +385 95 loss """marginranking""" +385 95 regularizer """no""" +385 95 optimizer """adam""" +385 95 training_loop """owa""" +385 95 negative_sampler """basic""" +385 95 evaluator """rankbased""" +385 96 dataset """kinships""" +385 96 model """kg2e""" +385 96 loss """marginranking""" +385 96 regularizer """no""" +385 96 optimizer """adam""" +385 96 training_loop """owa""" +385 96 negative_sampler """basic""" +385 96 evaluator """rankbased""" +385 97 dataset """kinships""" +385 97 model """kg2e""" +385 97 loss """marginranking""" +385 97 regularizer """no""" +385 97 
optimizer """adam""" +385 97 training_loop """owa""" +385 97 negative_sampler """basic""" +385 97 evaluator """rankbased""" +385 98 dataset """kinships""" +385 98 model """kg2e""" +385 98 loss """marginranking""" +385 98 regularizer """no""" +385 98 optimizer """adam""" +385 98 training_loop """owa""" +385 98 negative_sampler """basic""" +385 98 evaluator """rankbased""" +385 99 dataset """kinships""" +385 99 model """kg2e""" +385 99 loss """marginranking""" +385 99 regularizer """no""" +385 99 optimizer """adam""" +385 99 training_loop """owa""" +385 99 negative_sampler """basic""" +385 99 evaluator """rankbased""" +385 100 dataset """kinships""" +385 100 model """kg2e""" +385 100 loss """marginranking""" +385 100 regularizer """no""" +385 100 optimizer """adam""" +385 100 training_loop """owa""" +385 100 negative_sampler """basic""" +385 100 evaluator """rankbased""" +386 1 model.embedding_dim 2.0 +386 1 model.c_min 0.03036265536746139 +386 1 model.c_max 8.578537008212738 +386 1 loss.margin 5.0045591945475305 +386 1 optimizer.lr 0.0017853416537780189 +386 1 negative_sampler.num_negs_per_pos 16.0 +386 1 training.batch_size 1.0 +386 2 model.embedding_dim 1.0 +386 2 model.c_min 0.012276104107499365 +386 2 model.c_max 5.4496846412843105 +386 2 loss.margin 3.3944251725408696 +386 2 optimizer.lr 0.010509191935795456 +386 2 negative_sampler.num_negs_per_pos 22.0 +386 2 training.batch_size 2.0 +386 3 model.embedding_dim 0.0 +386 3 model.c_min 0.011811679381121867 +386 3 model.c_max 5.977009746075492 +386 3 loss.margin 2.932905978484246 +386 3 optimizer.lr 0.0010537425202613382 +386 3 negative_sampler.num_negs_per_pos 69.0 +386 3 training.batch_size 2.0 +386 4 model.embedding_dim 0.0 +386 4 model.c_min 0.061662340305833646 +386 4 model.c_max 7.281215027178195 +386 4 loss.margin 0.7868313652479955 +386 4 optimizer.lr 0.002939176507744067 +386 4 negative_sampler.num_negs_per_pos 95.0 +386 4 training.batch_size 1.0 +386 5 model.embedding_dim 0.0 +386 5 model.c_min 
0.014559185306083324 +386 5 model.c_max 8.276448533804825 +386 5 loss.margin 6.2949858338183 +386 5 optimizer.lr 0.002013417084458983 +386 5 negative_sampler.num_negs_per_pos 26.0 +386 5 training.batch_size 2.0 +386 6 model.embedding_dim 0.0 +386 6 model.c_min 0.01926086438349138 +386 6 model.c_max 5.0975210739169645 +386 6 loss.margin 5.604741155283328 +386 6 optimizer.lr 0.003614747650068772 +386 6 negative_sampler.num_negs_per_pos 42.0 +386 6 training.batch_size 1.0 +386 7 model.embedding_dim 2.0 +386 7 model.c_min 0.06340138242086929 +386 7 model.c_max 4.4736068391533905 +386 7 loss.margin 7.663335989097793 +386 7 optimizer.lr 0.010580568039076857 +386 7 negative_sampler.num_negs_per_pos 99.0 +386 7 training.batch_size 0.0 +386 8 model.embedding_dim 1.0 +386 8 model.c_min 0.014852823106420717 +386 8 model.c_max 6.773106818585632 +386 8 loss.margin 8.342039975483011 +386 8 optimizer.lr 0.09793985890111813 +386 8 negative_sampler.num_negs_per_pos 41.0 +386 8 training.batch_size 0.0 +386 9 model.embedding_dim 1.0 +386 9 model.c_min 0.01112941297025132 +386 9 model.c_max 4.400279379230005 +386 9 loss.margin 7.2304278174384 +386 9 optimizer.lr 0.005568093726351968 +386 9 negative_sampler.num_negs_per_pos 98.0 +386 9 training.batch_size 1.0 +386 10 model.embedding_dim 0.0 +386 10 model.c_min 0.030525641599949173 +386 10 model.c_max 4.681193387342396 +386 10 loss.margin 2.5602645532526616 +386 10 optimizer.lr 0.04221401751645626 +386 10 negative_sampler.num_negs_per_pos 53.0 +386 10 training.batch_size 0.0 +386 11 model.embedding_dim 0.0 +386 11 model.c_min 0.05652399590575576 +386 11 model.c_max 7.987745744353625 +386 11 loss.margin 9.973276445756083 +386 11 optimizer.lr 0.005576369119170874 +386 11 negative_sampler.num_negs_per_pos 7.0 +386 11 training.batch_size 0.0 +386 12 model.embedding_dim 0.0 +386 12 model.c_min 0.0306097860450373 +386 12 model.c_max 3.3715371315337315 +386 12 loss.margin 5.281520241177751 +386 12 optimizer.lr 0.032644308942766286 +386 12 
negative_sampler.num_negs_per_pos 35.0 +386 12 training.batch_size 2.0 +386 13 model.embedding_dim 2.0 +386 13 model.c_min 0.04309063223772247 +386 13 model.c_max 3.6302353365969084 +386 13 loss.margin 3.4816176352256774 +386 13 optimizer.lr 0.026585564174279475 +386 13 negative_sampler.num_negs_per_pos 54.0 +386 13 training.batch_size 0.0 +386 14 model.embedding_dim 2.0 +386 14 model.c_min 0.01023464916956975 +386 14 model.c_max 9.541716380103757 +386 14 loss.margin 6.61724320041075 +386 14 optimizer.lr 0.018236987995166206 +386 14 negative_sampler.num_negs_per_pos 2.0 +386 14 training.batch_size 2.0 +386 15 model.embedding_dim 2.0 +386 15 model.c_min 0.05505556661013573 +386 15 model.c_max 7.290064306365291 +386 15 loss.margin 8.210189934325118 +386 15 optimizer.lr 0.0037561309473607717 +386 15 negative_sampler.num_negs_per_pos 19.0 +386 15 training.batch_size 1.0 +386 16 model.embedding_dim 2.0 +386 16 model.c_min 0.016775640739882972 +386 16 model.c_max 6.718663980140495 +386 16 loss.margin 2.4173084942062673 +386 16 optimizer.lr 0.012628108290645071 +386 16 negative_sampler.num_negs_per_pos 7.0 +386 16 training.batch_size 1.0 +386 17 model.embedding_dim 1.0 +386 17 model.c_min 0.02188256461805548 +386 17 model.c_max 9.341754318473384 +386 17 loss.margin 2.5303023392808894 +386 17 optimizer.lr 0.00872872215711352 +386 17 negative_sampler.num_negs_per_pos 30.0 +386 17 training.batch_size 0.0 +386 18 model.embedding_dim 2.0 +386 18 model.c_min 0.08431954137415283 +386 18 model.c_max 2.8056093959430317 +386 18 loss.margin 9.24679938195767 +386 18 optimizer.lr 0.07334685778738519 +386 18 negative_sampler.num_negs_per_pos 41.0 +386 18 training.batch_size 2.0 +386 19 model.embedding_dim 1.0 +386 19 model.c_min 0.06324051137793726 +386 19 model.c_max 1.8410924550356285 +386 19 loss.margin 5.618518494819936 +386 19 optimizer.lr 0.017148112999867095 +386 19 negative_sampler.num_negs_per_pos 42.0 +386 19 training.batch_size 0.0 +386 20 model.embedding_dim 0.0 +386 20 
model.c_min 0.09306314793942534 +386 20 model.c_max 3.8974157952912827 +386 20 loss.margin 8.598787016121065 +386 20 optimizer.lr 0.002278737398856472 +386 20 negative_sampler.num_negs_per_pos 17.0 +386 20 training.batch_size 2.0 +386 21 model.embedding_dim 2.0 +386 21 model.c_min 0.017348754788494104 +386 21 model.c_max 8.675865564478864 +386 21 loss.margin 2.4422662517397864 +386 21 optimizer.lr 0.006442468002704381 +386 21 negative_sampler.num_negs_per_pos 65.0 +386 21 training.batch_size 0.0 +386 22 model.embedding_dim 2.0 +386 22 model.c_min 0.06577622817880102 +386 22 model.c_max 7.016198107105397 +386 22 loss.margin 4.807288800775012 +386 22 optimizer.lr 0.012372744876231518 +386 22 negative_sampler.num_negs_per_pos 32.0 +386 22 training.batch_size 0.0 +386 23 model.embedding_dim 1.0 +386 23 model.c_min 0.014932322357866864 +386 23 model.c_max 6.0221903115217 +386 23 loss.margin 5.655754245685394 +386 23 optimizer.lr 0.025697408152740804 +386 23 negative_sampler.num_negs_per_pos 59.0 +386 23 training.batch_size 1.0 +386 24 model.embedding_dim 2.0 +386 24 model.c_min 0.09677608704159851 +386 24 model.c_max 2.278142846965704 +386 24 loss.margin 1.007238943549444 +386 24 optimizer.lr 0.0012805099900301395 +386 24 negative_sampler.num_negs_per_pos 41.0 +386 24 training.batch_size 0.0 +386 25 model.embedding_dim 0.0 +386 25 model.c_min 0.02966196978682154 +386 25 model.c_max 7.701028549359487 +386 25 loss.margin 2.360796077841563 +386 25 optimizer.lr 0.003980865038079583 +386 25 negative_sampler.num_negs_per_pos 24.0 +386 25 training.batch_size 0.0 +386 26 model.embedding_dim 0.0 +386 26 model.c_min 0.0872482889452046 +386 26 model.c_max 3.451347714642282 +386 26 loss.margin 2.7975544497379765 +386 26 optimizer.lr 0.05518260918887757 +386 26 negative_sampler.num_negs_per_pos 35.0 +386 26 training.batch_size 2.0 +386 27 model.embedding_dim 1.0 +386 27 model.c_min 0.018751446698866466 +386 27 model.c_max 8.421444057174728 +386 27 loss.margin 7.507178441363242 +386 
27 optimizer.lr 0.006665261300818892 +386 27 negative_sampler.num_negs_per_pos 69.0 +386 27 training.batch_size 2.0 +386 28 model.embedding_dim 1.0 +386 28 model.c_min 0.044271616756854155 +386 28 model.c_max 2.779074768964275 +386 28 loss.margin 7.48520803091597 +386 28 optimizer.lr 0.01701229902568063 +386 28 negative_sampler.num_negs_per_pos 62.0 +386 28 training.batch_size 2.0 +386 29 model.embedding_dim 1.0 +386 29 model.c_min 0.08399990607099096 +386 29 model.c_max 3.234888912230237 +386 29 loss.margin 8.69249343246747 +386 29 optimizer.lr 0.0196868376888594 +386 29 negative_sampler.num_negs_per_pos 45.0 +386 29 training.batch_size 1.0 +386 30 model.embedding_dim 0.0 +386 30 model.c_min 0.05184596808577382 +386 30 model.c_max 6.68189955718566 +386 30 loss.margin 4.5198464801694165 +386 30 optimizer.lr 0.01083162691798973 +386 30 negative_sampler.num_negs_per_pos 84.0 +386 30 training.batch_size 2.0 +386 31 model.embedding_dim 2.0 +386 31 model.c_min 0.09674676674550775 +386 31 model.c_max 9.560613704582572 +386 31 loss.margin 4.738457379005065 +386 31 optimizer.lr 0.004564187786609726 +386 31 negative_sampler.num_negs_per_pos 17.0 +386 31 training.batch_size 2.0 +386 32 model.embedding_dim 0.0 +386 32 model.c_min 0.011565557650460868 +386 32 model.c_max 6.822230782777548 +386 32 loss.margin 8.579864144629699 +386 32 optimizer.lr 0.020018349688336563 +386 32 negative_sampler.num_negs_per_pos 86.0 +386 32 training.batch_size 1.0 +386 33 model.embedding_dim 2.0 +386 33 model.c_min 0.023147358773386306 +386 33 model.c_max 8.61855256754816 +386 33 loss.margin 6.906590405119337 +386 33 optimizer.lr 0.0040896540136209645 +386 33 negative_sampler.num_negs_per_pos 88.0 +386 33 training.batch_size 1.0 +386 34 model.embedding_dim 1.0 +386 34 model.c_min 0.01743537214384713 +386 34 model.c_max 4.223390426488909 +386 34 loss.margin 7.277847208111646 +386 34 optimizer.lr 0.015048273067705108 +386 34 negative_sampler.num_negs_per_pos 87.0 +386 34 training.batch_size 2.0 
+386 35 model.embedding_dim 2.0 +386 35 model.c_min 0.029328632617910397 +386 35 model.c_max 6.460181666967309 +386 35 loss.margin 3.372575511418954 +386 35 optimizer.lr 0.019053682175831102 +386 35 negative_sampler.num_negs_per_pos 62.0 +386 35 training.batch_size 1.0 +386 36 model.embedding_dim 2.0 +386 36 model.c_min 0.04561383689586827 +386 36 model.c_max 9.839436816073727 +386 36 loss.margin 2.7411320307237443 +386 36 optimizer.lr 0.008598470072768382 +386 36 negative_sampler.num_negs_per_pos 86.0 +386 36 training.batch_size 0.0 +386 37 model.embedding_dim 0.0 +386 37 model.c_min 0.05264686054130813 +386 37 model.c_max 4.572207532027086 +386 37 loss.margin 7.910956607180595 +386 37 optimizer.lr 0.016135536777033707 +386 37 negative_sampler.num_negs_per_pos 92.0 +386 37 training.batch_size 2.0 +386 38 model.embedding_dim 2.0 +386 38 model.c_min 0.050311756876431274 +386 38 model.c_max 5.385400566201746 +386 38 loss.margin 7.505594909168586 +386 38 optimizer.lr 0.02499019329083861 +386 38 negative_sampler.num_negs_per_pos 60.0 +386 38 training.batch_size 0.0 +386 39 model.embedding_dim 1.0 +386 39 model.c_min 0.03804987751353437 +386 39 model.c_max 2.995457559691429 +386 39 loss.margin 4.024150537372681 +386 39 optimizer.lr 0.002711696761289637 +386 39 negative_sampler.num_negs_per_pos 98.0 +386 39 training.batch_size 0.0 +386 40 model.embedding_dim 2.0 +386 40 model.c_min 0.03186995942451196 +386 40 model.c_max 1.411393144372156 +386 40 loss.margin 9.572580527545623 +386 40 optimizer.lr 0.03563214738921515 +386 40 negative_sampler.num_negs_per_pos 51.0 +386 40 training.batch_size 2.0 +386 41 model.embedding_dim 0.0 +386 41 model.c_min 0.09076187291582684 +386 41 model.c_max 8.59382146116247 +386 41 loss.margin 4.593055601765628 +386 41 optimizer.lr 0.0018467464675844558 +386 41 negative_sampler.num_negs_per_pos 60.0 +386 41 training.batch_size 1.0 +386 42 model.embedding_dim 2.0 +386 42 model.c_min 0.03541128996274736 +386 42 model.c_max 1.4196933686007571 +386 
42 loss.margin 6.854121586654883 +386 42 optimizer.lr 0.0033988475078670535 +386 42 negative_sampler.num_negs_per_pos 37.0 +386 42 training.batch_size 0.0 +386 43 model.embedding_dim 2.0 +386 43 model.c_min 0.02460707998818648 +386 43 model.c_max 4.378275730137264 +386 43 loss.margin 6.99337464229594 +386 43 optimizer.lr 0.001435399622407424 +386 43 negative_sampler.num_negs_per_pos 67.0 +386 43 training.batch_size 2.0 +386 44 model.embedding_dim 0.0 +386 44 model.c_min 0.05185814364794864 +386 44 model.c_max 7.2701637991871095 +386 44 loss.margin 6.511178388541274 +386 44 optimizer.lr 0.001572267164445682 +386 44 negative_sampler.num_negs_per_pos 39.0 +386 44 training.batch_size 0.0 +386 45 model.embedding_dim 0.0 +386 45 model.c_min 0.011848743081553773 +386 45 model.c_max 2.7462062293699616 +386 45 loss.margin 5.24221678630984 +386 45 optimizer.lr 0.003280850459076933 +386 45 negative_sampler.num_negs_per_pos 57.0 +386 45 training.batch_size 1.0 +386 46 model.embedding_dim 2.0 +386 46 model.c_min 0.01437697773809735 +386 46 model.c_max 2.554737566949143 +386 46 loss.margin 1.1035806550025704 +386 46 optimizer.lr 0.016348043954051643 +386 46 negative_sampler.num_negs_per_pos 51.0 +386 46 training.batch_size 2.0 +386 47 model.embedding_dim 2.0 +386 47 model.c_min 0.02570323761505012 +386 47 model.c_max 8.683247704545549 +386 47 loss.margin 9.055604901647298 +386 47 optimizer.lr 0.031676369035643415 +386 47 negative_sampler.num_negs_per_pos 35.0 +386 47 training.batch_size 1.0 +386 48 model.embedding_dim 0.0 +386 48 model.c_min 0.09056141125381109 +386 48 model.c_max 2.5935669330887112 +386 48 loss.margin 1.1610877651615996 +386 48 optimizer.lr 0.03418534738080752 +386 48 negative_sampler.num_negs_per_pos 28.0 +386 48 training.batch_size 0.0 +386 49 model.embedding_dim 1.0 +386 49 model.c_min 0.07804130623244296 +386 49 model.c_max 1.869200487101512 +386 49 loss.margin 3.731243449413772 +386 49 optimizer.lr 0.09225564457709363 +386 49 
negative_sampler.num_negs_per_pos 14.0 +386 49 training.batch_size 2.0 +386 50 model.embedding_dim 0.0 +386 50 model.c_min 0.09016332748450075 +386 50 model.c_max 8.153985177684572 +386 50 loss.margin 0.8119768548589252 +386 50 optimizer.lr 0.07256156748335155 +386 50 negative_sampler.num_negs_per_pos 83.0 +386 50 training.batch_size 0.0 +386 51 model.embedding_dim 0.0 +386 51 model.c_min 0.0399487880262231 +386 51 model.c_max 9.192297777061418 +386 51 loss.margin 2.230593020970723 +386 51 optimizer.lr 0.003458860459150225 +386 51 negative_sampler.num_negs_per_pos 88.0 +386 51 training.batch_size 0.0 +386 52 model.embedding_dim 0.0 +386 52 model.c_min 0.02164553622209399 +386 52 model.c_max 1.5072351455969082 +386 52 loss.margin 9.29934286734914 +386 52 optimizer.lr 0.005388122121792785 +386 52 negative_sampler.num_negs_per_pos 34.0 +386 52 training.batch_size 0.0 +386 53 model.embedding_dim 0.0 +386 53 model.c_min 0.09843964751858969 +386 53 model.c_max 9.146107206191907 +386 53 loss.margin 1.3981268077628144 +386 53 optimizer.lr 0.0019400698136102564 +386 53 negative_sampler.num_negs_per_pos 75.0 +386 53 training.batch_size 1.0 +386 54 model.embedding_dim 0.0 +386 54 model.c_min 0.09756247188468477 +386 54 model.c_max 8.097040952665349 +386 54 loss.margin 8.960454082488855 +386 54 optimizer.lr 0.025885115485612016 +386 54 negative_sampler.num_negs_per_pos 15.0 +386 54 training.batch_size 0.0 +386 55 model.embedding_dim 2.0 +386 55 model.c_min 0.03458356727033055 +386 55 model.c_max 5.195921647794402 +386 55 loss.margin 5.381537376902394 +386 55 optimizer.lr 0.004938247609741769 +386 55 negative_sampler.num_negs_per_pos 79.0 +386 55 training.batch_size 0.0 +386 56 model.embedding_dim 0.0 +386 56 model.c_min 0.013646138644976531 +386 56 model.c_max 6.7443626462427115 +386 56 loss.margin 0.5129884088610881 +386 56 optimizer.lr 0.08047221436458607 +386 56 negative_sampler.num_negs_per_pos 51.0 +386 56 training.batch_size 0.0 +386 57 model.embedding_dim 1.0 +386 57 
model.c_min 0.011505708774140172 +386 57 model.c_max 1.5140255737361459 +386 57 loss.margin 8.312359926942671 +386 57 optimizer.lr 0.00508882492223828 +386 57 negative_sampler.num_negs_per_pos 97.0 +386 57 training.batch_size 2.0 +386 58 model.embedding_dim 1.0 +386 58 model.c_min 0.04589287857885539 +386 58 model.c_max 3.722639036963875 +386 58 loss.margin 6.361155371002752 +386 58 optimizer.lr 0.013577758372475705 +386 58 negative_sampler.num_negs_per_pos 41.0 +386 58 training.batch_size 0.0 +386 59 model.embedding_dim 1.0 +386 59 model.c_min 0.04118152081298638 +386 59 model.c_max 5.517136666698398 +386 59 loss.margin 2.9180780333942837 +386 59 optimizer.lr 0.0011894580295410688 +386 59 negative_sampler.num_negs_per_pos 87.0 +386 59 training.batch_size 1.0 +386 60 model.embedding_dim 2.0 +386 60 model.c_min 0.07104037359146799 +386 60 model.c_max 8.536232323917083 +386 60 loss.margin 5.24982256857043 +386 60 optimizer.lr 0.02906308558124068 +386 60 negative_sampler.num_negs_per_pos 1.0 +386 60 training.batch_size 0.0 +386 61 model.embedding_dim 0.0 +386 61 model.c_min 0.013238465611375897 +386 61 model.c_max 8.943313239613254 +386 61 loss.margin 5.827559535123344 +386 61 optimizer.lr 0.005600404783137335 +386 61 negative_sampler.num_negs_per_pos 27.0 +386 61 training.batch_size 0.0 +386 62 model.embedding_dim 1.0 +386 62 model.c_min 0.010118924912900328 +386 62 model.c_max 5.932470340514289 +386 62 loss.margin 8.14252558348945 +386 62 optimizer.lr 0.005042569100625535 +386 62 negative_sampler.num_negs_per_pos 41.0 +386 62 training.batch_size 1.0 +386 63 model.embedding_dim 1.0 +386 63 model.c_min 0.020833620308338966 +386 63 model.c_max 1.3066349937967365 +386 63 loss.margin 1.53693661102672 +386 63 optimizer.lr 0.03576822039357482 +386 63 negative_sampler.num_negs_per_pos 35.0 +386 63 training.batch_size 2.0 +386 64 model.embedding_dim 2.0 +386 64 model.c_min 0.013464893709760783 +386 64 model.c_max 9.278091657936043 +386 64 loss.margin 2.526582963366995 +386 
64 optimizer.lr 0.016622445328084753 +386 64 negative_sampler.num_negs_per_pos 32.0 +386 64 training.batch_size 2.0 +386 65 model.embedding_dim 1.0 +386 65 model.c_min 0.07258984478054797 +386 65 model.c_max 9.186242237966214 +386 65 loss.margin 7.425233976537844 +386 65 optimizer.lr 0.006417864744869938 +386 65 negative_sampler.num_negs_per_pos 91.0 +386 65 training.batch_size 1.0 +386 66 model.embedding_dim 0.0 +386 66 model.c_min 0.05239054728578391 +386 66 model.c_max 2.243949413360394 +386 66 loss.margin 2.9864977486040982 +386 66 optimizer.lr 0.0020331821540198153 +386 66 negative_sampler.num_negs_per_pos 2.0 +386 66 training.batch_size 2.0 +386 67 model.embedding_dim 0.0 +386 67 model.c_min 0.022838512294838922 +386 67 model.c_max 4.332431047037755 +386 67 loss.margin 3.8731409689411382 +386 67 optimizer.lr 0.0010376800989103417 +386 67 negative_sampler.num_negs_per_pos 15.0 +386 67 training.batch_size 2.0 +386 68 model.embedding_dim 2.0 +386 68 model.c_min 0.06174946261246995 +386 68 model.c_max 7.208885821079699 +386 68 loss.margin 2.7357854305288445 +386 68 optimizer.lr 0.0027046075683376785 +386 68 negative_sampler.num_negs_per_pos 53.0 +386 68 training.batch_size 2.0 +386 69 model.embedding_dim 1.0 +386 69 model.c_min 0.08476837108155305 +386 69 model.c_max 7.316715921221119 +386 69 loss.margin 1.4318277708954135 +386 69 optimizer.lr 0.038589003577663566 +386 69 negative_sampler.num_negs_per_pos 18.0 +386 69 training.batch_size 2.0 +386 70 model.embedding_dim 0.0 +386 70 model.c_min 0.05841718000817945 +386 70 model.c_max 3.0741306894032765 +386 70 loss.margin 6.582754183305813 +386 70 optimizer.lr 0.005460352829325157 +386 70 negative_sampler.num_negs_per_pos 35.0 +386 70 training.batch_size 1.0 +386 71 model.embedding_dim 0.0 +386 71 model.c_min 0.013965542387597247 +386 71 model.c_max 8.90553661396799 +386 71 loss.margin 6.290657189874051 +386 71 optimizer.lr 0.040106082043453194 +386 71 negative_sampler.num_negs_per_pos 91.0 +386 71 
training.batch_size 0.0 +386 72 model.embedding_dim 0.0 +386 72 model.c_min 0.0966303734121842 +386 72 model.c_max 6.054612248542659 +386 72 loss.margin 7.135015261330834 +386 72 optimizer.lr 0.08256446033794604 +386 72 negative_sampler.num_negs_per_pos 92.0 +386 72 training.batch_size 2.0 +386 73 model.embedding_dim 2.0 +386 73 model.c_min 0.023981967241179777 +386 73 model.c_max 7.741703344130085 +386 73 loss.margin 6.949566800621314 +386 73 optimizer.lr 0.01127778361534206 +386 73 negative_sampler.num_negs_per_pos 27.0 +386 73 training.batch_size 0.0 +386 74 model.embedding_dim 0.0 +386 74 model.c_min 0.010109020263541033 +386 74 model.c_max 8.394221622111042 +386 74 loss.margin 9.292233172014582 +386 74 optimizer.lr 0.08926913048248865 +386 74 negative_sampler.num_negs_per_pos 51.0 +386 74 training.batch_size 0.0 +386 75 model.embedding_dim 2.0 +386 75 model.c_min 0.06450525280486749 +386 75 model.c_max 8.937831339220518 +386 75 loss.margin 4.8620042804225285 +386 75 optimizer.lr 0.0028395330025060054 +386 75 negative_sampler.num_negs_per_pos 25.0 +386 75 training.batch_size 2.0 +386 76 model.embedding_dim 0.0 +386 76 model.c_min 0.04860807813601637 +386 76 model.c_max 9.36848913422569 +386 76 loss.margin 6.6255741510519535 +386 76 optimizer.lr 0.0010398162597007568 +386 76 negative_sampler.num_negs_per_pos 19.0 +386 76 training.batch_size 0.0 +386 77 model.embedding_dim 0.0 +386 77 model.c_min 0.011222589870205505 +386 77 model.c_max 8.313573283232401 +386 77 loss.margin 0.5622552472445831 +386 77 optimizer.lr 0.0588324548399567 +386 77 negative_sampler.num_negs_per_pos 7.0 +386 77 training.batch_size 0.0 +386 78 model.embedding_dim 0.0 +386 78 model.c_min 0.0620040173613581 +386 78 model.c_max 2.9866863523150338 +386 78 loss.margin 4.94672271899985 +386 78 optimizer.lr 0.005928271954645117 +386 78 negative_sampler.num_negs_per_pos 2.0 +386 78 training.batch_size 0.0 +386 79 model.embedding_dim 2.0 +386 79 model.c_min 0.036524217063195925 +386 79 model.c_max 
6.833821473261026 +386 79 loss.margin 1.707310662486209 +386 79 optimizer.lr 0.0011464423180725459 +386 79 negative_sampler.num_negs_per_pos 69.0 +386 79 training.batch_size 0.0 +386 80 model.embedding_dim 1.0 +386 80 model.c_min 0.0812848348129495 +386 80 model.c_max 3.3056976572321455 +386 80 loss.margin 9.800187560750649 +386 80 optimizer.lr 0.0013152257777081554 +386 80 negative_sampler.num_negs_per_pos 19.0 +386 80 training.batch_size 2.0 +386 81 model.embedding_dim 0.0 +386 81 model.c_min 0.01643098659796858 +386 81 model.c_max 3.5490809632502534 +386 81 loss.margin 0.7754141052101826 +386 81 optimizer.lr 0.014085239986405953 +386 81 negative_sampler.num_negs_per_pos 13.0 +386 81 training.batch_size 2.0 +386 82 model.embedding_dim 2.0 +386 82 model.c_min 0.012557829477195625 +386 82 model.c_max 4.977102568281392 +386 82 loss.margin 1.904291519535994 +386 82 optimizer.lr 0.004990674177460335 +386 82 negative_sampler.num_negs_per_pos 94.0 +386 82 training.batch_size 0.0 +386 83 model.embedding_dim 1.0 +386 83 model.c_min 0.015594911639315548 +386 83 model.c_max 8.160639568258986 +386 83 loss.margin 5.833383964739881 +386 83 optimizer.lr 0.03596001257269686 +386 83 negative_sampler.num_negs_per_pos 7.0 +386 83 training.batch_size 0.0 +386 84 model.embedding_dim 1.0 +386 84 model.c_min 0.08188879498459391 +386 84 model.c_max 8.271413700596407 +386 84 loss.margin 0.7344213577765495 +386 84 optimizer.lr 0.024218354003597245 +386 84 negative_sampler.num_negs_per_pos 89.0 +386 84 training.batch_size 0.0 +386 85 model.embedding_dim 2.0 +386 85 model.c_min 0.0844148073400462 +386 85 model.c_max 8.301330243312183 +386 85 loss.margin 3.235991043074449 +386 85 optimizer.lr 0.002560017237720814 +386 85 negative_sampler.num_negs_per_pos 78.0 +386 85 training.batch_size 1.0 +386 86 model.embedding_dim 2.0 +386 86 model.c_min 0.042582450264720205 +386 86 model.c_max 9.037390061226873 +386 86 loss.margin 1.6722771686681648 +386 86 optimizer.lr 0.05588279019339367 +386 86 
negative_sampler.num_negs_per_pos 5.0 +386 86 training.batch_size 0.0 +386 87 model.embedding_dim 0.0 +386 87 model.c_min 0.02902228573560226 +386 87 model.c_max 3.28702557183298 +386 87 loss.margin 3.877923745987616 +386 87 optimizer.lr 0.0022838949233496297 +386 87 negative_sampler.num_negs_per_pos 62.0 +386 87 training.batch_size 2.0 +386 88 model.embedding_dim 2.0 +386 88 model.c_min 0.054135736716231724 +386 88 model.c_max 5.726909683715964 +386 88 loss.margin 9.692973650397809 +386 88 optimizer.lr 0.02766340846211948 +386 88 negative_sampler.num_negs_per_pos 23.0 +386 88 training.batch_size 2.0 +386 89 model.embedding_dim 0.0 +386 89 model.c_min 0.04639120353276262 +386 89 model.c_max 3.1827722381796386 +386 89 loss.margin 1.6992813484737093 +386 89 optimizer.lr 0.0010169483083672852 +386 89 negative_sampler.num_negs_per_pos 1.0 +386 89 training.batch_size 2.0 +386 90 model.embedding_dim 2.0 +386 90 model.c_min 0.06393348551986124 +386 90 model.c_max 3.5059721446463397 +386 90 loss.margin 1.643424803433376 +386 90 optimizer.lr 0.03779551584419007 +386 90 negative_sampler.num_negs_per_pos 21.0 +386 90 training.batch_size 1.0 +386 91 model.embedding_dim 2.0 +386 91 model.c_min 0.02511761845000029 +386 91 model.c_max 4.1643473079629025 +386 91 loss.margin 3.472400051917256 +386 91 optimizer.lr 0.019283999784838652 +386 91 negative_sampler.num_negs_per_pos 3.0 +386 91 training.batch_size 2.0 +386 92 model.embedding_dim 2.0 +386 92 model.c_min 0.025791000275079957 +386 92 model.c_max 7.553520205609297 +386 92 loss.margin 8.757228234743895 +386 92 optimizer.lr 0.023986202024428825 +386 92 negative_sampler.num_negs_per_pos 59.0 +386 92 training.batch_size 2.0 +386 93 model.embedding_dim 2.0 +386 93 model.c_min 0.08881242298548883 +386 93 model.c_max 4.9151799073835925 +386 93 loss.margin 7.0992735996990906 +386 93 optimizer.lr 0.05731201746291973 +386 93 negative_sampler.num_negs_per_pos 59.0 +386 93 training.batch_size 1.0 +386 94 model.embedding_dim 0.0 +386 94 
model.c_min 0.051644999513605655 +386 94 model.c_max 8.831972927988431 +386 94 loss.margin 4.5211247160175745 +386 94 optimizer.lr 0.0010770087348618692 +386 94 negative_sampler.num_negs_per_pos 52.0 +386 94 training.batch_size 1.0 +386 95 model.embedding_dim 0.0 +386 95 model.c_min 0.026015341654520588 +386 95 model.c_max 9.983342043561327 +386 95 loss.margin 7.805934310125765 +386 95 optimizer.lr 0.0011534327993565907 +386 95 negative_sampler.num_negs_per_pos 46.0 +386 95 training.batch_size 0.0 +386 96 model.embedding_dim 0.0 +386 96 model.c_min 0.026981653485246813 +386 96 model.c_max 3.4593762747293066 +386 96 loss.margin 6.937171911361829 +386 96 optimizer.lr 0.06539243870537743 +386 96 negative_sampler.num_negs_per_pos 21.0 +386 96 training.batch_size 0.0 +386 97 model.embedding_dim 2.0 +386 97 model.c_min 0.07272789425262334 +386 97 model.c_max 9.863522854344726 +386 97 loss.margin 1.733081088992759 +386 97 optimizer.lr 0.03605660158282675 +386 97 negative_sampler.num_negs_per_pos 66.0 +386 97 training.batch_size 0.0 +386 98 model.embedding_dim 2.0 +386 98 model.c_min 0.0324929503777847 +386 98 model.c_max 1.3341765249766389 +386 98 loss.margin 9.274563098533461 +386 98 optimizer.lr 0.005984232038694445 +386 98 negative_sampler.num_negs_per_pos 4.0 +386 98 training.batch_size 2.0 +386 99 model.embedding_dim 1.0 +386 99 model.c_min 0.06511647870661465 +386 99 model.c_max 7.840654372834435 +386 99 loss.margin 2.5122805766956686 +386 99 optimizer.lr 0.016228907003371828 +386 99 negative_sampler.num_negs_per_pos 65.0 +386 99 training.batch_size 1.0 +386 100 model.embedding_dim 2.0 +386 100 model.c_min 0.013850105750061424 +386 100 model.c_max 8.59799213846742 +386 100 loss.margin 4.45143716318955 +386 100 optimizer.lr 0.0316484382672831 +386 100 negative_sampler.num_negs_per_pos 84.0 +386 100 training.batch_size 1.0 +386 1 dataset """kinships""" +386 1 model """kg2e""" +386 1 loss """marginranking""" +386 1 regularizer """no""" +386 1 optimizer """adam""" +386 
1 training_loop """owa""" +386 1 negative_sampler """basic""" +386 1 evaluator """rankbased""" +386 2 dataset """kinships""" +386 2 model """kg2e""" +386 2 loss """marginranking""" +386 2 regularizer """no""" +386 2 optimizer """adam""" +386 2 training_loop """owa""" +386 2 negative_sampler """basic""" +386 2 evaluator """rankbased""" +386 3 dataset """kinships""" +386 3 model """kg2e""" +386 3 loss """marginranking""" +386 3 regularizer """no""" +386 3 optimizer """adam""" +386 3 training_loop """owa""" +386 3 negative_sampler """basic""" +386 3 evaluator """rankbased""" +386 4 dataset """kinships""" +386 4 model """kg2e""" +386 4 loss """marginranking""" +386 4 regularizer """no""" +386 4 optimizer """adam""" +386 4 training_loop """owa""" +386 4 negative_sampler """basic""" +386 4 evaluator """rankbased""" +386 5 dataset """kinships""" +386 5 model """kg2e""" +386 5 loss """marginranking""" +386 5 regularizer """no""" +386 5 optimizer """adam""" +386 5 training_loop """owa""" +386 5 negative_sampler """basic""" +386 5 evaluator """rankbased""" +386 6 dataset """kinships""" +386 6 model """kg2e""" +386 6 loss """marginranking""" +386 6 regularizer """no""" +386 6 optimizer """adam""" +386 6 training_loop """owa""" +386 6 negative_sampler """basic""" +386 6 evaluator """rankbased""" +386 7 dataset """kinships""" +386 7 model """kg2e""" +386 7 loss """marginranking""" +386 7 regularizer """no""" +386 7 optimizer """adam""" +386 7 training_loop """owa""" +386 7 negative_sampler """basic""" +386 7 evaluator """rankbased""" +386 8 dataset """kinships""" +386 8 model """kg2e""" +386 8 loss """marginranking""" +386 8 regularizer """no""" +386 8 optimizer """adam""" +386 8 training_loop """owa""" +386 8 negative_sampler """basic""" +386 8 evaluator """rankbased""" +386 9 dataset """kinships""" +386 9 model """kg2e""" +386 9 loss """marginranking""" +386 9 regularizer """no""" +386 9 optimizer """adam""" +386 9 training_loop """owa""" +386 9 negative_sampler """basic""" 
+386 9 evaluator """rankbased""" +386 10 dataset """kinships""" +386 10 model """kg2e""" +386 10 loss """marginranking""" +386 10 regularizer """no""" +386 10 optimizer """adam""" +386 10 training_loop """owa""" +386 10 negative_sampler """basic""" +386 10 evaluator """rankbased""" +386 11 dataset """kinships""" +386 11 model """kg2e""" +386 11 loss """marginranking""" +386 11 regularizer """no""" +386 11 optimizer """adam""" +386 11 training_loop """owa""" +386 11 negative_sampler """basic""" +386 11 evaluator """rankbased""" +386 12 dataset """kinships""" +386 12 model """kg2e""" +386 12 loss """marginranking""" +386 12 regularizer """no""" +386 12 optimizer """adam""" +386 12 training_loop """owa""" +386 12 negative_sampler """basic""" +386 12 evaluator """rankbased""" +386 13 dataset """kinships""" +386 13 model """kg2e""" +386 13 loss """marginranking""" +386 13 regularizer """no""" +386 13 optimizer """adam""" +386 13 training_loop """owa""" +386 13 negative_sampler """basic""" +386 13 evaluator """rankbased""" +386 14 dataset """kinships""" +386 14 model """kg2e""" +386 14 loss """marginranking""" +386 14 regularizer """no""" +386 14 optimizer """adam""" +386 14 training_loop """owa""" +386 14 negative_sampler """basic""" +386 14 evaluator """rankbased""" +386 15 dataset """kinships""" +386 15 model """kg2e""" +386 15 loss """marginranking""" +386 15 regularizer """no""" +386 15 optimizer """adam""" +386 15 training_loop """owa""" +386 15 negative_sampler """basic""" +386 15 evaluator """rankbased""" +386 16 dataset """kinships""" +386 16 model """kg2e""" +386 16 loss """marginranking""" +386 16 regularizer """no""" +386 16 optimizer """adam""" +386 16 training_loop """owa""" +386 16 negative_sampler """basic""" +386 16 evaluator """rankbased""" +386 17 dataset """kinships""" +386 17 model """kg2e""" +386 17 loss """marginranking""" +386 17 regularizer """no""" +386 17 optimizer """adam""" +386 17 training_loop """owa""" +386 17 negative_sampler """basic""" 
+386 17 evaluator """rankbased""" +386 18 dataset """kinships""" +386 18 model """kg2e""" +386 18 loss """marginranking""" +386 18 regularizer """no""" +386 18 optimizer """adam""" +386 18 training_loop """owa""" +386 18 negative_sampler """basic""" +386 18 evaluator """rankbased""" +386 19 dataset """kinships""" +386 19 model """kg2e""" +386 19 loss """marginranking""" +386 19 regularizer """no""" +386 19 optimizer """adam""" +386 19 training_loop """owa""" +386 19 negative_sampler """basic""" +386 19 evaluator """rankbased""" +386 20 dataset """kinships""" +386 20 model """kg2e""" +386 20 loss """marginranking""" +386 20 regularizer """no""" +386 20 optimizer """adam""" +386 20 training_loop """owa""" +386 20 negative_sampler """basic""" +386 20 evaluator """rankbased""" +386 21 dataset """kinships""" +386 21 model """kg2e""" +386 21 loss """marginranking""" +386 21 regularizer """no""" +386 21 optimizer """adam""" +386 21 training_loop """owa""" +386 21 negative_sampler """basic""" +386 21 evaluator """rankbased""" +386 22 dataset """kinships""" +386 22 model """kg2e""" +386 22 loss """marginranking""" +386 22 regularizer """no""" +386 22 optimizer """adam""" +386 22 training_loop """owa""" +386 22 negative_sampler """basic""" +386 22 evaluator """rankbased""" +386 23 dataset """kinships""" +386 23 model """kg2e""" +386 23 loss """marginranking""" +386 23 regularizer """no""" +386 23 optimizer """adam""" +386 23 training_loop """owa""" +386 23 negative_sampler """basic""" +386 23 evaluator """rankbased""" +386 24 dataset """kinships""" +386 24 model """kg2e""" +386 24 loss """marginranking""" +386 24 regularizer """no""" +386 24 optimizer """adam""" +386 24 training_loop """owa""" +386 24 negative_sampler """basic""" +386 24 evaluator """rankbased""" +386 25 dataset """kinships""" +386 25 model """kg2e""" +386 25 loss """marginranking""" +386 25 regularizer """no""" +386 25 optimizer """adam""" +386 25 training_loop """owa""" +386 25 negative_sampler """basic""" 
+386 25 evaluator """rankbased""" +386 26 dataset """kinships""" +386 26 model """kg2e""" +386 26 loss """marginranking""" +386 26 regularizer """no""" +386 26 optimizer """adam""" +386 26 training_loop """owa""" +386 26 negative_sampler """basic""" +386 26 evaluator """rankbased""" +386 27 dataset """kinships""" +386 27 model """kg2e""" +386 27 loss """marginranking""" +386 27 regularizer """no""" +386 27 optimizer """adam""" +386 27 training_loop """owa""" +386 27 negative_sampler """basic""" +386 27 evaluator """rankbased""" +386 28 dataset """kinships""" +386 28 model """kg2e""" +386 28 loss """marginranking""" +386 28 regularizer """no""" +386 28 optimizer """adam""" +386 28 training_loop """owa""" +386 28 negative_sampler """basic""" +386 28 evaluator """rankbased""" +386 29 dataset """kinships""" +386 29 model """kg2e""" +386 29 loss """marginranking""" +386 29 regularizer """no""" +386 29 optimizer """adam""" +386 29 training_loop """owa""" +386 29 negative_sampler """basic""" +386 29 evaluator """rankbased""" +386 30 dataset """kinships""" +386 30 model """kg2e""" +386 30 loss """marginranking""" +386 30 regularizer """no""" +386 30 optimizer """adam""" +386 30 training_loop """owa""" +386 30 negative_sampler """basic""" +386 30 evaluator """rankbased""" +386 31 dataset """kinships""" +386 31 model """kg2e""" +386 31 loss """marginranking""" +386 31 regularizer """no""" +386 31 optimizer """adam""" +386 31 training_loop """owa""" +386 31 negative_sampler """basic""" +386 31 evaluator """rankbased""" +386 32 dataset """kinships""" +386 32 model """kg2e""" +386 32 loss """marginranking""" +386 32 regularizer """no""" +386 32 optimizer """adam""" +386 32 training_loop """owa""" +386 32 negative_sampler """basic""" +386 32 evaluator """rankbased""" +386 33 dataset """kinships""" +386 33 model """kg2e""" +386 33 loss """marginranking""" +386 33 regularizer """no""" +386 33 optimizer """adam""" +386 33 training_loop """owa""" +386 33 negative_sampler """basic""" 
+386 33 evaluator """rankbased""" +386 34 dataset """kinships""" +386 34 model """kg2e""" +386 34 loss """marginranking""" +386 34 regularizer """no""" +386 34 optimizer """adam""" +386 34 training_loop """owa""" +386 34 negative_sampler """basic""" +386 34 evaluator """rankbased""" +386 35 dataset """kinships""" +386 35 model """kg2e""" +386 35 loss """marginranking""" +386 35 regularizer """no""" +386 35 optimizer """adam""" +386 35 training_loop """owa""" +386 35 negative_sampler """basic""" +386 35 evaluator """rankbased""" +386 36 dataset """kinships""" +386 36 model """kg2e""" +386 36 loss """marginranking""" +386 36 regularizer """no""" +386 36 optimizer """adam""" +386 36 training_loop """owa""" +386 36 negative_sampler """basic""" +386 36 evaluator """rankbased""" +386 37 dataset """kinships""" +386 37 model """kg2e""" +386 37 loss """marginranking""" +386 37 regularizer """no""" +386 37 optimizer """adam""" +386 37 training_loop """owa""" +386 37 negative_sampler """basic""" +386 37 evaluator """rankbased""" +386 38 dataset """kinships""" +386 38 model """kg2e""" +386 38 loss """marginranking""" +386 38 regularizer """no""" +386 38 optimizer """adam""" +386 38 training_loop """owa""" +386 38 negative_sampler """basic""" +386 38 evaluator """rankbased""" +386 39 dataset """kinships""" +386 39 model """kg2e""" +386 39 loss """marginranking""" +386 39 regularizer """no""" +386 39 optimizer """adam""" +386 39 training_loop """owa""" +386 39 negative_sampler """basic""" +386 39 evaluator """rankbased""" +386 40 dataset """kinships""" +386 40 model """kg2e""" +386 40 loss """marginranking""" +386 40 regularizer """no""" +386 40 optimizer """adam""" +386 40 training_loop """owa""" +386 40 negative_sampler """basic""" +386 40 evaluator """rankbased""" +386 41 dataset """kinships""" +386 41 model """kg2e""" +386 41 loss """marginranking""" +386 41 regularizer """no""" +386 41 optimizer """adam""" +386 41 training_loop """owa""" +386 41 negative_sampler """basic""" 
+386 41 evaluator """rankbased""" +386 42 dataset """kinships""" +386 42 model """kg2e""" +386 42 loss """marginranking""" +386 42 regularizer """no""" +386 42 optimizer """adam""" +386 42 training_loop """owa""" +386 42 negative_sampler """basic""" +386 42 evaluator """rankbased""" +386 43 dataset """kinships""" +386 43 model """kg2e""" +386 43 loss """marginranking""" +386 43 regularizer """no""" +386 43 optimizer """adam""" +386 43 training_loop """owa""" +386 43 negative_sampler """basic""" +386 43 evaluator """rankbased""" +386 44 dataset """kinships""" +386 44 model """kg2e""" +386 44 loss """marginranking""" +386 44 regularizer """no""" +386 44 optimizer """adam""" +386 44 training_loop """owa""" +386 44 negative_sampler """basic""" +386 44 evaluator """rankbased""" +386 45 dataset """kinships""" +386 45 model """kg2e""" +386 45 loss """marginranking""" +386 45 regularizer """no""" +386 45 optimizer """adam""" +386 45 training_loop """owa""" +386 45 negative_sampler """basic""" +386 45 evaluator """rankbased""" +386 46 dataset """kinships""" +386 46 model """kg2e""" +386 46 loss """marginranking""" +386 46 regularizer """no""" +386 46 optimizer """adam""" +386 46 training_loop """owa""" +386 46 negative_sampler """basic""" +386 46 evaluator """rankbased""" +386 47 dataset """kinships""" +386 47 model """kg2e""" +386 47 loss """marginranking""" +386 47 regularizer """no""" +386 47 optimizer """adam""" +386 47 training_loop """owa""" +386 47 negative_sampler """basic""" +386 47 evaluator """rankbased""" +386 48 dataset """kinships""" +386 48 model """kg2e""" +386 48 loss """marginranking""" +386 48 regularizer """no""" +386 48 optimizer """adam""" +386 48 training_loop """owa""" +386 48 negative_sampler """basic""" +386 48 evaluator """rankbased""" +386 49 dataset """kinships""" +386 49 model """kg2e""" +386 49 loss """marginranking""" +386 49 regularizer """no""" +386 49 optimizer """adam""" +386 49 training_loop """owa""" +386 49 negative_sampler """basic""" 
+386 49 evaluator """rankbased""" +386 50 dataset """kinships""" +386 50 model """kg2e""" +386 50 loss """marginranking""" +386 50 regularizer """no""" +386 50 optimizer """adam""" +386 50 training_loop """owa""" +386 50 negative_sampler """basic""" +386 50 evaluator """rankbased""" +386 51 dataset """kinships""" +386 51 model """kg2e""" +386 51 loss """marginranking""" +386 51 regularizer """no""" +386 51 optimizer """adam""" +386 51 training_loop """owa""" +386 51 negative_sampler """basic""" +386 51 evaluator """rankbased""" +386 52 dataset """kinships""" +386 52 model """kg2e""" +386 52 loss """marginranking""" +386 52 regularizer """no""" +386 52 optimizer """adam""" +386 52 training_loop """owa""" +386 52 negative_sampler """basic""" +386 52 evaluator """rankbased""" +386 53 dataset """kinships""" +386 53 model """kg2e""" +386 53 loss """marginranking""" +386 53 regularizer """no""" +386 53 optimizer """adam""" +386 53 training_loop """owa""" +386 53 negative_sampler """basic""" +386 53 evaluator """rankbased""" +386 54 dataset """kinships""" +386 54 model """kg2e""" +386 54 loss """marginranking""" +386 54 regularizer """no""" +386 54 optimizer """adam""" +386 54 training_loop """owa""" +386 54 negative_sampler """basic""" +386 54 evaluator """rankbased""" +386 55 dataset """kinships""" +386 55 model """kg2e""" +386 55 loss """marginranking""" +386 55 regularizer """no""" +386 55 optimizer """adam""" +386 55 training_loop """owa""" +386 55 negative_sampler """basic""" +386 55 evaluator """rankbased""" +386 56 dataset """kinships""" +386 56 model """kg2e""" +386 56 loss """marginranking""" +386 56 regularizer """no""" +386 56 optimizer """adam""" +386 56 training_loop """owa""" +386 56 negative_sampler """basic""" +386 56 evaluator """rankbased""" +386 57 dataset """kinships""" +386 57 model """kg2e""" +386 57 loss """marginranking""" +386 57 regularizer """no""" +386 57 optimizer """adam""" +386 57 training_loop """owa""" +386 57 negative_sampler """basic""" 
+386 57 evaluator """rankbased""" +386 58 dataset """kinships""" +386 58 model """kg2e""" +386 58 loss """marginranking""" +386 58 regularizer """no""" +386 58 optimizer """adam""" +386 58 training_loop """owa""" +386 58 negative_sampler """basic""" +386 58 evaluator """rankbased""" +386 59 dataset """kinships""" +386 59 model """kg2e""" +386 59 loss """marginranking""" +386 59 regularizer """no""" +386 59 optimizer """adam""" +386 59 training_loop """owa""" +386 59 negative_sampler """basic""" +386 59 evaluator """rankbased""" +386 60 dataset """kinships""" +386 60 model """kg2e""" +386 60 loss """marginranking""" +386 60 regularizer """no""" +386 60 optimizer """adam""" +386 60 training_loop """owa""" +386 60 negative_sampler """basic""" +386 60 evaluator """rankbased""" +386 61 dataset """kinships""" +386 61 model """kg2e""" +386 61 loss """marginranking""" +386 61 regularizer """no""" +386 61 optimizer """adam""" +386 61 training_loop """owa""" +386 61 negative_sampler """basic""" +386 61 evaluator """rankbased""" +386 62 dataset """kinships""" +386 62 model """kg2e""" +386 62 loss """marginranking""" +386 62 regularizer """no""" +386 62 optimizer """adam""" +386 62 training_loop """owa""" +386 62 negative_sampler """basic""" +386 62 evaluator """rankbased""" +386 63 dataset """kinships""" +386 63 model """kg2e""" +386 63 loss """marginranking""" +386 63 regularizer """no""" +386 63 optimizer """adam""" +386 63 training_loop """owa""" +386 63 negative_sampler """basic""" +386 63 evaluator """rankbased""" +386 64 dataset """kinships""" +386 64 model """kg2e""" +386 64 loss """marginranking""" +386 64 regularizer """no""" +386 64 optimizer """adam""" +386 64 training_loop """owa""" +386 64 negative_sampler """basic""" +386 64 evaluator """rankbased""" +386 65 dataset """kinships""" +386 65 model """kg2e""" +386 65 loss """marginranking""" +386 65 regularizer """no""" +386 65 optimizer """adam""" +386 65 training_loop """owa""" +386 65 negative_sampler """basic""" 
+386 65 evaluator """rankbased""" +386 66 dataset """kinships""" +386 66 model """kg2e""" +386 66 loss """marginranking""" +386 66 regularizer """no""" +386 66 optimizer """adam""" +386 66 training_loop """owa""" +386 66 negative_sampler """basic""" +386 66 evaluator """rankbased""" +386 67 dataset """kinships""" +386 67 model """kg2e""" +386 67 loss """marginranking""" +386 67 regularizer """no""" +386 67 optimizer """adam""" +386 67 training_loop """owa""" +386 67 negative_sampler """basic""" +386 67 evaluator """rankbased""" +386 68 dataset """kinships""" +386 68 model """kg2e""" +386 68 loss """marginranking""" +386 68 regularizer """no""" +386 68 optimizer """adam""" +386 68 training_loop """owa""" +386 68 negative_sampler """basic""" +386 68 evaluator """rankbased""" +386 69 dataset """kinships""" +386 69 model """kg2e""" +386 69 loss """marginranking""" +386 69 regularizer """no""" +386 69 optimizer """adam""" +386 69 training_loop """owa""" +386 69 negative_sampler """basic""" +386 69 evaluator """rankbased""" +386 70 dataset """kinships""" +386 70 model """kg2e""" +386 70 loss """marginranking""" +386 70 regularizer """no""" +386 70 optimizer """adam""" +386 70 training_loop """owa""" +386 70 negative_sampler """basic""" +386 70 evaluator """rankbased""" +386 71 dataset """kinships""" +386 71 model """kg2e""" +386 71 loss """marginranking""" +386 71 regularizer """no""" +386 71 optimizer """adam""" +386 71 training_loop """owa""" +386 71 negative_sampler """basic""" +386 71 evaluator """rankbased""" +386 72 dataset """kinships""" +386 72 model """kg2e""" +386 72 loss """marginranking""" +386 72 regularizer """no""" +386 72 optimizer """adam""" +386 72 training_loop """owa""" +386 72 negative_sampler """basic""" +386 72 evaluator """rankbased""" +386 73 dataset """kinships""" +386 73 model """kg2e""" +386 73 loss """marginranking""" +386 73 regularizer """no""" +386 73 optimizer """adam""" +386 73 training_loop """owa""" +386 73 negative_sampler """basic""" 
+386 73 evaluator """rankbased""" +386 74 dataset """kinships""" +386 74 model """kg2e""" +386 74 loss """marginranking""" +386 74 regularizer """no""" +386 74 optimizer """adam""" +386 74 training_loop """owa""" +386 74 negative_sampler """basic""" +386 74 evaluator """rankbased""" +386 75 dataset """kinships""" +386 75 model """kg2e""" +386 75 loss """marginranking""" +386 75 regularizer """no""" +386 75 optimizer """adam""" +386 75 training_loop """owa""" +386 75 negative_sampler """basic""" +386 75 evaluator """rankbased""" +386 76 dataset """kinships""" +386 76 model """kg2e""" +386 76 loss """marginranking""" +386 76 regularizer """no""" +386 76 optimizer """adam""" +386 76 training_loop """owa""" +386 76 negative_sampler """basic""" +386 76 evaluator """rankbased""" +386 77 dataset """kinships""" +386 77 model """kg2e""" +386 77 loss """marginranking""" +386 77 regularizer """no""" +386 77 optimizer """adam""" +386 77 training_loop """owa""" +386 77 negative_sampler """basic""" +386 77 evaluator """rankbased""" +386 78 dataset """kinships""" +386 78 model """kg2e""" +386 78 loss """marginranking""" +386 78 regularizer """no""" +386 78 optimizer """adam""" +386 78 training_loop """owa""" +386 78 negative_sampler """basic""" +386 78 evaluator """rankbased""" +386 79 dataset """kinships""" +386 79 model """kg2e""" +386 79 loss """marginranking""" +386 79 regularizer """no""" +386 79 optimizer """adam""" +386 79 training_loop """owa""" +386 79 negative_sampler """basic""" +386 79 evaluator """rankbased""" +386 80 dataset """kinships""" +386 80 model """kg2e""" +386 80 loss """marginranking""" +386 80 regularizer """no""" +386 80 optimizer """adam""" +386 80 training_loop """owa""" +386 80 negative_sampler """basic""" +386 80 evaluator """rankbased""" +386 81 dataset """kinships""" +386 81 model """kg2e""" +386 81 loss """marginranking""" +386 81 regularizer """no""" +386 81 optimizer """adam""" +386 81 training_loop """owa""" +386 81 negative_sampler """basic""" 
+386 81 evaluator """rankbased""" +386 82 dataset """kinships""" +386 82 model """kg2e""" +386 82 loss """marginranking""" +386 82 regularizer """no""" +386 82 optimizer """adam""" +386 82 training_loop """owa""" +386 82 negative_sampler """basic""" +386 82 evaluator """rankbased""" +386 83 dataset """kinships""" +386 83 model """kg2e""" +386 83 loss """marginranking""" +386 83 regularizer """no""" +386 83 optimizer """adam""" +386 83 training_loop """owa""" +386 83 negative_sampler """basic""" +386 83 evaluator """rankbased""" +386 84 dataset """kinships""" +386 84 model """kg2e""" +386 84 loss """marginranking""" +386 84 regularizer """no""" +386 84 optimizer """adam""" +386 84 training_loop """owa""" +386 84 negative_sampler """basic""" +386 84 evaluator """rankbased""" +386 85 dataset """kinships""" +386 85 model """kg2e""" +386 85 loss """marginranking""" +386 85 regularizer """no""" +386 85 optimizer """adam""" +386 85 training_loop """owa""" +386 85 negative_sampler """basic""" +386 85 evaluator """rankbased""" +386 86 dataset """kinships""" +386 86 model """kg2e""" +386 86 loss """marginranking""" +386 86 regularizer """no""" +386 86 optimizer """adam""" +386 86 training_loop """owa""" +386 86 negative_sampler """basic""" +386 86 evaluator """rankbased""" +386 87 dataset """kinships""" +386 87 model """kg2e""" +386 87 loss """marginranking""" +386 87 regularizer """no""" +386 87 optimizer """adam""" +386 87 training_loop """owa""" +386 87 negative_sampler """basic""" +386 87 evaluator """rankbased""" +386 88 dataset """kinships""" +386 88 model """kg2e""" +386 88 loss """marginranking""" +386 88 regularizer """no""" +386 88 optimizer """adam""" +386 88 training_loop """owa""" +386 88 negative_sampler """basic""" +386 88 evaluator """rankbased""" +386 89 dataset """kinships""" +386 89 model """kg2e""" +386 89 loss """marginranking""" +386 89 regularizer """no""" +386 89 optimizer """adam""" +386 89 training_loop """owa""" +386 89 negative_sampler """basic""" 
+386 89 evaluator """rankbased""" +386 90 dataset """kinships""" +386 90 model """kg2e""" +386 90 loss """marginranking""" +386 90 regularizer """no""" +386 90 optimizer """adam""" +386 90 training_loop """owa""" +386 90 negative_sampler """basic""" +386 90 evaluator """rankbased""" +386 91 dataset """kinships""" +386 91 model """kg2e""" +386 91 loss """marginranking""" +386 91 regularizer """no""" +386 91 optimizer """adam""" +386 91 training_loop """owa""" +386 91 negative_sampler """basic""" +386 91 evaluator """rankbased""" +386 92 dataset """kinships""" +386 92 model """kg2e""" +386 92 loss """marginranking""" +386 92 regularizer """no""" +386 92 optimizer """adam""" +386 92 training_loop """owa""" +386 92 negative_sampler """basic""" +386 92 evaluator """rankbased""" +386 93 dataset """kinships""" +386 93 model """kg2e""" +386 93 loss """marginranking""" +386 93 regularizer """no""" +386 93 optimizer """adam""" +386 93 training_loop """owa""" +386 93 negative_sampler """basic""" +386 93 evaluator """rankbased""" +386 94 dataset """kinships""" +386 94 model """kg2e""" +386 94 loss """marginranking""" +386 94 regularizer """no""" +386 94 optimizer """adam""" +386 94 training_loop """owa""" +386 94 negative_sampler """basic""" +386 94 evaluator """rankbased""" +386 95 dataset """kinships""" +386 95 model """kg2e""" +386 95 loss """marginranking""" +386 95 regularizer """no""" +386 95 optimizer """adam""" +386 95 training_loop """owa""" +386 95 negative_sampler """basic""" +386 95 evaluator """rankbased""" +386 96 dataset """kinships""" +386 96 model """kg2e""" +386 96 loss """marginranking""" +386 96 regularizer """no""" +386 96 optimizer """adam""" +386 96 training_loop """owa""" +386 96 negative_sampler """basic""" +386 96 evaluator """rankbased""" +386 97 dataset """kinships""" +386 97 model """kg2e""" +386 97 loss """marginranking""" +386 97 regularizer """no""" +386 97 optimizer """adam""" +386 97 training_loop """owa""" +386 97 negative_sampler """basic""" 
+386 97 evaluator """rankbased""" +386 98 dataset """kinships""" +386 98 model """kg2e""" +386 98 loss """marginranking""" +386 98 regularizer """no""" +386 98 optimizer """adam""" +386 98 training_loop """owa""" +386 98 negative_sampler """basic""" +386 98 evaluator """rankbased""" +386 99 dataset """kinships""" +386 99 model """kg2e""" +386 99 loss """marginranking""" +386 99 regularizer """no""" +386 99 optimizer """adam""" +386 99 training_loop """owa""" +386 99 negative_sampler """basic""" +386 99 evaluator """rankbased""" +386 100 dataset """kinships""" +386 100 model """kg2e""" +386 100 loss """marginranking""" +386 100 regularizer """no""" +386 100 optimizer """adam""" +386 100 training_loop """owa""" +386 100 negative_sampler """basic""" +386 100 evaluator """rankbased""" +387 1 model.embedding_dim 0.0 +387 1 model.c_min 0.04082874062427786 +387 1 model.c_max 6.5880077012359815 +387 1 loss.margin 15.134303888188851 +387 1 loss.adversarial_temperature 0.6923016193655201 +387 1 optimizer.lr 0.011001476754488865 +387 1 negative_sampler.num_negs_per_pos 99.0 +387 1 training.batch_size 2.0 +387 2 model.embedding_dim 2.0 +387 2 model.c_min 0.04484553921165793 +387 2 model.c_max 6.7862203202349 +387 2 loss.margin 3.324156328288627 +387 2 loss.adversarial_temperature 0.26541885101994817 +387 2 optimizer.lr 0.0400782624076709 +387 2 negative_sampler.num_negs_per_pos 82.0 +387 2 training.batch_size 1.0 +387 3 model.embedding_dim 0.0 +387 3 model.c_min 0.03517130440332615 +387 3 model.c_max 8.539299472225963 +387 3 loss.margin 5.051849626545383 +387 3 loss.adversarial_temperature 0.3368275724586739 +387 3 optimizer.lr 0.012363026608101988 +387 3 negative_sampler.num_negs_per_pos 11.0 +387 3 training.batch_size 0.0 +387 4 model.embedding_dim 1.0 +387 4 model.c_min 0.08762237255846422 +387 4 model.c_max 3.994742358708975 +387 4 loss.margin 29.093694749407582 +387 4 loss.adversarial_temperature 0.5678398341404061 +387 4 optimizer.lr 0.08844786960394778 +387 4 
negative_sampler.num_negs_per_pos 69.0 +387 4 training.batch_size 2.0 +387 5 model.embedding_dim 1.0 +387 5 model.c_min 0.054075731244058164 +387 5 model.c_max 9.97945027835471 +387 5 loss.margin 15.090122609175973 +387 5 loss.adversarial_temperature 0.9836621223373792 +387 5 optimizer.lr 0.0406611960345094 +387 5 negative_sampler.num_negs_per_pos 93.0 +387 5 training.batch_size 1.0 +387 6 model.embedding_dim 0.0 +387 6 model.c_min 0.020605348427058853 +387 6 model.c_max 8.549276964913766 +387 6 loss.margin 13.316985715583103 +387 6 loss.adversarial_temperature 0.6535820703265649 +387 6 optimizer.lr 0.014802849656288525 +387 6 negative_sampler.num_negs_per_pos 36.0 +387 6 training.batch_size 1.0 +387 7 model.embedding_dim 0.0 +387 7 model.c_min 0.046181485685558334 +387 7 model.c_max 6.981623496093802 +387 7 loss.margin 21.24341253941152 +387 7 loss.adversarial_temperature 0.7151422541211023 +387 7 optimizer.lr 0.09839294349749991 +387 7 negative_sampler.num_negs_per_pos 68.0 +387 7 training.batch_size 1.0 +387 8 model.embedding_dim 0.0 +387 8 model.c_min 0.023793016004849 +387 8 model.c_max 6.644249329811931 +387 8 loss.margin 19.962201874715934 +387 8 loss.adversarial_temperature 0.16741042814630408 +387 8 optimizer.lr 0.0025863043491958495 +387 8 negative_sampler.num_negs_per_pos 27.0 +387 8 training.batch_size 1.0 +387 9 model.embedding_dim 0.0 +387 9 model.c_min 0.021018700988091096 +387 9 model.c_max 5.538593393927375 +387 9 loss.margin 1.2380960092507147 +387 9 loss.adversarial_temperature 0.37323238569286843 +387 9 optimizer.lr 0.09449028668138226 +387 9 negative_sampler.num_negs_per_pos 74.0 +387 9 training.batch_size 2.0 +387 10 model.embedding_dim 1.0 +387 10 model.c_min 0.05082257516184887 +387 10 model.c_max 8.59250514928437 +387 10 loss.margin 5.476679151649559 +387 10 loss.adversarial_temperature 0.5827383344647789 +387 10 optimizer.lr 0.0036396338885305132 +387 10 negative_sampler.num_negs_per_pos 86.0 +387 10 training.batch_size 0.0 +387 11 
model.embedding_dim 1.0 +387 11 model.c_min 0.021599632697540727 +387 11 model.c_max 7.48251983532173 +387 11 loss.margin 2.9173988923075913 +387 11 loss.adversarial_temperature 0.6505016280895112 +387 11 optimizer.lr 0.0017471072152436201 +387 11 negative_sampler.num_negs_per_pos 54.0 +387 11 training.batch_size 1.0 +387 12 model.embedding_dim 0.0 +387 12 model.c_min 0.056374169315361766 +387 12 model.c_max 8.565351466143603 +387 12 loss.margin 1.5913443268116225 +387 12 loss.adversarial_temperature 0.4839840306593567 +387 12 optimizer.lr 0.04450089777797866 +387 12 negative_sampler.num_negs_per_pos 56.0 +387 12 training.batch_size 1.0 +387 13 model.embedding_dim 0.0 +387 13 model.c_min 0.023193476724320632 +387 13 model.c_max 4.493556647099975 +387 13 loss.margin 26.631543758071928 +387 13 loss.adversarial_temperature 0.2541041430765306 +387 13 optimizer.lr 0.006420458861873709 +387 13 negative_sampler.num_negs_per_pos 96.0 +387 13 training.batch_size 2.0 +387 14 model.embedding_dim 0.0 +387 14 model.c_min 0.013624680629115146 +387 14 model.c_max 5.46364579922562 +387 14 loss.margin 4.888011573212036 +387 14 loss.adversarial_temperature 0.2656212279911371 +387 14 optimizer.lr 0.016439720406501605 +387 14 negative_sampler.num_negs_per_pos 61.0 +387 14 training.batch_size 0.0 +387 15 model.embedding_dim 0.0 +387 15 model.c_min 0.01956289160354424 +387 15 model.c_max 7.309801058065702 +387 15 loss.margin 1.1014084730094167 +387 15 loss.adversarial_temperature 0.4811118367487993 +387 15 optimizer.lr 0.03674197817232289 +387 15 negative_sampler.num_negs_per_pos 62.0 +387 15 training.batch_size 1.0 +387 16 model.embedding_dim 0.0 +387 16 model.c_min 0.08284932469493937 +387 16 model.c_max 8.991811872066231 +387 16 loss.margin 15.461198039201426 +387 16 loss.adversarial_temperature 0.29096149013572414 +387 16 optimizer.lr 0.027721251407953198 +387 16 negative_sampler.num_negs_per_pos 84.0 +387 16 training.batch_size 0.0 +387 17 model.embedding_dim 2.0 +387 17 
model.c_min 0.034499263833453434 +387 17 model.c_max 7.324865823426337 +387 17 loss.margin 23.20393223276665 +387 17 loss.adversarial_temperature 0.6248988315506107 +387 17 optimizer.lr 0.003153968991308698 +387 17 negative_sampler.num_negs_per_pos 70.0 +387 17 training.batch_size 2.0 +387 18 model.embedding_dim 0.0 +387 18 model.c_min 0.0509258823644 +387 18 model.c_max 2.8518235117272033 +387 18 loss.margin 17.479554402017513 +387 18 loss.adversarial_temperature 0.3762189172906659 +387 18 optimizer.lr 0.0013207297691577907 +387 18 negative_sampler.num_negs_per_pos 51.0 +387 18 training.batch_size 2.0 +387 19 model.embedding_dim 2.0 +387 19 model.c_min 0.014060238394864595 +387 19 model.c_max 4.054761229445741 +387 19 loss.margin 4.337647395443815 +387 19 loss.adversarial_temperature 0.7730181316780125 +387 19 optimizer.lr 0.001784779230522112 +387 19 negative_sampler.num_negs_per_pos 0.0 +387 19 training.batch_size 0.0 +387 20 model.embedding_dim 0.0 +387 20 model.c_min 0.017546677550973687 +387 20 model.c_max 5.314036430719179 +387 20 loss.margin 23.539727327012304 +387 20 loss.adversarial_temperature 0.9556792999521427 +387 20 optimizer.lr 0.0016001598625859674 +387 20 negative_sampler.num_negs_per_pos 77.0 +387 20 training.batch_size 0.0 +387 21 model.embedding_dim 1.0 +387 21 model.c_min 0.019155057365205615 +387 21 model.c_max 8.98908625845291 +387 21 loss.margin 23.209049416453198 +387 21 loss.adversarial_temperature 0.913622654562085 +387 21 optimizer.lr 0.013295863830088557 +387 21 negative_sampler.num_negs_per_pos 68.0 +387 21 training.batch_size 2.0 +387 22 model.embedding_dim 0.0 +387 22 model.c_min 0.02801837267272224 +387 22 model.c_max 3.7232143580467323 +387 22 loss.margin 21.21007024437256 +387 22 loss.adversarial_temperature 0.18868181086105784 +387 22 optimizer.lr 0.02432151936190045 +387 22 negative_sampler.num_negs_per_pos 12.0 +387 22 training.batch_size 0.0 +387 23 model.embedding_dim 0.0 +387 23 model.c_min 0.04734846328706924 +387 23 
model.c_max 3.5277760083125016 +387 23 loss.margin 24.901580191524776 +387 23 loss.adversarial_temperature 0.43208452886795845 +387 23 optimizer.lr 0.05719727355021304 +387 23 negative_sampler.num_negs_per_pos 69.0 +387 23 training.batch_size 1.0 +387 24 model.embedding_dim 0.0 +387 24 model.c_min 0.04102767413465564 +387 24 model.c_max 3.602762894711815 +387 24 loss.margin 5.612657824368962 +387 24 loss.adversarial_temperature 0.4015806309949138 +387 24 optimizer.lr 0.08648641698172913 +387 24 negative_sampler.num_negs_per_pos 0.0 +387 24 training.batch_size 1.0 +387 25 model.embedding_dim 1.0 +387 25 model.c_min 0.02032572911124025 +387 25 model.c_max 1.378041234477323 +387 25 loss.margin 9.331360488153496 +387 25 loss.adversarial_temperature 0.4050354848556851 +387 25 optimizer.lr 0.0017129717263695296 +387 25 negative_sampler.num_negs_per_pos 57.0 +387 25 training.batch_size 0.0 +387 26 model.embedding_dim 2.0 +387 26 model.c_min 0.09087423732092662 +387 26 model.c_max 7.327450797425498 +387 26 loss.margin 23.276490530902297 +387 26 loss.adversarial_temperature 0.7899697812332456 +387 26 optimizer.lr 0.0013328578692847393 +387 26 negative_sampler.num_negs_per_pos 40.0 +387 26 training.batch_size 0.0 +387 27 model.embedding_dim 0.0 +387 27 model.c_min 0.017240972576135414 +387 27 model.c_max 5.354938395791166 +387 27 loss.margin 19.869429820609177 +387 27 loss.adversarial_temperature 0.48984813245166103 +387 27 optimizer.lr 0.02257470737887485 +387 27 negative_sampler.num_negs_per_pos 20.0 +387 27 training.batch_size 0.0 +387 28 model.embedding_dim 1.0 +387 28 model.c_min 0.04361576654717207 +387 28 model.c_max 6.975930465696362 +387 28 loss.margin 26.490900227740884 +387 28 loss.adversarial_temperature 0.4093591565388317 +387 28 optimizer.lr 0.008398024252877634 +387 28 negative_sampler.num_negs_per_pos 32.0 +387 28 training.batch_size 0.0 +387 29 model.embedding_dim 1.0 +387 29 model.c_min 0.030976386286948304 +387 29 model.c_max 9.816933986221192 +387 29 
loss.margin 16.540267112891183 +387 29 loss.adversarial_temperature 0.9865085738680529 +387 29 optimizer.lr 0.020160641910550437 +387 29 negative_sampler.num_negs_per_pos 37.0 +387 29 training.batch_size 1.0 +387 30 model.embedding_dim 2.0 +387 30 model.c_min 0.03461586862961649 +387 30 model.c_max 6.1167229182583895 +387 30 loss.margin 21.34069404052614 +387 30 loss.adversarial_temperature 0.5933803308397686 +387 30 optimizer.lr 0.004464340237931721 +387 30 negative_sampler.num_negs_per_pos 46.0 +387 30 training.batch_size 1.0 +387 31 model.embedding_dim 0.0 +387 31 model.c_min 0.016500310823898783 +387 31 model.c_max 1.910366435093144 +387 31 loss.margin 29.664795046561355 +387 31 loss.adversarial_temperature 0.6039475727997226 +387 31 optimizer.lr 0.002301064118172303 +387 31 negative_sampler.num_negs_per_pos 36.0 +387 31 training.batch_size 0.0 +387 32 model.embedding_dim 2.0 +387 32 model.c_min 0.07856110448744574 +387 32 model.c_max 4.1810692921419355 +387 32 loss.margin 29.657922488913265 +387 32 loss.adversarial_temperature 0.5169892057496559 +387 32 optimizer.lr 0.04473427176886297 +387 32 negative_sampler.num_negs_per_pos 15.0 +387 32 training.batch_size 1.0 +387 33 model.embedding_dim 2.0 +387 33 model.c_min 0.01688854818493298 +387 33 model.c_max 7.460416366158715 +387 33 loss.margin 19.976921368771546 +387 33 loss.adversarial_temperature 0.6479755295252444 +387 33 optimizer.lr 0.004728218853400073 +387 33 negative_sampler.num_negs_per_pos 99.0 +387 33 training.batch_size 2.0 +387 34 model.embedding_dim 2.0 +387 34 model.c_min 0.045144947517961824 +387 34 model.c_max 1.6167710216383666 +387 34 loss.margin 2.1255592504820355 +387 34 loss.adversarial_temperature 0.4047160366471806 +387 34 optimizer.lr 0.027483925155389523 +387 34 negative_sampler.num_negs_per_pos 56.0 +387 34 training.batch_size 0.0 +387 35 model.embedding_dim 2.0 +387 35 model.c_min 0.020971528771061947 +387 35 model.c_max 5.538228218062423 +387 35 loss.margin 3.447907038979082 +387 35 
loss.adversarial_temperature 0.8294105813803435 +387 35 optimizer.lr 0.02979336856977976 +387 35 negative_sampler.num_negs_per_pos 11.0 +387 35 training.batch_size 1.0 +387 36 model.embedding_dim 1.0 +387 36 model.c_min 0.013657183583418027 +387 36 model.c_max 7.466843333197007 +387 36 loss.margin 22.151000758109614 +387 36 loss.adversarial_temperature 0.765506767607302 +387 36 optimizer.lr 0.006451316029941898 +387 36 negative_sampler.num_negs_per_pos 16.0 +387 36 training.batch_size 2.0 +387 37 model.embedding_dim 0.0 +387 37 model.c_min 0.05585502006432385 +387 37 model.c_max 1.1148997335670487 +387 37 loss.margin 3.687803071118506 +387 37 loss.adversarial_temperature 0.9024912675206735 +387 37 optimizer.lr 0.0028928845617019897 +387 37 negative_sampler.num_negs_per_pos 57.0 +387 37 training.batch_size 0.0 +387 38 model.embedding_dim 1.0 +387 38 model.c_min 0.01797715892146099 +387 38 model.c_max 9.247613856605891 +387 38 loss.margin 15.755781192677563 +387 38 loss.adversarial_temperature 0.9231454055213292 +387 38 optimizer.lr 0.009175332337902836 +387 38 negative_sampler.num_negs_per_pos 34.0 +387 38 training.batch_size 1.0 +387 39 model.embedding_dim 2.0 +387 39 model.c_min 0.07132753303666635 +387 39 model.c_max 9.683192222874453 +387 39 loss.margin 16.442171901004254 +387 39 loss.adversarial_temperature 0.3185734244683426 +387 39 optimizer.lr 0.007298865454277833 +387 39 negative_sampler.num_negs_per_pos 28.0 +387 39 training.batch_size 2.0 +387 40 model.embedding_dim 1.0 +387 40 model.c_min 0.018558669910120857 +387 40 model.c_max 5.936800173562851 +387 40 loss.margin 28.046452158467748 +387 40 loss.adversarial_temperature 0.8167349686645892 +387 40 optimizer.lr 0.055451553492593474 +387 40 negative_sampler.num_negs_per_pos 16.0 +387 40 training.batch_size 0.0 +387 41 model.embedding_dim 2.0 +387 41 model.c_min 0.021318167188263237 +387 41 model.c_max 7.88390807151635 +387 41 loss.margin 27.57769179406914 +387 41 loss.adversarial_temperature 
0.18470601028996175 +387 41 optimizer.lr 0.0037752069327854723 +387 41 negative_sampler.num_negs_per_pos 26.0 +387 41 training.batch_size 1.0 +387 42 model.embedding_dim 2.0 +387 42 model.c_min 0.017552830800191863 +387 42 model.c_max 5.3040738401052305 +387 42 loss.margin 17.01119029180921 +387 42 loss.adversarial_temperature 0.2771516652505938 +387 42 optimizer.lr 0.006705448693776205 +387 42 negative_sampler.num_negs_per_pos 73.0 +387 42 training.batch_size 2.0 +387 43 model.embedding_dim 1.0 +387 43 model.c_min 0.01521454092469548 +387 43 model.c_max 8.142672947637546 +387 43 loss.margin 19.74069666885666 +387 43 loss.adversarial_temperature 0.6882422824577108 +387 43 optimizer.lr 0.01012362339854927 +387 43 negative_sampler.num_negs_per_pos 7.0 +387 43 training.batch_size 2.0 +387 44 model.embedding_dim 2.0 +387 44 model.c_min 0.09189819330895706 +387 44 model.c_max 7.055095858633284 +387 44 loss.margin 23.028865794221268 +387 44 loss.adversarial_temperature 0.3581487005655529 +387 44 optimizer.lr 0.010408017553980195 +387 44 negative_sampler.num_negs_per_pos 76.0 +387 44 training.batch_size 2.0 +387 45 model.embedding_dim 1.0 +387 45 model.c_min 0.044504213470567405 +387 45 model.c_max 4.241034308678267 +387 45 loss.margin 11.770581451511728 +387 45 loss.adversarial_temperature 0.8424445653177483 +387 45 optimizer.lr 0.0037076542974674035 +387 45 negative_sampler.num_negs_per_pos 22.0 +387 45 training.batch_size 0.0 +387 46 model.embedding_dim 1.0 +387 46 model.c_min 0.015078114650630952 +387 46 model.c_max 7.928904133242366 +387 46 loss.margin 26.065033347230184 +387 46 loss.adversarial_temperature 0.9568374412500793 +387 46 optimizer.lr 0.0212000470213363 +387 46 negative_sampler.num_negs_per_pos 12.0 +387 46 training.batch_size 2.0 +387 47 model.embedding_dim 2.0 +387 47 model.c_min 0.09799181138334395 +387 47 model.c_max 9.236340370937478 +387 47 loss.margin 15.469185383702134 +387 47 loss.adversarial_temperature 0.9949833812181064 +387 47 optimizer.lr 
0.016788669532179265 +387 47 negative_sampler.num_negs_per_pos 57.0 +387 47 training.batch_size 1.0 +387 48 model.embedding_dim 2.0 +387 48 model.c_min 0.09442150925298369 +387 48 model.c_max 8.07942480219784 +387 48 loss.margin 14.658169345149233 +387 48 loss.adversarial_temperature 0.4340739560891901 +387 48 optimizer.lr 0.009723212606764751 +387 48 negative_sampler.num_negs_per_pos 85.0 +387 48 training.batch_size 1.0 +387 49 model.embedding_dim 0.0 +387 49 model.c_min 0.027138050333500648 +387 49 model.c_max 1.2145358332954101 +387 49 loss.margin 3.5371716993480806 +387 49 loss.adversarial_temperature 0.4189920721544841 +387 49 optimizer.lr 0.019259328005037175 +387 49 negative_sampler.num_negs_per_pos 40.0 +387 49 training.batch_size 2.0 +387 50 model.embedding_dim 0.0 +387 50 model.c_min 0.06549889258279247 +387 50 model.c_max 6.221040444352368 +387 50 loss.margin 19.786423245247672 +387 50 loss.adversarial_temperature 0.8440533560894931 +387 50 optimizer.lr 0.0016730285274975227 +387 50 negative_sampler.num_negs_per_pos 14.0 +387 50 training.batch_size 0.0 +387 51 model.embedding_dim 1.0 +387 51 model.c_min 0.012544501399816065 +387 51 model.c_max 2.098215170972968 +387 51 loss.margin 12.572407951589776 +387 51 loss.adversarial_temperature 0.6612821878538268 +387 51 optimizer.lr 0.03402993437143856 +387 51 negative_sampler.num_negs_per_pos 63.0 +387 51 training.batch_size 2.0 +387 52 model.embedding_dim 1.0 +387 52 model.c_min 0.07083330000771114 +387 52 model.c_max 7.035247051259277 +387 52 loss.margin 13.943896014970178 +387 52 loss.adversarial_temperature 0.6458247667261438 +387 52 optimizer.lr 0.04982072002021384 +387 52 negative_sampler.num_negs_per_pos 23.0 +387 52 training.batch_size 2.0 +387 53 model.embedding_dim 2.0 +387 53 model.c_min 0.016383150398269528 +387 53 model.c_max 3.212608870610091 +387 53 loss.margin 28.298826254693335 +387 53 loss.adversarial_temperature 0.37398412274284465 +387 53 optimizer.lr 0.008420530840609243 +387 53 
negative_sampler.num_negs_per_pos 55.0 +387 53 training.batch_size 1.0 +387 54 model.embedding_dim 1.0 +387 54 model.c_min 0.021645537048810098 +387 54 model.c_max 7.5619114025795175 +387 54 loss.margin 25.442610734810156 +387 54 loss.adversarial_temperature 0.5656581684299083 +387 54 optimizer.lr 0.014698810154111038 +387 54 negative_sampler.num_negs_per_pos 93.0 +387 54 training.batch_size 1.0 +387 55 model.embedding_dim 2.0 +387 55 model.c_min 0.08924333542240655 +387 55 model.c_max 9.255118317311796 +387 55 loss.margin 15.995250751502935 +387 55 loss.adversarial_temperature 0.13931717487399647 +387 55 optimizer.lr 0.04201450740515023 +387 55 negative_sampler.num_negs_per_pos 20.0 +387 55 training.batch_size 1.0 +387 56 model.embedding_dim 2.0 +387 56 model.c_min 0.014530045506243589 +387 56 model.c_max 4.628116985025152 +387 56 loss.margin 26.34494408901144 +387 56 loss.adversarial_temperature 0.10951570807640071 +387 56 optimizer.lr 0.014999720181678134 +387 56 negative_sampler.num_negs_per_pos 86.0 +387 56 training.batch_size 0.0 +387 57 model.embedding_dim 2.0 +387 57 model.c_min 0.014024338515100253 +387 57 model.c_max 2.0910507781976877 +387 57 loss.margin 12.410947666877977 +387 57 loss.adversarial_temperature 0.5819427644447828 +387 57 optimizer.lr 0.0016106794684681387 +387 57 negative_sampler.num_negs_per_pos 26.0 +387 57 training.batch_size 1.0 +387 58 model.embedding_dim 2.0 +387 58 model.c_min 0.015049396415405038 +387 58 model.c_max 6.277307225455112 +387 58 loss.margin 14.095050349112556 +387 58 loss.adversarial_temperature 0.10263260950089942 +387 58 optimizer.lr 0.0024656048028461612 +387 58 negative_sampler.num_negs_per_pos 62.0 +387 58 training.batch_size 2.0 +387 59 model.embedding_dim 0.0 +387 59 model.c_min 0.014981314805331837 +387 59 model.c_max 8.794855203983442 +387 59 loss.margin 24.08351951079214 +387 59 loss.adversarial_temperature 0.5597676969808651 +387 59 optimizer.lr 0.002425864513051754 +387 59 negative_sampler.num_negs_per_pos 
73.0 +387 59 training.batch_size 1.0 +387 60 model.embedding_dim 1.0 +387 60 model.c_min 0.04862358976353675 +387 60 model.c_max 9.733129660378477 +387 60 loss.margin 17.3849673700697 +387 60 loss.adversarial_temperature 0.5270756324170328 +387 60 optimizer.lr 0.010031914788040358 +387 60 negative_sampler.num_negs_per_pos 74.0 +387 60 training.batch_size 1.0 +387 61 model.embedding_dim 0.0 +387 61 model.c_min 0.012087133880213465 +387 61 model.c_max 4.717688857776447 +387 61 loss.margin 3.7393269228068298 +387 61 loss.adversarial_temperature 0.11033627079958933 +387 61 optimizer.lr 0.00743410722220361 +387 61 negative_sampler.num_negs_per_pos 3.0 +387 61 training.batch_size 1.0 +387 62 model.embedding_dim 2.0 +387 62 model.c_min 0.01671858169902453 +387 62 model.c_max 4.500849801410134 +387 62 loss.margin 14.861880879925003 +387 62 loss.adversarial_temperature 0.4576761141507455 +387 62 optimizer.lr 0.00914605837159538 +387 62 negative_sampler.num_negs_per_pos 34.0 +387 62 training.batch_size 2.0 +387 63 model.embedding_dim 2.0 +387 63 model.c_min 0.04238538636280672 +387 63 model.c_max 7.992189420125102 +387 63 loss.margin 3.7240568099361777 +387 63 loss.adversarial_temperature 0.26114271756822194 +387 63 optimizer.lr 0.007087498465918655 +387 63 negative_sampler.num_negs_per_pos 88.0 +387 63 training.batch_size 2.0 +387 64 model.embedding_dim 0.0 +387 64 model.c_min 0.019934339738419474 +387 64 model.c_max 8.489346665721769 +387 64 loss.margin 25.787330872380547 +387 64 loss.adversarial_temperature 0.8389289935815339 +387 64 optimizer.lr 0.049294348936961364 +387 64 negative_sampler.num_negs_per_pos 71.0 +387 64 training.batch_size 1.0 +387 65 model.embedding_dim 0.0 +387 65 model.c_min 0.017245819883452165 +387 65 model.c_max 1.196005962518594 +387 65 loss.margin 19.77764209840005 +387 65 loss.adversarial_temperature 0.7064370869821688 +387 65 optimizer.lr 0.0019427697799935368 +387 65 negative_sampler.num_negs_per_pos 17.0 +387 65 training.batch_size 1.0 +387 
66 model.embedding_dim 0.0 +387 66 model.c_min 0.08860299067603074 +387 66 model.c_max 1.3189176968102752 +387 66 loss.margin 11.095988884463907 +387 66 loss.adversarial_temperature 0.39184339277310665 +387 66 optimizer.lr 0.008465439167156074 +387 66 negative_sampler.num_negs_per_pos 56.0 +387 66 training.batch_size 1.0 +387 67 model.embedding_dim 1.0 +387 67 model.c_min 0.03789257309328793 +387 67 model.c_max 1.916669192128849 +387 67 loss.margin 13.696611743109196 +387 67 loss.adversarial_temperature 0.7397203903180641 +387 67 optimizer.lr 0.0028955665163406356 +387 67 negative_sampler.num_negs_per_pos 49.0 +387 67 training.batch_size 0.0 +387 68 model.embedding_dim 2.0 +387 68 model.c_min 0.025542097852061102 +387 68 model.c_max 5.433204392128893 +387 68 loss.margin 2.275715750087992 +387 68 loss.adversarial_temperature 0.7499864005866148 +387 68 optimizer.lr 0.007006155791865298 +387 68 negative_sampler.num_negs_per_pos 46.0 +387 68 training.batch_size 2.0 +387 69 model.embedding_dim 0.0 +387 69 model.c_min 0.08608489361973541 +387 69 model.c_max 4.111811957961695 +387 69 loss.margin 14.201074270885107 +387 69 loss.adversarial_temperature 0.5980397763999923 +387 69 optimizer.lr 0.028817930535266212 +387 69 negative_sampler.num_negs_per_pos 40.0 +387 69 training.batch_size 2.0 +387 70 model.embedding_dim 0.0 +387 70 model.c_min 0.09081367649174085 +387 70 model.c_max 3.226620055659396 +387 70 loss.margin 18.6328876744568 +387 70 loss.adversarial_temperature 0.3652758603980366 +387 70 optimizer.lr 0.0012719044628728825 +387 70 negative_sampler.num_negs_per_pos 82.0 +387 70 training.batch_size 2.0 +387 71 model.embedding_dim 1.0 +387 71 model.c_min 0.0572799141982849 +387 71 model.c_max 4.236755364580749 +387 71 loss.margin 5.919752913862203 +387 71 loss.adversarial_temperature 0.5383643278178128 +387 71 optimizer.lr 0.06825769408270545 +387 71 negative_sampler.num_negs_per_pos 40.0 +387 71 training.batch_size 1.0 +387 72 model.embedding_dim 0.0 +387 72 
model.c_min 0.0284763358422085 +387 72 model.c_max 9.608883671854226 +387 72 loss.margin 11.345688733446261 +387 72 loss.adversarial_temperature 0.46593334037384426 +387 72 optimizer.lr 0.03340280738481901 +387 72 negative_sampler.num_negs_per_pos 64.0 +387 72 training.batch_size 0.0 +387 73 model.embedding_dim 1.0 +387 73 model.c_min 0.05307984353596441 +387 73 model.c_max 4.241475249333773 +387 73 loss.margin 4.349469826405307 +387 73 loss.adversarial_temperature 0.958057794398456 +387 73 optimizer.lr 0.0022033121599719283 +387 73 negative_sampler.num_negs_per_pos 10.0 +387 73 training.batch_size 2.0 +387 74 model.embedding_dim 1.0 +387 74 model.c_min 0.0502908894604794 +387 74 model.c_max 2.506066298560306 +387 74 loss.margin 23.638931711028675 +387 74 loss.adversarial_temperature 0.3822525850524515 +387 74 optimizer.lr 0.0805813583600469 +387 74 negative_sampler.num_negs_per_pos 80.0 +387 74 training.batch_size 1.0 +387 75 model.embedding_dim 2.0 +387 75 model.c_min 0.01790166388637145 +387 75 model.c_max 4.101904296503241 +387 75 loss.margin 17.814683590812177 +387 75 loss.adversarial_temperature 0.22535761633937523 +387 75 optimizer.lr 0.007827038642009196 +387 75 negative_sampler.num_negs_per_pos 82.0 +387 75 training.batch_size 1.0 +387 76 model.embedding_dim 1.0 +387 76 model.c_min 0.013956766557576607 +387 76 model.c_max 4.097806007879711 +387 76 loss.margin 2.6160207629420325 +387 76 loss.adversarial_temperature 0.9371978408801899 +387 76 optimizer.lr 0.003400179782327153 +387 76 negative_sampler.num_negs_per_pos 66.0 +387 76 training.batch_size 1.0 +387 77 model.embedding_dim 2.0 +387 77 model.c_min 0.012423222206605552 +387 77 model.c_max 7.958065210271228 +387 77 loss.margin 26.302801422874996 +387 77 loss.adversarial_temperature 0.2566232302304539 +387 77 optimizer.lr 0.007213941627861754 +387 77 negative_sampler.num_negs_per_pos 86.0 +387 77 training.batch_size 1.0 +387 78 model.embedding_dim 1.0 +387 78 model.c_min 0.09739703075293439 +387 78 
model.c_max 5.4935833345022385 +387 78 loss.margin 17.93619643506156 +387 78 loss.adversarial_temperature 0.5413833066782885 +387 78 optimizer.lr 0.03322549558836233 +387 78 negative_sampler.num_negs_per_pos 83.0 +387 78 training.batch_size 0.0 +387 79 model.embedding_dim 0.0 +387 79 model.c_min 0.025469683231893667 +387 79 model.c_max 5.122505662122803 +387 79 loss.margin 22.462838580696864 +387 79 loss.adversarial_temperature 0.35313973677307386 +387 79 optimizer.lr 0.06614367362055097 +387 79 negative_sampler.num_negs_per_pos 13.0 +387 79 training.batch_size 1.0 +387 80 model.embedding_dim 2.0 +387 80 model.c_min 0.05003526311736905 +387 80 model.c_max 9.59200909353885 +387 80 loss.margin 9.027257057077083 +387 80 loss.adversarial_temperature 0.9281253259119149 +387 80 optimizer.lr 0.0010141877309888315 +387 80 negative_sampler.num_negs_per_pos 72.0 +387 80 training.batch_size 1.0 +387 81 model.embedding_dim 0.0 +387 81 model.c_min 0.020733740298787662 +387 81 model.c_max 4.855310168677569 +387 81 loss.margin 13.809898890507858 +387 81 loss.adversarial_temperature 0.13051238064705123 +387 81 optimizer.lr 0.003410708016054837 +387 81 negative_sampler.num_negs_per_pos 74.0 +387 81 training.batch_size 1.0 +387 82 model.embedding_dim 1.0 +387 82 model.c_min 0.041540756811561864 +387 82 model.c_max 1.0839852875310396 +387 82 loss.margin 12.828374443175836 +387 82 loss.adversarial_temperature 0.746486021018101 +387 82 optimizer.lr 0.015730287981874647 +387 82 negative_sampler.num_negs_per_pos 6.0 +387 82 training.batch_size 2.0 +387 83 model.embedding_dim 1.0 +387 83 model.c_min 0.06112766752996663 +387 83 model.c_max 7.552599974476057 +387 83 loss.margin 27.316114266387967 +387 83 loss.adversarial_temperature 0.774902658701674 +387 83 optimizer.lr 0.0018970726121315395 +387 83 negative_sampler.num_negs_per_pos 50.0 +387 83 training.batch_size 0.0 +387 84 model.embedding_dim 1.0 +387 84 model.c_min 0.022725157989124337 +387 84 model.c_max 1.891574105035501 +387 84 
loss.margin 6.121546951033843 +387 84 loss.adversarial_temperature 0.2112915787877547 +387 84 optimizer.lr 0.006471405731604544 +387 84 negative_sampler.num_negs_per_pos 7.0 +387 84 training.batch_size 0.0 +387 85 model.embedding_dim 1.0 +387 85 model.c_min 0.09472071223680299 +387 85 model.c_max 5.771818398360262 +387 85 loss.margin 1.4417999798903445 +387 85 loss.adversarial_temperature 0.8771001714706903 +387 85 optimizer.lr 0.02879772339354377 +387 85 negative_sampler.num_negs_per_pos 5.0 +387 85 training.batch_size 0.0 +387 86 model.embedding_dim 0.0 +387 86 model.c_min 0.02461158123821184 +387 86 model.c_max 8.093052612986853 +387 86 loss.margin 20.19845499258268 +387 86 loss.adversarial_temperature 0.1599041541755296 +387 86 optimizer.lr 0.010212118959393281 +387 86 negative_sampler.num_negs_per_pos 75.0 +387 86 training.batch_size 1.0 +387 87 model.embedding_dim 1.0 +387 87 model.c_min 0.030582865150037917 +387 87 model.c_max 5.822055436029317 +387 87 loss.margin 23.627003650348836 +387 87 loss.adversarial_temperature 0.49491058073482463 +387 87 optimizer.lr 0.08755505624307193 +387 87 negative_sampler.num_negs_per_pos 48.0 +387 87 training.batch_size 0.0 +387 88 model.embedding_dim 1.0 +387 88 model.c_min 0.07337352541817348 +387 88 model.c_max 7.516102035881508 +387 88 loss.margin 11.382643936310217 +387 88 loss.adversarial_temperature 0.9666165430399453 +387 88 optimizer.lr 0.015868271645495223 +387 88 negative_sampler.num_negs_per_pos 68.0 +387 88 training.batch_size 2.0 +387 89 model.embedding_dim 2.0 +387 89 model.c_min 0.04837968127137576 +387 89 model.c_max 3.2178957431767756 +387 89 loss.margin 18.528945635310613 +387 89 loss.adversarial_temperature 0.6481613370189125 +387 89 optimizer.lr 0.02665932021726184 +387 89 negative_sampler.num_negs_per_pos 72.0 +387 89 training.batch_size 1.0 +387 90 model.embedding_dim 2.0 +387 90 model.c_min 0.01607587079036322 +387 90 model.c_max 5.785342100643963 +387 90 loss.margin 22.206141508100664 +387 90 
loss.adversarial_temperature 0.9093568754164474 +387 90 optimizer.lr 0.021603539759063668 +387 90 negative_sampler.num_negs_per_pos 22.0 +387 90 training.batch_size 0.0 +387 91 model.embedding_dim 2.0 +387 91 model.c_min 0.02041395630871163 +387 91 model.c_max 9.00818781691667 +387 91 loss.margin 17.23809632093214 +387 91 loss.adversarial_temperature 0.5872608693992178 +387 91 optimizer.lr 0.02151273320068515 +387 91 negative_sampler.num_negs_per_pos 21.0 +387 91 training.batch_size 2.0 +387 92 model.embedding_dim 2.0 +387 92 model.c_min 0.02406927028230167 +387 92 model.c_max 5.7708008844501935 +387 92 loss.margin 15.138168838431792 +387 92 loss.adversarial_temperature 0.8500978385815673 +387 92 optimizer.lr 0.002028330775154724 +387 92 negative_sampler.num_negs_per_pos 49.0 +387 92 training.batch_size 1.0 +387 93 model.embedding_dim 2.0 +387 93 model.c_min 0.030551662379830712 +387 93 model.c_max 4.993815330634625 +387 93 loss.margin 29.33347548179799 +387 93 loss.adversarial_temperature 0.5867826234128404 +387 93 optimizer.lr 0.0023803220648286882 +387 93 negative_sampler.num_negs_per_pos 13.0 +387 93 training.batch_size 0.0 +387 94 model.embedding_dim 1.0 +387 94 model.c_min 0.09739629807390957 +387 94 model.c_max 9.950061978088344 +387 94 loss.margin 25.426662347643905 +387 94 loss.adversarial_temperature 0.41893392780617766 +387 94 optimizer.lr 0.0038573229217285943 +387 94 negative_sampler.num_negs_per_pos 82.0 +387 94 training.batch_size 2.0 +387 95 model.embedding_dim 2.0 +387 95 model.c_min 0.02614885422195596 +387 95 model.c_max 2.0650109929473093 +387 95 loss.margin 26.57854706661442 +387 95 loss.adversarial_temperature 0.20620149525691617 +387 95 optimizer.lr 0.0010068827279862188 +387 95 negative_sampler.num_negs_per_pos 45.0 +387 95 training.batch_size 1.0 +387 96 model.embedding_dim 1.0 +387 96 model.c_min 0.08199876562638554 +387 96 model.c_max 1.4073065422704558 +387 96 loss.margin 7.173273354285129 +387 96 loss.adversarial_temperature 
0.12503027245778514 +387 96 optimizer.lr 0.009867436614235897 +387 96 negative_sampler.num_negs_per_pos 69.0 +387 96 training.batch_size 0.0 +387 97 model.embedding_dim 0.0 +387 97 model.c_min 0.07058929285275033 +387 97 model.c_max 8.448445735161984 +387 97 loss.margin 9.053180269943033 +387 97 loss.adversarial_temperature 0.25838852476658586 +387 97 optimizer.lr 0.0051697002175845035 +387 97 negative_sampler.num_negs_per_pos 58.0 +387 97 training.batch_size 0.0 +387 98 model.embedding_dim 2.0 +387 98 model.c_min 0.08747958832083416 +387 98 model.c_max 8.02136129851763 +387 98 loss.margin 7.722740348901965 +387 98 loss.adversarial_temperature 0.33376196717383616 +387 98 optimizer.lr 0.009327559756302657 +387 98 negative_sampler.num_negs_per_pos 39.0 +387 98 training.batch_size 0.0 +387 99 model.embedding_dim 1.0 +387 99 model.c_min 0.03827661403297845 +387 99 model.c_max 4.860936861793192 +387 99 loss.margin 5.248493333634547 +387 99 loss.adversarial_temperature 0.49326934413472145 +387 99 optimizer.lr 0.0050041002216220255 +387 99 negative_sampler.num_negs_per_pos 47.0 +387 99 training.batch_size 2.0 +387 100 model.embedding_dim 1.0 +387 100 model.c_min 0.0357676651226834 +387 100 model.c_max 4.254266451393518 +387 100 loss.margin 1.393573307902688 +387 100 loss.adversarial_temperature 0.32921368564846915 +387 100 optimizer.lr 0.0033387714433850027 +387 100 negative_sampler.num_negs_per_pos 7.0 +387 100 training.batch_size 2.0 +387 1 dataset """kinships""" +387 1 model """kg2e""" +387 1 loss """nssa""" +387 1 regularizer """no""" +387 1 optimizer """adam""" +387 1 training_loop """owa""" +387 1 negative_sampler """basic""" +387 1 evaluator """rankbased""" +387 2 dataset """kinships""" +387 2 model """kg2e""" +387 2 loss """nssa""" +387 2 regularizer """no""" +387 2 optimizer """adam""" +387 2 training_loop """owa""" +387 2 negative_sampler """basic""" +387 2 evaluator """rankbased""" +387 3 dataset """kinships""" +387 3 model """kg2e""" +387 3 loss """nssa""" 
+387 3 regularizer """no""" +387 3 optimizer """adam""" +387 3 training_loop """owa""" +387 3 negative_sampler """basic""" +387 3 evaluator """rankbased""" +387 4 dataset """kinships""" +387 4 model """kg2e""" +387 4 loss """nssa""" +387 4 regularizer """no""" +387 4 optimizer """adam""" +387 4 training_loop """owa""" +387 4 negative_sampler """basic""" +387 4 evaluator """rankbased""" +387 5 dataset """kinships""" +387 5 model """kg2e""" +387 5 loss """nssa""" +387 5 regularizer """no""" +387 5 optimizer """adam""" +387 5 training_loop """owa""" +387 5 negative_sampler """basic""" +387 5 evaluator """rankbased""" +387 6 dataset """kinships""" +387 6 model """kg2e""" +387 6 loss """nssa""" +387 6 regularizer """no""" +387 6 optimizer """adam""" +387 6 training_loop """owa""" +387 6 negative_sampler """basic""" +387 6 evaluator """rankbased""" +387 7 dataset """kinships""" +387 7 model """kg2e""" +387 7 loss """nssa""" +387 7 regularizer """no""" +387 7 optimizer """adam""" +387 7 training_loop """owa""" +387 7 negative_sampler """basic""" +387 7 evaluator """rankbased""" +387 8 dataset """kinships""" +387 8 model """kg2e""" +387 8 loss """nssa""" +387 8 regularizer """no""" +387 8 optimizer """adam""" +387 8 training_loop """owa""" +387 8 negative_sampler """basic""" +387 8 evaluator """rankbased""" +387 9 dataset """kinships""" +387 9 model """kg2e""" +387 9 loss """nssa""" +387 9 regularizer """no""" +387 9 optimizer """adam""" +387 9 training_loop """owa""" +387 9 negative_sampler """basic""" +387 9 evaluator """rankbased""" +387 10 dataset """kinships""" +387 10 model """kg2e""" +387 10 loss """nssa""" +387 10 regularizer """no""" +387 10 optimizer """adam""" +387 10 training_loop """owa""" +387 10 negative_sampler """basic""" +387 10 evaluator """rankbased""" +387 11 dataset """kinships""" +387 11 model """kg2e""" +387 11 loss """nssa""" +387 11 regularizer """no""" +387 11 optimizer """adam""" +387 11 training_loop """owa""" +387 11 negative_sampler 
"""basic""" +387 11 evaluator """rankbased""" +387 12 dataset """kinships""" +387 12 model """kg2e""" +387 12 loss """nssa""" +387 12 regularizer """no""" +387 12 optimizer """adam""" +387 12 training_loop """owa""" +387 12 negative_sampler """basic""" +387 12 evaluator """rankbased""" +387 13 dataset """kinships""" +387 13 model """kg2e""" +387 13 loss """nssa""" +387 13 regularizer """no""" +387 13 optimizer """adam""" +387 13 training_loop """owa""" +387 13 negative_sampler """basic""" +387 13 evaluator """rankbased""" +387 14 dataset """kinships""" +387 14 model """kg2e""" +387 14 loss """nssa""" +387 14 regularizer """no""" +387 14 optimizer """adam""" +387 14 training_loop """owa""" +387 14 negative_sampler """basic""" +387 14 evaluator """rankbased""" +387 15 dataset """kinships""" +387 15 model """kg2e""" +387 15 loss """nssa""" +387 15 regularizer """no""" +387 15 optimizer """adam""" +387 15 training_loop """owa""" +387 15 negative_sampler """basic""" +387 15 evaluator """rankbased""" +387 16 dataset """kinships""" +387 16 model """kg2e""" +387 16 loss """nssa""" +387 16 regularizer """no""" +387 16 optimizer """adam""" +387 16 training_loop """owa""" +387 16 negative_sampler """basic""" +387 16 evaluator """rankbased""" +387 17 dataset """kinships""" +387 17 model """kg2e""" +387 17 loss """nssa""" +387 17 regularizer """no""" +387 17 optimizer """adam""" +387 17 training_loop """owa""" +387 17 negative_sampler """basic""" +387 17 evaluator """rankbased""" +387 18 dataset """kinships""" +387 18 model """kg2e""" +387 18 loss """nssa""" +387 18 regularizer """no""" +387 18 optimizer """adam""" +387 18 training_loop """owa""" +387 18 negative_sampler """basic""" +387 18 evaluator """rankbased""" +387 19 dataset """kinships""" +387 19 model """kg2e""" +387 19 loss """nssa""" +387 19 regularizer """no""" +387 19 optimizer """adam""" +387 19 training_loop """owa""" +387 19 negative_sampler """basic""" +387 19 evaluator """rankbased""" +387 20 dataset 
"""kinships""" +387 20 model """kg2e""" +387 20 loss """nssa""" +387 20 regularizer """no""" +387 20 optimizer """adam""" +387 20 training_loop """owa""" +387 20 negative_sampler """basic""" +387 20 evaluator """rankbased""" +387 21 dataset """kinships""" +387 21 model """kg2e""" +387 21 loss """nssa""" +387 21 regularizer """no""" +387 21 optimizer """adam""" +387 21 training_loop """owa""" +387 21 negative_sampler """basic""" +387 21 evaluator """rankbased""" +387 22 dataset """kinships""" +387 22 model """kg2e""" +387 22 loss """nssa""" +387 22 regularizer """no""" +387 22 optimizer """adam""" +387 22 training_loop """owa""" +387 22 negative_sampler """basic""" +387 22 evaluator """rankbased""" +387 23 dataset """kinships""" +387 23 model """kg2e""" +387 23 loss """nssa""" +387 23 regularizer """no""" +387 23 optimizer """adam""" +387 23 training_loop """owa""" +387 23 negative_sampler """basic""" +387 23 evaluator """rankbased""" +387 24 dataset """kinships""" +387 24 model """kg2e""" +387 24 loss """nssa""" +387 24 regularizer """no""" +387 24 optimizer """adam""" +387 24 training_loop """owa""" +387 24 negative_sampler """basic""" +387 24 evaluator """rankbased""" +387 25 dataset """kinships""" +387 25 model """kg2e""" +387 25 loss """nssa""" +387 25 regularizer """no""" +387 25 optimizer """adam""" +387 25 training_loop """owa""" +387 25 negative_sampler """basic""" +387 25 evaluator """rankbased""" +387 26 dataset """kinships""" +387 26 model """kg2e""" +387 26 loss """nssa""" +387 26 regularizer """no""" +387 26 optimizer """adam""" +387 26 training_loop """owa""" +387 26 negative_sampler """basic""" +387 26 evaluator """rankbased""" +387 27 dataset """kinships""" +387 27 model """kg2e""" +387 27 loss """nssa""" +387 27 regularizer """no""" +387 27 optimizer """adam""" +387 27 training_loop """owa""" +387 27 negative_sampler """basic""" +387 27 evaluator """rankbased""" +387 28 dataset """kinships""" +387 28 model """kg2e""" +387 28 loss """nssa""" +387 28 
regularizer """no""" +387 28 optimizer """adam""" +387 28 training_loop """owa""" +387 28 negative_sampler """basic""" +387 28 evaluator """rankbased""" +387 29 dataset """kinships""" +387 29 model """kg2e""" +387 29 loss """nssa""" +387 29 regularizer """no""" +387 29 optimizer """adam""" +387 29 training_loop """owa""" +387 29 negative_sampler """basic""" +387 29 evaluator """rankbased""" +387 30 dataset """kinships""" +387 30 model """kg2e""" +387 30 loss """nssa""" +387 30 regularizer """no""" +387 30 optimizer """adam""" +387 30 training_loop """owa""" +387 30 negative_sampler """basic""" +387 30 evaluator """rankbased""" +387 31 dataset """kinships""" +387 31 model """kg2e""" +387 31 loss """nssa""" +387 31 regularizer """no""" +387 31 optimizer """adam""" +387 31 training_loop """owa""" +387 31 negative_sampler """basic""" +387 31 evaluator """rankbased""" +387 32 dataset """kinships""" +387 32 model """kg2e""" +387 32 loss """nssa""" +387 32 regularizer """no""" +387 32 optimizer """adam""" +387 32 training_loop """owa""" +387 32 negative_sampler """basic""" +387 32 evaluator """rankbased""" +387 33 dataset """kinships""" +387 33 model """kg2e""" +387 33 loss """nssa""" +387 33 regularizer """no""" +387 33 optimizer """adam""" +387 33 training_loop """owa""" +387 33 negative_sampler """basic""" +387 33 evaluator """rankbased""" +387 34 dataset """kinships""" +387 34 model """kg2e""" +387 34 loss """nssa""" +387 34 regularizer """no""" +387 34 optimizer """adam""" +387 34 training_loop """owa""" +387 34 negative_sampler """basic""" +387 34 evaluator """rankbased""" +387 35 dataset """kinships""" +387 35 model """kg2e""" +387 35 loss """nssa""" +387 35 regularizer """no""" +387 35 optimizer """adam""" +387 35 training_loop """owa""" +387 35 negative_sampler """basic""" +387 35 evaluator """rankbased""" +387 36 dataset """kinships""" +387 36 model """kg2e""" +387 36 loss """nssa""" +387 36 regularizer """no""" +387 36 optimizer """adam""" +387 36 training_loop 
"""owa""" +387 36 negative_sampler """basic""" +387 36 evaluator """rankbased""" +387 37 dataset """kinships""" +387 37 model """kg2e""" +387 37 loss """nssa""" +387 37 regularizer """no""" +387 37 optimizer """adam""" +387 37 training_loop """owa""" +387 37 negative_sampler """basic""" +387 37 evaluator """rankbased""" +387 38 dataset """kinships""" +387 38 model """kg2e""" +387 38 loss """nssa""" +387 38 regularizer """no""" +387 38 optimizer """adam""" +387 38 training_loop """owa""" +387 38 negative_sampler """basic""" +387 38 evaluator """rankbased""" +387 39 dataset """kinships""" +387 39 model """kg2e""" +387 39 loss """nssa""" +387 39 regularizer """no""" +387 39 optimizer """adam""" +387 39 training_loop """owa""" +387 39 negative_sampler """basic""" +387 39 evaluator """rankbased""" +387 40 dataset """kinships""" +387 40 model """kg2e""" +387 40 loss """nssa""" +387 40 regularizer """no""" +387 40 optimizer """adam""" +387 40 training_loop """owa""" +387 40 negative_sampler """basic""" +387 40 evaluator """rankbased""" +387 41 dataset """kinships""" +387 41 model """kg2e""" +387 41 loss """nssa""" +387 41 regularizer """no""" +387 41 optimizer """adam""" +387 41 training_loop """owa""" +387 41 negative_sampler """basic""" +387 41 evaluator """rankbased""" +387 42 dataset """kinships""" +387 42 model """kg2e""" +387 42 loss """nssa""" +387 42 regularizer """no""" +387 42 optimizer """adam""" +387 42 training_loop """owa""" +387 42 negative_sampler """basic""" +387 42 evaluator """rankbased""" +387 43 dataset """kinships""" +387 43 model """kg2e""" +387 43 loss """nssa""" +387 43 regularizer """no""" +387 43 optimizer """adam""" +387 43 training_loop """owa""" +387 43 negative_sampler """basic""" +387 43 evaluator """rankbased""" +387 44 dataset """kinships""" +387 44 model """kg2e""" +387 44 loss """nssa""" +387 44 regularizer """no""" +387 44 optimizer """adam""" +387 44 training_loop """owa""" +387 44 negative_sampler """basic""" +387 44 evaluator 
"""rankbased""" +387 45 dataset """kinships""" +387 45 model """kg2e""" +387 45 loss """nssa""" +387 45 regularizer """no""" +387 45 optimizer """adam""" +387 45 training_loop """owa""" +387 45 negative_sampler """basic""" +387 45 evaluator """rankbased""" +387 46 dataset """kinships""" +387 46 model """kg2e""" +387 46 loss """nssa""" +387 46 regularizer """no""" +387 46 optimizer """adam""" +387 46 training_loop """owa""" +387 46 negative_sampler """basic""" +387 46 evaluator """rankbased""" +387 47 dataset """kinships""" +387 47 model """kg2e""" +387 47 loss """nssa""" +387 47 regularizer """no""" +387 47 optimizer """adam""" +387 47 training_loop """owa""" +387 47 negative_sampler """basic""" +387 47 evaluator """rankbased""" +387 48 dataset """kinships""" +387 48 model """kg2e""" +387 48 loss """nssa""" +387 48 regularizer """no""" +387 48 optimizer """adam""" +387 48 training_loop """owa""" +387 48 negative_sampler """basic""" +387 48 evaluator """rankbased""" +387 49 dataset """kinships""" +387 49 model """kg2e""" +387 49 loss """nssa""" +387 49 regularizer """no""" +387 49 optimizer """adam""" +387 49 training_loop """owa""" +387 49 negative_sampler """basic""" +387 49 evaluator """rankbased""" +387 50 dataset """kinships""" +387 50 model """kg2e""" +387 50 loss """nssa""" +387 50 regularizer """no""" +387 50 optimizer """adam""" +387 50 training_loop """owa""" +387 50 negative_sampler """basic""" +387 50 evaluator """rankbased""" +387 51 dataset """kinships""" +387 51 model """kg2e""" +387 51 loss """nssa""" +387 51 regularizer """no""" +387 51 optimizer """adam""" +387 51 training_loop """owa""" +387 51 negative_sampler """basic""" +387 51 evaluator """rankbased""" +387 52 dataset """kinships""" +387 52 model """kg2e""" +387 52 loss """nssa""" +387 52 regularizer """no""" +387 52 optimizer """adam""" +387 52 training_loop """owa""" +387 52 negative_sampler """basic""" +387 52 evaluator """rankbased""" +387 53 dataset """kinships""" +387 53 model """kg2e""" 
+387 53 loss """nssa""" +387 53 regularizer """no""" +387 53 optimizer """adam""" +387 53 training_loop """owa""" +387 53 negative_sampler """basic""" +387 53 evaluator """rankbased""" +387 54 dataset """kinships""" +387 54 model """kg2e""" +387 54 loss """nssa""" +387 54 regularizer """no""" +387 54 optimizer """adam""" +387 54 training_loop """owa""" +387 54 negative_sampler """basic""" +387 54 evaluator """rankbased""" +387 55 dataset """kinships""" +387 55 model """kg2e""" +387 55 loss """nssa""" +387 55 regularizer """no""" +387 55 optimizer """adam""" +387 55 training_loop """owa""" +387 55 negative_sampler """basic""" +387 55 evaluator """rankbased""" +387 56 dataset """kinships""" +387 56 model """kg2e""" +387 56 loss """nssa""" +387 56 regularizer """no""" +387 56 optimizer """adam""" +387 56 training_loop """owa""" +387 56 negative_sampler """basic""" +387 56 evaluator """rankbased""" +387 57 dataset """kinships""" +387 57 model """kg2e""" +387 57 loss """nssa""" +387 57 regularizer """no""" +387 57 optimizer """adam""" +387 57 training_loop """owa""" +387 57 negative_sampler """basic""" +387 57 evaluator """rankbased""" +387 58 dataset """kinships""" +387 58 model """kg2e""" +387 58 loss """nssa""" +387 58 regularizer """no""" +387 58 optimizer """adam""" +387 58 training_loop """owa""" +387 58 negative_sampler """basic""" +387 58 evaluator """rankbased""" +387 59 dataset """kinships""" +387 59 model """kg2e""" +387 59 loss """nssa""" +387 59 regularizer """no""" +387 59 optimizer """adam""" +387 59 training_loop """owa""" +387 59 negative_sampler """basic""" +387 59 evaluator """rankbased""" +387 60 dataset """kinships""" +387 60 model """kg2e""" +387 60 loss """nssa""" +387 60 regularizer """no""" +387 60 optimizer """adam""" +387 60 training_loop """owa""" +387 60 negative_sampler """basic""" +387 60 evaluator """rankbased""" +387 61 dataset """kinships""" +387 61 model """kg2e""" +387 61 loss """nssa""" +387 61 regularizer """no""" +387 61 optimizer 
"""adam""" +387 61 training_loop """owa""" +387 61 negative_sampler """basic""" +387 61 evaluator """rankbased""" +387 62 dataset """kinships""" +387 62 model """kg2e""" +387 62 loss """nssa""" +387 62 regularizer """no""" +387 62 optimizer """adam""" +387 62 training_loop """owa""" +387 62 negative_sampler """basic""" +387 62 evaluator """rankbased""" +387 63 dataset """kinships""" +387 63 model """kg2e""" +387 63 loss """nssa""" +387 63 regularizer """no""" +387 63 optimizer """adam""" +387 63 training_loop """owa""" +387 63 negative_sampler """basic""" +387 63 evaluator """rankbased""" +387 64 dataset """kinships""" +387 64 model """kg2e""" +387 64 loss """nssa""" +387 64 regularizer """no""" +387 64 optimizer """adam""" +387 64 training_loop """owa""" +387 64 negative_sampler """basic""" +387 64 evaluator """rankbased""" +387 65 dataset """kinships""" +387 65 model """kg2e""" +387 65 loss """nssa""" +387 65 regularizer """no""" +387 65 optimizer """adam""" +387 65 training_loop """owa""" +387 65 negative_sampler """basic""" +387 65 evaluator """rankbased""" +387 66 dataset """kinships""" +387 66 model """kg2e""" +387 66 loss """nssa""" +387 66 regularizer """no""" +387 66 optimizer """adam""" +387 66 training_loop """owa""" +387 66 negative_sampler """basic""" +387 66 evaluator """rankbased""" +387 67 dataset """kinships""" +387 67 model """kg2e""" +387 67 loss """nssa""" +387 67 regularizer """no""" +387 67 optimizer """adam""" +387 67 training_loop """owa""" +387 67 negative_sampler """basic""" +387 67 evaluator """rankbased""" +387 68 dataset """kinships""" +387 68 model """kg2e""" +387 68 loss """nssa""" +387 68 regularizer """no""" +387 68 optimizer """adam""" +387 68 training_loop """owa""" +387 68 negative_sampler """basic""" +387 68 evaluator """rankbased""" +387 69 dataset """kinships""" +387 69 model """kg2e""" +387 69 loss """nssa""" +387 69 regularizer """no""" +387 69 optimizer """adam""" +387 69 training_loop """owa""" +387 69 negative_sampler 
"""basic""" +387 69 evaluator """rankbased""" +387 70 dataset """kinships""" +387 70 model """kg2e""" +387 70 loss """nssa""" +387 70 regularizer """no""" +387 70 optimizer """adam""" +387 70 training_loop """owa""" +387 70 negative_sampler """basic""" +387 70 evaluator """rankbased""" +387 71 dataset """kinships""" +387 71 model """kg2e""" +387 71 loss """nssa""" +387 71 regularizer """no""" +387 71 optimizer """adam""" +387 71 training_loop """owa""" +387 71 negative_sampler """basic""" +387 71 evaluator """rankbased""" +387 72 dataset """kinships""" +387 72 model """kg2e""" +387 72 loss """nssa""" +387 72 regularizer """no""" +387 72 optimizer """adam""" +387 72 training_loop """owa""" +387 72 negative_sampler """basic""" +387 72 evaluator """rankbased""" +387 73 dataset """kinships""" +387 73 model """kg2e""" +387 73 loss """nssa""" +387 73 regularizer """no""" +387 73 optimizer """adam""" +387 73 training_loop """owa""" +387 73 negative_sampler """basic""" +387 73 evaluator """rankbased""" +387 74 dataset """kinships""" +387 74 model """kg2e""" +387 74 loss """nssa""" +387 74 regularizer """no""" +387 74 optimizer """adam""" +387 74 training_loop """owa""" +387 74 negative_sampler """basic""" +387 74 evaluator """rankbased""" +387 75 dataset """kinships""" +387 75 model """kg2e""" +387 75 loss """nssa""" +387 75 regularizer """no""" +387 75 optimizer """adam""" +387 75 training_loop """owa""" +387 75 negative_sampler """basic""" +387 75 evaluator """rankbased""" +387 76 dataset """kinships""" +387 76 model """kg2e""" +387 76 loss """nssa""" +387 76 regularizer """no""" +387 76 optimizer """adam""" +387 76 training_loop """owa""" +387 76 negative_sampler """basic""" +387 76 evaluator """rankbased""" +387 77 dataset """kinships""" +387 77 model """kg2e""" +387 77 loss """nssa""" +387 77 regularizer """no""" +387 77 optimizer """adam""" +387 77 training_loop """owa""" +387 77 negative_sampler """basic""" +387 77 evaluator """rankbased""" +387 78 dataset 
"""kinships""" +387 78 model """kg2e""" +387 78 loss """nssa""" +387 78 regularizer """no""" +387 78 optimizer """adam""" +387 78 training_loop """owa""" +387 78 negative_sampler """basic""" +387 78 evaluator """rankbased""" +387 79 dataset """kinships""" +387 79 model """kg2e""" +387 79 loss """nssa""" +387 79 regularizer """no""" +387 79 optimizer """adam""" +387 79 training_loop """owa""" +387 79 negative_sampler """basic""" +387 79 evaluator """rankbased""" +387 80 dataset """kinships""" +387 80 model """kg2e""" +387 80 loss """nssa""" +387 80 regularizer """no""" +387 80 optimizer """adam""" +387 80 training_loop """owa""" +387 80 negative_sampler """basic""" +387 80 evaluator """rankbased""" +387 81 dataset """kinships""" +387 81 model """kg2e""" +387 81 loss """nssa""" +387 81 regularizer """no""" +387 81 optimizer """adam""" +387 81 training_loop """owa""" +387 81 negative_sampler """basic""" +387 81 evaluator """rankbased""" +387 82 dataset """kinships""" +387 82 model """kg2e""" +387 82 loss """nssa""" +387 82 regularizer """no""" +387 82 optimizer """adam""" +387 82 training_loop """owa""" +387 82 negative_sampler """basic""" +387 82 evaluator """rankbased""" +387 83 dataset """kinships""" +387 83 model """kg2e""" +387 83 loss """nssa""" +387 83 regularizer """no""" +387 83 optimizer """adam""" +387 83 training_loop """owa""" +387 83 negative_sampler """basic""" +387 83 evaluator """rankbased""" +387 84 dataset """kinships""" +387 84 model """kg2e""" +387 84 loss """nssa""" +387 84 regularizer """no""" +387 84 optimizer """adam""" +387 84 training_loop """owa""" +387 84 negative_sampler """basic""" +387 84 evaluator """rankbased""" +387 85 dataset """kinships""" +387 85 model """kg2e""" +387 85 loss """nssa""" +387 85 regularizer """no""" +387 85 optimizer """adam""" +387 85 training_loop """owa""" +387 85 negative_sampler """basic""" +387 85 evaluator """rankbased""" +387 86 dataset """kinships""" +387 86 model """kg2e""" +387 86 loss """nssa""" +387 86 
regularizer """no""" +387 86 optimizer """adam""" +387 86 training_loop """owa""" +387 86 negative_sampler """basic""" +387 86 evaluator """rankbased""" +387 87 dataset """kinships""" +387 87 model """kg2e""" +387 87 loss """nssa""" +387 87 regularizer """no""" +387 87 optimizer """adam""" +387 87 training_loop """owa""" +387 87 negative_sampler """basic""" +387 87 evaluator """rankbased""" +387 88 dataset """kinships""" +387 88 model """kg2e""" +387 88 loss """nssa""" +387 88 regularizer """no""" +387 88 optimizer """adam""" +387 88 training_loop """owa""" +387 88 negative_sampler """basic""" +387 88 evaluator """rankbased""" +387 89 dataset """kinships""" +387 89 model """kg2e""" +387 89 loss """nssa""" +387 89 regularizer """no""" +387 89 optimizer """adam""" +387 89 training_loop """owa""" +387 89 negative_sampler """basic""" +387 89 evaluator """rankbased""" +387 90 dataset """kinships""" +387 90 model """kg2e""" +387 90 loss """nssa""" +387 90 regularizer """no""" +387 90 optimizer """adam""" +387 90 training_loop """owa""" +387 90 negative_sampler """basic""" +387 90 evaluator """rankbased""" +387 91 dataset """kinships""" +387 91 model """kg2e""" +387 91 loss """nssa""" +387 91 regularizer """no""" +387 91 optimizer """adam""" +387 91 training_loop """owa""" +387 91 negative_sampler """basic""" +387 91 evaluator """rankbased""" +387 92 dataset """kinships""" +387 92 model """kg2e""" +387 92 loss """nssa""" +387 92 regularizer """no""" +387 92 optimizer """adam""" +387 92 training_loop """owa""" +387 92 negative_sampler """basic""" +387 92 evaluator """rankbased""" +387 93 dataset """kinships""" +387 93 model """kg2e""" +387 93 loss """nssa""" +387 93 regularizer """no""" +387 93 optimizer """adam""" +387 93 training_loop """owa""" +387 93 negative_sampler """basic""" +387 93 evaluator """rankbased""" +387 94 dataset """kinships""" +387 94 model """kg2e""" +387 94 loss """nssa""" +387 94 regularizer """no""" +387 94 optimizer """adam""" +387 94 training_loop 
"""owa""" +387 94 negative_sampler """basic""" +387 94 evaluator """rankbased""" +387 95 dataset """kinships""" +387 95 model """kg2e""" +387 95 loss """nssa""" +387 95 regularizer """no""" +387 95 optimizer """adam""" +387 95 training_loop """owa""" +387 95 negative_sampler """basic""" +387 95 evaluator """rankbased""" +387 96 dataset """kinships""" +387 96 model """kg2e""" +387 96 loss """nssa""" +387 96 regularizer """no""" +387 96 optimizer """adam""" +387 96 training_loop """owa""" +387 96 negative_sampler """basic""" +387 96 evaluator """rankbased""" +387 97 dataset """kinships""" +387 97 model """kg2e""" +387 97 loss """nssa""" +387 97 regularizer """no""" +387 97 optimizer """adam""" +387 97 training_loop """owa""" +387 97 negative_sampler """basic""" +387 97 evaluator """rankbased""" +387 98 dataset """kinships""" +387 98 model """kg2e""" +387 98 loss """nssa""" +387 98 regularizer """no""" +387 98 optimizer """adam""" +387 98 training_loop """owa""" +387 98 negative_sampler """basic""" +387 98 evaluator """rankbased""" +387 99 dataset """kinships""" +387 99 model """kg2e""" +387 99 loss """nssa""" +387 99 regularizer """no""" +387 99 optimizer """adam""" +387 99 training_loop """owa""" +387 99 negative_sampler """basic""" +387 99 evaluator """rankbased""" +387 100 dataset """kinships""" +387 100 model """kg2e""" +387 100 loss """nssa""" +387 100 regularizer """no""" +387 100 optimizer """adam""" +387 100 training_loop """owa""" +387 100 negative_sampler """basic""" +387 100 evaluator """rankbased""" +388 1 model.embedding_dim 0.0 +388 1 model.c_min 0.020041123538531054 +388 1 model.c_max 7.929409967831326 +388 1 loss.margin 8.056253638926732 +388 1 loss.adversarial_temperature 0.36959946411824895 +388 1 optimizer.lr 0.016433857603685494 +388 1 negative_sampler.num_negs_per_pos 57.0 +388 1 training.batch_size 2.0 +388 2 model.embedding_dim 2.0 +388 2 model.c_min 0.02191591064821124 +388 2 model.c_max 5.81709875519601 +388 2 loss.margin 16.747262400576517 
+388 2 loss.adversarial_temperature 0.9686217766334135 +388 2 optimizer.lr 0.004363106250208648 +388 2 negative_sampler.num_negs_per_pos 25.0 +388 2 training.batch_size 1.0 +388 3 model.embedding_dim 0.0 +388 3 model.c_min 0.018576349520739496 +388 3 model.c_max 3.5550235028755526 +388 3 loss.margin 25.04062602822329 +388 3 loss.adversarial_temperature 0.7368295069043104 +388 3 optimizer.lr 0.03358793202451951 +388 3 negative_sampler.num_negs_per_pos 89.0 +388 3 training.batch_size 1.0 +388 4 model.embedding_dim 0.0 +388 4 model.c_min 0.045160575746528486 +388 4 model.c_max 9.020075631626597 +388 4 loss.margin 2.2660415300793106 +388 4 loss.adversarial_temperature 0.4058661860388031 +388 4 optimizer.lr 0.0038763850291347406 +388 4 negative_sampler.num_negs_per_pos 91.0 +388 4 training.batch_size 1.0 +388 5 model.embedding_dim 1.0 +388 5 model.c_min 0.03832852858569888 +388 5 model.c_max 3.4292297056589027 +388 5 loss.margin 23.37113457349584 +388 5 loss.adversarial_temperature 0.31463588567896333 +388 5 optimizer.lr 0.02754740848477941 +388 5 negative_sampler.num_negs_per_pos 40.0 +388 5 training.batch_size 2.0 +388 6 model.embedding_dim 0.0 +388 6 model.c_min 0.021035861452075408 +388 6 model.c_max 9.309465152824115 +388 6 loss.margin 3.3088415451448876 +388 6 loss.adversarial_temperature 0.4285886711669168 +388 6 optimizer.lr 0.06696820410336003 +388 6 negative_sampler.num_negs_per_pos 78.0 +388 6 training.batch_size 1.0 +388 7 model.embedding_dim 1.0 +388 7 model.c_min 0.010930559351223616 +388 7 model.c_max 6.2180455247424575 +388 7 loss.margin 10.171592953487512 +388 7 loss.adversarial_temperature 0.5775034152097677 +388 7 optimizer.lr 0.009692350702886427 +388 7 negative_sampler.num_negs_per_pos 3.0 +388 7 training.batch_size 0.0 +388 8 model.embedding_dim 2.0 +388 8 model.c_min 0.0637385781313219 +388 8 model.c_max 6.635984049683883 +388 8 loss.margin 20.47866826620467 +388 8 loss.adversarial_temperature 0.5675756080604126 +388 8 optimizer.lr 
0.026822908073276197 +388 8 negative_sampler.num_negs_per_pos 74.0 +388 8 training.batch_size 2.0 +388 9 model.embedding_dim 1.0 +388 9 model.c_min 0.09424831833901526 +388 9 model.c_max 7.890749406339901 +388 9 loss.margin 16.656313773525646 +388 9 loss.adversarial_temperature 0.2078679999182805 +388 9 optimizer.lr 0.007313101556220605 +388 9 negative_sampler.num_negs_per_pos 94.0 +388 9 training.batch_size 0.0 +388 10 model.embedding_dim 2.0 +388 10 model.c_min 0.010970904457012091 +388 10 model.c_max 3.798958923691736 +388 10 loss.margin 19.634886011931254 +388 10 loss.adversarial_temperature 0.19795258510404384 +388 10 optimizer.lr 0.0025375217454754236 +388 10 negative_sampler.num_negs_per_pos 76.0 +388 10 training.batch_size 2.0 +388 11 model.embedding_dim 0.0 +388 11 model.c_min 0.04912307319968713 +388 11 model.c_max 4.809561729488466 +388 11 loss.margin 29.70198572478877 +388 11 loss.adversarial_temperature 0.48027166076689065 +388 11 optimizer.lr 0.007793020606112377 +388 11 negative_sampler.num_negs_per_pos 16.0 +388 11 training.batch_size 1.0 +388 12 model.embedding_dim 0.0 +388 12 model.c_min 0.028536079515236786 +388 12 model.c_max 3.0622304113203667 +388 12 loss.margin 12.48070459259984 +388 12 loss.adversarial_temperature 0.7960178636863539 +388 12 optimizer.lr 0.01115057379432575 +388 12 negative_sampler.num_negs_per_pos 19.0 +388 12 training.batch_size 1.0 +388 13 model.embedding_dim 1.0 +388 13 model.c_min 0.06499029402404512 +388 13 model.c_max 8.355396890421513 +388 13 loss.margin 25.23331627432246 +388 13 loss.adversarial_temperature 0.8557835860565723 +388 13 optimizer.lr 0.004022809485344617 +388 13 negative_sampler.num_negs_per_pos 17.0 +388 13 training.batch_size 1.0 +388 14 model.embedding_dim 0.0 +388 14 model.c_min 0.02957562112618618 +388 14 model.c_max 4.007585000046534 +388 14 loss.margin 8.02070079148924 +388 14 loss.adversarial_temperature 0.2738929250332364 +388 14 optimizer.lr 0.028052353537418084 +388 14 
negative_sampler.num_negs_per_pos 20.0 +388 14 training.batch_size 0.0 +388 15 model.embedding_dim 2.0 +388 15 model.c_min 0.0321801035045026 +388 15 model.c_max 5.6090439677458255 +388 15 loss.margin 4.240377278544745 +388 15 loss.adversarial_temperature 0.23524460422451485 +388 15 optimizer.lr 0.01332438703811296 +388 15 negative_sampler.num_negs_per_pos 12.0 +388 15 training.batch_size 2.0 +388 16 model.embedding_dim 1.0 +388 16 model.c_min 0.027855981664843887 +388 16 model.c_max 9.941206870252525 +388 16 loss.margin 18.939220829790916 +388 16 loss.adversarial_temperature 0.32397444257179187 +388 16 optimizer.lr 0.0022756356041438103 +388 16 negative_sampler.num_negs_per_pos 4.0 +388 16 training.batch_size 1.0 +388 17 model.embedding_dim 1.0 +388 17 model.c_min 0.020561644907539978 +388 17 model.c_max 5.252312561339604 +388 17 loss.margin 2.648363389846788 +388 17 loss.adversarial_temperature 0.9787704360606987 +388 17 optimizer.lr 0.002233910466142657 +388 17 negative_sampler.num_negs_per_pos 49.0 +388 17 training.batch_size 0.0 +388 18 model.embedding_dim 0.0 +388 18 model.c_min 0.02479032919926247 +388 18 model.c_max 9.413099019382491 +388 18 loss.margin 21.747471363239562 +388 18 loss.adversarial_temperature 0.14507756253769488 +388 18 optimizer.lr 0.01644393198477061 +388 18 negative_sampler.num_negs_per_pos 36.0 +388 18 training.batch_size 2.0 +388 19 model.embedding_dim 1.0 +388 19 model.c_min 0.032576469408819475 +388 19 model.c_max 3.557827259171749 +388 19 loss.margin 14.435310855648146 +388 19 loss.adversarial_temperature 0.8740673979137873 +388 19 optimizer.lr 0.03372945769267382 +388 19 negative_sampler.num_negs_per_pos 24.0 +388 19 training.batch_size 0.0 +388 20 model.embedding_dim 1.0 +388 20 model.c_min 0.013009184826745944 +388 20 model.c_max 6.3645379431781235 +388 20 loss.margin 3.9889221190475244 +388 20 loss.adversarial_temperature 0.6252318213425122 +388 20 optimizer.lr 0.003284409109402743 +388 20 negative_sampler.num_negs_per_pos 9.0 
+388 20 training.batch_size 0.0 +388 21 model.embedding_dim 1.0 +388 21 model.c_min 0.026301139905504405 +388 21 model.c_max 8.807633472647403 +388 21 loss.margin 27.217851499093047 +388 21 loss.adversarial_temperature 0.5955110914858737 +388 21 optimizer.lr 0.032202242589110475 +388 21 negative_sampler.num_negs_per_pos 69.0 +388 21 training.batch_size 1.0 +388 22 model.embedding_dim 2.0 +388 22 model.c_min 0.048261562686383326 +388 22 model.c_max 4.202408797944285 +388 22 loss.margin 18.96435522681732 +388 22 loss.adversarial_temperature 0.21871352904929792 +388 22 optimizer.lr 0.0012942729177883557 +388 22 negative_sampler.num_negs_per_pos 14.0 +388 22 training.batch_size 2.0 +388 23 model.embedding_dim 2.0 +388 23 model.c_min 0.06254448116845177 +388 23 model.c_max 4.909762303369976 +388 23 loss.margin 15.859361489657068 +388 23 loss.adversarial_temperature 0.4138193984604127 +388 23 optimizer.lr 0.027827200313528276 +388 23 negative_sampler.num_negs_per_pos 57.0 +388 23 training.batch_size 2.0 +388 24 model.embedding_dim 2.0 +388 24 model.c_min 0.04174168787255273 +388 24 model.c_max 2.9402623274735062 +388 24 loss.margin 14.298843207973594 +388 24 loss.adversarial_temperature 0.3330763778348191 +388 24 optimizer.lr 0.0584140018541873 +388 24 negative_sampler.num_negs_per_pos 51.0 +388 24 training.batch_size 1.0 +388 25 model.embedding_dim 0.0 +388 25 model.c_min 0.056898469707999326 +388 25 model.c_max 8.692934829428715 +388 25 loss.margin 4.2000899569337005 +388 25 loss.adversarial_temperature 0.39911335653724433 +388 25 optimizer.lr 0.00360160743479755 +388 25 negative_sampler.num_negs_per_pos 93.0 +388 25 training.batch_size 2.0 +388 26 model.embedding_dim 2.0 +388 26 model.c_min 0.05006446348548674 +388 26 model.c_max 8.925040145730105 +388 26 loss.margin 9.142946375059518 +388 26 loss.adversarial_temperature 0.4881232860267932 +388 26 optimizer.lr 0.006922513125322586 +388 26 negative_sampler.num_negs_per_pos 3.0 +388 26 training.batch_size 2.0 +388 27 
model.embedding_dim 1.0 +388 27 model.c_min 0.03286095797442574 +388 27 model.c_max 7.531784314245147 +388 27 loss.margin 9.701517682503942 +388 27 loss.adversarial_temperature 0.5595093191283497 +388 27 optimizer.lr 0.00911907360653386 +388 27 negative_sampler.num_negs_per_pos 8.0 +388 27 training.batch_size 2.0 +388 28 model.embedding_dim 1.0 +388 28 model.c_min 0.037622447781532675 +388 28 model.c_max 5.220762990090746 +388 28 loss.margin 5.514580915836259 +388 28 loss.adversarial_temperature 0.8044868116408458 +388 28 optimizer.lr 0.007427119551109641 +388 28 negative_sampler.num_negs_per_pos 34.0 +388 28 training.batch_size 2.0 +388 29 model.embedding_dim 1.0 +388 29 model.c_min 0.07591315804183557 +388 29 model.c_max 8.938079954994372 +388 29 loss.margin 25.767581816292875 +388 29 loss.adversarial_temperature 0.8020308616708377 +388 29 optimizer.lr 0.029627846297415817 +388 29 negative_sampler.num_negs_per_pos 58.0 +388 29 training.batch_size 1.0 +388 30 model.embedding_dim 2.0 +388 30 model.c_min 0.05868501932900416 +388 30 model.c_max 9.219684799088846 +388 30 loss.margin 3.6206375715727073 +388 30 loss.adversarial_temperature 0.14730073278330127 +388 30 optimizer.lr 0.0016696771618499465 +388 30 negative_sampler.num_negs_per_pos 69.0 +388 30 training.batch_size 2.0 +388 31 model.embedding_dim 1.0 +388 31 model.c_min 0.018876181951607004 +388 31 model.c_max 2.6727950862656273 +388 31 loss.margin 6.131792106142652 +388 31 loss.adversarial_temperature 0.9007371239152923 +388 31 optimizer.lr 0.0069920368646860555 +388 31 negative_sampler.num_negs_per_pos 8.0 +388 31 training.batch_size 1.0 +388 32 model.embedding_dim 2.0 +388 32 model.c_min 0.023370822032414276 +388 32 model.c_max 9.665895219374047 +388 32 loss.margin 13.88622167561456 +388 32 loss.adversarial_temperature 0.43280142029264 +388 32 optimizer.lr 0.010339389452117086 +388 32 negative_sampler.num_negs_per_pos 34.0 +388 32 training.batch_size 2.0 +388 33 model.embedding_dim 1.0 +388 33 model.c_min 
0.07451293116272446 +388 33 model.c_max 3.6803275533703497 +388 33 loss.margin 24.435500446318734 +388 33 loss.adversarial_temperature 0.9164166499433958 +388 33 optimizer.lr 0.0010412954930426837 +388 33 negative_sampler.num_negs_per_pos 99.0 +388 33 training.batch_size 1.0 +388 34 model.embedding_dim 2.0 +388 34 model.c_min 0.06290920792132945 +388 34 model.c_max 4.485571062020636 +388 34 loss.margin 5.062609144285902 +388 34 loss.adversarial_temperature 0.9901910306418791 +388 34 optimizer.lr 0.019078605286629233 +388 34 negative_sampler.num_negs_per_pos 38.0 +388 34 training.batch_size 1.0 +388 35 model.embedding_dim 2.0 +388 35 model.c_min 0.022467931285331977 +388 35 model.c_max 8.66462237361644 +388 35 loss.margin 12.816637224003777 +388 35 loss.adversarial_temperature 0.7992298281184949 +388 35 optimizer.lr 0.038201218916782284 +388 35 negative_sampler.num_negs_per_pos 40.0 +388 35 training.batch_size 2.0 +388 36 model.embedding_dim 1.0 +388 36 model.c_min 0.02578871962941565 +388 36 model.c_max 1.721403406255115 +388 36 loss.margin 29.92112859486026 +388 36 loss.adversarial_temperature 0.21860923839783164 +388 36 optimizer.lr 0.018336425574890333 +388 36 negative_sampler.num_negs_per_pos 85.0 +388 36 training.batch_size 1.0 +388 37 model.embedding_dim 1.0 +388 37 model.c_min 0.01514396713190669 +388 37 model.c_max 8.048671997692033 +388 37 loss.margin 22.885658577577843 +388 37 loss.adversarial_temperature 0.6152091625558679 +388 37 optimizer.lr 0.014932788193439227 +388 37 negative_sampler.num_negs_per_pos 52.0 +388 37 training.batch_size 2.0 +388 38 model.embedding_dim 1.0 +388 38 model.c_min 0.016747561085699667 +388 38 model.c_max 5.630412140522381 +388 38 loss.margin 10.067623741003759 +388 38 loss.adversarial_temperature 0.9641259070474204 +388 38 optimizer.lr 0.043887132459810664 +388 38 negative_sampler.num_negs_per_pos 59.0 +388 38 training.batch_size 1.0 +388 39 model.embedding_dim 2.0 +388 39 model.c_min 0.016759152179577328 +388 39 model.c_max 
6.619002480480347 +388 39 loss.margin 18.62142394767688 +388 39 loss.adversarial_temperature 0.5529156905628411 +388 39 optimizer.lr 0.01093691814726096 +388 39 negative_sampler.num_negs_per_pos 49.0 +388 39 training.batch_size 1.0 +388 40 model.embedding_dim 1.0 +388 40 model.c_min 0.045448364153704006 +388 40 model.c_max 1.0270953838595407 +388 40 loss.margin 5.392606798115183 +388 40 loss.adversarial_temperature 0.2153671242597347 +388 40 optimizer.lr 0.0011178457631656768 +388 40 negative_sampler.num_negs_per_pos 66.0 +388 40 training.batch_size 2.0 +388 41 model.embedding_dim 2.0 +388 41 model.c_min 0.0756228934766262 +388 41 model.c_max 7.591402973959694 +388 41 loss.margin 21.414734710052876 +388 41 loss.adversarial_temperature 0.2411575865693274 +388 41 optimizer.lr 0.011873876581248501 +388 41 negative_sampler.num_negs_per_pos 82.0 +388 41 training.batch_size 1.0 +388 42 model.embedding_dim 0.0 +388 42 model.c_min 0.06735753487028684 +388 42 model.c_max 2.620741836968236 +388 42 loss.margin 16.760409296870634 +388 42 loss.adversarial_temperature 0.5296671787745347 +388 42 optimizer.lr 0.017916884775528233 +388 42 negative_sampler.num_negs_per_pos 87.0 +388 42 training.batch_size 0.0 +388 43 model.embedding_dim 2.0 +388 43 model.c_min 0.04539859415835546 +388 43 model.c_max 2.6530062236042258 +388 43 loss.margin 4.650229563957083 +388 43 loss.adversarial_temperature 0.9158395043113428 +388 43 optimizer.lr 0.0021143574442715205 +388 43 negative_sampler.num_negs_per_pos 30.0 +388 43 training.batch_size 0.0 +388 44 model.embedding_dim 0.0 +388 44 model.c_min 0.04849454957156433 +388 44 model.c_max 4.9237052702547786 +388 44 loss.margin 16.169304610721063 +388 44 loss.adversarial_temperature 0.8491635526538657 +388 44 optimizer.lr 0.0067809343367919855 +388 44 negative_sampler.num_negs_per_pos 30.0 +388 44 training.batch_size 2.0 +388 45 model.embedding_dim 0.0 +388 45 model.c_min 0.015693976950457832 +388 45 model.c_max 4.964978071488048 +388 45 loss.margin 
6.791859065411273 +388 45 loss.adversarial_temperature 0.6886445075048117 +388 45 optimizer.lr 0.008186962353347782 +388 45 negative_sampler.num_negs_per_pos 98.0 +388 45 training.batch_size 0.0 +388 46 model.embedding_dim 0.0 +388 46 model.c_min 0.014163910711426395 +388 46 model.c_max 7.458997776422975 +388 46 loss.margin 17.272528219967537 +388 46 loss.adversarial_temperature 0.7204213077080472 +388 46 optimizer.lr 0.0011131512454074595 +388 46 negative_sampler.num_negs_per_pos 33.0 +388 46 training.batch_size 1.0 +388 47 model.embedding_dim 0.0 +388 47 model.c_min 0.010824161602288307 +388 47 model.c_max 8.01901875383448 +388 47 loss.margin 14.560263361246061 +388 47 loss.adversarial_temperature 0.4146409001464828 +388 47 optimizer.lr 0.0447676581672346 +388 47 negative_sampler.num_negs_per_pos 0.0 +388 47 training.batch_size 1.0 +388 48 model.embedding_dim 1.0 +388 48 model.c_min 0.053789597283550636 +388 48 model.c_max 9.831784281706774 +388 48 loss.margin 22.318239300158744 +388 48 loss.adversarial_temperature 0.26464608062967904 +388 48 optimizer.lr 0.029581141592927965 +388 48 negative_sampler.num_negs_per_pos 74.0 +388 48 training.batch_size 1.0 +388 49 model.embedding_dim 0.0 +388 49 model.c_min 0.04293871941546413 +388 49 model.c_max 5.9018052887143275 +388 49 loss.margin 26.490559841716454 +388 49 loss.adversarial_temperature 0.9994164398718202 +388 49 optimizer.lr 0.05352905267200947 +388 49 negative_sampler.num_negs_per_pos 14.0 +388 49 training.batch_size 2.0 +388 50 model.embedding_dim 0.0 +388 50 model.c_min 0.02938671419591684 +388 50 model.c_max 6.769280186700623 +388 50 loss.margin 4.411055163483036 +388 50 loss.adversarial_temperature 0.7926500750283155 +388 50 optimizer.lr 0.0016832187922946205 +388 50 negative_sampler.num_negs_per_pos 80.0 +388 50 training.batch_size 1.0 +388 51 model.embedding_dim 1.0 +388 51 model.c_min 0.03505365566494559 +388 51 model.c_max 6.492040782796106 +388 51 loss.margin 9.845000002674732 +388 51 
loss.adversarial_temperature 0.7405449582584454 +388 51 optimizer.lr 0.0024321666419244666 +388 51 negative_sampler.num_negs_per_pos 54.0 +388 51 training.batch_size 1.0 +388 52 model.embedding_dim 1.0 +388 52 model.c_min 0.07228031047872283 +388 52 model.c_max 4.591552620507129 +388 52 loss.margin 15.949005436782985 +388 52 loss.adversarial_temperature 0.38191681457418414 +388 52 optimizer.lr 0.027489673166577976 +388 52 negative_sampler.num_negs_per_pos 62.0 +388 52 training.batch_size 0.0 +388 53 model.embedding_dim 1.0 +388 53 model.c_min 0.04104277290537827 +388 53 model.c_max 2.751508705410469 +388 53 loss.margin 22.38650936760814 +388 53 loss.adversarial_temperature 0.11801710602872584 +388 53 optimizer.lr 0.01149100061118254 +388 53 negative_sampler.num_negs_per_pos 89.0 +388 53 training.batch_size 0.0 +388 54 model.embedding_dim 2.0 +388 54 model.c_min 0.09548083548343056 +388 54 model.c_max 4.022280712963223 +388 54 loss.margin 9.599235917327086 +388 54 loss.adversarial_temperature 0.49974387814113597 +388 54 optimizer.lr 0.0011678726816605255 +388 54 negative_sampler.num_negs_per_pos 76.0 +388 54 training.batch_size 2.0 +388 55 model.embedding_dim 1.0 +388 55 model.c_min 0.019521213560783294 +388 55 model.c_max 5.587897605465149 +388 55 loss.margin 4.667279370642223 +388 55 loss.adversarial_temperature 0.632319067424192 +388 55 optimizer.lr 0.0021182007217227373 +388 55 negative_sampler.num_negs_per_pos 69.0 +388 55 training.batch_size 0.0 +388 56 model.embedding_dim 2.0 +388 56 model.c_min 0.03780784538549166 +388 56 model.c_max 3.539092847742533 +388 56 loss.margin 4.0375083321142 +388 56 loss.adversarial_temperature 0.4833110238100654 +388 56 optimizer.lr 0.035754782142432114 +388 56 negative_sampler.num_negs_per_pos 23.0 +388 56 training.batch_size 0.0 +388 57 model.embedding_dim 0.0 +388 57 model.c_min 0.09729800509139072 +388 57 model.c_max 5.759607584920464 +388 57 loss.margin 11.955889213420681 +388 57 loss.adversarial_temperature 
0.7477653577230036 +388 57 optimizer.lr 0.001158174488875466 +388 57 negative_sampler.num_negs_per_pos 74.0 +388 57 training.batch_size 1.0 +388 58 model.embedding_dim 0.0 +388 58 model.c_min 0.012138090921921483 +388 58 model.c_max 6.644608942528112 +388 58 loss.margin 29.039560288095 +388 58 loss.adversarial_temperature 0.22917367599921154 +388 58 optimizer.lr 0.08151922517746568 +388 58 negative_sampler.num_negs_per_pos 66.0 +388 58 training.batch_size 2.0 +388 59 model.embedding_dim 2.0 +388 59 model.c_min 0.018045068266781126 +388 59 model.c_max 4.467202169260534 +388 59 loss.margin 26.507893684059106 +388 59 loss.adversarial_temperature 0.7613440420933509 +388 59 optimizer.lr 0.019024672420148995 +388 59 negative_sampler.num_negs_per_pos 22.0 +388 59 training.batch_size 1.0 +388 60 model.embedding_dim 1.0 +388 60 model.c_min 0.01284421779047404 +388 60 model.c_max 6.664573370761772 +388 60 loss.margin 10.625784484542857 +388 60 loss.adversarial_temperature 0.7099418818725788 +388 60 optimizer.lr 0.026550682524528815 +388 60 negative_sampler.num_negs_per_pos 95.0 +388 60 training.batch_size 0.0 +388 61 model.embedding_dim 0.0 +388 61 model.c_min 0.09812991137429032 +388 61 model.c_max 8.722932804249702 +388 61 loss.margin 5.061620900385217 +388 61 loss.adversarial_temperature 0.35527416253519817 +388 61 optimizer.lr 0.0047645467946784105 +388 61 negative_sampler.num_negs_per_pos 62.0 +388 61 training.batch_size 1.0 +388 62 model.embedding_dim 0.0 +388 62 model.c_min 0.014233538409325272 +388 62 model.c_max 4.108252167749974 +388 62 loss.margin 19.46928376092912 +388 62 loss.adversarial_temperature 0.8014499170052565 +388 62 optimizer.lr 0.018077061867642102 +388 62 negative_sampler.num_negs_per_pos 40.0 +388 62 training.batch_size 0.0 +388 63 model.embedding_dim 0.0 +388 63 model.c_min 0.04023019965027201 +388 63 model.c_max 3.648298070231929 +388 63 loss.margin 8.086029384717687 +388 63 loss.adversarial_temperature 0.8305814357118451 +388 63 optimizer.lr 
0.04746325944801047 +388 63 negative_sampler.num_negs_per_pos 4.0 +388 63 training.batch_size 2.0 +388 64 model.embedding_dim 2.0 +388 64 model.c_min 0.06942132018300583 +388 64 model.c_max 8.478080008436692 +388 64 loss.margin 2.9240682613008673 +388 64 loss.adversarial_temperature 0.48049275413552484 +388 64 optimizer.lr 0.06962427414482021 +388 64 negative_sampler.num_negs_per_pos 64.0 +388 64 training.batch_size 2.0 +388 65 model.embedding_dim 0.0 +388 65 model.c_min 0.010695829562020599 +388 65 model.c_max 5.0207854793434255 +388 65 loss.margin 17.71751824523022 +388 65 loss.adversarial_temperature 0.5931255607384699 +388 65 optimizer.lr 0.06722820050529689 +388 65 negative_sampler.num_negs_per_pos 39.0 +388 65 training.batch_size 1.0 +388 66 model.embedding_dim 0.0 +388 66 model.c_min 0.09273437269798618 +388 66 model.c_max 5.266752613070366 +388 66 loss.margin 9.013110484243509 +388 66 loss.adversarial_temperature 0.7057462384651114 +388 66 optimizer.lr 0.008681999725623805 +388 66 negative_sampler.num_negs_per_pos 50.0 +388 66 training.batch_size 2.0 +388 67 model.embedding_dim 0.0 +388 67 model.c_min 0.016815242426916795 +388 67 model.c_max 2.480498136186828 +388 67 loss.margin 25.345067465049258 +388 67 loss.adversarial_temperature 0.19581912652335173 +388 67 optimizer.lr 0.005363936749056188 +388 67 negative_sampler.num_negs_per_pos 41.0 +388 67 training.batch_size 2.0 +388 68 model.embedding_dim 2.0 +388 68 model.c_min 0.05436672793305105 +388 68 model.c_max 6.725769908809018 +388 68 loss.margin 21.76525893224723 +388 68 loss.adversarial_temperature 0.6131359460696649 +388 68 optimizer.lr 0.03715703114135622 +388 68 negative_sampler.num_negs_per_pos 14.0 +388 68 training.batch_size 1.0 +388 69 model.embedding_dim 1.0 +388 69 model.c_min 0.010828844920503374 +388 69 model.c_max 8.511484528303733 +388 69 loss.margin 18.52863762425082 +388 69 loss.adversarial_temperature 0.5165592816850457 +388 69 optimizer.lr 0.0027392477281484857 +388 69 
negative_sampler.num_negs_per_pos 14.0 +388 69 training.batch_size 2.0 +388 70 model.embedding_dim 0.0 +388 70 model.c_min 0.07821269335538901 +388 70 model.c_max 5.5767773759348405 +388 70 loss.margin 17.536258747853335 +388 70 loss.adversarial_temperature 0.7070835370691885 +388 70 optimizer.lr 0.026525834675111784 +388 70 negative_sampler.num_negs_per_pos 48.0 +388 70 training.batch_size 2.0 +388 71 model.embedding_dim 1.0 +388 71 model.c_min 0.05019452066319095 +388 71 model.c_max 8.939386241321115 +388 71 loss.margin 8.142532987915262 +388 71 loss.adversarial_temperature 0.9003773784561133 +388 71 optimizer.lr 0.0023797973902784397 +388 71 negative_sampler.num_negs_per_pos 76.0 +388 71 training.batch_size 1.0 +388 72 model.embedding_dim 0.0 +388 72 model.c_min 0.01666747769137068 +388 72 model.c_max 4.231806624445631 +388 72 loss.margin 2.5693161091409333 +388 72 loss.adversarial_temperature 0.9137343633956226 +388 72 optimizer.lr 0.05308804134298039 +388 72 negative_sampler.num_negs_per_pos 98.0 +388 72 training.batch_size 1.0 +388 73 model.embedding_dim 1.0 +388 73 model.c_min 0.04342371002204828 +388 73 model.c_max 6.991303926723709 +388 73 loss.margin 1.356688110114514 +388 73 loss.adversarial_temperature 0.5392504450392108 +388 73 optimizer.lr 0.08229186222337585 +388 73 negative_sampler.num_negs_per_pos 98.0 +388 73 training.batch_size 1.0 +388 74 model.embedding_dim 0.0 +388 74 model.c_min 0.06809002079495373 +388 74 model.c_max 3.2907366211597924 +388 74 loss.margin 22.376418110787892 +388 74 loss.adversarial_temperature 0.13471769564183247 +388 74 optimizer.lr 0.05030610397016409 +388 74 negative_sampler.num_negs_per_pos 57.0 +388 74 training.batch_size 0.0 +388 75 model.embedding_dim 1.0 +388 75 model.c_min 0.01801248833610072 +388 75 model.c_max 6.325172404128869 +388 75 loss.margin 26.720352572879815 +388 75 loss.adversarial_temperature 0.8363428856063221 +388 75 optimizer.lr 0.048473737320848954 +388 75 negative_sampler.num_negs_per_pos 91.0 +388 
75 training.batch_size 1.0 +388 76 model.embedding_dim 0.0 +388 76 model.c_min 0.05258965370096187 +388 76 model.c_max 9.90957417280039 +388 76 loss.margin 5.9499262746258665 +388 76 loss.adversarial_temperature 0.6376065637173176 +388 76 optimizer.lr 0.00844894513221928 +388 76 negative_sampler.num_negs_per_pos 36.0 +388 76 training.batch_size 0.0 +388 77 model.embedding_dim 2.0 +388 77 model.c_min 0.06770772760787311 +388 77 model.c_max 3.1282543227799016 +388 77 loss.margin 13.787906172002875 +388 77 loss.adversarial_temperature 0.8260325787464804 +388 77 optimizer.lr 0.01882767010710329 +388 77 negative_sampler.num_negs_per_pos 28.0 +388 77 training.batch_size 0.0 +388 78 model.embedding_dim 2.0 +388 78 model.c_min 0.0210940913313083 +388 78 model.c_max 8.017585464398156 +388 78 loss.margin 4.103235891219542 +388 78 loss.adversarial_temperature 0.6877717829005006 +388 78 optimizer.lr 0.030688217577324678 +388 78 negative_sampler.num_negs_per_pos 85.0 +388 78 training.batch_size 2.0 +388 79 model.embedding_dim 1.0 +388 79 model.c_min 0.027110497289412602 +388 79 model.c_max 1.649167789208942 +388 79 loss.margin 3.750479878686834 +388 79 loss.adversarial_temperature 0.3249458545058788 +388 79 optimizer.lr 0.03119821470804933 +388 79 negative_sampler.num_negs_per_pos 55.0 +388 79 training.batch_size 2.0 +388 80 model.embedding_dim 2.0 +388 80 model.c_min 0.05087403419602083 +388 80 model.c_max 2.9032438790214434 +388 80 loss.margin 15.680182972666605 +388 80 loss.adversarial_temperature 0.6563538701297437 +388 80 optimizer.lr 0.05656241818910645 +388 80 negative_sampler.num_negs_per_pos 61.0 +388 80 training.batch_size 2.0 +388 81 model.embedding_dim 2.0 +388 81 model.c_min 0.02433595219908997 +388 81 model.c_max 9.043215492914472 +388 81 loss.margin 13.70552483499412 +388 81 loss.adversarial_temperature 0.3153721874474116 +388 81 optimizer.lr 0.009608604221829133 +388 81 negative_sampler.num_negs_per_pos 73.0 +388 81 training.batch_size 2.0 +388 82 
model.embedding_dim 2.0 +388 82 model.c_min 0.013527826085548285 +388 82 model.c_max 3.205083477768323 +388 82 loss.margin 25.411787361160286 +388 82 loss.adversarial_temperature 0.9150055427531232 +388 82 optimizer.lr 0.03934360124196003 +388 82 negative_sampler.num_negs_per_pos 87.0 +388 82 training.batch_size 2.0 +388 83 model.embedding_dim 2.0 +388 83 model.c_min 0.07699409979764126 +388 83 model.c_max 1.135989003958928 +388 83 loss.margin 1.8734414665948211 +388 83 loss.adversarial_temperature 0.1752446981430883 +388 83 optimizer.lr 0.06801293816396306 +388 83 negative_sampler.num_negs_per_pos 54.0 +388 83 training.batch_size 2.0 +388 84 model.embedding_dim 0.0 +388 84 model.c_min 0.010529565793846741 +388 84 model.c_max 2.0650457204579373 +388 84 loss.margin 13.450370204178062 +388 84 loss.adversarial_temperature 0.5096706836194106 +388 84 optimizer.lr 0.004013580988131674 +388 84 negative_sampler.num_negs_per_pos 8.0 +388 84 training.batch_size 2.0 +388 85 model.embedding_dim 2.0 +388 85 model.c_min 0.052144695992660674 +388 85 model.c_max 3.845945520591762 +388 85 loss.margin 14.322815497748312 +388 85 loss.adversarial_temperature 0.5349534316320539 +388 85 optimizer.lr 0.005172406374263072 +388 85 negative_sampler.num_negs_per_pos 8.0 +388 85 training.batch_size 0.0 +388 86 model.embedding_dim 2.0 +388 86 model.c_min 0.010188545633371893 +388 86 model.c_max 6.268297477368251 +388 86 loss.margin 18.621622447796835 +388 86 loss.adversarial_temperature 0.30824004819980666 +388 86 optimizer.lr 0.027950443417106372 +388 86 negative_sampler.num_negs_per_pos 80.0 +388 86 training.batch_size 2.0 +388 87 model.embedding_dim 0.0 +388 87 model.c_min 0.02051128061608643 +388 87 model.c_max 8.813451216171597 +388 87 loss.margin 7.121879039516706 +388 87 loss.adversarial_temperature 0.42703044952709324 +388 87 optimizer.lr 0.037371746588458335 +388 87 negative_sampler.num_negs_per_pos 95.0 +388 87 training.batch_size 1.0 +388 88 model.embedding_dim 2.0 +388 88 
model.c_min 0.012040610436806745 +388 88 model.c_max 9.80568856980336 +388 88 loss.margin 21.823734700689094 +388 88 loss.adversarial_temperature 0.2287049771568155 +388 88 optimizer.lr 0.00256141275924579 +388 88 negative_sampler.num_negs_per_pos 53.0 +388 88 training.batch_size 2.0 +388 89 model.embedding_dim 0.0 +388 89 model.c_min 0.02182216546529702 +388 89 model.c_max 6.883110361478722 +388 89 loss.margin 20.620510398564804 +388 89 loss.adversarial_temperature 0.5416792741109557 +388 89 optimizer.lr 0.05109015628006054 +388 89 negative_sampler.num_negs_per_pos 96.0 +388 89 training.batch_size 0.0 +388 90 model.embedding_dim 1.0 +388 90 model.c_min 0.044991071062664804 +388 90 model.c_max 5.6360648790063586 +388 90 loss.margin 9.835900710853304 +388 90 loss.adversarial_temperature 0.5834634541048042 +388 90 optimizer.lr 0.08720710655404253 +388 90 negative_sampler.num_negs_per_pos 41.0 +388 90 training.batch_size 0.0 +388 91 model.embedding_dim 1.0 +388 91 model.c_min 0.06888900490452755 +388 91 model.c_max 5.0888106172922685 +388 91 loss.margin 28.95877005196095 +388 91 loss.adversarial_temperature 0.6668934753499975 +388 91 optimizer.lr 0.05348871750901822 +388 91 negative_sampler.num_negs_per_pos 10.0 +388 91 training.batch_size 0.0 +388 92 model.embedding_dim 0.0 +388 92 model.c_min 0.031528939286447934 +388 92 model.c_max 7.903250842972922 +388 92 loss.margin 17.24157847549952 +388 92 loss.adversarial_temperature 0.7410471257664885 +388 92 optimizer.lr 0.001099659961296947 +388 92 negative_sampler.num_negs_per_pos 74.0 +388 92 training.batch_size 2.0 +388 93 model.embedding_dim 1.0 +388 93 model.c_min 0.06671158238672031 +388 93 model.c_max 9.365487616874368 +388 93 loss.margin 1.9445737172845874 +388 93 loss.adversarial_temperature 0.14230333417941282 +388 93 optimizer.lr 0.018948420581289867 +388 93 negative_sampler.num_negs_per_pos 15.0 +388 93 training.batch_size 0.0 +388 94 model.embedding_dim 0.0 +388 94 model.c_min 0.08962519088943416 +388 94 
model.c_max 4.326976309042576 +388 94 loss.margin 13.211961145889196 +388 94 loss.adversarial_temperature 0.466030310816795 +388 94 optimizer.lr 0.08665585254932129 +388 94 negative_sampler.num_negs_per_pos 91.0 +388 94 training.batch_size 1.0 +388 95 model.embedding_dim 1.0 +388 95 model.c_min 0.05475455887902965 +388 95 model.c_max 4.176604236061165 +388 95 loss.margin 23.757426461830185 +388 95 loss.adversarial_temperature 0.845160965889924 +388 95 optimizer.lr 0.0022960543154664226 +388 95 negative_sampler.num_negs_per_pos 55.0 +388 95 training.batch_size 2.0 +388 96 model.embedding_dim 0.0 +388 96 model.c_min 0.012818268602593583 +388 96 model.c_max 6.637188937455502 +388 96 loss.margin 15.156341736515852 +388 96 loss.adversarial_temperature 0.31865675837945495 +388 96 optimizer.lr 0.010546936630871395 +388 96 negative_sampler.num_negs_per_pos 26.0 +388 96 training.batch_size 1.0 +388 97 model.embedding_dim 1.0 +388 97 model.c_min 0.03172192131511506 +388 97 model.c_max 4.6060704193864 +388 97 loss.margin 6.808559969285633 +388 97 loss.adversarial_temperature 0.49121190056749264 +388 97 optimizer.lr 0.010615965198298372 +388 97 negative_sampler.num_negs_per_pos 77.0 +388 97 training.batch_size 1.0 +388 98 model.embedding_dim 2.0 +388 98 model.c_min 0.010509256591815867 +388 98 model.c_max 5.5669000204560035 +388 98 loss.margin 9.47467194299378 +388 98 loss.adversarial_temperature 0.33240187190080306 +388 98 optimizer.lr 0.027922057356831457 +388 98 negative_sampler.num_negs_per_pos 79.0 +388 98 training.batch_size 1.0 +388 99 model.embedding_dim 0.0 +388 99 model.c_min 0.042230473651607224 +388 99 model.c_max 7.9036971780785965 +388 99 loss.margin 23.829836405653843 +388 99 loss.adversarial_temperature 0.31158768367175105 +388 99 optimizer.lr 0.0064600789749242995 +388 99 negative_sampler.num_negs_per_pos 66.0 +388 99 training.batch_size 1.0 +388 100 model.embedding_dim 2.0 +388 100 model.c_min 0.012645794236397815 +388 100 model.c_max 9.989124914019099 +388 
100 loss.margin 15.703572636984758 +388 100 loss.adversarial_temperature 0.3525379520759695 +388 100 optimizer.lr 0.014821046281987896 +388 100 negative_sampler.num_negs_per_pos 73.0 +388 100 training.batch_size 2.0 +388 1 dataset """kinships""" +388 1 model """kg2e""" +388 1 loss """nssa""" +388 1 regularizer """no""" +388 1 optimizer """adam""" +388 1 training_loop """owa""" +388 1 negative_sampler """basic""" +388 1 evaluator """rankbased""" +388 2 dataset """kinships""" +388 2 model """kg2e""" +388 2 loss """nssa""" +388 2 regularizer """no""" +388 2 optimizer """adam""" +388 2 training_loop """owa""" +388 2 negative_sampler """basic""" +388 2 evaluator """rankbased""" +388 3 dataset """kinships""" +388 3 model """kg2e""" +388 3 loss """nssa""" +388 3 regularizer """no""" +388 3 optimizer """adam""" +388 3 training_loop """owa""" +388 3 negative_sampler """basic""" +388 3 evaluator """rankbased""" +388 4 dataset """kinships""" +388 4 model """kg2e""" +388 4 loss """nssa""" +388 4 regularizer """no""" +388 4 optimizer """adam""" +388 4 training_loop """owa""" +388 4 negative_sampler """basic""" +388 4 evaluator """rankbased""" +388 5 dataset """kinships""" +388 5 model """kg2e""" +388 5 loss """nssa""" +388 5 regularizer """no""" +388 5 optimizer """adam""" +388 5 training_loop """owa""" +388 5 negative_sampler """basic""" +388 5 evaluator """rankbased""" +388 6 dataset """kinships""" +388 6 model """kg2e""" +388 6 loss """nssa""" +388 6 regularizer """no""" +388 6 optimizer """adam""" +388 6 training_loop """owa""" +388 6 negative_sampler """basic""" +388 6 evaluator """rankbased""" +388 7 dataset """kinships""" +388 7 model """kg2e""" +388 7 loss """nssa""" +388 7 regularizer """no""" +388 7 optimizer """adam""" +388 7 training_loop """owa""" +388 7 negative_sampler """basic""" +388 7 evaluator """rankbased""" +388 8 dataset """kinships""" +388 8 model """kg2e""" +388 8 loss """nssa""" +388 8 regularizer """no""" +388 8 optimizer """adam""" +388 8 
training_loop """owa""" +388 8 negative_sampler """basic""" +388 8 evaluator """rankbased""" +388 9 dataset """kinships""" +388 9 model """kg2e""" +388 9 loss """nssa""" +388 9 regularizer """no""" +388 9 optimizer """adam""" +388 9 training_loop """owa""" +388 9 negative_sampler """basic""" +388 9 evaluator """rankbased""" +388 10 dataset """kinships""" +388 10 model """kg2e""" +388 10 loss """nssa""" +388 10 regularizer """no""" +388 10 optimizer """adam""" +388 10 training_loop """owa""" +388 10 negative_sampler """basic""" +388 10 evaluator """rankbased""" +388 11 dataset """kinships""" +388 11 model """kg2e""" +388 11 loss """nssa""" +388 11 regularizer """no""" +388 11 optimizer """adam""" +388 11 training_loop """owa""" +388 11 negative_sampler """basic""" +388 11 evaluator """rankbased""" +388 12 dataset """kinships""" +388 12 model """kg2e""" +388 12 loss """nssa""" +388 12 regularizer """no""" +388 12 optimizer """adam""" +388 12 training_loop """owa""" +388 12 negative_sampler """basic""" +388 12 evaluator """rankbased""" +388 13 dataset """kinships""" +388 13 model """kg2e""" +388 13 loss """nssa""" +388 13 regularizer """no""" +388 13 optimizer """adam""" +388 13 training_loop """owa""" +388 13 negative_sampler """basic""" +388 13 evaluator """rankbased""" +388 14 dataset """kinships""" +388 14 model """kg2e""" +388 14 loss """nssa""" +388 14 regularizer """no""" +388 14 optimizer """adam""" +388 14 training_loop """owa""" +388 14 negative_sampler """basic""" +388 14 evaluator """rankbased""" +388 15 dataset """kinships""" +388 15 model """kg2e""" +388 15 loss """nssa""" +388 15 regularizer """no""" +388 15 optimizer """adam""" +388 15 training_loop """owa""" +388 15 negative_sampler """basic""" +388 15 evaluator """rankbased""" +388 16 dataset """kinships""" +388 16 model """kg2e""" +388 16 loss """nssa""" +388 16 regularizer """no""" +388 16 optimizer """adam""" +388 16 training_loop """owa""" +388 16 negative_sampler """basic""" +388 16 evaluator 
"""rankbased""" +388 17 dataset """kinships""" +388 17 model """kg2e""" +388 17 loss """nssa""" +388 17 regularizer """no""" +388 17 optimizer """adam""" +388 17 training_loop """owa""" +388 17 negative_sampler """basic""" +388 17 evaluator """rankbased""" +388 18 dataset """kinships""" +388 18 model """kg2e""" +388 18 loss """nssa""" +388 18 regularizer """no""" +388 18 optimizer """adam""" +388 18 training_loop """owa""" +388 18 negative_sampler """basic""" +388 18 evaluator """rankbased""" +388 19 dataset """kinships""" +388 19 model """kg2e""" +388 19 loss """nssa""" +388 19 regularizer """no""" +388 19 optimizer """adam""" +388 19 training_loop """owa""" +388 19 negative_sampler """basic""" +388 19 evaluator """rankbased""" +388 20 dataset """kinships""" +388 20 model """kg2e""" +388 20 loss """nssa""" +388 20 regularizer """no""" +388 20 optimizer """adam""" +388 20 training_loop """owa""" +388 20 negative_sampler """basic""" +388 20 evaluator """rankbased""" +388 21 dataset """kinships""" +388 21 model """kg2e""" +388 21 loss """nssa""" +388 21 regularizer """no""" +388 21 optimizer """adam""" +388 21 training_loop """owa""" +388 21 negative_sampler """basic""" +388 21 evaluator """rankbased""" +388 22 dataset """kinships""" +388 22 model """kg2e""" +388 22 loss """nssa""" +388 22 regularizer """no""" +388 22 optimizer """adam""" +388 22 training_loop """owa""" +388 22 negative_sampler """basic""" +388 22 evaluator """rankbased""" +388 23 dataset """kinships""" +388 23 model """kg2e""" +388 23 loss """nssa""" +388 23 regularizer """no""" +388 23 optimizer """adam""" +388 23 training_loop """owa""" +388 23 negative_sampler """basic""" +388 23 evaluator """rankbased""" +388 24 dataset """kinships""" +388 24 model """kg2e""" +388 24 loss """nssa""" +388 24 regularizer """no""" +388 24 optimizer """adam""" +388 24 training_loop """owa""" +388 24 negative_sampler """basic""" +388 24 evaluator """rankbased""" +388 25 dataset """kinships""" +388 25 model """kg2e""" 
+388 25 loss """nssa""" +388 25 regularizer """no""" +388 25 optimizer """adam""" +388 25 training_loop """owa""" +388 25 negative_sampler """basic""" +388 25 evaluator """rankbased""" +388 26 dataset """kinships""" +388 26 model """kg2e""" +388 26 loss """nssa""" +388 26 regularizer """no""" +388 26 optimizer """adam""" +388 26 training_loop """owa""" +388 26 negative_sampler """basic""" +388 26 evaluator """rankbased""" +388 27 dataset """kinships""" +388 27 model """kg2e""" +388 27 loss """nssa""" +388 27 regularizer """no""" +388 27 optimizer """adam""" +388 27 training_loop """owa""" +388 27 negative_sampler """basic""" +388 27 evaluator """rankbased""" +388 28 dataset """kinships""" +388 28 model """kg2e""" +388 28 loss """nssa""" +388 28 regularizer """no""" +388 28 optimizer """adam""" +388 28 training_loop """owa""" +388 28 negative_sampler """basic""" +388 28 evaluator """rankbased""" +388 29 dataset """kinships""" +388 29 model """kg2e""" +388 29 loss """nssa""" +388 29 regularizer """no""" +388 29 optimizer """adam""" +388 29 training_loop """owa""" +388 29 negative_sampler """basic""" +388 29 evaluator """rankbased""" +388 30 dataset """kinships""" +388 30 model """kg2e""" +388 30 loss """nssa""" +388 30 regularizer """no""" +388 30 optimizer """adam""" +388 30 training_loop """owa""" +388 30 negative_sampler """basic""" +388 30 evaluator """rankbased""" +388 31 dataset """kinships""" +388 31 model """kg2e""" +388 31 loss """nssa""" +388 31 regularizer """no""" +388 31 optimizer """adam""" +388 31 training_loop """owa""" +388 31 negative_sampler """basic""" +388 31 evaluator """rankbased""" +388 32 dataset """kinships""" +388 32 model """kg2e""" +388 32 loss """nssa""" +388 32 regularizer """no""" +388 32 optimizer """adam""" +388 32 training_loop """owa""" +388 32 negative_sampler """basic""" +388 32 evaluator """rankbased""" +388 33 dataset """kinships""" +388 33 model """kg2e""" +388 33 loss """nssa""" +388 33 regularizer """no""" +388 33 optimizer 
"""adam""" +388 33 training_loop """owa""" +388 33 negative_sampler """basic""" +388 33 evaluator """rankbased""" +388 34 dataset """kinships""" +388 34 model """kg2e""" +388 34 loss """nssa""" +388 34 regularizer """no""" +388 34 optimizer """adam""" +388 34 training_loop """owa""" +388 34 negative_sampler """basic""" +388 34 evaluator """rankbased""" +388 35 dataset """kinships""" +388 35 model """kg2e""" +388 35 loss """nssa""" +388 35 regularizer """no""" +388 35 optimizer """adam""" +388 35 training_loop """owa""" +388 35 negative_sampler """basic""" +388 35 evaluator """rankbased""" +388 36 dataset """kinships""" +388 36 model """kg2e""" +388 36 loss """nssa""" +388 36 regularizer """no""" +388 36 optimizer """adam""" +388 36 training_loop """owa""" +388 36 negative_sampler """basic""" +388 36 evaluator """rankbased""" +388 37 dataset """kinships""" +388 37 model """kg2e""" +388 37 loss """nssa""" +388 37 regularizer """no""" +388 37 optimizer """adam""" +388 37 training_loop """owa""" +388 37 negative_sampler """basic""" +388 37 evaluator """rankbased""" +388 38 dataset """kinships""" +388 38 model """kg2e""" +388 38 loss """nssa""" +388 38 regularizer """no""" +388 38 optimizer """adam""" +388 38 training_loop """owa""" +388 38 negative_sampler """basic""" +388 38 evaluator """rankbased""" +388 39 dataset """kinships""" +388 39 model """kg2e""" +388 39 loss """nssa""" +388 39 regularizer """no""" +388 39 optimizer """adam""" +388 39 training_loop """owa""" +388 39 negative_sampler """basic""" +388 39 evaluator """rankbased""" +388 40 dataset """kinships""" +388 40 model """kg2e""" +388 40 loss """nssa""" +388 40 regularizer """no""" +388 40 optimizer """adam""" +388 40 training_loop """owa""" +388 40 negative_sampler """basic""" +388 40 evaluator """rankbased""" +388 41 dataset """kinships""" +388 41 model """kg2e""" +388 41 loss """nssa""" +388 41 regularizer """no""" +388 41 optimizer """adam""" +388 41 training_loop """owa""" +388 41 negative_sampler 
"""basic""" +388 41 evaluator """rankbased""" +388 42 dataset """kinships""" +388 42 model """kg2e""" +388 42 loss """nssa""" +388 42 regularizer """no""" +388 42 optimizer """adam""" +388 42 training_loop """owa""" +388 42 negative_sampler """basic""" +388 42 evaluator """rankbased""" +388 43 dataset """kinships""" +388 43 model """kg2e""" +388 43 loss """nssa""" +388 43 regularizer """no""" +388 43 optimizer """adam""" +388 43 training_loop """owa""" +388 43 negative_sampler """basic""" +388 43 evaluator """rankbased""" +388 44 dataset """kinships""" +388 44 model """kg2e""" +388 44 loss """nssa""" +388 44 regularizer """no""" +388 44 optimizer """adam""" +388 44 training_loop """owa""" +388 44 negative_sampler """basic""" +388 44 evaluator """rankbased""" +388 45 dataset """kinships""" +388 45 model """kg2e""" +388 45 loss """nssa""" +388 45 regularizer """no""" +388 45 optimizer """adam""" +388 45 training_loop """owa""" +388 45 negative_sampler """basic""" +388 45 evaluator """rankbased""" +388 46 dataset """kinships""" +388 46 model """kg2e""" +388 46 loss """nssa""" +388 46 regularizer """no""" +388 46 optimizer """adam""" +388 46 training_loop """owa""" +388 46 negative_sampler """basic""" +388 46 evaluator """rankbased""" +388 47 dataset """kinships""" +388 47 model """kg2e""" +388 47 loss """nssa""" +388 47 regularizer """no""" +388 47 optimizer """adam""" +388 47 training_loop """owa""" +388 47 negative_sampler """basic""" +388 47 evaluator """rankbased""" +388 48 dataset """kinships""" +388 48 model """kg2e""" +388 48 loss """nssa""" +388 48 regularizer """no""" +388 48 optimizer """adam""" +388 48 training_loop """owa""" +388 48 negative_sampler """basic""" +388 48 evaluator """rankbased""" +388 49 dataset """kinships""" +388 49 model """kg2e""" +388 49 loss """nssa""" +388 49 regularizer """no""" +388 49 optimizer """adam""" +388 49 training_loop """owa""" +388 49 negative_sampler """basic""" +388 49 evaluator """rankbased""" +388 50 dataset 
"""kinships""" +388 50 model """kg2e""" +388 50 loss """nssa""" +388 50 regularizer """no""" +388 50 optimizer """adam""" +388 50 training_loop """owa""" +388 50 negative_sampler """basic""" +388 50 evaluator """rankbased""" +388 51 dataset """kinships""" +388 51 model """kg2e""" +388 51 loss """nssa""" +388 51 regularizer """no""" +388 51 optimizer """adam""" +388 51 training_loop """owa""" +388 51 negative_sampler """basic""" +388 51 evaluator """rankbased""" +388 52 dataset """kinships""" +388 52 model """kg2e""" +388 52 loss """nssa""" +388 52 regularizer """no""" +388 52 optimizer """adam""" +388 52 training_loop """owa""" +388 52 negative_sampler """basic""" +388 52 evaluator """rankbased""" +388 53 dataset """kinships""" +388 53 model """kg2e""" +388 53 loss """nssa""" +388 53 regularizer """no""" +388 53 optimizer """adam""" +388 53 training_loop """owa""" +388 53 negative_sampler """basic""" +388 53 evaluator """rankbased""" +388 54 dataset """kinships""" +388 54 model """kg2e""" +388 54 loss """nssa""" +388 54 regularizer """no""" +388 54 optimizer """adam""" +388 54 training_loop """owa""" +388 54 negative_sampler """basic""" +388 54 evaluator """rankbased""" +388 55 dataset """kinships""" +388 55 model """kg2e""" +388 55 loss """nssa""" +388 55 regularizer """no""" +388 55 optimizer """adam""" +388 55 training_loop """owa""" +388 55 negative_sampler """basic""" +388 55 evaluator """rankbased""" +388 56 dataset """kinships""" +388 56 model """kg2e""" +388 56 loss """nssa""" +388 56 regularizer """no""" +388 56 optimizer """adam""" +388 56 training_loop """owa""" +388 56 negative_sampler """basic""" +388 56 evaluator """rankbased""" +388 57 dataset """kinships""" +388 57 model """kg2e""" +388 57 loss """nssa""" +388 57 regularizer """no""" +388 57 optimizer """adam""" +388 57 training_loop """owa""" +388 57 negative_sampler """basic""" +388 57 evaluator """rankbased""" +388 58 dataset """kinships""" +388 58 model """kg2e""" +388 58 loss """nssa""" +388 58 
regularizer """no""" +388 58 optimizer """adam""" +388 58 training_loop """owa""" +388 58 negative_sampler """basic""" +388 58 evaluator """rankbased""" +388 59 dataset """kinships""" +388 59 model """kg2e""" +388 59 loss """nssa""" +388 59 regularizer """no""" +388 59 optimizer """adam""" +388 59 training_loop """owa""" +388 59 negative_sampler """basic""" +388 59 evaluator """rankbased""" +388 60 dataset """kinships""" +388 60 model """kg2e""" +388 60 loss """nssa""" +388 60 regularizer """no""" +388 60 optimizer """adam""" +388 60 training_loop """owa""" +388 60 negative_sampler """basic""" +388 60 evaluator """rankbased""" +388 61 dataset """kinships""" +388 61 model """kg2e""" +388 61 loss """nssa""" +388 61 regularizer """no""" +388 61 optimizer """adam""" +388 61 training_loop """owa""" +388 61 negative_sampler """basic""" +388 61 evaluator """rankbased""" +388 62 dataset """kinships""" +388 62 model """kg2e""" +388 62 loss """nssa""" +388 62 regularizer """no""" +388 62 optimizer """adam""" +388 62 training_loop """owa""" +388 62 negative_sampler """basic""" +388 62 evaluator """rankbased""" +388 63 dataset """kinships""" +388 63 model """kg2e""" +388 63 loss """nssa""" +388 63 regularizer """no""" +388 63 optimizer """adam""" +388 63 training_loop """owa""" +388 63 negative_sampler """basic""" +388 63 evaluator """rankbased""" +388 64 dataset """kinships""" +388 64 model """kg2e""" +388 64 loss """nssa""" +388 64 regularizer """no""" +388 64 optimizer """adam""" +388 64 training_loop """owa""" +388 64 negative_sampler """basic""" +388 64 evaluator """rankbased""" +388 65 dataset """kinships""" +388 65 model """kg2e""" +388 65 loss """nssa""" +388 65 regularizer """no""" +388 65 optimizer """adam""" +388 65 training_loop """owa""" +388 65 negative_sampler """basic""" +388 65 evaluator """rankbased""" +388 66 dataset """kinships""" +388 66 model """kg2e""" +388 66 loss """nssa""" +388 66 regularizer """no""" +388 66 optimizer """adam""" +388 66 training_loop 
"""owa""" +388 66 negative_sampler """basic""" +388 66 evaluator """rankbased""" +388 67 dataset """kinships""" +388 67 model """kg2e""" +388 67 loss """nssa""" +388 67 regularizer """no""" +388 67 optimizer """adam""" +388 67 training_loop """owa""" +388 67 negative_sampler """basic""" +388 67 evaluator """rankbased""" +388 68 dataset """kinships""" +388 68 model """kg2e""" +388 68 loss """nssa""" +388 68 regularizer """no""" +388 68 optimizer """adam""" +388 68 training_loop """owa""" +388 68 negative_sampler """basic""" +388 68 evaluator """rankbased""" +388 69 dataset """kinships""" +388 69 model """kg2e""" +388 69 loss """nssa""" +388 69 regularizer """no""" +388 69 optimizer """adam""" +388 69 training_loop """owa""" +388 69 negative_sampler """basic""" +388 69 evaluator """rankbased""" +388 70 dataset """kinships""" +388 70 model """kg2e""" +388 70 loss """nssa""" +388 70 regularizer """no""" +388 70 optimizer """adam""" +388 70 training_loop """owa""" +388 70 negative_sampler """basic""" +388 70 evaluator """rankbased""" +388 71 dataset """kinships""" +388 71 model """kg2e""" +388 71 loss """nssa""" +388 71 regularizer """no""" +388 71 optimizer """adam""" +388 71 training_loop """owa""" +388 71 negative_sampler """basic""" +388 71 evaluator """rankbased""" +388 72 dataset """kinships""" +388 72 model """kg2e""" +388 72 loss """nssa""" +388 72 regularizer """no""" +388 72 optimizer """adam""" +388 72 training_loop """owa""" +388 72 negative_sampler """basic""" +388 72 evaluator """rankbased""" +388 73 dataset """kinships""" +388 73 model """kg2e""" +388 73 loss """nssa""" +388 73 regularizer """no""" +388 73 optimizer """adam""" +388 73 training_loop """owa""" +388 73 negative_sampler """basic""" +388 73 evaluator """rankbased""" +388 74 dataset """kinships""" +388 74 model """kg2e""" +388 74 loss """nssa""" +388 74 regularizer """no""" +388 74 optimizer """adam""" +388 74 training_loop """owa""" +388 74 negative_sampler """basic""" +388 74 evaluator 
"""rankbased""" +388 75 dataset """kinships""" +388 75 model """kg2e""" +388 75 loss """nssa""" +388 75 regularizer """no""" +388 75 optimizer """adam""" +388 75 training_loop """owa""" +388 75 negative_sampler """basic""" +388 75 evaluator """rankbased""" +388 76 dataset """kinships""" +388 76 model """kg2e""" +388 76 loss """nssa""" +388 76 regularizer """no""" +388 76 optimizer """adam""" +388 76 training_loop """owa""" +388 76 negative_sampler """basic""" +388 76 evaluator """rankbased""" +388 77 dataset """kinships""" +388 77 model """kg2e""" +388 77 loss """nssa""" +388 77 regularizer """no""" +388 77 optimizer """adam""" +388 77 training_loop """owa""" +388 77 negative_sampler """basic""" +388 77 evaluator """rankbased""" +388 78 dataset """kinships""" +388 78 model """kg2e""" +388 78 loss """nssa""" +388 78 regularizer """no""" +388 78 optimizer """adam""" +388 78 training_loop """owa""" +388 78 negative_sampler """basic""" +388 78 evaluator """rankbased""" +388 79 dataset """kinships""" +388 79 model """kg2e""" +388 79 loss """nssa""" +388 79 regularizer """no""" +388 79 optimizer """adam""" +388 79 training_loop """owa""" +388 79 negative_sampler """basic""" +388 79 evaluator """rankbased""" +388 80 dataset """kinships""" +388 80 model """kg2e""" +388 80 loss """nssa""" +388 80 regularizer """no""" +388 80 optimizer """adam""" +388 80 training_loop """owa""" +388 80 negative_sampler """basic""" +388 80 evaluator """rankbased""" +388 81 dataset """kinships""" +388 81 model """kg2e""" +388 81 loss """nssa""" +388 81 regularizer """no""" +388 81 optimizer """adam""" +388 81 training_loop """owa""" +388 81 negative_sampler """basic""" +388 81 evaluator """rankbased""" +388 82 dataset """kinships""" +388 82 model """kg2e""" +388 82 loss """nssa""" +388 82 regularizer """no""" +388 82 optimizer """adam""" +388 82 training_loop """owa""" +388 82 negative_sampler """basic""" +388 82 evaluator """rankbased""" +388 83 dataset """kinships""" +388 83 model """kg2e""" 
+388 83 loss """nssa""" +388 83 regularizer """no""" +388 83 optimizer """adam""" +388 83 training_loop """owa""" +388 83 negative_sampler """basic""" +388 83 evaluator """rankbased""" +388 84 dataset """kinships""" +388 84 model """kg2e""" +388 84 loss """nssa""" +388 84 regularizer """no""" +388 84 optimizer """adam""" +388 84 training_loop """owa""" +388 84 negative_sampler """basic""" +388 84 evaluator """rankbased""" +388 85 dataset """kinships""" +388 85 model """kg2e""" +388 85 loss """nssa""" +388 85 regularizer """no""" +388 85 optimizer """adam""" +388 85 training_loop """owa""" +388 85 negative_sampler """basic""" +388 85 evaluator """rankbased""" +388 86 dataset """kinships""" +388 86 model """kg2e""" +388 86 loss """nssa""" +388 86 regularizer """no""" +388 86 optimizer """adam""" +388 86 training_loop """owa""" +388 86 negative_sampler """basic""" +388 86 evaluator """rankbased""" +388 87 dataset """kinships""" +388 87 model """kg2e""" +388 87 loss """nssa""" +388 87 regularizer """no""" +388 87 optimizer """adam""" +388 87 training_loop """owa""" +388 87 negative_sampler """basic""" +388 87 evaluator """rankbased""" +388 88 dataset """kinships""" +388 88 model """kg2e""" +388 88 loss """nssa""" +388 88 regularizer """no""" +388 88 optimizer """adam""" +388 88 training_loop """owa""" +388 88 negative_sampler """basic""" +388 88 evaluator """rankbased""" +388 89 dataset """kinships""" +388 89 model """kg2e""" +388 89 loss """nssa""" +388 89 regularizer """no""" +388 89 optimizer """adam""" +388 89 training_loop """owa""" +388 89 negative_sampler """basic""" +388 89 evaluator """rankbased""" +388 90 dataset """kinships""" +388 90 model """kg2e""" +388 90 loss """nssa""" +388 90 regularizer """no""" +388 90 optimizer """adam""" +388 90 training_loop """owa""" +388 90 negative_sampler """basic""" +388 90 evaluator """rankbased""" +388 91 dataset """kinships""" +388 91 model """kg2e""" +388 91 loss """nssa""" +388 91 regularizer """no""" +388 91 optimizer 
"""adam""" +388 91 training_loop """owa""" +388 91 negative_sampler """basic""" +388 91 evaluator """rankbased""" +388 92 dataset """kinships""" +388 92 model """kg2e""" +388 92 loss """nssa""" +388 92 regularizer """no""" +388 92 optimizer """adam""" +388 92 training_loop """owa""" +388 92 negative_sampler """basic""" +388 92 evaluator """rankbased""" +388 93 dataset """kinships""" +388 93 model """kg2e""" +388 93 loss """nssa""" +388 93 regularizer """no""" +388 93 optimizer """adam""" +388 93 training_loop """owa""" +388 93 negative_sampler """basic""" +388 93 evaluator """rankbased""" +388 94 dataset """kinships""" +388 94 model """kg2e""" +388 94 loss """nssa""" +388 94 regularizer """no""" +388 94 optimizer """adam""" +388 94 training_loop """owa""" +388 94 negative_sampler """basic""" +388 94 evaluator """rankbased""" +388 95 dataset """kinships""" +388 95 model """kg2e""" +388 95 loss """nssa""" +388 95 regularizer """no""" +388 95 optimizer """adam""" +388 95 training_loop """owa""" +388 95 negative_sampler """basic""" +388 95 evaluator """rankbased""" +388 96 dataset """kinships""" +388 96 model """kg2e""" +388 96 loss """nssa""" +388 96 regularizer """no""" +388 96 optimizer """adam""" +388 96 training_loop """owa""" +388 96 negative_sampler """basic""" +388 96 evaluator """rankbased""" +388 97 dataset """kinships""" +388 97 model """kg2e""" +388 97 loss """nssa""" +388 97 regularizer """no""" +388 97 optimizer """adam""" +388 97 training_loop """owa""" +388 97 negative_sampler """basic""" +388 97 evaluator """rankbased""" +388 98 dataset """kinships""" +388 98 model """kg2e""" +388 98 loss """nssa""" +388 98 regularizer """no""" +388 98 optimizer """adam""" +388 98 training_loop """owa""" +388 98 negative_sampler """basic""" +388 98 evaluator """rankbased""" +388 99 dataset """kinships""" +388 99 model """kg2e""" +388 99 loss """nssa""" +388 99 regularizer """no""" +388 99 optimizer """adam""" +388 99 training_loop """owa""" +388 99 negative_sampler 
"""basic""" +388 99 evaluator """rankbased""" +388 100 dataset """kinships""" +388 100 model """kg2e""" +388 100 loss """nssa""" +388 100 regularizer """no""" +388 100 optimizer """adam""" +388 100 training_loop """owa""" +388 100 negative_sampler """basic""" +388 100 evaluator """rankbased""" +389 1 model.embedding_dim 1.0 +389 1 model.c_min 0.014159257020164644 +389 1 model.c_max 6.580642101155399 +389 1 loss.margin 24.990294274996906 +389 1 loss.adversarial_temperature 0.5572875557665437 +389 1 optimizer.lr 0.0193955717982744 +389 1 negative_sampler.num_negs_per_pos 26.0 +389 1 training.batch_size 0.0 +389 2 model.embedding_dim 0.0 +389 2 model.c_min 0.028796100803255797 +389 2 model.c_max 1.02570386174294 +389 2 loss.margin 26.17428745431058 +389 2 loss.adversarial_temperature 0.15013761961270797 +389 2 optimizer.lr 0.0017753260623128739 +389 2 negative_sampler.num_negs_per_pos 35.0 +389 2 training.batch_size 2.0 +389 3 model.embedding_dim 0.0 +389 3 model.c_min 0.021610691231828523 +389 3 model.c_max 1.7011183600155206 +389 3 loss.margin 24.234186777601934 +389 3 loss.adversarial_temperature 0.35777133633511715 +389 3 optimizer.lr 0.0018809583775369885 +389 3 negative_sampler.num_negs_per_pos 25.0 +389 3 training.batch_size 2.0 +389 4 model.embedding_dim 2.0 +389 4 model.c_min 0.012672430407370792 +389 4 model.c_max 9.01186294593368 +389 4 loss.margin 29.36991725042112 +389 4 loss.adversarial_temperature 0.34681312147313426 +389 4 optimizer.lr 0.015641242497646084 +389 4 negative_sampler.num_negs_per_pos 54.0 +389 4 training.batch_size 1.0 +389 5 model.embedding_dim 0.0 +389 5 model.c_min 0.09208382334718516 +389 5 model.c_max 4.280652833173176 +389 5 loss.margin 7.747421778187398 +389 5 loss.adversarial_temperature 0.7935970444151147 +389 5 optimizer.lr 0.01867409302201576 +389 5 negative_sampler.num_negs_per_pos 74.0 +389 5 training.batch_size 2.0 +389 6 model.embedding_dim 0.0 +389 6 model.c_min 0.02077270291948059 +389 6 model.c_max 2.4212615504457093 +389 
6 loss.margin 7.32322718957199 +389 6 loss.adversarial_temperature 0.2616329381084488 +389 6 optimizer.lr 0.0026250224131724284 +389 6 negative_sampler.num_negs_per_pos 6.0 +389 6 training.batch_size 0.0 +389 7 model.embedding_dim 1.0 +389 7 model.c_min 0.029731294409270047 +389 7 model.c_max 8.75809900123404 +389 7 loss.margin 26.15795441640439 +389 7 loss.adversarial_temperature 0.3421314848284695 +389 7 optimizer.lr 0.08535146524105579 +389 7 negative_sampler.num_negs_per_pos 58.0 +389 7 training.batch_size 1.0 +389 8 model.embedding_dim 0.0 +389 8 model.c_min 0.06528243327199724 +389 8 model.c_max 4.980299695954161 +389 8 loss.margin 10.862668020557331 +389 8 loss.adversarial_temperature 0.566822690764662 +389 8 optimizer.lr 0.06558207862414628 +389 8 negative_sampler.num_negs_per_pos 55.0 +389 8 training.batch_size 1.0 +389 9 model.embedding_dim 1.0 +389 9 model.c_min 0.030800094500236155 +389 9 model.c_max 2.469005903317697 +389 9 loss.margin 4.037742607193223 +389 9 loss.adversarial_temperature 0.68499529234146 +389 9 optimizer.lr 0.02494406398941081 +389 9 negative_sampler.num_negs_per_pos 9.0 +389 9 training.batch_size 0.0 +389 10 model.embedding_dim 0.0 +389 10 model.c_min 0.032473601913444526 +389 10 model.c_max 3.777523695029337 +389 10 loss.margin 23.717523754160787 +389 10 loss.adversarial_temperature 0.5687457575866076 +389 10 optimizer.lr 0.01568094183773542 +389 10 negative_sampler.num_negs_per_pos 57.0 +389 10 training.batch_size 2.0 +389 11 model.embedding_dim 0.0 +389 11 model.c_min 0.03983130350707485 +389 11 model.c_max 6.931667291604213 +389 11 loss.margin 19.98981802178298 +389 11 loss.adversarial_temperature 0.6857904841093132 +389 11 optimizer.lr 0.0707990028244655 +389 11 negative_sampler.num_negs_per_pos 61.0 +389 11 training.batch_size 2.0 +389 12 model.embedding_dim 2.0 +389 12 model.c_min 0.06631839684255086 +389 12 model.c_max 3.4164280989090074 +389 12 loss.margin 12.354942198167645 +389 12 loss.adversarial_temperature 
0.9222372564414584 +389 12 optimizer.lr 0.002604852859633358 +389 12 negative_sampler.num_negs_per_pos 62.0 +389 12 training.batch_size 1.0 +389 13 model.embedding_dim 2.0 +389 13 model.c_min 0.024379015104292655 +389 13 model.c_max 7.56223576090791 +389 13 loss.margin 27.844440394037395 +389 13 loss.adversarial_temperature 0.6520204076864373 +389 13 optimizer.lr 0.0874873047033949 +389 13 negative_sampler.num_negs_per_pos 46.0 +389 13 training.batch_size 1.0 +389 14 model.embedding_dim 0.0 +389 14 model.c_min 0.033416699892598635 +389 14 model.c_max 6.0762896332660326 +389 14 loss.margin 25.993423606460613 +389 14 loss.adversarial_temperature 0.5306978143331552 +389 14 optimizer.lr 0.040822092512483296 +389 14 negative_sampler.num_negs_per_pos 60.0 +389 14 training.batch_size 2.0 +389 15 model.embedding_dim 1.0 +389 15 model.c_min 0.021616647993865774 +389 15 model.c_max 9.550420528520355 +389 15 loss.margin 16.18511206903085 +389 15 loss.adversarial_temperature 0.25437433290645967 +389 15 optimizer.lr 0.0012431973314804667 +389 15 negative_sampler.num_negs_per_pos 24.0 +389 15 training.batch_size 0.0 +389 16 model.embedding_dim 2.0 +389 16 model.c_min 0.011292923155250628 +389 16 model.c_max 1.0963025856941282 +389 16 loss.margin 20.19327426873602 +389 16 loss.adversarial_temperature 0.4392327611736023 +389 16 optimizer.lr 0.0064263014058486 +389 16 negative_sampler.num_negs_per_pos 88.0 +389 16 training.batch_size 2.0 +389 17 model.embedding_dim 1.0 +389 17 model.c_min 0.011488565578849731 +389 17 model.c_max 9.917531894427965 +389 17 loss.margin 7.859474764262381 +389 17 loss.adversarial_temperature 0.38325052784646507 +389 17 optimizer.lr 0.06780868687484332 +389 17 negative_sampler.num_negs_per_pos 38.0 +389 17 training.batch_size 1.0 +389 18 model.embedding_dim 0.0 +389 18 model.c_min 0.014884903736016251 +389 18 model.c_max 1.6719278887171338 +389 18 loss.margin 19.468279120875497 +389 18 loss.adversarial_temperature 0.21416226926497672 +389 18 optimizer.lr 
0.004427845939614998 +389 18 negative_sampler.num_negs_per_pos 93.0 +389 18 training.batch_size 2.0 +389 19 model.embedding_dim 0.0 +389 19 model.c_min 0.07718403093102778 +389 19 model.c_max 3.5591634284554234 +389 19 loss.margin 26.081343879104523 +389 19 loss.adversarial_temperature 0.40883272325173753 +389 19 optimizer.lr 0.004932166066619462 +389 19 negative_sampler.num_negs_per_pos 5.0 +389 19 training.batch_size 2.0 +389 20 model.embedding_dim 1.0 +389 20 model.c_min 0.03674250101419081 +389 20 model.c_max 9.011446113445112 +389 20 loss.margin 28.922582142210345 +389 20 loss.adversarial_temperature 0.6366503548673207 +389 20 optimizer.lr 0.042040805350120516 +389 20 negative_sampler.num_negs_per_pos 19.0 +389 20 training.batch_size 2.0 +389 21 model.embedding_dim 2.0 +389 21 model.c_min 0.013577771012382497 +389 21 model.c_max 6.8049520537678125 +389 21 loss.margin 14.00788737821939 +389 21 loss.adversarial_temperature 0.6714938446366259 +389 21 optimizer.lr 0.0028892008178510852 +389 21 negative_sampler.num_negs_per_pos 69.0 +389 21 training.batch_size 1.0 +389 22 model.embedding_dim 0.0 +389 22 model.c_min 0.013575248777306415 +389 22 model.c_max 3.8633733485844273 +389 22 loss.margin 5.2442520899882785 +389 22 loss.adversarial_temperature 0.1530058778311716 +389 22 optimizer.lr 0.004218384622433038 +389 22 negative_sampler.num_negs_per_pos 29.0 +389 22 training.batch_size 1.0 +389 23 model.embedding_dim 2.0 +389 23 model.c_min 0.010413823732380163 +389 23 model.c_max 5.9799699267800746 +389 23 loss.margin 12.565565036778871 +389 23 loss.adversarial_temperature 0.2503994937619874 +389 23 optimizer.lr 0.0014133291005450285 +389 23 negative_sampler.num_negs_per_pos 62.0 +389 23 training.batch_size 2.0 +389 24 model.embedding_dim 2.0 +389 24 model.c_min 0.06500758860743183 +389 24 model.c_max 9.93610846121014 +389 24 loss.margin 10.159174728153229 +389 24 loss.adversarial_temperature 0.267547310067773 +389 24 optimizer.lr 0.022135170529158327 +389 24 
negative_sampler.num_negs_per_pos 18.0 +389 24 training.batch_size 0.0 +389 25 model.embedding_dim 2.0 +389 25 model.c_min 0.018499716807227878 +389 25 model.c_max 7.990135349860992 +389 25 loss.margin 22.411808406187767 +389 25 loss.adversarial_temperature 0.819764842605454 +389 25 optimizer.lr 0.0014842858990087849 +389 25 negative_sampler.num_negs_per_pos 59.0 +389 25 training.batch_size 0.0 +389 26 model.embedding_dim 0.0 +389 26 model.c_min 0.04489626751167184 +389 26 model.c_max 4.713838578100109 +389 26 loss.margin 10.094935753395696 +389 26 loss.adversarial_temperature 0.3011976416329286 +389 26 optimizer.lr 0.0027610084268330248 +389 26 negative_sampler.num_negs_per_pos 2.0 +389 26 training.batch_size 1.0 +389 27 model.embedding_dim 2.0 +389 27 model.c_min 0.018154160765692075 +389 27 model.c_max 5.005950454234234 +389 27 loss.margin 27.1998568785552 +389 27 loss.adversarial_temperature 0.7654165786745626 +389 27 optimizer.lr 0.005533947358890431 +389 27 negative_sampler.num_negs_per_pos 55.0 +389 27 training.batch_size 1.0 +389 28 model.embedding_dim 0.0 +389 28 model.c_min 0.0491973492789388 +389 28 model.c_max 4.259136190321558 +389 28 loss.margin 25.290814248909996 +389 28 loss.adversarial_temperature 0.850737304419821 +389 28 optimizer.lr 0.003878072971112122 +389 28 negative_sampler.num_negs_per_pos 61.0 +389 28 training.batch_size 0.0 +389 29 model.embedding_dim 0.0 +389 29 model.c_min 0.010023762478376833 +389 29 model.c_max 7.043420190835535 +389 29 loss.margin 17.9068252533063 +389 29 loss.adversarial_temperature 0.3604685788622822 +389 29 optimizer.lr 0.004853910436108791 +389 29 negative_sampler.num_negs_per_pos 48.0 +389 29 training.batch_size 0.0 +389 1 dataset """wn18rr""" +389 1 model """kg2e""" +389 1 loss """nssa""" +389 1 regularizer """no""" +389 1 optimizer """adam""" +389 1 training_loop """owa""" +389 1 negative_sampler """basic""" +389 1 evaluator """rankbased""" +389 2 dataset """wn18rr""" +389 2 model """kg2e""" +389 2 loss 
"""nssa""" +389 2 regularizer """no""" +389 2 optimizer """adam""" +389 2 training_loop """owa""" +389 2 negative_sampler """basic""" +389 2 evaluator """rankbased""" +389 3 dataset """wn18rr""" +389 3 model """kg2e""" +389 3 loss """nssa""" +389 3 regularizer """no""" +389 3 optimizer """adam""" +389 3 training_loop """owa""" +389 3 negative_sampler """basic""" +389 3 evaluator """rankbased""" +389 4 dataset """wn18rr""" +389 4 model """kg2e""" +389 4 loss """nssa""" +389 4 regularizer """no""" +389 4 optimizer """adam""" +389 4 training_loop """owa""" +389 4 negative_sampler """basic""" +389 4 evaluator """rankbased""" +389 5 dataset """wn18rr""" +389 5 model """kg2e""" +389 5 loss """nssa""" +389 5 regularizer """no""" +389 5 optimizer """adam""" +389 5 training_loop """owa""" +389 5 negative_sampler """basic""" +389 5 evaluator """rankbased""" +389 6 dataset """wn18rr""" +389 6 model """kg2e""" +389 6 loss """nssa""" +389 6 regularizer """no""" +389 6 optimizer """adam""" +389 6 training_loop """owa""" +389 6 negative_sampler """basic""" +389 6 evaluator """rankbased""" +389 7 dataset """wn18rr""" +389 7 model """kg2e""" +389 7 loss """nssa""" +389 7 regularizer """no""" +389 7 optimizer """adam""" +389 7 training_loop """owa""" +389 7 negative_sampler """basic""" +389 7 evaluator """rankbased""" +389 8 dataset """wn18rr""" +389 8 model """kg2e""" +389 8 loss """nssa""" +389 8 regularizer """no""" +389 8 optimizer """adam""" +389 8 training_loop """owa""" +389 8 negative_sampler """basic""" +389 8 evaluator """rankbased""" +389 9 dataset """wn18rr""" +389 9 model """kg2e""" +389 9 loss """nssa""" +389 9 regularizer """no""" +389 9 optimizer """adam""" +389 9 training_loop """owa""" +389 9 negative_sampler """basic""" +389 9 evaluator """rankbased""" +389 10 dataset """wn18rr""" +389 10 model """kg2e""" +389 10 loss """nssa""" +389 10 regularizer """no""" +389 10 optimizer """adam""" +389 10 training_loop """owa""" +389 10 negative_sampler """basic""" +389 10 
evaluator """rankbased""" +389 11 dataset """wn18rr""" +389 11 model """kg2e""" +389 11 loss """nssa""" +389 11 regularizer """no""" +389 11 optimizer """adam""" +389 11 training_loop """owa""" +389 11 negative_sampler """basic""" +389 11 evaluator """rankbased""" +389 12 dataset """wn18rr""" +389 12 model """kg2e""" +389 12 loss """nssa""" +389 12 regularizer """no""" +389 12 optimizer """adam""" +389 12 training_loop """owa""" +389 12 negative_sampler """basic""" +389 12 evaluator """rankbased""" +389 13 dataset """wn18rr""" +389 13 model """kg2e""" +389 13 loss """nssa""" +389 13 regularizer """no""" +389 13 optimizer """adam""" +389 13 training_loop """owa""" +389 13 negative_sampler """basic""" +389 13 evaluator """rankbased""" +389 14 dataset """wn18rr""" +389 14 model """kg2e""" +389 14 loss """nssa""" +389 14 regularizer """no""" +389 14 optimizer """adam""" +389 14 training_loop """owa""" +389 14 negative_sampler """basic""" +389 14 evaluator """rankbased""" +389 15 dataset """wn18rr""" +389 15 model """kg2e""" +389 15 loss """nssa""" +389 15 regularizer """no""" +389 15 optimizer """adam""" +389 15 training_loop """owa""" +389 15 negative_sampler """basic""" +389 15 evaluator """rankbased""" +389 16 dataset """wn18rr""" +389 16 model """kg2e""" +389 16 loss """nssa""" +389 16 regularizer """no""" +389 16 optimizer """adam""" +389 16 training_loop """owa""" +389 16 negative_sampler """basic""" +389 16 evaluator """rankbased""" +389 17 dataset """wn18rr""" +389 17 model """kg2e""" +389 17 loss """nssa""" +389 17 regularizer """no""" +389 17 optimizer """adam""" +389 17 training_loop """owa""" +389 17 negative_sampler """basic""" +389 17 evaluator """rankbased""" +389 18 dataset """wn18rr""" +389 18 model """kg2e""" +389 18 loss """nssa""" +389 18 regularizer """no""" +389 18 optimizer """adam""" +389 18 training_loop """owa""" +389 18 negative_sampler """basic""" +389 18 evaluator """rankbased""" +389 19 dataset """wn18rr""" +389 19 model """kg2e""" +389 19 
loss """nssa""" +389 19 regularizer """no""" +389 19 optimizer """adam""" +389 19 training_loop """owa""" +389 19 negative_sampler """basic""" +389 19 evaluator """rankbased""" +389 20 dataset """wn18rr""" +389 20 model """kg2e""" +389 20 loss """nssa""" +389 20 regularizer """no""" +389 20 optimizer """adam""" +389 20 training_loop """owa""" +389 20 negative_sampler """basic""" +389 20 evaluator """rankbased""" +389 21 dataset """wn18rr""" +389 21 model """kg2e""" +389 21 loss """nssa""" +389 21 regularizer """no""" +389 21 optimizer """adam""" +389 21 training_loop """owa""" +389 21 negative_sampler """basic""" +389 21 evaluator """rankbased""" +389 22 dataset """wn18rr""" +389 22 model """kg2e""" +389 22 loss """nssa""" +389 22 regularizer """no""" +389 22 optimizer """adam""" +389 22 training_loop """owa""" +389 22 negative_sampler """basic""" +389 22 evaluator """rankbased""" +389 23 dataset """wn18rr""" +389 23 model """kg2e""" +389 23 loss """nssa""" +389 23 regularizer """no""" +389 23 optimizer """adam""" +389 23 training_loop """owa""" +389 23 negative_sampler """basic""" +389 23 evaluator """rankbased""" +389 24 dataset """wn18rr""" +389 24 model """kg2e""" +389 24 loss """nssa""" +389 24 regularizer """no""" +389 24 optimizer """adam""" +389 24 training_loop """owa""" +389 24 negative_sampler """basic""" +389 24 evaluator """rankbased""" +389 25 dataset """wn18rr""" +389 25 model """kg2e""" +389 25 loss """nssa""" +389 25 regularizer """no""" +389 25 optimizer """adam""" +389 25 training_loop """owa""" +389 25 negative_sampler """basic""" +389 25 evaluator """rankbased""" +389 26 dataset """wn18rr""" +389 26 model """kg2e""" +389 26 loss """nssa""" +389 26 regularizer """no""" +389 26 optimizer """adam""" +389 26 training_loop """owa""" +389 26 negative_sampler """basic""" +389 26 evaluator """rankbased""" +389 27 dataset """wn18rr""" +389 27 model """kg2e""" +389 27 loss """nssa""" +389 27 regularizer """no""" +389 27 optimizer """adam""" +389 27 
training_loop """owa""" +389 27 negative_sampler """basic""" +389 27 evaluator """rankbased""" +389 28 dataset """wn18rr""" +389 28 model """kg2e""" +389 28 loss """nssa""" +389 28 regularizer """no""" +389 28 optimizer """adam""" +389 28 training_loop """owa""" +389 28 negative_sampler """basic""" +389 28 evaluator """rankbased""" +389 29 dataset """wn18rr""" +389 29 model """kg2e""" +389 29 loss """nssa""" +389 29 regularizer """no""" +389 29 optimizer """adam""" +389 29 training_loop """owa""" +389 29 negative_sampler """basic""" +389 29 evaluator """rankbased""" +390 1 model.embedding_dim 1.0 +390 1 model.c_min 0.05682463053496309 +390 1 model.c_max 3.576013426810557 +390 1 loss.margin 22.7208463699908 +390 1 loss.adversarial_temperature 0.5336628072129501 +390 1 optimizer.lr 0.027089728299533204 +390 1 negative_sampler.num_negs_per_pos 47.0 +390 1 training.batch_size 2.0 +390 2 model.embedding_dim 0.0 +390 2 model.c_min 0.08196685608674982 +390 2 model.c_max 8.019500775591624 +390 2 loss.margin 14.637307985125654 +390 2 loss.adversarial_temperature 0.9770379179345179 +390 2 optimizer.lr 0.0014432225214086698 +390 2 negative_sampler.num_negs_per_pos 40.0 +390 2 training.batch_size 1.0 +390 3 model.embedding_dim 2.0 +390 3 model.c_min 0.08525655641855875 +390 3 model.c_max 9.620440024504251 +390 3 loss.margin 7.275089888454429 +390 3 loss.adversarial_temperature 0.6493810175765644 +390 3 optimizer.lr 0.042262565960379896 +390 3 negative_sampler.num_negs_per_pos 57.0 +390 3 training.batch_size 0.0 +390 4 model.embedding_dim 1.0 +390 4 model.c_min 0.06232891759986686 +390 4 model.c_max 6.451730384087907 +390 4 loss.margin 18.896833255449458 +390 4 loss.adversarial_temperature 0.33519883075877926 +390 4 optimizer.lr 0.06264573397901652 +390 4 negative_sampler.num_negs_per_pos 15.0 +390 4 training.batch_size 1.0 +390 5 model.embedding_dim 1.0 +390 5 model.c_min 0.024332418132807863 +390 5 model.c_max 2.966482681805319 +390 5 loss.margin 7.896146434779794 +390 5 
loss.adversarial_temperature 0.30281410785331725 +390 5 optimizer.lr 0.00528281926413455 +390 5 negative_sampler.num_negs_per_pos 92.0 +390 5 training.batch_size 2.0 +390 6 model.embedding_dim 1.0 +390 6 model.c_min 0.07854562779844414 +390 6 model.c_max 3.019884370785141 +390 6 loss.margin 16.32907922077184 +390 6 loss.adversarial_temperature 0.48055176183002474 +390 6 optimizer.lr 0.08519403884829399 +390 6 negative_sampler.num_negs_per_pos 88.0 +390 6 training.batch_size 1.0 +390 7 model.embedding_dim 1.0 +390 7 model.c_min 0.038090453676245126 +390 7 model.c_max 3.444316030500217 +390 7 loss.margin 26.746625196055685 +390 7 loss.adversarial_temperature 0.49079699410902855 +390 7 optimizer.lr 0.0014211387075333808 +390 7 negative_sampler.num_negs_per_pos 46.0 +390 7 training.batch_size 2.0 +390 8 model.embedding_dim 1.0 +390 8 model.c_min 0.04848933050946526 +390 8 model.c_max 3.106806963513694 +390 8 loss.margin 3.5492179877785826 +390 8 loss.adversarial_temperature 0.24709108064502147 +390 8 optimizer.lr 0.0025727448525891296 +390 8 negative_sampler.num_negs_per_pos 24.0 +390 8 training.batch_size 1.0 +390 9 model.embedding_dim 0.0 +390 9 model.c_min 0.07443724199489019 +390 9 model.c_max 7.297261177127949 +390 9 loss.margin 6.6383703586915255 +390 9 loss.adversarial_temperature 0.950034384149864 +390 9 optimizer.lr 0.006919611105300134 +390 9 negative_sampler.num_negs_per_pos 53.0 +390 9 training.batch_size 2.0 +390 10 model.embedding_dim 0.0 +390 10 model.c_min 0.018590057336776822 +390 10 model.c_max 5.111121912782055 +390 10 loss.margin 16.654876634629172 +390 10 loss.adversarial_temperature 0.10463652330020137 +390 10 optimizer.lr 0.0010752223967899677 +390 10 negative_sampler.num_negs_per_pos 86.0 +390 10 training.batch_size 2.0 +390 11 model.embedding_dim 1.0 +390 11 model.c_min 0.0355118363548156 +390 11 model.c_max 8.62222189618029 +390 11 loss.margin 23.968758391193468 +390 11 loss.adversarial_temperature 0.13617179675935226 +390 11 optimizer.lr 
0.07572624392990528 +390 11 negative_sampler.num_negs_per_pos 40.0 +390 11 training.batch_size 0.0 +390 12 model.embedding_dim 2.0 +390 12 model.c_min 0.05082328412212004 +390 12 model.c_max 6.30917101269177 +390 12 loss.margin 20.616708630374696 +390 12 loss.adversarial_temperature 0.1450688205968257 +390 12 optimizer.lr 0.0032005567874700616 +390 12 negative_sampler.num_negs_per_pos 66.0 +390 12 training.batch_size 1.0 +390 13 model.embedding_dim 1.0 +390 13 model.c_min 0.01952795588388083 +390 13 model.c_max 1.951410468173316 +390 13 loss.margin 27.621882914691223 +390 13 loss.adversarial_temperature 0.6017902528042662 +390 13 optimizer.lr 0.008443367885239752 +390 13 negative_sampler.num_negs_per_pos 13.0 +390 13 training.batch_size 2.0 +390 14 model.embedding_dim 2.0 +390 14 model.c_min 0.027102259974823205 +390 14 model.c_max 2.3281203913807085 +390 14 loss.margin 11.170125959984535 +390 14 loss.adversarial_temperature 0.18031919272162858 +390 14 optimizer.lr 0.003794302878790927 +390 14 negative_sampler.num_negs_per_pos 42.0 +390 14 training.batch_size 0.0 +390 15 model.embedding_dim 0.0 +390 15 model.c_min 0.01117837917151853 +390 15 model.c_max 2.619076407581316 +390 15 loss.margin 29.1723577274446 +390 15 loss.adversarial_temperature 0.7201212357862781 +390 15 optimizer.lr 0.0690292364830116 +390 15 negative_sampler.num_negs_per_pos 39.0 +390 15 training.batch_size 1.0 +390 16 model.embedding_dim 0.0 +390 16 model.c_min 0.024180256261542556 +390 16 model.c_max 8.478041891243404 +390 16 loss.margin 27.853738014173715 +390 16 loss.adversarial_temperature 0.415655608490501 +390 16 optimizer.lr 0.02988492622316623 +390 16 negative_sampler.num_negs_per_pos 34.0 +390 16 training.batch_size 2.0 +390 17 model.embedding_dim 0.0 +390 17 model.c_min 0.059152786987796864 +390 17 model.c_max 5.823895939775599 +390 17 loss.margin 21.863472485014576 +390 17 loss.adversarial_temperature 0.7703373177650212 +390 17 optimizer.lr 0.01250920937404157 +390 17 
negative_sampler.num_negs_per_pos 93.0 +390 17 training.batch_size 0.0 +390 18 model.embedding_dim 1.0 +390 18 model.c_min 0.08580083334381247 +390 18 model.c_max 2.7630944815639515 +390 18 loss.margin 27.318989945102338 +390 18 loss.adversarial_temperature 0.28061141965997294 +390 18 optimizer.lr 0.029192086088748 +390 18 negative_sampler.num_negs_per_pos 84.0 +390 18 training.batch_size 0.0 +390 19 model.embedding_dim 2.0 +390 19 model.c_min 0.03218166149891861 +390 19 model.c_max 6.736130136854722 +390 19 loss.margin 2.090550639512779 +390 19 loss.adversarial_temperature 0.36973518255018506 +390 19 optimizer.lr 0.0010794035008611683 +390 19 negative_sampler.num_negs_per_pos 53.0 +390 19 training.batch_size 0.0 +390 20 model.embedding_dim 1.0 +390 20 model.c_min 0.014480699835577137 +390 20 model.c_max 7.147092564265327 +390 20 loss.margin 8.775480491995836 +390 20 loss.adversarial_temperature 0.6999000678946379 +390 20 optimizer.lr 0.0020990919333070844 +390 20 negative_sampler.num_negs_per_pos 14.0 +390 20 training.batch_size 2.0 +390 21 model.embedding_dim 1.0 +390 21 model.c_min 0.05574055412387141 +390 21 model.c_max 4.476717743765458 +390 21 loss.margin 1.4518135797724017 +390 21 loss.adversarial_temperature 0.5946490179076905 +390 21 optimizer.lr 0.023916581751958477 +390 21 negative_sampler.num_negs_per_pos 33.0 +390 21 training.batch_size 2.0 +390 22 model.embedding_dim 2.0 +390 22 model.c_min 0.029092081943440148 +390 22 model.c_max 9.579272703955654 +390 22 loss.margin 13.159738591402848 +390 22 loss.adversarial_temperature 0.2087398980092857 +390 22 optimizer.lr 0.08932912085881199 +390 22 negative_sampler.num_negs_per_pos 90.0 +390 22 training.batch_size 2.0 +390 23 model.embedding_dim 0.0 +390 23 model.c_min 0.0403615667993611 +390 23 model.c_max 6.424097571086029 +390 23 loss.margin 22.564712943928082 +390 23 loss.adversarial_temperature 0.21269242084004578 +390 23 optimizer.lr 0.05842328001766355 +390 23 negative_sampler.num_negs_per_pos 81.0 +390 
23 training.batch_size 1.0 +390 24 model.embedding_dim 1.0 +390 24 model.c_min 0.0546404399266364 +390 24 model.c_max 6.537146138482461 +390 24 loss.margin 17.972134528341424 +390 24 loss.adversarial_temperature 0.890315534094854 +390 24 optimizer.lr 0.03663677012732065 +390 24 negative_sampler.num_negs_per_pos 56.0 +390 24 training.batch_size 1.0 +390 25 model.embedding_dim 0.0 +390 25 model.c_min 0.031596427620672995 +390 25 model.c_max 3.2609395389572216 +390 25 loss.margin 1.648900586958244 +390 25 loss.adversarial_temperature 0.5171999981100338 +390 25 optimizer.lr 0.020691287214397378 +390 25 negative_sampler.num_negs_per_pos 74.0 +390 25 training.batch_size 2.0 +390 26 model.embedding_dim 1.0 +390 26 model.c_min 0.01030945889557566 +390 26 model.c_max 1.0620684923582644 +390 26 loss.margin 28.61377039062895 +390 26 loss.adversarial_temperature 0.46131287053562453 +390 26 optimizer.lr 0.024278246869527332 +390 26 negative_sampler.num_negs_per_pos 44.0 +390 26 training.batch_size 1.0 +390 27 model.embedding_dim 0.0 +390 27 model.c_min 0.026922596870503576 +390 27 model.c_max 7.157645196579524 +390 27 loss.margin 26.969211645375374 +390 27 loss.adversarial_temperature 0.8223106917471332 +390 27 optimizer.lr 0.004763410429612253 +390 27 negative_sampler.num_negs_per_pos 16.0 +390 27 training.batch_size 2.0 +390 28 model.embedding_dim 2.0 +390 28 model.c_min 0.04377501996852043 +390 28 model.c_max 6.3403172058730854 +390 28 loss.margin 9.243360016563713 +390 28 loss.adversarial_temperature 0.42977867805461034 +390 28 optimizer.lr 0.06020730169798038 +390 28 negative_sampler.num_negs_per_pos 85.0 +390 28 training.batch_size 1.0 +390 29 model.embedding_dim 1.0 +390 29 model.c_min 0.010939798037707006 +390 29 model.c_max 5.279629053722078 +390 29 loss.margin 11.043989447723272 +390 29 loss.adversarial_temperature 0.906115883441939 +390 29 optimizer.lr 0.010900128247918073 +390 29 negative_sampler.num_negs_per_pos 68.0 +390 29 training.batch_size 0.0 +390 30 
model.embedding_dim 0.0 +390 30 model.c_min 0.09272819763303318 +390 30 model.c_max 3.904686005783662 +390 30 loss.margin 26.448268867438802 +390 30 loss.adversarial_temperature 0.19629805184394342 +390 30 optimizer.lr 0.01453125596203622 +390 30 negative_sampler.num_negs_per_pos 24.0 +390 30 training.batch_size 0.0 +390 31 model.embedding_dim 1.0 +390 31 model.c_min 0.01765888951511339 +390 31 model.c_max 2.884202399809477 +390 31 loss.margin 12.819828673900208 +390 31 loss.adversarial_temperature 0.8779083182663417 +390 31 optimizer.lr 0.0010938015315310307 +390 31 negative_sampler.num_negs_per_pos 24.0 +390 31 training.batch_size 1.0 +390 32 model.embedding_dim 2.0 +390 32 model.c_min 0.09749585557871791 +390 32 model.c_max 8.499288468833937 +390 32 loss.margin 10.726481929475597 +390 32 loss.adversarial_temperature 0.8846152626039981 +390 32 optimizer.lr 0.008998813033498169 +390 32 negative_sampler.num_negs_per_pos 45.0 +390 32 training.batch_size 0.0 +390 33 model.embedding_dim 1.0 +390 33 model.c_min 0.023009963307485434 +390 33 model.c_max 5.569621491567487 +390 33 loss.margin 9.166351758975322 +390 33 loss.adversarial_temperature 0.9481060343399258 +390 33 optimizer.lr 0.007517343451173458 +390 33 negative_sampler.num_negs_per_pos 66.0 +390 33 training.batch_size 0.0 +390 34 model.embedding_dim 2.0 +390 34 model.c_min 0.013510556497214698 +390 34 model.c_max 2.0312578154837646 +390 34 loss.margin 15.716430210722127 +390 34 loss.adversarial_temperature 0.6550777839273051 +390 34 optimizer.lr 0.005238516583625824 +390 34 negative_sampler.num_negs_per_pos 3.0 +390 34 training.batch_size 0.0 +390 35 model.embedding_dim 1.0 +390 35 model.c_min 0.06536619925345345 +390 35 model.c_max 9.14346808842349 +390 35 loss.margin 21.11139727373239 +390 35 loss.adversarial_temperature 0.9360153637540312 +390 35 optimizer.lr 0.004555288545843387 +390 35 negative_sampler.num_negs_per_pos 60.0 +390 35 training.batch_size 0.0 +390 36 model.embedding_dim 2.0 +390 36 model.c_min 
0.028588618507843024 +390 36 model.c_max 1.0601560415967193 +390 36 loss.margin 21.79397995208029 +390 36 loss.adversarial_temperature 0.31436924008995465 +390 36 optimizer.lr 0.009135846089294113 +390 36 negative_sampler.num_negs_per_pos 81.0 +390 36 training.batch_size 1.0 +390 37 model.embedding_dim 2.0 +390 37 model.c_min 0.010429271897551754 +390 37 model.c_max 8.300867992047099 +390 37 loss.margin 24.326126199515095 +390 37 loss.adversarial_temperature 0.4832232808740689 +390 37 optimizer.lr 0.006838273277143109 +390 37 negative_sampler.num_negs_per_pos 79.0 +390 37 training.batch_size 0.0 +390 38 model.embedding_dim 0.0 +390 38 model.c_min 0.015061088493647675 +390 38 model.c_max 9.416396800356006 +390 38 loss.margin 23.376208976267453 +390 38 loss.adversarial_temperature 0.4699871631184047 +390 38 optimizer.lr 0.05420527206442923 +390 38 negative_sampler.num_negs_per_pos 1.0 +390 38 training.batch_size 0.0 +390 39 model.embedding_dim 0.0 +390 39 model.c_min 0.01873458441477514 +390 39 model.c_max 2.56686107220854 +390 39 loss.margin 29.59894911076195 +390 39 loss.adversarial_temperature 0.6625275869147711 +390 39 optimizer.lr 0.00969253551343971 +390 39 negative_sampler.num_negs_per_pos 41.0 +390 39 training.batch_size 2.0 +390 40 model.embedding_dim 2.0 +390 40 model.c_min 0.08239742653553098 +390 40 model.c_max 8.751236072777257 +390 40 loss.margin 19.16734959848126 +390 40 loss.adversarial_temperature 0.2514338616935132 +390 40 optimizer.lr 0.0013965853795792314 +390 40 negative_sampler.num_negs_per_pos 14.0 +390 40 training.batch_size 2.0 +390 41 model.embedding_dim 0.0 +390 41 model.c_min 0.023315077617096627 +390 41 model.c_max 6.0175710932219335 +390 41 loss.margin 24.44345845372222 +390 41 loss.adversarial_temperature 0.8977256448889612 +390 41 optimizer.lr 0.017759267626288183 +390 41 negative_sampler.num_negs_per_pos 0.0 +390 41 training.batch_size 0.0 +390 42 model.embedding_dim 1.0 +390 42 model.c_min 0.017520285748886893 +390 42 model.c_max 
2.8934422118474643 +390 42 loss.margin 13.033591588978615 +390 42 loss.adversarial_temperature 0.7624793165553545 +390 42 optimizer.lr 0.0011130193636157233 +390 42 negative_sampler.num_negs_per_pos 5.0 +390 42 training.batch_size 1.0 +390 43 model.embedding_dim 0.0 +390 43 model.c_min 0.013779056062506493 +390 43 model.c_max 1.651466599517074 +390 43 loss.margin 26.33654172917823 +390 43 loss.adversarial_temperature 0.19475805424805723 +390 43 optimizer.lr 0.034957921124749956 +390 43 negative_sampler.num_negs_per_pos 89.0 +390 43 training.batch_size 1.0 +390 44 model.embedding_dim 1.0 +390 44 model.c_min 0.06621888232553826 +390 44 model.c_max 3.492294112976542 +390 44 loss.margin 20.715560626332245 +390 44 loss.adversarial_temperature 0.5759102654148704 +390 44 optimizer.lr 0.009780192329061802 +390 44 negative_sampler.num_negs_per_pos 7.0 +390 44 training.batch_size 2.0 +390 45 model.embedding_dim 2.0 +390 45 model.c_min 0.015058100115042014 +390 45 model.c_max 4.278688401436357 +390 45 loss.margin 7.148464229713464 +390 45 loss.adversarial_temperature 0.5765893541357219 +390 45 optimizer.lr 0.0054692702112918035 +390 45 negative_sampler.num_negs_per_pos 62.0 +390 45 training.batch_size 2.0 +390 1 dataset """wn18rr""" +390 1 model """kg2e""" +390 1 loss """nssa""" +390 1 regularizer """no""" +390 1 optimizer """adam""" +390 1 training_loop """owa""" +390 1 negative_sampler """basic""" +390 1 evaluator """rankbased""" +390 2 dataset """wn18rr""" +390 2 model """kg2e""" +390 2 loss """nssa""" +390 2 regularizer """no""" +390 2 optimizer """adam""" +390 2 training_loop """owa""" +390 2 negative_sampler """basic""" +390 2 evaluator """rankbased""" +390 3 dataset """wn18rr""" +390 3 model """kg2e""" +390 3 loss """nssa""" +390 3 regularizer """no""" +390 3 optimizer """adam""" +390 3 training_loop """owa""" +390 3 negative_sampler """basic""" +390 3 evaluator """rankbased""" +390 4 dataset """wn18rr""" +390 4 model """kg2e""" +390 4 loss """nssa""" +390 4 
regularizer """no""" +390 4 optimizer """adam""" +390 4 training_loop """owa""" +390 4 negative_sampler """basic""" +390 4 evaluator """rankbased""" +390 5 dataset """wn18rr""" +390 5 model """kg2e""" +390 5 loss """nssa""" +390 5 regularizer """no""" +390 5 optimizer """adam""" +390 5 training_loop """owa""" +390 5 negative_sampler """basic""" +390 5 evaluator """rankbased""" +390 6 dataset """wn18rr""" +390 6 model """kg2e""" +390 6 loss """nssa""" +390 6 regularizer """no""" +390 6 optimizer """adam""" +390 6 training_loop """owa""" +390 6 negative_sampler """basic""" +390 6 evaluator """rankbased""" +390 7 dataset """wn18rr""" +390 7 model """kg2e""" +390 7 loss """nssa""" +390 7 regularizer """no""" +390 7 optimizer """adam""" +390 7 training_loop """owa""" +390 7 negative_sampler """basic""" +390 7 evaluator """rankbased""" +390 8 dataset """wn18rr""" +390 8 model """kg2e""" +390 8 loss """nssa""" +390 8 regularizer """no""" +390 8 optimizer """adam""" +390 8 training_loop """owa""" +390 8 negative_sampler """basic""" +390 8 evaluator """rankbased""" +390 9 dataset """wn18rr""" +390 9 model """kg2e""" +390 9 loss """nssa""" +390 9 regularizer """no""" +390 9 optimizer """adam""" +390 9 training_loop """owa""" +390 9 negative_sampler """basic""" +390 9 evaluator """rankbased""" +390 10 dataset """wn18rr""" +390 10 model """kg2e""" +390 10 loss """nssa""" +390 10 regularizer """no""" +390 10 optimizer """adam""" +390 10 training_loop """owa""" +390 10 negative_sampler """basic""" +390 10 evaluator """rankbased""" +390 11 dataset """wn18rr""" +390 11 model """kg2e""" +390 11 loss """nssa""" +390 11 regularizer """no""" +390 11 optimizer """adam""" +390 11 training_loop """owa""" +390 11 negative_sampler """basic""" +390 11 evaluator """rankbased""" +390 12 dataset """wn18rr""" +390 12 model """kg2e""" +390 12 loss """nssa""" +390 12 regularizer """no""" +390 12 optimizer """adam""" +390 12 training_loop """owa""" +390 12 negative_sampler """basic""" +390 12 
evaluator """rankbased""" +390 13 dataset """wn18rr""" +390 13 model """kg2e""" +390 13 loss """nssa""" +390 13 regularizer """no""" +390 13 optimizer """adam""" +390 13 training_loop """owa""" +390 13 negative_sampler """basic""" +390 13 evaluator """rankbased""" +390 14 dataset """wn18rr""" +390 14 model """kg2e""" +390 14 loss """nssa""" +390 14 regularizer """no""" +390 14 optimizer """adam""" +390 14 training_loop """owa""" +390 14 negative_sampler """basic""" +390 14 evaluator """rankbased""" +390 15 dataset """wn18rr""" +390 15 model """kg2e""" +390 15 loss """nssa""" +390 15 regularizer """no""" +390 15 optimizer """adam""" +390 15 training_loop """owa""" +390 15 negative_sampler """basic""" +390 15 evaluator """rankbased""" +390 16 dataset """wn18rr""" +390 16 model """kg2e""" +390 16 loss """nssa""" +390 16 regularizer """no""" +390 16 optimizer """adam""" +390 16 training_loop """owa""" +390 16 negative_sampler """basic""" +390 16 evaluator """rankbased""" +390 17 dataset """wn18rr""" +390 17 model """kg2e""" +390 17 loss """nssa""" +390 17 regularizer """no""" +390 17 optimizer """adam""" +390 17 training_loop """owa""" +390 17 negative_sampler """basic""" +390 17 evaluator """rankbased""" +390 18 dataset """wn18rr""" +390 18 model """kg2e""" +390 18 loss """nssa""" +390 18 regularizer """no""" +390 18 optimizer """adam""" +390 18 training_loop """owa""" +390 18 negative_sampler """basic""" +390 18 evaluator """rankbased""" +390 19 dataset """wn18rr""" +390 19 model """kg2e""" +390 19 loss """nssa""" +390 19 regularizer """no""" +390 19 optimizer """adam""" +390 19 training_loop """owa""" +390 19 negative_sampler """basic""" +390 19 evaluator """rankbased""" +390 20 dataset """wn18rr""" +390 20 model """kg2e""" +390 20 loss """nssa""" +390 20 regularizer """no""" +390 20 optimizer """adam""" +390 20 training_loop """owa""" +390 20 negative_sampler """basic""" +390 20 evaluator """rankbased""" +390 21 dataset """wn18rr""" +390 21 model """kg2e""" +390 21 
loss """nssa""" +390 21 regularizer """no""" +390 21 optimizer """adam""" +390 21 training_loop """owa""" +390 21 negative_sampler """basic""" +390 21 evaluator """rankbased""" +390 22 dataset """wn18rr""" +390 22 model """kg2e""" +390 22 loss """nssa""" +390 22 regularizer """no""" +390 22 optimizer """adam""" +390 22 training_loop """owa""" +390 22 negative_sampler """basic""" +390 22 evaluator """rankbased""" +390 23 dataset """wn18rr""" +390 23 model """kg2e""" +390 23 loss """nssa""" +390 23 regularizer """no""" +390 23 optimizer """adam""" +390 23 training_loop """owa""" +390 23 negative_sampler """basic""" +390 23 evaluator """rankbased""" +390 24 dataset """wn18rr""" +390 24 model """kg2e""" +390 24 loss """nssa""" +390 24 regularizer """no""" +390 24 optimizer """adam""" +390 24 training_loop """owa""" +390 24 negative_sampler """basic""" +390 24 evaluator """rankbased""" +390 25 dataset """wn18rr""" +390 25 model """kg2e""" +390 25 loss """nssa""" +390 25 regularizer """no""" +390 25 optimizer """adam""" +390 25 training_loop """owa""" +390 25 negative_sampler """basic""" +390 25 evaluator """rankbased""" +390 26 dataset """wn18rr""" +390 26 model """kg2e""" +390 26 loss """nssa""" +390 26 regularizer """no""" +390 26 optimizer """adam""" +390 26 training_loop """owa""" +390 26 negative_sampler """basic""" +390 26 evaluator """rankbased""" +390 27 dataset """wn18rr""" +390 27 model """kg2e""" +390 27 loss """nssa""" +390 27 regularizer """no""" +390 27 optimizer """adam""" +390 27 training_loop """owa""" +390 27 negative_sampler """basic""" +390 27 evaluator """rankbased""" +390 28 dataset """wn18rr""" +390 28 model """kg2e""" +390 28 loss """nssa""" +390 28 regularizer """no""" +390 28 optimizer """adam""" +390 28 training_loop """owa""" +390 28 negative_sampler """basic""" +390 28 evaluator """rankbased""" +390 29 dataset """wn18rr""" +390 29 model """kg2e""" +390 29 loss """nssa""" +390 29 regularizer """no""" +390 29 optimizer """adam""" +390 29 
training_loop """owa""" +390 29 negative_sampler """basic""" +390 29 evaluator """rankbased""" +390 30 dataset """wn18rr""" +390 30 model """kg2e""" +390 30 loss """nssa""" +390 30 regularizer """no""" +390 30 optimizer """adam""" +390 30 training_loop """owa""" +390 30 negative_sampler """basic""" +390 30 evaluator """rankbased""" +390 31 dataset """wn18rr""" +390 31 model """kg2e""" +390 31 loss """nssa""" +390 31 regularizer """no""" +390 31 optimizer """adam""" +390 31 training_loop """owa""" +390 31 negative_sampler """basic""" +390 31 evaluator """rankbased""" +390 32 dataset """wn18rr""" +390 32 model """kg2e""" +390 32 loss """nssa""" +390 32 regularizer """no""" +390 32 optimizer """adam""" +390 32 training_loop """owa""" +390 32 negative_sampler """basic""" +390 32 evaluator """rankbased""" +390 33 dataset """wn18rr""" +390 33 model """kg2e""" +390 33 loss """nssa""" +390 33 regularizer """no""" +390 33 optimizer """adam""" +390 33 training_loop """owa""" +390 33 negative_sampler """basic""" +390 33 evaluator """rankbased""" +390 34 dataset """wn18rr""" +390 34 model """kg2e""" +390 34 loss """nssa""" +390 34 regularizer """no""" +390 34 optimizer """adam""" +390 34 training_loop """owa""" +390 34 negative_sampler """basic""" +390 34 evaluator """rankbased""" +390 35 dataset """wn18rr""" +390 35 model """kg2e""" +390 35 loss """nssa""" +390 35 regularizer """no""" +390 35 optimizer """adam""" +390 35 training_loop """owa""" +390 35 negative_sampler """basic""" +390 35 evaluator """rankbased""" +390 36 dataset """wn18rr""" +390 36 model """kg2e""" +390 36 loss """nssa""" +390 36 regularizer """no""" +390 36 optimizer """adam""" +390 36 training_loop """owa""" +390 36 negative_sampler """basic""" +390 36 evaluator """rankbased""" +390 37 dataset """wn18rr""" +390 37 model """kg2e""" +390 37 loss """nssa""" +390 37 regularizer """no""" +390 37 optimizer """adam""" +390 37 training_loop """owa""" +390 37 negative_sampler """basic""" +390 37 evaluator 
"""rankbased""" +390 38 dataset """wn18rr""" +390 38 model """kg2e""" +390 38 loss """nssa""" +390 38 regularizer """no""" +390 38 optimizer """adam""" +390 38 training_loop """owa""" +390 38 negative_sampler """basic""" +390 38 evaluator """rankbased""" +390 39 dataset """wn18rr""" +390 39 model """kg2e""" +390 39 loss """nssa""" +390 39 regularizer """no""" +390 39 optimizer """adam""" +390 39 training_loop """owa""" +390 39 negative_sampler """basic""" +390 39 evaluator """rankbased""" +390 40 dataset """wn18rr""" +390 40 model """kg2e""" +390 40 loss """nssa""" +390 40 regularizer """no""" +390 40 optimizer """adam""" +390 40 training_loop """owa""" +390 40 negative_sampler """basic""" +390 40 evaluator """rankbased""" +390 41 dataset """wn18rr""" +390 41 model """kg2e""" +390 41 loss """nssa""" +390 41 regularizer """no""" +390 41 optimizer """adam""" +390 41 training_loop """owa""" +390 41 negative_sampler """basic""" +390 41 evaluator """rankbased""" +390 42 dataset """wn18rr""" +390 42 model """kg2e""" +390 42 loss """nssa""" +390 42 regularizer """no""" +390 42 optimizer """adam""" +390 42 training_loop """owa""" +390 42 negative_sampler """basic""" +390 42 evaluator """rankbased""" +390 43 dataset """wn18rr""" +390 43 model """kg2e""" +390 43 loss """nssa""" +390 43 regularizer """no""" +390 43 optimizer """adam""" +390 43 training_loop """owa""" +390 43 negative_sampler """basic""" +390 43 evaluator """rankbased""" +390 44 dataset """wn18rr""" +390 44 model """kg2e""" +390 44 loss """nssa""" +390 44 regularizer """no""" +390 44 optimizer """adam""" +390 44 training_loop """owa""" +390 44 negative_sampler """basic""" +390 44 evaluator """rankbased""" +390 45 dataset """wn18rr""" +390 45 model """kg2e""" +390 45 loss """nssa""" +390 45 regularizer """no""" +390 45 optimizer """adam""" +390 45 training_loop """owa""" +390 45 negative_sampler """basic""" +390 45 evaluator """rankbased""" +391 1 model.embedding_dim 1.0 +391 1 model.c_min 0.04927416976199671 
+391 1 model.c_max 2.215225843610431 +391 1 loss.margin 3.641009453959307 +391 1 optimizer.lr 0.04697588177422741 +391 1 negative_sampler.num_negs_per_pos 10.0 +391 1 training.batch_size 1.0 +391 2 model.embedding_dim 1.0 +391 2 model.c_min 0.09083179079782798 +391 2 model.c_max 6.054320343371493 +391 2 loss.margin 6.40872000698513 +391 2 optimizer.lr 0.00717401368482125 +391 2 negative_sampler.num_negs_per_pos 87.0 +391 2 training.batch_size 0.0 +391 3 model.embedding_dim 0.0 +391 3 model.c_min 0.034181365150160785 +391 3 model.c_max 8.86611619203459 +391 3 loss.margin 9.011306631836256 +391 3 optimizer.lr 0.028587108080338697 +391 3 negative_sampler.num_negs_per_pos 86.0 +391 3 training.batch_size 0.0 +391 4 model.embedding_dim 0.0 +391 4 model.c_min 0.08552162162112516 +391 4 model.c_max 9.812129857957093 +391 4 loss.margin 9.776070787844 +391 4 optimizer.lr 0.08315164406926477 +391 4 negative_sampler.num_negs_per_pos 9.0 +391 4 training.batch_size 1.0 +391 5 model.embedding_dim 0.0 +391 5 model.c_min 0.06350378565562032 +391 5 model.c_max 1.753726854073867 +391 5 loss.margin 8.317115094085045 +391 5 optimizer.lr 0.0011640651667428807 +391 5 negative_sampler.num_negs_per_pos 15.0 +391 5 training.batch_size 2.0 +391 6 model.embedding_dim 2.0 +391 6 model.c_min 0.015775193477138932 +391 6 model.c_max 6.699636457294054 +391 6 loss.margin 4.8819289146290705 +391 6 optimizer.lr 0.04181507675211655 +391 6 negative_sampler.num_negs_per_pos 95.0 +391 6 training.batch_size 1.0 +391 7 model.embedding_dim 2.0 +391 7 model.c_min 0.06541541463233667 +391 7 model.c_max 5.4093475026402835 +391 7 loss.margin 4.990458522955512 +391 7 optimizer.lr 0.0033931451114780553 +391 7 negative_sampler.num_negs_per_pos 5.0 +391 7 training.batch_size 0.0 +391 8 model.embedding_dim 1.0 +391 8 model.c_min 0.012067579181134953 +391 8 model.c_max 2.242336136406644 +391 8 loss.margin 3.248857344972462 +391 8 optimizer.lr 0.025795246776880887 +391 8 negative_sampler.num_negs_per_pos 98.0 +391 8 
training.batch_size 1.0 +391 9 model.embedding_dim 0.0 +391 9 model.c_min 0.05528765979533101 +391 9 model.c_max 2.1893716730764776 +391 9 loss.margin 0.7256625591674315 +391 9 optimizer.lr 0.0029095301774549024 +391 9 negative_sampler.num_negs_per_pos 10.0 +391 9 training.batch_size 1.0 +391 10 model.embedding_dim 0.0 +391 10 model.c_min 0.06678522734109092 +391 10 model.c_max 4.074308446074734 +391 10 loss.margin 4.278116731974621 +391 10 optimizer.lr 0.005923168919990066 +391 10 negative_sampler.num_negs_per_pos 48.0 +391 10 training.batch_size 0.0 +391 11 model.embedding_dim 1.0 +391 11 model.c_min 0.05080395577802499 +391 11 model.c_max 3.7176251595232306 +391 11 loss.margin 3.1044907307864844 +391 11 optimizer.lr 0.06718101094891438 +391 11 negative_sampler.num_negs_per_pos 22.0 +391 11 training.batch_size 0.0 +391 12 model.embedding_dim 0.0 +391 12 model.c_min 0.06695563327158144 +391 12 model.c_max 4.139739055832722 +391 12 loss.margin 7.741282281535295 +391 12 optimizer.lr 0.013789598368895954 +391 12 negative_sampler.num_negs_per_pos 80.0 +391 12 training.batch_size 1.0 +391 13 model.embedding_dim 1.0 +391 13 model.c_min 0.06873886329451619 +391 13 model.c_max 9.675911248144844 +391 13 loss.margin 5.753724871760693 +391 13 optimizer.lr 0.0015095911448052462 +391 13 negative_sampler.num_negs_per_pos 80.0 +391 13 training.batch_size 2.0 +391 14 model.embedding_dim 1.0 +391 14 model.c_min 0.02094425990052524 +391 14 model.c_max 6.317289628264465 +391 14 loss.margin 1.7575497751559153 +391 14 optimizer.lr 0.001918541281161517 +391 14 negative_sampler.num_negs_per_pos 70.0 +391 14 training.batch_size 2.0 +391 15 model.embedding_dim 1.0 +391 15 model.c_min 0.08218089869869982 +391 15 model.c_max 3.943968786366747 +391 15 loss.margin 2.5286861251660264 +391 15 optimizer.lr 0.0010381122643987148 +391 15 negative_sampler.num_negs_per_pos 7.0 +391 15 training.batch_size 0.0 +391 16 model.embedding_dim 1.0 +391 16 model.c_min 0.025711353495156734 +391 16 model.c_max 
8.481739831908085 +391 16 loss.margin 2.7407465600929686 +391 16 optimizer.lr 0.002587708060888346 +391 16 negative_sampler.num_negs_per_pos 54.0 +391 16 training.batch_size 0.0 +391 17 model.embedding_dim 1.0 +391 17 model.c_min 0.013051322322891103 +391 17 model.c_max 6.936290243519533 +391 17 loss.margin 7.967314596838615 +391 17 optimizer.lr 0.0010242828272715718 +391 17 negative_sampler.num_negs_per_pos 73.0 +391 17 training.batch_size 0.0 +391 1 dataset """wn18rr""" +391 1 model """kg2e""" +391 1 loss """marginranking""" +391 1 regularizer """no""" +391 1 optimizer """adam""" +391 1 training_loop """owa""" +391 1 negative_sampler """basic""" +391 1 evaluator """rankbased""" +391 2 dataset """wn18rr""" +391 2 model """kg2e""" +391 2 loss """marginranking""" +391 2 regularizer """no""" +391 2 optimizer """adam""" +391 2 training_loop """owa""" +391 2 negative_sampler """basic""" +391 2 evaluator """rankbased""" +391 3 dataset """wn18rr""" +391 3 model """kg2e""" +391 3 loss """marginranking""" +391 3 regularizer """no""" +391 3 optimizer """adam""" +391 3 training_loop """owa""" +391 3 negative_sampler """basic""" +391 3 evaluator """rankbased""" +391 4 dataset """wn18rr""" +391 4 model """kg2e""" +391 4 loss """marginranking""" +391 4 regularizer """no""" +391 4 optimizer """adam""" +391 4 training_loop """owa""" +391 4 negative_sampler """basic""" +391 4 evaluator """rankbased""" +391 5 dataset """wn18rr""" +391 5 model """kg2e""" +391 5 loss """marginranking""" +391 5 regularizer """no""" +391 5 optimizer """adam""" +391 5 training_loop """owa""" +391 5 negative_sampler """basic""" +391 5 evaluator """rankbased""" +391 6 dataset """wn18rr""" +391 6 model """kg2e""" +391 6 loss """marginranking""" +391 6 regularizer """no""" +391 6 optimizer """adam""" +391 6 training_loop """owa""" +391 6 negative_sampler """basic""" +391 6 evaluator """rankbased""" +391 7 dataset """wn18rr""" +391 7 model """kg2e""" +391 7 loss """marginranking""" +391 7 regularizer 
"""no""" +391 7 optimizer """adam""" +391 7 training_loop """owa""" +391 7 negative_sampler """basic""" +391 7 evaluator """rankbased""" +391 8 dataset """wn18rr""" +391 8 model """kg2e""" +391 8 loss """marginranking""" +391 8 regularizer """no""" +391 8 optimizer """adam""" +391 8 training_loop """owa""" +391 8 negative_sampler """basic""" +391 8 evaluator """rankbased""" +391 9 dataset """wn18rr""" +391 9 model """kg2e""" +391 9 loss """marginranking""" +391 9 regularizer """no""" +391 9 optimizer """adam""" +391 9 training_loop """owa""" +391 9 negative_sampler """basic""" +391 9 evaluator """rankbased""" +391 10 dataset """wn18rr""" +391 10 model """kg2e""" +391 10 loss """marginranking""" +391 10 regularizer """no""" +391 10 optimizer """adam""" +391 10 training_loop """owa""" +391 10 negative_sampler """basic""" +391 10 evaluator """rankbased""" +391 11 dataset """wn18rr""" +391 11 model """kg2e""" +391 11 loss """marginranking""" +391 11 regularizer """no""" +391 11 optimizer """adam""" +391 11 training_loop """owa""" +391 11 negative_sampler """basic""" +391 11 evaluator """rankbased""" +391 12 dataset """wn18rr""" +391 12 model """kg2e""" +391 12 loss """marginranking""" +391 12 regularizer """no""" +391 12 optimizer """adam""" +391 12 training_loop """owa""" +391 12 negative_sampler """basic""" +391 12 evaluator """rankbased""" +391 13 dataset """wn18rr""" +391 13 model """kg2e""" +391 13 loss """marginranking""" +391 13 regularizer """no""" +391 13 optimizer """adam""" +391 13 training_loop """owa""" +391 13 negative_sampler """basic""" +391 13 evaluator """rankbased""" +391 14 dataset """wn18rr""" +391 14 model """kg2e""" +391 14 loss """marginranking""" +391 14 regularizer """no""" +391 14 optimizer """adam""" +391 14 training_loop """owa""" +391 14 negative_sampler """basic""" +391 14 evaluator """rankbased""" +391 15 dataset """wn18rr""" +391 15 model """kg2e""" +391 15 loss """marginranking""" +391 15 regularizer """no""" +391 15 optimizer 
"""adam""" +391 15 training_loop """owa""" +391 15 negative_sampler """basic""" +391 15 evaluator """rankbased""" +391 16 dataset """wn18rr""" +391 16 model """kg2e""" +391 16 loss """marginranking""" +391 16 regularizer """no""" +391 16 optimizer """adam""" +391 16 training_loop """owa""" +391 16 negative_sampler """basic""" +391 16 evaluator """rankbased""" +391 17 dataset """wn18rr""" +391 17 model """kg2e""" +391 17 loss """marginranking""" +391 17 regularizer """no""" +391 17 optimizer """adam""" +391 17 training_loop """owa""" +391 17 negative_sampler """basic""" +391 17 evaluator """rankbased""" +392 1 model.embedding_dim 1.0 +392 1 model.c_min 0.039178338218364674 +392 1 model.c_max 9.714242108337828 +392 1 loss.margin 3.2923854647016526 +392 1 optimizer.lr 0.09153018179756943 +392 1 negative_sampler.num_negs_per_pos 52.0 +392 1 training.batch_size 2.0 +392 2 model.embedding_dim 2.0 +392 2 model.c_min 0.039994431091801536 +392 2 model.c_max 2.9272358015958795 +392 2 loss.margin 6.930519628413696 +392 2 optimizer.lr 0.0010168502914879602 +392 2 negative_sampler.num_negs_per_pos 49.0 +392 2 training.batch_size 0.0 +392 3 model.embedding_dim 2.0 +392 3 model.c_min 0.019673211483922665 +392 3 model.c_max 1.249515938704552 +392 3 loss.margin 9.66262056707634 +392 3 optimizer.lr 0.011351352116617846 +392 3 negative_sampler.num_negs_per_pos 98.0 +392 3 training.batch_size 1.0 +392 4 model.embedding_dim 2.0 +392 4 model.c_min 0.03204680239712188 +392 4 model.c_max 7.350457615394042 +392 4 loss.margin 3.7030503323634854 +392 4 optimizer.lr 0.027716579751275575 +392 4 negative_sampler.num_negs_per_pos 79.0 +392 4 training.batch_size 1.0 +392 5 model.embedding_dim 0.0 +392 5 model.c_min 0.04577883874965008 +392 5 model.c_max 8.566976074727576 +392 5 loss.margin 4.838898486538822 +392 5 optimizer.lr 0.016032019074017783 +392 5 negative_sampler.num_negs_per_pos 89.0 +392 5 training.batch_size 1.0 +392 6 model.embedding_dim 2.0 +392 6 model.c_min 0.04934916555366719 +392 
6 model.c_max 3.778856287935387 +392 6 loss.margin 8.077721942782679 +392 6 optimizer.lr 0.003021733973888686 +392 6 negative_sampler.num_negs_per_pos 85.0 +392 6 training.batch_size 1.0 +392 7 model.embedding_dim 2.0 +392 7 model.c_min 0.09653994558594184 +392 7 model.c_max 7.720166683982391 +392 7 loss.margin 6.32611102150435 +392 7 optimizer.lr 0.002190766508753899 +392 7 negative_sampler.num_negs_per_pos 15.0 +392 7 training.batch_size 2.0 +392 8 model.embedding_dim 2.0 +392 8 model.c_min 0.01876267809936028 +392 8 model.c_max 2.8207970039310952 +392 8 loss.margin 0.6379959471663347 +392 8 optimizer.lr 0.03799276296881408 +392 8 negative_sampler.num_negs_per_pos 72.0 +392 8 training.batch_size 1.0 +392 9 model.embedding_dim 2.0 +392 9 model.c_min 0.021820544220665476 +392 9 model.c_max 5.14341205151014 +392 9 loss.margin 5.618083515377289 +392 9 optimizer.lr 0.0070077058257541885 +392 9 negative_sampler.num_negs_per_pos 78.0 +392 9 training.batch_size 0.0 +392 10 model.embedding_dim 2.0 +392 10 model.c_min 0.05687817691442407 +392 10 model.c_max 8.929105561487708 +392 10 loss.margin 0.9539820876012461 +392 10 optimizer.lr 0.0048849833535173715 +392 10 negative_sampler.num_negs_per_pos 10.0 +392 10 training.batch_size 2.0 +392 11 model.embedding_dim 1.0 +392 11 model.c_min 0.09760067378180234 +392 11 model.c_max 3.874606389112873 +392 11 loss.margin 8.713788501779716 +392 11 optimizer.lr 0.026897757337406156 +392 11 negative_sampler.num_negs_per_pos 28.0 +392 11 training.batch_size 2.0 +392 12 model.embedding_dim 0.0 +392 12 model.c_min 0.09875935778220256 +392 12 model.c_max 9.671238892364611 +392 12 loss.margin 6.792771312197068 +392 12 optimizer.lr 0.0020348377600422256 +392 12 negative_sampler.num_negs_per_pos 93.0 +392 12 training.batch_size 2.0 +392 13 model.embedding_dim 2.0 +392 13 model.c_min 0.06540049146158329 +392 13 model.c_max 9.506457316519931 +392 13 loss.margin 4.86528926240917 +392 13 optimizer.lr 0.07824593750361322 +392 13 
negative_sampler.num_negs_per_pos 25.0 +392 13 training.batch_size 2.0 +392 14 model.embedding_dim 2.0 +392 14 model.c_min 0.06843000017047378 +392 14 model.c_max 7.574725325509215 +392 14 loss.margin 1.7405358932460873 +392 14 optimizer.lr 0.00627275248819716 +392 14 negative_sampler.num_negs_per_pos 12.0 +392 14 training.batch_size 2.0 +392 15 model.embedding_dim 1.0 +392 15 model.c_min 0.030507410581024824 +392 15 model.c_max 9.649331530182332 +392 15 loss.margin 7.637851202509676 +392 15 optimizer.lr 0.005757831930080351 +392 15 negative_sampler.num_negs_per_pos 37.0 +392 15 training.batch_size 2.0 +392 16 model.embedding_dim 1.0 +392 16 model.c_min 0.014528901427872552 +392 16 model.c_max 4.365397963131325 +392 16 loss.margin 4.8141006210018205 +392 16 optimizer.lr 0.002850433682641268 +392 16 negative_sampler.num_negs_per_pos 7.0 +392 16 training.batch_size 2.0 +392 17 model.embedding_dim 1.0 +392 17 model.c_min 0.08668709189526448 +392 17 model.c_max 4.148264260626726 +392 17 loss.margin 3.572158904542834 +392 17 optimizer.lr 0.005649892539677594 +392 17 negative_sampler.num_negs_per_pos 56.0 +392 17 training.batch_size 1.0 +392 18 model.embedding_dim 0.0 +392 18 model.c_min 0.052869217033592215 +392 18 model.c_max 5.25790765237468 +392 18 loss.margin 6.6774879497966655 +392 18 optimizer.lr 0.002668638171548994 +392 18 negative_sampler.num_negs_per_pos 44.0 +392 18 training.batch_size 2.0 +392 19 model.embedding_dim 0.0 +392 19 model.c_min 0.03373219906295037 +392 19 model.c_max 2.4851725750365126 +392 19 loss.margin 7.582568609074622 +392 19 optimizer.lr 0.01552933383798642 +392 19 negative_sampler.num_negs_per_pos 2.0 +392 19 training.batch_size 0.0 +392 20 model.embedding_dim 1.0 +392 20 model.c_min 0.07121956235647714 +392 20 model.c_max 8.52991365414424 +392 20 loss.margin 3.6076247803885666 +392 20 optimizer.lr 0.0014677707872606082 +392 20 negative_sampler.num_negs_per_pos 97.0 +392 20 training.batch_size 1.0 +392 21 model.embedding_dim 1.0 +392 21 
model.c_min 0.09098627721906895 +392 21 model.c_max 2.558120118474154 +392 21 loss.margin 6.282536692604897 +392 21 optimizer.lr 0.001154888393666905 +392 21 negative_sampler.num_negs_per_pos 69.0 +392 21 training.batch_size 0.0 +392 22 model.embedding_dim 2.0 +392 22 model.c_min 0.02743325715544673 +392 22 model.c_max 9.592666039199074 +392 22 loss.margin 3.304558667929702 +392 22 optimizer.lr 0.048157692249175085 +392 22 negative_sampler.num_negs_per_pos 26.0 +392 22 training.batch_size 1.0 +392 23 model.embedding_dim 1.0 +392 23 model.c_min 0.015937080840350826 +392 23 model.c_max 9.60571944807133 +392 23 loss.margin 7.178921081444475 +392 23 optimizer.lr 0.005051825725425779 +392 23 negative_sampler.num_negs_per_pos 8.0 +392 23 training.batch_size 1.0 +392 24 model.embedding_dim 0.0 +392 24 model.c_min 0.045138509384032535 +392 24 model.c_max 9.136888359414023 +392 24 loss.margin 4.581367091828545 +392 24 optimizer.lr 0.012563697939332664 +392 24 negative_sampler.num_negs_per_pos 81.0 +392 24 training.batch_size 0.0 +392 25 model.embedding_dim 0.0 +392 25 model.c_min 0.013481431514069669 +392 25 model.c_max 3.2091674978623574 +392 25 loss.margin 7.340860113820976 +392 25 optimizer.lr 0.001835954235077859 +392 25 negative_sampler.num_negs_per_pos 47.0 +392 25 training.batch_size 1.0 +392 26 model.embedding_dim 0.0 +392 26 model.c_min 0.03509846378116869 +392 26 model.c_max 8.638810867693941 +392 26 loss.margin 0.5130088415140261 +392 26 optimizer.lr 0.047775983878431495 +392 26 negative_sampler.num_negs_per_pos 24.0 +392 26 training.batch_size 2.0 +392 27 model.embedding_dim 2.0 +392 27 model.c_min 0.022029672371364214 +392 27 model.c_max 4.4115636902783875 +392 27 loss.margin 3.9192132824220423 +392 27 optimizer.lr 0.010548418450453408 +392 27 negative_sampler.num_negs_per_pos 78.0 +392 27 training.batch_size 2.0 +392 28 model.embedding_dim 0.0 +392 28 model.c_min 0.017281171908678762 +392 28 model.c_max 5.383713231731823 +392 28 loss.margin 6.565628205357155 
+392 28 optimizer.lr 0.006216698590786469 +392 28 negative_sampler.num_negs_per_pos 55.0 +392 28 training.batch_size 1.0 +392 29 model.embedding_dim 1.0 +392 29 model.c_min 0.0349034882064788 +392 29 model.c_max 6.870132656434553 +392 29 loss.margin 2.212438725529693 +392 29 optimizer.lr 0.027987186809227352 +392 29 negative_sampler.num_negs_per_pos 44.0 +392 29 training.batch_size 1.0 +392 30 model.embedding_dim 1.0 +392 30 model.c_min 0.04833591856796104 +392 30 model.c_max 2.53458025015962 +392 30 loss.margin 7.724117596609461 +392 30 optimizer.lr 0.0030287571225643766 +392 30 negative_sampler.num_negs_per_pos 94.0 +392 30 training.batch_size 1.0 +392 31 model.embedding_dim 1.0 +392 31 model.c_min 0.02359898187645525 +392 31 model.c_max 1.4165945635212263 +392 31 loss.margin 8.334784123693073 +392 31 optimizer.lr 0.0019256444517017593 +392 31 negative_sampler.num_negs_per_pos 23.0 +392 31 training.batch_size 2.0 +392 32 model.embedding_dim 0.0 +392 32 model.c_min 0.023743680616944703 +392 32 model.c_max 5.78170967041805 +392 32 loss.margin 6.547478883220458 +392 32 optimizer.lr 0.006274032908492347 +392 32 negative_sampler.num_negs_per_pos 14.0 +392 32 training.batch_size 0.0 +392 33 model.embedding_dim 2.0 +392 33 model.c_min 0.02488390005379165 +392 33 model.c_max 2.4733539209986146 +392 33 loss.margin 5.363905795950035 +392 33 optimizer.lr 0.002140100909706222 +392 33 negative_sampler.num_negs_per_pos 80.0 +392 33 training.batch_size 1.0 +392 1 dataset """wn18rr""" +392 1 model """kg2e""" +392 1 loss """marginranking""" +392 1 regularizer """no""" +392 1 optimizer """adam""" +392 1 training_loop """owa""" +392 1 negative_sampler """basic""" +392 1 evaluator """rankbased""" +392 2 dataset """wn18rr""" +392 2 model """kg2e""" +392 2 loss """marginranking""" +392 2 regularizer """no""" +392 2 optimizer """adam""" +392 2 training_loop """owa""" +392 2 negative_sampler """basic""" +392 2 evaluator """rankbased""" +392 3 dataset """wn18rr""" +392 3 model """kg2e""" 
+392 3 loss """marginranking""" +392 3 regularizer """no""" +392 3 optimizer """adam""" +392 3 training_loop """owa""" +392 3 negative_sampler """basic""" +392 3 evaluator """rankbased""" +392 4 dataset """wn18rr""" +392 4 model """kg2e""" +392 4 loss """marginranking""" +392 4 regularizer """no""" +392 4 optimizer """adam""" +392 4 training_loop """owa""" +392 4 negative_sampler """basic""" +392 4 evaluator """rankbased""" +392 5 dataset """wn18rr""" +392 5 model """kg2e""" +392 5 loss """marginranking""" +392 5 regularizer """no""" +392 5 optimizer """adam""" +392 5 training_loop """owa""" +392 5 negative_sampler """basic""" +392 5 evaluator """rankbased""" +392 6 dataset """wn18rr""" +392 6 model """kg2e""" +392 6 loss """marginranking""" +392 6 regularizer """no""" +392 6 optimizer """adam""" +392 6 training_loop """owa""" +392 6 negative_sampler """basic""" +392 6 evaluator """rankbased""" +392 7 dataset """wn18rr""" +392 7 model """kg2e""" +392 7 loss """marginranking""" +392 7 regularizer """no""" +392 7 optimizer """adam""" +392 7 training_loop """owa""" +392 7 negative_sampler """basic""" +392 7 evaluator """rankbased""" +392 8 dataset """wn18rr""" +392 8 model """kg2e""" +392 8 loss """marginranking""" +392 8 regularizer """no""" +392 8 optimizer """adam""" +392 8 training_loop """owa""" +392 8 negative_sampler """basic""" +392 8 evaluator """rankbased""" +392 9 dataset """wn18rr""" +392 9 model """kg2e""" +392 9 loss """marginranking""" +392 9 regularizer """no""" +392 9 optimizer """adam""" +392 9 training_loop """owa""" +392 9 negative_sampler """basic""" +392 9 evaluator """rankbased""" +392 10 dataset """wn18rr""" +392 10 model """kg2e""" +392 10 loss """marginranking""" +392 10 regularizer """no""" +392 10 optimizer """adam""" +392 10 training_loop """owa""" +392 10 negative_sampler """basic""" +392 10 evaluator """rankbased""" +392 11 dataset """wn18rr""" +392 11 model """kg2e""" +392 11 loss """marginranking""" +392 11 regularizer """no""" +392 11 
optimizer """adam""" +392 11 training_loop """owa""" +392 11 negative_sampler """basic""" +392 11 evaluator """rankbased""" +392 12 dataset """wn18rr""" +392 12 model """kg2e""" +392 12 loss """marginranking""" +392 12 regularizer """no""" +392 12 optimizer """adam""" +392 12 training_loop """owa""" +392 12 negative_sampler """basic""" +392 12 evaluator """rankbased""" +392 13 dataset """wn18rr""" +392 13 model """kg2e""" +392 13 loss """marginranking""" +392 13 regularizer """no""" +392 13 optimizer """adam""" +392 13 training_loop """owa""" +392 13 negative_sampler """basic""" +392 13 evaluator """rankbased""" +392 14 dataset """wn18rr""" +392 14 model """kg2e""" +392 14 loss """marginranking""" +392 14 regularizer """no""" +392 14 optimizer """adam""" +392 14 training_loop """owa""" +392 14 negative_sampler """basic""" +392 14 evaluator """rankbased""" +392 15 dataset """wn18rr""" +392 15 model """kg2e""" +392 15 loss """marginranking""" +392 15 regularizer """no""" +392 15 optimizer """adam""" +392 15 training_loop """owa""" +392 15 negative_sampler """basic""" +392 15 evaluator """rankbased""" +392 16 dataset """wn18rr""" +392 16 model """kg2e""" +392 16 loss """marginranking""" +392 16 regularizer """no""" +392 16 optimizer """adam""" +392 16 training_loop """owa""" +392 16 negative_sampler """basic""" +392 16 evaluator """rankbased""" +392 17 dataset """wn18rr""" +392 17 model """kg2e""" +392 17 loss """marginranking""" +392 17 regularizer """no""" +392 17 optimizer """adam""" +392 17 training_loop """owa""" +392 17 negative_sampler """basic""" +392 17 evaluator """rankbased""" +392 18 dataset """wn18rr""" +392 18 model """kg2e""" +392 18 loss """marginranking""" +392 18 regularizer """no""" +392 18 optimizer """adam""" +392 18 training_loop """owa""" +392 18 negative_sampler """basic""" +392 18 evaluator """rankbased""" +392 19 dataset """wn18rr""" +392 19 model """kg2e""" +392 19 loss """marginranking""" +392 19 regularizer """no""" +392 19 optimizer 
"""adam""" +392 19 training_loop """owa""" +392 19 negative_sampler """basic""" +392 19 evaluator """rankbased""" +392 20 dataset """wn18rr""" +392 20 model """kg2e""" +392 20 loss """marginranking""" +392 20 regularizer """no""" +392 20 optimizer """adam""" +392 20 training_loop """owa""" +392 20 negative_sampler """basic""" +392 20 evaluator """rankbased""" +392 21 dataset """wn18rr""" +392 21 model """kg2e""" +392 21 loss """marginranking""" +392 21 regularizer """no""" +392 21 optimizer """adam""" +392 21 training_loop """owa""" +392 21 negative_sampler """basic""" +392 21 evaluator """rankbased""" +392 22 dataset """wn18rr""" +392 22 model """kg2e""" +392 22 loss """marginranking""" +392 22 regularizer """no""" +392 22 optimizer """adam""" +392 22 training_loop """owa""" +392 22 negative_sampler """basic""" +392 22 evaluator """rankbased""" +392 23 dataset """wn18rr""" +392 23 model """kg2e""" +392 23 loss """marginranking""" +392 23 regularizer """no""" +392 23 optimizer """adam""" +392 23 training_loop """owa""" +392 23 negative_sampler """basic""" +392 23 evaluator """rankbased""" +392 24 dataset """wn18rr""" +392 24 model """kg2e""" +392 24 loss """marginranking""" +392 24 regularizer """no""" +392 24 optimizer """adam""" +392 24 training_loop """owa""" +392 24 negative_sampler """basic""" +392 24 evaluator """rankbased""" +392 25 dataset """wn18rr""" +392 25 model """kg2e""" +392 25 loss """marginranking""" +392 25 regularizer """no""" +392 25 optimizer """adam""" +392 25 training_loop """owa""" +392 25 negative_sampler """basic""" +392 25 evaluator """rankbased""" +392 26 dataset """wn18rr""" +392 26 model """kg2e""" +392 26 loss """marginranking""" +392 26 regularizer """no""" +392 26 optimizer """adam""" +392 26 training_loop """owa""" +392 26 negative_sampler """basic""" +392 26 evaluator """rankbased""" +392 27 dataset """wn18rr""" +392 27 model """kg2e""" +392 27 loss """marginranking""" +392 27 regularizer """no""" +392 27 optimizer """adam""" +392 
27 training_loop """owa""" +392 27 negative_sampler """basic""" +392 27 evaluator """rankbased""" +392 28 dataset """wn18rr""" +392 28 model """kg2e""" +392 28 loss """marginranking""" +392 28 regularizer """no""" +392 28 optimizer """adam""" +392 28 training_loop """owa""" +392 28 negative_sampler """basic""" +392 28 evaluator """rankbased""" +392 29 dataset """wn18rr""" +392 29 model """kg2e""" +392 29 loss """marginranking""" +392 29 regularizer """no""" +392 29 optimizer """adam""" +392 29 training_loop """owa""" +392 29 negative_sampler """basic""" +392 29 evaluator """rankbased""" +392 30 dataset """wn18rr""" +392 30 model """kg2e""" +392 30 loss """marginranking""" +392 30 regularizer """no""" +392 30 optimizer """adam""" +392 30 training_loop """owa""" +392 30 negative_sampler """basic""" +392 30 evaluator """rankbased""" +392 31 dataset """wn18rr""" +392 31 model """kg2e""" +392 31 loss """marginranking""" +392 31 regularizer """no""" +392 31 optimizer """adam""" +392 31 training_loop """owa""" +392 31 negative_sampler """basic""" +392 31 evaluator """rankbased""" +392 32 dataset """wn18rr""" +392 32 model """kg2e""" +392 32 loss """marginranking""" +392 32 regularizer """no""" +392 32 optimizer """adam""" +392 32 training_loop """owa""" +392 32 negative_sampler """basic""" +392 32 evaluator """rankbased""" +392 33 dataset """wn18rr""" +392 33 model """kg2e""" +392 33 loss """marginranking""" +392 33 regularizer """no""" +392 33 optimizer """adam""" +392 33 training_loop """owa""" +392 33 negative_sampler """basic""" +392 33 evaluator """rankbased""" +393 1 model.embedding_dim 0.0 +393 1 model.c_min 0.036838738549206 +393 1 model.c_max 6.1468758464054964 +393 1 optimizer.lr 0.003099043987747482 +393 1 negative_sampler.num_negs_per_pos 16.0 +393 1 training.batch_size 1.0 +393 2 model.embedding_dim 2.0 +393 2 model.c_min 0.013245256990054749 +393 2 model.c_max 4.416584891877703 +393 2 optimizer.lr 0.003188240342472937 +393 2 negative_sampler.num_negs_per_pos 
83.0 +393 2 training.batch_size 2.0 +393 3 model.embedding_dim 2.0 +393 3 model.c_min 0.013813485874214503 +393 3 model.c_max 7.34776351763211 +393 3 optimizer.lr 0.0010440657274516094 +393 3 negative_sampler.num_negs_per_pos 48.0 +393 3 training.batch_size 0.0 +393 4 model.embedding_dim 1.0 +393 4 model.c_min 0.02054877771549069 +393 4 model.c_max 4.147963019630697 +393 4 optimizer.lr 0.08149926619867108 +393 4 negative_sampler.num_negs_per_pos 0.0 +393 4 training.batch_size 2.0 +393 5 model.embedding_dim 0.0 +393 5 model.c_min 0.015324718540392667 +393 5 model.c_max 1.4795355434324031 +393 5 optimizer.lr 0.012461806391935 +393 5 negative_sampler.num_negs_per_pos 6.0 +393 5 training.batch_size 2.0 +393 6 model.embedding_dim 1.0 +393 6 model.c_min 0.011724239834975453 +393 6 model.c_max 5.584687285736932 +393 6 optimizer.lr 0.05256837637153486 +393 6 negative_sampler.num_negs_per_pos 96.0 +393 6 training.batch_size 1.0 +393 7 model.embedding_dim 2.0 +393 7 model.c_min 0.017831265421689252 +393 7 model.c_max 5.738017490031255 +393 7 optimizer.lr 0.004142434117267497 +393 7 negative_sampler.num_negs_per_pos 92.0 +393 7 training.batch_size 2.0 +393 8 model.embedding_dim 2.0 +393 8 model.c_min 0.02688151193775083 +393 8 model.c_max 4.08166578300699 +393 8 optimizer.lr 0.0031786854003079392 +393 8 negative_sampler.num_negs_per_pos 61.0 +393 8 training.batch_size 1.0 +393 9 model.embedding_dim 1.0 +393 9 model.c_min 0.029229007097067952 +393 9 model.c_max 5.228535160484554 +393 9 optimizer.lr 0.0015145345079356676 +393 9 negative_sampler.num_negs_per_pos 58.0 +393 9 training.batch_size 1.0 +393 10 model.embedding_dim 0.0 +393 10 model.c_min 0.042678564828181255 +393 10 model.c_max 9.29077662336856 +393 10 optimizer.lr 0.0021185662103781664 +393 10 negative_sampler.num_negs_per_pos 75.0 +393 10 training.batch_size 1.0 +393 11 model.embedding_dim 2.0 +393 11 model.c_min 0.025633833140200544 +393 11 model.c_max 5.8333064625708495 +393 11 optimizer.lr 0.021630877510673927 
+393 11 negative_sampler.num_negs_per_pos 44.0 +393 11 training.batch_size 0.0 +393 12 model.embedding_dim 2.0 +393 12 model.c_min 0.028672357255525797 +393 12 model.c_max 1.3729832204783246 +393 12 optimizer.lr 0.0018274683558911421 +393 12 negative_sampler.num_negs_per_pos 53.0 +393 12 training.batch_size 1.0 +393 13 model.embedding_dim 2.0 +393 13 model.c_min 0.02380753187230652 +393 13 model.c_max 2.0408102204599565 +393 13 optimizer.lr 0.0023616313984642645 +393 13 negative_sampler.num_negs_per_pos 34.0 +393 13 training.batch_size 2.0 +393 14 model.embedding_dim 1.0 +393 14 model.c_min 0.013874321626140176 +393 14 model.c_max 9.723306902947161 +393 14 optimizer.lr 0.057934541186823035 +393 14 negative_sampler.num_negs_per_pos 55.0 +393 14 training.batch_size 0.0 +393 15 model.embedding_dim 0.0 +393 15 model.c_min 0.02930285018870967 +393 15 model.c_max 6.597510747830924 +393 15 optimizer.lr 0.07907511035653846 +393 15 negative_sampler.num_negs_per_pos 11.0 +393 15 training.batch_size 2.0 +393 16 model.embedding_dim 1.0 +393 16 model.c_min 0.017714332606406535 +393 16 model.c_max 9.583158695080845 +393 16 optimizer.lr 0.005201550070153043 +393 16 negative_sampler.num_negs_per_pos 98.0 +393 16 training.batch_size 0.0 +393 17 model.embedding_dim 0.0 +393 17 model.c_min 0.010733235573178387 +393 17 model.c_max 4.674949173216688 +393 17 optimizer.lr 0.001687747697443609 +393 17 negative_sampler.num_negs_per_pos 96.0 +393 17 training.batch_size 0.0 +393 18 model.embedding_dim 1.0 +393 18 model.c_min 0.015331254695122452 +393 18 model.c_max 5.40406840478396 +393 18 optimizer.lr 0.0010231713290131921 +393 18 negative_sampler.num_negs_per_pos 31.0 +393 18 training.batch_size 1.0 +393 19 model.embedding_dim 2.0 +393 19 model.c_min 0.011233659540575503 +393 19 model.c_max 4.870579658514085 +393 19 optimizer.lr 0.05066039419577593 +393 19 negative_sampler.num_negs_per_pos 47.0 +393 19 training.batch_size 1.0 +393 20 model.embedding_dim 0.0 +393 20 model.c_min 
0.026640578471055745 +393 20 model.c_max 9.064501895689796 +393 20 optimizer.lr 0.0012631117852503876 +393 20 negative_sampler.num_negs_per_pos 34.0 +393 20 training.batch_size 1.0 +393 21 model.embedding_dim 0.0 +393 21 model.c_min 0.03832025372871045 +393 21 model.c_max 6.47974616272258 +393 21 optimizer.lr 0.049393956879910585 +393 21 negative_sampler.num_negs_per_pos 75.0 +393 21 training.batch_size 2.0 +393 22 model.embedding_dim 0.0 +393 22 model.c_min 0.08194836545035565 +393 22 model.c_max 8.801253411781738 +393 22 optimizer.lr 0.0014748979396399838 +393 22 negative_sampler.num_negs_per_pos 76.0 +393 22 training.batch_size 2.0 +393 23 model.embedding_dim 2.0 +393 23 model.c_min 0.023235098803632986 +393 23 model.c_max 9.495769741249937 +393 23 optimizer.lr 0.0027952979569878128 +393 23 negative_sampler.num_negs_per_pos 45.0 +393 23 training.batch_size 0.0 +393 24 model.embedding_dim 2.0 +393 24 model.c_min 0.025171101421510664 +393 24 model.c_max 1.9591325663197443 +393 24 optimizer.lr 0.005588086234087374 +393 24 negative_sampler.num_negs_per_pos 38.0 +393 24 training.batch_size 2.0 +393 25 model.embedding_dim 0.0 +393 25 model.c_min 0.012569174079701626 +393 25 model.c_max 5.088576069530217 +393 25 optimizer.lr 0.03141964022025931 +393 25 negative_sampler.num_negs_per_pos 72.0 +393 25 training.batch_size 2.0 +393 26 model.embedding_dim 2.0 +393 26 model.c_min 0.013537470376023217 +393 26 model.c_max 7.948158663272317 +393 26 optimizer.lr 0.008405912198874934 +393 26 negative_sampler.num_negs_per_pos 77.0 +393 26 training.batch_size 1.0 +393 27 model.embedding_dim 0.0 +393 27 model.c_min 0.01871232173043746 +393 27 model.c_max 7.273919822264816 +393 27 optimizer.lr 0.04252481448274641 +393 27 negative_sampler.num_negs_per_pos 24.0 +393 27 training.batch_size 1.0 +393 28 model.embedding_dim 2.0 +393 28 model.c_min 0.0724556420522362 +393 28 model.c_max 3.931309727020185 +393 28 optimizer.lr 0.06420548515904077 +393 28 negative_sampler.num_negs_per_pos 53.0 
+393 28 training.batch_size 0.0 +393 29 model.embedding_dim 1.0 +393 29 model.c_min 0.09101873015521023 +393 29 model.c_max 4.575837144585574 +393 29 optimizer.lr 0.014519617494906528 +393 29 negative_sampler.num_negs_per_pos 92.0 +393 29 training.batch_size 1.0 +393 30 model.embedding_dim 1.0 +393 30 model.c_min 0.0791804298719494 +393 30 model.c_max 4.433970158379103 +393 30 optimizer.lr 0.011473892861631502 +393 30 negative_sampler.num_negs_per_pos 10.0 +393 30 training.batch_size 1.0 +393 31 model.embedding_dim 2.0 +393 31 model.c_min 0.015242474512220824 +393 31 model.c_max 2.1461510841552163 +393 31 optimizer.lr 0.002129311544209092 +393 31 negative_sampler.num_negs_per_pos 3.0 +393 31 training.batch_size 1.0 +393 32 model.embedding_dim 0.0 +393 32 model.c_min 0.013651798160590964 +393 32 model.c_max 6.6808425283823905 +393 32 optimizer.lr 0.06951121949745709 +393 32 negative_sampler.num_negs_per_pos 40.0 +393 32 training.batch_size 1.0 +393 33 model.embedding_dim 0.0 +393 33 model.c_min 0.07096062740531223 +393 33 model.c_max 1.92125081325921 +393 33 optimizer.lr 0.004905065034127872 +393 33 negative_sampler.num_negs_per_pos 75.0 +393 33 training.batch_size 0.0 +393 34 model.embedding_dim 2.0 +393 34 model.c_min 0.04000195928391946 +393 34 model.c_max 5.2825147514086375 +393 34 optimizer.lr 0.0054173881829837804 +393 34 negative_sampler.num_negs_per_pos 49.0 +393 34 training.batch_size 1.0 +393 35 model.embedding_dim 2.0 +393 35 model.c_min 0.05715978605090039 +393 35 model.c_max 9.177193945206513 +393 35 optimizer.lr 0.0011557724124236436 +393 35 negative_sampler.num_negs_per_pos 28.0 +393 35 training.batch_size 1.0 +393 36 model.embedding_dim 2.0 +393 36 model.c_min 0.04033819418154555 +393 36 model.c_max 6.587221990922465 +393 36 optimizer.lr 0.001886791025722509 +393 36 negative_sampler.num_negs_per_pos 26.0 +393 36 training.batch_size 1.0 +393 37 model.embedding_dim 2.0 +393 37 model.c_min 0.031945444118423504 +393 37 model.c_max 3.608113462330552 +393 
37 optimizer.lr 0.003213518755696061 +393 37 negative_sampler.num_negs_per_pos 67.0 +393 37 training.batch_size 1.0 +393 38 model.embedding_dim 0.0 +393 38 model.c_min 0.08360947722955824 +393 38 model.c_max 2.9234271335160336 +393 38 optimizer.lr 0.003912628743934795 +393 38 negative_sampler.num_negs_per_pos 33.0 +393 38 training.batch_size 2.0 +393 39 model.embedding_dim 2.0 +393 39 model.c_min 0.016096399546161683 +393 39 model.c_max 9.546800486106363 +393 39 optimizer.lr 0.044797206508902586 +393 39 negative_sampler.num_negs_per_pos 92.0 +393 39 training.batch_size 2.0 +393 40 model.embedding_dim 0.0 +393 40 model.c_min 0.019762893744723203 +393 40 model.c_max 8.234184921717233 +393 40 optimizer.lr 0.033307115293496926 +393 40 negative_sampler.num_negs_per_pos 51.0 +393 40 training.batch_size 0.0 +393 41 model.embedding_dim 2.0 +393 41 model.c_min 0.0314416413611694 +393 41 model.c_max 3.270652181535395 +393 41 optimizer.lr 0.07635421980273258 +393 41 negative_sampler.num_negs_per_pos 95.0 +393 41 training.batch_size 2.0 +393 42 model.embedding_dim 0.0 +393 42 model.c_min 0.013393734675868817 +393 42 model.c_max 2.149939550668032 +393 42 optimizer.lr 0.0020643628294087875 +393 42 negative_sampler.num_negs_per_pos 29.0 +393 42 training.batch_size 2.0 +393 43 model.embedding_dim 1.0 +393 43 model.c_min 0.06577652195505233 +393 43 model.c_max 3.0186451849288343 +393 43 optimizer.lr 0.06865185043979653 +393 43 negative_sampler.num_negs_per_pos 88.0 +393 43 training.batch_size 1.0 +393 44 model.embedding_dim 0.0 +393 44 model.c_min 0.019183006119131037 +393 44 model.c_max 6.342552918668987 +393 44 optimizer.lr 0.00405854181288974 +393 44 negative_sampler.num_negs_per_pos 92.0 +393 44 training.batch_size 2.0 +393 45 model.embedding_dim 0.0 +393 45 model.c_min 0.044131870412611164 +393 45 model.c_max 3.148787869023968 +393 45 optimizer.lr 0.06960961606453638 +393 45 negative_sampler.num_negs_per_pos 37.0 +393 45 training.batch_size 0.0 +393 46 model.embedding_dim 2.0 
+393 46 model.c_min 0.014243511796454694 +393 46 model.c_max 9.167824049230477 +393 46 optimizer.lr 0.06514598236352395 +393 46 negative_sampler.num_negs_per_pos 58.0 +393 46 training.batch_size 2.0 +393 47 model.embedding_dim 2.0 +393 47 model.c_min 0.01105559287373303 +393 47 model.c_max 2.314823598378247 +393 47 optimizer.lr 0.014284097549550618 +393 47 negative_sampler.num_negs_per_pos 72.0 +393 47 training.batch_size 0.0 +393 48 model.embedding_dim 0.0 +393 48 model.c_min 0.0314606145446627 +393 48 model.c_max 3.455562983563543 +393 48 optimizer.lr 0.05941821005045349 +393 48 negative_sampler.num_negs_per_pos 40.0 +393 48 training.batch_size 1.0 +393 49 model.embedding_dim 2.0 +393 49 model.c_min 0.05660230522892395 +393 49 model.c_max 3.539564411590919 +393 49 optimizer.lr 0.001105650093561273 +393 49 negative_sampler.num_negs_per_pos 75.0 +393 49 training.batch_size 0.0 +393 50 model.embedding_dim 2.0 +393 50 model.c_min 0.011745720590981562 +393 50 model.c_max 8.411846873554424 +393 50 optimizer.lr 0.0031184289102232617 +393 50 negative_sampler.num_negs_per_pos 26.0 +393 50 training.batch_size 0.0 +393 51 model.embedding_dim 1.0 +393 51 model.c_min 0.0181873877941085 +393 51 model.c_max 2.2273997617878734 +393 51 optimizer.lr 0.0034530442322086652 +393 51 negative_sampler.num_negs_per_pos 14.0 +393 51 training.batch_size 0.0 +393 52 model.embedding_dim 0.0 +393 52 model.c_min 0.04673124930025878 +393 52 model.c_max 9.32519330868744 +393 52 optimizer.lr 0.002897710120643285 +393 52 negative_sampler.num_negs_per_pos 48.0 +393 52 training.batch_size 1.0 +393 53 model.embedding_dim 2.0 +393 53 model.c_min 0.06171025742428335 +393 53 model.c_max 5.055103849029444 +393 53 optimizer.lr 0.01573172215643768 +393 53 negative_sampler.num_negs_per_pos 6.0 +393 53 training.batch_size 1.0 +393 54 model.embedding_dim 1.0 +393 54 model.c_min 0.01790336932705674 +393 54 model.c_max 4.297037714917481 +393 54 optimizer.lr 0.049269885487996615 +393 54 
negative_sampler.num_negs_per_pos 51.0 +393 54 training.batch_size 2.0 +393 55 model.embedding_dim 1.0 +393 55 model.c_min 0.04485584441006778 +393 55 model.c_max 3.922311208916513 +393 55 optimizer.lr 0.012338958132834124 +393 55 negative_sampler.num_negs_per_pos 99.0 +393 55 training.batch_size 2.0 +393 56 model.embedding_dim 1.0 +393 56 model.c_min 0.017632284062669187 +393 56 model.c_max 6.268359791505287 +393 56 optimizer.lr 0.06969559002129908 +393 56 negative_sampler.num_negs_per_pos 80.0 +393 56 training.batch_size 1.0 +393 57 model.embedding_dim 1.0 +393 57 model.c_min 0.09532120714092694 +393 57 model.c_max 9.810014492029305 +393 57 optimizer.lr 0.007060163146521252 +393 57 negative_sampler.num_negs_per_pos 93.0 +393 57 training.batch_size 2.0 +393 58 model.embedding_dim 1.0 +393 58 model.c_min 0.08169539296128679 +393 58 model.c_max 8.79163750712119 +393 58 optimizer.lr 0.0038733536319082783 +393 58 negative_sampler.num_negs_per_pos 33.0 +393 58 training.batch_size 1.0 +393 59 model.embedding_dim 0.0 +393 59 model.c_min 0.022389527156753013 +393 59 model.c_max 6.908374701671948 +393 59 optimizer.lr 0.00259114644325196 +393 59 negative_sampler.num_negs_per_pos 78.0 +393 59 training.batch_size 1.0 +393 60 model.embedding_dim 1.0 +393 60 model.c_min 0.06323079628090561 +393 60 model.c_max 6.301403680179448 +393 60 optimizer.lr 0.013870878122864205 +393 60 negative_sampler.num_negs_per_pos 50.0 +393 60 training.batch_size 0.0 +393 61 model.embedding_dim 1.0 +393 61 model.c_min 0.08150130261198907 +393 61 model.c_max 9.552811519417203 +393 61 optimizer.lr 0.012720032402620483 +393 61 negative_sampler.num_negs_per_pos 89.0 +393 61 training.batch_size 0.0 +393 62 model.embedding_dim 2.0 +393 62 model.c_min 0.04782191839657304 +393 62 model.c_max 8.654598918477813 +393 62 optimizer.lr 0.0020119890504029076 +393 62 negative_sampler.num_negs_per_pos 47.0 +393 62 training.batch_size 1.0 +393 1 dataset """wn18rr""" +393 1 model """kg2e""" +393 1 loss 
"""bceaftersigmoid""" +393 1 regularizer """no""" +393 1 optimizer """adam""" +393 1 training_loop """owa""" +393 1 negative_sampler """basic""" +393 1 evaluator """rankbased""" +393 2 dataset """wn18rr""" +393 2 model """kg2e""" +393 2 loss """bceaftersigmoid""" +393 2 regularizer """no""" +393 2 optimizer """adam""" +393 2 training_loop """owa""" +393 2 negative_sampler """basic""" +393 2 evaluator """rankbased""" +393 3 dataset """wn18rr""" +393 3 model """kg2e""" +393 3 loss """bceaftersigmoid""" +393 3 regularizer """no""" +393 3 optimizer """adam""" +393 3 training_loop """owa""" +393 3 negative_sampler """basic""" +393 3 evaluator """rankbased""" +393 4 dataset """wn18rr""" +393 4 model """kg2e""" +393 4 loss """bceaftersigmoid""" +393 4 regularizer """no""" +393 4 optimizer """adam""" +393 4 training_loop """owa""" +393 4 negative_sampler """basic""" +393 4 evaluator """rankbased""" +393 5 dataset """wn18rr""" +393 5 model """kg2e""" +393 5 loss """bceaftersigmoid""" +393 5 regularizer """no""" +393 5 optimizer """adam""" +393 5 training_loop """owa""" +393 5 negative_sampler """basic""" +393 5 evaluator """rankbased""" +393 6 dataset """wn18rr""" +393 6 model """kg2e""" +393 6 loss """bceaftersigmoid""" +393 6 regularizer """no""" +393 6 optimizer """adam""" +393 6 training_loop """owa""" +393 6 negative_sampler """basic""" +393 6 evaluator """rankbased""" +393 7 dataset """wn18rr""" +393 7 model """kg2e""" +393 7 loss """bceaftersigmoid""" +393 7 regularizer """no""" +393 7 optimizer """adam""" +393 7 training_loop """owa""" +393 7 negative_sampler """basic""" +393 7 evaluator """rankbased""" +393 8 dataset """wn18rr""" +393 8 model """kg2e""" +393 8 loss """bceaftersigmoid""" +393 8 regularizer """no""" +393 8 optimizer """adam""" +393 8 training_loop """owa""" +393 8 negative_sampler """basic""" +393 8 evaluator """rankbased""" +393 9 dataset """wn18rr""" +393 9 model """kg2e""" +393 9 loss """bceaftersigmoid""" +393 9 regularizer """no""" +393 9 
optimizer """adam""" +393 9 training_loop """owa""" +393 9 negative_sampler """basic""" +393 9 evaluator """rankbased""" +393 10 dataset """wn18rr""" +393 10 model """kg2e""" +393 10 loss """bceaftersigmoid""" +393 10 regularizer """no""" +393 10 optimizer """adam""" +393 10 training_loop """owa""" +393 10 negative_sampler """basic""" +393 10 evaluator """rankbased""" +393 11 dataset """wn18rr""" +393 11 model """kg2e""" +393 11 loss """bceaftersigmoid""" +393 11 regularizer """no""" +393 11 optimizer """adam""" +393 11 training_loop """owa""" +393 11 negative_sampler """basic""" +393 11 evaluator """rankbased""" +393 12 dataset """wn18rr""" +393 12 model """kg2e""" +393 12 loss """bceaftersigmoid""" +393 12 regularizer """no""" +393 12 optimizer """adam""" +393 12 training_loop """owa""" +393 12 negative_sampler """basic""" +393 12 evaluator """rankbased""" +393 13 dataset """wn18rr""" +393 13 model """kg2e""" +393 13 loss """bceaftersigmoid""" +393 13 regularizer """no""" +393 13 optimizer """adam""" +393 13 training_loop """owa""" +393 13 negative_sampler """basic""" +393 13 evaluator """rankbased""" +393 14 dataset """wn18rr""" +393 14 model """kg2e""" +393 14 loss """bceaftersigmoid""" +393 14 regularizer """no""" +393 14 optimizer """adam""" +393 14 training_loop """owa""" +393 14 negative_sampler """basic""" +393 14 evaluator """rankbased""" +393 15 dataset """wn18rr""" +393 15 model """kg2e""" +393 15 loss """bceaftersigmoid""" +393 15 regularizer """no""" +393 15 optimizer """adam""" +393 15 training_loop """owa""" +393 15 negative_sampler """basic""" +393 15 evaluator """rankbased""" +393 16 dataset """wn18rr""" +393 16 model """kg2e""" +393 16 loss """bceaftersigmoid""" +393 16 regularizer """no""" +393 16 optimizer """adam""" +393 16 training_loop """owa""" +393 16 negative_sampler """basic""" +393 16 evaluator """rankbased""" +393 17 dataset """wn18rr""" +393 17 model """kg2e""" +393 17 loss """bceaftersigmoid""" +393 17 regularizer """no""" +393 17 
optimizer """adam""" +393 17 training_loop """owa""" +393 17 negative_sampler """basic""" +393 17 evaluator """rankbased""" +393 18 dataset """wn18rr""" +393 18 model """kg2e""" +393 18 loss """bceaftersigmoid""" +393 18 regularizer """no""" +393 18 optimizer """adam""" +393 18 training_loop """owa""" +393 18 negative_sampler """basic""" +393 18 evaluator """rankbased""" +393 19 dataset """wn18rr""" +393 19 model """kg2e""" +393 19 loss """bceaftersigmoid""" +393 19 regularizer """no""" +393 19 optimizer """adam""" +393 19 training_loop """owa""" +393 19 negative_sampler """basic""" +393 19 evaluator """rankbased""" +393 20 dataset """wn18rr""" +393 20 model """kg2e""" +393 20 loss """bceaftersigmoid""" +393 20 regularizer """no""" +393 20 optimizer """adam""" +393 20 training_loop """owa""" +393 20 negative_sampler """basic""" +393 20 evaluator """rankbased""" +393 21 dataset """wn18rr""" +393 21 model """kg2e""" +393 21 loss """bceaftersigmoid""" +393 21 regularizer """no""" +393 21 optimizer """adam""" +393 21 training_loop """owa""" +393 21 negative_sampler """basic""" +393 21 evaluator """rankbased""" +393 22 dataset """wn18rr""" +393 22 model """kg2e""" +393 22 loss """bceaftersigmoid""" +393 22 regularizer """no""" +393 22 optimizer """adam""" +393 22 training_loop """owa""" +393 22 negative_sampler """basic""" +393 22 evaluator """rankbased""" +393 23 dataset """wn18rr""" +393 23 model """kg2e""" +393 23 loss """bceaftersigmoid""" +393 23 regularizer """no""" +393 23 optimizer """adam""" +393 23 training_loop """owa""" +393 23 negative_sampler """basic""" +393 23 evaluator """rankbased""" +393 24 dataset """wn18rr""" +393 24 model """kg2e""" +393 24 loss """bceaftersigmoid""" +393 24 regularizer """no""" +393 24 optimizer """adam""" +393 24 training_loop """owa""" +393 24 negative_sampler """basic""" +393 24 evaluator """rankbased""" +393 25 dataset """wn18rr""" +393 25 model """kg2e""" +393 25 loss """bceaftersigmoid""" +393 25 regularizer """no""" +393 25 
optimizer """adam""" +393 25 training_loop """owa""" +393 25 negative_sampler """basic""" +393 25 evaluator """rankbased""" +393 26 dataset """wn18rr""" +393 26 model """kg2e""" +393 26 loss """bceaftersigmoid""" +393 26 regularizer """no""" +393 26 optimizer """adam""" +393 26 training_loop """owa""" +393 26 negative_sampler """basic""" +393 26 evaluator """rankbased""" +393 27 dataset """wn18rr""" +393 27 model """kg2e""" +393 27 loss """bceaftersigmoid""" +393 27 regularizer """no""" +393 27 optimizer """adam""" +393 27 training_loop """owa""" +393 27 negative_sampler """basic""" +393 27 evaluator """rankbased""" +393 28 dataset """wn18rr""" +393 28 model """kg2e""" +393 28 loss """bceaftersigmoid""" +393 28 regularizer """no""" +393 28 optimizer """adam""" +393 28 training_loop """owa""" +393 28 negative_sampler """basic""" +393 28 evaluator """rankbased""" +393 29 dataset """wn18rr""" +393 29 model """kg2e""" +393 29 loss """bceaftersigmoid""" +393 29 regularizer """no""" +393 29 optimizer """adam""" +393 29 training_loop """owa""" +393 29 negative_sampler """basic""" +393 29 evaluator """rankbased""" +393 30 dataset """wn18rr""" +393 30 model """kg2e""" +393 30 loss """bceaftersigmoid""" +393 30 regularizer """no""" +393 30 optimizer """adam""" +393 30 training_loop """owa""" +393 30 negative_sampler """basic""" +393 30 evaluator """rankbased""" +393 31 dataset """wn18rr""" +393 31 model """kg2e""" +393 31 loss """bceaftersigmoid""" +393 31 regularizer """no""" +393 31 optimizer """adam""" +393 31 training_loop """owa""" +393 31 negative_sampler """basic""" +393 31 evaluator """rankbased""" +393 32 dataset """wn18rr""" +393 32 model """kg2e""" +393 32 loss """bceaftersigmoid""" +393 32 regularizer """no""" +393 32 optimizer """adam""" +393 32 training_loop """owa""" +393 32 negative_sampler """basic""" +393 32 evaluator """rankbased""" +393 33 dataset """wn18rr""" +393 33 model """kg2e""" +393 33 loss """bceaftersigmoid""" +393 33 regularizer """no""" +393 33 
optimizer """adam""" +393 33 training_loop """owa""" +393 33 negative_sampler """basic""" +393 33 evaluator """rankbased""" +393 34 dataset """wn18rr""" +393 34 model """kg2e""" +393 34 loss """bceaftersigmoid""" +393 34 regularizer """no""" +393 34 optimizer """adam""" +393 34 training_loop """owa""" +393 34 negative_sampler """basic""" +393 34 evaluator """rankbased""" +393 35 dataset """wn18rr""" +393 35 model """kg2e""" +393 35 loss """bceaftersigmoid""" +393 35 regularizer """no""" +393 35 optimizer """adam""" +393 35 training_loop """owa""" +393 35 negative_sampler """basic""" +393 35 evaluator """rankbased""" +393 36 dataset """wn18rr""" +393 36 model """kg2e""" +393 36 loss """bceaftersigmoid""" +393 36 regularizer """no""" +393 36 optimizer """adam""" +393 36 training_loop """owa""" +393 36 negative_sampler """basic""" +393 36 evaluator """rankbased""" +393 37 dataset """wn18rr""" +393 37 model """kg2e""" +393 37 loss """bceaftersigmoid""" +393 37 regularizer """no""" +393 37 optimizer """adam""" +393 37 training_loop """owa""" +393 37 negative_sampler """basic""" +393 37 evaluator """rankbased""" +393 38 dataset """wn18rr""" +393 38 model """kg2e""" +393 38 loss """bceaftersigmoid""" +393 38 regularizer """no""" +393 38 optimizer """adam""" +393 38 training_loop """owa""" +393 38 negative_sampler """basic""" +393 38 evaluator """rankbased""" +393 39 dataset """wn18rr""" +393 39 model """kg2e""" +393 39 loss """bceaftersigmoid""" +393 39 regularizer """no""" +393 39 optimizer """adam""" +393 39 training_loop """owa""" +393 39 negative_sampler """basic""" +393 39 evaluator """rankbased""" +393 40 dataset """wn18rr""" +393 40 model """kg2e""" +393 40 loss """bceaftersigmoid""" +393 40 regularizer """no""" +393 40 optimizer """adam""" +393 40 training_loop """owa""" +393 40 negative_sampler """basic""" +393 40 evaluator """rankbased""" +393 41 dataset """wn18rr""" +393 41 model """kg2e""" +393 41 loss """bceaftersigmoid""" +393 41 regularizer """no""" +393 41 
optimizer """adam""" +393 41 training_loop """owa""" +393 41 negative_sampler """basic""" +393 41 evaluator """rankbased""" +393 42 dataset """wn18rr""" +393 42 model """kg2e""" +393 42 loss """bceaftersigmoid""" +393 42 regularizer """no""" +393 42 optimizer """adam""" +393 42 training_loop """owa""" +393 42 negative_sampler """basic""" +393 42 evaluator """rankbased""" +393 43 dataset """wn18rr""" +393 43 model """kg2e""" +393 43 loss """bceaftersigmoid""" +393 43 regularizer """no""" +393 43 optimizer """adam""" +393 43 training_loop """owa""" +393 43 negative_sampler """basic""" +393 43 evaluator """rankbased""" +393 44 dataset """wn18rr""" +393 44 model """kg2e""" +393 44 loss """bceaftersigmoid""" +393 44 regularizer """no""" +393 44 optimizer """adam""" +393 44 training_loop """owa""" +393 44 negative_sampler """basic""" +393 44 evaluator """rankbased""" +393 45 dataset """wn18rr""" +393 45 model """kg2e""" +393 45 loss """bceaftersigmoid""" +393 45 regularizer """no""" +393 45 optimizer """adam""" +393 45 training_loop """owa""" +393 45 negative_sampler """basic""" +393 45 evaluator """rankbased""" +393 46 dataset """wn18rr""" +393 46 model """kg2e""" +393 46 loss """bceaftersigmoid""" +393 46 regularizer """no""" +393 46 optimizer """adam""" +393 46 training_loop """owa""" +393 46 negative_sampler """basic""" +393 46 evaluator """rankbased""" +393 47 dataset """wn18rr""" +393 47 model """kg2e""" +393 47 loss """bceaftersigmoid""" +393 47 regularizer """no""" +393 47 optimizer """adam""" +393 47 training_loop """owa""" +393 47 negative_sampler """basic""" +393 47 evaluator """rankbased""" +393 48 dataset """wn18rr""" +393 48 model """kg2e""" +393 48 loss """bceaftersigmoid""" +393 48 regularizer """no""" +393 48 optimizer """adam""" +393 48 training_loop """owa""" +393 48 negative_sampler """basic""" +393 48 evaluator """rankbased""" +393 49 dataset """wn18rr""" +393 49 model """kg2e""" +393 49 loss """bceaftersigmoid""" +393 49 regularizer """no""" +393 49 
optimizer """adam""" +393 49 training_loop """owa""" +393 49 negative_sampler """basic""" +393 49 evaluator """rankbased""" +393 50 dataset """wn18rr""" +393 50 model """kg2e""" +393 50 loss """bceaftersigmoid""" +393 50 regularizer """no""" +393 50 optimizer """adam""" +393 50 training_loop """owa""" +393 50 negative_sampler """basic""" +393 50 evaluator """rankbased""" +393 51 dataset """wn18rr""" +393 51 model """kg2e""" +393 51 loss """bceaftersigmoid""" +393 51 regularizer """no""" +393 51 optimizer """adam""" +393 51 training_loop """owa""" +393 51 negative_sampler """basic""" +393 51 evaluator """rankbased""" +393 52 dataset """wn18rr""" +393 52 model """kg2e""" +393 52 loss """bceaftersigmoid""" +393 52 regularizer """no""" +393 52 optimizer """adam""" +393 52 training_loop """owa""" +393 52 negative_sampler """basic""" +393 52 evaluator """rankbased""" +393 53 dataset """wn18rr""" +393 53 model """kg2e""" +393 53 loss """bceaftersigmoid""" +393 53 regularizer """no""" +393 53 optimizer """adam""" +393 53 training_loop """owa""" +393 53 negative_sampler """basic""" +393 53 evaluator """rankbased""" +393 54 dataset """wn18rr""" +393 54 model """kg2e""" +393 54 loss """bceaftersigmoid""" +393 54 regularizer """no""" +393 54 optimizer """adam""" +393 54 training_loop """owa""" +393 54 negative_sampler """basic""" +393 54 evaluator """rankbased""" +393 55 dataset """wn18rr""" +393 55 model """kg2e""" +393 55 loss """bceaftersigmoid""" +393 55 regularizer """no""" +393 55 optimizer """adam""" +393 55 training_loop """owa""" +393 55 negative_sampler """basic""" +393 55 evaluator """rankbased""" +393 56 dataset """wn18rr""" +393 56 model """kg2e""" +393 56 loss """bceaftersigmoid""" +393 56 regularizer """no""" +393 56 optimizer """adam""" +393 56 training_loop """owa""" +393 56 negative_sampler """basic""" +393 56 evaluator """rankbased""" +393 57 dataset """wn18rr""" +393 57 model """kg2e""" +393 57 loss """bceaftersigmoid""" +393 57 regularizer """no""" +393 57 
optimizer """adam""" +393 57 training_loop """owa""" +393 57 negative_sampler """basic""" +393 57 evaluator """rankbased""" +393 58 dataset """wn18rr""" +393 58 model """kg2e""" +393 58 loss """bceaftersigmoid""" +393 58 regularizer """no""" +393 58 optimizer """adam""" +393 58 training_loop """owa""" +393 58 negative_sampler """basic""" +393 58 evaluator """rankbased""" +393 59 dataset """wn18rr""" +393 59 model """kg2e""" +393 59 loss """bceaftersigmoid""" +393 59 regularizer """no""" +393 59 optimizer """adam""" +393 59 training_loop """owa""" +393 59 negative_sampler """basic""" +393 59 evaluator """rankbased""" +393 60 dataset """wn18rr""" +393 60 model """kg2e""" +393 60 loss """bceaftersigmoid""" +393 60 regularizer """no""" +393 60 optimizer """adam""" +393 60 training_loop """owa""" +393 60 negative_sampler """basic""" +393 60 evaluator """rankbased""" +393 61 dataset """wn18rr""" +393 61 model """kg2e""" +393 61 loss """bceaftersigmoid""" +393 61 regularizer """no""" +393 61 optimizer """adam""" +393 61 training_loop """owa""" +393 61 negative_sampler """basic""" +393 61 evaluator """rankbased""" +393 62 dataset """wn18rr""" +393 62 model """kg2e""" +393 62 loss """bceaftersigmoid""" +393 62 regularizer """no""" +393 62 optimizer """adam""" +393 62 training_loop """owa""" +393 62 negative_sampler """basic""" +393 62 evaluator """rankbased""" +394 1 model.embedding_dim 2.0 +394 1 model.c_min 0.012611340365240893 +394 1 model.c_max 7.646019280809016 +394 1 optimizer.lr 0.004151864522023916 +394 1 negative_sampler.num_negs_per_pos 67.0 +394 1 training.batch_size 1.0 +394 2 model.embedding_dim 0.0 +394 2 model.c_min 0.09943658424845277 +394 2 model.c_max 2.451511579771551 +394 2 optimizer.lr 0.00535632627151264 +394 2 negative_sampler.num_negs_per_pos 57.0 +394 2 training.batch_size 0.0 +394 3 model.embedding_dim 1.0 +394 3 model.c_min 0.04219162782493412 +394 3 model.c_max 4.846631695850006 +394 3 optimizer.lr 0.016323553187346632 +394 3 
negative_sampler.num_negs_per_pos 96.0 +394 3 training.batch_size 0.0 +394 4 model.embedding_dim 2.0 +394 4 model.c_min 0.030665246143727117 +394 4 model.c_max 9.776276605016335 +394 4 optimizer.lr 0.024020708313621204 +394 4 negative_sampler.num_negs_per_pos 10.0 +394 4 training.batch_size 0.0 +394 5 model.embedding_dim 1.0 +394 5 model.c_min 0.0212150047616489 +394 5 model.c_max 5.69885984603332 +394 5 optimizer.lr 0.010643465081059672 +394 5 negative_sampler.num_negs_per_pos 54.0 +394 5 training.batch_size 0.0 +394 6 model.embedding_dim 2.0 +394 6 model.c_min 0.028954293961240586 +394 6 model.c_max 6.124436731519602 +394 6 optimizer.lr 0.0012285965515417122 +394 6 negative_sampler.num_negs_per_pos 46.0 +394 6 training.batch_size 1.0 +394 7 model.embedding_dim 2.0 +394 7 model.c_min 0.03073687307409789 +394 7 model.c_max 4.7523401230124875 +394 7 optimizer.lr 0.004859156872336067 +394 7 negative_sampler.num_negs_per_pos 43.0 +394 7 training.batch_size 0.0 +394 8 model.embedding_dim 0.0 +394 8 model.c_min 0.023128974120791134 +394 8 model.c_max 7.054087982330647 +394 8 optimizer.lr 0.0034731513973266376 +394 8 negative_sampler.num_negs_per_pos 34.0 +394 8 training.batch_size 1.0 +394 9 model.embedding_dim 1.0 +394 9 model.c_min 0.013604443880172613 +394 9 model.c_max 1.415489129861407 +394 9 optimizer.lr 0.001021914877946879 +394 9 negative_sampler.num_negs_per_pos 18.0 +394 9 training.batch_size 2.0 +394 10 model.embedding_dim 1.0 +394 10 model.c_min 0.0573981874837721 +394 10 model.c_max 7.440004833190155 +394 10 optimizer.lr 0.07785257405667846 +394 10 negative_sampler.num_negs_per_pos 32.0 +394 10 training.batch_size 2.0 +394 11 model.embedding_dim 1.0 +394 11 model.c_min 0.06375426378265872 +394 11 model.c_max 2.4017578222987823 +394 11 optimizer.lr 0.051406252512121636 +394 11 negative_sampler.num_negs_per_pos 20.0 +394 11 training.batch_size 1.0 +394 12 model.embedding_dim 2.0 +394 12 model.c_min 0.02905592630217677 +394 12 model.c_max 2.259692151764421 
+394 12 optimizer.lr 0.01179806132591898 +394 12 negative_sampler.num_negs_per_pos 5.0 +394 12 training.batch_size 2.0 +394 13 model.embedding_dim 2.0 +394 13 model.c_min 0.05872840990805474 +394 13 model.c_max 1.067076583572402 +394 13 optimizer.lr 0.003592705603032873 +394 13 negative_sampler.num_negs_per_pos 38.0 +394 13 training.batch_size 0.0 +394 14 model.embedding_dim 0.0 +394 14 model.c_min 0.053358080737359245 +394 14 model.c_max 1.8134937480441615 +394 14 optimizer.lr 0.0031941817491004517 +394 14 negative_sampler.num_negs_per_pos 24.0 +394 14 training.batch_size 0.0 +394 15 model.embedding_dim 0.0 +394 15 model.c_min 0.04922810161614641 +394 15 model.c_max 9.498134318546713 +394 15 optimizer.lr 0.009491316718540728 +394 15 negative_sampler.num_negs_per_pos 67.0 +394 15 training.batch_size 2.0 +394 16 model.embedding_dim 0.0 +394 16 model.c_min 0.09133443433477895 +394 16 model.c_max 5.465739706431587 +394 16 optimizer.lr 0.001880129297098614 +394 16 negative_sampler.num_negs_per_pos 8.0 +394 16 training.batch_size 2.0 +394 17 model.embedding_dim 0.0 +394 17 model.c_min 0.04767564115229125 +394 17 model.c_max 2.0578962849151115 +394 17 optimizer.lr 0.0027232680750798118 +394 17 negative_sampler.num_negs_per_pos 86.0 +394 17 training.batch_size 2.0 +394 18 model.embedding_dim 2.0 +394 18 model.c_min 0.02567463684995027 +394 18 model.c_max 8.248578530284151 +394 18 optimizer.lr 0.006475500292026775 +394 18 negative_sampler.num_negs_per_pos 54.0 +394 18 training.batch_size 2.0 +394 19 model.embedding_dim 2.0 +394 19 model.c_min 0.010252498878126357 +394 19 model.c_max 6.724782500736965 +394 19 optimizer.lr 0.061071939171422465 +394 19 negative_sampler.num_negs_per_pos 2.0 +394 19 training.batch_size 0.0 +394 20 model.embedding_dim 2.0 +394 20 model.c_min 0.06429186823427388 +394 20 model.c_max 8.905042990506661 +394 20 optimizer.lr 0.0026122559846369097 +394 20 negative_sampler.num_negs_per_pos 2.0 +394 20 training.batch_size 1.0 +394 21 model.embedding_dim 
2.0 +394 21 model.c_min 0.020323071629445823 +394 21 model.c_max 8.60572132490543 +394 21 optimizer.lr 0.09753978138330752 +394 21 negative_sampler.num_negs_per_pos 51.0 +394 21 training.batch_size 2.0 +394 22 model.embedding_dim 0.0 +394 22 model.c_min 0.04048858954696274 +394 22 model.c_max 5.000107642157543 +394 22 optimizer.lr 0.025193058215237905 +394 22 negative_sampler.num_negs_per_pos 51.0 +394 22 training.batch_size 2.0 +394 23 model.embedding_dim 1.0 +394 23 model.c_min 0.028300592670715843 +394 23 model.c_max 3.6481451553593267 +394 23 optimizer.lr 0.0018471800376805853 +394 23 negative_sampler.num_negs_per_pos 77.0 +394 23 training.batch_size 2.0 +394 24 model.embedding_dim 0.0 +394 24 model.c_min 0.010026562383506929 +394 24 model.c_max 1.4335443157369623 +394 24 optimizer.lr 0.0034097107381686237 +394 24 negative_sampler.num_negs_per_pos 2.0 +394 24 training.batch_size 1.0 +394 25 model.embedding_dim 2.0 +394 25 model.c_min 0.02896289354287389 +394 25 model.c_max 5.454595247899176 +394 25 optimizer.lr 0.001069266083737794 +394 25 negative_sampler.num_negs_per_pos 12.0 +394 25 training.batch_size 1.0 +394 26 model.embedding_dim 2.0 +394 26 model.c_min 0.03473494086055608 +394 26 model.c_max 7.3729232558316085 +394 26 optimizer.lr 0.004335951165431516 +394 26 negative_sampler.num_negs_per_pos 24.0 +394 26 training.batch_size 2.0 +394 27 model.embedding_dim 2.0 +394 27 model.c_min 0.042080447451496014 +394 27 model.c_max 3.5865537870884925 +394 27 optimizer.lr 0.001990152845057769 +394 27 negative_sampler.num_negs_per_pos 93.0 +394 27 training.batch_size 2.0 +394 28 model.embedding_dim 2.0 +394 28 model.c_min 0.011075222397025148 +394 28 model.c_max 6.3973444673378665 +394 28 optimizer.lr 0.0016108791773740822 +394 28 negative_sampler.num_negs_per_pos 44.0 +394 28 training.batch_size 1.0 +394 29 model.embedding_dim 2.0 +394 29 model.c_min 0.07172869039862126 +394 29 model.c_max 7.560331674164109 +394 29 optimizer.lr 0.0017713711542658144 +394 29 
negative_sampler.num_negs_per_pos 56.0 +394 29 training.batch_size 1.0 +394 30 model.embedding_dim 1.0 +394 30 model.c_min 0.042341661542982086 +394 30 model.c_max 3.5903654839756554 +394 30 optimizer.lr 0.014100258022332798 +394 30 negative_sampler.num_negs_per_pos 4.0 +394 30 training.batch_size 1.0 +394 31 model.embedding_dim 1.0 +394 31 model.c_min 0.016152557106475302 +394 31 model.c_max 2.8040258130285114 +394 31 optimizer.lr 0.005094350794020892 +394 31 negative_sampler.num_negs_per_pos 93.0 +394 31 training.batch_size 0.0 +394 32 model.embedding_dim 0.0 +394 32 model.c_min 0.06704980097556845 +394 32 model.c_max 4.861817068373794 +394 32 optimizer.lr 0.002411138198348517 +394 32 negative_sampler.num_negs_per_pos 78.0 +394 32 training.batch_size 0.0 +394 33 model.embedding_dim 2.0 +394 33 model.c_min 0.04262922967768853 +394 33 model.c_max 2.8949421713367585 +394 33 optimizer.lr 0.01251566266931233 +394 33 negative_sampler.num_negs_per_pos 89.0 +394 33 training.batch_size 0.0 +394 1 dataset """wn18rr""" +394 1 model """kg2e""" +394 1 loss """softplus""" +394 1 regularizer """no""" +394 1 optimizer """adam""" +394 1 training_loop """owa""" +394 1 negative_sampler """basic""" +394 1 evaluator """rankbased""" +394 2 dataset """wn18rr""" +394 2 model """kg2e""" +394 2 loss """softplus""" +394 2 regularizer """no""" +394 2 optimizer """adam""" +394 2 training_loop """owa""" +394 2 negative_sampler """basic""" +394 2 evaluator """rankbased""" +394 3 dataset """wn18rr""" +394 3 model """kg2e""" +394 3 loss """softplus""" +394 3 regularizer """no""" +394 3 optimizer """adam""" +394 3 training_loop """owa""" +394 3 negative_sampler """basic""" +394 3 evaluator """rankbased""" +394 4 dataset """wn18rr""" +394 4 model """kg2e""" +394 4 loss """softplus""" +394 4 regularizer """no""" +394 4 optimizer """adam""" +394 4 training_loop """owa""" +394 4 negative_sampler """basic""" +394 4 evaluator """rankbased""" +394 5 dataset """wn18rr""" +394 5 model """kg2e""" +394 5 
loss """softplus""" +394 5 regularizer """no""" +394 5 optimizer """adam""" +394 5 training_loop """owa""" +394 5 negative_sampler """basic""" +394 5 evaluator """rankbased""" +394 6 dataset """wn18rr""" +394 6 model """kg2e""" +394 6 loss """softplus""" +394 6 regularizer """no""" +394 6 optimizer """adam""" +394 6 training_loop """owa""" +394 6 negative_sampler """basic""" +394 6 evaluator """rankbased""" +394 7 dataset """wn18rr""" +394 7 model """kg2e""" +394 7 loss """softplus""" +394 7 regularizer """no""" +394 7 optimizer """adam""" +394 7 training_loop """owa""" +394 7 negative_sampler """basic""" +394 7 evaluator """rankbased""" +394 8 dataset """wn18rr""" +394 8 model """kg2e""" +394 8 loss """softplus""" +394 8 regularizer """no""" +394 8 optimizer """adam""" +394 8 training_loop """owa""" +394 8 negative_sampler """basic""" +394 8 evaluator """rankbased""" +394 9 dataset """wn18rr""" +394 9 model """kg2e""" +394 9 loss """softplus""" +394 9 regularizer """no""" +394 9 optimizer """adam""" +394 9 training_loop """owa""" +394 9 negative_sampler """basic""" +394 9 evaluator """rankbased""" +394 10 dataset """wn18rr""" +394 10 model """kg2e""" +394 10 loss """softplus""" +394 10 regularizer """no""" +394 10 optimizer """adam""" +394 10 training_loop """owa""" +394 10 negative_sampler """basic""" +394 10 evaluator """rankbased""" +394 11 dataset """wn18rr""" +394 11 model """kg2e""" +394 11 loss """softplus""" +394 11 regularizer """no""" +394 11 optimizer """adam""" +394 11 training_loop """owa""" +394 11 negative_sampler """basic""" +394 11 evaluator """rankbased""" +394 12 dataset """wn18rr""" +394 12 model """kg2e""" +394 12 loss """softplus""" +394 12 regularizer """no""" +394 12 optimizer """adam""" +394 12 training_loop """owa""" +394 12 negative_sampler """basic""" +394 12 evaluator """rankbased""" +394 13 dataset """wn18rr""" +394 13 model """kg2e""" +394 13 loss """softplus""" +394 13 regularizer """no""" +394 13 optimizer """adam""" +394 13 
training_loop """owa""" +394 13 negative_sampler """basic""" +394 13 evaluator """rankbased""" +394 14 dataset """wn18rr""" +394 14 model """kg2e""" +394 14 loss """softplus""" +394 14 regularizer """no""" +394 14 optimizer """adam""" +394 14 training_loop """owa""" +394 14 negative_sampler """basic""" +394 14 evaluator """rankbased""" +394 15 dataset """wn18rr""" +394 15 model """kg2e""" +394 15 loss """softplus""" +394 15 regularizer """no""" +394 15 optimizer """adam""" +394 15 training_loop """owa""" +394 15 negative_sampler """basic""" +394 15 evaluator """rankbased""" +394 16 dataset """wn18rr""" +394 16 model """kg2e""" +394 16 loss """softplus""" +394 16 regularizer """no""" +394 16 optimizer """adam""" +394 16 training_loop """owa""" +394 16 negative_sampler """basic""" +394 16 evaluator """rankbased""" +394 17 dataset """wn18rr""" +394 17 model """kg2e""" +394 17 loss """softplus""" +394 17 regularizer """no""" +394 17 optimizer """adam""" +394 17 training_loop """owa""" +394 17 negative_sampler """basic""" +394 17 evaluator """rankbased""" +394 18 dataset """wn18rr""" +394 18 model """kg2e""" +394 18 loss """softplus""" +394 18 regularizer """no""" +394 18 optimizer """adam""" +394 18 training_loop """owa""" +394 18 negative_sampler """basic""" +394 18 evaluator """rankbased""" +394 19 dataset """wn18rr""" +394 19 model """kg2e""" +394 19 loss """softplus""" +394 19 regularizer """no""" +394 19 optimizer """adam""" +394 19 training_loop """owa""" +394 19 negative_sampler """basic""" +394 19 evaluator """rankbased""" +394 20 dataset """wn18rr""" +394 20 model """kg2e""" +394 20 loss """softplus""" +394 20 regularizer """no""" +394 20 optimizer """adam""" +394 20 training_loop """owa""" +394 20 negative_sampler """basic""" +394 20 evaluator """rankbased""" +394 21 dataset """wn18rr""" +394 21 model """kg2e""" +394 21 loss """softplus""" +394 21 regularizer """no""" +394 21 optimizer """adam""" +394 21 training_loop """owa""" +394 21 negative_sampler 
"""basic""" +394 21 evaluator """rankbased""" +394 22 dataset """wn18rr""" +394 22 model """kg2e""" +394 22 loss """softplus""" +394 22 regularizer """no""" +394 22 optimizer """adam""" +394 22 training_loop """owa""" +394 22 negative_sampler """basic""" +394 22 evaluator """rankbased""" +394 23 dataset """wn18rr""" +394 23 model """kg2e""" +394 23 loss """softplus""" +394 23 regularizer """no""" +394 23 optimizer """adam""" +394 23 training_loop """owa""" +394 23 negative_sampler """basic""" +394 23 evaluator """rankbased""" +394 24 dataset """wn18rr""" +394 24 model """kg2e""" +394 24 loss """softplus""" +394 24 regularizer """no""" +394 24 optimizer """adam""" +394 24 training_loop """owa""" +394 24 negative_sampler """basic""" +394 24 evaluator """rankbased""" +394 25 dataset """wn18rr""" +394 25 model """kg2e""" +394 25 loss """softplus""" +394 25 regularizer """no""" +394 25 optimizer """adam""" +394 25 training_loop """owa""" +394 25 negative_sampler """basic""" +394 25 evaluator """rankbased""" +394 26 dataset """wn18rr""" +394 26 model """kg2e""" +394 26 loss """softplus""" +394 26 regularizer """no""" +394 26 optimizer """adam""" +394 26 training_loop """owa""" +394 26 negative_sampler """basic""" +394 26 evaluator """rankbased""" +394 27 dataset """wn18rr""" +394 27 model """kg2e""" +394 27 loss """softplus""" +394 27 regularizer """no""" +394 27 optimizer """adam""" +394 27 training_loop """owa""" +394 27 negative_sampler """basic""" +394 27 evaluator """rankbased""" +394 28 dataset """wn18rr""" +394 28 model """kg2e""" +394 28 loss """softplus""" +394 28 regularizer """no""" +394 28 optimizer """adam""" +394 28 training_loop """owa""" +394 28 negative_sampler """basic""" +394 28 evaluator """rankbased""" +394 29 dataset """wn18rr""" +394 29 model """kg2e""" +394 29 loss """softplus""" +394 29 regularizer """no""" +394 29 optimizer """adam""" +394 29 training_loop """owa""" +394 29 negative_sampler """basic""" +394 29 evaluator """rankbased""" +394 30 
dataset """wn18rr""" +394 30 model """kg2e""" +394 30 loss """softplus""" +394 30 regularizer """no""" +394 30 optimizer """adam""" +394 30 training_loop """owa""" +394 30 negative_sampler """basic""" +394 30 evaluator """rankbased""" +394 31 dataset """wn18rr""" +394 31 model """kg2e""" +394 31 loss """softplus""" +394 31 regularizer """no""" +394 31 optimizer """adam""" +394 31 training_loop """owa""" +394 31 negative_sampler """basic""" +394 31 evaluator """rankbased""" +394 32 dataset """wn18rr""" +394 32 model """kg2e""" +394 32 loss """softplus""" +394 32 regularizer """no""" +394 32 optimizer """adam""" +394 32 training_loop """owa""" +394 32 negative_sampler """basic""" +394 32 evaluator """rankbased""" +394 33 dataset """wn18rr""" +394 33 model """kg2e""" +394 33 loss """softplus""" +394 33 regularizer """no""" +394 33 optimizer """adam""" +394 33 training_loop """owa""" +394 33 negative_sampler """basic""" +394 33 evaluator """rankbased""" +395 1 model.embedding_dim 2.0 +395 1 model.c_min 0.010632447565314293 +395 1 model.c_max 2.1883302565339173 +395 1 optimizer.lr 0.0018269648339416353 +395 1 negative_sampler.num_negs_per_pos 69.0 +395 1 training.batch_size 1.0 +395 2 model.embedding_dim 1.0 +395 2 model.c_min 0.05788877148357768 +395 2 model.c_max 1.629819088851029 +395 2 optimizer.lr 0.05355737218899369 +395 2 negative_sampler.num_negs_per_pos 21.0 +395 2 training.batch_size 0.0 +395 3 model.embedding_dim 2.0 +395 3 model.c_min 0.010009067671079222 +395 3 model.c_max 4.193802988999681 +395 3 optimizer.lr 0.05827012815520939 +395 3 negative_sampler.num_negs_per_pos 40.0 +395 3 training.batch_size 2.0 +395 4 model.embedding_dim 2.0 +395 4 model.c_min 0.015948476085459615 +395 4 model.c_max 3.0794600034193276 +395 4 optimizer.lr 0.00516764588108608 +395 4 negative_sampler.num_negs_per_pos 11.0 +395 4 training.batch_size 2.0 +395 5 model.embedding_dim 0.0 +395 5 model.c_min 0.014699512231156847 +395 5 model.c_max 6.182423699719743 +395 5 optimizer.lr 
0.01916190896084803 +395 5 negative_sampler.num_negs_per_pos 94.0 +395 5 training.batch_size 2.0 +395 6 model.embedding_dim 1.0 +395 6 model.c_min 0.08125032021457033 +395 6 model.c_max 5.249305420876569 +395 6 optimizer.lr 0.0014183539968506053 +395 6 negative_sampler.num_negs_per_pos 36.0 +395 6 training.batch_size 0.0 +395 7 model.embedding_dim 0.0 +395 7 model.c_min 0.09709843188107962 +395 7 model.c_max 7.00966474266369 +395 7 optimizer.lr 0.007868008557464858 +395 7 negative_sampler.num_negs_per_pos 9.0 +395 7 training.batch_size 1.0 +395 8 model.embedding_dim 2.0 +395 8 model.c_min 0.05488563123854428 +395 8 model.c_max 5.111788437015488 +395 8 optimizer.lr 0.0035521673140263927 +395 8 negative_sampler.num_negs_per_pos 9.0 +395 8 training.batch_size 0.0 +395 9 model.embedding_dim 1.0 +395 9 model.c_min 0.052438978680773844 +395 9 model.c_max 7.285618558769452 +395 9 optimizer.lr 0.013180681667046288 +395 9 negative_sampler.num_negs_per_pos 81.0 +395 9 training.batch_size 2.0 +395 10 model.embedding_dim 0.0 +395 10 model.c_min 0.010817889362910256 +395 10 model.c_max 2.5268290155359243 +395 10 optimizer.lr 0.0019586603745132387 +395 10 negative_sampler.num_negs_per_pos 69.0 +395 10 training.batch_size 2.0 +395 11 model.embedding_dim 2.0 +395 11 model.c_min 0.02345222845621891 +395 11 model.c_max 9.593495757803977 +395 11 optimizer.lr 0.0022824641369029387 +395 11 negative_sampler.num_negs_per_pos 4.0 +395 11 training.batch_size 1.0 +395 12 model.embedding_dim 2.0 +395 12 model.c_min 0.021516403051176258 +395 12 model.c_max 6.724161267495621 +395 12 optimizer.lr 0.0211861615508536 +395 12 negative_sampler.num_negs_per_pos 96.0 +395 12 training.batch_size 0.0 +395 13 model.embedding_dim 0.0 +395 13 model.c_min 0.022432905250549126 +395 13 model.c_max 2.454575726969015 +395 13 optimizer.lr 0.06451403032136882 +395 13 negative_sampler.num_negs_per_pos 98.0 +395 13 training.batch_size 2.0 +395 14 model.embedding_dim 1.0 +395 14 model.c_min 0.034497842361287466 
+395 14 model.c_max 3.618222068615674 +395 14 optimizer.lr 0.003623774766594986 +395 14 negative_sampler.num_negs_per_pos 54.0 +395 14 training.batch_size 2.0 +395 15 model.embedding_dim 0.0 +395 15 model.c_min 0.06170950688435745 +395 15 model.c_max 4.918624238931 +395 15 optimizer.lr 0.0296890691401855 +395 15 negative_sampler.num_negs_per_pos 52.0 +395 15 training.batch_size 1.0 +395 16 model.embedding_dim 0.0 +395 16 model.c_min 0.0828584928687106 +395 16 model.c_max 4.416508345057471 +395 16 optimizer.lr 0.008936827515983205 +395 16 negative_sampler.num_negs_per_pos 68.0 +395 16 training.batch_size 2.0 +395 17 model.embedding_dim 0.0 +395 17 model.c_min 0.07309085262039275 +395 17 model.c_max 8.494413873018345 +395 17 optimizer.lr 0.0015402644037930842 +395 17 negative_sampler.num_negs_per_pos 81.0 +395 17 training.batch_size 0.0 +395 18 model.embedding_dim 2.0 +395 18 model.c_min 0.029128483092170858 +395 18 model.c_max 7.9338909847957755 +395 18 optimizer.lr 0.07472014353341112 +395 18 negative_sampler.num_negs_per_pos 3.0 +395 18 training.batch_size 1.0 +395 19 model.embedding_dim 1.0 +395 19 model.c_min 0.02168533394993106 +395 19 model.c_max 1.653770610156192 +395 19 optimizer.lr 0.055298273374675494 +395 19 negative_sampler.num_negs_per_pos 63.0 +395 19 training.batch_size 1.0 +395 20 model.embedding_dim 1.0 +395 20 model.c_min 0.013134873435098803 +395 20 model.c_max 1.3075641013233863 +395 20 optimizer.lr 0.0022352868253256862 +395 20 negative_sampler.num_negs_per_pos 72.0 +395 20 training.batch_size 1.0 +395 21 model.embedding_dim 1.0 +395 21 model.c_min 0.015204967246732677 +395 21 model.c_max 5.966012668405101 +395 21 optimizer.lr 0.00279136292009337 +395 21 negative_sampler.num_negs_per_pos 17.0 +395 21 training.batch_size 2.0 +395 22 model.embedding_dim 0.0 +395 22 model.c_min 0.013517922697711437 +395 22 model.c_max 5.1530492955871505 +395 22 optimizer.lr 0.002619746422737321 +395 22 negative_sampler.num_negs_per_pos 46.0 +395 22 
training.batch_size 0.0 +395 23 model.embedding_dim 2.0 +395 23 model.c_min 0.028921619298380722 +395 23 model.c_max 6.163606680711216 +395 23 optimizer.lr 0.03539043823206438 +395 23 negative_sampler.num_negs_per_pos 18.0 +395 23 training.batch_size 0.0 +395 24 model.embedding_dim 0.0 +395 24 model.c_min 0.02992954591610614 +395 24 model.c_max 3.2850100339332515 +395 24 optimizer.lr 0.018738159061431912 +395 24 negative_sampler.num_negs_per_pos 78.0 +395 24 training.batch_size 1.0 +395 25 model.embedding_dim 2.0 +395 25 model.c_min 0.058079760696944586 +395 25 model.c_max 6.308494837272297 +395 25 optimizer.lr 0.09392282164482794 +395 25 negative_sampler.num_negs_per_pos 5.0 +395 25 training.batch_size 2.0 +395 26 model.embedding_dim 2.0 +395 26 model.c_min 0.033571970757826065 +395 26 model.c_max 3.7334904627135357 +395 26 optimizer.lr 0.005794918182385526 +395 26 negative_sampler.num_negs_per_pos 69.0 +395 26 training.batch_size 1.0 +395 27 model.embedding_dim 0.0 +395 27 model.c_min 0.05529394811409184 +395 27 model.c_max 3.2246064739912974 +395 27 optimizer.lr 0.08942124127013991 +395 27 negative_sampler.num_negs_per_pos 4.0 +395 27 training.batch_size 2.0 +395 28 model.embedding_dim 0.0 +395 28 model.c_min 0.04563416589053498 +395 28 model.c_max 5.683659308115434 +395 28 optimizer.lr 0.004409751162982232 +395 28 negative_sampler.num_negs_per_pos 88.0 +395 28 training.batch_size 0.0 +395 29 model.embedding_dim 1.0 +395 29 model.c_min 0.037180172366740184 +395 29 model.c_max 4.751487546638067 +395 29 optimizer.lr 0.0018009641124087854 +395 29 negative_sampler.num_negs_per_pos 83.0 +395 29 training.batch_size 2.0 +395 30 model.embedding_dim 2.0 +395 30 model.c_min 0.014355638541458974 +395 30 model.c_max 6.66869450553747 +395 30 optimizer.lr 0.021752836420960663 +395 30 negative_sampler.num_negs_per_pos 72.0 +395 30 training.batch_size 2.0 +395 31 model.embedding_dim 0.0 +395 31 model.c_min 0.025290935569269923 +395 31 model.c_max 8.903152501695422 +395 31 
optimizer.lr 0.0011811379958423312 +395 31 negative_sampler.num_negs_per_pos 56.0 +395 31 training.batch_size 0.0 +395 32 model.embedding_dim 1.0 +395 32 model.c_min 0.0188733721368354 +395 32 model.c_max 9.54568943158034 +395 32 optimizer.lr 0.02338085557793797 +395 32 negative_sampler.num_negs_per_pos 46.0 +395 32 training.batch_size 1.0 +395 33 model.embedding_dim 2.0 +395 33 model.c_min 0.02320973783499346 +395 33 model.c_max 1.145257216321042 +395 33 optimizer.lr 0.04281721182055582 +395 33 negative_sampler.num_negs_per_pos 55.0 +395 33 training.batch_size 2.0 +395 34 model.embedding_dim 1.0 +395 34 model.c_min 0.012723662989191212 +395 34 model.c_max 1.60503233088617 +395 34 optimizer.lr 0.0022966999960207677 +395 34 negative_sampler.num_negs_per_pos 73.0 +395 34 training.batch_size 2.0 +395 35 model.embedding_dim 2.0 +395 35 model.c_min 0.017623196730337902 +395 35 model.c_max 3.1393728090563684 +395 35 optimizer.lr 0.005484215154538174 +395 35 negative_sampler.num_negs_per_pos 43.0 +395 35 training.batch_size 0.0 +395 36 model.embedding_dim 1.0 +395 36 model.c_min 0.046401402918629465 +395 36 model.c_max 6.174797894968042 +395 36 optimizer.lr 0.002618331683072191 +395 36 negative_sampler.num_negs_per_pos 14.0 +395 36 training.batch_size 0.0 +395 37 model.embedding_dim 0.0 +395 37 model.c_min 0.023884598267434415 +395 37 model.c_max 5.101374160323742 +395 37 optimizer.lr 0.004701800624712018 +395 37 negative_sampler.num_negs_per_pos 87.0 +395 37 training.batch_size 0.0 +395 38 model.embedding_dim 2.0 +395 38 model.c_min 0.05965643300656413 +395 38 model.c_max 3.3096895913603372 +395 38 optimizer.lr 0.0056103005071187605 +395 38 negative_sampler.num_negs_per_pos 58.0 +395 38 training.batch_size 0.0 +395 39 model.embedding_dim 1.0 +395 39 model.c_min 0.025508054578118 +395 39 model.c_max 9.929675550330117 +395 39 optimizer.lr 0.0027476031882674172 +395 39 negative_sampler.num_negs_per_pos 49.0 +395 39 training.batch_size 2.0 +395 40 model.embedding_dim 0.0 
+395 40 model.c_min 0.026047862260109113 +395 40 model.c_max 8.118942067024372 +395 40 optimizer.lr 0.005862217817482501 +395 40 negative_sampler.num_negs_per_pos 67.0 +395 40 training.batch_size 1.0 +395 41 model.embedding_dim 2.0 +395 41 model.c_min 0.044178523022347094 +395 41 model.c_max 2.175982564480355 +395 41 optimizer.lr 0.02744329724114921 +395 41 negative_sampler.num_negs_per_pos 23.0 +395 41 training.batch_size 0.0 +395 42 model.embedding_dim 1.0 +395 42 model.c_min 0.07165529011067698 +395 42 model.c_max 9.19870291849848 +395 42 optimizer.lr 0.003171239206606016 +395 42 negative_sampler.num_negs_per_pos 88.0 +395 42 training.batch_size 2.0 +395 43 model.embedding_dim 0.0 +395 43 model.c_min 0.09254419392169702 +395 43 model.c_max 3.422371958302761 +395 43 optimizer.lr 0.0020916936284098657 +395 43 negative_sampler.num_negs_per_pos 75.0 +395 43 training.batch_size 0.0 +395 44 model.embedding_dim 0.0 +395 44 model.c_min 0.025339420386146633 +395 44 model.c_max 5.6011877350891055 +395 44 optimizer.lr 0.039036944610013995 +395 44 negative_sampler.num_negs_per_pos 97.0 +395 44 training.batch_size 1.0 +395 45 model.embedding_dim 2.0 +395 45 model.c_min 0.01633071954627303 +395 45 model.c_max 2.356949708600501 +395 45 optimizer.lr 0.04796027780980999 +395 45 negative_sampler.num_negs_per_pos 4.0 +395 45 training.batch_size 0.0 +395 46 model.embedding_dim 2.0 +395 46 model.c_min 0.019830425799160913 +395 46 model.c_max 8.329731321885665 +395 46 optimizer.lr 0.07708316304110059 +395 46 negative_sampler.num_negs_per_pos 96.0 +395 46 training.batch_size 0.0 +395 47 model.embedding_dim 0.0 +395 47 model.c_min 0.02645150910292175 +395 47 model.c_max 7.1674825336130965 +395 47 optimizer.lr 0.0020352186597376335 +395 47 negative_sampler.num_negs_per_pos 62.0 +395 47 training.batch_size 2.0 +395 48 model.embedding_dim 1.0 +395 48 model.c_min 0.03522658736331826 +395 48 model.c_max 5.533897800021086 +395 48 optimizer.lr 0.0022519777119387337 +395 48 
negative_sampler.num_negs_per_pos 95.0 +395 48 training.batch_size 1.0 +395 49 model.embedding_dim 0.0 +395 49 model.c_min 0.05555309284396447 +395 49 model.c_max 2.3780547077080185 +395 49 optimizer.lr 0.007553750402443435 +395 49 negative_sampler.num_negs_per_pos 18.0 +395 49 training.batch_size 1.0 +395 50 model.embedding_dim 0.0 +395 50 model.c_min 0.09457685250810059 +395 50 model.c_max 9.639240780618255 +395 50 optimizer.lr 0.031279784517656485 +395 50 negative_sampler.num_negs_per_pos 77.0 +395 50 training.batch_size 2.0 +395 51 model.embedding_dim 1.0 +395 51 model.c_min 0.08352172086082968 +395 51 model.c_max 5.347785146366301 +395 51 optimizer.lr 0.06460825902267449 +395 51 negative_sampler.num_negs_per_pos 52.0 +395 51 training.batch_size 2.0 +395 52 model.embedding_dim 1.0 +395 52 model.c_min 0.07625067309304893 +395 52 model.c_max 6.700833883023923 +395 52 optimizer.lr 0.0012947956011279604 +395 52 negative_sampler.num_negs_per_pos 20.0 +395 52 training.batch_size 1.0 +395 53 model.embedding_dim 2.0 +395 53 model.c_min 0.010025614796834587 +395 53 model.c_max 1.9283472254172251 +395 53 optimizer.lr 0.004001901249092173 +395 53 negative_sampler.num_negs_per_pos 73.0 +395 53 training.batch_size 0.0 +395 54 model.embedding_dim 0.0 +395 54 model.c_min 0.020456618252940227 +395 54 model.c_max 8.463534795064373 +395 54 optimizer.lr 0.01679115513187266 +395 54 negative_sampler.num_negs_per_pos 92.0 +395 54 training.batch_size 1.0 +395 55 model.embedding_dim 2.0 +395 55 model.c_min 0.09578749061602285 +395 55 model.c_max 5.924967520128218 +395 55 optimizer.lr 0.06464961297413536 +395 55 negative_sampler.num_negs_per_pos 93.0 +395 55 training.batch_size 2.0 +395 56 model.embedding_dim 2.0 +395 56 model.c_min 0.01949646880537666 +395 56 model.c_max 5.943628066665502 +395 56 optimizer.lr 0.0016474044049193087 +395 56 negative_sampler.num_negs_per_pos 7.0 +395 56 training.batch_size 2.0 +395 57 model.embedding_dim 2.0 +395 57 model.c_min 0.0216613594690101 +395 57 
model.c_max 1.7148332108499797 +395 57 optimizer.lr 0.0017034242917991136 +395 57 negative_sampler.num_negs_per_pos 57.0 +395 57 training.batch_size 1.0 +395 58 model.embedding_dim 0.0 +395 58 model.c_min 0.0660083797230169 +395 58 model.c_max 9.479724405873684 +395 58 optimizer.lr 0.00481476717498051 +395 58 negative_sampler.num_negs_per_pos 9.0 +395 58 training.batch_size 1.0 +395 59 model.embedding_dim 0.0 +395 59 model.c_min 0.02577538158557086 +395 59 model.c_max 3.4212794772837576 +395 59 optimizer.lr 0.0017651800959716162 +395 59 negative_sampler.num_negs_per_pos 71.0 +395 59 training.batch_size 2.0 +395 60 model.embedding_dim 1.0 +395 60 model.c_min 0.0734410854139536 +395 60 model.c_max 1.4192860785695496 +395 60 optimizer.lr 0.0017143896325121089 +395 60 negative_sampler.num_negs_per_pos 86.0 +395 60 training.batch_size 0.0 +395 61 model.embedding_dim 2.0 +395 61 model.c_min 0.030550998836358038 +395 61 model.c_max 7.131531153047213 +395 61 optimizer.lr 0.0032044965482636704 +395 61 negative_sampler.num_negs_per_pos 92.0 +395 61 training.batch_size 2.0 +395 62 model.embedding_dim 2.0 +395 62 model.c_min 0.09813193934063413 +395 62 model.c_max 7.560810566992431 +395 62 optimizer.lr 0.002143017180340101 +395 62 negative_sampler.num_negs_per_pos 20.0 +395 62 training.batch_size 2.0 +395 63 model.embedding_dim 2.0 +395 63 model.c_min 0.07093848327350086 +395 63 model.c_max 2.7558581299059246 +395 63 optimizer.lr 0.03551151443954815 +395 63 negative_sampler.num_negs_per_pos 14.0 +395 63 training.batch_size 1.0 +395 64 model.embedding_dim 0.0 +395 64 model.c_min 0.01684893245831277 +395 64 model.c_max 8.739486521605025 +395 64 optimizer.lr 0.036130631931224975 +395 64 negative_sampler.num_negs_per_pos 79.0 +395 64 training.batch_size 1.0 +395 65 model.embedding_dim 0.0 +395 65 model.c_min 0.042038708708716295 +395 65 model.c_max 7.0013615631021855 +395 65 optimizer.lr 0.037242677998941286 +395 65 negative_sampler.num_negs_per_pos 52.0 +395 65 
training.batch_size 2.0 +395 66 model.embedding_dim 0.0 +395 66 model.c_min 0.02048286629433223 +395 66 model.c_max 6.011731047053186 +395 66 optimizer.lr 0.01857330088638036 +395 66 negative_sampler.num_negs_per_pos 97.0 +395 66 training.batch_size 0.0 +395 67 model.embedding_dim 0.0 +395 67 model.c_min 0.01758386905156623 +395 67 model.c_max 2.1752163906664634 +395 67 optimizer.lr 0.01311171383700357 +395 67 negative_sampler.num_negs_per_pos 44.0 +395 67 training.batch_size 0.0 +395 68 model.embedding_dim 2.0 +395 68 model.c_min 0.07113197079572993 +395 68 model.c_max 3.5181825055954183 +395 68 optimizer.lr 0.0010800110340749036 +395 68 negative_sampler.num_negs_per_pos 35.0 +395 68 training.batch_size 0.0 +395 69 model.embedding_dim 1.0 +395 69 model.c_min 0.01559160502214311 +395 69 model.c_max 6.5829589585832124 +395 69 optimizer.lr 0.025803433645476735 +395 69 negative_sampler.num_negs_per_pos 79.0 +395 69 training.batch_size 0.0 +395 70 model.embedding_dim 1.0 +395 70 model.c_min 0.028458784896463668 +395 70 model.c_max 6.364337215708085 +395 70 optimizer.lr 0.0058692917707172726 +395 70 negative_sampler.num_negs_per_pos 1.0 +395 70 training.batch_size 0.0 +395 71 model.embedding_dim 2.0 +395 71 model.c_min 0.012874407173170633 +395 71 model.c_max 9.219176097272271 +395 71 optimizer.lr 0.020659203928198904 +395 71 negative_sampler.num_negs_per_pos 32.0 +395 71 training.batch_size 0.0 +395 72 model.embedding_dim 0.0 +395 72 model.c_min 0.09667571012203448 +395 72 model.c_max 5.733989454504911 +395 72 optimizer.lr 0.010342656836165502 +395 72 negative_sampler.num_negs_per_pos 59.0 +395 72 training.batch_size 0.0 +395 73 model.embedding_dim 1.0 +395 73 model.c_min 0.010146444500494473 +395 73 model.c_max 2.5270194164926165 +395 73 optimizer.lr 0.00434732317906227 +395 73 negative_sampler.num_negs_per_pos 83.0 +395 73 training.batch_size 0.0 +395 74 model.embedding_dim 2.0 +395 74 model.c_min 0.052914219202238356 +395 74 model.c_max 3.1850748851351276 +395 74 
optimizer.lr 0.008651007501095142 +395 74 negative_sampler.num_negs_per_pos 23.0 +395 74 training.batch_size 2.0 +395 75 model.embedding_dim 0.0 +395 75 model.c_min 0.04689263463182323 +395 75 model.c_max 8.866495870610631 +395 75 optimizer.lr 0.0015516787731835076 +395 75 negative_sampler.num_negs_per_pos 93.0 +395 75 training.batch_size 1.0 +395 76 model.embedding_dim 0.0 +395 76 model.c_min 0.0328460798461962 +395 76 model.c_max 4.495965307917696 +395 76 optimizer.lr 0.006142485179719027 +395 76 negative_sampler.num_negs_per_pos 33.0 +395 76 training.batch_size 0.0 +395 77 model.embedding_dim 1.0 +395 77 model.c_min 0.07872261672384898 +395 77 model.c_max 4.500948397674994 +395 77 optimizer.lr 0.09038987190414778 +395 77 negative_sampler.num_negs_per_pos 19.0 +395 77 training.batch_size 2.0 +395 78 model.embedding_dim 0.0 +395 78 model.c_min 0.03305162681927586 +395 78 model.c_max 8.496579233666017 +395 78 optimizer.lr 0.0016234850084690282 +395 78 negative_sampler.num_negs_per_pos 40.0 +395 78 training.batch_size 2.0 +395 79 model.embedding_dim 2.0 +395 79 model.c_min 0.023469878195948042 +395 79 model.c_max 3.4289412179694043 +395 79 optimizer.lr 0.04934786826513639 +395 79 negative_sampler.num_negs_per_pos 69.0 +395 79 training.batch_size 2.0 +395 80 model.embedding_dim 2.0 +395 80 model.c_min 0.017004667774865704 +395 80 model.c_max 1.154914411859863 +395 80 optimizer.lr 0.020539094464755887 +395 80 negative_sampler.num_negs_per_pos 94.0 +395 80 training.batch_size 2.0 +395 81 model.embedding_dim 0.0 +395 81 model.c_min 0.013717951709931892 +395 81 model.c_max 9.55660992338514 +395 81 optimizer.lr 0.0014874024550740423 +395 81 negative_sampler.num_negs_per_pos 94.0 +395 81 training.batch_size 1.0 +395 82 model.embedding_dim 2.0 +395 82 model.c_min 0.01290206095692832 +395 82 model.c_max 4.315430061856126 +395 82 optimizer.lr 0.001831890123715352 +395 82 negative_sampler.num_negs_per_pos 90.0 +395 82 training.batch_size 2.0 +395 83 model.embedding_dim 2.0 
+395 83 model.c_min 0.04685596199479006 +395 83 model.c_max 2.8453087480128016 +395 83 optimizer.lr 0.033600793551287485 +395 83 negative_sampler.num_negs_per_pos 41.0 +395 83 training.batch_size 0.0 +395 84 model.embedding_dim 1.0 +395 84 model.c_min 0.0186738856329917 +395 84 model.c_max 3.0962554826185933 +395 84 optimizer.lr 0.0012751625404211619 +395 84 negative_sampler.num_negs_per_pos 78.0 +395 84 training.batch_size 2.0 +395 85 model.embedding_dim 2.0 +395 85 model.c_min 0.010781408986283022 +395 85 model.c_max 3.3044798662858215 +395 85 optimizer.lr 0.06358048272881325 +395 85 negative_sampler.num_negs_per_pos 70.0 +395 85 training.batch_size 1.0 +395 86 model.embedding_dim 2.0 +395 86 model.c_min 0.031894350682904395 +395 86 model.c_max 3.034766582673212 +395 86 optimizer.lr 0.012210882916639429 +395 86 negative_sampler.num_negs_per_pos 42.0 +395 86 training.batch_size 1.0 +395 87 model.embedding_dim 1.0 +395 87 model.c_min 0.06319254678351191 +395 87 model.c_max 9.272151055218387 +395 87 optimizer.lr 0.011988502143834012 +395 87 negative_sampler.num_negs_per_pos 71.0 +395 87 training.batch_size 1.0 +395 88 model.embedding_dim 0.0 +395 88 model.c_min 0.01745170935777253 +395 88 model.c_max 2.9486816364677098 +395 88 optimizer.lr 0.003695259483270661 +395 88 negative_sampler.num_negs_per_pos 61.0 +395 88 training.batch_size 2.0 +395 89 model.embedding_dim 2.0 +395 89 model.c_min 0.06458361004591308 +395 89 model.c_max 5.004043217541859 +395 89 optimizer.lr 0.0014951445027097125 +395 89 negative_sampler.num_negs_per_pos 40.0 +395 89 training.batch_size 2.0 +395 90 model.embedding_dim 0.0 +395 90 model.c_min 0.08997482244629594 +395 90 model.c_max 5.090408344980988 +395 90 optimizer.lr 0.00349193775556902 +395 90 negative_sampler.num_negs_per_pos 4.0 +395 90 training.batch_size 0.0 +395 91 model.embedding_dim 0.0 +395 91 model.c_min 0.02514866942519536 +395 91 model.c_max 9.677144147918364 +395 91 optimizer.lr 0.010261930751986323 +395 91 
negative_sampler.num_negs_per_pos 18.0 +395 91 training.batch_size 2.0 +395 92 model.embedding_dim 1.0 +395 92 model.c_min 0.013697483993255367 +395 92 model.c_max 3.925584287841763 +395 92 optimizer.lr 0.0011792461350364763 +395 92 negative_sampler.num_negs_per_pos 71.0 +395 92 training.batch_size 1.0 +395 93 model.embedding_dim 0.0 +395 93 model.c_min 0.0182672256573307 +395 93 model.c_max 2.0164484055208116 +395 93 optimizer.lr 0.011443581610389697 +395 93 negative_sampler.num_negs_per_pos 66.0 +395 93 training.batch_size 0.0 +395 94 model.embedding_dim 1.0 +395 94 model.c_min 0.0746028055975155 +395 94 model.c_max 8.778998731480828 +395 94 optimizer.lr 0.02146556367375283 +395 94 negative_sampler.num_negs_per_pos 22.0 +395 94 training.batch_size 0.0 +395 95 model.embedding_dim 2.0 +395 95 model.c_min 0.032676590393395086 +395 95 model.c_max 2.5625405252499904 +395 95 optimizer.lr 0.03521537776274029 +395 95 negative_sampler.num_negs_per_pos 12.0 +395 95 training.batch_size 1.0 +395 96 model.embedding_dim 0.0 +395 96 model.c_min 0.04856976956501459 +395 96 model.c_max 5.268368800978244 +395 96 optimizer.lr 0.014523000966870002 +395 96 negative_sampler.num_negs_per_pos 1.0 +395 96 training.batch_size 2.0 +395 97 model.embedding_dim 0.0 +395 97 model.c_min 0.07016261946558464 +395 97 model.c_max 5.744004792199039 +395 97 optimizer.lr 0.0016699106619489442 +395 97 negative_sampler.num_negs_per_pos 24.0 +395 97 training.batch_size 2.0 +395 98 model.embedding_dim 2.0 +395 98 model.c_min 0.06390335904933375 +395 98 model.c_max 1.1419335441121503 +395 98 optimizer.lr 0.01972001600772287 +395 98 negative_sampler.num_negs_per_pos 22.0 +395 98 training.batch_size 0.0 +395 99 model.embedding_dim 2.0 +395 99 model.c_min 0.024083675545618907 +395 99 model.c_max 7.423466980420873 +395 99 optimizer.lr 0.0014415762109816115 +395 99 negative_sampler.num_negs_per_pos 99.0 +395 99 training.batch_size 1.0 +395 100 model.embedding_dim 2.0 +395 100 model.c_min 0.050541376475611 +395 
100 model.c_max 2.5539547129241003 +395 100 optimizer.lr 0.001980846872717279 +395 100 negative_sampler.num_negs_per_pos 54.0 +395 100 training.batch_size 1.0 +395 1 dataset """wn18rr""" +395 1 model """kg2e""" +395 1 loss """bceaftersigmoid""" +395 1 regularizer """no""" +395 1 optimizer """adam""" +395 1 training_loop """owa""" +395 1 negative_sampler """basic""" +395 1 evaluator """rankbased""" +395 2 dataset """wn18rr""" +395 2 model """kg2e""" +395 2 loss """bceaftersigmoid""" +395 2 regularizer """no""" +395 2 optimizer """adam""" +395 2 training_loop """owa""" +395 2 negative_sampler """basic""" +395 2 evaluator """rankbased""" +395 3 dataset """wn18rr""" +395 3 model """kg2e""" +395 3 loss """bceaftersigmoid""" +395 3 regularizer """no""" +395 3 optimizer """adam""" +395 3 training_loop """owa""" +395 3 negative_sampler """basic""" +395 3 evaluator """rankbased""" +395 4 dataset """wn18rr""" +395 4 model """kg2e""" +395 4 loss """bceaftersigmoid""" +395 4 regularizer """no""" +395 4 optimizer """adam""" +395 4 training_loop """owa""" +395 4 negative_sampler """basic""" +395 4 evaluator """rankbased""" +395 5 dataset """wn18rr""" +395 5 model """kg2e""" +395 5 loss """bceaftersigmoid""" +395 5 regularizer """no""" +395 5 optimizer """adam""" +395 5 training_loop """owa""" +395 5 negative_sampler """basic""" +395 5 evaluator """rankbased""" +395 6 dataset """wn18rr""" +395 6 model """kg2e""" +395 6 loss """bceaftersigmoid""" +395 6 regularizer """no""" +395 6 optimizer """adam""" +395 6 training_loop """owa""" +395 6 negative_sampler """basic""" +395 6 evaluator """rankbased""" +395 7 dataset """wn18rr""" +395 7 model """kg2e""" +395 7 loss """bceaftersigmoid""" +395 7 regularizer """no""" +395 7 optimizer """adam""" +395 7 training_loop """owa""" +395 7 negative_sampler """basic""" +395 7 evaluator """rankbased""" +395 8 dataset """wn18rr""" +395 8 model """kg2e""" +395 8 loss """bceaftersigmoid""" +395 8 regularizer """no""" +395 8 optimizer """adam""" +395 
8 training_loop """owa""" +395 8 negative_sampler """basic""" +395 8 evaluator """rankbased""" +395 9 dataset """wn18rr""" +395 9 model """kg2e""" +395 9 loss """bceaftersigmoid""" +395 9 regularizer """no""" +395 9 optimizer """adam""" +395 9 training_loop """owa""" +395 9 negative_sampler """basic""" +395 9 evaluator """rankbased""" +395 10 dataset """wn18rr""" +395 10 model """kg2e""" +395 10 loss """bceaftersigmoid""" +395 10 regularizer """no""" +395 10 optimizer """adam""" +395 10 training_loop """owa""" +395 10 negative_sampler """basic""" +395 10 evaluator """rankbased""" +395 11 dataset """wn18rr""" +395 11 model """kg2e""" +395 11 loss """bceaftersigmoid""" +395 11 regularizer """no""" +395 11 optimizer """adam""" +395 11 training_loop """owa""" +395 11 negative_sampler """basic""" +395 11 evaluator """rankbased""" +395 12 dataset """wn18rr""" +395 12 model """kg2e""" +395 12 loss """bceaftersigmoid""" +395 12 regularizer """no""" +395 12 optimizer """adam""" +395 12 training_loop """owa""" +395 12 negative_sampler """basic""" +395 12 evaluator """rankbased""" +395 13 dataset """wn18rr""" +395 13 model """kg2e""" +395 13 loss """bceaftersigmoid""" +395 13 regularizer """no""" +395 13 optimizer """adam""" +395 13 training_loop """owa""" +395 13 negative_sampler """basic""" +395 13 evaluator """rankbased""" +395 14 dataset """wn18rr""" +395 14 model """kg2e""" +395 14 loss """bceaftersigmoid""" +395 14 regularizer """no""" +395 14 optimizer """adam""" +395 14 training_loop """owa""" +395 14 negative_sampler """basic""" +395 14 evaluator """rankbased""" +395 15 dataset """wn18rr""" +395 15 model """kg2e""" +395 15 loss """bceaftersigmoid""" +395 15 regularizer """no""" +395 15 optimizer """adam""" +395 15 training_loop """owa""" +395 15 negative_sampler """basic""" +395 15 evaluator """rankbased""" +395 16 dataset """wn18rr""" +395 16 model """kg2e""" +395 16 loss """bceaftersigmoid""" +395 16 regularizer """no""" +395 16 optimizer """adam""" +395 16 
training_loop """owa""" +395 16 negative_sampler """basic""" +395 16 evaluator """rankbased""" +395 17 dataset """wn18rr""" +395 17 model """kg2e""" +395 17 loss """bceaftersigmoid""" +395 17 regularizer """no""" +395 17 optimizer """adam""" +395 17 training_loop """owa""" +395 17 negative_sampler """basic""" +395 17 evaluator """rankbased""" +395 18 dataset """wn18rr""" +395 18 model """kg2e""" +395 18 loss """bceaftersigmoid""" +395 18 regularizer """no""" +395 18 optimizer """adam""" +395 18 training_loop """owa""" +395 18 negative_sampler """basic""" +395 18 evaluator """rankbased""" +395 19 dataset """wn18rr""" +395 19 model """kg2e""" +395 19 loss """bceaftersigmoid""" +395 19 regularizer """no""" +395 19 optimizer """adam""" +395 19 training_loop """owa""" +395 19 negative_sampler """basic""" +395 19 evaluator """rankbased""" +395 20 dataset """wn18rr""" +395 20 model """kg2e""" +395 20 loss """bceaftersigmoid""" +395 20 regularizer """no""" +395 20 optimizer """adam""" +395 20 training_loop """owa""" +395 20 negative_sampler """basic""" +395 20 evaluator """rankbased""" +395 21 dataset """wn18rr""" +395 21 model """kg2e""" +395 21 loss """bceaftersigmoid""" +395 21 regularizer """no""" +395 21 optimizer """adam""" +395 21 training_loop """owa""" +395 21 negative_sampler """basic""" +395 21 evaluator """rankbased""" +395 22 dataset """wn18rr""" +395 22 model """kg2e""" +395 22 loss """bceaftersigmoid""" +395 22 regularizer """no""" +395 22 optimizer """adam""" +395 22 training_loop """owa""" +395 22 negative_sampler """basic""" +395 22 evaluator """rankbased""" +395 23 dataset """wn18rr""" +395 23 model """kg2e""" +395 23 loss """bceaftersigmoid""" +395 23 regularizer """no""" +395 23 optimizer """adam""" +395 23 training_loop """owa""" +395 23 negative_sampler """basic""" +395 23 evaluator """rankbased""" +395 24 dataset """wn18rr""" +395 24 model """kg2e""" +395 24 loss """bceaftersigmoid""" +395 24 regularizer """no""" +395 24 optimizer """adam""" +395 24 
training_loop """owa""" +395 24 negative_sampler """basic""" +395 24 evaluator """rankbased""" +395 25 dataset """wn18rr""" +395 25 model """kg2e""" +395 25 loss """bceaftersigmoid""" +395 25 regularizer """no""" +395 25 optimizer """adam""" +395 25 training_loop """owa""" +395 25 negative_sampler """basic""" +395 25 evaluator """rankbased""" +395 26 dataset """wn18rr""" +395 26 model """kg2e""" +395 26 loss """bceaftersigmoid""" +395 26 regularizer """no""" +395 26 optimizer """adam""" +395 26 training_loop """owa""" +395 26 negative_sampler """basic""" +395 26 evaluator """rankbased""" +395 27 dataset """wn18rr""" +395 27 model """kg2e""" +395 27 loss """bceaftersigmoid""" +395 27 regularizer """no""" +395 27 optimizer """adam""" +395 27 training_loop """owa""" +395 27 negative_sampler """basic""" +395 27 evaluator """rankbased""" +395 28 dataset """wn18rr""" +395 28 model """kg2e""" +395 28 loss """bceaftersigmoid""" +395 28 regularizer """no""" +395 28 optimizer """adam""" +395 28 training_loop """owa""" +395 28 negative_sampler """basic""" +395 28 evaluator """rankbased""" +395 29 dataset """wn18rr""" +395 29 model """kg2e""" +395 29 loss """bceaftersigmoid""" +395 29 regularizer """no""" +395 29 optimizer """adam""" +395 29 training_loop """owa""" +395 29 negative_sampler """basic""" +395 29 evaluator """rankbased""" +395 30 dataset """wn18rr""" +395 30 model """kg2e""" +395 30 loss """bceaftersigmoid""" +395 30 regularizer """no""" +395 30 optimizer """adam""" +395 30 training_loop """owa""" +395 30 negative_sampler """basic""" +395 30 evaluator """rankbased""" +395 31 dataset """wn18rr""" +395 31 model """kg2e""" +395 31 loss """bceaftersigmoid""" +395 31 regularizer """no""" +395 31 optimizer """adam""" +395 31 training_loop """owa""" +395 31 negative_sampler """basic""" +395 31 evaluator """rankbased""" +395 32 dataset """wn18rr""" +395 32 model """kg2e""" +395 32 loss """bceaftersigmoid""" +395 32 regularizer """no""" +395 32 optimizer """adam""" +395 32 
training_loop """owa""" +395 32 negative_sampler """basic""" +395 32 evaluator """rankbased""" +395 33 dataset """wn18rr""" +395 33 model """kg2e""" +395 33 loss """bceaftersigmoid""" +395 33 regularizer """no""" +395 33 optimizer """adam""" +395 33 training_loop """owa""" +395 33 negative_sampler """basic""" +395 33 evaluator """rankbased""" +395 34 dataset """wn18rr""" +395 34 model """kg2e""" +395 34 loss """bceaftersigmoid""" +395 34 regularizer """no""" +395 34 optimizer """adam""" +395 34 training_loop """owa""" +395 34 negative_sampler """basic""" +395 34 evaluator """rankbased""" +395 35 dataset """wn18rr""" +395 35 model """kg2e""" +395 35 loss """bceaftersigmoid""" +395 35 regularizer """no""" +395 35 optimizer """adam""" +395 35 training_loop """owa""" +395 35 negative_sampler """basic""" +395 35 evaluator """rankbased""" +395 36 dataset """wn18rr""" +395 36 model """kg2e""" +395 36 loss """bceaftersigmoid""" +395 36 regularizer """no""" +395 36 optimizer """adam""" +395 36 training_loop """owa""" +395 36 negative_sampler """basic""" +395 36 evaluator """rankbased""" +395 37 dataset """wn18rr""" +395 37 model """kg2e""" +395 37 loss """bceaftersigmoid""" +395 37 regularizer """no""" +395 37 optimizer """adam""" +395 37 training_loop """owa""" +395 37 negative_sampler """basic""" +395 37 evaluator """rankbased""" +395 38 dataset """wn18rr""" +395 38 model """kg2e""" +395 38 loss """bceaftersigmoid""" +395 38 regularizer """no""" +395 38 optimizer """adam""" +395 38 training_loop """owa""" +395 38 negative_sampler """basic""" +395 38 evaluator """rankbased""" +395 39 dataset """wn18rr""" +395 39 model """kg2e""" +395 39 loss """bceaftersigmoid""" +395 39 regularizer """no""" +395 39 optimizer """adam""" +395 39 training_loop """owa""" +395 39 negative_sampler """basic""" +395 39 evaluator """rankbased""" +395 40 dataset """wn18rr""" +395 40 model """kg2e""" +395 40 loss """bceaftersigmoid""" +395 40 regularizer """no""" +395 40 optimizer """adam""" +395 40 
training_loop """owa""" +395 40 negative_sampler """basic""" +395 40 evaluator """rankbased""" +395 41 dataset """wn18rr""" +395 41 model """kg2e""" +395 41 loss """bceaftersigmoid""" +395 41 regularizer """no""" +395 41 optimizer """adam""" +395 41 training_loop """owa""" +395 41 negative_sampler """basic""" +395 41 evaluator """rankbased""" +395 42 dataset """wn18rr""" +395 42 model """kg2e""" +395 42 loss """bceaftersigmoid""" +395 42 regularizer """no""" +395 42 optimizer """adam""" +395 42 training_loop """owa""" +395 42 negative_sampler """basic""" +395 42 evaluator """rankbased""" +395 43 dataset """wn18rr""" +395 43 model """kg2e""" +395 43 loss """bceaftersigmoid""" +395 43 regularizer """no""" +395 43 optimizer """adam""" +395 43 training_loop """owa""" +395 43 negative_sampler """basic""" +395 43 evaluator """rankbased""" +395 44 dataset """wn18rr""" +395 44 model """kg2e""" +395 44 loss """bceaftersigmoid""" +395 44 regularizer """no""" +395 44 optimizer """adam""" +395 44 training_loop """owa""" +395 44 negative_sampler """basic""" +395 44 evaluator """rankbased""" +395 45 dataset """wn18rr""" +395 45 model """kg2e""" +395 45 loss """bceaftersigmoid""" +395 45 regularizer """no""" +395 45 optimizer """adam""" +395 45 training_loop """owa""" +395 45 negative_sampler """basic""" +395 45 evaluator """rankbased""" +395 46 dataset """wn18rr""" +395 46 model """kg2e""" +395 46 loss """bceaftersigmoid""" +395 46 regularizer """no""" +395 46 optimizer """adam""" +395 46 training_loop """owa""" +395 46 negative_sampler """basic""" +395 46 evaluator """rankbased""" +395 47 dataset """wn18rr""" +395 47 model """kg2e""" +395 47 loss """bceaftersigmoid""" +395 47 regularizer """no""" +395 47 optimizer """adam""" +395 47 training_loop """owa""" +395 47 negative_sampler """basic""" +395 47 evaluator """rankbased""" +395 48 dataset """wn18rr""" +395 48 model """kg2e""" +395 48 loss """bceaftersigmoid""" +395 48 regularizer """no""" +395 48 optimizer """adam""" +395 48 
training_loop """owa""" +395 48 negative_sampler """basic""" +395 48 evaluator """rankbased""" +395 49 dataset """wn18rr""" +395 49 model """kg2e""" +395 49 loss """bceaftersigmoid""" +395 49 regularizer """no""" +395 49 optimizer """adam""" +395 49 training_loop """owa""" +395 49 negative_sampler """basic""" +395 49 evaluator """rankbased""" +395 50 dataset """wn18rr""" +395 50 model """kg2e""" +395 50 loss """bceaftersigmoid""" +395 50 regularizer """no""" +395 50 optimizer """adam""" +395 50 training_loop """owa""" +395 50 negative_sampler """basic""" +395 50 evaluator """rankbased""" +395 51 dataset """wn18rr""" +395 51 model """kg2e""" +395 51 loss """bceaftersigmoid""" +395 51 regularizer """no""" +395 51 optimizer """adam""" +395 51 training_loop """owa""" +395 51 negative_sampler """basic""" +395 51 evaluator """rankbased""" +395 52 dataset """wn18rr""" +395 52 model """kg2e""" +395 52 loss """bceaftersigmoid""" +395 52 regularizer """no""" +395 52 optimizer """adam""" +395 52 training_loop """owa""" +395 52 negative_sampler """basic""" +395 52 evaluator """rankbased""" +395 53 dataset """wn18rr""" +395 53 model """kg2e""" +395 53 loss """bceaftersigmoid""" +395 53 regularizer """no""" +395 53 optimizer """adam""" +395 53 training_loop """owa""" +395 53 negative_sampler """basic""" +395 53 evaluator """rankbased""" +395 54 dataset """wn18rr""" +395 54 model """kg2e""" +395 54 loss """bceaftersigmoid""" +395 54 regularizer """no""" +395 54 optimizer """adam""" +395 54 training_loop """owa""" +395 54 negative_sampler """basic""" +395 54 evaluator """rankbased""" +395 55 dataset """wn18rr""" +395 55 model """kg2e""" +395 55 loss """bceaftersigmoid""" +395 55 regularizer """no""" +395 55 optimizer """adam""" +395 55 training_loop """owa""" +395 55 negative_sampler """basic""" +395 55 evaluator """rankbased""" +395 56 dataset """wn18rr""" +395 56 model """kg2e""" +395 56 loss """bceaftersigmoid""" +395 56 regularizer """no""" +395 56 optimizer """adam""" +395 56 
training_loop """owa""" +395 56 negative_sampler """basic""" +395 56 evaluator """rankbased""" +395 57 dataset """wn18rr""" +395 57 model """kg2e""" +395 57 loss """bceaftersigmoid""" +395 57 regularizer """no""" +395 57 optimizer """adam""" +395 57 training_loop """owa""" +395 57 negative_sampler """basic""" +395 57 evaluator """rankbased""" +395 58 dataset """wn18rr""" +395 58 model """kg2e""" +395 58 loss """bceaftersigmoid""" +395 58 regularizer """no""" +395 58 optimizer """adam""" +395 58 training_loop """owa""" +395 58 negative_sampler """basic""" +395 58 evaluator """rankbased""" +395 59 dataset """wn18rr""" +395 59 model """kg2e""" +395 59 loss """bceaftersigmoid""" +395 59 regularizer """no""" +395 59 optimizer """adam""" +395 59 training_loop """owa""" +395 59 negative_sampler """basic""" +395 59 evaluator """rankbased""" +395 60 dataset """wn18rr""" +395 60 model """kg2e""" +395 60 loss """bceaftersigmoid""" +395 60 regularizer """no""" +395 60 optimizer """adam""" +395 60 training_loop """owa""" +395 60 negative_sampler """basic""" +395 60 evaluator """rankbased""" +395 61 dataset """wn18rr""" +395 61 model """kg2e""" +395 61 loss """bceaftersigmoid""" +395 61 regularizer """no""" +395 61 optimizer """adam""" +395 61 training_loop """owa""" +395 61 negative_sampler """basic""" +395 61 evaluator """rankbased""" +395 62 dataset """wn18rr""" +395 62 model """kg2e""" +395 62 loss """bceaftersigmoid""" +395 62 regularizer """no""" +395 62 optimizer """adam""" +395 62 training_loop """owa""" +395 62 negative_sampler """basic""" +395 62 evaluator """rankbased""" +395 63 dataset """wn18rr""" +395 63 model """kg2e""" +395 63 loss """bceaftersigmoid""" +395 63 regularizer """no""" +395 63 optimizer """adam""" +395 63 training_loop """owa""" +395 63 negative_sampler """basic""" +395 63 evaluator """rankbased""" +395 64 dataset """wn18rr""" +395 64 model """kg2e""" +395 64 loss """bceaftersigmoid""" +395 64 regularizer """no""" +395 64 optimizer """adam""" +395 64 
training_loop """owa""" +395 64 negative_sampler """basic""" +395 64 evaluator """rankbased""" +395 65 dataset """wn18rr""" +395 65 model """kg2e""" +395 65 loss """bceaftersigmoid""" +395 65 regularizer """no""" +395 65 optimizer """adam""" +395 65 training_loop """owa""" +395 65 negative_sampler """basic""" +395 65 evaluator """rankbased""" +395 66 dataset """wn18rr""" +395 66 model """kg2e""" +395 66 loss """bceaftersigmoid""" +395 66 regularizer """no""" +395 66 optimizer """adam""" +395 66 training_loop """owa""" +395 66 negative_sampler """basic""" +395 66 evaluator """rankbased""" +395 67 dataset """wn18rr""" +395 67 model """kg2e""" +395 67 loss """bceaftersigmoid""" +395 67 regularizer """no""" +395 67 optimizer """adam""" +395 67 training_loop """owa""" +395 67 negative_sampler """basic""" +395 67 evaluator """rankbased""" +395 68 dataset """wn18rr""" +395 68 model """kg2e""" +395 68 loss """bceaftersigmoid""" +395 68 regularizer """no""" +395 68 optimizer """adam""" +395 68 training_loop """owa""" +395 68 negative_sampler """basic""" +395 68 evaluator """rankbased""" +395 69 dataset """wn18rr""" +395 69 model """kg2e""" +395 69 loss """bceaftersigmoid""" +395 69 regularizer """no""" +395 69 optimizer """adam""" +395 69 training_loop """owa""" +395 69 negative_sampler """basic""" +395 69 evaluator """rankbased""" +395 70 dataset """wn18rr""" +395 70 model """kg2e""" +395 70 loss """bceaftersigmoid""" +395 70 regularizer """no""" +395 70 optimizer """adam""" +395 70 training_loop """owa""" +395 70 negative_sampler """basic""" +395 70 evaluator """rankbased""" +395 71 dataset """wn18rr""" +395 71 model """kg2e""" +395 71 loss """bceaftersigmoid""" +395 71 regularizer """no""" +395 71 optimizer """adam""" +395 71 training_loop """owa""" +395 71 negative_sampler """basic""" +395 71 evaluator """rankbased""" +395 72 dataset """wn18rr""" +395 72 model """kg2e""" +395 72 loss """bceaftersigmoid""" +395 72 regularizer """no""" +395 72 optimizer """adam""" +395 72 
training_loop """owa""" +395 72 negative_sampler """basic""" +395 72 evaluator """rankbased""" +395 73 dataset """wn18rr""" +395 73 model """kg2e""" +395 73 loss """bceaftersigmoid""" +395 73 regularizer """no""" +395 73 optimizer """adam""" +395 73 training_loop """owa""" +395 73 negative_sampler """basic""" +395 73 evaluator """rankbased""" +395 74 dataset """wn18rr""" +395 74 model """kg2e""" +395 74 loss """bceaftersigmoid""" +395 74 regularizer """no""" +395 74 optimizer """adam""" +395 74 training_loop """owa""" +395 74 negative_sampler """basic""" +395 74 evaluator """rankbased""" +395 75 dataset """wn18rr""" +395 75 model """kg2e""" +395 75 loss """bceaftersigmoid""" +395 75 regularizer """no""" +395 75 optimizer """adam""" +395 75 training_loop """owa""" +395 75 negative_sampler """basic""" +395 75 evaluator """rankbased""" +395 76 dataset """wn18rr""" +395 76 model """kg2e""" +395 76 loss """bceaftersigmoid""" +395 76 regularizer """no""" +395 76 optimizer """adam""" +395 76 training_loop """owa""" +395 76 negative_sampler """basic""" +395 76 evaluator """rankbased""" +395 77 dataset """wn18rr""" +395 77 model """kg2e""" +395 77 loss """bceaftersigmoid""" +395 77 regularizer """no""" +395 77 optimizer """adam""" +395 77 training_loop """owa""" +395 77 negative_sampler """basic""" +395 77 evaluator """rankbased""" +395 78 dataset """wn18rr""" +395 78 model """kg2e""" +395 78 loss """bceaftersigmoid""" +395 78 regularizer """no""" +395 78 optimizer """adam""" +395 78 training_loop """owa""" +395 78 negative_sampler """basic""" +395 78 evaluator """rankbased""" +395 79 dataset """wn18rr""" +395 79 model """kg2e""" +395 79 loss """bceaftersigmoid""" +395 79 regularizer """no""" +395 79 optimizer """adam""" +395 79 training_loop """owa""" +395 79 negative_sampler """basic""" +395 79 evaluator """rankbased""" +395 80 dataset """wn18rr""" +395 80 model """kg2e""" +395 80 loss """bceaftersigmoid""" +395 80 regularizer """no""" +395 80 optimizer """adam""" +395 80 
training_loop """owa""" +395 80 negative_sampler """basic""" +395 80 evaluator """rankbased""" +395 81 dataset """wn18rr""" +395 81 model """kg2e""" +395 81 loss """bceaftersigmoid""" +395 81 regularizer """no""" +395 81 optimizer """adam""" +395 81 training_loop """owa""" +395 81 negative_sampler """basic""" +395 81 evaluator """rankbased""" +395 82 dataset """wn18rr""" +395 82 model """kg2e""" +395 82 loss """bceaftersigmoid""" +395 82 regularizer """no""" +395 82 optimizer """adam""" +395 82 training_loop """owa""" +395 82 negative_sampler """basic""" +395 82 evaluator """rankbased""" +395 83 dataset """wn18rr""" +395 83 model """kg2e""" +395 83 loss """bceaftersigmoid""" +395 83 regularizer """no""" +395 83 optimizer """adam""" +395 83 training_loop """owa""" +395 83 negative_sampler """basic""" +395 83 evaluator """rankbased""" +395 84 dataset """wn18rr""" +395 84 model """kg2e""" +395 84 loss """bceaftersigmoid""" +395 84 regularizer """no""" +395 84 optimizer """adam""" +395 84 training_loop """owa""" +395 84 negative_sampler """basic""" +395 84 evaluator """rankbased""" +395 85 dataset """wn18rr""" +395 85 model """kg2e""" +395 85 loss """bceaftersigmoid""" +395 85 regularizer """no""" +395 85 optimizer """adam""" +395 85 training_loop """owa""" +395 85 negative_sampler """basic""" +395 85 evaluator """rankbased""" +395 86 dataset """wn18rr""" +395 86 model """kg2e""" +395 86 loss """bceaftersigmoid""" +395 86 regularizer """no""" +395 86 optimizer """adam""" +395 86 training_loop """owa""" +395 86 negative_sampler """basic""" +395 86 evaluator """rankbased""" +395 87 dataset """wn18rr""" +395 87 model """kg2e""" +395 87 loss """bceaftersigmoid""" +395 87 regularizer """no""" +395 87 optimizer """adam""" +395 87 training_loop """owa""" +395 87 negative_sampler """basic""" +395 87 evaluator """rankbased""" +395 88 dataset """wn18rr""" +395 88 model """kg2e""" +395 88 loss """bceaftersigmoid""" +395 88 regularizer """no""" +395 88 optimizer """adam""" +395 88 
training_loop """owa""" +395 88 negative_sampler """basic""" +395 88 evaluator """rankbased""" +395 89 dataset """wn18rr""" +395 89 model """kg2e""" +395 89 loss """bceaftersigmoid""" +395 89 regularizer """no""" +395 89 optimizer """adam""" +395 89 training_loop """owa""" +395 89 negative_sampler """basic""" +395 89 evaluator """rankbased""" +395 90 dataset """wn18rr""" +395 90 model """kg2e""" +395 90 loss """bceaftersigmoid""" +395 90 regularizer """no""" +395 90 optimizer """adam""" +395 90 training_loop """owa""" +395 90 negative_sampler """basic""" +395 90 evaluator """rankbased""" +395 91 dataset """wn18rr""" +395 91 model """kg2e""" +395 91 loss """bceaftersigmoid""" +395 91 regularizer """no""" +395 91 optimizer """adam""" +395 91 training_loop """owa""" +395 91 negative_sampler """basic""" +395 91 evaluator """rankbased""" +395 92 dataset """wn18rr""" +395 92 model """kg2e""" +395 92 loss """bceaftersigmoid""" +395 92 regularizer """no""" +395 92 optimizer """adam""" +395 92 training_loop """owa""" +395 92 negative_sampler """basic""" +395 92 evaluator """rankbased""" +395 93 dataset """wn18rr""" +395 93 model """kg2e""" +395 93 loss """bceaftersigmoid""" +395 93 regularizer """no""" +395 93 optimizer """adam""" +395 93 training_loop """owa""" +395 93 negative_sampler """basic""" +395 93 evaluator """rankbased""" +395 94 dataset """wn18rr""" +395 94 model """kg2e""" +395 94 loss """bceaftersigmoid""" +395 94 regularizer """no""" +395 94 optimizer """adam""" +395 94 training_loop """owa""" +395 94 negative_sampler """basic""" +395 94 evaluator """rankbased""" +395 95 dataset """wn18rr""" +395 95 model """kg2e""" +395 95 loss """bceaftersigmoid""" +395 95 regularizer """no""" +395 95 optimizer """adam""" +395 95 training_loop """owa""" +395 95 negative_sampler """basic""" +395 95 evaluator """rankbased""" +395 96 dataset """wn18rr""" +395 96 model """kg2e""" +395 96 loss """bceaftersigmoid""" +395 96 regularizer """no""" +395 96 optimizer """adam""" +395 96 
training_loop """owa""" +395 96 negative_sampler """basic""" +395 96 evaluator """rankbased""" +395 97 dataset """wn18rr""" +395 97 model """kg2e""" +395 97 loss """bceaftersigmoid""" +395 97 regularizer """no""" +395 97 optimizer """adam""" +395 97 training_loop """owa""" +395 97 negative_sampler """basic""" +395 97 evaluator """rankbased""" +395 98 dataset """wn18rr""" +395 98 model """kg2e""" +395 98 loss """bceaftersigmoid""" +395 98 regularizer """no""" +395 98 optimizer """adam""" +395 98 training_loop """owa""" +395 98 negative_sampler """basic""" +395 98 evaluator """rankbased""" +395 99 dataset """wn18rr""" +395 99 model """kg2e""" +395 99 loss """bceaftersigmoid""" +395 99 regularizer """no""" +395 99 optimizer """adam""" +395 99 training_loop """owa""" +395 99 negative_sampler """basic""" +395 99 evaluator """rankbased""" +395 100 dataset """wn18rr""" +395 100 model """kg2e""" +395 100 loss """bceaftersigmoid""" +395 100 regularizer """no""" +395 100 optimizer """adam""" +395 100 training_loop """owa""" +395 100 negative_sampler """basic""" +395 100 evaluator """rankbased""" +396 1 model.embedding_dim 0.0 +396 1 model.c_min 0.059610056051880224 +396 1 model.c_max 3.173893718141363 +396 1 optimizer.lr 0.06674490355022218 +396 1 negative_sampler.num_negs_per_pos 24.0 +396 1 training.batch_size 0.0 +396 2 model.embedding_dim 0.0 +396 2 model.c_min 0.010266580632628556 +396 2 model.c_max 4.563592451176631 +396 2 optimizer.lr 0.0014747111921678262 +396 2 negative_sampler.num_negs_per_pos 88.0 +396 2 training.batch_size 2.0 +396 3 model.embedding_dim 0.0 +396 3 model.c_min 0.025282162743293386 +396 3 model.c_max 8.043127513933449 +396 3 optimizer.lr 0.0026676229800298777 +396 3 negative_sampler.num_negs_per_pos 92.0 +396 3 training.batch_size 0.0 +396 4 model.embedding_dim 1.0 +396 4 model.c_min 0.014872657065561633 +396 4 model.c_max 4.911376364787283 +396 4 optimizer.lr 0.0015850710792563068 +396 4 negative_sampler.num_negs_per_pos 77.0 +396 4 
training.batch_size 2.0 +396 5 model.embedding_dim 2.0 +396 5 model.c_min 0.012103190549398856 +396 5 model.c_max 2.5751201967236335 +396 5 optimizer.lr 0.05279725557444686 +396 5 negative_sampler.num_negs_per_pos 52.0 +396 5 training.batch_size 0.0 +396 6 model.embedding_dim 1.0 +396 6 model.c_min 0.012169703360903493 +396 6 model.c_max 1.7065002373693563 +396 6 optimizer.lr 0.008328485636446994 +396 6 negative_sampler.num_negs_per_pos 77.0 +396 6 training.batch_size 2.0 +396 7 model.embedding_dim 1.0 +396 7 model.c_min 0.013872443242121574 +396 7 model.c_max 4.241004052576606 +396 7 optimizer.lr 0.0010765938197838728 +396 7 negative_sampler.num_negs_per_pos 77.0 +396 7 training.batch_size 0.0 +396 8 model.embedding_dim 0.0 +396 8 model.c_min 0.020024379850838176 +396 8 model.c_max 9.287353702093618 +396 8 optimizer.lr 0.003901891774620229 +396 8 negative_sampler.num_negs_per_pos 74.0 +396 8 training.batch_size 1.0 +396 9 model.embedding_dim 2.0 +396 9 model.c_min 0.07785210489024295 +396 9 model.c_max 6.343583525757916 +396 9 optimizer.lr 0.005406383587981627 +396 9 negative_sampler.num_negs_per_pos 5.0 +396 9 training.batch_size 0.0 +396 10 model.embedding_dim 0.0 +396 10 model.c_min 0.012959361471552659 +396 10 model.c_max 9.047410523008832 +396 10 optimizer.lr 0.0011761835541314043 +396 10 negative_sampler.num_negs_per_pos 3.0 +396 10 training.batch_size 2.0 +396 11 model.embedding_dim 2.0 +396 11 model.c_min 0.02392215440473994 +396 11 model.c_max 7.92108674729272 +396 11 optimizer.lr 0.0012915512646627337 +396 11 negative_sampler.num_negs_per_pos 49.0 +396 11 training.batch_size 0.0 +396 12 model.embedding_dim 1.0 +396 12 model.c_min 0.014099770524196673 +396 12 model.c_max 6.025488996206316 +396 12 optimizer.lr 0.005796030661306735 +396 12 negative_sampler.num_negs_per_pos 11.0 +396 12 training.batch_size 0.0 +396 13 model.embedding_dim 0.0 +396 13 model.c_min 0.02598565971072067 +396 13 model.c_max 2.959253842362852 +396 13 optimizer.lr 
0.013179523287887431 +396 13 negative_sampler.num_negs_per_pos 58.0 +396 13 training.batch_size 1.0 +396 14 model.embedding_dim 2.0 +396 14 model.c_min 0.014398241328230213 +396 14 model.c_max 4.376266801401698 +396 14 optimizer.lr 0.038188283350699584 +396 14 negative_sampler.num_negs_per_pos 81.0 +396 14 training.batch_size 2.0 +396 15 model.embedding_dim 2.0 +396 15 model.c_min 0.012000783247564267 +396 15 model.c_max 5.133787431999038 +396 15 optimizer.lr 0.0011501668138811606 +396 15 negative_sampler.num_negs_per_pos 7.0 +396 15 training.batch_size 1.0 +396 16 model.embedding_dim 1.0 +396 16 model.c_min 0.07342194465990305 +396 16 model.c_max 1.6751974285793085 +396 16 optimizer.lr 0.001587394259604503 +396 16 negative_sampler.num_negs_per_pos 78.0 +396 16 training.batch_size 2.0 +396 17 model.embedding_dim 0.0 +396 17 model.c_min 0.05085675829468006 +396 17 model.c_max 9.397881572980053 +396 17 optimizer.lr 0.04798700262305323 +396 17 negative_sampler.num_negs_per_pos 36.0 +396 17 training.batch_size 0.0 +396 18 model.embedding_dim 1.0 +396 18 model.c_min 0.07339900530572983 +396 18 model.c_max 4.873101334249977 +396 18 optimizer.lr 0.026627248677102602 +396 18 negative_sampler.num_negs_per_pos 57.0 +396 18 training.batch_size 2.0 +396 19 model.embedding_dim 0.0 +396 19 model.c_min 0.017852883938364587 +396 19 model.c_max 9.19800313778545 +396 19 optimizer.lr 0.003442462094626651 +396 19 negative_sampler.num_negs_per_pos 21.0 +396 19 training.batch_size 1.0 +396 20 model.embedding_dim 0.0 +396 20 model.c_min 0.09241943362534855 +396 20 model.c_max 6.894396577964578 +396 20 optimizer.lr 0.003952365842647423 +396 20 negative_sampler.num_negs_per_pos 22.0 +396 20 training.batch_size 2.0 +396 21 model.embedding_dim 0.0 +396 21 model.c_min 0.012661587548129521 +396 21 model.c_max 9.035132946138834 +396 21 optimizer.lr 0.002871975443956731 +396 21 negative_sampler.num_negs_per_pos 10.0 +396 21 training.batch_size 0.0 +396 22 model.embedding_dim 0.0 +396 22 
model.c_min 0.07236396900219916 +396 22 model.c_max 2.7476700166351877 +396 22 optimizer.lr 0.0022881161674885133 +396 22 negative_sampler.num_negs_per_pos 94.0 +396 22 training.batch_size 0.0 +396 23 model.embedding_dim 0.0 +396 23 model.c_min 0.032901869345365306 +396 23 model.c_max 9.794065441329424 +396 23 optimizer.lr 0.015600266456685531 +396 23 negative_sampler.num_negs_per_pos 62.0 +396 23 training.batch_size 0.0 +396 24 model.embedding_dim 0.0 +396 24 model.c_min 0.09342939424764089 +396 24 model.c_max 8.544090176080669 +396 24 optimizer.lr 0.002803576779998557 +396 24 negative_sampler.num_negs_per_pos 15.0 +396 24 training.batch_size 0.0 +396 25 model.embedding_dim 0.0 +396 25 model.c_min 0.02826001356234455 +396 25 model.c_max 5.803883717710314 +396 25 optimizer.lr 0.003514780372039767 +396 25 negative_sampler.num_negs_per_pos 33.0 +396 25 training.batch_size 2.0 +396 26 model.embedding_dim 2.0 +396 26 model.c_min 0.09708012119577218 +396 26 model.c_max 6.703683766391423 +396 26 optimizer.lr 0.00172518531563367 +396 26 negative_sampler.num_negs_per_pos 14.0 +396 26 training.batch_size 1.0 +396 27 model.embedding_dim 2.0 +396 27 model.c_min 0.023662494386850898 +396 27 model.c_max 3.7737640124572924 +396 27 optimizer.lr 0.031794362321982336 +396 27 negative_sampler.num_negs_per_pos 25.0 +396 27 training.batch_size 2.0 +396 28 model.embedding_dim 1.0 +396 28 model.c_min 0.03619130467066818 +396 28 model.c_max 8.010421995472608 +396 28 optimizer.lr 0.008986031062541634 +396 28 negative_sampler.num_negs_per_pos 55.0 +396 28 training.batch_size 0.0 +396 29 model.embedding_dim 2.0 +396 29 model.c_min 0.02650883743783278 +396 29 model.c_max 7.4315415695414755 +396 29 optimizer.lr 0.0026268359965321854 +396 29 negative_sampler.num_negs_per_pos 74.0 +396 29 training.batch_size 1.0 +396 30 model.embedding_dim 0.0 +396 30 model.c_min 0.06696835835854952 +396 30 model.c_max 6.056836803110516 +396 30 optimizer.lr 0.07833722588251832 +396 30 
negative_sampler.num_negs_per_pos 20.0 +396 30 training.batch_size 2.0 +396 31 model.embedding_dim 0.0 +396 31 model.c_min 0.020434990045796856 +396 31 model.c_max 7.988949586776769 +396 31 optimizer.lr 0.0012207008910758424 +396 31 negative_sampler.num_negs_per_pos 56.0 +396 31 training.batch_size 0.0 +396 32 model.embedding_dim 2.0 +396 32 model.c_min 0.01994368759679744 +396 32 model.c_max 2.2276297970378267 +396 32 optimizer.lr 0.002747119687799296 +396 32 negative_sampler.num_negs_per_pos 32.0 +396 32 training.batch_size 2.0 +396 33 model.embedding_dim 0.0 +396 33 model.c_min 0.011304064900882836 +396 33 model.c_max 1.73131127943599 +396 33 optimizer.lr 0.06328357645332801 +396 33 negative_sampler.num_negs_per_pos 25.0 +396 33 training.batch_size 1.0 +396 34 model.embedding_dim 2.0 +396 34 model.c_min 0.05080543491491145 +396 34 model.c_max 5.097949206759529 +396 34 optimizer.lr 0.015850856555261205 +396 34 negative_sampler.num_negs_per_pos 80.0 +396 34 training.batch_size 0.0 +396 35 model.embedding_dim 0.0 +396 35 model.c_min 0.0335058257913734 +396 35 model.c_max 2.595975691907754 +396 35 optimizer.lr 0.001024291165896041 +396 35 negative_sampler.num_negs_per_pos 86.0 +396 35 training.batch_size 1.0 +396 36 model.embedding_dim 1.0 +396 36 model.c_min 0.06211343398120215 +396 36 model.c_max 1.0860075798852913 +396 36 optimizer.lr 0.001820306521775809 +396 36 negative_sampler.num_negs_per_pos 47.0 +396 36 training.batch_size 0.0 +396 37 model.embedding_dim 2.0 +396 37 model.c_min 0.05785687499606977 +396 37 model.c_max 4.484145288197372 +396 37 optimizer.lr 0.0052793499520224375 +396 37 negative_sampler.num_negs_per_pos 68.0 +396 37 training.batch_size 0.0 +396 38 model.embedding_dim 1.0 +396 38 model.c_min 0.04387850980293371 +396 38 model.c_max 5.129342668539934 +396 38 optimizer.lr 0.0011473698466146036 +396 38 negative_sampler.num_negs_per_pos 48.0 +396 38 training.batch_size 2.0 +396 39 model.embedding_dim 2.0 +396 39 model.c_min 0.033366228622343105 
+396 39 model.c_max 9.252049183973396 +396 39 optimizer.lr 0.006121500005429991 +396 39 negative_sampler.num_negs_per_pos 25.0 +396 39 training.batch_size 0.0 +396 40 model.embedding_dim 0.0 +396 40 model.c_min 0.019354779903004386 +396 40 model.c_max 9.960710699312026 +396 40 optimizer.lr 0.007175621193499598 +396 40 negative_sampler.num_negs_per_pos 13.0 +396 40 training.batch_size 1.0 +396 41 model.embedding_dim 1.0 +396 41 model.c_min 0.026583836401987002 +396 41 model.c_max 7.245098882537068 +396 41 optimizer.lr 0.018723876714089344 +396 41 negative_sampler.num_negs_per_pos 54.0 +396 41 training.batch_size 1.0 +396 42 model.embedding_dim 2.0 +396 42 model.c_min 0.03809485144564175 +396 42 model.c_max 1.726115819820797 +396 42 optimizer.lr 0.004563596366775041 +396 42 negative_sampler.num_negs_per_pos 68.0 +396 42 training.batch_size 1.0 +396 43 model.embedding_dim 2.0 +396 43 model.c_min 0.03628330371570421 +396 43 model.c_max 6.058192019286617 +396 43 optimizer.lr 0.08474723336430068 +396 43 negative_sampler.num_negs_per_pos 3.0 +396 43 training.batch_size 0.0 +396 44 model.embedding_dim 1.0 +396 44 model.c_min 0.010481157886801717 +396 44 model.c_max 1.2871028514144949 +396 44 optimizer.lr 0.0015285786219187986 +396 44 negative_sampler.num_negs_per_pos 37.0 +396 44 training.batch_size 0.0 +396 45 model.embedding_dim 2.0 +396 45 model.c_min 0.015310811831274742 +396 45 model.c_max 6.756492374160217 +396 45 optimizer.lr 0.007774413411041215 +396 45 negative_sampler.num_negs_per_pos 66.0 +396 45 training.batch_size 1.0 +396 46 model.embedding_dim 2.0 +396 46 model.c_min 0.08028073594819866 +396 46 model.c_max 7.702634587702592 +396 46 optimizer.lr 0.012240197915368473 +396 46 negative_sampler.num_negs_per_pos 56.0 +396 46 training.batch_size 0.0 +396 47 model.embedding_dim 1.0 +396 47 model.c_min 0.011165968094760906 +396 47 model.c_max 3.7813355215938538 +396 47 optimizer.lr 0.016523790232761234 +396 47 negative_sampler.num_negs_per_pos 19.0 +396 47 
training.batch_size 2.0 +396 48 model.embedding_dim 0.0 +396 48 model.c_min 0.03155955460563926 +396 48 model.c_max 6.548141088376511 +396 48 optimizer.lr 0.002148484511517467 +396 48 negative_sampler.num_negs_per_pos 76.0 +396 48 training.batch_size 2.0 +396 49 model.embedding_dim 1.0 +396 49 model.c_min 0.01921226984378246 +396 49 model.c_max 6.595322759383116 +396 49 optimizer.lr 0.0016452789601900182 +396 49 negative_sampler.num_negs_per_pos 62.0 +396 49 training.batch_size 2.0 +396 50 model.embedding_dim 0.0 +396 50 model.c_min 0.036082173194083265 +396 50 model.c_max 7.619287039243326 +396 50 optimizer.lr 0.003636452630639873 +396 50 negative_sampler.num_negs_per_pos 86.0 +396 50 training.batch_size 0.0 +396 51 model.embedding_dim 2.0 +396 51 model.c_min 0.03359469602166341 +396 51 model.c_max 8.736095412111991 +396 51 optimizer.lr 0.06614465965831133 +396 51 negative_sampler.num_negs_per_pos 95.0 +396 51 training.batch_size 1.0 +396 52 model.embedding_dim 2.0 +396 52 model.c_min 0.07523374068288509 +396 52 model.c_max 4.19054858717443 +396 52 optimizer.lr 0.001249576127171493 +396 52 negative_sampler.num_negs_per_pos 10.0 +396 52 training.batch_size 0.0 +396 53 model.embedding_dim 0.0 +396 53 model.c_min 0.09244017859527086 +396 53 model.c_max 3.1763484575221277 +396 53 optimizer.lr 0.012518491785814665 +396 53 negative_sampler.num_negs_per_pos 13.0 +396 53 training.batch_size 2.0 +396 54 model.embedding_dim 2.0 +396 54 model.c_min 0.03667157802545563 +396 54 model.c_max 9.215090844802129 +396 54 optimizer.lr 0.0142802436627728 +396 54 negative_sampler.num_negs_per_pos 19.0 +396 54 training.batch_size 2.0 +396 55 model.embedding_dim 0.0 +396 55 model.c_min 0.016331770440142543 +396 55 model.c_max 1.263198356136873 +396 55 optimizer.lr 0.0010645744356333872 +396 55 negative_sampler.num_negs_per_pos 63.0 +396 55 training.batch_size 2.0 +396 56 model.embedding_dim 0.0 +396 56 model.c_min 0.02962850698184467 +396 56 model.c_max 2.9122707530546044 +396 56 
optimizer.lr 0.03277728922117737 +396 56 negative_sampler.num_negs_per_pos 60.0 +396 56 training.batch_size 1.0 +396 57 model.embedding_dim 2.0 +396 57 model.c_min 0.029713705868515197 +396 57 model.c_max 6.996942328985025 +396 57 optimizer.lr 0.013575123736760642 +396 57 negative_sampler.num_negs_per_pos 38.0 +396 57 training.batch_size 0.0 +396 58 model.embedding_dim 0.0 +396 58 model.c_min 0.06410168747553907 +396 58 model.c_max 9.931654571659067 +396 58 optimizer.lr 0.004661464953630156 +396 58 negative_sampler.num_negs_per_pos 83.0 +396 58 training.batch_size 2.0 +396 59 model.embedding_dim 0.0 +396 59 model.c_min 0.04541579519842504 +396 59 model.c_max 7.2955132131613905 +396 59 optimizer.lr 0.011093217872232625 +396 59 negative_sampler.num_negs_per_pos 41.0 +396 59 training.batch_size 0.0 +396 60 model.embedding_dim 1.0 +396 60 model.c_min 0.04803535178827403 +396 60 model.c_max 2.6255111496045833 +396 60 optimizer.lr 0.010257626996807543 +396 60 negative_sampler.num_negs_per_pos 75.0 +396 60 training.batch_size 2.0 +396 61 model.embedding_dim 1.0 +396 61 model.c_min 0.02895036029232977 +396 61 model.c_max 8.058882734816738 +396 61 optimizer.lr 0.013950550420042748 +396 61 negative_sampler.num_negs_per_pos 50.0 +396 61 training.batch_size 2.0 +396 62 model.embedding_dim 0.0 +396 62 model.c_min 0.07970553792389133 +396 62 model.c_max 2.349751183163428 +396 62 optimizer.lr 0.0678230598695272 +396 62 negative_sampler.num_negs_per_pos 52.0 +396 62 training.batch_size 1.0 +396 1 dataset """wn18rr""" +396 1 model """kg2e""" +396 1 loss """softplus""" +396 1 regularizer """no""" +396 1 optimizer """adam""" +396 1 training_loop """owa""" +396 1 negative_sampler """basic""" +396 1 evaluator """rankbased""" +396 2 dataset """wn18rr""" +396 2 model """kg2e""" +396 2 loss """softplus""" +396 2 regularizer """no""" +396 2 optimizer """adam""" +396 2 training_loop """owa""" +396 2 negative_sampler """basic""" +396 2 evaluator """rankbased""" +396 3 dataset """wn18rr""" 
+396 3 model """kg2e""" +396 3 loss """softplus""" +396 3 regularizer """no""" +396 3 optimizer """adam""" +396 3 training_loop """owa""" +396 3 negative_sampler """basic""" +396 3 evaluator """rankbased""" +396 4 dataset """wn18rr""" +396 4 model """kg2e""" +396 4 loss """softplus""" +396 4 regularizer """no""" +396 4 optimizer """adam""" +396 4 training_loop """owa""" +396 4 negative_sampler """basic""" +396 4 evaluator """rankbased""" +396 5 dataset """wn18rr""" +396 5 model """kg2e""" +396 5 loss """softplus""" +396 5 regularizer """no""" +396 5 optimizer """adam""" +396 5 training_loop """owa""" +396 5 negative_sampler """basic""" +396 5 evaluator """rankbased""" +396 6 dataset """wn18rr""" +396 6 model """kg2e""" +396 6 loss """softplus""" +396 6 regularizer """no""" +396 6 optimizer """adam""" +396 6 training_loop """owa""" +396 6 negative_sampler """basic""" +396 6 evaluator """rankbased""" +396 7 dataset """wn18rr""" +396 7 model """kg2e""" +396 7 loss """softplus""" +396 7 regularizer """no""" +396 7 optimizer """adam""" +396 7 training_loop """owa""" +396 7 negative_sampler """basic""" +396 7 evaluator """rankbased""" +396 8 dataset """wn18rr""" +396 8 model """kg2e""" +396 8 loss """softplus""" +396 8 regularizer """no""" +396 8 optimizer """adam""" +396 8 training_loop """owa""" +396 8 negative_sampler """basic""" +396 8 evaluator """rankbased""" +396 9 dataset """wn18rr""" +396 9 model """kg2e""" +396 9 loss """softplus""" +396 9 regularizer """no""" +396 9 optimizer """adam""" +396 9 training_loop """owa""" +396 9 negative_sampler """basic""" +396 9 evaluator """rankbased""" +396 10 dataset """wn18rr""" +396 10 model """kg2e""" +396 10 loss """softplus""" +396 10 regularizer """no""" +396 10 optimizer """adam""" +396 10 training_loop """owa""" +396 10 negative_sampler """basic""" +396 10 evaluator """rankbased""" +396 11 dataset """wn18rr""" +396 11 model """kg2e""" +396 11 loss """softplus""" +396 11 regularizer """no""" +396 11 optimizer """adam""" 
+396 11 training_loop """owa""" +396 11 negative_sampler """basic""" +396 11 evaluator """rankbased""" +396 12 dataset """wn18rr""" +396 12 model """kg2e""" +396 12 loss """softplus""" +396 12 regularizer """no""" +396 12 optimizer """adam""" +396 12 training_loop """owa""" +396 12 negative_sampler """basic""" +396 12 evaluator """rankbased""" +396 13 dataset """wn18rr""" +396 13 model """kg2e""" +396 13 loss """softplus""" +396 13 regularizer """no""" +396 13 optimizer """adam""" +396 13 training_loop """owa""" +396 13 negative_sampler """basic""" +396 13 evaluator """rankbased""" +396 14 dataset """wn18rr""" +396 14 model """kg2e""" +396 14 loss """softplus""" +396 14 regularizer """no""" +396 14 optimizer """adam""" +396 14 training_loop """owa""" +396 14 negative_sampler """basic""" +396 14 evaluator """rankbased""" +396 15 dataset """wn18rr""" +396 15 model """kg2e""" +396 15 loss """softplus""" +396 15 regularizer """no""" +396 15 optimizer """adam""" +396 15 training_loop """owa""" +396 15 negative_sampler """basic""" +396 15 evaluator """rankbased""" +396 16 dataset """wn18rr""" +396 16 model """kg2e""" +396 16 loss """softplus""" +396 16 regularizer """no""" +396 16 optimizer """adam""" +396 16 training_loop """owa""" +396 16 negative_sampler """basic""" +396 16 evaluator """rankbased""" +396 17 dataset """wn18rr""" +396 17 model """kg2e""" +396 17 loss """softplus""" +396 17 regularizer """no""" +396 17 optimizer """adam""" +396 17 training_loop """owa""" +396 17 negative_sampler """basic""" +396 17 evaluator """rankbased""" +396 18 dataset """wn18rr""" +396 18 model """kg2e""" +396 18 loss """softplus""" +396 18 regularizer """no""" +396 18 optimizer """adam""" +396 18 training_loop """owa""" +396 18 negative_sampler """basic""" +396 18 evaluator """rankbased""" +396 19 dataset """wn18rr""" +396 19 model """kg2e""" +396 19 loss """softplus""" +396 19 regularizer """no""" +396 19 optimizer """adam""" +396 19 training_loop """owa""" +396 19 
negative_sampler """basic""" +396 19 evaluator """rankbased""" +396 20 dataset """wn18rr""" +396 20 model """kg2e""" +396 20 loss """softplus""" +396 20 regularizer """no""" +396 20 optimizer """adam""" +396 20 training_loop """owa""" +396 20 negative_sampler """basic""" +396 20 evaluator """rankbased""" +396 21 dataset """wn18rr""" +396 21 model """kg2e""" +396 21 loss """softplus""" +396 21 regularizer """no""" +396 21 optimizer """adam""" +396 21 training_loop """owa""" +396 21 negative_sampler """basic""" +396 21 evaluator """rankbased""" +396 22 dataset """wn18rr""" +396 22 model """kg2e""" +396 22 loss """softplus""" +396 22 regularizer """no""" +396 22 optimizer """adam""" +396 22 training_loop """owa""" +396 22 negative_sampler """basic""" +396 22 evaluator """rankbased""" +396 23 dataset """wn18rr""" +396 23 model """kg2e""" +396 23 loss """softplus""" +396 23 regularizer """no""" +396 23 optimizer """adam""" +396 23 training_loop """owa""" +396 23 negative_sampler """basic""" +396 23 evaluator """rankbased""" +396 24 dataset """wn18rr""" +396 24 model """kg2e""" +396 24 loss """softplus""" +396 24 regularizer """no""" +396 24 optimizer """adam""" +396 24 training_loop """owa""" +396 24 negative_sampler """basic""" +396 24 evaluator """rankbased""" +396 25 dataset """wn18rr""" +396 25 model """kg2e""" +396 25 loss """softplus""" +396 25 regularizer """no""" +396 25 optimizer """adam""" +396 25 training_loop """owa""" +396 25 negative_sampler """basic""" +396 25 evaluator """rankbased""" +396 26 dataset """wn18rr""" +396 26 model """kg2e""" +396 26 loss """softplus""" +396 26 regularizer """no""" +396 26 optimizer """adam""" +396 26 training_loop """owa""" +396 26 negative_sampler """basic""" +396 26 evaluator """rankbased""" +396 27 dataset """wn18rr""" +396 27 model """kg2e""" +396 27 loss """softplus""" +396 27 regularizer """no""" +396 27 optimizer """adam""" +396 27 training_loop """owa""" +396 27 negative_sampler """basic""" +396 27 evaluator 
"""rankbased""" +396 28 dataset """wn18rr""" +396 28 model """kg2e""" +396 28 loss """softplus""" +396 28 regularizer """no""" +396 28 optimizer """adam""" +396 28 training_loop """owa""" +396 28 negative_sampler """basic""" +396 28 evaluator """rankbased""" +396 29 dataset """wn18rr""" +396 29 model """kg2e""" +396 29 loss """softplus""" +396 29 regularizer """no""" +396 29 optimizer """adam""" +396 29 training_loop """owa""" +396 29 negative_sampler """basic""" +396 29 evaluator """rankbased""" +396 30 dataset """wn18rr""" +396 30 model """kg2e""" +396 30 loss """softplus""" +396 30 regularizer """no""" +396 30 optimizer """adam""" +396 30 training_loop """owa""" +396 30 negative_sampler """basic""" +396 30 evaluator """rankbased""" +396 31 dataset """wn18rr""" +396 31 model """kg2e""" +396 31 loss """softplus""" +396 31 regularizer """no""" +396 31 optimizer """adam""" +396 31 training_loop """owa""" +396 31 negative_sampler """basic""" +396 31 evaluator """rankbased""" +396 32 dataset """wn18rr""" +396 32 model """kg2e""" +396 32 loss """softplus""" +396 32 regularizer """no""" +396 32 optimizer """adam""" +396 32 training_loop """owa""" +396 32 negative_sampler """basic""" +396 32 evaluator """rankbased""" +396 33 dataset """wn18rr""" +396 33 model """kg2e""" +396 33 loss """softplus""" +396 33 regularizer """no""" +396 33 optimizer """adam""" +396 33 training_loop """owa""" +396 33 negative_sampler """basic""" +396 33 evaluator """rankbased""" +396 34 dataset """wn18rr""" +396 34 model """kg2e""" +396 34 loss """softplus""" +396 34 regularizer """no""" +396 34 optimizer """adam""" +396 34 training_loop """owa""" +396 34 negative_sampler """basic""" +396 34 evaluator """rankbased""" +396 35 dataset """wn18rr""" +396 35 model """kg2e""" +396 35 loss """softplus""" +396 35 regularizer """no""" +396 35 optimizer """adam""" +396 35 training_loop """owa""" +396 35 negative_sampler """basic""" +396 35 evaluator """rankbased""" +396 36 dataset """wn18rr""" +396 36 
model """kg2e""" +396 36 loss """softplus""" +396 36 regularizer """no""" +396 36 optimizer """adam""" +396 36 training_loop """owa""" +396 36 negative_sampler """basic""" +396 36 evaluator """rankbased""" +396 37 dataset """wn18rr""" +396 37 model """kg2e""" +396 37 loss """softplus""" +396 37 regularizer """no""" +396 37 optimizer """adam""" +396 37 training_loop """owa""" +396 37 negative_sampler """basic""" +396 37 evaluator """rankbased""" +396 38 dataset """wn18rr""" +396 38 model """kg2e""" +396 38 loss """softplus""" +396 38 regularizer """no""" +396 38 optimizer """adam""" +396 38 training_loop """owa""" +396 38 negative_sampler """basic""" +396 38 evaluator """rankbased""" +396 39 dataset """wn18rr""" +396 39 model """kg2e""" +396 39 loss """softplus""" +396 39 regularizer """no""" +396 39 optimizer """adam""" +396 39 training_loop """owa""" +396 39 negative_sampler """basic""" +396 39 evaluator """rankbased""" +396 40 dataset """wn18rr""" +396 40 model """kg2e""" +396 40 loss """softplus""" +396 40 regularizer """no""" +396 40 optimizer """adam""" +396 40 training_loop """owa""" +396 40 negative_sampler """basic""" +396 40 evaluator """rankbased""" +396 41 dataset """wn18rr""" +396 41 model """kg2e""" +396 41 loss """softplus""" +396 41 regularizer """no""" +396 41 optimizer """adam""" +396 41 training_loop """owa""" +396 41 negative_sampler """basic""" +396 41 evaluator """rankbased""" +396 42 dataset """wn18rr""" +396 42 model """kg2e""" +396 42 loss """softplus""" +396 42 regularizer """no""" +396 42 optimizer """adam""" +396 42 training_loop """owa""" +396 42 negative_sampler """basic""" +396 42 evaluator """rankbased""" +396 43 dataset """wn18rr""" +396 43 model """kg2e""" +396 43 loss """softplus""" +396 43 regularizer """no""" +396 43 optimizer """adam""" +396 43 training_loop """owa""" +396 43 negative_sampler """basic""" +396 43 evaluator """rankbased""" +396 44 dataset """wn18rr""" +396 44 model """kg2e""" +396 44 loss """softplus""" +396 44 
regularizer """no""" +396 44 optimizer """adam""" +396 44 training_loop """owa""" +396 44 negative_sampler """basic""" +396 44 evaluator """rankbased""" +396 45 dataset """wn18rr""" +396 45 model """kg2e""" +396 45 loss """softplus""" +396 45 regularizer """no""" +396 45 optimizer """adam""" +396 45 training_loop """owa""" +396 45 negative_sampler """basic""" +396 45 evaluator """rankbased""" +396 46 dataset """wn18rr""" +396 46 model """kg2e""" +396 46 loss """softplus""" +396 46 regularizer """no""" +396 46 optimizer """adam""" +396 46 training_loop """owa""" +396 46 negative_sampler """basic""" +396 46 evaluator """rankbased""" +396 47 dataset """wn18rr""" +396 47 model """kg2e""" +396 47 loss """softplus""" +396 47 regularizer """no""" +396 47 optimizer """adam""" +396 47 training_loop """owa""" +396 47 negative_sampler """basic""" +396 47 evaluator """rankbased""" +396 48 dataset """wn18rr""" +396 48 model """kg2e""" +396 48 loss """softplus""" +396 48 regularizer """no""" +396 48 optimizer """adam""" +396 48 training_loop """owa""" +396 48 negative_sampler """basic""" +396 48 evaluator """rankbased""" +396 49 dataset """wn18rr""" +396 49 model """kg2e""" +396 49 loss """softplus""" +396 49 regularizer """no""" +396 49 optimizer """adam""" +396 49 training_loop """owa""" +396 49 negative_sampler """basic""" +396 49 evaluator """rankbased""" +396 50 dataset """wn18rr""" +396 50 model """kg2e""" +396 50 loss """softplus""" +396 50 regularizer """no""" +396 50 optimizer """adam""" +396 50 training_loop """owa""" +396 50 negative_sampler """basic""" +396 50 evaluator """rankbased""" +396 51 dataset """wn18rr""" +396 51 model """kg2e""" +396 51 loss """softplus""" +396 51 regularizer """no""" +396 51 optimizer """adam""" +396 51 training_loop """owa""" +396 51 negative_sampler """basic""" +396 51 evaluator """rankbased""" +396 52 dataset """wn18rr""" +396 52 model """kg2e""" +396 52 loss """softplus""" +396 52 regularizer """no""" +396 52 optimizer """adam""" +396 
52 training_loop """owa""" +396 52 negative_sampler """basic""" +396 52 evaluator """rankbased""" +396 53 dataset """wn18rr""" +396 53 model """kg2e""" +396 53 loss """softplus""" +396 53 regularizer """no""" +396 53 optimizer """adam""" +396 53 training_loop """owa""" +396 53 negative_sampler """basic""" +396 53 evaluator """rankbased""" +396 54 dataset """wn18rr""" +396 54 model """kg2e""" +396 54 loss """softplus""" +396 54 regularizer """no""" +396 54 optimizer """adam""" +396 54 training_loop """owa""" +396 54 negative_sampler """basic""" +396 54 evaluator """rankbased""" +396 55 dataset """wn18rr""" +396 55 model """kg2e""" +396 55 loss """softplus""" +396 55 regularizer """no""" +396 55 optimizer """adam""" +396 55 training_loop """owa""" +396 55 negative_sampler """basic""" +396 55 evaluator """rankbased""" +396 56 dataset """wn18rr""" +396 56 model """kg2e""" +396 56 loss """softplus""" +396 56 regularizer """no""" +396 56 optimizer """adam""" +396 56 training_loop """owa""" +396 56 negative_sampler """basic""" +396 56 evaluator """rankbased""" +396 57 dataset """wn18rr""" +396 57 model """kg2e""" +396 57 loss """softplus""" +396 57 regularizer """no""" +396 57 optimizer """adam""" +396 57 training_loop """owa""" +396 57 negative_sampler """basic""" +396 57 evaluator """rankbased""" +396 58 dataset """wn18rr""" +396 58 model """kg2e""" +396 58 loss """softplus""" +396 58 regularizer """no""" +396 58 optimizer """adam""" +396 58 training_loop """owa""" +396 58 negative_sampler """basic""" +396 58 evaluator """rankbased""" +396 59 dataset """wn18rr""" +396 59 model """kg2e""" +396 59 loss """softplus""" +396 59 regularizer """no""" +396 59 optimizer """adam""" +396 59 training_loop """owa""" +396 59 negative_sampler """basic""" +396 59 evaluator """rankbased""" +396 60 dataset """wn18rr""" +396 60 model """kg2e""" +396 60 loss """softplus""" +396 60 regularizer """no""" +396 60 optimizer """adam""" +396 60 training_loop """owa""" +396 60 negative_sampler 
"""basic""" +396 60 evaluator """rankbased""" +396 61 dataset """wn18rr""" +396 61 model """kg2e""" +396 61 loss """softplus""" +396 61 regularizer """no""" +396 61 optimizer """adam""" +396 61 training_loop """owa""" +396 61 negative_sampler """basic""" +396 61 evaluator """rankbased""" +396 62 dataset """wn18rr""" +396 62 model """kg2e""" +396 62 loss """softplus""" +396 62 regularizer """no""" +396 62 optimizer """adam""" +396 62 training_loop """owa""" +396 62 negative_sampler """basic""" +396 62 evaluator """rankbased""" +397 1 model.embedding_dim 0.0 +397 1 model.c_min 0.08073482890328863 +397 1 model.c_max 9.799881233445921 +397 1 optimizer.lr 0.00251952816222832 +397 1 training.batch_size 1.0 +397 1 training.label_smoothing 0.003428813692495464 +397 2 model.embedding_dim 2.0 +397 2 model.c_min 0.040680133451075344 +397 2 model.c_max 4.687634752955194 +397 2 optimizer.lr 0.011670567314781454 +397 2 training.batch_size 0.0 +397 2 training.label_smoothing 0.47010109879922807 +397 1 dataset """wn18rr""" +397 1 model """kg2e""" +397 1 loss """crossentropy""" +397 1 regularizer """no""" +397 1 optimizer """adam""" +397 1 training_loop """lcwa""" +397 1 evaluator """rankbased""" +397 2 dataset """wn18rr""" +397 2 model """kg2e""" +397 2 loss """crossentropy""" +397 2 regularizer """no""" +397 2 optimizer """adam""" +397 2 training_loop """lcwa""" +397 2 evaluator """rankbased""" +398 1 model.embedding_dim 0.0 +398 1 model.c_min 0.020949155227070866 +398 1 model.c_max 6.20378792767683 +398 1 optimizer.lr 0.0032887561337797346 +398 1 training.batch_size 1.0 +398 1 training.label_smoothing 0.003442445882222274 +398 2 model.embedding_dim 0.0 +398 2 model.c_min 0.01073078823650913 +398 2 model.c_max 9.296734259687902 +398 2 optimizer.lr 0.0032034606938943395 +398 2 training.batch_size 1.0 +398 2 training.label_smoothing 0.0016999931220068043 +398 3 model.embedding_dim 1.0 +398 3 model.c_min 0.01632987550929811 +398 3 model.c_max 4.4308313205588545 +398 3 optimizer.lr 
0.0016779173306710469 +398 3 training.batch_size 1.0 +398 3 training.label_smoothing 0.018748857938011818 +398 1 dataset """wn18rr""" +398 1 model """kg2e""" +398 1 loss """crossentropy""" +398 1 regularizer """no""" +398 1 optimizer """adam""" +398 1 training_loop """lcwa""" +398 1 evaluator """rankbased""" +398 2 dataset """wn18rr""" +398 2 model """kg2e""" +398 2 loss """crossentropy""" +398 2 regularizer """no""" +398 2 optimizer """adam""" +398 2 training_loop """lcwa""" +398 2 evaluator """rankbased""" +398 3 dataset """wn18rr""" +398 3 model """kg2e""" +398 3 loss """crossentropy""" +398 3 regularizer """no""" +398 3 optimizer """adam""" +398 3 training_loop """lcwa""" +398 3 evaluator """rankbased""" +399 1 model.embedding_dim 2.0 +399 1 model.c_min 0.04069119489434297 +399 1 model.c_max 5.266066818674675 +399 1 optimizer.lr 0.008796734638298713 +399 1 training.batch_size 2.0 +399 1 training.label_smoothing 0.015105666487946935 +399 2 model.embedding_dim 2.0 +399 2 model.c_min 0.010076081010539677 +399 2 model.c_max 3.4376935392691506 +399 2 optimizer.lr 0.0010613355976067079 +399 2 training.batch_size 0.0 +399 2 training.label_smoothing 0.8368349643625429 +399 3 model.embedding_dim 1.0 +399 3 model.c_min 0.02300202212923884 +399 3 model.c_max 7.719134208101167 +399 3 optimizer.lr 0.05478344377020757 +399 3 training.batch_size 0.0 +399 3 training.label_smoothing 0.019516136276268765 +399 1 dataset """wn18rr""" +399 1 model """kg2e""" +399 1 loss """bceaftersigmoid""" +399 1 regularizer """no""" +399 1 optimizer """adam""" +399 1 training_loop """lcwa""" +399 1 evaluator """rankbased""" +399 2 dataset """wn18rr""" +399 2 model """kg2e""" +399 2 loss """bceaftersigmoid""" +399 2 regularizer """no""" +399 2 optimizer """adam""" +399 2 training_loop """lcwa""" +399 2 evaluator """rankbased""" +399 3 dataset """wn18rr""" +399 3 model """kg2e""" +399 3 loss """bceaftersigmoid""" +399 3 regularizer """no""" +399 3 optimizer """adam""" +399 3 training_loop 
"""lcwa""" +399 3 evaluator """rankbased""" +400 1 model.embedding_dim 0.0 +400 1 model.c_min 0.0132817247547674 +400 1 model.c_max 2.360735230317271 +400 1 optimizer.lr 0.022401218066038335 +400 1 training.batch_size 2.0 +400 1 training.label_smoothing 0.2680989529525321 +400 2 model.embedding_dim 2.0 +400 2 model.c_min 0.028826368613569697 +400 2 model.c_max 1.0855510687098806 +400 2 optimizer.lr 0.056210232591111936 +400 2 training.batch_size 0.0 +400 2 training.label_smoothing 0.07697803175972456 +400 3 model.embedding_dim 2.0 +400 3 model.c_min 0.02633733960232291 +400 3 model.c_max 5.8577197884493915 +400 3 optimizer.lr 0.051945823009466575 +400 3 training.batch_size 1.0 +400 3 training.label_smoothing 0.6204364434709088 +400 4 model.embedding_dim 2.0 +400 4 model.c_min 0.014382888873578806 +400 4 model.c_max 9.32439886093745 +400 4 optimizer.lr 0.011536311696552122 +400 4 training.batch_size 2.0 +400 4 training.label_smoothing 0.044185812379103656 +400 5 model.embedding_dim 1.0 +400 5 model.c_min 0.09813897915244692 +400 5 model.c_max 1.6846570517602255 +400 5 optimizer.lr 0.027204906716902594 +400 5 training.batch_size 2.0 +400 5 training.label_smoothing 0.11205414378294383 +400 6 model.embedding_dim 0.0 +400 6 model.c_min 0.045251695493746656 +400 6 model.c_max 6.724800378197367 +400 6 optimizer.lr 0.011664515940129003 +400 6 training.batch_size 1.0 +400 6 training.label_smoothing 0.3709156772425533 +400 7 model.embedding_dim 0.0 +400 7 model.c_min 0.02503359929828692 +400 7 model.c_max 3.260814478064726 +400 7 optimizer.lr 0.02130754505828102 +400 7 training.batch_size 2.0 +400 7 training.label_smoothing 0.0035860065727546706 +400 1 dataset """wn18rr""" +400 1 model """kg2e""" +400 1 loss """bceaftersigmoid""" +400 1 regularizer """no""" +400 1 optimizer """adam""" +400 1 training_loop """lcwa""" +400 1 evaluator """rankbased""" +400 2 dataset """wn18rr""" +400 2 model """kg2e""" +400 2 loss """bceaftersigmoid""" +400 2 regularizer """no""" +400 2 
optimizer """adam""" +400 2 training_loop """lcwa""" +400 2 evaluator """rankbased""" +400 3 dataset """wn18rr""" +400 3 model """kg2e""" +400 3 loss """bceaftersigmoid""" +400 3 regularizer """no""" +400 3 optimizer """adam""" +400 3 training_loop """lcwa""" +400 3 evaluator """rankbased""" +400 4 dataset """wn18rr""" +400 4 model """kg2e""" +400 4 loss """bceaftersigmoid""" +400 4 regularizer """no""" +400 4 optimizer """adam""" +400 4 training_loop """lcwa""" +400 4 evaluator """rankbased""" +400 5 dataset """wn18rr""" +400 5 model """kg2e""" +400 5 loss """bceaftersigmoid""" +400 5 regularizer """no""" +400 5 optimizer """adam""" +400 5 training_loop """lcwa""" +400 5 evaluator """rankbased""" +400 6 dataset """wn18rr""" +400 6 model """kg2e""" +400 6 loss """bceaftersigmoid""" +400 6 regularizer """no""" +400 6 optimizer """adam""" +400 6 training_loop """lcwa""" +400 6 evaluator """rankbased""" +400 7 dataset """wn18rr""" +400 7 model """kg2e""" +400 7 loss """bceaftersigmoid""" +400 7 regularizer """no""" +400 7 optimizer """adam""" +400 7 training_loop """lcwa""" +400 7 evaluator """rankbased""" +401 1 model.embedding_dim 0.0 +401 1 model.c_min 0.05197654490562962 +401 1 model.c_max 5.738091012623004 +401 1 optimizer.lr 0.0070186051042554895 +401 1 training.batch_size 2.0 +401 1 training.label_smoothing 0.13918077670260567 +401 2 model.embedding_dim 1.0 +401 2 model.c_min 0.018861991037081256 +401 2 model.c_max 4.384431048102308 +401 2 optimizer.lr 0.00549991508509822 +401 2 training.batch_size 0.0 +401 2 training.label_smoothing 0.00288101551620832 +401 1 dataset """wn18rr""" +401 1 model """kg2e""" +401 1 loss """softplus""" +401 1 regularizer """no""" +401 1 optimizer """adam""" +401 1 training_loop """lcwa""" +401 1 evaluator """rankbased""" +401 2 dataset """wn18rr""" +401 2 model """kg2e""" +401 2 loss """softplus""" +401 2 regularizer """no""" +401 2 optimizer """adam""" +401 2 training_loop """lcwa""" +401 2 evaluator """rankbased""" +402 1 
model.embedding_dim 0.0 +402 1 model.c_min 0.08083747517396765 +402 1 model.c_max 7.008034768910205 +402 1 optimizer.lr 0.012993252822571067 +402 1 training.batch_size 2.0 +402 1 training.label_smoothing 0.7439056773292865 +402 2 model.embedding_dim 1.0 +402 2 model.c_min 0.039012815150962186 +402 2 model.c_max 1.8344132137341216 +402 2 optimizer.lr 0.0019366063706735622 +402 2 training.batch_size 0.0 +402 2 training.label_smoothing 0.03987905861196216 +402 3 model.embedding_dim 2.0 +402 3 model.c_min 0.025844931458840707 +402 3 model.c_max 7.921522002390576 +402 3 optimizer.lr 0.011308070327048699 +402 3 training.batch_size 2.0 +402 3 training.label_smoothing 0.7354015336557294 +402 1 dataset """wn18rr""" +402 1 model """kg2e""" +402 1 loss """softplus""" +402 1 regularizer """no""" +402 1 optimizer """adam""" +402 1 training_loop """lcwa""" +402 1 evaluator """rankbased""" +402 2 dataset """wn18rr""" +402 2 model """kg2e""" +402 2 loss """softplus""" +402 2 regularizer """no""" +402 2 optimizer """adam""" +402 2 training_loop """lcwa""" +402 2 evaluator """rankbased""" +402 3 dataset """wn18rr""" +402 3 model """kg2e""" +402 3 loss """softplus""" +402 3 regularizer """no""" +402 3 optimizer """adam""" +402 3 training_loop """lcwa""" +402 3 evaluator """rankbased""" +403 1 model.embedding_dim 1.0 +403 1 optimizer.lr 0.012986137913510122 +403 1 training.batch_size 1.0 +403 1 training.label_smoothing 0.04135679954017376 +403 2 model.embedding_dim 0.0 +403 2 optimizer.lr 0.005636970937150903 +403 2 training.batch_size 1.0 +403 2 training.label_smoothing 0.03530817072549801 +403 1 dataset """fb15k237""" +403 1 model """ntn""" +403 1 loss """bceaftersigmoid""" +403 1 regularizer """no""" +403 1 optimizer """adam""" +403 1 training_loop """lcwa""" +403 1 evaluator """rankbased""" +403 2 dataset """fb15k237""" +403 2 model """ntn""" +403 2 loss """bceaftersigmoid""" +403 2 regularizer """no""" +403 2 optimizer """adam""" +403 2 training_loop """lcwa""" +403 2 evaluator 
"""rankbased""" +404 1 model.embedding_dim 2.0 +404 1 optimizer.lr 0.0035209080275230425 +404 1 training.batch_size 0.0 +404 1 training.label_smoothing 0.0048162185640198965 +404 1 dataset """fb15k237""" +404 1 model """ntn""" +404 1 loss """bceaftersigmoid""" +404 1 regularizer """no""" +404 1 optimizer """adam""" +404 1 training_loop """lcwa""" +404 1 evaluator """rankbased""" +405 1 model.embedding_dim 1.0 +405 1 loss.margin 2.032869252925782 +405 1 loss.adversarial_temperature 0.5882560634713196 +405 1 optimizer.lr 0.06033886642316311 +405 1 negative_sampler.num_negs_per_pos 40.0 +405 1 training.batch_size 2.0 +405 2 model.embedding_dim 0.0 +405 2 loss.margin 20.424467531200417 +405 2 loss.adversarial_temperature 0.7438996880704645 +405 2 optimizer.lr 0.03987713942502831 +405 2 negative_sampler.num_negs_per_pos 18.0 +405 2 training.batch_size 0.0 +405 3 model.embedding_dim 2.0 +405 3 loss.margin 25.15931087498938 +405 3 loss.adversarial_temperature 0.9540942341836705 +405 3 optimizer.lr 0.01065092923372249 +405 3 negative_sampler.num_negs_per_pos 66.0 +405 3 training.batch_size 0.0 +405 4 model.embedding_dim 0.0 +405 4 loss.margin 8.30330590661972 +405 4 loss.adversarial_temperature 0.9879946236397779 +405 4 optimizer.lr 0.00289705342049766 +405 4 negative_sampler.num_negs_per_pos 37.0 +405 4 training.batch_size 0.0 +405 1 dataset """fb15k237""" +405 1 model """ntn""" +405 1 loss """nssa""" +405 1 regularizer """no""" +405 1 optimizer """adam""" +405 1 training_loop """owa""" +405 1 negative_sampler """basic""" +405 1 evaluator """rankbased""" +405 2 dataset """fb15k237""" +405 2 model """ntn""" +405 2 loss """nssa""" +405 2 regularizer """no""" +405 2 optimizer """adam""" +405 2 training_loop """owa""" +405 2 negative_sampler """basic""" +405 2 evaluator """rankbased""" +405 3 dataset """fb15k237""" +405 3 model """ntn""" +405 3 loss """nssa""" +405 3 regularizer """no""" +405 3 optimizer """adam""" +405 3 training_loop """owa""" +405 3 negative_sampler 
"""basic""" +405 3 evaluator """rankbased""" +405 4 dataset """fb15k237""" +405 4 model """ntn""" +405 4 loss """nssa""" +405 4 regularizer """no""" +405 4 optimizer """adam""" +405 4 training_loop """owa""" +405 4 negative_sampler """basic""" +405 4 evaluator """rankbased""" +406 1 model.embedding_dim 1.0 +406 1 loss.margin 23.964360921474974 +406 1 loss.adversarial_temperature 0.6625889818612857 +406 1 optimizer.lr 0.0026186675355235074 +406 1 negative_sampler.num_negs_per_pos 60.0 +406 1 training.batch_size 0.0 +406 2 model.embedding_dim 2.0 +406 2 loss.margin 2.2947698947416035 +406 2 loss.adversarial_temperature 0.7643568746735622 +406 2 optimizer.lr 0.009296186021634928 +406 2 negative_sampler.num_negs_per_pos 44.0 +406 2 training.batch_size 0.0 +406 3 model.embedding_dim 2.0 +406 3 loss.margin 28.992580417223152 +406 3 loss.adversarial_temperature 0.6759742840605292 +406 3 optimizer.lr 0.00162145722677549 +406 3 negative_sampler.num_negs_per_pos 88.0 +406 3 training.batch_size 0.0 +406 4 model.embedding_dim 0.0 +406 4 loss.margin 22.85273365358577 +406 4 loss.adversarial_temperature 0.3577065154618035 +406 4 optimizer.lr 0.053843165581595374 +406 4 negative_sampler.num_negs_per_pos 27.0 +406 4 training.batch_size 0.0 +406 1 dataset """fb15k237""" +406 1 model """ntn""" +406 1 loss """nssa""" +406 1 regularizer """no""" +406 1 optimizer """adam""" +406 1 training_loop """owa""" +406 1 negative_sampler """basic""" +406 1 evaluator """rankbased""" +406 2 dataset """fb15k237""" +406 2 model """ntn""" +406 2 loss """nssa""" +406 2 regularizer """no""" +406 2 optimizer """adam""" +406 2 training_loop """owa""" +406 2 negative_sampler """basic""" +406 2 evaluator """rankbased""" +406 3 dataset """fb15k237""" +406 3 model """ntn""" +406 3 loss """nssa""" +406 3 regularizer """no""" +406 3 optimizer """adam""" +406 3 training_loop """owa""" +406 3 negative_sampler """basic""" +406 3 evaluator """rankbased""" +406 4 dataset """fb15k237""" +406 4 model """ntn""" +406 4 
loss """nssa""" +406 4 regularizer """no""" +406 4 optimizer """adam""" +406 4 training_loop """owa""" +406 4 negative_sampler """basic""" +406 4 evaluator """rankbased""" +407 1 model.embedding_dim 2.0 +407 1 loss.margin 4.364731345069678 +407 1 optimizer.lr 0.005330369716756746 +407 1 negative_sampler.num_negs_per_pos 79.0 +407 1 training.batch_size 2.0 +407 1 dataset """fb15k237""" +407 1 model """ntn""" +407 1 loss """marginranking""" +407 1 regularizer """no""" +407 1 optimizer """adam""" +407 1 training_loop """owa""" +407 1 negative_sampler """basic""" +407 1 evaluator """rankbased""" +408 1 model.embedding_dim 0.0 +408 1 loss.margin 8.156637004322418 +408 1 optimizer.lr 0.00738751946430548 +408 1 negative_sampler.num_negs_per_pos 5.0 +408 1 training.batch_size 1.0 +408 2 model.embedding_dim 2.0 +408 2 loss.margin 9.545014138500497 +408 2 optimizer.lr 0.01289363261832492 +408 2 negative_sampler.num_negs_per_pos 32.0 +408 2 training.batch_size 0.0 +408 3 model.embedding_dim 2.0 +408 3 loss.margin 1.690022342416543 +408 3 optimizer.lr 0.002914903750419276 +408 3 negative_sampler.num_negs_per_pos 72.0 +408 3 training.batch_size 2.0 +408 1 dataset """fb15k237""" +408 1 model """ntn""" +408 1 loss """marginranking""" +408 1 regularizer """no""" +408 1 optimizer """adam""" +408 1 training_loop """owa""" +408 1 negative_sampler """basic""" +408 1 evaluator """rankbased""" +408 2 dataset """fb15k237""" +408 2 model """ntn""" +408 2 loss """marginranking""" +408 2 regularizer """no""" +408 2 optimizer """adam""" +408 2 training_loop """owa""" +408 2 negative_sampler """basic""" +408 2 evaluator """rankbased""" +408 3 dataset """fb15k237""" +408 3 model """ntn""" +408 3 loss """marginranking""" +408 3 regularizer """no""" +408 3 optimizer """adam""" +408 3 training_loop """owa""" +408 3 negative_sampler """basic""" +408 3 evaluator """rankbased""" +409 1 model.embedding_dim 0.0 +409 1 optimizer.lr 0.08515436077822439 +409 1 negative_sampler.num_negs_per_pos 77.0 +409 
1 training.batch_size 1.0 +409 2 model.embedding_dim 2.0 +409 2 optimizer.lr 0.03529399380275555 +409 2 negative_sampler.num_negs_per_pos 3.0 +409 2 training.batch_size 2.0 +409 3 model.embedding_dim 0.0 +409 3 optimizer.lr 0.0017031432927630903 +409 3 negative_sampler.num_negs_per_pos 68.0 +409 3 training.batch_size 0.0 +409 4 model.embedding_dim 0.0 +409 4 optimizer.lr 0.002125448548080965 +409 4 negative_sampler.num_negs_per_pos 97.0 +409 4 training.batch_size 2.0 +409 5 model.embedding_dim 1.0 +409 5 optimizer.lr 0.002062498198793621 +409 5 negative_sampler.num_negs_per_pos 87.0 +409 5 training.batch_size 1.0 +409 6 model.embedding_dim 1.0 +409 6 optimizer.lr 0.025236720538159695 +409 6 negative_sampler.num_negs_per_pos 18.0 +409 6 training.batch_size 2.0 +409 7 model.embedding_dim 1.0 +409 7 optimizer.lr 0.0014231683878716843 +409 7 negative_sampler.num_negs_per_pos 27.0 +409 7 training.batch_size 2.0 +409 1 dataset """fb15k237""" +409 1 model """ntn""" +409 1 loss """bceaftersigmoid""" +409 1 regularizer """no""" +409 1 optimizer """adam""" +409 1 training_loop """owa""" +409 1 negative_sampler """basic""" +409 1 evaluator """rankbased""" +409 2 dataset """fb15k237""" +409 2 model """ntn""" +409 2 loss """bceaftersigmoid""" +409 2 regularizer """no""" +409 2 optimizer """adam""" +409 2 training_loop """owa""" +409 2 negative_sampler """basic""" +409 2 evaluator """rankbased""" +409 3 dataset """fb15k237""" +409 3 model """ntn""" +409 3 loss """bceaftersigmoid""" +409 3 regularizer """no""" +409 3 optimizer """adam""" +409 3 training_loop """owa""" +409 3 negative_sampler """basic""" +409 3 evaluator """rankbased""" +409 4 dataset """fb15k237""" +409 4 model """ntn""" +409 4 loss """bceaftersigmoid""" +409 4 regularizer """no""" +409 4 optimizer """adam""" +409 4 training_loop """owa""" +409 4 negative_sampler """basic""" +409 4 evaluator """rankbased""" +409 5 dataset """fb15k237""" +409 5 model """ntn""" +409 5 loss """bceaftersigmoid""" +409 5 regularizer 
"""no""" +409 5 optimizer """adam""" +409 5 training_loop """owa""" +409 5 negative_sampler """basic""" +409 5 evaluator """rankbased""" +409 6 dataset """fb15k237""" +409 6 model """ntn""" +409 6 loss """bceaftersigmoid""" +409 6 regularizer """no""" +409 6 optimizer """adam""" +409 6 training_loop """owa""" +409 6 negative_sampler """basic""" +409 6 evaluator """rankbased""" +409 7 dataset """fb15k237""" +409 7 model """ntn""" +409 7 loss """bceaftersigmoid""" +409 7 regularizer """no""" +409 7 optimizer """adam""" +409 7 training_loop """owa""" +409 7 negative_sampler """basic""" +409 7 evaluator """rankbased""" +410 1 model.embedding_dim 0.0 +410 1 optimizer.lr 0.011815826049451871 +410 1 negative_sampler.num_negs_per_pos 56.0 +410 1 training.batch_size 2.0 +410 2 model.embedding_dim 2.0 +410 2 optimizer.lr 0.03577394984199391 +410 2 negative_sampler.num_negs_per_pos 3.0 +410 2 training.batch_size 2.0 +410 3 model.embedding_dim 2.0 +410 3 optimizer.lr 0.018770560476260377 +410 3 negative_sampler.num_negs_per_pos 67.0 +410 3 training.batch_size 2.0 +410 4 model.embedding_dim 0.0 +410 4 optimizer.lr 0.012317908268391568 +410 4 negative_sampler.num_negs_per_pos 19.0 +410 4 training.batch_size 1.0 +410 5 model.embedding_dim 2.0 +410 5 optimizer.lr 0.011450226558597978 +410 5 negative_sampler.num_negs_per_pos 29.0 +410 5 training.batch_size 2.0 +410 6 model.embedding_dim 0.0 +410 6 optimizer.lr 0.004389111232622652 +410 6 negative_sampler.num_negs_per_pos 67.0 +410 6 training.batch_size 1.0 +410 7 model.embedding_dim 1.0 +410 7 optimizer.lr 0.009658936571735619 +410 7 negative_sampler.num_negs_per_pos 26.0 +410 7 training.batch_size 2.0 +410 1 dataset """fb15k237""" +410 1 model """ntn""" +410 1 loss """bceaftersigmoid""" +410 1 regularizer """no""" +410 1 optimizer """adam""" +410 1 training_loop """owa""" +410 1 negative_sampler """basic""" +410 1 evaluator """rankbased""" +410 2 dataset """fb15k237""" +410 2 model """ntn""" +410 2 loss """bceaftersigmoid""" +410 
2 regularizer """no""" +410 2 optimizer """adam""" +410 2 training_loop """owa""" +410 2 negative_sampler """basic""" +410 2 evaluator """rankbased""" +410 3 dataset """fb15k237""" +410 3 model """ntn""" +410 3 loss """bceaftersigmoid""" +410 3 regularizer """no""" +410 3 optimizer """adam""" +410 3 training_loop """owa""" +410 3 negative_sampler """basic""" +410 3 evaluator """rankbased""" +410 4 dataset """fb15k237""" +410 4 model """ntn""" +410 4 loss """bceaftersigmoid""" +410 4 regularizer """no""" +410 4 optimizer """adam""" +410 4 training_loop """owa""" +410 4 negative_sampler """basic""" +410 4 evaluator """rankbased""" +410 5 dataset """fb15k237""" +410 5 model """ntn""" +410 5 loss """bceaftersigmoid""" +410 5 regularizer """no""" +410 5 optimizer """adam""" +410 5 training_loop """owa""" +410 5 negative_sampler """basic""" +410 5 evaluator """rankbased""" +410 6 dataset """fb15k237""" +410 6 model """ntn""" +410 6 loss """bceaftersigmoid""" +410 6 regularizer """no""" +410 6 optimizer """adam""" +410 6 training_loop """owa""" +410 6 negative_sampler """basic""" +410 6 evaluator """rankbased""" +410 7 dataset """fb15k237""" +410 7 model """ntn""" +410 7 loss """bceaftersigmoid""" +410 7 regularizer """no""" +410 7 optimizer """adam""" +410 7 training_loop """owa""" +410 7 negative_sampler """basic""" +410 7 evaluator """rankbased""" +411 1 model.embedding_dim 1.0 +411 1 optimizer.lr 0.03159757638002468 +411 1 negative_sampler.num_negs_per_pos 38.0 +411 1 training.batch_size 0.0 +411 2 model.embedding_dim 1.0 +411 2 optimizer.lr 0.001859159449302801 +411 2 negative_sampler.num_negs_per_pos 43.0 +411 2 training.batch_size 2.0 +411 1 dataset """fb15k237""" +411 1 model """ntn""" +411 1 loss """softplus""" +411 1 regularizer """no""" +411 1 optimizer """adam""" +411 1 training_loop """owa""" +411 1 negative_sampler """basic""" +411 1 evaluator """rankbased""" +411 2 dataset """fb15k237""" +411 2 model """ntn""" +411 2 loss """softplus""" +411 2 regularizer 
"""no""" +411 2 optimizer """adam""" +411 2 training_loop """owa""" +411 2 negative_sampler """basic""" +411 2 evaluator """rankbased""" +412 1 model.embedding_dim 0.0 +412 1 optimizer.lr 0.003756449642227109 +412 1 negative_sampler.num_negs_per_pos 34.0 +412 1 training.batch_size 0.0 +412 2 model.embedding_dim 2.0 +412 2 optimizer.lr 0.012056699753361872 +412 2 negative_sampler.num_negs_per_pos 15.0 +412 2 training.batch_size 0.0 +412 3 model.embedding_dim 2.0 +412 3 optimizer.lr 0.027447632354705136 +412 3 negative_sampler.num_negs_per_pos 57.0 +412 3 training.batch_size 1.0 +412 4 model.embedding_dim 2.0 +412 4 optimizer.lr 0.0011567200832218719 +412 4 negative_sampler.num_negs_per_pos 5.0 +412 4 training.batch_size 0.0 +412 1 dataset """fb15k237""" +412 1 model """ntn""" +412 1 loss """softplus""" +412 1 regularizer """no""" +412 1 optimizer """adam""" +412 1 training_loop """owa""" +412 1 negative_sampler """basic""" +412 1 evaluator """rankbased""" +412 2 dataset """fb15k237""" +412 2 model """ntn""" +412 2 loss """softplus""" +412 2 regularizer """no""" +412 2 optimizer """adam""" +412 2 training_loop """owa""" +412 2 negative_sampler """basic""" +412 2 evaluator """rankbased""" +412 3 dataset """fb15k237""" +412 3 model """ntn""" +412 3 loss """softplus""" +412 3 regularizer """no""" +412 3 optimizer """adam""" +412 3 training_loop """owa""" +412 3 negative_sampler """basic""" +412 3 evaluator """rankbased""" +412 4 dataset """fb15k237""" +412 4 model """ntn""" +412 4 loss """softplus""" +412 4 regularizer """no""" +412 4 optimizer """adam""" +412 4 training_loop """owa""" +412 4 negative_sampler """basic""" +412 4 evaluator """rankbased""" +413 1 model.embedding_dim 2.0 +413 1 optimizer.lr 0.09572799158958216 +413 1 training.batch_size 2.0 +413 1 training.label_smoothing 0.02043445989526733 +413 1 dataset """fb15k237""" +413 1 model """ntn""" +413 1 loss """crossentropy""" +413 1 regularizer """no""" +413 1 optimizer """adam""" +413 1 training_loop 
"""lcwa""" +413 1 evaluator """rankbased""" +414 1 model.embedding_dim 1.0 +414 1 optimizer.lr 0.005582742808622058 +414 1 training.batch_size 0.0 +414 1 training.label_smoothing 0.005561606330578477 +414 2 model.embedding_dim 1.0 +414 2 optimizer.lr 0.001424639293051331 +414 2 training.batch_size 0.0 +414 2 training.label_smoothing 0.0010872171330896033 +414 1 dataset """fb15k237""" +414 1 model """ntn""" +414 1 loss """crossentropy""" +414 1 regularizer """no""" +414 1 optimizer """adam""" +414 1 training_loop """lcwa""" +414 1 evaluator """rankbased""" +414 2 dataset """fb15k237""" +414 2 model """ntn""" +414 2 loss """crossentropy""" +414 2 regularizer """no""" +414 2 optimizer """adam""" +414 2 training_loop """lcwa""" +414 2 evaluator """rankbased""" +415 1 model.embedding_dim 0.0 +415 1 training.batch_size 1.0 +415 1 training.label_smoothing 0.462144509149457 +415 2 model.embedding_dim 1.0 +415 2 training.batch_size 1.0 +415 2 training.label_smoothing 0.15292648074383766 +415 3 model.embedding_dim 2.0 +415 3 training.batch_size 1.0 +415 3 training.label_smoothing 0.090379918991508 +415 4 model.embedding_dim 1.0 +415 4 training.batch_size 2.0 +415 4 training.label_smoothing 0.012321034926626395 +415 5 model.embedding_dim 1.0 +415 5 training.batch_size 0.0 +415 5 training.label_smoothing 0.05004365303494199 +415 6 model.embedding_dim 2.0 +415 6 training.batch_size 1.0 +415 6 training.label_smoothing 0.17026783917632365 +415 7 model.embedding_dim 0.0 +415 7 training.batch_size 2.0 +415 7 training.label_smoothing 0.6005745262408989 +415 8 model.embedding_dim 0.0 +415 8 training.batch_size 1.0 +415 8 training.label_smoothing 0.17424254674232084 +415 9 model.embedding_dim 2.0 +415 9 training.batch_size 2.0 +415 9 training.label_smoothing 0.0017713929792469254 +415 10 model.embedding_dim 1.0 +415 10 training.batch_size 0.0 +415 10 training.label_smoothing 0.008619806530875178 +415 11 model.embedding_dim 0.0 +415 11 training.batch_size 0.0 +415 11 
training.label_smoothing 0.0016453558092185837 +415 12 model.embedding_dim 0.0 +415 12 training.batch_size 1.0 +415 12 training.label_smoothing 0.003126134964581946 +415 13 model.embedding_dim 1.0 +415 13 training.batch_size 1.0 +415 13 training.label_smoothing 0.18705029029106643 +415 14 model.embedding_dim 2.0 +415 14 training.batch_size 0.0 +415 14 training.label_smoothing 0.006070465804084194 +415 15 model.embedding_dim 0.0 +415 15 training.batch_size 1.0 +415 15 training.label_smoothing 0.05243899388117286 +415 16 model.embedding_dim 2.0 +415 16 training.batch_size 2.0 +415 16 training.label_smoothing 0.382328932078046 +415 17 model.embedding_dim 0.0 +415 17 training.batch_size 1.0 +415 17 training.label_smoothing 0.03475715234979292 +415 18 model.embedding_dim 2.0 +415 18 training.batch_size 2.0 +415 18 training.label_smoothing 0.5714303622196393 +415 19 model.embedding_dim 0.0 +415 19 training.batch_size 2.0 +415 19 training.label_smoothing 0.23079614398048243 +415 20 model.embedding_dim 0.0 +415 20 training.batch_size 0.0 +415 20 training.label_smoothing 0.002098418955197351 +415 21 model.embedding_dim 2.0 +415 21 training.batch_size 1.0 +415 21 training.label_smoothing 0.5091870974256976 +415 22 model.embedding_dim 1.0 +415 22 training.batch_size 1.0 +415 22 training.label_smoothing 0.022894057209799038 +415 23 model.embedding_dim 0.0 +415 23 training.batch_size 0.0 +415 23 training.label_smoothing 0.03616603669759203 +415 24 model.embedding_dim 0.0 +415 24 training.batch_size 1.0 +415 24 training.label_smoothing 0.08493514623309786 +415 25 model.embedding_dim 0.0 +415 25 training.batch_size 2.0 +415 25 training.label_smoothing 0.005356179587255604 +415 26 model.embedding_dim 2.0 +415 26 training.batch_size 2.0 +415 26 training.label_smoothing 0.0014426133701977756 +415 27 model.embedding_dim 1.0 +415 27 training.batch_size 0.0 +415 27 training.label_smoothing 0.4207770104566265 +415 28 model.embedding_dim 2.0 +415 28 training.batch_size 2.0 +415 28 
training.label_smoothing 0.57170011635696 +415 29 model.embedding_dim 1.0 +415 29 training.batch_size 1.0 +415 29 training.label_smoothing 0.011912747682336265 +415 30 model.embedding_dim 0.0 +415 30 training.batch_size 2.0 +415 30 training.label_smoothing 0.011053036220063596 +415 31 model.embedding_dim 2.0 +415 31 training.batch_size 2.0 +415 31 training.label_smoothing 0.024910522543679273 +415 32 model.embedding_dim 1.0 +415 32 training.batch_size 0.0 +415 32 training.label_smoothing 0.395701127640033 +415 33 model.embedding_dim 1.0 +415 33 training.batch_size 2.0 +415 33 training.label_smoothing 0.13418989178952617 +415 34 model.embedding_dim 0.0 +415 34 training.batch_size 2.0 +415 34 training.label_smoothing 0.14581996368702113 +415 35 model.embedding_dim 0.0 +415 35 training.batch_size 2.0 +415 35 training.label_smoothing 0.035648437245574566 +415 36 model.embedding_dim 1.0 +415 36 training.batch_size 1.0 +415 36 training.label_smoothing 0.05943142415761352 +415 37 model.embedding_dim 1.0 +415 37 training.batch_size 1.0 +415 37 training.label_smoothing 0.5879018349908286 +415 38 model.embedding_dim 0.0 +415 38 training.batch_size 0.0 +415 38 training.label_smoothing 0.7913394536899597 +415 39 model.embedding_dim 2.0 +415 39 training.batch_size 0.0 +415 39 training.label_smoothing 0.5446493740751144 +415 40 model.embedding_dim 2.0 +415 40 training.batch_size 1.0 +415 40 training.label_smoothing 0.057579918093504164 +415 41 model.embedding_dim 0.0 +415 41 training.batch_size 0.0 +415 41 training.label_smoothing 0.47577727461819985 +415 42 model.embedding_dim 0.0 +415 42 training.batch_size 1.0 +415 42 training.label_smoothing 0.0010537376573142962 +415 43 model.embedding_dim 1.0 +415 43 training.batch_size 0.0 +415 43 training.label_smoothing 0.011372222597926746 +415 44 model.embedding_dim 1.0 +415 44 training.batch_size 0.0 +415 44 training.label_smoothing 0.00486507747799234 +415 45 model.embedding_dim 2.0 +415 45 training.batch_size 1.0 +415 45 
training.label_smoothing 0.27164525652504634 +415 46 model.embedding_dim 1.0 +415 46 training.batch_size 1.0 +415 46 training.label_smoothing 0.09208223943114544 +415 47 model.embedding_dim 1.0 +415 47 training.batch_size 0.0 +415 47 training.label_smoothing 0.005548548654810284 +415 48 model.embedding_dim 2.0 +415 48 training.batch_size 0.0 +415 48 training.label_smoothing 0.07164539476511139 +415 49 model.embedding_dim 0.0 +415 49 training.batch_size 2.0 +415 49 training.label_smoothing 0.0013663576363695709 +415 50 model.embedding_dim 0.0 +415 50 training.batch_size 0.0 +415 50 training.label_smoothing 0.008198794603098983 +415 51 model.embedding_dim 2.0 +415 51 training.batch_size 2.0 +415 51 training.label_smoothing 0.03925306058519736 +415 52 model.embedding_dim 0.0 +415 52 training.batch_size 1.0 +415 52 training.label_smoothing 0.9313825052535989 +415 53 model.embedding_dim 0.0 +415 53 training.batch_size 0.0 +415 53 training.label_smoothing 0.7045673508460117 +415 54 model.embedding_dim 0.0 +415 54 training.batch_size 0.0 +415 54 training.label_smoothing 0.02884984671999157 +415 55 model.embedding_dim 1.0 +415 55 training.batch_size 0.0 +415 55 training.label_smoothing 0.02584547545073604 +415 56 model.embedding_dim 2.0 +415 56 training.batch_size 0.0 +415 56 training.label_smoothing 0.006317342427329502 +415 57 model.embedding_dim 2.0 +415 57 training.batch_size 1.0 +415 57 training.label_smoothing 0.010642080416339987 +415 58 model.embedding_dim 2.0 +415 58 training.batch_size 2.0 +415 58 training.label_smoothing 0.01426063815206824 +415 59 model.embedding_dim 0.0 +415 59 training.batch_size 1.0 +415 59 training.label_smoothing 0.01665655262590769 +415 60 model.embedding_dim 0.0 +415 60 training.batch_size 0.0 +415 60 training.label_smoothing 0.39867657549410557 +415 61 model.embedding_dim 1.0 +415 61 training.batch_size 0.0 +415 61 training.label_smoothing 0.9981339155397413 +415 62 model.embedding_dim 2.0 +415 62 training.batch_size 0.0 +415 62 
training.label_smoothing 0.082087198751696 +415 63 model.embedding_dim 0.0 +415 63 training.batch_size 1.0 +415 63 training.label_smoothing 0.3074557348326481 +415 64 model.embedding_dim 1.0 +415 64 training.batch_size 1.0 +415 64 training.label_smoothing 0.009169442340138766 +415 65 model.embedding_dim 2.0 +415 65 training.batch_size 2.0 +415 65 training.label_smoothing 0.006802192523874717 +415 66 model.embedding_dim 1.0 +415 66 training.batch_size 0.0 +415 66 training.label_smoothing 0.04784476063875694 +415 67 model.embedding_dim 0.0 +415 67 training.batch_size 2.0 +415 67 training.label_smoothing 0.036499171603247185 +415 68 model.embedding_dim 2.0 +415 68 training.batch_size 0.0 +415 68 training.label_smoothing 0.4865305205256214 +415 69 model.embedding_dim 0.0 +415 69 training.batch_size 0.0 +415 69 training.label_smoothing 0.001149010156881434 +415 70 model.embedding_dim 2.0 +415 70 training.batch_size 0.0 +415 70 training.label_smoothing 0.06561687372401527 +415 71 model.embedding_dim 0.0 +415 71 training.batch_size 0.0 +415 71 training.label_smoothing 0.20244249647670715 +415 72 model.embedding_dim 2.0 +415 72 training.batch_size 2.0 +415 72 training.label_smoothing 0.007228485300001101 +415 73 model.embedding_dim 1.0 +415 73 training.batch_size 0.0 +415 73 training.label_smoothing 0.67902331383328 +415 74 model.embedding_dim 0.0 +415 74 training.batch_size 2.0 +415 74 training.label_smoothing 0.038315484214504265 +415 75 model.embedding_dim 0.0 +415 75 training.batch_size 2.0 +415 75 training.label_smoothing 0.015777302961681117 +415 76 model.embedding_dim 2.0 +415 76 training.batch_size 1.0 +415 76 training.label_smoothing 0.01577518787977391 +415 77 model.embedding_dim 1.0 +415 77 training.batch_size 2.0 +415 77 training.label_smoothing 0.12020724001934072 +415 78 model.embedding_dim 1.0 +415 78 training.batch_size 0.0 +415 78 training.label_smoothing 0.006971598437725069 +415 79 model.embedding_dim 1.0 +415 79 training.batch_size 1.0 +415 79 
training.label_smoothing 0.9114825833173232 +415 80 model.embedding_dim 2.0 +415 80 training.batch_size 2.0 +415 80 training.label_smoothing 0.00316446119526888 +415 81 model.embedding_dim 2.0 +415 81 training.batch_size 2.0 +415 81 training.label_smoothing 0.37181173206992335 +415 82 model.embedding_dim 2.0 +415 82 training.batch_size 0.0 +415 82 training.label_smoothing 0.026437831342487932 +415 83 model.embedding_dim 2.0 +415 83 training.batch_size 0.0 +415 83 training.label_smoothing 0.14571557741801203 +415 84 model.embedding_dim 2.0 +415 84 training.batch_size 0.0 +415 84 training.label_smoothing 0.0016009516742346553 +415 85 model.embedding_dim 2.0 +415 85 training.batch_size 2.0 +415 85 training.label_smoothing 0.40159774417887883 +415 86 model.embedding_dim 1.0 +415 86 training.batch_size 1.0 +415 86 training.label_smoothing 0.010729754895480037 +415 87 model.embedding_dim 0.0 +415 87 training.batch_size 0.0 +415 87 training.label_smoothing 0.003439258032364386 +415 88 model.embedding_dim 2.0 +415 88 training.batch_size 2.0 +415 88 training.label_smoothing 0.001447713750535507 +415 89 model.embedding_dim 1.0 +415 89 training.batch_size 1.0 +415 89 training.label_smoothing 0.03277900382233732 +415 90 model.embedding_dim 0.0 +415 90 training.batch_size 0.0 +415 90 training.label_smoothing 0.0029886794973865956 +415 91 model.embedding_dim 2.0 +415 91 training.batch_size 1.0 +415 91 training.label_smoothing 0.16738261433996934 +415 92 model.embedding_dim 2.0 +415 92 training.batch_size 2.0 +415 92 training.label_smoothing 0.0037094916829888847 +415 93 model.embedding_dim 1.0 +415 93 training.batch_size 2.0 +415 93 training.label_smoothing 0.13633985332242674 +415 94 model.embedding_dim 1.0 +415 94 training.batch_size 2.0 +415 94 training.label_smoothing 0.016702538018303046 +415 95 model.embedding_dim 0.0 +415 95 training.batch_size 1.0 +415 95 training.label_smoothing 0.02109319192158473 +415 96 model.embedding_dim 2.0 +415 96 training.batch_size 2.0 +415 96 
training.label_smoothing 0.6590222697581644 +415 97 model.embedding_dim 2.0 +415 97 training.batch_size 0.0 +415 97 training.label_smoothing 0.0014273783878291618 +415 98 model.embedding_dim 0.0 +415 98 training.batch_size 1.0 +415 98 training.label_smoothing 0.493962210413903 +415 99 model.embedding_dim 1.0 +415 99 training.batch_size 0.0 +415 99 training.label_smoothing 0.0017716511705534632 +415 100 model.embedding_dim 0.0 +415 100 training.batch_size 0.0 +415 100 training.label_smoothing 0.0010048986161136238 +415 1 dataset """kinships""" +415 1 model """ntn""" +415 1 loss """crossentropy""" +415 1 regularizer """no""" +415 1 optimizer """adadelta""" +415 1 training_loop """lcwa""" +415 1 evaluator """rankbased""" +415 2 dataset """kinships""" +415 2 model """ntn""" +415 2 loss """crossentropy""" +415 2 regularizer """no""" +415 2 optimizer """adadelta""" +415 2 training_loop """lcwa""" +415 2 evaluator """rankbased""" +415 3 dataset """kinships""" +415 3 model """ntn""" +415 3 loss """crossentropy""" +415 3 regularizer """no""" +415 3 optimizer """adadelta""" +415 3 training_loop """lcwa""" +415 3 evaluator """rankbased""" +415 4 dataset """kinships""" +415 4 model """ntn""" +415 4 loss """crossentropy""" +415 4 regularizer """no""" +415 4 optimizer """adadelta""" +415 4 training_loop """lcwa""" +415 4 evaluator """rankbased""" +415 5 dataset """kinships""" +415 5 model """ntn""" +415 5 loss """crossentropy""" +415 5 regularizer """no""" +415 5 optimizer """adadelta""" +415 5 training_loop """lcwa""" +415 5 evaluator """rankbased""" +415 6 dataset """kinships""" +415 6 model """ntn""" +415 6 loss """crossentropy""" +415 6 regularizer """no""" +415 6 optimizer """adadelta""" +415 6 training_loop """lcwa""" +415 6 evaluator """rankbased""" +415 7 dataset """kinships""" +415 7 model """ntn""" +415 7 loss """crossentropy""" +415 7 regularizer """no""" +415 7 optimizer """adadelta""" +415 7 training_loop """lcwa""" +415 7 evaluator """rankbased""" +415 8 dataset 
"""kinships""" +415 8 model """ntn""" +415 8 loss """crossentropy""" +415 8 regularizer """no""" +415 8 optimizer """adadelta""" +415 8 training_loop """lcwa""" +415 8 evaluator """rankbased""" +415 9 dataset """kinships""" +415 9 model """ntn""" +415 9 loss """crossentropy""" +415 9 regularizer """no""" +415 9 optimizer """adadelta""" +415 9 training_loop """lcwa""" +415 9 evaluator """rankbased""" +415 10 dataset """kinships""" +415 10 model """ntn""" +415 10 loss """crossentropy""" +415 10 regularizer """no""" +415 10 optimizer """adadelta""" +415 10 training_loop """lcwa""" +415 10 evaluator """rankbased""" +415 11 dataset """kinships""" +415 11 model """ntn""" +415 11 loss """crossentropy""" +415 11 regularizer """no""" +415 11 optimizer """adadelta""" +415 11 training_loop """lcwa""" +415 11 evaluator """rankbased""" +415 12 dataset """kinships""" +415 12 model """ntn""" +415 12 loss """crossentropy""" +415 12 regularizer """no""" +415 12 optimizer """adadelta""" +415 12 training_loop """lcwa""" +415 12 evaluator """rankbased""" +415 13 dataset """kinships""" +415 13 model """ntn""" +415 13 loss """crossentropy""" +415 13 regularizer """no""" +415 13 optimizer """adadelta""" +415 13 training_loop """lcwa""" +415 13 evaluator """rankbased""" +415 14 dataset """kinships""" +415 14 model """ntn""" +415 14 loss """crossentropy""" +415 14 regularizer """no""" +415 14 optimizer """adadelta""" +415 14 training_loop """lcwa""" +415 14 evaluator """rankbased""" +415 15 dataset """kinships""" +415 15 model """ntn""" +415 15 loss """crossentropy""" +415 15 regularizer """no""" +415 15 optimizer """adadelta""" +415 15 training_loop """lcwa""" +415 15 evaluator """rankbased""" +415 16 dataset """kinships""" +415 16 model """ntn""" +415 16 loss """crossentropy""" +415 16 regularizer """no""" +415 16 optimizer """adadelta""" +415 16 training_loop """lcwa""" +415 16 evaluator """rankbased""" +415 17 dataset """kinships""" +415 17 model """ntn""" +415 17 loss 
"""crossentropy""" +415 17 regularizer """no""" +415 17 optimizer """adadelta""" +415 17 training_loop """lcwa""" +415 17 evaluator """rankbased""" +415 18 dataset """kinships""" +415 18 model """ntn""" +415 18 loss """crossentropy""" +415 18 regularizer """no""" +415 18 optimizer """adadelta""" +415 18 training_loop """lcwa""" +415 18 evaluator """rankbased""" +415 19 dataset """kinships""" +415 19 model """ntn""" +415 19 loss """crossentropy""" +415 19 regularizer """no""" +415 19 optimizer """adadelta""" +415 19 training_loop """lcwa""" +415 19 evaluator """rankbased""" +415 20 dataset """kinships""" +415 20 model """ntn""" +415 20 loss """crossentropy""" +415 20 regularizer """no""" +415 20 optimizer """adadelta""" +415 20 training_loop """lcwa""" +415 20 evaluator """rankbased""" +415 21 dataset """kinships""" +415 21 model """ntn""" +415 21 loss """crossentropy""" +415 21 regularizer """no""" +415 21 optimizer """adadelta""" +415 21 training_loop """lcwa""" +415 21 evaluator """rankbased""" +415 22 dataset """kinships""" +415 22 model """ntn""" +415 22 loss """crossentropy""" +415 22 regularizer """no""" +415 22 optimizer """adadelta""" +415 22 training_loop """lcwa""" +415 22 evaluator """rankbased""" +415 23 dataset """kinships""" +415 23 model """ntn""" +415 23 loss """crossentropy""" +415 23 regularizer """no""" +415 23 optimizer """adadelta""" +415 23 training_loop """lcwa""" +415 23 evaluator """rankbased""" +415 24 dataset """kinships""" +415 24 model """ntn""" +415 24 loss """crossentropy""" +415 24 regularizer """no""" +415 24 optimizer """adadelta""" +415 24 training_loop """lcwa""" +415 24 evaluator """rankbased""" +415 25 dataset """kinships""" +415 25 model """ntn""" +415 25 loss """crossentropy""" +415 25 regularizer """no""" +415 25 optimizer """adadelta""" +415 25 training_loop """lcwa""" +415 25 evaluator """rankbased""" +415 26 dataset """kinships""" +415 26 model """ntn""" +415 26 loss """crossentropy""" +415 26 regularizer """no""" +415 26 
optimizer """adadelta""" +415 26 training_loop """lcwa""" +415 26 evaluator """rankbased""" +415 27 dataset """kinships""" +415 27 model """ntn""" +415 27 loss """crossentropy""" +415 27 regularizer """no""" +415 27 optimizer """adadelta""" +415 27 training_loop """lcwa""" +415 27 evaluator """rankbased""" +415 28 dataset """kinships""" +415 28 model """ntn""" +415 28 loss """crossentropy""" +415 28 regularizer """no""" +415 28 optimizer """adadelta""" +415 28 training_loop """lcwa""" +415 28 evaluator """rankbased""" +415 29 dataset """kinships""" +415 29 model """ntn""" +415 29 loss """crossentropy""" +415 29 regularizer """no""" +415 29 optimizer """adadelta""" +415 29 training_loop """lcwa""" +415 29 evaluator """rankbased""" +415 30 dataset """kinships""" +415 30 model """ntn""" +415 30 loss """crossentropy""" +415 30 regularizer """no""" +415 30 optimizer """adadelta""" +415 30 training_loop """lcwa""" +415 30 evaluator """rankbased""" +415 31 dataset """kinships""" +415 31 model """ntn""" +415 31 loss """crossentropy""" +415 31 regularizer """no""" +415 31 optimizer """adadelta""" +415 31 training_loop """lcwa""" +415 31 evaluator """rankbased""" +415 32 dataset """kinships""" +415 32 model """ntn""" +415 32 loss """crossentropy""" +415 32 regularizer """no""" +415 32 optimizer """adadelta""" +415 32 training_loop """lcwa""" +415 32 evaluator """rankbased""" +415 33 dataset """kinships""" +415 33 model """ntn""" +415 33 loss """crossentropy""" +415 33 regularizer """no""" +415 33 optimizer """adadelta""" +415 33 training_loop """lcwa""" +415 33 evaluator """rankbased""" +415 34 dataset """kinships""" +415 34 model """ntn""" +415 34 loss """crossentropy""" +415 34 regularizer """no""" +415 34 optimizer """adadelta""" +415 34 training_loop """lcwa""" +415 34 evaluator """rankbased""" +415 35 dataset """kinships""" +415 35 model """ntn""" +415 35 loss """crossentropy""" +415 35 regularizer """no""" +415 35 optimizer """adadelta""" +415 35 training_loop 
"""lcwa""" +415 35 evaluator """rankbased""" +415 36 dataset """kinships""" +415 36 model """ntn""" +415 36 loss """crossentropy""" +415 36 regularizer """no""" +415 36 optimizer """adadelta""" +415 36 training_loop """lcwa""" +415 36 evaluator """rankbased""" +415 37 dataset """kinships""" +415 37 model """ntn""" +415 37 loss """crossentropy""" +415 37 regularizer """no""" +415 37 optimizer """adadelta""" +415 37 training_loop """lcwa""" +415 37 evaluator """rankbased""" +415 38 dataset """kinships""" +415 38 model """ntn""" +415 38 loss """crossentropy""" +415 38 regularizer """no""" +415 38 optimizer """adadelta""" +415 38 training_loop """lcwa""" +415 38 evaluator """rankbased""" +415 39 dataset """kinships""" +415 39 model """ntn""" +415 39 loss """crossentropy""" +415 39 regularizer """no""" +415 39 optimizer """adadelta""" +415 39 training_loop """lcwa""" +415 39 evaluator """rankbased""" +415 40 dataset """kinships""" +415 40 model """ntn""" +415 40 loss """crossentropy""" +415 40 regularizer """no""" +415 40 optimizer """adadelta""" +415 40 training_loop """lcwa""" +415 40 evaluator """rankbased""" +415 41 dataset """kinships""" +415 41 model """ntn""" +415 41 loss """crossentropy""" +415 41 regularizer """no""" +415 41 optimizer """adadelta""" +415 41 training_loop """lcwa""" +415 41 evaluator """rankbased""" +415 42 dataset """kinships""" +415 42 model """ntn""" +415 42 loss """crossentropy""" +415 42 regularizer """no""" +415 42 optimizer """adadelta""" +415 42 training_loop """lcwa""" +415 42 evaluator """rankbased""" +415 43 dataset """kinships""" +415 43 model """ntn""" +415 43 loss """crossentropy""" +415 43 regularizer """no""" +415 43 optimizer """adadelta""" +415 43 training_loop """lcwa""" +415 43 evaluator """rankbased""" +415 44 dataset """kinships""" +415 44 model """ntn""" +415 44 loss """crossentropy""" +415 44 regularizer """no""" +415 44 optimizer """adadelta""" +415 44 training_loop """lcwa""" +415 44 evaluator """rankbased""" +415 45 
dataset """kinships""" +415 45 model """ntn""" +415 45 loss """crossentropy""" +415 45 regularizer """no""" +415 45 optimizer """adadelta""" +415 45 training_loop """lcwa""" +415 45 evaluator """rankbased""" +415 46 dataset """kinships""" +415 46 model """ntn""" +415 46 loss """crossentropy""" +415 46 regularizer """no""" +415 46 optimizer """adadelta""" +415 46 training_loop """lcwa""" +415 46 evaluator """rankbased""" +415 47 dataset """kinships""" +415 47 model """ntn""" +415 47 loss """crossentropy""" +415 47 regularizer """no""" +415 47 optimizer """adadelta""" +415 47 training_loop """lcwa""" +415 47 evaluator """rankbased""" +415 48 dataset """kinships""" +415 48 model """ntn""" +415 48 loss """crossentropy""" +415 48 regularizer """no""" +415 48 optimizer """adadelta""" +415 48 training_loop """lcwa""" +415 48 evaluator """rankbased""" +415 49 dataset """kinships""" +415 49 model """ntn""" +415 49 loss """crossentropy""" +415 49 regularizer """no""" +415 49 optimizer """adadelta""" +415 49 training_loop """lcwa""" +415 49 evaluator """rankbased""" +415 50 dataset """kinships""" +415 50 model """ntn""" +415 50 loss """crossentropy""" +415 50 regularizer """no""" +415 50 optimizer """adadelta""" +415 50 training_loop """lcwa""" +415 50 evaluator """rankbased""" +415 51 dataset """kinships""" +415 51 model """ntn""" +415 51 loss """crossentropy""" +415 51 regularizer """no""" +415 51 optimizer """adadelta""" +415 51 training_loop """lcwa""" +415 51 evaluator """rankbased""" +415 52 dataset """kinships""" +415 52 model """ntn""" +415 52 loss """crossentropy""" +415 52 regularizer """no""" +415 52 optimizer """adadelta""" +415 52 training_loop """lcwa""" +415 52 evaluator """rankbased""" +415 53 dataset """kinships""" +415 53 model """ntn""" +415 53 loss """crossentropy""" +415 53 regularizer """no""" +415 53 optimizer """adadelta""" +415 53 training_loop """lcwa""" +415 53 evaluator """rankbased""" +415 54 dataset """kinships""" +415 54 model """ntn""" +415 54 
loss """crossentropy""" +415 54 regularizer """no""" +415 54 optimizer """adadelta""" +415 54 training_loop """lcwa""" +415 54 evaluator """rankbased""" +415 55 dataset """kinships""" +415 55 model """ntn""" +415 55 loss """crossentropy""" +415 55 regularizer """no""" +415 55 optimizer """adadelta""" +415 55 training_loop """lcwa""" +415 55 evaluator """rankbased""" +415 56 dataset """kinships""" +415 56 model """ntn""" +415 56 loss """crossentropy""" +415 56 regularizer """no""" +415 56 optimizer """adadelta""" +415 56 training_loop """lcwa""" +415 56 evaluator """rankbased""" +415 57 dataset """kinships""" +415 57 model """ntn""" +415 57 loss """crossentropy""" +415 57 regularizer """no""" +415 57 optimizer """adadelta""" +415 57 training_loop """lcwa""" +415 57 evaluator """rankbased""" +415 58 dataset """kinships""" +415 58 model """ntn""" +415 58 loss """crossentropy""" +415 58 regularizer """no""" +415 58 optimizer """adadelta""" +415 58 training_loop """lcwa""" +415 58 evaluator """rankbased""" +415 59 dataset """kinships""" +415 59 model """ntn""" +415 59 loss """crossentropy""" +415 59 regularizer """no""" +415 59 optimizer """adadelta""" +415 59 training_loop """lcwa""" +415 59 evaluator """rankbased""" +415 60 dataset """kinships""" +415 60 model """ntn""" +415 60 loss """crossentropy""" +415 60 regularizer """no""" +415 60 optimizer """adadelta""" +415 60 training_loop """lcwa""" +415 60 evaluator """rankbased""" +415 61 dataset """kinships""" +415 61 model """ntn""" +415 61 loss """crossentropy""" +415 61 regularizer """no""" +415 61 optimizer """adadelta""" +415 61 training_loop """lcwa""" +415 61 evaluator """rankbased""" +415 62 dataset """kinships""" +415 62 model """ntn""" +415 62 loss """crossentropy""" +415 62 regularizer """no""" +415 62 optimizer """adadelta""" +415 62 training_loop """lcwa""" +415 62 evaluator """rankbased""" +415 63 dataset """kinships""" +415 63 model """ntn""" +415 63 loss """crossentropy""" +415 63 regularizer """no""" 
+415 63 optimizer """adadelta""" +415 63 training_loop """lcwa""" +415 63 evaluator """rankbased""" +415 64 dataset """kinships""" +415 64 model """ntn""" +415 64 loss """crossentropy""" +415 64 regularizer """no""" +415 64 optimizer """adadelta""" +415 64 training_loop """lcwa""" +415 64 evaluator """rankbased""" +415 65 dataset """kinships""" +415 65 model """ntn""" +415 65 loss """crossentropy""" +415 65 regularizer """no""" +415 65 optimizer """adadelta""" +415 65 training_loop """lcwa""" +415 65 evaluator """rankbased""" +415 66 dataset """kinships""" +415 66 model """ntn""" +415 66 loss """crossentropy""" +415 66 regularizer """no""" +415 66 optimizer """adadelta""" +415 66 training_loop """lcwa""" +415 66 evaluator """rankbased""" +415 67 dataset """kinships""" +415 67 model """ntn""" +415 67 loss """crossentropy""" +415 67 regularizer """no""" +415 67 optimizer """adadelta""" +415 67 training_loop """lcwa""" +415 67 evaluator """rankbased""" +415 68 dataset """kinships""" +415 68 model """ntn""" +415 68 loss """crossentropy""" +415 68 regularizer """no""" +415 68 optimizer """adadelta""" +415 68 training_loop """lcwa""" +415 68 evaluator """rankbased""" +415 69 dataset """kinships""" +415 69 model """ntn""" +415 69 loss """crossentropy""" +415 69 regularizer """no""" +415 69 optimizer """adadelta""" +415 69 training_loop """lcwa""" +415 69 evaluator """rankbased""" +415 70 dataset """kinships""" +415 70 model """ntn""" +415 70 loss """crossentropy""" +415 70 regularizer """no""" +415 70 optimizer """adadelta""" +415 70 training_loop """lcwa""" +415 70 evaluator """rankbased""" +415 71 dataset """kinships""" +415 71 model """ntn""" +415 71 loss """crossentropy""" +415 71 regularizer """no""" +415 71 optimizer """adadelta""" +415 71 training_loop """lcwa""" +415 71 evaluator """rankbased""" +415 72 dataset """kinships""" +415 72 model """ntn""" +415 72 loss """crossentropy""" +415 72 regularizer """no""" +415 72 optimizer """adadelta""" +415 72 training_loop 
"""lcwa""" +415 72 evaluator """rankbased""" +415 73 dataset """kinships""" +415 73 model """ntn""" +415 73 loss """crossentropy""" +415 73 regularizer """no""" +415 73 optimizer """adadelta""" +415 73 training_loop """lcwa""" +415 73 evaluator """rankbased""" +415 74 dataset """kinships""" +415 74 model """ntn""" +415 74 loss """crossentropy""" +415 74 regularizer """no""" +415 74 optimizer """adadelta""" +415 74 training_loop """lcwa""" +415 74 evaluator """rankbased""" +415 75 dataset """kinships""" +415 75 model """ntn""" +415 75 loss """crossentropy""" +415 75 regularizer """no""" +415 75 optimizer """adadelta""" +415 75 training_loop """lcwa""" +415 75 evaluator """rankbased""" +415 76 dataset """kinships""" +415 76 model """ntn""" +415 76 loss """crossentropy""" +415 76 regularizer """no""" +415 76 optimizer """adadelta""" +415 76 training_loop """lcwa""" +415 76 evaluator """rankbased""" +415 77 dataset """kinships""" +415 77 model """ntn""" +415 77 loss """crossentropy""" +415 77 regularizer """no""" +415 77 optimizer """adadelta""" +415 77 training_loop """lcwa""" +415 77 evaluator """rankbased""" +415 78 dataset """kinships""" +415 78 model """ntn""" +415 78 loss """crossentropy""" +415 78 regularizer """no""" +415 78 optimizer """adadelta""" +415 78 training_loop """lcwa""" +415 78 evaluator """rankbased""" +415 79 dataset """kinships""" +415 79 model """ntn""" +415 79 loss """crossentropy""" +415 79 regularizer """no""" +415 79 optimizer """adadelta""" +415 79 training_loop """lcwa""" +415 79 evaluator """rankbased""" +415 80 dataset """kinships""" +415 80 model """ntn""" +415 80 loss """crossentropy""" +415 80 regularizer """no""" +415 80 optimizer """adadelta""" +415 80 training_loop """lcwa""" +415 80 evaluator """rankbased""" +415 81 dataset """kinships""" +415 81 model """ntn""" +415 81 loss """crossentropy""" +415 81 regularizer """no""" +415 81 optimizer """adadelta""" +415 81 training_loop """lcwa""" +415 81 evaluator """rankbased""" +415 82 
dataset """kinships""" +415 82 model """ntn""" +415 82 loss """crossentropy""" +415 82 regularizer """no""" +415 82 optimizer """adadelta""" +415 82 training_loop """lcwa""" +415 82 evaluator """rankbased""" +415 83 dataset """kinships""" +415 83 model """ntn""" +415 83 loss """crossentropy""" +415 83 regularizer """no""" +415 83 optimizer """adadelta""" +415 83 training_loop """lcwa""" +415 83 evaluator """rankbased""" +415 84 dataset """kinships""" +415 84 model """ntn""" +415 84 loss """crossentropy""" +415 84 regularizer """no""" +415 84 optimizer """adadelta""" +415 84 training_loop """lcwa""" +415 84 evaluator """rankbased""" +415 85 dataset """kinships""" +415 85 model """ntn""" +415 85 loss """crossentropy""" +415 85 regularizer """no""" +415 85 optimizer """adadelta""" +415 85 training_loop """lcwa""" +415 85 evaluator """rankbased""" +415 86 dataset """kinships""" +415 86 model """ntn""" +415 86 loss """crossentropy""" +415 86 regularizer """no""" +415 86 optimizer """adadelta""" +415 86 training_loop """lcwa""" +415 86 evaluator """rankbased""" +415 87 dataset """kinships""" +415 87 model """ntn""" +415 87 loss """crossentropy""" +415 87 regularizer """no""" +415 87 optimizer """adadelta""" +415 87 training_loop """lcwa""" +415 87 evaluator """rankbased""" +415 88 dataset """kinships""" +415 88 model """ntn""" +415 88 loss """crossentropy""" +415 88 regularizer """no""" +415 88 optimizer """adadelta""" +415 88 training_loop """lcwa""" +415 88 evaluator """rankbased""" +415 89 dataset """kinships""" +415 89 model """ntn""" +415 89 loss """crossentropy""" +415 89 regularizer """no""" +415 89 optimizer """adadelta""" +415 89 training_loop """lcwa""" +415 89 evaluator """rankbased""" +415 90 dataset """kinships""" +415 90 model """ntn""" +415 90 loss """crossentropy""" +415 90 regularizer """no""" +415 90 optimizer """adadelta""" +415 90 training_loop """lcwa""" +415 90 evaluator """rankbased""" +415 91 dataset """kinships""" +415 91 model """ntn""" +415 91 
loss """crossentropy""" +415 91 regularizer """no""" +415 91 optimizer """adadelta""" +415 91 training_loop """lcwa""" +415 91 evaluator """rankbased""" +415 92 dataset """kinships""" +415 92 model """ntn""" +415 92 loss """crossentropy""" +415 92 regularizer """no""" +415 92 optimizer """adadelta""" +415 92 training_loop """lcwa""" +415 92 evaluator """rankbased""" +415 93 dataset """kinships""" +415 93 model """ntn""" +415 93 loss """crossentropy""" +415 93 regularizer """no""" +415 93 optimizer """adadelta""" +415 93 training_loop """lcwa""" +415 93 evaluator """rankbased""" +415 94 dataset """kinships""" +415 94 model """ntn""" +415 94 loss """crossentropy""" +415 94 regularizer """no""" +415 94 optimizer """adadelta""" +415 94 training_loop """lcwa""" +415 94 evaluator """rankbased""" +415 95 dataset """kinships""" +415 95 model """ntn""" +415 95 loss """crossentropy""" +415 95 regularizer """no""" +415 95 optimizer """adadelta""" +415 95 training_loop """lcwa""" +415 95 evaluator """rankbased""" +415 96 dataset """kinships""" +415 96 model """ntn""" +415 96 loss """crossentropy""" +415 96 regularizer """no""" +415 96 optimizer """adadelta""" +415 96 training_loop """lcwa""" +415 96 evaluator """rankbased""" +415 97 dataset """kinships""" +415 97 model """ntn""" +415 97 loss """crossentropy""" +415 97 regularizer """no""" +415 97 optimizer """adadelta""" +415 97 training_loop """lcwa""" +415 97 evaluator """rankbased""" +415 98 dataset """kinships""" +415 98 model """ntn""" +415 98 loss """crossentropy""" +415 98 regularizer """no""" +415 98 optimizer """adadelta""" +415 98 training_loop """lcwa""" +415 98 evaluator """rankbased""" +415 99 dataset """kinships""" +415 99 model """ntn""" +415 99 loss """crossentropy""" +415 99 regularizer """no""" +415 99 optimizer """adadelta""" +415 99 training_loop """lcwa""" +415 99 evaluator """rankbased""" +415 100 dataset """kinships""" +415 100 model """ntn""" +415 100 loss """crossentropy""" +415 100 regularizer 
"""no""" +415 100 optimizer """adadelta""" +415 100 training_loop """lcwa""" +415 100 evaluator """rankbased""" +416 1 model.embedding_dim 1.0 +416 1 training.batch_size 0.0 +416 1 training.label_smoothing 0.008243767662377945 +416 2 model.embedding_dim 1.0 +416 2 training.batch_size 1.0 +416 2 training.label_smoothing 0.10141846089048545 +416 3 model.embedding_dim 2.0 +416 3 training.batch_size 2.0 +416 3 training.label_smoothing 0.06090405161190869 +416 4 model.embedding_dim 1.0 +416 4 training.batch_size 2.0 +416 4 training.label_smoothing 0.02050058804474625 +416 5 model.embedding_dim 1.0 +416 5 training.batch_size 0.0 +416 5 training.label_smoothing 0.39433574334741467 +416 6 model.embedding_dim 2.0 +416 6 training.batch_size 2.0 +416 6 training.label_smoothing 0.108293016164537 +416 7 model.embedding_dim 2.0 +416 7 training.batch_size 2.0 +416 7 training.label_smoothing 0.09992495913888265 +416 8 model.embedding_dim 0.0 +416 8 training.batch_size 2.0 +416 8 training.label_smoothing 0.13961356821490847 +416 9 model.embedding_dim 2.0 +416 9 training.batch_size 0.0 +416 9 training.label_smoothing 0.09580054269852752 +416 10 model.embedding_dim 2.0 +416 10 training.batch_size 2.0 +416 10 training.label_smoothing 0.030506047639146244 +416 11 model.embedding_dim 0.0 +416 11 training.batch_size 0.0 +416 11 training.label_smoothing 0.20096452788349922 +416 12 model.embedding_dim 1.0 +416 12 training.batch_size 2.0 +416 12 training.label_smoothing 0.007782615697024786 +416 13 model.embedding_dim 1.0 +416 13 training.batch_size 1.0 +416 13 training.label_smoothing 0.008311520143013095 +416 14 model.embedding_dim 0.0 +416 14 training.batch_size 0.0 +416 14 training.label_smoothing 0.044488042502281014 +416 15 model.embedding_dim 2.0 +416 15 training.batch_size 2.0 +416 15 training.label_smoothing 0.006593434652683622 +416 16 model.embedding_dim 1.0 +416 16 training.batch_size 0.0 +416 16 training.label_smoothing 0.8597899758750268 +416 17 model.embedding_dim 1.0 +416 17 
training.batch_size 2.0 +416 17 training.label_smoothing 0.018505948269338022 +416 18 model.embedding_dim 1.0 +416 18 training.batch_size 0.0 +416 18 training.label_smoothing 0.2667211798108116 +416 19 model.embedding_dim 2.0 +416 19 training.batch_size 0.0 +416 19 training.label_smoothing 0.012209417919033657 +416 20 model.embedding_dim 1.0 +416 20 training.batch_size 2.0 +416 20 training.label_smoothing 0.038720627716772045 +416 21 model.embedding_dim 1.0 +416 21 training.batch_size 0.0 +416 21 training.label_smoothing 0.010136375771296332 +416 22 model.embedding_dim 2.0 +416 22 training.batch_size 2.0 +416 22 training.label_smoothing 0.03398283998227629 +416 23 model.embedding_dim 2.0 +416 23 training.batch_size 0.0 +416 23 training.label_smoothing 0.0036151508171589227 +416 24 model.embedding_dim 0.0 +416 24 training.batch_size 2.0 +416 24 training.label_smoothing 0.0028477238015110124 +416 25 model.embedding_dim 2.0 +416 25 training.batch_size 0.0 +416 25 training.label_smoothing 0.06493432214231992 +416 26 model.embedding_dim 2.0 +416 26 training.batch_size 2.0 +416 26 training.label_smoothing 0.003190781772143252 +416 27 model.embedding_dim 1.0 +416 27 training.batch_size 2.0 +416 27 training.label_smoothing 0.32696889559478354 +416 28 model.embedding_dim 2.0 +416 28 training.batch_size 1.0 +416 28 training.label_smoothing 0.0026831294542903447 +416 29 model.embedding_dim 0.0 +416 29 training.batch_size 0.0 +416 29 training.label_smoothing 0.0014409608045491927 +416 30 model.embedding_dim 2.0 +416 30 training.batch_size 1.0 +416 30 training.label_smoothing 0.004282319301296108 +416 31 model.embedding_dim 1.0 +416 31 training.batch_size 0.0 +416 31 training.label_smoothing 0.017942397549928886 +416 32 model.embedding_dim 2.0 +416 32 training.batch_size 1.0 +416 32 training.label_smoothing 0.007142225712794346 +416 33 model.embedding_dim 2.0 +416 33 training.batch_size 1.0 +416 33 training.label_smoothing 0.03940715028911126 +416 34 model.embedding_dim 1.0 
+416 34 training.batch_size 1.0 +416 34 training.label_smoothing 0.33842620882249647 +416 35 model.embedding_dim 1.0 +416 35 training.batch_size 2.0 +416 35 training.label_smoothing 0.03645326111963137 +416 36 model.embedding_dim 2.0 +416 36 training.batch_size 0.0 +416 36 training.label_smoothing 0.0011720315092068217 +416 37 model.embedding_dim 0.0 +416 37 training.batch_size 1.0 +416 37 training.label_smoothing 0.04472872210565982 +416 38 model.embedding_dim 2.0 +416 38 training.batch_size 1.0 +416 38 training.label_smoothing 0.03963961310817173 +416 39 model.embedding_dim 2.0 +416 39 training.batch_size 1.0 +416 39 training.label_smoothing 0.0027276895098282965 +416 40 model.embedding_dim 0.0 +416 40 training.batch_size 0.0 +416 40 training.label_smoothing 0.39290846219253744 +416 41 model.embedding_dim 0.0 +416 41 training.batch_size 1.0 +416 41 training.label_smoothing 0.20063561137651229 +416 42 model.embedding_dim 2.0 +416 42 training.batch_size 1.0 +416 42 training.label_smoothing 0.3563775360407222 +416 43 model.embedding_dim 0.0 +416 43 training.batch_size 0.0 +416 43 training.label_smoothing 0.030837221582226078 +416 44 model.embedding_dim 0.0 +416 44 training.batch_size 0.0 +416 44 training.label_smoothing 0.04471936397641611 +416 45 model.embedding_dim 1.0 +416 45 training.batch_size 0.0 +416 45 training.label_smoothing 0.4814288298315222 +416 46 model.embedding_dim 1.0 +416 46 training.batch_size 2.0 +416 46 training.label_smoothing 0.13593533418652656 +416 47 model.embedding_dim 1.0 +416 47 training.batch_size 1.0 +416 47 training.label_smoothing 0.0026057918253736237 +416 48 model.embedding_dim 0.0 +416 48 training.batch_size 1.0 +416 48 training.label_smoothing 0.006671191650527939 +416 49 model.embedding_dim 2.0 +416 49 training.batch_size 2.0 +416 49 training.label_smoothing 0.0212264663123635 +416 50 model.embedding_dim 2.0 +416 50 training.batch_size 0.0 +416 50 training.label_smoothing 0.0019144287757568894 +416 51 model.embedding_dim 2.0 
+416 51 training.batch_size 0.0 +416 51 training.label_smoothing 0.7471187533562426 +416 52 model.embedding_dim 1.0 +416 52 training.batch_size 1.0 +416 52 training.label_smoothing 0.45875301655353096 +416 53 model.embedding_dim 0.0 +416 53 training.batch_size 2.0 +416 53 training.label_smoothing 0.009950737796198853 +416 54 model.embedding_dim 2.0 +416 54 training.batch_size 1.0 +416 54 training.label_smoothing 0.0013071003865164532 +416 55 model.embedding_dim 1.0 +416 55 training.batch_size 1.0 +416 55 training.label_smoothing 0.01080199626030004 +416 56 model.embedding_dim 0.0 +416 56 training.batch_size 0.0 +416 56 training.label_smoothing 0.011091249822805958 +416 57 model.embedding_dim 1.0 +416 57 training.batch_size 0.0 +416 57 training.label_smoothing 0.005768982788356287 +416 58 model.embedding_dim 0.0 +416 58 training.batch_size 1.0 +416 58 training.label_smoothing 0.001070683060315451 +416 59 model.embedding_dim 0.0 +416 59 training.batch_size 1.0 +416 59 training.label_smoothing 0.005789554153569048 +416 60 model.embedding_dim 2.0 +416 60 training.batch_size 0.0 +416 60 training.label_smoothing 0.012536599919188348 +416 61 model.embedding_dim 1.0 +416 61 training.batch_size 0.0 +416 61 training.label_smoothing 0.005422434601358717 +416 62 model.embedding_dim 0.0 +416 62 training.batch_size 1.0 +416 62 training.label_smoothing 0.11807273989871535 +416 63 model.embedding_dim 1.0 +416 63 training.batch_size 1.0 +416 63 training.label_smoothing 0.6132169885929142 +416 64 model.embedding_dim 2.0 +416 64 training.batch_size 0.0 +416 64 training.label_smoothing 0.03272246394630146 +416 65 model.embedding_dim 2.0 +416 65 training.batch_size 1.0 +416 65 training.label_smoothing 0.15888300496493443 +416 66 model.embedding_dim 0.0 +416 66 training.batch_size 0.0 +416 66 training.label_smoothing 0.03621300329629363 +416 67 model.embedding_dim 2.0 +416 67 training.batch_size 0.0 +416 67 training.label_smoothing 0.841028610281959 +416 68 model.embedding_dim 0.0 +416 
68 training.batch_size 1.0 +416 68 training.label_smoothing 0.03840161659659783 +416 69 model.embedding_dim 0.0 +416 69 training.batch_size 2.0 +416 69 training.label_smoothing 0.03916662188826984 +416 70 model.embedding_dim 1.0 +416 70 training.batch_size 1.0 +416 70 training.label_smoothing 0.018886468497635773 +416 71 model.embedding_dim 0.0 +416 71 training.batch_size 2.0 +416 71 training.label_smoothing 0.14408698247656795 +416 72 model.embedding_dim 2.0 +416 72 training.batch_size 1.0 +416 72 training.label_smoothing 0.0010142964162995175 +416 73 model.embedding_dim 2.0 +416 73 training.batch_size 2.0 +416 73 training.label_smoothing 0.2596284657262145 +416 74 model.embedding_dim 1.0 +416 74 training.batch_size 0.0 +416 74 training.label_smoothing 0.05571564691296156 +416 75 model.embedding_dim 1.0 +416 75 training.batch_size 1.0 +416 75 training.label_smoothing 0.10623095560728206 +416 76 model.embedding_dim 2.0 +416 76 training.batch_size 0.0 +416 76 training.label_smoothing 0.06373797195154067 +416 77 model.embedding_dim 1.0 +416 77 training.batch_size 1.0 +416 77 training.label_smoothing 0.8379170801452253 +416 78 model.embedding_dim 0.0 +416 78 training.batch_size 2.0 +416 78 training.label_smoothing 0.21288940031389741 +416 79 model.embedding_dim 0.0 +416 79 training.batch_size 2.0 +416 79 training.label_smoothing 0.4380481893378309 +416 80 model.embedding_dim 2.0 +416 80 training.batch_size 2.0 +416 80 training.label_smoothing 0.1876268710902817 +416 81 model.embedding_dim 2.0 +416 81 training.batch_size 0.0 +416 81 training.label_smoothing 0.002047891725091021 +416 82 model.embedding_dim 1.0 +416 82 training.batch_size 0.0 +416 82 training.label_smoothing 0.0053590177435827915 +416 83 model.embedding_dim 2.0 +416 83 training.batch_size 1.0 +416 83 training.label_smoothing 0.009809882385708001 +416 84 model.embedding_dim 1.0 +416 84 training.batch_size 2.0 +416 84 training.label_smoothing 0.27506357073209425 +416 85 model.embedding_dim 1.0 +416 85 
training.batch_size 0.0 +416 85 training.label_smoothing 0.3003785085700304 +416 86 model.embedding_dim 2.0 +416 86 training.batch_size 1.0 +416 86 training.label_smoothing 0.039270122472941066 +416 87 model.embedding_dim 2.0 +416 87 training.batch_size 1.0 +416 87 training.label_smoothing 0.004931079639903464 +416 88 model.embedding_dim 1.0 +416 88 training.batch_size 2.0 +416 88 training.label_smoothing 0.05543115595046296 +416 89 model.embedding_dim 2.0 +416 89 training.batch_size 1.0 +416 89 training.label_smoothing 0.010574429890094626 +416 90 model.embedding_dim 0.0 +416 90 training.batch_size 2.0 +416 90 training.label_smoothing 0.10996093807813158 +416 91 model.embedding_dim 1.0 +416 91 training.batch_size 0.0 +416 91 training.label_smoothing 0.1334935949347072 +416 92 model.embedding_dim 2.0 +416 92 training.batch_size 1.0 +416 92 training.label_smoothing 0.012860173877313346 +416 93 model.embedding_dim 2.0 +416 93 training.batch_size 2.0 +416 93 training.label_smoothing 0.026797870199026297 +416 94 model.embedding_dim 1.0 +416 94 training.batch_size 2.0 +416 94 training.label_smoothing 0.027440774155223384 +416 95 model.embedding_dim 0.0 +416 95 training.batch_size 0.0 +416 95 training.label_smoothing 0.0014854144788642068 +416 96 model.embedding_dim 1.0 +416 96 training.batch_size 1.0 +416 96 training.label_smoothing 0.004042812958315281 +416 97 model.embedding_dim 1.0 +416 97 training.batch_size 2.0 +416 97 training.label_smoothing 0.038715330232871326 +416 98 model.embedding_dim 1.0 +416 98 training.batch_size 0.0 +416 98 training.label_smoothing 0.8283241125585874 +416 99 model.embedding_dim 0.0 +416 99 training.batch_size 1.0 +416 99 training.label_smoothing 0.003117051084841374 +416 100 model.embedding_dim 2.0 +416 100 training.batch_size 1.0 +416 100 training.label_smoothing 0.0016824553149221552 +416 1 dataset """kinships""" +416 1 model """ntn""" +416 1 loss """crossentropy""" +416 1 regularizer """no""" +416 1 optimizer """adadelta""" +416 1 
training_loop """lcwa""" +416 1 evaluator """rankbased""" +416 2 dataset """kinships""" +416 2 model """ntn""" +416 2 loss """crossentropy""" +416 2 regularizer """no""" +416 2 optimizer """adadelta""" +416 2 training_loop """lcwa""" +416 2 evaluator """rankbased""" +416 3 dataset """kinships""" +416 3 model """ntn""" +416 3 loss """crossentropy""" +416 3 regularizer """no""" +416 3 optimizer """adadelta""" +416 3 training_loop """lcwa""" +416 3 evaluator """rankbased""" +416 4 dataset """kinships""" +416 4 model """ntn""" +416 4 loss """crossentropy""" +416 4 regularizer """no""" +416 4 optimizer """adadelta""" +416 4 training_loop """lcwa""" +416 4 evaluator """rankbased""" +416 5 dataset """kinships""" +416 5 model """ntn""" +416 5 loss """crossentropy""" +416 5 regularizer """no""" +416 5 optimizer """adadelta""" +416 5 training_loop """lcwa""" +416 5 evaluator """rankbased""" +416 6 dataset """kinships""" +416 6 model """ntn""" +416 6 loss """crossentropy""" +416 6 regularizer """no""" +416 6 optimizer """adadelta""" +416 6 training_loop """lcwa""" +416 6 evaluator """rankbased""" +416 7 dataset """kinships""" +416 7 model """ntn""" +416 7 loss """crossentropy""" +416 7 regularizer """no""" +416 7 optimizer """adadelta""" +416 7 training_loop """lcwa""" +416 7 evaluator """rankbased""" +416 8 dataset """kinships""" +416 8 model """ntn""" +416 8 loss """crossentropy""" +416 8 regularizer """no""" +416 8 optimizer """adadelta""" +416 8 training_loop """lcwa""" +416 8 evaluator """rankbased""" +416 9 dataset """kinships""" +416 9 model """ntn""" +416 9 loss """crossentropy""" +416 9 regularizer """no""" +416 9 optimizer """adadelta""" +416 9 training_loop """lcwa""" +416 9 evaluator """rankbased""" +416 10 dataset """kinships""" +416 10 model """ntn""" +416 10 loss """crossentropy""" +416 10 regularizer """no""" +416 10 optimizer """adadelta""" +416 10 training_loop """lcwa""" +416 10 evaluator """rankbased""" +416 11 dataset """kinships""" +416 11 model 
"""ntn""" +416 11 loss """crossentropy""" +416 11 regularizer """no""" +416 11 optimizer """adadelta""" +416 11 training_loop """lcwa""" +416 11 evaluator """rankbased""" +416 12 dataset """kinships""" +416 12 model """ntn""" +416 12 loss """crossentropy""" +416 12 regularizer """no""" +416 12 optimizer """adadelta""" +416 12 training_loop """lcwa""" +416 12 evaluator """rankbased""" +416 13 dataset """kinships""" +416 13 model """ntn""" +416 13 loss """crossentropy""" +416 13 regularizer """no""" +416 13 optimizer """adadelta""" +416 13 training_loop """lcwa""" +416 13 evaluator """rankbased""" +416 14 dataset """kinships""" +416 14 model """ntn""" +416 14 loss """crossentropy""" +416 14 regularizer """no""" +416 14 optimizer """adadelta""" +416 14 training_loop """lcwa""" +416 14 evaluator """rankbased""" +416 15 dataset """kinships""" +416 15 model """ntn""" +416 15 loss """crossentropy""" +416 15 regularizer """no""" +416 15 optimizer """adadelta""" +416 15 training_loop """lcwa""" +416 15 evaluator """rankbased""" +416 16 dataset """kinships""" +416 16 model """ntn""" +416 16 loss """crossentropy""" +416 16 regularizer """no""" +416 16 optimizer """adadelta""" +416 16 training_loop """lcwa""" +416 16 evaluator """rankbased""" +416 17 dataset """kinships""" +416 17 model """ntn""" +416 17 loss """crossentropy""" +416 17 regularizer """no""" +416 17 optimizer """adadelta""" +416 17 training_loop """lcwa""" +416 17 evaluator """rankbased""" +416 18 dataset """kinships""" +416 18 model """ntn""" +416 18 loss """crossentropy""" +416 18 regularizer """no""" +416 18 optimizer """adadelta""" +416 18 training_loop """lcwa""" +416 18 evaluator """rankbased""" +416 19 dataset """kinships""" +416 19 model """ntn""" +416 19 loss """crossentropy""" +416 19 regularizer """no""" +416 19 optimizer """adadelta""" +416 19 training_loop """lcwa""" +416 19 evaluator """rankbased""" +416 20 dataset """kinships""" +416 20 model """ntn""" +416 20 loss """crossentropy""" +416 20 
regularizer """no""" +416 20 optimizer """adadelta""" +416 20 training_loop """lcwa""" +416 20 evaluator """rankbased""" +416 21 dataset """kinships""" +416 21 model """ntn""" +416 21 loss """crossentropy""" +416 21 regularizer """no""" +416 21 optimizer """adadelta""" +416 21 training_loop """lcwa""" +416 21 evaluator """rankbased""" +416 22 dataset """kinships""" +416 22 model """ntn""" +416 22 loss """crossentropy""" +416 22 regularizer """no""" +416 22 optimizer """adadelta""" +416 22 training_loop """lcwa""" +416 22 evaluator """rankbased""" +416 23 dataset """kinships""" +416 23 model """ntn""" +416 23 loss """crossentropy""" +416 23 regularizer """no""" +416 23 optimizer """adadelta""" +416 23 training_loop """lcwa""" +416 23 evaluator """rankbased""" +416 24 dataset """kinships""" +416 24 model """ntn""" +416 24 loss """crossentropy""" +416 24 regularizer """no""" +416 24 optimizer """adadelta""" +416 24 training_loop """lcwa""" +416 24 evaluator """rankbased""" +416 25 dataset """kinships""" +416 25 model """ntn""" +416 25 loss """crossentropy""" +416 25 regularizer """no""" +416 25 optimizer """adadelta""" +416 25 training_loop """lcwa""" +416 25 evaluator """rankbased""" +416 26 dataset """kinships""" +416 26 model """ntn""" +416 26 loss """crossentropy""" +416 26 regularizer """no""" +416 26 optimizer """adadelta""" +416 26 training_loop """lcwa""" +416 26 evaluator """rankbased""" +416 27 dataset """kinships""" +416 27 model """ntn""" +416 27 loss """crossentropy""" +416 27 regularizer """no""" +416 27 optimizer """adadelta""" +416 27 training_loop """lcwa""" +416 27 evaluator """rankbased""" +416 28 dataset """kinships""" +416 28 model """ntn""" +416 28 loss """crossentropy""" +416 28 regularizer """no""" +416 28 optimizer """adadelta""" +416 28 training_loop """lcwa""" +416 28 evaluator """rankbased""" +416 29 dataset """kinships""" +416 29 model """ntn""" +416 29 loss """crossentropy""" +416 29 regularizer """no""" +416 29 optimizer """adadelta""" 
+416 29 training_loop """lcwa""" +416 29 evaluator """rankbased""" +416 30 dataset """kinships""" +416 30 model """ntn""" +416 30 loss """crossentropy""" +416 30 regularizer """no""" +416 30 optimizer """adadelta""" +416 30 training_loop """lcwa""" +416 30 evaluator """rankbased""" +416 31 dataset """kinships""" +416 31 model """ntn""" +416 31 loss """crossentropy""" +416 31 regularizer """no""" +416 31 optimizer """adadelta""" +416 31 training_loop """lcwa""" +416 31 evaluator """rankbased""" +416 32 dataset """kinships""" +416 32 model """ntn""" +416 32 loss """crossentropy""" +416 32 regularizer """no""" +416 32 optimizer """adadelta""" +416 32 training_loop """lcwa""" +416 32 evaluator """rankbased""" +416 33 dataset """kinships""" +416 33 model """ntn""" +416 33 loss """crossentropy""" +416 33 regularizer """no""" +416 33 optimizer """adadelta""" +416 33 training_loop """lcwa""" +416 33 evaluator """rankbased""" +416 34 dataset """kinships""" +416 34 model """ntn""" +416 34 loss """crossentropy""" +416 34 regularizer """no""" +416 34 optimizer """adadelta""" +416 34 training_loop """lcwa""" +416 34 evaluator """rankbased""" +416 35 dataset """kinships""" +416 35 model """ntn""" +416 35 loss """crossentropy""" +416 35 regularizer """no""" +416 35 optimizer """adadelta""" +416 35 training_loop """lcwa""" +416 35 evaluator """rankbased""" +416 36 dataset """kinships""" +416 36 model """ntn""" +416 36 loss """crossentropy""" +416 36 regularizer """no""" +416 36 optimizer """adadelta""" +416 36 training_loop """lcwa""" +416 36 evaluator """rankbased""" +416 37 dataset """kinships""" +416 37 model """ntn""" +416 37 loss """crossentropy""" +416 37 regularizer """no""" +416 37 optimizer """adadelta""" +416 37 training_loop """lcwa""" +416 37 evaluator """rankbased""" +416 38 dataset """kinships""" +416 38 model """ntn""" +416 38 loss """crossentropy""" +416 38 regularizer """no""" +416 38 optimizer """adadelta""" +416 38 training_loop """lcwa""" +416 38 evaluator 
"""rankbased""" +416 39 dataset """kinships""" +416 39 model """ntn""" +416 39 loss """crossentropy""" +416 39 regularizer """no""" +416 39 optimizer """adadelta""" +416 39 training_loop """lcwa""" +416 39 evaluator """rankbased""" +416 40 dataset """kinships""" +416 40 model """ntn""" +416 40 loss """crossentropy""" +416 40 regularizer """no""" +416 40 optimizer """adadelta""" +416 40 training_loop """lcwa""" +416 40 evaluator """rankbased""" +416 41 dataset """kinships""" +416 41 model """ntn""" +416 41 loss """crossentropy""" +416 41 regularizer """no""" +416 41 optimizer """adadelta""" +416 41 training_loop """lcwa""" +416 41 evaluator """rankbased""" +416 42 dataset """kinships""" +416 42 model """ntn""" +416 42 loss """crossentropy""" +416 42 regularizer """no""" +416 42 optimizer """adadelta""" +416 42 training_loop """lcwa""" +416 42 evaluator """rankbased""" +416 43 dataset """kinships""" +416 43 model """ntn""" +416 43 loss """crossentropy""" +416 43 regularizer """no""" +416 43 optimizer """adadelta""" +416 43 training_loop """lcwa""" +416 43 evaluator """rankbased""" +416 44 dataset """kinships""" +416 44 model """ntn""" +416 44 loss """crossentropy""" +416 44 regularizer """no""" +416 44 optimizer """adadelta""" +416 44 training_loop """lcwa""" +416 44 evaluator """rankbased""" +416 45 dataset """kinships""" +416 45 model """ntn""" +416 45 loss """crossentropy""" +416 45 regularizer """no""" +416 45 optimizer """adadelta""" +416 45 training_loop """lcwa""" +416 45 evaluator """rankbased""" +416 46 dataset """kinships""" +416 46 model """ntn""" +416 46 loss """crossentropy""" +416 46 regularizer """no""" +416 46 optimizer """adadelta""" +416 46 training_loop """lcwa""" +416 46 evaluator """rankbased""" +416 47 dataset """kinships""" +416 47 model """ntn""" +416 47 loss """crossentropy""" +416 47 regularizer """no""" +416 47 optimizer """adadelta""" +416 47 training_loop """lcwa""" +416 47 evaluator """rankbased""" +416 48 dataset """kinships""" +416 48 
model """ntn""" +416 48 loss """crossentropy""" +416 48 regularizer """no""" +416 48 optimizer """adadelta""" +416 48 training_loop """lcwa""" +416 48 evaluator """rankbased""" +416 49 dataset """kinships""" +416 49 model """ntn""" +416 49 loss """crossentropy""" +416 49 regularizer """no""" +416 49 optimizer """adadelta""" +416 49 training_loop """lcwa""" +416 49 evaluator """rankbased""" +416 50 dataset """kinships""" +416 50 model """ntn""" +416 50 loss """crossentropy""" +416 50 regularizer """no""" +416 50 optimizer """adadelta""" +416 50 training_loop """lcwa""" +416 50 evaluator """rankbased""" +416 51 dataset """kinships""" +416 51 model """ntn""" +416 51 loss """crossentropy""" +416 51 regularizer """no""" +416 51 optimizer """adadelta""" +416 51 training_loop """lcwa""" +416 51 evaluator """rankbased""" +416 52 dataset """kinships""" +416 52 model """ntn""" +416 52 loss """crossentropy""" +416 52 regularizer """no""" +416 52 optimizer """adadelta""" +416 52 training_loop """lcwa""" +416 52 evaluator """rankbased""" +416 53 dataset """kinships""" +416 53 model """ntn""" +416 53 loss """crossentropy""" +416 53 regularizer """no""" +416 53 optimizer """adadelta""" +416 53 training_loop """lcwa""" +416 53 evaluator """rankbased""" +416 54 dataset """kinships""" +416 54 model """ntn""" +416 54 loss """crossentropy""" +416 54 regularizer """no""" +416 54 optimizer """adadelta""" +416 54 training_loop """lcwa""" +416 54 evaluator """rankbased""" +416 55 dataset """kinships""" +416 55 model """ntn""" +416 55 loss """crossentropy""" +416 55 regularizer """no""" +416 55 optimizer """adadelta""" +416 55 training_loop """lcwa""" +416 55 evaluator """rankbased""" +416 56 dataset """kinships""" +416 56 model """ntn""" +416 56 loss """crossentropy""" +416 56 regularizer """no""" +416 56 optimizer """adadelta""" +416 56 training_loop """lcwa""" +416 56 evaluator """rankbased""" +416 57 dataset """kinships""" +416 57 model """ntn""" +416 57 loss """crossentropy""" +416 57 
regularizer """no""" +416 57 optimizer """adadelta""" +416 57 training_loop """lcwa""" +416 57 evaluator """rankbased""" +416 58 dataset """kinships""" +416 58 model """ntn""" +416 58 loss """crossentropy""" +416 58 regularizer """no""" +416 58 optimizer """adadelta""" +416 58 training_loop """lcwa""" +416 58 evaluator """rankbased""" +416 59 dataset """kinships""" +416 59 model """ntn""" +416 59 loss """crossentropy""" +416 59 regularizer """no""" +416 59 optimizer """adadelta""" +416 59 training_loop """lcwa""" +416 59 evaluator """rankbased""" +416 60 dataset """kinships""" +416 60 model """ntn""" +416 60 loss """crossentropy""" +416 60 regularizer """no""" +416 60 optimizer """adadelta""" +416 60 training_loop """lcwa""" +416 60 evaluator """rankbased""" +416 61 dataset """kinships""" +416 61 model """ntn""" +416 61 loss """crossentropy""" +416 61 regularizer """no""" +416 61 optimizer """adadelta""" +416 61 training_loop """lcwa""" +416 61 evaluator """rankbased""" +416 62 dataset """kinships""" +416 62 model """ntn""" +416 62 loss """crossentropy""" +416 62 regularizer """no""" +416 62 optimizer """adadelta""" +416 62 training_loop """lcwa""" +416 62 evaluator """rankbased""" +416 63 dataset """kinships""" +416 63 model """ntn""" +416 63 loss """crossentropy""" +416 63 regularizer """no""" +416 63 optimizer """adadelta""" +416 63 training_loop """lcwa""" +416 63 evaluator """rankbased""" +416 64 dataset """kinships""" +416 64 model """ntn""" +416 64 loss """crossentropy""" +416 64 regularizer """no""" +416 64 optimizer """adadelta""" +416 64 training_loop """lcwa""" +416 64 evaluator """rankbased""" +416 65 dataset """kinships""" +416 65 model """ntn""" +416 65 loss """crossentropy""" +416 65 regularizer """no""" +416 65 optimizer """adadelta""" +416 65 training_loop """lcwa""" +416 65 evaluator """rankbased""" +416 66 dataset """kinships""" +416 66 model """ntn""" +416 66 loss """crossentropy""" +416 66 regularizer """no""" +416 66 optimizer """adadelta""" 
+416 66 training_loop """lcwa""" +416 66 evaluator """rankbased""" +416 67 dataset """kinships""" +416 67 model """ntn""" +416 67 loss """crossentropy""" +416 67 regularizer """no""" +416 67 optimizer """adadelta""" +416 67 training_loop """lcwa""" +416 67 evaluator """rankbased""" +416 68 dataset """kinships""" +416 68 model """ntn""" +416 68 loss """crossentropy""" +416 68 regularizer """no""" +416 68 optimizer """adadelta""" +416 68 training_loop """lcwa""" +416 68 evaluator """rankbased""" +416 69 dataset """kinships""" +416 69 model """ntn""" +416 69 loss """crossentropy""" +416 69 regularizer """no""" +416 69 optimizer """adadelta""" +416 69 training_loop """lcwa""" +416 69 evaluator """rankbased""" +416 70 dataset """kinships""" +416 70 model """ntn""" +416 70 loss """crossentropy""" +416 70 regularizer """no""" +416 70 optimizer """adadelta""" +416 70 training_loop """lcwa""" +416 70 evaluator """rankbased""" +416 71 dataset """kinships""" +416 71 model """ntn""" +416 71 loss """crossentropy""" +416 71 regularizer """no""" +416 71 optimizer """adadelta""" +416 71 training_loop """lcwa""" +416 71 evaluator """rankbased""" +416 72 dataset """kinships""" +416 72 model """ntn""" +416 72 loss """crossentropy""" +416 72 regularizer """no""" +416 72 optimizer """adadelta""" +416 72 training_loop """lcwa""" +416 72 evaluator """rankbased""" +416 73 dataset """kinships""" +416 73 model """ntn""" +416 73 loss """crossentropy""" +416 73 regularizer """no""" +416 73 optimizer """adadelta""" +416 73 training_loop """lcwa""" +416 73 evaluator """rankbased""" +416 74 dataset """kinships""" +416 74 model """ntn""" +416 74 loss """crossentropy""" +416 74 regularizer """no""" +416 74 optimizer """adadelta""" +416 74 training_loop """lcwa""" +416 74 evaluator """rankbased""" +416 75 dataset """kinships""" +416 75 model """ntn""" +416 75 loss """crossentropy""" +416 75 regularizer """no""" +416 75 optimizer """adadelta""" +416 75 training_loop """lcwa""" +416 75 evaluator 
"""rankbased""" +416 76 dataset """kinships""" +416 76 model """ntn""" +416 76 loss """crossentropy""" +416 76 regularizer """no""" +416 76 optimizer """adadelta""" +416 76 training_loop """lcwa""" +416 76 evaluator """rankbased""" +416 77 dataset """kinships""" +416 77 model """ntn""" +416 77 loss """crossentropy""" +416 77 regularizer """no""" +416 77 optimizer """adadelta""" +416 77 training_loop """lcwa""" +416 77 evaluator """rankbased""" +416 78 dataset """kinships""" +416 78 model """ntn""" +416 78 loss """crossentropy""" +416 78 regularizer """no""" +416 78 optimizer """adadelta""" +416 78 training_loop """lcwa""" +416 78 evaluator """rankbased""" +416 79 dataset """kinships""" +416 79 model """ntn""" +416 79 loss """crossentropy""" +416 79 regularizer """no""" +416 79 optimizer """adadelta""" +416 79 training_loop """lcwa""" +416 79 evaluator """rankbased""" +416 80 dataset """kinships""" +416 80 model """ntn""" +416 80 loss """crossentropy""" +416 80 regularizer """no""" +416 80 optimizer """adadelta""" +416 80 training_loop """lcwa""" +416 80 evaluator """rankbased""" +416 81 dataset """kinships""" +416 81 model """ntn""" +416 81 loss """crossentropy""" +416 81 regularizer """no""" +416 81 optimizer """adadelta""" +416 81 training_loop """lcwa""" +416 81 evaluator """rankbased""" +416 82 dataset """kinships""" +416 82 model """ntn""" +416 82 loss """crossentropy""" +416 82 regularizer """no""" +416 82 optimizer """adadelta""" +416 82 training_loop """lcwa""" +416 82 evaluator """rankbased""" +416 83 dataset """kinships""" +416 83 model """ntn""" +416 83 loss """crossentropy""" +416 83 regularizer """no""" +416 83 optimizer """adadelta""" +416 83 training_loop """lcwa""" +416 83 evaluator """rankbased""" +416 84 dataset """kinships""" +416 84 model """ntn""" +416 84 loss """crossentropy""" +416 84 regularizer """no""" +416 84 optimizer """adadelta""" +416 84 training_loop """lcwa""" +416 84 evaluator """rankbased""" +416 85 dataset """kinships""" +416 85 
model """ntn""" +416 85 loss """crossentropy""" +416 85 regularizer """no""" +416 85 optimizer """adadelta""" +416 85 training_loop """lcwa""" +416 85 evaluator """rankbased""" +416 86 dataset """kinships""" +416 86 model """ntn""" +416 86 loss """crossentropy""" +416 86 regularizer """no""" +416 86 optimizer """adadelta""" +416 86 training_loop """lcwa""" +416 86 evaluator """rankbased""" +416 87 dataset """kinships""" +416 87 model """ntn""" +416 87 loss """crossentropy""" +416 87 regularizer """no""" +416 87 optimizer """adadelta""" +416 87 training_loop """lcwa""" +416 87 evaluator """rankbased""" +416 88 dataset """kinships""" +416 88 model """ntn""" +416 88 loss """crossentropy""" +416 88 regularizer """no""" +416 88 optimizer """adadelta""" +416 88 training_loop """lcwa""" +416 88 evaluator """rankbased""" +416 89 dataset """kinships""" +416 89 model """ntn""" +416 89 loss """crossentropy""" +416 89 regularizer """no""" +416 89 optimizer """adadelta""" +416 89 training_loop """lcwa""" +416 89 evaluator """rankbased""" +416 90 dataset """kinships""" +416 90 model """ntn""" +416 90 loss """crossentropy""" +416 90 regularizer """no""" +416 90 optimizer """adadelta""" +416 90 training_loop """lcwa""" +416 90 evaluator """rankbased""" +416 91 dataset """kinships""" +416 91 model """ntn""" +416 91 loss """crossentropy""" +416 91 regularizer """no""" +416 91 optimizer """adadelta""" +416 91 training_loop """lcwa""" +416 91 evaluator """rankbased""" +416 92 dataset """kinships""" +416 92 model """ntn""" +416 92 loss """crossentropy""" +416 92 regularizer """no""" +416 92 optimizer """adadelta""" +416 92 training_loop """lcwa""" +416 92 evaluator """rankbased""" +416 93 dataset """kinships""" +416 93 model """ntn""" +416 93 loss """crossentropy""" +416 93 regularizer """no""" +416 93 optimizer """adadelta""" +416 93 training_loop """lcwa""" +416 93 evaluator """rankbased""" +416 94 dataset """kinships""" +416 94 model """ntn""" +416 94 loss """crossentropy""" +416 94 
regularizer """no""" +416 94 optimizer """adadelta""" +416 94 training_loop """lcwa""" +416 94 evaluator """rankbased""" +416 95 dataset """kinships""" +416 95 model """ntn""" +416 95 loss """crossentropy""" +416 95 regularizer """no""" +416 95 optimizer """adadelta""" +416 95 training_loop """lcwa""" +416 95 evaluator """rankbased""" +416 96 dataset """kinships""" +416 96 model """ntn""" +416 96 loss """crossentropy""" +416 96 regularizer """no""" +416 96 optimizer """adadelta""" +416 96 training_loop """lcwa""" +416 96 evaluator """rankbased""" +416 97 dataset """kinships""" +416 97 model """ntn""" +416 97 loss """crossentropy""" +416 97 regularizer """no""" +416 97 optimizer """adadelta""" +416 97 training_loop """lcwa""" +416 97 evaluator """rankbased""" +416 98 dataset """kinships""" +416 98 model """ntn""" +416 98 loss """crossentropy""" +416 98 regularizer """no""" +416 98 optimizer """adadelta""" +416 98 training_loop """lcwa""" +416 98 evaluator """rankbased""" +416 99 dataset """kinships""" +416 99 model """ntn""" +416 99 loss """crossentropy""" +416 99 regularizer """no""" +416 99 optimizer """adadelta""" +416 99 training_loop """lcwa""" +416 99 evaluator """rankbased""" +416 100 dataset """kinships""" +416 100 model """ntn""" +416 100 loss """crossentropy""" +416 100 regularizer """no""" +416 100 optimizer """adadelta""" +416 100 training_loop """lcwa""" +416 100 evaluator """rankbased""" +417 1 model.embedding_dim 1.0 +417 1 loss.margin 21.21606978251459 +417 1 loss.adversarial_temperature 0.40575494456752303 +417 1 negative_sampler.num_negs_per_pos 81.0 +417 1 training.batch_size 0.0 +417 2 model.embedding_dim 1.0 +417 2 loss.margin 15.972414543132743 +417 2 loss.adversarial_temperature 0.4674998324126597 +417 2 negative_sampler.num_negs_per_pos 31.0 +417 2 training.batch_size 0.0 +417 3 model.embedding_dim 1.0 +417 3 loss.margin 14.06105580078971 +417 3 loss.adversarial_temperature 0.8458567347144039 +417 3 negative_sampler.num_negs_per_pos 93.0 +417 
3 training.batch_size 1.0 +417 4 model.embedding_dim 1.0 +417 4 loss.margin 3.95848562971833 +417 4 loss.adversarial_temperature 0.3048983627933477 +417 4 negative_sampler.num_negs_per_pos 46.0 +417 4 training.batch_size 0.0 +417 5 model.embedding_dim 1.0 +417 5 loss.margin 12.407497603922844 +417 5 loss.adversarial_temperature 0.5976080852134367 +417 5 negative_sampler.num_negs_per_pos 64.0 +417 5 training.batch_size 1.0 +417 6 model.embedding_dim 1.0 +417 6 loss.margin 18.743146868291106 +417 6 loss.adversarial_temperature 0.2780367339641818 +417 6 negative_sampler.num_negs_per_pos 47.0 +417 6 training.batch_size 0.0 +417 7 model.embedding_dim 1.0 +417 7 loss.margin 14.235630815756611 +417 7 loss.adversarial_temperature 0.47675702304360845 +417 7 negative_sampler.num_negs_per_pos 61.0 +417 7 training.batch_size 2.0 +417 8 model.embedding_dim 1.0 +417 8 loss.margin 6.169563604080696 +417 8 loss.adversarial_temperature 0.5042760940354817 +417 8 negative_sampler.num_negs_per_pos 1.0 +417 8 training.batch_size 1.0 +417 9 model.embedding_dim 0.0 +417 9 loss.margin 9.439780756384174 +417 9 loss.adversarial_temperature 0.6114747973833465 +417 9 negative_sampler.num_negs_per_pos 3.0 +417 9 training.batch_size 2.0 +417 10 model.embedding_dim 1.0 +417 10 loss.margin 11.94615698125338 +417 10 loss.adversarial_temperature 0.7421693158945658 +417 10 negative_sampler.num_negs_per_pos 25.0 +417 10 training.batch_size 2.0 +417 11 model.embedding_dim 0.0 +417 11 loss.margin 2.84743819626481 +417 11 loss.adversarial_temperature 0.6864638586031613 +417 11 negative_sampler.num_negs_per_pos 64.0 +417 11 training.batch_size 2.0 +417 12 model.embedding_dim 0.0 +417 12 loss.margin 28.842410347048716 +417 12 loss.adversarial_temperature 0.10672151631785953 +417 12 negative_sampler.num_negs_per_pos 70.0 +417 12 training.batch_size 1.0 +417 13 model.embedding_dim 2.0 +417 13 loss.margin 7.312635710504182 +417 13 loss.adversarial_temperature 0.5350286318204602 +417 13 
negative_sampler.num_negs_per_pos 71.0 +417 13 training.batch_size 0.0 +417 14 model.embedding_dim 0.0 +417 14 loss.margin 19.6056553232937 +417 14 loss.adversarial_temperature 0.753167798410003 +417 14 negative_sampler.num_negs_per_pos 76.0 +417 14 training.batch_size 2.0 +417 15 model.embedding_dim 2.0 +417 15 loss.margin 20.22761953888706 +417 15 loss.adversarial_temperature 0.5802128745626667 +417 15 negative_sampler.num_negs_per_pos 8.0 +417 15 training.batch_size 1.0 +417 16 model.embedding_dim 2.0 +417 16 loss.margin 28.89512433884187 +417 16 loss.adversarial_temperature 0.761997776619518 +417 16 negative_sampler.num_negs_per_pos 36.0 +417 16 training.batch_size 0.0 +417 17 model.embedding_dim 2.0 +417 17 loss.margin 19.108249609555926 +417 17 loss.adversarial_temperature 0.13925908786343263 +417 17 negative_sampler.num_negs_per_pos 9.0 +417 17 training.batch_size 2.0 +417 18 model.embedding_dim 1.0 +417 18 loss.margin 22.09755440519657 +417 18 loss.adversarial_temperature 0.7745359381991597 +417 18 negative_sampler.num_negs_per_pos 0.0 +417 18 training.batch_size 1.0 +417 19 model.embedding_dim 1.0 +417 19 loss.margin 26.671083915640367 +417 19 loss.adversarial_temperature 0.45998461512970734 +417 19 negative_sampler.num_negs_per_pos 65.0 +417 19 training.batch_size 2.0 +417 20 model.embedding_dim 2.0 +417 20 loss.margin 16.55839143693464 +417 20 loss.adversarial_temperature 0.817529263911425 +417 20 negative_sampler.num_negs_per_pos 53.0 +417 20 training.batch_size 0.0 +417 21 model.embedding_dim 2.0 +417 21 loss.margin 16.212244842998913 +417 21 loss.adversarial_temperature 0.6591789069664821 +417 21 negative_sampler.num_negs_per_pos 58.0 +417 21 training.batch_size 1.0 +417 22 model.embedding_dim 1.0 +417 22 loss.margin 5.561570214944167 +417 22 loss.adversarial_temperature 0.7594580387730979 +417 22 negative_sampler.num_negs_per_pos 59.0 +417 22 training.batch_size 2.0 +417 23 model.embedding_dim 1.0 +417 23 loss.margin 7.585265084223148 +417 23 
loss.adversarial_temperature 0.789377654660249 +417 23 negative_sampler.num_negs_per_pos 40.0 +417 23 training.batch_size 1.0 +417 24 model.embedding_dim 0.0 +417 24 loss.margin 20.96178547172291 +417 24 loss.adversarial_temperature 0.6751770895627142 +417 24 negative_sampler.num_negs_per_pos 52.0 +417 24 training.batch_size 2.0 +417 25 model.embedding_dim 2.0 +417 25 loss.margin 1.2302570607593282 +417 25 loss.adversarial_temperature 0.8144713417874582 +417 25 negative_sampler.num_negs_per_pos 79.0 +417 25 training.batch_size 0.0 +417 26 model.embedding_dim 1.0 +417 26 loss.margin 10.512332167315074 +417 26 loss.adversarial_temperature 0.6911863883498894 +417 26 negative_sampler.num_negs_per_pos 75.0 +417 26 training.batch_size 2.0 +417 27 model.embedding_dim 2.0 +417 27 loss.margin 11.627400967976724 +417 27 loss.adversarial_temperature 0.7108143390452507 +417 27 negative_sampler.num_negs_per_pos 76.0 +417 27 training.batch_size 2.0 +417 28 model.embedding_dim 0.0 +417 28 loss.margin 20.325781853633124 +417 28 loss.adversarial_temperature 0.25904423924266734 +417 28 negative_sampler.num_negs_per_pos 69.0 +417 28 training.batch_size 0.0 +417 29 model.embedding_dim 2.0 +417 29 loss.margin 20.338793260509917 +417 29 loss.adversarial_temperature 0.596205046118015 +417 29 negative_sampler.num_negs_per_pos 43.0 +417 29 training.batch_size 0.0 +417 30 model.embedding_dim 0.0 +417 30 loss.margin 6.26856511512405 +417 30 loss.adversarial_temperature 0.719779763530481 +417 30 negative_sampler.num_negs_per_pos 29.0 +417 30 training.batch_size 0.0 +417 31 model.embedding_dim 1.0 +417 31 loss.margin 12.548742743043281 +417 31 loss.adversarial_temperature 0.6297538945893117 +417 31 negative_sampler.num_negs_per_pos 4.0 +417 31 training.batch_size 2.0 +417 32 model.embedding_dim 1.0 +417 32 loss.margin 11.803680679899292 +417 32 loss.adversarial_temperature 0.8013765825944403 +417 32 negative_sampler.num_negs_per_pos 22.0 +417 32 training.batch_size 2.0 +417 33 
model.embedding_dim 1.0 +417 33 loss.margin 27.749835234925932 +417 33 loss.adversarial_temperature 0.579296678851802 +417 33 negative_sampler.num_negs_per_pos 64.0 +417 33 training.batch_size 2.0 +417 34 model.embedding_dim 0.0 +417 34 loss.margin 8.217354636034898 +417 34 loss.adversarial_temperature 0.7191447589572426 +417 34 negative_sampler.num_negs_per_pos 38.0 +417 34 training.batch_size 0.0 +417 35 model.embedding_dim 2.0 +417 35 loss.margin 10.231043611026161 +417 35 loss.adversarial_temperature 0.23542958600930833 +417 35 negative_sampler.num_negs_per_pos 73.0 +417 35 training.batch_size 0.0 +417 36 model.embedding_dim 1.0 +417 36 loss.margin 7.459350706587763 +417 36 loss.adversarial_temperature 0.17559558908674094 +417 36 negative_sampler.num_negs_per_pos 37.0 +417 36 training.batch_size 1.0 +417 37 model.embedding_dim 0.0 +417 37 loss.margin 1.7350424171185777 +417 37 loss.adversarial_temperature 0.7987428898030893 +417 37 negative_sampler.num_negs_per_pos 84.0 +417 37 training.batch_size 0.0 +417 38 model.embedding_dim 2.0 +417 38 loss.margin 6.261269515748966 +417 38 loss.adversarial_temperature 0.5373319919730248 +417 38 negative_sampler.num_negs_per_pos 7.0 +417 38 training.batch_size 2.0 +417 39 model.embedding_dim 0.0 +417 39 loss.margin 23.720369010554094 +417 39 loss.adversarial_temperature 0.6609286943498295 +417 39 negative_sampler.num_negs_per_pos 73.0 +417 39 training.batch_size 2.0 +417 40 model.embedding_dim 1.0 +417 40 loss.margin 28.586001743859896 +417 40 loss.adversarial_temperature 0.5180836669973857 +417 40 negative_sampler.num_negs_per_pos 37.0 +417 40 training.batch_size 2.0 +417 41 model.embedding_dim 1.0 +417 41 loss.margin 16.41159587629181 +417 41 loss.adversarial_temperature 0.3211935853420484 +417 41 negative_sampler.num_negs_per_pos 2.0 +417 41 training.batch_size 2.0 +417 42 model.embedding_dim 0.0 +417 42 loss.margin 11.141462065709879 +417 42 loss.adversarial_temperature 0.7787007776787429 +417 42 
negative_sampler.num_negs_per_pos 94.0 +417 42 training.batch_size 2.0 +417 43 model.embedding_dim 1.0 +417 43 loss.margin 5.0388357189938295 +417 43 loss.adversarial_temperature 0.867627272749681 +417 43 negative_sampler.num_negs_per_pos 99.0 +417 43 training.batch_size 2.0 +417 44 model.embedding_dim 1.0 +417 44 loss.margin 13.531683087981463 +417 44 loss.adversarial_temperature 0.18412256390787363 +417 44 negative_sampler.num_negs_per_pos 47.0 +417 44 training.batch_size 1.0 +417 45 model.embedding_dim 0.0 +417 45 loss.margin 5.000623581257232 +417 45 loss.adversarial_temperature 0.7403917721365983 +417 45 negative_sampler.num_negs_per_pos 39.0 +417 45 training.batch_size 1.0 +417 46 model.embedding_dim 0.0 +417 46 loss.margin 1.4221615626392632 +417 46 loss.adversarial_temperature 0.22547030003683552 +417 46 negative_sampler.num_negs_per_pos 38.0 +417 46 training.batch_size 0.0 +417 47 model.embedding_dim 2.0 +417 47 loss.margin 16.573713439665113 +417 47 loss.adversarial_temperature 0.3293855609560454 +417 47 negative_sampler.num_negs_per_pos 25.0 +417 47 training.batch_size 1.0 +417 48 model.embedding_dim 2.0 +417 48 loss.margin 15.279615811711007 +417 48 loss.adversarial_temperature 0.8897102770173104 +417 48 negative_sampler.num_negs_per_pos 64.0 +417 48 training.batch_size 0.0 +417 49 model.embedding_dim 2.0 +417 49 loss.margin 3.1582984221130164 +417 49 loss.adversarial_temperature 0.1212533204884944 +417 49 negative_sampler.num_negs_per_pos 48.0 +417 49 training.batch_size 0.0 +417 50 model.embedding_dim 1.0 +417 50 loss.margin 21.744731871714915 +417 50 loss.adversarial_temperature 0.9026879954756107 +417 50 negative_sampler.num_negs_per_pos 37.0 +417 50 training.batch_size 0.0 +417 51 model.embedding_dim 0.0 +417 51 loss.margin 6.612564139966659 +417 51 loss.adversarial_temperature 0.1837726578650587 +417 51 negative_sampler.num_negs_per_pos 47.0 +417 51 training.batch_size 2.0 +417 52 model.embedding_dim 2.0 +417 52 loss.margin 23.37977548347295 +417 
52 loss.adversarial_temperature 0.46848635220221646 +417 52 negative_sampler.num_negs_per_pos 67.0 +417 52 training.batch_size 2.0 +417 53 model.embedding_dim 0.0 +417 53 loss.margin 28.610464220597244 +417 53 loss.adversarial_temperature 0.11239628174680177 +417 53 negative_sampler.num_negs_per_pos 70.0 +417 53 training.batch_size 1.0 +417 54 model.embedding_dim 0.0 +417 54 loss.margin 29.316886274279852 +417 54 loss.adversarial_temperature 0.29136189937925006 +417 54 negative_sampler.num_negs_per_pos 47.0 +417 54 training.batch_size 1.0 +417 55 model.embedding_dim 1.0 +417 55 loss.margin 8.196994064629441 +417 55 loss.adversarial_temperature 0.6825442031948578 +417 55 negative_sampler.num_negs_per_pos 8.0 +417 55 training.batch_size 2.0 +417 56 model.embedding_dim 0.0 +417 56 loss.margin 17.739441264664496 +417 56 loss.adversarial_temperature 0.9955843018579934 +417 56 negative_sampler.num_negs_per_pos 24.0 +417 56 training.batch_size 0.0 +417 57 model.embedding_dim 1.0 +417 57 loss.margin 25.783243445907246 +417 57 loss.adversarial_temperature 0.934106314609707 +417 57 negative_sampler.num_negs_per_pos 1.0 +417 57 training.batch_size 0.0 +417 58 model.embedding_dim 1.0 +417 58 loss.margin 1.4956854642519282 +417 58 loss.adversarial_temperature 0.5736568018753744 +417 58 negative_sampler.num_negs_per_pos 70.0 +417 58 training.batch_size 2.0 +417 59 model.embedding_dim 1.0 +417 59 loss.margin 4.982143151652117 +417 59 loss.adversarial_temperature 0.8644572790034585 +417 59 negative_sampler.num_negs_per_pos 39.0 +417 59 training.batch_size 0.0 +417 60 model.embedding_dim 1.0 +417 60 loss.margin 2.1373615665677574 +417 60 loss.adversarial_temperature 0.12839964945892385 +417 60 negative_sampler.num_negs_per_pos 47.0 +417 60 training.batch_size 1.0 +417 61 model.embedding_dim 1.0 +417 61 loss.margin 19.69079029532197 +417 61 loss.adversarial_temperature 0.7103854589945486 +417 61 negative_sampler.num_negs_per_pos 53.0 +417 61 training.batch_size 1.0 +417 62 
model.embedding_dim 0.0 +417 62 loss.margin 20.66812188507118 +417 62 loss.adversarial_temperature 0.7630328131179679 +417 62 negative_sampler.num_negs_per_pos 11.0 +417 62 training.batch_size 1.0 +417 63 model.embedding_dim 1.0 +417 63 loss.margin 29.983955985687647 +417 63 loss.adversarial_temperature 0.3432980123456434 +417 63 negative_sampler.num_negs_per_pos 1.0 +417 63 training.batch_size 1.0 +417 64 model.embedding_dim 1.0 +417 64 loss.margin 10.150135548482169 +417 64 loss.adversarial_temperature 0.9160088151080187 +417 64 negative_sampler.num_negs_per_pos 48.0 +417 64 training.batch_size 2.0 +417 65 model.embedding_dim 0.0 +417 65 loss.margin 4.087119445804454 +417 65 loss.adversarial_temperature 0.7916550743921028 +417 65 negative_sampler.num_negs_per_pos 14.0 +417 65 training.batch_size 2.0 +417 66 model.embedding_dim 1.0 +417 66 loss.margin 21.215029750451695 +417 66 loss.adversarial_temperature 0.6722178059467007 +417 66 negative_sampler.num_negs_per_pos 55.0 +417 66 training.batch_size 0.0 +417 67 model.embedding_dim 1.0 +417 67 loss.margin 18.45021327176413 +417 67 loss.adversarial_temperature 0.4103988281192966 +417 67 negative_sampler.num_negs_per_pos 3.0 +417 67 training.batch_size 1.0 +417 68 model.embedding_dim 0.0 +417 68 loss.margin 15.041638847915433 +417 68 loss.adversarial_temperature 0.22034041041736518 +417 68 negative_sampler.num_negs_per_pos 3.0 +417 68 training.batch_size 0.0 +417 69 model.embedding_dim 2.0 +417 69 loss.margin 25.59790088187105 +417 69 loss.adversarial_temperature 0.9372351726966878 +417 69 negative_sampler.num_negs_per_pos 26.0 +417 69 training.batch_size 1.0 +417 70 model.embedding_dim 2.0 +417 70 loss.margin 22.419537709298222 +417 70 loss.adversarial_temperature 0.6689665468260126 +417 70 negative_sampler.num_negs_per_pos 42.0 +417 70 training.batch_size 2.0 +417 71 model.embedding_dim 2.0 +417 71 loss.margin 11.13767255609044 +417 71 loss.adversarial_temperature 0.45339639443091917 +417 71 
negative_sampler.num_negs_per_pos 1.0 +417 71 training.batch_size 0.0 +417 72 model.embedding_dim 1.0 +417 72 loss.margin 12.016509576495647 +417 72 loss.adversarial_temperature 0.1212778481539099 +417 72 negative_sampler.num_negs_per_pos 90.0 +417 72 training.batch_size 1.0 +417 73 model.embedding_dim 2.0 +417 73 loss.margin 16.495317326304612 +417 73 loss.adversarial_temperature 0.4625586832800769 +417 73 negative_sampler.num_negs_per_pos 49.0 +417 73 training.batch_size 1.0 +417 74 model.embedding_dim 1.0 +417 74 loss.margin 2.889565351612676 +417 74 loss.adversarial_temperature 0.39654016422203364 +417 74 negative_sampler.num_negs_per_pos 35.0 +417 74 training.batch_size 2.0 +417 75 model.embedding_dim 0.0 +417 75 loss.margin 27.03038739471928 +417 75 loss.adversarial_temperature 0.5725028738054634 +417 75 negative_sampler.num_negs_per_pos 42.0 +417 75 training.batch_size 0.0 +417 76 model.embedding_dim 0.0 +417 76 loss.margin 13.703296170047565 +417 76 loss.adversarial_temperature 0.6289461597287057 +417 76 negative_sampler.num_negs_per_pos 7.0 +417 76 training.batch_size 1.0 +417 77 model.embedding_dim 1.0 +417 77 loss.margin 20.753175027464845 +417 77 loss.adversarial_temperature 0.9481114553250253 +417 77 negative_sampler.num_negs_per_pos 85.0 +417 77 training.batch_size 0.0 +417 78 model.embedding_dim 2.0 +417 78 loss.margin 18.694676799298815 +417 78 loss.adversarial_temperature 0.14120207015914454 +417 78 negative_sampler.num_negs_per_pos 90.0 +417 78 training.batch_size 0.0 +417 1 dataset """kinships""" +417 1 model """ntn""" +417 1 loss """nssa""" +417 1 regularizer """no""" +417 1 optimizer """adadelta""" +417 1 training_loop """owa""" +417 1 negative_sampler """basic""" +417 1 evaluator """rankbased""" +417 2 dataset """kinships""" +417 2 model """ntn""" +417 2 loss """nssa""" +417 2 regularizer """no""" +417 2 optimizer """adadelta""" +417 2 training_loop """owa""" +417 2 negative_sampler """basic""" +417 2 evaluator """rankbased""" +417 3 dataset 
"""kinships""" +417 3 model """ntn""" +417 3 loss """nssa""" +417 3 regularizer """no""" +417 3 optimizer """adadelta""" +417 3 training_loop """owa""" +417 3 negative_sampler """basic""" +417 3 evaluator """rankbased""" +417 4 dataset """kinships""" +417 4 model """ntn""" +417 4 loss """nssa""" +417 4 regularizer """no""" +417 4 optimizer """adadelta""" +417 4 training_loop """owa""" +417 4 negative_sampler """basic""" +417 4 evaluator """rankbased""" +417 5 dataset """kinships""" +417 5 model """ntn""" +417 5 loss """nssa""" +417 5 regularizer """no""" +417 5 optimizer """adadelta""" +417 5 training_loop """owa""" +417 5 negative_sampler """basic""" +417 5 evaluator """rankbased""" +417 6 dataset """kinships""" +417 6 model """ntn""" +417 6 loss """nssa""" +417 6 regularizer """no""" +417 6 optimizer """adadelta""" +417 6 training_loop """owa""" +417 6 negative_sampler """basic""" +417 6 evaluator """rankbased""" +417 7 dataset """kinships""" +417 7 model """ntn""" +417 7 loss """nssa""" +417 7 regularizer """no""" +417 7 optimizer """adadelta""" +417 7 training_loop """owa""" +417 7 negative_sampler """basic""" +417 7 evaluator """rankbased""" +417 8 dataset """kinships""" +417 8 model """ntn""" +417 8 loss """nssa""" +417 8 regularizer """no""" +417 8 optimizer """adadelta""" +417 8 training_loop """owa""" +417 8 negative_sampler """basic""" +417 8 evaluator """rankbased""" +417 9 dataset """kinships""" +417 9 model """ntn""" +417 9 loss """nssa""" +417 9 regularizer """no""" +417 9 optimizer """adadelta""" +417 9 training_loop """owa""" +417 9 negative_sampler """basic""" +417 9 evaluator """rankbased""" +417 10 dataset """kinships""" +417 10 model """ntn""" +417 10 loss """nssa""" +417 10 regularizer """no""" +417 10 optimizer """adadelta""" +417 10 training_loop """owa""" +417 10 negative_sampler """basic""" +417 10 evaluator """rankbased""" +417 11 dataset """kinships""" +417 11 model """ntn""" +417 11 loss """nssa""" +417 11 regularizer """no""" +417 11 
optimizer """adadelta""" +417 11 training_loop """owa""" +417 11 negative_sampler """basic""" +417 11 evaluator """rankbased""" +417 12 dataset """kinships""" +417 12 model """ntn""" +417 12 loss """nssa""" +417 12 regularizer """no""" +417 12 optimizer """adadelta""" +417 12 training_loop """owa""" +417 12 negative_sampler """basic""" +417 12 evaluator """rankbased""" +417 13 dataset """kinships""" +417 13 model """ntn""" +417 13 loss """nssa""" +417 13 regularizer """no""" +417 13 optimizer """adadelta""" +417 13 training_loop """owa""" +417 13 negative_sampler """basic""" +417 13 evaluator """rankbased""" +417 14 dataset """kinships""" +417 14 model """ntn""" +417 14 loss """nssa""" +417 14 regularizer """no""" +417 14 optimizer """adadelta""" +417 14 training_loop """owa""" +417 14 negative_sampler """basic""" +417 14 evaluator """rankbased""" +417 15 dataset """kinships""" +417 15 model """ntn""" +417 15 loss """nssa""" +417 15 regularizer """no""" +417 15 optimizer """adadelta""" +417 15 training_loop """owa""" +417 15 negative_sampler """basic""" +417 15 evaluator """rankbased""" +417 16 dataset """kinships""" +417 16 model """ntn""" +417 16 loss """nssa""" +417 16 regularizer """no""" +417 16 optimizer """adadelta""" +417 16 training_loop """owa""" +417 16 negative_sampler """basic""" +417 16 evaluator """rankbased""" +417 17 dataset """kinships""" +417 17 model """ntn""" +417 17 loss """nssa""" +417 17 regularizer """no""" +417 17 optimizer """adadelta""" +417 17 training_loop """owa""" +417 17 negative_sampler """basic""" +417 17 evaluator """rankbased""" +417 18 dataset """kinships""" +417 18 model """ntn""" +417 18 loss """nssa""" +417 18 regularizer """no""" +417 18 optimizer """adadelta""" +417 18 training_loop """owa""" +417 18 negative_sampler """basic""" +417 18 evaluator """rankbased""" +417 19 dataset """kinships""" +417 19 model """ntn""" +417 19 loss """nssa""" +417 19 regularizer """no""" +417 19 optimizer """adadelta""" +417 19 training_loop 
"""owa""" +417 19 negative_sampler """basic""" +417 19 evaluator """rankbased""" +417 20 dataset """kinships""" +417 20 model """ntn""" +417 20 loss """nssa""" +417 20 regularizer """no""" +417 20 optimizer """adadelta""" +417 20 training_loop """owa""" +417 20 negative_sampler """basic""" +417 20 evaluator """rankbased""" +417 21 dataset """kinships""" +417 21 model """ntn""" +417 21 loss """nssa""" +417 21 regularizer """no""" +417 21 optimizer """adadelta""" +417 21 training_loop """owa""" +417 21 negative_sampler """basic""" +417 21 evaluator """rankbased""" +417 22 dataset """kinships""" +417 22 model """ntn""" +417 22 loss """nssa""" +417 22 regularizer """no""" +417 22 optimizer """adadelta""" +417 22 training_loop """owa""" +417 22 negative_sampler """basic""" +417 22 evaluator """rankbased""" +417 23 dataset """kinships""" +417 23 model """ntn""" +417 23 loss """nssa""" +417 23 regularizer """no""" +417 23 optimizer """adadelta""" +417 23 training_loop """owa""" +417 23 negative_sampler """basic""" +417 23 evaluator """rankbased""" +417 24 dataset """kinships""" +417 24 model """ntn""" +417 24 loss """nssa""" +417 24 regularizer """no""" +417 24 optimizer """adadelta""" +417 24 training_loop """owa""" +417 24 negative_sampler """basic""" +417 24 evaluator """rankbased""" +417 25 dataset """kinships""" +417 25 model """ntn""" +417 25 loss """nssa""" +417 25 regularizer """no""" +417 25 optimizer """adadelta""" +417 25 training_loop """owa""" +417 25 negative_sampler """basic""" +417 25 evaluator """rankbased""" +417 26 dataset """kinships""" +417 26 model """ntn""" +417 26 loss """nssa""" +417 26 regularizer """no""" +417 26 optimizer """adadelta""" +417 26 training_loop """owa""" +417 26 negative_sampler """basic""" +417 26 evaluator """rankbased""" +417 27 dataset """kinships""" +417 27 model """ntn""" +417 27 loss """nssa""" +417 27 regularizer """no""" +417 27 optimizer """adadelta""" +417 27 training_loop """owa""" +417 27 negative_sampler """basic""" 
+417 27 evaluator """rankbased""" +417 28 dataset """kinships""" +417 28 model """ntn""" +417 28 loss """nssa""" +417 28 regularizer """no""" +417 28 optimizer """adadelta""" +417 28 training_loop """owa""" +417 28 negative_sampler """basic""" +417 28 evaluator """rankbased""" +417 29 dataset """kinships""" +417 29 model """ntn""" +417 29 loss """nssa""" +417 29 regularizer """no""" +417 29 optimizer """adadelta""" +417 29 training_loop """owa""" +417 29 negative_sampler """basic""" +417 29 evaluator """rankbased""" +417 30 dataset """kinships""" +417 30 model """ntn""" +417 30 loss """nssa""" +417 30 regularizer """no""" +417 30 optimizer """adadelta""" +417 30 training_loop """owa""" +417 30 negative_sampler """basic""" +417 30 evaluator """rankbased""" +417 31 dataset """kinships""" +417 31 model """ntn""" +417 31 loss """nssa""" +417 31 regularizer """no""" +417 31 optimizer """adadelta""" +417 31 training_loop """owa""" +417 31 negative_sampler """basic""" +417 31 evaluator """rankbased""" +417 32 dataset """kinships""" +417 32 model """ntn""" +417 32 loss """nssa""" +417 32 regularizer """no""" +417 32 optimizer """adadelta""" +417 32 training_loop """owa""" +417 32 negative_sampler """basic""" +417 32 evaluator """rankbased""" +417 33 dataset """kinships""" +417 33 model """ntn""" +417 33 loss """nssa""" +417 33 regularizer """no""" +417 33 optimizer """adadelta""" +417 33 training_loop """owa""" +417 33 negative_sampler """basic""" +417 33 evaluator """rankbased""" +417 34 dataset """kinships""" +417 34 model """ntn""" +417 34 loss """nssa""" +417 34 regularizer """no""" +417 34 optimizer """adadelta""" +417 34 training_loop """owa""" +417 34 negative_sampler """basic""" +417 34 evaluator """rankbased""" +417 35 dataset """kinships""" +417 35 model """ntn""" +417 35 loss """nssa""" +417 35 regularizer """no""" +417 35 optimizer """adadelta""" +417 35 training_loop """owa""" +417 35 negative_sampler """basic""" +417 35 evaluator """rankbased""" +417 36 
dataset """kinships""" +417 36 model """ntn""" +417 36 loss """nssa""" +417 36 regularizer """no""" +417 36 optimizer """adadelta""" +417 36 training_loop """owa""" +417 36 negative_sampler """basic""" +417 36 evaluator """rankbased""" +417 37 dataset """kinships""" +417 37 model """ntn""" +417 37 loss """nssa""" +417 37 regularizer """no""" +417 37 optimizer """adadelta""" +417 37 training_loop """owa""" +417 37 negative_sampler """basic""" +417 37 evaluator """rankbased""" +417 38 dataset """kinships""" +417 38 model """ntn""" +417 38 loss """nssa""" +417 38 regularizer """no""" +417 38 optimizer """adadelta""" +417 38 training_loop """owa""" +417 38 negative_sampler """basic""" +417 38 evaluator """rankbased""" +417 39 dataset """kinships""" +417 39 model """ntn""" +417 39 loss """nssa""" +417 39 regularizer """no""" +417 39 optimizer """adadelta""" +417 39 training_loop """owa""" +417 39 negative_sampler """basic""" +417 39 evaluator """rankbased""" +417 40 dataset """kinships""" +417 40 model """ntn""" +417 40 loss """nssa""" +417 40 regularizer """no""" +417 40 optimizer """adadelta""" +417 40 training_loop """owa""" +417 40 negative_sampler """basic""" +417 40 evaluator """rankbased""" +417 41 dataset """kinships""" +417 41 model """ntn""" +417 41 loss """nssa""" +417 41 regularizer """no""" +417 41 optimizer """adadelta""" +417 41 training_loop """owa""" +417 41 negative_sampler """basic""" +417 41 evaluator """rankbased""" +417 42 dataset """kinships""" +417 42 model """ntn""" +417 42 loss """nssa""" +417 42 regularizer """no""" +417 42 optimizer """adadelta""" +417 42 training_loop """owa""" +417 42 negative_sampler """basic""" +417 42 evaluator """rankbased""" +417 43 dataset """kinships""" +417 43 model """ntn""" +417 43 loss """nssa""" +417 43 regularizer """no""" +417 43 optimizer """adadelta""" +417 43 training_loop """owa""" +417 43 negative_sampler """basic""" +417 43 evaluator """rankbased""" +417 44 dataset """kinships""" +417 44 model """ntn""" 
+417 44 loss """nssa""" +417 44 regularizer """no""" +417 44 optimizer """adadelta""" +417 44 training_loop """owa""" +417 44 negative_sampler """basic""" +417 44 evaluator """rankbased""" +417 45 dataset """kinships""" +417 45 model """ntn""" +417 45 loss """nssa""" +417 45 regularizer """no""" +417 45 optimizer """adadelta""" +417 45 training_loop """owa""" +417 45 negative_sampler """basic""" +417 45 evaluator """rankbased""" +417 46 dataset """kinships""" +417 46 model """ntn""" +417 46 loss """nssa""" +417 46 regularizer """no""" +417 46 optimizer """adadelta""" +417 46 training_loop """owa""" +417 46 negative_sampler """basic""" +417 46 evaluator """rankbased""" +417 47 dataset """kinships""" +417 47 model """ntn""" +417 47 loss """nssa""" +417 47 regularizer """no""" +417 47 optimizer """adadelta""" +417 47 training_loop """owa""" +417 47 negative_sampler """basic""" +417 47 evaluator """rankbased""" +417 48 dataset """kinships""" +417 48 model """ntn""" +417 48 loss """nssa""" +417 48 regularizer """no""" +417 48 optimizer """adadelta""" +417 48 training_loop """owa""" +417 48 negative_sampler """basic""" +417 48 evaluator """rankbased""" +417 49 dataset """kinships""" +417 49 model """ntn""" +417 49 loss """nssa""" +417 49 regularizer """no""" +417 49 optimizer """adadelta""" +417 49 training_loop """owa""" +417 49 negative_sampler """basic""" +417 49 evaluator """rankbased""" +417 50 dataset """kinships""" +417 50 model """ntn""" +417 50 loss """nssa""" +417 50 regularizer """no""" +417 50 optimizer """adadelta""" +417 50 training_loop """owa""" +417 50 negative_sampler """basic""" +417 50 evaluator """rankbased""" +417 51 dataset """kinships""" +417 51 model """ntn""" +417 51 loss """nssa""" +417 51 regularizer """no""" +417 51 optimizer """adadelta""" +417 51 training_loop """owa""" +417 51 negative_sampler """basic""" +417 51 evaluator """rankbased""" +417 52 dataset """kinships""" +417 52 model """ntn""" +417 52 loss """nssa""" +417 52 regularizer 
"""no""" +417 52 optimizer """adadelta""" +417 52 training_loop """owa""" +417 52 negative_sampler """basic""" +417 52 evaluator """rankbased""" +417 53 dataset """kinships""" +417 53 model """ntn""" +417 53 loss """nssa""" +417 53 regularizer """no""" +417 53 optimizer """adadelta""" +417 53 training_loop """owa""" +417 53 negative_sampler """basic""" +417 53 evaluator """rankbased""" +417 54 dataset """kinships""" +417 54 model """ntn""" +417 54 loss """nssa""" +417 54 regularizer """no""" +417 54 optimizer """adadelta""" +417 54 training_loop """owa""" +417 54 negative_sampler """basic""" +417 54 evaluator """rankbased""" +417 55 dataset """kinships""" +417 55 model """ntn""" +417 55 loss """nssa""" +417 55 regularizer """no""" +417 55 optimizer """adadelta""" +417 55 training_loop """owa""" +417 55 negative_sampler """basic""" +417 55 evaluator """rankbased""" +417 56 dataset """kinships""" +417 56 model """ntn""" +417 56 loss """nssa""" +417 56 regularizer """no""" +417 56 optimizer """adadelta""" +417 56 training_loop """owa""" +417 56 negative_sampler """basic""" +417 56 evaluator """rankbased""" +417 57 dataset """kinships""" +417 57 model """ntn""" +417 57 loss """nssa""" +417 57 regularizer """no""" +417 57 optimizer """adadelta""" +417 57 training_loop """owa""" +417 57 negative_sampler """basic""" +417 57 evaluator """rankbased""" +417 58 dataset """kinships""" +417 58 model """ntn""" +417 58 loss """nssa""" +417 58 regularizer """no""" +417 58 optimizer """adadelta""" +417 58 training_loop """owa""" +417 58 negative_sampler """basic""" +417 58 evaluator """rankbased""" +417 59 dataset """kinships""" +417 59 model """ntn""" +417 59 loss """nssa""" +417 59 regularizer """no""" +417 59 optimizer """adadelta""" +417 59 training_loop """owa""" +417 59 negative_sampler """basic""" +417 59 evaluator """rankbased""" +417 60 dataset """kinships""" +417 60 model """ntn""" +417 60 loss """nssa""" +417 60 regularizer """no""" +417 60 optimizer """adadelta""" +417 
60 training_loop """owa""" +417 60 negative_sampler """basic""" +417 60 evaluator """rankbased""" +417 61 dataset """kinships""" +417 61 model """ntn""" +417 61 loss """nssa""" +417 61 regularizer """no""" +417 61 optimizer """adadelta""" +417 61 training_loop """owa""" +417 61 negative_sampler """basic""" +417 61 evaluator """rankbased""" +417 62 dataset """kinships""" +417 62 model """ntn""" +417 62 loss """nssa""" +417 62 regularizer """no""" +417 62 optimizer """adadelta""" +417 62 training_loop """owa""" +417 62 negative_sampler """basic""" +417 62 evaluator """rankbased""" +417 63 dataset """kinships""" +417 63 model """ntn""" +417 63 loss """nssa""" +417 63 regularizer """no""" +417 63 optimizer """adadelta""" +417 63 training_loop """owa""" +417 63 negative_sampler """basic""" +417 63 evaluator """rankbased""" +417 64 dataset """kinships""" +417 64 model """ntn""" +417 64 loss """nssa""" +417 64 regularizer """no""" +417 64 optimizer """adadelta""" +417 64 training_loop """owa""" +417 64 negative_sampler """basic""" +417 64 evaluator """rankbased""" +417 65 dataset """kinships""" +417 65 model """ntn""" +417 65 loss """nssa""" +417 65 regularizer """no""" +417 65 optimizer """adadelta""" +417 65 training_loop """owa""" +417 65 negative_sampler """basic""" +417 65 evaluator """rankbased""" +417 66 dataset """kinships""" +417 66 model """ntn""" +417 66 loss """nssa""" +417 66 regularizer """no""" +417 66 optimizer """adadelta""" +417 66 training_loop """owa""" +417 66 negative_sampler """basic""" +417 66 evaluator """rankbased""" +417 67 dataset """kinships""" +417 67 model """ntn""" +417 67 loss """nssa""" +417 67 regularizer """no""" +417 67 optimizer """adadelta""" +417 67 training_loop """owa""" +417 67 negative_sampler """basic""" +417 67 evaluator """rankbased""" +417 68 dataset """kinships""" +417 68 model """ntn""" +417 68 loss """nssa""" +417 68 regularizer """no""" +417 68 optimizer """adadelta""" +417 68 training_loop """owa""" +417 68 
negative_sampler """basic""" +417 68 evaluator """rankbased""" +417 69 dataset """kinships""" +417 69 model """ntn""" +417 69 loss """nssa""" +417 69 regularizer """no""" +417 69 optimizer """adadelta""" +417 69 training_loop """owa""" +417 69 negative_sampler """basic""" +417 69 evaluator """rankbased""" +417 70 dataset """kinships""" +417 70 model """ntn""" +417 70 loss """nssa""" +417 70 regularizer """no""" +417 70 optimizer """adadelta""" +417 70 training_loop """owa""" +417 70 negative_sampler """basic""" +417 70 evaluator """rankbased""" +417 71 dataset """kinships""" +417 71 model """ntn""" +417 71 loss """nssa""" +417 71 regularizer """no""" +417 71 optimizer """adadelta""" +417 71 training_loop """owa""" +417 71 negative_sampler """basic""" +417 71 evaluator """rankbased""" +417 72 dataset """kinships""" +417 72 model """ntn""" +417 72 loss """nssa""" +417 72 regularizer """no""" +417 72 optimizer """adadelta""" +417 72 training_loop """owa""" +417 72 negative_sampler """basic""" +417 72 evaluator """rankbased""" +417 73 dataset """kinships""" +417 73 model """ntn""" +417 73 loss """nssa""" +417 73 regularizer """no""" +417 73 optimizer """adadelta""" +417 73 training_loop """owa""" +417 73 negative_sampler """basic""" +417 73 evaluator """rankbased""" +417 74 dataset """kinships""" +417 74 model """ntn""" +417 74 loss """nssa""" +417 74 regularizer """no""" +417 74 optimizer """adadelta""" +417 74 training_loop """owa""" +417 74 negative_sampler """basic""" +417 74 evaluator """rankbased""" +417 75 dataset """kinships""" +417 75 model """ntn""" +417 75 loss """nssa""" +417 75 regularizer """no""" +417 75 optimizer """adadelta""" +417 75 training_loop """owa""" +417 75 negative_sampler """basic""" +417 75 evaluator """rankbased""" +417 76 dataset """kinships""" +417 76 model """ntn""" +417 76 loss """nssa""" +417 76 regularizer """no""" +417 76 optimizer """adadelta""" +417 76 training_loop """owa""" +417 76 negative_sampler """basic""" +417 76 evaluator 
"""rankbased""" +417 77 dataset """kinships""" +417 77 model """ntn""" +417 77 loss """nssa""" +417 77 regularizer """no""" +417 77 optimizer """adadelta""" +417 77 training_loop """owa""" +417 77 negative_sampler """basic""" +417 77 evaluator """rankbased""" +417 78 dataset """kinships""" +417 78 model """ntn""" +417 78 loss """nssa""" +417 78 regularizer """no""" +417 78 optimizer """adadelta""" +417 78 training_loop """owa""" +417 78 negative_sampler """basic""" +417 78 evaluator """rankbased""" +418 1 model.embedding_dim 1.0 +418 1 loss.margin 17.861328903359265 +418 1 loss.adversarial_temperature 0.6111590617714777 +418 1 negative_sampler.num_negs_per_pos 45.0 +418 1 training.batch_size 1.0 +418 2 model.embedding_dim 1.0 +418 2 loss.margin 25.890412413493895 +418 2 loss.adversarial_temperature 0.5548039270110405 +418 2 negative_sampler.num_negs_per_pos 57.0 +418 2 training.batch_size 1.0 +418 3 model.embedding_dim 2.0 +418 3 loss.margin 9.498216914837068 +418 3 loss.adversarial_temperature 0.6565254408586656 +418 3 negative_sampler.num_negs_per_pos 36.0 +418 3 training.batch_size 0.0 +418 4 model.embedding_dim 2.0 +418 4 loss.margin 15.51171506184079 +418 4 loss.adversarial_temperature 0.6612248509205282 +418 4 negative_sampler.num_negs_per_pos 28.0 +418 4 training.batch_size 0.0 +418 5 model.embedding_dim 2.0 +418 5 loss.margin 7.224573429693004 +418 5 loss.adversarial_temperature 0.6864809424557592 +418 5 negative_sampler.num_negs_per_pos 55.0 +418 5 training.batch_size 0.0 +418 6 model.embedding_dim 2.0 +418 6 loss.margin 20.673728589078358 +418 6 loss.adversarial_temperature 0.42610702128291544 +418 6 negative_sampler.num_negs_per_pos 53.0 +418 6 training.batch_size 2.0 +418 7 model.embedding_dim 2.0 +418 7 loss.margin 27.06645128309744 +418 7 loss.adversarial_temperature 0.23559887388236211 +418 7 negative_sampler.num_negs_per_pos 60.0 +418 7 training.batch_size 2.0 +418 8 model.embedding_dim 1.0 +418 8 loss.margin 1.3352491607959256 +418 8 
loss.adversarial_temperature 0.10580609327046192 +418 8 negative_sampler.num_negs_per_pos 21.0 +418 8 training.batch_size 1.0 +418 9 model.embedding_dim 2.0 +418 9 loss.margin 2.3036841238619425 +418 9 loss.adversarial_temperature 0.6009577601279482 +418 9 negative_sampler.num_negs_per_pos 58.0 +418 9 training.batch_size 0.0 +418 10 model.embedding_dim 2.0 +418 10 loss.margin 18.780854294704575 +418 10 loss.adversarial_temperature 0.78587898039511 +418 10 negative_sampler.num_negs_per_pos 40.0 +418 10 training.batch_size 2.0 +418 11 model.embedding_dim 2.0 +418 11 loss.margin 6.292730233290635 +418 11 loss.adversarial_temperature 0.5256500822182506 +418 11 negative_sampler.num_negs_per_pos 68.0 +418 11 training.batch_size 2.0 +418 12 model.embedding_dim 2.0 +418 12 loss.margin 16.528234291555773 +418 12 loss.adversarial_temperature 0.6926988602923758 +418 12 negative_sampler.num_negs_per_pos 84.0 +418 12 training.batch_size 2.0 +418 13 model.embedding_dim 0.0 +418 13 loss.margin 18.15651788168249 +418 13 loss.adversarial_temperature 0.6680370847200671 +418 13 negative_sampler.num_negs_per_pos 7.0 +418 13 training.batch_size 2.0 +418 14 model.embedding_dim 0.0 +418 14 loss.margin 28.091248146622547 +418 14 loss.adversarial_temperature 0.48369483626739873 +418 14 negative_sampler.num_negs_per_pos 67.0 +418 14 training.batch_size 0.0 +418 15 model.embedding_dim 1.0 +418 15 loss.margin 4.816899344029034 +418 15 loss.adversarial_temperature 0.13063848139481968 +418 15 negative_sampler.num_negs_per_pos 80.0 +418 15 training.batch_size 0.0 +418 16 model.embedding_dim 2.0 +418 16 loss.margin 8.460550448646508 +418 16 loss.adversarial_temperature 0.9415858424255138 +418 16 negative_sampler.num_negs_per_pos 89.0 +418 16 training.batch_size 0.0 +418 17 model.embedding_dim 1.0 +418 17 loss.margin 7.86042316822343 +418 17 loss.adversarial_temperature 0.8381082994077714 +418 17 negative_sampler.num_negs_per_pos 12.0 +418 17 training.batch_size 2.0 +418 18 model.embedding_dim 2.0 
+418 18 loss.margin 1.4325850059381864 +418 18 loss.adversarial_temperature 0.7949816428362833 +418 18 negative_sampler.num_negs_per_pos 14.0 +418 18 training.batch_size 1.0 +418 19 model.embedding_dim 2.0 +418 19 loss.margin 4.47216347308133 +418 19 loss.adversarial_temperature 0.4532536595727771 +418 19 negative_sampler.num_negs_per_pos 43.0 +418 19 training.batch_size 1.0 +418 20 model.embedding_dim 0.0 +418 20 loss.margin 22.267751605374137 +418 20 loss.adversarial_temperature 0.21498556074170255 +418 20 negative_sampler.num_negs_per_pos 77.0 +418 20 training.batch_size 0.0 +418 21 model.embedding_dim 1.0 +418 21 loss.margin 6.250908779333174 +418 21 loss.adversarial_temperature 0.34209139771815616 +418 21 negative_sampler.num_negs_per_pos 67.0 +418 21 training.batch_size 2.0 +418 22 model.embedding_dim 0.0 +418 22 loss.margin 26.59473286855881 +418 22 loss.adversarial_temperature 0.9796035901284774 +418 22 negative_sampler.num_negs_per_pos 4.0 +418 22 training.batch_size 1.0 +418 23 model.embedding_dim 0.0 +418 23 loss.margin 20.25094357302292 +418 23 loss.adversarial_temperature 0.8086137480635819 +418 23 negative_sampler.num_negs_per_pos 76.0 +418 23 training.batch_size 0.0 +418 24 model.embedding_dim 1.0 +418 24 loss.margin 6.398336775220782 +418 24 loss.adversarial_temperature 0.9078190525080647 +418 24 negative_sampler.num_negs_per_pos 13.0 +418 24 training.batch_size 1.0 +418 25 model.embedding_dim 2.0 +418 25 loss.margin 27.106276418506248 +418 25 loss.adversarial_temperature 0.9528828193867802 +418 25 negative_sampler.num_negs_per_pos 86.0 +418 25 training.batch_size 1.0 +418 26 model.embedding_dim 2.0 +418 26 loss.margin 29.596443280050615 +418 26 loss.adversarial_temperature 0.49316726211135575 +418 26 negative_sampler.num_negs_per_pos 46.0 +418 26 training.batch_size 0.0 +418 27 model.embedding_dim 1.0 +418 27 loss.margin 2.0205183003481086 +418 27 loss.adversarial_temperature 0.38348203388148516 +418 27 negative_sampler.num_negs_per_pos 51.0 +418 
27 training.batch_size 2.0 +418 28 model.embedding_dim 0.0 +418 28 loss.margin 6.709693896415477 +418 28 loss.adversarial_temperature 0.5622602024763916 +418 28 negative_sampler.num_negs_per_pos 42.0 +418 28 training.batch_size 1.0 +418 29 model.embedding_dim 1.0 +418 29 loss.margin 11.57158683629842 +418 29 loss.adversarial_temperature 0.4177669143235112 +418 29 negative_sampler.num_negs_per_pos 38.0 +418 29 training.batch_size 0.0 +418 30 model.embedding_dim 0.0 +418 30 loss.margin 23.368106565597515 +418 30 loss.adversarial_temperature 0.728366138361022 +418 30 negative_sampler.num_negs_per_pos 16.0 +418 30 training.batch_size 0.0 +418 31 model.embedding_dim 2.0 +418 31 loss.margin 22.56246606699745 +418 31 loss.adversarial_temperature 0.14341571613510848 +418 31 negative_sampler.num_negs_per_pos 25.0 +418 31 training.batch_size 1.0 +418 32 model.embedding_dim 2.0 +418 32 loss.margin 4.281193829643324 +418 32 loss.adversarial_temperature 0.8288948861388737 +418 32 negative_sampler.num_negs_per_pos 29.0 +418 32 training.batch_size 0.0 +418 33 model.embedding_dim 2.0 +418 33 loss.margin 16.73049683663271 +418 33 loss.adversarial_temperature 0.7604060206098002 +418 33 negative_sampler.num_negs_per_pos 67.0 +418 33 training.batch_size 0.0 +418 34 model.embedding_dim 0.0 +418 34 loss.margin 26.63089279357852 +418 34 loss.adversarial_temperature 0.16624862077285057 +418 34 negative_sampler.num_negs_per_pos 46.0 +418 34 training.batch_size 1.0 +418 35 model.embedding_dim 1.0 +418 35 loss.margin 20.604608422413953 +418 35 loss.adversarial_temperature 0.42463944970082446 +418 35 negative_sampler.num_negs_per_pos 53.0 +418 35 training.batch_size 2.0 +418 36 model.embedding_dim 1.0 +418 36 loss.margin 28.638247094144685 +418 36 loss.adversarial_temperature 0.25542572668957364 +418 36 negative_sampler.num_negs_per_pos 6.0 +418 36 training.batch_size 2.0 +418 37 model.embedding_dim 2.0 +418 37 loss.margin 16.964333563452744 +418 37 loss.adversarial_temperature 
0.8659000054054761 +418 37 negative_sampler.num_negs_per_pos 86.0 +418 37 training.batch_size 2.0 +418 38 model.embedding_dim 1.0 +418 38 loss.margin 27.22292212181327 +418 38 loss.adversarial_temperature 0.10748552329556277 +418 38 negative_sampler.num_negs_per_pos 74.0 +418 38 training.batch_size 0.0 +418 39 model.embedding_dim 1.0 +418 39 loss.margin 17.92808431008049 +418 39 loss.adversarial_temperature 0.1342485080140214 +418 39 negative_sampler.num_negs_per_pos 96.0 +418 39 training.batch_size 2.0 +418 40 model.embedding_dim 1.0 +418 40 loss.margin 28.471378340117784 +418 40 loss.adversarial_temperature 0.6266186281657998 +418 40 negative_sampler.num_negs_per_pos 82.0 +418 40 training.batch_size 0.0 +418 41 model.embedding_dim 2.0 +418 41 loss.margin 17.36074594331169 +418 41 loss.adversarial_temperature 0.157875910590364 +418 41 negative_sampler.num_negs_per_pos 76.0 +418 41 training.batch_size 0.0 +418 42 model.embedding_dim 0.0 +418 42 loss.margin 28.242776660738997 +418 42 loss.adversarial_temperature 0.6658213970784012 +418 42 negative_sampler.num_negs_per_pos 39.0 +418 42 training.batch_size 0.0 +418 43 model.embedding_dim 2.0 +418 43 loss.margin 3.308314654447198 +418 43 loss.adversarial_temperature 0.8974656651250542 +418 43 negative_sampler.num_negs_per_pos 28.0 +418 43 training.batch_size 0.0 +418 44 model.embedding_dim 1.0 +418 44 loss.margin 27.49639869256637 +418 44 loss.adversarial_temperature 0.551403556531142 +418 44 negative_sampler.num_negs_per_pos 76.0 +418 44 training.batch_size 0.0 +418 45 model.embedding_dim 1.0 +418 45 loss.margin 17.185732706643496 +418 45 loss.adversarial_temperature 0.8050480780970247 +418 45 negative_sampler.num_negs_per_pos 39.0 +418 45 training.batch_size 1.0 +418 46 model.embedding_dim 2.0 +418 46 loss.margin 9.959729700667065 +418 46 loss.adversarial_temperature 0.5954433003341841 +418 46 negative_sampler.num_negs_per_pos 24.0 +418 46 training.batch_size 2.0 +418 47 model.embedding_dim 0.0 +418 47 loss.margin 
26.44814745939828 +418 47 loss.adversarial_temperature 0.5803002562683717 +418 47 negative_sampler.num_negs_per_pos 20.0 +418 47 training.batch_size 2.0 +418 48 model.embedding_dim 0.0 +418 48 loss.margin 22.510008295538295 +418 48 loss.adversarial_temperature 0.16527085626887084 +418 48 negative_sampler.num_negs_per_pos 91.0 +418 48 training.batch_size 2.0 +418 49 model.embedding_dim 2.0 +418 49 loss.margin 12.444381907947669 +418 49 loss.adversarial_temperature 0.7838397811656407 +418 49 negative_sampler.num_negs_per_pos 31.0 +418 49 training.batch_size 2.0 +418 50 model.embedding_dim 1.0 +418 50 loss.margin 1.2269332912930495 +418 50 loss.adversarial_temperature 0.81593494268348 +418 50 negative_sampler.num_negs_per_pos 33.0 +418 50 training.batch_size 1.0 +418 51 model.embedding_dim 2.0 +418 51 loss.margin 25.290166753920545 +418 51 loss.adversarial_temperature 0.35383634218036697 +418 51 negative_sampler.num_negs_per_pos 68.0 +418 51 training.batch_size 1.0 +418 52 model.embedding_dim 2.0 +418 52 loss.margin 23.92664188110924 +418 52 loss.adversarial_temperature 0.7522335766191449 +418 52 negative_sampler.num_negs_per_pos 63.0 +418 52 training.batch_size 1.0 +418 53 model.embedding_dim 0.0 +418 53 loss.margin 24.232983217392217 +418 53 loss.adversarial_temperature 0.4935133952738744 +418 53 negative_sampler.num_negs_per_pos 35.0 +418 53 training.batch_size 1.0 +418 54 model.embedding_dim 0.0 +418 54 loss.margin 21.810612967371117 +418 54 loss.adversarial_temperature 0.5161235234383175 +418 54 negative_sampler.num_negs_per_pos 67.0 +418 54 training.batch_size 0.0 +418 55 model.embedding_dim 0.0 +418 55 loss.margin 12.992051680145797 +418 55 loss.adversarial_temperature 0.9299451587022096 +418 55 negative_sampler.num_negs_per_pos 28.0 +418 55 training.batch_size 1.0 +418 56 model.embedding_dim 0.0 +418 56 loss.margin 5.498727743885665 +418 56 loss.adversarial_temperature 0.18370710106072122 +418 56 negative_sampler.num_negs_per_pos 38.0 +418 56 
training.batch_size 1.0 +418 57 model.embedding_dim 1.0 +418 57 loss.margin 17.680220004906676 +418 57 loss.adversarial_temperature 0.4865888363553438 +418 57 negative_sampler.num_negs_per_pos 27.0 +418 57 training.batch_size 0.0 +418 58 model.embedding_dim 0.0 +418 58 loss.margin 8.894239605822062 +418 58 loss.adversarial_temperature 0.7203747330557048 +418 58 negative_sampler.num_negs_per_pos 79.0 +418 58 training.batch_size 0.0 +418 59 model.embedding_dim 0.0 +418 59 loss.margin 17.435494199078974 +418 59 loss.adversarial_temperature 0.9200209558144308 +418 59 negative_sampler.num_negs_per_pos 57.0 +418 59 training.batch_size 0.0 +418 60 model.embedding_dim 2.0 +418 60 loss.margin 10.007310125993772 +418 60 loss.adversarial_temperature 0.5541993466439051 +418 60 negative_sampler.num_negs_per_pos 50.0 +418 60 training.batch_size 2.0 +418 61 model.embedding_dim 1.0 +418 61 loss.margin 11.818224150091192 +418 61 loss.adversarial_temperature 0.9108500821257521 +418 61 negative_sampler.num_negs_per_pos 98.0 +418 61 training.batch_size 2.0 +418 62 model.embedding_dim 0.0 +418 62 loss.margin 1.8562954255640842 +418 62 loss.adversarial_temperature 0.9556301964877572 +418 62 negative_sampler.num_negs_per_pos 34.0 +418 62 training.batch_size 2.0 +418 63 model.embedding_dim 2.0 +418 63 loss.margin 15.166752498579243 +418 63 loss.adversarial_temperature 0.8505055434850343 +418 63 negative_sampler.num_negs_per_pos 44.0 +418 63 training.batch_size 0.0 +418 64 model.embedding_dim 1.0 +418 64 loss.margin 23.37344366718023 +418 64 loss.adversarial_temperature 0.7750627410864332 +418 64 negative_sampler.num_negs_per_pos 50.0 +418 64 training.batch_size 1.0 +418 65 model.embedding_dim 0.0 +418 65 loss.margin 22.144588064150778 +418 65 loss.adversarial_temperature 0.6752944620564018 +418 65 negative_sampler.num_negs_per_pos 46.0 +418 65 training.batch_size 1.0 +418 66 model.embedding_dim 0.0 +418 66 loss.margin 26.161399242117227 +418 66 loss.adversarial_temperature 
0.5474551716588688 +418 66 negative_sampler.num_negs_per_pos 3.0 +418 66 training.batch_size 2.0 +418 67 model.embedding_dim 2.0 +418 67 loss.margin 14.779244910996491 +418 67 loss.adversarial_temperature 0.4087485966769221 +418 67 negative_sampler.num_negs_per_pos 96.0 +418 67 training.batch_size 2.0 +418 68 model.embedding_dim 2.0 +418 68 loss.margin 20.66551464587636 +418 68 loss.adversarial_temperature 0.6066499309641101 +418 68 negative_sampler.num_negs_per_pos 19.0 +418 68 training.batch_size 1.0 +418 69 model.embedding_dim 0.0 +418 69 loss.margin 16.73380149230152 +418 69 loss.adversarial_temperature 0.830183119551521 +418 69 negative_sampler.num_negs_per_pos 42.0 +418 69 training.batch_size 2.0 +418 70 model.embedding_dim 2.0 +418 70 loss.margin 28.07207576449131 +418 70 loss.adversarial_temperature 0.6055798527628408 +418 70 negative_sampler.num_negs_per_pos 56.0 +418 70 training.batch_size 1.0 +418 71 model.embedding_dim 2.0 +418 71 loss.margin 29.52029371154386 +418 71 loss.adversarial_temperature 0.4040610222321618 +418 71 negative_sampler.num_negs_per_pos 56.0 +418 71 training.batch_size 2.0 +418 72 model.embedding_dim 2.0 +418 72 loss.margin 21.203775122267032 +418 72 loss.adversarial_temperature 0.6567411866770265 +418 72 negative_sampler.num_negs_per_pos 26.0 +418 72 training.batch_size 0.0 +418 73 model.embedding_dim 1.0 +418 73 loss.margin 21.325821589046278 +418 73 loss.adversarial_temperature 0.8630291150933189 +418 73 negative_sampler.num_negs_per_pos 35.0 +418 73 training.batch_size 1.0 +418 74 model.embedding_dim 0.0 +418 74 loss.margin 12.656109170065077 +418 74 loss.adversarial_temperature 0.3565517081586055 +418 74 negative_sampler.num_negs_per_pos 88.0 +418 74 training.batch_size 2.0 +418 75 model.embedding_dim 0.0 +418 75 loss.margin 3.224398130290246 +418 75 loss.adversarial_temperature 0.5019106487816998 +418 75 negative_sampler.num_negs_per_pos 92.0 +418 75 training.batch_size 2.0 +418 76 model.embedding_dim 1.0 +418 76 loss.margin 
8.794865791978689 +418 76 loss.adversarial_temperature 0.715338159897638 +418 76 negative_sampler.num_negs_per_pos 70.0 +418 76 training.batch_size 1.0 +418 77 model.embedding_dim 1.0 +418 77 loss.margin 15.84615048209569 +418 77 loss.adversarial_temperature 0.716173621214088 +418 77 negative_sampler.num_negs_per_pos 46.0 +418 77 training.batch_size 0.0 +418 78 model.embedding_dim 0.0 +418 78 loss.margin 20.115966281699095 +418 78 loss.adversarial_temperature 0.1423368368309858 +418 78 negative_sampler.num_negs_per_pos 29.0 +418 78 training.batch_size 0.0 +418 79 model.embedding_dim 1.0 +418 79 loss.margin 16.10649589532838 +418 79 loss.adversarial_temperature 0.9735403961621371 +418 79 negative_sampler.num_negs_per_pos 28.0 +418 79 training.batch_size 0.0 +418 80 model.embedding_dim 1.0 +418 80 loss.margin 5.999392868357508 +418 80 loss.adversarial_temperature 0.4157630005745597 +418 80 negative_sampler.num_negs_per_pos 45.0 +418 80 training.batch_size 1.0 +418 81 model.embedding_dim 2.0 +418 81 loss.margin 23.489229481141535 +418 81 loss.adversarial_temperature 0.3576853252058897 +418 81 negative_sampler.num_negs_per_pos 3.0 +418 81 training.batch_size 1.0 +418 82 model.embedding_dim 0.0 +418 82 loss.margin 23.3020626751199 +418 82 loss.adversarial_temperature 0.9982835054137441 +418 82 negative_sampler.num_negs_per_pos 36.0 +418 82 training.batch_size 0.0 +418 83 model.embedding_dim 0.0 +418 83 loss.margin 28.660922353983015 +418 83 loss.adversarial_temperature 0.12993645989849406 +418 83 negative_sampler.num_negs_per_pos 39.0 +418 83 training.batch_size 1.0 +418 84 model.embedding_dim 0.0 +418 84 loss.margin 13.194795291071902 +418 84 loss.adversarial_temperature 0.34384469785053223 +418 84 negative_sampler.num_negs_per_pos 87.0 +418 84 training.batch_size 1.0 +418 85 model.embedding_dim 1.0 +418 85 loss.margin 6.868685937904425 +418 85 loss.adversarial_temperature 0.844929050369007 +418 85 negative_sampler.num_negs_per_pos 67.0 +418 85 training.batch_size 1.0 
+418 86 model.embedding_dim 2.0 +418 86 loss.margin 7.450353864753403 +418 86 loss.adversarial_temperature 0.25665246698702554 +418 86 negative_sampler.num_negs_per_pos 4.0 +418 86 training.batch_size 1.0 +418 87 model.embedding_dim 1.0 +418 87 loss.margin 29.648222996118893 +418 87 loss.adversarial_temperature 0.377735761317757 +418 87 negative_sampler.num_negs_per_pos 67.0 +418 87 training.batch_size 1.0 +418 88 model.embedding_dim 0.0 +418 88 loss.margin 8.8629729314353 +418 88 loss.adversarial_temperature 0.4812703155335326 +418 88 negative_sampler.num_negs_per_pos 97.0 +418 88 training.batch_size 1.0 +418 89 model.embedding_dim 1.0 +418 89 loss.margin 2.763509808508444 +418 89 loss.adversarial_temperature 0.45329098724969663 +418 89 negative_sampler.num_negs_per_pos 47.0 +418 89 training.batch_size 0.0 +418 90 model.embedding_dim 2.0 +418 90 loss.margin 4.256339824608533 +418 90 loss.adversarial_temperature 0.6734216449997369 +418 90 negative_sampler.num_negs_per_pos 89.0 +418 90 training.batch_size 1.0 +418 91 model.embedding_dim 0.0 +418 91 loss.margin 16.088706688904587 +418 91 loss.adversarial_temperature 0.21257314811777753 +418 91 negative_sampler.num_negs_per_pos 36.0 +418 91 training.batch_size 0.0 +418 92 model.embedding_dim 2.0 +418 92 loss.margin 11.56878205304356 +418 92 loss.adversarial_temperature 0.9002017921698483 +418 92 negative_sampler.num_negs_per_pos 93.0 +418 92 training.batch_size 0.0 +418 93 model.embedding_dim 0.0 +418 93 loss.margin 26.97574672111348 +418 93 loss.adversarial_temperature 0.15523164098221592 +418 93 negative_sampler.num_negs_per_pos 65.0 +418 93 training.batch_size 2.0 +418 94 model.embedding_dim 1.0 +418 94 loss.margin 9.387307025406567 +418 94 loss.adversarial_temperature 0.7862470932302891 +418 94 negative_sampler.num_negs_per_pos 87.0 +418 94 training.batch_size 0.0 +418 95 model.embedding_dim 1.0 +418 95 loss.margin 26.67089729328608 +418 95 loss.adversarial_temperature 0.7951309633642579 +418 95 
negative_sampler.num_negs_per_pos 75.0 +418 95 training.batch_size 1.0 +418 96 model.embedding_dim 1.0 +418 96 loss.margin 3.056544310927338 +418 96 loss.adversarial_temperature 0.5221933704648805 +418 96 negative_sampler.num_negs_per_pos 37.0 +418 96 training.batch_size 1.0 +418 97 model.embedding_dim 0.0 +418 97 loss.margin 27.03215165082735 +418 97 loss.adversarial_temperature 0.6167376611547839 +418 97 negative_sampler.num_negs_per_pos 97.0 +418 97 training.batch_size 1.0 +418 98 model.embedding_dim 1.0 +418 98 loss.margin 3.858732114202059 +418 98 loss.adversarial_temperature 0.3920927591494152 +418 98 negative_sampler.num_negs_per_pos 61.0 +418 98 training.batch_size 2.0 +418 99 model.embedding_dim 2.0 +418 99 loss.margin 1.7422895542138899 +418 99 loss.adversarial_temperature 0.6143331499051744 +418 99 negative_sampler.num_negs_per_pos 60.0 +418 99 training.batch_size 2.0 +418 100 model.embedding_dim 0.0 +418 100 loss.margin 2.1213788217702136 +418 100 loss.adversarial_temperature 0.5778305773439795 +418 100 negative_sampler.num_negs_per_pos 91.0 +418 100 training.batch_size 2.0 +418 1 dataset """kinships""" +418 1 model """ntn""" +418 1 loss """nssa""" +418 1 regularizer """no""" +418 1 optimizer """adadelta""" +418 1 training_loop """owa""" +418 1 negative_sampler """basic""" +418 1 evaluator """rankbased""" +418 2 dataset """kinships""" +418 2 model """ntn""" +418 2 loss """nssa""" +418 2 regularizer """no""" +418 2 optimizer """adadelta""" +418 2 training_loop """owa""" +418 2 negative_sampler """basic""" +418 2 evaluator """rankbased""" +418 3 dataset """kinships""" +418 3 model """ntn""" +418 3 loss """nssa""" +418 3 regularizer """no""" +418 3 optimizer """adadelta""" +418 3 training_loop """owa""" +418 3 negative_sampler """basic""" +418 3 evaluator """rankbased""" +418 4 dataset """kinships""" +418 4 model """ntn""" +418 4 loss """nssa""" +418 4 regularizer """no""" +418 4 optimizer """adadelta""" +418 4 training_loop """owa""" +418 4 
negative_sampler """basic""" +418 4 evaluator """rankbased""" +418 5 dataset """kinships""" +418 5 model """ntn""" +418 5 loss """nssa""" +418 5 regularizer """no""" +418 5 optimizer """adadelta""" +418 5 training_loop """owa""" +418 5 negative_sampler """basic""" +418 5 evaluator """rankbased""" +418 6 dataset """kinships""" +418 6 model """ntn""" +418 6 loss """nssa""" +418 6 regularizer """no""" +418 6 optimizer """adadelta""" +418 6 training_loop """owa""" +418 6 negative_sampler """basic""" +418 6 evaluator """rankbased""" +418 7 dataset """kinships""" +418 7 model """ntn""" +418 7 loss """nssa""" +418 7 regularizer """no""" +418 7 optimizer """adadelta""" +418 7 training_loop """owa""" +418 7 negative_sampler """basic""" +418 7 evaluator """rankbased""" +418 8 dataset """kinships""" +418 8 model """ntn""" +418 8 loss """nssa""" +418 8 regularizer """no""" +418 8 optimizer """adadelta""" +418 8 training_loop """owa""" +418 8 negative_sampler """basic""" +418 8 evaluator """rankbased""" +418 9 dataset """kinships""" +418 9 model """ntn""" +418 9 loss """nssa""" +418 9 regularizer """no""" +418 9 optimizer """adadelta""" +418 9 training_loop """owa""" +418 9 negative_sampler """basic""" +418 9 evaluator """rankbased""" +418 10 dataset """kinships""" +418 10 model """ntn""" +418 10 loss """nssa""" +418 10 regularizer """no""" +418 10 optimizer """adadelta""" +418 10 training_loop """owa""" +418 10 negative_sampler """basic""" +418 10 evaluator """rankbased""" +418 11 dataset """kinships""" +418 11 model """ntn""" +418 11 loss """nssa""" +418 11 regularizer """no""" +418 11 optimizer """adadelta""" +418 11 training_loop """owa""" +418 11 negative_sampler """basic""" +418 11 evaluator """rankbased""" +418 12 dataset """kinships""" +418 12 model """ntn""" +418 12 loss """nssa""" +418 12 regularizer """no""" +418 12 optimizer """adadelta""" +418 12 training_loop """owa""" +418 12 negative_sampler """basic""" +418 12 evaluator """rankbased""" +418 13 dataset 
"""kinships""" +418 13 model """ntn""" +418 13 loss """nssa""" +418 13 regularizer """no""" +418 13 optimizer """adadelta""" +418 13 training_loop """owa""" +418 13 negative_sampler """basic""" +418 13 evaluator """rankbased""" +418 14 dataset """kinships""" +418 14 model """ntn""" +418 14 loss """nssa""" +418 14 regularizer """no""" +418 14 optimizer """adadelta""" +418 14 training_loop """owa""" +418 14 negative_sampler """basic""" +418 14 evaluator """rankbased""" +418 15 dataset """kinships""" +418 15 model """ntn""" +418 15 loss """nssa""" +418 15 regularizer """no""" +418 15 optimizer """adadelta""" +418 15 training_loop """owa""" +418 15 negative_sampler """basic""" +418 15 evaluator """rankbased""" +418 16 dataset """kinships""" +418 16 model """ntn""" +418 16 loss """nssa""" +418 16 regularizer """no""" +418 16 optimizer """adadelta""" +418 16 training_loop """owa""" +418 16 negative_sampler """basic""" +418 16 evaluator """rankbased""" +418 17 dataset """kinships""" +418 17 model """ntn""" +418 17 loss """nssa""" +418 17 regularizer """no""" +418 17 optimizer """adadelta""" +418 17 training_loop """owa""" +418 17 negative_sampler """basic""" +418 17 evaluator """rankbased""" +418 18 dataset """kinships""" +418 18 model """ntn""" +418 18 loss """nssa""" +418 18 regularizer """no""" +418 18 optimizer """adadelta""" +418 18 training_loop """owa""" +418 18 negative_sampler """basic""" +418 18 evaluator """rankbased""" +418 19 dataset """kinships""" +418 19 model """ntn""" +418 19 loss """nssa""" +418 19 regularizer """no""" +418 19 optimizer """adadelta""" +418 19 training_loop """owa""" +418 19 negative_sampler """basic""" +418 19 evaluator """rankbased""" +418 20 dataset """kinships""" +418 20 model """ntn""" +418 20 loss """nssa""" +418 20 regularizer """no""" +418 20 optimizer """adadelta""" +418 20 training_loop """owa""" +418 20 negative_sampler """basic""" +418 20 evaluator """rankbased""" +418 21 dataset """kinships""" +418 21 model """ntn""" +418 21 
loss """nssa""" +418 21 regularizer """no""" +418 21 optimizer """adadelta""" +418 21 training_loop """owa""" +418 21 negative_sampler """basic""" +418 21 evaluator """rankbased""" +418 22 dataset """kinships""" +418 22 model """ntn""" +418 22 loss """nssa""" +418 22 regularizer """no""" +418 22 optimizer """adadelta""" +418 22 training_loop """owa""" +418 22 negative_sampler """basic""" +418 22 evaluator """rankbased""" +418 23 dataset """kinships""" +418 23 model """ntn""" +418 23 loss """nssa""" +418 23 regularizer """no""" +418 23 optimizer """adadelta""" +418 23 training_loop """owa""" +418 23 negative_sampler """basic""" +418 23 evaluator """rankbased""" +418 24 dataset """kinships""" +418 24 model """ntn""" +418 24 loss """nssa""" +418 24 regularizer """no""" +418 24 optimizer """adadelta""" +418 24 training_loop """owa""" +418 24 negative_sampler """basic""" +418 24 evaluator """rankbased""" +418 25 dataset """kinships""" +418 25 model """ntn""" +418 25 loss """nssa""" +418 25 regularizer """no""" +418 25 optimizer """adadelta""" +418 25 training_loop """owa""" +418 25 negative_sampler """basic""" +418 25 evaluator """rankbased""" +418 26 dataset """kinships""" +418 26 model """ntn""" +418 26 loss """nssa""" +418 26 regularizer """no""" +418 26 optimizer """adadelta""" +418 26 training_loop """owa""" +418 26 negative_sampler """basic""" +418 26 evaluator """rankbased""" +418 27 dataset """kinships""" +418 27 model """ntn""" +418 27 loss """nssa""" +418 27 regularizer """no""" +418 27 optimizer """adadelta""" +418 27 training_loop """owa""" +418 27 negative_sampler """basic""" +418 27 evaluator """rankbased""" +418 28 dataset """kinships""" +418 28 model """ntn""" +418 28 loss """nssa""" +418 28 regularizer """no""" +418 28 optimizer """adadelta""" +418 28 training_loop """owa""" +418 28 negative_sampler """basic""" +418 28 evaluator """rankbased""" +418 29 dataset """kinships""" +418 29 model """ntn""" +418 29 loss """nssa""" +418 29 regularizer """no""" 
+418 29 optimizer """adadelta""" +418 29 training_loop """owa""" +418 29 negative_sampler """basic""" +418 29 evaluator """rankbased""" +418 30 dataset """kinships""" +418 30 model """ntn""" +418 30 loss """nssa""" +418 30 regularizer """no""" +418 30 optimizer """adadelta""" +418 30 training_loop """owa""" +418 30 negative_sampler """basic""" +418 30 evaluator """rankbased""" +418 31 dataset """kinships""" +418 31 model """ntn""" +418 31 loss """nssa""" +418 31 regularizer """no""" +418 31 optimizer """adadelta""" +418 31 training_loop """owa""" +418 31 negative_sampler """basic""" +418 31 evaluator """rankbased""" +418 32 dataset """kinships""" +418 32 model """ntn""" +418 32 loss """nssa""" +418 32 regularizer """no""" +418 32 optimizer """adadelta""" +418 32 training_loop """owa""" +418 32 negative_sampler """basic""" +418 32 evaluator """rankbased""" +418 33 dataset """kinships""" +418 33 model """ntn""" +418 33 loss """nssa""" +418 33 regularizer """no""" +418 33 optimizer """adadelta""" +418 33 training_loop """owa""" +418 33 negative_sampler """basic""" +418 33 evaluator """rankbased""" +418 34 dataset """kinships""" +418 34 model """ntn""" +418 34 loss """nssa""" +418 34 regularizer """no""" +418 34 optimizer """adadelta""" +418 34 training_loop """owa""" +418 34 negative_sampler """basic""" +418 34 evaluator """rankbased""" +418 35 dataset """kinships""" +418 35 model """ntn""" +418 35 loss """nssa""" +418 35 regularizer """no""" +418 35 optimizer """adadelta""" +418 35 training_loop """owa""" +418 35 negative_sampler """basic""" +418 35 evaluator """rankbased""" +418 36 dataset """kinships""" +418 36 model """ntn""" +418 36 loss """nssa""" +418 36 regularizer """no""" +418 36 optimizer """adadelta""" +418 36 training_loop """owa""" +418 36 negative_sampler """basic""" +418 36 evaluator """rankbased""" +418 37 dataset """kinships""" +418 37 model """ntn""" +418 37 loss """nssa""" +418 37 regularizer """no""" +418 37 optimizer """adadelta""" +418 37 
training_loop """owa""" +418 37 negative_sampler """basic""" +418 37 evaluator """rankbased""" +418 38 dataset """kinships""" +418 38 model """ntn""" +418 38 loss """nssa""" +418 38 regularizer """no""" +418 38 optimizer """adadelta""" +418 38 training_loop """owa""" +418 38 negative_sampler """basic""" +418 38 evaluator """rankbased""" +418 39 dataset """kinships""" +418 39 model """ntn""" +418 39 loss """nssa""" +418 39 regularizer """no""" +418 39 optimizer """adadelta""" +418 39 training_loop """owa""" +418 39 negative_sampler """basic""" +418 39 evaluator """rankbased""" +418 40 dataset """kinships""" +418 40 model """ntn""" +418 40 loss """nssa""" +418 40 regularizer """no""" +418 40 optimizer """adadelta""" +418 40 training_loop """owa""" +418 40 negative_sampler """basic""" +418 40 evaluator """rankbased""" +418 41 dataset """kinships""" +418 41 model """ntn""" +418 41 loss """nssa""" +418 41 regularizer """no""" +418 41 optimizer """adadelta""" +418 41 training_loop """owa""" +418 41 negative_sampler """basic""" +418 41 evaluator """rankbased""" +418 42 dataset """kinships""" +418 42 model """ntn""" +418 42 loss """nssa""" +418 42 regularizer """no""" +418 42 optimizer """adadelta""" +418 42 training_loop """owa""" +418 42 negative_sampler """basic""" +418 42 evaluator """rankbased""" +418 43 dataset """kinships""" +418 43 model """ntn""" +418 43 loss """nssa""" +418 43 regularizer """no""" +418 43 optimizer """adadelta""" +418 43 training_loop """owa""" +418 43 negative_sampler """basic""" +418 43 evaluator """rankbased""" +418 44 dataset """kinships""" +418 44 model """ntn""" +418 44 loss """nssa""" +418 44 regularizer """no""" +418 44 optimizer """adadelta""" +418 44 training_loop """owa""" +418 44 negative_sampler """basic""" +418 44 evaluator """rankbased""" +418 45 dataset """kinships""" +418 45 model """ntn""" +418 45 loss """nssa""" +418 45 regularizer """no""" +418 45 optimizer """adadelta""" +418 45 training_loop """owa""" +418 45 
negative_sampler """basic""" +418 45 evaluator """rankbased""" +418 46 dataset """kinships""" +418 46 model """ntn""" +418 46 loss """nssa""" +418 46 regularizer """no""" +418 46 optimizer """adadelta""" +418 46 training_loop """owa""" +418 46 negative_sampler """basic""" +418 46 evaluator """rankbased""" +418 47 dataset """kinships""" +418 47 model """ntn""" +418 47 loss """nssa""" +418 47 regularizer """no""" +418 47 optimizer """adadelta""" +418 47 training_loop """owa""" +418 47 negative_sampler """basic""" +418 47 evaluator """rankbased""" +418 48 dataset """kinships""" +418 48 model """ntn""" +418 48 loss """nssa""" +418 48 regularizer """no""" +418 48 optimizer """adadelta""" +418 48 training_loop """owa""" +418 48 negative_sampler """basic""" +418 48 evaluator """rankbased""" +418 49 dataset """kinships""" +418 49 model """ntn""" +418 49 loss """nssa""" +418 49 regularizer """no""" +418 49 optimizer """adadelta""" +418 49 training_loop """owa""" +418 49 negative_sampler """basic""" +418 49 evaluator """rankbased""" +418 50 dataset """kinships""" +418 50 model """ntn""" +418 50 loss """nssa""" +418 50 regularizer """no""" +418 50 optimizer """adadelta""" +418 50 training_loop """owa""" +418 50 negative_sampler """basic""" +418 50 evaluator """rankbased""" +418 51 dataset """kinships""" +418 51 model """ntn""" +418 51 loss """nssa""" +418 51 regularizer """no""" +418 51 optimizer """adadelta""" +418 51 training_loop """owa""" +418 51 negative_sampler """basic""" +418 51 evaluator """rankbased""" +418 52 dataset """kinships""" +418 52 model """ntn""" +418 52 loss """nssa""" +418 52 regularizer """no""" +418 52 optimizer """adadelta""" +418 52 training_loop """owa""" +418 52 negative_sampler """basic""" +418 52 evaluator """rankbased""" +418 53 dataset """kinships""" +418 53 model """ntn""" +418 53 loss """nssa""" +418 53 regularizer """no""" +418 53 optimizer """adadelta""" +418 53 training_loop """owa""" +418 53 negative_sampler """basic""" +418 53 evaluator 
"""rankbased""" +418 54 dataset """kinships""" +418 54 model """ntn""" +418 54 loss """nssa""" +418 54 regularizer """no""" +418 54 optimizer """adadelta""" +418 54 training_loop """owa""" +418 54 negative_sampler """basic""" +418 54 evaluator """rankbased""" +418 55 dataset """kinships""" +418 55 model """ntn""" +418 55 loss """nssa""" +418 55 regularizer """no""" +418 55 optimizer """adadelta""" +418 55 training_loop """owa""" +418 55 negative_sampler """basic""" +418 55 evaluator """rankbased""" +418 56 dataset """kinships""" +418 56 model """ntn""" +418 56 loss """nssa""" +418 56 regularizer """no""" +418 56 optimizer """adadelta""" +418 56 training_loop """owa""" +418 56 negative_sampler """basic""" +418 56 evaluator """rankbased""" +418 57 dataset """kinships""" +418 57 model """ntn""" +418 57 loss """nssa""" +418 57 regularizer """no""" +418 57 optimizer """adadelta""" +418 57 training_loop """owa""" +418 57 negative_sampler """basic""" +418 57 evaluator """rankbased""" +418 58 dataset """kinships""" +418 58 model """ntn""" +418 58 loss """nssa""" +418 58 regularizer """no""" +418 58 optimizer """adadelta""" +418 58 training_loop """owa""" +418 58 negative_sampler """basic""" +418 58 evaluator """rankbased""" +418 59 dataset """kinships""" +418 59 model """ntn""" +418 59 loss """nssa""" +418 59 regularizer """no""" +418 59 optimizer """adadelta""" +418 59 training_loop """owa""" +418 59 negative_sampler """basic""" +418 59 evaluator """rankbased""" +418 60 dataset """kinships""" +418 60 model """ntn""" +418 60 loss """nssa""" +418 60 regularizer """no""" +418 60 optimizer """adadelta""" +418 60 training_loop """owa""" +418 60 negative_sampler """basic""" +418 60 evaluator """rankbased""" +418 61 dataset """kinships""" +418 61 model """ntn""" +418 61 loss """nssa""" +418 61 regularizer """no""" +418 61 optimizer """adadelta""" +418 61 training_loop """owa""" +418 61 negative_sampler """basic""" +418 61 evaluator """rankbased""" +418 62 dataset """kinships""" 
+418 62 model """ntn""" +418 62 loss """nssa""" +418 62 regularizer """no""" +418 62 optimizer """adadelta""" +418 62 training_loop """owa""" +418 62 negative_sampler """basic""" +418 62 evaluator """rankbased""" +418 63 dataset """kinships""" +418 63 model """ntn""" +418 63 loss """nssa""" +418 63 regularizer """no""" +418 63 optimizer """adadelta""" +418 63 training_loop """owa""" +418 63 negative_sampler """basic""" +418 63 evaluator """rankbased""" +418 64 dataset """kinships""" +418 64 model """ntn""" +418 64 loss """nssa""" +418 64 regularizer """no""" +418 64 optimizer """adadelta""" +418 64 training_loop """owa""" +418 64 negative_sampler """basic""" +418 64 evaluator """rankbased""" +418 65 dataset """kinships""" +418 65 model """ntn""" +418 65 loss """nssa""" +418 65 regularizer """no""" +418 65 optimizer """adadelta""" +418 65 training_loop """owa""" +418 65 negative_sampler """basic""" +418 65 evaluator """rankbased""" +418 66 dataset """kinships""" +418 66 model """ntn""" +418 66 loss """nssa""" +418 66 regularizer """no""" +418 66 optimizer """adadelta""" +418 66 training_loop """owa""" +418 66 negative_sampler """basic""" +418 66 evaluator """rankbased""" +418 67 dataset """kinships""" +418 67 model """ntn""" +418 67 loss """nssa""" +418 67 regularizer """no""" +418 67 optimizer """adadelta""" +418 67 training_loop """owa""" +418 67 negative_sampler """basic""" +418 67 evaluator """rankbased""" +418 68 dataset """kinships""" +418 68 model """ntn""" +418 68 loss """nssa""" +418 68 regularizer """no""" +418 68 optimizer """adadelta""" +418 68 training_loop """owa""" +418 68 negative_sampler """basic""" +418 68 evaluator """rankbased""" +418 69 dataset """kinships""" +418 69 model """ntn""" +418 69 loss """nssa""" +418 69 regularizer """no""" +418 69 optimizer """adadelta""" +418 69 training_loop """owa""" +418 69 negative_sampler """basic""" +418 69 evaluator """rankbased""" +418 70 dataset """kinships""" +418 70 model """ntn""" +418 70 loss """nssa""" 
+418 70 regularizer """no""" +418 70 optimizer """adadelta""" +418 70 training_loop """owa""" +418 70 negative_sampler """basic""" +418 70 evaluator """rankbased""" +418 71 dataset """kinships""" +418 71 model """ntn""" +418 71 loss """nssa""" +418 71 regularizer """no""" +418 71 optimizer """adadelta""" +418 71 training_loop """owa""" +418 71 negative_sampler """basic""" +418 71 evaluator """rankbased""" +418 72 dataset """kinships""" +418 72 model """ntn""" +418 72 loss """nssa""" +418 72 regularizer """no""" +418 72 optimizer """adadelta""" +418 72 training_loop """owa""" +418 72 negative_sampler """basic""" +418 72 evaluator """rankbased""" +418 73 dataset """kinships""" +418 73 model """ntn""" +418 73 loss """nssa""" +418 73 regularizer """no""" +418 73 optimizer """adadelta""" +418 73 training_loop """owa""" +418 73 negative_sampler """basic""" +418 73 evaluator """rankbased""" +418 74 dataset """kinships""" +418 74 model """ntn""" +418 74 loss """nssa""" +418 74 regularizer """no""" +418 74 optimizer """adadelta""" +418 74 training_loop """owa""" +418 74 negative_sampler """basic""" +418 74 evaluator """rankbased""" +418 75 dataset """kinships""" +418 75 model """ntn""" +418 75 loss """nssa""" +418 75 regularizer """no""" +418 75 optimizer """adadelta""" +418 75 training_loop """owa""" +418 75 negative_sampler """basic""" +418 75 evaluator """rankbased""" +418 76 dataset """kinships""" +418 76 model """ntn""" +418 76 loss """nssa""" +418 76 regularizer """no""" +418 76 optimizer """adadelta""" +418 76 training_loop """owa""" +418 76 negative_sampler """basic""" +418 76 evaluator """rankbased""" +418 77 dataset """kinships""" +418 77 model """ntn""" +418 77 loss """nssa""" +418 77 regularizer """no""" +418 77 optimizer """adadelta""" +418 77 training_loop """owa""" +418 77 negative_sampler """basic""" +418 77 evaluator """rankbased""" +418 78 dataset """kinships""" +418 78 model """ntn""" +418 78 loss """nssa""" +418 78 regularizer """no""" +418 78 optimizer 
"""adadelta""" +418 78 training_loop """owa""" +418 78 negative_sampler """basic""" +418 78 evaluator """rankbased""" +418 79 dataset """kinships""" +418 79 model """ntn""" +418 79 loss """nssa""" +418 79 regularizer """no""" +418 79 optimizer """adadelta""" +418 79 training_loop """owa""" +418 79 negative_sampler """basic""" +418 79 evaluator """rankbased""" +418 80 dataset """kinships""" +418 80 model """ntn""" +418 80 loss """nssa""" +418 80 regularizer """no""" +418 80 optimizer """adadelta""" +418 80 training_loop """owa""" +418 80 negative_sampler """basic""" +418 80 evaluator """rankbased""" +418 81 dataset """kinships""" +418 81 model """ntn""" +418 81 loss """nssa""" +418 81 regularizer """no""" +418 81 optimizer """adadelta""" +418 81 training_loop """owa""" +418 81 negative_sampler """basic""" +418 81 evaluator """rankbased""" +418 82 dataset """kinships""" +418 82 model """ntn""" +418 82 loss """nssa""" +418 82 regularizer """no""" +418 82 optimizer """adadelta""" +418 82 training_loop """owa""" +418 82 negative_sampler """basic""" +418 82 evaluator """rankbased""" +418 83 dataset """kinships""" +418 83 model """ntn""" +418 83 loss """nssa""" +418 83 regularizer """no""" +418 83 optimizer """adadelta""" +418 83 training_loop """owa""" +418 83 negative_sampler """basic""" +418 83 evaluator """rankbased""" +418 84 dataset """kinships""" +418 84 model """ntn""" +418 84 loss """nssa""" +418 84 regularizer """no""" +418 84 optimizer """adadelta""" +418 84 training_loop """owa""" +418 84 negative_sampler """basic""" +418 84 evaluator """rankbased""" +418 85 dataset """kinships""" +418 85 model """ntn""" +418 85 loss """nssa""" +418 85 regularizer """no""" +418 85 optimizer """adadelta""" +418 85 training_loop """owa""" +418 85 negative_sampler """basic""" +418 85 evaluator """rankbased""" +418 86 dataset """kinships""" +418 86 model """ntn""" +418 86 loss """nssa""" +418 86 regularizer """no""" +418 86 optimizer """adadelta""" +418 86 training_loop """owa""" 
+418 86 negative_sampler """basic""" +418 86 evaluator """rankbased""" +418 87 dataset """kinships""" +418 87 model """ntn""" +418 87 loss """nssa""" +418 87 regularizer """no""" +418 87 optimizer """adadelta""" +418 87 training_loop """owa""" +418 87 negative_sampler """basic""" +418 87 evaluator """rankbased""" +418 88 dataset """kinships""" +418 88 model """ntn""" +418 88 loss """nssa""" +418 88 regularizer """no""" +418 88 optimizer """adadelta""" +418 88 training_loop """owa""" +418 88 negative_sampler """basic""" +418 88 evaluator """rankbased""" +418 89 dataset """kinships""" +418 89 model """ntn""" +418 89 loss """nssa""" +418 89 regularizer """no""" +418 89 optimizer """adadelta""" +418 89 training_loop """owa""" +418 89 negative_sampler """basic""" +418 89 evaluator """rankbased""" +418 90 dataset """kinships""" +418 90 model """ntn""" +418 90 loss """nssa""" +418 90 regularizer """no""" +418 90 optimizer """adadelta""" +418 90 training_loop """owa""" +418 90 negative_sampler """basic""" +418 90 evaluator """rankbased""" +418 91 dataset """kinships""" +418 91 model """ntn""" +418 91 loss """nssa""" +418 91 regularizer """no""" +418 91 optimizer """adadelta""" +418 91 training_loop """owa""" +418 91 negative_sampler """basic""" +418 91 evaluator """rankbased""" +418 92 dataset """kinships""" +418 92 model """ntn""" +418 92 loss """nssa""" +418 92 regularizer """no""" +418 92 optimizer """adadelta""" +418 92 training_loop """owa""" +418 92 negative_sampler """basic""" +418 92 evaluator """rankbased""" +418 93 dataset """kinships""" +418 93 model """ntn""" +418 93 loss """nssa""" +418 93 regularizer """no""" +418 93 optimizer """adadelta""" +418 93 training_loop """owa""" +418 93 negative_sampler """basic""" +418 93 evaluator """rankbased""" +418 94 dataset """kinships""" +418 94 model """ntn""" +418 94 loss """nssa""" +418 94 regularizer """no""" +418 94 optimizer """adadelta""" +418 94 training_loop """owa""" +418 94 negative_sampler """basic""" +418 94 
evaluator """rankbased""" +418 95 dataset """kinships""" +418 95 model """ntn""" +418 95 loss """nssa""" +418 95 regularizer """no""" +418 95 optimizer """adadelta""" +418 95 training_loop """owa""" +418 95 negative_sampler """basic""" +418 95 evaluator """rankbased""" +418 96 dataset """kinships""" +418 96 model """ntn""" +418 96 loss """nssa""" +418 96 regularizer """no""" +418 96 optimizer """adadelta""" +418 96 training_loop """owa""" +418 96 negative_sampler """basic""" +418 96 evaluator """rankbased""" +418 97 dataset """kinships""" +418 97 model """ntn""" +418 97 loss """nssa""" +418 97 regularizer """no""" +418 97 optimizer """adadelta""" +418 97 training_loop """owa""" +418 97 negative_sampler """basic""" +418 97 evaluator """rankbased""" +418 98 dataset """kinships""" +418 98 model """ntn""" +418 98 loss """nssa""" +418 98 regularizer """no""" +418 98 optimizer """adadelta""" +418 98 training_loop """owa""" +418 98 negative_sampler """basic""" +418 98 evaluator """rankbased""" +418 99 dataset """kinships""" +418 99 model """ntn""" +418 99 loss """nssa""" +418 99 regularizer """no""" +418 99 optimizer """adadelta""" +418 99 training_loop """owa""" +418 99 negative_sampler """basic""" +418 99 evaluator """rankbased""" +418 100 dataset """kinships""" +418 100 model """ntn""" +418 100 loss """nssa""" +418 100 regularizer """no""" +418 100 optimizer """adadelta""" +418 100 training_loop """owa""" +418 100 negative_sampler """basic""" +418 100 evaluator """rankbased""" +419 1 model.embedding_dim 1.0 +419 1 loss.margin 3.5157312226753326 +419 1 negative_sampler.num_negs_per_pos 79.0 +419 1 training.batch_size 0.0 +419 2 model.embedding_dim 1.0 +419 2 loss.margin 4.319323134042799 +419 2 negative_sampler.num_negs_per_pos 47.0 +419 2 training.batch_size 1.0 +419 3 model.embedding_dim 1.0 +419 3 loss.margin 7.106313645362271 +419 3 negative_sampler.num_negs_per_pos 60.0 +419 3 training.batch_size 2.0 +419 4 model.embedding_dim 2.0 +419 4 loss.margin 
1.8316184842246286 +419 4 negative_sampler.num_negs_per_pos 3.0 +419 4 training.batch_size 0.0 +419 5 model.embedding_dim 1.0 +419 5 loss.margin 1.4763644536062062 +419 5 negative_sampler.num_negs_per_pos 27.0 +419 5 training.batch_size 1.0 +419 6 model.embedding_dim 0.0 +419 6 loss.margin 1.657028559186298 +419 6 negative_sampler.num_negs_per_pos 14.0 +419 6 training.batch_size 1.0 +419 7 model.embedding_dim 1.0 +419 7 loss.margin 3.291359001479104 +419 7 negative_sampler.num_negs_per_pos 74.0 +419 7 training.batch_size 2.0 +419 8 model.embedding_dim 0.0 +419 8 loss.margin 4.706249418910487 +419 8 negative_sampler.num_negs_per_pos 81.0 +419 8 training.batch_size 0.0 +419 9 model.embedding_dim 0.0 +419 9 loss.margin 2.368257551692557 +419 9 negative_sampler.num_negs_per_pos 60.0 +419 9 training.batch_size 2.0 +419 10 model.embedding_dim 1.0 +419 10 loss.margin 5.8884476364400165 +419 10 negative_sampler.num_negs_per_pos 75.0 +419 10 training.batch_size 1.0 +419 11 model.embedding_dim 0.0 +419 11 loss.margin 8.782973321959116 +419 11 negative_sampler.num_negs_per_pos 25.0 +419 11 training.batch_size 2.0 +419 12 model.embedding_dim 2.0 +419 12 loss.margin 6.172039687941094 +419 12 negative_sampler.num_negs_per_pos 31.0 +419 12 training.batch_size 1.0 +419 13 model.embedding_dim 1.0 +419 13 loss.margin 8.757943888200332 +419 13 negative_sampler.num_negs_per_pos 7.0 +419 13 training.batch_size 2.0 +419 14 model.embedding_dim 1.0 +419 14 loss.margin 3.030572466506325 +419 14 negative_sampler.num_negs_per_pos 8.0 +419 14 training.batch_size 1.0 +419 15 model.embedding_dim 0.0 +419 15 loss.margin 1.3857304429182933 +419 15 negative_sampler.num_negs_per_pos 89.0 +419 15 training.batch_size 2.0 +419 16 model.embedding_dim 1.0 +419 16 loss.margin 5.286787017111303 +419 16 negative_sampler.num_negs_per_pos 88.0 +419 16 training.batch_size 0.0 +419 17 model.embedding_dim 2.0 +419 17 loss.margin 7.0038771807533164 +419 17 negative_sampler.num_negs_per_pos 16.0 +419 17 
training.batch_size 2.0 +419 18 model.embedding_dim 1.0 +419 18 loss.margin 6.521376177609681 +419 18 negative_sampler.num_negs_per_pos 79.0 +419 18 training.batch_size 1.0 +419 19 model.embedding_dim 2.0 +419 19 loss.margin 0.7254163734962938 +419 19 negative_sampler.num_negs_per_pos 70.0 +419 19 training.batch_size 2.0 +419 20 model.embedding_dim 2.0 +419 20 loss.margin 2.88676935949501 +419 20 negative_sampler.num_negs_per_pos 3.0 +419 20 training.batch_size 1.0 +419 21 model.embedding_dim 2.0 +419 21 loss.margin 2.929583798362819 +419 21 negative_sampler.num_negs_per_pos 79.0 +419 21 training.batch_size 0.0 +419 22 model.embedding_dim 0.0 +419 22 loss.margin 0.7411740328741981 +419 22 negative_sampler.num_negs_per_pos 41.0 +419 22 training.batch_size 1.0 +419 23 model.embedding_dim 2.0 +419 23 loss.margin 9.905363112922595 +419 23 negative_sampler.num_negs_per_pos 81.0 +419 23 training.batch_size 1.0 +419 24 model.embedding_dim 0.0 +419 24 loss.margin 7.598189339611784 +419 24 negative_sampler.num_negs_per_pos 16.0 +419 24 training.batch_size 1.0 +419 25 model.embedding_dim 0.0 +419 25 loss.margin 7.94106787819726 +419 25 negative_sampler.num_negs_per_pos 35.0 +419 25 training.batch_size 2.0 +419 26 model.embedding_dim 2.0 +419 26 loss.margin 9.844534106662598 +419 26 negative_sampler.num_negs_per_pos 32.0 +419 26 training.batch_size 1.0 +419 27 model.embedding_dim 1.0 +419 27 loss.margin 5.599961659223457 +419 27 negative_sampler.num_negs_per_pos 45.0 +419 27 training.batch_size 2.0 +419 28 model.embedding_dim 1.0 +419 28 loss.margin 5.387861098932762 +419 28 negative_sampler.num_negs_per_pos 87.0 +419 28 training.batch_size 2.0 +419 29 model.embedding_dim 0.0 +419 29 loss.margin 7.53758581774378 +419 29 negative_sampler.num_negs_per_pos 26.0 +419 29 training.batch_size 0.0 +419 30 model.embedding_dim 0.0 +419 30 loss.margin 4.50426903140961 +419 30 negative_sampler.num_negs_per_pos 37.0 +419 30 training.batch_size 1.0 +419 31 model.embedding_dim 0.0 +419 31 
loss.margin 2.32810900089235 +419 31 negative_sampler.num_negs_per_pos 32.0 +419 31 training.batch_size 2.0 +419 32 model.embedding_dim 1.0 +419 32 loss.margin 9.63743882442484 +419 32 negative_sampler.num_negs_per_pos 5.0 +419 32 training.batch_size 1.0 +419 33 model.embedding_dim 1.0 +419 33 loss.margin 7.7871634242466925 +419 33 negative_sampler.num_negs_per_pos 79.0 +419 33 training.batch_size 1.0 +419 34 model.embedding_dim 1.0 +419 34 loss.margin 4.943361514018395 +419 34 negative_sampler.num_negs_per_pos 66.0 +419 34 training.batch_size 2.0 +419 35 model.embedding_dim 1.0 +419 35 loss.margin 8.759981604536506 +419 35 negative_sampler.num_negs_per_pos 15.0 +419 35 training.batch_size 1.0 +419 36 model.embedding_dim 0.0 +419 36 loss.margin 2.3266859310152643 +419 36 negative_sampler.num_negs_per_pos 77.0 +419 36 training.batch_size 0.0 +419 37 model.embedding_dim 0.0 +419 37 loss.margin 5.6314378483909495 +419 37 negative_sampler.num_negs_per_pos 92.0 +419 37 training.batch_size 2.0 +419 38 model.embedding_dim 2.0 +419 38 loss.margin 9.239729717217084 +419 38 negative_sampler.num_negs_per_pos 47.0 +419 38 training.batch_size 2.0 +419 39 model.embedding_dim 2.0 +419 39 loss.margin 3.3444969496943124 +419 39 negative_sampler.num_negs_per_pos 14.0 +419 39 training.batch_size 0.0 +419 40 model.embedding_dim 1.0 +419 40 loss.margin 5.182413436892734 +419 40 negative_sampler.num_negs_per_pos 7.0 +419 40 training.batch_size 0.0 +419 41 model.embedding_dim 0.0 +419 41 loss.margin 1.8540061663988538 +419 41 negative_sampler.num_negs_per_pos 28.0 +419 41 training.batch_size 2.0 +419 42 model.embedding_dim 0.0 +419 42 loss.margin 2.8369145154460083 +419 42 negative_sampler.num_negs_per_pos 76.0 +419 42 training.batch_size 0.0 +419 43 model.embedding_dim 2.0 +419 43 loss.margin 7.023700650651564 +419 43 negative_sampler.num_negs_per_pos 61.0 +419 43 training.batch_size 2.0 +419 44 model.embedding_dim 2.0 +419 44 loss.margin 8.880701064013763 +419 44 
negative_sampler.num_negs_per_pos 5.0 +419 44 training.batch_size 1.0 +419 45 model.embedding_dim 0.0 +419 45 loss.margin 0.7149435670470793 +419 45 negative_sampler.num_negs_per_pos 1.0 +419 45 training.batch_size 2.0 +419 46 model.embedding_dim 1.0 +419 46 loss.margin 9.28203976302281 +419 46 negative_sampler.num_negs_per_pos 60.0 +419 46 training.batch_size 1.0 +419 47 model.embedding_dim 0.0 +419 47 loss.margin 9.590203285197404 +419 47 negative_sampler.num_negs_per_pos 27.0 +419 47 training.batch_size 0.0 +419 48 model.embedding_dim 0.0 +419 48 loss.margin 4.502564437867463 +419 48 negative_sampler.num_negs_per_pos 67.0 +419 48 training.batch_size 2.0 +419 49 model.embedding_dim 0.0 +419 49 loss.margin 8.255324813310231 +419 49 negative_sampler.num_negs_per_pos 43.0 +419 49 training.batch_size 1.0 +419 50 model.embedding_dim 1.0 +419 50 loss.margin 4.332457861586354 +419 50 negative_sampler.num_negs_per_pos 21.0 +419 50 training.batch_size 1.0 +419 51 model.embedding_dim 2.0 +419 51 loss.margin 3.080883897926793 +419 51 negative_sampler.num_negs_per_pos 54.0 +419 51 training.batch_size 0.0 +419 52 model.embedding_dim 0.0 +419 52 loss.margin 9.87436839522111 +419 52 negative_sampler.num_negs_per_pos 42.0 +419 52 training.batch_size 2.0 +419 53 model.embedding_dim 0.0 +419 53 loss.margin 5.413661062120578 +419 53 negative_sampler.num_negs_per_pos 14.0 +419 53 training.batch_size 0.0 +419 54 model.embedding_dim 0.0 +419 54 loss.margin 7.644288916525952 +419 54 negative_sampler.num_negs_per_pos 66.0 +419 54 training.batch_size 0.0 +419 55 model.embedding_dim 0.0 +419 55 loss.margin 9.585993280547564 +419 55 negative_sampler.num_negs_per_pos 52.0 +419 55 training.batch_size 1.0 +419 56 model.embedding_dim 1.0 +419 56 loss.margin 3.141655043896857 +419 56 negative_sampler.num_negs_per_pos 38.0 +419 56 training.batch_size 2.0 +419 57 model.embedding_dim 0.0 +419 57 loss.margin 1.524278500997502 +419 57 negative_sampler.num_negs_per_pos 59.0 +419 57 
training.batch_size 1.0 +419 58 model.embedding_dim 2.0 +419 58 loss.margin 2.307312368931079 +419 58 negative_sampler.num_negs_per_pos 41.0 +419 58 training.batch_size 1.0 +419 59 model.embedding_dim 1.0 +419 59 loss.margin 7.281501140967221 +419 59 negative_sampler.num_negs_per_pos 86.0 +419 59 training.batch_size 2.0 +419 60 model.embedding_dim 1.0 +419 60 loss.margin 0.5492753120003464 +419 60 negative_sampler.num_negs_per_pos 14.0 +419 60 training.batch_size 2.0 +419 61 model.embedding_dim 2.0 +419 61 loss.margin 3.276379488015733 +419 61 negative_sampler.num_negs_per_pos 98.0 +419 61 training.batch_size 2.0 +419 62 model.embedding_dim 1.0 +419 62 loss.margin 7.103127341810275 +419 62 negative_sampler.num_negs_per_pos 7.0 +419 62 training.batch_size 2.0 +419 63 model.embedding_dim 1.0 +419 63 loss.margin 3.3902997125652004 +419 63 negative_sampler.num_negs_per_pos 58.0 +419 63 training.batch_size 0.0 +419 64 model.embedding_dim 2.0 +419 64 loss.margin 6.058670144914792 +419 64 negative_sampler.num_negs_per_pos 21.0 +419 64 training.batch_size 2.0 +419 65 model.embedding_dim 1.0 +419 65 loss.margin 4.238579906807898 +419 65 negative_sampler.num_negs_per_pos 25.0 +419 65 training.batch_size 0.0 +419 66 model.embedding_dim 0.0 +419 66 loss.margin 3.543027647553491 +419 66 negative_sampler.num_negs_per_pos 68.0 +419 66 training.batch_size 2.0 +419 67 model.embedding_dim 1.0 +419 67 loss.margin 7.992958694054793 +419 67 negative_sampler.num_negs_per_pos 81.0 +419 67 training.batch_size 1.0 +419 68 model.embedding_dim 1.0 +419 68 loss.margin 5.893953142849056 +419 68 negative_sampler.num_negs_per_pos 20.0 +419 68 training.batch_size 1.0 +419 69 model.embedding_dim 1.0 +419 69 loss.margin 1.8738380751936068 +419 69 negative_sampler.num_negs_per_pos 98.0 +419 69 training.batch_size 1.0 +419 70 model.embedding_dim 2.0 +419 70 loss.margin 8.534784757767728 +419 70 negative_sampler.num_negs_per_pos 49.0 +419 70 training.batch_size 1.0 +419 71 model.embedding_dim 0.0 +419 
71 loss.margin 1.7564064872745537 +419 71 negative_sampler.num_negs_per_pos 68.0 +419 71 training.batch_size 2.0 +419 72 model.embedding_dim 1.0 +419 72 loss.margin 5.9356118851981865 +419 72 negative_sampler.num_negs_per_pos 33.0 +419 72 training.batch_size 0.0 +419 73 model.embedding_dim 2.0 +419 73 loss.margin 8.025353232978276 +419 73 negative_sampler.num_negs_per_pos 45.0 +419 73 training.batch_size 1.0 +419 74 model.embedding_dim 2.0 +419 74 loss.margin 1.1816751448727736 +419 74 negative_sampler.num_negs_per_pos 85.0 +419 74 training.batch_size 2.0 +419 75 model.embedding_dim 1.0 +419 75 loss.margin 5.385258966307785 +419 75 negative_sampler.num_negs_per_pos 43.0 +419 75 training.batch_size 0.0 +419 76 model.embedding_dim 0.0 +419 76 loss.margin 2.03061386490376 +419 76 negative_sampler.num_negs_per_pos 59.0 +419 76 training.batch_size 2.0 +419 77 model.embedding_dim 1.0 +419 77 loss.margin 5.150147923278429 +419 77 negative_sampler.num_negs_per_pos 28.0 +419 77 training.batch_size 2.0 +419 78 model.embedding_dim 1.0 +419 78 loss.margin 9.267830490064291 +419 78 negative_sampler.num_negs_per_pos 93.0 +419 78 training.batch_size 0.0 +419 79 model.embedding_dim 2.0 +419 79 loss.margin 9.306582905711993 +419 79 negative_sampler.num_negs_per_pos 73.0 +419 79 training.batch_size 2.0 +419 1 dataset """kinships""" +419 1 model """ntn""" +419 1 loss """marginranking""" +419 1 regularizer """no""" +419 1 optimizer """adadelta""" +419 1 training_loop """owa""" +419 1 negative_sampler """basic""" +419 1 evaluator """rankbased""" +419 2 dataset """kinships""" +419 2 model """ntn""" +419 2 loss """marginranking""" +419 2 regularizer """no""" +419 2 optimizer """adadelta""" +419 2 training_loop """owa""" +419 2 negative_sampler """basic""" +419 2 evaluator """rankbased""" +419 3 dataset """kinships""" +419 3 model """ntn""" +419 3 loss """marginranking""" +419 3 regularizer """no""" +419 3 optimizer """adadelta""" +419 3 training_loop """owa""" +419 3 negative_sampler 
"""basic""" +419 3 evaluator """rankbased""" +419 4 dataset """kinships""" +419 4 model """ntn""" +419 4 loss """marginranking""" +419 4 regularizer """no""" +419 4 optimizer """adadelta""" +419 4 training_loop """owa""" +419 4 negative_sampler """basic""" +419 4 evaluator """rankbased""" +419 5 dataset """kinships""" +419 5 model """ntn""" +419 5 loss """marginranking""" +419 5 regularizer """no""" +419 5 optimizer """adadelta""" +419 5 training_loop """owa""" +419 5 negative_sampler """basic""" +419 5 evaluator """rankbased""" +419 6 dataset """kinships""" +419 6 model """ntn""" +419 6 loss """marginranking""" +419 6 regularizer """no""" +419 6 optimizer """adadelta""" +419 6 training_loop """owa""" +419 6 negative_sampler """basic""" +419 6 evaluator """rankbased""" +419 7 dataset """kinships""" +419 7 model """ntn""" +419 7 loss """marginranking""" +419 7 regularizer """no""" +419 7 optimizer """adadelta""" +419 7 training_loop """owa""" +419 7 negative_sampler """basic""" +419 7 evaluator """rankbased""" +419 8 dataset """kinships""" +419 8 model """ntn""" +419 8 loss """marginranking""" +419 8 regularizer """no""" +419 8 optimizer """adadelta""" +419 8 training_loop """owa""" +419 8 negative_sampler """basic""" +419 8 evaluator """rankbased""" +419 9 dataset """kinships""" +419 9 model """ntn""" +419 9 loss """marginranking""" +419 9 regularizer """no""" +419 9 optimizer """adadelta""" +419 9 training_loop """owa""" +419 9 negative_sampler """basic""" +419 9 evaluator """rankbased""" +419 10 dataset """kinships""" +419 10 model """ntn""" +419 10 loss """marginranking""" +419 10 regularizer """no""" +419 10 optimizer """adadelta""" +419 10 training_loop """owa""" +419 10 negative_sampler """basic""" +419 10 evaluator """rankbased""" +419 11 dataset """kinships""" +419 11 model """ntn""" +419 11 loss """marginranking""" +419 11 regularizer """no""" +419 11 optimizer """adadelta""" +419 11 training_loop """owa""" +419 11 negative_sampler """basic""" +419 11 
evaluator """rankbased""" +419 12 dataset """kinships""" +419 12 model """ntn""" +419 12 loss """marginranking""" +419 12 regularizer """no""" +419 12 optimizer """adadelta""" +419 12 training_loop """owa""" +419 12 negative_sampler """basic""" +419 12 evaluator """rankbased""" +419 13 dataset """kinships""" +419 13 model """ntn""" +419 13 loss """marginranking""" +419 13 regularizer """no""" +419 13 optimizer """adadelta""" +419 13 training_loop """owa""" +419 13 negative_sampler """basic""" +419 13 evaluator """rankbased""" +419 14 dataset """kinships""" +419 14 model """ntn""" +419 14 loss """marginranking""" +419 14 regularizer """no""" +419 14 optimizer """adadelta""" +419 14 training_loop """owa""" +419 14 negative_sampler """basic""" +419 14 evaluator """rankbased""" +419 15 dataset """kinships""" +419 15 model """ntn""" +419 15 loss """marginranking""" +419 15 regularizer """no""" +419 15 optimizer """adadelta""" +419 15 training_loop """owa""" +419 15 negative_sampler """basic""" +419 15 evaluator """rankbased""" +419 16 dataset """kinships""" +419 16 model """ntn""" +419 16 loss """marginranking""" +419 16 regularizer """no""" +419 16 optimizer """adadelta""" +419 16 training_loop """owa""" +419 16 negative_sampler """basic""" +419 16 evaluator """rankbased""" +419 17 dataset """kinships""" +419 17 model """ntn""" +419 17 loss """marginranking""" +419 17 regularizer """no""" +419 17 optimizer """adadelta""" +419 17 training_loop """owa""" +419 17 negative_sampler """basic""" +419 17 evaluator """rankbased""" +419 18 dataset """kinships""" +419 18 model """ntn""" +419 18 loss """marginranking""" +419 18 regularizer """no""" +419 18 optimizer """adadelta""" +419 18 training_loop """owa""" +419 18 negative_sampler """basic""" +419 18 evaluator """rankbased""" +419 19 dataset """kinships""" +419 19 model """ntn""" +419 19 loss """marginranking""" +419 19 regularizer """no""" +419 19 optimizer """adadelta""" +419 19 training_loop """owa""" +419 19 
negative_sampler """basic""" +419 19 evaluator """rankbased""" +419 20 dataset """kinships""" +419 20 model """ntn""" +419 20 loss """marginranking""" +419 20 regularizer """no""" +419 20 optimizer """adadelta""" +419 20 training_loop """owa""" +419 20 negative_sampler """basic""" +419 20 evaluator """rankbased""" +419 21 dataset """kinships""" +419 21 model """ntn""" +419 21 loss """marginranking""" +419 21 regularizer """no""" +419 21 optimizer """adadelta""" +419 21 training_loop """owa""" +419 21 negative_sampler """basic""" +419 21 evaluator """rankbased""" +419 22 dataset """kinships""" +419 22 model """ntn""" +419 22 loss """marginranking""" +419 22 regularizer """no""" +419 22 optimizer """adadelta""" +419 22 training_loop """owa""" +419 22 negative_sampler """basic""" +419 22 evaluator """rankbased""" +419 23 dataset """kinships""" +419 23 model """ntn""" +419 23 loss """marginranking""" +419 23 regularizer """no""" +419 23 optimizer """adadelta""" +419 23 training_loop """owa""" +419 23 negative_sampler """basic""" +419 23 evaluator """rankbased""" +419 24 dataset """kinships""" +419 24 model """ntn""" +419 24 loss """marginranking""" +419 24 regularizer """no""" +419 24 optimizer """adadelta""" +419 24 training_loop """owa""" +419 24 negative_sampler """basic""" +419 24 evaluator """rankbased""" +419 25 dataset """kinships""" +419 25 model """ntn""" +419 25 loss """marginranking""" +419 25 regularizer """no""" +419 25 optimizer """adadelta""" +419 25 training_loop """owa""" +419 25 negative_sampler """basic""" +419 25 evaluator """rankbased""" +419 26 dataset """kinships""" +419 26 model """ntn""" +419 26 loss """marginranking""" +419 26 regularizer """no""" +419 26 optimizer """adadelta""" +419 26 training_loop """owa""" +419 26 negative_sampler """basic""" +419 26 evaluator """rankbased""" +419 27 dataset """kinships""" +419 27 model """ntn""" +419 27 loss """marginranking""" +419 27 regularizer """no""" +419 27 optimizer """adadelta""" +419 27 
training_loop """owa""" +419 27 negative_sampler """basic""" +419 27 evaluator """rankbased""" +419 28 dataset """kinships""" +419 28 model """ntn""" +419 28 loss """marginranking""" +419 28 regularizer """no""" +419 28 optimizer """adadelta""" +419 28 training_loop """owa""" +419 28 negative_sampler """basic""" +419 28 evaluator """rankbased""" +419 29 dataset """kinships""" +419 29 model """ntn""" +419 29 loss """marginranking""" +419 29 regularizer """no""" +419 29 optimizer """adadelta""" +419 29 training_loop """owa""" +419 29 negative_sampler """basic""" +419 29 evaluator """rankbased""" +419 30 dataset """kinships""" +419 30 model """ntn""" +419 30 loss """marginranking""" +419 30 regularizer """no""" +419 30 optimizer """adadelta""" +419 30 training_loop """owa""" +419 30 negative_sampler """basic""" +419 30 evaluator """rankbased""" +419 31 dataset """kinships""" +419 31 model """ntn""" +419 31 loss """marginranking""" +419 31 regularizer """no""" +419 31 optimizer """adadelta""" +419 31 training_loop """owa""" +419 31 negative_sampler """basic""" +419 31 evaluator """rankbased""" +419 32 dataset """kinships""" +419 32 model """ntn""" +419 32 loss """marginranking""" +419 32 regularizer """no""" +419 32 optimizer """adadelta""" +419 32 training_loop """owa""" +419 32 negative_sampler """basic""" +419 32 evaluator """rankbased""" +419 33 dataset """kinships""" +419 33 model """ntn""" +419 33 loss """marginranking""" +419 33 regularizer """no""" +419 33 optimizer """adadelta""" +419 33 training_loop """owa""" +419 33 negative_sampler """basic""" +419 33 evaluator """rankbased""" +419 34 dataset """kinships""" +419 34 model """ntn""" +419 34 loss """marginranking""" +419 34 regularizer """no""" +419 34 optimizer """adadelta""" +419 34 training_loop """owa""" +419 34 negative_sampler """basic""" +419 34 evaluator """rankbased""" +419 35 dataset """kinships""" +419 35 model """ntn""" +419 35 loss """marginranking""" +419 35 regularizer """no""" +419 35 
optimizer """adadelta""" +419 35 training_loop """owa""" +419 35 negative_sampler """basic""" +419 35 evaluator """rankbased""" +419 36 dataset """kinships""" +419 36 model """ntn""" +419 36 loss """marginranking""" +419 36 regularizer """no""" +419 36 optimizer """adadelta""" +419 36 training_loop """owa""" +419 36 negative_sampler """basic""" +419 36 evaluator """rankbased""" +419 37 dataset """kinships""" +419 37 model """ntn""" +419 37 loss """marginranking""" +419 37 regularizer """no""" +419 37 optimizer """adadelta""" +419 37 training_loop """owa""" +419 37 negative_sampler """basic""" +419 37 evaluator """rankbased""" +419 38 dataset """kinships""" +419 38 model """ntn""" +419 38 loss """marginranking""" +419 38 regularizer """no""" +419 38 optimizer """adadelta""" +419 38 training_loop """owa""" +419 38 negative_sampler """basic""" +419 38 evaluator """rankbased""" +419 39 dataset """kinships""" +419 39 model """ntn""" +419 39 loss """marginranking""" +419 39 regularizer """no""" +419 39 optimizer """adadelta""" +419 39 training_loop """owa""" +419 39 negative_sampler """basic""" +419 39 evaluator """rankbased""" +419 40 dataset """kinships""" +419 40 model """ntn""" +419 40 loss """marginranking""" +419 40 regularizer """no""" +419 40 optimizer """adadelta""" +419 40 training_loop """owa""" +419 40 negative_sampler """basic""" +419 40 evaluator """rankbased""" +419 41 dataset """kinships""" +419 41 model """ntn""" +419 41 loss """marginranking""" +419 41 regularizer """no""" +419 41 optimizer """adadelta""" +419 41 training_loop """owa""" +419 41 negative_sampler """basic""" +419 41 evaluator """rankbased""" +419 42 dataset """kinships""" +419 42 model """ntn""" +419 42 loss """marginranking""" +419 42 regularizer """no""" +419 42 optimizer """adadelta""" +419 42 training_loop """owa""" +419 42 negative_sampler """basic""" +419 42 evaluator """rankbased""" +419 43 dataset """kinships""" +419 43 model """ntn""" +419 43 loss """marginranking""" +419 43 
regularizer """no""" +419 43 optimizer """adadelta""" +419 43 training_loop """owa""" +419 43 negative_sampler """basic""" +419 43 evaluator """rankbased""" +419 44 dataset """kinships""" +419 44 model """ntn""" +419 44 loss """marginranking""" +419 44 regularizer """no""" +419 44 optimizer """adadelta""" +419 44 training_loop """owa""" +419 44 negative_sampler """basic""" +419 44 evaluator """rankbased""" +419 45 dataset """kinships""" +419 45 model """ntn""" +419 45 loss """marginranking""" +419 45 regularizer """no""" +419 45 optimizer """adadelta""" +419 45 training_loop """owa""" +419 45 negative_sampler """basic""" +419 45 evaluator """rankbased""" +419 46 dataset """kinships""" +419 46 model """ntn""" +419 46 loss """marginranking""" +419 46 regularizer """no""" +419 46 optimizer """adadelta""" +419 46 training_loop """owa""" +419 46 negative_sampler """basic""" +419 46 evaluator """rankbased""" +419 47 dataset """kinships""" +419 47 model """ntn""" +419 47 loss """marginranking""" +419 47 regularizer """no""" +419 47 optimizer """adadelta""" +419 47 training_loop """owa""" +419 47 negative_sampler """basic""" +419 47 evaluator """rankbased""" +419 48 dataset """kinships""" +419 48 model """ntn""" +419 48 loss """marginranking""" +419 48 regularizer """no""" +419 48 optimizer """adadelta""" +419 48 training_loop """owa""" +419 48 negative_sampler """basic""" +419 48 evaluator """rankbased""" +419 49 dataset """kinships""" +419 49 model """ntn""" +419 49 loss """marginranking""" +419 49 regularizer """no""" +419 49 optimizer """adadelta""" +419 49 training_loop """owa""" +419 49 negative_sampler """basic""" +419 49 evaluator """rankbased""" +419 50 dataset """kinships""" +419 50 model """ntn""" +419 50 loss """marginranking""" +419 50 regularizer """no""" +419 50 optimizer """adadelta""" +419 50 training_loop """owa""" +419 50 negative_sampler """basic""" +419 50 evaluator """rankbased""" +419 51 dataset """kinships""" +419 51 model """ntn""" +419 51 loss 
"""marginranking""" +419 51 regularizer """no""" +419 51 optimizer """adadelta""" +419 51 training_loop """owa""" +419 51 negative_sampler """basic""" +419 51 evaluator """rankbased""" +419 52 dataset """kinships""" +419 52 model """ntn""" +419 52 loss """marginranking""" +419 52 regularizer """no""" +419 52 optimizer """adadelta""" +419 52 training_loop """owa""" +419 52 negative_sampler """basic""" +419 52 evaluator """rankbased""" +419 53 dataset """kinships""" +419 53 model """ntn""" +419 53 loss """marginranking""" +419 53 regularizer """no""" +419 53 optimizer """adadelta""" +419 53 training_loop """owa""" +419 53 negative_sampler """basic""" +419 53 evaluator """rankbased""" +419 54 dataset """kinships""" +419 54 model """ntn""" +419 54 loss """marginranking""" +419 54 regularizer """no""" +419 54 optimizer """adadelta""" +419 54 training_loop """owa""" +419 54 negative_sampler """basic""" +419 54 evaluator """rankbased""" +419 55 dataset """kinships""" +419 55 model """ntn""" +419 55 loss """marginranking""" +419 55 regularizer """no""" +419 55 optimizer """adadelta""" +419 55 training_loop """owa""" +419 55 negative_sampler """basic""" +419 55 evaluator """rankbased""" +419 56 dataset """kinships""" +419 56 model """ntn""" +419 56 loss """marginranking""" +419 56 regularizer """no""" +419 56 optimizer """adadelta""" +419 56 training_loop """owa""" +419 56 negative_sampler """basic""" +419 56 evaluator """rankbased""" +419 57 dataset """kinships""" +419 57 model """ntn""" +419 57 loss """marginranking""" +419 57 regularizer """no""" +419 57 optimizer """adadelta""" +419 57 training_loop """owa""" +419 57 negative_sampler """basic""" +419 57 evaluator """rankbased""" +419 58 dataset """kinships""" +419 58 model """ntn""" +419 58 loss """marginranking""" +419 58 regularizer """no""" +419 58 optimizer """adadelta""" +419 58 training_loop """owa""" +419 58 negative_sampler """basic""" +419 58 evaluator """rankbased""" +419 59 dataset """kinships""" +419 59 
model """ntn""" +419 59 loss """marginranking""" +419 59 regularizer """no""" +419 59 optimizer """adadelta""" +419 59 training_loop """owa""" +419 59 negative_sampler """basic""" +419 59 evaluator """rankbased""" +419 60 dataset """kinships""" +419 60 model """ntn""" +419 60 loss """marginranking""" +419 60 regularizer """no""" +419 60 optimizer """adadelta""" +419 60 training_loop """owa""" +419 60 negative_sampler """basic""" +419 60 evaluator """rankbased""" +419 61 dataset """kinships""" +419 61 model """ntn""" +419 61 loss """marginranking""" +419 61 regularizer """no""" +419 61 optimizer """adadelta""" +419 61 training_loop """owa""" +419 61 negative_sampler """basic""" +419 61 evaluator """rankbased""" +419 62 dataset """kinships""" +419 62 model """ntn""" +419 62 loss """marginranking""" +419 62 regularizer """no""" +419 62 optimizer """adadelta""" +419 62 training_loop """owa""" +419 62 negative_sampler """basic""" +419 62 evaluator """rankbased""" +419 63 dataset """kinships""" +419 63 model """ntn""" +419 63 loss """marginranking""" +419 63 regularizer """no""" +419 63 optimizer """adadelta""" +419 63 training_loop """owa""" +419 63 negative_sampler """basic""" +419 63 evaluator """rankbased""" +419 64 dataset """kinships""" +419 64 model """ntn""" +419 64 loss """marginranking""" +419 64 regularizer """no""" +419 64 optimizer """adadelta""" +419 64 training_loop """owa""" +419 64 negative_sampler """basic""" +419 64 evaluator """rankbased""" +419 65 dataset """kinships""" +419 65 model """ntn""" +419 65 loss """marginranking""" +419 65 regularizer """no""" +419 65 optimizer """adadelta""" +419 65 training_loop """owa""" +419 65 negative_sampler """basic""" +419 65 evaluator """rankbased""" +419 66 dataset """kinships""" +419 66 model """ntn""" +419 66 loss """marginranking""" +419 66 regularizer """no""" +419 66 optimizer """adadelta""" +419 66 training_loop """owa""" +419 66 negative_sampler """basic""" +419 66 evaluator """rankbased""" +419 67 
dataset """kinships""" +419 67 model """ntn""" +419 67 loss """marginranking""" +419 67 regularizer """no""" +419 67 optimizer """adadelta""" +419 67 training_loop """owa""" +419 67 negative_sampler """basic""" +419 67 evaluator """rankbased""" +419 68 dataset """kinships""" +419 68 model """ntn""" +419 68 loss """marginranking""" +419 68 regularizer """no""" +419 68 optimizer """adadelta""" +419 68 training_loop """owa""" +419 68 negative_sampler """basic""" +419 68 evaluator """rankbased""" +419 69 dataset """kinships""" +419 69 model """ntn""" +419 69 loss """marginranking""" +419 69 regularizer """no""" +419 69 optimizer """adadelta""" +419 69 training_loop """owa""" +419 69 negative_sampler """basic""" +419 69 evaluator """rankbased""" +419 70 dataset """kinships""" +419 70 model """ntn""" +419 70 loss """marginranking""" +419 70 regularizer """no""" +419 70 optimizer """adadelta""" +419 70 training_loop """owa""" +419 70 negative_sampler """basic""" +419 70 evaluator """rankbased""" +419 71 dataset """kinships""" +419 71 model """ntn""" +419 71 loss """marginranking""" +419 71 regularizer """no""" +419 71 optimizer """adadelta""" +419 71 training_loop """owa""" +419 71 negative_sampler """basic""" +419 71 evaluator """rankbased""" +419 72 dataset """kinships""" +419 72 model """ntn""" +419 72 loss """marginranking""" +419 72 regularizer """no""" +419 72 optimizer """adadelta""" +419 72 training_loop """owa""" +419 72 negative_sampler """basic""" +419 72 evaluator """rankbased""" +419 73 dataset """kinships""" +419 73 model """ntn""" +419 73 loss """marginranking""" +419 73 regularizer """no""" +419 73 optimizer """adadelta""" +419 73 training_loop """owa""" +419 73 negative_sampler """basic""" +419 73 evaluator """rankbased""" +419 74 dataset """kinships""" +419 74 model """ntn""" +419 74 loss """marginranking""" +419 74 regularizer """no""" +419 74 optimizer """adadelta""" +419 74 training_loop """owa""" +419 74 negative_sampler """basic""" +419 74 evaluator 
"""rankbased""" +419 75 dataset """kinships""" +419 75 model """ntn""" +419 75 loss """marginranking""" +419 75 regularizer """no""" +419 75 optimizer """adadelta""" +419 75 training_loop """owa""" +419 75 negative_sampler """basic""" +419 75 evaluator """rankbased""" +419 76 dataset """kinships""" +419 76 model """ntn""" +419 76 loss """marginranking""" +419 76 regularizer """no""" +419 76 optimizer """adadelta""" +419 76 training_loop """owa""" +419 76 negative_sampler """basic""" +419 76 evaluator """rankbased""" +419 77 dataset """kinships""" +419 77 model """ntn""" +419 77 loss """marginranking""" +419 77 regularizer """no""" +419 77 optimizer """adadelta""" +419 77 training_loop """owa""" +419 77 negative_sampler """basic""" +419 77 evaluator """rankbased""" +419 78 dataset """kinships""" +419 78 model """ntn""" +419 78 loss """marginranking""" +419 78 regularizer """no""" +419 78 optimizer """adadelta""" +419 78 training_loop """owa""" +419 78 negative_sampler """basic""" +419 78 evaluator """rankbased""" +419 79 dataset """kinships""" +419 79 model """ntn""" +419 79 loss """marginranking""" +419 79 regularizer """no""" +419 79 optimizer """adadelta""" +419 79 training_loop """owa""" +419 79 negative_sampler """basic""" +419 79 evaluator """rankbased""" +420 1 model.embedding_dim 1.0 +420 1 loss.margin 6.9196215326509325 +420 1 negative_sampler.num_negs_per_pos 69.0 +420 1 training.batch_size 2.0 +420 2 model.embedding_dim 0.0 +420 2 loss.margin 1.807343495072037 +420 2 negative_sampler.num_negs_per_pos 3.0 +420 2 training.batch_size 1.0 +420 3 model.embedding_dim 0.0 +420 3 loss.margin 7.029967918325681 +420 3 negative_sampler.num_negs_per_pos 70.0 +420 3 training.batch_size 2.0 +420 4 model.embedding_dim 2.0 +420 4 loss.margin 9.793615937715785 +420 4 negative_sampler.num_negs_per_pos 29.0 +420 4 training.batch_size 1.0 +420 5 model.embedding_dim 1.0 +420 5 loss.margin 1.37701356044003 +420 5 negative_sampler.num_negs_per_pos 6.0 +420 5 training.batch_size 
0.0 +420 6 model.embedding_dim 2.0 +420 6 loss.margin 1.4486244127945709 +420 6 negative_sampler.num_negs_per_pos 34.0 +420 6 training.batch_size 0.0 +420 7 model.embedding_dim 1.0 +420 7 loss.margin 5.517265978635454 +420 7 negative_sampler.num_negs_per_pos 26.0 +420 7 training.batch_size 1.0 +420 8 model.embedding_dim 1.0 +420 8 loss.margin 8.7420966534872 +420 8 negative_sampler.num_negs_per_pos 34.0 +420 8 training.batch_size 2.0 +420 9 model.embedding_dim 1.0 +420 9 loss.margin 1.243318789063352 +420 9 negative_sampler.num_negs_per_pos 40.0 +420 9 training.batch_size 1.0 +420 10 model.embedding_dim 1.0 +420 10 loss.margin 4.61553718690035 +420 10 negative_sampler.num_negs_per_pos 95.0 +420 10 training.batch_size 2.0 +420 11 model.embedding_dim 2.0 +420 11 loss.margin 9.500018468985385 +420 11 negative_sampler.num_negs_per_pos 59.0 +420 11 training.batch_size 2.0 +420 12 model.embedding_dim 1.0 +420 12 loss.margin 4.9915413572023954 +420 12 negative_sampler.num_negs_per_pos 14.0 +420 12 training.batch_size 0.0 +420 13 model.embedding_dim 2.0 +420 13 loss.margin 7.711705152513961 +420 13 negative_sampler.num_negs_per_pos 29.0 +420 13 training.batch_size 1.0 +420 14 model.embedding_dim 1.0 +420 14 loss.margin 3.6705227205681017 +420 14 negative_sampler.num_negs_per_pos 34.0 +420 14 training.batch_size 1.0 +420 15 model.embedding_dim 1.0 +420 15 loss.margin 6.4503008153437555 +420 15 negative_sampler.num_negs_per_pos 52.0 +420 15 training.batch_size 0.0 +420 16 model.embedding_dim 1.0 +420 16 loss.margin 8.763368546080951 +420 16 negative_sampler.num_negs_per_pos 8.0 +420 16 training.batch_size 2.0 +420 17 model.embedding_dim 1.0 +420 17 loss.margin 5.54753035486407 +420 17 negative_sampler.num_negs_per_pos 94.0 +420 17 training.batch_size 1.0 +420 18 model.embedding_dim 1.0 +420 18 loss.margin 5.6999142224004125 +420 18 negative_sampler.num_negs_per_pos 88.0 +420 18 training.batch_size 2.0 +420 19 model.embedding_dim 2.0 +420 19 loss.margin 7.475939021419146 +420 
19 negative_sampler.num_negs_per_pos 5.0 +420 19 training.batch_size 1.0 +420 20 model.embedding_dim 2.0 +420 20 loss.margin 9.730520201156324 +420 20 negative_sampler.num_negs_per_pos 75.0 +420 20 training.batch_size 1.0 +420 21 model.embedding_dim 0.0 +420 21 loss.margin 1.174222357817259 +420 21 negative_sampler.num_negs_per_pos 31.0 +420 21 training.batch_size 2.0 +420 22 model.embedding_dim 0.0 +420 22 loss.margin 6.183836933186103 +420 22 negative_sampler.num_negs_per_pos 0.0 +420 22 training.batch_size 2.0 +420 23 model.embedding_dim 1.0 +420 23 loss.margin 3.522444905521744 +420 23 negative_sampler.num_negs_per_pos 0.0 +420 23 training.batch_size 1.0 +420 24 model.embedding_dim 0.0 +420 24 loss.margin 4.081176495440512 +420 24 negative_sampler.num_negs_per_pos 37.0 +420 24 training.batch_size 0.0 +420 25 model.embedding_dim 2.0 +420 25 loss.margin 3.8711027822585447 +420 25 negative_sampler.num_negs_per_pos 88.0 +420 25 training.batch_size 1.0 +420 26 model.embedding_dim 0.0 +420 26 loss.margin 2.1131016810966163 +420 26 negative_sampler.num_negs_per_pos 45.0 +420 26 training.batch_size 1.0 +420 27 model.embedding_dim 2.0 +420 27 loss.margin 1.3705003841855794 +420 27 negative_sampler.num_negs_per_pos 40.0 +420 27 training.batch_size 2.0 +420 28 model.embedding_dim 0.0 +420 28 loss.margin 4.39897333919439 +420 28 negative_sampler.num_negs_per_pos 98.0 +420 28 training.batch_size 2.0 +420 29 model.embedding_dim 2.0 +420 29 loss.margin 9.454428641261106 +420 29 negative_sampler.num_negs_per_pos 11.0 +420 29 training.batch_size 0.0 +420 30 model.embedding_dim 1.0 +420 30 loss.margin 8.611005518240491 +420 30 negative_sampler.num_negs_per_pos 94.0 +420 30 training.batch_size 1.0 +420 31 model.embedding_dim 1.0 +420 31 loss.margin 7.6368901416584 +420 31 negative_sampler.num_negs_per_pos 9.0 +420 31 training.batch_size 1.0 +420 32 model.embedding_dim 1.0 +420 32 loss.margin 9.621310932947083 +420 32 negative_sampler.num_negs_per_pos 89.0 +420 32 
training.batch_size 2.0 +420 33 model.embedding_dim 2.0 +420 33 loss.margin 1.1592813038870324 +420 33 negative_sampler.num_negs_per_pos 36.0 +420 33 training.batch_size 1.0 +420 34 model.embedding_dim 2.0 +420 34 loss.margin 1.730786332842086 +420 34 negative_sampler.num_negs_per_pos 79.0 +420 34 training.batch_size 1.0 +420 35 model.embedding_dim 0.0 +420 35 loss.margin 4.7572874510505265 +420 35 negative_sampler.num_negs_per_pos 77.0 +420 35 training.batch_size 2.0 +420 36 model.embedding_dim 1.0 +420 36 loss.margin 1.6748654797007452 +420 36 negative_sampler.num_negs_per_pos 25.0 +420 36 training.batch_size 0.0 +420 37 model.embedding_dim 0.0 +420 37 loss.margin 6.195496320500136 +420 37 negative_sampler.num_negs_per_pos 40.0 +420 37 training.batch_size 2.0 +420 38 model.embedding_dim 2.0 +420 38 loss.margin 9.71416139594407 +420 38 negative_sampler.num_negs_per_pos 20.0 +420 38 training.batch_size 0.0 +420 39 model.embedding_dim 0.0 +420 39 loss.margin 9.168560105937313 +420 39 negative_sampler.num_negs_per_pos 90.0 +420 39 training.batch_size 2.0 +420 40 model.embedding_dim 0.0 +420 40 loss.margin 9.32867385179713 +420 40 negative_sampler.num_negs_per_pos 17.0 +420 40 training.batch_size 0.0 +420 41 model.embedding_dim 0.0 +420 41 loss.margin 8.06712896203102 +420 41 negative_sampler.num_negs_per_pos 45.0 +420 41 training.batch_size 1.0 +420 42 model.embedding_dim 0.0 +420 42 loss.margin 0.6706585510509204 +420 42 negative_sampler.num_negs_per_pos 34.0 +420 42 training.batch_size 0.0 +420 43 model.embedding_dim 1.0 +420 43 loss.margin 1.6464549891498634 +420 43 negative_sampler.num_negs_per_pos 55.0 +420 43 training.batch_size 0.0 +420 44 model.embedding_dim 0.0 +420 44 loss.margin 5.926711722516833 +420 44 negative_sampler.num_negs_per_pos 47.0 +420 44 training.batch_size 2.0 +420 45 model.embedding_dim 0.0 +420 45 loss.margin 2.1394392590545435 +420 45 negative_sampler.num_negs_per_pos 86.0 +420 45 training.batch_size 2.0 +420 46 model.embedding_dim 2.0 
+420 46 loss.margin 6.695916468773266 +420 46 negative_sampler.num_negs_per_pos 94.0 +420 46 training.batch_size 1.0 +420 47 model.embedding_dim 2.0 +420 47 loss.margin 0.7154157581619909 +420 47 negative_sampler.num_negs_per_pos 12.0 +420 47 training.batch_size 1.0 +420 48 model.embedding_dim 0.0 +420 48 loss.margin 0.7918169712398873 +420 48 negative_sampler.num_negs_per_pos 78.0 +420 48 training.batch_size 0.0 +420 49 model.embedding_dim 2.0 +420 49 loss.margin 7.352719107154255 +420 49 negative_sampler.num_negs_per_pos 33.0 +420 49 training.batch_size 0.0 +420 50 model.embedding_dim 1.0 +420 50 loss.margin 7.165878789881775 +420 50 negative_sampler.num_negs_per_pos 51.0 +420 50 training.batch_size 1.0 +420 51 model.embedding_dim 1.0 +420 51 loss.margin 4.443306413097943 +420 51 negative_sampler.num_negs_per_pos 62.0 +420 51 training.batch_size 2.0 +420 52 model.embedding_dim 0.0 +420 52 loss.margin 8.537829390283278 +420 52 negative_sampler.num_negs_per_pos 98.0 +420 52 training.batch_size 2.0 +420 53 model.embedding_dim 1.0 +420 53 loss.margin 0.7988126195670419 +420 53 negative_sampler.num_negs_per_pos 63.0 +420 53 training.batch_size 2.0 +420 54 model.embedding_dim 0.0 +420 54 loss.margin 7.480941644029754 +420 54 negative_sampler.num_negs_per_pos 1.0 +420 54 training.batch_size 1.0 +420 55 model.embedding_dim 0.0 +420 55 loss.margin 2.380675001571534 +420 55 negative_sampler.num_negs_per_pos 12.0 +420 55 training.batch_size 2.0 +420 56 model.embedding_dim 1.0 +420 56 loss.margin 5.512913572870971 +420 56 negative_sampler.num_negs_per_pos 65.0 +420 56 training.batch_size 0.0 +420 57 model.embedding_dim 1.0 +420 57 loss.margin 1.2246272097161617 +420 57 negative_sampler.num_negs_per_pos 41.0 +420 57 training.batch_size 1.0 +420 58 model.embedding_dim 2.0 +420 58 loss.margin 3.9808182971731965 +420 58 negative_sampler.num_negs_per_pos 95.0 +420 58 training.batch_size 1.0 +420 59 model.embedding_dim 1.0 +420 59 loss.margin 3.2281355601581714 +420 59 
negative_sampler.num_negs_per_pos 29.0 +420 59 training.batch_size 1.0 +420 60 model.embedding_dim 2.0 +420 60 loss.margin 3.2422011675406095 +420 60 negative_sampler.num_negs_per_pos 62.0 +420 60 training.batch_size 1.0 +420 61 model.embedding_dim 0.0 +420 61 loss.margin 8.804095598422357 +420 61 negative_sampler.num_negs_per_pos 20.0 +420 61 training.batch_size 2.0 +420 62 model.embedding_dim 1.0 +420 62 loss.margin 3.861513934436148 +420 62 negative_sampler.num_negs_per_pos 47.0 +420 62 training.batch_size 2.0 +420 63 model.embedding_dim 0.0 +420 63 loss.margin 0.8821218857531525 +420 63 negative_sampler.num_negs_per_pos 23.0 +420 63 training.batch_size 1.0 +420 64 model.embedding_dim 2.0 +420 64 loss.margin 9.161012472116143 +420 64 negative_sampler.num_negs_per_pos 71.0 +420 64 training.batch_size 0.0 +420 65 model.embedding_dim 2.0 +420 65 loss.margin 7.9525760016246485 +420 65 negative_sampler.num_negs_per_pos 57.0 +420 65 training.batch_size 1.0 +420 66 model.embedding_dim 2.0 +420 66 loss.margin 7.436444275501326 +420 66 negative_sampler.num_negs_per_pos 7.0 +420 66 training.batch_size 2.0 +420 67 model.embedding_dim 0.0 +420 67 loss.margin 0.7403793548266557 +420 67 negative_sampler.num_negs_per_pos 86.0 +420 67 training.batch_size 0.0 +420 68 model.embedding_dim 0.0 +420 68 loss.margin 8.111410579018408 +420 68 negative_sampler.num_negs_per_pos 4.0 +420 68 training.batch_size 1.0 +420 69 model.embedding_dim 0.0 +420 69 loss.margin 5.434107584930716 +420 69 negative_sampler.num_negs_per_pos 26.0 +420 69 training.batch_size 0.0 +420 70 model.embedding_dim 2.0 +420 70 loss.margin 9.530073967181785 +420 70 negative_sampler.num_negs_per_pos 10.0 +420 70 training.batch_size 0.0 +420 71 model.embedding_dim 0.0 +420 71 loss.margin 3.065581661592612 +420 71 negative_sampler.num_negs_per_pos 85.0 +420 71 training.batch_size 2.0 +420 72 model.embedding_dim 2.0 +420 72 loss.margin 5.816806930464478 +420 72 negative_sampler.num_negs_per_pos 17.0 +420 72 
training.batch_size 2.0 +420 73 model.embedding_dim 0.0 +420 73 loss.margin 2.289692531235455 +420 73 negative_sampler.num_negs_per_pos 68.0 +420 73 training.batch_size 1.0 +420 74 model.embedding_dim 2.0 +420 74 loss.margin 7.496972130322049 +420 74 negative_sampler.num_negs_per_pos 97.0 +420 74 training.batch_size 2.0 +420 75 model.embedding_dim 2.0 +420 75 loss.margin 2.736744720490342 +420 75 negative_sampler.num_negs_per_pos 48.0 +420 75 training.batch_size 2.0 +420 76 model.embedding_dim 2.0 +420 76 loss.margin 3.2631973974326067 +420 76 negative_sampler.num_negs_per_pos 48.0 +420 76 training.batch_size 1.0 +420 77 model.embedding_dim 0.0 +420 77 loss.margin 4.438476773938772 +420 77 negative_sampler.num_negs_per_pos 75.0 +420 77 training.batch_size 1.0 +420 78 model.embedding_dim 0.0 +420 78 loss.margin 0.9698257624768627 +420 78 negative_sampler.num_negs_per_pos 36.0 +420 78 training.batch_size 2.0 +420 79 model.embedding_dim 2.0 +420 79 loss.margin 3.6540102503755545 +420 79 negative_sampler.num_negs_per_pos 93.0 +420 79 training.batch_size 2.0 +420 80 model.embedding_dim 0.0 +420 80 loss.margin 4.127234552600264 +420 80 negative_sampler.num_negs_per_pos 44.0 +420 80 training.batch_size 0.0 +420 81 model.embedding_dim 2.0 +420 81 loss.margin 6.786181101001049 +420 81 negative_sampler.num_negs_per_pos 83.0 +420 81 training.batch_size 2.0 +420 82 model.embedding_dim 0.0 +420 82 loss.margin 2.886873115451423 +420 82 negative_sampler.num_negs_per_pos 54.0 +420 82 training.batch_size 1.0 +420 83 model.embedding_dim 1.0 +420 83 loss.margin 0.9686360203338962 +420 83 negative_sampler.num_negs_per_pos 42.0 +420 83 training.batch_size 1.0 +420 84 model.embedding_dim 1.0 +420 84 loss.margin 5.026130816120043 +420 84 negative_sampler.num_negs_per_pos 11.0 +420 84 training.batch_size 2.0 +420 85 model.embedding_dim 1.0 +420 85 loss.margin 1.8958024050294826 +420 85 negative_sampler.num_negs_per_pos 37.0 +420 85 training.batch_size 2.0 +420 86 model.embedding_dim 0.0 
+420 86 loss.margin 8.959911429859934 +420 86 negative_sampler.num_negs_per_pos 7.0 +420 86 training.batch_size 2.0 +420 87 model.embedding_dim 2.0 +420 87 loss.margin 4.427819174884037 +420 87 negative_sampler.num_negs_per_pos 27.0 +420 87 training.batch_size 1.0 +420 88 model.embedding_dim 1.0 +420 88 loss.margin 4.031398398719599 +420 88 negative_sampler.num_negs_per_pos 89.0 +420 88 training.batch_size 2.0 +420 89 model.embedding_dim 0.0 +420 89 loss.margin 9.683882251464077 +420 89 negative_sampler.num_negs_per_pos 8.0 +420 89 training.batch_size 1.0 +420 90 model.embedding_dim 1.0 +420 90 loss.margin 6.203905866253072 +420 90 negative_sampler.num_negs_per_pos 73.0 +420 90 training.batch_size 1.0 +420 91 model.embedding_dim 0.0 +420 91 loss.margin 7.758437408024881 +420 91 negative_sampler.num_negs_per_pos 97.0 +420 91 training.batch_size 0.0 +420 92 model.embedding_dim 0.0 +420 92 loss.margin 2.1956258336986094 +420 92 negative_sampler.num_negs_per_pos 18.0 +420 92 training.batch_size 1.0 +420 93 model.embedding_dim 2.0 +420 93 loss.margin 3.6426122438284096 +420 93 negative_sampler.num_negs_per_pos 89.0 +420 93 training.batch_size 0.0 +420 94 model.embedding_dim 1.0 +420 94 loss.margin 9.811197939685874 +420 94 negative_sampler.num_negs_per_pos 10.0 +420 94 training.batch_size 1.0 +420 95 model.embedding_dim 2.0 +420 95 loss.margin 2.7083595454749965 +420 95 negative_sampler.num_negs_per_pos 59.0 +420 95 training.batch_size 2.0 +420 96 model.embedding_dim 1.0 +420 96 loss.margin 1.6243316566183923 +420 96 negative_sampler.num_negs_per_pos 39.0 +420 96 training.batch_size 1.0 +420 97 model.embedding_dim 1.0 +420 97 loss.margin 2.202929998735827 +420 97 negative_sampler.num_negs_per_pos 6.0 +420 97 training.batch_size 2.0 +420 98 model.embedding_dim 0.0 +420 98 loss.margin 5.872806673800305 +420 98 negative_sampler.num_negs_per_pos 81.0 +420 98 training.batch_size 2.0 +420 99 model.embedding_dim 1.0 +420 99 loss.margin 1.7234173726628066 +420 99 
negative_sampler.num_negs_per_pos 30.0 +420 99 training.batch_size 1.0 +420 100 model.embedding_dim 1.0 +420 100 loss.margin 9.872154701740005 +420 100 negative_sampler.num_negs_per_pos 66.0 +420 100 training.batch_size 2.0 +420 1 dataset """kinships""" +420 1 model """ntn""" +420 1 loss """marginranking""" +420 1 regularizer """no""" +420 1 optimizer """adadelta""" +420 1 training_loop """owa""" +420 1 negative_sampler """basic""" +420 1 evaluator """rankbased""" +420 2 dataset """kinships""" +420 2 model """ntn""" +420 2 loss """marginranking""" +420 2 regularizer """no""" +420 2 optimizer """adadelta""" +420 2 training_loop """owa""" +420 2 negative_sampler """basic""" +420 2 evaluator """rankbased""" +420 3 dataset """kinships""" +420 3 model """ntn""" +420 3 loss """marginranking""" +420 3 regularizer """no""" +420 3 optimizer """adadelta""" +420 3 training_loop """owa""" +420 3 negative_sampler """basic""" +420 3 evaluator """rankbased""" +420 4 dataset """kinships""" +420 4 model """ntn""" +420 4 loss """marginranking""" +420 4 regularizer """no""" +420 4 optimizer """adadelta""" +420 4 training_loop """owa""" +420 4 negative_sampler """basic""" +420 4 evaluator """rankbased""" +420 5 dataset """kinships""" +420 5 model """ntn""" +420 5 loss """marginranking""" +420 5 regularizer """no""" +420 5 optimizer """adadelta""" +420 5 training_loop """owa""" +420 5 negative_sampler """basic""" +420 5 evaluator """rankbased""" +420 6 dataset """kinships""" +420 6 model """ntn""" +420 6 loss """marginranking""" +420 6 regularizer """no""" +420 6 optimizer """adadelta""" +420 6 training_loop """owa""" +420 6 negative_sampler """basic""" +420 6 evaluator """rankbased""" +420 7 dataset """kinships""" +420 7 model """ntn""" +420 7 loss """marginranking""" +420 7 regularizer """no""" +420 7 optimizer """adadelta""" +420 7 training_loop """owa""" +420 7 negative_sampler """basic""" +420 7 evaluator """rankbased""" +420 8 dataset """kinships""" +420 8 model """ntn""" +420 8 
loss """marginranking""" +420 8 regularizer """no""" +420 8 optimizer """adadelta""" +420 8 training_loop """owa""" +420 8 negative_sampler """basic""" +420 8 evaluator """rankbased""" +420 9 dataset """kinships""" +420 9 model """ntn""" +420 9 loss """marginranking""" +420 9 regularizer """no""" +420 9 optimizer """adadelta""" +420 9 training_loop """owa""" +420 9 negative_sampler """basic""" +420 9 evaluator """rankbased""" +420 10 dataset """kinships""" +420 10 model """ntn""" +420 10 loss """marginranking""" +420 10 regularizer """no""" +420 10 optimizer """adadelta""" +420 10 training_loop """owa""" +420 10 negative_sampler """basic""" +420 10 evaluator """rankbased""" +420 11 dataset """kinships""" +420 11 model """ntn""" +420 11 loss """marginranking""" +420 11 regularizer """no""" +420 11 optimizer """adadelta""" +420 11 training_loop """owa""" +420 11 negative_sampler """basic""" +420 11 evaluator """rankbased""" +420 12 dataset """kinships""" +420 12 model """ntn""" +420 12 loss """marginranking""" +420 12 regularizer """no""" +420 12 optimizer """adadelta""" +420 12 training_loop """owa""" +420 12 negative_sampler """basic""" +420 12 evaluator """rankbased""" +420 13 dataset """kinships""" +420 13 model """ntn""" +420 13 loss """marginranking""" +420 13 regularizer """no""" +420 13 optimizer """adadelta""" +420 13 training_loop """owa""" +420 13 negative_sampler """basic""" +420 13 evaluator """rankbased""" +420 14 dataset """kinships""" +420 14 model """ntn""" +420 14 loss """marginranking""" +420 14 regularizer """no""" +420 14 optimizer """adadelta""" +420 14 training_loop """owa""" +420 14 negative_sampler """basic""" +420 14 evaluator """rankbased""" +420 15 dataset """kinships""" +420 15 model """ntn""" +420 15 loss """marginranking""" +420 15 regularizer """no""" +420 15 optimizer """adadelta""" +420 15 training_loop """owa""" +420 15 negative_sampler """basic""" +420 15 evaluator """rankbased""" +420 16 dataset """kinships""" +420 16 model 
"""ntn""" +420 16 loss """marginranking""" +420 16 regularizer """no""" +420 16 optimizer """adadelta""" +420 16 training_loop """owa""" +420 16 negative_sampler """basic""" +420 16 evaluator """rankbased""" +420 17 dataset """kinships""" +420 17 model """ntn""" +420 17 loss """marginranking""" +420 17 regularizer """no""" +420 17 optimizer """adadelta""" +420 17 training_loop """owa""" +420 17 negative_sampler """basic""" +420 17 evaluator """rankbased""" +420 18 dataset """kinships""" +420 18 model """ntn""" +420 18 loss """marginranking""" +420 18 regularizer """no""" +420 18 optimizer """adadelta""" +420 18 training_loop """owa""" +420 18 negative_sampler """basic""" +420 18 evaluator """rankbased""" +420 19 dataset """kinships""" +420 19 model """ntn""" +420 19 loss """marginranking""" +420 19 regularizer """no""" +420 19 optimizer """adadelta""" +420 19 training_loop """owa""" +420 19 negative_sampler """basic""" +420 19 evaluator """rankbased""" +420 20 dataset """kinships""" +420 20 model """ntn""" +420 20 loss """marginranking""" +420 20 regularizer """no""" +420 20 optimizer """adadelta""" +420 20 training_loop """owa""" +420 20 negative_sampler """basic""" +420 20 evaluator """rankbased""" +420 21 dataset """kinships""" +420 21 model """ntn""" +420 21 loss """marginranking""" +420 21 regularizer """no""" +420 21 optimizer """adadelta""" +420 21 training_loop """owa""" +420 21 negative_sampler """basic""" +420 21 evaluator """rankbased""" +420 22 dataset """kinships""" +420 22 model """ntn""" +420 22 loss """marginranking""" +420 22 regularizer """no""" +420 22 optimizer """adadelta""" +420 22 training_loop """owa""" +420 22 negative_sampler """basic""" +420 22 evaluator """rankbased""" +420 23 dataset """kinships""" +420 23 model """ntn""" +420 23 loss """marginranking""" +420 23 regularizer """no""" +420 23 optimizer """adadelta""" +420 23 training_loop """owa""" +420 23 negative_sampler """basic""" +420 23 evaluator """rankbased""" +420 24 dataset 
"""kinships""" +420 24 model """ntn""" +420 24 loss """marginranking""" +420 24 regularizer """no""" +420 24 optimizer """adadelta""" +420 24 training_loop """owa""" +420 24 negative_sampler """basic""" +420 24 evaluator """rankbased""" +420 25 dataset """kinships""" +420 25 model """ntn""" +420 25 loss """marginranking""" +420 25 regularizer """no""" +420 25 optimizer """adadelta""" +420 25 training_loop """owa""" +420 25 negative_sampler """basic""" +420 25 evaluator """rankbased""" +420 26 dataset """kinships""" +420 26 model """ntn""" +420 26 loss """marginranking""" +420 26 regularizer """no""" +420 26 optimizer """adadelta""" +420 26 training_loop """owa""" +420 26 negative_sampler """basic""" +420 26 evaluator """rankbased""" +420 27 dataset """kinships""" +420 27 model """ntn""" +420 27 loss """marginranking""" +420 27 regularizer """no""" +420 27 optimizer """adadelta""" +420 27 training_loop """owa""" +420 27 negative_sampler """basic""" +420 27 evaluator """rankbased""" +420 28 dataset """kinships""" +420 28 model """ntn""" +420 28 loss """marginranking""" +420 28 regularizer """no""" +420 28 optimizer """adadelta""" +420 28 training_loop """owa""" +420 28 negative_sampler """basic""" +420 28 evaluator """rankbased""" +420 29 dataset """kinships""" +420 29 model """ntn""" +420 29 loss """marginranking""" +420 29 regularizer """no""" +420 29 optimizer """adadelta""" +420 29 training_loop """owa""" +420 29 negative_sampler """basic""" +420 29 evaluator """rankbased""" +420 30 dataset """kinships""" +420 30 model """ntn""" +420 30 loss """marginranking""" +420 30 regularizer """no""" +420 30 optimizer """adadelta""" +420 30 training_loop """owa""" +420 30 negative_sampler """basic""" +420 30 evaluator """rankbased""" +420 31 dataset """kinships""" +420 31 model """ntn""" +420 31 loss """marginranking""" +420 31 regularizer """no""" +420 31 optimizer """adadelta""" +420 31 training_loop """owa""" +420 31 negative_sampler """basic""" +420 31 evaluator 
"""rankbased""" +420 32 dataset """kinships""" +420 32 model """ntn""" +420 32 loss """marginranking""" +420 32 regularizer """no""" +420 32 optimizer """adadelta""" +420 32 training_loop """owa""" +420 32 negative_sampler """basic""" +420 32 evaluator """rankbased""" +420 33 dataset """kinships""" +420 33 model """ntn""" +420 33 loss """marginranking""" +420 33 regularizer """no""" +420 33 optimizer """adadelta""" +420 33 training_loop """owa""" +420 33 negative_sampler """basic""" +420 33 evaluator """rankbased""" +420 34 dataset """kinships""" +420 34 model """ntn""" +420 34 loss """marginranking""" +420 34 regularizer """no""" +420 34 optimizer """adadelta""" +420 34 training_loop """owa""" +420 34 negative_sampler """basic""" +420 34 evaluator """rankbased""" +420 35 dataset """kinships""" +420 35 model """ntn""" +420 35 loss """marginranking""" +420 35 regularizer """no""" +420 35 optimizer """adadelta""" +420 35 training_loop """owa""" +420 35 negative_sampler """basic""" +420 35 evaluator """rankbased""" +420 36 dataset """kinships""" +420 36 model """ntn""" +420 36 loss """marginranking""" +420 36 regularizer """no""" +420 36 optimizer """adadelta""" +420 36 training_loop """owa""" +420 36 negative_sampler """basic""" +420 36 evaluator """rankbased""" +420 37 dataset """kinships""" +420 37 model """ntn""" +420 37 loss """marginranking""" +420 37 regularizer """no""" +420 37 optimizer """adadelta""" +420 37 training_loop """owa""" +420 37 negative_sampler """basic""" +420 37 evaluator """rankbased""" +420 38 dataset """kinships""" +420 38 model """ntn""" +420 38 loss """marginranking""" +420 38 regularizer """no""" +420 38 optimizer """adadelta""" +420 38 training_loop """owa""" +420 38 negative_sampler """basic""" +420 38 evaluator """rankbased""" +420 39 dataset """kinships""" +420 39 model """ntn""" +420 39 loss """marginranking""" +420 39 regularizer """no""" +420 39 optimizer """adadelta""" +420 39 training_loop """owa""" +420 39 negative_sampler 
"""basic""" +420 39 evaluator """rankbased""" +420 40 dataset """kinships""" +420 40 model """ntn""" +420 40 loss """marginranking""" +420 40 regularizer """no""" +420 40 optimizer """adadelta""" +420 40 training_loop """owa""" +420 40 negative_sampler """basic""" +420 40 evaluator """rankbased""" +420 41 dataset """kinships""" +420 41 model """ntn""" +420 41 loss """marginranking""" +420 41 regularizer """no""" +420 41 optimizer """adadelta""" +420 41 training_loop """owa""" +420 41 negative_sampler """basic""" +420 41 evaluator """rankbased""" +420 42 dataset """kinships""" +420 42 model """ntn""" +420 42 loss """marginranking""" +420 42 regularizer """no""" +420 42 optimizer """adadelta""" +420 42 training_loop """owa""" +420 42 negative_sampler """basic""" +420 42 evaluator """rankbased""" +420 43 dataset """kinships""" +420 43 model """ntn""" +420 43 loss """marginranking""" +420 43 regularizer """no""" +420 43 optimizer """adadelta""" +420 43 training_loop """owa""" +420 43 negative_sampler """basic""" +420 43 evaluator """rankbased""" +420 44 dataset """kinships""" +420 44 model """ntn""" +420 44 loss """marginranking""" +420 44 regularizer """no""" +420 44 optimizer """adadelta""" +420 44 training_loop """owa""" +420 44 negative_sampler """basic""" +420 44 evaluator """rankbased""" +420 45 dataset """kinships""" +420 45 model """ntn""" +420 45 loss """marginranking""" +420 45 regularizer """no""" +420 45 optimizer """adadelta""" +420 45 training_loop """owa""" +420 45 negative_sampler """basic""" +420 45 evaluator """rankbased""" +420 46 dataset """kinships""" +420 46 model """ntn""" +420 46 loss """marginranking""" +420 46 regularizer """no""" +420 46 optimizer """adadelta""" +420 46 training_loop """owa""" +420 46 negative_sampler """basic""" +420 46 evaluator """rankbased""" +420 47 dataset """kinships""" +420 47 model """ntn""" +420 47 loss """marginranking""" +420 47 regularizer """no""" +420 47 optimizer """adadelta""" +420 47 training_loop """owa""" 
+420 47 negative_sampler """basic""" +420 47 evaluator """rankbased""" +420 48 dataset """kinships""" +420 48 model """ntn""" +420 48 loss """marginranking""" +420 48 regularizer """no""" +420 48 optimizer """adadelta""" +420 48 training_loop """owa""" +420 48 negative_sampler """basic""" +420 48 evaluator """rankbased""" +420 49 dataset """kinships""" +420 49 model """ntn""" +420 49 loss """marginranking""" +420 49 regularizer """no""" +420 49 optimizer """adadelta""" +420 49 training_loop """owa""" +420 49 negative_sampler """basic""" +420 49 evaluator """rankbased""" +420 50 dataset """kinships""" +420 50 model """ntn""" +420 50 loss """marginranking""" +420 50 regularizer """no""" +420 50 optimizer """adadelta""" +420 50 training_loop """owa""" +420 50 negative_sampler """basic""" +420 50 evaluator """rankbased""" +420 51 dataset """kinships""" +420 51 model """ntn""" +420 51 loss """marginranking""" +420 51 regularizer """no""" +420 51 optimizer """adadelta""" +420 51 training_loop """owa""" +420 51 negative_sampler """basic""" +420 51 evaluator """rankbased""" +420 52 dataset """kinships""" +420 52 model """ntn""" +420 52 loss """marginranking""" +420 52 regularizer """no""" +420 52 optimizer """adadelta""" +420 52 training_loop """owa""" +420 52 negative_sampler """basic""" +420 52 evaluator """rankbased""" +420 53 dataset """kinships""" +420 53 model """ntn""" +420 53 loss """marginranking""" +420 53 regularizer """no""" +420 53 optimizer """adadelta""" +420 53 training_loop """owa""" +420 53 negative_sampler """basic""" +420 53 evaluator """rankbased""" +420 54 dataset """kinships""" +420 54 model """ntn""" +420 54 loss """marginranking""" +420 54 regularizer """no""" +420 54 optimizer """adadelta""" +420 54 training_loop """owa""" +420 54 negative_sampler """basic""" +420 54 evaluator """rankbased""" +420 55 dataset """kinships""" +420 55 model """ntn""" +420 55 loss """marginranking""" +420 55 regularizer """no""" +420 55 optimizer """adadelta""" +420 55 
training_loop """owa""" +420 55 negative_sampler """basic""" +420 55 evaluator """rankbased""" +420 56 dataset """kinships""" +420 56 model """ntn""" +420 56 loss """marginranking""" +420 56 regularizer """no""" +420 56 optimizer """adadelta""" +420 56 training_loop """owa""" +420 56 negative_sampler """basic""" +420 56 evaluator """rankbased""" +420 57 dataset """kinships""" +420 57 model """ntn""" +420 57 loss """marginranking""" +420 57 regularizer """no""" +420 57 optimizer """adadelta""" +420 57 training_loop """owa""" +420 57 negative_sampler """basic""" +420 57 evaluator """rankbased""" +420 58 dataset """kinships""" +420 58 model """ntn""" +420 58 loss """marginranking""" +420 58 regularizer """no""" +420 58 optimizer """adadelta""" +420 58 training_loop """owa""" +420 58 negative_sampler """basic""" +420 58 evaluator """rankbased""" +420 59 dataset """kinships""" +420 59 model """ntn""" +420 59 loss """marginranking""" +420 59 regularizer """no""" +420 59 optimizer """adadelta""" +420 59 training_loop """owa""" +420 59 negative_sampler """basic""" +420 59 evaluator """rankbased""" +420 60 dataset """kinships""" +420 60 model """ntn""" +420 60 loss """marginranking""" +420 60 regularizer """no""" +420 60 optimizer """adadelta""" +420 60 training_loop """owa""" +420 60 negative_sampler """basic""" +420 60 evaluator """rankbased""" +420 61 dataset """kinships""" +420 61 model """ntn""" +420 61 loss """marginranking""" +420 61 regularizer """no""" +420 61 optimizer """adadelta""" +420 61 training_loop """owa""" +420 61 negative_sampler """basic""" +420 61 evaluator """rankbased""" +420 62 dataset """kinships""" +420 62 model """ntn""" +420 62 loss """marginranking""" +420 62 regularizer """no""" +420 62 optimizer """adadelta""" +420 62 training_loop """owa""" +420 62 negative_sampler """basic""" +420 62 evaluator """rankbased""" +420 63 dataset """kinships""" +420 63 model """ntn""" +420 63 loss """marginranking""" +420 63 regularizer """no""" +420 63 
optimizer """adadelta""" +420 63 training_loop """owa""" +420 63 negative_sampler """basic""" +420 63 evaluator """rankbased""" +420 64 dataset """kinships""" +420 64 model """ntn""" +420 64 loss """marginranking""" +420 64 regularizer """no""" +420 64 optimizer """adadelta""" +420 64 training_loop """owa""" +420 64 negative_sampler """basic""" +420 64 evaluator """rankbased""" +420 65 dataset """kinships""" +420 65 model """ntn""" +420 65 loss """marginranking""" +420 65 regularizer """no""" +420 65 optimizer """adadelta""" +420 65 training_loop """owa""" +420 65 negative_sampler """basic""" +420 65 evaluator """rankbased""" +420 66 dataset """kinships""" +420 66 model """ntn""" +420 66 loss """marginranking""" +420 66 regularizer """no""" +420 66 optimizer """adadelta""" +420 66 training_loop """owa""" +420 66 negative_sampler """basic""" +420 66 evaluator """rankbased""" +420 67 dataset """kinships""" +420 67 model """ntn""" +420 67 loss """marginranking""" +420 67 regularizer """no""" +420 67 optimizer """adadelta""" +420 67 training_loop """owa""" +420 67 negative_sampler """basic""" +420 67 evaluator """rankbased""" +420 68 dataset """kinships""" +420 68 model """ntn""" +420 68 loss """marginranking""" +420 68 regularizer """no""" +420 68 optimizer """adadelta""" +420 68 training_loop """owa""" +420 68 negative_sampler """basic""" +420 68 evaluator """rankbased""" +420 69 dataset """kinships""" +420 69 model """ntn""" +420 69 loss """marginranking""" +420 69 regularizer """no""" +420 69 optimizer """adadelta""" +420 69 training_loop """owa""" +420 69 negative_sampler """basic""" +420 69 evaluator """rankbased""" +420 70 dataset """kinships""" +420 70 model """ntn""" +420 70 loss """marginranking""" +420 70 regularizer """no""" +420 70 optimizer """adadelta""" +420 70 training_loop """owa""" +420 70 negative_sampler """basic""" +420 70 evaluator """rankbased""" +420 71 dataset """kinships""" +420 71 model """ntn""" +420 71 loss """marginranking""" +420 71 
regularizer """no""" +420 71 optimizer """adadelta""" +420 71 training_loop """owa""" +420 71 negative_sampler """basic""" +420 71 evaluator """rankbased""" +420 72 dataset """kinships""" +420 72 model """ntn""" +420 72 loss """marginranking""" +420 72 regularizer """no""" +420 72 optimizer """adadelta""" +420 72 training_loop """owa""" +420 72 negative_sampler """basic""" +420 72 evaluator """rankbased""" +420 73 dataset """kinships""" +420 73 model """ntn""" +420 73 loss """marginranking""" +420 73 regularizer """no""" +420 73 optimizer """adadelta""" +420 73 training_loop """owa""" +420 73 negative_sampler """basic""" +420 73 evaluator """rankbased""" +420 74 dataset """kinships""" +420 74 model """ntn""" +420 74 loss """marginranking""" +420 74 regularizer """no""" +420 74 optimizer """adadelta""" +420 74 training_loop """owa""" +420 74 negative_sampler """basic""" +420 74 evaluator """rankbased""" +420 75 dataset """kinships""" +420 75 model """ntn""" +420 75 loss """marginranking""" +420 75 regularizer """no""" +420 75 optimizer """adadelta""" +420 75 training_loop """owa""" +420 75 negative_sampler """basic""" +420 75 evaluator """rankbased""" +420 76 dataset """kinships""" +420 76 model """ntn""" +420 76 loss """marginranking""" +420 76 regularizer """no""" +420 76 optimizer """adadelta""" +420 76 training_loop """owa""" +420 76 negative_sampler """basic""" +420 76 evaluator """rankbased""" +420 77 dataset """kinships""" +420 77 model """ntn""" +420 77 loss """marginranking""" +420 77 regularizer """no""" +420 77 optimizer """adadelta""" +420 77 training_loop """owa""" +420 77 negative_sampler """basic""" +420 77 evaluator """rankbased""" +420 78 dataset """kinships""" +420 78 model """ntn""" +420 78 loss """marginranking""" +420 78 regularizer """no""" +420 78 optimizer """adadelta""" +420 78 training_loop """owa""" +420 78 negative_sampler """basic""" +420 78 evaluator """rankbased""" +420 79 dataset """kinships""" +420 79 model """ntn""" +420 79 loss 
"""marginranking""" +420 79 regularizer """no""" +420 79 optimizer """adadelta""" +420 79 training_loop """owa""" +420 79 negative_sampler """basic""" +420 79 evaluator """rankbased""" +420 80 dataset """kinships""" +420 80 model """ntn""" +420 80 loss """marginranking""" +420 80 regularizer """no""" +420 80 optimizer """adadelta""" +420 80 training_loop """owa""" +420 80 negative_sampler """basic""" +420 80 evaluator """rankbased""" +420 81 dataset """kinships""" +420 81 model """ntn""" +420 81 loss """marginranking""" +420 81 regularizer """no""" +420 81 optimizer """adadelta""" +420 81 training_loop """owa""" +420 81 negative_sampler """basic""" +420 81 evaluator """rankbased""" +420 82 dataset """kinships""" +420 82 model """ntn""" +420 82 loss """marginranking""" +420 82 regularizer """no""" +420 82 optimizer """adadelta""" +420 82 training_loop """owa""" +420 82 negative_sampler """basic""" +420 82 evaluator """rankbased""" +420 83 dataset """kinships""" +420 83 model """ntn""" +420 83 loss """marginranking""" +420 83 regularizer """no""" +420 83 optimizer """adadelta""" +420 83 training_loop """owa""" +420 83 negative_sampler """basic""" +420 83 evaluator """rankbased""" +420 84 dataset """kinships""" +420 84 model """ntn""" +420 84 loss """marginranking""" +420 84 regularizer """no""" +420 84 optimizer """adadelta""" +420 84 training_loop """owa""" +420 84 negative_sampler """basic""" +420 84 evaluator """rankbased""" +420 85 dataset """kinships""" +420 85 model """ntn""" +420 85 loss """marginranking""" +420 85 regularizer """no""" +420 85 optimizer """adadelta""" +420 85 training_loop """owa""" +420 85 negative_sampler """basic""" +420 85 evaluator """rankbased""" +420 86 dataset """kinships""" +420 86 model """ntn""" +420 86 loss """marginranking""" +420 86 regularizer """no""" +420 86 optimizer """adadelta""" +420 86 training_loop """owa""" +420 86 negative_sampler """basic""" +420 86 evaluator """rankbased""" +420 87 dataset """kinships""" +420 87 
model """ntn""" +420 87 loss """marginranking""" +420 87 regularizer """no""" +420 87 optimizer """adadelta""" +420 87 training_loop """owa""" +420 87 negative_sampler """basic""" +420 87 evaluator """rankbased""" +420 88 dataset """kinships""" +420 88 model """ntn""" +420 88 loss """marginranking""" +420 88 regularizer """no""" +420 88 optimizer """adadelta""" +420 88 training_loop """owa""" +420 88 negative_sampler """basic""" +420 88 evaluator """rankbased""" +420 89 dataset """kinships""" +420 89 model """ntn""" +420 89 loss """marginranking""" +420 89 regularizer """no""" +420 89 optimizer """adadelta""" +420 89 training_loop """owa""" +420 89 negative_sampler """basic""" +420 89 evaluator """rankbased""" +420 90 dataset """kinships""" +420 90 model """ntn""" +420 90 loss """marginranking""" +420 90 regularizer """no""" +420 90 optimizer """adadelta""" +420 90 training_loop """owa""" +420 90 negative_sampler """basic""" +420 90 evaluator """rankbased""" +420 91 dataset """kinships""" +420 91 model """ntn""" +420 91 loss """marginranking""" +420 91 regularizer """no""" +420 91 optimizer """adadelta""" +420 91 training_loop """owa""" +420 91 negative_sampler """basic""" +420 91 evaluator """rankbased""" +420 92 dataset """kinships""" +420 92 model """ntn""" +420 92 loss """marginranking""" +420 92 regularizer """no""" +420 92 optimizer """adadelta""" +420 92 training_loop """owa""" +420 92 negative_sampler """basic""" +420 92 evaluator """rankbased""" +420 93 dataset """kinships""" +420 93 model """ntn""" +420 93 loss """marginranking""" +420 93 regularizer """no""" +420 93 optimizer """adadelta""" +420 93 training_loop """owa""" +420 93 negative_sampler """basic""" +420 93 evaluator """rankbased""" +420 94 dataset """kinships""" +420 94 model """ntn""" +420 94 loss """marginranking""" +420 94 regularizer """no""" +420 94 optimizer """adadelta""" +420 94 training_loop """owa""" +420 94 negative_sampler """basic""" +420 94 evaluator """rankbased""" +420 95 
dataset """kinships""" +420 95 model """ntn""" +420 95 loss """marginranking""" +420 95 regularizer """no""" +420 95 optimizer """adadelta""" +420 95 training_loop """owa""" +420 95 negative_sampler """basic""" +420 95 evaluator """rankbased""" +420 96 dataset """kinships""" +420 96 model """ntn""" +420 96 loss """marginranking""" +420 96 regularizer """no""" +420 96 optimizer """adadelta""" +420 96 training_loop """owa""" +420 96 negative_sampler """basic""" +420 96 evaluator """rankbased""" +420 97 dataset """kinships""" +420 97 model """ntn""" +420 97 loss """marginranking""" +420 97 regularizer """no""" +420 97 optimizer """adadelta""" +420 97 training_loop """owa""" +420 97 negative_sampler """basic""" +420 97 evaluator """rankbased""" +420 98 dataset """kinships""" +420 98 model """ntn""" +420 98 loss """marginranking""" +420 98 regularizer """no""" +420 98 optimizer """adadelta""" +420 98 training_loop """owa""" +420 98 negative_sampler """basic""" +420 98 evaluator """rankbased""" +420 99 dataset """kinships""" +420 99 model """ntn""" +420 99 loss """marginranking""" +420 99 regularizer """no""" +420 99 optimizer """adadelta""" +420 99 training_loop """owa""" +420 99 negative_sampler """basic""" +420 99 evaluator """rankbased""" +420 100 dataset """kinships""" +420 100 model """ntn""" +420 100 loss """marginranking""" +420 100 regularizer """no""" +420 100 optimizer """adadelta""" +420 100 training_loop """owa""" +420 100 negative_sampler """basic""" +420 100 evaluator """rankbased""" +421 1 model.embedding_dim 1.0 +421 1 training.batch_size 0.0 +421 1 training.label_smoothing 0.002635569031565169 +421 2 model.embedding_dim 2.0 +421 2 training.batch_size 2.0 +421 2 training.label_smoothing 0.01717995549442748 +421 3 model.embedding_dim 2.0 +421 3 training.batch_size 2.0 +421 3 training.label_smoothing 0.039255710169794546 +421 4 model.embedding_dim 2.0 +421 4 training.batch_size 2.0 +421 4 training.label_smoothing 0.014505503086244266 +421 5 
model.embedding_dim 2.0 +421 5 training.batch_size 1.0 +421 5 training.label_smoothing 0.01081027656808819 +421 6 model.embedding_dim 1.0 +421 6 training.batch_size 2.0 +421 6 training.label_smoothing 0.001481356145904829 +421 7 model.embedding_dim 2.0 +421 7 training.batch_size 2.0 +421 7 training.label_smoothing 0.484810679942913 +421 8 model.embedding_dim 2.0 +421 8 training.batch_size 0.0 +421 8 training.label_smoothing 0.06155245917827684 +421 9 model.embedding_dim 1.0 +421 9 training.batch_size 2.0 +421 9 training.label_smoothing 0.32980532016932607 +421 10 model.embedding_dim 1.0 +421 10 training.batch_size 2.0 +421 10 training.label_smoothing 0.9479558813525635 +421 11 model.embedding_dim 2.0 +421 11 training.batch_size 1.0 +421 11 training.label_smoothing 0.010339795970824493 +421 12 model.embedding_dim 0.0 +421 12 training.batch_size 2.0 +421 12 training.label_smoothing 0.0035244322817938214 +421 13 model.embedding_dim 0.0 +421 13 training.batch_size 1.0 +421 13 training.label_smoothing 0.5262176636992306 +421 14 model.embedding_dim 2.0 +421 14 training.batch_size 2.0 +421 14 training.label_smoothing 0.009266710516888082 +421 15 model.embedding_dim 2.0 +421 15 training.batch_size 0.0 +421 15 training.label_smoothing 0.035774769762644175 +421 16 model.embedding_dim 2.0 +421 16 training.batch_size 1.0 +421 16 training.label_smoothing 0.009090975011417955 +421 17 model.embedding_dim 1.0 +421 17 training.batch_size 2.0 +421 17 training.label_smoothing 0.1441979964737863 +421 18 model.embedding_dim 2.0 +421 18 training.batch_size 2.0 +421 18 training.label_smoothing 0.0010169690079126824 +421 19 model.embedding_dim 1.0 +421 19 training.batch_size 2.0 +421 19 training.label_smoothing 0.008769100734084349 +421 20 model.embedding_dim 1.0 +421 20 training.batch_size 0.0 +421 20 training.label_smoothing 0.007948430670876598 +421 21 model.embedding_dim 1.0 +421 21 training.batch_size 2.0 +421 21 training.label_smoothing 0.017610325889544972 +421 22 
model.embedding_dim 0.0 +421 22 training.batch_size 0.0 +421 22 training.label_smoothing 0.06118264539607057 +421 23 model.embedding_dim 2.0 +421 23 training.batch_size 2.0 +421 23 training.label_smoothing 0.6792898952599966 +421 24 model.embedding_dim 2.0 +421 24 training.batch_size 0.0 +421 24 training.label_smoothing 0.1767936280476556 +421 25 model.embedding_dim 2.0 +421 25 training.batch_size 1.0 +421 25 training.label_smoothing 0.040898815480387526 +421 26 model.embedding_dim 2.0 +421 26 training.batch_size 1.0 +421 26 training.label_smoothing 0.4733025361448639 +421 27 model.embedding_dim 0.0 +421 27 training.batch_size 2.0 +421 27 training.label_smoothing 0.001935552622243592 +421 28 model.embedding_dim 2.0 +421 28 training.batch_size 2.0 +421 28 training.label_smoothing 0.09323350702231334 +421 29 model.embedding_dim 1.0 +421 29 training.batch_size 2.0 +421 29 training.label_smoothing 0.025160711251206235 +421 30 model.embedding_dim 2.0 +421 30 training.batch_size 0.0 +421 30 training.label_smoothing 0.1535160558106891 +421 31 model.embedding_dim 1.0 +421 31 training.batch_size 2.0 +421 31 training.label_smoothing 0.004340153340002967 +421 32 model.embedding_dim 2.0 +421 32 training.batch_size 2.0 +421 32 training.label_smoothing 0.13858100186772948 +421 33 model.embedding_dim 1.0 +421 33 training.batch_size 2.0 +421 33 training.label_smoothing 0.3169403945124233 +421 34 model.embedding_dim 0.0 +421 34 training.batch_size 2.0 +421 34 training.label_smoothing 0.18013013530807506 +421 35 model.embedding_dim 0.0 +421 35 training.batch_size 0.0 +421 35 training.label_smoothing 0.0013977307941496126 +421 36 model.embedding_dim 0.0 +421 36 training.batch_size 1.0 +421 36 training.label_smoothing 0.37593945585756305 +421 37 model.embedding_dim 2.0 +421 37 training.batch_size 0.0 +421 37 training.label_smoothing 0.019875359944399033 +421 38 model.embedding_dim 0.0 +421 38 training.batch_size 2.0 +421 38 training.label_smoothing 0.18332318321675978 +421 39 
model.embedding_dim 2.0 +421 39 training.batch_size 2.0 +421 39 training.label_smoothing 0.008460632620016539 +421 40 model.embedding_dim 2.0 +421 40 training.batch_size 1.0 +421 40 training.label_smoothing 0.08371071381573614 +421 41 model.embedding_dim 1.0 +421 41 training.batch_size 1.0 +421 41 training.label_smoothing 0.2757174749919044 +421 42 model.embedding_dim 1.0 +421 42 training.batch_size 1.0 +421 42 training.label_smoothing 0.0013095293736697467 +421 43 model.embedding_dim 0.0 +421 43 training.batch_size 0.0 +421 43 training.label_smoothing 0.02443815051802842 +421 44 model.embedding_dim 1.0 +421 44 training.batch_size 2.0 +421 44 training.label_smoothing 0.13407338481615041 +421 45 model.embedding_dim 1.0 +421 45 training.batch_size 0.0 +421 45 training.label_smoothing 0.002241285253782504 +421 46 model.embedding_dim 1.0 +421 46 training.batch_size 1.0 +421 46 training.label_smoothing 0.1019277383295411 +421 47 model.embedding_dim 0.0 +421 47 training.batch_size 2.0 +421 47 training.label_smoothing 0.0027640814885139266 +421 48 model.embedding_dim 1.0 +421 48 training.batch_size 1.0 +421 48 training.label_smoothing 0.0012603179326344611 +421 49 model.embedding_dim 2.0 +421 49 training.batch_size 2.0 +421 49 training.label_smoothing 0.06332453227524397 +421 50 model.embedding_dim 0.0 +421 50 training.batch_size 2.0 +421 50 training.label_smoothing 0.00254132805965618 +421 51 model.embedding_dim 2.0 +421 51 training.batch_size 2.0 +421 51 training.label_smoothing 0.010625072206210414 +421 52 model.embedding_dim 1.0 +421 52 training.batch_size 0.0 +421 52 training.label_smoothing 0.11812234241220262 +421 53 model.embedding_dim 1.0 +421 53 training.batch_size 0.0 +421 53 training.label_smoothing 0.6655105884584587 +421 54 model.embedding_dim 0.0 +421 54 training.batch_size 2.0 +421 54 training.label_smoothing 0.03821413868818295 +421 55 model.embedding_dim 0.0 +421 55 training.batch_size 0.0 +421 55 training.label_smoothing 0.04398901898911259 +421 56 
model.embedding_dim 2.0 +421 56 training.batch_size 1.0 +421 56 training.label_smoothing 0.0012534770415194062 +421 57 model.embedding_dim 1.0 +421 57 training.batch_size 2.0 +421 57 training.label_smoothing 0.03675401553992284 +421 58 model.embedding_dim 2.0 +421 58 training.batch_size 2.0 +421 58 training.label_smoothing 0.8903671207803868 +421 59 model.embedding_dim 1.0 +421 59 training.batch_size 0.0 +421 59 training.label_smoothing 0.05519260406904274 +421 60 model.embedding_dim 2.0 +421 60 training.batch_size 1.0 +421 60 training.label_smoothing 0.0099168997543382 +421 61 model.embedding_dim 2.0 +421 61 training.batch_size 0.0 +421 61 training.label_smoothing 0.03685304905493294 +421 62 model.embedding_dim 2.0 +421 62 training.batch_size 2.0 +421 62 training.label_smoothing 0.24636119498803905 +421 63 model.embedding_dim 1.0 +421 63 training.batch_size 1.0 +421 63 training.label_smoothing 0.2745955691463323 +421 64 model.embedding_dim 2.0 +421 64 training.batch_size 2.0 +421 64 training.label_smoothing 0.06056684929166232 +421 65 model.embedding_dim 2.0 +421 65 training.batch_size 1.0 +421 65 training.label_smoothing 0.9467471478274376 +421 66 model.embedding_dim 0.0 +421 66 training.batch_size 1.0 +421 66 training.label_smoothing 0.02621151684567287 +421 67 model.embedding_dim 0.0 +421 67 training.batch_size 0.0 +421 67 training.label_smoothing 0.0033562149778959367 +421 68 model.embedding_dim 1.0 +421 68 training.batch_size 1.0 +421 68 training.label_smoothing 0.31520227482094426 +421 69 model.embedding_dim 2.0 +421 69 training.batch_size 1.0 +421 69 training.label_smoothing 0.3551536181534104 +421 70 model.embedding_dim 1.0 +421 70 training.batch_size 2.0 +421 70 training.label_smoothing 0.23957912282064786 +421 71 model.embedding_dim 1.0 +421 71 training.batch_size 0.0 +421 71 training.label_smoothing 0.0033142046667239363 +421 72 model.embedding_dim 0.0 +421 72 training.batch_size 2.0 +421 72 training.label_smoothing 0.0051055552139573045 +421 73 
model.embedding_dim 1.0 +421 73 training.batch_size 0.0 +421 73 training.label_smoothing 0.011275456720588873 +421 74 model.embedding_dim 0.0 +421 74 training.batch_size 2.0 +421 74 training.label_smoothing 0.006775490775542998 +421 75 model.embedding_dim 1.0 +421 75 training.batch_size 1.0 +421 75 training.label_smoothing 0.4040370442129426 +421 76 model.embedding_dim 0.0 +421 76 training.batch_size 1.0 +421 76 training.label_smoothing 0.3679448924277497 +421 77 model.embedding_dim 0.0 +421 77 training.batch_size 2.0 +421 77 training.label_smoothing 0.023644728202820573 +421 78 model.embedding_dim 1.0 +421 78 training.batch_size 2.0 +421 78 training.label_smoothing 0.06244370132251456 +421 79 model.embedding_dim 1.0 +421 79 training.batch_size 2.0 +421 79 training.label_smoothing 0.01829583898382653 +421 80 model.embedding_dim 1.0 +421 80 training.batch_size 2.0 +421 80 training.label_smoothing 0.9598563395381263 +421 81 model.embedding_dim 2.0 +421 81 training.batch_size 0.0 +421 81 training.label_smoothing 0.13864523614919547 +421 82 model.embedding_dim 2.0 +421 82 training.batch_size 0.0 +421 82 training.label_smoothing 0.04844105169021803 +421 83 model.embedding_dim 0.0 +421 83 training.batch_size 1.0 +421 83 training.label_smoothing 0.18999512560442403 +421 84 model.embedding_dim 2.0 +421 84 training.batch_size 1.0 +421 84 training.label_smoothing 0.0021471942330568683 +421 85 model.embedding_dim 2.0 +421 85 training.batch_size 1.0 +421 85 training.label_smoothing 0.0014078497231481736 +421 86 model.embedding_dim 1.0 +421 86 training.batch_size 2.0 +421 86 training.label_smoothing 0.6498211177524897 +421 87 model.embedding_dim 2.0 +421 87 training.batch_size 2.0 +421 87 training.label_smoothing 0.010675486697990344 +421 88 model.embedding_dim 2.0 +421 88 training.batch_size 0.0 +421 88 training.label_smoothing 0.514901753577665 +421 89 model.embedding_dim 0.0 +421 89 training.batch_size 0.0 +421 89 training.label_smoothing 0.07971450097857563 +421 90 
model.embedding_dim 1.0 +421 90 training.batch_size 1.0 +421 90 training.label_smoothing 0.0012000901034051785 +421 91 model.embedding_dim 1.0 +421 91 training.batch_size 1.0 +421 91 training.label_smoothing 0.11584038804269896 +421 92 model.embedding_dim 2.0 +421 92 training.batch_size 1.0 +421 92 training.label_smoothing 0.0036192820049862177 +421 93 model.embedding_dim 2.0 +421 93 training.batch_size 1.0 +421 93 training.label_smoothing 0.00296903123760196 +421 94 model.embedding_dim 0.0 +421 94 training.batch_size 2.0 +421 94 training.label_smoothing 0.08630955871905784 +421 95 model.embedding_dim 1.0 +421 95 training.batch_size 2.0 +421 95 training.label_smoothing 0.006365721430887105 +421 96 model.embedding_dim 2.0 +421 96 training.batch_size 1.0 +421 96 training.label_smoothing 0.005148017251693471 +421 97 model.embedding_dim 0.0 +421 97 training.batch_size 2.0 +421 97 training.label_smoothing 0.003746076447104304 +421 98 model.embedding_dim 2.0 +421 98 training.batch_size 2.0 +421 98 training.label_smoothing 0.003841849907465966 +421 99 model.embedding_dim 0.0 +421 99 training.batch_size 0.0 +421 99 training.label_smoothing 0.034267170413152345 +421 100 model.embedding_dim 0.0 +421 100 training.batch_size 2.0 +421 100 training.label_smoothing 0.14403653957643084 +421 1 dataset """kinships""" +421 1 model """ntn""" +421 1 loss """bceaftersigmoid""" +421 1 regularizer """no""" +421 1 optimizer """adadelta""" +421 1 training_loop """lcwa""" +421 1 evaluator """rankbased""" +421 2 dataset """kinships""" +421 2 model """ntn""" +421 2 loss """bceaftersigmoid""" +421 2 regularizer """no""" +421 2 optimizer """adadelta""" +421 2 training_loop """lcwa""" +421 2 evaluator """rankbased""" +421 3 dataset """kinships""" +421 3 model """ntn""" +421 3 loss """bceaftersigmoid""" +421 3 regularizer """no""" +421 3 optimizer """adadelta""" +421 3 training_loop """lcwa""" +421 3 evaluator """rankbased""" +421 4 dataset """kinships""" +421 4 model """ntn""" +421 4 loss 
"""bceaftersigmoid""" +421 4 regularizer """no""" +421 4 optimizer """adadelta""" +421 4 training_loop """lcwa""" +421 4 evaluator """rankbased""" +421 5 dataset """kinships""" +421 5 model """ntn""" +421 5 loss """bceaftersigmoid""" +421 5 regularizer """no""" +421 5 optimizer """adadelta""" +421 5 training_loop """lcwa""" +421 5 evaluator """rankbased""" +421 6 dataset """kinships""" +421 6 model """ntn""" +421 6 loss """bceaftersigmoid""" +421 6 regularizer """no""" +421 6 optimizer """adadelta""" +421 6 training_loop """lcwa""" +421 6 evaluator """rankbased""" +421 7 dataset """kinships""" +421 7 model """ntn""" +421 7 loss """bceaftersigmoid""" +421 7 regularizer """no""" +421 7 optimizer """adadelta""" +421 7 training_loop """lcwa""" +421 7 evaluator """rankbased""" +421 8 dataset """kinships""" +421 8 model """ntn""" +421 8 loss """bceaftersigmoid""" +421 8 regularizer """no""" +421 8 optimizer """adadelta""" +421 8 training_loop """lcwa""" +421 8 evaluator """rankbased""" +421 9 dataset """kinships""" +421 9 model """ntn""" +421 9 loss """bceaftersigmoid""" +421 9 regularizer """no""" +421 9 optimizer """adadelta""" +421 9 training_loop """lcwa""" +421 9 evaluator """rankbased""" +421 10 dataset """kinships""" +421 10 model """ntn""" +421 10 loss """bceaftersigmoid""" +421 10 regularizer """no""" +421 10 optimizer """adadelta""" +421 10 training_loop """lcwa""" +421 10 evaluator """rankbased""" +421 11 dataset """kinships""" +421 11 model """ntn""" +421 11 loss """bceaftersigmoid""" +421 11 regularizer """no""" +421 11 optimizer """adadelta""" +421 11 training_loop """lcwa""" +421 11 evaluator """rankbased""" +421 12 dataset """kinships""" +421 12 model """ntn""" +421 12 loss """bceaftersigmoid""" +421 12 regularizer """no""" +421 12 optimizer """adadelta""" +421 12 training_loop """lcwa""" +421 12 evaluator """rankbased""" +421 13 dataset """kinships""" +421 13 model """ntn""" +421 13 loss """bceaftersigmoid""" +421 13 regularizer """no""" +421 13 
optimizer """adadelta""" +421 13 training_loop """lcwa""" +421 13 evaluator """rankbased""" +421 14 dataset """kinships""" +421 14 model """ntn""" +421 14 loss """bceaftersigmoid""" +421 14 regularizer """no""" +421 14 optimizer """adadelta""" +421 14 training_loop """lcwa""" +421 14 evaluator """rankbased""" +421 15 dataset """kinships""" +421 15 model """ntn""" +421 15 loss """bceaftersigmoid""" +421 15 regularizer """no""" +421 15 optimizer """adadelta""" +421 15 training_loop """lcwa""" +421 15 evaluator """rankbased""" +421 16 dataset """kinships""" +421 16 model """ntn""" +421 16 loss """bceaftersigmoid""" +421 16 regularizer """no""" +421 16 optimizer """adadelta""" +421 16 training_loop """lcwa""" +421 16 evaluator """rankbased""" +421 17 dataset """kinships""" +421 17 model """ntn""" +421 17 loss """bceaftersigmoid""" +421 17 regularizer """no""" +421 17 optimizer """adadelta""" +421 17 training_loop """lcwa""" +421 17 evaluator """rankbased""" +421 18 dataset """kinships""" +421 18 model """ntn""" +421 18 loss """bceaftersigmoid""" +421 18 regularizer """no""" +421 18 optimizer """adadelta""" +421 18 training_loop """lcwa""" +421 18 evaluator """rankbased""" +421 19 dataset """kinships""" +421 19 model """ntn""" +421 19 loss """bceaftersigmoid""" +421 19 regularizer """no""" +421 19 optimizer """adadelta""" +421 19 training_loop """lcwa""" +421 19 evaluator """rankbased""" +421 20 dataset """kinships""" +421 20 model """ntn""" +421 20 loss """bceaftersigmoid""" +421 20 regularizer """no""" +421 20 optimizer """adadelta""" +421 20 training_loop """lcwa""" +421 20 evaluator """rankbased""" +421 21 dataset """kinships""" +421 21 model """ntn""" +421 21 loss """bceaftersigmoid""" +421 21 regularizer """no""" +421 21 optimizer """adadelta""" +421 21 training_loop """lcwa""" +421 21 evaluator """rankbased""" +421 22 dataset """kinships""" +421 22 model """ntn""" +421 22 loss """bceaftersigmoid""" +421 22 regularizer """no""" +421 22 optimizer """adadelta""" 
+421 22 training_loop """lcwa""" +421 22 evaluator """rankbased""" +421 23 dataset """kinships""" +421 23 model """ntn""" +421 23 loss """bceaftersigmoid""" +421 23 regularizer """no""" +421 23 optimizer """adadelta""" +421 23 training_loop """lcwa""" +421 23 evaluator """rankbased""" +421 24 dataset """kinships""" +421 24 model """ntn""" +421 24 loss """bceaftersigmoid""" +421 24 regularizer """no""" +421 24 optimizer """adadelta""" +421 24 training_loop """lcwa""" +421 24 evaluator """rankbased""" +421 25 dataset """kinships""" +421 25 model """ntn""" +421 25 loss """bceaftersigmoid""" +421 25 regularizer """no""" +421 25 optimizer """adadelta""" +421 25 training_loop """lcwa""" +421 25 evaluator """rankbased""" +421 26 dataset """kinships""" +421 26 model """ntn""" +421 26 loss """bceaftersigmoid""" +421 26 regularizer """no""" +421 26 optimizer """adadelta""" +421 26 training_loop """lcwa""" +421 26 evaluator """rankbased""" +421 27 dataset """kinships""" +421 27 model """ntn""" +421 27 loss """bceaftersigmoid""" +421 27 regularizer """no""" +421 27 optimizer """adadelta""" +421 27 training_loop """lcwa""" +421 27 evaluator """rankbased""" +421 28 dataset """kinships""" +421 28 model """ntn""" +421 28 loss """bceaftersigmoid""" +421 28 regularizer """no""" +421 28 optimizer """adadelta""" +421 28 training_loop """lcwa""" +421 28 evaluator """rankbased""" +421 29 dataset """kinships""" +421 29 model """ntn""" +421 29 loss """bceaftersigmoid""" +421 29 regularizer """no""" +421 29 optimizer """adadelta""" +421 29 training_loop """lcwa""" +421 29 evaluator """rankbased""" +421 30 dataset """kinships""" +421 30 model """ntn""" +421 30 loss """bceaftersigmoid""" +421 30 regularizer """no""" +421 30 optimizer """adadelta""" +421 30 training_loop """lcwa""" +421 30 evaluator """rankbased""" +421 31 dataset """kinships""" +421 31 model """ntn""" +421 31 loss """bceaftersigmoid""" +421 31 regularizer """no""" +421 31 optimizer """adadelta""" +421 31 training_loop 
"""lcwa""" +421 31 evaluator """rankbased""" +421 32 dataset """kinships""" +421 32 model """ntn""" +421 32 loss """bceaftersigmoid""" +421 32 regularizer """no""" +421 32 optimizer """adadelta""" +421 32 training_loop """lcwa""" +421 32 evaluator """rankbased""" +421 33 dataset """kinships""" +421 33 model """ntn""" +421 33 loss """bceaftersigmoid""" +421 33 regularizer """no""" +421 33 optimizer """adadelta""" +421 33 training_loop """lcwa""" +421 33 evaluator """rankbased""" +421 34 dataset """kinships""" +421 34 model """ntn""" +421 34 loss """bceaftersigmoid""" +421 34 regularizer """no""" +421 34 optimizer """adadelta""" +421 34 training_loop """lcwa""" +421 34 evaluator """rankbased""" +421 35 dataset """kinships""" +421 35 model """ntn""" +421 35 loss """bceaftersigmoid""" +421 35 regularizer """no""" +421 35 optimizer """adadelta""" +421 35 training_loop """lcwa""" +421 35 evaluator """rankbased""" +421 36 dataset """kinships""" +421 36 model """ntn""" +421 36 loss """bceaftersigmoid""" +421 36 regularizer """no""" +421 36 optimizer """adadelta""" +421 36 training_loop """lcwa""" +421 36 evaluator """rankbased""" +421 37 dataset """kinships""" +421 37 model """ntn""" +421 37 loss """bceaftersigmoid""" +421 37 regularizer """no""" +421 37 optimizer """adadelta""" +421 37 training_loop """lcwa""" +421 37 evaluator """rankbased""" +421 38 dataset """kinships""" +421 38 model """ntn""" +421 38 loss """bceaftersigmoid""" +421 38 regularizer """no""" +421 38 optimizer """adadelta""" +421 38 training_loop """lcwa""" +421 38 evaluator """rankbased""" +421 39 dataset """kinships""" +421 39 model """ntn""" +421 39 loss """bceaftersigmoid""" +421 39 regularizer """no""" +421 39 optimizer """adadelta""" +421 39 training_loop """lcwa""" +421 39 evaluator """rankbased""" +421 40 dataset """kinships""" +421 40 model """ntn""" +421 40 loss """bceaftersigmoid""" +421 40 regularizer """no""" +421 40 optimizer """adadelta""" +421 40 training_loop """lcwa""" +421 40 evaluator 
"""rankbased""" +421 41 dataset """kinships""" +421 41 model """ntn""" +421 41 loss """bceaftersigmoid""" +421 41 regularizer """no""" +421 41 optimizer """adadelta""" +421 41 training_loop """lcwa""" +421 41 evaluator """rankbased""" +421 42 dataset """kinships""" +421 42 model """ntn""" +421 42 loss """bceaftersigmoid""" +421 42 regularizer """no""" +421 42 optimizer """adadelta""" +421 42 training_loop """lcwa""" +421 42 evaluator """rankbased""" +421 43 dataset """kinships""" +421 43 model """ntn""" +421 43 loss """bceaftersigmoid""" +421 43 regularizer """no""" +421 43 optimizer """adadelta""" +421 43 training_loop """lcwa""" +421 43 evaluator """rankbased""" +421 44 dataset """kinships""" +421 44 model """ntn""" +421 44 loss """bceaftersigmoid""" +421 44 regularizer """no""" +421 44 optimizer """adadelta""" +421 44 training_loop """lcwa""" +421 44 evaluator """rankbased""" +421 45 dataset """kinships""" +421 45 model """ntn""" +421 45 loss """bceaftersigmoid""" +421 45 regularizer """no""" +421 45 optimizer """adadelta""" +421 45 training_loop """lcwa""" +421 45 evaluator """rankbased""" +421 46 dataset """kinships""" +421 46 model """ntn""" +421 46 loss """bceaftersigmoid""" +421 46 regularizer """no""" +421 46 optimizer """adadelta""" +421 46 training_loop """lcwa""" +421 46 evaluator """rankbased""" +421 47 dataset """kinships""" +421 47 model """ntn""" +421 47 loss """bceaftersigmoid""" +421 47 regularizer """no""" +421 47 optimizer """adadelta""" +421 47 training_loop """lcwa""" +421 47 evaluator """rankbased""" +421 48 dataset """kinships""" +421 48 model """ntn""" +421 48 loss """bceaftersigmoid""" +421 48 regularizer """no""" +421 48 optimizer """adadelta""" +421 48 training_loop """lcwa""" +421 48 evaluator """rankbased""" +421 49 dataset """kinships""" +421 49 model """ntn""" +421 49 loss """bceaftersigmoid""" +421 49 regularizer """no""" +421 49 optimizer """adadelta""" +421 49 training_loop """lcwa""" +421 49 evaluator """rankbased""" +421 50 
dataset """kinships""" +421 50 model """ntn""" +421 50 loss """bceaftersigmoid""" +421 50 regularizer """no""" +421 50 optimizer """adadelta""" +421 50 training_loop """lcwa""" +421 50 evaluator """rankbased""" +421 51 dataset """kinships""" +421 51 model """ntn""" +421 51 loss """bceaftersigmoid""" +421 51 regularizer """no""" +421 51 optimizer """adadelta""" +421 51 training_loop """lcwa""" +421 51 evaluator """rankbased""" +421 52 dataset """kinships""" +421 52 model """ntn""" +421 52 loss """bceaftersigmoid""" +421 52 regularizer """no""" +421 52 optimizer """adadelta""" +421 52 training_loop """lcwa""" +421 52 evaluator """rankbased""" +421 53 dataset """kinships""" +421 53 model """ntn""" +421 53 loss """bceaftersigmoid""" +421 53 regularizer """no""" +421 53 optimizer """adadelta""" +421 53 training_loop """lcwa""" +421 53 evaluator """rankbased""" +421 54 dataset """kinships""" +421 54 model """ntn""" +421 54 loss """bceaftersigmoid""" +421 54 regularizer """no""" +421 54 optimizer """adadelta""" +421 54 training_loop """lcwa""" +421 54 evaluator """rankbased""" +421 55 dataset """kinships""" +421 55 model """ntn""" +421 55 loss """bceaftersigmoid""" +421 55 regularizer """no""" +421 55 optimizer """adadelta""" +421 55 training_loop """lcwa""" +421 55 evaluator """rankbased""" +421 56 dataset """kinships""" +421 56 model """ntn""" +421 56 loss """bceaftersigmoid""" +421 56 regularizer """no""" +421 56 optimizer """adadelta""" +421 56 training_loop """lcwa""" +421 56 evaluator """rankbased""" +421 57 dataset """kinships""" +421 57 model """ntn""" +421 57 loss """bceaftersigmoid""" +421 57 regularizer """no""" +421 57 optimizer """adadelta""" +421 57 training_loop """lcwa""" +421 57 evaluator """rankbased""" +421 58 dataset """kinships""" +421 58 model """ntn""" +421 58 loss """bceaftersigmoid""" +421 58 regularizer """no""" +421 58 optimizer """adadelta""" +421 58 training_loop """lcwa""" +421 58 evaluator """rankbased""" +421 59 dataset """kinships""" +421 
59 model """ntn""" +421 59 loss """bceaftersigmoid""" +421 59 regularizer """no""" +421 59 optimizer """adadelta""" +421 59 training_loop """lcwa""" +421 59 evaluator """rankbased""" +421 60 dataset """kinships""" +421 60 model """ntn""" +421 60 loss """bceaftersigmoid""" +421 60 regularizer """no""" +421 60 optimizer """adadelta""" +421 60 training_loop """lcwa""" +421 60 evaluator """rankbased""" +421 61 dataset """kinships""" +421 61 model """ntn""" +421 61 loss """bceaftersigmoid""" +421 61 regularizer """no""" +421 61 optimizer """adadelta""" +421 61 training_loop """lcwa""" +421 61 evaluator """rankbased""" +421 62 dataset """kinships""" +421 62 model """ntn""" +421 62 loss """bceaftersigmoid""" +421 62 regularizer """no""" +421 62 optimizer """adadelta""" +421 62 training_loop """lcwa""" +421 62 evaluator """rankbased""" +421 63 dataset """kinships""" +421 63 model """ntn""" +421 63 loss """bceaftersigmoid""" +421 63 regularizer """no""" +421 63 optimizer """adadelta""" +421 63 training_loop """lcwa""" +421 63 evaluator """rankbased""" +421 64 dataset """kinships""" +421 64 model """ntn""" +421 64 loss """bceaftersigmoid""" +421 64 regularizer """no""" +421 64 optimizer """adadelta""" +421 64 training_loop """lcwa""" +421 64 evaluator """rankbased""" +421 65 dataset """kinships""" +421 65 model """ntn""" +421 65 loss """bceaftersigmoid""" +421 65 regularizer """no""" +421 65 optimizer """adadelta""" +421 65 training_loop """lcwa""" +421 65 evaluator """rankbased""" +421 66 dataset """kinships""" +421 66 model """ntn""" +421 66 loss """bceaftersigmoid""" +421 66 regularizer """no""" +421 66 optimizer """adadelta""" +421 66 training_loop """lcwa""" +421 66 evaluator """rankbased""" +421 67 dataset """kinships""" +421 67 model """ntn""" +421 67 loss """bceaftersigmoid""" +421 67 regularizer """no""" +421 67 optimizer """adadelta""" +421 67 training_loop """lcwa""" +421 67 evaluator """rankbased""" +421 68 dataset """kinships""" +421 68 model """ntn""" +421 68 
loss """bceaftersigmoid""" +421 68 regularizer """no""" +421 68 optimizer """adadelta""" +421 68 training_loop """lcwa""" +421 68 evaluator """rankbased""" +421 69 dataset """kinships""" +421 69 model """ntn""" +421 69 loss """bceaftersigmoid""" +421 69 regularizer """no""" +421 69 optimizer """adadelta""" +421 69 training_loop """lcwa""" +421 69 evaluator """rankbased""" +421 70 dataset """kinships""" +421 70 model """ntn""" +421 70 loss """bceaftersigmoid""" +421 70 regularizer """no""" +421 70 optimizer """adadelta""" +421 70 training_loop """lcwa""" +421 70 evaluator """rankbased""" +421 71 dataset """kinships""" +421 71 model """ntn""" +421 71 loss """bceaftersigmoid""" +421 71 regularizer """no""" +421 71 optimizer """adadelta""" +421 71 training_loop """lcwa""" +421 71 evaluator """rankbased""" +421 72 dataset """kinships""" +421 72 model """ntn""" +421 72 loss """bceaftersigmoid""" +421 72 regularizer """no""" +421 72 optimizer """adadelta""" +421 72 training_loop """lcwa""" +421 72 evaluator """rankbased""" +421 73 dataset """kinships""" +421 73 model """ntn""" +421 73 loss """bceaftersigmoid""" +421 73 regularizer """no""" +421 73 optimizer """adadelta""" +421 73 training_loop """lcwa""" +421 73 evaluator """rankbased""" +421 74 dataset """kinships""" +421 74 model """ntn""" +421 74 loss """bceaftersigmoid""" +421 74 regularizer """no""" +421 74 optimizer """adadelta""" +421 74 training_loop """lcwa""" +421 74 evaluator """rankbased""" +421 75 dataset """kinships""" +421 75 model """ntn""" +421 75 loss """bceaftersigmoid""" +421 75 regularizer """no""" +421 75 optimizer """adadelta""" +421 75 training_loop """lcwa""" +421 75 evaluator """rankbased""" +421 76 dataset """kinships""" +421 76 model """ntn""" +421 76 loss """bceaftersigmoid""" +421 76 regularizer """no""" +421 76 optimizer """adadelta""" +421 76 training_loop """lcwa""" +421 76 evaluator """rankbased""" +421 77 dataset """kinships""" +421 77 model """ntn""" +421 77 loss """bceaftersigmoid""" 
+421 77 regularizer """no""" +421 77 optimizer """adadelta""" +421 77 training_loop """lcwa""" +421 77 evaluator """rankbased""" +421 78 dataset """kinships""" +421 78 model """ntn""" +421 78 loss """bceaftersigmoid""" +421 78 regularizer """no""" +421 78 optimizer """adadelta""" +421 78 training_loop """lcwa""" +421 78 evaluator """rankbased""" +421 79 dataset """kinships""" +421 79 model """ntn""" +421 79 loss """bceaftersigmoid""" +421 79 regularizer """no""" +421 79 optimizer """adadelta""" +421 79 training_loop """lcwa""" +421 79 evaluator """rankbased""" +421 80 dataset """kinships""" +421 80 model """ntn""" +421 80 loss """bceaftersigmoid""" +421 80 regularizer """no""" +421 80 optimizer """adadelta""" +421 80 training_loop """lcwa""" +421 80 evaluator """rankbased""" +421 81 dataset """kinships""" +421 81 model """ntn""" +421 81 loss """bceaftersigmoid""" +421 81 regularizer """no""" +421 81 optimizer """adadelta""" +421 81 training_loop """lcwa""" +421 81 evaluator """rankbased""" +421 82 dataset """kinships""" +421 82 model """ntn""" +421 82 loss """bceaftersigmoid""" +421 82 regularizer """no""" +421 82 optimizer """adadelta""" +421 82 training_loop """lcwa""" +421 82 evaluator """rankbased""" +421 83 dataset """kinships""" +421 83 model """ntn""" +421 83 loss """bceaftersigmoid""" +421 83 regularizer """no""" +421 83 optimizer """adadelta""" +421 83 training_loop """lcwa""" +421 83 evaluator """rankbased""" +421 84 dataset """kinships""" +421 84 model """ntn""" +421 84 loss """bceaftersigmoid""" +421 84 regularizer """no""" +421 84 optimizer """adadelta""" +421 84 training_loop """lcwa""" +421 84 evaluator """rankbased""" +421 85 dataset """kinships""" +421 85 model """ntn""" +421 85 loss """bceaftersigmoid""" +421 85 regularizer """no""" +421 85 optimizer """adadelta""" +421 85 training_loop """lcwa""" +421 85 evaluator """rankbased""" +421 86 dataset """kinships""" +421 86 model """ntn""" +421 86 loss """bceaftersigmoid""" +421 86 regularizer """no""" 
+421 86 optimizer """adadelta""" +421 86 training_loop """lcwa""" +421 86 evaluator """rankbased""" +421 87 dataset """kinships""" +421 87 model """ntn""" +421 87 loss """bceaftersigmoid""" +421 87 regularizer """no""" +421 87 optimizer """adadelta""" +421 87 training_loop """lcwa""" +421 87 evaluator """rankbased""" +421 88 dataset """kinships""" +421 88 model """ntn""" +421 88 loss """bceaftersigmoid""" +421 88 regularizer """no""" +421 88 optimizer """adadelta""" +421 88 training_loop """lcwa""" +421 88 evaluator """rankbased""" +421 89 dataset """kinships""" +421 89 model """ntn""" +421 89 loss """bceaftersigmoid""" +421 89 regularizer """no""" +421 89 optimizer """adadelta""" +421 89 training_loop """lcwa""" +421 89 evaluator """rankbased""" +421 90 dataset """kinships""" +421 90 model """ntn""" +421 90 loss """bceaftersigmoid""" +421 90 regularizer """no""" +421 90 optimizer """adadelta""" +421 90 training_loop """lcwa""" +421 90 evaluator """rankbased""" +421 91 dataset """kinships""" +421 91 model """ntn""" +421 91 loss """bceaftersigmoid""" +421 91 regularizer """no""" +421 91 optimizer """adadelta""" +421 91 training_loop """lcwa""" +421 91 evaluator """rankbased""" +421 92 dataset """kinships""" +421 92 model """ntn""" +421 92 loss """bceaftersigmoid""" +421 92 regularizer """no""" +421 92 optimizer """adadelta""" +421 92 training_loop """lcwa""" +421 92 evaluator """rankbased""" +421 93 dataset """kinships""" +421 93 model """ntn""" +421 93 loss """bceaftersigmoid""" +421 93 regularizer """no""" +421 93 optimizer """adadelta""" +421 93 training_loop """lcwa""" +421 93 evaluator """rankbased""" +421 94 dataset """kinships""" +421 94 model """ntn""" +421 94 loss """bceaftersigmoid""" +421 94 regularizer """no""" +421 94 optimizer """adadelta""" +421 94 training_loop """lcwa""" +421 94 evaluator """rankbased""" +421 95 dataset """kinships""" +421 95 model """ntn""" +421 95 loss """bceaftersigmoid""" +421 95 regularizer """no""" +421 95 optimizer 
"""adadelta""" +421 95 training_loop """lcwa""" +421 95 evaluator """rankbased""" +421 96 dataset """kinships""" +421 96 model """ntn""" +421 96 loss """bceaftersigmoid""" +421 96 regularizer """no""" +421 96 optimizer """adadelta""" +421 96 training_loop """lcwa""" +421 96 evaluator """rankbased""" +421 97 dataset """kinships""" +421 97 model """ntn""" +421 97 loss """bceaftersigmoid""" +421 97 regularizer """no""" +421 97 optimizer """adadelta""" +421 97 training_loop """lcwa""" +421 97 evaluator """rankbased""" +421 98 dataset """kinships""" +421 98 model """ntn""" +421 98 loss """bceaftersigmoid""" +421 98 regularizer """no""" +421 98 optimizer """adadelta""" +421 98 training_loop """lcwa""" +421 98 evaluator """rankbased""" +421 99 dataset """kinships""" +421 99 model """ntn""" +421 99 loss """bceaftersigmoid""" +421 99 regularizer """no""" +421 99 optimizer """adadelta""" +421 99 training_loop """lcwa""" +421 99 evaluator """rankbased""" +421 100 dataset """kinships""" +421 100 model """ntn""" +421 100 loss """bceaftersigmoid""" +421 100 regularizer """no""" +421 100 optimizer """adadelta""" +421 100 training_loop """lcwa""" +421 100 evaluator """rankbased""" +422 1 model.embedding_dim 2.0 +422 1 training.batch_size 1.0 +422 1 training.label_smoothing 0.004480197878692571 +422 2 model.embedding_dim 1.0 +422 2 training.batch_size 0.0 +422 2 training.label_smoothing 0.06870859930371939 +422 3 model.embedding_dim 0.0 +422 3 training.batch_size 2.0 +422 3 training.label_smoothing 0.3218773995630502 +422 4 model.embedding_dim 0.0 +422 4 training.batch_size 2.0 +422 4 training.label_smoothing 0.4637021550578786 +422 5 model.embedding_dim 0.0 +422 5 training.batch_size 2.0 +422 5 training.label_smoothing 0.0096200441792571 +422 6 model.embedding_dim 0.0 +422 6 training.batch_size 2.0 +422 6 training.label_smoothing 0.1048201945616297 +422 7 model.embedding_dim 0.0 +422 7 training.batch_size 2.0 +422 7 training.label_smoothing 0.004703860734378637 +422 8 
model.embedding_dim 2.0 +422 8 training.batch_size 0.0 +422 8 training.label_smoothing 0.0011235563563452258 +422 9 model.embedding_dim 0.0 +422 9 training.batch_size 0.0 +422 9 training.label_smoothing 0.04707188916964374 +422 10 model.embedding_dim 0.0 +422 10 training.batch_size 2.0 +422 10 training.label_smoothing 0.3259232987124428 +422 11 model.embedding_dim 1.0 +422 11 training.batch_size 2.0 +422 11 training.label_smoothing 0.06718927072166209 +422 12 model.embedding_dim 1.0 +422 12 training.batch_size 0.0 +422 12 training.label_smoothing 0.31701296960774333 +422 13 model.embedding_dim 0.0 +422 13 training.batch_size 1.0 +422 13 training.label_smoothing 0.0013170139844651688 +422 14 model.embedding_dim 2.0 +422 14 training.batch_size 0.0 +422 14 training.label_smoothing 0.0067338302159352095 +422 15 model.embedding_dim 1.0 +422 15 training.batch_size 2.0 +422 15 training.label_smoothing 0.05786456571392936 +422 16 model.embedding_dim 0.0 +422 16 training.batch_size 0.0 +422 16 training.label_smoothing 0.005333228947044873 +422 17 model.embedding_dim 2.0 +422 17 training.batch_size 1.0 +422 17 training.label_smoothing 0.4343366909248196 +422 18 model.embedding_dim 0.0 +422 18 training.batch_size 0.0 +422 18 training.label_smoothing 0.001751053385549546 +422 19 model.embedding_dim 2.0 +422 19 training.batch_size 1.0 +422 19 training.label_smoothing 0.017861317121219016 +422 20 model.embedding_dim 0.0 +422 20 training.batch_size 1.0 +422 20 training.label_smoothing 0.005687885707413616 +422 21 model.embedding_dim 2.0 +422 21 training.batch_size 2.0 +422 21 training.label_smoothing 0.075577771091888 +422 22 model.embedding_dim 2.0 +422 22 training.batch_size 0.0 +422 22 training.label_smoothing 0.0010309590251777537 +422 23 model.embedding_dim 2.0 +422 23 training.batch_size 0.0 +422 23 training.label_smoothing 0.19490484872208952 +422 24 model.embedding_dim 1.0 +422 24 training.batch_size 2.0 +422 24 training.label_smoothing 0.0032582373689552784 +422 25 
model.embedding_dim 0.0 +422 25 training.batch_size 0.0 +422 25 training.label_smoothing 0.07600770497061182 +422 26 model.embedding_dim 0.0 +422 26 training.batch_size 2.0 +422 26 training.label_smoothing 0.06075794728295479 +422 27 model.embedding_dim 0.0 +422 27 training.batch_size 1.0 +422 27 training.label_smoothing 0.015441269340331656 +422 28 model.embedding_dim 0.0 +422 28 training.batch_size 0.0 +422 28 training.label_smoothing 0.003219187471598084 +422 29 model.embedding_dim 0.0 +422 29 training.batch_size 0.0 +422 29 training.label_smoothing 0.0032696876356321032 +422 30 model.embedding_dim 1.0 +422 30 training.batch_size 0.0 +422 30 training.label_smoothing 0.001971923761850682 +422 31 model.embedding_dim 1.0 +422 31 training.batch_size 0.0 +422 31 training.label_smoothing 0.0016198867129946827 +422 32 model.embedding_dim 2.0 +422 32 training.batch_size 2.0 +422 32 training.label_smoothing 0.17355271979617903 +422 33 model.embedding_dim 0.0 +422 33 training.batch_size 2.0 +422 33 training.label_smoothing 0.18664518981367254 +422 34 model.embedding_dim 1.0 +422 34 training.batch_size 0.0 +422 34 training.label_smoothing 0.036053138843102094 +422 35 model.embedding_dim 2.0 +422 35 training.batch_size 2.0 +422 35 training.label_smoothing 0.008056766788130642 +422 36 model.embedding_dim 2.0 +422 36 training.batch_size 0.0 +422 36 training.label_smoothing 0.0025612001033205227 +422 37 model.embedding_dim 1.0 +422 37 training.batch_size 1.0 +422 37 training.label_smoothing 0.009594087890013005 +422 38 model.embedding_dim 2.0 +422 38 training.batch_size 1.0 +422 38 training.label_smoothing 0.2845073309305189 +422 39 model.embedding_dim 1.0 +422 39 training.batch_size 2.0 +422 39 training.label_smoothing 0.9864559076123337 +422 40 model.embedding_dim 1.0 +422 40 training.batch_size 2.0 +422 40 training.label_smoothing 0.0010088024631875762 +422 41 model.embedding_dim 2.0 +422 41 training.batch_size 0.0 +422 41 training.label_smoothing 0.1372535321637367 +422 42 
model.embedding_dim 2.0 +422 42 training.batch_size 1.0 +422 42 training.label_smoothing 0.0935348731638544 +422 43 model.embedding_dim 2.0 +422 43 training.batch_size 1.0 +422 43 training.label_smoothing 0.03677791869402793 +422 44 model.embedding_dim 2.0 +422 44 training.batch_size 2.0 +422 44 training.label_smoothing 0.1337607949289241 +422 45 model.embedding_dim 1.0 +422 45 training.batch_size 1.0 +422 45 training.label_smoothing 0.489978786074421 +422 46 model.embedding_dim 0.0 +422 46 training.batch_size 1.0 +422 46 training.label_smoothing 0.04082839559505969 +422 47 model.embedding_dim 0.0 +422 47 training.batch_size 1.0 +422 47 training.label_smoothing 0.047735371973822195 +422 48 model.embedding_dim 2.0 +422 48 training.batch_size 1.0 +422 48 training.label_smoothing 0.008350084465692548 +422 49 model.embedding_dim 0.0 +422 49 training.batch_size 0.0 +422 49 training.label_smoothing 0.03877145018481813 +422 50 model.embedding_dim 1.0 +422 50 training.batch_size 0.0 +422 50 training.label_smoothing 0.02911124153066802 +422 51 model.embedding_dim 1.0 +422 51 training.batch_size 0.0 +422 51 training.label_smoothing 0.12509684882188093 +422 52 model.embedding_dim 1.0 +422 52 training.batch_size 2.0 +422 52 training.label_smoothing 0.11651486118997034 +422 53 model.embedding_dim 1.0 +422 53 training.batch_size 1.0 +422 53 training.label_smoothing 0.00615522244554285 +422 54 model.embedding_dim 1.0 +422 54 training.batch_size 0.0 +422 54 training.label_smoothing 0.9998067105496344 +422 55 model.embedding_dim 1.0 +422 55 training.batch_size 1.0 +422 55 training.label_smoothing 0.05759830787172303 +422 56 model.embedding_dim 2.0 +422 56 training.batch_size 1.0 +422 56 training.label_smoothing 0.019117945122937928 +422 57 model.embedding_dim 1.0 +422 57 training.batch_size 1.0 +422 57 training.label_smoothing 0.29363334761876325 +422 58 model.embedding_dim 0.0 +422 58 training.batch_size 1.0 +422 58 training.label_smoothing 0.4969114778264683 +422 59 
model.embedding_dim 2.0 +422 59 training.batch_size 0.0 +422 59 training.label_smoothing 0.004538854640105224 +422 60 model.embedding_dim 0.0 +422 60 training.batch_size 1.0 +422 60 training.label_smoothing 0.0012939791205807377 +422 61 model.embedding_dim 2.0 +422 61 training.batch_size 1.0 +422 61 training.label_smoothing 0.021437466896907433 +422 62 model.embedding_dim 0.0 +422 62 training.batch_size 2.0 +422 62 training.label_smoothing 0.006599418270299637 +422 63 model.embedding_dim 0.0 +422 63 training.batch_size 1.0 +422 63 training.label_smoothing 0.019357377403832615 +422 64 model.embedding_dim 0.0 +422 64 training.batch_size 2.0 +422 64 training.label_smoothing 0.05367108534417662 +422 65 model.embedding_dim 0.0 +422 65 training.batch_size 0.0 +422 65 training.label_smoothing 0.030453531692310114 +422 66 model.embedding_dim 0.0 +422 66 training.batch_size 1.0 +422 66 training.label_smoothing 0.061074803543296474 +422 67 model.embedding_dim 0.0 +422 67 training.batch_size 1.0 +422 67 training.label_smoothing 0.09521680314038765 +422 68 model.embedding_dim 2.0 +422 68 training.batch_size 0.0 +422 68 training.label_smoothing 0.8244706847411322 +422 69 model.embedding_dim 2.0 +422 69 training.batch_size 2.0 +422 69 training.label_smoothing 0.0015850868155983053 +422 70 model.embedding_dim 2.0 +422 70 training.batch_size 2.0 +422 70 training.label_smoothing 0.022341234415565014 +422 71 model.embedding_dim 0.0 +422 71 training.batch_size 2.0 +422 71 training.label_smoothing 0.0034758025495572097 +422 72 model.embedding_dim 1.0 +422 72 training.batch_size 2.0 +422 72 training.label_smoothing 0.0058844266122794415 +422 73 model.embedding_dim 1.0 +422 73 training.batch_size 0.0 +422 73 training.label_smoothing 0.4004554950698177 +422 74 model.embedding_dim 0.0 +422 74 training.batch_size 0.0 +422 74 training.label_smoothing 0.05450821084035236 +422 75 model.embedding_dim 0.0 +422 75 training.batch_size 0.0 +422 75 training.label_smoothing 0.0029756487623794846 
+422 76 model.embedding_dim 1.0 +422 76 training.batch_size 2.0 +422 76 training.label_smoothing 0.0012666053034261961 +422 77 model.embedding_dim 2.0 +422 77 training.batch_size 1.0 +422 77 training.label_smoothing 0.043634178401340054 +422 78 model.embedding_dim 1.0 +422 78 training.batch_size 0.0 +422 78 training.label_smoothing 0.2043529951561497 +422 79 model.embedding_dim 1.0 +422 79 training.batch_size 2.0 +422 79 training.label_smoothing 0.22595216623361528 +422 80 model.embedding_dim 1.0 +422 80 training.batch_size 0.0 +422 80 training.label_smoothing 0.058203753963894274 +422 81 model.embedding_dim 1.0 +422 81 training.batch_size 1.0 +422 81 training.label_smoothing 0.012373598488036736 +422 82 model.embedding_dim 1.0 +422 82 training.batch_size 2.0 +422 82 training.label_smoothing 0.4200370643241278 +422 83 model.embedding_dim 2.0 +422 83 training.batch_size 1.0 +422 83 training.label_smoothing 0.02129353500121248 +422 84 model.embedding_dim 0.0 +422 84 training.batch_size 0.0 +422 84 training.label_smoothing 0.0052584877975372325 +422 85 model.embedding_dim 0.0 +422 85 training.batch_size 2.0 +422 85 training.label_smoothing 0.0015277236904671437 +422 86 model.embedding_dim 0.0 +422 86 training.batch_size 0.0 +422 86 training.label_smoothing 0.11168076844227205 +422 87 model.embedding_dim 1.0 +422 87 training.batch_size 1.0 +422 87 training.label_smoothing 0.03653480250728616 +422 88 model.embedding_dim 1.0 +422 88 training.batch_size 0.0 +422 88 training.label_smoothing 0.0033613706207436217 +422 89 model.embedding_dim 0.0 +422 89 training.batch_size 1.0 +422 89 training.label_smoothing 0.0013625882001860024 +422 90 model.embedding_dim 1.0 +422 90 training.batch_size 0.0 +422 90 training.label_smoothing 0.012561417113304782 +422 91 model.embedding_dim 0.0 +422 91 training.batch_size 1.0 +422 91 training.label_smoothing 0.006279867391154264 +422 92 model.embedding_dim 0.0 +422 92 training.batch_size 1.0 +422 92 training.label_smoothing 
0.0738623609274932 +422 93 model.embedding_dim 2.0 +422 93 training.batch_size 1.0 +422 93 training.label_smoothing 0.7440292307239599 +422 94 model.embedding_dim 2.0 +422 94 training.batch_size 0.0 +422 94 training.label_smoothing 0.01298201108880215 +422 95 model.embedding_dim 2.0 +422 95 training.batch_size 0.0 +422 95 training.label_smoothing 0.0053778819950876565 +422 96 model.embedding_dim 0.0 +422 96 training.batch_size 2.0 +422 96 training.label_smoothing 0.06845445333555603 +422 97 model.embedding_dim 2.0 +422 97 training.batch_size 0.0 +422 97 training.label_smoothing 0.018862505665391202 +422 98 model.embedding_dim 0.0 +422 98 training.batch_size 0.0 +422 98 training.label_smoothing 0.09632833814822256 +422 99 model.embedding_dim 2.0 +422 99 training.batch_size 2.0 +422 99 training.label_smoothing 0.0011428123990772623 +422 100 model.embedding_dim 1.0 +422 100 training.batch_size 2.0 +422 100 training.label_smoothing 0.19264109410739766 +422 1 dataset """kinships""" +422 1 model """ntn""" +422 1 loss """softplus""" +422 1 regularizer """no""" +422 1 optimizer """adadelta""" +422 1 training_loop """lcwa""" +422 1 evaluator """rankbased""" +422 2 dataset """kinships""" +422 2 model """ntn""" +422 2 loss """softplus""" +422 2 regularizer """no""" +422 2 optimizer """adadelta""" +422 2 training_loop """lcwa""" +422 2 evaluator """rankbased""" +422 3 dataset """kinships""" +422 3 model """ntn""" +422 3 loss """softplus""" +422 3 regularizer """no""" +422 3 optimizer """adadelta""" +422 3 training_loop """lcwa""" +422 3 evaluator """rankbased""" +422 4 dataset """kinships""" +422 4 model """ntn""" +422 4 loss """softplus""" +422 4 regularizer """no""" +422 4 optimizer """adadelta""" +422 4 training_loop """lcwa""" +422 4 evaluator """rankbased""" +422 5 dataset """kinships""" +422 5 model """ntn""" +422 5 loss """softplus""" +422 5 regularizer """no""" +422 5 optimizer """adadelta""" +422 5 training_loop """lcwa""" +422 5 evaluator """rankbased""" +422 6 
dataset """kinships""" +422 6 model """ntn""" +422 6 loss """softplus""" +422 6 regularizer """no""" +422 6 optimizer """adadelta""" +422 6 training_loop """lcwa""" +422 6 evaluator """rankbased""" +422 7 dataset """kinships""" +422 7 model """ntn""" +422 7 loss """softplus""" +422 7 regularizer """no""" +422 7 optimizer """adadelta""" +422 7 training_loop """lcwa""" +422 7 evaluator """rankbased""" +422 8 dataset """kinships""" +422 8 model """ntn""" +422 8 loss """softplus""" +422 8 regularizer """no""" +422 8 optimizer """adadelta""" +422 8 training_loop """lcwa""" +422 8 evaluator """rankbased""" +422 9 dataset """kinships""" +422 9 model """ntn""" +422 9 loss """softplus""" +422 9 regularizer """no""" +422 9 optimizer """adadelta""" +422 9 training_loop """lcwa""" +422 9 evaluator """rankbased""" +422 10 dataset """kinships""" +422 10 model """ntn""" +422 10 loss """softplus""" +422 10 regularizer """no""" +422 10 optimizer """adadelta""" +422 10 training_loop """lcwa""" +422 10 evaluator """rankbased""" +422 11 dataset """kinships""" +422 11 model """ntn""" +422 11 loss """softplus""" +422 11 regularizer """no""" +422 11 optimizer """adadelta""" +422 11 training_loop """lcwa""" +422 11 evaluator """rankbased""" +422 12 dataset """kinships""" +422 12 model """ntn""" +422 12 loss """softplus""" +422 12 regularizer """no""" +422 12 optimizer """adadelta""" +422 12 training_loop """lcwa""" +422 12 evaluator """rankbased""" +422 13 dataset """kinships""" +422 13 model """ntn""" +422 13 loss """softplus""" +422 13 regularizer """no""" +422 13 optimizer """adadelta""" +422 13 training_loop """lcwa""" +422 13 evaluator """rankbased""" +422 14 dataset """kinships""" +422 14 model """ntn""" +422 14 loss """softplus""" +422 14 regularizer """no""" +422 14 optimizer """adadelta""" +422 14 training_loop """lcwa""" +422 14 evaluator """rankbased""" +422 15 dataset """kinships""" +422 15 model """ntn""" +422 15 loss """softplus""" +422 15 regularizer """no""" +422 15 
optimizer """adadelta""" +422 15 training_loop """lcwa""" +422 15 evaluator """rankbased""" +422 16 dataset """kinships""" +422 16 model """ntn""" +422 16 loss """softplus""" +422 16 regularizer """no""" +422 16 optimizer """adadelta""" +422 16 training_loop """lcwa""" +422 16 evaluator """rankbased""" +422 17 dataset """kinships""" +422 17 model """ntn""" +422 17 loss """softplus""" +422 17 regularizer """no""" +422 17 optimizer """adadelta""" +422 17 training_loop """lcwa""" +422 17 evaluator """rankbased""" +422 18 dataset """kinships""" +422 18 model """ntn""" +422 18 loss """softplus""" +422 18 regularizer """no""" +422 18 optimizer """adadelta""" +422 18 training_loop """lcwa""" +422 18 evaluator """rankbased""" +422 19 dataset """kinships""" +422 19 model """ntn""" +422 19 loss """softplus""" +422 19 regularizer """no""" +422 19 optimizer """adadelta""" +422 19 training_loop """lcwa""" +422 19 evaluator """rankbased""" +422 20 dataset """kinships""" +422 20 model """ntn""" +422 20 loss """softplus""" +422 20 regularizer """no""" +422 20 optimizer """adadelta""" +422 20 training_loop """lcwa""" +422 20 evaluator """rankbased""" +422 21 dataset """kinships""" +422 21 model """ntn""" +422 21 loss """softplus""" +422 21 regularizer """no""" +422 21 optimizer """adadelta""" +422 21 training_loop """lcwa""" +422 21 evaluator """rankbased""" +422 22 dataset """kinships""" +422 22 model """ntn""" +422 22 loss """softplus""" +422 22 regularizer """no""" +422 22 optimizer """adadelta""" +422 22 training_loop """lcwa""" +422 22 evaluator """rankbased""" +422 23 dataset """kinships""" +422 23 model """ntn""" +422 23 loss """softplus""" +422 23 regularizer """no""" +422 23 optimizer """adadelta""" +422 23 training_loop """lcwa""" +422 23 evaluator """rankbased""" +422 24 dataset """kinships""" +422 24 model """ntn""" +422 24 loss """softplus""" +422 24 regularizer """no""" +422 24 optimizer """adadelta""" +422 24 training_loop """lcwa""" +422 24 evaluator """rankbased""" 
+422 25 dataset """kinships""" +422 25 model """ntn""" +422 25 loss """softplus""" +422 25 regularizer """no""" +422 25 optimizer """adadelta""" +422 25 training_loop """lcwa""" +422 25 evaluator """rankbased""" +422 26 dataset """kinships""" +422 26 model """ntn""" +422 26 loss """softplus""" +422 26 regularizer """no""" +422 26 optimizer """adadelta""" +422 26 training_loop """lcwa""" +422 26 evaluator """rankbased""" +422 27 dataset """kinships""" +422 27 model """ntn""" +422 27 loss """softplus""" +422 27 regularizer """no""" +422 27 optimizer """adadelta""" +422 27 training_loop """lcwa""" +422 27 evaluator """rankbased""" +422 28 dataset """kinships""" +422 28 model """ntn""" +422 28 loss """softplus""" +422 28 regularizer """no""" +422 28 optimizer """adadelta""" +422 28 training_loop """lcwa""" +422 28 evaluator """rankbased""" +422 29 dataset """kinships""" +422 29 model """ntn""" +422 29 loss """softplus""" +422 29 regularizer """no""" +422 29 optimizer """adadelta""" +422 29 training_loop """lcwa""" +422 29 evaluator """rankbased""" +422 30 dataset """kinships""" +422 30 model """ntn""" +422 30 loss """softplus""" +422 30 regularizer """no""" +422 30 optimizer """adadelta""" +422 30 training_loop """lcwa""" +422 30 evaluator """rankbased""" +422 31 dataset """kinships""" +422 31 model """ntn""" +422 31 loss """softplus""" +422 31 regularizer """no""" +422 31 optimizer """adadelta""" +422 31 training_loop """lcwa""" +422 31 evaluator """rankbased""" +422 32 dataset """kinships""" +422 32 model """ntn""" +422 32 loss """softplus""" +422 32 regularizer """no""" +422 32 optimizer """adadelta""" +422 32 training_loop """lcwa""" +422 32 evaluator """rankbased""" +422 33 dataset """kinships""" +422 33 model """ntn""" +422 33 loss """softplus""" +422 33 regularizer """no""" +422 33 optimizer """adadelta""" +422 33 training_loop """lcwa""" +422 33 evaluator """rankbased""" +422 34 dataset """kinships""" +422 34 model """ntn""" +422 34 loss """softplus""" +422 34 
regularizer """no""" +422 34 optimizer """adadelta""" +422 34 training_loop """lcwa""" +422 34 evaluator """rankbased""" +422 35 dataset """kinships""" +422 35 model """ntn""" +422 35 loss """softplus""" +422 35 regularizer """no""" +422 35 optimizer """adadelta""" +422 35 training_loop """lcwa""" +422 35 evaluator """rankbased""" +422 36 dataset """kinships""" +422 36 model """ntn""" +422 36 loss """softplus""" +422 36 regularizer """no""" +422 36 optimizer """adadelta""" +422 36 training_loop """lcwa""" +422 36 evaluator """rankbased""" +422 37 dataset """kinships""" +422 37 model """ntn""" +422 37 loss """softplus""" +422 37 regularizer """no""" +422 37 optimizer """adadelta""" +422 37 training_loop """lcwa""" +422 37 evaluator """rankbased""" +422 38 dataset """kinships""" +422 38 model """ntn""" +422 38 loss """softplus""" +422 38 regularizer """no""" +422 38 optimizer """adadelta""" +422 38 training_loop """lcwa""" +422 38 evaluator """rankbased""" +422 39 dataset """kinships""" +422 39 model """ntn""" +422 39 loss """softplus""" +422 39 regularizer """no""" +422 39 optimizer """adadelta""" +422 39 training_loop """lcwa""" +422 39 evaluator """rankbased""" +422 40 dataset """kinships""" +422 40 model """ntn""" +422 40 loss """softplus""" +422 40 regularizer """no""" +422 40 optimizer """adadelta""" +422 40 training_loop """lcwa""" +422 40 evaluator """rankbased""" +422 41 dataset """kinships""" +422 41 model """ntn""" +422 41 loss """softplus""" +422 41 regularizer """no""" +422 41 optimizer """adadelta""" +422 41 training_loop """lcwa""" +422 41 evaluator """rankbased""" +422 42 dataset """kinships""" +422 42 model """ntn""" +422 42 loss """softplus""" +422 42 regularizer """no""" +422 42 optimizer """adadelta""" +422 42 training_loop """lcwa""" +422 42 evaluator """rankbased""" +422 43 dataset """kinships""" +422 43 model """ntn""" +422 43 loss """softplus""" +422 43 regularizer """no""" +422 43 optimizer """adadelta""" +422 43 training_loop """lcwa""" +422 
43 evaluator """rankbased""" +422 44 dataset """kinships""" +422 44 model """ntn""" +422 44 loss """softplus""" +422 44 regularizer """no""" +422 44 optimizer """adadelta""" +422 44 training_loop """lcwa""" +422 44 evaluator """rankbased""" +422 45 dataset """kinships""" +422 45 model """ntn""" +422 45 loss """softplus""" +422 45 regularizer """no""" +422 45 optimizer """adadelta""" +422 45 training_loop """lcwa""" +422 45 evaluator """rankbased""" +422 46 dataset """kinships""" +422 46 model """ntn""" +422 46 loss """softplus""" +422 46 regularizer """no""" +422 46 optimizer """adadelta""" +422 46 training_loop """lcwa""" +422 46 evaluator """rankbased""" +422 47 dataset """kinships""" +422 47 model """ntn""" +422 47 loss """softplus""" +422 47 regularizer """no""" +422 47 optimizer """adadelta""" +422 47 training_loop """lcwa""" +422 47 evaluator """rankbased""" +422 48 dataset """kinships""" +422 48 model """ntn""" +422 48 loss """softplus""" +422 48 regularizer """no""" +422 48 optimizer """adadelta""" +422 48 training_loop """lcwa""" +422 48 evaluator """rankbased""" +422 49 dataset """kinships""" +422 49 model """ntn""" +422 49 loss """softplus""" +422 49 regularizer """no""" +422 49 optimizer """adadelta""" +422 49 training_loop """lcwa""" +422 49 evaluator """rankbased""" +422 50 dataset """kinships""" +422 50 model """ntn""" +422 50 loss """softplus""" +422 50 regularizer """no""" +422 50 optimizer """adadelta""" +422 50 training_loop """lcwa""" +422 50 evaluator """rankbased""" +422 51 dataset """kinships""" +422 51 model """ntn""" +422 51 loss """softplus""" +422 51 regularizer """no""" +422 51 optimizer """adadelta""" +422 51 training_loop """lcwa""" +422 51 evaluator """rankbased""" +422 52 dataset """kinships""" +422 52 model """ntn""" +422 52 loss """softplus""" +422 52 regularizer """no""" +422 52 optimizer """adadelta""" +422 52 training_loop """lcwa""" +422 52 evaluator """rankbased""" +422 53 dataset """kinships""" +422 53 model """ntn""" +422 53 
loss """softplus""" +422 53 regularizer """no""" +422 53 optimizer """adadelta""" +422 53 training_loop """lcwa""" +422 53 evaluator """rankbased""" +422 54 dataset """kinships""" +422 54 model """ntn""" +422 54 loss """softplus""" +422 54 regularizer """no""" +422 54 optimizer """adadelta""" +422 54 training_loop """lcwa""" +422 54 evaluator """rankbased""" +422 55 dataset """kinships""" +422 55 model """ntn""" +422 55 loss """softplus""" +422 55 regularizer """no""" +422 55 optimizer """adadelta""" +422 55 training_loop """lcwa""" +422 55 evaluator """rankbased""" +422 56 dataset """kinships""" +422 56 model """ntn""" +422 56 loss """softplus""" +422 56 regularizer """no""" +422 56 optimizer """adadelta""" +422 56 training_loop """lcwa""" +422 56 evaluator """rankbased""" +422 57 dataset """kinships""" +422 57 model """ntn""" +422 57 loss """softplus""" +422 57 regularizer """no""" +422 57 optimizer """adadelta""" +422 57 training_loop """lcwa""" +422 57 evaluator """rankbased""" +422 58 dataset """kinships""" +422 58 model """ntn""" +422 58 loss """softplus""" +422 58 regularizer """no""" +422 58 optimizer """adadelta""" +422 58 training_loop """lcwa""" +422 58 evaluator """rankbased""" +422 59 dataset """kinships""" +422 59 model """ntn""" +422 59 loss """softplus""" +422 59 regularizer """no""" +422 59 optimizer """adadelta""" +422 59 training_loop """lcwa""" +422 59 evaluator """rankbased""" +422 60 dataset """kinships""" +422 60 model """ntn""" +422 60 loss """softplus""" +422 60 regularizer """no""" +422 60 optimizer """adadelta""" +422 60 training_loop """lcwa""" +422 60 evaluator """rankbased""" +422 61 dataset """kinships""" +422 61 model """ntn""" +422 61 loss """softplus""" +422 61 regularizer """no""" +422 61 optimizer """adadelta""" +422 61 training_loop """lcwa""" +422 61 evaluator """rankbased""" +422 62 dataset """kinships""" +422 62 model """ntn""" +422 62 loss """softplus""" +422 62 regularizer """no""" +422 62 optimizer """adadelta""" +422 62 
training_loop """lcwa""" +422 62 evaluator """rankbased""" +422 63 dataset """kinships""" +422 63 model """ntn""" +422 63 loss """softplus""" +422 63 regularizer """no""" +422 63 optimizer """adadelta""" +422 63 training_loop """lcwa""" +422 63 evaluator """rankbased""" +422 64 dataset """kinships""" +422 64 model """ntn""" +422 64 loss """softplus""" +422 64 regularizer """no""" +422 64 optimizer """adadelta""" +422 64 training_loop """lcwa""" +422 64 evaluator """rankbased""" +422 65 dataset """kinships""" +422 65 model """ntn""" +422 65 loss """softplus""" +422 65 regularizer """no""" +422 65 optimizer """adadelta""" +422 65 training_loop """lcwa""" +422 65 evaluator """rankbased""" +422 66 dataset """kinships""" +422 66 model """ntn""" +422 66 loss """softplus""" +422 66 regularizer """no""" +422 66 optimizer """adadelta""" +422 66 training_loop """lcwa""" +422 66 evaluator """rankbased""" +422 67 dataset """kinships""" +422 67 model """ntn""" +422 67 loss """softplus""" +422 67 regularizer """no""" +422 67 optimizer """adadelta""" +422 67 training_loop """lcwa""" +422 67 evaluator """rankbased""" +422 68 dataset """kinships""" +422 68 model """ntn""" +422 68 loss """softplus""" +422 68 regularizer """no""" +422 68 optimizer """adadelta""" +422 68 training_loop """lcwa""" +422 68 evaluator """rankbased""" +422 69 dataset """kinships""" +422 69 model """ntn""" +422 69 loss """softplus""" +422 69 regularizer """no""" +422 69 optimizer """adadelta""" +422 69 training_loop """lcwa""" +422 69 evaluator """rankbased""" +422 70 dataset """kinships""" +422 70 model """ntn""" +422 70 loss """softplus""" +422 70 regularizer """no""" +422 70 optimizer """adadelta""" +422 70 training_loop """lcwa""" +422 70 evaluator """rankbased""" +422 71 dataset """kinships""" +422 71 model """ntn""" +422 71 loss """softplus""" +422 71 regularizer """no""" +422 71 optimizer """adadelta""" +422 71 training_loop """lcwa""" +422 71 evaluator """rankbased""" +422 72 dataset """kinships""" 
+422 72 model """ntn""" +422 72 loss """softplus""" +422 72 regularizer """no""" +422 72 optimizer """adadelta""" +422 72 training_loop """lcwa""" +422 72 evaluator """rankbased""" +422 73 dataset """kinships""" +422 73 model """ntn""" +422 73 loss """softplus""" +422 73 regularizer """no""" +422 73 optimizer """adadelta""" +422 73 training_loop """lcwa""" +422 73 evaluator """rankbased""" +422 74 dataset """kinships""" +422 74 model """ntn""" +422 74 loss """softplus""" +422 74 regularizer """no""" +422 74 optimizer """adadelta""" +422 74 training_loop """lcwa""" +422 74 evaluator """rankbased""" +422 75 dataset """kinships""" +422 75 model """ntn""" +422 75 loss """softplus""" +422 75 regularizer """no""" +422 75 optimizer """adadelta""" +422 75 training_loop """lcwa""" +422 75 evaluator """rankbased""" +422 76 dataset """kinships""" +422 76 model """ntn""" +422 76 loss """softplus""" +422 76 regularizer """no""" +422 76 optimizer """adadelta""" +422 76 training_loop """lcwa""" +422 76 evaluator """rankbased""" +422 77 dataset """kinships""" +422 77 model """ntn""" +422 77 loss """softplus""" +422 77 regularizer """no""" +422 77 optimizer """adadelta""" +422 77 training_loop """lcwa""" +422 77 evaluator """rankbased""" +422 78 dataset """kinships""" +422 78 model """ntn""" +422 78 loss """softplus""" +422 78 regularizer """no""" +422 78 optimizer """adadelta""" +422 78 training_loop """lcwa""" +422 78 evaluator """rankbased""" +422 79 dataset """kinships""" +422 79 model """ntn""" +422 79 loss """softplus""" +422 79 regularizer """no""" +422 79 optimizer """adadelta""" +422 79 training_loop """lcwa""" +422 79 evaluator """rankbased""" +422 80 dataset """kinships""" +422 80 model """ntn""" +422 80 loss """softplus""" +422 80 regularizer """no""" +422 80 optimizer """adadelta""" +422 80 training_loop """lcwa""" +422 80 evaluator """rankbased""" +422 81 dataset """kinships""" +422 81 model """ntn""" +422 81 loss """softplus""" +422 81 regularizer """no""" +422 81 
optimizer """adadelta""" +422 81 training_loop """lcwa""" +422 81 evaluator """rankbased""" +422 82 dataset """kinships""" +422 82 model """ntn""" +422 82 loss """softplus""" +422 82 regularizer """no""" +422 82 optimizer """adadelta""" +422 82 training_loop """lcwa""" +422 82 evaluator """rankbased""" +422 83 dataset """kinships""" +422 83 model """ntn""" +422 83 loss """softplus""" +422 83 regularizer """no""" +422 83 optimizer """adadelta""" +422 83 training_loop """lcwa""" +422 83 evaluator """rankbased""" +422 84 dataset """kinships""" +422 84 model """ntn""" +422 84 loss """softplus""" +422 84 regularizer """no""" +422 84 optimizer """adadelta""" +422 84 training_loop """lcwa""" +422 84 evaluator """rankbased""" +422 85 dataset """kinships""" +422 85 model """ntn""" +422 85 loss """softplus""" +422 85 regularizer """no""" +422 85 optimizer """adadelta""" +422 85 training_loop """lcwa""" +422 85 evaluator """rankbased""" +422 86 dataset """kinships""" +422 86 model """ntn""" +422 86 loss """softplus""" +422 86 regularizer """no""" +422 86 optimizer """adadelta""" +422 86 training_loop """lcwa""" +422 86 evaluator """rankbased""" +422 87 dataset """kinships""" +422 87 model """ntn""" +422 87 loss """softplus""" +422 87 regularizer """no""" +422 87 optimizer """adadelta""" +422 87 training_loop """lcwa""" +422 87 evaluator """rankbased""" +422 88 dataset """kinships""" +422 88 model """ntn""" +422 88 loss """softplus""" +422 88 regularizer """no""" +422 88 optimizer """adadelta""" +422 88 training_loop """lcwa""" +422 88 evaluator """rankbased""" +422 89 dataset """kinships""" +422 89 model """ntn""" +422 89 loss """softplus""" +422 89 regularizer """no""" +422 89 optimizer """adadelta""" +422 89 training_loop """lcwa""" +422 89 evaluator """rankbased""" +422 90 dataset """kinships""" +422 90 model """ntn""" +422 90 loss """softplus""" +422 90 regularizer """no""" +422 90 optimizer """adadelta""" +422 90 training_loop """lcwa""" +422 90 evaluator """rankbased""" 
+422 91 dataset """kinships""" +422 91 model """ntn""" +422 91 loss """softplus""" +422 91 regularizer """no""" +422 91 optimizer """adadelta""" +422 91 training_loop """lcwa""" +422 91 evaluator """rankbased""" +422 92 dataset """kinships""" +422 92 model """ntn""" +422 92 loss """softplus""" +422 92 regularizer """no""" +422 92 optimizer """adadelta""" +422 92 training_loop """lcwa""" +422 92 evaluator """rankbased""" +422 93 dataset """kinships""" +422 93 model """ntn""" +422 93 loss """softplus""" +422 93 regularizer """no""" +422 93 optimizer """adadelta""" +422 93 training_loop """lcwa""" +422 93 evaluator """rankbased""" +422 94 dataset """kinships""" +422 94 model """ntn""" +422 94 loss """softplus""" +422 94 regularizer """no""" +422 94 optimizer """adadelta""" +422 94 training_loop """lcwa""" +422 94 evaluator """rankbased""" +422 95 dataset """kinships""" +422 95 model """ntn""" +422 95 loss """softplus""" +422 95 regularizer """no""" +422 95 optimizer """adadelta""" +422 95 training_loop """lcwa""" +422 95 evaluator """rankbased""" +422 96 dataset """kinships""" +422 96 model """ntn""" +422 96 loss """softplus""" +422 96 regularizer """no""" +422 96 optimizer """adadelta""" +422 96 training_loop """lcwa""" +422 96 evaluator """rankbased""" +422 97 dataset """kinships""" +422 97 model """ntn""" +422 97 loss """softplus""" +422 97 regularizer """no""" +422 97 optimizer """adadelta""" +422 97 training_loop """lcwa""" +422 97 evaluator """rankbased""" +422 98 dataset """kinships""" +422 98 model """ntn""" +422 98 loss """softplus""" +422 98 regularizer """no""" +422 98 optimizer """adadelta""" +422 98 training_loop """lcwa""" +422 98 evaluator """rankbased""" +422 99 dataset """kinships""" +422 99 model """ntn""" +422 99 loss """softplus""" +422 99 regularizer """no""" +422 99 optimizer """adadelta""" +422 99 training_loop """lcwa""" +422 99 evaluator """rankbased""" +422 100 dataset """kinships""" +422 100 model """ntn""" +422 100 loss """softplus""" +422 
100 regularizer """no""" +422 100 optimizer """adadelta""" +422 100 training_loop """lcwa""" +422 100 evaluator """rankbased""" +423 1 model.embedding_dim 1.0 +423 1 training.batch_size 0.0 +423 1 training.label_smoothing 0.41160853328980523 +423 2 model.embedding_dim 2.0 +423 2 training.batch_size 2.0 +423 2 training.label_smoothing 0.0011834390945972835 +423 3 model.embedding_dim 2.0 +423 3 training.batch_size 0.0 +423 3 training.label_smoothing 0.27622792948484215 +423 4 model.embedding_dim 1.0 +423 4 training.batch_size 1.0 +423 4 training.label_smoothing 0.0036936952952624992 +423 5 model.embedding_dim 2.0 +423 5 training.batch_size 1.0 +423 5 training.label_smoothing 0.3462069349719691 +423 6 model.embedding_dim 1.0 +423 6 training.batch_size 0.0 +423 6 training.label_smoothing 0.30689914536320817 +423 7 model.embedding_dim 1.0 +423 7 training.batch_size 0.0 +423 7 training.label_smoothing 0.036923662152241375 +423 8 model.embedding_dim 1.0 +423 8 training.batch_size 2.0 +423 8 training.label_smoothing 0.03014260177716759 +423 9 model.embedding_dim 2.0 +423 9 training.batch_size 1.0 +423 9 training.label_smoothing 0.02130319180133904 +423 10 model.embedding_dim 0.0 +423 10 training.batch_size 2.0 +423 10 training.label_smoothing 0.02374330622984141 +423 11 model.embedding_dim 1.0 +423 11 training.batch_size 2.0 +423 11 training.label_smoothing 0.03478446064753597 +423 12 model.embedding_dim 1.0 +423 12 training.batch_size 2.0 +423 12 training.label_smoothing 0.020619227881409373 +423 13 model.embedding_dim 0.0 +423 13 training.batch_size 1.0 +423 13 training.label_smoothing 0.29549159387512564 +423 14 model.embedding_dim 2.0 +423 14 training.batch_size 0.0 +423 14 training.label_smoothing 0.055003624489306846 +423 15 model.embedding_dim 1.0 +423 15 training.batch_size 2.0 +423 15 training.label_smoothing 0.03803672364702034 +423 16 model.embedding_dim 0.0 +423 16 training.batch_size 1.0 +423 16 training.label_smoothing 0.0011605345532783046 +423 17 
model.embedding_dim 2.0 +423 17 training.batch_size 1.0 +423 17 training.label_smoothing 0.042962971449467514 +423 18 model.embedding_dim 1.0 +423 18 training.batch_size 0.0 +423 18 training.label_smoothing 0.0025408430944849516 +423 19 model.embedding_dim 2.0 +423 19 training.batch_size 0.0 +423 19 training.label_smoothing 0.022843764683011736 +423 20 model.embedding_dim 0.0 +423 20 training.batch_size 2.0 +423 20 training.label_smoothing 0.0783844514789747 +423 21 model.embedding_dim 0.0 +423 21 training.batch_size 2.0 +423 21 training.label_smoothing 0.03047876157595224 +423 22 model.embedding_dim 1.0 +423 22 training.batch_size 2.0 +423 22 training.label_smoothing 0.058209388578148315 +423 23 model.embedding_dim 1.0 +423 23 training.batch_size 0.0 +423 23 training.label_smoothing 0.011922306312759624 +423 24 model.embedding_dim 2.0 +423 24 training.batch_size 2.0 +423 24 training.label_smoothing 0.0023658023818155134 +423 25 model.embedding_dim 2.0 +423 25 training.batch_size 1.0 +423 25 training.label_smoothing 0.022278373036719604 +423 26 model.embedding_dim 1.0 +423 26 training.batch_size 0.0 +423 26 training.label_smoothing 0.04647561714174114 +423 27 model.embedding_dim 0.0 +423 27 training.batch_size 1.0 +423 27 training.label_smoothing 0.006736675450160388 +423 28 model.embedding_dim 1.0 +423 28 training.batch_size 0.0 +423 28 training.label_smoothing 0.4471026826704743 +423 29 model.embedding_dim 0.0 +423 29 training.batch_size 1.0 +423 29 training.label_smoothing 0.003908183333514629 +423 30 model.embedding_dim 1.0 +423 30 training.batch_size 1.0 +423 30 training.label_smoothing 0.003666677201392299 +423 31 model.embedding_dim 0.0 +423 31 training.batch_size 2.0 +423 31 training.label_smoothing 0.09681311900690857 +423 32 model.embedding_dim 1.0 +423 32 training.batch_size 2.0 +423 32 training.label_smoothing 0.5724548356565236 +423 33 model.embedding_dim 2.0 +423 33 training.batch_size 2.0 +423 33 training.label_smoothing 0.0013591757987511618 +423 34 
model.embedding_dim 1.0 +423 34 training.batch_size 0.0 +423 34 training.label_smoothing 0.09439363273458726 +423 35 model.embedding_dim 0.0 +423 35 training.batch_size 2.0 +423 35 training.label_smoothing 0.11154033731224516 +423 36 model.embedding_dim 0.0 +423 36 training.batch_size 2.0 +423 36 training.label_smoothing 0.9804975317583853 +423 37 model.embedding_dim 2.0 +423 37 training.batch_size 2.0 +423 37 training.label_smoothing 0.383214961979768 +423 38 model.embedding_dim 2.0 +423 38 training.batch_size 0.0 +423 38 training.label_smoothing 0.01281689337252483 +423 39 model.embedding_dim 2.0 +423 39 training.batch_size 2.0 +423 39 training.label_smoothing 0.5713684762481492 +423 40 model.embedding_dim 1.0 +423 40 training.batch_size 0.0 +423 40 training.label_smoothing 0.5615598486143244 +423 41 model.embedding_dim 2.0 +423 41 training.batch_size 1.0 +423 41 training.label_smoothing 0.006506971810028534 +423 42 model.embedding_dim 0.0 +423 42 training.batch_size 0.0 +423 42 training.label_smoothing 0.0024266088088242526 +423 43 model.embedding_dim 0.0 +423 43 training.batch_size 1.0 +423 43 training.label_smoothing 0.1640922625815041 +423 44 model.embedding_dim 0.0 +423 44 training.batch_size 1.0 +423 44 training.label_smoothing 0.05662289258744719 +423 45 model.embedding_dim 0.0 +423 45 training.batch_size 2.0 +423 45 training.label_smoothing 0.01855691108291157 +423 46 model.embedding_dim 2.0 +423 46 training.batch_size 0.0 +423 46 training.label_smoothing 0.0187475566063662 +423 47 model.embedding_dim 2.0 +423 47 training.batch_size 1.0 +423 47 training.label_smoothing 0.24668619561879485 +423 48 model.embedding_dim 0.0 +423 48 training.batch_size 1.0 +423 48 training.label_smoothing 0.0024508733757493137 +423 49 model.embedding_dim 0.0 +423 49 training.batch_size 2.0 +423 49 training.label_smoothing 0.009373151838716402 +423 50 model.embedding_dim 2.0 +423 50 training.batch_size 0.0 +423 50 training.label_smoothing 0.0018856674089734542 +423 51 
model.embedding_dim 1.0 +423 51 training.batch_size 1.0 +423 51 training.label_smoothing 0.010725034147701797 +423 52 model.embedding_dim 1.0 +423 52 training.batch_size 1.0 +423 52 training.label_smoothing 0.009654478907644906 +423 53 model.embedding_dim 0.0 +423 53 training.batch_size 2.0 +423 53 training.label_smoothing 0.6040421275419141 +423 54 model.embedding_dim 2.0 +423 54 training.batch_size 2.0 +423 54 training.label_smoothing 0.006137695377783254 +423 55 model.embedding_dim 1.0 +423 55 training.batch_size 1.0 +423 55 training.label_smoothing 0.013353304691123257 +423 56 model.embedding_dim 1.0 +423 56 training.batch_size 0.0 +423 56 training.label_smoothing 0.03461759769592507 +423 57 model.embedding_dim 1.0 +423 57 training.batch_size 0.0 +423 57 training.label_smoothing 0.010346822401929358 +423 58 model.embedding_dim 1.0 +423 58 training.batch_size 1.0 +423 58 training.label_smoothing 0.42471284256680497 +423 59 model.embedding_dim 1.0 +423 59 training.batch_size 2.0 +423 59 training.label_smoothing 0.06279616690280505 +423 60 model.embedding_dim 1.0 +423 60 training.batch_size 2.0 +423 60 training.label_smoothing 0.0019211769106260337 +423 61 model.embedding_dim 2.0 +423 61 training.batch_size 2.0 +423 61 training.label_smoothing 0.07970832861121528 +423 62 model.embedding_dim 2.0 +423 62 training.batch_size 0.0 +423 62 training.label_smoothing 0.022798816725158804 +423 63 model.embedding_dim 2.0 +423 63 training.batch_size 0.0 +423 63 training.label_smoothing 0.027485274503485743 +423 64 model.embedding_dim 1.0 +423 64 training.batch_size 1.0 +423 64 training.label_smoothing 0.04298226418944883 +423 65 model.embedding_dim 0.0 +423 65 training.batch_size 2.0 +423 65 training.label_smoothing 0.0027355907335142047 +423 66 model.embedding_dim 2.0 +423 66 training.batch_size 2.0 +423 66 training.label_smoothing 0.03837658851152542 +423 67 model.embedding_dim 1.0 +423 67 training.batch_size 0.0 +423 67 training.label_smoothing 0.15393497019309296 +423 68 
model.embedding_dim 1.0 +423 68 training.batch_size 0.0 +423 68 training.label_smoothing 0.9180843152377762 +423 69 model.embedding_dim 0.0 +423 69 training.batch_size 2.0 +423 69 training.label_smoothing 0.6556176184855786 +423 70 model.embedding_dim 1.0 +423 70 training.batch_size 1.0 +423 70 training.label_smoothing 0.05537480124597182 +423 71 model.embedding_dim 1.0 +423 71 training.batch_size 2.0 +423 71 training.label_smoothing 0.0838370510325146 +423 72 model.embedding_dim 2.0 +423 72 training.batch_size 1.0 +423 72 training.label_smoothing 0.19400234440426328 +423 73 model.embedding_dim 1.0 +423 73 training.batch_size 2.0 +423 73 training.label_smoothing 0.8001144146752036 +423 74 model.embedding_dim 1.0 +423 74 training.batch_size 2.0 +423 74 training.label_smoothing 0.004215770843458017 +423 75 model.embedding_dim 0.0 +423 75 training.batch_size 0.0 +423 75 training.label_smoothing 0.9086602571418088 +423 76 model.embedding_dim 0.0 +423 76 training.batch_size 1.0 +423 76 training.label_smoothing 0.01184372709469647 +423 77 model.embedding_dim 1.0 +423 77 training.batch_size 2.0 +423 77 training.label_smoothing 0.003766720295910289 +423 78 model.embedding_dim 0.0 +423 78 training.batch_size 0.0 +423 78 training.label_smoothing 0.019337112765396028 +423 79 model.embedding_dim 2.0 +423 79 training.batch_size 1.0 +423 79 training.label_smoothing 0.11790455821774103 +423 80 model.embedding_dim 2.0 +423 80 training.batch_size 1.0 +423 80 training.label_smoothing 0.00535301250798704 +423 81 model.embedding_dim 2.0 +423 81 training.batch_size 2.0 +423 81 training.label_smoothing 0.384150161144011 +423 82 model.embedding_dim 1.0 +423 82 training.batch_size 1.0 +423 82 training.label_smoothing 0.002944271045158252 +423 83 model.embedding_dim 2.0 +423 83 training.batch_size 2.0 +423 83 training.label_smoothing 0.004418965622311417 +423 84 model.embedding_dim 1.0 +423 84 training.batch_size 0.0 +423 84 training.label_smoothing 0.0124240363052265 +423 85 
model.embedding_dim 2.0 +423 85 training.batch_size 0.0 +423 85 training.label_smoothing 0.3610724476826767 +423 86 model.embedding_dim 1.0 +423 86 training.batch_size 1.0 +423 86 training.label_smoothing 0.19780798318507797 +423 87 model.embedding_dim 2.0 +423 87 training.batch_size 1.0 +423 87 training.label_smoothing 0.04227572346098932 +423 88 model.embedding_dim 0.0 +423 88 training.batch_size 2.0 +423 88 training.label_smoothing 0.052922544975717814 +423 89 model.embedding_dim 2.0 +423 89 training.batch_size 1.0 +423 89 training.label_smoothing 0.014275022643853543 +423 90 model.embedding_dim 0.0 +423 90 training.batch_size 1.0 +423 90 training.label_smoothing 0.005848080065964356 +423 91 model.embedding_dim 1.0 +423 91 training.batch_size 2.0 +423 91 training.label_smoothing 0.032675065681246 +423 92 model.embedding_dim 0.0 +423 92 training.batch_size 0.0 +423 92 training.label_smoothing 0.11229870356616178 +423 93 model.embedding_dim 2.0 +423 93 training.batch_size 1.0 +423 93 training.label_smoothing 0.9122096648681707 +423 94 model.embedding_dim 1.0 +423 94 training.batch_size 0.0 +423 94 training.label_smoothing 0.002100152805305032 +423 95 model.embedding_dim 2.0 +423 95 training.batch_size 2.0 +423 95 training.label_smoothing 0.038610795204914755 +423 96 model.embedding_dim 0.0 +423 96 training.batch_size 1.0 +423 96 training.label_smoothing 0.008557227757629451 +423 97 model.embedding_dim 1.0 +423 97 training.batch_size 0.0 +423 97 training.label_smoothing 0.4206392633097166 +423 98 model.embedding_dim 1.0 +423 98 training.batch_size 1.0 +423 98 training.label_smoothing 0.015016402719551722 +423 99 model.embedding_dim 2.0 +423 99 training.batch_size 0.0 +423 99 training.label_smoothing 0.23048770853921816 +423 100 model.embedding_dim 2.0 +423 100 training.batch_size 2.0 +423 100 training.label_smoothing 0.5055350120580792 +423 1 dataset """kinships""" +423 1 model """ntn""" +423 1 loss """bceaftersigmoid""" +423 1 regularizer """no""" +423 1 optimizer 
"""adadelta""" +423 1 training_loop """lcwa""" +423 1 evaluator """rankbased""" +423 2 dataset """kinships""" +423 2 model """ntn""" +423 2 loss """bceaftersigmoid""" +423 2 regularizer """no""" +423 2 optimizer """adadelta""" +423 2 training_loop """lcwa""" +423 2 evaluator """rankbased""" +423 3 dataset """kinships""" +423 3 model """ntn""" +423 3 loss """bceaftersigmoid""" +423 3 regularizer """no""" +423 3 optimizer """adadelta""" +423 3 training_loop """lcwa""" +423 3 evaluator """rankbased""" +423 4 dataset """kinships""" +423 4 model """ntn""" +423 4 loss """bceaftersigmoid""" +423 4 regularizer """no""" +423 4 optimizer """adadelta""" +423 4 training_loop """lcwa""" +423 4 evaluator """rankbased""" +423 5 dataset """kinships""" +423 5 model """ntn""" +423 5 loss """bceaftersigmoid""" +423 5 regularizer """no""" +423 5 optimizer """adadelta""" +423 5 training_loop """lcwa""" +423 5 evaluator """rankbased""" +423 6 dataset """kinships""" +423 6 model """ntn""" +423 6 loss """bceaftersigmoid""" +423 6 regularizer """no""" +423 6 optimizer """adadelta""" +423 6 training_loop """lcwa""" +423 6 evaluator """rankbased""" +423 7 dataset """kinships""" +423 7 model """ntn""" +423 7 loss """bceaftersigmoid""" +423 7 regularizer """no""" +423 7 optimizer """adadelta""" +423 7 training_loop """lcwa""" +423 7 evaluator """rankbased""" +423 8 dataset """kinships""" +423 8 model """ntn""" +423 8 loss """bceaftersigmoid""" +423 8 regularizer """no""" +423 8 optimizer """adadelta""" +423 8 training_loop """lcwa""" +423 8 evaluator """rankbased""" +423 9 dataset """kinships""" +423 9 model """ntn""" +423 9 loss """bceaftersigmoid""" +423 9 regularizer """no""" +423 9 optimizer """adadelta""" +423 9 training_loop """lcwa""" +423 9 evaluator """rankbased""" +423 10 dataset """kinships""" +423 10 model """ntn""" +423 10 loss """bceaftersigmoid""" +423 10 regularizer """no""" +423 10 optimizer """adadelta""" +423 10 training_loop """lcwa""" +423 10 evaluator """rankbased""" +423 
11 dataset """kinships""" +423 11 model """ntn""" +423 11 loss """bceaftersigmoid""" +423 11 regularizer """no""" +423 11 optimizer """adadelta""" +423 11 training_loop """lcwa""" +423 11 evaluator """rankbased""" +423 12 dataset """kinships""" +423 12 model """ntn""" +423 12 loss """bceaftersigmoid""" +423 12 regularizer """no""" +423 12 optimizer """adadelta""" +423 12 training_loop """lcwa""" +423 12 evaluator """rankbased""" +423 13 dataset """kinships""" +423 13 model """ntn""" +423 13 loss """bceaftersigmoid""" +423 13 regularizer """no""" +423 13 optimizer """adadelta""" +423 13 training_loop """lcwa""" +423 13 evaluator """rankbased""" +423 14 dataset """kinships""" +423 14 model """ntn""" +423 14 loss """bceaftersigmoid""" +423 14 regularizer """no""" +423 14 optimizer """adadelta""" +423 14 training_loop """lcwa""" +423 14 evaluator """rankbased""" +423 15 dataset """kinships""" +423 15 model """ntn""" +423 15 loss """bceaftersigmoid""" +423 15 regularizer """no""" +423 15 optimizer """adadelta""" +423 15 training_loop """lcwa""" +423 15 evaluator """rankbased""" +423 16 dataset """kinships""" +423 16 model """ntn""" +423 16 loss """bceaftersigmoid""" +423 16 regularizer """no""" +423 16 optimizer """adadelta""" +423 16 training_loop """lcwa""" +423 16 evaluator """rankbased""" +423 17 dataset """kinships""" +423 17 model """ntn""" +423 17 loss """bceaftersigmoid""" +423 17 regularizer """no""" +423 17 optimizer """adadelta""" +423 17 training_loop """lcwa""" +423 17 evaluator """rankbased""" +423 18 dataset """kinships""" +423 18 model """ntn""" +423 18 loss """bceaftersigmoid""" +423 18 regularizer """no""" +423 18 optimizer """adadelta""" +423 18 training_loop """lcwa""" +423 18 evaluator """rankbased""" +423 19 dataset """kinships""" +423 19 model """ntn""" +423 19 loss """bceaftersigmoid""" +423 19 regularizer """no""" +423 19 optimizer """adadelta""" +423 19 training_loop """lcwa""" +423 19 evaluator """rankbased""" +423 20 dataset """kinships""" 
+423 20 model """ntn""" +423 20 loss """bceaftersigmoid""" +423 20 regularizer """no""" +423 20 optimizer """adadelta""" +423 20 training_loop """lcwa""" +423 20 evaluator """rankbased""" +423 21 dataset """kinships""" +423 21 model """ntn""" +423 21 loss """bceaftersigmoid""" +423 21 regularizer """no""" +423 21 optimizer """adadelta""" +423 21 training_loop """lcwa""" +423 21 evaluator """rankbased""" +423 22 dataset """kinships""" +423 22 model """ntn""" +423 22 loss """bceaftersigmoid""" +423 22 regularizer """no""" +423 22 optimizer """adadelta""" +423 22 training_loop """lcwa""" +423 22 evaluator """rankbased""" +423 23 dataset """kinships""" +423 23 model """ntn""" +423 23 loss """bceaftersigmoid""" +423 23 regularizer """no""" +423 23 optimizer """adadelta""" +423 23 training_loop """lcwa""" +423 23 evaluator """rankbased""" +423 24 dataset """kinships""" +423 24 model """ntn""" +423 24 loss """bceaftersigmoid""" +423 24 regularizer """no""" +423 24 optimizer """adadelta""" +423 24 training_loop """lcwa""" +423 24 evaluator """rankbased""" +423 25 dataset """kinships""" +423 25 model """ntn""" +423 25 loss """bceaftersigmoid""" +423 25 regularizer """no""" +423 25 optimizer """adadelta""" +423 25 training_loop """lcwa""" +423 25 evaluator """rankbased""" +423 26 dataset """kinships""" +423 26 model """ntn""" +423 26 loss """bceaftersigmoid""" +423 26 regularizer """no""" +423 26 optimizer """adadelta""" +423 26 training_loop """lcwa""" +423 26 evaluator """rankbased""" +423 27 dataset """kinships""" +423 27 model """ntn""" +423 27 loss """bceaftersigmoid""" +423 27 regularizer """no""" +423 27 optimizer """adadelta""" +423 27 training_loop """lcwa""" +423 27 evaluator """rankbased""" +423 28 dataset """kinships""" +423 28 model """ntn""" +423 28 loss """bceaftersigmoid""" +423 28 regularizer """no""" +423 28 optimizer """adadelta""" +423 28 training_loop """lcwa""" +423 28 evaluator """rankbased""" +423 29 dataset """kinships""" +423 29 model """ntn""" +423 
29 loss """bceaftersigmoid""" +423 29 regularizer """no""" +423 29 optimizer """adadelta""" +423 29 training_loop """lcwa""" +423 29 evaluator """rankbased""" +423 30 dataset """kinships""" +423 30 model """ntn""" +423 30 loss """bceaftersigmoid""" +423 30 regularizer """no""" +423 30 optimizer """adadelta""" +423 30 training_loop """lcwa""" +423 30 evaluator """rankbased""" +423 31 dataset """kinships""" +423 31 model """ntn""" +423 31 loss """bceaftersigmoid""" +423 31 regularizer """no""" +423 31 optimizer """adadelta""" +423 31 training_loop """lcwa""" +423 31 evaluator """rankbased""" +423 32 dataset """kinships""" +423 32 model """ntn""" +423 32 loss """bceaftersigmoid""" +423 32 regularizer """no""" +423 32 optimizer """adadelta""" +423 32 training_loop """lcwa""" +423 32 evaluator """rankbased""" +423 33 dataset """kinships""" +423 33 model """ntn""" +423 33 loss """bceaftersigmoid""" +423 33 regularizer """no""" +423 33 optimizer """adadelta""" +423 33 training_loop """lcwa""" +423 33 evaluator """rankbased""" +423 34 dataset """kinships""" +423 34 model """ntn""" +423 34 loss """bceaftersigmoid""" +423 34 regularizer """no""" +423 34 optimizer """adadelta""" +423 34 training_loop """lcwa""" +423 34 evaluator """rankbased""" +423 35 dataset """kinships""" +423 35 model """ntn""" +423 35 loss """bceaftersigmoid""" +423 35 regularizer """no""" +423 35 optimizer """adadelta""" +423 35 training_loop """lcwa""" +423 35 evaluator """rankbased""" +423 36 dataset """kinships""" +423 36 model """ntn""" +423 36 loss """bceaftersigmoid""" +423 36 regularizer """no""" +423 36 optimizer """adadelta""" +423 36 training_loop """lcwa""" +423 36 evaluator """rankbased""" +423 37 dataset """kinships""" +423 37 model """ntn""" +423 37 loss """bceaftersigmoid""" +423 37 regularizer """no""" +423 37 optimizer """adadelta""" +423 37 training_loop """lcwa""" +423 37 evaluator """rankbased""" +423 38 dataset """kinships""" +423 38 model """ntn""" +423 38 loss 
"""bceaftersigmoid""" +423 38 regularizer """no""" +423 38 optimizer """adadelta""" +423 38 training_loop """lcwa""" +423 38 evaluator """rankbased""" +423 39 dataset """kinships""" +423 39 model """ntn""" +423 39 loss """bceaftersigmoid""" +423 39 regularizer """no""" +423 39 optimizer """adadelta""" +423 39 training_loop """lcwa""" +423 39 evaluator """rankbased""" +423 40 dataset """kinships""" +423 40 model """ntn""" +423 40 loss """bceaftersigmoid""" +423 40 regularizer """no""" +423 40 optimizer """adadelta""" +423 40 training_loop """lcwa""" +423 40 evaluator """rankbased""" +423 41 dataset """kinships""" +423 41 model """ntn""" +423 41 loss """bceaftersigmoid""" +423 41 regularizer """no""" +423 41 optimizer """adadelta""" +423 41 training_loop """lcwa""" +423 41 evaluator """rankbased""" +423 42 dataset """kinships""" +423 42 model """ntn""" +423 42 loss """bceaftersigmoid""" +423 42 regularizer """no""" +423 42 optimizer """adadelta""" +423 42 training_loop """lcwa""" +423 42 evaluator """rankbased""" +423 43 dataset """kinships""" +423 43 model """ntn""" +423 43 loss """bceaftersigmoid""" +423 43 regularizer """no""" +423 43 optimizer """adadelta""" +423 43 training_loop """lcwa""" +423 43 evaluator """rankbased""" +423 44 dataset """kinships""" +423 44 model """ntn""" +423 44 loss """bceaftersigmoid""" +423 44 regularizer """no""" +423 44 optimizer """adadelta""" +423 44 training_loop """lcwa""" +423 44 evaluator """rankbased""" +423 45 dataset """kinships""" +423 45 model """ntn""" +423 45 loss """bceaftersigmoid""" +423 45 regularizer """no""" +423 45 optimizer """adadelta""" +423 45 training_loop """lcwa""" +423 45 evaluator """rankbased""" +423 46 dataset """kinships""" +423 46 model """ntn""" +423 46 loss """bceaftersigmoid""" +423 46 regularizer """no""" +423 46 optimizer """adadelta""" +423 46 training_loop """lcwa""" +423 46 evaluator """rankbased""" +423 47 dataset """kinships""" +423 47 model """ntn""" +423 47 loss """bceaftersigmoid""" +423 
47 regularizer """no""" +423 47 optimizer """adadelta""" +423 47 training_loop """lcwa""" +423 47 evaluator """rankbased""" +423 48 dataset """kinships""" +423 48 model """ntn""" +423 48 loss """bceaftersigmoid""" +423 48 regularizer """no""" +423 48 optimizer """adadelta""" +423 48 training_loop """lcwa""" +423 48 evaluator """rankbased""" +423 49 dataset """kinships""" +423 49 model """ntn""" +423 49 loss """bceaftersigmoid""" +423 49 regularizer """no""" +423 49 optimizer """adadelta""" +423 49 training_loop """lcwa""" +423 49 evaluator """rankbased""" +423 50 dataset """kinships""" +423 50 model """ntn""" +423 50 loss """bceaftersigmoid""" +423 50 regularizer """no""" +423 50 optimizer """adadelta""" +423 50 training_loop """lcwa""" +423 50 evaluator """rankbased""" +423 51 dataset """kinships""" +423 51 model """ntn""" +423 51 loss """bceaftersigmoid""" +423 51 regularizer """no""" +423 51 optimizer """adadelta""" +423 51 training_loop """lcwa""" +423 51 evaluator """rankbased""" +423 52 dataset """kinships""" +423 52 model """ntn""" +423 52 loss """bceaftersigmoid""" +423 52 regularizer """no""" +423 52 optimizer """adadelta""" +423 52 training_loop """lcwa""" +423 52 evaluator """rankbased""" +423 53 dataset """kinships""" +423 53 model """ntn""" +423 53 loss """bceaftersigmoid""" +423 53 regularizer """no""" +423 53 optimizer """adadelta""" +423 53 training_loop """lcwa""" +423 53 evaluator """rankbased""" +423 54 dataset """kinships""" +423 54 model """ntn""" +423 54 loss """bceaftersigmoid""" +423 54 regularizer """no""" +423 54 optimizer """adadelta""" +423 54 training_loop """lcwa""" +423 54 evaluator """rankbased""" +423 55 dataset """kinships""" +423 55 model """ntn""" +423 55 loss """bceaftersigmoid""" +423 55 regularizer """no""" +423 55 optimizer """adadelta""" +423 55 training_loop """lcwa""" +423 55 evaluator """rankbased""" +423 56 dataset """kinships""" +423 56 model """ntn""" +423 56 loss """bceaftersigmoid""" +423 56 regularizer """no""" +423 
56 optimizer """adadelta""" +423 56 training_loop """lcwa""" +423 56 evaluator """rankbased""" +423 57 dataset """kinships""" +423 57 model """ntn""" +423 57 loss """bceaftersigmoid""" +423 57 regularizer """no""" +423 57 optimizer """adadelta""" +423 57 training_loop """lcwa""" +423 57 evaluator """rankbased""" +423 58 dataset """kinships""" +423 58 model """ntn""" +423 58 loss """bceaftersigmoid""" +423 58 regularizer """no""" +423 58 optimizer """adadelta""" +423 58 training_loop """lcwa""" +423 58 evaluator """rankbased""" +423 59 dataset """kinships""" +423 59 model """ntn""" +423 59 loss """bceaftersigmoid""" +423 59 regularizer """no""" +423 59 optimizer """adadelta""" +423 59 training_loop """lcwa""" +423 59 evaluator """rankbased""" +423 60 dataset """kinships""" +423 60 model """ntn""" +423 60 loss """bceaftersigmoid""" +423 60 regularizer """no""" +423 60 optimizer """adadelta""" +423 60 training_loop """lcwa""" +423 60 evaluator """rankbased""" +423 61 dataset """kinships""" +423 61 model """ntn""" +423 61 loss """bceaftersigmoid""" +423 61 regularizer """no""" +423 61 optimizer """adadelta""" +423 61 training_loop """lcwa""" +423 61 evaluator """rankbased""" +423 62 dataset """kinships""" +423 62 model """ntn""" +423 62 loss """bceaftersigmoid""" +423 62 regularizer """no""" +423 62 optimizer """adadelta""" +423 62 training_loop """lcwa""" +423 62 evaluator """rankbased""" +423 63 dataset """kinships""" +423 63 model """ntn""" +423 63 loss """bceaftersigmoid""" +423 63 regularizer """no""" +423 63 optimizer """adadelta""" +423 63 training_loop """lcwa""" +423 63 evaluator """rankbased""" +423 64 dataset """kinships""" +423 64 model """ntn""" +423 64 loss """bceaftersigmoid""" +423 64 regularizer """no""" +423 64 optimizer """adadelta""" +423 64 training_loop """lcwa""" +423 64 evaluator """rankbased""" +423 65 dataset """kinships""" +423 65 model """ntn""" +423 65 loss """bceaftersigmoid""" +423 65 regularizer """no""" +423 65 optimizer """adadelta""" 
+423 65 training_loop """lcwa""" +423 65 evaluator """rankbased""" +423 66 dataset """kinships""" +423 66 model """ntn""" +423 66 loss """bceaftersigmoid""" +423 66 regularizer """no""" +423 66 optimizer """adadelta""" +423 66 training_loop """lcwa""" +423 66 evaluator """rankbased""" +423 67 dataset """kinships""" +423 67 model """ntn""" +423 67 loss """bceaftersigmoid""" +423 67 regularizer """no""" +423 67 optimizer """adadelta""" +423 67 training_loop """lcwa""" +423 67 evaluator """rankbased""" +423 68 dataset """kinships""" +423 68 model """ntn""" +423 68 loss """bceaftersigmoid""" +423 68 regularizer """no""" +423 68 optimizer """adadelta""" +423 68 training_loop """lcwa""" +423 68 evaluator """rankbased""" +423 69 dataset """kinships""" +423 69 model """ntn""" +423 69 loss """bceaftersigmoid""" +423 69 regularizer """no""" +423 69 optimizer """adadelta""" +423 69 training_loop """lcwa""" +423 69 evaluator """rankbased""" +423 70 dataset """kinships""" +423 70 model """ntn""" +423 70 loss """bceaftersigmoid""" +423 70 regularizer """no""" +423 70 optimizer """adadelta""" +423 70 training_loop """lcwa""" +423 70 evaluator """rankbased""" +423 71 dataset """kinships""" +423 71 model """ntn""" +423 71 loss """bceaftersigmoid""" +423 71 regularizer """no""" +423 71 optimizer """adadelta""" +423 71 training_loop """lcwa""" +423 71 evaluator """rankbased""" +423 72 dataset """kinships""" +423 72 model """ntn""" +423 72 loss """bceaftersigmoid""" +423 72 regularizer """no""" +423 72 optimizer """adadelta""" +423 72 training_loop """lcwa""" +423 72 evaluator """rankbased""" +423 73 dataset """kinships""" +423 73 model """ntn""" +423 73 loss """bceaftersigmoid""" +423 73 regularizer """no""" +423 73 optimizer """adadelta""" +423 73 training_loop """lcwa""" +423 73 evaluator """rankbased""" +423 74 dataset """kinships""" +423 74 model """ntn""" +423 74 loss """bceaftersigmoid""" +423 74 regularizer """no""" +423 74 optimizer """adadelta""" +423 74 training_loop 
"""lcwa""" +423 74 evaluator """rankbased""" +423 75 dataset """kinships""" +423 75 model """ntn""" +423 75 loss """bceaftersigmoid""" +423 75 regularizer """no""" +423 75 optimizer """adadelta""" +423 75 training_loop """lcwa""" +423 75 evaluator """rankbased""" +423 76 dataset """kinships""" +423 76 model """ntn""" +423 76 loss """bceaftersigmoid""" +423 76 regularizer """no""" +423 76 optimizer """adadelta""" +423 76 training_loop """lcwa""" +423 76 evaluator """rankbased""" +423 77 dataset """kinships""" +423 77 model """ntn""" +423 77 loss """bceaftersigmoid""" +423 77 regularizer """no""" +423 77 optimizer """adadelta""" +423 77 training_loop """lcwa""" +423 77 evaluator """rankbased""" +423 78 dataset """kinships""" +423 78 model """ntn""" +423 78 loss """bceaftersigmoid""" +423 78 regularizer """no""" +423 78 optimizer """adadelta""" +423 78 training_loop """lcwa""" +423 78 evaluator """rankbased""" +423 79 dataset """kinships""" +423 79 model """ntn""" +423 79 loss """bceaftersigmoid""" +423 79 regularizer """no""" +423 79 optimizer """adadelta""" +423 79 training_loop """lcwa""" +423 79 evaluator """rankbased""" +423 80 dataset """kinships""" +423 80 model """ntn""" +423 80 loss """bceaftersigmoid""" +423 80 regularizer """no""" +423 80 optimizer """adadelta""" +423 80 training_loop """lcwa""" +423 80 evaluator """rankbased""" +423 81 dataset """kinships""" +423 81 model """ntn""" +423 81 loss """bceaftersigmoid""" +423 81 regularizer """no""" +423 81 optimizer """adadelta""" +423 81 training_loop """lcwa""" +423 81 evaluator """rankbased""" +423 82 dataset """kinships""" +423 82 model """ntn""" +423 82 loss """bceaftersigmoid""" +423 82 regularizer """no""" +423 82 optimizer """adadelta""" +423 82 training_loop """lcwa""" +423 82 evaluator """rankbased""" +423 83 dataset """kinships""" +423 83 model """ntn""" +423 83 loss """bceaftersigmoid""" +423 83 regularizer """no""" +423 83 optimizer """adadelta""" +423 83 training_loop """lcwa""" +423 83 evaluator 
"""rankbased""" +423 84 dataset """kinships""" +423 84 model """ntn""" +423 84 loss """bceaftersigmoid""" +423 84 regularizer """no""" +423 84 optimizer """adadelta""" +423 84 training_loop """lcwa""" +423 84 evaluator """rankbased""" +423 85 dataset """kinships""" +423 85 model """ntn""" +423 85 loss """bceaftersigmoid""" +423 85 regularizer """no""" +423 85 optimizer """adadelta""" +423 85 training_loop """lcwa""" +423 85 evaluator """rankbased""" +423 86 dataset """kinships""" +423 86 model """ntn""" +423 86 loss """bceaftersigmoid""" +423 86 regularizer """no""" +423 86 optimizer """adadelta""" +423 86 training_loop """lcwa""" +423 86 evaluator """rankbased""" +423 87 dataset """kinships""" +423 87 model """ntn""" +423 87 loss """bceaftersigmoid""" +423 87 regularizer """no""" +423 87 optimizer """adadelta""" +423 87 training_loop """lcwa""" +423 87 evaluator """rankbased""" +423 88 dataset """kinships""" +423 88 model """ntn""" +423 88 loss """bceaftersigmoid""" +423 88 regularizer """no""" +423 88 optimizer """adadelta""" +423 88 training_loop """lcwa""" +423 88 evaluator """rankbased""" +423 89 dataset """kinships""" +423 89 model """ntn""" +423 89 loss """bceaftersigmoid""" +423 89 regularizer """no""" +423 89 optimizer """adadelta""" +423 89 training_loop """lcwa""" +423 89 evaluator """rankbased""" +423 90 dataset """kinships""" +423 90 model """ntn""" +423 90 loss """bceaftersigmoid""" +423 90 regularizer """no""" +423 90 optimizer """adadelta""" +423 90 training_loop """lcwa""" +423 90 evaluator """rankbased""" +423 91 dataset """kinships""" +423 91 model """ntn""" +423 91 loss """bceaftersigmoid""" +423 91 regularizer """no""" +423 91 optimizer """adadelta""" +423 91 training_loop """lcwa""" +423 91 evaluator """rankbased""" +423 92 dataset """kinships""" +423 92 model """ntn""" +423 92 loss """bceaftersigmoid""" +423 92 regularizer """no""" +423 92 optimizer """adadelta""" +423 92 training_loop """lcwa""" +423 92 evaluator """rankbased""" +423 93 
dataset """kinships""" +423 93 model """ntn""" +423 93 loss """bceaftersigmoid""" +423 93 regularizer """no""" +423 93 optimizer """adadelta""" +423 93 training_loop """lcwa""" +423 93 evaluator """rankbased""" +423 94 dataset """kinships""" +423 94 model """ntn""" +423 94 loss """bceaftersigmoid""" +423 94 regularizer """no""" +423 94 optimizer """adadelta""" +423 94 training_loop """lcwa""" +423 94 evaluator """rankbased""" +423 95 dataset """kinships""" +423 95 model """ntn""" +423 95 loss """bceaftersigmoid""" +423 95 regularizer """no""" +423 95 optimizer """adadelta""" +423 95 training_loop """lcwa""" +423 95 evaluator """rankbased""" +423 96 dataset """kinships""" +423 96 model """ntn""" +423 96 loss """bceaftersigmoid""" +423 96 regularizer """no""" +423 96 optimizer """adadelta""" +423 96 training_loop """lcwa""" +423 96 evaluator """rankbased""" +423 97 dataset """kinships""" +423 97 model """ntn""" +423 97 loss """bceaftersigmoid""" +423 97 regularizer """no""" +423 97 optimizer """adadelta""" +423 97 training_loop """lcwa""" +423 97 evaluator """rankbased""" +423 98 dataset """kinships""" +423 98 model """ntn""" +423 98 loss """bceaftersigmoid""" +423 98 regularizer """no""" +423 98 optimizer """adadelta""" +423 98 training_loop """lcwa""" +423 98 evaluator """rankbased""" +423 99 dataset """kinships""" +423 99 model """ntn""" +423 99 loss """bceaftersigmoid""" +423 99 regularizer """no""" +423 99 optimizer """adadelta""" +423 99 training_loop """lcwa""" +423 99 evaluator """rankbased""" +423 100 dataset """kinships""" +423 100 model """ntn""" +423 100 loss """bceaftersigmoid""" +423 100 regularizer """no""" +423 100 optimizer """adadelta""" +423 100 training_loop """lcwa""" +423 100 evaluator """rankbased""" +424 1 model.embedding_dim 0.0 +424 1 training.batch_size 0.0 +424 1 training.label_smoothing 0.055593989476426825 +424 2 model.embedding_dim 1.0 +424 2 training.batch_size 1.0 +424 2 training.label_smoothing 0.0010947289654385543 +424 3 
model.embedding_dim 1.0 +424 3 training.batch_size 1.0 +424 3 training.label_smoothing 0.06935833251336718 +424 4 model.embedding_dim 2.0 +424 4 training.batch_size 1.0 +424 4 training.label_smoothing 0.12947175085671783 +424 5 model.embedding_dim 2.0 +424 5 training.batch_size 0.0 +424 5 training.label_smoothing 0.0010187962795356947 +424 6 model.embedding_dim 0.0 +424 6 training.batch_size 0.0 +424 6 training.label_smoothing 0.009692102939162613 +424 7 model.embedding_dim 2.0 +424 7 training.batch_size 1.0 +424 7 training.label_smoothing 0.9635533905272708 +424 8 model.embedding_dim 1.0 +424 8 training.batch_size 1.0 +424 8 training.label_smoothing 0.013819831028555192 +424 9 model.embedding_dim 0.0 +424 9 training.batch_size 2.0 +424 9 training.label_smoothing 0.013626527522200203 +424 10 model.embedding_dim 1.0 +424 10 training.batch_size 1.0 +424 10 training.label_smoothing 0.028553109493071133 +424 11 model.embedding_dim 1.0 +424 11 training.batch_size 0.0 +424 11 training.label_smoothing 0.0012905078261742009 +424 12 model.embedding_dim 0.0 +424 12 training.batch_size 0.0 +424 12 training.label_smoothing 0.01779922251853103 +424 13 model.embedding_dim 1.0 +424 13 training.batch_size 1.0 +424 13 training.label_smoothing 0.3292237265777252 +424 14 model.embedding_dim 1.0 +424 14 training.batch_size 2.0 +424 14 training.label_smoothing 0.041297976846405626 +424 15 model.embedding_dim 1.0 +424 15 training.batch_size 1.0 +424 15 training.label_smoothing 0.06573660992808476 +424 16 model.embedding_dim 2.0 +424 16 training.batch_size 2.0 +424 16 training.label_smoothing 0.018158647860423174 +424 17 model.embedding_dim 2.0 +424 17 training.batch_size 1.0 +424 17 training.label_smoothing 0.00877770062357604 +424 18 model.embedding_dim 1.0 +424 18 training.batch_size 1.0 +424 18 training.label_smoothing 0.29809961137077634 +424 19 model.embedding_dim 2.0 +424 19 training.batch_size 2.0 +424 19 training.label_smoothing 0.005366923197835025 +424 20 model.embedding_dim 
0.0 +424 20 training.batch_size 1.0 +424 20 training.label_smoothing 0.13666379167624687 +424 21 model.embedding_dim 1.0 +424 21 training.batch_size 1.0 +424 21 training.label_smoothing 0.1263500320163927 +424 22 model.embedding_dim 2.0 +424 22 training.batch_size 1.0 +424 22 training.label_smoothing 0.0017452699175329854 +424 23 model.embedding_dim 1.0 +424 23 training.batch_size 1.0 +424 23 training.label_smoothing 0.9019728321410055 +424 24 model.embedding_dim 1.0 +424 24 training.batch_size 1.0 +424 24 training.label_smoothing 0.7258117353840599 +424 25 model.embedding_dim 2.0 +424 25 training.batch_size 1.0 +424 25 training.label_smoothing 0.010136348766661057 +424 26 model.embedding_dim 2.0 +424 26 training.batch_size 0.0 +424 26 training.label_smoothing 0.0012153518882625147 +424 27 model.embedding_dim 1.0 +424 27 training.batch_size 2.0 +424 27 training.label_smoothing 0.018266648355050232 +424 28 model.embedding_dim 0.0 +424 28 training.batch_size 0.0 +424 28 training.label_smoothing 0.001510818756365156 +424 29 model.embedding_dim 1.0 +424 29 training.batch_size 1.0 +424 29 training.label_smoothing 0.005655279605089544 +424 30 model.embedding_dim 0.0 +424 30 training.batch_size 1.0 +424 30 training.label_smoothing 0.03462240100661949 +424 31 model.embedding_dim 1.0 +424 31 training.batch_size 2.0 +424 31 training.label_smoothing 0.6932330374592036 +424 32 model.embedding_dim 0.0 +424 32 training.batch_size 1.0 +424 32 training.label_smoothing 0.029368467955757944 +424 33 model.embedding_dim 1.0 +424 33 training.batch_size 0.0 +424 33 training.label_smoothing 0.042798221240626474 +424 34 model.embedding_dim 0.0 +424 34 training.batch_size 1.0 +424 34 training.label_smoothing 0.34846076199958614 +424 35 model.embedding_dim 2.0 +424 35 training.batch_size 2.0 +424 35 training.label_smoothing 0.00848194675363644 +424 36 model.embedding_dim 0.0 +424 36 training.batch_size 1.0 +424 36 training.label_smoothing 0.004499614873333293 +424 37 model.embedding_dim 2.0 
+424 37 training.batch_size 0.0 +424 37 training.label_smoothing 0.0024246005333808875 +424 38 model.embedding_dim 2.0 +424 38 training.batch_size 1.0 +424 38 training.label_smoothing 0.001915611826415115 +424 39 model.embedding_dim 0.0 +424 39 training.batch_size 1.0 +424 39 training.label_smoothing 0.3671460671729338 +424 40 model.embedding_dim 2.0 +424 40 training.batch_size 0.0 +424 40 training.label_smoothing 0.013230690294385452 +424 41 model.embedding_dim 2.0 +424 41 training.batch_size 0.0 +424 41 training.label_smoothing 0.5235256775388368 +424 42 model.embedding_dim 1.0 +424 42 training.batch_size 0.0 +424 42 training.label_smoothing 0.9044283845385032 +424 43 model.embedding_dim 2.0 +424 43 training.batch_size 1.0 +424 43 training.label_smoothing 0.017219992082159005 +424 44 model.embedding_dim 2.0 +424 44 training.batch_size 1.0 +424 44 training.label_smoothing 0.13966663975978577 +424 45 model.embedding_dim 1.0 +424 45 training.batch_size 0.0 +424 45 training.label_smoothing 0.6171004951272433 +424 46 model.embedding_dim 2.0 +424 46 training.batch_size 2.0 +424 46 training.label_smoothing 0.35928232755326595 +424 47 model.embedding_dim 2.0 +424 47 training.batch_size 1.0 +424 47 training.label_smoothing 0.02960025166192831 +424 48 model.embedding_dim 0.0 +424 48 training.batch_size 2.0 +424 48 training.label_smoothing 0.046721540544101416 +424 49 model.embedding_dim 1.0 +424 49 training.batch_size 2.0 +424 49 training.label_smoothing 0.5843827772579352 +424 50 model.embedding_dim 0.0 +424 50 training.batch_size 1.0 +424 50 training.label_smoothing 0.0033013123422289646 +424 51 model.embedding_dim 2.0 +424 51 training.batch_size 2.0 +424 51 training.label_smoothing 0.006286640193357577 +424 52 model.embedding_dim 1.0 +424 52 training.batch_size 0.0 +424 52 training.label_smoothing 0.9516890753021181 +424 53 model.embedding_dim 1.0 +424 53 training.batch_size 0.0 +424 53 training.label_smoothing 0.03501365665162684 +424 54 model.embedding_dim 0.0 +424 54 
training.batch_size 2.0 +424 54 training.label_smoothing 0.003586266296600296 +424 55 model.embedding_dim 1.0 +424 55 training.batch_size 0.0 +424 55 training.label_smoothing 0.21418961912501994 +424 56 model.embedding_dim 2.0 +424 56 training.batch_size 1.0 +424 56 training.label_smoothing 0.37329845322059557 +424 57 model.embedding_dim 0.0 +424 57 training.batch_size 0.0 +424 57 training.label_smoothing 0.0532343773880817 +424 58 model.embedding_dim 1.0 +424 58 training.batch_size 2.0 +424 58 training.label_smoothing 0.003044300480480136 +424 59 model.embedding_dim 0.0 +424 59 training.batch_size 2.0 +424 59 training.label_smoothing 0.0022364658175863046 +424 60 model.embedding_dim 0.0 +424 60 training.batch_size 2.0 +424 60 training.label_smoothing 0.004983655369037299 +424 61 model.embedding_dim 2.0 +424 61 training.batch_size 2.0 +424 61 training.label_smoothing 0.15100603142194288 +424 62 model.embedding_dim 1.0 +424 62 training.batch_size 0.0 +424 62 training.label_smoothing 0.0017041737727245083 +424 63 model.embedding_dim 1.0 +424 63 training.batch_size 0.0 +424 63 training.label_smoothing 0.014691502240385143 +424 64 model.embedding_dim 2.0 +424 64 training.batch_size 2.0 +424 64 training.label_smoothing 0.007487288985971309 +424 65 model.embedding_dim 1.0 +424 65 training.batch_size 0.0 +424 65 training.label_smoothing 0.010913347120556461 +424 66 model.embedding_dim 0.0 +424 66 training.batch_size 1.0 +424 66 training.label_smoothing 0.0049905065841549376 +424 67 model.embedding_dim 0.0 +424 67 training.batch_size 0.0 +424 67 training.label_smoothing 0.06915166001885721 +424 68 model.embedding_dim 2.0 +424 68 training.batch_size 0.0 +424 68 training.label_smoothing 0.25823340606190687 +424 69 model.embedding_dim 2.0 +424 69 training.batch_size 2.0 +424 69 training.label_smoothing 0.01563378983880355 +424 70 model.embedding_dim 2.0 +424 70 training.batch_size 0.0 +424 70 training.label_smoothing 0.27026526808707557 +424 71 model.embedding_dim 0.0 +424 71 
training.batch_size 0.0 +424 71 training.label_smoothing 0.002690282509774758 +424 72 model.embedding_dim 2.0 +424 72 training.batch_size 2.0 +424 72 training.label_smoothing 0.0010198373819508262 +424 73 model.embedding_dim 1.0 +424 73 training.batch_size 0.0 +424 73 training.label_smoothing 0.0011947465583068836 +424 74 model.embedding_dim 0.0 +424 74 training.batch_size 0.0 +424 74 training.label_smoothing 0.005841320845043383 +424 75 model.embedding_dim 0.0 +424 75 training.batch_size 0.0 +424 75 training.label_smoothing 0.32051371051919225 +424 76 model.embedding_dim 2.0 +424 76 training.batch_size 1.0 +424 76 training.label_smoothing 0.1662970248933145 +424 77 model.embedding_dim 0.0 +424 77 training.batch_size 1.0 +424 77 training.label_smoothing 0.0026294993930709503 +424 78 model.embedding_dim 2.0 +424 78 training.batch_size 2.0 +424 78 training.label_smoothing 0.6856287616302494 +424 79 model.embedding_dim 0.0 +424 79 training.batch_size 0.0 +424 79 training.label_smoothing 0.2985312887362359 +424 80 model.embedding_dim 2.0 +424 80 training.batch_size 1.0 +424 80 training.label_smoothing 0.022787818465974097 +424 81 model.embedding_dim 2.0 +424 81 training.batch_size 1.0 +424 81 training.label_smoothing 0.002776367660842647 +424 82 model.embedding_dim 0.0 +424 82 training.batch_size 0.0 +424 82 training.label_smoothing 0.0053410498389398885 +424 83 model.embedding_dim 1.0 +424 83 training.batch_size 2.0 +424 83 training.label_smoothing 0.0018231426673882224 +424 84 model.embedding_dim 1.0 +424 84 training.batch_size 1.0 +424 84 training.label_smoothing 0.006770975037900335 +424 85 model.embedding_dim 0.0 +424 85 training.batch_size 1.0 +424 85 training.label_smoothing 0.030451013668966537 +424 86 model.embedding_dim 1.0 +424 86 training.batch_size 1.0 +424 86 training.label_smoothing 0.007602068087937389 +424 87 model.embedding_dim 2.0 +424 87 training.batch_size 2.0 +424 87 training.label_smoothing 0.6771268633172339 +424 88 model.embedding_dim 0.0 +424 
88 training.batch_size 1.0 +424 88 training.label_smoothing 0.0016314522224946495 +424 89 model.embedding_dim 1.0 +424 89 training.batch_size 0.0 +424 89 training.label_smoothing 0.7514259695053104 +424 90 model.embedding_dim 2.0 +424 90 training.batch_size 2.0 +424 90 training.label_smoothing 0.03807565990335052 +424 91 model.embedding_dim 2.0 +424 91 training.batch_size 1.0 +424 91 training.label_smoothing 0.0598427081186043 +424 92 model.embedding_dim 2.0 +424 92 training.batch_size 2.0 +424 92 training.label_smoothing 0.44795202003709217 +424 93 model.embedding_dim 0.0 +424 93 training.batch_size 0.0 +424 93 training.label_smoothing 0.018533653807943678 +424 94 model.embedding_dim 1.0 +424 94 training.batch_size 0.0 +424 94 training.label_smoothing 0.006656774071655434 +424 95 model.embedding_dim 2.0 +424 95 training.batch_size 2.0 +424 95 training.label_smoothing 0.0018497725839432535 +424 96 model.embedding_dim 2.0 +424 96 training.batch_size 2.0 +424 96 training.label_smoothing 0.006337028629311338 +424 97 model.embedding_dim 0.0 +424 97 training.batch_size 1.0 +424 97 training.label_smoothing 0.015186991448484004 +424 98 model.embedding_dim 2.0 +424 98 training.batch_size 1.0 +424 98 training.label_smoothing 0.0015916638511170192 +424 99 model.embedding_dim 0.0 +424 99 training.batch_size 0.0 +424 99 training.label_smoothing 0.021305887557580023 +424 100 model.embedding_dim 1.0 +424 100 training.batch_size 0.0 +424 100 training.label_smoothing 0.2088037379882734 +424 1 dataset """kinships""" +424 1 model """ntn""" +424 1 loss """softplus""" +424 1 regularizer """no""" +424 1 optimizer """adadelta""" +424 1 training_loop """lcwa""" +424 1 evaluator """rankbased""" +424 2 dataset """kinships""" +424 2 model """ntn""" +424 2 loss """softplus""" +424 2 regularizer """no""" +424 2 optimizer """adadelta""" +424 2 training_loop """lcwa""" +424 2 evaluator """rankbased""" +424 3 dataset """kinships""" +424 3 model """ntn""" +424 3 loss """softplus""" +424 3 
regularizer """no""" +424 3 optimizer """adadelta""" +424 3 training_loop """lcwa""" +424 3 evaluator """rankbased""" +424 4 dataset """kinships""" +424 4 model """ntn""" +424 4 loss """softplus""" +424 4 regularizer """no""" +424 4 optimizer """adadelta""" +424 4 training_loop """lcwa""" +424 4 evaluator """rankbased""" +424 5 dataset """kinships""" +424 5 model """ntn""" +424 5 loss """softplus""" +424 5 regularizer """no""" +424 5 optimizer """adadelta""" +424 5 training_loop """lcwa""" +424 5 evaluator """rankbased""" +424 6 dataset """kinships""" +424 6 model """ntn""" +424 6 loss """softplus""" +424 6 regularizer """no""" +424 6 optimizer """adadelta""" +424 6 training_loop """lcwa""" +424 6 evaluator """rankbased""" +424 7 dataset """kinships""" +424 7 model """ntn""" +424 7 loss """softplus""" +424 7 regularizer """no""" +424 7 optimizer """adadelta""" +424 7 training_loop """lcwa""" +424 7 evaluator """rankbased""" +424 8 dataset """kinships""" +424 8 model """ntn""" +424 8 loss """softplus""" +424 8 regularizer """no""" +424 8 optimizer """adadelta""" +424 8 training_loop """lcwa""" +424 8 evaluator """rankbased""" +424 9 dataset """kinships""" +424 9 model """ntn""" +424 9 loss """softplus""" +424 9 regularizer """no""" +424 9 optimizer """adadelta""" +424 9 training_loop """lcwa""" +424 9 evaluator """rankbased""" +424 10 dataset """kinships""" +424 10 model """ntn""" +424 10 loss """softplus""" +424 10 regularizer """no""" +424 10 optimizer """adadelta""" +424 10 training_loop """lcwa""" +424 10 evaluator """rankbased""" +424 11 dataset """kinships""" +424 11 model """ntn""" +424 11 loss """softplus""" +424 11 regularizer """no""" +424 11 optimizer """adadelta""" +424 11 training_loop """lcwa""" +424 11 evaluator """rankbased""" +424 12 dataset """kinships""" +424 12 model """ntn""" +424 12 loss """softplus""" +424 12 regularizer """no""" +424 12 optimizer """adadelta""" +424 12 training_loop """lcwa""" +424 12 evaluator """rankbased""" +424 13 dataset 
"""kinships""" +424 13 model """ntn""" +424 13 loss """softplus""" +424 13 regularizer """no""" +424 13 optimizer """adadelta""" +424 13 training_loop """lcwa""" +424 13 evaluator """rankbased""" +424 14 dataset """kinships""" +424 14 model """ntn""" +424 14 loss """softplus""" +424 14 regularizer """no""" +424 14 optimizer """adadelta""" +424 14 training_loop """lcwa""" +424 14 evaluator """rankbased""" +424 15 dataset """kinships""" +424 15 model """ntn""" +424 15 loss """softplus""" +424 15 regularizer """no""" +424 15 optimizer """adadelta""" +424 15 training_loop """lcwa""" +424 15 evaluator """rankbased""" +424 16 dataset """kinships""" +424 16 model """ntn""" +424 16 loss """softplus""" +424 16 regularizer """no""" +424 16 optimizer """adadelta""" +424 16 training_loop """lcwa""" +424 16 evaluator """rankbased""" +424 17 dataset """kinships""" +424 17 model """ntn""" +424 17 loss """softplus""" +424 17 regularizer """no""" +424 17 optimizer """adadelta""" +424 17 training_loop """lcwa""" +424 17 evaluator """rankbased""" +424 18 dataset """kinships""" +424 18 model """ntn""" +424 18 loss """softplus""" +424 18 regularizer """no""" +424 18 optimizer """adadelta""" +424 18 training_loop """lcwa""" +424 18 evaluator """rankbased""" +424 19 dataset """kinships""" +424 19 model """ntn""" +424 19 loss """softplus""" +424 19 regularizer """no""" +424 19 optimizer """adadelta""" +424 19 training_loop """lcwa""" +424 19 evaluator """rankbased""" +424 20 dataset """kinships""" +424 20 model """ntn""" +424 20 loss """softplus""" +424 20 regularizer """no""" +424 20 optimizer """adadelta""" +424 20 training_loop """lcwa""" +424 20 evaluator """rankbased""" +424 21 dataset """kinships""" +424 21 model """ntn""" +424 21 loss """softplus""" +424 21 regularizer """no""" +424 21 optimizer """adadelta""" +424 21 training_loop """lcwa""" +424 21 evaluator """rankbased""" +424 22 dataset """kinships""" +424 22 model """ntn""" +424 22 loss """softplus""" +424 22 regularizer 
"""no""" +424 22 optimizer """adadelta""" +424 22 training_loop """lcwa""" +424 22 evaluator """rankbased""" +424 23 dataset """kinships""" +424 23 model """ntn""" +424 23 loss """softplus""" +424 23 regularizer """no""" +424 23 optimizer """adadelta""" +424 23 training_loop """lcwa""" +424 23 evaluator """rankbased""" +424 24 dataset """kinships""" +424 24 model """ntn""" +424 24 loss """softplus""" +424 24 regularizer """no""" +424 24 optimizer """adadelta""" +424 24 training_loop """lcwa""" +424 24 evaluator """rankbased""" +424 25 dataset """kinships""" +424 25 model """ntn""" +424 25 loss """softplus""" +424 25 regularizer """no""" +424 25 optimizer """adadelta""" +424 25 training_loop """lcwa""" +424 25 evaluator """rankbased""" +424 26 dataset """kinships""" +424 26 model """ntn""" +424 26 loss """softplus""" +424 26 regularizer """no""" +424 26 optimizer """adadelta""" +424 26 training_loop """lcwa""" +424 26 evaluator """rankbased""" +424 27 dataset """kinships""" +424 27 model """ntn""" +424 27 loss """softplus""" +424 27 regularizer """no""" +424 27 optimizer """adadelta""" +424 27 training_loop """lcwa""" +424 27 evaluator """rankbased""" +424 28 dataset """kinships""" +424 28 model """ntn""" +424 28 loss """softplus""" +424 28 regularizer """no""" +424 28 optimizer """adadelta""" +424 28 training_loop """lcwa""" +424 28 evaluator """rankbased""" +424 29 dataset """kinships""" +424 29 model """ntn""" +424 29 loss """softplus""" +424 29 regularizer """no""" +424 29 optimizer """adadelta""" +424 29 training_loop """lcwa""" +424 29 evaluator """rankbased""" +424 30 dataset """kinships""" +424 30 model """ntn""" +424 30 loss """softplus""" +424 30 regularizer """no""" +424 30 optimizer """adadelta""" +424 30 training_loop """lcwa""" +424 30 evaluator """rankbased""" +424 31 dataset """kinships""" +424 31 model """ntn""" +424 31 loss """softplus""" +424 31 regularizer """no""" +424 31 optimizer """adadelta""" +424 31 training_loop """lcwa""" +424 31 
evaluator """rankbased""" +424 32 dataset """kinships""" +424 32 model """ntn""" +424 32 loss """softplus""" +424 32 regularizer """no""" +424 32 optimizer """adadelta""" +424 32 training_loop """lcwa""" +424 32 evaluator """rankbased""" +424 33 dataset """kinships""" +424 33 model """ntn""" +424 33 loss """softplus""" +424 33 regularizer """no""" +424 33 optimizer """adadelta""" +424 33 training_loop """lcwa""" +424 33 evaluator """rankbased""" +424 34 dataset """kinships""" +424 34 model """ntn""" +424 34 loss """softplus""" +424 34 regularizer """no""" +424 34 optimizer """adadelta""" +424 34 training_loop """lcwa""" +424 34 evaluator """rankbased""" +424 35 dataset """kinships""" +424 35 model """ntn""" +424 35 loss """softplus""" +424 35 regularizer """no""" +424 35 optimizer """adadelta""" +424 35 training_loop """lcwa""" +424 35 evaluator """rankbased""" +424 36 dataset """kinships""" +424 36 model """ntn""" +424 36 loss """softplus""" +424 36 regularizer """no""" +424 36 optimizer """adadelta""" +424 36 training_loop """lcwa""" +424 36 evaluator """rankbased""" +424 37 dataset """kinships""" +424 37 model """ntn""" +424 37 loss """softplus""" +424 37 regularizer """no""" +424 37 optimizer """adadelta""" +424 37 training_loop """lcwa""" +424 37 evaluator """rankbased""" +424 38 dataset """kinships""" +424 38 model """ntn""" +424 38 loss """softplus""" +424 38 regularizer """no""" +424 38 optimizer """adadelta""" +424 38 training_loop """lcwa""" +424 38 evaluator """rankbased""" +424 39 dataset """kinships""" +424 39 model """ntn""" +424 39 loss """softplus""" +424 39 regularizer """no""" +424 39 optimizer """adadelta""" +424 39 training_loop """lcwa""" +424 39 evaluator """rankbased""" +424 40 dataset """kinships""" +424 40 model """ntn""" +424 40 loss """softplus""" +424 40 regularizer """no""" +424 40 optimizer """adadelta""" +424 40 training_loop """lcwa""" +424 40 evaluator """rankbased""" +424 41 dataset """kinships""" +424 41 model """ntn""" +424 41 
loss """softplus""" +424 41 regularizer """no""" +424 41 optimizer """adadelta""" +424 41 training_loop """lcwa""" +424 41 evaluator """rankbased""" +424 42 dataset """kinships""" +424 42 model """ntn""" +424 42 loss """softplus""" +424 42 regularizer """no""" +424 42 optimizer """adadelta""" +424 42 training_loop """lcwa""" +424 42 evaluator """rankbased""" +424 43 dataset """kinships""" +424 43 model """ntn""" +424 43 loss """softplus""" +424 43 regularizer """no""" +424 43 optimizer """adadelta""" +424 43 training_loop """lcwa""" +424 43 evaluator """rankbased""" +424 44 dataset """kinships""" +424 44 model """ntn""" +424 44 loss """softplus""" +424 44 regularizer """no""" +424 44 optimizer """adadelta""" +424 44 training_loop """lcwa""" +424 44 evaluator """rankbased""" +424 45 dataset """kinships""" +424 45 model """ntn""" +424 45 loss """softplus""" +424 45 regularizer """no""" +424 45 optimizer """adadelta""" +424 45 training_loop """lcwa""" +424 45 evaluator """rankbased""" +424 46 dataset """kinships""" +424 46 model """ntn""" +424 46 loss """softplus""" +424 46 regularizer """no""" +424 46 optimizer """adadelta""" +424 46 training_loop """lcwa""" +424 46 evaluator """rankbased""" +424 47 dataset """kinships""" +424 47 model """ntn""" +424 47 loss """softplus""" +424 47 regularizer """no""" +424 47 optimizer """adadelta""" +424 47 training_loop """lcwa""" +424 47 evaluator """rankbased""" +424 48 dataset """kinships""" +424 48 model """ntn""" +424 48 loss """softplus""" +424 48 regularizer """no""" +424 48 optimizer """adadelta""" +424 48 training_loop """lcwa""" +424 48 evaluator """rankbased""" +424 49 dataset """kinships""" +424 49 model """ntn""" +424 49 loss """softplus""" +424 49 regularizer """no""" +424 49 optimizer """adadelta""" +424 49 training_loop """lcwa""" +424 49 evaluator """rankbased""" +424 50 dataset """kinships""" +424 50 model """ntn""" +424 50 loss """softplus""" +424 50 regularizer """no""" +424 50 optimizer """adadelta""" +424 50 
training_loop """lcwa""" +424 50 evaluator """rankbased""" +424 51 dataset """kinships""" +424 51 model """ntn""" +424 51 loss """softplus""" +424 51 regularizer """no""" +424 51 optimizer """adadelta""" +424 51 training_loop """lcwa""" +424 51 evaluator """rankbased""" +424 52 dataset """kinships""" +424 52 model """ntn""" +424 52 loss """softplus""" +424 52 regularizer """no""" +424 52 optimizer """adadelta""" +424 52 training_loop """lcwa""" +424 52 evaluator """rankbased""" +424 53 dataset """kinships""" +424 53 model """ntn""" +424 53 loss """softplus""" +424 53 regularizer """no""" +424 53 optimizer """adadelta""" +424 53 training_loop """lcwa""" +424 53 evaluator """rankbased""" +424 54 dataset """kinships""" +424 54 model """ntn""" +424 54 loss """softplus""" +424 54 regularizer """no""" +424 54 optimizer """adadelta""" +424 54 training_loop """lcwa""" +424 54 evaluator """rankbased""" +424 55 dataset """kinships""" +424 55 model """ntn""" +424 55 loss """softplus""" +424 55 regularizer """no""" +424 55 optimizer """adadelta""" +424 55 training_loop """lcwa""" +424 55 evaluator """rankbased""" +424 56 dataset """kinships""" +424 56 model """ntn""" +424 56 loss """softplus""" +424 56 regularizer """no""" +424 56 optimizer """adadelta""" +424 56 training_loop """lcwa""" +424 56 evaluator """rankbased""" +424 57 dataset """kinships""" +424 57 model """ntn""" +424 57 loss """softplus""" +424 57 regularizer """no""" +424 57 optimizer """adadelta""" +424 57 training_loop """lcwa""" +424 57 evaluator """rankbased""" +424 58 dataset """kinships""" +424 58 model """ntn""" +424 58 loss """softplus""" +424 58 regularizer """no""" +424 58 optimizer """adadelta""" +424 58 training_loop """lcwa""" +424 58 evaluator """rankbased""" +424 59 dataset """kinships""" +424 59 model """ntn""" +424 59 loss """softplus""" +424 59 regularizer """no""" +424 59 optimizer """adadelta""" +424 59 training_loop """lcwa""" +424 59 evaluator """rankbased""" +424 60 dataset """kinships""" 
+424 60 model """ntn""" +424 60 loss """softplus""" +424 60 regularizer """no""" +424 60 optimizer """adadelta""" +424 60 training_loop """lcwa""" +424 60 evaluator """rankbased""" +424 61 dataset """kinships""" +424 61 model """ntn""" +424 61 loss """softplus""" +424 61 regularizer """no""" +424 61 optimizer """adadelta""" +424 61 training_loop """lcwa""" +424 61 evaluator """rankbased""" +424 62 dataset """kinships""" +424 62 model """ntn""" +424 62 loss """softplus""" +424 62 regularizer """no""" +424 62 optimizer """adadelta""" +424 62 training_loop """lcwa""" +424 62 evaluator """rankbased""" +424 63 dataset """kinships""" +424 63 model """ntn""" +424 63 loss """softplus""" +424 63 regularizer """no""" +424 63 optimizer """adadelta""" +424 63 training_loop """lcwa""" +424 63 evaluator """rankbased""" +424 64 dataset """kinships""" +424 64 model """ntn""" +424 64 loss """softplus""" +424 64 regularizer """no""" +424 64 optimizer """adadelta""" +424 64 training_loop """lcwa""" +424 64 evaluator """rankbased""" +424 65 dataset """kinships""" +424 65 model """ntn""" +424 65 loss """softplus""" +424 65 regularizer """no""" +424 65 optimizer """adadelta""" +424 65 training_loop """lcwa""" +424 65 evaluator """rankbased""" +424 66 dataset """kinships""" +424 66 model """ntn""" +424 66 loss """softplus""" +424 66 regularizer """no""" +424 66 optimizer """adadelta""" +424 66 training_loop """lcwa""" +424 66 evaluator """rankbased""" +424 67 dataset """kinships""" +424 67 model """ntn""" +424 67 loss """softplus""" +424 67 regularizer """no""" +424 67 optimizer """adadelta""" +424 67 training_loop """lcwa""" +424 67 evaluator """rankbased""" +424 68 dataset """kinships""" +424 68 model """ntn""" +424 68 loss """softplus""" +424 68 regularizer """no""" +424 68 optimizer """adadelta""" +424 68 training_loop """lcwa""" +424 68 evaluator """rankbased""" +424 69 dataset """kinships""" +424 69 model """ntn""" +424 69 loss """softplus""" +424 69 regularizer """no""" +424 69 
optimizer """adadelta""" +424 69 training_loop """lcwa""" +424 69 evaluator """rankbased""" +424 70 dataset """kinships""" +424 70 model """ntn""" +424 70 loss """softplus""" +424 70 regularizer """no""" +424 70 optimizer """adadelta""" +424 70 training_loop """lcwa""" +424 70 evaluator """rankbased""" +424 71 dataset """kinships""" +424 71 model """ntn""" +424 71 loss """softplus""" +424 71 regularizer """no""" +424 71 optimizer """adadelta""" +424 71 training_loop """lcwa""" +424 71 evaluator """rankbased""" +424 72 dataset """kinships""" +424 72 model """ntn""" +424 72 loss """softplus""" +424 72 regularizer """no""" +424 72 optimizer """adadelta""" +424 72 training_loop """lcwa""" +424 72 evaluator """rankbased""" +424 73 dataset """kinships""" +424 73 model """ntn""" +424 73 loss """softplus""" +424 73 regularizer """no""" +424 73 optimizer """adadelta""" +424 73 training_loop """lcwa""" +424 73 evaluator """rankbased""" +424 74 dataset """kinships""" +424 74 model """ntn""" +424 74 loss """softplus""" +424 74 regularizer """no""" +424 74 optimizer """adadelta""" +424 74 training_loop """lcwa""" +424 74 evaluator """rankbased""" +424 75 dataset """kinships""" +424 75 model """ntn""" +424 75 loss """softplus""" +424 75 regularizer """no""" +424 75 optimizer """adadelta""" +424 75 training_loop """lcwa""" +424 75 evaluator """rankbased""" +424 76 dataset """kinships""" +424 76 model """ntn""" +424 76 loss """softplus""" +424 76 regularizer """no""" +424 76 optimizer """adadelta""" +424 76 training_loop """lcwa""" +424 76 evaluator """rankbased""" +424 77 dataset """kinships""" +424 77 model """ntn""" +424 77 loss """softplus""" +424 77 regularizer """no""" +424 77 optimizer """adadelta""" +424 77 training_loop """lcwa""" +424 77 evaluator """rankbased""" +424 78 dataset """kinships""" +424 78 model """ntn""" +424 78 loss """softplus""" +424 78 regularizer """no""" +424 78 optimizer """adadelta""" +424 78 training_loop """lcwa""" +424 78 evaluator """rankbased""" 
+424 79 dataset """kinships""" +424 79 model """ntn""" +424 79 loss """softplus""" +424 79 regularizer """no""" +424 79 optimizer """adadelta""" +424 79 training_loop """lcwa""" +424 79 evaluator """rankbased""" +424 80 dataset """kinships""" +424 80 model """ntn""" +424 80 loss """softplus""" +424 80 regularizer """no""" +424 80 optimizer """adadelta""" +424 80 training_loop """lcwa""" +424 80 evaluator """rankbased""" +424 81 dataset """kinships""" +424 81 model """ntn""" +424 81 loss """softplus""" +424 81 regularizer """no""" +424 81 optimizer """adadelta""" +424 81 training_loop """lcwa""" +424 81 evaluator """rankbased""" +424 82 dataset """kinships""" +424 82 model """ntn""" +424 82 loss """softplus""" +424 82 regularizer """no""" +424 82 optimizer """adadelta""" +424 82 training_loop """lcwa""" +424 82 evaluator """rankbased""" +424 83 dataset """kinships""" +424 83 model """ntn""" +424 83 loss """softplus""" +424 83 regularizer """no""" +424 83 optimizer """adadelta""" +424 83 training_loop """lcwa""" +424 83 evaluator """rankbased""" +424 84 dataset """kinships""" +424 84 model """ntn""" +424 84 loss """softplus""" +424 84 regularizer """no""" +424 84 optimizer """adadelta""" +424 84 training_loop """lcwa""" +424 84 evaluator """rankbased""" +424 85 dataset """kinships""" +424 85 model """ntn""" +424 85 loss """softplus""" +424 85 regularizer """no""" +424 85 optimizer """adadelta""" +424 85 training_loop """lcwa""" +424 85 evaluator """rankbased""" +424 86 dataset """kinships""" +424 86 model """ntn""" +424 86 loss """softplus""" +424 86 regularizer """no""" +424 86 optimizer """adadelta""" +424 86 training_loop """lcwa""" +424 86 evaluator """rankbased""" +424 87 dataset """kinships""" +424 87 model """ntn""" +424 87 loss """softplus""" +424 87 regularizer """no""" +424 87 optimizer """adadelta""" +424 87 training_loop """lcwa""" +424 87 evaluator """rankbased""" +424 88 dataset """kinships""" +424 88 model """ntn""" +424 88 loss """softplus""" +424 88 
regularizer """no""" +424 88 optimizer """adadelta""" +424 88 training_loop """lcwa""" +424 88 evaluator """rankbased""" +424 89 dataset """kinships""" +424 89 model """ntn""" +424 89 loss """softplus""" +424 89 regularizer """no""" +424 89 optimizer """adadelta""" +424 89 training_loop """lcwa""" +424 89 evaluator """rankbased""" +424 90 dataset """kinships""" +424 90 model """ntn""" +424 90 loss """softplus""" +424 90 regularizer """no""" +424 90 optimizer """adadelta""" +424 90 training_loop """lcwa""" +424 90 evaluator """rankbased""" +424 91 dataset """kinships""" +424 91 model """ntn""" +424 91 loss """softplus""" +424 91 regularizer """no""" +424 91 optimizer """adadelta""" +424 91 training_loop """lcwa""" +424 91 evaluator """rankbased""" +424 92 dataset """kinships""" +424 92 model """ntn""" +424 92 loss """softplus""" +424 92 regularizer """no""" +424 92 optimizer """adadelta""" +424 92 training_loop """lcwa""" +424 92 evaluator """rankbased""" +424 93 dataset """kinships""" +424 93 model """ntn""" +424 93 loss """softplus""" +424 93 regularizer """no""" +424 93 optimizer """adadelta""" +424 93 training_loop """lcwa""" +424 93 evaluator """rankbased""" +424 94 dataset """kinships""" +424 94 model """ntn""" +424 94 loss """softplus""" +424 94 regularizer """no""" +424 94 optimizer """adadelta""" +424 94 training_loop """lcwa""" +424 94 evaluator """rankbased""" +424 95 dataset """kinships""" +424 95 model """ntn""" +424 95 loss """softplus""" +424 95 regularizer """no""" +424 95 optimizer """adadelta""" +424 95 training_loop """lcwa""" +424 95 evaluator """rankbased""" +424 96 dataset """kinships""" +424 96 model """ntn""" +424 96 loss """softplus""" +424 96 regularizer """no""" +424 96 optimizer """adadelta""" +424 96 training_loop """lcwa""" +424 96 evaluator """rankbased""" +424 97 dataset """kinships""" +424 97 model """ntn""" +424 97 loss """softplus""" +424 97 regularizer """no""" +424 97 optimizer """adadelta""" +424 97 training_loop """lcwa""" +424 
97 evaluator """rankbased""" +424 98 dataset """kinships""" +424 98 model """ntn""" +424 98 loss """softplus""" +424 98 regularizer """no""" +424 98 optimizer """adadelta""" +424 98 training_loop """lcwa""" +424 98 evaluator """rankbased""" +424 99 dataset """kinships""" +424 99 model """ntn""" +424 99 loss """softplus""" +424 99 regularizer """no""" +424 99 optimizer """adadelta""" +424 99 training_loop """lcwa""" +424 99 evaluator """rankbased""" +424 100 dataset """kinships""" +424 100 model """ntn""" +424 100 loss """softplus""" +424 100 regularizer """no""" +424 100 optimizer """adadelta""" +424 100 training_loop """lcwa""" +424 100 evaluator """rankbased""" +425 1 model.embedding_dim 0.0 +425 1 negative_sampler.num_negs_per_pos 15.0 +425 1 training.batch_size 0.0 +425 2 model.embedding_dim 0.0 +425 2 negative_sampler.num_negs_per_pos 27.0 +425 2 training.batch_size 0.0 +425 3 model.embedding_dim 2.0 +425 3 negative_sampler.num_negs_per_pos 37.0 +425 3 training.batch_size 2.0 +425 4 model.embedding_dim 1.0 +425 4 negative_sampler.num_negs_per_pos 32.0 +425 4 training.batch_size 2.0 +425 5 model.embedding_dim 2.0 +425 5 negative_sampler.num_negs_per_pos 64.0 +425 5 training.batch_size 0.0 +425 6 model.embedding_dim 1.0 +425 6 negative_sampler.num_negs_per_pos 61.0 +425 6 training.batch_size 2.0 +425 7 model.embedding_dim 1.0 +425 7 negative_sampler.num_negs_per_pos 99.0 +425 7 training.batch_size 2.0 +425 8 model.embedding_dim 0.0 +425 8 negative_sampler.num_negs_per_pos 8.0 +425 8 training.batch_size 0.0 +425 9 model.embedding_dim 0.0 +425 9 negative_sampler.num_negs_per_pos 39.0 +425 9 training.batch_size 2.0 +425 10 model.embedding_dim 1.0 +425 10 negative_sampler.num_negs_per_pos 79.0 +425 10 training.batch_size 2.0 +425 11 model.embedding_dim 1.0 +425 11 negative_sampler.num_negs_per_pos 8.0 +425 11 training.batch_size 2.0 +425 12 model.embedding_dim 0.0 +425 12 negative_sampler.num_negs_per_pos 24.0 +425 12 training.batch_size 1.0 +425 13 
model.embedding_dim 0.0 +425 13 negative_sampler.num_negs_per_pos 22.0 +425 13 training.batch_size 1.0 +425 14 model.embedding_dim 2.0 +425 14 negative_sampler.num_negs_per_pos 67.0 +425 14 training.batch_size 2.0 +425 15 model.embedding_dim 2.0 +425 15 negative_sampler.num_negs_per_pos 49.0 +425 15 training.batch_size 0.0 +425 16 model.embedding_dim 2.0 +425 16 negative_sampler.num_negs_per_pos 16.0 +425 16 training.batch_size 1.0 +425 17 model.embedding_dim 2.0 +425 17 negative_sampler.num_negs_per_pos 77.0 +425 17 training.batch_size 1.0 +425 18 model.embedding_dim 1.0 +425 18 negative_sampler.num_negs_per_pos 70.0 +425 18 training.batch_size 1.0 +425 19 model.embedding_dim 2.0 +425 19 negative_sampler.num_negs_per_pos 81.0 +425 19 training.batch_size 2.0 +425 20 model.embedding_dim 0.0 +425 20 negative_sampler.num_negs_per_pos 93.0 +425 20 training.batch_size 0.0 +425 21 model.embedding_dim 1.0 +425 21 negative_sampler.num_negs_per_pos 64.0 +425 21 training.batch_size 0.0 +425 22 model.embedding_dim 1.0 +425 22 negative_sampler.num_negs_per_pos 73.0 +425 22 training.batch_size 0.0 +425 23 model.embedding_dim 2.0 +425 23 negative_sampler.num_negs_per_pos 83.0 +425 23 training.batch_size 2.0 +425 24 model.embedding_dim 2.0 +425 24 negative_sampler.num_negs_per_pos 96.0 +425 24 training.batch_size 2.0 +425 25 model.embedding_dim 1.0 +425 25 negative_sampler.num_negs_per_pos 5.0 +425 25 training.batch_size 2.0 +425 26 model.embedding_dim 1.0 +425 26 negative_sampler.num_negs_per_pos 94.0 +425 26 training.batch_size 2.0 +425 27 model.embedding_dim 1.0 +425 27 negative_sampler.num_negs_per_pos 74.0 +425 27 training.batch_size 2.0 +425 28 model.embedding_dim 1.0 +425 28 negative_sampler.num_negs_per_pos 34.0 +425 28 training.batch_size 0.0 +425 29 model.embedding_dim 2.0 +425 29 negative_sampler.num_negs_per_pos 39.0 +425 29 training.batch_size 0.0 +425 30 model.embedding_dim 2.0 +425 30 negative_sampler.num_negs_per_pos 27.0 +425 30 training.batch_size 1.0 +425 31 
model.embedding_dim 1.0 +425 31 negative_sampler.num_negs_per_pos 59.0 +425 31 training.batch_size 1.0 +425 32 model.embedding_dim 1.0 +425 32 negative_sampler.num_negs_per_pos 20.0 +425 32 training.batch_size 2.0 +425 33 model.embedding_dim 0.0 +425 33 negative_sampler.num_negs_per_pos 53.0 +425 33 training.batch_size 0.0 +425 34 model.embedding_dim 0.0 +425 34 negative_sampler.num_negs_per_pos 54.0 +425 34 training.batch_size 1.0 +425 35 model.embedding_dim 2.0 +425 35 negative_sampler.num_negs_per_pos 4.0 +425 35 training.batch_size 2.0 +425 36 model.embedding_dim 2.0 +425 36 negative_sampler.num_negs_per_pos 90.0 +425 36 training.batch_size 2.0 +425 37 model.embedding_dim 2.0 +425 37 negative_sampler.num_negs_per_pos 20.0 +425 37 training.batch_size 2.0 +425 38 model.embedding_dim 0.0 +425 38 negative_sampler.num_negs_per_pos 12.0 +425 38 training.batch_size 2.0 +425 39 model.embedding_dim 0.0 +425 39 negative_sampler.num_negs_per_pos 19.0 +425 39 training.batch_size 1.0 +425 40 model.embedding_dim 0.0 +425 40 negative_sampler.num_negs_per_pos 9.0 +425 40 training.batch_size 0.0 +425 41 model.embedding_dim 1.0 +425 41 negative_sampler.num_negs_per_pos 58.0 +425 41 training.batch_size 2.0 +425 42 model.embedding_dim 1.0 +425 42 negative_sampler.num_negs_per_pos 77.0 +425 42 training.batch_size 0.0 +425 43 model.embedding_dim 1.0 +425 43 negative_sampler.num_negs_per_pos 11.0 +425 43 training.batch_size 0.0 +425 44 model.embedding_dim 0.0 +425 44 negative_sampler.num_negs_per_pos 23.0 +425 44 training.batch_size 1.0 +425 45 model.embedding_dim 1.0 +425 45 negative_sampler.num_negs_per_pos 54.0 +425 45 training.batch_size 1.0 +425 46 model.embedding_dim 1.0 +425 46 negative_sampler.num_negs_per_pos 44.0 +425 46 training.batch_size 1.0 +425 47 model.embedding_dim 1.0 +425 47 negative_sampler.num_negs_per_pos 96.0 +425 47 training.batch_size 2.0 +425 48 model.embedding_dim 1.0 +425 48 negative_sampler.num_negs_per_pos 61.0 +425 48 training.batch_size 0.0 +425 49 
model.embedding_dim 0.0 +425 49 negative_sampler.num_negs_per_pos 13.0 +425 49 training.batch_size 2.0 +425 50 model.embedding_dim 0.0 +425 50 negative_sampler.num_negs_per_pos 99.0 +425 50 training.batch_size 1.0 +425 51 model.embedding_dim 2.0 +425 51 negative_sampler.num_negs_per_pos 32.0 +425 51 training.batch_size 2.0 +425 52 model.embedding_dim 1.0 +425 52 negative_sampler.num_negs_per_pos 62.0 +425 52 training.batch_size 1.0 +425 53 model.embedding_dim 0.0 +425 53 negative_sampler.num_negs_per_pos 11.0 +425 53 training.batch_size 2.0 +425 54 model.embedding_dim 0.0 +425 54 negative_sampler.num_negs_per_pos 12.0 +425 54 training.batch_size 1.0 +425 55 model.embedding_dim 1.0 +425 55 negative_sampler.num_negs_per_pos 8.0 +425 55 training.batch_size 1.0 +425 56 model.embedding_dim 0.0 +425 56 negative_sampler.num_negs_per_pos 24.0 +425 56 training.batch_size 2.0 +425 57 model.embedding_dim 1.0 +425 57 negative_sampler.num_negs_per_pos 71.0 +425 57 training.batch_size 1.0 +425 58 model.embedding_dim 1.0 +425 58 negative_sampler.num_negs_per_pos 26.0 +425 58 training.batch_size 2.0 +425 59 model.embedding_dim 1.0 +425 59 negative_sampler.num_negs_per_pos 72.0 +425 59 training.batch_size 0.0 +425 60 model.embedding_dim 0.0 +425 60 negative_sampler.num_negs_per_pos 40.0 +425 60 training.batch_size 2.0 +425 61 model.embedding_dim 1.0 +425 61 negative_sampler.num_negs_per_pos 18.0 +425 61 training.batch_size 2.0 +425 62 model.embedding_dim 2.0 +425 62 negative_sampler.num_negs_per_pos 63.0 +425 62 training.batch_size 2.0 +425 63 model.embedding_dim 1.0 +425 63 negative_sampler.num_negs_per_pos 1.0 +425 63 training.batch_size 1.0 +425 64 model.embedding_dim 2.0 +425 64 negative_sampler.num_negs_per_pos 36.0 +425 64 training.batch_size 2.0 +425 65 model.embedding_dim 2.0 +425 65 negative_sampler.num_negs_per_pos 21.0 +425 65 training.batch_size 2.0 +425 66 model.embedding_dim 0.0 +425 66 negative_sampler.num_negs_per_pos 32.0 +425 66 training.batch_size 2.0 +425 67 
model.embedding_dim 1.0 +425 67 negative_sampler.num_negs_per_pos 2.0 +425 67 training.batch_size 1.0 +425 68 model.embedding_dim 2.0 +425 68 negative_sampler.num_negs_per_pos 82.0 +425 68 training.batch_size 2.0 +425 69 model.embedding_dim 2.0 +425 69 negative_sampler.num_negs_per_pos 83.0 +425 69 training.batch_size 2.0 +425 1 dataset """kinships""" +425 1 model """ntn""" +425 1 loss """bceaftersigmoid""" +425 1 regularizer """no""" +425 1 optimizer """adadelta""" +425 1 training_loop """owa""" +425 1 negative_sampler """basic""" +425 1 evaluator """rankbased""" +425 2 dataset """kinships""" +425 2 model """ntn""" +425 2 loss """bceaftersigmoid""" +425 2 regularizer """no""" +425 2 optimizer """adadelta""" +425 2 training_loop """owa""" +425 2 negative_sampler """basic""" +425 2 evaluator """rankbased""" +425 3 dataset """kinships""" +425 3 model """ntn""" +425 3 loss """bceaftersigmoid""" +425 3 regularizer """no""" +425 3 optimizer """adadelta""" +425 3 training_loop """owa""" +425 3 negative_sampler """basic""" +425 3 evaluator """rankbased""" +425 4 dataset """kinships""" +425 4 model """ntn""" +425 4 loss """bceaftersigmoid""" +425 4 regularizer """no""" +425 4 optimizer """adadelta""" +425 4 training_loop """owa""" +425 4 negative_sampler """basic""" +425 4 evaluator """rankbased""" +425 5 dataset """kinships""" +425 5 model """ntn""" +425 5 loss """bceaftersigmoid""" +425 5 regularizer """no""" +425 5 optimizer """adadelta""" +425 5 training_loop """owa""" +425 5 negative_sampler """basic""" +425 5 evaluator """rankbased""" +425 6 dataset """kinships""" +425 6 model """ntn""" +425 6 loss """bceaftersigmoid""" +425 6 regularizer """no""" +425 6 optimizer """adadelta""" +425 6 training_loop """owa""" +425 6 negative_sampler """basic""" +425 6 evaluator """rankbased""" +425 7 dataset """kinships""" +425 7 model """ntn""" +425 7 loss """bceaftersigmoid""" +425 7 regularizer """no""" +425 7 optimizer """adadelta""" +425 7 training_loop """owa""" +425 7 
negative_sampler """basic""" +425 7 evaluator """rankbased""" +425 8 dataset """kinships""" +425 8 model """ntn""" +425 8 loss """bceaftersigmoid""" +425 8 regularizer """no""" +425 8 optimizer """adadelta""" +425 8 training_loop """owa""" +425 8 negative_sampler """basic""" +425 8 evaluator """rankbased""" +425 9 dataset """kinships""" +425 9 model """ntn""" +425 9 loss """bceaftersigmoid""" +425 9 regularizer """no""" +425 9 optimizer """adadelta""" +425 9 training_loop """owa""" +425 9 negative_sampler """basic""" +425 9 evaluator """rankbased""" +425 10 dataset """kinships""" +425 10 model """ntn""" +425 10 loss """bceaftersigmoid""" +425 10 regularizer """no""" +425 10 optimizer """adadelta""" +425 10 training_loop """owa""" +425 10 negative_sampler """basic""" +425 10 evaluator """rankbased""" +425 11 dataset """kinships""" +425 11 model """ntn""" +425 11 loss """bceaftersigmoid""" +425 11 regularizer """no""" +425 11 optimizer """adadelta""" +425 11 training_loop """owa""" +425 11 negative_sampler """basic""" +425 11 evaluator """rankbased""" +425 12 dataset """kinships""" +425 12 model """ntn""" +425 12 loss """bceaftersigmoid""" +425 12 regularizer """no""" +425 12 optimizer """adadelta""" +425 12 training_loop """owa""" +425 12 negative_sampler """basic""" +425 12 evaluator """rankbased""" +425 13 dataset """kinships""" +425 13 model """ntn""" +425 13 loss """bceaftersigmoid""" +425 13 regularizer """no""" +425 13 optimizer """adadelta""" +425 13 training_loop """owa""" +425 13 negative_sampler """basic""" +425 13 evaluator """rankbased""" +425 14 dataset """kinships""" +425 14 model """ntn""" +425 14 loss """bceaftersigmoid""" +425 14 regularizer """no""" +425 14 optimizer """adadelta""" +425 14 training_loop """owa""" +425 14 negative_sampler """basic""" +425 14 evaluator """rankbased""" +425 15 dataset """kinships""" +425 15 model """ntn""" +425 15 loss """bceaftersigmoid""" +425 15 regularizer """no""" +425 15 optimizer """adadelta""" +425 15 
training_loop """owa""" +425 15 negative_sampler """basic""" +425 15 evaluator """rankbased""" +425 16 dataset """kinships""" +425 16 model """ntn""" +425 16 loss """bceaftersigmoid""" +425 16 regularizer """no""" +425 16 optimizer """adadelta""" +425 16 training_loop """owa""" +425 16 negative_sampler """basic""" +425 16 evaluator """rankbased""" +425 17 dataset """kinships""" +425 17 model """ntn""" +425 17 loss """bceaftersigmoid""" +425 17 regularizer """no""" +425 17 optimizer """adadelta""" +425 17 training_loop """owa""" +425 17 negative_sampler """basic""" +425 17 evaluator """rankbased""" +425 18 dataset """kinships""" +425 18 model """ntn""" +425 18 loss """bceaftersigmoid""" +425 18 regularizer """no""" +425 18 optimizer """adadelta""" +425 18 training_loop """owa""" +425 18 negative_sampler """basic""" +425 18 evaluator """rankbased""" +425 19 dataset """kinships""" +425 19 model """ntn""" +425 19 loss """bceaftersigmoid""" +425 19 regularizer """no""" +425 19 optimizer """adadelta""" +425 19 training_loop """owa""" +425 19 negative_sampler """basic""" +425 19 evaluator """rankbased""" +425 20 dataset """kinships""" +425 20 model """ntn""" +425 20 loss """bceaftersigmoid""" +425 20 regularizer """no""" +425 20 optimizer """adadelta""" +425 20 training_loop """owa""" +425 20 negative_sampler """basic""" +425 20 evaluator """rankbased""" +425 21 dataset """kinships""" +425 21 model """ntn""" +425 21 loss """bceaftersigmoid""" +425 21 regularizer """no""" +425 21 optimizer """adadelta""" +425 21 training_loop """owa""" +425 21 negative_sampler """basic""" +425 21 evaluator """rankbased""" +425 22 dataset """kinships""" +425 22 model """ntn""" +425 22 loss """bceaftersigmoid""" +425 22 regularizer """no""" +425 22 optimizer """adadelta""" +425 22 training_loop """owa""" +425 22 negative_sampler """basic""" +425 22 evaluator """rankbased""" +425 23 dataset """kinships""" +425 23 model """ntn""" +425 23 loss """bceaftersigmoid""" +425 23 regularizer """no""" 
+425 23 optimizer """adadelta""" +425 23 training_loop """owa""" +425 23 negative_sampler """basic""" +425 23 evaluator """rankbased""" +425 24 dataset """kinships""" +425 24 model """ntn""" +425 24 loss """bceaftersigmoid""" +425 24 regularizer """no""" +425 24 optimizer """adadelta""" +425 24 training_loop """owa""" +425 24 negative_sampler """basic""" +425 24 evaluator """rankbased""" +425 25 dataset """kinships""" +425 25 model """ntn""" +425 25 loss """bceaftersigmoid""" +425 25 regularizer """no""" +425 25 optimizer """adadelta""" +425 25 training_loop """owa""" +425 25 negative_sampler """basic""" +425 25 evaluator """rankbased""" +425 26 dataset """kinships""" +425 26 model """ntn""" +425 26 loss """bceaftersigmoid""" +425 26 regularizer """no""" +425 26 optimizer """adadelta""" +425 26 training_loop """owa""" +425 26 negative_sampler """basic""" +425 26 evaluator """rankbased""" +425 27 dataset """kinships""" +425 27 model """ntn""" +425 27 loss """bceaftersigmoid""" +425 27 regularizer """no""" +425 27 optimizer """adadelta""" +425 27 training_loop """owa""" +425 27 negative_sampler """basic""" +425 27 evaluator """rankbased""" +425 28 dataset """kinships""" +425 28 model """ntn""" +425 28 loss """bceaftersigmoid""" +425 28 regularizer """no""" +425 28 optimizer """adadelta""" +425 28 training_loop """owa""" +425 28 negative_sampler """basic""" +425 28 evaluator """rankbased""" +425 29 dataset """kinships""" +425 29 model """ntn""" +425 29 loss """bceaftersigmoid""" +425 29 regularizer """no""" +425 29 optimizer """adadelta""" +425 29 training_loop """owa""" +425 29 negative_sampler """basic""" +425 29 evaluator """rankbased""" +425 30 dataset """kinships""" +425 30 model """ntn""" +425 30 loss """bceaftersigmoid""" +425 30 regularizer """no""" +425 30 optimizer """adadelta""" +425 30 training_loop """owa""" +425 30 negative_sampler """basic""" +425 30 evaluator """rankbased""" +425 31 dataset """kinships""" +425 31 model """ntn""" +425 31 loss 
"""bceaftersigmoid""" +425 31 regularizer """no""" +425 31 optimizer """adadelta""" +425 31 training_loop """owa""" +425 31 negative_sampler """basic""" +425 31 evaluator """rankbased""" +425 32 dataset """kinships""" +425 32 model """ntn""" +425 32 loss """bceaftersigmoid""" +425 32 regularizer """no""" +425 32 optimizer """adadelta""" +425 32 training_loop """owa""" +425 32 negative_sampler """basic""" +425 32 evaluator """rankbased""" +425 33 dataset """kinships""" +425 33 model """ntn""" +425 33 loss """bceaftersigmoid""" +425 33 regularizer """no""" +425 33 optimizer """adadelta""" +425 33 training_loop """owa""" +425 33 negative_sampler """basic""" +425 33 evaluator """rankbased""" +425 34 dataset """kinships""" +425 34 model """ntn""" +425 34 loss """bceaftersigmoid""" +425 34 regularizer """no""" +425 34 optimizer """adadelta""" +425 34 training_loop """owa""" +425 34 negative_sampler """basic""" +425 34 evaluator """rankbased""" +425 35 dataset """kinships""" +425 35 model """ntn""" +425 35 loss """bceaftersigmoid""" +425 35 regularizer """no""" +425 35 optimizer """adadelta""" +425 35 training_loop """owa""" +425 35 negative_sampler """basic""" +425 35 evaluator """rankbased""" +425 36 dataset """kinships""" +425 36 model """ntn""" +425 36 loss """bceaftersigmoid""" +425 36 regularizer """no""" +425 36 optimizer """adadelta""" +425 36 training_loop """owa""" +425 36 negative_sampler """basic""" +425 36 evaluator """rankbased""" +425 37 dataset """kinships""" +425 37 model """ntn""" +425 37 loss """bceaftersigmoid""" +425 37 regularizer """no""" +425 37 optimizer """adadelta""" +425 37 training_loop """owa""" +425 37 negative_sampler """basic""" +425 37 evaluator """rankbased""" +425 38 dataset """kinships""" +425 38 model """ntn""" +425 38 loss """bceaftersigmoid""" +425 38 regularizer """no""" +425 38 optimizer """adadelta""" +425 38 training_loop """owa""" +425 38 negative_sampler """basic""" +425 38 evaluator """rankbased""" +425 39 dataset 
"""kinships""" +425 39 model """ntn""" +425 39 loss """bceaftersigmoid""" +425 39 regularizer """no""" +425 39 optimizer """adadelta""" +425 39 training_loop """owa""" +425 39 negative_sampler """basic""" +425 39 evaluator """rankbased""" +425 40 dataset """kinships""" +425 40 model """ntn""" +425 40 loss """bceaftersigmoid""" +425 40 regularizer """no""" +425 40 optimizer """adadelta""" +425 40 training_loop """owa""" +425 40 negative_sampler """basic""" +425 40 evaluator """rankbased""" +425 41 dataset """kinships""" +425 41 model """ntn""" +425 41 loss """bceaftersigmoid""" +425 41 regularizer """no""" +425 41 optimizer """adadelta""" +425 41 training_loop """owa""" +425 41 negative_sampler """basic""" +425 41 evaluator """rankbased""" +425 42 dataset """kinships""" +425 42 model """ntn""" +425 42 loss """bceaftersigmoid""" +425 42 regularizer """no""" +425 42 optimizer """adadelta""" +425 42 training_loop """owa""" +425 42 negative_sampler """basic""" +425 42 evaluator """rankbased""" +425 43 dataset """kinships""" +425 43 model """ntn""" +425 43 loss """bceaftersigmoid""" +425 43 regularizer """no""" +425 43 optimizer """adadelta""" +425 43 training_loop """owa""" +425 43 negative_sampler """basic""" +425 43 evaluator """rankbased""" +425 44 dataset """kinships""" +425 44 model """ntn""" +425 44 loss """bceaftersigmoid""" +425 44 regularizer """no""" +425 44 optimizer """adadelta""" +425 44 training_loop """owa""" +425 44 negative_sampler """basic""" +425 44 evaluator """rankbased""" +425 45 dataset """kinships""" +425 45 model """ntn""" +425 45 loss """bceaftersigmoid""" +425 45 regularizer """no""" +425 45 optimizer """adadelta""" +425 45 training_loop """owa""" +425 45 negative_sampler """basic""" +425 45 evaluator """rankbased""" +425 46 dataset """kinships""" +425 46 model """ntn""" +425 46 loss """bceaftersigmoid""" +425 46 regularizer """no""" +425 46 optimizer """adadelta""" +425 46 training_loop """owa""" +425 46 negative_sampler """basic""" +425 46 
evaluator """rankbased""" +425 47 dataset """kinships""" +425 47 model """ntn""" +425 47 loss """bceaftersigmoid""" +425 47 regularizer """no""" +425 47 optimizer """adadelta""" +425 47 training_loop """owa""" +425 47 negative_sampler """basic""" +425 47 evaluator """rankbased""" +425 48 dataset """kinships""" +425 48 model """ntn""" +425 48 loss """bceaftersigmoid""" +425 48 regularizer """no""" +425 48 optimizer """adadelta""" +425 48 training_loop """owa""" +425 48 negative_sampler """basic""" +425 48 evaluator """rankbased""" +425 49 dataset """kinships""" +425 49 model """ntn""" +425 49 loss """bceaftersigmoid""" +425 49 regularizer """no""" +425 49 optimizer """adadelta""" +425 49 training_loop """owa""" +425 49 negative_sampler """basic""" +425 49 evaluator """rankbased""" +425 50 dataset """kinships""" +425 50 model """ntn""" +425 50 loss """bceaftersigmoid""" +425 50 regularizer """no""" +425 50 optimizer """adadelta""" +425 50 training_loop """owa""" +425 50 negative_sampler """basic""" +425 50 evaluator """rankbased""" +425 51 dataset """kinships""" +425 51 model """ntn""" +425 51 loss """bceaftersigmoid""" +425 51 regularizer """no""" +425 51 optimizer """adadelta""" +425 51 training_loop """owa""" +425 51 negative_sampler """basic""" +425 51 evaluator """rankbased""" +425 52 dataset """kinships""" +425 52 model """ntn""" +425 52 loss """bceaftersigmoid""" +425 52 regularizer """no""" +425 52 optimizer """adadelta""" +425 52 training_loop """owa""" +425 52 negative_sampler """basic""" +425 52 evaluator """rankbased""" +425 53 dataset """kinships""" +425 53 model """ntn""" +425 53 loss """bceaftersigmoid""" +425 53 regularizer """no""" +425 53 optimizer """adadelta""" +425 53 training_loop """owa""" +425 53 negative_sampler """basic""" +425 53 evaluator """rankbased""" +425 54 dataset """kinships""" +425 54 model """ntn""" +425 54 loss """bceaftersigmoid""" +425 54 regularizer """no""" +425 54 optimizer """adadelta""" +425 54 training_loop """owa""" +425 
54 negative_sampler """basic""" +425 54 evaluator """rankbased""" +425 55 dataset """kinships""" +425 55 model """ntn""" +425 55 loss """bceaftersigmoid""" +425 55 regularizer """no""" +425 55 optimizer """adadelta""" +425 55 training_loop """owa""" +425 55 negative_sampler """basic""" +425 55 evaluator """rankbased""" +425 56 dataset """kinships""" +425 56 model """ntn""" +425 56 loss """bceaftersigmoid""" +425 56 regularizer """no""" +425 56 optimizer """adadelta""" +425 56 training_loop """owa""" +425 56 negative_sampler """basic""" +425 56 evaluator """rankbased""" +425 57 dataset """kinships""" +425 57 model """ntn""" +425 57 loss """bceaftersigmoid""" +425 57 regularizer """no""" +425 57 optimizer """adadelta""" +425 57 training_loop """owa""" +425 57 negative_sampler """basic""" +425 57 evaluator """rankbased""" +425 58 dataset """kinships""" +425 58 model """ntn""" +425 58 loss """bceaftersigmoid""" +425 58 regularizer """no""" +425 58 optimizer """adadelta""" +425 58 training_loop """owa""" +425 58 negative_sampler """basic""" +425 58 evaluator """rankbased""" +425 59 dataset """kinships""" +425 59 model """ntn""" +425 59 loss """bceaftersigmoid""" +425 59 regularizer """no""" +425 59 optimizer """adadelta""" +425 59 training_loop """owa""" +425 59 negative_sampler """basic""" +425 59 evaluator """rankbased""" +425 60 dataset """kinships""" +425 60 model """ntn""" +425 60 loss """bceaftersigmoid""" +425 60 regularizer """no""" +425 60 optimizer """adadelta""" +425 60 training_loop """owa""" +425 60 negative_sampler """basic""" +425 60 evaluator """rankbased""" +425 61 dataset """kinships""" +425 61 model """ntn""" +425 61 loss """bceaftersigmoid""" +425 61 regularizer """no""" +425 61 optimizer """adadelta""" +425 61 training_loop """owa""" +425 61 negative_sampler """basic""" +425 61 evaluator """rankbased""" +425 62 dataset """kinships""" +425 62 model """ntn""" +425 62 loss """bceaftersigmoid""" +425 62 regularizer """no""" +425 62 optimizer 
"""adadelta""" +425 62 training_loop """owa""" +425 62 negative_sampler """basic""" +425 62 evaluator """rankbased""" +425 63 dataset """kinships""" +425 63 model """ntn""" +425 63 loss """bceaftersigmoid""" +425 63 regularizer """no""" +425 63 optimizer """adadelta""" +425 63 training_loop """owa""" +425 63 negative_sampler """basic""" +425 63 evaluator """rankbased""" +425 64 dataset """kinships""" +425 64 model """ntn""" +425 64 loss """bceaftersigmoid""" +425 64 regularizer """no""" +425 64 optimizer """adadelta""" +425 64 training_loop """owa""" +425 64 negative_sampler """basic""" +425 64 evaluator """rankbased""" +425 65 dataset """kinships""" +425 65 model """ntn""" +425 65 loss """bceaftersigmoid""" +425 65 regularizer """no""" +425 65 optimizer """adadelta""" +425 65 training_loop """owa""" +425 65 negative_sampler """basic""" +425 65 evaluator """rankbased""" +425 66 dataset """kinships""" +425 66 model """ntn""" +425 66 loss """bceaftersigmoid""" +425 66 regularizer """no""" +425 66 optimizer """adadelta""" +425 66 training_loop """owa""" +425 66 negative_sampler """basic""" +425 66 evaluator """rankbased""" +425 67 dataset """kinships""" +425 67 model """ntn""" +425 67 loss """bceaftersigmoid""" +425 67 regularizer """no""" +425 67 optimizer """adadelta""" +425 67 training_loop """owa""" +425 67 negative_sampler """basic""" +425 67 evaluator """rankbased""" +425 68 dataset """kinships""" +425 68 model """ntn""" +425 68 loss """bceaftersigmoid""" +425 68 regularizer """no""" +425 68 optimizer """adadelta""" +425 68 training_loop """owa""" +425 68 negative_sampler """basic""" +425 68 evaluator """rankbased""" +425 69 dataset """kinships""" +425 69 model """ntn""" +425 69 loss """bceaftersigmoid""" +425 69 regularizer """no""" +425 69 optimizer """adadelta""" +425 69 training_loop """owa""" +425 69 negative_sampler """basic""" +425 69 evaluator """rankbased""" +426 1 model.embedding_dim 1.0 +426 1 negative_sampler.num_negs_per_pos 39.0 +426 1 
training.batch_size 1.0 +426 2 model.embedding_dim 1.0 +426 2 negative_sampler.num_negs_per_pos 2.0 +426 2 training.batch_size 0.0 +426 3 model.embedding_dim 1.0 +426 3 negative_sampler.num_negs_per_pos 72.0 +426 3 training.batch_size 1.0 +426 4 model.embedding_dim 0.0 +426 4 negative_sampler.num_negs_per_pos 90.0 +426 4 training.batch_size 0.0 +426 5 model.embedding_dim 0.0 +426 5 negative_sampler.num_negs_per_pos 73.0 +426 5 training.batch_size 2.0 +426 6 model.embedding_dim 2.0 +426 6 negative_sampler.num_negs_per_pos 35.0 +426 6 training.batch_size 1.0 +426 7 model.embedding_dim 0.0 +426 7 negative_sampler.num_negs_per_pos 64.0 +426 7 training.batch_size 2.0 +426 8 model.embedding_dim 2.0 +426 8 negative_sampler.num_negs_per_pos 96.0 +426 8 training.batch_size 1.0 +426 9 model.embedding_dim 0.0 +426 9 negative_sampler.num_negs_per_pos 38.0 +426 9 training.batch_size 0.0 +426 10 model.embedding_dim 2.0 +426 10 negative_sampler.num_negs_per_pos 29.0 +426 10 training.batch_size 2.0 +426 11 model.embedding_dim 2.0 +426 11 negative_sampler.num_negs_per_pos 4.0 +426 11 training.batch_size 1.0 +426 12 model.embedding_dim 1.0 +426 12 negative_sampler.num_negs_per_pos 69.0 +426 12 training.batch_size 0.0 +426 13 model.embedding_dim 2.0 +426 13 negative_sampler.num_negs_per_pos 93.0 +426 13 training.batch_size 2.0 +426 14 model.embedding_dim 1.0 +426 14 negative_sampler.num_negs_per_pos 76.0 +426 14 training.batch_size 2.0 +426 15 model.embedding_dim 0.0 +426 15 negative_sampler.num_negs_per_pos 64.0 +426 15 training.batch_size 1.0 +426 16 model.embedding_dim 1.0 +426 16 negative_sampler.num_negs_per_pos 9.0 +426 16 training.batch_size 2.0 +426 17 model.embedding_dim 0.0 +426 17 negative_sampler.num_negs_per_pos 89.0 +426 17 training.batch_size 1.0 +426 18 model.embedding_dim 0.0 +426 18 negative_sampler.num_negs_per_pos 87.0 +426 18 training.batch_size 0.0 +426 19 model.embedding_dim 1.0 +426 19 negative_sampler.num_negs_per_pos 43.0 +426 19 training.batch_size 0.0 +426 
20 model.embedding_dim 1.0 +426 20 negative_sampler.num_negs_per_pos 97.0 +426 20 training.batch_size 1.0 +426 21 model.embedding_dim 0.0 +426 21 negative_sampler.num_negs_per_pos 68.0 +426 21 training.batch_size 2.0 +426 22 model.embedding_dim 1.0 +426 22 negative_sampler.num_negs_per_pos 26.0 +426 22 training.batch_size 1.0 +426 23 model.embedding_dim 2.0 +426 23 negative_sampler.num_negs_per_pos 24.0 +426 23 training.batch_size 0.0 +426 24 model.embedding_dim 2.0 +426 24 negative_sampler.num_negs_per_pos 85.0 +426 24 training.batch_size 1.0 +426 25 model.embedding_dim 1.0 +426 25 negative_sampler.num_negs_per_pos 8.0 +426 25 training.batch_size 0.0 +426 26 model.embedding_dim 2.0 +426 26 negative_sampler.num_negs_per_pos 30.0 +426 26 training.batch_size 1.0 +426 27 model.embedding_dim 2.0 +426 27 negative_sampler.num_negs_per_pos 59.0 +426 27 training.batch_size 0.0 +426 28 model.embedding_dim 0.0 +426 28 negative_sampler.num_negs_per_pos 38.0 +426 28 training.batch_size 1.0 +426 29 model.embedding_dim 2.0 +426 29 negative_sampler.num_negs_per_pos 13.0 +426 29 training.batch_size 1.0 +426 30 model.embedding_dim 0.0 +426 30 negative_sampler.num_negs_per_pos 50.0 +426 30 training.batch_size 0.0 +426 31 model.embedding_dim 1.0 +426 31 negative_sampler.num_negs_per_pos 63.0 +426 31 training.batch_size 2.0 +426 32 model.embedding_dim 2.0 +426 32 negative_sampler.num_negs_per_pos 60.0 +426 32 training.batch_size 0.0 +426 33 model.embedding_dim 1.0 +426 33 negative_sampler.num_negs_per_pos 85.0 +426 33 training.batch_size 2.0 +426 34 model.embedding_dim 0.0 +426 34 negative_sampler.num_negs_per_pos 4.0 +426 34 training.batch_size 0.0 +426 35 model.embedding_dim 1.0 +426 35 negative_sampler.num_negs_per_pos 9.0 +426 35 training.batch_size 2.0 +426 36 model.embedding_dim 0.0 +426 36 negative_sampler.num_negs_per_pos 89.0 +426 36 training.batch_size 1.0 +426 37 model.embedding_dim 1.0 +426 37 negative_sampler.num_negs_per_pos 6.0 +426 37 training.batch_size 2.0 +426 38 
model.embedding_dim 2.0 +426 38 negative_sampler.num_negs_per_pos 86.0 +426 38 training.batch_size 0.0 +426 39 model.embedding_dim 1.0 +426 39 negative_sampler.num_negs_per_pos 66.0 +426 39 training.batch_size 0.0 +426 40 model.embedding_dim 2.0 +426 40 negative_sampler.num_negs_per_pos 94.0 +426 40 training.batch_size 1.0 +426 41 model.embedding_dim 1.0 +426 41 negative_sampler.num_negs_per_pos 97.0 +426 41 training.batch_size 0.0 +426 42 model.embedding_dim 1.0 +426 42 negative_sampler.num_negs_per_pos 89.0 +426 42 training.batch_size 1.0 +426 43 model.embedding_dim 2.0 +426 43 negative_sampler.num_negs_per_pos 49.0 +426 43 training.batch_size 0.0 +426 44 model.embedding_dim 0.0 +426 44 negative_sampler.num_negs_per_pos 13.0 +426 44 training.batch_size 2.0 +426 45 model.embedding_dim 0.0 +426 45 negative_sampler.num_negs_per_pos 53.0 +426 45 training.batch_size 2.0 +426 46 model.embedding_dim 0.0 +426 46 negative_sampler.num_negs_per_pos 90.0 +426 46 training.batch_size 0.0 +426 47 model.embedding_dim 2.0 +426 47 negative_sampler.num_negs_per_pos 70.0 +426 47 training.batch_size 1.0 +426 48 model.embedding_dim 0.0 +426 48 negative_sampler.num_negs_per_pos 86.0 +426 48 training.batch_size 2.0 +426 49 model.embedding_dim 1.0 +426 49 negative_sampler.num_negs_per_pos 68.0 +426 49 training.batch_size 1.0 +426 50 model.embedding_dim 1.0 +426 50 negative_sampler.num_negs_per_pos 79.0 +426 50 training.batch_size 2.0 +426 51 model.embedding_dim 1.0 +426 51 negative_sampler.num_negs_per_pos 93.0 +426 51 training.batch_size 2.0 +426 52 model.embedding_dim 1.0 +426 52 negative_sampler.num_negs_per_pos 99.0 +426 52 training.batch_size 2.0 +426 53 model.embedding_dim 1.0 +426 53 negative_sampler.num_negs_per_pos 19.0 +426 53 training.batch_size 1.0 +426 54 model.embedding_dim 2.0 +426 54 negative_sampler.num_negs_per_pos 84.0 +426 54 training.batch_size 0.0 +426 55 model.embedding_dim 0.0 +426 55 negative_sampler.num_negs_per_pos 42.0 +426 55 training.batch_size 0.0 +426 56 
model.embedding_dim 0.0 +426 56 negative_sampler.num_negs_per_pos 34.0 +426 56 training.batch_size 1.0 +426 57 model.embedding_dim 2.0 +426 57 negative_sampler.num_negs_per_pos 51.0 +426 57 training.batch_size 1.0 +426 1 dataset """kinships""" +426 1 model """ntn""" +426 1 loss """softplus""" +426 1 regularizer """no""" +426 1 optimizer """adadelta""" +426 1 training_loop """owa""" +426 1 negative_sampler """basic""" +426 1 evaluator """rankbased""" +426 2 dataset """kinships""" +426 2 model """ntn""" +426 2 loss """softplus""" +426 2 regularizer """no""" +426 2 optimizer """adadelta""" +426 2 training_loop """owa""" +426 2 negative_sampler """basic""" +426 2 evaluator """rankbased""" +426 3 dataset """kinships""" +426 3 model """ntn""" +426 3 loss """softplus""" +426 3 regularizer """no""" +426 3 optimizer """adadelta""" +426 3 training_loop """owa""" +426 3 negative_sampler """basic""" +426 3 evaluator """rankbased""" +426 4 dataset """kinships""" +426 4 model """ntn""" +426 4 loss """softplus""" +426 4 regularizer """no""" +426 4 optimizer """adadelta""" +426 4 training_loop """owa""" +426 4 negative_sampler """basic""" +426 4 evaluator """rankbased""" +426 5 dataset """kinships""" +426 5 model """ntn""" +426 5 loss """softplus""" +426 5 regularizer """no""" +426 5 optimizer """adadelta""" +426 5 training_loop """owa""" +426 5 negative_sampler """basic""" +426 5 evaluator """rankbased""" +426 6 dataset """kinships""" +426 6 model """ntn""" +426 6 loss """softplus""" +426 6 regularizer """no""" +426 6 optimizer """adadelta""" +426 6 training_loop """owa""" +426 6 negative_sampler """basic""" +426 6 evaluator """rankbased""" +426 7 dataset """kinships""" +426 7 model """ntn""" +426 7 loss """softplus""" +426 7 regularizer """no""" +426 7 optimizer """adadelta""" +426 7 training_loop """owa""" +426 7 negative_sampler """basic""" +426 7 evaluator """rankbased""" +426 8 dataset """kinships""" +426 8 model """ntn""" +426 8 loss """softplus""" +426 8 regularizer 
"""no""" +426 8 optimizer """adadelta""" +426 8 training_loop """owa""" +426 8 negative_sampler """basic""" +426 8 evaluator """rankbased""" +426 9 dataset """kinships""" +426 9 model """ntn""" +426 9 loss """softplus""" +426 9 regularizer """no""" +426 9 optimizer """adadelta""" +426 9 training_loop """owa""" +426 9 negative_sampler """basic""" +426 9 evaluator """rankbased""" +426 10 dataset """kinships""" +426 10 model """ntn""" +426 10 loss """softplus""" +426 10 regularizer """no""" +426 10 optimizer """adadelta""" +426 10 training_loop """owa""" +426 10 negative_sampler """basic""" +426 10 evaluator """rankbased""" +426 11 dataset """kinships""" +426 11 model """ntn""" +426 11 loss """softplus""" +426 11 regularizer """no""" +426 11 optimizer """adadelta""" +426 11 training_loop """owa""" +426 11 negative_sampler """basic""" +426 11 evaluator """rankbased""" +426 12 dataset """kinships""" +426 12 model """ntn""" +426 12 loss """softplus""" +426 12 regularizer """no""" +426 12 optimizer """adadelta""" +426 12 training_loop """owa""" +426 12 negative_sampler """basic""" +426 12 evaluator """rankbased""" +426 13 dataset """kinships""" +426 13 model """ntn""" +426 13 loss """softplus""" +426 13 regularizer """no""" +426 13 optimizer """adadelta""" +426 13 training_loop """owa""" +426 13 negative_sampler """basic""" +426 13 evaluator """rankbased""" +426 14 dataset """kinships""" +426 14 model """ntn""" +426 14 loss """softplus""" +426 14 regularizer """no""" +426 14 optimizer """adadelta""" +426 14 training_loop """owa""" +426 14 negative_sampler """basic""" +426 14 evaluator """rankbased""" +426 15 dataset """kinships""" +426 15 model """ntn""" +426 15 loss """softplus""" +426 15 regularizer """no""" +426 15 optimizer """adadelta""" +426 15 training_loop """owa""" +426 15 negative_sampler """basic""" +426 15 evaluator """rankbased""" +426 16 dataset """kinships""" +426 16 model """ntn""" +426 16 loss """softplus""" +426 16 regularizer """no""" +426 16 optimizer 
"""adadelta""" +426 16 training_loop """owa""" +426 16 negative_sampler """basic""" +426 16 evaluator """rankbased""" +426 17 dataset """kinships""" +426 17 model """ntn""" +426 17 loss """softplus""" +426 17 regularizer """no""" +426 17 optimizer """adadelta""" +426 17 training_loop """owa""" +426 17 negative_sampler """basic""" +426 17 evaluator """rankbased""" +426 18 dataset """kinships""" +426 18 model """ntn""" +426 18 loss """softplus""" +426 18 regularizer """no""" +426 18 optimizer """adadelta""" +426 18 training_loop """owa""" +426 18 negative_sampler """basic""" +426 18 evaluator """rankbased""" +426 19 dataset """kinships""" +426 19 model """ntn""" +426 19 loss """softplus""" +426 19 regularizer """no""" +426 19 optimizer """adadelta""" +426 19 training_loop """owa""" +426 19 negative_sampler """basic""" +426 19 evaluator """rankbased""" +426 20 dataset """kinships""" +426 20 model """ntn""" +426 20 loss """softplus""" +426 20 regularizer """no""" +426 20 optimizer """adadelta""" +426 20 training_loop """owa""" +426 20 negative_sampler """basic""" +426 20 evaluator """rankbased""" +426 21 dataset """kinships""" +426 21 model """ntn""" +426 21 loss """softplus""" +426 21 regularizer """no""" +426 21 optimizer """adadelta""" +426 21 training_loop """owa""" +426 21 negative_sampler """basic""" +426 21 evaluator """rankbased""" +426 22 dataset """kinships""" +426 22 model """ntn""" +426 22 loss """softplus""" +426 22 regularizer """no""" +426 22 optimizer """adadelta""" +426 22 training_loop """owa""" +426 22 negative_sampler """basic""" +426 22 evaluator """rankbased""" +426 23 dataset """kinships""" +426 23 model """ntn""" +426 23 loss """softplus""" +426 23 regularizer """no""" +426 23 optimizer """adadelta""" +426 23 training_loop """owa""" +426 23 negative_sampler """basic""" +426 23 evaluator """rankbased""" +426 24 dataset """kinships""" +426 24 model """ntn""" +426 24 loss """softplus""" +426 24 regularizer """no""" +426 24 optimizer """adadelta""" 
+426 24 training_loop """owa""" +426 24 negative_sampler """basic""" +426 24 evaluator """rankbased""" +426 25 dataset """kinships""" +426 25 model """ntn""" +426 25 loss """softplus""" +426 25 regularizer """no""" +426 25 optimizer """adadelta""" +426 25 training_loop """owa""" +426 25 negative_sampler """basic""" +426 25 evaluator """rankbased""" +426 26 dataset """kinships""" +426 26 model """ntn""" +426 26 loss """softplus""" +426 26 regularizer """no""" +426 26 optimizer """adadelta""" +426 26 training_loop """owa""" +426 26 negative_sampler """basic""" +426 26 evaluator """rankbased""" +426 27 dataset """kinships""" +426 27 model """ntn""" +426 27 loss """softplus""" +426 27 regularizer """no""" +426 27 optimizer """adadelta""" +426 27 training_loop """owa""" +426 27 negative_sampler """basic""" +426 27 evaluator """rankbased""" +426 28 dataset """kinships""" +426 28 model """ntn""" +426 28 loss """softplus""" +426 28 regularizer """no""" +426 28 optimizer """adadelta""" +426 28 training_loop """owa""" +426 28 negative_sampler """basic""" +426 28 evaluator """rankbased""" +426 29 dataset """kinships""" +426 29 model """ntn""" +426 29 loss """softplus""" +426 29 regularizer """no""" +426 29 optimizer """adadelta""" +426 29 training_loop """owa""" +426 29 negative_sampler """basic""" +426 29 evaluator """rankbased""" +426 30 dataset """kinships""" +426 30 model """ntn""" +426 30 loss """softplus""" +426 30 regularizer """no""" +426 30 optimizer """adadelta""" +426 30 training_loop """owa""" +426 30 negative_sampler """basic""" +426 30 evaluator """rankbased""" +426 31 dataset """kinships""" +426 31 model """ntn""" +426 31 loss """softplus""" +426 31 regularizer """no""" +426 31 optimizer """adadelta""" +426 31 training_loop """owa""" +426 31 negative_sampler """basic""" +426 31 evaluator """rankbased""" +426 32 dataset """kinships""" +426 32 model """ntn""" +426 32 loss """softplus""" +426 32 regularizer """no""" +426 32 optimizer """adadelta""" +426 32 
training_loop """owa""" +426 32 negative_sampler """basic""" +426 32 evaluator """rankbased""" +426 33 dataset """kinships""" +426 33 model """ntn""" +426 33 loss """softplus""" +426 33 regularizer """no""" +426 33 optimizer """adadelta""" +426 33 training_loop """owa""" +426 33 negative_sampler """basic""" +426 33 evaluator """rankbased""" +426 34 dataset """kinships""" +426 34 model """ntn""" +426 34 loss """softplus""" +426 34 regularizer """no""" +426 34 optimizer """adadelta""" +426 34 training_loop """owa""" +426 34 negative_sampler """basic""" +426 34 evaluator """rankbased""" +426 35 dataset """kinships""" +426 35 model """ntn""" +426 35 loss """softplus""" +426 35 regularizer """no""" +426 35 optimizer """adadelta""" +426 35 training_loop """owa""" +426 35 negative_sampler """basic""" +426 35 evaluator """rankbased""" +426 36 dataset """kinships""" +426 36 model """ntn""" +426 36 loss """softplus""" +426 36 regularizer """no""" +426 36 optimizer """adadelta""" +426 36 training_loop """owa""" +426 36 negative_sampler """basic""" +426 36 evaluator """rankbased""" +426 37 dataset """kinships""" +426 37 model """ntn""" +426 37 loss """softplus""" +426 37 regularizer """no""" +426 37 optimizer """adadelta""" +426 37 training_loop """owa""" +426 37 negative_sampler """basic""" +426 37 evaluator """rankbased""" +426 38 dataset """kinships""" +426 38 model """ntn""" +426 38 loss """softplus""" +426 38 regularizer """no""" +426 38 optimizer """adadelta""" +426 38 training_loop """owa""" +426 38 negative_sampler """basic""" +426 38 evaluator """rankbased""" +426 39 dataset """kinships""" +426 39 model """ntn""" +426 39 loss """softplus""" +426 39 regularizer """no""" +426 39 optimizer """adadelta""" +426 39 training_loop """owa""" +426 39 negative_sampler """basic""" +426 39 evaluator """rankbased""" +426 40 dataset """kinships""" +426 40 model """ntn""" +426 40 loss """softplus""" +426 40 regularizer """no""" +426 40 optimizer """adadelta""" +426 40 training_loop 
"""owa""" +426 40 negative_sampler """basic""" +426 40 evaluator """rankbased""" +426 41 dataset """kinships""" +426 41 model """ntn""" +426 41 loss """softplus""" +426 41 regularizer """no""" +426 41 optimizer """adadelta""" +426 41 training_loop """owa""" +426 41 negative_sampler """basic""" +426 41 evaluator """rankbased""" +426 42 dataset """kinships""" +426 42 model """ntn""" +426 42 loss """softplus""" +426 42 regularizer """no""" +426 42 optimizer """adadelta""" +426 42 training_loop """owa""" +426 42 negative_sampler """basic""" +426 42 evaluator """rankbased""" +426 43 dataset """kinships""" +426 43 model """ntn""" +426 43 loss """softplus""" +426 43 regularizer """no""" +426 43 optimizer """adadelta""" +426 43 training_loop """owa""" +426 43 negative_sampler """basic""" +426 43 evaluator """rankbased""" +426 44 dataset """kinships""" +426 44 model """ntn""" +426 44 loss """softplus""" +426 44 regularizer """no""" +426 44 optimizer """adadelta""" +426 44 training_loop """owa""" +426 44 negative_sampler """basic""" +426 44 evaluator """rankbased""" +426 45 dataset """kinships""" +426 45 model """ntn""" +426 45 loss """softplus""" +426 45 regularizer """no""" +426 45 optimizer """adadelta""" +426 45 training_loop """owa""" +426 45 negative_sampler """basic""" +426 45 evaluator """rankbased""" +426 46 dataset """kinships""" +426 46 model """ntn""" +426 46 loss """softplus""" +426 46 regularizer """no""" +426 46 optimizer """adadelta""" +426 46 training_loop """owa""" +426 46 negative_sampler """basic""" +426 46 evaluator """rankbased""" +426 47 dataset """kinships""" +426 47 model """ntn""" +426 47 loss """softplus""" +426 47 regularizer """no""" +426 47 optimizer """adadelta""" +426 47 training_loop """owa""" +426 47 negative_sampler """basic""" +426 47 evaluator """rankbased""" +426 48 dataset """kinships""" +426 48 model """ntn""" +426 48 loss """softplus""" +426 48 regularizer """no""" +426 48 optimizer """adadelta""" +426 48 training_loop """owa""" +426 
48 negative_sampler """basic""" +426 48 evaluator """rankbased""" +426 49 dataset """kinships""" +426 49 model """ntn""" +426 49 loss """softplus""" +426 49 regularizer """no""" +426 49 optimizer """adadelta""" +426 49 training_loop """owa""" +426 49 negative_sampler """basic""" +426 49 evaluator """rankbased""" +426 50 dataset """kinships""" +426 50 model """ntn""" +426 50 loss """softplus""" +426 50 regularizer """no""" +426 50 optimizer """adadelta""" +426 50 training_loop """owa""" +426 50 negative_sampler """basic""" +426 50 evaluator """rankbased""" +426 51 dataset """kinships""" +426 51 model """ntn""" +426 51 loss """softplus""" +426 51 regularizer """no""" +426 51 optimizer """adadelta""" +426 51 training_loop """owa""" +426 51 negative_sampler """basic""" +426 51 evaluator """rankbased""" +426 52 dataset """kinships""" +426 52 model """ntn""" +426 52 loss """softplus""" +426 52 regularizer """no""" +426 52 optimizer """adadelta""" +426 52 training_loop """owa""" +426 52 negative_sampler """basic""" +426 52 evaluator """rankbased""" +426 53 dataset """kinships""" +426 53 model """ntn""" +426 53 loss """softplus""" +426 53 regularizer """no""" +426 53 optimizer """adadelta""" +426 53 training_loop """owa""" +426 53 negative_sampler """basic""" +426 53 evaluator """rankbased""" +426 54 dataset """kinships""" +426 54 model """ntn""" +426 54 loss """softplus""" +426 54 regularizer """no""" +426 54 optimizer """adadelta""" +426 54 training_loop """owa""" +426 54 negative_sampler """basic""" +426 54 evaluator """rankbased""" +426 55 dataset """kinships""" +426 55 model """ntn""" +426 55 loss """softplus""" +426 55 regularizer """no""" +426 55 optimizer """adadelta""" +426 55 training_loop """owa""" +426 55 negative_sampler """basic""" +426 55 evaluator """rankbased""" +426 56 dataset """kinships""" +426 56 model """ntn""" +426 56 loss """softplus""" +426 56 regularizer """no""" +426 56 optimizer """adadelta""" +426 56 training_loop """owa""" +426 56 
negative_sampler """basic""" +426 56 evaluator """rankbased""" +426 57 dataset """kinships""" +426 57 model """ntn""" +426 57 loss """softplus""" +426 57 regularizer """no""" +426 57 optimizer """adadelta""" +426 57 training_loop """owa""" +426 57 negative_sampler """basic""" +426 57 evaluator """rankbased""" +427 1 model.embedding_dim 0.0 +427 1 negative_sampler.num_negs_per_pos 69.0 +427 1 training.batch_size 0.0 +427 2 model.embedding_dim 0.0 +427 2 negative_sampler.num_negs_per_pos 90.0 +427 2 training.batch_size 0.0 +427 3 model.embedding_dim 1.0 +427 3 negative_sampler.num_negs_per_pos 78.0 +427 3 training.batch_size 0.0 +427 4 model.embedding_dim 1.0 +427 4 negative_sampler.num_negs_per_pos 94.0 +427 4 training.batch_size 0.0 +427 5 model.embedding_dim 2.0 +427 5 negative_sampler.num_negs_per_pos 27.0 +427 5 training.batch_size 2.0 +427 6 model.embedding_dim 2.0 +427 6 negative_sampler.num_negs_per_pos 6.0 +427 6 training.batch_size 0.0 +427 7 model.embedding_dim 0.0 +427 7 negative_sampler.num_negs_per_pos 22.0 +427 7 training.batch_size 1.0 +427 8 model.embedding_dim 0.0 +427 8 negative_sampler.num_negs_per_pos 9.0 +427 8 training.batch_size 2.0 +427 9 model.embedding_dim 2.0 +427 9 negative_sampler.num_negs_per_pos 26.0 +427 9 training.batch_size 2.0 +427 10 model.embedding_dim 0.0 +427 10 negative_sampler.num_negs_per_pos 32.0 +427 10 training.batch_size 1.0 +427 11 model.embedding_dim 0.0 +427 11 negative_sampler.num_negs_per_pos 95.0 +427 11 training.batch_size 0.0 +427 12 model.embedding_dim 0.0 +427 12 negative_sampler.num_negs_per_pos 53.0 +427 12 training.batch_size 1.0 +427 13 model.embedding_dim 0.0 +427 13 negative_sampler.num_negs_per_pos 11.0 +427 13 training.batch_size 2.0 +427 14 model.embedding_dim 2.0 +427 14 negative_sampler.num_negs_per_pos 80.0 +427 14 training.batch_size 0.0 +427 15 model.embedding_dim 0.0 +427 15 negative_sampler.num_negs_per_pos 12.0 +427 15 training.batch_size 2.0 +427 16 model.embedding_dim 2.0 +427 16 
negative_sampler.num_negs_per_pos 61.0 +427 16 training.batch_size 0.0 +427 17 model.embedding_dim 2.0 +427 17 negative_sampler.num_negs_per_pos 29.0 +427 17 training.batch_size 0.0 +427 18 model.embedding_dim 2.0 +427 18 negative_sampler.num_negs_per_pos 29.0 +427 18 training.batch_size 1.0 +427 19 model.embedding_dim 1.0 +427 19 negative_sampler.num_negs_per_pos 91.0 +427 19 training.batch_size 1.0 +427 20 model.embedding_dim 1.0 +427 20 negative_sampler.num_negs_per_pos 3.0 +427 20 training.batch_size 2.0 +427 21 model.embedding_dim 1.0 +427 21 negative_sampler.num_negs_per_pos 6.0 +427 21 training.batch_size 0.0 +427 22 model.embedding_dim 0.0 +427 22 negative_sampler.num_negs_per_pos 55.0 +427 22 training.batch_size 2.0 +427 23 model.embedding_dim 1.0 +427 23 negative_sampler.num_negs_per_pos 75.0 +427 23 training.batch_size 1.0 +427 24 model.embedding_dim 2.0 +427 24 negative_sampler.num_negs_per_pos 68.0 +427 24 training.batch_size 2.0 +427 25 model.embedding_dim 2.0 +427 25 negative_sampler.num_negs_per_pos 43.0 +427 25 training.batch_size 0.0 +427 26 model.embedding_dim 2.0 +427 26 negative_sampler.num_negs_per_pos 43.0 +427 26 training.batch_size 2.0 +427 27 model.embedding_dim 0.0 +427 27 negative_sampler.num_negs_per_pos 61.0 +427 27 training.batch_size 0.0 +427 28 model.embedding_dim 2.0 +427 28 negative_sampler.num_negs_per_pos 99.0 +427 28 training.batch_size 0.0 +427 29 model.embedding_dim 1.0 +427 29 negative_sampler.num_negs_per_pos 62.0 +427 29 training.batch_size 2.0 +427 30 model.embedding_dim 0.0 +427 30 negative_sampler.num_negs_per_pos 24.0 +427 30 training.batch_size 1.0 +427 31 model.embedding_dim 0.0 +427 31 negative_sampler.num_negs_per_pos 62.0 +427 31 training.batch_size 1.0 +427 32 model.embedding_dim 0.0 +427 32 negative_sampler.num_negs_per_pos 11.0 +427 32 training.batch_size 1.0 +427 33 model.embedding_dim 0.0 +427 33 negative_sampler.num_negs_per_pos 95.0 +427 33 training.batch_size 1.0 +427 34 model.embedding_dim 2.0 +427 34 
negative_sampler.num_negs_per_pos 75.0 +427 34 training.batch_size 1.0 +427 35 model.embedding_dim 1.0 +427 35 negative_sampler.num_negs_per_pos 97.0 +427 35 training.batch_size 2.0 +427 36 model.embedding_dim 2.0 +427 36 negative_sampler.num_negs_per_pos 69.0 +427 36 training.batch_size 1.0 +427 37 model.embedding_dim 2.0 +427 37 negative_sampler.num_negs_per_pos 43.0 +427 37 training.batch_size 0.0 +427 38 model.embedding_dim 1.0 +427 38 negative_sampler.num_negs_per_pos 0.0 +427 38 training.batch_size 2.0 +427 39 model.embedding_dim 0.0 +427 39 negative_sampler.num_negs_per_pos 92.0 +427 39 training.batch_size 0.0 +427 40 model.embedding_dim 1.0 +427 40 negative_sampler.num_negs_per_pos 40.0 +427 40 training.batch_size 0.0 +427 41 model.embedding_dim 2.0 +427 41 negative_sampler.num_negs_per_pos 59.0 +427 41 training.batch_size 1.0 +427 42 model.embedding_dim 0.0 +427 42 negative_sampler.num_negs_per_pos 20.0 +427 42 training.batch_size 2.0 +427 43 model.embedding_dim 2.0 +427 43 negative_sampler.num_negs_per_pos 73.0 +427 43 training.batch_size 0.0 +427 44 model.embedding_dim 1.0 +427 44 negative_sampler.num_negs_per_pos 27.0 +427 44 training.batch_size 2.0 +427 45 model.embedding_dim 2.0 +427 45 negative_sampler.num_negs_per_pos 44.0 +427 45 training.batch_size 1.0 +427 46 model.embedding_dim 0.0 +427 46 negative_sampler.num_negs_per_pos 17.0 +427 46 training.batch_size 0.0 +427 47 model.embedding_dim 0.0 +427 47 negative_sampler.num_negs_per_pos 59.0 +427 47 training.batch_size 2.0 +427 48 model.embedding_dim 2.0 +427 48 negative_sampler.num_negs_per_pos 61.0 +427 48 training.batch_size 2.0 +427 49 model.embedding_dim 2.0 +427 49 negative_sampler.num_negs_per_pos 41.0 +427 49 training.batch_size 2.0 +427 50 model.embedding_dim 1.0 +427 50 negative_sampler.num_negs_per_pos 16.0 +427 50 training.batch_size 1.0 +427 51 model.embedding_dim 2.0 +427 51 negative_sampler.num_negs_per_pos 8.0 +427 51 training.batch_size 2.0 +427 52 model.embedding_dim 2.0 +427 52 
negative_sampler.num_negs_per_pos 13.0 +427 52 training.batch_size 1.0 +427 53 model.embedding_dim 0.0 +427 53 negative_sampler.num_negs_per_pos 46.0 +427 53 training.batch_size 1.0 +427 54 model.embedding_dim 1.0 +427 54 negative_sampler.num_negs_per_pos 11.0 +427 54 training.batch_size 1.0 +427 55 model.embedding_dim 0.0 +427 55 negative_sampler.num_negs_per_pos 21.0 +427 55 training.batch_size 1.0 +427 56 model.embedding_dim 0.0 +427 56 negative_sampler.num_negs_per_pos 0.0 +427 56 training.batch_size 0.0 +427 57 model.embedding_dim 1.0 +427 57 negative_sampler.num_negs_per_pos 69.0 +427 57 training.batch_size 2.0 +427 58 model.embedding_dim 2.0 +427 58 negative_sampler.num_negs_per_pos 96.0 +427 58 training.batch_size 2.0 +427 59 model.embedding_dim 1.0 +427 59 negative_sampler.num_negs_per_pos 60.0 +427 59 training.batch_size 2.0 +427 60 model.embedding_dim 2.0 +427 60 negative_sampler.num_negs_per_pos 43.0 +427 60 training.batch_size 0.0 +427 61 model.embedding_dim 1.0 +427 61 negative_sampler.num_negs_per_pos 95.0 +427 61 training.batch_size 0.0 +427 62 model.embedding_dim 0.0 +427 62 negative_sampler.num_negs_per_pos 10.0 +427 62 training.batch_size 0.0 +427 63 model.embedding_dim 1.0 +427 63 negative_sampler.num_negs_per_pos 30.0 +427 63 training.batch_size 2.0 +427 64 model.embedding_dim 0.0 +427 64 negative_sampler.num_negs_per_pos 0.0 +427 64 training.batch_size 1.0 +427 65 model.embedding_dim 2.0 +427 65 negative_sampler.num_negs_per_pos 7.0 +427 65 training.batch_size 0.0 +427 66 model.embedding_dim 2.0 +427 66 negative_sampler.num_negs_per_pos 97.0 +427 66 training.batch_size 1.0 +427 67 model.embedding_dim 0.0 +427 67 negative_sampler.num_negs_per_pos 35.0 +427 67 training.batch_size 2.0 +427 68 model.embedding_dim 1.0 +427 68 negative_sampler.num_negs_per_pos 74.0 +427 68 training.batch_size 0.0 +427 69 model.embedding_dim 2.0 +427 69 negative_sampler.num_negs_per_pos 19.0 +427 69 training.batch_size 0.0 +427 70 model.embedding_dim 1.0 +427 70 
negative_sampler.num_negs_per_pos 65.0 +427 70 training.batch_size 0.0 +427 71 model.embedding_dim 0.0 +427 71 negative_sampler.num_negs_per_pos 52.0 +427 71 training.batch_size 2.0 +427 72 model.embedding_dim 0.0 +427 72 negative_sampler.num_negs_per_pos 79.0 +427 72 training.batch_size 0.0 +427 73 model.embedding_dim 0.0 +427 73 negative_sampler.num_negs_per_pos 35.0 +427 73 training.batch_size 0.0 +427 74 model.embedding_dim 0.0 +427 74 negative_sampler.num_negs_per_pos 30.0 +427 74 training.batch_size 1.0 +427 75 model.embedding_dim 2.0 +427 75 negative_sampler.num_negs_per_pos 58.0 +427 75 training.batch_size 0.0 +427 76 model.embedding_dim 0.0 +427 76 negative_sampler.num_negs_per_pos 46.0 +427 76 training.batch_size 1.0 +427 77 model.embedding_dim 1.0 +427 77 negative_sampler.num_negs_per_pos 85.0 +427 77 training.batch_size 1.0 +427 78 model.embedding_dim 0.0 +427 78 negative_sampler.num_negs_per_pos 58.0 +427 78 training.batch_size 2.0 +427 79 model.embedding_dim 2.0 +427 79 negative_sampler.num_negs_per_pos 18.0 +427 79 training.batch_size 0.0 +427 80 model.embedding_dim 1.0 +427 80 negative_sampler.num_negs_per_pos 56.0 +427 80 training.batch_size 2.0 +427 81 model.embedding_dim 2.0 +427 81 negative_sampler.num_negs_per_pos 32.0 +427 81 training.batch_size 1.0 +427 82 model.embedding_dim 2.0 +427 82 negative_sampler.num_negs_per_pos 97.0 +427 82 training.batch_size 0.0 +427 83 model.embedding_dim 0.0 +427 83 negative_sampler.num_negs_per_pos 12.0 +427 83 training.batch_size 0.0 +427 84 model.embedding_dim 1.0 +427 84 negative_sampler.num_negs_per_pos 78.0 +427 84 training.batch_size 1.0 +427 85 model.embedding_dim 1.0 +427 85 negative_sampler.num_negs_per_pos 50.0 +427 85 training.batch_size 1.0 +427 86 model.embedding_dim 1.0 +427 86 negative_sampler.num_negs_per_pos 51.0 +427 86 training.batch_size 0.0 +427 87 model.embedding_dim 0.0 +427 87 negative_sampler.num_negs_per_pos 72.0 +427 87 training.batch_size 1.0 +427 88 model.embedding_dim 1.0 +427 88 
negative_sampler.num_negs_per_pos 0.0 +427 88 training.batch_size 1.0 +427 89 model.embedding_dim 1.0 +427 89 negative_sampler.num_negs_per_pos 26.0 +427 89 training.batch_size 0.0 +427 90 model.embedding_dim 0.0 +427 90 negative_sampler.num_negs_per_pos 91.0 +427 90 training.batch_size 1.0 +427 91 model.embedding_dim 0.0 +427 91 negative_sampler.num_negs_per_pos 58.0 +427 91 training.batch_size 1.0 +427 92 model.embedding_dim 0.0 +427 92 negative_sampler.num_negs_per_pos 65.0 +427 92 training.batch_size 2.0 +427 93 model.embedding_dim 1.0 +427 93 negative_sampler.num_negs_per_pos 89.0 +427 93 training.batch_size 0.0 +427 94 model.embedding_dim 0.0 +427 94 negative_sampler.num_negs_per_pos 94.0 +427 94 training.batch_size 1.0 +427 95 model.embedding_dim 1.0 +427 95 negative_sampler.num_negs_per_pos 78.0 +427 95 training.batch_size 1.0 +427 96 model.embedding_dim 2.0 +427 96 negative_sampler.num_negs_per_pos 34.0 +427 96 training.batch_size 0.0 +427 97 model.embedding_dim 2.0 +427 97 negative_sampler.num_negs_per_pos 97.0 +427 97 training.batch_size 0.0 +427 98 model.embedding_dim 2.0 +427 98 negative_sampler.num_negs_per_pos 70.0 +427 98 training.batch_size 2.0 +427 99 model.embedding_dim 0.0 +427 99 negative_sampler.num_negs_per_pos 4.0 +427 99 training.batch_size 2.0 +427 100 model.embedding_dim 1.0 +427 100 negative_sampler.num_negs_per_pos 76.0 +427 100 training.batch_size 2.0 +427 1 dataset """kinships""" +427 1 model """ntn""" +427 1 loss """bceaftersigmoid""" +427 1 regularizer """no""" +427 1 optimizer """adadelta""" +427 1 training_loop """owa""" +427 1 negative_sampler """basic""" +427 1 evaluator """rankbased""" +427 2 dataset """kinships""" +427 2 model """ntn""" +427 2 loss """bceaftersigmoid""" +427 2 regularizer """no""" +427 2 optimizer """adadelta""" +427 2 training_loop """owa""" +427 2 negative_sampler """basic""" +427 2 evaluator """rankbased""" +427 3 dataset """kinships""" +427 3 model """ntn""" +427 3 loss """bceaftersigmoid""" +427 3 
regularizer """no""" +427 3 optimizer """adadelta""" +427 3 training_loop """owa""" +427 3 negative_sampler """basic""" +427 3 evaluator """rankbased""" +427 4 dataset """kinships""" +427 4 model """ntn""" +427 4 loss """bceaftersigmoid""" +427 4 regularizer """no""" +427 4 optimizer """adadelta""" +427 4 training_loop """owa""" +427 4 negative_sampler """basic""" +427 4 evaluator """rankbased""" +427 5 dataset """kinships""" +427 5 model """ntn""" +427 5 loss """bceaftersigmoid""" +427 5 regularizer """no""" +427 5 optimizer """adadelta""" +427 5 training_loop """owa""" +427 5 negative_sampler """basic""" +427 5 evaluator """rankbased""" +427 6 dataset """kinships""" +427 6 model """ntn""" +427 6 loss """bceaftersigmoid""" +427 6 regularizer """no""" +427 6 optimizer """adadelta""" +427 6 training_loop """owa""" +427 6 negative_sampler """basic""" +427 6 evaluator """rankbased""" +427 7 dataset """kinships""" +427 7 model """ntn""" +427 7 loss """bceaftersigmoid""" +427 7 regularizer """no""" +427 7 optimizer """adadelta""" +427 7 training_loop """owa""" +427 7 negative_sampler """basic""" +427 7 evaluator """rankbased""" +427 8 dataset """kinships""" +427 8 model """ntn""" +427 8 loss """bceaftersigmoid""" +427 8 regularizer """no""" +427 8 optimizer """adadelta""" +427 8 training_loop """owa""" +427 8 negative_sampler """basic""" +427 8 evaluator """rankbased""" +427 9 dataset """kinships""" +427 9 model """ntn""" +427 9 loss """bceaftersigmoid""" +427 9 regularizer """no""" +427 9 optimizer """adadelta""" +427 9 training_loop """owa""" +427 9 negative_sampler """basic""" +427 9 evaluator """rankbased""" +427 10 dataset """kinships""" +427 10 model """ntn""" +427 10 loss """bceaftersigmoid""" +427 10 regularizer """no""" +427 10 optimizer """adadelta""" +427 10 training_loop """owa""" +427 10 negative_sampler """basic""" +427 10 evaluator """rankbased""" +427 11 dataset """kinships""" +427 11 model """ntn""" +427 11 loss """bceaftersigmoid""" +427 11 regularizer 
"""no""" +427 11 optimizer """adadelta""" +427 11 training_loop """owa""" +427 11 negative_sampler """basic""" +427 11 evaluator """rankbased""" +427 12 dataset """kinships""" +427 12 model """ntn""" +427 12 loss """bceaftersigmoid""" +427 12 regularizer """no""" +427 12 optimizer """adadelta""" +427 12 training_loop """owa""" +427 12 negative_sampler """basic""" +427 12 evaluator """rankbased""" +427 13 dataset """kinships""" +427 13 model """ntn""" +427 13 loss """bceaftersigmoid""" +427 13 regularizer """no""" +427 13 optimizer """adadelta""" +427 13 training_loop """owa""" +427 13 negative_sampler """basic""" +427 13 evaluator """rankbased""" +427 14 dataset """kinships""" +427 14 model """ntn""" +427 14 loss """bceaftersigmoid""" +427 14 regularizer """no""" +427 14 optimizer """adadelta""" +427 14 training_loop """owa""" +427 14 negative_sampler """basic""" +427 14 evaluator """rankbased""" +427 15 dataset """kinships""" +427 15 model """ntn""" +427 15 loss """bceaftersigmoid""" +427 15 regularizer """no""" +427 15 optimizer """adadelta""" +427 15 training_loop """owa""" +427 15 negative_sampler """basic""" +427 15 evaluator """rankbased""" +427 16 dataset """kinships""" +427 16 model """ntn""" +427 16 loss """bceaftersigmoid""" +427 16 regularizer """no""" +427 16 optimizer """adadelta""" +427 16 training_loop """owa""" +427 16 negative_sampler """basic""" +427 16 evaluator """rankbased""" +427 17 dataset """kinships""" +427 17 model """ntn""" +427 17 loss """bceaftersigmoid""" +427 17 regularizer """no""" +427 17 optimizer """adadelta""" +427 17 training_loop """owa""" +427 17 negative_sampler """basic""" +427 17 evaluator """rankbased""" +427 18 dataset """kinships""" +427 18 model """ntn""" +427 18 loss """bceaftersigmoid""" +427 18 regularizer """no""" +427 18 optimizer """adadelta""" +427 18 training_loop """owa""" +427 18 negative_sampler """basic""" +427 18 evaluator """rankbased""" +427 19 dataset """kinships""" +427 19 model """ntn""" +427 19 loss 
"""bceaftersigmoid""" +427 19 regularizer """no""" +427 19 optimizer """adadelta""" +427 19 training_loop """owa""" +427 19 negative_sampler """basic""" +427 19 evaluator """rankbased""" +427 20 dataset """kinships""" +427 20 model """ntn""" +427 20 loss """bceaftersigmoid""" +427 20 regularizer """no""" +427 20 optimizer """adadelta""" +427 20 training_loop """owa""" +427 20 negative_sampler """basic""" +427 20 evaluator """rankbased""" +427 21 dataset """kinships""" +427 21 model """ntn""" +427 21 loss """bceaftersigmoid""" +427 21 regularizer """no""" +427 21 optimizer """adadelta""" +427 21 training_loop """owa""" +427 21 negative_sampler """basic""" +427 21 evaluator """rankbased""" +427 22 dataset """kinships""" +427 22 model """ntn""" +427 22 loss """bceaftersigmoid""" +427 22 regularizer """no""" +427 22 optimizer """adadelta""" +427 22 training_loop """owa""" +427 22 negative_sampler """basic""" +427 22 evaluator """rankbased""" +427 23 dataset """kinships""" +427 23 model """ntn""" +427 23 loss """bceaftersigmoid""" +427 23 regularizer """no""" +427 23 optimizer """adadelta""" +427 23 training_loop """owa""" +427 23 negative_sampler """basic""" +427 23 evaluator """rankbased""" +427 24 dataset """kinships""" +427 24 model """ntn""" +427 24 loss """bceaftersigmoid""" +427 24 regularizer """no""" +427 24 optimizer """adadelta""" +427 24 training_loop """owa""" +427 24 negative_sampler """basic""" +427 24 evaluator """rankbased""" +427 25 dataset """kinships""" +427 25 model """ntn""" +427 25 loss """bceaftersigmoid""" +427 25 regularizer """no""" +427 25 optimizer """adadelta""" +427 25 training_loop """owa""" +427 25 negative_sampler """basic""" +427 25 evaluator """rankbased""" +427 26 dataset """kinships""" +427 26 model """ntn""" +427 26 loss """bceaftersigmoid""" +427 26 regularizer """no""" +427 26 optimizer """adadelta""" +427 26 training_loop """owa""" +427 26 negative_sampler """basic""" +427 26 evaluator """rankbased""" +427 27 dataset 
"""kinships""" +427 27 model """ntn""" +427 27 loss """bceaftersigmoid""" +427 27 regularizer """no""" +427 27 optimizer """adadelta""" +427 27 training_loop """owa""" +427 27 negative_sampler """basic""" +427 27 evaluator """rankbased""" +427 28 dataset """kinships""" +427 28 model """ntn""" +427 28 loss """bceaftersigmoid""" +427 28 regularizer """no""" +427 28 optimizer """adadelta""" +427 28 training_loop """owa""" +427 28 negative_sampler """basic""" +427 28 evaluator """rankbased""" +427 29 dataset """kinships""" +427 29 model """ntn""" +427 29 loss """bceaftersigmoid""" +427 29 regularizer """no""" +427 29 optimizer """adadelta""" +427 29 training_loop """owa""" +427 29 negative_sampler """basic""" +427 29 evaluator """rankbased""" +427 30 dataset """kinships""" +427 30 model """ntn""" +427 30 loss """bceaftersigmoid""" +427 30 regularizer """no""" +427 30 optimizer """adadelta""" +427 30 training_loop """owa""" +427 30 negative_sampler """basic""" +427 30 evaluator """rankbased""" +427 31 dataset """kinships""" +427 31 model """ntn""" +427 31 loss """bceaftersigmoid""" +427 31 regularizer """no""" +427 31 optimizer """adadelta""" +427 31 training_loop """owa""" +427 31 negative_sampler """basic""" +427 31 evaluator """rankbased""" +427 32 dataset """kinships""" +427 32 model """ntn""" +427 32 loss """bceaftersigmoid""" +427 32 regularizer """no""" +427 32 optimizer """adadelta""" +427 32 training_loop """owa""" +427 32 negative_sampler """basic""" +427 32 evaluator """rankbased""" +427 33 dataset """kinships""" +427 33 model """ntn""" +427 33 loss """bceaftersigmoid""" +427 33 regularizer """no""" +427 33 optimizer """adadelta""" +427 33 training_loop """owa""" +427 33 negative_sampler """basic""" +427 33 evaluator """rankbased""" +427 34 dataset """kinships""" +427 34 model """ntn""" +427 34 loss """bceaftersigmoid""" +427 34 regularizer """no""" +427 34 optimizer """adadelta""" +427 34 training_loop """owa""" +427 34 negative_sampler """basic""" +427 34 
evaluator """rankbased""" +427 35 dataset """kinships""" +427 35 model """ntn""" +427 35 loss """bceaftersigmoid""" +427 35 regularizer """no""" +427 35 optimizer """adadelta""" +427 35 training_loop """owa""" +427 35 negative_sampler """basic""" +427 35 evaluator """rankbased""" +427 36 dataset """kinships""" +427 36 model """ntn""" +427 36 loss """bceaftersigmoid""" +427 36 regularizer """no""" +427 36 optimizer """adadelta""" +427 36 training_loop """owa""" +427 36 negative_sampler """basic""" +427 36 evaluator """rankbased""" +427 37 dataset """kinships""" +427 37 model """ntn""" +427 37 loss """bceaftersigmoid""" +427 37 regularizer """no""" +427 37 optimizer """adadelta""" +427 37 training_loop """owa""" +427 37 negative_sampler """basic""" +427 37 evaluator """rankbased""" +427 38 dataset """kinships""" +427 38 model """ntn""" +427 38 loss """bceaftersigmoid""" +427 38 regularizer """no""" +427 38 optimizer """adadelta""" +427 38 training_loop """owa""" +427 38 negative_sampler """basic""" +427 38 evaluator """rankbased""" +427 39 dataset """kinships""" +427 39 model """ntn""" +427 39 loss """bceaftersigmoid""" +427 39 regularizer """no""" +427 39 optimizer """adadelta""" +427 39 training_loop """owa""" +427 39 negative_sampler """basic""" +427 39 evaluator """rankbased""" +427 40 dataset """kinships""" +427 40 model """ntn""" +427 40 loss """bceaftersigmoid""" +427 40 regularizer """no""" +427 40 optimizer """adadelta""" +427 40 training_loop """owa""" +427 40 negative_sampler """basic""" +427 40 evaluator """rankbased""" +427 41 dataset """kinships""" +427 41 model """ntn""" +427 41 loss """bceaftersigmoid""" +427 41 regularizer """no""" +427 41 optimizer """adadelta""" +427 41 training_loop """owa""" +427 41 negative_sampler """basic""" +427 41 evaluator """rankbased""" +427 42 dataset """kinships""" +427 42 model """ntn""" +427 42 loss """bceaftersigmoid""" +427 42 regularizer """no""" +427 42 optimizer """adadelta""" +427 42 training_loop """owa""" +427 
42 negative_sampler """basic""" +427 42 evaluator """rankbased""" +427 43 dataset """kinships""" +427 43 model """ntn""" +427 43 loss """bceaftersigmoid""" +427 43 regularizer """no""" +427 43 optimizer """adadelta""" +427 43 training_loop """owa""" +427 43 negative_sampler """basic""" +427 43 evaluator """rankbased""" +427 44 dataset """kinships""" +427 44 model """ntn""" +427 44 loss """bceaftersigmoid""" +427 44 regularizer """no""" +427 44 optimizer """adadelta""" +427 44 training_loop """owa""" +427 44 negative_sampler """basic""" +427 44 evaluator """rankbased""" +427 45 dataset """kinships""" +427 45 model """ntn""" +427 45 loss """bceaftersigmoid""" +427 45 regularizer """no""" +427 45 optimizer """adadelta""" +427 45 training_loop """owa""" +427 45 negative_sampler """basic""" +427 45 evaluator """rankbased""" +427 46 dataset """kinships""" +427 46 model """ntn""" +427 46 loss """bceaftersigmoid""" +427 46 regularizer """no""" +427 46 optimizer """adadelta""" +427 46 training_loop """owa""" +427 46 negative_sampler """basic""" +427 46 evaluator """rankbased""" +427 47 dataset """kinships""" +427 47 model """ntn""" +427 47 loss """bceaftersigmoid""" +427 47 regularizer """no""" +427 47 optimizer """adadelta""" +427 47 training_loop """owa""" +427 47 negative_sampler """basic""" +427 47 evaluator """rankbased""" +427 48 dataset """kinships""" +427 48 model """ntn""" +427 48 loss """bceaftersigmoid""" +427 48 regularizer """no""" +427 48 optimizer """adadelta""" +427 48 training_loop """owa""" +427 48 negative_sampler """basic""" +427 48 evaluator """rankbased""" +427 49 dataset """kinships""" +427 49 model """ntn""" +427 49 loss """bceaftersigmoid""" +427 49 regularizer """no""" +427 49 optimizer """adadelta""" +427 49 training_loop """owa""" +427 49 negative_sampler """basic""" +427 49 evaluator """rankbased""" +427 50 dataset """kinships""" +427 50 model """ntn""" +427 50 loss """bceaftersigmoid""" +427 50 regularizer """no""" +427 50 optimizer 
"""adadelta""" +427 50 training_loop """owa""" +427 50 negative_sampler """basic""" +427 50 evaluator """rankbased""" +427 51 dataset """kinships""" +427 51 model """ntn""" +427 51 loss """bceaftersigmoid""" +427 51 regularizer """no""" +427 51 optimizer """adadelta""" +427 51 training_loop """owa""" +427 51 negative_sampler """basic""" +427 51 evaluator """rankbased""" +427 52 dataset """kinships""" +427 52 model """ntn""" +427 52 loss """bceaftersigmoid""" +427 52 regularizer """no""" +427 52 optimizer """adadelta""" +427 52 training_loop """owa""" +427 52 negative_sampler """basic""" +427 52 evaluator """rankbased""" +427 53 dataset """kinships""" +427 53 model """ntn""" +427 53 loss """bceaftersigmoid""" +427 53 regularizer """no""" +427 53 optimizer """adadelta""" +427 53 training_loop """owa""" +427 53 negative_sampler """basic""" +427 53 evaluator """rankbased""" +427 54 dataset """kinships""" +427 54 model """ntn""" +427 54 loss """bceaftersigmoid""" +427 54 regularizer """no""" +427 54 optimizer """adadelta""" +427 54 training_loop """owa""" +427 54 negative_sampler """basic""" +427 54 evaluator """rankbased""" +427 55 dataset """kinships""" +427 55 model """ntn""" +427 55 loss """bceaftersigmoid""" +427 55 regularizer """no""" +427 55 optimizer """adadelta""" +427 55 training_loop """owa""" +427 55 negative_sampler """basic""" +427 55 evaluator """rankbased""" +427 56 dataset """kinships""" +427 56 model """ntn""" +427 56 loss """bceaftersigmoid""" +427 56 regularizer """no""" +427 56 optimizer """adadelta""" +427 56 training_loop """owa""" +427 56 negative_sampler """basic""" +427 56 evaluator """rankbased""" +427 57 dataset """kinships""" +427 57 model """ntn""" +427 57 loss """bceaftersigmoid""" +427 57 regularizer """no""" +427 57 optimizer """adadelta""" +427 57 training_loop """owa""" +427 57 negative_sampler """basic""" +427 57 evaluator """rankbased""" +427 58 dataset """kinships""" +427 58 model """ntn""" +427 58 loss """bceaftersigmoid""" +427 
58 regularizer """no""" +427 58 optimizer """adadelta""" +427 58 training_loop """owa""" +427 58 negative_sampler """basic""" +427 58 evaluator """rankbased""" +427 59 dataset """kinships""" +427 59 model """ntn""" +427 59 loss """bceaftersigmoid""" +427 59 regularizer """no""" +427 59 optimizer """adadelta""" +427 59 training_loop """owa""" +427 59 negative_sampler """basic""" +427 59 evaluator """rankbased""" +427 60 dataset """kinships""" +427 60 model """ntn""" +427 60 loss """bceaftersigmoid""" +427 60 regularizer """no""" +427 60 optimizer """adadelta""" +427 60 training_loop """owa""" +427 60 negative_sampler """basic""" +427 60 evaluator """rankbased""" +427 61 dataset """kinships""" +427 61 model """ntn""" +427 61 loss """bceaftersigmoid""" +427 61 regularizer """no""" +427 61 optimizer """adadelta""" +427 61 training_loop """owa""" +427 61 negative_sampler """basic""" +427 61 evaluator """rankbased""" +427 62 dataset """kinships""" +427 62 model """ntn""" +427 62 loss """bceaftersigmoid""" +427 62 regularizer """no""" +427 62 optimizer """adadelta""" +427 62 training_loop """owa""" +427 62 negative_sampler """basic""" +427 62 evaluator """rankbased""" +427 63 dataset """kinships""" +427 63 model """ntn""" +427 63 loss """bceaftersigmoid""" +427 63 regularizer """no""" +427 63 optimizer """adadelta""" +427 63 training_loop """owa""" +427 63 negative_sampler """basic""" +427 63 evaluator """rankbased""" +427 64 dataset """kinships""" +427 64 model """ntn""" +427 64 loss """bceaftersigmoid""" +427 64 regularizer """no""" +427 64 optimizer """adadelta""" +427 64 training_loop """owa""" +427 64 negative_sampler """basic""" +427 64 evaluator """rankbased""" +427 65 dataset """kinships""" +427 65 model """ntn""" +427 65 loss """bceaftersigmoid""" +427 65 regularizer """no""" +427 65 optimizer """adadelta""" +427 65 training_loop """owa""" +427 65 negative_sampler """basic""" +427 65 evaluator """rankbased""" +427 66 dataset """kinships""" +427 66 model """ntn""" 
+427 66 loss """bceaftersigmoid""" +427 66 regularizer """no""" +427 66 optimizer """adadelta""" +427 66 training_loop """owa""" +427 66 negative_sampler """basic""" +427 66 evaluator """rankbased""" +427 67 dataset """kinships""" +427 67 model """ntn""" +427 67 loss """bceaftersigmoid""" +427 67 regularizer """no""" +427 67 optimizer """adadelta""" +427 67 training_loop """owa""" +427 67 negative_sampler """basic""" +427 67 evaluator """rankbased""" +427 68 dataset """kinships""" +427 68 model """ntn""" +427 68 loss """bceaftersigmoid""" +427 68 regularizer """no""" +427 68 optimizer """adadelta""" +427 68 training_loop """owa""" +427 68 negative_sampler """basic""" +427 68 evaluator """rankbased""" +427 69 dataset """kinships""" +427 69 model """ntn""" +427 69 loss """bceaftersigmoid""" +427 69 regularizer """no""" +427 69 optimizer """adadelta""" +427 69 training_loop """owa""" +427 69 negative_sampler """basic""" +427 69 evaluator """rankbased""" +427 70 dataset """kinships""" +427 70 model """ntn""" +427 70 loss """bceaftersigmoid""" +427 70 regularizer """no""" +427 70 optimizer """adadelta""" +427 70 training_loop """owa""" +427 70 negative_sampler """basic""" +427 70 evaluator """rankbased""" +427 71 dataset """kinships""" +427 71 model """ntn""" +427 71 loss """bceaftersigmoid""" +427 71 regularizer """no""" +427 71 optimizer """adadelta""" +427 71 training_loop """owa""" +427 71 negative_sampler """basic""" +427 71 evaluator """rankbased""" +427 72 dataset """kinships""" +427 72 model """ntn""" +427 72 loss """bceaftersigmoid""" +427 72 regularizer """no""" +427 72 optimizer """adadelta""" +427 72 training_loop """owa""" +427 72 negative_sampler """basic""" +427 72 evaluator """rankbased""" +427 73 dataset """kinships""" +427 73 model """ntn""" +427 73 loss """bceaftersigmoid""" +427 73 regularizer """no""" +427 73 optimizer """adadelta""" +427 73 training_loop """owa""" +427 73 negative_sampler """basic""" +427 73 evaluator """rankbased""" +427 74 
dataset """kinships""" +427 74 model """ntn""" +427 74 loss """bceaftersigmoid""" +427 74 regularizer """no""" +427 74 optimizer """adadelta""" +427 74 training_loop """owa""" +427 74 negative_sampler """basic""" +427 74 evaluator """rankbased""" +427 75 dataset """kinships""" +427 75 model """ntn""" +427 75 loss """bceaftersigmoid""" +427 75 regularizer """no""" +427 75 optimizer """adadelta""" +427 75 training_loop """owa""" +427 75 negative_sampler """basic""" +427 75 evaluator """rankbased""" +427 76 dataset """kinships""" +427 76 model """ntn""" +427 76 loss """bceaftersigmoid""" +427 76 regularizer """no""" +427 76 optimizer """adadelta""" +427 76 training_loop """owa""" +427 76 negative_sampler """basic""" +427 76 evaluator """rankbased""" +427 77 dataset """kinships""" +427 77 model """ntn""" +427 77 loss """bceaftersigmoid""" +427 77 regularizer """no""" +427 77 optimizer """adadelta""" +427 77 training_loop """owa""" +427 77 negative_sampler """basic""" +427 77 evaluator """rankbased""" +427 78 dataset """kinships""" +427 78 model """ntn""" +427 78 loss """bceaftersigmoid""" +427 78 regularizer """no""" +427 78 optimizer """adadelta""" +427 78 training_loop """owa""" +427 78 negative_sampler """basic""" +427 78 evaluator """rankbased""" +427 79 dataset """kinships""" +427 79 model """ntn""" +427 79 loss """bceaftersigmoid""" +427 79 regularizer """no""" +427 79 optimizer """adadelta""" +427 79 training_loop """owa""" +427 79 negative_sampler """basic""" +427 79 evaluator """rankbased""" +427 80 dataset """kinships""" +427 80 model """ntn""" +427 80 loss """bceaftersigmoid""" +427 80 regularizer """no""" +427 80 optimizer """adadelta""" +427 80 training_loop """owa""" +427 80 negative_sampler """basic""" +427 80 evaluator """rankbased""" +427 81 dataset """kinships""" +427 81 model """ntn""" +427 81 loss """bceaftersigmoid""" +427 81 regularizer """no""" +427 81 optimizer """adadelta""" +427 81 training_loop """owa""" +427 81 negative_sampler """basic""" 
+427 81 evaluator """rankbased""" +427 82 dataset """kinships""" +427 82 model """ntn""" +427 82 loss """bceaftersigmoid""" +427 82 regularizer """no""" +427 82 optimizer """adadelta""" +427 82 training_loop """owa""" +427 82 negative_sampler """basic""" +427 82 evaluator """rankbased""" +427 83 dataset """kinships""" +427 83 model """ntn""" +427 83 loss """bceaftersigmoid""" +427 83 regularizer """no""" +427 83 optimizer """adadelta""" +427 83 training_loop """owa""" +427 83 negative_sampler """basic""" +427 83 evaluator """rankbased""" +427 84 dataset """kinships""" +427 84 model """ntn""" +427 84 loss """bceaftersigmoid""" +427 84 regularizer """no""" +427 84 optimizer """adadelta""" +427 84 training_loop """owa""" +427 84 negative_sampler """basic""" +427 84 evaluator """rankbased""" +427 85 dataset """kinships""" +427 85 model """ntn""" +427 85 loss """bceaftersigmoid""" +427 85 regularizer """no""" +427 85 optimizer """adadelta""" +427 85 training_loop """owa""" +427 85 negative_sampler """basic""" +427 85 evaluator """rankbased""" +427 86 dataset """kinships""" +427 86 model """ntn""" +427 86 loss """bceaftersigmoid""" +427 86 regularizer """no""" +427 86 optimizer """adadelta""" +427 86 training_loop """owa""" +427 86 negative_sampler """basic""" +427 86 evaluator """rankbased""" +427 87 dataset """kinships""" +427 87 model """ntn""" +427 87 loss """bceaftersigmoid""" +427 87 regularizer """no""" +427 87 optimizer """adadelta""" +427 87 training_loop """owa""" +427 87 negative_sampler """basic""" +427 87 evaluator """rankbased""" +427 88 dataset """kinships""" +427 88 model """ntn""" +427 88 loss """bceaftersigmoid""" +427 88 regularizer """no""" +427 88 optimizer """adadelta""" +427 88 training_loop """owa""" +427 88 negative_sampler """basic""" +427 88 evaluator """rankbased""" +427 89 dataset """kinships""" +427 89 model """ntn""" +427 89 loss """bceaftersigmoid""" +427 89 regularizer """no""" +427 89 optimizer """adadelta""" +427 89 training_loop 
"""owa""" +427 89 negative_sampler """basic""" +427 89 evaluator """rankbased""" +427 90 dataset """kinships""" +427 90 model """ntn""" +427 90 loss """bceaftersigmoid""" +427 90 regularizer """no""" +427 90 optimizer """adadelta""" +427 90 training_loop """owa""" +427 90 negative_sampler """basic""" +427 90 evaluator """rankbased""" +427 91 dataset """kinships""" +427 91 model """ntn""" +427 91 loss """bceaftersigmoid""" +427 91 regularizer """no""" +427 91 optimizer """adadelta""" +427 91 training_loop """owa""" +427 91 negative_sampler """basic""" +427 91 evaluator """rankbased""" +427 92 dataset """kinships""" +427 92 model """ntn""" +427 92 loss """bceaftersigmoid""" +427 92 regularizer """no""" +427 92 optimizer """adadelta""" +427 92 training_loop """owa""" +427 92 negative_sampler """basic""" +427 92 evaluator """rankbased""" +427 93 dataset """kinships""" +427 93 model """ntn""" +427 93 loss """bceaftersigmoid""" +427 93 regularizer """no""" +427 93 optimizer """adadelta""" +427 93 training_loop """owa""" +427 93 negative_sampler """basic""" +427 93 evaluator """rankbased""" +427 94 dataset """kinships""" +427 94 model """ntn""" +427 94 loss """bceaftersigmoid""" +427 94 regularizer """no""" +427 94 optimizer """adadelta""" +427 94 training_loop """owa""" +427 94 negative_sampler """basic""" +427 94 evaluator """rankbased""" +427 95 dataset """kinships""" +427 95 model """ntn""" +427 95 loss """bceaftersigmoid""" +427 95 regularizer """no""" +427 95 optimizer """adadelta""" +427 95 training_loop """owa""" +427 95 negative_sampler """basic""" +427 95 evaluator """rankbased""" +427 96 dataset """kinships""" +427 96 model """ntn""" +427 96 loss """bceaftersigmoid""" +427 96 regularizer """no""" +427 96 optimizer """adadelta""" +427 96 training_loop """owa""" +427 96 negative_sampler """basic""" +427 96 evaluator """rankbased""" +427 97 dataset """kinships""" +427 97 model """ntn""" +427 97 loss """bceaftersigmoid""" +427 97 regularizer """no""" +427 97 
optimizer """adadelta""" +427 97 training_loop """owa""" +427 97 negative_sampler """basic""" +427 97 evaluator """rankbased""" +427 98 dataset """kinships""" +427 98 model """ntn""" +427 98 loss """bceaftersigmoid""" +427 98 regularizer """no""" +427 98 optimizer """adadelta""" +427 98 training_loop """owa""" +427 98 negative_sampler """basic""" +427 98 evaluator """rankbased""" +427 99 dataset """kinships""" +427 99 model """ntn""" +427 99 loss """bceaftersigmoid""" +427 99 regularizer """no""" +427 99 optimizer """adadelta""" +427 99 training_loop """owa""" +427 99 negative_sampler """basic""" +427 99 evaluator """rankbased""" +427 100 dataset """kinships""" +427 100 model """ntn""" +427 100 loss """bceaftersigmoid""" +427 100 regularizer """no""" +427 100 optimizer """adadelta""" +427 100 training_loop """owa""" +427 100 negative_sampler """basic""" +427 100 evaluator """rankbased""" +428 1 model.embedding_dim 0.0 +428 1 negative_sampler.num_negs_per_pos 76.0 +428 1 training.batch_size 0.0 +428 2 model.embedding_dim 1.0 +428 2 negative_sampler.num_negs_per_pos 62.0 +428 2 training.batch_size 0.0 +428 3 model.embedding_dim 1.0 +428 3 negative_sampler.num_negs_per_pos 31.0 +428 3 training.batch_size 2.0 +428 4 model.embedding_dim 0.0 +428 4 negative_sampler.num_negs_per_pos 98.0 +428 4 training.batch_size 2.0 +428 5 model.embedding_dim 1.0 +428 5 negative_sampler.num_negs_per_pos 22.0 +428 5 training.batch_size 2.0 +428 6 model.embedding_dim 2.0 +428 6 negative_sampler.num_negs_per_pos 61.0 +428 6 training.batch_size 1.0 +428 7 model.embedding_dim 0.0 +428 7 negative_sampler.num_negs_per_pos 39.0 +428 7 training.batch_size 2.0 +428 8 model.embedding_dim 0.0 +428 8 negative_sampler.num_negs_per_pos 98.0 +428 8 training.batch_size 1.0 +428 9 model.embedding_dim 1.0 +428 9 negative_sampler.num_negs_per_pos 82.0 +428 9 training.batch_size 2.0 +428 10 model.embedding_dim 2.0 +428 10 negative_sampler.num_negs_per_pos 55.0 +428 10 training.batch_size 0.0 +428 11 
model.embedding_dim 2.0 +428 11 negative_sampler.num_negs_per_pos 54.0 +428 11 training.batch_size 1.0 +428 12 model.embedding_dim 1.0 +428 12 negative_sampler.num_negs_per_pos 10.0 +428 12 training.batch_size 2.0 +428 13 model.embedding_dim 1.0 +428 13 negative_sampler.num_negs_per_pos 40.0 +428 13 training.batch_size 0.0 +428 14 model.embedding_dim 0.0 +428 14 negative_sampler.num_negs_per_pos 25.0 +428 14 training.batch_size 1.0 +428 15 model.embedding_dim 2.0 +428 15 negative_sampler.num_negs_per_pos 22.0 +428 15 training.batch_size 0.0 +428 16 model.embedding_dim 2.0 +428 16 negative_sampler.num_negs_per_pos 88.0 +428 16 training.batch_size 1.0 +428 17 model.embedding_dim 0.0 +428 17 negative_sampler.num_negs_per_pos 82.0 +428 17 training.batch_size 1.0 +428 18 model.embedding_dim 1.0 +428 18 negative_sampler.num_negs_per_pos 62.0 +428 18 training.batch_size 1.0 +428 19 model.embedding_dim 0.0 +428 19 negative_sampler.num_negs_per_pos 51.0 +428 19 training.batch_size 2.0 +428 20 model.embedding_dim 2.0 +428 20 negative_sampler.num_negs_per_pos 23.0 +428 20 training.batch_size 2.0 +428 21 model.embedding_dim 0.0 +428 21 negative_sampler.num_negs_per_pos 59.0 +428 21 training.batch_size 2.0 +428 22 model.embedding_dim 0.0 +428 22 negative_sampler.num_negs_per_pos 68.0 +428 22 training.batch_size 2.0 +428 23 model.embedding_dim 1.0 +428 23 negative_sampler.num_negs_per_pos 21.0 +428 23 training.batch_size 0.0 +428 24 model.embedding_dim 1.0 +428 24 negative_sampler.num_negs_per_pos 77.0 +428 24 training.batch_size 1.0 +428 25 model.embedding_dim 1.0 +428 25 negative_sampler.num_negs_per_pos 75.0 +428 25 training.batch_size 2.0 +428 26 model.embedding_dim 0.0 +428 26 negative_sampler.num_negs_per_pos 89.0 +428 26 training.batch_size 0.0 +428 27 model.embedding_dim 0.0 +428 27 negative_sampler.num_negs_per_pos 82.0 +428 27 training.batch_size 1.0 +428 28 model.embedding_dim 1.0 +428 28 negative_sampler.num_negs_per_pos 45.0 +428 28 training.batch_size 0.0 +428 29 
model.embedding_dim 1.0 +428 29 negative_sampler.num_negs_per_pos 14.0 +428 29 training.batch_size 2.0 +428 30 model.embedding_dim 2.0 +428 30 negative_sampler.num_negs_per_pos 27.0 +428 30 training.batch_size 1.0 +428 31 model.embedding_dim 2.0 +428 31 negative_sampler.num_negs_per_pos 72.0 +428 31 training.batch_size 0.0 +428 32 model.embedding_dim 0.0 +428 32 negative_sampler.num_negs_per_pos 64.0 +428 32 training.batch_size 1.0 +428 33 model.embedding_dim 0.0 +428 33 negative_sampler.num_negs_per_pos 24.0 +428 33 training.batch_size 0.0 +428 34 model.embedding_dim 1.0 +428 34 negative_sampler.num_negs_per_pos 83.0 +428 34 training.batch_size 0.0 +428 35 model.embedding_dim 0.0 +428 35 negative_sampler.num_negs_per_pos 36.0 +428 35 training.batch_size 2.0 +428 36 model.embedding_dim 1.0 +428 36 negative_sampler.num_negs_per_pos 7.0 +428 36 training.batch_size 1.0 +428 37 model.embedding_dim 0.0 +428 37 negative_sampler.num_negs_per_pos 16.0 +428 37 training.batch_size 1.0 +428 38 model.embedding_dim 0.0 +428 38 negative_sampler.num_negs_per_pos 98.0 +428 38 training.batch_size 0.0 +428 39 model.embedding_dim 0.0 +428 39 negative_sampler.num_negs_per_pos 52.0 +428 39 training.batch_size 2.0 +428 40 model.embedding_dim 0.0 +428 40 negative_sampler.num_negs_per_pos 41.0 +428 40 training.batch_size 0.0 +428 41 model.embedding_dim 1.0 +428 41 negative_sampler.num_negs_per_pos 30.0 +428 41 training.batch_size 1.0 +428 42 model.embedding_dim 2.0 +428 42 negative_sampler.num_negs_per_pos 17.0 +428 42 training.batch_size 2.0 +428 43 model.embedding_dim 1.0 +428 43 negative_sampler.num_negs_per_pos 66.0 +428 43 training.batch_size 0.0 +428 44 model.embedding_dim 1.0 +428 44 negative_sampler.num_negs_per_pos 97.0 +428 44 training.batch_size 2.0 +428 45 model.embedding_dim 0.0 +428 45 negative_sampler.num_negs_per_pos 38.0 +428 45 training.batch_size 2.0 +428 46 model.embedding_dim 2.0 +428 46 negative_sampler.num_negs_per_pos 59.0 +428 46 training.batch_size 1.0 +428 47 
model.embedding_dim 0.0 +428 47 negative_sampler.num_negs_per_pos 8.0 +428 47 training.batch_size 1.0 +428 48 model.embedding_dim 1.0 +428 48 negative_sampler.num_negs_per_pos 75.0 +428 48 training.batch_size 0.0 +428 49 model.embedding_dim 1.0 +428 49 negative_sampler.num_negs_per_pos 64.0 +428 49 training.batch_size 0.0 +428 50 model.embedding_dim 1.0 +428 50 negative_sampler.num_negs_per_pos 90.0 +428 50 training.batch_size 0.0 +428 51 model.embedding_dim 1.0 +428 51 negative_sampler.num_negs_per_pos 37.0 +428 51 training.batch_size 2.0 +428 52 model.embedding_dim 1.0 +428 52 negative_sampler.num_negs_per_pos 77.0 +428 52 training.batch_size 2.0 +428 53 model.embedding_dim 1.0 +428 53 negative_sampler.num_negs_per_pos 66.0 +428 53 training.batch_size 1.0 +428 54 model.embedding_dim 2.0 +428 54 negative_sampler.num_negs_per_pos 69.0 +428 54 training.batch_size 1.0 +428 55 model.embedding_dim 1.0 +428 55 negative_sampler.num_negs_per_pos 38.0 +428 55 training.batch_size 1.0 +428 56 model.embedding_dim 1.0 +428 56 negative_sampler.num_negs_per_pos 28.0 +428 56 training.batch_size 2.0 +428 57 model.embedding_dim 0.0 +428 57 negative_sampler.num_negs_per_pos 38.0 +428 57 training.batch_size 1.0 +428 58 model.embedding_dim 0.0 +428 58 negative_sampler.num_negs_per_pos 71.0 +428 58 training.batch_size 2.0 +428 59 model.embedding_dim 0.0 +428 59 negative_sampler.num_negs_per_pos 41.0 +428 59 training.batch_size 2.0 +428 60 model.embedding_dim 0.0 +428 60 negative_sampler.num_negs_per_pos 68.0 +428 60 training.batch_size 1.0 +428 61 model.embedding_dim 1.0 +428 61 negative_sampler.num_negs_per_pos 60.0 +428 61 training.batch_size 2.0 +428 62 model.embedding_dim 1.0 +428 62 negative_sampler.num_negs_per_pos 99.0 +428 62 training.batch_size 2.0 +428 63 model.embedding_dim 1.0 +428 63 negative_sampler.num_negs_per_pos 80.0 +428 63 training.batch_size 2.0 +428 64 model.embedding_dim 0.0 +428 64 negative_sampler.num_negs_per_pos 97.0 +428 64 training.batch_size 0.0 +428 65 
model.embedding_dim 0.0 +428 65 negative_sampler.num_negs_per_pos 57.0 +428 65 training.batch_size 0.0 +428 66 model.embedding_dim 0.0 +428 66 negative_sampler.num_negs_per_pos 90.0 +428 66 training.batch_size 2.0 +428 67 model.embedding_dim 0.0 +428 67 negative_sampler.num_negs_per_pos 90.0 +428 67 training.batch_size 1.0 +428 68 model.embedding_dim 2.0 +428 68 negative_sampler.num_negs_per_pos 6.0 +428 68 training.batch_size 2.0 +428 69 model.embedding_dim 0.0 +428 69 negative_sampler.num_negs_per_pos 97.0 +428 69 training.batch_size 0.0 +428 70 model.embedding_dim 2.0 +428 70 negative_sampler.num_negs_per_pos 66.0 +428 70 training.batch_size 2.0 +428 71 model.embedding_dim 0.0 +428 71 negative_sampler.num_negs_per_pos 44.0 +428 71 training.batch_size 1.0 +428 72 model.embedding_dim 1.0 +428 72 negative_sampler.num_negs_per_pos 36.0 +428 72 training.batch_size 2.0 +428 73 model.embedding_dim 0.0 +428 73 negative_sampler.num_negs_per_pos 49.0 +428 73 training.batch_size 2.0 +428 74 model.embedding_dim 0.0 +428 74 negative_sampler.num_negs_per_pos 53.0 +428 74 training.batch_size 0.0 +428 75 model.embedding_dim 2.0 +428 75 negative_sampler.num_negs_per_pos 22.0 +428 75 training.batch_size 1.0 +428 76 model.embedding_dim 0.0 +428 76 negative_sampler.num_negs_per_pos 93.0 +428 76 training.batch_size 0.0 +428 77 model.embedding_dim 1.0 +428 77 negative_sampler.num_negs_per_pos 64.0 +428 77 training.batch_size 0.0 +428 78 model.embedding_dim 1.0 +428 78 negative_sampler.num_negs_per_pos 39.0 +428 78 training.batch_size 2.0 +428 79 model.embedding_dim 0.0 +428 79 negative_sampler.num_negs_per_pos 7.0 +428 79 training.batch_size 1.0 +428 80 model.embedding_dim 2.0 +428 80 negative_sampler.num_negs_per_pos 37.0 +428 80 training.batch_size 0.0 +428 81 model.embedding_dim 1.0 +428 81 negative_sampler.num_negs_per_pos 7.0 +428 81 training.batch_size 0.0 +428 82 model.embedding_dim 0.0 +428 82 negative_sampler.num_negs_per_pos 53.0 +428 82 training.batch_size 1.0 +428 83 
model.embedding_dim 2.0 +428 83 negative_sampler.num_negs_per_pos 33.0 +428 83 training.batch_size 0.0 +428 84 model.embedding_dim 0.0 +428 84 negative_sampler.num_negs_per_pos 42.0 +428 84 training.batch_size 2.0 +428 85 model.embedding_dim 0.0 +428 85 negative_sampler.num_negs_per_pos 31.0 +428 85 training.batch_size 0.0 +428 86 model.embedding_dim 0.0 +428 86 negative_sampler.num_negs_per_pos 5.0 +428 86 training.batch_size 2.0 +428 87 model.embedding_dim 1.0 +428 87 negative_sampler.num_negs_per_pos 12.0 +428 87 training.batch_size 0.0 +428 88 model.embedding_dim 2.0 +428 88 negative_sampler.num_negs_per_pos 50.0 +428 88 training.batch_size 2.0 +428 89 model.embedding_dim 1.0 +428 89 negative_sampler.num_negs_per_pos 76.0 +428 89 training.batch_size 2.0 +428 90 model.embedding_dim 0.0 +428 90 negative_sampler.num_negs_per_pos 92.0 +428 90 training.batch_size 0.0 +428 91 model.embedding_dim 0.0 +428 91 negative_sampler.num_negs_per_pos 38.0 +428 91 training.batch_size 2.0 +428 92 model.embedding_dim 0.0 +428 92 negative_sampler.num_negs_per_pos 10.0 +428 92 training.batch_size 2.0 +428 93 model.embedding_dim 2.0 +428 93 negative_sampler.num_negs_per_pos 19.0 +428 93 training.batch_size 0.0 +428 94 model.embedding_dim 1.0 +428 94 negative_sampler.num_negs_per_pos 90.0 +428 94 training.batch_size 1.0 +428 95 model.embedding_dim 0.0 +428 95 negative_sampler.num_negs_per_pos 13.0 +428 95 training.batch_size 1.0 +428 96 model.embedding_dim 1.0 +428 96 negative_sampler.num_negs_per_pos 15.0 +428 96 training.batch_size 2.0 +428 97 model.embedding_dim 0.0 +428 97 negative_sampler.num_negs_per_pos 86.0 +428 97 training.batch_size 1.0 +428 98 model.embedding_dim 0.0 +428 98 negative_sampler.num_negs_per_pos 20.0 +428 98 training.batch_size 2.0 +428 99 model.embedding_dim 1.0 +428 99 negative_sampler.num_negs_per_pos 13.0 +428 99 training.batch_size 1.0 +428 100 model.embedding_dim 0.0 +428 100 negative_sampler.num_negs_per_pos 56.0 +428 100 training.batch_size 0.0 +428 1 
dataset """kinships""" +428 1 model """ntn""" +428 1 loss """softplus""" +428 1 regularizer """no""" +428 1 optimizer """adadelta""" +428 1 training_loop """owa""" +428 1 negative_sampler """basic""" +428 1 evaluator """rankbased""" +428 2 dataset """kinships""" +428 2 model """ntn""" +428 2 loss """softplus""" +428 2 regularizer """no""" +428 2 optimizer """adadelta""" +428 2 training_loop """owa""" +428 2 negative_sampler """basic""" +428 2 evaluator """rankbased""" +428 3 dataset """kinships""" +428 3 model """ntn""" +428 3 loss """softplus""" +428 3 regularizer """no""" +428 3 optimizer """adadelta""" +428 3 training_loop """owa""" +428 3 negative_sampler """basic""" +428 3 evaluator """rankbased""" +428 4 dataset """kinships""" +428 4 model """ntn""" +428 4 loss """softplus""" +428 4 regularizer """no""" +428 4 optimizer """adadelta""" +428 4 training_loop """owa""" +428 4 negative_sampler """basic""" +428 4 evaluator """rankbased""" +428 5 dataset """kinships""" +428 5 model """ntn""" +428 5 loss """softplus""" +428 5 regularizer """no""" +428 5 optimizer """adadelta""" +428 5 training_loop """owa""" +428 5 negative_sampler """basic""" +428 5 evaluator """rankbased""" +428 6 dataset """kinships""" +428 6 model """ntn""" +428 6 loss """softplus""" +428 6 regularizer """no""" +428 6 optimizer """adadelta""" +428 6 training_loop """owa""" +428 6 negative_sampler """basic""" +428 6 evaluator """rankbased""" +428 7 dataset """kinships""" +428 7 model """ntn""" +428 7 loss """softplus""" +428 7 regularizer """no""" +428 7 optimizer """adadelta""" +428 7 training_loop """owa""" +428 7 negative_sampler """basic""" +428 7 evaluator """rankbased""" +428 8 dataset """kinships""" +428 8 model """ntn""" +428 8 loss """softplus""" +428 8 regularizer """no""" +428 8 optimizer """adadelta""" +428 8 training_loop """owa""" +428 8 negative_sampler """basic""" +428 8 evaluator """rankbased""" +428 9 dataset """kinships""" +428 9 model """ntn""" +428 9 loss """softplus""" +428 9 
regularizer """no""" +428 9 optimizer """adadelta""" +428 9 training_loop """owa""" +428 9 negative_sampler """basic""" +428 9 evaluator """rankbased""" +428 10 dataset """kinships""" +428 10 model """ntn""" +428 10 loss """softplus""" +428 10 regularizer """no""" +428 10 optimizer """adadelta""" +428 10 training_loop """owa""" +428 10 negative_sampler """basic""" +428 10 evaluator """rankbased""" +428 11 dataset """kinships""" +428 11 model """ntn""" +428 11 loss """softplus""" +428 11 regularizer """no""" +428 11 optimizer """adadelta""" +428 11 training_loop """owa""" +428 11 negative_sampler """basic""" +428 11 evaluator """rankbased""" +428 12 dataset """kinships""" +428 12 model """ntn""" +428 12 loss """softplus""" +428 12 regularizer """no""" +428 12 optimizer """adadelta""" +428 12 training_loop """owa""" +428 12 negative_sampler """basic""" +428 12 evaluator """rankbased""" +428 13 dataset """kinships""" +428 13 model """ntn""" +428 13 loss """softplus""" +428 13 regularizer """no""" +428 13 optimizer """adadelta""" +428 13 training_loop """owa""" +428 13 negative_sampler """basic""" +428 13 evaluator """rankbased""" +428 14 dataset """kinships""" +428 14 model """ntn""" +428 14 loss """softplus""" +428 14 regularizer """no""" +428 14 optimizer """adadelta""" +428 14 training_loop """owa""" +428 14 negative_sampler """basic""" +428 14 evaluator """rankbased""" +428 15 dataset """kinships""" +428 15 model """ntn""" +428 15 loss """softplus""" +428 15 regularizer """no""" +428 15 optimizer """adadelta""" +428 15 training_loop """owa""" +428 15 negative_sampler """basic""" +428 15 evaluator """rankbased""" +428 16 dataset """kinships""" +428 16 model """ntn""" +428 16 loss """softplus""" +428 16 regularizer """no""" +428 16 optimizer """adadelta""" +428 16 training_loop """owa""" +428 16 negative_sampler """basic""" +428 16 evaluator """rankbased""" +428 17 dataset """kinships""" +428 17 model """ntn""" +428 17 loss """softplus""" +428 17 regularizer 
"""no""" +428 17 optimizer """adadelta""" +428 17 training_loop """owa""" +428 17 negative_sampler """basic""" +428 17 evaluator """rankbased""" +428 18 dataset """kinships""" +428 18 model """ntn""" +428 18 loss """softplus""" +428 18 regularizer """no""" +428 18 optimizer """adadelta""" +428 18 training_loop """owa""" +428 18 negative_sampler """basic""" +428 18 evaluator """rankbased""" +428 19 dataset """kinships""" +428 19 model """ntn""" +428 19 loss """softplus""" +428 19 regularizer """no""" +428 19 optimizer """adadelta""" +428 19 training_loop """owa""" +428 19 negative_sampler """basic""" +428 19 evaluator """rankbased""" +428 20 dataset """kinships""" +428 20 model """ntn""" +428 20 loss """softplus""" +428 20 regularizer """no""" +428 20 optimizer """adadelta""" +428 20 training_loop """owa""" +428 20 negative_sampler """basic""" +428 20 evaluator """rankbased""" +428 21 dataset """kinships""" +428 21 model """ntn""" +428 21 loss """softplus""" +428 21 regularizer """no""" +428 21 optimizer """adadelta""" +428 21 training_loop """owa""" +428 21 negative_sampler """basic""" +428 21 evaluator """rankbased""" +428 22 dataset """kinships""" +428 22 model """ntn""" +428 22 loss """softplus""" +428 22 regularizer """no""" +428 22 optimizer """adadelta""" +428 22 training_loop """owa""" +428 22 negative_sampler """basic""" +428 22 evaluator """rankbased""" +428 23 dataset """kinships""" +428 23 model """ntn""" +428 23 loss """softplus""" +428 23 regularizer """no""" +428 23 optimizer """adadelta""" +428 23 training_loop """owa""" +428 23 negative_sampler """basic""" +428 23 evaluator """rankbased""" +428 24 dataset """kinships""" +428 24 model """ntn""" +428 24 loss """softplus""" +428 24 regularizer """no""" +428 24 optimizer """adadelta""" +428 24 training_loop """owa""" +428 24 negative_sampler """basic""" +428 24 evaluator """rankbased""" +428 25 dataset """kinships""" +428 25 model """ntn""" +428 25 loss """softplus""" +428 25 regularizer """no""" +428 
25 optimizer """adadelta""" +428 25 training_loop """owa""" +428 25 negative_sampler """basic""" +428 25 evaluator """rankbased""" +428 26 dataset """kinships""" +428 26 model """ntn""" +428 26 loss """softplus""" +428 26 regularizer """no""" +428 26 optimizer """adadelta""" +428 26 training_loop """owa""" +428 26 negative_sampler """basic""" +428 26 evaluator """rankbased""" +428 27 dataset """kinships""" +428 27 model """ntn""" +428 27 loss """softplus""" +428 27 regularizer """no""" +428 27 optimizer """adadelta""" +428 27 training_loop """owa""" +428 27 negative_sampler """basic""" +428 27 evaluator """rankbased""" +428 28 dataset """kinships""" +428 28 model """ntn""" +428 28 loss """softplus""" +428 28 regularizer """no""" +428 28 optimizer """adadelta""" +428 28 training_loop """owa""" +428 28 negative_sampler """basic""" +428 28 evaluator """rankbased""" +428 29 dataset """kinships""" +428 29 model """ntn""" +428 29 loss """softplus""" +428 29 regularizer """no""" +428 29 optimizer """adadelta""" +428 29 training_loop """owa""" +428 29 negative_sampler """basic""" +428 29 evaluator """rankbased""" +428 30 dataset """kinships""" +428 30 model """ntn""" +428 30 loss """softplus""" +428 30 regularizer """no""" +428 30 optimizer """adadelta""" +428 30 training_loop """owa""" +428 30 negative_sampler """basic""" +428 30 evaluator """rankbased""" +428 31 dataset """kinships""" +428 31 model """ntn""" +428 31 loss """softplus""" +428 31 regularizer """no""" +428 31 optimizer """adadelta""" +428 31 training_loop """owa""" +428 31 negative_sampler """basic""" +428 31 evaluator """rankbased""" +428 32 dataset """kinships""" +428 32 model """ntn""" +428 32 loss """softplus""" +428 32 regularizer """no""" +428 32 optimizer """adadelta""" +428 32 training_loop """owa""" +428 32 negative_sampler """basic""" +428 32 evaluator """rankbased""" +428 33 dataset """kinships""" +428 33 model """ntn""" +428 33 loss """softplus""" +428 33 regularizer """no""" +428 33 optimizer 
"""adadelta""" +428 33 training_loop """owa""" +428 33 negative_sampler """basic""" +428 33 evaluator """rankbased""" +428 34 dataset """kinships""" +428 34 model """ntn""" +428 34 loss """softplus""" +428 34 regularizer """no""" +428 34 optimizer """adadelta""" +428 34 training_loop """owa""" +428 34 negative_sampler """basic""" +428 34 evaluator """rankbased""" +428 35 dataset """kinships""" +428 35 model """ntn""" +428 35 loss """softplus""" +428 35 regularizer """no""" +428 35 optimizer """adadelta""" +428 35 training_loop """owa""" +428 35 negative_sampler """basic""" +428 35 evaluator """rankbased""" +428 36 dataset """kinships""" +428 36 model """ntn""" +428 36 loss """softplus""" +428 36 regularizer """no""" +428 36 optimizer """adadelta""" +428 36 training_loop """owa""" +428 36 negative_sampler """basic""" +428 36 evaluator """rankbased""" +428 37 dataset """kinships""" +428 37 model """ntn""" +428 37 loss """softplus""" +428 37 regularizer """no""" +428 37 optimizer """adadelta""" +428 37 training_loop """owa""" +428 37 negative_sampler """basic""" +428 37 evaluator """rankbased""" +428 38 dataset """kinships""" +428 38 model """ntn""" +428 38 loss """softplus""" +428 38 regularizer """no""" +428 38 optimizer """adadelta""" +428 38 training_loop """owa""" +428 38 negative_sampler """basic""" +428 38 evaluator """rankbased""" +428 39 dataset """kinships""" +428 39 model """ntn""" +428 39 loss """softplus""" +428 39 regularizer """no""" +428 39 optimizer """adadelta""" +428 39 training_loop """owa""" +428 39 negative_sampler """basic""" +428 39 evaluator """rankbased""" +428 40 dataset """kinships""" +428 40 model """ntn""" +428 40 loss """softplus""" +428 40 regularizer """no""" +428 40 optimizer """adadelta""" +428 40 training_loop """owa""" +428 40 negative_sampler """basic""" +428 40 evaluator """rankbased""" +428 41 dataset """kinships""" +428 41 model """ntn""" +428 41 loss """softplus""" +428 41 regularizer """no""" +428 41 optimizer """adadelta""" 
+428 41 training_loop """owa""" +428 41 negative_sampler """basic""" +428 41 evaluator """rankbased""" +428 42 dataset """kinships""" +428 42 model """ntn""" +428 42 loss """softplus""" +428 42 regularizer """no""" +428 42 optimizer """adadelta""" +428 42 training_loop """owa""" +428 42 negative_sampler """basic""" +428 42 evaluator """rankbased""" +428 43 dataset """kinships""" +428 43 model """ntn""" +428 43 loss """softplus""" +428 43 regularizer """no""" +428 43 optimizer """adadelta""" +428 43 training_loop """owa""" +428 43 negative_sampler """basic""" +428 43 evaluator """rankbased""" +428 44 dataset """kinships""" +428 44 model """ntn""" +428 44 loss """softplus""" +428 44 regularizer """no""" +428 44 optimizer """adadelta""" +428 44 training_loop """owa""" +428 44 negative_sampler """basic""" +428 44 evaluator """rankbased""" +428 45 dataset """kinships""" +428 45 model """ntn""" +428 45 loss """softplus""" +428 45 regularizer """no""" +428 45 optimizer """adadelta""" +428 45 training_loop """owa""" +428 45 negative_sampler """basic""" +428 45 evaluator """rankbased""" +428 46 dataset """kinships""" +428 46 model """ntn""" +428 46 loss """softplus""" +428 46 regularizer """no""" +428 46 optimizer """adadelta""" +428 46 training_loop """owa""" +428 46 negative_sampler """basic""" +428 46 evaluator """rankbased""" +428 47 dataset """kinships""" +428 47 model """ntn""" +428 47 loss """softplus""" +428 47 regularizer """no""" +428 47 optimizer """adadelta""" +428 47 training_loop """owa""" +428 47 negative_sampler """basic""" +428 47 evaluator """rankbased""" +428 48 dataset """kinships""" +428 48 model """ntn""" +428 48 loss """softplus""" +428 48 regularizer """no""" +428 48 optimizer """adadelta""" +428 48 training_loop """owa""" +428 48 negative_sampler """basic""" +428 48 evaluator """rankbased""" +428 49 dataset """kinships""" +428 49 model """ntn""" +428 49 loss """softplus""" +428 49 regularizer """no""" +428 49 optimizer """adadelta""" +428 49 
training_loop """owa""" +428 49 negative_sampler """basic""" +428 49 evaluator """rankbased""" +428 50 dataset """kinships""" +428 50 model """ntn""" +428 50 loss """softplus""" +428 50 regularizer """no""" +428 50 optimizer """adadelta""" +428 50 training_loop """owa""" +428 50 negative_sampler """basic""" +428 50 evaluator """rankbased""" +428 51 dataset """kinships""" +428 51 model """ntn""" +428 51 loss """softplus""" +428 51 regularizer """no""" +428 51 optimizer """adadelta""" +428 51 training_loop """owa""" +428 51 negative_sampler """basic""" +428 51 evaluator """rankbased""" +428 52 dataset """kinships""" +428 52 model """ntn""" +428 52 loss """softplus""" +428 52 regularizer """no""" +428 52 optimizer """adadelta""" +428 52 training_loop """owa""" +428 52 negative_sampler """basic""" +428 52 evaluator """rankbased""" +428 53 dataset """kinships""" +428 53 model """ntn""" +428 53 loss """softplus""" +428 53 regularizer """no""" +428 53 optimizer """adadelta""" +428 53 training_loop """owa""" +428 53 negative_sampler """basic""" +428 53 evaluator """rankbased""" +428 54 dataset """kinships""" +428 54 model """ntn""" +428 54 loss """softplus""" +428 54 regularizer """no""" +428 54 optimizer """adadelta""" +428 54 training_loop """owa""" +428 54 negative_sampler """basic""" +428 54 evaluator """rankbased""" +428 55 dataset """kinships""" +428 55 model """ntn""" +428 55 loss """softplus""" +428 55 regularizer """no""" +428 55 optimizer """adadelta""" +428 55 training_loop """owa""" +428 55 negative_sampler """basic""" +428 55 evaluator """rankbased""" +428 56 dataset """kinships""" +428 56 model """ntn""" +428 56 loss """softplus""" +428 56 regularizer """no""" +428 56 optimizer """adadelta""" +428 56 training_loop """owa""" +428 56 negative_sampler """basic""" +428 56 evaluator """rankbased""" +428 57 dataset """kinships""" +428 57 model """ntn""" +428 57 loss """softplus""" +428 57 regularizer """no""" +428 57 optimizer """adadelta""" +428 57 training_loop 
"""owa""" +428 57 negative_sampler """basic""" +428 57 evaluator """rankbased""" +428 58 dataset """kinships""" +428 58 model """ntn""" +428 58 loss """softplus""" +428 58 regularizer """no""" +428 58 optimizer """adadelta""" +428 58 training_loop """owa""" +428 58 negative_sampler """basic""" +428 58 evaluator """rankbased""" +428 59 dataset """kinships""" +428 59 model """ntn""" +428 59 loss """softplus""" +428 59 regularizer """no""" +428 59 optimizer """adadelta""" +428 59 training_loop """owa""" +428 59 negative_sampler """basic""" +428 59 evaluator """rankbased""" +428 60 dataset """kinships""" +428 60 model """ntn""" +428 60 loss """softplus""" +428 60 regularizer """no""" +428 60 optimizer """adadelta""" +428 60 training_loop """owa""" +428 60 negative_sampler """basic""" +428 60 evaluator """rankbased""" +428 61 dataset """kinships""" +428 61 model """ntn""" +428 61 loss """softplus""" +428 61 regularizer """no""" +428 61 optimizer """adadelta""" +428 61 training_loop """owa""" +428 61 negative_sampler """basic""" +428 61 evaluator """rankbased""" +428 62 dataset """kinships""" +428 62 model """ntn""" +428 62 loss """softplus""" +428 62 regularizer """no""" +428 62 optimizer """adadelta""" +428 62 training_loop """owa""" +428 62 negative_sampler """basic""" +428 62 evaluator """rankbased""" +428 63 dataset """kinships""" +428 63 model """ntn""" +428 63 loss """softplus""" +428 63 regularizer """no""" +428 63 optimizer """adadelta""" +428 63 training_loop """owa""" +428 63 negative_sampler """basic""" +428 63 evaluator """rankbased""" +428 64 dataset """kinships""" +428 64 model """ntn""" +428 64 loss """softplus""" +428 64 regularizer """no""" +428 64 optimizer """adadelta""" +428 64 training_loop """owa""" +428 64 negative_sampler """basic""" +428 64 evaluator """rankbased""" +428 65 dataset """kinships""" +428 65 model """ntn""" +428 65 loss """softplus""" +428 65 regularizer """no""" +428 65 optimizer """adadelta""" +428 65 training_loop """owa""" +428 
65 negative_sampler """basic""" +428 65 evaluator """rankbased""" +428 66 dataset """kinships""" +428 66 model """ntn""" +428 66 loss """softplus""" +428 66 regularizer """no""" +428 66 optimizer """adadelta""" +428 66 training_loop """owa""" +428 66 negative_sampler """basic""" +428 66 evaluator """rankbased""" +428 67 dataset """kinships""" +428 67 model """ntn""" +428 67 loss """softplus""" +428 67 regularizer """no""" +428 67 optimizer """adadelta""" +428 67 training_loop """owa""" +428 67 negative_sampler """basic""" +428 67 evaluator """rankbased""" +428 68 dataset """kinships""" +428 68 model """ntn""" +428 68 loss """softplus""" +428 68 regularizer """no""" +428 68 optimizer """adadelta""" +428 68 training_loop """owa""" +428 68 negative_sampler """basic""" +428 68 evaluator """rankbased""" +428 69 dataset """kinships""" +428 69 model """ntn""" +428 69 loss """softplus""" +428 69 regularizer """no""" +428 69 optimizer """adadelta""" +428 69 training_loop """owa""" +428 69 negative_sampler """basic""" +428 69 evaluator """rankbased""" +428 70 dataset """kinships""" +428 70 model """ntn""" +428 70 loss """softplus""" +428 70 regularizer """no""" +428 70 optimizer """adadelta""" +428 70 training_loop """owa""" +428 70 negative_sampler """basic""" +428 70 evaluator """rankbased""" +428 71 dataset """kinships""" +428 71 model """ntn""" +428 71 loss """softplus""" +428 71 regularizer """no""" +428 71 optimizer """adadelta""" +428 71 training_loop """owa""" +428 71 negative_sampler """basic""" +428 71 evaluator """rankbased""" +428 72 dataset """kinships""" +428 72 model """ntn""" +428 72 loss """softplus""" +428 72 regularizer """no""" +428 72 optimizer """adadelta""" +428 72 training_loop """owa""" +428 72 negative_sampler """basic""" +428 72 evaluator """rankbased""" +428 73 dataset """kinships""" +428 73 model """ntn""" +428 73 loss """softplus""" +428 73 regularizer """no""" +428 73 optimizer """adadelta""" +428 73 training_loop """owa""" +428 73 
negative_sampler """basic""" +428 73 evaluator """rankbased""" +428 74 dataset """kinships""" +428 74 model """ntn""" +428 74 loss """softplus""" +428 74 regularizer """no""" +428 74 optimizer """adadelta""" +428 74 training_loop """owa""" +428 74 negative_sampler """basic""" +428 74 evaluator """rankbased""" +428 75 dataset """kinships""" +428 75 model """ntn""" +428 75 loss """softplus""" +428 75 regularizer """no""" +428 75 optimizer """adadelta""" +428 75 training_loop """owa""" +428 75 negative_sampler """basic""" +428 75 evaluator """rankbased""" +428 76 dataset """kinships""" +428 76 model """ntn""" +428 76 loss """softplus""" +428 76 regularizer """no""" +428 76 optimizer """adadelta""" +428 76 training_loop """owa""" +428 76 negative_sampler """basic""" +428 76 evaluator """rankbased""" +428 77 dataset """kinships""" +428 77 model """ntn""" +428 77 loss """softplus""" +428 77 regularizer """no""" +428 77 optimizer """adadelta""" +428 77 training_loop """owa""" +428 77 negative_sampler """basic""" +428 77 evaluator """rankbased""" +428 78 dataset """kinships""" +428 78 model """ntn""" +428 78 loss """softplus""" +428 78 regularizer """no""" +428 78 optimizer """adadelta""" +428 78 training_loop """owa""" +428 78 negative_sampler """basic""" +428 78 evaluator """rankbased""" +428 79 dataset """kinships""" +428 79 model """ntn""" +428 79 loss """softplus""" +428 79 regularizer """no""" +428 79 optimizer """adadelta""" +428 79 training_loop """owa""" +428 79 negative_sampler """basic""" +428 79 evaluator """rankbased""" +428 80 dataset """kinships""" +428 80 model """ntn""" +428 80 loss """softplus""" +428 80 regularizer """no""" +428 80 optimizer """adadelta""" +428 80 training_loop """owa""" +428 80 negative_sampler """basic""" +428 80 evaluator """rankbased""" +428 81 dataset """kinships""" +428 81 model """ntn""" +428 81 loss """softplus""" +428 81 regularizer """no""" +428 81 optimizer """adadelta""" +428 81 training_loop """owa""" +428 81 
negative_sampler """basic""" +428 81 evaluator """rankbased""" +428 82 dataset """kinships""" +428 82 model """ntn""" +428 82 loss """softplus""" +428 82 regularizer """no""" +428 82 optimizer """adadelta""" +428 82 training_loop """owa""" +428 82 negative_sampler """basic""" +428 82 evaluator """rankbased""" +428 83 dataset """kinships""" +428 83 model """ntn""" +428 83 loss """softplus""" +428 83 regularizer """no""" +428 83 optimizer """adadelta""" +428 83 training_loop """owa""" +428 83 negative_sampler """basic""" +428 83 evaluator """rankbased""" +428 84 dataset """kinships""" +428 84 model """ntn""" +428 84 loss """softplus""" +428 84 regularizer """no""" +428 84 optimizer """adadelta""" +428 84 training_loop """owa""" +428 84 negative_sampler """basic""" +428 84 evaluator """rankbased""" +428 85 dataset """kinships""" +428 85 model """ntn""" +428 85 loss """softplus""" +428 85 regularizer """no""" +428 85 optimizer """adadelta""" +428 85 training_loop """owa""" +428 85 negative_sampler """basic""" +428 85 evaluator """rankbased""" +428 86 dataset """kinships""" +428 86 model """ntn""" +428 86 loss """softplus""" +428 86 regularizer """no""" +428 86 optimizer """adadelta""" +428 86 training_loop """owa""" +428 86 negative_sampler """basic""" +428 86 evaluator """rankbased""" +428 87 dataset """kinships""" +428 87 model """ntn""" +428 87 loss """softplus""" +428 87 regularizer """no""" +428 87 optimizer """adadelta""" +428 87 training_loop """owa""" +428 87 negative_sampler """basic""" +428 87 evaluator """rankbased""" +428 88 dataset """kinships""" +428 88 model """ntn""" +428 88 loss """softplus""" +428 88 regularizer """no""" +428 88 optimizer """adadelta""" +428 88 training_loop """owa""" +428 88 negative_sampler """basic""" +428 88 evaluator """rankbased""" +428 89 dataset """kinships""" +428 89 model """ntn""" +428 89 loss """softplus""" +428 89 regularizer """no""" +428 89 optimizer """adadelta""" +428 89 training_loop """owa""" +428 89 
negative_sampler """basic""" +428 89 evaluator """rankbased""" +428 90 dataset """kinships""" +428 90 model """ntn""" +428 90 loss """softplus""" +428 90 regularizer """no""" +428 90 optimizer """adadelta""" +428 90 training_loop """owa""" +428 90 negative_sampler """basic""" +428 90 evaluator """rankbased""" +428 91 dataset """kinships""" +428 91 model """ntn""" +428 91 loss """softplus""" +428 91 regularizer """no""" +428 91 optimizer """adadelta""" +428 91 training_loop """owa""" +428 91 negative_sampler """basic""" +428 91 evaluator """rankbased""" +428 92 dataset """kinships""" +428 92 model """ntn""" +428 92 loss """softplus""" +428 92 regularizer """no""" +428 92 optimizer """adadelta""" +428 92 training_loop """owa""" +428 92 negative_sampler """basic""" +428 92 evaluator """rankbased""" +428 93 dataset """kinships""" +428 93 model """ntn""" +428 93 loss """softplus""" +428 93 regularizer """no""" +428 93 optimizer """adadelta""" +428 93 training_loop """owa""" +428 93 negative_sampler """basic""" +428 93 evaluator """rankbased""" +428 94 dataset """kinships""" +428 94 model """ntn""" +428 94 loss """softplus""" +428 94 regularizer """no""" +428 94 optimizer """adadelta""" +428 94 training_loop """owa""" +428 94 negative_sampler """basic""" +428 94 evaluator """rankbased""" +428 95 dataset """kinships""" +428 95 model """ntn""" +428 95 loss """softplus""" +428 95 regularizer """no""" +428 95 optimizer """adadelta""" +428 95 training_loop """owa""" +428 95 negative_sampler """basic""" +428 95 evaluator """rankbased""" +428 96 dataset """kinships""" +428 96 model """ntn""" +428 96 loss """softplus""" +428 96 regularizer """no""" +428 96 optimizer """adadelta""" +428 96 training_loop """owa""" +428 96 negative_sampler """basic""" +428 96 evaluator """rankbased""" +428 97 dataset """kinships""" +428 97 model """ntn""" +428 97 loss """softplus""" +428 97 regularizer """no""" +428 97 optimizer """adadelta""" +428 97 training_loop """owa""" +428 97 
negative_sampler """basic""" +428 97 evaluator """rankbased""" +428 98 dataset """kinships""" +428 98 model """ntn""" +428 98 loss """softplus""" +428 98 regularizer """no""" +428 98 optimizer """adadelta""" +428 98 training_loop """owa""" +428 98 negative_sampler """basic""" +428 98 evaluator """rankbased""" +428 99 dataset """kinships""" +428 99 model """ntn""" +428 99 loss """softplus""" +428 99 regularizer """no""" +428 99 optimizer """adadelta""" +428 99 training_loop """owa""" +428 99 negative_sampler """basic""" +428 99 evaluator """rankbased""" +428 100 dataset """kinships""" +428 100 model """ntn""" +428 100 loss """softplus""" +428 100 regularizer """no""" +428 100 optimizer """adadelta""" +428 100 training_loop """owa""" +428 100 negative_sampler """basic""" +428 100 evaluator """rankbased""" +429 1 model.embedding_dim 2.0 +429 1 optimizer.lr 0.09188399686464631 +429 1 training.batch_size 2.0 +429 1 training.label_smoothing 0.7290235003674221 +429 2 model.embedding_dim 0.0 +429 2 optimizer.lr 0.0031054303087677195 +429 2 training.batch_size 2.0 +429 2 training.label_smoothing 0.012200506906884793 +429 3 model.embedding_dim 0.0 +429 3 optimizer.lr 0.0061780618600643464 +429 3 training.batch_size 1.0 +429 3 training.label_smoothing 0.034068079348097205 +429 4 model.embedding_dim 0.0 +429 4 optimizer.lr 0.09279965990440033 +429 4 training.batch_size 1.0 +429 4 training.label_smoothing 0.008442616110479475 +429 5 model.embedding_dim 2.0 +429 5 optimizer.lr 0.0033913395399234885 +429 5 training.batch_size 1.0 +429 5 training.label_smoothing 0.3918213947875838 +429 6 model.embedding_dim 0.0 +429 6 optimizer.lr 0.00618581793290911 +429 6 training.batch_size 1.0 +429 6 training.label_smoothing 0.08925411261393398 +429 7 model.embedding_dim 2.0 +429 7 optimizer.lr 0.0012314414916976325 +429 7 training.batch_size 1.0 +429 7 training.label_smoothing 0.009024936513651787 +429 8 model.embedding_dim 1.0 +429 8 optimizer.lr 0.005476974840567186 +429 8 
training.batch_size 2.0 +429 8 training.label_smoothing 0.13459408313370114 +429 9 model.embedding_dim 2.0 +429 9 optimizer.lr 0.0016962720061488778 +429 9 training.batch_size 1.0 +429 9 training.label_smoothing 0.0038606476644072203 +429 10 model.embedding_dim 1.0 +429 10 optimizer.lr 0.03212189112967951 +429 10 training.batch_size 0.0 +429 10 training.label_smoothing 0.0010236016655173583 +429 11 model.embedding_dim 2.0 +429 11 optimizer.lr 0.0014062545835989934 +429 11 training.batch_size 1.0 +429 11 training.label_smoothing 0.4306024188964172 +429 12 model.embedding_dim 1.0 +429 12 optimizer.lr 0.05964387127291338 +429 12 training.batch_size 0.0 +429 12 training.label_smoothing 0.23183110115243746 +429 13 model.embedding_dim 1.0 +429 13 optimizer.lr 0.009484076623235848 +429 13 training.batch_size 0.0 +429 13 training.label_smoothing 0.329495620467536 +429 14 model.embedding_dim 0.0 +429 14 optimizer.lr 0.06689688312122125 +429 14 training.batch_size 2.0 +429 14 training.label_smoothing 0.7452405391933408 +429 15 model.embedding_dim 2.0 +429 15 optimizer.lr 0.0029138548264933672 +429 15 training.batch_size 0.0 +429 15 training.label_smoothing 0.002808212821058964 +429 16 model.embedding_dim 2.0 +429 16 optimizer.lr 0.004487957442214453 +429 16 training.batch_size 1.0 +429 16 training.label_smoothing 0.03645102317660179 +429 17 model.embedding_dim 0.0 +429 17 optimizer.lr 0.0017682872858505035 +429 17 training.batch_size 2.0 +429 17 training.label_smoothing 0.006760256607452251 +429 18 model.embedding_dim 1.0 +429 18 optimizer.lr 0.01029137310402132 +429 18 training.batch_size 0.0 +429 18 training.label_smoothing 0.0139173548666167 +429 19 model.embedding_dim 2.0 +429 19 optimizer.lr 0.0764470742212467 +429 19 training.batch_size 1.0 +429 19 training.label_smoothing 0.008560833283645953 +429 20 model.embedding_dim 0.0 +429 20 optimizer.lr 0.09833464950997656 +429 20 training.batch_size 1.0 +429 20 training.label_smoothing 0.17829736863318696 +429 21 
model.embedding_dim 2.0 +429 21 optimizer.lr 0.04418852929720799 +429 21 training.batch_size 1.0 +429 21 training.label_smoothing 0.002919436868900567 +429 22 model.embedding_dim 0.0 +429 22 optimizer.lr 0.0014851990986298486 +429 22 training.batch_size 2.0 +429 22 training.label_smoothing 0.8530878144414011 +429 23 model.embedding_dim 2.0 +429 23 optimizer.lr 0.08732911446202471 +429 23 training.batch_size 2.0 +429 23 training.label_smoothing 0.27084642982613966 +429 24 model.embedding_dim 1.0 +429 24 optimizer.lr 0.026276425369585504 +429 24 training.batch_size 0.0 +429 24 training.label_smoothing 0.006565092393128272 +429 25 model.embedding_dim 1.0 +429 25 optimizer.lr 0.014554066227262274 +429 25 training.batch_size 0.0 +429 25 training.label_smoothing 0.30776236873122703 +429 26 model.embedding_dim 2.0 +429 26 optimizer.lr 0.008744452816030106 +429 26 training.batch_size 1.0 +429 26 training.label_smoothing 0.057624536152950843 +429 27 model.embedding_dim 2.0 +429 27 optimizer.lr 0.015222373475265057 +429 27 training.batch_size 2.0 +429 27 training.label_smoothing 0.04458663817339566 +429 28 model.embedding_dim 2.0 +429 28 optimizer.lr 0.01114701481032104 +429 28 training.batch_size 2.0 +429 28 training.label_smoothing 0.025437864777962695 +429 29 model.embedding_dim 0.0 +429 29 optimizer.lr 0.018263670499852946 +429 29 training.batch_size 0.0 +429 29 training.label_smoothing 0.11145772525097594 +429 30 model.embedding_dim 1.0 +429 30 optimizer.lr 0.0012257733561703802 +429 30 training.batch_size 2.0 +429 30 training.label_smoothing 0.022948144108562357 +429 31 model.embedding_dim 2.0 +429 31 optimizer.lr 0.0020796904988057818 +429 31 training.batch_size 1.0 +429 31 training.label_smoothing 0.008868829088026109 +429 32 model.embedding_dim 2.0 +429 32 optimizer.lr 0.0016351874680275557 +429 32 training.batch_size 1.0 +429 32 training.label_smoothing 0.07572168524734411 +429 33 model.embedding_dim 2.0 +429 33 optimizer.lr 0.042635304512235836 +429 33 
training.batch_size 1.0 +429 33 training.label_smoothing 0.03902510880725463 +429 34 model.embedding_dim 2.0 +429 34 optimizer.lr 0.06849413268928405 +429 34 training.batch_size 2.0 +429 34 training.label_smoothing 0.049119999306006565 +429 35 model.embedding_dim 1.0 +429 35 optimizer.lr 0.01742180417465319 +429 35 training.batch_size 0.0 +429 35 training.label_smoothing 0.08146406664870913 +429 36 model.embedding_dim 2.0 +429 36 optimizer.lr 0.04130999739392229 +429 36 training.batch_size 1.0 +429 36 training.label_smoothing 0.8121292369858842 +429 37 model.embedding_dim 1.0 +429 37 optimizer.lr 0.004985385297386604 +429 37 training.batch_size 0.0 +429 37 training.label_smoothing 0.11676431083236946 +429 38 model.embedding_dim 2.0 +429 38 optimizer.lr 0.009753373922658456 +429 38 training.batch_size 1.0 +429 38 training.label_smoothing 0.4873748318182192 +429 39 model.embedding_dim 2.0 +429 39 optimizer.lr 0.013109085491804777 +429 39 training.batch_size 1.0 +429 39 training.label_smoothing 0.045762385143193966 +429 40 model.embedding_dim 0.0 +429 40 optimizer.lr 0.04448856282552068 +429 40 training.batch_size 1.0 +429 40 training.label_smoothing 0.0032674493087103576 +429 41 model.embedding_dim 1.0 +429 41 optimizer.lr 0.03280545566468881 +429 41 training.batch_size 1.0 +429 41 training.label_smoothing 0.024693920227039836 +429 42 model.embedding_dim 2.0 +429 42 optimizer.lr 0.05026312584709177 +429 42 training.batch_size 1.0 +429 42 training.label_smoothing 0.5888379003554574 +429 43 model.embedding_dim 2.0 +429 43 optimizer.lr 0.023773653126340652 +429 43 training.batch_size 2.0 +429 43 training.label_smoothing 0.11800107315181582 +429 44 model.embedding_dim 0.0 +429 44 optimizer.lr 0.004600298549890595 +429 44 training.batch_size 1.0 +429 44 training.label_smoothing 0.2448298393265004 +429 45 model.embedding_dim 2.0 +429 45 optimizer.lr 0.05227214824601778 +429 45 training.batch_size 0.0 +429 45 training.label_smoothing 0.05731357857399243 +429 46 
model.embedding_dim 2.0 +429 46 optimizer.lr 0.01203283625620554 +429 46 training.batch_size 2.0 +429 46 training.label_smoothing 0.009719095653167593 +429 47 model.embedding_dim 0.0 +429 47 optimizer.lr 0.0512064403373613 +429 47 training.batch_size 0.0 +429 47 training.label_smoothing 0.47917803212254634 +429 48 model.embedding_dim 2.0 +429 48 optimizer.lr 0.0012516206089176606 +429 48 training.batch_size 0.0 +429 48 training.label_smoothing 0.5221762908643882 +429 49 model.embedding_dim 2.0 +429 49 optimizer.lr 0.0035133504387511224 +429 49 training.batch_size 0.0 +429 49 training.label_smoothing 0.7828937058156731 +429 50 model.embedding_dim 2.0 +429 50 optimizer.lr 0.011351726725113644 +429 50 training.batch_size 2.0 +429 50 training.label_smoothing 0.033439461513217354 +429 51 model.embedding_dim 2.0 +429 51 optimizer.lr 0.0062886378653669766 +429 51 training.batch_size 0.0 +429 51 training.label_smoothing 0.08092327643821312 +429 52 model.embedding_dim 2.0 +429 52 optimizer.lr 0.016992397262989036 +429 52 training.batch_size 2.0 +429 52 training.label_smoothing 0.21924515957371365 +429 53 model.embedding_dim 2.0 +429 53 optimizer.lr 0.003265674223323267 +429 53 training.batch_size 2.0 +429 53 training.label_smoothing 0.00138271134330885 +429 54 model.embedding_dim 2.0 +429 54 optimizer.lr 0.08514257746527305 +429 54 training.batch_size 1.0 +429 54 training.label_smoothing 0.36409001278247005 +429 55 model.embedding_dim 1.0 +429 55 optimizer.lr 0.0010903039040802912 +429 55 training.batch_size 0.0 +429 55 training.label_smoothing 0.6684703875876 +429 56 model.embedding_dim 2.0 +429 56 optimizer.lr 0.009156015164966596 +429 56 training.batch_size 1.0 +429 56 training.label_smoothing 0.7371874264787598 +429 57 model.embedding_dim 1.0 +429 57 optimizer.lr 0.01719498813581251 +429 57 training.batch_size 0.0 +429 57 training.label_smoothing 0.03173406869130454 +429 58 model.embedding_dim 1.0 +429 58 optimizer.lr 0.0010340381270623016 +429 58 training.batch_size 
2.0 +429 58 training.label_smoothing 0.3811263304812336 +429 59 model.embedding_dim 0.0 +429 59 optimizer.lr 0.04854685969651839 +429 59 training.batch_size 2.0 +429 59 training.label_smoothing 0.004056321260533208 +429 60 model.embedding_dim 0.0 +429 60 optimizer.lr 0.03945420707933236 +429 60 training.batch_size 2.0 +429 60 training.label_smoothing 0.11973009319154969 +429 61 model.embedding_dim 0.0 +429 61 optimizer.lr 0.029527766617726443 +429 61 training.batch_size 2.0 +429 61 training.label_smoothing 0.040589470674240305 +429 62 model.embedding_dim 0.0 +429 62 optimizer.lr 0.013330876249661425 +429 62 training.batch_size 1.0 +429 62 training.label_smoothing 0.03312183287540022 +429 63 model.embedding_dim 0.0 +429 63 optimizer.lr 0.03388962408265823 +429 63 training.batch_size 0.0 +429 63 training.label_smoothing 0.0016512995153803737 +429 64 model.embedding_dim 0.0 +429 64 optimizer.lr 0.004578277666794145 +429 64 training.batch_size 2.0 +429 64 training.label_smoothing 0.0032568680927988402 +429 65 model.embedding_dim 1.0 +429 65 optimizer.lr 0.014947584625474459 +429 65 training.batch_size 0.0 +429 65 training.label_smoothing 0.37460743922254724 +429 66 model.embedding_dim 0.0 +429 66 optimizer.lr 0.007596876315556304 +429 66 training.batch_size 0.0 +429 66 training.label_smoothing 0.0357433244490599 +429 67 model.embedding_dim 0.0 +429 67 optimizer.lr 0.00865971234594689 +429 67 training.batch_size 0.0 +429 67 training.label_smoothing 0.0016040524061926643 +429 68 model.embedding_dim 1.0 +429 68 optimizer.lr 0.012423651667467624 +429 68 training.batch_size 2.0 +429 68 training.label_smoothing 0.021300745285014598 +429 69 model.embedding_dim 0.0 +429 69 optimizer.lr 0.004865311369196758 +429 69 training.batch_size 1.0 +429 69 training.label_smoothing 0.041749596774247444 +429 70 model.embedding_dim 2.0 +429 70 optimizer.lr 0.001436732812833913 +429 70 training.batch_size 2.0 +429 70 training.label_smoothing 0.028293576058894192 +429 71 model.embedding_dim 
2.0 +429 71 optimizer.lr 0.047268417611005295 +429 71 training.batch_size 0.0 +429 71 training.label_smoothing 0.0012915714142099153 +429 72 model.embedding_dim 0.0 +429 72 optimizer.lr 0.001482666648824909 +429 72 training.batch_size 0.0 +429 72 training.label_smoothing 0.06247465486209498 +429 73 model.embedding_dim 0.0 +429 73 optimizer.lr 0.00993789779644609 +429 73 training.batch_size 2.0 +429 73 training.label_smoothing 0.0021707120418249766 +429 74 model.embedding_dim 2.0 +429 74 optimizer.lr 0.0042367049032189285 +429 74 training.batch_size 2.0 +429 74 training.label_smoothing 0.05653706942022531 +429 75 model.embedding_dim 0.0 +429 75 optimizer.lr 0.0010888735094098862 +429 75 training.batch_size 2.0 +429 75 training.label_smoothing 0.05099409144796107 +429 76 model.embedding_dim 2.0 +429 76 optimizer.lr 0.0023282297044920743 +429 76 training.batch_size 0.0 +429 76 training.label_smoothing 0.0013857686267012878 +429 77 model.embedding_dim 1.0 +429 77 optimizer.lr 0.003204052105642701 +429 77 training.batch_size 1.0 +429 77 training.label_smoothing 0.004119570715848556 +429 78 model.embedding_dim 2.0 +429 78 optimizer.lr 0.010556729782891924 +429 78 training.batch_size 2.0 +429 78 training.label_smoothing 0.0036343240453400676 +429 79 model.embedding_dim 0.0 +429 79 optimizer.lr 0.061861413277900526 +429 79 training.batch_size 1.0 +429 79 training.label_smoothing 0.0771004174589165 +429 80 model.embedding_dim 0.0 +429 80 optimizer.lr 0.0011183500903431626 +429 80 training.batch_size 1.0 +429 80 training.label_smoothing 0.04961757449291495 +429 81 model.embedding_dim 2.0 +429 81 optimizer.lr 0.0037167269496389123 +429 81 training.batch_size 2.0 +429 81 training.label_smoothing 0.008284218619939974 +429 82 model.embedding_dim 0.0 +429 82 optimizer.lr 0.01516632301425456 +429 82 training.batch_size 1.0 +429 82 training.label_smoothing 0.05333061378827418 +429 83 model.embedding_dim 1.0 +429 83 optimizer.lr 0.0027466056309727823 +429 83 training.batch_size 2.0 
+429 83 training.label_smoothing 0.06765959817240096 +429 84 model.embedding_dim 1.0 +429 84 optimizer.lr 0.06461174579777793 +429 84 training.batch_size 1.0 +429 84 training.label_smoothing 0.10046542465165653 +429 85 model.embedding_dim 2.0 +429 85 optimizer.lr 0.01412927987383297 +429 85 training.batch_size 0.0 +429 85 training.label_smoothing 0.30995415923143993 +429 86 model.embedding_dim 0.0 +429 86 optimizer.lr 0.03884465058797761 +429 86 training.batch_size 1.0 +429 86 training.label_smoothing 0.009685704910141603 +429 87 model.embedding_dim 0.0 +429 87 optimizer.lr 0.012948101524988534 +429 87 training.batch_size 1.0 +429 87 training.label_smoothing 0.0049385451024805405 +429 88 model.embedding_dim 1.0 +429 88 optimizer.lr 0.004378725666482718 +429 88 training.batch_size 0.0 +429 88 training.label_smoothing 0.13103627270427184 +429 89 model.embedding_dim 2.0 +429 89 optimizer.lr 0.009364787918661847 +429 89 training.batch_size 0.0 +429 89 training.label_smoothing 0.1362222915722282 +429 90 model.embedding_dim 0.0 +429 90 optimizer.lr 0.042665529941425646 +429 90 training.batch_size 1.0 +429 90 training.label_smoothing 0.006754787876664184 +429 91 model.embedding_dim 1.0 +429 91 optimizer.lr 0.0963542187495018 +429 91 training.batch_size 0.0 +429 91 training.label_smoothing 0.01468760150726841 +429 92 model.embedding_dim 1.0 +429 92 optimizer.lr 0.02251679438648406 +429 92 training.batch_size 1.0 +429 92 training.label_smoothing 0.6451866327813881 +429 93 model.embedding_dim 1.0 +429 93 optimizer.lr 0.0013732837779352105 +429 93 training.batch_size 1.0 +429 93 training.label_smoothing 0.15681293015835654 +429 94 model.embedding_dim 0.0 +429 94 optimizer.lr 0.013056657344654063 +429 94 training.batch_size 1.0 +429 94 training.label_smoothing 0.0024149457416746457 +429 95 model.embedding_dim 2.0 +429 95 optimizer.lr 0.005104362599303874 +429 95 training.batch_size 1.0 +429 95 training.label_smoothing 0.07709886329088272 +429 96 model.embedding_dim 2.0 +429 96 
optimizer.lr 0.04079670702892463 +429 96 training.batch_size 1.0 +429 96 training.label_smoothing 0.0011056718613708103 +429 97 model.embedding_dim 0.0 +429 97 optimizer.lr 0.0012594108554047849 +429 97 training.batch_size 0.0 +429 97 training.label_smoothing 0.04826419595750027 +429 98 model.embedding_dim 2.0 +429 98 optimizer.lr 0.04073753729610457 +429 98 training.batch_size 2.0 +429 98 training.label_smoothing 0.7196691950497335 +429 99 model.embedding_dim 0.0 +429 99 optimizer.lr 0.0063091751206349215 +429 99 training.batch_size 1.0 +429 99 training.label_smoothing 0.14983510250283072 +429 100 model.embedding_dim 2.0 +429 100 optimizer.lr 0.0024877772280926737 +429 100 training.batch_size 1.0 +429 100 training.label_smoothing 0.022335735914411528 +429 1 dataset """kinships""" +429 1 model """ntn""" +429 1 loss """bceaftersigmoid""" +429 1 regularizer """no""" +429 1 optimizer """adam""" +429 1 training_loop """lcwa""" +429 1 evaluator """rankbased""" +429 2 dataset """kinships""" +429 2 model """ntn""" +429 2 loss """bceaftersigmoid""" +429 2 regularizer """no""" +429 2 optimizer """adam""" +429 2 training_loop """lcwa""" +429 2 evaluator """rankbased""" +429 3 dataset """kinships""" +429 3 model """ntn""" +429 3 loss """bceaftersigmoid""" +429 3 regularizer """no""" +429 3 optimizer """adam""" +429 3 training_loop """lcwa""" +429 3 evaluator """rankbased""" +429 4 dataset """kinships""" +429 4 model """ntn""" +429 4 loss """bceaftersigmoid""" +429 4 regularizer """no""" +429 4 optimizer """adam""" +429 4 training_loop """lcwa""" +429 4 evaluator """rankbased""" +429 5 dataset """kinships""" +429 5 model """ntn""" +429 5 loss """bceaftersigmoid""" +429 5 regularizer """no""" +429 5 optimizer """adam""" +429 5 training_loop """lcwa""" +429 5 evaluator """rankbased""" +429 6 dataset """kinships""" +429 6 model """ntn""" +429 6 loss """bceaftersigmoid""" +429 6 regularizer """no""" +429 6 optimizer """adam""" +429 6 training_loop """lcwa""" +429 6 evaluator 
"""rankbased""" +429 7 dataset """kinships""" +429 7 model """ntn""" +429 7 loss """bceaftersigmoid""" +429 7 regularizer """no""" +429 7 optimizer """adam""" +429 7 training_loop """lcwa""" +429 7 evaluator """rankbased""" +429 8 dataset """kinships""" +429 8 model """ntn""" +429 8 loss """bceaftersigmoid""" +429 8 regularizer """no""" +429 8 optimizer """adam""" +429 8 training_loop """lcwa""" +429 8 evaluator """rankbased""" +429 9 dataset """kinships""" +429 9 model """ntn""" +429 9 loss """bceaftersigmoid""" +429 9 regularizer """no""" +429 9 optimizer """adam""" +429 9 training_loop """lcwa""" +429 9 evaluator """rankbased""" +429 10 dataset """kinships""" +429 10 model """ntn""" +429 10 loss """bceaftersigmoid""" +429 10 regularizer """no""" +429 10 optimizer """adam""" +429 10 training_loop """lcwa""" +429 10 evaluator """rankbased""" +429 11 dataset """kinships""" +429 11 model """ntn""" +429 11 loss """bceaftersigmoid""" +429 11 regularizer """no""" +429 11 optimizer """adam""" +429 11 training_loop """lcwa""" +429 11 evaluator """rankbased""" +429 12 dataset """kinships""" +429 12 model """ntn""" +429 12 loss """bceaftersigmoid""" +429 12 regularizer """no""" +429 12 optimizer """adam""" +429 12 training_loop """lcwa""" +429 12 evaluator """rankbased""" +429 13 dataset """kinships""" +429 13 model """ntn""" +429 13 loss """bceaftersigmoid""" +429 13 regularizer """no""" +429 13 optimizer """adam""" +429 13 training_loop """lcwa""" +429 13 evaluator """rankbased""" +429 14 dataset """kinships""" +429 14 model """ntn""" +429 14 loss """bceaftersigmoid""" +429 14 regularizer """no""" +429 14 optimizer """adam""" +429 14 training_loop """lcwa""" +429 14 evaluator """rankbased""" +429 15 dataset """kinships""" +429 15 model """ntn""" +429 15 loss """bceaftersigmoid""" +429 15 regularizer """no""" +429 15 optimizer """adam""" +429 15 training_loop """lcwa""" +429 15 evaluator """rankbased""" +429 16 dataset """kinships""" +429 16 model """ntn""" +429 16 loss 
"""bceaftersigmoid""" +429 16 regularizer """no""" +429 16 optimizer """adam""" +429 16 training_loop """lcwa""" +429 16 evaluator """rankbased""" +429 17 dataset """kinships""" +429 17 model """ntn""" +429 17 loss """bceaftersigmoid""" +429 17 regularizer """no""" +429 17 optimizer """adam""" +429 17 training_loop """lcwa""" +429 17 evaluator """rankbased""" +429 18 dataset """kinships""" +429 18 model """ntn""" +429 18 loss """bceaftersigmoid""" +429 18 regularizer """no""" +429 18 optimizer """adam""" +429 18 training_loop """lcwa""" +429 18 evaluator """rankbased""" +429 19 dataset """kinships""" +429 19 model """ntn""" +429 19 loss """bceaftersigmoid""" +429 19 regularizer """no""" +429 19 optimizer """adam""" +429 19 training_loop """lcwa""" +429 19 evaluator """rankbased""" +429 20 dataset """kinships""" +429 20 model """ntn""" +429 20 loss """bceaftersigmoid""" +429 20 regularizer """no""" +429 20 optimizer """adam""" +429 20 training_loop """lcwa""" +429 20 evaluator """rankbased""" +429 21 dataset """kinships""" +429 21 model """ntn""" +429 21 loss """bceaftersigmoid""" +429 21 regularizer """no""" +429 21 optimizer """adam""" +429 21 training_loop """lcwa""" +429 21 evaluator """rankbased""" +429 22 dataset """kinships""" +429 22 model """ntn""" +429 22 loss """bceaftersigmoid""" +429 22 regularizer """no""" +429 22 optimizer """adam""" +429 22 training_loop """lcwa""" +429 22 evaluator """rankbased""" +429 23 dataset """kinships""" +429 23 model """ntn""" +429 23 loss """bceaftersigmoid""" +429 23 regularizer """no""" +429 23 optimizer """adam""" +429 23 training_loop """lcwa""" +429 23 evaluator """rankbased""" +429 24 dataset """kinships""" +429 24 model """ntn""" +429 24 loss """bceaftersigmoid""" +429 24 regularizer """no""" +429 24 optimizer """adam""" +429 24 training_loop """lcwa""" +429 24 evaluator """rankbased""" +429 25 dataset """kinships""" +429 25 model """ntn""" +429 25 loss """bceaftersigmoid""" +429 25 regularizer """no""" +429 25 
optimizer """adam""" +429 25 training_loop """lcwa""" +429 25 evaluator """rankbased""" +429 26 dataset """kinships""" +429 26 model """ntn""" +429 26 loss """bceaftersigmoid""" +429 26 regularizer """no""" +429 26 optimizer """adam""" +429 26 training_loop """lcwa""" +429 26 evaluator """rankbased""" +429 27 dataset """kinships""" +429 27 model """ntn""" +429 27 loss """bceaftersigmoid""" +429 27 regularizer """no""" +429 27 optimizer """adam""" +429 27 training_loop """lcwa""" +429 27 evaluator """rankbased""" +429 28 dataset """kinships""" +429 28 model """ntn""" +429 28 loss """bceaftersigmoid""" +429 28 regularizer """no""" +429 28 optimizer """adam""" +429 28 training_loop """lcwa""" +429 28 evaluator """rankbased""" +429 29 dataset """kinships""" +429 29 model """ntn""" +429 29 loss """bceaftersigmoid""" +429 29 regularizer """no""" +429 29 optimizer """adam""" +429 29 training_loop """lcwa""" +429 29 evaluator """rankbased""" +429 30 dataset """kinships""" +429 30 model """ntn""" +429 30 loss """bceaftersigmoid""" +429 30 regularizer """no""" +429 30 optimizer """adam""" +429 30 training_loop """lcwa""" +429 30 evaluator """rankbased""" +429 31 dataset """kinships""" +429 31 model """ntn""" +429 31 loss """bceaftersigmoid""" +429 31 regularizer """no""" +429 31 optimizer """adam""" +429 31 training_loop """lcwa""" +429 31 evaluator """rankbased""" +429 32 dataset """kinships""" +429 32 model """ntn""" +429 32 loss """bceaftersigmoid""" +429 32 regularizer """no""" +429 32 optimizer """adam""" +429 32 training_loop """lcwa""" +429 32 evaluator """rankbased""" +429 33 dataset """kinships""" +429 33 model """ntn""" +429 33 loss """bceaftersigmoid""" +429 33 regularizer """no""" +429 33 optimizer """adam""" +429 33 training_loop """lcwa""" +429 33 evaluator """rankbased""" +429 34 dataset """kinships""" +429 34 model """ntn""" +429 34 loss """bceaftersigmoid""" +429 34 regularizer """no""" +429 34 optimizer """adam""" +429 34 training_loop """lcwa""" +429 34 
evaluator """rankbased""" +429 35 dataset """kinships""" +429 35 model """ntn""" +429 35 loss """bceaftersigmoid""" +429 35 regularizer """no""" +429 35 optimizer """adam""" +429 35 training_loop """lcwa""" +429 35 evaluator """rankbased""" +429 36 dataset """kinships""" +429 36 model """ntn""" +429 36 loss """bceaftersigmoid""" +429 36 regularizer """no""" +429 36 optimizer """adam""" +429 36 training_loop """lcwa""" +429 36 evaluator """rankbased""" +429 37 dataset """kinships""" +429 37 model """ntn""" +429 37 loss """bceaftersigmoid""" +429 37 regularizer """no""" +429 37 optimizer """adam""" +429 37 training_loop """lcwa""" +429 37 evaluator """rankbased""" +429 38 dataset """kinships""" +429 38 model """ntn""" +429 38 loss """bceaftersigmoid""" +429 38 regularizer """no""" +429 38 optimizer """adam""" +429 38 training_loop """lcwa""" +429 38 evaluator """rankbased""" +429 39 dataset """kinships""" +429 39 model """ntn""" +429 39 loss """bceaftersigmoid""" +429 39 regularizer """no""" +429 39 optimizer """adam""" +429 39 training_loop """lcwa""" +429 39 evaluator """rankbased""" +429 40 dataset """kinships""" +429 40 model """ntn""" +429 40 loss """bceaftersigmoid""" +429 40 regularizer """no""" +429 40 optimizer """adam""" +429 40 training_loop """lcwa""" +429 40 evaluator """rankbased""" +429 41 dataset """kinships""" +429 41 model """ntn""" +429 41 loss """bceaftersigmoid""" +429 41 regularizer """no""" +429 41 optimizer """adam""" +429 41 training_loop """lcwa""" +429 41 evaluator """rankbased""" +429 42 dataset """kinships""" +429 42 model """ntn""" +429 42 loss """bceaftersigmoid""" +429 42 regularizer """no""" +429 42 optimizer """adam""" +429 42 training_loop """lcwa""" +429 42 evaluator """rankbased""" +429 43 dataset """kinships""" +429 43 model """ntn""" +429 43 loss """bceaftersigmoid""" +429 43 regularizer """no""" +429 43 optimizer """adam""" +429 43 training_loop """lcwa""" +429 43 evaluator """rankbased""" +429 44 dataset """kinships""" +429 44 
model """ntn""" +429 44 loss """bceaftersigmoid""" +429 44 regularizer """no""" +429 44 optimizer """adam""" +429 44 training_loop """lcwa""" +429 44 evaluator """rankbased""" +429 45 dataset """kinships""" +429 45 model """ntn""" +429 45 loss """bceaftersigmoid""" +429 45 regularizer """no""" +429 45 optimizer """adam""" +429 45 training_loop """lcwa""" +429 45 evaluator """rankbased""" +429 46 dataset """kinships""" +429 46 model """ntn""" +429 46 loss """bceaftersigmoid""" +429 46 regularizer """no""" +429 46 optimizer """adam""" +429 46 training_loop """lcwa""" +429 46 evaluator """rankbased""" +429 47 dataset """kinships""" +429 47 model """ntn""" +429 47 loss """bceaftersigmoid""" +429 47 regularizer """no""" +429 47 optimizer """adam""" +429 47 training_loop """lcwa""" +429 47 evaluator """rankbased""" +429 48 dataset """kinships""" +429 48 model """ntn""" +429 48 loss """bceaftersigmoid""" +429 48 regularizer """no""" +429 48 optimizer """adam""" +429 48 training_loop """lcwa""" +429 48 evaluator """rankbased""" +429 49 dataset """kinships""" +429 49 model """ntn""" +429 49 loss """bceaftersigmoid""" +429 49 regularizer """no""" +429 49 optimizer """adam""" +429 49 training_loop """lcwa""" +429 49 evaluator """rankbased""" +429 50 dataset """kinships""" +429 50 model """ntn""" +429 50 loss """bceaftersigmoid""" +429 50 regularizer """no""" +429 50 optimizer """adam""" +429 50 training_loop """lcwa""" +429 50 evaluator """rankbased""" +429 51 dataset """kinships""" +429 51 model """ntn""" +429 51 loss """bceaftersigmoid""" +429 51 regularizer """no""" +429 51 optimizer """adam""" +429 51 training_loop """lcwa""" +429 51 evaluator """rankbased""" +429 52 dataset """kinships""" +429 52 model """ntn""" +429 52 loss """bceaftersigmoid""" +429 52 regularizer """no""" +429 52 optimizer """adam""" +429 52 training_loop """lcwa""" +429 52 evaluator """rankbased""" +429 53 dataset """kinships""" +429 53 model """ntn""" +429 53 loss """bceaftersigmoid""" +429 53 
regularizer """no""" +429 53 optimizer """adam""" +429 53 training_loop """lcwa""" +429 53 evaluator """rankbased""" +429 54 dataset """kinships""" +429 54 model """ntn""" +429 54 loss """bceaftersigmoid""" +429 54 regularizer """no""" +429 54 optimizer """adam""" +429 54 training_loop """lcwa""" +429 54 evaluator """rankbased""" +429 55 dataset """kinships""" +429 55 model """ntn""" +429 55 loss """bceaftersigmoid""" +429 55 regularizer """no""" +429 55 optimizer """adam""" +429 55 training_loop """lcwa""" +429 55 evaluator """rankbased""" +429 56 dataset """kinships""" +429 56 model """ntn""" +429 56 loss """bceaftersigmoid""" +429 56 regularizer """no""" +429 56 optimizer """adam""" +429 56 training_loop """lcwa""" +429 56 evaluator """rankbased""" +429 57 dataset """kinships""" +429 57 model """ntn""" +429 57 loss """bceaftersigmoid""" +429 57 regularizer """no""" +429 57 optimizer """adam""" +429 57 training_loop """lcwa""" +429 57 evaluator """rankbased""" +429 58 dataset """kinships""" +429 58 model """ntn""" +429 58 loss """bceaftersigmoid""" +429 58 regularizer """no""" +429 58 optimizer """adam""" +429 58 training_loop """lcwa""" +429 58 evaluator """rankbased""" +429 59 dataset """kinships""" +429 59 model """ntn""" +429 59 loss """bceaftersigmoid""" +429 59 regularizer """no""" +429 59 optimizer """adam""" +429 59 training_loop """lcwa""" +429 59 evaluator """rankbased""" +429 60 dataset """kinships""" +429 60 model """ntn""" +429 60 loss """bceaftersigmoid""" +429 60 regularizer """no""" +429 60 optimizer """adam""" +429 60 training_loop """lcwa""" +429 60 evaluator """rankbased""" +429 61 dataset """kinships""" +429 61 model """ntn""" +429 61 loss """bceaftersigmoid""" +429 61 regularizer """no""" +429 61 optimizer """adam""" +429 61 training_loop """lcwa""" +429 61 evaluator """rankbased""" +429 62 dataset """kinships""" +429 62 model """ntn""" +429 62 loss """bceaftersigmoid""" +429 62 regularizer """no""" +429 62 optimizer """adam""" +429 62 
training_loop """lcwa""" +429 62 evaluator """rankbased""" +429 63 dataset """kinships""" +429 63 model """ntn""" +429 63 loss """bceaftersigmoid""" +429 63 regularizer """no""" +429 63 optimizer """adam""" +429 63 training_loop """lcwa""" +429 63 evaluator """rankbased""" +429 64 dataset """kinships""" +429 64 model """ntn""" +429 64 loss """bceaftersigmoid""" +429 64 regularizer """no""" +429 64 optimizer """adam""" +429 64 training_loop """lcwa""" +429 64 evaluator """rankbased""" +429 65 dataset """kinships""" +429 65 model """ntn""" +429 65 loss """bceaftersigmoid""" +429 65 regularizer """no""" +429 65 optimizer """adam""" +429 65 training_loop """lcwa""" +429 65 evaluator """rankbased""" +429 66 dataset """kinships""" +429 66 model """ntn""" +429 66 loss """bceaftersigmoid""" +429 66 regularizer """no""" +429 66 optimizer """adam""" +429 66 training_loop """lcwa""" +429 66 evaluator """rankbased""" +429 67 dataset """kinships""" +429 67 model """ntn""" +429 67 loss """bceaftersigmoid""" +429 67 regularizer """no""" +429 67 optimizer """adam""" +429 67 training_loop """lcwa""" +429 67 evaluator """rankbased""" +429 68 dataset """kinships""" +429 68 model """ntn""" +429 68 loss """bceaftersigmoid""" +429 68 regularizer """no""" +429 68 optimizer """adam""" +429 68 training_loop """lcwa""" +429 68 evaluator """rankbased""" +429 69 dataset """kinships""" +429 69 model """ntn""" +429 69 loss """bceaftersigmoid""" +429 69 regularizer """no""" +429 69 optimizer """adam""" +429 69 training_loop """lcwa""" +429 69 evaluator """rankbased""" +429 70 dataset """kinships""" +429 70 model """ntn""" +429 70 loss """bceaftersigmoid""" +429 70 regularizer """no""" +429 70 optimizer """adam""" +429 70 training_loop """lcwa""" +429 70 evaluator """rankbased""" +429 71 dataset """kinships""" +429 71 model """ntn""" +429 71 loss """bceaftersigmoid""" +429 71 regularizer """no""" +429 71 optimizer """adam""" +429 71 training_loop """lcwa""" +429 71 evaluator """rankbased""" +429 
72 dataset """kinships""" +429 72 model """ntn""" +429 72 loss """bceaftersigmoid""" +429 72 regularizer """no""" +429 72 optimizer """adam""" +429 72 training_loop """lcwa""" +429 72 evaluator """rankbased""" +429 73 dataset """kinships""" +429 73 model """ntn""" +429 73 loss """bceaftersigmoid""" +429 73 regularizer """no""" +429 73 optimizer """adam""" +429 73 training_loop """lcwa""" +429 73 evaluator """rankbased""" +429 74 dataset """kinships""" +429 74 model """ntn""" +429 74 loss """bceaftersigmoid""" +429 74 regularizer """no""" +429 74 optimizer """adam""" +429 74 training_loop """lcwa""" +429 74 evaluator """rankbased""" +429 75 dataset """kinships""" +429 75 model """ntn""" +429 75 loss """bceaftersigmoid""" +429 75 regularizer """no""" +429 75 optimizer """adam""" +429 75 training_loop """lcwa""" +429 75 evaluator """rankbased""" +429 76 dataset """kinships""" +429 76 model """ntn""" +429 76 loss """bceaftersigmoid""" +429 76 regularizer """no""" +429 76 optimizer """adam""" +429 76 training_loop """lcwa""" +429 76 evaluator """rankbased""" +429 77 dataset """kinships""" +429 77 model """ntn""" +429 77 loss """bceaftersigmoid""" +429 77 regularizer """no""" +429 77 optimizer """adam""" +429 77 training_loop """lcwa""" +429 77 evaluator """rankbased""" +429 78 dataset """kinships""" +429 78 model """ntn""" +429 78 loss """bceaftersigmoid""" +429 78 regularizer """no""" +429 78 optimizer """adam""" +429 78 training_loop """lcwa""" +429 78 evaluator """rankbased""" +429 79 dataset """kinships""" +429 79 model """ntn""" +429 79 loss """bceaftersigmoid""" +429 79 regularizer """no""" +429 79 optimizer """adam""" +429 79 training_loop """lcwa""" +429 79 evaluator """rankbased""" +429 80 dataset """kinships""" +429 80 model """ntn""" +429 80 loss """bceaftersigmoid""" +429 80 regularizer """no""" +429 80 optimizer """adam""" +429 80 training_loop """lcwa""" +429 80 evaluator """rankbased""" +429 81 dataset """kinships""" +429 81 model """ntn""" +429 81 loss 
"""bceaftersigmoid""" +429 81 regularizer """no""" +429 81 optimizer """adam""" +429 81 training_loop """lcwa""" +429 81 evaluator """rankbased""" +429 82 dataset """kinships""" +429 82 model """ntn""" +429 82 loss """bceaftersigmoid""" +429 82 regularizer """no""" +429 82 optimizer """adam""" +429 82 training_loop """lcwa""" +429 82 evaluator """rankbased""" +429 83 dataset """kinships""" +429 83 model """ntn""" +429 83 loss """bceaftersigmoid""" +429 83 regularizer """no""" +429 83 optimizer """adam""" +429 83 training_loop """lcwa""" +429 83 evaluator """rankbased""" +429 84 dataset """kinships""" +429 84 model """ntn""" +429 84 loss """bceaftersigmoid""" +429 84 regularizer """no""" +429 84 optimizer """adam""" +429 84 training_loop """lcwa""" +429 84 evaluator """rankbased""" +429 85 dataset """kinships""" +429 85 model """ntn""" +429 85 loss """bceaftersigmoid""" +429 85 regularizer """no""" +429 85 optimizer """adam""" +429 85 training_loop """lcwa""" +429 85 evaluator """rankbased""" +429 86 dataset """kinships""" +429 86 model """ntn""" +429 86 loss """bceaftersigmoid""" +429 86 regularizer """no""" +429 86 optimizer """adam""" +429 86 training_loop """lcwa""" +429 86 evaluator """rankbased""" +429 87 dataset """kinships""" +429 87 model """ntn""" +429 87 loss """bceaftersigmoid""" +429 87 regularizer """no""" +429 87 optimizer """adam""" +429 87 training_loop """lcwa""" +429 87 evaluator """rankbased""" +429 88 dataset """kinships""" +429 88 model """ntn""" +429 88 loss """bceaftersigmoid""" +429 88 regularizer """no""" +429 88 optimizer """adam""" +429 88 training_loop """lcwa""" +429 88 evaluator """rankbased""" +429 89 dataset """kinships""" +429 89 model """ntn""" +429 89 loss """bceaftersigmoid""" +429 89 regularizer """no""" +429 89 optimizer """adam""" +429 89 training_loop """lcwa""" +429 89 evaluator """rankbased""" +429 90 dataset """kinships""" +429 90 model """ntn""" +429 90 loss """bceaftersigmoid""" +429 90 regularizer """no""" +429 90 
optimizer """adam""" +429 90 training_loop """lcwa""" +429 90 evaluator """rankbased""" +429 91 dataset """kinships""" +429 91 model """ntn""" +429 91 loss """bceaftersigmoid""" +429 91 regularizer """no""" +429 91 optimizer """adam""" +429 91 training_loop """lcwa""" +429 91 evaluator """rankbased""" +429 92 dataset """kinships""" +429 92 model """ntn""" +429 92 loss """bceaftersigmoid""" +429 92 regularizer """no""" +429 92 optimizer """adam""" +429 92 training_loop """lcwa""" +429 92 evaluator """rankbased""" +429 93 dataset """kinships""" +429 93 model """ntn""" +429 93 loss """bceaftersigmoid""" +429 93 regularizer """no""" +429 93 optimizer """adam""" +429 93 training_loop """lcwa""" +429 93 evaluator """rankbased""" +429 94 dataset """kinships""" +429 94 model """ntn""" +429 94 loss """bceaftersigmoid""" +429 94 regularizer """no""" +429 94 optimizer """adam""" +429 94 training_loop """lcwa""" +429 94 evaluator """rankbased""" +429 95 dataset """kinships""" +429 95 model """ntn""" +429 95 loss """bceaftersigmoid""" +429 95 regularizer """no""" +429 95 optimizer """adam""" +429 95 training_loop """lcwa""" +429 95 evaluator """rankbased""" +429 96 dataset """kinships""" +429 96 model """ntn""" +429 96 loss """bceaftersigmoid""" +429 96 regularizer """no""" +429 96 optimizer """adam""" +429 96 training_loop """lcwa""" +429 96 evaluator """rankbased""" +429 97 dataset """kinships""" +429 97 model """ntn""" +429 97 loss """bceaftersigmoid""" +429 97 regularizer """no""" +429 97 optimizer """adam""" +429 97 training_loop """lcwa""" +429 97 evaluator """rankbased""" +429 98 dataset """kinships""" +429 98 model """ntn""" +429 98 loss """bceaftersigmoid""" +429 98 regularizer """no""" +429 98 optimizer """adam""" +429 98 training_loop """lcwa""" +429 98 evaluator """rankbased""" +429 99 dataset """kinships""" +429 99 model """ntn""" +429 99 loss """bceaftersigmoid""" +429 99 regularizer """no""" +429 99 optimizer """adam""" +429 99 training_loop """lcwa""" +429 99 
evaluator """rankbased""" +429 100 dataset """kinships""" +429 100 model """ntn""" +429 100 loss """bceaftersigmoid""" +429 100 regularizer """no""" +429 100 optimizer """adam""" +429 100 training_loop """lcwa""" +429 100 evaluator """rankbased""" +430 1 model.embedding_dim 2.0 +430 1 optimizer.lr 0.00903708575161079 +430 1 training.batch_size 0.0 +430 1 training.label_smoothing 0.016303540545623475 +430 2 model.embedding_dim 2.0 +430 2 optimizer.lr 0.005033004295144603 +430 2 training.batch_size 0.0 +430 2 training.label_smoothing 0.003370274174372436 +430 3 model.embedding_dim 1.0 +430 3 optimizer.lr 0.06181689950257031 +430 3 training.batch_size 1.0 +430 3 training.label_smoothing 0.031752926529248306 +430 4 model.embedding_dim 1.0 +430 4 optimizer.lr 0.00781406335923933 +430 4 training.batch_size 2.0 +430 4 training.label_smoothing 0.42314066200609995 +430 5 model.embedding_dim 0.0 +430 5 optimizer.lr 0.0022329685575795187 +430 5 training.batch_size 1.0 +430 5 training.label_smoothing 0.050858210961617925 +430 6 model.embedding_dim 1.0 +430 6 optimizer.lr 0.03246771961189697 +430 6 training.batch_size 1.0 +430 6 training.label_smoothing 0.002293116856063938 +430 7 model.embedding_dim 1.0 +430 7 optimizer.lr 0.07759138849723315 +430 7 training.batch_size 2.0 +430 7 training.label_smoothing 0.027287000686442762 +430 8 model.embedding_dim 1.0 +430 8 optimizer.lr 0.04577341118684704 +430 8 training.batch_size 2.0 +430 8 training.label_smoothing 0.4918713463088085 +430 9 model.embedding_dim 1.0 +430 9 optimizer.lr 0.0011007237216762895 +430 9 training.batch_size 0.0 +430 9 training.label_smoothing 0.009674257673841712 +430 10 model.embedding_dim 2.0 +430 10 optimizer.lr 0.029720493036213574 +430 10 training.batch_size 0.0 +430 10 training.label_smoothing 0.005746262030411352 +430 11 model.embedding_dim 0.0 +430 11 optimizer.lr 0.001004490119289172 +430 11 training.batch_size 0.0 +430 11 training.label_smoothing 0.15939239985274845 +430 12 model.embedding_dim 2.0 
+430 12 optimizer.lr 0.0019425868677092246 +430 12 training.batch_size 2.0 +430 12 training.label_smoothing 0.2403725676449491 +430 13 model.embedding_dim 2.0 +430 13 optimizer.lr 0.005787831771818195 +430 13 training.batch_size 0.0 +430 13 training.label_smoothing 0.6294093290121947 +430 14 model.embedding_dim 0.0 +430 14 optimizer.lr 0.09554607351017191 +430 14 training.batch_size 0.0 +430 14 training.label_smoothing 0.02259459523924409 +430 15 model.embedding_dim 2.0 +430 15 optimizer.lr 0.03787463157292411 +430 15 training.batch_size 2.0 +430 15 training.label_smoothing 0.08603041561240987 +430 16 model.embedding_dim 2.0 +430 16 optimizer.lr 0.09764797210592792 +430 16 training.batch_size 1.0 +430 16 training.label_smoothing 0.002392141990103601 +430 17 model.embedding_dim 1.0 +430 17 optimizer.lr 0.0053184105202539 +430 17 training.batch_size 1.0 +430 17 training.label_smoothing 0.04713604199503218 +430 18 model.embedding_dim 2.0 +430 18 optimizer.lr 0.024056449777304853 +430 18 training.batch_size 1.0 +430 18 training.label_smoothing 0.14156976875882776 +430 19 model.embedding_dim 1.0 +430 19 optimizer.lr 0.0582620152891421 +430 19 training.batch_size 2.0 +430 19 training.label_smoothing 0.04210898218890401 +430 20 model.embedding_dim 0.0 +430 20 optimizer.lr 0.006168797808240168 +430 20 training.batch_size 2.0 +430 20 training.label_smoothing 0.006492496108352672 +430 21 model.embedding_dim 1.0 +430 21 optimizer.lr 0.00591991465678908 +430 21 training.batch_size 2.0 +430 21 training.label_smoothing 0.006879129511889326 +430 22 model.embedding_dim 0.0 +430 22 optimizer.lr 0.009920681782378031 +430 22 training.batch_size 2.0 +430 22 training.label_smoothing 0.00281199038302035 +430 23 model.embedding_dim 1.0 +430 23 optimizer.lr 0.029698941289439448 +430 23 training.batch_size 0.0 +430 23 training.label_smoothing 0.04334991996808263 +430 24 model.embedding_dim 0.0 +430 24 optimizer.lr 0.0371127846858558 +430 24 training.batch_size 1.0 +430 24 
training.label_smoothing 0.005486198741823521 +430 25 model.embedding_dim 0.0 +430 25 optimizer.lr 0.014941945037251846 +430 25 training.batch_size 2.0 +430 25 training.label_smoothing 0.0034580737475672805 +430 26 model.embedding_dim 0.0 +430 26 optimizer.lr 0.001596292870104233 +430 26 training.batch_size 0.0 +430 26 training.label_smoothing 0.0016398433961550888 +430 27 model.embedding_dim 2.0 +430 27 optimizer.lr 0.002820820203427705 +430 27 training.batch_size 2.0 +430 27 training.label_smoothing 0.005140915184681466 +430 28 model.embedding_dim 1.0 +430 28 optimizer.lr 0.0011335087620676393 +430 28 training.batch_size 2.0 +430 28 training.label_smoothing 0.007567766257900132 +430 29 model.embedding_dim 0.0 +430 29 optimizer.lr 0.007700457074862359 +430 29 training.batch_size 2.0 +430 29 training.label_smoothing 0.0011388137739347696 +430 30 model.embedding_dim 0.0 +430 30 optimizer.lr 0.0010335689017960021 +430 30 training.batch_size 1.0 +430 30 training.label_smoothing 0.0030436071662375496 +430 31 model.embedding_dim 0.0 +430 31 optimizer.lr 0.01801829159486301 +430 31 training.batch_size 2.0 +430 31 training.label_smoothing 0.005710288162970949 +430 32 model.embedding_dim 0.0 +430 32 optimizer.lr 0.07843427921011392 +430 32 training.batch_size 2.0 +430 32 training.label_smoothing 0.5636893759840713 +430 33 model.embedding_dim 2.0 +430 33 optimizer.lr 0.0011293473384863147 +430 33 training.batch_size 2.0 +430 33 training.label_smoothing 0.04834021940756572 +430 34 model.embedding_dim 0.0 +430 34 optimizer.lr 0.04473060162392936 +430 34 training.batch_size 2.0 +430 34 training.label_smoothing 0.05230948761750799 +430 35 model.embedding_dim 2.0 +430 35 optimizer.lr 0.09693378684428136 +430 35 training.batch_size 1.0 +430 35 training.label_smoothing 0.0375755439227993 +430 36 model.embedding_dim 1.0 +430 36 optimizer.lr 0.02920391738247997 +430 36 training.batch_size 2.0 +430 36 training.label_smoothing 0.04538949342682512 +430 37 model.embedding_dim 1.0 +430 
37 optimizer.lr 0.021619519641033872 +430 37 training.batch_size 0.0 +430 37 training.label_smoothing 0.0034194982685413146 +430 38 model.embedding_dim 0.0 +430 38 optimizer.lr 0.023741784285127406 +430 38 training.batch_size 0.0 +430 38 training.label_smoothing 0.02595223434811655 +430 39 model.embedding_dim 0.0 +430 39 optimizer.lr 0.023936165548963377 +430 39 training.batch_size 1.0 +430 39 training.label_smoothing 0.0014402413798628526 +430 40 model.embedding_dim 2.0 +430 40 optimizer.lr 0.002467195463981986 +430 40 training.batch_size 0.0 +430 40 training.label_smoothing 0.03147569612036785 +430 41 model.embedding_dim 2.0 +430 41 optimizer.lr 0.009655795091009977 +430 41 training.batch_size 1.0 +430 41 training.label_smoothing 0.0043117748147773645 +430 42 model.embedding_dim 2.0 +430 42 optimizer.lr 0.016562542926858992 +430 42 training.batch_size 1.0 +430 42 training.label_smoothing 0.005132406912230164 +430 43 model.embedding_dim 1.0 +430 43 optimizer.lr 0.005399015769483977 +430 43 training.batch_size 0.0 +430 43 training.label_smoothing 0.14219725788761206 +430 44 model.embedding_dim 2.0 +430 44 optimizer.lr 0.0019913595611824667 +430 44 training.batch_size 0.0 +430 44 training.label_smoothing 0.0640494153733069 +430 45 model.embedding_dim 1.0 +430 45 optimizer.lr 0.012494213183895368 +430 45 training.batch_size 1.0 +430 45 training.label_smoothing 0.28281695396352124 +430 46 model.embedding_dim 1.0 +430 46 optimizer.lr 0.008326241678409263 +430 46 training.batch_size 1.0 +430 46 training.label_smoothing 0.22287412697466594 +430 47 model.embedding_dim 2.0 +430 47 optimizer.lr 0.0011371092367365092 +430 47 training.batch_size 1.0 +430 47 training.label_smoothing 0.01954250018315379 +430 48 model.embedding_dim 2.0 +430 48 optimizer.lr 0.041382773345127195 +430 48 training.batch_size 2.0 +430 48 training.label_smoothing 0.005750054600844319 +430 49 model.embedding_dim 1.0 +430 49 optimizer.lr 0.022442401337080184 +430 49 training.batch_size 2.0 +430 49 
training.label_smoothing 0.0033798795921640536 +430 50 model.embedding_dim 0.0 +430 50 optimizer.lr 0.009103732706820527 +430 50 training.batch_size 1.0 +430 50 training.label_smoothing 0.014483446968195821 +430 51 model.embedding_dim 1.0 +430 51 optimizer.lr 0.0056030489912981506 +430 51 training.batch_size 0.0 +430 51 training.label_smoothing 0.5022881241857285 +430 52 model.embedding_dim 1.0 +430 52 optimizer.lr 0.006131964390347643 +430 52 training.batch_size 0.0 +430 52 training.label_smoothing 0.04119884966241574 +430 53 model.embedding_dim 0.0 +430 53 optimizer.lr 0.041836453446888335 +430 53 training.batch_size 1.0 +430 53 training.label_smoothing 0.02539106302140792 +430 54 model.embedding_dim 0.0 +430 54 optimizer.lr 0.030191380580036602 +430 54 training.batch_size 1.0 +430 54 training.label_smoothing 0.6030277589894766 +430 55 model.embedding_dim 1.0 +430 55 optimizer.lr 0.007506197315493982 +430 55 training.batch_size 2.0 +430 55 training.label_smoothing 0.05671389438091624 +430 56 model.embedding_dim 0.0 +430 56 optimizer.lr 0.006805044970956325 +430 56 training.batch_size 0.0 +430 56 training.label_smoothing 0.35493375395125937 +430 57 model.embedding_dim 0.0 +430 57 optimizer.lr 0.02401793468350646 +430 57 training.batch_size 2.0 +430 57 training.label_smoothing 0.2847912884646525 +430 58 model.embedding_dim 0.0 +430 58 optimizer.lr 0.005130813604191712 +430 58 training.batch_size 1.0 +430 58 training.label_smoothing 0.13337746033457074 +430 59 model.embedding_dim 0.0 +430 59 optimizer.lr 0.07154129321723242 +430 59 training.batch_size 1.0 +430 59 training.label_smoothing 0.004545122893220275 +430 60 model.embedding_dim 2.0 +430 60 optimizer.lr 0.005597817487780618 +430 60 training.batch_size 2.0 +430 60 training.label_smoothing 0.03324579592259041 +430 61 model.embedding_dim 0.0 +430 61 optimizer.lr 0.001054157330431656 +430 61 training.batch_size 0.0 +430 61 training.label_smoothing 0.011916247426078622 +430 62 model.embedding_dim 1.0 +430 62 
optimizer.lr 0.015388709466453312 +430 62 training.batch_size 1.0 +430 62 training.label_smoothing 0.006790325183345725 +430 63 model.embedding_dim 2.0 +430 63 optimizer.lr 0.04598571543904594 +430 63 training.batch_size 1.0 +430 63 training.label_smoothing 0.003932242732313188 +430 64 model.embedding_dim 1.0 +430 64 optimizer.lr 0.001288254297685918 +430 64 training.batch_size 1.0 +430 64 training.label_smoothing 0.13791941688553566 +430 65 model.embedding_dim 2.0 +430 65 optimizer.lr 0.08991454567258385 +430 65 training.batch_size 0.0 +430 65 training.label_smoothing 0.9437075131977378 +430 66 model.embedding_dim 0.0 +430 66 optimizer.lr 0.004959951366933465 +430 66 training.batch_size 0.0 +430 66 training.label_smoothing 0.3561618427280549 +430 67 model.embedding_dim 2.0 +430 67 optimizer.lr 0.022849826478148937 +430 67 training.batch_size 0.0 +430 67 training.label_smoothing 0.001098388818203218 +430 68 model.embedding_dim 1.0 +430 68 optimizer.lr 0.09500086331204279 +430 68 training.batch_size 0.0 +430 68 training.label_smoothing 0.029434238194220037 +430 69 model.embedding_dim 0.0 +430 69 optimizer.lr 0.0648367798246532 +430 69 training.batch_size 1.0 +430 69 training.label_smoothing 0.019002906391966768 +430 70 model.embedding_dim 0.0 +430 70 optimizer.lr 0.006877954290183937 +430 70 training.batch_size 2.0 +430 70 training.label_smoothing 0.30053367390938457 +430 71 model.embedding_dim 2.0 +430 71 optimizer.lr 0.06314295440013354 +430 71 training.batch_size 0.0 +430 71 training.label_smoothing 0.004384953792332125 +430 72 model.embedding_dim 1.0 +430 72 optimizer.lr 0.001689610274205559 +430 72 training.batch_size 2.0 +430 72 training.label_smoothing 0.021529782629286617 +430 73 model.embedding_dim 2.0 +430 73 optimizer.lr 0.03610660304852005 +430 73 training.batch_size 1.0 +430 73 training.label_smoothing 0.1440084741144753 +430 74 model.embedding_dim 1.0 +430 74 optimizer.lr 0.0012843408237570863 +430 74 training.batch_size 1.0 +430 74 
training.label_smoothing 0.2662436763091772 +430 75 model.embedding_dim 2.0 +430 75 optimizer.lr 0.012285375458686534 +430 75 training.batch_size 2.0 +430 75 training.label_smoothing 0.025757666806574393 +430 76 model.embedding_dim 0.0 +430 76 optimizer.lr 0.05369197295553264 +430 76 training.batch_size 2.0 +430 76 training.label_smoothing 0.006440880980053202 +430 77 model.embedding_dim 0.0 +430 77 optimizer.lr 0.06996051977687395 +430 77 training.batch_size 1.0 +430 77 training.label_smoothing 0.3913508909588011 +430 78 model.embedding_dim 0.0 +430 78 optimizer.lr 0.002203774749933712 +430 78 training.batch_size 2.0 +430 78 training.label_smoothing 0.10846396904366588 +430 79 model.embedding_dim 0.0 +430 79 optimizer.lr 0.09859208368395657 +430 79 training.batch_size 0.0 +430 79 training.label_smoothing 0.016948582513453537 +430 80 model.embedding_dim 0.0 +430 80 optimizer.lr 0.024423268038299174 +430 80 training.batch_size 2.0 +430 80 training.label_smoothing 0.03352768331022549 +430 81 model.embedding_dim 0.0 +430 81 optimizer.lr 0.01794121467994848 +430 81 training.batch_size 2.0 +430 81 training.label_smoothing 0.04616580634610082 +430 82 model.embedding_dim 0.0 +430 82 optimizer.lr 0.003217573323009667 +430 82 training.batch_size 2.0 +430 82 training.label_smoothing 0.0018919070492833033 +430 83 model.embedding_dim 0.0 +430 83 optimizer.lr 0.0034203876870634252 +430 83 training.batch_size 2.0 +430 83 training.label_smoothing 0.007199090059692242 +430 84 model.embedding_dim 0.0 +430 84 optimizer.lr 0.0012133382440594571 +430 84 training.batch_size 0.0 +430 84 training.label_smoothing 0.0012984396360712998 +430 85 model.embedding_dim 1.0 +430 85 optimizer.lr 0.001074336754928005 +430 85 training.batch_size 2.0 +430 85 training.label_smoothing 0.1401863152889732 +430 86 model.embedding_dim 2.0 +430 86 optimizer.lr 0.004024798806058506 +430 86 training.batch_size 2.0 +430 86 training.label_smoothing 0.07176510254051174 +430 87 model.embedding_dim 2.0 +430 87 
optimizer.lr 0.03413562332518632 +430 87 training.batch_size 1.0 +430 87 training.label_smoothing 0.006429607702302077 +430 88 model.embedding_dim 1.0 +430 88 optimizer.lr 0.005842074405294258 +430 88 training.batch_size 1.0 +430 88 training.label_smoothing 0.0019742189991204476 +430 89 model.embedding_dim 1.0 +430 89 optimizer.lr 0.0050692851063637665 +430 89 training.batch_size 0.0 +430 89 training.label_smoothing 0.004540444555412032 +430 90 model.embedding_dim 0.0 +430 90 optimizer.lr 0.0027036207614585645 +430 90 training.batch_size 0.0 +430 90 training.label_smoothing 0.11190858013577855 +430 91 model.embedding_dim 0.0 +430 91 optimizer.lr 0.044457250248029524 +430 91 training.batch_size 2.0 +430 91 training.label_smoothing 0.00156856517381062 +430 92 model.embedding_dim 1.0 +430 92 optimizer.lr 0.001434862784896422 +430 92 training.batch_size 1.0 +430 92 training.label_smoothing 0.006058914809803448 +430 93 model.embedding_dim 1.0 +430 93 optimizer.lr 0.051786003601006075 +430 93 training.batch_size 0.0 +430 93 training.label_smoothing 0.01173749755765074 +430 94 model.embedding_dim 2.0 +430 94 optimizer.lr 0.02718462944231569 +430 94 training.batch_size 1.0 +430 94 training.label_smoothing 0.006967108217432095 +430 95 model.embedding_dim 1.0 +430 95 optimizer.lr 0.003241625435232492 +430 95 training.batch_size 1.0 +430 95 training.label_smoothing 0.015685320297602428 +430 96 model.embedding_dim 1.0 +430 96 optimizer.lr 0.08624931333796614 +430 96 training.batch_size 1.0 +430 96 training.label_smoothing 0.004836807539734065 +430 97 model.embedding_dim 2.0 +430 97 optimizer.lr 0.002670628964823331 +430 97 training.batch_size 0.0 +430 97 training.label_smoothing 0.03161130252275323 +430 98 model.embedding_dim 0.0 +430 98 optimizer.lr 0.013559460921855029 +430 98 training.batch_size 1.0 +430 98 training.label_smoothing 0.03555366884215979 +430 99 model.embedding_dim 0.0 +430 99 optimizer.lr 0.006877638953502979 +430 99 training.batch_size 0.0 +430 99 
training.label_smoothing 0.002735482083836958 +430 100 model.embedding_dim 1.0 +430 100 optimizer.lr 0.0037242905230903517 +430 100 training.batch_size 2.0 +430 100 training.label_smoothing 0.048606515715471275 +430 1 dataset """kinships""" +430 1 model """ntn""" +430 1 loss """softplus""" +430 1 regularizer """no""" +430 1 optimizer """adam""" +430 1 training_loop """lcwa""" +430 1 evaluator """rankbased""" +430 2 dataset """kinships""" +430 2 model """ntn""" +430 2 loss """softplus""" +430 2 regularizer """no""" +430 2 optimizer """adam""" +430 2 training_loop """lcwa""" +430 2 evaluator """rankbased""" +430 3 dataset """kinships""" +430 3 model """ntn""" +430 3 loss """softplus""" +430 3 regularizer """no""" +430 3 optimizer """adam""" +430 3 training_loop """lcwa""" +430 3 evaluator """rankbased""" +430 4 dataset """kinships""" +430 4 model """ntn""" +430 4 loss """softplus""" +430 4 regularizer """no""" +430 4 optimizer """adam""" +430 4 training_loop """lcwa""" +430 4 evaluator """rankbased""" +430 5 dataset """kinships""" +430 5 model """ntn""" +430 5 loss """softplus""" +430 5 regularizer """no""" +430 5 optimizer """adam""" +430 5 training_loop """lcwa""" +430 5 evaluator """rankbased""" +430 6 dataset """kinships""" +430 6 model """ntn""" +430 6 loss """softplus""" +430 6 regularizer """no""" +430 6 optimizer """adam""" +430 6 training_loop """lcwa""" +430 6 evaluator """rankbased""" +430 7 dataset """kinships""" +430 7 model """ntn""" +430 7 loss """softplus""" +430 7 regularizer """no""" +430 7 optimizer """adam""" +430 7 training_loop """lcwa""" +430 7 evaluator """rankbased""" +430 8 dataset """kinships""" +430 8 model """ntn""" +430 8 loss """softplus""" +430 8 regularizer """no""" +430 8 optimizer """adam""" +430 8 training_loop """lcwa""" +430 8 evaluator """rankbased""" +430 9 dataset """kinships""" +430 9 model """ntn""" +430 9 loss """softplus""" +430 9 regularizer """no""" +430 9 optimizer """adam""" +430 9 training_loop """lcwa""" +430 9 
evaluator """rankbased""" +430 10 dataset """kinships""" +430 10 model """ntn""" +430 10 loss """softplus""" +430 10 regularizer """no""" +430 10 optimizer """adam""" +430 10 training_loop """lcwa""" +430 10 evaluator """rankbased""" +430 11 dataset """kinships""" +430 11 model """ntn""" +430 11 loss """softplus""" +430 11 regularizer """no""" +430 11 optimizer """adam""" +430 11 training_loop """lcwa""" +430 11 evaluator """rankbased""" +430 12 dataset """kinships""" +430 12 model """ntn""" +430 12 loss """softplus""" +430 12 regularizer """no""" +430 12 optimizer """adam""" +430 12 training_loop """lcwa""" +430 12 evaluator """rankbased""" +430 13 dataset """kinships""" +430 13 model """ntn""" +430 13 loss """softplus""" +430 13 regularizer """no""" +430 13 optimizer """adam""" +430 13 training_loop """lcwa""" +430 13 evaluator """rankbased""" +430 14 dataset """kinships""" +430 14 model """ntn""" +430 14 loss """softplus""" +430 14 regularizer """no""" +430 14 optimizer """adam""" +430 14 training_loop """lcwa""" +430 14 evaluator """rankbased""" +430 15 dataset """kinships""" +430 15 model """ntn""" +430 15 loss """softplus""" +430 15 regularizer """no""" +430 15 optimizer """adam""" +430 15 training_loop """lcwa""" +430 15 evaluator """rankbased""" +430 16 dataset """kinships""" +430 16 model """ntn""" +430 16 loss """softplus""" +430 16 regularizer """no""" +430 16 optimizer """adam""" +430 16 training_loop """lcwa""" +430 16 evaluator """rankbased""" +430 17 dataset """kinships""" +430 17 model """ntn""" +430 17 loss """softplus""" +430 17 regularizer """no""" +430 17 optimizer """adam""" +430 17 training_loop """lcwa""" +430 17 evaluator """rankbased""" +430 18 dataset """kinships""" +430 18 model """ntn""" +430 18 loss """softplus""" +430 18 regularizer """no""" +430 18 optimizer """adam""" +430 18 training_loop """lcwa""" +430 18 evaluator """rankbased""" +430 19 dataset """kinships""" +430 19 model """ntn""" +430 19 loss """softplus""" +430 19 
regularizer """no""" +430 19 optimizer """adam""" +430 19 training_loop """lcwa""" +430 19 evaluator """rankbased""" +430 20 dataset """kinships""" +430 20 model """ntn""" +430 20 loss """softplus""" +430 20 regularizer """no""" +430 20 optimizer """adam""" +430 20 training_loop """lcwa""" +430 20 evaluator """rankbased""" +430 21 dataset """kinships""" +430 21 model """ntn""" +430 21 loss """softplus""" +430 21 regularizer """no""" +430 21 optimizer """adam""" +430 21 training_loop """lcwa""" +430 21 evaluator """rankbased""" +430 22 dataset """kinships""" +430 22 model """ntn""" +430 22 loss """softplus""" +430 22 regularizer """no""" +430 22 optimizer """adam""" +430 22 training_loop """lcwa""" +430 22 evaluator """rankbased""" +430 23 dataset """kinships""" +430 23 model """ntn""" +430 23 loss """softplus""" +430 23 regularizer """no""" +430 23 optimizer """adam""" +430 23 training_loop """lcwa""" +430 23 evaluator """rankbased""" +430 24 dataset """kinships""" +430 24 model """ntn""" +430 24 loss """softplus""" +430 24 regularizer """no""" +430 24 optimizer """adam""" +430 24 training_loop """lcwa""" +430 24 evaluator """rankbased""" +430 25 dataset """kinships""" +430 25 model """ntn""" +430 25 loss """softplus""" +430 25 regularizer """no""" +430 25 optimizer """adam""" +430 25 training_loop """lcwa""" +430 25 evaluator """rankbased""" +430 26 dataset """kinships""" +430 26 model """ntn""" +430 26 loss """softplus""" +430 26 regularizer """no""" +430 26 optimizer """adam""" +430 26 training_loop """lcwa""" +430 26 evaluator """rankbased""" +430 27 dataset """kinships""" +430 27 model """ntn""" +430 27 loss """softplus""" +430 27 regularizer """no""" +430 27 optimizer """adam""" +430 27 training_loop """lcwa""" +430 27 evaluator """rankbased""" +430 28 dataset """kinships""" +430 28 model """ntn""" +430 28 loss """softplus""" +430 28 regularizer """no""" +430 28 optimizer """adam""" +430 28 training_loop """lcwa""" +430 28 evaluator """rankbased""" +430 29 
dataset """kinships""" +430 29 model """ntn""" +430 29 loss """softplus""" +430 29 regularizer """no""" +430 29 optimizer """adam""" +430 29 training_loop """lcwa""" +430 29 evaluator """rankbased""" +430 30 dataset """kinships""" +430 30 model """ntn""" +430 30 loss """softplus""" +430 30 regularizer """no""" +430 30 optimizer """adam""" +430 30 training_loop """lcwa""" +430 30 evaluator """rankbased""" +430 31 dataset """kinships""" +430 31 model """ntn""" +430 31 loss """softplus""" +430 31 regularizer """no""" +430 31 optimizer """adam""" +430 31 training_loop """lcwa""" +430 31 evaluator """rankbased""" +430 32 dataset """kinships""" +430 32 model """ntn""" +430 32 loss """softplus""" +430 32 regularizer """no""" +430 32 optimizer """adam""" +430 32 training_loop """lcwa""" +430 32 evaluator """rankbased""" +430 33 dataset """kinships""" +430 33 model """ntn""" +430 33 loss """softplus""" +430 33 regularizer """no""" +430 33 optimizer """adam""" +430 33 training_loop """lcwa""" +430 33 evaluator """rankbased""" +430 34 dataset """kinships""" +430 34 model """ntn""" +430 34 loss """softplus""" +430 34 regularizer """no""" +430 34 optimizer """adam""" +430 34 training_loop """lcwa""" +430 34 evaluator """rankbased""" +430 35 dataset """kinships""" +430 35 model """ntn""" +430 35 loss """softplus""" +430 35 regularizer """no""" +430 35 optimizer """adam""" +430 35 training_loop """lcwa""" +430 35 evaluator """rankbased""" +430 36 dataset """kinships""" +430 36 model """ntn""" +430 36 loss """softplus""" +430 36 regularizer """no""" +430 36 optimizer """adam""" +430 36 training_loop """lcwa""" +430 36 evaluator """rankbased""" +430 37 dataset """kinships""" +430 37 model """ntn""" +430 37 loss """softplus""" +430 37 regularizer """no""" +430 37 optimizer """adam""" +430 37 training_loop """lcwa""" +430 37 evaluator """rankbased""" +430 38 dataset """kinships""" +430 38 model """ntn""" +430 38 loss """softplus""" +430 38 regularizer """no""" +430 38 optimizer 
"""adam""" +430 38 training_loop """lcwa""" +430 38 evaluator """rankbased""" +430 39 dataset """kinships""" +430 39 model """ntn""" +430 39 loss """softplus""" +430 39 regularizer """no""" +430 39 optimizer """adam""" +430 39 training_loop """lcwa""" +430 39 evaluator """rankbased""" +430 40 dataset """kinships""" +430 40 model """ntn""" +430 40 loss """softplus""" +430 40 regularizer """no""" +430 40 optimizer """adam""" +430 40 training_loop """lcwa""" +430 40 evaluator """rankbased""" +430 41 dataset """kinships""" +430 41 model """ntn""" +430 41 loss """softplus""" +430 41 regularizer """no""" +430 41 optimizer """adam""" +430 41 training_loop """lcwa""" +430 41 evaluator """rankbased""" +430 42 dataset """kinships""" +430 42 model """ntn""" +430 42 loss """softplus""" +430 42 regularizer """no""" +430 42 optimizer """adam""" +430 42 training_loop """lcwa""" +430 42 evaluator """rankbased""" +430 43 dataset """kinships""" +430 43 model """ntn""" +430 43 loss """softplus""" +430 43 regularizer """no""" +430 43 optimizer """adam""" +430 43 training_loop """lcwa""" +430 43 evaluator """rankbased""" +430 44 dataset """kinships""" +430 44 model """ntn""" +430 44 loss """softplus""" +430 44 regularizer """no""" +430 44 optimizer """adam""" +430 44 training_loop """lcwa""" +430 44 evaluator """rankbased""" +430 45 dataset """kinships""" +430 45 model """ntn""" +430 45 loss """softplus""" +430 45 regularizer """no""" +430 45 optimizer """adam""" +430 45 training_loop """lcwa""" +430 45 evaluator """rankbased""" +430 46 dataset """kinships""" +430 46 model """ntn""" +430 46 loss """softplus""" +430 46 regularizer """no""" +430 46 optimizer """adam""" +430 46 training_loop """lcwa""" +430 46 evaluator """rankbased""" +430 47 dataset """kinships""" +430 47 model """ntn""" +430 47 loss """softplus""" +430 47 regularizer """no""" +430 47 optimizer """adam""" +430 47 training_loop """lcwa""" +430 47 evaluator """rankbased""" +430 48 dataset """kinships""" +430 48 model 
"""ntn""" +430 48 loss """softplus""" +430 48 regularizer """no""" +430 48 optimizer """adam""" +430 48 training_loop """lcwa""" +430 48 evaluator """rankbased""" +430 49 dataset """kinships""" +430 49 model """ntn""" +430 49 loss """softplus""" +430 49 regularizer """no""" +430 49 optimizer """adam""" +430 49 training_loop """lcwa""" +430 49 evaluator """rankbased""" +430 50 dataset """kinships""" +430 50 model """ntn""" +430 50 loss """softplus""" +430 50 regularizer """no""" +430 50 optimizer """adam""" +430 50 training_loop """lcwa""" +430 50 evaluator """rankbased""" +430 51 dataset """kinships""" +430 51 model """ntn""" +430 51 loss """softplus""" +430 51 regularizer """no""" +430 51 optimizer """adam""" +430 51 training_loop """lcwa""" +430 51 evaluator """rankbased""" +430 52 dataset """kinships""" +430 52 model """ntn""" +430 52 loss """softplus""" +430 52 regularizer """no""" +430 52 optimizer """adam""" +430 52 training_loop """lcwa""" +430 52 evaluator """rankbased""" +430 53 dataset """kinships""" +430 53 model """ntn""" +430 53 loss """softplus""" +430 53 regularizer """no""" +430 53 optimizer """adam""" +430 53 training_loop """lcwa""" +430 53 evaluator """rankbased""" +430 54 dataset """kinships""" +430 54 model """ntn""" +430 54 loss """softplus""" +430 54 regularizer """no""" +430 54 optimizer """adam""" +430 54 training_loop """lcwa""" +430 54 evaluator """rankbased""" +430 55 dataset """kinships""" +430 55 model """ntn""" +430 55 loss """softplus""" +430 55 regularizer """no""" +430 55 optimizer """adam""" +430 55 training_loop """lcwa""" +430 55 evaluator """rankbased""" +430 56 dataset """kinships""" +430 56 model """ntn""" +430 56 loss """softplus""" +430 56 regularizer """no""" +430 56 optimizer """adam""" +430 56 training_loop """lcwa""" +430 56 evaluator """rankbased""" +430 57 dataset """kinships""" +430 57 model """ntn""" +430 57 loss """softplus""" +430 57 regularizer """no""" +430 57 optimizer """adam""" +430 57 training_loop 
"""lcwa""" +430 57 evaluator """rankbased""" +430 58 dataset """kinships""" +430 58 model """ntn""" +430 58 loss """softplus""" +430 58 regularizer """no""" +430 58 optimizer """adam""" +430 58 training_loop """lcwa""" +430 58 evaluator """rankbased""" +430 59 dataset """kinships""" +430 59 model """ntn""" +430 59 loss """softplus""" +430 59 regularizer """no""" +430 59 optimizer """adam""" +430 59 training_loop """lcwa""" +430 59 evaluator """rankbased""" +430 60 dataset """kinships""" +430 60 model """ntn""" +430 60 loss """softplus""" +430 60 regularizer """no""" +430 60 optimizer """adam""" +430 60 training_loop """lcwa""" +430 60 evaluator """rankbased""" +430 61 dataset """kinships""" +430 61 model """ntn""" +430 61 loss """softplus""" +430 61 regularizer """no""" +430 61 optimizer """adam""" +430 61 training_loop """lcwa""" +430 61 evaluator """rankbased""" +430 62 dataset """kinships""" +430 62 model """ntn""" +430 62 loss """softplus""" +430 62 regularizer """no""" +430 62 optimizer """adam""" +430 62 training_loop """lcwa""" +430 62 evaluator """rankbased""" +430 63 dataset """kinships""" +430 63 model """ntn""" +430 63 loss """softplus""" +430 63 regularizer """no""" +430 63 optimizer """adam""" +430 63 training_loop """lcwa""" +430 63 evaluator """rankbased""" +430 64 dataset """kinships""" +430 64 model """ntn""" +430 64 loss """softplus""" +430 64 regularizer """no""" +430 64 optimizer """adam""" +430 64 training_loop """lcwa""" +430 64 evaluator """rankbased""" +430 65 dataset """kinships""" +430 65 model """ntn""" +430 65 loss """softplus""" +430 65 regularizer """no""" +430 65 optimizer """adam""" +430 65 training_loop """lcwa""" +430 65 evaluator """rankbased""" +430 66 dataset """kinships""" +430 66 model """ntn""" +430 66 loss """softplus""" +430 66 regularizer """no""" +430 66 optimizer """adam""" +430 66 training_loop """lcwa""" +430 66 evaluator """rankbased""" +430 67 dataset """kinships""" +430 67 model """ntn""" +430 67 loss """softplus""" 
+430 67 regularizer """no""" +430 67 optimizer """adam""" +430 67 training_loop """lcwa""" +430 67 evaluator """rankbased""" +430 68 dataset """kinships""" +430 68 model """ntn""" +430 68 loss """softplus""" +430 68 regularizer """no""" +430 68 optimizer """adam""" +430 68 training_loop """lcwa""" +430 68 evaluator """rankbased""" +430 69 dataset """kinships""" +430 69 model """ntn""" +430 69 loss """softplus""" +430 69 regularizer """no""" +430 69 optimizer """adam""" +430 69 training_loop """lcwa""" +430 69 evaluator """rankbased""" +430 70 dataset """kinships""" +430 70 model """ntn""" +430 70 loss """softplus""" +430 70 regularizer """no""" +430 70 optimizer """adam""" +430 70 training_loop """lcwa""" +430 70 evaluator """rankbased""" +430 71 dataset """kinships""" +430 71 model """ntn""" +430 71 loss """softplus""" +430 71 regularizer """no""" +430 71 optimizer """adam""" +430 71 training_loop """lcwa""" +430 71 evaluator """rankbased""" +430 72 dataset """kinships""" +430 72 model """ntn""" +430 72 loss """softplus""" +430 72 regularizer """no""" +430 72 optimizer """adam""" +430 72 training_loop """lcwa""" +430 72 evaluator """rankbased""" +430 73 dataset """kinships""" +430 73 model """ntn""" +430 73 loss """softplus""" +430 73 regularizer """no""" +430 73 optimizer """adam""" +430 73 training_loop """lcwa""" +430 73 evaluator """rankbased""" +430 74 dataset """kinships""" +430 74 model """ntn""" +430 74 loss """softplus""" +430 74 regularizer """no""" +430 74 optimizer """adam""" +430 74 training_loop """lcwa""" +430 74 evaluator """rankbased""" +430 75 dataset """kinships""" +430 75 model """ntn""" +430 75 loss """softplus""" +430 75 regularizer """no""" +430 75 optimizer """adam""" +430 75 training_loop """lcwa""" +430 75 evaluator """rankbased""" +430 76 dataset """kinships""" +430 76 model """ntn""" +430 76 loss """softplus""" +430 76 regularizer """no""" +430 76 optimizer """adam""" +430 76 training_loop """lcwa""" +430 76 evaluator """rankbased""" 
+430 77 dataset """kinships""" +430 77 model """ntn""" +430 77 loss """softplus""" +430 77 regularizer """no""" +430 77 optimizer """adam""" +430 77 training_loop """lcwa""" +430 77 evaluator """rankbased""" +430 78 dataset """kinships""" +430 78 model """ntn""" +430 78 loss """softplus""" +430 78 regularizer """no""" +430 78 optimizer """adam""" +430 78 training_loop """lcwa""" +430 78 evaluator """rankbased""" +430 79 dataset """kinships""" +430 79 model """ntn""" +430 79 loss """softplus""" +430 79 regularizer """no""" +430 79 optimizer """adam""" +430 79 training_loop """lcwa""" +430 79 evaluator """rankbased""" +430 80 dataset """kinships""" +430 80 model """ntn""" +430 80 loss """softplus""" +430 80 regularizer """no""" +430 80 optimizer """adam""" +430 80 training_loop """lcwa""" +430 80 evaluator """rankbased""" +430 81 dataset """kinships""" +430 81 model """ntn""" +430 81 loss """softplus""" +430 81 regularizer """no""" +430 81 optimizer """adam""" +430 81 training_loop """lcwa""" +430 81 evaluator """rankbased""" +430 82 dataset """kinships""" +430 82 model """ntn""" +430 82 loss """softplus""" +430 82 regularizer """no""" +430 82 optimizer """adam""" +430 82 training_loop """lcwa""" +430 82 evaluator """rankbased""" +430 83 dataset """kinships""" +430 83 model """ntn""" +430 83 loss """softplus""" +430 83 regularizer """no""" +430 83 optimizer """adam""" +430 83 training_loop """lcwa""" +430 83 evaluator """rankbased""" +430 84 dataset """kinships""" +430 84 model """ntn""" +430 84 loss """softplus""" +430 84 regularizer """no""" +430 84 optimizer """adam""" +430 84 training_loop """lcwa""" +430 84 evaluator """rankbased""" +430 85 dataset """kinships""" +430 85 model """ntn""" +430 85 loss """softplus""" +430 85 regularizer """no""" +430 85 optimizer """adam""" +430 85 training_loop """lcwa""" +430 85 evaluator """rankbased""" +430 86 dataset """kinships""" +430 86 model """ntn""" +430 86 loss """softplus""" +430 86 regularizer """no""" +430 86 
optimizer """adam""" +430 86 training_loop """lcwa""" +430 86 evaluator """rankbased""" +430 87 dataset """kinships""" +430 87 model """ntn""" +430 87 loss """softplus""" +430 87 regularizer """no""" +430 87 optimizer """adam""" +430 87 training_loop """lcwa""" +430 87 evaluator """rankbased""" +430 88 dataset """kinships""" +430 88 model """ntn""" +430 88 loss """softplus""" +430 88 regularizer """no""" +430 88 optimizer """adam""" +430 88 training_loop """lcwa""" +430 88 evaluator """rankbased""" +430 89 dataset """kinships""" +430 89 model """ntn""" +430 89 loss """softplus""" +430 89 regularizer """no""" +430 89 optimizer """adam""" +430 89 training_loop """lcwa""" +430 89 evaluator """rankbased""" +430 90 dataset """kinships""" +430 90 model """ntn""" +430 90 loss """softplus""" +430 90 regularizer """no""" +430 90 optimizer """adam""" +430 90 training_loop """lcwa""" +430 90 evaluator """rankbased""" +430 91 dataset """kinships""" +430 91 model """ntn""" +430 91 loss """softplus""" +430 91 regularizer """no""" +430 91 optimizer """adam""" +430 91 training_loop """lcwa""" +430 91 evaluator """rankbased""" +430 92 dataset """kinships""" +430 92 model """ntn""" +430 92 loss """softplus""" +430 92 regularizer """no""" +430 92 optimizer """adam""" +430 92 training_loop """lcwa""" +430 92 evaluator """rankbased""" +430 93 dataset """kinships""" +430 93 model """ntn""" +430 93 loss """softplus""" +430 93 regularizer """no""" +430 93 optimizer """adam""" +430 93 training_loop """lcwa""" +430 93 evaluator """rankbased""" +430 94 dataset """kinships""" +430 94 model """ntn""" +430 94 loss """softplus""" +430 94 regularizer """no""" +430 94 optimizer """adam""" +430 94 training_loop """lcwa""" +430 94 evaluator """rankbased""" +430 95 dataset """kinships""" +430 95 model """ntn""" +430 95 loss """softplus""" +430 95 regularizer """no""" +430 95 optimizer """adam""" +430 95 training_loop """lcwa""" +430 95 evaluator """rankbased""" +430 96 dataset """kinships""" +430 96 
model """ntn""" +430 96 loss """softplus""" +430 96 regularizer """no""" +430 96 optimizer """adam""" +430 96 training_loop """lcwa""" +430 96 evaluator """rankbased""" +430 97 dataset """kinships""" +430 97 model """ntn""" +430 97 loss """softplus""" +430 97 regularizer """no""" +430 97 optimizer """adam""" +430 97 training_loop """lcwa""" +430 97 evaluator """rankbased""" +430 98 dataset """kinships""" +430 98 model """ntn""" +430 98 loss """softplus""" +430 98 regularizer """no""" +430 98 optimizer """adam""" +430 98 training_loop """lcwa""" +430 98 evaluator """rankbased""" +430 99 dataset """kinships""" +430 99 model """ntn""" +430 99 loss """softplus""" +430 99 regularizer """no""" +430 99 optimizer """adam""" +430 99 training_loop """lcwa""" +430 99 evaluator """rankbased""" +430 100 dataset """kinships""" +430 100 model """ntn""" +430 100 loss """softplus""" +430 100 regularizer """no""" +430 100 optimizer """adam""" +430 100 training_loop """lcwa""" +430 100 evaluator """rankbased""" +431 1 model.embedding_dim 0.0 +431 1 optimizer.lr 0.030700422812775845 +431 1 training.batch_size 0.0 +431 1 training.label_smoothing 0.05563813353345872 +431 2 model.embedding_dim 1.0 +431 2 optimizer.lr 0.010486548587663689 +431 2 training.batch_size 2.0 +431 2 training.label_smoothing 0.559785418547827 +431 3 model.embedding_dim 1.0 +431 3 optimizer.lr 0.001506436033559001 +431 3 training.batch_size 0.0 +431 3 training.label_smoothing 0.012974837416936186 +431 4 model.embedding_dim 1.0 +431 4 optimizer.lr 0.0010592154738514592 +431 4 training.batch_size 0.0 +431 4 training.label_smoothing 0.0010509125775516083 +431 5 model.embedding_dim 0.0 +431 5 optimizer.lr 0.003537526001363747 +431 5 training.batch_size 1.0 +431 5 training.label_smoothing 0.0012327879798244477 +431 6 model.embedding_dim 2.0 +431 6 optimizer.lr 0.00945228640312183 +431 6 training.batch_size 2.0 +431 6 training.label_smoothing 0.016601751216414173 +431 7 model.embedding_dim 0.0 +431 7 optimizer.lr 
0.024956897392409583 +431 7 training.batch_size 0.0 +431 7 training.label_smoothing 0.10617971288467516 +431 8 model.embedding_dim 2.0 +431 8 optimizer.lr 0.03149470973012976 +431 8 training.batch_size 1.0 +431 8 training.label_smoothing 0.010875715470716016 +431 9 model.embedding_dim 2.0 +431 9 optimizer.lr 0.077590422327913 +431 9 training.batch_size 1.0 +431 9 training.label_smoothing 0.7281244245744785 +431 10 model.embedding_dim 2.0 +431 10 optimizer.lr 0.023933897171252612 +431 10 training.batch_size 2.0 +431 10 training.label_smoothing 0.015374506921046876 +431 11 model.embedding_dim 0.0 +431 11 optimizer.lr 0.009175206447434482 +431 11 training.batch_size 0.0 +431 11 training.label_smoothing 0.6096149930613823 +431 12 model.embedding_dim 0.0 +431 12 optimizer.lr 0.05511471227851058 +431 12 training.batch_size 2.0 +431 12 training.label_smoothing 0.007075207445038534 +431 13 model.embedding_dim 0.0 +431 13 optimizer.lr 0.004912539534114014 +431 13 training.batch_size 1.0 +431 13 training.label_smoothing 0.003112525726019793 +431 14 model.embedding_dim 2.0 +431 14 optimizer.lr 0.09073785185696305 +431 14 training.batch_size 2.0 +431 14 training.label_smoothing 0.005565558204542051 +431 15 model.embedding_dim 0.0 +431 15 optimizer.lr 0.02263285799961241 +431 15 training.batch_size 2.0 +431 15 training.label_smoothing 0.007735892849403939 +431 16 model.embedding_dim 2.0 +431 16 optimizer.lr 0.007173526483421645 +431 16 training.batch_size 0.0 +431 16 training.label_smoothing 0.022958453609304992 +431 17 model.embedding_dim 2.0 +431 17 optimizer.lr 0.030486515026464746 +431 17 training.batch_size 2.0 +431 17 training.label_smoothing 0.21857734398773593 +431 18 model.embedding_dim 1.0 +431 18 optimizer.lr 0.00406501635490609 +431 18 training.batch_size 1.0 +431 18 training.label_smoothing 0.2230739616551139 +431 19 model.embedding_dim 2.0 +431 19 optimizer.lr 0.03389574307363918 +431 19 training.batch_size 0.0 +431 19 training.label_smoothing 0.45770653328260014 
+431 20 model.embedding_dim 2.0 +431 20 optimizer.lr 0.01150340631917601 +431 20 training.batch_size 1.0 +431 20 training.label_smoothing 0.0016555437713421483 +431 21 model.embedding_dim 0.0 +431 21 optimizer.lr 0.02666054436707663 +431 21 training.batch_size 2.0 +431 21 training.label_smoothing 0.01562604379162155 +431 22 model.embedding_dim 1.0 +431 22 optimizer.lr 0.002252385146086689 +431 22 training.batch_size 2.0 +431 22 training.label_smoothing 0.32521019282666513 +431 23 model.embedding_dim 0.0 +431 23 optimizer.lr 0.0016691452241218252 +431 23 training.batch_size 0.0 +431 23 training.label_smoothing 0.25512633529551454 +431 24 model.embedding_dim 2.0 +431 24 optimizer.lr 0.0075484384394096165 +431 24 training.batch_size 1.0 +431 24 training.label_smoothing 0.4727581578552918 +431 25 model.embedding_dim 1.0 +431 25 optimizer.lr 0.023471116036465362 +431 25 training.batch_size 2.0 +431 25 training.label_smoothing 0.8712242987213328 +431 26 model.embedding_dim 2.0 +431 26 optimizer.lr 0.08655167953056224 +431 26 training.batch_size 0.0 +431 26 training.label_smoothing 0.17543231476230464 +431 27 model.embedding_dim 2.0 +431 27 optimizer.lr 0.0025602731988033003 +431 27 training.batch_size 1.0 +431 27 training.label_smoothing 0.033576914227563315 +431 28 model.embedding_dim 0.0 +431 28 optimizer.lr 0.0029389745390067347 +431 28 training.batch_size 1.0 +431 28 training.label_smoothing 0.07275756542425144 +431 29 model.embedding_dim 2.0 +431 29 optimizer.lr 0.011603659565953894 +431 29 training.batch_size 2.0 +431 29 training.label_smoothing 0.22333897047687942 +431 30 model.embedding_dim 1.0 +431 30 optimizer.lr 0.024887134246279623 +431 30 training.batch_size 1.0 +431 30 training.label_smoothing 0.0031625376703462915 +431 31 model.embedding_dim 2.0 +431 31 optimizer.lr 0.03396603746532316 +431 31 training.batch_size 1.0 +431 31 training.label_smoothing 0.0038490198500331536 +431 32 model.embedding_dim 2.0 +431 32 optimizer.lr 0.0696425076998091 +431 32 
training.batch_size 1.0 +431 32 training.label_smoothing 0.17760027486672664 +431 33 model.embedding_dim 2.0 +431 33 optimizer.lr 0.0014005512420283773 +431 33 training.batch_size 0.0 +431 33 training.label_smoothing 0.16083872125842658 +431 34 model.embedding_dim 0.0 +431 34 optimizer.lr 0.006653025839200649 +431 34 training.batch_size 1.0 +431 34 training.label_smoothing 0.0019279718465961934 +431 35 model.embedding_dim 0.0 +431 35 optimizer.lr 0.018203914509420875 +431 35 training.batch_size 1.0 +431 35 training.label_smoothing 0.3727603326702654 +431 36 model.embedding_dim 0.0 +431 36 optimizer.lr 0.04974377152731718 +431 36 training.batch_size 2.0 +431 36 training.label_smoothing 0.0056616038519652825 +431 37 model.embedding_dim 1.0 +431 37 optimizer.lr 0.03532928677805638 +431 37 training.batch_size 1.0 +431 37 training.label_smoothing 0.40637783224744667 +431 38 model.embedding_dim 1.0 +431 38 optimizer.lr 0.06365022203103153 +431 38 training.batch_size 0.0 +431 38 training.label_smoothing 0.0365770508784673 +431 39 model.embedding_dim 1.0 +431 39 optimizer.lr 0.002960047450495807 +431 39 training.batch_size 1.0 +431 39 training.label_smoothing 0.8738986423296968 +431 40 model.embedding_dim 1.0 +431 40 optimizer.lr 0.00354536421819602 +431 40 training.batch_size 2.0 +431 40 training.label_smoothing 0.5598696586158192 +431 41 model.embedding_dim 1.0 +431 41 optimizer.lr 0.005305804306784561 +431 41 training.batch_size 1.0 +431 41 training.label_smoothing 0.25176111666542844 +431 42 model.embedding_dim 2.0 +431 42 optimizer.lr 0.0034803655206289 +431 42 training.batch_size 2.0 +431 42 training.label_smoothing 0.14059754420542778 +431 43 model.embedding_dim 0.0 +431 43 optimizer.lr 0.0015086037040840051 +431 43 training.batch_size 0.0 +431 43 training.label_smoothing 0.001152361918849754 +431 44 model.embedding_dim 2.0 +431 44 optimizer.lr 0.0748026898386242 +431 44 training.batch_size 1.0 +431 44 training.label_smoothing 0.6074544956413319 +431 45 
model.embedding_dim 1.0 +431 45 optimizer.lr 0.03286140871071825 +431 45 training.batch_size 1.0 +431 45 training.label_smoothing 0.02860219365213571 +431 46 model.embedding_dim 1.0 +431 46 optimizer.lr 0.0040690086177013205 +431 46 training.batch_size 1.0 +431 46 training.label_smoothing 0.02743468482517453 +431 47 model.embedding_dim 1.0 +431 47 optimizer.lr 0.0011786802994233547 +431 47 training.batch_size 2.0 +431 47 training.label_smoothing 0.009035916752038027 +431 48 model.embedding_dim 0.0 +431 48 optimizer.lr 0.009030605569846495 +431 48 training.batch_size 1.0 +431 48 training.label_smoothing 0.06865319759833027 +431 49 model.embedding_dim 0.0 +431 49 optimizer.lr 0.0015371997478379514 +431 49 training.batch_size 2.0 +431 49 training.label_smoothing 0.28183534789199616 +431 50 model.embedding_dim 2.0 +431 50 optimizer.lr 0.004424787406176658 +431 50 training.batch_size 2.0 +431 50 training.label_smoothing 0.007517940840689644 +431 51 model.embedding_dim 0.0 +431 51 optimizer.lr 0.002336651927075705 +431 51 training.batch_size 1.0 +431 51 training.label_smoothing 0.0874404074892693 +431 52 model.embedding_dim 2.0 +431 52 optimizer.lr 0.0041778183274893445 +431 52 training.batch_size 2.0 +431 52 training.label_smoothing 0.001935125354085779 +431 53 model.embedding_dim 0.0 +431 53 optimizer.lr 0.005778688603783668 +431 53 training.batch_size 1.0 +431 53 training.label_smoothing 0.0010826461869798746 +431 54 model.embedding_dim 2.0 +431 54 optimizer.lr 0.008015089557159162 +431 54 training.batch_size 0.0 +431 54 training.label_smoothing 0.2536169335217774 +431 55 model.embedding_dim 2.0 +431 55 optimizer.lr 0.008243892352685942 +431 55 training.batch_size 0.0 +431 55 training.label_smoothing 0.023801463532133284 +431 56 model.embedding_dim 2.0 +431 56 optimizer.lr 0.0015358881433298226 +431 56 training.batch_size 0.0 +431 56 training.label_smoothing 0.018059327738349503 +431 57 model.embedding_dim 0.0 +431 57 optimizer.lr 0.004748722181404328 +431 57 
training.batch_size 2.0 +431 57 training.label_smoothing 0.005297866935010708 +431 58 model.embedding_dim 1.0 +431 58 optimizer.lr 0.0016363120852912545 +431 58 training.batch_size 0.0 +431 58 training.label_smoothing 0.14054511243421527 +431 59 model.embedding_dim 1.0 +431 59 optimizer.lr 0.0882588279064779 +431 59 training.batch_size 0.0 +431 59 training.label_smoothing 0.14943213494204796 +431 60 model.embedding_dim 1.0 +431 60 optimizer.lr 0.003302676689007412 +431 60 training.batch_size 2.0 +431 60 training.label_smoothing 0.3163589059248548 +431 61 model.embedding_dim 1.0 +431 61 optimizer.lr 0.03832724607869451 +431 61 training.batch_size 0.0 +431 61 training.label_smoothing 0.5160026938840162 +431 62 model.embedding_dim 1.0 +431 62 optimizer.lr 0.011100367923019572 +431 62 training.batch_size 0.0 +431 62 training.label_smoothing 0.00941976795512723 +431 63 model.embedding_dim 0.0 +431 63 optimizer.lr 0.001869845879011473 +431 63 training.batch_size 1.0 +431 63 training.label_smoothing 0.04732959437304069 +431 64 model.embedding_dim 1.0 +431 64 optimizer.lr 0.08612056348914444 +431 64 training.batch_size 2.0 +431 64 training.label_smoothing 0.5106430794567274 +431 65 model.embedding_dim 1.0 +431 65 optimizer.lr 0.006158583199358131 +431 65 training.batch_size 0.0 +431 65 training.label_smoothing 0.1998181672896751 +431 66 model.embedding_dim 2.0 +431 66 optimizer.lr 0.028504589649167952 +431 66 training.batch_size 1.0 +431 66 training.label_smoothing 0.5547869826942706 +431 67 model.embedding_dim 2.0 +431 67 optimizer.lr 0.0032927690224801302 +431 67 training.batch_size 2.0 +431 67 training.label_smoothing 0.038018219392034064 +431 68 model.embedding_dim 0.0 +431 68 optimizer.lr 0.03680468035946826 +431 68 training.batch_size 2.0 +431 68 training.label_smoothing 0.9659712955852638 +431 69 model.embedding_dim 2.0 +431 69 optimizer.lr 0.010066238822432074 +431 69 training.batch_size 1.0 +431 69 training.label_smoothing 0.18876188517628636 +431 70 
model.embedding_dim 0.0 +431 70 optimizer.lr 0.001806649908351338 +431 70 training.batch_size 0.0 +431 70 training.label_smoothing 0.038886717957063034 +431 71 model.embedding_dim 0.0 +431 71 optimizer.lr 0.003507811709590923 +431 71 training.batch_size 2.0 +431 71 training.label_smoothing 0.11165926460279912 +431 72 model.embedding_dim 2.0 +431 72 optimizer.lr 0.0033426651608792855 +431 72 training.batch_size 2.0 +431 72 training.label_smoothing 0.11853648436476849 +431 73 model.embedding_dim 1.0 +431 73 optimizer.lr 0.0023447512550664315 +431 73 training.batch_size 2.0 +431 73 training.label_smoothing 0.17420172389825628 +431 74 model.embedding_dim 0.0 +431 74 optimizer.lr 0.0039007268166510282 +431 74 training.batch_size 0.0 +431 74 training.label_smoothing 0.11610858023425039 +431 75 model.embedding_dim 0.0 +431 75 optimizer.lr 0.0018636631163757552 +431 75 training.batch_size 0.0 +431 75 training.label_smoothing 0.05140795041461517 +431 76 model.embedding_dim 0.0 +431 76 optimizer.lr 0.00897154592887163 +431 76 training.batch_size 2.0 +431 76 training.label_smoothing 0.24117017508919344 +431 77 model.embedding_dim 1.0 +431 77 optimizer.lr 0.007933855997242339 +431 77 training.batch_size 2.0 +431 77 training.label_smoothing 0.0554587966759811 +431 78 model.embedding_dim 1.0 +431 78 optimizer.lr 0.00148676499381223 +431 78 training.batch_size 2.0 +431 78 training.label_smoothing 0.0018372404142956943 +431 79 model.embedding_dim 2.0 +431 79 optimizer.lr 0.0057444264837941544 +431 79 training.batch_size 0.0 +431 79 training.label_smoothing 0.08079317027552614 +431 80 model.embedding_dim 0.0 +431 80 optimizer.lr 0.010655724071811748 +431 80 training.batch_size 0.0 +431 80 training.label_smoothing 0.031331755023968566 +431 81 model.embedding_dim 2.0 +431 81 optimizer.lr 0.003926149093017215 +431 81 training.batch_size 2.0 +431 81 training.label_smoothing 0.002702873515865766 +431 82 model.embedding_dim 1.0 +431 82 optimizer.lr 0.002419427005499502 +431 82 
training.batch_size 2.0 +431 82 training.label_smoothing 0.010685224244665835 +431 83 model.embedding_dim 2.0 +431 83 optimizer.lr 0.08919352370179826 +431 83 training.batch_size 1.0 +431 83 training.label_smoothing 0.10144440369804228 +431 84 model.embedding_dim 2.0 +431 84 optimizer.lr 0.08175864275771756 +431 84 training.batch_size 0.0 +431 84 training.label_smoothing 0.3872515307778458 +431 85 model.embedding_dim 0.0 +431 85 optimizer.lr 0.026779515690570424 +431 85 training.batch_size 1.0 +431 85 training.label_smoothing 0.0016470359921546814 +431 86 model.embedding_dim 0.0 +431 86 optimizer.lr 0.0073463768644435705 +431 86 training.batch_size 1.0 +431 86 training.label_smoothing 0.6428908598200817 +431 87 model.embedding_dim 1.0 +431 87 optimizer.lr 0.001405537802943267 +431 87 training.batch_size 1.0 +431 87 training.label_smoothing 0.0401940671137907 +431 88 model.embedding_dim 1.0 +431 88 optimizer.lr 0.06087149401694482 +431 88 training.batch_size 0.0 +431 88 training.label_smoothing 0.02928219170119212 +431 89 model.embedding_dim 2.0 +431 89 optimizer.lr 0.001726964793463379 +431 89 training.batch_size 2.0 +431 89 training.label_smoothing 0.06978088369840939 +431 90 model.embedding_dim 0.0 +431 90 optimizer.lr 0.014726838637155319 +431 90 training.batch_size 2.0 +431 90 training.label_smoothing 0.04754526094308101 +431 91 model.embedding_dim 1.0 +431 91 optimizer.lr 0.0019306318636755897 +431 91 training.batch_size 2.0 +431 91 training.label_smoothing 0.002311035917812086 +431 92 model.embedding_dim 1.0 +431 92 optimizer.lr 0.004092073736597139 +431 92 training.batch_size 2.0 +431 92 training.label_smoothing 0.015887517432437215 +431 93 model.embedding_dim 2.0 +431 93 optimizer.lr 0.02162297012440873 +431 93 training.batch_size 0.0 +431 93 training.label_smoothing 0.003749547771950458 +431 94 model.embedding_dim 2.0 +431 94 optimizer.lr 0.0538585479185288 +431 94 training.batch_size 0.0 +431 94 training.label_smoothing 0.20108116415260596 +431 95 
model.embedding_dim 2.0 +431 95 optimizer.lr 0.002411696315656068 +431 95 training.batch_size 0.0 +431 95 training.label_smoothing 0.0104886253526038 +431 96 model.embedding_dim 1.0 +431 96 optimizer.lr 0.0026678881732446666 +431 96 training.batch_size 0.0 +431 96 training.label_smoothing 0.014098898255476722 +431 97 model.embedding_dim 2.0 +431 97 optimizer.lr 0.0012789699485097556 +431 97 training.batch_size 2.0 +431 97 training.label_smoothing 0.0021789903035723114 +431 98 model.embedding_dim 1.0 +431 98 optimizer.lr 0.005737169012573719 +431 98 training.batch_size 0.0 +431 98 training.label_smoothing 0.01565851786074967 +431 99 model.embedding_dim 2.0 +431 99 optimizer.lr 0.01837321372909664 +431 99 training.batch_size 1.0 +431 99 training.label_smoothing 0.07062720971640155 +431 100 model.embedding_dim 0.0 +431 100 optimizer.lr 0.007000307950246306 +431 100 training.batch_size 2.0 +431 100 training.label_smoothing 0.006611497949137665 +431 1 dataset """kinships""" +431 1 model """ntn""" +431 1 loss """bceaftersigmoid""" +431 1 regularizer """no""" +431 1 optimizer """adam""" +431 1 training_loop """lcwa""" +431 1 evaluator """rankbased""" +431 2 dataset """kinships""" +431 2 model """ntn""" +431 2 loss """bceaftersigmoid""" +431 2 regularizer """no""" +431 2 optimizer """adam""" +431 2 training_loop """lcwa""" +431 2 evaluator """rankbased""" +431 3 dataset """kinships""" +431 3 model """ntn""" +431 3 loss """bceaftersigmoid""" +431 3 regularizer """no""" +431 3 optimizer """adam""" +431 3 training_loop """lcwa""" +431 3 evaluator """rankbased""" +431 4 dataset """kinships""" +431 4 model """ntn""" +431 4 loss """bceaftersigmoid""" +431 4 regularizer """no""" +431 4 optimizer """adam""" +431 4 training_loop """lcwa""" +431 4 evaluator """rankbased""" +431 5 dataset """kinships""" +431 5 model """ntn""" +431 5 loss """bceaftersigmoid""" +431 5 regularizer """no""" +431 5 optimizer """adam""" +431 5 training_loop """lcwa""" +431 5 evaluator """rankbased""" +431 
6 dataset """kinships""" +431 6 model """ntn""" +431 6 loss """bceaftersigmoid""" +431 6 regularizer """no""" +431 6 optimizer """adam""" +431 6 training_loop """lcwa""" +431 6 evaluator """rankbased""" +431 7 dataset """kinships""" +431 7 model """ntn""" +431 7 loss """bceaftersigmoid""" +431 7 regularizer """no""" +431 7 optimizer """adam""" +431 7 training_loop """lcwa""" +431 7 evaluator """rankbased""" +431 8 dataset """kinships""" +431 8 model """ntn""" +431 8 loss """bceaftersigmoid""" +431 8 regularizer """no""" +431 8 optimizer """adam""" +431 8 training_loop """lcwa""" +431 8 evaluator """rankbased""" +431 9 dataset """kinships""" +431 9 model """ntn""" +431 9 loss """bceaftersigmoid""" +431 9 regularizer """no""" +431 9 optimizer """adam""" +431 9 training_loop """lcwa""" +431 9 evaluator """rankbased""" +431 10 dataset """kinships""" +431 10 model """ntn""" +431 10 loss """bceaftersigmoid""" +431 10 regularizer """no""" +431 10 optimizer """adam""" +431 10 training_loop """lcwa""" +431 10 evaluator """rankbased""" +431 11 dataset """kinships""" +431 11 model """ntn""" +431 11 loss """bceaftersigmoid""" +431 11 regularizer """no""" +431 11 optimizer """adam""" +431 11 training_loop """lcwa""" +431 11 evaluator """rankbased""" +431 12 dataset """kinships""" +431 12 model """ntn""" +431 12 loss """bceaftersigmoid""" +431 12 regularizer """no""" +431 12 optimizer """adam""" +431 12 training_loop """lcwa""" +431 12 evaluator """rankbased""" +431 13 dataset """kinships""" +431 13 model """ntn""" +431 13 loss """bceaftersigmoid""" +431 13 regularizer """no""" +431 13 optimizer """adam""" +431 13 training_loop """lcwa""" +431 13 evaluator """rankbased""" +431 14 dataset """kinships""" +431 14 model """ntn""" +431 14 loss """bceaftersigmoid""" +431 14 regularizer """no""" +431 14 optimizer """adam""" +431 14 training_loop """lcwa""" +431 14 evaluator """rankbased""" +431 15 dataset """kinships""" +431 15 model """ntn""" +431 15 loss """bceaftersigmoid""" +431 15 
regularizer """no""" +431 15 optimizer """adam""" +431 15 training_loop """lcwa""" +431 15 evaluator """rankbased""" +431 16 dataset """kinships""" +431 16 model """ntn""" +431 16 loss """bceaftersigmoid""" +431 16 regularizer """no""" +431 16 optimizer """adam""" +431 16 training_loop """lcwa""" +431 16 evaluator """rankbased""" +431 17 dataset """kinships""" +431 17 model """ntn""" +431 17 loss """bceaftersigmoid""" +431 17 regularizer """no""" +431 17 optimizer """adam""" +431 17 training_loop """lcwa""" +431 17 evaluator """rankbased""" +431 18 dataset """kinships""" +431 18 model """ntn""" +431 18 loss """bceaftersigmoid""" +431 18 regularizer """no""" +431 18 optimizer """adam""" +431 18 training_loop """lcwa""" +431 18 evaluator """rankbased""" +431 19 dataset """kinships""" +431 19 model """ntn""" +431 19 loss """bceaftersigmoid""" +431 19 regularizer """no""" +431 19 optimizer """adam""" +431 19 training_loop """lcwa""" +431 19 evaluator """rankbased""" +431 20 dataset """kinships""" +431 20 model """ntn""" +431 20 loss """bceaftersigmoid""" +431 20 regularizer """no""" +431 20 optimizer """adam""" +431 20 training_loop """lcwa""" +431 20 evaluator """rankbased""" +431 21 dataset """kinships""" +431 21 model """ntn""" +431 21 loss """bceaftersigmoid""" +431 21 regularizer """no""" +431 21 optimizer """adam""" +431 21 training_loop """lcwa""" +431 21 evaluator """rankbased""" +431 22 dataset """kinships""" +431 22 model """ntn""" +431 22 loss """bceaftersigmoid""" +431 22 regularizer """no""" +431 22 optimizer """adam""" +431 22 training_loop """lcwa""" +431 22 evaluator """rankbased""" +431 23 dataset """kinships""" +431 23 model """ntn""" +431 23 loss """bceaftersigmoid""" +431 23 regularizer """no""" +431 23 optimizer """adam""" +431 23 training_loop """lcwa""" +431 23 evaluator """rankbased""" +431 24 dataset """kinships""" +431 24 model """ntn""" +431 24 loss """bceaftersigmoid""" +431 24 regularizer """no""" +431 24 optimizer """adam""" +431 24 
training_loop """lcwa""" +431 24 evaluator """rankbased""" +431 25 dataset """kinships""" +431 25 model """ntn""" +431 25 loss """bceaftersigmoid""" +431 25 regularizer """no""" +431 25 optimizer """adam""" +431 25 training_loop """lcwa""" +431 25 evaluator """rankbased""" +431 26 dataset """kinships""" +431 26 model """ntn""" +431 26 loss """bceaftersigmoid""" +431 26 regularizer """no""" +431 26 optimizer """adam""" +431 26 training_loop """lcwa""" +431 26 evaluator """rankbased""" +431 27 dataset """kinships""" +431 27 model """ntn""" +431 27 loss """bceaftersigmoid""" +431 27 regularizer """no""" +431 27 optimizer """adam""" +431 27 training_loop """lcwa""" +431 27 evaluator """rankbased""" +431 28 dataset """kinships""" +431 28 model """ntn""" +431 28 loss """bceaftersigmoid""" +431 28 regularizer """no""" +431 28 optimizer """adam""" +431 28 training_loop """lcwa""" +431 28 evaluator """rankbased""" +431 29 dataset """kinships""" +431 29 model """ntn""" +431 29 loss """bceaftersigmoid""" +431 29 regularizer """no""" +431 29 optimizer """adam""" +431 29 training_loop """lcwa""" +431 29 evaluator """rankbased""" +431 30 dataset """kinships""" +431 30 model """ntn""" +431 30 loss """bceaftersigmoid""" +431 30 regularizer """no""" +431 30 optimizer """adam""" +431 30 training_loop """lcwa""" +431 30 evaluator """rankbased""" +431 31 dataset """kinships""" +431 31 model """ntn""" +431 31 loss """bceaftersigmoid""" +431 31 regularizer """no""" +431 31 optimizer """adam""" +431 31 training_loop """lcwa""" +431 31 evaluator """rankbased""" +431 32 dataset """kinships""" +431 32 model """ntn""" +431 32 loss """bceaftersigmoid""" +431 32 regularizer """no""" +431 32 optimizer """adam""" +431 32 training_loop """lcwa""" +431 32 evaluator """rankbased""" +431 33 dataset """kinships""" +431 33 model """ntn""" +431 33 loss """bceaftersigmoid""" +431 33 regularizer """no""" +431 33 optimizer """adam""" +431 33 training_loop """lcwa""" +431 33 evaluator """rankbased""" +431 
34 dataset """kinships""" +431 34 model """ntn""" +431 34 loss """bceaftersigmoid""" +431 34 regularizer """no""" +431 34 optimizer """adam""" +431 34 training_loop """lcwa""" +431 34 evaluator """rankbased""" +431 35 dataset """kinships""" +431 35 model """ntn""" +431 35 loss """bceaftersigmoid""" +431 35 regularizer """no""" +431 35 optimizer """adam""" +431 35 training_loop """lcwa""" +431 35 evaluator """rankbased""" +431 36 dataset """kinships""" +431 36 model """ntn""" +431 36 loss """bceaftersigmoid""" +431 36 regularizer """no""" +431 36 optimizer """adam""" +431 36 training_loop """lcwa""" +431 36 evaluator """rankbased""" +431 37 dataset """kinships""" +431 37 model """ntn""" +431 37 loss """bceaftersigmoid""" +431 37 regularizer """no""" +431 37 optimizer """adam""" +431 37 training_loop """lcwa""" +431 37 evaluator """rankbased""" +431 38 dataset """kinships""" +431 38 model """ntn""" +431 38 loss """bceaftersigmoid""" +431 38 regularizer """no""" +431 38 optimizer """adam""" +431 38 training_loop """lcwa""" +431 38 evaluator """rankbased""" +431 39 dataset """kinships""" +431 39 model """ntn""" +431 39 loss """bceaftersigmoid""" +431 39 regularizer """no""" +431 39 optimizer """adam""" +431 39 training_loop """lcwa""" +431 39 evaluator """rankbased""" +431 40 dataset """kinships""" +431 40 model """ntn""" +431 40 loss """bceaftersigmoid""" +431 40 regularizer """no""" +431 40 optimizer """adam""" +431 40 training_loop """lcwa""" +431 40 evaluator """rankbased""" +431 41 dataset """kinships""" +431 41 model """ntn""" +431 41 loss """bceaftersigmoid""" +431 41 regularizer """no""" +431 41 optimizer """adam""" +431 41 training_loop """lcwa""" +431 41 evaluator """rankbased""" +431 42 dataset """kinships""" +431 42 model """ntn""" +431 42 loss """bceaftersigmoid""" +431 42 regularizer """no""" +431 42 optimizer """adam""" +431 42 training_loop """lcwa""" +431 42 evaluator """rankbased""" +431 43 dataset """kinships""" +431 43 model """ntn""" +431 43 loss 
"""bceaftersigmoid""" +431 43 regularizer """no""" +431 43 optimizer """adam""" +431 43 training_loop """lcwa""" +431 43 evaluator """rankbased""" +431 44 dataset """kinships""" +431 44 model """ntn""" +431 44 loss """bceaftersigmoid""" +431 44 regularizer """no""" +431 44 optimizer """adam""" +431 44 training_loop """lcwa""" +431 44 evaluator """rankbased""" +431 45 dataset """kinships""" +431 45 model """ntn""" +431 45 loss """bceaftersigmoid""" +431 45 regularizer """no""" +431 45 optimizer """adam""" +431 45 training_loop """lcwa""" +431 45 evaluator """rankbased""" +431 46 dataset """kinships""" +431 46 model """ntn""" +431 46 loss """bceaftersigmoid""" +431 46 regularizer """no""" +431 46 optimizer """adam""" +431 46 training_loop """lcwa""" +431 46 evaluator """rankbased""" +431 47 dataset """kinships""" +431 47 model """ntn""" +431 47 loss """bceaftersigmoid""" +431 47 regularizer """no""" +431 47 optimizer """adam""" +431 47 training_loop """lcwa""" +431 47 evaluator """rankbased""" +431 48 dataset """kinships""" +431 48 model """ntn""" +431 48 loss """bceaftersigmoid""" +431 48 regularizer """no""" +431 48 optimizer """adam""" +431 48 training_loop """lcwa""" +431 48 evaluator """rankbased""" +431 49 dataset """kinships""" +431 49 model """ntn""" +431 49 loss """bceaftersigmoid""" +431 49 regularizer """no""" +431 49 optimizer """adam""" +431 49 training_loop """lcwa""" +431 49 evaluator """rankbased""" +431 50 dataset """kinships""" +431 50 model """ntn""" +431 50 loss """bceaftersigmoid""" +431 50 regularizer """no""" +431 50 optimizer """adam""" +431 50 training_loop """lcwa""" +431 50 evaluator """rankbased""" +431 51 dataset """kinships""" +431 51 model """ntn""" +431 51 loss """bceaftersigmoid""" +431 51 regularizer """no""" +431 51 optimizer """adam""" +431 51 training_loop """lcwa""" +431 51 evaluator """rankbased""" +431 52 dataset """kinships""" +431 52 model """ntn""" +431 52 loss """bceaftersigmoid""" +431 52 regularizer """no""" +431 52 
optimizer """adam""" +431 52 training_loop """lcwa""" +431 52 evaluator """rankbased""" +431 53 dataset """kinships""" +431 53 model """ntn""" +431 53 loss """bceaftersigmoid""" +431 53 regularizer """no""" +431 53 optimizer """adam""" +431 53 training_loop """lcwa""" +431 53 evaluator """rankbased""" +431 54 dataset """kinships""" +431 54 model """ntn""" +431 54 loss """bceaftersigmoid""" +431 54 regularizer """no""" +431 54 optimizer """adam""" +431 54 training_loop """lcwa""" +431 54 evaluator """rankbased""" +431 55 dataset """kinships""" +431 55 model """ntn""" +431 55 loss """bceaftersigmoid""" +431 55 regularizer """no""" +431 55 optimizer """adam""" +431 55 training_loop """lcwa""" +431 55 evaluator """rankbased""" +431 56 dataset """kinships""" +431 56 model """ntn""" +431 56 loss """bceaftersigmoid""" +431 56 regularizer """no""" +431 56 optimizer """adam""" +431 56 training_loop """lcwa""" +431 56 evaluator """rankbased""" +431 57 dataset """kinships""" +431 57 model """ntn""" +431 57 loss """bceaftersigmoid""" +431 57 regularizer """no""" +431 57 optimizer """adam""" +431 57 training_loop """lcwa""" +431 57 evaluator """rankbased""" +431 58 dataset """kinships""" +431 58 model """ntn""" +431 58 loss """bceaftersigmoid""" +431 58 regularizer """no""" +431 58 optimizer """adam""" +431 58 training_loop """lcwa""" +431 58 evaluator """rankbased""" +431 59 dataset """kinships""" +431 59 model """ntn""" +431 59 loss """bceaftersigmoid""" +431 59 regularizer """no""" +431 59 optimizer """adam""" +431 59 training_loop """lcwa""" +431 59 evaluator """rankbased""" +431 60 dataset """kinships""" +431 60 model """ntn""" +431 60 loss """bceaftersigmoid""" +431 60 regularizer """no""" +431 60 optimizer """adam""" +431 60 training_loop """lcwa""" +431 60 evaluator """rankbased""" +431 61 dataset """kinships""" +431 61 model """ntn""" +431 61 loss """bceaftersigmoid""" +431 61 regularizer """no""" +431 61 optimizer """adam""" +431 61 training_loop """lcwa""" +431 61 
evaluator """rankbased""" +431 62 dataset """kinships""" +431 62 model """ntn""" +431 62 loss """bceaftersigmoid""" +431 62 regularizer """no""" +431 62 optimizer """adam""" +431 62 training_loop """lcwa""" +431 62 evaluator """rankbased""" +431 63 dataset """kinships""" +431 63 model """ntn""" +431 63 loss """bceaftersigmoid""" +431 63 regularizer """no""" +431 63 optimizer """adam""" +431 63 training_loop """lcwa""" +431 63 evaluator """rankbased""" +431 64 dataset """kinships""" +431 64 model """ntn""" +431 64 loss """bceaftersigmoid""" +431 64 regularizer """no""" +431 64 optimizer """adam""" +431 64 training_loop """lcwa""" +431 64 evaluator """rankbased""" +431 65 dataset """kinships""" +431 65 model """ntn""" +431 65 loss """bceaftersigmoid""" +431 65 regularizer """no""" +431 65 optimizer """adam""" +431 65 training_loop """lcwa""" +431 65 evaluator """rankbased""" +431 66 dataset """kinships""" +431 66 model """ntn""" +431 66 loss """bceaftersigmoid""" +431 66 regularizer """no""" +431 66 optimizer """adam""" +431 66 training_loop """lcwa""" +431 66 evaluator """rankbased""" +431 67 dataset """kinships""" +431 67 model """ntn""" +431 67 loss """bceaftersigmoid""" +431 67 regularizer """no""" +431 67 optimizer """adam""" +431 67 training_loop """lcwa""" +431 67 evaluator """rankbased""" +431 68 dataset """kinships""" +431 68 model """ntn""" +431 68 loss """bceaftersigmoid""" +431 68 regularizer """no""" +431 68 optimizer """adam""" +431 68 training_loop """lcwa""" +431 68 evaluator """rankbased""" +431 69 dataset """kinships""" +431 69 model """ntn""" +431 69 loss """bceaftersigmoid""" +431 69 regularizer """no""" +431 69 optimizer """adam""" +431 69 training_loop """lcwa""" +431 69 evaluator """rankbased""" +431 70 dataset """kinships""" +431 70 model """ntn""" +431 70 loss """bceaftersigmoid""" +431 70 regularizer """no""" +431 70 optimizer """adam""" +431 70 training_loop """lcwa""" +431 70 evaluator """rankbased""" +431 71 dataset """kinships""" +431 71 
model """ntn""" +431 71 loss """bceaftersigmoid""" +431 71 regularizer """no""" +431 71 optimizer """adam""" +431 71 training_loop """lcwa""" +431 71 evaluator """rankbased""" +431 72 dataset """kinships""" +431 72 model """ntn""" +431 72 loss """bceaftersigmoid""" +431 72 regularizer """no""" +431 72 optimizer """adam""" +431 72 training_loop """lcwa""" +431 72 evaluator """rankbased""" +431 73 dataset """kinships""" +431 73 model """ntn""" +431 73 loss """bceaftersigmoid""" +431 73 regularizer """no""" +431 73 optimizer """adam""" +431 73 training_loop """lcwa""" +431 73 evaluator """rankbased""" +431 74 dataset """kinships""" +431 74 model """ntn""" +431 74 loss """bceaftersigmoid""" +431 74 regularizer """no""" +431 74 optimizer """adam""" +431 74 training_loop """lcwa""" +431 74 evaluator """rankbased""" +431 75 dataset """kinships""" +431 75 model """ntn""" +431 75 loss """bceaftersigmoid""" +431 75 regularizer """no""" +431 75 optimizer """adam""" +431 75 training_loop """lcwa""" +431 75 evaluator """rankbased""" +431 76 dataset """kinships""" +431 76 model """ntn""" +431 76 loss """bceaftersigmoid""" +431 76 regularizer """no""" +431 76 optimizer """adam""" +431 76 training_loop """lcwa""" +431 76 evaluator """rankbased""" +431 77 dataset """kinships""" +431 77 model """ntn""" +431 77 loss """bceaftersigmoid""" +431 77 regularizer """no""" +431 77 optimizer """adam""" +431 77 training_loop """lcwa""" +431 77 evaluator """rankbased""" +431 78 dataset """kinships""" +431 78 model """ntn""" +431 78 loss """bceaftersigmoid""" +431 78 regularizer """no""" +431 78 optimizer """adam""" +431 78 training_loop """lcwa""" +431 78 evaluator """rankbased""" +431 79 dataset """kinships""" +431 79 model """ntn""" +431 79 loss """bceaftersigmoid""" +431 79 regularizer """no""" +431 79 optimizer """adam""" +431 79 training_loop """lcwa""" +431 79 evaluator """rankbased""" +431 80 dataset """kinships""" +431 80 model """ntn""" +431 80 loss """bceaftersigmoid""" +431 80 
regularizer """no""" +431 80 optimizer """adam""" +431 80 training_loop """lcwa""" +431 80 evaluator """rankbased""" +431 81 dataset """kinships""" +431 81 model """ntn""" +431 81 loss """bceaftersigmoid""" +431 81 regularizer """no""" +431 81 optimizer """adam""" +431 81 training_loop """lcwa""" +431 81 evaluator """rankbased""" +431 82 dataset """kinships""" +431 82 model """ntn""" +431 82 loss """bceaftersigmoid""" +431 82 regularizer """no""" +431 82 optimizer """adam""" +431 82 training_loop """lcwa""" +431 82 evaluator """rankbased""" +431 83 dataset """kinships""" +431 83 model """ntn""" +431 83 loss """bceaftersigmoid""" +431 83 regularizer """no""" +431 83 optimizer """adam""" +431 83 training_loop """lcwa""" +431 83 evaluator """rankbased""" +431 84 dataset """kinships""" +431 84 model """ntn""" +431 84 loss """bceaftersigmoid""" +431 84 regularizer """no""" +431 84 optimizer """adam""" +431 84 training_loop """lcwa""" +431 84 evaluator """rankbased""" +431 85 dataset """kinships""" +431 85 model """ntn""" +431 85 loss """bceaftersigmoid""" +431 85 regularizer """no""" +431 85 optimizer """adam""" +431 85 training_loop """lcwa""" +431 85 evaluator """rankbased""" +431 86 dataset """kinships""" +431 86 model """ntn""" +431 86 loss """bceaftersigmoid""" +431 86 regularizer """no""" +431 86 optimizer """adam""" +431 86 training_loop """lcwa""" +431 86 evaluator """rankbased""" +431 87 dataset """kinships""" +431 87 model """ntn""" +431 87 loss """bceaftersigmoid""" +431 87 regularizer """no""" +431 87 optimizer """adam""" +431 87 training_loop """lcwa""" +431 87 evaluator """rankbased""" +431 88 dataset """kinships""" +431 88 model """ntn""" +431 88 loss """bceaftersigmoid""" +431 88 regularizer """no""" +431 88 optimizer """adam""" +431 88 training_loop """lcwa""" +431 88 evaluator """rankbased""" +431 89 dataset """kinships""" +431 89 model """ntn""" +431 89 loss """bceaftersigmoid""" +431 89 regularizer """no""" +431 89 optimizer """adam""" +431 89 
training_loop """lcwa""" +431 89 evaluator """rankbased""" +431 90 dataset """kinships""" +431 90 model """ntn""" +431 90 loss """bceaftersigmoid""" +431 90 regularizer """no""" +431 90 optimizer """adam""" +431 90 training_loop """lcwa""" +431 90 evaluator """rankbased""" +431 91 dataset """kinships""" +431 91 model """ntn""" +431 91 loss """bceaftersigmoid""" +431 91 regularizer """no""" +431 91 optimizer """adam""" +431 91 training_loop """lcwa""" +431 91 evaluator """rankbased""" +431 92 dataset """kinships""" +431 92 model """ntn""" +431 92 loss """bceaftersigmoid""" +431 92 regularizer """no""" +431 92 optimizer """adam""" +431 92 training_loop """lcwa""" +431 92 evaluator """rankbased""" +431 93 dataset """kinships""" +431 93 model """ntn""" +431 93 loss """bceaftersigmoid""" +431 93 regularizer """no""" +431 93 optimizer """adam""" +431 93 training_loop """lcwa""" +431 93 evaluator """rankbased""" +431 94 dataset """kinships""" +431 94 model """ntn""" +431 94 loss """bceaftersigmoid""" +431 94 regularizer """no""" +431 94 optimizer """adam""" +431 94 training_loop """lcwa""" +431 94 evaluator """rankbased""" +431 95 dataset """kinships""" +431 95 model """ntn""" +431 95 loss """bceaftersigmoid""" +431 95 regularizer """no""" +431 95 optimizer """adam""" +431 95 training_loop """lcwa""" +431 95 evaluator """rankbased""" +431 96 dataset """kinships""" +431 96 model """ntn""" +431 96 loss """bceaftersigmoid""" +431 96 regularizer """no""" +431 96 optimizer """adam""" +431 96 training_loop """lcwa""" +431 96 evaluator """rankbased""" +431 97 dataset """kinships""" +431 97 model """ntn""" +431 97 loss """bceaftersigmoid""" +431 97 regularizer """no""" +431 97 optimizer """adam""" +431 97 training_loop """lcwa""" +431 97 evaluator """rankbased""" +431 98 dataset """kinships""" +431 98 model """ntn""" +431 98 loss """bceaftersigmoid""" +431 98 regularizer """no""" +431 98 optimizer """adam""" +431 98 training_loop """lcwa""" +431 98 evaluator """rankbased""" +431 
99 dataset """kinships""" +431 99 model """ntn""" +431 99 loss """bceaftersigmoid""" +431 99 regularizer """no""" +431 99 optimizer """adam""" +431 99 training_loop """lcwa""" +431 99 evaluator """rankbased""" +431 100 dataset """kinships""" +431 100 model """ntn""" +431 100 loss """bceaftersigmoid""" +431 100 regularizer """no""" +431 100 optimizer """adam""" +431 100 training_loop """lcwa""" +431 100 evaluator """rankbased""" +432 1 model.embedding_dim 2.0 +432 1 optimizer.lr 0.013165982884810025 +432 1 training.batch_size 1.0 +432 1 training.label_smoothing 0.001486395073820071 +432 2 model.embedding_dim 0.0 +432 2 optimizer.lr 0.013485458520833295 +432 2 training.batch_size 2.0 +432 2 training.label_smoothing 0.008530265040739386 +432 3 model.embedding_dim 1.0 +432 3 optimizer.lr 0.022690490516564753 +432 3 training.batch_size 0.0 +432 3 training.label_smoothing 0.041345984907031 +432 4 model.embedding_dim 1.0 +432 4 optimizer.lr 0.01180708562804732 +432 4 training.batch_size 1.0 +432 4 training.label_smoothing 0.1141097709259789 +432 5 model.embedding_dim 0.0 +432 5 optimizer.lr 0.004138994366203933 +432 5 training.batch_size 2.0 +432 5 training.label_smoothing 0.00756109372841237 +432 6 model.embedding_dim 0.0 +432 6 optimizer.lr 0.0015708450504948299 +432 6 training.batch_size 0.0 +432 6 training.label_smoothing 0.0021725680224467868 +432 7 model.embedding_dim 2.0 +432 7 optimizer.lr 0.0014307387428188678 +432 7 training.batch_size 1.0 +432 7 training.label_smoothing 0.018635892052131148 +432 8 model.embedding_dim 1.0 +432 8 optimizer.lr 0.014070316405628568 +432 8 training.batch_size 1.0 +432 8 training.label_smoothing 0.7522726656631394 +432 9 model.embedding_dim 2.0 +432 9 optimizer.lr 0.017141467780018526 +432 9 training.batch_size 1.0 +432 9 training.label_smoothing 0.1345206351526865 +432 10 model.embedding_dim 0.0 +432 10 optimizer.lr 0.05368543816123071 +432 10 training.batch_size 2.0 +432 10 training.label_smoothing 0.002303465260907722 +432 11 
model.embedding_dim 2.0 +432 11 optimizer.lr 0.014666719741710597 +432 11 training.batch_size 2.0 +432 11 training.label_smoothing 0.5155682074118758 +432 12 model.embedding_dim 1.0 +432 12 optimizer.lr 0.009853576206769276 +432 12 training.batch_size 2.0 +432 12 training.label_smoothing 0.0035790742955642083 +432 13 model.embedding_dim 0.0 +432 13 optimizer.lr 0.0014364209389616147 +432 13 training.batch_size 0.0 +432 13 training.label_smoothing 0.016368053005892794 +432 14 model.embedding_dim 2.0 +432 14 optimizer.lr 0.010348338787598148 +432 14 training.batch_size 2.0 +432 14 training.label_smoothing 0.30112663110975535 +432 15 model.embedding_dim 0.0 +432 15 optimizer.lr 0.0187054004246903 +432 15 training.batch_size 2.0 +432 15 training.label_smoothing 0.0033443512395400043 +432 16 model.embedding_dim 1.0 +432 16 optimizer.lr 0.005961709695257256 +432 16 training.batch_size 1.0 +432 16 training.label_smoothing 0.054961143428206585 +432 17 model.embedding_dim 1.0 +432 17 optimizer.lr 0.0017529499481838751 +432 17 training.batch_size 2.0 +432 17 training.label_smoothing 0.09408004621295049 +432 18 model.embedding_dim 1.0 +432 18 optimizer.lr 0.006319716584024674 +432 18 training.batch_size 1.0 +432 18 training.label_smoothing 0.018757875856978485 +432 19 model.embedding_dim 0.0 +432 19 optimizer.lr 0.025047120975494035 +432 19 training.batch_size 2.0 +432 19 training.label_smoothing 0.01256955133309165 +432 20 model.embedding_dim 1.0 +432 20 optimizer.lr 0.003719140021401932 +432 20 training.batch_size 1.0 +432 20 training.label_smoothing 0.0010302162313477272 +432 21 model.embedding_dim 2.0 +432 21 optimizer.lr 0.07023568681033657 +432 21 training.batch_size 2.0 +432 21 training.label_smoothing 0.0011317779312878856 +432 22 model.embedding_dim 1.0 +432 22 optimizer.lr 0.002304630449181573 +432 22 training.batch_size 0.0 +432 22 training.label_smoothing 0.010474524668379684 +432 23 model.embedding_dim 2.0 +432 23 optimizer.lr 0.028362636775545125 +432 23 
training.batch_size 2.0 +432 23 training.label_smoothing 0.046676324326472125 +432 24 model.embedding_dim 1.0 +432 24 optimizer.lr 0.02557792973480774 +432 24 training.batch_size 2.0 +432 24 training.label_smoothing 0.03710955028411261 +432 25 model.embedding_dim 2.0 +432 25 optimizer.lr 0.0201553905088972 +432 25 training.batch_size 0.0 +432 25 training.label_smoothing 0.0034656009265936517 +432 26 model.embedding_dim 0.0 +432 26 optimizer.lr 0.002000126348667047 +432 26 training.batch_size 1.0 +432 26 training.label_smoothing 0.6536291621204431 +432 27 model.embedding_dim 2.0 +432 27 optimizer.lr 0.0011625043161696807 +432 27 training.batch_size 2.0 +432 27 training.label_smoothing 0.0026073107328224558 +432 28 model.embedding_dim 0.0 +432 28 optimizer.lr 0.007570813778097445 +432 28 training.batch_size 1.0 +432 28 training.label_smoothing 0.03458927634982635 +432 29 model.embedding_dim 0.0 +432 29 optimizer.lr 0.03265806437830852 +432 29 training.batch_size 1.0 +432 29 training.label_smoothing 0.975397574904956 +432 30 model.embedding_dim 1.0 +432 30 optimizer.lr 0.005622771455712449 +432 30 training.batch_size 2.0 +432 30 training.label_smoothing 0.04073730989129202 +432 31 model.embedding_dim 1.0 +432 31 optimizer.lr 0.0010761577474096829 +432 31 training.batch_size 0.0 +432 31 training.label_smoothing 0.021168614972260393 +432 32 model.embedding_dim 1.0 +432 32 optimizer.lr 0.04356588093079079 +432 32 training.batch_size 2.0 +432 32 training.label_smoothing 0.024696738345811462 +432 33 model.embedding_dim 2.0 +432 33 optimizer.lr 0.0034602748297064253 +432 33 training.batch_size 1.0 +432 33 training.label_smoothing 0.9732557799976114 +432 34 model.embedding_dim 2.0 +432 34 optimizer.lr 0.0019170141074271684 +432 34 training.batch_size 2.0 +432 34 training.label_smoothing 0.003289216208280497 +432 35 model.embedding_dim 2.0 +432 35 optimizer.lr 0.0303350182347795 +432 35 training.batch_size 2.0 +432 35 training.label_smoothing 0.9991814920358358 +432 36 
model.embedding_dim 1.0 +432 36 optimizer.lr 0.008059129914302875 +432 36 training.batch_size 1.0 +432 36 training.label_smoothing 0.6009360079440229 +432 37 model.embedding_dim 1.0 +432 37 optimizer.lr 0.002008108062659119 +432 37 training.batch_size 0.0 +432 37 training.label_smoothing 0.004042167727581786 +432 38 model.embedding_dim 1.0 +432 38 optimizer.lr 0.00943529871542214 +432 38 training.batch_size 2.0 +432 38 training.label_smoothing 0.6801935331509337 +432 39 model.embedding_dim 2.0 +432 39 optimizer.lr 0.08156630610884152 +432 39 training.batch_size 0.0 +432 39 training.label_smoothing 0.003196126835294983 +432 40 model.embedding_dim 2.0 +432 40 optimizer.lr 0.04098490463786402 +432 40 training.batch_size 1.0 +432 40 training.label_smoothing 0.029456464787815557 +432 41 model.embedding_dim 1.0 +432 41 optimizer.lr 0.0012372658716682374 +432 41 training.batch_size 0.0 +432 41 training.label_smoothing 0.002441240354057441 +432 42 model.embedding_dim 2.0 +432 42 optimizer.lr 0.024615510927495095 +432 42 training.batch_size 2.0 +432 42 training.label_smoothing 0.031154835364297816 +432 43 model.embedding_dim 0.0 +432 43 optimizer.lr 0.019586143192919143 +432 43 training.batch_size 0.0 +432 43 training.label_smoothing 0.022050750749595296 +432 44 model.embedding_dim 0.0 +432 44 optimizer.lr 0.05567505750107538 +432 44 training.batch_size 2.0 +432 44 training.label_smoothing 0.32564785359793585 +432 45 model.embedding_dim 0.0 +432 45 optimizer.lr 0.0020009171319866803 +432 45 training.batch_size 1.0 +432 45 training.label_smoothing 0.005685387508145759 +432 46 model.embedding_dim 1.0 +432 46 optimizer.lr 0.007594979997275051 +432 46 training.batch_size 1.0 +432 46 training.label_smoothing 0.11937477151413575 +432 47 model.embedding_dim 2.0 +432 47 optimizer.lr 0.0034083266636876493 +432 47 training.batch_size 1.0 +432 47 training.label_smoothing 0.03136485910483615 +432 48 model.embedding_dim 1.0 +432 48 optimizer.lr 0.005910406716325768 +432 48 
training.batch_size 1.0 +432 48 training.label_smoothing 0.14487748340880666 +432 49 model.embedding_dim 1.0 +432 49 optimizer.lr 0.003326794614961468 +432 49 training.batch_size 2.0 +432 49 training.label_smoothing 0.002614019835730624 +432 50 model.embedding_dim 1.0 +432 50 optimizer.lr 0.05790985644292289 +432 50 training.batch_size 2.0 +432 50 training.label_smoothing 0.016268277505208064 +432 51 model.embedding_dim 0.0 +432 51 optimizer.lr 0.017313690272894075 +432 51 training.batch_size 2.0 +432 51 training.label_smoothing 0.06295905276064913 +432 52 model.embedding_dim 1.0 +432 52 optimizer.lr 0.004318141735978021 +432 52 training.batch_size 0.0 +432 52 training.label_smoothing 0.003993707208618275 +432 53 model.embedding_dim 1.0 +432 53 optimizer.lr 0.001451670741585327 +432 53 training.batch_size 0.0 +432 53 training.label_smoothing 0.013134802536061113 +432 54 model.embedding_dim 1.0 +432 54 optimizer.lr 0.029952119033166722 +432 54 training.batch_size 0.0 +432 54 training.label_smoothing 0.002020428153718849 +432 55 model.embedding_dim 0.0 +432 55 optimizer.lr 0.01360421634588144 +432 55 training.batch_size 1.0 +432 55 training.label_smoothing 0.007546766079676899 +432 56 model.embedding_dim 2.0 +432 56 optimizer.lr 0.03276833631442715 +432 56 training.batch_size 2.0 +432 56 training.label_smoothing 0.14197735789915206 +432 57 model.embedding_dim 1.0 +432 57 optimizer.lr 0.06689539005226552 +432 57 training.batch_size 2.0 +432 57 training.label_smoothing 0.0011624220808779885 +432 58 model.embedding_dim 1.0 +432 58 optimizer.lr 0.013880064903871977 +432 58 training.batch_size 0.0 +432 58 training.label_smoothing 0.01500982801090152 +432 59 model.embedding_dim 1.0 +432 59 optimizer.lr 0.08438068154998593 +432 59 training.batch_size 2.0 +432 59 training.label_smoothing 0.030697913453031175 +432 60 model.embedding_dim 0.0 +432 60 optimizer.lr 0.0020841508338038813 +432 60 training.batch_size 1.0 +432 60 training.label_smoothing 0.007275040024006994 +432 61 
model.embedding_dim 2.0 +432 61 optimizer.lr 0.006947515895753794 +432 61 training.batch_size 2.0 +432 61 training.label_smoothing 0.0013352858213522237 +432 62 model.embedding_dim 1.0 +432 62 optimizer.lr 0.015274836676199057 +432 62 training.batch_size 1.0 +432 62 training.label_smoothing 0.7026222694228985 +432 63 model.embedding_dim 2.0 +432 63 optimizer.lr 0.05877831542592217 +432 63 training.batch_size 2.0 +432 63 training.label_smoothing 0.003832023775651997 +432 64 model.embedding_dim 1.0 +432 64 optimizer.lr 0.05360219020792118 +432 64 training.batch_size 1.0 +432 64 training.label_smoothing 0.054014291845599235 +432 65 model.embedding_dim 2.0 +432 65 optimizer.lr 0.06494499288925028 +432 65 training.batch_size 0.0 +432 65 training.label_smoothing 0.033839400179712646 +432 66 model.embedding_dim 0.0 +432 66 optimizer.lr 0.005677361941145846 +432 66 training.batch_size 1.0 +432 66 training.label_smoothing 0.0033440975498174514 +432 67 model.embedding_dim 2.0 +432 67 optimizer.lr 0.0020154656740932453 +432 67 training.batch_size 2.0 +432 67 training.label_smoothing 0.033114019310629884 +432 68 model.embedding_dim 0.0 +432 68 optimizer.lr 0.011039221367440849 +432 68 training.batch_size 2.0 +432 68 training.label_smoothing 0.029257345700674505 +432 69 model.embedding_dim 1.0 +432 69 optimizer.lr 0.006445613736645344 +432 69 training.batch_size 1.0 +432 69 training.label_smoothing 0.8763959533371369 +432 70 model.embedding_dim 1.0 +432 70 optimizer.lr 0.0022708916524268818 +432 70 training.batch_size 1.0 +432 70 training.label_smoothing 0.0011507880374721454 +432 71 model.embedding_dim 0.0 +432 71 optimizer.lr 0.019741377072858505 +432 71 training.batch_size 0.0 +432 71 training.label_smoothing 0.026913460688681997 +432 72 model.embedding_dim 0.0 +432 72 optimizer.lr 0.001876680394016815 +432 72 training.batch_size 0.0 +432 72 training.label_smoothing 0.7608219308605854 +432 73 model.embedding_dim 2.0 +432 73 optimizer.lr 0.002195510063307469 +432 73 
training.batch_size 0.0 +432 73 training.label_smoothing 0.0010718628359930738 +432 74 model.embedding_dim 0.0 +432 74 optimizer.lr 0.005480990565193745 +432 74 training.batch_size 1.0 +432 74 training.label_smoothing 0.7142880738934102 +432 75 model.embedding_dim 2.0 +432 75 optimizer.lr 0.011928166686349675 +432 75 training.batch_size 2.0 +432 75 training.label_smoothing 0.16328593680633177 +432 76 model.embedding_dim 2.0 +432 76 optimizer.lr 0.06241891066747814 +432 76 training.batch_size 1.0 +432 76 training.label_smoothing 0.22360699595870775 +432 77 model.embedding_dim 0.0 +432 77 optimizer.lr 0.06939567302227374 +432 77 training.batch_size 2.0 +432 77 training.label_smoothing 0.2360817923070581 +432 78 model.embedding_dim 0.0 +432 78 optimizer.lr 0.05395784683374603 +432 78 training.batch_size 1.0 +432 78 training.label_smoothing 0.03187072463627671 +432 79 model.embedding_dim 0.0 +432 79 optimizer.lr 0.0024111314495768085 +432 79 training.batch_size 2.0 +432 79 training.label_smoothing 0.00924187656302904 +432 80 model.embedding_dim 2.0 +432 80 optimizer.lr 0.0030235566957758967 +432 80 training.batch_size 2.0 +432 80 training.label_smoothing 0.04969946189287409 +432 81 model.embedding_dim 0.0 +432 81 optimizer.lr 0.011570548110646742 +432 81 training.batch_size 1.0 +432 81 training.label_smoothing 0.01545030436902648 +432 82 model.embedding_dim 0.0 +432 82 optimizer.lr 0.0019540927000765546 +432 82 training.batch_size 1.0 +432 82 training.label_smoothing 0.4775783983282025 +432 83 model.embedding_dim 0.0 +432 83 optimizer.lr 0.05574422420717723 +432 83 training.batch_size 2.0 +432 83 training.label_smoothing 0.08736406720947205 +432 84 model.embedding_dim 1.0 +432 84 optimizer.lr 0.023717001701134968 +432 84 training.batch_size 0.0 +432 84 training.label_smoothing 0.011377754261475217 +432 85 model.embedding_dim 1.0 +432 85 optimizer.lr 0.03134395794079404 +432 85 training.batch_size 0.0 +432 85 training.label_smoothing 0.15800304607859106 +432 86 
model.embedding_dim 1.0 +432 86 optimizer.lr 0.002820732544524617 +432 86 training.batch_size 0.0 +432 86 training.label_smoothing 0.0105074915123429 +432 87 model.embedding_dim 1.0 +432 87 optimizer.lr 0.06818322070977803 +432 87 training.batch_size 0.0 +432 87 training.label_smoothing 0.6054545153776573 +432 88 model.embedding_dim 1.0 +432 88 optimizer.lr 0.007115724564469352 +432 88 training.batch_size 2.0 +432 88 training.label_smoothing 0.001114694999640147 +432 89 model.embedding_dim 1.0 +432 89 optimizer.lr 0.007192535423327477 +432 89 training.batch_size 0.0 +432 89 training.label_smoothing 0.03574462913535779 +432 90 model.embedding_dim 0.0 +432 90 optimizer.lr 0.0016019346928075374 +432 90 training.batch_size 1.0 +432 90 training.label_smoothing 0.04060220419950045 +432 91 model.embedding_dim 2.0 +432 91 optimizer.lr 0.01679314254598911 +432 91 training.batch_size 2.0 +432 91 training.label_smoothing 0.21018109533261206 +432 92 model.embedding_dim 0.0 +432 92 optimizer.lr 0.010159164380970104 +432 92 training.batch_size 0.0 +432 92 training.label_smoothing 0.1222956819258319 +432 93 model.embedding_dim 1.0 +432 93 optimizer.lr 0.02550461844484617 +432 93 training.batch_size 0.0 +432 93 training.label_smoothing 0.16167449210350696 +432 94 model.embedding_dim 0.0 +432 94 optimizer.lr 0.01246691867554494 +432 94 training.batch_size 0.0 +432 94 training.label_smoothing 0.004200311656001082 +432 95 model.embedding_dim 0.0 +432 95 optimizer.lr 0.025483881258558765 +432 95 training.batch_size 2.0 +432 95 training.label_smoothing 0.009570875329100375 +432 96 model.embedding_dim 0.0 +432 96 optimizer.lr 0.00634253603311301 +432 96 training.batch_size 0.0 +432 96 training.label_smoothing 0.01182044804364298 +432 97 model.embedding_dim 0.0 +432 97 optimizer.lr 0.01172449166142919 +432 97 training.batch_size 2.0 +432 97 training.label_smoothing 0.012794989103194005 +432 98 model.embedding_dim 1.0 +432 98 optimizer.lr 0.008597970417436188 +432 98 training.batch_size 
1.0 +432 98 training.label_smoothing 0.00555800734647184 +432 99 model.embedding_dim 0.0 +432 99 optimizer.lr 0.015551077539986467 +432 99 training.batch_size 2.0 +432 99 training.label_smoothing 0.010890969363237623 +432 100 model.embedding_dim 0.0 +432 100 optimizer.lr 0.001765823250576434 +432 100 training.batch_size 0.0 +432 100 training.label_smoothing 0.27483132117867487 +432 1 dataset """kinships""" +432 1 model """ntn""" +432 1 loss """softplus""" +432 1 regularizer """no""" +432 1 optimizer """adam""" +432 1 training_loop """lcwa""" +432 1 evaluator """rankbased""" +432 2 dataset """kinships""" +432 2 model """ntn""" +432 2 loss """softplus""" +432 2 regularizer """no""" +432 2 optimizer """adam""" +432 2 training_loop """lcwa""" +432 2 evaluator """rankbased""" +432 3 dataset """kinships""" +432 3 model """ntn""" +432 3 loss """softplus""" +432 3 regularizer """no""" +432 3 optimizer """adam""" +432 3 training_loop """lcwa""" +432 3 evaluator """rankbased""" +432 4 dataset """kinships""" +432 4 model """ntn""" +432 4 loss """softplus""" +432 4 regularizer """no""" +432 4 optimizer """adam""" +432 4 training_loop """lcwa""" +432 4 evaluator """rankbased""" +432 5 dataset """kinships""" +432 5 model """ntn""" +432 5 loss """softplus""" +432 5 regularizer """no""" +432 5 optimizer """adam""" +432 5 training_loop """lcwa""" +432 5 evaluator """rankbased""" +432 6 dataset """kinships""" +432 6 model """ntn""" +432 6 loss """softplus""" +432 6 regularizer """no""" +432 6 optimizer """adam""" +432 6 training_loop """lcwa""" +432 6 evaluator """rankbased""" +432 7 dataset """kinships""" +432 7 model """ntn""" +432 7 loss """softplus""" +432 7 regularizer """no""" +432 7 optimizer """adam""" +432 7 training_loop """lcwa""" +432 7 evaluator """rankbased""" +432 8 dataset """kinships""" +432 8 model """ntn""" +432 8 loss """softplus""" +432 8 regularizer """no""" +432 8 optimizer """adam""" +432 8 training_loop """lcwa""" +432 8 evaluator """rankbased""" +432 9 
dataset """kinships""" +432 9 model """ntn""" +432 9 loss """softplus""" +432 9 regularizer """no""" +432 9 optimizer """adam""" +432 9 training_loop """lcwa""" +432 9 evaluator """rankbased""" +432 10 dataset """kinships""" +432 10 model """ntn""" +432 10 loss """softplus""" +432 10 regularizer """no""" +432 10 optimizer """adam""" +432 10 training_loop """lcwa""" +432 10 evaluator """rankbased""" +432 11 dataset """kinships""" +432 11 model """ntn""" +432 11 loss """softplus""" +432 11 regularizer """no""" +432 11 optimizer """adam""" +432 11 training_loop """lcwa""" +432 11 evaluator """rankbased""" +432 12 dataset """kinships""" +432 12 model """ntn""" +432 12 loss """softplus""" +432 12 regularizer """no""" +432 12 optimizer """adam""" +432 12 training_loop """lcwa""" +432 12 evaluator """rankbased""" +432 13 dataset """kinships""" +432 13 model """ntn""" +432 13 loss """softplus""" +432 13 regularizer """no""" +432 13 optimizer """adam""" +432 13 training_loop """lcwa""" +432 13 evaluator """rankbased""" +432 14 dataset """kinships""" +432 14 model """ntn""" +432 14 loss """softplus""" +432 14 regularizer """no""" +432 14 optimizer """adam""" +432 14 training_loop """lcwa""" +432 14 evaluator """rankbased""" +432 15 dataset """kinships""" +432 15 model """ntn""" +432 15 loss """softplus""" +432 15 regularizer """no""" +432 15 optimizer """adam""" +432 15 training_loop """lcwa""" +432 15 evaluator """rankbased""" +432 16 dataset """kinships""" +432 16 model """ntn""" +432 16 loss """softplus""" +432 16 regularizer """no""" +432 16 optimizer """adam""" +432 16 training_loop """lcwa""" +432 16 evaluator """rankbased""" +432 17 dataset """kinships""" +432 17 model """ntn""" +432 17 loss """softplus""" +432 17 regularizer """no""" +432 17 optimizer """adam""" +432 17 training_loop """lcwa""" +432 17 evaluator """rankbased""" +432 18 dataset """kinships""" +432 18 model """ntn""" +432 18 loss """softplus""" +432 18 regularizer """no""" +432 18 optimizer """adam""" 
+432 18 training_loop """lcwa""" +432 18 evaluator """rankbased""" +432 19 dataset """kinships""" +432 19 model """ntn""" +432 19 loss """softplus""" +432 19 regularizer """no""" +432 19 optimizer """adam""" +432 19 training_loop """lcwa""" +432 19 evaluator """rankbased""" +432 20 dataset """kinships""" +432 20 model """ntn""" +432 20 loss """softplus""" +432 20 regularizer """no""" +432 20 optimizer """adam""" +432 20 training_loop """lcwa""" +432 20 evaluator """rankbased""" +432 21 dataset """kinships""" +432 21 model """ntn""" +432 21 loss """softplus""" +432 21 regularizer """no""" +432 21 optimizer """adam""" +432 21 training_loop """lcwa""" +432 21 evaluator """rankbased""" +432 22 dataset """kinships""" +432 22 model """ntn""" +432 22 loss """softplus""" +432 22 regularizer """no""" +432 22 optimizer """adam""" +432 22 training_loop """lcwa""" +432 22 evaluator """rankbased""" +432 23 dataset """kinships""" +432 23 model """ntn""" +432 23 loss """softplus""" +432 23 regularizer """no""" +432 23 optimizer """adam""" +432 23 training_loop """lcwa""" +432 23 evaluator """rankbased""" +432 24 dataset """kinships""" +432 24 model """ntn""" +432 24 loss """softplus""" +432 24 regularizer """no""" +432 24 optimizer """adam""" +432 24 training_loop """lcwa""" +432 24 evaluator """rankbased""" +432 25 dataset """kinships""" +432 25 model """ntn""" +432 25 loss """softplus""" +432 25 regularizer """no""" +432 25 optimizer """adam""" +432 25 training_loop """lcwa""" +432 25 evaluator """rankbased""" +432 26 dataset """kinships""" +432 26 model """ntn""" +432 26 loss """softplus""" +432 26 regularizer """no""" +432 26 optimizer """adam""" +432 26 training_loop """lcwa""" +432 26 evaluator """rankbased""" +432 27 dataset """kinships""" +432 27 model """ntn""" +432 27 loss """softplus""" +432 27 regularizer """no""" +432 27 optimizer """adam""" +432 27 training_loop """lcwa""" +432 27 evaluator """rankbased""" +432 28 dataset """kinships""" +432 28 model """ntn""" +432 
28 loss """softplus""" +432 28 regularizer """no""" +432 28 optimizer """adam""" +432 28 training_loop """lcwa""" +432 28 evaluator """rankbased""" +432 29 dataset """kinships""" +432 29 model """ntn""" +432 29 loss """softplus""" +432 29 regularizer """no""" +432 29 optimizer """adam""" +432 29 training_loop """lcwa""" +432 29 evaluator """rankbased""" +432 30 dataset """kinships""" +432 30 model """ntn""" +432 30 loss """softplus""" +432 30 regularizer """no""" +432 30 optimizer """adam""" +432 30 training_loop """lcwa""" +432 30 evaluator """rankbased""" +432 31 dataset """kinships""" +432 31 model """ntn""" +432 31 loss """softplus""" +432 31 regularizer """no""" +432 31 optimizer """adam""" +432 31 training_loop """lcwa""" +432 31 evaluator """rankbased""" +432 32 dataset """kinships""" +432 32 model """ntn""" +432 32 loss """softplus""" +432 32 regularizer """no""" +432 32 optimizer """adam""" +432 32 training_loop """lcwa""" +432 32 evaluator """rankbased""" +432 33 dataset """kinships""" +432 33 model """ntn""" +432 33 loss """softplus""" +432 33 regularizer """no""" +432 33 optimizer """adam""" +432 33 training_loop """lcwa""" +432 33 evaluator """rankbased""" +432 34 dataset """kinships""" +432 34 model """ntn""" +432 34 loss """softplus""" +432 34 regularizer """no""" +432 34 optimizer """adam""" +432 34 training_loop """lcwa""" +432 34 evaluator """rankbased""" +432 35 dataset """kinships""" +432 35 model """ntn""" +432 35 loss """softplus""" +432 35 regularizer """no""" +432 35 optimizer """adam""" +432 35 training_loop """lcwa""" +432 35 evaluator """rankbased""" +432 36 dataset """kinships""" +432 36 model """ntn""" +432 36 loss """softplus""" +432 36 regularizer """no""" +432 36 optimizer """adam""" +432 36 training_loop """lcwa""" +432 36 evaluator """rankbased""" +432 37 dataset """kinships""" +432 37 model """ntn""" +432 37 loss """softplus""" +432 37 regularizer """no""" +432 37 optimizer """adam""" +432 37 training_loop """lcwa""" +432 37 
evaluator """rankbased""" +432 38 dataset """kinships""" +432 38 model """ntn""" +432 38 loss """softplus""" +432 38 regularizer """no""" +432 38 optimizer """adam""" +432 38 training_loop """lcwa""" +432 38 evaluator """rankbased""" +432 39 dataset """kinships""" +432 39 model """ntn""" +432 39 loss """softplus""" +432 39 regularizer """no""" +432 39 optimizer """adam""" +432 39 training_loop """lcwa""" +432 39 evaluator """rankbased""" +432 40 dataset """kinships""" +432 40 model """ntn""" +432 40 loss """softplus""" +432 40 regularizer """no""" +432 40 optimizer """adam""" +432 40 training_loop """lcwa""" +432 40 evaluator """rankbased""" +432 41 dataset """kinships""" +432 41 model """ntn""" +432 41 loss """softplus""" +432 41 regularizer """no""" +432 41 optimizer """adam""" +432 41 training_loop """lcwa""" +432 41 evaluator """rankbased""" +432 42 dataset """kinships""" +432 42 model """ntn""" +432 42 loss """softplus""" +432 42 regularizer """no""" +432 42 optimizer """adam""" +432 42 training_loop """lcwa""" +432 42 evaluator """rankbased""" +432 43 dataset """kinships""" +432 43 model """ntn""" +432 43 loss """softplus""" +432 43 regularizer """no""" +432 43 optimizer """adam""" +432 43 training_loop """lcwa""" +432 43 evaluator """rankbased""" +432 44 dataset """kinships""" +432 44 model """ntn""" +432 44 loss """softplus""" +432 44 regularizer """no""" +432 44 optimizer """adam""" +432 44 training_loop """lcwa""" +432 44 evaluator """rankbased""" +432 45 dataset """kinships""" +432 45 model """ntn""" +432 45 loss """softplus""" +432 45 regularizer """no""" +432 45 optimizer """adam""" +432 45 training_loop """lcwa""" +432 45 evaluator """rankbased""" +432 46 dataset """kinships""" +432 46 model """ntn""" +432 46 loss """softplus""" +432 46 regularizer """no""" +432 46 optimizer """adam""" +432 46 training_loop """lcwa""" +432 46 evaluator """rankbased""" +432 47 dataset """kinships""" +432 47 model """ntn""" +432 47 loss """softplus""" +432 47 
regularizer """no""" +432 47 optimizer """adam""" +432 47 training_loop """lcwa""" +432 47 evaluator """rankbased""" +432 48 dataset """kinships""" +432 48 model """ntn""" +432 48 loss """softplus""" +432 48 regularizer """no""" +432 48 optimizer """adam""" +432 48 training_loop """lcwa""" +432 48 evaluator """rankbased""" +432 49 dataset """kinships""" +432 49 model """ntn""" +432 49 loss """softplus""" +432 49 regularizer """no""" +432 49 optimizer """adam""" +432 49 training_loop """lcwa""" +432 49 evaluator """rankbased""" +432 50 dataset """kinships""" +432 50 model """ntn""" +432 50 loss """softplus""" +432 50 regularizer """no""" +432 50 optimizer """adam""" +432 50 training_loop """lcwa""" +432 50 evaluator """rankbased""" +432 51 dataset """kinships""" +432 51 model """ntn""" +432 51 loss """softplus""" +432 51 regularizer """no""" +432 51 optimizer """adam""" +432 51 training_loop """lcwa""" +432 51 evaluator """rankbased""" +432 52 dataset """kinships""" +432 52 model """ntn""" +432 52 loss """softplus""" +432 52 regularizer """no""" +432 52 optimizer """adam""" +432 52 training_loop """lcwa""" +432 52 evaluator """rankbased""" +432 53 dataset """kinships""" +432 53 model """ntn""" +432 53 loss """softplus""" +432 53 regularizer """no""" +432 53 optimizer """adam""" +432 53 training_loop """lcwa""" +432 53 evaluator """rankbased""" +432 54 dataset """kinships""" +432 54 model """ntn""" +432 54 loss """softplus""" +432 54 regularizer """no""" +432 54 optimizer """adam""" +432 54 training_loop """lcwa""" +432 54 evaluator """rankbased""" +432 55 dataset """kinships""" +432 55 model """ntn""" +432 55 loss """softplus""" +432 55 regularizer """no""" +432 55 optimizer """adam""" +432 55 training_loop """lcwa""" +432 55 evaluator """rankbased""" +432 56 dataset """kinships""" +432 56 model """ntn""" +432 56 loss """softplus""" +432 56 regularizer """no""" +432 56 optimizer """adam""" +432 56 training_loop """lcwa""" +432 56 evaluator """rankbased""" +432 57 
dataset """kinships""" +432 57 model """ntn""" +432 57 loss """softplus""" +432 57 regularizer """no""" +432 57 optimizer """adam""" +432 57 training_loop """lcwa""" +432 57 evaluator """rankbased""" +432 58 dataset """kinships""" +432 58 model """ntn""" +432 58 loss """softplus""" +432 58 regularizer """no""" +432 58 optimizer """adam""" +432 58 training_loop """lcwa""" +432 58 evaluator """rankbased""" +432 59 dataset """kinships""" +432 59 model """ntn""" +432 59 loss """softplus""" +432 59 regularizer """no""" +432 59 optimizer """adam""" +432 59 training_loop """lcwa""" +432 59 evaluator """rankbased""" +432 60 dataset """kinships""" +432 60 model """ntn""" +432 60 loss """softplus""" +432 60 regularizer """no""" +432 60 optimizer """adam""" +432 60 training_loop """lcwa""" +432 60 evaluator """rankbased""" +432 61 dataset """kinships""" +432 61 model """ntn""" +432 61 loss """softplus""" +432 61 regularizer """no""" +432 61 optimizer """adam""" +432 61 training_loop """lcwa""" +432 61 evaluator """rankbased""" +432 62 dataset """kinships""" +432 62 model """ntn""" +432 62 loss """softplus""" +432 62 regularizer """no""" +432 62 optimizer """adam""" +432 62 training_loop """lcwa""" +432 62 evaluator """rankbased""" +432 63 dataset """kinships""" +432 63 model """ntn""" +432 63 loss """softplus""" +432 63 regularizer """no""" +432 63 optimizer """adam""" +432 63 training_loop """lcwa""" +432 63 evaluator """rankbased""" +432 64 dataset """kinships""" +432 64 model """ntn""" +432 64 loss """softplus""" +432 64 regularizer """no""" +432 64 optimizer """adam""" +432 64 training_loop """lcwa""" +432 64 evaluator """rankbased""" +432 65 dataset """kinships""" +432 65 model """ntn""" +432 65 loss """softplus""" +432 65 regularizer """no""" +432 65 optimizer """adam""" +432 65 training_loop """lcwa""" +432 65 evaluator """rankbased""" +432 66 dataset """kinships""" +432 66 model """ntn""" +432 66 loss """softplus""" +432 66 regularizer """no""" +432 66 optimizer 
"""adam""" +432 66 training_loop """lcwa""" +432 66 evaluator """rankbased""" +432 67 dataset """kinships""" +432 67 model """ntn""" +432 67 loss """softplus""" +432 67 regularizer """no""" +432 67 optimizer """adam""" +432 67 training_loop """lcwa""" +432 67 evaluator """rankbased""" +432 68 dataset """kinships""" +432 68 model """ntn""" +432 68 loss """softplus""" +432 68 regularizer """no""" +432 68 optimizer """adam""" +432 68 training_loop """lcwa""" +432 68 evaluator """rankbased""" +432 69 dataset """kinships""" +432 69 model """ntn""" +432 69 loss """softplus""" +432 69 regularizer """no""" +432 69 optimizer """adam""" +432 69 training_loop """lcwa""" +432 69 evaluator """rankbased""" +432 70 dataset """kinships""" +432 70 model """ntn""" +432 70 loss """softplus""" +432 70 regularizer """no""" +432 70 optimizer """adam""" +432 70 training_loop """lcwa""" +432 70 evaluator """rankbased""" +432 71 dataset """kinships""" +432 71 model """ntn""" +432 71 loss """softplus""" +432 71 regularizer """no""" +432 71 optimizer """adam""" +432 71 training_loop """lcwa""" +432 71 evaluator """rankbased""" +432 72 dataset """kinships""" +432 72 model """ntn""" +432 72 loss """softplus""" +432 72 regularizer """no""" +432 72 optimizer """adam""" +432 72 training_loop """lcwa""" +432 72 evaluator """rankbased""" +432 73 dataset """kinships""" +432 73 model """ntn""" +432 73 loss """softplus""" +432 73 regularizer """no""" +432 73 optimizer """adam""" +432 73 training_loop """lcwa""" +432 73 evaluator """rankbased""" +432 74 dataset """kinships""" +432 74 model """ntn""" +432 74 loss """softplus""" +432 74 regularizer """no""" +432 74 optimizer """adam""" +432 74 training_loop """lcwa""" +432 74 evaluator """rankbased""" +432 75 dataset """kinships""" +432 75 model """ntn""" +432 75 loss """softplus""" +432 75 regularizer """no""" +432 75 optimizer """adam""" +432 75 training_loop """lcwa""" +432 75 evaluator """rankbased""" +432 76 dataset """kinships""" +432 76 model 
"""ntn""" +432 76 loss """softplus""" +432 76 regularizer """no""" +432 76 optimizer """adam""" +432 76 training_loop """lcwa""" +432 76 evaluator """rankbased""" +432 77 dataset """kinships""" +432 77 model """ntn""" +432 77 loss """softplus""" +432 77 regularizer """no""" +432 77 optimizer """adam""" +432 77 training_loop """lcwa""" +432 77 evaluator """rankbased""" +432 78 dataset """kinships""" +432 78 model """ntn""" +432 78 loss """softplus""" +432 78 regularizer """no""" +432 78 optimizer """adam""" +432 78 training_loop """lcwa""" +432 78 evaluator """rankbased""" +432 79 dataset """kinships""" +432 79 model """ntn""" +432 79 loss """softplus""" +432 79 regularizer """no""" +432 79 optimizer """adam""" +432 79 training_loop """lcwa""" +432 79 evaluator """rankbased""" +432 80 dataset """kinships""" +432 80 model """ntn""" +432 80 loss """softplus""" +432 80 regularizer """no""" +432 80 optimizer """adam""" +432 80 training_loop """lcwa""" +432 80 evaluator """rankbased""" +432 81 dataset """kinships""" +432 81 model """ntn""" +432 81 loss """softplus""" +432 81 regularizer """no""" +432 81 optimizer """adam""" +432 81 training_loop """lcwa""" +432 81 evaluator """rankbased""" +432 82 dataset """kinships""" +432 82 model """ntn""" +432 82 loss """softplus""" +432 82 regularizer """no""" +432 82 optimizer """adam""" +432 82 training_loop """lcwa""" +432 82 evaluator """rankbased""" +432 83 dataset """kinships""" +432 83 model """ntn""" +432 83 loss """softplus""" +432 83 regularizer """no""" +432 83 optimizer """adam""" +432 83 training_loop """lcwa""" +432 83 evaluator """rankbased""" +432 84 dataset """kinships""" +432 84 model """ntn""" +432 84 loss """softplus""" +432 84 regularizer """no""" +432 84 optimizer """adam""" +432 84 training_loop """lcwa""" +432 84 evaluator """rankbased""" +432 85 dataset """kinships""" +432 85 model """ntn""" +432 85 loss """softplus""" +432 85 regularizer """no""" +432 85 optimizer """adam""" +432 85 training_loop 
"""lcwa""" +432 85 evaluator """rankbased""" +432 86 dataset """kinships""" +432 86 model """ntn""" +432 86 loss """softplus""" +432 86 regularizer """no""" +432 86 optimizer """adam""" +432 86 training_loop """lcwa""" +432 86 evaluator """rankbased""" +432 87 dataset """kinships""" +432 87 model """ntn""" +432 87 loss """softplus""" +432 87 regularizer """no""" +432 87 optimizer """adam""" +432 87 training_loop """lcwa""" +432 87 evaluator """rankbased""" +432 88 dataset """kinships""" +432 88 model """ntn""" +432 88 loss """softplus""" +432 88 regularizer """no""" +432 88 optimizer """adam""" +432 88 training_loop """lcwa""" +432 88 evaluator """rankbased""" +432 89 dataset """kinships""" +432 89 model """ntn""" +432 89 loss """softplus""" +432 89 regularizer """no""" +432 89 optimizer """adam""" +432 89 training_loop """lcwa""" +432 89 evaluator """rankbased""" +432 90 dataset """kinships""" +432 90 model """ntn""" +432 90 loss """softplus""" +432 90 regularizer """no""" +432 90 optimizer """adam""" +432 90 training_loop """lcwa""" +432 90 evaluator """rankbased""" +432 91 dataset """kinships""" +432 91 model """ntn""" +432 91 loss """softplus""" +432 91 regularizer """no""" +432 91 optimizer """adam""" +432 91 training_loop """lcwa""" +432 91 evaluator """rankbased""" +432 92 dataset """kinships""" +432 92 model """ntn""" +432 92 loss """softplus""" +432 92 regularizer """no""" +432 92 optimizer """adam""" +432 92 training_loop """lcwa""" +432 92 evaluator """rankbased""" +432 93 dataset """kinships""" +432 93 model """ntn""" +432 93 loss """softplus""" +432 93 regularizer """no""" +432 93 optimizer """adam""" +432 93 training_loop """lcwa""" +432 93 evaluator """rankbased""" +432 94 dataset """kinships""" +432 94 model """ntn""" +432 94 loss """softplus""" +432 94 regularizer """no""" +432 94 optimizer """adam""" +432 94 training_loop """lcwa""" +432 94 evaluator """rankbased""" +432 95 dataset """kinships""" +432 95 model """ntn""" +432 95 loss """softplus""" 
+432 95 regularizer """no""" +432 95 optimizer """adam""" +432 95 training_loop """lcwa""" +432 95 evaluator """rankbased""" +432 96 dataset """kinships""" +432 96 model """ntn""" +432 96 loss """softplus""" +432 96 regularizer """no""" +432 96 optimizer """adam""" +432 96 training_loop """lcwa""" +432 96 evaluator """rankbased""" +432 97 dataset """kinships""" +432 97 model """ntn""" +432 97 loss """softplus""" +432 97 regularizer """no""" +432 97 optimizer """adam""" +432 97 training_loop """lcwa""" +432 97 evaluator """rankbased""" +432 98 dataset """kinships""" +432 98 model """ntn""" +432 98 loss """softplus""" +432 98 regularizer """no""" +432 98 optimizer """adam""" +432 98 training_loop """lcwa""" +432 98 evaluator """rankbased""" +432 99 dataset """kinships""" +432 99 model """ntn""" +432 99 loss """softplus""" +432 99 regularizer """no""" +432 99 optimizer """adam""" +432 99 training_loop """lcwa""" +432 99 evaluator """rankbased""" +432 100 dataset """kinships""" +432 100 model """ntn""" +432 100 loss """softplus""" +432 100 regularizer """no""" +432 100 optimizer """adam""" +432 100 training_loop """lcwa""" +432 100 evaluator """rankbased""" +433 1 model.embedding_dim 2.0 +433 1 optimizer.lr 0.0021314061201301027 +433 1 training.batch_size 0.0 +433 1 training.label_smoothing 0.34802999335931417 +433 2 model.embedding_dim 0.0 +433 2 optimizer.lr 0.01173607889626796 +433 2 training.batch_size 2.0 +433 2 training.label_smoothing 0.010536922847363378 +433 3 model.embedding_dim 2.0 +433 3 optimizer.lr 0.007045046930807357 +433 3 training.batch_size 2.0 +433 3 training.label_smoothing 0.06783226919173299 +433 4 model.embedding_dim 2.0 +433 4 optimizer.lr 0.0067411504698764845 +433 4 training.batch_size 0.0 +433 4 training.label_smoothing 0.04757675049555189 +433 5 model.embedding_dim 0.0 +433 5 optimizer.lr 0.015604467284500951 +433 5 training.batch_size 0.0 +433 5 training.label_smoothing 0.0020619487218891024 +433 6 model.embedding_dim 0.0 +433 6 
optimizer.lr 0.0018648621638608896 +433 6 training.batch_size 1.0 +433 6 training.label_smoothing 0.10962112452204423 +433 7 model.embedding_dim 0.0 +433 7 optimizer.lr 0.05582995791410758 +433 7 training.batch_size 1.0 +433 7 training.label_smoothing 0.0328021870591237 +433 8 model.embedding_dim 1.0 +433 8 optimizer.lr 0.029887117332467725 +433 8 training.batch_size 2.0 +433 8 training.label_smoothing 0.17667594247338347 +433 9 model.embedding_dim 0.0 +433 9 optimizer.lr 0.004793055732640449 +433 9 training.batch_size 2.0 +433 9 training.label_smoothing 0.09043052103329013 +433 10 model.embedding_dim 2.0 +433 10 optimizer.lr 0.001667714203841293 +433 10 training.batch_size 2.0 +433 10 training.label_smoothing 0.015245573304246876 +433 11 model.embedding_dim 0.0 +433 11 optimizer.lr 0.05288174514605587 +433 11 training.batch_size 2.0 +433 11 training.label_smoothing 0.0044249316952928155 +433 12 model.embedding_dim 2.0 +433 12 optimizer.lr 0.005132577435792805 +433 12 training.batch_size 0.0 +433 12 training.label_smoothing 0.0010674275720657543 +433 13 model.embedding_dim 1.0 +433 13 optimizer.lr 0.03458754533632495 +433 13 training.batch_size 2.0 +433 13 training.label_smoothing 0.19037322147378302 +433 14 model.embedding_dim 0.0 +433 14 optimizer.lr 0.02889225822041284 +433 14 training.batch_size 0.0 +433 14 training.label_smoothing 0.4502126447092911 +433 15 model.embedding_dim 1.0 +433 15 optimizer.lr 0.0014498154512491762 +433 15 training.batch_size 2.0 +433 15 training.label_smoothing 0.00850082578972973 +433 16 model.embedding_dim 1.0 +433 16 optimizer.lr 0.04346977354561734 +433 16 training.batch_size 1.0 +433 16 training.label_smoothing 0.011644746752561982 +433 17 model.embedding_dim 0.0 +433 17 optimizer.lr 0.03772328182573137 +433 17 training.batch_size 0.0 +433 17 training.label_smoothing 0.044177430462927825 +433 18 model.embedding_dim 2.0 +433 18 optimizer.lr 0.06445334799497775 +433 18 training.batch_size 0.0 +433 18 training.label_smoothing 
0.665791087781462 +433 19 model.embedding_dim 2.0 +433 19 optimizer.lr 0.006486641759353016 +433 19 training.batch_size 0.0 +433 19 training.label_smoothing 0.5363598896993571 +433 20 model.embedding_dim 0.0 +433 20 optimizer.lr 0.007912143516561276 +433 20 training.batch_size 1.0 +433 20 training.label_smoothing 0.010244811816460097 +433 21 model.embedding_dim 1.0 +433 21 optimizer.lr 0.005199252277735338 +433 21 training.batch_size 2.0 +433 21 training.label_smoothing 0.04280182552633614 +433 22 model.embedding_dim 0.0 +433 22 optimizer.lr 0.05037703446235553 +433 22 training.batch_size 1.0 +433 22 training.label_smoothing 0.0427573144177275 +433 23 model.embedding_dim 1.0 +433 23 optimizer.lr 0.08915225984887994 +433 23 training.batch_size 2.0 +433 23 training.label_smoothing 0.006910795198486653 +433 24 model.embedding_dim 0.0 +433 24 optimizer.lr 0.08331394842193936 +433 24 training.batch_size 0.0 +433 24 training.label_smoothing 0.004011254047779674 +433 25 model.embedding_dim 0.0 +433 25 optimizer.lr 0.07086746115305695 +433 25 training.batch_size 1.0 +433 25 training.label_smoothing 0.8557954234850919 +433 26 model.embedding_dim 1.0 +433 26 optimizer.lr 0.061895378996917154 +433 26 training.batch_size 2.0 +433 26 training.label_smoothing 0.0012324734612134999 +433 27 model.embedding_dim 1.0 +433 27 optimizer.lr 0.006087883546200128 +433 27 training.batch_size 1.0 +433 27 training.label_smoothing 0.002255331250456569 +433 28 model.embedding_dim 0.0 +433 28 optimizer.lr 0.005971745369836164 +433 28 training.batch_size 0.0 +433 28 training.label_smoothing 0.3230187923087816 +433 29 model.embedding_dim 0.0 +433 29 optimizer.lr 0.005983428443011681 +433 29 training.batch_size 1.0 +433 29 training.label_smoothing 0.030472215501785107 +433 30 model.embedding_dim 2.0 +433 30 optimizer.lr 0.0583407938292448 +433 30 training.batch_size 1.0 +433 30 training.label_smoothing 0.021622677933413995 +433 31 model.embedding_dim 2.0 +433 31 optimizer.lr 0.006071575617116805 
+433 31 training.batch_size 2.0 +433 31 training.label_smoothing 0.006208405100189619 +433 32 model.embedding_dim 1.0 +433 32 optimizer.lr 0.014544327019153942 +433 32 training.batch_size 1.0 +433 32 training.label_smoothing 0.05632707884907477 +433 33 model.embedding_dim 1.0 +433 33 optimizer.lr 0.0019501495610505428 +433 33 training.batch_size 1.0 +433 33 training.label_smoothing 0.006547984210025246 +433 34 model.embedding_dim 2.0 +433 34 optimizer.lr 0.025493015192192343 +433 34 training.batch_size 0.0 +433 34 training.label_smoothing 0.9564859883836944 +433 35 model.embedding_dim 1.0 +433 35 optimizer.lr 0.0024111964671345554 +433 35 training.batch_size 2.0 +433 35 training.label_smoothing 0.0012997119153787015 +433 36 model.embedding_dim 2.0 +433 36 optimizer.lr 0.0041199446041302575 +433 36 training.batch_size 0.0 +433 36 training.label_smoothing 0.007898835994663565 +433 37 model.embedding_dim 1.0 +433 37 optimizer.lr 0.0017297223586990936 +433 37 training.batch_size 1.0 +433 37 training.label_smoothing 0.0019997007498074332 +433 38 model.embedding_dim 0.0 +433 38 optimizer.lr 0.0011633162723695438 +433 38 training.batch_size 2.0 +433 38 training.label_smoothing 0.014018444873663513 +433 39 model.embedding_dim 2.0 +433 39 optimizer.lr 0.007268597837619915 +433 39 training.batch_size 2.0 +433 39 training.label_smoothing 0.573953766523579 +433 40 model.embedding_dim 0.0 +433 40 optimizer.lr 0.0021810384300548504 +433 40 training.batch_size 0.0 +433 40 training.label_smoothing 0.0019878139716699317 +433 41 model.embedding_dim 0.0 +433 41 optimizer.lr 0.07016173133466688 +433 41 training.batch_size 2.0 +433 41 training.label_smoothing 0.029948565150373568 +433 42 model.embedding_dim 1.0 +433 42 optimizer.lr 0.07816826936894582 +433 42 training.batch_size 1.0 +433 42 training.label_smoothing 0.05037833269604706 +433 43 model.embedding_dim 2.0 +433 43 optimizer.lr 0.010898476021773882 +433 43 training.batch_size 0.0 +433 43 training.label_smoothing 
0.15478348214944915 +433 44 model.embedding_dim 1.0 +433 44 optimizer.lr 0.0722255943324125 +433 44 training.batch_size 1.0 +433 44 training.label_smoothing 0.0023022116544187225 +433 45 model.embedding_dim 2.0 +433 45 optimizer.lr 0.09704708781055792 +433 45 training.batch_size 0.0 +433 45 training.label_smoothing 0.05739223659639816 +433 46 model.embedding_dim 0.0 +433 46 optimizer.lr 0.0010288636491808433 +433 46 training.batch_size 2.0 +433 46 training.label_smoothing 0.014904263429678266 +433 47 model.embedding_dim 0.0 +433 47 optimizer.lr 0.014971830981687504 +433 47 training.batch_size 1.0 +433 47 training.label_smoothing 0.024616375023103283 +433 48 model.embedding_dim 2.0 +433 48 optimizer.lr 0.0032869352211968806 +433 48 training.batch_size 1.0 +433 48 training.label_smoothing 0.4858690697971843 +433 49 model.embedding_dim 2.0 +433 49 optimizer.lr 0.001168252291571803 +433 49 training.batch_size 0.0 +433 49 training.label_smoothing 0.5617466747302524 +433 50 model.embedding_dim 0.0 +433 50 optimizer.lr 0.025430113376182027 +433 50 training.batch_size 1.0 +433 50 training.label_smoothing 0.08690821689310478 +433 51 model.embedding_dim 2.0 +433 51 optimizer.lr 0.009611330514830622 +433 51 training.batch_size 1.0 +433 51 training.label_smoothing 0.5476015314388762 +433 52 model.embedding_dim 0.0 +433 52 optimizer.lr 0.002268800281872381 +433 52 training.batch_size 0.0 +433 52 training.label_smoothing 0.06698469034551739 +433 53 model.embedding_dim 1.0 +433 53 optimizer.lr 0.017264945678668535 +433 53 training.batch_size 1.0 +433 53 training.label_smoothing 0.0019213879195611981 +433 54 model.embedding_dim 2.0 +433 54 optimizer.lr 0.056188069708470734 +433 54 training.batch_size 1.0 +433 54 training.label_smoothing 0.019533642994174157 +433 55 model.embedding_dim 1.0 +433 55 optimizer.lr 0.0022751984581271542 +433 55 training.batch_size 0.0 +433 55 training.label_smoothing 0.004371111695895324 +433 56 model.embedding_dim 2.0 +433 56 optimizer.lr 
0.004381299928649742 +433 56 training.batch_size 0.0 +433 56 training.label_smoothing 0.006794174316620176 +433 57 model.embedding_dim 2.0 +433 57 optimizer.lr 0.02499140344904068 +433 57 training.batch_size 2.0 +433 57 training.label_smoothing 0.001494411546459008 +433 58 model.embedding_dim 1.0 +433 58 optimizer.lr 0.02246202381717857 +433 58 training.batch_size 0.0 +433 58 training.label_smoothing 0.007788521796946711 +433 59 model.embedding_dim 2.0 +433 59 optimizer.lr 0.09062434793736109 +433 59 training.batch_size 1.0 +433 59 training.label_smoothing 0.0013559620943443408 +433 60 model.embedding_dim 2.0 +433 60 optimizer.lr 0.06964674537982977 +433 60 training.batch_size 0.0 +433 60 training.label_smoothing 0.09430985183735115 +433 61 model.embedding_dim 0.0 +433 61 optimizer.lr 0.019704409290954995 +433 61 training.batch_size 1.0 +433 61 training.label_smoothing 0.5960768428896716 +433 62 model.embedding_dim 2.0 +433 62 optimizer.lr 0.026311888576804193 +433 62 training.batch_size 2.0 +433 62 training.label_smoothing 0.16561148622486374 +433 63 model.embedding_dim 1.0 +433 63 optimizer.lr 0.032777584453445925 +433 63 training.batch_size 0.0 +433 63 training.label_smoothing 0.002664684153824963 +433 64 model.embedding_dim 2.0 +433 64 optimizer.lr 0.0026417128383282155 +433 64 training.batch_size 0.0 +433 64 training.label_smoothing 0.732970492487571 +433 65 model.embedding_dim 0.0 +433 65 optimizer.lr 0.009768137718965276 +433 65 training.batch_size 2.0 +433 65 training.label_smoothing 0.003386772887956057 +433 66 model.embedding_dim 1.0 +433 66 optimizer.lr 0.019097383320956972 +433 66 training.batch_size 0.0 +433 66 training.label_smoothing 0.11005132000438748 +433 67 model.embedding_dim 1.0 +433 67 optimizer.lr 0.0010427064372841098 +433 67 training.batch_size 1.0 +433 67 training.label_smoothing 0.0018146560983525821 +433 68 model.embedding_dim 1.0 +433 68 optimizer.lr 0.048372342500103485 +433 68 training.batch_size 2.0 +433 68 training.label_smoothing 
0.5310151413446401 +433 69 model.embedding_dim 1.0 +433 69 optimizer.lr 0.0018448741589897888 +433 69 training.batch_size 1.0 +433 69 training.label_smoothing 0.004626159069828706 +433 70 model.embedding_dim 1.0 +433 70 optimizer.lr 0.01550922598420109 +433 70 training.batch_size 2.0 +433 70 training.label_smoothing 0.006396283056097769 +433 71 model.embedding_dim 0.0 +433 71 optimizer.lr 0.018975797223867146 +433 71 training.batch_size 1.0 +433 71 training.label_smoothing 0.028141128424722325 +433 72 model.embedding_dim 0.0 +433 72 optimizer.lr 0.010882570119857951 +433 72 training.batch_size 2.0 +433 72 training.label_smoothing 0.16443399724274432 +433 73 model.embedding_dim 0.0 +433 73 optimizer.lr 0.002129869347508168 +433 73 training.batch_size 0.0 +433 73 training.label_smoothing 0.18468501766272988 +433 74 model.embedding_dim 2.0 +433 74 optimizer.lr 0.01108960674721357 +433 74 training.batch_size 2.0 +433 74 training.label_smoothing 0.002883390503146539 +433 75 model.embedding_dim 1.0 +433 75 optimizer.lr 0.04891650606503681 +433 75 training.batch_size 1.0 +433 75 training.label_smoothing 0.0046345207403870325 +433 76 model.embedding_dim 1.0 +433 76 optimizer.lr 0.01492437734621631 +433 76 training.batch_size 2.0 +433 76 training.label_smoothing 0.3860450292630406 +433 77 model.embedding_dim 2.0 +433 77 optimizer.lr 0.014220342993570029 +433 77 training.batch_size 0.0 +433 77 training.label_smoothing 0.7885360464211852 +433 78 model.embedding_dim 0.0 +433 78 optimizer.lr 0.015292649036331056 +433 78 training.batch_size 0.0 +433 78 training.label_smoothing 0.06017906975060552 +433 79 model.embedding_dim 2.0 +433 79 optimizer.lr 0.009120021927255728 +433 79 training.batch_size 0.0 +433 79 training.label_smoothing 0.7847747025755631 +433 80 model.embedding_dim 2.0 +433 80 optimizer.lr 0.04213067408572866 +433 80 training.batch_size 0.0 +433 80 training.label_smoothing 0.004748753149025504 +433 81 model.embedding_dim 2.0 +433 81 optimizer.lr 
0.005751481535397412 +433 81 training.batch_size 0.0 +433 81 training.label_smoothing 0.7962921779937742 +433 82 model.embedding_dim 2.0 +433 82 optimizer.lr 0.007426429609624747 +433 82 training.batch_size 1.0 +433 82 training.label_smoothing 0.09299147691978797 +433 83 model.embedding_dim 1.0 +433 83 optimizer.lr 0.0768589024629252 +433 83 training.batch_size 1.0 +433 83 training.label_smoothing 0.19840284128907398 +433 84 model.embedding_dim 2.0 +433 84 optimizer.lr 0.009357246993444484 +433 84 training.batch_size 0.0 +433 84 training.label_smoothing 0.12803552720246048 +433 85 model.embedding_dim 2.0 +433 85 optimizer.lr 0.012031100749573736 +433 85 training.batch_size 2.0 +433 85 training.label_smoothing 0.04213142398619653 +433 86 model.embedding_dim 2.0 +433 86 optimizer.lr 0.02102002155420974 +433 86 training.batch_size 1.0 +433 86 training.label_smoothing 0.013701951463215335 +433 87 model.embedding_dim 2.0 +433 87 optimizer.lr 0.03992295868676086 +433 87 training.batch_size 0.0 +433 87 training.label_smoothing 0.06603013908658985 +433 88 model.embedding_dim 1.0 +433 88 optimizer.lr 0.0010105612342633464 +433 88 training.batch_size 2.0 +433 88 training.label_smoothing 0.0010414141369635866 +433 89 model.embedding_dim 0.0 +433 89 optimizer.lr 0.0010414471835323068 +433 89 training.batch_size 0.0 +433 89 training.label_smoothing 0.37645975432003254 +433 90 model.embedding_dim 0.0 +433 90 optimizer.lr 0.08152780802448423 +433 90 training.batch_size 2.0 +433 90 training.label_smoothing 0.11791083768394167 +433 91 model.embedding_dim 1.0 +433 91 optimizer.lr 0.031388848212746095 +433 91 training.batch_size 1.0 +433 91 training.label_smoothing 0.052745543717958566 +433 92 model.embedding_dim 2.0 +433 92 optimizer.lr 0.006384895408954469 +433 92 training.batch_size 1.0 +433 92 training.label_smoothing 0.4866415103245438 +433 93 model.embedding_dim 0.0 +433 93 optimizer.lr 0.009005835149906875 +433 93 training.batch_size 1.0 +433 93 training.label_smoothing 
0.16975142444719654 +433 94 model.embedding_dim 0.0 +433 94 optimizer.lr 0.004924338320409624 +433 94 training.batch_size 2.0 +433 94 training.label_smoothing 0.005880273346122876 +433 95 model.embedding_dim 0.0 +433 95 optimizer.lr 0.005111531791660382 +433 95 training.batch_size 1.0 +433 95 training.label_smoothing 0.0020752489486108756 +433 96 model.embedding_dim 2.0 +433 96 optimizer.lr 0.04749825187360317 +433 96 training.batch_size 1.0 +433 96 training.label_smoothing 0.0010515473370353342 +433 97 model.embedding_dim 0.0 +433 97 optimizer.lr 0.004829396198783872 +433 97 training.batch_size 0.0 +433 97 training.label_smoothing 0.0028345033256995246 +433 98 model.embedding_dim 2.0 +433 98 optimizer.lr 0.028968075665660977 +433 98 training.batch_size 1.0 +433 98 training.label_smoothing 0.1088184501834503 +433 99 model.embedding_dim 1.0 +433 99 optimizer.lr 0.015137667793615445 +433 99 training.batch_size 0.0 +433 99 training.label_smoothing 0.0010738596317407415 +433 100 model.embedding_dim 0.0 +433 100 optimizer.lr 0.0022052834472991 +433 100 training.batch_size 1.0 +433 100 training.label_smoothing 0.014449699204343538 +433 1 dataset """kinships""" +433 1 model """ntn""" +433 1 loss """crossentropy""" +433 1 regularizer """no""" +433 1 optimizer """adam""" +433 1 training_loop """lcwa""" +433 1 evaluator """rankbased""" +433 2 dataset """kinships""" +433 2 model """ntn""" +433 2 loss """crossentropy""" +433 2 regularizer """no""" +433 2 optimizer """adam""" +433 2 training_loop """lcwa""" +433 2 evaluator """rankbased""" +433 3 dataset """kinships""" +433 3 model """ntn""" +433 3 loss """crossentropy""" +433 3 regularizer """no""" +433 3 optimizer """adam""" +433 3 training_loop """lcwa""" +433 3 evaluator """rankbased""" +433 4 dataset """kinships""" +433 4 model """ntn""" +433 4 loss """crossentropy""" +433 4 regularizer """no""" +433 4 optimizer """adam""" +433 4 training_loop """lcwa""" +433 4 evaluator """rankbased""" +433 5 dataset """kinships""" +433 5 
model """ntn""" +433 5 loss """crossentropy""" +433 5 regularizer """no""" +433 5 optimizer """adam""" +433 5 training_loop """lcwa""" +433 5 evaluator """rankbased""" +433 6 dataset """kinships""" +433 6 model """ntn""" +433 6 loss """crossentropy""" +433 6 regularizer """no""" +433 6 optimizer """adam""" +433 6 training_loop """lcwa""" +433 6 evaluator """rankbased""" +433 7 dataset """kinships""" +433 7 model """ntn""" +433 7 loss """crossentropy""" +433 7 regularizer """no""" +433 7 optimizer """adam""" +433 7 training_loop """lcwa""" +433 7 evaluator """rankbased""" +433 8 dataset """kinships""" +433 8 model """ntn""" +433 8 loss """crossentropy""" +433 8 regularizer """no""" +433 8 optimizer """adam""" +433 8 training_loop """lcwa""" +433 8 evaluator """rankbased""" +433 9 dataset """kinships""" +433 9 model """ntn""" +433 9 loss """crossentropy""" +433 9 regularizer """no""" +433 9 optimizer """adam""" +433 9 training_loop """lcwa""" +433 9 evaluator """rankbased""" +433 10 dataset """kinships""" +433 10 model """ntn""" +433 10 loss """crossentropy""" +433 10 regularizer """no""" +433 10 optimizer """adam""" +433 10 training_loop """lcwa""" +433 10 evaluator """rankbased""" +433 11 dataset """kinships""" +433 11 model """ntn""" +433 11 loss """crossentropy""" +433 11 regularizer """no""" +433 11 optimizer """adam""" +433 11 training_loop """lcwa""" +433 11 evaluator """rankbased""" +433 12 dataset """kinships""" +433 12 model """ntn""" +433 12 loss """crossentropy""" +433 12 regularizer """no""" +433 12 optimizer """adam""" +433 12 training_loop """lcwa""" +433 12 evaluator """rankbased""" +433 13 dataset """kinships""" +433 13 model """ntn""" +433 13 loss """crossentropy""" +433 13 regularizer """no""" +433 13 optimizer """adam""" +433 13 training_loop """lcwa""" +433 13 evaluator """rankbased""" +433 14 dataset """kinships""" +433 14 model """ntn""" +433 14 loss """crossentropy""" +433 14 regularizer """no""" +433 14 optimizer """adam""" +433 14 
training_loop """lcwa""" +433 14 evaluator """rankbased""" +433 15 dataset """kinships""" +433 15 model """ntn""" +433 15 loss """crossentropy""" +433 15 regularizer """no""" +433 15 optimizer """adam""" +433 15 training_loop """lcwa""" +433 15 evaluator """rankbased""" +433 16 dataset """kinships""" +433 16 model """ntn""" +433 16 loss """crossentropy""" +433 16 regularizer """no""" +433 16 optimizer """adam""" +433 16 training_loop """lcwa""" +433 16 evaluator """rankbased""" +433 17 dataset """kinships""" +433 17 model """ntn""" +433 17 loss """crossentropy""" +433 17 regularizer """no""" +433 17 optimizer """adam""" +433 17 training_loop """lcwa""" +433 17 evaluator """rankbased""" +433 18 dataset """kinships""" +433 18 model """ntn""" +433 18 loss """crossentropy""" +433 18 regularizer """no""" +433 18 optimizer """adam""" +433 18 training_loop """lcwa""" +433 18 evaluator """rankbased""" +433 19 dataset """kinships""" +433 19 model """ntn""" +433 19 loss """crossentropy""" +433 19 regularizer """no""" +433 19 optimizer """adam""" +433 19 training_loop """lcwa""" +433 19 evaluator """rankbased""" +433 20 dataset """kinships""" +433 20 model """ntn""" +433 20 loss """crossentropy""" +433 20 regularizer """no""" +433 20 optimizer """adam""" +433 20 training_loop """lcwa""" +433 20 evaluator """rankbased""" +433 21 dataset """kinships""" +433 21 model """ntn""" +433 21 loss """crossentropy""" +433 21 regularizer """no""" +433 21 optimizer """adam""" +433 21 training_loop """lcwa""" +433 21 evaluator """rankbased""" +433 22 dataset """kinships""" +433 22 model """ntn""" +433 22 loss """crossentropy""" +433 22 regularizer """no""" +433 22 optimizer """adam""" +433 22 training_loop """lcwa""" +433 22 evaluator """rankbased""" +433 23 dataset """kinships""" +433 23 model """ntn""" +433 23 loss """crossentropy""" +433 23 regularizer """no""" +433 23 optimizer """adam""" +433 23 training_loop """lcwa""" +433 23 evaluator """rankbased""" +433 24 dataset """kinships""" 
+433 24 model """ntn""" +433 24 loss """crossentropy""" +433 24 regularizer """no""" +433 24 optimizer """adam""" +433 24 training_loop """lcwa""" +433 24 evaluator """rankbased""" +433 25 dataset """kinships""" +433 25 model """ntn""" +433 25 loss """crossentropy""" +433 25 regularizer """no""" +433 25 optimizer """adam""" +433 25 training_loop """lcwa""" +433 25 evaluator """rankbased""" +433 26 dataset """kinships""" +433 26 model """ntn""" +433 26 loss """crossentropy""" +433 26 regularizer """no""" +433 26 optimizer """adam""" +433 26 training_loop """lcwa""" +433 26 evaluator """rankbased""" +433 27 dataset """kinships""" +433 27 model """ntn""" +433 27 loss """crossentropy""" +433 27 regularizer """no""" +433 27 optimizer """adam""" +433 27 training_loop """lcwa""" +433 27 evaluator """rankbased""" +433 28 dataset """kinships""" +433 28 model """ntn""" +433 28 loss """crossentropy""" +433 28 regularizer """no""" +433 28 optimizer """adam""" +433 28 training_loop """lcwa""" +433 28 evaluator """rankbased""" +433 29 dataset """kinships""" +433 29 model """ntn""" +433 29 loss """crossentropy""" +433 29 regularizer """no""" +433 29 optimizer """adam""" +433 29 training_loop """lcwa""" +433 29 evaluator """rankbased""" +433 30 dataset """kinships""" +433 30 model """ntn""" +433 30 loss """crossentropy""" +433 30 regularizer """no""" +433 30 optimizer """adam""" +433 30 training_loop """lcwa""" +433 30 evaluator """rankbased""" +433 31 dataset """kinships""" +433 31 model """ntn""" +433 31 loss """crossentropy""" +433 31 regularizer """no""" +433 31 optimizer """adam""" +433 31 training_loop """lcwa""" +433 31 evaluator """rankbased""" +433 32 dataset """kinships""" +433 32 model """ntn""" +433 32 loss """crossentropy""" +433 32 regularizer """no""" +433 32 optimizer """adam""" +433 32 training_loop """lcwa""" +433 32 evaluator """rankbased""" +433 33 dataset """kinships""" +433 33 model """ntn""" +433 33 loss """crossentropy""" +433 33 regularizer """no""" +433 
33 optimizer """adam""" +433 33 training_loop """lcwa""" +433 33 evaluator """rankbased""" +433 34 dataset """kinships""" +433 34 model """ntn""" +433 34 loss """crossentropy""" +433 34 regularizer """no""" +433 34 optimizer """adam""" +433 34 training_loop """lcwa""" +433 34 evaluator """rankbased""" +433 35 dataset """kinships""" +433 35 model """ntn""" +433 35 loss """crossentropy""" +433 35 regularizer """no""" +433 35 optimizer """adam""" +433 35 training_loop """lcwa""" +433 35 evaluator """rankbased""" +433 36 dataset """kinships""" +433 36 model """ntn""" +433 36 loss """crossentropy""" +433 36 regularizer """no""" +433 36 optimizer """adam""" +433 36 training_loop """lcwa""" +433 36 evaluator """rankbased""" +433 37 dataset """kinships""" +433 37 model """ntn""" +433 37 loss """crossentropy""" +433 37 regularizer """no""" +433 37 optimizer """adam""" +433 37 training_loop """lcwa""" +433 37 evaluator """rankbased""" +433 38 dataset """kinships""" +433 38 model """ntn""" +433 38 loss """crossentropy""" +433 38 regularizer """no""" +433 38 optimizer """adam""" +433 38 training_loop """lcwa""" +433 38 evaluator """rankbased""" +433 39 dataset """kinships""" +433 39 model """ntn""" +433 39 loss """crossentropy""" +433 39 regularizer """no""" +433 39 optimizer """adam""" +433 39 training_loop """lcwa""" +433 39 evaluator """rankbased""" +433 40 dataset """kinships""" +433 40 model """ntn""" +433 40 loss """crossentropy""" +433 40 regularizer """no""" +433 40 optimizer """adam""" +433 40 training_loop """lcwa""" +433 40 evaluator """rankbased""" +433 41 dataset """kinships""" +433 41 model """ntn""" +433 41 loss """crossentropy""" +433 41 regularizer """no""" +433 41 optimizer """adam""" +433 41 training_loop """lcwa""" +433 41 evaluator """rankbased""" +433 42 dataset """kinships""" +433 42 model """ntn""" +433 42 loss """crossentropy""" +433 42 regularizer """no""" +433 42 optimizer """adam""" +433 42 training_loop """lcwa""" +433 42 evaluator """rankbased""" 
+433 43 dataset """kinships""" +433 43 model """ntn""" +433 43 loss """crossentropy""" +433 43 regularizer """no""" +433 43 optimizer """adam""" +433 43 training_loop """lcwa""" +433 43 evaluator """rankbased""" +433 44 dataset """kinships""" +433 44 model """ntn""" +433 44 loss """crossentropy""" +433 44 regularizer """no""" +433 44 optimizer """adam""" +433 44 training_loop """lcwa""" +433 44 evaluator """rankbased""" +433 45 dataset """kinships""" +433 45 model """ntn""" +433 45 loss """crossentropy""" +433 45 regularizer """no""" +433 45 optimizer """adam""" +433 45 training_loop """lcwa""" +433 45 evaluator """rankbased""" +433 46 dataset """kinships""" +433 46 model """ntn""" +433 46 loss """crossentropy""" +433 46 regularizer """no""" +433 46 optimizer """adam""" +433 46 training_loop """lcwa""" +433 46 evaluator """rankbased""" +433 47 dataset """kinships""" +433 47 model """ntn""" +433 47 loss """crossentropy""" +433 47 regularizer """no""" +433 47 optimizer """adam""" +433 47 training_loop """lcwa""" +433 47 evaluator """rankbased""" +433 48 dataset """kinships""" +433 48 model """ntn""" +433 48 loss """crossentropy""" +433 48 regularizer """no""" +433 48 optimizer """adam""" +433 48 training_loop """lcwa""" +433 48 evaluator """rankbased""" +433 49 dataset """kinships""" +433 49 model """ntn""" +433 49 loss """crossentropy""" +433 49 regularizer """no""" +433 49 optimizer """adam""" +433 49 training_loop """lcwa""" +433 49 evaluator """rankbased""" +433 50 dataset """kinships""" +433 50 model """ntn""" +433 50 loss """crossentropy""" +433 50 regularizer """no""" +433 50 optimizer """adam""" +433 50 training_loop """lcwa""" +433 50 evaluator """rankbased""" +433 51 dataset """kinships""" +433 51 model """ntn""" +433 51 loss """crossentropy""" +433 51 regularizer """no""" +433 51 optimizer """adam""" +433 51 training_loop """lcwa""" +433 51 evaluator """rankbased""" +433 52 dataset """kinships""" +433 52 model """ntn""" +433 52 loss """crossentropy""" +433 
52 regularizer """no""" +433 52 optimizer """adam""" +433 52 training_loop """lcwa""" +433 52 evaluator """rankbased""" +433 53 dataset """kinships""" +433 53 model """ntn""" +433 53 loss """crossentropy""" +433 53 regularizer """no""" +433 53 optimizer """adam""" +433 53 training_loop """lcwa""" +433 53 evaluator """rankbased""" +433 54 dataset """kinships""" +433 54 model """ntn""" +433 54 loss """crossentropy""" +433 54 regularizer """no""" +433 54 optimizer """adam""" +433 54 training_loop """lcwa""" +433 54 evaluator """rankbased""" +433 55 dataset """kinships""" +433 55 model """ntn""" +433 55 loss """crossentropy""" +433 55 regularizer """no""" +433 55 optimizer """adam""" +433 55 training_loop """lcwa""" +433 55 evaluator """rankbased""" +433 56 dataset """kinships""" +433 56 model """ntn""" +433 56 loss """crossentropy""" +433 56 regularizer """no""" +433 56 optimizer """adam""" +433 56 training_loop """lcwa""" +433 56 evaluator """rankbased""" +433 57 dataset """kinships""" +433 57 model """ntn""" +433 57 loss """crossentropy""" +433 57 regularizer """no""" +433 57 optimizer """adam""" +433 57 training_loop """lcwa""" +433 57 evaluator """rankbased""" +433 58 dataset """kinships""" +433 58 model """ntn""" +433 58 loss """crossentropy""" +433 58 regularizer """no""" +433 58 optimizer """adam""" +433 58 training_loop """lcwa""" +433 58 evaluator """rankbased""" +433 59 dataset """kinships""" +433 59 model """ntn""" +433 59 loss """crossentropy""" +433 59 regularizer """no""" +433 59 optimizer """adam""" +433 59 training_loop """lcwa""" +433 59 evaluator """rankbased""" +433 60 dataset """kinships""" +433 60 model """ntn""" +433 60 loss """crossentropy""" +433 60 regularizer """no""" +433 60 optimizer """adam""" +433 60 training_loop """lcwa""" +433 60 evaluator """rankbased""" +433 61 dataset """kinships""" +433 61 model """ntn""" +433 61 loss """crossentropy""" +433 61 regularizer """no""" +433 61 optimizer """adam""" +433 61 training_loop """lcwa""" +433 
61 evaluator """rankbased""" +433 62 dataset """kinships""" +433 62 model """ntn""" +433 62 loss """crossentropy""" +433 62 regularizer """no""" +433 62 optimizer """adam""" +433 62 training_loop """lcwa""" +433 62 evaluator """rankbased""" +433 63 dataset """kinships""" +433 63 model """ntn""" +433 63 loss """crossentropy""" +433 63 regularizer """no""" +433 63 optimizer """adam""" +433 63 training_loop """lcwa""" +433 63 evaluator """rankbased""" +433 64 dataset """kinships""" +433 64 model """ntn""" +433 64 loss """crossentropy""" +433 64 regularizer """no""" +433 64 optimizer """adam""" +433 64 training_loop """lcwa""" +433 64 evaluator """rankbased""" +433 65 dataset """kinships""" +433 65 model """ntn""" +433 65 loss """crossentropy""" +433 65 regularizer """no""" +433 65 optimizer """adam""" +433 65 training_loop """lcwa""" +433 65 evaluator """rankbased""" +433 66 dataset """kinships""" +433 66 model """ntn""" +433 66 loss """crossentropy""" +433 66 regularizer """no""" +433 66 optimizer """adam""" +433 66 training_loop """lcwa""" +433 66 evaluator """rankbased""" +433 67 dataset """kinships""" +433 67 model """ntn""" +433 67 loss """crossentropy""" +433 67 regularizer """no""" +433 67 optimizer """adam""" +433 67 training_loop """lcwa""" +433 67 evaluator """rankbased""" +433 68 dataset """kinships""" +433 68 model """ntn""" +433 68 loss """crossentropy""" +433 68 regularizer """no""" +433 68 optimizer """adam""" +433 68 training_loop """lcwa""" +433 68 evaluator """rankbased""" +433 69 dataset """kinships""" +433 69 model """ntn""" +433 69 loss """crossentropy""" +433 69 regularizer """no""" +433 69 optimizer """adam""" +433 69 training_loop """lcwa""" +433 69 evaluator """rankbased""" +433 70 dataset """kinships""" +433 70 model """ntn""" +433 70 loss """crossentropy""" +433 70 regularizer """no""" +433 70 optimizer """adam""" +433 70 training_loop """lcwa""" +433 70 evaluator """rankbased""" +433 71 dataset """kinships""" +433 71 model """ntn""" +433 71 
loss """crossentropy""" +433 71 regularizer """no""" +433 71 optimizer """adam""" +433 71 training_loop """lcwa""" +433 71 evaluator """rankbased""" +433 72 dataset """kinships""" +433 72 model """ntn""" +433 72 loss """crossentropy""" +433 72 regularizer """no""" +433 72 optimizer """adam""" +433 72 training_loop """lcwa""" +433 72 evaluator """rankbased""" +433 73 dataset """kinships""" +433 73 model """ntn""" +433 73 loss """crossentropy""" +433 73 regularizer """no""" +433 73 optimizer """adam""" +433 73 training_loop """lcwa""" +433 73 evaluator """rankbased""" +433 74 dataset """kinships""" +433 74 model """ntn""" +433 74 loss """crossentropy""" +433 74 regularizer """no""" +433 74 optimizer """adam""" +433 74 training_loop """lcwa""" +433 74 evaluator """rankbased""" +433 75 dataset """kinships""" +433 75 model """ntn""" +433 75 loss """crossentropy""" +433 75 regularizer """no""" +433 75 optimizer """adam""" +433 75 training_loop """lcwa""" +433 75 evaluator """rankbased""" +433 76 dataset """kinships""" +433 76 model """ntn""" +433 76 loss """crossentropy""" +433 76 regularizer """no""" +433 76 optimizer """adam""" +433 76 training_loop """lcwa""" +433 76 evaluator """rankbased""" +433 77 dataset """kinships""" +433 77 model """ntn""" +433 77 loss """crossentropy""" +433 77 regularizer """no""" +433 77 optimizer """adam""" +433 77 training_loop """lcwa""" +433 77 evaluator """rankbased""" +433 78 dataset """kinships""" +433 78 model """ntn""" +433 78 loss """crossentropy""" +433 78 regularizer """no""" +433 78 optimizer """adam""" +433 78 training_loop """lcwa""" +433 78 evaluator """rankbased""" +433 79 dataset """kinships""" +433 79 model """ntn""" +433 79 loss """crossentropy""" +433 79 regularizer """no""" +433 79 optimizer """adam""" +433 79 training_loop """lcwa""" +433 79 evaluator """rankbased""" +433 80 dataset """kinships""" +433 80 model """ntn""" +433 80 loss """crossentropy""" +433 80 regularizer """no""" +433 80 optimizer """adam""" +433 80 
training_loop """lcwa""" +433 80 evaluator """rankbased""" +433 81 dataset """kinships""" +433 81 model """ntn""" +433 81 loss """crossentropy""" +433 81 regularizer """no""" +433 81 optimizer """adam""" +433 81 training_loop """lcwa""" +433 81 evaluator """rankbased""" +433 82 dataset """kinships""" +433 82 model """ntn""" +433 82 loss """crossentropy""" +433 82 regularizer """no""" +433 82 optimizer """adam""" +433 82 training_loop """lcwa""" +433 82 evaluator """rankbased""" +433 83 dataset """kinships""" +433 83 model """ntn""" +433 83 loss """crossentropy""" +433 83 regularizer """no""" +433 83 optimizer """adam""" +433 83 training_loop """lcwa""" +433 83 evaluator """rankbased""" +433 84 dataset """kinships""" +433 84 model """ntn""" +433 84 loss """crossentropy""" +433 84 regularizer """no""" +433 84 optimizer """adam""" +433 84 training_loop """lcwa""" +433 84 evaluator """rankbased""" +433 85 dataset """kinships""" +433 85 model """ntn""" +433 85 loss """crossentropy""" +433 85 regularizer """no""" +433 85 optimizer """adam""" +433 85 training_loop """lcwa""" +433 85 evaluator """rankbased""" +433 86 dataset """kinships""" +433 86 model """ntn""" +433 86 loss """crossentropy""" +433 86 regularizer """no""" +433 86 optimizer """adam""" +433 86 training_loop """lcwa""" +433 86 evaluator """rankbased""" +433 87 dataset """kinships""" +433 87 model """ntn""" +433 87 loss """crossentropy""" +433 87 regularizer """no""" +433 87 optimizer """adam""" +433 87 training_loop """lcwa""" +433 87 evaluator """rankbased""" +433 88 dataset """kinships""" +433 88 model """ntn""" +433 88 loss """crossentropy""" +433 88 regularizer """no""" +433 88 optimizer """adam""" +433 88 training_loop """lcwa""" +433 88 evaluator """rankbased""" +433 89 dataset """kinships""" +433 89 model """ntn""" +433 89 loss """crossentropy""" +433 89 regularizer """no""" +433 89 optimizer """adam""" +433 89 training_loop """lcwa""" +433 89 evaluator """rankbased""" +433 90 dataset """kinships""" 
+433 90 model """ntn""" +433 90 loss """crossentropy""" +433 90 regularizer """no""" +433 90 optimizer """adam""" +433 90 training_loop """lcwa""" +433 90 evaluator """rankbased""" +433 91 dataset """kinships""" +433 91 model """ntn""" +433 91 loss """crossentropy""" +433 91 regularizer """no""" +433 91 optimizer """adam""" +433 91 training_loop """lcwa""" +433 91 evaluator """rankbased""" +433 92 dataset """kinships""" +433 92 model """ntn""" +433 92 loss """crossentropy""" +433 92 regularizer """no""" +433 92 optimizer """adam""" +433 92 training_loop """lcwa""" +433 92 evaluator """rankbased""" +433 93 dataset """kinships""" +433 93 model """ntn""" +433 93 loss """crossentropy""" +433 93 regularizer """no""" +433 93 optimizer """adam""" +433 93 training_loop """lcwa""" +433 93 evaluator """rankbased""" +433 94 dataset """kinships""" +433 94 model """ntn""" +433 94 loss """crossentropy""" +433 94 regularizer """no""" +433 94 optimizer """adam""" +433 94 training_loop """lcwa""" +433 94 evaluator """rankbased""" +433 95 dataset """kinships""" +433 95 model """ntn""" +433 95 loss """crossentropy""" +433 95 regularizer """no""" +433 95 optimizer """adam""" +433 95 training_loop """lcwa""" +433 95 evaluator """rankbased""" +433 96 dataset """kinships""" +433 96 model """ntn""" +433 96 loss """crossentropy""" +433 96 regularizer """no""" +433 96 optimizer """adam""" +433 96 training_loop """lcwa""" +433 96 evaluator """rankbased""" +433 97 dataset """kinships""" +433 97 model """ntn""" +433 97 loss """crossentropy""" +433 97 regularizer """no""" +433 97 optimizer """adam""" +433 97 training_loop """lcwa""" +433 97 evaluator """rankbased""" +433 98 dataset """kinships""" +433 98 model """ntn""" +433 98 loss """crossentropy""" +433 98 regularizer """no""" +433 98 optimizer """adam""" +433 98 training_loop """lcwa""" +433 98 evaluator """rankbased""" +433 99 dataset """kinships""" +433 99 model """ntn""" +433 99 loss """crossentropy""" +433 99 regularizer """no""" +433 
99 optimizer """adam""" +433 99 training_loop """lcwa""" +433 99 evaluator """rankbased""" +433 100 dataset """kinships""" +433 100 model """ntn""" +433 100 loss """crossentropy""" +433 100 regularizer """no""" +433 100 optimizer """adam""" +433 100 training_loop """lcwa""" +433 100 evaluator """rankbased""" +434 1 model.embedding_dim 2.0 +434 1 optimizer.lr 0.0013751743481107295 +434 1 training.batch_size 1.0 +434 1 training.label_smoothing 0.009064199623332676 +434 2 model.embedding_dim 2.0 +434 2 optimizer.lr 0.0037708297394825185 +434 2 training.batch_size 2.0 +434 2 training.label_smoothing 0.1838086514541875 +434 3 model.embedding_dim 2.0 +434 3 optimizer.lr 0.008822496573938481 +434 3 training.batch_size 1.0 +434 3 training.label_smoothing 0.6245056407694047 +434 4 model.embedding_dim 0.0 +434 4 optimizer.lr 0.0013986288843140565 +434 4 training.batch_size 1.0 +434 4 training.label_smoothing 0.9211343919713509 +434 5 model.embedding_dim 0.0 +434 5 optimizer.lr 0.0019609614299259865 +434 5 training.batch_size 0.0 +434 5 training.label_smoothing 0.10828881664910027 +434 6 model.embedding_dim 0.0 +434 6 optimizer.lr 0.004020189602640503 +434 6 training.batch_size 0.0 +434 6 training.label_smoothing 0.28702433318204623 +434 7 model.embedding_dim 0.0 +434 7 optimizer.lr 0.009834517026656945 +434 7 training.batch_size 0.0 +434 7 training.label_smoothing 0.007529522329739024 +434 8 model.embedding_dim 1.0 +434 8 optimizer.lr 0.002785936486031904 +434 8 training.batch_size 1.0 +434 8 training.label_smoothing 0.251034089939798 +434 9 model.embedding_dim 2.0 +434 9 optimizer.lr 0.040001431390969844 +434 9 training.batch_size 2.0 +434 9 training.label_smoothing 0.1868621390989617 +434 10 model.embedding_dim 0.0 +434 10 optimizer.lr 0.002481102359493463 +434 10 training.batch_size 1.0 +434 10 training.label_smoothing 0.05283720590526975 +434 11 model.embedding_dim 0.0 +434 11 optimizer.lr 0.020545479948248135 +434 11 training.batch_size 1.0 +434 11 
training.label_smoothing 0.09332512399622891 +434 12 model.embedding_dim 0.0 +434 12 optimizer.lr 0.022950629251540657 +434 12 training.batch_size 1.0 +434 12 training.label_smoothing 0.6883294834349184 +434 13 model.embedding_dim 2.0 +434 13 optimizer.lr 0.0011354960293715403 +434 13 training.batch_size 2.0 +434 13 training.label_smoothing 0.16733271252790502 +434 14 model.embedding_dim 2.0 +434 14 optimizer.lr 0.015405103346766228 +434 14 training.batch_size 1.0 +434 14 training.label_smoothing 0.01707322862989634 +434 15 model.embedding_dim 0.0 +434 15 optimizer.lr 0.00358302156041217 +434 15 training.batch_size 2.0 +434 15 training.label_smoothing 0.06279701156435438 +434 16 model.embedding_dim 0.0 +434 16 optimizer.lr 0.02663559244945564 +434 16 training.batch_size 1.0 +434 16 training.label_smoothing 0.010933472994070392 +434 17 model.embedding_dim 1.0 +434 17 optimizer.lr 0.0042934707924144 +434 17 training.batch_size 0.0 +434 17 training.label_smoothing 0.6977445441590551 +434 18 model.embedding_dim 1.0 +434 18 optimizer.lr 0.048227537536220756 +434 18 training.batch_size 0.0 +434 18 training.label_smoothing 0.010913009543866252 +434 19 model.embedding_dim 1.0 +434 19 optimizer.lr 0.008844000992310832 +434 19 training.batch_size 0.0 +434 19 training.label_smoothing 0.7551466858429231 +434 20 model.embedding_dim 2.0 +434 20 optimizer.lr 0.0021709842432885537 +434 20 training.batch_size 1.0 +434 20 training.label_smoothing 0.08343694346506186 +434 21 model.embedding_dim 2.0 +434 21 optimizer.lr 0.004005453148488285 +434 21 training.batch_size 1.0 +434 21 training.label_smoothing 0.007926457701686544 +434 22 model.embedding_dim 2.0 +434 22 optimizer.lr 0.002346903262714229 +434 22 training.batch_size 0.0 +434 22 training.label_smoothing 0.08557060516837153 +434 23 model.embedding_dim 0.0 +434 23 optimizer.lr 0.06412187018116389 +434 23 training.batch_size 0.0 +434 23 training.label_smoothing 0.35784112406774393 +434 24 model.embedding_dim 0.0 +434 24 
optimizer.lr 0.003479939973776697 +434 24 training.batch_size 1.0 +434 24 training.label_smoothing 0.3546473920002315 +434 25 model.embedding_dim 0.0 +434 25 optimizer.lr 0.005350596785890837 +434 25 training.batch_size 2.0 +434 25 training.label_smoothing 0.0393982957196148 +434 26 model.embedding_dim 1.0 +434 26 optimizer.lr 0.05119468981321545 +434 26 training.batch_size 2.0 +434 26 training.label_smoothing 0.0023203907870750773 +434 27 model.embedding_dim 1.0 +434 27 optimizer.lr 0.001610784891018529 +434 27 training.batch_size 1.0 +434 27 training.label_smoothing 0.0012712502604974212 +434 28 model.embedding_dim 0.0 +434 28 optimizer.lr 0.04667945648190972 +434 28 training.batch_size 0.0 +434 28 training.label_smoothing 0.0016680626944874685 +434 29 model.embedding_dim 2.0 +434 29 optimizer.lr 0.08212769272855337 +434 29 training.batch_size 1.0 +434 29 training.label_smoothing 0.009548140411110871 +434 30 model.embedding_dim 2.0 +434 30 optimizer.lr 0.0708884354829395 +434 30 training.batch_size 2.0 +434 30 training.label_smoothing 0.0010303811015322424 +434 31 model.embedding_dim 2.0 +434 31 optimizer.lr 0.002683029313612311 +434 31 training.batch_size 1.0 +434 31 training.label_smoothing 0.5194703372825846 +434 32 model.embedding_dim 2.0 +434 32 optimizer.lr 0.03208299581120167 +434 32 training.batch_size 2.0 +434 32 training.label_smoothing 0.6940273849824931 +434 33 model.embedding_dim 2.0 +434 33 optimizer.lr 0.011345642908187263 +434 33 training.batch_size 0.0 +434 33 training.label_smoothing 0.0028317605840660173 +434 34 model.embedding_dim 2.0 +434 34 optimizer.lr 0.005433473901004021 +434 34 training.batch_size 1.0 +434 34 training.label_smoothing 0.18108152930747146 +434 35 model.embedding_dim 2.0 +434 35 optimizer.lr 0.006982627267323761 +434 35 training.batch_size 0.0 +434 35 training.label_smoothing 0.0015313736525950545 +434 36 model.embedding_dim 2.0 +434 36 optimizer.lr 0.020453078532020372 +434 36 training.batch_size 0.0 +434 36 
training.label_smoothing 0.8011929764303946 +434 37 model.embedding_dim 2.0 +434 37 optimizer.lr 0.01439702611668884 +434 37 training.batch_size 1.0 +434 37 training.label_smoothing 0.09873219951828106 +434 38 model.embedding_dim 1.0 +434 38 optimizer.lr 0.001132083330493938 +434 38 training.batch_size 1.0 +434 38 training.label_smoothing 0.23261796003663596 +434 39 model.embedding_dim 0.0 +434 39 optimizer.lr 0.04273146156969295 +434 39 training.batch_size 1.0 +434 39 training.label_smoothing 0.057643260371036883 +434 40 model.embedding_dim 1.0 +434 40 optimizer.lr 0.004601272171169085 +434 40 training.batch_size 2.0 +434 40 training.label_smoothing 0.004436511885596311 +434 41 model.embedding_dim 1.0 +434 41 optimizer.lr 0.013330900943089174 +434 41 training.batch_size 2.0 +434 41 training.label_smoothing 0.013964483149653355 +434 42 model.embedding_dim 0.0 +434 42 optimizer.lr 0.008701133634641763 +434 42 training.batch_size 1.0 +434 42 training.label_smoothing 0.018414427343778125 +434 43 model.embedding_dim 0.0 +434 43 optimizer.lr 0.0022906993632756866 +434 43 training.batch_size 1.0 +434 43 training.label_smoothing 0.013708326066614837 +434 44 model.embedding_dim 1.0 +434 44 optimizer.lr 0.06436836392210944 +434 44 training.batch_size 1.0 +434 44 training.label_smoothing 0.0021749998491600816 +434 45 model.embedding_dim 0.0 +434 45 optimizer.lr 0.004192990807804837 +434 45 training.batch_size 1.0 +434 45 training.label_smoothing 0.24330988274399395 +434 46 model.embedding_dim 1.0 +434 46 optimizer.lr 0.015621959966096777 +434 46 training.batch_size 1.0 +434 46 training.label_smoothing 0.0024092730572261426 +434 47 model.embedding_dim 0.0 +434 47 optimizer.lr 0.02941144998413174 +434 47 training.batch_size 2.0 +434 47 training.label_smoothing 0.9256321263486159 +434 48 model.embedding_dim 1.0 +434 48 optimizer.lr 0.00206383011537024 +434 48 training.batch_size 2.0 +434 48 training.label_smoothing 0.29508446483412687 +434 49 model.embedding_dim 1.0 +434 49 
optimizer.lr 0.06892551479130281 +434 49 training.batch_size 2.0 +434 49 training.label_smoothing 0.06907921299891875 +434 50 model.embedding_dim 0.0 +434 50 optimizer.lr 0.007234838603908385 +434 50 training.batch_size 2.0 +434 50 training.label_smoothing 0.6743248899193562 +434 51 model.embedding_dim 1.0 +434 51 optimizer.lr 0.0018440019228875928 +434 51 training.batch_size 0.0 +434 51 training.label_smoothing 0.025257048366778233 +434 52 model.embedding_dim 0.0 +434 52 optimizer.lr 0.010246204736306922 +434 52 training.batch_size 1.0 +434 52 training.label_smoothing 0.34107912613485225 +434 53 model.embedding_dim 0.0 +434 53 optimizer.lr 0.010157905878679633 +434 53 training.batch_size 0.0 +434 53 training.label_smoothing 0.032142665455621396 +434 54 model.embedding_dim 1.0 +434 54 optimizer.lr 0.07722516690016686 +434 54 training.batch_size 0.0 +434 54 training.label_smoothing 0.001267247653214877 +434 55 model.embedding_dim 1.0 +434 55 optimizer.lr 0.0017581763755630372 +434 55 training.batch_size 0.0 +434 55 training.label_smoothing 0.08320194104928909 +434 56 model.embedding_dim 2.0 +434 56 optimizer.lr 0.004895337981386499 +434 56 training.batch_size 2.0 +434 56 training.label_smoothing 0.03397048426803425 +434 57 model.embedding_dim 0.0 +434 57 optimizer.lr 0.0059311404253425525 +434 57 training.batch_size 2.0 +434 57 training.label_smoothing 0.003583904827023083 +434 58 model.embedding_dim 1.0 +434 58 optimizer.lr 0.002269837332297935 +434 58 training.batch_size 0.0 +434 58 training.label_smoothing 0.004819240776008506 +434 59 model.embedding_dim 1.0 +434 59 optimizer.lr 0.0017125407065762324 +434 59 training.batch_size 1.0 +434 59 training.label_smoothing 0.06064403618392029 +434 60 model.embedding_dim 2.0 +434 60 optimizer.lr 0.003265194615932631 +434 60 training.batch_size 2.0 +434 60 training.label_smoothing 0.0025011514218869764 +434 61 model.embedding_dim 2.0 +434 61 optimizer.lr 0.07250570269834997 +434 61 training.batch_size 2.0 +434 61 
training.label_smoothing 0.05608346707729666 +434 62 model.embedding_dim 1.0 +434 62 optimizer.lr 0.001337882553826353 +434 62 training.batch_size 1.0 +434 62 training.label_smoothing 0.01052294414019298 +434 63 model.embedding_dim 1.0 +434 63 optimizer.lr 0.0019709829164915055 +434 63 training.batch_size 1.0 +434 63 training.label_smoothing 0.01249641778842187 +434 64 model.embedding_dim 1.0 +434 64 optimizer.lr 0.0020123056753173565 +434 64 training.batch_size 0.0 +434 64 training.label_smoothing 0.010730300934813315 +434 65 model.embedding_dim 1.0 +434 65 optimizer.lr 0.0014176744015012056 +434 65 training.batch_size 0.0 +434 65 training.label_smoothing 0.8600917630177561 +434 66 model.embedding_dim 1.0 +434 66 optimizer.lr 0.06352496292975515 +434 66 training.batch_size 0.0 +434 66 training.label_smoothing 0.1625322588821066 +434 67 model.embedding_dim 0.0 +434 67 optimizer.lr 0.027617713204859184 +434 67 training.batch_size 2.0 +434 67 training.label_smoothing 0.08606262860460348 +434 68 model.embedding_dim 2.0 +434 68 optimizer.lr 0.0018327366365031177 +434 68 training.batch_size 2.0 +434 68 training.label_smoothing 0.0012680652208146685 +434 69 model.embedding_dim 1.0 +434 69 optimizer.lr 0.02699426398437837 +434 69 training.batch_size 0.0 +434 69 training.label_smoothing 0.04744195763074055 +434 70 model.embedding_dim 1.0 +434 70 optimizer.lr 0.004572496229410291 +434 70 training.batch_size 0.0 +434 70 training.label_smoothing 0.009953670817673786 +434 71 model.embedding_dim 1.0 +434 71 optimizer.lr 0.0032855595898514394 +434 71 training.batch_size 2.0 +434 71 training.label_smoothing 0.2916806394090796 +434 72 model.embedding_dim 0.0 +434 72 optimizer.lr 0.0017352710708323374 +434 72 training.batch_size 1.0 +434 72 training.label_smoothing 0.0040714555874003236 +434 73 model.embedding_dim 2.0 +434 73 optimizer.lr 0.021279818268755756 +434 73 training.batch_size 0.0 +434 73 training.label_smoothing 0.19577216381182086 +434 74 model.embedding_dim 1.0 +434 74 
optimizer.lr 0.028355292441840593 +434 74 training.batch_size 0.0 +434 74 training.label_smoothing 0.0058322039818920245 +434 75 model.embedding_dim 0.0 +434 75 optimizer.lr 0.007549786238125297 +434 75 training.batch_size 1.0 +434 75 training.label_smoothing 0.052317295641756814 +434 76 model.embedding_dim 0.0 +434 76 optimizer.lr 0.0030914835898204146 +434 76 training.batch_size 1.0 +434 76 training.label_smoothing 0.0051376655102621915 +434 77 model.embedding_dim 1.0 +434 77 optimizer.lr 0.014357804021159189 +434 77 training.batch_size 0.0 +434 77 training.label_smoothing 0.011793629233260874 +434 78 model.embedding_dim 0.0 +434 78 optimizer.lr 0.08738478984749382 +434 78 training.batch_size 2.0 +434 78 training.label_smoothing 0.005737997619692034 +434 79 model.embedding_dim 0.0 +434 79 optimizer.lr 0.06872403915457949 +434 79 training.batch_size 2.0 +434 79 training.label_smoothing 0.9266099841961705 +434 80 model.embedding_dim 1.0 +434 80 optimizer.lr 0.026146705314393913 +434 80 training.batch_size 0.0 +434 80 training.label_smoothing 0.0387907543051513 +434 81 model.embedding_dim 1.0 +434 81 optimizer.lr 0.06729841247593094 +434 81 training.batch_size 2.0 +434 81 training.label_smoothing 0.03681699305566565 +434 82 model.embedding_dim 0.0 +434 82 optimizer.lr 0.0012421257969995505 +434 82 training.batch_size 2.0 +434 82 training.label_smoothing 0.014771632998127721 +434 83 model.embedding_dim 0.0 +434 83 optimizer.lr 0.0019217754507679648 +434 83 training.batch_size 2.0 +434 83 training.label_smoothing 0.10587157704137361 +434 84 model.embedding_dim 0.0 +434 84 optimizer.lr 0.04386137613342518 +434 84 training.batch_size 2.0 +434 84 training.label_smoothing 0.007074686913879267 +434 85 model.embedding_dim 2.0 +434 85 optimizer.lr 0.03374772422254355 +434 85 training.batch_size 1.0 +434 85 training.label_smoothing 0.0016978178451416885 +434 86 model.embedding_dim 1.0 +434 86 optimizer.lr 0.025913687612726784 +434 86 training.batch_size 1.0 +434 86 
training.label_smoothing 0.0011089337677657627 +434 87 model.embedding_dim 0.0 +434 87 optimizer.lr 0.05976872118898041 +434 87 training.batch_size 0.0 +434 87 training.label_smoothing 0.0611557285359177 +434 88 model.embedding_dim 1.0 +434 88 optimizer.lr 0.02208597844357566 +434 88 training.batch_size 1.0 +434 88 training.label_smoothing 0.004859744183307763 +434 89 model.embedding_dim 1.0 +434 89 optimizer.lr 0.009123224393696361 +434 89 training.batch_size 0.0 +434 89 training.label_smoothing 0.7057663912809381 +434 90 model.embedding_dim 0.0 +434 90 optimizer.lr 0.0018875000615029634 +434 90 training.batch_size 2.0 +434 90 training.label_smoothing 0.0011225575703710934 +434 91 model.embedding_dim 2.0 +434 91 optimizer.lr 0.014242439007244711 +434 91 training.batch_size 1.0 +434 91 training.label_smoothing 0.02741768723144322 +434 92 model.embedding_dim 2.0 +434 92 optimizer.lr 0.0018398853813445433 +434 92 training.batch_size 0.0 +434 92 training.label_smoothing 0.39430164906723253 +434 93 model.embedding_dim 2.0 +434 93 optimizer.lr 0.04438806285148828 +434 93 training.batch_size 2.0 +434 93 training.label_smoothing 0.04873924384192195 +434 94 model.embedding_dim 1.0 +434 94 optimizer.lr 0.005773412792821987 +434 94 training.batch_size 2.0 +434 94 training.label_smoothing 0.1054589523088754 +434 95 model.embedding_dim 1.0 +434 95 optimizer.lr 0.01097445144258258 +434 95 training.batch_size 0.0 +434 95 training.label_smoothing 0.01734778944471844 +434 96 model.embedding_dim 1.0 +434 96 optimizer.lr 0.0020084660015498427 +434 96 training.batch_size 0.0 +434 96 training.label_smoothing 0.0017680786872057701 +434 97 model.embedding_dim 2.0 +434 97 optimizer.lr 0.06114795824857358 +434 97 training.batch_size 0.0 +434 97 training.label_smoothing 0.0031310777349145774 +434 98 model.embedding_dim 0.0 +434 98 optimizer.lr 0.022713599705618575 +434 98 training.batch_size 2.0 +434 98 training.label_smoothing 0.6252573026119522 +434 99 model.embedding_dim 2.0 +434 99 
optimizer.lr 0.0016211088254790061 +434 99 training.batch_size 0.0 +434 99 training.label_smoothing 0.8217899213010751 +434 100 model.embedding_dim 1.0 +434 100 optimizer.lr 0.001582834328628958 +434 100 training.batch_size 2.0 +434 100 training.label_smoothing 0.021551619546701865 +434 1 dataset """kinships""" +434 1 model """ntn""" +434 1 loss """crossentropy""" +434 1 regularizer """no""" +434 1 optimizer """adam""" +434 1 training_loop """lcwa""" +434 1 evaluator """rankbased""" +434 2 dataset """kinships""" +434 2 model """ntn""" +434 2 loss """crossentropy""" +434 2 regularizer """no""" +434 2 optimizer """adam""" +434 2 training_loop """lcwa""" +434 2 evaluator """rankbased""" +434 3 dataset """kinships""" +434 3 model """ntn""" +434 3 loss """crossentropy""" +434 3 regularizer """no""" +434 3 optimizer """adam""" +434 3 training_loop """lcwa""" +434 3 evaluator """rankbased""" +434 4 dataset """kinships""" +434 4 model """ntn""" +434 4 loss """crossentropy""" +434 4 regularizer """no""" +434 4 optimizer """adam""" +434 4 training_loop """lcwa""" +434 4 evaluator """rankbased""" +434 5 dataset """kinships""" +434 5 model """ntn""" +434 5 loss """crossentropy""" +434 5 regularizer """no""" +434 5 optimizer """adam""" +434 5 training_loop """lcwa""" +434 5 evaluator """rankbased""" +434 6 dataset """kinships""" +434 6 model """ntn""" +434 6 loss """crossentropy""" +434 6 regularizer """no""" +434 6 optimizer """adam""" +434 6 training_loop """lcwa""" +434 6 evaluator """rankbased""" +434 7 dataset """kinships""" +434 7 model """ntn""" +434 7 loss """crossentropy""" +434 7 regularizer """no""" +434 7 optimizer """adam""" +434 7 training_loop """lcwa""" +434 7 evaluator """rankbased""" +434 8 dataset """kinships""" +434 8 model """ntn""" +434 8 loss """crossentropy""" +434 8 regularizer """no""" +434 8 optimizer """adam""" +434 8 training_loop """lcwa""" +434 8 evaluator """rankbased""" +434 9 dataset """kinships""" +434 9 model """ntn""" +434 9 loss 
"""crossentropy""" +434 9 regularizer """no""" +434 9 optimizer """adam""" +434 9 training_loop """lcwa""" +434 9 evaluator """rankbased""" +434 10 dataset """kinships""" +434 10 model """ntn""" +434 10 loss """crossentropy""" +434 10 regularizer """no""" +434 10 optimizer """adam""" +434 10 training_loop """lcwa""" +434 10 evaluator """rankbased""" +434 11 dataset """kinships""" +434 11 model """ntn""" +434 11 loss """crossentropy""" +434 11 regularizer """no""" +434 11 optimizer """adam""" +434 11 training_loop """lcwa""" +434 11 evaluator """rankbased""" +434 12 dataset """kinships""" +434 12 model """ntn""" +434 12 loss """crossentropy""" +434 12 regularizer """no""" +434 12 optimizer """adam""" +434 12 training_loop """lcwa""" +434 12 evaluator """rankbased""" +434 13 dataset """kinships""" +434 13 model """ntn""" +434 13 loss """crossentropy""" +434 13 regularizer """no""" +434 13 optimizer """adam""" +434 13 training_loop """lcwa""" +434 13 evaluator """rankbased""" +434 14 dataset """kinships""" +434 14 model """ntn""" +434 14 loss """crossentropy""" +434 14 regularizer """no""" +434 14 optimizer """adam""" +434 14 training_loop """lcwa""" +434 14 evaluator """rankbased""" +434 15 dataset """kinships""" +434 15 model """ntn""" +434 15 loss """crossentropy""" +434 15 regularizer """no""" +434 15 optimizer """adam""" +434 15 training_loop """lcwa""" +434 15 evaluator """rankbased""" +434 16 dataset """kinships""" +434 16 model """ntn""" +434 16 loss """crossentropy""" +434 16 regularizer """no""" +434 16 optimizer """adam""" +434 16 training_loop """lcwa""" +434 16 evaluator """rankbased""" +434 17 dataset """kinships""" +434 17 model """ntn""" +434 17 loss """crossentropy""" +434 17 regularizer """no""" +434 17 optimizer """adam""" +434 17 training_loop """lcwa""" +434 17 evaluator """rankbased""" +434 18 dataset """kinships""" +434 18 model """ntn""" +434 18 loss """crossentropy""" +434 18 regularizer """no""" +434 18 optimizer """adam""" +434 18 
training_loop """lcwa""" +434 18 evaluator """rankbased""" +434 19 dataset """kinships""" +434 19 model """ntn""" +434 19 loss """crossentropy""" +434 19 regularizer """no""" +434 19 optimizer """adam""" +434 19 training_loop """lcwa""" +434 19 evaluator """rankbased""" +434 20 dataset """kinships""" +434 20 model """ntn""" +434 20 loss """crossentropy""" +434 20 regularizer """no""" +434 20 optimizer """adam""" +434 20 training_loop """lcwa""" +434 20 evaluator """rankbased""" +434 21 dataset """kinships""" +434 21 model """ntn""" +434 21 loss """crossentropy""" +434 21 regularizer """no""" +434 21 optimizer """adam""" +434 21 training_loop """lcwa""" +434 21 evaluator """rankbased""" +434 22 dataset """kinships""" +434 22 model """ntn""" +434 22 loss """crossentropy""" +434 22 regularizer """no""" +434 22 optimizer """adam""" +434 22 training_loop """lcwa""" +434 22 evaluator """rankbased""" +434 23 dataset """kinships""" +434 23 model """ntn""" +434 23 loss """crossentropy""" +434 23 regularizer """no""" +434 23 optimizer """adam""" +434 23 training_loop """lcwa""" +434 23 evaluator """rankbased""" +434 24 dataset """kinships""" +434 24 model """ntn""" +434 24 loss """crossentropy""" +434 24 regularizer """no""" +434 24 optimizer """adam""" +434 24 training_loop """lcwa""" +434 24 evaluator """rankbased""" +434 25 dataset """kinships""" +434 25 model """ntn""" +434 25 loss """crossentropy""" +434 25 regularizer """no""" +434 25 optimizer """adam""" +434 25 training_loop """lcwa""" +434 25 evaluator """rankbased""" +434 26 dataset """kinships""" +434 26 model """ntn""" +434 26 loss """crossentropy""" +434 26 regularizer """no""" +434 26 optimizer """adam""" +434 26 training_loop """lcwa""" +434 26 evaluator """rankbased""" +434 27 dataset """kinships""" +434 27 model """ntn""" +434 27 loss """crossentropy""" +434 27 regularizer """no""" +434 27 optimizer """adam""" +434 27 training_loop """lcwa""" +434 27 evaluator """rankbased""" +434 28 dataset """kinships""" 
+434 28 model """ntn""" +434 28 loss """crossentropy""" +434 28 regularizer """no""" +434 28 optimizer """adam""" +434 28 training_loop """lcwa""" +434 28 evaluator """rankbased""" +434 29 dataset """kinships""" +434 29 model """ntn""" +434 29 loss """crossentropy""" +434 29 regularizer """no""" +434 29 optimizer """adam""" +434 29 training_loop """lcwa""" +434 29 evaluator """rankbased""" +434 30 dataset """kinships""" +434 30 model """ntn""" +434 30 loss """crossentropy""" +434 30 regularizer """no""" +434 30 optimizer """adam""" +434 30 training_loop """lcwa""" +434 30 evaluator """rankbased""" +434 31 dataset """kinships""" +434 31 model """ntn""" +434 31 loss """crossentropy""" +434 31 regularizer """no""" +434 31 optimizer """adam""" +434 31 training_loop """lcwa""" +434 31 evaluator """rankbased""" +434 32 dataset """kinships""" +434 32 model """ntn""" +434 32 loss """crossentropy""" +434 32 regularizer """no""" +434 32 optimizer """adam""" +434 32 training_loop """lcwa""" +434 32 evaluator """rankbased""" +434 33 dataset """kinships""" +434 33 model """ntn""" +434 33 loss """crossentropy""" +434 33 regularizer """no""" +434 33 optimizer """adam""" +434 33 training_loop """lcwa""" +434 33 evaluator """rankbased""" +434 34 dataset """kinships""" +434 34 model """ntn""" +434 34 loss """crossentropy""" +434 34 regularizer """no""" +434 34 optimizer """adam""" +434 34 training_loop """lcwa""" +434 34 evaluator """rankbased""" +434 35 dataset """kinships""" +434 35 model """ntn""" +434 35 loss """crossentropy""" +434 35 regularizer """no""" +434 35 optimizer """adam""" +434 35 training_loop """lcwa""" +434 35 evaluator """rankbased""" +434 36 dataset """kinships""" +434 36 model """ntn""" +434 36 loss """crossentropy""" +434 36 regularizer """no""" +434 36 optimizer """adam""" +434 36 training_loop """lcwa""" +434 36 evaluator """rankbased""" +434 37 dataset """kinships""" +434 37 model """ntn""" +434 37 loss """crossentropy""" +434 37 regularizer """no""" +434 
37 optimizer """adam""" +434 37 training_loop """lcwa""" +434 37 evaluator """rankbased""" +434 38 dataset """kinships""" +434 38 model """ntn""" +434 38 loss """crossentropy""" +434 38 regularizer """no""" +434 38 optimizer """adam""" +434 38 training_loop """lcwa""" +434 38 evaluator """rankbased""" +434 39 dataset """kinships""" +434 39 model """ntn""" +434 39 loss """crossentropy""" +434 39 regularizer """no""" +434 39 optimizer """adam""" +434 39 training_loop """lcwa""" +434 39 evaluator """rankbased""" +434 40 dataset """kinships""" +434 40 model """ntn""" +434 40 loss """crossentropy""" +434 40 regularizer """no""" +434 40 optimizer """adam""" +434 40 training_loop """lcwa""" +434 40 evaluator """rankbased""" +434 41 dataset """kinships""" +434 41 model """ntn""" +434 41 loss """crossentropy""" +434 41 regularizer """no""" +434 41 optimizer """adam""" +434 41 training_loop """lcwa""" +434 41 evaluator """rankbased""" +434 42 dataset """kinships""" +434 42 model """ntn""" +434 42 loss """crossentropy""" +434 42 regularizer """no""" +434 42 optimizer """adam""" +434 42 training_loop """lcwa""" +434 42 evaluator """rankbased""" +434 43 dataset """kinships""" +434 43 model """ntn""" +434 43 loss """crossentropy""" +434 43 regularizer """no""" +434 43 optimizer """adam""" +434 43 training_loop """lcwa""" +434 43 evaluator """rankbased""" +434 44 dataset """kinships""" +434 44 model """ntn""" +434 44 loss """crossentropy""" +434 44 regularizer """no""" +434 44 optimizer """adam""" +434 44 training_loop """lcwa""" +434 44 evaluator """rankbased""" +434 45 dataset """kinships""" +434 45 model """ntn""" +434 45 loss """crossentropy""" +434 45 regularizer """no""" +434 45 optimizer """adam""" +434 45 training_loop """lcwa""" +434 45 evaluator """rankbased""" +434 46 dataset """kinships""" +434 46 model """ntn""" +434 46 loss """crossentropy""" +434 46 regularizer """no""" +434 46 optimizer """adam""" +434 46 training_loop """lcwa""" +434 46 evaluator """rankbased""" 
+434 47 dataset """kinships""" +434 47 model """ntn""" +434 47 loss """crossentropy""" +434 47 regularizer """no""" +434 47 optimizer """adam""" +434 47 training_loop """lcwa""" +434 47 evaluator """rankbased""" +434 48 dataset """kinships""" +434 48 model """ntn""" +434 48 loss """crossentropy""" +434 48 regularizer """no""" +434 48 optimizer """adam""" +434 48 training_loop """lcwa""" +434 48 evaluator """rankbased""" +434 49 dataset """kinships""" +434 49 model """ntn""" +434 49 loss """crossentropy""" +434 49 regularizer """no""" +434 49 optimizer """adam""" +434 49 training_loop """lcwa""" +434 49 evaluator """rankbased""" +434 50 dataset """kinships""" +434 50 model """ntn""" +434 50 loss """crossentropy""" +434 50 regularizer """no""" +434 50 optimizer """adam""" +434 50 training_loop """lcwa""" +434 50 evaluator """rankbased""" +434 51 dataset """kinships""" +434 51 model """ntn""" +434 51 loss """crossentropy""" +434 51 regularizer """no""" +434 51 optimizer """adam""" +434 51 training_loop """lcwa""" +434 51 evaluator """rankbased""" +434 52 dataset """kinships""" +434 52 model """ntn""" +434 52 loss """crossentropy""" +434 52 regularizer """no""" +434 52 optimizer """adam""" +434 52 training_loop """lcwa""" +434 52 evaluator """rankbased""" +434 53 dataset """kinships""" +434 53 model """ntn""" +434 53 loss """crossentropy""" +434 53 regularizer """no""" +434 53 optimizer """adam""" +434 53 training_loop """lcwa""" +434 53 evaluator """rankbased""" +434 54 dataset """kinships""" +434 54 model """ntn""" +434 54 loss """crossentropy""" +434 54 regularizer """no""" +434 54 optimizer """adam""" +434 54 training_loop """lcwa""" +434 54 evaluator """rankbased""" +434 55 dataset """kinships""" +434 55 model """ntn""" +434 55 loss """crossentropy""" +434 55 regularizer """no""" +434 55 optimizer """adam""" +434 55 training_loop """lcwa""" +434 55 evaluator """rankbased""" +434 56 dataset """kinships""" +434 56 model """ntn""" +434 56 loss """crossentropy""" +434 
56 regularizer """no""" +434 56 optimizer """adam""" +434 56 training_loop """lcwa""" +434 56 evaluator """rankbased""" +434 57 dataset """kinships""" +434 57 model """ntn""" +434 57 loss """crossentropy""" +434 57 regularizer """no""" +434 57 optimizer """adam""" +434 57 training_loop """lcwa""" +434 57 evaluator """rankbased""" +434 58 dataset """kinships""" +434 58 model """ntn""" +434 58 loss """crossentropy""" +434 58 regularizer """no""" +434 58 optimizer """adam""" +434 58 training_loop """lcwa""" +434 58 evaluator """rankbased""" +434 59 dataset """kinships""" +434 59 model """ntn""" +434 59 loss """crossentropy""" +434 59 regularizer """no""" +434 59 optimizer """adam""" +434 59 training_loop """lcwa""" +434 59 evaluator """rankbased""" +434 60 dataset """kinships""" +434 60 model """ntn""" +434 60 loss """crossentropy""" +434 60 regularizer """no""" +434 60 optimizer """adam""" +434 60 training_loop """lcwa""" +434 60 evaluator """rankbased""" +434 61 dataset """kinships""" +434 61 model """ntn""" +434 61 loss """crossentropy""" +434 61 regularizer """no""" +434 61 optimizer """adam""" +434 61 training_loop """lcwa""" +434 61 evaluator """rankbased""" +434 62 dataset """kinships""" +434 62 model """ntn""" +434 62 loss """crossentropy""" +434 62 regularizer """no""" +434 62 optimizer """adam""" +434 62 training_loop """lcwa""" +434 62 evaluator """rankbased""" +434 63 dataset """kinships""" +434 63 model """ntn""" +434 63 loss """crossentropy""" +434 63 regularizer """no""" +434 63 optimizer """adam""" +434 63 training_loop """lcwa""" +434 63 evaluator """rankbased""" +434 64 dataset """kinships""" +434 64 model """ntn""" +434 64 loss """crossentropy""" +434 64 regularizer """no""" +434 64 optimizer """adam""" +434 64 training_loop """lcwa""" +434 64 evaluator """rankbased""" +434 65 dataset """kinships""" +434 65 model """ntn""" +434 65 loss """crossentropy""" +434 65 regularizer """no""" +434 65 optimizer """adam""" +434 65 training_loop """lcwa""" +434 
65 evaluator """rankbased""" +434 66 dataset """kinships""" +434 66 model """ntn""" +434 66 loss """crossentropy""" +434 66 regularizer """no""" +434 66 optimizer """adam""" +434 66 training_loop """lcwa""" +434 66 evaluator """rankbased""" +434 67 dataset """kinships""" +434 67 model """ntn""" +434 67 loss """crossentropy""" +434 67 regularizer """no""" +434 67 optimizer """adam""" +434 67 training_loop """lcwa""" +434 67 evaluator """rankbased""" +434 68 dataset """kinships""" +434 68 model """ntn""" +434 68 loss """crossentropy""" +434 68 regularizer """no""" +434 68 optimizer """adam""" +434 68 training_loop """lcwa""" +434 68 evaluator """rankbased""" +434 69 dataset """kinships""" +434 69 model """ntn""" +434 69 loss """crossentropy""" +434 69 regularizer """no""" +434 69 optimizer """adam""" +434 69 training_loop """lcwa""" +434 69 evaluator """rankbased""" +434 70 dataset """kinships""" +434 70 model """ntn""" +434 70 loss """crossentropy""" +434 70 regularizer """no""" +434 70 optimizer """adam""" +434 70 training_loop """lcwa""" +434 70 evaluator """rankbased""" +434 71 dataset """kinships""" +434 71 model """ntn""" +434 71 loss """crossentropy""" +434 71 regularizer """no""" +434 71 optimizer """adam""" +434 71 training_loop """lcwa""" +434 71 evaluator """rankbased""" +434 72 dataset """kinships""" +434 72 model """ntn""" +434 72 loss """crossentropy""" +434 72 regularizer """no""" +434 72 optimizer """adam""" +434 72 training_loop """lcwa""" +434 72 evaluator """rankbased""" +434 73 dataset """kinships""" +434 73 model """ntn""" +434 73 loss """crossentropy""" +434 73 regularizer """no""" +434 73 optimizer """adam""" +434 73 training_loop """lcwa""" +434 73 evaluator """rankbased""" +434 74 dataset """kinships""" +434 74 model """ntn""" +434 74 loss """crossentropy""" +434 74 regularizer """no""" +434 74 optimizer """adam""" +434 74 training_loop """lcwa""" +434 74 evaluator """rankbased""" +434 75 dataset """kinships""" +434 75 model """ntn""" +434 75 
loss """crossentropy""" +434 75 regularizer """no""" +434 75 optimizer """adam""" +434 75 training_loop """lcwa""" +434 75 evaluator """rankbased""" +434 76 dataset """kinships""" +434 76 model """ntn""" +434 76 loss """crossentropy""" +434 76 regularizer """no""" +434 76 optimizer """adam""" +434 76 training_loop """lcwa""" +434 76 evaluator """rankbased""" +434 77 dataset """kinships""" +434 77 model """ntn""" +434 77 loss """crossentropy""" +434 77 regularizer """no""" +434 77 optimizer """adam""" +434 77 training_loop """lcwa""" +434 77 evaluator """rankbased""" +434 78 dataset """kinships""" +434 78 model """ntn""" +434 78 loss """crossentropy""" +434 78 regularizer """no""" +434 78 optimizer """adam""" +434 78 training_loop """lcwa""" +434 78 evaluator """rankbased""" +434 79 dataset """kinships""" +434 79 model """ntn""" +434 79 loss """crossentropy""" +434 79 regularizer """no""" +434 79 optimizer """adam""" +434 79 training_loop """lcwa""" +434 79 evaluator """rankbased""" +434 80 dataset """kinships""" +434 80 model """ntn""" +434 80 loss """crossentropy""" +434 80 regularizer """no""" +434 80 optimizer """adam""" +434 80 training_loop """lcwa""" +434 80 evaluator """rankbased""" +434 81 dataset """kinships""" +434 81 model """ntn""" +434 81 loss """crossentropy""" +434 81 regularizer """no""" +434 81 optimizer """adam""" +434 81 training_loop """lcwa""" +434 81 evaluator """rankbased""" +434 82 dataset """kinships""" +434 82 model """ntn""" +434 82 loss """crossentropy""" +434 82 regularizer """no""" +434 82 optimizer """adam""" +434 82 training_loop """lcwa""" +434 82 evaluator """rankbased""" +434 83 dataset """kinships""" +434 83 model """ntn""" +434 83 loss """crossentropy""" +434 83 regularizer """no""" +434 83 optimizer """adam""" +434 83 training_loop """lcwa""" +434 83 evaluator """rankbased""" +434 84 dataset """kinships""" +434 84 model """ntn""" +434 84 loss """crossentropy""" +434 84 regularizer """no""" +434 84 optimizer """adam""" +434 84 
training_loop """lcwa""" +434 84 evaluator """rankbased""" +434 85 dataset """kinships""" +434 85 model """ntn""" +434 85 loss """crossentropy""" +434 85 regularizer """no""" +434 85 optimizer """adam""" +434 85 training_loop """lcwa""" +434 85 evaluator """rankbased""" +434 86 dataset """kinships""" +434 86 model """ntn""" +434 86 loss """crossentropy""" +434 86 regularizer """no""" +434 86 optimizer """adam""" +434 86 training_loop """lcwa""" +434 86 evaluator """rankbased""" +434 87 dataset """kinships""" +434 87 model """ntn""" +434 87 loss """crossentropy""" +434 87 regularizer """no""" +434 87 optimizer """adam""" +434 87 training_loop """lcwa""" +434 87 evaluator """rankbased""" +434 88 dataset """kinships""" +434 88 model """ntn""" +434 88 loss """crossentropy""" +434 88 regularizer """no""" +434 88 optimizer """adam""" +434 88 training_loop """lcwa""" +434 88 evaluator """rankbased""" +434 89 dataset """kinships""" +434 89 model """ntn""" +434 89 loss """crossentropy""" +434 89 regularizer """no""" +434 89 optimizer """adam""" +434 89 training_loop """lcwa""" +434 89 evaluator """rankbased""" +434 90 dataset """kinships""" +434 90 model """ntn""" +434 90 loss """crossentropy""" +434 90 regularizer """no""" +434 90 optimizer """adam""" +434 90 training_loop """lcwa""" +434 90 evaluator """rankbased""" +434 91 dataset """kinships""" +434 91 model """ntn""" +434 91 loss """crossentropy""" +434 91 regularizer """no""" +434 91 optimizer """adam""" +434 91 training_loop """lcwa""" +434 91 evaluator """rankbased""" +434 92 dataset """kinships""" +434 92 model """ntn""" +434 92 loss """crossentropy""" +434 92 regularizer """no""" +434 92 optimizer """adam""" +434 92 training_loop """lcwa""" +434 92 evaluator """rankbased""" +434 93 dataset """kinships""" +434 93 model """ntn""" +434 93 loss """crossentropy""" +434 93 regularizer """no""" +434 93 optimizer """adam""" +434 93 training_loop """lcwa""" +434 93 evaluator """rankbased""" +434 94 dataset """kinships""" 
+434 94 model """ntn""" +434 94 loss """crossentropy""" +434 94 regularizer """no""" +434 94 optimizer """adam""" +434 94 training_loop """lcwa""" +434 94 evaluator """rankbased""" +434 95 dataset """kinships""" +434 95 model """ntn""" +434 95 loss """crossentropy""" +434 95 regularizer """no""" +434 95 optimizer """adam""" +434 95 training_loop """lcwa""" +434 95 evaluator """rankbased""" +434 96 dataset """kinships""" +434 96 model """ntn""" +434 96 loss """crossentropy""" +434 96 regularizer """no""" +434 96 optimizer """adam""" +434 96 training_loop """lcwa""" +434 96 evaluator """rankbased""" +434 97 dataset """kinships""" +434 97 model """ntn""" +434 97 loss """crossentropy""" +434 97 regularizer """no""" +434 97 optimizer """adam""" +434 97 training_loop """lcwa""" +434 97 evaluator """rankbased""" +434 98 dataset """kinships""" +434 98 model """ntn""" +434 98 loss """crossentropy""" +434 98 regularizer """no""" +434 98 optimizer """adam""" +434 98 training_loop """lcwa""" +434 98 evaluator """rankbased""" +434 99 dataset """kinships""" +434 99 model """ntn""" +434 99 loss """crossentropy""" +434 99 regularizer """no""" +434 99 optimizer """adam""" +434 99 training_loop """lcwa""" +434 99 evaluator """rankbased""" +434 100 dataset """kinships""" +434 100 model """ntn""" +434 100 loss """crossentropy""" +434 100 regularizer """no""" +434 100 optimizer """adam""" +434 100 training_loop """lcwa""" +434 100 evaluator """rankbased""" +435 1 model.embedding_dim 0.0 +435 1 loss.margin 3.3334431122524677 +435 1 optimizer.lr 0.022737899623606648 +435 1 negative_sampler.num_negs_per_pos 91.0 +435 1 training.batch_size 0.0 +435 2 model.embedding_dim 1.0 +435 2 loss.margin 1.2097450337813556 +435 2 optimizer.lr 0.0015093376849336277 +435 2 negative_sampler.num_negs_per_pos 70.0 +435 2 training.batch_size 2.0 +435 3 model.embedding_dim 2.0 +435 3 loss.margin 6.6499527155566645 +435 3 optimizer.lr 0.018181562335107076 +435 3 negative_sampler.num_negs_per_pos 56.0 +435 3 
training.batch_size 1.0 +435 4 model.embedding_dim 2.0 +435 4 loss.margin 2.328366884211568 +435 4 optimizer.lr 0.002992970615978448 +435 4 negative_sampler.num_negs_per_pos 8.0 +435 4 training.batch_size 1.0 +435 5 model.embedding_dim 0.0 +435 5 loss.margin 4.718431142123332 +435 5 optimizer.lr 0.0021746190833007964 +435 5 negative_sampler.num_negs_per_pos 16.0 +435 5 training.batch_size 0.0 +435 6 model.embedding_dim 0.0 +435 6 loss.margin 3.6290756109395543 +435 6 optimizer.lr 0.02039478067217331 +435 6 negative_sampler.num_negs_per_pos 4.0 +435 6 training.batch_size 0.0 +435 7 model.embedding_dim 0.0 +435 7 loss.margin 3.1316045533956354 +435 7 optimizer.lr 0.007719337219986569 +435 7 negative_sampler.num_negs_per_pos 68.0 +435 7 training.batch_size 1.0 +435 8 model.embedding_dim 0.0 +435 8 loss.margin 1.190571326220891 +435 8 optimizer.lr 0.022774603799550677 +435 8 negative_sampler.num_negs_per_pos 67.0 +435 8 training.batch_size 0.0 +435 9 model.embedding_dim 2.0 +435 9 loss.margin 8.281513451897576 +435 9 optimizer.lr 0.010744785751175606 +435 9 negative_sampler.num_negs_per_pos 79.0 +435 9 training.batch_size 1.0 +435 10 model.embedding_dim 0.0 +435 10 loss.margin 7.431326312744007 +435 10 optimizer.lr 0.007247916917686493 +435 10 negative_sampler.num_negs_per_pos 81.0 +435 10 training.batch_size 2.0 +435 11 model.embedding_dim 2.0 +435 11 loss.margin 9.287817945408626 +435 11 optimizer.lr 0.0041767310709556575 +435 11 negative_sampler.num_negs_per_pos 12.0 +435 11 training.batch_size 1.0 +435 12 model.embedding_dim 2.0 +435 12 loss.margin 1.4146672389458301 +435 12 optimizer.lr 0.08182314051760113 +435 12 negative_sampler.num_negs_per_pos 25.0 +435 12 training.batch_size 1.0 +435 13 model.embedding_dim 2.0 +435 13 loss.margin 1.953861800614428 +435 13 optimizer.lr 0.002141383363678115 +435 13 negative_sampler.num_negs_per_pos 86.0 +435 13 training.batch_size 0.0 +435 14 model.embedding_dim 0.0 +435 14 loss.margin 6.870814828707494 +435 14 optimizer.lr 
0.04782414868423768 +435 14 negative_sampler.num_negs_per_pos 73.0 +435 14 training.batch_size 0.0 +435 15 model.embedding_dim 0.0 +435 15 loss.margin 8.162239588525843 +435 15 optimizer.lr 0.03467410773481946 +435 15 negative_sampler.num_negs_per_pos 37.0 +435 15 training.batch_size 1.0 +435 16 model.embedding_dim 2.0 +435 16 loss.margin 7.882400983445928 +435 16 optimizer.lr 0.0037717874152238115 +435 16 negative_sampler.num_negs_per_pos 92.0 +435 16 training.batch_size 0.0 +435 17 model.embedding_dim 1.0 +435 17 loss.margin 5.626995931117625 +435 17 optimizer.lr 0.0012928818606875378 +435 17 negative_sampler.num_negs_per_pos 75.0 +435 17 training.batch_size 0.0 +435 18 model.embedding_dim 2.0 +435 18 loss.margin 4.783302546697559 +435 18 optimizer.lr 0.0011144913517414513 +435 18 negative_sampler.num_negs_per_pos 19.0 +435 18 training.batch_size 0.0 +435 19 model.embedding_dim 2.0 +435 19 loss.margin 1.7274210066685252 +435 19 optimizer.lr 0.04240543413452491 +435 19 negative_sampler.num_negs_per_pos 84.0 +435 19 training.batch_size 2.0 +435 1 dataset """kinships""" +435 1 model """ntn""" +435 1 loss """marginranking""" +435 1 regularizer """no""" +435 1 optimizer """adam""" +435 1 training_loop """owa""" +435 1 negative_sampler """basic""" +435 1 evaluator """rankbased""" +435 2 dataset """kinships""" +435 2 model """ntn""" +435 2 loss """marginranking""" +435 2 regularizer """no""" +435 2 optimizer """adam""" +435 2 training_loop """owa""" +435 2 negative_sampler """basic""" +435 2 evaluator """rankbased""" +435 3 dataset """kinships""" +435 3 model """ntn""" +435 3 loss """marginranking""" +435 3 regularizer """no""" +435 3 optimizer """adam""" +435 3 training_loop """owa""" +435 3 negative_sampler """basic""" +435 3 evaluator """rankbased""" +435 4 dataset """kinships""" +435 4 model """ntn""" +435 4 loss """marginranking""" +435 4 regularizer """no""" +435 4 optimizer """adam""" +435 4 training_loop """owa""" +435 4 negative_sampler """basic""" +435 4 
evaluator """rankbased""" +435 5 dataset """kinships""" +435 5 model """ntn""" +435 5 loss """marginranking""" +435 5 regularizer """no""" +435 5 optimizer """adam""" +435 5 training_loop """owa""" +435 5 negative_sampler """basic""" +435 5 evaluator """rankbased""" +435 6 dataset """kinships""" +435 6 model """ntn""" +435 6 loss """marginranking""" +435 6 regularizer """no""" +435 6 optimizer """adam""" +435 6 training_loop """owa""" +435 6 negative_sampler """basic""" +435 6 evaluator """rankbased""" +435 7 dataset """kinships""" +435 7 model """ntn""" +435 7 loss """marginranking""" +435 7 regularizer """no""" +435 7 optimizer """adam""" +435 7 training_loop """owa""" +435 7 negative_sampler """basic""" +435 7 evaluator """rankbased""" +435 8 dataset """kinships""" +435 8 model """ntn""" +435 8 loss """marginranking""" +435 8 regularizer """no""" +435 8 optimizer """adam""" +435 8 training_loop """owa""" +435 8 negative_sampler """basic""" +435 8 evaluator """rankbased""" +435 9 dataset """kinships""" +435 9 model """ntn""" +435 9 loss """marginranking""" +435 9 regularizer """no""" +435 9 optimizer """adam""" +435 9 training_loop """owa""" +435 9 negative_sampler """basic""" +435 9 evaluator """rankbased""" +435 10 dataset """kinships""" +435 10 model """ntn""" +435 10 loss """marginranking""" +435 10 regularizer """no""" +435 10 optimizer """adam""" +435 10 training_loop """owa""" +435 10 negative_sampler """basic""" +435 10 evaluator """rankbased""" +435 11 dataset """kinships""" +435 11 model """ntn""" +435 11 loss """marginranking""" +435 11 regularizer """no""" +435 11 optimizer """adam""" +435 11 training_loop """owa""" +435 11 negative_sampler """basic""" +435 11 evaluator """rankbased""" +435 12 dataset """kinships""" +435 12 model """ntn""" +435 12 loss """marginranking""" +435 12 regularizer """no""" +435 12 optimizer """adam""" +435 12 training_loop """owa""" +435 12 negative_sampler """basic""" +435 12 evaluator """rankbased""" +435 13 dataset 
"""kinships""" +435 13 model """ntn""" +435 13 loss """marginranking""" +435 13 regularizer """no""" +435 13 optimizer """adam""" +435 13 training_loop """owa""" +435 13 negative_sampler """basic""" +435 13 evaluator """rankbased""" +435 14 dataset """kinships""" +435 14 model """ntn""" +435 14 loss """marginranking""" +435 14 regularizer """no""" +435 14 optimizer """adam""" +435 14 training_loop """owa""" +435 14 negative_sampler """basic""" +435 14 evaluator """rankbased""" +435 15 dataset """kinships""" +435 15 model """ntn""" +435 15 loss """marginranking""" +435 15 regularizer """no""" +435 15 optimizer """adam""" +435 15 training_loop """owa""" +435 15 negative_sampler """basic""" +435 15 evaluator """rankbased""" +435 16 dataset """kinships""" +435 16 model """ntn""" +435 16 loss """marginranking""" +435 16 regularizer """no""" +435 16 optimizer """adam""" +435 16 training_loop """owa""" +435 16 negative_sampler """basic""" +435 16 evaluator """rankbased""" +435 17 dataset """kinships""" +435 17 model """ntn""" +435 17 loss """marginranking""" +435 17 regularizer """no""" +435 17 optimizer """adam""" +435 17 training_loop """owa""" +435 17 negative_sampler """basic""" +435 17 evaluator """rankbased""" +435 18 dataset """kinships""" +435 18 model """ntn""" +435 18 loss """marginranking""" +435 18 regularizer """no""" +435 18 optimizer """adam""" +435 18 training_loop """owa""" +435 18 negative_sampler """basic""" +435 18 evaluator """rankbased""" +435 19 dataset """kinships""" +435 19 model """ntn""" +435 19 loss """marginranking""" +435 19 regularizer """no""" +435 19 optimizer """adam""" +435 19 training_loop """owa""" +435 19 negative_sampler """basic""" +435 19 evaluator """rankbased""" +436 1 model.embedding_dim 0.0 +436 1 loss.margin 7.848622017418798 +436 1 optimizer.lr 0.07553560850121255 +436 1 negative_sampler.num_negs_per_pos 4.0 +436 1 training.batch_size 1.0 +436 2 model.embedding_dim 1.0 +436 2 loss.margin 2.5580358137463026 +436 2 optimizer.lr 
0.009003712701297347 +436 2 negative_sampler.num_negs_per_pos 12.0 +436 2 training.batch_size 1.0 +436 3 model.embedding_dim 2.0 +436 3 loss.margin 7.410598936519456 +436 3 optimizer.lr 0.0016333511039181335 +436 3 negative_sampler.num_negs_per_pos 56.0 +436 3 training.batch_size 2.0 +436 4 model.embedding_dim 0.0 +436 4 loss.margin 9.535679514097698 +436 4 optimizer.lr 0.09786428336853606 +436 4 negative_sampler.num_negs_per_pos 8.0 +436 4 training.batch_size 2.0 +436 5 model.embedding_dim 1.0 +436 5 loss.margin 3.839894282947907 +436 5 optimizer.lr 0.008440247026511888 +436 5 negative_sampler.num_negs_per_pos 81.0 +436 5 training.batch_size 0.0 +436 6 model.embedding_dim 1.0 +436 6 loss.margin 8.056623418270695 +436 6 optimizer.lr 0.05870592090937014 +436 6 negative_sampler.num_negs_per_pos 38.0 +436 6 training.batch_size 2.0 +436 7 model.embedding_dim 0.0 +436 7 loss.margin 3.7605654591077857 +436 7 optimizer.lr 0.024730652345507333 +436 7 negative_sampler.num_negs_per_pos 62.0 +436 7 training.batch_size 2.0 +436 8 model.embedding_dim 0.0 +436 8 loss.margin 3.734681665245775 +436 8 optimizer.lr 0.020083129343518198 +436 8 negative_sampler.num_negs_per_pos 28.0 +436 8 training.batch_size 0.0 +436 9 model.embedding_dim 2.0 +436 9 loss.margin 2.125092901187883 +436 9 optimizer.lr 0.02970776997167905 +436 9 negative_sampler.num_negs_per_pos 91.0 +436 9 training.batch_size 0.0 +436 10 model.embedding_dim 0.0 +436 10 loss.margin 8.87614922036154 +436 10 optimizer.lr 0.052488544118420875 +436 10 negative_sampler.num_negs_per_pos 28.0 +436 10 training.batch_size 2.0 +436 11 model.embedding_dim 0.0 +436 11 loss.margin 5.8939392298800675 +436 11 optimizer.lr 0.002447324906141658 +436 11 negative_sampler.num_negs_per_pos 74.0 +436 11 training.batch_size 1.0 +436 12 model.embedding_dim 2.0 +436 12 loss.margin 5.2555949120001015 +436 12 optimizer.lr 0.005988399162034319 +436 12 negative_sampler.num_negs_per_pos 42.0 +436 12 training.batch_size 1.0 +436 13 model.embedding_dim 
2.0 +436 13 loss.margin 4.986973949074118 +436 13 optimizer.lr 0.01612086938872575 +436 13 negative_sampler.num_negs_per_pos 91.0 +436 13 training.batch_size 1.0 +436 14 model.embedding_dim 2.0 +436 14 loss.margin 6.225060742551376 +436 14 optimizer.lr 0.010296673824717663 +436 14 negative_sampler.num_negs_per_pos 53.0 +436 14 training.batch_size 1.0 +436 15 model.embedding_dim 2.0 +436 15 loss.margin 9.291519803640323 +436 15 optimizer.lr 0.0012675296154687017 +436 15 negative_sampler.num_negs_per_pos 36.0 +436 15 training.batch_size 1.0 +436 16 model.embedding_dim 1.0 +436 16 loss.margin 2.516114790722526 +436 16 optimizer.lr 0.04080564662223635 +436 16 negative_sampler.num_negs_per_pos 86.0 +436 16 training.batch_size 0.0 +436 17 model.embedding_dim 1.0 +436 17 loss.margin 5.218901123013984 +436 17 optimizer.lr 0.0076741319077222436 +436 17 negative_sampler.num_negs_per_pos 13.0 +436 17 training.batch_size 0.0 +436 18 model.embedding_dim 0.0 +436 18 loss.margin 4.38375813987822 +436 18 optimizer.lr 0.009325134346472402 +436 18 negative_sampler.num_negs_per_pos 15.0 +436 18 training.batch_size 1.0 +436 19 model.embedding_dim 2.0 +436 19 loss.margin 1.8889730167866707 +436 19 optimizer.lr 0.0028891957505271693 +436 19 negative_sampler.num_negs_per_pos 87.0 +436 19 training.batch_size 1.0 +436 20 model.embedding_dim 2.0 +436 20 loss.margin 8.038580137716501 +436 20 optimizer.lr 0.03758595119684865 +436 20 negative_sampler.num_negs_per_pos 93.0 +436 20 training.batch_size 1.0 +436 21 model.embedding_dim 1.0 +436 21 loss.margin 1.1614784321067924 +436 21 optimizer.lr 0.003126568553728942 +436 21 negative_sampler.num_negs_per_pos 97.0 +436 21 training.batch_size 1.0 +436 22 model.embedding_dim 0.0 +436 22 loss.margin 7.285741563118389 +436 22 optimizer.lr 0.0013180236767336784 +436 22 negative_sampler.num_negs_per_pos 50.0 +436 22 training.batch_size 1.0 +436 23 model.embedding_dim 0.0 +436 23 loss.margin 5.299455854445967 +436 23 optimizer.lr 0.009060172239698086 
+436 23 negative_sampler.num_negs_per_pos 39.0 +436 23 training.batch_size 1.0 +436 24 model.embedding_dim 1.0 +436 24 loss.margin 5.498481888107559 +436 24 optimizer.lr 0.011125578348486525 +436 24 negative_sampler.num_negs_per_pos 76.0 +436 24 training.batch_size 1.0 +436 25 model.embedding_dim 1.0 +436 25 loss.margin 3.0191450445098713 +436 25 optimizer.lr 0.008367592043443426 +436 25 negative_sampler.num_negs_per_pos 5.0 +436 25 training.batch_size 0.0 +436 26 model.embedding_dim 1.0 +436 26 loss.margin 5.64557122412781 +436 26 optimizer.lr 0.00341391500325392 +436 26 negative_sampler.num_negs_per_pos 80.0 +436 26 training.batch_size 0.0 +436 27 model.embedding_dim 2.0 +436 27 loss.margin 3.3936809890959916 +436 27 optimizer.lr 0.009155439754008633 +436 27 negative_sampler.num_negs_per_pos 91.0 +436 27 training.batch_size 0.0 +436 28 model.embedding_dim 1.0 +436 28 loss.margin 1.141362400044378 +436 28 optimizer.lr 0.0020630281492566203 +436 28 negative_sampler.num_negs_per_pos 99.0 +436 28 training.batch_size 1.0 +436 29 model.embedding_dim 1.0 +436 29 loss.margin 2.6928068278266615 +436 29 optimizer.lr 0.01416645388022338 +436 29 negative_sampler.num_negs_per_pos 39.0 +436 29 training.batch_size 1.0 +436 30 model.embedding_dim 0.0 +436 30 loss.margin 6.509640178973367 +436 30 optimizer.lr 0.008958196125679165 +436 30 negative_sampler.num_negs_per_pos 61.0 +436 30 training.batch_size 1.0 +436 31 model.embedding_dim 0.0 +436 31 loss.margin 1.8952655996936136 +436 31 optimizer.lr 0.014939196681719495 +436 31 negative_sampler.num_negs_per_pos 34.0 +436 31 training.batch_size 0.0 +436 32 model.embedding_dim 1.0 +436 32 loss.margin 9.140361087036567 +436 32 optimizer.lr 0.0012554145521901725 +436 32 negative_sampler.num_negs_per_pos 72.0 +436 32 training.batch_size 2.0 +436 33 model.embedding_dim 1.0 +436 33 loss.margin 9.99389841338346 +436 33 optimizer.lr 0.03533892531197783 +436 33 negative_sampler.num_negs_per_pos 81.0 +436 33 training.batch_size 1.0 +436 34 
model.embedding_dim 0.0 +436 34 loss.margin 5.856457319251129 +436 34 optimizer.lr 0.0016995544661646635 +436 34 negative_sampler.num_negs_per_pos 25.0 +436 34 training.batch_size 0.0 +436 35 model.embedding_dim 2.0 +436 35 loss.margin 5.112895749632985 +436 35 optimizer.lr 0.002822973996681553 +436 35 negative_sampler.num_negs_per_pos 90.0 +436 35 training.batch_size 2.0 +436 36 model.embedding_dim 0.0 +436 36 loss.margin 9.618065440124731 +436 36 optimizer.lr 0.003910129233593329 +436 36 negative_sampler.num_negs_per_pos 21.0 +436 36 training.batch_size 2.0 +436 37 model.embedding_dim 0.0 +436 37 loss.margin 8.56621971739425 +436 37 optimizer.lr 0.013875361845034883 +436 37 negative_sampler.num_negs_per_pos 60.0 +436 37 training.batch_size 0.0 +436 38 model.embedding_dim 0.0 +436 38 loss.margin 2.3400944265994306 +436 38 optimizer.lr 0.021699998767478215 +436 38 negative_sampler.num_negs_per_pos 75.0 +436 38 training.batch_size 1.0 +436 39 model.embedding_dim 0.0 +436 39 loss.margin 1.903346291719716 +436 39 optimizer.lr 0.0035973721150083463 +436 39 negative_sampler.num_negs_per_pos 30.0 +436 39 training.batch_size 0.0 +436 40 model.embedding_dim 1.0 +436 40 loss.margin 2.8678039729567812 +436 40 optimizer.lr 0.017516368064226236 +436 40 negative_sampler.num_negs_per_pos 64.0 +436 40 training.batch_size 0.0 +436 41 model.embedding_dim 2.0 +436 41 loss.margin 7.725612566694994 +436 41 optimizer.lr 0.025196757747925107 +436 41 negative_sampler.num_negs_per_pos 11.0 +436 41 training.batch_size 0.0 +436 42 model.embedding_dim 1.0 +436 42 loss.margin 8.20415088747884 +436 42 optimizer.lr 0.013969801113453209 +436 42 negative_sampler.num_negs_per_pos 17.0 +436 42 training.batch_size 1.0 +436 43 model.embedding_dim 0.0 +436 43 loss.margin 2.4863317525982334 +436 43 optimizer.lr 0.05301499431646596 +436 43 negative_sampler.num_negs_per_pos 47.0 +436 43 training.batch_size 0.0 +436 44 model.embedding_dim 0.0 +436 44 loss.margin 1.4122288211470142 +436 44 optimizer.lr 
0.023130100234746855 +436 44 negative_sampler.num_negs_per_pos 8.0 +436 44 training.batch_size 0.0 +436 45 model.embedding_dim 0.0 +436 45 loss.margin 1.828715964424033 +436 45 optimizer.lr 0.0011913933674809788 +436 45 negative_sampler.num_negs_per_pos 79.0 +436 45 training.batch_size 1.0 +436 46 model.embedding_dim 2.0 +436 46 loss.margin 3.7886904960988197 +436 46 optimizer.lr 0.004803729803178335 +436 46 negative_sampler.num_negs_per_pos 76.0 +436 46 training.batch_size 2.0 +436 1 dataset """kinships""" +436 1 model """ntn""" +436 1 loss """marginranking""" +436 1 regularizer """no""" +436 1 optimizer """adam""" +436 1 training_loop """owa""" +436 1 negative_sampler """basic""" +436 1 evaluator """rankbased""" +436 2 dataset """kinships""" +436 2 model """ntn""" +436 2 loss """marginranking""" +436 2 regularizer """no""" +436 2 optimizer """adam""" +436 2 training_loop """owa""" +436 2 negative_sampler """basic""" +436 2 evaluator """rankbased""" +436 3 dataset """kinships""" +436 3 model """ntn""" +436 3 loss """marginranking""" +436 3 regularizer """no""" +436 3 optimizer """adam""" +436 3 training_loop """owa""" +436 3 negative_sampler """basic""" +436 3 evaluator """rankbased""" +436 4 dataset """kinships""" +436 4 model """ntn""" +436 4 loss """marginranking""" +436 4 regularizer """no""" +436 4 optimizer """adam""" +436 4 training_loop """owa""" +436 4 negative_sampler """basic""" +436 4 evaluator """rankbased""" +436 5 dataset """kinships""" +436 5 model """ntn""" +436 5 loss """marginranking""" +436 5 regularizer """no""" +436 5 optimizer """adam""" +436 5 training_loop """owa""" +436 5 negative_sampler """basic""" +436 5 evaluator """rankbased""" +436 6 dataset """kinships""" +436 6 model """ntn""" +436 6 loss """marginranking""" +436 6 regularizer """no""" +436 6 optimizer """adam""" +436 6 training_loop """owa""" +436 6 negative_sampler """basic""" +436 6 evaluator """rankbased""" +436 7 dataset """kinships""" +436 7 model """ntn""" +436 7 loss 
"""marginranking""" +436 7 regularizer """no""" +436 7 optimizer """adam""" +436 7 training_loop """owa""" +436 7 negative_sampler """basic""" +436 7 evaluator """rankbased""" +436 8 dataset """kinships""" +436 8 model """ntn""" +436 8 loss """marginranking""" +436 8 regularizer """no""" +436 8 optimizer """adam""" +436 8 training_loop """owa""" +436 8 negative_sampler """basic""" +436 8 evaluator """rankbased""" +436 9 dataset """kinships""" +436 9 model """ntn""" +436 9 loss """marginranking""" +436 9 regularizer """no""" +436 9 optimizer """adam""" +436 9 training_loop """owa""" +436 9 negative_sampler """basic""" +436 9 evaluator """rankbased""" +436 10 dataset """kinships""" +436 10 model """ntn""" +436 10 loss """marginranking""" +436 10 regularizer """no""" +436 10 optimizer """adam""" +436 10 training_loop """owa""" +436 10 negative_sampler """basic""" +436 10 evaluator """rankbased""" +436 11 dataset """kinships""" +436 11 model """ntn""" +436 11 loss """marginranking""" +436 11 regularizer """no""" +436 11 optimizer """adam""" +436 11 training_loop """owa""" +436 11 negative_sampler """basic""" +436 11 evaluator """rankbased""" +436 12 dataset """kinships""" +436 12 model """ntn""" +436 12 loss """marginranking""" +436 12 regularizer """no""" +436 12 optimizer """adam""" +436 12 training_loop """owa""" +436 12 negative_sampler """basic""" +436 12 evaluator """rankbased""" +436 13 dataset """kinships""" +436 13 model """ntn""" +436 13 loss """marginranking""" +436 13 regularizer """no""" +436 13 optimizer """adam""" +436 13 training_loop """owa""" +436 13 negative_sampler """basic""" +436 13 evaluator """rankbased""" +436 14 dataset """kinships""" +436 14 model """ntn""" +436 14 loss """marginranking""" +436 14 regularizer """no""" +436 14 optimizer """adam""" +436 14 training_loop """owa""" +436 14 negative_sampler """basic""" +436 14 evaluator """rankbased""" +436 15 dataset """kinships""" +436 15 model """ntn""" +436 15 loss """marginranking""" +436 15 
regularizer """no""" +436 15 optimizer """adam""" +436 15 training_loop """owa""" +436 15 negative_sampler """basic""" +436 15 evaluator """rankbased""" +436 16 dataset """kinships""" +436 16 model """ntn""" +436 16 loss """marginranking""" +436 16 regularizer """no""" +436 16 optimizer """adam""" +436 16 training_loop """owa""" +436 16 negative_sampler """basic""" +436 16 evaluator """rankbased""" +436 17 dataset """kinships""" +436 17 model """ntn""" +436 17 loss """marginranking""" +436 17 regularizer """no""" +436 17 optimizer """adam""" +436 17 training_loop """owa""" +436 17 negative_sampler """basic""" +436 17 evaluator """rankbased""" +436 18 dataset """kinships""" +436 18 model """ntn""" +436 18 loss """marginranking""" +436 18 regularizer """no""" +436 18 optimizer """adam""" +436 18 training_loop """owa""" +436 18 negative_sampler """basic""" +436 18 evaluator """rankbased""" +436 19 dataset """kinships""" +436 19 model """ntn""" +436 19 loss """marginranking""" +436 19 regularizer """no""" +436 19 optimizer """adam""" +436 19 training_loop """owa""" +436 19 negative_sampler """basic""" +436 19 evaluator """rankbased""" +436 20 dataset """kinships""" +436 20 model """ntn""" +436 20 loss """marginranking""" +436 20 regularizer """no""" +436 20 optimizer """adam""" +436 20 training_loop """owa""" +436 20 negative_sampler """basic""" +436 20 evaluator """rankbased""" +436 21 dataset """kinships""" +436 21 model """ntn""" +436 21 loss """marginranking""" +436 21 regularizer """no""" +436 21 optimizer """adam""" +436 21 training_loop """owa""" +436 21 negative_sampler """basic""" +436 21 evaluator """rankbased""" +436 22 dataset """kinships""" +436 22 model """ntn""" +436 22 loss """marginranking""" +436 22 regularizer """no""" +436 22 optimizer """adam""" +436 22 training_loop """owa""" +436 22 negative_sampler """basic""" +436 22 evaluator """rankbased""" +436 23 dataset """kinships""" +436 23 model """ntn""" +436 23 loss """marginranking""" +436 23 
regularizer """no""" +436 23 optimizer """adam""" +436 23 training_loop """owa""" +436 23 negative_sampler """basic""" +436 23 evaluator """rankbased""" +436 24 dataset """kinships""" +436 24 model """ntn""" +436 24 loss """marginranking""" +436 24 regularizer """no""" +436 24 optimizer """adam""" +436 24 training_loop """owa""" +436 24 negative_sampler """basic""" +436 24 evaluator """rankbased""" +436 25 dataset """kinships""" +436 25 model """ntn""" +436 25 loss """marginranking""" +436 25 regularizer """no""" +436 25 optimizer """adam""" +436 25 training_loop """owa""" +436 25 negative_sampler """basic""" +436 25 evaluator """rankbased""" +436 26 dataset """kinships""" +436 26 model """ntn""" +436 26 loss """marginranking""" +436 26 regularizer """no""" +436 26 optimizer """adam""" +436 26 training_loop """owa""" +436 26 negative_sampler """basic""" +436 26 evaluator """rankbased""" +436 27 dataset """kinships""" +436 27 model """ntn""" +436 27 loss """marginranking""" +436 27 regularizer """no""" +436 27 optimizer """adam""" +436 27 training_loop """owa""" +436 27 negative_sampler """basic""" +436 27 evaluator """rankbased""" +436 28 dataset """kinships""" +436 28 model """ntn""" +436 28 loss """marginranking""" +436 28 regularizer """no""" +436 28 optimizer """adam""" +436 28 training_loop """owa""" +436 28 negative_sampler """basic""" +436 28 evaluator """rankbased""" +436 29 dataset """kinships""" +436 29 model """ntn""" +436 29 loss """marginranking""" +436 29 regularizer """no""" +436 29 optimizer """adam""" +436 29 training_loop """owa""" +436 29 negative_sampler """basic""" +436 29 evaluator """rankbased""" +436 30 dataset """kinships""" +436 30 model """ntn""" +436 30 loss """marginranking""" +436 30 regularizer """no""" +436 30 optimizer """adam""" +436 30 training_loop """owa""" +436 30 negative_sampler """basic""" +436 30 evaluator """rankbased""" +436 31 dataset """kinships""" +436 31 model """ntn""" +436 31 loss """marginranking""" +436 31 
regularizer """no""" +436 31 optimizer """adam""" +436 31 training_loop """owa""" +436 31 negative_sampler """basic""" +436 31 evaluator """rankbased""" +436 32 dataset """kinships""" +436 32 model """ntn""" +436 32 loss """marginranking""" +436 32 regularizer """no""" +436 32 optimizer """adam""" +436 32 training_loop """owa""" +436 32 negative_sampler """basic""" +436 32 evaluator """rankbased""" +436 33 dataset """kinships""" +436 33 model """ntn""" +436 33 loss """marginranking""" +436 33 regularizer """no""" +436 33 optimizer """adam""" +436 33 training_loop """owa""" +436 33 negative_sampler """basic""" +436 33 evaluator """rankbased""" +436 34 dataset """kinships""" +436 34 model """ntn""" +436 34 loss """marginranking""" +436 34 regularizer """no""" +436 34 optimizer """adam""" +436 34 training_loop """owa""" +436 34 negative_sampler """basic""" +436 34 evaluator """rankbased""" +436 35 dataset """kinships""" +436 35 model """ntn""" +436 35 loss """marginranking""" +436 35 regularizer """no""" +436 35 optimizer """adam""" +436 35 training_loop """owa""" +436 35 negative_sampler """basic""" +436 35 evaluator """rankbased""" +436 36 dataset """kinships""" +436 36 model """ntn""" +436 36 loss """marginranking""" +436 36 regularizer """no""" +436 36 optimizer """adam""" +436 36 training_loop """owa""" +436 36 negative_sampler """basic""" +436 36 evaluator """rankbased""" +436 37 dataset """kinships""" +436 37 model """ntn""" +436 37 loss """marginranking""" +436 37 regularizer """no""" +436 37 optimizer """adam""" +436 37 training_loop """owa""" +436 37 negative_sampler """basic""" +436 37 evaluator """rankbased""" +436 38 dataset """kinships""" +436 38 model """ntn""" +436 38 loss """marginranking""" +436 38 regularizer """no""" +436 38 optimizer """adam""" +436 38 training_loop """owa""" +436 38 negative_sampler """basic""" +436 38 evaluator """rankbased""" +436 39 dataset """kinships""" +436 39 model """ntn""" +436 39 loss """marginranking""" +436 39 
regularizer """no""" +436 39 optimizer """adam""" +436 39 training_loop """owa""" +436 39 negative_sampler """basic""" +436 39 evaluator """rankbased""" +436 40 dataset """kinships""" +436 40 model """ntn""" +436 40 loss """marginranking""" +436 40 regularizer """no""" +436 40 optimizer """adam""" +436 40 training_loop """owa""" +436 40 negative_sampler """basic""" +436 40 evaluator """rankbased""" +436 41 dataset """kinships""" +436 41 model """ntn""" +436 41 loss """marginranking""" +436 41 regularizer """no""" +436 41 optimizer """adam""" +436 41 training_loop """owa""" +436 41 negative_sampler """basic""" +436 41 evaluator """rankbased""" +436 42 dataset """kinships""" +436 42 model """ntn""" +436 42 loss """marginranking""" +436 42 regularizer """no""" +436 42 optimizer """adam""" +436 42 training_loop """owa""" +436 42 negative_sampler """basic""" +436 42 evaluator """rankbased""" +436 43 dataset """kinships""" +436 43 model """ntn""" +436 43 loss """marginranking""" +436 43 regularizer """no""" +436 43 optimizer """adam""" +436 43 training_loop """owa""" +436 43 negative_sampler """basic""" +436 43 evaluator """rankbased""" +436 44 dataset """kinships""" +436 44 model """ntn""" +436 44 loss """marginranking""" +436 44 regularizer """no""" +436 44 optimizer """adam""" +436 44 training_loop """owa""" +436 44 negative_sampler """basic""" +436 44 evaluator """rankbased""" +436 45 dataset """kinships""" +436 45 model """ntn""" +436 45 loss """marginranking""" +436 45 regularizer """no""" +436 45 optimizer """adam""" +436 45 training_loop """owa""" +436 45 negative_sampler """basic""" +436 45 evaluator """rankbased""" +436 46 dataset """kinships""" +436 46 model """ntn""" +436 46 loss """marginranking""" +436 46 regularizer """no""" +436 46 optimizer """adam""" +436 46 training_loop """owa""" +436 46 negative_sampler """basic""" +436 46 evaluator """rankbased""" +437 1 model.embedding_dim 1.0 +437 1 optimizer.lr 0.0012992062957770888 +437 1 
negative_sampler.num_negs_per_pos 63.0 +437 1 training.batch_size 0.0 +437 2 model.embedding_dim 1.0 +437 2 optimizer.lr 0.06575464699674281 +437 2 negative_sampler.num_negs_per_pos 83.0 +437 2 training.batch_size 0.0 +437 3 model.embedding_dim 0.0 +437 3 optimizer.lr 0.0036169717573424093 +437 3 negative_sampler.num_negs_per_pos 44.0 +437 3 training.batch_size 1.0 +437 4 model.embedding_dim 2.0 +437 4 optimizer.lr 0.0010543403648993522 +437 4 negative_sampler.num_negs_per_pos 87.0 +437 4 training.batch_size 2.0 +437 5 model.embedding_dim 1.0 +437 5 optimizer.lr 0.011223322935132472 +437 5 negative_sampler.num_negs_per_pos 66.0 +437 5 training.batch_size 1.0 +437 6 model.embedding_dim 1.0 +437 6 optimizer.lr 0.004408778087265745 +437 6 negative_sampler.num_negs_per_pos 92.0 +437 6 training.batch_size 2.0 +437 7 model.embedding_dim 0.0 +437 7 optimizer.lr 0.0021847388772603857 +437 7 negative_sampler.num_negs_per_pos 9.0 +437 7 training.batch_size 1.0 +437 8 model.embedding_dim 2.0 +437 8 optimizer.lr 0.0015922268988378097 +437 8 negative_sampler.num_negs_per_pos 36.0 +437 8 training.batch_size 1.0 +437 9 model.embedding_dim 0.0 +437 9 optimizer.lr 0.020278376900320878 +437 9 negative_sampler.num_negs_per_pos 49.0 +437 9 training.batch_size 2.0 +437 10 model.embedding_dim 0.0 +437 10 optimizer.lr 0.010096131776289655 +437 10 negative_sampler.num_negs_per_pos 1.0 +437 10 training.batch_size 2.0 +437 11 model.embedding_dim 0.0 +437 11 optimizer.lr 0.017392561480967846 +437 11 negative_sampler.num_negs_per_pos 42.0 +437 11 training.batch_size 1.0 +437 12 model.embedding_dim 1.0 +437 12 optimizer.lr 0.02999132062838412 +437 12 negative_sampler.num_negs_per_pos 78.0 +437 12 training.batch_size 1.0 +437 13 model.embedding_dim 1.0 +437 13 optimizer.lr 0.05771956953900947 +437 13 negative_sampler.num_negs_per_pos 98.0 +437 13 training.batch_size 2.0 +437 14 model.embedding_dim 1.0 +437 14 optimizer.lr 0.0071143171728123 +437 14 negative_sampler.num_negs_per_pos 67.0 +437 14 
training.batch_size 0.0 +437 15 model.embedding_dim 0.0 +437 15 optimizer.lr 0.0017466550622935442 +437 15 negative_sampler.num_negs_per_pos 90.0 +437 15 training.batch_size 2.0 +437 16 model.embedding_dim 0.0 +437 16 optimizer.lr 0.017368144617072555 +437 16 negative_sampler.num_negs_per_pos 54.0 +437 16 training.batch_size 2.0 +437 17 model.embedding_dim 0.0 +437 17 optimizer.lr 0.003932830195126486 +437 17 negative_sampler.num_negs_per_pos 34.0 +437 17 training.batch_size 2.0 +437 18 model.embedding_dim 1.0 +437 18 optimizer.lr 0.017171615175754745 +437 18 negative_sampler.num_negs_per_pos 91.0 +437 18 training.batch_size 1.0 +437 19 model.embedding_dim 1.0 +437 19 optimizer.lr 0.0028493722295223106 +437 19 negative_sampler.num_negs_per_pos 26.0 +437 19 training.batch_size 1.0 +437 20 model.embedding_dim 0.0 +437 20 optimizer.lr 0.001562176925232488 +437 20 negative_sampler.num_negs_per_pos 91.0 +437 20 training.batch_size 1.0 +437 21 model.embedding_dim 0.0 +437 21 optimizer.lr 0.0018895648266554395 +437 21 negative_sampler.num_negs_per_pos 14.0 +437 21 training.batch_size 2.0 +437 22 model.embedding_dim 2.0 +437 22 optimizer.lr 0.008580826859689389 +437 22 negative_sampler.num_negs_per_pos 29.0 +437 22 training.batch_size 2.0 +437 23 model.embedding_dim 0.0 +437 23 optimizer.lr 0.004162781967887245 +437 23 negative_sampler.num_negs_per_pos 63.0 +437 23 training.batch_size 2.0 +437 24 model.embedding_dim 1.0 +437 24 optimizer.lr 0.05558134338518085 +437 24 negative_sampler.num_negs_per_pos 27.0 +437 24 training.batch_size 0.0 +437 25 model.embedding_dim 1.0 +437 25 optimizer.lr 0.001042842446365161 +437 25 negative_sampler.num_negs_per_pos 79.0 +437 25 training.batch_size 1.0 +437 26 model.embedding_dim 2.0 +437 26 optimizer.lr 0.0011133633637169327 +437 26 negative_sampler.num_negs_per_pos 88.0 +437 26 training.batch_size 2.0 +437 27 model.embedding_dim 2.0 +437 27 optimizer.lr 0.027853891616926436 +437 27 negative_sampler.num_negs_per_pos 64.0 +437 27 
training.batch_size 0.0 +437 28 model.embedding_dim 0.0 +437 28 optimizer.lr 0.0040127374860239665 +437 28 negative_sampler.num_negs_per_pos 96.0 +437 28 training.batch_size 1.0 +437 29 model.embedding_dim 1.0 +437 29 optimizer.lr 0.05825399541290329 +437 29 negative_sampler.num_negs_per_pos 86.0 +437 29 training.batch_size 2.0 +437 30 model.embedding_dim 0.0 +437 30 optimizer.lr 0.0011902659705403183 +437 30 negative_sampler.num_negs_per_pos 94.0 +437 30 training.batch_size 2.0 +437 31 model.embedding_dim 2.0 +437 31 optimizer.lr 0.00853349353773076 +437 31 negative_sampler.num_negs_per_pos 60.0 +437 31 training.batch_size 1.0 +437 32 model.embedding_dim 2.0 +437 32 optimizer.lr 0.022464977212332295 +437 32 negative_sampler.num_negs_per_pos 58.0 +437 32 training.batch_size 0.0 +437 33 model.embedding_dim 0.0 +437 33 optimizer.lr 0.0010853967928189075 +437 33 negative_sampler.num_negs_per_pos 93.0 +437 33 training.batch_size 1.0 +437 34 model.embedding_dim 0.0 +437 34 optimizer.lr 0.002240241083292968 +437 34 negative_sampler.num_negs_per_pos 51.0 +437 34 training.batch_size 2.0 +437 35 model.embedding_dim 1.0 +437 35 optimizer.lr 0.030626419194532555 +437 35 negative_sampler.num_negs_per_pos 88.0 +437 35 training.batch_size 2.0 +437 36 model.embedding_dim 1.0 +437 36 optimizer.lr 0.0021903953729217112 +437 36 negative_sampler.num_negs_per_pos 47.0 +437 36 training.batch_size 1.0 +437 37 model.embedding_dim 0.0 +437 37 optimizer.lr 0.0032259112162794 +437 37 negative_sampler.num_negs_per_pos 64.0 +437 37 training.batch_size 1.0 +437 38 model.embedding_dim 1.0 +437 38 optimizer.lr 0.0012762535236283345 +437 38 negative_sampler.num_negs_per_pos 3.0 +437 38 training.batch_size 0.0 +437 39 model.embedding_dim 2.0 +437 39 optimizer.lr 0.008205485812415737 +437 39 negative_sampler.num_negs_per_pos 54.0 +437 39 training.batch_size 1.0 +437 40 model.embedding_dim 0.0 +437 40 optimizer.lr 0.002678242923944795 +437 40 negative_sampler.num_negs_per_pos 32.0 +437 40 
training.batch_size 0.0 +437 41 model.embedding_dim 0.0 +437 41 optimizer.lr 0.04627463569294779 +437 41 negative_sampler.num_negs_per_pos 46.0 +437 41 training.batch_size 1.0 +437 42 model.embedding_dim 2.0 +437 42 optimizer.lr 0.038788009912143935 +437 42 negative_sampler.num_negs_per_pos 56.0 +437 42 training.batch_size 1.0 +437 43 model.embedding_dim 1.0 +437 43 optimizer.lr 0.007168717162270272 +437 43 negative_sampler.num_negs_per_pos 30.0 +437 43 training.batch_size 2.0 +437 44 model.embedding_dim 2.0 +437 44 optimizer.lr 0.006204717299595348 +437 44 negative_sampler.num_negs_per_pos 2.0 +437 44 training.batch_size 2.0 +437 45 model.embedding_dim 1.0 +437 45 optimizer.lr 0.0011179182287859612 +437 45 negative_sampler.num_negs_per_pos 98.0 +437 45 training.batch_size 1.0 +437 46 model.embedding_dim 1.0 +437 46 optimizer.lr 0.007622162548651198 +437 46 negative_sampler.num_negs_per_pos 27.0 +437 46 training.batch_size 0.0 +437 47 model.embedding_dim 2.0 +437 47 optimizer.lr 0.0013339387442820629 +437 47 negative_sampler.num_negs_per_pos 20.0 +437 47 training.batch_size 2.0 +437 1 dataset """kinships""" +437 1 model """ntn""" +437 1 loss """bceaftersigmoid""" +437 1 regularizer """no""" +437 1 optimizer """adam""" +437 1 training_loop """owa""" +437 1 negative_sampler """basic""" +437 1 evaluator """rankbased""" +437 2 dataset """kinships""" +437 2 model """ntn""" +437 2 loss """bceaftersigmoid""" +437 2 regularizer """no""" +437 2 optimizer """adam""" +437 2 training_loop """owa""" +437 2 negative_sampler """basic""" +437 2 evaluator """rankbased""" +437 3 dataset """kinships""" +437 3 model """ntn""" +437 3 loss """bceaftersigmoid""" +437 3 regularizer """no""" +437 3 optimizer """adam""" +437 3 training_loop """owa""" +437 3 negative_sampler """basic""" +437 3 evaluator """rankbased""" +437 4 dataset """kinships""" +437 4 model """ntn""" +437 4 loss """bceaftersigmoid""" +437 4 regularizer """no""" +437 4 optimizer """adam""" +437 4 training_loop """owa""" 
+437 4 negative_sampler """basic""" +437 4 evaluator """rankbased""" +437 5 dataset """kinships""" +437 5 model """ntn""" +437 5 loss """bceaftersigmoid""" +437 5 regularizer """no""" +437 5 optimizer """adam""" +437 5 training_loop """owa""" +437 5 negative_sampler """basic""" +437 5 evaluator """rankbased""" +437 6 dataset """kinships""" +437 6 model """ntn""" +437 6 loss """bceaftersigmoid""" +437 6 regularizer """no""" +437 6 optimizer """adam""" +437 6 training_loop """owa""" +437 6 negative_sampler """basic""" +437 6 evaluator """rankbased""" +437 7 dataset """kinships""" +437 7 model """ntn""" +437 7 loss """bceaftersigmoid""" +437 7 regularizer """no""" +437 7 optimizer """adam""" +437 7 training_loop """owa""" +437 7 negative_sampler """basic""" +437 7 evaluator """rankbased""" +437 8 dataset """kinships""" +437 8 model """ntn""" +437 8 loss """bceaftersigmoid""" +437 8 regularizer """no""" +437 8 optimizer """adam""" +437 8 training_loop """owa""" +437 8 negative_sampler """basic""" +437 8 evaluator """rankbased""" +437 9 dataset """kinships""" +437 9 model """ntn""" +437 9 loss """bceaftersigmoid""" +437 9 regularizer """no""" +437 9 optimizer """adam""" +437 9 training_loop """owa""" +437 9 negative_sampler """basic""" +437 9 evaluator """rankbased""" +437 10 dataset """kinships""" +437 10 model """ntn""" +437 10 loss """bceaftersigmoid""" +437 10 regularizer """no""" +437 10 optimizer """adam""" +437 10 training_loop """owa""" +437 10 negative_sampler """basic""" +437 10 evaluator """rankbased""" +437 11 dataset """kinships""" +437 11 model """ntn""" +437 11 loss """bceaftersigmoid""" +437 11 regularizer """no""" +437 11 optimizer """adam""" +437 11 training_loop """owa""" +437 11 negative_sampler """basic""" +437 11 evaluator """rankbased""" +437 12 dataset """kinships""" +437 12 model """ntn""" +437 12 loss """bceaftersigmoid""" +437 12 regularizer """no""" +437 12 optimizer """adam""" +437 12 training_loop """owa""" +437 12 negative_sampler 
"""basic""" +437 12 evaluator """rankbased""" +437 13 dataset """kinships""" +437 13 model """ntn""" +437 13 loss """bceaftersigmoid""" +437 13 regularizer """no""" +437 13 optimizer """adam""" +437 13 training_loop """owa""" +437 13 negative_sampler """basic""" +437 13 evaluator """rankbased""" +437 14 dataset """kinships""" +437 14 model """ntn""" +437 14 loss """bceaftersigmoid""" +437 14 regularizer """no""" +437 14 optimizer """adam""" +437 14 training_loop """owa""" +437 14 negative_sampler """basic""" +437 14 evaluator """rankbased""" +437 15 dataset """kinships""" +437 15 model """ntn""" +437 15 loss """bceaftersigmoid""" +437 15 regularizer """no""" +437 15 optimizer """adam""" +437 15 training_loop """owa""" +437 15 negative_sampler """basic""" +437 15 evaluator """rankbased""" +437 16 dataset """kinships""" +437 16 model """ntn""" +437 16 loss """bceaftersigmoid""" +437 16 regularizer """no""" +437 16 optimizer """adam""" +437 16 training_loop """owa""" +437 16 negative_sampler """basic""" +437 16 evaluator """rankbased""" +437 17 dataset """kinships""" +437 17 model """ntn""" +437 17 loss """bceaftersigmoid""" +437 17 regularizer """no""" +437 17 optimizer """adam""" +437 17 training_loop """owa""" +437 17 negative_sampler """basic""" +437 17 evaluator """rankbased""" +437 18 dataset """kinships""" +437 18 model """ntn""" +437 18 loss """bceaftersigmoid""" +437 18 regularizer """no""" +437 18 optimizer """adam""" +437 18 training_loop """owa""" +437 18 negative_sampler """basic""" +437 18 evaluator """rankbased""" +437 19 dataset """kinships""" +437 19 model """ntn""" +437 19 loss """bceaftersigmoid""" +437 19 regularizer """no""" +437 19 optimizer """adam""" +437 19 training_loop """owa""" +437 19 negative_sampler """basic""" +437 19 evaluator """rankbased""" +437 20 dataset """kinships""" +437 20 model """ntn""" +437 20 loss """bceaftersigmoid""" +437 20 regularizer """no""" +437 20 optimizer """adam""" +437 20 training_loop """owa""" +437 20 
negative_sampler """basic""" +437 20 evaluator """rankbased""" +437 21 dataset """kinships""" +437 21 model """ntn""" +437 21 loss """bceaftersigmoid""" +437 21 regularizer """no""" +437 21 optimizer """adam""" +437 21 training_loop """owa""" +437 21 negative_sampler """basic""" +437 21 evaluator """rankbased""" +437 22 dataset """kinships""" +437 22 model """ntn""" +437 22 loss """bceaftersigmoid""" +437 22 regularizer """no""" +437 22 optimizer """adam""" +437 22 training_loop """owa""" +437 22 negative_sampler """basic""" +437 22 evaluator """rankbased""" +437 23 dataset """kinships""" +437 23 model """ntn""" +437 23 loss """bceaftersigmoid""" +437 23 regularizer """no""" +437 23 optimizer """adam""" +437 23 training_loop """owa""" +437 23 negative_sampler """basic""" +437 23 evaluator """rankbased""" +437 24 dataset """kinships""" +437 24 model """ntn""" +437 24 loss """bceaftersigmoid""" +437 24 regularizer """no""" +437 24 optimizer """adam""" +437 24 training_loop """owa""" +437 24 negative_sampler """basic""" +437 24 evaluator """rankbased""" +437 25 dataset """kinships""" +437 25 model """ntn""" +437 25 loss """bceaftersigmoid""" +437 25 regularizer """no""" +437 25 optimizer """adam""" +437 25 training_loop """owa""" +437 25 negative_sampler """basic""" +437 25 evaluator """rankbased""" +437 26 dataset """kinships""" +437 26 model """ntn""" +437 26 loss """bceaftersigmoid""" +437 26 regularizer """no""" +437 26 optimizer """adam""" +437 26 training_loop """owa""" +437 26 negative_sampler """basic""" +437 26 evaluator """rankbased""" +437 27 dataset """kinships""" +437 27 model """ntn""" +437 27 loss """bceaftersigmoid""" +437 27 regularizer """no""" +437 27 optimizer """adam""" +437 27 training_loop """owa""" +437 27 negative_sampler """basic""" +437 27 evaluator """rankbased""" +437 28 dataset """kinships""" +437 28 model """ntn""" +437 28 loss """bceaftersigmoid""" +437 28 regularizer """no""" +437 28 optimizer """adam""" +437 28 training_loop """owa""" 
+437 28 negative_sampler """basic""" +437 28 evaluator """rankbased""" +437 29 dataset """kinships""" +437 29 model """ntn""" +437 29 loss """bceaftersigmoid""" +437 29 regularizer """no""" +437 29 optimizer """adam""" +437 29 training_loop """owa""" +437 29 negative_sampler """basic""" +437 29 evaluator """rankbased""" +437 30 dataset """kinships""" +437 30 model """ntn""" +437 30 loss """bceaftersigmoid""" +437 30 regularizer """no""" +437 30 optimizer """adam""" +437 30 training_loop """owa""" +437 30 negative_sampler """basic""" +437 30 evaluator """rankbased""" +437 31 dataset """kinships""" +437 31 model """ntn""" +437 31 loss """bceaftersigmoid""" +437 31 regularizer """no""" +437 31 optimizer """adam""" +437 31 training_loop """owa""" +437 31 negative_sampler """basic""" +437 31 evaluator """rankbased""" +437 32 dataset """kinships""" +437 32 model """ntn""" +437 32 loss """bceaftersigmoid""" +437 32 regularizer """no""" +437 32 optimizer """adam""" +437 32 training_loop """owa""" +437 32 negative_sampler """basic""" +437 32 evaluator """rankbased""" +437 33 dataset """kinships""" +437 33 model """ntn""" +437 33 loss """bceaftersigmoid""" +437 33 regularizer """no""" +437 33 optimizer """adam""" +437 33 training_loop """owa""" +437 33 negative_sampler """basic""" +437 33 evaluator """rankbased""" +437 34 dataset """kinships""" +437 34 model """ntn""" +437 34 loss """bceaftersigmoid""" +437 34 regularizer """no""" +437 34 optimizer """adam""" +437 34 training_loop """owa""" +437 34 negative_sampler """basic""" +437 34 evaluator """rankbased""" +437 35 dataset """kinships""" +437 35 model """ntn""" +437 35 loss """bceaftersigmoid""" +437 35 regularizer """no""" +437 35 optimizer """adam""" +437 35 training_loop """owa""" +437 35 negative_sampler """basic""" +437 35 evaluator """rankbased""" +437 36 dataset """kinships""" +437 36 model """ntn""" +437 36 loss """bceaftersigmoid""" +437 36 regularizer """no""" +437 36 optimizer """adam""" +437 36 training_loop 
"""owa""" +437 36 negative_sampler """basic""" +437 36 evaluator """rankbased""" +437 37 dataset """kinships""" +437 37 model """ntn""" +437 37 loss """bceaftersigmoid""" +437 37 regularizer """no""" +437 37 optimizer """adam""" +437 37 training_loop """owa""" +437 37 negative_sampler """basic""" +437 37 evaluator """rankbased""" +437 38 dataset """kinships""" +437 38 model """ntn""" +437 38 loss """bceaftersigmoid""" +437 38 regularizer """no""" +437 38 optimizer """adam""" +437 38 training_loop """owa""" +437 38 negative_sampler """basic""" +437 38 evaluator """rankbased""" +437 39 dataset """kinships""" +437 39 model """ntn""" +437 39 loss """bceaftersigmoid""" +437 39 regularizer """no""" +437 39 optimizer """adam""" +437 39 training_loop """owa""" +437 39 negative_sampler """basic""" +437 39 evaluator """rankbased""" +437 40 dataset """kinships""" +437 40 model """ntn""" +437 40 loss """bceaftersigmoid""" +437 40 regularizer """no""" +437 40 optimizer """adam""" +437 40 training_loop """owa""" +437 40 negative_sampler """basic""" +437 40 evaluator """rankbased""" +437 41 dataset """kinships""" +437 41 model """ntn""" +437 41 loss """bceaftersigmoid""" +437 41 regularizer """no""" +437 41 optimizer """adam""" +437 41 training_loop """owa""" +437 41 negative_sampler """basic""" +437 41 evaluator """rankbased""" +437 42 dataset """kinships""" +437 42 model """ntn""" +437 42 loss """bceaftersigmoid""" +437 42 regularizer """no""" +437 42 optimizer """adam""" +437 42 training_loop """owa""" +437 42 negative_sampler """basic""" +437 42 evaluator """rankbased""" +437 43 dataset """kinships""" +437 43 model """ntn""" +437 43 loss """bceaftersigmoid""" +437 43 regularizer """no""" +437 43 optimizer """adam""" +437 43 training_loop """owa""" +437 43 negative_sampler """basic""" +437 43 evaluator """rankbased""" +437 44 dataset """kinships""" +437 44 model """ntn""" +437 44 loss """bceaftersigmoid""" +437 44 regularizer """no""" +437 44 optimizer """adam""" +437 44 
training_loop """owa""" +437 44 negative_sampler """basic""" +437 44 evaluator """rankbased""" +437 45 dataset """kinships""" +437 45 model """ntn""" +437 45 loss """bceaftersigmoid""" +437 45 regularizer """no""" +437 45 optimizer """adam""" +437 45 training_loop """owa""" +437 45 negative_sampler """basic""" +437 45 evaluator """rankbased""" +437 46 dataset """kinships""" +437 46 model """ntn""" +437 46 loss """bceaftersigmoid""" +437 46 regularizer """no""" +437 46 optimizer """adam""" +437 46 training_loop """owa""" +437 46 negative_sampler """basic""" +437 46 evaluator """rankbased""" +437 47 dataset """kinships""" +437 47 model """ntn""" +437 47 loss """bceaftersigmoid""" +437 47 regularizer """no""" +437 47 optimizer """adam""" +437 47 training_loop """owa""" +437 47 negative_sampler """basic""" +437 47 evaluator """rankbased""" +438 1 model.embedding_dim 1.0 +438 1 optimizer.lr 0.013950083943339709 +438 1 negative_sampler.num_negs_per_pos 95.0 +438 1 training.batch_size 1.0 +438 2 model.embedding_dim 2.0 +438 2 optimizer.lr 0.0023283163764288776 +438 2 negative_sampler.num_negs_per_pos 17.0 +438 2 training.batch_size 0.0 +438 3 model.embedding_dim 2.0 +438 3 optimizer.lr 0.0749044720227809 +438 3 negative_sampler.num_negs_per_pos 0.0 +438 3 training.batch_size 2.0 +438 4 model.embedding_dim 2.0 +438 4 optimizer.lr 0.03451399424354462 +438 4 negative_sampler.num_negs_per_pos 0.0 +438 4 training.batch_size 1.0 +438 5 model.embedding_dim 2.0 +438 5 optimizer.lr 0.0011419282895399876 +438 5 negative_sampler.num_negs_per_pos 48.0 +438 5 training.batch_size 1.0 +438 6 model.embedding_dim 2.0 +438 6 optimizer.lr 0.0011923715705456127 +438 6 negative_sampler.num_negs_per_pos 42.0 +438 6 training.batch_size 1.0 +438 7 model.embedding_dim 1.0 +438 7 optimizer.lr 0.009127534075116062 +438 7 negative_sampler.num_negs_per_pos 64.0 +438 7 training.batch_size 0.0 +438 8 model.embedding_dim 2.0 +438 8 optimizer.lr 0.031509648672725946 +438 8 
negative_sampler.num_negs_per_pos 95.0 +438 8 training.batch_size 1.0 +438 9 model.embedding_dim 0.0 +438 9 optimizer.lr 0.008211709906268249 +438 9 negative_sampler.num_negs_per_pos 54.0 +438 9 training.batch_size 2.0 +438 10 model.embedding_dim 2.0 +438 10 optimizer.lr 0.007996754862529723 +438 10 negative_sampler.num_negs_per_pos 19.0 +438 10 training.batch_size 1.0 +438 11 model.embedding_dim 0.0 +438 11 optimizer.lr 0.006900876069296846 +438 11 negative_sampler.num_negs_per_pos 63.0 +438 11 training.batch_size 1.0 +438 12 model.embedding_dim 2.0 +438 12 optimizer.lr 0.044742158152611934 +438 12 negative_sampler.num_negs_per_pos 41.0 +438 12 training.batch_size 2.0 +438 13 model.embedding_dim 1.0 +438 13 optimizer.lr 0.008350158286054302 +438 13 negative_sampler.num_negs_per_pos 59.0 +438 13 training.batch_size 1.0 +438 14 model.embedding_dim 2.0 +438 14 optimizer.lr 0.001417182536410996 +438 14 negative_sampler.num_negs_per_pos 97.0 +438 14 training.batch_size 2.0 +438 15 model.embedding_dim 0.0 +438 15 optimizer.lr 0.006549117217191472 +438 15 negative_sampler.num_negs_per_pos 97.0 +438 15 training.batch_size 2.0 +438 16 model.embedding_dim 2.0 +438 16 optimizer.lr 0.0024691629324972587 +438 16 negative_sampler.num_negs_per_pos 55.0 +438 16 training.batch_size 0.0 +438 17 model.embedding_dim 1.0 +438 17 optimizer.lr 0.002066796073944754 +438 17 negative_sampler.num_negs_per_pos 82.0 +438 17 training.batch_size 0.0 +438 18 model.embedding_dim 2.0 +438 18 optimizer.lr 0.05029440692406593 +438 18 negative_sampler.num_negs_per_pos 20.0 +438 18 training.batch_size 1.0 +438 19 model.embedding_dim 1.0 +438 19 optimizer.lr 0.00557742970818338 +438 19 negative_sampler.num_negs_per_pos 18.0 +438 19 training.batch_size 0.0 +438 20 model.embedding_dim 2.0 +438 20 optimizer.lr 0.04246657450854014 +438 20 negative_sampler.num_negs_per_pos 99.0 +438 20 training.batch_size 0.0 +438 21 model.embedding_dim 2.0 +438 21 optimizer.lr 0.0010897625283762438 +438 21 
negative_sampler.num_negs_per_pos 86.0 +438 21 training.batch_size 1.0 +438 22 model.embedding_dim 1.0 +438 22 optimizer.lr 0.05770804407934252 +438 22 negative_sampler.num_negs_per_pos 58.0 +438 22 training.batch_size 0.0 +438 23 model.embedding_dim 0.0 +438 23 optimizer.lr 0.01521743390693372 +438 23 negative_sampler.num_negs_per_pos 81.0 +438 23 training.batch_size 0.0 +438 24 model.embedding_dim 2.0 +438 24 optimizer.lr 0.005122608725949995 +438 24 negative_sampler.num_negs_per_pos 50.0 +438 24 training.batch_size 2.0 +438 25 model.embedding_dim 2.0 +438 25 optimizer.lr 0.002339592666743683 +438 25 negative_sampler.num_negs_per_pos 74.0 +438 25 training.batch_size 2.0 +438 26 model.embedding_dim 1.0 +438 26 optimizer.lr 0.03945894529072216 +438 26 negative_sampler.num_negs_per_pos 10.0 +438 26 training.batch_size 0.0 +438 27 model.embedding_dim 0.0 +438 27 optimizer.lr 0.011208039750341276 +438 27 negative_sampler.num_negs_per_pos 37.0 +438 27 training.batch_size 0.0 +438 28 model.embedding_dim 1.0 +438 28 optimizer.lr 0.09916161570843794 +438 28 negative_sampler.num_negs_per_pos 98.0 +438 28 training.batch_size 2.0 +438 29 model.embedding_dim 0.0 +438 29 optimizer.lr 0.0012421175332722708 +438 29 negative_sampler.num_negs_per_pos 14.0 +438 29 training.batch_size 0.0 +438 1 dataset """kinships""" +438 1 model """ntn""" +438 1 loss """softplus""" +438 1 regularizer """no""" +438 1 optimizer """adam""" +438 1 training_loop """owa""" +438 1 negative_sampler """basic""" +438 1 evaluator """rankbased""" +438 2 dataset """kinships""" +438 2 model """ntn""" +438 2 loss """softplus""" +438 2 regularizer """no""" +438 2 optimizer """adam""" +438 2 training_loop """owa""" +438 2 negative_sampler """basic""" +438 2 evaluator """rankbased""" +438 3 dataset """kinships""" +438 3 model """ntn""" +438 3 loss """softplus""" +438 3 regularizer """no""" +438 3 optimizer """adam""" +438 3 training_loop """owa""" +438 3 negative_sampler """basic""" +438 3 evaluator """rankbased""" 
+438 4 dataset """kinships""" +438 4 model """ntn""" +438 4 loss """softplus""" +438 4 regularizer """no""" +438 4 optimizer """adam""" +438 4 training_loop """owa""" +438 4 negative_sampler """basic""" +438 4 evaluator """rankbased""" +438 5 dataset """kinships""" +438 5 model """ntn""" +438 5 loss """softplus""" +438 5 regularizer """no""" +438 5 optimizer """adam""" +438 5 training_loop """owa""" +438 5 negative_sampler """basic""" +438 5 evaluator """rankbased""" +438 6 dataset """kinships""" +438 6 model """ntn""" +438 6 loss """softplus""" +438 6 regularizer """no""" +438 6 optimizer """adam""" +438 6 training_loop """owa""" +438 6 negative_sampler """basic""" +438 6 evaluator """rankbased""" +438 7 dataset """kinships""" +438 7 model """ntn""" +438 7 loss """softplus""" +438 7 regularizer """no""" +438 7 optimizer """adam""" +438 7 training_loop """owa""" +438 7 negative_sampler """basic""" +438 7 evaluator """rankbased""" +438 8 dataset """kinships""" +438 8 model """ntn""" +438 8 loss """softplus""" +438 8 regularizer """no""" +438 8 optimizer """adam""" +438 8 training_loop """owa""" +438 8 negative_sampler """basic""" +438 8 evaluator """rankbased""" +438 9 dataset """kinships""" +438 9 model """ntn""" +438 9 loss """softplus""" +438 9 regularizer """no""" +438 9 optimizer """adam""" +438 9 training_loop """owa""" +438 9 negative_sampler """basic""" +438 9 evaluator """rankbased""" +438 10 dataset """kinships""" +438 10 model """ntn""" +438 10 loss """softplus""" +438 10 regularizer """no""" +438 10 optimizer """adam""" +438 10 training_loop """owa""" +438 10 negative_sampler """basic""" +438 10 evaluator """rankbased""" +438 11 dataset """kinships""" +438 11 model """ntn""" +438 11 loss """softplus""" +438 11 regularizer """no""" +438 11 optimizer """adam""" +438 11 training_loop """owa""" +438 11 negative_sampler """basic""" +438 11 evaluator """rankbased""" +438 12 dataset """kinships""" +438 12 model """ntn""" +438 12 loss """softplus""" +438 12 
regularizer """no""" +438 12 optimizer """adam""" +438 12 training_loop """owa""" +438 12 negative_sampler """basic""" +438 12 evaluator """rankbased""" +438 13 dataset """kinships""" +438 13 model """ntn""" +438 13 loss """softplus""" +438 13 regularizer """no""" +438 13 optimizer """adam""" +438 13 training_loop """owa""" +438 13 negative_sampler """basic""" +438 13 evaluator """rankbased""" +438 14 dataset """kinships""" +438 14 model """ntn""" +438 14 loss """softplus""" +438 14 regularizer """no""" +438 14 optimizer """adam""" +438 14 training_loop """owa""" +438 14 negative_sampler """basic""" +438 14 evaluator """rankbased""" +438 15 dataset """kinships""" +438 15 model """ntn""" +438 15 loss """softplus""" +438 15 regularizer """no""" +438 15 optimizer """adam""" +438 15 training_loop """owa""" +438 15 negative_sampler """basic""" +438 15 evaluator """rankbased""" +438 16 dataset """kinships""" +438 16 model """ntn""" +438 16 loss """softplus""" +438 16 regularizer """no""" +438 16 optimizer """adam""" +438 16 training_loop """owa""" +438 16 negative_sampler """basic""" +438 16 evaluator """rankbased""" +438 17 dataset """kinships""" +438 17 model """ntn""" +438 17 loss """softplus""" +438 17 regularizer """no""" +438 17 optimizer """adam""" +438 17 training_loop """owa""" +438 17 negative_sampler """basic""" +438 17 evaluator """rankbased""" +438 18 dataset """kinships""" +438 18 model """ntn""" +438 18 loss """softplus""" +438 18 regularizer """no""" +438 18 optimizer """adam""" +438 18 training_loop """owa""" +438 18 negative_sampler """basic""" +438 18 evaluator """rankbased""" +438 19 dataset """kinships""" +438 19 model """ntn""" +438 19 loss """softplus""" +438 19 regularizer """no""" +438 19 optimizer """adam""" +438 19 training_loop """owa""" +438 19 negative_sampler """basic""" +438 19 evaluator """rankbased""" +438 20 dataset """kinships""" +438 20 model """ntn""" +438 20 loss """softplus""" +438 20 regularizer """no""" +438 20 optimizer 
"""adam""" +438 20 training_loop """owa""" +438 20 negative_sampler """basic""" +438 20 evaluator """rankbased""" +438 21 dataset """kinships""" +438 21 model """ntn""" +438 21 loss """softplus""" +438 21 regularizer """no""" +438 21 optimizer """adam""" +438 21 training_loop """owa""" +438 21 negative_sampler """basic""" +438 21 evaluator """rankbased""" +438 22 dataset """kinships""" +438 22 model """ntn""" +438 22 loss """softplus""" +438 22 regularizer """no""" +438 22 optimizer """adam""" +438 22 training_loop """owa""" +438 22 negative_sampler """basic""" +438 22 evaluator """rankbased""" +438 23 dataset """kinships""" +438 23 model """ntn""" +438 23 loss """softplus""" +438 23 regularizer """no""" +438 23 optimizer """adam""" +438 23 training_loop """owa""" +438 23 negative_sampler """basic""" +438 23 evaluator """rankbased""" +438 24 dataset """kinships""" +438 24 model """ntn""" +438 24 loss """softplus""" +438 24 regularizer """no""" +438 24 optimizer """adam""" +438 24 training_loop """owa""" +438 24 negative_sampler """basic""" +438 24 evaluator """rankbased""" +438 25 dataset """kinships""" +438 25 model """ntn""" +438 25 loss """softplus""" +438 25 regularizer """no""" +438 25 optimizer """adam""" +438 25 training_loop """owa""" +438 25 negative_sampler """basic""" +438 25 evaluator """rankbased""" +438 26 dataset """kinships""" +438 26 model """ntn""" +438 26 loss """softplus""" +438 26 regularizer """no""" +438 26 optimizer """adam""" +438 26 training_loop """owa""" +438 26 negative_sampler """basic""" +438 26 evaluator """rankbased""" +438 27 dataset """kinships""" +438 27 model """ntn""" +438 27 loss """softplus""" +438 27 regularizer """no""" +438 27 optimizer """adam""" +438 27 training_loop """owa""" +438 27 negative_sampler """basic""" +438 27 evaluator """rankbased""" +438 28 dataset """kinships""" +438 28 model """ntn""" +438 28 loss """softplus""" +438 28 regularizer """no""" +438 28 optimizer """adam""" +438 28 training_loop """owa""" +438 
28 negative_sampler """basic""" +438 28 evaluator """rankbased""" +438 29 dataset """kinships""" +438 29 model """ntn""" +438 29 loss """softplus""" +438 29 regularizer """no""" +438 29 optimizer """adam""" +438 29 training_loop """owa""" +438 29 negative_sampler """basic""" +438 29 evaluator """rankbased""" +439 1 model.embedding_dim 2.0 +439 1 optimizer.lr 0.011802889285839576 +439 1 negative_sampler.num_negs_per_pos 62.0 +439 1 training.batch_size 0.0 +439 2 model.embedding_dim 2.0 +439 2 optimizer.lr 0.012051405315803928 +439 2 negative_sampler.num_negs_per_pos 5.0 +439 2 training.batch_size 1.0 +439 3 model.embedding_dim 2.0 +439 3 optimizer.lr 0.0015959048811237013 +439 3 negative_sampler.num_negs_per_pos 90.0 +439 3 training.batch_size 2.0 +439 4 model.embedding_dim 2.0 +439 4 optimizer.lr 0.0010532518127088434 +439 4 negative_sampler.num_negs_per_pos 57.0 +439 4 training.batch_size 1.0 +439 5 model.embedding_dim 0.0 +439 5 optimizer.lr 0.0022670721056680158 +439 5 negative_sampler.num_negs_per_pos 47.0 +439 5 training.batch_size 2.0 +439 6 model.embedding_dim 1.0 +439 6 optimizer.lr 0.08263226051849651 +439 6 negative_sampler.num_negs_per_pos 7.0 +439 6 training.batch_size 0.0 +439 7 model.embedding_dim 0.0 +439 7 optimizer.lr 0.011703725325001246 +439 7 negative_sampler.num_negs_per_pos 15.0 +439 7 training.batch_size 2.0 +439 8 model.embedding_dim 1.0 +439 8 optimizer.lr 0.03980965058341343 +439 8 negative_sampler.num_negs_per_pos 1.0 +439 8 training.batch_size 0.0 +439 9 model.embedding_dim 1.0 +439 9 optimizer.lr 0.001335458589449706 +439 9 negative_sampler.num_negs_per_pos 34.0 +439 9 training.batch_size 1.0 +439 10 model.embedding_dim 0.0 +439 10 optimizer.lr 0.007925347674233084 +439 10 negative_sampler.num_negs_per_pos 95.0 +439 10 training.batch_size 2.0 +439 11 model.embedding_dim 1.0 +439 11 optimizer.lr 0.027756226498599525 +439 11 negative_sampler.num_negs_per_pos 75.0 +439 11 training.batch_size 1.0 +439 12 model.embedding_dim 1.0 +439 12 
optimizer.lr 0.005078581321121111 +439 12 negative_sampler.num_negs_per_pos 30.0 +439 12 training.batch_size 0.0 +439 13 model.embedding_dim 1.0 +439 13 optimizer.lr 0.09183580619163903 +439 13 negative_sampler.num_negs_per_pos 44.0 +439 13 training.batch_size 1.0 +439 14 model.embedding_dim 0.0 +439 14 optimizer.lr 0.0021098650648069925 +439 14 negative_sampler.num_negs_per_pos 66.0 +439 14 training.batch_size 0.0 +439 15 model.embedding_dim 0.0 +439 15 optimizer.lr 0.06739181553631302 +439 15 negative_sampler.num_negs_per_pos 27.0 +439 15 training.batch_size 0.0 +439 16 model.embedding_dim 1.0 +439 16 optimizer.lr 0.030427434484194528 +439 16 negative_sampler.num_negs_per_pos 35.0 +439 16 training.batch_size 2.0 +439 17 model.embedding_dim 1.0 +439 17 optimizer.lr 0.00915224885802631 +439 17 negative_sampler.num_negs_per_pos 38.0 +439 17 training.batch_size 1.0 +439 18 model.embedding_dim 2.0 +439 18 optimizer.lr 0.017228785387959616 +439 18 negative_sampler.num_negs_per_pos 25.0 +439 18 training.batch_size 0.0 +439 19 model.embedding_dim 0.0 +439 19 optimizer.lr 0.00626082304857163 +439 19 negative_sampler.num_negs_per_pos 8.0 +439 19 training.batch_size 1.0 +439 20 model.embedding_dim 0.0 +439 20 optimizer.lr 0.016594917515611134 +439 20 negative_sampler.num_negs_per_pos 96.0 +439 20 training.batch_size 2.0 +439 21 model.embedding_dim 2.0 +439 21 optimizer.lr 0.0011460711782820614 +439 21 negative_sampler.num_negs_per_pos 32.0 +439 21 training.batch_size 0.0 +439 22 model.embedding_dim 2.0 +439 22 optimizer.lr 0.015418948955966086 +439 22 negative_sampler.num_negs_per_pos 38.0 +439 22 training.batch_size 2.0 +439 23 model.embedding_dim 0.0 +439 23 optimizer.lr 0.03258803690079032 +439 23 negative_sampler.num_negs_per_pos 78.0 +439 23 training.batch_size 0.0 +439 24 model.embedding_dim 2.0 +439 24 optimizer.lr 0.01177522206451949 +439 24 negative_sampler.num_negs_per_pos 64.0 +439 24 training.batch_size 1.0 +439 25 model.embedding_dim 1.0 +439 25 optimizer.lr 
0.020586551610952773 +439 25 negative_sampler.num_negs_per_pos 72.0 +439 25 training.batch_size 0.0 +439 26 model.embedding_dim 1.0 +439 26 optimizer.lr 0.0076084510146168394 +439 26 negative_sampler.num_negs_per_pos 53.0 +439 26 training.batch_size 1.0 +439 27 model.embedding_dim 1.0 +439 27 optimizer.lr 0.08936676696390067 +439 27 negative_sampler.num_negs_per_pos 88.0 +439 27 training.batch_size 1.0 +439 28 model.embedding_dim 2.0 +439 28 optimizer.lr 0.003710817526234829 +439 28 negative_sampler.num_negs_per_pos 98.0 +439 28 training.batch_size 0.0 +439 29 model.embedding_dim 2.0 +439 29 optimizer.lr 0.03273163538347698 +439 29 negative_sampler.num_negs_per_pos 71.0 +439 29 training.batch_size 1.0 +439 30 model.embedding_dim 0.0 +439 30 optimizer.lr 0.0059124182874476655 +439 30 negative_sampler.num_negs_per_pos 23.0 +439 30 training.batch_size 1.0 +439 31 model.embedding_dim 2.0 +439 31 optimizer.lr 0.0026973488979926087 +439 31 negative_sampler.num_negs_per_pos 46.0 +439 31 training.batch_size 2.0 +439 32 model.embedding_dim 1.0 +439 32 optimizer.lr 0.003736266813594709 +439 32 negative_sampler.num_negs_per_pos 95.0 +439 32 training.batch_size 0.0 +439 33 model.embedding_dim 2.0 +439 33 optimizer.lr 0.0033461353216235326 +439 33 negative_sampler.num_negs_per_pos 92.0 +439 33 training.batch_size 1.0 +439 34 model.embedding_dim 0.0 +439 34 optimizer.lr 0.009817451494036228 +439 34 negative_sampler.num_negs_per_pos 37.0 +439 34 training.batch_size 2.0 +439 35 model.embedding_dim 2.0 +439 35 optimizer.lr 0.031535543637100504 +439 35 negative_sampler.num_negs_per_pos 46.0 +439 35 training.batch_size 2.0 +439 36 model.embedding_dim 1.0 +439 36 optimizer.lr 0.006273486054635501 +439 36 negative_sampler.num_negs_per_pos 5.0 +439 36 training.batch_size 1.0 +439 37 model.embedding_dim 1.0 +439 37 optimizer.lr 0.07868583383281674 +439 37 negative_sampler.num_negs_per_pos 76.0 +439 37 training.batch_size 1.0 +439 38 model.embedding_dim 2.0 +439 38 optimizer.lr 
0.07778221117992237 +439 38 negative_sampler.num_negs_per_pos 55.0 +439 38 training.batch_size 0.0 +439 39 model.embedding_dim 0.0 +439 39 optimizer.lr 0.0020719220339765394 +439 39 negative_sampler.num_negs_per_pos 54.0 +439 39 training.batch_size 1.0 +439 40 model.embedding_dim 1.0 +439 40 optimizer.lr 0.029535736600782722 +439 40 negative_sampler.num_negs_per_pos 87.0 +439 40 training.batch_size 0.0 +439 41 model.embedding_dim 2.0 +439 41 optimizer.lr 0.02038890348868269 +439 41 negative_sampler.num_negs_per_pos 72.0 +439 41 training.batch_size 1.0 +439 42 model.embedding_dim 1.0 +439 42 optimizer.lr 0.0010636363913319382 +439 42 negative_sampler.num_negs_per_pos 62.0 +439 42 training.batch_size 1.0 +439 43 model.embedding_dim 2.0 +439 43 optimizer.lr 0.02582538351461569 +439 43 negative_sampler.num_negs_per_pos 11.0 +439 43 training.batch_size 0.0 +439 44 model.embedding_dim 1.0 +439 44 optimizer.lr 0.0012849155494700471 +439 44 negative_sampler.num_negs_per_pos 26.0 +439 44 training.batch_size 2.0 +439 45 model.embedding_dim 0.0 +439 45 optimizer.lr 0.09052531928422496 +439 45 negative_sampler.num_negs_per_pos 46.0 +439 45 training.batch_size 1.0 +439 46 model.embedding_dim 1.0 +439 46 optimizer.lr 0.020045862193634703 +439 46 negative_sampler.num_negs_per_pos 86.0 +439 46 training.batch_size 2.0 +439 47 model.embedding_dim 1.0 +439 47 optimizer.lr 0.05478589293056654 +439 47 negative_sampler.num_negs_per_pos 75.0 +439 47 training.batch_size 1.0 +439 48 model.embedding_dim 0.0 +439 48 optimizer.lr 0.021228912983808516 +439 48 negative_sampler.num_negs_per_pos 66.0 +439 48 training.batch_size 1.0 +439 49 model.embedding_dim 1.0 +439 49 optimizer.lr 0.01969912195575404 +439 49 negative_sampler.num_negs_per_pos 50.0 +439 49 training.batch_size 1.0 +439 50 model.embedding_dim 1.0 +439 50 optimizer.lr 0.007978361471769417 +439 50 negative_sampler.num_negs_per_pos 42.0 +439 50 training.batch_size 1.0 +439 51 model.embedding_dim 1.0 +439 51 optimizer.lr 
0.003673788914104472 +439 51 negative_sampler.num_negs_per_pos 40.0 +439 51 training.batch_size 2.0 +439 52 model.embedding_dim 0.0 +439 52 optimizer.lr 0.045139322330911544 +439 52 negative_sampler.num_negs_per_pos 19.0 +439 52 training.batch_size 2.0 +439 53 model.embedding_dim 0.0 +439 53 optimizer.lr 0.024766697069975366 +439 53 negative_sampler.num_negs_per_pos 88.0 +439 53 training.batch_size 1.0 +439 54 model.embedding_dim 0.0 +439 54 optimizer.lr 0.008192988356720483 +439 54 negative_sampler.num_negs_per_pos 4.0 +439 54 training.batch_size 2.0 +439 55 model.embedding_dim 2.0 +439 55 optimizer.lr 0.004334526493821162 +439 55 negative_sampler.num_negs_per_pos 85.0 +439 55 training.batch_size 2.0 +439 56 model.embedding_dim 0.0 +439 56 optimizer.lr 0.0026913389316863255 +439 56 negative_sampler.num_negs_per_pos 85.0 +439 56 training.batch_size 1.0 +439 57 model.embedding_dim 1.0 +439 57 optimizer.lr 0.002006761236067494 +439 57 negative_sampler.num_negs_per_pos 97.0 +439 57 training.batch_size 1.0 +439 58 model.embedding_dim 1.0 +439 58 optimizer.lr 0.0017400266810111886 +439 58 negative_sampler.num_negs_per_pos 39.0 +439 58 training.batch_size 0.0 +439 59 model.embedding_dim 2.0 +439 59 optimizer.lr 0.03847987999276578 +439 59 negative_sampler.num_negs_per_pos 56.0 +439 59 training.batch_size 0.0 +439 60 model.embedding_dim 2.0 +439 60 optimizer.lr 0.004812839390210128 +439 60 negative_sampler.num_negs_per_pos 67.0 +439 60 training.batch_size 2.0 +439 61 model.embedding_dim 2.0 +439 61 optimizer.lr 0.002182331759677652 +439 61 negative_sampler.num_negs_per_pos 26.0 +439 61 training.batch_size 0.0 +439 62 model.embedding_dim 1.0 +439 62 optimizer.lr 0.0021364714288913088 +439 62 negative_sampler.num_negs_per_pos 3.0 +439 62 training.batch_size 0.0 +439 63 model.embedding_dim 2.0 +439 63 optimizer.lr 0.007814296543348786 +439 63 negative_sampler.num_negs_per_pos 82.0 +439 63 training.batch_size 2.0 +439 64 model.embedding_dim 0.0 +439 64 optimizer.lr 
0.025556537502430074 +439 64 negative_sampler.num_negs_per_pos 70.0 +439 64 training.batch_size 2.0 +439 65 model.embedding_dim 2.0 +439 65 optimizer.lr 0.01458312138494491 +439 65 negative_sampler.num_negs_per_pos 51.0 +439 65 training.batch_size 1.0 +439 66 model.embedding_dim 2.0 +439 66 optimizer.lr 0.001129407980843235 +439 66 negative_sampler.num_negs_per_pos 12.0 +439 66 training.batch_size 2.0 +439 67 model.embedding_dim 0.0 +439 67 optimizer.lr 0.07801646556096982 +439 67 negative_sampler.num_negs_per_pos 78.0 +439 67 training.batch_size 1.0 +439 68 model.embedding_dim 2.0 +439 68 optimizer.lr 0.0011486932284973455 +439 68 negative_sampler.num_negs_per_pos 4.0 +439 68 training.batch_size 0.0 +439 69 model.embedding_dim 0.0 +439 69 optimizer.lr 0.0014644862057680938 +439 69 negative_sampler.num_negs_per_pos 48.0 +439 69 training.batch_size 2.0 +439 70 model.embedding_dim 2.0 +439 70 optimizer.lr 0.029170449540925285 +439 70 negative_sampler.num_negs_per_pos 87.0 +439 70 training.batch_size 1.0 +439 71 model.embedding_dim 1.0 +439 71 optimizer.lr 0.021917824206226687 +439 71 negative_sampler.num_negs_per_pos 81.0 +439 71 training.batch_size 1.0 +439 72 model.embedding_dim 1.0 +439 72 optimizer.lr 0.09163214729973242 +439 72 negative_sampler.num_negs_per_pos 17.0 +439 72 training.batch_size 1.0 +439 73 model.embedding_dim 0.0 +439 73 optimizer.lr 0.038859205019697356 +439 73 negative_sampler.num_negs_per_pos 13.0 +439 73 training.batch_size 1.0 +439 74 model.embedding_dim 0.0 +439 74 optimizer.lr 0.002793555234554595 +439 74 negative_sampler.num_negs_per_pos 40.0 +439 74 training.batch_size 2.0 +439 75 model.embedding_dim 1.0 +439 75 optimizer.lr 0.002774738839224062 +439 75 negative_sampler.num_negs_per_pos 64.0 +439 75 training.batch_size 0.0 +439 76 model.embedding_dim 1.0 +439 76 optimizer.lr 0.0023158826718660355 +439 76 negative_sampler.num_negs_per_pos 62.0 +439 76 training.batch_size 2.0 +439 77 model.embedding_dim 1.0 +439 77 optimizer.lr 
0.0013008571190281392 +439 77 negative_sampler.num_negs_per_pos 63.0 +439 77 training.batch_size 0.0 +439 78 model.embedding_dim 2.0 +439 78 optimizer.lr 0.003931117400285786 +439 78 negative_sampler.num_negs_per_pos 96.0 +439 78 training.batch_size 2.0 +439 79 model.embedding_dim 1.0 +439 79 optimizer.lr 0.0062305642620975165 +439 79 negative_sampler.num_negs_per_pos 13.0 +439 79 training.batch_size 2.0 +439 80 model.embedding_dim 1.0 +439 80 optimizer.lr 0.011163952289274644 +439 80 negative_sampler.num_negs_per_pos 24.0 +439 80 training.batch_size 0.0 +439 81 model.embedding_dim 0.0 +439 81 optimizer.lr 0.08552324760949985 +439 81 negative_sampler.num_negs_per_pos 4.0 +439 81 training.batch_size 0.0 +439 82 model.embedding_dim 2.0 +439 82 optimizer.lr 0.012773988782245869 +439 82 negative_sampler.num_negs_per_pos 11.0 +439 82 training.batch_size 0.0 +439 83 model.embedding_dim 0.0 +439 83 optimizer.lr 0.04579375879127102 +439 83 negative_sampler.num_negs_per_pos 81.0 +439 83 training.batch_size 1.0 +439 84 model.embedding_dim 2.0 +439 84 optimizer.lr 0.016329329138709096 +439 84 negative_sampler.num_negs_per_pos 26.0 +439 84 training.batch_size 2.0 +439 85 model.embedding_dim 1.0 +439 85 optimizer.lr 0.018389576746635834 +439 85 negative_sampler.num_negs_per_pos 76.0 +439 85 training.batch_size 2.0 +439 86 model.embedding_dim 0.0 +439 86 optimizer.lr 0.047169077413055656 +439 86 negative_sampler.num_negs_per_pos 47.0 +439 86 training.batch_size 2.0 +439 87 model.embedding_dim 1.0 +439 87 optimizer.lr 0.0013644465320535374 +439 87 negative_sampler.num_negs_per_pos 33.0 +439 87 training.batch_size 1.0 +439 88 model.embedding_dim 1.0 +439 88 optimizer.lr 0.004713187018415309 +439 88 negative_sampler.num_negs_per_pos 66.0 +439 88 training.batch_size 1.0 +439 89 model.embedding_dim 0.0 +439 89 optimizer.lr 0.0016271018910740856 +439 89 negative_sampler.num_negs_per_pos 27.0 +439 89 training.batch_size 0.0 +439 90 model.embedding_dim 0.0 +439 90 optimizer.lr 
0.038251145566785105 +439 90 negative_sampler.num_negs_per_pos 11.0 +439 90 training.batch_size 0.0 +439 91 model.embedding_dim 1.0 +439 91 optimizer.lr 0.007589585231889369 +439 91 negative_sampler.num_negs_per_pos 40.0 +439 91 training.batch_size 1.0 +439 92 model.embedding_dim 0.0 +439 92 optimizer.lr 0.08012608731504625 +439 92 negative_sampler.num_negs_per_pos 9.0 +439 92 training.batch_size 0.0 +439 93 model.embedding_dim 2.0 +439 93 optimizer.lr 0.0018614581255278861 +439 93 negative_sampler.num_negs_per_pos 57.0 +439 93 training.batch_size 0.0 +439 94 model.embedding_dim 0.0 +439 94 optimizer.lr 0.04085731700988295 +439 94 negative_sampler.num_negs_per_pos 85.0 +439 94 training.batch_size 1.0 +439 95 model.embedding_dim 2.0 +439 95 optimizer.lr 0.03638799217746786 +439 95 negative_sampler.num_negs_per_pos 14.0 +439 95 training.batch_size 0.0 +439 1 dataset """kinships""" +439 1 model """ntn""" +439 1 loss """bceaftersigmoid""" +439 1 regularizer """no""" +439 1 optimizer """adam""" +439 1 training_loop """owa""" +439 1 negative_sampler """basic""" +439 1 evaluator """rankbased""" +439 2 dataset """kinships""" +439 2 model """ntn""" +439 2 loss """bceaftersigmoid""" +439 2 regularizer """no""" +439 2 optimizer """adam""" +439 2 training_loop """owa""" +439 2 negative_sampler """basic""" +439 2 evaluator """rankbased""" +439 3 dataset """kinships""" +439 3 model """ntn""" +439 3 loss """bceaftersigmoid""" +439 3 regularizer """no""" +439 3 optimizer """adam""" +439 3 training_loop """owa""" +439 3 negative_sampler """basic""" +439 3 evaluator """rankbased""" +439 4 dataset """kinships""" +439 4 model """ntn""" +439 4 loss """bceaftersigmoid""" +439 4 regularizer """no""" +439 4 optimizer """adam""" +439 4 training_loop """owa""" +439 4 negative_sampler """basic""" +439 4 evaluator """rankbased""" +439 5 dataset """kinships""" +439 5 model """ntn""" +439 5 loss """bceaftersigmoid""" +439 5 regularizer """no""" +439 5 optimizer """adam""" +439 5 training_loop 
"""owa""" +439 5 negative_sampler """basic""" +439 5 evaluator """rankbased""" +439 6 dataset """kinships""" +439 6 model """ntn""" +439 6 loss """bceaftersigmoid""" +439 6 regularizer """no""" +439 6 optimizer """adam""" +439 6 training_loop """owa""" +439 6 negative_sampler """basic""" +439 6 evaluator """rankbased""" +439 7 dataset """kinships""" +439 7 model """ntn""" +439 7 loss """bceaftersigmoid""" +439 7 regularizer """no""" +439 7 optimizer """adam""" +439 7 training_loop """owa""" +439 7 negative_sampler """basic""" +439 7 evaluator """rankbased""" +439 8 dataset """kinships""" +439 8 model """ntn""" +439 8 loss """bceaftersigmoid""" +439 8 regularizer """no""" +439 8 optimizer """adam""" +439 8 training_loop """owa""" +439 8 negative_sampler """basic""" +439 8 evaluator """rankbased""" +439 9 dataset """kinships""" +439 9 model """ntn""" +439 9 loss """bceaftersigmoid""" +439 9 regularizer """no""" +439 9 optimizer """adam""" +439 9 training_loop """owa""" +439 9 negative_sampler """basic""" +439 9 evaluator """rankbased""" +439 10 dataset """kinships""" +439 10 model """ntn""" +439 10 loss """bceaftersigmoid""" +439 10 regularizer """no""" +439 10 optimizer """adam""" +439 10 training_loop """owa""" +439 10 negative_sampler """basic""" +439 10 evaluator """rankbased""" +439 11 dataset """kinships""" +439 11 model """ntn""" +439 11 loss """bceaftersigmoid""" +439 11 regularizer """no""" +439 11 optimizer """adam""" +439 11 training_loop """owa""" +439 11 negative_sampler """basic""" +439 11 evaluator """rankbased""" +439 12 dataset """kinships""" +439 12 model """ntn""" +439 12 loss """bceaftersigmoid""" +439 12 regularizer """no""" +439 12 optimizer """adam""" +439 12 training_loop """owa""" +439 12 negative_sampler """basic""" +439 12 evaluator """rankbased""" +439 13 dataset """kinships""" +439 13 model """ntn""" +439 13 loss """bceaftersigmoid""" +439 13 regularizer """no""" +439 13 optimizer """adam""" +439 13 training_loop """owa""" +439 13 
negative_sampler """basic""" +439 13 evaluator """rankbased""" +439 14 dataset """kinships""" +439 14 model """ntn""" +439 14 loss """bceaftersigmoid""" +439 14 regularizer """no""" +439 14 optimizer """adam""" +439 14 training_loop """owa""" +439 14 negative_sampler """basic""" +439 14 evaluator """rankbased""" +439 15 dataset """kinships""" +439 15 model """ntn""" +439 15 loss """bceaftersigmoid""" +439 15 regularizer """no""" +439 15 optimizer """adam""" +439 15 training_loop """owa""" +439 15 negative_sampler """basic""" +439 15 evaluator """rankbased""" +439 16 dataset """kinships""" +439 16 model """ntn""" +439 16 loss """bceaftersigmoid""" +439 16 regularizer """no""" +439 16 optimizer """adam""" +439 16 training_loop """owa""" +439 16 negative_sampler """basic""" +439 16 evaluator """rankbased""" +439 17 dataset """kinships""" +439 17 model """ntn""" +439 17 loss """bceaftersigmoid""" +439 17 regularizer """no""" +439 17 optimizer """adam""" +439 17 training_loop """owa""" +439 17 negative_sampler """basic""" +439 17 evaluator """rankbased""" +439 18 dataset """kinships""" +439 18 model """ntn""" +439 18 loss """bceaftersigmoid""" +439 18 regularizer """no""" +439 18 optimizer """adam""" +439 18 training_loop """owa""" +439 18 negative_sampler """basic""" +439 18 evaluator """rankbased""" +439 19 dataset """kinships""" +439 19 model """ntn""" +439 19 loss """bceaftersigmoid""" +439 19 regularizer """no""" +439 19 optimizer """adam""" +439 19 training_loop """owa""" +439 19 negative_sampler """basic""" +439 19 evaluator """rankbased""" +439 20 dataset """kinships""" +439 20 model """ntn""" +439 20 loss """bceaftersigmoid""" +439 20 regularizer """no""" +439 20 optimizer """adam""" +439 20 training_loop """owa""" +439 20 negative_sampler """basic""" +439 20 evaluator """rankbased""" +439 21 dataset """kinships""" +439 21 model """ntn""" +439 21 loss """bceaftersigmoid""" +439 21 regularizer """no""" +439 21 optimizer """adam""" +439 21 training_loop """owa""" 
+439 21 negative_sampler """basic""" +439 21 evaluator """rankbased""" +439 22 dataset """kinships""" +439 22 model """ntn""" +439 22 loss """bceaftersigmoid""" +439 22 regularizer """no""" +439 22 optimizer """adam""" +439 22 training_loop """owa""" +439 22 negative_sampler """basic""" +439 22 evaluator """rankbased""" +439 23 dataset """kinships""" +439 23 model """ntn""" +439 23 loss """bceaftersigmoid""" +439 23 regularizer """no""" +439 23 optimizer """adam""" +439 23 training_loop """owa""" +439 23 negative_sampler """basic""" +439 23 evaluator """rankbased""" +439 24 dataset """kinships""" +439 24 model """ntn""" +439 24 loss """bceaftersigmoid""" +439 24 regularizer """no""" +439 24 optimizer """adam""" +439 24 training_loop """owa""" +439 24 negative_sampler """basic""" +439 24 evaluator """rankbased""" +439 25 dataset """kinships""" +439 25 model """ntn""" +439 25 loss """bceaftersigmoid""" +439 25 regularizer """no""" +439 25 optimizer """adam""" +439 25 training_loop """owa""" +439 25 negative_sampler """basic""" +439 25 evaluator """rankbased""" +439 26 dataset """kinships""" +439 26 model """ntn""" +439 26 loss """bceaftersigmoid""" +439 26 regularizer """no""" +439 26 optimizer """adam""" +439 26 training_loop """owa""" +439 26 negative_sampler """basic""" +439 26 evaluator """rankbased""" +439 27 dataset """kinships""" +439 27 model """ntn""" +439 27 loss """bceaftersigmoid""" +439 27 regularizer """no""" +439 27 optimizer """adam""" +439 27 training_loop """owa""" +439 27 negative_sampler """basic""" +439 27 evaluator """rankbased""" +439 28 dataset """kinships""" +439 28 model """ntn""" +439 28 loss """bceaftersigmoid""" +439 28 regularizer """no""" +439 28 optimizer """adam""" +439 28 training_loop """owa""" +439 28 negative_sampler """basic""" +439 28 evaluator """rankbased""" +439 29 dataset """kinships""" +439 29 model """ntn""" +439 29 loss """bceaftersigmoid""" +439 29 regularizer """no""" +439 29 optimizer """adam""" +439 29 training_loop 
"""owa""" +439 29 negative_sampler """basic""" +439 29 evaluator """rankbased""" +439 30 dataset """kinships""" +439 30 model """ntn""" +439 30 loss """bceaftersigmoid""" +439 30 regularizer """no""" +439 30 optimizer """adam""" +439 30 training_loop """owa""" +439 30 negative_sampler """basic""" +439 30 evaluator """rankbased""" +439 31 dataset """kinships""" +439 31 model """ntn""" +439 31 loss """bceaftersigmoid""" +439 31 regularizer """no""" +439 31 optimizer """adam""" +439 31 training_loop """owa""" +439 31 negative_sampler """basic""" +439 31 evaluator """rankbased""" +439 32 dataset """kinships""" +439 32 model """ntn""" +439 32 loss """bceaftersigmoid""" +439 32 regularizer """no""" +439 32 optimizer """adam""" +439 32 training_loop """owa""" +439 32 negative_sampler """basic""" +439 32 evaluator """rankbased""" +439 33 dataset """kinships""" +439 33 model """ntn""" +439 33 loss """bceaftersigmoid""" +439 33 regularizer """no""" +439 33 optimizer """adam""" +439 33 training_loop """owa""" +439 33 negative_sampler """basic""" +439 33 evaluator """rankbased""" +439 34 dataset """kinships""" +439 34 model """ntn""" +439 34 loss """bceaftersigmoid""" +439 34 regularizer """no""" +439 34 optimizer """adam""" +439 34 training_loop """owa""" +439 34 negative_sampler """basic""" +439 34 evaluator """rankbased""" +439 35 dataset """kinships""" +439 35 model """ntn""" +439 35 loss """bceaftersigmoid""" +439 35 regularizer """no""" +439 35 optimizer """adam""" +439 35 training_loop """owa""" +439 35 negative_sampler """basic""" +439 35 evaluator """rankbased""" +439 36 dataset """kinships""" +439 36 model """ntn""" +439 36 loss """bceaftersigmoid""" +439 36 regularizer """no""" +439 36 optimizer """adam""" +439 36 training_loop """owa""" +439 36 negative_sampler """basic""" +439 36 evaluator """rankbased""" +439 37 dataset """kinships""" +439 37 model """ntn""" +439 37 loss """bceaftersigmoid""" +439 37 regularizer """no""" +439 37 optimizer """adam""" +439 37 
training_loop """owa""" +439 37 negative_sampler """basic""" +439 37 evaluator """rankbased""" +439 38 dataset """kinships""" +439 38 model """ntn""" +439 38 loss """bceaftersigmoid""" +439 38 regularizer """no""" +439 38 optimizer """adam""" +439 38 training_loop """owa""" +439 38 negative_sampler """basic""" +439 38 evaluator """rankbased""" +439 39 dataset """kinships""" +439 39 model """ntn""" +439 39 loss """bceaftersigmoid""" +439 39 regularizer """no""" +439 39 optimizer """adam""" +439 39 training_loop """owa""" +439 39 negative_sampler """basic""" +439 39 evaluator """rankbased""" +439 40 dataset """kinships""" +439 40 model """ntn""" +439 40 loss """bceaftersigmoid""" +439 40 regularizer """no""" +439 40 optimizer """adam""" +439 40 training_loop """owa""" +439 40 negative_sampler """basic""" +439 40 evaluator """rankbased""" +439 41 dataset """kinships""" +439 41 model """ntn""" +439 41 loss """bceaftersigmoid""" +439 41 regularizer """no""" +439 41 optimizer """adam""" +439 41 training_loop """owa""" +439 41 negative_sampler """basic""" +439 41 evaluator """rankbased""" +439 42 dataset """kinships""" +439 42 model """ntn""" +439 42 loss """bceaftersigmoid""" +439 42 regularizer """no""" +439 42 optimizer """adam""" +439 42 training_loop """owa""" +439 42 negative_sampler """basic""" +439 42 evaluator """rankbased""" +439 43 dataset """kinships""" +439 43 model """ntn""" +439 43 loss """bceaftersigmoid""" +439 43 regularizer """no""" +439 43 optimizer """adam""" +439 43 training_loop """owa""" +439 43 negative_sampler """basic""" +439 43 evaluator """rankbased""" +439 44 dataset """kinships""" +439 44 model """ntn""" +439 44 loss """bceaftersigmoid""" +439 44 regularizer """no""" +439 44 optimizer """adam""" +439 44 training_loop """owa""" +439 44 negative_sampler """basic""" +439 44 evaluator """rankbased""" +439 45 dataset """kinships""" +439 45 model """ntn""" +439 45 loss """bceaftersigmoid""" +439 45 regularizer """no""" +439 45 optimizer """adam""" 
+439 45 training_loop """owa""" +439 45 negative_sampler """basic""" +439 45 evaluator """rankbased""" +439 46 dataset """kinships""" +439 46 model """ntn""" +439 46 loss """bceaftersigmoid""" +439 46 regularizer """no""" +439 46 optimizer """adam""" +439 46 training_loop """owa""" +439 46 negative_sampler """basic""" +439 46 evaluator """rankbased""" +439 47 dataset """kinships""" +439 47 model """ntn""" +439 47 loss """bceaftersigmoid""" +439 47 regularizer """no""" +439 47 optimizer """adam""" +439 47 training_loop """owa""" +439 47 negative_sampler """basic""" +439 47 evaluator """rankbased""" +439 48 dataset """kinships""" +439 48 model """ntn""" +439 48 loss """bceaftersigmoid""" +439 48 regularizer """no""" +439 48 optimizer """adam""" +439 48 training_loop """owa""" +439 48 negative_sampler """basic""" +439 48 evaluator """rankbased""" +439 49 dataset """kinships""" +439 49 model """ntn""" +439 49 loss """bceaftersigmoid""" +439 49 regularizer """no""" +439 49 optimizer """adam""" +439 49 training_loop """owa""" +439 49 negative_sampler """basic""" +439 49 evaluator """rankbased""" +439 50 dataset """kinships""" +439 50 model """ntn""" +439 50 loss """bceaftersigmoid""" +439 50 regularizer """no""" +439 50 optimizer """adam""" +439 50 training_loop """owa""" +439 50 negative_sampler """basic""" +439 50 evaluator """rankbased""" +439 51 dataset """kinships""" +439 51 model """ntn""" +439 51 loss """bceaftersigmoid""" +439 51 regularizer """no""" +439 51 optimizer """adam""" +439 51 training_loop """owa""" +439 51 negative_sampler """basic""" +439 51 evaluator """rankbased""" +439 52 dataset """kinships""" +439 52 model """ntn""" +439 52 loss """bceaftersigmoid""" +439 52 regularizer """no""" +439 52 optimizer """adam""" +439 52 training_loop """owa""" +439 52 negative_sampler """basic""" +439 52 evaluator """rankbased""" +439 53 dataset """kinships""" +439 53 model """ntn""" +439 53 loss """bceaftersigmoid""" +439 53 regularizer """no""" +439 53 optimizer 
"""adam""" +439 53 training_loop """owa""" +439 53 negative_sampler """basic""" +439 53 evaluator """rankbased""" +439 54 dataset """kinships""" +439 54 model """ntn""" +439 54 loss """bceaftersigmoid""" +439 54 regularizer """no""" +439 54 optimizer """adam""" +439 54 training_loop """owa""" +439 54 negative_sampler """basic""" +439 54 evaluator """rankbased""" +439 55 dataset """kinships""" +439 55 model """ntn""" +439 55 loss """bceaftersigmoid""" +439 55 regularizer """no""" +439 55 optimizer """adam""" +439 55 training_loop """owa""" +439 55 negative_sampler """basic""" +439 55 evaluator """rankbased""" +439 56 dataset """kinships""" +439 56 model """ntn""" +439 56 loss """bceaftersigmoid""" +439 56 regularizer """no""" +439 56 optimizer """adam""" +439 56 training_loop """owa""" +439 56 negative_sampler """basic""" +439 56 evaluator """rankbased""" +439 57 dataset """kinships""" +439 57 model """ntn""" +439 57 loss """bceaftersigmoid""" +439 57 regularizer """no""" +439 57 optimizer """adam""" +439 57 training_loop """owa""" +439 57 negative_sampler """basic""" +439 57 evaluator """rankbased""" +439 58 dataset """kinships""" +439 58 model """ntn""" +439 58 loss """bceaftersigmoid""" +439 58 regularizer """no""" +439 58 optimizer """adam""" +439 58 training_loop """owa""" +439 58 negative_sampler """basic""" +439 58 evaluator """rankbased""" +439 59 dataset """kinships""" +439 59 model """ntn""" +439 59 loss """bceaftersigmoid""" +439 59 regularizer """no""" +439 59 optimizer """adam""" +439 59 training_loop """owa""" +439 59 negative_sampler """basic""" +439 59 evaluator """rankbased""" +439 60 dataset """kinships""" +439 60 model """ntn""" +439 60 loss """bceaftersigmoid""" +439 60 regularizer """no""" +439 60 optimizer """adam""" +439 60 training_loop """owa""" +439 60 negative_sampler """basic""" +439 60 evaluator """rankbased""" +439 61 dataset """kinships""" +439 61 model """ntn""" +439 61 loss """bceaftersigmoid""" +439 61 regularizer """no""" +439 61 
optimizer """adam""" +439 61 training_loop """owa""" +439 61 negative_sampler """basic""" +439 61 evaluator """rankbased""" +439 62 dataset """kinships""" +439 62 model """ntn""" +439 62 loss """bceaftersigmoid""" +439 62 regularizer """no""" +439 62 optimizer """adam""" +439 62 training_loop """owa""" +439 62 negative_sampler """basic""" +439 62 evaluator """rankbased""" +439 63 dataset """kinships""" +439 63 model """ntn""" +439 63 loss """bceaftersigmoid""" +439 63 regularizer """no""" +439 63 optimizer """adam""" +439 63 training_loop """owa""" +439 63 negative_sampler """basic""" +439 63 evaluator """rankbased""" +439 64 dataset """kinships""" +439 64 model """ntn""" +439 64 loss """bceaftersigmoid""" +439 64 regularizer """no""" +439 64 optimizer """adam""" +439 64 training_loop """owa""" +439 64 negative_sampler """basic""" +439 64 evaluator """rankbased""" +439 65 dataset """kinships""" +439 65 model """ntn""" +439 65 loss """bceaftersigmoid""" +439 65 regularizer """no""" +439 65 optimizer """adam""" +439 65 training_loop """owa""" +439 65 negative_sampler """basic""" +439 65 evaluator """rankbased""" +439 66 dataset """kinships""" +439 66 model """ntn""" +439 66 loss """bceaftersigmoid""" +439 66 regularizer """no""" +439 66 optimizer """adam""" +439 66 training_loop """owa""" +439 66 negative_sampler """basic""" +439 66 evaluator """rankbased""" +439 67 dataset """kinships""" +439 67 model """ntn""" +439 67 loss """bceaftersigmoid""" +439 67 regularizer """no""" +439 67 optimizer """adam""" +439 67 training_loop """owa""" +439 67 negative_sampler """basic""" +439 67 evaluator """rankbased""" +439 68 dataset """kinships""" +439 68 model """ntn""" +439 68 loss """bceaftersigmoid""" +439 68 regularizer """no""" +439 68 optimizer """adam""" +439 68 training_loop """owa""" +439 68 negative_sampler """basic""" +439 68 evaluator """rankbased""" +439 69 dataset """kinships""" +439 69 model """ntn""" +439 69 loss """bceaftersigmoid""" +439 69 regularizer """no""" 
+439 69 optimizer """adam""" +439 69 training_loop """owa""" +439 69 negative_sampler """basic""" +439 69 evaluator """rankbased""" +439 70 dataset """kinships""" +439 70 model """ntn""" +439 70 loss """bceaftersigmoid""" +439 70 regularizer """no""" +439 70 optimizer """adam""" +439 70 training_loop """owa""" +439 70 negative_sampler """basic""" +439 70 evaluator """rankbased""" +439 71 dataset """kinships""" +439 71 model """ntn""" +439 71 loss """bceaftersigmoid""" +439 71 regularizer """no""" +439 71 optimizer """adam""" +439 71 training_loop """owa""" +439 71 negative_sampler """basic""" +439 71 evaluator """rankbased""" +439 72 dataset """kinships""" +439 72 model """ntn""" +439 72 loss """bceaftersigmoid""" +439 72 regularizer """no""" +439 72 optimizer """adam""" +439 72 training_loop """owa""" +439 72 negative_sampler """basic""" +439 72 evaluator """rankbased""" +439 73 dataset """kinships""" +439 73 model """ntn""" +439 73 loss """bceaftersigmoid""" +439 73 regularizer """no""" +439 73 optimizer """adam""" +439 73 training_loop """owa""" +439 73 negative_sampler """basic""" +439 73 evaluator """rankbased""" +439 74 dataset """kinships""" +439 74 model """ntn""" +439 74 loss """bceaftersigmoid""" +439 74 regularizer """no""" +439 74 optimizer """adam""" +439 74 training_loop """owa""" +439 74 negative_sampler """basic""" +439 74 evaluator """rankbased""" +439 75 dataset """kinships""" +439 75 model """ntn""" +439 75 loss """bceaftersigmoid""" +439 75 regularizer """no""" +439 75 optimizer """adam""" +439 75 training_loop """owa""" +439 75 negative_sampler """basic""" +439 75 evaluator """rankbased""" +439 76 dataset """kinships""" +439 76 model """ntn""" +439 76 loss """bceaftersigmoid""" +439 76 regularizer """no""" +439 76 optimizer """adam""" +439 76 training_loop """owa""" +439 76 negative_sampler """basic""" +439 76 evaluator """rankbased""" +439 77 dataset """kinships""" +439 77 model """ntn""" +439 77 loss """bceaftersigmoid""" +439 77 regularizer 
"""no""" +439 77 optimizer """adam""" +439 77 training_loop """owa""" +439 77 negative_sampler """basic""" +439 77 evaluator """rankbased""" +439 78 dataset """kinships""" +439 78 model """ntn""" +439 78 loss """bceaftersigmoid""" +439 78 regularizer """no""" +439 78 optimizer """adam""" +439 78 training_loop """owa""" +439 78 negative_sampler """basic""" +439 78 evaluator """rankbased""" +439 79 dataset """kinships""" +439 79 model """ntn""" +439 79 loss """bceaftersigmoid""" +439 79 regularizer """no""" +439 79 optimizer """adam""" +439 79 training_loop """owa""" +439 79 negative_sampler """basic""" +439 79 evaluator """rankbased""" +439 80 dataset """kinships""" +439 80 model """ntn""" +439 80 loss """bceaftersigmoid""" +439 80 regularizer """no""" +439 80 optimizer """adam""" +439 80 training_loop """owa""" +439 80 negative_sampler """basic""" +439 80 evaluator """rankbased""" +439 81 dataset """kinships""" +439 81 model """ntn""" +439 81 loss """bceaftersigmoid""" +439 81 regularizer """no""" +439 81 optimizer """adam""" +439 81 training_loop """owa""" +439 81 negative_sampler """basic""" +439 81 evaluator """rankbased""" +439 82 dataset """kinships""" +439 82 model """ntn""" +439 82 loss """bceaftersigmoid""" +439 82 regularizer """no""" +439 82 optimizer """adam""" +439 82 training_loop """owa""" +439 82 negative_sampler """basic""" +439 82 evaluator """rankbased""" +439 83 dataset """kinships""" +439 83 model """ntn""" +439 83 loss """bceaftersigmoid""" +439 83 regularizer """no""" +439 83 optimizer """adam""" +439 83 training_loop """owa""" +439 83 negative_sampler """basic""" +439 83 evaluator """rankbased""" +439 84 dataset """kinships""" +439 84 model """ntn""" +439 84 loss """bceaftersigmoid""" +439 84 regularizer """no""" +439 84 optimizer """adam""" +439 84 training_loop """owa""" +439 84 negative_sampler """basic""" +439 84 evaluator """rankbased""" +439 85 dataset """kinships""" +439 85 model """ntn""" +439 85 loss """bceaftersigmoid""" +439 85 
regularizer """no""" +439 85 optimizer """adam""" +439 85 training_loop """owa""" +439 85 negative_sampler """basic""" +439 85 evaluator """rankbased""" +439 86 dataset """kinships""" +439 86 model """ntn""" +439 86 loss """bceaftersigmoid""" +439 86 regularizer """no""" +439 86 optimizer """adam""" +439 86 training_loop """owa""" +439 86 negative_sampler """basic""" +439 86 evaluator """rankbased""" +439 87 dataset """kinships""" +439 87 model """ntn""" +439 87 loss """bceaftersigmoid""" +439 87 regularizer """no""" +439 87 optimizer """adam""" +439 87 training_loop """owa""" +439 87 negative_sampler """basic""" +439 87 evaluator """rankbased""" +439 88 dataset """kinships""" +439 88 model """ntn""" +439 88 loss """bceaftersigmoid""" +439 88 regularizer """no""" +439 88 optimizer """adam""" +439 88 training_loop """owa""" +439 88 negative_sampler """basic""" +439 88 evaluator """rankbased""" +439 89 dataset """kinships""" +439 89 model """ntn""" +439 89 loss """bceaftersigmoid""" +439 89 regularizer """no""" +439 89 optimizer """adam""" +439 89 training_loop """owa""" +439 89 negative_sampler """basic""" +439 89 evaluator """rankbased""" +439 90 dataset """kinships""" +439 90 model """ntn""" +439 90 loss """bceaftersigmoid""" +439 90 regularizer """no""" +439 90 optimizer """adam""" +439 90 training_loop """owa""" +439 90 negative_sampler """basic""" +439 90 evaluator """rankbased""" +439 91 dataset """kinships""" +439 91 model """ntn""" +439 91 loss """bceaftersigmoid""" +439 91 regularizer """no""" +439 91 optimizer """adam""" +439 91 training_loop """owa""" +439 91 negative_sampler """basic""" +439 91 evaluator """rankbased""" +439 92 dataset """kinships""" +439 92 model """ntn""" +439 92 loss """bceaftersigmoid""" +439 92 regularizer """no""" +439 92 optimizer """adam""" +439 92 training_loop """owa""" +439 92 negative_sampler """basic""" +439 92 evaluator """rankbased""" +439 93 dataset """kinships""" +439 93 model """ntn""" +439 93 loss """bceaftersigmoid""" 
+439 93 regularizer """no""" +439 93 optimizer """adam""" +439 93 training_loop """owa""" +439 93 negative_sampler """basic""" +439 93 evaluator """rankbased""" +439 94 dataset """kinships""" +439 94 model """ntn""" +439 94 loss """bceaftersigmoid""" +439 94 regularizer """no""" +439 94 optimizer """adam""" +439 94 training_loop """owa""" +439 94 negative_sampler """basic""" +439 94 evaluator """rankbased""" +439 95 dataset """kinships""" +439 95 model """ntn""" +439 95 loss """bceaftersigmoid""" +439 95 regularizer """no""" +439 95 optimizer """adam""" +439 95 training_loop """owa""" +439 95 negative_sampler """basic""" +439 95 evaluator """rankbased""" +440 1 model.embedding_dim 2.0 +440 1 optimizer.lr 0.04052989267729473 +440 1 negative_sampler.num_negs_per_pos 80.0 +440 1 training.batch_size 0.0 +440 2 model.embedding_dim 2.0 +440 2 optimizer.lr 0.0024578910854410486 +440 2 negative_sampler.num_negs_per_pos 65.0 +440 2 training.batch_size 1.0 +440 3 model.embedding_dim 1.0 +440 3 optimizer.lr 0.0013698792219967956 +440 3 negative_sampler.num_negs_per_pos 54.0 +440 3 training.batch_size 2.0 +440 4 model.embedding_dim 2.0 +440 4 optimizer.lr 0.003950749680010983 +440 4 negative_sampler.num_negs_per_pos 1.0 +440 4 training.batch_size 2.0 +440 5 model.embedding_dim 2.0 +440 5 optimizer.lr 0.09062235084054025 +440 5 negative_sampler.num_negs_per_pos 51.0 +440 5 training.batch_size 1.0 +440 6 model.embedding_dim 0.0 +440 6 optimizer.lr 0.003975640813112084 +440 6 negative_sampler.num_negs_per_pos 42.0 +440 6 training.batch_size 1.0 +440 7 model.embedding_dim 0.0 +440 7 optimizer.lr 0.06039021071001031 +440 7 negative_sampler.num_negs_per_pos 49.0 +440 7 training.batch_size 2.0 +440 8 model.embedding_dim 1.0 +440 8 optimizer.lr 0.005044270923749438 +440 8 negative_sampler.num_negs_per_pos 42.0 +440 8 training.batch_size 1.0 +440 9 model.embedding_dim 0.0 +440 9 optimizer.lr 0.0032754952441548723 +440 9 negative_sampler.num_negs_per_pos 62.0 +440 9 training.batch_size 
1.0 +440 10 model.embedding_dim 1.0 +440 10 optimizer.lr 0.03225150103435952 +440 10 negative_sampler.num_negs_per_pos 75.0 +440 10 training.batch_size 2.0 +440 11 model.embedding_dim 2.0 +440 11 optimizer.lr 0.029524834633743106 +440 11 negative_sampler.num_negs_per_pos 20.0 +440 11 training.batch_size 0.0 +440 12 model.embedding_dim 0.0 +440 12 optimizer.lr 0.01576386738135801 +440 12 negative_sampler.num_negs_per_pos 53.0 +440 12 training.batch_size 2.0 +440 13 model.embedding_dim 2.0 +440 13 optimizer.lr 0.028764992223814866 +440 13 negative_sampler.num_negs_per_pos 21.0 +440 13 training.batch_size 0.0 +440 14 model.embedding_dim 0.0 +440 14 optimizer.lr 0.0068775480215382085 +440 14 negative_sampler.num_negs_per_pos 42.0 +440 14 training.batch_size 2.0 +440 15 model.embedding_dim 1.0 +440 15 optimizer.lr 0.014240458955481988 +440 15 negative_sampler.num_negs_per_pos 73.0 +440 15 training.batch_size 0.0 +440 16 model.embedding_dim 0.0 +440 16 optimizer.lr 0.002025940586363386 +440 16 negative_sampler.num_negs_per_pos 95.0 +440 16 training.batch_size 0.0 +440 17 model.embedding_dim 1.0 +440 17 optimizer.lr 0.002404797670812612 +440 17 negative_sampler.num_negs_per_pos 39.0 +440 17 training.batch_size 1.0 +440 18 model.embedding_dim 2.0 +440 18 optimizer.lr 0.07588547684187487 +440 18 negative_sampler.num_negs_per_pos 47.0 +440 18 training.batch_size 1.0 +440 19 model.embedding_dim 0.0 +440 19 optimizer.lr 0.09647695941977262 +440 19 negative_sampler.num_negs_per_pos 55.0 +440 19 training.batch_size 2.0 +440 20 model.embedding_dim 0.0 +440 20 optimizer.lr 0.0015756845798080923 +440 20 negative_sampler.num_negs_per_pos 85.0 +440 20 training.batch_size 0.0 +440 21 model.embedding_dim 1.0 +440 21 optimizer.lr 0.056112806394501995 +440 21 negative_sampler.num_negs_per_pos 99.0 +440 21 training.batch_size 1.0 +440 22 model.embedding_dim 0.0 +440 22 optimizer.lr 0.002474855866435669 +440 22 negative_sampler.num_negs_per_pos 47.0 +440 22 training.batch_size 1.0 +440 23 
model.embedding_dim 0.0 +440 23 optimizer.lr 0.01544351971518533 +440 23 negative_sampler.num_negs_per_pos 44.0 +440 23 training.batch_size 1.0 +440 24 model.embedding_dim 1.0 +440 24 optimizer.lr 0.0010577935324249561 +440 24 negative_sampler.num_negs_per_pos 20.0 +440 24 training.batch_size 0.0 +440 25 model.embedding_dim 2.0 +440 25 optimizer.lr 0.024451333783435846 +440 25 negative_sampler.num_negs_per_pos 94.0 +440 25 training.batch_size 2.0 +440 26 model.embedding_dim 1.0 +440 26 optimizer.lr 0.09068780702084142 +440 26 negative_sampler.num_negs_per_pos 63.0 +440 26 training.batch_size 0.0 +440 27 model.embedding_dim 2.0 +440 27 optimizer.lr 0.004533680408316206 +440 27 negative_sampler.num_negs_per_pos 56.0 +440 27 training.batch_size 0.0 +440 28 model.embedding_dim 1.0 +440 28 optimizer.lr 0.003078492310426737 +440 28 negative_sampler.num_negs_per_pos 96.0 +440 28 training.batch_size 2.0 +440 29 model.embedding_dim 1.0 +440 29 optimizer.lr 0.002161870424132507 +440 29 negative_sampler.num_negs_per_pos 40.0 +440 29 training.batch_size 2.0 +440 30 model.embedding_dim 2.0 +440 30 optimizer.lr 0.0015934365260403115 +440 30 negative_sampler.num_negs_per_pos 89.0 +440 30 training.batch_size 0.0 +440 31 model.embedding_dim 0.0 +440 31 optimizer.lr 0.016164797551675603 +440 31 negative_sampler.num_negs_per_pos 33.0 +440 31 training.batch_size 0.0 +440 32 model.embedding_dim 1.0 +440 32 optimizer.lr 0.06544079284659682 +440 32 negative_sampler.num_negs_per_pos 33.0 +440 32 training.batch_size 2.0 +440 33 model.embedding_dim 1.0 +440 33 optimizer.lr 0.0034293216229201295 +440 33 negative_sampler.num_negs_per_pos 40.0 +440 33 training.batch_size 2.0 +440 34 model.embedding_dim 1.0 +440 34 optimizer.lr 0.019970569345389703 +440 34 negative_sampler.num_negs_per_pos 3.0 +440 34 training.batch_size 0.0 +440 35 model.embedding_dim 2.0 +440 35 optimizer.lr 0.055991834833808554 +440 35 negative_sampler.num_negs_per_pos 1.0 +440 35 training.batch_size 1.0 +440 36 
model.embedding_dim 1.0 +440 36 optimizer.lr 0.001528682869458608 +440 36 negative_sampler.num_negs_per_pos 85.0 +440 36 training.batch_size 2.0 +440 37 model.embedding_dim 1.0 +440 37 optimizer.lr 0.0015485332477652547 +440 37 negative_sampler.num_negs_per_pos 36.0 +440 37 training.batch_size 1.0 +440 38 model.embedding_dim 0.0 +440 38 optimizer.lr 0.0020044652653793136 +440 38 negative_sampler.num_negs_per_pos 40.0 +440 38 training.batch_size 1.0 +440 39 model.embedding_dim 0.0 +440 39 optimizer.lr 0.03301772686971002 +440 39 negative_sampler.num_negs_per_pos 31.0 +440 39 training.batch_size 2.0 +440 40 model.embedding_dim 1.0 +440 40 optimizer.lr 0.03345564846769114 +440 40 negative_sampler.num_negs_per_pos 95.0 +440 40 training.batch_size 1.0 +440 41 model.embedding_dim 1.0 +440 41 optimizer.lr 0.07279114414855165 +440 41 negative_sampler.num_negs_per_pos 60.0 +440 41 training.batch_size 2.0 +440 42 model.embedding_dim 0.0 +440 42 optimizer.lr 0.067998942089031 +440 42 negative_sampler.num_negs_per_pos 99.0 +440 42 training.batch_size 2.0 +440 43 model.embedding_dim 0.0 +440 43 optimizer.lr 0.07102145805552461 +440 43 negative_sampler.num_negs_per_pos 4.0 +440 43 training.batch_size 1.0 +440 44 model.embedding_dim 0.0 +440 44 optimizer.lr 0.0013882934684569695 +440 44 negative_sampler.num_negs_per_pos 87.0 +440 44 training.batch_size 2.0 +440 45 model.embedding_dim 0.0 +440 45 optimizer.lr 0.009104616520766034 +440 45 negative_sampler.num_negs_per_pos 5.0 +440 45 training.batch_size 1.0 +440 46 model.embedding_dim 2.0 +440 46 optimizer.lr 0.004353761739926491 +440 46 negative_sampler.num_negs_per_pos 59.0 +440 46 training.batch_size 1.0 +440 47 model.embedding_dim 2.0 +440 47 optimizer.lr 0.0021578132625871553 +440 47 negative_sampler.num_negs_per_pos 68.0 +440 47 training.batch_size 0.0 +440 48 model.embedding_dim 2.0 +440 48 optimizer.lr 0.021226249038759838 +440 48 negative_sampler.num_negs_per_pos 14.0 +440 48 training.batch_size 0.0 +440 49 
model.embedding_dim 2.0 +440 49 optimizer.lr 0.0011917437159549502 +440 49 negative_sampler.num_negs_per_pos 38.0 +440 49 training.batch_size 2.0 +440 50 model.embedding_dim 1.0 +440 50 optimizer.lr 0.02591981075503331 +440 50 negative_sampler.num_negs_per_pos 34.0 +440 50 training.batch_size 2.0 +440 51 model.embedding_dim 0.0 +440 51 optimizer.lr 0.0010530185318854118 +440 51 negative_sampler.num_negs_per_pos 91.0 +440 51 training.batch_size 0.0 +440 52 model.embedding_dim 1.0 +440 52 optimizer.lr 0.006363390439246232 +440 52 negative_sampler.num_negs_per_pos 19.0 +440 52 training.batch_size 0.0 +440 53 model.embedding_dim 0.0 +440 53 optimizer.lr 0.08045262878267002 +440 53 negative_sampler.num_negs_per_pos 25.0 +440 53 training.batch_size 2.0 +440 54 model.embedding_dim 1.0 +440 54 optimizer.lr 0.012905869113637612 +440 54 negative_sampler.num_negs_per_pos 6.0 +440 54 training.batch_size 1.0 +440 55 model.embedding_dim 0.0 +440 55 optimizer.lr 0.023996283161353514 +440 55 negative_sampler.num_negs_per_pos 76.0 +440 55 training.batch_size 2.0 +440 56 model.embedding_dim 1.0 +440 56 optimizer.lr 0.0033114478111536537 +440 56 negative_sampler.num_negs_per_pos 58.0 +440 56 training.batch_size 0.0 +440 57 model.embedding_dim 2.0 +440 57 optimizer.lr 0.009480637546631622 +440 57 negative_sampler.num_negs_per_pos 84.0 +440 57 training.batch_size 2.0 +440 58 model.embedding_dim 0.0 +440 58 optimizer.lr 0.08592808181696668 +440 58 negative_sampler.num_negs_per_pos 79.0 +440 58 training.batch_size 2.0 +440 59 model.embedding_dim 1.0 +440 59 optimizer.lr 0.019881387019715972 +440 59 negative_sampler.num_negs_per_pos 66.0 +440 59 training.batch_size 2.0 +440 60 model.embedding_dim 0.0 +440 60 optimizer.lr 0.012109948076876088 +440 60 negative_sampler.num_negs_per_pos 52.0 +440 60 training.batch_size 2.0 +440 61 model.embedding_dim 1.0 +440 61 optimizer.lr 0.001187326998833654 +440 61 negative_sampler.num_negs_per_pos 36.0 +440 61 training.batch_size 2.0 +440 62 
model.embedding_dim 2.0 +440 62 optimizer.lr 0.07469457006216865 +440 62 negative_sampler.num_negs_per_pos 52.0 +440 62 training.batch_size 0.0 +440 63 model.embedding_dim 1.0 +440 63 optimizer.lr 0.0012190309311767933 +440 63 negative_sampler.num_negs_per_pos 50.0 +440 63 training.batch_size 1.0 +440 64 model.embedding_dim 1.0 +440 64 optimizer.lr 0.010784690856592569 +440 64 negative_sampler.num_negs_per_pos 4.0 +440 64 training.batch_size 0.0 +440 65 model.embedding_dim 1.0 +440 65 optimizer.lr 0.010604448972360824 +440 65 negative_sampler.num_negs_per_pos 57.0 +440 65 training.batch_size 1.0 +440 66 model.embedding_dim 2.0 +440 66 optimizer.lr 0.003674853774291569 +440 66 negative_sampler.num_negs_per_pos 27.0 +440 66 training.batch_size 2.0 +440 67 model.embedding_dim 1.0 +440 67 optimizer.lr 0.018762140631257695 +440 67 negative_sampler.num_negs_per_pos 96.0 +440 67 training.batch_size 1.0 +440 68 model.embedding_dim 1.0 +440 68 optimizer.lr 0.001318476816733429 +440 68 negative_sampler.num_negs_per_pos 28.0 +440 68 training.batch_size 2.0 +440 69 model.embedding_dim 1.0 +440 69 optimizer.lr 0.022823861191472745 +440 69 negative_sampler.num_negs_per_pos 8.0 +440 69 training.batch_size 1.0 +440 70 model.embedding_dim 2.0 +440 70 optimizer.lr 0.009390782336270201 +440 70 negative_sampler.num_negs_per_pos 33.0 +440 70 training.batch_size 1.0 +440 71 model.embedding_dim 0.0 +440 71 optimizer.lr 0.022314373109535775 +440 71 negative_sampler.num_negs_per_pos 67.0 +440 71 training.batch_size 2.0 +440 72 model.embedding_dim 2.0 +440 72 optimizer.lr 0.014378561030905158 +440 72 negative_sampler.num_negs_per_pos 65.0 +440 72 training.batch_size 0.0 +440 73 model.embedding_dim 1.0 +440 73 optimizer.lr 0.003926468749567518 +440 73 negative_sampler.num_negs_per_pos 2.0 +440 73 training.batch_size 1.0 +440 74 model.embedding_dim 2.0 +440 74 optimizer.lr 0.002192597788211551 +440 74 negative_sampler.num_negs_per_pos 32.0 +440 74 training.batch_size 2.0 +440 75 
model.embedding_dim 0.0 +440 75 optimizer.lr 0.006140093078426455 +440 75 negative_sampler.num_negs_per_pos 54.0 +440 75 training.batch_size 0.0 +440 76 model.embedding_dim 0.0 +440 76 optimizer.lr 0.0021188629087338083 +440 76 negative_sampler.num_negs_per_pos 41.0 +440 76 training.batch_size 2.0 +440 77 model.embedding_dim 2.0 +440 77 optimizer.lr 0.009875110931514577 +440 77 negative_sampler.num_negs_per_pos 31.0 +440 77 training.batch_size 0.0 +440 78 model.embedding_dim 0.0 +440 78 optimizer.lr 0.04181602973044904 +440 78 negative_sampler.num_negs_per_pos 44.0 +440 78 training.batch_size 2.0 +440 79 model.embedding_dim 0.0 +440 79 optimizer.lr 0.0658994996950241 +440 79 negative_sampler.num_negs_per_pos 99.0 +440 79 training.batch_size 1.0 +440 80 model.embedding_dim 1.0 +440 80 optimizer.lr 0.011996994219407264 +440 80 negative_sampler.num_negs_per_pos 89.0 +440 80 training.batch_size 1.0 +440 81 model.embedding_dim 0.0 +440 81 optimizer.lr 0.01401672764508888 +440 81 negative_sampler.num_negs_per_pos 36.0 +440 81 training.batch_size 1.0 +440 82 model.embedding_dim 0.0 +440 82 optimizer.lr 0.0018517112857263688 +440 82 negative_sampler.num_negs_per_pos 80.0 +440 82 training.batch_size 0.0 +440 83 model.embedding_dim 0.0 +440 83 optimizer.lr 0.013009083112932727 +440 83 negative_sampler.num_negs_per_pos 65.0 +440 83 training.batch_size 1.0 +440 84 model.embedding_dim 0.0 +440 84 optimizer.lr 0.01280352226246726 +440 84 negative_sampler.num_negs_per_pos 14.0 +440 84 training.batch_size 1.0 +440 85 model.embedding_dim 1.0 +440 85 optimizer.lr 0.04621772375959625 +440 85 negative_sampler.num_negs_per_pos 82.0 +440 85 training.batch_size 2.0 +440 86 model.embedding_dim 0.0 +440 86 optimizer.lr 0.0012564806735323993 +440 86 negative_sampler.num_negs_per_pos 48.0 +440 86 training.batch_size 0.0 +440 87 model.embedding_dim 0.0 +440 87 optimizer.lr 0.047805133811458476 +440 87 negative_sampler.num_negs_per_pos 13.0 +440 87 training.batch_size 0.0 +440 88 
model.embedding_dim 1.0 +440 88 optimizer.lr 0.0017458542600354155 +440 88 negative_sampler.num_negs_per_pos 7.0 +440 88 training.batch_size 0.0 +440 89 model.embedding_dim 1.0 +440 89 optimizer.lr 0.0013318780061084717 +440 89 negative_sampler.num_negs_per_pos 86.0 +440 89 training.batch_size 1.0 +440 90 model.embedding_dim 1.0 +440 90 optimizer.lr 0.08024292962213572 +440 90 negative_sampler.num_negs_per_pos 53.0 +440 90 training.batch_size 1.0 +440 91 model.embedding_dim 1.0 +440 91 optimizer.lr 0.00446010441509688 +440 91 negative_sampler.num_negs_per_pos 37.0 +440 91 training.batch_size 1.0 +440 92 model.embedding_dim 0.0 +440 92 optimizer.lr 0.05738057794821478 +440 92 negative_sampler.num_negs_per_pos 7.0 +440 92 training.batch_size 0.0 +440 93 model.embedding_dim 0.0 +440 93 optimizer.lr 0.010239446689820287 +440 93 negative_sampler.num_negs_per_pos 46.0 +440 93 training.batch_size 0.0 +440 94 model.embedding_dim 2.0 +440 94 optimizer.lr 0.0827819831773477 +440 94 negative_sampler.num_negs_per_pos 50.0 +440 94 training.batch_size 0.0 +440 95 model.embedding_dim 2.0 +440 95 optimizer.lr 0.0013657026839886378 +440 95 negative_sampler.num_negs_per_pos 65.0 +440 95 training.batch_size 0.0 +440 96 model.embedding_dim 1.0 +440 96 optimizer.lr 0.0048655378663006585 +440 96 negative_sampler.num_negs_per_pos 6.0 +440 96 training.batch_size 1.0 +440 97 model.embedding_dim 1.0 +440 97 optimizer.lr 0.007344311615025849 +440 97 negative_sampler.num_negs_per_pos 38.0 +440 97 training.batch_size 2.0 +440 98 model.embedding_dim 0.0 +440 98 optimizer.lr 0.00356588097089952 +440 98 negative_sampler.num_negs_per_pos 6.0 +440 98 training.batch_size 1.0 +440 99 model.embedding_dim 1.0 +440 99 optimizer.lr 0.09661938952429723 +440 99 negative_sampler.num_negs_per_pos 4.0 +440 99 training.batch_size 0.0 +440 100 model.embedding_dim 2.0 +440 100 optimizer.lr 0.07464298580363762 +440 100 negative_sampler.num_negs_per_pos 87.0 +440 100 training.batch_size 1.0 +440 1 dataset 
"""kinships""" +440 1 model """ntn""" +440 1 loss """softplus""" +440 1 regularizer """no""" +440 1 optimizer """adam""" +440 1 training_loop """owa""" +440 1 negative_sampler """basic""" +440 1 evaluator """rankbased""" +440 2 dataset """kinships""" +440 2 model """ntn""" +440 2 loss """softplus""" +440 2 regularizer """no""" +440 2 optimizer """adam""" +440 2 training_loop """owa""" +440 2 negative_sampler """basic""" +440 2 evaluator """rankbased""" +440 3 dataset """kinships""" +440 3 model """ntn""" +440 3 loss """softplus""" +440 3 regularizer """no""" +440 3 optimizer """adam""" +440 3 training_loop """owa""" +440 3 negative_sampler """basic""" +440 3 evaluator """rankbased""" +440 4 dataset """kinships""" +440 4 model """ntn""" +440 4 loss """softplus""" +440 4 regularizer """no""" +440 4 optimizer """adam""" +440 4 training_loop """owa""" +440 4 negative_sampler """basic""" +440 4 evaluator """rankbased""" +440 5 dataset """kinships""" +440 5 model """ntn""" +440 5 loss """softplus""" +440 5 regularizer """no""" +440 5 optimizer """adam""" +440 5 training_loop """owa""" +440 5 negative_sampler """basic""" +440 5 evaluator """rankbased""" +440 6 dataset """kinships""" +440 6 model """ntn""" +440 6 loss """softplus""" +440 6 regularizer """no""" +440 6 optimizer """adam""" +440 6 training_loop """owa""" +440 6 negative_sampler """basic""" +440 6 evaluator """rankbased""" +440 7 dataset """kinships""" +440 7 model """ntn""" +440 7 loss """softplus""" +440 7 regularizer """no""" +440 7 optimizer """adam""" +440 7 training_loop """owa""" +440 7 negative_sampler """basic""" +440 7 evaluator """rankbased""" +440 8 dataset """kinships""" +440 8 model """ntn""" +440 8 loss """softplus""" +440 8 regularizer """no""" +440 8 optimizer """adam""" +440 8 training_loop """owa""" +440 8 negative_sampler """basic""" +440 8 evaluator """rankbased""" +440 9 dataset """kinships""" +440 9 model """ntn""" +440 9 loss """softplus""" +440 9 regularizer """no""" +440 9 optimizer 
"""adam""" +440 9 training_loop """owa""" +440 9 negative_sampler """basic""" +440 9 evaluator """rankbased""" +440 10 dataset """kinships""" +440 10 model """ntn""" +440 10 loss """softplus""" +440 10 regularizer """no""" +440 10 optimizer """adam""" +440 10 training_loop """owa""" +440 10 negative_sampler """basic""" +440 10 evaluator """rankbased""" +440 11 dataset """kinships""" +440 11 model """ntn""" +440 11 loss """softplus""" +440 11 regularizer """no""" +440 11 optimizer """adam""" +440 11 training_loop """owa""" +440 11 negative_sampler """basic""" +440 11 evaluator """rankbased""" +440 12 dataset """kinships""" +440 12 model """ntn""" +440 12 loss """softplus""" +440 12 regularizer """no""" +440 12 optimizer """adam""" +440 12 training_loop """owa""" +440 12 negative_sampler """basic""" +440 12 evaluator """rankbased""" +440 13 dataset """kinships""" +440 13 model """ntn""" +440 13 loss """softplus""" +440 13 regularizer """no""" +440 13 optimizer """adam""" +440 13 training_loop """owa""" +440 13 negative_sampler """basic""" +440 13 evaluator """rankbased""" +440 14 dataset """kinships""" +440 14 model """ntn""" +440 14 loss """softplus""" +440 14 regularizer """no""" +440 14 optimizer """adam""" +440 14 training_loop """owa""" +440 14 negative_sampler """basic""" +440 14 evaluator """rankbased""" +440 15 dataset """kinships""" +440 15 model """ntn""" +440 15 loss """softplus""" +440 15 regularizer """no""" +440 15 optimizer """adam""" +440 15 training_loop """owa""" +440 15 negative_sampler """basic""" +440 15 evaluator """rankbased""" +440 16 dataset """kinships""" +440 16 model """ntn""" +440 16 loss """softplus""" +440 16 regularizer """no""" +440 16 optimizer """adam""" +440 16 training_loop """owa""" +440 16 negative_sampler """basic""" +440 16 evaluator """rankbased""" +440 17 dataset """kinships""" +440 17 model """ntn""" +440 17 loss """softplus""" +440 17 regularizer """no""" +440 17 optimizer """adam""" +440 17 training_loop """owa""" +440 17 
negative_sampler """basic""" +440 17 evaluator """rankbased""" +440 18 dataset """kinships""" +440 18 model """ntn""" +440 18 loss """softplus""" +440 18 regularizer """no""" +440 18 optimizer """adam""" +440 18 training_loop """owa""" +440 18 negative_sampler """basic""" +440 18 evaluator """rankbased""" +440 19 dataset """kinships""" +440 19 model """ntn""" +440 19 loss """softplus""" +440 19 regularizer """no""" +440 19 optimizer """adam""" +440 19 training_loop """owa""" +440 19 negative_sampler """basic""" +440 19 evaluator """rankbased""" +440 20 dataset """kinships""" +440 20 model """ntn""" +440 20 loss """softplus""" +440 20 regularizer """no""" +440 20 optimizer """adam""" +440 20 training_loop """owa""" +440 20 negative_sampler """basic""" +440 20 evaluator """rankbased""" +440 21 dataset """kinships""" +440 21 model """ntn""" +440 21 loss """softplus""" +440 21 regularizer """no""" +440 21 optimizer """adam""" +440 21 training_loop """owa""" +440 21 negative_sampler """basic""" +440 21 evaluator """rankbased""" +440 22 dataset """kinships""" +440 22 model """ntn""" +440 22 loss """softplus""" +440 22 regularizer """no""" +440 22 optimizer """adam""" +440 22 training_loop """owa""" +440 22 negative_sampler """basic""" +440 22 evaluator """rankbased""" +440 23 dataset """kinships""" +440 23 model """ntn""" +440 23 loss """softplus""" +440 23 regularizer """no""" +440 23 optimizer """adam""" +440 23 training_loop """owa""" +440 23 negative_sampler """basic""" +440 23 evaluator """rankbased""" +440 24 dataset """kinships""" +440 24 model """ntn""" +440 24 loss """softplus""" +440 24 regularizer """no""" +440 24 optimizer """adam""" +440 24 training_loop """owa""" +440 24 negative_sampler """basic""" +440 24 evaluator """rankbased""" +440 25 dataset """kinships""" +440 25 model """ntn""" +440 25 loss """softplus""" +440 25 regularizer """no""" +440 25 optimizer """adam""" +440 25 training_loop """owa""" +440 25 negative_sampler """basic""" +440 25 evaluator 
"""rankbased""" +440 26 dataset """kinships""" +440 26 model """ntn""" +440 26 loss """softplus""" +440 26 regularizer """no""" +440 26 optimizer """adam""" +440 26 training_loop """owa""" +440 26 negative_sampler """basic""" +440 26 evaluator """rankbased""" +440 27 dataset """kinships""" +440 27 model """ntn""" +440 27 loss """softplus""" +440 27 regularizer """no""" +440 27 optimizer """adam""" +440 27 training_loop """owa""" +440 27 negative_sampler """basic""" +440 27 evaluator """rankbased""" +440 28 dataset """kinships""" +440 28 model """ntn""" +440 28 loss """softplus""" +440 28 regularizer """no""" +440 28 optimizer """adam""" +440 28 training_loop """owa""" +440 28 negative_sampler """basic""" +440 28 evaluator """rankbased""" +440 29 dataset """kinships""" +440 29 model """ntn""" +440 29 loss """softplus""" +440 29 regularizer """no""" +440 29 optimizer """adam""" +440 29 training_loop """owa""" +440 29 negative_sampler """basic""" +440 29 evaluator """rankbased""" +440 30 dataset """kinships""" +440 30 model """ntn""" +440 30 loss """softplus""" +440 30 regularizer """no""" +440 30 optimizer """adam""" +440 30 training_loop """owa""" +440 30 negative_sampler """basic""" +440 30 evaluator """rankbased""" +440 31 dataset """kinships""" +440 31 model """ntn""" +440 31 loss """softplus""" +440 31 regularizer """no""" +440 31 optimizer """adam""" +440 31 training_loop """owa""" +440 31 negative_sampler """basic""" +440 31 evaluator """rankbased""" +440 32 dataset """kinships""" +440 32 model """ntn""" +440 32 loss """softplus""" +440 32 regularizer """no""" +440 32 optimizer """adam""" +440 32 training_loop """owa""" +440 32 negative_sampler """basic""" +440 32 evaluator """rankbased""" +440 33 dataset """kinships""" +440 33 model """ntn""" +440 33 loss """softplus""" +440 33 regularizer """no""" +440 33 optimizer """adam""" +440 33 training_loop """owa""" +440 33 negative_sampler """basic""" +440 33 evaluator """rankbased""" +440 34 dataset """kinships""" 
+440 34 model """ntn""" +440 34 loss """softplus""" +440 34 regularizer """no""" +440 34 optimizer """adam""" +440 34 training_loop """owa""" +440 34 negative_sampler """basic""" +440 34 evaluator """rankbased""" +440 35 dataset """kinships""" +440 35 model """ntn""" +440 35 loss """softplus""" +440 35 regularizer """no""" +440 35 optimizer """adam""" +440 35 training_loop """owa""" +440 35 negative_sampler """basic""" +440 35 evaluator """rankbased""" +440 36 dataset """kinships""" +440 36 model """ntn""" +440 36 loss """softplus""" +440 36 regularizer """no""" +440 36 optimizer """adam""" +440 36 training_loop """owa""" +440 36 negative_sampler """basic""" +440 36 evaluator """rankbased""" +440 37 dataset """kinships""" +440 37 model """ntn""" +440 37 loss """softplus""" +440 37 regularizer """no""" +440 37 optimizer """adam""" +440 37 training_loop """owa""" +440 37 negative_sampler """basic""" +440 37 evaluator """rankbased""" +440 38 dataset """kinships""" +440 38 model """ntn""" +440 38 loss """softplus""" +440 38 regularizer """no""" +440 38 optimizer """adam""" +440 38 training_loop """owa""" +440 38 negative_sampler """basic""" +440 38 evaluator """rankbased""" +440 39 dataset """kinships""" +440 39 model """ntn""" +440 39 loss """softplus""" +440 39 regularizer """no""" +440 39 optimizer """adam""" +440 39 training_loop """owa""" +440 39 negative_sampler """basic""" +440 39 evaluator """rankbased""" +440 40 dataset """kinships""" +440 40 model """ntn""" +440 40 loss """softplus""" +440 40 regularizer """no""" +440 40 optimizer """adam""" +440 40 training_loop """owa""" +440 40 negative_sampler """basic""" +440 40 evaluator """rankbased""" +440 41 dataset """kinships""" +440 41 model """ntn""" +440 41 loss """softplus""" +440 41 regularizer """no""" +440 41 optimizer """adam""" +440 41 training_loop """owa""" +440 41 negative_sampler """basic""" +440 41 evaluator """rankbased""" +440 42 dataset """kinships""" +440 42 model """ntn""" +440 42 loss 
"""softplus""" +440 42 regularizer """no""" +440 42 optimizer """adam""" +440 42 training_loop """owa""" +440 42 negative_sampler """basic""" +440 42 evaluator """rankbased""" +440 43 dataset """kinships""" +440 43 model """ntn""" +440 43 loss """softplus""" +440 43 regularizer """no""" +440 43 optimizer """adam""" +440 43 training_loop """owa""" +440 43 negative_sampler """basic""" +440 43 evaluator """rankbased""" +440 44 dataset """kinships""" +440 44 model """ntn""" +440 44 loss """softplus""" +440 44 regularizer """no""" +440 44 optimizer """adam""" +440 44 training_loop """owa""" +440 44 negative_sampler """basic""" +440 44 evaluator """rankbased""" +440 45 dataset """kinships""" +440 45 model """ntn""" +440 45 loss """softplus""" +440 45 regularizer """no""" +440 45 optimizer """adam""" +440 45 training_loop """owa""" +440 45 negative_sampler """basic""" +440 45 evaluator """rankbased""" +440 46 dataset """kinships""" +440 46 model """ntn""" +440 46 loss """softplus""" +440 46 regularizer """no""" +440 46 optimizer """adam""" +440 46 training_loop """owa""" +440 46 negative_sampler """basic""" +440 46 evaluator """rankbased""" +440 47 dataset """kinships""" +440 47 model """ntn""" +440 47 loss """softplus""" +440 47 regularizer """no""" +440 47 optimizer """adam""" +440 47 training_loop """owa""" +440 47 negative_sampler """basic""" +440 47 evaluator """rankbased""" +440 48 dataset """kinships""" +440 48 model """ntn""" +440 48 loss """softplus""" +440 48 regularizer """no""" +440 48 optimizer """adam""" +440 48 training_loop """owa""" +440 48 negative_sampler """basic""" +440 48 evaluator """rankbased""" +440 49 dataset """kinships""" +440 49 model """ntn""" +440 49 loss """softplus""" +440 49 regularizer """no""" +440 49 optimizer """adam""" +440 49 training_loop """owa""" +440 49 negative_sampler """basic""" +440 49 evaluator """rankbased""" +440 50 dataset """kinships""" +440 50 model """ntn""" +440 50 loss """softplus""" +440 50 regularizer """no""" 
+440 50 optimizer """adam""" +440 50 training_loop """owa""" +440 50 negative_sampler """basic""" +440 50 evaluator """rankbased""" +440 51 dataset """kinships""" +440 51 model """ntn""" +440 51 loss """softplus""" +440 51 regularizer """no""" +440 51 optimizer """adam""" +440 51 training_loop """owa""" +440 51 negative_sampler """basic""" +440 51 evaluator """rankbased""" +440 52 dataset """kinships""" +440 52 model """ntn""" +440 52 loss """softplus""" +440 52 regularizer """no""" +440 52 optimizer """adam""" +440 52 training_loop """owa""" +440 52 negative_sampler """basic""" +440 52 evaluator """rankbased""" +440 53 dataset """kinships""" +440 53 model """ntn""" +440 53 loss """softplus""" +440 53 regularizer """no""" +440 53 optimizer """adam""" +440 53 training_loop """owa""" +440 53 negative_sampler """basic""" +440 53 evaluator """rankbased""" +440 54 dataset """kinships""" +440 54 model """ntn""" +440 54 loss """softplus""" +440 54 regularizer """no""" +440 54 optimizer """adam""" +440 54 training_loop """owa""" +440 54 negative_sampler """basic""" +440 54 evaluator """rankbased""" +440 55 dataset """kinships""" +440 55 model """ntn""" +440 55 loss """softplus""" +440 55 regularizer """no""" +440 55 optimizer """adam""" +440 55 training_loop """owa""" +440 55 negative_sampler """basic""" +440 55 evaluator """rankbased""" +440 56 dataset """kinships""" +440 56 model """ntn""" +440 56 loss """softplus""" +440 56 regularizer """no""" +440 56 optimizer """adam""" +440 56 training_loop """owa""" +440 56 negative_sampler """basic""" +440 56 evaluator """rankbased""" +440 57 dataset """kinships""" +440 57 model """ntn""" +440 57 loss """softplus""" +440 57 regularizer """no""" +440 57 optimizer """adam""" +440 57 training_loop """owa""" +440 57 negative_sampler """basic""" +440 57 evaluator """rankbased""" +440 58 dataset """kinships""" +440 58 model """ntn""" +440 58 loss """softplus""" +440 58 regularizer """no""" +440 58 optimizer """adam""" +440 58 
training_loop """owa""" +440 58 negative_sampler """basic""" +440 58 evaluator """rankbased""" +440 59 dataset """kinships""" +440 59 model """ntn""" +440 59 loss """softplus""" +440 59 regularizer """no""" +440 59 optimizer """adam""" +440 59 training_loop """owa""" +440 59 negative_sampler """basic""" +440 59 evaluator """rankbased""" +440 60 dataset """kinships""" +440 60 model """ntn""" +440 60 loss """softplus""" +440 60 regularizer """no""" +440 60 optimizer """adam""" +440 60 training_loop """owa""" +440 60 negative_sampler """basic""" +440 60 evaluator """rankbased""" +440 61 dataset """kinships""" +440 61 model """ntn""" +440 61 loss """softplus""" +440 61 regularizer """no""" +440 61 optimizer """adam""" +440 61 training_loop """owa""" +440 61 negative_sampler """basic""" +440 61 evaluator """rankbased""" +440 62 dataset """kinships""" +440 62 model """ntn""" +440 62 loss """softplus""" +440 62 regularizer """no""" +440 62 optimizer """adam""" +440 62 training_loop """owa""" +440 62 negative_sampler """basic""" +440 62 evaluator """rankbased""" +440 63 dataset """kinships""" +440 63 model """ntn""" +440 63 loss """softplus""" +440 63 regularizer """no""" +440 63 optimizer """adam""" +440 63 training_loop """owa""" +440 63 negative_sampler """basic""" +440 63 evaluator """rankbased""" +440 64 dataset """kinships""" +440 64 model """ntn""" +440 64 loss """softplus""" +440 64 regularizer """no""" +440 64 optimizer """adam""" +440 64 training_loop """owa""" +440 64 negative_sampler """basic""" +440 64 evaluator """rankbased""" +440 65 dataset """kinships""" +440 65 model """ntn""" +440 65 loss """softplus""" +440 65 regularizer """no""" +440 65 optimizer """adam""" +440 65 training_loop """owa""" +440 65 negative_sampler """basic""" +440 65 evaluator """rankbased""" +440 66 dataset """kinships""" +440 66 model """ntn""" +440 66 loss """softplus""" +440 66 regularizer """no""" +440 66 optimizer """adam""" +440 66 training_loop """owa""" +440 66 
negative_sampler """basic""" +440 66 evaluator """rankbased""" +440 67 dataset """kinships""" +440 67 model """ntn""" +440 67 loss """softplus""" +440 67 regularizer """no""" +440 67 optimizer """adam""" +440 67 training_loop """owa""" +440 67 negative_sampler """basic""" +440 67 evaluator """rankbased""" +440 68 dataset """kinships""" +440 68 model """ntn""" +440 68 loss """softplus""" +440 68 regularizer """no""" +440 68 optimizer """adam""" +440 68 training_loop """owa""" +440 68 negative_sampler """basic""" +440 68 evaluator """rankbased""" +440 69 dataset """kinships""" +440 69 model """ntn""" +440 69 loss """softplus""" +440 69 regularizer """no""" +440 69 optimizer """adam""" +440 69 training_loop """owa""" +440 69 negative_sampler """basic""" +440 69 evaluator """rankbased""" +440 70 dataset """kinships""" +440 70 model """ntn""" +440 70 loss """softplus""" +440 70 regularizer """no""" +440 70 optimizer """adam""" +440 70 training_loop """owa""" +440 70 negative_sampler """basic""" +440 70 evaluator """rankbased""" +440 71 dataset """kinships""" +440 71 model """ntn""" +440 71 loss """softplus""" +440 71 regularizer """no""" +440 71 optimizer """adam""" +440 71 training_loop """owa""" +440 71 negative_sampler """basic""" +440 71 evaluator """rankbased""" +440 72 dataset """kinships""" +440 72 model """ntn""" +440 72 loss """softplus""" +440 72 regularizer """no""" +440 72 optimizer """adam""" +440 72 training_loop """owa""" +440 72 negative_sampler """basic""" +440 72 evaluator """rankbased""" +440 73 dataset """kinships""" +440 73 model """ntn""" +440 73 loss """softplus""" +440 73 regularizer """no""" +440 73 optimizer """adam""" +440 73 training_loop """owa""" +440 73 negative_sampler """basic""" +440 73 evaluator """rankbased""" +440 74 dataset """kinships""" +440 74 model """ntn""" +440 74 loss """softplus""" +440 74 regularizer """no""" +440 74 optimizer """adam""" +440 74 training_loop """owa""" +440 74 negative_sampler """basic""" +440 74 evaluator 
"""rankbased""" +440 75 dataset """kinships""" +440 75 model """ntn""" +440 75 loss """softplus""" +440 75 regularizer """no""" +440 75 optimizer """adam""" +440 75 training_loop """owa""" +440 75 negative_sampler """basic""" +440 75 evaluator """rankbased""" +440 76 dataset """kinships""" +440 76 model """ntn""" +440 76 loss """softplus""" +440 76 regularizer """no""" +440 76 optimizer """adam""" +440 76 training_loop """owa""" +440 76 negative_sampler """basic""" +440 76 evaluator """rankbased""" +440 77 dataset """kinships""" +440 77 model """ntn""" +440 77 loss """softplus""" +440 77 regularizer """no""" +440 77 optimizer """adam""" +440 77 training_loop """owa""" +440 77 negative_sampler """basic""" +440 77 evaluator """rankbased""" +440 78 dataset """kinships""" +440 78 model """ntn""" +440 78 loss """softplus""" +440 78 regularizer """no""" +440 78 optimizer """adam""" +440 78 training_loop """owa""" +440 78 negative_sampler """basic""" +440 78 evaluator """rankbased""" +440 79 dataset """kinships""" +440 79 model """ntn""" +440 79 loss """softplus""" +440 79 regularizer """no""" +440 79 optimizer """adam""" +440 79 training_loop """owa""" +440 79 negative_sampler """basic""" +440 79 evaluator """rankbased""" +440 80 dataset """kinships""" +440 80 model """ntn""" +440 80 loss """softplus""" +440 80 regularizer """no""" +440 80 optimizer """adam""" +440 80 training_loop """owa""" +440 80 negative_sampler """basic""" +440 80 evaluator """rankbased""" +440 81 dataset """kinships""" +440 81 model """ntn""" +440 81 loss """softplus""" +440 81 regularizer """no""" +440 81 optimizer """adam""" +440 81 training_loop """owa""" +440 81 negative_sampler """basic""" +440 81 evaluator """rankbased""" +440 82 dataset """kinships""" +440 82 model """ntn""" +440 82 loss """softplus""" +440 82 regularizer """no""" +440 82 optimizer """adam""" +440 82 training_loop """owa""" +440 82 negative_sampler """basic""" +440 82 evaluator """rankbased""" +440 83 dataset """kinships""" 
+440 83 model """ntn""" +440 83 loss """softplus""" +440 83 regularizer """no""" +440 83 optimizer """adam""" +440 83 training_loop """owa""" +440 83 negative_sampler """basic""" +440 83 evaluator """rankbased""" +440 84 dataset """kinships""" +440 84 model """ntn""" +440 84 loss """softplus""" +440 84 regularizer """no""" +440 84 optimizer """adam""" +440 84 training_loop """owa""" +440 84 negative_sampler """basic""" +440 84 evaluator """rankbased""" +440 85 dataset """kinships""" +440 85 model """ntn""" +440 85 loss """softplus""" +440 85 regularizer """no""" +440 85 optimizer """adam""" +440 85 training_loop """owa""" +440 85 negative_sampler """basic""" +440 85 evaluator """rankbased""" +440 86 dataset """kinships""" +440 86 model """ntn""" +440 86 loss """softplus""" +440 86 regularizer """no""" +440 86 optimizer """adam""" +440 86 training_loop """owa""" +440 86 negative_sampler """basic""" +440 86 evaluator """rankbased""" +440 87 dataset """kinships""" +440 87 model """ntn""" +440 87 loss """softplus""" +440 87 regularizer """no""" +440 87 optimizer """adam""" +440 87 training_loop """owa""" +440 87 negative_sampler """basic""" +440 87 evaluator """rankbased""" +440 88 dataset """kinships""" +440 88 model """ntn""" +440 88 loss """softplus""" +440 88 regularizer """no""" +440 88 optimizer """adam""" +440 88 training_loop """owa""" +440 88 negative_sampler """basic""" +440 88 evaluator """rankbased""" +440 89 dataset """kinships""" +440 89 model """ntn""" +440 89 loss """softplus""" +440 89 regularizer """no""" +440 89 optimizer """adam""" +440 89 training_loop """owa""" +440 89 negative_sampler """basic""" +440 89 evaluator """rankbased""" +440 90 dataset """kinships""" +440 90 model """ntn""" +440 90 loss """softplus""" +440 90 regularizer """no""" +440 90 optimizer """adam""" +440 90 training_loop """owa""" +440 90 negative_sampler """basic""" +440 90 evaluator """rankbased""" +440 91 dataset """kinships""" +440 91 model """ntn""" +440 91 loss 
"""softplus""" +440 91 regularizer """no""" +440 91 optimizer """adam""" +440 91 training_loop """owa""" +440 91 negative_sampler """basic""" +440 91 evaluator """rankbased""" +440 92 dataset """kinships""" +440 92 model """ntn""" +440 92 loss """softplus""" +440 92 regularizer """no""" +440 92 optimizer """adam""" +440 92 training_loop """owa""" +440 92 negative_sampler """basic""" +440 92 evaluator """rankbased""" +440 93 dataset """kinships""" +440 93 model """ntn""" +440 93 loss """softplus""" +440 93 regularizer """no""" +440 93 optimizer """adam""" +440 93 training_loop """owa""" +440 93 negative_sampler """basic""" +440 93 evaluator """rankbased""" +440 94 dataset """kinships""" +440 94 model """ntn""" +440 94 loss """softplus""" +440 94 regularizer """no""" +440 94 optimizer """adam""" +440 94 training_loop """owa""" +440 94 negative_sampler """basic""" +440 94 evaluator """rankbased""" +440 95 dataset """kinships""" +440 95 model """ntn""" +440 95 loss """softplus""" +440 95 regularizer """no""" +440 95 optimizer """adam""" +440 95 training_loop """owa""" +440 95 negative_sampler """basic""" +440 95 evaluator """rankbased""" +440 96 dataset """kinships""" +440 96 model """ntn""" +440 96 loss """softplus""" +440 96 regularizer """no""" +440 96 optimizer """adam""" +440 96 training_loop """owa""" +440 96 negative_sampler """basic""" +440 96 evaluator """rankbased""" +440 97 dataset """kinships""" +440 97 model """ntn""" +440 97 loss """softplus""" +440 97 regularizer """no""" +440 97 optimizer """adam""" +440 97 training_loop """owa""" +440 97 negative_sampler """basic""" +440 97 evaluator """rankbased""" +440 98 dataset """kinships""" +440 98 model """ntn""" +440 98 loss """softplus""" +440 98 regularizer """no""" +440 98 optimizer """adam""" +440 98 training_loop """owa""" +440 98 negative_sampler """basic""" +440 98 evaluator """rankbased""" +440 99 dataset """kinships""" +440 99 model """ntn""" +440 99 loss """softplus""" +440 99 regularizer """no""" 
+440 99 optimizer """adam""" +440 99 training_loop """owa""" +440 99 negative_sampler """basic""" +440 99 evaluator """rankbased""" +440 100 dataset """kinships""" +440 100 model """ntn""" +440 100 loss """softplus""" +440 100 regularizer """no""" +440 100 optimizer """adam""" +440 100 training_loop """owa""" +440 100 negative_sampler """basic""" +440 100 evaluator """rankbased""" +441 1 model.embedding_dim 1.0 +441 1 loss.margin 4.383456865147848 +441 1 loss.adversarial_temperature 0.4400424905132265 +441 1 optimizer.lr 0.0023410665014244932 +441 1 negative_sampler.num_negs_per_pos 68.0 +441 1 training.batch_size 1.0 +441 2 model.embedding_dim 2.0 +441 2 loss.margin 4.465891359360009 +441 2 loss.adversarial_temperature 0.9328634394347814 +441 2 optimizer.lr 0.010089052237173123 +441 2 negative_sampler.num_negs_per_pos 52.0 +441 2 training.batch_size 2.0 +441 3 model.embedding_dim 1.0 +441 3 loss.margin 16.632655192542856 +441 3 loss.adversarial_temperature 0.19771960719964896 +441 3 optimizer.lr 0.02089380314121694 +441 3 negative_sampler.num_negs_per_pos 84.0 +441 3 training.batch_size 2.0 +441 4 model.embedding_dim 2.0 +441 4 loss.margin 2.354459297766628 +441 4 loss.adversarial_temperature 0.9357460369088109 +441 4 optimizer.lr 0.09357639051457861 +441 4 negative_sampler.num_negs_per_pos 67.0 +441 4 training.batch_size 2.0 +441 5 model.embedding_dim 2.0 +441 5 loss.margin 29.611939920185858 +441 5 loss.adversarial_temperature 0.9594278751939967 +441 5 optimizer.lr 0.06414269818822681 +441 5 negative_sampler.num_negs_per_pos 85.0 +441 5 training.batch_size 0.0 +441 6 model.embedding_dim 0.0 +441 6 loss.margin 8.373871185134522 +441 6 loss.adversarial_temperature 0.896746646122869 +441 6 optimizer.lr 0.07021222673896028 +441 6 negative_sampler.num_negs_per_pos 21.0 +441 6 training.batch_size 2.0 +441 7 model.embedding_dim 0.0 +441 7 loss.margin 25.887875753825618 +441 7 loss.adversarial_temperature 0.17221554834299407 +441 7 optimizer.lr 0.0021830475805706363 
+441 7 negative_sampler.num_negs_per_pos 48.0 +441 7 training.batch_size 0.0 +441 8 model.embedding_dim 1.0 +441 8 loss.margin 1.3889878635434683 +441 8 loss.adversarial_temperature 0.9404856473084625 +441 8 optimizer.lr 0.03427225885212048 +441 8 negative_sampler.num_negs_per_pos 36.0 +441 8 training.batch_size 0.0 +441 9 model.embedding_dim 0.0 +441 9 loss.margin 3.1148539008412652 +441 9 loss.adversarial_temperature 0.652982775664127 +441 9 optimizer.lr 0.0018232037683241157 +441 9 negative_sampler.num_negs_per_pos 89.0 +441 9 training.batch_size 1.0 +441 10 model.embedding_dim 0.0 +441 10 loss.margin 28.224865270125193 +441 10 loss.adversarial_temperature 0.3292433628377281 +441 10 optimizer.lr 0.05881346117707407 +441 10 negative_sampler.num_negs_per_pos 22.0 +441 10 training.batch_size 2.0 +441 11 model.embedding_dim 0.0 +441 11 loss.margin 5.797815401575759 +441 11 loss.adversarial_temperature 0.4590685219943269 +441 11 optimizer.lr 0.007131763963095264 +441 11 negative_sampler.num_negs_per_pos 30.0 +441 11 training.batch_size 2.0 +441 12 model.embedding_dim 2.0 +441 12 loss.margin 24.77442321768994 +441 12 loss.adversarial_temperature 0.4314924930533174 +441 12 optimizer.lr 0.0054647262381616905 +441 12 negative_sampler.num_negs_per_pos 4.0 +441 12 training.batch_size 1.0 +441 13 model.embedding_dim 0.0 +441 13 loss.margin 1.9523469692486564 +441 13 loss.adversarial_temperature 0.6256416740240696 +441 13 optimizer.lr 0.04062101898801164 +441 13 negative_sampler.num_negs_per_pos 87.0 +441 13 training.batch_size 0.0 +441 14 model.embedding_dim 0.0 +441 14 loss.margin 14.30857095927157 +441 14 loss.adversarial_temperature 0.46656275076545894 +441 14 optimizer.lr 0.003366263982173454 +441 14 negative_sampler.num_negs_per_pos 22.0 +441 14 training.batch_size 2.0 +441 15 model.embedding_dim 1.0 +441 15 loss.margin 2.6853401241654993 +441 15 loss.adversarial_temperature 0.9634032813827457 +441 15 optimizer.lr 0.06254152911983617 +441 15 
negative_sampler.num_negs_per_pos 88.0 +441 15 training.batch_size 2.0 +441 16 model.embedding_dim 1.0 +441 16 loss.margin 25.51797667544831 +441 16 loss.adversarial_temperature 0.2894175967631718 +441 16 optimizer.lr 0.09927860262418547 +441 16 negative_sampler.num_negs_per_pos 0.0 +441 16 training.batch_size 0.0 +441 17 model.embedding_dim 0.0 +441 17 loss.margin 12.822864545710326 +441 17 loss.adversarial_temperature 0.24506353542046516 +441 17 optimizer.lr 0.08473001533697744 +441 17 negative_sampler.num_negs_per_pos 37.0 +441 17 training.batch_size 1.0 +441 18 model.embedding_dim 1.0 +441 18 loss.margin 29.75559201020283 +441 18 loss.adversarial_temperature 0.4373739351419199 +441 18 optimizer.lr 0.014674767796617935 +441 18 negative_sampler.num_negs_per_pos 24.0 +441 18 training.batch_size 2.0 +441 19 model.embedding_dim 2.0 +441 19 loss.margin 1.600747272984063 +441 19 loss.adversarial_temperature 0.3653639713828175 +441 19 optimizer.lr 0.0011410503996230974 +441 19 negative_sampler.num_negs_per_pos 1.0 +441 19 training.batch_size 1.0 +441 20 model.embedding_dim 0.0 +441 20 loss.margin 15.086681491033858 +441 20 loss.adversarial_temperature 0.8358834741399742 +441 20 optimizer.lr 0.011315520342107867 +441 20 negative_sampler.num_negs_per_pos 67.0 +441 20 training.batch_size 1.0 +441 21 model.embedding_dim 2.0 +441 21 loss.margin 4.742343316622678 +441 21 loss.adversarial_temperature 0.3837825279004108 +441 21 optimizer.lr 0.005255575035444855 +441 21 negative_sampler.num_negs_per_pos 41.0 +441 21 training.batch_size 2.0 +441 22 model.embedding_dim 2.0 +441 22 loss.margin 24.7518874961045 +441 22 loss.adversarial_temperature 0.19486884122100817 +441 22 optimizer.lr 0.003944516114470629 +441 22 negative_sampler.num_negs_per_pos 28.0 +441 22 training.batch_size 1.0 +441 23 model.embedding_dim 0.0 +441 23 loss.margin 8.367413243945165 +441 23 loss.adversarial_temperature 0.6995932415875982 +441 23 optimizer.lr 0.006297141085450807 +441 23 
negative_sampler.num_negs_per_pos 15.0 +441 23 training.batch_size 2.0 +441 24 model.embedding_dim 0.0 +441 24 loss.margin 2.547058300024552 +441 24 loss.adversarial_temperature 0.2609321247356585 +441 24 optimizer.lr 0.02640123586557679 +441 24 negative_sampler.num_negs_per_pos 62.0 +441 24 training.batch_size 0.0 +441 25 model.embedding_dim 0.0 +441 25 loss.margin 14.763629862358428 +441 25 loss.adversarial_temperature 0.925278833693659 +441 25 optimizer.lr 0.0031496313464883037 +441 25 negative_sampler.num_negs_per_pos 34.0 +441 25 training.batch_size 2.0 +441 26 model.embedding_dim 2.0 +441 26 loss.margin 29.6906699066656 +441 26 loss.adversarial_temperature 0.6431547532555946 +441 26 optimizer.lr 0.00416712328624679 +441 26 negative_sampler.num_negs_per_pos 52.0 +441 26 training.batch_size 1.0 +441 27 model.embedding_dim 2.0 +441 27 loss.margin 24.553597221092218 +441 27 loss.adversarial_temperature 0.8613631750283869 +441 27 optimizer.lr 0.033292636781692794 +441 27 negative_sampler.num_negs_per_pos 17.0 +441 27 training.batch_size 0.0 +441 28 model.embedding_dim 1.0 +441 28 loss.margin 24.00869729913572 +441 28 loss.adversarial_temperature 0.4519594584679193 +441 28 optimizer.lr 0.0016498760367809572 +441 28 negative_sampler.num_negs_per_pos 58.0 +441 28 training.batch_size 1.0 +441 29 model.embedding_dim 2.0 +441 29 loss.margin 17.96926587047676 +441 29 loss.adversarial_temperature 0.783986278935628 +441 29 optimizer.lr 0.010501591261216879 +441 29 negative_sampler.num_negs_per_pos 3.0 +441 29 training.batch_size 0.0 +441 30 model.embedding_dim 2.0 +441 30 loss.margin 25.56996656877906 +441 30 loss.adversarial_temperature 0.3021543971810305 +441 30 optimizer.lr 0.009543989740701095 +441 30 negative_sampler.num_negs_per_pos 46.0 +441 30 training.batch_size 0.0 +441 31 model.embedding_dim 0.0 +441 31 loss.margin 22.87934647473042 +441 31 loss.adversarial_temperature 0.9962950520080234 +441 31 optimizer.lr 0.01920314177986606 +441 31 
negative_sampler.num_negs_per_pos 2.0 +441 31 training.batch_size 0.0 +441 32 model.embedding_dim 2.0 +441 32 loss.margin 27.07414515004543 +441 32 loss.adversarial_temperature 0.2660622859365034 +441 32 optimizer.lr 0.002031990189262607 +441 32 negative_sampler.num_negs_per_pos 76.0 +441 32 training.batch_size 0.0 +441 33 model.embedding_dim 1.0 +441 33 loss.margin 24.171669156949903 +441 33 loss.adversarial_temperature 0.931308219646278 +441 33 optimizer.lr 0.010488810594588786 +441 33 negative_sampler.num_negs_per_pos 6.0 +441 33 training.batch_size 1.0 +441 34 model.embedding_dim 2.0 +441 34 loss.margin 19.690936935503213 +441 34 loss.adversarial_temperature 0.442183067547822 +441 34 optimizer.lr 0.007010354416801005 +441 34 negative_sampler.num_negs_per_pos 65.0 +441 34 training.batch_size 0.0 +441 35 model.embedding_dim 0.0 +441 35 loss.margin 9.008525893271733 +441 35 loss.adversarial_temperature 0.8674894106510427 +441 35 optimizer.lr 0.05793827626690036 +441 35 negative_sampler.num_negs_per_pos 48.0 +441 35 training.batch_size 0.0 +441 36 model.embedding_dim 0.0 +441 36 loss.margin 12.2963905530461 +441 36 loss.adversarial_temperature 0.35227291717698783 +441 36 optimizer.lr 0.002753962770128939 +441 36 negative_sampler.num_negs_per_pos 17.0 +441 36 training.batch_size 0.0 +441 37 model.embedding_dim 1.0 +441 37 loss.margin 2.5180765761593684 +441 37 loss.adversarial_temperature 0.6315413207134883 +441 37 optimizer.lr 0.003757057208566662 +441 37 negative_sampler.num_negs_per_pos 10.0 +441 37 training.batch_size 0.0 +441 38 model.embedding_dim 1.0 +441 38 loss.margin 25.389330296615896 +441 38 loss.adversarial_temperature 0.6612442378750036 +441 38 optimizer.lr 0.0197260808623941 +441 38 negative_sampler.num_negs_per_pos 70.0 +441 38 training.batch_size 1.0 +441 39 model.embedding_dim 0.0 +441 39 loss.margin 25.81592244528895 +441 39 loss.adversarial_temperature 0.3960169400185414 +441 39 optimizer.lr 0.08468683811005649 +441 39 
negative_sampler.num_negs_per_pos 43.0 +441 39 training.batch_size 2.0 +441 40 model.embedding_dim 2.0 +441 40 loss.margin 4.484489808038571 +441 40 loss.adversarial_temperature 0.9352804132283132 +441 40 optimizer.lr 0.01878962560761085 +441 40 negative_sampler.num_negs_per_pos 72.0 +441 40 training.batch_size 1.0 +441 41 model.embedding_dim 2.0 +441 41 loss.margin 7.575336176016084 +441 41 loss.adversarial_temperature 0.5446043944791372 +441 41 optimizer.lr 0.03243435568334114 +441 41 negative_sampler.num_negs_per_pos 49.0 +441 41 training.batch_size 1.0 +441 42 model.embedding_dim 2.0 +441 42 loss.margin 28.01973206895326 +441 42 loss.adversarial_temperature 0.39688021509833593 +441 42 optimizer.lr 0.014925214785924177 +441 42 negative_sampler.num_negs_per_pos 28.0 +441 42 training.batch_size 2.0 +441 43 model.embedding_dim 1.0 +441 43 loss.margin 1.5716295866458265 +441 43 loss.adversarial_temperature 0.9680231430772122 +441 43 optimizer.lr 0.01094626050684152 +441 43 negative_sampler.num_negs_per_pos 86.0 +441 43 training.batch_size 1.0 +441 44 model.embedding_dim 0.0 +441 44 loss.margin 5.804956400224688 +441 44 loss.adversarial_temperature 0.7110618003010886 +441 44 optimizer.lr 0.014942363909135177 +441 44 negative_sampler.num_negs_per_pos 73.0 +441 44 training.batch_size 2.0 +441 45 model.embedding_dim 0.0 +441 45 loss.margin 4.588780623162825 +441 45 loss.adversarial_temperature 0.4476352155292045 +441 45 optimizer.lr 0.020306602030566712 +441 45 negative_sampler.num_negs_per_pos 48.0 +441 45 training.batch_size 1.0 +441 46 model.embedding_dim 0.0 +441 46 loss.margin 3.97323254747692 +441 46 loss.adversarial_temperature 0.9247938369505916 +441 46 optimizer.lr 0.028359538976361565 +441 46 negative_sampler.num_negs_per_pos 66.0 +441 46 training.batch_size 1.0 +441 47 model.embedding_dim 0.0 +441 47 loss.margin 20.33861009545391 +441 47 loss.adversarial_temperature 0.11638832819176471 +441 47 optimizer.lr 0.029096677676818186 +441 47 
negative_sampler.num_negs_per_pos 85.0 +441 47 training.batch_size 0.0 +441 1 dataset """kinships""" +441 1 model """ntn""" +441 1 loss """nssa""" +441 1 regularizer """no""" +441 1 optimizer """adam""" +441 1 training_loop """owa""" +441 1 negative_sampler """basic""" +441 1 evaluator """rankbased""" +441 2 dataset """kinships""" +441 2 model """ntn""" +441 2 loss """nssa""" +441 2 regularizer """no""" +441 2 optimizer """adam""" +441 2 training_loop """owa""" +441 2 negative_sampler """basic""" +441 2 evaluator """rankbased""" +441 3 dataset """kinships""" +441 3 model """ntn""" +441 3 loss """nssa""" +441 3 regularizer """no""" +441 3 optimizer """adam""" +441 3 training_loop """owa""" +441 3 negative_sampler """basic""" +441 3 evaluator """rankbased""" +441 4 dataset """kinships""" +441 4 model """ntn""" +441 4 loss """nssa""" +441 4 regularizer """no""" +441 4 optimizer """adam""" +441 4 training_loop """owa""" +441 4 negative_sampler """basic""" +441 4 evaluator """rankbased""" +441 5 dataset """kinships""" +441 5 model """ntn""" +441 5 loss """nssa""" +441 5 regularizer """no""" +441 5 optimizer """adam""" +441 5 training_loop """owa""" +441 5 negative_sampler """basic""" +441 5 evaluator """rankbased""" +441 6 dataset """kinships""" +441 6 model """ntn""" +441 6 loss """nssa""" +441 6 regularizer """no""" +441 6 optimizer """adam""" +441 6 training_loop """owa""" +441 6 negative_sampler """basic""" +441 6 evaluator """rankbased""" +441 7 dataset """kinships""" +441 7 model """ntn""" +441 7 loss """nssa""" +441 7 regularizer """no""" +441 7 optimizer """adam""" +441 7 training_loop """owa""" +441 7 negative_sampler """basic""" +441 7 evaluator """rankbased""" +441 8 dataset """kinships""" +441 8 model """ntn""" +441 8 loss """nssa""" +441 8 regularizer """no""" +441 8 optimizer """adam""" +441 8 training_loop """owa""" +441 8 negative_sampler """basic""" +441 8 evaluator """rankbased""" +441 9 dataset """kinships""" +441 9 model """ntn""" +441 9 loss 
"""nssa""" +441 9 regularizer """no""" +441 9 optimizer """adam""" +441 9 training_loop """owa""" +441 9 negative_sampler """basic""" +441 9 evaluator """rankbased""" +441 10 dataset """kinships""" +441 10 model """ntn""" +441 10 loss """nssa""" +441 10 regularizer """no""" +441 10 optimizer """adam""" +441 10 training_loop """owa""" +441 10 negative_sampler """basic""" +441 10 evaluator """rankbased""" +441 11 dataset """kinships""" +441 11 model """ntn""" +441 11 loss """nssa""" +441 11 regularizer """no""" +441 11 optimizer """adam""" +441 11 training_loop """owa""" +441 11 negative_sampler """basic""" +441 11 evaluator """rankbased""" +441 12 dataset """kinships""" +441 12 model """ntn""" +441 12 loss """nssa""" +441 12 regularizer """no""" +441 12 optimizer """adam""" +441 12 training_loop """owa""" +441 12 negative_sampler """basic""" +441 12 evaluator """rankbased""" +441 13 dataset """kinships""" +441 13 model """ntn""" +441 13 loss """nssa""" +441 13 regularizer """no""" +441 13 optimizer """adam""" +441 13 training_loop """owa""" +441 13 negative_sampler """basic""" +441 13 evaluator """rankbased""" +441 14 dataset """kinships""" +441 14 model """ntn""" +441 14 loss """nssa""" +441 14 regularizer """no""" +441 14 optimizer """adam""" +441 14 training_loop """owa""" +441 14 negative_sampler """basic""" +441 14 evaluator """rankbased""" +441 15 dataset """kinships""" +441 15 model """ntn""" +441 15 loss """nssa""" +441 15 regularizer """no""" +441 15 optimizer """adam""" +441 15 training_loop """owa""" +441 15 negative_sampler """basic""" +441 15 evaluator """rankbased""" +441 16 dataset """kinships""" +441 16 model """ntn""" +441 16 loss """nssa""" +441 16 regularizer """no""" +441 16 optimizer """adam""" +441 16 training_loop """owa""" +441 16 negative_sampler """basic""" +441 16 evaluator """rankbased""" +441 17 dataset """kinships""" +441 17 model """ntn""" +441 17 loss """nssa""" +441 17 regularizer """no""" +441 17 optimizer """adam""" +441 17 
training_loop """owa""" +441 17 negative_sampler """basic""" +441 17 evaluator """rankbased""" +441 18 dataset """kinships""" +441 18 model """ntn""" +441 18 loss """nssa""" +441 18 regularizer """no""" +441 18 optimizer """adam""" +441 18 training_loop """owa""" +441 18 negative_sampler """basic""" +441 18 evaluator """rankbased""" +441 19 dataset """kinships""" +441 19 model """ntn""" +441 19 loss """nssa""" +441 19 regularizer """no""" +441 19 optimizer """adam""" +441 19 training_loop """owa""" +441 19 negative_sampler """basic""" +441 19 evaluator """rankbased""" +441 20 dataset """kinships""" +441 20 model """ntn""" +441 20 loss """nssa""" +441 20 regularizer """no""" +441 20 optimizer """adam""" +441 20 training_loop """owa""" +441 20 negative_sampler """basic""" +441 20 evaluator """rankbased""" +441 21 dataset """kinships""" +441 21 model """ntn""" +441 21 loss """nssa""" +441 21 regularizer """no""" +441 21 optimizer """adam""" +441 21 training_loop """owa""" +441 21 negative_sampler """basic""" +441 21 evaluator """rankbased""" +441 22 dataset """kinships""" +441 22 model """ntn""" +441 22 loss """nssa""" +441 22 regularizer """no""" +441 22 optimizer """adam""" +441 22 training_loop """owa""" +441 22 negative_sampler """basic""" +441 22 evaluator """rankbased""" +441 23 dataset """kinships""" +441 23 model """ntn""" +441 23 loss """nssa""" +441 23 regularizer """no""" +441 23 optimizer """adam""" +441 23 training_loop """owa""" +441 23 negative_sampler """basic""" +441 23 evaluator """rankbased""" +441 24 dataset """kinships""" +441 24 model """ntn""" +441 24 loss """nssa""" +441 24 regularizer """no""" +441 24 optimizer """adam""" +441 24 training_loop """owa""" +441 24 negative_sampler """basic""" +441 24 evaluator """rankbased""" +441 25 dataset """kinships""" +441 25 model """ntn""" +441 25 loss """nssa""" +441 25 regularizer """no""" +441 25 optimizer """adam""" +441 25 training_loop """owa""" +441 25 negative_sampler """basic""" +441 25 evaluator 
"""rankbased""" +441 26 dataset """kinships""" +441 26 model """ntn""" +441 26 loss """nssa""" +441 26 regularizer """no""" +441 26 optimizer """adam""" +441 26 training_loop """owa""" +441 26 negative_sampler """basic""" +441 26 evaluator """rankbased""" +441 27 dataset """kinships""" +441 27 model """ntn""" +441 27 loss """nssa""" +441 27 regularizer """no""" +441 27 optimizer """adam""" +441 27 training_loop """owa""" +441 27 negative_sampler """basic""" +441 27 evaluator """rankbased""" +441 28 dataset """kinships""" +441 28 model """ntn""" +441 28 loss """nssa""" +441 28 regularizer """no""" +441 28 optimizer """adam""" +441 28 training_loop """owa""" +441 28 negative_sampler """basic""" +441 28 evaluator """rankbased""" +441 29 dataset """kinships""" +441 29 model """ntn""" +441 29 loss """nssa""" +441 29 regularizer """no""" +441 29 optimizer """adam""" +441 29 training_loop """owa""" +441 29 negative_sampler """basic""" +441 29 evaluator """rankbased""" +441 30 dataset """kinships""" +441 30 model """ntn""" +441 30 loss """nssa""" +441 30 regularizer """no""" +441 30 optimizer """adam""" +441 30 training_loop """owa""" +441 30 negative_sampler """basic""" +441 30 evaluator """rankbased""" +441 31 dataset """kinships""" +441 31 model """ntn""" +441 31 loss """nssa""" +441 31 regularizer """no""" +441 31 optimizer """adam""" +441 31 training_loop """owa""" +441 31 negative_sampler """basic""" +441 31 evaluator """rankbased""" +441 32 dataset """kinships""" +441 32 model """ntn""" +441 32 loss """nssa""" +441 32 regularizer """no""" +441 32 optimizer """adam""" +441 32 training_loop """owa""" +441 32 negative_sampler """basic""" +441 32 evaluator """rankbased""" +441 33 dataset """kinships""" +441 33 model """ntn""" +441 33 loss """nssa""" +441 33 regularizer """no""" +441 33 optimizer """adam""" +441 33 training_loop """owa""" +441 33 negative_sampler """basic""" +441 33 evaluator """rankbased""" +441 34 dataset """kinships""" +441 34 model """ntn""" +441 34 
loss """nssa""" +441 34 regularizer """no""" +441 34 optimizer """adam""" +441 34 training_loop """owa""" +441 34 negative_sampler """basic""" +441 34 evaluator """rankbased""" +441 35 dataset """kinships""" +441 35 model """ntn""" +441 35 loss """nssa""" +441 35 regularizer """no""" +441 35 optimizer """adam""" +441 35 training_loop """owa""" +441 35 negative_sampler """basic""" +441 35 evaluator """rankbased""" +441 36 dataset """kinships""" +441 36 model """ntn""" +441 36 loss """nssa""" +441 36 regularizer """no""" +441 36 optimizer """adam""" +441 36 training_loop """owa""" +441 36 negative_sampler """basic""" +441 36 evaluator """rankbased""" +441 37 dataset """kinships""" +441 37 model """ntn""" +441 37 loss """nssa""" +441 37 regularizer """no""" +441 37 optimizer """adam""" +441 37 training_loop """owa""" +441 37 negative_sampler """basic""" +441 37 evaluator """rankbased""" +441 38 dataset """kinships""" +441 38 model """ntn""" +441 38 loss """nssa""" +441 38 regularizer """no""" +441 38 optimizer """adam""" +441 38 training_loop """owa""" +441 38 negative_sampler """basic""" +441 38 evaluator """rankbased""" +441 39 dataset """kinships""" +441 39 model """ntn""" +441 39 loss """nssa""" +441 39 regularizer """no""" +441 39 optimizer """adam""" +441 39 training_loop """owa""" +441 39 negative_sampler """basic""" +441 39 evaluator """rankbased""" +441 40 dataset """kinships""" +441 40 model """ntn""" +441 40 loss """nssa""" +441 40 regularizer """no""" +441 40 optimizer """adam""" +441 40 training_loop """owa""" +441 40 negative_sampler """basic""" +441 40 evaluator """rankbased""" +441 41 dataset """kinships""" +441 41 model """ntn""" +441 41 loss """nssa""" +441 41 regularizer """no""" +441 41 optimizer """adam""" +441 41 training_loop """owa""" +441 41 negative_sampler """basic""" +441 41 evaluator """rankbased""" +441 42 dataset """kinships""" +441 42 model """ntn""" +441 42 loss """nssa""" +441 42 regularizer """no""" +441 42 optimizer """adam""" +441 
42 training_loop """owa""" +441 42 negative_sampler """basic""" +441 42 evaluator """rankbased""" +441 43 dataset """kinships""" +441 43 model """ntn""" +441 43 loss """nssa""" +441 43 regularizer """no""" +441 43 optimizer """adam""" +441 43 training_loop """owa""" +441 43 negative_sampler """basic""" +441 43 evaluator """rankbased""" +441 44 dataset """kinships""" +441 44 model """ntn""" +441 44 loss """nssa""" +441 44 regularizer """no""" +441 44 optimizer """adam""" +441 44 training_loop """owa""" +441 44 negative_sampler """basic""" +441 44 evaluator """rankbased""" +441 45 dataset """kinships""" +441 45 model """ntn""" +441 45 loss """nssa""" +441 45 regularizer """no""" +441 45 optimizer """adam""" +441 45 training_loop """owa""" +441 45 negative_sampler """basic""" +441 45 evaluator """rankbased""" +441 46 dataset """kinships""" +441 46 model """ntn""" +441 46 loss """nssa""" +441 46 regularizer """no""" +441 46 optimizer """adam""" +441 46 training_loop """owa""" +441 46 negative_sampler """basic""" +441 46 evaluator """rankbased""" +441 47 dataset """kinships""" +441 47 model """ntn""" +441 47 loss """nssa""" +441 47 regularizer """no""" +441 47 optimizer """adam""" +441 47 training_loop """owa""" +441 47 negative_sampler """basic""" +441 47 evaluator """rankbased""" +442 1 model.embedding_dim 2.0 +442 1 loss.margin 21.120272976577645 +442 1 loss.adversarial_temperature 0.351843949076345 +442 1 optimizer.lr 0.0025785701745235706 +442 1 negative_sampler.num_negs_per_pos 68.0 +442 1 training.batch_size 0.0 +442 2 model.embedding_dim 2.0 +442 2 loss.margin 27.822251451136424 +442 2 loss.adversarial_temperature 0.41211496240552625 +442 2 optimizer.lr 0.015387035644343652 +442 2 negative_sampler.num_negs_per_pos 6.0 +442 2 training.batch_size 0.0 +442 3 model.embedding_dim 1.0 +442 3 loss.margin 26.809706837480192 +442 3 loss.adversarial_temperature 0.7148029875100416 +442 3 optimizer.lr 0.014507199041146746 +442 3 negative_sampler.num_negs_per_pos 91.0 +442 3 
training.batch_size 1.0 +442 4 model.embedding_dim 1.0 +442 4 loss.margin 10.266513688216193 +442 4 loss.adversarial_temperature 0.8451738231330195 +442 4 optimizer.lr 0.0011127060928739587 +442 4 negative_sampler.num_negs_per_pos 15.0 +442 4 training.batch_size 0.0 +442 5 model.embedding_dim 2.0 +442 5 loss.margin 24.26317701340239 +442 5 loss.adversarial_temperature 0.7894070073187226 +442 5 optimizer.lr 0.004007834959950341 +442 5 negative_sampler.num_negs_per_pos 24.0 +442 5 training.batch_size 0.0 +442 6 model.embedding_dim 2.0 +442 6 loss.margin 16.89073639211476 +442 6 loss.adversarial_temperature 0.36601592550226214 +442 6 optimizer.lr 0.025046875728256108 +442 6 negative_sampler.num_negs_per_pos 4.0 +442 6 training.batch_size 0.0 +442 7 model.embedding_dim 1.0 +442 7 loss.margin 14.486363450602058 +442 7 loss.adversarial_temperature 0.9208905868600618 +442 7 optimizer.lr 0.0038201427379746184 +442 7 negative_sampler.num_negs_per_pos 72.0 +442 7 training.batch_size 2.0 +442 8 model.embedding_dim 2.0 +442 8 loss.margin 26.63952993900583 +442 8 loss.adversarial_temperature 0.7740453829726446 +442 8 optimizer.lr 0.0014654420712130517 +442 8 negative_sampler.num_negs_per_pos 60.0 +442 8 training.batch_size 2.0 +442 9 model.embedding_dim 2.0 +442 9 loss.margin 7.823361889543298 +442 9 loss.adversarial_temperature 0.4804131746231254 +442 9 optimizer.lr 0.0015517190313044872 +442 9 negative_sampler.num_negs_per_pos 36.0 +442 9 training.batch_size 1.0 +442 10 model.embedding_dim 0.0 +442 10 loss.margin 21.645522507944488 +442 10 loss.adversarial_temperature 0.6948643529110092 +442 10 optimizer.lr 0.0015850453286102141 +442 10 negative_sampler.num_negs_per_pos 59.0 +442 10 training.batch_size 2.0 +442 11 model.embedding_dim 1.0 +442 11 loss.margin 11.240651979026843 +442 11 loss.adversarial_temperature 0.8373216919951527 +442 11 optimizer.lr 0.009652988028981652 +442 11 negative_sampler.num_negs_per_pos 16.0 +442 11 training.batch_size 2.0 +442 12 
model.embedding_dim 0.0 +442 12 loss.margin 4.561427322530653 +442 12 loss.adversarial_temperature 0.2505055068839915 +442 12 optimizer.lr 0.001531565816640613 +442 12 negative_sampler.num_negs_per_pos 58.0 +442 12 training.batch_size 2.0 +442 13 model.embedding_dim 2.0 +442 13 loss.margin 2.486205060471283 +442 13 loss.adversarial_temperature 0.7430074088796533 +442 13 optimizer.lr 0.08850887295064902 +442 13 negative_sampler.num_negs_per_pos 13.0 +442 13 training.batch_size 2.0 +442 14 model.embedding_dim 2.0 +442 14 loss.margin 9.94560431463014 +442 14 loss.adversarial_temperature 0.6914568091653149 +442 14 optimizer.lr 0.0012113884540607257 +442 14 negative_sampler.num_negs_per_pos 7.0 +442 14 training.batch_size 2.0 +442 15 model.embedding_dim 0.0 +442 15 loss.margin 17.769171667958222 +442 15 loss.adversarial_temperature 0.4801814453189177 +442 15 optimizer.lr 0.0020364616247006083 +442 15 negative_sampler.num_negs_per_pos 39.0 +442 15 training.batch_size 0.0 +442 16 model.embedding_dim 0.0 +442 16 loss.margin 24.8028644265042 +442 16 loss.adversarial_temperature 0.20169148180898117 +442 16 optimizer.lr 0.04176451279992917 +442 16 negative_sampler.num_negs_per_pos 21.0 +442 16 training.batch_size 2.0 +442 17 model.embedding_dim 2.0 +442 17 loss.margin 16.044046639024675 +442 17 loss.adversarial_temperature 0.5622111011604767 +442 17 optimizer.lr 0.010979962151459466 +442 17 negative_sampler.num_negs_per_pos 98.0 +442 17 training.batch_size 1.0 +442 18 model.embedding_dim 0.0 +442 18 loss.margin 8.195465107423113 +442 18 loss.adversarial_temperature 0.2674463016646481 +442 18 optimizer.lr 0.01123978784370367 +442 18 negative_sampler.num_negs_per_pos 30.0 +442 18 training.batch_size 2.0 +442 19 model.embedding_dim 2.0 +442 19 loss.margin 26.883209104357032 +442 19 loss.adversarial_temperature 0.5325397305345686 +442 19 optimizer.lr 0.0030949555910723086 +442 19 negative_sampler.num_negs_per_pos 56.0 +442 19 training.batch_size 1.0 +442 20 model.embedding_dim 
2.0 +442 20 loss.margin 19.64160265310282 +442 20 loss.adversarial_temperature 0.9739045313439257 +442 20 optimizer.lr 0.002912960546033022 +442 20 negative_sampler.num_negs_per_pos 80.0 +442 20 training.batch_size 1.0 +442 21 model.embedding_dim 2.0 +442 21 loss.margin 9.140021926027579 +442 21 loss.adversarial_temperature 0.32217814235888276 +442 21 optimizer.lr 0.007139888825586045 +442 21 negative_sampler.num_negs_per_pos 74.0 +442 21 training.batch_size 0.0 +442 22 model.embedding_dim 2.0 +442 22 loss.margin 15.422769474501314 +442 22 loss.adversarial_temperature 0.16298596981331293 +442 22 optimizer.lr 0.007401451437666566 +442 22 negative_sampler.num_negs_per_pos 55.0 +442 22 training.batch_size 1.0 +442 23 model.embedding_dim 2.0 +442 23 loss.margin 7.562449105445942 +442 23 loss.adversarial_temperature 0.13710790291198388 +442 23 optimizer.lr 0.0045623310958945885 +442 23 negative_sampler.num_negs_per_pos 60.0 +442 23 training.batch_size 0.0 +442 24 model.embedding_dim 2.0 +442 24 loss.margin 5.2849298808814 +442 24 loss.adversarial_temperature 0.9705593213105824 +442 24 optimizer.lr 0.018319606518838825 +442 24 negative_sampler.num_negs_per_pos 1.0 +442 24 training.batch_size 2.0 +442 25 model.embedding_dim 0.0 +442 25 loss.margin 18.74990030191784 +442 25 loss.adversarial_temperature 0.6968671707902363 +442 25 optimizer.lr 0.0038859849283510647 +442 25 negative_sampler.num_negs_per_pos 94.0 +442 25 training.batch_size 1.0 +442 26 model.embedding_dim 2.0 +442 26 loss.margin 29.76305068970281 +442 26 loss.adversarial_temperature 0.19244565504801453 +442 26 optimizer.lr 0.004033614953345844 +442 26 negative_sampler.num_negs_per_pos 1.0 +442 26 training.batch_size 1.0 +442 27 model.embedding_dim 2.0 +442 27 loss.margin 2.5515389887704476 +442 27 loss.adversarial_temperature 0.6955421337486332 +442 27 optimizer.lr 0.0030092025921803324 +442 27 negative_sampler.num_negs_per_pos 95.0 +442 27 training.batch_size 0.0 +442 28 model.embedding_dim 2.0 +442 28 
loss.margin 22.96366230784684 +442 28 loss.adversarial_temperature 0.6551093895094873 +442 28 optimizer.lr 0.0011180736763300475 +442 28 negative_sampler.num_negs_per_pos 64.0 +442 28 training.batch_size 1.0 +442 29 model.embedding_dim 1.0 +442 29 loss.margin 3.4833506005058266 +442 29 loss.adversarial_temperature 0.3522889494526448 +442 29 optimizer.lr 0.04672962303186283 +442 29 negative_sampler.num_negs_per_pos 64.0 +442 29 training.batch_size 0.0 +442 30 model.embedding_dim 1.0 +442 30 loss.margin 28.205689682185362 +442 30 loss.adversarial_temperature 0.12032899387129714 +442 30 optimizer.lr 0.008539693546837167 +442 30 negative_sampler.num_negs_per_pos 95.0 +442 30 training.batch_size 2.0 +442 31 model.embedding_dim 0.0 +442 31 loss.margin 6.993053113460052 +442 31 loss.adversarial_temperature 0.542413672868372 +442 31 optimizer.lr 0.007018901341578915 +442 31 negative_sampler.num_negs_per_pos 70.0 +442 31 training.batch_size 0.0 +442 32 model.embedding_dim 1.0 +442 32 loss.margin 19.445109133594 +442 32 loss.adversarial_temperature 0.992815834537338 +442 32 optimizer.lr 0.047239658117118714 +442 32 negative_sampler.num_negs_per_pos 92.0 +442 32 training.batch_size 2.0 +442 33 model.embedding_dim 2.0 +442 33 loss.margin 27.8749358144154 +442 33 loss.adversarial_temperature 0.6032351429593578 +442 33 optimizer.lr 0.004300438783541183 +442 33 negative_sampler.num_negs_per_pos 84.0 +442 33 training.batch_size 2.0 +442 34 model.embedding_dim 1.0 +442 34 loss.margin 17.355616822887008 +442 34 loss.adversarial_temperature 0.7555869597089437 +442 34 optimizer.lr 0.004907616773032572 +442 34 negative_sampler.num_negs_per_pos 79.0 +442 34 training.batch_size 0.0 +442 35 model.embedding_dim 1.0 +442 35 loss.margin 26.474086979545824 +442 35 loss.adversarial_temperature 0.4954592363014949 +442 35 optimizer.lr 0.019046942595878604 +442 35 negative_sampler.num_negs_per_pos 59.0 +442 35 training.batch_size 1.0 +442 36 model.embedding_dim 2.0 +442 36 loss.margin 
13.816168722898185 +442 36 loss.adversarial_temperature 0.8732138156514271 +442 36 optimizer.lr 0.0014430090503569132 +442 36 negative_sampler.num_negs_per_pos 96.0 +442 36 training.batch_size 0.0 +442 37 model.embedding_dim 2.0 +442 37 loss.margin 13.684578681739243 +442 37 loss.adversarial_temperature 0.3277329100523439 +442 37 optimizer.lr 0.009763099230140999 +442 37 negative_sampler.num_negs_per_pos 45.0 +442 37 training.batch_size 2.0 +442 38 model.embedding_dim 2.0 +442 38 loss.margin 11.100388801029531 +442 38 loss.adversarial_temperature 0.41629703676329977 +442 38 optimizer.lr 0.07551533236093566 +442 38 negative_sampler.num_negs_per_pos 39.0 +442 38 training.batch_size 1.0 +442 39 model.embedding_dim 1.0 +442 39 loss.margin 15.013004317174657 +442 39 loss.adversarial_temperature 0.32480907475252374 +442 39 optimizer.lr 0.014108613676865509 +442 39 negative_sampler.num_negs_per_pos 9.0 +442 39 training.batch_size 1.0 +442 40 model.embedding_dim 2.0 +442 40 loss.margin 12.283019628647846 +442 40 loss.adversarial_temperature 0.31725364030617786 +442 40 optimizer.lr 0.0028692673590410027 +442 40 negative_sampler.num_negs_per_pos 78.0 +442 40 training.batch_size 0.0 +442 41 model.embedding_dim 2.0 +442 41 loss.margin 17.16855408183711 +442 41 loss.adversarial_temperature 0.7691567690877702 +442 41 optimizer.lr 0.024040872584788014 +442 41 negative_sampler.num_negs_per_pos 59.0 +442 41 training.batch_size 1.0 +442 42 model.embedding_dim 0.0 +442 42 loss.margin 29.12545406429631 +442 42 loss.adversarial_temperature 0.5766762357207066 +442 42 optimizer.lr 0.03427026864472735 +442 42 negative_sampler.num_negs_per_pos 55.0 +442 42 training.batch_size 2.0 +442 43 model.embedding_dim 0.0 +442 43 loss.margin 4.215821132163789 +442 43 loss.adversarial_temperature 0.22968128581517058 +442 43 optimizer.lr 0.002355380862690284 +442 43 negative_sampler.num_negs_per_pos 49.0 +442 43 training.batch_size 1.0 +442 44 model.embedding_dim 1.0 +442 44 loss.margin 
27.220732902563093 +442 44 loss.adversarial_temperature 0.781470366616147 +442 44 optimizer.lr 0.0022549468923449304 +442 44 negative_sampler.num_negs_per_pos 60.0 +442 44 training.batch_size 0.0 +442 45 model.embedding_dim 1.0 +442 45 loss.margin 25.435148077825364 +442 45 loss.adversarial_temperature 0.3322845030569148 +442 45 optimizer.lr 0.01040069210611706 +442 45 negative_sampler.num_negs_per_pos 40.0 +442 45 training.batch_size 0.0 +442 46 model.embedding_dim 1.0 +442 46 loss.margin 15.963299026162339 +442 46 loss.adversarial_temperature 0.36484611218839913 +442 46 optimizer.lr 0.07284215901831055 +442 46 negative_sampler.num_negs_per_pos 54.0 +442 46 training.batch_size 1.0 +442 47 model.embedding_dim 0.0 +442 47 loss.margin 16.785117101066866 +442 47 loss.adversarial_temperature 0.5945770278626542 +442 47 optimizer.lr 0.003316402729595205 +442 47 negative_sampler.num_negs_per_pos 69.0 +442 47 training.batch_size 1.0 +442 48 model.embedding_dim 0.0 +442 48 loss.margin 17.67871561606233 +442 48 loss.adversarial_temperature 0.7858990407322345 +442 48 optimizer.lr 0.05662913883063748 +442 48 negative_sampler.num_negs_per_pos 51.0 +442 48 training.batch_size 1.0 +442 49 model.embedding_dim 1.0 +442 49 loss.margin 23.56920495397015 +442 49 loss.adversarial_temperature 0.13332050099972786 +442 49 optimizer.lr 0.07212993473402488 +442 49 negative_sampler.num_negs_per_pos 50.0 +442 49 training.batch_size 1.0 +442 50 model.embedding_dim 1.0 +442 50 loss.margin 9.192263736243692 +442 50 loss.adversarial_temperature 0.33559569078209683 +442 50 optimizer.lr 0.005790251699651073 +442 50 negative_sampler.num_negs_per_pos 15.0 +442 50 training.batch_size 0.0 +442 51 model.embedding_dim 2.0 +442 51 loss.margin 27.502272818597536 +442 51 loss.adversarial_temperature 0.3514483574316716 +442 51 optimizer.lr 0.0014251259455337587 +442 51 negative_sampler.num_negs_per_pos 79.0 +442 51 training.batch_size 0.0 +442 52 model.embedding_dim 0.0 +442 52 loss.margin 10.470790450438823 
+442 52 loss.adversarial_temperature 0.15602019010363705 +442 52 optimizer.lr 0.0033445808153411347 +442 52 negative_sampler.num_negs_per_pos 79.0 +442 52 training.batch_size 2.0 +442 53 model.embedding_dim 2.0 +442 53 loss.margin 21.42241230507102 +442 53 loss.adversarial_temperature 0.7888548768405139 +442 53 optimizer.lr 0.002436193908223715 +442 53 negative_sampler.num_negs_per_pos 66.0 +442 53 training.batch_size 2.0 +442 54 model.embedding_dim 2.0 +442 54 loss.margin 27.18862182382221 +442 54 loss.adversarial_temperature 0.47772335044885517 +442 54 optimizer.lr 0.012948247440871108 +442 54 negative_sampler.num_negs_per_pos 6.0 +442 54 training.batch_size 1.0 +442 55 model.embedding_dim 1.0 +442 55 loss.margin 5.724570115198424 +442 55 loss.adversarial_temperature 0.8588804190339333 +442 55 optimizer.lr 0.04263141275633658 +442 55 negative_sampler.num_negs_per_pos 85.0 +442 55 training.batch_size 2.0 +442 56 model.embedding_dim 2.0 +442 56 loss.margin 24.474588557381313 +442 56 loss.adversarial_temperature 0.6089637214626101 +442 56 optimizer.lr 0.005085551657284719 +442 56 negative_sampler.num_negs_per_pos 98.0 +442 56 training.batch_size 1.0 +442 57 model.embedding_dim 1.0 +442 57 loss.margin 25.435132468088874 +442 57 loss.adversarial_temperature 0.5420480360874845 +442 57 optimizer.lr 0.00462397833219833 +442 57 negative_sampler.num_negs_per_pos 75.0 +442 57 training.batch_size 0.0 +442 58 model.embedding_dim 1.0 +442 58 loss.margin 16.76805887029834 +442 58 loss.adversarial_temperature 0.20486512865542178 +442 58 optimizer.lr 0.03507978415614942 +442 58 negative_sampler.num_negs_per_pos 7.0 +442 58 training.batch_size 1.0 +442 59 model.embedding_dim 2.0 +442 59 loss.margin 10.210953199534748 +442 59 loss.adversarial_temperature 0.8943998623713336 +442 59 optimizer.lr 0.006647425860030108 +442 59 negative_sampler.num_negs_per_pos 62.0 +442 59 training.batch_size 1.0 +442 60 model.embedding_dim 0.0 +442 60 loss.margin 6.32327468894538 +442 60 
loss.adversarial_temperature 0.8618456819123436 +442 60 optimizer.lr 0.011727111698281164 +442 60 negative_sampler.num_negs_per_pos 23.0 +442 60 training.batch_size 0.0 +442 61 model.embedding_dim 0.0 +442 61 loss.margin 9.98960289787298 +442 61 loss.adversarial_temperature 0.7512649822540851 +442 61 optimizer.lr 0.00556500634223431 +442 61 negative_sampler.num_negs_per_pos 12.0 +442 61 training.batch_size 1.0 +442 62 model.embedding_dim 0.0 +442 62 loss.margin 21.142968935334444 +442 62 loss.adversarial_temperature 0.30913374242373176 +442 62 optimizer.lr 0.003630857559673639 +442 62 negative_sampler.num_negs_per_pos 51.0 +442 62 training.batch_size 0.0 +442 63 model.embedding_dim 2.0 +442 63 loss.margin 9.400369578542985 +442 63 loss.adversarial_temperature 0.48146562819522587 +442 63 optimizer.lr 0.008907761846009783 +442 63 negative_sampler.num_negs_per_pos 82.0 +442 63 training.batch_size 2.0 +442 64 model.embedding_dim 2.0 +442 64 loss.margin 9.465323536623043 +442 64 loss.adversarial_temperature 0.8609884980207175 +442 64 optimizer.lr 0.0020157513158402768 +442 64 negative_sampler.num_negs_per_pos 11.0 +442 64 training.batch_size 2.0 +442 65 model.embedding_dim 1.0 +442 65 loss.margin 22.6893369706232 +442 65 loss.adversarial_temperature 0.5709609975261596 +442 65 optimizer.lr 0.05650376328259706 +442 65 negative_sampler.num_negs_per_pos 97.0 +442 65 training.batch_size 2.0 +442 66 model.embedding_dim 2.0 +442 66 loss.margin 12.769900960952885 +442 66 loss.adversarial_temperature 0.49128723561907306 +442 66 optimizer.lr 0.02567727778752997 +442 66 negative_sampler.num_negs_per_pos 87.0 +442 66 training.batch_size 1.0 +442 67 model.embedding_dim 1.0 +442 67 loss.margin 3.0834016269030586 +442 67 loss.adversarial_temperature 0.6607066875935337 +442 67 optimizer.lr 0.0019762765579082763 +442 67 negative_sampler.num_negs_per_pos 36.0 +442 67 training.batch_size 2.0 +442 68 model.embedding_dim 0.0 +442 68 loss.margin 13.239934517363219 +442 68 
loss.adversarial_temperature 0.7899195573586335 +442 68 optimizer.lr 0.09520529905119451 +442 68 negative_sampler.num_negs_per_pos 77.0 +442 68 training.batch_size 0.0 +442 69 model.embedding_dim 1.0 +442 69 loss.margin 6.837047358198369 +442 69 loss.adversarial_temperature 0.9046758855596091 +442 69 optimizer.lr 0.028156778039301285 +442 69 negative_sampler.num_negs_per_pos 27.0 +442 69 training.batch_size 0.0 +442 70 model.embedding_dim 2.0 +442 70 loss.margin 7.545263017261356 +442 70 loss.adversarial_temperature 0.5974974112296503 +442 70 optimizer.lr 0.002240824506645984 +442 70 negative_sampler.num_negs_per_pos 58.0 +442 70 training.batch_size 2.0 +442 71 model.embedding_dim 2.0 +442 71 loss.margin 15.325981654906313 +442 71 loss.adversarial_temperature 0.9475572610084618 +442 71 optimizer.lr 0.01217450999697227 +442 71 negative_sampler.num_negs_per_pos 45.0 +442 71 training.batch_size 0.0 +442 72 model.embedding_dim 2.0 +442 72 loss.margin 9.162142677798187 +442 72 loss.adversarial_temperature 0.8914364094745122 +442 72 optimizer.lr 0.005170757069095719 +442 72 negative_sampler.num_negs_per_pos 7.0 +442 72 training.batch_size 0.0 +442 73 model.embedding_dim 2.0 +442 73 loss.margin 27.553932473708777 +442 73 loss.adversarial_temperature 0.1435231571009572 +442 73 optimizer.lr 0.04357042335454293 +442 73 negative_sampler.num_negs_per_pos 91.0 +442 73 training.batch_size 1.0 +442 74 model.embedding_dim 1.0 +442 74 loss.margin 17.416857605057523 +442 74 loss.adversarial_temperature 0.49582657500112093 +442 74 optimizer.lr 0.007043397892702631 +442 74 negative_sampler.num_negs_per_pos 15.0 +442 74 training.batch_size 0.0 +442 75 model.embedding_dim 0.0 +442 75 loss.margin 7.352087029835722 +442 75 loss.adversarial_temperature 0.8042445381322403 +442 75 optimizer.lr 0.0932140759001363 +442 75 negative_sampler.num_negs_per_pos 30.0 +442 75 training.batch_size 0.0 +442 76 model.embedding_dim 1.0 +442 76 loss.margin 13.656185887912867 +442 76 
loss.adversarial_temperature 0.7540670767398466 +442 76 optimizer.lr 0.008995807294730183 +442 76 negative_sampler.num_negs_per_pos 34.0 +442 76 training.batch_size 1.0 +442 77 model.embedding_dim 2.0 +442 77 loss.margin 1.3921407876134155 +442 77 loss.adversarial_temperature 0.7356037284093145 +442 77 optimizer.lr 0.010016102561586753 +442 77 negative_sampler.num_negs_per_pos 58.0 +442 77 training.batch_size 1.0 +442 1 dataset """kinships""" +442 1 model """ntn""" +442 1 loss """nssa""" +442 1 regularizer """no""" +442 1 optimizer """adam""" +442 1 training_loop """owa""" +442 1 negative_sampler """basic""" +442 1 evaluator """rankbased""" +442 2 dataset """kinships""" +442 2 model """ntn""" +442 2 loss """nssa""" +442 2 regularizer """no""" +442 2 optimizer """adam""" +442 2 training_loop """owa""" +442 2 negative_sampler """basic""" +442 2 evaluator """rankbased""" +442 3 dataset """kinships""" +442 3 model """ntn""" +442 3 loss """nssa""" +442 3 regularizer """no""" +442 3 optimizer """adam""" +442 3 training_loop """owa""" +442 3 negative_sampler """basic""" +442 3 evaluator """rankbased""" +442 4 dataset """kinships""" +442 4 model """ntn""" +442 4 loss """nssa""" +442 4 regularizer """no""" +442 4 optimizer """adam""" +442 4 training_loop """owa""" +442 4 negative_sampler """basic""" +442 4 evaluator """rankbased""" +442 5 dataset """kinships""" +442 5 model """ntn""" +442 5 loss """nssa""" +442 5 regularizer """no""" +442 5 optimizer """adam""" +442 5 training_loop """owa""" +442 5 negative_sampler """basic""" +442 5 evaluator """rankbased""" +442 6 dataset """kinships""" +442 6 model """ntn""" +442 6 loss """nssa""" +442 6 regularizer """no""" +442 6 optimizer """adam""" +442 6 training_loop """owa""" +442 6 negative_sampler """basic""" +442 6 evaluator """rankbased""" +442 7 dataset """kinships""" +442 7 model """ntn""" +442 7 loss """nssa""" +442 7 regularizer """no""" +442 7 optimizer """adam""" +442 7 training_loop """owa""" +442 7 negative_sampler 
"""basic""" +442 7 evaluator """rankbased""" +442 8 dataset """kinships""" +442 8 model """ntn""" +442 8 loss """nssa""" +442 8 regularizer """no""" +442 8 optimizer """adam""" +442 8 training_loop """owa""" +442 8 negative_sampler """basic""" +442 8 evaluator """rankbased""" +442 9 dataset """kinships""" +442 9 model """ntn""" +442 9 loss """nssa""" +442 9 regularizer """no""" +442 9 optimizer """adam""" +442 9 training_loop """owa""" +442 9 negative_sampler """basic""" +442 9 evaluator """rankbased""" +442 10 dataset """kinships""" +442 10 model """ntn""" +442 10 loss """nssa""" +442 10 regularizer """no""" +442 10 optimizer """adam""" +442 10 training_loop """owa""" +442 10 negative_sampler """basic""" +442 10 evaluator """rankbased""" +442 11 dataset """kinships""" +442 11 model """ntn""" +442 11 loss """nssa""" +442 11 regularizer """no""" +442 11 optimizer """adam""" +442 11 training_loop """owa""" +442 11 negative_sampler """basic""" +442 11 evaluator """rankbased""" +442 12 dataset """kinships""" +442 12 model """ntn""" +442 12 loss """nssa""" +442 12 regularizer """no""" +442 12 optimizer """adam""" +442 12 training_loop """owa""" +442 12 negative_sampler """basic""" +442 12 evaluator """rankbased""" +442 13 dataset """kinships""" +442 13 model """ntn""" +442 13 loss """nssa""" +442 13 regularizer """no""" +442 13 optimizer """adam""" +442 13 training_loop """owa""" +442 13 negative_sampler """basic""" +442 13 evaluator """rankbased""" +442 14 dataset """kinships""" +442 14 model """ntn""" +442 14 loss """nssa""" +442 14 regularizer """no""" +442 14 optimizer """adam""" +442 14 training_loop """owa""" +442 14 negative_sampler """basic""" +442 14 evaluator """rankbased""" +442 15 dataset """kinships""" +442 15 model """ntn""" +442 15 loss """nssa""" +442 15 regularizer """no""" +442 15 optimizer """adam""" +442 15 training_loop """owa""" +442 15 negative_sampler """basic""" +442 15 evaluator """rankbased""" +442 16 dataset """kinships""" +442 16 model 
"""ntn""" +442 16 loss """nssa""" +442 16 regularizer """no""" +442 16 optimizer """adam""" +442 16 training_loop """owa""" +442 16 negative_sampler """basic""" +442 16 evaluator """rankbased""" +442 17 dataset """kinships""" +442 17 model """ntn""" +442 17 loss """nssa""" +442 17 regularizer """no""" +442 17 optimizer """adam""" +442 17 training_loop """owa""" +442 17 negative_sampler """basic""" +442 17 evaluator """rankbased""" +442 18 dataset """kinships""" +442 18 model """ntn""" +442 18 loss """nssa""" +442 18 regularizer """no""" +442 18 optimizer """adam""" +442 18 training_loop """owa""" +442 18 negative_sampler """basic""" +442 18 evaluator """rankbased""" +442 19 dataset """kinships""" +442 19 model """ntn""" +442 19 loss """nssa""" +442 19 regularizer """no""" +442 19 optimizer """adam""" +442 19 training_loop """owa""" +442 19 negative_sampler """basic""" +442 19 evaluator """rankbased""" +442 20 dataset """kinships""" +442 20 model """ntn""" +442 20 loss """nssa""" +442 20 regularizer """no""" +442 20 optimizer """adam""" +442 20 training_loop """owa""" +442 20 negative_sampler """basic""" +442 20 evaluator """rankbased""" +442 21 dataset """kinships""" +442 21 model """ntn""" +442 21 loss """nssa""" +442 21 regularizer """no""" +442 21 optimizer """adam""" +442 21 training_loop """owa""" +442 21 negative_sampler """basic""" +442 21 evaluator """rankbased""" +442 22 dataset """kinships""" +442 22 model """ntn""" +442 22 loss """nssa""" +442 22 regularizer """no""" +442 22 optimizer """adam""" +442 22 training_loop """owa""" +442 22 negative_sampler """basic""" +442 22 evaluator """rankbased""" +442 23 dataset """kinships""" +442 23 model """ntn""" +442 23 loss """nssa""" +442 23 regularizer """no""" +442 23 optimizer """adam""" +442 23 training_loop """owa""" +442 23 negative_sampler """basic""" +442 23 evaluator """rankbased""" +442 24 dataset """kinships""" +442 24 model """ntn""" +442 24 loss """nssa""" +442 24 regularizer """no""" +442 24 
optimizer """adam""" +442 24 training_loop """owa""" +442 24 negative_sampler """basic""" +442 24 evaluator """rankbased""" +442 25 dataset """kinships""" +442 25 model """ntn""" +442 25 loss """nssa""" +442 25 regularizer """no""" +442 25 optimizer """adam""" +442 25 training_loop """owa""" +442 25 negative_sampler """basic""" +442 25 evaluator """rankbased""" +442 26 dataset """kinships""" +442 26 model """ntn""" +442 26 loss """nssa""" +442 26 regularizer """no""" +442 26 optimizer """adam""" +442 26 training_loop """owa""" +442 26 negative_sampler """basic""" +442 26 evaluator """rankbased""" +442 27 dataset """kinships""" +442 27 model """ntn""" +442 27 loss """nssa""" +442 27 regularizer """no""" +442 27 optimizer """adam""" +442 27 training_loop """owa""" +442 27 negative_sampler """basic""" +442 27 evaluator """rankbased""" +442 28 dataset """kinships""" +442 28 model """ntn""" +442 28 loss """nssa""" +442 28 regularizer """no""" +442 28 optimizer """adam""" +442 28 training_loop """owa""" +442 28 negative_sampler """basic""" +442 28 evaluator """rankbased""" +442 29 dataset """kinships""" +442 29 model """ntn""" +442 29 loss """nssa""" +442 29 regularizer """no""" +442 29 optimizer """adam""" +442 29 training_loop """owa""" +442 29 negative_sampler """basic""" +442 29 evaluator """rankbased""" +442 30 dataset """kinships""" +442 30 model """ntn""" +442 30 loss """nssa""" +442 30 regularizer """no""" +442 30 optimizer """adam""" +442 30 training_loop """owa""" +442 30 negative_sampler """basic""" +442 30 evaluator """rankbased""" +442 31 dataset """kinships""" +442 31 model """ntn""" +442 31 loss """nssa""" +442 31 regularizer """no""" +442 31 optimizer """adam""" +442 31 training_loop """owa""" +442 31 negative_sampler """basic""" +442 31 evaluator """rankbased""" +442 32 dataset """kinships""" +442 32 model """ntn""" +442 32 loss """nssa""" +442 32 regularizer """no""" +442 32 optimizer """adam""" +442 32 training_loop """owa""" +442 32 negative_sampler 
"""basic""" +442 32 evaluator """rankbased""" +442 33 dataset """kinships""" +442 33 model """ntn""" +442 33 loss """nssa""" +442 33 regularizer """no""" +442 33 optimizer """adam""" +442 33 training_loop """owa""" +442 33 negative_sampler """basic""" +442 33 evaluator """rankbased""" +442 34 dataset """kinships""" +442 34 model """ntn""" +442 34 loss """nssa""" +442 34 regularizer """no""" +442 34 optimizer """adam""" +442 34 training_loop """owa""" +442 34 negative_sampler """basic""" +442 34 evaluator """rankbased""" +442 35 dataset """kinships""" +442 35 model """ntn""" +442 35 loss """nssa""" +442 35 regularizer """no""" +442 35 optimizer """adam""" +442 35 training_loop """owa""" +442 35 negative_sampler """basic""" +442 35 evaluator """rankbased""" +442 36 dataset """kinships""" +442 36 model """ntn""" +442 36 loss """nssa""" +442 36 regularizer """no""" +442 36 optimizer """adam""" +442 36 training_loop """owa""" +442 36 negative_sampler """basic""" +442 36 evaluator """rankbased""" +442 37 dataset """kinships""" +442 37 model """ntn""" +442 37 loss """nssa""" +442 37 regularizer """no""" +442 37 optimizer """adam""" +442 37 training_loop """owa""" +442 37 negative_sampler """basic""" +442 37 evaluator """rankbased""" +442 38 dataset """kinships""" +442 38 model """ntn""" +442 38 loss """nssa""" +442 38 regularizer """no""" +442 38 optimizer """adam""" +442 38 training_loop """owa""" +442 38 negative_sampler """basic""" +442 38 evaluator """rankbased""" +442 39 dataset """kinships""" +442 39 model """ntn""" +442 39 loss """nssa""" +442 39 regularizer """no""" +442 39 optimizer """adam""" +442 39 training_loop """owa""" +442 39 negative_sampler """basic""" +442 39 evaluator """rankbased""" +442 40 dataset """kinships""" +442 40 model """ntn""" +442 40 loss """nssa""" +442 40 regularizer """no""" +442 40 optimizer """adam""" +442 40 training_loop """owa""" +442 40 negative_sampler """basic""" +442 40 evaluator """rankbased""" +442 41 dataset """kinships""" 
+442 41 model """ntn""" +442 41 loss """nssa""" +442 41 regularizer """no""" +442 41 optimizer """adam""" +442 41 training_loop """owa""" +442 41 negative_sampler """basic""" +442 41 evaluator """rankbased""" +442 42 dataset """kinships""" +442 42 model """ntn""" +442 42 loss """nssa""" +442 42 regularizer """no""" +442 42 optimizer """adam""" +442 42 training_loop """owa""" +442 42 negative_sampler """basic""" +442 42 evaluator """rankbased""" +442 43 dataset """kinships""" +442 43 model """ntn""" +442 43 loss """nssa""" +442 43 regularizer """no""" +442 43 optimizer """adam""" +442 43 training_loop """owa""" +442 43 negative_sampler """basic""" +442 43 evaluator """rankbased""" +442 44 dataset """kinships""" +442 44 model """ntn""" +442 44 loss """nssa""" +442 44 regularizer """no""" +442 44 optimizer """adam""" +442 44 training_loop """owa""" +442 44 negative_sampler """basic""" +442 44 evaluator """rankbased""" +442 45 dataset """kinships""" +442 45 model """ntn""" +442 45 loss """nssa""" +442 45 regularizer """no""" +442 45 optimizer """adam""" +442 45 training_loop """owa""" +442 45 negative_sampler """basic""" +442 45 evaluator """rankbased""" +442 46 dataset """kinships""" +442 46 model """ntn""" +442 46 loss """nssa""" +442 46 regularizer """no""" +442 46 optimizer """adam""" +442 46 training_loop """owa""" +442 46 negative_sampler """basic""" +442 46 evaluator """rankbased""" +442 47 dataset """kinships""" +442 47 model """ntn""" +442 47 loss """nssa""" +442 47 regularizer """no""" +442 47 optimizer """adam""" +442 47 training_loop """owa""" +442 47 negative_sampler """basic""" +442 47 evaluator """rankbased""" +442 48 dataset """kinships""" +442 48 model """ntn""" +442 48 loss """nssa""" +442 48 regularizer """no""" +442 48 optimizer """adam""" +442 48 training_loop """owa""" +442 48 negative_sampler """basic""" +442 48 evaluator """rankbased""" +442 49 dataset """kinships""" +442 49 model """ntn""" +442 49 loss """nssa""" +442 49 regularizer """no""" 
+442 49 optimizer """adam""" +442 49 training_loop """owa""" +442 49 negative_sampler """basic""" +442 49 evaluator """rankbased""" +442 50 dataset """kinships""" +442 50 model """ntn""" +442 50 loss """nssa""" +442 50 regularizer """no""" +442 50 optimizer """adam""" +442 50 training_loop """owa""" +442 50 negative_sampler """basic""" +442 50 evaluator """rankbased""" +442 51 dataset """kinships""" +442 51 model """ntn""" +442 51 loss """nssa""" +442 51 regularizer """no""" +442 51 optimizer """adam""" +442 51 training_loop """owa""" +442 51 negative_sampler """basic""" +442 51 evaluator """rankbased""" +442 52 dataset """kinships""" +442 52 model """ntn""" +442 52 loss """nssa""" +442 52 regularizer """no""" +442 52 optimizer """adam""" +442 52 training_loop """owa""" +442 52 negative_sampler """basic""" +442 52 evaluator """rankbased""" +442 53 dataset """kinships""" +442 53 model """ntn""" +442 53 loss """nssa""" +442 53 regularizer """no""" +442 53 optimizer """adam""" +442 53 training_loop """owa""" +442 53 negative_sampler """basic""" +442 53 evaluator """rankbased""" +442 54 dataset """kinships""" +442 54 model """ntn""" +442 54 loss """nssa""" +442 54 regularizer """no""" +442 54 optimizer """adam""" +442 54 training_loop """owa""" +442 54 negative_sampler """basic""" +442 54 evaluator """rankbased""" +442 55 dataset """kinships""" +442 55 model """ntn""" +442 55 loss """nssa""" +442 55 regularizer """no""" +442 55 optimizer """adam""" +442 55 training_loop """owa""" +442 55 negative_sampler """basic""" +442 55 evaluator """rankbased""" +442 56 dataset """kinships""" +442 56 model """ntn""" +442 56 loss """nssa""" +442 56 regularizer """no""" +442 56 optimizer """adam""" +442 56 training_loop """owa""" +442 56 negative_sampler """basic""" +442 56 evaluator """rankbased""" +442 57 dataset """kinships""" +442 57 model """ntn""" +442 57 loss """nssa""" +442 57 regularizer """no""" +442 57 optimizer """adam""" +442 57 training_loop """owa""" +442 57 
negative_sampler """basic""" +442 57 evaluator """rankbased""" +442 58 dataset """kinships""" +442 58 model """ntn""" +442 58 loss """nssa""" +442 58 regularizer """no""" +442 58 optimizer """adam""" +442 58 training_loop """owa""" +442 58 negative_sampler """basic""" +442 58 evaluator """rankbased""" +442 59 dataset """kinships""" +442 59 model """ntn""" +442 59 loss """nssa""" +442 59 regularizer """no""" +442 59 optimizer """adam""" +442 59 training_loop """owa""" +442 59 negative_sampler """basic""" +442 59 evaluator """rankbased""" +442 60 dataset """kinships""" +442 60 model """ntn""" +442 60 loss """nssa""" +442 60 regularizer """no""" +442 60 optimizer """adam""" +442 60 training_loop """owa""" +442 60 negative_sampler """basic""" +442 60 evaluator """rankbased""" +442 61 dataset """kinships""" +442 61 model """ntn""" +442 61 loss """nssa""" +442 61 regularizer """no""" +442 61 optimizer """adam""" +442 61 training_loop """owa""" +442 61 negative_sampler """basic""" +442 61 evaluator """rankbased""" +442 62 dataset """kinships""" +442 62 model """ntn""" +442 62 loss """nssa""" +442 62 regularizer """no""" +442 62 optimizer """adam""" +442 62 training_loop """owa""" +442 62 negative_sampler """basic""" +442 62 evaluator """rankbased""" +442 63 dataset """kinships""" +442 63 model """ntn""" +442 63 loss """nssa""" +442 63 regularizer """no""" +442 63 optimizer """adam""" +442 63 training_loop """owa""" +442 63 negative_sampler """basic""" +442 63 evaluator """rankbased""" +442 64 dataset """kinships""" +442 64 model """ntn""" +442 64 loss """nssa""" +442 64 regularizer """no""" +442 64 optimizer """adam""" +442 64 training_loop """owa""" +442 64 negative_sampler """basic""" +442 64 evaluator """rankbased""" +442 65 dataset """kinships""" +442 65 model """ntn""" +442 65 loss """nssa""" +442 65 regularizer """no""" +442 65 optimizer """adam""" +442 65 training_loop """owa""" +442 65 negative_sampler """basic""" +442 65 evaluator """rankbased""" +442 66 dataset 
"""kinships""" +442 66 model """ntn""" +442 66 loss """nssa""" +442 66 regularizer """no""" +442 66 optimizer """adam""" +442 66 training_loop """owa""" +442 66 negative_sampler """basic""" +442 66 evaluator """rankbased""" +442 67 dataset """kinships""" +442 67 model """ntn""" +442 67 loss """nssa""" +442 67 regularizer """no""" +442 67 optimizer """adam""" +442 67 training_loop """owa""" +442 67 negative_sampler """basic""" +442 67 evaluator """rankbased""" +442 68 dataset """kinships""" +442 68 model """ntn""" +442 68 loss """nssa""" +442 68 regularizer """no""" +442 68 optimizer """adam""" +442 68 training_loop """owa""" +442 68 negative_sampler """basic""" +442 68 evaluator """rankbased""" +442 69 dataset """kinships""" +442 69 model """ntn""" +442 69 loss """nssa""" +442 69 regularizer """no""" +442 69 optimizer """adam""" +442 69 training_loop """owa""" +442 69 negative_sampler """basic""" +442 69 evaluator """rankbased""" +442 70 dataset """kinships""" +442 70 model """ntn""" +442 70 loss """nssa""" +442 70 regularizer """no""" +442 70 optimizer """adam""" +442 70 training_loop """owa""" +442 70 negative_sampler """basic""" +442 70 evaluator """rankbased""" +442 71 dataset """kinships""" +442 71 model """ntn""" +442 71 loss """nssa""" +442 71 regularizer """no""" +442 71 optimizer """adam""" +442 71 training_loop """owa""" +442 71 negative_sampler """basic""" +442 71 evaluator """rankbased""" +442 72 dataset """kinships""" +442 72 model """ntn""" +442 72 loss """nssa""" +442 72 regularizer """no""" +442 72 optimizer """adam""" +442 72 training_loop """owa""" +442 72 negative_sampler """basic""" +442 72 evaluator """rankbased""" +442 73 dataset """kinships""" +442 73 model """ntn""" +442 73 loss """nssa""" +442 73 regularizer """no""" +442 73 optimizer """adam""" +442 73 training_loop """owa""" +442 73 negative_sampler """basic""" +442 73 evaluator """rankbased""" +442 74 dataset """kinships""" +442 74 model """ntn""" +442 74 loss """nssa""" +442 74 
regularizer """no""" +442 74 optimizer """adam""" +442 74 training_loop """owa""" +442 74 negative_sampler """basic""" +442 74 evaluator """rankbased""" +442 75 dataset """kinships""" +442 75 model """ntn""" +442 75 loss """nssa""" +442 75 regularizer """no""" +442 75 optimizer """adam""" +442 75 training_loop """owa""" +442 75 negative_sampler """basic""" +442 75 evaluator """rankbased""" +442 76 dataset """kinships""" +442 76 model """ntn""" +442 76 loss """nssa""" +442 76 regularizer """no""" +442 76 optimizer """adam""" +442 76 training_loop """owa""" +442 76 negative_sampler """basic""" +442 76 evaluator """rankbased""" +442 77 dataset """kinships""" +442 77 model """ntn""" +442 77 loss """nssa""" +442 77 regularizer """no""" +442 77 optimizer """adam""" +442 77 training_loop """owa""" +442 77 negative_sampler """basic""" +442 77 evaluator """rankbased""" +443 1 model.embedding_dim 1.0 +443 1 optimizer.lr 0.08250913507048914 +443 1 training.batch_size 0.0 +443 1 training.label_smoothing 0.15859573452762432 +443 1 dataset """wn18rr""" +443 1 model """ntn""" +443 1 loss """crossentropy""" +443 1 regularizer """no""" +443 1 optimizer """adam""" +443 1 training_loop """lcwa""" +443 1 evaluator """rankbased""" +444 1 model.embedding_dim 0.0 +444 1 optimizer.lr 0.003818315274147917 +444 1 training.batch_size 0.0 +444 1 training.label_smoothing 0.004825809248763494 +444 2 model.embedding_dim 1.0 +444 2 optimizer.lr 0.0018028812922812024 +444 2 training.batch_size 0.0 +444 2 training.label_smoothing 0.002306544004872167 +444 1 dataset """wn18rr""" +444 1 model """ntn""" +444 1 loss """crossentropy""" +444 1 regularizer """no""" +444 1 optimizer """adam""" +444 1 training_loop """lcwa""" +444 1 evaluator """rankbased""" +444 2 dataset """wn18rr""" +444 2 model """ntn""" +444 2 loss """crossentropy""" +444 2 regularizer """no""" +444 2 optimizer """adam""" +444 2 training_loop """lcwa""" +444 2 evaluator """rankbased""" +445 1 model.embedding_dim 2.0 +445 1 loss.margin 
5.257106976263697 +445 1 optimizer.lr 0.001167683417759167 +445 1 negative_sampler.num_negs_per_pos 89.0 +445 1 training.batch_size 0.0 +445 2 model.embedding_dim 2.0 +445 2 loss.margin 1.9414359083453578 +445 2 optimizer.lr 0.034268074847776754 +445 2 negative_sampler.num_negs_per_pos 38.0 +445 2 training.batch_size 1.0 +445 3 model.embedding_dim 0.0 +445 3 loss.margin 3.0859813472592483 +445 3 optimizer.lr 0.002785651947867109 +445 3 negative_sampler.num_negs_per_pos 7.0 +445 3 training.batch_size 1.0 +445 4 model.embedding_dim 1.0 +445 4 loss.margin 3.179093256714526 +445 4 optimizer.lr 0.004003020960598492 +445 4 negative_sampler.num_negs_per_pos 81.0 +445 4 training.batch_size 0.0 +445 5 model.embedding_dim 2.0 +445 5 loss.margin 2.6467859131724745 +445 5 optimizer.lr 0.007939845780524113 +445 5 negative_sampler.num_negs_per_pos 79.0 +445 5 training.batch_size 2.0 +445 1 dataset """wn18rr""" +445 1 model """ntn""" +445 1 loss """marginranking""" +445 1 regularizer """no""" +445 1 optimizer """adam""" +445 1 training_loop """owa""" +445 1 negative_sampler """basic""" +445 1 evaluator """rankbased""" +445 2 dataset """wn18rr""" +445 2 model """ntn""" +445 2 loss """marginranking""" +445 2 regularizer """no""" +445 2 optimizer """adam""" +445 2 training_loop """owa""" +445 2 negative_sampler """basic""" +445 2 evaluator """rankbased""" +445 3 dataset """wn18rr""" +445 3 model """ntn""" +445 3 loss """marginranking""" +445 3 regularizer """no""" +445 3 optimizer """adam""" +445 3 training_loop """owa""" +445 3 negative_sampler """basic""" +445 3 evaluator """rankbased""" +445 4 dataset """wn18rr""" +445 4 model """ntn""" +445 4 loss """marginranking""" +445 4 regularizer """no""" +445 4 optimizer """adam""" +445 4 training_loop """owa""" +445 4 negative_sampler """basic""" +445 4 evaluator """rankbased""" +445 5 dataset """wn18rr""" +445 5 model """ntn""" +445 5 loss """marginranking""" +445 5 regularizer """no""" +445 5 optimizer """adam""" +445 5 training_loop 
"""owa""" +445 5 negative_sampler """basic""" +445 5 evaluator """rankbased""" +446 1 model.embedding_dim 0.0 +446 1 loss.margin 2.3193588522288966 +446 1 optimizer.lr 0.03693277006683474 +446 1 negative_sampler.num_negs_per_pos 74.0 +446 1 training.batch_size 1.0 +446 2 model.embedding_dim 2.0 +446 2 loss.margin 7.6269131412848585 +446 2 optimizer.lr 0.0027600886011834745 +446 2 negative_sampler.num_negs_per_pos 32.0 +446 2 training.batch_size 2.0 +446 3 model.embedding_dim 0.0 +446 3 loss.margin 2.6675189241500252 +446 3 optimizer.lr 0.057697802311128144 +446 3 negative_sampler.num_negs_per_pos 58.0 +446 3 training.batch_size 1.0 +446 4 model.embedding_dim 1.0 +446 4 loss.margin 8.908397808748555 +446 4 optimizer.lr 0.005677369523994128 +446 4 negative_sampler.num_negs_per_pos 29.0 +446 4 training.batch_size 1.0 +446 5 model.embedding_dim 2.0 +446 5 loss.margin 9.012395326569722 +446 5 optimizer.lr 0.003778340769333125 +446 5 negative_sampler.num_negs_per_pos 89.0 +446 5 training.batch_size 1.0 +446 6 model.embedding_dim 2.0 +446 6 loss.margin 9.698963405889472 +446 6 optimizer.lr 0.002080754148242432 +446 6 negative_sampler.num_negs_per_pos 44.0 +446 6 training.batch_size 1.0 +446 7 model.embedding_dim 0.0 +446 7 loss.margin 6.507687049329795 +446 7 optimizer.lr 0.0025140516277781604 +446 7 negative_sampler.num_negs_per_pos 92.0 +446 7 training.batch_size 1.0 +446 8 model.embedding_dim 0.0 +446 8 loss.margin 7.41457428698561 +446 8 optimizer.lr 0.04633267471367877 +446 8 negative_sampler.num_negs_per_pos 21.0 +446 8 training.batch_size 2.0 +446 9 model.embedding_dim 2.0 +446 9 loss.margin 6.428446961063821 +446 9 optimizer.lr 0.0075670537710680035 +446 9 negative_sampler.num_negs_per_pos 17.0 +446 9 training.batch_size 0.0 +446 10 model.embedding_dim 2.0 +446 10 loss.margin 8.671781152491286 +446 10 optimizer.lr 0.06745655381757153 +446 10 negative_sampler.num_negs_per_pos 46.0 +446 10 training.batch_size 1.0 +446 11 model.embedding_dim 1.0 +446 11 loss.margin 
9.847407915961245 +446 11 optimizer.lr 0.09992565616400742 +446 11 negative_sampler.num_negs_per_pos 36.0 +446 11 training.batch_size 2.0 +446 12 model.embedding_dim 1.0 +446 12 loss.margin 6.81451219116807 +446 12 optimizer.lr 0.0495553155276586 +446 12 negative_sampler.num_negs_per_pos 30.0 +446 12 training.batch_size 2.0 +446 1 dataset """wn18rr""" +446 1 model """ntn""" +446 1 loss """marginranking""" +446 1 regularizer """no""" +446 1 optimizer """adam""" +446 1 training_loop """owa""" +446 1 negative_sampler """basic""" +446 1 evaluator """rankbased""" +446 2 dataset """wn18rr""" +446 2 model """ntn""" +446 2 loss """marginranking""" +446 2 regularizer """no""" +446 2 optimizer """adam""" +446 2 training_loop """owa""" +446 2 negative_sampler """basic""" +446 2 evaluator """rankbased""" +446 3 dataset """wn18rr""" +446 3 model """ntn""" +446 3 loss """marginranking""" +446 3 regularizer """no""" +446 3 optimizer """adam""" +446 3 training_loop """owa""" +446 3 negative_sampler """basic""" +446 3 evaluator """rankbased""" +446 4 dataset """wn18rr""" +446 4 model """ntn""" +446 4 loss """marginranking""" +446 4 regularizer """no""" +446 4 optimizer """adam""" +446 4 training_loop """owa""" +446 4 negative_sampler """basic""" +446 4 evaluator """rankbased""" +446 5 dataset """wn18rr""" +446 5 model """ntn""" +446 5 loss """marginranking""" +446 5 regularizer """no""" +446 5 optimizer """adam""" +446 5 training_loop """owa""" +446 5 negative_sampler """basic""" +446 5 evaluator """rankbased""" +446 6 dataset """wn18rr""" +446 6 model """ntn""" +446 6 loss """marginranking""" +446 6 regularizer """no""" +446 6 optimizer """adam""" +446 6 training_loop """owa""" +446 6 negative_sampler """basic""" +446 6 evaluator """rankbased""" +446 7 dataset """wn18rr""" +446 7 model """ntn""" +446 7 loss """marginranking""" +446 7 regularizer """no""" +446 7 optimizer """adam""" +446 7 training_loop """owa""" +446 7 negative_sampler """basic""" +446 7 evaluator """rankbased""" 
+446 8 dataset """wn18rr""" +446 8 model """ntn""" +446 8 loss """marginranking""" +446 8 regularizer """no""" +446 8 optimizer """adam""" +446 8 training_loop """owa""" +446 8 negative_sampler """basic""" +446 8 evaluator """rankbased""" +446 9 dataset """wn18rr""" +446 9 model """ntn""" +446 9 loss """marginranking""" +446 9 regularizer """no""" +446 9 optimizer """adam""" +446 9 training_loop """owa""" +446 9 negative_sampler """basic""" +446 9 evaluator """rankbased""" +446 10 dataset """wn18rr""" +446 10 model """ntn""" +446 10 loss """marginranking""" +446 10 regularizer """no""" +446 10 optimizer """adam""" +446 10 training_loop """owa""" +446 10 negative_sampler """basic""" +446 10 evaluator """rankbased""" +446 11 dataset """wn18rr""" +446 11 model """ntn""" +446 11 loss """marginranking""" +446 11 regularizer """no""" +446 11 optimizer """adam""" +446 11 training_loop """owa""" +446 11 negative_sampler """basic""" +446 11 evaluator """rankbased""" +446 12 dataset """wn18rr""" +446 12 model """ntn""" +446 12 loss """marginranking""" +446 12 regularizer """no""" +446 12 optimizer """adam""" +446 12 training_loop """owa""" +446 12 negative_sampler """basic""" +446 12 evaluator """rankbased""" +447 1 model.embedding_dim 0.0 +447 1 optimizer.lr 0.03560260291801175 +447 1 negative_sampler.num_negs_per_pos 6.0 +447 1 training.batch_size 2.0 +447 2 model.embedding_dim 0.0 +447 2 optimizer.lr 0.0024663929267788056 +447 2 negative_sampler.num_negs_per_pos 90.0 +447 2 training.batch_size 0.0 +447 3 model.embedding_dim 1.0 +447 3 optimizer.lr 0.016272026387732084 +447 3 negative_sampler.num_negs_per_pos 42.0 +447 3 training.batch_size 2.0 +447 4 model.embedding_dim 1.0 +447 4 optimizer.lr 0.0036725171359799956 +447 4 negative_sampler.num_negs_per_pos 67.0 +447 4 training.batch_size 0.0 +447 5 model.embedding_dim 2.0 +447 5 optimizer.lr 0.04554475171147684 +447 5 negative_sampler.num_negs_per_pos 56.0 +447 5 training.batch_size 2.0 +447 6 model.embedding_dim 0.0 +447 
6 optimizer.lr 0.0011468565820080974 +447 6 negative_sampler.num_negs_per_pos 80.0 +447 6 training.batch_size 2.0 +447 7 model.embedding_dim 0.0 +447 7 optimizer.lr 0.002219602855224912 +447 7 negative_sampler.num_negs_per_pos 22.0 +447 7 training.batch_size 1.0 +447 8 model.embedding_dim 2.0 +447 8 optimizer.lr 0.024102085810121115 +447 8 negative_sampler.num_negs_per_pos 25.0 +447 8 training.batch_size 1.0 +447 9 model.embedding_dim 2.0 +447 9 optimizer.lr 0.0028855935672233374 +447 9 negative_sampler.num_negs_per_pos 12.0 +447 9 training.batch_size 1.0 +447 10 model.embedding_dim 1.0 +447 10 optimizer.lr 0.0019367476106670369 +447 10 negative_sampler.num_negs_per_pos 58.0 +447 10 training.batch_size 2.0 +447 11 model.embedding_dim 2.0 +447 11 optimizer.lr 0.001174067884039618 +447 11 negative_sampler.num_negs_per_pos 10.0 +447 11 training.batch_size 2.0 +447 12 model.embedding_dim 0.0 +447 12 optimizer.lr 0.01073992526619883 +447 12 negative_sampler.num_negs_per_pos 43.0 +447 12 training.batch_size 1.0 +447 13 model.embedding_dim 0.0 +447 13 optimizer.lr 0.0012745830766471911 +447 13 negative_sampler.num_negs_per_pos 95.0 +447 13 training.batch_size 1.0 +447 14 model.embedding_dim 1.0 +447 14 optimizer.lr 0.06270494113891296 +447 14 negative_sampler.num_negs_per_pos 13.0 +447 14 training.batch_size 0.0 +447 15 model.embedding_dim 2.0 +447 15 optimizer.lr 0.01175962434234869 +447 15 negative_sampler.num_negs_per_pos 4.0 +447 15 training.batch_size 0.0 +447 16 model.embedding_dim 2.0 +447 16 optimizer.lr 0.021276669161387797 +447 16 negative_sampler.num_negs_per_pos 53.0 +447 16 training.batch_size 2.0 +447 17 model.embedding_dim 1.0 +447 17 optimizer.lr 0.0025117390144003275 +447 17 negative_sampler.num_negs_per_pos 77.0 +447 17 training.batch_size 1.0 +447 18 model.embedding_dim 2.0 +447 18 optimizer.lr 0.008984293252736008 +447 18 negative_sampler.num_negs_per_pos 92.0 +447 18 training.batch_size 1.0 +447 1 dataset """wn18rr""" +447 1 model """ntn""" +447 1 
loss """softplus""" +447 1 regularizer """no""" +447 1 optimizer """adam""" +447 1 training_loop """owa""" +447 1 negative_sampler """basic""" +447 1 evaluator """rankbased""" +447 2 dataset """wn18rr""" +447 2 model """ntn""" +447 2 loss """softplus""" +447 2 regularizer """no""" +447 2 optimizer """adam""" +447 2 training_loop """owa""" +447 2 negative_sampler """basic""" +447 2 evaluator """rankbased""" +447 3 dataset """wn18rr""" +447 3 model """ntn""" +447 3 loss """softplus""" +447 3 regularizer """no""" +447 3 optimizer """adam""" +447 3 training_loop """owa""" +447 3 negative_sampler """basic""" +447 3 evaluator """rankbased""" +447 4 dataset """wn18rr""" +447 4 model """ntn""" +447 4 loss """softplus""" +447 4 regularizer """no""" +447 4 optimizer """adam""" +447 4 training_loop """owa""" +447 4 negative_sampler """basic""" +447 4 evaluator """rankbased""" +447 5 dataset """wn18rr""" +447 5 model """ntn""" +447 5 loss """softplus""" +447 5 regularizer """no""" +447 5 optimizer """adam""" +447 5 training_loop """owa""" +447 5 negative_sampler """basic""" +447 5 evaluator """rankbased""" +447 6 dataset """wn18rr""" +447 6 model """ntn""" +447 6 loss """softplus""" +447 6 regularizer """no""" +447 6 optimizer """adam""" +447 6 training_loop """owa""" +447 6 negative_sampler """basic""" +447 6 evaluator """rankbased""" +447 7 dataset """wn18rr""" +447 7 model """ntn""" +447 7 loss """softplus""" +447 7 regularizer """no""" +447 7 optimizer """adam""" +447 7 training_loop """owa""" +447 7 negative_sampler """basic""" +447 7 evaluator """rankbased""" +447 8 dataset """wn18rr""" +447 8 model """ntn""" +447 8 loss """softplus""" +447 8 regularizer """no""" +447 8 optimizer """adam""" +447 8 training_loop """owa""" +447 8 negative_sampler """basic""" +447 8 evaluator """rankbased""" +447 9 dataset """wn18rr""" +447 9 model """ntn""" +447 9 loss """softplus""" +447 9 regularizer """no""" +447 9 optimizer """adam""" +447 9 training_loop """owa""" +447 9 
negative_sampler """basic""" +447 9 evaluator """rankbased""" +447 10 dataset """wn18rr""" +447 10 model """ntn""" +447 10 loss """softplus""" +447 10 regularizer """no""" +447 10 optimizer """adam""" +447 10 training_loop """owa""" +447 10 negative_sampler """basic""" +447 10 evaluator """rankbased""" +447 11 dataset """wn18rr""" +447 11 model """ntn""" +447 11 loss """softplus""" +447 11 regularizer """no""" +447 11 optimizer """adam""" +447 11 training_loop """owa""" +447 11 negative_sampler """basic""" +447 11 evaluator """rankbased""" +447 12 dataset """wn18rr""" +447 12 model """ntn""" +447 12 loss """softplus""" +447 12 regularizer """no""" +447 12 optimizer """adam""" +447 12 training_loop """owa""" +447 12 negative_sampler """basic""" +447 12 evaluator """rankbased""" +447 13 dataset """wn18rr""" +447 13 model """ntn""" +447 13 loss """softplus""" +447 13 regularizer """no""" +447 13 optimizer """adam""" +447 13 training_loop """owa""" +447 13 negative_sampler """basic""" +447 13 evaluator """rankbased""" +447 14 dataset """wn18rr""" +447 14 model """ntn""" +447 14 loss """softplus""" +447 14 regularizer """no""" +447 14 optimizer """adam""" +447 14 training_loop """owa""" +447 14 negative_sampler """basic""" +447 14 evaluator """rankbased""" +447 15 dataset """wn18rr""" +447 15 model """ntn""" +447 15 loss """softplus""" +447 15 regularizer """no""" +447 15 optimizer """adam""" +447 15 training_loop """owa""" +447 15 negative_sampler """basic""" +447 15 evaluator """rankbased""" +447 16 dataset """wn18rr""" +447 16 model """ntn""" +447 16 loss """softplus""" +447 16 regularizer """no""" +447 16 optimizer """adam""" +447 16 training_loop """owa""" +447 16 negative_sampler """basic""" +447 16 evaluator """rankbased""" +447 17 dataset """wn18rr""" +447 17 model """ntn""" +447 17 loss """softplus""" +447 17 regularizer """no""" +447 17 optimizer """adam""" +447 17 training_loop """owa""" +447 17 negative_sampler """basic""" +447 17 evaluator """rankbased""" 
+447 18 dataset """wn18rr""" +447 18 model """ntn""" +447 18 loss """softplus""" +447 18 regularizer """no""" +447 18 optimizer """adam""" +447 18 training_loop """owa""" +447 18 negative_sampler """basic""" +447 18 evaluator """rankbased""" +448 1 model.embedding_dim 2.0 +448 1 optimizer.lr 0.0021429900583429333 +448 1 negative_sampler.num_negs_per_pos 56.0 +448 1 training.batch_size 0.0 +448 2 model.embedding_dim 2.0 +448 2 optimizer.lr 0.05119632671312163 +448 2 negative_sampler.num_negs_per_pos 89.0 +448 2 training.batch_size 1.0 +448 3 model.embedding_dim 0.0 +448 3 optimizer.lr 0.004804720671274641 +448 3 negative_sampler.num_negs_per_pos 76.0 +448 3 training.batch_size 1.0 +448 4 model.embedding_dim 1.0 +448 4 optimizer.lr 0.00662523390298047 +448 4 negative_sampler.num_negs_per_pos 37.0 +448 4 training.batch_size 1.0 +448 5 model.embedding_dim 1.0 +448 5 optimizer.lr 0.044612138487056106 +448 5 negative_sampler.num_negs_per_pos 60.0 +448 5 training.batch_size 1.0 +448 6 model.embedding_dim 2.0 +448 6 optimizer.lr 0.002030641198551522 +448 6 negative_sampler.num_negs_per_pos 68.0 +448 6 training.batch_size 0.0 +448 7 model.embedding_dim 1.0 +448 7 optimizer.lr 0.038223613184887796 +448 7 negative_sampler.num_negs_per_pos 9.0 +448 7 training.batch_size 0.0 +448 8 model.embedding_dim 0.0 +448 8 optimizer.lr 0.0029365177260247464 +448 8 negative_sampler.num_negs_per_pos 2.0 +448 8 training.batch_size 1.0 +448 9 model.embedding_dim 2.0 +448 9 optimizer.lr 0.006070661342922042 +448 9 negative_sampler.num_negs_per_pos 74.0 +448 9 training.batch_size 2.0 +448 10 model.embedding_dim 0.0 +448 10 optimizer.lr 0.009859037548699517 +448 10 negative_sampler.num_negs_per_pos 15.0 +448 10 training.batch_size 0.0 +448 11 model.embedding_dim 1.0 +448 11 optimizer.lr 0.03858300618357074 +448 11 negative_sampler.num_negs_per_pos 98.0 +448 11 training.batch_size 0.0 +448 12 model.embedding_dim 0.0 +448 12 optimizer.lr 0.004532880489749018 +448 12 
negative_sampler.num_negs_per_pos 16.0 +448 12 training.batch_size 0.0 +448 13 model.embedding_dim 1.0 +448 13 optimizer.lr 0.08379261025908248 +448 13 negative_sampler.num_negs_per_pos 28.0 +448 13 training.batch_size 2.0 +448 14 model.embedding_dim 2.0 +448 14 optimizer.lr 0.0015273136105529222 +448 14 negative_sampler.num_negs_per_pos 94.0 +448 14 training.batch_size 2.0 +448 15 model.embedding_dim 2.0 +448 15 optimizer.lr 0.029283096852386552 +448 15 negative_sampler.num_negs_per_pos 23.0 +448 15 training.batch_size 0.0 +448 16 model.embedding_dim 1.0 +448 16 optimizer.lr 0.010358285371705146 +448 16 negative_sampler.num_negs_per_pos 55.0 +448 16 training.batch_size 2.0 +448 17 model.embedding_dim 1.0 +448 17 optimizer.lr 0.005509794523374556 +448 17 negative_sampler.num_negs_per_pos 68.0 +448 17 training.batch_size 2.0 +448 1 dataset """wn18rr""" +448 1 model """ntn""" +448 1 loss """softplus""" +448 1 regularizer """no""" +448 1 optimizer """adam""" +448 1 training_loop """owa""" +448 1 negative_sampler """basic""" +448 1 evaluator """rankbased""" +448 2 dataset """wn18rr""" +448 2 model """ntn""" +448 2 loss """softplus""" +448 2 regularizer """no""" +448 2 optimizer """adam""" +448 2 training_loop """owa""" +448 2 negative_sampler """basic""" +448 2 evaluator """rankbased""" +448 3 dataset """wn18rr""" +448 3 model """ntn""" +448 3 loss """softplus""" +448 3 regularizer """no""" +448 3 optimizer """adam""" +448 3 training_loop """owa""" +448 3 negative_sampler """basic""" +448 3 evaluator """rankbased""" +448 4 dataset """wn18rr""" +448 4 model """ntn""" +448 4 loss """softplus""" +448 4 regularizer """no""" +448 4 optimizer """adam""" +448 4 training_loop """owa""" +448 4 negative_sampler """basic""" +448 4 evaluator """rankbased""" +448 5 dataset """wn18rr""" +448 5 model """ntn""" +448 5 loss """softplus""" +448 5 regularizer """no""" +448 5 optimizer """adam""" +448 5 training_loop """owa""" +448 5 negative_sampler """basic""" +448 5 evaluator 
"""rankbased""" +448 6 dataset """wn18rr""" +448 6 model """ntn""" +448 6 loss """softplus""" +448 6 regularizer """no""" +448 6 optimizer """adam""" +448 6 training_loop """owa""" +448 6 negative_sampler """basic""" +448 6 evaluator """rankbased""" +448 7 dataset """wn18rr""" +448 7 model """ntn""" +448 7 loss """softplus""" +448 7 regularizer """no""" +448 7 optimizer """adam""" +448 7 training_loop """owa""" +448 7 negative_sampler """basic""" +448 7 evaluator """rankbased""" +448 8 dataset """wn18rr""" +448 8 model """ntn""" +448 8 loss """softplus""" +448 8 regularizer """no""" +448 8 optimizer """adam""" +448 8 training_loop """owa""" +448 8 negative_sampler """basic""" +448 8 evaluator """rankbased""" +448 9 dataset """wn18rr""" +448 9 model """ntn""" +448 9 loss """softplus""" +448 9 regularizer """no""" +448 9 optimizer """adam""" +448 9 training_loop """owa""" +448 9 negative_sampler """basic""" +448 9 evaluator """rankbased""" +448 10 dataset """wn18rr""" +448 10 model """ntn""" +448 10 loss """softplus""" +448 10 regularizer """no""" +448 10 optimizer """adam""" +448 10 training_loop """owa""" +448 10 negative_sampler """basic""" +448 10 evaluator """rankbased""" +448 11 dataset """wn18rr""" +448 11 model """ntn""" +448 11 loss """softplus""" +448 11 regularizer """no""" +448 11 optimizer """adam""" +448 11 training_loop """owa""" +448 11 negative_sampler """basic""" +448 11 evaluator """rankbased""" +448 12 dataset """wn18rr""" +448 12 model """ntn""" +448 12 loss """softplus""" +448 12 regularizer """no""" +448 12 optimizer """adam""" +448 12 training_loop """owa""" +448 12 negative_sampler """basic""" +448 12 evaluator """rankbased""" +448 13 dataset """wn18rr""" +448 13 model """ntn""" +448 13 loss """softplus""" +448 13 regularizer """no""" +448 13 optimizer """adam""" +448 13 training_loop """owa""" +448 13 negative_sampler """basic""" +448 13 evaluator """rankbased""" +448 14 dataset """wn18rr""" +448 14 model """ntn""" +448 14 loss 
"""softplus""" +448 14 regularizer """no""" +448 14 optimizer """adam""" +448 14 training_loop """owa""" +448 14 negative_sampler """basic""" +448 14 evaluator """rankbased""" +448 15 dataset """wn18rr""" +448 15 model """ntn""" +448 15 loss """softplus""" +448 15 regularizer """no""" +448 15 optimizer """adam""" +448 15 training_loop """owa""" +448 15 negative_sampler """basic""" +448 15 evaluator """rankbased""" +448 16 dataset """wn18rr""" +448 16 model """ntn""" +448 16 loss """softplus""" +448 16 regularizer """no""" +448 16 optimizer """adam""" +448 16 training_loop """owa""" +448 16 negative_sampler """basic""" +448 16 evaluator """rankbased""" +448 17 dataset """wn18rr""" +448 17 model """ntn""" +448 17 loss """softplus""" +448 17 regularizer """no""" +448 17 optimizer """adam""" +448 17 training_loop """owa""" +448 17 negative_sampler """basic""" +448 17 evaluator """rankbased""" +449 1 model.embedding_dim 1.0 +449 1 optimizer.lr 0.0880209775745498 +449 1 negative_sampler.num_negs_per_pos 63.0 +449 1 training.batch_size 2.0 +449 2 model.embedding_dim 2.0 +449 2 optimizer.lr 0.01196308441729141 +449 2 negative_sampler.num_negs_per_pos 24.0 +449 2 training.batch_size 1.0 +449 3 model.embedding_dim 1.0 +449 3 optimizer.lr 0.009316400104695059 +449 3 negative_sampler.num_negs_per_pos 30.0 +449 3 training.batch_size 0.0 +449 4 model.embedding_dim 1.0 +449 4 optimizer.lr 0.006804366239553761 +449 4 negative_sampler.num_negs_per_pos 80.0 +449 4 training.batch_size 0.0 +449 5 model.embedding_dim 0.0 +449 5 optimizer.lr 0.0327600493819715 +449 5 negative_sampler.num_negs_per_pos 7.0 +449 5 training.batch_size 2.0 +449 6 model.embedding_dim 1.0 +449 6 optimizer.lr 0.005976082876287862 +449 6 negative_sampler.num_negs_per_pos 75.0 +449 6 training.batch_size 2.0 +449 7 model.embedding_dim 1.0 +449 7 optimizer.lr 0.04482777151717181 +449 7 negative_sampler.num_negs_per_pos 57.0 +449 7 training.batch_size 1.0 +449 8 model.embedding_dim 0.0 +449 8 optimizer.lr 
0.01436104588811202 +449 8 negative_sampler.num_negs_per_pos 57.0 +449 8 training.batch_size 0.0 +449 9 model.embedding_dim 0.0 +449 9 optimizer.lr 0.002860423869497935 +449 9 negative_sampler.num_negs_per_pos 11.0 +449 9 training.batch_size 1.0 +449 10 model.embedding_dim 1.0 +449 10 optimizer.lr 0.0017106426118429863 +449 10 negative_sampler.num_negs_per_pos 92.0 +449 10 training.batch_size 2.0 +449 11 model.embedding_dim 2.0 +449 11 optimizer.lr 0.00803447397827124 +449 11 negative_sampler.num_negs_per_pos 49.0 +449 11 training.batch_size 1.0 +449 12 model.embedding_dim 0.0 +449 12 optimizer.lr 0.003223737537224281 +449 12 negative_sampler.num_negs_per_pos 60.0 +449 12 training.batch_size 2.0 +449 13 model.embedding_dim 0.0 +449 13 optimizer.lr 0.0281714584215224 +449 13 negative_sampler.num_negs_per_pos 25.0 +449 13 training.batch_size 0.0 +449 14 model.embedding_dim 1.0 +449 14 optimizer.lr 0.004915099884888788 +449 14 negative_sampler.num_negs_per_pos 28.0 +449 14 training.batch_size 0.0 +449 15 model.embedding_dim 0.0 +449 15 optimizer.lr 0.004241093974292776 +449 15 negative_sampler.num_negs_per_pos 74.0 +449 15 training.batch_size 2.0 +449 16 model.embedding_dim 1.0 +449 16 optimizer.lr 0.012780904841375555 +449 16 negative_sampler.num_negs_per_pos 21.0 +449 16 training.batch_size 1.0 +449 17 model.embedding_dim 1.0 +449 17 optimizer.lr 0.003767558887558387 +449 17 negative_sampler.num_negs_per_pos 11.0 +449 17 training.batch_size 1.0 +449 18 model.embedding_dim 0.0 +449 18 optimizer.lr 0.002133758478033831 +449 18 negative_sampler.num_negs_per_pos 70.0 +449 18 training.batch_size 1.0 +449 19 model.embedding_dim 1.0 +449 19 optimizer.lr 0.03427648430068352 +449 19 negative_sampler.num_negs_per_pos 97.0 +449 19 training.batch_size 2.0 +449 20 model.embedding_dim 1.0 +449 20 optimizer.lr 0.013808845998469535 +449 20 negative_sampler.num_negs_per_pos 54.0 +449 20 training.batch_size 2.0 +449 1 dataset """wn18rr""" +449 1 model """ntn""" +449 1 loss 
"""bceaftersigmoid""" +449 1 regularizer """no""" +449 1 optimizer """adam""" +449 1 training_loop """owa""" +449 1 negative_sampler """basic""" +449 1 evaluator """rankbased""" +449 2 dataset """wn18rr""" +449 2 model """ntn""" +449 2 loss """bceaftersigmoid""" +449 2 regularizer """no""" +449 2 optimizer """adam""" +449 2 training_loop """owa""" +449 2 negative_sampler """basic""" +449 2 evaluator """rankbased""" +449 3 dataset """wn18rr""" +449 3 model """ntn""" +449 3 loss """bceaftersigmoid""" +449 3 regularizer """no""" +449 3 optimizer """adam""" +449 3 training_loop """owa""" +449 3 negative_sampler """basic""" +449 3 evaluator """rankbased""" +449 4 dataset """wn18rr""" +449 4 model """ntn""" +449 4 loss """bceaftersigmoid""" +449 4 regularizer """no""" +449 4 optimizer """adam""" +449 4 training_loop """owa""" +449 4 negative_sampler """basic""" +449 4 evaluator """rankbased""" +449 5 dataset """wn18rr""" +449 5 model """ntn""" +449 5 loss """bceaftersigmoid""" +449 5 regularizer """no""" +449 5 optimizer """adam""" +449 5 training_loop """owa""" +449 5 negative_sampler """basic""" +449 5 evaluator """rankbased""" +449 6 dataset """wn18rr""" +449 6 model """ntn""" +449 6 loss """bceaftersigmoid""" +449 6 regularizer """no""" +449 6 optimizer """adam""" +449 6 training_loop """owa""" +449 6 negative_sampler """basic""" +449 6 evaluator """rankbased""" +449 7 dataset """wn18rr""" +449 7 model """ntn""" +449 7 loss """bceaftersigmoid""" +449 7 regularizer """no""" +449 7 optimizer """adam""" +449 7 training_loop """owa""" +449 7 negative_sampler """basic""" +449 7 evaluator """rankbased""" +449 8 dataset """wn18rr""" +449 8 model """ntn""" +449 8 loss """bceaftersigmoid""" +449 8 regularizer """no""" +449 8 optimizer """adam""" +449 8 training_loop """owa""" +449 8 negative_sampler """basic""" +449 8 evaluator """rankbased""" +449 9 dataset """wn18rr""" +449 9 model """ntn""" +449 9 loss """bceaftersigmoid""" +449 9 regularizer """no""" +449 9 optimizer 
"""adam""" +449 9 training_loop """owa""" +449 9 negative_sampler """basic""" +449 9 evaluator """rankbased""" +449 10 dataset """wn18rr""" +449 10 model """ntn""" +449 10 loss """bceaftersigmoid""" +449 10 regularizer """no""" +449 10 optimizer """adam""" +449 10 training_loop """owa""" +449 10 negative_sampler """basic""" +449 10 evaluator """rankbased""" +449 11 dataset """wn18rr""" +449 11 model """ntn""" +449 11 loss """bceaftersigmoid""" +449 11 regularizer """no""" +449 11 optimizer """adam""" +449 11 training_loop """owa""" +449 11 negative_sampler """basic""" +449 11 evaluator """rankbased""" +449 12 dataset """wn18rr""" +449 12 model """ntn""" +449 12 loss """bceaftersigmoid""" +449 12 regularizer """no""" +449 12 optimizer """adam""" +449 12 training_loop """owa""" +449 12 negative_sampler """basic""" +449 12 evaluator """rankbased""" +449 13 dataset """wn18rr""" +449 13 model """ntn""" +449 13 loss """bceaftersigmoid""" +449 13 regularizer """no""" +449 13 optimizer """adam""" +449 13 training_loop """owa""" +449 13 negative_sampler """basic""" +449 13 evaluator """rankbased""" +449 14 dataset """wn18rr""" +449 14 model """ntn""" +449 14 loss """bceaftersigmoid""" +449 14 regularizer """no""" +449 14 optimizer """adam""" +449 14 training_loop """owa""" +449 14 negative_sampler """basic""" +449 14 evaluator """rankbased""" +449 15 dataset """wn18rr""" +449 15 model """ntn""" +449 15 loss """bceaftersigmoid""" +449 15 regularizer """no""" +449 15 optimizer """adam""" +449 15 training_loop """owa""" +449 15 negative_sampler """basic""" +449 15 evaluator """rankbased""" +449 16 dataset """wn18rr""" +449 16 model """ntn""" +449 16 loss """bceaftersigmoid""" +449 16 regularizer """no""" +449 16 optimizer """adam""" +449 16 training_loop """owa""" +449 16 negative_sampler """basic""" +449 16 evaluator """rankbased""" +449 17 dataset """wn18rr""" +449 17 model """ntn""" +449 17 loss """bceaftersigmoid""" +449 17 regularizer """no""" +449 17 optimizer """adam""" 
+449 17 training_loop """owa""" +449 17 negative_sampler """basic""" +449 17 evaluator """rankbased""" +449 18 dataset """wn18rr""" +449 18 model """ntn""" +449 18 loss """bceaftersigmoid""" +449 18 regularizer """no""" +449 18 optimizer """adam""" +449 18 training_loop """owa""" +449 18 negative_sampler """basic""" +449 18 evaluator """rankbased""" +449 19 dataset """wn18rr""" +449 19 model """ntn""" +449 19 loss """bceaftersigmoid""" +449 19 regularizer """no""" +449 19 optimizer """adam""" +449 19 training_loop """owa""" +449 19 negative_sampler """basic""" +449 19 evaluator """rankbased""" +449 20 dataset """wn18rr""" +449 20 model """ntn""" +449 20 loss """bceaftersigmoid""" +449 20 regularizer """no""" +449 20 optimizer """adam""" +449 20 training_loop """owa""" +449 20 negative_sampler """basic""" +449 20 evaluator """rankbased""" +450 1 model.embedding_dim 0.0 +450 1 optimizer.lr 0.0017934049818535299 +450 1 negative_sampler.num_negs_per_pos 27.0 +450 1 training.batch_size 0.0 +450 2 model.embedding_dim 1.0 +450 2 optimizer.lr 0.023297409789939036 +450 2 negative_sampler.num_negs_per_pos 94.0 +450 2 training.batch_size 2.0 +450 3 model.embedding_dim 0.0 +450 3 optimizer.lr 0.0012604881374971477 +450 3 negative_sampler.num_negs_per_pos 34.0 +450 3 training.batch_size 0.0 +450 4 model.embedding_dim 0.0 +450 4 optimizer.lr 0.033342763021693145 +450 4 negative_sampler.num_negs_per_pos 8.0 +450 4 training.batch_size 0.0 +450 5 model.embedding_dim 0.0 +450 5 optimizer.lr 0.01394213617336676 +450 5 negative_sampler.num_negs_per_pos 22.0 +450 5 training.batch_size 2.0 +450 6 model.embedding_dim 0.0 +450 6 optimizer.lr 0.00813676959325877 +450 6 negative_sampler.num_negs_per_pos 34.0 +450 6 training.batch_size 2.0 +450 7 model.embedding_dim 1.0 +450 7 optimizer.lr 0.030666191262955746 +450 7 negative_sampler.num_negs_per_pos 45.0 +450 7 training.batch_size 2.0 +450 8 model.embedding_dim 0.0 +450 8 optimizer.lr 0.015492655126965058 +450 8 
negative_sampler.num_negs_per_pos 51.0 +450 8 training.batch_size 1.0 +450 9 model.embedding_dim 2.0 +450 9 optimizer.lr 0.003657803822978076 +450 9 negative_sampler.num_negs_per_pos 73.0 +450 9 training.batch_size 2.0 +450 10 model.embedding_dim 2.0 +450 10 optimizer.lr 0.010257271365683673 +450 10 negative_sampler.num_negs_per_pos 82.0 +450 10 training.batch_size 2.0 +450 11 model.embedding_dim 1.0 +450 11 optimizer.lr 0.032926802402339965 +450 11 negative_sampler.num_negs_per_pos 31.0 +450 11 training.batch_size 0.0 +450 12 model.embedding_dim 0.0 +450 12 optimizer.lr 0.019614480270815273 +450 12 negative_sampler.num_negs_per_pos 17.0 +450 12 training.batch_size 0.0 +450 13 model.embedding_dim 0.0 +450 13 optimizer.lr 0.009235290658398558 +450 13 negative_sampler.num_negs_per_pos 55.0 +450 13 training.batch_size 2.0 +450 14 model.embedding_dim 2.0 +450 14 optimizer.lr 0.014944164313517645 +450 14 negative_sampler.num_negs_per_pos 2.0 +450 14 training.batch_size 1.0 +450 15 model.embedding_dim 2.0 +450 15 optimizer.lr 0.0012363405529806492 +450 15 negative_sampler.num_negs_per_pos 90.0 +450 15 training.batch_size 2.0 +450 16 model.embedding_dim 0.0 +450 16 optimizer.lr 0.002004522630070326 +450 16 negative_sampler.num_negs_per_pos 31.0 +450 16 training.batch_size 2.0 +450 17 model.embedding_dim 1.0 +450 17 optimizer.lr 0.015335427241660364 +450 17 negative_sampler.num_negs_per_pos 40.0 +450 17 training.batch_size 0.0 +450 18 model.embedding_dim 0.0 +450 18 optimizer.lr 0.02752881666458862 +450 18 negative_sampler.num_negs_per_pos 63.0 +450 18 training.batch_size 1.0 +450 19 model.embedding_dim 2.0 +450 19 optimizer.lr 0.005475975201773851 +450 19 negative_sampler.num_negs_per_pos 42.0 +450 19 training.batch_size 1.0 +450 20 model.embedding_dim 1.0 +450 20 optimizer.lr 0.03008477359349639 +450 20 negative_sampler.num_negs_per_pos 32.0 +450 20 training.batch_size 2.0 +450 21 model.embedding_dim 2.0 +450 21 optimizer.lr 0.006830801352100672 +450 21 
negative_sampler.num_negs_per_pos 14.0 +450 21 training.batch_size 2.0 +450 22 model.embedding_dim 2.0 +450 22 optimizer.lr 0.04917213758663589 +450 22 negative_sampler.num_negs_per_pos 79.0 +450 22 training.batch_size 0.0 +450 1 dataset """wn18rr""" +450 1 model """ntn""" +450 1 loss """bceaftersigmoid""" +450 1 regularizer """no""" +450 1 optimizer """adam""" +450 1 training_loop """owa""" +450 1 negative_sampler """basic""" +450 1 evaluator """rankbased""" +450 2 dataset """wn18rr""" +450 2 model """ntn""" +450 2 loss """bceaftersigmoid""" +450 2 regularizer """no""" +450 2 optimizer """adam""" +450 2 training_loop """owa""" +450 2 negative_sampler """basic""" +450 2 evaluator """rankbased""" +450 3 dataset """wn18rr""" +450 3 model """ntn""" +450 3 loss """bceaftersigmoid""" +450 3 regularizer """no""" +450 3 optimizer """adam""" +450 3 training_loop """owa""" +450 3 negative_sampler """basic""" +450 3 evaluator """rankbased""" +450 4 dataset """wn18rr""" +450 4 model """ntn""" +450 4 loss """bceaftersigmoid""" +450 4 regularizer """no""" +450 4 optimizer """adam""" +450 4 training_loop """owa""" +450 4 negative_sampler """basic""" +450 4 evaluator """rankbased""" +450 5 dataset """wn18rr""" +450 5 model """ntn""" +450 5 loss """bceaftersigmoid""" +450 5 regularizer """no""" +450 5 optimizer """adam""" +450 5 training_loop """owa""" +450 5 negative_sampler """basic""" +450 5 evaluator """rankbased""" +450 6 dataset """wn18rr""" +450 6 model """ntn""" +450 6 loss """bceaftersigmoid""" +450 6 regularizer """no""" +450 6 optimizer """adam""" +450 6 training_loop """owa""" +450 6 negative_sampler """basic""" +450 6 evaluator """rankbased""" +450 7 dataset """wn18rr""" +450 7 model """ntn""" +450 7 loss """bceaftersigmoid""" +450 7 regularizer """no""" +450 7 optimizer """adam""" +450 7 training_loop """owa""" +450 7 negative_sampler """basic""" +450 7 evaluator """rankbased""" +450 8 dataset """wn18rr""" +450 8 model """ntn""" +450 8 loss """bceaftersigmoid""" +450 
8 regularizer """no""" +450 8 optimizer """adam""" +450 8 training_loop """owa""" +450 8 negative_sampler """basic""" +450 8 evaluator """rankbased""" +450 9 dataset """wn18rr""" +450 9 model """ntn""" +450 9 loss """bceaftersigmoid""" +450 9 regularizer """no""" +450 9 optimizer """adam""" +450 9 training_loop """owa""" +450 9 negative_sampler """basic""" +450 9 evaluator """rankbased""" +450 10 dataset """wn18rr""" +450 10 model """ntn""" +450 10 loss """bceaftersigmoid""" +450 10 regularizer """no""" +450 10 optimizer """adam""" +450 10 training_loop """owa""" +450 10 negative_sampler """basic""" +450 10 evaluator """rankbased""" +450 11 dataset """wn18rr""" +450 11 model """ntn""" +450 11 loss """bceaftersigmoid""" +450 11 regularizer """no""" +450 11 optimizer """adam""" +450 11 training_loop """owa""" +450 11 negative_sampler """basic""" +450 11 evaluator """rankbased""" +450 12 dataset """wn18rr""" +450 12 model """ntn""" +450 12 loss """bceaftersigmoid""" +450 12 regularizer """no""" +450 12 optimizer """adam""" +450 12 training_loop """owa""" +450 12 negative_sampler """basic""" +450 12 evaluator """rankbased""" +450 13 dataset """wn18rr""" +450 13 model """ntn""" +450 13 loss """bceaftersigmoid""" +450 13 regularizer """no""" +450 13 optimizer """adam""" +450 13 training_loop """owa""" +450 13 negative_sampler """basic""" +450 13 evaluator """rankbased""" +450 14 dataset """wn18rr""" +450 14 model """ntn""" +450 14 loss """bceaftersigmoid""" +450 14 regularizer """no""" +450 14 optimizer """adam""" +450 14 training_loop """owa""" +450 14 negative_sampler """basic""" +450 14 evaluator """rankbased""" +450 15 dataset """wn18rr""" +450 15 model """ntn""" +450 15 loss """bceaftersigmoid""" +450 15 regularizer """no""" +450 15 optimizer """adam""" +450 15 training_loop """owa""" +450 15 negative_sampler """basic""" +450 15 evaluator """rankbased""" +450 16 dataset """wn18rr""" +450 16 model """ntn""" +450 16 loss """bceaftersigmoid""" +450 16 regularizer 
"""no""" +450 16 optimizer """adam""" +450 16 training_loop """owa""" +450 16 negative_sampler """basic""" +450 16 evaluator """rankbased""" +450 17 dataset """wn18rr""" +450 17 model """ntn""" +450 17 loss """bceaftersigmoid""" +450 17 regularizer """no""" +450 17 optimizer """adam""" +450 17 training_loop """owa""" +450 17 negative_sampler """basic""" +450 17 evaluator """rankbased""" +450 18 dataset """wn18rr""" +450 18 model """ntn""" +450 18 loss """bceaftersigmoid""" +450 18 regularizer """no""" +450 18 optimizer """adam""" +450 18 training_loop """owa""" +450 18 negative_sampler """basic""" +450 18 evaluator """rankbased""" +450 19 dataset """wn18rr""" +450 19 model """ntn""" +450 19 loss """bceaftersigmoid""" +450 19 regularizer """no""" +450 19 optimizer """adam""" +450 19 training_loop """owa""" +450 19 negative_sampler """basic""" +450 19 evaluator """rankbased""" +450 20 dataset """wn18rr""" +450 20 model """ntn""" +450 20 loss """bceaftersigmoid""" +450 20 regularizer """no""" +450 20 optimizer """adam""" +450 20 training_loop """owa""" +450 20 negative_sampler """basic""" +450 20 evaluator """rankbased""" +450 21 dataset """wn18rr""" +450 21 model """ntn""" +450 21 loss """bceaftersigmoid""" +450 21 regularizer """no""" +450 21 optimizer """adam""" +450 21 training_loop """owa""" +450 21 negative_sampler """basic""" +450 21 evaluator """rankbased""" +450 22 dataset """wn18rr""" +450 22 model """ntn""" +450 22 loss """bceaftersigmoid""" +450 22 regularizer """no""" +450 22 optimizer """adam""" +450 22 training_loop """owa""" +450 22 negative_sampler """basic""" +450 22 evaluator """rankbased""" +451 1 model.embedding_dim 2.0 +451 1 optimizer.lr 0.06829962072677412 +451 1 negative_sampler.num_negs_per_pos 6.0 +451 1 training.batch_size 0.0 +451 2 model.embedding_dim 0.0 +451 2 optimizer.lr 0.004364700964968068 +451 2 negative_sampler.num_negs_per_pos 20.0 +451 2 training.batch_size 1.0 +451 3 model.embedding_dim 1.0 +451 3 optimizer.lr 
0.0524015471394734 +451 3 negative_sampler.num_negs_per_pos 49.0 +451 3 training.batch_size 1.0 +451 4 model.embedding_dim 2.0 +451 4 optimizer.lr 0.029357501694134015 +451 4 negative_sampler.num_negs_per_pos 81.0 +451 4 training.batch_size 2.0 +451 5 model.embedding_dim 2.0 +451 5 optimizer.lr 0.0028371320776572847 +451 5 negative_sampler.num_negs_per_pos 92.0 +451 5 training.batch_size 0.0 +451 6 model.embedding_dim 1.0 +451 6 optimizer.lr 0.0021732224200827076 +451 6 negative_sampler.num_negs_per_pos 73.0 +451 6 training.batch_size 1.0 +451 7 model.embedding_dim 2.0 +451 7 optimizer.lr 0.0017445416898844521 +451 7 negative_sampler.num_negs_per_pos 92.0 +451 7 training.batch_size 2.0 +451 8 model.embedding_dim 0.0 +451 8 optimizer.lr 0.0017228280291340015 +451 8 negative_sampler.num_negs_per_pos 51.0 +451 8 training.batch_size 1.0 +451 9 model.embedding_dim 0.0 +451 9 optimizer.lr 0.0037252567637814584 +451 9 negative_sampler.num_negs_per_pos 89.0 +451 9 training.batch_size 2.0 +451 10 model.embedding_dim 0.0 +451 10 optimizer.lr 0.07652396716349137 +451 10 negative_sampler.num_negs_per_pos 37.0 +451 10 training.batch_size 1.0 +451 11 model.embedding_dim 1.0 +451 11 optimizer.lr 0.014141165617974873 +451 11 negative_sampler.num_negs_per_pos 2.0 +451 11 training.batch_size 0.0 +451 12 model.embedding_dim 2.0 +451 12 optimizer.lr 0.006075222941412448 +451 12 negative_sampler.num_negs_per_pos 99.0 +451 12 training.batch_size 0.0 +451 13 model.embedding_dim 2.0 +451 13 optimizer.lr 0.007803854338953689 +451 13 negative_sampler.num_negs_per_pos 15.0 +451 13 training.batch_size 0.0 +451 14 model.embedding_dim 1.0 +451 14 optimizer.lr 0.09071680318343169 +451 14 negative_sampler.num_negs_per_pos 26.0 +451 14 training.batch_size 2.0 +451 1 dataset """fb15k237""" +451 1 model """proje""" +451 1 loss """bceaftersigmoid""" +451 1 regularizer """no""" +451 1 optimizer """adam""" +451 1 training_loop """owa""" +451 1 negative_sampler """basic""" +451 1 evaluator 
"""rankbased""" +451 2 dataset """fb15k237""" +451 2 model """proje""" +451 2 loss """bceaftersigmoid""" +451 2 regularizer """no""" +451 2 optimizer """adam""" +451 2 training_loop """owa""" +451 2 negative_sampler """basic""" +451 2 evaluator """rankbased""" +451 3 dataset """fb15k237""" +451 3 model """proje""" +451 3 loss """bceaftersigmoid""" +451 3 regularizer """no""" +451 3 optimizer """adam""" +451 3 training_loop """owa""" +451 3 negative_sampler """basic""" +451 3 evaluator """rankbased""" +451 4 dataset """fb15k237""" +451 4 model """proje""" +451 4 loss """bceaftersigmoid""" +451 4 regularizer """no""" +451 4 optimizer """adam""" +451 4 training_loop """owa""" +451 4 negative_sampler """basic""" +451 4 evaluator """rankbased""" +451 5 dataset """fb15k237""" +451 5 model """proje""" +451 5 loss """bceaftersigmoid""" +451 5 regularizer """no""" +451 5 optimizer """adam""" +451 5 training_loop """owa""" +451 5 negative_sampler """basic""" +451 5 evaluator """rankbased""" +451 6 dataset """fb15k237""" +451 6 model """proje""" +451 6 loss """bceaftersigmoid""" +451 6 regularizer """no""" +451 6 optimizer """adam""" +451 6 training_loop """owa""" +451 6 negative_sampler """basic""" +451 6 evaluator """rankbased""" +451 7 dataset """fb15k237""" +451 7 model """proje""" +451 7 loss """bceaftersigmoid""" +451 7 regularizer """no""" +451 7 optimizer """adam""" +451 7 training_loop """owa""" +451 7 negative_sampler """basic""" +451 7 evaluator """rankbased""" +451 8 dataset """fb15k237""" +451 8 model """proje""" +451 8 loss """bceaftersigmoid""" +451 8 regularizer """no""" +451 8 optimizer """adam""" +451 8 training_loop """owa""" +451 8 negative_sampler """basic""" +451 8 evaluator """rankbased""" +451 9 dataset """fb15k237""" +451 9 model """proje""" +451 9 loss """bceaftersigmoid""" +451 9 regularizer """no""" +451 9 optimizer """adam""" +451 9 training_loop """owa""" +451 9 negative_sampler """basic""" +451 9 evaluator """rankbased""" +451 10 dataset 
"""fb15k237""" +451 10 model """proje""" +451 10 loss """bceaftersigmoid""" +451 10 regularizer """no""" +451 10 optimizer """adam""" +451 10 training_loop """owa""" +451 10 negative_sampler """basic""" +451 10 evaluator """rankbased""" +451 11 dataset """fb15k237""" +451 11 model """proje""" +451 11 loss """bceaftersigmoid""" +451 11 regularizer """no""" +451 11 optimizer """adam""" +451 11 training_loop """owa""" +451 11 negative_sampler """basic""" +451 11 evaluator """rankbased""" +451 12 dataset """fb15k237""" +451 12 model """proje""" +451 12 loss """bceaftersigmoid""" +451 12 regularizer """no""" +451 12 optimizer """adam""" +451 12 training_loop """owa""" +451 12 negative_sampler """basic""" +451 12 evaluator """rankbased""" +451 13 dataset """fb15k237""" +451 13 model """proje""" +451 13 loss """bceaftersigmoid""" +451 13 regularizer """no""" +451 13 optimizer """adam""" +451 13 training_loop """owa""" +451 13 negative_sampler """basic""" +451 13 evaluator """rankbased""" +451 14 dataset """fb15k237""" +451 14 model """proje""" +451 14 loss """bceaftersigmoid""" +451 14 regularizer """no""" +451 14 optimizer """adam""" +451 14 training_loop """owa""" +451 14 negative_sampler """basic""" +451 14 evaluator """rankbased""" +452 1 model.embedding_dim 0.0 +452 1 optimizer.lr 0.0655099542587027 +452 1 negative_sampler.num_negs_per_pos 92.0 +452 1 training.batch_size 2.0 +452 2 model.embedding_dim 1.0 +452 2 optimizer.lr 0.0025784975140389207 +452 2 negative_sampler.num_negs_per_pos 5.0 +452 2 training.batch_size 0.0 +452 3 model.embedding_dim 2.0 +452 3 optimizer.lr 0.0025058610525237218 +452 3 negative_sampler.num_negs_per_pos 79.0 +452 3 training.batch_size 0.0 +452 4 model.embedding_dim 1.0 +452 4 optimizer.lr 0.026820739059218637 +452 4 negative_sampler.num_negs_per_pos 44.0 +452 4 training.batch_size 2.0 +452 5 model.embedding_dim 0.0 +452 5 optimizer.lr 0.055619408700655015 +452 5 negative_sampler.num_negs_per_pos 69.0 +452 5 training.batch_size 1.0 +452 6 
model.embedding_dim 1.0 +452 6 optimizer.lr 0.00815339529605562 +452 6 negative_sampler.num_negs_per_pos 60.0 +452 6 training.batch_size 2.0 +452 7 model.embedding_dim 2.0 +452 7 optimizer.lr 0.020390023743988673 +452 7 negative_sampler.num_negs_per_pos 89.0 +452 7 training.batch_size 2.0 +452 8 model.embedding_dim 0.0 +452 8 optimizer.lr 0.0011641421017878752 +452 8 negative_sampler.num_negs_per_pos 8.0 +452 8 training.batch_size 1.0 +452 9 model.embedding_dim 1.0 +452 9 optimizer.lr 0.022125459311375575 +452 9 negative_sampler.num_negs_per_pos 63.0 +452 9 training.batch_size 1.0 +452 10 model.embedding_dim 1.0 +452 10 optimizer.lr 0.08042570094322986 +452 10 negative_sampler.num_negs_per_pos 33.0 +452 10 training.batch_size 2.0 +452 11 model.embedding_dim 2.0 +452 11 optimizer.lr 0.0016620584034815035 +452 11 negative_sampler.num_negs_per_pos 26.0 +452 11 training.batch_size 0.0 +452 12 model.embedding_dim 0.0 +452 12 optimizer.lr 0.0030720560103602744 +452 12 negative_sampler.num_negs_per_pos 16.0 +452 12 training.batch_size 0.0 +452 13 model.embedding_dim 1.0 +452 13 optimizer.lr 0.012818093935405651 +452 13 negative_sampler.num_negs_per_pos 29.0 +452 13 training.batch_size 2.0 +452 14 model.embedding_dim 2.0 +452 14 optimizer.lr 0.008994822824307618 +452 14 negative_sampler.num_negs_per_pos 62.0 +452 14 training.batch_size 0.0 +452 15 model.embedding_dim 1.0 +452 15 optimizer.lr 0.0828397796875585 +452 15 negative_sampler.num_negs_per_pos 35.0 +452 15 training.batch_size 0.0 +452 16 model.embedding_dim 0.0 +452 16 optimizer.lr 0.0010807330378749236 +452 16 negative_sampler.num_negs_per_pos 38.0 +452 16 training.batch_size 2.0 +452 17 model.embedding_dim 2.0 +452 17 optimizer.lr 0.006447716239831121 +452 17 negative_sampler.num_negs_per_pos 72.0 +452 17 training.batch_size 2.0 +452 18 model.embedding_dim 2.0 +452 18 optimizer.lr 0.012324978967703085 +452 18 negative_sampler.num_negs_per_pos 99.0 +452 18 training.batch_size 1.0 +452 19 model.embedding_dim 1.0 
+452 19 optimizer.lr 0.0034808091608804527 +452 19 negative_sampler.num_negs_per_pos 40.0 +452 19 training.batch_size 0.0 +452 1 dataset """fb15k237""" +452 1 model """proje""" +452 1 loss """softplus""" +452 1 regularizer """no""" +452 1 optimizer """adam""" +452 1 training_loop """owa""" +452 1 negative_sampler """basic""" +452 1 evaluator """rankbased""" +452 2 dataset """fb15k237""" +452 2 model """proje""" +452 2 loss """softplus""" +452 2 regularizer """no""" +452 2 optimizer """adam""" +452 2 training_loop """owa""" +452 2 negative_sampler """basic""" +452 2 evaluator """rankbased""" +452 3 dataset """fb15k237""" +452 3 model """proje""" +452 3 loss """softplus""" +452 3 regularizer """no""" +452 3 optimizer """adam""" +452 3 training_loop """owa""" +452 3 negative_sampler """basic""" +452 3 evaluator """rankbased""" +452 4 dataset """fb15k237""" +452 4 model """proje""" +452 4 loss """softplus""" +452 4 regularizer """no""" +452 4 optimizer """adam""" +452 4 training_loop """owa""" +452 4 negative_sampler """basic""" +452 4 evaluator """rankbased""" +452 5 dataset """fb15k237""" +452 5 model """proje""" +452 5 loss """softplus""" +452 5 regularizer """no""" +452 5 optimizer """adam""" +452 5 training_loop """owa""" +452 5 negative_sampler """basic""" +452 5 evaluator """rankbased""" +452 6 dataset """fb15k237""" +452 6 model """proje""" +452 6 loss """softplus""" +452 6 regularizer """no""" +452 6 optimizer """adam""" +452 6 training_loop """owa""" +452 6 negative_sampler """basic""" +452 6 evaluator """rankbased""" +452 7 dataset """fb15k237""" +452 7 model """proje""" +452 7 loss """softplus""" +452 7 regularizer """no""" +452 7 optimizer """adam""" +452 7 training_loop """owa""" +452 7 negative_sampler """basic""" +452 7 evaluator """rankbased""" +452 8 dataset """fb15k237""" +452 8 model """proje""" +452 8 loss """softplus""" +452 8 regularizer """no""" +452 8 optimizer """adam""" +452 8 training_loop """owa""" +452 8 negative_sampler """basic""" +452 8 
evaluator """rankbased""" +452 9 dataset """fb15k237""" +452 9 model """proje""" +452 9 loss """softplus""" +452 9 regularizer """no""" +452 9 optimizer """adam""" +452 9 training_loop """owa""" +452 9 negative_sampler """basic""" +452 9 evaluator """rankbased""" +452 10 dataset """fb15k237""" +452 10 model """proje""" +452 10 loss """softplus""" +452 10 regularizer """no""" +452 10 optimizer """adam""" +452 10 training_loop """owa""" +452 10 negative_sampler """basic""" +452 10 evaluator """rankbased""" +452 11 dataset """fb15k237""" +452 11 model """proje""" +452 11 loss """softplus""" +452 11 regularizer """no""" +452 11 optimizer """adam""" +452 11 training_loop """owa""" +452 11 negative_sampler """basic""" +452 11 evaluator """rankbased""" +452 12 dataset """fb15k237""" +452 12 model """proje""" +452 12 loss """softplus""" +452 12 regularizer """no""" +452 12 optimizer """adam""" +452 12 training_loop """owa""" +452 12 negative_sampler """basic""" +452 12 evaluator """rankbased""" +452 13 dataset """fb15k237""" +452 13 model """proje""" +452 13 loss """softplus""" +452 13 regularizer """no""" +452 13 optimizer """adam""" +452 13 training_loop """owa""" +452 13 negative_sampler """basic""" +452 13 evaluator """rankbased""" +452 14 dataset """fb15k237""" +452 14 model """proje""" +452 14 loss """softplus""" +452 14 regularizer """no""" +452 14 optimizer """adam""" +452 14 training_loop """owa""" +452 14 negative_sampler """basic""" +452 14 evaluator """rankbased""" +452 15 dataset """fb15k237""" +452 15 model """proje""" +452 15 loss """softplus""" +452 15 regularizer """no""" +452 15 optimizer """adam""" +452 15 training_loop """owa""" +452 15 negative_sampler """basic""" +452 15 evaluator """rankbased""" +452 16 dataset """fb15k237""" +452 16 model """proje""" +452 16 loss """softplus""" +452 16 regularizer """no""" +452 16 optimizer """adam""" +452 16 training_loop """owa""" +452 16 negative_sampler """basic""" +452 16 evaluator """rankbased""" +452 17 
dataset """fb15k237""" +452 17 model """proje""" +452 17 loss """softplus""" +452 17 regularizer """no""" +452 17 optimizer """adam""" +452 17 training_loop """owa""" +452 17 negative_sampler """basic""" +452 17 evaluator """rankbased""" +452 18 dataset """fb15k237""" +452 18 model """proje""" +452 18 loss """softplus""" +452 18 regularizer """no""" +452 18 optimizer """adam""" +452 18 training_loop """owa""" +452 18 negative_sampler """basic""" +452 18 evaluator """rankbased""" +452 19 dataset """fb15k237""" +452 19 model """proje""" +452 19 loss """softplus""" +452 19 regularizer """no""" +452 19 optimizer """adam""" +452 19 training_loop """owa""" +452 19 negative_sampler """basic""" +452 19 evaluator """rankbased""" +453 1 model.embedding_dim 0.0 +453 1 optimizer.lr 0.03527752059191336 +453 1 negative_sampler.num_negs_per_pos 62.0 +453 1 training.batch_size 2.0 +453 2 model.embedding_dim 1.0 +453 2 optimizer.lr 0.08293023196908504 +453 2 negative_sampler.num_negs_per_pos 83.0 +453 2 training.batch_size 0.0 +453 3 model.embedding_dim 2.0 +453 3 optimizer.lr 0.004109086546161039 +453 3 negative_sampler.num_negs_per_pos 82.0 +453 3 training.batch_size 1.0 +453 4 model.embedding_dim 1.0 +453 4 optimizer.lr 0.017330648672931482 +453 4 negative_sampler.num_negs_per_pos 86.0 +453 4 training.batch_size 1.0 +453 5 model.embedding_dim 1.0 +453 5 optimizer.lr 0.008747357398380194 +453 5 negative_sampler.num_negs_per_pos 2.0 +453 5 training.batch_size 0.0 +453 6 model.embedding_dim 2.0 +453 6 optimizer.lr 0.014605778839683402 +453 6 negative_sampler.num_negs_per_pos 56.0 +453 6 training.batch_size 1.0 +453 7 model.embedding_dim 1.0 +453 7 optimizer.lr 0.006488671748541771 +453 7 negative_sampler.num_negs_per_pos 44.0 +453 7 training.batch_size 2.0 +453 8 model.embedding_dim 0.0 +453 8 optimizer.lr 0.033826695515007194 +453 8 negative_sampler.num_negs_per_pos 4.0 +453 8 training.batch_size 0.0 +453 9 model.embedding_dim 2.0 +453 9 optimizer.lr 0.006460003363883841 +453 9 
negative_sampler.num_negs_per_pos 83.0 +453 9 training.batch_size 0.0 +453 10 model.embedding_dim 1.0 +453 10 optimizer.lr 0.039480407215385115 +453 10 negative_sampler.num_negs_per_pos 7.0 +453 10 training.batch_size 0.0 +453 11 model.embedding_dim 0.0 +453 11 optimizer.lr 0.0200474874326027 +453 11 negative_sampler.num_negs_per_pos 70.0 +453 11 training.batch_size 0.0 +453 12 model.embedding_dim 0.0 +453 12 optimizer.lr 0.0015218250388833996 +453 12 negative_sampler.num_negs_per_pos 17.0 +453 12 training.batch_size 2.0 +453 13 model.embedding_dim 2.0 +453 13 optimizer.lr 0.031584532165688343 +453 13 negative_sampler.num_negs_per_pos 45.0 +453 13 training.batch_size 0.0 +453 14 model.embedding_dim 1.0 +453 14 optimizer.lr 0.00585374869231813 +453 14 negative_sampler.num_negs_per_pos 30.0 +453 14 training.batch_size 0.0 +453 15 model.embedding_dim 2.0 +453 15 optimizer.lr 0.003588579263653976 +453 15 negative_sampler.num_negs_per_pos 91.0 +453 15 training.batch_size 2.0 +453 16 model.embedding_dim 1.0 +453 16 optimizer.lr 0.006742464447015206 +453 16 negative_sampler.num_negs_per_pos 1.0 +453 16 training.batch_size 2.0 +453 17 model.embedding_dim 1.0 +453 17 optimizer.lr 0.001305017875462117 +453 17 negative_sampler.num_negs_per_pos 5.0 +453 17 training.batch_size 1.0 +453 18 model.embedding_dim 2.0 +453 18 optimizer.lr 0.033180099001941736 +453 18 negative_sampler.num_negs_per_pos 49.0 +453 18 training.batch_size 1.0 +453 19 model.embedding_dim 2.0 +453 19 optimizer.lr 0.05600841951827537 +453 19 negative_sampler.num_negs_per_pos 68.0 +453 19 training.batch_size 1.0 +453 20 model.embedding_dim 0.0 +453 20 optimizer.lr 0.06842888218938682 +453 20 negative_sampler.num_negs_per_pos 15.0 +453 20 training.batch_size 1.0 +453 21 model.embedding_dim 0.0 +453 21 optimizer.lr 0.006969245252952307 +453 21 negative_sampler.num_negs_per_pos 23.0 +453 21 training.batch_size 1.0 +453 22 model.embedding_dim 1.0 +453 22 optimizer.lr 0.04531143398619439 +453 22 
negative_sampler.num_negs_per_pos 33.0 +453 22 training.batch_size 1.0 +453 23 model.embedding_dim 0.0 +453 23 optimizer.lr 0.009173032992695097 +453 23 negative_sampler.num_negs_per_pos 73.0 +453 23 training.batch_size 1.0 +453 24 model.embedding_dim 2.0 +453 24 optimizer.lr 0.005405442810014424 +453 24 negative_sampler.num_negs_per_pos 57.0 +453 24 training.batch_size 1.0 +453 25 model.embedding_dim 2.0 +453 25 optimizer.lr 0.035937736144390844 +453 25 negative_sampler.num_negs_per_pos 65.0 +453 25 training.batch_size 0.0 +453 26 model.embedding_dim 2.0 +453 26 optimizer.lr 0.0016426344707575365 +453 26 negative_sampler.num_negs_per_pos 77.0 +453 26 training.batch_size 1.0 +453 27 model.embedding_dim 0.0 +453 27 optimizer.lr 0.04077340082188388 +453 27 negative_sampler.num_negs_per_pos 33.0 +453 27 training.batch_size 0.0 +453 28 model.embedding_dim 1.0 +453 28 optimizer.lr 0.014566740333950664 +453 28 negative_sampler.num_negs_per_pos 10.0 +453 28 training.batch_size 1.0 +453 29 model.embedding_dim 0.0 +453 29 optimizer.lr 0.009449030355329026 +453 29 negative_sampler.num_negs_per_pos 2.0 +453 29 training.batch_size 2.0 +453 30 model.embedding_dim 0.0 +453 30 optimizer.lr 0.02236525048435807 +453 30 negative_sampler.num_negs_per_pos 21.0 +453 30 training.batch_size 0.0 +453 31 model.embedding_dim 0.0 +453 31 optimizer.lr 0.030799740602819547 +453 31 negative_sampler.num_negs_per_pos 74.0 +453 31 training.batch_size 2.0 +453 32 model.embedding_dim 2.0 +453 32 optimizer.lr 0.0019767328381787423 +453 32 negative_sampler.num_negs_per_pos 79.0 +453 32 training.batch_size 2.0 +453 33 model.embedding_dim 1.0 +453 33 optimizer.lr 0.06676828685319616 +453 33 negative_sampler.num_negs_per_pos 70.0 +453 33 training.batch_size 1.0 +453 34 model.embedding_dim 2.0 +453 34 optimizer.lr 0.04195664181169152 +453 34 negative_sampler.num_negs_per_pos 1.0 +453 34 training.batch_size 2.0 +453 35 model.embedding_dim 0.0 +453 35 optimizer.lr 0.09447531073627696 +453 35 
negative_sampler.num_negs_per_pos 20.0 +453 35 training.batch_size 2.0 +453 36 model.embedding_dim 1.0 +453 36 optimizer.lr 0.0642119920951986 +453 36 negative_sampler.num_negs_per_pos 41.0 +453 36 training.batch_size 1.0 +453 1 dataset """fb15k237""" +453 1 model """proje""" +453 1 loss """bceaftersigmoid""" +453 1 regularizer """no""" +453 1 optimizer """adam""" +453 1 training_loop """owa""" +453 1 negative_sampler """basic""" +453 1 evaluator """rankbased""" +453 2 dataset """fb15k237""" +453 2 model """proje""" +453 2 loss """bceaftersigmoid""" +453 2 regularizer """no""" +453 2 optimizer """adam""" +453 2 training_loop """owa""" +453 2 negative_sampler """basic""" +453 2 evaluator """rankbased""" +453 3 dataset """fb15k237""" +453 3 model """proje""" +453 3 loss """bceaftersigmoid""" +453 3 regularizer """no""" +453 3 optimizer """adam""" +453 3 training_loop """owa""" +453 3 negative_sampler """basic""" +453 3 evaluator """rankbased""" +453 4 dataset """fb15k237""" +453 4 model """proje""" +453 4 loss """bceaftersigmoid""" +453 4 regularizer """no""" +453 4 optimizer """adam""" +453 4 training_loop """owa""" +453 4 negative_sampler """basic""" +453 4 evaluator """rankbased""" +453 5 dataset """fb15k237""" +453 5 model """proje""" +453 5 loss """bceaftersigmoid""" +453 5 regularizer """no""" +453 5 optimizer """adam""" +453 5 training_loop """owa""" +453 5 negative_sampler """basic""" +453 5 evaluator """rankbased""" +453 6 dataset """fb15k237""" +453 6 model """proje""" +453 6 loss """bceaftersigmoid""" +453 6 regularizer """no""" +453 6 optimizer """adam""" +453 6 training_loop """owa""" +453 6 negative_sampler """basic""" +453 6 evaluator """rankbased""" +453 7 dataset """fb15k237""" +453 7 model """proje""" +453 7 loss """bceaftersigmoid""" +453 7 regularizer """no""" +453 7 optimizer """adam""" +453 7 training_loop """owa""" +453 7 negative_sampler """basic""" +453 7 evaluator """rankbased""" +453 8 dataset """fb15k237""" +453 8 model """proje""" +453 8 
loss """bceaftersigmoid""" +453 8 regularizer """no""" +453 8 optimizer """adam""" +453 8 training_loop """owa""" +453 8 negative_sampler """basic""" +453 8 evaluator """rankbased""" +453 9 dataset """fb15k237""" +453 9 model """proje""" +453 9 loss """bceaftersigmoid""" +453 9 regularizer """no""" +453 9 optimizer """adam""" +453 9 training_loop """owa""" +453 9 negative_sampler """basic""" +453 9 evaluator """rankbased""" +453 10 dataset """fb15k237""" +453 10 model """proje""" +453 10 loss """bceaftersigmoid""" +453 10 regularizer """no""" +453 10 optimizer """adam""" +453 10 training_loop """owa""" +453 10 negative_sampler """basic""" +453 10 evaluator """rankbased""" +453 11 dataset """fb15k237""" +453 11 model """proje""" +453 11 loss """bceaftersigmoid""" +453 11 regularizer """no""" +453 11 optimizer """adam""" +453 11 training_loop """owa""" +453 11 negative_sampler """basic""" +453 11 evaluator """rankbased""" +453 12 dataset """fb15k237""" +453 12 model """proje""" +453 12 loss """bceaftersigmoid""" +453 12 regularizer """no""" +453 12 optimizer """adam""" +453 12 training_loop """owa""" +453 12 negative_sampler """basic""" +453 12 evaluator """rankbased""" +453 13 dataset """fb15k237""" +453 13 model """proje""" +453 13 loss """bceaftersigmoid""" +453 13 regularizer """no""" +453 13 optimizer """adam""" +453 13 training_loop """owa""" +453 13 negative_sampler """basic""" +453 13 evaluator """rankbased""" +453 14 dataset """fb15k237""" +453 14 model """proje""" +453 14 loss """bceaftersigmoid""" +453 14 regularizer """no""" +453 14 optimizer """adam""" +453 14 training_loop """owa""" +453 14 negative_sampler """basic""" +453 14 evaluator """rankbased""" +453 15 dataset """fb15k237""" +453 15 model """proje""" +453 15 loss """bceaftersigmoid""" +453 15 regularizer """no""" +453 15 optimizer """adam""" +453 15 training_loop """owa""" +453 15 negative_sampler """basic""" +453 15 evaluator """rankbased""" +453 16 dataset """fb15k237""" +453 16 model 
"""proje""" +453 16 loss """bceaftersigmoid""" +453 16 regularizer """no""" +453 16 optimizer """adam""" +453 16 training_loop """owa""" +453 16 negative_sampler """basic""" +453 16 evaluator """rankbased""" +453 17 dataset """fb15k237""" +453 17 model """proje""" +453 17 loss """bceaftersigmoid""" +453 17 regularizer """no""" +453 17 optimizer """adam""" +453 17 training_loop """owa""" +453 17 negative_sampler """basic""" +453 17 evaluator """rankbased""" +453 18 dataset """fb15k237""" +453 18 model """proje""" +453 18 loss """bceaftersigmoid""" +453 18 regularizer """no""" +453 18 optimizer """adam""" +453 18 training_loop """owa""" +453 18 negative_sampler """basic""" +453 18 evaluator """rankbased""" +453 19 dataset """fb15k237""" +453 19 model """proje""" +453 19 loss """bceaftersigmoid""" +453 19 regularizer """no""" +453 19 optimizer """adam""" +453 19 training_loop """owa""" +453 19 negative_sampler """basic""" +453 19 evaluator """rankbased""" +453 20 dataset """fb15k237""" +453 20 model """proje""" +453 20 loss """bceaftersigmoid""" +453 20 regularizer """no""" +453 20 optimizer """adam""" +453 20 training_loop """owa""" +453 20 negative_sampler """basic""" +453 20 evaluator """rankbased""" +453 21 dataset """fb15k237""" +453 21 model """proje""" +453 21 loss """bceaftersigmoid""" +453 21 regularizer """no""" +453 21 optimizer """adam""" +453 21 training_loop """owa""" +453 21 negative_sampler """basic""" +453 21 evaluator """rankbased""" +453 22 dataset """fb15k237""" +453 22 model """proje""" +453 22 loss """bceaftersigmoid""" +453 22 regularizer """no""" +453 22 optimizer """adam""" +453 22 training_loop """owa""" +453 22 negative_sampler """basic""" +453 22 evaluator """rankbased""" +453 23 dataset """fb15k237""" +453 23 model """proje""" +453 23 loss """bceaftersigmoid""" +453 23 regularizer """no""" +453 23 optimizer """adam""" +453 23 training_loop """owa""" +453 23 negative_sampler """basic""" +453 23 evaluator """rankbased""" +453 24 dataset 
"""fb15k237""" +453 24 model """proje""" +453 24 loss """bceaftersigmoid""" +453 24 regularizer """no""" +453 24 optimizer """adam""" +453 24 training_loop """owa""" +453 24 negative_sampler """basic""" +453 24 evaluator """rankbased""" +453 25 dataset """fb15k237""" +453 25 model """proje""" +453 25 loss """bceaftersigmoid""" +453 25 regularizer """no""" +453 25 optimizer """adam""" +453 25 training_loop """owa""" +453 25 negative_sampler """basic""" +453 25 evaluator """rankbased""" +453 26 dataset """fb15k237""" +453 26 model """proje""" +453 26 loss """bceaftersigmoid""" +453 26 regularizer """no""" +453 26 optimizer """adam""" +453 26 training_loop """owa""" +453 26 negative_sampler """basic""" +453 26 evaluator """rankbased""" +453 27 dataset """fb15k237""" +453 27 model """proje""" +453 27 loss """bceaftersigmoid""" +453 27 regularizer """no""" +453 27 optimizer """adam""" +453 27 training_loop """owa""" +453 27 negative_sampler """basic""" +453 27 evaluator """rankbased""" +453 28 dataset """fb15k237""" +453 28 model """proje""" +453 28 loss """bceaftersigmoid""" +453 28 regularizer """no""" +453 28 optimizer """adam""" +453 28 training_loop """owa""" +453 28 negative_sampler """basic""" +453 28 evaluator """rankbased""" +453 29 dataset """fb15k237""" +453 29 model """proje""" +453 29 loss """bceaftersigmoid""" +453 29 regularizer """no""" +453 29 optimizer """adam""" +453 29 training_loop """owa""" +453 29 negative_sampler """basic""" +453 29 evaluator """rankbased""" +453 30 dataset """fb15k237""" +453 30 model """proje""" +453 30 loss """bceaftersigmoid""" +453 30 regularizer """no""" +453 30 optimizer """adam""" +453 30 training_loop """owa""" +453 30 negative_sampler """basic""" +453 30 evaluator """rankbased""" +453 31 dataset """fb15k237""" +453 31 model """proje""" +453 31 loss """bceaftersigmoid""" +453 31 regularizer """no""" +453 31 optimizer """adam""" +453 31 training_loop """owa""" +453 31 negative_sampler """basic""" +453 31 evaluator 
"""rankbased""" +453 32 dataset """fb15k237""" +453 32 model """proje""" +453 32 loss """bceaftersigmoid""" +453 32 regularizer """no""" +453 32 optimizer """adam""" +453 32 training_loop """owa""" +453 32 negative_sampler """basic""" +453 32 evaluator """rankbased""" +453 33 dataset """fb15k237""" +453 33 model """proje""" +453 33 loss """bceaftersigmoid""" +453 33 regularizer """no""" +453 33 optimizer """adam""" +453 33 training_loop """owa""" +453 33 negative_sampler """basic""" +453 33 evaluator """rankbased""" +453 34 dataset """fb15k237""" +453 34 model """proje""" +453 34 loss """bceaftersigmoid""" +453 34 regularizer """no""" +453 34 optimizer """adam""" +453 34 training_loop """owa""" +453 34 negative_sampler """basic""" +453 34 evaluator """rankbased""" +453 35 dataset """fb15k237""" +453 35 model """proje""" +453 35 loss """bceaftersigmoid""" +453 35 regularizer """no""" +453 35 optimizer """adam""" +453 35 training_loop """owa""" +453 35 negative_sampler """basic""" +453 35 evaluator """rankbased""" +453 36 dataset """fb15k237""" +453 36 model """proje""" +453 36 loss """bceaftersigmoid""" +453 36 regularizer """no""" +453 36 optimizer """adam""" +453 36 training_loop """owa""" +453 36 negative_sampler """basic""" +453 36 evaluator """rankbased""" +454 1 model.embedding_dim 0.0 +454 1 optimizer.lr 0.03926694885255116 +454 1 negative_sampler.num_negs_per_pos 56.0 +454 1 training.batch_size 1.0 +454 2 model.embedding_dim 1.0 +454 2 optimizer.lr 0.009904424270778037 +454 2 negative_sampler.num_negs_per_pos 9.0 +454 2 training.batch_size 0.0 +454 3 model.embedding_dim 1.0 +454 3 optimizer.lr 0.0013376961517656575 +454 3 negative_sampler.num_negs_per_pos 51.0 +454 3 training.batch_size 0.0 +454 4 model.embedding_dim 1.0 +454 4 optimizer.lr 0.02259101597991313 +454 4 negative_sampler.num_negs_per_pos 12.0 +454 4 training.batch_size 2.0 +454 5 model.embedding_dim 1.0 +454 5 optimizer.lr 0.0012063858368897412 +454 5 negative_sampler.num_negs_per_pos 56.0 +454 
5 training.batch_size 1.0 +454 6 model.embedding_dim 2.0 +454 6 optimizer.lr 0.01029735463868666 +454 6 negative_sampler.num_negs_per_pos 27.0 +454 6 training.batch_size 0.0 +454 7 model.embedding_dim 0.0 +454 7 optimizer.lr 0.0014988630391867801 +454 7 negative_sampler.num_negs_per_pos 57.0 +454 7 training.batch_size 2.0 +454 8 model.embedding_dim 0.0 +454 8 optimizer.lr 0.01823079732891423 +454 8 negative_sampler.num_negs_per_pos 11.0 +454 8 training.batch_size 1.0 +454 9 model.embedding_dim 2.0 +454 9 optimizer.lr 0.004875125453436211 +454 9 negative_sampler.num_negs_per_pos 19.0 +454 9 training.batch_size 1.0 +454 10 model.embedding_dim 2.0 +454 10 optimizer.lr 0.022575530035320205 +454 10 negative_sampler.num_negs_per_pos 31.0 +454 10 training.batch_size 0.0 +454 11 model.embedding_dim 1.0 +454 11 optimizer.lr 0.0041752588481174515 +454 11 negative_sampler.num_negs_per_pos 37.0 +454 11 training.batch_size 1.0 +454 12 model.embedding_dim 1.0 +454 12 optimizer.lr 0.008497502493114008 +454 12 negative_sampler.num_negs_per_pos 24.0 +454 12 training.batch_size 1.0 +454 13 model.embedding_dim 0.0 +454 13 optimizer.lr 0.08736549674849697 +454 13 negative_sampler.num_negs_per_pos 26.0 +454 13 training.batch_size 0.0 +454 14 model.embedding_dim 1.0 +454 14 optimizer.lr 0.05671175013040077 +454 14 negative_sampler.num_negs_per_pos 28.0 +454 14 training.batch_size 2.0 +454 15 model.embedding_dim 2.0 +454 15 optimizer.lr 0.03216128551194749 +454 15 negative_sampler.num_negs_per_pos 25.0 +454 15 training.batch_size 2.0 +454 16 model.embedding_dim 1.0 +454 16 optimizer.lr 0.02662925019600534 +454 16 negative_sampler.num_negs_per_pos 74.0 +454 16 training.batch_size 0.0 +454 17 model.embedding_dim 0.0 +454 17 optimizer.lr 0.011522716380036858 +454 17 negative_sampler.num_negs_per_pos 90.0 +454 17 training.batch_size 0.0 +454 18 model.embedding_dim 1.0 +454 18 optimizer.lr 0.032907228394285865 +454 18 negative_sampler.num_negs_per_pos 77.0 +454 18 training.batch_size 1.0 +454 
19 model.embedding_dim 0.0 +454 19 optimizer.lr 0.007009044610340619 +454 19 negative_sampler.num_negs_per_pos 8.0 +454 19 training.batch_size 2.0 +454 20 model.embedding_dim 2.0 +454 20 optimizer.lr 0.029946285538026602 +454 20 negative_sampler.num_negs_per_pos 28.0 +454 20 training.batch_size 2.0 +454 21 model.embedding_dim 0.0 +454 21 optimizer.lr 0.026549685385197603 +454 21 negative_sampler.num_negs_per_pos 42.0 +454 21 training.batch_size 1.0 +454 22 model.embedding_dim 2.0 +454 22 optimizer.lr 0.00429493633159944 +454 22 negative_sampler.num_negs_per_pos 29.0 +454 22 training.batch_size 1.0 +454 23 model.embedding_dim 0.0 +454 23 optimizer.lr 0.02332071200808479 +454 23 negative_sampler.num_negs_per_pos 9.0 +454 23 training.batch_size 1.0 +454 24 model.embedding_dim 2.0 +454 24 optimizer.lr 0.003578460758518346 +454 24 negative_sampler.num_negs_per_pos 83.0 +454 24 training.batch_size 1.0 +454 25 model.embedding_dim 0.0 +454 25 optimizer.lr 0.0028698826995704105 +454 25 negative_sampler.num_negs_per_pos 29.0 +454 25 training.batch_size 0.0 +454 26 model.embedding_dim 2.0 +454 26 optimizer.lr 0.02117602693579815 +454 26 negative_sampler.num_negs_per_pos 44.0 +454 26 training.batch_size 1.0 +454 1 dataset """fb15k237""" +454 1 model """proje""" +454 1 loss """softplus""" +454 1 regularizer """no""" +454 1 optimizer """adam""" +454 1 training_loop """owa""" +454 1 negative_sampler """basic""" +454 1 evaluator """rankbased""" +454 2 dataset """fb15k237""" +454 2 model """proje""" +454 2 loss """softplus""" +454 2 regularizer """no""" +454 2 optimizer """adam""" +454 2 training_loop """owa""" +454 2 negative_sampler """basic""" +454 2 evaluator """rankbased""" +454 3 dataset """fb15k237""" +454 3 model """proje""" +454 3 loss """softplus""" +454 3 regularizer """no""" +454 3 optimizer """adam""" +454 3 training_loop """owa""" +454 3 negative_sampler """basic""" +454 3 evaluator """rankbased""" +454 4 dataset """fb15k237""" +454 4 model """proje""" +454 4 loss 
"""softplus""" +454 4 regularizer """no""" +454 4 optimizer """adam""" +454 4 training_loop """owa""" +454 4 negative_sampler """basic""" +454 4 evaluator """rankbased""" +454 5 dataset """fb15k237""" +454 5 model """proje""" +454 5 loss """softplus""" +454 5 regularizer """no""" +454 5 optimizer """adam""" +454 5 training_loop """owa""" +454 5 negative_sampler """basic""" +454 5 evaluator """rankbased""" +454 6 dataset """fb15k237""" +454 6 model """proje""" +454 6 loss """softplus""" +454 6 regularizer """no""" +454 6 optimizer """adam""" +454 6 training_loop """owa""" +454 6 negative_sampler """basic""" +454 6 evaluator """rankbased""" +454 7 dataset """fb15k237""" +454 7 model """proje""" +454 7 loss """softplus""" +454 7 regularizer """no""" +454 7 optimizer """adam""" +454 7 training_loop """owa""" +454 7 negative_sampler """basic""" +454 7 evaluator """rankbased""" +454 8 dataset """fb15k237""" +454 8 model """proje""" +454 8 loss """softplus""" +454 8 regularizer """no""" +454 8 optimizer """adam""" +454 8 training_loop """owa""" +454 8 negative_sampler """basic""" +454 8 evaluator """rankbased""" +454 9 dataset """fb15k237""" +454 9 model """proje""" +454 9 loss """softplus""" +454 9 regularizer """no""" +454 9 optimizer """adam""" +454 9 training_loop """owa""" +454 9 negative_sampler """basic""" +454 9 evaluator """rankbased""" +454 10 dataset """fb15k237""" +454 10 model """proje""" +454 10 loss """softplus""" +454 10 regularizer """no""" +454 10 optimizer """adam""" +454 10 training_loop """owa""" +454 10 negative_sampler """basic""" +454 10 evaluator """rankbased""" +454 11 dataset """fb15k237""" +454 11 model """proje""" +454 11 loss """softplus""" +454 11 regularizer """no""" +454 11 optimizer """adam""" +454 11 training_loop """owa""" +454 11 negative_sampler """basic""" +454 11 evaluator """rankbased""" +454 12 dataset """fb15k237""" +454 12 model """proje""" +454 12 loss """softplus""" +454 12 regularizer """no""" +454 12 optimizer """adam""" 
+454 12 training_loop """owa""" +454 12 negative_sampler """basic""" +454 12 evaluator """rankbased""" +454 13 dataset """fb15k237""" +454 13 model """proje""" +454 13 loss """softplus""" +454 13 regularizer """no""" +454 13 optimizer """adam""" +454 13 training_loop """owa""" +454 13 negative_sampler """basic""" +454 13 evaluator """rankbased""" +454 14 dataset """fb15k237""" +454 14 model """proje""" +454 14 loss """softplus""" +454 14 regularizer """no""" +454 14 optimizer """adam""" +454 14 training_loop """owa""" +454 14 negative_sampler """basic""" +454 14 evaluator """rankbased""" +454 15 dataset """fb15k237""" +454 15 model """proje""" +454 15 loss """softplus""" +454 15 regularizer """no""" +454 15 optimizer """adam""" +454 15 training_loop """owa""" +454 15 negative_sampler """basic""" +454 15 evaluator """rankbased""" +454 16 dataset """fb15k237""" +454 16 model """proje""" +454 16 loss """softplus""" +454 16 regularizer """no""" +454 16 optimizer """adam""" +454 16 training_loop """owa""" +454 16 negative_sampler """basic""" +454 16 evaluator """rankbased""" +454 17 dataset """fb15k237""" +454 17 model """proje""" +454 17 loss """softplus""" +454 17 regularizer """no""" +454 17 optimizer """adam""" +454 17 training_loop """owa""" +454 17 negative_sampler """basic""" +454 17 evaluator """rankbased""" +454 18 dataset """fb15k237""" +454 18 model """proje""" +454 18 loss """softplus""" +454 18 regularizer """no""" +454 18 optimizer """adam""" +454 18 training_loop """owa""" +454 18 negative_sampler """basic""" +454 18 evaluator """rankbased""" +454 19 dataset """fb15k237""" +454 19 model """proje""" +454 19 loss """softplus""" +454 19 regularizer """no""" +454 19 optimizer """adam""" +454 19 training_loop """owa""" +454 19 negative_sampler """basic""" +454 19 evaluator """rankbased""" +454 20 dataset """fb15k237""" +454 20 model """proje""" +454 20 loss """softplus""" +454 20 regularizer """no""" +454 20 optimizer """adam""" +454 20 training_loop """owa""" 
+454 20 negative_sampler """basic""" +454 20 evaluator """rankbased""" +454 21 dataset """fb15k237""" +454 21 model """proje""" +454 21 loss """softplus""" +454 21 regularizer """no""" +454 21 optimizer """adam""" +454 21 training_loop """owa""" +454 21 negative_sampler """basic""" +454 21 evaluator """rankbased""" +454 22 dataset """fb15k237""" +454 22 model """proje""" +454 22 loss """softplus""" +454 22 regularizer """no""" +454 22 optimizer """adam""" +454 22 training_loop """owa""" +454 22 negative_sampler """basic""" +454 22 evaluator """rankbased""" +454 23 dataset """fb15k237""" +454 23 model """proje""" +454 23 loss """softplus""" +454 23 regularizer """no""" +454 23 optimizer """adam""" +454 23 training_loop """owa""" +454 23 negative_sampler """basic""" +454 23 evaluator """rankbased""" +454 24 dataset """fb15k237""" +454 24 model """proje""" +454 24 loss """softplus""" +454 24 regularizer """no""" +454 24 optimizer """adam""" +454 24 training_loop """owa""" +454 24 negative_sampler """basic""" +454 24 evaluator """rankbased""" +454 25 dataset """fb15k237""" +454 25 model """proje""" +454 25 loss """softplus""" +454 25 regularizer """no""" +454 25 optimizer """adam""" +454 25 training_loop """owa""" +454 25 negative_sampler """basic""" +454 25 evaluator """rankbased""" +454 26 dataset """fb15k237""" +454 26 model """proje""" +454 26 loss """softplus""" +454 26 regularizer """no""" +454 26 optimizer """adam""" +454 26 training_loop """owa""" +454 26 negative_sampler """basic""" +454 26 evaluator """rankbased""" +455 1 model.embedding_dim 1.0 +455 1 optimizer.lr 0.025145436761534263 +455 1 training.batch_size 0.0 +455 1 training.label_smoothing 0.004286889503677184 +455 2 model.embedding_dim 2.0 +455 2 optimizer.lr 0.023850798787857168 +455 2 training.batch_size 2.0 +455 2 training.label_smoothing 0.19621768631722666 +455 3 model.embedding_dim 1.0 +455 3 optimizer.lr 0.03303665483700539 +455 3 training.batch_size 0.0 +455 3 training.label_smoothing 
0.0022998329593923983 +455 4 model.embedding_dim 0.0 +455 4 optimizer.lr 0.001815003478116638 +455 4 training.batch_size 0.0 +455 4 training.label_smoothing 0.6603726333496681 +455 5 model.embedding_dim 0.0 +455 5 optimizer.lr 0.015511974199157414 +455 5 training.batch_size 0.0 +455 5 training.label_smoothing 0.4635005469871933 +455 6 model.embedding_dim 0.0 +455 6 optimizer.lr 0.001746110559915747 +455 6 training.batch_size 0.0 +455 6 training.label_smoothing 0.0022919741906681705 +455 7 model.embedding_dim 1.0 +455 7 optimizer.lr 0.024847699913278412 +455 7 training.batch_size 2.0 +455 7 training.label_smoothing 0.5727623039700717 +455 8 model.embedding_dim 0.0 +455 8 optimizer.lr 0.025473835663835386 +455 8 training.batch_size 1.0 +455 8 training.label_smoothing 0.005949414813126796 +455 9 model.embedding_dim 1.0 +455 9 optimizer.lr 0.01832904921445097 +455 9 training.batch_size 0.0 +455 9 training.label_smoothing 0.20222510901279678 +455 10 model.embedding_dim 2.0 +455 10 optimizer.lr 0.0035919670949983327 +455 10 training.batch_size 1.0 +455 10 training.label_smoothing 0.5803335649051257 +455 11 model.embedding_dim 2.0 +455 11 optimizer.lr 0.002018486913119855 +455 11 training.batch_size 2.0 +455 11 training.label_smoothing 0.1630095684071997 +455 12 model.embedding_dim 2.0 +455 12 optimizer.lr 0.0014523929003498514 +455 12 training.batch_size 0.0 +455 12 training.label_smoothing 0.0111642571776841 +455 13 model.embedding_dim 2.0 +455 13 optimizer.lr 0.002063832458114267 +455 13 training.batch_size 0.0 +455 13 training.label_smoothing 0.0024616602122290105 +455 14 model.embedding_dim 1.0 +455 14 optimizer.lr 0.0015046978017527182 +455 14 training.batch_size 0.0 +455 14 training.label_smoothing 0.0945374852666419 +455 15 model.embedding_dim 1.0 +455 15 optimizer.lr 0.005519923191737334 +455 15 training.batch_size 0.0 +455 15 training.label_smoothing 0.0010807390819637286 +455 16 model.embedding_dim 1.0 +455 16 optimizer.lr 0.009210917482066032 +455 16 
training.batch_size 2.0 +455 16 training.label_smoothing 0.011569693094073373 +455 17 model.embedding_dim 0.0 +455 17 optimizer.lr 0.001653997766161616 +455 17 training.batch_size 0.0 +455 17 training.label_smoothing 0.7883322776863503 +455 18 model.embedding_dim 0.0 +455 18 optimizer.lr 0.004071719634340827 +455 18 training.batch_size 2.0 +455 18 training.label_smoothing 0.0037391573091879312 +455 19 model.embedding_dim 1.0 +455 19 optimizer.lr 0.0018495333224891886 +455 19 training.batch_size 1.0 +455 19 training.label_smoothing 0.024577428315880535 +455 1 dataset """fb15k237""" +455 1 model """proje""" +455 1 loss """bceaftersigmoid""" +455 1 regularizer """no""" +455 1 optimizer """adam""" +455 1 training_loop """lcwa""" +455 1 evaluator """rankbased""" +455 2 dataset """fb15k237""" +455 2 model """proje""" +455 2 loss """bceaftersigmoid""" +455 2 regularizer """no""" +455 2 optimizer """adam""" +455 2 training_loop """lcwa""" +455 2 evaluator """rankbased""" +455 3 dataset """fb15k237""" +455 3 model """proje""" +455 3 loss """bceaftersigmoid""" +455 3 regularizer """no""" +455 3 optimizer """adam""" +455 3 training_loop """lcwa""" +455 3 evaluator """rankbased""" +455 4 dataset """fb15k237""" +455 4 model """proje""" +455 4 loss """bceaftersigmoid""" +455 4 regularizer """no""" +455 4 optimizer """adam""" +455 4 training_loop """lcwa""" +455 4 evaluator """rankbased""" +455 5 dataset """fb15k237""" +455 5 model """proje""" +455 5 loss """bceaftersigmoid""" +455 5 regularizer """no""" +455 5 optimizer """adam""" +455 5 training_loop """lcwa""" +455 5 evaluator """rankbased""" +455 6 dataset """fb15k237""" +455 6 model """proje""" +455 6 loss """bceaftersigmoid""" +455 6 regularizer """no""" +455 6 optimizer """adam""" +455 6 training_loop """lcwa""" +455 6 evaluator """rankbased""" +455 7 dataset """fb15k237""" +455 7 model """proje""" +455 7 loss """bceaftersigmoid""" +455 7 regularizer """no""" +455 7 optimizer """adam""" +455 7 training_loop """lcwa""" +455 
7 evaluator """rankbased""" +455 8 dataset """fb15k237""" +455 8 model """proje""" +455 8 loss """bceaftersigmoid""" +455 8 regularizer """no""" +455 8 optimizer """adam""" +455 8 training_loop """lcwa""" +455 8 evaluator """rankbased""" +455 9 dataset """fb15k237""" +455 9 model """proje""" +455 9 loss """bceaftersigmoid""" +455 9 regularizer """no""" +455 9 optimizer """adam""" +455 9 training_loop """lcwa""" +455 9 evaluator """rankbased""" +455 10 dataset """fb15k237""" +455 10 model """proje""" +455 10 loss """bceaftersigmoid""" +455 10 regularizer """no""" +455 10 optimizer """adam""" +455 10 training_loop """lcwa""" +455 10 evaluator """rankbased""" +455 11 dataset """fb15k237""" +455 11 model """proje""" +455 11 loss """bceaftersigmoid""" +455 11 regularizer """no""" +455 11 optimizer """adam""" +455 11 training_loop """lcwa""" +455 11 evaluator """rankbased""" +455 12 dataset """fb15k237""" +455 12 model """proje""" +455 12 loss """bceaftersigmoid""" +455 12 regularizer """no""" +455 12 optimizer """adam""" +455 12 training_loop """lcwa""" +455 12 evaluator """rankbased""" +455 13 dataset """fb15k237""" +455 13 model """proje""" +455 13 loss """bceaftersigmoid""" +455 13 regularizer """no""" +455 13 optimizer """adam""" +455 13 training_loop """lcwa""" +455 13 evaluator """rankbased""" +455 14 dataset """fb15k237""" +455 14 model """proje""" +455 14 loss """bceaftersigmoid""" +455 14 regularizer """no""" +455 14 optimizer """adam""" +455 14 training_loop """lcwa""" +455 14 evaluator """rankbased""" +455 15 dataset """fb15k237""" +455 15 model """proje""" +455 15 loss """bceaftersigmoid""" +455 15 regularizer """no""" +455 15 optimizer """adam""" +455 15 training_loop """lcwa""" +455 15 evaluator """rankbased""" +455 16 dataset """fb15k237""" +455 16 model """proje""" +455 16 loss """bceaftersigmoid""" +455 16 regularizer """no""" +455 16 optimizer """adam""" +455 16 training_loop """lcwa""" +455 16 evaluator """rankbased""" +455 17 dataset """fb15k237""" 
+455 17 model """proje""" +455 17 loss """bceaftersigmoid""" +455 17 regularizer """no""" +455 17 optimizer """adam""" +455 17 training_loop """lcwa""" +455 17 evaluator """rankbased""" +455 18 dataset """fb15k237""" +455 18 model """proje""" +455 18 loss """bceaftersigmoid""" +455 18 regularizer """no""" +455 18 optimizer """adam""" +455 18 training_loop """lcwa""" +455 18 evaluator """rankbased""" +455 19 dataset """fb15k237""" +455 19 model """proje""" +455 19 loss """bceaftersigmoid""" +455 19 regularizer """no""" +455 19 optimizer """adam""" +455 19 training_loop """lcwa""" +455 19 evaluator """rankbased""" +456 1 model.embedding_dim 2.0 +456 1 optimizer.lr 0.001029145480092889 +456 1 training.batch_size 1.0 +456 1 training.label_smoothing 0.046581400273349505 +456 2 model.embedding_dim 2.0 +456 2 optimizer.lr 0.02023494785696874 +456 2 training.batch_size 0.0 +456 2 training.label_smoothing 0.001560776947057109 +456 3 model.embedding_dim 0.0 +456 3 optimizer.lr 0.03074637279866527 +456 3 training.batch_size 0.0 +456 3 training.label_smoothing 0.052432828613624936 +456 4 model.embedding_dim 0.0 +456 4 optimizer.lr 0.08760044288360602 +456 4 training.batch_size 1.0 +456 4 training.label_smoothing 0.0011726032280363018 +456 5 model.embedding_dim 0.0 +456 5 optimizer.lr 0.005645892758539641 +456 5 training.batch_size 0.0 +456 5 training.label_smoothing 0.1591876570576682 +456 6 model.embedding_dim 2.0 +456 6 optimizer.lr 0.04342595743659351 +456 6 training.batch_size 1.0 +456 6 training.label_smoothing 0.02332705775806501 +456 7 model.embedding_dim 0.0 +456 7 optimizer.lr 0.00608739724854804 +456 7 training.batch_size 2.0 +456 7 training.label_smoothing 0.0012435770062038417 +456 8 model.embedding_dim 2.0 +456 8 optimizer.lr 0.009744777298059329 +456 8 training.batch_size 0.0 +456 8 training.label_smoothing 0.03513537575059479 +456 9 model.embedding_dim 0.0 +456 9 optimizer.lr 0.032274454802004485 +456 9 training.batch_size 1.0 +456 9 training.label_smoothing 
0.023561498874025737 +456 10 model.embedding_dim 1.0 +456 10 optimizer.lr 0.07565415711875433 +456 10 training.batch_size 2.0 +456 10 training.label_smoothing 0.10609822978574304 +456 11 model.embedding_dim 2.0 +456 11 optimizer.lr 0.0011896187459552422 +456 11 training.batch_size 0.0 +456 11 training.label_smoothing 0.9374350406429623 +456 12 model.embedding_dim 1.0 +456 12 optimizer.lr 0.00495972588854886 +456 12 training.batch_size 2.0 +456 12 training.label_smoothing 0.0024975188423852043 +456 13 model.embedding_dim 2.0 +456 13 optimizer.lr 0.002568559865049051 +456 13 training.batch_size 1.0 +456 13 training.label_smoothing 0.29949519756558524 +456 14 model.embedding_dim 1.0 +456 14 optimizer.lr 0.07047906753708762 +456 14 training.batch_size 1.0 +456 14 training.label_smoothing 0.4809704872543196 +456 15 model.embedding_dim 0.0 +456 15 optimizer.lr 0.0021474252693549295 +456 15 training.batch_size 1.0 +456 15 training.label_smoothing 0.2896297083976256 +456 16 model.embedding_dim 1.0 +456 16 optimizer.lr 0.0724987655130341 +456 16 training.batch_size 1.0 +456 16 training.label_smoothing 0.12233041274490153 +456 17 model.embedding_dim 0.0 +456 17 optimizer.lr 0.0069809700454262465 +456 17 training.batch_size 1.0 +456 17 training.label_smoothing 0.2638792916024344 +456 18 model.embedding_dim 0.0 +456 18 optimizer.lr 0.01220265670863169 +456 18 training.batch_size 2.0 +456 18 training.label_smoothing 0.33136207826624564 +456 19 model.embedding_dim 0.0 +456 19 optimizer.lr 0.03298062102117904 +456 19 training.batch_size 2.0 +456 19 training.label_smoothing 0.14195233140785896 +456 20 model.embedding_dim 0.0 +456 20 optimizer.lr 0.06977943044384188 +456 20 training.batch_size 1.0 +456 20 training.label_smoothing 0.05845180571863762 +456 21 model.embedding_dim 2.0 +456 21 optimizer.lr 0.0031048144745647608 +456 21 training.batch_size 1.0 +456 21 training.label_smoothing 0.25989823573971804 +456 1 dataset """fb15k237""" +456 1 model """proje""" +456 1 loss 
"""softplus""" +456 1 regularizer """no""" +456 1 optimizer """adam""" +456 1 training_loop """lcwa""" +456 1 evaluator """rankbased""" +456 2 dataset """fb15k237""" +456 2 model """proje""" +456 2 loss """softplus""" +456 2 regularizer """no""" +456 2 optimizer """adam""" +456 2 training_loop """lcwa""" +456 2 evaluator """rankbased""" +456 3 dataset """fb15k237""" +456 3 model """proje""" +456 3 loss """softplus""" +456 3 regularizer """no""" +456 3 optimizer """adam""" +456 3 training_loop """lcwa""" +456 3 evaluator """rankbased""" +456 4 dataset """fb15k237""" +456 4 model """proje""" +456 4 loss """softplus""" +456 4 regularizer """no""" +456 4 optimizer """adam""" +456 4 training_loop """lcwa""" +456 4 evaluator """rankbased""" +456 5 dataset """fb15k237""" +456 5 model """proje""" +456 5 loss """softplus""" +456 5 regularizer """no""" +456 5 optimizer """adam""" +456 5 training_loop """lcwa""" +456 5 evaluator """rankbased""" +456 6 dataset """fb15k237""" +456 6 model """proje""" +456 6 loss """softplus""" +456 6 regularizer """no""" +456 6 optimizer """adam""" +456 6 training_loop """lcwa""" +456 6 evaluator """rankbased""" +456 7 dataset """fb15k237""" +456 7 model """proje""" +456 7 loss """softplus""" +456 7 regularizer """no""" +456 7 optimizer """adam""" +456 7 training_loop """lcwa""" +456 7 evaluator """rankbased""" +456 8 dataset """fb15k237""" +456 8 model """proje""" +456 8 loss """softplus""" +456 8 regularizer """no""" +456 8 optimizer """adam""" +456 8 training_loop """lcwa""" +456 8 evaluator """rankbased""" +456 9 dataset """fb15k237""" +456 9 model """proje""" +456 9 loss """softplus""" +456 9 regularizer """no""" +456 9 optimizer """adam""" +456 9 training_loop """lcwa""" +456 9 evaluator """rankbased""" +456 10 dataset """fb15k237""" +456 10 model """proje""" +456 10 loss """softplus""" +456 10 regularizer """no""" +456 10 optimizer """adam""" +456 10 training_loop """lcwa""" +456 10 evaluator """rankbased""" +456 11 dataset 
"""fb15k237""" +456 11 model """proje""" +456 11 loss """softplus""" +456 11 regularizer """no""" +456 11 optimizer """adam""" +456 11 training_loop """lcwa""" +456 11 evaluator """rankbased""" +456 12 dataset """fb15k237""" +456 12 model """proje""" +456 12 loss """softplus""" +456 12 regularizer """no""" +456 12 optimizer """adam""" +456 12 training_loop """lcwa""" +456 12 evaluator """rankbased""" +456 13 dataset """fb15k237""" +456 13 model """proje""" +456 13 loss """softplus""" +456 13 regularizer """no""" +456 13 optimizer """adam""" +456 13 training_loop """lcwa""" +456 13 evaluator """rankbased""" +456 14 dataset """fb15k237""" +456 14 model """proje""" +456 14 loss """softplus""" +456 14 regularizer """no""" +456 14 optimizer """adam""" +456 14 training_loop """lcwa""" +456 14 evaluator """rankbased""" +456 15 dataset """fb15k237""" +456 15 model """proje""" +456 15 loss """softplus""" +456 15 regularizer """no""" +456 15 optimizer """adam""" +456 15 training_loop """lcwa""" +456 15 evaluator """rankbased""" +456 16 dataset """fb15k237""" +456 16 model """proje""" +456 16 loss """softplus""" +456 16 regularizer """no""" +456 16 optimizer """adam""" +456 16 training_loop """lcwa""" +456 16 evaluator """rankbased""" +456 17 dataset """fb15k237""" +456 17 model """proje""" +456 17 loss """softplus""" +456 17 regularizer """no""" +456 17 optimizer """adam""" +456 17 training_loop """lcwa""" +456 17 evaluator """rankbased""" +456 18 dataset """fb15k237""" +456 18 model """proje""" +456 18 loss """softplus""" +456 18 regularizer """no""" +456 18 optimizer """adam""" +456 18 training_loop """lcwa""" +456 18 evaluator """rankbased""" +456 19 dataset """fb15k237""" +456 19 model """proje""" +456 19 loss """softplus""" +456 19 regularizer """no""" +456 19 optimizer """adam""" +456 19 training_loop """lcwa""" +456 19 evaluator """rankbased""" +456 20 dataset """fb15k237""" +456 20 model """proje""" +456 20 loss """softplus""" +456 20 regularizer """no""" +456 20 
optimizer """adam""" +456 20 training_loop """lcwa""" +456 20 evaluator """rankbased""" +456 21 dataset """fb15k237""" +456 21 model """proje""" +456 21 loss """softplus""" +456 21 regularizer """no""" +456 21 optimizer """adam""" +456 21 training_loop """lcwa""" +456 21 evaluator """rankbased""" +457 1 model.embedding_dim 1.0 +457 1 optimizer.lr 0.003489134258756528 +457 1 training.batch_size 0.0 +457 1 training.label_smoothing 0.002016537590406817 +457 2 model.embedding_dim 0.0 +457 2 optimizer.lr 0.006167350065818307 +457 2 training.batch_size 2.0 +457 2 training.label_smoothing 0.339439725217557 +457 3 model.embedding_dim 2.0 +457 3 optimizer.lr 0.016501294638442514 +457 3 training.batch_size 2.0 +457 3 training.label_smoothing 0.009114795858486312 +457 4 model.embedding_dim 2.0 +457 4 optimizer.lr 0.013850982136971656 +457 4 training.batch_size 0.0 +457 4 training.label_smoothing 0.0036603271742530986 +457 5 model.embedding_dim 1.0 +457 5 optimizer.lr 0.0066457744330635175 +457 5 training.batch_size 1.0 +457 5 training.label_smoothing 0.0020501611338254095 +457 6 model.embedding_dim 0.0 +457 6 optimizer.lr 0.01841403985156324 +457 6 training.batch_size 0.0 +457 6 training.label_smoothing 0.21046331784288164 +457 7 model.embedding_dim 1.0 +457 7 optimizer.lr 0.0028360530949914984 +457 7 training.batch_size 2.0 +457 7 training.label_smoothing 0.4321066587186088 +457 8 model.embedding_dim 0.0 +457 8 optimizer.lr 0.002952768319804174 +457 8 training.batch_size 1.0 +457 8 training.label_smoothing 0.0058393199836937 +457 9 model.embedding_dim 0.0 +457 9 optimizer.lr 0.006334068124468941 +457 9 training.batch_size 0.0 +457 9 training.label_smoothing 0.17487791407013745 +457 10 model.embedding_dim 0.0 +457 10 optimizer.lr 0.0020859891400797316 +457 10 training.batch_size 2.0 +457 10 training.label_smoothing 0.2657420369335426 +457 11 model.embedding_dim 1.0 +457 11 optimizer.lr 0.0017758861540910279 +457 11 training.batch_size 2.0 +457 11 training.label_smoothing 
0.20126594771817916 +457 12 model.embedding_dim 1.0 +457 12 optimizer.lr 0.0015916224516542325 +457 12 training.batch_size 2.0 +457 12 training.label_smoothing 0.10542203900118655 +457 13 model.embedding_dim 0.0 +457 13 optimizer.lr 0.002958578899510632 +457 13 training.batch_size 1.0 +457 13 training.label_smoothing 0.017522584038278017 +457 14 model.embedding_dim 2.0 +457 14 optimizer.lr 0.03125099603851459 +457 14 training.batch_size 1.0 +457 14 training.label_smoothing 0.00339801420338846 +457 15 model.embedding_dim 0.0 +457 15 optimizer.lr 0.07448644457520713 +457 15 training.batch_size 0.0 +457 15 training.label_smoothing 0.015746992663560458 +457 16 model.embedding_dim 0.0 +457 16 optimizer.lr 0.0726372415537548 +457 16 training.batch_size 2.0 +457 16 training.label_smoothing 0.008809009354642587 +457 17 model.embedding_dim 2.0 +457 17 optimizer.lr 0.0244579092982604 +457 17 training.batch_size 2.0 +457 17 training.label_smoothing 0.1577724520471937 +457 18 model.embedding_dim 0.0 +457 18 optimizer.lr 0.010173216078959144 +457 18 training.batch_size 0.0 +457 18 training.label_smoothing 0.946486554296181 +457 19 model.embedding_dim 1.0 +457 19 optimizer.lr 0.0016114036521790477 +457 19 training.batch_size 0.0 +457 19 training.label_smoothing 0.02511371070769095 +457 20 model.embedding_dim 1.0 +457 20 optimizer.lr 0.0038461457783462417 +457 20 training.batch_size 0.0 +457 20 training.label_smoothing 0.1366374239897553 +457 21 model.embedding_dim 0.0 +457 21 optimizer.lr 0.015589710804150466 +457 21 training.batch_size 1.0 +457 21 training.label_smoothing 0.002131931714039299 +457 22 model.embedding_dim 0.0 +457 22 optimizer.lr 0.005980251974140701 +457 22 training.batch_size 2.0 +457 22 training.label_smoothing 0.37475990947002075 +457 23 model.embedding_dim 0.0 +457 23 optimizer.lr 0.00396237734998516 +457 23 training.batch_size 2.0 +457 23 training.label_smoothing 0.001498102829263564 +457 24 model.embedding_dim 2.0 +457 24 optimizer.lr 0.011675944825395568 
+457 24 training.batch_size 1.0 +457 24 training.label_smoothing 0.05035102031331557 +457 25 model.embedding_dim 0.0 +457 25 optimizer.lr 0.002798169660594595 +457 25 training.batch_size 0.0 +457 25 training.label_smoothing 0.002501571997624329 +457 26 model.embedding_dim 2.0 +457 26 optimizer.lr 0.011642790649077384 +457 26 training.batch_size 1.0 +457 26 training.label_smoothing 0.49266284544960426 +457 27 model.embedding_dim 1.0 +457 27 optimizer.lr 0.03053997110646091 +457 27 training.batch_size 2.0 +457 27 training.label_smoothing 0.5507470368164251 +457 28 model.embedding_dim 2.0 +457 28 optimizer.lr 0.002801771166886299 +457 28 training.batch_size 0.0 +457 28 training.label_smoothing 0.04824235106390424 +457 29 model.embedding_dim 1.0 +457 29 optimizer.lr 0.017902302717254424 +457 29 training.batch_size 2.0 +457 29 training.label_smoothing 0.08934058340212862 +457 30 model.embedding_dim 0.0 +457 30 optimizer.lr 0.00510630099968071 +457 30 training.batch_size 1.0 +457 30 training.label_smoothing 0.4386231257859345 +457 31 model.embedding_dim 2.0 +457 31 optimizer.lr 0.011261458572457054 +457 31 training.batch_size 2.0 +457 31 training.label_smoothing 0.3298371447652648 +457 32 model.embedding_dim 1.0 +457 32 optimizer.lr 0.01693404590973134 +457 32 training.batch_size 2.0 +457 32 training.label_smoothing 0.03262732401396821 +457 33 model.embedding_dim 2.0 +457 33 optimizer.lr 0.0012179798069481844 +457 33 training.batch_size 2.0 +457 33 training.label_smoothing 0.17940675036384726 +457 34 model.embedding_dim 0.0 +457 34 optimizer.lr 0.059613654906535044 +457 34 training.batch_size 0.0 +457 34 training.label_smoothing 0.0041481419567879225 +457 35 model.embedding_dim 2.0 +457 35 optimizer.lr 0.005306670349627965 +457 35 training.batch_size 2.0 +457 35 training.label_smoothing 0.0021839690497938748 +457 36 model.embedding_dim 0.0 +457 36 optimizer.lr 0.05172521167971041 +457 36 training.batch_size 0.0 +457 36 training.label_smoothing 0.018353226351096725 +457 
37 model.embedding_dim 2.0 +457 37 optimizer.lr 0.018682836148915968 +457 37 training.batch_size 0.0 +457 37 training.label_smoothing 0.005280180706067947 +457 38 model.embedding_dim 0.0 +457 38 optimizer.lr 0.039288249737240426 +457 38 training.batch_size 2.0 +457 38 training.label_smoothing 0.14074272716755235 +457 1 dataset """fb15k237""" +457 1 model """proje""" +457 1 loss """bceaftersigmoid""" +457 1 regularizer """no""" +457 1 optimizer """adam""" +457 1 training_loop """lcwa""" +457 1 evaluator """rankbased""" +457 2 dataset """fb15k237""" +457 2 model """proje""" +457 2 loss """bceaftersigmoid""" +457 2 regularizer """no""" +457 2 optimizer """adam""" +457 2 training_loop """lcwa""" +457 2 evaluator """rankbased""" +457 3 dataset """fb15k237""" +457 3 model """proje""" +457 3 loss """bceaftersigmoid""" +457 3 regularizer """no""" +457 3 optimizer """adam""" +457 3 training_loop """lcwa""" +457 3 evaluator """rankbased""" +457 4 dataset """fb15k237""" +457 4 model """proje""" +457 4 loss """bceaftersigmoid""" +457 4 regularizer """no""" +457 4 optimizer """adam""" +457 4 training_loop """lcwa""" +457 4 evaluator """rankbased""" +457 5 dataset """fb15k237""" +457 5 model """proje""" +457 5 loss """bceaftersigmoid""" +457 5 regularizer """no""" +457 5 optimizer """adam""" +457 5 training_loop """lcwa""" +457 5 evaluator """rankbased""" +457 6 dataset """fb15k237""" +457 6 model """proje""" +457 6 loss """bceaftersigmoid""" +457 6 regularizer """no""" +457 6 optimizer """adam""" +457 6 training_loop """lcwa""" +457 6 evaluator """rankbased""" +457 7 dataset """fb15k237""" +457 7 model """proje""" +457 7 loss """bceaftersigmoid""" +457 7 regularizer """no""" +457 7 optimizer """adam""" +457 7 training_loop """lcwa""" +457 7 evaluator """rankbased""" +457 8 dataset """fb15k237""" +457 8 model """proje""" +457 8 loss """bceaftersigmoid""" +457 8 regularizer """no""" +457 8 optimizer """adam""" +457 8 training_loop """lcwa""" +457 8 evaluator """rankbased""" +457 
9 dataset """fb15k237""" +457 9 model """proje""" +457 9 loss """bceaftersigmoid""" +457 9 regularizer """no""" +457 9 optimizer """adam""" +457 9 training_loop """lcwa""" +457 9 evaluator """rankbased""" +457 10 dataset """fb15k237""" +457 10 model """proje""" +457 10 loss """bceaftersigmoid""" +457 10 regularizer """no""" +457 10 optimizer """adam""" +457 10 training_loop """lcwa""" +457 10 evaluator """rankbased""" +457 11 dataset """fb15k237""" +457 11 model """proje""" +457 11 loss """bceaftersigmoid""" +457 11 regularizer """no""" +457 11 optimizer """adam""" +457 11 training_loop """lcwa""" +457 11 evaluator """rankbased""" +457 12 dataset """fb15k237""" +457 12 model """proje""" +457 12 loss """bceaftersigmoid""" +457 12 regularizer """no""" +457 12 optimizer """adam""" +457 12 training_loop """lcwa""" +457 12 evaluator """rankbased""" +457 13 dataset """fb15k237""" +457 13 model """proje""" +457 13 loss """bceaftersigmoid""" +457 13 regularizer """no""" +457 13 optimizer """adam""" +457 13 training_loop """lcwa""" +457 13 evaluator """rankbased""" +457 14 dataset """fb15k237""" +457 14 model """proje""" +457 14 loss """bceaftersigmoid""" +457 14 regularizer """no""" +457 14 optimizer """adam""" +457 14 training_loop """lcwa""" +457 14 evaluator """rankbased""" +457 15 dataset """fb15k237""" +457 15 model """proje""" +457 15 loss """bceaftersigmoid""" +457 15 regularizer """no""" +457 15 optimizer """adam""" +457 15 training_loop """lcwa""" +457 15 evaluator """rankbased""" +457 16 dataset """fb15k237""" +457 16 model """proje""" +457 16 loss """bceaftersigmoid""" +457 16 regularizer """no""" +457 16 optimizer """adam""" +457 16 training_loop """lcwa""" +457 16 evaluator """rankbased""" +457 17 dataset """fb15k237""" +457 17 model """proje""" +457 17 loss """bceaftersigmoid""" +457 17 regularizer """no""" +457 17 optimizer """adam""" +457 17 training_loop """lcwa""" +457 17 evaluator """rankbased""" +457 18 dataset """fb15k237""" +457 18 model """proje""" 
+457 18 loss """bceaftersigmoid""" +457 18 regularizer """no""" +457 18 optimizer """adam""" +457 18 training_loop """lcwa""" +457 18 evaluator """rankbased""" +457 19 dataset """fb15k237""" +457 19 model """proje""" +457 19 loss """bceaftersigmoid""" +457 19 regularizer """no""" +457 19 optimizer """adam""" +457 19 training_loop """lcwa""" +457 19 evaluator """rankbased""" +457 20 dataset """fb15k237""" +457 20 model """proje""" +457 20 loss """bceaftersigmoid""" +457 20 regularizer """no""" +457 20 optimizer """adam""" +457 20 training_loop """lcwa""" +457 20 evaluator """rankbased""" +457 21 dataset """fb15k237""" +457 21 model """proje""" +457 21 loss """bceaftersigmoid""" +457 21 regularizer """no""" +457 21 optimizer """adam""" +457 21 training_loop """lcwa""" +457 21 evaluator """rankbased""" +457 22 dataset """fb15k237""" +457 22 model """proje""" +457 22 loss """bceaftersigmoid""" +457 22 regularizer """no""" +457 22 optimizer """adam""" +457 22 training_loop """lcwa""" +457 22 evaluator """rankbased""" +457 23 dataset """fb15k237""" +457 23 model """proje""" +457 23 loss """bceaftersigmoid""" +457 23 regularizer """no""" +457 23 optimizer """adam""" +457 23 training_loop """lcwa""" +457 23 evaluator """rankbased""" +457 24 dataset """fb15k237""" +457 24 model """proje""" +457 24 loss """bceaftersigmoid""" +457 24 regularizer """no""" +457 24 optimizer """adam""" +457 24 training_loop """lcwa""" +457 24 evaluator """rankbased""" +457 25 dataset """fb15k237""" +457 25 model """proje""" +457 25 loss """bceaftersigmoid""" +457 25 regularizer """no""" +457 25 optimizer """adam""" +457 25 training_loop """lcwa""" +457 25 evaluator """rankbased""" +457 26 dataset """fb15k237""" +457 26 model """proje""" +457 26 loss """bceaftersigmoid""" +457 26 regularizer """no""" +457 26 optimizer """adam""" +457 26 training_loop """lcwa""" +457 26 evaluator """rankbased""" +457 27 dataset """fb15k237""" +457 27 model """proje""" +457 27 loss """bceaftersigmoid""" +457 27 
regularizer """no""" +457 27 optimizer """adam""" +457 27 training_loop """lcwa""" +457 27 evaluator """rankbased""" +457 28 dataset """fb15k237""" +457 28 model """proje""" +457 28 loss """bceaftersigmoid""" +457 28 regularizer """no""" +457 28 optimizer """adam""" +457 28 training_loop """lcwa""" +457 28 evaluator """rankbased""" +457 29 dataset """fb15k237""" +457 29 model """proje""" +457 29 loss """bceaftersigmoid""" +457 29 regularizer """no""" +457 29 optimizer """adam""" +457 29 training_loop """lcwa""" +457 29 evaluator """rankbased""" +457 30 dataset """fb15k237""" +457 30 model """proje""" +457 30 loss """bceaftersigmoid""" +457 30 regularizer """no""" +457 30 optimizer """adam""" +457 30 training_loop """lcwa""" +457 30 evaluator """rankbased""" +457 31 dataset """fb15k237""" +457 31 model """proje""" +457 31 loss """bceaftersigmoid""" +457 31 regularizer """no""" +457 31 optimizer """adam""" +457 31 training_loop """lcwa""" +457 31 evaluator """rankbased""" +457 32 dataset """fb15k237""" +457 32 model """proje""" +457 32 loss """bceaftersigmoid""" +457 32 regularizer """no""" +457 32 optimizer """adam""" +457 32 training_loop """lcwa""" +457 32 evaluator """rankbased""" +457 33 dataset """fb15k237""" +457 33 model """proje""" +457 33 loss """bceaftersigmoid""" +457 33 regularizer """no""" +457 33 optimizer """adam""" +457 33 training_loop """lcwa""" +457 33 evaluator """rankbased""" +457 34 dataset """fb15k237""" +457 34 model """proje""" +457 34 loss """bceaftersigmoid""" +457 34 regularizer """no""" +457 34 optimizer """adam""" +457 34 training_loop """lcwa""" +457 34 evaluator """rankbased""" +457 35 dataset """fb15k237""" +457 35 model """proje""" +457 35 loss """bceaftersigmoid""" +457 35 regularizer """no""" +457 35 optimizer """adam""" +457 35 training_loop """lcwa""" +457 35 evaluator """rankbased""" +457 36 dataset """fb15k237""" +457 36 model """proje""" +457 36 loss """bceaftersigmoid""" +457 36 regularizer """no""" +457 36 optimizer 
"""adam""" +457 36 training_loop """lcwa""" +457 36 evaluator """rankbased""" +457 37 dataset """fb15k237""" +457 37 model """proje""" +457 37 loss """bceaftersigmoid""" +457 37 regularizer """no""" +457 37 optimizer """adam""" +457 37 training_loop """lcwa""" +457 37 evaluator """rankbased""" +457 38 dataset """fb15k237""" +457 38 model """proje""" +457 38 loss """bceaftersigmoid""" +457 38 regularizer """no""" +457 38 optimizer """adam""" +457 38 training_loop """lcwa""" +457 38 evaluator """rankbased""" +458 1 model.embedding_dim 1.0 +458 1 optimizer.lr 0.0016682907723989647 +458 1 training.batch_size 2.0 +458 1 training.label_smoothing 0.3942570414114822 +458 2 model.embedding_dim 2.0 +458 2 optimizer.lr 0.0057790066892080124 +458 2 training.batch_size 2.0 +458 2 training.label_smoothing 0.008943614403673408 +458 3 model.embedding_dim 2.0 +458 3 optimizer.lr 0.0010565595519551059 +458 3 training.batch_size 0.0 +458 3 training.label_smoothing 0.7109421323938605 +458 4 model.embedding_dim 1.0 +458 4 optimizer.lr 0.005666631576668313 +458 4 training.batch_size 0.0 +458 4 training.label_smoothing 0.4949845233603215 +458 5 model.embedding_dim 2.0 +458 5 optimizer.lr 0.007466294992739181 +458 5 training.batch_size 1.0 +458 5 training.label_smoothing 0.0011329724394096511 +458 6 model.embedding_dim 1.0 +458 6 optimizer.lr 0.007514961309416786 +458 6 training.batch_size 0.0 +458 6 training.label_smoothing 0.0011598921472143447 +458 7 model.embedding_dim 1.0 +458 7 optimizer.lr 0.0013262602726909348 +458 7 training.batch_size 1.0 +458 7 training.label_smoothing 0.12092216578210452 +458 8 model.embedding_dim 0.0 +458 8 optimizer.lr 0.014095849542266241 +458 8 training.batch_size 2.0 +458 8 training.label_smoothing 0.04533307581418829 +458 9 model.embedding_dim 0.0 +458 9 optimizer.lr 0.05636397511915468 +458 9 training.batch_size 2.0 +458 9 training.label_smoothing 0.0019328977789196179 +458 10 model.embedding_dim 1.0 +458 10 optimizer.lr 0.00276216936538124 +458 10 
training.batch_size 2.0 +458 10 training.label_smoothing 0.0021514853816319733 +458 11 model.embedding_dim 1.0 +458 11 optimizer.lr 0.04875182747207539 +458 11 training.batch_size 0.0 +458 11 training.label_smoothing 0.004123369410757307 +458 12 model.embedding_dim 1.0 +458 12 optimizer.lr 0.02951959423816665 +458 12 training.batch_size 1.0 +458 12 training.label_smoothing 0.6525711418879775 +458 13 model.embedding_dim 1.0 +458 13 optimizer.lr 0.01479111746262203 +458 13 training.batch_size 0.0 +458 13 training.label_smoothing 0.3206405061825058 +458 14 model.embedding_dim 2.0 +458 14 optimizer.lr 0.0019497074971326811 +458 14 training.batch_size 0.0 +458 14 training.label_smoothing 0.11468887466656459 +458 15 model.embedding_dim 2.0 +458 15 optimizer.lr 0.07960389145340704 +458 15 training.batch_size 0.0 +458 15 training.label_smoothing 0.1830900701208823 +458 16 model.embedding_dim 1.0 +458 16 optimizer.lr 0.028309861616876923 +458 16 training.batch_size 0.0 +458 16 training.label_smoothing 0.0010751275984254185 +458 17 model.embedding_dim 2.0 +458 17 optimizer.lr 0.002589556612002773 +458 17 training.batch_size 2.0 +458 17 training.label_smoothing 0.09672331395318187 +458 18 model.embedding_dim 1.0 +458 18 optimizer.lr 0.002464973294903737 +458 18 training.batch_size 0.0 +458 18 training.label_smoothing 0.045289454596314345 +458 19 model.embedding_dim 0.0 +458 19 optimizer.lr 0.0015453167200863843 +458 19 training.batch_size 1.0 +458 19 training.label_smoothing 0.03064786479332115 +458 20 model.embedding_dim 1.0 +458 20 optimizer.lr 0.0022699866164517498 +458 20 training.batch_size 1.0 +458 20 training.label_smoothing 0.07283138945969875 +458 21 model.embedding_dim 0.0 +458 21 optimizer.lr 0.004858040374621601 +458 21 training.batch_size 2.0 +458 21 training.label_smoothing 0.01829523354492382 +458 22 model.embedding_dim 1.0 +458 22 optimizer.lr 0.014594152132228994 +458 22 training.batch_size 2.0 +458 22 training.label_smoothing 0.08777810168474769 +458 23 
model.embedding_dim 1.0 +458 23 optimizer.lr 0.03150172834792675 +458 23 training.batch_size 1.0 +458 23 training.label_smoothing 0.4149460926069112 +458 24 model.embedding_dim 2.0 +458 24 optimizer.lr 0.07344450686146256 +458 24 training.batch_size 0.0 +458 24 training.label_smoothing 0.7496924049788565 +458 25 model.embedding_dim 1.0 +458 25 optimizer.lr 0.0039460267533876735 +458 25 training.batch_size 2.0 +458 25 training.label_smoothing 0.5865701221472759 +458 26 model.embedding_dim 2.0 +458 26 optimizer.lr 0.0027705647903110537 +458 26 training.batch_size 1.0 +458 26 training.label_smoothing 0.2235325228047731 +458 27 model.embedding_dim 2.0 +458 27 optimizer.lr 0.015405132205344382 +458 27 training.batch_size 2.0 +458 27 training.label_smoothing 0.11580835865729552 +458 28 model.embedding_dim 0.0 +458 28 optimizer.lr 0.0011960055031997143 +458 28 training.batch_size 2.0 +458 28 training.label_smoothing 0.012108258726281311 +458 29 model.embedding_dim 1.0 +458 29 optimizer.lr 0.04797325995464374 +458 29 training.batch_size 0.0 +458 29 training.label_smoothing 0.008609936510607385 +458 30 model.embedding_dim 1.0 +458 30 optimizer.lr 0.003249783541923193 +458 30 training.batch_size 2.0 +458 30 training.label_smoothing 0.0011895876428894467 +458 31 model.embedding_dim 0.0 +458 31 optimizer.lr 0.0736068977777543 +458 31 training.batch_size 1.0 +458 31 training.label_smoothing 0.7818382355256831 +458 32 model.embedding_dim 0.0 +458 32 optimizer.lr 0.007806797256172481 +458 32 training.batch_size 1.0 +458 32 training.label_smoothing 0.21385626823556156 +458 33 model.embedding_dim 0.0 +458 33 optimizer.lr 0.03239410940639224 +458 33 training.batch_size 1.0 +458 33 training.label_smoothing 0.1287243213172454 +458 34 model.embedding_dim 0.0 +458 34 optimizer.lr 0.0013382745153531666 +458 34 training.batch_size 2.0 +458 34 training.label_smoothing 0.016577509801177517 +458 35 model.embedding_dim 2.0 +458 35 optimizer.lr 0.016980885609719186 +458 35 training.batch_size 
0.0 +458 35 training.label_smoothing 0.2052913841512526 +458 36 model.embedding_dim 0.0 +458 36 optimizer.lr 0.048667371513780824 +458 36 training.batch_size 2.0 +458 36 training.label_smoothing 0.023092689050575564 +458 37 model.embedding_dim 2.0 +458 37 optimizer.lr 0.08222112343448912 +458 37 training.batch_size 0.0 +458 37 training.label_smoothing 0.059511236153445786 +458 1 dataset """fb15k237""" +458 1 model """proje""" +458 1 loss """softplus""" +458 1 regularizer """no""" +458 1 optimizer """adam""" +458 1 training_loop """lcwa""" +458 1 evaluator """rankbased""" +458 2 dataset """fb15k237""" +458 2 model """proje""" +458 2 loss """softplus""" +458 2 regularizer """no""" +458 2 optimizer """adam""" +458 2 training_loop """lcwa""" +458 2 evaluator """rankbased""" +458 3 dataset """fb15k237""" +458 3 model """proje""" +458 3 loss """softplus""" +458 3 regularizer """no""" +458 3 optimizer """adam""" +458 3 training_loop """lcwa""" +458 3 evaluator """rankbased""" +458 4 dataset """fb15k237""" +458 4 model """proje""" +458 4 loss """softplus""" +458 4 regularizer """no""" +458 4 optimizer """adam""" +458 4 training_loop """lcwa""" +458 4 evaluator """rankbased""" +458 5 dataset """fb15k237""" +458 5 model """proje""" +458 5 loss """softplus""" +458 5 regularizer """no""" +458 5 optimizer """adam""" +458 5 training_loop """lcwa""" +458 5 evaluator """rankbased""" +458 6 dataset """fb15k237""" +458 6 model """proje""" +458 6 loss """softplus""" +458 6 regularizer """no""" +458 6 optimizer """adam""" +458 6 training_loop """lcwa""" +458 6 evaluator """rankbased""" +458 7 dataset """fb15k237""" +458 7 model """proje""" +458 7 loss """softplus""" +458 7 regularizer """no""" +458 7 optimizer """adam""" +458 7 training_loop """lcwa""" +458 7 evaluator """rankbased""" +458 8 dataset """fb15k237""" +458 8 model """proje""" +458 8 loss """softplus""" +458 8 regularizer """no""" +458 8 optimizer """adam""" +458 8 training_loop """lcwa""" +458 8 evaluator """rankbased""" 
+458 9 dataset """fb15k237""" +458 9 model """proje""" +458 9 loss """softplus""" +458 9 regularizer """no""" +458 9 optimizer """adam""" +458 9 training_loop """lcwa""" +458 9 evaluator """rankbased""" +458 10 dataset """fb15k237""" +458 10 model """proje""" +458 10 loss """softplus""" +458 10 regularizer """no""" +458 10 optimizer """adam""" +458 10 training_loop """lcwa""" +458 10 evaluator """rankbased""" +458 11 dataset """fb15k237""" +458 11 model """proje""" +458 11 loss """softplus""" +458 11 regularizer """no""" +458 11 optimizer """adam""" +458 11 training_loop """lcwa""" +458 11 evaluator """rankbased""" +458 12 dataset """fb15k237""" +458 12 model """proje""" +458 12 loss """softplus""" +458 12 regularizer """no""" +458 12 optimizer """adam""" +458 12 training_loop """lcwa""" +458 12 evaluator """rankbased""" +458 13 dataset """fb15k237""" +458 13 model """proje""" +458 13 loss """softplus""" +458 13 regularizer """no""" +458 13 optimizer """adam""" +458 13 training_loop """lcwa""" +458 13 evaluator """rankbased""" +458 14 dataset """fb15k237""" +458 14 model """proje""" +458 14 loss """softplus""" +458 14 regularizer """no""" +458 14 optimizer """adam""" +458 14 training_loop """lcwa""" +458 14 evaluator """rankbased""" +458 15 dataset """fb15k237""" +458 15 model """proje""" +458 15 loss """softplus""" +458 15 regularizer """no""" +458 15 optimizer """adam""" +458 15 training_loop """lcwa""" +458 15 evaluator """rankbased""" +458 16 dataset """fb15k237""" +458 16 model """proje""" +458 16 loss """softplus""" +458 16 regularizer """no""" +458 16 optimizer """adam""" +458 16 training_loop """lcwa""" +458 16 evaluator """rankbased""" +458 17 dataset """fb15k237""" +458 17 model """proje""" +458 17 loss """softplus""" +458 17 regularizer """no""" +458 17 optimizer """adam""" +458 17 training_loop """lcwa""" +458 17 evaluator """rankbased""" +458 18 dataset """fb15k237""" +458 18 model """proje""" +458 18 loss """softplus""" +458 18 regularizer """no""" 
+458 18 optimizer """adam""" +458 18 training_loop """lcwa""" +458 18 evaluator """rankbased""" +458 19 dataset """fb15k237""" +458 19 model """proje""" +458 19 loss """softplus""" +458 19 regularizer """no""" +458 19 optimizer """adam""" +458 19 training_loop """lcwa""" +458 19 evaluator """rankbased""" +458 20 dataset """fb15k237""" +458 20 model """proje""" +458 20 loss """softplus""" +458 20 regularizer """no""" +458 20 optimizer """adam""" +458 20 training_loop """lcwa""" +458 20 evaluator """rankbased""" +458 21 dataset """fb15k237""" +458 21 model """proje""" +458 21 loss """softplus""" +458 21 regularizer """no""" +458 21 optimizer """adam""" +458 21 training_loop """lcwa""" +458 21 evaluator """rankbased""" +458 22 dataset """fb15k237""" +458 22 model """proje""" +458 22 loss """softplus""" +458 22 regularizer """no""" +458 22 optimizer """adam""" +458 22 training_loop """lcwa""" +458 22 evaluator """rankbased""" +458 23 dataset """fb15k237""" +458 23 model """proje""" +458 23 loss """softplus""" +458 23 regularizer """no""" +458 23 optimizer """adam""" +458 23 training_loop """lcwa""" +458 23 evaluator """rankbased""" +458 24 dataset """fb15k237""" +458 24 model """proje""" +458 24 loss """softplus""" +458 24 regularizer """no""" +458 24 optimizer """adam""" +458 24 training_loop """lcwa""" +458 24 evaluator """rankbased""" +458 25 dataset """fb15k237""" +458 25 model """proje""" +458 25 loss """softplus""" +458 25 regularizer """no""" +458 25 optimizer """adam""" +458 25 training_loop """lcwa""" +458 25 evaluator """rankbased""" +458 26 dataset """fb15k237""" +458 26 model """proje""" +458 26 loss """softplus""" +458 26 regularizer """no""" +458 26 optimizer """adam""" +458 26 training_loop """lcwa""" +458 26 evaluator """rankbased""" +458 27 dataset """fb15k237""" +458 27 model """proje""" +458 27 loss """softplus""" +458 27 regularizer """no""" +458 27 optimizer """adam""" +458 27 training_loop """lcwa""" +458 27 evaluator """rankbased""" +458 28 
dataset """fb15k237""" +458 28 model """proje""" +458 28 loss """softplus""" +458 28 regularizer """no""" +458 28 optimizer """adam""" +458 28 training_loop """lcwa""" +458 28 evaluator """rankbased""" +458 29 dataset """fb15k237""" +458 29 model """proje""" +458 29 loss """softplus""" +458 29 regularizer """no""" +458 29 optimizer """adam""" +458 29 training_loop """lcwa""" +458 29 evaluator """rankbased""" +458 30 dataset """fb15k237""" +458 30 model """proje""" +458 30 loss """softplus""" +458 30 regularizer """no""" +458 30 optimizer """adam""" +458 30 training_loop """lcwa""" +458 30 evaluator """rankbased""" +458 31 dataset """fb15k237""" +458 31 model """proje""" +458 31 loss """softplus""" +458 31 regularizer """no""" +458 31 optimizer """adam""" +458 31 training_loop """lcwa""" +458 31 evaluator """rankbased""" +458 32 dataset """fb15k237""" +458 32 model """proje""" +458 32 loss """softplus""" +458 32 regularizer """no""" +458 32 optimizer """adam""" +458 32 training_loop """lcwa""" +458 32 evaluator """rankbased""" +458 33 dataset """fb15k237""" +458 33 model """proje""" +458 33 loss """softplus""" +458 33 regularizer """no""" +458 33 optimizer """adam""" +458 33 training_loop """lcwa""" +458 33 evaluator """rankbased""" +458 34 dataset """fb15k237""" +458 34 model """proje""" +458 34 loss """softplus""" +458 34 regularizer """no""" +458 34 optimizer """adam""" +458 34 training_loop """lcwa""" +458 34 evaluator """rankbased""" +458 35 dataset """fb15k237""" +458 35 model """proje""" +458 35 loss """softplus""" +458 35 regularizer """no""" +458 35 optimizer """adam""" +458 35 training_loop """lcwa""" +458 35 evaluator """rankbased""" +458 36 dataset """fb15k237""" +458 36 model """proje""" +458 36 loss """softplus""" +458 36 regularizer """no""" +458 36 optimizer """adam""" +458 36 training_loop """lcwa""" +458 36 evaluator """rankbased""" +458 37 dataset """fb15k237""" +458 37 model """proje""" +458 37 loss """softplus""" +458 37 regularizer """no""" 
+458 37 optimizer """adam""" +458 37 training_loop """lcwa""" +458 37 evaluator """rankbased""" +459 1 model.embedding_dim 2.0 +459 1 loss.margin 8.113967631450079 +459 1 optimizer.lr 0.028799461679222677 +459 1 negative_sampler.num_negs_per_pos 80.0 +459 1 training.batch_size 1.0 +459 2 model.embedding_dim 1.0 +459 2 loss.margin 6.589985581229406 +459 2 optimizer.lr 0.0239533675404367 +459 2 negative_sampler.num_negs_per_pos 35.0 +459 2 training.batch_size 1.0 +459 3 model.embedding_dim 1.0 +459 3 loss.margin 8.32751594181257 +459 3 optimizer.lr 0.004969553951669371 +459 3 negative_sampler.num_negs_per_pos 45.0 +459 3 training.batch_size 0.0 +459 4 model.embedding_dim 1.0 +459 4 loss.margin 3.721644008881637 +459 4 optimizer.lr 0.010241546131447692 +459 4 negative_sampler.num_negs_per_pos 70.0 +459 4 training.batch_size 2.0 +459 5 model.embedding_dim 0.0 +459 5 loss.margin 4.991288138058771 +459 5 optimizer.lr 0.0014707311021900941 +459 5 negative_sampler.num_negs_per_pos 34.0 +459 5 training.batch_size 2.0 +459 6 model.embedding_dim 2.0 +459 6 loss.margin 0.6800298746016534 +459 6 optimizer.lr 0.08473711603761352 +459 6 negative_sampler.num_negs_per_pos 19.0 +459 6 training.batch_size 2.0 +459 7 model.embedding_dim 0.0 +459 7 loss.margin 3.866864284869166 +459 7 optimizer.lr 0.026015485610410023 +459 7 negative_sampler.num_negs_per_pos 43.0 +459 7 training.batch_size 0.0 +459 8 model.embedding_dim 2.0 +459 8 loss.margin 4.582854451634489 +459 8 optimizer.lr 0.044296999084743154 +459 8 negative_sampler.num_negs_per_pos 10.0 +459 8 training.batch_size 2.0 +459 9 model.embedding_dim 0.0 +459 9 loss.margin 4.3509691085218485 +459 9 optimizer.lr 0.002598969790099577 +459 9 negative_sampler.num_negs_per_pos 50.0 +459 9 training.batch_size 0.0 +459 10 model.embedding_dim 2.0 +459 10 loss.margin 3.3531937502716613 +459 10 optimizer.lr 0.01600830610645924 +459 10 negative_sampler.num_negs_per_pos 94.0 +459 10 training.batch_size 2.0 +459 11 model.embedding_dim 0.0 +459 11 
loss.margin 7.797237925815803 +459 11 optimizer.lr 0.004618349455168172 +459 11 negative_sampler.num_negs_per_pos 60.0 +459 11 training.batch_size 2.0 +459 12 model.embedding_dim 1.0 +459 12 loss.margin 3.416413118368707 +459 12 optimizer.lr 0.017019258693206838 +459 12 negative_sampler.num_negs_per_pos 93.0 +459 12 training.batch_size 1.0 +459 13 model.embedding_dim 0.0 +459 13 loss.margin 9.45711745940151 +459 13 optimizer.lr 0.0016280906500195117 +459 13 negative_sampler.num_negs_per_pos 50.0 +459 13 training.batch_size 1.0 +459 14 model.embedding_dim 1.0 +459 14 loss.margin 6.138402706474035 +459 14 optimizer.lr 0.0017099720784201314 +459 14 negative_sampler.num_negs_per_pos 97.0 +459 14 training.batch_size 2.0 +459 15 model.embedding_dim 0.0 +459 15 loss.margin 4.277716569341356 +459 15 optimizer.lr 0.006144939984687541 +459 15 negative_sampler.num_negs_per_pos 8.0 +459 15 training.batch_size 0.0 +459 16 model.embedding_dim 1.0 +459 16 loss.margin 3.9524490247109294 +459 16 optimizer.lr 0.009169690898131673 +459 16 negative_sampler.num_negs_per_pos 80.0 +459 16 training.batch_size 0.0 +459 17 model.embedding_dim 1.0 +459 17 loss.margin 8.786089824872178 +459 17 optimizer.lr 0.0028717233549234162 +459 17 negative_sampler.num_negs_per_pos 9.0 +459 17 training.batch_size 2.0 +459 18 model.embedding_dim 0.0 +459 18 loss.margin 5.836488390831373 +459 18 optimizer.lr 0.04224966557706042 +459 18 negative_sampler.num_negs_per_pos 79.0 +459 18 training.batch_size 2.0 +459 19 model.embedding_dim 1.0 +459 19 loss.margin 7.958562033948439 +459 19 optimizer.lr 0.0013502285938911409 +459 19 negative_sampler.num_negs_per_pos 19.0 +459 19 training.batch_size 0.0 +459 20 model.embedding_dim 1.0 +459 20 loss.margin 7.664508980643832 +459 20 optimizer.lr 0.0033013872302942366 +459 20 negative_sampler.num_negs_per_pos 20.0 +459 20 training.batch_size 0.0 +459 21 model.embedding_dim 1.0 +459 21 loss.margin 8.858090557455613 +459 21 optimizer.lr 0.005346570389064793 +459 21 
negative_sampler.num_negs_per_pos 85.0 +459 21 training.batch_size 0.0 +459 22 model.embedding_dim 2.0 +459 22 loss.margin 7.709679823923154 +459 22 optimizer.lr 0.009982181111089726 +459 22 negative_sampler.num_negs_per_pos 57.0 +459 22 training.batch_size 2.0 +459 23 model.embedding_dim 2.0 +459 23 loss.margin 8.953818349971824 +459 23 optimizer.lr 0.06734318453227879 +459 23 negative_sampler.num_negs_per_pos 80.0 +459 23 training.batch_size 2.0 +459 24 model.embedding_dim 2.0 +459 24 loss.margin 4.006735159259064 +459 24 optimizer.lr 0.004414273285706552 +459 24 negative_sampler.num_negs_per_pos 51.0 +459 24 training.batch_size 1.0 +459 1 dataset """fb15k237""" +459 1 model """proje""" +459 1 loss """marginranking""" +459 1 regularizer """no""" +459 1 optimizer """adam""" +459 1 training_loop """owa""" +459 1 negative_sampler """basic""" +459 1 evaluator """rankbased""" +459 2 dataset """fb15k237""" +459 2 model """proje""" +459 2 loss """marginranking""" +459 2 regularizer """no""" +459 2 optimizer """adam""" +459 2 training_loop """owa""" +459 2 negative_sampler """basic""" +459 2 evaluator """rankbased""" +459 3 dataset """fb15k237""" +459 3 model """proje""" +459 3 loss """marginranking""" +459 3 regularizer """no""" +459 3 optimizer """adam""" +459 3 training_loop """owa""" +459 3 negative_sampler """basic""" +459 3 evaluator """rankbased""" +459 4 dataset """fb15k237""" +459 4 model """proje""" +459 4 loss """marginranking""" +459 4 regularizer """no""" +459 4 optimizer """adam""" +459 4 training_loop """owa""" +459 4 negative_sampler """basic""" +459 4 evaluator """rankbased""" +459 5 dataset """fb15k237""" +459 5 model """proje""" +459 5 loss """marginranking""" +459 5 regularizer """no""" +459 5 optimizer """adam""" +459 5 training_loop """owa""" +459 5 negative_sampler """basic""" +459 5 evaluator """rankbased""" +459 6 dataset """fb15k237""" +459 6 model """proje""" +459 6 loss """marginranking""" +459 6 regularizer """no""" +459 6 optimizer 
"""adam""" +459 6 training_loop """owa""" +459 6 negative_sampler """basic""" +459 6 evaluator """rankbased""" +459 7 dataset """fb15k237""" +459 7 model """proje""" +459 7 loss """marginranking""" +459 7 regularizer """no""" +459 7 optimizer """adam""" +459 7 training_loop """owa""" +459 7 negative_sampler """basic""" +459 7 evaluator """rankbased""" +459 8 dataset """fb15k237""" +459 8 model """proje""" +459 8 loss """marginranking""" +459 8 regularizer """no""" +459 8 optimizer """adam""" +459 8 training_loop """owa""" +459 8 negative_sampler """basic""" +459 8 evaluator """rankbased""" +459 9 dataset """fb15k237""" +459 9 model """proje""" +459 9 loss """marginranking""" +459 9 regularizer """no""" +459 9 optimizer """adam""" +459 9 training_loop """owa""" +459 9 negative_sampler """basic""" +459 9 evaluator """rankbased""" +459 10 dataset """fb15k237""" +459 10 model """proje""" +459 10 loss """marginranking""" +459 10 regularizer """no""" +459 10 optimizer """adam""" +459 10 training_loop """owa""" +459 10 negative_sampler """basic""" +459 10 evaluator """rankbased""" +459 11 dataset """fb15k237""" +459 11 model """proje""" +459 11 loss """marginranking""" +459 11 regularizer """no""" +459 11 optimizer """adam""" +459 11 training_loop """owa""" +459 11 negative_sampler """basic""" +459 11 evaluator """rankbased""" +459 12 dataset """fb15k237""" +459 12 model """proje""" +459 12 loss """marginranking""" +459 12 regularizer """no""" +459 12 optimizer """adam""" +459 12 training_loop """owa""" +459 12 negative_sampler """basic""" +459 12 evaluator """rankbased""" +459 13 dataset """fb15k237""" +459 13 model """proje""" +459 13 loss """marginranking""" +459 13 regularizer """no""" +459 13 optimizer """adam""" +459 13 training_loop """owa""" +459 13 negative_sampler """basic""" +459 13 evaluator """rankbased""" +459 14 dataset """fb15k237""" +459 14 model """proje""" +459 14 loss """marginranking""" +459 14 regularizer """no""" +459 14 optimizer """adam""" +459 14 
training_loop """owa""" +459 14 negative_sampler """basic""" +459 14 evaluator """rankbased""" +459 15 dataset """fb15k237""" +459 15 model """proje""" +459 15 loss """marginranking""" +459 15 regularizer """no""" +459 15 optimizer """adam""" +459 15 training_loop """owa""" +459 15 negative_sampler """basic""" +459 15 evaluator """rankbased""" +459 16 dataset """fb15k237""" +459 16 model """proje""" +459 16 loss """marginranking""" +459 16 regularizer """no""" +459 16 optimizer """adam""" +459 16 training_loop """owa""" +459 16 negative_sampler """basic""" +459 16 evaluator """rankbased""" +459 17 dataset """fb15k237""" +459 17 model """proje""" +459 17 loss """marginranking""" +459 17 regularizer """no""" +459 17 optimizer """adam""" +459 17 training_loop """owa""" +459 17 negative_sampler """basic""" +459 17 evaluator """rankbased""" +459 18 dataset """fb15k237""" +459 18 model """proje""" +459 18 loss """marginranking""" +459 18 regularizer """no""" +459 18 optimizer """adam""" +459 18 training_loop """owa""" +459 18 negative_sampler """basic""" +459 18 evaluator """rankbased""" +459 19 dataset """fb15k237""" +459 19 model """proje""" +459 19 loss """marginranking""" +459 19 regularizer """no""" +459 19 optimizer """adam""" +459 19 training_loop """owa""" +459 19 negative_sampler """basic""" +459 19 evaluator """rankbased""" +459 20 dataset """fb15k237""" +459 20 model """proje""" +459 20 loss """marginranking""" +459 20 regularizer """no""" +459 20 optimizer """adam""" +459 20 training_loop """owa""" +459 20 negative_sampler """basic""" +459 20 evaluator """rankbased""" +459 21 dataset """fb15k237""" +459 21 model """proje""" +459 21 loss """marginranking""" +459 21 regularizer """no""" +459 21 optimizer """adam""" +459 21 training_loop """owa""" +459 21 negative_sampler """basic""" +459 21 evaluator """rankbased""" +459 22 dataset """fb15k237""" +459 22 model """proje""" +459 22 loss """marginranking""" +459 22 regularizer """no""" +459 22 optimizer """adam""" 
+459 22 training_loop """owa""" +459 22 negative_sampler """basic""" +459 22 evaluator """rankbased""" +459 23 dataset """fb15k237""" +459 23 model """proje""" +459 23 loss """marginranking""" +459 23 regularizer """no""" +459 23 optimizer """adam""" +459 23 training_loop """owa""" +459 23 negative_sampler """basic""" +459 23 evaluator """rankbased""" +459 24 dataset """fb15k237""" +459 24 model """proje""" +459 24 loss """marginranking""" +459 24 regularizer """no""" +459 24 optimizer """adam""" +459 24 training_loop """owa""" +459 24 negative_sampler """basic""" +459 24 evaluator """rankbased""" +460 1 model.embedding_dim 1.0 +460 1 loss.margin 9.41507225038298 +460 1 optimizer.lr 0.001293439289473381 +460 1 negative_sampler.num_negs_per_pos 42.0 +460 1 training.batch_size 1.0 +460 2 model.embedding_dim 2.0 +460 2 loss.margin 8.521529837367773 +460 2 optimizer.lr 0.032682080575552365 +460 2 negative_sampler.num_negs_per_pos 83.0 +460 2 training.batch_size 1.0 +460 3 model.embedding_dim 0.0 +460 3 loss.margin 1.1981066530636744 +460 3 optimizer.lr 0.01611605366101887 +460 3 negative_sampler.num_negs_per_pos 10.0 +460 3 training.batch_size 2.0 +460 4 model.embedding_dim 2.0 +460 4 loss.margin 2.3991162568461855 +460 4 optimizer.lr 0.06044670021941565 +460 4 negative_sampler.num_negs_per_pos 36.0 +460 4 training.batch_size 2.0 +460 5 model.embedding_dim 2.0 +460 5 loss.margin 5.292400965803058 +460 5 optimizer.lr 0.006071745307170713 +460 5 negative_sampler.num_negs_per_pos 60.0 +460 5 training.batch_size 2.0 +460 6 model.embedding_dim 0.0 +460 6 loss.margin 3.5274539220722954 +460 6 optimizer.lr 0.05732367215900574 +460 6 negative_sampler.num_negs_per_pos 85.0 +460 6 training.batch_size 1.0 +460 7 model.embedding_dim 0.0 +460 7 loss.margin 4.938579620246887 +460 7 optimizer.lr 0.0039305940407471434 +460 7 negative_sampler.num_negs_per_pos 94.0 +460 7 training.batch_size 0.0 +460 8 model.embedding_dim 0.0 +460 8 loss.margin 1.5456442926065224 +460 8 optimizer.lr 
0.013132658580715084 +460 8 negative_sampler.num_negs_per_pos 17.0 +460 8 training.batch_size 0.0 +460 9 model.embedding_dim 1.0 +460 9 loss.margin 9.979540372036128 +460 9 optimizer.lr 0.005181075118177457 +460 9 negative_sampler.num_negs_per_pos 17.0 +460 9 training.batch_size 2.0 +460 10 model.embedding_dim 1.0 +460 10 loss.margin 7.511059383378205 +460 10 optimizer.lr 0.008427832925358501 +460 10 negative_sampler.num_negs_per_pos 89.0 +460 10 training.batch_size 2.0 +460 11 model.embedding_dim 0.0 +460 11 loss.margin 1.3822235237176264 +460 11 optimizer.lr 0.038602908953512656 +460 11 negative_sampler.num_negs_per_pos 24.0 +460 11 training.batch_size 2.0 +460 12 model.embedding_dim 2.0 +460 12 loss.margin 2.427373813833481 +460 12 optimizer.lr 0.0023320152887770958 +460 12 negative_sampler.num_negs_per_pos 32.0 +460 12 training.batch_size 2.0 +460 13 model.embedding_dim 0.0 +460 13 loss.margin 5.570772949238258 +460 13 optimizer.lr 0.015996093113330383 +460 13 negative_sampler.num_negs_per_pos 8.0 +460 13 training.batch_size 2.0 +460 14 model.embedding_dim 0.0 +460 14 loss.margin 4.704022983794113 +460 14 optimizer.lr 0.0021940587905760036 +460 14 negative_sampler.num_negs_per_pos 54.0 +460 14 training.batch_size 1.0 +460 15 model.embedding_dim 0.0 +460 15 loss.margin 8.601399805994564 +460 15 optimizer.lr 0.054788079982724044 +460 15 negative_sampler.num_negs_per_pos 93.0 +460 15 training.batch_size 0.0 +460 16 model.embedding_dim 2.0 +460 16 loss.margin 1.467447606164269 +460 16 optimizer.lr 0.0010537585637104855 +460 16 negative_sampler.num_negs_per_pos 31.0 +460 16 training.batch_size 0.0 +460 17 model.embedding_dim 1.0 +460 17 loss.margin 9.373455547283726 +460 17 optimizer.lr 0.01154349917788112 +460 17 negative_sampler.num_negs_per_pos 32.0 +460 17 training.batch_size 0.0 +460 18 model.embedding_dim 1.0 +460 18 loss.margin 6.6482306521021135 +460 18 optimizer.lr 0.005219349649919909 +460 18 negative_sampler.num_negs_per_pos 51.0 +460 18 
training.batch_size 0.0 +460 19 model.embedding_dim 0.0 +460 19 loss.margin 1.1959252247288776 +460 19 optimizer.lr 0.0016804230549100033 +460 19 negative_sampler.num_negs_per_pos 89.0 +460 19 training.batch_size 2.0 +460 20 model.embedding_dim 2.0 +460 20 loss.margin 7.494879003144923 +460 20 optimizer.lr 0.002513331438351559 +460 20 negative_sampler.num_negs_per_pos 42.0 +460 20 training.batch_size 2.0 +460 21 model.embedding_dim 1.0 +460 21 loss.margin 9.95004220384547 +460 21 optimizer.lr 0.004236912153988555 +460 21 negative_sampler.num_negs_per_pos 31.0 +460 21 training.batch_size 1.0 +460 22 model.embedding_dim 0.0 +460 22 loss.margin 7.984438211467736 +460 22 optimizer.lr 0.004313390105035239 +460 22 negative_sampler.num_negs_per_pos 20.0 +460 22 training.batch_size 1.0 +460 23 model.embedding_dim 0.0 +460 23 loss.margin 2.3016203460533595 +460 23 optimizer.lr 0.04359867010195801 +460 23 negative_sampler.num_negs_per_pos 86.0 +460 23 training.batch_size 0.0 +460 24 model.embedding_dim 0.0 +460 24 loss.margin 5.361726911483688 +460 24 optimizer.lr 0.008298817020254375 +460 24 negative_sampler.num_negs_per_pos 67.0 +460 24 training.batch_size 2.0 +460 25 model.embedding_dim 0.0 +460 25 loss.margin 3.36234825753272 +460 25 optimizer.lr 0.010578998878926833 +460 25 negative_sampler.num_negs_per_pos 63.0 +460 25 training.batch_size 2.0 +460 26 model.embedding_dim 0.0 +460 26 loss.margin 2.9236933455861647 +460 26 optimizer.lr 0.0713485983206201 +460 26 negative_sampler.num_negs_per_pos 71.0 +460 26 training.batch_size 1.0 +460 27 model.embedding_dim 2.0 +460 27 loss.margin 2.912964849074391 +460 27 optimizer.lr 0.018787890478389656 +460 27 negative_sampler.num_negs_per_pos 14.0 +460 27 training.batch_size 1.0 +460 28 model.embedding_dim 1.0 +460 28 loss.margin 2.207675872182412 +460 28 optimizer.lr 0.006897835416919365 +460 28 negative_sampler.num_negs_per_pos 94.0 +460 28 training.batch_size 1.0 +460 29 model.embedding_dim 2.0 +460 29 loss.margin 
8.195939632173262 +460 29 optimizer.lr 0.004757117958300583 +460 29 negative_sampler.num_negs_per_pos 78.0 +460 29 training.batch_size 1.0 +460 30 model.embedding_dim 2.0 +460 30 loss.margin 8.68590359234695 +460 30 optimizer.lr 0.0016368428572886766 +460 30 negative_sampler.num_negs_per_pos 86.0 +460 30 training.batch_size 0.0 +460 31 model.embedding_dim 1.0 +460 31 loss.margin 7.853605943496573 +460 31 optimizer.lr 0.030569659047612812 +460 31 negative_sampler.num_negs_per_pos 17.0 +460 31 training.batch_size 2.0 +460 32 model.embedding_dim 1.0 +460 32 loss.margin 2.0204826328767314 +460 32 optimizer.lr 0.05675738836434224 +460 32 negative_sampler.num_negs_per_pos 14.0 +460 32 training.batch_size 2.0 +460 33 model.embedding_dim 2.0 +460 33 loss.margin 5.432160089300467 +460 33 optimizer.lr 0.09228543908262035 +460 33 negative_sampler.num_negs_per_pos 67.0 +460 33 training.batch_size 2.0 +460 34 model.embedding_dim 2.0 +460 34 loss.margin 4.440921014820827 +460 34 optimizer.lr 0.08774720488264884 +460 34 negative_sampler.num_negs_per_pos 58.0 +460 34 training.batch_size 1.0 +460 35 model.embedding_dim 2.0 +460 35 loss.margin 1.890645448105114 +460 35 optimizer.lr 0.026879139523375072 +460 35 negative_sampler.num_negs_per_pos 58.0 +460 35 training.batch_size 1.0 +460 36 model.embedding_dim 0.0 +460 36 loss.margin 8.653859502220897 +460 36 optimizer.lr 0.01259769058504986 +460 36 negative_sampler.num_negs_per_pos 13.0 +460 36 training.batch_size 1.0 +460 37 model.embedding_dim 2.0 +460 37 loss.margin 0.5431654765017042 +460 37 optimizer.lr 0.0013204607808117237 +460 37 negative_sampler.num_negs_per_pos 48.0 +460 37 training.batch_size 0.0 +460 38 model.embedding_dim 1.0 +460 38 loss.margin 4.280059317402831 +460 38 optimizer.lr 0.03472230037632657 +460 38 negative_sampler.num_negs_per_pos 33.0 +460 38 training.batch_size 1.0 +460 39 model.embedding_dim 2.0 +460 39 loss.margin 2.4705931503779737 +460 39 optimizer.lr 0.0823805297150299 +460 39 
negative_sampler.num_negs_per_pos 82.0 +460 39 training.batch_size 2.0 +460 40 model.embedding_dim 2.0 +460 40 loss.margin 2.563264019103435 +460 40 optimizer.lr 0.021248696895640445 +460 40 negative_sampler.num_negs_per_pos 67.0 +460 40 training.batch_size 0.0 +460 41 model.embedding_dim 2.0 +460 41 loss.margin 3.4248472696473757 +460 41 optimizer.lr 0.019786656331078917 +460 41 negative_sampler.num_negs_per_pos 29.0 +460 41 training.batch_size 0.0 +460 42 model.embedding_dim 2.0 +460 42 loss.margin 0.9354353666148848 +460 42 optimizer.lr 0.035306794416651605 +460 42 negative_sampler.num_negs_per_pos 32.0 +460 42 training.batch_size 2.0 +460 43 model.embedding_dim 0.0 +460 43 loss.margin 4.070796448566966 +460 43 optimizer.lr 0.06433679046615008 +460 43 negative_sampler.num_negs_per_pos 11.0 +460 43 training.batch_size 2.0 +460 44 model.embedding_dim 0.0 +460 44 loss.margin 0.7184391073835034 +460 44 optimizer.lr 0.008807836724118627 +460 44 negative_sampler.num_negs_per_pos 46.0 +460 44 training.batch_size 2.0 +460 45 model.embedding_dim 2.0 +460 45 loss.margin 2.9633428369442374 +460 45 optimizer.lr 0.022847119231234288 +460 45 negative_sampler.num_negs_per_pos 9.0 +460 45 training.batch_size 2.0 +460 46 model.embedding_dim 1.0 +460 46 loss.margin 4.542903990952663 +460 46 optimizer.lr 0.060424195696304014 +460 46 negative_sampler.num_negs_per_pos 83.0 +460 46 training.batch_size 0.0 +460 47 model.embedding_dim 2.0 +460 47 loss.margin 0.5341728787480045 +460 47 optimizer.lr 0.0059030017440575115 +460 47 negative_sampler.num_negs_per_pos 72.0 +460 47 training.batch_size 2.0 +460 48 model.embedding_dim 0.0 +460 48 loss.margin 3.5722204508160313 +460 48 optimizer.lr 0.0021740815016586985 +460 48 negative_sampler.num_negs_per_pos 7.0 +460 48 training.batch_size 1.0 +460 49 model.embedding_dim 0.0 +460 49 loss.margin 6.1133987399199645 +460 49 optimizer.lr 0.038024253006952995 +460 49 negative_sampler.num_negs_per_pos 94.0 +460 49 training.batch_size 2.0 +460 50 
model.embedding_dim 1.0 +460 50 loss.margin 6.980308617188987 +460 50 optimizer.lr 0.0043487963207365764 +460 50 negative_sampler.num_negs_per_pos 0.0 +460 50 training.batch_size 2.0 +460 51 model.embedding_dim 0.0 +460 51 loss.margin 2.257936945791993 +460 51 optimizer.lr 0.0027286708965086497 +460 51 negative_sampler.num_negs_per_pos 69.0 +460 51 training.batch_size 2.0 +460 52 model.embedding_dim 2.0 +460 52 loss.margin 1.2595150693429435 +460 52 optimizer.lr 0.0736416416216297 +460 52 negative_sampler.num_negs_per_pos 66.0 +460 52 training.batch_size 1.0 +460 53 model.embedding_dim 2.0 +460 53 loss.margin 1.8612397957639986 +460 53 optimizer.lr 0.028505528605205596 +460 53 negative_sampler.num_negs_per_pos 81.0 +460 53 training.batch_size 2.0 +460 54 model.embedding_dim 1.0 +460 54 loss.margin 6.694727854344058 +460 54 optimizer.lr 0.004791899482995172 +460 54 negative_sampler.num_negs_per_pos 3.0 +460 54 training.batch_size 1.0 +460 55 model.embedding_dim 0.0 +460 55 loss.margin 4.752367852088231 +460 55 optimizer.lr 0.009097632458183189 +460 55 negative_sampler.num_negs_per_pos 76.0 +460 55 training.batch_size 2.0 +460 56 model.embedding_dim 0.0 +460 56 loss.margin 5.003948092295432 +460 56 optimizer.lr 0.0268698850356714 +460 56 negative_sampler.num_negs_per_pos 52.0 +460 56 training.batch_size 0.0 +460 57 model.embedding_dim 2.0 +460 57 loss.margin 6.784309924202327 +460 57 optimizer.lr 0.001289572016326242 +460 57 negative_sampler.num_negs_per_pos 16.0 +460 57 training.batch_size 2.0 +460 58 model.embedding_dim 0.0 +460 58 loss.margin 5.0092942979740736 +460 58 optimizer.lr 0.06885534861826305 +460 58 negative_sampler.num_negs_per_pos 95.0 +460 58 training.batch_size 2.0 +460 59 model.embedding_dim 2.0 +460 59 loss.margin 6.624055311021272 +460 59 optimizer.lr 0.006769345207127236 +460 59 negative_sampler.num_negs_per_pos 32.0 +460 59 training.batch_size 2.0 +460 1 dataset """fb15k237""" +460 1 model """proje""" +460 1 loss """marginranking""" +460 1 
regularizer """no""" +460 1 optimizer """adam""" +460 1 training_loop """owa""" +460 1 negative_sampler """basic""" +460 1 evaluator """rankbased""" +460 2 dataset """fb15k237""" +460 2 model """proje""" +460 2 loss """marginranking""" +460 2 regularizer """no""" +460 2 optimizer """adam""" +460 2 training_loop """owa""" +460 2 negative_sampler """basic""" +460 2 evaluator """rankbased""" +460 3 dataset """fb15k237""" +460 3 model """proje""" +460 3 loss """marginranking""" +460 3 regularizer """no""" +460 3 optimizer """adam""" +460 3 training_loop """owa""" +460 3 negative_sampler """basic""" +460 3 evaluator """rankbased""" +460 4 dataset """fb15k237""" +460 4 model """proje""" +460 4 loss """marginranking""" +460 4 regularizer """no""" +460 4 optimizer """adam""" +460 4 training_loop """owa""" +460 4 negative_sampler """basic""" +460 4 evaluator """rankbased""" +460 5 dataset """fb15k237""" +460 5 model """proje""" +460 5 loss """marginranking""" +460 5 regularizer """no""" +460 5 optimizer """adam""" +460 5 training_loop """owa""" +460 5 negative_sampler """basic""" +460 5 evaluator """rankbased""" +460 6 dataset """fb15k237""" +460 6 model """proje""" +460 6 loss """marginranking""" +460 6 regularizer """no""" +460 6 optimizer """adam""" +460 6 training_loop """owa""" +460 6 negative_sampler """basic""" +460 6 evaluator """rankbased""" +460 7 dataset """fb15k237""" +460 7 model """proje""" +460 7 loss """marginranking""" +460 7 regularizer """no""" +460 7 optimizer """adam""" +460 7 training_loop """owa""" +460 7 negative_sampler """basic""" +460 7 evaluator """rankbased""" +460 8 dataset """fb15k237""" +460 8 model """proje""" +460 8 loss """marginranking""" +460 8 regularizer """no""" +460 8 optimizer """adam""" +460 8 training_loop """owa""" +460 8 negative_sampler """basic""" +460 8 evaluator """rankbased""" +460 9 dataset """fb15k237""" +460 9 model """proje""" +460 9 loss """marginranking""" +460 9 regularizer """no""" +460 9 optimizer """adam""" +460 9 
training_loop """owa""" +460 9 negative_sampler """basic""" +460 9 evaluator """rankbased""" +460 10 dataset """fb15k237""" +460 10 model """proje""" +460 10 loss """marginranking""" +460 10 regularizer """no""" +460 10 optimizer """adam""" +460 10 training_loop """owa""" +460 10 negative_sampler """basic""" +460 10 evaluator """rankbased""" +460 11 dataset """fb15k237""" +460 11 model """proje""" +460 11 loss """marginranking""" +460 11 regularizer """no""" +460 11 optimizer """adam""" +460 11 training_loop """owa""" +460 11 negative_sampler """basic""" +460 11 evaluator """rankbased""" +460 12 dataset """fb15k237""" +460 12 model """proje""" +460 12 loss """marginranking""" +460 12 regularizer """no""" +460 12 optimizer """adam""" +460 12 training_loop """owa""" +460 12 negative_sampler """basic""" +460 12 evaluator """rankbased""" +460 13 dataset """fb15k237""" +460 13 model """proje""" +460 13 loss """marginranking""" +460 13 regularizer """no""" +460 13 optimizer """adam""" +460 13 training_loop """owa""" +460 13 negative_sampler """basic""" +460 13 evaluator """rankbased""" +460 14 dataset """fb15k237""" +460 14 model """proje""" +460 14 loss """marginranking""" +460 14 regularizer """no""" +460 14 optimizer """adam""" +460 14 training_loop """owa""" +460 14 negative_sampler """basic""" +460 14 evaluator """rankbased""" +460 15 dataset """fb15k237""" +460 15 model """proje""" +460 15 loss """marginranking""" +460 15 regularizer """no""" +460 15 optimizer """adam""" +460 15 training_loop """owa""" +460 15 negative_sampler """basic""" +460 15 evaluator """rankbased""" +460 16 dataset """fb15k237""" +460 16 model """proje""" +460 16 loss """marginranking""" +460 16 regularizer """no""" +460 16 optimizer """adam""" +460 16 training_loop """owa""" +460 16 negative_sampler """basic""" +460 16 evaluator """rankbased""" +460 17 dataset """fb15k237""" +460 17 model """proje""" +460 17 loss """marginranking""" +460 17 regularizer """no""" +460 17 optimizer """adam""" 
+460 17 training_loop """owa""" +460 17 negative_sampler """basic""" +460 17 evaluator """rankbased""" +460 18 dataset """fb15k237""" +460 18 model """proje""" +460 18 loss """marginranking""" +460 18 regularizer """no""" +460 18 optimizer """adam""" +460 18 training_loop """owa""" +460 18 negative_sampler """basic""" +460 18 evaluator """rankbased""" +460 19 dataset """fb15k237""" +460 19 model """proje""" +460 19 loss """marginranking""" +460 19 regularizer """no""" +460 19 optimizer """adam""" +460 19 training_loop """owa""" +460 19 negative_sampler """basic""" +460 19 evaluator """rankbased""" +460 20 dataset """fb15k237""" +460 20 model """proje""" +460 20 loss """marginranking""" +460 20 regularizer """no""" +460 20 optimizer """adam""" +460 20 training_loop """owa""" +460 20 negative_sampler """basic""" +460 20 evaluator """rankbased""" +460 21 dataset """fb15k237""" +460 21 model """proje""" +460 21 loss """marginranking""" +460 21 regularizer """no""" +460 21 optimizer """adam""" +460 21 training_loop """owa""" +460 21 negative_sampler """basic""" +460 21 evaluator """rankbased""" +460 22 dataset """fb15k237""" +460 22 model """proje""" +460 22 loss """marginranking""" +460 22 regularizer """no""" +460 22 optimizer """adam""" +460 22 training_loop """owa""" +460 22 negative_sampler """basic""" +460 22 evaluator """rankbased""" +460 23 dataset """fb15k237""" +460 23 model """proje""" +460 23 loss """marginranking""" +460 23 regularizer """no""" +460 23 optimizer """adam""" +460 23 training_loop """owa""" +460 23 negative_sampler """basic""" +460 23 evaluator """rankbased""" +460 24 dataset """fb15k237""" +460 24 model """proje""" +460 24 loss """marginranking""" +460 24 regularizer """no""" +460 24 optimizer """adam""" +460 24 training_loop """owa""" +460 24 negative_sampler """basic""" +460 24 evaluator """rankbased""" +460 25 dataset """fb15k237""" +460 25 model """proje""" +460 25 loss """marginranking""" +460 25 regularizer """no""" +460 25 optimizer 
"""adam""" +460 25 training_loop """owa""" +460 25 negative_sampler """basic""" +460 25 evaluator """rankbased""" +460 26 dataset """fb15k237""" +460 26 model """proje""" +460 26 loss """marginranking""" +460 26 regularizer """no""" +460 26 optimizer """adam""" +460 26 training_loop """owa""" +460 26 negative_sampler """basic""" +460 26 evaluator """rankbased""" +460 27 dataset """fb15k237""" +460 27 model """proje""" +460 27 loss """marginranking""" +460 27 regularizer """no""" +460 27 optimizer """adam""" +460 27 training_loop """owa""" +460 27 negative_sampler """basic""" +460 27 evaluator """rankbased""" +460 28 dataset """fb15k237""" +460 28 model """proje""" +460 28 loss """marginranking""" +460 28 regularizer """no""" +460 28 optimizer """adam""" +460 28 training_loop """owa""" +460 28 negative_sampler """basic""" +460 28 evaluator """rankbased""" +460 29 dataset """fb15k237""" +460 29 model """proje""" +460 29 loss """marginranking""" +460 29 regularizer """no""" +460 29 optimizer """adam""" +460 29 training_loop """owa""" +460 29 negative_sampler """basic""" +460 29 evaluator """rankbased""" +460 30 dataset """fb15k237""" +460 30 model """proje""" +460 30 loss """marginranking""" +460 30 regularizer """no""" +460 30 optimizer """adam""" +460 30 training_loop """owa""" +460 30 negative_sampler """basic""" +460 30 evaluator """rankbased""" +460 31 dataset """fb15k237""" +460 31 model """proje""" +460 31 loss """marginranking""" +460 31 regularizer """no""" +460 31 optimizer """adam""" +460 31 training_loop """owa""" +460 31 negative_sampler """basic""" +460 31 evaluator """rankbased""" +460 32 dataset """fb15k237""" +460 32 model """proje""" +460 32 loss """marginranking""" +460 32 regularizer """no""" +460 32 optimizer """adam""" +460 32 training_loop """owa""" +460 32 negative_sampler """basic""" +460 32 evaluator """rankbased""" +460 33 dataset """fb15k237""" +460 33 model """proje""" +460 33 loss """marginranking""" +460 33 regularizer """no""" +460 33 
optimizer """adam""" +460 33 training_loop """owa""" +460 33 negative_sampler """basic""" +460 33 evaluator """rankbased""" +460 34 dataset """fb15k237""" +460 34 model """proje""" +460 34 loss """marginranking""" +460 34 regularizer """no""" +460 34 optimizer """adam""" +460 34 training_loop """owa""" +460 34 negative_sampler """basic""" +460 34 evaluator """rankbased""" +460 35 dataset """fb15k237""" +460 35 model """proje""" +460 35 loss """marginranking""" +460 35 regularizer """no""" +460 35 optimizer """adam""" +460 35 training_loop """owa""" +460 35 negative_sampler """basic""" +460 35 evaluator """rankbased""" +460 36 dataset """fb15k237""" +460 36 model """proje""" +460 36 loss """marginranking""" +460 36 regularizer """no""" +460 36 optimizer """adam""" +460 36 training_loop """owa""" +460 36 negative_sampler """basic""" +460 36 evaluator """rankbased""" +460 37 dataset """fb15k237""" +460 37 model """proje""" +460 37 loss """marginranking""" +460 37 regularizer """no""" +460 37 optimizer """adam""" +460 37 training_loop """owa""" +460 37 negative_sampler """basic""" +460 37 evaluator """rankbased""" +460 38 dataset """fb15k237""" +460 38 model """proje""" +460 38 loss """marginranking""" +460 38 regularizer """no""" +460 38 optimizer """adam""" +460 38 training_loop """owa""" +460 38 negative_sampler """basic""" +460 38 evaluator """rankbased""" +460 39 dataset """fb15k237""" +460 39 model """proje""" +460 39 loss """marginranking""" +460 39 regularizer """no""" +460 39 optimizer """adam""" +460 39 training_loop """owa""" +460 39 negative_sampler """basic""" +460 39 evaluator """rankbased""" +460 40 dataset """fb15k237""" +460 40 model """proje""" +460 40 loss """marginranking""" +460 40 regularizer """no""" +460 40 optimizer """adam""" +460 40 training_loop """owa""" +460 40 negative_sampler """basic""" +460 40 evaluator """rankbased""" +460 41 dataset """fb15k237""" +460 41 model """proje""" +460 41 loss """marginranking""" +460 41 regularizer """no""" 
+460 41 optimizer """adam""" +460 41 training_loop """owa""" +460 41 negative_sampler """basic""" +460 41 evaluator """rankbased""" +460 42 dataset """fb15k237""" +460 42 model """proje""" +460 42 loss """marginranking""" +460 42 regularizer """no""" +460 42 optimizer """adam""" +460 42 training_loop """owa""" +460 42 negative_sampler """basic""" +460 42 evaluator """rankbased""" +460 43 dataset """fb15k237""" +460 43 model """proje""" +460 43 loss """marginranking""" +460 43 regularizer """no""" +460 43 optimizer """adam""" +460 43 training_loop """owa""" +460 43 negative_sampler """basic""" +460 43 evaluator """rankbased""" +460 44 dataset """fb15k237""" +460 44 model """proje""" +460 44 loss """marginranking""" +460 44 regularizer """no""" +460 44 optimizer """adam""" +460 44 training_loop """owa""" +460 44 negative_sampler """basic""" +460 44 evaluator """rankbased""" +460 45 dataset """fb15k237""" +460 45 model """proje""" +460 45 loss """marginranking""" +460 45 regularizer """no""" +460 45 optimizer """adam""" +460 45 training_loop """owa""" +460 45 negative_sampler """basic""" +460 45 evaluator """rankbased""" +460 46 dataset """fb15k237""" +460 46 model """proje""" +460 46 loss """marginranking""" +460 46 regularizer """no""" +460 46 optimizer """adam""" +460 46 training_loop """owa""" +460 46 negative_sampler """basic""" +460 46 evaluator """rankbased""" +460 47 dataset """fb15k237""" +460 47 model """proje""" +460 47 loss """marginranking""" +460 47 regularizer """no""" +460 47 optimizer """adam""" +460 47 training_loop """owa""" +460 47 negative_sampler """basic""" +460 47 evaluator """rankbased""" +460 48 dataset """fb15k237""" +460 48 model """proje""" +460 48 loss """marginranking""" +460 48 regularizer """no""" +460 48 optimizer """adam""" +460 48 training_loop """owa""" +460 48 negative_sampler """basic""" +460 48 evaluator """rankbased""" +460 49 dataset """fb15k237""" +460 49 model """proje""" +460 49 loss """marginranking""" +460 49 regularizer 
"""no""" +460 49 optimizer """adam""" +460 49 training_loop """owa""" +460 49 negative_sampler """basic""" +460 49 evaluator """rankbased""" +460 50 dataset """fb15k237""" +460 50 model """proje""" +460 50 loss """marginranking""" +460 50 regularizer """no""" +460 50 optimizer """adam""" +460 50 training_loop """owa""" +460 50 negative_sampler """basic""" +460 50 evaluator """rankbased""" +460 51 dataset """fb15k237""" +460 51 model """proje""" +460 51 loss """marginranking""" +460 51 regularizer """no""" +460 51 optimizer """adam""" +460 51 training_loop """owa""" +460 51 negative_sampler """basic""" +460 51 evaluator """rankbased""" +460 52 dataset """fb15k237""" +460 52 model """proje""" +460 52 loss """marginranking""" +460 52 regularizer """no""" +460 52 optimizer """adam""" +460 52 training_loop """owa""" +460 52 negative_sampler """basic""" +460 52 evaluator """rankbased""" +460 53 dataset """fb15k237""" +460 53 model """proje""" +460 53 loss """marginranking""" +460 53 regularizer """no""" +460 53 optimizer """adam""" +460 53 training_loop """owa""" +460 53 negative_sampler """basic""" +460 53 evaluator """rankbased""" +460 54 dataset """fb15k237""" +460 54 model """proje""" +460 54 loss """marginranking""" +460 54 regularizer """no""" +460 54 optimizer """adam""" +460 54 training_loop """owa""" +460 54 negative_sampler """basic""" +460 54 evaluator """rankbased""" +460 55 dataset """fb15k237""" +460 55 model """proje""" +460 55 loss """marginranking""" +460 55 regularizer """no""" +460 55 optimizer """adam""" +460 55 training_loop """owa""" +460 55 negative_sampler """basic""" +460 55 evaluator """rankbased""" +460 56 dataset """fb15k237""" +460 56 model """proje""" +460 56 loss """marginranking""" +460 56 regularizer """no""" +460 56 optimizer """adam""" +460 56 training_loop """owa""" +460 56 negative_sampler """basic""" +460 56 evaluator """rankbased""" +460 57 dataset """fb15k237""" +460 57 model """proje""" +460 57 loss """marginranking""" +460 57 
regularizer """no""" +460 57 optimizer """adam""" +460 57 training_loop """owa""" +460 57 negative_sampler """basic""" +460 57 evaluator """rankbased""" +460 58 dataset """fb15k237""" +460 58 model """proje""" +460 58 loss """marginranking""" +460 58 regularizer """no""" +460 58 optimizer """adam""" +460 58 training_loop """owa""" +460 58 negative_sampler """basic""" +460 58 evaluator """rankbased""" +460 59 dataset """fb15k237""" +460 59 model """proje""" +460 59 loss """marginranking""" +460 59 regularizer """no""" +460 59 optimizer """adam""" +460 59 training_loop """owa""" +460 59 negative_sampler """basic""" +460 59 evaluator """rankbased""" +461 1 model.embedding_dim 1.0 +461 1 loss.margin 11.736430753720086 +461 1 loss.adversarial_temperature 0.944939545631369 +461 1 optimizer.lr 0.0011049903326964443 +461 1 negative_sampler.num_negs_per_pos 77.0 +461 1 training.batch_size 0.0 +461 2 model.embedding_dim 1.0 +461 2 loss.margin 12.965864908160384 +461 2 loss.adversarial_temperature 0.7637833800587658 +461 2 optimizer.lr 0.0032120393221021165 +461 2 negative_sampler.num_negs_per_pos 17.0 +461 2 training.batch_size 0.0 +461 3 model.embedding_dim 1.0 +461 3 loss.margin 4.708335247109563 +461 3 loss.adversarial_temperature 0.7000414165410425 +461 3 optimizer.lr 0.0033633679536624935 +461 3 negative_sampler.num_negs_per_pos 83.0 +461 3 training.batch_size 2.0 +461 4 model.embedding_dim 1.0 +461 4 loss.margin 12.99028526895284 +461 4 loss.adversarial_temperature 0.16105198118363842 +461 4 optimizer.lr 0.005772510800926894 +461 4 negative_sampler.num_negs_per_pos 21.0 +461 4 training.batch_size 0.0 +461 5 model.embedding_dim 0.0 +461 5 loss.margin 12.41961957807258 +461 5 loss.adversarial_temperature 0.7167820131393323 +461 5 optimizer.lr 0.007547759835362667 +461 5 negative_sampler.num_negs_per_pos 67.0 +461 5 training.batch_size 1.0 +461 6 model.embedding_dim 1.0 +461 6 loss.margin 5.266059088541294 +461 6 loss.adversarial_temperature 0.5199657472451497 +461 6 
optimizer.lr 0.030794959822932007 +461 6 negative_sampler.num_negs_per_pos 67.0 +461 6 training.batch_size 1.0 +461 7 model.embedding_dim 0.0 +461 7 loss.margin 4.480312353989496 +461 7 loss.adversarial_temperature 0.1548570206403541 +461 7 optimizer.lr 0.012024187696112602 +461 7 negative_sampler.num_negs_per_pos 70.0 +461 7 training.batch_size 0.0 +461 8 model.embedding_dim 0.0 +461 8 loss.margin 12.974136429080215 +461 8 loss.adversarial_temperature 0.4323819569611307 +461 8 optimizer.lr 0.00944977102198997 +461 8 negative_sampler.num_negs_per_pos 98.0 +461 8 training.batch_size 1.0 +461 9 model.embedding_dim 0.0 +461 9 loss.margin 25.12412740263911 +461 9 loss.adversarial_temperature 0.335218841169136 +461 9 optimizer.lr 0.013149736554526462 +461 9 negative_sampler.num_negs_per_pos 21.0 +461 9 training.batch_size 0.0 +461 10 model.embedding_dim 1.0 +461 10 loss.margin 10.84915012840018 +461 10 loss.adversarial_temperature 0.15727331586878285 +461 10 optimizer.lr 0.0011050128833894033 +461 10 negative_sampler.num_negs_per_pos 80.0 +461 10 training.batch_size 0.0 +461 11 model.embedding_dim 0.0 +461 11 loss.margin 1.172944340648557 +461 11 loss.adversarial_temperature 0.2085103368680398 +461 11 optimizer.lr 0.0019274839202276847 +461 11 negative_sampler.num_negs_per_pos 45.0 +461 11 training.batch_size 2.0 +461 12 model.embedding_dim 0.0 +461 12 loss.margin 16.8129633634938 +461 12 loss.adversarial_temperature 0.42172636822003395 +461 12 optimizer.lr 0.002802079692165133 +461 12 negative_sampler.num_negs_per_pos 37.0 +461 12 training.batch_size 0.0 +461 13 model.embedding_dim 1.0 +461 13 loss.margin 12.246278840783711 +461 13 loss.adversarial_temperature 0.5660537166897855 +461 13 optimizer.lr 0.0019604208887617366 +461 13 negative_sampler.num_negs_per_pos 31.0 +461 13 training.batch_size 2.0 +461 14 model.embedding_dim 2.0 +461 14 loss.margin 17.12332073543092 +461 14 loss.adversarial_temperature 0.5411849204896131 +461 14 optimizer.lr 0.001146840628473493 +461 
14 negative_sampler.num_negs_per_pos 79.0 +461 14 training.batch_size 0.0 +461 15 model.embedding_dim 0.0 +461 15 loss.margin 20.78345148589269 +461 15 loss.adversarial_temperature 0.31236140466104334 +461 15 optimizer.lr 0.014939007019921785 +461 15 negative_sampler.num_negs_per_pos 67.0 +461 15 training.batch_size 0.0 +461 16 model.embedding_dim 1.0 +461 16 loss.margin 23.178669955703533 +461 16 loss.adversarial_temperature 0.4376561141139471 +461 16 optimizer.lr 0.0014150690406873306 +461 16 negative_sampler.num_negs_per_pos 75.0 +461 16 training.batch_size 1.0 +461 17 model.embedding_dim 2.0 +461 17 loss.margin 12.87338304338142 +461 17 loss.adversarial_temperature 0.47770519286482055 +461 17 optimizer.lr 0.003067369726092161 +461 17 negative_sampler.num_negs_per_pos 28.0 +461 17 training.batch_size 1.0 +461 18 model.embedding_dim 1.0 +461 18 loss.margin 27.89167092810754 +461 18 loss.adversarial_temperature 0.7288300120588569 +461 18 optimizer.lr 0.03055505649335543 +461 18 negative_sampler.num_negs_per_pos 19.0 +461 18 training.batch_size 1.0 +461 19 model.embedding_dim 1.0 +461 19 loss.margin 9.184705829194122 +461 19 loss.adversarial_temperature 0.780872948633475 +461 19 optimizer.lr 0.0063612671852080315 +461 19 negative_sampler.num_negs_per_pos 21.0 +461 19 training.batch_size 2.0 +461 20 model.embedding_dim 1.0 +461 20 loss.margin 13.04016916099139 +461 20 loss.adversarial_temperature 0.7243972722641382 +461 20 optimizer.lr 0.04634662988373303 +461 20 negative_sampler.num_negs_per_pos 45.0 +461 20 training.batch_size 2.0 +461 21 model.embedding_dim 0.0 +461 21 loss.margin 1.0287198855903121 +461 21 loss.adversarial_temperature 0.5488732531360758 +461 21 optimizer.lr 0.05627463646117597 +461 21 negative_sampler.num_negs_per_pos 8.0 +461 21 training.batch_size 0.0 +461 22 model.embedding_dim 1.0 +461 22 loss.margin 14.833567578617535 +461 22 loss.adversarial_temperature 0.5898832201614262 +461 22 optimizer.lr 0.011657322711981881 +461 22 
negative_sampler.num_negs_per_pos 61.0 +461 22 training.batch_size 2.0 +461 1 dataset """fb15k237""" +461 1 model """proje""" +461 1 loss """nssa""" +461 1 regularizer """no""" +461 1 optimizer """adam""" +461 1 training_loop """owa""" +461 1 negative_sampler """basic""" +461 1 evaluator """rankbased""" +461 2 dataset """fb15k237""" +461 2 model """proje""" +461 2 loss """nssa""" +461 2 regularizer """no""" +461 2 optimizer """adam""" +461 2 training_loop """owa""" +461 2 negative_sampler """basic""" +461 2 evaluator """rankbased""" +461 3 dataset """fb15k237""" +461 3 model """proje""" +461 3 loss """nssa""" +461 3 regularizer """no""" +461 3 optimizer """adam""" +461 3 training_loop """owa""" +461 3 negative_sampler """basic""" +461 3 evaluator """rankbased""" +461 4 dataset """fb15k237""" +461 4 model """proje""" +461 4 loss """nssa""" +461 4 regularizer """no""" +461 4 optimizer """adam""" +461 4 training_loop """owa""" +461 4 negative_sampler """basic""" +461 4 evaluator """rankbased""" +461 5 dataset """fb15k237""" +461 5 model """proje""" +461 5 loss """nssa""" +461 5 regularizer """no""" +461 5 optimizer """adam""" +461 5 training_loop """owa""" +461 5 negative_sampler """basic""" +461 5 evaluator """rankbased""" +461 6 dataset """fb15k237""" +461 6 model """proje""" +461 6 loss """nssa""" +461 6 regularizer """no""" +461 6 optimizer """adam""" +461 6 training_loop """owa""" +461 6 negative_sampler """basic""" +461 6 evaluator """rankbased""" +461 7 dataset """fb15k237""" +461 7 model """proje""" +461 7 loss """nssa""" +461 7 regularizer """no""" +461 7 optimizer """adam""" +461 7 training_loop """owa""" +461 7 negative_sampler """basic""" +461 7 evaluator """rankbased""" +461 8 dataset """fb15k237""" +461 8 model """proje""" +461 8 loss """nssa""" +461 8 regularizer """no""" +461 8 optimizer """adam""" +461 8 training_loop """owa""" +461 8 negative_sampler """basic""" +461 8 evaluator """rankbased""" +461 9 dataset """fb15k237""" +461 9 model """proje""" 
+461 9 loss """nssa""" +461 9 regularizer """no""" +461 9 optimizer """adam""" +461 9 training_loop """owa""" +461 9 negative_sampler """basic""" +461 9 evaluator """rankbased""" +461 10 dataset """fb15k237""" +461 10 model """proje""" +461 10 loss """nssa""" +461 10 regularizer """no""" +461 10 optimizer """adam""" +461 10 training_loop """owa""" +461 10 negative_sampler """basic""" +461 10 evaluator """rankbased""" +461 11 dataset """fb15k237""" +461 11 model """proje""" +461 11 loss """nssa""" +461 11 regularizer """no""" +461 11 optimizer """adam""" +461 11 training_loop """owa""" +461 11 negative_sampler """basic""" +461 11 evaluator """rankbased""" +461 12 dataset """fb15k237""" +461 12 model """proje""" +461 12 loss """nssa""" +461 12 regularizer """no""" +461 12 optimizer """adam""" +461 12 training_loop """owa""" +461 12 negative_sampler """basic""" +461 12 evaluator """rankbased""" +461 13 dataset """fb15k237""" +461 13 model """proje""" +461 13 loss """nssa""" +461 13 regularizer """no""" +461 13 optimizer """adam""" +461 13 training_loop """owa""" +461 13 negative_sampler """basic""" +461 13 evaluator """rankbased""" +461 14 dataset """fb15k237""" +461 14 model """proje""" +461 14 loss """nssa""" +461 14 regularizer """no""" +461 14 optimizer """adam""" +461 14 training_loop """owa""" +461 14 negative_sampler """basic""" +461 14 evaluator """rankbased""" +461 15 dataset """fb15k237""" +461 15 model """proje""" +461 15 loss """nssa""" +461 15 regularizer """no""" +461 15 optimizer """adam""" +461 15 training_loop """owa""" +461 15 negative_sampler """basic""" +461 15 evaluator """rankbased""" +461 16 dataset """fb15k237""" +461 16 model """proje""" +461 16 loss """nssa""" +461 16 regularizer """no""" +461 16 optimizer """adam""" +461 16 training_loop """owa""" +461 16 negative_sampler """basic""" +461 16 evaluator """rankbased""" +461 17 dataset """fb15k237""" +461 17 model """proje""" +461 17 loss """nssa""" +461 17 regularizer """no""" +461 17 
optimizer """adam""" +461 17 training_loop """owa""" +461 17 negative_sampler """basic""" +461 17 evaluator """rankbased""" +461 18 dataset """fb15k237""" +461 18 model """proje""" +461 18 loss """nssa""" +461 18 regularizer """no""" +461 18 optimizer """adam""" +461 18 training_loop """owa""" +461 18 negative_sampler """basic""" +461 18 evaluator """rankbased""" +461 19 dataset """fb15k237""" +461 19 model """proje""" +461 19 loss """nssa""" +461 19 regularizer """no""" +461 19 optimizer """adam""" +461 19 training_loop """owa""" +461 19 negative_sampler """basic""" +461 19 evaluator """rankbased""" +461 20 dataset """fb15k237""" +461 20 model """proje""" +461 20 loss """nssa""" +461 20 regularizer """no""" +461 20 optimizer """adam""" +461 20 training_loop """owa""" +461 20 negative_sampler """basic""" +461 20 evaluator """rankbased""" +461 21 dataset """fb15k237""" +461 21 model """proje""" +461 21 loss """nssa""" +461 21 regularizer """no""" +461 21 optimizer """adam""" +461 21 training_loop """owa""" +461 21 negative_sampler """basic""" +461 21 evaluator """rankbased""" +461 22 dataset """fb15k237""" +461 22 model """proje""" +461 22 loss """nssa""" +461 22 regularizer """no""" +461 22 optimizer """adam""" +461 22 training_loop """owa""" +461 22 negative_sampler """basic""" +461 22 evaluator """rankbased""" +462 1 model.embedding_dim 2.0 +462 1 loss.margin 12.31988228248743 +462 1 loss.adversarial_temperature 0.45840830742114236 +462 1 optimizer.lr 0.0014164032046584724 +462 1 negative_sampler.num_negs_per_pos 58.0 +462 1 training.batch_size 0.0 +462 2 model.embedding_dim 0.0 +462 2 loss.margin 26.401240578346673 +462 2 loss.adversarial_temperature 0.945993279868638 +462 2 optimizer.lr 0.009678887180289211 +462 2 negative_sampler.num_negs_per_pos 55.0 +462 2 training.batch_size 2.0 +462 3 model.embedding_dim 2.0 +462 3 loss.margin 26.47728660337875 +462 3 loss.adversarial_temperature 0.6218538043783689 +462 3 optimizer.lr 0.0015919947930873337 +462 3 
negative_sampler.num_negs_per_pos 23.0 +462 3 training.batch_size 1.0 +462 4 model.embedding_dim 1.0 +462 4 loss.margin 15.998450952177771 +462 4 loss.adversarial_temperature 0.4037958206759406 +462 4 optimizer.lr 0.001645116410490138 +462 4 negative_sampler.num_negs_per_pos 20.0 +462 4 training.batch_size 2.0 +462 5 model.embedding_dim 0.0 +462 5 loss.margin 26.367204557233034 +462 5 loss.adversarial_temperature 0.7919033140915513 +462 5 optimizer.lr 0.0013428541068665704 +462 5 negative_sampler.num_negs_per_pos 90.0 +462 5 training.batch_size 2.0 +462 6 model.embedding_dim 1.0 +462 6 loss.margin 3.546895225739316 +462 6 loss.adversarial_temperature 0.2901485962134696 +462 6 optimizer.lr 0.008315097432732843 +462 6 negative_sampler.num_negs_per_pos 45.0 +462 6 training.batch_size 0.0 +462 7 model.embedding_dim 1.0 +462 7 loss.margin 15.315369452968325 +462 7 loss.adversarial_temperature 0.2001097891990295 +462 7 optimizer.lr 0.06972968728687014 +462 7 negative_sampler.num_negs_per_pos 91.0 +462 7 training.batch_size 1.0 +462 8 model.embedding_dim 2.0 +462 8 loss.margin 18.332893527192553 +462 8 loss.adversarial_temperature 0.8688930416660011 +462 8 optimizer.lr 0.0010231232770177546 +462 8 negative_sampler.num_negs_per_pos 82.0 +462 8 training.batch_size 0.0 +462 9 model.embedding_dim 2.0 +462 9 loss.margin 20.296692071506033 +462 9 loss.adversarial_temperature 0.6313146164848085 +462 9 optimizer.lr 0.004622952712284932 +462 9 negative_sampler.num_negs_per_pos 24.0 +462 9 training.batch_size 2.0 +462 10 model.embedding_dim 0.0 +462 10 loss.margin 7.645264353185415 +462 10 loss.adversarial_temperature 0.3956858453911781 +462 10 optimizer.lr 0.0010615692949076342 +462 10 negative_sampler.num_negs_per_pos 2.0 +462 10 training.batch_size 1.0 +462 11 model.embedding_dim 0.0 +462 11 loss.margin 24.804422948235846 +462 11 loss.adversarial_temperature 0.8889405947996877 +462 11 optimizer.lr 0.0015560188540235506 +462 11 negative_sampler.num_negs_per_pos 73.0 +462 11 
training.batch_size 0.0 +462 12 model.embedding_dim 1.0 +462 12 loss.margin 26.956268887656844 +462 12 loss.adversarial_temperature 0.6157336391462725 +462 12 optimizer.lr 0.015284600779634379 +462 12 negative_sampler.num_negs_per_pos 53.0 +462 12 training.batch_size 2.0 +462 13 model.embedding_dim 2.0 +462 13 loss.margin 2.6046630421471764 +462 13 loss.adversarial_temperature 0.953254952741606 +462 13 optimizer.lr 0.016434078760610628 +462 13 negative_sampler.num_negs_per_pos 0.0 +462 13 training.batch_size 1.0 +462 14 model.embedding_dim 0.0 +462 14 loss.margin 26.471130512716478 +462 14 loss.adversarial_temperature 0.6825834016559367 +462 14 optimizer.lr 0.03074447469555166 +462 14 negative_sampler.num_negs_per_pos 12.0 +462 14 training.batch_size 0.0 +462 15 model.embedding_dim 2.0 +462 15 loss.margin 17.521385971524722 +462 15 loss.adversarial_temperature 0.7142093353028521 +462 15 optimizer.lr 0.03401440978533787 +462 15 negative_sampler.num_negs_per_pos 8.0 +462 15 training.batch_size 1.0 +462 16 model.embedding_dim 0.0 +462 16 loss.margin 10.649196375764046 +462 16 loss.adversarial_temperature 0.16494626158627776 +462 16 optimizer.lr 0.0035407295815104082 +462 16 negative_sampler.num_negs_per_pos 39.0 +462 16 training.batch_size 1.0 +462 17 model.embedding_dim 0.0 +462 17 loss.margin 4.022702123787806 +462 17 loss.adversarial_temperature 0.4601897133315628 +462 17 optimizer.lr 0.08642308512774474 +462 17 negative_sampler.num_negs_per_pos 61.0 +462 17 training.batch_size 2.0 +462 18 model.embedding_dim 1.0 +462 18 loss.margin 11.664683400238001 +462 18 loss.adversarial_temperature 0.4405514379172299 +462 18 optimizer.lr 0.001441273735607314 +462 18 negative_sampler.num_negs_per_pos 88.0 +462 18 training.batch_size 2.0 +462 19 model.embedding_dim 1.0 +462 19 loss.margin 6.2980552873585465 +462 19 loss.adversarial_temperature 0.32471300036367984 +462 19 optimizer.lr 0.0018236252895563276 +462 19 negative_sampler.num_negs_per_pos 45.0 +462 19 
training.batch_size 0.0 +462 20 model.embedding_dim 1.0 +462 20 loss.margin 18.937498155235108 +462 20 loss.adversarial_temperature 0.9487549599703617 +462 20 optimizer.lr 0.09780741800540968 +462 20 negative_sampler.num_negs_per_pos 31.0 +462 20 training.batch_size 1.0 +462 21 model.embedding_dim 0.0 +462 21 loss.margin 13.43483727861248 +462 21 loss.adversarial_temperature 0.9180005861496658 +462 21 optimizer.lr 0.057530391814185754 +462 21 negative_sampler.num_negs_per_pos 62.0 +462 21 training.batch_size 1.0 +462 22 model.embedding_dim 1.0 +462 22 loss.margin 20.03097406270448 +462 22 loss.adversarial_temperature 0.203263410025967 +462 22 optimizer.lr 0.003861849537597415 +462 22 negative_sampler.num_negs_per_pos 5.0 +462 22 training.batch_size 2.0 +462 23 model.embedding_dim 0.0 +462 23 loss.margin 10.378426245652419 +462 23 loss.adversarial_temperature 0.6174335202534221 +462 23 optimizer.lr 0.008181387096249888 +462 23 negative_sampler.num_negs_per_pos 47.0 +462 23 training.batch_size 1.0 +462 24 model.embedding_dim 0.0 +462 24 loss.margin 12.263100041314281 +462 24 loss.adversarial_temperature 0.810703357631204 +462 24 optimizer.lr 0.01254475568067002 +462 24 negative_sampler.num_negs_per_pos 0.0 +462 24 training.batch_size 0.0 +462 25 model.embedding_dim 2.0 +462 25 loss.margin 24.26735569514245 +462 25 loss.adversarial_temperature 0.9751840850725662 +462 25 optimizer.lr 0.008867486685495323 +462 25 negative_sampler.num_negs_per_pos 17.0 +462 25 training.batch_size 2.0 +462 26 model.embedding_dim 0.0 +462 26 loss.margin 11.584979783418472 +462 26 loss.adversarial_temperature 0.7111487674256702 +462 26 optimizer.lr 0.004901855068467264 +462 26 negative_sampler.num_negs_per_pos 18.0 +462 26 training.batch_size 0.0 +462 27 model.embedding_dim 2.0 +462 27 loss.margin 2.7792586962856407 +462 27 loss.adversarial_temperature 0.4837858672236312 +462 27 optimizer.lr 0.0014211229565288144 +462 27 negative_sampler.num_negs_per_pos 25.0 +462 27 training.batch_size 1.0 
+462 28 model.embedding_dim 0.0 +462 28 loss.margin 9.375719631884403 +462 28 loss.adversarial_temperature 0.8431850405135981 +462 28 optimizer.lr 0.009250396403970771 +462 28 negative_sampler.num_negs_per_pos 2.0 +462 28 training.batch_size 2.0 +462 29 model.embedding_dim 0.0 +462 29 loss.margin 3.5400102468135994 +462 29 loss.adversarial_temperature 0.4707539172121208 +462 29 optimizer.lr 0.012878716244399735 +462 29 negative_sampler.num_negs_per_pos 72.0 +462 29 training.batch_size 2.0 +462 30 model.embedding_dim 2.0 +462 30 loss.margin 1.0492245905807516 +462 30 loss.adversarial_temperature 0.4320409031560112 +462 30 optimizer.lr 0.027307439276523078 +462 30 negative_sampler.num_negs_per_pos 57.0 +462 30 training.batch_size 2.0 +462 31 model.embedding_dim 0.0 +462 31 loss.margin 14.06207553017501 +462 31 loss.adversarial_temperature 0.23706062097264755 +462 31 optimizer.lr 0.011730514566557118 +462 31 negative_sampler.num_negs_per_pos 37.0 +462 31 training.batch_size 1.0 +462 32 model.embedding_dim 1.0 +462 32 loss.margin 3.3338740339331014 +462 32 loss.adversarial_temperature 0.37121933676587093 +462 32 optimizer.lr 0.0013409606233116887 +462 32 negative_sampler.num_negs_per_pos 45.0 +462 32 training.batch_size 2.0 +462 33 model.embedding_dim 1.0 +462 33 loss.margin 4.9168844067537165 +462 33 loss.adversarial_temperature 0.8444875779119344 +462 33 optimizer.lr 0.0011889868779262707 +462 33 negative_sampler.num_negs_per_pos 0.0 +462 33 training.batch_size 0.0 +462 34 model.embedding_dim 1.0 +462 34 loss.margin 24.389411465396556 +462 34 loss.adversarial_temperature 0.608648100202839 +462 34 optimizer.lr 0.05811495053276324 +462 34 negative_sampler.num_negs_per_pos 27.0 +462 34 training.batch_size 1.0 +462 35 model.embedding_dim 2.0 +462 35 loss.margin 1.202867905516394 +462 35 loss.adversarial_temperature 0.6653105341798585 +462 35 optimizer.lr 0.009604968505536823 +462 35 negative_sampler.num_negs_per_pos 56.0 +462 35 training.batch_size 1.0 +462 36 
model.embedding_dim 1.0 +462 36 loss.margin 13.522675018097951 +462 36 loss.adversarial_temperature 0.8328685484884497 +462 36 optimizer.lr 0.031926935818057556 +462 36 negative_sampler.num_negs_per_pos 89.0 +462 36 training.batch_size 0.0 +462 37 model.embedding_dim 0.0 +462 37 loss.margin 1.1301587018783694 +462 37 loss.adversarial_temperature 0.21445644643169415 +462 37 optimizer.lr 0.07260619643752012 +462 37 negative_sampler.num_negs_per_pos 46.0 +462 37 training.batch_size 1.0 +462 38 model.embedding_dim 2.0 +462 38 loss.margin 13.79616493211942 +462 38 loss.adversarial_temperature 0.4480906923852408 +462 38 optimizer.lr 0.011921465827776458 +462 38 negative_sampler.num_negs_per_pos 72.0 +462 38 training.batch_size 2.0 +462 39 model.embedding_dim 2.0 +462 39 loss.margin 10.533909613105807 +462 39 loss.adversarial_temperature 0.6211836972925999 +462 39 optimizer.lr 0.0042031539421521005 +462 39 negative_sampler.num_negs_per_pos 60.0 +462 39 training.batch_size 0.0 +462 40 model.embedding_dim 0.0 +462 40 loss.margin 24.054960537186023 +462 40 loss.adversarial_temperature 0.3288713636101547 +462 40 optimizer.lr 0.09043991826842471 +462 40 negative_sampler.num_negs_per_pos 66.0 +462 40 training.batch_size 0.0 +462 41 model.embedding_dim 0.0 +462 41 loss.margin 27.215216483504353 +462 41 loss.adversarial_temperature 0.9951823659202662 +462 41 optimizer.lr 0.006845622853394148 +462 41 negative_sampler.num_negs_per_pos 15.0 +462 41 training.batch_size 2.0 +462 42 model.embedding_dim 2.0 +462 42 loss.margin 2.9185872991648862 +462 42 loss.adversarial_temperature 0.26464133496823283 +462 42 optimizer.lr 0.028575941273237357 +462 42 negative_sampler.num_negs_per_pos 96.0 +462 42 training.batch_size 2.0 +462 43 model.embedding_dim 0.0 +462 43 loss.margin 26.678211750146527 +462 43 loss.adversarial_temperature 0.36051189114962895 +462 43 optimizer.lr 0.05290309313730925 +462 43 negative_sampler.num_negs_per_pos 56.0 +462 43 training.batch_size 0.0 +462 44 
model.embedding_dim 1.0 +462 44 loss.margin 21.800503107452293 +462 44 loss.adversarial_temperature 0.2695912936367909 +462 44 optimizer.lr 0.02209524687829299 +462 44 negative_sampler.num_negs_per_pos 16.0 +462 44 training.batch_size 0.0 +462 45 model.embedding_dim 0.0 +462 45 loss.margin 2.4138905935095014 +462 45 loss.adversarial_temperature 0.9539716285376858 +462 45 optimizer.lr 0.012575144282542352 +462 45 negative_sampler.num_negs_per_pos 87.0 +462 45 training.batch_size 2.0 +462 46 model.embedding_dim 0.0 +462 46 loss.margin 14.1693351527022 +462 46 loss.adversarial_temperature 0.45036150110484413 +462 46 optimizer.lr 0.015137961415217213 +462 46 negative_sampler.num_negs_per_pos 79.0 +462 46 training.batch_size 0.0 +462 47 model.embedding_dim 2.0 +462 47 loss.margin 14.261466101785732 +462 47 loss.adversarial_temperature 0.23948058669144284 +462 47 optimizer.lr 0.0010702840712423533 +462 47 negative_sampler.num_negs_per_pos 37.0 +462 47 training.batch_size 2.0 +462 1 dataset """fb15k237""" +462 1 model """proje""" +462 1 loss """nssa""" +462 1 regularizer """no""" +462 1 optimizer """adam""" +462 1 training_loop """owa""" +462 1 negative_sampler """basic""" +462 1 evaluator """rankbased""" +462 2 dataset """fb15k237""" +462 2 model """proje""" +462 2 loss """nssa""" +462 2 regularizer """no""" +462 2 optimizer """adam""" +462 2 training_loop """owa""" +462 2 negative_sampler """basic""" +462 2 evaluator """rankbased""" +462 3 dataset """fb15k237""" +462 3 model """proje""" +462 3 loss """nssa""" +462 3 regularizer """no""" +462 3 optimizer """adam""" +462 3 training_loop """owa""" +462 3 negative_sampler """basic""" +462 3 evaluator """rankbased""" +462 4 dataset """fb15k237""" +462 4 model """proje""" +462 4 loss """nssa""" +462 4 regularizer """no""" +462 4 optimizer """adam""" +462 4 training_loop """owa""" +462 4 negative_sampler """basic""" +462 4 evaluator """rankbased""" +462 5 dataset """fb15k237""" +462 5 model """proje""" +462 5 loss """nssa""" 
+462 5 regularizer """no""" +462 5 optimizer """adam""" +462 5 training_loop """owa""" +462 5 negative_sampler """basic""" +462 5 evaluator """rankbased""" +462 6 dataset """fb15k237""" +462 6 model """proje""" +462 6 loss """nssa""" +462 6 regularizer """no""" +462 6 optimizer """adam""" +462 6 training_loop """owa""" +462 6 negative_sampler """basic""" +462 6 evaluator """rankbased""" +462 7 dataset """fb15k237""" +462 7 model """proje""" +462 7 loss """nssa""" +462 7 regularizer """no""" +462 7 optimizer """adam""" +462 7 training_loop """owa""" +462 7 negative_sampler """basic""" +462 7 evaluator """rankbased""" +462 8 dataset """fb15k237""" +462 8 model """proje""" +462 8 loss """nssa""" +462 8 regularizer """no""" +462 8 optimizer """adam""" +462 8 training_loop """owa""" +462 8 negative_sampler """basic""" +462 8 evaluator """rankbased""" +462 9 dataset """fb15k237""" +462 9 model """proje""" +462 9 loss """nssa""" +462 9 regularizer """no""" +462 9 optimizer """adam""" +462 9 training_loop """owa""" +462 9 negative_sampler """basic""" +462 9 evaluator """rankbased""" +462 10 dataset """fb15k237""" +462 10 model """proje""" +462 10 loss """nssa""" +462 10 regularizer """no""" +462 10 optimizer """adam""" +462 10 training_loop """owa""" +462 10 negative_sampler """basic""" +462 10 evaluator """rankbased""" +462 11 dataset """fb15k237""" +462 11 model """proje""" +462 11 loss """nssa""" +462 11 regularizer """no""" +462 11 optimizer """adam""" +462 11 training_loop """owa""" +462 11 negative_sampler """basic""" +462 11 evaluator """rankbased""" +462 12 dataset """fb15k237""" +462 12 model """proje""" +462 12 loss """nssa""" +462 12 regularizer """no""" +462 12 optimizer """adam""" +462 12 training_loop """owa""" +462 12 negative_sampler """basic""" +462 12 evaluator """rankbased""" +462 13 dataset """fb15k237""" +462 13 model """proje""" +462 13 loss """nssa""" +462 13 regularizer """no""" +462 13 optimizer """adam""" +462 13 training_loop """owa""" +462 13 
negative_sampler """basic""" +462 13 evaluator """rankbased""" +462 14 dataset """fb15k237""" +462 14 model """proje""" +462 14 loss """nssa""" +462 14 regularizer """no""" +462 14 optimizer """adam""" +462 14 training_loop """owa""" +462 14 negative_sampler """basic""" +462 14 evaluator """rankbased""" +462 15 dataset """fb15k237""" +462 15 model """proje""" +462 15 loss """nssa""" +462 15 regularizer """no""" +462 15 optimizer """adam""" +462 15 training_loop """owa""" +462 15 negative_sampler """basic""" +462 15 evaluator """rankbased""" +462 16 dataset """fb15k237""" +462 16 model """proje""" +462 16 loss """nssa""" +462 16 regularizer """no""" +462 16 optimizer """adam""" +462 16 training_loop """owa""" +462 16 negative_sampler """basic""" +462 16 evaluator """rankbased""" +462 17 dataset """fb15k237""" +462 17 model """proje""" +462 17 loss """nssa""" +462 17 regularizer """no""" +462 17 optimizer """adam""" +462 17 training_loop """owa""" +462 17 negative_sampler """basic""" +462 17 evaluator """rankbased""" +462 18 dataset """fb15k237""" +462 18 model """proje""" +462 18 loss """nssa""" +462 18 regularizer """no""" +462 18 optimizer """adam""" +462 18 training_loop """owa""" +462 18 negative_sampler """basic""" +462 18 evaluator """rankbased""" +462 19 dataset """fb15k237""" +462 19 model """proje""" +462 19 loss """nssa""" +462 19 regularizer """no""" +462 19 optimizer """adam""" +462 19 training_loop """owa""" +462 19 negative_sampler """basic""" +462 19 evaluator """rankbased""" +462 20 dataset """fb15k237""" +462 20 model """proje""" +462 20 loss """nssa""" +462 20 regularizer """no""" +462 20 optimizer """adam""" +462 20 training_loop """owa""" +462 20 negative_sampler """basic""" +462 20 evaluator """rankbased""" +462 21 dataset """fb15k237""" +462 21 model """proje""" +462 21 loss """nssa""" +462 21 regularizer """no""" +462 21 optimizer """adam""" +462 21 training_loop """owa""" +462 21 negative_sampler """basic""" +462 21 evaluator """rankbased""" 
+462 22 dataset """fb15k237""" +462 22 model """proje""" +462 22 loss """nssa""" +462 22 regularizer """no""" +462 22 optimizer """adam""" +462 22 training_loop """owa""" +462 22 negative_sampler """basic""" +462 22 evaluator """rankbased""" +462 23 dataset """fb15k237""" +462 23 model """proje""" +462 23 loss """nssa""" +462 23 regularizer """no""" +462 23 optimizer """adam""" +462 23 training_loop """owa""" +462 23 negative_sampler """basic""" +462 23 evaluator """rankbased""" +462 24 dataset """fb15k237""" +462 24 model """proje""" +462 24 loss """nssa""" +462 24 regularizer """no""" +462 24 optimizer """adam""" +462 24 training_loop """owa""" +462 24 negative_sampler """basic""" +462 24 evaluator """rankbased""" +462 25 dataset """fb15k237""" +462 25 model """proje""" +462 25 loss """nssa""" +462 25 regularizer """no""" +462 25 optimizer """adam""" +462 25 training_loop """owa""" +462 25 negative_sampler """basic""" +462 25 evaluator """rankbased""" +462 26 dataset """fb15k237""" +462 26 model """proje""" +462 26 loss """nssa""" +462 26 regularizer """no""" +462 26 optimizer """adam""" +462 26 training_loop """owa""" +462 26 negative_sampler """basic""" +462 26 evaluator """rankbased""" +462 27 dataset """fb15k237""" +462 27 model """proje""" +462 27 loss """nssa""" +462 27 regularizer """no""" +462 27 optimizer """adam""" +462 27 training_loop """owa""" +462 27 negative_sampler """basic""" +462 27 evaluator """rankbased""" +462 28 dataset """fb15k237""" +462 28 model """proje""" +462 28 loss """nssa""" +462 28 regularizer """no""" +462 28 optimizer """adam""" +462 28 training_loop """owa""" +462 28 negative_sampler """basic""" +462 28 evaluator """rankbased""" +462 29 dataset """fb15k237""" +462 29 model """proje""" +462 29 loss """nssa""" +462 29 regularizer """no""" +462 29 optimizer """adam""" +462 29 training_loop """owa""" +462 29 negative_sampler """basic""" +462 29 evaluator """rankbased""" +462 30 dataset """fb15k237""" +462 30 model """proje""" +462 
30 loss """nssa""" +462 30 regularizer """no""" +462 30 optimizer """adam""" +462 30 training_loop """owa""" +462 30 negative_sampler """basic""" +462 30 evaluator """rankbased""" +462 31 dataset """fb15k237""" +462 31 model """proje""" +462 31 loss """nssa""" +462 31 regularizer """no""" +462 31 optimizer """adam""" +462 31 training_loop """owa""" +462 31 negative_sampler """basic""" +462 31 evaluator """rankbased""" +462 32 dataset """fb15k237""" +462 32 model """proje""" +462 32 loss """nssa""" +462 32 regularizer """no""" +462 32 optimizer """adam""" +462 32 training_loop """owa""" +462 32 negative_sampler """basic""" +462 32 evaluator """rankbased""" +462 33 dataset """fb15k237""" +462 33 model """proje""" +462 33 loss """nssa""" +462 33 regularizer """no""" +462 33 optimizer """adam""" +462 33 training_loop """owa""" +462 33 negative_sampler """basic""" +462 33 evaluator """rankbased""" +462 34 dataset """fb15k237""" +462 34 model """proje""" +462 34 loss """nssa""" +462 34 regularizer """no""" +462 34 optimizer """adam""" +462 34 training_loop """owa""" +462 34 negative_sampler """basic""" +462 34 evaluator """rankbased""" +462 35 dataset """fb15k237""" +462 35 model """proje""" +462 35 loss """nssa""" +462 35 regularizer """no""" +462 35 optimizer """adam""" +462 35 training_loop """owa""" +462 35 negative_sampler """basic""" +462 35 evaluator """rankbased""" +462 36 dataset """fb15k237""" +462 36 model """proje""" +462 36 loss """nssa""" +462 36 regularizer """no""" +462 36 optimizer """adam""" +462 36 training_loop """owa""" +462 36 negative_sampler """basic""" +462 36 evaluator """rankbased""" +462 37 dataset """fb15k237""" +462 37 model """proje""" +462 37 loss """nssa""" +462 37 regularizer """no""" +462 37 optimizer """adam""" +462 37 training_loop """owa""" +462 37 negative_sampler """basic""" +462 37 evaluator """rankbased""" +462 38 dataset """fb15k237""" +462 38 model """proje""" +462 38 loss """nssa""" +462 38 regularizer """no""" +462 38 
optimizer """adam""" +462 38 training_loop """owa""" +462 38 negative_sampler """basic""" +462 38 evaluator """rankbased""" +462 39 dataset """fb15k237""" +462 39 model """proje""" +462 39 loss """nssa""" +462 39 regularizer """no""" +462 39 optimizer """adam""" +462 39 training_loop """owa""" +462 39 negative_sampler """basic""" +462 39 evaluator """rankbased""" +462 40 dataset """fb15k237""" +462 40 model """proje""" +462 40 loss """nssa""" +462 40 regularizer """no""" +462 40 optimizer """adam""" +462 40 training_loop """owa""" +462 40 negative_sampler """basic""" +462 40 evaluator """rankbased""" +462 41 dataset """fb15k237""" +462 41 model """proje""" +462 41 loss """nssa""" +462 41 regularizer """no""" +462 41 optimizer """adam""" +462 41 training_loop """owa""" +462 41 negative_sampler """basic""" +462 41 evaluator """rankbased""" +462 42 dataset """fb15k237""" +462 42 model """proje""" +462 42 loss """nssa""" +462 42 regularizer """no""" +462 42 optimizer """adam""" +462 42 training_loop """owa""" +462 42 negative_sampler """basic""" +462 42 evaluator """rankbased""" +462 43 dataset """fb15k237""" +462 43 model """proje""" +462 43 loss """nssa""" +462 43 regularizer """no""" +462 43 optimizer """adam""" +462 43 training_loop """owa""" +462 43 negative_sampler """basic""" +462 43 evaluator """rankbased""" +462 44 dataset """fb15k237""" +462 44 model """proje""" +462 44 loss """nssa""" +462 44 regularizer """no""" +462 44 optimizer """adam""" +462 44 training_loop """owa""" +462 44 negative_sampler """basic""" +462 44 evaluator """rankbased""" +462 45 dataset """fb15k237""" +462 45 model """proje""" +462 45 loss """nssa""" +462 45 regularizer """no""" +462 45 optimizer """adam""" +462 45 training_loop """owa""" +462 45 negative_sampler """basic""" +462 45 evaluator """rankbased""" +462 46 dataset """fb15k237""" +462 46 model """proje""" +462 46 loss """nssa""" +462 46 regularizer """no""" +462 46 optimizer """adam""" +462 46 training_loop """owa""" +462 46 
negative_sampler """basic""" +462 46 evaluator """rankbased""" +462 47 dataset """fb15k237""" +462 47 model """proje""" +462 47 loss """nssa""" +462 47 regularizer """no""" +462 47 optimizer """adam""" +462 47 training_loop """owa""" +462 47 negative_sampler """basic""" +462 47 evaluator """rankbased""" +463 1 model.embedding_dim 0.0 +463 1 optimizer.lr 0.0018568730709054779 +463 1 training.batch_size 2.0 +463 1 training.label_smoothing 0.026685783241049286 +463 2 model.embedding_dim 2.0 +463 2 optimizer.lr 0.009302402651142471 +463 2 training.batch_size 1.0 +463 2 training.label_smoothing 0.015737664037981106 +463 3 model.embedding_dim 1.0 +463 3 optimizer.lr 0.01983339834193438 +463 3 training.batch_size 0.0 +463 3 training.label_smoothing 0.14242704943971768 +463 4 model.embedding_dim 0.0 +463 4 optimizer.lr 0.0046336426082671175 +463 4 training.batch_size 1.0 +463 4 training.label_smoothing 0.15105993273697577 +463 5 model.embedding_dim 1.0 +463 5 optimizer.lr 0.03126860041717077 +463 5 training.batch_size 0.0 +463 5 training.label_smoothing 0.033112460168186884 +463 6 model.embedding_dim 2.0 +463 6 optimizer.lr 0.007789887560499459 +463 6 training.batch_size 1.0 +463 6 training.label_smoothing 0.0036770575242404506 +463 7 model.embedding_dim 1.0 +463 7 optimizer.lr 0.003883026967294654 +463 7 training.batch_size 2.0 +463 7 training.label_smoothing 0.17118307372663913 +463 8 model.embedding_dim 0.0 +463 8 optimizer.lr 0.002028292878637441 +463 8 training.batch_size 0.0 +463 8 training.label_smoothing 0.26776413448855424 +463 9 model.embedding_dim 1.0 +463 9 optimizer.lr 0.005118027078367382 +463 9 training.batch_size 2.0 +463 9 training.label_smoothing 0.06213197251789341 +463 10 model.embedding_dim 1.0 +463 10 optimizer.lr 0.0037440082495071724 +463 10 training.batch_size 1.0 +463 10 training.label_smoothing 0.09904026173158868 +463 11 model.embedding_dim 2.0 +463 11 optimizer.lr 0.005014805580584848 +463 11 training.batch_size 1.0 +463 11 
training.label_smoothing 0.5262594189219801 +463 12 model.embedding_dim 0.0 +463 12 optimizer.lr 0.022610967920716183 +463 12 training.batch_size 2.0 +463 12 training.label_smoothing 0.043879456870386725 +463 13 model.embedding_dim 0.0 +463 13 optimizer.lr 0.011503682386052828 +463 13 training.batch_size 0.0 +463 13 training.label_smoothing 0.18277918094660123 +463 14 model.embedding_dim 2.0 +463 14 optimizer.lr 0.05560090675211978 +463 14 training.batch_size 1.0 +463 14 training.label_smoothing 0.004963908117622989 +463 15 model.embedding_dim 2.0 +463 15 optimizer.lr 0.005391557630238048 +463 15 training.batch_size 0.0 +463 15 training.label_smoothing 0.016975388439582646 +463 16 model.embedding_dim 2.0 +463 16 optimizer.lr 0.002160607858244647 +463 16 training.batch_size 2.0 +463 16 training.label_smoothing 0.07735924440446786 +463 17 model.embedding_dim 0.0 +463 17 optimizer.lr 0.027085358611505836 +463 17 training.batch_size 1.0 +463 17 training.label_smoothing 0.008529279012579728 +463 18 model.embedding_dim 2.0 +463 18 optimizer.lr 0.05525591630500537 +463 18 training.batch_size 2.0 +463 18 training.label_smoothing 0.251237080807743 +463 19 model.embedding_dim 0.0 +463 19 optimizer.lr 0.00587707325857725 +463 19 training.batch_size 0.0 +463 19 training.label_smoothing 0.010822897901721319 +463 1 dataset """fb15k237""" +463 1 model """proje""" +463 1 loss """crossentropy""" +463 1 regularizer """no""" +463 1 optimizer """adam""" +463 1 training_loop """lcwa""" +463 1 evaluator """rankbased""" +463 2 dataset """fb15k237""" +463 2 model """proje""" +463 2 loss """crossentropy""" +463 2 regularizer """no""" +463 2 optimizer """adam""" +463 2 training_loop """lcwa""" +463 2 evaluator """rankbased""" +463 3 dataset """fb15k237""" +463 3 model """proje""" +463 3 loss """crossentropy""" +463 3 regularizer """no""" +463 3 optimizer """adam""" +463 3 training_loop """lcwa""" +463 3 evaluator """rankbased""" +463 4 dataset """fb15k237""" +463 4 model """proje""" +463 4 
loss """crossentropy""" +463 4 regularizer """no""" +463 4 optimizer """adam""" +463 4 training_loop """lcwa""" +463 4 evaluator """rankbased""" +463 5 dataset """fb15k237""" +463 5 model """proje""" +463 5 loss """crossentropy""" +463 5 regularizer """no""" +463 5 optimizer """adam""" +463 5 training_loop """lcwa""" +463 5 evaluator """rankbased""" +463 6 dataset """fb15k237""" +463 6 model """proje""" +463 6 loss """crossentropy""" +463 6 regularizer """no""" +463 6 optimizer """adam""" +463 6 training_loop """lcwa""" +463 6 evaluator """rankbased""" +463 7 dataset """fb15k237""" +463 7 model """proje""" +463 7 loss """crossentropy""" +463 7 regularizer """no""" +463 7 optimizer """adam""" +463 7 training_loop """lcwa""" +463 7 evaluator """rankbased""" +463 8 dataset """fb15k237""" +463 8 model """proje""" +463 8 loss """crossentropy""" +463 8 regularizer """no""" +463 8 optimizer """adam""" +463 8 training_loop """lcwa""" +463 8 evaluator """rankbased""" +463 9 dataset """fb15k237""" +463 9 model """proje""" +463 9 loss """crossentropy""" +463 9 regularizer """no""" +463 9 optimizer """adam""" +463 9 training_loop """lcwa""" +463 9 evaluator """rankbased""" +463 10 dataset """fb15k237""" +463 10 model """proje""" +463 10 loss """crossentropy""" +463 10 regularizer """no""" +463 10 optimizer """adam""" +463 10 training_loop """lcwa""" +463 10 evaluator """rankbased""" +463 11 dataset """fb15k237""" +463 11 model """proje""" +463 11 loss """crossentropy""" +463 11 regularizer """no""" +463 11 optimizer """adam""" +463 11 training_loop """lcwa""" +463 11 evaluator """rankbased""" +463 12 dataset """fb15k237""" +463 12 model """proje""" +463 12 loss """crossentropy""" +463 12 regularizer """no""" +463 12 optimizer """adam""" +463 12 training_loop """lcwa""" +463 12 evaluator """rankbased""" +463 13 dataset """fb15k237""" +463 13 model """proje""" +463 13 loss """crossentropy""" +463 13 regularizer """no""" +463 13 optimizer """adam""" +463 13 training_loop 
"""lcwa""" +463 13 evaluator """rankbased""" +463 14 dataset """fb15k237""" +463 14 model """proje""" +463 14 loss """crossentropy""" +463 14 regularizer """no""" +463 14 optimizer """adam""" +463 14 training_loop """lcwa""" +463 14 evaluator """rankbased""" +463 15 dataset """fb15k237""" +463 15 model """proje""" +463 15 loss """crossentropy""" +463 15 regularizer """no""" +463 15 optimizer """adam""" +463 15 training_loop """lcwa""" +463 15 evaluator """rankbased""" +463 16 dataset """fb15k237""" +463 16 model """proje""" +463 16 loss """crossentropy""" +463 16 regularizer """no""" +463 16 optimizer """adam""" +463 16 training_loop """lcwa""" +463 16 evaluator """rankbased""" +463 17 dataset """fb15k237""" +463 17 model """proje""" +463 17 loss """crossentropy""" +463 17 regularizer """no""" +463 17 optimizer """adam""" +463 17 training_loop """lcwa""" +463 17 evaluator """rankbased""" +463 18 dataset """fb15k237""" +463 18 model """proje""" +463 18 loss """crossentropy""" +463 18 regularizer """no""" +463 18 optimizer """adam""" +463 18 training_loop """lcwa""" +463 18 evaluator """rankbased""" +463 19 dataset """fb15k237""" +463 19 model """proje""" +463 19 loss """crossentropy""" +463 19 regularizer """no""" +463 19 optimizer """adam""" +463 19 training_loop """lcwa""" +463 19 evaluator """rankbased""" +464 1 model.embedding_dim 0.0 +464 1 optimizer.lr 0.022385243238866634 +464 1 training.batch_size 2.0 +464 1 training.label_smoothing 0.03886051247567037 +464 2 model.embedding_dim 0.0 +464 2 optimizer.lr 0.0019717690425806902 +464 2 training.batch_size 0.0 +464 2 training.label_smoothing 0.024762995155291765 +464 3 model.embedding_dim 2.0 +464 3 optimizer.lr 0.025681456842727927 +464 3 training.batch_size 2.0 +464 3 training.label_smoothing 0.015119559285371932 +464 4 model.embedding_dim 2.0 +464 4 optimizer.lr 0.001228500803042699 +464 4 training.batch_size 2.0 +464 4 training.label_smoothing 0.41830528803907713 +464 5 model.embedding_dim 2.0 +464 5 
optimizer.lr 0.006409191362463005 +464 5 training.batch_size 2.0 +464 5 training.label_smoothing 0.014149540914689368 +464 6 model.embedding_dim 1.0 +464 6 optimizer.lr 0.008013094841516775 +464 6 training.batch_size 0.0 +464 6 training.label_smoothing 0.23352580119564967 +464 7 model.embedding_dim 2.0 +464 7 optimizer.lr 0.07028846582494562 +464 7 training.batch_size 1.0 +464 7 training.label_smoothing 0.006598122283551724 +464 8 model.embedding_dim 2.0 +464 8 optimizer.lr 0.04657488908510043 +464 8 training.batch_size 0.0 +464 8 training.label_smoothing 0.013133121459667375 +464 9 model.embedding_dim 2.0 +464 9 optimizer.lr 0.0015115133546269226 +464 9 training.batch_size 1.0 +464 9 training.label_smoothing 0.004730844123388325 +464 10 model.embedding_dim 2.0 +464 10 optimizer.lr 0.0059652838606669275 +464 10 training.batch_size 1.0 +464 10 training.label_smoothing 0.002052990085477497 +464 11 model.embedding_dim 1.0 +464 11 optimizer.lr 0.005221456442956803 +464 11 training.batch_size 1.0 +464 11 training.label_smoothing 0.07201223380227605 +464 12 model.embedding_dim 0.0 +464 12 optimizer.lr 0.0010361865932659946 +464 12 training.batch_size 2.0 +464 12 training.label_smoothing 0.1789369226437397 +464 13 model.embedding_dim 1.0 +464 13 optimizer.lr 0.017735138422173707 +464 13 training.batch_size 1.0 +464 13 training.label_smoothing 0.2766469929789948 +464 14 model.embedding_dim 0.0 +464 14 optimizer.lr 0.0024005357388691612 +464 14 training.batch_size 0.0 +464 14 training.label_smoothing 0.22485966192613852 +464 15 model.embedding_dim 1.0 +464 15 optimizer.lr 0.05040115735755774 +464 15 training.batch_size 2.0 +464 15 training.label_smoothing 0.07078853011341263 +464 16 model.embedding_dim 2.0 +464 16 optimizer.lr 0.0022301213784101685 +464 16 training.batch_size 2.0 +464 16 training.label_smoothing 0.013130888399288084 +464 17 model.embedding_dim 2.0 +464 17 optimizer.lr 0.0946772834714714 +464 17 training.batch_size 1.0 +464 17 training.label_smoothing 
0.06537194972188745 +464 18 model.embedding_dim 1.0 +464 18 optimizer.lr 0.016284817767804848 +464 18 training.batch_size 1.0 +464 18 training.label_smoothing 0.7616636647312586 +464 19 model.embedding_dim 0.0 +464 19 optimizer.lr 0.0015827528058703552 +464 19 training.batch_size 2.0 +464 19 training.label_smoothing 0.002555258718927736 +464 20 model.embedding_dim 1.0 +464 20 optimizer.lr 0.0018582490454971737 +464 20 training.batch_size 2.0 +464 20 training.label_smoothing 0.031032921437762373 +464 21 model.embedding_dim 0.0 +464 21 optimizer.lr 0.0010247754980961617 +464 21 training.batch_size 2.0 +464 21 training.label_smoothing 0.3192037032285157 +464 22 model.embedding_dim 2.0 +464 22 optimizer.lr 0.0033608179515635973 +464 22 training.batch_size 0.0 +464 22 training.label_smoothing 0.022553403096391055 +464 23 model.embedding_dim 1.0 +464 23 optimizer.lr 0.03895141495780011 +464 23 training.batch_size 2.0 +464 23 training.label_smoothing 0.002544648780437501 +464 24 model.embedding_dim 1.0 +464 24 optimizer.lr 0.002184509025404941 +464 24 training.batch_size 1.0 +464 24 training.label_smoothing 0.8570595067105637 +464 25 model.embedding_dim 0.0 +464 25 optimizer.lr 0.014105204908252504 +464 25 training.batch_size 2.0 +464 25 training.label_smoothing 0.037395601512713726 +464 26 model.embedding_dim 1.0 +464 26 optimizer.lr 0.0012284246905852484 +464 26 training.batch_size 1.0 +464 26 training.label_smoothing 0.0013629093138803887 +464 27 model.embedding_dim 2.0 +464 27 optimizer.lr 0.05204377957883684 +464 27 training.batch_size 2.0 +464 27 training.label_smoothing 0.8621707418664845 +464 28 model.embedding_dim 1.0 +464 28 optimizer.lr 0.0014166590293992307 +464 28 training.batch_size 0.0 +464 28 training.label_smoothing 0.015624735716730528 +464 29 model.embedding_dim 1.0 +464 29 optimizer.lr 0.0075831719040814465 +464 29 training.batch_size 2.0 +464 29 training.label_smoothing 0.01893741705694492 +464 30 model.embedding_dim 0.0 +464 30 optimizer.lr 
0.0010944919995857232 +464 30 training.batch_size 0.0 +464 30 training.label_smoothing 0.1966858320985629 +464 1 dataset """fb15k237""" +464 1 model """proje""" +464 1 loss """crossentropy""" +464 1 regularizer """no""" +464 1 optimizer """adam""" +464 1 training_loop """lcwa""" +464 1 evaluator """rankbased""" +464 2 dataset """fb15k237""" +464 2 model """proje""" +464 2 loss """crossentropy""" +464 2 regularizer """no""" +464 2 optimizer """adam""" +464 2 training_loop """lcwa""" +464 2 evaluator """rankbased""" +464 3 dataset """fb15k237""" +464 3 model """proje""" +464 3 loss """crossentropy""" +464 3 regularizer """no""" +464 3 optimizer """adam""" +464 3 training_loop """lcwa""" +464 3 evaluator """rankbased""" +464 4 dataset """fb15k237""" +464 4 model """proje""" +464 4 loss """crossentropy""" +464 4 regularizer """no""" +464 4 optimizer """adam""" +464 4 training_loop """lcwa""" +464 4 evaluator """rankbased""" +464 5 dataset """fb15k237""" +464 5 model """proje""" +464 5 loss """crossentropy""" +464 5 regularizer """no""" +464 5 optimizer """adam""" +464 5 training_loop """lcwa""" +464 5 evaluator """rankbased""" +464 6 dataset """fb15k237""" +464 6 model """proje""" +464 6 loss """crossentropy""" +464 6 regularizer """no""" +464 6 optimizer """adam""" +464 6 training_loop """lcwa""" +464 6 evaluator """rankbased""" +464 7 dataset """fb15k237""" +464 7 model """proje""" +464 7 loss """crossentropy""" +464 7 regularizer """no""" +464 7 optimizer """adam""" +464 7 training_loop """lcwa""" +464 7 evaluator """rankbased""" +464 8 dataset """fb15k237""" +464 8 model """proje""" +464 8 loss """crossentropy""" +464 8 regularizer """no""" +464 8 optimizer """adam""" +464 8 training_loop """lcwa""" +464 8 evaluator """rankbased""" +464 9 dataset """fb15k237""" +464 9 model """proje""" +464 9 loss """crossentropy""" +464 9 regularizer """no""" +464 9 optimizer """adam""" +464 9 training_loop """lcwa""" +464 9 evaluator """rankbased""" +464 10 dataset """fb15k237""" 
+464 10 model """proje""" +464 10 loss """crossentropy""" +464 10 regularizer """no""" +464 10 optimizer """adam""" +464 10 training_loop """lcwa""" +464 10 evaluator """rankbased""" +464 11 dataset """fb15k237""" +464 11 model """proje""" +464 11 loss """crossentropy""" +464 11 regularizer """no""" +464 11 optimizer """adam""" +464 11 training_loop """lcwa""" +464 11 evaluator """rankbased""" +464 12 dataset """fb15k237""" +464 12 model """proje""" +464 12 loss """crossentropy""" +464 12 regularizer """no""" +464 12 optimizer """adam""" +464 12 training_loop """lcwa""" +464 12 evaluator """rankbased""" +464 13 dataset """fb15k237""" +464 13 model """proje""" +464 13 loss """crossentropy""" +464 13 regularizer """no""" +464 13 optimizer """adam""" +464 13 training_loop """lcwa""" +464 13 evaluator """rankbased""" +464 14 dataset """fb15k237""" +464 14 model """proje""" +464 14 loss """crossentropy""" +464 14 regularizer """no""" +464 14 optimizer """adam""" +464 14 training_loop """lcwa""" +464 14 evaluator """rankbased""" +464 15 dataset """fb15k237""" +464 15 model """proje""" +464 15 loss """crossentropy""" +464 15 regularizer """no""" +464 15 optimizer """adam""" +464 15 training_loop """lcwa""" +464 15 evaluator """rankbased""" +464 16 dataset """fb15k237""" +464 16 model """proje""" +464 16 loss """crossentropy""" +464 16 regularizer """no""" +464 16 optimizer """adam""" +464 16 training_loop """lcwa""" +464 16 evaluator """rankbased""" +464 17 dataset """fb15k237""" +464 17 model """proje""" +464 17 loss """crossentropy""" +464 17 regularizer """no""" +464 17 optimizer """adam""" +464 17 training_loop """lcwa""" +464 17 evaluator """rankbased""" +464 18 dataset """fb15k237""" +464 18 model """proje""" +464 18 loss """crossentropy""" +464 18 regularizer """no""" +464 18 optimizer """adam""" +464 18 training_loop """lcwa""" +464 18 evaluator """rankbased""" +464 19 dataset """fb15k237""" +464 19 model """proje""" +464 19 loss """crossentropy""" +464 19 
regularizer """no""" +464 19 optimizer """adam""" +464 19 training_loop """lcwa""" +464 19 evaluator """rankbased""" +464 20 dataset """fb15k237""" +464 20 model """proje""" +464 20 loss """crossentropy""" +464 20 regularizer """no""" +464 20 optimizer """adam""" +464 20 training_loop """lcwa""" +464 20 evaluator """rankbased""" +464 21 dataset """fb15k237""" +464 21 model """proje""" +464 21 loss """crossentropy""" +464 21 regularizer """no""" +464 21 optimizer """adam""" +464 21 training_loop """lcwa""" +464 21 evaluator """rankbased""" +464 22 dataset """fb15k237""" +464 22 model """proje""" +464 22 loss """crossentropy""" +464 22 regularizer """no""" +464 22 optimizer """adam""" +464 22 training_loop """lcwa""" +464 22 evaluator """rankbased""" +464 23 dataset """fb15k237""" +464 23 model """proje""" +464 23 loss """crossentropy""" +464 23 regularizer """no""" +464 23 optimizer """adam""" +464 23 training_loop """lcwa""" +464 23 evaluator """rankbased""" +464 24 dataset """fb15k237""" +464 24 model """proje""" +464 24 loss """crossentropy""" +464 24 regularizer """no""" +464 24 optimizer """adam""" +464 24 training_loop """lcwa""" +464 24 evaluator """rankbased""" +464 25 dataset """fb15k237""" +464 25 model """proje""" +464 25 loss """crossentropy""" +464 25 regularizer """no""" +464 25 optimizer """adam""" +464 25 training_loop """lcwa""" +464 25 evaluator """rankbased""" +464 26 dataset """fb15k237""" +464 26 model """proje""" +464 26 loss """crossentropy""" +464 26 regularizer """no""" +464 26 optimizer """adam""" +464 26 training_loop """lcwa""" +464 26 evaluator """rankbased""" +464 27 dataset """fb15k237""" +464 27 model """proje""" +464 27 loss """crossentropy""" +464 27 regularizer """no""" +464 27 optimizer """adam""" +464 27 training_loop """lcwa""" +464 27 evaluator """rankbased""" +464 28 dataset """fb15k237""" +464 28 model """proje""" +464 28 loss """crossentropy""" +464 28 regularizer """no""" +464 28 optimizer """adam""" +464 28 training_loop 
"""lcwa""" +464 28 evaluator """rankbased""" +464 29 dataset """fb15k237""" +464 29 model """proje""" +464 29 loss """crossentropy""" +464 29 regularizer """no""" +464 29 optimizer """adam""" +464 29 training_loop """lcwa""" +464 29 evaluator """rankbased""" +464 30 dataset """fb15k237""" +464 30 model """proje""" +464 30 loss """crossentropy""" +464 30 regularizer """no""" +464 30 optimizer """adam""" +464 30 training_loop """lcwa""" +464 30 evaluator """rankbased""" +465 1 model.embedding_dim 0.0 +465 1 training.batch_size 2.0 +465 1 training.label_smoothing 0.07421531276223599 +465 2 model.embedding_dim 1.0 +465 2 training.batch_size 1.0 +465 2 training.label_smoothing 0.002684164954133754 +465 3 model.embedding_dim 1.0 +465 3 training.batch_size 1.0 +465 3 training.label_smoothing 0.014946521914283431 +465 4 model.embedding_dim 2.0 +465 4 training.batch_size 0.0 +465 4 training.label_smoothing 0.003663271433313764 +465 5 model.embedding_dim 0.0 +465 5 training.batch_size 1.0 +465 5 training.label_smoothing 0.07122878990809882 +465 6 model.embedding_dim 1.0 +465 6 training.batch_size 0.0 +465 6 training.label_smoothing 0.041056208233432485 +465 7 model.embedding_dim 0.0 +465 7 training.batch_size 0.0 +465 7 training.label_smoothing 0.15444767781194443 +465 8 model.embedding_dim 1.0 +465 8 training.batch_size 1.0 +465 8 training.label_smoothing 0.0203857625907623 +465 9 model.embedding_dim 2.0 +465 9 training.batch_size 2.0 +465 9 training.label_smoothing 0.00272197613434058 +465 10 model.embedding_dim 0.0 +465 10 training.batch_size 0.0 +465 10 training.label_smoothing 0.09163452094078345 +465 11 model.embedding_dim 0.0 +465 11 training.batch_size 2.0 +465 11 training.label_smoothing 0.0014317717788524889 +465 12 model.embedding_dim 0.0 +465 12 training.batch_size 1.0 +465 12 training.label_smoothing 0.19808366330623764 +465 13 model.embedding_dim 0.0 +465 13 training.batch_size 0.0 +465 13 training.label_smoothing 0.007052808888648162 +465 14 
model.embedding_dim 0.0 +465 14 training.batch_size 0.0 +465 14 training.label_smoothing 0.21973876431153636 +465 15 model.embedding_dim 0.0 +465 15 training.batch_size 0.0 +465 15 training.label_smoothing 0.003308563614964576 +465 16 model.embedding_dim 2.0 +465 16 training.batch_size 0.0 +465 16 training.label_smoothing 0.18008928078818418 +465 17 model.embedding_dim 0.0 +465 17 training.batch_size 1.0 +465 17 training.label_smoothing 0.005670610507642413 +465 18 model.embedding_dim 1.0 +465 18 training.batch_size 1.0 +465 18 training.label_smoothing 0.011524415592205064 +465 19 model.embedding_dim 0.0 +465 19 training.batch_size 0.0 +465 19 training.label_smoothing 0.024494757650588513 +465 20 model.embedding_dim 2.0 +465 20 training.batch_size 1.0 +465 20 training.label_smoothing 0.05040855187506954 +465 21 model.embedding_dim 2.0 +465 21 training.batch_size 0.0 +465 21 training.label_smoothing 0.7080438034242589 +465 22 model.embedding_dim 0.0 +465 22 training.batch_size 1.0 +465 22 training.label_smoothing 0.06837874306694557 +465 23 model.embedding_dim 1.0 +465 23 training.batch_size 1.0 +465 23 training.label_smoothing 0.5117720762023417 +465 24 model.embedding_dim 0.0 +465 24 training.batch_size 0.0 +465 24 training.label_smoothing 0.020334981852601474 +465 25 model.embedding_dim 1.0 +465 25 training.batch_size 2.0 +465 25 training.label_smoothing 0.15095864811292023 +465 26 model.embedding_dim 0.0 +465 26 training.batch_size 2.0 +465 26 training.label_smoothing 0.10041611978487551 +465 27 model.embedding_dim 1.0 +465 27 training.batch_size 2.0 +465 27 training.label_smoothing 0.0666705060371514 +465 28 model.embedding_dim 2.0 +465 28 training.batch_size 2.0 +465 28 training.label_smoothing 0.597272662597686 +465 29 model.embedding_dim 0.0 +465 29 training.batch_size 0.0 +465 29 training.label_smoothing 0.7657945349001264 +465 30 model.embedding_dim 2.0 +465 30 training.batch_size 1.0 +465 30 training.label_smoothing 0.0022079751557810634 +465 31 
model.embedding_dim 0.0 +465 31 training.batch_size 2.0 +465 31 training.label_smoothing 0.0015234243021146252 +465 32 model.embedding_dim 2.0 +465 32 training.batch_size 1.0 +465 32 training.label_smoothing 0.0012944380002353133 +465 33 model.embedding_dim 1.0 +465 33 training.batch_size 0.0 +465 33 training.label_smoothing 0.014967821297248282 +465 34 model.embedding_dim 0.0 +465 34 training.batch_size 2.0 +465 34 training.label_smoothing 0.12483321004023397 +465 35 model.embedding_dim 2.0 +465 35 training.batch_size 1.0 +465 35 training.label_smoothing 0.007202332776180585 +465 36 model.embedding_dim 0.0 +465 36 training.batch_size 1.0 +465 36 training.label_smoothing 0.11628271420095809 +465 37 model.embedding_dim 1.0 +465 37 training.batch_size 2.0 +465 37 training.label_smoothing 0.019539549009863082 +465 38 model.embedding_dim 1.0 +465 38 training.batch_size 1.0 +465 38 training.label_smoothing 0.5218618117424096 +465 39 model.embedding_dim 1.0 +465 39 training.batch_size 0.0 +465 39 training.label_smoothing 0.07211723074829116 +465 40 model.embedding_dim 0.0 +465 40 training.batch_size 2.0 +465 40 training.label_smoothing 0.16665049458654307 +465 41 model.embedding_dim 2.0 +465 41 training.batch_size 0.0 +465 41 training.label_smoothing 0.1051082545784106 +465 42 model.embedding_dim 2.0 +465 42 training.batch_size 2.0 +465 42 training.label_smoothing 0.006048305060722853 +465 43 model.embedding_dim 0.0 +465 43 training.batch_size 2.0 +465 43 training.label_smoothing 0.18228321413042045 +465 44 model.embedding_dim 1.0 +465 44 training.batch_size 2.0 +465 44 training.label_smoothing 0.020059927576388786 +465 45 model.embedding_dim 0.0 +465 45 training.batch_size 2.0 +465 45 training.label_smoothing 0.0067384743127980795 +465 46 model.embedding_dim 1.0 +465 46 training.batch_size 2.0 +465 46 training.label_smoothing 0.019207204048893645 +465 47 model.embedding_dim 2.0 +465 47 training.batch_size 1.0 +465 47 training.label_smoothing 0.0022951970408070736 +465 
48 model.embedding_dim 1.0 +465 48 training.batch_size 0.0 +465 48 training.label_smoothing 0.08298933174992165 +465 49 model.embedding_dim 2.0 +465 49 training.batch_size 0.0 +465 49 training.label_smoothing 0.05290384543616728 +465 50 model.embedding_dim 1.0 +465 50 training.batch_size 2.0 +465 50 training.label_smoothing 0.05359230878546595 +465 51 model.embedding_dim 1.0 +465 51 training.batch_size 0.0 +465 51 training.label_smoothing 0.06135938253103395 +465 52 model.embedding_dim 0.0 +465 52 training.batch_size 0.0 +465 52 training.label_smoothing 0.13359211024820233 +465 53 model.embedding_dim 2.0 +465 53 training.batch_size 0.0 +465 53 training.label_smoothing 0.0034751889991037456 +465 54 model.embedding_dim 1.0 +465 54 training.batch_size 1.0 +465 54 training.label_smoothing 0.3527449889987508 +465 55 model.embedding_dim 1.0 +465 55 training.batch_size 2.0 +465 55 training.label_smoothing 0.009870315269393597 +465 56 model.embedding_dim 1.0 +465 56 training.batch_size 0.0 +465 56 training.label_smoothing 0.041253831006633636 +465 57 model.embedding_dim 2.0 +465 57 training.batch_size 2.0 +465 57 training.label_smoothing 0.003723805496698219 +465 58 model.embedding_dim 2.0 +465 58 training.batch_size 0.0 +465 58 training.label_smoothing 0.00633751348925226 +465 59 model.embedding_dim 1.0 +465 59 training.batch_size 0.0 +465 59 training.label_smoothing 0.002276232362498902 +465 60 model.embedding_dim 1.0 +465 60 training.batch_size 1.0 +465 60 training.label_smoothing 0.0038134604898011036 +465 61 model.embedding_dim 2.0 +465 61 training.batch_size 0.0 +465 61 training.label_smoothing 0.04669632376169453 +465 62 model.embedding_dim 0.0 +465 62 training.batch_size 2.0 +465 62 training.label_smoothing 0.3511618893300684 +465 63 model.embedding_dim 1.0 +465 63 training.batch_size 2.0 +465 63 training.label_smoothing 0.53239153140915 +465 64 model.embedding_dim 0.0 +465 64 training.batch_size 1.0 +465 64 training.label_smoothing 0.020286267100871502 +465 65 
model.embedding_dim 1.0 +465 65 training.batch_size 0.0 +465 65 training.label_smoothing 0.019685687552399356 +465 66 model.embedding_dim 0.0 +465 66 training.batch_size 1.0 +465 66 training.label_smoothing 0.12707703950248297 +465 67 model.embedding_dim 1.0 +465 67 training.batch_size 2.0 +465 67 training.label_smoothing 0.019755501428145333 +465 68 model.embedding_dim 1.0 +465 68 training.batch_size 1.0 +465 68 training.label_smoothing 0.7266273621916448 +465 69 model.embedding_dim 0.0 +465 69 training.batch_size 0.0 +465 69 training.label_smoothing 0.00952787007271129 +465 70 model.embedding_dim 1.0 +465 70 training.batch_size 0.0 +465 70 training.label_smoothing 0.007604343214510163 +465 71 model.embedding_dim 0.0 +465 71 training.batch_size 0.0 +465 71 training.label_smoothing 0.028233920350109028 +465 72 model.embedding_dim 2.0 +465 72 training.batch_size 0.0 +465 72 training.label_smoothing 0.0013361965333656192 +465 73 model.embedding_dim 2.0 +465 73 training.batch_size 0.0 +465 73 training.label_smoothing 0.5602076907660792 +465 74 model.embedding_dim 0.0 +465 74 training.batch_size 2.0 +465 74 training.label_smoothing 0.0013895825671620793 +465 75 model.embedding_dim 1.0 +465 75 training.batch_size 0.0 +465 75 training.label_smoothing 0.0038916460386271656 +465 76 model.embedding_dim 1.0 +465 76 training.batch_size 0.0 +465 76 training.label_smoothing 0.007143330086632847 +465 77 model.embedding_dim 0.0 +465 77 training.batch_size 2.0 +465 77 training.label_smoothing 0.0010138674890260765 +465 78 model.embedding_dim 2.0 +465 78 training.batch_size 1.0 +465 78 training.label_smoothing 0.6888709201239649 +465 79 model.embedding_dim 0.0 +465 79 training.batch_size 1.0 +465 79 training.label_smoothing 0.03460618392803151 +465 80 model.embedding_dim 0.0 +465 80 training.batch_size 0.0 +465 80 training.label_smoothing 0.002437913649208881 +465 81 model.embedding_dim 0.0 +465 81 training.batch_size 0.0 +465 81 training.label_smoothing 0.4948131471554509 +465 82 
model.embedding_dim 2.0 +465 82 training.batch_size 0.0 +465 82 training.label_smoothing 0.0013710531571854998 +465 83 model.embedding_dim 0.0 +465 83 training.batch_size 2.0 +465 83 training.label_smoothing 0.0028133357085010162 +465 84 model.embedding_dim 1.0 +465 84 training.batch_size 0.0 +465 84 training.label_smoothing 0.001374031757004097 +465 85 model.embedding_dim 2.0 +465 85 training.batch_size 0.0 +465 85 training.label_smoothing 0.0032059522857415 +465 86 model.embedding_dim 2.0 +465 86 training.batch_size 1.0 +465 86 training.label_smoothing 0.0019962143410444653 +465 87 model.embedding_dim 2.0 +465 87 training.batch_size 1.0 +465 87 training.label_smoothing 0.0014391126736753217 +465 88 model.embedding_dim 0.0 +465 88 training.batch_size 0.0 +465 88 training.label_smoothing 0.08070629772025527 +465 89 model.embedding_dim 2.0 +465 89 training.batch_size 0.0 +465 89 training.label_smoothing 0.12982178613014725 +465 90 model.embedding_dim 1.0 +465 90 training.batch_size 0.0 +465 90 training.label_smoothing 0.1851402611784761 +465 91 model.embedding_dim 0.0 +465 91 training.batch_size 0.0 +465 91 training.label_smoothing 0.0023673047409136696 +465 92 model.embedding_dim 2.0 +465 92 training.batch_size 1.0 +465 92 training.label_smoothing 0.05422393600760591 +465 93 model.embedding_dim 1.0 +465 93 training.batch_size 2.0 +465 93 training.label_smoothing 0.0023003774619985433 +465 94 model.embedding_dim 2.0 +465 94 training.batch_size 1.0 +465 94 training.label_smoothing 0.006188000791703969 +465 95 model.embedding_dim 2.0 +465 95 training.batch_size 1.0 +465 95 training.label_smoothing 0.008386218228444991 +465 96 model.embedding_dim 0.0 +465 96 training.batch_size 2.0 +465 96 training.label_smoothing 0.01278685400834937 +465 97 model.embedding_dim 1.0 +465 97 training.batch_size 2.0 +465 97 training.label_smoothing 0.0020666866556608462 +465 98 model.embedding_dim 0.0 +465 98 training.batch_size 0.0 +465 98 training.label_smoothing 0.003000059257041899 
+465 99 model.embedding_dim 2.0 +465 99 training.batch_size 1.0 +465 99 training.label_smoothing 0.009013049567249392 +465 100 model.embedding_dim 1.0 +465 100 training.batch_size 2.0 +465 100 training.label_smoothing 0.9264539411135391 +465 1 dataset """kinships""" +465 1 model """proje""" +465 1 loss """bceaftersigmoid""" +465 1 regularizer """no""" +465 1 optimizer """adadelta""" +465 1 training_loop """lcwa""" +465 1 evaluator """rankbased""" +465 2 dataset """kinships""" +465 2 model """proje""" +465 2 loss """bceaftersigmoid""" +465 2 regularizer """no""" +465 2 optimizer """adadelta""" +465 2 training_loop """lcwa""" +465 2 evaluator """rankbased""" +465 3 dataset """kinships""" +465 3 model """proje""" +465 3 loss """bceaftersigmoid""" +465 3 regularizer """no""" +465 3 optimizer """adadelta""" +465 3 training_loop """lcwa""" +465 3 evaluator """rankbased""" +465 4 dataset """kinships""" +465 4 model """proje""" +465 4 loss """bceaftersigmoid""" +465 4 regularizer """no""" +465 4 optimizer """adadelta""" +465 4 training_loop """lcwa""" +465 4 evaluator """rankbased""" +465 5 dataset """kinships""" +465 5 model """proje""" +465 5 loss """bceaftersigmoid""" +465 5 regularizer """no""" +465 5 optimizer """adadelta""" +465 5 training_loop """lcwa""" +465 5 evaluator """rankbased""" +465 6 dataset """kinships""" +465 6 model """proje""" +465 6 loss """bceaftersigmoid""" +465 6 regularizer """no""" +465 6 optimizer """adadelta""" +465 6 training_loop """lcwa""" +465 6 evaluator """rankbased""" +465 7 dataset """kinships""" +465 7 model """proje""" +465 7 loss """bceaftersigmoid""" +465 7 regularizer """no""" +465 7 optimizer """adadelta""" +465 7 training_loop """lcwa""" +465 7 evaluator """rankbased""" +465 8 dataset """kinships""" +465 8 model """proje""" +465 8 loss """bceaftersigmoid""" +465 8 regularizer """no""" +465 8 optimizer """adadelta""" +465 8 training_loop """lcwa""" +465 8 evaluator """rankbased""" +465 9 dataset """kinships""" +465 9 model 
"""proje""" +465 9 loss """bceaftersigmoid""" +465 9 regularizer """no""" +465 9 optimizer """adadelta""" +465 9 training_loop """lcwa""" +465 9 evaluator """rankbased""" +465 10 dataset """kinships""" +465 10 model """proje""" +465 10 loss """bceaftersigmoid""" +465 10 regularizer """no""" +465 10 optimizer """adadelta""" +465 10 training_loop """lcwa""" +465 10 evaluator """rankbased""" +465 11 dataset """kinships""" +465 11 model """proje""" +465 11 loss """bceaftersigmoid""" +465 11 regularizer """no""" +465 11 optimizer """adadelta""" +465 11 training_loop """lcwa""" +465 11 evaluator """rankbased""" +465 12 dataset """kinships""" +465 12 model """proje""" +465 12 loss """bceaftersigmoid""" +465 12 regularizer """no""" +465 12 optimizer """adadelta""" +465 12 training_loop """lcwa""" +465 12 evaluator """rankbased""" +465 13 dataset """kinships""" +465 13 model """proje""" +465 13 loss """bceaftersigmoid""" +465 13 regularizer """no""" +465 13 optimizer """adadelta""" +465 13 training_loop """lcwa""" +465 13 evaluator """rankbased""" +465 14 dataset """kinships""" +465 14 model """proje""" +465 14 loss """bceaftersigmoid""" +465 14 regularizer """no""" +465 14 optimizer """adadelta""" +465 14 training_loop """lcwa""" +465 14 evaluator """rankbased""" +465 15 dataset """kinships""" +465 15 model """proje""" +465 15 loss """bceaftersigmoid""" +465 15 regularizer """no""" +465 15 optimizer """adadelta""" +465 15 training_loop """lcwa""" +465 15 evaluator """rankbased""" +465 16 dataset """kinships""" +465 16 model """proje""" +465 16 loss """bceaftersigmoid""" +465 16 regularizer """no""" +465 16 optimizer """adadelta""" +465 16 training_loop """lcwa""" +465 16 evaluator """rankbased""" +465 17 dataset """kinships""" +465 17 model """proje""" +465 17 loss """bceaftersigmoid""" +465 17 regularizer """no""" +465 17 optimizer """adadelta""" +465 17 training_loop """lcwa""" +465 17 evaluator """rankbased""" +465 18 dataset """kinships""" +465 18 model """proje""" 
+465 18 loss """bceaftersigmoid""" +465 18 regularizer """no""" +465 18 optimizer """adadelta""" +465 18 training_loop """lcwa""" +465 18 evaluator """rankbased""" +465 19 dataset """kinships""" +465 19 model """proje""" +465 19 loss """bceaftersigmoid""" +465 19 regularizer """no""" +465 19 optimizer """adadelta""" +465 19 training_loop """lcwa""" +465 19 evaluator """rankbased""" +465 20 dataset """kinships""" +465 20 model """proje""" +465 20 loss """bceaftersigmoid""" +465 20 regularizer """no""" +465 20 optimizer """adadelta""" +465 20 training_loop """lcwa""" +465 20 evaluator """rankbased""" +465 21 dataset """kinships""" +465 21 model """proje""" +465 21 loss """bceaftersigmoid""" +465 21 regularizer """no""" +465 21 optimizer """adadelta""" +465 21 training_loop """lcwa""" +465 21 evaluator """rankbased""" +465 22 dataset """kinships""" +465 22 model """proje""" +465 22 loss """bceaftersigmoid""" +465 22 regularizer """no""" +465 22 optimizer """adadelta""" +465 22 training_loop """lcwa""" +465 22 evaluator """rankbased""" +465 23 dataset """kinships""" +465 23 model """proje""" +465 23 loss """bceaftersigmoid""" +465 23 regularizer """no""" +465 23 optimizer """adadelta""" +465 23 training_loop """lcwa""" +465 23 evaluator """rankbased""" +465 24 dataset """kinships""" +465 24 model """proje""" +465 24 loss """bceaftersigmoid""" +465 24 regularizer """no""" +465 24 optimizer """adadelta""" +465 24 training_loop """lcwa""" +465 24 evaluator """rankbased""" +465 25 dataset """kinships""" +465 25 model """proje""" +465 25 loss """bceaftersigmoid""" +465 25 regularizer """no""" +465 25 optimizer """adadelta""" +465 25 training_loop """lcwa""" +465 25 evaluator """rankbased""" +465 26 dataset """kinships""" +465 26 model """proje""" +465 26 loss """bceaftersigmoid""" +465 26 regularizer """no""" +465 26 optimizer """adadelta""" +465 26 training_loop """lcwa""" +465 26 evaluator """rankbased""" +465 27 dataset """kinships""" +465 27 model """proje""" +465 27 
loss """bceaftersigmoid""" +465 27 regularizer """no""" +465 27 optimizer """adadelta""" +465 27 training_loop """lcwa""" +465 27 evaluator """rankbased""" +465 28 dataset """kinships""" +465 28 model """proje""" +465 28 loss """bceaftersigmoid""" +465 28 regularizer """no""" +465 28 optimizer """adadelta""" +465 28 training_loop """lcwa""" +465 28 evaluator """rankbased""" +465 29 dataset """kinships""" +465 29 model """proje""" +465 29 loss """bceaftersigmoid""" +465 29 regularizer """no""" +465 29 optimizer """adadelta""" +465 29 training_loop """lcwa""" +465 29 evaluator """rankbased""" +465 30 dataset """kinships""" +465 30 model """proje""" +465 30 loss """bceaftersigmoid""" +465 30 regularizer """no""" +465 30 optimizer """adadelta""" +465 30 training_loop """lcwa""" +465 30 evaluator """rankbased""" +465 31 dataset """kinships""" +465 31 model """proje""" +465 31 loss """bceaftersigmoid""" +465 31 regularizer """no""" +465 31 optimizer """adadelta""" +465 31 training_loop """lcwa""" +465 31 evaluator """rankbased""" +465 32 dataset """kinships""" +465 32 model """proje""" +465 32 loss """bceaftersigmoid""" +465 32 regularizer """no""" +465 32 optimizer """adadelta""" +465 32 training_loop """lcwa""" +465 32 evaluator """rankbased""" +465 33 dataset """kinships""" +465 33 model """proje""" +465 33 loss """bceaftersigmoid""" +465 33 regularizer """no""" +465 33 optimizer """adadelta""" +465 33 training_loop """lcwa""" +465 33 evaluator """rankbased""" +465 34 dataset """kinships""" +465 34 model """proje""" +465 34 loss """bceaftersigmoid""" +465 34 regularizer """no""" +465 34 optimizer """adadelta""" +465 34 training_loop """lcwa""" +465 34 evaluator """rankbased""" +465 35 dataset """kinships""" +465 35 model """proje""" +465 35 loss """bceaftersigmoid""" +465 35 regularizer """no""" +465 35 optimizer """adadelta""" +465 35 training_loop """lcwa""" +465 35 evaluator """rankbased""" +465 36 dataset """kinships""" +465 36 model """proje""" +465 36 loss 
"""bceaftersigmoid""" +465 36 regularizer """no""" +465 36 optimizer """adadelta""" +465 36 training_loop """lcwa""" +465 36 evaluator """rankbased""" +465 37 dataset """kinships""" +465 37 model """proje""" +465 37 loss """bceaftersigmoid""" +465 37 regularizer """no""" +465 37 optimizer """adadelta""" +465 37 training_loop """lcwa""" +465 37 evaluator """rankbased""" +465 38 dataset """kinships""" +465 38 model """proje""" +465 38 loss """bceaftersigmoid""" +465 38 regularizer """no""" +465 38 optimizer """adadelta""" +465 38 training_loop """lcwa""" +465 38 evaluator """rankbased""" +465 39 dataset """kinships""" +465 39 model """proje""" +465 39 loss """bceaftersigmoid""" +465 39 regularizer """no""" +465 39 optimizer """adadelta""" +465 39 training_loop """lcwa""" +465 39 evaluator """rankbased""" +465 40 dataset """kinships""" +465 40 model """proje""" +465 40 loss """bceaftersigmoid""" +465 40 regularizer """no""" +465 40 optimizer """adadelta""" +465 40 training_loop """lcwa""" +465 40 evaluator """rankbased""" +465 41 dataset """kinships""" +465 41 model """proje""" +465 41 loss """bceaftersigmoid""" +465 41 regularizer """no""" +465 41 optimizer """adadelta""" +465 41 training_loop """lcwa""" +465 41 evaluator """rankbased""" +465 42 dataset """kinships""" +465 42 model """proje""" +465 42 loss """bceaftersigmoid""" +465 42 regularizer """no""" +465 42 optimizer """adadelta""" +465 42 training_loop """lcwa""" +465 42 evaluator """rankbased""" +465 43 dataset """kinships""" +465 43 model """proje""" +465 43 loss """bceaftersigmoid""" +465 43 regularizer """no""" +465 43 optimizer """adadelta""" +465 43 training_loop """lcwa""" +465 43 evaluator """rankbased""" +465 44 dataset """kinships""" +465 44 model """proje""" +465 44 loss """bceaftersigmoid""" +465 44 regularizer """no""" +465 44 optimizer """adadelta""" +465 44 training_loop """lcwa""" +465 44 evaluator """rankbased""" +465 45 dataset """kinships""" +465 45 model """proje""" +465 45 loss 
"""bceaftersigmoid""" +465 45 regularizer """no""" +465 45 optimizer """adadelta""" +465 45 training_loop """lcwa""" +465 45 evaluator """rankbased""" +465 46 dataset """kinships""" +465 46 model """proje""" +465 46 loss """bceaftersigmoid""" +465 46 regularizer """no""" +465 46 optimizer """adadelta""" +465 46 training_loop """lcwa""" +465 46 evaluator """rankbased""" +465 47 dataset """kinships""" +465 47 model """proje""" +465 47 loss """bceaftersigmoid""" +465 47 regularizer """no""" +465 47 optimizer """adadelta""" +465 47 training_loop """lcwa""" +465 47 evaluator """rankbased""" +465 48 dataset """kinships""" +465 48 model """proje""" +465 48 loss """bceaftersigmoid""" +465 48 regularizer """no""" +465 48 optimizer """adadelta""" +465 48 training_loop """lcwa""" +465 48 evaluator """rankbased""" +465 49 dataset """kinships""" +465 49 model """proje""" +465 49 loss """bceaftersigmoid""" +465 49 regularizer """no""" +465 49 optimizer """adadelta""" +465 49 training_loop """lcwa""" +465 49 evaluator """rankbased""" +465 50 dataset """kinships""" +465 50 model """proje""" +465 50 loss """bceaftersigmoid""" +465 50 regularizer """no""" +465 50 optimizer """adadelta""" +465 50 training_loop """lcwa""" +465 50 evaluator """rankbased""" +465 51 dataset """kinships""" +465 51 model """proje""" +465 51 loss """bceaftersigmoid""" +465 51 regularizer """no""" +465 51 optimizer """adadelta""" +465 51 training_loop """lcwa""" +465 51 evaluator """rankbased""" +465 52 dataset """kinships""" +465 52 model """proje""" +465 52 loss """bceaftersigmoid""" +465 52 regularizer """no""" +465 52 optimizer """adadelta""" +465 52 training_loop """lcwa""" +465 52 evaluator """rankbased""" +465 53 dataset """kinships""" +465 53 model """proje""" +465 53 loss """bceaftersigmoid""" +465 53 regularizer """no""" +465 53 optimizer """adadelta""" +465 53 training_loop """lcwa""" +465 53 evaluator """rankbased""" +465 54 dataset """kinships""" +465 54 model """proje""" +465 54 loss 
"""bceaftersigmoid""" +465 54 regularizer """no""" +465 54 optimizer """adadelta""" +465 54 training_loop """lcwa""" +465 54 evaluator """rankbased""" +465 55 dataset """kinships""" +465 55 model """proje""" +465 55 loss """bceaftersigmoid""" +465 55 regularizer """no""" +465 55 optimizer """adadelta""" +465 55 training_loop """lcwa""" +465 55 evaluator """rankbased""" +465 56 dataset """kinships""" +465 56 model """proje""" +465 56 loss """bceaftersigmoid""" +465 56 regularizer """no""" +465 56 optimizer """adadelta""" +465 56 training_loop """lcwa""" +465 56 evaluator """rankbased""" +465 57 dataset """kinships""" +465 57 model """proje""" +465 57 loss """bceaftersigmoid""" +465 57 regularizer """no""" +465 57 optimizer """adadelta""" +465 57 training_loop """lcwa""" +465 57 evaluator """rankbased""" +465 58 dataset """kinships""" +465 58 model """proje""" +465 58 loss """bceaftersigmoid""" +465 58 regularizer """no""" +465 58 optimizer """adadelta""" +465 58 training_loop """lcwa""" +465 58 evaluator """rankbased""" +465 59 dataset """kinships""" +465 59 model """proje""" +465 59 loss """bceaftersigmoid""" +465 59 regularizer """no""" +465 59 optimizer """adadelta""" +465 59 training_loop """lcwa""" +465 59 evaluator """rankbased""" +465 60 dataset """kinships""" +465 60 model """proje""" +465 60 loss """bceaftersigmoid""" +465 60 regularizer """no""" +465 60 optimizer """adadelta""" +465 60 training_loop """lcwa""" +465 60 evaluator """rankbased""" +465 61 dataset """kinships""" +465 61 model """proje""" +465 61 loss """bceaftersigmoid""" +465 61 regularizer """no""" +465 61 optimizer """adadelta""" +465 61 training_loop """lcwa""" +465 61 evaluator """rankbased""" +465 62 dataset """kinships""" +465 62 model """proje""" +465 62 loss """bceaftersigmoid""" +465 62 regularizer """no""" +465 62 optimizer """adadelta""" +465 62 training_loop """lcwa""" +465 62 evaluator """rankbased""" +465 63 dataset """kinships""" +465 63 model """proje""" +465 63 loss 
"""bceaftersigmoid""" +465 63 regularizer """no""" +465 63 optimizer """adadelta""" +465 63 training_loop """lcwa""" +465 63 evaluator """rankbased""" +465 64 dataset """kinships""" +465 64 model """proje""" +465 64 loss """bceaftersigmoid""" +465 64 regularizer """no""" +465 64 optimizer """adadelta""" +465 64 training_loop """lcwa""" +465 64 evaluator """rankbased""" +465 65 dataset """kinships""" +465 65 model """proje""" +465 65 loss """bceaftersigmoid""" +465 65 regularizer """no""" +465 65 optimizer """adadelta""" +465 65 training_loop """lcwa""" +465 65 evaluator """rankbased""" +465 66 dataset """kinships""" +465 66 model """proje""" +465 66 loss """bceaftersigmoid""" +465 66 regularizer """no""" +465 66 optimizer """adadelta""" +465 66 training_loop """lcwa""" +465 66 evaluator """rankbased""" +465 67 dataset """kinships""" +465 67 model """proje""" +465 67 loss """bceaftersigmoid""" +465 67 regularizer """no""" +465 67 optimizer """adadelta""" +465 67 training_loop """lcwa""" +465 67 evaluator """rankbased""" +465 68 dataset """kinships""" +465 68 model """proje""" +465 68 loss """bceaftersigmoid""" +465 68 regularizer """no""" +465 68 optimizer """adadelta""" +465 68 training_loop """lcwa""" +465 68 evaluator """rankbased""" +465 69 dataset """kinships""" +465 69 model """proje""" +465 69 loss """bceaftersigmoid""" +465 69 regularizer """no""" +465 69 optimizer """adadelta""" +465 69 training_loop """lcwa""" +465 69 evaluator """rankbased""" +465 70 dataset """kinships""" +465 70 model """proje""" +465 70 loss """bceaftersigmoid""" +465 70 regularizer """no""" +465 70 optimizer """adadelta""" +465 70 training_loop """lcwa""" +465 70 evaluator """rankbased""" +465 71 dataset """kinships""" +465 71 model """proje""" +465 71 loss """bceaftersigmoid""" +465 71 regularizer """no""" +465 71 optimizer """adadelta""" +465 71 training_loop """lcwa""" +465 71 evaluator """rankbased""" +465 72 dataset """kinships""" +465 72 model """proje""" +465 72 loss 
"""bceaftersigmoid""" +465 72 regularizer """no""" +465 72 optimizer """adadelta""" +465 72 training_loop """lcwa""" +465 72 evaluator """rankbased""" +465 73 dataset """kinships""" +465 73 model """proje""" +465 73 loss """bceaftersigmoid""" +465 73 regularizer """no""" +465 73 optimizer """adadelta""" +465 73 training_loop """lcwa""" +465 73 evaluator """rankbased""" +465 74 dataset """kinships""" +465 74 model """proje""" +465 74 loss """bceaftersigmoid""" +465 74 regularizer """no""" +465 74 optimizer """adadelta""" +465 74 training_loop """lcwa""" +465 74 evaluator """rankbased""" +465 75 dataset """kinships""" +465 75 model """proje""" +465 75 loss """bceaftersigmoid""" +465 75 regularizer """no""" +465 75 optimizer """adadelta""" +465 75 training_loop """lcwa""" +465 75 evaluator """rankbased""" +465 76 dataset """kinships""" +465 76 model """proje""" +465 76 loss """bceaftersigmoid""" +465 76 regularizer """no""" +465 76 optimizer """adadelta""" +465 76 training_loop """lcwa""" +465 76 evaluator """rankbased""" +465 77 dataset """kinships""" +465 77 model """proje""" +465 77 loss """bceaftersigmoid""" +465 77 regularizer """no""" +465 77 optimizer """adadelta""" +465 77 training_loop """lcwa""" +465 77 evaluator """rankbased""" +465 78 dataset """kinships""" +465 78 model """proje""" +465 78 loss """bceaftersigmoid""" +465 78 regularizer """no""" +465 78 optimizer """adadelta""" +465 78 training_loop """lcwa""" +465 78 evaluator """rankbased""" +465 79 dataset """kinships""" +465 79 model """proje""" +465 79 loss """bceaftersigmoid""" +465 79 regularizer """no""" +465 79 optimizer """adadelta""" +465 79 training_loop """lcwa""" +465 79 evaluator """rankbased""" +465 80 dataset """kinships""" +465 80 model """proje""" +465 80 loss """bceaftersigmoid""" +465 80 regularizer """no""" +465 80 optimizer """adadelta""" +465 80 training_loop """lcwa""" +465 80 evaluator """rankbased""" +465 81 dataset """kinships""" +465 81 model """proje""" +465 81 loss 
"""bceaftersigmoid""" +465 81 regularizer """no""" +465 81 optimizer """adadelta""" +465 81 training_loop """lcwa""" +465 81 evaluator """rankbased""" +465 82 dataset """kinships""" +465 82 model """proje""" +465 82 loss """bceaftersigmoid""" +465 82 regularizer """no""" +465 82 optimizer """adadelta""" +465 82 training_loop """lcwa""" +465 82 evaluator """rankbased""" +465 83 dataset """kinships""" +465 83 model """proje""" +465 83 loss """bceaftersigmoid""" +465 83 regularizer """no""" +465 83 optimizer """adadelta""" +465 83 training_loop """lcwa""" +465 83 evaluator """rankbased""" +465 84 dataset """kinships""" +465 84 model """proje""" +465 84 loss """bceaftersigmoid""" +465 84 regularizer """no""" +465 84 optimizer """adadelta""" +465 84 training_loop """lcwa""" +465 84 evaluator """rankbased""" +465 85 dataset """kinships""" +465 85 model """proje""" +465 85 loss """bceaftersigmoid""" +465 85 regularizer """no""" +465 85 optimizer """adadelta""" +465 85 training_loop """lcwa""" +465 85 evaluator """rankbased""" +465 86 dataset """kinships""" +465 86 model """proje""" +465 86 loss """bceaftersigmoid""" +465 86 regularizer """no""" +465 86 optimizer """adadelta""" +465 86 training_loop """lcwa""" +465 86 evaluator """rankbased""" +465 87 dataset """kinships""" +465 87 model """proje""" +465 87 loss """bceaftersigmoid""" +465 87 regularizer """no""" +465 87 optimizer """adadelta""" +465 87 training_loop """lcwa""" +465 87 evaluator """rankbased""" +465 88 dataset """kinships""" +465 88 model """proje""" +465 88 loss """bceaftersigmoid""" +465 88 regularizer """no""" +465 88 optimizer """adadelta""" +465 88 training_loop """lcwa""" +465 88 evaluator """rankbased""" +465 89 dataset """kinships""" +465 89 model """proje""" +465 89 loss """bceaftersigmoid""" +465 89 regularizer """no""" +465 89 optimizer """adadelta""" +465 89 training_loop """lcwa""" +465 89 evaluator """rankbased""" +465 90 dataset """kinships""" +465 90 model """proje""" +465 90 loss 
"""bceaftersigmoid""" +465 90 regularizer """no""" +465 90 optimizer """adadelta""" +465 90 training_loop """lcwa""" +465 90 evaluator """rankbased""" +465 91 dataset """kinships""" +465 91 model """proje""" +465 91 loss """bceaftersigmoid""" +465 91 regularizer """no""" +465 91 optimizer """adadelta""" +465 91 training_loop """lcwa""" +465 91 evaluator """rankbased""" +465 92 dataset """kinships""" +465 92 model """proje""" +465 92 loss """bceaftersigmoid""" +465 92 regularizer """no""" +465 92 optimizer """adadelta""" +465 92 training_loop """lcwa""" +465 92 evaluator """rankbased""" +465 93 dataset """kinships""" +465 93 model """proje""" +465 93 loss """bceaftersigmoid""" +465 93 regularizer """no""" +465 93 optimizer """adadelta""" +465 93 training_loop """lcwa""" +465 93 evaluator """rankbased""" +465 94 dataset """kinships""" +465 94 model """proje""" +465 94 loss """bceaftersigmoid""" +465 94 regularizer """no""" +465 94 optimizer """adadelta""" +465 94 training_loop """lcwa""" +465 94 evaluator """rankbased""" +465 95 dataset """kinships""" +465 95 model """proje""" +465 95 loss """bceaftersigmoid""" +465 95 regularizer """no""" +465 95 optimizer """adadelta""" +465 95 training_loop """lcwa""" +465 95 evaluator """rankbased""" +465 96 dataset """kinships""" +465 96 model """proje""" +465 96 loss """bceaftersigmoid""" +465 96 regularizer """no""" +465 96 optimizer """adadelta""" +465 96 training_loop """lcwa""" +465 96 evaluator """rankbased""" +465 97 dataset """kinships""" +465 97 model """proje""" +465 97 loss """bceaftersigmoid""" +465 97 regularizer """no""" +465 97 optimizer """adadelta""" +465 97 training_loop """lcwa""" +465 97 evaluator """rankbased""" +465 98 dataset """kinships""" +465 98 model """proje""" +465 98 loss """bceaftersigmoid""" +465 98 regularizer """no""" +465 98 optimizer """adadelta""" +465 98 training_loop """lcwa""" +465 98 evaluator """rankbased""" +465 99 dataset """kinships""" +465 99 model """proje""" +465 99 loss 
"""bceaftersigmoid""" +465 99 regularizer """no""" +465 99 optimizer """adadelta""" +465 99 training_loop """lcwa""" +465 99 evaluator """rankbased""" +465 100 dataset """kinships""" +465 100 model """proje""" +465 100 loss """bceaftersigmoid""" +465 100 regularizer """no""" +465 100 optimizer """adadelta""" +465 100 training_loop """lcwa""" +465 100 evaluator """rankbased""" +466 1 model.embedding_dim 1.0 +466 1 training.batch_size 1.0 +466 1 training.label_smoothing 0.903408199497855 +466 2 model.embedding_dim 0.0 +466 2 training.batch_size 1.0 +466 2 training.label_smoothing 0.002332995963242547 +466 3 model.embedding_dim 2.0 +466 3 training.batch_size 0.0 +466 3 training.label_smoothing 0.2741309041036561 +466 4 model.embedding_dim 0.0 +466 4 training.batch_size 1.0 +466 4 training.label_smoothing 0.097712930098315 +466 5 model.embedding_dim 2.0 +466 5 training.batch_size 0.0 +466 5 training.label_smoothing 0.003007818873038435 +466 6 model.embedding_dim 1.0 +466 6 training.batch_size 1.0 +466 6 training.label_smoothing 0.00488399936687008 +466 7 model.embedding_dim 1.0 +466 7 training.batch_size 2.0 +466 7 training.label_smoothing 0.0038389936187435167 +466 8 model.embedding_dim 1.0 +466 8 training.batch_size 1.0 +466 8 training.label_smoothing 0.002125839038921556 +466 9 model.embedding_dim 1.0 +466 9 training.batch_size 0.0 +466 9 training.label_smoothing 0.0041391833600253345 +466 10 model.embedding_dim 1.0 +466 10 training.batch_size 2.0 +466 10 training.label_smoothing 0.0018853364012643818 +466 11 model.embedding_dim 2.0 +466 11 training.batch_size 2.0 +466 11 training.label_smoothing 0.1811227473721115 +466 12 model.embedding_dim 1.0 +466 12 training.batch_size 0.0 +466 12 training.label_smoothing 0.0067502054657164225 +466 13 model.embedding_dim 1.0 +466 13 training.batch_size 0.0 +466 13 training.label_smoothing 0.3390797570042642 +466 14 model.embedding_dim 1.0 +466 14 training.batch_size 0.0 +466 14 training.label_smoothing 0.0025615597725504105 
+466 15 model.embedding_dim 1.0 +466 15 training.batch_size 0.0 +466 15 training.label_smoothing 0.7840063471488938 +466 16 model.embedding_dim 2.0 +466 16 training.batch_size 1.0 +466 16 training.label_smoothing 0.9068053765547075 +466 17 model.embedding_dim 2.0 +466 17 training.batch_size 0.0 +466 17 training.label_smoothing 0.28097888479241195 +466 18 model.embedding_dim 1.0 +466 18 training.batch_size 0.0 +466 18 training.label_smoothing 0.02183182950608806 +466 19 model.embedding_dim 0.0 +466 19 training.batch_size 0.0 +466 19 training.label_smoothing 0.39785489501697896 +466 20 model.embedding_dim 2.0 +466 20 training.batch_size 1.0 +466 20 training.label_smoothing 0.006170574217637941 +466 21 model.embedding_dim 0.0 +466 21 training.batch_size 1.0 +466 21 training.label_smoothing 0.11333366721092891 +466 22 model.embedding_dim 2.0 +466 22 training.batch_size 2.0 +466 22 training.label_smoothing 0.17128677228495084 +466 23 model.embedding_dim 0.0 +466 23 training.batch_size 0.0 +466 23 training.label_smoothing 0.05557897293075901 +466 24 model.embedding_dim 2.0 +466 24 training.batch_size 1.0 +466 24 training.label_smoothing 0.0010350249386304753 +466 25 model.embedding_dim 2.0 +466 25 training.batch_size 2.0 +466 25 training.label_smoothing 0.018227149606072465 +466 26 model.embedding_dim 0.0 +466 26 training.batch_size 1.0 +466 26 training.label_smoothing 0.0037598009966452245 +466 27 model.embedding_dim 0.0 +466 27 training.batch_size 2.0 +466 27 training.label_smoothing 0.5244571446969164 +466 28 model.embedding_dim 2.0 +466 28 training.batch_size 0.0 +466 28 training.label_smoothing 0.34042501750401666 +466 29 model.embedding_dim 0.0 +466 29 training.batch_size 2.0 +466 29 training.label_smoothing 0.019688826358540995 +466 30 model.embedding_dim 1.0 +466 30 training.batch_size 1.0 +466 30 training.label_smoothing 0.1641177966962591 +466 31 model.embedding_dim 2.0 +466 31 training.batch_size 2.0 +466 31 training.label_smoothing 0.004743745547829145 +466 
32 model.embedding_dim 2.0 +466 32 training.batch_size 1.0 +466 32 training.label_smoothing 0.02182326985340354 +466 33 model.embedding_dim 1.0 +466 33 training.batch_size 1.0 +466 33 training.label_smoothing 0.6943106360806761 +466 34 model.embedding_dim 2.0 +466 34 training.batch_size 0.0 +466 34 training.label_smoothing 0.011873661487020236 +466 35 model.embedding_dim 0.0 +466 35 training.batch_size 0.0 +466 35 training.label_smoothing 0.09636351587605842 +466 36 model.embedding_dim 0.0 +466 36 training.batch_size 0.0 +466 36 training.label_smoothing 0.027480959586004987 +466 37 model.embedding_dim 0.0 +466 37 training.batch_size 0.0 +466 37 training.label_smoothing 0.6369630643750298 +466 38 model.embedding_dim 2.0 +466 38 training.batch_size 0.0 +466 38 training.label_smoothing 0.05911961479762628 +466 39 model.embedding_dim 1.0 +466 39 training.batch_size 0.0 +466 39 training.label_smoothing 0.8396906922940812 +466 40 model.embedding_dim 2.0 +466 40 training.batch_size 1.0 +466 40 training.label_smoothing 0.33966232964195153 +466 41 model.embedding_dim 1.0 +466 41 training.batch_size 2.0 +466 41 training.label_smoothing 0.0037193689199323692 +466 42 model.embedding_dim 2.0 +466 42 training.batch_size 0.0 +466 42 training.label_smoothing 0.01515371933722664 +466 43 model.embedding_dim 1.0 +466 43 training.batch_size 0.0 +466 43 training.label_smoothing 0.2527965110428559 +466 44 model.embedding_dim 0.0 +466 44 training.batch_size 0.0 +466 44 training.label_smoothing 0.002086890316191248 +466 45 model.embedding_dim 0.0 +466 45 training.batch_size 2.0 +466 45 training.label_smoothing 0.009784435084165278 +466 46 model.embedding_dim 0.0 +466 46 training.batch_size 0.0 +466 46 training.label_smoothing 0.007502532815894773 +466 47 model.embedding_dim 1.0 +466 47 training.batch_size 2.0 +466 47 training.label_smoothing 0.04216661386185034 +466 48 model.embedding_dim 1.0 +466 48 training.batch_size 0.0 +466 48 training.label_smoothing 0.05729609308771544 +466 49 
model.embedding_dim 1.0 +466 49 training.batch_size 1.0 +466 49 training.label_smoothing 0.09792557544893758 +466 50 model.embedding_dim 1.0 +466 50 training.batch_size 0.0 +466 50 training.label_smoothing 0.002868364037142411 +466 51 model.embedding_dim 2.0 +466 51 training.batch_size 1.0 +466 51 training.label_smoothing 0.001417476869175898 +466 52 model.embedding_dim 0.0 +466 52 training.batch_size 0.0 +466 52 training.label_smoothing 0.028640389559936405 +466 53 model.embedding_dim 1.0 +466 53 training.batch_size 0.0 +466 53 training.label_smoothing 0.037130359889626686 +466 54 model.embedding_dim 1.0 +466 54 training.batch_size 0.0 +466 54 training.label_smoothing 0.03797298687053299 +466 55 model.embedding_dim 0.0 +466 55 training.batch_size 0.0 +466 55 training.label_smoothing 0.2916481826446864 +466 56 model.embedding_dim 2.0 +466 56 training.batch_size 1.0 +466 56 training.label_smoothing 0.008003525221124316 +466 57 model.embedding_dim 0.0 +466 57 training.batch_size 1.0 +466 57 training.label_smoothing 0.6421443477221295 +466 58 model.embedding_dim 0.0 +466 58 training.batch_size 0.0 +466 58 training.label_smoothing 0.003047870004585747 +466 59 model.embedding_dim 2.0 +466 59 training.batch_size 2.0 +466 59 training.label_smoothing 0.040251214267547894 +466 60 model.embedding_dim 1.0 +466 60 training.batch_size 2.0 +466 60 training.label_smoothing 0.01868839341854994 +466 61 model.embedding_dim 2.0 +466 61 training.batch_size 0.0 +466 61 training.label_smoothing 0.28460135790726776 +466 62 model.embedding_dim 1.0 +466 62 training.batch_size 1.0 +466 62 training.label_smoothing 0.009782358385914125 +466 63 model.embedding_dim 2.0 +466 63 training.batch_size 1.0 +466 63 training.label_smoothing 0.004270737890173154 +466 64 model.embedding_dim 2.0 +466 64 training.batch_size 1.0 +466 64 training.label_smoothing 0.002047411046240253 +466 65 model.embedding_dim 0.0 +466 65 training.batch_size 1.0 +466 65 training.label_smoothing 0.0033921423265519733 +466 66 
model.embedding_dim 0.0 +466 66 training.batch_size 1.0 +466 66 training.label_smoothing 0.012688155795273279 +466 67 model.embedding_dim 0.0 +466 67 training.batch_size 0.0 +466 67 training.label_smoothing 0.07243964387606558 +466 68 model.embedding_dim 0.0 +466 68 training.batch_size 0.0 +466 68 training.label_smoothing 0.023725635493713156 +466 69 model.embedding_dim 0.0 +466 69 training.batch_size 2.0 +466 69 training.label_smoothing 0.6342526366893053 +466 70 model.embedding_dim 1.0 +466 70 training.batch_size 1.0 +466 70 training.label_smoothing 0.057973110348412626 +466 71 model.embedding_dim 0.0 +466 71 training.batch_size 2.0 +466 71 training.label_smoothing 0.10083032577831039 +466 72 model.embedding_dim 0.0 +466 72 training.batch_size 1.0 +466 72 training.label_smoothing 0.09122023688967298 +466 73 model.embedding_dim 0.0 +466 73 training.batch_size 2.0 +466 73 training.label_smoothing 0.0013984576986052447 +466 74 model.embedding_dim 0.0 +466 74 training.batch_size 2.0 +466 74 training.label_smoothing 0.0024645802293756145 +466 75 model.embedding_dim 1.0 +466 75 training.batch_size 1.0 +466 75 training.label_smoothing 0.20684073257144284 +466 76 model.embedding_dim 1.0 +466 76 training.batch_size 0.0 +466 76 training.label_smoothing 0.14680670286488692 +466 77 model.embedding_dim 0.0 +466 77 training.batch_size 0.0 +466 77 training.label_smoothing 0.7741145181452095 +466 78 model.embedding_dim 2.0 +466 78 training.batch_size 0.0 +466 78 training.label_smoothing 0.001773406334873703 +466 79 model.embedding_dim 2.0 +466 79 training.batch_size 0.0 +466 79 training.label_smoothing 0.009946251556384282 +466 80 model.embedding_dim 2.0 +466 80 training.batch_size 2.0 +466 80 training.label_smoothing 0.20548664801044927 +466 81 model.embedding_dim 2.0 +466 81 training.batch_size 0.0 +466 81 training.label_smoothing 0.10255077130695812 +466 82 model.embedding_dim 1.0 +466 82 training.batch_size 2.0 +466 82 training.label_smoothing 0.8368145721257098 +466 83 
model.embedding_dim 2.0 +466 83 training.batch_size 0.0 +466 83 training.label_smoothing 0.10440087138066091 +466 84 model.embedding_dim 1.0 +466 84 training.batch_size 0.0 +466 84 training.label_smoothing 0.08074439257288919 +466 85 model.embedding_dim 1.0 +466 85 training.batch_size 2.0 +466 85 training.label_smoothing 0.12259080239094479 +466 86 model.embedding_dim 1.0 +466 86 training.batch_size 2.0 +466 86 training.label_smoothing 0.2054460230574882 +466 87 model.embedding_dim 2.0 +466 87 training.batch_size 2.0 +466 87 training.label_smoothing 0.5119536500800964 +466 88 model.embedding_dim 2.0 +466 88 training.batch_size 2.0 +466 88 training.label_smoothing 0.08031131572162423 +466 89 model.embedding_dim 0.0 +466 89 training.batch_size 0.0 +466 89 training.label_smoothing 0.8993463653153564 +466 90 model.embedding_dim 1.0 +466 90 training.batch_size 0.0 +466 90 training.label_smoothing 0.06776818434747876 +466 91 model.embedding_dim 2.0 +466 91 training.batch_size 0.0 +466 91 training.label_smoothing 0.04955615043054908 +466 92 model.embedding_dim 1.0 +466 92 training.batch_size 0.0 +466 92 training.label_smoothing 0.5986885729386089 +466 93 model.embedding_dim 2.0 +466 93 training.batch_size 0.0 +466 93 training.label_smoothing 0.004712137473404204 +466 94 model.embedding_dim 2.0 +466 94 training.batch_size 1.0 +466 94 training.label_smoothing 0.0026983616977998255 +466 95 model.embedding_dim 0.0 +466 95 training.batch_size 1.0 +466 95 training.label_smoothing 0.001242413606612349 +466 96 model.embedding_dim 1.0 +466 96 training.batch_size 1.0 +466 96 training.label_smoothing 0.0022252620287990465 +466 97 model.embedding_dim 2.0 +466 97 training.batch_size 0.0 +466 97 training.label_smoothing 0.9364465616743126 +466 98 model.embedding_dim 1.0 +466 98 training.batch_size 0.0 +466 98 training.label_smoothing 0.007489548257047378 +466 99 model.embedding_dim 0.0 +466 99 training.batch_size 1.0 +466 99 training.label_smoothing 0.8703811712388022 +466 100 
model.embedding_dim 2.0 +466 100 training.batch_size 1.0 +466 100 training.label_smoothing 0.09818992840961689 +466 1 dataset """kinships""" +466 1 model """proje""" +466 1 loss """softplus""" +466 1 regularizer """no""" +466 1 optimizer """adadelta""" +466 1 training_loop """lcwa""" +466 1 evaluator """rankbased""" +466 2 dataset """kinships""" +466 2 model """proje""" +466 2 loss """softplus""" +466 2 regularizer """no""" +466 2 optimizer """adadelta""" +466 2 training_loop """lcwa""" +466 2 evaluator """rankbased""" +466 3 dataset """kinships""" +466 3 model """proje""" +466 3 loss """softplus""" +466 3 regularizer """no""" +466 3 optimizer """adadelta""" +466 3 training_loop """lcwa""" +466 3 evaluator """rankbased""" +466 4 dataset """kinships""" +466 4 model """proje""" +466 4 loss """softplus""" +466 4 regularizer """no""" +466 4 optimizer """adadelta""" +466 4 training_loop """lcwa""" +466 4 evaluator """rankbased""" +466 5 dataset """kinships""" +466 5 model """proje""" +466 5 loss """softplus""" +466 5 regularizer """no""" +466 5 optimizer """adadelta""" +466 5 training_loop """lcwa""" +466 5 evaluator """rankbased""" +466 6 dataset """kinships""" +466 6 model """proje""" +466 6 loss """softplus""" +466 6 regularizer """no""" +466 6 optimizer """adadelta""" +466 6 training_loop """lcwa""" +466 6 evaluator """rankbased""" +466 7 dataset """kinships""" +466 7 model """proje""" +466 7 loss """softplus""" +466 7 regularizer """no""" +466 7 optimizer """adadelta""" +466 7 training_loop """lcwa""" +466 7 evaluator """rankbased""" +466 8 dataset """kinships""" +466 8 model """proje""" +466 8 loss """softplus""" +466 8 regularizer """no""" +466 8 optimizer """adadelta""" +466 8 training_loop """lcwa""" +466 8 evaluator """rankbased""" +466 9 dataset """kinships""" +466 9 model """proje""" +466 9 loss """softplus""" +466 9 regularizer """no""" +466 9 optimizer """adadelta""" +466 9 training_loop """lcwa""" +466 9 evaluator """rankbased""" +466 10 dataset 
"""kinships""" +466 10 model """proje""" +466 10 loss """softplus""" +466 10 regularizer """no""" +466 10 optimizer """adadelta""" +466 10 training_loop """lcwa""" +466 10 evaluator """rankbased""" +466 11 dataset """kinships""" +466 11 model """proje""" +466 11 loss """softplus""" +466 11 regularizer """no""" +466 11 optimizer """adadelta""" +466 11 training_loop """lcwa""" +466 11 evaluator """rankbased""" +466 12 dataset """kinships""" +466 12 model """proje""" +466 12 loss """softplus""" +466 12 regularizer """no""" +466 12 optimizer """adadelta""" +466 12 training_loop """lcwa""" +466 12 evaluator """rankbased""" +466 13 dataset """kinships""" +466 13 model """proje""" +466 13 loss """softplus""" +466 13 regularizer """no""" +466 13 optimizer """adadelta""" +466 13 training_loop """lcwa""" +466 13 evaluator """rankbased""" +466 14 dataset """kinships""" +466 14 model """proje""" +466 14 loss """softplus""" +466 14 regularizer """no""" +466 14 optimizer """adadelta""" +466 14 training_loop """lcwa""" +466 14 evaluator """rankbased""" +466 15 dataset """kinships""" +466 15 model """proje""" +466 15 loss """softplus""" +466 15 regularizer """no""" +466 15 optimizer """adadelta""" +466 15 training_loop """lcwa""" +466 15 evaluator """rankbased""" +466 16 dataset """kinships""" +466 16 model """proje""" +466 16 loss """softplus""" +466 16 regularizer """no""" +466 16 optimizer """adadelta""" +466 16 training_loop """lcwa""" +466 16 evaluator """rankbased""" +466 17 dataset """kinships""" +466 17 model """proje""" +466 17 loss """softplus""" +466 17 regularizer """no""" +466 17 optimizer """adadelta""" +466 17 training_loop """lcwa""" +466 17 evaluator """rankbased""" +466 18 dataset """kinships""" +466 18 model """proje""" +466 18 loss """softplus""" +466 18 regularizer """no""" +466 18 optimizer """adadelta""" +466 18 training_loop """lcwa""" +466 18 evaluator """rankbased""" +466 19 dataset """kinships""" +466 19 model """proje""" +466 19 loss """softplus""" +466 
19 regularizer """no""" +466 19 optimizer """adadelta""" +466 19 training_loop """lcwa""" +466 19 evaluator """rankbased""" +466 20 dataset """kinships""" +466 20 model """proje""" +466 20 loss """softplus""" +466 20 regularizer """no""" +466 20 optimizer """adadelta""" +466 20 training_loop """lcwa""" +466 20 evaluator """rankbased""" +466 21 dataset """kinships""" +466 21 model """proje""" +466 21 loss """softplus""" +466 21 regularizer """no""" +466 21 optimizer """adadelta""" +466 21 training_loop """lcwa""" +466 21 evaluator """rankbased""" +466 22 dataset """kinships""" +466 22 model """proje""" +466 22 loss """softplus""" +466 22 regularizer """no""" +466 22 optimizer """adadelta""" +466 22 training_loop """lcwa""" +466 22 evaluator """rankbased""" +466 23 dataset """kinships""" +466 23 model """proje""" +466 23 loss """softplus""" +466 23 regularizer """no""" +466 23 optimizer """adadelta""" +466 23 training_loop """lcwa""" +466 23 evaluator """rankbased""" +466 24 dataset """kinships""" +466 24 model """proje""" +466 24 loss """softplus""" +466 24 regularizer """no""" +466 24 optimizer """adadelta""" +466 24 training_loop """lcwa""" +466 24 evaluator """rankbased""" +466 25 dataset """kinships""" +466 25 model """proje""" +466 25 loss """softplus""" +466 25 regularizer """no""" +466 25 optimizer """adadelta""" +466 25 training_loop """lcwa""" +466 25 evaluator """rankbased""" +466 26 dataset """kinships""" +466 26 model """proje""" +466 26 loss """softplus""" +466 26 regularizer """no""" +466 26 optimizer """adadelta""" +466 26 training_loop """lcwa""" +466 26 evaluator """rankbased""" +466 27 dataset """kinships""" +466 27 model """proje""" +466 27 loss """softplus""" +466 27 regularizer """no""" +466 27 optimizer """adadelta""" +466 27 training_loop """lcwa""" +466 27 evaluator """rankbased""" +466 28 dataset """kinships""" +466 28 model """proje""" +466 28 loss """softplus""" +466 28 regularizer """no""" +466 28 optimizer """adadelta""" +466 28 
training_loop """lcwa""" +466 28 evaluator """rankbased""" +466 29 dataset """kinships""" +466 29 model """proje""" +466 29 loss """softplus""" +466 29 regularizer """no""" +466 29 optimizer """adadelta""" +466 29 training_loop """lcwa""" +466 29 evaluator """rankbased""" +466 30 dataset """kinships""" +466 30 model """proje""" +466 30 loss """softplus""" +466 30 regularizer """no""" +466 30 optimizer """adadelta""" +466 30 training_loop """lcwa""" +466 30 evaluator """rankbased""" +466 31 dataset """kinships""" +466 31 model """proje""" +466 31 loss """softplus""" +466 31 regularizer """no""" +466 31 optimizer """adadelta""" +466 31 training_loop """lcwa""" +466 31 evaluator """rankbased""" +466 32 dataset """kinships""" +466 32 model """proje""" +466 32 loss """softplus""" +466 32 regularizer """no""" +466 32 optimizer """adadelta""" +466 32 training_loop """lcwa""" +466 32 evaluator """rankbased""" +466 33 dataset """kinships""" +466 33 model """proje""" +466 33 loss """softplus""" +466 33 regularizer """no""" +466 33 optimizer """adadelta""" +466 33 training_loop """lcwa""" +466 33 evaluator """rankbased""" +466 34 dataset """kinships""" +466 34 model """proje""" +466 34 loss """softplus""" +466 34 regularizer """no""" +466 34 optimizer """adadelta""" +466 34 training_loop """lcwa""" +466 34 evaluator """rankbased""" +466 35 dataset """kinships""" +466 35 model """proje""" +466 35 loss """softplus""" +466 35 regularizer """no""" +466 35 optimizer """adadelta""" +466 35 training_loop """lcwa""" +466 35 evaluator """rankbased""" +466 36 dataset """kinships""" +466 36 model """proje""" +466 36 loss """softplus""" +466 36 regularizer """no""" +466 36 optimizer """adadelta""" +466 36 training_loop """lcwa""" +466 36 evaluator """rankbased""" +466 37 dataset """kinships""" +466 37 model """proje""" +466 37 loss """softplus""" +466 37 regularizer """no""" +466 37 optimizer """adadelta""" +466 37 training_loop """lcwa""" +466 37 evaluator """rankbased""" +466 38 
dataset """kinships""" +466 38 model """proje""" +466 38 loss """softplus""" +466 38 regularizer """no""" +466 38 optimizer """adadelta""" +466 38 training_loop """lcwa""" +466 38 evaluator """rankbased""" +466 39 dataset """kinships""" +466 39 model """proje""" +466 39 loss """softplus""" +466 39 regularizer """no""" +466 39 optimizer """adadelta""" +466 39 training_loop """lcwa""" +466 39 evaluator """rankbased""" +466 40 dataset """kinships""" +466 40 model """proje""" +466 40 loss """softplus""" +466 40 regularizer """no""" +466 40 optimizer """adadelta""" +466 40 training_loop """lcwa""" +466 40 evaluator """rankbased""" +466 41 dataset """kinships""" +466 41 model """proje""" +466 41 loss """softplus""" +466 41 regularizer """no""" +466 41 optimizer """adadelta""" +466 41 training_loop """lcwa""" +466 41 evaluator """rankbased""" +466 42 dataset """kinships""" +466 42 model """proje""" +466 42 loss """softplus""" +466 42 regularizer """no""" +466 42 optimizer """adadelta""" +466 42 training_loop """lcwa""" +466 42 evaluator """rankbased""" +466 43 dataset """kinships""" +466 43 model """proje""" +466 43 loss """softplus""" +466 43 regularizer """no""" +466 43 optimizer """adadelta""" +466 43 training_loop """lcwa""" +466 43 evaluator """rankbased""" +466 44 dataset """kinships""" +466 44 model """proje""" +466 44 loss """softplus""" +466 44 regularizer """no""" +466 44 optimizer """adadelta""" +466 44 training_loop """lcwa""" +466 44 evaluator """rankbased""" +466 45 dataset """kinships""" +466 45 model """proje""" +466 45 loss """softplus""" +466 45 regularizer """no""" +466 45 optimizer """adadelta""" +466 45 training_loop """lcwa""" +466 45 evaluator """rankbased""" +466 46 dataset """kinships""" +466 46 model """proje""" +466 46 loss """softplus""" +466 46 regularizer """no""" +466 46 optimizer """adadelta""" +466 46 training_loop """lcwa""" +466 46 evaluator """rankbased""" +466 47 dataset """kinships""" +466 47 model """proje""" +466 47 loss 
"""softplus""" +466 47 regularizer """no""" +466 47 optimizer """adadelta""" +466 47 training_loop """lcwa""" +466 47 evaluator """rankbased""" +466 48 dataset """kinships""" +466 48 model """proje""" +466 48 loss """softplus""" +466 48 regularizer """no""" +466 48 optimizer """adadelta""" +466 48 training_loop """lcwa""" +466 48 evaluator """rankbased""" +466 49 dataset """kinships""" +466 49 model """proje""" +466 49 loss """softplus""" +466 49 regularizer """no""" +466 49 optimizer """adadelta""" +466 49 training_loop """lcwa""" +466 49 evaluator """rankbased""" +466 50 dataset """kinships""" +466 50 model """proje""" +466 50 loss """softplus""" +466 50 regularizer """no""" +466 50 optimizer """adadelta""" +466 50 training_loop """lcwa""" +466 50 evaluator """rankbased""" +466 51 dataset """kinships""" +466 51 model """proje""" +466 51 loss """softplus""" +466 51 regularizer """no""" +466 51 optimizer """adadelta""" +466 51 training_loop """lcwa""" +466 51 evaluator """rankbased""" +466 52 dataset """kinships""" +466 52 model """proje""" +466 52 loss """softplus""" +466 52 regularizer """no""" +466 52 optimizer """adadelta""" +466 52 training_loop """lcwa""" +466 52 evaluator """rankbased""" +466 53 dataset """kinships""" +466 53 model """proje""" +466 53 loss """softplus""" +466 53 regularizer """no""" +466 53 optimizer """adadelta""" +466 53 training_loop """lcwa""" +466 53 evaluator """rankbased""" +466 54 dataset """kinships""" +466 54 model """proje""" +466 54 loss """softplus""" +466 54 regularizer """no""" +466 54 optimizer """adadelta""" +466 54 training_loop """lcwa""" +466 54 evaluator """rankbased""" +466 55 dataset """kinships""" +466 55 model """proje""" +466 55 loss """softplus""" +466 55 regularizer """no""" +466 55 optimizer """adadelta""" +466 55 training_loop """lcwa""" +466 55 evaluator """rankbased""" +466 56 dataset """kinships""" +466 56 model """proje""" +466 56 loss """softplus""" +466 56 regularizer """no""" +466 56 optimizer 
"""adadelta""" +466 56 training_loop """lcwa""" +466 56 evaluator """rankbased""" +466 57 dataset """kinships""" +466 57 model """proje""" +466 57 loss """softplus""" +466 57 regularizer """no""" +466 57 optimizer """adadelta""" +466 57 training_loop """lcwa""" +466 57 evaluator """rankbased""" +466 58 dataset """kinships""" +466 58 model """proje""" +466 58 loss """softplus""" +466 58 regularizer """no""" +466 58 optimizer """adadelta""" +466 58 training_loop """lcwa""" +466 58 evaluator """rankbased""" +466 59 dataset """kinships""" +466 59 model """proje""" +466 59 loss """softplus""" +466 59 regularizer """no""" +466 59 optimizer """adadelta""" +466 59 training_loop """lcwa""" +466 59 evaluator """rankbased""" +466 60 dataset """kinships""" +466 60 model """proje""" +466 60 loss """softplus""" +466 60 regularizer """no""" +466 60 optimizer """adadelta""" +466 60 training_loop """lcwa""" +466 60 evaluator """rankbased""" +466 61 dataset """kinships""" +466 61 model """proje""" +466 61 loss """softplus""" +466 61 regularizer """no""" +466 61 optimizer """adadelta""" +466 61 training_loop """lcwa""" +466 61 evaluator """rankbased""" +466 62 dataset """kinships""" +466 62 model """proje""" +466 62 loss """softplus""" +466 62 regularizer """no""" +466 62 optimizer """adadelta""" +466 62 training_loop """lcwa""" +466 62 evaluator """rankbased""" +466 63 dataset """kinships""" +466 63 model """proje""" +466 63 loss """softplus""" +466 63 regularizer """no""" +466 63 optimizer """adadelta""" +466 63 training_loop """lcwa""" +466 63 evaluator """rankbased""" +466 64 dataset """kinships""" +466 64 model """proje""" +466 64 loss """softplus""" +466 64 regularizer """no""" +466 64 optimizer """adadelta""" +466 64 training_loop """lcwa""" +466 64 evaluator """rankbased""" +466 65 dataset """kinships""" +466 65 model """proje""" +466 65 loss """softplus""" +466 65 regularizer """no""" +466 65 optimizer """adadelta""" +466 65 training_loop """lcwa""" +466 65 evaluator 
"""rankbased""" +466 66 dataset """kinships""" +466 66 model """proje""" +466 66 loss """softplus""" +466 66 regularizer """no""" +466 66 optimizer """adadelta""" +466 66 training_loop """lcwa""" +466 66 evaluator """rankbased""" +466 67 dataset """kinships""" +466 67 model """proje""" +466 67 loss """softplus""" +466 67 regularizer """no""" +466 67 optimizer """adadelta""" +466 67 training_loop """lcwa""" +466 67 evaluator """rankbased""" +466 68 dataset """kinships""" +466 68 model """proje""" +466 68 loss """softplus""" +466 68 regularizer """no""" +466 68 optimizer """adadelta""" +466 68 training_loop """lcwa""" +466 68 evaluator """rankbased""" +466 69 dataset """kinships""" +466 69 model """proje""" +466 69 loss """softplus""" +466 69 regularizer """no""" +466 69 optimizer """adadelta""" +466 69 training_loop """lcwa""" +466 69 evaluator """rankbased""" +466 70 dataset """kinships""" +466 70 model """proje""" +466 70 loss """softplus""" +466 70 regularizer """no""" +466 70 optimizer """adadelta""" +466 70 training_loop """lcwa""" +466 70 evaluator """rankbased""" +466 71 dataset """kinships""" +466 71 model """proje""" +466 71 loss """softplus""" +466 71 regularizer """no""" +466 71 optimizer """adadelta""" +466 71 training_loop """lcwa""" +466 71 evaluator """rankbased""" +466 72 dataset """kinships""" +466 72 model """proje""" +466 72 loss """softplus""" +466 72 regularizer """no""" +466 72 optimizer """adadelta""" +466 72 training_loop """lcwa""" +466 72 evaluator """rankbased""" +466 73 dataset """kinships""" +466 73 model """proje""" +466 73 loss """softplus""" +466 73 regularizer """no""" +466 73 optimizer """adadelta""" +466 73 training_loop """lcwa""" +466 73 evaluator """rankbased""" +466 74 dataset """kinships""" +466 74 model """proje""" +466 74 loss """softplus""" +466 74 regularizer """no""" +466 74 optimizer """adadelta""" +466 74 training_loop """lcwa""" +466 74 evaluator """rankbased""" +466 75 dataset """kinships""" +466 75 model """proje""" 
+466 75 loss """softplus""" +466 75 regularizer """no""" +466 75 optimizer """adadelta""" +466 75 training_loop """lcwa""" +466 75 evaluator """rankbased""" +466 76 dataset """kinships""" +466 76 model """proje""" +466 76 loss """softplus""" +466 76 regularizer """no""" +466 76 optimizer """adadelta""" +466 76 training_loop """lcwa""" +466 76 evaluator """rankbased""" +466 77 dataset """kinships""" +466 77 model """proje""" +466 77 loss """softplus""" +466 77 regularizer """no""" +466 77 optimizer """adadelta""" +466 77 training_loop """lcwa""" +466 77 evaluator """rankbased""" +466 78 dataset """kinships""" +466 78 model """proje""" +466 78 loss """softplus""" +466 78 regularizer """no""" +466 78 optimizer """adadelta""" +466 78 training_loop """lcwa""" +466 78 evaluator """rankbased""" +466 79 dataset """kinships""" +466 79 model """proje""" +466 79 loss """softplus""" +466 79 regularizer """no""" +466 79 optimizer """adadelta""" +466 79 training_loop """lcwa""" +466 79 evaluator """rankbased""" +466 80 dataset """kinships""" +466 80 model """proje""" +466 80 loss """softplus""" +466 80 regularizer """no""" +466 80 optimizer """adadelta""" +466 80 training_loop """lcwa""" +466 80 evaluator """rankbased""" +466 81 dataset """kinships""" +466 81 model """proje""" +466 81 loss """softplus""" +466 81 regularizer """no""" +466 81 optimizer """adadelta""" +466 81 training_loop """lcwa""" +466 81 evaluator """rankbased""" +466 82 dataset """kinships""" +466 82 model """proje""" +466 82 loss """softplus""" +466 82 regularizer """no""" +466 82 optimizer """adadelta""" +466 82 training_loop """lcwa""" +466 82 evaluator """rankbased""" +466 83 dataset """kinships""" +466 83 model """proje""" +466 83 loss """softplus""" +466 83 regularizer """no""" +466 83 optimizer """adadelta""" +466 83 training_loop """lcwa""" +466 83 evaluator """rankbased""" +466 84 dataset """kinships""" +466 84 model """proje""" +466 84 loss """softplus""" +466 84 regularizer """no""" +466 84 
optimizer """adadelta""" +466 84 training_loop """lcwa""" +466 84 evaluator """rankbased""" +466 85 dataset """kinships""" +466 85 model """proje""" +466 85 loss """softplus""" +466 85 regularizer """no""" +466 85 optimizer """adadelta""" +466 85 training_loop """lcwa""" +466 85 evaluator """rankbased""" +466 86 dataset """kinships""" +466 86 model """proje""" +466 86 loss """softplus""" +466 86 regularizer """no""" +466 86 optimizer """adadelta""" +466 86 training_loop """lcwa""" +466 86 evaluator """rankbased""" +466 87 dataset """kinships""" +466 87 model """proje""" +466 87 loss """softplus""" +466 87 regularizer """no""" +466 87 optimizer """adadelta""" +466 87 training_loop """lcwa""" +466 87 evaluator """rankbased""" +466 88 dataset """kinships""" +466 88 model """proje""" +466 88 loss """softplus""" +466 88 regularizer """no""" +466 88 optimizer """adadelta""" +466 88 training_loop """lcwa""" +466 88 evaluator """rankbased""" +466 89 dataset """kinships""" +466 89 model """proje""" +466 89 loss """softplus""" +466 89 regularizer """no""" +466 89 optimizer """adadelta""" +466 89 training_loop """lcwa""" +466 89 evaluator """rankbased""" +466 90 dataset """kinships""" +466 90 model """proje""" +466 90 loss """softplus""" +466 90 regularizer """no""" +466 90 optimizer """adadelta""" +466 90 training_loop """lcwa""" +466 90 evaluator """rankbased""" +466 91 dataset """kinships""" +466 91 model """proje""" +466 91 loss """softplus""" +466 91 regularizer """no""" +466 91 optimizer """adadelta""" +466 91 training_loop """lcwa""" +466 91 evaluator """rankbased""" +466 92 dataset """kinships""" +466 92 model """proje""" +466 92 loss """softplus""" +466 92 regularizer """no""" +466 92 optimizer """adadelta""" +466 92 training_loop """lcwa""" +466 92 evaluator """rankbased""" +466 93 dataset """kinships""" +466 93 model """proje""" +466 93 loss """softplus""" +466 93 regularizer """no""" +466 93 optimizer """adadelta""" +466 93 training_loop """lcwa""" +466 93 
evaluator """rankbased""" +466 94 dataset """kinships""" +466 94 model """proje""" +466 94 loss """softplus""" +466 94 regularizer """no""" +466 94 optimizer """adadelta""" +466 94 training_loop """lcwa""" +466 94 evaluator """rankbased""" +466 95 dataset """kinships""" +466 95 model """proje""" +466 95 loss """softplus""" +466 95 regularizer """no""" +466 95 optimizer """adadelta""" +466 95 training_loop """lcwa""" +466 95 evaluator """rankbased""" +466 96 dataset """kinships""" +466 96 model """proje""" +466 96 loss """softplus""" +466 96 regularizer """no""" +466 96 optimizer """adadelta""" +466 96 training_loop """lcwa""" +466 96 evaluator """rankbased""" +466 97 dataset """kinships""" +466 97 model """proje""" +466 97 loss """softplus""" +466 97 regularizer """no""" +466 97 optimizer """adadelta""" +466 97 training_loop """lcwa""" +466 97 evaluator """rankbased""" +466 98 dataset """kinships""" +466 98 model """proje""" +466 98 loss """softplus""" +466 98 regularizer """no""" +466 98 optimizer """adadelta""" +466 98 training_loop """lcwa""" +466 98 evaluator """rankbased""" +466 99 dataset """kinships""" +466 99 model """proje""" +466 99 loss """softplus""" +466 99 regularizer """no""" +466 99 optimizer """adadelta""" +466 99 training_loop """lcwa""" +466 99 evaluator """rankbased""" +466 100 dataset """kinships""" +466 100 model """proje""" +466 100 loss """softplus""" +466 100 regularizer """no""" +466 100 optimizer """adadelta""" +466 100 training_loop """lcwa""" +466 100 evaluator """rankbased""" +467 1 model.embedding_dim 0.0 +467 1 training.batch_size 1.0 +467 1 training.label_smoothing 0.0012080654459229482 +467 2 model.embedding_dim 0.0 +467 2 training.batch_size 1.0 +467 2 training.label_smoothing 0.02459234568847803 +467 3 model.embedding_dim 2.0 +467 3 training.batch_size 0.0 +467 3 training.label_smoothing 0.0498271911504591 +467 4 model.embedding_dim 0.0 +467 4 training.batch_size 1.0 +467 4 training.label_smoothing 0.0017776210182459836 +467 5 
model.embedding_dim 1.0 +467 5 training.batch_size 0.0 +467 5 training.label_smoothing 0.3730908153117602 +467 6 model.embedding_dim 1.0 +467 6 training.batch_size 2.0 +467 6 training.label_smoothing 0.0607448583062274 +467 7 model.embedding_dim 2.0 +467 7 training.batch_size 2.0 +467 7 training.label_smoothing 0.4287981530923866 +467 8 model.embedding_dim 1.0 +467 8 training.batch_size 0.0 +467 8 training.label_smoothing 0.015888807120821087 +467 9 model.embedding_dim 1.0 +467 9 training.batch_size 0.0 +467 9 training.label_smoothing 0.005228402443278208 +467 10 model.embedding_dim 1.0 +467 10 training.batch_size 1.0 +467 10 training.label_smoothing 0.29771010311049867 +467 11 model.embedding_dim 2.0 +467 11 training.batch_size 1.0 +467 11 training.label_smoothing 0.006902575257046079 +467 12 model.embedding_dim 1.0 +467 12 training.batch_size 2.0 +467 12 training.label_smoothing 0.003696868706320063 +467 13 model.embedding_dim 1.0 +467 13 training.batch_size 2.0 +467 13 training.label_smoothing 0.11071400169606845 +467 14 model.embedding_dim 1.0 +467 14 training.batch_size 1.0 +467 14 training.label_smoothing 0.3809799327087825 +467 15 model.embedding_dim 1.0 +467 15 training.batch_size 2.0 +467 15 training.label_smoothing 0.0011491918855614685 +467 16 model.embedding_dim 2.0 +467 16 training.batch_size 1.0 +467 16 training.label_smoothing 0.0022357735892544646 +467 17 model.embedding_dim 1.0 +467 17 training.batch_size 2.0 +467 17 training.label_smoothing 0.01508320510985905 +467 18 model.embedding_dim 0.0 +467 18 training.batch_size 0.0 +467 18 training.label_smoothing 0.21104988604730193 +467 19 model.embedding_dim 2.0 +467 19 training.batch_size 2.0 +467 19 training.label_smoothing 0.5126790492500217 +467 20 model.embedding_dim 2.0 +467 20 training.batch_size 2.0 +467 20 training.label_smoothing 0.0041977235613628126 +467 21 model.embedding_dim 2.0 +467 21 training.batch_size 1.0 +467 21 training.label_smoothing 0.04491484327467378 +467 22 model.embedding_dim 
0.0 +467 22 training.batch_size 2.0 +467 22 training.label_smoothing 0.0011691143722560961 +467 23 model.embedding_dim 2.0 +467 23 training.batch_size 2.0 +467 23 training.label_smoothing 0.003796869639779541 +467 24 model.embedding_dim 2.0 +467 24 training.batch_size 2.0 +467 24 training.label_smoothing 0.015190340775952533 +467 25 model.embedding_dim 0.0 +467 25 training.batch_size 1.0 +467 25 training.label_smoothing 0.007366133327775355 +467 26 model.embedding_dim 0.0 +467 26 training.batch_size 0.0 +467 26 training.label_smoothing 0.02158441704884585 +467 27 model.embedding_dim 0.0 +467 27 training.batch_size 1.0 +467 27 training.label_smoothing 0.00556792814906542 +467 28 model.embedding_dim 2.0 +467 28 training.batch_size 1.0 +467 28 training.label_smoothing 0.017223098666633706 +467 29 model.embedding_dim 2.0 +467 29 training.batch_size 2.0 +467 29 training.label_smoothing 0.0446311068996276 +467 30 model.embedding_dim 1.0 +467 30 training.batch_size 0.0 +467 30 training.label_smoothing 0.026328362699452948 +467 31 model.embedding_dim 2.0 +467 31 training.batch_size 2.0 +467 31 training.label_smoothing 0.5668704143338844 +467 32 model.embedding_dim 2.0 +467 32 training.batch_size 0.0 +467 32 training.label_smoothing 0.025197431922285716 +467 33 model.embedding_dim 2.0 +467 33 training.batch_size 1.0 +467 33 training.label_smoothing 0.023251667448403825 +467 34 model.embedding_dim 0.0 +467 34 training.batch_size 2.0 +467 34 training.label_smoothing 0.08115748617098599 +467 35 model.embedding_dim 2.0 +467 35 training.batch_size 1.0 +467 35 training.label_smoothing 0.18600984847166252 +467 36 model.embedding_dim 2.0 +467 36 training.batch_size 2.0 +467 36 training.label_smoothing 0.17279644207544598 +467 37 model.embedding_dim 1.0 +467 37 training.batch_size 2.0 +467 37 training.label_smoothing 0.19076835399997866 +467 38 model.embedding_dim 0.0 +467 38 training.batch_size 2.0 +467 38 training.label_smoothing 0.002852666276604762 +467 39 model.embedding_dim 
2.0 +467 39 training.batch_size 0.0 +467 39 training.label_smoothing 0.548090416566057 +467 40 model.embedding_dim 1.0 +467 40 training.batch_size 2.0 +467 40 training.label_smoothing 0.032453482155995986 +467 41 model.embedding_dim 1.0 +467 41 training.batch_size 1.0 +467 41 training.label_smoothing 0.010132181384507591 +467 42 model.embedding_dim 2.0 +467 42 training.batch_size 0.0 +467 42 training.label_smoothing 0.25716639492426246 +467 43 model.embedding_dim 2.0 +467 43 training.batch_size 2.0 +467 43 training.label_smoothing 0.4799133125198321 +467 44 model.embedding_dim 2.0 +467 44 training.batch_size 0.0 +467 44 training.label_smoothing 0.002478261586374432 +467 45 model.embedding_dim 1.0 +467 45 training.batch_size 2.0 +467 45 training.label_smoothing 0.0013308555484496192 +467 46 model.embedding_dim 1.0 +467 46 training.batch_size 0.0 +467 46 training.label_smoothing 0.20039103389424337 +467 47 model.embedding_dim 1.0 +467 47 training.batch_size 0.0 +467 47 training.label_smoothing 0.004906005725397136 +467 48 model.embedding_dim 2.0 +467 48 training.batch_size 0.0 +467 48 training.label_smoothing 0.6897934847031076 +467 49 model.embedding_dim 0.0 +467 49 training.batch_size 1.0 +467 49 training.label_smoothing 0.059187476895029574 +467 50 model.embedding_dim 0.0 +467 50 training.batch_size 1.0 +467 50 training.label_smoothing 0.1627437948686935 +467 51 model.embedding_dim 0.0 +467 51 training.batch_size 0.0 +467 51 training.label_smoothing 0.03280727772496467 +467 52 model.embedding_dim 0.0 +467 52 training.batch_size 2.0 +467 52 training.label_smoothing 0.26137478367772704 +467 53 model.embedding_dim 1.0 +467 53 training.batch_size 0.0 +467 53 training.label_smoothing 0.3168868990067672 +467 54 model.embedding_dim 2.0 +467 54 training.batch_size 2.0 +467 54 training.label_smoothing 0.1909397264355524 +467 55 model.embedding_dim 0.0 +467 55 training.batch_size 0.0 +467 55 training.label_smoothing 0.429736349622204 +467 56 model.embedding_dim 1.0 +467 56 
training.batch_size 2.0 +467 56 training.label_smoothing 0.003984854522060253 +467 57 model.embedding_dim 0.0 +467 57 training.batch_size 2.0 +467 57 training.label_smoothing 0.5118123953611521 +467 58 model.embedding_dim 1.0 +467 58 training.batch_size 2.0 +467 58 training.label_smoothing 0.017367895929714685 +467 59 model.embedding_dim 1.0 +467 59 training.batch_size 0.0 +467 59 training.label_smoothing 0.004903565499812407 +467 60 model.embedding_dim 1.0 +467 60 training.batch_size 2.0 +467 60 training.label_smoothing 0.00383672911680763 +467 61 model.embedding_dim 0.0 +467 61 training.batch_size 2.0 +467 61 training.label_smoothing 0.025965799553106638 +467 62 model.embedding_dim 1.0 +467 62 training.batch_size 0.0 +467 62 training.label_smoothing 0.7527150898180944 +467 63 model.embedding_dim 0.0 +467 63 training.batch_size 2.0 +467 63 training.label_smoothing 0.007278672048172565 +467 64 model.embedding_dim 2.0 +467 64 training.batch_size 1.0 +467 64 training.label_smoothing 0.008059129586947088 +467 65 model.embedding_dim 0.0 +467 65 training.batch_size 2.0 +467 65 training.label_smoothing 0.6633243629412532 +467 66 model.embedding_dim 1.0 +467 66 training.batch_size 1.0 +467 66 training.label_smoothing 0.005621854046155456 +467 67 model.embedding_dim 0.0 +467 67 training.batch_size 2.0 +467 67 training.label_smoothing 0.0773514262190602 +467 68 model.embedding_dim 1.0 +467 68 training.batch_size 1.0 +467 68 training.label_smoothing 0.9667406901274381 +467 69 model.embedding_dim 2.0 +467 69 training.batch_size 1.0 +467 69 training.label_smoothing 0.9526052391558338 +467 70 model.embedding_dim 1.0 +467 70 training.batch_size 0.0 +467 70 training.label_smoothing 0.11393573610047422 +467 71 model.embedding_dim 1.0 +467 71 training.batch_size 1.0 +467 71 training.label_smoothing 0.020953726431859703 +467 72 model.embedding_dim 1.0 +467 72 training.batch_size 0.0 +467 72 training.label_smoothing 0.020176760160895334 +467 73 model.embedding_dim 2.0 +467 73 
training.batch_size 1.0 +467 73 training.label_smoothing 0.005174180905760249 +467 74 model.embedding_dim 1.0 +467 74 training.batch_size 2.0 +467 74 training.label_smoothing 0.4358915714583336 +467 75 model.embedding_dim 2.0 +467 75 training.batch_size 0.0 +467 75 training.label_smoothing 0.04730220913549416 +467 76 model.embedding_dim 2.0 +467 76 training.batch_size 2.0 +467 76 training.label_smoothing 0.10356646051660916 +467 77 model.embedding_dim 1.0 +467 77 training.batch_size 0.0 +467 77 training.label_smoothing 0.30934299105524626 +467 78 model.embedding_dim 1.0 +467 78 training.batch_size 0.0 +467 78 training.label_smoothing 0.003803194959270822 +467 79 model.embedding_dim 2.0 +467 79 training.batch_size 1.0 +467 79 training.label_smoothing 0.005647644602762952 +467 80 model.embedding_dim 0.0 +467 80 training.batch_size 2.0 +467 80 training.label_smoothing 0.31910297753751243 +467 81 model.embedding_dim 2.0 +467 81 training.batch_size 2.0 +467 81 training.label_smoothing 0.0232844307729168 +467 82 model.embedding_dim 1.0 +467 82 training.batch_size 0.0 +467 82 training.label_smoothing 0.01517473444990087 +467 83 model.embedding_dim 1.0 +467 83 training.batch_size 1.0 +467 83 training.label_smoothing 0.0029034450540546194 +467 84 model.embedding_dim 1.0 +467 84 training.batch_size 1.0 +467 84 training.label_smoothing 0.008901918595189769 +467 85 model.embedding_dim 0.0 +467 85 training.batch_size 1.0 +467 85 training.label_smoothing 0.0010423901182960472 +467 86 model.embedding_dim 1.0 +467 86 training.batch_size 2.0 +467 86 training.label_smoothing 0.002762179494985788 +467 87 model.embedding_dim 1.0 +467 87 training.batch_size 0.0 +467 87 training.label_smoothing 0.0011632757027491584 +467 88 model.embedding_dim 0.0 +467 88 training.batch_size 1.0 +467 88 training.label_smoothing 0.003560691133616408 +467 89 model.embedding_dim 2.0 +467 89 training.batch_size 2.0 +467 89 training.label_smoothing 0.14241813374417048 +467 90 model.embedding_dim 1.0 +467 90 
training.batch_size 1.0 +467 90 training.label_smoothing 0.005162613648532179 +467 91 model.embedding_dim 0.0 +467 91 training.batch_size 0.0 +467 91 training.label_smoothing 0.3702845186971646 +467 92 model.embedding_dim 1.0 +467 92 training.batch_size 0.0 +467 92 training.label_smoothing 0.46124913244893756 +467 93 model.embedding_dim 2.0 +467 93 training.batch_size 1.0 +467 93 training.label_smoothing 0.001061084969273147 +467 94 model.embedding_dim 1.0 +467 94 training.batch_size 0.0 +467 94 training.label_smoothing 0.7161383364659079 +467 95 model.embedding_dim 1.0 +467 95 training.batch_size 1.0 +467 95 training.label_smoothing 0.005178831676275767 +467 96 model.embedding_dim 2.0 +467 96 training.batch_size 1.0 +467 96 training.label_smoothing 0.07642462007309049 +467 97 model.embedding_dim 0.0 +467 97 training.batch_size 0.0 +467 97 training.label_smoothing 0.6476268467285431 +467 98 model.embedding_dim 2.0 +467 98 training.batch_size 2.0 +467 98 training.label_smoothing 0.0010119376756866825 +467 99 model.embedding_dim 0.0 +467 99 training.batch_size 0.0 +467 99 training.label_smoothing 0.07689947091895753 +467 100 model.embedding_dim 0.0 +467 100 training.batch_size 1.0 +467 100 training.label_smoothing 0.0012331944162394786 +467 1 dataset """kinships""" +467 1 model """proje""" +467 1 loss """bceaftersigmoid""" +467 1 regularizer """no""" +467 1 optimizer """adadelta""" +467 1 training_loop """lcwa""" +467 1 evaluator """rankbased""" +467 2 dataset """kinships""" +467 2 model """proje""" +467 2 loss """bceaftersigmoid""" +467 2 regularizer """no""" +467 2 optimizer """adadelta""" +467 2 training_loop """lcwa""" +467 2 evaluator """rankbased""" +467 3 dataset """kinships""" +467 3 model """proje""" +467 3 loss """bceaftersigmoid""" +467 3 regularizer """no""" +467 3 optimizer """adadelta""" +467 3 training_loop """lcwa""" +467 3 evaluator """rankbased""" +467 4 dataset """kinships""" +467 4 model """proje""" +467 4 loss """bceaftersigmoid""" +467 4 
regularizer """no""" +467 4 optimizer """adadelta""" +467 4 training_loop """lcwa""" +467 4 evaluator """rankbased""" +467 5 dataset """kinships""" +467 5 model """proje""" +467 5 loss """bceaftersigmoid""" +467 5 regularizer """no""" +467 5 optimizer """adadelta""" +467 5 training_loop """lcwa""" +467 5 evaluator """rankbased""" +467 6 dataset """kinships""" +467 6 model """proje""" +467 6 loss """bceaftersigmoid""" +467 6 regularizer """no""" +467 6 optimizer """adadelta""" +467 6 training_loop """lcwa""" +467 6 evaluator """rankbased""" +467 7 dataset """kinships""" +467 7 model """proje""" +467 7 loss """bceaftersigmoid""" +467 7 regularizer """no""" +467 7 optimizer """adadelta""" +467 7 training_loop """lcwa""" +467 7 evaluator """rankbased""" +467 8 dataset """kinships""" +467 8 model """proje""" +467 8 loss """bceaftersigmoid""" +467 8 regularizer """no""" +467 8 optimizer """adadelta""" +467 8 training_loop """lcwa""" +467 8 evaluator """rankbased""" +467 9 dataset """kinships""" +467 9 model """proje""" +467 9 loss """bceaftersigmoid""" +467 9 regularizer """no""" +467 9 optimizer """adadelta""" +467 9 training_loop """lcwa""" +467 9 evaluator """rankbased""" +467 10 dataset """kinships""" +467 10 model """proje""" +467 10 loss """bceaftersigmoid""" +467 10 regularizer """no""" +467 10 optimizer """adadelta""" +467 10 training_loop """lcwa""" +467 10 evaluator """rankbased""" +467 11 dataset """kinships""" +467 11 model """proje""" +467 11 loss """bceaftersigmoid""" +467 11 regularizer """no""" +467 11 optimizer """adadelta""" +467 11 training_loop """lcwa""" +467 11 evaluator """rankbased""" +467 12 dataset """kinships""" +467 12 model """proje""" +467 12 loss """bceaftersigmoid""" +467 12 regularizer """no""" +467 12 optimizer """adadelta""" +467 12 training_loop """lcwa""" +467 12 evaluator """rankbased""" +467 13 dataset """kinships""" +467 13 model """proje""" +467 13 loss """bceaftersigmoid""" +467 13 regularizer """no""" +467 13 optimizer 
"""adadelta""" +467 13 training_loop """lcwa""" +467 13 evaluator """rankbased""" +467 14 dataset """kinships""" +467 14 model """proje""" +467 14 loss """bceaftersigmoid""" +467 14 regularizer """no""" +467 14 optimizer """adadelta""" +467 14 training_loop """lcwa""" +467 14 evaluator """rankbased""" +467 15 dataset """kinships""" +467 15 model """proje""" +467 15 loss """bceaftersigmoid""" +467 15 regularizer """no""" +467 15 optimizer """adadelta""" +467 15 training_loop """lcwa""" +467 15 evaluator """rankbased""" +467 16 dataset """kinships""" +467 16 model """proje""" +467 16 loss """bceaftersigmoid""" +467 16 regularizer """no""" +467 16 optimizer """adadelta""" +467 16 training_loop """lcwa""" +467 16 evaluator """rankbased""" +467 17 dataset """kinships""" +467 17 model """proje""" +467 17 loss """bceaftersigmoid""" +467 17 regularizer """no""" +467 17 optimizer """adadelta""" +467 17 training_loop """lcwa""" +467 17 evaluator """rankbased""" +467 18 dataset """kinships""" +467 18 model """proje""" +467 18 loss """bceaftersigmoid""" +467 18 regularizer """no""" +467 18 optimizer """adadelta""" +467 18 training_loop """lcwa""" +467 18 evaluator """rankbased""" +467 19 dataset """kinships""" +467 19 model """proje""" +467 19 loss """bceaftersigmoid""" +467 19 regularizer """no""" +467 19 optimizer """adadelta""" +467 19 training_loop """lcwa""" +467 19 evaluator """rankbased""" +467 20 dataset """kinships""" +467 20 model """proje""" +467 20 loss """bceaftersigmoid""" +467 20 regularizer """no""" +467 20 optimizer """adadelta""" +467 20 training_loop """lcwa""" +467 20 evaluator """rankbased""" +467 21 dataset """kinships""" +467 21 model """proje""" +467 21 loss """bceaftersigmoid""" +467 21 regularizer """no""" +467 21 optimizer """adadelta""" +467 21 training_loop """lcwa""" +467 21 evaluator """rankbased""" +467 22 dataset """kinships""" +467 22 model """proje""" +467 22 loss """bceaftersigmoid""" +467 22 regularizer """no""" +467 22 optimizer 
"""adadelta""" +467 22 training_loop """lcwa""" +467 22 evaluator """rankbased""" +467 23 dataset """kinships""" +467 23 model """proje""" +467 23 loss """bceaftersigmoid""" +467 23 regularizer """no""" +467 23 optimizer """adadelta""" +467 23 training_loop """lcwa""" +467 23 evaluator """rankbased""" +467 24 dataset """kinships""" +467 24 model """proje""" +467 24 loss """bceaftersigmoid""" +467 24 regularizer """no""" +467 24 optimizer """adadelta""" +467 24 training_loop """lcwa""" +467 24 evaluator """rankbased""" +467 25 dataset """kinships""" +467 25 model """proje""" +467 25 loss """bceaftersigmoid""" +467 25 regularizer """no""" +467 25 optimizer """adadelta""" +467 25 training_loop """lcwa""" +467 25 evaluator """rankbased""" +467 26 dataset """kinships""" +467 26 model """proje""" +467 26 loss """bceaftersigmoid""" +467 26 regularizer """no""" +467 26 optimizer """adadelta""" +467 26 training_loop """lcwa""" +467 26 evaluator """rankbased""" +467 27 dataset """kinships""" +467 27 model """proje""" +467 27 loss """bceaftersigmoid""" +467 27 regularizer """no""" +467 27 optimizer """adadelta""" +467 27 training_loop """lcwa""" +467 27 evaluator """rankbased""" +467 28 dataset """kinships""" +467 28 model """proje""" +467 28 loss """bceaftersigmoid""" +467 28 regularizer """no""" +467 28 optimizer """adadelta""" +467 28 training_loop """lcwa""" +467 28 evaluator """rankbased""" +467 29 dataset """kinships""" +467 29 model """proje""" +467 29 loss """bceaftersigmoid""" +467 29 regularizer """no""" +467 29 optimizer """adadelta""" +467 29 training_loop """lcwa""" +467 29 evaluator """rankbased""" +467 30 dataset """kinships""" +467 30 model """proje""" +467 30 loss """bceaftersigmoid""" +467 30 regularizer """no""" +467 30 optimizer """adadelta""" +467 30 training_loop """lcwa""" +467 30 evaluator """rankbased""" +467 31 dataset """kinships""" +467 31 model """proje""" +467 31 loss """bceaftersigmoid""" +467 31 regularizer """no""" +467 31 optimizer 
"""adadelta""" +467 31 training_loop """lcwa""" +467 31 evaluator """rankbased""" +467 32 dataset """kinships""" +467 32 model """proje""" +467 32 loss """bceaftersigmoid""" +467 32 regularizer """no""" +467 32 optimizer """adadelta""" +467 32 training_loop """lcwa""" +467 32 evaluator """rankbased""" +467 33 dataset """kinships""" +467 33 model """proje""" +467 33 loss """bceaftersigmoid""" +467 33 regularizer """no""" +467 33 optimizer """adadelta""" +467 33 training_loop """lcwa""" +467 33 evaluator """rankbased""" +467 34 dataset """kinships""" +467 34 model """proje""" +467 34 loss """bceaftersigmoid""" +467 34 regularizer """no""" +467 34 optimizer """adadelta""" +467 34 training_loop """lcwa""" +467 34 evaluator """rankbased""" +467 35 dataset """kinships""" +467 35 model """proje""" +467 35 loss """bceaftersigmoid""" +467 35 regularizer """no""" +467 35 optimizer """adadelta""" +467 35 training_loop """lcwa""" +467 35 evaluator """rankbased""" +467 36 dataset """kinships""" +467 36 model """proje""" +467 36 loss """bceaftersigmoid""" +467 36 regularizer """no""" +467 36 optimizer """adadelta""" +467 36 training_loop """lcwa""" +467 36 evaluator """rankbased""" +467 37 dataset """kinships""" +467 37 model """proje""" +467 37 loss """bceaftersigmoid""" +467 37 regularizer """no""" +467 37 optimizer """adadelta""" +467 37 training_loop """lcwa""" +467 37 evaluator """rankbased""" +467 38 dataset """kinships""" +467 38 model """proje""" +467 38 loss """bceaftersigmoid""" +467 38 regularizer """no""" +467 38 optimizer """adadelta""" +467 38 training_loop """lcwa""" +467 38 evaluator """rankbased""" +467 39 dataset """kinships""" +467 39 model """proje""" +467 39 loss """bceaftersigmoid""" +467 39 regularizer """no""" +467 39 optimizer """adadelta""" +467 39 training_loop """lcwa""" +467 39 evaluator """rankbased""" +467 40 dataset """kinships""" +467 40 model """proje""" +467 40 loss """bceaftersigmoid""" +467 40 regularizer """no""" +467 40 optimizer 
"""adadelta""" +467 40 training_loop """lcwa""" +467 40 evaluator """rankbased""" +467 41 dataset """kinships""" +467 41 model """proje""" +467 41 loss """bceaftersigmoid""" +467 41 regularizer """no""" +467 41 optimizer """adadelta""" +467 41 training_loop """lcwa""" +467 41 evaluator """rankbased""" +467 42 dataset """kinships""" +467 42 model """proje""" +467 42 loss """bceaftersigmoid""" +467 42 regularizer """no""" +467 42 optimizer """adadelta""" +467 42 training_loop """lcwa""" +467 42 evaluator """rankbased""" +467 43 dataset """kinships""" +467 43 model """proje""" +467 43 loss """bceaftersigmoid""" +467 43 regularizer """no""" +467 43 optimizer """adadelta""" +467 43 training_loop """lcwa""" +467 43 evaluator """rankbased""" +467 44 dataset """kinships""" +467 44 model """proje""" +467 44 loss """bceaftersigmoid""" +467 44 regularizer """no""" +467 44 optimizer """adadelta""" +467 44 training_loop """lcwa""" +467 44 evaluator """rankbased""" +467 45 dataset """kinships""" +467 45 model """proje""" +467 45 loss """bceaftersigmoid""" +467 45 regularizer """no""" +467 45 optimizer """adadelta""" +467 45 training_loop """lcwa""" +467 45 evaluator """rankbased""" +467 46 dataset """kinships""" +467 46 model """proje""" +467 46 loss """bceaftersigmoid""" +467 46 regularizer """no""" +467 46 optimizer """adadelta""" +467 46 training_loop """lcwa""" +467 46 evaluator """rankbased""" +467 47 dataset """kinships""" +467 47 model """proje""" +467 47 loss """bceaftersigmoid""" +467 47 regularizer """no""" +467 47 optimizer """adadelta""" +467 47 training_loop """lcwa""" +467 47 evaluator """rankbased""" +467 48 dataset """kinships""" +467 48 model """proje""" +467 48 loss """bceaftersigmoid""" +467 48 regularizer """no""" +467 48 optimizer """adadelta""" +467 48 training_loop """lcwa""" +467 48 evaluator """rankbased""" +467 49 dataset """kinships""" +467 49 model """proje""" +467 49 loss """bceaftersigmoid""" +467 49 regularizer """no""" +467 49 optimizer 
"""adadelta""" +467 49 training_loop """lcwa""" +467 49 evaluator """rankbased""" +467 50 dataset """kinships""" +467 50 model """proje""" +467 50 loss """bceaftersigmoid""" +467 50 regularizer """no""" +467 50 optimizer """adadelta""" +467 50 training_loop """lcwa""" +467 50 evaluator """rankbased""" +467 51 dataset """kinships""" +467 51 model """proje""" +467 51 loss """bceaftersigmoid""" +467 51 regularizer """no""" +467 51 optimizer """adadelta""" +467 51 training_loop """lcwa""" +467 51 evaluator """rankbased""" +467 52 dataset """kinships""" +467 52 model """proje""" +467 52 loss """bceaftersigmoid""" +467 52 regularizer """no""" +467 52 optimizer """adadelta""" +467 52 training_loop """lcwa""" +467 52 evaluator """rankbased""" +467 53 dataset """kinships""" +467 53 model """proje""" +467 53 loss """bceaftersigmoid""" +467 53 regularizer """no""" +467 53 optimizer """adadelta""" +467 53 training_loop """lcwa""" +467 53 evaluator """rankbased""" +467 54 dataset """kinships""" +467 54 model """proje""" +467 54 loss """bceaftersigmoid""" +467 54 regularizer """no""" +467 54 optimizer """adadelta""" +467 54 training_loop """lcwa""" +467 54 evaluator """rankbased""" +467 55 dataset """kinships""" +467 55 model """proje""" +467 55 loss """bceaftersigmoid""" +467 55 regularizer """no""" +467 55 optimizer """adadelta""" +467 55 training_loop """lcwa""" +467 55 evaluator """rankbased""" +467 56 dataset """kinships""" +467 56 model """proje""" +467 56 loss """bceaftersigmoid""" +467 56 regularizer """no""" +467 56 optimizer """adadelta""" +467 56 training_loop """lcwa""" +467 56 evaluator """rankbased""" +467 57 dataset """kinships""" +467 57 model """proje""" +467 57 loss """bceaftersigmoid""" +467 57 regularizer """no""" +467 57 optimizer """adadelta""" +467 57 training_loop """lcwa""" +467 57 evaluator """rankbased""" +467 58 dataset """kinships""" +467 58 model """proje""" +467 58 loss """bceaftersigmoid""" +467 58 regularizer """no""" +467 58 optimizer 
"""adadelta""" +467 58 training_loop """lcwa""" +467 58 evaluator """rankbased""" +467 59 dataset """kinships""" +467 59 model """proje""" +467 59 loss """bceaftersigmoid""" +467 59 regularizer """no""" +467 59 optimizer """adadelta""" +467 59 training_loop """lcwa""" +467 59 evaluator """rankbased""" +467 60 dataset """kinships""" +467 60 model """proje""" +467 60 loss """bceaftersigmoid""" +467 60 regularizer """no""" +467 60 optimizer """adadelta""" +467 60 training_loop """lcwa""" +467 60 evaluator """rankbased""" +467 61 dataset """kinships""" +467 61 model """proje""" +467 61 loss """bceaftersigmoid""" +467 61 regularizer """no""" +467 61 optimizer """adadelta""" +467 61 training_loop """lcwa""" +467 61 evaluator """rankbased""" +467 62 dataset """kinships""" +467 62 model """proje""" +467 62 loss """bceaftersigmoid""" +467 62 regularizer """no""" +467 62 optimizer """adadelta""" +467 62 training_loop """lcwa""" +467 62 evaluator """rankbased""" +467 63 dataset """kinships""" +467 63 model """proje""" +467 63 loss """bceaftersigmoid""" +467 63 regularizer """no""" +467 63 optimizer """adadelta""" +467 63 training_loop """lcwa""" +467 63 evaluator """rankbased""" +467 64 dataset """kinships""" +467 64 model """proje""" +467 64 loss """bceaftersigmoid""" +467 64 regularizer """no""" +467 64 optimizer """adadelta""" +467 64 training_loop """lcwa""" +467 64 evaluator """rankbased""" +467 65 dataset """kinships""" +467 65 model """proje""" +467 65 loss """bceaftersigmoid""" +467 65 regularizer """no""" +467 65 optimizer """adadelta""" +467 65 training_loop """lcwa""" +467 65 evaluator """rankbased""" +467 66 dataset """kinships""" +467 66 model """proje""" +467 66 loss """bceaftersigmoid""" +467 66 regularizer """no""" +467 66 optimizer """adadelta""" +467 66 training_loop """lcwa""" +467 66 evaluator """rankbased""" +467 67 dataset """kinships""" +467 67 model """proje""" +467 67 loss """bceaftersigmoid""" +467 67 regularizer """no""" +467 67 optimizer 
"""adadelta""" +467 67 training_loop """lcwa""" +467 67 evaluator """rankbased""" +467 68 dataset """kinships""" +467 68 model """proje""" +467 68 loss """bceaftersigmoid""" +467 68 regularizer """no""" +467 68 optimizer """adadelta""" +467 68 training_loop """lcwa""" +467 68 evaluator """rankbased""" +467 69 dataset """kinships""" +467 69 model """proje""" +467 69 loss """bceaftersigmoid""" +467 69 regularizer """no""" +467 69 optimizer """adadelta""" +467 69 training_loop """lcwa""" +467 69 evaluator """rankbased""" +467 70 dataset """kinships""" +467 70 model """proje""" +467 70 loss """bceaftersigmoid""" +467 70 regularizer """no""" +467 70 optimizer """adadelta""" +467 70 training_loop """lcwa""" +467 70 evaluator """rankbased""" +467 71 dataset """kinships""" +467 71 model """proje""" +467 71 loss """bceaftersigmoid""" +467 71 regularizer """no""" +467 71 optimizer """adadelta""" +467 71 training_loop """lcwa""" +467 71 evaluator """rankbased""" +467 72 dataset """kinships""" +467 72 model """proje""" +467 72 loss """bceaftersigmoid""" +467 72 regularizer """no""" +467 72 optimizer """adadelta""" +467 72 training_loop """lcwa""" +467 72 evaluator """rankbased""" +467 73 dataset """kinships""" +467 73 model """proje""" +467 73 loss """bceaftersigmoid""" +467 73 regularizer """no""" +467 73 optimizer """adadelta""" +467 73 training_loop """lcwa""" +467 73 evaluator """rankbased""" +467 74 dataset """kinships""" +467 74 model """proje""" +467 74 loss """bceaftersigmoid""" +467 74 regularizer """no""" +467 74 optimizer """adadelta""" +467 74 training_loop """lcwa""" +467 74 evaluator """rankbased""" +467 75 dataset """kinships""" +467 75 model """proje""" +467 75 loss """bceaftersigmoid""" +467 75 regularizer """no""" +467 75 optimizer """adadelta""" +467 75 training_loop """lcwa""" +467 75 evaluator """rankbased""" +467 76 dataset """kinships""" +467 76 model """proje""" +467 76 loss """bceaftersigmoid""" +467 76 regularizer """no""" +467 76 optimizer 
"""adadelta""" +467 76 training_loop """lcwa""" +467 76 evaluator """rankbased""" +467 77 dataset """kinships""" +467 77 model """proje""" +467 77 loss """bceaftersigmoid""" +467 77 regularizer """no""" +467 77 optimizer """adadelta""" +467 77 training_loop """lcwa""" +467 77 evaluator """rankbased""" +467 78 dataset """kinships""" +467 78 model """proje""" +467 78 loss """bceaftersigmoid""" +467 78 regularizer """no""" +467 78 optimizer """adadelta""" +467 78 training_loop """lcwa""" +467 78 evaluator """rankbased""" +467 79 dataset """kinships""" +467 79 model """proje""" +467 79 loss """bceaftersigmoid""" +467 79 regularizer """no""" +467 79 optimizer """adadelta""" +467 79 training_loop """lcwa""" +467 79 evaluator """rankbased""" +467 80 dataset """kinships""" +467 80 model """proje""" +467 80 loss """bceaftersigmoid""" +467 80 regularizer """no""" +467 80 optimizer """adadelta""" +467 80 training_loop """lcwa""" +467 80 evaluator """rankbased""" +467 81 dataset """kinships""" +467 81 model """proje""" +467 81 loss """bceaftersigmoid""" +467 81 regularizer """no""" +467 81 optimizer """adadelta""" +467 81 training_loop """lcwa""" +467 81 evaluator """rankbased""" +467 82 dataset """kinships""" +467 82 model """proje""" +467 82 loss """bceaftersigmoid""" +467 82 regularizer """no""" +467 82 optimizer """adadelta""" +467 82 training_loop """lcwa""" +467 82 evaluator """rankbased""" +467 83 dataset """kinships""" +467 83 model """proje""" +467 83 loss """bceaftersigmoid""" +467 83 regularizer """no""" +467 83 optimizer """adadelta""" +467 83 training_loop """lcwa""" +467 83 evaluator """rankbased""" +467 84 dataset """kinships""" +467 84 model """proje""" +467 84 loss """bceaftersigmoid""" +467 84 regularizer """no""" +467 84 optimizer """adadelta""" +467 84 training_loop """lcwa""" +467 84 evaluator """rankbased""" +467 85 dataset """kinships""" +467 85 model """proje""" +467 85 loss """bceaftersigmoid""" +467 85 regularizer """no""" +467 85 optimizer 
"""adadelta""" +467 85 training_loop """lcwa""" +467 85 evaluator """rankbased""" +467 86 dataset """kinships""" +467 86 model """proje""" +467 86 loss """bceaftersigmoid""" +467 86 regularizer """no""" +467 86 optimizer """adadelta""" +467 86 training_loop """lcwa""" +467 86 evaluator """rankbased""" +467 87 dataset """kinships""" +467 87 model """proje""" +467 87 loss """bceaftersigmoid""" +467 87 regularizer """no""" +467 87 optimizer """adadelta""" +467 87 training_loop """lcwa""" +467 87 evaluator """rankbased""" +467 88 dataset """kinships""" +467 88 model """proje""" +467 88 loss """bceaftersigmoid""" +467 88 regularizer """no""" +467 88 optimizer """adadelta""" +467 88 training_loop """lcwa""" +467 88 evaluator """rankbased""" +467 89 dataset """kinships""" +467 89 model """proje""" +467 89 loss """bceaftersigmoid""" +467 89 regularizer """no""" +467 89 optimizer """adadelta""" +467 89 training_loop """lcwa""" +467 89 evaluator """rankbased""" +467 90 dataset """kinships""" +467 90 model """proje""" +467 90 loss """bceaftersigmoid""" +467 90 regularizer """no""" +467 90 optimizer """adadelta""" +467 90 training_loop """lcwa""" +467 90 evaluator """rankbased""" +467 91 dataset """kinships""" +467 91 model """proje""" +467 91 loss """bceaftersigmoid""" +467 91 regularizer """no""" +467 91 optimizer """adadelta""" +467 91 training_loop """lcwa""" +467 91 evaluator """rankbased""" +467 92 dataset """kinships""" +467 92 model """proje""" +467 92 loss """bceaftersigmoid""" +467 92 regularizer """no""" +467 92 optimizer """adadelta""" +467 92 training_loop """lcwa""" +467 92 evaluator """rankbased""" +467 93 dataset """kinships""" +467 93 model """proje""" +467 93 loss """bceaftersigmoid""" +467 93 regularizer """no""" +467 93 optimizer """adadelta""" +467 93 training_loop """lcwa""" +467 93 evaluator """rankbased""" +467 94 dataset """kinships""" +467 94 model """proje""" +467 94 loss """bceaftersigmoid""" +467 94 regularizer """no""" +467 94 optimizer 
"""adadelta""" +467 94 training_loop """lcwa""" +467 94 evaluator """rankbased""" +467 95 dataset """kinships""" +467 95 model """proje""" +467 95 loss """bceaftersigmoid""" +467 95 regularizer """no""" +467 95 optimizer """adadelta""" +467 95 training_loop """lcwa""" +467 95 evaluator """rankbased""" +467 96 dataset """kinships""" +467 96 model """proje""" +467 96 loss """bceaftersigmoid""" +467 96 regularizer """no""" +467 96 optimizer """adadelta""" +467 96 training_loop """lcwa""" +467 96 evaluator """rankbased""" +467 97 dataset """kinships""" +467 97 model """proje""" +467 97 loss """bceaftersigmoid""" +467 97 regularizer """no""" +467 97 optimizer """adadelta""" +467 97 training_loop """lcwa""" +467 97 evaluator """rankbased""" +467 98 dataset """kinships""" +467 98 model """proje""" +467 98 loss """bceaftersigmoid""" +467 98 regularizer """no""" +467 98 optimizer """adadelta""" +467 98 training_loop """lcwa""" +467 98 evaluator """rankbased""" +467 99 dataset """kinships""" +467 99 model """proje""" +467 99 loss """bceaftersigmoid""" +467 99 regularizer """no""" +467 99 optimizer """adadelta""" +467 99 training_loop """lcwa""" +467 99 evaluator """rankbased""" +467 100 dataset """kinships""" +467 100 model """proje""" +467 100 loss """bceaftersigmoid""" +467 100 regularizer """no""" +467 100 optimizer """adadelta""" +467 100 training_loop """lcwa""" +467 100 evaluator """rankbased""" +468 1 model.embedding_dim 2.0 +468 1 training.batch_size 2.0 +468 1 training.label_smoothing 0.9107350093252367 +468 2 model.embedding_dim 2.0 +468 2 training.batch_size 2.0 +468 2 training.label_smoothing 0.0020966028774174372 +468 3 model.embedding_dim 1.0 +468 3 training.batch_size 0.0 +468 3 training.label_smoothing 0.15937439662633343 +468 4 model.embedding_dim 2.0 +468 4 training.batch_size 1.0 +468 4 training.label_smoothing 0.5449841830268143 +468 5 model.embedding_dim 0.0 +468 5 training.batch_size 2.0 +468 5 training.label_smoothing 0.0052829447806335824 +468 6 
model.embedding_dim 1.0 +468 6 training.batch_size 2.0 +468 6 training.label_smoothing 0.03620518337286756 +468 7 model.embedding_dim 0.0 +468 7 training.batch_size 0.0 +468 7 training.label_smoothing 0.3691460429520951 +468 8 model.embedding_dim 0.0 +468 8 training.batch_size 2.0 +468 8 training.label_smoothing 0.15157618722804436 +468 9 model.embedding_dim 0.0 +468 9 training.batch_size 1.0 +468 9 training.label_smoothing 0.27354929846989806 +468 10 model.embedding_dim 1.0 +468 10 training.batch_size 1.0 +468 10 training.label_smoothing 0.017179270113330633 +468 11 model.embedding_dim 1.0 +468 11 training.batch_size 1.0 +468 11 training.label_smoothing 0.05149839888630689 +468 12 model.embedding_dim 1.0 +468 12 training.batch_size 2.0 +468 12 training.label_smoothing 0.47389636433109594 +468 13 model.embedding_dim 2.0 +468 13 training.batch_size 0.0 +468 13 training.label_smoothing 0.009824796914236184 +468 14 model.embedding_dim 1.0 +468 14 training.batch_size 0.0 +468 14 training.label_smoothing 0.01087372325060009 +468 15 model.embedding_dim 2.0 +468 15 training.batch_size 2.0 +468 15 training.label_smoothing 0.008319591701003425 +468 16 model.embedding_dim 2.0 +468 16 training.batch_size 1.0 +468 16 training.label_smoothing 0.01995411241330737 +468 17 model.embedding_dim 2.0 +468 17 training.batch_size 0.0 +468 17 training.label_smoothing 0.1482772505732914 +468 18 model.embedding_dim 1.0 +468 18 training.batch_size 0.0 +468 18 training.label_smoothing 0.0011755393384459483 +468 19 model.embedding_dim 1.0 +468 19 training.batch_size 0.0 +468 19 training.label_smoothing 0.20265238522475335 +468 20 model.embedding_dim 1.0 +468 20 training.batch_size 0.0 +468 20 training.label_smoothing 0.02487028871727451 +468 21 model.embedding_dim 1.0 +468 21 training.batch_size 1.0 +468 21 training.label_smoothing 0.09850844386249995 +468 22 model.embedding_dim 0.0 +468 22 training.batch_size 0.0 +468 22 training.label_smoothing 0.11552846391022818 +468 23 
model.embedding_dim 1.0 +468 23 training.batch_size 2.0 +468 23 training.label_smoothing 0.9220484135983931 +468 24 model.embedding_dim 2.0 +468 24 training.batch_size 0.0 +468 24 training.label_smoothing 0.06540890283269443 +468 25 model.embedding_dim 1.0 +468 25 training.batch_size 1.0 +468 25 training.label_smoothing 0.7545432418426119 +468 26 model.embedding_dim 1.0 +468 26 training.batch_size 0.0 +468 26 training.label_smoothing 0.0010980077613879184 +468 27 model.embedding_dim 2.0 +468 27 training.batch_size 1.0 +468 27 training.label_smoothing 0.01360738145048424 +468 28 model.embedding_dim 0.0 +468 28 training.batch_size 1.0 +468 28 training.label_smoothing 0.08988336399721686 +468 29 model.embedding_dim 2.0 +468 29 training.batch_size 1.0 +468 29 training.label_smoothing 0.007010423311658871 +468 30 model.embedding_dim 2.0 +468 30 training.batch_size 2.0 +468 30 training.label_smoothing 0.0017161206214586992 +468 31 model.embedding_dim 1.0 +468 31 training.batch_size 0.0 +468 31 training.label_smoothing 0.030135225049842628 +468 32 model.embedding_dim 0.0 +468 32 training.batch_size 0.0 +468 32 training.label_smoothing 0.02658034402198967 +468 33 model.embedding_dim 0.0 +468 33 training.batch_size 1.0 +468 33 training.label_smoothing 0.7187147323190334 +468 34 model.embedding_dim 1.0 +468 34 training.batch_size 1.0 +468 34 training.label_smoothing 0.001085520947006609 +468 35 model.embedding_dim 2.0 +468 35 training.batch_size 0.0 +468 35 training.label_smoothing 0.4395002558434942 +468 36 model.embedding_dim 2.0 +468 36 training.batch_size 0.0 +468 36 training.label_smoothing 0.5115879203063491 +468 37 model.embedding_dim 1.0 +468 37 training.batch_size 0.0 +468 37 training.label_smoothing 0.0011430345868042033 +468 38 model.embedding_dim 0.0 +468 38 training.batch_size 0.0 +468 38 training.label_smoothing 0.1646413137288735 +468 39 model.embedding_dim 0.0 +468 39 training.batch_size 2.0 +468 39 training.label_smoothing 0.4049494974519009 +468 40 
model.embedding_dim 2.0 +468 40 training.batch_size 0.0 +468 40 training.label_smoothing 0.8089132525125355 +468 41 model.embedding_dim 1.0 +468 41 training.batch_size 1.0 +468 41 training.label_smoothing 0.13750604605833386 +468 42 model.embedding_dim 0.0 +468 42 training.batch_size 2.0 +468 42 training.label_smoothing 0.38786210957736567 +468 43 model.embedding_dim 0.0 +468 43 training.batch_size 1.0 +468 43 training.label_smoothing 0.06193829114497836 +468 44 model.embedding_dim 2.0 +468 44 training.batch_size 0.0 +468 44 training.label_smoothing 0.3567804872172677 +468 45 model.embedding_dim 0.0 +468 45 training.batch_size 0.0 +468 45 training.label_smoothing 0.0014995564823986514 +468 46 model.embedding_dim 0.0 +468 46 training.batch_size 2.0 +468 46 training.label_smoothing 0.9926846717364294 +468 47 model.embedding_dim 2.0 +468 47 training.batch_size 1.0 +468 47 training.label_smoothing 0.007756185875628196 +468 48 model.embedding_dim 1.0 +468 48 training.batch_size 2.0 +468 48 training.label_smoothing 0.00149227850185962 +468 49 model.embedding_dim 0.0 +468 49 training.batch_size 1.0 +468 49 training.label_smoothing 0.08613280453491465 +468 50 model.embedding_dim 1.0 +468 50 training.batch_size 0.0 +468 50 training.label_smoothing 0.001026951066661808 +468 51 model.embedding_dim 2.0 +468 51 training.batch_size 0.0 +468 51 training.label_smoothing 0.399182348861923 +468 52 model.embedding_dim 2.0 +468 52 training.batch_size 1.0 +468 52 training.label_smoothing 0.004076126421100676 +468 53 model.embedding_dim 2.0 +468 53 training.batch_size 0.0 +468 53 training.label_smoothing 0.001622147217707965 +468 54 model.embedding_dim 2.0 +468 54 training.batch_size 1.0 +468 54 training.label_smoothing 0.011057590461852954 +468 55 model.embedding_dim 2.0 +468 55 training.batch_size 2.0 +468 55 training.label_smoothing 0.003897253873912776 +468 56 model.embedding_dim 1.0 +468 56 training.batch_size 2.0 +468 56 training.label_smoothing 0.0024706202448715477 +468 57 
model.embedding_dim 1.0 +468 57 training.batch_size 0.0 +468 57 training.label_smoothing 0.011140961154674708 +468 58 model.embedding_dim 1.0 +468 58 training.batch_size 1.0 +468 58 training.label_smoothing 0.17612537851543536 +468 59 model.embedding_dim 2.0 +468 59 training.batch_size 1.0 +468 59 training.label_smoothing 0.0013540709003365499 +468 60 model.embedding_dim 0.0 +468 60 training.batch_size 2.0 +468 60 training.label_smoothing 0.06425855641210901 +468 61 model.embedding_dim 0.0 +468 61 training.batch_size 1.0 +468 61 training.label_smoothing 0.03798316587336407 +468 62 model.embedding_dim 2.0 +468 62 training.batch_size 1.0 +468 62 training.label_smoothing 0.01861064023970008 +468 63 model.embedding_dim 2.0 +468 63 training.batch_size 0.0 +468 63 training.label_smoothing 0.4763502647408261 +468 64 model.embedding_dim 1.0 +468 64 training.batch_size 1.0 +468 64 training.label_smoothing 0.019995073097378566 +468 65 model.embedding_dim 0.0 +468 65 training.batch_size 0.0 +468 65 training.label_smoothing 0.06996015286273317 +468 66 model.embedding_dim 1.0 +468 66 training.batch_size 2.0 +468 66 training.label_smoothing 0.01082354336047639 +468 67 model.embedding_dim 0.0 +468 67 training.batch_size 2.0 +468 67 training.label_smoothing 0.0016797081203158044 +468 68 model.embedding_dim 2.0 +468 68 training.batch_size 0.0 +468 68 training.label_smoothing 0.11435660748625502 +468 69 model.embedding_dim 1.0 +468 69 training.batch_size 1.0 +468 69 training.label_smoothing 0.004328322863599768 +468 70 model.embedding_dim 2.0 +468 70 training.batch_size 2.0 +468 70 training.label_smoothing 0.014102224151197672 +468 71 model.embedding_dim 1.0 +468 71 training.batch_size 2.0 +468 71 training.label_smoothing 0.43125623085128034 +468 72 model.embedding_dim 0.0 +468 72 training.batch_size 0.0 +468 72 training.label_smoothing 0.24882374766682933 +468 73 model.embedding_dim 0.0 +468 73 training.batch_size 1.0 +468 73 training.label_smoothing 0.1696445677194509 +468 74 
model.embedding_dim 0.0 +468 74 training.batch_size 0.0 +468 74 training.label_smoothing 0.21005892478062066 +468 75 model.embedding_dim 2.0 +468 75 training.batch_size 1.0 +468 75 training.label_smoothing 0.005758765451904585 +468 76 model.embedding_dim 0.0 +468 76 training.batch_size 2.0 +468 76 training.label_smoothing 0.051694975824868664 +468 77 model.embedding_dim 2.0 +468 77 training.batch_size 0.0 +468 77 training.label_smoothing 0.09370182255382452 +468 78 model.embedding_dim 0.0 +468 78 training.batch_size 1.0 +468 78 training.label_smoothing 0.011565834391966567 +468 79 model.embedding_dim 1.0 +468 79 training.batch_size 0.0 +468 79 training.label_smoothing 0.006757884247147742 +468 80 model.embedding_dim 1.0 +468 80 training.batch_size 1.0 +468 80 training.label_smoothing 0.09557731532484313 +468 81 model.embedding_dim 2.0 +468 81 training.batch_size 1.0 +468 81 training.label_smoothing 0.0039902450357060205 +468 82 model.embedding_dim 0.0 +468 82 training.batch_size 1.0 +468 82 training.label_smoothing 0.730148476967792 +468 83 model.embedding_dim 0.0 +468 83 training.batch_size 2.0 +468 83 training.label_smoothing 0.8800410005897646 +468 84 model.embedding_dim 1.0 +468 84 training.batch_size 2.0 +468 84 training.label_smoothing 0.003956741550972333 +468 85 model.embedding_dim 2.0 +468 85 training.batch_size 1.0 +468 85 training.label_smoothing 0.3149396285291057 +468 86 model.embedding_dim 2.0 +468 86 training.batch_size 0.0 +468 86 training.label_smoothing 0.20294735084532506 +468 87 model.embedding_dim 2.0 +468 87 training.batch_size 0.0 +468 87 training.label_smoothing 0.07176321266341361 +468 88 model.embedding_dim 0.0 +468 88 training.batch_size 0.0 +468 88 training.label_smoothing 0.0022270718257047183 +468 89 model.embedding_dim 0.0 +468 89 training.batch_size 0.0 +468 89 training.label_smoothing 0.0019652619647972645 +468 90 model.embedding_dim 1.0 +468 90 training.batch_size 0.0 +468 90 training.label_smoothing 0.010786645130051125 +468 91 
model.embedding_dim 0.0 +468 91 training.batch_size 1.0 +468 91 training.label_smoothing 0.014256820886067239 +468 92 model.embedding_dim 1.0 +468 92 training.batch_size 0.0 +468 92 training.label_smoothing 0.01107915792830297 +468 93 model.embedding_dim 0.0 +468 93 training.batch_size 1.0 +468 93 training.label_smoothing 0.064340587218027 +468 94 model.embedding_dim 1.0 +468 94 training.batch_size 2.0 +468 94 training.label_smoothing 0.10989629930896315 +468 95 model.embedding_dim 0.0 +468 95 training.batch_size 1.0 +468 95 training.label_smoothing 0.6649334503684604 +468 96 model.embedding_dim 0.0 +468 96 training.batch_size 2.0 +468 96 training.label_smoothing 0.20518838311223553 +468 97 model.embedding_dim 2.0 +468 97 training.batch_size 1.0 +468 97 training.label_smoothing 0.007267204752253453 +468 98 model.embedding_dim 1.0 +468 98 training.batch_size 0.0 +468 98 training.label_smoothing 0.00104685353133387 +468 99 model.embedding_dim 1.0 +468 99 training.batch_size 1.0 +468 99 training.label_smoothing 0.017341132089077206 +468 100 model.embedding_dim 0.0 +468 100 training.batch_size 1.0 +468 100 training.label_smoothing 0.06267822116625736 +468 1 dataset """kinships""" +468 1 model """proje""" +468 1 loss """softplus""" +468 1 regularizer """no""" +468 1 optimizer """adadelta""" +468 1 training_loop """lcwa""" +468 1 evaluator """rankbased""" +468 2 dataset """kinships""" +468 2 model """proje""" +468 2 loss """softplus""" +468 2 regularizer """no""" +468 2 optimizer """adadelta""" +468 2 training_loop """lcwa""" +468 2 evaluator """rankbased""" +468 3 dataset """kinships""" +468 3 model """proje""" +468 3 loss """softplus""" +468 3 regularizer """no""" +468 3 optimizer """adadelta""" +468 3 training_loop """lcwa""" +468 3 evaluator """rankbased""" +468 4 dataset """kinships""" +468 4 model """proje""" +468 4 loss """softplus""" +468 4 regularizer """no""" +468 4 optimizer """adadelta""" +468 4 training_loop """lcwa""" +468 4 evaluator """rankbased""" +468 5 
dataset """kinships""" +468 5 model """proje""" +468 5 loss """softplus""" +468 5 regularizer """no""" +468 5 optimizer """adadelta""" +468 5 training_loop """lcwa""" +468 5 evaluator """rankbased""" +468 6 dataset """kinships""" +468 6 model """proje""" +468 6 loss """softplus""" +468 6 regularizer """no""" +468 6 optimizer """adadelta""" +468 6 training_loop """lcwa""" +468 6 evaluator """rankbased""" +468 7 dataset """kinships""" +468 7 model """proje""" +468 7 loss """softplus""" +468 7 regularizer """no""" +468 7 optimizer """adadelta""" +468 7 training_loop """lcwa""" +468 7 evaluator """rankbased""" +468 8 dataset """kinships""" +468 8 model """proje""" +468 8 loss """softplus""" +468 8 regularizer """no""" +468 8 optimizer """adadelta""" +468 8 training_loop """lcwa""" +468 8 evaluator """rankbased""" +468 9 dataset """kinships""" +468 9 model """proje""" +468 9 loss """softplus""" +468 9 regularizer """no""" +468 9 optimizer """adadelta""" +468 9 training_loop """lcwa""" +468 9 evaluator """rankbased""" +468 10 dataset """kinships""" +468 10 model """proje""" +468 10 loss """softplus""" +468 10 regularizer """no""" +468 10 optimizer """adadelta""" +468 10 training_loop """lcwa""" +468 10 evaluator """rankbased""" +468 11 dataset """kinships""" +468 11 model """proje""" +468 11 loss """softplus""" +468 11 regularizer """no""" +468 11 optimizer """adadelta""" +468 11 training_loop """lcwa""" +468 11 evaluator """rankbased""" +468 12 dataset """kinships""" +468 12 model """proje""" +468 12 loss """softplus""" +468 12 regularizer """no""" +468 12 optimizer """adadelta""" +468 12 training_loop """lcwa""" +468 12 evaluator """rankbased""" +468 13 dataset """kinships""" +468 13 model """proje""" +468 13 loss """softplus""" +468 13 regularizer """no""" +468 13 optimizer """adadelta""" +468 13 training_loop """lcwa""" +468 13 evaluator """rankbased""" +468 14 dataset """kinships""" +468 14 model """proje""" +468 14 loss """softplus""" +468 14 regularizer """no""" 
+468 14 optimizer """adadelta""" +468 14 training_loop """lcwa""" +468 14 evaluator """rankbased""" +468 15 dataset """kinships""" +468 15 model """proje""" +468 15 loss """softplus""" +468 15 regularizer """no""" +468 15 optimizer """adadelta""" +468 15 training_loop """lcwa""" +468 15 evaluator """rankbased""" +468 16 dataset """kinships""" +468 16 model """proje""" +468 16 loss """softplus""" +468 16 regularizer """no""" +468 16 optimizer """adadelta""" +468 16 training_loop """lcwa""" +468 16 evaluator """rankbased""" +468 17 dataset """kinships""" +468 17 model """proje""" +468 17 loss """softplus""" +468 17 regularizer """no""" +468 17 optimizer """adadelta""" +468 17 training_loop """lcwa""" +468 17 evaluator """rankbased""" +468 18 dataset """kinships""" +468 18 model """proje""" +468 18 loss """softplus""" +468 18 regularizer """no""" +468 18 optimizer """adadelta""" +468 18 training_loop """lcwa""" +468 18 evaluator """rankbased""" +468 19 dataset """kinships""" +468 19 model """proje""" +468 19 loss """softplus""" +468 19 regularizer """no""" +468 19 optimizer """adadelta""" +468 19 training_loop """lcwa""" +468 19 evaluator """rankbased""" +468 20 dataset """kinships""" +468 20 model """proje""" +468 20 loss """softplus""" +468 20 regularizer """no""" +468 20 optimizer """adadelta""" +468 20 training_loop """lcwa""" +468 20 evaluator """rankbased""" +468 21 dataset """kinships""" +468 21 model """proje""" +468 21 loss """softplus""" +468 21 regularizer """no""" +468 21 optimizer """adadelta""" +468 21 training_loop """lcwa""" +468 21 evaluator """rankbased""" +468 22 dataset """kinships""" +468 22 model """proje""" +468 22 loss """softplus""" +468 22 regularizer """no""" +468 22 optimizer """adadelta""" +468 22 training_loop """lcwa""" +468 22 evaluator """rankbased""" +468 23 dataset """kinships""" +468 23 model """proje""" +468 23 loss """softplus""" +468 23 regularizer """no""" +468 23 optimizer """adadelta""" +468 23 training_loop """lcwa""" +468 23 
evaluator """rankbased""" +468 24 dataset """kinships""" +468 24 model """proje""" +468 24 loss """softplus""" +468 24 regularizer """no""" +468 24 optimizer """adadelta""" +468 24 training_loop """lcwa""" +468 24 evaluator """rankbased""" +468 25 dataset """kinships""" +468 25 model """proje""" +468 25 loss """softplus""" +468 25 regularizer """no""" +468 25 optimizer """adadelta""" +468 25 training_loop """lcwa""" +468 25 evaluator """rankbased""" +468 26 dataset """kinships""" +468 26 model """proje""" +468 26 loss """softplus""" +468 26 regularizer """no""" +468 26 optimizer """adadelta""" +468 26 training_loop """lcwa""" +468 26 evaluator """rankbased""" +468 27 dataset """kinships""" +468 27 model """proje""" +468 27 loss """softplus""" +468 27 regularizer """no""" +468 27 optimizer """adadelta""" +468 27 training_loop """lcwa""" +468 27 evaluator """rankbased""" +468 28 dataset """kinships""" +468 28 model """proje""" +468 28 loss """softplus""" +468 28 regularizer """no""" +468 28 optimizer """adadelta""" +468 28 training_loop """lcwa""" +468 28 evaluator """rankbased""" +468 29 dataset """kinships""" +468 29 model """proje""" +468 29 loss """softplus""" +468 29 regularizer """no""" +468 29 optimizer """adadelta""" +468 29 training_loop """lcwa""" +468 29 evaluator """rankbased""" +468 30 dataset """kinships""" +468 30 model """proje""" +468 30 loss """softplus""" +468 30 regularizer """no""" +468 30 optimizer """adadelta""" +468 30 training_loop """lcwa""" +468 30 evaluator """rankbased""" +468 31 dataset """kinships""" +468 31 model """proje""" +468 31 loss """softplus""" +468 31 regularizer """no""" +468 31 optimizer """adadelta""" +468 31 training_loop """lcwa""" +468 31 evaluator """rankbased""" +468 32 dataset """kinships""" +468 32 model """proje""" +468 32 loss """softplus""" +468 32 regularizer """no""" +468 32 optimizer """adadelta""" +468 32 training_loop """lcwa""" +468 32 evaluator """rankbased""" +468 33 dataset """kinships""" +468 33 model 
"""proje""" +468 33 loss """softplus""" +468 33 regularizer """no""" +468 33 optimizer """adadelta""" +468 33 training_loop """lcwa""" +468 33 evaluator """rankbased""" +468 34 dataset """kinships""" +468 34 model """proje""" +468 34 loss """softplus""" +468 34 regularizer """no""" +468 34 optimizer """adadelta""" +468 34 training_loop """lcwa""" +468 34 evaluator """rankbased""" +468 35 dataset """kinships""" +468 35 model """proje""" +468 35 loss """softplus""" +468 35 regularizer """no""" +468 35 optimizer """adadelta""" +468 35 training_loop """lcwa""" +468 35 evaluator """rankbased""" +468 36 dataset """kinships""" +468 36 model """proje""" +468 36 loss """softplus""" +468 36 regularizer """no""" +468 36 optimizer """adadelta""" +468 36 training_loop """lcwa""" +468 36 evaluator """rankbased""" +468 37 dataset """kinships""" +468 37 model """proje""" +468 37 loss """softplus""" +468 37 regularizer """no""" +468 37 optimizer """adadelta""" +468 37 training_loop """lcwa""" +468 37 evaluator """rankbased""" +468 38 dataset """kinships""" +468 38 model """proje""" +468 38 loss """softplus""" +468 38 regularizer """no""" +468 38 optimizer """adadelta""" +468 38 training_loop """lcwa""" +468 38 evaluator """rankbased""" +468 39 dataset """kinships""" +468 39 model """proje""" +468 39 loss """softplus""" +468 39 regularizer """no""" +468 39 optimizer """adadelta""" +468 39 training_loop """lcwa""" +468 39 evaluator """rankbased""" +468 40 dataset """kinships""" +468 40 model """proje""" +468 40 loss """softplus""" +468 40 regularizer """no""" +468 40 optimizer """adadelta""" +468 40 training_loop """lcwa""" +468 40 evaluator """rankbased""" +468 41 dataset """kinships""" +468 41 model """proje""" +468 41 loss """softplus""" +468 41 regularizer """no""" +468 41 optimizer """adadelta""" +468 41 training_loop """lcwa""" +468 41 evaluator """rankbased""" +468 42 dataset """kinships""" +468 42 model """proje""" +468 42 loss """softplus""" +468 42 regularizer """no""" +468 
42 optimizer """adadelta""" +468 42 training_loop """lcwa""" +468 42 evaluator """rankbased""" +468 43 dataset """kinships""" +468 43 model """proje""" +468 43 loss """softplus""" +468 43 regularizer """no""" +468 43 optimizer """adadelta""" +468 43 training_loop """lcwa""" +468 43 evaluator """rankbased""" +468 44 dataset """kinships""" +468 44 model """proje""" +468 44 loss """softplus""" +468 44 regularizer """no""" +468 44 optimizer """adadelta""" +468 44 training_loop """lcwa""" +468 44 evaluator """rankbased""" +468 45 dataset """kinships""" +468 45 model """proje""" +468 45 loss """softplus""" +468 45 regularizer """no""" +468 45 optimizer """adadelta""" +468 45 training_loop """lcwa""" +468 45 evaluator """rankbased""" +468 46 dataset """kinships""" +468 46 model """proje""" +468 46 loss """softplus""" +468 46 regularizer """no""" +468 46 optimizer """adadelta""" +468 46 training_loop """lcwa""" +468 46 evaluator """rankbased""" +468 47 dataset """kinships""" +468 47 model """proje""" +468 47 loss """softplus""" +468 47 regularizer """no""" +468 47 optimizer """adadelta""" +468 47 training_loop """lcwa""" +468 47 evaluator """rankbased""" +468 48 dataset """kinships""" +468 48 model """proje""" +468 48 loss """softplus""" +468 48 regularizer """no""" +468 48 optimizer """adadelta""" +468 48 training_loop """lcwa""" +468 48 evaluator """rankbased""" +468 49 dataset """kinships""" +468 49 model """proje""" +468 49 loss """softplus""" +468 49 regularizer """no""" +468 49 optimizer """adadelta""" +468 49 training_loop """lcwa""" +468 49 evaluator """rankbased""" +468 50 dataset """kinships""" +468 50 model """proje""" +468 50 loss """softplus""" +468 50 regularizer """no""" +468 50 optimizer """adadelta""" +468 50 training_loop """lcwa""" +468 50 evaluator """rankbased""" +468 51 dataset """kinships""" +468 51 model """proje""" +468 51 loss """softplus""" +468 51 regularizer """no""" +468 51 optimizer """adadelta""" +468 51 training_loop """lcwa""" +468 51 
evaluator """rankbased""" +468 52 dataset """kinships""" +468 52 model """proje""" +468 52 loss """softplus""" +468 52 regularizer """no""" +468 52 optimizer """adadelta""" +468 52 training_loop """lcwa""" +468 52 evaluator """rankbased""" +468 53 dataset """kinships""" +468 53 model """proje""" +468 53 loss """softplus""" +468 53 regularizer """no""" +468 53 optimizer """adadelta""" +468 53 training_loop """lcwa""" +468 53 evaluator """rankbased""" +468 54 dataset """kinships""" +468 54 model """proje""" +468 54 loss """softplus""" +468 54 regularizer """no""" +468 54 optimizer """adadelta""" +468 54 training_loop """lcwa""" +468 54 evaluator """rankbased""" +468 55 dataset """kinships""" +468 55 model """proje""" +468 55 loss """softplus""" +468 55 regularizer """no""" +468 55 optimizer """adadelta""" +468 55 training_loop """lcwa""" +468 55 evaluator """rankbased""" +468 56 dataset """kinships""" +468 56 model """proje""" +468 56 loss """softplus""" +468 56 regularizer """no""" +468 56 optimizer """adadelta""" +468 56 training_loop """lcwa""" +468 56 evaluator """rankbased""" +468 57 dataset """kinships""" +468 57 model """proje""" +468 57 loss """softplus""" +468 57 regularizer """no""" +468 57 optimizer """adadelta""" +468 57 training_loop """lcwa""" +468 57 evaluator """rankbased""" +468 58 dataset """kinships""" +468 58 model """proje""" +468 58 loss """softplus""" +468 58 regularizer """no""" +468 58 optimizer """adadelta""" +468 58 training_loop """lcwa""" +468 58 evaluator """rankbased""" +468 59 dataset """kinships""" +468 59 model """proje""" +468 59 loss """softplus""" +468 59 regularizer """no""" +468 59 optimizer """adadelta""" +468 59 training_loop """lcwa""" +468 59 evaluator """rankbased""" +468 60 dataset """kinships""" +468 60 model """proje""" +468 60 loss """softplus""" +468 60 regularizer """no""" +468 60 optimizer """adadelta""" +468 60 training_loop """lcwa""" +468 60 evaluator """rankbased""" +468 61 dataset """kinships""" +468 61 model 
"""proje""" +468 61 loss """softplus""" +468 61 regularizer """no""" +468 61 optimizer """adadelta""" +468 61 training_loop """lcwa""" +468 61 evaluator """rankbased""" +468 62 dataset """kinships""" +468 62 model """proje""" +468 62 loss """softplus""" +468 62 regularizer """no""" +468 62 optimizer """adadelta""" +468 62 training_loop """lcwa""" +468 62 evaluator """rankbased""" +468 63 dataset """kinships""" +468 63 model """proje""" +468 63 loss """softplus""" +468 63 regularizer """no""" +468 63 optimizer """adadelta""" +468 63 training_loop """lcwa""" +468 63 evaluator """rankbased""" +468 64 dataset """kinships""" +468 64 model """proje""" +468 64 loss """softplus""" +468 64 regularizer """no""" +468 64 optimizer """adadelta""" +468 64 training_loop """lcwa""" +468 64 evaluator """rankbased""" +468 65 dataset """kinships""" +468 65 model """proje""" +468 65 loss """softplus""" +468 65 regularizer """no""" +468 65 optimizer """adadelta""" +468 65 training_loop """lcwa""" +468 65 evaluator """rankbased""" +468 66 dataset """kinships""" +468 66 model """proje""" +468 66 loss """softplus""" +468 66 regularizer """no""" +468 66 optimizer """adadelta""" +468 66 training_loop """lcwa""" +468 66 evaluator """rankbased""" +468 67 dataset """kinships""" +468 67 model """proje""" +468 67 loss """softplus""" +468 67 regularizer """no""" +468 67 optimizer """adadelta""" +468 67 training_loop """lcwa""" +468 67 evaluator """rankbased""" +468 68 dataset """kinships""" +468 68 model """proje""" +468 68 loss """softplus""" +468 68 regularizer """no""" +468 68 optimizer """adadelta""" +468 68 training_loop """lcwa""" +468 68 evaluator """rankbased""" +468 69 dataset """kinships""" +468 69 model """proje""" +468 69 loss """softplus""" +468 69 regularizer """no""" +468 69 optimizer """adadelta""" +468 69 training_loop """lcwa""" +468 69 evaluator """rankbased""" +468 70 dataset """kinships""" +468 70 model """proje""" +468 70 loss """softplus""" +468 70 regularizer """no""" +468 
70 optimizer """adadelta""" +468 70 training_loop """lcwa""" +468 70 evaluator """rankbased""" +468 71 dataset """kinships""" +468 71 model """proje""" +468 71 loss """softplus""" +468 71 regularizer """no""" +468 71 optimizer """adadelta""" +468 71 training_loop """lcwa""" +468 71 evaluator """rankbased""" +468 72 dataset """kinships""" +468 72 model """proje""" +468 72 loss """softplus""" +468 72 regularizer """no""" +468 72 optimizer """adadelta""" +468 72 training_loop """lcwa""" +468 72 evaluator """rankbased""" +468 73 dataset """kinships""" +468 73 model """proje""" +468 73 loss """softplus""" +468 73 regularizer """no""" +468 73 optimizer """adadelta""" +468 73 training_loop """lcwa""" +468 73 evaluator """rankbased""" +468 74 dataset """kinships""" +468 74 model """proje""" +468 74 loss """softplus""" +468 74 regularizer """no""" +468 74 optimizer """adadelta""" +468 74 training_loop """lcwa""" +468 74 evaluator """rankbased""" +468 75 dataset """kinships""" +468 75 model """proje""" +468 75 loss """softplus""" +468 75 regularizer """no""" +468 75 optimizer """adadelta""" +468 75 training_loop """lcwa""" +468 75 evaluator """rankbased""" +468 76 dataset """kinships""" +468 76 model """proje""" +468 76 loss """softplus""" +468 76 regularizer """no""" +468 76 optimizer """adadelta""" +468 76 training_loop """lcwa""" +468 76 evaluator """rankbased""" +468 77 dataset """kinships""" +468 77 model """proje""" +468 77 loss """softplus""" +468 77 regularizer """no""" +468 77 optimizer """adadelta""" +468 77 training_loop """lcwa""" +468 77 evaluator """rankbased""" +468 78 dataset """kinships""" +468 78 model """proje""" +468 78 loss """softplus""" +468 78 regularizer """no""" +468 78 optimizer """adadelta""" +468 78 training_loop """lcwa""" +468 78 evaluator """rankbased""" +468 79 dataset """kinships""" +468 79 model """proje""" +468 79 loss """softplus""" +468 79 regularizer """no""" +468 79 optimizer """adadelta""" +468 79 training_loop """lcwa""" +468 79 
evaluator """rankbased""" +468 80 dataset """kinships""" +468 80 model """proje""" +468 80 loss """softplus""" +468 80 regularizer """no""" +468 80 optimizer """adadelta""" +468 80 training_loop """lcwa""" +468 80 evaluator """rankbased""" +468 81 dataset """kinships""" +468 81 model """proje""" +468 81 loss """softplus""" +468 81 regularizer """no""" +468 81 optimizer """adadelta""" +468 81 training_loop """lcwa""" +468 81 evaluator """rankbased""" +468 82 dataset """kinships""" +468 82 model """proje""" +468 82 loss """softplus""" +468 82 regularizer """no""" +468 82 optimizer """adadelta""" +468 82 training_loop """lcwa""" +468 82 evaluator """rankbased""" +468 83 dataset """kinships""" +468 83 model """proje""" +468 83 loss """softplus""" +468 83 regularizer """no""" +468 83 optimizer """adadelta""" +468 83 training_loop """lcwa""" +468 83 evaluator """rankbased""" +468 84 dataset """kinships""" +468 84 model """proje""" +468 84 loss """softplus""" +468 84 regularizer """no""" +468 84 optimizer """adadelta""" +468 84 training_loop """lcwa""" +468 84 evaluator """rankbased""" +468 85 dataset """kinships""" +468 85 model """proje""" +468 85 loss """softplus""" +468 85 regularizer """no""" +468 85 optimizer """adadelta""" +468 85 training_loop """lcwa""" +468 85 evaluator """rankbased""" +468 86 dataset """kinships""" +468 86 model """proje""" +468 86 loss """softplus""" +468 86 regularizer """no""" +468 86 optimizer """adadelta""" +468 86 training_loop """lcwa""" +468 86 evaluator """rankbased""" +468 87 dataset """kinships""" +468 87 model """proje""" +468 87 loss """softplus""" +468 87 regularizer """no""" +468 87 optimizer """adadelta""" +468 87 training_loop """lcwa""" +468 87 evaluator """rankbased""" +468 88 dataset """kinships""" +468 88 model """proje""" +468 88 loss """softplus""" +468 88 regularizer """no""" +468 88 optimizer """adadelta""" +468 88 training_loop """lcwa""" +468 88 evaluator """rankbased""" +468 89 dataset """kinships""" +468 89 model 
"""proje""" +468 89 loss """softplus""" +468 89 regularizer """no""" +468 89 optimizer """adadelta""" +468 89 training_loop """lcwa""" +468 89 evaluator """rankbased""" +468 90 dataset """kinships""" +468 90 model """proje""" +468 90 loss """softplus""" +468 90 regularizer """no""" +468 90 optimizer """adadelta""" +468 90 training_loop """lcwa""" +468 90 evaluator """rankbased""" +468 91 dataset """kinships""" +468 91 model """proje""" +468 91 loss """softplus""" +468 91 regularizer """no""" +468 91 optimizer """adadelta""" +468 91 training_loop """lcwa""" +468 91 evaluator """rankbased""" +468 92 dataset """kinships""" +468 92 model """proje""" +468 92 loss """softplus""" +468 92 regularizer """no""" +468 92 optimizer """adadelta""" +468 92 training_loop """lcwa""" +468 92 evaluator """rankbased""" +468 93 dataset """kinships""" +468 93 model """proje""" +468 93 loss """softplus""" +468 93 regularizer """no""" +468 93 optimizer """adadelta""" +468 93 training_loop """lcwa""" +468 93 evaluator """rankbased""" +468 94 dataset """kinships""" +468 94 model """proje""" +468 94 loss """softplus""" +468 94 regularizer """no""" +468 94 optimizer """adadelta""" +468 94 training_loop """lcwa""" +468 94 evaluator """rankbased""" +468 95 dataset """kinships""" +468 95 model """proje""" +468 95 loss """softplus""" +468 95 regularizer """no""" +468 95 optimizer """adadelta""" +468 95 training_loop """lcwa""" +468 95 evaluator """rankbased""" +468 96 dataset """kinships""" +468 96 model """proje""" +468 96 loss """softplus""" +468 96 regularizer """no""" +468 96 optimizer """adadelta""" +468 96 training_loop """lcwa""" +468 96 evaluator """rankbased""" +468 97 dataset """kinships""" +468 97 model """proje""" +468 97 loss """softplus""" +468 97 regularizer """no""" +468 97 optimizer """adadelta""" +468 97 training_loop """lcwa""" +468 97 evaluator """rankbased""" +468 98 dataset """kinships""" +468 98 model """proje""" +468 98 loss """softplus""" +468 98 regularizer """no""" +468 
98 optimizer """adadelta""" +468 98 training_loop """lcwa""" +468 98 evaluator """rankbased""" +468 99 dataset """kinships""" +468 99 model """proje""" +468 99 loss """softplus""" +468 99 regularizer """no""" +468 99 optimizer """adadelta""" +468 99 training_loop """lcwa""" +468 99 evaluator """rankbased""" +468 100 dataset """kinships""" +468 100 model """proje""" +468 100 loss """softplus""" +468 100 regularizer """no""" +468 100 optimizer """adadelta""" +468 100 training_loop """lcwa""" +468 100 evaluator """rankbased""" +469 1 model.embedding_dim 2.0 +469 1 training.batch_size 0.0 +469 1 training.label_smoothing 0.001074119699662577 +469 2 model.embedding_dim 2.0 +469 2 training.batch_size 0.0 +469 2 training.label_smoothing 0.02877784905367211 +469 3 model.embedding_dim 0.0 +469 3 training.batch_size 2.0 +469 3 training.label_smoothing 0.16605758136421409 +469 4 model.embedding_dim 0.0 +469 4 training.batch_size 1.0 +469 4 training.label_smoothing 0.00421080727332173 +469 5 model.embedding_dim 2.0 +469 5 training.batch_size 0.0 +469 5 training.label_smoothing 0.5462725355671815 +469 6 model.embedding_dim 0.0 +469 6 training.batch_size 0.0 +469 6 training.label_smoothing 0.07912442478216958 +469 7 model.embedding_dim 1.0 +469 7 training.batch_size 1.0 +469 7 training.label_smoothing 0.03917113186105031 +469 8 model.embedding_dim 1.0 +469 8 training.batch_size 2.0 +469 8 training.label_smoothing 0.2653019672663578 +469 9 model.embedding_dim 1.0 +469 9 training.batch_size 1.0 +469 9 training.label_smoothing 0.0026117497582862246 +469 10 model.embedding_dim 2.0 +469 10 training.batch_size 0.0 +469 10 training.label_smoothing 0.0018500331984657365 +469 11 model.embedding_dim 2.0 +469 11 training.batch_size 1.0 +469 11 training.label_smoothing 0.5144218100559128 +469 12 model.embedding_dim 1.0 +469 12 training.batch_size 1.0 +469 12 training.label_smoothing 0.0012961829683845296 +469 13 model.embedding_dim 0.0 +469 13 training.batch_size 0.0 +469 13 
training.label_smoothing 0.04418885032104622 +469 14 model.embedding_dim 1.0 +469 14 training.batch_size 0.0 +469 14 training.label_smoothing 0.014890457082513861 +469 15 model.embedding_dim 0.0 +469 15 training.batch_size 1.0 +469 15 training.label_smoothing 0.0010252980221674701 +469 16 model.embedding_dim 2.0 +469 16 training.batch_size 0.0 +469 16 training.label_smoothing 0.0010933302758463163 +469 17 model.embedding_dim 0.0 +469 17 training.batch_size 1.0 +469 17 training.label_smoothing 0.009194383353579779 +469 18 model.embedding_dim 0.0 +469 18 training.batch_size 0.0 +469 18 training.label_smoothing 0.4920372283068614 +469 19 model.embedding_dim 2.0 +469 19 training.batch_size 1.0 +469 19 training.label_smoothing 0.6618519805545933 +469 20 model.embedding_dim 2.0 +469 20 training.batch_size 0.0 +469 20 training.label_smoothing 0.035320739879816855 +469 21 model.embedding_dim 0.0 +469 21 training.batch_size 0.0 +469 21 training.label_smoothing 0.05349606151890633 +469 22 model.embedding_dim 0.0 +469 22 training.batch_size 2.0 +469 22 training.label_smoothing 0.006241249441815602 +469 23 model.embedding_dim 1.0 +469 23 training.batch_size 1.0 +469 23 training.label_smoothing 0.19171197469551185 +469 24 model.embedding_dim 2.0 +469 24 training.batch_size 1.0 +469 24 training.label_smoothing 0.004151376869311501 +469 25 model.embedding_dim 2.0 +469 25 training.batch_size 1.0 +469 25 training.label_smoothing 0.006434116731731259 +469 26 model.embedding_dim 0.0 +469 26 training.batch_size 0.0 +469 26 training.label_smoothing 0.005595474042393388 +469 27 model.embedding_dim 2.0 +469 27 training.batch_size 1.0 +469 27 training.label_smoothing 0.009829273677502026 +469 28 model.embedding_dim 0.0 +469 28 training.batch_size 0.0 +469 28 training.label_smoothing 0.13759266179110552 +469 29 model.embedding_dim 1.0 +469 29 training.batch_size 1.0 +469 29 training.label_smoothing 0.16693576814829272 +469 30 model.embedding_dim 2.0 +469 30 training.batch_size 2.0 +469 30 
training.label_smoothing 0.0017461908480483534 +469 31 model.embedding_dim 1.0 +469 31 training.batch_size 1.0 +469 31 training.label_smoothing 0.001106313461933567 +469 32 model.embedding_dim 0.0 +469 32 training.batch_size 1.0 +469 32 training.label_smoothing 0.34413699736474956 +469 33 model.embedding_dim 2.0 +469 33 training.batch_size 1.0 +469 33 training.label_smoothing 0.2884829399527836 +469 34 model.embedding_dim 0.0 +469 34 training.batch_size 1.0 +469 34 training.label_smoothing 0.01104381224302128 +469 35 model.embedding_dim 0.0 +469 35 training.batch_size 2.0 +469 35 training.label_smoothing 0.5888980220485025 +469 36 model.embedding_dim 1.0 +469 36 training.batch_size 1.0 +469 36 training.label_smoothing 0.01696089653487089 +469 37 model.embedding_dim 2.0 +469 37 training.batch_size 2.0 +469 37 training.label_smoothing 0.0032346105358879405 +469 38 model.embedding_dim 1.0 +469 38 training.batch_size 0.0 +469 38 training.label_smoothing 0.0013468686638123993 +469 39 model.embedding_dim 1.0 +469 39 training.batch_size 0.0 +469 39 training.label_smoothing 0.004156215687249897 +469 40 model.embedding_dim 1.0 +469 40 training.batch_size 0.0 +469 40 training.label_smoothing 0.31303386324768434 +469 41 model.embedding_dim 1.0 +469 41 training.batch_size 0.0 +469 41 training.label_smoothing 0.04822274604693756 +469 42 model.embedding_dim 2.0 +469 42 training.batch_size 0.0 +469 42 training.label_smoothing 0.35402908932940963 +469 43 model.embedding_dim 0.0 +469 43 training.batch_size 1.0 +469 43 training.label_smoothing 0.3804767592909154 +469 44 model.embedding_dim 2.0 +469 44 training.batch_size 2.0 +469 44 training.label_smoothing 0.024070644487335025 +469 45 model.embedding_dim 2.0 +469 45 training.batch_size 0.0 +469 45 training.label_smoothing 0.5512533677714653 +469 46 model.embedding_dim 0.0 +469 46 training.batch_size 0.0 +469 46 training.label_smoothing 0.0016376755470575025 +469 47 model.embedding_dim 2.0 +469 47 training.batch_size 1.0 +469 47 
training.label_smoothing 0.011690735747684282 +469 48 model.embedding_dim 2.0 +469 48 training.batch_size 0.0 +469 48 training.label_smoothing 0.6125602397418923 +469 49 model.embedding_dim 1.0 +469 49 training.batch_size 2.0 +469 49 training.label_smoothing 0.0010351768383836685 +469 50 model.embedding_dim 2.0 +469 50 training.batch_size 0.0 +469 50 training.label_smoothing 0.00807486918576674 +469 51 model.embedding_dim 2.0 +469 51 training.batch_size 0.0 +469 51 training.label_smoothing 0.015441187097497132 +469 52 model.embedding_dim 0.0 +469 52 training.batch_size 1.0 +469 52 training.label_smoothing 0.0019498131428434093 +469 53 model.embedding_dim 2.0 +469 53 training.batch_size 1.0 +469 53 training.label_smoothing 0.0034868394743744324 +469 54 model.embedding_dim 1.0 +469 54 training.batch_size 1.0 +469 54 training.label_smoothing 0.001792949203645146 +469 55 model.embedding_dim 0.0 +469 55 training.batch_size 0.0 +469 55 training.label_smoothing 0.20570905055580338 +469 56 model.embedding_dim 2.0 +469 56 training.batch_size 0.0 +469 56 training.label_smoothing 0.001395345675412439 +469 57 model.embedding_dim 2.0 +469 57 training.batch_size 1.0 +469 57 training.label_smoothing 0.058941605035656625 +469 58 model.embedding_dim 2.0 +469 58 training.batch_size 2.0 +469 58 training.label_smoothing 0.026016887480080658 +469 59 model.embedding_dim 0.0 +469 59 training.batch_size 1.0 +469 59 training.label_smoothing 0.0011016341688668823 +469 60 model.embedding_dim 1.0 +469 60 training.batch_size 0.0 +469 60 training.label_smoothing 0.003903489284285551 +469 61 model.embedding_dim 0.0 +469 61 training.batch_size 0.0 +469 61 training.label_smoothing 0.07805003130917257 +469 62 model.embedding_dim 0.0 +469 62 training.batch_size 2.0 +469 62 training.label_smoothing 0.05531786155202801 +469 63 model.embedding_dim 0.0 +469 63 training.batch_size 2.0 +469 63 training.label_smoothing 0.5503065608823641 +469 64 model.embedding_dim 0.0 +469 64 training.batch_size 2.0 +469 
64 training.label_smoothing 0.001852025234960209 +469 65 model.embedding_dim 1.0 +469 65 training.batch_size 2.0 +469 65 training.label_smoothing 0.4617603563624104 +469 66 model.embedding_dim 2.0 +469 66 training.batch_size 0.0 +469 66 training.label_smoothing 0.0044148862979786554 +469 67 model.embedding_dim 0.0 +469 67 training.batch_size 1.0 +469 67 training.label_smoothing 0.0012810625307957328 +469 68 model.embedding_dim 1.0 +469 68 training.batch_size 0.0 +469 68 training.label_smoothing 0.006191987655521744 +469 69 model.embedding_dim 1.0 +469 69 training.batch_size 0.0 +469 69 training.label_smoothing 0.005750761469192699 +469 70 model.embedding_dim 0.0 +469 70 training.batch_size 0.0 +469 70 training.label_smoothing 0.2769420602264522 +469 71 model.embedding_dim 1.0 +469 71 training.batch_size 1.0 +469 71 training.label_smoothing 0.31474416375885717 +469 72 model.embedding_dim 1.0 +469 72 training.batch_size 2.0 +469 72 training.label_smoothing 0.5496861392485264 +469 73 model.embedding_dim 1.0 +469 73 training.batch_size 2.0 +469 73 training.label_smoothing 0.003528444954567351 +469 74 model.embedding_dim 0.0 +469 74 training.batch_size 1.0 +469 74 training.label_smoothing 0.04198107668657609 +469 75 model.embedding_dim 2.0 +469 75 training.batch_size 2.0 +469 75 training.label_smoothing 0.4017092777156483 +469 76 model.embedding_dim 2.0 +469 76 training.batch_size 2.0 +469 76 training.label_smoothing 0.001228684822151576 +469 77 model.embedding_dim 1.0 +469 77 training.batch_size 0.0 +469 77 training.label_smoothing 0.04967124436286718 +469 78 model.embedding_dim 0.0 +469 78 training.batch_size 0.0 +469 78 training.label_smoothing 0.1062416459970893 +469 79 model.embedding_dim 2.0 +469 79 training.batch_size 0.0 +469 79 training.label_smoothing 0.03955704431323331 +469 80 model.embedding_dim 0.0 +469 80 training.batch_size 1.0 +469 80 training.label_smoothing 0.051370018105532336 +469 81 model.embedding_dim 2.0 +469 81 training.batch_size 1.0 +469 81 
training.label_smoothing 0.005649913188498501 +469 82 model.embedding_dim 2.0 +469 82 training.batch_size 2.0 +469 82 training.label_smoothing 0.06639793329708095 +469 83 model.embedding_dim 2.0 +469 83 training.batch_size 1.0 +469 83 training.label_smoothing 0.02627742710395842 +469 84 model.embedding_dim 1.0 +469 84 training.batch_size 0.0 +469 84 training.label_smoothing 0.5081940542197622 +469 85 model.embedding_dim 0.0 +469 85 training.batch_size 2.0 +469 85 training.label_smoothing 0.42186238362803086 +469 86 model.embedding_dim 2.0 +469 86 training.batch_size 1.0 +469 86 training.label_smoothing 0.6388106201641833 +469 87 model.embedding_dim 1.0 +469 87 training.batch_size 1.0 +469 87 training.label_smoothing 0.004979039184757184 +469 88 model.embedding_dim 2.0 +469 88 training.batch_size 2.0 +469 88 training.label_smoothing 0.0017515622171960497 +469 89 model.embedding_dim 1.0 +469 89 training.batch_size 2.0 +469 89 training.label_smoothing 0.0012383648135418062 +469 90 model.embedding_dim 1.0 +469 90 training.batch_size 2.0 +469 90 training.label_smoothing 0.7438138668565939 +469 91 model.embedding_dim 1.0 +469 91 training.batch_size 0.0 +469 91 training.label_smoothing 0.00478406693563571 +469 92 model.embedding_dim 2.0 +469 92 training.batch_size 0.0 +469 92 training.label_smoothing 0.600390963380957 +469 93 model.embedding_dim 1.0 +469 93 training.batch_size 0.0 +469 93 training.label_smoothing 0.002099363629236031 +469 94 model.embedding_dim 1.0 +469 94 training.batch_size 0.0 +469 94 training.label_smoothing 0.0069105912184191315 +469 95 model.embedding_dim 1.0 +469 95 training.batch_size 1.0 +469 95 training.label_smoothing 0.003935336195476664 +469 96 model.embedding_dim 2.0 +469 96 training.batch_size 2.0 +469 96 training.label_smoothing 0.0056252980328302385 +469 97 model.embedding_dim 1.0 +469 97 training.batch_size 2.0 +469 97 training.label_smoothing 0.13111117469566977 +469 98 model.embedding_dim 2.0 +469 98 training.batch_size 2.0 +469 98 
training.label_smoothing 0.6022863909519607 +469 99 model.embedding_dim 0.0 +469 99 training.batch_size 2.0 +469 99 training.label_smoothing 0.007640641846029017 +469 100 model.embedding_dim 2.0 +469 100 training.batch_size 2.0 +469 100 training.label_smoothing 0.23587338486956166 +469 1 dataset """kinships""" +469 1 model """proje""" +469 1 loss """crossentropy""" +469 1 regularizer """no""" +469 1 optimizer """adadelta""" +469 1 training_loop """lcwa""" +469 1 evaluator """rankbased""" +469 2 dataset """kinships""" +469 2 model """proje""" +469 2 loss """crossentropy""" +469 2 regularizer """no""" +469 2 optimizer """adadelta""" +469 2 training_loop """lcwa""" +469 2 evaluator """rankbased""" +469 3 dataset """kinships""" +469 3 model """proje""" +469 3 loss """crossentropy""" +469 3 regularizer """no""" +469 3 optimizer """adadelta""" +469 3 training_loop """lcwa""" +469 3 evaluator """rankbased""" +469 4 dataset """kinships""" +469 4 model """proje""" +469 4 loss """crossentropy""" +469 4 regularizer """no""" +469 4 optimizer """adadelta""" +469 4 training_loop """lcwa""" +469 4 evaluator """rankbased""" +469 5 dataset """kinships""" +469 5 model """proje""" +469 5 loss """crossentropy""" +469 5 regularizer """no""" +469 5 optimizer """adadelta""" +469 5 training_loop """lcwa""" +469 5 evaluator """rankbased""" +469 6 dataset """kinships""" +469 6 model """proje""" +469 6 loss """crossentropy""" +469 6 regularizer """no""" +469 6 optimizer """adadelta""" +469 6 training_loop """lcwa""" +469 6 evaluator """rankbased""" +469 7 dataset """kinships""" +469 7 model """proje""" +469 7 loss """crossentropy""" +469 7 regularizer """no""" +469 7 optimizer """adadelta""" +469 7 training_loop """lcwa""" +469 7 evaluator """rankbased""" +469 8 dataset """kinships""" +469 8 model """proje""" +469 8 loss """crossentropy""" +469 8 regularizer """no""" +469 8 optimizer """adadelta""" +469 8 training_loop """lcwa""" +469 8 evaluator """rankbased""" +469 9 dataset """kinships""" 
+469 9 model """proje""" +469 9 loss """crossentropy""" +469 9 regularizer """no""" +469 9 optimizer """adadelta""" +469 9 training_loop """lcwa""" +469 9 evaluator """rankbased""" +469 10 dataset """kinships""" +469 10 model """proje""" +469 10 loss """crossentropy""" +469 10 regularizer """no""" +469 10 optimizer """adadelta""" +469 10 training_loop """lcwa""" +469 10 evaluator """rankbased""" +469 11 dataset """kinships""" +469 11 model """proje""" +469 11 loss """crossentropy""" +469 11 regularizer """no""" +469 11 optimizer """adadelta""" +469 11 training_loop """lcwa""" +469 11 evaluator """rankbased""" +469 12 dataset """kinships""" +469 12 model """proje""" +469 12 loss """crossentropy""" +469 12 regularizer """no""" +469 12 optimizer """adadelta""" +469 12 training_loop """lcwa""" +469 12 evaluator """rankbased""" +469 13 dataset """kinships""" +469 13 model """proje""" +469 13 loss """crossentropy""" +469 13 regularizer """no""" +469 13 optimizer """adadelta""" +469 13 training_loop """lcwa""" +469 13 evaluator """rankbased""" +469 14 dataset """kinships""" +469 14 model """proje""" +469 14 loss """crossentropy""" +469 14 regularizer """no""" +469 14 optimizer """adadelta""" +469 14 training_loop """lcwa""" +469 14 evaluator """rankbased""" +469 15 dataset """kinships""" +469 15 model """proje""" +469 15 loss """crossentropy""" +469 15 regularizer """no""" +469 15 optimizer """adadelta""" +469 15 training_loop """lcwa""" +469 15 evaluator """rankbased""" +469 16 dataset """kinships""" +469 16 model """proje""" +469 16 loss """crossentropy""" +469 16 regularizer """no""" +469 16 optimizer """adadelta""" +469 16 training_loop """lcwa""" +469 16 evaluator """rankbased""" +469 17 dataset """kinships""" +469 17 model """proje""" +469 17 loss """crossentropy""" +469 17 regularizer """no""" +469 17 optimizer """adadelta""" +469 17 training_loop """lcwa""" +469 17 evaluator """rankbased""" +469 18 dataset """kinships""" +469 18 model """proje""" +469 18 loss 
"""crossentropy""" +469 18 regularizer """no""" +469 18 optimizer """adadelta""" +469 18 training_loop """lcwa""" +469 18 evaluator """rankbased""" +469 19 dataset """kinships""" +469 19 model """proje""" +469 19 loss """crossentropy""" +469 19 regularizer """no""" +469 19 optimizer """adadelta""" +469 19 training_loop """lcwa""" +469 19 evaluator """rankbased""" +469 20 dataset """kinships""" +469 20 model """proje""" +469 20 loss """crossentropy""" +469 20 regularizer """no""" +469 20 optimizer """adadelta""" +469 20 training_loop """lcwa""" +469 20 evaluator """rankbased""" +469 21 dataset """kinships""" +469 21 model """proje""" +469 21 loss """crossentropy""" +469 21 regularizer """no""" +469 21 optimizer """adadelta""" +469 21 training_loop """lcwa""" +469 21 evaluator """rankbased""" +469 22 dataset """kinships""" +469 22 model """proje""" +469 22 loss """crossentropy""" +469 22 regularizer """no""" +469 22 optimizer """adadelta""" +469 22 training_loop """lcwa""" +469 22 evaluator """rankbased""" +469 23 dataset """kinships""" +469 23 model """proje""" +469 23 loss """crossentropy""" +469 23 regularizer """no""" +469 23 optimizer """adadelta""" +469 23 training_loop """lcwa""" +469 23 evaluator """rankbased""" +469 24 dataset """kinships""" +469 24 model """proje""" +469 24 loss """crossentropy""" +469 24 regularizer """no""" +469 24 optimizer """adadelta""" +469 24 training_loop """lcwa""" +469 24 evaluator """rankbased""" +469 25 dataset """kinships""" +469 25 model """proje""" +469 25 loss """crossentropy""" +469 25 regularizer """no""" +469 25 optimizer """adadelta""" +469 25 training_loop """lcwa""" +469 25 evaluator """rankbased""" +469 26 dataset """kinships""" +469 26 model """proje""" +469 26 loss """crossentropy""" +469 26 regularizer """no""" +469 26 optimizer """adadelta""" +469 26 training_loop """lcwa""" +469 26 evaluator """rankbased""" +469 27 dataset """kinships""" +469 27 model """proje""" +469 27 loss """crossentropy""" +469 27 
regularizer """no""" +469 27 optimizer """adadelta""" +469 27 training_loop """lcwa""" +469 27 evaluator """rankbased""" +469 28 dataset """kinships""" +469 28 model """proje""" +469 28 loss """crossentropy""" +469 28 regularizer """no""" +469 28 optimizer """adadelta""" +469 28 training_loop """lcwa""" +469 28 evaluator """rankbased""" +469 29 dataset """kinships""" +469 29 model """proje""" +469 29 loss """crossentropy""" +469 29 regularizer """no""" +469 29 optimizer """adadelta""" +469 29 training_loop """lcwa""" +469 29 evaluator """rankbased""" +469 30 dataset """kinships""" +469 30 model """proje""" +469 30 loss """crossentropy""" +469 30 regularizer """no""" +469 30 optimizer """adadelta""" +469 30 training_loop """lcwa""" +469 30 evaluator """rankbased""" +469 31 dataset """kinships""" +469 31 model """proje""" +469 31 loss """crossentropy""" +469 31 regularizer """no""" +469 31 optimizer """adadelta""" +469 31 training_loop """lcwa""" +469 31 evaluator """rankbased""" +469 32 dataset """kinships""" +469 32 model """proje""" +469 32 loss """crossentropy""" +469 32 regularizer """no""" +469 32 optimizer """adadelta""" +469 32 training_loop """lcwa""" +469 32 evaluator """rankbased""" +469 33 dataset """kinships""" +469 33 model """proje""" +469 33 loss """crossentropy""" +469 33 regularizer """no""" +469 33 optimizer """adadelta""" +469 33 training_loop """lcwa""" +469 33 evaluator """rankbased""" +469 34 dataset """kinships""" +469 34 model """proje""" +469 34 loss """crossentropy""" +469 34 regularizer """no""" +469 34 optimizer """adadelta""" +469 34 training_loop """lcwa""" +469 34 evaluator """rankbased""" +469 35 dataset """kinships""" +469 35 model """proje""" +469 35 loss """crossentropy""" +469 35 regularizer """no""" +469 35 optimizer """adadelta""" +469 35 training_loop """lcwa""" +469 35 evaluator """rankbased""" +469 36 dataset """kinships""" +469 36 model """proje""" +469 36 loss """crossentropy""" +469 36 regularizer """no""" +469 36 
optimizer """adadelta""" +469 36 training_loop """lcwa""" +469 36 evaluator """rankbased""" +469 37 dataset """kinships""" +469 37 model """proje""" +469 37 loss """crossentropy""" +469 37 regularizer """no""" +469 37 optimizer """adadelta""" +469 37 training_loop """lcwa""" +469 37 evaluator """rankbased""" +469 38 dataset """kinships""" +469 38 model """proje""" +469 38 loss """crossentropy""" +469 38 regularizer """no""" +469 38 optimizer """adadelta""" +469 38 training_loop """lcwa""" +469 38 evaluator """rankbased""" +469 39 dataset """kinships""" +469 39 model """proje""" +469 39 loss """crossentropy""" +469 39 regularizer """no""" +469 39 optimizer """adadelta""" +469 39 training_loop """lcwa""" +469 39 evaluator """rankbased""" +469 40 dataset """kinships""" +469 40 model """proje""" +469 40 loss """crossentropy""" +469 40 regularizer """no""" +469 40 optimizer """adadelta""" +469 40 training_loop """lcwa""" +469 40 evaluator """rankbased""" +469 41 dataset """kinships""" +469 41 model """proje""" +469 41 loss """crossentropy""" +469 41 regularizer """no""" +469 41 optimizer """adadelta""" +469 41 training_loop """lcwa""" +469 41 evaluator """rankbased""" +469 42 dataset """kinships""" +469 42 model """proje""" +469 42 loss """crossentropy""" +469 42 regularizer """no""" +469 42 optimizer """adadelta""" +469 42 training_loop """lcwa""" +469 42 evaluator """rankbased""" +469 43 dataset """kinships""" +469 43 model """proje""" +469 43 loss """crossentropy""" +469 43 regularizer """no""" +469 43 optimizer """adadelta""" +469 43 training_loop """lcwa""" +469 43 evaluator """rankbased""" +469 44 dataset """kinships""" +469 44 model """proje""" +469 44 loss """crossentropy""" +469 44 regularizer """no""" +469 44 optimizer """adadelta""" +469 44 training_loop """lcwa""" +469 44 evaluator """rankbased""" +469 45 dataset """kinships""" +469 45 model """proje""" +469 45 loss """crossentropy""" +469 45 regularizer """no""" +469 45 optimizer """adadelta""" +469 45 
training_loop """lcwa""" +469 45 evaluator """rankbased""" +469 46 dataset """kinships""" +469 46 model """proje""" +469 46 loss """crossentropy""" +469 46 regularizer """no""" +469 46 optimizer """adadelta""" +469 46 training_loop """lcwa""" +469 46 evaluator """rankbased""" +469 47 dataset """kinships""" +469 47 model """proje""" +469 47 loss """crossentropy""" +469 47 regularizer """no""" +469 47 optimizer """adadelta""" +469 47 training_loop """lcwa""" +469 47 evaluator """rankbased""" +469 48 dataset """kinships""" +469 48 model """proje""" +469 48 loss """crossentropy""" +469 48 regularizer """no""" +469 48 optimizer """adadelta""" +469 48 training_loop """lcwa""" +469 48 evaluator """rankbased""" +469 49 dataset """kinships""" +469 49 model """proje""" +469 49 loss """crossentropy""" +469 49 regularizer """no""" +469 49 optimizer """adadelta""" +469 49 training_loop """lcwa""" +469 49 evaluator """rankbased""" +469 50 dataset """kinships""" +469 50 model """proje""" +469 50 loss """crossentropy""" +469 50 regularizer """no""" +469 50 optimizer """adadelta""" +469 50 training_loop """lcwa""" +469 50 evaluator """rankbased""" +469 51 dataset """kinships""" +469 51 model """proje""" +469 51 loss """crossentropy""" +469 51 regularizer """no""" +469 51 optimizer """adadelta""" +469 51 training_loop """lcwa""" +469 51 evaluator """rankbased""" +469 52 dataset """kinships""" +469 52 model """proje""" +469 52 loss """crossentropy""" +469 52 regularizer """no""" +469 52 optimizer """adadelta""" +469 52 training_loop """lcwa""" +469 52 evaluator """rankbased""" +469 53 dataset """kinships""" +469 53 model """proje""" +469 53 loss """crossentropy""" +469 53 regularizer """no""" +469 53 optimizer """adadelta""" +469 53 training_loop """lcwa""" +469 53 evaluator """rankbased""" +469 54 dataset """kinships""" +469 54 model """proje""" +469 54 loss """crossentropy""" +469 54 regularizer """no""" +469 54 optimizer """adadelta""" +469 54 training_loop """lcwa""" +469 54 
evaluator """rankbased""" +469 55 dataset """kinships""" +469 55 model """proje""" +469 55 loss """crossentropy""" +469 55 regularizer """no""" +469 55 optimizer """adadelta""" +469 55 training_loop """lcwa""" +469 55 evaluator """rankbased""" +469 56 dataset """kinships""" +469 56 model """proje""" +469 56 loss """crossentropy""" +469 56 regularizer """no""" +469 56 optimizer """adadelta""" +469 56 training_loop """lcwa""" +469 56 evaluator """rankbased""" +469 57 dataset """kinships""" +469 57 model """proje""" +469 57 loss """crossentropy""" +469 57 regularizer """no""" +469 57 optimizer """adadelta""" +469 57 training_loop """lcwa""" +469 57 evaluator """rankbased""" +469 58 dataset """kinships""" +469 58 model """proje""" +469 58 loss """crossentropy""" +469 58 regularizer """no""" +469 58 optimizer """adadelta""" +469 58 training_loop """lcwa""" +469 58 evaluator """rankbased""" +469 59 dataset """kinships""" +469 59 model """proje""" +469 59 loss """crossentropy""" +469 59 regularizer """no""" +469 59 optimizer """adadelta""" +469 59 training_loop """lcwa""" +469 59 evaluator """rankbased""" +469 60 dataset """kinships""" +469 60 model """proje""" +469 60 loss """crossentropy""" +469 60 regularizer """no""" +469 60 optimizer """adadelta""" +469 60 training_loop """lcwa""" +469 60 evaluator """rankbased""" +469 61 dataset """kinships""" +469 61 model """proje""" +469 61 loss """crossentropy""" +469 61 regularizer """no""" +469 61 optimizer """adadelta""" +469 61 training_loop """lcwa""" +469 61 evaluator """rankbased""" +469 62 dataset """kinships""" +469 62 model """proje""" +469 62 loss """crossentropy""" +469 62 regularizer """no""" +469 62 optimizer """adadelta""" +469 62 training_loop """lcwa""" +469 62 evaluator """rankbased""" +469 63 dataset """kinships""" +469 63 model """proje""" +469 63 loss """crossentropy""" +469 63 regularizer """no""" +469 63 optimizer """adadelta""" +469 63 training_loop """lcwa""" +469 63 evaluator """rankbased""" +469 64 
dataset """kinships""" +469 64 model """proje""" +469 64 loss """crossentropy""" +469 64 regularizer """no""" +469 64 optimizer """adadelta""" +469 64 training_loop """lcwa""" +469 64 evaluator """rankbased""" +469 65 dataset """kinships""" +469 65 model """proje""" +469 65 loss """crossentropy""" +469 65 regularizer """no""" +469 65 optimizer """adadelta""" +469 65 training_loop """lcwa""" +469 65 evaluator """rankbased""" +469 66 dataset """kinships""" +469 66 model """proje""" +469 66 loss """crossentropy""" +469 66 regularizer """no""" +469 66 optimizer """adadelta""" +469 66 training_loop """lcwa""" +469 66 evaluator """rankbased""" +469 67 dataset """kinships""" +469 67 model """proje""" +469 67 loss """crossentropy""" +469 67 regularizer """no""" +469 67 optimizer """adadelta""" +469 67 training_loop """lcwa""" +469 67 evaluator """rankbased""" +469 68 dataset """kinships""" +469 68 model """proje""" +469 68 loss """crossentropy""" +469 68 regularizer """no""" +469 68 optimizer """adadelta""" +469 68 training_loop """lcwa""" +469 68 evaluator """rankbased""" +469 69 dataset """kinships""" +469 69 model """proje""" +469 69 loss """crossentropy""" +469 69 regularizer """no""" +469 69 optimizer """adadelta""" +469 69 training_loop """lcwa""" +469 69 evaluator """rankbased""" +469 70 dataset """kinships""" +469 70 model """proje""" +469 70 loss """crossentropy""" +469 70 regularizer """no""" +469 70 optimizer """adadelta""" +469 70 training_loop """lcwa""" +469 70 evaluator """rankbased""" +469 71 dataset """kinships""" +469 71 model """proje""" +469 71 loss """crossentropy""" +469 71 regularizer """no""" +469 71 optimizer """adadelta""" +469 71 training_loop """lcwa""" +469 71 evaluator """rankbased""" +469 72 dataset """kinships""" +469 72 model """proje""" +469 72 loss """crossentropy""" +469 72 regularizer """no""" +469 72 optimizer """adadelta""" +469 72 training_loop """lcwa""" +469 72 evaluator """rankbased""" +469 73 dataset """kinships""" +469 73 model 
"""proje""" +469 73 loss """crossentropy""" +469 73 regularizer """no""" +469 73 optimizer """adadelta""" +469 73 training_loop """lcwa""" +469 73 evaluator """rankbased""" +469 74 dataset """kinships""" +469 74 model """proje""" +469 74 loss """crossentropy""" +469 74 regularizer """no""" +469 74 optimizer """adadelta""" +469 74 training_loop """lcwa""" +469 74 evaluator """rankbased""" +469 75 dataset """kinships""" +469 75 model """proje""" +469 75 loss """crossentropy""" +469 75 regularizer """no""" +469 75 optimizer """adadelta""" +469 75 training_loop """lcwa""" +469 75 evaluator """rankbased""" +469 76 dataset """kinships""" +469 76 model """proje""" +469 76 loss """crossentropy""" +469 76 regularizer """no""" +469 76 optimizer """adadelta""" +469 76 training_loop """lcwa""" +469 76 evaluator """rankbased""" +469 77 dataset """kinships""" +469 77 model """proje""" +469 77 loss """crossentropy""" +469 77 regularizer """no""" +469 77 optimizer """adadelta""" +469 77 training_loop """lcwa""" +469 77 evaluator """rankbased""" +469 78 dataset """kinships""" +469 78 model """proje""" +469 78 loss """crossentropy""" +469 78 regularizer """no""" +469 78 optimizer """adadelta""" +469 78 training_loop """lcwa""" +469 78 evaluator """rankbased""" +469 79 dataset """kinships""" +469 79 model """proje""" +469 79 loss """crossentropy""" +469 79 regularizer """no""" +469 79 optimizer """adadelta""" +469 79 training_loop """lcwa""" +469 79 evaluator """rankbased""" +469 80 dataset """kinships""" +469 80 model """proje""" +469 80 loss """crossentropy""" +469 80 regularizer """no""" +469 80 optimizer """adadelta""" +469 80 training_loop """lcwa""" +469 80 evaluator """rankbased""" +469 81 dataset """kinships""" +469 81 model """proje""" +469 81 loss """crossentropy""" +469 81 regularizer """no""" +469 81 optimizer """adadelta""" +469 81 training_loop """lcwa""" +469 81 evaluator """rankbased""" +469 82 dataset """kinships""" +469 82 model """proje""" +469 82 loss 
"""crossentropy""" +469 82 regularizer """no""" +469 82 optimizer """adadelta""" +469 82 training_loop """lcwa""" +469 82 evaluator """rankbased""" +469 83 dataset """kinships""" +469 83 model """proje""" +469 83 loss """crossentropy""" +469 83 regularizer """no""" +469 83 optimizer """adadelta""" +469 83 training_loop """lcwa""" +469 83 evaluator """rankbased""" +469 84 dataset """kinships""" +469 84 model """proje""" +469 84 loss """crossentropy""" +469 84 regularizer """no""" +469 84 optimizer """adadelta""" +469 84 training_loop """lcwa""" +469 84 evaluator """rankbased""" +469 85 dataset """kinships""" +469 85 model """proje""" +469 85 loss """crossentropy""" +469 85 regularizer """no""" +469 85 optimizer """adadelta""" +469 85 training_loop """lcwa""" +469 85 evaluator """rankbased""" +469 86 dataset """kinships""" +469 86 model """proje""" +469 86 loss """crossentropy""" +469 86 regularizer """no""" +469 86 optimizer """adadelta""" +469 86 training_loop """lcwa""" +469 86 evaluator """rankbased""" +469 87 dataset """kinships""" +469 87 model """proje""" +469 87 loss """crossentropy""" +469 87 regularizer """no""" +469 87 optimizer """adadelta""" +469 87 training_loop """lcwa""" +469 87 evaluator """rankbased""" +469 88 dataset """kinships""" +469 88 model """proje""" +469 88 loss """crossentropy""" +469 88 regularizer """no""" +469 88 optimizer """adadelta""" +469 88 training_loop """lcwa""" +469 88 evaluator """rankbased""" +469 89 dataset """kinships""" +469 89 model """proje""" +469 89 loss """crossentropy""" +469 89 regularizer """no""" +469 89 optimizer """adadelta""" +469 89 training_loop """lcwa""" +469 89 evaluator """rankbased""" +469 90 dataset """kinships""" +469 90 model """proje""" +469 90 loss """crossentropy""" +469 90 regularizer """no""" +469 90 optimizer """adadelta""" +469 90 training_loop """lcwa""" +469 90 evaluator """rankbased""" +469 91 dataset """kinships""" +469 91 model """proje""" +469 91 loss """crossentropy""" +469 91 
regularizer """no""" +469 91 optimizer """adadelta""" +469 91 training_loop """lcwa""" +469 91 evaluator """rankbased""" +469 92 dataset """kinships""" +469 92 model """proje""" +469 92 loss """crossentropy""" +469 92 regularizer """no""" +469 92 optimizer """adadelta""" +469 92 training_loop """lcwa""" +469 92 evaluator """rankbased""" +469 93 dataset """kinships""" +469 93 model """proje""" +469 93 loss """crossentropy""" +469 93 regularizer """no""" +469 93 optimizer """adadelta""" +469 93 training_loop """lcwa""" +469 93 evaluator """rankbased""" +469 94 dataset """kinships""" +469 94 model """proje""" +469 94 loss """crossentropy""" +469 94 regularizer """no""" +469 94 optimizer """adadelta""" +469 94 training_loop """lcwa""" +469 94 evaluator """rankbased""" +469 95 dataset """kinships""" +469 95 model """proje""" +469 95 loss """crossentropy""" +469 95 regularizer """no""" +469 95 optimizer """adadelta""" +469 95 training_loop """lcwa""" +469 95 evaluator """rankbased""" +469 96 dataset """kinships""" +469 96 model """proje""" +469 96 loss """crossentropy""" +469 96 regularizer """no""" +469 96 optimizer """adadelta""" +469 96 training_loop """lcwa""" +469 96 evaluator """rankbased""" +469 97 dataset """kinships""" +469 97 model """proje""" +469 97 loss """crossentropy""" +469 97 regularizer """no""" +469 97 optimizer """adadelta""" +469 97 training_loop """lcwa""" +469 97 evaluator """rankbased""" +469 98 dataset """kinships""" +469 98 model """proje""" +469 98 loss """crossentropy""" +469 98 regularizer """no""" +469 98 optimizer """adadelta""" +469 98 training_loop """lcwa""" +469 98 evaluator """rankbased""" +469 99 dataset """kinships""" +469 99 model """proje""" +469 99 loss """crossentropy""" +469 99 regularizer """no""" +469 99 optimizer """adadelta""" +469 99 training_loop """lcwa""" +469 99 evaluator """rankbased""" +469 100 dataset """kinships""" +469 100 model """proje""" +469 100 loss """crossentropy""" +469 100 regularizer """no""" +469 100 
optimizer """adadelta""" +469 100 training_loop """lcwa""" +469 100 evaluator """rankbased""" +470 1 model.embedding_dim 1.0 +470 1 training.batch_size 2.0 +470 1 training.label_smoothing 0.0010544809260861412 +470 2 model.embedding_dim 2.0 +470 2 training.batch_size 2.0 +470 2 training.label_smoothing 0.1252584473343679 +470 3 model.embedding_dim 1.0 +470 3 training.batch_size 0.0 +470 3 training.label_smoothing 0.21675963456957797 +470 4 model.embedding_dim 1.0 +470 4 training.batch_size 1.0 +470 4 training.label_smoothing 0.019030541421306228 +470 5 model.embedding_dim 1.0 +470 5 training.batch_size 0.0 +470 5 training.label_smoothing 0.9506084611704012 +470 6 model.embedding_dim 1.0 +470 6 training.batch_size 1.0 +470 6 training.label_smoothing 0.0024864323063378056 +470 7 model.embedding_dim 2.0 +470 7 training.batch_size 1.0 +470 7 training.label_smoothing 0.011836873652004136 +470 8 model.embedding_dim 2.0 +470 8 training.batch_size 0.0 +470 8 training.label_smoothing 0.06917182153854645 +470 9 model.embedding_dim 1.0 +470 9 training.batch_size 2.0 +470 9 training.label_smoothing 0.0014615614995111318 +470 10 model.embedding_dim 1.0 +470 10 training.batch_size 2.0 +470 10 training.label_smoothing 0.7102067863839836 +470 11 model.embedding_dim 0.0 +470 11 training.batch_size 2.0 +470 11 training.label_smoothing 0.38303845800117114 +470 12 model.embedding_dim 0.0 +470 12 training.batch_size 1.0 +470 12 training.label_smoothing 0.0011084533771635802 +470 13 model.embedding_dim 0.0 +470 13 training.batch_size 2.0 +470 13 training.label_smoothing 0.0043155709782162965 +470 14 model.embedding_dim 2.0 +470 14 training.batch_size 0.0 +470 14 training.label_smoothing 0.0022185663482396814 +470 15 model.embedding_dim 0.0 +470 15 training.batch_size 2.0 +470 15 training.label_smoothing 0.08251911793712809 +470 16 model.embedding_dim 2.0 +470 16 training.batch_size 0.0 +470 16 training.label_smoothing 0.21018867267796656 +470 17 model.embedding_dim 0.0 +470 17 
training.batch_size 2.0 +470 17 training.label_smoothing 0.003939170874798639 +470 18 model.embedding_dim 1.0 +470 18 training.batch_size 0.0 +470 18 training.label_smoothing 0.0011546316909234696 +470 19 model.embedding_dim 0.0 +470 19 training.batch_size 2.0 +470 19 training.label_smoothing 0.3855865386240096 +470 20 model.embedding_dim 2.0 +470 20 training.batch_size 2.0 +470 20 training.label_smoothing 0.04637377223387794 +470 21 model.embedding_dim 1.0 +470 21 training.batch_size 1.0 +470 21 training.label_smoothing 0.16771154872894672 +470 22 model.embedding_dim 0.0 +470 22 training.batch_size 0.0 +470 22 training.label_smoothing 0.5832437450474394 +470 23 model.embedding_dim 1.0 +470 23 training.batch_size 1.0 +470 23 training.label_smoothing 0.14546409710938693 +470 24 model.embedding_dim 2.0 +470 24 training.batch_size 2.0 +470 24 training.label_smoothing 0.17427902954974597 +470 25 model.embedding_dim 2.0 +470 25 training.batch_size 0.0 +470 25 training.label_smoothing 0.21735587519750027 +470 26 model.embedding_dim 2.0 +470 26 training.batch_size 1.0 +470 26 training.label_smoothing 0.007969949670466788 +470 27 model.embedding_dim 1.0 +470 27 training.batch_size 2.0 +470 27 training.label_smoothing 0.0019898754505424158 +470 28 model.embedding_dim 1.0 +470 28 training.batch_size 0.0 +470 28 training.label_smoothing 0.9794583361082618 +470 29 model.embedding_dim 1.0 +470 29 training.batch_size 0.0 +470 29 training.label_smoothing 0.0013591690549659306 +470 30 model.embedding_dim 0.0 +470 30 training.batch_size 2.0 +470 30 training.label_smoothing 0.0015064005824159585 +470 31 model.embedding_dim 1.0 +470 31 training.batch_size 1.0 +470 31 training.label_smoothing 0.44968123821765643 +470 32 model.embedding_dim 1.0 +470 32 training.batch_size 2.0 +470 32 training.label_smoothing 0.047960275436247375 +470 33 model.embedding_dim 1.0 +470 33 training.batch_size 0.0 +470 33 training.label_smoothing 0.499518779409221 +470 34 model.embedding_dim 0.0 +470 34 
training.batch_size 0.0 +470 34 training.label_smoothing 0.0010657866060551045 +470 35 model.embedding_dim 0.0 +470 35 training.batch_size 0.0 +470 35 training.label_smoothing 0.003948752596671915 +470 36 model.embedding_dim 2.0 +470 36 training.batch_size 0.0 +470 36 training.label_smoothing 0.0018167061759521359 +470 37 model.embedding_dim 0.0 +470 37 training.batch_size 2.0 +470 37 training.label_smoothing 0.018159154962484065 +470 38 model.embedding_dim 2.0 +470 38 training.batch_size 1.0 +470 38 training.label_smoothing 0.22432036158067703 +470 39 model.embedding_dim 0.0 +470 39 training.batch_size 0.0 +470 39 training.label_smoothing 0.03335188649944529 +470 40 model.embedding_dim 2.0 +470 40 training.batch_size 2.0 +470 40 training.label_smoothing 0.2739819242101895 +470 41 model.embedding_dim 0.0 +470 41 training.batch_size 0.0 +470 41 training.label_smoothing 0.017373205158460744 +470 42 model.embedding_dim 2.0 +470 42 training.batch_size 0.0 +470 42 training.label_smoothing 0.0033517691765280154 +470 43 model.embedding_dim 1.0 +470 43 training.batch_size 0.0 +470 43 training.label_smoothing 0.0014645213132449453 +470 44 model.embedding_dim 0.0 +470 44 training.batch_size 1.0 +470 44 training.label_smoothing 0.0014350843299060037 +470 45 model.embedding_dim 2.0 +470 45 training.batch_size 2.0 +470 45 training.label_smoothing 0.0033619927025095413 +470 46 model.embedding_dim 0.0 +470 46 training.batch_size 1.0 +470 46 training.label_smoothing 0.009297556155413292 +470 47 model.embedding_dim 2.0 +470 47 training.batch_size 0.0 +470 47 training.label_smoothing 0.0042036204723856245 +470 48 model.embedding_dim 2.0 +470 48 training.batch_size 0.0 +470 48 training.label_smoothing 0.06797944572602908 +470 49 model.embedding_dim 0.0 +470 49 training.batch_size 1.0 +470 49 training.label_smoothing 0.12893104435037048 +470 50 model.embedding_dim 2.0 +470 50 training.batch_size 1.0 +470 50 training.label_smoothing 0.9526856176702859 +470 51 model.embedding_dim 2.0 
+470 51 training.batch_size 0.0 +470 51 training.label_smoothing 0.05163674923071163 +470 52 model.embedding_dim 0.0 +470 52 training.batch_size 0.0 +470 52 training.label_smoothing 0.419933454325317 +470 53 model.embedding_dim 2.0 +470 53 training.batch_size 2.0 +470 53 training.label_smoothing 0.09253813980894324 +470 54 model.embedding_dim 1.0 +470 54 training.batch_size 0.0 +470 54 training.label_smoothing 0.0023246676047562038 +470 55 model.embedding_dim 2.0 +470 55 training.batch_size 0.0 +470 55 training.label_smoothing 0.056833976348969424 +470 56 model.embedding_dim 1.0 +470 56 training.batch_size 1.0 +470 56 training.label_smoothing 0.09938032477136452 +470 57 model.embedding_dim 2.0 +470 57 training.batch_size 0.0 +470 57 training.label_smoothing 0.0038959605443969823 +470 58 model.embedding_dim 2.0 +470 58 training.batch_size 1.0 +470 58 training.label_smoothing 0.037488662930315664 +470 59 model.embedding_dim 1.0 +470 59 training.batch_size 0.0 +470 59 training.label_smoothing 0.0032946979622128386 +470 60 model.embedding_dim 2.0 +470 60 training.batch_size 2.0 +470 60 training.label_smoothing 0.9757162213845868 +470 61 model.embedding_dim 2.0 +470 61 training.batch_size 2.0 +470 61 training.label_smoothing 0.0011774743891628955 +470 62 model.embedding_dim 1.0 +470 62 training.batch_size 2.0 +470 62 training.label_smoothing 0.007951977298545111 +470 63 model.embedding_dim 2.0 +470 63 training.batch_size 0.0 +470 63 training.label_smoothing 0.22637462651457135 +470 64 model.embedding_dim 0.0 +470 64 training.batch_size 0.0 +470 64 training.label_smoothing 0.0014607872074230476 +470 65 model.embedding_dim 1.0 +470 65 training.batch_size 1.0 +470 65 training.label_smoothing 0.0030487058257906625 +470 66 model.embedding_dim 2.0 +470 66 training.batch_size 2.0 +470 66 training.label_smoothing 0.10600074347240791 +470 67 model.embedding_dim 1.0 +470 67 training.batch_size 0.0 +470 67 training.label_smoothing 0.4208818833542245 +470 68 model.embedding_dim 0.0 
+470 68 training.batch_size 2.0 +470 68 training.label_smoothing 0.005050399875571105 +470 69 model.embedding_dim 0.0 +470 69 training.batch_size 1.0 +470 69 training.label_smoothing 0.22402530229488765 +470 70 model.embedding_dim 2.0 +470 70 training.batch_size 1.0 +470 70 training.label_smoothing 0.0021406544745673274 +470 71 model.embedding_dim 1.0 +470 71 training.batch_size 1.0 +470 71 training.label_smoothing 0.03151902144987427 +470 72 model.embedding_dim 1.0 +470 72 training.batch_size 1.0 +470 72 training.label_smoothing 0.1443909319820905 +470 73 model.embedding_dim 1.0 +470 73 training.batch_size 2.0 +470 73 training.label_smoothing 0.003219844817247905 +470 74 model.embedding_dim 1.0 +470 74 training.batch_size 0.0 +470 74 training.label_smoothing 0.014744027667160467 +470 75 model.embedding_dim 2.0 +470 75 training.batch_size 1.0 +470 75 training.label_smoothing 0.02251270042540107 +470 76 model.embedding_dim 1.0 +470 76 training.batch_size 1.0 +470 76 training.label_smoothing 0.2447355813260811 +470 77 model.embedding_dim 2.0 +470 77 training.batch_size 2.0 +470 77 training.label_smoothing 0.02923365797256274 +470 78 model.embedding_dim 2.0 +470 78 training.batch_size 2.0 +470 78 training.label_smoothing 0.0029055775376548416 +470 79 model.embedding_dim 2.0 +470 79 training.batch_size 1.0 +470 79 training.label_smoothing 0.004208537766746709 +470 80 model.embedding_dim 0.0 +470 80 training.batch_size 0.0 +470 80 training.label_smoothing 0.0031902104022415493 +470 81 model.embedding_dim 0.0 +470 81 training.batch_size 2.0 +470 81 training.label_smoothing 0.010829409744542143 +470 82 model.embedding_dim 1.0 +470 82 training.batch_size 0.0 +470 82 training.label_smoothing 0.3413600407946095 +470 83 model.embedding_dim 1.0 +470 83 training.batch_size 1.0 +470 83 training.label_smoothing 0.00319938675450005 +470 84 model.embedding_dim 0.0 +470 84 training.batch_size 1.0 +470 84 training.label_smoothing 0.024522892823110588 +470 85 model.embedding_dim 2.0 
+470 85 training.batch_size 2.0 +470 85 training.label_smoothing 0.0034250954987217748 +470 86 model.embedding_dim 2.0 +470 86 training.batch_size 0.0 +470 86 training.label_smoothing 0.3110225702961551 +470 87 model.embedding_dim 2.0 +470 87 training.batch_size 1.0 +470 87 training.label_smoothing 0.30242942110822 +470 88 model.embedding_dim 2.0 +470 88 training.batch_size 1.0 +470 88 training.label_smoothing 0.022995561806062447 +470 89 model.embedding_dim 0.0 +470 89 training.batch_size 1.0 +470 89 training.label_smoothing 0.6287664815864504 +470 90 model.embedding_dim 2.0 +470 90 training.batch_size 2.0 +470 90 training.label_smoothing 0.002949852557077063 +470 91 model.embedding_dim 2.0 +470 91 training.batch_size 1.0 +470 91 training.label_smoothing 0.16655738406381113 +470 92 model.embedding_dim 2.0 +470 92 training.batch_size 0.0 +470 92 training.label_smoothing 0.71499760404644 +470 93 model.embedding_dim 1.0 +470 93 training.batch_size 0.0 +470 93 training.label_smoothing 0.015267396996674542 +470 94 model.embedding_dim 0.0 +470 94 training.batch_size 2.0 +470 94 training.label_smoothing 0.8329544966645075 +470 95 model.embedding_dim 2.0 +470 95 training.batch_size 1.0 +470 95 training.label_smoothing 0.0749246717791851 +470 96 model.embedding_dim 1.0 +470 96 training.batch_size 2.0 +470 96 training.label_smoothing 0.014208521286774587 +470 97 model.embedding_dim 0.0 +470 97 training.batch_size 1.0 +470 97 training.label_smoothing 0.007648401693024009 +470 98 model.embedding_dim 2.0 +470 98 training.batch_size 0.0 +470 98 training.label_smoothing 0.11221484014696285 +470 99 model.embedding_dim 2.0 +470 99 training.batch_size 2.0 +470 99 training.label_smoothing 0.06042829005462202 +470 100 model.embedding_dim 0.0 +470 100 training.batch_size 0.0 +470 100 training.label_smoothing 0.03160417364038629 +470 1 dataset """kinships""" +470 1 model """proje""" +470 1 loss """crossentropy""" +470 1 regularizer """no""" +470 1 optimizer """adadelta""" +470 1 
training_loop """lcwa""" +470 1 evaluator """rankbased""" +470 2 dataset """kinships""" +470 2 model """proje""" +470 2 loss """crossentropy""" +470 2 regularizer """no""" +470 2 optimizer """adadelta""" +470 2 training_loop """lcwa""" +470 2 evaluator """rankbased""" +470 3 dataset """kinships""" +470 3 model """proje""" +470 3 loss """crossentropy""" +470 3 regularizer """no""" +470 3 optimizer """adadelta""" +470 3 training_loop """lcwa""" +470 3 evaluator """rankbased""" +470 4 dataset """kinships""" +470 4 model """proje""" +470 4 loss """crossentropy""" +470 4 regularizer """no""" +470 4 optimizer """adadelta""" +470 4 training_loop """lcwa""" +470 4 evaluator """rankbased""" +470 5 dataset """kinships""" +470 5 model """proje""" +470 5 loss """crossentropy""" +470 5 regularizer """no""" +470 5 optimizer """adadelta""" +470 5 training_loop """lcwa""" +470 5 evaluator """rankbased""" +470 6 dataset """kinships""" +470 6 model """proje""" +470 6 loss """crossentropy""" +470 6 regularizer """no""" +470 6 optimizer """adadelta""" +470 6 training_loop """lcwa""" +470 6 evaluator """rankbased""" +470 7 dataset """kinships""" +470 7 model """proje""" +470 7 loss """crossentropy""" +470 7 regularizer """no""" +470 7 optimizer """adadelta""" +470 7 training_loop """lcwa""" +470 7 evaluator """rankbased""" +470 8 dataset """kinships""" +470 8 model """proje""" +470 8 loss """crossentropy""" +470 8 regularizer """no""" +470 8 optimizer """adadelta""" +470 8 training_loop """lcwa""" +470 8 evaluator """rankbased""" +470 9 dataset """kinships""" +470 9 model """proje""" +470 9 loss """crossentropy""" +470 9 regularizer """no""" +470 9 optimizer """adadelta""" +470 9 training_loop """lcwa""" +470 9 evaluator """rankbased""" +470 10 dataset """kinships""" +470 10 model """proje""" +470 10 loss """crossentropy""" +470 10 regularizer """no""" +470 10 optimizer """adadelta""" +470 10 training_loop """lcwa""" +470 10 evaluator """rankbased""" +470 11 dataset """kinships""" +470 
11 model """proje""" +470 11 loss """crossentropy""" +470 11 regularizer """no""" +470 11 optimizer """adadelta""" +470 11 training_loop """lcwa""" +470 11 evaluator """rankbased""" +470 12 dataset """kinships""" +470 12 model """proje""" +470 12 loss """crossentropy""" +470 12 regularizer """no""" +470 12 optimizer """adadelta""" +470 12 training_loop """lcwa""" +470 12 evaluator """rankbased""" +470 13 dataset """kinships""" +470 13 model """proje""" +470 13 loss """crossentropy""" +470 13 regularizer """no""" +470 13 optimizer """adadelta""" +470 13 training_loop """lcwa""" +470 13 evaluator """rankbased""" +470 14 dataset """kinships""" +470 14 model """proje""" +470 14 loss """crossentropy""" +470 14 regularizer """no""" +470 14 optimizer """adadelta""" +470 14 training_loop """lcwa""" +470 14 evaluator """rankbased""" +470 15 dataset """kinships""" +470 15 model """proje""" +470 15 loss """crossentropy""" +470 15 regularizer """no""" +470 15 optimizer """adadelta""" +470 15 training_loop """lcwa""" +470 15 evaluator """rankbased""" +470 16 dataset """kinships""" +470 16 model """proje""" +470 16 loss """crossentropy""" +470 16 regularizer """no""" +470 16 optimizer """adadelta""" +470 16 training_loop """lcwa""" +470 16 evaluator """rankbased""" +470 17 dataset """kinships""" +470 17 model """proje""" +470 17 loss """crossentropy""" +470 17 regularizer """no""" +470 17 optimizer """adadelta""" +470 17 training_loop """lcwa""" +470 17 evaluator """rankbased""" +470 18 dataset """kinships""" +470 18 model """proje""" +470 18 loss """crossentropy""" +470 18 regularizer """no""" +470 18 optimizer """adadelta""" +470 18 training_loop """lcwa""" +470 18 evaluator """rankbased""" +470 19 dataset """kinships""" +470 19 model """proje""" +470 19 loss """crossentropy""" +470 19 regularizer """no""" +470 19 optimizer """adadelta""" +470 19 training_loop """lcwa""" +470 19 evaluator """rankbased""" +470 20 dataset """kinships""" +470 20 model """proje""" +470 20 loss 
"""crossentropy""" +470 20 regularizer """no""" +470 20 optimizer """adadelta""" +470 20 training_loop """lcwa""" +470 20 evaluator """rankbased""" +470 21 dataset """kinships""" +470 21 model """proje""" +470 21 loss """crossentropy""" +470 21 regularizer """no""" +470 21 optimizer """adadelta""" +470 21 training_loop """lcwa""" +470 21 evaluator """rankbased""" +470 22 dataset """kinships""" +470 22 model """proje""" +470 22 loss """crossentropy""" +470 22 regularizer """no""" +470 22 optimizer """adadelta""" +470 22 training_loop """lcwa""" +470 22 evaluator """rankbased""" +470 23 dataset """kinships""" +470 23 model """proje""" +470 23 loss """crossentropy""" +470 23 regularizer """no""" +470 23 optimizer """adadelta""" +470 23 training_loop """lcwa""" +470 23 evaluator """rankbased""" +470 24 dataset """kinships""" +470 24 model """proje""" +470 24 loss """crossentropy""" +470 24 regularizer """no""" +470 24 optimizer """adadelta""" +470 24 training_loop """lcwa""" +470 24 evaluator """rankbased""" +470 25 dataset """kinships""" +470 25 model """proje""" +470 25 loss """crossentropy""" +470 25 regularizer """no""" +470 25 optimizer """adadelta""" +470 25 training_loop """lcwa""" +470 25 evaluator """rankbased""" +470 26 dataset """kinships""" +470 26 model """proje""" +470 26 loss """crossentropy""" +470 26 regularizer """no""" +470 26 optimizer """adadelta""" +470 26 training_loop """lcwa""" +470 26 evaluator """rankbased""" +470 27 dataset """kinships""" +470 27 model """proje""" +470 27 loss """crossentropy""" +470 27 regularizer """no""" +470 27 optimizer """adadelta""" +470 27 training_loop """lcwa""" +470 27 evaluator """rankbased""" +470 28 dataset """kinships""" +470 28 model """proje""" +470 28 loss """crossentropy""" +470 28 regularizer """no""" +470 28 optimizer """adadelta""" +470 28 training_loop """lcwa""" +470 28 evaluator """rankbased""" +470 29 dataset """kinships""" +470 29 model """proje""" +470 29 loss """crossentropy""" +470 29 
regularizer """no""" +470 29 optimizer """adadelta""" +470 29 training_loop """lcwa""" +470 29 evaluator """rankbased""" +470 30 dataset """kinships""" +470 30 model """proje""" +470 30 loss """crossentropy""" +470 30 regularizer """no""" +470 30 optimizer """adadelta""" +470 30 training_loop """lcwa""" +470 30 evaluator """rankbased""" +470 31 dataset """kinships""" +470 31 model """proje""" +470 31 loss """crossentropy""" +470 31 regularizer """no""" +470 31 optimizer """adadelta""" +470 31 training_loop """lcwa""" +470 31 evaluator """rankbased""" +470 32 dataset """kinships""" +470 32 model """proje""" +470 32 loss """crossentropy""" +470 32 regularizer """no""" +470 32 optimizer """adadelta""" +470 32 training_loop """lcwa""" +470 32 evaluator """rankbased""" +470 33 dataset """kinships""" +470 33 model """proje""" +470 33 loss """crossentropy""" +470 33 regularizer """no""" +470 33 optimizer """adadelta""" +470 33 training_loop """lcwa""" +470 33 evaluator """rankbased""" +470 34 dataset """kinships""" +470 34 model """proje""" +470 34 loss """crossentropy""" +470 34 regularizer """no""" +470 34 optimizer """adadelta""" +470 34 training_loop """lcwa""" +470 34 evaluator """rankbased""" +470 35 dataset """kinships""" +470 35 model """proje""" +470 35 loss """crossentropy""" +470 35 regularizer """no""" +470 35 optimizer """adadelta""" +470 35 training_loop """lcwa""" +470 35 evaluator """rankbased""" +470 36 dataset """kinships""" +470 36 model """proje""" +470 36 loss """crossentropy""" +470 36 regularizer """no""" +470 36 optimizer """adadelta""" +470 36 training_loop """lcwa""" +470 36 evaluator """rankbased""" +470 37 dataset """kinships""" +470 37 model """proje""" +470 37 loss """crossentropy""" +470 37 regularizer """no""" +470 37 optimizer """adadelta""" +470 37 training_loop """lcwa""" +470 37 evaluator """rankbased""" +470 38 dataset """kinships""" +470 38 model """proje""" +470 38 loss """crossentropy""" +470 38 regularizer """no""" +470 38 
optimizer """adadelta""" +470 38 training_loop """lcwa""" +470 38 evaluator """rankbased""" +470 39 dataset """kinships""" +470 39 model """proje""" +470 39 loss """crossentropy""" +470 39 regularizer """no""" +470 39 optimizer """adadelta""" +470 39 training_loop """lcwa""" +470 39 evaluator """rankbased""" +470 40 dataset """kinships""" +470 40 model """proje""" +470 40 loss """crossentropy""" +470 40 regularizer """no""" +470 40 optimizer """adadelta""" +470 40 training_loop """lcwa""" +470 40 evaluator """rankbased""" +470 41 dataset """kinships""" +470 41 model """proje""" +470 41 loss """crossentropy""" +470 41 regularizer """no""" +470 41 optimizer """adadelta""" +470 41 training_loop """lcwa""" +470 41 evaluator """rankbased""" +470 42 dataset """kinships""" +470 42 model """proje""" +470 42 loss """crossentropy""" +470 42 regularizer """no""" +470 42 optimizer """adadelta""" +470 42 training_loop """lcwa""" +470 42 evaluator """rankbased""" +470 43 dataset """kinships""" +470 43 model """proje""" +470 43 loss """crossentropy""" +470 43 regularizer """no""" +470 43 optimizer """adadelta""" +470 43 training_loop """lcwa""" +470 43 evaluator """rankbased""" +470 44 dataset """kinships""" +470 44 model """proje""" +470 44 loss """crossentropy""" +470 44 regularizer """no""" +470 44 optimizer """adadelta""" +470 44 training_loop """lcwa""" +470 44 evaluator """rankbased""" +470 45 dataset """kinships""" +470 45 model """proje""" +470 45 loss """crossentropy""" +470 45 regularizer """no""" +470 45 optimizer """adadelta""" +470 45 training_loop """lcwa""" +470 45 evaluator """rankbased""" +470 46 dataset """kinships""" +470 46 model """proje""" +470 46 loss """crossentropy""" +470 46 regularizer """no""" +470 46 optimizer """adadelta""" +470 46 training_loop """lcwa""" +470 46 evaluator """rankbased""" +470 47 dataset """kinships""" +470 47 model """proje""" +470 47 loss """crossentropy""" +470 47 regularizer """no""" +470 47 optimizer """adadelta""" +470 47 
training_loop """lcwa""" +470 47 evaluator """rankbased""" +470 48 dataset """kinships""" +470 48 model """proje""" +470 48 loss """crossentropy""" +470 48 regularizer """no""" +470 48 optimizer """adadelta""" +470 48 training_loop """lcwa""" +470 48 evaluator """rankbased""" +470 49 dataset """kinships""" +470 49 model """proje""" +470 49 loss """crossentropy""" +470 49 regularizer """no""" +470 49 optimizer """adadelta""" +470 49 training_loop """lcwa""" +470 49 evaluator """rankbased""" +470 50 dataset """kinships""" +470 50 model """proje""" +470 50 loss """crossentropy""" +470 50 regularizer """no""" +470 50 optimizer """adadelta""" +470 50 training_loop """lcwa""" +470 50 evaluator """rankbased""" +470 51 dataset """kinships""" +470 51 model """proje""" +470 51 loss """crossentropy""" +470 51 regularizer """no""" +470 51 optimizer """adadelta""" +470 51 training_loop """lcwa""" +470 51 evaluator """rankbased""" +470 52 dataset """kinships""" +470 52 model """proje""" +470 52 loss """crossentropy""" +470 52 regularizer """no""" +470 52 optimizer """adadelta""" +470 52 training_loop """lcwa""" +470 52 evaluator """rankbased""" +470 53 dataset """kinships""" +470 53 model """proje""" +470 53 loss """crossentropy""" +470 53 regularizer """no""" +470 53 optimizer """adadelta""" +470 53 training_loop """lcwa""" +470 53 evaluator """rankbased""" +470 54 dataset """kinships""" +470 54 model """proje""" +470 54 loss """crossentropy""" +470 54 regularizer """no""" +470 54 optimizer """adadelta""" +470 54 training_loop """lcwa""" +470 54 evaluator """rankbased""" +470 55 dataset """kinships""" +470 55 model """proje""" +470 55 loss """crossentropy""" +470 55 regularizer """no""" +470 55 optimizer """adadelta""" +470 55 training_loop """lcwa""" +470 55 evaluator """rankbased""" +470 56 dataset """kinships""" +470 56 model """proje""" +470 56 loss """crossentropy""" +470 56 regularizer """no""" +470 56 optimizer """adadelta""" +470 56 training_loop """lcwa""" +470 56 
evaluator """rankbased""" +470 57 dataset """kinships""" +470 57 model """proje""" +470 57 loss """crossentropy""" +470 57 regularizer """no""" +470 57 optimizer """adadelta""" +470 57 training_loop """lcwa""" +470 57 evaluator """rankbased""" +470 58 dataset """kinships""" +470 58 model """proje""" +470 58 loss """crossentropy""" +470 58 regularizer """no""" +470 58 optimizer """adadelta""" +470 58 training_loop """lcwa""" +470 58 evaluator """rankbased""" +470 59 dataset """kinships""" +470 59 model """proje""" +470 59 loss """crossentropy""" +470 59 regularizer """no""" +470 59 optimizer """adadelta""" +470 59 training_loop """lcwa""" +470 59 evaluator """rankbased""" +470 60 dataset """kinships""" +470 60 model """proje""" +470 60 loss """crossentropy""" +470 60 regularizer """no""" +470 60 optimizer """adadelta""" +470 60 training_loop """lcwa""" +470 60 evaluator """rankbased""" +470 61 dataset """kinships""" +470 61 model """proje""" +470 61 loss """crossentropy""" +470 61 regularizer """no""" +470 61 optimizer """adadelta""" +470 61 training_loop """lcwa""" +470 61 evaluator """rankbased""" +470 62 dataset """kinships""" +470 62 model """proje""" +470 62 loss """crossentropy""" +470 62 regularizer """no""" +470 62 optimizer """adadelta""" +470 62 training_loop """lcwa""" +470 62 evaluator """rankbased""" +470 63 dataset """kinships""" +470 63 model """proje""" +470 63 loss """crossentropy""" +470 63 regularizer """no""" +470 63 optimizer """adadelta""" +470 63 training_loop """lcwa""" +470 63 evaluator """rankbased""" +470 64 dataset """kinships""" +470 64 model """proje""" +470 64 loss """crossentropy""" +470 64 regularizer """no""" +470 64 optimizer """adadelta""" +470 64 training_loop """lcwa""" +470 64 evaluator """rankbased""" +470 65 dataset """kinships""" +470 65 model """proje""" +470 65 loss """crossentropy""" +470 65 regularizer """no""" +470 65 optimizer """adadelta""" +470 65 training_loop """lcwa""" +470 65 evaluator """rankbased""" +470 66 
dataset """kinships""" +470 66 model """proje""" +470 66 loss """crossentropy""" +470 66 regularizer """no""" +470 66 optimizer """adadelta""" +470 66 training_loop """lcwa""" +470 66 evaluator """rankbased""" +470 67 dataset """kinships""" +470 67 model """proje""" +470 67 loss """crossentropy""" +470 67 regularizer """no""" +470 67 optimizer """adadelta""" +470 67 training_loop """lcwa""" +470 67 evaluator """rankbased""" +470 68 dataset """kinships""" +470 68 model """proje""" +470 68 loss """crossentropy""" +470 68 regularizer """no""" +470 68 optimizer """adadelta""" +470 68 training_loop """lcwa""" +470 68 evaluator """rankbased""" +470 69 dataset """kinships""" +470 69 model """proje""" +470 69 loss """crossentropy""" +470 69 regularizer """no""" +470 69 optimizer """adadelta""" +470 69 training_loop """lcwa""" +470 69 evaluator """rankbased""" +470 70 dataset """kinships""" +470 70 model """proje""" +470 70 loss """crossentropy""" +470 70 regularizer """no""" +470 70 optimizer """adadelta""" +470 70 training_loop """lcwa""" +470 70 evaluator """rankbased""" +470 71 dataset """kinships""" +470 71 model """proje""" +470 71 loss """crossentropy""" +470 71 regularizer """no""" +470 71 optimizer """adadelta""" +470 71 training_loop """lcwa""" +470 71 evaluator """rankbased""" +470 72 dataset """kinships""" +470 72 model """proje""" +470 72 loss """crossentropy""" +470 72 regularizer """no""" +470 72 optimizer """adadelta""" +470 72 training_loop """lcwa""" +470 72 evaluator """rankbased""" +470 73 dataset """kinships""" +470 73 model """proje""" +470 73 loss """crossentropy""" +470 73 regularizer """no""" +470 73 optimizer """adadelta""" +470 73 training_loop """lcwa""" +470 73 evaluator """rankbased""" +470 74 dataset """kinships""" +470 74 model """proje""" +470 74 loss """crossentropy""" +470 74 regularizer """no""" +470 74 optimizer """adadelta""" +470 74 training_loop """lcwa""" +470 74 evaluator """rankbased""" +470 75 dataset """kinships""" +470 75 model 
"""proje""" +470 75 loss """crossentropy""" +470 75 regularizer """no""" +470 75 optimizer """adadelta""" +470 75 training_loop """lcwa""" +470 75 evaluator """rankbased""" +470 76 dataset """kinships""" +470 76 model """proje""" +470 76 loss """crossentropy""" +470 76 regularizer """no""" +470 76 optimizer """adadelta""" +470 76 training_loop """lcwa""" +470 76 evaluator """rankbased""" +470 77 dataset """kinships""" +470 77 model """proje""" +470 77 loss """crossentropy""" +470 77 regularizer """no""" +470 77 optimizer """adadelta""" +470 77 training_loop """lcwa""" +470 77 evaluator """rankbased""" +470 78 dataset """kinships""" +470 78 model """proje""" +470 78 loss """crossentropy""" +470 78 regularizer """no""" +470 78 optimizer """adadelta""" +470 78 training_loop """lcwa""" +470 78 evaluator """rankbased""" +470 79 dataset """kinships""" +470 79 model """proje""" +470 79 loss """crossentropy""" +470 79 regularizer """no""" +470 79 optimizer """adadelta""" +470 79 training_loop """lcwa""" +470 79 evaluator """rankbased""" +470 80 dataset """kinships""" +470 80 model """proje""" +470 80 loss """crossentropy""" +470 80 regularizer """no""" +470 80 optimizer """adadelta""" +470 80 training_loop """lcwa""" +470 80 evaluator """rankbased""" +470 81 dataset """kinships""" +470 81 model """proje""" +470 81 loss """crossentropy""" +470 81 regularizer """no""" +470 81 optimizer """adadelta""" +470 81 training_loop """lcwa""" +470 81 evaluator """rankbased""" +470 82 dataset """kinships""" +470 82 model """proje""" +470 82 loss """crossentropy""" +470 82 regularizer """no""" +470 82 optimizer """adadelta""" +470 82 training_loop """lcwa""" +470 82 evaluator """rankbased""" +470 83 dataset """kinships""" +470 83 model """proje""" +470 83 loss """crossentropy""" +470 83 regularizer """no""" +470 83 optimizer """adadelta""" +470 83 training_loop """lcwa""" +470 83 evaluator """rankbased""" +470 84 dataset """kinships""" +470 84 model """proje""" +470 84 loss 
"""crossentropy""" +470 84 regularizer """no""" +470 84 optimizer """adadelta""" +470 84 training_loop """lcwa""" +470 84 evaluator """rankbased""" +470 85 dataset """kinships""" +470 85 model """proje""" +470 85 loss """crossentropy""" +470 85 regularizer """no""" +470 85 optimizer """adadelta""" +470 85 training_loop """lcwa""" +470 85 evaluator """rankbased""" +470 86 dataset """kinships""" +470 86 model """proje""" +470 86 loss """crossentropy""" +470 86 regularizer """no""" +470 86 optimizer """adadelta""" +470 86 training_loop """lcwa""" +470 86 evaluator """rankbased""" +470 87 dataset """kinships""" +470 87 model """proje""" +470 87 loss """crossentropy""" +470 87 regularizer """no""" +470 87 optimizer """adadelta""" +470 87 training_loop """lcwa""" +470 87 evaluator """rankbased""" +470 88 dataset """kinships""" +470 88 model """proje""" +470 88 loss """crossentropy""" +470 88 regularizer """no""" +470 88 optimizer """adadelta""" +470 88 training_loop """lcwa""" +470 88 evaluator """rankbased""" +470 89 dataset """kinships""" +470 89 model """proje""" +470 89 loss """crossentropy""" +470 89 regularizer """no""" +470 89 optimizer """adadelta""" +470 89 training_loop """lcwa""" +470 89 evaluator """rankbased""" +470 90 dataset """kinships""" +470 90 model """proje""" +470 90 loss """crossentropy""" +470 90 regularizer """no""" +470 90 optimizer """adadelta""" +470 90 training_loop """lcwa""" +470 90 evaluator """rankbased""" +470 91 dataset """kinships""" +470 91 model """proje""" +470 91 loss """crossentropy""" +470 91 regularizer """no""" +470 91 optimizer """adadelta""" +470 91 training_loop """lcwa""" +470 91 evaluator """rankbased""" +470 92 dataset """kinships""" +470 92 model """proje""" +470 92 loss """crossentropy""" +470 92 regularizer """no""" +470 92 optimizer """adadelta""" +470 92 training_loop """lcwa""" +470 92 evaluator """rankbased""" +470 93 dataset """kinships""" +470 93 model """proje""" +470 93 loss """crossentropy""" +470 93 
regularizer """no""" +470 93 optimizer """adadelta""" +470 93 training_loop """lcwa""" +470 93 evaluator """rankbased""" +470 94 dataset """kinships""" +470 94 model """proje""" +470 94 loss """crossentropy""" +470 94 regularizer """no""" +470 94 optimizer """adadelta""" +470 94 training_loop """lcwa""" +470 94 evaluator """rankbased""" +470 95 dataset """kinships""" +470 95 model """proje""" +470 95 loss """crossentropy""" +470 95 regularizer """no""" +470 95 optimizer """adadelta""" +470 95 training_loop """lcwa""" +470 95 evaluator """rankbased""" +470 96 dataset """kinships""" +470 96 model """proje""" +470 96 loss """crossentropy""" +470 96 regularizer """no""" +470 96 optimizer """adadelta""" +470 96 training_loop """lcwa""" +470 96 evaluator """rankbased""" +470 97 dataset """kinships""" +470 97 model """proje""" +470 97 loss """crossentropy""" +470 97 regularizer """no""" +470 97 optimizer """adadelta""" +470 97 training_loop """lcwa""" +470 97 evaluator """rankbased""" +470 98 dataset """kinships""" +470 98 model """proje""" +470 98 loss """crossentropy""" +470 98 regularizer """no""" +470 98 optimizer """adadelta""" +470 98 training_loop """lcwa""" +470 98 evaluator """rankbased""" +470 99 dataset """kinships""" +470 99 model """proje""" +470 99 loss """crossentropy""" +470 99 regularizer """no""" +470 99 optimizer """adadelta""" +470 99 training_loop """lcwa""" +470 99 evaluator """rankbased""" +470 100 dataset """kinships""" +470 100 model """proje""" +470 100 loss """crossentropy""" +470 100 regularizer """no""" +470 100 optimizer """adadelta""" +470 100 training_loop """lcwa""" +470 100 evaluator """rankbased""" +471 1 model.embedding_dim 0.0 +471 1 loss.margin 1.1924140524066977 +471 1 negative_sampler.num_negs_per_pos 20.0 +471 1 training.batch_size 1.0 +471 2 model.embedding_dim 2.0 +471 2 loss.margin 4.6564550842399335 +471 2 negative_sampler.num_negs_per_pos 96.0 +471 2 training.batch_size 0.0 +471 3 model.embedding_dim 0.0 +471 3 loss.margin 
4.3544928433732 +471 3 negative_sampler.num_negs_per_pos 33.0 +471 3 training.batch_size 2.0 +471 4 model.embedding_dim 2.0 +471 4 loss.margin 4.350669201904798 +471 4 negative_sampler.num_negs_per_pos 99.0 +471 4 training.batch_size 1.0 +471 5 model.embedding_dim 0.0 +471 5 loss.margin 4.964151873019224 +471 5 negative_sampler.num_negs_per_pos 9.0 +471 5 training.batch_size 2.0 +471 6 model.embedding_dim 1.0 +471 6 loss.margin 4.028892903379759 +471 6 negative_sampler.num_negs_per_pos 7.0 +471 6 training.batch_size 1.0 +471 7 model.embedding_dim 0.0 +471 7 loss.margin 2.061992642093416 +471 7 negative_sampler.num_negs_per_pos 64.0 +471 7 training.batch_size 2.0 +471 8 model.embedding_dim 0.0 +471 8 loss.margin 2.014691461289627 +471 8 negative_sampler.num_negs_per_pos 21.0 +471 8 training.batch_size 1.0 +471 9 model.embedding_dim 0.0 +471 9 loss.margin 2.8990220567884366 +471 9 negative_sampler.num_negs_per_pos 41.0 +471 9 training.batch_size 2.0 +471 10 model.embedding_dim 0.0 +471 10 loss.margin 3.9793459566045577 +471 10 negative_sampler.num_negs_per_pos 59.0 +471 10 training.batch_size 0.0 +471 11 model.embedding_dim 1.0 +471 11 loss.margin 5.624704308074544 +471 11 negative_sampler.num_negs_per_pos 45.0 +471 11 training.batch_size 2.0 +471 12 model.embedding_dim 1.0 +471 12 loss.margin 4.068915988084284 +471 12 negative_sampler.num_negs_per_pos 86.0 +471 12 training.batch_size 0.0 +471 13 model.embedding_dim 0.0 +471 13 loss.margin 6.402830352024773 +471 13 negative_sampler.num_negs_per_pos 54.0 +471 13 training.batch_size 0.0 +471 14 model.embedding_dim 2.0 +471 14 loss.margin 9.572528176200867 +471 14 negative_sampler.num_negs_per_pos 23.0 +471 14 training.batch_size 1.0 +471 15 model.embedding_dim 1.0 +471 15 loss.margin 4.847830220504916 +471 15 negative_sampler.num_negs_per_pos 91.0 +471 15 training.batch_size 2.0 +471 16 model.embedding_dim 1.0 +471 16 loss.margin 7.285728472298497 +471 16 negative_sampler.num_negs_per_pos 7.0 +471 16 
training.batch_size 0.0 +471 17 model.embedding_dim 0.0 +471 17 loss.margin 8.09539680345264 +471 17 negative_sampler.num_negs_per_pos 84.0 +471 17 training.batch_size 1.0 +471 18 model.embedding_dim 0.0 +471 18 loss.margin 2.144944478590171 +471 18 negative_sampler.num_negs_per_pos 66.0 +471 18 training.batch_size 2.0 +471 19 model.embedding_dim 1.0 +471 19 loss.margin 5.378194566412024 +471 19 negative_sampler.num_negs_per_pos 91.0 +471 19 training.batch_size 2.0 +471 20 model.embedding_dim 0.0 +471 20 loss.margin 1.2116685469862736 +471 20 negative_sampler.num_negs_per_pos 30.0 +471 20 training.batch_size 1.0 +471 21 model.embedding_dim 1.0 +471 21 loss.margin 2.530894213639775 +471 21 negative_sampler.num_negs_per_pos 99.0 +471 21 training.batch_size 2.0 +471 22 model.embedding_dim 2.0 +471 22 loss.margin 9.793135814221465 +471 22 negative_sampler.num_negs_per_pos 22.0 +471 22 training.batch_size 2.0 +471 23 model.embedding_dim 1.0 +471 23 loss.margin 1.3535576204882334 +471 23 negative_sampler.num_negs_per_pos 14.0 +471 23 training.batch_size 1.0 +471 24 model.embedding_dim 2.0 +471 24 loss.margin 8.01380232118794 +471 24 negative_sampler.num_negs_per_pos 43.0 +471 24 training.batch_size 0.0 +471 25 model.embedding_dim 2.0 +471 25 loss.margin 9.257342740465319 +471 25 negative_sampler.num_negs_per_pos 20.0 +471 25 training.batch_size 0.0 +471 26 model.embedding_dim 2.0 +471 26 loss.margin 2.112224789759084 +471 26 negative_sampler.num_negs_per_pos 38.0 +471 26 training.batch_size 1.0 +471 27 model.embedding_dim 2.0 +471 27 loss.margin 7.251738508737825 +471 27 negative_sampler.num_negs_per_pos 29.0 +471 27 training.batch_size 1.0 +471 28 model.embedding_dim 1.0 +471 28 loss.margin 3.475721506127001 +471 28 negative_sampler.num_negs_per_pos 82.0 +471 28 training.batch_size 1.0 +471 29 model.embedding_dim 2.0 +471 29 loss.margin 3.0479629847478225 +471 29 negative_sampler.num_negs_per_pos 95.0 +471 29 training.batch_size 2.0 +471 30 model.embedding_dim 0.0 +471 
30 loss.margin 5.924992501090191 +471 30 negative_sampler.num_negs_per_pos 90.0 +471 30 training.batch_size 0.0 +471 31 model.embedding_dim 0.0 +471 31 loss.margin 3.935987662294689 +471 31 negative_sampler.num_negs_per_pos 92.0 +471 31 training.batch_size 0.0 +471 32 model.embedding_dim 0.0 +471 32 loss.margin 1.791059303803564 +471 32 negative_sampler.num_negs_per_pos 39.0 +471 32 training.batch_size 0.0 +471 33 model.embedding_dim 0.0 +471 33 loss.margin 7.391689538510514 +471 33 negative_sampler.num_negs_per_pos 15.0 +471 33 training.batch_size 2.0 +471 34 model.embedding_dim 2.0 +471 34 loss.margin 6.584323519458111 +471 34 negative_sampler.num_negs_per_pos 13.0 +471 34 training.batch_size 0.0 +471 35 model.embedding_dim 2.0 +471 35 loss.margin 5.449842725454868 +471 35 negative_sampler.num_negs_per_pos 91.0 +471 35 training.batch_size 1.0 +471 36 model.embedding_dim 0.0 +471 36 loss.margin 1.2513135149055337 +471 36 negative_sampler.num_negs_per_pos 68.0 +471 36 training.batch_size 2.0 +471 37 model.embedding_dim 1.0 +471 37 loss.margin 6.732596373565519 +471 37 negative_sampler.num_negs_per_pos 57.0 +471 37 training.batch_size 0.0 +471 38 model.embedding_dim 1.0 +471 38 loss.margin 0.5736378521850228 +471 38 negative_sampler.num_negs_per_pos 71.0 +471 38 training.batch_size 2.0 +471 39 model.embedding_dim 2.0 +471 39 loss.margin 6.313427341971467 +471 39 negative_sampler.num_negs_per_pos 85.0 +471 39 training.batch_size 0.0 +471 40 model.embedding_dim 1.0 +471 40 loss.margin 2.409477379179758 +471 40 negative_sampler.num_negs_per_pos 3.0 +471 40 training.batch_size 1.0 +471 41 model.embedding_dim 2.0 +471 41 loss.margin 4.792397952485287 +471 41 negative_sampler.num_negs_per_pos 67.0 +471 41 training.batch_size 1.0 +471 42 model.embedding_dim 1.0 +471 42 loss.margin 8.113602970671327 +471 42 negative_sampler.num_negs_per_pos 97.0 +471 42 training.batch_size 2.0 +471 43 model.embedding_dim 0.0 +471 43 loss.margin 6.177764060937658 +471 43 
negative_sampler.num_negs_per_pos 26.0 +471 43 training.batch_size 2.0 +471 44 model.embedding_dim 2.0 +471 44 loss.margin 7.9008038150560935 +471 44 negative_sampler.num_negs_per_pos 77.0 +471 44 training.batch_size 0.0 +471 45 model.embedding_dim 2.0 +471 45 loss.margin 5.639823531486528 +471 45 negative_sampler.num_negs_per_pos 74.0 +471 45 training.batch_size 0.0 +471 46 model.embedding_dim 1.0 +471 46 loss.margin 9.459347816887059 +471 46 negative_sampler.num_negs_per_pos 32.0 +471 46 training.batch_size 1.0 +471 47 model.embedding_dim 1.0 +471 47 loss.margin 6.262675025367255 +471 47 negative_sampler.num_negs_per_pos 62.0 +471 47 training.batch_size 1.0 +471 48 model.embedding_dim 2.0 +471 48 loss.margin 2.949287451153496 +471 48 negative_sampler.num_negs_per_pos 77.0 +471 48 training.batch_size 0.0 +471 49 model.embedding_dim 2.0 +471 49 loss.margin 6.117734820313983 +471 49 negative_sampler.num_negs_per_pos 41.0 +471 49 training.batch_size 2.0 +471 50 model.embedding_dim 2.0 +471 50 loss.margin 6.629242909290289 +471 50 negative_sampler.num_negs_per_pos 44.0 +471 50 training.batch_size 1.0 +471 51 model.embedding_dim 1.0 +471 51 loss.margin 9.093603304745429 +471 51 negative_sampler.num_negs_per_pos 68.0 +471 51 training.batch_size 1.0 +471 52 model.embedding_dim 2.0 +471 52 loss.margin 5.227593564711337 +471 52 negative_sampler.num_negs_per_pos 37.0 +471 52 training.batch_size 0.0 +471 53 model.embedding_dim 0.0 +471 53 loss.margin 1.6327814366034041 +471 53 negative_sampler.num_negs_per_pos 36.0 +471 53 training.batch_size 2.0 +471 54 model.embedding_dim 0.0 +471 54 loss.margin 8.396532075546915 +471 54 negative_sampler.num_negs_per_pos 44.0 +471 54 training.batch_size 1.0 +471 55 model.embedding_dim 2.0 +471 55 loss.margin 8.135253205200735 +471 55 negative_sampler.num_negs_per_pos 4.0 +471 55 training.batch_size 1.0 +471 56 model.embedding_dim 2.0 +471 56 loss.margin 1.8921040372698872 +471 56 negative_sampler.num_negs_per_pos 95.0 +471 56 
training.batch_size 1.0 +471 57 model.embedding_dim 1.0 +471 57 loss.margin 5.699202917766201 +471 57 negative_sampler.num_negs_per_pos 79.0 +471 57 training.batch_size 0.0 +471 58 model.embedding_dim 2.0 +471 58 loss.margin 3.552324507070904 +471 58 negative_sampler.num_negs_per_pos 31.0 +471 58 training.batch_size 0.0 +471 59 model.embedding_dim 1.0 +471 59 loss.margin 3.211042719922813 +471 59 negative_sampler.num_negs_per_pos 29.0 +471 59 training.batch_size 0.0 +471 60 model.embedding_dim 0.0 +471 60 loss.margin 0.5486527831028019 +471 60 negative_sampler.num_negs_per_pos 68.0 +471 60 training.batch_size 2.0 +471 61 model.embedding_dim 0.0 +471 61 loss.margin 5.938195464323279 +471 61 negative_sampler.num_negs_per_pos 1.0 +471 61 training.batch_size 0.0 +471 62 model.embedding_dim 1.0 +471 62 loss.margin 1.9809837045191736 +471 62 negative_sampler.num_negs_per_pos 84.0 +471 62 training.batch_size 1.0 +471 63 model.embedding_dim 0.0 +471 63 loss.margin 8.790099113189465 +471 63 negative_sampler.num_negs_per_pos 88.0 +471 63 training.batch_size 2.0 +471 64 model.embedding_dim 0.0 +471 64 loss.margin 0.5262730910692969 +471 64 negative_sampler.num_negs_per_pos 4.0 +471 64 training.batch_size 2.0 +471 65 model.embedding_dim 2.0 +471 65 loss.margin 7.898824189228088 +471 65 negative_sampler.num_negs_per_pos 63.0 +471 65 training.batch_size 0.0 +471 66 model.embedding_dim 0.0 +471 66 loss.margin 6.6927336473413135 +471 66 negative_sampler.num_negs_per_pos 73.0 +471 66 training.batch_size 2.0 +471 67 model.embedding_dim 0.0 +471 67 loss.margin 1.2864284844015454 +471 67 negative_sampler.num_negs_per_pos 63.0 +471 67 training.batch_size 1.0 +471 68 model.embedding_dim 2.0 +471 68 loss.margin 7.221239526314706 +471 68 negative_sampler.num_negs_per_pos 17.0 +471 68 training.batch_size 1.0 +471 69 model.embedding_dim 1.0 +471 69 loss.margin 3.9634985023402525 +471 69 negative_sampler.num_negs_per_pos 12.0 +471 69 training.batch_size 1.0 +471 70 model.embedding_dim 1.0 
+471 70 loss.margin 8.3111877360525 +471 70 negative_sampler.num_negs_per_pos 94.0 +471 70 training.batch_size 1.0 +471 71 model.embedding_dim 2.0 +471 71 loss.margin 3.605866621204492 +471 71 negative_sampler.num_negs_per_pos 85.0 +471 71 training.batch_size 2.0 +471 72 model.embedding_dim 1.0 +471 72 loss.margin 8.46392904397754 +471 72 negative_sampler.num_negs_per_pos 27.0 +471 72 training.batch_size 2.0 +471 73 model.embedding_dim 1.0 +471 73 loss.margin 9.621835115055289 +471 73 negative_sampler.num_negs_per_pos 71.0 +471 73 training.batch_size 0.0 +471 74 model.embedding_dim 2.0 +471 74 loss.margin 2.834669581031977 +471 74 negative_sampler.num_negs_per_pos 76.0 +471 74 training.batch_size 2.0 +471 75 model.embedding_dim 1.0 +471 75 loss.margin 2.4895751995368034 +471 75 negative_sampler.num_negs_per_pos 41.0 +471 75 training.batch_size 1.0 +471 76 model.embedding_dim 0.0 +471 76 loss.margin 4.878206367654264 +471 76 negative_sampler.num_negs_per_pos 58.0 +471 76 training.batch_size 1.0 +471 77 model.embedding_dim 0.0 +471 77 loss.margin 7.379228950116959 +471 77 negative_sampler.num_negs_per_pos 26.0 +471 77 training.batch_size 0.0 +471 78 model.embedding_dim 2.0 +471 78 loss.margin 4.280331166672717 +471 78 negative_sampler.num_negs_per_pos 47.0 +471 78 training.batch_size 2.0 +471 79 model.embedding_dim 1.0 +471 79 loss.margin 2.169195727787237 +471 79 negative_sampler.num_negs_per_pos 25.0 +471 79 training.batch_size 0.0 +471 80 model.embedding_dim 2.0 +471 80 loss.margin 9.731428208326003 +471 80 negative_sampler.num_negs_per_pos 11.0 +471 80 training.batch_size 2.0 +471 81 model.embedding_dim 0.0 +471 81 loss.margin 9.546767748000457 +471 81 negative_sampler.num_negs_per_pos 18.0 +471 81 training.batch_size 1.0 +471 82 model.embedding_dim 2.0 +471 82 loss.margin 1.9989389029181401 +471 82 negative_sampler.num_negs_per_pos 94.0 +471 82 training.batch_size 0.0 +471 83 model.embedding_dim 0.0 +471 83 loss.margin 3.2809491876252577 +471 83 
negative_sampler.num_negs_per_pos 2.0 +471 83 training.batch_size 2.0 +471 84 model.embedding_dim 1.0 +471 84 loss.margin 3.320033697576193 +471 84 negative_sampler.num_negs_per_pos 52.0 +471 84 training.batch_size 1.0 +471 85 model.embedding_dim 2.0 +471 85 loss.margin 4.32158365120562 +471 85 negative_sampler.num_negs_per_pos 42.0 +471 85 training.batch_size 0.0 +471 86 model.embedding_dim 0.0 +471 86 loss.margin 6.9705249575929935 +471 86 negative_sampler.num_negs_per_pos 52.0 +471 86 training.batch_size 2.0 +471 87 model.embedding_dim 1.0 +471 87 loss.margin 3.3083631554712625 +471 87 negative_sampler.num_negs_per_pos 64.0 +471 87 training.batch_size 0.0 +471 88 model.embedding_dim 0.0 +471 88 loss.margin 8.870588287740654 +471 88 negative_sampler.num_negs_per_pos 79.0 +471 88 training.batch_size 0.0 +471 89 model.embedding_dim 0.0 +471 89 loss.margin 0.936768604596868 +471 89 negative_sampler.num_negs_per_pos 37.0 +471 89 training.batch_size 1.0 +471 90 model.embedding_dim 2.0 +471 90 loss.margin 8.484059237740802 +471 90 negative_sampler.num_negs_per_pos 41.0 +471 90 training.batch_size 2.0 +471 91 model.embedding_dim 2.0 +471 91 loss.margin 5.976766354141586 +471 91 negative_sampler.num_negs_per_pos 5.0 +471 91 training.batch_size 0.0 +471 92 model.embedding_dim 1.0 +471 92 loss.margin 2.002074740131106 +471 92 negative_sampler.num_negs_per_pos 22.0 +471 92 training.batch_size 1.0 +471 93 model.embedding_dim 1.0 +471 93 loss.margin 8.30367177472776 +471 93 negative_sampler.num_negs_per_pos 73.0 +471 93 training.batch_size 1.0 +471 94 model.embedding_dim 0.0 +471 94 loss.margin 9.54842753340414 +471 94 negative_sampler.num_negs_per_pos 76.0 +471 94 training.batch_size 2.0 +471 95 model.embedding_dim 0.0 +471 95 loss.margin 3.6891107401549212 +471 95 negative_sampler.num_negs_per_pos 79.0 +471 95 training.batch_size 2.0 +471 96 model.embedding_dim 0.0 +471 96 loss.margin 3.1514961732695888 +471 96 negative_sampler.num_negs_per_pos 13.0 +471 96 
training.batch_size 1.0 +471 97 model.embedding_dim 0.0 +471 97 loss.margin 6.221736691907197 +471 97 negative_sampler.num_negs_per_pos 54.0 +471 97 training.batch_size 0.0 +471 98 model.embedding_dim 0.0 +471 98 loss.margin 8.02849189414139 +471 98 negative_sampler.num_negs_per_pos 56.0 +471 98 training.batch_size 0.0 +471 99 model.embedding_dim 1.0 +471 99 loss.margin 6.234137773061979 +471 99 negative_sampler.num_negs_per_pos 14.0 +471 99 training.batch_size 0.0 +471 100 model.embedding_dim 2.0 +471 100 loss.margin 1.850193845356026 +471 100 negative_sampler.num_negs_per_pos 85.0 +471 100 training.batch_size 2.0 +471 1 dataset """kinships""" +471 1 model """proje""" +471 1 loss """marginranking""" +471 1 regularizer """no""" +471 1 optimizer """adadelta""" +471 1 training_loop """owa""" +471 1 negative_sampler """basic""" +471 1 evaluator """rankbased""" +471 2 dataset """kinships""" +471 2 model """proje""" +471 2 loss """marginranking""" +471 2 regularizer """no""" +471 2 optimizer """adadelta""" +471 2 training_loop """owa""" +471 2 negative_sampler """basic""" +471 2 evaluator """rankbased""" +471 3 dataset """kinships""" +471 3 model """proje""" +471 3 loss """marginranking""" +471 3 regularizer """no""" +471 3 optimizer """adadelta""" +471 3 training_loop """owa""" +471 3 negative_sampler """basic""" +471 3 evaluator """rankbased""" +471 4 dataset """kinships""" +471 4 model """proje""" +471 4 loss """marginranking""" +471 4 regularizer """no""" +471 4 optimizer """adadelta""" +471 4 training_loop """owa""" +471 4 negative_sampler """basic""" +471 4 evaluator """rankbased""" +471 5 dataset """kinships""" +471 5 model """proje""" +471 5 loss """marginranking""" +471 5 regularizer """no""" +471 5 optimizer """adadelta""" +471 5 training_loop """owa""" +471 5 negative_sampler """basic""" +471 5 evaluator """rankbased""" +471 6 dataset """kinships""" +471 6 model """proje""" +471 6 loss """marginranking""" +471 6 regularizer """no""" +471 6 optimizer 
"""adadelta""" +471 6 training_loop """owa""" +471 6 negative_sampler """basic""" +471 6 evaluator """rankbased""" +471 7 dataset """kinships""" +471 7 model """proje""" +471 7 loss """marginranking""" +471 7 regularizer """no""" +471 7 optimizer """adadelta""" +471 7 training_loop """owa""" +471 7 negative_sampler """basic""" +471 7 evaluator """rankbased""" +471 8 dataset """kinships""" +471 8 model """proje""" +471 8 loss """marginranking""" +471 8 regularizer """no""" +471 8 optimizer """adadelta""" +471 8 training_loop """owa""" +471 8 negative_sampler """basic""" +471 8 evaluator """rankbased""" +471 9 dataset """kinships""" +471 9 model """proje""" +471 9 loss """marginranking""" +471 9 regularizer """no""" +471 9 optimizer """adadelta""" +471 9 training_loop """owa""" +471 9 negative_sampler """basic""" +471 9 evaluator """rankbased""" +471 10 dataset """kinships""" +471 10 model """proje""" +471 10 loss """marginranking""" +471 10 regularizer """no""" +471 10 optimizer """adadelta""" +471 10 training_loop """owa""" +471 10 negative_sampler """basic""" +471 10 evaluator """rankbased""" +471 11 dataset """kinships""" +471 11 model """proje""" +471 11 loss """marginranking""" +471 11 regularizer """no""" +471 11 optimizer """adadelta""" +471 11 training_loop """owa""" +471 11 negative_sampler """basic""" +471 11 evaluator """rankbased""" +471 12 dataset """kinships""" +471 12 model """proje""" +471 12 loss """marginranking""" +471 12 regularizer """no""" +471 12 optimizer """adadelta""" +471 12 training_loop """owa""" +471 12 negative_sampler """basic""" +471 12 evaluator """rankbased""" +471 13 dataset """kinships""" +471 13 model """proje""" +471 13 loss """marginranking""" +471 13 regularizer """no""" +471 13 optimizer """adadelta""" +471 13 training_loop """owa""" +471 13 negative_sampler """basic""" +471 13 evaluator """rankbased""" +471 14 dataset """kinships""" +471 14 model """proje""" +471 14 loss """marginranking""" +471 14 regularizer """no""" +471 
14 optimizer """adadelta""" +471 14 training_loop """owa""" +471 14 negative_sampler """basic""" +471 14 evaluator """rankbased""" +471 15 dataset """kinships""" +471 15 model """proje""" +471 15 loss """marginranking""" +471 15 regularizer """no""" +471 15 optimizer """adadelta""" +471 15 training_loop """owa""" +471 15 negative_sampler """basic""" +471 15 evaluator """rankbased""" +471 16 dataset """kinships""" +471 16 model """proje""" +471 16 loss """marginranking""" +471 16 regularizer """no""" +471 16 optimizer """adadelta""" +471 16 training_loop """owa""" +471 16 negative_sampler """basic""" +471 16 evaluator """rankbased""" +471 17 dataset """kinships""" +471 17 model """proje""" +471 17 loss """marginranking""" +471 17 regularizer """no""" +471 17 optimizer """adadelta""" +471 17 training_loop """owa""" +471 17 negative_sampler """basic""" +471 17 evaluator """rankbased""" +471 18 dataset """kinships""" +471 18 model """proje""" +471 18 loss """marginranking""" +471 18 regularizer """no""" +471 18 optimizer """adadelta""" +471 18 training_loop """owa""" +471 18 negative_sampler """basic""" +471 18 evaluator """rankbased""" +471 19 dataset """kinships""" +471 19 model """proje""" +471 19 loss """marginranking""" +471 19 regularizer """no""" +471 19 optimizer """adadelta""" +471 19 training_loop """owa""" +471 19 negative_sampler """basic""" +471 19 evaluator """rankbased""" +471 20 dataset """kinships""" +471 20 model """proje""" +471 20 loss """marginranking""" +471 20 regularizer """no""" +471 20 optimizer """adadelta""" +471 20 training_loop """owa""" +471 20 negative_sampler """basic""" +471 20 evaluator """rankbased""" +471 21 dataset """kinships""" +471 21 model """proje""" +471 21 loss """marginranking""" +471 21 regularizer """no""" +471 21 optimizer """adadelta""" +471 21 training_loop """owa""" +471 21 negative_sampler """basic""" +471 21 evaluator """rankbased""" +471 22 dataset """kinships""" +471 22 model """proje""" +471 22 loss 
"""marginranking""" +471 22 regularizer """no""" +471 22 optimizer """adadelta""" +471 22 training_loop """owa""" +471 22 negative_sampler """basic""" +471 22 evaluator """rankbased""" +471 23 dataset """kinships""" +471 23 model """proje""" +471 23 loss """marginranking""" +471 23 regularizer """no""" +471 23 optimizer """adadelta""" +471 23 training_loop """owa""" +471 23 negative_sampler """basic""" +471 23 evaluator """rankbased""" +471 24 dataset """kinships""" +471 24 model """proje""" +471 24 loss """marginranking""" +471 24 regularizer """no""" +471 24 optimizer """adadelta""" +471 24 training_loop """owa""" +471 24 negative_sampler """basic""" +471 24 evaluator """rankbased""" +471 25 dataset """kinships""" +471 25 model """proje""" +471 25 loss """marginranking""" +471 25 regularizer """no""" +471 25 optimizer """adadelta""" +471 25 training_loop """owa""" +471 25 negative_sampler """basic""" +471 25 evaluator """rankbased""" +471 26 dataset """kinships""" +471 26 model """proje""" +471 26 loss """marginranking""" +471 26 regularizer """no""" +471 26 optimizer """adadelta""" +471 26 training_loop """owa""" +471 26 negative_sampler """basic""" +471 26 evaluator """rankbased""" +471 27 dataset """kinships""" +471 27 model """proje""" +471 27 loss """marginranking""" +471 27 regularizer """no""" +471 27 optimizer """adadelta""" +471 27 training_loop """owa""" +471 27 negative_sampler """basic""" +471 27 evaluator """rankbased""" +471 28 dataset """kinships""" +471 28 model """proje""" +471 28 loss """marginranking""" +471 28 regularizer """no""" +471 28 optimizer """adadelta""" +471 28 training_loop """owa""" +471 28 negative_sampler """basic""" +471 28 evaluator """rankbased""" +471 29 dataset """kinships""" +471 29 model """proje""" +471 29 loss """marginranking""" +471 29 regularizer """no""" +471 29 optimizer """adadelta""" +471 29 training_loop """owa""" +471 29 negative_sampler """basic""" +471 29 evaluator """rankbased""" +471 30 dataset 
"""kinships""" +471 30 model """proje""" +471 30 loss """marginranking""" +471 30 regularizer """no""" +471 30 optimizer """adadelta""" +471 30 training_loop """owa""" +471 30 negative_sampler """basic""" +471 30 evaluator """rankbased""" +471 31 dataset """kinships""" +471 31 model """proje""" +471 31 loss """marginranking""" +471 31 regularizer """no""" +471 31 optimizer """adadelta""" +471 31 training_loop """owa""" +471 31 negative_sampler """basic""" +471 31 evaluator """rankbased""" +471 32 dataset """kinships""" +471 32 model """proje""" +471 32 loss """marginranking""" +471 32 regularizer """no""" +471 32 optimizer """adadelta""" +471 32 training_loop """owa""" +471 32 negative_sampler """basic""" +471 32 evaluator """rankbased""" +471 33 dataset """kinships""" +471 33 model """proje""" +471 33 loss """marginranking""" +471 33 regularizer """no""" +471 33 optimizer """adadelta""" +471 33 training_loop """owa""" +471 33 negative_sampler """basic""" +471 33 evaluator """rankbased""" +471 34 dataset """kinships""" +471 34 model """proje""" +471 34 loss """marginranking""" +471 34 regularizer """no""" +471 34 optimizer """adadelta""" +471 34 training_loop """owa""" +471 34 negative_sampler """basic""" +471 34 evaluator """rankbased""" +471 35 dataset """kinships""" +471 35 model """proje""" +471 35 loss """marginranking""" +471 35 regularizer """no""" +471 35 optimizer """adadelta""" +471 35 training_loop """owa""" +471 35 negative_sampler """basic""" +471 35 evaluator """rankbased""" +471 36 dataset """kinships""" +471 36 model """proje""" +471 36 loss """marginranking""" +471 36 regularizer """no""" +471 36 optimizer """adadelta""" +471 36 training_loop """owa""" +471 36 negative_sampler """basic""" +471 36 evaluator """rankbased""" +471 37 dataset """kinships""" +471 37 model """proje""" +471 37 loss """marginranking""" +471 37 regularizer """no""" +471 37 optimizer """adadelta""" +471 37 training_loop """owa""" +471 37 negative_sampler """basic""" +471 37 
evaluator """rankbased""" +471 38 dataset """kinships""" +471 38 model """proje""" +471 38 loss """marginranking""" +471 38 regularizer """no""" +471 38 optimizer """adadelta""" +471 38 training_loop """owa""" +471 38 negative_sampler """basic""" +471 38 evaluator """rankbased""" +471 39 dataset """kinships""" +471 39 model """proje""" +471 39 loss """marginranking""" +471 39 regularizer """no""" +471 39 optimizer """adadelta""" +471 39 training_loop """owa""" +471 39 negative_sampler """basic""" +471 39 evaluator """rankbased""" +471 40 dataset """kinships""" +471 40 model """proje""" +471 40 loss """marginranking""" +471 40 regularizer """no""" +471 40 optimizer """adadelta""" +471 40 training_loop """owa""" +471 40 negative_sampler """basic""" +471 40 evaluator """rankbased""" +471 41 dataset """kinships""" +471 41 model """proje""" +471 41 loss """marginranking""" +471 41 regularizer """no""" +471 41 optimizer """adadelta""" +471 41 training_loop """owa""" +471 41 negative_sampler """basic""" +471 41 evaluator """rankbased""" +471 42 dataset """kinships""" +471 42 model """proje""" +471 42 loss """marginranking""" +471 42 regularizer """no""" +471 42 optimizer """adadelta""" +471 42 training_loop """owa""" +471 42 negative_sampler """basic""" +471 42 evaluator """rankbased""" +471 43 dataset """kinships""" +471 43 model """proje""" +471 43 loss """marginranking""" +471 43 regularizer """no""" +471 43 optimizer """adadelta""" +471 43 training_loop """owa""" +471 43 negative_sampler """basic""" +471 43 evaluator """rankbased""" +471 44 dataset """kinships""" +471 44 model """proje""" +471 44 loss """marginranking""" +471 44 regularizer """no""" +471 44 optimizer """adadelta""" +471 44 training_loop """owa""" +471 44 negative_sampler """basic""" +471 44 evaluator """rankbased""" +471 45 dataset """kinships""" +471 45 model """proje""" +471 45 loss """marginranking""" +471 45 regularizer """no""" +471 45 optimizer """adadelta""" +471 45 training_loop """owa""" +471 
45 negative_sampler """basic""" +471 45 evaluator """rankbased""" +471 46 dataset """kinships""" +471 46 model """proje""" +471 46 loss """marginranking""" +471 46 regularizer """no""" +471 46 optimizer """adadelta""" +471 46 training_loop """owa""" +471 46 negative_sampler """basic""" +471 46 evaluator """rankbased""" +471 47 dataset """kinships""" +471 47 model """proje""" +471 47 loss """marginranking""" +471 47 regularizer """no""" +471 47 optimizer """adadelta""" +471 47 training_loop """owa""" +471 47 negative_sampler """basic""" +471 47 evaluator """rankbased""" +471 48 dataset """kinships""" +471 48 model """proje""" +471 48 loss """marginranking""" +471 48 regularizer """no""" +471 48 optimizer """adadelta""" +471 48 training_loop """owa""" +471 48 negative_sampler """basic""" +471 48 evaluator """rankbased""" +471 49 dataset """kinships""" +471 49 model """proje""" +471 49 loss """marginranking""" +471 49 regularizer """no""" +471 49 optimizer """adadelta""" +471 49 training_loop """owa""" +471 49 negative_sampler """basic""" +471 49 evaluator """rankbased""" +471 50 dataset """kinships""" +471 50 model """proje""" +471 50 loss """marginranking""" +471 50 regularizer """no""" +471 50 optimizer """adadelta""" +471 50 training_loop """owa""" +471 50 negative_sampler """basic""" +471 50 evaluator """rankbased""" +471 51 dataset """kinships""" +471 51 model """proje""" +471 51 loss """marginranking""" +471 51 regularizer """no""" +471 51 optimizer """adadelta""" +471 51 training_loop """owa""" +471 51 negative_sampler """basic""" +471 51 evaluator """rankbased""" +471 52 dataset """kinships""" +471 52 model """proje""" +471 52 loss """marginranking""" +471 52 regularizer """no""" +471 52 optimizer """adadelta""" +471 52 training_loop """owa""" +471 52 negative_sampler """basic""" +471 52 evaluator """rankbased""" +471 53 dataset """kinships""" +471 53 model """proje""" +471 53 loss """marginranking""" +471 53 regularizer """no""" +471 53 optimizer 
"""adadelta""" +471 53 training_loop """owa""" +471 53 negative_sampler """basic""" +471 53 evaluator """rankbased""" +471 54 dataset """kinships""" +471 54 model """proje""" +471 54 loss """marginranking""" +471 54 regularizer """no""" +471 54 optimizer """adadelta""" +471 54 training_loop """owa""" +471 54 negative_sampler """basic""" +471 54 evaluator """rankbased""" +471 55 dataset """kinships""" +471 55 model """proje""" +471 55 loss """marginranking""" +471 55 regularizer """no""" +471 55 optimizer """adadelta""" +471 55 training_loop """owa""" +471 55 negative_sampler """basic""" +471 55 evaluator """rankbased""" +471 56 dataset """kinships""" +471 56 model """proje""" +471 56 loss """marginranking""" +471 56 regularizer """no""" +471 56 optimizer """adadelta""" +471 56 training_loop """owa""" +471 56 negative_sampler """basic""" +471 56 evaluator """rankbased""" +471 57 dataset """kinships""" +471 57 model """proje""" +471 57 loss """marginranking""" +471 57 regularizer """no""" +471 57 optimizer """adadelta""" +471 57 training_loop """owa""" +471 57 negative_sampler """basic""" +471 57 evaluator """rankbased""" +471 58 dataset """kinships""" +471 58 model """proje""" +471 58 loss """marginranking""" +471 58 regularizer """no""" +471 58 optimizer """adadelta""" +471 58 training_loop """owa""" +471 58 negative_sampler """basic""" +471 58 evaluator """rankbased""" +471 59 dataset """kinships""" +471 59 model """proje""" +471 59 loss """marginranking""" +471 59 regularizer """no""" +471 59 optimizer """adadelta""" +471 59 training_loop """owa""" +471 59 negative_sampler """basic""" +471 59 evaluator """rankbased""" +471 60 dataset """kinships""" +471 60 model """proje""" +471 60 loss """marginranking""" +471 60 regularizer """no""" +471 60 optimizer """adadelta""" +471 60 training_loop """owa""" +471 60 negative_sampler """basic""" +471 60 evaluator """rankbased""" +471 61 dataset """kinships""" +471 61 model """proje""" +471 61 loss """marginranking""" +471 
61 regularizer """no""" +471 61 optimizer """adadelta""" +471 61 training_loop """owa""" +471 61 negative_sampler """basic""" +471 61 evaluator """rankbased""" +471 62 dataset """kinships""" +471 62 model """proje""" +471 62 loss """marginranking""" +471 62 regularizer """no""" +471 62 optimizer """adadelta""" +471 62 training_loop """owa""" +471 62 negative_sampler """basic""" +471 62 evaluator """rankbased""" +471 63 dataset """kinships""" +471 63 model """proje""" +471 63 loss """marginranking""" +471 63 regularizer """no""" +471 63 optimizer """adadelta""" +471 63 training_loop """owa""" +471 63 negative_sampler """basic""" +471 63 evaluator """rankbased""" +471 64 dataset """kinships""" +471 64 model """proje""" +471 64 loss """marginranking""" +471 64 regularizer """no""" +471 64 optimizer """adadelta""" +471 64 training_loop """owa""" +471 64 negative_sampler """basic""" +471 64 evaluator """rankbased""" +471 65 dataset """kinships""" +471 65 model """proje""" +471 65 loss """marginranking""" +471 65 regularizer """no""" +471 65 optimizer """adadelta""" +471 65 training_loop """owa""" +471 65 negative_sampler """basic""" +471 65 evaluator """rankbased""" +471 66 dataset """kinships""" +471 66 model """proje""" +471 66 loss """marginranking""" +471 66 regularizer """no""" +471 66 optimizer """adadelta""" +471 66 training_loop """owa""" +471 66 negative_sampler """basic""" +471 66 evaluator """rankbased""" +471 67 dataset """kinships""" +471 67 model """proje""" +471 67 loss """marginranking""" +471 67 regularizer """no""" +471 67 optimizer """adadelta""" +471 67 training_loop """owa""" +471 67 negative_sampler """basic""" +471 67 evaluator """rankbased""" +471 68 dataset """kinships""" +471 68 model """proje""" +471 68 loss """marginranking""" +471 68 regularizer """no""" +471 68 optimizer """adadelta""" +471 68 training_loop """owa""" +471 68 negative_sampler """basic""" +471 68 evaluator """rankbased""" +471 69 dataset """kinships""" +471 69 model 
"""proje""" +471 69 loss """marginranking""" +471 69 regularizer """no""" +471 69 optimizer """adadelta""" +471 69 training_loop """owa""" +471 69 negative_sampler """basic""" +471 69 evaluator """rankbased""" +471 70 dataset """kinships""" +471 70 model """proje""" +471 70 loss """marginranking""" +471 70 regularizer """no""" +471 70 optimizer """adadelta""" +471 70 training_loop """owa""" +471 70 negative_sampler """basic""" +471 70 evaluator """rankbased""" +471 71 dataset """kinships""" +471 71 model """proje""" +471 71 loss """marginranking""" +471 71 regularizer """no""" +471 71 optimizer """adadelta""" +471 71 training_loop """owa""" +471 71 negative_sampler """basic""" +471 71 evaluator """rankbased""" +471 72 dataset """kinships""" +471 72 model """proje""" +471 72 loss """marginranking""" +471 72 regularizer """no""" +471 72 optimizer """adadelta""" +471 72 training_loop """owa""" +471 72 negative_sampler """basic""" +471 72 evaluator """rankbased""" +471 73 dataset """kinships""" +471 73 model """proje""" +471 73 loss """marginranking""" +471 73 regularizer """no""" +471 73 optimizer """adadelta""" +471 73 training_loop """owa""" +471 73 negative_sampler """basic""" +471 73 evaluator """rankbased""" +471 74 dataset """kinships""" +471 74 model """proje""" +471 74 loss """marginranking""" +471 74 regularizer """no""" +471 74 optimizer """adadelta""" +471 74 training_loop """owa""" +471 74 negative_sampler """basic""" +471 74 evaluator """rankbased""" +471 75 dataset """kinships""" +471 75 model """proje""" +471 75 loss """marginranking""" +471 75 regularizer """no""" +471 75 optimizer """adadelta""" +471 75 training_loop """owa""" +471 75 negative_sampler """basic""" +471 75 evaluator """rankbased""" +471 76 dataset """kinships""" +471 76 model """proje""" +471 76 loss """marginranking""" +471 76 regularizer """no""" +471 76 optimizer """adadelta""" +471 76 training_loop """owa""" +471 76 negative_sampler """basic""" +471 76 evaluator """rankbased""" +471 
77 dataset """kinships""" +471 77 model """proje""" +471 77 loss """marginranking""" +471 77 regularizer """no""" +471 77 optimizer """adadelta""" +471 77 training_loop """owa""" +471 77 negative_sampler """basic""" +471 77 evaluator """rankbased""" +471 78 dataset """kinships""" +471 78 model """proje""" +471 78 loss """marginranking""" +471 78 regularizer """no""" +471 78 optimizer """adadelta""" +471 78 training_loop """owa""" +471 78 negative_sampler """basic""" +471 78 evaluator """rankbased""" +471 79 dataset """kinships""" +471 79 model """proje""" +471 79 loss """marginranking""" +471 79 regularizer """no""" +471 79 optimizer """adadelta""" +471 79 training_loop """owa""" +471 79 negative_sampler """basic""" +471 79 evaluator """rankbased""" +471 80 dataset """kinships""" +471 80 model """proje""" +471 80 loss """marginranking""" +471 80 regularizer """no""" +471 80 optimizer """adadelta""" +471 80 training_loop """owa""" +471 80 negative_sampler """basic""" +471 80 evaluator """rankbased""" +471 81 dataset """kinships""" +471 81 model """proje""" +471 81 loss """marginranking""" +471 81 regularizer """no""" +471 81 optimizer """adadelta""" +471 81 training_loop """owa""" +471 81 negative_sampler """basic""" +471 81 evaluator """rankbased""" +471 82 dataset """kinships""" +471 82 model """proje""" +471 82 loss """marginranking""" +471 82 regularizer """no""" +471 82 optimizer """adadelta""" +471 82 training_loop """owa""" +471 82 negative_sampler """basic""" +471 82 evaluator """rankbased""" +471 83 dataset """kinships""" +471 83 model """proje""" +471 83 loss """marginranking""" +471 83 regularizer """no""" +471 83 optimizer """adadelta""" +471 83 training_loop """owa""" +471 83 negative_sampler """basic""" +471 83 evaluator """rankbased""" +471 84 dataset """kinships""" +471 84 model """proje""" +471 84 loss """marginranking""" +471 84 regularizer """no""" +471 84 optimizer """adadelta""" +471 84 training_loop """owa""" +471 84 negative_sampler 
"""basic""" +471 84 evaluator """rankbased""" +471 85 dataset """kinships""" +471 85 model """proje""" +471 85 loss """marginranking""" +471 85 regularizer """no""" +471 85 optimizer """adadelta""" +471 85 training_loop """owa""" +471 85 negative_sampler """basic""" +471 85 evaluator """rankbased""" +471 86 dataset """kinships""" +471 86 model """proje""" +471 86 loss """marginranking""" +471 86 regularizer """no""" +471 86 optimizer """adadelta""" +471 86 training_loop """owa""" +471 86 negative_sampler """basic""" +471 86 evaluator """rankbased""" +471 87 dataset """kinships""" +471 87 model """proje""" +471 87 loss """marginranking""" +471 87 regularizer """no""" +471 87 optimizer """adadelta""" +471 87 training_loop """owa""" +471 87 negative_sampler """basic""" +471 87 evaluator """rankbased""" +471 88 dataset """kinships""" +471 88 model """proje""" +471 88 loss """marginranking""" +471 88 regularizer """no""" +471 88 optimizer """adadelta""" +471 88 training_loop """owa""" +471 88 negative_sampler """basic""" +471 88 evaluator """rankbased""" +471 89 dataset """kinships""" +471 89 model """proje""" +471 89 loss """marginranking""" +471 89 regularizer """no""" +471 89 optimizer """adadelta""" +471 89 training_loop """owa""" +471 89 negative_sampler """basic""" +471 89 evaluator """rankbased""" +471 90 dataset """kinships""" +471 90 model """proje""" +471 90 loss """marginranking""" +471 90 regularizer """no""" +471 90 optimizer """adadelta""" +471 90 training_loop """owa""" +471 90 negative_sampler """basic""" +471 90 evaluator """rankbased""" +471 91 dataset """kinships""" +471 91 model """proje""" +471 91 loss """marginranking""" +471 91 regularizer """no""" +471 91 optimizer """adadelta""" +471 91 training_loop """owa""" +471 91 negative_sampler """basic""" +471 91 evaluator """rankbased""" +471 92 dataset """kinships""" +471 92 model """proje""" +471 92 loss """marginranking""" +471 92 regularizer """no""" +471 92 optimizer """adadelta""" +471 92 
training_loop """owa""" +471 92 negative_sampler """basic""" +471 92 evaluator """rankbased""" +471 93 dataset """kinships""" +471 93 model """proje""" +471 93 loss """marginranking""" +471 93 regularizer """no""" +471 93 optimizer """adadelta""" +471 93 training_loop """owa""" +471 93 negative_sampler """basic""" +471 93 evaluator """rankbased""" +471 94 dataset """kinships""" +471 94 model """proje""" +471 94 loss """marginranking""" +471 94 regularizer """no""" +471 94 optimizer """adadelta""" +471 94 training_loop """owa""" +471 94 negative_sampler """basic""" +471 94 evaluator """rankbased""" +471 95 dataset """kinships""" +471 95 model """proje""" +471 95 loss """marginranking""" +471 95 regularizer """no""" +471 95 optimizer """adadelta""" +471 95 training_loop """owa""" +471 95 negative_sampler """basic""" +471 95 evaluator """rankbased""" +471 96 dataset """kinships""" +471 96 model """proje""" +471 96 loss """marginranking""" +471 96 regularizer """no""" +471 96 optimizer """adadelta""" +471 96 training_loop """owa""" +471 96 negative_sampler """basic""" +471 96 evaluator """rankbased""" +471 97 dataset """kinships""" +471 97 model """proje""" +471 97 loss """marginranking""" +471 97 regularizer """no""" +471 97 optimizer """adadelta""" +471 97 training_loop """owa""" +471 97 negative_sampler """basic""" +471 97 evaluator """rankbased""" +471 98 dataset """kinships""" +471 98 model """proje""" +471 98 loss """marginranking""" +471 98 regularizer """no""" +471 98 optimizer """adadelta""" +471 98 training_loop """owa""" +471 98 negative_sampler """basic""" +471 98 evaluator """rankbased""" +471 99 dataset """kinships""" +471 99 model """proje""" +471 99 loss """marginranking""" +471 99 regularizer """no""" +471 99 optimizer """adadelta""" +471 99 training_loop """owa""" +471 99 negative_sampler """basic""" +471 99 evaluator """rankbased""" +471 100 dataset """kinships""" +471 100 model """proje""" +471 100 loss """marginranking""" +471 100 regularizer 
"""no""" +471 100 optimizer """adadelta""" +471 100 training_loop """owa""" +471 100 negative_sampler """basic""" +471 100 evaluator """rankbased""" +472 1 model.embedding_dim 0.0 +472 1 loss.margin 3.739677908035941 +472 1 negative_sampler.num_negs_per_pos 41.0 +472 1 training.batch_size 0.0 +472 2 model.embedding_dim 1.0 +472 2 loss.margin 8.646874642310594 +472 2 negative_sampler.num_negs_per_pos 47.0 +472 2 training.batch_size 2.0 +472 3 model.embedding_dim 2.0 +472 3 loss.margin 9.234165053512779 +472 3 negative_sampler.num_negs_per_pos 95.0 +472 3 training.batch_size 0.0 +472 4 model.embedding_dim 2.0 +472 4 loss.margin 8.89172962876681 +472 4 negative_sampler.num_negs_per_pos 50.0 +472 4 training.batch_size 2.0 +472 5 model.embedding_dim 1.0 +472 5 loss.margin 4.274167559091667 +472 5 negative_sampler.num_negs_per_pos 30.0 +472 5 training.batch_size 2.0 +472 6 model.embedding_dim 2.0 +472 6 loss.margin 1.031432825800147 +472 6 negative_sampler.num_negs_per_pos 43.0 +472 6 training.batch_size 0.0 +472 7 model.embedding_dim 0.0 +472 7 loss.margin 3.68980589738596 +472 7 negative_sampler.num_negs_per_pos 77.0 +472 7 training.batch_size 2.0 +472 8 model.embedding_dim 2.0 +472 8 loss.margin 7.144119786678143 +472 8 negative_sampler.num_negs_per_pos 83.0 +472 8 training.batch_size 0.0 +472 9 model.embedding_dim 1.0 +472 9 loss.margin 5.587533647995402 +472 9 negative_sampler.num_negs_per_pos 52.0 +472 9 training.batch_size 2.0 +472 10 model.embedding_dim 1.0 +472 10 loss.margin 9.714644100239832 +472 10 negative_sampler.num_negs_per_pos 62.0 +472 10 training.batch_size 0.0 +472 11 model.embedding_dim 2.0 +472 11 loss.margin 9.922747035858642 +472 11 negative_sampler.num_negs_per_pos 83.0 +472 11 training.batch_size 2.0 +472 12 model.embedding_dim 0.0 +472 12 loss.margin 9.452275555584102 +472 12 negative_sampler.num_negs_per_pos 60.0 +472 12 training.batch_size 0.0 +472 13 model.embedding_dim 0.0 +472 13 loss.margin 2.1283713604637655 +472 13 
negative_sampler.num_negs_per_pos 23.0 +472 13 training.batch_size 0.0 +472 14 model.embedding_dim 0.0 +472 14 loss.margin 7.5933014837175765 +472 14 negative_sampler.num_negs_per_pos 67.0 +472 14 training.batch_size 2.0 +472 15 model.embedding_dim 1.0 +472 15 loss.margin 2.481895088659389 +472 15 negative_sampler.num_negs_per_pos 61.0 +472 15 training.batch_size 1.0 +472 16 model.embedding_dim 0.0 +472 16 loss.margin 5.9818799603336466 +472 16 negative_sampler.num_negs_per_pos 58.0 +472 16 training.batch_size 2.0 +472 17 model.embedding_dim 0.0 +472 17 loss.margin 5.504776638094682 +472 17 negative_sampler.num_negs_per_pos 87.0 +472 17 training.batch_size 0.0 +472 18 model.embedding_dim 1.0 +472 18 loss.margin 5.325291926288189 +472 18 negative_sampler.num_negs_per_pos 81.0 +472 18 training.batch_size 2.0 +472 19 model.embedding_dim 2.0 +472 19 loss.margin 9.048326111566466 +472 19 negative_sampler.num_negs_per_pos 23.0 +472 19 training.batch_size 2.0 +472 20 model.embedding_dim 1.0 +472 20 loss.margin 2.0986424885501136 +472 20 negative_sampler.num_negs_per_pos 62.0 +472 20 training.batch_size 0.0 +472 21 model.embedding_dim 1.0 +472 21 loss.margin 5.253480846605834 +472 21 negative_sampler.num_negs_per_pos 66.0 +472 21 training.batch_size 0.0 +472 22 model.embedding_dim 2.0 +472 22 loss.margin 2.144787754865497 +472 22 negative_sampler.num_negs_per_pos 51.0 +472 22 training.batch_size 2.0 +472 23 model.embedding_dim 2.0 +472 23 loss.margin 3.043393667672104 +472 23 negative_sampler.num_negs_per_pos 32.0 +472 23 training.batch_size 0.0 +472 24 model.embedding_dim 0.0 +472 24 loss.margin 4.864821985082742 +472 24 negative_sampler.num_negs_per_pos 15.0 +472 24 training.batch_size 1.0 +472 25 model.embedding_dim 0.0 +472 25 loss.margin 9.499244703196505 +472 25 negative_sampler.num_negs_per_pos 57.0 +472 25 training.batch_size 2.0 +472 26 model.embedding_dim 2.0 +472 26 loss.margin 0.6707370122671079 +472 26 negative_sampler.num_negs_per_pos 9.0 +472 26 
training.batch_size 0.0 +472 27 model.embedding_dim 2.0 +472 27 loss.margin 3.846195844423497 +472 27 negative_sampler.num_negs_per_pos 64.0 +472 27 training.batch_size 2.0 +472 28 model.embedding_dim 1.0 +472 28 loss.margin 4.635774008626802 +472 28 negative_sampler.num_negs_per_pos 57.0 +472 28 training.batch_size 2.0 +472 29 model.embedding_dim 1.0 +472 29 loss.margin 2.9082684649432733 +472 29 negative_sampler.num_negs_per_pos 39.0 +472 29 training.batch_size 2.0 +472 30 model.embedding_dim 2.0 +472 30 loss.margin 5.999138748243173 +472 30 negative_sampler.num_negs_per_pos 2.0 +472 30 training.batch_size 2.0 +472 31 model.embedding_dim 0.0 +472 31 loss.margin 3.7660596292374047 +472 31 negative_sampler.num_negs_per_pos 42.0 +472 31 training.batch_size 2.0 +472 32 model.embedding_dim 2.0 +472 32 loss.margin 7.56421535293978 +472 32 negative_sampler.num_negs_per_pos 9.0 +472 32 training.batch_size 1.0 +472 33 model.embedding_dim 0.0 +472 33 loss.margin 2.7492646259001305 +472 33 negative_sampler.num_negs_per_pos 23.0 +472 33 training.batch_size 0.0 +472 34 model.embedding_dim 1.0 +472 34 loss.margin 9.64596131573442 +472 34 negative_sampler.num_negs_per_pos 4.0 +472 34 training.batch_size 2.0 +472 35 model.embedding_dim 0.0 +472 35 loss.margin 4.455042752811981 +472 35 negative_sampler.num_negs_per_pos 40.0 +472 35 training.batch_size 2.0 +472 36 model.embedding_dim 2.0 +472 36 loss.margin 5.126413426291728 +472 36 negative_sampler.num_negs_per_pos 60.0 +472 36 training.batch_size 1.0 +472 37 model.embedding_dim 1.0 +472 37 loss.margin 5.042693556705821 +472 37 negative_sampler.num_negs_per_pos 88.0 +472 37 training.batch_size 0.0 +472 38 model.embedding_dim 2.0 +472 38 loss.margin 4.317528950934619 +472 38 negative_sampler.num_negs_per_pos 79.0 +472 38 training.batch_size 2.0 +472 39 model.embedding_dim 0.0 +472 39 loss.margin 3.6308344746301495 +472 39 negative_sampler.num_negs_per_pos 89.0 +472 39 training.batch_size 1.0 +472 40 model.embedding_dim 1.0 +472 40 
loss.margin 9.834598234997035 +472 40 negative_sampler.num_negs_per_pos 15.0 +472 40 training.batch_size 0.0 +472 41 model.embedding_dim 0.0 +472 41 loss.margin 9.538354286021905 +472 41 negative_sampler.num_negs_per_pos 26.0 +472 41 training.batch_size 2.0 +472 42 model.embedding_dim 0.0 +472 42 loss.margin 0.9666690565194975 +472 42 negative_sampler.num_negs_per_pos 76.0 +472 42 training.batch_size 1.0 +472 43 model.embedding_dim 0.0 +472 43 loss.margin 5.023122195338177 +472 43 negative_sampler.num_negs_per_pos 2.0 +472 43 training.batch_size 0.0 +472 44 model.embedding_dim 1.0 +472 44 loss.margin 5.975935155128492 +472 44 negative_sampler.num_negs_per_pos 4.0 +472 44 training.batch_size 0.0 +472 45 model.embedding_dim 1.0 +472 45 loss.margin 7.050275485750637 +472 45 negative_sampler.num_negs_per_pos 20.0 +472 45 training.batch_size 1.0 +472 46 model.embedding_dim 0.0 +472 46 loss.margin 1.7715485407328468 +472 46 negative_sampler.num_negs_per_pos 78.0 +472 46 training.batch_size 0.0 +472 47 model.embedding_dim 0.0 +472 47 loss.margin 1.447305776181787 +472 47 negative_sampler.num_negs_per_pos 79.0 +472 47 training.batch_size 0.0 +472 48 model.embedding_dim 1.0 +472 48 loss.margin 3.6775465765480124 +472 48 negative_sampler.num_negs_per_pos 11.0 +472 48 training.batch_size 1.0 +472 49 model.embedding_dim 1.0 +472 49 loss.margin 7.865626436046183 +472 49 negative_sampler.num_negs_per_pos 19.0 +472 49 training.batch_size 1.0 +472 50 model.embedding_dim 0.0 +472 50 loss.margin 0.9214636987311255 +472 50 negative_sampler.num_negs_per_pos 67.0 +472 50 training.batch_size 0.0 +472 51 model.embedding_dim 1.0 +472 51 loss.margin 9.289210510536373 +472 51 negative_sampler.num_negs_per_pos 49.0 +472 51 training.batch_size 1.0 +472 52 model.embedding_dim 0.0 +472 52 loss.margin 5.463605047430298 +472 52 negative_sampler.num_negs_per_pos 48.0 +472 52 training.batch_size 0.0 +472 53 model.embedding_dim 0.0 +472 53 loss.margin 6.371909507035011 +472 53 
negative_sampler.num_negs_per_pos 36.0 +472 53 training.batch_size 1.0 +472 54 model.embedding_dim 2.0 +472 54 loss.margin 2.34448678738719 +472 54 negative_sampler.num_negs_per_pos 65.0 +472 54 training.batch_size 0.0 +472 55 model.embedding_dim 1.0 +472 55 loss.margin 7.288639404651448 +472 55 negative_sampler.num_negs_per_pos 93.0 +472 55 training.batch_size 1.0 +472 56 model.embedding_dim 2.0 +472 56 loss.margin 5.304626901854051 +472 56 negative_sampler.num_negs_per_pos 26.0 +472 56 training.batch_size 0.0 +472 57 model.embedding_dim 0.0 +472 57 loss.margin 7.607549187019982 +472 57 negative_sampler.num_negs_per_pos 84.0 +472 57 training.batch_size 1.0 +472 58 model.embedding_dim 1.0 +472 58 loss.margin 9.87489583547521 +472 58 negative_sampler.num_negs_per_pos 85.0 +472 58 training.batch_size 1.0 +472 59 model.embedding_dim 2.0 +472 59 loss.margin 6.5004406281998675 +472 59 negative_sampler.num_negs_per_pos 66.0 +472 59 training.batch_size 0.0 +472 60 model.embedding_dim 1.0 +472 60 loss.margin 7.678587142569774 +472 60 negative_sampler.num_negs_per_pos 46.0 +472 60 training.batch_size 0.0 +472 61 model.embedding_dim 1.0 +472 61 loss.margin 3.953961082513255 +472 61 negative_sampler.num_negs_per_pos 11.0 +472 61 training.batch_size 2.0 +472 62 model.embedding_dim 0.0 +472 62 loss.margin 5.1783120123814275 +472 62 negative_sampler.num_negs_per_pos 69.0 +472 62 training.batch_size 1.0 +472 63 model.embedding_dim 0.0 +472 63 loss.margin 9.602536163347633 +472 63 negative_sampler.num_negs_per_pos 66.0 +472 63 training.batch_size 0.0 +472 64 model.embedding_dim 2.0 +472 64 loss.margin 2.9027958926634208 +472 64 negative_sampler.num_negs_per_pos 71.0 +472 64 training.batch_size 1.0 +472 65 model.embedding_dim 1.0 +472 65 loss.margin 5.289475325382273 +472 65 negative_sampler.num_negs_per_pos 48.0 +472 65 training.batch_size 0.0 +472 66 model.embedding_dim 0.0 +472 66 loss.margin 2.103718946877126 +472 66 negative_sampler.num_negs_per_pos 26.0 +472 66 
training.batch_size 0.0 +472 67 model.embedding_dim 1.0 +472 67 loss.margin 1.6156079900133133 +472 67 negative_sampler.num_negs_per_pos 76.0 +472 67 training.batch_size 0.0 +472 68 model.embedding_dim 1.0 +472 68 loss.margin 4.371327213851999 +472 68 negative_sampler.num_negs_per_pos 86.0 +472 68 training.batch_size 1.0 +472 69 model.embedding_dim 0.0 +472 69 loss.margin 0.6329035351737503 +472 69 negative_sampler.num_negs_per_pos 33.0 +472 69 training.batch_size 2.0 +472 70 model.embedding_dim 1.0 +472 70 loss.margin 6.229534652326036 +472 70 negative_sampler.num_negs_per_pos 57.0 +472 70 training.batch_size 0.0 +472 71 model.embedding_dim 2.0 +472 71 loss.margin 0.8504653836146308 +472 71 negative_sampler.num_negs_per_pos 16.0 +472 71 training.batch_size 2.0 +472 72 model.embedding_dim 0.0 +472 72 loss.margin 0.639805043657589 +472 72 negative_sampler.num_negs_per_pos 45.0 +472 72 training.batch_size 1.0 +472 73 model.embedding_dim 0.0 +472 73 loss.margin 2.5820582602024533 +472 73 negative_sampler.num_negs_per_pos 69.0 +472 73 training.batch_size 0.0 +472 74 model.embedding_dim 0.0 +472 74 loss.margin 5.523721897455461 +472 74 negative_sampler.num_negs_per_pos 72.0 +472 74 training.batch_size 0.0 +472 75 model.embedding_dim 2.0 +472 75 loss.margin 0.6554653810772477 +472 75 negative_sampler.num_negs_per_pos 5.0 +472 75 training.batch_size 0.0 +472 76 model.embedding_dim 0.0 +472 76 loss.margin 9.088948842066355 +472 76 negative_sampler.num_negs_per_pos 97.0 +472 76 training.batch_size 0.0 +472 77 model.embedding_dim 2.0 +472 77 loss.margin 6.981591923372313 +472 77 negative_sampler.num_negs_per_pos 50.0 +472 77 training.batch_size 0.0 +472 78 model.embedding_dim 1.0 +472 78 loss.margin 6.9418412649978904 +472 78 negative_sampler.num_negs_per_pos 87.0 +472 78 training.batch_size 0.0 +472 79 model.embedding_dim 0.0 +472 79 loss.margin 3.4699192383839494 +472 79 negative_sampler.num_negs_per_pos 66.0 +472 79 training.batch_size 1.0 +472 80 model.embedding_dim 2.0 
+472 80 loss.margin 7.114797275447243 +472 80 negative_sampler.num_negs_per_pos 48.0 +472 80 training.batch_size 1.0 +472 81 model.embedding_dim 2.0 +472 81 loss.margin 1.0678947315389709 +472 81 negative_sampler.num_negs_per_pos 78.0 +472 81 training.batch_size 1.0 +472 82 model.embedding_dim 0.0 +472 82 loss.margin 4.351098759957019 +472 82 negative_sampler.num_negs_per_pos 29.0 +472 82 training.batch_size 1.0 +472 83 model.embedding_dim 1.0 +472 83 loss.margin 2.5456667807404947 +472 83 negative_sampler.num_negs_per_pos 74.0 +472 83 training.batch_size 1.0 +472 84 model.embedding_dim 1.0 +472 84 loss.margin 8.4283956832763 +472 84 negative_sampler.num_negs_per_pos 55.0 +472 84 training.batch_size 1.0 +472 85 model.embedding_dim 1.0 +472 85 loss.margin 7.305711628361354 +472 85 negative_sampler.num_negs_per_pos 46.0 +472 85 training.batch_size 2.0 +472 86 model.embedding_dim 1.0 +472 86 loss.margin 2.3507868442112283 +472 86 negative_sampler.num_negs_per_pos 79.0 +472 86 training.batch_size 1.0 +472 87 model.embedding_dim 0.0 +472 87 loss.margin 8.571031212876209 +472 87 negative_sampler.num_negs_per_pos 35.0 +472 87 training.batch_size 0.0 +472 88 model.embedding_dim 1.0 +472 88 loss.margin 4.893454374373071 +472 88 negative_sampler.num_negs_per_pos 72.0 +472 88 training.batch_size 1.0 +472 89 model.embedding_dim 2.0 +472 89 loss.margin 4.005897512051744 +472 89 negative_sampler.num_negs_per_pos 18.0 +472 89 training.batch_size 1.0 +472 90 model.embedding_dim 0.0 +472 90 loss.margin 2.9836963293511163 +472 90 negative_sampler.num_negs_per_pos 55.0 +472 90 training.batch_size 0.0 +472 91 model.embedding_dim 2.0 +472 91 loss.margin 9.193444410515847 +472 91 negative_sampler.num_negs_per_pos 94.0 +472 91 training.batch_size 2.0 +472 92 model.embedding_dim 0.0 +472 92 loss.margin 9.159396673398227 +472 92 negative_sampler.num_negs_per_pos 59.0 +472 92 training.batch_size 2.0 +472 93 model.embedding_dim 0.0 +472 93 loss.margin 5.56625151035665 +472 93 
negative_sampler.num_negs_per_pos 65.0 +472 93 training.batch_size 2.0 +472 94 model.embedding_dim 0.0 +472 94 loss.margin 6.439765920359363 +472 94 negative_sampler.num_negs_per_pos 95.0 +472 94 training.batch_size 1.0 +472 95 model.embedding_dim 1.0 +472 95 loss.margin 2.5008278438569373 +472 95 negative_sampler.num_negs_per_pos 27.0 +472 95 training.batch_size 0.0 +472 96 model.embedding_dim 1.0 +472 96 loss.margin 9.124730128721913 +472 96 negative_sampler.num_negs_per_pos 32.0 +472 96 training.batch_size 0.0 +472 97 model.embedding_dim 0.0 +472 97 loss.margin 5.6250171689232875 +472 97 negative_sampler.num_negs_per_pos 64.0 +472 97 training.batch_size 1.0 +472 98 model.embedding_dim 2.0 +472 98 loss.margin 4.686564969146116 +472 98 negative_sampler.num_negs_per_pos 16.0 +472 98 training.batch_size 1.0 +472 99 model.embedding_dim 1.0 +472 99 loss.margin 7.7524149145421415 +472 99 negative_sampler.num_negs_per_pos 46.0 +472 99 training.batch_size 0.0 +472 100 model.embedding_dim 0.0 +472 100 loss.margin 5.8012853016488055 +472 100 negative_sampler.num_negs_per_pos 49.0 +472 100 training.batch_size 1.0 +472 1 dataset """kinships""" +472 1 model """proje""" +472 1 loss """marginranking""" +472 1 regularizer """no""" +472 1 optimizer """adadelta""" +472 1 training_loop """owa""" +472 1 negative_sampler """basic""" +472 1 evaluator """rankbased""" +472 2 dataset """kinships""" +472 2 model """proje""" +472 2 loss """marginranking""" +472 2 regularizer """no""" +472 2 optimizer """adadelta""" +472 2 training_loop """owa""" +472 2 negative_sampler """basic""" +472 2 evaluator """rankbased""" +472 3 dataset """kinships""" +472 3 model """proje""" +472 3 loss """marginranking""" +472 3 regularizer """no""" +472 3 optimizer """adadelta""" +472 3 training_loop """owa""" +472 3 negative_sampler """basic""" +472 3 evaluator """rankbased""" +472 4 dataset """kinships""" +472 4 model """proje""" +472 4 loss """marginranking""" +472 4 regularizer """no""" +472 4 optimizer 
"""adadelta""" +472 4 training_loop """owa""" +472 4 negative_sampler """basic""" +472 4 evaluator """rankbased""" +472 5 dataset """kinships""" +472 5 model """proje""" +472 5 loss """marginranking""" +472 5 regularizer """no""" +472 5 optimizer """adadelta""" +472 5 training_loop """owa""" +472 5 negative_sampler """basic""" +472 5 evaluator """rankbased""" +472 6 dataset """kinships""" +472 6 model """proje""" +472 6 loss """marginranking""" +472 6 regularizer """no""" +472 6 optimizer """adadelta""" +472 6 training_loop """owa""" +472 6 negative_sampler """basic""" +472 6 evaluator """rankbased""" +472 7 dataset """kinships""" +472 7 model """proje""" +472 7 loss """marginranking""" +472 7 regularizer """no""" +472 7 optimizer """adadelta""" +472 7 training_loop """owa""" +472 7 negative_sampler """basic""" +472 7 evaluator """rankbased""" +472 8 dataset """kinships""" +472 8 model """proje""" +472 8 loss """marginranking""" +472 8 regularizer """no""" +472 8 optimizer """adadelta""" +472 8 training_loop """owa""" +472 8 negative_sampler """basic""" +472 8 evaluator """rankbased""" +472 9 dataset """kinships""" +472 9 model """proje""" +472 9 loss """marginranking""" +472 9 regularizer """no""" +472 9 optimizer """adadelta""" +472 9 training_loop """owa""" +472 9 negative_sampler """basic""" +472 9 evaluator """rankbased""" +472 10 dataset """kinships""" +472 10 model """proje""" +472 10 loss """marginranking""" +472 10 regularizer """no""" +472 10 optimizer """adadelta""" +472 10 training_loop """owa""" +472 10 negative_sampler """basic""" +472 10 evaluator """rankbased""" +472 11 dataset """kinships""" +472 11 model """proje""" +472 11 loss """marginranking""" +472 11 regularizer """no""" +472 11 optimizer """adadelta""" +472 11 training_loop """owa""" +472 11 negative_sampler """basic""" +472 11 evaluator """rankbased""" +472 12 dataset """kinships""" +472 12 model """proje""" +472 12 loss """marginranking""" +472 12 regularizer """no""" +472 12 optimizer 
"""adadelta""" +472 12 training_loop """owa""" +472 12 negative_sampler """basic""" +472 12 evaluator """rankbased""" +472 13 dataset """kinships""" +472 13 model """proje""" +472 13 loss """marginranking""" +472 13 regularizer """no""" +472 13 optimizer """adadelta""" +472 13 training_loop """owa""" +472 13 negative_sampler """basic""" +472 13 evaluator """rankbased""" +472 14 dataset """kinships""" +472 14 model """proje""" +472 14 loss """marginranking""" +472 14 regularizer """no""" +472 14 optimizer """adadelta""" +472 14 training_loop """owa""" +472 14 negative_sampler """basic""" +472 14 evaluator """rankbased""" +472 15 dataset """kinships""" +472 15 model """proje""" +472 15 loss """marginranking""" +472 15 regularizer """no""" +472 15 optimizer """adadelta""" +472 15 training_loop """owa""" +472 15 negative_sampler """basic""" +472 15 evaluator """rankbased""" +472 16 dataset """kinships""" +472 16 model """proje""" +472 16 loss """marginranking""" +472 16 regularizer """no""" +472 16 optimizer """adadelta""" +472 16 training_loop """owa""" +472 16 negative_sampler """basic""" +472 16 evaluator """rankbased""" +472 17 dataset """kinships""" +472 17 model """proje""" +472 17 loss """marginranking""" +472 17 regularizer """no""" +472 17 optimizer """adadelta""" +472 17 training_loop """owa""" +472 17 negative_sampler """basic""" +472 17 evaluator """rankbased""" +472 18 dataset """kinships""" +472 18 model """proje""" +472 18 loss """marginranking""" +472 18 regularizer """no""" +472 18 optimizer """adadelta""" +472 18 training_loop """owa""" +472 18 negative_sampler """basic""" +472 18 evaluator """rankbased""" +472 19 dataset """kinships""" +472 19 model """proje""" +472 19 loss """marginranking""" +472 19 regularizer """no""" +472 19 optimizer """adadelta""" +472 19 training_loop """owa""" +472 19 negative_sampler """basic""" +472 19 evaluator """rankbased""" +472 20 dataset """kinships""" +472 20 model """proje""" +472 20 loss """marginranking""" +472 
20 regularizer """no""" +472 20 optimizer """adadelta""" +472 20 training_loop """owa""" +472 20 negative_sampler """basic""" +472 20 evaluator """rankbased""" +472 21 dataset """kinships""" +472 21 model """proje""" +472 21 loss """marginranking""" +472 21 regularizer """no""" +472 21 optimizer """adadelta""" +472 21 training_loop """owa""" +472 21 negative_sampler """basic""" +472 21 evaluator """rankbased""" +472 22 dataset """kinships""" +472 22 model """proje""" +472 22 loss """marginranking""" +472 22 regularizer """no""" +472 22 optimizer """adadelta""" +472 22 training_loop """owa""" +472 22 negative_sampler """basic""" +472 22 evaluator """rankbased""" +472 23 dataset """kinships""" +472 23 model """proje""" +472 23 loss """marginranking""" +472 23 regularizer """no""" +472 23 optimizer """adadelta""" +472 23 training_loop """owa""" +472 23 negative_sampler """basic""" +472 23 evaluator """rankbased""" +472 24 dataset """kinships""" +472 24 model """proje""" +472 24 loss """marginranking""" +472 24 regularizer """no""" +472 24 optimizer """adadelta""" +472 24 training_loop """owa""" +472 24 negative_sampler """basic""" +472 24 evaluator """rankbased""" +472 25 dataset """kinships""" +472 25 model """proje""" +472 25 loss """marginranking""" +472 25 regularizer """no""" +472 25 optimizer """adadelta""" +472 25 training_loop """owa""" +472 25 negative_sampler """basic""" +472 25 evaluator """rankbased""" +472 26 dataset """kinships""" +472 26 model """proje""" +472 26 loss """marginranking""" +472 26 regularizer """no""" +472 26 optimizer """adadelta""" +472 26 training_loop """owa""" +472 26 negative_sampler """basic""" +472 26 evaluator """rankbased""" +472 27 dataset """kinships""" +472 27 model """proje""" +472 27 loss """marginranking""" +472 27 regularizer """no""" +472 27 optimizer """adadelta""" +472 27 training_loop """owa""" +472 27 negative_sampler """basic""" +472 27 evaluator """rankbased""" +472 28 dataset """kinships""" +472 28 model 
"""proje""" +472 28 loss """marginranking""" +472 28 regularizer """no""" +472 28 optimizer """adadelta""" +472 28 training_loop """owa""" +472 28 negative_sampler """basic""" +472 28 evaluator """rankbased""" +472 29 dataset """kinships""" +472 29 model """proje""" +472 29 loss """marginranking""" +472 29 regularizer """no""" +472 29 optimizer """adadelta""" +472 29 training_loop """owa""" +472 29 negative_sampler """basic""" +472 29 evaluator """rankbased""" +472 30 dataset """kinships""" +472 30 model """proje""" +472 30 loss """marginranking""" +472 30 regularizer """no""" +472 30 optimizer """adadelta""" +472 30 training_loop """owa""" +472 30 negative_sampler """basic""" +472 30 evaluator """rankbased""" +472 31 dataset """kinships""" +472 31 model """proje""" +472 31 loss """marginranking""" +472 31 regularizer """no""" +472 31 optimizer """adadelta""" +472 31 training_loop """owa""" +472 31 negative_sampler """basic""" +472 31 evaluator """rankbased""" +472 32 dataset """kinships""" +472 32 model """proje""" +472 32 loss """marginranking""" +472 32 regularizer """no""" +472 32 optimizer """adadelta""" +472 32 training_loop """owa""" +472 32 negative_sampler """basic""" +472 32 evaluator """rankbased""" +472 33 dataset """kinships""" +472 33 model """proje""" +472 33 loss """marginranking""" +472 33 regularizer """no""" +472 33 optimizer """adadelta""" +472 33 training_loop """owa""" +472 33 negative_sampler """basic""" +472 33 evaluator """rankbased""" +472 34 dataset """kinships""" +472 34 model """proje""" +472 34 loss """marginranking""" +472 34 regularizer """no""" +472 34 optimizer """adadelta""" +472 34 training_loop """owa""" +472 34 negative_sampler """basic""" +472 34 evaluator """rankbased""" +472 35 dataset """kinships""" +472 35 model """proje""" +472 35 loss """marginranking""" +472 35 regularizer """no""" +472 35 optimizer """adadelta""" +472 35 training_loop """owa""" +472 35 negative_sampler """basic""" +472 35 evaluator """rankbased""" +472 
36 dataset """kinships""" +472 36 model """proje""" +472 36 loss """marginranking""" +472 36 regularizer """no""" +472 36 optimizer """adadelta""" +472 36 training_loop """owa""" +472 36 negative_sampler """basic""" +472 36 evaluator """rankbased""" +472 37 dataset """kinships""" +472 37 model """proje""" +472 37 loss """marginranking""" +472 37 regularizer """no""" +472 37 optimizer """adadelta""" +472 37 training_loop """owa""" +472 37 negative_sampler """basic""" +472 37 evaluator """rankbased""" +472 38 dataset """kinships""" +472 38 model """proje""" +472 38 loss """marginranking""" +472 38 regularizer """no""" +472 38 optimizer """adadelta""" +472 38 training_loop """owa""" +472 38 negative_sampler """basic""" +472 38 evaluator """rankbased""" +472 39 dataset """kinships""" +472 39 model """proje""" +472 39 loss """marginranking""" +472 39 regularizer """no""" +472 39 optimizer """adadelta""" +472 39 training_loop """owa""" +472 39 negative_sampler """basic""" +472 39 evaluator """rankbased""" +472 40 dataset """kinships""" +472 40 model """proje""" +472 40 loss """marginranking""" +472 40 regularizer """no""" +472 40 optimizer """adadelta""" +472 40 training_loop """owa""" +472 40 negative_sampler """basic""" +472 40 evaluator """rankbased""" +472 41 dataset """kinships""" +472 41 model """proje""" +472 41 loss """marginranking""" +472 41 regularizer """no""" +472 41 optimizer """adadelta""" +472 41 training_loop """owa""" +472 41 negative_sampler """basic""" +472 41 evaluator """rankbased""" +472 42 dataset """kinships""" +472 42 model """proje""" +472 42 loss """marginranking""" +472 42 regularizer """no""" +472 42 optimizer """adadelta""" +472 42 training_loop """owa""" +472 42 negative_sampler """basic""" +472 42 evaluator """rankbased""" +472 43 dataset """kinships""" +472 43 model """proje""" +472 43 loss """marginranking""" +472 43 regularizer """no""" +472 43 optimizer """adadelta""" +472 43 training_loop """owa""" +472 43 negative_sampler 
"""basic""" +472 43 evaluator """rankbased""" +472 44 dataset """kinships""" +472 44 model """proje""" +472 44 loss """marginranking""" +472 44 regularizer """no""" +472 44 optimizer """adadelta""" +472 44 training_loop """owa""" +472 44 negative_sampler """basic""" +472 44 evaluator """rankbased""" +472 45 dataset """kinships""" +472 45 model """proje""" +472 45 loss """marginranking""" +472 45 regularizer """no""" +472 45 optimizer """adadelta""" +472 45 training_loop """owa""" +472 45 negative_sampler """basic""" +472 45 evaluator """rankbased""" +472 46 dataset """kinships""" +472 46 model """proje""" +472 46 loss """marginranking""" +472 46 regularizer """no""" +472 46 optimizer """adadelta""" +472 46 training_loop """owa""" +472 46 negative_sampler """basic""" +472 46 evaluator """rankbased""" +472 47 dataset """kinships""" +472 47 model """proje""" +472 47 loss """marginranking""" +472 47 regularizer """no""" +472 47 optimizer """adadelta""" +472 47 training_loop """owa""" +472 47 negative_sampler """basic""" +472 47 evaluator """rankbased""" +472 48 dataset """kinships""" +472 48 model """proje""" +472 48 loss """marginranking""" +472 48 regularizer """no""" +472 48 optimizer """adadelta""" +472 48 training_loop """owa""" +472 48 negative_sampler """basic""" +472 48 evaluator """rankbased""" +472 49 dataset """kinships""" +472 49 model """proje""" +472 49 loss """marginranking""" +472 49 regularizer """no""" +472 49 optimizer """adadelta""" +472 49 training_loop """owa""" +472 49 negative_sampler """basic""" +472 49 evaluator """rankbased""" +472 50 dataset """kinships""" +472 50 model """proje""" +472 50 loss """marginranking""" +472 50 regularizer """no""" +472 50 optimizer """adadelta""" +472 50 training_loop """owa""" +472 50 negative_sampler """basic""" +472 50 evaluator """rankbased""" +472 51 dataset """kinships""" +472 51 model """proje""" +472 51 loss """marginranking""" +472 51 regularizer """no""" +472 51 optimizer """adadelta""" +472 51 
training_loop """owa""" +472 51 negative_sampler """basic""" +472 51 evaluator """rankbased""" +472 52 dataset """kinships""" +472 52 model """proje""" +472 52 loss """marginranking""" +472 52 regularizer """no""" +472 52 optimizer """adadelta""" +472 52 training_loop """owa""" +472 52 negative_sampler """basic""" +472 52 evaluator """rankbased""" +472 53 dataset """kinships""" +472 53 model """proje""" +472 53 loss """marginranking""" +472 53 regularizer """no""" +472 53 optimizer """adadelta""" +472 53 training_loop """owa""" +472 53 negative_sampler """basic""" +472 53 evaluator """rankbased""" +472 54 dataset """kinships""" +472 54 model """proje""" +472 54 loss """marginranking""" +472 54 regularizer """no""" +472 54 optimizer """adadelta""" +472 54 training_loop """owa""" +472 54 negative_sampler """basic""" +472 54 evaluator """rankbased""" +472 55 dataset """kinships""" +472 55 model """proje""" +472 55 loss """marginranking""" +472 55 regularizer """no""" +472 55 optimizer """adadelta""" +472 55 training_loop """owa""" +472 55 negative_sampler """basic""" +472 55 evaluator """rankbased""" +472 56 dataset """kinships""" +472 56 model """proje""" +472 56 loss """marginranking""" +472 56 regularizer """no""" +472 56 optimizer """adadelta""" +472 56 training_loop """owa""" +472 56 negative_sampler """basic""" +472 56 evaluator """rankbased""" +472 57 dataset """kinships""" +472 57 model """proje""" +472 57 loss """marginranking""" +472 57 regularizer """no""" +472 57 optimizer """adadelta""" +472 57 training_loop """owa""" +472 57 negative_sampler """basic""" +472 57 evaluator """rankbased""" +472 58 dataset """kinships""" +472 58 model """proje""" +472 58 loss """marginranking""" +472 58 regularizer """no""" +472 58 optimizer """adadelta""" +472 58 training_loop """owa""" +472 58 negative_sampler """basic""" +472 58 evaluator """rankbased""" +472 59 dataset """kinships""" +472 59 model """proje""" +472 59 loss """marginranking""" +472 59 regularizer """no""" 
+472 59 optimizer """adadelta""" +472 59 training_loop """owa""" +472 59 negative_sampler """basic""" +472 59 evaluator """rankbased""" +472 60 dataset """kinships""" +472 60 model """proje""" +472 60 loss """marginranking""" +472 60 regularizer """no""" +472 60 optimizer """adadelta""" +472 60 training_loop """owa""" +472 60 negative_sampler """basic""" +472 60 evaluator """rankbased""" +472 61 dataset """kinships""" +472 61 model """proje""" +472 61 loss """marginranking""" +472 61 regularizer """no""" +472 61 optimizer """adadelta""" +472 61 training_loop """owa""" +472 61 negative_sampler """basic""" +472 61 evaluator """rankbased""" +472 62 dataset """kinships""" +472 62 model """proje""" +472 62 loss """marginranking""" +472 62 regularizer """no""" +472 62 optimizer """adadelta""" +472 62 training_loop """owa""" +472 62 negative_sampler """basic""" +472 62 evaluator """rankbased""" +472 63 dataset """kinships""" +472 63 model """proje""" +472 63 loss """marginranking""" +472 63 regularizer """no""" +472 63 optimizer """adadelta""" +472 63 training_loop """owa""" +472 63 negative_sampler """basic""" +472 63 evaluator """rankbased""" +472 64 dataset """kinships""" +472 64 model """proje""" +472 64 loss """marginranking""" +472 64 regularizer """no""" +472 64 optimizer """adadelta""" +472 64 training_loop """owa""" +472 64 negative_sampler """basic""" +472 64 evaluator """rankbased""" +472 65 dataset """kinships""" +472 65 model """proje""" +472 65 loss """marginranking""" +472 65 regularizer """no""" +472 65 optimizer """adadelta""" +472 65 training_loop """owa""" +472 65 negative_sampler """basic""" +472 65 evaluator """rankbased""" +472 66 dataset """kinships""" +472 66 model """proje""" +472 66 loss """marginranking""" +472 66 regularizer """no""" +472 66 optimizer """adadelta""" +472 66 training_loop """owa""" +472 66 negative_sampler """basic""" +472 66 evaluator """rankbased""" +472 67 dataset """kinships""" +472 67 model """proje""" +472 67 loss 
"""marginranking""" +472 67 regularizer """no""" +472 67 optimizer """adadelta""" +472 67 training_loop """owa""" +472 67 negative_sampler """basic""" +472 67 evaluator """rankbased""" +472 68 dataset """kinships""" +472 68 model """proje""" +472 68 loss """marginranking""" +472 68 regularizer """no""" +472 68 optimizer """adadelta""" +472 68 training_loop """owa""" +472 68 negative_sampler """basic""" +472 68 evaluator """rankbased""" +472 69 dataset """kinships""" +472 69 model """proje""" +472 69 loss """marginranking""" +472 69 regularizer """no""" +472 69 optimizer """adadelta""" +472 69 training_loop """owa""" +472 69 negative_sampler """basic""" +472 69 evaluator """rankbased""" +472 70 dataset """kinships""" +472 70 model """proje""" +472 70 loss """marginranking""" +472 70 regularizer """no""" +472 70 optimizer """adadelta""" +472 70 training_loop """owa""" +472 70 negative_sampler """basic""" +472 70 evaluator """rankbased""" +472 71 dataset """kinships""" +472 71 model """proje""" +472 71 loss """marginranking""" +472 71 regularizer """no""" +472 71 optimizer """adadelta""" +472 71 training_loop """owa""" +472 71 negative_sampler """basic""" +472 71 evaluator """rankbased""" +472 72 dataset """kinships""" +472 72 model """proje""" +472 72 loss """marginranking""" +472 72 regularizer """no""" +472 72 optimizer """adadelta""" +472 72 training_loop """owa""" +472 72 negative_sampler """basic""" +472 72 evaluator """rankbased""" +472 73 dataset """kinships""" +472 73 model """proje""" +472 73 loss """marginranking""" +472 73 regularizer """no""" +472 73 optimizer """adadelta""" +472 73 training_loop """owa""" +472 73 negative_sampler """basic""" +472 73 evaluator """rankbased""" +472 74 dataset """kinships""" +472 74 model """proje""" +472 74 loss """marginranking""" +472 74 regularizer """no""" +472 74 optimizer """adadelta""" +472 74 training_loop """owa""" +472 74 negative_sampler """basic""" +472 74 evaluator """rankbased""" +472 75 dataset 
"""kinships""" +472 75 model """proje""" +472 75 loss """marginranking""" +472 75 regularizer """no""" +472 75 optimizer """adadelta""" +472 75 training_loop """owa""" +472 75 negative_sampler """basic""" +472 75 evaluator """rankbased""" +472 76 dataset """kinships""" +472 76 model """proje""" +472 76 loss """marginranking""" +472 76 regularizer """no""" +472 76 optimizer """adadelta""" +472 76 training_loop """owa""" +472 76 negative_sampler """basic""" +472 76 evaluator """rankbased""" +472 77 dataset """kinships""" +472 77 model """proje""" +472 77 loss """marginranking""" +472 77 regularizer """no""" +472 77 optimizer """adadelta""" +472 77 training_loop """owa""" +472 77 negative_sampler """basic""" +472 77 evaluator """rankbased""" +472 78 dataset """kinships""" +472 78 model """proje""" +472 78 loss """marginranking""" +472 78 regularizer """no""" +472 78 optimizer """adadelta""" +472 78 training_loop """owa""" +472 78 negative_sampler """basic""" +472 78 evaluator """rankbased""" +472 79 dataset """kinships""" +472 79 model """proje""" +472 79 loss """marginranking""" +472 79 regularizer """no""" +472 79 optimizer """adadelta""" +472 79 training_loop """owa""" +472 79 negative_sampler """basic""" +472 79 evaluator """rankbased""" +472 80 dataset """kinships""" +472 80 model """proje""" +472 80 loss """marginranking""" +472 80 regularizer """no""" +472 80 optimizer """adadelta""" +472 80 training_loop """owa""" +472 80 negative_sampler """basic""" +472 80 evaluator """rankbased""" +472 81 dataset """kinships""" +472 81 model """proje""" +472 81 loss """marginranking""" +472 81 regularizer """no""" +472 81 optimizer """adadelta""" +472 81 training_loop """owa""" +472 81 negative_sampler """basic""" +472 81 evaluator """rankbased""" +472 82 dataset """kinships""" +472 82 model """proje""" +472 82 loss """marginranking""" +472 82 regularizer """no""" +472 82 optimizer """adadelta""" +472 82 training_loop """owa""" +472 82 negative_sampler """basic""" +472 82 
evaluator """rankbased""" +472 83 dataset """kinships""" +472 83 model """proje""" +472 83 loss """marginranking""" +472 83 regularizer """no""" +472 83 optimizer """adadelta""" +472 83 training_loop """owa""" +472 83 negative_sampler """basic""" +472 83 evaluator """rankbased""" +472 84 dataset """kinships""" +472 84 model """proje""" +472 84 loss """marginranking""" +472 84 regularizer """no""" +472 84 optimizer """adadelta""" +472 84 training_loop """owa""" +472 84 negative_sampler """basic""" +472 84 evaluator """rankbased""" +472 85 dataset """kinships""" +472 85 model """proje""" +472 85 loss """marginranking""" +472 85 regularizer """no""" +472 85 optimizer """adadelta""" +472 85 training_loop """owa""" +472 85 negative_sampler """basic""" +472 85 evaluator """rankbased""" +472 86 dataset """kinships""" +472 86 model """proje""" +472 86 loss """marginranking""" +472 86 regularizer """no""" +472 86 optimizer """adadelta""" +472 86 training_loop """owa""" +472 86 negative_sampler """basic""" +472 86 evaluator """rankbased""" +472 87 dataset """kinships""" +472 87 model """proje""" +472 87 loss """marginranking""" +472 87 regularizer """no""" +472 87 optimizer """adadelta""" +472 87 training_loop """owa""" +472 87 negative_sampler """basic""" +472 87 evaluator """rankbased""" +472 88 dataset """kinships""" +472 88 model """proje""" +472 88 loss """marginranking""" +472 88 regularizer """no""" +472 88 optimizer """adadelta""" +472 88 training_loop """owa""" +472 88 negative_sampler """basic""" +472 88 evaluator """rankbased""" +472 89 dataset """kinships""" +472 89 model """proje""" +472 89 loss """marginranking""" +472 89 regularizer """no""" +472 89 optimizer """adadelta""" +472 89 training_loop """owa""" +472 89 negative_sampler """basic""" +472 89 evaluator """rankbased""" +472 90 dataset """kinships""" +472 90 model """proje""" +472 90 loss """marginranking""" +472 90 regularizer """no""" +472 90 optimizer """adadelta""" +472 90 training_loop """owa""" +472 
90 negative_sampler """basic""" +472 90 evaluator """rankbased""" +472 91 dataset """kinships""" +472 91 model """proje""" +472 91 loss """marginranking""" +472 91 regularizer """no""" +472 91 optimizer """adadelta""" +472 91 training_loop """owa""" +472 91 negative_sampler """basic""" +472 91 evaluator """rankbased""" +472 92 dataset """kinships""" +472 92 model """proje""" +472 92 loss """marginranking""" +472 92 regularizer """no""" +472 92 optimizer """adadelta""" +472 92 training_loop """owa""" +472 92 negative_sampler """basic""" +472 92 evaluator """rankbased""" +472 93 dataset """kinships""" +472 93 model """proje""" +472 93 loss """marginranking""" +472 93 regularizer """no""" +472 93 optimizer """adadelta""" +472 93 training_loop """owa""" +472 93 negative_sampler """basic""" +472 93 evaluator """rankbased""" +472 94 dataset """kinships""" +472 94 model """proje""" +472 94 loss """marginranking""" +472 94 regularizer """no""" +472 94 optimizer """adadelta""" +472 94 training_loop """owa""" +472 94 negative_sampler """basic""" +472 94 evaluator """rankbased""" +472 95 dataset """kinships""" +472 95 model """proje""" +472 95 loss """marginranking""" +472 95 regularizer """no""" +472 95 optimizer """adadelta""" +472 95 training_loop """owa""" +472 95 negative_sampler """basic""" +472 95 evaluator """rankbased""" +472 96 dataset """kinships""" +472 96 model """proje""" +472 96 loss """marginranking""" +472 96 regularizer """no""" +472 96 optimizer """adadelta""" +472 96 training_loop """owa""" +472 96 negative_sampler """basic""" +472 96 evaluator """rankbased""" +472 97 dataset """kinships""" +472 97 model """proje""" +472 97 loss """marginranking""" +472 97 regularizer """no""" +472 97 optimizer """adadelta""" +472 97 training_loop """owa""" +472 97 negative_sampler """basic""" +472 97 evaluator """rankbased""" +472 98 dataset """kinships""" +472 98 model """proje""" +472 98 loss """marginranking""" +472 98 regularizer """no""" +472 98 optimizer 
"""adadelta""" +472 98 training_loop """owa""" +472 98 negative_sampler """basic""" +472 98 evaluator """rankbased""" +472 99 dataset """kinships""" +472 99 model """proje""" +472 99 loss """marginranking""" +472 99 regularizer """no""" +472 99 optimizer """adadelta""" +472 99 training_loop """owa""" +472 99 negative_sampler """basic""" +472 99 evaluator """rankbased""" +472 100 dataset """kinships""" +472 100 model """proje""" +472 100 loss """marginranking""" +472 100 regularizer """no""" +472 100 optimizer """adadelta""" +472 100 training_loop """owa""" +472 100 negative_sampler """basic""" +472 100 evaluator """rankbased""" +473 1 model.embedding_dim 0.0 +473 1 negative_sampler.num_negs_per_pos 24.0 +473 1 training.batch_size 2.0 +473 2 model.embedding_dim 2.0 +473 2 negative_sampler.num_negs_per_pos 36.0 +473 2 training.batch_size 1.0 +473 3 model.embedding_dim 0.0 +473 3 negative_sampler.num_negs_per_pos 17.0 +473 3 training.batch_size 0.0 +473 4 model.embedding_dim 0.0 +473 4 negative_sampler.num_negs_per_pos 40.0 +473 4 training.batch_size 2.0 +473 5 model.embedding_dim 1.0 +473 5 negative_sampler.num_negs_per_pos 52.0 +473 5 training.batch_size 2.0 +473 6 model.embedding_dim 2.0 +473 6 negative_sampler.num_negs_per_pos 99.0 +473 6 training.batch_size 0.0 +473 7 model.embedding_dim 2.0 +473 7 negative_sampler.num_negs_per_pos 85.0 +473 7 training.batch_size 0.0 +473 8 model.embedding_dim 1.0 +473 8 negative_sampler.num_negs_per_pos 9.0 +473 8 training.batch_size 2.0 +473 9 model.embedding_dim 0.0 +473 9 negative_sampler.num_negs_per_pos 34.0 +473 9 training.batch_size 2.0 +473 10 model.embedding_dim 2.0 +473 10 negative_sampler.num_negs_per_pos 53.0 +473 10 training.batch_size 1.0 +473 11 model.embedding_dim 1.0 +473 11 negative_sampler.num_negs_per_pos 22.0 +473 11 training.batch_size 2.0 +473 12 model.embedding_dim 0.0 +473 12 negative_sampler.num_negs_per_pos 8.0 +473 12 training.batch_size 0.0 +473 13 model.embedding_dim 2.0 +473 13 
negative_sampler.num_negs_per_pos 39.0 +473 13 training.batch_size 0.0 +473 14 model.embedding_dim 1.0 +473 14 negative_sampler.num_negs_per_pos 3.0 +473 14 training.batch_size 1.0 +473 15 model.embedding_dim 1.0 +473 15 negative_sampler.num_negs_per_pos 16.0 +473 15 training.batch_size 0.0 +473 16 model.embedding_dim 1.0 +473 16 negative_sampler.num_negs_per_pos 74.0 +473 16 training.batch_size 0.0 +473 17 model.embedding_dim 2.0 +473 17 negative_sampler.num_negs_per_pos 13.0 +473 17 training.batch_size 0.0 +473 18 model.embedding_dim 0.0 +473 18 negative_sampler.num_negs_per_pos 97.0 +473 18 training.batch_size 1.0 +473 19 model.embedding_dim 1.0 +473 19 negative_sampler.num_negs_per_pos 42.0 +473 19 training.batch_size 0.0 +473 20 model.embedding_dim 2.0 +473 20 negative_sampler.num_negs_per_pos 88.0 +473 20 training.batch_size 0.0 +473 21 model.embedding_dim 2.0 +473 21 negative_sampler.num_negs_per_pos 92.0 +473 21 training.batch_size 1.0 +473 22 model.embedding_dim 2.0 +473 22 negative_sampler.num_negs_per_pos 42.0 +473 22 training.batch_size 0.0 +473 23 model.embedding_dim 2.0 +473 23 negative_sampler.num_negs_per_pos 74.0 +473 23 training.batch_size 2.0 +473 24 model.embedding_dim 2.0 +473 24 negative_sampler.num_negs_per_pos 15.0 +473 24 training.batch_size 1.0 +473 25 model.embedding_dim 1.0 +473 25 negative_sampler.num_negs_per_pos 40.0 +473 25 training.batch_size 2.0 +473 26 model.embedding_dim 0.0 +473 26 negative_sampler.num_negs_per_pos 59.0 +473 26 training.batch_size 2.0 +473 27 model.embedding_dim 1.0 +473 27 negative_sampler.num_negs_per_pos 42.0 +473 27 training.batch_size 2.0 +473 28 model.embedding_dim 0.0 +473 28 negative_sampler.num_negs_per_pos 70.0 +473 28 training.batch_size 2.0 +473 29 model.embedding_dim 2.0 +473 29 negative_sampler.num_negs_per_pos 5.0 +473 29 training.batch_size 0.0 +473 30 model.embedding_dim 2.0 +473 30 negative_sampler.num_negs_per_pos 38.0 +473 30 training.batch_size 1.0 +473 31 model.embedding_dim 0.0 +473 31 
negative_sampler.num_negs_per_pos 66.0 +473 31 training.batch_size 0.0 +473 32 model.embedding_dim 0.0 +473 32 negative_sampler.num_negs_per_pos 37.0 +473 32 training.batch_size 0.0 +473 33 model.embedding_dim 0.0 +473 33 negative_sampler.num_negs_per_pos 34.0 +473 33 training.batch_size 0.0 +473 34 model.embedding_dim 0.0 +473 34 negative_sampler.num_negs_per_pos 41.0 +473 34 training.batch_size 0.0 +473 35 model.embedding_dim 1.0 +473 35 negative_sampler.num_negs_per_pos 98.0 +473 35 training.batch_size 2.0 +473 36 model.embedding_dim 2.0 +473 36 negative_sampler.num_negs_per_pos 35.0 +473 36 training.batch_size 0.0 +473 37 model.embedding_dim 0.0 +473 37 negative_sampler.num_negs_per_pos 63.0 +473 37 training.batch_size 0.0 +473 38 model.embedding_dim 2.0 +473 38 negative_sampler.num_negs_per_pos 34.0 +473 38 training.batch_size 1.0 +473 39 model.embedding_dim 2.0 +473 39 negative_sampler.num_negs_per_pos 65.0 +473 39 training.batch_size 2.0 +473 40 model.embedding_dim 0.0 +473 40 negative_sampler.num_negs_per_pos 97.0 +473 40 training.batch_size 0.0 +473 41 model.embedding_dim 0.0 +473 41 negative_sampler.num_negs_per_pos 26.0 +473 41 training.batch_size 1.0 +473 42 model.embedding_dim 0.0 +473 42 negative_sampler.num_negs_per_pos 6.0 +473 42 training.batch_size 2.0 +473 43 model.embedding_dim 2.0 +473 43 negative_sampler.num_negs_per_pos 76.0 +473 43 training.batch_size 1.0 +473 44 model.embedding_dim 2.0 +473 44 negative_sampler.num_negs_per_pos 41.0 +473 44 training.batch_size 1.0 +473 45 model.embedding_dim 1.0 +473 45 negative_sampler.num_negs_per_pos 35.0 +473 45 training.batch_size 2.0 +473 46 model.embedding_dim 1.0 +473 46 negative_sampler.num_negs_per_pos 95.0 +473 46 training.batch_size 2.0 +473 47 model.embedding_dim 1.0 +473 47 negative_sampler.num_negs_per_pos 79.0 +473 47 training.batch_size 1.0 +473 48 model.embedding_dim 1.0 +473 48 negative_sampler.num_negs_per_pos 95.0 +473 48 training.batch_size 0.0 +473 49 model.embedding_dim 0.0 +473 49 
negative_sampler.num_negs_per_pos 37.0 +473 49 training.batch_size 1.0 +473 50 model.embedding_dim 0.0 +473 50 negative_sampler.num_negs_per_pos 35.0 +473 50 training.batch_size 2.0 +473 51 model.embedding_dim 1.0 +473 51 negative_sampler.num_negs_per_pos 27.0 +473 51 training.batch_size 1.0 +473 52 model.embedding_dim 1.0 +473 52 negative_sampler.num_negs_per_pos 16.0 +473 52 training.batch_size 0.0 +473 53 model.embedding_dim 1.0 +473 53 negative_sampler.num_negs_per_pos 3.0 +473 53 training.batch_size 0.0 +473 54 model.embedding_dim 2.0 +473 54 negative_sampler.num_negs_per_pos 9.0 +473 54 training.batch_size 2.0 +473 55 model.embedding_dim 2.0 +473 55 negative_sampler.num_negs_per_pos 26.0 +473 55 training.batch_size 0.0 +473 56 model.embedding_dim 0.0 +473 56 negative_sampler.num_negs_per_pos 37.0 +473 56 training.batch_size 0.0 +473 57 model.embedding_dim 2.0 +473 57 negative_sampler.num_negs_per_pos 2.0 +473 57 training.batch_size 2.0 +473 58 model.embedding_dim 1.0 +473 58 negative_sampler.num_negs_per_pos 61.0 +473 58 training.batch_size 2.0 +473 59 model.embedding_dim 0.0 +473 59 negative_sampler.num_negs_per_pos 42.0 +473 59 training.batch_size 1.0 +473 60 model.embedding_dim 0.0 +473 60 negative_sampler.num_negs_per_pos 74.0 +473 60 training.batch_size 2.0 +473 61 model.embedding_dim 0.0 +473 61 negative_sampler.num_negs_per_pos 37.0 +473 61 training.batch_size 2.0 +473 62 model.embedding_dim 0.0 +473 62 negative_sampler.num_negs_per_pos 13.0 +473 62 training.batch_size 0.0 +473 63 model.embedding_dim 0.0 +473 63 negative_sampler.num_negs_per_pos 37.0 +473 63 training.batch_size 0.0 +473 64 model.embedding_dim 1.0 +473 64 negative_sampler.num_negs_per_pos 44.0 +473 64 training.batch_size 1.0 +473 65 model.embedding_dim 0.0 +473 65 negative_sampler.num_negs_per_pos 59.0 +473 65 training.batch_size 2.0 +473 66 model.embedding_dim 1.0 +473 66 negative_sampler.num_negs_per_pos 48.0 +473 66 training.batch_size 0.0 +473 67 model.embedding_dim 1.0 +473 67 
negative_sampler.num_negs_per_pos 88.0 +473 67 training.batch_size 1.0 +473 68 model.embedding_dim 0.0 +473 68 negative_sampler.num_negs_per_pos 84.0 +473 68 training.batch_size 0.0 +473 69 model.embedding_dim 1.0 +473 69 negative_sampler.num_negs_per_pos 76.0 +473 69 training.batch_size 0.0 +473 70 model.embedding_dim 1.0 +473 70 negative_sampler.num_negs_per_pos 16.0 +473 70 training.batch_size 1.0 +473 71 model.embedding_dim 1.0 +473 71 negative_sampler.num_negs_per_pos 60.0 +473 71 training.batch_size 0.0 +473 72 model.embedding_dim 1.0 +473 72 negative_sampler.num_negs_per_pos 12.0 +473 72 training.batch_size 0.0 +473 73 model.embedding_dim 2.0 +473 73 negative_sampler.num_negs_per_pos 91.0 +473 73 training.batch_size 0.0 +473 74 model.embedding_dim 1.0 +473 74 negative_sampler.num_negs_per_pos 98.0 +473 74 training.batch_size 1.0 +473 75 model.embedding_dim 0.0 +473 75 negative_sampler.num_negs_per_pos 46.0 +473 75 training.batch_size 0.0 +473 76 model.embedding_dim 1.0 +473 76 negative_sampler.num_negs_per_pos 39.0 +473 76 training.batch_size 2.0 +473 77 model.embedding_dim 0.0 +473 77 negative_sampler.num_negs_per_pos 6.0 +473 77 training.batch_size 1.0 +473 78 model.embedding_dim 2.0 +473 78 negative_sampler.num_negs_per_pos 55.0 +473 78 training.batch_size 1.0 +473 79 model.embedding_dim 0.0 +473 79 negative_sampler.num_negs_per_pos 60.0 +473 79 training.batch_size 1.0 +473 80 model.embedding_dim 0.0 +473 80 negative_sampler.num_negs_per_pos 51.0 +473 80 training.batch_size 1.0 +473 81 model.embedding_dim 1.0 +473 81 negative_sampler.num_negs_per_pos 32.0 +473 81 training.batch_size 1.0 +473 82 model.embedding_dim 2.0 +473 82 negative_sampler.num_negs_per_pos 50.0 +473 82 training.batch_size 2.0 +473 83 model.embedding_dim 2.0 +473 83 negative_sampler.num_negs_per_pos 63.0 +473 83 training.batch_size 1.0 +473 84 model.embedding_dim 0.0 +473 84 negative_sampler.num_negs_per_pos 85.0 +473 84 training.batch_size 2.0 +473 85 model.embedding_dim 2.0 +473 85 
negative_sampler.num_negs_per_pos 63.0 +473 85 training.batch_size 0.0 +473 86 model.embedding_dim 0.0 +473 86 negative_sampler.num_negs_per_pos 56.0 +473 86 training.batch_size 1.0 +473 87 model.embedding_dim 0.0 +473 87 negative_sampler.num_negs_per_pos 30.0 +473 87 training.batch_size 1.0 +473 88 model.embedding_dim 0.0 +473 88 negative_sampler.num_negs_per_pos 36.0 +473 88 training.batch_size 1.0 +473 89 model.embedding_dim 0.0 +473 89 negative_sampler.num_negs_per_pos 38.0 +473 89 training.batch_size 1.0 +473 90 model.embedding_dim 1.0 +473 90 negative_sampler.num_negs_per_pos 71.0 +473 90 training.batch_size 2.0 +473 91 model.embedding_dim 2.0 +473 91 negative_sampler.num_negs_per_pos 68.0 +473 91 training.batch_size 2.0 +473 92 model.embedding_dim 0.0 +473 92 negative_sampler.num_negs_per_pos 77.0 +473 92 training.batch_size 2.0 +473 93 model.embedding_dim 1.0 +473 93 negative_sampler.num_negs_per_pos 98.0 +473 93 training.batch_size 2.0 +473 94 model.embedding_dim 1.0 +473 94 negative_sampler.num_negs_per_pos 98.0 +473 94 training.batch_size 1.0 +473 95 model.embedding_dim 2.0 +473 95 negative_sampler.num_negs_per_pos 15.0 +473 95 training.batch_size 1.0 +473 96 model.embedding_dim 1.0 +473 96 negative_sampler.num_negs_per_pos 35.0 +473 96 training.batch_size 2.0 +473 97 model.embedding_dim 2.0 +473 97 negative_sampler.num_negs_per_pos 62.0 +473 97 training.batch_size 2.0 +473 98 model.embedding_dim 2.0 +473 98 negative_sampler.num_negs_per_pos 12.0 +473 98 training.batch_size 0.0 +473 99 model.embedding_dim 2.0 +473 99 negative_sampler.num_negs_per_pos 8.0 +473 99 training.batch_size 1.0 +473 100 model.embedding_dim 0.0 +473 100 negative_sampler.num_negs_per_pos 8.0 +473 100 training.batch_size 0.0 +473 1 dataset """kinships""" +473 1 model """proje""" +473 1 loss """bceaftersigmoid""" +473 1 regularizer """no""" +473 1 optimizer """adadelta""" +473 1 training_loop """owa""" +473 1 negative_sampler """basic""" +473 1 evaluator """rankbased""" +473 2 
dataset """kinships""" +473 2 model """proje""" +473 2 loss """bceaftersigmoid""" +473 2 regularizer """no""" +473 2 optimizer """adadelta""" +473 2 training_loop """owa""" +473 2 negative_sampler """basic""" +473 2 evaluator """rankbased""" +473 3 dataset """kinships""" +473 3 model """proje""" +473 3 loss """bceaftersigmoid""" +473 3 regularizer """no""" +473 3 optimizer """adadelta""" +473 3 training_loop """owa""" +473 3 negative_sampler """basic""" +473 3 evaluator """rankbased""" +473 4 dataset """kinships""" +473 4 model """proje""" +473 4 loss """bceaftersigmoid""" +473 4 regularizer """no""" +473 4 optimizer """adadelta""" +473 4 training_loop """owa""" +473 4 negative_sampler """basic""" +473 4 evaluator """rankbased""" +473 5 dataset """kinships""" +473 5 model """proje""" +473 5 loss """bceaftersigmoid""" +473 5 regularizer """no""" +473 5 optimizer """adadelta""" +473 5 training_loop """owa""" +473 5 negative_sampler """basic""" +473 5 evaluator """rankbased""" +473 6 dataset """kinships""" +473 6 model """proje""" +473 6 loss """bceaftersigmoid""" +473 6 regularizer """no""" +473 6 optimizer """adadelta""" +473 6 training_loop """owa""" +473 6 negative_sampler """basic""" +473 6 evaluator """rankbased""" +473 7 dataset """kinships""" +473 7 model """proje""" +473 7 loss """bceaftersigmoid""" +473 7 regularizer """no""" +473 7 optimizer """adadelta""" +473 7 training_loop """owa""" +473 7 negative_sampler """basic""" +473 7 evaluator """rankbased""" +473 8 dataset """kinships""" +473 8 model """proje""" +473 8 loss """bceaftersigmoid""" +473 8 regularizer """no""" +473 8 optimizer """adadelta""" +473 8 training_loop """owa""" +473 8 negative_sampler """basic""" +473 8 evaluator """rankbased""" +473 9 dataset """kinships""" +473 9 model """proje""" +473 9 loss """bceaftersigmoid""" +473 9 regularizer """no""" +473 9 optimizer """adadelta""" +473 9 training_loop """owa""" +473 9 negative_sampler """basic""" +473 9 evaluator """rankbased""" +473 10 
dataset """kinships""" +473 10 model """proje""" +473 10 loss """bceaftersigmoid""" +473 10 regularizer """no""" +473 10 optimizer """adadelta""" +473 10 training_loop """owa""" +473 10 negative_sampler """basic""" +473 10 evaluator """rankbased""" +473 11 dataset """kinships""" +473 11 model """proje""" +473 11 loss """bceaftersigmoid""" +473 11 regularizer """no""" +473 11 optimizer """adadelta""" +473 11 training_loop """owa""" +473 11 negative_sampler """basic""" +473 11 evaluator """rankbased""" +473 12 dataset """kinships""" +473 12 model """proje""" +473 12 loss """bceaftersigmoid""" +473 12 regularizer """no""" +473 12 optimizer """adadelta""" +473 12 training_loop """owa""" +473 12 negative_sampler """basic""" +473 12 evaluator """rankbased""" +473 13 dataset """kinships""" +473 13 model """proje""" +473 13 loss """bceaftersigmoid""" +473 13 regularizer """no""" +473 13 optimizer """adadelta""" +473 13 training_loop """owa""" +473 13 negative_sampler """basic""" +473 13 evaluator """rankbased""" +473 14 dataset """kinships""" +473 14 model """proje""" +473 14 loss """bceaftersigmoid""" +473 14 regularizer """no""" +473 14 optimizer """adadelta""" +473 14 training_loop """owa""" +473 14 negative_sampler """basic""" +473 14 evaluator """rankbased""" +473 15 dataset """kinships""" +473 15 model """proje""" +473 15 loss """bceaftersigmoid""" +473 15 regularizer """no""" +473 15 optimizer """adadelta""" +473 15 training_loop """owa""" +473 15 negative_sampler """basic""" +473 15 evaluator """rankbased""" +473 16 dataset """kinships""" +473 16 model """proje""" +473 16 loss """bceaftersigmoid""" +473 16 regularizer """no""" +473 16 optimizer """adadelta""" +473 16 training_loop """owa""" +473 16 negative_sampler """basic""" +473 16 evaluator """rankbased""" +473 17 dataset """kinships""" +473 17 model """proje""" +473 17 loss """bceaftersigmoid""" +473 17 regularizer """no""" +473 17 optimizer """adadelta""" +473 17 training_loop """owa""" +473 17 
negative_sampler """basic""" +473 17 evaluator """rankbased""" +473 18 dataset """kinships""" +473 18 model """proje""" +473 18 loss """bceaftersigmoid""" +473 18 regularizer """no""" +473 18 optimizer """adadelta""" +473 18 training_loop """owa""" +473 18 negative_sampler """basic""" +473 18 evaluator """rankbased""" +473 19 dataset """kinships""" +473 19 model """proje""" +473 19 loss """bceaftersigmoid""" +473 19 regularizer """no""" +473 19 optimizer """adadelta""" +473 19 training_loop """owa""" +473 19 negative_sampler """basic""" +473 19 evaluator """rankbased""" +473 20 dataset """kinships""" +473 20 model """proje""" +473 20 loss """bceaftersigmoid""" +473 20 regularizer """no""" +473 20 optimizer """adadelta""" +473 20 training_loop """owa""" +473 20 negative_sampler """basic""" +473 20 evaluator """rankbased""" +473 21 dataset """kinships""" +473 21 model """proje""" +473 21 loss """bceaftersigmoid""" +473 21 regularizer """no""" +473 21 optimizer """adadelta""" +473 21 training_loop """owa""" +473 21 negative_sampler """basic""" +473 21 evaluator """rankbased""" +473 22 dataset """kinships""" +473 22 model """proje""" +473 22 loss """bceaftersigmoid""" +473 22 regularizer """no""" +473 22 optimizer """adadelta""" +473 22 training_loop """owa""" +473 22 negative_sampler """basic""" +473 22 evaluator """rankbased""" +473 23 dataset """kinships""" +473 23 model """proje""" +473 23 loss """bceaftersigmoid""" +473 23 regularizer """no""" +473 23 optimizer """adadelta""" +473 23 training_loop """owa""" +473 23 negative_sampler """basic""" +473 23 evaluator """rankbased""" +473 24 dataset """kinships""" +473 24 model """proje""" +473 24 loss """bceaftersigmoid""" +473 24 regularizer """no""" +473 24 optimizer """adadelta""" +473 24 training_loop """owa""" +473 24 negative_sampler """basic""" +473 24 evaluator """rankbased""" +473 25 dataset """kinships""" +473 25 model """proje""" +473 25 loss """bceaftersigmoid""" +473 25 regularizer """no""" +473 25 
optimizer """adadelta""" +473 25 training_loop """owa""" +473 25 negative_sampler """basic""" +473 25 evaluator """rankbased""" +473 26 dataset """kinships""" +473 26 model """proje""" +473 26 loss """bceaftersigmoid""" +473 26 regularizer """no""" +473 26 optimizer """adadelta""" +473 26 training_loop """owa""" +473 26 negative_sampler """basic""" +473 26 evaluator """rankbased""" +473 27 dataset """kinships""" +473 27 model """proje""" +473 27 loss """bceaftersigmoid""" +473 27 regularizer """no""" +473 27 optimizer """adadelta""" +473 27 training_loop """owa""" +473 27 negative_sampler """basic""" +473 27 evaluator """rankbased""" +473 28 dataset """kinships""" +473 28 model """proje""" +473 28 loss """bceaftersigmoid""" +473 28 regularizer """no""" +473 28 optimizer """adadelta""" +473 28 training_loop """owa""" +473 28 negative_sampler """basic""" +473 28 evaluator """rankbased""" +473 29 dataset """kinships""" +473 29 model """proje""" +473 29 loss """bceaftersigmoid""" +473 29 regularizer """no""" +473 29 optimizer """adadelta""" +473 29 training_loop """owa""" +473 29 negative_sampler """basic""" +473 29 evaluator """rankbased""" +473 30 dataset """kinships""" +473 30 model """proje""" +473 30 loss """bceaftersigmoid""" +473 30 regularizer """no""" +473 30 optimizer """adadelta""" +473 30 training_loop """owa""" +473 30 negative_sampler """basic""" +473 30 evaluator """rankbased""" +473 31 dataset """kinships""" +473 31 model """proje""" +473 31 loss """bceaftersigmoid""" +473 31 regularizer """no""" +473 31 optimizer """adadelta""" +473 31 training_loop """owa""" +473 31 negative_sampler """basic""" +473 31 evaluator """rankbased""" +473 32 dataset """kinships""" +473 32 model """proje""" +473 32 loss """bceaftersigmoid""" +473 32 regularizer """no""" +473 32 optimizer """adadelta""" +473 32 training_loop """owa""" +473 32 negative_sampler """basic""" +473 32 evaluator """rankbased""" +473 33 dataset """kinships""" +473 33 model """proje""" +473 33 loss 
"""bceaftersigmoid""" +473 33 regularizer """no""" +473 33 optimizer """adadelta""" +473 33 training_loop """owa""" +473 33 negative_sampler """basic""" +473 33 evaluator """rankbased""" +473 34 dataset """kinships""" +473 34 model """proje""" +473 34 loss """bceaftersigmoid""" +473 34 regularizer """no""" +473 34 optimizer """adadelta""" +473 34 training_loop """owa""" +473 34 negative_sampler """basic""" +473 34 evaluator """rankbased""" +473 35 dataset """kinships""" +473 35 model """proje""" +473 35 loss """bceaftersigmoid""" +473 35 regularizer """no""" +473 35 optimizer """adadelta""" +473 35 training_loop """owa""" +473 35 negative_sampler """basic""" +473 35 evaluator """rankbased""" +473 36 dataset """kinships""" +473 36 model """proje""" +473 36 loss """bceaftersigmoid""" +473 36 regularizer """no""" +473 36 optimizer """adadelta""" +473 36 training_loop """owa""" +473 36 negative_sampler """basic""" +473 36 evaluator """rankbased""" +473 37 dataset """kinships""" +473 37 model """proje""" +473 37 loss """bceaftersigmoid""" +473 37 regularizer """no""" +473 37 optimizer """adadelta""" +473 37 training_loop """owa""" +473 37 negative_sampler """basic""" +473 37 evaluator """rankbased""" +473 38 dataset """kinships""" +473 38 model """proje""" +473 38 loss """bceaftersigmoid""" +473 38 regularizer """no""" +473 38 optimizer """adadelta""" +473 38 training_loop """owa""" +473 38 negative_sampler """basic""" +473 38 evaluator """rankbased""" +473 39 dataset """kinships""" +473 39 model """proje""" +473 39 loss """bceaftersigmoid""" +473 39 regularizer """no""" +473 39 optimizer """adadelta""" +473 39 training_loop """owa""" +473 39 negative_sampler """basic""" +473 39 evaluator """rankbased""" +473 40 dataset """kinships""" +473 40 model """proje""" +473 40 loss """bceaftersigmoid""" +473 40 regularizer """no""" +473 40 optimizer """adadelta""" +473 40 training_loop """owa""" +473 40 negative_sampler """basic""" +473 40 evaluator """rankbased""" +473 41 
dataset """kinships""" +473 41 model """proje""" +473 41 loss """bceaftersigmoid""" +473 41 regularizer """no""" +473 41 optimizer """adadelta""" +473 41 training_loop """owa""" +473 41 negative_sampler """basic""" +473 41 evaluator """rankbased""" +473 42 dataset """kinships""" +473 42 model """proje""" +473 42 loss """bceaftersigmoid""" +473 42 regularizer """no""" +473 42 optimizer """adadelta""" +473 42 training_loop """owa""" +473 42 negative_sampler """basic""" +473 42 evaluator """rankbased""" +473 43 dataset """kinships""" +473 43 model """proje""" +473 43 loss """bceaftersigmoid""" +473 43 regularizer """no""" +473 43 optimizer """adadelta""" +473 43 training_loop """owa""" +473 43 negative_sampler """basic""" +473 43 evaluator """rankbased""" +473 44 dataset """kinships""" +473 44 model """proje""" +473 44 loss """bceaftersigmoid""" +473 44 regularizer """no""" +473 44 optimizer """adadelta""" +473 44 training_loop """owa""" +473 44 negative_sampler """basic""" +473 44 evaluator """rankbased""" +473 45 dataset """kinships""" +473 45 model """proje""" +473 45 loss """bceaftersigmoid""" +473 45 regularizer """no""" +473 45 optimizer """adadelta""" +473 45 training_loop """owa""" +473 45 negative_sampler """basic""" +473 45 evaluator """rankbased""" +473 46 dataset """kinships""" +473 46 model """proje""" +473 46 loss """bceaftersigmoid""" +473 46 regularizer """no""" +473 46 optimizer """adadelta""" +473 46 training_loop """owa""" +473 46 negative_sampler """basic""" +473 46 evaluator """rankbased""" +473 47 dataset """kinships""" +473 47 model """proje""" +473 47 loss """bceaftersigmoid""" +473 47 regularizer """no""" +473 47 optimizer """adadelta""" +473 47 training_loop """owa""" +473 47 negative_sampler """basic""" +473 47 evaluator """rankbased""" +473 48 dataset """kinships""" +473 48 model """proje""" +473 48 loss """bceaftersigmoid""" +473 48 regularizer """no""" +473 48 optimizer """adadelta""" +473 48 training_loop """owa""" +473 48 
negative_sampler """basic""" +473 48 evaluator """rankbased""" +473 49 dataset """kinships""" +473 49 model """proje""" +473 49 loss """bceaftersigmoid""" +473 49 regularizer """no""" +473 49 optimizer """adadelta""" +473 49 training_loop """owa""" +473 49 negative_sampler """basic""" +473 49 evaluator """rankbased""" +473 50 dataset """kinships""" +473 50 model """proje""" +473 50 loss """bceaftersigmoid""" +473 50 regularizer """no""" +473 50 optimizer """adadelta""" +473 50 training_loop """owa""" +473 50 negative_sampler """basic""" +473 50 evaluator """rankbased""" +473 51 dataset """kinships""" +473 51 model """proje""" +473 51 loss """bceaftersigmoid""" +473 51 regularizer """no""" +473 51 optimizer """adadelta""" +473 51 training_loop """owa""" +473 51 negative_sampler """basic""" +473 51 evaluator """rankbased""" +473 52 dataset """kinships""" +473 52 model """proje""" +473 52 loss """bceaftersigmoid""" +473 52 regularizer """no""" +473 52 optimizer """adadelta""" +473 52 training_loop """owa""" +473 52 negative_sampler """basic""" +473 52 evaluator """rankbased""" +473 53 dataset """kinships""" +473 53 model """proje""" +473 53 loss """bceaftersigmoid""" +473 53 regularizer """no""" +473 53 optimizer """adadelta""" +473 53 training_loop """owa""" +473 53 negative_sampler """basic""" +473 53 evaluator """rankbased""" +473 54 dataset """kinships""" +473 54 model """proje""" +473 54 loss """bceaftersigmoid""" +473 54 regularizer """no""" +473 54 optimizer """adadelta""" +473 54 training_loop """owa""" +473 54 negative_sampler """basic""" +473 54 evaluator """rankbased""" +473 55 dataset """kinships""" +473 55 model """proje""" +473 55 loss """bceaftersigmoid""" +473 55 regularizer """no""" +473 55 optimizer """adadelta""" +473 55 training_loop """owa""" +473 55 negative_sampler """basic""" +473 55 evaluator """rankbased""" +473 56 dataset """kinships""" +473 56 model """proje""" +473 56 loss """bceaftersigmoid""" +473 56 regularizer """no""" +473 56 
optimizer """adadelta""" +473 56 training_loop """owa""" +473 56 negative_sampler """basic""" +473 56 evaluator """rankbased""" +473 57 dataset """kinships""" +473 57 model """proje""" +473 57 loss """bceaftersigmoid""" +473 57 regularizer """no""" +473 57 optimizer """adadelta""" +473 57 training_loop """owa""" +473 57 negative_sampler """basic""" +473 57 evaluator """rankbased""" +473 58 dataset """kinships""" +473 58 model """proje""" +473 58 loss """bceaftersigmoid""" +473 58 regularizer """no""" +473 58 optimizer """adadelta""" +473 58 training_loop """owa""" +473 58 negative_sampler """basic""" +473 58 evaluator """rankbased""" +473 59 dataset """kinships""" +473 59 model """proje""" +473 59 loss """bceaftersigmoid""" +473 59 regularizer """no""" +473 59 optimizer """adadelta""" +473 59 training_loop """owa""" +473 59 negative_sampler """basic""" +473 59 evaluator """rankbased""" +473 60 dataset """kinships""" +473 60 model """proje""" +473 60 loss """bceaftersigmoid""" +473 60 regularizer """no""" +473 60 optimizer """adadelta""" +473 60 training_loop """owa""" +473 60 negative_sampler """basic""" +473 60 evaluator """rankbased""" +473 61 dataset """kinships""" +473 61 model """proje""" +473 61 loss """bceaftersigmoid""" +473 61 regularizer """no""" +473 61 optimizer """adadelta""" +473 61 training_loop """owa""" +473 61 negative_sampler """basic""" +473 61 evaluator """rankbased""" +473 62 dataset """kinships""" +473 62 model """proje""" +473 62 loss """bceaftersigmoid""" +473 62 regularizer """no""" +473 62 optimizer """adadelta""" +473 62 training_loop """owa""" +473 62 negative_sampler """basic""" +473 62 evaluator """rankbased""" +473 63 dataset """kinships""" +473 63 model """proje""" +473 63 loss """bceaftersigmoid""" +473 63 regularizer """no""" +473 63 optimizer """adadelta""" +473 63 training_loop """owa""" +473 63 negative_sampler """basic""" +473 63 evaluator """rankbased""" +473 64 dataset """kinships""" +473 64 model """proje""" +473 64 loss 
"""bceaftersigmoid""" +473 64 regularizer """no""" +473 64 optimizer """adadelta""" +473 64 training_loop """owa""" +473 64 negative_sampler """basic""" +473 64 evaluator """rankbased""" +473 65 dataset """kinships""" +473 65 model """proje""" +473 65 loss """bceaftersigmoid""" +473 65 regularizer """no""" +473 65 optimizer """adadelta""" +473 65 training_loop """owa""" +473 65 negative_sampler """basic""" +473 65 evaluator """rankbased""" +473 66 dataset """kinships""" +473 66 model """proje""" +473 66 loss """bceaftersigmoid""" +473 66 regularizer """no""" +473 66 optimizer """adadelta""" +473 66 training_loop """owa""" +473 66 negative_sampler """basic""" +473 66 evaluator """rankbased""" +473 67 dataset """kinships""" +473 67 model """proje""" +473 67 loss """bceaftersigmoid""" +473 67 regularizer """no""" +473 67 optimizer """adadelta""" +473 67 training_loop """owa""" +473 67 negative_sampler """basic""" +473 67 evaluator """rankbased""" +473 68 dataset """kinships""" +473 68 model """proje""" +473 68 loss """bceaftersigmoid""" +473 68 regularizer """no""" +473 68 optimizer """adadelta""" +473 68 training_loop """owa""" +473 68 negative_sampler """basic""" +473 68 evaluator """rankbased""" +473 69 dataset """kinships""" +473 69 model """proje""" +473 69 loss """bceaftersigmoid""" +473 69 regularizer """no""" +473 69 optimizer """adadelta""" +473 69 training_loop """owa""" +473 69 negative_sampler """basic""" +473 69 evaluator """rankbased""" +473 70 dataset """kinships""" +473 70 model """proje""" +473 70 loss """bceaftersigmoid""" +473 70 regularizer """no""" +473 70 optimizer """adadelta""" +473 70 training_loop """owa""" +473 70 negative_sampler """basic""" +473 70 evaluator """rankbased""" +473 71 dataset """kinships""" +473 71 model """proje""" +473 71 loss """bceaftersigmoid""" +473 71 regularizer """no""" +473 71 optimizer """adadelta""" +473 71 training_loop """owa""" +473 71 negative_sampler """basic""" +473 71 evaluator """rankbased""" +473 72 
dataset """kinships""" +473 72 model """proje""" +473 72 loss """bceaftersigmoid""" +473 72 regularizer """no""" +473 72 optimizer """adadelta""" +473 72 training_loop """owa""" +473 72 negative_sampler """basic""" +473 72 evaluator """rankbased""" +473 73 dataset """kinships""" +473 73 model """proje""" +473 73 loss """bceaftersigmoid""" +473 73 regularizer """no""" +473 73 optimizer """adadelta""" +473 73 training_loop """owa""" +473 73 negative_sampler """basic""" +473 73 evaluator """rankbased""" +473 74 dataset """kinships""" +473 74 model """proje""" +473 74 loss """bceaftersigmoid""" +473 74 regularizer """no""" +473 74 optimizer """adadelta""" +473 74 training_loop """owa""" +473 74 negative_sampler """basic""" +473 74 evaluator """rankbased""" +473 75 dataset """kinships""" +473 75 model """proje""" +473 75 loss """bceaftersigmoid""" +473 75 regularizer """no""" +473 75 optimizer """adadelta""" +473 75 training_loop """owa""" +473 75 negative_sampler """basic""" +473 75 evaluator """rankbased""" +473 76 dataset """kinships""" +473 76 model """proje""" +473 76 loss """bceaftersigmoid""" +473 76 regularizer """no""" +473 76 optimizer """adadelta""" +473 76 training_loop """owa""" +473 76 negative_sampler """basic""" +473 76 evaluator """rankbased""" +473 77 dataset """kinships""" +473 77 model """proje""" +473 77 loss """bceaftersigmoid""" +473 77 regularizer """no""" +473 77 optimizer """adadelta""" +473 77 training_loop """owa""" +473 77 negative_sampler """basic""" +473 77 evaluator """rankbased""" +473 78 dataset """kinships""" +473 78 model """proje""" +473 78 loss """bceaftersigmoid""" +473 78 regularizer """no""" +473 78 optimizer """adadelta""" +473 78 training_loop """owa""" +473 78 negative_sampler """basic""" +473 78 evaluator """rankbased""" +473 79 dataset """kinships""" +473 79 model """proje""" +473 79 loss """bceaftersigmoid""" +473 79 regularizer """no""" +473 79 optimizer """adadelta""" +473 79 training_loop """owa""" +473 79 
negative_sampler """basic""" +473 79 evaluator """rankbased""" +473 80 dataset """kinships""" +473 80 model """proje""" +473 80 loss """bceaftersigmoid""" +473 80 regularizer """no""" +473 80 optimizer """adadelta""" +473 80 training_loop """owa""" +473 80 negative_sampler """basic""" +473 80 evaluator """rankbased""" +473 81 dataset """kinships""" +473 81 model """proje""" +473 81 loss """bceaftersigmoid""" +473 81 regularizer """no""" +473 81 optimizer """adadelta""" +473 81 training_loop """owa""" +473 81 negative_sampler """basic""" +473 81 evaluator """rankbased""" +473 82 dataset """kinships""" +473 82 model """proje""" +473 82 loss """bceaftersigmoid""" +473 82 regularizer """no""" +473 82 optimizer """adadelta""" +473 82 training_loop """owa""" +473 82 negative_sampler """basic""" +473 82 evaluator """rankbased""" +473 83 dataset """kinships""" +473 83 model """proje""" +473 83 loss """bceaftersigmoid""" +473 83 regularizer """no""" +473 83 optimizer """adadelta""" +473 83 training_loop """owa""" +473 83 negative_sampler """basic""" +473 83 evaluator """rankbased""" +473 84 dataset """kinships""" +473 84 model """proje""" +473 84 loss """bceaftersigmoid""" +473 84 regularizer """no""" +473 84 optimizer """adadelta""" +473 84 training_loop """owa""" +473 84 negative_sampler """basic""" +473 84 evaluator """rankbased""" +473 85 dataset """kinships""" +473 85 model """proje""" +473 85 loss """bceaftersigmoid""" +473 85 regularizer """no""" +473 85 optimizer """adadelta""" +473 85 training_loop """owa""" +473 85 negative_sampler """basic""" +473 85 evaluator """rankbased""" +473 86 dataset """kinships""" +473 86 model """proje""" +473 86 loss """bceaftersigmoid""" +473 86 regularizer """no""" +473 86 optimizer """adadelta""" +473 86 training_loop """owa""" +473 86 negative_sampler """basic""" +473 86 evaluator """rankbased""" +473 87 dataset """kinships""" +473 87 model """proje""" +473 87 loss """bceaftersigmoid""" +473 87 regularizer """no""" +473 87 
optimizer """adadelta""" +473 87 training_loop """owa""" +473 87 negative_sampler """basic""" +473 87 evaluator """rankbased""" +473 88 dataset """kinships""" +473 88 model """proje""" +473 88 loss """bceaftersigmoid""" +473 88 regularizer """no""" +473 88 optimizer """adadelta""" +473 88 training_loop """owa""" +473 88 negative_sampler """basic""" +473 88 evaluator """rankbased""" +473 89 dataset """kinships""" +473 89 model """proje""" +473 89 loss """bceaftersigmoid""" +473 89 regularizer """no""" +473 89 optimizer """adadelta""" +473 89 training_loop """owa""" +473 89 negative_sampler """basic""" +473 89 evaluator """rankbased""" +473 90 dataset """kinships""" +473 90 model """proje""" +473 90 loss """bceaftersigmoid""" +473 90 regularizer """no""" +473 90 optimizer """adadelta""" +473 90 training_loop """owa""" +473 90 negative_sampler """basic""" +473 90 evaluator """rankbased""" +473 91 dataset """kinships""" +473 91 model """proje""" +473 91 loss """bceaftersigmoid""" +473 91 regularizer """no""" +473 91 optimizer """adadelta""" +473 91 training_loop """owa""" +473 91 negative_sampler """basic""" +473 91 evaluator """rankbased""" +473 92 dataset """kinships""" +473 92 model """proje""" +473 92 loss """bceaftersigmoid""" +473 92 regularizer """no""" +473 92 optimizer """adadelta""" +473 92 training_loop """owa""" +473 92 negative_sampler """basic""" +473 92 evaluator """rankbased""" +473 93 dataset """kinships""" +473 93 model """proje""" +473 93 loss """bceaftersigmoid""" +473 93 regularizer """no""" +473 93 optimizer """adadelta""" +473 93 training_loop """owa""" +473 93 negative_sampler """basic""" +473 93 evaluator """rankbased""" +473 94 dataset """kinships""" +473 94 model """proje""" +473 94 loss """bceaftersigmoid""" +473 94 regularizer """no""" +473 94 optimizer """adadelta""" +473 94 training_loop """owa""" +473 94 negative_sampler """basic""" +473 94 evaluator """rankbased""" +473 95 dataset """kinships""" +473 95 model """proje""" +473 95 loss 
"""bceaftersigmoid""" +473 95 regularizer """no""" +473 95 optimizer """adadelta""" +473 95 training_loop """owa""" +473 95 negative_sampler """basic""" +473 95 evaluator """rankbased""" +473 96 dataset """kinships""" +473 96 model """proje""" +473 96 loss """bceaftersigmoid""" +473 96 regularizer """no""" +473 96 optimizer """adadelta""" +473 96 training_loop """owa""" +473 96 negative_sampler """basic""" +473 96 evaluator """rankbased""" +473 97 dataset """kinships""" +473 97 model """proje""" +473 97 loss """bceaftersigmoid""" +473 97 regularizer """no""" +473 97 optimizer """adadelta""" +473 97 training_loop """owa""" +473 97 negative_sampler """basic""" +473 97 evaluator """rankbased""" +473 98 dataset """kinships""" +473 98 model """proje""" +473 98 loss """bceaftersigmoid""" +473 98 regularizer """no""" +473 98 optimizer """adadelta""" +473 98 training_loop """owa""" +473 98 negative_sampler """basic""" +473 98 evaluator """rankbased""" +473 99 dataset """kinships""" +473 99 model """proje""" +473 99 loss """bceaftersigmoid""" +473 99 regularizer """no""" +473 99 optimizer """adadelta""" +473 99 training_loop """owa""" +473 99 negative_sampler """basic""" +473 99 evaluator """rankbased""" +473 100 dataset """kinships""" +473 100 model """proje""" +473 100 loss """bceaftersigmoid""" +473 100 regularizer """no""" +473 100 optimizer """adadelta""" +473 100 training_loop """owa""" +473 100 negative_sampler """basic""" +473 100 evaluator """rankbased""" +474 1 model.embedding_dim 1.0 +474 1 negative_sampler.num_negs_per_pos 33.0 +474 1 training.batch_size 0.0 +474 2 model.embedding_dim 2.0 +474 2 negative_sampler.num_negs_per_pos 20.0 +474 2 training.batch_size 0.0 +474 3 model.embedding_dim 0.0 +474 3 negative_sampler.num_negs_per_pos 35.0 +474 3 training.batch_size 2.0 +474 4 model.embedding_dim 1.0 +474 4 negative_sampler.num_negs_per_pos 62.0 +474 4 training.batch_size 1.0 +474 5 model.embedding_dim 2.0 +474 5 negative_sampler.num_negs_per_pos 4.0 +474 5 
training.batch_size 1.0 +474 6 model.embedding_dim 2.0 +474 6 negative_sampler.num_negs_per_pos 91.0 +474 6 training.batch_size 2.0 +474 7 model.embedding_dim 0.0 +474 7 negative_sampler.num_negs_per_pos 87.0 +474 7 training.batch_size 0.0 +474 8 model.embedding_dim 2.0 +474 8 negative_sampler.num_negs_per_pos 3.0 +474 8 training.batch_size 0.0 +474 9 model.embedding_dim 0.0 +474 9 negative_sampler.num_negs_per_pos 25.0 +474 9 training.batch_size 1.0 +474 10 model.embedding_dim 1.0 +474 10 negative_sampler.num_negs_per_pos 81.0 +474 10 training.batch_size 2.0 +474 11 model.embedding_dim 1.0 +474 11 negative_sampler.num_negs_per_pos 83.0 +474 11 training.batch_size 2.0 +474 12 model.embedding_dim 2.0 +474 12 negative_sampler.num_negs_per_pos 18.0 +474 12 training.batch_size 2.0 +474 13 model.embedding_dim 0.0 +474 13 negative_sampler.num_negs_per_pos 89.0 +474 13 training.batch_size 0.0 +474 14 model.embedding_dim 1.0 +474 14 negative_sampler.num_negs_per_pos 88.0 +474 14 training.batch_size 1.0 +474 15 model.embedding_dim 2.0 +474 15 negative_sampler.num_negs_per_pos 92.0 +474 15 training.batch_size 0.0 +474 16 model.embedding_dim 0.0 +474 16 negative_sampler.num_negs_per_pos 60.0 +474 16 training.batch_size 1.0 +474 17 model.embedding_dim 0.0 +474 17 negative_sampler.num_negs_per_pos 88.0 +474 17 training.batch_size 2.0 +474 18 model.embedding_dim 2.0 +474 18 negative_sampler.num_negs_per_pos 97.0 +474 18 training.batch_size 0.0 +474 19 model.embedding_dim 1.0 +474 19 negative_sampler.num_negs_per_pos 12.0 +474 19 training.batch_size 0.0 +474 20 model.embedding_dim 1.0 +474 20 negative_sampler.num_negs_per_pos 45.0 +474 20 training.batch_size 0.0 +474 21 model.embedding_dim 2.0 +474 21 negative_sampler.num_negs_per_pos 85.0 +474 21 training.batch_size 2.0 +474 22 model.embedding_dim 0.0 +474 22 negative_sampler.num_negs_per_pos 51.0 +474 22 training.batch_size 1.0 +474 23 model.embedding_dim 2.0 +474 23 negative_sampler.num_negs_per_pos 22.0 +474 23 
training.batch_size 2.0 +474 24 model.embedding_dim 2.0 +474 24 negative_sampler.num_negs_per_pos 24.0 +474 24 training.batch_size 2.0 +474 25 model.embedding_dim 2.0 +474 25 negative_sampler.num_negs_per_pos 97.0 +474 25 training.batch_size 0.0 +474 26 model.embedding_dim 0.0 +474 26 negative_sampler.num_negs_per_pos 97.0 +474 26 training.batch_size 1.0 +474 27 model.embedding_dim 2.0 +474 27 negative_sampler.num_negs_per_pos 57.0 +474 27 training.batch_size 0.0 +474 28 model.embedding_dim 1.0 +474 28 negative_sampler.num_negs_per_pos 52.0 +474 28 training.batch_size 1.0 +474 29 model.embedding_dim 2.0 +474 29 negative_sampler.num_negs_per_pos 11.0 +474 29 training.batch_size 2.0 +474 30 model.embedding_dim 0.0 +474 30 negative_sampler.num_negs_per_pos 42.0 +474 30 training.batch_size 0.0 +474 31 model.embedding_dim 1.0 +474 31 negative_sampler.num_negs_per_pos 70.0 +474 31 training.batch_size 0.0 +474 32 model.embedding_dim 2.0 +474 32 negative_sampler.num_negs_per_pos 98.0 +474 32 training.batch_size 2.0 +474 33 model.embedding_dim 0.0 +474 33 negative_sampler.num_negs_per_pos 58.0 +474 33 training.batch_size 2.0 +474 34 model.embedding_dim 1.0 +474 34 negative_sampler.num_negs_per_pos 51.0 +474 34 training.batch_size 0.0 +474 35 model.embedding_dim 0.0 +474 35 negative_sampler.num_negs_per_pos 25.0 +474 35 training.batch_size 0.0 +474 36 model.embedding_dim 0.0 +474 36 negative_sampler.num_negs_per_pos 2.0 +474 36 training.batch_size 2.0 +474 37 model.embedding_dim 2.0 +474 37 negative_sampler.num_negs_per_pos 47.0 +474 37 training.batch_size 1.0 +474 38 model.embedding_dim 2.0 +474 38 negative_sampler.num_negs_per_pos 20.0 +474 38 training.batch_size 2.0 +474 39 model.embedding_dim 1.0 +474 39 negative_sampler.num_negs_per_pos 2.0 +474 39 training.batch_size 2.0 +474 40 model.embedding_dim 1.0 +474 40 negative_sampler.num_negs_per_pos 20.0 +474 40 training.batch_size 0.0 +474 41 model.embedding_dim 0.0 +474 41 negative_sampler.num_negs_per_pos 26.0 +474 41 
training.batch_size 0.0 +474 42 model.embedding_dim 0.0 +474 42 negative_sampler.num_negs_per_pos 64.0 +474 42 training.batch_size 2.0 +474 43 model.embedding_dim 0.0 +474 43 negative_sampler.num_negs_per_pos 77.0 +474 43 training.batch_size 2.0 +474 44 model.embedding_dim 2.0 +474 44 negative_sampler.num_negs_per_pos 18.0 +474 44 training.batch_size 1.0 +474 45 model.embedding_dim 2.0 +474 45 negative_sampler.num_negs_per_pos 17.0 +474 45 training.batch_size 1.0 +474 46 model.embedding_dim 2.0 +474 46 negative_sampler.num_negs_per_pos 70.0 +474 46 training.batch_size 0.0 +474 47 model.embedding_dim 2.0 +474 47 negative_sampler.num_negs_per_pos 59.0 +474 47 training.batch_size 1.0 +474 48 model.embedding_dim 2.0 +474 48 negative_sampler.num_negs_per_pos 92.0 +474 48 training.batch_size 2.0 +474 49 model.embedding_dim 1.0 +474 49 negative_sampler.num_negs_per_pos 68.0 +474 49 training.batch_size 0.0 +474 50 model.embedding_dim 0.0 +474 50 negative_sampler.num_negs_per_pos 59.0 +474 50 training.batch_size 0.0 +474 51 model.embedding_dim 2.0 +474 51 negative_sampler.num_negs_per_pos 57.0 +474 51 training.batch_size 2.0 +474 52 model.embedding_dim 2.0 +474 52 negative_sampler.num_negs_per_pos 22.0 +474 52 training.batch_size 1.0 +474 53 model.embedding_dim 0.0 +474 53 negative_sampler.num_negs_per_pos 20.0 +474 53 training.batch_size 0.0 +474 54 model.embedding_dim 0.0 +474 54 negative_sampler.num_negs_per_pos 95.0 +474 54 training.batch_size 1.0 +474 55 model.embedding_dim 0.0 +474 55 negative_sampler.num_negs_per_pos 71.0 +474 55 training.batch_size 0.0 +474 56 model.embedding_dim 2.0 +474 56 negative_sampler.num_negs_per_pos 29.0 +474 56 training.batch_size 0.0 +474 57 model.embedding_dim 2.0 +474 57 negative_sampler.num_negs_per_pos 96.0 +474 57 training.batch_size 0.0 +474 58 model.embedding_dim 1.0 +474 58 negative_sampler.num_negs_per_pos 33.0 +474 58 training.batch_size 1.0 +474 59 model.embedding_dim 2.0 +474 59 negative_sampler.num_negs_per_pos 9.0 +474 59 
training.batch_size 1.0 +474 60 model.embedding_dim 0.0 +474 60 negative_sampler.num_negs_per_pos 1.0 +474 60 training.batch_size 0.0 +474 61 model.embedding_dim 2.0 +474 61 negative_sampler.num_negs_per_pos 7.0 +474 61 training.batch_size 0.0 +474 62 model.embedding_dim 0.0 +474 62 negative_sampler.num_negs_per_pos 62.0 +474 62 training.batch_size 0.0 +474 63 model.embedding_dim 0.0 +474 63 negative_sampler.num_negs_per_pos 65.0 +474 63 training.batch_size 0.0 +474 64 model.embedding_dim 2.0 +474 64 negative_sampler.num_negs_per_pos 38.0 +474 64 training.batch_size 0.0 +474 65 model.embedding_dim 0.0 +474 65 negative_sampler.num_negs_per_pos 17.0 +474 65 training.batch_size 2.0 +474 66 model.embedding_dim 2.0 +474 66 negative_sampler.num_negs_per_pos 46.0 +474 66 training.batch_size 0.0 +474 67 model.embedding_dim 1.0 +474 67 negative_sampler.num_negs_per_pos 6.0 +474 67 training.batch_size 1.0 +474 68 model.embedding_dim 1.0 +474 68 negative_sampler.num_negs_per_pos 69.0 +474 68 training.batch_size 0.0 +474 69 model.embedding_dim 0.0 +474 69 negative_sampler.num_negs_per_pos 46.0 +474 69 training.batch_size 0.0 +474 70 model.embedding_dim 1.0 +474 70 negative_sampler.num_negs_per_pos 50.0 +474 70 training.batch_size 1.0 +474 71 model.embedding_dim 2.0 +474 71 negative_sampler.num_negs_per_pos 93.0 +474 71 training.batch_size 1.0 +474 72 model.embedding_dim 2.0 +474 72 negative_sampler.num_negs_per_pos 13.0 +474 72 training.batch_size 0.0 +474 73 model.embedding_dim 1.0 +474 73 negative_sampler.num_negs_per_pos 48.0 +474 73 training.batch_size 2.0 +474 74 model.embedding_dim 1.0 +474 74 negative_sampler.num_negs_per_pos 94.0 +474 74 training.batch_size 1.0 +474 75 model.embedding_dim 0.0 +474 75 negative_sampler.num_negs_per_pos 33.0 +474 75 training.batch_size 1.0 +474 76 model.embedding_dim 2.0 +474 76 negative_sampler.num_negs_per_pos 39.0 +474 76 training.batch_size 0.0 +474 77 model.embedding_dim 1.0 +474 77 negative_sampler.num_negs_per_pos 17.0 +474 77 
training.batch_size 1.0 +474 78 model.embedding_dim 1.0 +474 78 negative_sampler.num_negs_per_pos 51.0 +474 78 training.batch_size 2.0 +474 79 model.embedding_dim 0.0 +474 79 negative_sampler.num_negs_per_pos 27.0 +474 79 training.batch_size 2.0 +474 80 model.embedding_dim 2.0 +474 80 negative_sampler.num_negs_per_pos 85.0 +474 80 training.batch_size 2.0 +474 81 model.embedding_dim 2.0 +474 81 negative_sampler.num_negs_per_pos 21.0 +474 81 training.batch_size 0.0 +474 82 model.embedding_dim 1.0 +474 82 negative_sampler.num_negs_per_pos 19.0 +474 82 training.batch_size 1.0 +474 83 model.embedding_dim 1.0 +474 83 negative_sampler.num_negs_per_pos 49.0 +474 83 training.batch_size 1.0 +474 84 model.embedding_dim 1.0 +474 84 negative_sampler.num_negs_per_pos 96.0 +474 84 training.batch_size 1.0 +474 85 model.embedding_dim 0.0 +474 85 negative_sampler.num_negs_per_pos 63.0 +474 85 training.batch_size 0.0 +474 86 model.embedding_dim 1.0 +474 86 negative_sampler.num_negs_per_pos 7.0 +474 86 training.batch_size 2.0 +474 87 model.embedding_dim 2.0 +474 87 negative_sampler.num_negs_per_pos 9.0 +474 87 training.batch_size 0.0 +474 88 model.embedding_dim 2.0 +474 88 negative_sampler.num_negs_per_pos 54.0 +474 88 training.batch_size 1.0 +474 89 model.embedding_dim 0.0 +474 89 negative_sampler.num_negs_per_pos 78.0 +474 89 training.batch_size 1.0 +474 90 model.embedding_dim 1.0 +474 90 negative_sampler.num_negs_per_pos 80.0 +474 90 training.batch_size 1.0 +474 91 model.embedding_dim 2.0 +474 91 negative_sampler.num_negs_per_pos 44.0 +474 91 training.batch_size 0.0 +474 92 model.embedding_dim 0.0 +474 92 negative_sampler.num_negs_per_pos 98.0 +474 92 training.batch_size 1.0 +474 93 model.embedding_dim 0.0 +474 93 negative_sampler.num_negs_per_pos 84.0 +474 93 training.batch_size 0.0 +474 94 model.embedding_dim 2.0 +474 94 negative_sampler.num_negs_per_pos 43.0 +474 94 training.batch_size 2.0 +474 95 model.embedding_dim 2.0 +474 95 negative_sampler.num_negs_per_pos 11.0 +474 95 
training.batch_size 0.0 +474 96 model.embedding_dim 2.0 +474 96 negative_sampler.num_negs_per_pos 94.0 +474 96 training.batch_size 0.0 +474 97 model.embedding_dim 0.0 +474 97 negative_sampler.num_negs_per_pos 11.0 +474 97 training.batch_size 1.0 +474 98 model.embedding_dim 0.0 +474 98 negative_sampler.num_negs_per_pos 29.0 +474 98 training.batch_size 1.0 +474 99 model.embedding_dim 0.0 +474 99 negative_sampler.num_negs_per_pos 79.0 +474 99 training.batch_size 0.0 +474 100 model.embedding_dim 1.0 +474 100 negative_sampler.num_negs_per_pos 98.0 +474 100 training.batch_size 0.0 +474 1 dataset """kinships""" +474 1 model """proje""" +474 1 loss """softplus""" +474 1 regularizer """no""" +474 1 optimizer """adadelta""" +474 1 training_loop """owa""" +474 1 negative_sampler """basic""" +474 1 evaluator """rankbased""" +474 2 dataset """kinships""" +474 2 model """proje""" +474 2 loss """softplus""" +474 2 regularizer """no""" +474 2 optimizer """adadelta""" +474 2 training_loop """owa""" +474 2 negative_sampler """basic""" +474 2 evaluator """rankbased""" +474 3 dataset """kinships""" +474 3 model """proje""" +474 3 loss """softplus""" +474 3 regularizer """no""" +474 3 optimizer """adadelta""" +474 3 training_loop """owa""" +474 3 negative_sampler """basic""" +474 3 evaluator """rankbased""" +474 4 dataset """kinships""" +474 4 model """proje""" +474 4 loss """softplus""" +474 4 regularizer """no""" +474 4 optimizer """adadelta""" +474 4 training_loop """owa""" +474 4 negative_sampler """basic""" +474 4 evaluator """rankbased""" +474 5 dataset """kinships""" +474 5 model """proje""" +474 5 loss """softplus""" +474 5 regularizer """no""" +474 5 optimizer """adadelta""" +474 5 training_loop """owa""" +474 5 negative_sampler """basic""" +474 5 evaluator """rankbased""" +474 6 dataset """kinships""" +474 6 model """proje""" +474 6 loss """softplus""" +474 6 regularizer """no""" +474 6 optimizer """adadelta""" +474 6 training_loop """owa""" +474 6 negative_sampler 
"""basic""" +474 6 evaluator """rankbased""" +474 7 dataset """kinships""" +474 7 model """proje""" +474 7 loss """softplus""" +474 7 regularizer """no""" +474 7 optimizer """adadelta""" +474 7 training_loop """owa""" +474 7 negative_sampler """basic""" +474 7 evaluator """rankbased""" +474 8 dataset """kinships""" +474 8 model """proje""" +474 8 loss """softplus""" +474 8 regularizer """no""" +474 8 optimizer """adadelta""" +474 8 training_loop """owa""" +474 8 negative_sampler """basic""" +474 8 evaluator """rankbased""" +474 9 dataset """kinships""" +474 9 model """proje""" +474 9 loss """softplus""" +474 9 regularizer """no""" +474 9 optimizer """adadelta""" +474 9 training_loop """owa""" +474 9 negative_sampler """basic""" +474 9 evaluator """rankbased""" +474 10 dataset """kinships""" +474 10 model """proje""" +474 10 loss """softplus""" +474 10 regularizer """no""" +474 10 optimizer """adadelta""" +474 10 training_loop """owa""" +474 10 negative_sampler """basic""" +474 10 evaluator """rankbased""" +474 11 dataset """kinships""" +474 11 model """proje""" +474 11 loss """softplus""" +474 11 regularizer """no""" +474 11 optimizer """adadelta""" +474 11 training_loop """owa""" +474 11 negative_sampler """basic""" +474 11 evaluator """rankbased""" +474 12 dataset """kinships""" +474 12 model """proje""" +474 12 loss """softplus""" +474 12 regularizer """no""" +474 12 optimizer """adadelta""" +474 12 training_loop """owa""" +474 12 negative_sampler """basic""" +474 12 evaluator """rankbased""" +474 13 dataset """kinships""" +474 13 model """proje""" +474 13 loss """softplus""" +474 13 regularizer """no""" +474 13 optimizer """adadelta""" +474 13 training_loop """owa""" +474 13 negative_sampler """basic""" +474 13 evaluator """rankbased""" +474 14 dataset """kinships""" +474 14 model """proje""" +474 14 loss """softplus""" +474 14 regularizer """no""" +474 14 optimizer """adadelta""" +474 14 training_loop """owa""" +474 14 negative_sampler """basic""" +474 14 
evaluator """rankbased""" +474 15 dataset """kinships""" +474 15 model """proje""" +474 15 loss """softplus""" +474 15 regularizer """no""" +474 15 optimizer """adadelta""" +474 15 training_loop """owa""" +474 15 negative_sampler """basic""" +474 15 evaluator """rankbased""" +474 16 dataset """kinships""" +474 16 model """proje""" +474 16 loss """softplus""" +474 16 regularizer """no""" +474 16 optimizer """adadelta""" +474 16 training_loop """owa""" +474 16 negative_sampler """basic""" +474 16 evaluator """rankbased""" +474 17 dataset """kinships""" +474 17 model """proje""" +474 17 loss """softplus""" +474 17 regularizer """no""" +474 17 optimizer """adadelta""" +474 17 training_loop """owa""" +474 17 negative_sampler """basic""" +474 17 evaluator """rankbased""" +474 18 dataset """kinships""" +474 18 model """proje""" +474 18 loss """softplus""" +474 18 regularizer """no""" +474 18 optimizer """adadelta""" +474 18 training_loop """owa""" +474 18 negative_sampler """basic""" +474 18 evaluator """rankbased""" +474 19 dataset """kinships""" +474 19 model """proje""" +474 19 loss """softplus""" +474 19 regularizer """no""" +474 19 optimizer """adadelta""" +474 19 training_loop """owa""" +474 19 negative_sampler """basic""" +474 19 evaluator """rankbased""" +474 20 dataset """kinships""" +474 20 model """proje""" +474 20 loss """softplus""" +474 20 regularizer """no""" +474 20 optimizer """adadelta""" +474 20 training_loop """owa""" +474 20 negative_sampler """basic""" +474 20 evaluator """rankbased""" +474 21 dataset """kinships""" +474 21 model """proje""" +474 21 loss """softplus""" +474 21 regularizer """no""" +474 21 optimizer """adadelta""" +474 21 training_loop """owa""" +474 21 negative_sampler """basic""" +474 21 evaluator """rankbased""" +474 22 dataset """kinships""" +474 22 model """proje""" +474 22 loss """softplus""" +474 22 regularizer """no""" +474 22 optimizer """adadelta""" +474 22 training_loop """owa""" +474 22 negative_sampler """basic""" +474 22 
evaluator """rankbased""" +474 23 dataset """kinships""" +474 23 model """proje""" +474 23 loss """softplus""" +474 23 regularizer """no""" +474 23 optimizer """adadelta""" +474 23 training_loop """owa""" +474 23 negative_sampler """basic""" +474 23 evaluator """rankbased""" +474 24 dataset """kinships""" +474 24 model """proje""" +474 24 loss """softplus""" +474 24 regularizer """no""" +474 24 optimizer """adadelta""" +474 24 training_loop """owa""" +474 24 negative_sampler """basic""" +474 24 evaluator """rankbased""" +474 25 dataset """kinships""" +474 25 model """proje""" +474 25 loss """softplus""" +474 25 regularizer """no""" +474 25 optimizer """adadelta""" +474 25 training_loop """owa""" +474 25 negative_sampler """basic""" +474 25 evaluator """rankbased""" +474 26 dataset """kinships""" +474 26 model """proje""" +474 26 loss """softplus""" +474 26 regularizer """no""" +474 26 optimizer """adadelta""" +474 26 training_loop """owa""" +474 26 negative_sampler """basic""" +474 26 evaluator """rankbased""" +474 27 dataset """kinships""" +474 27 model """proje""" +474 27 loss """softplus""" +474 27 regularizer """no""" +474 27 optimizer """adadelta""" +474 27 training_loop """owa""" +474 27 negative_sampler """basic""" +474 27 evaluator """rankbased""" +474 28 dataset """kinships""" +474 28 model """proje""" +474 28 loss """softplus""" +474 28 regularizer """no""" +474 28 optimizer """adadelta""" +474 28 training_loop """owa""" +474 28 negative_sampler """basic""" +474 28 evaluator """rankbased""" +474 29 dataset """kinships""" +474 29 model """proje""" +474 29 loss """softplus""" +474 29 regularizer """no""" +474 29 optimizer """adadelta""" +474 29 training_loop """owa""" +474 29 negative_sampler """basic""" +474 29 evaluator """rankbased""" +474 30 dataset """kinships""" +474 30 model """proje""" +474 30 loss """softplus""" +474 30 regularizer """no""" +474 30 optimizer """adadelta""" +474 30 training_loop """owa""" +474 30 negative_sampler """basic""" +474 30 
evaluator """rankbased""" +474 31 dataset """kinships""" +474 31 model """proje""" +474 31 loss """softplus""" +474 31 regularizer """no""" +474 31 optimizer """adadelta""" +474 31 training_loop """owa""" +474 31 negative_sampler """basic""" +474 31 evaluator """rankbased""" +474 32 dataset """kinships""" +474 32 model """proje""" +474 32 loss """softplus""" +474 32 regularizer """no""" +474 32 optimizer """adadelta""" +474 32 training_loop """owa""" +474 32 negative_sampler """basic""" +474 32 evaluator """rankbased""" +474 33 dataset """kinships""" +474 33 model """proje""" +474 33 loss """softplus""" +474 33 regularizer """no""" +474 33 optimizer """adadelta""" +474 33 training_loop """owa""" +474 33 negative_sampler """basic""" +474 33 evaluator """rankbased""" +474 34 dataset """kinships""" +474 34 model """proje""" +474 34 loss """softplus""" +474 34 regularizer """no""" +474 34 optimizer """adadelta""" +474 34 training_loop """owa""" +474 34 negative_sampler """basic""" +474 34 evaluator """rankbased""" +474 35 dataset """kinships""" +474 35 model """proje""" +474 35 loss """softplus""" +474 35 regularizer """no""" +474 35 optimizer """adadelta""" +474 35 training_loop """owa""" +474 35 negative_sampler """basic""" +474 35 evaluator """rankbased""" +474 36 dataset """kinships""" +474 36 model """proje""" +474 36 loss """softplus""" +474 36 regularizer """no""" +474 36 optimizer """adadelta""" +474 36 training_loop """owa""" +474 36 negative_sampler """basic""" +474 36 evaluator """rankbased""" +474 37 dataset """kinships""" +474 37 model """proje""" +474 37 loss """softplus""" +474 37 regularizer """no""" +474 37 optimizer """adadelta""" +474 37 training_loop """owa""" +474 37 negative_sampler """basic""" +474 37 evaluator """rankbased""" +474 38 dataset """kinships""" +474 38 model """proje""" +474 38 loss """softplus""" +474 38 regularizer """no""" +474 38 optimizer """adadelta""" +474 38 training_loop """owa""" +474 38 negative_sampler """basic""" +474 38 
evaluator """rankbased""" +474 39 dataset """kinships""" +474 39 model """proje""" +474 39 loss """softplus""" +474 39 regularizer """no""" +474 39 optimizer """adadelta""" +474 39 training_loop """owa""" +474 39 negative_sampler """basic""" +474 39 evaluator """rankbased""" +474 40 dataset """kinships""" +474 40 model """proje""" +474 40 loss """softplus""" +474 40 regularizer """no""" +474 40 optimizer """adadelta""" +474 40 training_loop """owa""" +474 40 negative_sampler """basic""" +474 40 evaluator """rankbased""" +474 41 dataset """kinships""" +474 41 model """proje""" +474 41 loss """softplus""" +474 41 regularizer """no""" +474 41 optimizer """adadelta""" +474 41 training_loop """owa""" +474 41 negative_sampler """basic""" +474 41 evaluator """rankbased""" +474 42 dataset """kinships""" +474 42 model """proje""" +474 42 loss """softplus""" +474 42 regularizer """no""" +474 42 optimizer """adadelta""" +474 42 training_loop """owa""" +474 42 negative_sampler """basic""" +474 42 evaluator """rankbased""" +474 43 dataset """kinships""" +474 43 model """proje""" +474 43 loss """softplus""" +474 43 regularizer """no""" +474 43 optimizer """adadelta""" +474 43 training_loop """owa""" +474 43 negative_sampler """basic""" +474 43 evaluator """rankbased""" +474 44 dataset """kinships""" +474 44 model """proje""" +474 44 loss """softplus""" +474 44 regularizer """no""" +474 44 optimizer """adadelta""" +474 44 training_loop """owa""" +474 44 negative_sampler """basic""" +474 44 evaluator """rankbased""" +474 45 dataset """kinships""" +474 45 model """proje""" +474 45 loss """softplus""" +474 45 regularizer """no""" +474 45 optimizer """adadelta""" +474 45 training_loop """owa""" +474 45 negative_sampler """basic""" +474 45 evaluator """rankbased""" +474 46 dataset """kinships""" +474 46 model """proje""" +474 46 loss """softplus""" +474 46 regularizer """no""" +474 46 optimizer """adadelta""" +474 46 training_loop """owa""" +474 46 negative_sampler """basic""" +474 46 
evaluator """rankbased""" +474 47 dataset """kinships""" +474 47 model """proje""" +474 47 loss """softplus""" +474 47 regularizer """no""" +474 47 optimizer """adadelta""" +474 47 training_loop """owa""" +474 47 negative_sampler """basic""" +474 47 evaluator """rankbased""" +474 48 dataset """kinships""" +474 48 model """proje""" +474 48 loss """softplus""" +474 48 regularizer """no""" +474 48 optimizer """adadelta""" +474 48 training_loop """owa""" +474 48 negative_sampler """basic""" +474 48 evaluator """rankbased""" +474 49 dataset """kinships""" +474 49 model """proje""" +474 49 loss """softplus""" +474 49 regularizer """no""" +474 49 optimizer """adadelta""" +474 49 training_loop """owa""" +474 49 negative_sampler """basic""" +474 49 evaluator """rankbased""" +474 50 dataset """kinships""" +474 50 model """proje""" +474 50 loss """softplus""" +474 50 regularizer """no""" +474 50 optimizer """adadelta""" +474 50 training_loop """owa""" +474 50 negative_sampler """basic""" +474 50 evaluator """rankbased""" +474 51 dataset """kinships""" +474 51 model """proje""" +474 51 loss """softplus""" +474 51 regularizer """no""" +474 51 optimizer """adadelta""" +474 51 training_loop """owa""" +474 51 negative_sampler """basic""" +474 51 evaluator """rankbased""" +474 52 dataset """kinships""" +474 52 model """proje""" +474 52 loss """softplus""" +474 52 regularizer """no""" +474 52 optimizer """adadelta""" +474 52 training_loop """owa""" +474 52 negative_sampler """basic""" +474 52 evaluator """rankbased""" +474 53 dataset """kinships""" +474 53 model """proje""" +474 53 loss """softplus""" +474 53 regularizer """no""" +474 53 optimizer """adadelta""" +474 53 training_loop """owa""" +474 53 negative_sampler """basic""" +474 53 evaluator """rankbased""" +474 54 dataset """kinships""" +474 54 model """proje""" +474 54 loss """softplus""" +474 54 regularizer """no""" +474 54 optimizer """adadelta""" +474 54 training_loop """owa""" +474 54 negative_sampler """basic""" +474 54 
evaluator """rankbased""" +474 55 dataset """kinships""" +474 55 model """proje""" +474 55 loss """softplus""" +474 55 regularizer """no""" +474 55 optimizer """adadelta""" +474 55 training_loop """owa""" +474 55 negative_sampler """basic""" +474 55 evaluator """rankbased""" +474 56 dataset """kinships""" +474 56 model """proje""" +474 56 loss """softplus""" +474 56 regularizer """no""" +474 56 optimizer """adadelta""" +474 56 training_loop """owa""" +474 56 negative_sampler """basic""" +474 56 evaluator """rankbased""" +474 57 dataset """kinships""" +474 57 model """proje""" +474 57 loss """softplus""" +474 57 regularizer """no""" +474 57 optimizer """adadelta""" +474 57 training_loop """owa""" +474 57 negative_sampler """basic""" +474 57 evaluator """rankbased""" +474 58 dataset """kinships""" +474 58 model """proje""" +474 58 loss """softplus""" +474 58 regularizer """no""" +474 58 optimizer """adadelta""" +474 58 training_loop """owa""" +474 58 negative_sampler """basic""" +474 58 evaluator """rankbased""" +474 59 dataset """kinships""" +474 59 model """proje""" +474 59 loss """softplus""" +474 59 regularizer """no""" +474 59 optimizer """adadelta""" +474 59 training_loop """owa""" +474 59 negative_sampler """basic""" +474 59 evaluator """rankbased""" +474 60 dataset """kinships""" +474 60 model """proje""" +474 60 loss """softplus""" +474 60 regularizer """no""" +474 60 optimizer """adadelta""" +474 60 training_loop """owa""" +474 60 negative_sampler """basic""" +474 60 evaluator """rankbased""" +474 61 dataset """kinships""" +474 61 model """proje""" +474 61 loss """softplus""" +474 61 regularizer """no""" +474 61 optimizer """adadelta""" +474 61 training_loop """owa""" +474 61 negative_sampler """basic""" +474 61 evaluator """rankbased""" +474 62 dataset """kinships""" +474 62 model """proje""" +474 62 loss """softplus""" +474 62 regularizer """no""" +474 62 optimizer """adadelta""" +474 62 training_loop """owa""" +474 62 negative_sampler """basic""" +474 62 
evaluator """rankbased""" +474 63 dataset """kinships""" +474 63 model """proje""" +474 63 loss """softplus""" +474 63 regularizer """no""" +474 63 optimizer """adadelta""" +474 63 training_loop """owa""" +474 63 negative_sampler """basic""" +474 63 evaluator """rankbased""" +474 64 dataset """kinships""" +474 64 model """proje""" +474 64 loss """softplus""" +474 64 regularizer """no""" +474 64 optimizer """adadelta""" +474 64 training_loop """owa""" +474 64 negative_sampler """basic""" +474 64 evaluator """rankbased""" +474 65 dataset """kinships""" +474 65 model """proje""" +474 65 loss """softplus""" +474 65 regularizer """no""" +474 65 optimizer """adadelta""" +474 65 training_loop """owa""" +474 65 negative_sampler """basic""" +474 65 evaluator """rankbased""" +474 66 dataset """kinships""" +474 66 model """proje""" +474 66 loss """softplus""" +474 66 regularizer """no""" +474 66 optimizer """adadelta""" +474 66 training_loop """owa""" +474 66 negative_sampler """basic""" +474 66 evaluator """rankbased""" +474 67 dataset """kinships""" +474 67 model """proje""" +474 67 loss """softplus""" +474 67 regularizer """no""" +474 67 optimizer """adadelta""" +474 67 training_loop """owa""" +474 67 negative_sampler """basic""" +474 67 evaluator """rankbased""" +474 68 dataset """kinships""" +474 68 model """proje""" +474 68 loss """softplus""" +474 68 regularizer """no""" +474 68 optimizer """adadelta""" +474 68 training_loop """owa""" +474 68 negative_sampler """basic""" +474 68 evaluator """rankbased""" +474 69 dataset """kinships""" +474 69 model """proje""" +474 69 loss """softplus""" +474 69 regularizer """no""" +474 69 optimizer """adadelta""" +474 69 training_loop """owa""" +474 69 negative_sampler """basic""" +474 69 evaluator """rankbased""" +474 70 dataset """kinships""" +474 70 model """proje""" +474 70 loss """softplus""" +474 70 regularizer """no""" +474 70 optimizer """adadelta""" +474 70 training_loop """owa""" +474 70 negative_sampler """basic""" +474 70 
evaluator """rankbased""" +474 71 dataset """kinships""" +474 71 model """proje""" +474 71 loss """softplus""" +474 71 regularizer """no""" +474 71 optimizer """adadelta""" +474 71 training_loop """owa""" +474 71 negative_sampler """basic""" +474 71 evaluator """rankbased""" +474 72 dataset """kinships""" +474 72 model """proje""" +474 72 loss """softplus""" +474 72 regularizer """no""" +474 72 optimizer """adadelta""" +474 72 training_loop """owa""" +474 72 negative_sampler """basic""" +474 72 evaluator """rankbased""" +474 73 dataset """kinships""" +474 73 model """proje""" +474 73 loss """softplus""" +474 73 regularizer """no""" +474 73 optimizer """adadelta""" +474 73 training_loop """owa""" +474 73 negative_sampler """basic""" +474 73 evaluator """rankbased""" +474 74 dataset """kinships""" +474 74 model """proje""" +474 74 loss """softplus""" +474 74 regularizer """no""" +474 74 optimizer """adadelta""" +474 74 training_loop """owa""" +474 74 negative_sampler """basic""" +474 74 evaluator """rankbased""" +474 75 dataset """kinships""" +474 75 model """proje""" +474 75 loss """softplus""" +474 75 regularizer """no""" +474 75 optimizer """adadelta""" +474 75 training_loop """owa""" +474 75 negative_sampler """basic""" +474 75 evaluator """rankbased""" +474 76 dataset """kinships""" +474 76 model """proje""" +474 76 loss """softplus""" +474 76 regularizer """no""" +474 76 optimizer """adadelta""" +474 76 training_loop """owa""" +474 76 negative_sampler """basic""" +474 76 evaluator """rankbased""" +474 77 dataset """kinships""" +474 77 model """proje""" +474 77 loss """softplus""" +474 77 regularizer """no""" +474 77 optimizer """adadelta""" +474 77 training_loop """owa""" +474 77 negative_sampler """basic""" +474 77 evaluator """rankbased""" +474 78 dataset """kinships""" +474 78 model """proje""" +474 78 loss """softplus""" +474 78 regularizer """no""" +474 78 optimizer """adadelta""" +474 78 training_loop """owa""" +474 78 negative_sampler """basic""" +474 78 
evaluator """rankbased""" +474 79 dataset """kinships""" +474 79 model """proje""" +474 79 loss """softplus""" +474 79 regularizer """no""" +474 79 optimizer """adadelta""" +474 79 training_loop """owa""" +474 79 negative_sampler """basic""" +474 79 evaluator """rankbased""" +474 80 dataset """kinships""" +474 80 model """proje""" +474 80 loss """softplus""" +474 80 regularizer """no""" +474 80 optimizer """adadelta""" +474 80 training_loop """owa""" +474 80 negative_sampler """basic""" +474 80 evaluator """rankbased""" +474 81 dataset """kinships""" +474 81 model """proje""" +474 81 loss """softplus""" +474 81 regularizer """no""" +474 81 optimizer """adadelta""" +474 81 training_loop """owa""" +474 81 negative_sampler """basic""" +474 81 evaluator """rankbased""" +474 82 dataset """kinships""" +474 82 model """proje""" +474 82 loss """softplus""" +474 82 regularizer """no""" +474 82 optimizer """adadelta""" +474 82 training_loop """owa""" +474 82 negative_sampler """basic""" +474 82 evaluator """rankbased""" +474 83 dataset """kinships""" +474 83 model """proje""" +474 83 loss """softplus""" +474 83 regularizer """no""" +474 83 optimizer """adadelta""" +474 83 training_loop """owa""" +474 83 negative_sampler """basic""" +474 83 evaluator """rankbased""" +474 84 dataset """kinships""" +474 84 model """proje""" +474 84 loss """softplus""" +474 84 regularizer """no""" +474 84 optimizer """adadelta""" +474 84 training_loop """owa""" +474 84 negative_sampler """basic""" +474 84 evaluator """rankbased""" +474 85 dataset """kinships""" +474 85 model """proje""" +474 85 loss """softplus""" +474 85 regularizer """no""" +474 85 optimizer """adadelta""" +474 85 training_loop """owa""" +474 85 negative_sampler """basic""" +474 85 evaluator """rankbased""" +474 86 dataset """kinships""" +474 86 model """proje""" +474 86 loss """softplus""" +474 86 regularizer """no""" +474 86 optimizer """adadelta""" +474 86 training_loop """owa""" +474 86 negative_sampler """basic""" +474 86 
evaluator """rankbased""" +474 87 dataset """kinships""" +474 87 model """proje""" +474 87 loss """softplus""" +474 87 regularizer """no""" +474 87 optimizer """adadelta""" +474 87 training_loop """owa""" +474 87 negative_sampler """basic""" +474 87 evaluator """rankbased""" +474 88 dataset """kinships""" +474 88 model """proje""" +474 88 loss """softplus""" +474 88 regularizer """no""" +474 88 optimizer """adadelta""" +474 88 training_loop """owa""" +474 88 negative_sampler """basic""" +474 88 evaluator """rankbased""" +474 89 dataset """kinships""" +474 89 model """proje""" +474 89 loss """softplus""" +474 89 regularizer """no""" +474 89 optimizer """adadelta""" +474 89 training_loop """owa""" +474 89 negative_sampler """basic""" +474 89 evaluator """rankbased""" +474 90 dataset """kinships""" +474 90 model """proje""" +474 90 loss """softplus""" +474 90 regularizer """no""" +474 90 optimizer """adadelta""" +474 90 training_loop """owa""" +474 90 negative_sampler """basic""" +474 90 evaluator """rankbased""" +474 91 dataset """kinships""" +474 91 model """proje""" +474 91 loss """softplus""" +474 91 regularizer """no""" +474 91 optimizer """adadelta""" +474 91 training_loop """owa""" +474 91 negative_sampler """basic""" +474 91 evaluator """rankbased""" +474 92 dataset """kinships""" +474 92 model """proje""" +474 92 loss """softplus""" +474 92 regularizer """no""" +474 92 optimizer """adadelta""" +474 92 training_loop """owa""" +474 92 negative_sampler """basic""" +474 92 evaluator """rankbased""" +474 93 dataset """kinships""" +474 93 model """proje""" +474 93 loss """softplus""" +474 93 regularizer """no""" +474 93 optimizer """adadelta""" +474 93 training_loop """owa""" +474 93 negative_sampler """basic""" +474 93 evaluator """rankbased""" +474 94 dataset """kinships""" +474 94 model """proje""" +474 94 loss """softplus""" +474 94 regularizer """no""" +474 94 optimizer """adadelta""" +474 94 training_loop """owa""" +474 94 negative_sampler """basic""" +474 94 
evaluator """rankbased""" +474 95 dataset """kinships""" +474 95 model """proje""" +474 95 loss """softplus""" +474 95 regularizer """no""" +474 95 optimizer """adadelta""" +474 95 training_loop """owa""" +474 95 negative_sampler """basic""" +474 95 evaluator """rankbased""" +474 96 dataset """kinships""" +474 96 model """proje""" +474 96 loss """softplus""" +474 96 regularizer """no""" +474 96 optimizer """adadelta""" +474 96 training_loop """owa""" +474 96 negative_sampler """basic""" +474 96 evaluator """rankbased""" +474 97 dataset """kinships""" +474 97 model """proje""" +474 97 loss """softplus""" +474 97 regularizer """no""" +474 97 optimizer """adadelta""" +474 97 training_loop """owa""" +474 97 negative_sampler """basic""" +474 97 evaluator """rankbased""" +474 98 dataset """kinships""" +474 98 model """proje""" +474 98 loss """softplus""" +474 98 regularizer """no""" +474 98 optimizer """adadelta""" +474 98 training_loop """owa""" +474 98 negative_sampler """basic""" +474 98 evaluator """rankbased""" +474 99 dataset """kinships""" +474 99 model """proje""" +474 99 loss """softplus""" +474 99 regularizer """no""" +474 99 optimizer """adadelta""" +474 99 training_loop """owa""" +474 99 negative_sampler """basic""" +474 99 evaluator """rankbased""" +474 100 dataset """kinships""" +474 100 model """proje""" +474 100 loss """softplus""" +474 100 regularizer """no""" +474 100 optimizer """adadelta""" +474 100 training_loop """owa""" +474 100 negative_sampler """basic""" +474 100 evaluator """rankbased""" +475 1 model.embedding_dim 2.0 +475 1 negative_sampler.num_negs_per_pos 32.0 +475 1 training.batch_size 1.0 +475 2 model.embedding_dim 1.0 +475 2 negative_sampler.num_negs_per_pos 65.0 +475 2 training.batch_size 0.0 +475 3 model.embedding_dim 2.0 +475 3 negative_sampler.num_negs_per_pos 19.0 +475 3 training.batch_size 2.0 +475 4 model.embedding_dim 1.0 +475 4 negative_sampler.num_negs_per_pos 56.0 +475 4 training.batch_size 0.0 +475 5 model.embedding_dim 1.0 
+475 5 negative_sampler.num_negs_per_pos 48.0 +475 5 training.batch_size 0.0 +475 6 model.embedding_dim 2.0 +475 6 negative_sampler.num_negs_per_pos 47.0 +475 6 training.batch_size 1.0 +475 7 model.embedding_dim 2.0 +475 7 negative_sampler.num_negs_per_pos 46.0 +475 7 training.batch_size 2.0 +475 8 model.embedding_dim 1.0 +475 8 negative_sampler.num_negs_per_pos 46.0 +475 8 training.batch_size 0.0 +475 9 model.embedding_dim 1.0 +475 9 negative_sampler.num_negs_per_pos 76.0 +475 9 training.batch_size 0.0 +475 10 model.embedding_dim 0.0 +475 10 negative_sampler.num_negs_per_pos 88.0 +475 10 training.batch_size 2.0 +475 11 model.embedding_dim 0.0 +475 11 negative_sampler.num_negs_per_pos 80.0 +475 11 training.batch_size 0.0 +475 12 model.embedding_dim 1.0 +475 12 negative_sampler.num_negs_per_pos 75.0 +475 12 training.batch_size 1.0 +475 13 model.embedding_dim 0.0 +475 13 negative_sampler.num_negs_per_pos 37.0 +475 13 training.batch_size 0.0 +475 14 model.embedding_dim 0.0 +475 14 negative_sampler.num_negs_per_pos 90.0 +475 14 training.batch_size 2.0 +475 15 model.embedding_dim 1.0 +475 15 negative_sampler.num_negs_per_pos 29.0 +475 15 training.batch_size 0.0 +475 16 model.embedding_dim 0.0 +475 16 negative_sampler.num_negs_per_pos 19.0 +475 16 training.batch_size 1.0 +475 17 model.embedding_dim 0.0 +475 17 negative_sampler.num_negs_per_pos 50.0 +475 17 training.batch_size 2.0 +475 18 model.embedding_dim 2.0 +475 18 negative_sampler.num_negs_per_pos 63.0 +475 18 training.batch_size 0.0 +475 19 model.embedding_dim 1.0 +475 19 negative_sampler.num_negs_per_pos 73.0 +475 19 training.batch_size 1.0 +475 20 model.embedding_dim 1.0 +475 20 negative_sampler.num_negs_per_pos 36.0 +475 20 training.batch_size 2.0 +475 21 model.embedding_dim 0.0 +475 21 negative_sampler.num_negs_per_pos 83.0 +475 21 training.batch_size 1.0 +475 22 model.embedding_dim 0.0 +475 22 negative_sampler.num_negs_per_pos 13.0 +475 22 training.batch_size 2.0 +475 23 model.embedding_dim 1.0 +475 23 
negative_sampler.num_negs_per_pos 83.0 +475 23 training.batch_size 1.0 +475 24 model.embedding_dim 0.0 +475 24 negative_sampler.num_negs_per_pos 79.0 +475 24 training.batch_size 2.0 +475 25 model.embedding_dim 2.0 +475 25 negative_sampler.num_negs_per_pos 28.0 +475 25 training.batch_size 0.0 +475 26 model.embedding_dim 1.0 +475 26 negative_sampler.num_negs_per_pos 43.0 +475 26 training.batch_size 0.0 +475 27 model.embedding_dim 1.0 +475 27 negative_sampler.num_negs_per_pos 47.0 +475 27 training.batch_size 2.0 +475 28 model.embedding_dim 2.0 +475 28 negative_sampler.num_negs_per_pos 18.0 +475 28 training.batch_size 0.0 +475 29 model.embedding_dim 2.0 +475 29 negative_sampler.num_negs_per_pos 22.0 +475 29 training.batch_size 0.0 +475 30 model.embedding_dim 1.0 +475 30 negative_sampler.num_negs_per_pos 92.0 +475 30 training.batch_size 0.0 +475 31 model.embedding_dim 2.0 +475 31 negative_sampler.num_negs_per_pos 5.0 +475 31 training.batch_size 0.0 +475 32 model.embedding_dim 0.0 +475 32 negative_sampler.num_negs_per_pos 88.0 +475 32 training.batch_size 1.0 +475 33 model.embedding_dim 0.0 +475 33 negative_sampler.num_negs_per_pos 28.0 +475 33 training.batch_size 1.0 +475 34 model.embedding_dim 1.0 +475 34 negative_sampler.num_negs_per_pos 28.0 +475 34 training.batch_size 2.0 +475 35 model.embedding_dim 1.0 +475 35 negative_sampler.num_negs_per_pos 86.0 +475 35 training.batch_size 1.0 +475 36 model.embedding_dim 1.0 +475 36 negative_sampler.num_negs_per_pos 35.0 +475 36 training.batch_size 2.0 +475 37 model.embedding_dim 0.0 +475 37 negative_sampler.num_negs_per_pos 99.0 +475 37 training.batch_size 0.0 +475 38 model.embedding_dim 0.0 +475 38 negative_sampler.num_negs_per_pos 75.0 +475 38 training.batch_size 0.0 +475 39 model.embedding_dim 0.0 +475 39 negative_sampler.num_negs_per_pos 56.0 +475 39 training.batch_size 1.0 +475 40 model.embedding_dim 1.0 +475 40 negative_sampler.num_negs_per_pos 47.0 +475 40 training.batch_size 0.0 +475 41 model.embedding_dim 1.0 +475 41 
negative_sampler.num_negs_per_pos 56.0 +475 41 training.batch_size 1.0 +475 42 model.embedding_dim 1.0 +475 42 negative_sampler.num_negs_per_pos 55.0 +475 42 training.batch_size 0.0 +475 43 model.embedding_dim 1.0 +475 43 negative_sampler.num_negs_per_pos 12.0 +475 43 training.batch_size 2.0 +475 44 model.embedding_dim 2.0 +475 44 negative_sampler.num_negs_per_pos 60.0 +475 44 training.batch_size 2.0 +475 45 model.embedding_dim 1.0 +475 45 negative_sampler.num_negs_per_pos 74.0 +475 45 training.batch_size 2.0 +475 46 model.embedding_dim 0.0 +475 46 negative_sampler.num_negs_per_pos 19.0 +475 46 training.batch_size 1.0 +475 47 model.embedding_dim 1.0 +475 47 negative_sampler.num_negs_per_pos 56.0 +475 47 training.batch_size 2.0 +475 48 model.embedding_dim 1.0 +475 48 negative_sampler.num_negs_per_pos 55.0 +475 48 training.batch_size 0.0 +475 49 model.embedding_dim 2.0 +475 49 negative_sampler.num_negs_per_pos 69.0 +475 49 training.batch_size 1.0 +475 50 model.embedding_dim 1.0 +475 50 negative_sampler.num_negs_per_pos 24.0 +475 50 training.batch_size 0.0 +475 51 model.embedding_dim 0.0 +475 51 negative_sampler.num_negs_per_pos 31.0 +475 51 training.batch_size 0.0 +475 52 model.embedding_dim 2.0 +475 52 negative_sampler.num_negs_per_pos 55.0 +475 52 training.batch_size 1.0 +475 53 model.embedding_dim 2.0 +475 53 negative_sampler.num_negs_per_pos 82.0 +475 53 training.batch_size 1.0 +475 54 model.embedding_dim 1.0 +475 54 negative_sampler.num_negs_per_pos 79.0 +475 54 training.batch_size 2.0 +475 55 model.embedding_dim 0.0 +475 55 negative_sampler.num_negs_per_pos 3.0 +475 55 training.batch_size 1.0 +475 56 model.embedding_dim 1.0 +475 56 negative_sampler.num_negs_per_pos 66.0 +475 56 training.batch_size 2.0 +475 57 model.embedding_dim 2.0 +475 57 negative_sampler.num_negs_per_pos 34.0 +475 57 training.batch_size 1.0 +475 58 model.embedding_dim 1.0 +475 58 negative_sampler.num_negs_per_pos 13.0 +475 58 training.batch_size 2.0 +475 59 model.embedding_dim 0.0 +475 59 
negative_sampler.num_negs_per_pos 87.0 +475 59 training.batch_size 2.0 +475 60 model.embedding_dim 2.0 +475 60 negative_sampler.num_negs_per_pos 68.0 +475 60 training.batch_size 0.0 +475 61 model.embedding_dim 0.0 +475 61 negative_sampler.num_negs_per_pos 57.0 +475 61 training.batch_size 1.0 +475 62 model.embedding_dim 1.0 +475 62 negative_sampler.num_negs_per_pos 95.0 +475 62 training.batch_size 2.0 +475 63 model.embedding_dim 2.0 +475 63 negative_sampler.num_negs_per_pos 5.0 +475 63 training.batch_size 2.0 +475 64 model.embedding_dim 0.0 +475 64 negative_sampler.num_negs_per_pos 78.0 +475 64 training.batch_size 2.0 +475 65 model.embedding_dim 2.0 +475 65 negative_sampler.num_negs_per_pos 87.0 +475 65 training.batch_size 1.0 +475 66 model.embedding_dim 1.0 +475 66 negative_sampler.num_negs_per_pos 70.0 +475 66 training.batch_size 1.0 +475 67 model.embedding_dim 0.0 +475 67 negative_sampler.num_negs_per_pos 90.0 +475 67 training.batch_size 0.0 +475 68 model.embedding_dim 0.0 +475 68 negative_sampler.num_negs_per_pos 96.0 +475 68 training.batch_size 0.0 +475 69 model.embedding_dim 1.0 +475 69 negative_sampler.num_negs_per_pos 25.0 +475 69 training.batch_size 2.0 +475 70 model.embedding_dim 1.0 +475 70 negative_sampler.num_negs_per_pos 54.0 +475 70 training.batch_size 1.0 +475 71 model.embedding_dim 1.0 +475 71 negative_sampler.num_negs_per_pos 8.0 +475 71 training.batch_size 1.0 +475 72 model.embedding_dim 2.0 +475 72 negative_sampler.num_negs_per_pos 49.0 +475 72 training.batch_size 1.0 +475 73 model.embedding_dim 2.0 +475 73 negative_sampler.num_negs_per_pos 35.0 +475 73 training.batch_size 0.0 +475 74 model.embedding_dim 1.0 +475 74 negative_sampler.num_negs_per_pos 96.0 +475 74 training.batch_size 1.0 +475 75 model.embedding_dim 2.0 +475 75 negative_sampler.num_negs_per_pos 1.0 +475 75 training.batch_size 0.0 +475 76 model.embedding_dim 2.0 +475 76 negative_sampler.num_negs_per_pos 67.0 +475 76 training.batch_size 2.0 +475 77 model.embedding_dim 0.0 +475 77 
negative_sampler.num_negs_per_pos 50.0 +475 77 training.batch_size 0.0 +475 78 model.embedding_dim 0.0 +475 78 negative_sampler.num_negs_per_pos 9.0 +475 78 training.batch_size 2.0 +475 79 model.embedding_dim 0.0 +475 79 negative_sampler.num_negs_per_pos 73.0 +475 79 training.batch_size 2.0 +475 80 model.embedding_dim 0.0 +475 80 negative_sampler.num_negs_per_pos 21.0 +475 80 training.batch_size 0.0 +475 81 model.embedding_dim 2.0 +475 81 negative_sampler.num_negs_per_pos 57.0 +475 81 training.batch_size 1.0 +475 82 model.embedding_dim 2.0 +475 82 negative_sampler.num_negs_per_pos 87.0 +475 82 training.batch_size 0.0 +475 83 model.embedding_dim 1.0 +475 83 negative_sampler.num_negs_per_pos 85.0 +475 83 training.batch_size 2.0 +475 84 model.embedding_dim 1.0 +475 84 negative_sampler.num_negs_per_pos 24.0 +475 84 training.batch_size 1.0 +475 85 model.embedding_dim 2.0 +475 85 negative_sampler.num_negs_per_pos 12.0 +475 85 training.batch_size 2.0 +475 86 model.embedding_dim 2.0 +475 86 negative_sampler.num_negs_per_pos 13.0 +475 86 training.batch_size 2.0 +475 87 model.embedding_dim 0.0 +475 87 negative_sampler.num_negs_per_pos 70.0 +475 87 training.batch_size 2.0 +475 88 model.embedding_dim 0.0 +475 88 negative_sampler.num_negs_per_pos 40.0 +475 88 training.batch_size 1.0 +475 89 model.embedding_dim 1.0 +475 89 negative_sampler.num_negs_per_pos 93.0 +475 89 training.batch_size 0.0 +475 90 model.embedding_dim 0.0 +475 90 negative_sampler.num_negs_per_pos 5.0 +475 90 training.batch_size 2.0 +475 91 model.embedding_dim 1.0 +475 91 negative_sampler.num_negs_per_pos 23.0 +475 91 training.batch_size 2.0 +475 92 model.embedding_dim 2.0 +475 92 negative_sampler.num_negs_per_pos 38.0 +475 92 training.batch_size 0.0 +475 93 model.embedding_dim 2.0 +475 93 negative_sampler.num_negs_per_pos 74.0 +475 93 training.batch_size 1.0 +475 94 model.embedding_dim 0.0 +475 94 negative_sampler.num_negs_per_pos 49.0 +475 94 training.batch_size 2.0 +475 95 model.embedding_dim 2.0 +475 95 
negative_sampler.num_negs_per_pos 1.0 +475 95 training.batch_size 0.0 +475 96 model.embedding_dim 1.0 +475 96 negative_sampler.num_negs_per_pos 67.0 +475 96 training.batch_size 1.0 +475 97 model.embedding_dim 0.0 +475 97 negative_sampler.num_negs_per_pos 41.0 +475 97 training.batch_size 2.0 +475 98 model.embedding_dim 2.0 +475 98 negative_sampler.num_negs_per_pos 23.0 +475 98 training.batch_size 2.0 +475 99 model.embedding_dim 1.0 +475 99 negative_sampler.num_negs_per_pos 43.0 +475 99 training.batch_size 2.0 +475 100 model.embedding_dim 1.0 +475 100 negative_sampler.num_negs_per_pos 31.0 +475 100 training.batch_size 2.0 +475 1 dataset """kinships""" +475 1 model """proje""" +475 1 loss """bceaftersigmoid""" +475 1 regularizer """no""" +475 1 optimizer """adadelta""" +475 1 training_loop """owa""" +475 1 negative_sampler """basic""" +475 1 evaluator """rankbased""" +475 2 dataset """kinships""" +475 2 model """proje""" +475 2 loss """bceaftersigmoid""" +475 2 regularizer """no""" +475 2 optimizer """adadelta""" +475 2 training_loop """owa""" +475 2 negative_sampler """basic""" +475 2 evaluator """rankbased""" +475 3 dataset """kinships""" +475 3 model """proje""" +475 3 loss """bceaftersigmoid""" +475 3 regularizer """no""" +475 3 optimizer """adadelta""" +475 3 training_loop """owa""" +475 3 negative_sampler """basic""" +475 3 evaluator """rankbased""" +475 4 dataset """kinships""" +475 4 model """proje""" +475 4 loss """bceaftersigmoid""" +475 4 regularizer """no""" +475 4 optimizer """adadelta""" +475 4 training_loop """owa""" +475 4 negative_sampler """basic""" +475 4 evaluator """rankbased""" +475 5 dataset """kinships""" +475 5 model """proje""" +475 5 loss """bceaftersigmoid""" +475 5 regularizer """no""" +475 5 optimizer """adadelta""" +475 5 training_loop """owa""" +475 5 negative_sampler """basic""" +475 5 evaluator """rankbased""" +475 6 dataset """kinships""" +475 6 model """proje""" +475 6 loss """bceaftersigmoid""" +475 6 regularizer """no""" +475 6 
optimizer """adadelta""" +475 6 training_loop """owa""" +475 6 negative_sampler """basic""" +475 6 evaluator """rankbased""" +475 7 dataset """kinships""" +475 7 model """proje""" +475 7 loss """bceaftersigmoid""" +475 7 regularizer """no""" +475 7 optimizer """adadelta""" +475 7 training_loop """owa""" +475 7 negative_sampler """basic""" +475 7 evaluator """rankbased""" +475 8 dataset """kinships""" +475 8 model """proje""" +475 8 loss """bceaftersigmoid""" +475 8 regularizer """no""" +475 8 optimizer """adadelta""" +475 8 training_loop """owa""" +475 8 negative_sampler """basic""" +475 8 evaluator """rankbased""" +475 9 dataset """kinships""" +475 9 model """proje""" +475 9 loss """bceaftersigmoid""" +475 9 regularizer """no""" +475 9 optimizer """adadelta""" +475 9 training_loop """owa""" +475 9 negative_sampler """basic""" +475 9 evaluator """rankbased""" +475 10 dataset """kinships""" +475 10 model """proje""" +475 10 loss """bceaftersigmoid""" +475 10 regularizer """no""" +475 10 optimizer """adadelta""" +475 10 training_loop """owa""" +475 10 negative_sampler """basic""" +475 10 evaluator """rankbased""" +475 11 dataset """kinships""" +475 11 model """proje""" +475 11 loss """bceaftersigmoid""" +475 11 regularizer """no""" +475 11 optimizer """adadelta""" +475 11 training_loop """owa""" +475 11 negative_sampler """basic""" +475 11 evaluator """rankbased""" +475 12 dataset """kinships""" +475 12 model """proje""" +475 12 loss """bceaftersigmoid""" +475 12 regularizer """no""" +475 12 optimizer """adadelta""" +475 12 training_loop """owa""" +475 12 negative_sampler """basic""" +475 12 evaluator """rankbased""" +475 13 dataset """kinships""" +475 13 model """proje""" +475 13 loss """bceaftersigmoid""" +475 13 regularizer """no""" +475 13 optimizer """adadelta""" +475 13 training_loop """owa""" +475 13 negative_sampler """basic""" +475 13 evaluator """rankbased""" +475 14 dataset """kinships""" +475 14 model """proje""" +475 14 loss """bceaftersigmoid""" +475 14 
regularizer """no""" +475 14 optimizer """adadelta""" +475 14 training_loop """owa""" +475 14 negative_sampler """basic""" +475 14 evaluator """rankbased""" +475 15 dataset """kinships""" +475 15 model """proje""" +475 15 loss """bceaftersigmoid""" +475 15 regularizer """no""" +475 15 optimizer """adadelta""" +475 15 training_loop """owa""" +475 15 negative_sampler """basic""" +475 15 evaluator """rankbased""" +475 16 dataset """kinships""" +475 16 model """proje""" +475 16 loss """bceaftersigmoid""" +475 16 regularizer """no""" +475 16 optimizer """adadelta""" +475 16 training_loop """owa""" +475 16 negative_sampler """basic""" +475 16 evaluator """rankbased""" +475 17 dataset """kinships""" +475 17 model """proje""" +475 17 loss """bceaftersigmoid""" +475 17 regularizer """no""" +475 17 optimizer """adadelta""" +475 17 training_loop """owa""" +475 17 negative_sampler """basic""" +475 17 evaluator """rankbased""" +475 18 dataset """kinships""" +475 18 model """proje""" +475 18 loss """bceaftersigmoid""" +475 18 regularizer """no""" +475 18 optimizer """adadelta""" +475 18 training_loop """owa""" +475 18 negative_sampler """basic""" +475 18 evaluator """rankbased""" +475 19 dataset """kinships""" +475 19 model """proje""" +475 19 loss """bceaftersigmoid""" +475 19 regularizer """no""" +475 19 optimizer """adadelta""" +475 19 training_loop """owa""" +475 19 negative_sampler """basic""" +475 19 evaluator """rankbased""" +475 20 dataset """kinships""" +475 20 model """proje""" +475 20 loss """bceaftersigmoid""" +475 20 regularizer """no""" +475 20 optimizer """adadelta""" +475 20 training_loop """owa""" +475 20 negative_sampler """basic""" +475 20 evaluator """rankbased""" +475 21 dataset """kinships""" +475 21 model """proje""" +475 21 loss """bceaftersigmoid""" +475 21 regularizer """no""" +475 21 optimizer """adadelta""" +475 21 training_loop """owa""" +475 21 negative_sampler """basic""" +475 21 evaluator """rankbased""" +475 22 dataset """kinships""" +475 22 
model """proje""" +475 22 loss """bceaftersigmoid""" +475 22 regularizer """no""" +475 22 optimizer """adadelta""" +475 22 training_loop """owa""" +475 22 negative_sampler """basic""" +475 22 evaluator """rankbased""" +475 23 dataset """kinships""" +475 23 model """proje""" +475 23 loss """bceaftersigmoid""" +475 23 regularizer """no""" +475 23 optimizer """adadelta""" +475 23 training_loop """owa""" +475 23 negative_sampler """basic""" +475 23 evaluator """rankbased""" +475 24 dataset """kinships""" +475 24 model """proje""" +475 24 loss """bceaftersigmoid""" +475 24 regularizer """no""" +475 24 optimizer """adadelta""" +475 24 training_loop """owa""" +475 24 negative_sampler """basic""" +475 24 evaluator """rankbased""" +475 25 dataset """kinships""" +475 25 model """proje""" +475 25 loss """bceaftersigmoid""" +475 25 regularizer """no""" +475 25 optimizer """adadelta""" +475 25 training_loop """owa""" +475 25 negative_sampler """basic""" +475 25 evaluator """rankbased""" +475 26 dataset """kinships""" +475 26 model """proje""" +475 26 loss """bceaftersigmoid""" +475 26 regularizer """no""" +475 26 optimizer """adadelta""" +475 26 training_loop """owa""" +475 26 negative_sampler """basic""" +475 26 evaluator """rankbased""" +475 27 dataset """kinships""" +475 27 model """proje""" +475 27 loss """bceaftersigmoid""" +475 27 regularizer """no""" +475 27 optimizer """adadelta""" +475 27 training_loop """owa""" +475 27 negative_sampler """basic""" +475 27 evaluator """rankbased""" +475 28 dataset """kinships""" +475 28 model """proje""" +475 28 loss """bceaftersigmoid""" +475 28 regularizer """no""" +475 28 optimizer """adadelta""" +475 28 training_loop """owa""" +475 28 negative_sampler """basic""" +475 28 evaluator """rankbased""" +475 29 dataset """kinships""" +475 29 model """proje""" +475 29 loss """bceaftersigmoid""" +475 29 regularizer """no""" +475 29 optimizer """adadelta""" +475 29 training_loop """owa""" +475 29 negative_sampler """basic""" +475 29 
evaluator """rankbased""" +475 30 dataset """kinships""" +475 30 model """proje""" +475 30 loss """bceaftersigmoid""" +475 30 regularizer """no""" +475 30 optimizer """adadelta""" +475 30 training_loop """owa""" +475 30 negative_sampler """basic""" +475 30 evaluator """rankbased""" +475 31 dataset """kinships""" +475 31 model """proje""" +475 31 loss """bceaftersigmoid""" +475 31 regularizer """no""" +475 31 optimizer """adadelta""" +475 31 training_loop """owa""" +475 31 negative_sampler """basic""" +475 31 evaluator """rankbased""" +475 32 dataset """kinships""" +475 32 model """proje""" +475 32 loss """bceaftersigmoid""" +475 32 regularizer """no""" +475 32 optimizer """adadelta""" +475 32 training_loop """owa""" +475 32 negative_sampler """basic""" +475 32 evaluator """rankbased""" +475 33 dataset """kinships""" +475 33 model """proje""" +475 33 loss """bceaftersigmoid""" +475 33 regularizer """no""" +475 33 optimizer """adadelta""" +475 33 training_loop """owa""" +475 33 negative_sampler """basic""" +475 33 evaluator """rankbased""" +475 34 dataset """kinships""" +475 34 model """proje""" +475 34 loss """bceaftersigmoid""" +475 34 regularizer """no""" +475 34 optimizer """adadelta""" +475 34 training_loop """owa""" +475 34 negative_sampler """basic""" +475 34 evaluator """rankbased""" +475 35 dataset """kinships""" +475 35 model """proje""" +475 35 loss """bceaftersigmoid""" +475 35 regularizer """no""" +475 35 optimizer """adadelta""" +475 35 training_loop """owa""" +475 35 negative_sampler """basic""" +475 35 evaluator """rankbased""" +475 36 dataset """kinships""" +475 36 model """proje""" +475 36 loss """bceaftersigmoid""" +475 36 regularizer """no""" +475 36 optimizer """adadelta""" +475 36 training_loop """owa""" +475 36 negative_sampler """basic""" +475 36 evaluator """rankbased""" +475 37 dataset """kinships""" +475 37 model """proje""" +475 37 loss """bceaftersigmoid""" +475 37 regularizer """no""" +475 37 optimizer """adadelta""" +475 37 
training_loop """owa""" +475 37 negative_sampler """basic""" +475 37 evaluator """rankbased""" +475 38 dataset """kinships""" +475 38 model """proje""" +475 38 loss """bceaftersigmoid""" +475 38 regularizer """no""" +475 38 optimizer """adadelta""" +475 38 training_loop """owa""" +475 38 negative_sampler """basic""" +475 38 evaluator """rankbased""" +475 39 dataset """kinships""" +475 39 model """proje""" +475 39 loss """bceaftersigmoid""" +475 39 regularizer """no""" +475 39 optimizer """adadelta""" +475 39 training_loop """owa""" +475 39 negative_sampler """basic""" +475 39 evaluator """rankbased""" +475 40 dataset """kinships""" +475 40 model """proje""" +475 40 loss """bceaftersigmoid""" +475 40 regularizer """no""" +475 40 optimizer """adadelta""" +475 40 training_loop """owa""" +475 40 negative_sampler """basic""" +475 40 evaluator """rankbased""" +475 41 dataset """kinships""" +475 41 model """proje""" +475 41 loss """bceaftersigmoid""" +475 41 regularizer """no""" +475 41 optimizer """adadelta""" +475 41 training_loop """owa""" +475 41 negative_sampler """basic""" +475 41 evaluator """rankbased""" +475 42 dataset """kinships""" +475 42 model """proje""" +475 42 loss """bceaftersigmoid""" +475 42 regularizer """no""" +475 42 optimizer """adadelta""" +475 42 training_loop """owa""" +475 42 negative_sampler """basic""" +475 42 evaluator """rankbased""" +475 43 dataset """kinships""" +475 43 model """proje""" +475 43 loss """bceaftersigmoid""" +475 43 regularizer """no""" +475 43 optimizer """adadelta""" +475 43 training_loop """owa""" +475 43 negative_sampler """basic""" +475 43 evaluator """rankbased""" +475 44 dataset """kinships""" +475 44 model """proje""" +475 44 loss """bceaftersigmoid""" +475 44 regularizer """no""" +475 44 optimizer """adadelta""" +475 44 training_loop """owa""" +475 44 negative_sampler """basic""" +475 44 evaluator """rankbased""" +475 45 dataset """kinships""" +475 45 model """proje""" +475 45 loss """bceaftersigmoid""" +475 45 
regularizer """no""" +475 45 optimizer """adadelta""" +475 45 training_loop """owa""" +475 45 negative_sampler """basic""" +475 45 evaluator """rankbased""" +475 46 dataset """kinships""" +475 46 model """proje""" +475 46 loss """bceaftersigmoid""" +475 46 regularizer """no""" +475 46 optimizer """adadelta""" +475 46 training_loop """owa""" +475 46 negative_sampler """basic""" +475 46 evaluator """rankbased""" +475 47 dataset """kinships""" +475 47 model """proje""" +475 47 loss """bceaftersigmoid""" +475 47 regularizer """no""" +475 47 optimizer """adadelta""" +475 47 training_loop """owa""" +475 47 negative_sampler """basic""" +475 47 evaluator """rankbased""" +475 48 dataset """kinships""" +475 48 model """proje""" +475 48 loss """bceaftersigmoid""" +475 48 regularizer """no""" +475 48 optimizer """adadelta""" +475 48 training_loop """owa""" +475 48 negative_sampler """basic""" +475 48 evaluator """rankbased""" +475 49 dataset """kinships""" +475 49 model """proje""" +475 49 loss """bceaftersigmoid""" +475 49 regularizer """no""" +475 49 optimizer """adadelta""" +475 49 training_loop """owa""" +475 49 negative_sampler """basic""" +475 49 evaluator """rankbased""" +475 50 dataset """kinships""" +475 50 model """proje""" +475 50 loss """bceaftersigmoid""" +475 50 regularizer """no""" +475 50 optimizer """adadelta""" +475 50 training_loop """owa""" +475 50 negative_sampler """basic""" +475 50 evaluator """rankbased""" +475 51 dataset """kinships""" +475 51 model """proje""" +475 51 loss """bceaftersigmoid""" +475 51 regularizer """no""" +475 51 optimizer """adadelta""" +475 51 training_loop """owa""" +475 51 negative_sampler """basic""" +475 51 evaluator """rankbased""" +475 52 dataset """kinships""" +475 52 model """proje""" +475 52 loss """bceaftersigmoid""" +475 52 regularizer """no""" +475 52 optimizer """adadelta""" +475 52 training_loop """owa""" +475 52 negative_sampler """basic""" +475 52 evaluator """rankbased""" +475 53 dataset """kinships""" +475 53 
model """proje""" +475 53 loss """bceaftersigmoid""" +475 53 regularizer """no""" +475 53 optimizer """adadelta""" +475 53 training_loop """owa""" +475 53 negative_sampler """basic""" +475 53 evaluator """rankbased""" +475 54 dataset """kinships""" +475 54 model """proje""" +475 54 loss """bceaftersigmoid""" +475 54 regularizer """no""" +475 54 optimizer """adadelta""" +475 54 training_loop """owa""" +475 54 negative_sampler """basic""" +475 54 evaluator """rankbased""" +475 55 dataset """kinships""" +475 55 model """proje""" +475 55 loss """bceaftersigmoid""" +475 55 regularizer """no""" +475 55 optimizer """adadelta""" +475 55 training_loop """owa""" +475 55 negative_sampler """basic""" +475 55 evaluator """rankbased""" +475 56 dataset """kinships""" +475 56 model """proje""" +475 56 loss """bceaftersigmoid""" +475 56 regularizer """no""" +475 56 optimizer """adadelta""" +475 56 training_loop """owa""" +475 56 negative_sampler """basic""" +475 56 evaluator """rankbased""" +475 57 dataset """kinships""" +475 57 model """proje""" +475 57 loss """bceaftersigmoid""" +475 57 regularizer """no""" +475 57 optimizer """adadelta""" +475 57 training_loop """owa""" +475 57 negative_sampler """basic""" +475 57 evaluator """rankbased""" +475 58 dataset """kinships""" +475 58 model """proje""" +475 58 loss """bceaftersigmoid""" +475 58 regularizer """no""" +475 58 optimizer """adadelta""" +475 58 training_loop """owa""" +475 58 negative_sampler """basic""" +475 58 evaluator """rankbased""" +475 59 dataset """kinships""" +475 59 model """proje""" +475 59 loss """bceaftersigmoid""" +475 59 regularizer """no""" +475 59 optimizer """adadelta""" +475 59 training_loop """owa""" +475 59 negative_sampler """basic""" +475 59 evaluator """rankbased""" +475 60 dataset """kinships""" +475 60 model """proje""" +475 60 loss """bceaftersigmoid""" +475 60 regularizer """no""" +475 60 optimizer """adadelta""" +475 60 training_loop """owa""" +475 60 negative_sampler """basic""" +475 60 
evaluator """rankbased""" +475 61 dataset """kinships""" +475 61 model """proje""" +475 61 loss """bceaftersigmoid""" +475 61 regularizer """no""" +475 61 optimizer """adadelta""" +475 61 training_loop """owa""" +475 61 negative_sampler """basic""" +475 61 evaluator """rankbased""" +475 62 dataset """kinships""" +475 62 model """proje""" +475 62 loss """bceaftersigmoid""" +475 62 regularizer """no""" +475 62 optimizer """adadelta""" +475 62 training_loop """owa""" +475 62 negative_sampler """basic""" +475 62 evaluator """rankbased""" +475 63 dataset """kinships""" +475 63 model """proje""" +475 63 loss """bceaftersigmoid""" +475 63 regularizer """no""" +475 63 optimizer """adadelta""" +475 63 training_loop """owa""" +475 63 negative_sampler """basic""" +475 63 evaluator """rankbased""" +475 64 dataset """kinships""" +475 64 model """proje""" +475 64 loss """bceaftersigmoid""" +475 64 regularizer """no""" +475 64 optimizer """adadelta""" +475 64 training_loop """owa""" +475 64 negative_sampler """basic""" +475 64 evaluator """rankbased""" +475 65 dataset """kinships""" +475 65 model """proje""" +475 65 loss """bceaftersigmoid""" +475 65 regularizer """no""" +475 65 optimizer """adadelta""" +475 65 training_loop """owa""" +475 65 negative_sampler """basic""" +475 65 evaluator """rankbased""" +475 66 dataset """kinships""" +475 66 model """proje""" +475 66 loss """bceaftersigmoid""" +475 66 regularizer """no""" +475 66 optimizer """adadelta""" +475 66 training_loop """owa""" +475 66 negative_sampler """basic""" +475 66 evaluator """rankbased""" +475 67 dataset """kinships""" +475 67 model """proje""" +475 67 loss """bceaftersigmoid""" +475 67 regularizer """no""" +475 67 optimizer """adadelta""" +475 67 training_loop """owa""" +475 67 negative_sampler """basic""" +475 67 evaluator """rankbased""" +475 68 dataset """kinships""" +475 68 model """proje""" +475 68 loss """bceaftersigmoid""" +475 68 regularizer """no""" +475 68 optimizer """adadelta""" +475 68 
training_loop """owa""" +475 68 negative_sampler """basic""" +475 68 evaluator """rankbased""" +475 69 dataset """kinships""" +475 69 model """proje""" +475 69 loss """bceaftersigmoid""" +475 69 regularizer """no""" +475 69 optimizer """adadelta""" +475 69 training_loop """owa""" +475 69 negative_sampler """basic""" +475 69 evaluator """rankbased""" +475 70 dataset """kinships""" +475 70 model """proje""" +475 70 loss """bceaftersigmoid""" +475 70 regularizer """no""" +475 70 optimizer """adadelta""" +475 70 training_loop """owa""" +475 70 negative_sampler """basic""" +475 70 evaluator """rankbased""" +475 71 dataset """kinships""" +475 71 model """proje""" +475 71 loss """bceaftersigmoid""" +475 71 regularizer """no""" +475 71 optimizer """adadelta""" +475 71 training_loop """owa""" +475 71 negative_sampler """basic""" +475 71 evaluator """rankbased""" +475 72 dataset """kinships""" +475 72 model """proje""" +475 72 loss """bceaftersigmoid""" +475 72 regularizer """no""" +475 72 optimizer """adadelta""" +475 72 training_loop """owa""" +475 72 negative_sampler """basic""" +475 72 evaluator """rankbased""" +475 73 dataset """kinships""" +475 73 model """proje""" +475 73 loss """bceaftersigmoid""" +475 73 regularizer """no""" +475 73 optimizer """adadelta""" +475 73 training_loop """owa""" +475 73 negative_sampler """basic""" +475 73 evaluator """rankbased""" +475 74 dataset """kinships""" +475 74 model """proje""" +475 74 loss """bceaftersigmoid""" +475 74 regularizer """no""" +475 74 optimizer """adadelta""" +475 74 training_loop """owa""" +475 74 negative_sampler """basic""" +475 74 evaluator """rankbased""" +475 75 dataset """kinships""" +475 75 model """proje""" +475 75 loss """bceaftersigmoid""" +475 75 regularizer """no""" +475 75 optimizer """adadelta""" +475 75 training_loop """owa""" +475 75 negative_sampler """basic""" +475 75 evaluator """rankbased""" +475 76 dataset """kinships""" +475 76 model """proje""" +475 76 loss """bceaftersigmoid""" +475 76 
regularizer """no""" +475 76 optimizer """adadelta""" +475 76 training_loop """owa""" +475 76 negative_sampler """basic""" +475 76 evaluator """rankbased""" +475 77 dataset """kinships""" +475 77 model """proje""" +475 77 loss """bceaftersigmoid""" +475 77 regularizer """no""" +475 77 optimizer """adadelta""" +475 77 training_loop """owa""" +475 77 negative_sampler """basic""" +475 77 evaluator """rankbased""" +475 78 dataset """kinships""" +475 78 model """proje""" +475 78 loss """bceaftersigmoid""" +475 78 regularizer """no""" +475 78 optimizer """adadelta""" +475 78 training_loop """owa""" +475 78 negative_sampler """basic""" +475 78 evaluator """rankbased""" +475 79 dataset """kinships""" +475 79 model """proje""" +475 79 loss """bceaftersigmoid""" +475 79 regularizer """no""" +475 79 optimizer """adadelta""" +475 79 training_loop """owa""" +475 79 negative_sampler """basic""" +475 79 evaluator """rankbased""" +475 80 dataset """kinships""" +475 80 model """proje""" +475 80 loss """bceaftersigmoid""" +475 80 regularizer """no""" +475 80 optimizer """adadelta""" +475 80 training_loop """owa""" +475 80 negative_sampler """basic""" +475 80 evaluator """rankbased""" +475 81 dataset """kinships""" +475 81 model """proje""" +475 81 loss """bceaftersigmoid""" +475 81 regularizer """no""" +475 81 optimizer """adadelta""" +475 81 training_loop """owa""" +475 81 negative_sampler """basic""" +475 81 evaluator """rankbased""" +475 82 dataset """kinships""" +475 82 model """proje""" +475 82 loss """bceaftersigmoid""" +475 82 regularizer """no""" +475 82 optimizer """adadelta""" +475 82 training_loop """owa""" +475 82 negative_sampler """basic""" +475 82 evaluator """rankbased""" +475 83 dataset """kinships""" +475 83 model """proje""" +475 83 loss """bceaftersigmoid""" +475 83 regularizer """no""" +475 83 optimizer """adadelta""" +475 83 training_loop """owa""" +475 83 negative_sampler """basic""" +475 83 evaluator """rankbased""" +475 84 dataset """kinships""" +475 84 
model """proje""" +475 84 loss """bceaftersigmoid""" +475 84 regularizer """no""" +475 84 optimizer """adadelta""" +475 84 training_loop """owa""" +475 84 negative_sampler """basic""" +475 84 evaluator """rankbased""" +475 85 dataset """kinships""" +475 85 model """proje""" +475 85 loss """bceaftersigmoid""" +475 85 regularizer """no""" +475 85 optimizer """adadelta""" +475 85 training_loop """owa""" +475 85 negative_sampler """basic""" +475 85 evaluator """rankbased""" +475 86 dataset """kinships""" +475 86 model """proje""" +475 86 loss """bceaftersigmoid""" +475 86 regularizer """no""" +475 86 optimizer """adadelta""" +475 86 training_loop """owa""" +475 86 negative_sampler """basic""" +475 86 evaluator """rankbased""" +475 87 dataset """kinships""" +475 87 model """proje""" +475 87 loss """bceaftersigmoid""" +475 87 regularizer """no""" +475 87 optimizer """adadelta""" +475 87 training_loop """owa""" +475 87 negative_sampler """basic""" +475 87 evaluator """rankbased""" +475 88 dataset """kinships""" +475 88 model """proje""" +475 88 loss """bceaftersigmoid""" +475 88 regularizer """no""" +475 88 optimizer """adadelta""" +475 88 training_loop """owa""" +475 88 negative_sampler """basic""" +475 88 evaluator """rankbased""" +475 89 dataset """kinships""" +475 89 model """proje""" +475 89 loss """bceaftersigmoid""" +475 89 regularizer """no""" +475 89 optimizer """adadelta""" +475 89 training_loop """owa""" +475 89 negative_sampler """basic""" +475 89 evaluator """rankbased""" +475 90 dataset """kinships""" +475 90 model """proje""" +475 90 loss """bceaftersigmoid""" +475 90 regularizer """no""" +475 90 optimizer """adadelta""" +475 90 training_loop """owa""" +475 90 negative_sampler """basic""" +475 90 evaluator """rankbased""" +475 91 dataset """kinships""" +475 91 model """proje""" +475 91 loss """bceaftersigmoid""" +475 91 regularizer """no""" +475 91 optimizer """adadelta""" +475 91 training_loop """owa""" +475 91 negative_sampler """basic""" +475 91 
evaluator """rankbased""" +475 92 dataset """kinships""" +475 92 model """proje""" +475 92 loss """bceaftersigmoid""" +475 92 regularizer """no""" +475 92 optimizer """adadelta""" +475 92 training_loop """owa""" +475 92 negative_sampler """basic""" +475 92 evaluator """rankbased""" +475 93 dataset """kinships""" +475 93 model """proje""" +475 93 loss """bceaftersigmoid""" +475 93 regularizer """no""" +475 93 optimizer """adadelta""" +475 93 training_loop """owa""" +475 93 negative_sampler """basic""" +475 93 evaluator """rankbased""" +475 94 dataset """kinships""" +475 94 model """proje""" +475 94 loss """bceaftersigmoid""" +475 94 regularizer """no""" +475 94 optimizer """adadelta""" +475 94 training_loop """owa""" +475 94 negative_sampler """basic""" +475 94 evaluator """rankbased""" +475 95 dataset """kinships""" +475 95 model """proje""" +475 95 loss """bceaftersigmoid""" +475 95 regularizer """no""" +475 95 optimizer """adadelta""" +475 95 training_loop """owa""" +475 95 negative_sampler """basic""" +475 95 evaluator """rankbased""" +475 96 dataset """kinships""" +475 96 model """proje""" +475 96 loss """bceaftersigmoid""" +475 96 regularizer """no""" +475 96 optimizer """adadelta""" +475 96 training_loop """owa""" +475 96 negative_sampler """basic""" +475 96 evaluator """rankbased""" +475 97 dataset """kinships""" +475 97 model """proje""" +475 97 loss """bceaftersigmoid""" +475 97 regularizer """no""" +475 97 optimizer """adadelta""" +475 97 training_loop """owa""" +475 97 negative_sampler """basic""" +475 97 evaluator """rankbased""" +475 98 dataset """kinships""" +475 98 model """proje""" +475 98 loss """bceaftersigmoid""" +475 98 regularizer """no""" +475 98 optimizer """adadelta""" +475 98 training_loop """owa""" +475 98 negative_sampler """basic""" +475 98 evaluator """rankbased""" +475 99 dataset """kinships""" +475 99 model """proje""" +475 99 loss """bceaftersigmoid""" +475 99 regularizer """no""" +475 99 optimizer """adadelta""" +475 99 
training_loop """owa""" +475 99 negative_sampler """basic""" +475 99 evaluator """rankbased""" +475 100 dataset """kinships""" +475 100 model """proje""" +475 100 loss """bceaftersigmoid""" +475 100 regularizer """no""" +475 100 optimizer """adadelta""" +475 100 training_loop """owa""" +475 100 negative_sampler """basic""" +475 100 evaluator """rankbased""" +476 1 model.embedding_dim 1.0 +476 1 negative_sampler.num_negs_per_pos 65.0 +476 1 training.batch_size 2.0 +476 2 model.embedding_dim 2.0 +476 2 negative_sampler.num_negs_per_pos 75.0 +476 2 training.batch_size 0.0 +476 3 model.embedding_dim 1.0 +476 3 negative_sampler.num_negs_per_pos 19.0 +476 3 training.batch_size 1.0 +476 4 model.embedding_dim 1.0 +476 4 negative_sampler.num_negs_per_pos 80.0 +476 4 training.batch_size 0.0 +476 5 model.embedding_dim 0.0 +476 5 negative_sampler.num_negs_per_pos 62.0 +476 5 training.batch_size 2.0 +476 6 model.embedding_dim 2.0 +476 6 negative_sampler.num_negs_per_pos 89.0 +476 6 training.batch_size 1.0 +476 7 model.embedding_dim 0.0 +476 7 negative_sampler.num_negs_per_pos 47.0 +476 7 training.batch_size 2.0 +476 8 model.embedding_dim 1.0 +476 8 negative_sampler.num_negs_per_pos 13.0 +476 8 training.batch_size 2.0 +476 9 model.embedding_dim 2.0 +476 9 negative_sampler.num_negs_per_pos 82.0 +476 9 training.batch_size 2.0 +476 10 model.embedding_dim 2.0 +476 10 negative_sampler.num_negs_per_pos 78.0 +476 10 training.batch_size 2.0 +476 11 model.embedding_dim 1.0 +476 11 negative_sampler.num_negs_per_pos 16.0 +476 11 training.batch_size 0.0 +476 12 model.embedding_dim 2.0 +476 12 negative_sampler.num_negs_per_pos 40.0 +476 12 training.batch_size 0.0 +476 13 model.embedding_dim 0.0 +476 13 negative_sampler.num_negs_per_pos 23.0 +476 13 training.batch_size 0.0 +476 14 model.embedding_dim 1.0 +476 14 negative_sampler.num_negs_per_pos 98.0 +476 14 training.batch_size 1.0 +476 15 model.embedding_dim 1.0 +476 15 negative_sampler.num_negs_per_pos 7.0 +476 15 training.batch_size 0.0 
+476 16 model.embedding_dim 2.0 +476 16 negative_sampler.num_negs_per_pos 10.0 +476 16 training.batch_size 1.0 +476 17 model.embedding_dim 1.0 +476 17 negative_sampler.num_negs_per_pos 6.0 +476 17 training.batch_size 1.0 +476 18 model.embedding_dim 2.0 +476 18 negative_sampler.num_negs_per_pos 18.0 +476 18 training.batch_size 2.0 +476 19 model.embedding_dim 2.0 +476 19 negative_sampler.num_negs_per_pos 31.0 +476 19 training.batch_size 0.0 +476 20 model.embedding_dim 2.0 +476 20 negative_sampler.num_negs_per_pos 83.0 +476 20 training.batch_size 1.0 +476 21 model.embedding_dim 1.0 +476 21 negative_sampler.num_negs_per_pos 0.0 +476 21 training.batch_size 2.0 +476 22 model.embedding_dim 1.0 +476 22 negative_sampler.num_negs_per_pos 13.0 +476 22 training.batch_size 0.0 +476 23 model.embedding_dim 0.0 +476 23 negative_sampler.num_negs_per_pos 9.0 +476 23 training.batch_size 0.0 +476 24 model.embedding_dim 1.0 +476 24 negative_sampler.num_negs_per_pos 78.0 +476 24 training.batch_size 1.0 +476 25 model.embedding_dim 0.0 +476 25 negative_sampler.num_negs_per_pos 51.0 +476 25 training.batch_size 1.0 +476 26 model.embedding_dim 0.0 +476 26 negative_sampler.num_negs_per_pos 7.0 +476 26 training.batch_size 0.0 +476 27 model.embedding_dim 1.0 +476 27 negative_sampler.num_negs_per_pos 54.0 +476 27 training.batch_size 1.0 +476 28 model.embedding_dim 0.0 +476 28 negative_sampler.num_negs_per_pos 44.0 +476 28 training.batch_size 2.0 +476 29 model.embedding_dim 2.0 +476 29 negative_sampler.num_negs_per_pos 86.0 +476 29 training.batch_size 1.0 +476 30 model.embedding_dim 1.0 +476 30 negative_sampler.num_negs_per_pos 98.0 +476 30 training.batch_size 0.0 +476 31 model.embedding_dim 0.0 +476 31 negative_sampler.num_negs_per_pos 57.0 +476 31 training.batch_size 0.0 +476 32 model.embedding_dim 0.0 +476 32 negative_sampler.num_negs_per_pos 62.0 +476 32 training.batch_size 0.0 +476 33 model.embedding_dim 2.0 +476 33 negative_sampler.num_negs_per_pos 50.0 +476 33 training.batch_size 2.0 +476 
34 model.embedding_dim 2.0 +476 34 negative_sampler.num_negs_per_pos 14.0 +476 34 training.batch_size 0.0 +476 35 model.embedding_dim 0.0 +476 35 negative_sampler.num_negs_per_pos 33.0 +476 35 training.batch_size 1.0 +476 36 model.embedding_dim 2.0 +476 36 negative_sampler.num_negs_per_pos 39.0 +476 36 training.batch_size 2.0 +476 37 model.embedding_dim 0.0 +476 37 negative_sampler.num_negs_per_pos 51.0 +476 37 training.batch_size 0.0 +476 38 model.embedding_dim 0.0 +476 38 negative_sampler.num_negs_per_pos 82.0 +476 38 training.batch_size 1.0 +476 39 model.embedding_dim 1.0 +476 39 negative_sampler.num_negs_per_pos 95.0 +476 39 training.batch_size 1.0 +476 40 model.embedding_dim 0.0 +476 40 negative_sampler.num_negs_per_pos 15.0 +476 40 training.batch_size 1.0 +476 41 model.embedding_dim 0.0 +476 41 negative_sampler.num_negs_per_pos 76.0 +476 41 training.batch_size 0.0 +476 42 model.embedding_dim 0.0 +476 42 negative_sampler.num_negs_per_pos 15.0 +476 42 training.batch_size 1.0 +476 43 model.embedding_dim 1.0 +476 43 negative_sampler.num_negs_per_pos 38.0 +476 43 training.batch_size 1.0 +476 44 model.embedding_dim 0.0 +476 44 negative_sampler.num_negs_per_pos 54.0 +476 44 training.batch_size 0.0 +476 45 model.embedding_dim 2.0 +476 45 negative_sampler.num_negs_per_pos 39.0 +476 45 training.batch_size 2.0 +476 46 model.embedding_dim 1.0 +476 46 negative_sampler.num_negs_per_pos 50.0 +476 46 training.batch_size 2.0 +476 47 model.embedding_dim 2.0 +476 47 negative_sampler.num_negs_per_pos 1.0 +476 47 training.batch_size 2.0 +476 48 model.embedding_dim 0.0 +476 48 negative_sampler.num_negs_per_pos 30.0 +476 48 training.batch_size 0.0 +476 49 model.embedding_dim 0.0 +476 49 negative_sampler.num_negs_per_pos 38.0 +476 49 training.batch_size 2.0 +476 50 model.embedding_dim 0.0 +476 50 negative_sampler.num_negs_per_pos 67.0 +476 50 training.batch_size 0.0 +476 51 model.embedding_dim 2.0 +476 51 negative_sampler.num_negs_per_pos 78.0 +476 51 training.batch_size 0.0 +476 52 
model.embedding_dim 1.0 +476 52 negative_sampler.num_negs_per_pos 22.0 +476 52 training.batch_size 1.0 +476 53 model.embedding_dim 2.0 +476 53 negative_sampler.num_negs_per_pos 34.0 +476 53 training.batch_size 1.0 +476 54 model.embedding_dim 1.0 +476 54 negative_sampler.num_negs_per_pos 26.0 +476 54 training.batch_size 2.0 +476 55 model.embedding_dim 0.0 +476 55 negative_sampler.num_negs_per_pos 74.0 +476 55 training.batch_size 1.0 +476 56 model.embedding_dim 2.0 +476 56 negative_sampler.num_negs_per_pos 35.0 +476 56 training.batch_size 1.0 +476 57 model.embedding_dim 0.0 +476 57 negative_sampler.num_negs_per_pos 93.0 +476 57 training.batch_size 1.0 +476 58 model.embedding_dim 1.0 +476 58 negative_sampler.num_negs_per_pos 72.0 +476 58 training.batch_size 0.0 +476 59 model.embedding_dim 0.0 +476 59 negative_sampler.num_negs_per_pos 74.0 +476 59 training.batch_size 0.0 +476 60 model.embedding_dim 0.0 +476 60 negative_sampler.num_negs_per_pos 36.0 +476 60 training.batch_size 1.0 +476 61 model.embedding_dim 1.0 +476 61 negative_sampler.num_negs_per_pos 75.0 +476 61 training.batch_size 1.0 +476 62 model.embedding_dim 1.0 +476 62 negative_sampler.num_negs_per_pos 11.0 +476 62 training.batch_size 0.0 +476 63 model.embedding_dim 0.0 +476 63 negative_sampler.num_negs_per_pos 58.0 +476 63 training.batch_size 0.0 +476 64 model.embedding_dim 0.0 +476 64 negative_sampler.num_negs_per_pos 11.0 +476 64 training.batch_size 2.0 +476 65 model.embedding_dim 2.0 +476 65 negative_sampler.num_negs_per_pos 22.0 +476 65 training.batch_size 0.0 +476 66 model.embedding_dim 1.0 +476 66 negative_sampler.num_negs_per_pos 50.0 +476 66 training.batch_size 2.0 +476 67 model.embedding_dim 0.0 +476 67 negative_sampler.num_negs_per_pos 28.0 +476 67 training.batch_size 1.0 +476 68 model.embedding_dim 0.0 +476 68 negative_sampler.num_negs_per_pos 55.0 +476 68 training.batch_size 1.0 +476 69 model.embedding_dim 1.0 +476 69 negative_sampler.num_negs_per_pos 18.0 +476 69 training.batch_size 0.0 +476 70 
model.embedding_dim 2.0 +476 70 negative_sampler.num_negs_per_pos 97.0 +476 70 training.batch_size 1.0 +476 71 model.embedding_dim 1.0 +476 71 negative_sampler.num_negs_per_pos 22.0 +476 71 training.batch_size 0.0 +476 72 model.embedding_dim 1.0 +476 72 negative_sampler.num_negs_per_pos 61.0 +476 72 training.batch_size 1.0 +476 73 model.embedding_dim 0.0 +476 73 negative_sampler.num_negs_per_pos 20.0 +476 73 training.batch_size 2.0 +476 74 model.embedding_dim 2.0 +476 74 negative_sampler.num_negs_per_pos 46.0 +476 74 training.batch_size 1.0 +476 75 model.embedding_dim 2.0 +476 75 negative_sampler.num_negs_per_pos 55.0 +476 75 training.batch_size 2.0 +476 76 model.embedding_dim 2.0 +476 76 negative_sampler.num_negs_per_pos 87.0 +476 76 training.batch_size 2.0 +476 77 model.embedding_dim 0.0 +476 77 negative_sampler.num_negs_per_pos 68.0 +476 77 training.batch_size 2.0 +476 78 model.embedding_dim 2.0 +476 78 negative_sampler.num_negs_per_pos 58.0 +476 78 training.batch_size 0.0 +476 79 model.embedding_dim 2.0 +476 79 negative_sampler.num_negs_per_pos 77.0 +476 79 training.batch_size 1.0 +476 80 model.embedding_dim 0.0 +476 80 negative_sampler.num_negs_per_pos 2.0 +476 80 training.batch_size 0.0 +476 81 model.embedding_dim 1.0 +476 81 negative_sampler.num_negs_per_pos 92.0 +476 81 training.batch_size 2.0 +476 82 model.embedding_dim 0.0 +476 82 negative_sampler.num_negs_per_pos 85.0 +476 82 training.batch_size 0.0 +476 83 model.embedding_dim 2.0 +476 83 negative_sampler.num_negs_per_pos 0.0 +476 83 training.batch_size 0.0 +476 84 model.embedding_dim 2.0 +476 84 negative_sampler.num_negs_per_pos 90.0 +476 84 training.batch_size 2.0 +476 85 model.embedding_dim 2.0 +476 85 negative_sampler.num_negs_per_pos 96.0 +476 85 training.batch_size 0.0 +476 86 model.embedding_dim 2.0 +476 86 negative_sampler.num_negs_per_pos 10.0 +476 86 training.batch_size 0.0 +476 87 model.embedding_dim 0.0 +476 87 negative_sampler.num_negs_per_pos 82.0 +476 87 training.batch_size 1.0 +476 88 
model.embedding_dim 0.0 +476 88 negative_sampler.num_negs_per_pos 86.0 +476 88 training.batch_size 1.0 +476 89 model.embedding_dim 2.0 +476 89 negative_sampler.num_negs_per_pos 41.0 +476 89 training.batch_size 1.0 +476 90 model.embedding_dim 0.0 +476 90 negative_sampler.num_negs_per_pos 5.0 +476 90 training.batch_size 2.0 +476 91 model.embedding_dim 1.0 +476 91 negative_sampler.num_negs_per_pos 88.0 +476 91 training.batch_size 1.0 +476 92 model.embedding_dim 0.0 +476 92 negative_sampler.num_negs_per_pos 11.0 +476 92 training.batch_size 1.0 +476 93 model.embedding_dim 1.0 +476 93 negative_sampler.num_negs_per_pos 91.0 +476 93 training.batch_size 0.0 +476 94 model.embedding_dim 2.0 +476 94 negative_sampler.num_negs_per_pos 57.0 +476 94 training.batch_size 2.0 +476 95 model.embedding_dim 2.0 +476 95 negative_sampler.num_negs_per_pos 44.0 +476 95 training.batch_size 0.0 +476 96 model.embedding_dim 2.0 +476 96 negative_sampler.num_negs_per_pos 58.0 +476 96 training.batch_size 1.0 +476 97 model.embedding_dim 2.0 +476 97 negative_sampler.num_negs_per_pos 26.0 +476 97 training.batch_size 2.0 +476 98 model.embedding_dim 2.0 +476 98 negative_sampler.num_negs_per_pos 54.0 +476 98 training.batch_size 0.0 +476 99 model.embedding_dim 0.0 +476 99 negative_sampler.num_negs_per_pos 6.0 +476 99 training.batch_size 1.0 +476 100 model.embedding_dim 1.0 +476 100 negative_sampler.num_negs_per_pos 45.0 +476 100 training.batch_size 1.0 +476 1 dataset """kinships""" +476 1 model """proje""" +476 1 loss """softplus""" +476 1 regularizer """no""" +476 1 optimizer """adadelta""" +476 1 training_loop """owa""" +476 1 negative_sampler """basic""" +476 1 evaluator """rankbased""" +476 2 dataset """kinships""" +476 2 model """proje""" +476 2 loss """softplus""" +476 2 regularizer """no""" +476 2 optimizer """adadelta""" +476 2 training_loop """owa""" +476 2 negative_sampler """basic""" +476 2 evaluator """rankbased""" +476 3 dataset """kinships""" +476 3 model """proje""" +476 3 loss 
"""softplus""" +476 3 regularizer """no""" +476 3 optimizer """adadelta""" +476 3 training_loop """owa""" +476 3 negative_sampler """basic""" +476 3 evaluator """rankbased""" +476 4 dataset """kinships""" +476 4 model """proje""" +476 4 loss """softplus""" +476 4 regularizer """no""" +476 4 optimizer """adadelta""" +476 4 training_loop """owa""" +476 4 negative_sampler """basic""" +476 4 evaluator """rankbased""" +476 5 dataset """kinships""" +476 5 model """proje""" +476 5 loss """softplus""" +476 5 regularizer """no""" +476 5 optimizer """adadelta""" +476 5 training_loop """owa""" +476 5 negative_sampler """basic""" +476 5 evaluator """rankbased""" +476 6 dataset """kinships""" +476 6 model """proje""" +476 6 loss """softplus""" +476 6 regularizer """no""" +476 6 optimizer """adadelta""" +476 6 training_loop """owa""" +476 6 negative_sampler """basic""" +476 6 evaluator """rankbased""" +476 7 dataset """kinships""" +476 7 model """proje""" +476 7 loss """softplus""" +476 7 regularizer """no""" +476 7 optimizer """adadelta""" +476 7 training_loop """owa""" +476 7 negative_sampler """basic""" +476 7 evaluator """rankbased""" +476 8 dataset """kinships""" +476 8 model """proje""" +476 8 loss """softplus""" +476 8 regularizer """no""" +476 8 optimizer """adadelta""" +476 8 training_loop """owa""" +476 8 negative_sampler """basic""" +476 8 evaluator """rankbased""" +476 9 dataset """kinships""" +476 9 model """proje""" +476 9 loss """softplus""" +476 9 regularizer """no""" +476 9 optimizer """adadelta""" +476 9 training_loop """owa""" +476 9 negative_sampler """basic""" +476 9 evaluator """rankbased""" +476 10 dataset """kinships""" +476 10 model """proje""" +476 10 loss """softplus""" +476 10 regularizer """no""" +476 10 optimizer """adadelta""" +476 10 training_loop """owa""" +476 10 negative_sampler """basic""" +476 10 evaluator """rankbased""" +476 11 dataset """kinships""" +476 11 model """proje""" +476 11 loss """softplus""" +476 11 regularizer """no""" +476 11 
optimizer """adadelta""" +476 11 training_loop """owa""" +476 11 negative_sampler """basic""" +476 11 evaluator """rankbased""" +476 12 dataset """kinships""" +476 12 model """proje""" +476 12 loss """softplus""" +476 12 regularizer """no""" +476 12 optimizer """adadelta""" +476 12 training_loop """owa""" +476 12 negative_sampler """basic""" +476 12 evaluator """rankbased""" +476 13 dataset """kinships""" +476 13 model """proje""" +476 13 loss """softplus""" +476 13 regularizer """no""" +476 13 optimizer """adadelta""" +476 13 training_loop """owa""" +476 13 negative_sampler """basic""" +476 13 evaluator """rankbased""" +476 14 dataset """kinships""" +476 14 model """proje""" +476 14 loss """softplus""" +476 14 regularizer """no""" +476 14 optimizer """adadelta""" +476 14 training_loop """owa""" +476 14 negative_sampler """basic""" +476 14 evaluator """rankbased""" +476 15 dataset """kinships""" +476 15 model """proje""" +476 15 loss """softplus""" +476 15 regularizer """no""" +476 15 optimizer """adadelta""" +476 15 training_loop """owa""" +476 15 negative_sampler """basic""" +476 15 evaluator """rankbased""" +476 16 dataset """kinships""" +476 16 model """proje""" +476 16 loss """softplus""" +476 16 regularizer """no""" +476 16 optimizer """adadelta""" +476 16 training_loop """owa""" +476 16 negative_sampler """basic""" +476 16 evaluator """rankbased""" +476 17 dataset """kinships""" +476 17 model """proje""" +476 17 loss """softplus""" +476 17 regularizer """no""" +476 17 optimizer """adadelta""" +476 17 training_loop """owa""" +476 17 negative_sampler """basic""" +476 17 evaluator """rankbased""" +476 18 dataset """kinships""" +476 18 model """proje""" +476 18 loss """softplus""" +476 18 regularizer """no""" +476 18 optimizer """adadelta""" +476 18 training_loop """owa""" +476 18 negative_sampler """basic""" +476 18 evaluator """rankbased""" +476 19 dataset """kinships""" +476 19 model """proje""" +476 19 loss """softplus""" +476 19 regularizer """no""" +476 19 
optimizer """adadelta""" +476 19 training_loop """owa""" +476 19 negative_sampler """basic""" +476 19 evaluator """rankbased""" +476 20 dataset """kinships""" +476 20 model """proje""" +476 20 loss """softplus""" +476 20 regularizer """no""" +476 20 optimizer """adadelta""" +476 20 training_loop """owa""" +476 20 negative_sampler """basic""" +476 20 evaluator """rankbased""" +476 21 dataset """kinships""" +476 21 model """proje""" +476 21 loss """softplus""" +476 21 regularizer """no""" +476 21 optimizer """adadelta""" +476 21 training_loop """owa""" +476 21 negative_sampler """basic""" +476 21 evaluator """rankbased""" +476 22 dataset """kinships""" +476 22 model """proje""" +476 22 loss """softplus""" +476 22 regularizer """no""" +476 22 optimizer """adadelta""" +476 22 training_loop """owa""" +476 22 negative_sampler """basic""" +476 22 evaluator """rankbased""" +476 23 dataset """kinships""" +476 23 model """proje""" +476 23 loss """softplus""" +476 23 regularizer """no""" +476 23 optimizer """adadelta""" +476 23 training_loop """owa""" +476 23 negative_sampler """basic""" +476 23 evaluator """rankbased""" +476 24 dataset """kinships""" +476 24 model """proje""" +476 24 loss """softplus""" +476 24 regularizer """no""" +476 24 optimizer """adadelta""" +476 24 training_loop """owa""" +476 24 negative_sampler """basic""" +476 24 evaluator """rankbased""" +476 25 dataset """kinships""" +476 25 model """proje""" +476 25 loss """softplus""" +476 25 regularizer """no""" +476 25 optimizer """adadelta""" +476 25 training_loop """owa""" +476 25 negative_sampler """basic""" +476 25 evaluator """rankbased""" +476 26 dataset """kinships""" +476 26 model """proje""" +476 26 loss """softplus""" +476 26 regularizer """no""" +476 26 optimizer """adadelta""" +476 26 training_loop """owa""" +476 26 negative_sampler """basic""" +476 26 evaluator """rankbased""" +476 27 dataset """kinships""" +476 27 model """proje""" +476 27 loss """softplus""" +476 27 regularizer """no""" +476 27 
optimizer """adadelta""" +476 27 training_loop """owa""" +476 27 negative_sampler """basic""" +476 27 evaluator """rankbased""" +476 28 dataset """kinships""" +476 28 model """proje""" +476 28 loss """softplus""" +476 28 regularizer """no""" +476 28 optimizer """adadelta""" +476 28 training_loop """owa""" +476 28 negative_sampler """basic""" +476 28 evaluator """rankbased""" +476 29 dataset """kinships""" +476 29 model """proje""" +476 29 loss """softplus""" +476 29 regularizer """no""" +476 29 optimizer """adadelta""" +476 29 training_loop """owa""" +476 29 negative_sampler """basic""" +476 29 evaluator """rankbased""" +476 30 dataset """kinships""" +476 30 model """proje""" +476 30 loss """softplus""" +476 30 regularizer """no""" +476 30 optimizer """adadelta""" +476 30 training_loop """owa""" +476 30 negative_sampler """basic""" +476 30 evaluator """rankbased""" +476 31 dataset """kinships""" +476 31 model """proje""" +476 31 loss """softplus""" +476 31 regularizer """no""" +476 31 optimizer """adadelta""" +476 31 training_loop """owa""" +476 31 negative_sampler """basic""" +476 31 evaluator """rankbased""" +476 32 dataset """kinships""" +476 32 model """proje""" +476 32 loss """softplus""" +476 32 regularizer """no""" +476 32 optimizer """adadelta""" +476 32 training_loop """owa""" +476 32 negative_sampler """basic""" +476 32 evaluator """rankbased""" +476 33 dataset """kinships""" +476 33 model """proje""" +476 33 loss """softplus""" +476 33 regularizer """no""" +476 33 optimizer """adadelta""" +476 33 training_loop """owa""" +476 33 negative_sampler """basic""" +476 33 evaluator """rankbased""" +476 34 dataset """kinships""" +476 34 model """proje""" +476 34 loss """softplus""" +476 34 regularizer """no""" +476 34 optimizer """adadelta""" +476 34 training_loop """owa""" +476 34 negative_sampler """basic""" +476 34 evaluator """rankbased""" +476 35 dataset """kinships""" +476 35 model """proje""" +476 35 loss """softplus""" +476 35 regularizer """no""" +476 35 
optimizer """adadelta""" +476 35 training_loop """owa""" +476 35 negative_sampler """basic""" +476 35 evaluator """rankbased""" +476 36 dataset """kinships""" +476 36 model """proje""" +476 36 loss """softplus""" +476 36 regularizer """no""" +476 36 optimizer """adadelta""" +476 36 training_loop """owa""" +476 36 negative_sampler """basic""" +476 36 evaluator """rankbased""" +476 37 dataset """kinships""" +476 37 model """proje""" +476 37 loss """softplus""" +476 37 regularizer """no""" +476 37 optimizer """adadelta""" +476 37 training_loop """owa""" +476 37 negative_sampler """basic""" +476 37 evaluator """rankbased""" +476 38 dataset """kinships""" +476 38 model """proje""" +476 38 loss """softplus""" +476 38 regularizer """no""" +476 38 optimizer """adadelta""" +476 38 training_loop """owa""" +476 38 negative_sampler """basic""" +476 38 evaluator """rankbased""" +476 39 dataset """kinships""" +476 39 model """proje""" +476 39 loss """softplus""" +476 39 regularizer """no""" +476 39 optimizer """adadelta""" +476 39 training_loop """owa""" +476 39 negative_sampler """basic""" +476 39 evaluator """rankbased""" +476 40 dataset """kinships""" +476 40 model """proje""" +476 40 loss """softplus""" +476 40 regularizer """no""" +476 40 optimizer """adadelta""" +476 40 training_loop """owa""" +476 40 negative_sampler """basic""" +476 40 evaluator """rankbased""" +476 41 dataset """kinships""" +476 41 model """proje""" +476 41 loss """softplus""" +476 41 regularizer """no""" +476 41 optimizer """adadelta""" +476 41 training_loop """owa""" +476 41 negative_sampler """basic""" +476 41 evaluator """rankbased""" +476 42 dataset """kinships""" +476 42 model """proje""" +476 42 loss """softplus""" +476 42 regularizer """no""" +476 42 optimizer """adadelta""" +476 42 training_loop """owa""" +476 42 negative_sampler """basic""" +476 42 evaluator """rankbased""" +476 43 dataset """kinships""" +476 43 model """proje""" +476 43 loss """softplus""" +476 43 regularizer """no""" +476 43 
optimizer """adadelta""" +476 43 training_loop """owa""" +476 43 negative_sampler """basic""" +476 43 evaluator """rankbased""" +476 44 dataset """kinships""" +476 44 model """proje""" +476 44 loss """softplus""" +476 44 regularizer """no""" +476 44 optimizer """adadelta""" +476 44 training_loop """owa""" +476 44 negative_sampler """basic""" +476 44 evaluator """rankbased""" +476 45 dataset """kinships""" +476 45 model """proje""" +476 45 loss """softplus""" +476 45 regularizer """no""" +476 45 optimizer """adadelta""" +476 45 training_loop """owa""" +476 45 negative_sampler """basic""" +476 45 evaluator """rankbased""" +476 46 dataset """kinships""" +476 46 model """proje""" +476 46 loss """softplus""" +476 46 regularizer """no""" +476 46 optimizer """adadelta""" +476 46 training_loop """owa""" +476 46 negative_sampler """basic""" +476 46 evaluator """rankbased""" +476 47 dataset """kinships""" +476 47 model """proje""" +476 47 loss """softplus""" +476 47 regularizer """no""" +476 47 optimizer """adadelta""" +476 47 training_loop """owa""" +476 47 negative_sampler """basic""" +476 47 evaluator """rankbased""" +476 48 dataset """kinships""" +476 48 model """proje""" +476 48 loss """softplus""" +476 48 regularizer """no""" +476 48 optimizer """adadelta""" +476 48 training_loop """owa""" +476 48 negative_sampler """basic""" +476 48 evaluator """rankbased""" +476 49 dataset """kinships""" +476 49 model """proje""" +476 49 loss """softplus""" +476 49 regularizer """no""" +476 49 optimizer """adadelta""" +476 49 training_loop """owa""" +476 49 negative_sampler """basic""" +476 49 evaluator """rankbased""" +476 50 dataset """kinships""" +476 50 model """proje""" +476 50 loss """softplus""" +476 50 regularizer """no""" +476 50 optimizer """adadelta""" +476 50 training_loop """owa""" +476 50 negative_sampler """basic""" +476 50 evaluator """rankbased""" +476 51 dataset """kinships""" +476 51 model """proje""" +476 51 loss """softplus""" +476 51 regularizer """no""" +476 51 
optimizer """adadelta""" +476 51 training_loop """owa""" +476 51 negative_sampler """basic""" +476 51 evaluator """rankbased""" +476 52 dataset """kinships""" +476 52 model """proje""" +476 52 loss """softplus""" +476 52 regularizer """no""" +476 52 optimizer """adadelta""" +476 52 training_loop """owa""" +476 52 negative_sampler """basic""" +476 52 evaluator """rankbased""" +476 53 dataset """kinships""" +476 53 model """proje""" +476 53 loss """softplus""" +476 53 regularizer """no""" +476 53 optimizer """adadelta""" +476 53 training_loop """owa""" +476 53 negative_sampler """basic""" +476 53 evaluator """rankbased""" +476 54 dataset """kinships""" +476 54 model """proje""" +476 54 loss """softplus""" +476 54 regularizer """no""" +476 54 optimizer """adadelta""" +476 54 training_loop """owa""" +476 54 negative_sampler """basic""" +476 54 evaluator """rankbased""" +476 55 dataset """kinships""" +476 55 model """proje""" +476 55 loss """softplus""" +476 55 regularizer """no""" +476 55 optimizer """adadelta""" +476 55 training_loop """owa""" +476 55 negative_sampler """basic""" +476 55 evaluator """rankbased""" +476 56 dataset """kinships""" +476 56 model """proje""" +476 56 loss """softplus""" +476 56 regularizer """no""" +476 56 optimizer """adadelta""" +476 56 training_loop """owa""" +476 56 negative_sampler """basic""" +476 56 evaluator """rankbased""" +476 57 dataset """kinships""" +476 57 model """proje""" +476 57 loss """softplus""" +476 57 regularizer """no""" +476 57 optimizer """adadelta""" +476 57 training_loop """owa""" +476 57 negative_sampler """basic""" +476 57 evaluator """rankbased""" +476 58 dataset """kinships""" +476 58 model """proje""" +476 58 loss """softplus""" +476 58 regularizer """no""" +476 58 optimizer """adadelta""" +476 58 training_loop """owa""" +476 58 negative_sampler """basic""" +476 58 evaluator """rankbased""" +476 59 dataset """kinships""" +476 59 model """proje""" +476 59 loss """softplus""" +476 59 regularizer """no""" +476 59 
optimizer """adadelta""" +476 59 training_loop """owa""" +476 59 negative_sampler """basic""" +476 59 evaluator """rankbased""" +476 60 dataset """kinships""" +476 60 model """proje""" +476 60 loss """softplus""" +476 60 regularizer """no""" +476 60 optimizer """adadelta""" +476 60 training_loop """owa""" +476 60 negative_sampler """basic""" +476 60 evaluator """rankbased""" +476 61 dataset """kinships""" +476 61 model """proje""" +476 61 loss """softplus""" +476 61 regularizer """no""" +476 61 optimizer """adadelta""" +476 61 training_loop """owa""" +476 61 negative_sampler """basic""" +476 61 evaluator """rankbased""" +476 62 dataset """kinships""" +476 62 model """proje""" +476 62 loss """softplus""" +476 62 regularizer """no""" +476 62 optimizer """adadelta""" +476 62 training_loop """owa""" +476 62 negative_sampler """basic""" +476 62 evaluator """rankbased""" +476 63 dataset """kinships""" +476 63 model """proje""" +476 63 loss """softplus""" +476 63 regularizer """no""" +476 63 optimizer """adadelta""" +476 63 training_loop """owa""" +476 63 negative_sampler """basic""" +476 63 evaluator """rankbased""" +476 64 dataset """kinships""" +476 64 model """proje""" +476 64 loss """softplus""" +476 64 regularizer """no""" +476 64 optimizer """adadelta""" +476 64 training_loop """owa""" +476 64 negative_sampler """basic""" +476 64 evaluator """rankbased""" +476 65 dataset """kinships""" +476 65 model """proje""" +476 65 loss """softplus""" +476 65 regularizer """no""" +476 65 optimizer """adadelta""" +476 65 training_loop """owa""" +476 65 negative_sampler """basic""" +476 65 evaluator """rankbased""" +476 66 dataset """kinships""" +476 66 model """proje""" +476 66 loss """softplus""" +476 66 regularizer """no""" +476 66 optimizer """adadelta""" +476 66 training_loop """owa""" +476 66 negative_sampler """basic""" +476 66 evaluator """rankbased""" +476 67 dataset """kinships""" +476 67 model """proje""" +476 67 loss """softplus""" +476 67 regularizer """no""" +476 67 
optimizer """adadelta""" +476 67 training_loop """owa""" +476 67 negative_sampler """basic""" +476 67 evaluator """rankbased""" +476 68 dataset """kinships""" +476 68 model """proje""" +476 68 loss """softplus""" +476 68 regularizer """no""" +476 68 optimizer """adadelta""" +476 68 training_loop """owa""" +476 68 negative_sampler """basic""" +476 68 evaluator """rankbased""" +476 69 dataset """kinships""" +476 69 model """proje""" +476 69 loss """softplus""" +476 69 regularizer """no""" +476 69 optimizer """adadelta""" +476 69 training_loop """owa""" +476 69 negative_sampler """basic""" +476 69 evaluator """rankbased""" +476 70 dataset """kinships""" +476 70 model """proje""" +476 70 loss """softplus""" +476 70 regularizer """no""" +476 70 optimizer """adadelta""" +476 70 training_loop """owa""" +476 70 negative_sampler """basic""" +476 70 evaluator """rankbased""" +476 71 dataset """kinships""" +476 71 model """proje""" +476 71 loss """softplus""" +476 71 regularizer """no""" +476 71 optimizer """adadelta""" +476 71 training_loop """owa""" +476 71 negative_sampler """basic""" +476 71 evaluator """rankbased""" +476 72 dataset """kinships""" +476 72 model """proje""" +476 72 loss """softplus""" +476 72 regularizer """no""" +476 72 optimizer """adadelta""" +476 72 training_loop """owa""" +476 72 negative_sampler """basic""" +476 72 evaluator """rankbased""" +476 73 dataset """kinships""" +476 73 model """proje""" +476 73 loss """softplus""" +476 73 regularizer """no""" +476 73 optimizer """adadelta""" +476 73 training_loop """owa""" +476 73 negative_sampler """basic""" +476 73 evaluator """rankbased""" +476 74 dataset """kinships""" +476 74 model """proje""" +476 74 loss """softplus""" +476 74 regularizer """no""" +476 74 optimizer """adadelta""" +476 74 training_loop """owa""" +476 74 negative_sampler """basic""" +476 74 evaluator """rankbased""" +476 75 dataset """kinships""" +476 75 model """proje""" +476 75 loss """softplus""" +476 75 regularizer """no""" +476 75 
optimizer """adadelta""" +476 75 training_loop """owa""" +476 75 negative_sampler """basic""" +476 75 evaluator """rankbased""" +476 76 dataset """kinships""" +476 76 model """proje""" +476 76 loss """softplus""" +476 76 regularizer """no""" +476 76 optimizer """adadelta""" +476 76 training_loop """owa""" +476 76 negative_sampler """basic""" +476 76 evaluator """rankbased""" +476 77 dataset """kinships""" +476 77 model """proje""" +476 77 loss """softplus""" +476 77 regularizer """no""" +476 77 optimizer """adadelta""" +476 77 training_loop """owa""" +476 77 negative_sampler """basic""" +476 77 evaluator """rankbased""" +476 78 dataset """kinships""" +476 78 model """proje""" +476 78 loss """softplus""" +476 78 regularizer """no""" +476 78 optimizer """adadelta""" +476 78 training_loop """owa""" +476 78 negative_sampler """basic""" +476 78 evaluator """rankbased""" +476 79 dataset """kinships""" +476 79 model """proje""" +476 79 loss """softplus""" +476 79 regularizer """no""" +476 79 optimizer """adadelta""" +476 79 training_loop """owa""" +476 79 negative_sampler """basic""" +476 79 evaluator """rankbased""" +476 80 dataset """kinships""" +476 80 model """proje""" +476 80 loss """softplus""" +476 80 regularizer """no""" +476 80 optimizer """adadelta""" +476 80 training_loop """owa""" +476 80 negative_sampler """basic""" +476 80 evaluator """rankbased""" +476 81 dataset """kinships""" +476 81 model """proje""" +476 81 loss """softplus""" +476 81 regularizer """no""" +476 81 optimizer """adadelta""" +476 81 training_loop """owa""" +476 81 negative_sampler """basic""" +476 81 evaluator """rankbased""" +476 82 dataset """kinships""" +476 82 model """proje""" +476 82 loss """softplus""" +476 82 regularizer """no""" +476 82 optimizer """adadelta""" +476 82 training_loop """owa""" +476 82 negative_sampler """basic""" +476 82 evaluator """rankbased""" +476 83 dataset """kinships""" +476 83 model """proje""" +476 83 loss """softplus""" +476 83 regularizer """no""" +476 83 
optimizer """adadelta""" +476 83 training_loop """owa""" +476 83 negative_sampler """basic""" +476 83 evaluator """rankbased""" +476 84 dataset """kinships""" +476 84 model """proje""" +476 84 loss """softplus""" +476 84 regularizer """no""" +476 84 optimizer """adadelta""" +476 84 training_loop """owa""" +476 84 negative_sampler """basic""" +476 84 evaluator """rankbased""" +476 85 dataset """kinships""" +476 85 model """proje""" +476 85 loss """softplus""" +476 85 regularizer """no""" +476 85 optimizer """adadelta""" +476 85 training_loop """owa""" +476 85 negative_sampler """basic""" +476 85 evaluator """rankbased""" +476 86 dataset """kinships""" +476 86 model """proje""" +476 86 loss """softplus""" +476 86 regularizer """no""" +476 86 optimizer """adadelta""" +476 86 training_loop """owa""" +476 86 negative_sampler """basic""" +476 86 evaluator """rankbased""" +476 87 dataset """kinships""" +476 87 model """proje""" +476 87 loss """softplus""" +476 87 regularizer """no""" +476 87 optimizer """adadelta""" +476 87 training_loop """owa""" +476 87 negative_sampler """basic""" +476 87 evaluator """rankbased""" +476 88 dataset """kinships""" +476 88 model """proje""" +476 88 loss """softplus""" +476 88 regularizer """no""" +476 88 optimizer """adadelta""" +476 88 training_loop """owa""" +476 88 negative_sampler """basic""" +476 88 evaluator """rankbased""" +476 89 dataset """kinships""" +476 89 model """proje""" +476 89 loss """softplus""" +476 89 regularizer """no""" +476 89 optimizer """adadelta""" +476 89 training_loop """owa""" +476 89 negative_sampler """basic""" +476 89 evaluator """rankbased""" +476 90 dataset """kinships""" +476 90 model """proje""" +476 90 loss """softplus""" +476 90 regularizer """no""" +476 90 optimizer """adadelta""" +476 90 training_loop """owa""" +476 90 negative_sampler """basic""" +476 90 evaluator """rankbased""" +476 91 dataset """kinships""" +476 91 model """proje""" +476 91 loss """softplus""" +476 91 regularizer """no""" +476 91 
optimizer """adadelta""" +476 91 training_loop """owa""" +476 91 negative_sampler """basic""" +476 91 evaluator """rankbased""" +476 92 dataset """kinships""" +476 92 model """proje""" +476 92 loss """softplus""" +476 92 regularizer """no""" +476 92 optimizer """adadelta""" +476 92 training_loop """owa""" +476 92 negative_sampler """basic""" +476 92 evaluator """rankbased""" +476 93 dataset """kinships""" +476 93 model """proje""" +476 93 loss """softplus""" +476 93 regularizer """no""" +476 93 optimizer """adadelta""" +476 93 training_loop """owa""" +476 93 negative_sampler """basic""" +476 93 evaluator """rankbased""" +476 94 dataset """kinships""" +476 94 model """proje""" +476 94 loss """softplus""" +476 94 regularizer """no""" +476 94 optimizer """adadelta""" +476 94 training_loop """owa""" +476 94 negative_sampler """basic""" +476 94 evaluator """rankbased""" +476 95 dataset """kinships""" +476 95 model """proje""" +476 95 loss """softplus""" +476 95 regularizer """no""" +476 95 optimizer """adadelta""" +476 95 training_loop """owa""" +476 95 negative_sampler """basic""" +476 95 evaluator """rankbased""" +476 96 dataset """kinships""" +476 96 model """proje""" +476 96 loss """softplus""" +476 96 regularizer """no""" +476 96 optimizer """adadelta""" +476 96 training_loop """owa""" +476 96 negative_sampler """basic""" +476 96 evaluator """rankbased""" +476 97 dataset """kinships""" +476 97 model """proje""" +476 97 loss """softplus""" +476 97 regularizer """no""" +476 97 optimizer """adadelta""" +476 97 training_loop """owa""" +476 97 negative_sampler """basic""" +476 97 evaluator """rankbased""" +476 98 dataset """kinships""" +476 98 model """proje""" +476 98 loss """softplus""" +476 98 regularizer """no""" +476 98 optimizer """adadelta""" +476 98 training_loop """owa""" +476 98 negative_sampler """basic""" +476 98 evaluator """rankbased""" +476 99 dataset """kinships""" +476 99 model """proje""" +476 99 loss """softplus""" +476 99 regularizer """no""" +476 99 
optimizer """adadelta""" +476 99 training_loop """owa""" +476 99 negative_sampler """basic""" +476 99 evaluator """rankbased""" +476 100 dataset """kinships""" +476 100 model """proje""" +476 100 loss """softplus""" +476 100 regularizer """no""" +476 100 optimizer """adadelta""" +476 100 training_loop """owa""" +476 100 negative_sampler """basic""" +476 100 evaluator """rankbased""" +477 1 model.embedding_dim 1.0 +477 1 loss.margin 21.680337982306057 +477 1 loss.adversarial_temperature 0.49280320515532905 +477 1 negative_sampler.num_negs_per_pos 99.0 +477 1 training.batch_size 1.0 +477 2 model.embedding_dim 1.0 +477 2 loss.margin 16.313921437877305 +477 2 loss.adversarial_temperature 0.14135709860170417 +477 2 negative_sampler.num_negs_per_pos 63.0 +477 2 training.batch_size 0.0 +477 3 model.embedding_dim 2.0 +477 3 loss.margin 15.118368643956124 +477 3 loss.adversarial_temperature 0.8264093764453784 +477 3 negative_sampler.num_negs_per_pos 30.0 +477 3 training.batch_size 1.0 +477 4 model.embedding_dim 2.0 +477 4 loss.margin 16.57021749545895 +477 4 loss.adversarial_temperature 0.4296206748828151 +477 4 negative_sampler.num_negs_per_pos 29.0 +477 4 training.batch_size 2.0 +477 5 model.embedding_dim 1.0 +477 5 loss.margin 17.108942257945323 +477 5 loss.adversarial_temperature 0.1560597708769773 +477 5 negative_sampler.num_negs_per_pos 29.0 +477 5 training.batch_size 0.0 +477 6 model.embedding_dim 2.0 +477 6 loss.margin 4.005409306846406 +477 6 loss.adversarial_temperature 0.5879066665668572 +477 6 negative_sampler.num_negs_per_pos 43.0 +477 6 training.batch_size 0.0 +477 7 model.embedding_dim 0.0 +477 7 loss.margin 11.116775187519004 +477 7 loss.adversarial_temperature 0.3849252888030005 +477 7 negative_sampler.num_negs_per_pos 98.0 +477 7 training.batch_size 1.0 +477 8 model.embedding_dim 0.0 +477 8 loss.margin 22.03582374382495 +477 8 loss.adversarial_temperature 0.570334743230942 +477 8 negative_sampler.num_negs_per_pos 83.0 +477 8 training.batch_size 1.0 +477 9 
model.embedding_dim 0.0 +477 9 loss.margin 20.67141020243208 +477 9 loss.adversarial_temperature 0.6772789859018564 +477 9 negative_sampler.num_negs_per_pos 90.0 +477 9 training.batch_size 2.0 +477 10 model.embedding_dim 0.0 +477 10 loss.margin 26.70233597292949 +477 10 loss.adversarial_temperature 0.8080499931393619 +477 10 negative_sampler.num_negs_per_pos 29.0 +477 10 training.batch_size 2.0 +477 11 model.embedding_dim 2.0 +477 11 loss.margin 6.465418692749822 +477 11 loss.adversarial_temperature 0.8565084564770937 +477 11 negative_sampler.num_negs_per_pos 15.0 +477 11 training.batch_size 1.0 +477 12 model.embedding_dim 1.0 +477 12 loss.margin 27.14454362595265 +477 12 loss.adversarial_temperature 0.6770035468793923 +477 12 negative_sampler.num_negs_per_pos 12.0 +477 12 training.batch_size 0.0 +477 13 model.embedding_dim 1.0 +477 13 loss.margin 22.58269870106025 +477 13 loss.adversarial_temperature 0.17799253079772523 +477 13 negative_sampler.num_negs_per_pos 83.0 +477 13 training.batch_size 1.0 +477 14 model.embedding_dim 0.0 +477 14 loss.margin 4.431776311735216 +477 14 loss.adversarial_temperature 0.364280879600259 +477 14 negative_sampler.num_negs_per_pos 63.0 +477 14 training.batch_size 2.0 +477 15 model.embedding_dim 1.0 +477 15 loss.margin 9.70047195392275 +477 15 loss.adversarial_temperature 0.9406528995973504 +477 15 negative_sampler.num_negs_per_pos 18.0 +477 15 training.batch_size 2.0 +477 16 model.embedding_dim 1.0 +477 16 loss.margin 29.507939540363527 +477 16 loss.adversarial_temperature 0.5094214543436404 +477 16 negative_sampler.num_negs_per_pos 36.0 +477 16 training.batch_size 0.0 +477 17 model.embedding_dim 0.0 +477 17 loss.margin 9.494775738519133 +477 17 loss.adversarial_temperature 0.1497950726094024 +477 17 negative_sampler.num_negs_per_pos 89.0 +477 17 training.batch_size 0.0 +477 18 model.embedding_dim 2.0 +477 18 loss.margin 18.097088803010443 +477 18 loss.adversarial_temperature 0.1078755530525507 +477 18 
negative_sampler.num_negs_per_pos 42.0 +477 18 training.batch_size 0.0 +477 19 model.embedding_dim 0.0 +477 19 loss.margin 7.621300198076478 +477 19 loss.adversarial_temperature 0.46712099693433917 +477 19 negative_sampler.num_negs_per_pos 3.0 +477 19 training.batch_size 2.0 +477 20 model.embedding_dim 0.0 +477 20 loss.margin 12.051572130367944 +477 20 loss.adversarial_temperature 0.46172960693394527 +477 20 negative_sampler.num_negs_per_pos 99.0 +477 20 training.batch_size 1.0 +477 21 model.embedding_dim 0.0 +477 21 loss.margin 9.428225646888546 +477 21 loss.adversarial_temperature 0.31784929505459736 +477 21 negative_sampler.num_negs_per_pos 24.0 +477 21 training.batch_size 0.0 +477 22 model.embedding_dim 0.0 +477 22 loss.margin 19.679872709487512 +477 22 loss.adversarial_temperature 0.5291631960759559 +477 22 negative_sampler.num_negs_per_pos 89.0 +477 22 training.batch_size 0.0 +477 23 model.embedding_dim 2.0 +477 23 loss.margin 25.442025365342214 +477 23 loss.adversarial_temperature 0.29419457565532436 +477 23 negative_sampler.num_negs_per_pos 95.0 +477 23 training.batch_size 2.0 +477 24 model.embedding_dim 2.0 +477 24 loss.margin 7.166108496257768 +477 24 loss.adversarial_temperature 0.1372868115362188 +477 24 negative_sampler.num_negs_per_pos 3.0 +477 24 training.batch_size 1.0 +477 25 model.embedding_dim 2.0 +477 25 loss.margin 27.031849670560742 +477 25 loss.adversarial_temperature 0.310178966967404 +477 25 negative_sampler.num_negs_per_pos 98.0 +477 25 training.batch_size 2.0 +477 26 model.embedding_dim 2.0 +477 26 loss.margin 17.453742704794706 +477 26 loss.adversarial_temperature 0.19833224097048957 +477 26 negative_sampler.num_negs_per_pos 36.0 +477 26 training.batch_size 0.0 +477 27 model.embedding_dim 0.0 +477 27 loss.margin 23.698987919620112 +477 27 loss.adversarial_temperature 0.5101729319681866 +477 27 negative_sampler.num_negs_per_pos 50.0 +477 27 training.batch_size 2.0 +477 28 model.embedding_dim 2.0 +477 28 loss.margin 15.239998341086238 +477 
28 loss.adversarial_temperature 0.5336435208213579 +477 28 negative_sampler.num_negs_per_pos 89.0 +477 28 training.batch_size 2.0 +477 29 model.embedding_dim 1.0 +477 29 loss.margin 20.262524251785514 +477 29 loss.adversarial_temperature 0.6042171345452616 +477 29 negative_sampler.num_negs_per_pos 63.0 +477 29 training.batch_size 0.0 +477 30 model.embedding_dim 2.0 +477 30 loss.margin 26.197474968718023 +477 30 loss.adversarial_temperature 0.29428351332155467 +477 30 negative_sampler.num_negs_per_pos 37.0 +477 30 training.batch_size 2.0 +477 31 model.embedding_dim 1.0 +477 31 loss.margin 19.235687713413355 +477 31 loss.adversarial_temperature 0.6360555793759255 +477 31 negative_sampler.num_negs_per_pos 37.0 +477 31 training.batch_size 0.0 +477 32 model.embedding_dim 2.0 +477 32 loss.margin 13.69817394664342 +477 32 loss.adversarial_temperature 0.8806091280410203 +477 32 negative_sampler.num_negs_per_pos 56.0 +477 32 training.batch_size 2.0 +477 33 model.embedding_dim 1.0 +477 33 loss.margin 26.83302961791926 +477 33 loss.adversarial_temperature 0.7890575473775473 +477 33 negative_sampler.num_negs_per_pos 39.0 +477 33 training.batch_size 0.0 +477 34 model.embedding_dim 2.0 +477 34 loss.margin 26.717573930295895 +477 34 loss.adversarial_temperature 0.6420671747406848 +477 34 negative_sampler.num_negs_per_pos 57.0 +477 34 training.batch_size 0.0 +477 35 model.embedding_dim 0.0 +477 35 loss.margin 24.61624261719973 +477 35 loss.adversarial_temperature 0.7775571592169135 +477 35 negative_sampler.num_negs_per_pos 1.0 +477 35 training.batch_size 1.0 +477 36 model.embedding_dim 1.0 +477 36 loss.margin 10.352655435065572 +477 36 loss.adversarial_temperature 0.21475262005881718 +477 36 negative_sampler.num_negs_per_pos 41.0 +477 36 training.batch_size 0.0 +477 37 model.embedding_dim 1.0 +477 37 loss.margin 13.754820408653554 +477 37 loss.adversarial_temperature 0.5718623759882486 +477 37 negative_sampler.num_negs_per_pos 10.0 +477 37 training.batch_size 0.0 +477 38 
model.embedding_dim 2.0 +477 38 loss.margin 24.33078204449012 +477 38 loss.adversarial_temperature 0.2116407819763907 +477 38 negative_sampler.num_negs_per_pos 75.0 +477 38 training.batch_size 0.0 +477 39 model.embedding_dim 2.0 +477 39 loss.margin 16.54551698130672 +477 39 loss.adversarial_temperature 0.6524480795220842 +477 39 negative_sampler.num_negs_per_pos 43.0 +477 39 training.batch_size 1.0 +477 40 model.embedding_dim 1.0 +477 40 loss.margin 11.02751967480454 +477 40 loss.adversarial_temperature 0.32740131064090416 +477 40 negative_sampler.num_negs_per_pos 63.0 +477 40 training.batch_size 1.0 +477 41 model.embedding_dim 0.0 +477 41 loss.margin 1.1992529960414287 +477 41 loss.adversarial_temperature 0.11827973073765344 +477 41 negative_sampler.num_negs_per_pos 62.0 +477 41 training.batch_size 2.0 +477 42 model.embedding_dim 0.0 +477 42 loss.margin 26.482011849650156 +477 42 loss.adversarial_temperature 0.22285878642824813 +477 42 negative_sampler.num_negs_per_pos 29.0 +477 42 training.batch_size 0.0 +477 43 model.embedding_dim 0.0 +477 43 loss.margin 28.788771118582634 +477 43 loss.adversarial_temperature 0.9591671274257937 +477 43 negative_sampler.num_negs_per_pos 30.0 +477 43 training.batch_size 2.0 +477 44 model.embedding_dim 0.0 +477 44 loss.margin 26.319423778445174 +477 44 loss.adversarial_temperature 0.2657922042449835 +477 44 negative_sampler.num_negs_per_pos 21.0 +477 44 training.batch_size 1.0 +477 45 model.embedding_dim 1.0 +477 45 loss.margin 5.096670816446821 +477 45 loss.adversarial_temperature 0.5085284972311386 +477 45 negative_sampler.num_negs_per_pos 76.0 +477 45 training.batch_size 1.0 +477 46 model.embedding_dim 0.0 +477 46 loss.margin 20.316246036535397 +477 46 loss.adversarial_temperature 0.13897345017355067 +477 46 negative_sampler.num_negs_per_pos 89.0 +477 46 training.batch_size 1.0 +477 47 model.embedding_dim 2.0 +477 47 loss.margin 12.51062565173746 +477 47 loss.adversarial_temperature 0.846779901049248 +477 47 
negative_sampler.num_negs_per_pos 38.0 +477 47 training.batch_size 2.0 +477 48 model.embedding_dim 1.0 +477 48 loss.margin 26.489612275472975 +477 48 loss.adversarial_temperature 0.5208955065634648 +477 48 negative_sampler.num_negs_per_pos 46.0 +477 48 training.batch_size 2.0 +477 49 model.embedding_dim 2.0 +477 49 loss.margin 22.058821731704356 +477 49 loss.adversarial_temperature 0.6474464730048638 +477 49 negative_sampler.num_negs_per_pos 30.0 +477 49 training.batch_size 2.0 +477 50 model.embedding_dim 0.0 +477 50 loss.margin 17.32984102092984 +477 50 loss.adversarial_temperature 0.7929094236274077 +477 50 negative_sampler.num_negs_per_pos 39.0 +477 50 training.batch_size 0.0 +477 51 model.embedding_dim 0.0 +477 51 loss.margin 18.028627431625612 +477 51 loss.adversarial_temperature 0.3130082554113166 +477 51 negative_sampler.num_negs_per_pos 35.0 +477 51 training.batch_size 1.0 +477 52 model.embedding_dim 2.0 +477 52 loss.margin 11.644898060716843 +477 52 loss.adversarial_temperature 0.12628508052347473 +477 52 negative_sampler.num_negs_per_pos 51.0 +477 52 training.batch_size 2.0 +477 53 model.embedding_dim 1.0 +477 53 loss.margin 19.880107305032436 +477 53 loss.adversarial_temperature 0.8943340365711485 +477 53 negative_sampler.num_negs_per_pos 55.0 +477 53 training.batch_size 1.0 +477 54 model.embedding_dim 0.0 +477 54 loss.margin 5.324650795941077 +477 54 loss.adversarial_temperature 0.8125889465767006 +477 54 negative_sampler.num_negs_per_pos 57.0 +477 54 training.batch_size 1.0 +477 55 model.embedding_dim 0.0 +477 55 loss.margin 20.97638074885399 +477 55 loss.adversarial_temperature 0.6162437440927622 +477 55 negative_sampler.num_negs_per_pos 58.0 +477 55 training.batch_size 1.0 +477 56 model.embedding_dim 0.0 +477 56 loss.margin 5.078682684598967 +477 56 loss.adversarial_temperature 0.8773691062258809 +477 56 negative_sampler.num_negs_per_pos 65.0 +477 56 training.batch_size 2.0 +477 57 model.embedding_dim 0.0 +477 57 loss.margin 9.755409569129586 +477 57 
loss.adversarial_temperature 0.9730794468612665 +477 57 negative_sampler.num_negs_per_pos 71.0 +477 57 training.batch_size 1.0 +477 58 model.embedding_dim 1.0 +477 58 loss.margin 29.701269522103768 +477 58 loss.adversarial_temperature 0.3420156536811586 +477 58 negative_sampler.num_negs_per_pos 91.0 +477 58 training.batch_size 0.0 +477 59 model.embedding_dim 0.0 +477 59 loss.margin 1.9004490881357503 +477 59 loss.adversarial_temperature 0.2836209329545592 +477 59 negative_sampler.num_negs_per_pos 37.0 +477 59 training.batch_size 2.0 +477 60 model.embedding_dim 1.0 +477 60 loss.margin 4.099411984404007 +477 60 loss.adversarial_temperature 0.13828628249587363 +477 60 negative_sampler.num_negs_per_pos 48.0 +477 60 training.batch_size 0.0 +477 61 model.embedding_dim 2.0 +477 61 loss.margin 1.7788542270942287 +477 61 loss.adversarial_temperature 0.5088597120109442 +477 61 negative_sampler.num_negs_per_pos 78.0 +477 61 training.batch_size 0.0 +477 62 model.embedding_dim 0.0 +477 62 loss.margin 4.8176689789273635 +477 62 loss.adversarial_temperature 0.6904457592513047 +477 62 negative_sampler.num_negs_per_pos 56.0 +477 62 training.batch_size 1.0 +477 63 model.embedding_dim 2.0 +477 63 loss.margin 7.724146162015601 +477 63 loss.adversarial_temperature 0.11493512199207556 +477 63 negative_sampler.num_negs_per_pos 75.0 +477 63 training.batch_size 2.0 +477 64 model.embedding_dim 0.0 +477 64 loss.margin 21.91807797677037 +477 64 loss.adversarial_temperature 0.6241346499838819 +477 64 negative_sampler.num_negs_per_pos 79.0 +477 64 training.batch_size 0.0 +477 65 model.embedding_dim 0.0 +477 65 loss.margin 23.798428263684727 +477 65 loss.adversarial_temperature 0.8263634737229889 +477 65 negative_sampler.num_negs_per_pos 38.0 +477 65 training.batch_size 2.0 +477 66 model.embedding_dim 0.0 +477 66 loss.margin 3.1262278919423645 +477 66 loss.adversarial_temperature 0.33973318686389536 +477 66 negative_sampler.num_negs_per_pos 25.0 +477 66 training.batch_size 2.0 +477 67 
model.embedding_dim 1.0 +477 67 loss.margin 15.674269143520968 +477 67 loss.adversarial_temperature 0.7487133141056609 +477 67 negative_sampler.num_negs_per_pos 31.0 +477 67 training.batch_size 0.0 +477 68 model.embedding_dim 0.0 +477 68 loss.margin 12.98585487776242 +477 68 loss.adversarial_temperature 0.9645601349053923 +477 68 negative_sampler.num_negs_per_pos 43.0 +477 68 training.batch_size 2.0 +477 69 model.embedding_dim 2.0 +477 69 loss.margin 5.287186416713998 +477 69 loss.adversarial_temperature 0.5799288717989178 +477 69 negative_sampler.num_negs_per_pos 48.0 +477 69 training.batch_size 2.0 +477 70 model.embedding_dim 0.0 +477 70 loss.margin 4.996438236584516 +477 70 loss.adversarial_temperature 0.6531762805827094 +477 70 negative_sampler.num_negs_per_pos 31.0 +477 70 training.batch_size 1.0 +477 71 model.embedding_dim 0.0 +477 71 loss.margin 4.01354360499197 +477 71 loss.adversarial_temperature 0.9195907713354354 +477 71 negative_sampler.num_negs_per_pos 78.0 +477 71 training.batch_size 2.0 +477 72 model.embedding_dim 2.0 +477 72 loss.margin 19.050562177885915 +477 72 loss.adversarial_temperature 0.15340573066125684 +477 72 negative_sampler.num_negs_per_pos 95.0 +477 72 training.batch_size 2.0 +477 73 model.embedding_dim 2.0 +477 73 loss.margin 7.864476631395557 +477 73 loss.adversarial_temperature 0.9610894319040033 +477 73 negative_sampler.num_negs_per_pos 59.0 +477 73 training.batch_size 0.0 +477 74 model.embedding_dim 2.0 +477 74 loss.margin 24.95824551427125 +477 74 loss.adversarial_temperature 0.4239118729684852 +477 74 negative_sampler.num_negs_per_pos 73.0 +477 74 training.batch_size 1.0 +477 75 model.embedding_dim 1.0 +477 75 loss.margin 24.994403124464593 +477 75 loss.adversarial_temperature 0.1038284374436489 +477 75 negative_sampler.num_negs_per_pos 0.0 +477 75 training.batch_size 0.0 +477 76 model.embedding_dim 0.0 +477 76 loss.margin 24.23478571499972 +477 76 loss.adversarial_temperature 0.3551810605221104 +477 76 
negative_sampler.num_negs_per_pos 14.0 +477 76 training.batch_size 2.0 +477 77 model.embedding_dim 1.0 +477 77 loss.margin 17.22267478598505 +477 77 loss.adversarial_temperature 0.8706158739080092 +477 77 negative_sampler.num_negs_per_pos 86.0 +477 77 training.batch_size 1.0 +477 78 model.embedding_dim 0.0 +477 78 loss.margin 8.819230406235768 +477 78 loss.adversarial_temperature 0.9531667957035991 +477 78 negative_sampler.num_negs_per_pos 19.0 +477 78 training.batch_size 1.0 +477 79 model.embedding_dim 0.0 +477 79 loss.margin 24.843083447193173 +477 79 loss.adversarial_temperature 0.7780846315758131 +477 79 negative_sampler.num_negs_per_pos 92.0 +477 79 training.batch_size 1.0 +477 80 model.embedding_dim 0.0 +477 80 loss.margin 11.620630726637387 +477 80 loss.adversarial_temperature 0.32803321202188995 +477 80 negative_sampler.num_negs_per_pos 41.0 +477 80 training.batch_size 0.0 +477 81 model.embedding_dim 1.0 +477 81 loss.margin 12.109411692865418 +477 81 loss.adversarial_temperature 0.8178618546302601 +477 81 negative_sampler.num_negs_per_pos 13.0 +477 81 training.batch_size 2.0 +477 82 model.embedding_dim 0.0 +477 82 loss.margin 2.6334599097007674 +477 82 loss.adversarial_temperature 0.2713352602454937 +477 82 negative_sampler.num_negs_per_pos 79.0 +477 82 training.batch_size 1.0 +477 83 model.embedding_dim 2.0 +477 83 loss.margin 14.705953791509945 +477 83 loss.adversarial_temperature 0.30228615466171016 +477 83 negative_sampler.num_negs_per_pos 72.0 +477 83 training.batch_size 0.0 +477 84 model.embedding_dim 2.0 +477 84 loss.margin 11.541281790845677 +477 84 loss.adversarial_temperature 0.6050519155664372 +477 84 negative_sampler.num_negs_per_pos 56.0 +477 84 training.batch_size 1.0 +477 85 model.embedding_dim 0.0 +477 85 loss.margin 22.346639599077125 +477 85 loss.adversarial_temperature 0.8484455077896949 +477 85 negative_sampler.num_negs_per_pos 15.0 +477 85 training.batch_size 2.0 +477 86 model.embedding_dim 1.0 +477 86 loss.margin 28.749745629681726 
+477 86 loss.adversarial_temperature 0.16804882860215137 +477 86 negative_sampler.num_negs_per_pos 33.0 +477 86 training.batch_size 0.0 +477 87 model.embedding_dim 0.0 +477 87 loss.margin 2.9792976673408713 +477 87 loss.adversarial_temperature 0.10112453803730198 +477 87 negative_sampler.num_negs_per_pos 56.0 +477 87 training.batch_size 2.0 +477 88 model.embedding_dim 0.0 +477 88 loss.margin 18.483049669571663 +477 88 loss.adversarial_temperature 0.7781749223555798 +477 88 negative_sampler.num_negs_per_pos 8.0 +477 88 training.batch_size 2.0 +477 89 model.embedding_dim 0.0 +477 89 loss.margin 6.495192739170186 +477 89 loss.adversarial_temperature 0.3572245405464317 +477 89 negative_sampler.num_negs_per_pos 48.0 +477 89 training.batch_size 2.0 +477 90 model.embedding_dim 0.0 +477 90 loss.margin 8.55389845957141 +477 90 loss.adversarial_temperature 0.35988038101985576 +477 90 negative_sampler.num_negs_per_pos 3.0 +477 90 training.batch_size 1.0 +477 91 model.embedding_dim 1.0 +477 91 loss.margin 24.076788429336343 +477 91 loss.adversarial_temperature 0.9319866889311585 +477 91 negative_sampler.num_negs_per_pos 96.0 +477 91 training.batch_size 2.0 +477 92 model.embedding_dim 1.0 +477 92 loss.margin 14.794073428100642 +477 92 loss.adversarial_temperature 0.7573053375859373 +477 92 negative_sampler.num_negs_per_pos 76.0 +477 92 training.batch_size 1.0 +477 93 model.embedding_dim 0.0 +477 93 loss.margin 28.837708158619513 +477 93 loss.adversarial_temperature 0.10473047492832029 +477 93 negative_sampler.num_negs_per_pos 37.0 +477 93 training.batch_size 2.0 +477 94 model.embedding_dim 1.0 +477 94 loss.margin 11.340822874632948 +477 94 loss.adversarial_temperature 0.484952615729795 +477 94 negative_sampler.num_negs_per_pos 40.0 +477 94 training.batch_size 2.0 +477 95 model.embedding_dim 0.0 +477 95 loss.margin 5.268468908996098 +477 95 loss.adversarial_temperature 0.12010118114410571 +477 95 negative_sampler.num_negs_per_pos 8.0 +477 95 training.batch_size 1.0 +477 96 
model.embedding_dim 1.0 +477 96 loss.margin 23.38662547389892 +477 96 loss.adversarial_temperature 0.34721317788210193 +477 96 negative_sampler.num_negs_per_pos 32.0 +477 96 training.batch_size 1.0 +477 97 model.embedding_dim 0.0 +477 97 loss.margin 10.48052476642454 +477 97 loss.adversarial_temperature 0.8898916182973347 +477 97 negative_sampler.num_negs_per_pos 66.0 +477 97 training.batch_size 0.0 +477 98 model.embedding_dim 2.0 +477 98 loss.margin 17.829704647735245 +477 98 loss.adversarial_temperature 0.3638188819167403 +477 98 negative_sampler.num_negs_per_pos 90.0 +477 98 training.batch_size 2.0 +477 99 model.embedding_dim 1.0 +477 99 loss.margin 13.214294596233067 +477 99 loss.adversarial_temperature 0.1266520788752104 +477 99 negative_sampler.num_negs_per_pos 5.0 +477 99 training.batch_size 0.0 +477 100 model.embedding_dim 0.0 +477 100 loss.margin 15.1397661789761 +477 100 loss.adversarial_temperature 0.1437332580111465 +477 100 negative_sampler.num_negs_per_pos 83.0 +477 100 training.batch_size 2.0 +477 1 dataset """kinships""" +477 1 model """proje""" +477 1 loss """nssa""" +477 1 regularizer """no""" +477 1 optimizer """adadelta""" +477 1 training_loop """owa""" +477 1 negative_sampler """basic""" +477 1 evaluator """rankbased""" +477 2 dataset """kinships""" +477 2 model """proje""" +477 2 loss """nssa""" +477 2 regularizer """no""" +477 2 optimizer """adadelta""" +477 2 training_loop """owa""" +477 2 negative_sampler """basic""" +477 2 evaluator """rankbased""" +477 3 dataset """kinships""" +477 3 model """proje""" +477 3 loss """nssa""" +477 3 regularizer """no""" +477 3 optimizer """adadelta""" +477 3 training_loop """owa""" +477 3 negative_sampler """basic""" +477 3 evaluator """rankbased""" +477 4 dataset """kinships""" +477 4 model """proje""" +477 4 loss """nssa""" +477 4 regularizer """no""" +477 4 optimizer """adadelta""" +477 4 training_loop """owa""" +477 4 negative_sampler """basic""" +477 4 evaluator """rankbased""" +477 5 dataset 
"""kinships""" +477 5 model """proje""" +477 5 loss """nssa""" +477 5 regularizer """no""" +477 5 optimizer """adadelta""" +477 5 training_loop """owa""" +477 5 negative_sampler """basic""" +477 5 evaluator """rankbased""" +477 6 dataset """kinships""" +477 6 model """proje""" +477 6 loss """nssa""" +477 6 regularizer """no""" +477 6 optimizer """adadelta""" +477 6 training_loop """owa""" +477 6 negative_sampler """basic""" +477 6 evaluator """rankbased""" +477 7 dataset """kinships""" +477 7 model """proje""" +477 7 loss """nssa""" +477 7 regularizer """no""" +477 7 optimizer """adadelta""" +477 7 training_loop """owa""" +477 7 negative_sampler """basic""" +477 7 evaluator """rankbased""" +477 8 dataset """kinships""" +477 8 model """proje""" +477 8 loss """nssa""" +477 8 regularizer """no""" +477 8 optimizer """adadelta""" +477 8 training_loop """owa""" +477 8 negative_sampler """basic""" +477 8 evaluator """rankbased""" +477 9 dataset """kinships""" +477 9 model """proje""" +477 9 loss """nssa""" +477 9 regularizer """no""" +477 9 optimizer """adadelta""" +477 9 training_loop """owa""" +477 9 negative_sampler """basic""" +477 9 evaluator """rankbased""" +477 10 dataset """kinships""" +477 10 model """proje""" +477 10 loss """nssa""" +477 10 regularizer """no""" +477 10 optimizer """adadelta""" +477 10 training_loop """owa""" +477 10 negative_sampler """basic""" +477 10 evaluator """rankbased""" +477 11 dataset """kinships""" +477 11 model """proje""" +477 11 loss """nssa""" +477 11 regularizer """no""" +477 11 optimizer """adadelta""" +477 11 training_loop """owa""" +477 11 negative_sampler """basic""" +477 11 evaluator """rankbased""" +477 12 dataset """kinships""" +477 12 model """proje""" +477 12 loss """nssa""" +477 12 regularizer """no""" +477 12 optimizer """adadelta""" +477 12 training_loop """owa""" +477 12 negative_sampler """basic""" +477 12 evaluator """rankbased""" +477 13 dataset """kinships""" +477 13 model """proje""" +477 13 loss """nssa""" +477 
13 regularizer """no""" +477 13 optimizer """adadelta""" +477 13 training_loop """owa""" +477 13 negative_sampler """basic""" +477 13 evaluator """rankbased""" +477 14 dataset """kinships""" +477 14 model """proje""" +477 14 loss """nssa""" +477 14 regularizer """no""" +477 14 optimizer """adadelta""" +477 14 training_loop """owa""" +477 14 negative_sampler """basic""" +477 14 evaluator """rankbased""" +477 15 dataset """kinships""" +477 15 model """proje""" +477 15 loss """nssa""" +477 15 regularizer """no""" +477 15 optimizer """adadelta""" +477 15 training_loop """owa""" +477 15 negative_sampler """basic""" +477 15 evaluator """rankbased""" +477 16 dataset """kinships""" +477 16 model """proje""" +477 16 loss """nssa""" +477 16 regularizer """no""" +477 16 optimizer """adadelta""" +477 16 training_loop """owa""" +477 16 negative_sampler """basic""" +477 16 evaluator """rankbased""" +477 17 dataset """kinships""" +477 17 model """proje""" +477 17 loss """nssa""" +477 17 regularizer """no""" +477 17 optimizer """adadelta""" +477 17 training_loop """owa""" +477 17 negative_sampler """basic""" +477 17 evaluator """rankbased""" +477 18 dataset """kinships""" +477 18 model """proje""" +477 18 loss """nssa""" +477 18 regularizer """no""" +477 18 optimizer """adadelta""" +477 18 training_loop """owa""" +477 18 negative_sampler """basic""" +477 18 evaluator """rankbased""" +477 19 dataset """kinships""" +477 19 model """proje""" +477 19 loss """nssa""" +477 19 regularizer """no""" +477 19 optimizer """adadelta""" +477 19 training_loop """owa""" +477 19 negative_sampler """basic""" +477 19 evaluator """rankbased""" +477 20 dataset """kinships""" +477 20 model """proje""" +477 20 loss """nssa""" +477 20 regularizer """no""" +477 20 optimizer """adadelta""" +477 20 training_loop """owa""" +477 20 negative_sampler """basic""" +477 20 evaluator """rankbased""" +477 21 dataset """kinships""" +477 21 model """proje""" +477 21 loss """nssa""" +477 21 regularizer """no""" +477 21 
optimizer """adadelta""" +477 21 training_loop """owa""" +477 21 negative_sampler """basic""" +477 21 evaluator """rankbased""" +477 22 dataset """kinships""" +477 22 model """proje""" +477 22 loss """nssa""" +477 22 regularizer """no""" +477 22 optimizer """adadelta""" +477 22 training_loop """owa""" +477 22 negative_sampler """basic""" +477 22 evaluator """rankbased""" +477 23 dataset """kinships""" +477 23 model """proje""" +477 23 loss """nssa""" +477 23 regularizer """no""" +477 23 optimizer """adadelta""" +477 23 training_loop """owa""" +477 23 negative_sampler """basic""" +477 23 evaluator """rankbased""" +477 24 dataset """kinships""" +477 24 model """proje""" +477 24 loss """nssa""" +477 24 regularizer """no""" +477 24 optimizer """adadelta""" +477 24 training_loop """owa""" +477 24 negative_sampler """basic""" +477 24 evaluator """rankbased""" +477 25 dataset """kinships""" +477 25 model """proje""" +477 25 loss """nssa""" +477 25 regularizer """no""" +477 25 optimizer """adadelta""" +477 25 training_loop """owa""" +477 25 negative_sampler """basic""" +477 25 evaluator """rankbased""" +477 26 dataset """kinships""" +477 26 model """proje""" +477 26 loss """nssa""" +477 26 regularizer """no""" +477 26 optimizer """adadelta""" +477 26 training_loop """owa""" +477 26 negative_sampler """basic""" +477 26 evaluator """rankbased""" +477 27 dataset """kinships""" +477 27 model """proje""" +477 27 loss """nssa""" +477 27 regularizer """no""" +477 27 optimizer """adadelta""" +477 27 training_loop """owa""" +477 27 negative_sampler """basic""" +477 27 evaluator """rankbased""" +477 28 dataset """kinships""" +477 28 model """proje""" +477 28 loss """nssa""" +477 28 regularizer """no""" +477 28 optimizer """adadelta""" +477 28 training_loop """owa""" +477 28 negative_sampler """basic""" +477 28 evaluator """rankbased""" +477 29 dataset """kinships""" +477 29 model """proje""" +477 29 loss """nssa""" +477 29 regularizer """no""" +477 29 optimizer """adadelta""" +477 
29 training_loop """owa""" +477 29 negative_sampler """basic""" +477 29 evaluator """rankbased""" +477 30 dataset """kinships""" +477 30 model """proje""" +477 30 loss """nssa""" +477 30 regularizer """no""" +477 30 optimizer """adadelta""" +477 30 training_loop """owa""" +477 30 negative_sampler """basic""" +477 30 evaluator """rankbased""" +477 31 dataset """kinships""" +477 31 model """proje""" +477 31 loss """nssa""" +477 31 regularizer """no""" +477 31 optimizer """adadelta""" +477 31 training_loop """owa""" +477 31 negative_sampler """basic""" +477 31 evaluator """rankbased""" +477 32 dataset """kinships""" +477 32 model """proje""" +477 32 loss """nssa""" +477 32 regularizer """no""" +477 32 optimizer """adadelta""" +477 32 training_loop """owa""" +477 32 negative_sampler """basic""" +477 32 evaluator """rankbased""" +477 33 dataset """kinships""" +477 33 model """proje""" +477 33 loss """nssa""" +477 33 regularizer """no""" +477 33 optimizer """adadelta""" +477 33 training_loop """owa""" +477 33 negative_sampler """basic""" +477 33 evaluator """rankbased""" +477 34 dataset """kinships""" +477 34 model """proje""" +477 34 loss """nssa""" +477 34 regularizer """no""" +477 34 optimizer """adadelta""" +477 34 training_loop """owa""" +477 34 negative_sampler """basic""" +477 34 evaluator """rankbased""" +477 35 dataset """kinships""" +477 35 model """proje""" +477 35 loss """nssa""" +477 35 regularizer """no""" +477 35 optimizer """adadelta""" +477 35 training_loop """owa""" +477 35 negative_sampler """basic""" +477 35 evaluator """rankbased""" +477 36 dataset """kinships""" +477 36 model """proje""" +477 36 loss """nssa""" +477 36 regularizer """no""" +477 36 optimizer """adadelta""" +477 36 training_loop """owa""" +477 36 negative_sampler """basic""" +477 36 evaluator """rankbased""" +477 37 dataset """kinships""" +477 37 model """proje""" +477 37 loss """nssa""" +477 37 regularizer """no""" +477 37 optimizer """adadelta""" +477 37 training_loop """owa""" +477 
37 negative_sampler """basic""" +477 37 evaluator """rankbased""" +477 38 dataset """kinships""" +477 38 model """proje""" +477 38 loss """nssa""" +477 38 regularizer """no""" +477 38 optimizer """adadelta""" +477 38 training_loop """owa""" +477 38 negative_sampler """basic""" +477 38 evaluator """rankbased""" +477 39 dataset """kinships""" +477 39 model """proje""" +477 39 loss """nssa""" +477 39 regularizer """no""" +477 39 optimizer """adadelta""" +477 39 training_loop """owa""" +477 39 negative_sampler """basic""" +477 39 evaluator """rankbased""" +477 40 dataset """kinships""" +477 40 model """proje""" +477 40 loss """nssa""" +477 40 regularizer """no""" +477 40 optimizer """adadelta""" +477 40 training_loop """owa""" +477 40 negative_sampler """basic""" +477 40 evaluator """rankbased""" +477 41 dataset """kinships""" +477 41 model """proje""" +477 41 loss """nssa""" +477 41 regularizer """no""" +477 41 optimizer """adadelta""" +477 41 training_loop """owa""" +477 41 negative_sampler """basic""" +477 41 evaluator """rankbased""" +477 42 dataset """kinships""" +477 42 model """proje""" +477 42 loss """nssa""" +477 42 regularizer """no""" +477 42 optimizer """adadelta""" +477 42 training_loop """owa""" +477 42 negative_sampler """basic""" +477 42 evaluator """rankbased""" +477 43 dataset """kinships""" +477 43 model """proje""" +477 43 loss """nssa""" +477 43 regularizer """no""" +477 43 optimizer """adadelta""" +477 43 training_loop """owa""" +477 43 negative_sampler """basic""" +477 43 evaluator """rankbased""" +477 44 dataset """kinships""" +477 44 model """proje""" +477 44 loss """nssa""" +477 44 regularizer """no""" +477 44 optimizer """adadelta""" +477 44 training_loop """owa""" +477 44 negative_sampler """basic""" +477 44 evaluator """rankbased""" +477 45 dataset """kinships""" +477 45 model """proje""" +477 45 loss """nssa""" +477 45 regularizer """no""" +477 45 optimizer """adadelta""" +477 45 training_loop """owa""" +477 45 negative_sampler """basic""" 
+477 45 evaluator """rankbased""" +477 46 dataset """kinships""" +477 46 model """proje""" +477 46 loss """nssa""" +477 46 regularizer """no""" +477 46 optimizer """adadelta""" +477 46 training_loop """owa""" +477 46 negative_sampler """basic""" +477 46 evaluator """rankbased""" +477 47 dataset """kinships""" +477 47 model """proje""" +477 47 loss """nssa""" +477 47 regularizer """no""" +477 47 optimizer """adadelta""" +477 47 training_loop """owa""" +477 47 negative_sampler """basic""" +477 47 evaluator """rankbased""" +477 48 dataset """kinships""" +477 48 model """proje""" +477 48 loss """nssa""" +477 48 regularizer """no""" +477 48 optimizer """adadelta""" +477 48 training_loop """owa""" +477 48 negative_sampler """basic""" +477 48 evaluator """rankbased""" +477 49 dataset """kinships""" +477 49 model """proje""" +477 49 loss """nssa""" +477 49 regularizer """no""" +477 49 optimizer """adadelta""" +477 49 training_loop """owa""" +477 49 negative_sampler """basic""" +477 49 evaluator """rankbased""" +477 50 dataset """kinships""" +477 50 model """proje""" +477 50 loss """nssa""" +477 50 regularizer """no""" +477 50 optimizer """adadelta""" +477 50 training_loop """owa""" +477 50 negative_sampler """basic""" +477 50 evaluator """rankbased""" +477 51 dataset """kinships""" +477 51 model """proje""" +477 51 loss """nssa""" +477 51 regularizer """no""" +477 51 optimizer """adadelta""" +477 51 training_loop """owa""" +477 51 negative_sampler """basic""" +477 51 evaluator """rankbased""" +477 52 dataset """kinships""" +477 52 model """proje""" +477 52 loss """nssa""" +477 52 regularizer """no""" +477 52 optimizer """adadelta""" +477 52 training_loop """owa""" +477 52 negative_sampler """basic""" +477 52 evaluator """rankbased""" +477 53 dataset """kinships""" +477 53 model """proje""" +477 53 loss """nssa""" +477 53 regularizer """no""" +477 53 optimizer """adadelta""" +477 53 training_loop """owa""" +477 53 negative_sampler """basic""" +477 53 evaluator 
"""rankbased""" +477 54 dataset """kinships""" +477 54 model """proje""" +477 54 loss """nssa""" +477 54 regularizer """no""" +477 54 optimizer """adadelta""" +477 54 training_loop """owa""" +477 54 negative_sampler """basic""" +477 54 evaluator """rankbased""" +477 55 dataset """kinships""" +477 55 model """proje""" +477 55 loss """nssa""" +477 55 regularizer """no""" +477 55 optimizer """adadelta""" +477 55 training_loop """owa""" +477 55 negative_sampler """basic""" +477 55 evaluator """rankbased""" +477 56 dataset """kinships""" +477 56 model """proje""" +477 56 loss """nssa""" +477 56 regularizer """no""" +477 56 optimizer """adadelta""" +477 56 training_loop """owa""" +477 56 negative_sampler """basic""" +477 56 evaluator """rankbased""" +477 57 dataset """kinships""" +477 57 model """proje""" +477 57 loss """nssa""" +477 57 regularizer """no""" +477 57 optimizer """adadelta""" +477 57 training_loop """owa""" +477 57 negative_sampler """basic""" +477 57 evaluator """rankbased""" +477 58 dataset """kinships""" +477 58 model """proje""" +477 58 loss """nssa""" +477 58 regularizer """no""" +477 58 optimizer """adadelta""" +477 58 training_loop """owa""" +477 58 negative_sampler """basic""" +477 58 evaluator """rankbased""" +477 59 dataset """kinships""" +477 59 model """proje""" +477 59 loss """nssa""" +477 59 regularizer """no""" +477 59 optimizer """adadelta""" +477 59 training_loop """owa""" +477 59 negative_sampler """basic""" +477 59 evaluator """rankbased""" +477 60 dataset """kinships""" +477 60 model """proje""" +477 60 loss """nssa""" +477 60 regularizer """no""" +477 60 optimizer """adadelta""" +477 60 training_loop """owa""" +477 60 negative_sampler """basic""" +477 60 evaluator """rankbased""" +477 61 dataset """kinships""" +477 61 model """proje""" +477 61 loss """nssa""" +477 61 regularizer """no""" +477 61 optimizer """adadelta""" +477 61 training_loop """owa""" +477 61 negative_sampler """basic""" +477 61 evaluator """rankbased""" +477 62 dataset 
"""kinships""" +477 62 model """proje""" +477 62 loss """nssa""" +477 62 regularizer """no""" +477 62 optimizer """adadelta""" +477 62 training_loop """owa""" +477 62 negative_sampler """basic""" +477 62 evaluator """rankbased""" +477 63 dataset """kinships""" +477 63 model """proje""" +477 63 loss """nssa""" +477 63 regularizer """no""" +477 63 optimizer """adadelta""" +477 63 training_loop """owa""" +477 63 negative_sampler """basic""" +477 63 evaluator """rankbased""" +477 64 dataset """kinships""" +477 64 model """proje""" +477 64 loss """nssa""" +477 64 regularizer """no""" +477 64 optimizer """adadelta""" +477 64 training_loop """owa""" +477 64 negative_sampler """basic""" +477 64 evaluator """rankbased""" +477 65 dataset """kinships""" +477 65 model """proje""" +477 65 loss """nssa""" +477 65 regularizer """no""" +477 65 optimizer """adadelta""" +477 65 training_loop """owa""" +477 65 negative_sampler """basic""" +477 65 evaluator """rankbased""" +477 66 dataset """kinships""" +477 66 model """proje""" +477 66 loss """nssa""" +477 66 regularizer """no""" +477 66 optimizer """adadelta""" +477 66 training_loop """owa""" +477 66 negative_sampler """basic""" +477 66 evaluator """rankbased""" +477 67 dataset """kinships""" +477 67 model """proje""" +477 67 loss """nssa""" +477 67 regularizer """no""" +477 67 optimizer """adadelta""" +477 67 training_loop """owa""" +477 67 negative_sampler """basic""" +477 67 evaluator """rankbased""" +477 68 dataset """kinships""" +477 68 model """proje""" +477 68 loss """nssa""" +477 68 regularizer """no""" +477 68 optimizer """adadelta""" +477 68 training_loop """owa""" +477 68 negative_sampler """basic""" +477 68 evaluator """rankbased""" +477 69 dataset """kinships""" +477 69 model """proje""" +477 69 loss """nssa""" +477 69 regularizer """no""" +477 69 optimizer """adadelta""" +477 69 training_loop """owa""" +477 69 negative_sampler """basic""" +477 69 evaluator """rankbased""" +477 70 dataset """kinships""" +477 70 model 
"""proje""" +477 70 loss """nssa""" +477 70 regularizer """no""" +477 70 optimizer """adadelta""" +477 70 training_loop """owa""" +477 70 negative_sampler """basic""" +477 70 evaluator """rankbased""" +477 71 dataset """kinships""" +477 71 model """proje""" +477 71 loss """nssa""" +477 71 regularizer """no""" +477 71 optimizer """adadelta""" +477 71 training_loop """owa""" +477 71 negative_sampler """basic""" +477 71 evaluator """rankbased""" +477 72 dataset """kinships""" +477 72 model """proje""" +477 72 loss """nssa""" +477 72 regularizer """no""" +477 72 optimizer """adadelta""" +477 72 training_loop """owa""" +477 72 negative_sampler """basic""" +477 72 evaluator """rankbased""" +477 73 dataset """kinships""" +477 73 model """proje""" +477 73 loss """nssa""" +477 73 regularizer """no""" +477 73 optimizer """adadelta""" +477 73 training_loop """owa""" +477 73 negative_sampler """basic""" +477 73 evaluator """rankbased""" +477 74 dataset """kinships""" +477 74 model """proje""" +477 74 loss """nssa""" +477 74 regularizer """no""" +477 74 optimizer """adadelta""" +477 74 training_loop """owa""" +477 74 negative_sampler """basic""" +477 74 evaluator """rankbased""" +477 75 dataset """kinships""" +477 75 model """proje""" +477 75 loss """nssa""" +477 75 regularizer """no""" +477 75 optimizer """adadelta""" +477 75 training_loop """owa""" +477 75 negative_sampler """basic""" +477 75 evaluator """rankbased""" +477 76 dataset """kinships""" +477 76 model """proje""" +477 76 loss """nssa""" +477 76 regularizer """no""" +477 76 optimizer """adadelta""" +477 76 training_loop """owa""" +477 76 negative_sampler """basic""" +477 76 evaluator """rankbased""" +477 77 dataset """kinships""" +477 77 model """proje""" +477 77 loss """nssa""" +477 77 regularizer """no""" +477 77 optimizer """adadelta""" +477 77 training_loop """owa""" +477 77 negative_sampler """basic""" +477 77 evaluator """rankbased""" +477 78 dataset """kinships""" +477 78 model """proje""" +477 78 loss 
"""nssa""" +477 78 regularizer """no""" +477 78 optimizer """adadelta""" +477 78 training_loop """owa""" +477 78 negative_sampler """basic""" +477 78 evaluator """rankbased""" +477 79 dataset """kinships""" +477 79 model """proje""" +477 79 loss """nssa""" +477 79 regularizer """no""" +477 79 optimizer """adadelta""" +477 79 training_loop """owa""" +477 79 negative_sampler """basic""" +477 79 evaluator """rankbased""" +477 80 dataset """kinships""" +477 80 model """proje""" +477 80 loss """nssa""" +477 80 regularizer """no""" +477 80 optimizer """adadelta""" +477 80 training_loop """owa""" +477 80 negative_sampler """basic""" +477 80 evaluator """rankbased""" +477 81 dataset """kinships""" +477 81 model """proje""" +477 81 loss """nssa""" +477 81 regularizer """no""" +477 81 optimizer """adadelta""" +477 81 training_loop """owa""" +477 81 negative_sampler """basic""" +477 81 evaluator """rankbased""" +477 82 dataset """kinships""" +477 82 model """proje""" +477 82 loss """nssa""" +477 82 regularizer """no""" +477 82 optimizer """adadelta""" +477 82 training_loop """owa""" +477 82 negative_sampler """basic""" +477 82 evaluator """rankbased""" +477 83 dataset """kinships""" +477 83 model """proje""" +477 83 loss """nssa""" +477 83 regularizer """no""" +477 83 optimizer """adadelta""" +477 83 training_loop """owa""" +477 83 negative_sampler """basic""" +477 83 evaluator """rankbased""" +477 84 dataset """kinships""" +477 84 model """proje""" +477 84 loss """nssa""" +477 84 regularizer """no""" +477 84 optimizer """adadelta""" +477 84 training_loop """owa""" +477 84 negative_sampler """basic""" +477 84 evaluator """rankbased""" +477 85 dataset """kinships""" +477 85 model """proje""" +477 85 loss """nssa""" +477 85 regularizer """no""" +477 85 optimizer """adadelta""" +477 85 training_loop """owa""" +477 85 negative_sampler """basic""" +477 85 evaluator """rankbased""" +477 86 dataset """kinships""" +477 86 model """proje""" +477 86 loss """nssa""" +477 86 regularizer 
"""no""" +477 86 optimizer """adadelta""" +477 86 training_loop """owa""" +477 86 negative_sampler """basic""" +477 86 evaluator """rankbased""" +477 87 dataset """kinships""" +477 87 model """proje""" +477 87 loss """nssa""" +477 87 regularizer """no""" +477 87 optimizer """adadelta""" +477 87 training_loop """owa""" +477 87 negative_sampler """basic""" +477 87 evaluator """rankbased""" +477 88 dataset """kinships""" +477 88 model """proje""" +477 88 loss """nssa""" +477 88 regularizer """no""" +477 88 optimizer """adadelta""" +477 88 training_loop """owa""" +477 88 negative_sampler """basic""" +477 88 evaluator """rankbased""" +477 89 dataset """kinships""" +477 89 model """proje""" +477 89 loss """nssa""" +477 89 regularizer """no""" +477 89 optimizer """adadelta""" +477 89 training_loop """owa""" +477 89 negative_sampler """basic""" +477 89 evaluator """rankbased""" +477 90 dataset """kinships""" +477 90 model """proje""" +477 90 loss """nssa""" +477 90 regularizer """no""" +477 90 optimizer """adadelta""" +477 90 training_loop """owa""" +477 90 negative_sampler """basic""" +477 90 evaluator """rankbased""" +477 91 dataset """kinships""" +477 91 model """proje""" +477 91 loss """nssa""" +477 91 regularizer """no""" +477 91 optimizer """adadelta""" +477 91 training_loop """owa""" +477 91 negative_sampler """basic""" +477 91 evaluator """rankbased""" +477 92 dataset """kinships""" +477 92 model """proje""" +477 92 loss """nssa""" +477 92 regularizer """no""" +477 92 optimizer """adadelta""" +477 92 training_loop """owa""" +477 92 negative_sampler """basic""" +477 92 evaluator """rankbased""" +477 93 dataset """kinships""" +477 93 model """proje""" +477 93 loss """nssa""" +477 93 regularizer """no""" +477 93 optimizer """adadelta""" +477 93 training_loop """owa""" +477 93 negative_sampler """basic""" +477 93 evaluator """rankbased""" +477 94 dataset """kinships""" +477 94 model """proje""" +477 94 loss """nssa""" +477 94 regularizer """no""" +477 94 optimizer 
"""adadelta""" +477 94 training_loop """owa""" +477 94 negative_sampler """basic""" +477 94 evaluator """rankbased""" +477 95 dataset """kinships""" +477 95 model """proje""" +477 95 loss """nssa""" +477 95 regularizer """no""" +477 95 optimizer """adadelta""" +477 95 training_loop """owa""" +477 95 negative_sampler """basic""" +477 95 evaluator """rankbased""" +477 96 dataset """kinships""" +477 96 model """proje""" +477 96 loss """nssa""" +477 96 regularizer """no""" +477 96 optimizer """adadelta""" +477 96 training_loop """owa""" +477 96 negative_sampler """basic""" +477 96 evaluator """rankbased""" +477 97 dataset """kinships""" +477 97 model """proje""" +477 97 loss """nssa""" +477 97 regularizer """no""" +477 97 optimizer """adadelta""" +477 97 training_loop """owa""" +477 97 negative_sampler """basic""" +477 97 evaluator """rankbased""" +477 98 dataset """kinships""" +477 98 model """proje""" +477 98 loss """nssa""" +477 98 regularizer """no""" +477 98 optimizer """adadelta""" +477 98 training_loop """owa""" +477 98 negative_sampler """basic""" +477 98 evaluator """rankbased""" +477 99 dataset """kinships""" +477 99 model """proje""" +477 99 loss """nssa""" +477 99 regularizer """no""" +477 99 optimizer """adadelta""" +477 99 training_loop """owa""" +477 99 negative_sampler """basic""" +477 99 evaluator """rankbased""" +477 100 dataset """kinships""" +477 100 model """proje""" +477 100 loss """nssa""" +477 100 regularizer """no""" +477 100 optimizer """adadelta""" +477 100 training_loop """owa""" +477 100 negative_sampler """basic""" +477 100 evaluator """rankbased""" +478 1 model.embedding_dim 2.0 +478 1 loss.margin 23.41655890987673 +478 1 loss.adversarial_temperature 0.4216313582245743 +478 1 negative_sampler.num_negs_per_pos 51.0 +478 1 training.batch_size 0.0 +478 2 model.embedding_dim 1.0 +478 2 loss.margin 12.735143719117932 +478 2 loss.adversarial_temperature 0.10742450984976819 +478 2 negative_sampler.num_negs_per_pos 0.0 +478 2 training.batch_size 
2.0 +478 3 model.embedding_dim 2.0 +478 3 loss.margin 13.812972211612617 +478 3 loss.adversarial_temperature 0.8085655075551259 +478 3 negative_sampler.num_negs_per_pos 17.0 +478 3 training.batch_size 1.0 +478 4 model.embedding_dim 0.0 +478 4 loss.margin 14.311914622272637 +478 4 loss.adversarial_temperature 0.24270418882869801 +478 4 negative_sampler.num_negs_per_pos 88.0 +478 4 training.batch_size 0.0 +478 5 model.embedding_dim 1.0 +478 5 loss.margin 25.276033280277492 +478 5 loss.adversarial_temperature 0.4916402381514702 +478 5 negative_sampler.num_negs_per_pos 80.0 +478 5 training.batch_size 2.0 +478 6 model.embedding_dim 0.0 +478 6 loss.margin 2.3463607105849453 +478 6 loss.adversarial_temperature 0.18558918886360593 +478 6 negative_sampler.num_negs_per_pos 90.0 +478 6 training.batch_size 1.0 +478 7 model.embedding_dim 1.0 +478 7 loss.margin 14.870036401832602 +478 7 loss.adversarial_temperature 0.16075521223217148 +478 7 negative_sampler.num_negs_per_pos 7.0 +478 7 training.batch_size 1.0 +478 8 model.embedding_dim 0.0 +478 8 loss.margin 11.728176490843726 +478 8 loss.adversarial_temperature 0.7382814904125649 +478 8 negative_sampler.num_negs_per_pos 1.0 +478 8 training.batch_size 2.0 +478 9 model.embedding_dim 1.0 +478 9 loss.margin 3.4156325733760635 +478 9 loss.adversarial_temperature 0.38330966787141874 +478 9 negative_sampler.num_negs_per_pos 76.0 +478 9 training.batch_size 1.0 +478 10 model.embedding_dim 0.0 +478 10 loss.margin 13.939350704113467 +478 10 loss.adversarial_temperature 0.6135508724462442 +478 10 negative_sampler.num_negs_per_pos 87.0 +478 10 training.batch_size 2.0 +478 11 model.embedding_dim 0.0 +478 11 loss.margin 16.57543408821794 +478 11 loss.adversarial_temperature 0.6701081953257252 +478 11 negative_sampler.num_negs_per_pos 60.0 +478 11 training.batch_size 2.0 +478 12 model.embedding_dim 0.0 +478 12 loss.margin 25.623323756738753 +478 12 loss.adversarial_temperature 0.41160071688894206 +478 12 negative_sampler.num_negs_per_pos 13.0 
+478 12 training.batch_size 2.0 +478 13 model.embedding_dim 2.0 +478 13 loss.margin 16.510140562243947 +478 13 loss.adversarial_temperature 0.429835030530506 +478 13 negative_sampler.num_negs_per_pos 63.0 +478 13 training.batch_size 0.0 +478 14 model.embedding_dim 1.0 +478 14 loss.margin 3.8745746073106644 +478 14 loss.adversarial_temperature 0.8560796439232853 +478 14 negative_sampler.num_negs_per_pos 28.0 +478 14 training.batch_size 2.0 +478 15 model.embedding_dim 0.0 +478 15 loss.margin 23.882426677952456 +478 15 loss.adversarial_temperature 0.4028633438927315 +478 15 negative_sampler.num_negs_per_pos 62.0 +478 15 training.batch_size 1.0 +478 16 model.embedding_dim 1.0 +478 16 loss.margin 13.169485038975168 +478 16 loss.adversarial_temperature 0.24371263247012823 +478 16 negative_sampler.num_negs_per_pos 76.0 +478 16 training.batch_size 2.0 +478 17 model.embedding_dim 0.0 +478 17 loss.margin 9.960533049079116 +478 17 loss.adversarial_temperature 0.7836482676626468 +478 17 negative_sampler.num_negs_per_pos 73.0 +478 17 training.batch_size 0.0 +478 18 model.embedding_dim 2.0 +478 18 loss.margin 19.235230938181996 +478 18 loss.adversarial_temperature 0.24642160617442557 +478 18 negative_sampler.num_negs_per_pos 23.0 +478 18 training.batch_size 0.0 +478 19 model.embedding_dim 0.0 +478 19 loss.margin 10.473877201986353 +478 19 loss.adversarial_temperature 0.9467699507903191 +478 19 negative_sampler.num_negs_per_pos 2.0 +478 19 training.batch_size 0.0 +478 20 model.embedding_dim 0.0 +478 20 loss.margin 5.137228745930282 +478 20 loss.adversarial_temperature 0.29550157075813915 +478 20 negative_sampler.num_negs_per_pos 78.0 +478 20 training.batch_size 1.0 +478 21 model.embedding_dim 2.0 +478 21 loss.margin 1.3876215737204027 +478 21 loss.adversarial_temperature 0.28970154953907545 +478 21 negative_sampler.num_negs_per_pos 96.0 +478 21 training.batch_size 2.0 +478 22 model.embedding_dim 1.0 +478 22 loss.margin 21.657785234082272 +478 22 loss.adversarial_temperature 
0.4193135606365802 +478 22 negative_sampler.num_negs_per_pos 88.0 +478 22 training.batch_size 2.0 +478 23 model.embedding_dim 1.0 +478 23 loss.margin 3.4529393290778416 +478 23 loss.adversarial_temperature 0.6325539471211387 +478 23 negative_sampler.num_negs_per_pos 16.0 +478 23 training.batch_size 0.0 +478 24 model.embedding_dim 2.0 +478 24 loss.margin 7.3071450009741765 +478 24 loss.adversarial_temperature 0.1523861554073079 +478 24 negative_sampler.num_negs_per_pos 85.0 +478 24 training.batch_size 1.0 +478 25 model.embedding_dim 2.0 +478 25 loss.margin 9.820530584044164 +478 25 loss.adversarial_temperature 0.19930859213539592 +478 25 negative_sampler.num_negs_per_pos 46.0 +478 25 training.batch_size 1.0 +478 26 model.embedding_dim 1.0 +478 26 loss.margin 13.102539051348378 +478 26 loss.adversarial_temperature 0.2525202259972892 +478 26 negative_sampler.num_negs_per_pos 87.0 +478 26 training.batch_size 1.0 +478 27 model.embedding_dim 1.0 +478 27 loss.margin 16.551677214482424 +478 27 loss.adversarial_temperature 0.5919781155372175 +478 27 negative_sampler.num_negs_per_pos 40.0 +478 27 training.batch_size 0.0 +478 28 model.embedding_dim 1.0 +478 28 loss.margin 17.924086106610368 +478 28 loss.adversarial_temperature 0.372434683907112 +478 28 negative_sampler.num_negs_per_pos 72.0 +478 28 training.batch_size 2.0 +478 29 model.embedding_dim 2.0 +478 29 loss.margin 21.099003435370232 +478 29 loss.adversarial_temperature 0.7485894904687199 +478 29 negative_sampler.num_negs_per_pos 40.0 +478 29 training.batch_size 0.0 +478 30 model.embedding_dim 0.0 +478 30 loss.margin 25.786162612232243 +478 30 loss.adversarial_temperature 0.8762210831377751 +478 30 negative_sampler.num_negs_per_pos 47.0 +478 30 training.batch_size 0.0 +478 31 model.embedding_dim 0.0 +478 31 loss.margin 26.357706451364805 +478 31 loss.adversarial_temperature 0.15295972004163005 +478 31 negative_sampler.num_negs_per_pos 13.0 +478 31 training.batch_size 2.0 +478 32 model.embedding_dim 0.0 +478 32 
loss.margin 20.543439928071958 +478 32 loss.adversarial_temperature 0.7986420710638522 +478 32 negative_sampler.num_negs_per_pos 12.0 +478 32 training.batch_size 1.0 +478 33 model.embedding_dim 2.0 +478 33 loss.margin 3.3256276834112133 +478 33 loss.adversarial_temperature 0.9106056030453931 +478 33 negative_sampler.num_negs_per_pos 56.0 +478 33 training.batch_size 0.0 +478 34 model.embedding_dim 2.0 +478 34 loss.margin 8.66777133856623 +478 34 loss.adversarial_temperature 0.5251450335564286 +478 34 negative_sampler.num_negs_per_pos 67.0 +478 34 training.batch_size 2.0 +478 35 model.embedding_dim 0.0 +478 35 loss.margin 6.703229195853118 +478 35 loss.adversarial_temperature 0.8149753139671175 +478 35 negative_sampler.num_negs_per_pos 65.0 +478 35 training.batch_size 2.0 +478 36 model.embedding_dim 2.0 +478 36 loss.margin 16.24325637399305 +478 36 loss.adversarial_temperature 0.7097250814293444 +478 36 negative_sampler.num_negs_per_pos 93.0 +478 36 training.batch_size 1.0 +478 37 model.embedding_dim 1.0 +478 37 loss.margin 14.31610122538569 +478 37 loss.adversarial_temperature 0.19367713970464356 +478 37 negative_sampler.num_negs_per_pos 95.0 +478 37 training.batch_size 1.0 +478 38 model.embedding_dim 1.0 +478 38 loss.margin 29.18477653302995 +478 38 loss.adversarial_temperature 0.8984312307853646 +478 38 negative_sampler.num_negs_per_pos 38.0 +478 38 training.batch_size 2.0 +478 39 model.embedding_dim 1.0 +478 39 loss.margin 14.478947590473377 +478 39 loss.adversarial_temperature 0.4218748990188351 +478 39 negative_sampler.num_negs_per_pos 66.0 +478 39 training.batch_size 0.0 +478 40 model.embedding_dim 1.0 +478 40 loss.margin 1.655083419190148 +478 40 loss.adversarial_temperature 0.6661075384460966 +478 40 negative_sampler.num_negs_per_pos 67.0 +478 40 training.batch_size 2.0 +478 41 model.embedding_dim 2.0 +478 41 loss.margin 8.78804575663531 +478 41 loss.adversarial_temperature 0.3350447180395078 +478 41 negative_sampler.num_negs_per_pos 81.0 +478 41 
training.batch_size 2.0 +478 42 model.embedding_dim 2.0 +478 42 loss.margin 7.1351052150381955 +478 42 loss.adversarial_temperature 0.16937508307601662 +478 42 negative_sampler.num_negs_per_pos 74.0 +478 42 training.batch_size 1.0 +478 43 model.embedding_dim 1.0 +478 43 loss.margin 28.323826753352872 +478 43 loss.adversarial_temperature 0.42318332626946953 +478 43 negative_sampler.num_negs_per_pos 61.0 +478 43 training.batch_size 2.0 +478 44 model.embedding_dim 0.0 +478 44 loss.margin 16.71154860418439 +478 44 loss.adversarial_temperature 0.6715818081339728 +478 44 negative_sampler.num_negs_per_pos 46.0 +478 44 training.batch_size 1.0 +478 45 model.embedding_dim 0.0 +478 45 loss.margin 5.427968475913318 +478 45 loss.adversarial_temperature 0.9024206869053053 +478 45 negative_sampler.num_negs_per_pos 30.0 +478 45 training.batch_size 2.0 +478 46 model.embedding_dim 2.0 +478 46 loss.margin 28.337907762023587 +478 46 loss.adversarial_temperature 0.15541808373319183 +478 46 negative_sampler.num_negs_per_pos 91.0 +478 46 training.batch_size 0.0 +478 47 model.embedding_dim 0.0 +478 47 loss.margin 20.560425516829692 +478 47 loss.adversarial_temperature 0.7094030802535574 +478 47 negative_sampler.num_negs_per_pos 3.0 +478 47 training.batch_size 2.0 +478 48 model.embedding_dim 2.0 +478 48 loss.margin 20.929763997941503 +478 48 loss.adversarial_temperature 0.5908679716981252 +478 48 negative_sampler.num_negs_per_pos 60.0 +478 48 training.batch_size 2.0 +478 49 model.embedding_dim 0.0 +478 49 loss.margin 17.55123790799334 +478 49 loss.adversarial_temperature 0.9729422795742272 +478 49 negative_sampler.num_negs_per_pos 15.0 +478 49 training.batch_size 2.0 +478 50 model.embedding_dim 1.0 +478 50 loss.margin 9.896727447733205 +478 50 loss.adversarial_temperature 0.4099385111590039 +478 50 negative_sampler.num_negs_per_pos 39.0 +478 50 training.batch_size 2.0 +478 51 model.embedding_dim 2.0 +478 51 loss.margin 23.927849985510406 +478 51 loss.adversarial_temperature 
0.19027237412478062 +478 51 negative_sampler.num_negs_per_pos 58.0 +478 51 training.batch_size 2.0 +478 52 model.embedding_dim 1.0 +478 52 loss.margin 8.865501957546355 +478 52 loss.adversarial_temperature 0.5212211795389583 +478 52 negative_sampler.num_negs_per_pos 90.0 +478 52 training.batch_size 1.0 +478 53 model.embedding_dim 2.0 +478 53 loss.margin 20.112652089382422 +478 53 loss.adversarial_temperature 0.3281273094418986 +478 53 negative_sampler.num_negs_per_pos 33.0 +478 53 training.batch_size 1.0 +478 54 model.embedding_dim 2.0 +478 54 loss.margin 22.250003929597117 +478 54 loss.adversarial_temperature 0.8886982996030661 +478 54 negative_sampler.num_negs_per_pos 98.0 +478 54 training.batch_size 1.0 +478 55 model.embedding_dim 0.0 +478 55 loss.margin 4.607840940684582 +478 55 loss.adversarial_temperature 0.24604164159224595 +478 55 negative_sampler.num_negs_per_pos 6.0 +478 55 training.batch_size 0.0 +478 56 model.embedding_dim 1.0 +478 56 loss.margin 13.87682369833534 +478 56 loss.adversarial_temperature 0.9291836378470217 +478 56 negative_sampler.num_negs_per_pos 38.0 +478 56 training.batch_size 0.0 +478 57 model.embedding_dim 1.0 +478 57 loss.margin 18.925690767692274 +478 57 loss.adversarial_temperature 0.3157892132096949 +478 57 negative_sampler.num_negs_per_pos 36.0 +478 57 training.batch_size 0.0 +478 58 model.embedding_dim 2.0 +478 58 loss.margin 22.543544367596787 +478 58 loss.adversarial_temperature 0.15864651089681897 +478 58 negative_sampler.num_negs_per_pos 64.0 +478 58 training.batch_size 1.0 +478 59 model.embedding_dim 2.0 +478 59 loss.margin 3.349797625008315 +478 59 loss.adversarial_temperature 0.41601684831585495 +478 59 negative_sampler.num_negs_per_pos 6.0 +478 59 training.batch_size 0.0 +478 60 model.embedding_dim 1.0 +478 60 loss.margin 5.354680829767288 +478 60 loss.adversarial_temperature 0.7415126309231204 +478 60 negative_sampler.num_negs_per_pos 76.0 +478 60 training.batch_size 1.0 +478 61 model.embedding_dim 0.0 +478 61 
loss.margin 21.245311236277455 +478 61 loss.adversarial_temperature 0.9278045653217437 +478 61 negative_sampler.num_negs_per_pos 73.0 +478 61 training.batch_size 1.0 +478 62 model.embedding_dim 2.0 +478 62 loss.margin 19.908949653350554 +478 62 loss.adversarial_temperature 0.6189685215787943 +478 62 negative_sampler.num_negs_per_pos 36.0 +478 62 training.batch_size 2.0 +478 63 model.embedding_dim 0.0 +478 63 loss.margin 2.0473812664062385 +478 63 loss.adversarial_temperature 0.4560708104994493 +478 63 negative_sampler.num_negs_per_pos 91.0 +478 63 training.batch_size 0.0 +478 64 model.embedding_dim 0.0 +478 64 loss.margin 6.62266697719344 +478 64 loss.adversarial_temperature 0.369073855966107 +478 64 negative_sampler.num_negs_per_pos 57.0 +478 64 training.batch_size 0.0 +478 65 model.embedding_dim 1.0 +478 65 loss.margin 2.6045252588612637 +478 65 loss.adversarial_temperature 0.13309041563967614 +478 65 negative_sampler.num_negs_per_pos 55.0 +478 65 training.batch_size 0.0 +478 66 model.embedding_dim 0.0 +478 66 loss.margin 8.353314892005384 +478 66 loss.adversarial_temperature 0.2947611423701346 +478 66 negative_sampler.num_negs_per_pos 57.0 +478 66 training.batch_size 0.0 +478 67 model.embedding_dim 2.0 +478 67 loss.margin 4.565835752999082 +478 67 loss.adversarial_temperature 0.7581910830350727 +478 67 negative_sampler.num_negs_per_pos 5.0 +478 67 training.batch_size 1.0 +478 68 model.embedding_dim 2.0 +478 68 loss.margin 12.970978747634833 +478 68 loss.adversarial_temperature 0.46063337480374955 +478 68 negative_sampler.num_negs_per_pos 45.0 +478 68 training.batch_size 1.0 +478 69 model.embedding_dim 2.0 +478 69 loss.margin 15.524326669673416 +478 69 loss.adversarial_temperature 0.5830781978710524 +478 69 negative_sampler.num_negs_per_pos 79.0 +478 69 training.batch_size 2.0 +478 70 model.embedding_dim 0.0 +478 70 loss.margin 29.969886053019145 +478 70 loss.adversarial_temperature 0.6636986362764773 +478 70 negative_sampler.num_negs_per_pos 16.0 +478 70 
training.batch_size 0.0 +478 71 model.embedding_dim 0.0 +478 71 loss.margin 28.28644597722964 +478 71 loss.adversarial_temperature 0.5739834423417222 +478 71 negative_sampler.num_negs_per_pos 94.0 +478 71 training.batch_size 0.0 +478 72 model.embedding_dim 1.0 +478 72 loss.margin 26.703824995138955 +478 72 loss.adversarial_temperature 0.2541005340357365 +478 72 negative_sampler.num_negs_per_pos 91.0 +478 72 training.batch_size 2.0 +478 73 model.embedding_dim 0.0 +478 73 loss.margin 16.080209142984486 +478 73 loss.adversarial_temperature 0.6537060605735325 +478 73 negative_sampler.num_negs_per_pos 43.0 +478 73 training.batch_size 1.0 +478 74 model.embedding_dim 1.0 +478 74 loss.margin 7.335452773359615 +478 74 loss.adversarial_temperature 0.7787476925110802 +478 74 negative_sampler.num_negs_per_pos 65.0 +478 74 training.batch_size 2.0 +478 75 model.embedding_dim 0.0 +478 75 loss.margin 1.0401964877574883 +478 75 loss.adversarial_temperature 0.45319292601849015 +478 75 negative_sampler.num_negs_per_pos 86.0 +478 75 training.batch_size 2.0 +478 76 model.embedding_dim 1.0 +478 76 loss.margin 29.855086712859617 +478 76 loss.adversarial_temperature 0.5032170283752097 +478 76 negative_sampler.num_negs_per_pos 74.0 +478 76 training.batch_size 2.0 +478 77 model.embedding_dim 2.0 +478 77 loss.margin 5.758700756778992 +478 77 loss.adversarial_temperature 0.9906407390758564 +478 77 negative_sampler.num_negs_per_pos 82.0 +478 77 training.batch_size 1.0 +478 78 model.embedding_dim 2.0 +478 78 loss.margin 28.35196763093556 +478 78 loss.adversarial_temperature 0.34881382039827613 +478 78 negative_sampler.num_negs_per_pos 74.0 +478 78 training.batch_size 1.0 +478 79 model.embedding_dim 1.0 +478 79 loss.margin 9.128657910840529 +478 79 loss.adversarial_temperature 0.32056764562116996 +478 79 negative_sampler.num_negs_per_pos 82.0 +478 79 training.batch_size 0.0 +478 80 model.embedding_dim 2.0 +478 80 loss.margin 10.147057163081048 +478 80 loss.adversarial_temperature 
0.45283116669356727 +478 80 negative_sampler.num_negs_per_pos 27.0 +478 80 training.batch_size 1.0 +478 81 model.embedding_dim 0.0 +478 81 loss.margin 21.513742095488848 +478 81 loss.adversarial_temperature 0.8497935092587392 +478 81 negative_sampler.num_negs_per_pos 88.0 +478 81 training.batch_size 2.0 +478 82 model.embedding_dim 1.0 +478 82 loss.margin 1.4690649078381357 +478 82 loss.adversarial_temperature 0.6130089351174212 +478 82 negative_sampler.num_negs_per_pos 8.0 +478 82 training.batch_size 1.0 +478 83 model.embedding_dim 0.0 +478 83 loss.margin 4.8897861996212155 +478 83 loss.adversarial_temperature 0.7830104994340179 +478 83 negative_sampler.num_negs_per_pos 5.0 +478 83 training.batch_size 0.0 +478 84 model.embedding_dim 0.0 +478 84 loss.margin 28.139626415421926 +478 84 loss.adversarial_temperature 0.1623763215222411 +478 84 negative_sampler.num_negs_per_pos 28.0 +478 84 training.batch_size 2.0 +478 85 model.embedding_dim 0.0 +478 85 loss.margin 10.397597616051819 +478 85 loss.adversarial_temperature 0.946790516636469 +478 85 negative_sampler.num_negs_per_pos 11.0 +478 85 training.batch_size 1.0 +478 86 model.embedding_dim 1.0 +478 86 loss.margin 23.24118712309535 +478 86 loss.adversarial_temperature 0.9282466288986543 +478 86 negative_sampler.num_negs_per_pos 34.0 +478 86 training.batch_size 1.0 +478 87 model.embedding_dim 0.0 +478 87 loss.margin 27.043931004718047 +478 87 loss.adversarial_temperature 0.9666013102735064 +478 87 negative_sampler.num_negs_per_pos 19.0 +478 87 training.batch_size 0.0 +478 88 model.embedding_dim 2.0 +478 88 loss.margin 21.953310365200572 +478 88 loss.adversarial_temperature 0.9268377626072914 +478 88 negative_sampler.num_negs_per_pos 19.0 +478 88 training.batch_size 1.0 +478 89 model.embedding_dim 1.0 +478 89 loss.margin 12.49748854963528 +478 89 loss.adversarial_temperature 0.8765623548356523 +478 89 negative_sampler.num_negs_per_pos 83.0 +478 89 training.batch_size 0.0 +478 90 model.embedding_dim 0.0 +478 90 loss.margin 
21.885418825526003 +478 90 loss.adversarial_temperature 0.30105645882144616 +478 90 negative_sampler.num_negs_per_pos 51.0 +478 90 training.batch_size 1.0 +478 91 model.embedding_dim 0.0 +478 91 loss.margin 19.941577674758395 +478 91 loss.adversarial_temperature 0.6721776992688907 +478 91 negative_sampler.num_negs_per_pos 98.0 +478 91 training.batch_size 0.0 +478 92 model.embedding_dim 1.0 +478 92 loss.margin 14.766049261190105 +478 92 loss.adversarial_temperature 0.6526635794171026 +478 92 negative_sampler.num_negs_per_pos 6.0 +478 92 training.batch_size 0.0 +478 93 model.embedding_dim 2.0 +478 93 loss.margin 24.232849664284267 +478 93 loss.adversarial_temperature 0.8180578963347366 +478 93 negative_sampler.num_negs_per_pos 33.0 +478 93 training.batch_size 2.0 +478 94 model.embedding_dim 0.0 +478 94 loss.margin 8.464475992608673 +478 94 loss.adversarial_temperature 0.11317233689259619 +478 94 negative_sampler.num_negs_per_pos 20.0 +478 94 training.batch_size 2.0 +478 95 model.embedding_dim 1.0 +478 95 loss.margin 4.003522573810439 +478 95 loss.adversarial_temperature 0.453962274456837 +478 95 negative_sampler.num_negs_per_pos 18.0 +478 95 training.batch_size 2.0 +478 96 model.embedding_dim 1.0 +478 96 loss.margin 5.323756660998312 +478 96 loss.adversarial_temperature 0.7386243144908907 +478 96 negative_sampler.num_negs_per_pos 84.0 +478 96 training.batch_size 2.0 +478 97 model.embedding_dim 0.0 +478 97 loss.margin 5.102400753380697 +478 97 loss.adversarial_temperature 0.5086816682490615 +478 97 negative_sampler.num_negs_per_pos 63.0 +478 97 training.batch_size 1.0 +478 98 model.embedding_dim 2.0 +478 98 loss.margin 15.013065400138844 +478 98 loss.adversarial_temperature 0.39872233262005263 +478 98 negative_sampler.num_negs_per_pos 54.0 +478 98 training.batch_size 0.0 +478 99 model.embedding_dim 1.0 +478 99 loss.margin 18.98043328171876 +478 99 loss.adversarial_temperature 0.8242762115847344 +478 99 negative_sampler.num_negs_per_pos 63.0 +478 99 training.batch_size 
1.0 +478 100 model.embedding_dim 1.0 +478 100 loss.margin 26.73176734950616 +478 100 loss.adversarial_temperature 0.32682594350716454 +478 100 negative_sampler.num_negs_per_pos 73.0 +478 100 training.batch_size 2.0 +478 1 dataset """kinships""" +478 1 model """proje""" +478 1 loss """nssa""" +478 1 regularizer """no""" +478 1 optimizer """adadelta""" +478 1 training_loop """owa""" +478 1 negative_sampler """basic""" +478 1 evaluator """rankbased""" +478 2 dataset """kinships""" +478 2 model """proje""" +478 2 loss """nssa""" +478 2 regularizer """no""" +478 2 optimizer """adadelta""" +478 2 training_loop """owa""" +478 2 negative_sampler """basic""" +478 2 evaluator """rankbased""" +478 3 dataset """kinships""" +478 3 model """proje""" +478 3 loss """nssa""" +478 3 regularizer """no""" +478 3 optimizer """adadelta""" +478 3 training_loop """owa""" +478 3 negative_sampler """basic""" +478 3 evaluator """rankbased""" +478 4 dataset """kinships""" +478 4 model """proje""" +478 4 loss """nssa""" +478 4 regularizer """no""" +478 4 optimizer """adadelta""" +478 4 training_loop """owa""" +478 4 negative_sampler """basic""" +478 4 evaluator """rankbased""" +478 5 dataset """kinships""" +478 5 model """proje""" +478 5 loss """nssa""" +478 5 regularizer """no""" +478 5 optimizer """adadelta""" +478 5 training_loop """owa""" +478 5 negative_sampler """basic""" +478 5 evaluator """rankbased""" +478 6 dataset """kinships""" +478 6 model """proje""" +478 6 loss """nssa""" +478 6 regularizer """no""" +478 6 optimizer """adadelta""" +478 6 training_loop """owa""" +478 6 negative_sampler """basic""" +478 6 evaluator """rankbased""" +478 7 dataset """kinships""" +478 7 model """proje""" +478 7 loss """nssa""" +478 7 regularizer """no""" +478 7 optimizer """adadelta""" +478 7 training_loop """owa""" +478 7 negative_sampler """basic""" +478 7 evaluator """rankbased""" +478 8 dataset """kinships""" +478 8 model """proje""" +478 8 loss """nssa""" +478 8 regularizer """no""" +478 8 
optimizer """adadelta""" +478 8 training_loop """owa""" +478 8 negative_sampler """basic""" +478 8 evaluator """rankbased""" +478 9 dataset """kinships""" +478 9 model """proje""" +478 9 loss """nssa""" +478 9 regularizer """no""" +478 9 optimizer """adadelta""" +478 9 training_loop """owa""" +478 9 negative_sampler """basic""" +478 9 evaluator """rankbased""" +478 10 dataset """kinships""" +478 10 model """proje""" +478 10 loss """nssa""" +478 10 regularizer """no""" +478 10 optimizer """adadelta""" +478 10 training_loop """owa""" +478 10 negative_sampler """basic""" +478 10 evaluator """rankbased""" +478 11 dataset """kinships""" +478 11 model """proje""" +478 11 loss """nssa""" +478 11 regularizer """no""" +478 11 optimizer """adadelta""" +478 11 training_loop """owa""" +478 11 negative_sampler """basic""" +478 11 evaluator """rankbased""" +478 12 dataset """kinships""" +478 12 model """proje""" +478 12 loss """nssa""" +478 12 regularizer """no""" +478 12 optimizer """adadelta""" +478 12 training_loop """owa""" +478 12 negative_sampler """basic""" +478 12 evaluator """rankbased""" +478 13 dataset """kinships""" +478 13 model """proje""" +478 13 loss """nssa""" +478 13 regularizer """no""" +478 13 optimizer """adadelta""" +478 13 training_loop """owa""" +478 13 negative_sampler """basic""" +478 13 evaluator """rankbased""" +478 14 dataset """kinships""" +478 14 model """proje""" +478 14 loss """nssa""" +478 14 regularizer """no""" +478 14 optimizer """adadelta""" +478 14 training_loop """owa""" +478 14 negative_sampler """basic""" +478 14 evaluator """rankbased""" +478 15 dataset """kinships""" +478 15 model """proje""" +478 15 loss """nssa""" +478 15 regularizer """no""" +478 15 optimizer """adadelta""" +478 15 training_loop """owa""" +478 15 negative_sampler """basic""" +478 15 evaluator """rankbased""" +478 16 dataset """kinships""" +478 16 model """proje""" +478 16 loss """nssa""" +478 16 regularizer """no""" +478 16 optimizer """adadelta""" +478 16 
training_loop """owa""" +478 16 negative_sampler """basic""" +478 16 evaluator """rankbased""" +478 17 dataset """kinships""" +478 17 model """proje""" +478 17 loss """nssa""" +478 17 regularizer """no""" +478 17 optimizer """adadelta""" +478 17 training_loop """owa""" +478 17 negative_sampler """basic""" +478 17 evaluator """rankbased""" +478 18 dataset """kinships""" +478 18 model """proje""" +478 18 loss """nssa""" +478 18 regularizer """no""" +478 18 optimizer """adadelta""" +478 18 training_loop """owa""" +478 18 negative_sampler """basic""" +478 18 evaluator """rankbased""" +478 19 dataset """kinships""" +478 19 model """proje""" +478 19 loss """nssa""" +478 19 regularizer """no""" +478 19 optimizer """adadelta""" +478 19 training_loop """owa""" +478 19 negative_sampler """basic""" +478 19 evaluator """rankbased""" +478 20 dataset """kinships""" +478 20 model """proje""" +478 20 loss """nssa""" +478 20 regularizer """no""" +478 20 optimizer """adadelta""" +478 20 training_loop """owa""" +478 20 negative_sampler """basic""" +478 20 evaluator """rankbased""" +478 21 dataset """kinships""" +478 21 model """proje""" +478 21 loss """nssa""" +478 21 regularizer """no""" +478 21 optimizer """adadelta""" +478 21 training_loop """owa""" +478 21 negative_sampler """basic""" +478 21 evaluator """rankbased""" +478 22 dataset """kinships""" +478 22 model """proje""" +478 22 loss """nssa""" +478 22 regularizer """no""" +478 22 optimizer """adadelta""" +478 22 training_loop """owa""" +478 22 negative_sampler """basic""" +478 22 evaluator """rankbased""" +478 23 dataset """kinships""" +478 23 model """proje""" +478 23 loss """nssa""" +478 23 regularizer """no""" +478 23 optimizer """adadelta""" +478 23 training_loop """owa""" +478 23 negative_sampler """basic""" +478 23 evaluator """rankbased""" +478 24 dataset """kinships""" +478 24 model """proje""" +478 24 loss """nssa""" +478 24 regularizer """no""" +478 24 optimizer """adadelta""" +478 24 training_loop """owa""" +478 24 
negative_sampler """basic""" +478 24 evaluator """rankbased""" +478 25 dataset """kinships""" +478 25 model """proje""" +478 25 loss """nssa""" +478 25 regularizer """no""" +478 25 optimizer """adadelta""" +478 25 training_loop """owa""" +478 25 negative_sampler """basic""" +478 25 evaluator """rankbased""" +478 26 dataset """kinships""" +478 26 model """proje""" +478 26 loss """nssa""" +478 26 regularizer """no""" +478 26 optimizer """adadelta""" +478 26 training_loop """owa""" +478 26 negative_sampler """basic""" +478 26 evaluator """rankbased""" +478 27 dataset """kinships""" +478 27 model """proje""" +478 27 loss """nssa""" +478 27 regularizer """no""" +478 27 optimizer """adadelta""" +478 27 training_loop """owa""" +478 27 negative_sampler """basic""" +478 27 evaluator """rankbased""" +478 28 dataset """kinships""" +478 28 model """proje""" +478 28 loss """nssa""" +478 28 regularizer """no""" +478 28 optimizer """adadelta""" +478 28 training_loop """owa""" +478 28 negative_sampler """basic""" +478 28 evaluator """rankbased""" +478 29 dataset """kinships""" +478 29 model """proje""" +478 29 loss """nssa""" +478 29 regularizer """no""" +478 29 optimizer """adadelta""" +478 29 training_loop """owa""" +478 29 negative_sampler """basic""" +478 29 evaluator """rankbased""" +478 30 dataset """kinships""" +478 30 model """proje""" +478 30 loss """nssa""" +478 30 regularizer """no""" +478 30 optimizer """adadelta""" +478 30 training_loop """owa""" +478 30 negative_sampler """basic""" +478 30 evaluator """rankbased""" +478 31 dataset """kinships""" +478 31 model """proje""" +478 31 loss """nssa""" +478 31 regularizer """no""" +478 31 optimizer """adadelta""" +478 31 training_loop """owa""" +478 31 negative_sampler """basic""" +478 31 evaluator """rankbased""" +478 32 dataset """kinships""" +478 32 model """proje""" +478 32 loss """nssa""" +478 32 regularizer """no""" +478 32 optimizer """adadelta""" +478 32 training_loop """owa""" +478 32 negative_sampler """basic""" 
+478 32 evaluator """rankbased""" +478 33 dataset """kinships""" +478 33 model """proje""" +478 33 loss """nssa""" +478 33 regularizer """no""" +478 33 optimizer """adadelta""" +478 33 training_loop """owa""" +478 33 negative_sampler """basic""" +478 33 evaluator """rankbased""" +478 34 dataset """kinships""" +478 34 model """proje""" +478 34 loss """nssa""" +478 34 regularizer """no""" +478 34 optimizer """adadelta""" +478 34 training_loop """owa""" +478 34 negative_sampler """basic""" +478 34 evaluator """rankbased""" +478 35 dataset """kinships""" +478 35 model """proje""" +478 35 loss """nssa""" +478 35 regularizer """no""" +478 35 optimizer """adadelta""" +478 35 training_loop """owa""" +478 35 negative_sampler """basic""" +478 35 evaluator """rankbased""" +478 36 dataset """kinships""" +478 36 model """proje""" +478 36 loss """nssa""" +478 36 regularizer """no""" +478 36 optimizer """adadelta""" +478 36 training_loop """owa""" +478 36 negative_sampler """basic""" +478 36 evaluator """rankbased""" +478 37 dataset """kinships""" +478 37 model """proje""" +478 37 loss """nssa""" +478 37 regularizer """no""" +478 37 optimizer """adadelta""" +478 37 training_loop """owa""" +478 37 negative_sampler """basic""" +478 37 evaluator """rankbased""" +478 38 dataset """kinships""" +478 38 model """proje""" +478 38 loss """nssa""" +478 38 regularizer """no""" +478 38 optimizer """adadelta""" +478 38 training_loop """owa""" +478 38 negative_sampler """basic""" +478 38 evaluator """rankbased""" +478 39 dataset """kinships""" +478 39 model """proje""" +478 39 loss """nssa""" +478 39 regularizer """no""" +478 39 optimizer """adadelta""" +478 39 training_loop """owa""" +478 39 negative_sampler """basic""" +478 39 evaluator """rankbased""" +478 40 dataset """kinships""" +478 40 model """proje""" +478 40 loss """nssa""" +478 40 regularizer """no""" +478 40 optimizer """adadelta""" +478 40 training_loop """owa""" +478 40 negative_sampler """basic""" +478 40 evaluator 
"""rankbased""" +478 41 dataset """kinships""" +478 41 model """proje""" +478 41 loss """nssa""" +478 41 regularizer """no""" +478 41 optimizer """adadelta""" +478 41 training_loop """owa""" +478 41 negative_sampler """basic""" +478 41 evaluator """rankbased""" +478 42 dataset """kinships""" +478 42 model """proje""" +478 42 loss """nssa""" +478 42 regularizer """no""" +478 42 optimizer """adadelta""" +478 42 training_loop """owa""" +478 42 negative_sampler """basic""" +478 42 evaluator """rankbased""" +478 43 dataset """kinships""" +478 43 model """proje""" +478 43 loss """nssa""" +478 43 regularizer """no""" +478 43 optimizer """adadelta""" +478 43 training_loop """owa""" +478 43 negative_sampler """basic""" +478 43 evaluator """rankbased""" +478 44 dataset """kinships""" +478 44 model """proje""" +478 44 loss """nssa""" +478 44 regularizer """no""" +478 44 optimizer """adadelta""" +478 44 training_loop """owa""" +478 44 negative_sampler """basic""" +478 44 evaluator """rankbased""" +478 45 dataset """kinships""" +478 45 model """proje""" +478 45 loss """nssa""" +478 45 regularizer """no""" +478 45 optimizer """adadelta""" +478 45 training_loop """owa""" +478 45 negative_sampler """basic""" +478 45 evaluator """rankbased""" +478 46 dataset """kinships""" +478 46 model """proje""" +478 46 loss """nssa""" +478 46 regularizer """no""" +478 46 optimizer """adadelta""" +478 46 training_loop """owa""" +478 46 negative_sampler """basic""" +478 46 evaluator """rankbased""" +478 47 dataset """kinships""" +478 47 model """proje""" +478 47 loss """nssa""" +478 47 regularizer """no""" +478 47 optimizer """adadelta""" +478 47 training_loop """owa""" +478 47 negative_sampler """basic""" +478 47 evaluator """rankbased""" +478 48 dataset """kinships""" +478 48 model """proje""" +478 48 loss """nssa""" +478 48 regularizer """no""" +478 48 optimizer """adadelta""" +478 48 training_loop """owa""" +478 48 negative_sampler """basic""" +478 48 evaluator """rankbased""" +478 49 dataset 
"""kinships""" +478 49 model """proje""" +478 49 loss """nssa""" +478 49 regularizer """no""" +478 49 optimizer """adadelta""" +478 49 training_loop """owa""" +478 49 negative_sampler """basic""" +478 49 evaluator """rankbased""" +478 50 dataset """kinships""" +478 50 model """proje""" +478 50 loss """nssa""" +478 50 regularizer """no""" +478 50 optimizer """adadelta""" +478 50 training_loop """owa""" +478 50 negative_sampler """basic""" +478 50 evaluator """rankbased""" +478 51 dataset """kinships""" +478 51 model """proje""" +478 51 loss """nssa""" +478 51 regularizer """no""" +478 51 optimizer """adadelta""" +478 51 training_loop """owa""" +478 51 negative_sampler """basic""" +478 51 evaluator """rankbased""" +478 52 dataset """kinships""" +478 52 model """proje""" +478 52 loss """nssa""" +478 52 regularizer """no""" +478 52 optimizer """adadelta""" +478 52 training_loop """owa""" +478 52 negative_sampler """basic""" +478 52 evaluator """rankbased""" +478 53 dataset """kinships""" +478 53 model """proje""" +478 53 loss """nssa""" +478 53 regularizer """no""" +478 53 optimizer """adadelta""" +478 53 training_loop """owa""" +478 53 negative_sampler """basic""" +478 53 evaluator """rankbased""" +478 54 dataset """kinships""" +478 54 model """proje""" +478 54 loss """nssa""" +478 54 regularizer """no""" +478 54 optimizer """adadelta""" +478 54 training_loop """owa""" +478 54 negative_sampler """basic""" +478 54 evaluator """rankbased""" +478 55 dataset """kinships""" +478 55 model """proje""" +478 55 loss """nssa""" +478 55 regularizer """no""" +478 55 optimizer """adadelta""" +478 55 training_loop """owa""" +478 55 negative_sampler """basic""" +478 55 evaluator """rankbased""" +478 56 dataset """kinships""" +478 56 model """proje""" +478 56 loss """nssa""" +478 56 regularizer """no""" +478 56 optimizer """adadelta""" +478 56 training_loop """owa""" +478 56 negative_sampler """basic""" +478 56 evaluator """rankbased""" +478 57 dataset """kinships""" +478 57 model 
"""proje""" +478 57 loss """nssa""" +478 57 regularizer """no""" +478 57 optimizer """adadelta""" +478 57 training_loop """owa""" +478 57 negative_sampler """basic""" +478 57 evaluator """rankbased""" +478 58 dataset """kinships""" +478 58 model """proje""" +478 58 loss """nssa""" +478 58 regularizer """no""" +478 58 optimizer """adadelta""" +478 58 training_loop """owa""" +478 58 negative_sampler """basic""" +478 58 evaluator """rankbased""" +478 59 dataset """kinships""" +478 59 model """proje""" +478 59 loss """nssa""" +478 59 regularizer """no""" +478 59 optimizer """adadelta""" +478 59 training_loop """owa""" +478 59 negative_sampler """basic""" +478 59 evaluator """rankbased""" +478 60 dataset """kinships""" +478 60 model """proje""" +478 60 loss """nssa""" +478 60 regularizer """no""" +478 60 optimizer """adadelta""" +478 60 training_loop """owa""" +478 60 negative_sampler """basic""" +478 60 evaluator """rankbased""" +478 61 dataset """kinships""" +478 61 model """proje""" +478 61 loss """nssa""" +478 61 regularizer """no""" +478 61 optimizer """adadelta""" +478 61 training_loop """owa""" +478 61 negative_sampler """basic""" +478 61 evaluator """rankbased""" +478 62 dataset """kinships""" +478 62 model """proje""" +478 62 loss """nssa""" +478 62 regularizer """no""" +478 62 optimizer """adadelta""" +478 62 training_loop """owa""" +478 62 negative_sampler """basic""" +478 62 evaluator """rankbased""" +478 63 dataset """kinships""" +478 63 model """proje""" +478 63 loss """nssa""" +478 63 regularizer """no""" +478 63 optimizer """adadelta""" +478 63 training_loop """owa""" +478 63 negative_sampler """basic""" +478 63 evaluator """rankbased""" +478 64 dataset """kinships""" +478 64 model """proje""" +478 64 loss """nssa""" +478 64 regularizer """no""" +478 64 optimizer """adadelta""" +478 64 training_loop """owa""" +478 64 negative_sampler """basic""" +478 64 evaluator """rankbased""" +478 65 dataset """kinships""" +478 65 model """proje""" +478 65 loss 
"""nssa""" +478 65 regularizer """no""" +478 65 optimizer """adadelta""" +478 65 training_loop """owa""" +478 65 negative_sampler """basic""" +478 65 evaluator """rankbased""" +478 66 dataset """kinships""" +478 66 model """proje""" +478 66 loss """nssa""" +478 66 regularizer """no""" +478 66 optimizer """adadelta""" +478 66 training_loop """owa""" +478 66 negative_sampler """basic""" +478 66 evaluator """rankbased""" +478 67 dataset """kinships""" +478 67 model """proje""" +478 67 loss """nssa""" +478 67 regularizer """no""" +478 67 optimizer """adadelta""" +478 67 training_loop """owa""" +478 67 negative_sampler """basic""" +478 67 evaluator """rankbased""" +478 68 dataset """kinships""" +478 68 model """proje""" +478 68 loss """nssa""" +478 68 regularizer """no""" +478 68 optimizer """adadelta""" +478 68 training_loop """owa""" +478 68 negative_sampler """basic""" +478 68 evaluator """rankbased""" +478 69 dataset """kinships""" +478 69 model """proje""" +478 69 loss """nssa""" +478 69 regularizer """no""" +478 69 optimizer """adadelta""" +478 69 training_loop """owa""" +478 69 negative_sampler """basic""" +478 69 evaluator """rankbased""" +478 70 dataset """kinships""" +478 70 model """proje""" +478 70 loss """nssa""" +478 70 regularizer """no""" +478 70 optimizer """adadelta""" +478 70 training_loop """owa""" +478 70 negative_sampler """basic""" +478 70 evaluator """rankbased""" +478 71 dataset """kinships""" +478 71 model """proje""" +478 71 loss """nssa""" +478 71 regularizer """no""" +478 71 optimizer """adadelta""" +478 71 training_loop """owa""" +478 71 negative_sampler """basic""" +478 71 evaluator """rankbased""" +478 72 dataset """kinships""" +478 72 model """proje""" +478 72 loss """nssa""" +478 72 regularizer """no""" +478 72 optimizer """adadelta""" +478 72 training_loop """owa""" +478 72 negative_sampler """basic""" +478 72 evaluator """rankbased""" +478 73 dataset """kinships""" +478 73 model """proje""" +478 73 loss """nssa""" +478 73 regularizer 
"""no""" +478 73 optimizer """adadelta""" +478 73 training_loop """owa""" +478 73 negative_sampler """basic""" +478 73 evaluator """rankbased""" +478 74 dataset """kinships""" +478 74 model """proje""" +478 74 loss """nssa""" +478 74 regularizer """no""" +478 74 optimizer """adadelta""" +478 74 training_loop """owa""" +478 74 negative_sampler """basic""" +478 74 evaluator """rankbased""" +478 75 dataset """kinships""" +478 75 model """proje""" +478 75 loss """nssa""" +478 75 regularizer """no""" +478 75 optimizer """adadelta""" +478 75 training_loop """owa""" +478 75 negative_sampler """basic""" +478 75 evaluator """rankbased""" +478 76 dataset """kinships""" +478 76 model """proje""" +478 76 loss """nssa""" +478 76 regularizer """no""" +478 76 optimizer """adadelta""" +478 76 training_loop """owa""" +478 76 negative_sampler """basic""" +478 76 evaluator """rankbased""" +478 77 dataset """kinships""" +478 77 model """proje""" +478 77 loss """nssa""" +478 77 regularizer """no""" +478 77 optimizer """adadelta""" +478 77 training_loop """owa""" +478 77 negative_sampler """basic""" +478 77 evaluator """rankbased""" +478 78 dataset """kinships""" +478 78 model """proje""" +478 78 loss """nssa""" +478 78 regularizer """no""" +478 78 optimizer """adadelta""" +478 78 training_loop """owa""" +478 78 negative_sampler """basic""" +478 78 evaluator """rankbased""" +478 79 dataset """kinships""" +478 79 model """proje""" +478 79 loss """nssa""" +478 79 regularizer """no""" +478 79 optimizer """adadelta""" +478 79 training_loop """owa""" +478 79 negative_sampler """basic""" +478 79 evaluator """rankbased""" +478 80 dataset """kinships""" +478 80 model """proje""" +478 80 loss """nssa""" +478 80 regularizer """no""" +478 80 optimizer """adadelta""" +478 80 training_loop """owa""" +478 80 negative_sampler """basic""" +478 80 evaluator """rankbased""" +478 81 dataset """kinships""" +478 81 model """proje""" +478 81 loss """nssa""" +478 81 regularizer """no""" +478 81 optimizer 
"""adadelta""" +478 81 training_loop """owa""" +478 81 negative_sampler """basic""" +478 81 evaluator """rankbased""" +478 82 dataset """kinships""" +478 82 model """proje""" +478 82 loss """nssa""" +478 82 regularizer """no""" +478 82 optimizer """adadelta""" +478 82 training_loop """owa""" +478 82 negative_sampler """basic""" +478 82 evaluator """rankbased""" +478 83 dataset """kinships""" +478 83 model """proje""" +478 83 loss """nssa""" +478 83 regularizer """no""" +478 83 optimizer """adadelta""" +478 83 training_loop """owa""" +478 83 negative_sampler """basic""" +478 83 evaluator """rankbased""" +478 84 dataset """kinships""" +478 84 model """proje""" +478 84 loss """nssa""" +478 84 regularizer """no""" +478 84 optimizer """adadelta""" +478 84 training_loop """owa""" +478 84 negative_sampler """basic""" +478 84 evaluator """rankbased""" +478 85 dataset """kinships""" +478 85 model """proje""" +478 85 loss """nssa""" +478 85 regularizer """no""" +478 85 optimizer """adadelta""" +478 85 training_loop """owa""" +478 85 negative_sampler """basic""" +478 85 evaluator """rankbased""" +478 86 dataset """kinships""" +478 86 model """proje""" +478 86 loss """nssa""" +478 86 regularizer """no""" +478 86 optimizer """adadelta""" +478 86 training_loop """owa""" +478 86 negative_sampler """basic""" +478 86 evaluator """rankbased""" +478 87 dataset """kinships""" +478 87 model """proje""" +478 87 loss """nssa""" +478 87 regularizer """no""" +478 87 optimizer """adadelta""" +478 87 training_loop """owa""" +478 87 negative_sampler """basic""" +478 87 evaluator """rankbased""" +478 88 dataset """kinships""" +478 88 model """proje""" +478 88 loss """nssa""" +478 88 regularizer """no""" +478 88 optimizer """adadelta""" +478 88 training_loop """owa""" +478 88 negative_sampler """basic""" +478 88 evaluator """rankbased""" +478 89 dataset """kinships""" +478 89 model """proje""" +478 89 loss """nssa""" +478 89 regularizer """no""" +478 89 optimizer """adadelta""" +478 89 
training_loop """owa""" +478 89 negative_sampler """basic""" +478 89 evaluator """rankbased""" +478 90 dataset """kinships""" +478 90 model """proje""" +478 90 loss """nssa""" +478 90 regularizer """no""" +478 90 optimizer """adadelta""" +478 90 training_loop """owa""" +478 90 negative_sampler """basic""" +478 90 evaluator """rankbased""" +478 91 dataset """kinships""" +478 91 model """proje""" +478 91 loss """nssa""" +478 91 regularizer """no""" +478 91 optimizer """adadelta""" +478 91 training_loop """owa""" +478 91 negative_sampler """basic""" +478 91 evaluator """rankbased""" +478 92 dataset """kinships""" +478 92 model """proje""" +478 92 loss """nssa""" +478 92 regularizer """no""" +478 92 optimizer """adadelta""" +478 92 training_loop """owa""" +478 92 negative_sampler """basic""" +478 92 evaluator """rankbased""" +478 93 dataset """kinships""" +478 93 model """proje""" +478 93 loss """nssa""" +478 93 regularizer """no""" +478 93 optimizer """adadelta""" +478 93 training_loop """owa""" +478 93 negative_sampler """basic""" +478 93 evaluator """rankbased""" +478 94 dataset """kinships""" +478 94 model """proje""" +478 94 loss """nssa""" +478 94 regularizer """no""" +478 94 optimizer """adadelta""" +478 94 training_loop """owa""" +478 94 negative_sampler """basic""" +478 94 evaluator """rankbased""" +478 95 dataset """kinships""" +478 95 model """proje""" +478 95 loss """nssa""" +478 95 regularizer """no""" +478 95 optimizer """adadelta""" +478 95 training_loop """owa""" +478 95 negative_sampler """basic""" +478 95 evaluator """rankbased""" +478 96 dataset """kinships""" +478 96 model """proje""" +478 96 loss """nssa""" +478 96 regularizer """no""" +478 96 optimizer """adadelta""" +478 96 training_loop """owa""" +478 96 negative_sampler """basic""" +478 96 evaluator """rankbased""" +478 97 dataset """kinships""" +478 97 model """proje""" +478 97 loss """nssa""" +478 97 regularizer """no""" +478 97 optimizer """adadelta""" +478 97 training_loop """owa""" +478 97 
negative_sampler """basic""" +478 97 evaluator """rankbased""" +478 98 dataset """kinships""" +478 98 model """proje""" +478 98 loss """nssa""" +478 98 regularizer """no""" +478 98 optimizer """adadelta""" +478 98 training_loop """owa""" +478 98 negative_sampler """basic""" +478 98 evaluator """rankbased""" +478 99 dataset """kinships""" +478 99 model """proje""" +478 99 loss """nssa""" +478 99 regularizer """no""" +478 99 optimizer """adadelta""" +478 99 training_loop """owa""" +478 99 negative_sampler """basic""" +478 99 evaluator """rankbased""" +478 100 dataset """kinships""" +478 100 model """proje""" +478 100 loss """nssa""" +478 100 regularizer """no""" +478 100 optimizer """adadelta""" +478 100 training_loop """owa""" +478 100 negative_sampler """basic""" +478 100 evaluator """rankbased""" +479 1 model.embedding_dim 0.0 +479 1 optimizer.lr 0.010155057720871703 +479 1 training.batch_size 2.0 +479 1 training.label_smoothing 0.006498830557496166 +479 2 model.embedding_dim 0.0 +479 2 optimizer.lr 0.020358541114955712 +479 2 training.batch_size 0.0 +479 2 training.label_smoothing 0.10764748447921979 +479 3 model.embedding_dim 0.0 +479 3 optimizer.lr 0.010913683123285887 +479 3 training.batch_size 1.0 +479 3 training.label_smoothing 0.2873026872595866 +479 4 model.embedding_dim 2.0 +479 4 optimizer.lr 0.0017193694142314396 +479 4 training.batch_size 0.0 +479 4 training.label_smoothing 0.0026499064389122304 +479 5 model.embedding_dim 2.0 +479 5 optimizer.lr 0.0015992527912359067 +479 5 training.batch_size 1.0 +479 5 training.label_smoothing 0.05973153816855227 +479 6 model.embedding_dim 1.0 +479 6 optimizer.lr 0.0013952555481559542 +479 6 training.batch_size 0.0 +479 6 training.label_smoothing 0.43639150553626976 +479 7 model.embedding_dim 1.0 +479 7 optimizer.lr 0.010319983728403547 +479 7 training.batch_size 2.0 +479 7 training.label_smoothing 0.29097699246479936 +479 8 model.embedding_dim 0.0 +479 8 optimizer.lr 0.01362151852152823 +479 8 training.batch_size 
0.0 +479 8 training.label_smoothing 0.4820744100893422 +479 9 model.embedding_dim 2.0 +479 9 optimizer.lr 0.015255456804989864 +479 9 training.batch_size 2.0 +479 9 training.label_smoothing 0.004525398655650455 +479 10 model.embedding_dim 0.0 +479 10 optimizer.lr 0.0022204359177676065 +479 10 training.batch_size 1.0 +479 10 training.label_smoothing 0.00234306133564899 +479 11 model.embedding_dim 2.0 +479 11 optimizer.lr 0.00681316945042849 +479 11 training.batch_size 2.0 +479 11 training.label_smoothing 0.6762351087777133 +479 12 model.embedding_dim 2.0 +479 12 optimizer.lr 0.003109867705694562 +479 12 training.batch_size 1.0 +479 12 training.label_smoothing 0.21928497111248188 +479 13 model.embedding_dim 1.0 +479 13 optimizer.lr 0.0012840342692937148 +479 13 training.batch_size 1.0 +479 13 training.label_smoothing 0.03325508389895047 +479 14 model.embedding_dim 2.0 +479 14 optimizer.lr 0.0020785052029927012 +479 14 training.batch_size 0.0 +479 14 training.label_smoothing 0.07181145233045165 +479 15 model.embedding_dim 0.0 +479 15 optimizer.lr 0.031920264212011315 +479 15 training.batch_size 0.0 +479 15 training.label_smoothing 0.36033929021045213 +479 16 model.embedding_dim 0.0 +479 16 optimizer.lr 0.0193581940511438 +479 16 training.batch_size 0.0 +479 16 training.label_smoothing 0.004136846535143857 +479 17 model.embedding_dim 0.0 +479 17 optimizer.lr 0.018680441609642058 +479 17 training.batch_size 2.0 +479 17 training.label_smoothing 0.07337628078834417 +479 18 model.embedding_dim 0.0 +479 18 optimizer.lr 0.00458125265867227 +479 18 training.batch_size 1.0 +479 18 training.label_smoothing 0.002708918429807844 +479 19 model.embedding_dim 0.0 +479 19 optimizer.lr 0.011815301668328225 +479 19 training.batch_size 2.0 +479 19 training.label_smoothing 0.23499502808032433 +479 20 model.embedding_dim 2.0 +479 20 optimizer.lr 0.007861172417121533 +479 20 training.batch_size 0.0 +479 20 training.label_smoothing 0.5552819761977164 +479 21 model.embedding_dim 0.0 +479 21 
optimizer.lr 0.005829518394958458 +479 21 training.batch_size 0.0 +479 21 training.label_smoothing 0.0011430646978865878 +479 22 model.embedding_dim 2.0 +479 22 optimizer.lr 0.00565452247400801 +479 22 training.batch_size 0.0 +479 22 training.label_smoothing 0.06084895684499635 +479 23 model.embedding_dim 0.0 +479 23 optimizer.lr 0.028009965649144763 +479 23 training.batch_size 1.0 +479 23 training.label_smoothing 0.042061249878765736 +479 24 model.embedding_dim 1.0 +479 24 optimizer.lr 0.02575085193758923 +479 24 training.batch_size 2.0 +479 24 training.label_smoothing 0.06604816215169078 +479 25 model.embedding_dim 2.0 +479 25 optimizer.lr 0.053333219880904746 +479 25 training.batch_size 1.0 +479 25 training.label_smoothing 0.3924252851841737 +479 26 model.embedding_dim 0.0 +479 26 optimizer.lr 0.0030786157451702994 +479 26 training.batch_size 2.0 +479 26 training.label_smoothing 0.0026577289685160315 +479 27 model.embedding_dim 0.0 +479 27 optimizer.lr 0.003385685459248607 +479 27 training.batch_size 2.0 +479 27 training.label_smoothing 0.20526299692175817 +479 28 model.embedding_dim 1.0 +479 28 optimizer.lr 0.0037557954735746506 +479 28 training.batch_size 1.0 +479 28 training.label_smoothing 0.009967783092425376 +479 29 model.embedding_dim 2.0 +479 29 optimizer.lr 0.0016317527644361272 +479 29 training.batch_size 1.0 +479 29 training.label_smoothing 0.024019194863873737 +479 30 model.embedding_dim 2.0 +479 30 optimizer.lr 0.002285235431997954 +479 30 training.batch_size 1.0 +479 30 training.label_smoothing 0.07688324238824608 +479 31 model.embedding_dim 2.0 +479 31 optimizer.lr 0.0010333372733650579 +479 31 training.batch_size 0.0 +479 31 training.label_smoothing 0.21461267031694337 +479 32 model.embedding_dim 2.0 +479 32 optimizer.lr 0.0129581589504491 +479 32 training.batch_size 1.0 +479 32 training.label_smoothing 0.009336335272435856 +479 33 model.embedding_dim 0.0 +479 33 optimizer.lr 0.0032864777950434455 +479 33 training.batch_size 0.0 +479 33 
training.label_smoothing 0.004890402302342416 +479 34 model.embedding_dim 0.0 +479 34 optimizer.lr 0.0061842743459119215 +479 34 training.batch_size 2.0 +479 34 training.label_smoothing 0.10390170698490439 +479 35 model.embedding_dim 1.0 +479 35 optimizer.lr 0.06412620243316566 +479 35 training.batch_size 2.0 +479 35 training.label_smoothing 0.11729036458282774 +479 36 model.embedding_dim 2.0 +479 36 optimizer.lr 0.06978387715452128 +479 36 training.batch_size 2.0 +479 36 training.label_smoothing 0.7149330973452275 +479 37 model.embedding_dim 1.0 +479 37 optimizer.lr 0.017233280513964024 +479 37 training.batch_size 2.0 +479 37 training.label_smoothing 0.7321500366384064 +479 38 model.embedding_dim 0.0 +479 38 optimizer.lr 0.031537732766597354 +479 38 training.batch_size 1.0 +479 38 training.label_smoothing 0.006197452143446514 +479 39 model.embedding_dim 0.0 +479 39 optimizer.lr 0.011193012351171124 +479 39 training.batch_size 1.0 +479 39 training.label_smoothing 0.44473675582876404 +479 40 model.embedding_dim 2.0 +479 40 optimizer.lr 0.007631318891024679 +479 40 training.batch_size 2.0 +479 40 training.label_smoothing 0.03052643164152715 +479 41 model.embedding_dim 0.0 +479 41 optimizer.lr 0.043880442945905304 +479 41 training.batch_size 0.0 +479 41 training.label_smoothing 0.004047439784595926 +479 42 model.embedding_dim 0.0 +479 42 optimizer.lr 0.0018394422236730323 +479 42 training.batch_size 2.0 +479 42 training.label_smoothing 0.011892696070384958 +479 43 model.embedding_dim 1.0 +479 43 optimizer.lr 0.0016298201665884615 +479 43 training.batch_size 0.0 +479 43 training.label_smoothing 0.11996375819485337 +479 44 model.embedding_dim 2.0 +479 44 optimizer.lr 0.02228498883038592 +479 44 training.batch_size 1.0 +479 44 training.label_smoothing 0.0029570651898945853 +479 45 model.embedding_dim 2.0 +479 45 optimizer.lr 0.001863355124243472 +479 45 training.batch_size 1.0 +479 45 training.label_smoothing 0.7612448059041947 +479 46 model.embedding_dim 0.0 +479 46 
optimizer.lr 0.06102884683983521 +479 46 training.batch_size 2.0 +479 46 training.label_smoothing 0.010633932494702816 +479 47 model.embedding_dim 0.0 +479 47 optimizer.lr 0.006812555829988259 +479 47 training.batch_size 0.0 +479 47 training.label_smoothing 0.002636958813069625 +479 48 model.embedding_dim 0.0 +479 48 optimizer.lr 0.05515434991158439 +479 48 training.batch_size 1.0 +479 48 training.label_smoothing 0.002327090538485713 +479 49 model.embedding_dim 0.0 +479 49 optimizer.lr 0.08593795470332528 +479 49 training.batch_size 2.0 +479 49 training.label_smoothing 0.004704696115790298 +479 50 model.embedding_dim 2.0 +479 50 optimizer.lr 0.06470740431150515 +479 50 training.batch_size 0.0 +479 50 training.label_smoothing 0.12534047801737289 +479 51 model.embedding_dim 0.0 +479 51 optimizer.lr 0.0022613017579084867 +479 51 training.batch_size 0.0 +479 51 training.label_smoothing 0.02456415146176681 +479 52 model.embedding_dim 0.0 +479 52 optimizer.lr 0.04510914922457799 +479 52 training.batch_size 2.0 +479 52 training.label_smoothing 0.1295769119697301 +479 53 model.embedding_dim 2.0 +479 53 optimizer.lr 0.006640972831736437 +479 53 training.batch_size 0.0 +479 53 training.label_smoothing 0.5280779270691327 +479 54 model.embedding_dim 2.0 +479 54 optimizer.lr 0.00892366836519538 +479 54 training.batch_size 1.0 +479 54 training.label_smoothing 0.002850927767304824 +479 55 model.embedding_dim 1.0 +479 55 optimizer.lr 0.0014363450586977365 +479 55 training.batch_size 1.0 +479 55 training.label_smoothing 0.2642113371541475 +479 56 model.embedding_dim 2.0 +479 56 optimizer.lr 0.003565575709965883 +479 56 training.batch_size 0.0 +479 56 training.label_smoothing 0.10248380662216547 +479 57 model.embedding_dim 1.0 +479 57 optimizer.lr 0.021755566173150633 +479 57 training.batch_size 0.0 +479 57 training.label_smoothing 0.003613933021672566 +479 58 model.embedding_dim 1.0 +479 58 optimizer.lr 0.009369338469226673 +479 58 training.batch_size 2.0 +479 58 
training.label_smoothing 0.017515511331147274 +479 59 model.embedding_dim 1.0 +479 59 optimizer.lr 0.06663386426794973 +479 59 training.batch_size 2.0 +479 59 training.label_smoothing 0.002659833899296864 +479 60 model.embedding_dim 0.0 +479 60 optimizer.lr 0.02837325173686705 +479 60 training.batch_size 2.0 +479 60 training.label_smoothing 0.011309462197378396 +479 61 model.embedding_dim 0.0 +479 61 optimizer.lr 0.024971165378634103 +479 61 training.batch_size 0.0 +479 61 training.label_smoothing 0.005372968200400626 +479 62 model.embedding_dim 0.0 +479 62 optimizer.lr 0.06183595111565171 +479 62 training.batch_size 2.0 +479 62 training.label_smoothing 0.02424159977818139 +479 63 model.embedding_dim 2.0 +479 63 optimizer.lr 0.0048540729777344725 +479 63 training.batch_size 2.0 +479 63 training.label_smoothing 0.24479748804647106 +479 64 model.embedding_dim 2.0 +479 64 optimizer.lr 0.008086640924321051 +479 64 training.batch_size 0.0 +479 64 training.label_smoothing 0.01972682110183078 +479 65 model.embedding_dim 1.0 +479 65 optimizer.lr 0.05775364000682933 +479 65 training.batch_size 2.0 +479 65 training.label_smoothing 0.04702402411595751 +479 66 model.embedding_dim 0.0 +479 66 optimizer.lr 0.04308338754696798 +479 66 training.batch_size 0.0 +479 66 training.label_smoothing 0.008595970429121856 +479 67 model.embedding_dim 2.0 +479 67 optimizer.lr 0.008725837008695291 +479 67 training.batch_size 2.0 +479 67 training.label_smoothing 0.007790985018984891 +479 68 model.embedding_dim 2.0 +479 68 optimizer.lr 0.011331356185343233 +479 68 training.batch_size 0.0 +479 68 training.label_smoothing 0.0019013387331236606 +479 69 model.embedding_dim 2.0 +479 69 optimizer.lr 0.0046611809609989245 +479 69 training.batch_size 0.0 +479 69 training.label_smoothing 0.0014168596394735532 +479 70 model.embedding_dim 0.0 +479 70 optimizer.lr 0.02435577172489471 +479 70 training.batch_size 0.0 +479 70 training.label_smoothing 0.25373346973523697 +479 71 model.embedding_dim 0.0 +479 71 
optimizer.lr 0.09012037936184664 +479 71 training.batch_size 0.0 +479 71 training.label_smoothing 0.7506036958104537 +479 72 model.embedding_dim 0.0 +479 72 optimizer.lr 0.02272550631980747 +479 72 training.batch_size 0.0 +479 72 training.label_smoothing 0.09265808739414694 +479 73 model.embedding_dim 1.0 +479 73 optimizer.lr 0.04159035878094327 +479 73 training.batch_size 1.0 +479 73 training.label_smoothing 0.7602044493725192 +479 74 model.embedding_dim 2.0 +479 74 optimizer.lr 0.003558426131622486 +479 74 training.batch_size 2.0 +479 74 training.label_smoothing 0.013224008311389748 +479 75 model.embedding_dim 0.0 +479 75 optimizer.lr 0.04586714987385195 +479 75 training.batch_size 1.0 +479 75 training.label_smoothing 0.0027860941501953018 +479 76 model.embedding_dim 1.0 +479 76 optimizer.lr 0.02448335358196575 +479 76 training.batch_size 2.0 +479 76 training.label_smoothing 0.01782656419175767 +479 77 model.embedding_dim 0.0 +479 77 optimizer.lr 0.027962742284573232 +479 77 training.batch_size 1.0 +479 77 training.label_smoothing 0.04271510996399457 +479 78 model.embedding_dim 2.0 +479 78 optimizer.lr 0.010555836269642899 +479 78 training.batch_size 0.0 +479 78 training.label_smoothing 0.0028643732735195797 +479 79 model.embedding_dim 0.0 +479 79 optimizer.lr 0.009685346268144704 +479 79 training.batch_size 1.0 +479 79 training.label_smoothing 0.46568185318900684 +479 80 model.embedding_dim 0.0 +479 80 optimizer.lr 0.002177384351276997 +479 80 training.batch_size 2.0 +479 80 training.label_smoothing 0.404628255844886 +479 81 model.embedding_dim 1.0 +479 81 optimizer.lr 0.04077823499898819 +479 81 training.batch_size 0.0 +479 81 training.label_smoothing 0.0024260428272625473 +479 82 model.embedding_dim 2.0 +479 82 optimizer.lr 0.001019528339932482 +479 82 training.batch_size 0.0 +479 82 training.label_smoothing 0.31477659535604635 +479 83 model.embedding_dim 2.0 +479 83 optimizer.lr 0.0022175014938575163 +479 83 training.batch_size 2.0 +479 83 
training.label_smoothing 0.161660377117421 +479 84 model.embedding_dim 0.0 +479 84 optimizer.lr 0.09183796449435447 +479 84 training.batch_size 1.0 +479 84 training.label_smoothing 0.0040678591958127315 +479 85 model.embedding_dim 2.0 +479 85 optimizer.lr 0.06268144886947505 +479 85 training.batch_size 2.0 +479 85 training.label_smoothing 0.0011749583433661042 +479 86 model.embedding_dim 2.0 +479 86 optimizer.lr 0.0010168253132889903 +479 86 training.batch_size 0.0 +479 86 training.label_smoothing 0.0343732764055786 +479 87 model.embedding_dim 0.0 +479 87 optimizer.lr 0.04300216478756936 +479 87 training.batch_size 0.0 +479 87 training.label_smoothing 0.020889741792723605 +479 88 model.embedding_dim 2.0 +479 88 optimizer.lr 0.032095115064226805 +479 88 training.batch_size 2.0 +479 88 training.label_smoothing 0.028300283082462803 +479 89 model.embedding_dim 2.0 +479 89 optimizer.lr 0.0013122917868239943 +479 89 training.batch_size 2.0 +479 89 training.label_smoothing 0.015507387604912442 +479 90 model.embedding_dim 0.0 +479 90 optimizer.lr 0.02123498704788533 +479 90 training.batch_size 1.0 +479 90 training.label_smoothing 0.006070721476912025 +479 91 model.embedding_dim 0.0 +479 91 optimizer.lr 0.09419294516639964 +479 91 training.batch_size 2.0 +479 91 training.label_smoothing 0.0375696950078164 +479 92 model.embedding_dim 1.0 +479 92 optimizer.lr 0.0010795693611868962 +479 92 training.batch_size 1.0 +479 92 training.label_smoothing 0.04718352457310187 +479 93 model.embedding_dim 1.0 +479 93 optimizer.lr 0.004034324723500763 +479 93 training.batch_size 1.0 +479 93 training.label_smoothing 0.062159069352696494 +479 94 model.embedding_dim 2.0 +479 94 optimizer.lr 0.06972446670805739 +479 94 training.batch_size 1.0 +479 94 training.label_smoothing 0.11623728392080276 +479 95 model.embedding_dim 1.0 +479 95 optimizer.lr 0.038941101675912276 +479 95 training.batch_size 0.0 +479 95 training.label_smoothing 0.002157902820199479 +479 96 model.embedding_dim 0.0 +479 96 
optimizer.lr 0.0013677092066102182 +479 96 training.batch_size 1.0 +479 96 training.label_smoothing 0.01218290959081678 +479 97 model.embedding_dim 2.0 +479 97 optimizer.lr 0.0011543053095319838 +479 97 training.batch_size 1.0 +479 97 training.label_smoothing 0.003694857019364955 +479 98 model.embedding_dim 1.0 +479 98 optimizer.lr 0.03198159573784614 +479 98 training.batch_size 1.0 +479 98 training.label_smoothing 0.003222040744925927 +479 99 model.embedding_dim 1.0 +479 99 optimizer.lr 0.0010136368419924613 +479 99 training.batch_size 1.0 +479 99 training.label_smoothing 0.5652971960446634 +479 100 model.embedding_dim 2.0 +479 100 optimizer.lr 0.0050396800168052485 +479 100 training.batch_size 0.0 +479 100 training.label_smoothing 0.2780850483385831 +479 1 dataset """kinships""" +479 1 model """proje""" +479 1 loss """bceaftersigmoid""" +479 1 regularizer """no""" +479 1 optimizer """adam""" +479 1 training_loop """lcwa""" +479 1 evaluator """rankbased""" +479 2 dataset """kinships""" +479 2 model """proje""" +479 2 loss """bceaftersigmoid""" +479 2 regularizer """no""" +479 2 optimizer """adam""" +479 2 training_loop """lcwa""" +479 2 evaluator """rankbased""" +479 3 dataset """kinships""" +479 3 model """proje""" +479 3 loss """bceaftersigmoid""" +479 3 regularizer """no""" +479 3 optimizer """adam""" +479 3 training_loop """lcwa""" +479 3 evaluator """rankbased""" +479 4 dataset """kinships""" +479 4 model """proje""" +479 4 loss """bceaftersigmoid""" +479 4 regularizer """no""" +479 4 optimizer """adam""" +479 4 training_loop """lcwa""" +479 4 evaluator """rankbased""" +479 5 dataset """kinships""" +479 5 model """proje""" +479 5 loss """bceaftersigmoid""" +479 5 regularizer """no""" +479 5 optimizer """adam""" +479 5 training_loop """lcwa""" +479 5 evaluator """rankbased""" +479 6 dataset """kinships""" +479 6 model """proje""" +479 6 loss """bceaftersigmoid""" +479 6 regularizer """no""" +479 6 optimizer """adam""" +479 6 training_loop """lcwa""" +479 6 
evaluator """rankbased""" +479 7 dataset """kinships""" +479 7 model """proje""" +479 7 loss """bceaftersigmoid""" +479 7 regularizer """no""" +479 7 optimizer """adam""" +479 7 training_loop """lcwa""" +479 7 evaluator """rankbased""" +479 8 dataset """kinships""" +479 8 model """proje""" +479 8 loss """bceaftersigmoid""" +479 8 regularizer """no""" +479 8 optimizer """adam""" +479 8 training_loop """lcwa""" +479 8 evaluator """rankbased""" +479 9 dataset """kinships""" +479 9 model """proje""" +479 9 loss """bceaftersigmoid""" +479 9 regularizer """no""" +479 9 optimizer """adam""" +479 9 training_loop """lcwa""" +479 9 evaluator """rankbased""" +479 10 dataset """kinships""" +479 10 model """proje""" +479 10 loss """bceaftersigmoid""" +479 10 regularizer """no""" +479 10 optimizer """adam""" +479 10 training_loop """lcwa""" +479 10 evaluator """rankbased""" +479 11 dataset """kinships""" +479 11 model """proje""" +479 11 loss """bceaftersigmoid""" +479 11 regularizer """no""" +479 11 optimizer """adam""" +479 11 training_loop """lcwa""" +479 11 evaluator """rankbased""" +479 12 dataset """kinships""" +479 12 model """proje""" +479 12 loss """bceaftersigmoid""" +479 12 regularizer """no""" +479 12 optimizer """adam""" +479 12 training_loop """lcwa""" +479 12 evaluator """rankbased""" +479 13 dataset """kinships""" +479 13 model """proje""" +479 13 loss """bceaftersigmoid""" +479 13 regularizer """no""" +479 13 optimizer """adam""" +479 13 training_loop """lcwa""" +479 13 evaluator """rankbased""" +479 14 dataset """kinships""" +479 14 model """proje""" +479 14 loss """bceaftersigmoid""" +479 14 regularizer """no""" +479 14 optimizer """adam""" +479 14 training_loop """lcwa""" +479 14 evaluator """rankbased""" +479 15 dataset """kinships""" +479 15 model """proje""" +479 15 loss """bceaftersigmoid""" +479 15 regularizer """no""" +479 15 optimizer """adam""" +479 15 training_loop """lcwa""" +479 15 evaluator """rankbased""" +479 16 dataset """kinships""" +479 16 
model """proje""" +479 16 loss """bceaftersigmoid""" +479 16 regularizer """no""" +479 16 optimizer """adam""" +479 16 training_loop """lcwa""" +479 16 evaluator """rankbased""" +479 17 dataset """kinships""" +479 17 model """proje""" +479 17 loss """bceaftersigmoid""" +479 17 regularizer """no""" +479 17 optimizer """adam""" +479 17 training_loop """lcwa""" +479 17 evaluator """rankbased""" +479 18 dataset """kinships""" +479 18 model """proje""" +479 18 loss """bceaftersigmoid""" +479 18 regularizer """no""" +479 18 optimizer """adam""" +479 18 training_loop """lcwa""" +479 18 evaluator """rankbased""" +479 19 dataset """kinships""" +479 19 model """proje""" +479 19 loss """bceaftersigmoid""" +479 19 regularizer """no""" +479 19 optimizer """adam""" +479 19 training_loop """lcwa""" +479 19 evaluator """rankbased""" +479 20 dataset """kinships""" +479 20 model """proje""" +479 20 loss """bceaftersigmoid""" +479 20 regularizer """no""" +479 20 optimizer """adam""" +479 20 training_loop """lcwa""" +479 20 evaluator """rankbased""" +479 21 dataset """kinships""" +479 21 model """proje""" +479 21 loss """bceaftersigmoid""" +479 21 regularizer """no""" +479 21 optimizer """adam""" +479 21 training_loop """lcwa""" +479 21 evaluator """rankbased""" +479 22 dataset """kinships""" +479 22 model """proje""" +479 22 loss """bceaftersigmoid""" +479 22 regularizer """no""" +479 22 optimizer """adam""" +479 22 training_loop """lcwa""" +479 22 evaluator """rankbased""" +479 23 dataset """kinships""" +479 23 model """proje""" +479 23 loss """bceaftersigmoid""" +479 23 regularizer """no""" +479 23 optimizer """adam""" +479 23 training_loop """lcwa""" +479 23 evaluator """rankbased""" +479 24 dataset """kinships""" +479 24 model """proje""" +479 24 loss """bceaftersigmoid""" +479 24 regularizer """no""" +479 24 optimizer """adam""" +479 24 training_loop """lcwa""" +479 24 evaluator """rankbased""" +479 25 dataset """kinships""" +479 25 model """proje""" +479 25 loss 
"""bceaftersigmoid""" +479 25 regularizer """no""" +479 25 optimizer """adam""" +479 25 training_loop """lcwa""" +479 25 evaluator """rankbased""" +479 26 dataset """kinships""" +479 26 model """proje""" +479 26 loss """bceaftersigmoid""" +479 26 regularizer """no""" +479 26 optimizer """adam""" +479 26 training_loop """lcwa""" +479 26 evaluator """rankbased""" +479 27 dataset """kinships""" +479 27 model """proje""" +479 27 loss """bceaftersigmoid""" +479 27 regularizer """no""" +479 27 optimizer """adam""" +479 27 training_loop """lcwa""" +479 27 evaluator """rankbased""" +479 28 dataset """kinships""" +479 28 model """proje""" +479 28 loss """bceaftersigmoid""" +479 28 regularizer """no""" +479 28 optimizer """adam""" +479 28 training_loop """lcwa""" +479 28 evaluator """rankbased""" +479 29 dataset """kinships""" +479 29 model """proje""" +479 29 loss """bceaftersigmoid""" +479 29 regularizer """no""" +479 29 optimizer """adam""" +479 29 training_loop """lcwa""" +479 29 evaluator """rankbased""" +479 30 dataset """kinships""" +479 30 model """proje""" +479 30 loss """bceaftersigmoid""" +479 30 regularizer """no""" +479 30 optimizer """adam""" +479 30 training_loop """lcwa""" +479 30 evaluator """rankbased""" +479 31 dataset """kinships""" +479 31 model """proje""" +479 31 loss """bceaftersigmoid""" +479 31 regularizer """no""" +479 31 optimizer """adam""" +479 31 training_loop """lcwa""" +479 31 evaluator """rankbased""" +479 32 dataset """kinships""" +479 32 model """proje""" +479 32 loss """bceaftersigmoid""" +479 32 regularizer """no""" +479 32 optimizer """adam""" +479 32 training_loop """lcwa""" +479 32 evaluator """rankbased""" +479 33 dataset """kinships""" +479 33 model """proje""" +479 33 loss """bceaftersigmoid""" +479 33 regularizer """no""" +479 33 optimizer """adam""" +479 33 training_loop """lcwa""" +479 33 evaluator """rankbased""" +479 34 dataset """kinships""" +479 34 model """proje""" +479 34 loss """bceaftersigmoid""" +479 34 regularizer 
"""no""" +479 34 optimizer """adam""" +479 34 training_loop """lcwa""" +479 34 evaluator """rankbased""" +479 35 dataset """kinships""" +479 35 model """proje""" +479 35 loss """bceaftersigmoid""" +479 35 regularizer """no""" +479 35 optimizer """adam""" +479 35 training_loop """lcwa""" +479 35 evaluator """rankbased""" +479 36 dataset """kinships""" +479 36 model """proje""" +479 36 loss """bceaftersigmoid""" +479 36 regularizer """no""" +479 36 optimizer """adam""" +479 36 training_loop """lcwa""" +479 36 evaluator """rankbased""" +479 37 dataset """kinships""" +479 37 model """proje""" +479 37 loss """bceaftersigmoid""" +479 37 regularizer """no""" +479 37 optimizer """adam""" +479 37 training_loop """lcwa""" +479 37 evaluator """rankbased""" +479 38 dataset """kinships""" +479 38 model """proje""" +479 38 loss """bceaftersigmoid""" +479 38 regularizer """no""" +479 38 optimizer """adam""" +479 38 training_loop """lcwa""" +479 38 evaluator """rankbased""" +479 39 dataset """kinships""" +479 39 model """proje""" +479 39 loss """bceaftersigmoid""" +479 39 regularizer """no""" +479 39 optimizer """adam""" +479 39 training_loop """lcwa""" +479 39 evaluator """rankbased""" +479 40 dataset """kinships""" +479 40 model """proje""" +479 40 loss """bceaftersigmoid""" +479 40 regularizer """no""" +479 40 optimizer """adam""" +479 40 training_loop """lcwa""" +479 40 evaluator """rankbased""" +479 41 dataset """kinships""" +479 41 model """proje""" +479 41 loss """bceaftersigmoid""" +479 41 regularizer """no""" +479 41 optimizer """adam""" +479 41 training_loop """lcwa""" +479 41 evaluator """rankbased""" +479 42 dataset """kinships""" +479 42 model """proje""" +479 42 loss """bceaftersigmoid""" +479 42 regularizer """no""" +479 42 optimizer """adam""" +479 42 training_loop """lcwa""" +479 42 evaluator """rankbased""" +479 43 dataset """kinships""" +479 43 model """proje""" +479 43 loss """bceaftersigmoid""" +479 43 regularizer """no""" +479 43 optimizer """adam""" +479 43 
training_loop """lcwa""" +479 43 evaluator """rankbased""" +479 44 dataset """kinships""" +479 44 model """proje""" +479 44 loss """bceaftersigmoid""" +479 44 regularizer """no""" +479 44 optimizer """adam""" +479 44 training_loop """lcwa""" +479 44 evaluator """rankbased""" +479 45 dataset """kinships""" +479 45 model """proje""" +479 45 loss """bceaftersigmoid""" +479 45 regularizer """no""" +479 45 optimizer """adam""" +479 45 training_loop """lcwa""" +479 45 evaluator """rankbased""" +479 46 dataset """kinships""" +479 46 model """proje""" +479 46 loss """bceaftersigmoid""" +479 46 regularizer """no""" +479 46 optimizer """adam""" +479 46 training_loop """lcwa""" +479 46 evaluator """rankbased""" +479 47 dataset """kinships""" +479 47 model """proje""" +479 47 loss """bceaftersigmoid""" +479 47 regularizer """no""" +479 47 optimizer """adam""" +479 47 training_loop """lcwa""" +479 47 evaluator """rankbased""" +479 48 dataset """kinships""" +479 48 model """proje""" +479 48 loss """bceaftersigmoid""" +479 48 regularizer """no""" +479 48 optimizer """adam""" +479 48 training_loop """lcwa""" +479 48 evaluator """rankbased""" +479 49 dataset """kinships""" +479 49 model """proje""" +479 49 loss """bceaftersigmoid""" +479 49 regularizer """no""" +479 49 optimizer """adam""" +479 49 training_loop """lcwa""" +479 49 evaluator """rankbased""" +479 50 dataset """kinships""" +479 50 model """proje""" +479 50 loss """bceaftersigmoid""" +479 50 regularizer """no""" +479 50 optimizer """adam""" +479 50 training_loop """lcwa""" +479 50 evaluator """rankbased""" +479 51 dataset """kinships""" +479 51 model """proje""" +479 51 loss """bceaftersigmoid""" +479 51 regularizer """no""" +479 51 optimizer """adam""" +479 51 training_loop """lcwa""" +479 51 evaluator """rankbased""" +479 52 dataset """kinships""" +479 52 model """proje""" +479 52 loss """bceaftersigmoid""" +479 52 regularizer """no""" +479 52 optimizer """adam""" +479 52 training_loop """lcwa""" +479 52 evaluator 
"""rankbased""" +479 53 dataset """kinships""" +479 53 model """proje""" +479 53 loss """bceaftersigmoid""" +479 53 regularizer """no""" +479 53 optimizer """adam""" +479 53 training_loop """lcwa""" +479 53 evaluator """rankbased""" +479 54 dataset """kinships""" +479 54 model """proje""" +479 54 loss """bceaftersigmoid""" +479 54 regularizer """no""" +479 54 optimizer """adam""" +479 54 training_loop """lcwa""" +479 54 evaluator """rankbased""" +479 55 dataset """kinships""" +479 55 model """proje""" +479 55 loss """bceaftersigmoid""" +479 55 regularizer """no""" +479 55 optimizer """adam""" +479 55 training_loop """lcwa""" +479 55 evaluator """rankbased""" +479 56 dataset """kinships""" +479 56 model """proje""" +479 56 loss """bceaftersigmoid""" +479 56 regularizer """no""" +479 56 optimizer """adam""" +479 56 training_loop """lcwa""" +479 56 evaluator """rankbased""" +479 57 dataset """kinships""" +479 57 model """proje""" +479 57 loss """bceaftersigmoid""" +479 57 regularizer """no""" +479 57 optimizer """adam""" +479 57 training_loop """lcwa""" +479 57 evaluator """rankbased""" +479 58 dataset """kinships""" +479 58 model """proje""" +479 58 loss """bceaftersigmoid""" +479 58 regularizer """no""" +479 58 optimizer """adam""" +479 58 training_loop """lcwa""" +479 58 evaluator """rankbased""" +479 59 dataset """kinships""" +479 59 model """proje""" +479 59 loss """bceaftersigmoid""" +479 59 regularizer """no""" +479 59 optimizer """adam""" +479 59 training_loop """lcwa""" +479 59 evaluator """rankbased""" +479 60 dataset """kinships""" +479 60 model """proje""" +479 60 loss """bceaftersigmoid""" +479 60 regularizer """no""" +479 60 optimizer """adam""" +479 60 training_loop """lcwa""" +479 60 evaluator """rankbased""" +479 61 dataset """kinships""" +479 61 model """proje""" +479 61 loss """bceaftersigmoid""" +479 61 regularizer """no""" +479 61 optimizer """adam""" +479 61 training_loop """lcwa""" +479 61 evaluator """rankbased""" +479 62 dataset """kinships""" 
+479 62 model """proje""" +479 62 loss """bceaftersigmoid""" +479 62 regularizer """no""" +479 62 optimizer """adam""" +479 62 training_loop """lcwa""" +479 62 evaluator """rankbased""" +479 63 dataset """kinships""" +479 63 model """proje""" +479 63 loss """bceaftersigmoid""" +479 63 regularizer """no""" +479 63 optimizer """adam""" +479 63 training_loop """lcwa""" +479 63 evaluator """rankbased""" +479 64 dataset """kinships""" +479 64 model """proje""" +479 64 loss """bceaftersigmoid""" +479 64 regularizer """no""" +479 64 optimizer """adam""" +479 64 training_loop """lcwa""" +479 64 evaluator """rankbased""" +479 65 dataset """kinships""" +479 65 model """proje""" +479 65 loss """bceaftersigmoid""" +479 65 regularizer """no""" +479 65 optimizer """adam""" +479 65 training_loop """lcwa""" +479 65 evaluator """rankbased""" +479 66 dataset """kinships""" +479 66 model """proje""" +479 66 loss """bceaftersigmoid""" +479 66 regularizer """no""" +479 66 optimizer """adam""" +479 66 training_loop """lcwa""" +479 66 evaluator """rankbased""" +479 67 dataset """kinships""" +479 67 model """proje""" +479 67 loss """bceaftersigmoid""" +479 67 regularizer """no""" +479 67 optimizer """adam""" +479 67 training_loop """lcwa""" +479 67 evaluator """rankbased""" +479 68 dataset """kinships""" +479 68 model """proje""" +479 68 loss """bceaftersigmoid""" +479 68 regularizer """no""" +479 68 optimizer """adam""" +479 68 training_loop """lcwa""" +479 68 evaluator """rankbased""" +479 69 dataset """kinships""" +479 69 model """proje""" +479 69 loss """bceaftersigmoid""" +479 69 regularizer """no""" +479 69 optimizer """adam""" +479 69 training_loop """lcwa""" +479 69 evaluator """rankbased""" +479 70 dataset """kinships""" +479 70 model """proje""" +479 70 loss """bceaftersigmoid""" +479 70 regularizer """no""" +479 70 optimizer """adam""" +479 70 training_loop """lcwa""" +479 70 evaluator """rankbased""" +479 71 dataset """kinships""" +479 71 model """proje""" +479 71 loss 
"""bceaftersigmoid""" +479 71 regularizer """no""" +479 71 optimizer """adam""" +479 71 training_loop """lcwa""" +479 71 evaluator """rankbased""" +479 72 dataset """kinships""" +479 72 model """proje""" +479 72 loss """bceaftersigmoid""" +479 72 regularizer """no""" +479 72 optimizer """adam""" +479 72 training_loop """lcwa""" +479 72 evaluator """rankbased""" +479 73 dataset """kinships""" +479 73 model """proje""" +479 73 loss """bceaftersigmoid""" +479 73 regularizer """no""" +479 73 optimizer """adam""" +479 73 training_loop """lcwa""" +479 73 evaluator """rankbased""" +479 74 dataset """kinships""" +479 74 model """proje""" +479 74 loss """bceaftersigmoid""" +479 74 regularizer """no""" +479 74 optimizer """adam""" +479 74 training_loop """lcwa""" +479 74 evaluator """rankbased""" +479 75 dataset """kinships""" +479 75 model """proje""" +479 75 loss """bceaftersigmoid""" +479 75 regularizer """no""" +479 75 optimizer """adam""" +479 75 training_loop """lcwa""" +479 75 evaluator """rankbased""" +479 76 dataset """kinships""" +479 76 model """proje""" +479 76 loss """bceaftersigmoid""" +479 76 regularizer """no""" +479 76 optimizer """adam""" +479 76 training_loop """lcwa""" +479 76 evaluator """rankbased""" +479 77 dataset """kinships""" +479 77 model """proje""" +479 77 loss """bceaftersigmoid""" +479 77 regularizer """no""" +479 77 optimizer """adam""" +479 77 training_loop """lcwa""" +479 77 evaluator """rankbased""" +479 78 dataset """kinships""" +479 78 model """proje""" +479 78 loss """bceaftersigmoid""" +479 78 regularizer """no""" +479 78 optimizer """adam""" +479 78 training_loop """lcwa""" +479 78 evaluator """rankbased""" +479 79 dataset """kinships""" +479 79 model """proje""" +479 79 loss """bceaftersigmoid""" +479 79 regularizer """no""" +479 79 optimizer """adam""" +479 79 training_loop """lcwa""" +479 79 evaluator """rankbased""" +479 80 dataset """kinships""" +479 80 model """proje""" +479 80 loss """bceaftersigmoid""" +479 80 regularizer 
"""no""" +479 80 optimizer """adam""" +479 80 training_loop """lcwa""" +479 80 evaluator """rankbased""" +479 81 dataset """kinships""" +479 81 model """proje""" +479 81 loss """bceaftersigmoid""" +479 81 regularizer """no""" +479 81 optimizer """adam""" +479 81 training_loop """lcwa""" +479 81 evaluator """rankbased""" +479 82 dataset """kinships""" +479 82 model """proje""" +479 82 loss """bceaftersigmoid""" +479 82 regularizer """no""" +479 82 optimizer """adam""" +479 82 training_loop """lcwa""" +479 82 evaluator """rankbased""" +479 83 dataset """kinships""" +479 83 model """proje""" +479 83 loss """bceaftersigmoid""" +479 83 regularizer """no""" +479 83 optimizer """adam""" +479 83 training_loop """lcwa""" +479 83 evaluator """rankbased""" +479 84 dataset """kinships""" +479 84 model """proje""" +479 84 loss """bceaftersigmoid""" +479 84 regularizer """no""" +479 84 optimizer """adam""" +479 84 training_loop """lcwa""" +479 84 evaluator """rankbased""" +479 85 dataset """kinships""" +479 85 model """proje""" +479 85 loss """bceaftersigmoid""" +479 85 regularizer """no""" +479 85 optimizer """adam""" +479 85 training_loop """lcwa""" +479 85 evaluator """rankbased""" +479 86 dataset """kinships""" +479 86 model """proje""" +479 86 loss """bceaftersigmoid""" +479 86 regularizer """no""" +479 86 optimizer """adam""" +479 86 training_loop """lcwa""" +479 86 evaluator """rankbased""" +479 87 dataset """kinships""" +479 87 model """proje""" +479 87 loss """bceaftersigmoid""" +479 87 regularizer """no""" +479 87 optimizer """adam""" +479 87 training_loop """lcwa""" +479 87 evaluator """rankbased""" +479 88 dataset """kinships""" +479 88 model """proje""" +479 88 loss """bceaftersigmoid""" +479 88 regularizer """no""" +479 88 optimizer """adam""" +479 88 training_loop """lcwa""" +479 88 evaluator """rankbased""" +479 89 dataset """kinships""" +479 89 model """proje""" +479 89 loss """bceaftersigmoid""" +479 89 regularizer """no""" +479 89 optimizer """adam""" +479 89 
training_loop """lcwa""" +479 89 evaluator """rankbased""" +479 90 dataset """kinships""" +479 90 model """proje""" +479 90 loss """bceaftersigmoid""" +479 90 regularizer """no""" +479 90 optimizer """adam""" +479 90 training_loop """lcwa""" +479 90 evaluator """rankbased""" +479 91 dataset """kinships""" +479 91 model """proje""" +479 91 loss """bceaftersigmoid""" +479 91 regularizer """no""" +479 91 optimizer """adam""" +479 91 training_loop """lcwa""" +479 91 evaluator """rankbased""" +479 92 dataset """kinships""" +479 92 model """proje""" +479 92 loss """bceaftersigmoid""" +479 92 regularizer """no""" +479 92 optimizer """adam""" +479 92 training_loop """lcwa""" +479 92 evaluator """rankbased""" +479 93 dataset """kinships""" +479 93 model """proje""" +479 93 loss """bceaftersigmoid""" +479 93 regularizer """no""" +479 93 optimizer """adam""" +479 93 training_loop """lcwa""" +479 93 evaluator """rankbased""" +479 94 dataset """kinships""" +479 94 model """proje""" +479 94 loss """bceaftersigmoid""" +479 94 regularizer """no""" +479 94 optimizer """adam""" +479 94 training_loop """lcwa""" +479 94 evaluator """rankbased""" +479 95 dataset """kinships""" +479 95 model """proje""" +479 95 loss """bceaftersigmoid""" +479 95 regularizer """no""" +479 95 optimizer """adam""" +479 95 training_loop """lcwa""" +479 95 evaluator """rankbased""" +479 96 dataset """kinships""" +479 96 model """proje""" +479 96 loss """bceaftersigmoid""" +479 96 regularizer """no""" +479 96 optimizer """adam""" +479 96 training_loop """lcwa""" +479 96 evaluator """rankbased""" +479 97 dataset """kinships""" +479 97 model """proje""" +479 97 loss """bceaftersigmoid""" +479 97 regularizer """no""" +479 97 optimizer """adam""" +479 97 training_loop """lcwa""" +479 97 evaluator """rankbased""" +479 98 dataset """kinships""" +479 98 model """proje""" +479 98 loss """bceaftersigmoid""" +479 98 regularizer """no""" +479 98 optimizer """adam""" +479 98 training_loop """lcwa""" +479 98 evaluator 
"""rankbased""" +479 99 dataset """kinships""" +479 99 model """proje""" +479 99 loss """bceaftersigmoid""" +479 99 regularizer """no""" +479 99 optimizer """adam""" +479 99 training_loop """lcwa""" +479 99 evaluator """rankbased""" +479 100 dataset """kinships""" +479 100 model """proje""" +479 100 loss """bceaftersigmoid""" +479 100 regularizer """no""" +479 100 optimizer """adam""" +479 100 training_loop """lcwa""" +479 100 evaluator """rankbased""" +480 1 model.embedding_dim 2.0 +480 1 optimizer.lr 0.0038191836844728437 +480 1 training.batch_size 2.0 +480 1 training.label_smoothing 0.003976946147327118 +480 2 model.embedding_dim 0.0 +480 2 optimizer.lr 0.0024004101421532583 +480 2 training.batch_size 0.0 +480 2 training.label_smoothing 0.8694356008968592 +480 3 model.embedding_dim 1.0 +480 3 optimizer.lr 0.014628709020810276 +480 3 training.batch_size 0.0 +480 3 training.label_smoothing 0.0017510658561186118 +480 4 model.embedding_dim 2.0 +480 4 optimizer.lr 0.008971922310728651 +480 4 training.batch_size 2.0 +480 4 training.label_smoothing 0.8601915044336463 +480 5 model.embedding_dim 1.0 +480 5 optimizer.lr 0.005441175188651456 +480 5 training.batch_size 0.0 +480 5 training.label_smoothing 0.006168399921347384 +480 6 model.embedding_dim 0.0 +480 6 optimizer.lr 0.0012765152354857466 +480 6 training.batch_size 1.0 +480 6 training.label_smoothing 0.20151765176954337 +480 7 model.embedding_dim 1.0 +480 7 optimizer.lr 0.004445804559085784 +480 7 training.batch_size 0.0 +480 7 training.label_smoothing 0.011955379579755636 +480 8 model.embedding_dim 0.0 +480 8 optimizer.lr 0.006545111184795369 +480 8 training.batch_size 0.0 +480 8 training.label_smoothing 0.13723727598355226 +480 9 model.embedding_dim 2.0 +480 9 optimizer.lr 0.0014378274320215679 +480 9 training.batch_size 0.0 +480 9 training.label_smoothing 0.04492193668000735 +480 10 model.embedding_dim 0.0 +480 10 optimizer.lr 0.02536881407123967 +480 10 training.batch_size 0.0 +480 10 training.label_smoothing 
0.002035047389309115 +480 11 model.embedding_dim 2.0 +480 11 optimizer.lr 0.015621165587651066 +480 11 training.batch_size 1.0 +480 11 training.label_smoothing 0.39228567434890826 +480 12 model.embedding_dim 1.0 +480 12 optimizer.lr 0.0014826826849861678 +480 12 training.batch_size 1.0 +480 12 training.label_smoothing 0.07400396066055721 +480 13 model.embedding_dim 0.0 +480 13 optimizer.lr 0.01090335990046051 +480 13 training.batch_size 0.0 +480 13 training.label_smoothing 0.0038432253741684653 +480 14 model.embedding_dim 2.0 +480 14 optimizer.lr 0.06719777962878762 +480 14 training.batch_size 2.0 +480 14 training.label_smoothing 0.06571805273054843 +480 15 model.embedding_dim 2.0 +480 15 optimizer.lr 0.0027909791628498446 +480 15 training.batch_size 0.0 +480 15 training.label_smoothing 0.013895192501667559 +480 16 model.embedding_dim 2.0 +480 16 optimizer.lr 0.007792026404714613 +480 16 training.batch_size 1.0 +480 16 training.label_smoothing 0.15375652395028877 +480 17 model.embedding_dim 0.0 +480 17 optimizer.lr 0.0011297097588031087 +480 17 training.batch_size 1.0 +480 17 training.label_smoothing 0.09120500833200414 +480 18 model.embedding_dim 1.0 +480 18 optimizer.lr 0.00971661887499601 +480 18 training.batch_size 2.0 +480 18 training.label_smoothing 0.29783563370182875 +480 19 model.embedding_dim 0.0 +480 19 optimizer.lr 0.022486387325564287 +480 19 training.batch_size 0.0 +480 19 training.label_smoothing 0.001422784682490953 +480 20 model.embedding_dim 2.0 +480 20 optimizer.lr 0.07869193806747984 +480 20 training.batch_size 0.0 +480 20 training.label_smoothing 0.00967308081719753 +480 21 model.embedding_dim 0.0 +480 21 optimizer.lr 0.0018650319249430404 +480 21 training.batch_size 2.0 +480 21 training.label_smoothing 0.6110159665477395 +480 22 model.embedding_dim 1.0 +480 22 optimizer.lr 0.07407021166187251 +480 22 training.batch_size 2.0 +480 22 training.label_smoothing 0.9260797536393336 +480 23 model.embedding_dim 1.0 +480 23 optimizer.lr 
0.053224815436424544 +480 23 training.batch_size 0.0 +480 23 training.label_smoothing 0.11096128574637118 +480 24 model.embedding_dim 0.0 +480 24 optimizer.lr 0.026047889898818802 +480 24 training.batch_size 0.0 +480 24 training.label_smoothing 0.9645642088279966 +480 25 model.embedding_dim 0.0 +480 25 optimizer.lr 0.002270362012649723 +480 25 training.batch_size 1.0 +480 25 training.label_smoothing 0.021887594561291527 +480 26 model.embedding_dim 1.0 +480 26 optimizer.lr 0.09430743392014443 +480 26 training.batch_size 2.0 +480 26 training.label_smoothing 0.004594726766474388 +480 27 model.embedding_dim 0.0 +480 27 optimizer.lr 0.09833724088448129 +480 27 training.batch_size 2.0 +480 27 training.label_smoothing 0.14758627135692462 +480 28 model.embedding_dim 2.0 +480 28 optimizer.lr 0.004022233743785858 +480 28 training.batch_size 2.0 +480 28 training.label_smoothing 0.004191921140432418 +480 29 model.embedding_dim 2.0 +480 29 optimizer.lr 0.028764556423715896 +480 29 training.batch_size 0.0 +480 29 training.label_smoothing 0.02697652338047011 +480 30 model.embedding_dim 1.0 +480 30 optimizer.lr 0.002701361071481097 +480 30 training.batch_size 2.0 +480 30 training.label_smoothing 0.06689405769598344 +480 31 model.embedding_dim 2.0 +480 31 optimizer.lr 0.009701499088689537 +480 31 training.batch_size 1.0 +480 31 training.label_smoothing 0.12641989347332075 +480 32 model.embedding_dim 0.0 +480 32 optimizer.lr 0.0017741190749277812 +480 32 training.batch_size 0.0 +480 32 training.label_smoothing 0.17062768112544588 +480 33 model.embedding_dim 2.0 +480 33 optimizer.lr 0.0020122614700181633 +480 33 training.batch_size 0.0 +480 33 training.label_smoothing 0.2692323899326055 +480 34 model.embedding_dim 2.0 +480 34 optimizer.lr 0.09157049560752388 +480 34 training.batch_size 2.0 +480 34 training.label_smoothing 0.002023727832857163 +480 35 model.embedding_dim 1.0 +480 35 optimizer.lr 0.0035761682752465437 +480 35 training.batch_size 2.0 +480 35 training.label_smoothing 
0.0013745332423708103 +480 36 model.embedding_dim 1.0 +480 36 optimizer.lr 0.00386356766313653 +480 36 training.batch_size 2.0 +480 36 training.label_smoothing 0.31480561924633355 +480 37 model.embedding_dim 2.0 +480 37 optimizer.lr 0.0036649834314502684 +480 37 training.batch_size 0.0 +480 37 training.label_smoothing 0.20803265608995997 +480 38 model.embedding_dim 0.0 +480 38 optimizer.lr 0.05078200254867194 +480 38 training.batch_size 2.0 +480 38 training.label_smoothing 0.690631753454194 +480 39 model.embedding_dim 1.0 +480 39 optimizer.lr 0.004471712376839687 +480 39 training.batch_size 0.0 +480 39 training.label_smoothing 0.03814398007589567 +480 40 model.embedding_dim 1.0 +480 40 optimizer.lr 0.0036452263624271127 +480 40 training.batch_size 0.0 +480 40 training.label_smoothing 0.20048897961526027 +480 41 model.embedding_dim 1.0 +480 41 optimizer.lr 0.025750386487663278 +480 41 training.batch_size 1.0 +480 41 training.label_smoothing 0.2782472326439049 +480 42 model.embedding_dim 1.0 +480 42 optimizer.lr 0.004260722603640039 +480 42 training.batch_size 1.0 +480 42 training.label_smoothing 0.06308965628166509 +480 43 model.embedding_dim 2.0 +480 43 optimizer.lr 0.011198179933132263 +480 43 training.batch_size 1.0 +480 43 training.label_smoothing 0.08183914822862046 +480 44 model.embedding_dim 2.0 +480 44 optimizer.lr 0.001111626426011801 +480 44 training.batch_size 1.0 +480 44 training.label_smoothing 0.22852127984987478 +480 45 model.embedding_dim 0.0 +480 45 optimizer.lr 0.04384893355671683 +480 45 training.batch_size 1.0 +480 45 training.label_smoothing 0.0021755372411494593 +480 46 model.embedding_dim 1.0 +480 46 optimizer.lr 0.005690793992978154 +480 46 training.batch_size 0.0 +480 46 training.label_smoothing 0.034963979964636606 +480 47 model.embedding_dim 1.0 +480 47 optimizer.lr 0.002202888590281065 +480 47 training.batch_size 1.0 +480 47 training.label_smoothing 0.4117636822334515 +480 48 model.embedding_dim 2.0 +480 48 optimizer.lr 0.0157357190294381 
+480 48 training.batch_size 1.0 +480 48 training.label_smoothing 0.0037987052544645895 +480 49 model.embedding_dim 1.0 +480 49 optimizer.lr 0.06950638553995728 +480 49 training.batch_size 0.0 +480 49 training.label_smoothing 0.0027101787632530412 +480 50 model.embedding_dim 0.0 +480 50 optimizer.lr 0.015557137062821512 +480 50 training.batch_size 0.0 +480 50 training.label_smoothing 0.018140309892789055 +480 51 model.embedding_dim 2.0 +480 51 optimizer.lr 0.003198836668722566 +480 51 training.batch_size 1.0 +480 51 training.label_smoothing 0.10466958348014907 +480 52 model.embedding_dim 2.0 +480 52 optimizer.lr 0.02679867543379537 +480 52 training.batch_size 0.0 +480 52 training.label_smoothing 0.036327331142911576 +480 53 model.embedding_dim 1.0 +480 53 optimizer.lr 0.015718796256218484 +480 53 training.batch_size 1.0 +480 53 training.label_smoothing 0.26198980558459917 +480 54 model.embedding_dim 0.0 +480 54 optimizer.lr 0.02361966211194565 +480 54 training.batch_size 2.0 +480 54 training.label_smoothing 0.008818703673900743 +480 55 model.embedding_dim 0.0 +480 55 optimizer.lr 0.004624334468660996 +480 55 training.batch_size 0.0 +480 55 training.label_smoothing 0.9123566401625364 +480 56 model.embedding_dim 2.0 +480 56 optimizer.lr 0.004702738397209954 +480 56 training.batch_size 2.0 +480 56 training.label_smoothing 0.01221793298560903 +480 57 model.embedding_dim 0.0 +480 57 optimizer.lr 0.03274569303149603 +480 57 training.batch_size 0.0 +480 57 training.label_smoothing 0.6679352182533324 +480 58 model.embedding_dim 2.0 +480 58 optimizer.lr 0.03395076453282626 +480 58 training.batch_size 1.0 +480 58 training.label_smoothing 0.10541648118123027 +480 59 model.embedding_dim 2.0 +480 59 optimizer.lr 0.005391490376996725 +480 59 training.batch_size 2.0 +480 59 training.label_smoothing 0.08604012872063829 +480 60 model.embedding_dim 2.0 +480 60 optimizer.lr 0.029362408524964433 +480 60 training.batch_size 1.0 +480 60 training.label_smoothing 0.01553849632502042 +480 
61 model.embedding_dim 2.0 +480 61 optimizer.lr 0.029895605846172554 +480 61 training.batch_size 2.0 +480 61 training.label_smoothing 0.004150934589748021 +480 62 model.embedding_dim 1.0 +480 62 optimizer.lr 0.04669191876795972 +480 62 training.batch_size 0.0 +480 62 training.label_smoothing 0.085846720079005 +480 63 model.embedding_dim 2.0 +480 63 optimizer.lr 0.02265947122207296 +480 63 training.batch_size 2.0 +480 63 training.label_smoothing 0.4823890240289532 +480 64 model.embedding_dim 0.0 +480 64 optimizer.lr 0.009589536321866399 +480 64 training.batch_size 0.0 +480 64 training.label_smoothing 0.027862744919511916 +480 65 model.embedding_dim 1.0 +480 65 optimizer.lr 0.001824631249681451 +480 65 training.batch_size 1.0 +480 65 training.label_smoothing 0.23372465844317628 +480 66 model.embedding_dim 0.0 +480 66 optimizer.lr 0.015738693391715818 +480 66 training.batch_size 2.0 +480 66 training.label_smoothing 0.20773965899818078 +480 67 model.embedding_dim 0.0 +480 67 optimizer.lr 0.0010209583668781244 +480 67 training.batch_size 0.0 +480 67 training.label_smoothing 0.009489457234688291 +480 68 model.embedding_dim 2.0 +480 68 optimizer.lr 0.013038469640892504 +480 68 training.batch_size 2.0 +480 68 training.label_smoothing 0.9871255814532316 +480 69 model.embedding_dim 2.0 +480 69 optimizer.lr 0.0043239410341528615 +480 69 training.batch_size 1.0 +480 69 training.label_smoothing 0.005971676312951627 +480 70 model.embedding_dim 2.0 +480 70 optimizer.lr 0.02386080429490121 +480 70 training.batch_size 1.0 +480 70 training.label_smoothing 0.004857598645616549 +480 71 model.embedding_dim 0.0 +480 71 optimizer.lr 0.06011012616105165 +480 71 training.batch_size 2.0 +480 71 training.label_smoothing 0.8999717702084326 +480 72 model.embedding_dim 1.0 +480 72 optimizer.lr 0.0107222935323629 +480 72 training.batch_size 1.0 +480 72 training.label_smoothing 0.00140399191288895 +480 73 model.embedding_dim 1.0 +480 73 optimizer.lr 0.017698982072102777 +480 73 
training.batch_size 0.0 +480 73 training.label_smoothing 0.7559199760337597 +480 74 model.embedding_dim 0.0 +480 74 optimizer.lr 0.009426166203033944 +480 74 training.batch_size 1.0 +480 74 training.label_smoothing 0.03662970705970301 +480 75 model.embedding_dim 1.0 +480 75 optimizer.lr 0.0031078054926440947 +480 75 training.batch_size 2.0 +480 75 training.label_smoothing 0.3377333926963577 +480 76 model.embedding_dim 2.0 +480 76 optimizer.lr 0.001927357635988378 +480 76 training.batch_size 1.0 +480 76 training.label_smoothing 0.012213188796140908 +480 77 model.embedding_dim 1.0 +480 77 optimizer.lr 0.004930237314904177 +480 77 training.batch_size 0.0 +480 77 training.label_smoothing 0.25339479764757356 +480 78 model.embedding_dim 0.0 +480 78 optimizer.lr 0.002175876385058988 +480 78 training.batch_size 0.0 +480 78 training.label_smoothing 0.04009756855820885 +480 79 model.embedding_dim 2.0 +480 79 optimizer.lr 0.07093396715465781 +480 79 training.batch_size 1.0 +480 79 training.label_smoothing 0.004074057830400342 +480 80 model.embedding_dim 0.0 +480 80 optimizer.lr 0.006355564602601882 +480 80 training.batch_size 2.0 +480 80 training.label_smoothing 0.008381892190733771 +480 81 model.embedding_dim 2.0 +480 81 optimizer.lr 0.0012031631064494746 +480 81 training.batch_size 2.0 +480 81 training.label_smoothing 0.0013371915542322828 +480 82 model.embedding_dim 2.0 +480 82 optimizer.lr 0.005772368898957776 +480 82 training.batch_size 1.0 +480 82 training.label_smoothing 0.3756105664581982 +480 83 model.embedding_dim 2.0 +480 83 optimizer.lr 0.09268198174533564 +480 83 training.batch_size 1.0 +480 83 training.label_smoothing 0.1914692607429327 +480 84 model.embedding_dim 1.0 +480 84 optimizer.lr 0.07682189184454241 +480 84 training.batch_size 2.0 +480 84 training.label_smoothing 0.036049088361866655 +480 85 model.embedding_dim 0.0 +480 85 optimizer.lr 0.008195528390340868 +480 85 training.batch_size 0.0 +480 85 training.label_smoothing 0.03591596685730466 +480 86 
model.embedding_dim 2.0 +480 86 optimizer.lr 0.006028878637943151 +480 86 training.batch_size 1.0 +480 86 training.label_smoothing 0.011424322696030048 +480 87 model.embedding_dim 2.0 +480 87 optimizer.lr 0.01775846757706398 +480 87 training.batch_size 1.0 +480 87 training.label_smoothing 0.005648977928809624 +480 88 model.embedding_dim 2.0 +480 88 optimizer.lr 0.07164528659770766 +480 88 training.batch_size 1.0 +480 88 training.label_smoothing 0.01372195069833267 +480 89 model.embedding_dim 0.0 +480 89 optimizer.lr 0.0023288393591882504 +480 89 training.batch_size 0.0 +480 89 training.label_smoothing 0.0014026781901117632 +480 90 model.embedding_dim 1.0 +480 90 optimizer.lr 0.0014933965984396979 +480 90 training.batch_size 2.0 +480 90 training.label_smoothing 0.5432421250408526 +480 91 model.embedding_dim 2.0 +480 91 optimizer.lr 0.014834753687257132 +480 91 training.batch_size 2.0 +480 91 training.label_smoothing 0.03056300010888292 +480 92 model.embedding_dim 0.0 +480 92 optimizer.lr 0.009360620453829875 +480 92 training.batch_size 0.0 +480 92 training.label_smoothing 0.13318836631722075 +480 93 model.embedding_dim 2.0 +480 93 optimizer.lr 0.022710613629707485 +480 93 training.batch_size 1.0 +480 93 training.label_smoothing 0.9297892430441915 +480 94 model.embedding_dim 2.0 +480 94 optimizer.lr 0.0631533179968282 +480 94 training.batch_size 2.0 +480 94 training.label_smoothing 0.0031134086935648083 +480 95 model.embedding_dim 2.0 +480 95 optimizer.lr 0.06410022108543956 +480 95 training.batch_size 1.0 +480 95 training.label_smoothing 0.002158954361829059 +480 96 model.embedding_dim 0.0 +480 96 optimizer.lr 0.03819324050645115 +480 96 training.batch_size 1.0 +480 96 training.label_smoothing 0.0018522731173942378 +480 97 model.embedding_dim 1.0 +480 97 optimizer.lr 0.0076168188789962656 +480 97 training.batch_size 1.0 +480 97 training.label_smoothing 0.003611096295858879 +480 98 model.embedding_dim 1.0 +480 98 optimizer.lr 0.0010080538107455903 +480 98 
training.batch_size 1.0 +480 98 training.label_smoothing 0.0013249493591914973 +480 99 model.embedding_dim 1.0 +480 99 optimizer.lr 0.007094496459836107 +480 99 training.batch_size 0.0 +480 99 training.label_smoothing 0.22328832192695244 +480 100 model.embedding_dim 2.0 +480 100 optimizer.lr 0.06485262138501185 +480 100 training.batch_size 1.0 +480 100 training.label_smoothing 0.009669197103518368 +480 1 dataset """kinships""" +480 1 model """proje""" +480 1 loss """softplus""" +480 1 regularizer """no""" +480 1 optimizer """adam""" +480 1 training_loop """lcwa""" +480 1 evaluator """rankbased""" +480 2 dataset """kinships""" +480 2 model """proje""" +480 2 loss """softplus""" +480 2 regularizer """no""" +480 2 optimizer """adam""" +480 2 training_loop """lcwa""" +480 2 evaluator """rankbased""" +480 3 dataset """kinships""" +480 3 model """proje""" +480 3 loss """softplus""" +480 3 regularizer """no""" +480 3 optimizer """adam""" +480 3 training_loop """lcwa""" +480 3 evaluator """rankbased""" +480 4 dataset """kinships""" +480 4 model """proje""" +480 4 loss """softplus""" +480 4 regularizer """no""" +480 4 optimizer """adam""" +480 4 training_loop """lcwa""" +480 4 evaluator """rankbased""" +480 5 dataset """kinships""" +480 5 model """proje""" +480 5 loss """softplus""" +480 5 regularizer """no""" +480 5 optimizer """adam""" +480 5 training_loop """lcwa""" +480 5 evaluator """rankbased""" +480 6 dataset """kinships""" +480 6 model """proje""" +480 6 loss """softplus""" +480 6 regularizer """no""" +480 6 optimizer """adam""" +480 6 training_loop """lcwa""" +480 6 evaluator """rankbased""" +480 7 dataset """kinships""" +480 7 model """proje""" +480 7 loss """softplus""" +480 7 regularizer """no""" +480 7 optimizer """adam""" +480 7 training_loop """lcwa""" +480 7 evaluator """rankbased""" +480 8 dataset """kinships""" +480 8 model """proje""" +480 8 loss """softplus""" +480 8 regularizer """no""" +480 8 optimizer """adam""" +480 8 training_loop """lcwa""" +480 8 
evaluator """rankbased""" +480 9 dataset """kinships""" +480 9 model """proje""" +480 9 loss """softplus""" +480 9 regularizer """no""" +480 9 optimizer """adam""" +480 9 training_loop """lcwa""" +480 9 evaluator """rankbased""" +480 10 dataset """kinships""" +480 10 model """proje""" +480 10 loss """softplus""" +480 10 regularizer """no""" +480 10 optimizer """adam""" +480 10 training_loop """lcwa""" +480 10 evaluator """rankbased""" +480 11 dataset """kinships""" +480 11 model """proje""" +480 11 loss """softplus""" +480 11 regularizer """no""" +480 11 optimizer """adam""" +480 11 training_loop """lcwa""" +480 11 evaluator """rankbased""" +480 12 dataset """kinships""" +480 12 model """proje""" +480 12 loss """softplus""" +480 12 regularizer """no""" +480 12 optimizer """adam""" +480 12 training_loop """lcwa""" +480 12 evaluator """rankbased""" +480 13 dataset """kinships""" +480 13 model """proje""" +480 13 loss """softplus""" +480 13 regularizer """no""" +480 13 optimizer """adam""" +480 13 training_loop """lcwa""" +480 13 evaluator """rankbased""" +480 14 dataset """kinships""" +480 14 model """proje""" +480 14 loss """softplus""" +480 14 regularizer """no""" +480 14 optimizer """adam""" +480 14 training_loop """lcwa""" +480 14 evaluator """rankbased""" +480 15 dataset """kinships""" +480 15 model """proje""" +480 15 loss """softplus""" +480 15 regularizer """no""" +480 15 optimizer """adam""" +480 15 training_loop """lcwa""" +480 15 evaluator """rankbased""" +480 16 dataset """kinships""" +480 16 model """proje""" +480 16 loss """softplus""" +480 16 regularizer """no""" +480 16 optimizer """adam""" +480 16 training_loop """lcwa""" +480 16 evaluator """rankbased""" +480 17 dataset """kinships""" +480 17 model """proje""" +480 17 loss """softplus""" +480 17 regularizer """no""" +480 17 optimizer """adam""" +480 17 training_loop """lcwa""" +480 17 evaluator """rankbased""" +480 18 dataset """kinships""" +480 18 model """proje""" +480 18 loss """softplus""" +480 
18 regularizer """no""" +480 18 optimizer """adam""" +480 18 training_loop """lcwa""" +480 18 evaluator """rankbased""" +480 19 dataset """kinships""" +480 19 model """proje""" +480 19 loss """softplus""" +480 19 regularizer """no""" +480 19 optimizer """adam""" +480 19 training_loop """lcwa""" +480 19 evaluator """rankbased""" +480 20 dataset """kinships""" +480 20 model """proje""" +480 20 loss """softplus""" +480 20 regularizer """no""" +480 20 optimizer """adam""" +480 20 training_loop """lcwa""" +480 20 evaluator """rankbased""" +480 21 dataset """kinships""" +480 21 model """proje""" +480 21 loss """softplus""" +480 21 regularizer """no""" +480 21 optimizer """adam""" +480 21 training_loop """lcwa""" +480 21 evaluator """rankbased""" +480 22 dataset """kinships""" +480 22 model """proje""" +480 22 loss """softplus""" +480 22 regularizer """no""" +480 22 optimizer """adam""" +480 22 training_loop """lcwa""" +480 22 evaluator """rankbased""" +480 23 dataset """kinships""" +480 23 model """proje""" +480 23 loss """softplus""" +480 23 regularizer """no""" +480 23 optimizer """adam""" +480 23 training_loop """lcwa""" +480 23 evaluator """rankbased""" +480 24 dataset """kinships""" +480 24 model """proje""" +480 24 loss """softplus""" +480 24 regularizer """no""" +480 24 optimizer """adam""" +480 24 training_loop """lcwa""" +480 24 evaluator """rankbased""" +480 25 dataset """kinships""" +480 25 model """proje""" +480 25 loss """softplus""" +480 25 regularizer """no""" +480 25 optimizer """adam""" +480 25 training_loop """lcwa""" +480 25 evaluator """rankbased""" +480 26 dataset """kinships""" +480 26 model """proje""" +480 26 loss """softplus""" +480 26 regularizer """no""" +480 26 optimizer """adam""" +480 26 training_loop """lcwa""" +480 26 evaluator """rankbased""" +480 27 dataset """kinships""" +480 27 model """proje""" +480 27 loss """softplus""" +480 27 regularizer """no""" +480 27 optimizer """adam""" +480 27 training_loop """lcwa""" +480 27 evaluator 
"""rankbased""" +480 28 dataset """kinships""" +480 28 model """proje""" +480 28 loss """softplus""" +480 28 regularizer """no""" +480 28 optimizer """adam""" +480 28 training_loop """lcwa""" +480 28 evaluator """rankbased""" +480 29 dataset """kinships""" +480 29 model """proje""" +480 29 loss """softplus""" +480 29 regularizer """no""" +480 29 optimizer """adam""" +480 29 training_loop """lcwa""" +480 29 evaluator """rankbased""" +480 30 dataset """kinships""" +480 30 model """proje""" +480 30 loss """softplus""" +480 30 regularizer """no""" +480 30 optimizer """adam""" +480 30 training_loop """lcwa""" +480 30 evaluator """rankbased""" +480 31 dataset """kinships""" +480 31 model """proje""" +480 31 loss """softplus""" +480 31 regularizer """no""" +480 31 optimizer """adam""" +480 31 training_loop """lcwa""" +480 31 evaluator """rankbased""" +480 32 dataset """kinships""" +480 32 model """proje""" +480 32 loss """softplus""" +480 32 regularizer """no""" +480 32 optimizer """adam""" +480 32 training_loop """lcwa""" +480 32 evaluator """rankbased""" +480 33 dataset """kinships""" +480 33 model """proje""" +480 33 loss """softplus""" +480 33 regularizer """no""" +480 33 optimizer """adam""" +480 33 training_loop """lcwa""" +480 33 evaluator """rankbased""" +480 34 dataset """kinships""" +480 34 model """proje""" +480 34 loss """softplus""" +480 34 regularizer """no""" +480 34 optimizer """adam""" +480 34 training_loop """lcwa""" +480 34 evaluator """rankbased""" +480 35 dataset """kinships""" +480 35 model """proje""" +480 35 loss """softplus""" +480 35 regularizer """no""" +480 35 optimizer """adam""" +480 35 training_loop """lcwa""" +480 35 evaluator """rankbased""" +480 36 dataset """kinships""" +480 36 model """proje""" +480 36 loss """softplus""" +480 36 regularizer """no""" +480 36 optimizer """adam""" +480 36 training_loop """lcwa""" +480 36 evaluator """rankbased""" +480 37 dataset """kinships""" +480 37 model """proje""" +480 37 loss """softplus""" +480 37 
regularizer """no""" +480 37 optimizer """adam""" +480 37 training_loop """lcwa""" +480 37 evaluator """rankbased""" +480 38 dataset """kinships""" +480 38 model """proje""" +480 38 loss """softplus""" +480 38 regularizer """no""" +480 38 optimizer """adam""" +480 38 training_loop """lcwa""" +480 38 evaluator """rankbased""" +480 39 dataset """kinships""" +480 39 model """proje""" +480 39 loss """softplus""" +480 39 regularizer """no""" +480 39 optimizer """adam""" +480 39 training_loop """lcwa""" +480 39 evaluator """rankbased""" +480 40 dataset """kinships""" +480 40 model """proje""" +480 40 loss """softplus""" +480 40 regularizer """no""" +480 40 optimizer """adam""" +480 40 training_loop """lcwa""" +480 40 evaluator """rankbased""" +480 41 dataset """kinships""" +480 41 model """proje""" +480 41 loss """softplus""" +480 41 regularizer """no""" +480 41 optimizer """adam""" +480 41 training_loop """lcwa""" +480 41 evaluator """rankbased""" +480 42 dataset """kinships""" +480 42 model """proje""" +480 42 loss """softplus""" +480 42 regularizer """no""" +480 42 optimizer """adam""" +480 42 training_loop """lcwa""" +480 42 evaluator """rankbased""" +480 43 dataset """kinships""" +480 43 model """proje""" +480 43 loss """softplus""" +480 43 regularizer """no""" +480 43 optimizer """adam""" +480 43 training_loop """lcwa""" +480 43 evaluator """rankbased""" +480 44 dataset """kinships""" +480 44 model """proje""" +480 44 loss """softplus""" +480 44 regularizer """no""" +480 44 optimizer """adam""" +480 44 training_loop """lcwa""" +480 44 evaluator """rankbased""" +480 45 dataset """kinships""" +480 45 model """proje""" +480 45 loss """softplus""" +480 45 regularizer """no""" +480 45 optimizer """adam""" +480 45 training_loop """lcwa""" +480 45 evaluator """rankbased""" +480 46 dataset """kinships""" +480 46 model """proje""" +480 46 loss """softplus""" +480 46 regularizer """no""" +480 46 optimizer """adam""" +480 46 training_loop """lcwa""" +480 46 evaluator 
"""rankbased""" +480 47 dataset """kinships""" +480 47 model """proje""" +480 47 loss """softplus""" +480 47 regularizer """no""" +480 47 optimizer """adam""" +480 47 training_loop """lcwa""" +480 47 evaluator """rankbased""" +480 48 dataset """kinships""" +480 48 model """proje""" +480 48 loss """softplus""" +480 48 regularizer """no""" +480 48 optimizer """adam""" +480 48 training_loop """lcwa""" +480 48 evaluator """rankbased""" +480 49 dataset """kinships""" +480 49 model """proje""" +480 49 loss """softplus""" +480 49 regularizer """no""" +480 49 optimizer """adam""" +480 49 training_loop """lcwa""" +480 49 evaluator """rankbased""" +480 50 dataset """kinships""" +480 50 model """proje""" +480 50 loss """softplus""" +480 50 regularizer """no""" +480 50 optimizer """adam""" +480 50 training_loop """lcwa""" +480 50 evaluator """rankbased""" +480 51 dataset """kinships""" +480 51 model """proje""" +480 51 loss """softplus""" +480 51 regularizer """no""" +480 51 optimizer """adam""" +480 51 training_loop """lcwa""" +480 51 evaluator """rankbased""" +480 52 dataset """kinships""" +480 52 model """proje""" +480 52 loss """softplus""" +480 52 regularizer """no""" +480 52 optimizer """adam""" +480 52 training_loop """lcwa""" +480 52 evaluator """rankbased""" +480 53 dataset """kinships""" +480 53 model """proje""" +480 53 loss """softplus""" +480 53 regularizer """no""" +480 53 optimizer """adam""" +480 53 training_loop """lcwa""" +480 53 evaluator """rankbased""" +480 54 dataset """kinships""" +480 54 model """proje""" +480 54 loss """softplus""" +480 54 regularizer """no""" +480 54 optimizer """adam""" +480 54 training_loop """lcwa""" +480 54 evaluator """rankbased""" +480 55 dataset """kinships""" +480 55 model """proje""" +480 55 loss """softplus""" +480 55 regularizer """no""" +480 55 optimizer """adam""" +480 55 training_loop """lcwa""" +480 55 evaluator """rankbased""" +480 56 dataset """kinships""" +480 56 model """proje""" +480 56 loss """softplus""" +480 56 
regularizer """no""" +480 56 optimizer """adam""" +480 56 training_loop """lcwa""" +480 56 evaluator """rankbased""" +480 57 dataset """kinships""" +480 57 model """proje""" +480 57 loss """softplus""" +480 57 regularizer """no""" +480 57 optimizer """adam""" +480 57 training_loop """lcwa""" +480 57 evaluator """rankbased""" +480 58 dataset """kinships""" +480 58 model """proje""" +480 58 loss """softplus""" +480 58 regularizer """no""" +480 58 optimizer """adam""" +480 58 training_loop """lcwa""" +480 58 evaluator """rankbased""" +480 59 dataset """kinships""" +480 59 model """proje""" +480 59 loss """softplus""" +480 59 regularizer """no""" +480 59 optimizer """adam""" +480 59 training_loop """lcwa""" +480 59 evaluator """rankbased""" +480 60 dataset """kinships""" +480 60 model """proje""" +480 60 loss """softplus""" +480 60 regularizer """no""" +480 60 optimizer """adam""" +480 60 training_loop """lcwa""" +480 60 evaluator """rankbased""" +480 61 dataset """kinships""" +480 61 model """proje""" +480 61 loss """softplus""" +480 61 regularizer """no""" +480 61 optimizer """adam""" +480 61 training_loop """lcwa""" +480 61 evaluator """rankbased""" +480 62 dataset """kinships""" +480 62 model """proje""" +480 62 loss """softplus""" +480 62 regularizer """no""" +480 62 optimizer """adam""" +480 62 training_loop """lcwa""" +480 62 evaluator """rankbased""" +480 63 dataset """kinships""" +480 63 model """proje""" +480 63 loss """softplus""" +480 63 regularizer """no""" +480 63 optimizer """adam""" +480 63 training_loop """lcwa""" +480 63 evaluator """rankbased""" +480 64 dataset """kinships""" +480 64 model """proje""" +480 64 loss """softplus""" +480 64 regularizer """no""" +480 64 optimizer """adam""" +480 64 training_loop """lcwa""" +480 64 evaluator """rankbased""" +480 65 dataset """kinships""" +480 65 model """proje""" +480 65 loss """softplus""" +480 65 regularizer """no""" +480 65 optimizer """adam""" +480 65 training_loop """lcwa""" +480 65 evaluator 
"""rankbased""" +480 66 dataset """kinships""" +480 66 model """proje""" +480 66 loss """softplus""" +480 66 regularizer """no""" +480 66 optimizer """adam""" +480 66 training_loop """lcwa""" +480 66 evaluator """rankbased""" +480 67 dataset """kinships""" +480 67 model """proje""" +480 67 loss """softplus""" +480 67 regularizer """no""" +480 67 optimizer """adam""" +480 67 training_loop """lcwa""" +480 67 evaluator """rankbased""" +480 68 dataset """kinships""" +480 68 model """proje""" +480 68 loss """softplus""" +480 68 regularizer """no""" +480 68 optimizer """adam""" +480 68 training_loop """lcwa""" +480 68 evaluator """rankbased""" +480 69 dataset """kinships""" +480 69 model """proje""" +480 69 loss """softplus""" +480 69 regularizer """no""" +480 69 optimizer """adam""" +480 69 training_loop """lcwa""" +480 69 evaluator """rankbased""" +480 70 dataset """kinships""" +480 70 model """proje""" +480 70 loss """softplus""" +480 70 regularizer """no""" +480 70 optimizer """adam""" +480 70 training_loop """lcwa""" +480 70 evaluator """rankbased""" +480 71 dataset """kinships""" +480 71 model """proje""" +480 71 loss """softplus""" +480 71 regularizer """no""" +480 71 optimizer """adam""" +480 71 training_loop """lcwa""" +480 71 evaluator """rankbased""" +480 72 dataset """kinships""" +480 72 model """proje""" +480 72 loss """softplus""" +480 72 regularizer """no""" +480 72 optimizer """adam""" +480 72 training_loop """lcwa""" +480 72 evaluator """rankbased""" +480 73 dataset """kinships""" +480 73 model """proje""" +480 73 loss """softplus""" +480 73 regularizer """no""" +480 73 optimizer """adam""" +480 73 training_loop """lcwa""" +480 73 evaluator """rankbased""" +480 74 dataset """kinships""" +480 74 model """proje""" +480 74 loss """softplus""" +480 74 regularizer """no""" +480 74 optimizer """adam""" +480 74 training_loop """lcwa""" +480 74 evaluator """rankbased""" +480 75 dataset """kinships""" +480 75 model """proje""" +480 75 loss """softplus""" +480 75 
regularizer """no""" +480 75 optimizer """adam""" +480 75 training_loop """lcwa""" +480 75 evaluator """rankbased""" +480 76 dataset """kinships""" +480 76 model """proje""" +480 76 loss """softplus""" +480 76 regularizer """no""" +480 76 optimizer """adam""" +480 76 training_loop """lcwa""" +480 76 evaluator """rankbased""" +480 77 dataset """kinships""" +480 77 model """proje""" +480 77 loss """softplus""" +480 77 regularizer """no""" +480 77 optimizer """adam""" +480 77 training_loop """lcwa""" +480 77 evaluator """rankbased""" +480 78 dataset """kinships""" +480 78 model """proje""" +480 78 loss """softplus""" +480 78 regularizer """no""" +480 78 optimizer """adam""" +480 78 training_loop """lcwa""" +480 78 evaluator """rankbased""" +480 79 dataset """kinships""" +480 79 model """proje""" +480 79 loss """softplus""" +480 79 regularizer """no""" +480 79 optimizer """adam""" +480 79 training_loop """lcwa""" +480 79 evaluator """rankbased""" +480 80 dataset """kinships""" +480 80 model """proje""" +480 80 loss """softplus""" +480 80 regularizer """no""" +480 80 optimizer """adam""" +480 80 training_loop """lcwa""" +480 80 evaluator """rankbased""" +480 81 dataset """kinships""" +480 81 model """proje""" +480 81 loss """softplus""" +480 81 regularizer """no""" +480 81 optimizer """adam""" +480 81 training_loop """lcwa""" +480 81 evaluator """rankbased""" +480 82 dataset """kinships""" +480 82 model """proje""" +480 82 loss """softplus""" +480 82 regularizer """no""" +480 82 optimizer """adam""" +480 82 training_loop """lcwa""" +480 82 evaluator """rankbased""" +480 83 dataset """kinships""" +480 83 model """proje""" +480 83 loss """softplus""" +480 83 regularizer """no""" +480 83 optimizer """adam""" +480 83 training_loop """lcwa""" +480 83 evaluator """rankbased""" +480 84 dataset """kinships""" +480 84 model """proje""" +480 84 loss """softplus""" +480 84 regularizer """no""" +480 84 optimizer """adam""" +480 84 training_loop """lcwa""" +480 84 evaluator 
"""rankbased""" +480 85 dataset """kinships""" +480 85 model """proje""" +480 85 loss """softplus""" +480 85 regularizer """no""" +480 85 optimizer """adam""" +480 85 training_loop """lcwa""" +480 85 evaluator """rankbased""" +480 86 dataset """kinships""" +480 86 model """proje""" +480 86 loss """softplus""" +480 86 regularizer """no""" +480 86 optimizer """adam""" +480 86 training_loop """lcwa""" +480 86 evaluator """rankbased""" +480 87 dataset """kinships""" +480 87 model """proje""" +480 87 loss """softplus""" +480 87 regularizer """no""" +480 87 optimizer """adam""" +480 87 training_loop """lcwa""" +480 87 evaluator """rankbased""" +480 88 dataset """kinships""" +480 88 model """proje""" +480 88 loss """softplus""" +480 88 regularizer """no""" +480 88 optimizer """adam""" +480 88 training_loop """lcwa""" +480 88 evaluator """rankbased""" +480 89 dataset """kinships""" +480 89 model """proje""" +480 89 loss """softplus""" +480 89 regularizer """no""" +480 89 optimizer """adam""" +480 89 training_loop """lcwa""" +480 89 evaluator """rankbased""" +480 90 dataset """kinships""" +480 90 model """proje""" +480 90 loss """softplus""" +480 90 regularizer """no""" +480 90 optimizer """adam""" +480 90 training_loop """lcwa""" +480 90 evaluator """rankbased""" +480 91 dataset """kinships""" +480 91 model """proje""" +480 91 loss """softplus""" +480 91 regularizer """no""" +480 91 optimizer """adam""" +480 91 training_loop """lcwa""" +480 91 evaluator """rankbased""" +480 92 dataset """kinships""" +480 92 model """proje""" +480 92 loss """softplus""" +480 92 regularizer """no""" +480 92 optimizer """adam""" +480 92 training_loop """lcwa""" +480 92 evaluator """rankbased""" +480 93 dataset """kinships""" +480 93 model """proje""" +480 93 loss """softplus""" +480 93 regularizer """no""" +480 93 optimizer """adam""" +480 93 training_loop """lcwa""" +480 93 evaluator """rankbased""" +480 94 dataset """kinships""" +480 94 model """proje""" +480 94 loss """softplus""" +480 94 
regularizer """no""" +480 94 optimizer """adam""" +480 94 training_loop """lcwa""" +480 94 evaluator """rankbased""" +480 95 dataset """kinships""" +480 95 model """proje""" +480 95 loss """softplus""" +480 95 regularizer """no""" +480 95 optimizer """adam""" +480 95 training_loop """lcwa""" +480 95 evaluator """rankbased""" +480 96 dataset """kinships""" +480 96 model """proje""" +480 96 loss """softplus""" +480 96 regularizer """no""" +480 96 optimizer """adam""" +480 96 training_loop """lcwa""" +480 96 evaluator """rankbased""" +480 97 dataset """kinships""" +480 97 model """proje""" +480 97 loss """softplus""" +480 97 regularizer """no""" +480 97 optimizer """adam""" +480 97 training_loop """lcwa""" +480 97 evaluator """rankbased""" +480 98 dataset """kinships""" +480 98 model """proje""" +480 98 loss """softplus""" +480 98 regularizer """no""" +480 98 optimizer """adam""" +480 98 training_loop """lcwa""" +480 98 evaluator """rankbased""" +480 99 dataset """kinships""" +480 99 model """proje""" +480 99 loss """softplus""" +480 99 regularizer """no""" +480 99 optimizer """adam""" +480 99 training_loop """lcwa""" +480 99 evaluator """rankbased""" +480 100 dataset """kinships""" +480 100 model """proje""" +480 100 loss """softplus""" +480 100 regularizer """no""" +480 100 optimizer """adam""" +480 100 training_loop """lcwa""" +480 100 evaluator """rankbased""" +481 1 model.embedding_dim 1.0 +481 1 optimizer.lr 0.0022723941722003534 +481 1 training.batch_size 1.0 +481 1 training.label_smoothing 0.0015050023099193752 +481 2 model.embedding_dim 1.0 +481 2 optimizer.lr 0.06286744453641462 +481 2 training.batch_size 2.0 +481 2 training.label_smoothing 0.19304629884441793 +481 3 model.embedding_dim 2.0 +481 3 optimizer.lr 0.008529438240219785 +481 3 training.batch_size 2.0 +481 3 training.label_smoothing 0.7572518016564661 +481 4 model.embedding_dim 0.0 +481 4 optimizer.lr 0.002321910016275626 +481 4 training.batch_size 1.0 +481 4 training.label_smoothing 
0.0012285796122555248 +481 5 model.embedding_dim 0.0 +481 5 optimizer.lr 0.002413137655096576 +481 5 training.batch_size 0.0 +481 5 training.label_smoothing 0.0027325227162169944 +481 6 model.embedding_dim 1.0 +481 6 optimizer.lr 0.08779958337846967 +481 6 training.batch_size 1.0 +481 6 training.label_smoothing 0.005413466438198805 +481 7 model.embedding_dim 1.0 +481 7 optimizer.lr 0.0011983608343939115 +481 7 training.batch_size 0.0 +481 7 training.label_smoothing 0.0023293599348799465 +481 8 model.embedding_dim 1.0 +481 8 optimizer.lr 0.07584676185258703 +481 8 training.batch_size 1.0 +481 8 training.label_smoothing 0.0010162130988380664 +481 9 model.embedding_dim 2.0 +481 9 optimizer.lr 0.06158927450072304 +481 9 training.batch_size 0.0 +481 9 training.label_smoothing 0.039178693670890746 +481 10 model.embedding_dim 1.0 +481 10 optimizer.lr 0.0014006185889135598 +481 10 training.batch_size 1.0 +481 10 training.label_smoothing 0.72863460830231 +481 11 model.embedding_dim 2.0 +481 11 optimizer.lr 0.0010397006410311892 +481 11 training.batch_size 0.0 +481 11 training.label_smoothing 0.0019437108938876633 +481 12 model.embedding_dim 0.0 +481 12 optimizer.lr 0.0074592350579636224 +481 12 training.batch_size 1.0 +481 12 training.label_smoothing 0.011658785813617139 +481 13 model.embedding_dim 1.0 +481 13 optimizer.lr 0.021122352443108373 +481 13 training.batch_size 1.0 +481 13 training.label_smoothing 0.1377551995501191 +481 14 model.embedding_dim 2.0 +481 14 optimizer.lr 0.00198310567025084 +481 14 training.batch_size 1.0 +481 14 training.label_smoothing 0.0017641449547925049 +481 15 model.embedding_dim 2.0 +481 15 optimizer.lr 0.024074340771557505 +481 15 training.batch_size 2.0 +481 15 training.label_smoothing 0.005122342379083678 +481 16 model.embedding_dim 1.0 +481 16 optimizer.lr 0.03591880026766299 +481 16 training.batch_size 1.0 +481 16 training.label_smoothing 0.01859404879292044 +481 17 model.embedding_dim 2.0 +481 17 optimizer.lr 0.008708692013096596 +481 
17 training.batch_size 2.0 +481 17 training.label_smoothing 0.002425241036921007 +481 18 model.embedding_dim 2.0 +481 18 optimizer.lr 0.04248296664698107 +481 18 training.batch_size 1.0 +481 18 training.label_smoothing 0.006444833614471734 +481 19 model.embedding_dim 1.0 +481 19 optimizer.lr 0.004049398879025784 +481 19 training.batch_size 2.0 +481 19 training.label_smoothing 0.002008244457630474 +481 20 model.embedding_dim 0.0 +481 20 optimizer.lr 0.004128752776029613 +481 20 training.batch_size 1.0 +481 20 training.label_smoothing 0.0256543237208224 +481 21 model.embedding_dim 1.0 +481 21 optimizer.lr 0.0010191158966437242 +481 21 training.batch_size 1.0 +481 21 training.label_smoothing 0.0016106353811952123 +481 22 model.embedding_dim 0.0 +481 22 optimizer.lr 0.03431511380516951 +481 22 training.batch_size 0.0 +481 22 training.label_smoothing 0.010598067652122269 +481 23 model.embedding_dim 1.0 +481 23 optimizer.lr 0.005407386758499935 +481 23 training.batch_size 1.0 +481 23 training.label_smoothing 0.0022926028405108453 +481 24 model.embedding_dim 2.0 +481 24 optimizer.lr 0.004877517130751023 +481 24 training.batch_size 0.0 +481 24 training.label_smoothing 0.01623062937665083 +481 25 model.embedding_dim 1.0 +481 25 optimizer.lr 0.004283854505288278 +481 25 training.batch_size 2.0 +481 25 training.label_smoothing 0.0035047818663873215 +481 26 model.embedding_dim 1.0 +481 26 optimizer.lr 0.0652651713015742 +481 26 training.batch_size 2.0 +481 26 training.label_smoothing 0.0022235727830997873 +481 27 model.embedding_dim 2.0 +481 27 optimizer.lr 0.01384730132408218 +481 27 training.batch_size 2.0 +481 27 training.label_smoothing 0.8428392603630952 +481 28 model.embedding_dim 0.0 +481 28 optimizer.lr 0.07509235106098826 +481 28 training.batch_size 1.0 +481 28 training.label_smoothing 0.05089604373903217 +481 29 model.embedding_dim 0.0 +481 29 optimizer.lr 0.004332373519042915 +481 29 training.batch_size 0.0 +481 29 training.label_smoothing 0.024769712535073672 +481 
30 model.embedding_dim 0.0 +481 30 optimizer.lr 0.0011095712934236033 +481 30 training.batch_size 2.0 +481 30 training.label_smoothing 0.0034665033736265747 +481 31 model.embedding_dim 1.0 +481 31 optimizer.lr 0.05182739911734515 +481 31 training.batch_size 2.0 +481 31 training.label_smoothing 0.0070787943754639215 +481 32 model.embedding_dim 1.0 +481 32 optimizer.lr 0.0013951172296858662 +481 32 training.batch_size 2.0 +481 32 training.label_smoothing 0.12840956434469356 +481 33 model.embedding_dim 0.0 +481 33 optimizer.lr 0.0157076005098275 +481 33 training.batch_size 1.0 +481 33 training.label_smoothing 0.0013260684216725904 +481 34 model.embedding_dim 2.0 +481 34 optimizer.lr 0.007290127858091983 +481 34 training.batch_size 2.0 +481 34 training.label_smoothing 0.0035317300377653124 +481 35 model.embedding_dim 2.0 +481 35 optimizer.lr 0.018116553777724576 +481 35 training.batch_size 0.0 +481 35 training.label_smoothing 0.5271812197729253 +481 36 model.embedding_dim 1.0 +481 36 optimizer.lr 0.021854386438769066 +481 36 training.batch_size 0.0 +481 36 training.label_smoothing 0.018315016623932343 +481 37 model.embedding_dim 1.0 +481 37 optimizer.lr 0.0818971932212419 +481 37 training.batch_size 0.0 +481 37 training.label_smoothing 0.0030736775211961213 +481 38 model.embedding_dim 1.0 +481 38 optimizer.lr 0.05838144770784509 +481 38 training.batch_size 2.0 +481 38 training.label_smoothing 0.17144004794222806 +481 39 model.embedding_dim 0.0 +481 39 optimizer.lr 0.004093461558537012 +481 39 training.batch_size 1.0 +481 39 training.label_smoothing 0.009649989337144994 +481 40 model.embedding_dim 2.0 +481 40 optimizer.lr 0.003860845242459735 +481 40 training.batch_size 1.0 +481 40 training.label_smoothing 0.0046117653093535865 +481 41 model.embedding_dim 0.0 +481 41 optimizer.lr 0.020457281247288732 +481 41 training.batch_size 2.0 +481 41 training.label_smoothing 0.0017389628100270243 +481 42 model.embedding_dim 1.0 +481 42 optimizer.lr 0.08578773362087194 +481 42 
training.batch_size 2.0 +481 42 training.label_smoothing 0.007579951850842993 +481 43 model.embedding_dim 2.0 +481 43 optimizer.lr 0.001744593057555354 +481 43 training.batch_size 0.0 +481 43 training.label_smoothing 0.008247066755098552 +481 44 model.embedding_dim 0.0 +481 44 optimizer.lr 0.002586314766385778 +481 44 training.batch_size 2.0 +481 44 training.label_smoothing 0.005558168157730102 +481 45 model.embedding_dim 2.0 +481 45 optimizer.lr 0.00947236867222712 +481 45 training.batch_size 1.0 +481 45 training.label_smoothing 0.006702068503144327 +481 46 model.embedding_dim 2.0 +481 46 optimizer.lr 0.028662890801737863 +481 46 training.batch_size 2.0 +481 46 training.label_smoothing 0.35611456076226566 +481 47 model.embedding_dim 1.0 +481 47 optimizer.lr 0.007207955270351588 +481 47 training.batch_size 2.0 +481 47 training.label_smoothing 0.42386196820750593 +481 48 model.embedding_dim 2.0 +481 48 optimizer.lr 0.0011084269152736457 +481 48 training.batch_size 1.0 +481 48 training.label_smoothing 0.022698112669482677 +481 49 model.embedding_dim 0.0 +481 49 optimizer.lr 0.0038980040552718835 +481 49 training.batch_size 0.0 +481 49 training.label_smoothing 0.00840944310520533 +481 50 model.embedding_dim 1.0 +481 50 optimizer.lr 0.008884784708713034 +481 50 training.batch_size 2.0 +481 50 training.label_smoothing 0.046533357353956604 +481 51 model.embedding_dim 0.0 +481 51 optimizer.lr 0.05707555952481981 +481 51 training.batch_size 1.0 +481 51 training.label_smoothing 0.022630422719933097 +481 52 model.embedding_dim 1.0 +481 52 optimizer.lr 0.005313400682089175 +481 52 training.batch_size 2.0 +481 52 training.label_smoothing 0.3076126724361636 +481 53 model.embedding_dim 1.0 +481 53 optimizer.lr 0.005948942526060246 +481 53 training.batch_size 1.0 +481 53 training.label_smoothing 0.004101917472671886 +481 54 model.embedding_dim 2.0 +481 54 optimizer.lr 0.05924608266937961 +481 54 training.batch_size 1.0 +481 54 training.label_smoothing 0.22183339443936653 +481 55 
model.embedding_dim 2.0 +481 55 optimizer.lr 0.026438767132066943 +481 55 training.batch_size 2.0 +481 55 training.label_smoothing 0.1061069332896598 +481 56 model.embedding_dim 1.0 +481 56 optimizer.lr 0.002835183995281438 +481 56 training.batch_size 1.0 +481 56 training.label_smoothing 0.0831813021591489 +481 57 model.embedding_dim 1.0 +481 57 optimizer.lr 0.0018729522765996737 +481 57 training.batch_size 0.0 +481 57 training.label_smoothing 0.1302773489339078 +481 58 model.embedding_dim 0.0 +481 58 optimizer.lr 0.0277641238894542 +481 58 training.batch_size 0.0 +481 58 training.label_smoothing 0.015864140053912734 +481 59 model.embedding_dim 1.0 +481 59 optimizer.lr 0.005259714894086897 +481 59 training.batch_size 0.0 +481 59 training.label_smoothing 0.009136780148793947 +481 60 model.embedding_dim 1.0 +481 60 optimizer.lr 0.004315120441754083 +481 60 training.batch_size 2.0 +481 60 training.label_smoothing 0.001763973526780421 +481 61 model.embedding_dim 0.0 +481 61 optimizer.lr 0.015137461351348845 +481 61 training.batch_size 1.0 +481 61 training.label_smoothing 0.015903827353202424 +481 62 model.embedding_dim 0.0 +481 62 optimizer.lr 0.04416465821432678 +481 62 training.batch_size 2.0 +481 62 training.label_smoothing 0.0015675133862983457 +481 63 model.embedding_dim 0.0 +481 63 optimizer.lr 0.014269626066892251 +481 63 training.batch_size 2.0 +481 63 training.label_smoothing 0.10401822546085257 +481 64 model.embedding_dim 2.0 +481 64 optimizer.lr 0.008465724713366514 +481 64 training.batch_size 0.0 +481 64 training.label_smoothing 0.0016438432675056568 +481 65 model.embedding_dim 0.0 +481 65 optimizer.lr 0.05893242775836445 +481 65 training.batch_size 2.0 +481 65 training.label_smoothing 0.47467430433859725 +481 66 model.embedding_dim 1.0 +481 66 optimizer.lr 0.0332300935903361 +481 66 training.batch_size 1.0 +481 66 training.label_smoothing 0.001613037943601074 +481 67 model.embedding_dim 0.0 +481 67 optimizer.lr 0.06653147119284941 +481 67 
training.batch_size 0.0 +481 67 training.label_smoothing 0.06515346196921058 +481 68 model.embedding_dim 0.0 +481 68 optimizer.lr 0.0028094054386006715 +481 68 training.batch_size 0.0 +481 68 training.label_smoothing 0.001851635159160133 +481 69 model.embedding_dim 1.0 +481 69 optimizer.lr 0.022428952027858835 +481 69 training.batch_size 1.0 +481 69 training.label_smoothing 0.5331088171128313 +481 70 model.embedding_dim 1.0 +481 70 optimizer.lr 0.0015273577959608424 +481 70 training.batch_size 2.0 +481 70 training.label_smoothing 0.674294684634415 +481 71 model.embedding_dim 0.0 +481 71 optimizer.lr 0.03800262483769948 +481 71 training.batch_size 0.0 +481 71 training.label_smoothing 0.013204412295018769 +481 72 model.embedding_dim 1.0 +481 72 optimizer.lr 0.03214703314518009 +481 72 training.batch_size 0.0 +481 72 training.label_smoothing 0.007248593648935731 +481 73 model.embedding_dim 0.0 +481 73 optimizer.lr 0.02213512766654868 +481 73 training.batch_size 2.0 +481 73 training.label_smoothing 0.013221759761181886 +481 74 model.embedding_dim 1.0 +481 74 optimizer.lr 0.0033305223356191473 +481 74 training.batch_size 2.0 +481 74 training.label_smoothing 0.016994747318905188 +481 75 model.embedding_dim 2.0 +481 75 optimizer.lr 0.014072050743534358 +481 75 training.batch_size 2.0 +481 75 training.label_smoothing 0.35433962591865553 +481 76 model.embedding_dim 1.0 +481 76 optimizer.lr 0.004576906127074098 +481 76 training.batch_size 1.0 +481 76 training.label_smoothing 0.004055350522637367 +481 77 model.embedding_dim 0.0 +481 77 optimizer.lr 0.0015417352989228654 +481 77 training.batch_size 0.0 +481 77 training.label_smoothing 0.057920948417987166 +481 78 model.embedding_dim 0.0 +481 78 optimizer.lr 0.0028710182958758814 +481 78 training.batch_size 1.0 +481 78 training.label_smoothing 0.5116713251687348 +481 79 model.embedding_dim 0.0 +481 79 optimizer.lr 0.03392201039256011 +481 79 training.batch_size 0.0 +481 79 training.label_smoothing 0.2690724926708965 +481 80 
model.embedding_dim 0.0 +481 80 optimizer.lr 0.037572620185814656 +481 80 training.batch_size 0.0 +481 80 training.label_smoothing 0.012438289260147072 +481 81 model.embedding_dim 0.0 +481 81 optimizer.lr 0.04609742648141279 +481 81 training.batch_size 1.0 +481 81 training.label_smoothing 0.2196806539999351 +481 82 model.embedding_dim 0.0 +481 82 optimizer.lr 0.0021432038521497624 +481 82 training.batch_size 0.0 +481 82 training.label_smoothing 0.017802768765447785 +481 83 model.embedding_dim 2.0 +481 83 optimizer.lr 0.003914762259585978 +481 83 training.batch_size 0.0 +481 83 training.label_smoothing 0.14099536463647314 +481 84 model.embedding_dim 1.0 +481 84 optimizer.lr 0.014682480865138523 +481 84 training.batch_size 2.0 +481 84 training.label_smoothing 0.2204355105719559 +481 85 model.embedding_dim 2.0 +481 85 optimizer.lr 0.013805390774209579 +481 85 training.batch_size 1.0 +481 85 training.label_smoothing 0.001477326173037786 +481 86 model.embedding_dim 2.0 +481 86 optimizer.lr 0.007491615741721466 +481 86 training.batch_size 2.0 +481 86 training.label_smoothing 0.19858049213145385 +481 87 model.embedding_dim 1.0 +481 87 optimizer.lr 0.011962081914272747 +481 87 training.batch_size 1.0 +481 87 training.label_smoothing 0.01972754549519973 +481 88 model.embedding_dim 1.0 +481 88 optimizer.lr 0.030663203198144494 +481 88 training.batch_size 0.0 +481 88 training.label_smoothing 0.32903726379830184 +481 89 model.embedding_dim 1.0 +481 89 optimizer.lr 0.0025165874544778996 +481 89 training.batch_size 1.0 +481 89 training.label_smoothing 0.07565414016622121 +481 90 model.embedding_dim 0.0 +481 90 optimizer.lr 0.01124569383344429 +481 90 training.batch_size 0.0 +481 90 training.label_smoothing 0.0029192709710454453 +481 91 model.embedding_dim 0.0 +481 91 optimizer.lr 0.0011238895896654654 +481 91 training.batch_size 2.0 +481 91 training.label_smoothing 0.2747722723290664 +481 92 model.embedding_dim 1.0 +481 92 optimizer.lr 0.0016685591401372432 +481 92 
training.batch_size 0.0 +481 92 training.label_smoothing 0.004530749737162562 +481 93 model.embedding_dim 1.0 +481 93 optimizer.lr 0.0024612301912244204 +481 93 training.batch_size 1.0 +481 93 training.label_smoothing 0.0018268363758777541 +481 94 model.embedding_dim 2.0 +481 94 optimizer.lr 0.05634069891747565 +481 94 training.batch_size 2.0 +481 94 training.label_smoothing 0.011633922702051429 +481 95 model.embedding_dim 1.0 +481 95 optimizer.lr 0.0019511308521673598 +481 95 training.batch_size 0.0 +481 95 training.label_smoothing 0.001118757522358305 +481 96 model.embedding_dim 0.0 +481 96 optimizer.lr 0.05758087606321281 +481 96 training.batch_size 2.0 +481 96 training.label_smoothing 0.5004198985023999 +481 97 model.embedding_dim 2.0 +481 97 optimizer.lr 0.00118007364299297 +481 97 training.batch_size 2.0 +481 97 training.label_smoothing 0.0038996478783759466 +481 98 model.embedding_dim 0.0 +481 98 optimizer.lr 0.028363522322577887 +481 98 training.batch_size 0.0 +481 98 training.label_smoothing 0.051336364894680284 +481 99 model.embedding_dim 0.0 +481 99 optimizer.lr 0.00130427302186651 +481 99 training.batch_size 1.0 +481 99 training.label_smoothing 0.23153347158409363 +481 100 model.embedding_dim 0.0 +481 100 optimizer.lr 0.013983301187803998 +481 100 training.batch_size 1.0 +481 100 training.label_smoothing 0.0011712123679017367 +481 1 dataset """kinships""" +481 1 model """proje""" +481 1 loss """bceaftersigmoid""" +481 1 regularizer """no""" +481 1 optimizer """adam""" +481 1 training_loop """lcwa""" +481 1 evaluator """rankbased""" +481 2 dataset """kinships""" +481 2 model """proje""" +481 2 loss """bceaftersigmoid""" +481 2 regularizer """no""" +481 2 optimizer """adam""" +481 2 training_loop """lcwa""" +481 2 evaluator """rankbased""" +481 3 dataset """kinships""" +481 3 model """proje""" +481 3 loss """bceaftersigmoid""" +481 3 regularizer """no""" +481 3 optimizer """adam""" +481 3 training_loop """lcwa""" +481 3 evaluator """rankbased""" +481 4 
dataset """kinships""" +481 4 model """proje""" +481 4 loss """bceaftersigmoid""" +481 4 regularizer """no""" +481 4 optimizer """adam""" +481 4 training_loop """lcwa""" +481 4 evaluator """rankbased""" +481 5 dataset """kinships""" +481 5 model """proje""" +481 5 loss """bceaftersigmoid""" +481 5 regularizer """no""" +481 5 optimizer """adam""" +481 5 training_loop """lcwa""" +481 5 evaluator """rankbased""" +481 6 dataset """kinships""" +481 6 model """proje""" +481 6 loss """bceaftersigmoid""" +481 6 regularizer """no""" +481 6 optimizer """adam""" +481 6 training_loop """lcwa""" +481 6 evaluator """rankbased""" +481 7 dataset """kinships""" +481 7 model """proje""" +481 7 loss """bceaftersigmoid""" +481 7 regularizer """no""" +481 7 optimizer """adam""" +481 7 training_loop """lcwa""" +481 7 evaluator """rankbased""" +481 8 dataset """kinships""" +481 8 model """proje""" +481 8 loss """bceaftersigmoid""" +481 8 regularizer """no""" +481 8 optimizer """adam""" +481 8 training_loop """lcwa""" +481 8 evaluator """rankbased""" +481 9 dataset """kinships""" +481 9 model """proje""" +481 9 loss """bceaftersigmoid""" +481 9 regularizer """no""" +481 9 optimizer """adam""" +481 9 training_loop """lcwa""" +481 9 evaluator """rankbased""" +481 10 dataset """kinships""" +481 10 model """proje""" +481 10 loss """bceaftersigmoid""" +481 10 regularizer """no""" +481 10 optimizer """adam""" +481 10 training_loop """lcwa""" +481 10 evaluator """rankbased""" +481 11 dataset """kinships""" +481 11 model """proje""" +481 11 loss """bceaftersigmoid""" +481 11 regularizer """no""" +481 11 optimizer """adam""" +481 11 training_loop """lcwa""" +481 11 evaluator """rankbased""" +481 12 dataset """kinships""" +481 12 model """proje""" +481 12 loss """bceaftersigmoid""" +481 12 regularizer """no""" +481 12 optimizer """adam""" +481 12 training_loop """lcwa""" +481 12 evaluator """rankbased""" +481 13 dataset """kinships""" +481 13 model """proje""" +481 13 loss """bceaftersigmoid""" 
+481 13 regularizer """no""" +481 13 optimizer """adam""" +481 13 training_loop """lcwa""" +481 13 evaluator """rankbased""" +481 14 dataset """kinships""" +481 14 model """proje""" +481 14 loss """bceaftersigmoid""" +481 14 regularizer """no""" +481 14 optimizer """adam""" +481 14 training_loop """lcwa""" +481 14 evaluator """rankbased""" +481 15 dataset """kinships""" +481 15 model """proje""" +481 15 loss """bceaftersigmoid""" +481 15 regularizer """no""" +481 15 optimizer """adam""" +481 15 training_loop """lcwa""" +481 15 evaluator """rankbased""" +481 16 dataset """kinships""" +481 16 model """proje""" +481 16 loss """bceaftersigmoid""" +481 16 regularizer """no""" +481 16 optimizer """adam""" +481 16 training_loop """lcwa""" +481 16 evaluator """rankbased""" +481 17 dataset """kinships""" +481 17 model """proje""" +481 17 loss """bceaftersigmoid""" +481 17 regularizer """no""" +481 17 optimizer """adam""" +481 17 training_loop """lcwa""" +481 17 evaluator """rankbased""" +481 18 dataset """kinships""" +481 18 model """proje""" +481 18 loss """bceaftersigmoid""" +481 18 regularizer """no""" +481 18 optimizer """adam""" +481 18 training_loop """lcwa""" +481 18 evaluator """rankbased""" +481 19 dataset """kinships""" +481 19 model """proje""" +481 19 loss """bceaftersigmoid""" +481 19 regularizer """no""" +481 19 optimizer """adam""" +481 19 training_loop """lcwa""" +481 19 evaluator """rankbased""" +481 20 dataset """kinships""" +481 20 model """proje""" +481 20 loss """bceaftersigmoid""" +481 20 regularizer """no""" +481 20 optimizer """adam""" +481 20 training_loop """lcwa""" +481 20 evaluator """rankbased""" +481 21 dataset """kinships""" +481 21 model """proje""" +481 21 loss """bceaftersigmoid""" +481 21 regularizer """no""" +481 21 optimizer """adam""" +481 21 training_loop """lcwa""" +481 21 evaluator """rankbased""" +481 22 dataset """kinships""" +481 22 model """proje""" +481 22 loss """bceaftersigmoid""" +481 22 regularizer """no""" +481 22 optimizer 
"""adam""" +481 22 training_loop """lcwa""" +481 22 evaluator """rankbased""" +481 23 dataset """kinships""" +481 23 model """proje""" +481 23 loss """bceaftersigmoid""" +481 23 regularizer """no""" +481 23 optimizer """adam""" +481 23 training_loop """lcwa""" +481 23 evaluator """rankbased""" +481 24 dataset """kinships""" +481 24 model """proje""" +481 24 loss """bceaftersigmoid""" +481 24 regularizer """no""" +481 24 optimizer """adam""" +481 24 training_loop """lcwa""" +481 24 evaluator """rankbased""" +481 25 dataset """kinships""" +481 25 model """proje""" +481 25 loss """bceaftersigmoid""" +481 25 regularizer """no""" +481 25 optimizer """adam""" +481 25 training_loop """lcwa""" +481 25 evaluator """rankbased""" +481 26 dataset """kinships""" +481 26 model """proje""" +481 26 loss """bceaftersigmoid""" +481 26 regularizer """no""" +481 26 optimizer """adam""" +481 26 training_loop """lcwa""" +481 26 evaluator """rankbased""" +481 27 dataset """kinships""" +481 27 model """proje""" +481 27 loss """bceaftersigmoid""" +481 27 regularizer """no""" +481 27 optimizer """adam""" +481 27 training_loop """lcwa""" +481 27 evaluator """rankbased""" +481 28 dataset """kinships""" +481 28 model """proje""" +481 28 loss """bceaftersigmoid""" +481 28 regularizer """no""" +481 28 optimizer """adam""" +481 28 training_loop """lcwa""" +481 28 evaluator """rankbased""" +481 29 dataset """kinships""" +481 29 model """proje""" +481 29 loss """bceaftersigmoid""" +481 29 regularizer """no""" +481 29 optimizer """adam""" +481 29 training_loop """lcwa""" +481 29 evaluator """rankbased""" +481 30 dataset """kinships""" +481 30 model """proje""" +481 30 loss """bceaftersigmoid""" +481 30 regularizer """no""" +481 30 optimizer """adam""" +481 30 training_loop """lcwa""" +481 30 evaluator """rankbased""" +481 31 dataset """kinships""" +481 31 model """proje""" +481 31 loss """bceaftersigmoid""" +481 31 regularizer """no""" +481 31 optimizer """adam""" +481 31 training_loop """lcwa""" 
+481 31 evaluator """rankbased""" +481 32 dataset """kinships""" +481 32 model """proje""" +481 32 loss """bceaftersigmoid""" +481 32 regularizer """no""" +481 32 optimizer """adam""" +481 32 training_loop """lcwa""" +481 32 evaluator """rankbased""" +481 33 dataset """kinships""" +481 33 model """proje""" +481 33 loss """bceaftersigmoid""" +481 33 regularizer """no""" +481 33 optimizer """adam""" +481 33 training_loop """lcwa""" +481 33 evaluator """rankbased""" +481 34 dataset """kinships""" +481 34 model """proje""" +481 34 loss """bceaftersigmoid""" +481 34 regularizer """no""" +481 34 optimizer """adam""" +481 34 training_loop """lcwa""" +481 34 evaluator """rankbased""" +481 35 dataset """kinships""" +481 35 model """proje""" +481 35 loss """bceaftersigmoid""" +481 35 regularizer """no""" +481 35 optimizer """adam""" +481 35 training_loop """lcwa""" +481 35 evaluator """rankbased""" +481 36 dataset """kinships""" +481 36 model """proje""" +481 36 loss """bceaftersigmoid""" +481 36 regularizer """no""" +481 36 optimizer """adam""" +481 36 training_loop """lcwa""" +481 36 evaluator """rankbased""" +481 37 dataset """kinships""" +481 37 model """proje""" +481 37 loss """bceaftersigmoid""" +481 37 regularizer """no""" +481 37 optimizer """adam""" +481 37 training_loop """lcwa""" +481 37 evaluator """rankbased""" +481 38 dataset """kinships""" +481 38 model """proje""" +481 38 loss """bceaftersigmoid""" +481 38 regularizer """no""" +481 38 optimizer """adam""" +481 38 training_loop """lcwa""" +481 38 evaluator """rankbased""" +481 39 dataset """kinships""" +481 39 model """proje""" +481 39 loss """bceaftersigmoid""" +481 39 regularizer """no""" +481 39 optimizer """adam""" +481 39 training_loop """lcwa""" +481 39 evaluator """rankbased""" +481 40 dataset """kinships""" +481 40 model """proje""" +481 40 loss """bceaftersigmoid""" +481 40 regularizer """no""" +481 40 optimizer """adam""" +481 40 training_loop """lcwa""" +481 40 evaluator """rankbased""" +481 41 
dataset """kinships""" +481 41 model """proje""" +481 41 loss """bceaftersigmoid""" +481 41 regularizer """no""" +481 41 optimizer """adam""" +481 41 training_loop """lcwa""" +481 41 evaluator """rankbased""" +481 42 dataset """kinships""" +481 42 model """proje""" +481 42 loss """bceaftersigmoid""" +481 42 regularizer """no""" +481 42 optimizer """adam""" +481 42 training_loop """lcwa""" +481 42 evaluator """rankbased""" +481 43 dataset """kinships""" +481 43 model """proje""" +481 43 loss """bceaftersigmoid""" +481 43 regularizer """no""" +481 43 optimizer """adam""" +481 43 training_loop """lcwa""" +481 43 evaluator """rankbased""" +481 44 dataset """kinships""" +481 44 model """proje""" +481 44 loss """bceaftersigmoid""" +481 44 regularizer """no""" +481 44 optimizer """adam""" +481 44 training_loop """lcwa""" +481 44 evaluator """rankbased""" +481 45 dataset """kinships""" +481 45 model """proje""" +481 45 loss """bceaftersigmoid""" +481 45 regularizer """no""" +481 45 optimizer """adam""" +481 45 training_loop """lcwa""" +481 45 evaluator """rankbased""" +481 46 dataset """kinships""" +481 46 model """proje""" +481 46 loss """bceaftersigmoid""" +481 46 regularizer """no""" +481 46 optimizer """adam""" +481 46 training_loop """lcwa""" +481 46 evaluator """rankbased""" +481 47 dataset """kinships""" +481 47 model """proje""" +481 47 loss """bceaftersigmoid""" +481 47 regularizer """no""" +481 47 optimizer """adam""" +481 47 training_loop """lcwa""" +481 47 evaluator """rankbased""" +481 48 dataset """kinships""" +481 48 model """proje""" +481 48 loss """bceaftersigmoid""" +481 48 regularizer """no""" +481 48 optimizer """adam""" +481 48 training_loop """lcwa""" +481 48 evaluator """rankbased""" +481 49 dataset """kinships""" +481 49 model """proje""" +481 49 loss """bceaftersigmoid""" +481 49 regularizer """no""" +481 49 optimizer """adam""" +481 49 training_loop """lcwa""" +481 49 evaluator """rankbased""" +481 50 dataset """kinships""" +481 50 model 
"""proje""" +481 50 loss """bceaftersigmoid""" +481 50 regularizer """no""" +481 50 optimizer """adam""" +481 50 training_loop """lcwa""" +481 50 evaluator """rankbased""" +481 51 dataset """kinships""" +481 51 model """proje""" +481 51 loss """bceaftersigmoid""" +481 51 regularizer """no""" +481 51 optimizer """adam""" +481 51 training_loop """lcwa""" +481 51 evaluator """rankbased""" +481 52 dataset """kinships""" +481 52 model """proje""" +481 52 loss """bceaftersigmoid""" +481 52 regularizer """no""" +481 52 optimizer """adam""" +481 52 training_loop """lcwa""" +481 52 evaluator """rankbased""" +481 53 dataset """kinships""" +481 53 model """proje""" +481 53 loss """bceaftersigmoid""" +481 53 regularizer """no""" +481 53 optimizer """adam""" +481 53 training_loop """lcwa""" +481 53 evaluator """rankbased""" +481 54 dataset """kinships""" +481 54 model """proje""" +481 54 loss """bceaftersigmoid""" +481 54 regularizer """no""" +481 54 optimizer """adam""" +481 54 training_loop """lcwa""" +481 54 evaluator """rankbased""" +481 55 dataset """kinships""" +481 55 model """proje""" +481 55 loss """bceaftersigmoid""" +481 55 regularizer """no""" +481 55 optimizer """adam""" +481 55 training_loop """lcwa""" +481 55 evaluator """rankbased""" +481 56 dataset """kinships""" +481 56 model """proje""" +481 56 loss """bceaftersigmoid""" +481 56 regularizer """no""" +481 56 optimizer """adam""" +481 56 training_loop """lcwa""" +481 56 evaluator """rankbased""" +481 57 dataset """kinships""" +481 57 model """proje""" +481 57 loss """bceaftersigmoid""" +481 57 regularizer """no""" +481 57 optimizer """adam""" +481 57 training_loop """lcwa""" +481 57 evaluator """rankbased""" +481 58 dataset """kinships""" +481 58 model """proje""" +481 58 loss """bceaftersigmoid""" +481 58 regularizer """no""" +481 58 optimizer """adam""" +481 58 training_loop """lcwa""" +481 58 evaluator """rankbased""" +481 59 dataset """kinships""" +481 59 model """proje""" +481 59 loss """bceaftersigmoid""" 
+481 59 regularizer """no""" +481 59 optimizer """adam""" +481 59 training_loop """lcwa""" +481 59 evaluator """rankbased""" +481 60 dataset """kinships""" +481 60 model """proje""" +481 60 loss """bceaftersigmoid""" +481 60 regularizer """no""" +481 60 optimizer """adam""" +481 60 training_loop """lcwa""" +481 60 evaluator """rankbased""" +481 61 dataset """kinships""" +481 61 model """proje""" +481 61 loss """bceaftersigmoid""" +481 61 regularizer """no""" +481 61 optimizer """adam""" +481 61 training_loop """lcwa""" +481 61 evaluator """rankbased""" +481 62 dataset """kinships""" +481 62 model """proje""" +481 62 loss """bceaftersigmoid""" +481 62 regularizer """no""" +481 62 optimizer """adam""" +481 62 training_loop """lcwa""" +481 62 evaluator """rankbased""" +481 63 dataset """kinships""" +481 63 model """proje""" +481 63 loss """bceaftersigmoid""" +481 63 regularizer """no""" +481 63 optimizer """adam""" +481 63 training_loop """lcwa""" +481 63 evaluator """rankbased""" +481 64 dataset """kinships""" +481 64 model """proje""" +481 64 loss """bceaftersigmoid""" +481 64 regularizer """no""" +481 64 optimizer """adam""" +481 64 training_loop """lcwa""" +481 64 evaluator """rankbased""" +481 65 dataset """kinships""" +481 65 model """proje""" +481 65 loss """bceaftersigmoid""" +481 65 regularizer """no""" +481 65 optimizer """adam""" +481 65 training_loop """lcwa""" +481 65 evaluator """rankbased""" +481 66 dataset """kinships""" +481 66 model """proje""" +481 66 loss """bceaftersigmoid""" +481 66 regularizer """no""" +481 66 optimizer """adam""" +481 66 training_loop """lcwa""" +481 66 evaluator """rankbased""" +481 67 dataset """kinships""" +481 67 model """proje""" +481 67 loss """bceaftersigmoid""" +481 67 regularizer """no""" +481 67 optimizer """adam""" +481 67 training_loop """lcwa""" +481 67 evaluator """rankbased""" +481 68 dataset """kinships""" +481 68 model """proje""" +481 68 loss """bceaftersigmoid""" +481 68 regularizer """no""" +481 68 optimizer 
"""adam""" +481 68 training_loop """lcwa""" +481 68 evaluator """rankbased""" +481 69 dataset """kinships""" +481 69 model """proje""" +481 69 loss """bceaftersigmoid""" +481 69 regularizer """no""" +481 69 optimizer """adam""" +481 69 training_loop """lcwa""" +481 69 evaluator """rankbased""" +481 70 dataset """kinships""" +481 70 model """proje""" +481 70 loss """bceaftersigmoid""" +481 70 regularizer """no""" +481 70 optimizer """adam""" +481 70 training_loop """lcwa""" +481 70 evaluator """rankbased""" +481 71 dataset """kinships""" +481 71 model """proje""" +481 71 loss """bceaftersigmoid""" +481 71 regularizer """no""" +481 71 optimizer """adam""" +481 71 training_loop """lcwa""" +481 71 evaluator """rankbased""" +481 72 dataset """kinships""" +481 72 model """proje""" +481 72 loss """bceaftersigmoid""" +481 72 regularizer """no""" +481 72 optimizer """adam""" +481 72 training_loop """lcwa""" +481 72 evaluator """rankbased""" +481 73 dataset """kinships""" +481 73 model """proje""" +481 73 loss """bceaftersigmoid""" +481 73 regularizer """no""" +481 73 optimizer """adam""" +481 73 training_loop """lcwa""" +481 73 evaluator """rankbased""" +481 74 dataset """kinships""" +481 74 model """proje""" +481 74 loss """bceaftersigmoid""" +481 74 regularizer """no""" +481 74 optimizer """adam""" +481 74 training_loop """lcwa""" +481 74 evaluator """rankbased""" +481 75 dataset """kinships""" +481 75 model """proje""" +481 75 loss """bceaftersigmoid""" +481 75 regularizer """no""" +481 75 optimizer """adam""" +481 75 training_loop """lcwa""" +481 75 evaluator """rankbased""" +481 76 dataset """kinships""" +481 76 model """proje""" +481 76 loss """bceaftersigmoid""" +481 76 regularizer """no""" +481 76 optimizer """adam""" +481 76 training_loop """lcwa""" +481 76 evaluator """rankbased""" +481 77 dataset """kinships""" +481 77 model """proje""" +481 77 loss """bceaftersigmoid""" +481 77 regularizer """no""" +481 77 optimizer """adam""" +481 77 training_loop """lcwa""" 
+481 77 evaluator """rankbased""" +481 78 dataset """kinships""" +481 78 model """proje""" +481 78 loss """bceaftersigmoid""" +481 78 regularizer """no""" +481 78 optimizer """adam""" +481 78 training_loop """lcwa""" +481 78 evaluator """rankbased""" +481 79 dataset """kinships""" +481 79 model """proje""" +481 79 loss """bceaftersigmoid""" +481 79 regularizer """no""" +481 79 optimizer """adam""" +481 79 training_loop """lcwa""" +481 79 evaluator """rankbased""" +481 80 dataset """kinships""" +481 80 model """proje""" +481 80 loss """bceaftersigmoid""" +481 80 regularizer """no""" +481 80 optimizer """adam""" +481 80 training_loop """lcwa""" +481 80 evaluator """rankbased""" +481 81 dataset """kinships""" +481 81 model """proje""" +481 81 loss """bceaftersigmoid""" +481 81 regularizer """no""" +481 81 optimizer """adam""" +481 81 training_loop """lcwa""" +481 81 evaluator """rankbased""" +481 82 dataset """kinships""" +481 82 model """proje""" +481 82 loss """bceaftersigmoid""" +481 82 regularizer """no""" +481 82 optimizer """adam""" +481 82 training_loop """lcwa""" +481 82 evaluator """rankbased""" +481 83 dataset """kinships""" +481 83 model """proje""" +481 83 loss """bceaftersigmoid""" +481 83 regularizer """no""" +481 83 optimizer """adam""" +481 83 training_loop """lcwa""" +481 83 evaluator """rankbased""" +481 84 dataset """kinships""" +481 84 model """proje""" +481 84 loss """bceaftersigmoid""" +481 84 regularizer """no""" +481 84 optimizer """adam""" +481 84 training_loop """lcwa""" +481 84 evaluator """rankbased""" +481 85 dataset """kinships""" +481 85 model """proje""" +481 85 loss """bceaftersigmoid""" +481 85 regularizer """no""" +481 85 optimizer """adam""" +481 85 training_loop """lcwa""" +481 85 evaluator """rankbased""" +481 86 dataset """kinships""" +481 86 model """proje""" +481 86 loss """bceaftersigmoid""" +481 86 regularizer """no""" +481 86 optimizer """adam""" +481 86 training_loop """lcwa""" +481 86 evaluator """rankbased""" +481 87 
dataset """kinships""" +481 87 model """proje""" +481 87 loss """bceaftersigmoid""" +481 87 regularizer """no""" +481 87 optimizer """adam""" +481 87 training_loop """lcwa""" +481 87 evaluator """rankbased""" +481 88 dataset """kinships""" +481 88 model """proje""" +481 88 loss """bceaftersigmoid""" +481 88 regularizer """no""" +481 88 optimizer """adam""" +481 88 training_loop """lcwa""" +481 88 evaluator """rankbased""" +481 89 dataset """kinships""" +481 89 model """proje""" +481 89 loss """bceaftersigmoid""" +481 89 regularizer """no""" +481 89 optimizer """adam""" +481 89 training_loop """lcwa""" +481 89 evaluator """rankbased""" +481 90 dataset """kinships""" +481 90 model """proje""" +481 90 loss """bceaftersigmoid""" +481 90 regularizer """no""" +481 90 optimizer """adam""" +481 90 training_loop """lcwa""" +481 90 evaluator """rankbased""" +481 91 dataset """kinships""" +481 91 model """proje""" +481 91 loss """bceaftersigmoid""" +481 91 regularizer """no""" +481 91 optimizer """adam""" +481 91 training_loop """lcwa""" +481 91 evaluator """rankbased""" +481 92 dataset """kinships""" +481 92 model """proje""" +481 92 loss """bceaftersigmoid""" +481 92 regularizer """no""" +481 92 optimizer """adam""" +481 92 training_loop """lcwa""" +481 92 evaluator """rankbased""" +481 93 dataset """kinships""" +481 93 model """proje""" +481 93 loss """bceaftersigmoid""" +481 93 regularizer """no""" +481 93 optimizer """adam""" +481 93 training_loop """lcwa""" +481 93 evaluator """rankbased""" +481 94 dataset """kinships""" +481 94 model """proje""" +481 94 loss """bceaftersigmoid""" +481 94 regularizer """no""" +481 94 optimizer """adam""" +481 94 training_loop """lcwa""" +481 94 evaluator """rankbased""" +481 95 dataset """kinships""" +481 95 model """proje""" +481 95 loss """bceaftersigmoid""" +481 95 regularizer """no""" +481 95 optimizer """adam""" +481 95 training_loop """lcwa""" +481 95 evaluator """rankbased""" +481 96 dataset """kinships""" +481 96 model 
"""proje""" +481 96 loss """bceaftersigmoid""" +481 96 regularizer """no""" +481 96 optimizer """adam""" +481 96 training_loop """lcwa""" +481 96 evaluator """rankbased""" +481 97 dataset """kinships""" +481 97 model """proje""" +481 97 loss """bceaftersigmoid""" +481 97 regularizer """no""" +481 97 optimizer """adam""" +481 97 training_loop """lcwa""" +481 97 evaluator """rankbased""" +481 98 dataset """kinships""" +481 98 model """proje""" +481 98 loss """bceaftersigmoid""" +481 98 regularizer """no""" +481 98 optimizer """adam""" +481 98 training_loop """lcwa""" +481 98 evaluator """rankbased""" +481 99 dataset """kinships""" +481 99 model """proje""" +481 99 loss """bceaftersigmoid""" +481 99 regularizer """no""" +481 99 optimizer """adam""" +481 99 training_loop """lcwa""" +481 99 evaluator """rankbased""" +481 100 dataset """kinships""" +481 100 model """proje""" +481 100 loss """bceaftersigmoid""" +481 100 regularizer """no""" +481 100 optimizer """adam""" +481 100 training_loop """lcwa""" +481 100 evaluator """rankbased""" +482 1 model.embedding_dim 1.0 +482 1 optimizer.lr 0.003331296463424206 +482 1 training.batch_size 2.0 +482 1 training.label_smoothing 0.002723327846370634 +482 2 model.embedding_dim 2.0 +482 2 optimizer.lr 0.00965485584603116 +482 2 training.batch_size 0.0 +482 2 training.label_smoothing 0.01078917128810978 +482 3 model.embedding_dim 0.0 +482 3 optimizer.lr 0.042189871790221024 +482 3 training.batch_size 0.0 +482 3 training.label_smoothing 0.004034977131919824 +482 4 model.embedding_dim 2.0 +482 4 optimizer.lr 0.0035164685744068316 +482 4 training.batch_size 0.0 +482 4 training.label_smoothing 0.0011110801591122151 +482 5 model.embedding_dim 0.0 +482 5 optimizer.lr 0.06739315644965815 +482 5 training.batch_size 1.0 +482 5 training.label_smoothing 0.004211598705896572 +482 6 model.embedding_dim 0.0 +482 6 optimizer.lr 0.004300858582698181 +482 6 training.batch_size 0.0 +482 6 training.label_smoothing 0.05575019752188372 +482 7 
model.embedding_dim 2.0 +482 7 optimizer.lr 0.011897654180537742 +482 7 training.batch_size 1.0 +482 7 training.label_smoothing 0.5591605350611738 +482 8 model.embedding_dim 1.0 +482 8 optimizer.lr 0.0022234059752767902 +482 8 training.batch_size 2.0 +482 8 training.label_smoothing 0.006976230463243839 +482 9 model.embedding_dim 1.0 +482 9 optimizer.lr 0.002344197779574398 +482 9 training.batch_size 0.0 +482 9 training.label_smoothing 0.22734005466249957 +482 10 model.embedding_dim 1.0 +482 10 optimizer.lr 0.05821108708443508 +482 10 training.batch_size 2.0 +482 10 training.label_smoothing 0.027550285260004134 +482 11 model.embedding_dim 1.0 +482 11 optimizer.lr 0.0014047046670559167 +482 11 training.batch_size 2.0 +482 11 training.label_smoothing 0.004476970919233727 +482 12 model.embedding_dim 1.0 +482 12 optimizer.lr 0.004740313208098821 +482 12 training.batch_size 0.0 +482 12 training.label_smoothing 0.9362399722894758 +482 13 model.embedding_dim 0.0 +482 13 optimizer.lr 0.004521805624697415 +482 13 training.batch_size 2.0 +482 13 training.label_smoothing 0.0011367110749996572 +482 14 model.embedding_dim 0.0 +482 14 optimizer.lr 0.0151922393965442 +482 14 training.batch_size 2.0 +482 14 training.label_smoothing 0.12350683167322851 +482 15 model.embedding_dim 1.0 +482 15 optimizer.lr 0.003629051025213684 +482 15 training.batch_size 2.0 +482 15 training.label_smoothing 0.007444280122095252 +482 16 model.embedding_dim 0.0 +482 16 optimizer.lr 0.006286442698313689 +482 16 training.batch_size 0.0 +482 16 training.label_smoothing 0.2105981982225661 +482 17 model.embedding_dim 0.0 +482 17 optimizer.lr 0.0013512949356750464 +482 17 training.batch_size 2.0 +482 17 training.label_smoothing 0.11258400877575249 +482 18 model.embedding_dim 2.0 +482 18 optimizer.lr 0.005825071286163581 +482 18 training.batch_size 0.0 +482 18 training.label_smoothing 0.0020866031859439406 +482 19 model.embedding_dim 0.0 +482 19 optimizer.lr 0.0010659019190071616 +482 19 training.batch_size 
1.0 +482 19 training.label_smoothing 0.3379354657713197 +482 20 model.embedding_dim 0.0 +482 20 optimizer.lr 0.005307956762682763 +482 20 training.batch_size 2.0 +482 20 training.label_smoothing 0.007660651323367583 +482 21 model.embedding_dim 2.0 +482 21 optimizer.lr 0.0029082108718934256 +482 21 training.batch_size 0.0 +482 21 training.label_smoothing 0.3986491523913333 +482 22 model.embedding_dim 2.0 +482 22 optimizer.lr 0.025250374916929184 +482 22 training.batch_size 2.0 +482 22 training.label_smoothing 0.18721145856330318 +482 23 model.embedding_dim 1.0 +482 23 optimizer.lr 0.0749283844017196 +482 23 training.batch_size 2.0 +482 23 training.label_smoothing 0.03560836743012653 +482 24 model.embedding_dim 1.0 +482 24 optimizer.lr 0.030036755193008545 +482 24 training.batch_size 1.0 +482 24 training.label_smoothing 0.02023383061688673 +482 25 model.embedding_dim 1.0 +482 25 optimizer.lr 0.00584576062379137 +482 25 training.batch_size 2.0 +482 25 training.label_smoothing 0.013414506094644492 +482 26 model.embedding_dim 1.0 +482 26 optimizer.lr 0.03456376492933434 +482 26 training.batch_size 0.0 +482 26 training.label_smoothing 0.03614223735444576 +482 27 model.embedding_dim 0.0 +482 27 optimizer.lr 0.07151175262754031 +482 27 training.batch_size 2.0 +482 27 training.label_smoothing 0.005814347377109686 +482 28 model.embedding_dim 2.0 +482 28 optimizer.lr 0.05128511122563226 +482 28 training.batch_size 1.0 +482 28 training.label_smoothing 0.007214711217398907 +482 29 model.embedding_dim 2.0 +482 29 optimizer.lr 0.0029665270508884988 +482 29 training.batch_size 1.0 +482 29 training.label_smoothing 0.03619420003740112 +482 30 model.embedding_dim 2.0 +482 30 optimizer.lr 0.0017022165579947972 +482 30 training.batch_size 0.0 +482 30 training.label_smoothing 0.6736267988315912 +482 31 model.embedding_dim 2.0 +482 31 optimizer.lr 0.002062139587496537 +482 31 training.batch_size 2.0 +482 31 training.label_smoothing 0.006915271413845609 +482 32 model.embedding_dim 1.0 
+482 32 optimizer.lr 0.0013590005694292199 +482 32 training.batch_size 2.0 +482 32 training.label_smoothing 0.019472187551594106 +482 33 model.embedding_dim 2.0 +482 33 optimizer.lr 0.008274859185131838 +482 33 training.batch_size 0.0 +482 33 training.label_smoothing 0.040615400249434025 +482 34 model.embedding_dim 1.0 +482 34 optimizer.lr 0.0037535621861750326 +482 34 training.batch_size 2.0 +482 34 training.label_smoothing 0.49706190969486497 +482 35 model.embedding_dim 1.0 +482 35 optimizer.lr 0.006164317452464916 +482 35 training.batch_size 2.0 +482 35 training.label_smoothing 0.003622446306205938 +482 36 model.embedding_dim 2.0 +482 36 optimizer.lr 0.005134509488903139 +482 36 training.batch_size 0.0 +482 36 training.label_smoothing 0.982101002737317 +482 37 model.embedding_dim 0.0 +482 37 optimizer.lr 0.005910986683713979 +482 37 training.batch_size 0.0 +482 37 training.label_smoothing 0.027802870319870884 +482 38 model.embedding_dim 1.0 +482 38 optimizer.lr 0.007406186632391029 +482 38 training.batch_size 2.0 +482 38 training.label_smoothing 0.05556796266810869 +482 39 model.embedding_dim 0.0 +482 39 optimizer.lr 0.0043708528462243585 +482 39 training.batch_size 0.0 +482 39 training.label_smoothing 0.021399369847372608 +482 40 model.embedding_dim 1.0 +482 40 optimizer.lr 0.027856715649409385 +482 40 training.batch_size 2.0 +482 40 training.label_smoothing 0.0011691587131700872 +482 41 model.embedding_dim 2.0 +482 41 optimizer.lr 0.017137791382542254 +482 41 training.batch_size 0.0 +482 41 training.label_smoothing 0.004953738837945944 +482 42 model.embedding_dim 0.0 +482 42 optimizer.lr 0.002728279719082963 +482 42 training.batch_size 0.0 +482 42 training.label_smoothing 0.0016609757825551696 +482 43 model.embedding_dim 2.0 +482 43 optimizer.lr 0.005376034312580411 +482 43 training.batch_size 0.0 +482 43 training.label_smoothing 0.0023404638335642347 +482 44 model.embedding_dim 0.0 +482 44 optimizer.lr 0.0011085122727510764 +482 44 training.batch_size 2.0 
+482 44 training.label_smoothing 0.7015310904084199 +482 45 model.embedding_dim 2.0 +482 45 optimizer.lr 0.0751972752988626 +482 45 training.batch_size 0.0 +482 45 training.label_smoothing 0.00437174051653831 +482 46 model.embedding_dim 2.0 +482 46 optimizer.lr 0.04189241986364699 +482 46 training.batch_size 0.0 +482 46 training.label_smoothing 0.0017416346535364484 +482 47 model.embedding_dim 2.0 +482 47 optimizer.lr 0.09771246035430485 +482 47 training.batch_size 2.0 +482 47 training.label_smoothing 0.004988995679450134 +482 48 model.embedding_dim 2.0 +482 48 optimizer.lr 0.02252406417306946 +482 48 training.batch_size 0.0 +482 48 training.label_smoothing 0.015592649937300867 +482 49 model.embedding_dim 2.0 +482 49 optimizer.lr 0.002614612946534682 +482 49 training.batch_size 1.0 +482 49 training.label_smoothing 0.005964447264820869 +482 50 model.embedding_dim 1.0 +482 50 optimizer.lr 0.0016029043060837049 +482 50 training.batch_size 2.0 +482 50 training.label_smoothing 0.08985257459715452 +482 51 model.embedding_dim 2.0 +482 51 optimizer.lr 0.0055992491656062125 +482 51 training.batch_size 2.0 +482 51 training.label_smoothing 0.020477771730156896 +482 52 model.embedding_dim 1.0 +482 52 optimizer.lr 0.0035421961686949765 +482 52 training.batch_size 2.0 +482 52 training.label_smoothing 0.0018411264118935497 +482 53 model.embedding_dim 0.0 +482 53 optimizer.lr 0.09280341223514653 +482 53 training.batch_size 1.0 +482 53 training.label_smoothing 0.0061688900157348394 +482 54 model.embedding_dim 1.0 +482 54 optimizer.lr 0.002064253139880493 +482 54 training.batch_size 0.0 +482 54 training.label_smoothing 0.032587698724932126 +482 55 model.embedding_dim 0.0 +482 55 optimizer.lr 0.07823760122986845 +482 55 training.batch_size 2.0 +482 55 training.label_smoothing 0.01050750547847684 +482 56 model.embedding_dim 1.0 +482 56 optimizer.lr 0.028398658367636203 +482 56 training.batch_size 1.0 +482 56 training.label_smoothing 0.001676976563724611 +482 57 model.embedding_dim 0.0 
+482 57 optimizer.lr 0.02064414428411971 +482 57 training.batch_size 0.0 +482 57 training.label_smoothing 0.053448124423379234 +482 58 model.embedding_dim 0.0 +482 58 optimizer.lr 0.0011047167221724411 +482 58 training.batch_size 0.0 +482 58 training.label_smoothing 0.001946221107710815 +482 59 model.embedding_dim 2.0 +482 59 optimizer.lr 0.06986169701795532 +482 59 training.batch_size 1.0 +482 59 training.label_smoothing 0.001226646941618848 +482 60 model.embedding_dim 0.0 +482 60 optimizer.lr 0.00220738789975177 +482 60 training.batch_size 2.0 +482 60 training.label_smoothing 0.14044849594086897 +482 61 model.embedding_dim 0.0 +482 61 optimizer.lr 0.0809385377078188 +482 61 training.batch_size 1.0 +482 61 training.label_smoothing 0.06947525458133799 +482 62 model.embedding_dim 2.0 +482 62 optimizer.lr 0.032746116061245605 +482 62 training.batch_size 2.0 +482 62 training.label_smoothing 0.004478733717889575 +482 63 model.embedding_dim 2.0 +482 63 optimizer.lr 0.002056844137819339 +482 63 training.batch_size 1.0 +482 63 training.label_smoothing 0.002174463961712211 +482 64 model.embedding_dim 2.0 +482 64 optimizer.lr 0.01966404396841588 +482 64 training.batch_size 2.0 +482 64 training.label_smoothing 0.020773934262036654 +482 65 model.embedding_dim 1.0 +482 65 optimizer.lr 0.0033365436153301046 +482 65 training.batch_size 0.0 +482 65 training.label_smoothing 0.07956371919595073 +482 66 model.embedding_dim 1.0 +482 66 optimizer.lr 0.00909857898932092 +482 66 training.batch_size 0.0 +482 66 training.label_smoothing 0.012959696715484604 +482 67 model.embedding_dim 0.0 +482 67 optimizer.lr 0.03078764479619616 +482 67 training.batch_size 1.0 +482 67 training.label_smoothing 0.0014660331168374289 +482 68 model.embedding_dim 2.0 +482 68 optimizer.lr 0.003915855750365406 +482 68 training.batch_size 0.0 +482 68 training.label_smoothing 0.042957974046857306 +482 69 model.embedding_dim 2.0 +482 69 optimizer.lr 0.018256251476433328 +482 69 training.batch_size 2.0 +482 69 
training.label_smoothing 0.023098201566259132 +482 70 model.embedding_dim 0.0 +482 70 optimizer.lr 0.03126048200622225 +482 70 training.batch_size 2.0 +482 70 training.label_smoothing 0.27252438638772564 +482 71 model.embedding_dim 0.0 +482 71 optimizer.lr 0.07278719330138485 +482 71 training.batch_size 0.0 +482 71 training.label_smoothing 0.11513017699400077 +482 72 model.embedding_dim 2.0 +482 72 optimizer.lr 0.004809496399911783 +482 72 training.batch_size 0.0 +482 72 training.label_smoothing 0.9445431266155362 +482 73 model.embedding_dim 2.0 +482 73 optimizer.lr 0.09399552881186966 +482 73 training.batch_size 2.0 +482 73 training.label_smoothing 0.6968298226730972 +482 74 model.embedding_dim 2.0 +482 74 optimizer.lr 0.0036791986831403506 +482 74 training.batch_size 2.0 +482 74 training.label_smoothing 0.22274043984441508 +482 75 model.embedding_dim 0.0 +482 75 optimizer.lr 0.013486266676729457 +482 75 training.batch_size 2.0 +482 75 training.label_smoothing 0.001149608259612915 +482 76 model.embedding_dim 0.0 +482 76 optimizer.lr 0.005192244982292697 +482 76 training.batch_size 2.0 +482 76 training.label_smoothing 0.16576599938883707 +482 77 model.embedding_dim 2.0 +482 77 optimizer.lr 0.001887803725017861 +482 77 training.batch_size 1.0 +482 77 training.label_smoothing 0.7406635446257281 +482 78 model.embedding_dim 1.0 +482 78 optimizer.lr 0.017652785637265914 +482 78 training.batch_size 2.0 +482 78 training.label_smoothing 0.3807604402818249 +482 79 model.embedding_dim 1.0 +482 79 optimizer.lr 0.013516623805275164 +482 79 training.batch_size 1.0 +482 79 training.label_smoothing 0.20128826039323633 +482 80 model.embedding_dim 1.0 +482 80 optimizer.lr 0.05728458833815955 +482 80 training.batch_size 0.0 +482 80 training.label_smoothing 0.003747712351922791 +482 81 model.embedding_dim 2.0 +482 81 optimizer.lr 0.06770127996822194 +482 81 training.batch_size 0.0 +482 81 training.label_smoothing 0.001044887895148138 +482 82 model.embedding_dim 1.0 +482 82 
optimizer.lr 0.001374920336245251 +482 82 training.batch_size 2.0 +482 82 training.label_smoothing 0.06299055923590864 +482 83 model.embedding_dim 2.0 +482 83 optimizer.lr 0.0027288503572879988 +482 83 training.batch_size 1.0 +482 83 training.label_smoothing 0.004368748631515835 +482 84 model.embedding_dim 0.0 +482 84 optimizer.lr 0.017378261983573524 +482 84 training.batch_size 2.0 +482 84 training.label_smoothing 0.04648306811702252 +482 85 model.embedding_dim 0.0 +482 85 optimizer.lr 0.04225444531806446 +482 85 training.batch_size 1.0 +482 85 training.label_smoothing 0.0027268107352902272 +482 86 model.embedding_dim 0.0 +482 86 optimizer.lr 0.04376003994067687 +482 86 training.batch_size 1.0 +482 86 training.label_smoothing 0.007547674565366018 +482 87 model.embedding_dim 2.0 +482 87 optimizer.lr 0.003635134198594778 +482 87 training.batch_size 0.0 +482 87 training.label_smoothing 0.5884439766293235 +482 88 model.embedding_dim 0.0 +482 88 optimizer.lr 0.001125355784614273 +482 88 training.batch_size 2.0 +482 88 training.label_smoothing 0.006957363679926409 +482 89 model.embedding_dim 1.0 +482 89 optimizer.lr 0.018638824997505646 +482 89 training.batch_size 1.0 +482 89 training.label_smoothing 0.014840542060617934 +482 90 model.embedding_dim 0.0 +482 90 optimizer.lr 0.004069794737486927 +482 90 training.batch_size 0.0 +482 90 training.label_smoothing 0.2298353787222265 +482 91 model.embedding_dim 2.0 +482 91 optimizer.lr 0.012415832665263811 +482 91 training.batch_size 2.0 +482 91 training.label_smoothing 0.006194638375763249 +482 92 model.embedding_dim 2.0 +482 92 optimizer.lr 0.004547553681644034 +482 92 training.batch_size 1.0 +482 92 training.label_smoothing 0.03703937262812444 +482 93 model.embedding_dim 1.0 +482 93 optimizer.lr 0.0034027039790211445 +482 93 training.batch_size 2.0 +482 93 training.label_smoothing 0.0016895300440505508 +482 94 model.embedding_dim 0.0 +482 94 optimizer.lr 0.02067111217215643 +482 94 training.batch_size 0.0 +482 94 
training.label_smoothing 0.0013344843293449245 +482 95 model.embedding_dim 2.0 +482 95 optimizer.lr 0.005509639140266404 +482 95 training.batch_size 0.0 +482 95 training.label_smoothing 0.1297192404151552 +482 96 model.embedding_dim 2.0 +482 96 optimizer.lr 0.014341986618379097 +482 96 training.batch_size 0.0 +482 96 training.label_smoothing 0.04434155168973818 +482 97 model.embedding_dim 1.0 +482 97 optimizer.lr 0.00283596122664395 +482 97 training.batch_size 2.0 +482 97 training.label_smoothing 0.5393860759664577 +482 98 model.embedding_dim 1.0 +482 98 optimizer.lr 0.02260453138006338 +482 98 training.batch_size 1.0 +482 98 training.label_smoothing 0.007656903713377347 +482 99 model.embedding_dim 2.0 +482 99 optimizer.lr 0.007354217111996373 +482 99 training.batch_size 2.0 +482 99 training.label_smoothing 0.3822791381146358 +482 100 model.embedding_dim 0.0 +482 100 optimizer.lr 0.0012308526345575777 +482 100 training.batch_size 2.0 +482 100 training.label_smoothing 0.002420833125679845 +482 1 dataset """kinships""" +482 1 model """proje""" +482 1 loss """softplus""" +482 1 regularizer """no""" +482 1 optimizer """adam""" +482 1 training_loop """lcwa""" +482 1 evaluator """rankbased""" +482 2 dataset """kinships""" +482 2 model """proje""" +482 2 loss """softplus""" +482 2 regularizer """no""" +482 2 optimizer """adam""" +482 2 training_loop """lcwa""" +482 2 evaluator """rankbased""" +482 3 dataset """kinships""" +482 3 model """proje""" +482 3 loss """softplus""" +482 3 regularizer """no""" +482 3 optimizer """adam""" +482 3 training_loop """lcwa""" +482 3 evaluator """rankbased""" +482 4 dataset """kinships""" +482 4 model """proje""" +482 4 loss """softplus""" +482 4 regularizer """no""" +482 4 optimizer """adam""" +482 4 training_loop """lcwa""" +482 4 evaluator """rankbased""" +482 5 dataset """kinships""" +482 5 model """proje""" +482 5 loss """softplus""" +482 5 regularizer """no""" +482 5 optimizer """adam""" +482 5 training_loop """lcwa""" +482 5 
evaluator """rankbased""" +482 6 dataset """kinships""" +482 6 model """proje""" +482 6 loss """softplus""" +482 6 regularizer """no""" +482 6 optimizer """adam""" +482 6 training_loop """lcwa""" +482 6 evaluator """rankbased""" +482 7 dataset """kinships""" +482 7 model """proje""" +482 7 loss """softplus""" +482 7 regularizer """no""" +482 7 optimizer """adam""" +482 7 training_loop """lcwa""" +482 7 evaluator """rankbased""" +482 8 dataset """kinships""" +482 8 model """proje""" +482 8 loss """softplus""" +482 8 regularizer """no""" +482 8 optimizer """adam""" +482 8 training_loop """lcwa""" +482 8 evaluator """rankbased""" +482 9 dataset """kinships""" +482 9 model """proje""" +482 9 loss """softplus""" +482 9 regularizer """no""" +482 9 optimizer """adam""" +482 9 training_loop """lcwa""" +482 9 evaluator """rankbased""" +482 10 dataset """kinships""" +482 10 model """proje""" +482 10 loss """softplus""" +482 10 regularizer """no""" +482 10 optimizer """adam""" +482 10 training_loop """lcwa""" +482 10 evaluator """rankbased""" +482 11 dataset """kinships""" +482 11 model """proje""" +482 11 loss """softplus""" +482 11 regularizer """no""" +482 11 optimizer """adam""" +482 11 training_loop """lcwa""" +482 11 evaluator """rankbased""" +482 12 dataset """kinships""" +482 12 model """proje""" +482 12 loss """softplus""" +482 12 regularizer """no""" +482 12 optimizer """adam""" +482 12 training_loop """lcwa""" +482 12 evaluator """rankbased""" +482 13 dataset """kinships""" +482 13 model """proje""" +482 13 loss """softplus""" +482 13 regularizer """no""" +482 13 optimizer """adam""" +482 13 training_loop """lcwa""" +482 13 evaluator """rankbased""" +482 14 dataset """kinships""" +482 14 model """proje""" +482 14 loss """softplus""" +482 14 regularizer """no""" +482 14 optimizer """adam""" +482 14 training_loop """lcwa""" +482 14 evaluator """rankbased""" +482 15 dataset """kinships""" +482 15 model """proje""" +482 15 loss """softplus""" +482 15 regularizer 
"""no""" +482 15 optimizer """adam""" +482 15 training_loop """lcwa""" +482 15 evaluator """rankbased""" +482 16 dataset """kinships""" +482 16 model """proje""" +482 16 loss """softplus""" +482 16 regularizer """no""" +482 16 optimizer """adam""" +482 16 training_loop """lcwa""" +482 16 evaluator """rankbased""" +482 17 dataset """kinships""" +482 17 model """proje""" +482 17 loss """softplus""" +482 17 regularizer """no""" +482 17 optimizer """adam""" +482 17 training_loop """lcwa""" +482 17 evaluator """rankbased""" +482 18 dataset """kinships""" +482 18 model """proje""" +482 18 loss """softplus""" +482 18 regularizer """no""" +482 18 optimizer """adam""" +482 18 training_loop """lcwa""" +482 18 evaluator """rankbased""" +482 19 dataset """kinships""" +482 19 model """proje""" +482 19 loss """softplus""" +482 19 regularizer """no""" +482 19 optimizer """adam""" +482 19 training_loop """lcwa""" +482 19 evaluator """rankbased""" +482 20 dataset """kinships""" +482 20 model """proje""" +482 20 loss """softplus""" +482 20 regularizer """no""" +482 20 optimizer """adam""" +482 20 training_loop """lcwa""" +482 20 evaluator """rankbased""" +482 21 dataset """kinships""" +482 21 model """proje""" +482 21 loss """softplus""" +482 21 regularizer """no""" +482 21 optimizer """adam""" +482 21 training_loop """lcwa""" +482 21 evaluator """rankbased""" +482 22 dataset """kinships""" +482 22 model """proje""" +482 22 loss """softplus""" +482 22 regularizer """no""" +482 22 optimizer """adam""" +482 22 training_loop """lcwa""" +482 22 evaluator """rankbased""" +482 23 dataset """kinships""" +482 23 model """proje""" +482 23 loss """softplus""" +482 23 regularizer """no""" +482 23 optimizer """adam""" +482 23 training_loop """lcwa""" +482 23 evaluator """rankbased""" +482 24 dataset """kinships""" +482 24 model """proje""" +482 24 loss """softplus""" +482 24 regularizer """no""" +482 24 optimizer """adam""" +482 24 training_loop """lcwa""" +482 24 evaluator """rankbased""" +482 
25 dataset """kinships""" +482 25 model """proje""" +482 25 loss """softplus""" +482 25 regularizer """no""" +482 25 optimizer """adam""" +482 25 training_loop """lcwa""" +482 25 evaluator """rankbased""" +482 26 dataset """kinships""" +482 26 model """proje""" +482 26 loss """softplus""" +482 26 regularizer """no""" +482 26 optimizer """adam""" +482 26 training_loop """lcwa""" +482 26 evaluator """rankbased""" +482 27 dataset """kinships""" +482 27 model """proje""" +482 27 loss """softplus""" +482 27 regularizer """no""" +482 27 optimizer """adam""" +482 27 training_loop """lcwa""" +482 27 evaluator """rankbased""" +482 28 dataset """kinships""" +482 28 model """proje""" +482 28 loss """softplus""" +482 28 regularizer """no""" +482 28 optimizer """adam""" +482 28 training_loop """lcwa""" +482 28 evaluator """rankbased""" +482 29 dataset """kinships""" +482 29 model """proje""" +482 29 loss """softplus""" +482 29 regularizer """no""" +482 29 optimizer """adam""" +482 29 training_loop """lcwa""" +482 29 evaluator """rankbased""" +482 30 dataset """kinships""" +482 30 model """proje""" +482 30 loss """softplus""" +482 30 regularizer """no""" +482 30 optimizer """adam""" +482 30 training_loop """lcwa""" +482 30 evaluator """rankbased""" +482 31 dataset """kinships""" +482 31 model """proje""" +482 31 loss """softplus""" +482 31 regularizer """no""" +482 31 optimizer """adam""" +482 31 training_loop """lcwa""" +482 31 evaluator """rankbased""" +482 32 dataset """kinships""" +482 32 model """proje""" +482 32 loss """softplus""" +482 32 regularizer """no""" +482 32 optimizer """adam""" +482 32 training_loop """lcwa""" +482 32 evaluator """rankbased""" +482 33 dataset """kinships""" +482 33 model """proje""" +482 33 loss """softplus""" +482 33 regularizer """no""" +482 33 optimizer """adam""" +482 33 training_loop """lcwa""" +482 33 evaluator """rankbased""" +482 34 dataset """kinships""" +482 34 model """proje""" +482 34 loss """softplus""" +482 34 regularizer """no""" 
+482 34 optimizer """adam""" +482 34 training_loop """lcwa""" +482 34 evaluator """rankbased""" +482 35 dataset """kinships""" +482 35 model """proje""" +482 35 loss """softplus""" +482 35 regularizer """no""" +482 35 optimizer """adam""" +482 35 training_loop """lcwa""" +482 35 evaluator """rankbased""" +482 36 dataset """kinships""" +482 36 model """proje""" +482 36 loss """softplus""" +482 36 regularizer """no""" +482 36 optimizer """adam""" +482 36 training_loop """lcwa""" +482 36 evaluator """rankbased""" +482 37 dataset """kinships""" +482 37 model """proje""" +482 37 loss """softplus""" +482 37 regularizer """no""" +482 37 optimizer """adam""" +482 37 training_loop """lcwa""" +482 37 evaluator """rankbased""" +482 38 dataset """kinships""" +482 38 model """proje""" +482 38 loss """softplus""" +482 38 regularizer """no""" +482 38 optimizer """adam""" +482 38 training_loop """lcwa""" +482 38 evaluator """rankbased""" +482 39 dataset """kinships""" +482 39 model """proje""" +482 39 loss """softplus""" +482 39 regularizer """no""" +482 39 optimizer """adam""" +482 39 training_loop """lcwa""" +482 39 evaluator """rankbased""" +482 40 dataset """kinships""" +482 40 model """proje""" +482 40 loss """softplus""" +482 40 regularizer """no""" +482 40 optimizer """adam""" +482 40 training_loop """lcwa""" +482 40 evaluator """rankbased""" +482 41 dataset """kinships""" +482 41 model """proje""" +482 41 loss """softplus""" +482 41 regularizer """no""" +482 41 optimizer """adam""" +482 41 training_loop """lcwa""" +482 41 evaluator """rankbased""" +482 42 dataset """kinships""" +482 42 model """proje""" +482 42 loss """softplus""" +482 42 regularizer """no""" +482 42 optimizer """adam""" +482 42 training_loop """lcwa""" +482 42 evaluator """rankbased""" +482 43 dataset """kinships""" +482 43 model """proje""" +482 43 loss """softplus""" +482 43 regularizer """no""" +482 43 optimizer """adam""" +482 43 training_loop """lcwa""" +482 43 evaluator """rankbased""" +482 44 
dataset """kinships""" +482 44 model """proje""" +482 44 loss """softplus""" +482 44 regularizer """no""" +482 44 optimizer """adam""" +482 44 training_loop """lcwa""" +482 44 evaluator """rankbased""" +482 45 dataset """kinships""" +482 45 model """proje""" +482 45 loss """softplus""" +482 45 regularizer """no""" +482 45 optimizer """adam""" +482 45 training_loop """lcwa""" +482 45 evaluator """rankbased""" +482 46 dataset """kinships""" +482 46 model """proje""" +482 46 loss """softplus""" +482 46 regularizer """no""" +482 46 optimizer """adam""" +482 46 training_loop """lcwa""" +482 46 evaluator """rankbased""" +482 47 dataset """kinships""" +482 47 model """proje""" +482 47 loss """softplus""" +482 47 regularizer """no""" +482 47 optimizer """adam""" +482 47 training_loop """lcwa""" +482 47 evaluator """rankbased""" +482 48 dataset """kinships""" +482 48 model """proje""" +482 48 loss """softplus""" +482 48 regularizer """no""" +482 48 optimizer """adam""" +482 48 training_loop """lcwa""" +482 48 evaluator """rankbased""" +482 49 dataset """kinships""" +482 49 model """proje""" +482 49 loss """softplus""" +482 49 regularizer """no""" +482 49 optimizer """adam""" +482 49 training_loop """lcwa""" +482 49 evaluator """rankbased""" +482 50 dataset """kinships""" +482 50 model """proje""" +482 50 loss """softplus""" +482 50 regularizer """no""" +482 50 optimizer """adam""" +482 50 training_loop """lcwa""" +482 50 evaluator """rankbased""" +482 51 dataset """kinships""" +482 51 model """proje""" +482 51 loss """softplus""" +482 51 regularizer """no""" +482 51 optimizer """adam""" +482 51 training_loop """lcwa""" +482 51 evaluator """rankbased""" +482 52 dataset """kinships""" +482 52 model """proje""" +482 52 loss """softplus""" +482 52 regularizer """no""" +482 52 optimizer """adam""" +482 52 training_loop """lcwa""" +482 52 evaluator """rankbased""" +482 53 dataset """kinships""" +482 53 model """proje""" +482 53 loss """softplus""" +482 53 regularizer """no""" 
+482 53 optimizer """adam""" +482 53 training_loop """lcwa""" +482 53 evaluator """rankbased""" +482 54 dataset """kinships""" +482 54 model """proje""" +482 54 loss """softplus""" +482 54 regularizer """no""" +482 54 optimizer """adam""" +482 54 training_loop """lcwa""" +482 54 evaluator """rankbased""" +482 55 dataset """kinships""" +482 55 model """proje""" +482 55 loss """softplus""" +482 55 regularizer """no""" +482 55 optimizer """adam""" +482 55 training_loop """lcwa""" +482 55 evaluator """rankbased""" +482 56 dataset """kinships""" +482 56 model """proje""" +482 56 loss """softplus""" +482 56 regularizer """no""" +482 56 optimizer """adam""" +482 56 training_loop """lcwa""" +482 56 evaluator """rankbased""" +482 57 dataset """kinships""" +482 57 model """proje""" +482 57 loss """softplus""" +482 57 regularizer """no""" +482 57 optimizer """adam""" +482 57 training_loop """lcwa""" +482 57 evaluator """rankbased""" +482 58 dataset """kinships""" +482 58 model """proje""" +482 58 loss """softplus""" +482 58 regularizer """no""" +482 58 optimizer """adam""" +482 58 training_loop """lcwa""" +482 58 evaluator """rankbased""" +482 59 dataset """kinships""" +482 59 model """proje""" +482 59 loss """softplus""" +482 59 regularizer """no""" +482 59 optimizer """adam""" +482 59 training_loop """lcwa""" +482 59 evaluator """rankbased""" +482 60 dataset """kinships""" +482 60 model """proje""" +482 60 loss """softplus""" +482 60 regularizer """no""" +482 60 optimizer """adam""" +482 60 training_loop """lcwa""" +482 60 evaluator """rankbased""" +482 61 dataset """kinships""" +482 61 model """proje""" +482 61 loss """softplus""" +482 61 regularizer """no""" +482 61 optimizer """adam""" +482 61 training_loop """lcwa""" +482 61 evaluator """rankbased""" +482 62 dataset """kinships""" +482 62 model """proje""" +482 62 loss """softplus""" +482 62 regularizer """no""" +482 62 optimizer """adam""" +482 62 training_loop """lcwa""" +482 62 evaluator """rankbased""" +482 63 
dataset """kinships""" +482 63 model """proje""" +482 63 loss """softplus""" +482 63 regularizer """no""" +482 63 optimizer """adam""" +482 63 training_loop """lcwa""" +482 63 evaluator """rankbased""" +482 64 dataset """kinships""" +482 64 model """proje""" +482 64 loss """softplus""" +482 64 regularizer """no""" +482 64 optimizer """adam""" +482 64 training_loop """lcwa""" +482 64 evaluator """rankbased""" +482 65 dataset """kinships""" +482 65 model """proje""" +482 65 loss """softplus""" +482 65 regularizer """no""" +482 65 optimizer """adam""" +482 65 training_loop """lcwa""" +482 65 evaluator """rankbased""" +482 66 dataset """kinships""" +482 66 model """proje""" +482 66 loss """softplus""" +482 66 regularizer """no""" +482 66 optimizer """adam""" +482 66 training_loop """lcwa""" +482 66 evaluator """rankbased""" +482 67 dataset """kinships""" +482 67 model """proje""" +482 67 loss """softplus""" +482 67 regularizer """no""" +482 67 optimizer """adam""" +482 67 training_loop """lcwa""" +482 67 evaluator """rankbased""" +482 68 dataset """kinships""" +482 68 model """proje""" +482 68 loss """softplus""" +482 68 regularizer """no""" +482 68 optimizer """adam""" +482 68 training_loop """lcwa""" +482 68 evaluator """rankbased""" +482 69 dataset """kinships""" +482 69 model """proje""" +482 69 loss """softplus""" +482 69 regularizer """no""" +482 69 optimizer """adam""" +482 69 training_loop """lcwa""" +482 69 evaluator """rankbased""" +482 70 dataset """kinships""" +482 70 model """proje""" +482 70 loss """softplus""" +482 70 regularizer """no""" +482 70 optimizer """adam""" +482 70 training_loop """lcwa""" +482 70 evaluator """rankbased""" +482 71 dataset """kinships""" +482 71 model """proje""" +482 71 loss """softplus""" +482 71 regularizer """no""" +482 71 optimizer """adam""" +482 71 training_loop """lcwa""" +482 71 evaluator """rankbased""" +482 72 dataset """kinships""" +482 72 model """proje""" +482 72 loss """softplus""" +482 72 regularizer """no""" 
+482 72 optimizer """adam""" +482 72 training_loop """lcwa""" +482 72 evaluator """rankbased""" +482 73 dataset """kinships""" +482 73 model """proje""" +482 73 loss """softplus""" +482 73 regularizer """no""" +482 73 optimizer """adam""" +482 73 training_loop """lcwa""" +482 73 evaluator """rankbased""" +482 74 dataset """kinships""" +482 74 model """proje""" +482 74 loss """softplus""" +482 74 regularizer """no""" +482 74 optimizer """adam""" +482 74 training_loop """lcwa""" +482 74 evaluator """rankbased""" +482 75 dataset """kinships""" +482 75 model """proje""" +482 75 loss """softplus""" +482 75 regularizer """no""" +482 75 optimizer """adam""" +482 75 training_loop """lcwa""" +482 75 evaluator """rankbased""" +482 76 dataset """kinships""" +482 76 model """proje""" +482 76 loss """softplus""" +482 76 regularizer """no""" +482 76 optimizer """adam""" +482 76 training_loop """lcwa""" +482 76 evaluator """rankbased""" +482 77 dataset """kinships""" +482 77 model """proje""" +482 77 loss """softplus""" +482 77 regularizer """no""" +482 77 optimizer """adam""" +482 77 training_loop """lcwa""" +482 77 evaluator """rankbased""" +482 78 dataset """kinships""" +482 78 model """proje""" +482 78 loss """softplus""" +482 78 regularizer """no""" +482 78 optimizer """adam""" +482 78 training_loop """lcwa""" +482 78 evaluator """rankbased""" +482 79 dataset """kinships""" +482 79 model """proje""" +482 79 loss """softplus""" +482 79 regularizer """no""" +482 79 optimizer """adam""" +482 79 training_loop """lcwa""" +482 79 evaluator """rankbased""" +482 80 dataset """kinships""" +482 80 model """proje""" +482 80 loss """softplus""" +482 80 regularizer """no""" +482 80 optimizer """adam""" +482 80 training_loop """lcwa""" +482 80 evaluator """rankbased""" +482 81 dataset """kinships""" +482 81 model """proje""" +482 81 loss """softplus""" +482 81 regularizer """no""" +482 81 optimizer """adam""" +482 81 training_loop """lcwa""" +482 81 evaluator """rankbased""" +482 82 
dataset """kinships""" +482 82 model """proje""" +482 82 loss """softplus""" +482 82 regularizer """no""" +482 82 optimizer """adam""" +482 82 training_loop """lcwa""" +482 82 evaluator """rankbased""" +482 83 dataset """kinships""" +482 83 model """proje""" +482 83 loss """softplus""" +482 83 regularizer """no""" +482 83 optimizer """adam""" +482 83 training_loop """lcwa""" +482 83 evaluator """rankbased""" +482 84 dataset """kinships""" +482 84 model """proje""" +482 84 loss """softplus""" +482 84 regularizer """no""" +482 84 optimizer """adam""" +482 84 training_loop """lcwa""" +482 84 evaluator """rankbased""" +482 85 dataset """kinships""" +482 85 model """proje""" +482 85 loss """softplus""" +482 85 regularizer """no""" +482 85 optimizer """adam""" +482 85 training_loop """lcwa""" +482 85 evaluator """rankbased""" +482 86 dataset """kinships""" +482 86 model """proje""" +482 86 loss """softplus""" +482 86 regularizer """no""" +482 86 optimizer """adam""" +482 86 training_loop """lcwa""" +482 86 evaluator """rankbased""" +482 87 dataset """kinships""" +482 87 model """proje""" +482 87 loss """softplus""" +482 87 regularizer """no""" +482 87 optimizer """adam""" +482 87 training_loop """lcwa""" +482 87 evaluator """rankbased""" +482 88 dataset """kinships""" +482 88 model """proje""" +482 88 loss """softplus""" +482 88 regularizer """no""" +482 88 optimizer """adam""" +482 88 training_loop """lcwa""" +482 88 evaluator """rankbased""" +482 89 dataset """kinships""" +482 89 model """proje""" +482 89 loss """softplus""" +482 89 regularizer """no""" +482 89 optimizer """adam""" +482 89 training_loop """lcwa""" +482 89 evaluator """rankbased""" +482 90 dataset """kinships""" +482 90 model """proje""" +482 90 loss """softplus""" +482 90 regularizer """no""" +482 90 optimizer """adam""" +482 90 training_loop """lcwa""" +482 90 evaluator """rankbased""" +482 91 dataset """kinships""" +482 91 model """proje""" +482 91 loss """softplus""" +482 91 regularizer """no""" 
+482 91 optimizer """adam""" +482 91 training_loop """lcwa""" +482 91 evaluator """rankbased""" +482 92 dataset """kinships""" +482 92 model """proje""" +482 92 loss """softplus""" +482 92 regularizer """no""" +482 92 optimizer """adam""" +482 92 training_loop """lcwa""" +482 92 evaluator """rankbased""" +482 93 dataset """kinships""" +482 93 model """proje""" +482 93 loss """softplus""" +482 93 regularizer """no""" +482 93 optimizer """adam""" +482 93 training_loop """lcwa""" +482 93 evaluator """rankbased""" +482 94 dataset """kinships""" +482 94 model """proje""" +482 94 loss """softplus""" +482 94 regularizer """no""" +482 94 optimizer """adam""" +482 94 training_loop """lcwa""" +482 94 evaluator """rankbased""" +482 95 dataset """kinships""" +482 95 model """proje""" +482 95 loss """softplus""" +482 95 regularizer """no""" +482 95 optimizer """adam""" +482 95 training_loop """lcwa""" +482 95 evaluator """rankbased""" +482 96 dataset """kinships""" +482 96 model """proje""" +482 96 loss """softplus""" +482 96 regularizer """no""" +482 96 optimizer """adam""" +482 96 training_loop """lcwa""" +482 96 evaluator """rankbased""" +482 97 dataset """kinships""" +482 97 model """proje""" +482 97 loss """softplus""" +482 97 regularizer """no""" +482 97 optimizer """adam""" +482 97 training_loop """lcwa""" +482 97 evaluator """rankbased""" +482 98 dataset """kinships""" +482 98 model """proje""" +482 98 loss """softplus""" +482 98 regularizer """no""" +482 98 optimizer """adam""" +482 98 training_loop """lcwa""" +482 98 evaluator """rankbased""" +482 99 dataset """kinships""" +482 99 model """proje""" +482 99 loss """softplus""" +482 99 regularizer """no""" +482 99 optimizer """adam""" +482 99 training_loop """lcwa""" +482 99 evaluator """rankbased""" +482 100 dataset """kinships""" +482 100 model """proje""" +482 100 loss """softplus""" +482 100 regularizer """no""" +482 100 optimizer """adam""" +482 100 training_loop """lcwa""" +482 100 evaluator """rankbased""" +483 1 
model.embedding_dim 2.0 +483 1 optimizer.lr 0.01529484639862468 +483 1 training.batch_size 2.0 +483 1 training.label_smoothing 0.02737582443704356 +483 2 model.embedding_dim 2.0 +483 2 optimizer.lr 0.0027620657256250277 +483 2 training.batch_size 1.0 +483 2 training.label_smoothing 0.018857219735656378 +483 3 model.embedding_dim 1.0 +483 3 optimizer.lr 0.0012166556158990684 +483 3 training.batch_size 0.0 +483 3 training.label_smoothing 0.012323209185499636 +483 4 model.embedding_dim 1.0 +483 4 optimizer.lr 0.02381895279543632 +483 4 training.batch_size 2.0 +483 4 training.label_smoothing 0.08440568797794659 +483 5 model.embedding_dim 0.0 +483 5 optimizer.lr 0.003992804976953066 +483 5 training.batch_size 1.0 +483 5 training.label_smoothing 0.001013523757338454 +483 6 model.embedding_dim 1.0 +483 6 optimizer.lr 0.0048567051426287285 +483 6 training.batch_size 0.0 +483 6 training.label_smoothing 0.10301262150800077 +483 7 model.embedding_dim 1.0 +483 7 optimizer.lr 0.0013456981514891925 +483 7 training.batch_size 2.0 +483 7 training.label_smoothing 0.11090633790433185 +483 8 model.embedding_dim 1.0 +483 8 optimizer.lr 0.05611964378015762 +483 8 training.batch_size 1.0 +483 8 training.label_smoothing 0.27668130240137273 +483 9 model.embedding_dim 2.0 +483 9 optimizer.lr 0.0011642979991968638 +483 9 training.batch_size 0.0 +483 9 training.label_smoothing 0.01933496043050253 +483 10 model.embedding_dim 0.0 +483 10 optimizer.lr 0.010151409223743083 +483 10 training.batch_size 2.0 +483 10 training.label_smoothing 0.06500367415685819 +483 11 model.embedding_dim 2.0 +483 11 optimizer.lr 0.009121442121993801 +483 11 training.batch_size 1.0 +483 11 training.label_smoothing 0.40847217035094646 +483 12 model.embedding_dim 1.0 +483 12 optimizer.lr 0.009318030483669968 +483 12 training.batch_size 0.0 +483 12 training.label_smoothing 0.9206313979774765 +483 13 model.embedding_dim 2.0 +483 13 optimizer.lr 0.0023511437974259057 +483 13 training.batch_size 2.0 +483 13 
training.label_smoothing 0.7654556030050276 +483 14 model.embedding_dim 1.0 +483 14 optimizer.lr 0.0059823341286636324 +483 14 training.batch_size 1.0 +483 14 training.label_smoothing 0.2682949389029353 +483 15 model.embedding_dim 1.0 +483 15 optimizer.lr 0.0029812470549017284 +483 15 training.batch_size 0.0 +483 15 training.label_smoothing 0.09310057712645267 +483 16 model.embedding_dim 1.0 +483 16 optimizer.lr 0.009834515121770089 +483 16 training.batch_size 0.0 +483 16 training.label_smoothing 0.008506234840381613 +483 17 model.embedding_dim 2.0 +483 17 optimizer.lr 0.045979787892058865 +483 17 training.batch_size 0.0 +483 17 training.label_smoothing 0.018054486446076105 +483 18 model.embedding_dim 1.0 +483 18 optimizer.lr 0.006060373499014676 +483 18 training.batch_size 0.0 +483 18 training.label_smoothing 0.6307599478692892 +483 19 model.embedding_dim 1.0 +483 19 optimizer.lr 0.0034499144005272347 +483 19 training.batch_size 1.0 +483 19 training.label_smoothing 0.018967520373923748 +483 20 model.embedding_dim 2.0 +483 20 optimizer.lr 0.0011313800230994008 +483 20 training.batch_size 0.0 +483 20 training.label_smoothing 0.003743012267666778 +483 21 model.embedding_dim 1.0 +483 21 optimizer.lr 0.05239637968462169 +483 21 training.batch_size 1.0 +483 21 training.label_smoothing 0.002978134094838647 +483 22 model.embedding_dim 0.0 +483 22 optimizer.lr 0.0031237572489821517 +483 22 training.batch_size 2.0 +483 22 training.label_smoothing 0.4271097028357851 +483 23 model.embedding_dim 1.0 +483 23 optimizer.lr 0.012966884922234643 +483 23 training.batch_size 0.0 +483 23 training.label_smoothing 0.004976535178964175 +483 24 model.embedding_dim 2.0 +483 24 optimizer.lr 0.0040172099856721445 +483 24 training.batch_size 0.0 +483 24 training.label_smoothing 0.016599725675327436 +483 25 model.embedding_dim 2.0 +483 25 optimizer.lr 0.003257806564330491 +483 25 training.batch_size 1.0 +483 25 training.label_smoothing 0.06780832381742034 +483 26 model.embedding_dim 0.0 +483 
26 optimizer.lr 0.055403634412613395 +483 26 training.batch_size 0.0 +483 26 training.label_smoothing 0.014603292689517029 +483 27 model.embedding_dim 2.0 +483 27 optimizer.lr 0.08670214686955692 +483 27 training.batch_size 2.0 +483 27 training.label_smoothing 0.04837624704473535 +483 28 model.embedding_dim 0.0 +483 28 optimizer.lr 0.0034109720216177904 +483 28 training.batch_size 1.0 +483 28 training.label_smoothing 0.9838931159816808 +483 29 model.embedding_dim 2.0 +483 29 optimizer.lr 0.043415127813927444 +483 29 training.batch_size 1.0 +483 29 training.label_smoothing 0.0066069323909516775 +483 30 model.embedding_dim 1.0 +483 30 optimizer.lr 0.001241927509185628 +483 30 training.batch_size 1.0 +483 30 training.label_smoothing 0.010381142968953047 +483 31 model.embedding_dim 0.0 +483 31 optimizer.lr 0.03323081145914548 +483 31 training.batch_size 0.0 +483 31 training.label_smoothing 0.045352457250002975 +483 32 model.embedding_dim 0.0 +483 32 optimizer.lr 0.05787341645912114 +483 32 training.batch_size 0.0 +483 32 training.label_smoothing 0.004585963565794407 +483 33 model.embedding_dim 1.0 +483 33 optimizer.lr 0.01224232420082353 +483 33 training.batch_size 2.0 +483 33 training.label_smoothing 0.019508409234866447 +483 34 model.embedding_dim 0.0 +483 34 optimizer.lr 0.012911253200882876 +483 34 training.batch_size 2.0 +483 34 training.label_smoothing 0.4736255186735293 +483 35 model.embedding_dim 1.0 +483 35 optimizer.lr 0.04140635212091068 +483 35 training.batch_size 1.0 +483 35 training.label_smoothing 0.05130061036843554 +483 36 model.embedding_dim 1.0 +483 36 optimizer.lr 0.007798991256688953 +483 36 training.batch_size 0.0 +483 36 training.label_smoothing 0.03433935095755069 +483 37 model.embedding_dim 1.0 +483 37 optimizer.lr 0.009847389822697438 +483 37 training.batch_size 2.0 +483 37 training.label_smoothing 0.006646546020648493 +483 38 model.embedding_dim 2.0 +483 38 optimizer.lr 0.05162623022536138 +483 38 training.batch_size 0.0 +483 38 
training.label_smoothing 0.016010539726030483 +483 39 model.embedding_dim 1.0 +483 39 optimizer.lr 0.02378585252124998 +483 39 training.batch_size 0.0 +483 39 training.label_smoothing 0.09702848641921664 +483 40 model.embedding_dim 1.0 +483 40 optimizer.lr 0.0625660128600489 +483 40 training.batch_size 0.0 +483 40 training.label_smoothing 0.009665999968487693 +483 41 model.embedding_dim 2.0 +483 41 optimizer.lr 0.07264758913332263 +483 41 training.batch_size 1.0 +483 41 training.label_smoothing 0.13505876054786803 +483 42 model.embedding_dim 0.0 +483 42 optimizer.lr 0.06631809581493764 +483 42 training.batch_size 0.0 +483 42 training.label_smoothing 0.10203128935612961 +483 43 model.embedding_dim 0.0 +483 43 optimizer.lr 0.026849376906667687 +483 43 training.batch_size 1.0 +483 43 training.label_smoothing 0.21794707318782236 +483 44 model.embedding_dim 0.0 +483 44 optimizer.lr 0.002643330764837096 +483 44 training.batch_size 1.0 +483 44 training.label_smoothing 0.0036829185987642737 +483 45 model.embedding_dim 1.0 +483 45 optimizer.lr 0.0020826163056405925 +483 45 training.batch_size 1.0 +483 45 training.label_smoothing 0.07466701853692206 +483 46 model.embedding_dim 2.0 +483 46 optimizer.lr 0.0010568722696168394 +483 46 training.batch_size 0.0 +483 46 training.label_smoothing 0.00809674718455193 +483 47 model.embedding_dim 0.0 +483 47 optimizer.lr 0.04955878412861526 +483 47 training.batch_size 1.0 +483 47 training.label_smoothing 0.024471797919626535 +483 48 model.embedding_dim 2.0 +483 48 optimizer.lr 0.09575206223213992 +483 48 training.batch_size 0.0 +483 48 training.label_smoothing 0.016052434737137165 +483 49 model.embedding_dim 0.0 +483 49 optimizer.lr 0.05404021074411938 +483 49 training.batch_size 2.0 +483 49 training.label_smoothing 0.2943742043958724 +483 50 model.embedding_dim 1.0 +483 50 optimizer.lr 0.05742287529817346 +483 50 training.batch_size 1.0 +483 50 training.label_smoothing 0.10460764835881903 +483 51 model.embedding_dim 2.0 +483 51 
optimizer.lr 0.010092283688413044 +483 51 training.batch_size 2.0 +483 51 training.label_smoothing 0.004189528191354348 +483 52 model.embedding_dim 0.0 +483 52 optimizer.lr 0.007429715256419285 +483 52 training.batch_size 1.0 +483 52 training.label_smoothing 0.012678057259984199 +483 53 model.embedding_dim 2.0 +483 53 optimizer.lr 0.027772251162420186 +483 53 training.batch_size 2.0 +483 53 training.label_smoothing 0.0317553213254784 +483 54 model.embedding_dim 1.0 +483 54 optimizer.lr 0.04464634640940257 +483 54 training.batch_size 1.0 +483 54 training.label_smoothing 0.5206272663692048 +483 55 model.embedding_dim 1.0 +483 55 optimizer.lr 0.06995080287333785 +483 55 training.batch_size 2.0 +483 55 training.label_smoothing 0.0025190265029285336 +483 56 model.embedding_dim 1.0 +483 56 optimizer.lr 0.0013880969015849115 +483 56 training.batch_size 1.0 +483 56 training.label_smoothing 0.8491677340951219 +483 57 model.embedding_dim 1.0 +483 57 optimizer.lr 0.0022788518889422767 +483 57 training.batch_size 2.0 +483 57 training.label_smoothing 0.02448755074979269 +483 58 model.embedding_dim 2.0 +483 58 optimizer.lr 0.06439286748218342 +483 58 training.batch_size 0.0 +483 58 training.label_smoothing 0.0012184938867465765 +483 59 model.embedding_dim 2.0 +483 59 optimizer.lr 0.012225203377374885 +483 59 training.batch_size 1.0 +483 59 training.label_smoothing 0.004223247145175136 +483 60 model.embedding_dim 2.0 +483 60 optimizer.lr 0.013520587968667232 +483 60 training.batch_size 0.0 +483 60 training.label_smoothing 0.0010949135542144028 +483 61 model.embedding_dim 1.0 +483 61 optimizer.lr 0.03536268422297312 +483 61 training.batch_size 0.0 +483 61 training.label_smoothing 0.09301633052251958 +483 62 model.embedding_dim 2.0 +483 62 optimizer.lr 0.02084842839489959 +483 62 training.batch_size 2.0 +483 62 training.label_smoothing 0.006239196803382923 +483 63 model.embedding_dim 1.0 +483 63 optimizer.lr 0.006799078945098181 +483 63 training.batch_size 2.0 +483 63 
training.label_smoothing 0.001560626625466631 +483 64 model.embedding_dim 0.0 +483 64 optimizer.lr 0.007525647831124053 +483 64 training.batch_size 1.0 +483 64 training.label_smoothing 0.001024407856846759 +483 65 model.embedding_dim 0.0 +483 65 optimizer.lr 0.003098065222582192 +483 65 training.batch_size 1.0 +483 65 training.label_smoothing 0.004222825207513874 +483 66 model.embedding_dim 1.0 +483 66 optimizer.lr 0.001447084151066178 +483 66 training.batch_size 0.0 +483 66 training.label_smoothing 0.20292206662365875 +483 67 model.embedding_dim 1.0 +483 67 optimizer.lr 0.0012966983468934608 +483 67 training.batch_size 1.0 +483 67 training.label_smoothing 0.0017671283199404803 +483 68 model.embedding_dim 1.0 +483 68 optimizer.lr 0.002902783572467154 +483 68 training.batch_size 0.0 +483 68 training.label_smoothing 0.016624249253631016 +483 69 model.embedding_dim 1.0 +483 69 optimizer.lr 0.024388666664697373 +483 69 training.batch_size 2.0 +483 69 training.label_smoothing 0.09098695032784217 +483 70 model.embedding_dim 1.0 +483 70 optimizer.lr 0.05130305710477661 +483 70 training.batch_size 2.0 +483 70 training.label_smoothing 0.07561069588552566 +483 71 model.embedding_dim 2.0 +483 71 optimizer.lr 0.023058654096284256 +483 71 training.batch_size 0.0 +483 71 training.label_smoothing 0.010859249281867748 +483 72 model.embedding_dim 2.0 +483 72 optimizer.lr 0.002418306896577348 +483 72 training.batch_size 1.0 +483 72 training.label_smoothing 0.11034737280051345 +483 73 model.embedding_dim 2.0 +483 73 optimizer.lr 0.01814603553841799 +483 73 training.batch_size 2.0 +483 73 training.label_smoothing 0.20510243908750547 +483 74 model.embedding_dim 2.0 +483 74 optimizer.lr 0.02188248997405881 +483 74 training.batch_size 2.0 +483 74 training.label_smoothing 0.6669876635556399 +483 75 model.embedding_dim 1.0 +483 75 optimizer.lr 0.0022027103355148894 +483 75 training.batch_size 2.0 +483 75 training.label_smoothing 0.00133213951881941 +483 76 model.embedding_dim 2.0 +483 76 
optimizer.lr 0.027235779801693793 +483 76 training.batch_size 2.0 +483 76 training.label_smoothing 0.014124826667065772 +483 77 model.embedding_dim 2.0 +483 77 optimizer.lr 0.022021988159377105 +483 77 training.batch_size 2.0 +483 77 training.label_smoothing 0.001927539409391405 +483 78 model.embedding_dim 1.0 +483 78 optimizer.lr 0.03330405433083684 +483 78 training.batch_size 0.0 +483 78 training.label_smoothing 0.30659816125508266 +483 79 model.embedding_dim 2.0 +483 79 optimizer.lr 0.0013035590161965386 +483 79 training.batch_size 1.0 +483 79 training.label_smoothing 0.0014652230010795417 +483 80 model.embedding_dim 0.0 +483 80 optimizer.lr 0.002986484363680103 +483 80 training.batch_size 2.0 +483 80 training.label_smoothing 0.08775519341337105 +483 81 model.embedding_dim 1.0 +483 81 optimizer.lr 0.018269886921777126 +483 81 training.batch_size 2.0 +483 81 training.label_smoothing 0.0791824050114566 +483 82 model.embedding_dim 2.0 +483 82 optimizer.lr 0.062355565644331014 +483 82 training.batch_size 2.0 +483 82 training.label_smoothing 0.007983481395963514 +483 83 model.embedding_dim 1.0 +483 83 optimizer.lr 0.06759262871643101 +483 83 training.batch_size 1.0 +483 83 training.label_smoothing 0.0016034025673463904 +483 84 model.embedding_dim 2.0 +483 84 optimizer.lr 0.009482698104592601 +483 84 training.batch_size 0.0 +483 84 training.label_smoothing 0.0976993698191096 +483 85 model.embedding_dim 2.0 +483 85 optimizer.lr 0.005182036127511529 +483 85 training.batch_size 0.0 +483 85 training.label_smoothing 0.5278603121757083 +483 86 model.embedding_dim 2.0 +483 86 optimizer.lr 0.013097508291537623 +483 86 training.batch_size 1.0 +483 86 training.label_smoothing 0.753171024502219 +483 87 model.embedding_dim 0.0 +483 87 optimizer.lr 0.0016218404717097655 +483 87 training.batch_size 1.0 +483 87 training.label_smoothing 0.8534091061511628 +483 88 model.embedding_dim 2.0 +483 88 optimizer.lr 0.005689327856145438 +483 88 training.batch_size 0.0 +483 88 
training.label_smoothing 0.06305044483635597 +483 89 model.embedding_dim 0.0 +483 89 optimizer.lr 0.07637639619242051 +483 89 training.batch_size 0.0 +483 89 training.label_smoothing 0.0026658314980742373 +483 90 model.embedding_dim 0.0 +483 90 optimizer.lr 0.05822475246000189 +483 90 training.batch_size 2.0 +483 90 training.label_smoothing 0.004899389142968834 +483 91 model.embedding_dim 0.0 +483 91 optimizer.lr 0.0036461107321761406 +483 91 training.batch_size 0.0 +483 91 training.label_smoothing 0.01589519951540681 +483 92 model.embedding_dim 0.0 +483 92 optimizer.lr 0.0031740130592463798 +483 92 training.batch_size 2.0 +483 92 training.label_smoothing 0.6069697267525533 +483 93 model.embedding_dim 2.0 +483 93 optimizer.lr 0.049800531720241535 +483 93 training.batch_size 1.0 +483 93 training.label_smoothing 0.004055883633275707 +483 94 model.embedding_dim 0.0 +483 94 optimizer.lr 0.002189346201815632 +483 94 training.batch_size 0.0 +483 94 training.label_smoothing 0.0014933270058397675 +483 95 model.embedding_dim 0.0 +483 95 optimizer.lr 0.005331807857807339 +483 95 training.batch_size 1.0 +483 95 training.label_smoothing 0.025529536926009205 +483 96 model.embedding_dim 1.0 +483 96 optimizer.lr 0.001424034199728668 +483 96 training.batch_size 0.0 +483 96 training.label_smoothing 0.022304157126080016 +483 97 model.embedding_dim 2.0 +483 97 optimizer.lr 0.001525476127308534 +483 97 training.batch_size 1.0 +483 97 training.label_smoothing 0.007337046637284455 +483 98 model.embedding_dim 0.0 +483 98 optimizer.lr 0.018481110720037844 +483 98 training.batch_size 1.0 +483 98 training.label_smoothing 0.5655235436112594 +483 99 model.embedding_dim 1.0 +483 99 optimizer.lr 0.0053036322411555755 +483 99 training.batch_size 1.0 +483 99 training.label_smoothing 0.004513646884460763 +483 100 model.embedding_dim 0.0 +483 100 optimizer.lr 0.03717342736445328 +483 100 training.batch_size 0.0 +483 100 training.label_smoothing 0.11541852792417981 +483 1 dataset """kinships""" +483 
1 model """proje""" +483 1 loss """crossentropy""" +483 1 regularizer """no""" +483 1 optimizer """adam""" +483 1 training_loop """lcwa""" +483 1 evaluator """rankbased""" +483 2 dataset """kinships""" +483 2 model """proje""" +483 2 loss """crossentropy""" +483 2 regularizer """no""" +483 2 optimizer """adam""" +483 2 training_loop """lcwa""" +483 2 evaluator """rankbased""" +483 3 dataset """kinships""" +483 3 model """proje""" +483 3 loss """crossentropy""" +483 3 regularizer """no""" +483 3 optimizer """adam""" +483 3 training_loop """lcwa""" +483 3 evaluator """rankbased""" +483 4 dataset """kinships""" +483 4 model """proje""" +483 4 loss """crossentropy""" +483 4 regularizer """no""" +483 4 optimizer """adam""" +483 4 training_loop """lcwa""" +483 4 evaluator """rankbased""" +483 5 dataset """kinships""" +483 5 model """proje""" +483 5 loss """crossentropy""" +483 5 regularizer """no""" +483 5 optimizer """adam""" +483 5 training_loop """lcwa""" +483 5 evaluator """rankbased""" +483 6 dataset """kinships""" +483 6 model """proje""" +483 6 loss """crossentropy""" +483 6 regularizer """no""" +483 6 optimizer """adam""" +483 6 training_loop """lcwa""" +483 6 evaluator """rankbased""" +483 7 dataset """kinships""" +483 7 model """proje""" +483 7 loss """crossentropy""" +483 7 regularizer """no""" +483 7 optimizer """adam""" +483 7 training_loop """lcwa""" +483 7 evaluator """rankbased""" +483 8 dataset """kinships""" +483 8 model """proje""" +483 8 loss """crossentropy""" +483 8 regularizer """no""" +483 8 optimizer """adam""" +483 8 training_loop """lcwa""" +483 8 evaluator """rankbased""" +483 9 dataset """kinships""" +483 9 model """proje""" +483 9 loss """crossentropy""" +483 9 regularizer """no""" +483 9 optimizer """adam""" +483 9 training_loop """lcwa""" +483 9 evaluator """rankbased""" +483 10 dataset """kinships""" +483 10 model """proje""" +483 10 loss """crossentropy""" +483 10 regularizer """no""" +483 10 optimizer """adam""" +483 10 training_loop 
"""lcwa""" +483 10 evaluator """rankbased""" +483 11 dataset """kinships""" +483 11 model """proje""" +483 11 loss """crossentropy""" +483 11 regularizer """no""" +483 11 optimizer """adam""" +483 11 training_loop """lcwa""" +483 11 evaluator """rankbased""" +483 12 dataset """kinships""" +483 12 model """proje""" +483 12 loss """crossentropy""" +483 12 regularizer """no""" +483 12 optimizer """adam""" +483 12 training_loop """lcwa""" +483 12 evaluator """rankbased""" +483 13 dataset """kinships""" +483 13 model """proje""" +483 13 loss """crossentropy""" +483 13 regularizer """no""" +483 13 optimizer """adam""" +483 13 training_loop """lcwa""" +483 13 evaluator """rankbased""" +483 14 dataset """kinships""" +483 14 model """proje""" +483 14 loss """crossentropy""" +483 14 regularizer """no""" +483 14 optimizer """adam""" +483 14 training_loop """lcwa""" +483 14 evaluator """rankbased""" +483 15 dataset """kinships""" +483 15 model """proje""" +483 15 loss """crossentropy""" +483 15 regularizer """no""" +483 15 optimizer """adam""" +483 15 training_loop """lcwa""" +483 15 evaluator """rankbased""" +483 16 dataset """kinships""" +483 16 model """proje""" +483 16 loss """crossentropy""" +483 16 regularizer """no""" +483 16 optimizer """adam""" +483 16 training_loop """lcwa""" +483 16 evaluator """rankbased""" +483 17 dataset """kinships""" +483 17 model """proje""" +483 17 loss """crossentropy""" +483 17 regularizer """no""" +483 17 optimizer """adam""" +483 17 training_loop """lcwa""" +483 17 evaluator """rankbased""" +483 18 dataset """kinships""" +483 18 model """proje""" +483 18 loss """crossentropy""" +483 18 regularizer """no""" +483 18 optimizer """adam""" +483 18 training_loop """lcwa""" +483 18 evaluator """rankbased""" +483 19 dataset """kinships""" +483 19 model """proje""" +483 19 loss """crossentropy""" +483 19 regularizer """no""" +483 19 optimizer """adam""" +483 19 training_loop """lcwa""" +483 19 evaluator """rankbased""" +483 20 dataset 
"""kinships""" +483 20 model """proje""" +483 20 loss """crossentropy""" +483 20 regularizer """no""" +483 20 optimizer """adam""" +483 20 training_loop """lcwa""" +483 20 evaluator """rankbased""" +483 21 dataset """kinships""" +483 21 model """proje""" +483 21 loss """crossentropy""" +483 21 regularizer """no""" +483 21 optimizer """adam""" +483 21 training_loop """lcwa""" +483 21 evaluator """rankbased""" +483 22 dataset """kinships""" +483 22 model """proje""" +483 22 loss """crossentropy""" +483 22 regularizer """no""" +483 22 optimizer """adam""" +483 22 training_loop """lcwa""" +483 22 evaluator """rankbased""" +483 23 dataset """kinships""" +483 23 model """proje""" +483 23 loss """crossentropy""" +483 23 regularizer """no""" +483 23 optimizer """adam""" +483 23 training_loop """lcwa""" +483 23 evaluator """rankbased""" +483 24 dataset """kinships""" +483 24 model """proje""" +483 24 loss """crossentropy""" +483 24 regularizer """no""" +483 24 optimizer """adam""" +483 24 training_loop """lcwa""" +483 24 evaluator """rankbased""" +483 25 dataset """kinships""" +483 25 model """proje""" +483 25 loss """crossentropy""" +483 25 regularizer """no""" +483 25 optimizer """adam""" +483 25 training_loop """lcwa""" +483 25 evaluator """rankbased""" +483 26 dataset """kinships""" +483 26 model """proje""" +483 26 loss """crossentropy""" +483 26 regularizer """no""" +483 26 optimizer """adam""" +483 26 training_loop """lcwa""" +483 26 evaluator """rankbased""" +483 27 dataset """kinships""" +483 27 model """proje""" +483 27 loss """crossentropy""" +483 27 regularizer """no""" +483 27 optimizer """adam""" +483 27 training_loop """lcwa""" +483 27 evaluator """rankbased""" +483 28 dataset """kinships""" +483 28 model """proje""" +483 28 loss """crossentropy""" +483 28 regularizer """no""" +483 28 optimizer """adam""" +483 28 training_loop """lcwa""" +483 28 evaluator """rankbased""" +483 29 dataset """kinships""" +483 29 model """proje""" +483 29 loss """crossentropy""" 
+483 29 regularizer """no""" +483 29 optimizer """adam""" +483 29 training_loop """lcwa""" +483 29 evaluator """rankbased""" +483 30 dataset """kinships""" +483 30 model """proje""" +483 30 loss """crossentropy""" +483 30 regularizer """no""" +483 30 optimizer """adam""" +483 30 training_loop """lcwa""" +483 30 evaluator """rankbased""" +483 31 dataset """kinships""" +483 31 model """proje""" +483 31 loss """crossentropy""" +483 31 regularizer """no""" +483 31 optimizer """adam""" +483 31 training_loop """lcwa""" +483 31 evaluator """rankbased""" +483 32 dataset """kinships""" +483 32 model """proje""" +483 32 loss """crossentropy""" +483 32 regularizer """no""" +483 32 optimizer """adam""" +483 32 training_loop """lcwa""" +483 32 evaluator """rankbased""" +483 33 dataset """kinships""" +483 33 model """proje""" +483 33 loss """crossentropy""" +483 33 regularizer """no""" +483 33 optimizer """adam""" +483 33 training_loop """lcwa""" +483 33 evaluator """rankbased""" +483 34 dataset """kinships""" +483 34 model """proje""" +483 34 loss """crossentropy""" +483 34 regularizer """no""" +483 34 optimizer """adam""" +483 34 training_loop """lcwa""" +483 34 evaluator """rankbased""" +483 35 dataset """kinships""" +483 35 model """proje""" +483 35 loss """crossentropy""" +483 35 regularizer """no""" +483 35 optimizer """adam""" +483 35 training_loop """lcwa""" +483 35 evaluator """rankbased""" +483 36 dataset """kinships""" +483 36 model """proje""" +483 36 loss """crossentropy""" +483 36 regularizer """no""" +483 36 optimizer """adam""" +483 36 training_loop """lcwa""" +483 36 evaluator """rankbased""" +483 37 dataset """kinships""" +483 37 model """proje""" +483 37 loss """crossentropy""" +483 37 regularizer """no""" +483 37 optimizer """adam""" +483 37 training_loop """lcwa""" +483 37 evaluator """rankbased""" +483 38 dataset """kinships""" +483 38 model """proje""" +483 38 loss """crossentropy""" +483 38 regularizer """no""" +483 38 optimizer """adam""" +483 38 
training_loop """lcwa""" +483 38 evaluator """rankbased""" +483 39 dataset """kinships""" +483 39 model """proje""" +483 39 loss """crossentropy""" +483 39 regularizer """no""" +483 39 optimizer """adam""" +483 39 training_loop """lcwa""" +483 39 evaluator """rankbased""" +483 40 dataset """kinships""" +483 40 model """proje""" +483 40 loss """crossentropy""" +483 40 regularizer """no""" +483 40 optimizer """adam""" +483 40 training_loop """lcwa""" +483 40 evaluator """rankbased""" +483 41 dataset """kinships""" +483 41 model """proje""" +483 41 loss """crossentropy""" +483 41 regularizer """no""" +483 41 optimizer """adam""" +483 41 training_loop """lcwa""" +483 41 evaluator """rankbased""" +483 42 dataset """kinships""" +483 42 model """proje""" +483 42 loss """crossentropy""" +483 42 regularizer """no""" +483 42 optimizer """adam""" +483 42 training_loop """lcwa""" +483 42 evaluator """rankbased""" +483 43 dataset """kinships""" +483 43 model """proje""" +483 43 loss """crossentropy""" +483 43 regularizer """no""" +483 43 optimizer """adam""" +483 43 training_loop """lcwa""" +483 43 evaluator """rankbased""" +483 44 dataset """kinships""" +483 44 model """proje""" +483 44 loss """crossentropy""" +483 44 regularizer """no""" +483 44 optimizer """adam""" +483 44 training_loop """lcwa""" +483 44 evaluator """rankbased""" +483 45 dataset """kinships""" +483 45 model """proje""" +483 45 loss """crossentropy""" +483 45 regularizer """no""" +483 45 optimizer """adam""" +483 45 training_loop """lcwa""" +483 45 evaluator """rankbased""" +483 46 dataset """kinships""" +483 46 model """proje""" +483 46 loss """crossentropy""" +483 46 regularizer """no""" +483 46 optimizer """adam""" +483 46 training_loop """lcwa""" +483 46 evaluator """rankbased""" +483 47 dataset """kinships""" +483 47 model """proje""" +483 47 loss """crossentropy""" +483 47 regularizer """no""" +483 47 optimizer """adam""" +483 47 training_loop """lcwa""" +483 47 evaluator """rankbased""" +483 48 
dataset """kinships""" +483 48 model """proje""" +483 48 loss """crossentropy""" +483 48 regularizer """no""" +483 48 optimizer """adam""" +483 48 training_loop """lcwa""" +483 48 evaluator """rankbased""" +483 49 dataset """kinships""" +483 49 model """proje""" +483 49 loss """crossentropy""" +483 49 regularizer """no""" +483 49 optimizer """adam""" +483 49 training_loop """lcwa""" +483 49 evaluator """rankbased""" +483 50 dataset """kinships""" +483 50 model """proje""" +483 50 loss """crossentropy""" +483 50 regularizer """no""" +483 50 optimizer """adam""" +483 50 training_loop """lcwa""" +483 50 evaluator """rankbased""" +483 51 dataset """kinships""" +483 51 model """proje""" +483 51 loss """crossentropy""" +483 51 regularizer """no""" +483 51 optimizer """adam""" +483 51 training_loop """lcwa""" +483 51 evaluator """rankbased""" +483 52 dataset """kinships""" +483 52 model """proje""" +483 52 loss """crossentropy""" +483 52 regularizer """no""" +483 52 optimizer """adam""" +483 52 training_loop """lcwa""" +483 52 evaluator """rankbased""" +483 53 dataset """kinships""" +483 53 model """proje""" +483 53 loss """crossentropy""" +483 53 regularizer """no""" +483 53 optimizer """adam""" +483 53 training_loop """lcwa""" +483 53 evaluator """rankbased""" +483 54 dataset """kinships""" +483 54 model """proje""" +483 54 loss """crossentropy""" +483 54 regularizer """no""" +483 54 optimizer """adam""" +483 54 training_loop """lcwa""" +483 54 evaluator """rankbased""" +483 55 dataset """kinships""" +483 55 model """proje""" +483 55 loss """crossentropy""" +483 55 regularizer """no""" +483 55 optimizer """adam""" +483 55 training_loop """lcwa""" +483 55 evaluator """rankbased""" +483 56 dataset """kinships""" +483 56 model """proje""" +483 56 loss """crossentropy""" +483 56 regularizer """no""" +483 56 optimizer """adam""" +483 56 training_loop """lcwa""" +483 56 evaluator """rankbased""" +483 57 dataset """kinships""" +483 57 model """proje""" +483 57 loss 
"""crossentropy""" +483 57 regularizer """no""" +483 57 optimizer """adam""" +483 57 training_loop """lcwa""" +483 57 evaluator """rankbased""" +483 58 dataset """kinships""" +483 58 model """proje""" +483 58 loss """crossentropy""" +483 58 regularizer """no""" +483 58 optimizer """adam""" +483 58 training_loop """lcwa""" +483 58 evaluator """rankbased""" +483 59 dataset """kinships""" +483 59 model """proje""" +483 59 loss """crossentropy""" +483 59 regularizer """no""" +483 59 optimizer """adam""" +483 59 training_loop """lcwa""" +483 59 evaluator """rankbased""" +483 60 dataset """kinships""" +483 60 model """proje""" +483 60 loss """crossentropy""" +483 60 regularizer """no""" +483 60 optimizer """adam""" +483 60 training_loop """lcwa""" +483 60 evaluator """rankbased""" +483 61 dataset """kinships""" +483 61 model """proje""" +483 61 loss """crossentropy""" +483 61 regularizer """no""" +483 61 optimizer """adam""" +483 61 training_loop """lcwa""" +483 61 evaluator """rankbased""" +483 62 dataset """kinships""" +483 62 model """proje""" +483 62 loss """crossentropy""" +483 62 regularizer """no""" +483 62 optimizer """adam""" +483 62 training_loop """lcwa""" +483 62 evaluator """rankbased""" +483 63 dataset """kinships""" +483 63 model """proje""" +483 63 loss """crossentropy""" +483 63 regularizer """no""" +483 63 optimizer """adam""" +483 63 training_loop """lcwa""" +483 63 evaluator """rankbased""" +483 64 dataset """kinships""" +483 64 model """proje""" +483 64 loss """crossentropy""" +483 64 regularizer """no""" +483 64 optimizer """adam""" +483 64 training_loop """lcwa""" +483 64 evaluator """rankbased""" +483 65 dataset """kinships""" +483 65 model """proje""" +483 65 loss """crossentropy""" +483 65 regularizer """no""" +483 65 optimizer """adam""" +483 65 training_loop """lcwa""" +483 65 evaluator """rankbased""" +483 66 dataset """kinships""" +483 66 model """proje""" +483 66 loss """crossentropy""" +483 66 regularizer """no""" +483 66 optimizer 
"""adam""" +483 66 training_loop """lcwa""" +483 66 evaluator """rankbased""" +483 67 dataset """kinships""" +483 67 model """proje""" +483 67 loss """crossentropy""" +483 67 regularizer """no""" +483 67 optimizer """adam""" +483 67 training_loop """lcwa""" +483 67 evaluator """rankbased""" +483 68 dataset """kinships""" +483 68 model """proje""" +483 68 loss """crossentropy""" +483 68 regularizer """no""" +483 68 optimizer """adam""" +483 68 training_loop """lcwa""" +483 68 evaluator """rankbased""" +483 69 dataset """kinships""" +483 69 model """proje""" +483 69 loss """crossentropy""" +483 69 regularizer """no""" +483 69 optimizer """adam""" +483 69 training_loop """lcwa""" +483 69 evaluator """rankbased""" +483 70 dataset """kinships""" +483 70 model """proje""" +483 70 loss """crossentropy""" +483 70 regularizer """no""" +483 70 optimizer """adam""" +483 70 training_loop """lcwa""" +483 70 evaluator """rankbased""" +483 71 dataset """kinships""" +483 71 model """proje""" +483 71 loss """crossentropy""" +483 71 regularizer """no""" +483 71 optimizer """adam""" +483 71 training_loop """lcwa""" +483 71 evaluator """rankbased""" +483 72 dataset """kinships""" +483 72 model """proje""" +483 72 loss """crossentropy""" +483 72 regularizer """no""" +483 72 optimizer """adam""" +483 72 training_loop """lcwa""" +483 72 evaluator """rankbased""" +483 73 dataset """kinships""" +483 73 model """proje""" +483 73 loss """crossentropy""" +483 73 regularizer """no""" +483 73 optimizer """adam""" +483 73 training_loop """lcwa""" +483 73 evaluator """rankbased""" +483 74 dataset """kinships""" +483 74 model """proje""" +483 74 loss """crossentropy""" +483 74 regularizer """no""" +483 74 optimizer """adam""" +483 74 training_loop """lcwa""" +483 74 evaluator """rankbased""" +483 75 dataset """kinships""" +483 75 model """proje""" +483 75 loss """crossentropy""" +483 75 regularizer """no""" +483 75 optimizer """adam""" +483 75 training_loop """lcwa""" +483 75 evaluator 
"""rankbased""" +483 76 dataset """kinships""" +483 76 model """proje""" +483 76 loss """crossentropy""" +483 76 regularizer """no""" +483 76 optimizer """adam""" +483 76 training_loop """lcwa""" +483 76 evaluator """rankbased""" +483 77 dataset """kinships""" +483 77 model """proje""" +483 77 loss """crossentropy""" +483 77 regularizer """no""" +483 77 optimizer """adam""" +483 77 training_loop """lcwa""" +483 77 evaluator """rankbased""" +483 78 dataset """kinships""" +483 78 model """proje""" +483 78 loss """crossentropy""" +483 78 regularizer """no""" +483 78 optimizer """adam""" +483 78 training_loop """lcwa""" +483 78 evaluator """rankbased""" +483 79 dataset """kinships""" +483 79 model """proje""" +483 79 loss """crossentropy""" +483 79 regularizer """no""" +483 79 optimizer """adam""" +483 79 training_loop """lcwa""" +483 79 evaluator """rankbased""" +483 80 dataset """kinships""" +483 80 model """proje""" +483 80 loss """crossentropy""" +483 80 regularizer """no""" +483 80 optimizer """adam""" +483 80 training_loop """lcwa""" +483 80 evaluator """rankbased""" +483 81 dataset """kinships""" +483 81 model """proje""" +483 81 loss """crossentropy""" +483 81 regularizer """no""" +483 81 optimizer """adam""" +483 81 training_loop """lcwa""" +483 81 evaluator """rankbased""" +483 82 dataset """kinships""" +483 82 model """proje""" +483 82 loss """crossentropy""" +483 82 regularizer """no""" +483 82 optimizer """adam""" +483 82 training_loop """lcwa""" +483 82 evaluator """rankbased""" +483 83 dataset """kinships""" +483 83 model """proje""" +483 83 loss """crossentropy""" +483 83 regularizer """no""" +483 83 optimizer """adam""" +483 83 training_loop """lcwa""" +483 83 evaluator """rankbased""" +483 84 dataset """kinships""" +483 84 model """proje""" +483 84 loss """crossentropy""" +483 84 regularizer """no""" +483 84 optimizer """adam""" +483 84 training_loop """lcwa""" +483 84 evaluator """rankbased""" +483 85 dataset """kinships""" +483 85 model """proje""" 
+483 85 loss """crossentropy""" +483 85 regularizer """no""" +483 85 optimizer """adam""" +483 85 training_loop """lcwa""" +483 85 evaluator """rankbased""" +483 86 dataset """kinships""" +483 86 model """proje""" +483 86 loss """crossentropy""" +483 86 regularizer """no""" +483 86 optimizer """adam""" +483 86 training_loop """lcwa""" +483 86 evaluator """rankbased""" +483 87 dataset """kinships""" +483 87 model """proje""" +483 87 loss """crossentropy""" +483 87 regularizer """no""" +483 87 optimizer """adam""" +483 87 training_loop """lcwa""" +483 87 evaluator """rankbased""" +483 88 dataset """kinships""" +483 88 model """proje""" +483 88 loss """crossentropy""" +483 88 regularizer """no""" +483 88 optimizer """adam""" +483 88 training_loop """lcwa""" +483 88 evaluator """rankbased""" +483 89 dataset """kinships""" +483 89 model """proje""" +483 89 loss """crossentropy""" +483 89 regularizer """no""" +483 89 optimizer """adam""" +483 89 training_loop """lcwa""" +483 89 evaluator """rankbased""" +483 90 dataset """kinships""" +483 90 model """proje""" +483 90 loss """crossentropy""" +483 90 regularizer """no""" +483 90 optimizer """adam""" +483 90 training_loop """lcwa""" +483 90 evaluator """rankbased""" +483 91 dataset """kinships""" +483 91 model """proje""" +483 91 loss """crossentropy""" +483 91 regularizer """no""" +483 91 optimizer """adam""" +483 91 training_loop """lcwa""" +483 91 evaluator """rankbased""" +483 92 dataset """kinships""" +483 92 model """proje""" +483 92 loss """crossentropy""" +483 92 regularizer """no""" +483 92 optimizer """adam""" +483 92 training_loop """lcwa""" +483 92 evaluator """rankbased""" +483 93 dataset """kinships""" +483 93 model """proje""" +483 93 loss """crossentropy""" +483 93 regularizer """no""" +483 93 optimizer """adam""" +483 93 training_loop """lcwa""" +483 93 evaluator """rankbased""" +483 94 dataset """kinships""" +483 94 model """proje""" +483 94 loss """crossentropy""" +483 94 regularizer """no""" +483 94 
optimizer """adam""" +483 94 training_loop """lcwa""" +483 94 evaluator """rankbased""" +483 95 dataset """kinships""" +483 95 model """proje""" +483 95 loss """crossentropy""" +483 95 regularizer """no""" +483 95 optimizer """adam""" +483 95 training_loop """lcwa""" +483 95 evaluator """rankbased""" +483 96 dataset """kinships""" +483 96 model """proje""" +483 96 loss """crossentropy""" +483 96 regularizer """no""" +483 96 optimizer """adam""" +483 96 training_loop """lcwa""" +483 96 evaluator """rankbased""" +483 97 dataset """kinships""" +483 97 model """proje""" +483 97 loss """crossentropy""" +483 97 regularizer """no""" +483 97 optimizer """adam""" +483 97 training_loop """lcwa""" +483 97 evaluator """rankbased""" +483 98 dataset """kinships""" +483 98 model """proje""" +483 98 loss """crossentropy""" +483 98 regularizer """no""" +483 98 optimizer """adam""" +483 98 training_loop """lcwa""" +483 98 evaluator """rankbased""" +483 99 dataset """kinships""" +483 99 model """proje""" +483 99 loss """crossentropy""" +483 99 regularizer """no""" +483 99 optimizer """adam""" +483 99 training_loop """lcwa""" +483 99 evaluator """rankbased""" +483 100 dataset """kinships""" +483 100 model """proje""" +483 100 loss """crossentropy""" +483 100 regularizer """no""" +483 100 optimizer """adam""" +483 100 training_loop """lcwa""" +483 100 evaluator """rankbased""" +484 1 model.embedding_dim 1.0 +484 1 optimizer.lr 0.001040357622241297 +484 1 training.batch_size 0.0 +484 1 training.label_smoothing 0.18856650266524638 +484 2 model.embedding_dim 2.0 +484 2 optimizer.lr 0.049870963875130465 +484 2 training.batch_size 0.0 +484 2 training.label_smoothing 0.00205761098027115 +484 3 model.embedding_dim 2.0 +484 3 optimizer.lr 0.005201672424136473 +484 3 training.batch_size 0.0 +484 3 training.label_smoothing 0.13608481245747403 +484 4 model.embedding_dim 2.0 +484 4 optimizer.lr 0.017599787188739013 +484 4 training.batch_size 2.0 +484 4 training.label_smoothing 0.002001391565074536 
+484 5 model.embedding_dim 2.0 +484 5 optimizer.lr 0.0022323419594540103 +484 5 training.batch_size 0.0 +484 5 training.label_smoothing 0.006275452689597104 +484 6 model.embedding_dim 0.0 +484 6 optimizer.lr 0.0018406015827503583 +484 6 training.batch_size 2.0 +484 6 training.label_smoothing 0.008774293977837133 +484 7 model.embedding_dim 0.0 +484 7 optimizer.lr 0.008532327146461417 +484 7 training.batch_size 0.0 +484 7 training.label_smoothing 0.0056714709708286576 +484 8 model.embedding_dim 2.0 +484 8 optimizer.lr 0.001362521046076038 +484 8 training.batch_size 2.0 +484 8 training.label_smoothing 0.0025853514901322287 +484 9 model.embedding_dim 0.0 +484 9 optimizer.lr 0.03984953648777883 +484 9 training.batch_size 2.0 +484 9 training.label_smoothing 0.7091549185293353 +484 10 model.embedding_dim 0.0 +484 10 optimizer.lr 0.0038881980083917563 +484 10 training.batch_size 2.0 +484 10 training.label_smoothing 0.001876228708881326 +484 11 model.embedding_dim 2.0 +484 11 optimizer.lr 0.023462647987925286 +484 11 training.batch_size 2.0 +484 11 training.label_smoothing 0.006492636592482794 +484 12 model.embedding_dim 2.0 +484 12 optimizer.lr 0.043651155752001534 +484 12 training.batch_size 0.0 +484 12 training.label_smoothing 0.0033304212437618 +484 13 model.embedding_dim 0.0 +484 13 optimizer.lr 0.06047314354573916 +484 13 training.batch_size 0.0 +484 13 training.label_smoothing 0.6491066281528458 +484 14 model.embedding_dim 1.0 +484 14 optimizer.lr 0.03097093281788762 +484 14 training.batch_size 0.0 +484 14 training.label_smoothing 0.10072533130204099 +484 15 model.embedding_dim 0.0 +484 15 optimizer.lr 0.001433233821035434 +484 15 training.batch_size 1.0 +484 15 training.label_smoothing 0.015240717044748053 +484 16 model.embedding_dim 1.0 +484 16 optimizer.lr 0.057020762041838875 +484 16 training.batch_size 1.0 +484 16 training.label_smoothing 0.0064341342270432585 +484 17 model.embedding_dim 0.0 +484 17 optimizer.lr 0.002935271053449476 +484 17 training.batch_size 
2.0 +484 17 training.label_smoothing 0.0072343455467043846 +484 18 model.embedding_dim 1.0 +484 18 optimizer.lr 0.0024459005599017413 +484 18 training.batch_size 0.0 +484 18 training.label_smoothing 0.047905329893573864 +484 19 model.embedding_dim 1.0 +484 19 optimizer.lr 0.02457070390180022 +484 19 training.batch_size 0.0 +484 19 training.label_smoothing 0.009553957195221379 +484 20 model.embedding_dim 0.0 +484 20 optimizer.lr 0.009165672938709485 +484 20 training.batch_size 0.0 +484 20 training.label_smoothing 0.001038404853434909 +484 21 model.embedding_dim 0.0 +484 21 optimizer.lr 0.006715899854145585 +484 21 training.batch_size 2.0 +484 21 training.label_smoothing 0.0032376522611163873 +484 22 model.embedding_dim 1.0 +484 22 optimizer.lr 0.0717231923521806 +484 22 training.batch_size 0.0 +484 22 training.label_smoothing 0.18059105468394684 +484 23 model.embedding_dim 0.0 +484 23 optimizer.lr 0.0036604037206815648 +484 23 training.batch_size 1.0 +484 23 training.label_smoothing 0.0073405459049175155 +484 24 model.embedding_dim 2.0 +484 24 optimizer.lr 0.024878749059840756 +484 24 training.batch_size 2.0 +484 24 training.label_smoothing 0.0015044446735790105 +484 25 model.embedding_dim 0.0 +484 25 optimizer.lr 0.005135803190586995 +484 25 training.batch_size 1.0 +484 25 training.label_smoothing 0.4861286643438821 +484 26 model.embedding_dim 0.0 +484 26 optimizer.lr 0.024674142912928042 +484 26 training.batch_size 1.0 +484 26 training.label_smoothing 0.007469931894671171 +484 27 model.embedding_dim 0.0 +484 27 optimizer.lr 0.02782711218096539 +484 27 training.batch_size 2.0 +484 27 training.label_smoothing 0.05980721948303156 +484 28 model.embedding_dim 1.0 +484 28 optimizer.lr 0.04469077013224345 +484 28 training.batch_size 0.0 +484 28 training.label_smoothing 0.030236529648270265 +484 29 model.embedding_dim 1.0 +484 29 optimizer.lr 0.010768623508781265 +484 29 training.batch_size 0.0 +484 29 training.label_smoothing 0.9986361492601259 +484 30 
model.embedding_dim 2.0 +484 30 optimizer.lr 0.013364476941061191 +484 30 training.batch_size 2.0 +484 30 training.label_smoothing 0.0026056967161635232 +484 31 model.embedding_dim 1.0 +484 31 optimizer.lr 0.0014018407556168604 +484 31 training.batch_size 1.0 +484 31 training.label_smoothing 0.3512328623154653 +484 32 model.embedding_dim 2.0 +484 32 optimizer.lr 0.008425572406791586 +484 32 training.batch_size 0.0 +484 32 training.label_smoothing 0.059904320501454986 +484 33 model.embedding_dim 0.0 +484 33 optimizer.lr 0.0015621369575617259 +484 33 training.batch_size 2.0 +484 33 training.label_smoothing 0.0010097884771583833 +484 34 model.embedding_dim 0.0 +484 34 optimizer.lr 0.0030435361724862847 +484 34 training.batch_size 1.0 +484 34 training.label_smoothing 0.012081153175268271 +484 35 model.embedding_dim 0.0 +484 35 optimizer.lr 0.0019573083857313753 +484 35 training.batch_size 2.0 +484 35 training.label_smoothing 0.008460334234186693 +484 36 model.embedding_dim 1.0 +484 36 optimizer.lr 0.003344680464239521 +484 36 training.batch_size 2.0 +484 36 training.label_smoothing 0.019654479294944033 +484 37 model.embedding_dim 1.0 +484 37 optimizer.lr 0.05152719359747089 +484 37 training.batch_size 1.0 +484 37 training.label_smoothing 0.0017806744553412507 +484 38 model.embedding_dim 1.0 +484 38 optimizer.lr 0.008198230229564393 +484 38 training.batch_size 2.0 +484 38 training.label_smoothing 0.04416521286900648 +484 39 model.embedding_dim 2.0 +484 39 optimizer.lr 0.024136865551614133 +484 39 training.batch_size 0.0 +484 39 training.label_smoothing 0.02640110218140288 +484 40 model.embedding_dim 1.0 +484 40 optimizer.lr 0.08165851437730229 +484 40 training.batch_size 1.0 +484 40 training.label_smoothing 0.00969555261520761 +484 41 model.embedding_dim 0.0 +484 41 optimizer.lr 0.0021091022241724123 +484 41 training.batch_size 1.0 +484 41 training.label_smoothing 0.03411224960460409 +484 42 model.embedding_dim 0.0 +484 42 optimizer.lr 0.03428045228918504 +484 42 
training.batch_size 1.0 +484 42 training.label_smoothing 0.0016632333415679192 +484 43 model.embedding_dim 2.0 +484 43 optimizer.lr 0.005260615427818803 +484 43 training.batch_size 0.0 +484 43 training.label_smoothing 0.042301884977806854 +484 44 model.embedding_dim 1.0 +484 44 optimizer.lr 0.00774846734504044 +484 44 training.batch_size 0.0 +484 44 training.label_smoothing 0.15123663182224892 +484 45 model.embedding_dim 1.0 +484 45 optimizer.lr 0.004759493424344131 +484 45 training.batch_size 1.0 +484 45 training.label_smoothing 0.2277106063128852 +484 46 model.embedding_dim 0.0 +484 46 optimizer.lr 0.0037877268279257717 +484 46 training.batch_size 2.0 +484 46 training.label_smoothing 0.06937023892142849 +484 47 model.embedding_dim 1.0 +484 47 optimizer.lr 0.004645420650593799 +484 47 training.batch_size 0.0 +484 47 training.label_smoothing 0.7077097866803844 +484 48 model.embedding_dim 2.0 +484 48 optimizer.lr 0.04062689812429602 +484 48 training.batch_size 2.0 +484 48 training.label_smoothing 0.02003100755828248 +484 49 model.embedding_dim 1.0 +484 49 optimizer.lr 0.010285344528498323 +484 49 training.batch_size 0.0 +484 49 training.label_smoothing 0.3450196587406428 +484 50 model.embedding_dim 1.0 +484 50 optimizer.lr 0.012554460170160884 +484 50 training.batch_size 1.0 +484 50 training.label_smoothing 0.018520186347667406 +484 51 model.embedding_dim 1.0 +484 51 optimizer.lr 0.001468057176537426 +484 51 training.batch_size 2.0 +484 51 training.label_smoothing 0.7089528524214511 +484 52 model.embedding_dim 1.0 +484 52 optimizer.lr 0.0017226167255526377 +484 52 training.batch_size 0.0 +484 52 training.label_smoothing 0.7366853531066762 +484 53 model.embedding_dim 0.0 +484 53 optimizer.lr 0.0015776674086985716 +484 53 training.batch_size 0.0 +484 53 training.label_smoothing 0.3481710711587876 +484 54 model.embedding_dim 2.0 +484 54 optimizer.lr 0.08137833292825017 +484 54 training.batch_size 0.0 +484 54 training.label_smoothing 0.005645223817084671 +484 55 
model.embedding_dim 1.0 +484 55 optimizer.lr 0.0016762987440484298 +484 55 training.batch_size 2.0 +484 55 training.label_smoothing 0.19982509327135423 +484 56 model.embedding_dim 1.0 +484 56 optimizer.lr 0.06301600290178937 +484 56 training.batch_size 0.0 +484 56 training.label_smoothing 0.5698272429278748 +484 57 model.embedding_dim 1.0 +484 57 optimizer.lr 0.00127115936495911 +484 57 training.batch_size 0.0 +484 57 training.label_smoothing 0.004927377091847662 +484 58 model.embedding_dim 1.0 +484 58 optimizer.lr 0.004562020434257058 +484 58 training.batch_size 1.0 +484 58 training.label_smoothing 0.010375065808522278 +484 59 model.embedding_dim 1.0 +484 59 optimizer.lr 0.06360042546770045 +484 59 training.batch_size 2.0 +484 59 training.label_smoothing 0.0012490784134734868 +484 60 model.embedding_dim 0.0 +484 60 optimizer.lr 0.001228056518506378 +484 60 training.batch_size 2.0 +484 60 training.label_smoothing 0.0014227232936302285 +484 61 model.embedding_dim 2.0 +484 61 optimizer.lr 0.00576142408011612 +484 61 training.batch_size 2.0 +484 61 training.label_smoothing 0.08058629116911027 +484 62 model.embedding_dim 0.0 +484 62 optimizer.lr 0.0012110781053337237 +484 62 training.batch_size 2.0 +484 62 training.label_smoothing 0.0036500803021459385 +484 63 model.embedding_dim 2.0 +484 63 optimizer.lr 0.019598794052679343 +484 63 training.batch_size 2.0 +484 63 training.label_smoothing 0.0015640837143763616 +484 64 model.embedding_dim 2.0 +484 64 optimizer.lr 0.004456848876151921 +484 64 training.batch_size 2.0 +484 64 training.label_smoothing 0.0038783871648899525 +484 65 model.embedding_dim 2.0 +484 65 optimizer.lr 0.0018062474406415545 +484 65 training.batch_size 2.0 +484 65 training.label_smoothing 0.010808231352761608 +484 66 model.embedding_dim 2.0 +484 66 optimizer.lr 0.03187390637464259 +484 66 training.batch_size 1.0 +484 66 training.label_smoothing 0.4824688626113498 +484 67 model.embedding_dim 2.0 +484 67 optimizer.lr 0.00926131567701687 +484 67 
training.batch_size 2.0 +484 67 training.label_smoothing 0.0011620670259078392 +484 68 model.embedding_dim 1.0 +484 68 optimizer.lr 0.0067719043533720725 +484 68 training.batch_size 2.0 +484 68 training.label_smoothing 0.006176083796609404 +484 69 model.embedding_dim 2.0 +484 69 optimizer.lr 0.08794165186408447 +484 69 training.batch_size 0.0 +484 69 training.label_smoothing 0.003505045796245875 +484 70 model.embedding_dim 0.0 +484 70 optimizer.lr 0.002947639309199275 +484 70 training.batch_size 1.0 +484 70 training.label_smoothing 0.2538551010540272 +484 71 model.embedding_dim 2.0 +484 71 optimizer.lr 0.014123329526390603 +484 71 training.batch_size 0.0 +484 71 training.label_smoothing 0.018367656964412817 +484 72 model.embedding_dim 1.0 +484 72 optimizer.lr 0.029809907525930414 +484 72 training.batch_size 1.0 +484 72 training.label_smoothing 0.02167896312002329 +484 73 model.embedding_dim 0.0 +484 73 optimizer.lr 0.05339435953749938 +484 73 training.batch_size 0.0 +484 73 training.label_smoothing 0.003997674694551687 +484 74 model.embedding_dim 0.0 +484 74 optimizer.lr 0.0011426934441370372 +484 74 training.batch_size 0.0 +484 74 training.label_smoothing 0.594328172607812 +484 75 model.embedding_dim 1.0 +484 75 optimizer.lr 0.003563426556131977 +484 75 training.batch_size 1.0 +484 75 training.label_smoothing 0.0021779219160137126 +484 76 model.embedding_dim 0.0 +484 76 optimizer.lr 0.041942985951485115 +484 76 training.batch_size 2.0 +484 76 training.label_smoothing 0.04889716631458752 +484 77 model.embedding_dim 2.0 +484 77 optimizer.lr 0.002052009652628029 +484 77 training.batch_size 2.0 +484 77 training.label_smoothing 0.023761555979837454 +484 78 model.embedding_dim 2.0 +484 78 optimizer.lr 0.007512050654889541 +484 78 training.batch_size 0.0 +484 78 training.label_smoothing 0.039041928285287596 +484 79 model.embedding_dim 1.0 +484 79 optimizer.lr 0.004907250038959538 +484 79 training.batch_size 1.0 +484 79 training.label_smoothing 0.036140916532114986 +484 
80 model.embedding_dim 2.0 +484 80 optimizer.lr 0.09855779052941432 +484 80 training.batch_size 0.0 +484 80 training.label_smoothing 0.002409018263440046 +484 81 model.embedding_dim 1.0 +484 81 optimizer.lr 0.019709979460676888 +484 81 training.batch_size 2.0 +484 81 training.label_smoothing 0.007915593776759511 +484 82 model.embedding_dim 1.0 +484 82 optimizer.lr 0.0024929336400188644 +484 82 training.batch_size 0.0 +484 82 training.label_smoothing 0.0012504475768590484 +484 83 model.embedding_dim 2.0 +484 83 optimizer.lr 0.0037069025149044986 +484 83 training.batch_size 2.0 +484 83 training.label_smoothing 0.2733550437271728 +484 84 model.embedding_dim 2.0 +484 84 optimizer.lr 0.004891618363180678 +484 84 training.batch_size 2.0 +484 84 training.label_smoothing 0.001995703002810509 +484 85 model.embedding_dim 2.0 +484 85 optimizer.lr 0.06191986601463808 +484 85 training.batch_size 2.0 +484 85 training.label_smoothing 0.002614482500869504 +484 86 model.embedding_dim 0.0 +484 86 optimizer.lr 0.0030540692421058093 +484 86 training.batch_size 0.0 +484 86 training.label_smoothing 0.006293091704373737 +484 87 model.embedding_dim 1.0 +484 87 optimizer.lr 0.014333096335531176 +484 87 training.batch_size 0.0 +484 87 training.label_smoothing 0.007825981847770709 +484 88 model.embedding_dim 2.0 +484 88 optimizer.lr 0.004484173556839297 +484 88 training.batch_size 1.0 +484 88 training.label_smoothing 0.05187706031672773 +484 89 model.embedding_dim 0.0 +484 89 optimizer.lr 0.009586212018240885 +484 89 training.batch_size 1.0 +484 89 training.label_smoothing 0.0046925970144271485 +484 90 model.embedding_dim 1.0 +484 90 optimizer.lr 0.004012163377642391 +484 90 training.batch_size 1.0 +484 90 training.label_smoothing 0.05840093107341497 +484 91 model.embedding_dim 1.0 +484 91 optimizer.lr 0.0015203773399703154 +484 91 training.batch_size 1.0 +484 91 training.label_smoothing 0.0020499021338883966 +484 92 model.embedding_dim 2.0 +484 92 optimizer.lr 0.07984690950697138 +484 92 
training.batch_size 1.0 +484 92 training.label_smoothing 0.036075357679548566 +484 93 model.embedding_dim 2.0 +484 93 optimizer.lr 0.034458639412584244 +484 93 training.batch_size 0.0 +484 93 training.label_smoothing 0.2260519277220079 +484 94 model.embedding_dim 1.0 +484 94 optimizer.lr 0.001132511760951685 +484 94 training.batch_size 2.0 +484 94 training.label_smoothing 0.0010745193618085834 +484 95 model.embedding_dim 2.0 +484 95 optimizer.lr 0.001197809806526959 +484 95 training.batch_size 1.0 +484 95 training.label_smoothing 0.08166972069093718 +484 96 model.embedding_dim 2.0 +484 96 optimizer.lr 0.0017664086816239672 +484 96 training.batch_size 2.0 +484 96 training.label_smoothing 0.0010487986406828315 +484 97 model.embedding_dim 0.0 +484 97 optimizer.lr 0.0021219795952385797 +484 97 training.batch_size 2.0 +484 97 training.label_smoothing 0.13856170239947052 +484 98 model.embedding_dim 2.0 +484 98 optimizer.lr 0.056047057639799565 +484 98 training.batch_size 0.0 +484 98 training.label_smoothing 0.19675425613352995 +484 99 model.embedding_dim 0.0 +484 99 optimizer.lr 0.023186743462487513 +484 99 training.batch_size 0.0 +484 99 training.label_smoothing 0.24811100906276087 +484 100 model.embedding_dim 1.0 +484 100 optimizer.lr 0.02437127848639064 +484 100 training.batch_size 2.0 +484 100 training.label_smoothing 0.010579798035095832 +484 1 dataset """kinships""" +484 1 model """proje""" +484 1 loss """crossentropy""" +484 1 regularizer """no""" +484 1 optimizer """adam""" +484 1 training_loop """lcwa""" +484 1 evaluator """rankbased""" +484 2 dataset """kinships""" +484 2 model """proje""" +484 2 loss """crossentropy""" +484 2 regularizer """no""" +484 2 optimizer """adam""" +484 2 training_loop """lcwa""" +484 2 evaluator """rankbased""" +484 3 dataset """kinships""" +484 3 model """proje""" +484 3 loss """crossentropy""" +484 3 regularizer """no""" +484 3 optimizer """adam""" +484 3 training_loop """lcwa""" +484 3 evaluator """rankbased""" +484 4 dataset 
"""kinships""" +484 4 model """proje""" +484 4 loss """crossentropy""" +484 4 regularizer """no""" +484 4 optimizer """adam""" +484 4 training_loop """lcwa""" +484 4 evaluator """rankbased""" +484 5 dataset """kinships""" +484 5 model """proje""" +484 5 loss """crossentropy""" +484 5 regularizer """no""" +484 5 optimizer """adam""" +484 5 training_loop """lcwa""" +484 5 evaluator """rankbased""" +484 6 dataset """kinships""" +484 6 model """proje""" +484 6 loss """crossentropy""" +484 6 regularizer """no""" +484 6 optimizer """adam""" +484 6 training_loop """lcwa""" +484 6 evaluator """rankbased""" +484 7 dataset """kinships""" +484 7 model """proje""" +484 7 loss """crossentropy""" +484 7 regularizer """no""" +484 7 optimizer """adam""" +484 7 training_loop """lcwa""" +484 7 evaluator """rankbased""" +484 8 dataset """kinships""" +484 8 model """proje""" +484 8 loss """crossentropy""" +484 8 regularizer """no""" +484 8 optimizer """adam""" +484 8 training_loop """lcwa""" +484 8 evaluator """rankbased""" +484 9 dataset """kinships""" +484 9 model """proje""" +484 9 loss """crossentropy""" +484 9 regularizer """no""" +484 9 optimizer """adam""" +484 9 training_loop """lcwa""" +484 9 evaluator """rankbased""" +484 10 dataset """kinships""" +484 10 model """proje""" +484 10 loss """crossentropy""" +484 10 regularizer """no""" +484 10 optimizer """adam""" +484 10 training_loop """lcwa""" +484 10 evaluator """rankbased""" +484 11 dataset """kinships""" +484 11 model """proje""" +484 11 loss """crossentropy""" +484 11 regularizer """no""" +484 11 optimizer """adam""" +484 11 training_loop """lcwa""" +484 11 evaluator """rankbased""" +484 12 dataset """kinships""" +484 12 model """proje""" +484 12 loss """crossentropy""" +484 12 regularizer """no""" +484 12 optimizer """adam""" +484 12 training_loop """lcwa""" +484 12 evaluator """rankbased""" +484 13 dataset """kinships""" +484 13 model """proje""" +484 13 loss """crossentropy""" +484 13 regularizer """no""" +484 13 
optimizer """adam""" +484 13 training_loop """lcwa""" +484 13 evaluator """rankbased""" +484 14 dataset """kinships""" +484 14 model """proje""" +484 14 loss """crossentropy""" +484 14 regularizer """no""" +484 14 optimizer """adam""" +484 14 training_loop """lcwa""" +484 14 evaluator """rankbased""" +484 15 dataset """kinships""" +484 15 model """proje""" +484 15 loss """crossentropy""" +484 15 regularizer """no""" +484 15 optimizer """adam""" +484 15 training_loop """lcwa""" +484 15 evaluator """rankbased""" +484 16 dataset """kinships""" +484 16 model """proje""" +484 16 loss """crossentropy""" +484 16 regularizer """no""" +484 16 optimizer """adam""" +484 16 training_loop """lcwa""" +484 16 evaluator """rankbased""" +484 17 dataset """kinships""" +484 17 model """proje""" +484 17 loss """crossentropy""" +484 17 regularizer """no""" +484 17 optimizer """adam""" +484 17 training_loop """lcwa""" +484 17 evaluator """rankbased""" +484 18 dataset """kinships""" +484 18 model """proje""" +484 18 loss """crossentropy""" +484 18 regularizer """no""" +484 18 optimizer """adam""" +484 18 training_loop """lcwa""" +484 18 evaluator """rankbased""" +484 19 dataset """kinships""" +484 19 model """proje""" +484 19 loss """crossentropy""" +484 19 regularizer """no""" +484 19 optimizer """adam""" +484 19 training_loop """lcwa""" +484 19 evaluator """rankbased""" +484 20 dataset """kinships""" +484 20 model """proje""" +484 20 loss """crossentropy""" +484 20 regularizer """no""" +484 20 optimizer """adam""" +484 20 training_loop """lcwa""" +484 20 evaluator """rankbased""" +484 21 dataset """kinships""" +484 21 model """proje""" +484 21 loss """crossentropy""" +484 21 regularizer """no""" +484 21 optimizer """adam""" +484 21 training_loop """lcwa""" +484 21 evaluator """rankbased""" +484 22 dataset """kinships""" +484 22 model """proje""" +484 22 loss """crossentropy""" +484 22 regularizer """no""" +484 22 optimizer """adam""" +484 22 training_loop """lcwa""" +484 22 evaluator 
"""rankbased""" +484 23 dataset """kinships""" +484 23 model """proje""" +484 23 loss """crossentropy""" +484 23 regularizer """no""" +484 23 optimizer """adam""" +484 23 training_loop """lcwa""" +484 23 evaluator """rankbased""" +484 24 dataset """kinships""" +484 24 model """proje""" +484 24 loss """crossentropy""" +484 24 regularizer """no""" +484 24 optimizer """adam""" +484 24 training_loop """lcwa""" +484 24 evaluator """rankbased""" +484 25 dataset """kinships""" +484 25 model """proje""" +484 25 loss """crossentropy""" +484 25 regularizer """no""" +484 25 optimizer """adam""" +484 25 training_loop """lcwa""" +484 25 evaluator """rankbased""" +484 26 dataset """kinships""" +484 26 model """proje""" +484 26 loss """crossentropy""" +484 26 regularizer """no""" +484 26 optimizer """adam""" +484 26 training_loop """lcwa""" +484 26 evaluator """rankbased""" +484 27 dataset """kinships""" +484 27 model """proje""" +484 27 loss """crossentropy""" +484 27 regularizer """no""" +484 27 optimizer """adam""" +484 27 training_loop """lcwa""" +484 27 evaluator """rankbased""" +484 28 dataset """kinships""" +484 28 model """proje""" +484 28 loss """crossentropy""" +484 28 regularizer """no""" +484 28 optimizer """adam""" +484 28 training_loop """lcwa""" +484 28 evaluator """rankbased""" +484 29 dataset """kinships""" +484 29 model """proje""" +484 29 loss """crossentropy""" +484 29 regularizer """no""" +484 29 optimizer """adam""" +484 29 training_loop """lcwa""" +484 29 evaluator """rankbased""" +484 30 dataset """kinships""" +484 30 model """proje""" +484 30 loss """crossentropy""" +484 30 regularizer """no""" +484 30 optimizer """adam""" +484 30 training_loop """lcwa""" +484 30 evaluator """rankbased""" +484 31 dataset """kinships""" +484 31 model """proje""" +484 31 loss """crossentropy""" +484 31 regularizer """no""" +484 31 optimizer """adam""" +484 31 training_loop """lcwa""" +484 31 evaluator """rankbased""" +484 32 dataset """kinships""" +484 32 model """proje""" 
+484 32 loss """crossentropy""" +484 32 regularizer """no""" +484 32 optimizer """adam""" +484 32 training_loop """lcwa""" +484 32 evaluator """rankbased""" +484 33 dataset """kinships""" +484 33 model """proje""" +484 33 loss """crossentropy""" +484 33 regularizer """no""" +484 33 optimizer """adam""" +484 33 training_loop """lcwa""" +484 33 evaluator """rankbased""" +484 34 dataset """kinships""" +484 34 model """proje""" +484 34 loss """crossentropy""" +484 34 regularizer """no""" +484 34 optimizer """adam""" +484 34 training_loop """lcwa""" +484 34 evaluator """rankbased""" +484 35 dataset """kinships""" +484 35 model """proje""" +484 35 loss """crossentropy""" +484 35 regularizer """no""" +484 35 optimizer """adam""" +484 35 training_loop """lcwa""" +484 35 evaluator """rankbased""" +484 36 dataset """kinships""" +484 36 model """proje""" +484 36 loss """crossentropy""" +484 36 regularizer """no""" +484 36 optimizer """adam""" +484 36 training_loop """lcwa""" +484 36 evaluator """rankbased""" +484 37 dataset """kinships""" +484 37 model """proje""" +484 37 loss """crossentropy""" +484 37 regularizer """no""" +484 37 optimizer """adam""" +484 37 training_loop """lcwa""" +484 37 evaluator """rankbased""" +484 38 dataset """kinships""" +484 38 model """proje""" +484 38 loss """crossentropy""" +484 38 regularizer """no""" +484 38 optimizer """adam""" +484 38 training_loop """lcwa""" +484 38 evaluator """rankbased""" +484 39 dataset """kinships""" +484 39 model """proje""" +484 39 loss """crossentropy""" +484 39 regularizer """no""" +484 39 optimizer """adam""" +484 39 training_loop """lcwa""" +484 39 evaluator """rankbased""" +484 40 dataset """kinships""" +484 40 model """proje""" +484 40 loss """crossentropy""" +484 40 regularizer """no""" +484 40 optimizer """adam""" +484 40 training_loop """lcwa""" +484 40 evaluator """rankbased""" +484 41 dataset """kinships""" +484 41 model """proje""" +484 41 loss """crossentropy""" +484 41 regularizer """no""" +484 41 
optimizer """adam""" +484 41 training_loop """lcwa""" +484 41 evaluator """rankbased""" +484 42 dataset """kinships""" +484 42 model """proje""" +484 42 loss """crossentropy""" +484 42 regularizer """no""" +484 42 optimizer """adam""" +484 42 training_loop """lcwa""" +484 42 evaluator """rankbased""" +484 43 dataset """kinships""" +484 43 model """proje""" +484 43 loss """crossentropy""" +484 43 regularizer """no""" +484 43 optimizer """adam""" +484 43 training_loop """lcwa""" +484 43 evaluator """rankbased""" +484 44 dataset """kinships""" +484 44 model """proje""" +484 44 loss """crossentropy""" +484 44 regularizer """no""" +484 44 optimizer """adam""" +484 44 training_loop """lcwa""" +484 44 evaluator """rankbased""" +484 45 dataset """kinships""" +484 45 model """proje""" +484 45 loss """crossentropy""" +484 45 regularizer """no""" +484 45 optimizer """adam""" +484 45 training_loop """lcwa""" +484 45 evaluator """rankbased""" +484 46 dataset """kinships""" +484 46 model """proje""" +484 46 loss """crossentropy""" +484 46 regularizer """no""" +484 46 optimizer """adam""" +484 46 training_loop """lcwa""" +484 46 evaluator """rankbased""" +484 47 dataset """kinships""" +484 47 model """proje""" +484 47 loss """crossentropy""" +484 47 regularizer """no""" +484 47 optimizer """adam""" +484 47 training_loop """lcwa""" +484 47 evaluator """rankbased""" +484 48 dataset """kinships""" +484 48 model """proje""" +484 48 loss """crossentropy""" +484 48 regularizer """no""" +484 48 optimizer """adam""" +484 48 training_loop """lcwa""" +484 48 evaluator """rankbased""" +484 49 dataset """kinships""" +484 49 model """proje""" +484 49 loss """crossentropy""" +484 49 regularizer """no""" +484 49 optimizer """adam""" +484 49 training_loop """lcwa""" +484 49 evaluator """rankbased""" +484 50 dataset """kinships""" +484 50 model """proje""" +484 50 loss """crossentropy""" +484 50 regularizer """no""" +484 50 optimizer """adam""" +484 50 training_loop """lcwa""" +484 50 evaluator 
"""rankbased""" +484 51 dataset """kinships""" +484 51 model """proje""" +484 51 loss """crossentropy""" +484 51 regularizer """no""" +484 51 optimizer """adam""" +484 51 training_loop """lcwa""" +484 51 evaluator """rankbased""" +484 52 dataset """kinships""" +484 52 model """proje""" +484 52 loss """crossentropy""" +484 52 regularizer """no""" +484 52 optimizer """adam""" +484 52 training_loop """lcwa""" +484 52 evaluator """rankbased""" +484 53 dataset """kinships""" +484 53 model """proje""" +484 53 loss """crossentropy""" +484 53 regularizer """no""" +484 53 optimizer """adam""" +484 53 training_loop """lcwa""" +484 53 evaluator """rankbased""" +484 54 dataset """kinships""" +484 54 model """proje""" +484 54 loss """crossentropy""" +484 54 regularizer """no""" +484 54 optimizer """adam""" +484 54 training_loop """lcwa""" +484 54 evaluator """rankbased""" +484 55 dataset """kinships""" +484 55 model """proje""" +484 55 loss """crossentropy""" +484 55 regularizer """no""" +484 55 optimizer """adam""" +484 55 training_loop """lcwa""" +484 55 evaluator """rankbased""" +484 56 dataset """kinships""" +484 56 model """proje""" +484 56 loss """crossentropy""" +484 56 regularizer """no""" +484 56 optimizer """adam""" +484 56 training_loop """lcwa""" +484 56 evaluator """rankbased""" +484 57 dataset """kinships""" +484 57 model """proje""" +484 57 loss """crossentropy""" +484 57 regularizer """no""" +484 57 optimizer """adam""" +484 57 training_loop """lcwa""" +484 57 evaluator """rankbased""" +484 58 dataset """kinships""" +484 58 model """proje""" +484 58 loss """crossentropy""" +484 58 regularizer """no""" +484 58 optimizer """adam""" +484 58 training_loop """lcwa""" +484 58 evaluator """rankbased""" +484 59 dataset """kinships""" +484 59 model """proje""" +484 59 loss """crossentropy""" +484 59 regularizer """no""" +484 59 optimizer """adam""" +484 59 training_loop """lcwa""" +484 59 evaluator """rankbased""" +484 60 dataset """kinships""" +484 60 model """proje""" 
+484 60 loss """crossentropy""" +484 60 regularizer """no""" +484 60 optimizer """adam""" +484 60 training_loop """lcwa""" +484 60 evaluator """rankbased""" +484 61 dataset """kinships""" +484 61 model """proje""" +484 61 loss """crossentropy""" +484 61 regularizer """no""" +484 61 optimizer """adam""" +484 61 training_loop """lcwa""" +484 61 evaluator """rankbased""" +484 62 dataset """kinships""" +484 62 model """proje""" +484 62 loss """crossentropy""" +484 62 regularizer """no""" +484 62 optimizer """adam""" +484 62 training_loop """lcwa""" +484 62 evaluator """rankbased""" +484 63 dataset """kinships""" +484 63 model """proje""" +484 63 loss """crossentropy""" +484 63 regularizer """no""" +484 63 optimizer """adam""" +484 63 training_loop """lcwa""" +484 63 evaluator """rankbased""" +484 64 dataset """kinships""" +484 64 model """proje""" +484 64 loss """crossentropy""" +484 64 regularizer """no""" +484 64 optimizer """adam""" +484 64 training_loop """lcwa""" +484 64 evaluator """rankbased""" +484 65 dataset """kinships""" +484 65 model """proje""" +484 65 loss """crossentropy""" +484 65 regularizer """no""" +484 65 optimizer """adam""" +484 65 training_loop """lcwa""" +484 65 evaluator """rankbased""" +484 66 dataset """kinships""" +484 66 model """proje""" +484 66 loss """crossentropy""" +484 66 regularizer """no""" +484 66 optimizer """adam""" +484 66 training_loop """lcwa""" +484 66 evaluator """rankbased""" +484 67 dataset """kinships""" +484 67 model """proje""" +484 67 loss """crossentropy""" +484 67 regularizer """no""" +484 67 optimizer """adam""" +484 67 training_loop """lcwa""" +484 67 evaluator """rankbased""" +484 68 dataset """kinships""" +484 68 model """proje""" +484 68 loss """crossentropy""" +484 68 regularizer """no""" +484 68 optimizer """adam""" +484 68 training_loop """lcwa""" +484 68 evaluator """rankbased""" +484 69 dataset """kinships""" +484 69 model """proje""" +484 69 loss """crossentropy""" +484 69 regularizer """no""" +484 69 
optimizer """adam""" +484 69 training_loop """lcwa""" +484 69 evaluator """rankbased""" +484 70 dataset """kinships""" +484 70 model """proje""" +484 70 loss """crossentropy""" +484 70 regularizer """no""" +484 70 optimizer """adam""" +484 70 training_loop """lcwa""" +484 70 evaluator """rankbased""" +484 71 dataset """kinships""" +484 71 model """proje""" +484 71 loss """crossentropy""" +484 71 regularizer """no""" +484 71 optimizer """adam""" +484 71 training_loop """lcwa""" +484 71 evaluator """rankbased""" +484 72 dataset """kinships""" +484 72 model """proje""" +484 72 loss """crossentropy""" +484 72 regularizer """no""" +484 72 optimizer """adam""" +484 72 training_loop """lcwa""" +484 72 evaluator """rankbased""" +484 73 dataset """kinships""" +484 73 model """proje""" +484 73 loss """crossentropy""" +484 73 regularizer """no""" +484 73 optimizer """adam""" +484 73 training_loop """lcwa""" +484 73 evaluator """rankbased""" +484 74 dataset """kinships""" +484 74 model """proje""" +484 74 loss """crossentropy""" +484 74 regularizer """no""" +484 74 optimizer """adam""" +484 74 training_loop """lcwa""" +484 74 evaluator """rankbased""" +484 75 dataset """kinships""" +484 75 model """proje""" +484 75 loss """crossentropy""" +484 75 regularizer """no""" +484 75 optimizer """adam""" +484 75 training_loop """lcwa""" +484 75 evaluator """rankbased""" +484 76 dataset """kinships""" +484 76 model """proje""" +484 76 loss """crossentropy""" +484 76 regularizer """no""" +484 76 optimizer """adam""" +484 76 training_loop """lcwa""" +484 76 evaluator """rankbased""" +484 77 dataset """kinships""" +484 77 model """proje""" +484 77 loss """crossentropy""" +484 77 regularizer """no""" +484 77 optimizer """adam""" +484 77 training_loop """lcwa""" +484 77 evaluator """rankbased""" +484 78 dataset """kinships""" +484 78 model """proje""" +484 78 loss """crossentropy""" +484 78 regularizer """no""" +484 78 optimizer """adam""" +484 78 training_loop """lcwa""" +484 78 evaluator 
"""rankbased""" +484 79 dataset """kinships""" +484 79 model """proje""" +484 79 loss """crossentropy""" +484 79 regularizer """no""" +484 79 optimizer """adam""" +484 79 training_loop """lcwa""" +484 79 evaluator """rankbased""" +484 80 dataset """kinships""" +484 80 model """proje""" +484 80 loss """crossentropy""" +484 80 regularizer """no""" +484 80 optimizer """adam""" +484 80 training_loop """lcwa""" +484 80 evaluator """rankbased""" +484 81 dataset """kinships""" +484 81 model """proje""" +484 81 loss """crossentropy""" +484 81 regularizer """no""" +484 81 optimizer """adam""" +484 81 training_loop """lcwa""" +484 81 evaluator """rankbased""" +484 82 dataset """kinships""" +484 82 model """proje""" +484 82 loss """crossentropy""" +484 82 regularizer """no""" +484 82 optimizer """adam""" +484 82 training_loop """lcwa""" +484 82 evaluator """rankbased""" +484 83 dataset """kinships""" +484 83 model """proje""" +484 83 loss """crossentropy""" +484 83 regularizer """no""" +484 83 optimizer """adam""" +484 83 training_loop """lcwa""" +484 83 evaluator """rankbased""" +484 84 dataset """kinships""" +484 84 model """proje""" +484 84 loss """crossentropy""" +484 84 regularizer """no""" +484 84 optimizer """adam""" +484 84 training_loop """lcwa""" +484 84 evaluator """rankbased""" +484 85 dataset """kinships""" +484 85 model """proje""" +484 85 loss """crossentropy""" +484 85 regularizer """no""" +484 85 optimizer """adam""" +484 85 training_loop """lcwa""" +484 85 evaluator """rankbased""" +484 86 dataset """kinships""" +484 86 model """proje""" +484 86 loss """crossentropy""" +484 86 regularizer """no""" +484 86 optimizer """adam""" +484 86 training_loop """lcwa""" +484 86 evaluator """rankbased""" +484 87 dataset """kinships""" +484 87 model """proje""" +484 87 loss """crossentropy""" +484 87 regularizer """no""" +484 87 optimizer """adam""" +484 87 training_loop """lcwa""" +484 87 evaluator """rankbased""" +484 88 dataset """kinships""" +484 88 model """proje""" 
+484 88 loss """crossentropy""" +484 88 regularizer """no""" +484 88 optimizer """adam""" +484 88 training_loop """lcwa""" +484 88 evaluator """rankbased""" +484 89 dataset """kinships""" +484 89 model """proje""" +484 89 loss """crossentropy""" +484 89 regularizer """no""" +484 89 optimizer """adam""" +484 89 training_loop """lcwa""" +484 89 evaluator """rankbased""" +484 90 dataset """kinships""" +484 90 model """proje""" +484 90 loss """crossentropy""" +484 90 regularizer """no""" +484 90 optimizer """adam""" +484 90 training_loop """lcwa""" +484 90 evaluator """rankbased""" +484 91 dataset """kinships""" +484 91 model """proje""" +484 91 loss """crossentropy""" +484 91 regularizer """no""" +484 91 optimizer """adam""" +484 91 training_loop """lcwa""" +484 91 evaluator """rankbased""" +484 92 dataset """kinships""" +484 92 model """proje""" +484 92 loss """crossentropy""" +484 92 regularizer """no""" +484 92 optimizer """adam""" +484 92 training_loop """lcwa""" +484 92 evaluator """rankbased""" +484 93 dataset """kinships""" +484 93 model """proje""" +484 93 loss """crossentropy""" +484 93 regularizer """no""" +484 93 optimizer """adam""" +484 93 training_loop """lcwa""" +484 93 evaluator """rankbased""" +484 94 dataset """kinships""" +484 94 model """proje""" +484 94 loss """crossentropy""" +484 94 regularizer """no""" +484 94 optimizer """adam""" +484 94 training_loop """lcwa""" +484 94 evaluator """rankbased""" +484 95 dataset """kinships""" +484 95 model """proje""" +484 95 loss """crossentropy""" +484 95 regularizer """no""" +484 95 optimizer """adam""" +484 95 training_loop """lcwa""" +484 95 evaluator """rankbased""" +484 96 dataset """kinships""" +484 96 model """proje""" +484 96 loss """crossentropy""" +484 96 regularizer """no""" +484 96 optimizer """adam""" +484 96 training_loop """lcwa""" +484 96 evaluator """rankbased""" +484 97 dataset """kinships""" +484 97 model """proje""" +484 97 loss """crossentropy""" +484 97 regularizer """no""" +484 97 
optimizer """adam""" +484 97 training_loop """lcwa""" +484 97 evaluator """rankbased""" +484 98 dataset """kinships""" +484 98 model """proje""" +484 98 loss """crossentropy""" +484 98 regularizer """no""" +484 98 optimizer """adam""" +484 98 training_loop """lcwa""" +484 98 evaluator """rankbased""" +484 99 dataset """kinships""" +484 99 model """proje""" +484 99 loss """crossentropy""" +484 99 regularizer """no""" +484 99 optimizer """adam""" +484 99 training_loop """lcwa""" +484 99 evaluator """rankbased""" +484 100 dataset """kinships""" +484 100 model """proje""" +484 100 loss """crossentropy""" +484 100 regularizer """no""" +484 100 optimizer """adam""" +484 100 training_loop """lcwa""" +484 100 evaluator """rankbased""" +485 1 model.embedding_dim 1.0 +485 1 optimizer.lr 0.0010492829587136134 +485 1 negative_sampler.num_negs_per_pos 9.0 +485 1 training.batch_size 1.0 +485 2 model.embedding_dim 1.0 +485 2 optimizer.lr 0.0383407856778827 +485 2 negative_sampler.num_negs_per_pos 18.0 +485 2 training.batch_size 1.0 +485 3 model.embedding_dim 2.0 +485 3 optimizer.lr 0.0013468473626007393 +485 3 negative_sampler.num_negs_per_pos 7.0 +485 3 training.batch_size 2.0 +485 4 model.embedding_dim 1.0 +485 4 optimizer.lr 0.0036178780355641416 +485 4 negative_sampler.num_negs_per_pos 31.0 +485 4 training.batch_size 1.0 +485 5 model.embedding_dim 2.0 +485 5 optimizer.lr 0.011910541750343543 +485 5 negative_sampler.num_negs_per_pos 52.0 +485 5 training.batch_size 2.0 +485 6 model.embedding_dim 1.0 +485 6 optimizer.lr 0.007472108285368447 +485 6 negative_sampler.num_negs_per_pos 46.0 +485 6 training.batch_size 2.0 +485 7 model.embedding_dim 0.0 +485 7 optimizer.lr 0.08929919097009033 +485 7 negative_sampler.num_negs_per_pos 54.0 +485 7 training.batch_size 0.0 +485 8 model.embedding_dim 1.0 +485 8 optimizer.lr 0.0033006584721796337 +485 8 negative_sampler.num_negs_per_pos 80.0 +485 8 training.batch_size 2.0 +485 9 model.embedding_dim 2.0 +485 9 optimizer.lr 0.07314477818610389 
+485 9 negative_sampler.num_negs_per_pos 2.0 +485 9 training.batch_size 0.0 +485 10 model.embedding_dim 1.0 +485 10 optimizer.lr 0.005418105969178126 +485 10 negative_sampler.num_negs_per_pos 74.0 +485 10 training.batch_size 2.0 +485 11 model.embedding_dim 2.0 +485 11 optimizer.lr 0.00831463073411624 +485 11 negative_sampler.num_negs_per_pos 44.0 +485 11 training.batch_size 1.0 +485 12 model.embedding_dim 2.0 +485 12 optimizer.lr 0.03385093310975607 +485 12 negative_sampler.num_negs_per_pos 43.0 +485 12 training.batch_size 0.0 +485 13 model.embedding_dim 0.0 +485 13 optimizer.lr 0.0561484673396494 +485 13 negative_sampler.num_negs_per_pos 64.0 +485 13 training.batch_size 1.0 +485 14 model.embedding_dim 1.0 +485 14 optimizer.lr 0.0073484851266663025 +485 14 negative_sampler.num_negs_per_pos 95.0 +485 14 training.batch_size 0.0 +485 15 model.embedding_dim 2.0 +485 15 optimizer.lr 0.019962401163822426 +485 15 negative_sampler.num_negs_per_pos 86.0 +485 15 training.batch_size 2.0 +485 16 model.embedding_dim 0.0 +485 16 optimizer.lr 0.04485634598948527 +485 16 negative_sampler.num_negs_per_pos 88.0 +485 16 training.batch_size 0.0 +485 17 model.embedding_dim 2.0 +485 17 optimizer.lr 0.0038606446891086407 +485 17 negative_sampler.num_negs_per_pos 81.0 +485 17 training.batch_size 2.0 +485 18 model.embedding_dim 1.0 +485 18 optimizer.lr 0.0011710528068966343 +485 18 negative_sampler.num_negs_per_pos 59.0 +485 18 training.batch_size 0.0 +485 19 model.embedding_dim 0.0 +485 19 optimizer.lr 0.026782285983392422 +485 19 negative_sampler.num_negs_per_pos 25.0 +485 19 training.batch_size 2.0 +485 20 model.embedding_dim 1.0 +485 20 optimizer.lr 0.012172237540957823 +485 20 negative_sampler.num_negs_per_pos 78.0 +485 20 training.batch_size 0.0 +485 21 model.embedding_dim 1.0 +485 21 optimizer.lr 0.052578912242259364 +485 21 negative_sampler.num_negs_per_pos 34.0 +485 21 training.batch_size 2.0 +485 22 model.embedding_dim 1.0 +485 22 optimizer.lr 0.020292238394997997 +485 22 
negative_sampler.num_negs_per_pos 49.0 +485 22 training.batch_size 1.0 +485 23 model.embedding_dim 0.0 +485 23 optimizer.lr 0.011329166281370663 +485 23 negative_sampler.num_negs_per_pos 33.0 +485 23 training.batch_size 1.0 +485 24 model.embedding_dim 2.0 +485 24 optimizer.lr 0.0019563817516306562 +485 24 negative_sampler.num_negs_per_pos 55.0 +485 24 training.batch_size 1.0 +485 25 model.embedding_dim 2.0 +485 25 optimizer.lr 0.04796637185348656 +485 25 negative_sampler.num_negs_per_pos 49.0 +485 25 training.batch_size 0.0 +485 26 model.embedding_dim 2.0 +485 26 optimizer.lr 0.025305480304817683 +485 26 negative_sampler.num_negs_per_pos 66.0 +485 26 training.batch_size 1.0 +485 27 model.embedding_dim 0.0 +485 27 optimizer.lr 0.0013437481590271312 +485 27 negative_sampler.num_negs_per_pos 9.0 +485 27 training.batch_size 1.0 +485 28 model.embedding_dim 1.0 +485 28 optimizer.lr 0.003086365464978794 +485 28 negative_sampler.num_negs_per_pos 19.0 +485 28 training.batch_size 0.0 +485 29 model.embedding_dim 2.0 +485 29 optimizer.lr 0.0017274250637989864 +485 29 negative_sampler.num_negs_per_pos 19.0 +485 29 training.batch_size 2.0 +485 30 model.embedding_dim 2.0 +485 30 optimizer.lr 0.009528795531129789 +485 30 negative_sampler.num_negs_per_pos 59.0 +485 30 training.batch_size 1.0 +485 31 model.embedding_dim 1.0 +485 31 optimizer.lr 0.00803087821473795 +485 31 negative_sampler.num_negs_per_pos 96.0 +485 31 training.batch_size 2.0 +485 32 model.embedding_dim 0.0 +485 32 optimizer.lr 0.02043106496118747 +485 32 negative_sampler.num_negs_per_pos 31.0 +485 32 training.batch_size 2.0 +485 33 model.embedding_dim 2.0 +485 33 optimizer.lr 0.003636240032459696 +485 33 negative_sampler.num_negs_per_pos 89.0 +485 33 training.batch_size 1.0 +485 34 model.embedding_dim 0.0 +485 34 optimizer.lr 0.002015657325689779 +485 34 negative_sampler.num_negs_per_pos 98.0 +485 34 training.batch_size 1.0 +485 35 model.embedding_dim 1.0 +485 35 optimizer.lr 0.015668675043844865 +485 35 
negative_sampler.num_negs_per_pos 56.0 +485 35 training.batch_size 1.0 +485 36 model.embedding_dim 2.0 +485 36 optimizer.lr 0.006220736993044831 +485 36 negative_sampler.num_negs_per_pos 94.0 +485 36 training.batch_size 2.0 +485 37 model.embedding_dim 1.0 +485 37 optimizer.lr 0.028956641829388678 +485 37 negative_sampler.num_negs_per_pos 38.0 +485 37 training.batch_size 1.0 +485 38 model.embedding_dim 2.0 +485 38 optimizer.lr 0.010605443307441564 +485 38 negative_sampler.num_negs_per_pos 58.0 +485 38 training.batch_size 1.0 +485 39 model.embedding_dim 0.0 +485 39 optimizer.lr 0.0012990846256090334 +485 39 negative_sampler.num_negs_per_pos 29.0 +485 39 training.batch_size 1.0 +485 40 model.embedding_dim 2.0 +485 40 optimizer.lr 0.007313162441399166 +485 40 negative_sampler.num_negs_per_pos 74.0 +485 40 training.batch_size 0.0 +485 41 model.embedding_dim 1.0 +485 41 optimizer.lr 0.022671271296339685 +485 41 negative_sampler.num_negs_per_pos 33.0 +485 41 training.batch_size 2.0 +485 42 model.embedding_dim 0.0 +485 42 optimizer.lr 0.01098873436680155 +485 42 negative_sampler.num_negs_per_pos 20.0 +485 42 training.batch_size 2.0 +485 43 model.embedding_dim 1.0 +485 43 optimizer.lr 0.001438915023109299 +485 43 negative_sampler.num_negs_per_pos 27.0 +485 43 training.batch_size 2.0 +485 44 model.embedding_dim 1.0 +485 44 optimizer.lr 0.011855964926566438 +485 44 negative_sampler.num_negs_per_pos 10.0 +485 44 training.batch_size 0.0 +485 45 model.embedding_dim 1.0 +485 45 optimizer.lr 0.012901373220521673 +485 45 negative_sampler.num_negs_per_pos 14.0 +485 45 training.batch_size 1.0 +485 46 model.embedding_dim 0.0 +485 46 optimizer.lr 0.0015103204167411113 +485 46 negative_sampler.num_negs_per_pos 18.0 +485 46 training.batch_size 2.0 +485 47 model.embedding_dim 2.0 +485 47 optimizer.lr 0.05343179127757623 +485 47 negative_sampler.num_negs_per_pos 12.0 +485 47 training.batch_size 2.0 +485 48 model.embedding_dim 0.0 +485 48 optimizer.lr 0.009358773611667686 +485 48 
negative_sampler.num_negs_per_pos 60.0 +485 48 training.batch_size 0.0 +485 49 model.embedding_dim 0.0 +485 49 optimizer.lr 0.018616471493367707 +485 49 negative_sampler.num_negs_per_pos 82.0 +485 49 training.batch_size 2.0 +485 50 model.embedding_dim 0.0 +485 50 optimizer.lr 0.0018674049498981645 +485 50 negative_sampler.num_negs_per_pos 21.0 +485 50 training.batch_size 1.0 +485 51 model.embedding_dim 0.0 +485 51 optimizer.lr 0.010994491224307618 +485 51 negative_sampler.num_negs_per_pos 55.0 +485 51 training.batch_size 1.0 +485 52 model.embedding_dim 2.0 +485 52 optimizer.lr 0.04787796988538945 +485 52 negative_sampler.num_negs_per_pos 68.0 +485 52 training.batch_size 1.0 +485 53 model.embedding_dim 1.0 +485 53 optimizer.lr 0.005242765415352997 +485 53 negative_sampler.num_negs_per_pos 45.0 +485 53 training.batch_size 2.0 +485 54 model.embedding_dim 2.0 +485 54 optimizer.lr 0.011793405245464286 +485 54 negative_sampler.num_negs_per_pos 21.0 +485 54 training.batch_size 0.0 +485 55 model.embedding_dim 0.0 +485 55 optimizer.lr 0.0022147525256730094 +485 55 negative_sampler.num_negs_per_pos 72.0 +485 55 training.batch_size 2.0 +485 56 model.embedding_dim 0.0 +485 56 optimizer.lr 0.0016321894777859274 +485 56 negative_sampler.num_negs_per_pos 53.0 +485 56 training.batch_size 0.0 +485 57 model.embedding_dim 1.0 +485 57 optimizer.lr 0.0012968152142400391 +485 57 negative_sampler.num_negs_per_pos 80.0 +485 57 training.batch_size 0.0 +485 58 model.embedding_dim 2.0 +485 58 optimizer.lr 0.001434684230049875 +485 58 negative_sampler.num_negs_per_pos 49.0 +485 58 training.batch_size 2.0 +485 59 model.embedding_dim 2.0 +485 59 optimizer.lr 0.09506423684983463 +485 59 negative_sampler.num_negs_per_pos 29.0 +485 59 training.batch_size 1.0 +485 60 model.embedding_dim 1.0 +485 60 optimizer.lr 0.014843185930857074 +485 60 negative_sampler.num_negs_per_pos 40.0 +485 60 training.batch_size 0.0 +485 61 model.embedding_dim 1.0 +485 61 optimizer.lr 0.06254882323966472 +485 61 
negative_sampler.num_negs_per_pos 68.0 +485 61 training.batch_size 0.0 +485 62 model.embedding_dim 1.0 +485 62 optimizer.lr 0.0028901870501909514 +485 62 negative_sampler.num_negs_per_pos 69.0 +485 62 training.batch_size 0.0 +485 63 model.embedding_dim 0.0 +485 63 optimizer.lr 0.007057773116966193 +485 63 negative_sampler.num_negs_per_pos 7.0 +485 63 training.batch_size 0.0 +485 64 model.embedding_dim 2.0 +485 64 optimizer.lr 0.013413785012211495 +485 64 negative_sampler.num_negs_per_pos 99.0 +485 64 training.batch_size 1.0 +485 65 model.embedding_dim 1.0 +485 65 optimizer.lr 0.0011216269559416283 +485 65 negative_sampler.num_negs_per_pos 91.0 +485 65 training.batch_size 2.0 +485 66 model.embedding_dim 1.0 +485 66 optimizer.lr 0.0034772477500522353 +485 66 negative_sampler.num_negs_per_pos 78.0 +485 66 training.batch_size 2.0 +485 67 model.embedding_dim 0.0 +485 67 optimizer.lr 0.07659691274129242 +485 67 negative_sampler.num_negs_per_pos 30.0 +485 67 training.batch_size 0.0 +485 68 model.embedding_dim 0.0 +485 68 optimizer.lr 0.019029313542120677 +485 68 negative_sampler.num_negs_per_pos 4.0 +485 68 training.batch_size 0.0 +485 69 model.embedding_dim 1.0 +485 69 optimizer.lr 0.004481022794547514 +485 69 negative_sampler.num_negs_per_pos 43.0 +485 69 training.batch_size 0.0 +485 70 model.embedding_dim 0.0 +485 70 optimizer.lr 0.0010742449770393605 +485 70 negative_sampler.num_negs_per_pos 10.0 +485 70 training.batch_size 2.0 +485 71 model.embedding_dim 1.0 +485 71 optimizer.lr 0.004011373384989583 +485 71 negative_sampler.num_negs_per_pos 58.0 +485 71 training.batch_size 0.0 +485 72 model.embedding_dim 1.0 +485 72 optimizer.lr 0.021350136105988706 +485 72 negative_sampler.num_negs_per_pos 92.0 +485 72 training.batch_size 2.0 +485 73 model.embedding_dim 0.0 +485 73 optimizer.lr 0.001882643602244736 +485 73 negative_sampler.num_negs_per_pos 83.0 +485 73 training.batch_size 1.0 +485 74 model.embedding_dim 2.0 +485 74 optimizer.lr 0.06658759336295139 +485 74 
negative_sampler.num_negs_per_pos 78.0 +485 74 training.batch_size 0.0 +485 75 model.embedding_dim 1.0 +485 75 optimizer.lr 0.05723908238764291 +485 75 negative_sampler.num_negs_per_pos 82.0 +485 75 training.batch_size 1.0 +485 76 model.embedding_dim 2.0 +485 76 optimizer.lr 0.0011807386875305906 +485 76 negative_sampler.num_negs_per_pos 2.0 +485 76 training.batch_size 0.0 +485 77 model.embedding_dim 2.0 +485 77 optimizer.lr 0.0018031443052583093 +485 77 negative_sampler.num_negs_per_pos 38.0 +485 77 training.batch_size 0.0 +485 78 model.embedding_dim 0.0 +485 78 optimizer.lr 0.0225454002424104 +485 78 negative_sampler.num_negs_per_pos 21.0 +485 78 training.batch_size 2.0 +485 79 model.embedding_dim 1.0 +485 79 optimizer.lr 0.0010122007697434322 +485 79 negative_sampler.num_negs_per_pos 45.0 +485 79 training.batch_size 1.0 +485 80 model.embedding_dim 1.0 +485 80 optimizer.lr 0.04999409212829635 +485 80 negative_sampler.num_negs_per_pos 51.0 +485 80 training.batch_size 2.0 +485 81 model.embedding_dim 1.0 +485 81 optimizer.lr 0.06777682649540542 +485 81 negative_sampler.num_negs_per_pos 12.0 +485 81 training.batch_size 0.0 +485 82 model.embedding_dim 2.0 +485 82 optimizer.lr 0.003097657726924468 +485 82 negative_sampler.num_negs_per_pos 94.0 +485 82 training.batch_size 2.0 +485 83 model.embedding_dim 1.0 +485 83 optimizer.lr 0.011180170096140944 +485 83 negative_sampler.num_negs_per_pos 9.0 +485 83 training.batch_size 2.0 +485 84 model.embedding_dim 1.0 +485 84 optimizer.lr 0.025750345233277905 +485 84 negative_sampler.num_negs_per_pos 70.0 +485 84 training.batch_size 2.0 +485 85 model.embedding_dim 1.0 +485 85 optimizer.lr 0.0013536439368654358 +485 85 negative_sampler.num_negs_per_pos 64.0 +485 85 training.batch_size 1.0 +485 86 model.embedding_dim 2.0 +485 86 optimizer.lr 0.08580411767335093 +485 86 negative_sampler.num_negs_per_pos 68.0 +485 86 training.batch_size 2.0 +485 87 model.embedding_dim 1.0 +485 87 optimizer.lr 0.0029696329633335647 +485 87 
negative_sampler.num_negs_per_pos 29.0 +485 87 training.batch_size 2.0 +485 88 model.embedding_dim 1.0 +485 88 optimizer.lr 0.007230037875448805 +485 88 negative_sampler.num_negs_per_pos 25.0 +485 88 training.batch_size 1.0 +485 89 model.embedding_dim 2.0 +485 89 optimizer.lr 0.004867264320346378 +485 89 negative_sampler.num_negs_per_pos 44.0 +485 89 training.batch_size 0.0 +485 90 model.embedding_dim 2.0 +485 90 optimizer.lr 0.004747479812768953 +485 90 negative_sampler.num_negs_per_pos 44.0 +485 90 training.batch_size 2.0 +485 91 model.embedding_dim 0.0 +485 91 optimizer.lr 0.004669001873749318 +485 91 negative_sampler.num_negs_per_pos 10.0 +485 91 training.batch_size 0.0 +485 92 model.embedding_dim 0.0 +485 92 optimizer.lr 0.07451056885706486 +485 92 negative_sampler.num_negs_per_pos 54.0 +485 92 training.batch_size 1.0 +485 93 model.embedding_dim 1.0 +485 93 optimizer.lr 0.06457491293898661 +485 93 negative_sampler.num_negs_per_pos 59.0 +485 93 training.batch_size 1.0 +485 94 model.embedding_dim 2.0 +485 94 optimizer.lr 0.004423794179367948 +485 94 negative_sampler.num_negs_per_pos 29.0 +485 94 training.batch_size 2.0 +485 95 model.embedding_dim 2.0 +485 95 optimizer.lr 0.04155947087256906 +485 95 negative_sampler.num_negs_per_pos 71.0 +485 95 training.batch_size 2.0 +485 96 model.embedding_dim 0.0 +485 96 optimizer.lr 0.09307376586457436 +485 96 negative_sampler.num_negs_per_pos 30.0 +485 96 training.batch_size 1.0 +485 97 model.embedding_dim 1.0 +485 97 optimizer.lr 0.016182644891312187 +485 97 negative_sampler.num_negs_per_pos 67.0 +485 97 training.batch_size 1.0 +485 98 model.embedding_dim 0.0 +485 98 optimizer.lr 0.0027038513854475265 +485 98 negative_sampler.num_negs_per_pos 20.0 +485 98 training.batch_size 2.0 +485 99 model.embedding_dim 0.0 +485 99 optimizer.lr 0.09528320101579772 +485 99 negative_sampler.num_negs_per_pos 57.0 +485 99 training.batch_size 0.0 +485 100 model.embedding_dim 0.0 +485 100 optimizer.lr 0.08587535441221333 +485 100 
negative_sampler.num_negs_per_pos 90.0 +485 100 training.batch_size 0.0 +485 1 dataset """kinships""" +485 1 model """proje""" +485 1 loss """bceaftersigmoid""" +485 1 regularizer """no""" +485 1 optimizer """adam""" +485 1 training_loop """owa""" +485 1 negative_sampler """basic""" +485 1 evaluator """rankbased""" +485 2 dataset """kinships""" +485 2 model """proje""" +485 2 loss """bceaftersigmoid""" +485 2 regularizer """no""" +485 2 optimizer """adam""" +485 2 training_loop """owa""" +485 2 negative_sampler """basic""" +485 2 evaluator """rankbased""" +485 3 dataset """kinships""" +485 3 model """proje""" +485 3 loss """bceaftersigmoid""" +485 3 regularizer """no""" +485 3 optimizer """adam""" +485 3 training_loop """owa""" +485 3 negative_sampler """basic""" +485 3 evaluator """rankbased""" +485 4 dataset """kinships""" +485 4 model """proje""" +485 4 loss """bceaftersigmoid""" +485 4 regularizer """no""" +485 4 optimizer """adam""" +485 4 training_loop """owa""" +485 4 negative_sampler """basic""" +485 4 evaluator """rankbased""" +485 5 dataset """kinships""" +485 5 model """proje""" +485 5 loss """bceaftersigmoid""" +485 5 regularizer """no""" +485 5 optimizer """adam""" +485 5 training_loop """owa""" +485 5 negative_sampler """basic""" +485 5 evaluator """rankbased""" +485 6 dataset """kinships""" +485 6 model """proje""" +485 6 loss """bceaftersigmoid""" +485 6 regularizer """no""" +485 6 optimizer """adam""" +485 6 training_loop """owa""" +485 6 negative_sampler """basic""" +485 6 evaluator """rankbased""" +485 7 dataset """kinships""" +485 7 model """proje""" +485 7 loss """bceaftersigmoid""" +485 7 regularizer """no""" +485 7 optimizer """adam""" +485 7 training_loop """owa""" +485 7 negative_sampler """basic""" +485 7 evaluator """rankbased""" +485 8 dataset """kinships""" +485 8 model """proje""" +485 8 loss """bceaftersigmoid""" +485 8 regularizer """no""" +485 8 optimizer """adam""" +485 8 training_loop """owa""" +485 8 negative_sampler """basic""" 
+485 8 evaluator """rankbased""" +485 9 dataset """kinships""" +485 9 model """proje""" +485 9 loss """bceaftersigmoid""" +485 9 regularizer """no""" +485 9 optimizer """adam""" +485 9 training_loop """owa""" +485 9 negative_sampler """basic""" +485 9 evaluator """rankbased""" +485 10 dataset """kinships""" +485 10 model """proje""" +485 10 loss """bceaftersigmoid""" +485 10 regularizer """no""" +485 10 optimizer """adam""" +485 10 training_loop """owa""" +485 10 negative_sampler """basic""" +485 10 evaluator """rankbased""" +485 11 dataset """kinships""" +485 11 model """proje""" +485 11 loss """bceaftersigmoid""" +485 11 regularizer """no""" +485 11 optimizer """adam""" +485 11 training_loop """owa""" +485 11 negative_sampler """basic""" +485 11 evaluator """rankbased""" +485 12 dataset """kinships""" +485 12 model """proje""" +485 12 loss """bceaftersigmoid""" +485 12 regularizer """no""" +485 12 optimizer """adam""" +485 12 training_loop """owa""" +485 12 negative_sampler """basic""" +485 12 evaluator """rankbased""" +485 13 dataset """kinships""" +485 13 model """proje""" +485 13 loss """bceaftersigmoid""" +485 13 regularizer """no""" +485 13 optimizer """adam""" +485 13 training_loop """owa""" +485 13 negative_sampler """basic""" +485 13 evaluator """rankbased""" +485 14 dataset """kinships""" +485 14 model """proje""" +485 14 loss """bceaftersigmoid""" +485 14 regularizer """no""" +485 14 optimizer """adam""" +485 14 training_loop """owa""" +485 14 negative_sampler """basic""" +485 14 evaluator """rankbased""" +485 15 dataset """kinships""" +485 15 model """proje""" +485 15 loss """bceaftersigmoid""" +485 15 regularizer """no""" +485 15 optimizer """adam""" +485 15 training_loop """owa""" +485 15 negative_sampler """basic""" +485 15 evaluator """rankbased""" +485 16 dataset """kinships""" +485 16 model """proje""" +485 16 loss """bceaftersigmoid""" +485 16 regularizer """no""" +485 16 optimizer """adam""" +485 16 training_loop """owa""" +485 16 
negative_sampler """basic""" +485 16 evaluator """rankbased""" +485 17 dataset """kinships""" +485 17 model """proje""" +485 17 loss """bceaftersigmoid""" +485 17 regularizer """no""" +485 17 optimizer """adam""" +485 17 training_loop """owa""" +485 17 negative_sampler """basic""" +485 17 evaluator """rankbased""" +485 18 dataset """kinships""" +485 18 model """proje""" +485 18 loss """bceaftersigmoid""" +485 18 regularizer """no""" +485 18 optimizer """adam""" +485 18 training_loop """owa""" +485 18 negative_sampler """basic""" +485 18 evaluator """rankbased""" +485 19 dataset """kinships""" +485 19 model """proje""" +485 19 loss """bceaftersigmoid""" +485 19 regularizer """no""" +485 19 optimizer """adam""" +485 19 training_loop """owa""" +485 19 negative_sampler """basic""" +485 19 evaluator """rankbased""" +485 20 dataset """kinships""" +485 20 model """proje""" +485 20 loss """bceaftersigmoid""" +485 20 regularizer """no""" +485 20 optimizer """adam""" +485 20 training_loop """owa""" +485 20 negative_sampler """basic""" +485 20 evaluator """rankbased""" +485 21 dataset """kinships""" +485 21 model """proje""" +485 21 loss """bceaftersigmoid""" +485 21 regularizer """no""" +485 21 optimizer """adam""" +485 21 training_loop """owa""" +485 21 negative_sampler """basic""" +485 21 evaluator """rankbased""" +485 22 dataset """kinships""" +485 22 model """proje""" +485 22 loss """bceaftersigmoid""" +485 22 regularizer """no""" +485 22 optimizer """adam""" +485 22 training_loop """owa""" +485 22 negative_sampler """basic""" +485 22 evaluator """rankbased""" +485 23 dataset """kinships""" +485 23 model """proje""" +485 23 loss """bceaftersigmoid""" +485 23 regularizer """no""" +485 23 optimizer """adam""" +485 23 training_loop """owa""" +485 23 negative_sampler """basic""" +485 23 evaluator """rankbased""" +485 24 dataset """kinships""" +485 24 model """proje""" +485 24 loss """bceaftersigmoid""" +485 24 regularizer """no""" +485 24 optimizer """adam""" +485 24 
training_loop """owa""" +485 24 negative_sampler """basic""" +485 24 evaluator """rankbased""" +485 25 dataset """kinships""" +485 25 model """proje""" +485 25 loss """bceaftersigmoid""" +485 25 regularizer """no""" +485 25 optimizer """adam""" +485 25 training_loop """owa""" +485 25 negative_sampler """basic""" +485 25 evaluator """rankbased""" +485 26 dataset """kinships""" +485 26 model """proje""" +485 26 loss """bceaftersigmoid""" +485 26 regularizer """no""" +485 26 optimizer """adam""" +485 26 training_loop """owa""" +485 26 negative_sampler """basic""" +485 26 evaluator """rankbased""" +485 27 dataset """kinships""" +485 27 model """proje""" +485 27 loss """bceaftersigmoid""" +485 27 regularizer """no""" +485 27 optimizer """adam""" +485 27 training_loop """owa""" +485 27 negative_sampler """basic""" +485 27 evaluator """rankbased""" +485 28 dataset """kinships""" +485 28 model """proje""" +485 28 loss """bceaftersigmoid""" +485 28 regularizer """no""" +485 28 optimizer """adam""" +485 28 training_loop """owa""" +485 28 negative_sampler """basic""" +485 28 evaluator """rankbased""" +485 29 dataset """kinships""" +485 29 model """proje""" +485 29 loss """bceaftersigmoid""" +485 29 regularizer """no""" +485 29 optimizer """adam""" +485 29 training_loop """owa""" +485 29 negative_sampler """basic""" +485 29 evaluator """rankbased""" +485 30 dataset """kinships""" +485 30 model """proje""" +485 30 loss """bceaftersigmoid""" +485 30 regularizer """no""" +485 30 optimizer """adam""" +485 30 training_loop """owa""" +485 30 negative_sampler """basic""" +485 30 evaluator """rankbased""" +485 31 dataset """kinships""" +485 31 model """proje""" +485 31 loss """bceaftersigmoid""" +485 31 regularizer """no""" +485 31 optimizer """adam""" +485 31 training_loop """owa""" +485 31 negative_sampler """basic""" +485 31 evaluator """rankbased""" +485 32 dataset """kinships""" +485 32 model """proje""" +485 32 loss """bceaftersigmoid""" +485 32 regularizer """no""" +485 32 
optimizer """adam""" +485 32 training_loop """owa""" +485 32 negative_sampler """basic""" +485 32 evaluator """rankbased""" +485 33 dataset """kinships""" +485 33 model """proje""" +485 33 loss """bceaftersigmoid""" +485 33 regularizer """no""" +485 33 optimizer """adam""" +485 33 training_loop """owa""" +485 33 negative_sampler """basic""" +485 33 evaluator """rankbased""" +485 34 dataset """kinships""" +485 34 model """proje""" +485 34 loss """bceaftersigmoid""" +485 34 regularizer """no""" +485 34 optimizer """adam""" +485 34 training_loop """owa""" +485 34 negative_sampler """basic""" +485 34 evaluator """rankbased""" +485 35 dataset """kinships""" +485 35 model """proje""" +485 35 loss """bceaftersigmoid""" +485 35 regularizer """no""" +485 35 optimizer """adam""" +485 35 training_loop """owa""" +485 35 negative_sampler """basic""" +485 35 evaluator """rankbased""" +485 36 dataset """kinships""" +485 36 model """proje""" +485 36 loss """bceaftersigmoid""" +485 36 regularizer """no""" +485 36 optimizer """adam""" +485 36 training_loop """owa""" +485 36 negative_sampler """basic""" +485 36 evaluator """rankbased""" +485 37 dataset """kinships""" +485 37 model """proje""" +485 37 loss """bceaftersigmoid""" +485 37 regularizer """no""" +485 37 optimizer """adam""" +485 37 training_loop """owa""" +485 37 negative_sampler """basic""" +485 37 evaluator """rankbased""" +485 38 dataset """kinships""" +485 38 model """proje""" +485 38 loss """bceaftersigmoid""" +485 38 regularizer """no""" +485 38 optimizer """adam""" +485 38 training_loop """owa""" +485 38 negative_sampler """basic""" +485 38 evaluator """rankbased""" +485 39 dataset """kinships""" +485 39 model """proje""" +485 39 loss """bceaftersigmoid""" +485 39 regularizer """no""" +485 39 optimizer """adam""" +485 39 training_loop """owa""" +485 39 negative_sampler """basic""" +485 39 evaluator """rankbased""" +485 40 dataset """kinships""" +485 40 model """proje""" +485 40 loss """bceaftersigmoid""" +485 40 
regularizer """no""" +485 40 optimizer """adam""" +485 40 training_loop """owa""" +485 40 negative_sampler """basic""" +485 40 evaluator """rankbased""" +485 41 dataset """kinships""" +485 41 model """proje""" +485 41 loss """bceaftersigmoid""" +485 41 regularizer """no""" +485 41 optimizer """adam""" +485 41 training_loop """owa""" +485 41 negative_sampler """basic""" +485 41 evaluator """rankbased""" +485 42 dataset """kinships""" +485 42 model """proje""" +485 42 loss """bceaftersigmoid""" +485 42 regularizer """no""" +485 42 optimizer """adam""" +485 42 training_loop """owa""" +485 42 negative_sampler """basic""" +485 42 evaluator """rankbased""" +485 43 dataset """kinships""" +485 43 model """proje""" +485 43 loss """bceaftersigmoid""" +485 43 regularizer """no""" +485 43 optimizer """adam""" +485 43 training_loop """owa""" +485 43 negative_sampler """basic""" +485 43 evaluator """rankbased""" +485 44 dataset """kinships""" +485 44 model """proje""" +485 44 loss """bceaftersigmoid""" +485 44 regularizer """no""" +485 44 optimizer """adam""" +485 44 training_loop """owa""" +485 44 negative_sampler """basic""" +485 44 evaluator """rankbased""" +485 45 dataset """kinships""" +485 45 model """proje""" +485 45 loss """bceaftersigmoid""" +485 45 regularizer """no""" +485 45 optimizer """adam""" +485 45 training_loop """owa""" +485 45 negative_sampler """basic""" +485 45 evaluator """rankbased""" +485 46 dataset """kinships""" +485 46 model """proje""" +485 46 loss """bceaftersigmoid""" +485 46 regularizer """no""" +485 46 optimizer """adam""" +485 46 training_loop """owa""" +485 46 negative_sampler """basic""" +485 46 evaluator """rankbased""" +485 47 dataset """kinships""" +485 47 model """proje""" +485 47 loss """bceaftersigmoid""" +485 47 regularizer """no""" +485 47 optimizer """adam""" +485 47 training_loop """owa""" +485 47 negative_sampler """basic""" +485 47 evaluator """rankbased""" +485 48 dataset """kinships""" +485 48 model """proje""" +485 48 loss 
"""bceaftersigmoid""" +485 48 regularizer """no""" +485 48 optimizer """adam""" +485 48 training_loop """owa""" +485 48 negative_sampler """basic""" +485 48 evaluator """rankbased""" +485 49 dataset """kinships""" +485 49 model """proje""" +485 49 loss """bceaftersigmoid""" +485 49 regularizer """no""" +485 49 optimizer """adam""" +485 49 training_loop """owa""" +485 49 negative_sampler """basic""" +485 49 evaluator """rankbased""" +485 50 dataset """kinships""" +485 50 model """proje""" +485 50 loss """bceaftersigmoid""" +485 50 regularizer """no""" +485 50 optimizer """adam""" +485 50 training_loop """owa""" +485 50 negative_sampler """basic""" +485 50 evaluator """rankbased""" +485 51 dataset """kinships""" +485 51 model """proje""" +485 51 loss """bceaftersigmoid""" +485 51 regularizer """no""" +485 51 optimizer """adam""" +485 51 training_loop """owa""" +485 51 negative_sampler """basic""" +485 51 evaluator """rankbased""" +485 52 dataset """kinships""" +485 52 model """proje""" +485 52 loss """bceaftersigmoid""" +485 52 regularizer """no""" +485 52 optimizer """adam""" +485 52 training_loop """owa""" +485 52 negative_sampler """basic""" +485 52 evaluator """rankbased""" +485 53 dataset """kinships""" +485 53 model """proje""" +485 53 loss """bceaftersigmoid""" +485 53 regularizer """no""" +485 53 optimizer """adam""" +485 53 training_loop """owa""" +485 53 negative_sampler """basic""" +485 53 evaluator """rankbased""" +485 54 dataset """kinships""" +485 54 model """proje""" +485 54 loss """bceaftersigmoid""" +485 54 regularizer """no""" +485 54 optimizer """adam""" +485 54 training_loop """owa""" +485 54 negative_sampler """basic""" +485 54 evaluator """rankbased""" +485 55 dataset """kinships""" +485 55 model """proje""" +485 55 loss """bceaftersigmoid""" +485 55 regularizer """no""" +485 55 optimizer """adam""" +485 55 training_loop """owa""" +485 55 negative_sampler """basic""" +485 55 evaluator """rankbased""" +485 56 dataset """kinships""" +485 56 model 
"""proje""" +485 56 loss """bceaftersigmoid""" +485 56 regularizer """no""" +485 56 optimizer """adam""" +485 56 training_loop """owa""" +485 56 negative_sampler """basic""" +485 56 evaluator """rankbased""" +485 57 dataset """kinships""" +485 57 model """proje""" +485 57 loss """bceaftersigmoid""" +485 57 regularizer """no""" +485 57 optimizer """adam""" +485 57 training_loop """owa""" +485 57 negative_sampler """basic""" +485 57 evaluator """rankbased""" +485 58 dataset """kinships""" +485 58 model """proje""" +485 58 loss """bceaftersigmoid""" +485 58 regularizer """no""" +485 58 optimizer """adam""" +485 58 training_loop """owa""" +485 58 negative_sampler """basic""" +485 58 evaluator """rankbased""" +485 59 dataset """kinships""" +485 59 model """proje""" +485 59 loss """bceaftersigmoid""" +485 59 regularizer """no""" +485 59 optimizer """adam""" +485 59 training_loop """owa""" +485 59 negative_sampler """basic""" +485 59 evaluator """rankbased""" +485 60 dataset """kinships""" +485 60 model """proje""" +485 60 loss """bceaftersigmoid""" +485 60 regularizer """no""" +485 60 optimizer """adam""" +485 60 training_loop """owa""" +485 60 negative_sampler """basic""" +485 60 evaluator """rankbased""" +485 61 dataset """kinships""" +485 61 model """proje""" +485 61 loss """bceaftersigmoid""" +485 61 regularizer """no""" +485 61 optimizer """adam""" +485 61 training_loop """owa""" +485 61 negative_sampler """basic""" +485 61 evaluator """rankbased""" +485 62 dataset """kinships""" +485 62 model """proje""" +485 62 loss """bceaftersigmoid""" +485 62 regularizer """no""" +485 62 optimizer """adam""" +485 62 training_loop """owa""" +485 62 negative_sampler """basic""" +485 62 evaluator """rankbased""" +485 63 dataset """kinships""" +485 63 model """proje""" +485 63 loss """bceaftersigmoid""" +485 63 regularizer """no""" +485 63 optimizer """adam""" +485 63 training_loop """owa""" +485 63 negative_sampler """basic""" +485 63 evaluator """rankbased""" +485 64 dataset 
"""kinships""" +485 64 model """proje""" +485 64 loss """bceaftersigmoid""" +485 64 regularizer """no""" +485 64 optimizer """adam""" +485 64 training_loop """owa""" +485 64 negative_sampler """basic""" +485 64 evaluator """rankbased""" +485 65 dataset """kinships""" +485 65 model """proje""" +485 65 loss """bceaftersigmoid""" +485 65 regularizer """no""" +485 65 optimizer """adam""" +485 65 training_loop """owa""" +485 65 negative_sampler """basic""" +485 65 evaluator """rankbased""" +485 66 dataset """kinships""" +485 66 model """proje""" +485 66 loss """bceaftersigmoid""" +485 66 regularizer """no""" +485 66 optimizer """adam""" +485 66 training_loop """owa""" +485 66 negative_sampler """basic""" +485 66 evaluator """rankbased""" +485 67 dataset """kinships""" +485 67 model """proje""" +485 67 loss """bceaftersigmoid""" +485 67 regularizer """no""" +485 67 optimizer """adam""" +485 67 training_loop """owa""" +485 67 negative_sampler """basic""" +485 67 evaluator """rankbased""" +485 68 dataset """kinships""" +485 68 model """proje""" +485 68 loss """bceaftersigmoid""" +485 68 regularizer """no""" +485 68 optimizer """adam""" +485 68 training_loop """owa""" +485 68 negative_sampler """basic""" +485 68 evaluator """rankbased""" +485 69 dataset """kinships""" +485 69 model """proje""" +485 69 loss """bceaftersigmoid""" +485 69 regularizer """no""" +485 69 optimizer """adam""" +485 69 training_loop """owa""" +485 69 negative_sampler """basic""" +485 69 evaluator """rankbased""" +485 70 dataset """kinships""" +485 70 model """proje""" +485 70 loss """bceaftersigmoid""" +485 70 regularizer """no""" +485 70 optimizer """adam""" +485 70 training_loop """owa""" +485 70 negative_sampler """basic""" +485 70 evaluator """rankbased""" +485 71 dataset """kinships""" +485 71 model """proje""" +485 71 loss """bceaftersigmoid""" +485 71 regularizer """no""" +485 71 optimizer """adam""" +485 71 training_loop """owa""" +485 71 negative_sampler """basic""" +485 71 evaluator 
"""rankbased""" +485 72 dataset """kinships""" +485 72 model """proje""" +485 72 loss """bceaftersigmoid""" +485 72 regularizer """no""" +485 72 optimizer """adam""" +485 72 training_loop """owa""" +485 72 negative_sampler """basic""" +485 72 evaluator """rankbased""" +485 73 dataset """kinships""" +485 73 model """proje""" +485 73 loss """bceaftersigmoid""" +485 73 regularizer """no""" +485 73 optimizer """adam""" +485 73 training_loop """owa""" +485 73 negative_sampler """basic""" +485 73 evaluator """rankbased""" +485 74 dataset """kinships""" +485 74 model """proje""" +485 74 loss """bceaftersigmoid""" +485 74 regularizer """no""" +485 74 optimizer """adam""" +485 74 training_loop """owa""" +485 74 negative_sampler """basic""" +485 74 evaluator """rankbased""" +485 75 dataset """kinships""" +485 75 model """proje""" +485 75 loss """bceaftersigmoid""" +485 75 regularizer """no""" +485 75 optimizer """adam""" +485 75 training_loop """owa""" +485 75 negative_sampler """basic""" +485 75 evaluator """rankbased""" +485 76 dataset """kinships""" +485 76 model """proje""" +485 76 loss """bceaftersigmoid""" +485 76 regularizer """no""" +485 76 optimizer """adam""" +485 76 training_loop """owa""" +485 76 negative_sampler """basic""" +485 76 evaluator """rankbased""" +485 77 dataset """kinships""" +485 77 model """proje""" +485 77 loss """bceaftersigmoid""" +485 77 regularizer """no""" +485 77 optimizer """adam""" +485 77 training_loop """owa""" +485 77 negative_sampler """basic""" +485 77 evaluator """rankbased""" +485 78 dataset """kinships""" +485 78 model """proje""" +485 78 loss """bceaftersigmoid""" +485 78 regularizer """no""" +485 78 optimizer """adam""" +485 78 training_loop """owa""" +485 78 negative_sampler """basic""" +485 78 evaluator """rankbased""" +485 79 dataset """kinships""" +485 79 model """proje""" +485 79 loss """bceaftersigmoid""" +485 79 regularizer """no""" +485 79 optimizer """adam""" +485 79 training_loop """owa""" +485 79 negative_sampler 
"""basic""" +485 79 evaluator """rankbased""" +485 80 dataset """kinships""" +485 80 model """proje""" +485 80 loss """bceaftersigmoid""" +485 80 regularizer """no""" +485 80 optimizer """adam""" +485 80 training_loop """owa""" +485 80 negative_sampler """basic""" +485 80 evaluator """rankbased""" +485 81 dataset """kinships""" +485 81 model """proje""" +485 81 loss """bceaftersigmoid""" +485 81 regularizer """no""" +485 81 optimizer """adam""" +485 81 training_loop """owa""" +485 81 negative_sampler """basic""" +485 81 evaluator """rankbased""" +485 82 dataset """kinships""" +485 82 model """proje""" +485 82 loss """bceaftersigmoid""" +485 82 regularizer """no""" +485 82 optimizer """adam""" +485 82 training_loop """owa""" +485 82 negative_sampler """basic""" +485 82 evaluator """rankbased""" +485 83 dataset """kinships""" +485 83 model """proje""" +485 83 loss """bceaftersigmoid""" +485 83 regularizer """no""" +485 83 optimizer """adam""" +485 83 training_loop """owa""" +485 83 negative_sampler """basic""" +485 83 evaluator """rankbased""" +485 84 dataset """kinships""" +485 84 model """proje""" +485 84 loss """bceaftersigmoid""" +485 84 regularizer """no""" +485 84 optimizer """adam""" +485 84 training_loop """owa""" +485 84 negative_sampler """basic""" +485 84 evaluator """rankbased""" +485 85 dataset """kinships""" +485 85 model """proje""" +485 85 loss """bceaftersigmoid""" +485 85 regularizer """no""" +485 85 optimizer """adam""" +485 85 training_loop """owa""" +485 85 negative_sampler """basic""" +485 85 evaluator """rankbased""" +485 86 dataset """kinships""" +485 86 model """proje""" +485 86 loss """bceaftersigmoid""" +485 86 regularizer """no""" +485 86 optimizer """adam""" +485 86 training_loop """owa""" +485 86 negative_sampler """basic""" +485 86 evaluator """rankbased""" +485 87 dataset """kinships""" +485 87 model """proje""" +485 87 loss """bceaftersigmoid""" +485 87 regularizer """no""" +485 87 optimizer """adam""" +485 87 training_loop """owa""" 
+485 87 negative_sampler """basic""" +485 87 evaluator """rankbased""" +485 88 dataset """kinships""" +485 88 model """proje""" +485 88 loss """bceaftersigmoid""" +485 88 regularizer """no""" +485 88 optimizer """adam""" +485 88 training_loop """owa""" +485 88 negative_sampler """basic""" +485 88 evaluator """rankbased""" +485 89 dataset """kinships""" +485 89 model """proje""" +485 89 loss """bceaftersigmoid""" +485 89 regularizer """no""" +485 89 optimizer """adam""" +485 89 training_loop """owa""" +485 89 negative_sampler """basic""" +485 89 evaluator """rankbased""" +485 90 dataset """kinships""" +485 90 model """proje""" +485 90 loss """bceaftersigmoid""" +485 90 regularizer """no""" +485 90 optimizer """adam""" +485 90 training_loop """owa""" +485 90 negative_sampler """basic""" +485 90 evaluator """rankbased""" +485 91 dataset """kinships""" +485 91 model """proje""" +485 91 loss """bceaftersigmoid""" +485 91 regularizer """no""" +485 91 optimizer """adam""" +485 91 training_loop """owa""" +485 91 negative_sampler """basic""" +485 91 evaluator """rankbased""" +485 92 dataset """kinships""" +485 92 model """proje""" +485 92 loss """bceaftersigmoid""" +485 92 regularizer """no""" +485 92 optimizer """adam""" +485 92 training_loop """owa""" +485 92 negative_sampler """basic""" +485 92 evaluator """rankbased""" +485 93 dataset """kinships""" +485 93 model """proje""" +485 93 loss """bceaftersigmoid""" +485 93 regularizer """no""" +485 93 optimizer """adam""" +485 93 training_loop """owa""" +485 93 negative_sampler """basic""" +485 93 evaluator """rankbased""" +485 94 dataset """kinships""" +485 94 model """proje""" +485 94 loss """bceaftersigmoid""" +485 94 regularizer """no""" +485 94 optimizer """adam""" +485 94 training_loop """owa""" +485 94 negative_sampler """basic""" +485 94 evaluator """rankbased""" +485 95 dataset """kinships""" +485 95 model """proje""" +485 95 loss """bceaftersigmoid""" +485 95 regularizer """no""" +485 95 optimizer """adam""" +485 95 
training_loop """owa""" +485 95 negative_sampler """basic""" +485 95 evaluator """rankbased""" +485 96 dataset """kinships""" +485 96 model """proje""" +485 96 loss """bceaftersigmoid""" +485 96 regularizer """no""" +485 96 optimizer """adam""" +485 96 training_loop """owa""" +485 96 negative_sampler """basic""" +485 96 evaluator """rankbased""" +485 97 dataset """kinships""" +485 97 model """proje""" +485 97 loss """bceaftersigmoid""" +485 97 regularizer """no""" +485 97 optimizer """adam""" +485 97 training_loop """owa""" +485 97 negative_sampler """basic""" +485 97 evaluator """rankbased""" +485 98 dataset """kinships""" +485 98 model """proje""" +485 98 loss """bceaftersigmoid""" +485 98 regularizer """no""" +485 98 optimizer """adam""" +485 98 training_loop """owa""" +485 98 negative_sampler """basic""" +485 98 evaluator """rankbased""" +485 99 dataset """kinships""" +485 99 model """proje""" +485 99 loss """bceaftersigmoid""" +485 99 regularizer """no""" +485 99 optimizer """adam""" +485 99 training_loop """owa""" +485 99 negative_sampler """basic""" +485 99 evaluator """rankbased""" +485 100 dataset """kinships""" +485 100 model """proje""" +485 100 loss """bceaftersigmoid""" +485 100 regularizer """no""" +485 100 optimizer """adam""" +485 100 training_loop """owa""" +485 100 negative_sampler """basic""" +485 100 evaluator """rankbased""" +486 1 model.embedding_dim 0.0 +486 1 optimizer.lr 0.018165971460730932 +486 1 negative_sampler.num_negs_per_pos 70.0 +486 1 training.batch_size 2.0 +486 2 model.embedding_dim 2.0 +486 2 optimizer.lr 0.021608922054609066 +486 2 negative_sampler.num_negs_per_pos 53.0 +486 2 training.batch_size 0.0 +486 3 model.embedding_dim 2.0 +486 3 optimizer.lr 0.02418556169069374 +486 3 negative_sampler.num_negs_per_pos 91.0 +486 3 training.batch_size 2.0 +486 4 model.embedding_dim 0.0 +486 4 optimizer.lr 0.003461255206500456 +486 4 negative_sampler.num_negs_per_pos 53.0 +486 4 training.batch_size 2.0 +486 5 model.embedding_dim 2.0 +486 
5 optimizer.lr 0.08817842752200464 +486 5 negative_sampler.num_negs_per_pos 46.0 +486 5 training.batch_size 1.0 +486 6 model.embedding_dim 1.0 +486 6 optimizer.lr 0.022666798607844076 +486 6 negative_sampler.num_negs_per_pos 80.0 +486 6 training.batch_size 1.0 +486 7 model.embedding_dim 0.0 +486 7 optimizer.lr 0.009422957214654927 +486 7 negative_sampler.num_negs_per_pos 1.0 +486 7 training.batch_size 1.0 +486 8 model.embedding_dim 1.0 +486 8 optimizer.lr 0.013518792140410369 +486 8 negative_sampler.num_negs_per_pos 89.0 +486 8 training.batch_size 1.0 +486 9 model.embedding_dim 1.0 +486 9 optimizer.lr 0.0011392221334653035 +486 9 negative_sampler.num_negs_per_pos 62.0 +486 9 training.batch_size 1.0 +486 10 model.embedding_dim 0.0 +486 10 optimizer.lr 0.07358560064053761 +486 10 negative_sampler.num_negs_per_pos 24.0 +486 10 training.batch_size 2.0 +486 11 model.embedding_dim 0.0 +486 11 optimizer.lr 0.01713958471877431 +486 11 negative_sampler.num_negs_per_pos 27.0 +486 11 training.batch_size 1.0 +486 12 model.embedding_dim 2.0 +486 12 optimizer.lr 0.010597984375717958 +486 12 negative_sampler.num_negs_per_pos 74.0 +486 12 training.batch_size 1.0 +486 13 model.embedding_dim 2.0 +486 13 optimizer.lr 0.0011840463416445803 +486 13 negative_sampler.num_negs_per_pos 30.0 +486 13 training.batch_size 1.0 +486 14 model.embedding_dim 1.0 +486 14 optimizer.lr 0.012890222297965518 +486 14 negative_sampler.num_negs_per_pos 22.0 +486 14 training.batch_size 0.0 +486 15 model.embedding_dim 1.0 +486 15 optimizer.lr 0.023870201428953405 +486 15 negative_sampler.num_negs_per_pos 14.0 +486 15 training.batch_size 2.0 +486 16 model.embedding_dim 1.0 +486 16 optimizer.lr 0.001998347478849153 +486 16 negative_sampler.num_negs_per_pos 84.0 +486 16 training.batch_size 0.0 +486 17 model.embedding_dim 0.0 +486 17 optimizer.lr 0.00539857648094262 +486 17 negative_sampler.num_negs_per_pos 90.0 +486 17 training.batch_size 1.0 +486 18 model.embedding_dim 2.0 +486 18 optimizer.lr 
0.0010768859985222175 +486 18 negative_sampler.num_negs_per_pos 6.0 +486 18 training.batch_size 2.0 +486 19 model.embedding_dim 0.0 +486 19 optimizer.lr 0.026728835906599056 +486 19 negative_sampler.num_negs_per_pos 69.0 +486 19 training.batch_size 2.0 +486 20 model.embedding_dim 1.0 +486 20 optimizer.lr 0.002085274634444733 +486 20 negative_sampler.num_negs_per_pos 1.0 +486 20 training.batch_size 1.0 +486 21 model.embedding_dim 0.0 +486 21 optimizer.lr 0.006566845873800477 +486 21 negative_sampler.num_negs_per_pos 7.0 +486 21 training.batch_size 1.0 +486 22 model.embedding_dim 1.0 +486 22 optimizer.lr 0.022787871181970207 +486 22 negative_sampler.num_negs_per_pos 21.0 +486 22 training.batch_size 2.0 +486 23 model.embedding_dim 2.0 +486 23 optimizer.lr 0.01494730034595626 +486 23 negative_sampler.num_negs_per_pos 72.0 +486 23 training.batch_size 0.0 +486 24 model.embedding_dim 1.0 +486 24 optimizer.lr 0.07196790620393537 +486 24 negative_sampler.num_negs_per_pos 39.0 +486 24 training.batch_size 1.0 +486 25 model.embedding_dim 2.0 +486 25 optimizer.lr 0.04752683232649342 +486 25 negative_sampler.num_negs_per_pos 46.0 +486 25 training.batch_size 2.0 +486 26 model.embedding_dim 2.0 +486 26 optimizer.lr 0.0631336539455254 +486 26 negative_sampler.num_negs_per_pos 47.0 +486 26 training.batch_size 2.0 +486 27 model.embedding_dim 1.0 +486 27 optimizer.lr 0.004426598462050107 +486 27 negative_sampler.num_negs_per_pos 50.0 +486 27 training.batch_size 0.0 +486 28 model.embedding_dim 2.0 +486 28 optimizer.lr 0.0016479621598550446 +486 28 negative_sampler.num_negs_per_pos 44.0 +486 28 training.batch_size 0.0 +486 29 model.embedding_dim 1.0 +486 29 optimizer.lr 0.05686495558398807 +486 29 negative_sampler.num_negs_per_pos 24.0 +486 29 training.batch_size 1.0 +486 30 model.embedding_dim 0.0 +486 30 optimizer.lr 0.08467876466730039 +486 30 negative_sampler.num_negs_per_pos 36.0 +486 30 training.batch_size 2.0 +486 31 model.embedding_dim 0.0 +486 31 optimizer.lr 
0.027964825207431557 +486 31 negative_sampler.num_negs_per_pos 37.0 +486 31 training.batch_size 2.0 +486 32 model.embedding_dim 0.0 +486 32 optimizer.lr 0.01030562900903173 +486 32 negative_sampler.num_negs_per_pos 39.0 +486 32 training.batch_size 2.0 +486 33 model.embedding_dim 0.0 +486 33 optimizer.lr 0.0029414262907503596 +486 33 negative_sampler.num_negs_per_pos 99.0 +486 33 training.batch_size 0.0 +486 34 model.embedding_dim 1.0 +486 34 optimizer.lr 0.008385375402402613 +486 34 negative_sampler.num_negs_per_pos 43.0 +486 34 training.batch_size 1.0 +486 35 model.embedding_dim 1.0 +486 35 optimizer.lr 0.08559238841129889 +486 35 negative_sampler.num_negs_per_pos 56.0 +486 35 training.batch_size 1.0 +486 36 model.embedding_dim 2.0 +486 36 optimizer.lr 0.00552544947075665 +486 36 negative_sampler.num_negs_per_pos 25.0 +486 36 training.batch_size 0.0 +486 37 model.embedding_dim 2.0 +486 37 optimizer.lr 0.020323678233970768 +486 37 negative_sampler.num_negs_per_pos 46.0 +486 37 training.batch_size 2.0 +486 38 model.embedding_dim 0.0 +486 38 optimizer.lr 0.002460464035430246 +486 38 negative_sampler.num_negs_per_pos 30.0 +486 38 training.batch_size 2.0 +486 39 model.embedding_dim 0.0 +486 39 optimizer.lr 0.047193201166722355 +486 39 negative_sampler.num_negs_per_pos 78.0 +486 39 training.batch_size 2.0 +486 40 model.embedding_dim 0.0 +486 40 optimizer.lr 0.002773253780468494 +486 40 negative_sampler.num_negs_per_pos 19.0 +486 40 training.batch_size 0.0 +486 41 model.embedding_dim 2.0 +486 41 optimizer.lr 0.012840657641268505 +486 41 negative_sampler.num_negs_per_pos 89.0 +486 41 training.batch_size 0.0 +486 42 model.embedding_dim 2.0 +486 42 optimizer.lr 0.09213034811133336 +486 42 negative_sampler.num_negs_per_pos 63.0 +486 42 training.batch_size 0.0 +486 43 model.embedding_dim 2.0 +486 43 optimizer.lr 0.0032228402562524184 +486 43 negative_sampler.num_negs_per_pos 6.0 +486 43 training.batch_size 2.0 +486 44 model.embedding_dim 1.0 +486 44 optimizer.lr 
0.08407195598734243 +486 44 negative_sampler.num_negs_per_pos 18.0 +486 44 training.batch_size 2.0 +486 45 model.embedding_dim 1.0 +486 45 optimizer.lr 0.003430357228561082 +486 45 negative_sampler.num_negs_per_pos 90.0 +486 45 training.batch_size 0.0 +486 46 model.embedding_dim 1.0 +486 46 optimizer.lr 0.06173176644731735 +486 46 negative_sampler.num_negs_per_pos 15.0 +486 46 training.batch_size 2.0 +486 47 model.embedding_dim 1.0 +486 47 optimizer.lr 0.03310732235284812 +486 47 negative_sampler.num_negs_per_pos 25.0 +486 47 training.batch_size 1.0 +486 48 model.embedding_dim 0.0 +486 48 optimizer.lr 0.0011075678744344067 +486 48 negative_sampler.num_negs_per_pos 67.0 +486 48 training.batch_size 0.0 +486 49 model.embedding_dim 2.0 +486 49 optimizer.lr 0.010835019578150239 +486 49 negative_sampler.num_negs_per_pos 2.0 +486 49 training.batch_size 1.0 +486 50 model.embedding_dim 2.0 +486 50 optimizer.lr 0.0058547299328572 +486 50 negative_sampler.num_negs_per_pos 68.0 +486 50 training.batch_size 0.0 +486 51 model.embedding_dim 2.0 +486 51 optimizer.lr 0.0043650435979695 +486 51 negative_sampler.num_negs_per_pos 92.0 +486 51 training.batch_size 2.0 +486 52 model.embedding_dim 0.0 +486 52 optimizer.lr 0.0019519735712995182 +486 52 negative_sampler.num_negs_per_pos 30.0 +486 52 training.batch_size 0.0 +486 53 model.embedding_dim 0.0 +486 53 optimizer.lr 0.027400687877670183 +486 53 negative_sampler.num_negs_per_pos 47.0 +486 53 training.batch_size 2.0 +486 54 model.embedding_dim 0.0 +486 54 optimizer.lr 0.08338246749818741 +486 54 negative_sampler.num_negs_per_pos 34.0 +486 54 training.batch_size 2.0 +486 55 model.embedding_dim 0.0 +486 55 optimizer.lr 0.025361998562079165 +486 55 negative_sampler.num_negs_per_pos 11.0 +486 55 training.batch_size 0.0 +486 56 model.embedding_dim 1.0 +486 56 optimizer.lr 0.032864382529538765 +486 56 negative_sampler.num_negs_per_pos 5.0 +486 56 training.batch_size 1.0 +486 57 model.embedding_dim 2.0 +486 57 optimizer.lr 
0.002539248772914781 +486 57 negative_sampler.num_negs_per_pos 7.0 +486 57 training.batch_size 1.0 +486 58 model.embedding_dim 2.0 +486 58 optimizer.lr 0.01256649548350907 +486 58 negative_sampler.num_negs_per_pos 41.0 +486 58 training.batch_size 0.0 +486 59 model.embedding_dim 1.0 +486 59 optimizer.lr 0.002130822957278207 +486 59 negative_sampler.num_negs_per_pos 32.0 +486 59 training.batch_size 2.0 +486 60 model.embedding_dim 2.0 +486 60 optimizer.lr 0.043145607215307524 +486 60 negative_sampler.num_negs_per_pos 34.0 +486 60 training.batch_size 0.0 +486 61 model.embedding_dim 1.0 +486 61 optimizer.lr 0.05002034511627442 +486 61 negative_sampler.num_negs_per_pos 78.0 +486 61 training.batch_size 2.0 +486 62 model.embedding_dim 0.0 +486 62 optimizer.lr 0.0054612521331788705 +486 62 negative_sampler.num_negs_per_pos 83.0 +486 62 training.batch_size 2.0 +486 63 model.embedding_dim 1.0 +486 63 optimizer.lr 0.005665350460323821 +486 63 negative_sampler.num_negs_per_pos 31.0 +486 63 training.batch_size 0.0 +486 64 model.embedding_dim 2.0 +486 64 optimizer.lr 0.059373327919160265 +486 64 negative_sampler.num_negs_per_pos 91.0 +486 64 training.batch_size 2.0 +486 65 model.embedding_dim 0.0 +486 65 optimizer.lr 0.021053139024393222 +486 65 negative_sampler.num_negs_per_pos 35.0 +486 65 training.batch_size 1.0 +486 66 model.embedding_dim 2.0 +486 66 optimizer.lr 0.0013620775208237467 +486 66 negative_sampler.num_negs_per_pos 64.0 +486 66 training.batch_size 1.0 +486 67 model.embedding_dim 1.0 +486 67 optimizer.lr 0.009850548851278456 +486 67 negative_sampler.num_negs_per_pos 94.0 +486 67 training.batch_size 2.0 +486 68 model.embedding_dim 1.0 +486 68 optimizer.lr 0.03201992291093973 +486 68 negative_sampler.num_negs_per_pos 44.0 +486 68 training.batch_size 1.0 +486 69 model.embedding_dim 2.0 +486 69 optimizer.lr 0.05658798792361669 +486 69 negative_sampler.num_negs_per_pos 37.0 +486 69 training.batch_size 1.0 +486 70 model.embedding_dim 0.0 +486 70 optimizer.lr 
0.0526663967960562 +486 70 negative_sampler.num_negs_per_pos 31.0 +486 70 training.batch_size 1.0 +486 71 model.embedding_dim 1.0 +486 71 optimizer.lr 0.006805582353106427 +486 71 negative_sampler.num_negs_per_pos 52.0 +486 71 training.batch_size 1.0 +486 72 model.embedding_dim 2.0 +486 72 optimizer.lr 0.052367269562176876 +486 72 negative_sampler.num_negs_per_pos 49.0 +486 72 training.batch_size 0.0 +486 73 model.embedding_dim 0.0 +486 73 optimizer.lr 0.0548695061458775 +486 73 negative_sampler.num_negs_per_pos 43.0 +486 73 training.batch_size 0.0 +486 74 model.embedding_dim 0.0 +486 74 optimizer.lr 0.03690938957459917 +486 74 negative_sampler.num_negs_per_pos 25.0 +486 74 training.batch_size 0.0 +486 75 model.embedding_dim 1.0 +486 75 optimizer.lr 0.00256728434154721 +486 75 negative_sampler.num_negs_per_pos 27.0 +486 75 training.batch_size 2.0 +486 76 model.embedding_dim 0.0 +486 76 optimizer.lr 0.0020390970404680878 +486 76 negative_sampler.num_negs_per_pos 54.0 +486 76 training.batch_size 2.0 +486 77 model.embedding_dim 2.0 +486 77 optimizer.lr 0.011640418272166622 +486 77 negative_sampler.num_negs_per_pos 68.0 +486 77 training.batch_size 0.0 +486 78 model.embedding_dim 2.0 +486 78 optimizer.lr 0.02669232440822941 +486 78 negative_sampler.num_negs_per_pos 81.0 +486 78 training.batch_size 2.0 +486 79 model.embedding_dim 2.0 +486 79 optimizer.lr 0.016154784103987054 +486 79 negative_sampler.num_negs_per_pos 40.0 +486 79 training.batch_size 1.0 +486 80 model.embedding_dim 0.0 +486 80 optimizer.lr 0.007720888917227474 +486 80 negative_sampler.num_negs_per_pos 59.0 +486 80 training.batch_size 0.0 +486 81 model.embedding_dim 1.0 +486 81 optimizer.lr 0.012903951381879154 +486 81 negative_sampler.num_negs_per_pos 31.0 +486 81 training.batch_size 0.0 +486 82 model.embedding_dim 2.0 +486 82 optimizer.lr 0.023464698171308977 +486 82 negative_sampler.num_negs_per_pos 12.0 +486 82 training.batch_size 1.0 +486 83 model.embedding_dim 0.0 +486 83 optimizer.lr 
0.0037695121031748728 +486 83 negative_sampler.num_negs_per_pos 68.0 +486 83 training.batch_size 0.0 +486 84 model.embedding_dim 2.0 +486 84 optimizer.lr 0.008785615280675922 +486 84 negative_sampler.num_negs_per_pos 34.0 +486 84 training.batch_size 1.0 +486 85 model.embedding_dim 1.0 +486 85 optimizer.lr 0.0012602625190368665 +486 85 negative_sampler.num_negs_per_pos 52.0 +486 85 training.batch_size 0.0 +486 86 model.embedding_dim 0.0 +486 86 optimizer.lr 0.009412600781903212 +486 86 negative_sampler.num_negs_per_pos 61.0 +486 86 training.batch_size 1.0 +486 87 model.embedding_dim 1.0 +486 87 optimizer.lr 0.08222602138345314 +486 87 negative_sampler.num_negs_per_pos 73.0 +486 87 training.batch_size 1.0 +486 88 model.embedding_dim 0.0 +486 88 optimizer.lr 0.03893608161246959 +486 88 negative_sampler.num_negs_per_pos 67.0 +486 88 training.batch_size 2.0 +486 89 model.embedding_dim 2.0 +486 89 optimizer.lr 0.003678669102362179 +486 89 negative_sampler.num_negs_per_pos 85.0 +486 89 training.batch_size 2.0 +486 90 model.embedding_dim 2.0 +486 90 optimizer.lr 0.02052339781233625 +486 90 negative_sampler.num_negs_per_pos 73.0 +486 90 training.batch_size 0.0 +486 91 model.embedding_dim 1.0 +486 91 optimizer.lr 0.0034623862306631956 +486 91 negative_sampler.num_negs_per_pos 79.0 +486 91 training.batch_size 1.0 +486 92 model.embedding_dim 2.0 +486 92 optimizer.lr 0.0038752345154978427 +486 92 negative_sampler.num_negs_per_pos 48.0 +486 92 training.batch_size 1.0 +486 93 model.embedding_dim 0.0 +486 93 optimizer.lr 0.023056817776837876 +486 93 negative_sampler.num_negs_per_pos 63.0 +486 93 training.batch_size 1.0 +486 94 model.embedding_dim 1.0 +486 94 optimizer.lr 0.016788574062307406 +486 94 negative_sampler.num_negs_per_pos 59.0 +486 94 training.batch_size 2.0 +486 95 model.embedding_dim 2.0 +486 95 optimizer.lr 0.0017704877918198894 +486 95 negative_sampler.num_negs_per_pos 78.0 +486 95 training.batch_size 2.0 +486 96 model.embedding_dim 0.0 +486 96 optimizer.lr 
0.031765198569785 +486 96 negative_sampler.num_negs_per_pos 53.0 +486 96 training.batch_size 1.0 +486 97 model.embedding_dim 2.0 +486 97 optimizer.lr 0.0020629546283260154 +486 97 negative_sampler.num_negs_per_pos 56.0 +486 97 training.batch_size 2.0 +486 98 model.embedding_dim 1.0 +486 98 optimizer.lr 0.02662823163280884 +486 98 negative_sampler.num_negs_per_pos 28.0 +486 98 training.batch_size 2.0 +486 99 model.embedding_dim 2.0 +486 99 optimizer.lr 0.0015948761845263077 +486 99 negative_sampler.num_negs_per_pos 50.0 +486 99 training.batch_size 2.0 +486 100 model.embedding_dim 0.0 +486 100 optimizer.lr 0.022614919773561698 +486 100 negative_sampler.num_negs_per_pos 36.0 +486 100 training.batch_size 1.0 +486 1 dataset """kinships""" +486 1 model """proje""" +486 1 loss """softplus""" +486 1 regularizer """no""" +486 1 optimizer """adam""" +486 1 training_loop """owa""" +486 1 negative_sampler """basic""" +486 1 evaluator """rankbased""" +486 2 dataset """kinships""" +486 2 model """proje""" +486 2 loss """softplus""" +486 2 regularizer """no""" +486 2 optimizer """adam""" +486 2 training_loop """owa""" +486 2 negative_sampler """basic""" +486 2 evaluator """rankbased""" +486 3 dataset """kinships""" +486 3 model """proje""" +486 3 loss """softplus""" +486 3 regularizer """no""" +486 3 optimizer """adam""" +486 3 training_loop """owa""" +486 3 negative_sampler """basic""" +486 3 evaluator """rankbased""" +486 4 dataset """kinships""" +486 4 model """proje""" +486 4 loss """softplus""" +486 4 regularizer """no""" +486 4 optimizer """adam""" +486 4 training_loop """owa""" +486 4 negative_sampler """basic""" +486 4 evaluator """rankbased""" +486 5 dataset """kinships""" +486 5 model """proje""" +486 5 loss """softplus""" +486 5 regularizer """no""" +486 5 optimizer """adam""" +486 5 training_loop """owa""" +486 5 negative_sampler """basic""" +486 5 evaluator """rankbased""" +486 6 dataset """kinships""" +486 6 model """proje""" +486 6 loss """softplus""" +486 6 
regularizer """no""" +486 6 optimizer """adam""" +486 6 training_loop """owa""" +486 6 negative_sampler """basic""" +486 6 evaluator """rankbased""" +486 7 dataset """kinships""" +486 7 model """proje""" +486 7 loss """softplus""" +486 7 regularizer """no""" +486 7 optimizer """adam""" +486 7 training_loop """owa""" +486 7 negative_sampler """basic""" +486 7 evaluator """rankbased""" +486 8 dataset """kinships""" +486 8 model """proje""" +486 8 loss """softplus""" +486 8 regularizer """no""" +486 8 optimizer """adam""" +486 8 training_loop """owa""" +486 8 negative_sampler """basic""" +486 8 evaluator """rankbased""" +486 9 dataset """kinships""" +486 9 model """proje""" +486 9 loss """softplus""" +486 9 regularizer """no""" +486 9 optimizer """adam""" +486 9 training_loop """owa""" +486 9 negative_sampler """basic""" +486 9 evaluator """rankbased""" +486 10 dataset """kinships""" +486 10 model """proje""" +486 10 loss """softplus""" +486 10 regularizer """no""" +486 10 optimizer """adam""" +486 10 training_loop """owa""" +486 10 negative_sampler """basic""" +486 10 evaluator """rankbased""" +486 11 dataset """kinships""" +486 11 model """proje""" +486 11 loss """softplus""" +486 11 regularizer """no""" +486 11 optimizer """adam""" +486 11 training_loop """owa""" +486 11 negative_sampler """basic""" +486 11 evaluator """rankbased""" +486 12 dataset """kinships""" +486 12 model """proje""" +486 12 loss """softplus""" +486 12 regularizer """no""" +486 12 optimizer """adam""" +486 12 training_loop """owa""" +486 12 negative_sampler """basic""" +486 12 evaluator """rankbased""" +486 13 dataset """kinships""" +486 13 model """proje""" +486 13 loss """softplus""" +486 13 regularizer """no""" +486 13 optimizer """adam""" +486 13 training_loop """owa""" +486 13 negative_sampler """basic""" +486 13 evaluator """rankbased""" +486 14 dataset """kinships""" +486 14 model """proje""" +486 14 loss """softplus""" +486 14 regularizer """no""" +486 14 optimizer """adam""" +486 14 
training_loop """owa""" +486 14 negative_sampler """basic""" +486 14 evaluator """rankbased""" +486 15 dataset """kinships""" +486 15 model """proje""" +486 15 loss """softplus""" +486 15 regularizer """no""" +486 15 optimizer """adam""" +486 15 training_loop """owa""" +486 15 negative_sampler """basic""" +486 15 evaluator """rankbased""" +486 16 dataset """kinships""" +486 16 model """proje""" +486 16 loss """softplus""" +486 16 regularizer """no""" +486 16 optimizer """adam""" +486 16 training_loop """owa""" +486 16 negative_sampler """basic""" +486 16 evaluator """rankbased""" +486 17 dataset """kinships""" +486 17 model """proje""" +486 17 loss """softplus""" +486 17 regularizer """no""" +486 17 optimizer """adam""" +486 17 training_loop """owa""" +486 17 negative_sampler """basic""" +486 17 evaluator """rankbased""" +486 18 dataset """kinships""" +486 18 model """proje""" +486 18 loss """softplus""" +486 18 regularizer """no""" +486 18 optimizer """adam""" +486 18 training_loop """owa""" +486 18 negative_sampler """basic""" +486 18 evaluator """rankbased""" +486 19 dataset """kinships""" +486 19 model """proje""" +486 19 loss """softplus""" +486 19 regularizer """no""" +486 19 optimizer """adam""" +486 19 training_loop """owa""" +486 19 negative_sampler """basic""" +486 19 evaluator """rankbased""" +486 20 dataset """kinships""" +486 20 model """proje""" +486 20 loss """softplus""" +486 20 regularizer """no""" +486 20 optimizer """adam""" +486 20 training_loop """owa""" +486 20 negative_sampler """basic""" +486 20 evaluator """rankbased""" +486 21 dataset """kinships""" +486 21 model """proje""" +486 21 loss """softplus""" +486 21 regularizer """no""" +486 21 optimizer """adam""" +486 21 training_loop """owa""" +486 21 negative_sampler """basic""" +486 21 evaluator """rankbased""" +486 22 dataset """kinships""" +486 22 model """proje""" +486 22 loss """softplus""" +486 22 regularizer """no""" +486 22 optimizer """adam""" +486 22 training_loop """owa""" +486 22 
negative_sampler """basic""" +486 22 evaluator """rankbased""" +486 23 dataset """kinships""" +486 23 model """proje""" +486 23 loss """softplus""" +486 23 regularizer """no""" +486 23 optimizer """adam""" +486 23 training_loop """owa""" +486 23 negative_sampler """basic""" +486 23 evaluator """rankbased""" +486 24 dataset """kinships""" +486 24 model """proje""" +486 24 loss """softplus""" +486 24 regularizer """no""" +486 24 optimizer """adam""" +486 24 training_loop """owa""" +486 24 negative_sampler """basic""" +486 24 evaluator """rankbased""" +486 25 dataset """kinships""" +486 25 model """proje""" +486 25 loss """softplus""" +486 25 regularizer """no""" +486 25 optimizer """adam""" +486 25 training_loop """owa""" +486 25 negative_sampler """basic""" +486 25 evaluator """rankbased""" +486 26 dataset """kinships""" +486 26 model """proje""" +486 26 loss """softplus""" +486 26 regularizer """no""" +486 26 optimizer """adam""" +486 26 training_loop """owa""" +486 26 negative_sampler """basic""" +486 26 evaluator """rankbased""" +486 27 dataset """kinships""" +486 27 model """proje""" +486 27 loss """softplus""" +486 27 regularizer """no""" +486 27 optimizer """adam""" +486 27 training_loop """owa""" +486 27 negative_sampler """basic""" +486 27 evaluator """rankbased""" +486 28 dataset """kinships""" +486 28 model """proje""" +486 28 loss """softplus""" +486 28 regularizer """no""" +486 28 optimizer """adam""" +486 28 training_loop """owa""" +486 28 negative_sampler """basic""" +486 28 evaluator """rankbased""" +486 29 dataset """kinships""" +486 29 model """proje""" +486 29 loss """softplus""" +486 29 regularizer """no""" +486 29 optimizer """adam""" +486 29 training_loop """owa""" +486 29 negative_sampler """basic""" +486 29 evaluator """rankbased""" +486 30 dataset """kinships""" +486 30 model """proje""" +486 30 loss """softplus""" +486 30 regularizer """no""" +486 30 optimizer """adam""" +486 30 training_loop """owa""" +486 30 negative_sampler """basic""" 
+486 30 evaluator """rankbased""" +486 31 dataset """kinships""" +486 31 model """proje""" +486 31 loss """softplus""" +486 31 regularizer """no""" +486 31 optimizer """adam""" +486 31 training_loop """owa""" +486 31 negative_sampler """basic""" +486 31 evaluator """rankbased""" +486 32 dataset """kinships""" +486 32 model """proje""" +486 32 loss """softplus""" +486 32 regularizer """no""" +486 32 optimizer """adam""" +486 32 training_loop """owa""" +486 32 negative_sampler """basic""" +486 32 evaluator """rankbased""" +486 33 dataset """kinships""" +486 33 model """proje""" +486 33 loss """softplus""" +486 33 regularizer """no""" +486 33 optimizer """adam""" +486 33 training_loop """owa""" +486 33 negative_sampler """basic""" +486 33 evaluator """rankbased""" +486 34 dataset """kinships""" +486 34 model """proje""" +486 34 loss """softplus""" +486 34 regularizer """no""" +486 34 optimizer """adam""" +486 34 training_loop """owa""" +486 34 negative_sampler """basic""" +486 34 evaluator """rankbased""" +486 35 dataset """kinships""" +486 35 model """proje""" +486 35 loss """softplus""" +486 35 regularizer """no""" +486 35 optimizer """adam""" +486 35 training_loop """owa""" +486 35 negative_sampler """basic""" +486 35 evaluator """rankbased""" +486 36 dataset """kinships""" +486 36 model """proje""" +486 36 loss """softplus""" +486 36 regularizer """no""" +486 36 optimizer """adam""" +486 36 training_loop """owa""" +486 36 negative_sampler """basic""" +486 36 evaluator """rankbased""" +486 37 dataset """kinships""" +486 37 model """proje""" +486 37 loss """softplus""" +486 37 regularizer """no""" +486 37 optimizer """adam""" +486 37 training_loop """owa""" +486 37 negative_sampler """basic""" +486 37 evaluator """rankbased""" +486 38 dataset """kinships""" +486 38 model """proje""" +486 38 loss """softplus""" +486 38 regularizer """no""" +486 38 optimizer """adam""" +486 38 training_loop """owa""" +486 38 negative_sampler """basic""" +486 38 evaluator 
"""rankbased""" +486 39 dataset """kinships""" +486 39 model """proje""" +486 39 loss """softplus""" +486 39 regularizer """no""" +486 39 optimizer """adam""" +486 39 training_loop """owa""" +486 39 negative_sampler """basic""" +486 39 evaluator """rankbased""" +486 40 dataset """kinships""" +486 40 model """proje""" +486 40 loss """softplus""" +486 40 regularizer """no""" +486 40 optimizer """adam""" +486 40 training_loop """owa""" +486 40 negative_sampler """basic""" +486 40 evaluator """rankbased""" +486 41 dataset """kinships""" +486 41 model """proje""" +486 41 loss """softplus""" +486 41 regularizer """no""" +486 41 optimizer """adam""" +486 41 training_loop """owa""" +486 41 negative_sampler """basic""" +486 41 evaluator """rankbased""" +486 42 dataset """kinships""" +486 42 model """proje""" +486 42 loss """softplus""" +486 42 regularizer """no""" +486 42 optimizer """adam""" +486 42 training_loop """owa""" +486 42 negative_sampler """basic""" +486 42 evaluator """rankbased""" +486 43 dataset """kinships""" +486 43 model """proje""" +486 43 loss """softplus""" +486 43 regularizer """no""" +486 43 optimizer """adam""" +486 43 training_loop """owa""" +486 43 negative_sampler """basic""" +486 43 evaluator """rankbased""" +486 44 dataset """kinships""" +486 44 model """proje""" +486 44 loss """softplus""" +486 44 regularizer """no""" +486 44 optimizer """adam""" +486 44 training_loop """owa""" +486 44 negative_sampler """basic""" +486 44 evaluator """rankbased""" +486 45 dataset """kinships""" +486 45 model """proje""" +486 45 loss """softplus""" +486 45 regularizer """no""" +486 45 optimizer """adam""" +486 45 training_loop """owa""" +486 45 negative_sampler """basic""" +486 45 evaluator """rankbased""" +486 46 dataset """kinships""" +486 46 model """proje""" +486 46 loss """softplus""" +486 46 regularizer """no""" +486 46 optimizer """adam""" +486 46 training_loop """owa""" +486 46 negative_sampler """basic""" +486 46 evaluator """rankbased""" +486 47 dataset 
"""kinships""" +486 47 model """proje""" +486 47 loss """softplus""" +486 47 regularizer """no""" +486 47 optimizer """adam""" +486 47 training_loop """owa""" +486 47 negative_sampler """basic""" +486 47 evaluator """rankbased""" +486 48 dataset """kinships""" +486 48 model """proje""" +486 48 loss """softplus""" +486 48 regularizer """no""" +486 48 optimizer """adam""" +486 48 training_loop """owa""" +486 48 negative_sampler """basic""" +486 48 evaluator """rankbased""" +486 49 dataset """kinships""" +486 49 model """proje""" +486 49 loss """softplus""" +486 49 regularizer """no""" +486 49 optimizer """adam""" +486 49 training_loop """owa""" +486 49 negative_sampler """basic""" +486 49 evaluator """rankbased""" +486 50 dataset """kinships""" +486 50 model """proje""" +486 50 loss """softplus""" +486 50 regularizer """no""" +486 50 optimizer """adam""" +486 50 training_loop """owa""" +486 50 negative_sampler """basic""" +486 50 evaluator """rankbased""" +486 51 dataset """kinships""" +486 51 model """proje""" +486 51 loss """softplus""" +486 51 regularizer """no""" +486 51 optimizer """adam""" +486 51 training_loop """owa""" +486 51 negative_sampler """basic""" +486 51 evaluator """rankbased""" +486 52 dataset """kinships""" +486 52 model """proje""" +486 52 loss """softplus""" +486 52 regularizer """no""" +486 52 optimizer """adam""" +486 52 training_loop """owa""" +486 52 negative_sampler """basic""" +486 52 evaluator """rankbased""" +486 53 dataset """kinships""" +486 53 model """proje""" +486 53 loss """softplus""" +486 53 regularizer """no""" +486 53 optimizer """adam""" +486 53 training_loop """owa""" +486 53 negative_sampler """basic""" +486 53 evaluator """rankbased""" +486 54 dataset """kinships""" +486 54 model """proje""" +486 54 loss """softplus""" +486 54 regularizer """no""" +486 54 optimizer """adam""" +486 54 training_loop """owa""" +486 54 negative_sampler """basic""" +486 54 evaluator """rankbased""" +486 55 dataset """kinships""" +486 55 model 
"""proje""" +486 55 loss """softplus""" +486 55 regularizer """no""" +486 55 optimizer """adam""" +486 55 training_loop """owa""" +486 55 negative_sampler """basic""" +486 55 evaluator """rankbased""" +486 56 dataset """kinships""" +486 56 model """proje""" +486 56 loss """softplus""" +486 56 regularizer """no""" +486 56 optimizer """adam""" +486 56 training_loop """owa""" +486 56 negative_sampler """basic""" +486 56 evaluator """rankbased""" +486 57 dataset """kinships""" +486 57 model """proje""" +486 57 loss """softplus""" +486 57 regularizer """no""" +486 57 optimizer """adam""" +486 57 training_loop """owa""" +486 57 negative_sampler """basic""" +486 57 evaluator """rankbased""" +486 58 dataset """kinships""" +486 58 model """proje""" +486 58 loss """softplus""" +486 58 regularizer """no""" +486 58 optimizer """adam""" +486 58 training_loop """owa""" +486 58 negative_sampler """basic""" +486 58 evaluator """rankbased""" +486 59 dataset """kinships""" +486 59 model """proje""" +486 59 loss """softplus""" +486 59 regularizer """no""" +486 59 optimizer """adam""" +486 59 training_loop """owa""" +486 59 negative_sampler """basic""" +486 59 evaluator """rankbased""" +486 60 dataset """kinships""" +486 60 model """proje""" +486 60 loss """softplus""" +486 60 regularizer """no""" +486 60 optimizer """adam""" +486 60 training_loop """owa""" +486 60 negative_sampler """basic""" +486 60 evaluator """rankbased""" +486 61 dataset """kinships""" +486 61 model """proje""" +486 61 loss """softplus""" +486 61 regularizer """no""" +486 61 optimizer """adam""" +486 61 training_loop """owa""" +486 61 negative_sampler """basic""" +486 61 evaluator """rankbased""" +486 62 dataset """kinships""" +486 62 model """proje""" +486 62 loss """softplus""" +486 62 regularizer """no""" +486 62 optimizer """adam""" +486 62 training_loop """owa""" +486 62 negative_sampler """basic""" +486 62 evaluator """rankbased""" +486 63 dataset """kinships""" +486 63 model """proje""" +486 63 loss 
"""softplus""" +486 63 regularizer """no""" +486 63 optimizer """adam""" +486 63 training_loop """owa""" +486 63 negative_sampler """basic""" +486 63 evaluator """rankbased""" +486 64 dataset """kinships""" +486 64 model """proje""" +486 64 loss """softplus""" +486 64 regularizer """no""" +486 64 optimizer """adam""" +486 64 training_loop """owa""" +486 64 negative_sampler """basic""" +486 64 evaluator """rankbased""" +486 65 dataset """kinships""" +486 65 model """proje""" +486 65 loss """softplus""" +486 65 regularizer """no""" +486 65 optimizer """adam""" +486 65 training_loop """owa""" +486 65 negative_sampler """basic""" +486 65 evaluator """rankbased""" +486 66 dataset """kinships""" +486 66 model """proje""" +486 66 loss """softplus""" +486 66 regularizer """no""" +486 66 optimizer """adam""" +486 66 training_loop """owa""" +486 66 negative_sampler """basic""" +486 66 evaluator """rankbased""" +486 67 dataset """kinships""" +486 67 model """proje""" +486 67 loss """softplus""" +486 67 regularizer """no""" +486 67 optimizer """adam""" +486 67 training_loop """owa""" +486 67 negative_sampler """basic""" +486 67 evaluator """rankbased""" +486 68 dataset """kinships""" +486 68 model """proje""" +486 68 loss """softplus""" +486 68 regularizer """no""" +486 68 optimizer """adam""" +486 68 training_loop """owa""" +486 68 negative_sampler """basic""" +486 68 evaluator """rankbased""" +486 69 dataset """kinships""" +486 69 model """proje""" +486 69 loss """softplus""" +486 69 regularizer """no""" +486 69 optimizer """adam""" +486 69 training_loop """owa""" +486 69 negative_sampler """basic""" +486 69 evaluator """rankbased""" +486 70 dataset """kinships""" +486 70 model """proje""" +486 70 loss """softplus""" +486 70 regularizer """no""" +486 70 optimizer """adam""" +486 70 training_loop """owa""" +486 70 negative_sampler """basic""" +486 70 evaluator """rankbased""" +486 71 dataset """kinships""" +486 71 model """proje""" +486 71 loss """softplus""" +486 71 
regularizer """no""" +486 71 optimizer """adam""" +486 71 training_loop """owa""" +486 71 negative_sampler """basic""" +486 71 evaluator """rankbased""" +486 72 dataset """kinships""" +486 72 model """proje""" +486 72 loss """softplus""" +486 72 regularizer """no""" +486 72 optimizer """adam""" +486 72 training_loop """owa""" +486 72 negative_sampler """basic""" +486 72 evaluator """rankbased""" +486 73 dataset """kinships""" +486 73 model """proje""" +486 73 loss """softplus""" +486 73 regularizer """no""" +486 73 optimizer """adam""" +486 73 training_loop """owa""" +486 73 negative_sampler """basic""" +486 73 evaluator """rankbased""" +486 74 dataset """kinships""" +486 74 model """proje""" +486 74 loss """softplus""" +486 74 regularizer """no""" +486 74 optimizer """adam""" +486 74 training_loop """owa""" +486 74 negative_sampler """basic""" +486 74 evaluator """rankbased""" +486 75 dataset """kinships""" +486 75 model """proje""" +486 75 loss """softplus""" +486 75 regularizer """no""" +486 75 optimizer """adam""" +486 75 training_loop """owa""" +486 75 negative_sampler """basic""" +486 75 evaluator """rankbased""" +486 76 dataset """kinships""" +486 76 model """proje""" +486 76 loss """softplus""" +486 76 regularizer """no""" +486 76 optimizer """adam""" +486 76 training_loop """owa""" +486 76 negative_sampler """basic""" +486 76 evaluator """rankbased""" +486 77 dataset """kinships""" +486 77 model """proje""" +486 77 loss """softplus""" +486 77 regularizer """no""" +486 77 optimizer """adam""" +486 77 training_loop """owa""" +486 77 negative_sampler """basic""" +486 77 evaluator """rankbased""" +486 78 dataset """kinships""" +486 78 model """proje""" +486 78 loss """softplus""" +486 78 regularizer """no""" +486 78 optimizer """adam""" +486 78 training_loop """owa""" +486 78 negative_sampler """basic""" +486 78 evaluator """rankbased""" +486 79 dataset """kinships""" +486 79 model """proje""" +486 79 loss """softplus""" +486 79 regularizer """no""" +486 79 
optimizer """adam""" +486 79 training_loop """owa""" +486 79 negative_sampler """basic""" +486 79 evaluator """rankbased""" +486 80 dataset """kinships""" +486 80 model """proje""" +486 80 loss """softplus""" +486 80 regularizer """no""" +486 80 optimizer """adam""" +486 80 training_loop """owa""" +486 80 negative_sampler """basic""" +486 80 evaluator """rankbased""" +486 81 dataset """kinships""" +486 81 model """proje""" +486 81 loss """softplus""" +486 81 regularizer """no""" +486 81 optimizer """adam""" +486 81 training_loop """owa""" +486 81 negative_sampler """basic""" +486 81 evaluator """rankbased""" +486 82 dataset """kinships""" +486 82 model """proje""" +486 82 loss """softplus""" +486 82 regularizer """no""" +486 82 optimizer """adam""" +486 82 training_loop """owa""" +486 82 negative_sampler """basic""" +486 82 evaluator """rankbased""" +486 83 dataset """kinships""" +486 83 model """proje""" +486 83 loss """softplus""" +486 83 regularizer """no""" +486 83 optimizer """adam""" +486 83 training_loop """owa""" +486 83 negative_sampler """basic""" +486 83 evaluator """rankbased""" +486 84 dataset """kinships""" +486 84 model """proje""" +486 84 loss """softplus""" +486 84 regularizer """no""" +486 84 optimizer """adam""" +486 84 training_loop """owa""" +486 84 negative_sampler """basic""" +486 84 evaluator """rankbased""" +486 85 dataset """kinships""" +486 85 model """proje""" +486 85 loss """softplus""" +486 85 regularizer """no""" +486 85 optimizer """adam""" +486 85 training_loop """owa""" +486 85 negative_sampler """basic""" +486 85 evaluator """rankbased""" +486 86 dataset """kinships""" +486 86 model """proje""" +486 86 loss """softplus""" +486 86 regularizer """no""" +486 86 optimizer """adam""" +486 86 training_loop """owa""" +486 86 negative_sampler """basic""" +486 86 evaluator """rankbased""" +486 87 dataset """kinships""" +486 87 model """proje""" +486 87 loss """softplus""" +486 87 regularizer """no""" +486 87 optimizer """adam""" +486 87 
training_loop """owa""" +486 87 negative_sampler """basic""" +486 87 evaluator """rankbased""" +486 88 dataset """kinships""" +486 88 model """proje""" +486 88 loss """softplus""" +486 88 regularizer """no""" +486 88 optimizer """adam""" +486 88 training_loop """owa""" +486 88 negative_sampler """basic""" +486 88 evaluator """rankbased""" +486 89 dataset """kinships""" +486 89 model """proje""" +486 89 loss """softplus""" +486 89 regularizer """no""" +486 89 optimizer """adam""" +486 89 training_loop """owa""" +486 89 negative_sampler """basic""" +486 89 evaluator """rankbased""" +486 90 dataset """kinships""" +486 90 model """proje""" +486 90 loss """softplus""" +486 90 regularizer """no""" +486 90 optimizer """adam""" +486 90 training_loop """owa""" +486 90 negative_sampler """basic""" +486 90 evaluator """rankbased""" +486 91 dataset """kinships""" +486 91 model """proje""" +486 91 loss """softplus""" +486 91 regularizer """no""" +486 91 optimizer """adam""" +486 91 training_loop """owa""" +486 91 negative_sampler """basic""" +486 91 evaluator """rankbased""" +486 92 dataset """kinships""" +486 92 model """proje""" +486 92 loss """softplus""" +486 92 regularizer """no""" +486 92 optimizer """adam""" +486 92 training_loop """owa""" +486 92 negative_sampler """basic""" +486 92 evaluator """rankbased""" +486 93 dataset """kinships""" +486 93 model """proje""" +486 93 loss """softplus""" +486 93 regularizer """no""" +486 93 optimizer """adam""" +486 93 training_loop """owa""" +486 93 negative_sampler """basic""" +486 93 evaluator """rankbased""" +486 94 dataset """kinships""" +486 94 model """proje""" +486 94 loss """softplus""" +486 94 regularizer """no""" +486 94 optimizer """adam""" +486 94 training_loop """owa""" +486 94 negative_sampler """basic""" +486 94 evaluator """rankbased""" +486 95 dataset """kinships""" +486 95 model """proje""" +486 95 loss """softplus""" +486 95 regularizer """no""" +486 95 optimizer """adam""" +486 95 training_loop """owa""" +486 95 
negative_sampler """basic""" +486 95 evaluator """rankbased""" +486 96 dataset """kinships""" +486 96 model """proje""" +486 96 loss """softplus""" +486 96 regularizer """no""" +486 96 optimizer """adam""" +486 96 training_loop """owa""" +486 96 negative_sampler """basic""" +486 96 evaluator """rankbased""" +486 97 dataset """kinships""" +486 97 model """proje""" +486 97 loss """softplus""" +486 97 regularizer """no""" +486 97 optimizer """adam""" +486 97 training_loop """owa""" +486 97 negative_sampler """basic""" +486 97 evaluator """rankbased""" +486 98 dataset """kinships""" +486 98 model """proje""" +486 98 loss """softplus""" +486 98 regularizer """no""" +486 98 optimizer """adam""" +486 98 training_loop """owa""" +486 98 negative_sampler """basic""" +486 98 evaluator """rankbased""" +486 99 dataset """kinships""" +486 99 model """proje""" +486 99 loss """softplus""" +486 99 regularizer """no""" +486 99 optimizer """adam""" +486 99 training_loop """owa""" +486 99 negative_sampler """basic""" +486 99 evaluator """rankbased""" +486 100 dataset """kinships""" +486 100 model """proje""" +486 100 loss """softplus""" +486 100 regularizer """no""" +486 100 optimizer """adam""" +486 100 training_loop """owa""" +486 100 negative_sampler """basic""" +486 100 evaluator """rankbased""" +487 1 model.embedding_dim 2.0 +487 1 optimizer.lr 0.06868658791231687 +487 1 negative_sampler.num_negs_per_pos 53.0 +487 1 training.batch_size 1.0 +487 2 model.embedding_dim 1.0 +487 2 optimizer.lr 0.042756757469471836 +487 2 negative_sampler.num_negs_per_pos 95.0 +487 2 training.batch_size 1.0 +487 3 model.embedding_dim 2.0 +487 3 optimizer.lr 0.01518904912655843 +487 3 negative_sampler.num_negs_per_pos 1.0 +487 3 training.batch_size 2.0 +487 4 model.embedding_dim 2.0 +487 4 optimizer.lr 0.014770775612286475 +487 4 negative_sampler.num_negs_per_pos 47.0 +487 4 training.batch_size 0.0 +487 5 model.embedding_dim 0.0 +487 5 optimizer.lr 0.06473558724910534 +487 5 
negative_sampler.num_negs_per_pos 54.0 +487 5 training.batch_size 1.0 +487 6 model.embedding_dim 0.0 +487 6 optimizer.lr 0.0012704707923285716 +487 6 negative_sampler.num_negs_per_pos 24.0 +487 6 training.batch_size 0.0 +487 7 model.embedding_dim 2.0 +487 7 optimizer.lr 0.01882030594692207 +487 7 negative_sampler.num_negs_per_pos 56.0 +487 7 training.batch_size 0.0 +487 8 model.embedding_dim 1.0 +487 8 optimizer.lr 0.013081138090233955 +487 8 negative_sampler.num_negs_per_pos 61.0 +487 8 training.batch_size 0.0 +487 9 model.embedding_dim 2.0 +487 9 optimizer.lr 0.01158210800639633 +487 9 negative_sampler.num_negs_per_pos 76.0 +487 9 training.batch_size 2.0 +487 10 model.embedding_dim 1.0 +487 10 optimizer.lr 0.002098109939722676 +487 10 negative_sampler.num_negs_per_pos 27.0 +487 10 training.batch_size 1.0 +487 11 model.embedding_dim 0.0 +487 11 optimizer.lr 0.08124473880574998 +487 11 negative_sampler.num_negs_per_pos 10.0 +487 11 training.batch_size 1.0 +487 12 model.embedding_dim 2.0 +487 12 optimizer.lr 0.0010235150329668456 +487 12 negative_sampler.num_negs_per_pos 83.0 +487 12 training.batch_size 2.0 +487 13 model.embedding_dim 2.0 +487 13 optimizer.lr 0.0010862035440446565 +487 13 negative_sampler.num_negs_per_pos 64.0 +487 13 training.batch_size 1.0 +487 14 model.embedding_dim 1.0 +487 14 optimizer.lr 0.02359158138395999 +487 14 negative_sampler.num_negs_per_pos 56.0 +487 14 training.batch_size 0.0 +487 15 model.embedding_dim 1.0 +487 15 optimizer.lr 0.03455656222243381 +487 15 negative_sampler.num_negs_per_pos 61.0 +487 15 training.batch_size 2.0 +487 16 model.embedding_dim 0.0 +487 16 optimizer.lr 0.008988268575985127 +487 16 negative_sampler.num_negs_per_pos 17.0 +487 16 training.batch_size 1.0 +487 17 model.embedding_dim 2.0 +487 17 optimizer.lr 0.010001012860297271 +487 17 negative_sampler.num_negs_per_pos 80.0 +487 17 training.batch_size 2.0 +487 18 model.embedding_dim 2.0 +487 18 optimizer.lr 0.0380855347562163 +487 18 
negative_sampler.num_negs_per_pos 42.0 +487 18 training.batch_size 1.0 +487 19 model.embedding_dim 0.0 +487 19 optimizer.lr 0.004343126070285255 +487 19 negative_sampler.num_negs_per_pos 92.0 +487 19 training.batch_size 1.0 +487 20 model.embedding_dim 2.0 +487 20 optimizer.lr 0.0029281022035228233 +487 20 negative_sampler.num_negs_per_pos 23.0 +487 20 training.batch_size 2.0 +487 21 model.embedding_dim 0.0 +487 21 optimizer.lr 0.0026054633467739213 +487 21 negative_sampler.num_negs_per_pos 14.0 +487 21 training.batch_size 1.0 +487 22 model.embedding_dim 0.0 +487 22 optimizer.lr 0.021026599410016937 +487 22 negative_sampler.num_negs_per_pos 21.0 +487 22 training.batch_size 2.0 +487 23 model.embedding_dim 2.0 +487 23 optimizer.lr 0.02461734300574323 +487 23 negative_sampler.num_negs_per_pos 55.0 +487 23 training.batch_size 2.0 +487 24 model.embedding_dim 2.0 +487 24 optimizer.lr 0.001190238203138798 +487 24 negative_sampler.num_negs_per_pos 12.0 +487 24 training.batch_size 2.0 +487 25 model.embedding_dim 0.0 +487 25 optimizer.lr 0.057684122270303735 +487 25 negative_sampler.num_negs_per_pos 89.0 +487 25 training.batch_size 0.0 +487 26 model.embedding_dim 2.0 +487 26 optimizer.lr 0.05152207293461484 +487 26 negative_sampler.num_negs_per_pos 80.0 +487 26 training.batch_size 0.0 +487 27 model.embedding_dim 2.0 +487 27 optimizer.lr 0.025871960020493266 +487 27 negative_sampler.num_negs_per_pos 76.0 +487 27 training.batch_size 1.0 +487 28 model.embedding_dim 0.0 +487 28 optimizer.lr 0.004308049031542257 +487 28 negative_sampler.num_negs_per_pos 70.0 +487 28 training.batch_size 2.0 +487 29 model.embedding_dim 0.0 +487 29 optimizer.lr 0.030045452643258135 +487 29 negative_sampler.num_negs_per_pos 52.0 +487 29 training.batch_size 2.0 +487 30 model.embedding_dim 0.0 +487 30 optimizer.lr 0.003745501718095675 +487 30 negative_sampler.num_negs_per_pos 50.0 +487 30 training.batch_size 0.0 +487 31 model.embedding_dim 2.0 +487 31 optimizer.lr 0.09556772547003392 +487 31 
negative_sampler.num_negs_per_pos 73.0 +487 31 training.batch_size 1.0 +487 32 model.embedding_dim 2.0 +487 32 optimizer.lr 0.0031338154985693263 +487 32 negative_sampler.num_negs_per_pos 73.0 +487 32 training.batch_size 0.0 +487 33 model.embedding_dim 2.0 +487 33 optimizer.lr 0.002716477514015927 +487 33 negative_sampler.num_negs_per_pos 75.0 +487 33 training.batch_size 2.0 +487 34 model.embedding_dim 0.0 +487 34 optimizer.lr 0.005913112050161391 +487 34 negative_sampler.num_negs_per_pos 87.0 +487 34 training.batch_size 1.0 +487 35 model.embedding_dim 0.0 +487 35 optimizer.lr 0.0014055977189070062 +487 35 negative_sampler.num_negs_per_pos 26.0 +487 35 training.batch_size 0.0 +487 36 model.embedding_dim 1.0 +487 36 optimizer.lr 0.006306687193722848 +487 36 negative_sampler.num_negs_per_pos 79.0 +487 36 training.batch_size 2.0 +487 37 model.embedding_dim 1.0 +487 37 optimizer.lr 0.014184603374373028 +487 37 negative_sampler.num_negs_per_pos 50.0 +487 37 training.batch_size 2.0 +487 38 model.embedding_dim 2.0 +487 38 optimizer.lr 0.00758751736113948 +487 38 negative_sampler.num_negs_per_pos 68.0 +487 38 training.batch_size 1.0 +487 39 model.embedding_dim 1.0 +487 39 optimizer.lr 0.05618323406296323 +487 39 negative_sampler.num_negs_per_pos 39.0 +487 39 training.batch_size 1.0 +487 40 model.embedding_dim 1.0 +487 40 optimizer.lr 0.07441975081363303 +487 40 negative_sampler.num_negs_per_pos 90.0 +487 40 training.batch_size 0.0 +487 41 model.embedding_dim 2.0 +487 41 optimizer.lr 0.0013465807317974909 +487 41 negative_sampler.num_negs_per_pos 62.0 +487 41 training.batch_size 0.0 +487 42 model.embedding_dim 0.0 +487 42 optimizer.lr 0.004962441543983169 +487 42 negative_sampler.num_negs_per_pos 54.0 +487 42 training.batch_size 2.0 +487 43 model.embedding_dim 2.0 +487 43 optimizer.lr 0.022625080584365263 +487 43 negative_sampler.num_negs_per_pos 40.0 +487 43 training.batch_size 1.0 +487 44 model.embedding_dim 0.0 +487 44 optimizer.lr 0.003251606029077736 +487 44 
negative_sampler.num_negs_per_pos 21.0 +487 44 training.batch_size 1.0 +487 45 model.embedding_dim 1.0 +487 45 optimizer.lr 0.04097956051773358 +487 45 negative_sampler.num_negs_per_pos 26.0 +487 45 training.batch_size 2.0 +487 46 model.embedding_dim 0.0 +487 46 optimizer.lr 0.0447248662519591 +487 46 negative_sampler.num_negs_per_pos 95.0 +487 46 training.batch_size 1.0 +487 47 model.embedding_dim 2.0 +487 47 optimizer.lr 0.09588315033084506 +487 47 negative_sampler.num_negs_per_pos 30.0 +487 47 training.batch_size 2.0 +487 48 model.embedding_dim 0.0 +487 48 optimizer.lr 0.024090641475296617 +487 48 negative_sampler.num_negs_per_pos 43.0 +487 48 training.batch_size 1.0 +487 49 model.embedding_dim 2.0 +487 49 optimizer.lr 0.0010516085237884303 +487 49 negative_sampler.num_negs_per_pos 47.0 +487 49 training.batch_size 2.0 +487 50 model.embedding_dim 2.0 +487 50 optimizer.lr 0.003439469356148355 +487 50 negative_sampler.num_negs_per_pos 64.0 +487 50 training.batch_size 1.0 +487 51 model.embedding_dim 2.0 +487 51 optimizer.lr 0.0010975664982621968 +487 51 negative_sampler.num_negs_per_pos 63.0 +487 51 training.batch_size 1.0 +487 52 model.embedding_dim 1.0 +487 52 optimizer.lr 0.00549374959144844 +487 52 negative_sampler.num_negs_per_pos 46.0 +487 52 training.batch_size 0.0 +487 53 model.embedding_dim 2.0 +487 53 optimizer.lr 0.002802497761400664 +487 53 negative_sampler.num_negs_per_pos 24.0 +487 53 training.batch_size 1.0 +487 54 model.embedding_dim 1.0 +487 54 optimizer.lr 0.09239343090387087 +487 54 negative_sampler.num_negs_per_pos 39.0 +487 54 training.batch_size 0.0 +487 55 model.embedding_dim 1.0 +487 55 optimizer.lr 0.006488615991443324 +487 55 negative_sampler.num_negs_per_pos 41.0 +487 55 training.batch_size 2.0 +487 56 model.embedding_dim 1.0 +487 56 optimizer.lr 0.006559469467646158 +487 56 negative_sampler.num_negs_per_pos 19.0 +487 56 training.batch_size 0.0 +487 57 model.embedding_dim 2.0 +487 57 optimizer.lr 0.005589422960018584 +487 57 
negative_sampler.num_negs_per_pos 48.0 +487 57 training.batch_size 2.0 +487 58 model.embedding_dim 0.0 +487 58 optimizer.lr 0.0037279879449089266 +487 58 negative_sampler.num_negs_per_pos 92.0 +487 58 training.batch_size 1.0 +487 59 model.embedding_dim 0.0 +487 59 optimizer.lr 0.08296417506453153 +487 59 negative_sampler.num_negs_per_pos 37.0 +487 59 training.batch_size 2.0 +487 60 model.embedding_dim 1.0 +487 60 optimizer.lr 0.032923207573534646 +487 60 negative_sampler.num_negs_per_pos 31.0 +487 60 training.batch_size 2.0 +487 61 model.embedding_dim 1.0 +487 61 optimizer.lr 0.004091864658887093 +487 61 negative_sampler.num_negs_per_pos 83.0 +487 61 training.batch_size 0.0 +487 62 model.embedding_dim 0.0 +487 62 optimizer.lr 0.06020732875545382 +487 62 negative_sampler.num_negs_per_pos 34.0 +487 62 training.batch_size 0.0 +487 63 model.embedding_dim 1.0 +487 63 optimizer.lr 0.0021828196398818076 +487 63 negative_sampler.num_negs_per_pos 33.0 +487 63 training.batch_size 0.0 +487 64 model.embedding_dim 1.0 +487 64 optimizer.lr 0.014513937671639946 +487 64 negative_sampler.num_negs_per_pos 50.0 +487 64 training.batch_size 0.0 +487 65 model.embedding_dim 1.0 +487 65 optimizer.lr 0.004381546580593922 +487 65 negative_sampler.num_negs_per_pos 96.0 +487 65 training.batch_size 0.0 +487 66 model.embedding_dim 1.0 +487 66 optimizer.lr 0.009502684221336086 +487 66 negative_sampler.num_negs_per_pos 19.0 +487 66 training.batch_size 0.0 +487 67 model.embedding_dim 1.0 +487 67 optimizer.lr 0.07516261020819118 +487 67 negative_sampler.num_negs_per_pos 88.0 +487 67 training.batch_size 2.0 +487 68 model.embedding_dim 0.0 +487 68 optimizer.lr 0.007627470632463579 +487 68 negative_sampler.num_negs_per_pos 5.0 +487 68 training.batch_size 1.0 +487 69 model.embedding_dim 1.0 +487 69 optimizer.lr 0.01112076779254336 +487 69 negative_sampler.num_negs_per_pos 83.0 +487 69 training.batch_size 0.0 +487 70 model.embedding_dim 1.0 +487 70 optimizer.lr 0.0010625576318761274 +487 70 
negative_sampler.num_negs_per_pos 5.0 +487 70 training.batch_size 0.0 +487 71 model.embedding_dim 1.0 +487 71 optimizer.lr 0.013435179870371414 +487 71 negative_sampler.num_negs_per_pos 58.0 +487 71 training.batch_size 0.0 +487 72 model.embedding_dim 2.0 +487 72 optimizer.lr 0.02536831619516814 +487 72 negative_sampler.num_negs_per_pos 54.0 +487 72 training.batch_size 1.0 +487 73 model.embedding_dim 0.0 +487 73 optimizer.lr 0.009730590363642145 +487 73 negative_sampler.num_negs_per_pos 63.0 +487 73 training.batch_size 0.0 +487 74 model.embedding_dim 1.0 +487 74 optimizer.lr 0.022978894244268796 +487 74 negative_sampler.num_negs_per_pos 36.0 +487 74 training.batch_size 1.0 +487 75 model.embedding_dim 1.0 +487 75 optimizer.lr 0.02699512345609647 +487 75 negative_sampler.num_negs_per_pos 54.0 +487 75 training.batch_size 2.0 +487 76 model.embedding_dim 1.0 +487 76 optimizer.lr 0.0031318401649950795 +487 76 negative_sampler.num_negs_per_pos 18.0 +487 76 training.batch_size 0.0 +487 77 model.embedding_dim 1.0 +487 77 optimizer.lr 0.0054608659259582516 +487 77 negative_sampler.num_negs_per_pos 87.0 +487 77 training.batch_size 0.0 +487 78 model.embedding_dim 1.0 +487 78 optimizer.lr 0.019430737109065687 +487 78 negative_sampler.num_negs_per_pos 62.0 +487 78 training.batch_size 1.0 +487 79 model.embedding_dim 2.0 +487 79 optimizer.lr 0.04391185166481091 +487 79 negative_sampler.num_negs_per_pos 11.0 +487 79 training.batch_size 0.0 +487 80 model.embedding_dim 1.0 +487 80 optimizer.lr 0.02082335623642347 +487 80 negative_sampler.num_negs_per_pos 63.0 +487 80 training.batch_size 1.0 +487 81 model.embedding_dim 2.0 +487 81 optimizer.lr 0.010236273053296907 +487 81 negative_sampler.num_negs_per_pos 99.0 +487 81 training.batch_size 0.0 +487 82 model.embedding_dim 0.0 +487 82 optimizer.lr 0.01187925236778624 +487 82 negative_sampler.num_negs_per_pos 61.0 +487 82 training.batch_size 2.0 +487 83 model.embedding_dim 1.0 +487 83 optimizer.lr 0.003340031103408346 +487 83 
negative_sampler.num_negs_per_pos 61.0 +487 83 training.batch_size 0.0 +487 84 model.embedding_dim 0.0 +487 84 optimizer.lr 0.0028950446473094077 +487 84 negative_sampler.num_negs_per_pos 21.0 +487 84 training.batch_size 2.0 +487 85 model.embedding_dim 0.0 +487 85 optimizer.lr 0.002916854996993578 +487 85 negative_sampler.num_negs_per_pos 47.0 +487 85 training.batch_size 0.0 +487 86 model.embedding_dim 0.0 +487 86 optimizer.lr 0.0063988636972523594 +487 86 negative_sampler.num_negs_per_pos 73.0 +487 86 training.batch_size 2.0 +487 87 model.embedding_dim 0.0 +487 87 optimizer.lr 0.007948057889661577 +487 87 negative_sampler.num_negs_per_pos 95.0 +487 87 training.batch_size 0.0 +487 88 model.embedding_dim 2.0 +487 88 optimizer.lr 0.007610607886477003 +487 88 negative_sampler.num_negs_per_pos 12.0 +487 88 training.batch_size 1.0 +487 89 model.embedding_dim 2.0 +487 89 optimizer.lr 0.002010390761097894 +487 89 negative_sampler.num_negs_per_pos 1.0 +487 89 training.batch_size 2.0 +487 90 model.embedding_dim 1.0 +487 90 optimizer.lr 0.0031147042580425277 +487 90 negative_sampler.num_negs_per_pos 53.0 +487 90 training.batch_size 1.0 +487 91 model.embedding_dim 0.0 +487 91 optimizer.lr 0.0016473815237144209 +487 91 negative_sampler.num_negs_per_pos 35.0 +487 91 training.batch_size 2.0 +487 92 model.embedding_dim 2.0 +487 92 optimizer.lr 0.0010927629865920276 +487 92 negative_sampler.num_negs_per_pos 89.0 +487 92 training.batch_size 2.0 +487 93 model.embedding_dim 2.0 +487 93 optimizer.lr 0.001938602968336277 +487 93 negative_sampler.num_negs_per_pos 49.0 +487 93 training.batch_size 2.0 +487 94 model.embedding_dim 2.0 +487 94 optimizer.lr 0.035110011430110964 +487 94 negative_sampler.num_negs_per_pos 80.0 +487 94 training.batch_size 1.0 +487 95 model.embedding_dim 1.0 +487 95 optimizer.lr 0.008870581850840796 +487 95 negative_sampler.num_negs_per_pos 30.0 +487 95 training.batch_size 2.0 +487 96 model.embedding_dim 1.0 +487 96 optimizer.lr 0.08037888671060671 +487 96 
negative_sampler.num_negs_per_pos 43.0 +487 96 training.batch_size 1.0 +487 97 model.embedding_dim 0.0 +487 97 optimizer.lr 0.0031650468189387015 +487 97 negative_sampler.num_negs_per_pos 70.0 +487 97 training.batch_size 2.0 +487 98 model.embedding_dim 1.0 +487 98 optimizer.lr 0.0012068435817927743 +487 98 negative_sampler.num_negs_per_pos 30.0 +487 98 training.batch_size 2.0 +487 99 model.embedding_dim 1.0 +487 99 optimizer.lr 0.007243925917607531 +487 99 negative_sampler.num_negs_per_pos 7.0 +487 99 training.batch_size 0.0 +487 100 model.embedding_dim 0.0 +487 100 optimizer.lr 0.006610736273503752 +487 100 negative_sampler.num_negs_per_pos 96.0 +487 100 training.batch_size 2.0 +487 1 dataset """kinships""" +487 1 model """proje""" +487 1 loss """bceaftersigmoid""" +487 1 regularizer """no""" +487 1 optimizer """adam""" +487 1 training_loop """owa""" +487 1 negative_sampler """basic""" +487 1 evaluator """rankbased""" +487 2 dataset """kinships""" +487 2 model """proje""" +487 2 loss """bceaftersigmoid""" +487 2 regularizer """no""" +487 2 optimizer """adam""" +487 2 training_loop """owa""" +487 2 negative_sampler """basic""" +487 2 evaluator """rankbased""" +487 3 dataset """kinships""" +487 3 model """proje""" +487 3 loss """bceaftersigmoid""" +487 3 regularizer """no""" +487 3 optimizer """adam""" +487 3 training_loop """owa""" +487 3 negative_sampler """basic""" +487 3 evaluator """rankbased""" +487 4 dataset """kinships""" +487 4 model """proje""" +487 4 loss """bceaftersigmoid""" +487 4 regularizer """no""" +487 4 optimizer """adam""" +487 4 training_loop """owa""" +487 4 negative_sampler """basic""" +487 4 evaluator """rankbased""" +487 5 dataset """kinships""" +487 5 model """proje""" +487 5 loss """bceaftersigmoid""" +487 5 regularizer """no""" +487 5 optimizer """adam""" +487 5 training_loop """owa""" +487 5 negative_sampler """basic""" +487 5 evaluator """rankbased""" +487 6 dataset """kinships""" +487 6 model """proje""" +487 6 loss 
"""bceaftersigmoid""" +487 6 regularizer """no""" +487 6 optimizer """adam""" +487 6 training_loop """owa""" +487 6 negative_sampler """basic""" +487 6 evaluator """rankbased""" +487 7 dataset """kinships""" +487 7 model """proje""" +487 7 loss """bceaftersigmoid""" +487 7 regularizer """no""" +487 7 optimizer """adam""" +487 7 training_loop """owa""" +487 7 negative_sampler """basic""" +487 7 evaluator """rankbased""" +487 8 dataset """kinships""" +487 8 model """proje""" +487 8 loss """bceaftersigmoid""" +487 8 regularizer """no""" +487 8 optimizer """adam""" +487 8 training_loop """owa""" +487 8 negative_sampler """basic""" +487 8 evaluator """rankbased""" +487 9 dataset """kinships""" +487 9 model """proje""" +487 9 loss """bceaftersigmoid""" +487 9 regularizer """no""" +487 9 optimizer """adam""" +487 9 training_loop """owa""" +487 9 negative_sampler """basic""" +487 9 evaluator """rankbased""" +487 10 dataset """kinships""" +487 10 model """proje""" +487 10 loss """bceaftersigmoid""" +487 10 regularizer """no""" +487 10 optimizer """adam""" +487 10 training_loop """owa""" +487 10 negative_sampler """basic""" +487 10 evaluator """rankbased""" +487 11 dataset """kinships""" +487 11 model """proje""" +487 11 loss """bceaftersigmoid""" +487 11 regularizer """no""" +487 11 optimizer """adam""" +487 11 training_loop """owa""" +487 11 negative_sampler """basic""" +487 11 evaluator """rankbased""" +487 12 dataset """kinships""" +487 12 model """proje""" +487 12 loss """bceaftersigmoid""" +487 12 regularizer """no""" +487 12 optimizer """adam""" +487 12 training_loop """owa""" +487 12 negative_sampler """basic""" +487 12 evaluator """rankbased""" +487 13 dataset """kinships""" +487 13 model """proje""" +487 13 loss """bceaftersigmoid""" +487 13 regularizer """no""" +487 13 optimizer """adam""" +487 13 training_loop """owa""" +487 13 negative_sampler """basic""" +487 13 evaluator """rankbased""" +487 14 dataset """kinships""" +487 14 model """proje""" +487 14 loss 
"""bceaftersigmoid""" +487 14 regularizer """no""" +487 14 optimizer """adam""" +487 14 training_loop """owa""" +487 14 negative_sampler """basic""" +487 14 evaluator """rankbased""" +487 15 dataset """kinships""" +487 15 model """proje""" +487 15 loss """bceaftersigmoid""" +487 15 regularizer """no""" +487 15 optimizer """adam""" +487 15 training_loop """owa""" +487 15 negative_sampler """basic""" +487 15 evaluator """rankbased""" +487 16 dataset """kinships""" +487 16 model """proje""" +487 16 loss """bceaftersigmoid""" +487 16 regularizer """no""" +487 16 optimizer """adam""" +487 16 training_loop """owa""" +487 16 negative_sampler """basic""" +487 16 evaluator """rankbased""" +487 17 dataset """kinships""" +487 17 model """proje""" +487 17 loss """bceaftersigmoid""" +487 17 regularizer """no""" +487 17 optimizer """adam""" +487 17 training_loop """owa""" +487 17 negative_sampler """basic""" +487 17 evaluator """rankbased""" +487 18 dataset """kinships""" +487 18 model """proje""" +487 18 loss """bceaftersigmoid""" +487 18 regularizer """no""" +487 18 optimizer """adam""" +487 18 training_loop """owa""" +487 18 negative_sampler """basic""" +487 18 evaluator """rankbased""" +487 19 dataset """kinships""" +487 19 model """proje""" +487 19 loss """bceaftersigmoid""" +487 19 regularizer """no""" +487 19 optimizer """adam""" +487 19 training_loop """owa""" +487 19 negative_sampler """basic""" +487 19 evaluator """rankbased""" +487 20 dataset """kinships""" +487 20 model """proje""" +487 20 loss """bceaftersigmoid""" +487 20 regularizer """no""" +487 20 optimizer """adam""" +487 20 training_loop """owa""" +487 20 negative_sampler """basic""" +487 20 evaluator """rankbased""" +487 21 dataset """kinships""" +487 21 model """proje""" +487 21 loss """bceaftersigmoid""" +487 21 regularizer """no""" +487 21 optimizer """adam""" +487 21 training_loop """owa""" +487 21 negative_sampler """basic""" +487 21 evaluator """rankbased""" +487 22 dataset """kinships""" +487 22 model 
"""proje""" +487 22 loss """bceaftersigmoid""" +487 22 regularizer """no""" +487 22 optimizer """adam""" +487 22 training_loop """owa""" +487 22 negative_sampler """basic""" +487 22 evaluator """rankbased""" +487 23 dataset """kinships""" +487 23 model """proje""" +487 23 loss """bceaftersigmoid""" +487 23 regularizer """no""" +487 23 optimizer """adam""" +487 23 training_loop """owa""" +487 23 negative_sampler """basic""" +487 23 evaluator """rankbased""" +487 24 dataset """kinships""" +487 24 model """proje""" +487 24 loss """bceaftersigmoid""" +487 24 regularizer """no""" +487 24 optimizer """adam""" +487 24 training_loop """owa""" +487 24 negative_sampler """basic""" +487 24 evaluator """rankbased""" +487 25 dataset """kinships""" +487 25 model """proje""" +487 25 loss """bceaftersigmoid""" +487 25 regularizer """no""" +487 25 optimizer """adam""" +487 25 training_loop """owa""" +487 25 negative_sampler """basic""" +487 25 evaluator """rankbased""" +487 26 dataset """kinships""" +487 26 model """proje""" +487 26 loss """bceaftersigmoid""" +487 26 regularizer """no""" +487 26 optimizer """adam""" +487 26 training_loop """owa""" +487 26 negative_sampler """basic""" +487 26 evaluator """rankbased""" +487 27 dataset """kinships""" +487 27 model """proje""" +487 27 loss """bceaftersigmoid""" +487 27 regularizer """no""" +487 27 optimizer """adam""" +487 27 training_loop """owa""" +487 27 negative_sampler """basic""" +487 27 evaluator """rankbased""" +487 28 dataset """kinships""" +487 28 model """proje""" +487 28 loss """bceaftersigmoid""" +487 28 regularizer """no""" +487 28 optimizer """adam""" +487 28 training_loop """owa""" +487 28 negative_sampler """basic""" +487 28 evaluator """rankbased""" +487 29 dataset """kinships""" +487 29 model """proje""" +487 29 loss """bceaftersigmoid""" +487 29 regularizer """no""" +487 29 optimizer """adam""" +487 29 training_loop """owa""" +487 29 negative_sampler """basic""" +487 29 evaluator """rankbased""" +487 30 dataset 
"""kinships""" +487 30 model """proje""" +487 30 loss """bceaftersigmoid""" +487 30 regularizer """no""" +487 30 optimizer """adam""" +487 30 training_loop """owa""" +487 30 negative_sampler """basic""" +487 30 evaluator """rankbased""" +487 31 dataset """kinships""" +487 31 model """proje""" +487 31 loss """bceaftersigmoid""" +487 31 regularizer """no""" +487 31 optimizer """adam""" +487 31 training_loop """owa""" +487 31 negative_sampler """basic""" +487 31 evaluator """rankbased""" +487 32 dataset """kinships""" +487 32 model """proje""" +487 32 loss """bceaftersigmoid""" +487 32 regularizer """no""" +487 32 optimizer """adam""" +487 32 training_loop """owa""" +487 32 negative_sampler """basic""" +487 32 evaluator """rankbased""" +487 33 dataset """kinships""" +487 33 model """proje""" +487 33 loss """bceaftersigmoid""" +487 33 regularizer """no""" +487 33 optimizer """adam""" +487 33 training_loop """owa""" +487 33 negative_sampler """basic""" +487 33 evaluator """rankbased""" +487 34 dataset """kinships""" +487 34 model """proje""" +487 34 loss """bceaftersigmoid""" +487 34 regularizer """no""" +487 34 optimizer """adam""" +487 34 training_loop """owa""" +487 34 negative_sampler """basic""" +487 34 evaluator """rankbased""" +487 35 dataset """kinships""" +487 35 model """proje""" +487 35 loss """bceaftersigmoid""" +487 35 regularizer """no""" +487 35 optimizer """adam""" +487 35 training_loop """owa""" +487 35 negative_sampler """basic""" +487 35 evaluator """rankbased""" +487 36 dataset """kinships""" +487 36 model """proje""" +487 36 loss """bceaftersigmoid""" +487 36 regularizer """no""" +487 36 optimizer """adam""" +487 36 training_loop """owa""" +487 36 negative_sampler """basic""" +487 36 evaluator """rankbased""" +487 37 dataset """kinships""" +487 37 model """proje""" +487 37 loss """bceaftersigmoid""" +487 37 regularizer """no""" +487 37 optimizer """adam""" +487 37 training_loop """owa""" +487 37 negative_sampler """basic""" +487 37 evaluator 
"""rankbased""" +487 38 dataset """kinships""" +487 38 model """proje""" +487 38 loss """bceaftersigmoid""" +487 38 regularizer """no""" +487 38 optimizer """adam""" +487 38 training_loop """owa""" +487 38 negative_sampler """basic""" +487 38 evaluator """rankbased""" +487 39 dataset """kinships""" +487 39 model """proje""" +487 39 loss """bceaftersigmoid""" +487 39 regularizer """no""" +487 39 optimizer """adam""" +487 39 training_loop """owa""" +487 39 negative_sampler """basic""" +487 39 evaluator """rankbased""" +487 40 dataset """kinships""" +487 40 model """proje""" +487 40 loss """bceaftersigmoid""" +487 40 regularizer """no""" +487 40 optimizer """adam""" +487 40 training_loop """owa""" +487 40 negative_sampler """basic""" +487 40 evaluator """rankbased""" +487 41 dataset """kinships""" +487 41 model """proje""" +487 41 loss """bceaftersigmoid""" +487 41 regularizer """no""" +487 41 optimizer """adam""" +487 41 training_loop """owa""" +487 41 negative_sampler """basic""" +487 41 evaluator """rankbased""" +487 42 dataset """kinships""" +487 42 model """proje""" +487 42 loss """bceaftersigmoid""" +487 42 regularizer """no""" +487 42 optimizer """adam""" +487 42 training_loop """owa""" +487 42 negative_sampler """basic""" +487 42 evaluator """rankbased""" +487 43 dataset """kinships""" +487 43 model """proje""" +487 43 loss """bceaftersigmoid""" +487 43 regularizer """no""" +487 43 optimizer """adam""" +487 43 training_loop """owa""" +487 43 negative_sampler """basic""" +487 43 evaluator """rankbased""" +487 44 dataset """kinships""" +487 44 model """proje""" +487 44 loss """bceaftersigmoid""" +487 44 regularizer """no""" +487 44 optimizer """adam""" +487 44 training_loop """owa""" +487 44 negative_sampler """basic""" +487 44 evaluator """rankbased""" +487 45 dataset """kinships""" +487 45 model """proje""" +487 45 loss """bceaftersigmoid""" +487 45 regularizer """no""" +487 45 optimizer """adam""" +487 45 training_loop """owa""" +487 45 negative_sampler 
"""basic""" +487 45 evaluator """rankbased""" +487 46 dataset """kinships""" +487 46 model """proje""" +487 46 loss """bceaftersigmoid""" +487 46 regularizer """no""" +487 46 optimizer """adam""" +487 46 training_loop """owa""" +487 46 negative_sampler """basic""" +487 46 evaluator """rankbased""" +487 47 dataset """kinships""" +487 47 model """proje""" +487 47 loss """bceaftersigmoid""" +487 47 regularizer """no""" +487 47 optimizer """adam""" +487 47 training_loop """owa""" +487 47 negative_sampler """basic""" +487 47 evaluator """rankbased""" +487 48 dataset """kinships""" +487 48 model """proje""" +487 48 loss """bceaftersigmoid""" +487 48 regularizer """no""" +487 48 optimizer """adam""" +487 48 training_loop """owa""" +487 48 negative_sampler """basic""" +487 48 evaluator """rankbased""" +487 49 dataset """kinships""" +487 49 model """proje""" +487 49 loss """bceaftersigmoid""" +487 49 regularizer """no""" +487 49 optimizer """adam""" +487 49 training_loop """owa""" +487 49 negative_sampler """basic""" +487 49 evaluator """rankbased""" +487 50 dataset """kinships""" +487 50 model """proje""" +487 50 loss """bceaftersigmoid""" +487 50 regularizer """no""" +487 50 optimizer """adam""" +487 50 training_loop """owa""" +487 50 negative_sampler """basic""" +487 50 evaluator """rankbased""" +487 51 dataset """kinships""" +487 51 model """proje""" +487 51 loss """bceaftersigmoid""" +487 51 regularizer """no""" +487 51 optimizer """adam""" +487 51 training_loop """owa""" +487 51 negative_sampler """basic""" +487 51 evaluator """rankbased""" +487 52 dataset """kinships""" +487 52 model """proje""" +487 52 loss """bceaftersigmoid""" +487 52 regularizer """no""" +487 52 optimizer """adam""" +487 52 training_loop """owa""" +487 52 negative_sampler """basic""" +487 52 evaluator """rankbased""" +487 53 dataset """kinships""" +487 53 model """proje""" +487 53 loss """bceaftersigmoid""" +487 53 regularizer """no""" +487 53 optimizer """adam""" +487 53 training_loop """owa""" 
+487 53 negative_sampler """basic""" +487 53 evaluator """rankbased""" +487 54 dataset """kinships""" +487 54 model """proje""" +487 54 loss """bceaftersigmoid""" +487 54 regularizer """no""" +487 54 optimizer """adam""" +487 54 training_loop """owa""" +487 54 negative_sampler """basic""" +487 54 evaluator """rankbased""" +487 55 dataset """kinships""" +487 55 model """proje""" +487 55 loss """bceaftersigmoid""" +487 55 regularizer """no""" +487 55 optimizer """adam""" +487 55 training_loop """owa""" +487 55 negative_sampler """basic""" +487 55 evaluator """rankbased""" +487 56 dataset """kinships""" +487 56 model """proje""" +487 56 loss """bceaftersigmoid""" +487 56 regularizer """no""" +487 56 optimizer """adam""" +487 56 training_loop """owa""" +487 56 negative_sampler """basic""" +487 56 evaluator """rankbased""" +487 57 dataset """kinships""" +487 57 model """proje""" +487 57 loss """bceaftersigmoid""" +487 57 regularizer """no""" +487 57 optimizer """adam""" +487 57 training_loop """owa""" +487 57 negative_sampler """basic""" +487 57 evaluator """rankbased""" +487 58 dataset """kinships""" +487 58 model """proje""" +487 58 loss """bceaftersigmoid""" +487 58 regularizer """no""" +487 58 optimizer """adam""" +487 58 training_loop """owa""" +487 58 negative_sampler """basic""" +487 58 evaluator """rankbased""" +487 59 dataset """kinships""" +487 59 model """proje""" +487 59 loss """bceaftersigmoid""" +487 59 regularizer """no""" +487 59 optimizer """adam""" +487 59 training_loop """owa""" +487 59 negative_sampler """basic""" +487 59 evaluator """rankbased""" +487 60 dataset """kinships""" +487 60 model """proje""" +487 60 loss """bceaftersigmoid""" +487 60 regularizer """no""" +487 60 optimizer """adam""" +487 60 training_loop """owa""" +487 60 negative_sampler """basic""" +487 60 evaluator """rankbased""" +487 61 dataset """kinships""" +487 61 model """proje""" +487 61 loss """bceaftersigmoid""" +487 61 regularizer """no""" +487 61 optimizer """adam""" +487 61 
training_loop """owa""" +487 61 negative_sampler """basic""" +487 61 evaluator """rankbased""" +487 62 dataset """kinships""" +487 62 model """proje""" +487 62 loss """bceaftersigmoid""" +487 62 regularizer """no""" +487 62 optimizer """adam""" +487 62 training_loop """owa""" +487 62 negative_sampler """basic""" +487 62 evaluator """rankbased""" +487 63 dataset """kinships""" +487 63 model """proje""" +487 63 loss """bceaftersigmoid""" +487 63 regularizer """no""" +487 63 optimizer """adam""" +487 63 training_loop """owa""" +487 63 negative_sampler """basic""" +487 63 evaluator """rankbased""" +487 64 dataset """kinships""" +487 64 model """proje""" +487 64 loss """bceaftersigmoid""" +487 64 regularizer """no""" +487 64 optimizer """adam""" +487 64 training_loop """owa""" +487 64 negative_sampler """basic""" +487 64 evaluator """rankbased""" +487 65 dataset """kinships""" +487 65 model """proje""" +487 65 loss """bceaftersigmoid""" +487 65 regularizer """no""" +487 65 optimizer """adam""" +487 65 training_loop """owa""" +487 65 negative_sampler """basic""" +487 65 evaluator """rankbased""" +487 66 dataset """kinships""" +487 66 model """proje""" +487 66 loss """bceaftersigmoid""" +487 66 regularizer """no""" +487 66 optimizer """adam""" +487 66 training_loop """owa""" +487 66 negative_sampler """basic""" +487 66 evaluator """rankbased""" +487 67 dataset """kinships""" +487 67 model """proje""" +487 67 loss """bceaftersigmoid""" +487 67 regularizer """no""" +487 67 optimizer """adam""" +487 67 training_loop """owa""" +487 67 negative_sampler """basic""" +487 67 evaluator """rankbased""" +487 68 dataset """kinships""" +487 68 model """proje""" +487 68 loss """bceaftersigmoid""" +487 68 regularizer """no""" +487 68 optimizer """adam""" +487 68 training_loop """owa""" +487 68 negative_sampler """basic""" +487 68 evaluator """rankbased""" +487 69 dataset """kinships""" +487 69 model """proje""" +487 69 loss """bceaftersigmoid""" +487 69 regularizer """no""" +487 69 
optimizer """adam""" +487 69 training_loop """owa""" +487 69 negative_sampler """basic""" +487 69 evaluator """rankbased""" +487 70 dataset """kinships""" +487 70 model """proje""" +487 70 loss """bceaftersigmoid""" +487 70 regularizer """no""" +487 70 optimizer """adam""" +487 70 training_loop """owa""" +487 70 negative_sampler """basic""" +487 70 evaluator """rankbased""" +487 71 dataset """kinships""" +487 71 model """proje""" +487 71 loss """bceaftersigmoid""" +487 71 regularizer """no""" +487 71 optimizer """adam""" +487 71 training_loop """owa""" +487 71 negative_sampler """basic""" +487 71 evaluator """rankbased""" +487 72 dataset """kinships""" +487 72 model """proje""" +487 72 loss """bceaftersigmoid""" +487 72 regularizer """no""" +487 72 optimizer """adam""" +487 72 training_loop """owa""" +487 72 negative_sampler """basic""" +487 72 evaluator """rankbased""" +487 73 dataset """kinships""" +487 73 model """proje""" +487 73 loss """bceaftersigmoid""" +487 73 regularizer """no""" +487 73 optimizer """adam""" +487 73 training_loop """owa""" +487 73 negative_sampler """basic""" +487 73 evaluator """rankbased""" +487 74 dataset """kinships""" +487 74 model """proje""" +487 74 loss """bceaftersigmoid""" +487 74 regularizer """no""" +487 74 optimizer """adam""" +487 74 training_loop """owa""" +487 74 negative_sampler """basic""" +487 74 evaluator """rankbased""" +487 75 dataset """kinships""" +487 75 model """proje""" +487 75 loss """bceaftersigmoid""" +487 75 regularizer """no""" +487 75 optimizer """adam""" +487 75 training_loop """owa""" +487 75 negative_sampler """basic""" +487 75 evaluator """rankbased""" +487 76 dataset """kinships""" +487 76 model """proje""" +487 76 loss """bceaftersigmoid""" +487 76 regularizer """no""" +487 76 optimizer """adam""" +487 76 training_loop """owa""" +487 76 negative_sampler """basic""" +487 76 evaluator """rankbased""" +487 77 dataset """kinships""" +487 77 model """proje""" +487 77 loss """bceaftersigmoid""" +487 77 
regularizer """no""" +487 77 optimizer """adam""" +487 77 training_loop """owa""" +487 77 negative_sampler """basic""" +487 77 evaluator """rankbased""" +487 78 dataset """kinships""" +487 78 model """proje""" +487 78 loss """bceaftersigmoid""" +487 78 regularizer """no""" +487 78 optimizer """adam""" +487 78 training_loop """owa""" +487 78 negative_sampler """basic""" +487 78 evaluator """rankbased""" +487 79 dataset """kinships""" +487 79 model """proje""" +487 79 loss """bceaftersigmoid""" +487 79 regularizer """no""" +487 79 optimizer """adam""" +487 79 training_loop """owa""" +487 79 negative_sampler """basic""" +487 79 evaluator """rankbased""" +487 80 dataset """kinships""" +487 80 model """proje""" +487 80 loss """bceaftersigmoid""" +487 80 regularizer """no""" +487 80 optimizer """adam""" +487 80 training_loop """owa""" +487 80 negative_sampler """basic""" +487 80 evaluator """rankbased""" +487 81 dataset """kinships""" +487 81 model """proje""" +487 81 loss """bceaftersigmoid""" +487 81 regularizer """no""" +487 81 optimizer """adam""" +487 81 training_loop """owa""" +487 81 negative_sampler """basic""" +487 81 evaluator """rankbased""" +487 82 dataset """kinships""" +487 82 model """proje""" +487 82 loss """bceaftersigmoid""" +487 82 regularizer """no""" +487 82 optimizer """adam""" +487 82 training_loop """owa""" +487 82 negative_sampler """basic""" +487 82 evaluator """rankbased""" +487 83 dataset """kinships""" +487 83 model """proje""" +487 83 loss """bceaftersigmoid""" +487 83 regularizer """no""" +487 83 optimizer """adam""" +487 83 training_loop """owa""" +487 83 negative_sampler """basic""" +487 83 evaluator """rankbased""" +487 84 dataset """kinships""" +487 84 model """proje""" +487 84 loss """bceaftersigmoid""" +487 84 regularizer """no""" +487 84 optimizer """adam""" +487 84 training_loop """owa""" +487 84 negative_sampler """basic""" +487 84 evaluator """rankbased""" +487 85 dataset """kinships""" +487 85 model """proje""" +487 85 loss 
"""bceaftersigmoid""" +487 85 regularizer """no""" +487 85 optimizer """adam""" +487 85 training_loop """owa""" +487 85 negative_sampler """basic""" +487 85 evaluator """rankbased""" +487 86 dataset """kinships""" +487 86 model """proje""" +487 86 loss """bceaftersigmoid""" +487 86 regularizer """no""" +487 86 optimizer """adam""" +487 86 training_loop """owa""" +487 86 negative_sampler """basic""" +487 86 evaluator """rankbased""" +487 87 dataset """kinships""" +487 87 model """proje""" +487 87 loss """bceaftersigmoid""" +487 87 regularizer """no""" +487 87 optimizer """adam""" +487 87 training_loop """owa""" +487 87 negative_sampler """basic""" +487 87 evaluator """rankbased""" +487 88 dataset """kinships""" +487 88 model """proje""" +487 88 loss """bceaftersigmoid""" +487 88 regularizer """no""" +487 88 optimizer """adam""" +487 88 training_loop """owa""" +487 88 negative_sampler """basic""" +487 88 evaluator """rankbased""" +487 89 dataset """kinships""" +487 89 model """proje""" +487 89 loss """bceaftersigmoid""" +487 89 regularizer """no""" +487 89 optimizer """adam""" +487 89 training_loop """owa""" +487 89 negative_sampler """basic""" +487 89 evaluator """rankbased""" +487 90 dataset """kinships""" +487 90 model """proje""" +487 90 loss """bceaftersigmoid""" +487 90 regularizer """no""" +487 90 optimizer """adam""" +487 90 training_loop """owa""" +487 90 negative_sampler """basic""" +487 90 evaluator """rankbased""" +487 91 dataset """kinships""" +487 91 model """proje""" +487 91 loss """bceaftersigmoid""" +487 91 regularizer """no""" +487 91 optimizer """adam""" +487 91 training_loop """owa""" +487 91 negative_sampler """basic""" +487 91 evaluator """rankbased""" +487 92 dataset """kinships""" +487 92 model """proje""" +487 92 loss """bceaftersigmoid""" +487 92 regularizer """no""" +487 92 optimizer """adam""" +487 92 training_loop """owa""" +487 92 negative_sampler """basic""" +487 92 evaluator """rankbased""" +487 93 dataset """kinships""" +487 93 model 
"""proje""" +487 93 loss """bceaftersigmoid""" +487 93 regularizer """no""" +487 93 optimizer """adam""" +487 93 training_loop """owa""" +487 93 negative_sampler """basic""" +487 93 evaluator """rankbased""" +487 94 dataset """kinships""" +487 94 model """proje""" +487 94 loss """bceaftersigmoid""" +487 94 regularizer """no""" +487 94 optimizer """adam""" +487 94 training_loop """owa""" +487 94 negative_sampler """basic""" +487 94 evaluator """rankbased""" +487 95 dataset """kinships""" +487 95 model """proje""" +487 95 loss """bceaftersigmoid""" +487 95 regularizer """no""" +487 95 optimizer """adam""" +487 95 training_loop """owa""" +487 95 negative_sampler """basic""" +487 95 evaluator """rankbased""" +487 96 dataset """kinships""" +487 96 model """proje""" +487 96 loss """bceaftersigmoid""" +487 96 regularizer """no""" +487 96 optimizer """adam""" +487 96 training_loop """owa""" +487 96 negative_sampler """basic""" +487 96 evaluator """rankbased""" +487 97 dataset """kinships""" +487 97 model """proje""" +487 97 loss """bceaftersigmoid""" +487 97 regularizer """no""" +487 97 optimizer """adam""" +487 97 training_loop """owa""" +487 97 negative_sampler """basic""" +487 97 evaluator """rankbased""" +487 98 dataset """kinships""" +487 98 model """proje""" +487 98 loss """bceaftersigmoid""" +487 98 regularizer """no""" +487 98 optimizer """adam""" +487 98 training_loop """owa""" +487 98 negative_sampler """basic""" +487 98 evaluator """rankbased""" +487 99 dataset """kinships""" +487 99 model """proje""" +487 99 loss """bceaftersigmoid""" +487 99 regularizer """no""" +487 99 optimizer """adam""" +487 99 training_loop """owa""" +487 99 negative_sampler """basic""" +487 99 evaluator """rankbased""" +487 100 dataset """kinships""" +487 100 model """proje""" +487 100 loss """bceaftersigmoid""" +487 100 regularizer """no""" +487 100 optimizer """adam""" +487 100 training_loop """owa""" +487 100 negative_sampler """basic""" +487 100 evaluator """rankbased""" +488 1 
model.embedding_dim 2.0 +488 1 optimizer.lr 0.0015715925140444504 +488 1 negative_sampler.num_negs_per_pos 82.0 +488 1 training.batch_size 1.0 +488 2 model.embedding_dim 1.0 +488 2 optimizer.lr 0.03528063024410006 +488 2 negative_sampler.num_negs_per_pos 27.0 +488 2 training.batch_size 1.0 +488 3 model.embedding_dim 2.0 +488 3 optimizer.lr 0.0013482541865049511 +488 3 negative_sampler.num_negs_per_pos 23.0 +488 3 training.batch_size 0.0 +488 4 model.embedding_dim 1.0 +488 4 optimizer.lr 0.012792777287901068 +488 4 negative_sampler.num_negs_per_pos 83.0 +488 4 training.batch_size 2.0 +488 5 model.embedding_dim 2.0 +488 5 optimizer.lr 0.0012366783399556018 +488 5 negative_sampler.num_negs_per_pos 78.0 +488 5 training.batch_size 2.0 +488 6 model.embedding_dim 0.0 +488 6 optimizer.lr 0.0038954624458150395 +488 6 negative_sampler.num_negs_per_pos 39.0 +488 6 training.batch_size 0.0 +488 7 model.embedding_dim 2.0 +488 7 optimizer.lr 0.001153748741288721 +488 7 negative_sampler.num_negs_per_pos 41.0 +488 7 training.batch_size 1.0 +488 8 model.embedding_dim 0.0 +488 8 optimizer.lr 0.0032588001505532336 +488 8 negative_sampler.num_negs_per_pos 12.0 +488 8 training.batch_size 0.0 +488 9 model.embedding_dim 2.0 +488 9 optimizer.lr 0.001308139374843368 +488 9 negative_sampler.num_negs_per_pos 44.0 +488 9 training.batch_size 2.0 +488 10 model.embedding_dim 1.0 +488 10 optimizer.lr 0.021842599675103127 +488 10 negative_sampler.num_negs_per_pos 76.0 +488 10 training.batch_size 0.0 +488 11 model.embedding_dim 1.0 +488 11 optimizer.lr 0.023683005015552646 +488 11 negative_sampler.num_negs_per_pos 51.0 +488 11 training.batch_size 1.0 +488 12 model.embedding_dim 0.0 +488 12 optimizer.lr 0.007515734174397661 +488 12 negative_sampler.num_negs_per_pos 99.0 +488 12 training.batch_size 0.0 +488 13 model.embedding_dim 0.0 +488 13 optimizer.lr 0.03821856718745946 +488 13 negative_sampler.num_negs_per_pos 20.0 +488 13 training.batch_size 0.0 +488 14 model.embedding_dim 0.0 +488 14 
optimizer.lr 0.00562907854232404 +488 14 negative_sampler.num_negs_per_pos 19.0 +488 14 training.batch_size 2.0 +488 15 model.embedding_dim 0.0 +488 15 optimizer.lr 0.029469946609060865 +488 15 negative_sampler.num_negs_per_pos 80.0 +488 15 training.batch_size 2.0 +488 16 model.embedding_dim 2.0 +488 16 optimizer.lr 0.08638270585129718 +488 16 negative_sampler.num_negs_per_pos 10.0 +488 16 training.batch_size 0.0 +488 17 model.embedding_dim 2.0 +488 17 optimizer.lr 0.008635081904401806 +488 17 negative_sampler.num_negs_per_pos 52.0 +488 17 training.batch_size 1.0 +488 18 model.embedding_dim 1.0 +488 18 optimizer.lr 0.0010690614835456788 +488 18 negative_sampler.num_negs_per_pos 54.0 +488 18 training.batch_size 1.0 +488 19 model.embedding_dim 0.0 +488 19 optimizer.lr 0.0010947700315326484 +488 19 negative_sampler.num_negs_per_pos 39.0 +488 19 training.batch_size 2.0 +488 20 model.embedding_dim 2.0 +488 20 optimizer.lr 0.06728395686994697 +488 20 negative_sampler.num_negs_per_pos 54.0 +488 20 training.batch_size 2.0 +488 21 model.embedding_dim 1.0 +488 21 optimizer.lr 0.007789318383191054 +488 21 negative_sampler.num_negs_per_pos 98.0 +488 21 training.batch_size 1.0 +488 22 model.embedding_dim 0.0 +488 22 optimizer.lr 0.0821390292198219 +488 22 negative_sampler.num_negs_per_pos 8.0 +488 22 training.batch_size 1.0 +488 23 model.embedding_dim 1.0 +488 23 optimizer.lr 0.003629650359405564 +488 23 negative_sampler.num_negs_per_pos 16.0 +488 23 training.batch_size 0.0 +488 24 model.embedding_dim 2.0 +488 24 optimizer.lr 0.01968864252080143 +488 24 negative_sampler.num_negs_per_pos 76.0 +488 24 training.batch_size 1.0 +488 25 model.embedding_dim 0.0 +488 25 optimizer.lr 0.0013752870709760085 +488 25 negative_sampler.num_negs_per_pos 61.0 +488 25 training.batch_size 2.0 +488 26 model.embedding_dim 1.0 +488 26 optimizer.lr 0.08983987333612294 +488 26 negative_sampler.num_negs_per_pos 74.0 +488 26 training.batch_size 1.0 +488 27 model.embedding_dim 2.0 +488 27 optimizer.lr 
0.0306415526331128 +488 27 negative_sampler.num_negs_per_pos 67.0 +488 27 training.batch_size 1.0 +488 28 model.embedding_dim 1.0 +488 28 optimizer.lr 0.014657335014940159 +488 28 negative_sampler.num_negs_per_pos 98.0 +488 28 training.batch_size 1.0 +488 29 model.embedding_dim 1.0 +488 29 optimizer.lr 0.004090176701567323 +488 29 negative_sampler.num_negs_per_pos 72.0 +488 29 training.batch_size 0.0 +488 30 model.embedding_dim 2.0 +488 30 optimizer.lr 0.03402330503811656 +488 30 negative_sampler.num_negs_per_pos 50.0 +488 30 training.batch_size 1.0 +488 31 model.embedding_dim 1.0 +488 31 optimizer.lr 0.00270476712257316 +488 31 negative_sampler.num_negs_per_pos 0.0 +488 31 training.batch_size 1.0 +488 32 model.embedding_dim 1.0 +488 32 optimizer.lr 0.004225769779720672 +488 32 negative_sampler.num_negs_per_pos 0.0 +488 32 training.batch_size 2.0 +488 33 model.embedding_dim 0.0 +488 33 optimizer.lr 0.0011160962516082887 +488 33 negative_sampler.num_negs_per_pos 9.0 +488 33 training.batch_size 1.0 +488 34 model.embedding_dim 2.0 +488 34 optimizer.lr 0.0025733488732210697 +488 34 negative_sampler.num_negs_per_pos 80.0 +488 34 training.batch_size 1.0 +488 35 model.embedding_dim 1.0 +488 35 optimizer.lr 0.008972429395248865 +488 35 negative_sampler.num_negs_per_pos 59.0 +488 35 training.batch_size 0.0 +488 36 model.embedding_dim 2.0 +488 36 optimizer.lr 0.026564522405613675 +488 36 negative_sampler.num_negs_per_pos 8.0 +488 36 training.batch_size 1.0 +488 37 model.embedding_dim 2.0 +488 37 optimizer.lr 0.018353007217155774 +488 37 negative_sampler.num_negs_per_pos 37.0 +488 37 training.batch_size 1.0 +488 38 model.embedding_dim 1.0 +488 38 optimizer.lr 0.08479174416635203 +488 38 negative_sampler.num_negs_per_pos 83.0 +488 38 training.batch_size 0.0 +488 39 model.embedding_dim 0.0 +488 39 optimizer.lr 0.010557953468441466 +488 39 negative_sampler.num_negs_per_pos 74.0 +488 39 training.batch_size 2.0 +488 40 model.embedding_dim 0.0 +488 40 optimizer.lr 
0.04309820867113556 +488 40 negative_sampler.num_negs_per_pos 27.0 +488 40 training.batch_size 2.0 +488 41 model.embedding_dim 2.0 +488 41 optimizer.lr 0.0017139850428195924 +488 41 negative_sampler.num_negs_per_pos 83.0 +488 41 training.batch_size 2.0 +488 42 model.embedding_dim 1.0 +488 42 optimizer.lr 0.01592359440820681 +488 42 negative_sampler.num_negs_per_pos 76.0 +488 42 training.batch_size 1.0 +488 43 model.embedding_dim 0.0 +488 43 optimizer.lr 0.0063342565764598 +488 43 negative_sampler.num_negs_per_pos 75.0 +488 43 training.batch_size 0.0 +488 44 model.embedding_dim 1.0 +488 44 optimizer.lr 0.025789116112162624 +488 44 negative_sampler.num_negs_per_pos 84.0 +488 44 training.batch_size 0.0 +488 45 model.embedding_dim 2.0 +488 45 optimizer.lr 0.03642932238259688 +488 45 negative_sampler.num_negs_per_pos 88.0 +488 45 training.batch_size 0.0 +488 46 model.embedding_dim 2.0 +488 46 optimizer.lr 0.014016371599035968 +488 46 negative_sampler.num_negs_per_pos 36.0 +488 46 training.batch_size 0.0 +488 47 model.embedding_dim 0.0 +488 47 optimizer.lr 0.01850696232076754 +488 47 negative_sampler.num_negs_per_pos 3.0 +488 47 training.batch_size 0.0 +488 48 model.embedding_dim 0.0 +488 48 optimizer.lr 0.07601025422612565 +488 48 negative_sampler.num_negs_per_pos 25.0 +488 48 training.batch_size 2.0 +488 49 model.embedding_dim 0.0 +488 49 optimizer.lr 0.041661530904833884 +488 49 negative_sampler.num_negs_per_pos 28.0 +488 49 training.batch_size 1.0 +488 50 model.embedding_dim 1.0 +488 50 optimizer.lr 0.0261620807396896 +488 50 negative_sampler.num_negs_per_pos 98.0 +488 50 training.batch_size 1.0 +488 51 model.embedding_dim 0.0 +488 51 optimizer.lr 0.010211819946533272 +488 51 negative_sampler.num_negs_per_pos 9.0 +488 51 training.batch_size 1.0 +488 52 model.embedding_dim 0.0 +488 52 optimizer.lr 0.0020311656410246467 +488 52 negative_sampler.num_negs_per_pos 14.0 +488 52 training.batch_size 1.0 +488 53 model.embedding_dim 0.0 +488 53 optimizer.lr 
0.0037171699829971886 +488 53 negative_sampler.num_negs_per_pos 21.0 +488 53 training.batch_size 2.0 +488 54 model.embedding_dim 1.0 +488 54 optimizer.lr 0.07472524857592057 +488 54 negative_sampler.num_negs_per_pos 21.0 +488 54 training.batch_size 2.0 +488 55 model.embedding_dim 2.0 +488 55 optimizer.lr 0.00908413864003621 +488 55 negative_sampler.num_negs_per_pos 7.0 +488 55 training.batch_size 2.0 +488 56 model.embedding_dim 2.0 +488 56 optimizer.lr 0.023949449548928994 +488 56 negative_sampler.num_negs_per_pos 57.0 +488 56 training.batch_size 1.0 +488 57 model.embedding_dim 1.0 +488 57 optimizer.lr 0.01959950468167773 +488 57 negative_sampler.num_negs_per_pos 77.0 +488 57 training.batch_size 1.0 +488 58 model.embedding_dim 1.0 +488 58 optimizer.lr 0.020630772666703944 +488 58 negative_sampler.num_negs_per_pos 78.0 +488 58 training.batch_size 0.0 +488 59 model.embedding_dim 0.0 +488 59 optimizer.lr 0.09572739401580317 +488 59 negative_sampler.num_negs_per_pos 49.0 +488 59 training.batch_size 0.0 +488 60 model.embedding_dim 1.0 +488 60 optimizer.lr 0.03925404856349706 +488 60 negative_sampler.num_negs_per_pos 39.0 +488 60 training.batch_size 2.0 +488 61 model.embedding_dim 2.0 +488 61 optimizer.lr 0.009829587957390709 +488 61 negative_sampler.num_negs_per_pos 74.0 +488 61 training.batch_size 2.0 +488 62 model.embedding_dim 0.0 +488 62 optimizer.lr 0.08722128174013605 +488 62 negative_sampler.num_negs_per_pos 92.0 +488 62 training.batch_size 1.0 +488 63 model.embedding_dim 1.0 +488 63 optimizer.lr 0.09424987654257938 +488 63 negative_sampler.num_negs_per_pos 34.0 +488 63 training.batch_size 2.0 +488 64 model.embedding_dim 1.0 +488 64 optimizer.lr 0.09857187315347794 +488 64 negative_sampler.num_negs_per_pos 1.0 +488 64 training.batch_size 2.0 +488 65 model.embedding_dim 0.0 +488 65 optimizer.lr 0.002940345799311853 +488 65 negative_sampler.num_negs_per_pos 37.0 +488 65 training.batch_size 1.0 +488 66 model.embedding_dim 2.0 +488 66 optimizer.lr 0.04764021932473161 
+488 66 negative_sampler.num_negs_per_pos 51.0 +488 66 training.batch_size 0.0 +488 67 model.embedding_dim 1.0 +488 67 optimizer.lr 0.0031314285716980963 +488 67 negative_sampler.num_negs_per_pos 27.0 +488 67 training.batch_size 0.0 +488 68 model.embedding_dim 2.0 +488 68 optimizer.lr 0.002897766907599744 +488 68 negative_sampler.num_negs_per_pos 95.0 +488 68 training.batch_size 1.0 +488 69 model.embedding_dim 0.0 +488 69 optimizer.lr 0.0495392904741965 +488 69 negative_sampler.num_negs_per_pos 70.0 +488 69 training.batch_size 1.0 +488 70 model.embedding_dim 0.0 +488 70 optimizer.lr 0.005029781389877565 +488 70 negative_sampler.num_negs_per_pos 71.0 +488 70 training.batch_size 2.0 +488 71 model.embedding_dim 0.0 +488 71 optimizer.lr 0.027202153952789986 +488 71 negative_sampler.num_negs_per_pos 60.0 +488 71 training.batch_size 2.0 +488 72 model.embedding_dim 1.0 +488 72 optimizer.lr 0.02697357828189383 +488 72 negative_sampler.num_negs_per_pos 29.0 +488 72 training.batch_size 2.0 +488 73 model.embedding_dim 2.0 +488 73 optimizer.lr 0.05817206303027608 +488 73 negative_sampler.num_negs_per_pos 18.0 +488 73 training.batch_size 0.0 +488 74 model.embedding_dim 1.0 +488 74 optimizer.lr 0.026296543530615092 +488 74 negative_sampler.num_negs_per_pos 1.0 +488 74 training.batch_size 0.0 +488 75 model.embedding_dim 0.0 +488 75 optimizer.lr 0.038532873319051066 +488 75 negative_sampler.num_negs_per_pos 45.0 +488 75 training.batch_size 0.0 +488 76 model.embedding_dim 0.0 +488 76 optimizer.lr 0.010138623606601086 +488 76 negative_sampler.num_negs_per_pos 39.0 +488 76 training.batch_size 2.0 +488 77 model.embedding_dim 0.0 +488 77 optimizer.lr 0.011673518769557273 +488 77 negative_sampler.num_negs_per_pos 3.0 +488 77 training.batch_size 1.0 +488 78 model.embedding_dim 0.0 +488 78 optimizer.lr 0.006494805708596008 +488 78 negative_sampler.num_negs_per_pos 59.0 +488 78 training.batch_size 0.0 +488 79 model.embedding_dim 2.0 +488 79 optimizer.lr 0.0050142004815423875 +488 79 
negative_sampler.num_negs_per_pos 52.0 +488 79 training.batch_size 1.0 +488 80 model.embedding_dim 2.0 +488 80 optimizer.lr 0.06632029404662249 +488 80 negative_sampler.num_negs_per_pos 52.0 +488 80 training.batch_size 2.0 +488 81 model.embedding_dim 1.0 +488 81 optimizer.lr 0.003097510409438395 +488 81 negative_sampler.num_negs_per_pos 58.0 +488 81 training.batch_size 2.0 +488 82 model.embedding_dim 1.0 +488 82 optimizer.lr 0.004086245373634455 +488 82 negative_sampler.num_negs_per_pos 47.0 +488 82 training.batch_size 2.0 +488 83 model.embedding_dim 2.0 +488 83 optimizer.lr 0.002397676415344585 +488 83 negative_sampler.num_negs_per_pos 31.0 +488 83 training.batch_size 2.0 +488 84 model.embedding_dim 2.0 +488 84 optimizer.lr 0.020052572668750473 +488 84 negative_sampler.num_negs_per_pos 93.0 +488 84 training.batch_size 0.0 +488 85 model.embedding_dim 1.0 +488 85 optimizer.lr 0.006366539382945403 +488 85 negative_sampler.num_negs_per_pos 25.0 +488 85 training.batch_size 1.0 +488 86 model.embedding_dim 2.0 +488 86 optimizer.lr 0.0038352972591482945 +488 86 negative_sampler.num_negs_per_pos 57.0 +488 86 training.batch_size 2.0 +488 87 model.embedding_dim 0.0 +488 87 optimizer.lr 0.045404546190868 +488 87 negative_sampler.num_negs_per_pos 62.0 +488 87 training.batch_size 1.0 +488 88 model.embedding_dim 2.0 +488 88 optimizer.lr 0.00161726505285905 +488 88 negative_sampler.num_negs_per_pos 57.0 +488 88 training.batch_size 2.0 +488 89 model.embedding_dim 0.0 +488 89 optimizer.lr 0.0029311309961914866 +488 89 negative_sampler.num_negs_per_pos 73.0 +488 89 training.batch_size 2.0 +488 90 model.embedding_dim 0.0 +488 90 optimizer.lr 0.012021978795617016 +488 90 negative_sampler.num_negs_per_pos 93.0 +488 90 training.batch_size 1.0 +488 91 model.embedding_dim 0.0 +488 91 optimizer.lr 0.008111491451830162 +488 91 negative_sampler.num_negs_per_pos 91.0 +488 91 training.batch_size 0.0 +488 92 model.embedding_dim 2.0 +488 92 optimizer.lr 0.0014143385859992637 +488 92 
negative_sampler.num_negs_per_pos 16.0 +488 92 training.batch_size 2.0 +488 93 model.embedding_dim 1.0 +488 93 optimizer.lr 0.008770808954725526 +488 93 negative_sampler.num_negs_per_pos 95.0 +488 93 training.batch_size 2.0 +488 94 model.embedding_dim 2.0 +488 94 optimizer.lr 0.017555634369178907 +488 94 negative_sampler.num_negs_per_pos 1.0 +488 94 training.batch_size 2.0 +488 95 model.embedding_dim 1.0 +488 95 optimizer.lr 0.09389051678691325 +488 95 negative_sampler.num_negs_per_pos 38.0 +488 95 training.batch_size 2.0 +488 96 model.embedding_dim 0.0 +488 96 optimizer.lr 0.0033270505825367526 +488 96 negative_sampler.num_negs_per_pos 74.0 +488 96 training.batch_size 0.0 +488 97 model.embedding_dim 1.0 +488 97 optimizer.lr 0.010062579211680545 +488 97 negative_sampler.num_negs_per_pos 53.0 +488 97 training.batch_size 0.0 +488 98 model.embedding_dim 2.0 +488 98 optimizer.lr 0.030192699090685945 +488 98 negative_sampler.num_negs_per_pos 82.0 +488 98 training.batch_size 1.0 +488 99 model.embedding_dim 1.0 +488 99 optimizer.lr 0.01953707684517016 +488 99 negative_sampler.num_negs_per_pos 92.0 +488 99 training.batch_size 0.0 +488 100 model.embedding_dim 2.0 +488 100 optimizer.lr 0.057259082196726006 +488 100 negative_sampler.num_negs_per_pos 35.0 +488 100 training.batch_size 0.0 +488 1 dataset """kinships""" +488 1 model """proje""" +488 1 loss """softplus""" +488 1 regularizer """no""" +488 1 optimizer """adam""" +488 1 training_loop """owa""" +488 1 negative_sampler """basic""" +488 1 evaluator """rankbased""" +488 2 dataset """kinships""" +488 2 model """proje""" +488 2 loss """softplus""" +488 2 regularizer """no""" +488 2 optimizer """adam""" +488 2 training_loop """owa""" +488 2 negative_sampler """basic""" +488 2 evaluator """rankbased""" +488 3 dataset """kinships""" +488 3 model """proje""" +488 3 loss """softplus""" +488 3 regularizer """no""" +488 3 optimizer """adam""" +488 3 training_loop """owa""" +488 3 negative_sampler """basic""" +488 3 evaluator 
"""rankbased""" +488 4 dataset """kinships""" +488 4 model """proje""" +488 4 loss """softplus""" +488 4 regularizer """no""" +488 4 optimizer """adam""" +488 4 training_loop """owa""" +488 4 negative_sampler """basic""" +488 4 evaluator """rankbased""" +488 5 dataset """kinships""" +488 5 model """proje""" +488 5 loss """softplus""" +488 5 regularizer """no""" +488 5 optimizer """adam""" +488 5 training_loop """owa""" +488 5 negative_sampler """basic""" +488 5 evaluator """rankbased""" +488 6 dataset """kinships""" +488 6 model """proje""" +488 6 loss """softplus""" +488 6 regularizer """no""" +488 6 optimizer """adam""" +488 6 training_loop """owa""" +488 6 negative_sampler """basic""" +488 6 evaluator """rankbased""" +488 7 dataset """kinships""" +488 7 model """proje""" +488 7 loss """softplus""" +488 7 regularizer """no""" +488 7 optimizer """adam""" +488 7 training_loop """owa""" +488 7 negative_sampler """basic""" +488 7 evaluator """rankbased""" +488 8 dataset """kinships""" +488 8 model """proje""" +488 8 loss """softplus""" +488 8 regularizer """no""" +488 8 optimizer """adam""" +488 8 training_loop """owa""" +488 8 negative_sampler """basic""" +488 8 evaluator """rankbased""" +488 9 dataset """kinships""" +488 9 model """proje""" +488 9 loss """softplus""" +488 9 regularizer """no""" +488 9 optimizer """adam""" +488 9 training_loop """owa""" +488 9 negative_sampler """basic""" +488 9 evaluator """rankbased""" +488 10 dataset """kinships""" +488 10 model """proje""" +488 10 loss """softplus""" +488 10 regularizer """no""" +488 10 optimizer """adam""" +488 10 training_loop """owa""" +488 10 negative_sampler """basic""" +488 10 evaluator """rankbased""" +488 11 dataset """kinships""" +488 11 model """proje""" +488 11 loss """softplus""" +488 11 regularizer """no""" +488 11 optimizer """adam""" +488 11 training_loop """owa""" +488 11 negative_sampler """basic""" +488 11 evaluator """rankbased""" +488 12 dataset """kinships""" +488 12 model """proje""" +488 
12 loss """softplus""" +488 12 regularizer """no""" +488 12 optimizer """adam""" +488 12 training_loop """owa""" +488 12 negative_sampler """basic""" +488 12 evaluator """rankbased""" +488 13 dataset """kinships""" +488 13 model """proje""" +488 13 loss """softplus""" +488 13 regularizer """no""" +488 13 optimizer """adam""" +488 13 training_loop """owa""" +488 13 negative_sampler """basic""" +488 13 evaluator """rankbased""" +488 14 dataset """kinships""" +488 14 model """proje""" +488 14 loss """softplus""" +488 14 regularizer """no""" +488 14 optimizer """adam""" +488 14 training_loop """owa""" +488 14 negative_sampler """basic""" +488 14 evaluator """rankbased""" +488 15 dataset """kinships""" +488 15 model """proje""" +488 15 loss """softplus""" +488 15 regularizer """no""" +488 15 optimizer """adam""" +488 15 training_loop """owa""" +488 15 negative_sampler """basic""" +488 15 evaluator """rankbased""" +488 16 dataset """kinships""" +488 16 model """proje""" +488 16 loss """softplus""" +488 16 regularizer """no""" +488 16 optimizer """adam""" +488 16 training_loop """owa""" +488 16 negative_sampler """basic""" +488 16 evaluator """rankbased""" +488 17 dataset """kinships""" +488 17 model """proje""" +488 17 loss """softplus""" +488 17 regularizer """no""" +488 17 optimizer """adam""" +488 17 training_loop """owa""" +488 17 negative_sampler """basic""" +488 17 evaluator """rankbased""" +488 18 dataset """kinships""" +488 18 model """proje""" +488 18 loss """softplus""" +488 18 regularizer """no""" +488 18 optimizer """adam""" +488 18 training_loop """owa""" +488 18 negative_sampler """basic""" +488 18 evaluator """rankbased""" +488 19 dataset """kinships""" +488 19 model """proje""" +488 19 loss """softplus""" +488 19 regularizer """no""" +488 19 optimizer """adam""" +488 19 training_loop """owa""" +488 19 negative_sampler """basic""" +488 19 evaluator """rankbased""" +488 20 dataset """kinships""" +488 20 model """proje""" +488 20 loss """softplus""" +488 20 
regularizer """no""" +488 20 optimizer """adam""" +488 20 training_loop """owa""" +488 20 negative_sampler """basic""" +488 20 evaluator """rankbased""" +488 21 dataset """kinships""" +488 21 model """proje""" +488 21 loss """softplus""" +488 21 regularizer """no""" +488 21 optimizer """adam""" +488 21 training_loop """owa""" +488 21 negative_sampler """basic""" +488 21 evaluator """rankbased""" +488 22 dataset """kinships""" +488 22 model """proje""" +488 22 loss """softplus""" +488 22 regularizer """no""" +488 22 optimizer """adam""" +488 22 training_loop """owa""" +488 22 negative_sampler """basic""" +488 22 evaluator """rankbased""" +488 23 dataset """kinships""" +488 23 model """proje""" +488 23 loss """softplus""" +488 23 regularizer """no""" +488 23 optimizer """adam""" +488 23 training_loop """owa""" +488 23 negative_sampler """basic""" +488 23 evaluator """rankbased""" +488 24 dataset """kinships""" +488 24 model """proje""" +488 24 loss """softplus""" +488 24 regularizer """no""" +488 24 optimizer """adam""" +488 24 training_loop """owa""" +488 24 negative_sampler """basic""" +488 24 evaluator """rankbased""" +488 25 dataset """kinships""" +488 25 model """proje""" +488 25 loss """softplus""" +488 25 regularizer """no""" +488 25 optimizer """adam""" +488 25 training_loop """owa""" +488 25 negative_sampler """basic""" +488 25 evaluator """rankbased""" +488 26 dataset """kinships""" +488 26 model """proje""" +488 26 loss """softplus""" +488 26 regularizer """no""" +488 26 optimizer """adam""" +488 26 training_loop """owa""" +488 26 negative_sampler """basic""" +488 26 evaluator """rankbased""" +488 27 dataset """kinships""" +488 27 model """proje""" +488 27 loss """softplus""" +488 27 regularizer """no""" +488 27 optimizer """adam""" +488 27 training_loop """owa""" +488 27 negative_sampler """basic""" +488 27 evaluator """rankbased""" +488 28 dataset """kinships""" +488 28 model """proje""" +488 28 loss """softplus""" +488 28 regularizer """no""" +488 28 
optimizer """adam""" +488 28 training_loop """owa""" +488 28 negative_sampler """basic""" +488 28 evaluator """rankbased""" +488 29 dataset """kinships""" +488 29 model """proje""" +488 29 loss """softplus""" +488 29 regularizer """no""" +488 29 optimizer """adam""" +488 29 training_loop """owa""" +488 29 negative_sampler """basic""" +488 29 evaluator """rankbased""" +488 30 dataset """kinships""" +488 30 model """proje""" +488 30 loss """softplus""" +488 30 regularizer """no""" +488 30 optimizer """adam""" +488 30 training_loop """owa""" +488 30 negative_sampler """basic""" +488 30 evaluator """rankbased""" +488 31 dataset """kinships""" +488 31 model """proje""" +488 31 loss """softplus""" +488 31 regularizer """no""" +488 31 optimizer """adam""" +488 31 training_loop """owa""" +488 31 negative_sampler """basic""" +488 31 evaluator """rankbased""" +488 32 dataset """kinships""" +488 32 model """proje""" +488 32 loss """softplus""" +488 32 regularizer """no""" +488 32 optimizer """adam""" +488 32 training_loop """owa""" +488 32 negative_sampler """basic""" +488 32 evaluator """rankbased""" +488 33 dataset """kinships""" +488 33 model """proje""" +488 33 loss """softplus""" +488 33 regularizer """no""" +488 33 optimizer """adam""" +488 33 training_loop """owa""" +488 33 negative_sampler """basic""" +488 33 evaluator """rankbased""" +488 34 dataset """kinships""" +488 34 model """proje""" +488 34 loss """softplus""" +488 34 regularizer """no""" +488 34 optimizer """adam""" +488 34 training_loop """owa""" +488 34 negative_sampler """basic""" +488 34 evaluator """rankbased""" +488 35 dataset """kinships""" +488 35 model """proje""" +488 35 loss """softplus""" +488 35 regularizer """no""" +488 35 optimizer """adam""" +488 35 training_loop """owa""" +488 35 negative_sampler """basic""" +488 35 evaluator """rankbased""" +488 36 dataset """kinships""" +488 36 model """proje""" +488 36 loss """softplus""" +488 36 regularizer """no""" +488 36 optimizer """adam""" +488 36 
training_loop """owa""" +488 36 negative_sampler """basic""" +488 36 evaluator """rankbased""" +488 37 dataset """kinships""" +488 37 model """proje""" +488 37 loss """softplus""" +488 37 regularizer """no""" +488 37 optimizer """adam""" +488 37 training_loop """owa""" +488 37 negative_sampler """basic""" +488 37 evaluator """rankbased""" +488 38 dataset """kinships""" +488 38 model """proje""" +488 38 loss """softplus""" +488 38 regularizer """no""" +488 38 optimizer """adam""" +488 38 training_loop """owa""" +488 38 negative_sampler """basic""" +488 38 evaluator """rankbased""" +488 39 dataset """kinships""" +488 39 model """proje""" +488 39 loss """softplus""" +488 39 regularizer """no""" +488 39 optimizer """adam""" +488 39 training_loop """owa""" +488 39 negative_sampler """basic""" +488 39 evaluator """rankbased""" +488 40 dataset """kinships""" +488 40 model """proje""" +488 40 loss """softplus""" +488 40 regularizer """no""" +488 40 optimizer """adam""" +488 40 training_loop """owa""" +488 40 negative_sampler """basic""" +488 40 evaluator """rankbased""" +488 41 dataset """kinships""" +488 41 model """proje""" +488 41 loss """softplus""" +488 41 regularizer """no""" +488 41 optimizer """adam""" +488 41 training_loop """owa""" +488 41 negative_sampler """basic""" +488 41 evaluator """rankbased""" +488 42 dataset """kinships""" +488 42 model """proje""" +488 42 loss """softplus""" +488 42 regularizer """no""" +488 42 optimizer """adam""" +488 42 training_loop """owa""" +488 42 negative_sampler """basic""" +488 42 evaluator """rankbased""" +488 43 dataset """kinships""" +488 43 model """proje""" +488 43 loss """softplus""" +488 43 regularizer """no""" +488 43 optimizer """adam""" +488 43 training_loop """owa""" +488 43 negative_sampler """basic""" +488 43 evaluator """rankbased""" +488 44 dataset """kinships""" +488 44 model """proje""" +488 44 loss """softplus""" +488 44 regularizer """no""" +488 44 optimizer """adam""" +488 44 training_loop """owa""" +488 44 
negative_sampler """basic""" +488 44 evaluator """rankbased""" +488 45 dataset """kinships""" +488 45 model """proje""" +488 45 loss """softplus""" +488 45 regularizer """no""" +488 45 optimizer """adam""" +488 45 training_loop """owa""" +488 45 negative_sampler """basic""" +488 45 evaluator """rankbased""" +488 46 dataset """kinships""" +488 46 model """proje""" +488 46 loss """softplus""" +488 46 regularizer """no""" +488 46 optimizer """adam""" +488 46 training_loop """owa""" +488 46 negative_sampler """basic""" +488 46 evaluator """rankbased""" +488 47 dataset """kinships""" +488 47 model """proje""" +488 47 loss """softplus""" +488 47 regularizer """no""" +488 47 optimizer """adam""" +488 47 training_loop """owa""" +488 47 negative_sampler """basic""" +488 47 evaluator """rankbased""" +488 48 dataset """kinships""" +488 48 model """proje""" +488 48 loss """softplus""" +488 48 regularizer """no""" +488 48 optimizer """adam""" +488 48 training_loop """owa""" +488 48 negative_sampler """basic""" +488 48 evaluator """rankbased""" +488 49 dataset """kinships""" +488 49 model """proje""" +488 49 loss """softplus""" +488 49 regularizer """no""" +488 49 optimizer """adam""" +488 49 training_loop """owa""" +488 49 negative_sampler """basic""" +488 49 evaluator """rankbased""" +488 50 dataset """kinships""" +488 50 model """proje""" +488 50 loss """softplus""" +488 50 regularizer """no""" +488 50 optimizer """adam""" +488 50 training_loop """owa""" +488 50 negative_sampler """basic""" +488 50 evaluator """rankbased""" +488 51 dataset """kinships""" +488 51 model """proje""" +488 51 loss """softplus""" +488 51 regularizer """no""" +488 51 optimizer """adam""" +488 51 training_loop """owa""" +488 51 negative_sampler """basic""" +488 51 evaluator """rankbased""" +488 52 dataset """kinships""" +488 52 model """proje""" +488 52 loss """softplus""" +488 52 regularizer """no""" +488 52 optimizer """adam""" +488 52 training_loop """owa""" +488 52 negative_sampler """basic""" 
+488 52 evaluator """rankbased""" +488 53 dataset """kinships""" +488 53 model """proje""" +488 53 loss """softplus""" +488 53 regularizer """no""" +488 53 optimizer """adam""" +488 53 training_loop """owa""" +488 53 negative_sampler """basic""" +488 53 evaluator """rankbased""" +488 54 dataset """kinships""" +488 54 model """proje""" +488 54 loss """softplus""" +488 54 regularizer """no""" +488 54 optimizer """adam""" +488 54 training_loop """owa""" +488 54 negative_sampler """basic""" +488 54 evaluator """rankbased""" +488 55 dataset """kinships""" +488 55 model """proje""" +488 55 loss """softplus""" +488 55 regularizer """no""" +488 55 optimizer """adam""" +488 55 training_loop """owa""" +488 55 negative_sampler """basic""" +488 55 evaluator """rankbased""" +488 56 dataset """kinships""" +488 56 model """proje""" +488 56 loss """softplus""" +488 56 regularizer """no""" +488 56 optimizer """adam""" +488 56 training_loop """owa""" +488 56 negative_sampler """basic""" +488 56 evaluator """rankbased""" +488 57 dataset """kinships""" +488 57 model """proje""" +488 57 loss """softplus""" +488 57 regularizer """no""" +488 57 optimizer """adam""" +488 57 training_loop """owa""" +488 57 negative_sampler """basic""" +488 57 evaluator """rankbased""" +488 58 dataset """kinships""" +488 58 model """proje""" +488 58 loss """softplus""" +488 58 regularizer """no""" +488 58 optimizer """adam""" +488 58 training_loop """owa""" +488 58 negative_sampler """basic""" +488 58 evaluator """rankbased""" +488 59 dataset """kinships""" +488 59 model """proje""" +488 59 loss """softplus""" +488 59 regularizer """no""" +488 59 optimizer """adam""" +488 59 training_loop """owa""" +488 59 negative_sampler """basic""" +488 59 evaluator """rankbased""" +488 60 dataset """kinships""" +488 60 model """proje""" +488 60 loss """softplus""" +488 60 regularizer """no""" +488 60 optimizer """adam""" +488 60 training_loop """owa""" +488 60 negative_sampler """basic""" +488 60 evaluator 
"""rankbased""" +488 61 dataset """kinships""" +488 61 model """proje""" +488 61 loss """softplus""" +488 61 regularizer """no""" +488 61 optimizer """adam""" +488 61 training_loop """owa""" +488 61 negative_sampler """basic""" +488 61 evaluator """rankbased""" +488 62 dataset """kinships""" +488 62 model """proje""" +488 62 loss """softplus""" +488 62 regularizer """no""" +488 62 optimizer """adam""" +488 62 training_loop """owa""" +488 62 negative_sampler """basic""" +488 62 evaluator """rankbased""" +488 63 dataset """kinships""" +488 63 model """proje""" +488 63 loss """softplus""" +488 63 regularizer """no""" +488 63 optimizer """adam""" +488 63 training_loop """owa""" +488 63 negative_sampler """basic""" +488 63 evaluator """rankbased""" +488 64 dataset """kinships""" +488 64 model """proje""" +488 64 loss """softplus""" +488 64 regularizer """no""" +488 64 optimizer """adam""" +488 64 training_loop """owa""" +488 64 negative_sampler """basic""" +488 64 evaluator """rankbased""" +488 65 dataset """kinships""" +488 65 model """proje""" +488 65 loss """softplus""" +488 65 regularizer """no""" +488 65 optimizer """adam""" +488 65 training_loop """owa""" +488 65 negative_sampler """basic""" +488 65 evaluator """rankbased""" +488 66 dataset """kinships""" +488 66 model """proje""" +488 66 loss """softplus""" +488 66 regularizer """no""" +488 66 optimizer """adam""" +488 66 training_loop """owa""" +488 66 negative_sampler """basic""" +488 66 evaluator """rankbased""" +488 67 dataset """kinships""" +488 67 model """proje""" +488 67 loss """softplus""" +488 67 regularizer """no""" +488 67 optimizer """adam""" +488 67 training_loop """owa""" +488 67 negative_sampler """basic""" +488 67 evaluator """rankbased""" +488 68 dataset """kinships""" +488 68 model """proje""" +488 68 loss """softplus""" +488 68 regularizer """no""" +488 68 optimizer """adam""" +488 68 training_loop """owa""" +488 68 negative_sampler """basic""" +488 68 evaluator """rankbased""" +488 69 dataset 
"""kinships""" +488 69 model """proje""" +488 69 loss """softplus""" +488 69 regularizer """no""" +488 69 optimizer """adam""" +488 69 training_loop """owa""" +488 69 negative_sampler """basic""" +488 69 evaluator """rankbased""" +488 70 dataset """kinships""" +488 70 model """proje""" +488 70 loss """softplus""" +488 70 regularizer """no""" +488 70 optimizer """adam""" +488 70 training_loop """owa""" +488 70 negative_sampler """basic""" +488 70 evaluator """rankbased""" +488 71 dataset """kinships""" +488 71 model """proje""" +488 71 loss """softplus""" +488 71 regularizer """no""" +488 71 optimizer """adam""" +488 71 training_loop """owa""" +488 71 negative_sampler """basic""" +488 71 evaluator """rankbased""" +488 72 dataset """kinships""" +488 72 model """proje""" +488 72 loss """softplus""" +488 72 regularizer """no""" +488 72 optimizer """adam""" +488 72 training_loop """owa""" +488 72 negative_sampler """basic""" +488 72 evaluator """rankbased""" +488 73 dataset """kinships""" +488 73 model """proje""" +488 73 loss """softplus""" +488 73 regularizer """no""" +488 73 optimizer """adam""" +488 73 training_loop """owa""" +488 73 negative_sampler """basic""" +488 73 evaluator """rankbased""" +488 74 dataset """kinships""" +488 74 model """proje""" +488 74 loss """softplus""" +488 74 regularizer """no""" +488 74 optimizer """adam""" +488 74 training_loop """owa""" +488 74 negative_sampler """basic""" +488 74 evaluator """rankbased""" +488 75 dataset """kinships""" +488 75 model """proje""" +488 75 loss """softplus""" +488 75 regularizer """no""" +488 75 optimizer """adam""" +488 75 training_loop """owa""" +488 75 negative_sampler """basic""" +488 75 evaluator """rankbased""" +488 76 dataset """kinships""" +488 76 model """proje""" +488 76 loss """softplus""" +488 76 regularizer """no""" +488 76 optimizer """adam""" +488 76 training_loop """owa""" +488 76 negative_sampler """basic""" +488 76 evaluator """rankbased""" +488 77 dataset """kinships""" +488 77 model 
"""proje""" +488 77 loss """softplus""" +488 77 regularizer """no""" +488 77 optimizer """adam""" +488 77 training_loop """owa""" +488 77 negative_sampler """basic""" +488 77 evaluator """rankbased""" +488 78 dataset """kinships""" +488 78 model """proje""" +488 78 loss """softplus""" +488 78 regularizer """no""" +488 78 optimizer """adam""" +488 78 training_loop """owa""" +488 78 negative_sampler """basic""" +488 78 evaluator """rankbased""" +488 79 dataset """kinships""" +488 79 model """proje""" +488 79 loss """softplus""" +488 79 regularizer """no""" +488 79 optimizer """adam""" +488 79 training_loop """owa""" +488 79 negative_sampler """basic""" +488 79 evaluator """rankbased""" +488 80 dataset """kinships""" +488 80 model """proje""" +488 80 loss """softplus""" +488 80 regularizer """no""" +488 80 optimizer """adam""" +488 80 training_loop """owa""" +488 80 negative_sampler """basic""" +488 80 evaluator """rankbased""" +488 81 dataset """kinships""" +488 81 model """proje""" +488 81 loss """softplus""" +488 81 regularizer """no""" +488 81 optimizer """adam""" +488 81 training_loop """owa""" +488 81 negative_sampler """basic""" +488 81 evaluator """rankbased""" +488 82 dataset """kinships""" +488 82 model """proje""" +488 82 loss """softplus""" +488 82 regularizer """no""" +488 82 optimizer """adam""" +488 82 training_loop """owa""" +488 82 negative_sampler """basic""" +488 82 evaluator """rankbased""" +488 83 dataset """kinships""" +488 83 model """proje""" +488 83 loss """softplus""" +488 83 regularizer """no""" +488 83 optimizer """adam""" +488 83 training_loop """owa""" +488 83 negative_sampler """basic""" +488 83 evaluator """rankbased""" +488 84 dataset """kinships""" +488 84 model """proje""" +488 84 loss """softplus""" +488 84 regularizer """no""" +488 84 optimizer """adam""" +488 84 training_loop """owa""" +488 84 negative_sampler """basic""" +488 84 evaluator """rankbased""" +488 85 dataset """kinships""" +488 85 model """proje""" +488 85 loss 
"""softplus""" +488 85 regularizer """no""" +488 85 optimizer """adam""" +488 85 training_loop """owa""" +488 85 negative_sampler """basic""" +488 85 evaluator """rankbased""" +488 86 dataset """kinships""" +488 86 model """proje""" +488 86 loss """softplus""" +488 86 regularizer """no""" +488 86 optimizer """adam""" +488 86 training_loop """owa""" +488 86 negative_sampler """basic""" +488 86 evaluator """rankbased""" +488 87 dataset """kinships""" +488 87 model """proje""" +488 87 loss """softplus""" +488 87 regularizer """no""" +488 87 optimizer """adam""" +488 87 training_loop """owa""" +488 87 negative_sampler """basic""" +488 87 evaluator """rankbased""" +488 88 dataset """kinships""" +488 88 model """proje""" +488 88 loss """softplus""" +488 88 regularizer """no""" +488 88 optimizer """adam""" +488 88 training_loop """owa""" +488 88 negative_sampler """basic""" +488 88 evaluator """rankbased""" +488 89 dataset """kinships""" +488 89 model """proje""" +488 89 loss """softplus""" +488 89 regularizer """no""" +488 89 optimizer """adam""" +488 89 training_loop """owa""" +488 89 negative_sampler """basic""" +488 89 evaluator """rankbased""" +488 90 dataset """kinships""" +488 90 model """proje""" +488 90 loss """softplus""" +488 90 regularizer """no""" +488 90 optimizer """adam""" +488 90 training_loop """owa""" +488 90 negative_sampler """basic""" +488 90 evaluator """rankbased""" +488 91 dataset """kinships""" +488 91 model """proje""" +488 91 loss """softplus""" +488 91 regularizer """no""" +488 91 optimizer """adam""" +488 91 training_loop """owa""" +488 91 negative_sampler """basic""" +488 91 evaluator """rankbased""" +488 92 dataset """kinships""" +488 92 model """proje""" +488 92 loss """softplus""" +488 92 regularizer """no""" +488 92 optimizer """adam""" +488 92 training_loop """owa""" +488 92 negative_sampler """basic""" +488 92 evaluator """rankbased""" +488 93 dataset """kinships""" +488 93 model """proje""" +488 93 loss """softplus""" +488 93 
regularizer """no""" +488 93 optimizer """adam""" +488 93 training_loop """owa""" +488 93 negative_sampler """basic""" +488 93 evaluator """rankbased""" +488 94 dataset """kinships""" +488 94 model """proje""" +488 94 loss """softplus""" +488 94 regularizer """no""" +488 94 optimizer """adam""" +488 94 training_loop """owa""" +488 94 negative_sampler """basic""" +488 94 evaluator """rankbased""" +488 95 dataset """kinships""" +488 95 model """proje""" +488 95 loss """softplus""" +488 95 regularizer """no""" +488 95 optimizer """adam""" +488 95 training_loop """owa""" +488 95 negative_sampler """basic""" +488 95 evaluator """rankbased""" +488 96 dataset """kinships""" +488 96 model """proje""" +488 96 loss """softplus""" +488 96 regularizer """no""" +488 96 optimizer """adam""" +488 96 training_loop """owa""" +488 96 negative_sampler """basic""" +488 96 evaluator """rankbased""" +488 97 dataset """kinships""" +488 97 model """proje""" +488 97 loss """softplus""" +488 97 regularizer """no""" +488 97 optimizer """adam""" +488 97 training_loop """owa""" +488 97 negative_sampler """basic""" +488 97 evaluator """rankbased""" +488 98 dataset """kinships""" +488 98 model """proje""" +488 98 loss """softplus""" +488 98 regularizer """no""" +488 98 optimizer """adam""" +488 98 training_loop """owa""" +488 98 negative_sampler """basic""" +488 98 evaluator """rankbased""" +488 99 dataset """kinships""" +488 99 model """proje""" +488 99 loss """softplus""" +488 99 regularizer """no""" +488 99 optimizer """adam""" +488 99 training_loop """owa""" +488 99 negative_sampler """basic""" +488 99 evaluator """rankbased""" +488 100 dataset """kinships""" +488 100 model """proje""" +488 100 loss """softplus""" +488 100 regularizer """no""" +488 100 optimizer """adam""" +488 100 training_loop """owa""" +488 100 negative_sampler """basic""" +488 100 evaluator """rankbased""" +489 1 model.embedding_dim 0.0 +489 1 loss.margin 4.91313526956724 +489 1 optimizer.lr 0.00438265523748698 +489 1 
negative_sampler.num_negs_per_pos 70.0 +489 1 training.batch_size 2.0 +489 2 model.embedding_dim 0.0 +489 2 loss.margin 2.0370839644312984 +489 2 optimizer.lr 0.020661574927918664 +489 2 negative_sampler.num_negs_per_pos 17.0 +489 2 training.batch_size 0.0 +489 3 model.embedding_dim 0.0 +489 3 loss.margin 5.2149393700000655 +489 3 optimizer.lr 0.013323104542907074 +489 3 negative_sampler.num_negs_per_pos 12.0 +489 3 training.batch_size 1.0 +489 4 model.embedding_dim 2.0 +489 4 loss.margin 2.838339756223855 +489 4 optimizer.lr 0.05201495354219387 +489 4 negative_sampler.num_negs_per_pos 98.0 +489 4 training.batch_size 2.0 +489 5 model.embedding_dim 2.0 +489 5 loss.margin 5.290457521232372 +489 5 optimizer.lr 0.009157653594654123 +489 5 negative_sampler.num_negs_per_pos 98.0 +489 5 training.batch_size 2.0 +489 6 model.embedding_dim 0.0 +489 6 loss.margin 8.361793640504512 +489 6 optimizer.lr 0.0017729378408896826 +489 6 negative_sampler.num_negs_per_pos 40.0 +489 6 training.batch_size 1.0 +489 7 model.embedding_dim 0.0 +489 7 loss.margin 2.5089009170314274 +489 7 optimizer.lr 0.002972602921509552 +489 7 negative_sampler.num_negs_per_pos 90.0 +489 7 training.batch_size 0.0 +489 8 model.embedding_dim 0.0 +489 8 loss.margin 3.551191497843114 +489 8 optimizer.lr 0.009958695856959231 +489 8 negative_sampler.num_negs_per_pos 86.0 +489 8 training.batch_size 1.0 +489 9 model.embedding_dim 2.0 +489 9 loss.margin 9.235040198692841 +489 9 optimizer.lr 0.001631047497403297 +489 9 negative_sampler.num_negs_per_pos 86.0 +489 9 training.batch_size 2.0 +489 10 model.embedding_dim 0.0 +489 10 loss.margin 7.953507266867492 +489 10 optimizer.lr 0.0013835983078100181 +489 10 negative_sampler.num_negs_per_pos 59.0 +489 10 training.batch_size 0.0 +489 11 model.embedding_dim 2.0 +489 11 loss.margin 5.7189246215922 +489 11 optimizer.lr 0.0010853854746522755 +489 11 negative_sampler.num_negs_per_pos 88.0 +489 11 training.batch_size 0.0 +489 12 model.embedding_dim 2.0 +489 12 loss.margin 
6.102241772014564 +489 12 optimizer.lr 0.06069624401348741 +489 12 negative_sampler.num_negs_per_pos 3.0 +489 12 training.batch_size 1.0 +489 13 model.embedding_dim 2.0 +489 13 loss.margin 1.0054905452604908 +489 13 optimizer.lr 0.00865311118775744 +489 13 negative_sampler.num_negs_per_pos 57.0 +489 13 training.batch_size 2.0 +489 14 model.embedding_dim 0.0 +489 14 loss.margin 3.9491953300162734 +489 14 optimizer.lr 0.005437357761135734 +489 14 negative_sampler.num_negs_per_pos 44.0 +489 14 training.batch_size 1.0 +489 15 model.embedding_dim 2.0 +489 15 loss.margin 7.117514369839029 +489 15 optimizer.lr 0.034681796283230995 +489 15 negative_sampler.num_negs_per_pos 57.0 +489 15 training.batch_size 0.0 +489 16 model.embedding_dim 2.0 +489 16 loss.margin 4.229324729757321 +489 16 optimizer.lr 0.027315935580834814 +489 16 negative_sampler.num_negs_per_pos 82.0 +489 16 training.batch_size 0.0 +489 17 model.embedding_dim 1.0 +489 17 loss.margin 7.096255026560005 +489 17 optimizer.lr 0.008389134893175672 +489 17 negative_sampler.num_negs_per_pos 50.0 +489 17 training.batch_size 2.0 +489 18 model.embedding_dim 0.0 +489 18 loss.margin 3.645879717270865 +489 18 optimizer.lr 0.012513125695152245 +489 18 negative_sampler.num_negs_per_pos 79.0 +489 18 training.batch_size 1.0 +489 19 model.embedding_dim 2.0 +489 19 loss.margin 4.593336706503454 +489 19 optimizer.lr 0.0027439730750833053 +489 19 negative_sampler.num_negs_per_pos 77.0 +489 19 training.batch_size 0.0 +489 20 model.embedding_dim 2.0 +489 20 loss.margin 3.1864334442569513 +489 20 optimizer.lr 0.010154301357466053 +489 20 negative_sampler.num_negs_per_pos 57.0 +489 20 training.batch_size 1.0 +489 21 model.embedding_dim 1.0 +489 21 loss.margin 7.000416853769204 +489 21 optimizer.lr 0.0032977466613648017 +489 21 negative_sampler.num_negs_per_pos 26.0 +489 21 training.batch_size 0.0 +489 22 model.embedding_dim 2.0 +489 22 loss.margin 5.211468935496538 +489 22 optimizer.lr 0.008099623162978763 +489 22 
negative_sampler.num_negs_per_pos 91.0 +489 22 training.batch_size 2.0 +489 23 model.embedding_dim 0.0 +489 23 loss.margin 3.0256667219178 +489 23 optimizer.lr 0.012489319922493901 +489 23 negative_sampler.num_negs_per_pos 18.0 +489 23 training.batch_size 0.0 +489 24 model.embedding_dim 1.0 +489 24 loss.margin 4.884954734621183 +489 24 optimizer.lr 0.027461107125746297 +489 24 negative_sampler.num_negs_per_pos 97.0 +489 24 training.batch_size 2.0 +489 25 model.embedding_dim 0.0 +489 25 loss.margin 8.008768049320054 +489 25 optimizer.lr 0.012682293539409768 +489 25 negative_sampler.num_negs_per_pos 96.0 +489 25 training.batch_size 1.0 +489 26 model.embedding_dim 1.0 +489 26 loss.margin 8.73211868230091 +489 26 optimizer.lr 0.03206090082813316 +489 26 negative_sampler.num_negs_per_pos 65.0 +489 26 training.batch_size 1.0 +489 27 model.embedding_dim 1.0 +489 27 loss.margin 0.6712983367090277 +489 27 optimizer.lr 0.006757697446739759 +489 27 negative_sampler.num_negs_per_pos 59.0 +489 27 training.batch_size 2.0 +489 28 model.embedding_dim 0.0 +489 28 loss.margin 1.9360771172097404 +489 28 optimizer.lr 0.04470071134530927 +489 28 negative_sampler.num_negs_per_pos 5.0 +489 28 training.batch_size 1.0 +489 29 model.embedding_dim 1.0 +489 29 loss.margin 8.126284900081878 +489 29 optimizer.lr 0.01746825362593319 +489 29 negative_sampler.num_negs_per_pos 75.0 +489 29 training.batch_size 0.0 +489 30 model.embedding_dim 1.0 +489 30 loss.margin 7.169656818655033 +489 30 optimizer.lr 0.08032114878077824 +489 30 negative_sampler.num_negs_per_pos 91.0 +489 30 training.batch_size 1.0 +489 31 model.embedding_dim 0.0 +489 31 loss.margin 0.8470496353203456 +489 31 optimizer.lr 0.001629469209748612 +489 31 negative_sampler.num_negs_per_pos 47.0 +489 31 training.batch_size 1.0 +489 32 model.embedding_dim 1.0 +489 32 loss.margin 6.759014382511174 +489 32 optimizer.lr 0.0036515168683249506 +489 32 negative_sampler.num_negs_per_pos 90.0 +489 32 training.batch_size 1.0 +489 33 
model.embedding_dim 2.0 +489 33 loss.margin 9.752097944167442 +489 33 optimizer.lr 0.01743398947907719 +489 33 negative_sampler.num_negs_per_pos 10.0 +489 33 training.batch_size 1.0 +489 34 model.embedding_dim 1.0 +489 34 loss.margin 8.904305828224775 +489 34 optimizer.lr 0.010694023498771547 +489 34 negative_sampler.num_negs_per_pos 21.0 +489 34 training.batch_size 0.0 +489 35 model.embedding_dim 2.0 +489 35 loss.margin 9.26003642026426 +489 35 optimizer.lr 0.009396733509103071 +489 35 negative_sampler.num_negs_per_pos 93.0 +489 35 training.batch_size 2.0 +489 36 model.embedding_dim 2.0 +489 36 loss.margin 1.047516244057079 +489 36 optimizer.lr 0.013059403958877178 +489 36 negative_sampler.num_negs_per_pos 61.0 +489 36 training.batch_size 1.0 +489 37 model.embedding_dim 1.0 +489 37 loss.margin 3.400563847826813 +489 37 optimizer.lr 0.01182096318564932 +489 37 negative_sampler.num_negs_per_pos 86.0 +489 37 training.batch_size 1.0 +489 38 model.embedding_dim 2.0 +489 38 loss.margin 2.617564689060582 +489 38 optimizer.lr 0.09828458105078036 +489 38 negative_sampler.num_negs_per_pos 55.0 +489 38 training.batch_size 0.0 +489 39 model.embedding_dim 0.0 +489 39 loss.margin 4.037066261761263 +489 39 optimizer.lr 0.011778975585737015 +489 39 negative_sampler.num_negs_per_pos 34.0 +489 39 training.batch_size 0.0 +489 40 model.embedding_dim 0.0 +489 40 loss.margin 3.0345577988099004 +489 40 optimizer.lr 0.029787087431693123 +489 40 negative_sampler.num_negs_per_pos 4.0 +489 40 training.batch_size 2.0 +489 41 model.embedding_dim 1.0 +489 41 loss.margin 5.948340943528878 +489 41 optimizer.lr 0.0020919867927798592 +489 41 negative_sampler.num_negs_per_pos 31.0 +489 41 training.batch_size 0.0 +489 42 model.embedding_dim 1.0 +489 42 loss.margin 7.897521676946324 +489 42 optimizer.lr 0.0015630758612828052 +489 42 negative_sampler.num_negs_per_pos 81.0 +489 42 training.batch_size 2.0 +489 43 model.embedding_dim 1.0 +489 43 loss.margin 9.329766914482034 +489 43 optimizer.lr 
0.0016493541165142375 +489 43 negative_sampler.num_negs_per_pos 31.0 +489 43 training.batch_size 0.0 +489 44 model.embedding_dim 1.0 +489 44 loss.margin 5.338681998932722 +489 44 optimizer.lr 0.02111364564396732 +489 44 negative_sampler.num_negs_per_pos 62.0 +489 44 training.batch_size 2.0 +489 45 model.embedding_dim 2.0 +489 45 loss.margin 7.771051603478596 +489 45 optimizer.lr 0.03086297218645552 +489 45 negative_sampler.num_negs_per_pos 71.0 +489 45 training.batch_size 0.0 +489 46 model.embedding_dim 1.0 +489 46 loss.margin 1.3917110328588898 +489 46 optimizer.lr 0.025483109347516023 +489 46 negative_sampler.num_negs_per_pos 96.0 +489 46 training.batch_size 1.0 +489 47 model.embedding_dim 2.0 +489 47 loss.margin 9.502936823099958 +489 47 optimizer.lr 0.07579469754210309 +489 47 negative_sampler.num_negs_per_pos 55.0 +489 47 training.batch_size 2.0 +489 48 model.embedding_dim 2.0 +489 48 loss.margin 2.8197564706520852 +489 48 optimizer.lr 0.011784223715380033 +489 48 negative_sampler.num_negs_per_pos 39.0 +489 48 training.batch_size 0.0 +489 49 model.embedding_dim 0.0 +489 49 loss.margin 1.5389520522160016 +489 49 optimizer.lr 0.0011370606532471637 +489 49 negative_sampler.num_negs_per_pos 69.0 +489 49 training.batch_size 0.0 +489 50 model.embedding_dim 0.0 +489 50 loss.margin 3.89391828901734 +489 50 optimizer.lr 0.05839553151670584 +489 50 negative_sampler.num_negs_per_pos 11.0 +489 50 training.batch_size 2.0 +489 51 model.embedding_dim 1.0 +489 51 loss.margin 8.462045875512068 +489 51 optimizer.lr 0.003022898553628586 +489 51 negative_sampler.num_negs_per_pos 27.0 +489 51 training.batch_size 0.0 +489 52 model.embedding_dim 2.0 +489 52 loss.margin 7.851639311235297 +489 52 optimizer.lr 0.025416756524515714 +489 52 negative_sampler.num_negs_per_pos 48.0 +489 52 training.batch_size 1.0 +489 53 model.embedding_dim 0.0 +489 53 loss.margin 4.359297775254316 +489 53 optimizer.lr 0.00609780805808486 +489 53 negative_sampler.num_negs_per_pos 73.0 +489 53 
training.batch_size 0.0 +489 54 model.embedding_dim 0.0 +489 54 loss.margin 9.51669134752646 +489 54 optimizer.lr 0.008711996425696571 +489 54 negative_sampler.num_negs_per_pos 95.0 +489 54 training.batch_size 2.0 +489 55 model.embedding_dim 0.0 +489 55 loss.margin 8.877396641990902 +489 55 optimizer.lr 0.04639425726183085 +489 55 negative_sampler.num_negs_per_pos 46.0 +489 55 training.batch_size 2.0 +489 56 model.embedding_dim 2.0 +489 56 loss.margin 2.9748426681682685 +489 56 optimizer.lr 0.015641448212073365 +489 56 negative_sampler.num_negs_per_pos 1.0 +489 56 training.batch_size 0.0 +489 57 model.embedding_dim 0.0 +489 57 loss.margin 9.122903355060501 +489 57 optimizer.lr 0.01025647949198321 +489 57 negative_sampler.num_negs_per_pos 87.0 +489 57 training.batch_size 0.0 +489 58 model.embedding_dim 0.0 +489 58 loss.margin 2.418436401439917 +489 58 optimizer.lr 0.0768406675023677 +489 58 negative_sampler.num_negs_per_pos 75.0 +489 58 training.batch_size 0.0 +489 59 model.embedding_dim 1.0 +489 59 loss.margin 1.9097110604697718 +489 59 optimizer.lr 0.08409273368702025 +489 59 negative_sampler.num_negs_per_pos 16.0 +489 59 training.batch_size 0.0 +489 60 model.embedding_dim 0.0 +489 60 loss.margin 9.419978432834196 +489 60 optimizer.lr 0.009387162596384387 +489 60 negative_sampler.num_negs_per_pos 59.0 +489 60 training.batch_size 0.0 +489 61 model.embedding_dim 0.0 +489 61 loss.margin 1.2095435122744327 +489 61 optimizer.lr 0.00243191497066295 +489 61 negative_sampler.num_negs_per_pos 73.0 +489 61 training.batch_size 0.0 +489 62 model.embedding_dim 0.0 +489 62 loss.margin 7.587938231590132 +489 62 optimizer.lr 0.02296241220887966 +489 62 negative_sampler.num_negs_per_pos 84.0 +489 62 training.batch_size 1.0 +489 63 model.embedding_dim 0.0 +489 63 loss.margin 8.67588881358909 +489 63 optimizer.lr 0.02704382007232615 +489 63 negative_sampler.num_negs_per_pos 33.0 +489 63 training.batch_size 0.0 +489 64 model.embedding_dim 0.0 +489 64 loss.margin 9.43102729166519 +489 
64 optimizer.lr 0.05194885074093257 +489 64 negative_sampler.num_negs_per_pos 11.0 +489 64 training.batch_size 1.0 +489 65 model.embedding_dim 2.0 +489 65 loss.margin 4.145778310018176 +489 65 optimizer.lr 0.01737919487318171 +489 65 negative_sampler.num_negs_per_pos 81.0 +489 65 training.batch_size 0.0 +489 66 model.embedding_dim 2.0 +489 66 loss.margin 1.6410592240139417 +489 66 optimizer.lr 0.0011031112213948458 +489 66 negative_sampler.num_negs_per_pos 23.0 +489 66 training.batch_size 1.0 +489 67 model.embedding_dim 0.0 +489 67 loss.margin 3.473263079840707 +489 67 optimizer.lr 0.020765993323892888 +489 67 negative_sampler.num_negs_per_pos 9.0 +489 67 training.batch_size 1.0 +489 68 model.embedding_dim 1.0 +489 68 loss.margin 2.3455939156063756 +489 68 optimizer.lr 0.006519124208277292 +489 68 negative_sampler.num_negs_per_pos 67.0 +489 68 training.batch_size 0.0 +489 69 model.embedding_dim 2.0 +489 69 loss.margin 5.715204469150451 +489 69 optimizer.lr 0.041091319758539566 +489 69 negative_sampler.num_negs_per_pos 13.0 +489 69 training.batch_size 1.0 +489 70 model.embedding_dim 1.0 +489 70 loss.margin 4.819086092976567 +489 70 optimizer.lr 0.04351694523320121 +489 70 negative_sampler.num_negs_per_pos 97.0 +489 70 training.batch_size 2.0 +489 71 model.embedding_dim 2.0 +489 71 loss.margin 1.7624908193483135 +489 71 optimizer.lr 0.02401703523770256 +489 71 negative_sampler.num_negs_per_pos 41.0 +489 71 training.batch_size 0.0 +489 72 model.embedding_dim 1.0 +489 72 loss.margin 7.664284437916027 +489 72 optimizer.lr 0.027814418259807745 +489 72 negative_sampler.num_negs_per_pos 58.0 +489 72 training.batch_size 0.0 +489 73 model.embedding_dim 0.0 +489 73 loss.margin 6.186747999046654 +489 73 optimizer.lr 0.012533462165212034 +489 73 negative_sampler.num_negs_per_pos 6.0 +489 73 training.batch_size 1.0 +489 74 model.embedding_dim 0.0 +489 74 loss.margin 3.4658584915507156 +489 74 optimizer.lr 0.0018272724725476239 +489 74 negative_sampler.num_negs_per_pos 58.0 +489 
74 training.batch_size 2.0 +489 75 model.embedding_dim 0.0 +489 75 loss.margin 5.350371837615311 +489 75 optimizer.lr 0.04667288624339251 +489 75 negative_sampler.num_negs_per_pos 88.0 +489 75 training.batch_size 1.0 +489 76 model.embedding_dim 0.0 +489 76 loss.margin 4.189802787319947 +489 76 optimizer.lr 0.013304695985479252 +489 76 negative_sampler.num_negs_per_pos 54.0 +489 76 training.batch_size 2.0 +489 77 model.embedding_dim 1.0 +489 77 loss.margin 8.01660567424184 +489 77 optimizer.lr 0.0014043436493476065 +489 77 negative_sampler.num_negs_per_pos 73.0 +489 77 training.batch_size 2.0 +489 78 model.embedding_dim 1.0 +489 78 loss.margin 4.195961030982875 +489 78 optimizer.lr 0.0010668178560180024 +489 78 negative_sampler.num_negs_per_pos 71.0 +489 78 training.batch_size 0.0 +489 79 model.embedding_dim 0.0 +489 79 loss.margin 7.442378500071557 +489 79 optimizer.lr 0.04930875517507836 +489 79 negative_sampler.num_negs_per_pos 2.0 +489 79 training.batch_size 2.0 +489 80 model.embedding_dim 0.0 +489 80 loss.margin 5.82499652738603 +489 80 optimizer.lr 0.014035034709319756 +489 80 negative_sampler.num_negs_per_pos 38.0 +489 80 training.batch_size 1.0 +489 81 model.embedding_dim 0.0 +489 81 loss.margin 5.735682977939378 +489 81 optimizer.lr 0.003592209367907558 +489 81 negative_sampler.num_negs_per_pos 29.0 +489 81 training.batch_size 1.0 +489 82 model.embedding_dim 1.0 +489 82 loss.margin 8.281860709614206 +489 82 optimizer.lr 0.058801070635674565 +489 82 negative_sampler.num_negs_per_pos 76.0 +489 82 training.batch_size 1.0 +489 83 model.embedding_dim 2.0 +489 83 loss.margin 2.509338986768551 +489 83 optimizer.lr 0.0024719460428727718 +489 83 negative_sampler.num_negs_per_pos 4.0 +489 83 training.batch_size 1.0 +489 84 model.embedding_dim 2.0 +489 84 loss.margin 3.3287059096266294 +489 84 optimizer.lr 0.01219830969971431 +489 84 negative_sampler.num_negs_per_pos 89.0 +489 84 training.batch_size 2.0 +489 85 model.embedding_dim 0.0 +489 85 loss.margin 
8.184140932344246 +489 85 optimizer.lr 0.024014066178602734 +489 85 negative_sampler.num_negs_per_pos 9.0 +489 85 training.batch_size 1.0 +489 86 model.embedding_dim 0.0 +489 86 loss.margin 6.050377092135344 +489 86 optimizer.lr 0.03823493439044802 +489 86 negative_sampler.num_negs_per_pos 5.0 +489 86 training.batch_size 1.0 +489 87 model.embedding_dim 0.0 +489 87 loss.margin 5.6636713673224826 +489 87 optimizer.lr 0.002717751151372448 +489 87 negative_sampler.num_negs_per_pos 27.0 +489 87 training.batch_size 2.0 +489 88 model.embedding_dim 0.0 +489 88 loss.margin 4.548023021394903 +489 88 optimizer.lr 0.03366303470428364 +489 88 negative_sampler.num_negs_per_pos 4.0 +489 88 training.batch_size 1.0 +489 89 model.embedding_dim 2.0 +489 89 loss.margin 7.249142477323828 +489 89 optimizer.lr 0.001216164386262052 +489 89 negative_sampler.num_negs_per_pos 16.0 +489 89 training.batch_size 2.0 +489 90 model.embedding_dim 2.0 +489 90 loss.margin 3.1878815346248857 +489 90 optimizer.lr 0.003044709196144663 +489 90 negative_sampler.num_negs_per_pos 65.0 +489 90 training.batch_size 1.0 +489 91 model.embedding_dim 0.0 +489 91 loss.margin 2.1968797518900445 +489 91 optimizer.lr 0.02380373722157545 +489 91 negative_sampler.num_negs_per_pos 96.0 +489 91 training.batch_size 2.0 +489 92 model.embedding_dim 1.0 +489 92 loss.margin 5.5359515726144 +489 92 optimizer.lr 0.0029081927644285556 +489 92 negative_sampler.num_negs_per_pos 7.0 +489 92 training.batch_size 2.0 +489 93 model.embedding_dim 0.0 +489 93 loss.margin 2.034987986744647 +489 93 optimizer.lr 0.0902301598521909 +489 93 negative_sampler.num_negs_per_pos 9.0 +489 93 training.batch_size 1.0 +489 94 model.embedding_dim 2.0 +489 94 loss.margin 8.927920220166184 +489 94 optimizer.lr 0.021106866416219984 +489 94 negative_sampler.num_negs_per_pos 80.0 +489 94 training.batch_size 2.0 +489 95 model.embedding_dim 2.0 +489 95 loss.margin 2.3655331786907263 +489 95 optimizer.lr 0.001444568866126777 +489 95 
negative_sampler.num_negs_per_pos 6.0 +489 95 training.batch_size 1.0 +489 96 model.embedding_dim 2.0 +489 96 loss.margin 3.2083713930774156 +489 96 optimizer.lr 0.02836774451240256 +489 96 negative_sampler.num_negs_per_pos 88.0 +489 96 training.batch_size 2.0 +489 97 model.embedding_dim 0.0 +489 97 loss.margin 4.570466265162181 +489 97 optimizer.lr 0.001393525320174657 +489 97 negative_sampler.num_negs_per_pos 30.0 +489 97 training.batch_size 0.0 +489 98 model.embedding_dim 2.0 +489 98 loss.margin 7.9291645104188495 +489 98 optimizer.lr 0.09677805899210527 +489 98 negative_sampler.num_negs_per_pos 41.0 +489 98 training.batch_size 0.0 +489 99 model.embedding_dim 1.0 +489 99 loss.margin 0.8386034437163108 +489 99 optimizer.lr 0.07616164967668011 +489 99 negative_sampler.num_negs_per_pos 4.0 +489 99 training.batch_size 2.0 +489 100 model.embedding_dim 1.0 +489 100 loss.margin 7.9887021570839325 +489 100 optimizer.lr 0.007626992536778294 +489 100 negative_sampler.num_negs_per_pos 56.0 +489 100 training.batch_size 2.0 +489 1 dataset """kinships""" +489 1 model """proje""" +489 1 loss """marginranking""" +489 1 regularizer """no""" +489 1 optimizer """adam""" +489 1 training_loop """owa""" +489 1 negative_sampler """basic""" +489 1 evaluator """rankbased""" +489 2 dataset """kinships""" +489 2 model """proje""" +489 2 loss """marginranking""" +489 2 regularizer """no""" +489 2 optimizer """adam""" +489 2 training_loop """owa""" +489 2 negative_sampler """basic""" +489 2 evaluator """rankbased""" +489 3 dataset """kinships""" +489 3 model """proje""" +489 3 loss """marginranking""" +489 3 regularizer """no""" +489 3 optimizer """adam""" +489 3 training_loop """owa""" +489 3 negative_sampler """basic""" +489 3 evaluator """rankbased""" +489 4 dataset """kinships""" +489 4 model """proje""" +489 4 loss """marginranking""" +489 4 regularizer """no""" +489 4 optimizer """adam""" +489 4 training_loop """owa""" +489 4 negative_sampler """basic""" +489 4 evaluator 
"""rankbased""" +489 5 dataset """kinships""" +489 5 model """proje""" +489 5 loss """marginranking""" +489 5 regularizer """no""" +489 5 optimizer """adam""" +489 5 training_loop """owa""" +489 5 negative_sampler """basic""" +489 5 evaluator """rankbased""" +489 6 dataset """kinships""" +489 6 model """proje""" +489 6 loss """marginranking""" +489 6 regularizer """no""" +489 6 optimizer """adam""" +489 6 training_loop """owa""" +489 6 negative_sampler """basic""" +489 6 evaluator """rankbased""" +489 7 dataset """kinships""" +489 7 model """proje""" +489 7 loss """marginranking""" +489 7 regularizer """no""" +489 7 optimizer """adam""" +489 7 training_loop """owa""" +489 7 negative_sampler """basic""" +489 7 evaluator """rankbased""" +489 8 dataset """kinships""" +489 8 model """proje""" +489 8 loss """marginranking""" +489 8 regularizer """no""" +489 8 optimizer """adam""" +489 8 training_loop """owa""" +489 8 negative_sampler """basic""" +489 8 evaluator """rankbased""" +489 9 dataset """kinships""" +489 9 model """proje""" +489 9 loss """marginranking""" +489 9 regularizer """no""" +489 9 optimizer """adam""" +489 9 training_loop """owa""" +489 9 negative_sampler """basic""" +489 9 evaluator """rankbased""" +489 10 dataset """kinships""" +489 10 model """proje""" +489 10 loss """marginranking""" +489 10 regularizer """no""" +489 10 optimizer """adam""" +489 10 training_loop """owa""" +489 10 negative_sampler """basic""" +489 10 evaluator """rankbased""" +489 11 dataset """kinships""" +489 11 model """proje""" +489 11 loss """marginranking""" +489 11 regularizer """no""" +489 11 optimizer """adam""" +489 11 training_loop """owa""" +489 11 negative_sampler """basic""" +489 11 evaluator """rankbased""" +489 12 dataset """kinships""" +489 12 model """proje""" +489 12 loss """marginranking""" +489 12 regularizer """no""" +489 12 optimizer """adam""" +489 12 training_loop """owa""" +489 12 negative_sampler """basic""" +489 12 evaluator """rankbased""" +489 13 dataset 
"""kinships""" +489 13 model """proje""" +489 13 loss """marginranking""" +489 13 regularizer """no""" +489 13 optimizer """adam""" +489 13 training_loop """owa""" +489 13 negative_sampler """basic""" +489 13 evaluator """rankbased""" +489 14 dataset """kinships""" +489 14 model """proje""" +489 14 loss """marginranking""" +489 14 regularizer """no""" +489 14 optimizer """adam""" +489 14 training_loop """owa""" +489 14 negative_sampler """basic""" +489 14 evaluator """rankbased""" +489 15 dataset """kinships""" +489 15 model """proje""" +489 15 loss """marginranking""" +489 15 regularizer """no""" +489 15 optimizer """adam""" +489 15 training_loop """owa""" +489 15 negative_sampler """basic""" +489 15 evaluator """rankbased""" +489 16 dataset """kinships""" +489 16 model """proje""" +489 16 loss """marginranking""" +489 16 regularizer """no""" +489 16 optimizer """adam""" +489 16 training_loop """owa""" +489 16 negative_sampler """basic""" +489 16 evaluator """rankbased""" +489 17 dataset """kinships""" +489 17 model """proje""" +489 17 loss """marginranking""" +489 17 regularizer """no""" +489 17 optimizer """adam""" +489 17 training_loop """owa""" +489 17 negative_sampler """basic""" +489 17 evaluator """rankbased""" +489 18 dataset """kinships""" +489 18 model """proje""" +489 18 loss """marginranking""" +489 18 regularizer """no""" +489 18 optimizer """adam""" +489 18 training_loop """owa""" +489 18 negative_sampler """basic""" +489 18 evaluator """rankbased""" +489 19 dataset """kinships""" +489 19 model """proje""" +489 19 loss """marginranking""" +489 19 regularizer """no""" +489 19 optimizer """adam""" +489 19 training_loop """owa""" +489 19 negative_sampler """basic""" +489 19 evaluator """rankbased""" +489 20 dataset """kinships""" +489 20 model """proje""" +489 20 loss """marginranking""" +489 20 regularizer """no""" +489 20 optimizer """adam""" +489 20 training_loop """owa""" +489 20 negative_sampler """basic""" +489 20 evaluator """rankbased""" +489 21 
dataset """kinships""" +489 21 model """proje""" +489 21 loss """marginranking""" +489 21 regularizer """no""" +489 21 optimizer """adam""" +489 21 training_loop """owa""" +489 21 negative_sampler """basic""" +489 21 evaluator """rankbased""" +489 22 dataset """kinships""" +489 22 model """proje""" +489 22 loss """marginranking""" +489 22 regularizer """no""" +489 22 optimizer """adam""" +489 22 training_loop """owa""" +489 22 negative_sampler """basic""" +489 22 evaluator """rankbased""" +489 23 dataset """kinships""" +489 23 model """proje""" +489 23 loss """marginranking""" +489 23 regularizer """no""" +489 23 optimizer """adam""" +489 23 training_loop """owa""" +489 23 negative_sampler """basic""" +489 23 evaluator """rankbased""" +489 24 dataset """kinships""" +489 24 model """proje""" +489 24 loss """marginranking""" +489 24 regularizer """no""" +489 24 optimizer """adam""" +489 24 training_loop """owa""" +489 24 negative_sampler """basic""" +489 24 evaluator """rankbased""" +489 25 dataset """kinships""" +489 25 model """proje""" +489 25 loss """marginranking""" +489 25 regularizer """no""" +489 25 optimizer """adam""" +489 25 training_loop """owa""" +489 25 negative_sampler """basic""" +489 25 evaluator """rankbased""" +489 26 dataset """kinships""" +489 26 model """proje""" +489 26 loss """marginranking""" +489 26 regularizer """no""" +489 26 optimizer """adam""" +489 26 training_loop """owa""" +489 26 negative_sampler """basic""" +489 26 evaluator """rankbased""" +489 27 dataset """kinships""" +489 27 model """proje""" +489 27 loss """marginranking""" +489 27 regularizer """no""" +489 27 optimizer """adam""" +489 27 training_loop """owa""" +489 27 negative_sampler """basic""" +489 27 evaluator """rankbased""" +489 28 dataset """kinships""" +489 28 model """proje""" +489 28 loss """marginranking""" +489 28 regularizer """no""" +489 28 optimizer """adam""" +489 28 training_loop """owa""" +489 28 negative_sampler """basic""" +489 28 evaluator """rankbased""" 
+489 29 dataset """kinships""" +489 29 model """proje""" +489 29 loss """marginranking""" +489 29 regularizer """no""" +489 29 optimizer """adam""" +489 29 training_loop """owa""" +489 29 negative_sampler """basic""" +489 29 evaluator """rankbased""" +489 30 dataset """kinships""" +489 30 model """proje""" +489 30 loss """marginranking""" +489 30 regularizer """no""" +489 30 optimizer """adam""" +489 30 training_loop """owa""" +489 30 negative_sampler """basic""" +489 30 evaluator """rankbased""" +489 31 dataset """kinships""" +489 31 model """proje""" +489 31 loss """marginranking""" +489 31 regularizer """no""" +489 31 optimizer """adam""" +489 31 training_loop """owa""" +489 31 negative_sampler """basic""" +489 31 evaluator """rankbased""" +489 32 dataset """kinships""" +489 32 model """proje""" +489 32 loss """marginranking""" +489 32 regularizer """no""" +489 32 optimizer """adam""" +489 32 training_loop """owa""" +489 32 negative_sampler """basic""" +489 32 evaluator """rankbased""" +489 33 dataset """kinships""" +489 33 model """proje""" +489 33 loss """marginranking""" +489 33 regularizer """no""" +489 33 optimizer """adam""" +489 33 training_loop """owa""" +489 33 negative_sampler """basic""" +489 33 evaluator """rankbased""" +489 34 dataset """kinships""" +489 34 model """proje""" +489 34 loss """marginranking""" +489 34 regularizer """no""" +489 34 optimizer """adam""" +489 34 training_loop """owa""" +489 34 negative_sampler """basic""" +489 34 evaluator """rankbased""" +489 35 dataset """kinships""" +489 35 model """proje""" +489 35 loss """marginranking""" +489 35 regularizer """no""" +489 35 optimizer """adam""" +489 35 training_loop """owa""" +489 35 negative_sampler """basic""" +489 35 evaluator """rankbased""" +489 36 dataset """kinships""" +489 36 model """proje""" +489 36 loss """marginranking""" +489 36 regularizer """no""" +489 36 optimizer """adam""" +489 36 training_loop """owa""" +489 36 negative_sampler """basic""" +489 36 evaluator 
"""rankbased""" +489 37 dataset """kinships""" +489 37 model """proje""" +489 37 loss """marginranking""" +489 37 regularizer """no""" +489 37 optimizer """adam""" +489 37 training_loop """owa""" +489 37 negative_sampler """basic""" +489 37 evaluator """rankbased""" +489 38 dataset """kinships""" +489 38 model """proje""" +489 38 loss """marginranking""" +489 38 regularizer """no""" +489 38 optimizer """adam""" +489 38 training_loop """owa""" +489 38 negative_sampler """basic""" +489 38 evaluator """rankbased""" +489 39 dataset """kinships""" +489 39 model """proje""" +489 39 loss """marginranking""" +489 39 regularizer """no""" +489 39 optimizer """adam""" +489 39 training_loop """owa""" +489 39 negative_sampler """basic""" +489 39 evaluator """rankbased""" +489 40 dataset """kinships""" +489 40 model """proje""" +489 40 loss """marginranking""" +489 40 regularizer """no""" +489 40 optimizer """adam""" +489 40 training_loop """owa""" +489 40 negative_sampler """basic""" +489 40 evaluator """rankbased""" +489 41 dataset """kinships""" +489 41 model """proje""" +489 41 loss """marginranking""" +489 41 regularizer """no""" +489 41 optimizer """adam""" +489 41 training_loop """owa""" +489 41 negative_sampler """basic""" +489 41 evaluator """rankbased""" +489 42 dataset """kinships""" +489 42 model """proje""" +489 42 loss """marginranking""" +489 42 regularizer """no""" +489 42 optimizer """adam""" +489 42 training_loop """owa""" +489 42 negative_sampler """basic""" +489 42 evaluator """rankbased""" +489 43 dataset """kinships""" +489 43 model """proje""" +489 43 loss """marginranking""" +489 43 regularizer """no""" +489 43 optimizer """adam""" +489 43 training_loop """owa""" +489 43 negative_sampler """basic""" +489 43 evaluator """rankbased""" +489 44 dataset """kinships""" +489 44 model """proje""" +489 44 loss """marginranking""" +489 44 regularizer """no""" +489 44 optimizer """adam""" +489 44 training_loop """owa""" +489 44 negative_sampler """basic""" +489 44 
evaluator """rankbased""" +489 45 dataset """kinships""" +489 45 model """proje""" +489 45 loss """marginranking""" +489 45 regularizer """no""" +489 45 optimizer """adam""" +489 45 training_loop """owa""" +489 45 negative_sampler """basic""" +489 45 evaluator """rankbased""" +489 46 dataset """kinships""" +489 46 model """proje""" +489 46 loss """marginranking""" +489 46 regularizer """no""" +489 46 optimizer """adam""" +489 46 training_loop """owa""" +489 46 negative_sampler """basic""" +489 46 evaluator """rankbased""" +489 47 dataset """kinships""" +489 47 model """proje""" +489 47 loss """marginranking""" +489 47 regularizer """no""" +489 47 optimizer """adam""" +489 47 training_loop """owa""" +489 47 negative_sampler """basic""" +489 47 evaluator """rankbased""" +489 48 dataset """kinships""" +489 48 model """proje""" +489 48 loss """marginranking""" +489 48 regularizer """no""" +489 48 optimizer """adam""" +489 48 training_loop """owa""" +489 48 negative_sampler """basic""" +489 48 evaluator """rankbased""" +489 49 dataset """kinships""" +489 49 model """proje""" +489 49 loss """marginranking""" +489 49 regularizer """no""" +489 49 optimizer """adam""" +489 49 training_loop """owa""" +489 49 negative_sampler """basic""" +489 49 evaluator """rankbased""" +489 50 dataset """kinships""" +489 50 model """proje""" +489 50 loss """marginranking""" +489 50 regularizer """no""" +489 50 optimizer """adam""" +489 50 training_loop """owa""" +489 50 negative_sampler """basic""" +489 50 evaluator """rankbased""" +489 51 dataset """kinships""" +489 51 model """proje""" +489 51 loss """marginranking""" +489 51 regularizer """no""" +489 51 optimizer """adam""" +489 51 training_loop """owa""" +489 51 negative_sampler """basic""" +489 51 evaluator """rankbased""" +489 52 dataset """kinships""" +489 52 model """proje""" +489 52 loss """marginranking""" +489 52 regularizer """no""" +489 52 optimizer """adam""" +489 52 training_loop """owa""" +489 52 negative_sampler """basic""" 
+489 52 evaluator """rankbased""" +489 53 dataset """kinships""" +489 53 model """proje""" +489 53 loss """marginranking""" +489 53 regularizer """no""" +489 53 optimizer """adam""" +489 53 training_loop """owa""" +489 53 negative_sampler """basic""" +489 53 evaluator """rankbased""" +489 54 dataset """kinships""" +489 54 model """proje""" +489 54 loss """marginranking""" +489 54 regularizer """no""" +489 54 optimizer """adam""" +489 54 training_loop """owa""" +489 54 negative_sampler """basic""" +489 54 evaluator """rankbased""" +489 55 dataset """kinships""" +489 55 model """proje""" +489 55 loss """marginranking""" +489 55 regularizer """no""" +489 55 optimizer """adam""" +489 55 training_loop """owa""" +489 55 negative_sampler """basic""" +489 55 evaluator """rankbased""" +489 56 dataset """kinships""" +489 56 model """proje""" +489 56 loss """marginranking""" +489 56 regularizer """no""" +489 56 optimizer """adam""" +489 56 training_loop """owa""" +489 56 negative_sampler """basic""" +489 56 evaluator """rankbased""" +489 57 dataset """kinships""" +489 57 model """proje""" +489 57 loss """marginranking""" +489 57 regularizer """no""" +489 57 optimizer """adam""" +489 57 training_loop """owa""" +489 57 negative_sampler """basic""" +489 57 evaluator """rankbased""" +489 58 dataset """kinships""" +489 58 model """proje""" +489 58 loss """marginranking""" +489 58 regularizer """no""" +489 58 optimizer """adam""" +489 58 training_loop """owa""" +489 58 negative_sampler """basic""" +489 58 evaluator """rankbased""" +489 59 dataset """kinships""" +489 59 model """proje""" +489 59 loss """marginranking""" +489 59 regularizer """no""" +489 59 optimizer """adam""" +489 59 training_loop """owa""" +489 59 negative_sampler """basic""" +489 59 evaluator """rankbased""" +489 60 dataset """kinships""" +489 60 model """proje""" +489 60 loss """marginranking""" +489 60 regularizer """no""" +489 60 optimizer """adam""" +489 60 training_loop """owa""" +489 60 negative_sampler 
"""basic""" +489 60 evaluator """rankbased""" +489 61 dataset """kinships""" +489 61 model """proje""" +489 61 loss """marginranking""" +489 61 regularizer """no""" +489 61 optimizer """adam""" +489 61 training_loop """owa""" +489 61 negative_sampler """basic""" +489 61 evaluator """rankbased""" +489 62 dataset """kinships""" +489 62 model """proje""" +489 62 loss """marginranking""" +489 62 regularizer """no""" +489 62 optimizer """adam""" +489 62 training_loop """owa""" +489 62 negative_sampler """basic""" +489 62 evaluator """rankbased""" +489 63 dataset """kinships""" +489 63 model """proje""" +489 63 loss """marginranking""" +489 63 regularizer """no""" +489 63 optimizer """adam""" +489 63 training_loop """owa""" +489 63 negative_sampler """basic""" +489 63 evaluator """rankbased""" +489 64 dataset """kinships""" +489 64 model """proje""" +489 64 loss """marginranking""" +489 64 regularizer """no""" +489 64 optimizer """adam""" +489 64 training_loop """owa""" +489 64 negative_sampler """basic""" +489 64 evaluator """rankbased""" +489 65 dataset """kinships""" +489 65 model """proje""" +489 65 loss """marginranking""" +489 65 regularizer """no""" +489 65 optimizer """adam""" +489 65 training_loop """owa""" +489 65 negative_sampler """basic""" +489 65 evaluator """rankbased""" +489 66 dataset """kinships""" +489 66 model """proje""" +489 66 loss """marginranking""" +489 66 regularizer """no""" +489 66 optimizer """adam""" +489 66 training_loop """owa""" +489 66 negative_sampler """basic""" +489 66 evaluator """rankbased""" +489 67 dataset """kinships""" +489 67 model """proje""" +489 67 loss """marginranking""" +489 67 regularizer """no""" +489 67 optimizer """adam""" +489 67 training_loop """owa""" +489 67 negative_sampler """basic""" +489 67 evaluator """rankbased""" +489 68 dataset """kinships""" +489 68 model """proje""" +489 68 loss """marginranking""" +489 68 regularizer """no""" +489 68 optimizer """adam""" +489 68 training_loop """owa""" +489 68 
negative_sampler """basic""" +489 68 evaluator """rankbased""" +489 69 dataset """kinships""" +489 69 model """proje""" +489 69 loss """marginranking""" +489 69 regularizer """no""" +489 69 optimizer """adam""" +489 69 training_loop """owa""" +489 69 negative_sampler """basic""" +489 69 evaluator """rankbased""" +489 70 dataset """kinships""" +489 70 model """proje""" +489 70 loss """marginranking""" +489 70 regularizer """no""" +489 70 optimizer """adam""" +489 70 training_loop """owa""" +489 70 negative_sampler """basic""" +489 70 evaluator """rankbased""" +489 71 dataset """kinships""" +489 71 model """proje""" +489 71 loss """marginranking""" +489 71 regularizer """no""" +489 71 optimizer """adam""" +489 71 training_loop """owa""" +489 71 negative_sampler """basic""" +489 71 evaluator """rankbased""" +489 72 dataset """kinships""" +489 72 model """proje""" +489 72 loss """marginranking""" +489 72 regularizer """no""" +489 72 optimizer """adam""" +489 72 training_loop """owa""" +489 72 negative_sampler """basic""" +489 72 evaluator """rankbased""" +489 73 dataset """kinships""" +489 73 model """proje""" +489 73 loss """marginranking""" +489 73 regularizer """no""" +489 73 optimizer """adam""" +489 73 training_loop """owa""" +489 73 negative_sampler """basic""" +489 73 evaluator """rankbased""" +489 74 dataset """kinships""" +489 74 model """proje""" +489 74 loss """marginranking""" +489 74 regularizer """no""" +489 74 optimizer """adam""" +489 74 training_loop """owa""" +489 74 negative_sampler """basic""" +489 74 evaluator """rankbased""" +489 75 dataset """kinships""" +489 75 model """proje""" +489 75 loss """marginranking""" +489 75 regularizer """no""" +489 75 optimizer """adam""" +489 75 training_loop """owa""" +489 75 negative_sampler """basic""" +489 75 evaluator """rankbased""" +489 76 dataset """kinships""" +489 76 model """proje""" +489 76 loss """marginranking""" +489 76 regularizer """no""" +489 76 optimizer """adam""" +489 76 training_loop """owa""" 
+489 76 negative_sampler """basic""" +489 76 evaluator """rankbased""" +489 77 dataset """kinships""" +489 77 model """proje""" +489 77 loss """marginranking""" +489 77 regularizer """no""" +489 77 optimizer """adam""" +489 77 training_loop """owa""" +489 77 negative_sampler """basic""" +489 77 evaluator """rankbased""" +489 78 dataset """kinships""" +489 78 model """proje""" +489 78 loss """marginranking""" +489 78 regularizer """no""" +489 78 optimizer """adam""" +489 78 training_loop """owa""" +489 78 negative_sampler """basic""" +489 78 evaluator """rankbased""" +489 79 dataset """kinships""" +489 79 model """proje""" +489 79 loss """marginranking""" +489 79 regularizer """no""" +489 79 optimizer """adam""" +489 79 training_loop """owa""" +489 79 negative_sampler """basic""" +489 79 evaluator """rankbased""" +489 80 dataset """kinships""" +489 80 model """proje""" +489 80 loss """marginranking""" +489 80 regularizer """no""" +489 80 optimizer """adam""" +489 80 training_loop """owa""" +489 80 negative_sampler """basic""" +489 80 evaluator """rankbased""" +489 81 dataset """kinships""" +489 81 model """proje""" +489 81 loss """marginranking""" +489 81 regularizer """no""" +489 81 optimizer """adam""" +489 81 training_loop """owa""" +489 81 negative_sampler """basic""" +489 81 evaluator """rankbased""" +489 82 dataset """kinships""" +489 82 model """proje""" +489 82 loss """marginranking""" +489 82 regularizer """no""" +489 82 optimizer """adam""" +489 82 training_loop """owa""" +489 82 negative_sampler """basic""" +489 82 evaluator """rankbased""" +489 83 dataset """kinships""" +489 83 model """proje""" +489 83 loss """marginranking""" +489 83 regularizer """no""" +489 83 optimizer """adam""" +489 83 training_loop """owa""" +489 83 negative_sampler """basic""" +489 83 evaluator """rankbased""" +489 84 dataset """kinships""" +489 84 model """proje""" +489 84 loss """marginranking""" +489 84 regularizer """no""" +489 84 optimizer """adam""" +489 84 training_loop 
"""owa""" +489 84 negative_sampler """basic""" +489 84 evaluator """rankbased""" +489 85 dataset """kinships""" +489 85 model """proje""" +489 85 loss """marginranking""" +489 85 regularizer """no""" +489 85 optimizer """adam""" +489 85 training_loop """owa""" +489 85 negative_sampler """basic""" +489 85 evaluator """rankbased""" +489 86 dataset """kinships""" +489 86 model """proje""" +489 86 loss """marginranking""" +489 86 regularizer """no""" +489 86 optimizer """adam""" +489 86 training_loop """owa""" +489 86 negative_sampler """basic""" +489 86 evaluator """rankbased""" +489 87 dataset """kinships""" +489 87 model """proje""" +489 87 loss """marginranking""" +489 87 regularizer """no""" +489 87 optimizer """adam""" +489 87 training_loop """owa""" +489 87 negative_sampler """basic""" +489 87 evaluator """rankbased""" +489 88 dataset """kinships""" +489 88 model """proje""" +489 88 loss """marginranking""" +489 88 regularizer """no""" +489 88 optimizer """adam""" +489 88 training_loop """owa""" +489 88 negative_sampler """basic""" +489 88 evaluator """rankbased""" +489 89 dataset """kinships""" +489 89 model """proje""" +489 89 loss """marginranking""" +489 89 regularizer """no""" +489 89 optimizer """adam""" +489 89 training_loop """owa""" +489 89 negative_sampler """basic""" +489 89 evaluator """rankbased""" +489 90 dataset """kinships""" +489 90 model """proje""" +489 90 loss """marginranking""" +489 90 regularizer """no""" +489 90 optimizer """adam""" +489 90 training_loop """owa""" +489 90 negative_sampler """basic""" +489 90 evaluator """rankbased""" +489 91 dataset """kinships""" +489 91 model """proje""" +489 91 loss """marginranking""" +489 91 regularizer """no""" +489 91 optimizer """adam""" +489 91 training_loop """owa""" +489 91 negative_sampler """basic""" +489 91 evaluator """rankbased""" +489 92 dataset """kinships""" +489 92 model """proje""" +489 92 loss """marginranking""" +489 92 regularizer """no""" +489 92 optimizer """adam""" +489 92 
training_loop """owa""" +489 92 negative_sampler """basic""" +489 92 evaluator """rankbased""" +489 93 dataset """kinships""" +489 93 model """proje""" +489 93 loss """marginranking""" +489 93 regularizer """no""" +489 93 optimizer """adam""" +489 93 training_loop """owa""" +489 93 negative_sampler """basic""" +489 93 evaluator """rankbased""" +489 94 dataset """kinships""" +489 94 model """proje""" +489 94 loss """marginranking""" +489 94 regularizer """no""" +489 94 optimizer """adam""" +489 94 training_loop """owa""" +489 94 negative_sampler """basic""" +489 94 evaluator """rankbased""" +489 95 dataset """kinships""" +489 95 model """proje""" +489 95 loss """marginranking""" +489 95 regularizer """no""" +489 95 optimizer """adam""" +489 95 training_loop """owa""" +489 95 negative_sampler """basic""" +489 95 evaluator """rankbased""" +489 96 dataset """kinships""" +489 96 model """proje""" +489 96 loss """marginranking""" +489 96 regularizer """no""" +489 96 optimizer """adam""" +489 96 training_loop """owa""" +489 96 negative_sampler """basic""" +489 96 evaluator """rankbased""" +489 97 dataset """kinships""" +489 97 model """proje""" +489 97 loss """marginranking""" +489 97 regularizer """no""" +489 97 optimizer """adam""" +489 97 training_loop """owa""" +489 97 negative_sampler """basic""" +489 97 evaluator """rankbased""" +489 98 dataset """kinships""" +489 98 model """proje""" +489 98 loss """marginranking""" +489 98 regularizer """no""" +489 98 optimizer """adam""" +489 98 training_loop """owa""" +489 98 negative_sampler """basic""" +489 98 evaluator """rankbased""" +489 99 dataset """kinships""" +489 99 model """proje""" +489 99 loss """marginranking""" +489 99 regularizer """no""" +489 99 optimizer """adam""" +489 99 training_loop """owa""" +489 99 negative_sampler """basic""" +489 99 evaluator """rankbased""" +489 100 dataset """kinships""" +489 100 model """proje""" +489 100 loss """marginranking""" +489 100 regularizer """no""" +489 100 optimizer 
"""adam""" +489 100 training_loop """owa""" +489 100 negative_sampler """basic""" +489 100 evaluator """rankbased""" +490 1 model.embedding_dim 2.0 +490 1 loss.margin 9.393749848482539 +490 1 optimizer.lr 0.05957285941138595 +490 1 negative_sampler.num_negs_per_pos 89.0 +490 1 training.batch_size 1.0 +490 2 model.embedding_dim 1.0 +490 2 loss.margin 2.1993154997023487 +490 2 optimizer.lr 0.03548868457912018 +490 2 negative_sampler.num_negs_per_pos 70.0 +490 2 training.batch_size 0.0 +490 3 model.embedding_dim 0.0 +490 3 loss.margin 0.7271189964085192 +490 3 optimizer.lr 0.004060318839884875 +490 3 negative_sampler.num_negs_per_pos 97.0 +490 3 training.batch_size 1.0 +490 4 model.embedding_dim 2.0 +490 4 loss.margin 5.120077958136899 +490 4 optimizer.lr 0.006164878666134516 +490 4 negative_sampler.num_negs_per_pos 24.0 +490 4 training.batch_size 2.0 +490 5 model.embedding_dim 0.0 +490 5 loss.margin 5.816515101127492 +490 5 optimizer.lr 0.01467317747723838 +490 5 negative_sampler.num_negs_per_pos 27.0 +490 5 training.batch_size 2.0 +490 6 model.embedding_dim 2.0 +490 6 loss.margin 3.5421676724489295 +490 6 optimizer.lr 0.0025777424200442442 +490 6 negative_sampler.num_negs_per_pos 73.0 +490 6 training.batch_size 1.0 +490 7 model.embedding_dim 0.0 +490 7 loss.margin 3.7908201282252603 +490 7 optimizer.lr 0.07904789581471401 +490 7 negative_sampler.num_negs_per_pos 51.0 +490 7 training.batch_size 2.0 +490 8 model.embedding_dim 1.0 +490 8 loss.margin 8.253873514075917 +490 8 optimizer.lr 0.0050236336765406234 +490 8 negative_sampler.num_negs_per_pos 35.0 +490 8 training.batch_size 2.0 +490 9 model.embedding_dim 1.0 +490 9 loss.margin 8.099633669276916 +490 9 optimizer.lr 0.023320832660856793 +490 9 negative_sampler.num_negs_per_pos 41.0 +490 9 training.batch_size 2.0 +490 10 model.embedding_dim 2.0 +490 10 loss.margin 4.7219264482760455 +490 10 optimizer.lr 0.0013243952797331586 +490 10 negative_sampler.num_negs_per_pos 30.0 +490 10 training.batch_size 1.0 +490 11 
model.embedding_dim 0.0 +490 11 loss.margin 5.427526372864001 +490 11 optimizer.lr 0.008263408441083102 +490 11 negative_sampler.num_negs_per_pos 84.0 +490 11 training.batch_size 2.0 +490 12 model.embedding_dim 2.0 +490 12 loss.margin 1.9257641287466771 +490 12 optimizer.lr 0.004738278517008295 +490 12 negative_sampler.num_negs_per_pos 36.0 +490 12 training.batch_size 1.0 +490 13 model.embedding_dim 0.0 +490 13 loss.margin 6.260646045796886 +490 13 optimizer.lr 0.007892588926431096 +490 13 negative_sampler.num_negs_per_pos 88.0 +490 13 training.batch_size 1.0 +490 14 model.embedding_dim 0.0 +490 14 loss.margin 5.012179220700966 +490 14 optimizer.lr 0.020291360683338026 +490 14 negative_sampler.num_negs_per_pos 61.0 +490 14 training.batch_size 0.0 +490 15 model.embedding_dim 0.0 +490 15 loss.margin 6.415309182610248 +490 15 optimizer.lr 0.009330054346066475 +490 15 negative_sampler.num_negs_per_pos 95.0 +490 15 training.batch_size 0.0 +490 16 model.embedding_dim 1.0 +490 16 loss.margin 1.7246377444721142 +490 16 optimizer.lr 0.013444635687360672 +490 16 negative_sampler.num_negs_per_pos 3.0 +490 16 training.batch_size 2.0 +490 17 model.embedding_dim 1.0 +490 17 loss.margin 3.9669838426652904 +490 17 optimizer.lr 0.0010170313001245737 +490 17 negative_sampler.num_negs_per_pos 36.0 +490 17 training.batch_size 2.0 +490 18 model.embedding_dim 1.0 +490 18 loss.margin 6.823517465917299 +490 18 optimizer.lr 0.007408551690351252 +490 18 negative_sampler.num_negs_per_pos 81.0 +490 18 training.batch_size 1.0 +490 19 model.embedding_dim 0.0 +490 19 loss.margin 1.7277444948079972 +490 19 optimizer.lr 0.0064986071469718305 +490 19 negative_sampler.num_negs_per_pos 79.0 +490 19 training.batch_size 2.0 +490 20 model.embedding_dim 1.0 +490 20 loss.margin 5.836466872090371 +490 20 optimizer.lr 0.01605457515612487 +490 20 negative_sampler.num_negs_per_pos 36.0 +490 20 training.batch_size 2.0 +490 21 model.embedding_dim 0.0 +490 21 loss.margin 2.6795001772037894 +490 21 optimizer.lr 
0.0012979893104673092 +490 21 negative_sampler.num_negs_per_pos 73.0 +490 21 training.batch_size 0.0 +490 22 model.embedding_dim 1.0 +490 22 loss.margin 6.30405222528484 +490 22 optimizer.lr 0.020886241255872462 +490 22 negative_sampler.num_negs_per_pos 58.0 +490 22 training.batch_size 2.0 +490 23 model.embedding_dim 2.0 +490 23 loss.margin 7.80431912973123 +490 23 optimizer.lr 0.002755281724664001 +490 23 negative_sampler.num_negs_per_pos 72.0 +490 23 training.batch_size 1.0 +490 24 model.embedding_dim 2.0 +490 24 loss.margin 1.3683081866189823 +490 24 optimizer.lr 0.008114990005569487 +490 24 negative_sampler.num_negs_per_pos 35.0 +490 24 training.batch_size 2.0 +490 25 model.embedding_dim 2.0 +490 25 loss.margin 3.336066781060292 +490 25 optimizer.lr 0.01113402924476776 +490 25 negative_sampler.num_negs_per_pos 9.0 +490 25 training.batch_size 0.0 +490 26 model.embedding_dim 0.0 +490 26 loss.margin 5.161838663536304 +490 26 optimizer.lr 0.012807004963138646 +490 26 negative_sampler.num_negs_per_pos 37.0 +490 26 training.batch_size 1.0 +490 27 model.embedding_dim 0.0 +490 27 loss.margin 9.673267370606995 +490 27 optimizer.lr 0.051390492505915576 +490 27 negative_sampler.num_negs_per_pos 68.0 +490 27 training.batch_size 2.0 +490 28 model.embedding_dim 1.0 +490 28 loss.margin 2.532397455464441 +490 28 optimizer.lr 0.04472127820509437 +490 28 negative_sampler.num_negs_per_pos 84.0 +490 28 training.batch_size 0.0 +490 29 model.embedding_dim 0.0 +490 29 loss.margin 8.33375033108945 +490 29 optimizer.lr 0.012175096826556594 +490 29 negative_sampler.num_negs_per_pos 47.0 +490 29 training.batch_size 0.0 +490 30 model.embedding_dim 0.0 +490 30 loss.margin 1.3206911280945826 +490 30 optimizer.lr 0.0033531504909954453 +490 30 negative_sampler.num_negs_per_pos 27.0 +490 30 training.batch_size 2.0 +490 31 model.embedding_dim 1.0 +490 31 loss.margin 4.643365406504354 +490 31 optimizer.lr 0.0030526755715585474 +490 31 negative_sampler.num_negs_per_pos 8.0 +490 31 
training.batch_size 1.0 +490 32 model.embedding_dim 2.0 +490 32 loss.margin 7.214849128475693 +490 32 optimizer.lr 0.004967050260271317 +490 32 negative_sampler.num_negs_per_pos 58.0 +490 32 training.batch_size 0.0 +490 33 model.embedding_dim 1.0 +490 33 loss.margin 6.713508739394148 +490 33 optimizer.lr 0.034000750764422015 +490 33 negative_sampler.num_negs_per_pos 37.0 +490 33 training.batch_size 2.0 +490 34 model.embedding_dim 1.0 +490 34 loss.margin 0.8854210449144588 +490 34 optimizer.lr 0.06540958128489711 +490 34 negative_sampler.num_negs_per_pos 43.0 +490 34 training.batch_size 2.0 +490 35 model.embedding_dim 2.0 +490 35 loss.margin 0.7198304380532297 +490 35 optimizer.lr 0.0021101726838979647 +490 35 negative_sampler.num_negs_per_pos 25.0 +490 35 training.batch_size 2.0 +490 36 model.embedding_dim 2.0 +490 36 loss.margin 9.4573046294012 +490 36 optimizer.lr 0.002796147781304979 +490 36 negative_sampler.num_negs_per_pos 1.0 +490 36 training.batch_size 2.0 +490 37 model.embedding_dim 2.0 +490 37 loss.margin 8.337218704087736 +490 37 optimizer.lr 0.03473532202549848 +490 37 negative_sampler.num_negs_per_pos 8.0 +490 37 training.batch_size 2.0 +490 38 model.embedding_dim 2.0 +490 38 loss.margin 2.7263977084212287 +490 38 optimizer.lr 0.014162966858470315 +490 38 negative_sampler.num_negs_per_pos 0.0 +490 38 training.batch_size 2.0 +490 39 model.embedding_dim 0.0 +490 39 loss.margin 9.262993898254692 +490 39 optimizer.lr 0.006660745396606896 +490 39 negative_sampler.num_negs_per_pos 33.0 +490 39 training.batch_size 0.0 +490 40 model.embedding_dim 1.0 +490 40 loss.margin 4.28383225274222 +490 40 optimizer.lr 0.005484476191071791 +490 40 negative_sampler.num_negs_per_pos 12.0 +490 40 training.batch_size 1.0 +490 41 model.embedding_dim 2.0 +490 41 loss.margin 4.1130255382533765 +490 41 optimizer.lr 0.06232660562589326 +490 41 negative_sampler.num_negs_per_pos 1.0 +490 41 training.batch_size 0.0 +490 42 model.embedding_dim 1.0 +490 42 loss.margin 3.5427982554702457 
+490 42 optimizer.lr 0.0025784571537636773 +490 42 negative_sampler.num_negs_per_pos 26.0 +490 42 training.batch_size 0.0 +490 43 model.embedding_dim 0.0 +490 43 loss.margin 0.7338584569321095 +490 43 optimizer.lr 0.051199167949522326 +490 43 negative_sampler.num_negs_per_pos 64.0 +490 43 training.batch_size 0.0 +490 44 model.embedding_dim 2.0 +490 44 loss.margin 8.321039925357645 +490 44 optimizer.lr 0.01019927524049736 +490 44 negative_sampler.num_negs_per_pos 48.0 +490 44 training.batch_size 0.0 +490 45 model.embedding_dim 1.0 +490 45 loss.margin 6.650663645844296 +490 45 optimizer.lr 0.0012819792424032868 +490 45 negative_sampler.num_negs_per_pos 58.0 +490 45 training.batch_size 2.0 +490 46 model.embedding_dim 1.0 +490 46 loss.margin 0.711098197155865 +490 46 optimizer.lr 0.0028000853495092196 +490 46 negative_sampler.num_negs_per_pos 28.0 +490 46 training.batch_size 2.0 +490 47 model.embedding_dim 2.0 +490 47 loss.margin 4.516091885931227 +490 47 optimizer.lr 0.06349644494961099 +490 47 negative_sampler.num_negs_per_pos 69.0 +490 47 training.batch_size 0.0 +490 48 model.embedding_dim 2.0 +490 48 loss.margin 4.982285140705374 +490 48 optimizer.lr 0.005010862853363426 +490 48 negative_sampler.num_negs_per_pos 61.0 +490 48 training.batch_size 0.0 +490 49 model.embedding_dim 1.0 +490 49 loss.margin 5.0972125857035815 +490 49 optimizer.lr 0.0018710130846785404 +490 49 negative_sampler.num_negs_per_pos 58.0 +490 49 training.batch_size 1.0 +490 50 model.embedding_dim 2.0 +490 50 loss.margin 9.385304363290498 +490 50 optimizer.lr 0.011508685949237984 +490 50 negative_sampler.num_negs_per_pos 10.0 +490 50 training.batch_size 1.0 +490 51 model.embedding_dim 0.0 +490 51 loss.margin 4.362368787061097 +490 51 optimizer.lr 0.0012883297442976382 +490 51 negative_sampler.num_negs_per_pos 58.0 +490 51 training.batch_size 1.0 +490 52 model.embedding_dim 1.0 +490 52 loss.margin 2.4714848991572618 +490 52 optimizer.lr 0.004827181142066251 +490 52 negative_sampler.num_negs_per_pos 
49.0 +490 52 training.batch_size 2.0 +490 53 model.embedding_dim 0.0 +490 53 loss.margin 0.5034869294571567 +490 53 optimizer.lr 0.0044147531941642745 +490 53 negative_sampler.num_negs_per_pos 11.0 +490 53 training.batch_size 1.0 +490 54 model.embedding_dim 0.0 +490 54 loss.margin 1.8671712255396704 +490 54 optimizer.lr 0.002118554787041524 +490 54 negative_sampler.num_negs_per_pos 12.0 +490 54 training.batch_size 2.0 +490 55 model.embedding_dim 0.0 +490 55 loss.margin 8.177646627019168 +490 55 optimizer.lr 0.0012814690036852942 +490 55 negative_sampler.num_negs_per_pos 7.0 +490 55 training.batch_size 2.0 +490 56 model.embedding_dim 1.0 +490 56 loss.margin 8.256320908956383 +490 56 optimizer.lr 0.08495597500384948 +490 56 negative_sampler.num_negs_per_pos 6.0 +490 56 training.batch_size 0.0 +490 57 model.embedding_dim 2.0 +490 57 loss.margin 3.740834387012805 +490 57 optimizer.lr 0.0012257176244660115 +490 57 negative_sampler.num_negs_per_pos 6.0 +490 57 training.batch_size 1.0 +490 58 model.embedding_dim 0.0 +490 58 loss.margin 2.2226920037957187 +490 58 optimizer.lr 0.007468991691328013 +490 58 negative_sampler.num_negs_per_pos 30.0 +490 58 training.batch_size 1.0 +490 59 model.embedding_dim 0.0 +490 59 loss.margin 7.155318984232356 +490 59 optimizer.lr 0.06750124235417282 +490 59 negative_sampler.num_negs_per_pos 97.0 +490 59 training.batch_size 2.0 +490 60 model.embedding_dim 0.0 +490 60 loss.margin 1.5218405282260077 +490 60 optimizer.lr 0.07409165338165621 +490 60 negative_sampler.num_negs_per_pos 63.0 +490 60 training.batch_size 2.0 +490 61 model.embedding_dim 0.0 +490 61 loss.margin 6.22341949284547 +490 61 optimizer.lr 0.008588682615723204 +490 61 negative_sampler.num_negs_per_pos 85.0 +490 61 training.batch_size 0.0 +490 62 model.embedding_dim 0.0 +490 62 loss.margin 5.28882063777863 +490 62 optimizer.lr 0.009197051626347812 +490 62 negative_sampler.num_negs_per_pos 18.0 +490 62 training.batch_size 0.0 +490 63 model.embedding_dim 2.0 +490 63 loss.margin 
3.8293374857034843 +490 63 optimizer.lr 0.03699369919403693 +490 63 negative_sampler.num_negs_per_pos 39.0 +490 63 training.batch_size 2.0 +490 64 model.embedding_dim 0.0 +490 64 loss.margin 9.871188593031082 +490 64 optimizer.lr 0.0010730271399325747 +490 64 negative_sampler.num_negs_per_pos 29.0 +490 64 training.batch_size 2.0 +490 65 model.embedding_dim 1.0 +490 65 loss.margin 2.2516733940731655 +490 65 optimizer.lr 0.00953431395303829 +490 65 negative_sampler.num_negs_per_pos 35.0 +490 65 training.batch_size 0.0 +490 66 model.embedding_dim 1.0 +490 66 loss.margin 2.825145731184706 +490 66 optimizer.lr 0.015311548704615364 +490 66 negative_sampler.num_negs_per_pos 67.0 +490 66 training.batch_size 1.0 +490 67 model.embedding_dim 1.0 +490 67 loss.margin 5.128417420149998 +490 67 optimizer.lr 0.014438239782029275 +490 67 negative_sampler.num_negs_per_pos 26.0 +490 67 training.batch_size 2.0 +490 68 model.embedding_dim 2.0 +490 68 loss.margin 6.466899883567594 +490 68 optimizer.lr 0.06016549264019537 +490 68 negative_sampler.num_negs_per_pos 88.0 +490 68 training.batch_size 2.0 +490 69 model.embedding_dim 0.0 +490 69 loss.margin 4.794494737703703 +490 69 optimizer.lr 0.06062995514373029 +490 69 negative_sampler.num_negs_per_pos 64.0 +490 69 training.batch_size 1.0 +490 70 model.embedding_dim 2.0 +490 70 loss.margin 4.207846529370413 +490 70 optimizer.lr 0.0011380735962622773 +490 70 negative_sampler.num_negs_per_pos 78.0 +490 70 training.batch_size 0.0 +490 71 model.embedding_dim 0.0 +490 71 loss.margin 9.066230945376534 +490 71 optimizer.lr 0.010000195444601886 +490 71 negative_sampler.num_negs_per_pos 1.0 +490 71 training.batch_size 0.0 +490 72 model.embedding_dim 1.0 +490 72 loss.margin 1.619426736160501 +490 72 optimizer.lr 0.03384719705929013 +490 72 negative_sampler.num_negs_per_pos 38.0 +490 72 training.batch_size 2.0 +490 73 model.embedding_dim 2.0 +490 73 loss.margin 5.71321353594429 +490 73 optimizer.lr 0.013955749025598584 +490 73 
negative_sampler.num_negs_per_pos 28.0 +490 73 training.batch_size 0.0 +490 74 model.embedding_dim 2.0 +490 74 loss.margin 8.926430497169797 +490 74 optimizer.lr 0.01877055284524582 +490 74 negative_sampler.num_negs_per_pos 45.0 +490 74 training.batch_size 1.0 +490 75 model.embedding_dim 0.0 +490 75 loss.margin 3.5630408205992965 +490 75 optimizer.lr 0.0639514727895883 +490 75 negative_sampler.num_negs_per_pos 68.0 +490 75 training.batch_size 0.0 +490 76 model.embedding_dim 2.0 +490 76 loss.margin 5.485528950523023 +490 76 optimizer.lr 0.029917074790675367 +490 76 negative_sampler.num_negs_per_pos 68.0 +490 76 training.batch_size 0.0 +490 77 model.embedding_dim 2.0 +490 77 loss.margin 4.054726050984259 +490 77 optimizer.lr 0.0607136565796239 +490 77 negative_sampler.num_negs_per_pos 87.0 +490 77 training.batch_size 0.0 +490 78 model.embedding_dim 1.0 +490 78 loss.margin 8.166671981822056 +490 78 optimizer.lr 0.05776321704642488 +490 78 negative_sampler.num_negs_per_pos 6.0 +490 78 training.batch_size 0.0 +490 79 model.embedding_dim 0.0 +490 79 loss.margin 6.537376521911899 +490 79 optimizer.lr 0.044400512255300034 +490 79 negative_sampler.num_negs_per_pos 65.0 +490 79 training.batch_size 2.0 +490 80 model.embedding_dim 1.0 +490 80 loss.margin 5.752576790944705 +490 80 optimizer.lr 0.014110608801182008 +490 80 negative_sampler.num_negs_per_pos 66.0 +490 80 training.batch_size 0.0 +490 81 model.embedding_dim 2.0 +490 81 loss.margin 2.874438898562351 +490 81 optimizer.lr 0.01991430910961585 +490 81 negative_sampler.num_negs_per_pos 11.0 +490 81 training.batch_size 1.0 +490 82 model.embedding_dim 1.0 +490 82 loss.margin 3.7529691921814687 +490 82 optimizer.lr 0.042441116659670396 +490 82 negative_sampler.num_negs_per_pos 68.0 +490 82 training.batch_size 1.0 +490 83 model.embedding_dim 1.0 +490 83 loss.margin 9.135211688171633 +490 83 optimizer.lr 0.03306845394117552 +490 83 negative_sampler.num_negs_per_pos 29.0 +490 83 training.batch_size 2.0 +490 84 
model.embedding_dim 2.0 +490 84 loss.margin 6.035797523459476 +490 84 optimizer.lr 0.031049561410925366 +490 84 negative_sampler.num_negs_per_pos 1.0 +490 84 training.batch_size 0.0 +490 85 model.embedding_dim 0.0 +490 85 loss.margin 9.463730110734627 +490 85 optimizer.lr 0.0018489714600336915 +490 85 negative_sampler.num_negs_per_pos 39.0 +490 85 training.batch_size 1.0 +490 86 model.embedding_dim 0.0 +490 86 loss.margin 4.997988030193381 +490 86 optimizer.lr 0.0135285613310703 +490 86 negative_sampler.num_negs_per_pos 17.0 +490 86 training.batch_size 0.0 +490 87 model.embedding_dim 0.0 +490 87 loss.margin 8.828442493893737 +490 87 optimizer.lr 0.0013520009353562034 +490 87 negative_sampler.num_negs_per_pos 23.0 +490 87 training.batch_size 1.0 +490 88 model.embedding_dim 0.0 +490 88 loss.margin 7.22030879875767 +490 88 optimizer.lr 0.022581471193413315 +490 88 negative_sampler.num_negs_per_pos 53.0 +490 88 training.batch_size 0.0 +490 89 model.embedding_dim 2.0 +490 89 loss.margin 0.810333045767768 +490 89 optimizer.lr 0.008554599479559496 +490 89 negative_sampler.num_negs_per_pos 76.0 +490 89 training.batch_size 2.0 +490 90 model.embedding_dim 1.0 +490 90 loss.margin 8.127094427064605 +490 90 optimizer.lr 0.021241173769872754 +490 90 negative_sampler.num_negs_per_pos 33.0 +490 90 training.batch_size 2.0 +490 91 model.embedding_dim 0.0 +490 91 loss.margin 6.815759191228525 +490 91 optimizer.lr 0.08791124152048695 +490 91 negative_sampler.num_negs_per_pos 25.0 +490 91 training.batch_size 2.0 +490 92 model.embedding_dim 0.0 +490 92 loss.margin 6.1889938570676675 +490 92 optimizer.lr 0.0022351367489762525 +490 92 negative_sampler.num_negs_per_pos 50.0 +490 92 training.batch_size 1.0 +490 93 model.embedding_dim 0.0 +490 93 loss.margin 1.6686291702014158 +490 93 optimizer.lr 0.0013543048449582184 +490 93 negative_sampler.num_negs_per_pos 18.0 +490 93 training.batch_size 2.0 +490 94 model.embedding_dim 1.0 +490 94 loss.margin 2.5068950010412863 +490 94 optimizer.lr 
0.04368209283396399 +490 94 negative_sampler.num_negs_per_pos 70.0 +490 94 training.batch_size 1.0 +490 95 model.embedding_dim 0.0 +490 95 loss.margin 7.055885231385515 +490 95 optimizer.lr 0.0035517893182407567 +490 95 negative_sampler.num_negs_per_pos 99.0 +490 95 training.batch_size 1.0 +490 96 model.embedding_dim 2.0 +490 96 loss.margin 2.8464435376295216 +490 96 optimizer.lr 0.011881916112792998 +490 96 negative_sampler.num_negs_per_pos 43.0 +490 96 training.batch_size 0.0 +490 97 model.embedding_dim 1.0 +490 97 loss.margin 9.779325142756658 +490 97 optimizer.lr 0.0018720870582610244 +490 97 negative_sampler.num_negs_per_pos 11.0 +490 97 training.batch_size 1.0 +490 98 model.embedding_dim 0.0 +490 98 loss.margin 6.903547566136284 +490 98 optimizer.lr 0.00249075713352573 +490 98 negative_sampler.num_negs_per_pos 26.0 +490 98 training.batch_size 1.0 +490 99 model.embedding_dim 1.0 +490 99 loss.margin 5.194442376320344 +490 99 optimizer.lr 0.004902738077750863 +490 99 negative_sampler.num_negs_per_pos 94.0 +490 99 training.batch_size 0.0 +490 100 model.embedding_dim 1.0 +490 100 loss.margin 2.259928891971805 +490 100 optimizer.lr 0.06051877882486967 +490 100 negative_sampler.num_negs_per_pos 56.0 +490 100 training.batch_size 1.0 +490 1 dataset """kinships""" +490 1 model """proje""" +490 1 loss """marginranking""" +490 1 regularizer """no""" +490 1 optimizer """adam""" +490 1 training_loop """owa""" +490 1 negative_sampler """basic""" +490 1 evaluator """rankbased""" +490 2 dataset """kinships""" +490 2 model """proje""" +490 2 loss """marginranking""" +490 2 regularizer """no""" +490 2 optimizer """adam""" +490 2 training_loop """owa""" +490 2 negative_sampler """basic""" +490 2 evaluator """rankbased""" +490 3 dataset """kinships""" +490 3 model """proje""" +490 3 loss """marginranking""" +490 3 regularizer """no""" +490 3 optimizer """adam""" +490 3 training_loop """owa""" +490 3 negative_sampler """basic""" +490 3 evaluator """rankbased""" +490 4 dataset 
"""kinships""" +490 4 model """proje""" +490 4 loss """marginranking""" +490 4 regularizer """no""" +490 4 optimizer """adam""" +490 4 training_loop """owa""" +490 4 negative_sampler """basic""" +490 4 evaluator """rankbased""" +490 5 dataset """kinships""" +490 5 model """proje""" +490 5 loss """marginranking""" +490 5 regularizer """no""" +490 5 optimizer """adam""" +490 5 training_loop """owa""" +490 5 negative_sampler """basic""" +490 5 evaluator """rankbased""" +490 6 dataset """kinships""" +490 6 model """proje""" +490 6 loss """marginranking""" +490 6 regularizer """no""" +490 6 optimizer """adam""" +490 6 training_loop """owa""" +490 6 negative_sampler """basic""" +490 6 evaluator """rankbased""" +490 7 dataset """kinships""" +490 7 model """proje""" +490 7 loss """marginranking""" +490 7 regularizer """no""" +490 7 optimizer """adam""" +490 7 training_loop """owa""" +490 7 negative_sampler """basic""" +490 7 evaluator """rankbased""" +490 8 dataset """kinships""" +490 8 model """proje""" +490 8 loss """marginranking""" +490 8 regularizer """no""" +490 8 optimizer """adam""" +490 8 training_loop """owa""" +490 8 negative_sampler """basic""" +490 8 evaluator """rankbased""" +490 9 dataset """kinships""" +490 9 model """proje""" +490 9 loss """marginranking""" +490 9 regularizer """no""" +490 9 optimizer """adam""" +490 9 training_loop """owa""" +490 9 negative_sampler """basic""" +490 9 evaluator """rankbased""" +490 10 dataset """kinships""" +490 10 model """proje""" +490 10 loss """marginranking""" +490 10 regularizer """no""" +490 10 optimizer """adam""" +490 10 training_loop """owa""" +490 10 negative_sampler """basic""" +490 10 evaluator """rankbased""" +490 11 dataset """kinships""" +490 11 model """proje""" +490 11 loss """marginranking""" +490 11 regularizer """no""" +490 11 optimizer """adam""" +490 11 training_loop """owa""" +490 11 negative_sampler """basic""" +490 11 evaluator """rankbased""" +490 12 dataset """kinships""" +490 12 model 
"""proje""" +490 12 loss """marginranking""" +490 12 regularizer """no""" +490 12 optimizer """adam""" +490 12 training_loop """owa""" +490 12 negative_sampler """basic""" +490 12 evaluator """rankbased""" +490 13 dataset """kinships""" +490 13 model """proje""" +490 13 loss """marginranking""" +490 13 regularizer """no""" +490 13 optimizer """adam""" +490 13 training_loop """owa""" +490 13 negative_sampler """basic""" +490 13 evaluator """rankbased""" +490 14 dataset """kinships""" +490 14 model """proje""" +490 14 loss """marginranking""" +490 14 regularizer """no""" +490 14 optimizer """adam""" +490 14 training_loop """owa""" +490 14 negative_sampler """basic""" +490 14 evaluator """rankbased""" +490 15 dataset """kinships""" +490 15 model """proje""" +490 15 loss """marginranking""" +490 15 regularizer """no""" +490 15 optimizer """adam""" +490 15 training_loop """owa""" +490 15 negative_sampler """basic""" +490 15 evaluator """rankbased""" +490 16 dataset """kinships""" +490 16 model """proje""" +490 16 loss """marginranking""" +490 16 regularizer """no""" +490 16 optimizer """adam""" +490 16 training_loop """owa""" +490 16 negative_sampler """basic""" +490 16 evaluator """rankbased""" +490 17 dataset """kinships""" +490 17 model """proje""" +490 17 loss """marginranking""" +490 17 regularizer """no""" +490 17 optimizer """adam""" +490 17 training_loop """owa""" +490 17 negative_sampler """basic""" +490 17 evaluator """rankbased""" +490 18 dataset """kinships""" +490 18 model """proje""" +490 18 loss """marginranking""" +490 18 regularizer """no""" +490 18 optimizer """adam""" +490 18 training_loop """owa""" +490 18 negative_sampler """basic""" +490 18 evaluator """rankbased""" +490 19 dataset """kinships""" +490 19 model """proje""" +490 19 loss """marginranking""" +490 19 regularizer """no""" +490 19 optimizer """adam""" +490 19 training_loop """owa""" +490 19 negative_sampler """basic""" +490 19 evaluator """rankbased""" +490 20 dataset """kinships""" +490 
20 model """proje""" +490 20 loss """marginranking""" +490 20 regularizer """no""" +490 20 optimizer """adam""" +490 20 training_loop """owa""" +490 20 negative_sampler """basic""" +490 20 evaluator """rankbased""" +490 21 dataset """kinships""" +490 21 model """proje""" +490 21 loss """marginranking""" +490 21 regularizer """no""" +490 21 optimizer """adam""" +490 21 training_loop """owa""" +490 21 negative_sampler """basic""" +490 21 evaluator """rankbased""" +490 22 dataset """kinships""" +490 22 model """proje""" +490 22 loss """marginranking""" +490 22 regularizer """no""" +490 22 optimizer """adam""" +490 22 training_loop """owa""" +490 22 negative_sampler """basic""" +490 22 evaluator """rankbased""" +490 23 dataset """kinships""" +490 23 model """proje""" +490 23 loss """marginranking""" +490 23 regularizer """no""" +490 23 optimizer """adam""" +490 23 training_loop """owa""" +490 23 negative_sampler """basic""" +490 23 evaluator """rankbased""" +490 24 dataset """kinships""" +490 24 model """proje""" +490 24 loss """marginranking""" +490 24 regularizer """no""" +490 24 optimizer """adam""" +490 24 training_loop """owa""" +490 24 negative_sampler """basic""" +490 24 evaluator """rankbased""" +490 25 dataset """kinships""" +490 25 model """proje""" +490 25 loss """marginranking""" +490 25 regularizer """no""" +490 25 optimizer """adam""" +490 25 training_loop """owa""" +490 25 negative_sampler """basic""" +490 25 evaluator """rankbased""" +490 26 dataset """kinships""" +490 26 model """proje""" +490 26 loss """marginranking""" +490 26 regularizer """no""" +490 26 optimizer """adam""" +490 26 training_loop """owa""" +490 26 negative_sampler """basic""" +490 26 evaluator """rankbased""" +490 27 dataset """kinships""" +490 27 model """proje""" +490 27 loss """marginranking""" +490 27 regularizer """no""" +490 27 optimizer """adam""" +490 27 training_loop """owa""" +490 27 negative_sampler """basic""" +490 27 evaluator """rankbased""" +490 28 dataset 
"""kinships""" +490 28 model """proje""" +490 28 loss """marginranking""" +490 28 regularizer """no""" +490 28 optimizer """adam""" +490 28 training_loop """owa""" +490 28 negative_sampler """basic""" +490 28 evaluator """rankbased""" +490 29 dataset """kinships""" +490 29 model """proje""" +490 29 loss """marginranking""" +490 29 regularizer """no""" +490 29 optimizer """adam""" +490 29 training_loop """owa""" +490 29 negative_sampler """basic""" +490 29 evaluator """rankbased""" +490 30 dataset """kinships""" +490 30 model """proje""" +490 30 loss """marginranking""" +490 30 regularizer """no""" +490 30 optimizer """adam""" +490 30 training_loop """owa""" +490 30 negative_sampler """basic""" +490 30 evaluator """rankbased""" +490 31 dataset """kinships""" +490 31 model """proje""" +490 31 loss """marginranking""" +490 31 regularizer """no""" +490 31 optimizer """adam""" +490 31 training_loop """owa""" +490 31 negative_sampler """basic""" +490 31 evaluator """rankbased""" +490 32 dataset """kinships""" +490 32 model """proje""" +490 32 loss """marginranking""" +490 32 regularizer """no""" +490 32 optimizer """adam""" +490 32 training_loop """owa""" +490 32 negative_sampler """basic""" +490 32 evaluator """rankbased""" +490 33 dataset """kinships""" +490 33 model """proje""" +490 33 loss """marginranking""" +490 33 regularizer """no""" +490 33 optimizer """adam""" +490 33 training_loop """owa""" +490 33 negative_sampler """basic""" +490 33 evaluator """rankbased""" +490 34 dataset """kinships""" +490 34 model """proje""" +490 34 loss """marginranking""" +490 34 regularizer """no""" +490 34 optimizer """adam""" +490 34 training_loop """owa""" +490 34 negative_sampler """basic""" +490 34 evaluator """rankbased""" +490 35 dataset """kinships""" +490 35 model """proje""" +490 35 loss """marginranking""" +490 35 regularizer """no""" +490 35 optimizer """adam""" +490 35 training_loop """owa""" +490 35 negative_sampler """basic""" +490 35 evaluator """rankbased""" +490 36 
dataset """kinships""" +490 36 model """proje""" +490 36 loss """marginranking""" +490 36 regularizer """no""" +490 36 optimizer """adam""" +490 36 training_loop """owa""" +490 36 negative_sampler """basic""" +490 36 evaluator """rankbased""" +490 37 dataset """kinships""" +490 37 model """proje""" +490 37 loss """marginranking""" +490 37 regularizer """no""" +490 37 optimizer """adam""" +490 37 training_loop """owa""" +490 37 negative_sampler """basic""" +490 37 evaluator """rankbased""" +490 38 dataset """kinships""" +490 38 model """proje""" +490 38 loss """marginranking""" +490 38 regularizer """no""" +490 38 optimizer """adam""" +490 38 training_loop """owa""" +490 38 negative_sampler """basic""" +490 38 evaluator """rankbased""" +490 39 dataset """kinships""" +490 39 model """proje""" +490 39 loss """marginranking""" +490 39 regularizer """no""" +490 39 optimizer """adam""" +490 39 training_loop """owa""" +490 39 negative_sampler """basic""" +490 39 evaluator """rankbased""" +490 40 dataset """kinships""" +490 40 model """proje""" +490 40 loss """marginranking""" +490 40 regularizer """no""" +490 40 optimizer """adam""" +490 40 training_loop """owa""" +490 40 negative_sampler """basic""" +490 40 evaluator """rankbased""" +490 41 dataset """kinships""" +490 41 model """proje""" +490 41 loss """marginranking""" +490 41 regularizer """no""" +490 41 optimizer """adam""" +490 41 training_loop """owa""" +490 41 negative_sampler """basic""" +490 41 evaluator """rankbased""" +490 42 dataset """kinships""" +490 42 model """proje""" +490 42 loss """marginranking""" +490 42 regularizer """no""" +490 42 optimizer """adam""" +490 42 training_loop """owa""" +490 42 negative_sampler """basic""" +490 42 evaluator """rankbased""" +490 43 dataset """kinships""" +490 43 model """proje""" +490 43 loss """marginranking""" +490 43 regularizer """no""" +490 43 optimizer """adam""" +490 43 training_loop """owa""" +490 43 negative_sampler """basic""" +490 43 evaluator """rankbased""" 
+490 44 dataset """kinships""" +490 44 model """proje""" +490 44 loss """marginranking""" +490 44 regularizer """no""" +490 44 optimizer """adam""" +490 44 training_loop """owa""" +490 44 negative_sampler """basic""" +490 44 evaluator """rankbased""" +490 45 dataset """kinships""" +490 45 model """proje""" +490 45 loss """marginranking""" +490 45 regularizer """no""" +490 45 optimizer """adam""" +490 45 training_loop """owa""" +490 45 negative_sampler """basic""" +490 45 evaluator """rankbased""" +490 46 dataset """kinships""" +490 46 model """proje""" +490 46 loss """marginranking""" +490 46 regularizer """no""" +490 46 optimizer """adam""" +490 46 training_loop """owa""" +490 46 negative_sampler """basic""" +490 46 evaluator """rankbased""" +490 47 dataset """kinships""" +490 47 model """proje""" +490 47 loss """marginranking""" +490 47 regularizer """no""" +490 47 optimizer """adam""" +490 47 training_loop """owa""" +490 47 negative_sampler """basic""" +490 47 evaluator """rankbased""" +490 48 dataset """kinships""" +490 48 model """proje""" +490 48 loss """marginranking""" +490 48 regularizer """no""" +490 48 optimizer """adam""" +490 48 training_loop """owa""" +490 48 negative_sampler """basic""" +490 48 evaluator """rankbased""" +490 49 dataset """kinships""" +490 49 model """proje""" +490 49 loss """marginranking""" +490 49 regularizer """no""" +490 49 optimizer """adam""" +490 49 training_loop """owa""" +490 49 negative_sampler """basic""" +490 49 evaluator """rankbased""" +490 50 dataset """kinships""" +490 50 model """proje""" +490 50 loss """marginranking""" +490 50 regularizer """no""" +490 50 optimizer """adam""" +490 50 training_loop """owa""" +490 50 negative_sampler """basic""" +490 50 evaluator """rankbased""" +490 51 dataset """kinships""" +490 51 model """proje""" +490 51 loss """marginranking""" +490 51 regularizer """no""" +490 51 optimizer """adam""" +490 51 training_loop """owa""" +490 51 negative_sampler """basic""" +490 51 evaluator 
"""rankbased""" +490 52 dataset """kinships""" +490 52 model """proje""" +490 52 loss """marginranking""" +490 52 regularizer """no""" +490 52 optimizer """adam""" +490 52 training_loop """owa""" +490 52 negative_sampler """basic""" +490 52 evaluator """rankbased""" +490 53 dataset """kinships""" +490 53 model """proje""" +490 53 loss """marginranking""" +490 53 regularizer """no""" +490 53 optimizer """adam""" +490 53 training_loop """owa""" +490 53 negative_sampler """basic""" +490 53 evaluator """rankbased""" +490 54 dataset """kinships""" +490 54 model """proje""" +490 54 loss """marginranking""" +490 54 regularizer """no""" +490 54 optimizer """adam""" +490 54 training_loop """owa""" +490 54 negative_sampler """basic""" +490 54 evaluator """rankbased""" +490 55 dataset """kinships""" +490 55 model """proje""" +490 55 loss """marginranking""" +490 55 regularizer """no""" +490 55 optimizer """adam""" +490 55 training_loop """owa""" +490 55 negative_sampler """basic""" +490 55 evaluator """rankbased""" +490 56 dataset """kinships""" +490 56 model """proje""" +490 56 loss """marginranking""" +490 56 regularizer """no""" +490 56 optimizer """adam""" +490 56 training_loop """owa""" +490 56 negative_sampler """basic""" +490 56 evaluator """rankbased""" +490 57 dataset """kinships""" +490 57 model """proje""" +490 57 loss """marginranking""" +490 57 regularizer """no""" +490 57 optimizer """adam""" +490 57 training_loop """owa""" +490 57 negative_sampler """basic""" +490 57 evaluator """rankbased""" +490 58 dataset """kinships""" +490 58 model """proje""" +490 58 loss """marginranking""" +490 58 regularizer """no""" +490 58 optimizer """adam""" +490 58 training_loop """owa""" +490 58 negative_sampler """basic""" +490 58 evaluator """rankbased""" +490 59 dataset """kinships""" +490 59 model """proje""" +490 59 loss """marginranking""" +490 59 regularizer """no""" +490 59 optimizer """adam""" +490 59 training_loop """owa""" +490 59 negative_sampler """basic""" +490 59 
evaluator """rankbased""" +490 60 dataset """kinships""" +490 60 model """proje""" +490 60 loss """marginranking""" +490 60 regularizer """no""" +490 60 optimizer """adam""" +490 60 training_loop """owa""" +490 60 negative_sampler """basic""" +490 60 evaluator """rankbased""" +490 61 dataset """kinships""" +490 61 model """proje""" +490 61 loss """marginranking""" +490 61 regularizer """no""" +490 61 optimizer """adam""" +490 61 training_loop """owa""" +490 61 negative_sampler """basic""" +490 61 evaluator """rankbased""" +490 62 dataset """kinships""" +490 62 model """proje""" +490 62 loss """marginranking""" +490 62 regularizer """no""" +490 62 optimizer """adam""" +490 62 training_loop """owa""" +490 62 negative_sampler """basic""" +490 62 evaluator """rankbased""" +490 63 dataset """kinships""" +490 63 model """proje""" +490 63 loss """marginranking""" +490 63 regularizer """no""" +490 63 optimizer """adam""" +490 63 training_loop """owa""" +490 63 negative_sampler """basic""" +490 63 evaluator """rankbased""" +490 64 dataset """kinships""" +490 64 model """proje""" +490 64 loss """marginranking""" +490 64 regularizer """no""" +490 64 optimizer """adam""" +490 64 training_loop """owa""" +490 64 negative_sampler """basic""" +490 64 evaluator """rankbased""" +490 65 dataset """kinships""" +490 65 model """proje""" +490 65 loss """marginranking""" +490 65 regularizer """no""" +490 65 optimizer """adam""" +490 65 training_loop """owa""" +490 65 negative_sampler """basic""" +490 65 evaluator """rankbased""" +490 66 dataset """kinships""" +490 66 model """proje""" +490 66 loss """marginranking""" +490 66 regularizer """no""" +490 66 optimizer """adam""" +490 66 training_loop """owa""" +490 66 negative_sampler """basic""" +490 66 evaluator """rankbased""" +490 67 dataset """kinships""" +490 67 model """proje""" +490 67 loss """marginranking""" +490 67 regularizer """no""" +490 67 optimizer """adam""" +490 67 training_loop """owa""" +490 67 negative_sampler """basic""" 
+490 67 evaluator """rankbased""" +490 68 dataset """kinships""" +490 68 model """proje""" +490 68 loss """marginranking""" +490 68 regularizer """no""" +490 68 optimizer """adam""" +490 68 training_loop """owa""" +490 68 negative_sampler """basic""" +490 68 evaluator """rankbased""" +490 69 dataset """kinships""" +490 69 model """proje""" +490 69 loss """marginranking""" +490 69 regularizer """no""" +490 69 optimizer """adam""" +490 69 training_loop """owa""" +490 69 negative_sampler """basic""" +490 69 evaluator """rankbased""" +490 70 dataset """kinships""" +490 70 model """proje""" +490 70 loss """marginranking""" +490 70 regularizer """no""" +490 70 optimizer """adam""" +490 70 training_loop """owa""" +490 70 negative_sampler """basic""" +490 70 evaluator """rankbased""" +490 71 dataset """kinships""" +490 71 model """proje""" +490 71 loss """marginranking""" +490 71 regularizer """no""" +490 71 optimizer """adam""" +490 71 training_loop """owa""" +490 71 negative_sampler """basic""" +490 71 evaluator """rankbased""" +490 72 dataset """kinships""" +490 72 model """proje""" +490 72 loss """marginranking""" +490 72 regularizer """no""" +490 72 optimizer """adam""" +490 72 training_loop """owa""" +490 72 negative_sampler """basic""" +490 72 evaluator """rankbased""" +490 73 dataset """kinships""" +490 73 model """proje""" +490 73 loss """marginranking""" +490 73 regularizer """no""" +490 73 optimizer """adam""" +490 73 training_loop """owa""" +490 73 negative_sampler """basic""" +490 73 evaluator """rankbased""" +490 74 dataset """kinships""" +490 74 model """proje""" +490 74 loss """marginranking""" +490 74 regularizer """no""" +490 74 optimizer """adam""" +490 74 training_loop """owa""" +490 74 negative_sampler """basic""" +490 74 evaluator """rankbased""" +490 75 dataset """kinships""" +490 75 model """proje""" +490 75 loss """marginranking""" +490 75 regularizer """no""" +490 75 optimizer """adam""" +490 75 training_loop """owa""" +490 75 negative_sampler 
"""basic""" +490 75 evaluator """rankbased""" +490 76 dataset """kinships""" +490 76 model """proje""" +490 76 loss """marginranking""" +490 76 regularizer """no""" +490 76 optimizer """adam""" +490 76 training_loop """owa""" +490 76 negative_sampler """basic""" +490 76 evaluator """rankbased""" +490 77 dataset """kinships""" +490 77 model """proje""" +490 77 loss """marginranking""" +490 77 regularizer """no""" +490 77 optimizer """adam""" +490 77 training_loop """owa""" +490 77 negative_sampler """basic""" +490 77 evaluator """rankbased""" +490 78 dataset """kinships""" +490 78 model """proje""" +490 78 loss """marginranking""" +490 78 regularizer """no""" +490 78 optimizer """adam""" +490 78 training_loop """owa""" +490 78 negative_sampler """basic""" +490 78 evaluator """rankbased""" +490 79 dataset """kinships""" +490 79 model """proje""" +490 79 loss """marginranking""" +490 79 regularizer """no""" +490 79 optimizer """adam""" +490 79 training_loop """owa""" +490 79 negative_sampler """basic""" +490 79 evaluator """rankbased""" +490 80 dataset """kinships""" +490 80 model """proje""" +490 80 loss """marginranking""" +490 80 regularizer """no""" +490 80 optimizer """adam""" +490 80 training_loop """owa""" +490 80 negative_sampler """basic""" +490 80 evaluator """rankbased""" +490 81 dataset """kinships""" +490 81 model """proje""" +490 81 loss """marginranking""" +490 81 regularizer """no""" +490 81 optimizer """adam""" +490 81 training_loop """owa""" +490 81 negative_sampler """basic""" +490 81 evaluator """rankbased""" +490 82 dataset """kinships""" +490 82 model """proje""" +490 82 loss """marginranking""" +490 82 regularizer """no""" +490 82 optimizer """adam""" +490 82 training_loop """owa""" +490 82 negative_sampler """basic""" +490 82 evaluator """rankbased""" +490 83 dataset """kinships""" +490 83 model """proje""" +490 83 loss """marginranking""" +490 83 regularizer """no""" +490 83 optimizer """adam""" +490 83 training_loop """owa""" +490 83 
negative_sampler """basic""" +490 83 evaluator """rankbased""" +490 84 dataset """kinships""" +490 84 model """proje""" +490 84 loss """marginranking""" +490 84 regularizer """no""" +490 84 optimizer """adam""" +490 84 training_loop """owa""" +490 84 negative_sampler """basic""" +490 84 evaluator """rankbased""" +490 85 dataset """kinships""" +490 85 model """proje""" +490 85 loss """marginranking""" +490 85 regularizer """no""" +490 85 optimizer """adam""" +490 85 training_loop """owa""" +490 85 negative_sampler """basic""" +490 85 evaluator """rankbased""" +490 86 dataset """kinships""" +490 86 model """proje""" +490 86 loss """marginranking""" +490 86 regularizer """no""" +490 86 optimizer """adam""" +490 86 training_loop """owa""" +490 86 negative_sampler """basic""" +490 86 evaluator """rankbased""" +490 87 dataset """kinships""" +490 87 model """proje""" +490 87 loss """marginranking""" +490 87 regularizer """no""" +490 87 optimizer """adam""" +490 87 training_loop """owa""" +490 87 negative_sampler """basic""" +490 87 evaluator """rankbased""" +490 88 dataset """kinships""" +490 88 model """proje""" +490 88 loss """marginranking""" +490 88 regularizer """no""" +490 88 optimizer """adam""" +490 88 training_loop """owa""" +490 88 negative_sampler """basic""" +490 88 evaluator """rankbased""" +490 89 dataset """kinships""" +490 89 model """proje""" +490 89 loss """marginranking""" +490 89 regularizer """no""" +490 89 optimizer """adam""" +490 89 training_loop """owa""" +490 89 negative_sampler """basic""" +490 89 evaluator """rankbased""" +490 90 dataset """kinships""" +490 90 model """proje""" +490 90 loss """marginranking""" +490 90 regularizer """no""" +490 90 optimizer """adam""" +490 90 training_loop """owa""" +490 90 negative_sampler """basic""" +490 90 evaluator """rankbased""" +490 91 dataset """kinships""" +490 91 model """proje""" +490 91 loss """marginranking""" +490 91 regularizer """no""" +490 91 optimizer """adam""" +490 91 training_loop """owa""" 
+490 91 negative_sampler """basic""" +490 91 evaluator """rankbased""" +490 92 dataset """kinships""" +490 92 model """proje""" +490 92 loss """marginranking""" +490 92 regularizer """no""" +490 92 optimizer """adam""" +490 92 training_loop """owa""" +490 92 negative_sampler """basic""" +490 92 evaluator """rankbased""" +490 93 dataset """kinships""" +490 93 model """proje""" +490 93 loss """marginranking""" +490 93 regularizer """no""" +490 93 optimizer """adam""" +490 93 training_loop """owa""" +490 93 negative_sampler """basic""" +490 93 evaluator """rankbased""" +490 94 dataset """kinships""" +490 94 model """proje""" +490 94 loss """marginranking""" +490 94 regularizer """no""" +490 94 optimizer """adam""" +490 94 training_loop """owa""" +490 94 negative_sampler """basic""" +490 94 evaluator """rankbased""" +490 95 dataset """kinships""" +490 95 model """proje""" +490 95 loss """marginranking""" +490 95 regularizer """no""" +490 95 optimizer """adam""" +490 95 training_loop """owa""" +490 95 negative_sampler """basic""" +490 95 evaluator """rankbased""" +490 96 dataset """kinships""" +490 96 model """proje""" +490 96 loss """marginranking""" +490 96 regularizer """no""" +490 96 optimizer """adam""" +490 96 training_loop """owa""" +490 96 negative_sampler """basic""" +490 96 evaluator """rankbased""" +490 97 dataset """kinships""" +490 97 model """proje""" +490 97 loss """marginranking""" +490 97 regularizer """no""" +490 97 optimizer """adam""" +490 97 training_loop """owa""" +490 97 negative_sampler """basic""" +490 97 evaluator """rankbased""" +490 98 dataset """kinships""" +490 98 model """proje""" +490 98 loss """marginranking""" +490 98 regularizer """no""" +490 98 optimizer """adam""" +490 98 training_loop """owa""" +490 98 negative_sampler """basic""" +490 98 evaluator """rankbased""" +490 99 dataset """kinships""" +490 99 model """proje""" +490 99 loss """marginranking""" +490 99 regularizer """no""" +490 99 optimizer """adam""" +490 99 training_loop 
"""owa""" +490 99 negative_sampler """basic""" +490 99 evaluator """rankbased""" +490 100 dataset """kinships""" +490 100 model """proje""" +490 100 loss """marginranking""" +490 100 regularizer """no""" +490 100 optimizer """adam""" +490 100 training_loop """owa""" +490 100 negative_sampler """basic""" +490 100 evaluator """rankbased""" +491 1 model.embedding_dim 1.0 +491 1 loss.margin 12.203318563328294 +491 1 loss.adversarial_temperature 0.3275550527696183 +491 1 optimizer.lr 0.006839030190415743 +491 1 negative_sampler.num_negs_per_pos 70.0 +491 1 training.batch_size 1.0 +491 2 model.embedding_dim 2.0 +491 2 loss.margin 3.641900340179673 +491 2 loss.adversarial_temperature 0.22743494212657508 +491 2 optimizer.lr 0.035948319838186756 +491 2 negative_sampler.num_negs_per_pos 94.0 +491 2 training.batch_size 0.0 +491 3 model.embedding_dim 0.0 +491 3 loss.margin 19.191915798972293 +491 3 loss.adversarial_temperature 0.3503943801316507 +491 3 optimizer.lr 0.03076848244028055 +491 3 negative_sampler.num_negs_per_pos 95.0 +491 3 training.batch_size 2.0 +491 4 model.embedding_dim 0.0 +491 4 loss.margin 26.7797430010489 +491 4 loss.adversarial_temperature 0.7635858568891346 +491 4 optimizer.lr 0.005976944753613384 +491 4 negative_sampler.num_negs_per_pos 8.0 +491 4 training.batch_size 1.0 +491 5 model.embedding_dim 0.0 +491 5 loss.margin 9.371511168654058 +491 5 loss.adversarial_temperature 0.44996745414295247 +491 5 optimizer.lr 0.0011231034332757886 +491 5 negative_sampler.num_negs_per_pos 69.0 +491 5 training.batch_size 0.0 +491 6 model.embedding_dim 0.0 +491 6 loss.margin 18.26361095850016 +491 6 loss.adversarial_temperature 0.24851267077453704 +491 6 optimizer.lr 0.013092252654579488 +491 6 negative_sampler.num_negs_per_pos 33.0 +491 6 training.batch_size 2.0 +491 7 model.embedding_dim 0.0 +491 7 loss.margin 27.791217698341825 +491 7 loss.adversarial_temperature 0.42260880798906997 +491 7 optimizer.lr 0.034057133968406775 +491 7 negative_sampler.num_negs_per_pos 
56.0 +491 7 training.batch_size 0.0 +491 8 model.embedding_dim 0.0 +491 8 loss.margin 22.439904860593916 +491 8 loss.adversarial_temperature 0.6588172178678157 +491 8 optimizer.lr 0.018642132487563794 +491 8 negative_sampler.num_negs_per_pos 27.0 +491 8 training.batch_size 1.0 +491 9 model.embedding_dim 1.0 +491 9 loss.margin 25.782887599431998 +491 9 loss.adversarial_temperature 0.2416484221016972 +491 9 optimizer.lr 0.016141758952642138 +491 9 negative_sampler.num_negs_per_pos 81.0 +491 9 training.batch_size 1.0 +491 10 model.embedding_dim 2.0 +491 10 loss.margin 5.772589502154593 +491 10 loss.adversarial_temperature 0.2750180512745579 +491 10 optimizer.lr 0.017285529545839517 +491 10 negative_sampler.num_negs_per_pos 62.0 +491 10 training.batch_size 1.0 +491 11 model.embedding_dim 1.0 +491 11 loss.margin 26.02119034406963 +491 11 loss.adversarial_temperature 0.8711104559122451 +491 11 optimizer.lr 0.03354207523991117 +491 11 negative_sampler.num_negs_per_pos 50.0 +491 11 training.batch_size 2.0 +491 12 model.embedding_dim 0.0 +491 12 loss.margin 12.05334383768161 +491 12 loss.adversarial_temperature 0.37592192025036264 +491 12 optimizer.lr 0.008409970053775298 +491 12 negative_sampler.num_negs_per_pos 18.0 +491 12 training.batch_size 2.0 +491 13 model.embedding_dim 0.0 +491 13 loss.margin 1.821519842020562 +491 13 loss.adversarial_temperature 0.7795151704578356 +491 13 optimizer.lr 0.0873700491633472 +491 13 negative_sampler.num_negs_per_pos 89.0 +491 13 training.batch_size 2.0 +491 14 model.embedding_dim 1.0 +491 14 loss.margin 22.614050184543224 +491 14 loss.adversarial_temperature 0.7127574713646487 +491 14 optimizer.lr 0.0035839795241006476 +491 14 negative_sampler.num_negs_per_pos 54.0 +491 14 training.batch_size 1.0 +491 15 model.embedding_dim 0.0 +491 15 loss.margin 29.966237852873704 +491 15 loss.adversarial_temperature 0.24720375495050076 +491 15 optimizer.lr 0.011084729122306599 +491 15 negative_sampler.num_negs_per_pos 27.0 +491 15 training.batch_size 
2.0 +491 16 model.embedding_dim 2.0 +491 16 loss.margin 15.503429399466413 +491 16 loss.adversarial_temperature 0.7764252849872887 +491 16 optimizer.lr 0.036920208989743714 +491 16 negative_sampler.num_negs_per_pos 44.0 +491 16 training.batch_size 0.0 +491 17 model.embedding_dim 2.0 +491 17 loss.margin 3.546392661998575 +491 17 loss.adversarial_temperature 0.4644746873626052 +491 17 optimizer.lr 0.0055871701223884985 +491 17 negative_sampler.num_negs_per_pos 96.0 +491 17 training.batch_size 2.0 +491 18 model.embedding_dim 2.0 +491 18 loss.margin 20.051301962728193 +491 18 loss.adversarial_temperature 0.5916710903110846 +491 18 optimizer.lr 0.06746854176928341 +491 18 negative_sampler.num_negs_per_pos 1.0 +491 18 training.batch_size 2.0 +491 19 model.embedding_dim 0.0 +491 19 loss.margin 8.680483079422132 +491 19 loss.adversarial_temperature 0.9398847964084057 +491 19 optimizer.lr 0.0029485292098132177 +491 19 negative_sampler.num_negs_per_pos 50.0 +491 19 training.batch_size 0.0 +491 20 model.embedding_dim 2.0 +491 20 loss.margin 24.268097613360226 +491 20 loss.adversarial_temperature 0.7486160050240264 +491 20 optimizer.lr 0.001361122579572692 +491 20 negative_sampler.num_negs_per_pos 40.0 +491 20 training.batch_size 1.0 +491 21 model.embedding_dim 1.0 +491 21 loss.margin 21.866693372948244 +491 21 loss.adversarial_temperature 0.8590274835628555 +491 21 optimizer.lr 0.0321320874621992 +491 21 negative_sampler.num_negs_per_pos 74.0 +491 21 training.batch_size 0.0 +491 22 model.embedding_dim 0.0 +491 22 loss.margin 24.94490192814262 +491 22 loss.adversarial_temperature 0.6843964136293391 +491 22 optimizer.lr 0.004192239802538756 +491 22 negative_sampler.num_negs_per_pos 82.0 +491 22 training.batch_size 1.0 +491 23 model.embedding_dim 0.0 +491 23 loss.margin 19.95935299863184 +491 23 loss.adversarial_temperature 0.31106808108829587 +491 23 optimizer.lr 0.001583379638010596 +491 23 negative_sampler.num_negs_per_pos 20.0 +491 23 training.batch_size 1.0 +491 24 
model.embedding_dim 0.0 +491 24 loss.margin 2.028445719278354 +491 24 loss.adversarial_temperature 0.867309328209927 +491 24 optimizer.lr 0.004364592622244567 +491 24 negative_sampler.num_negs_per_pos 99.0 +491 24 training.batch_size 1.0 +491 25 model.embedding_dim 1.0 +491 25 loss.margin 14.22739793895781 +491 25 loss.adversarial_temperature 0.7377261255443747 +491 25 optimizer.lr 0.015458160614936161 +491 25 negative_sampler.num_negs_per_pos 41.0 +491 25 training.batch_size 0.0 +491 26 model.embedding_dim 2.0 +491 26 loss.margin 13.661118754903303 +491 26 loss.adversarial_temperature 0.10056917337599819 +491 26 optimizer.lr 0.0058799631363723095 +491 26 negative_sampler.num_negs_per_pos 34.0 +491 26 training.batch_size 1.0 +491 27 model.embedding_dim 2.0 +491 27 loss.margin 9.153507740084274 +491 27 loss.adversarial_temperature 0.3370829634556193 +491 27 optimizer.lr 0.021663106626752457 +491 27 negative_sampler.num_negs_per_pos 88.0 +491 27 training.batch_size 1.0 +491 28 model.embedding_dim 1.0 +491 28 loss.margin 8.18349388157332 +491 28 loss.adversarial_temperature 0.6439590746402305 +491 28 optimizer.lr 0.003875208894297715 +491 28 negative_sampler.num_negs_per_pos 95.0 +491 28 training.batch_size 2.0 +491 29 model.embedding_dim 2.0 +491 29 loss.margin 18.50443136500936 +491 29 loss.adversarial_temperature 0.494731868478979 +491 29 optimizer.lr 0.005701564590575673 +491 29 negative_sampler.num_negs_per_pos 52.0 +491 29 training.batch_size 0.0 +491 30 model.embedding_dim 0.0 +491 30 loss.margin 28.31103380026804 +491 30 loss.adversarial_temperature 0.5098327539565346 +491 30 optimizer.lr 0.0011636557661422744 +491 30 negative_sampler.num_negs_per_pos 63.0 +491 30 training.batch_size 2.0 +491 31 model.embedding_dim 2.0 +491 31 loss.margin 16.129974631723982 +491 31 loss.adversarial_temperature 0.2643037050966289 +491 31 optimizer.lr 0.012095907969329451 +491 31 negative_sampler.num_negs_per_pos 2.0 +491 31 training.batch_size 2.0 +491 32 model.embedding_dim 
1.0 +491 32 loss.margin 11.496353302585648 +491 32 loss.adversarial_temperature 0.6175929500440348 +491 32 optimizer.lr 0.0416701801852496 +491 32 negative_sampler.num_negs_per_pos 42.0 +491 32 training.batch_size 2.0 +491 33 model.embedding_dim 2.0 +491 33 loss.margin 25.01161789838 +491 33 loss.adversarial_temperature 0.42416689418361886 +491 33 optimizer.lr 0.004910557055854635 +491 33 negative_sampler.num_negs_per_pos 72.0 +491 33 training.batch_size 1.0 +491 34 model.embedding_dim 1.0 +491 34 loss.margin 14.320394388203894 +491 34 loss.adversarial_temperature 0.5027312743936857 +491 34 optimizer.lr 0.01923936955086448 +491 34 negative_sampler.num_negs_per_pos 18.0 +491 34 training.batch_size 1.0 +491 35 model.embedding_dim 1.0 +491 35 loss.margin 18.194247984342915 +491 35 loss.adversarial_temperature 0.8864886625632808 +491 35 optimizer.lr 0.03347211670075067 +491 35 negative_sampler.num_negs_per_pos 38.0 +491 35 training.batch_size 2.0 +491 36 model.embedding_dim 0.0 +491 36 loss.margin 25.20994548108966 +491 36 loss.adversarial_temperature 0.7828344951493709 +491 36 optimizer.lr 0.003269868165927893 +491 36 negative_sampler.num_negs_per_pos 44.0 +491 36 training.batch_size 2.0 +491 37 model.embedding_dim 2.0 +491 37 loss.margin 26.579150959705693 +491 37 loss.adversarial_temperature 0.24515689205404465 +491 37 optimizer.lr 0.02645997215828116 +491 37 negative_sampler.num_negs_per_pos 45.0 +491 37 training.batch_size 0.0 +491 38 model.embedding_dim 2.0 +491 38 loss.margin 3.956085369018277 +491 38 loss.adversarial_temperature 0.5890685836189945 +491 38 optimizer.lr 0.009972803214484202 +491 38 negative_sampler.num_negs_per_pos 9.0 +491 38 training.batch_size 2.0 +491 39 model.embedding_dim 0.0 +491 39 loss.margin 9.34911709975227 +491 39 loss.adversarial_temperature 0.36715733232481007 +491 39 optimizer.lr 0.04556233016201478 +491 39 negative_sampler.num_negs_per_pos 7.0 +491 39 training.batch_size 0.0 +491 40 model.embedding_dim 2.0 +491 40 loss.margin 
11.169350952234884 +491 40 loss.adversarial_temperature 0.42910745061232924 +491 40 optimizer.lr 0.002111043541718324 +491 40 negative_sampler.num_negs_per_pos 49.0 +491 40 training.batch_size 0.0 +491 41 model.embedding_dim 1.0 +491 41 loss.margin 11.99326243942782 +491 41 loss.adversarial_temperature 0.8656183223752115 +491 41 optimizer.lr 0.007975756346624506 +491 41 negative_sampler.num_negs_per_pos 2.0 +491 41 training.batch_size 1.0 +491 42 model.embedding_dim 0.0 +491 42 loss.margin 25.904977532209504 +491 42 loss.adversarial_temperature 0.8315664648776809 +491 42 optimizer.lr 0.09069633256335931 +491 42 negative_sampler.num_negs_per_pos 7.0 +491 42 training.batch_size 1.0 +491 43 model.embedding_dim 0.0 +491 43 loss.margin 15.751289287578313 +491 43 loss.adversarial_temperature 0.9284707415859885 +491 43 optimizer.lr 0.01061501923845375 +491 43 negative_sampler.num_negs_per_pos 77.0 +491 43 training.batch_size 1.0 +491 44 model.embedding_dim 2.0 +491 44 loss.margin 18.207910436960685 +491 44 loss.adversarial_temperature 0.5369419280817314 +491 44 optimizer.lr 0.07148773756445813 +491 44 negative_sampler.num_negs_per_pos 2.0 +491 44 training.batch_size 2.0 +491 45 model.embedding_dim 2.0 +491 45 loss.margin 12.253102056454646 +491 45 loss.adversarial_temperature 0.8300858570299208 +491 45 optimizer.lr 0.0529914290681192 +491 45 negative_sampler.num_negs_per_pos 3.0 +491 45 training.batch_size 2.0 +491 46 model.embedding_dim 2.0 +491 46 loss.margin 12.773531598926832 +491 46 loss.adversarial_temperature 0.5997390119134219 +491 46 optimizer.lr 0.04706250554233876 +491 46 negative_sampler.num_negs_per_pos 4.0 +491 46 training.batch_size 2.0 +491 47 model.embedding_dim 0.0 +491 47 loss.margin 6.8243189557307256 +491 47 loss.adversarial_temperature 0.3221078179552316 +491 47 optimizer.lr 0.016537834526141208 +491 47 negative_sampler.num_negs_per_pos 50.0 +491 47 training.batch_size 0.0 +491 48 model.embedding_dim 0.0 +491 48 loss.margin 5.278756595826731 +491 48 
loss.adversarial_temperature 0.3443711025850107 +491 48 optimizer.lr 0.005480446117368521 +491 48 negative_sampler.num_negs_per_pos 72.0 +491 48 training.batch_size 2.0 +491 49 model.embedding_dim 2.0 +491 49 loss.margin 23.896479226860738 +491 49 loss.adversarial_temperature 0.3695936013317104 +491 49 optimizer.lr 0.0037014002292877304 +491 49 negative_sampler.num_negs_per_pos 40.0 +491 49 training.batch_size 2.0 +491 50 model.embedding_dim 2.0 +491 50 loss.margin 1.9253534411885824 +491 50 loss.adversarial_temperature 0.80982968826901 +491 50 optimizer.lr 0.01984271387448895 +491 50 negative_sampler.num_negs_per_pos 50.0 +491 50 training.batch_size 2.0 +491 51 model.embedding_dim 1.0 +491 51 loss.margin 23.860114564164736 +491 51 loss.adversarial_temperature 0.2038116153383378 +491 51 optimizer.lr 0.0027928961000248716 +491 51 negative_sampler.num_negs_per_pos 67.0 +491 51 training.batch_size 1.0 +491 52 model.embedding_dim 2.0 +491 52 loss.margin 15.427681898830858 +491 52 loss.adversarial_temperature 0.38648191954241573 +491 52 optimizer.lr 0.0010494971010424846 +491 52 negative_sampler.num_negs_per_pos 38.0 +491 52 training.batch_size 2.0 +491 53 model.embedding_dim 1.0 +491 53 loss.margin 27.45383858502474 +491 53 loss.adversarial_temperature 0.3287864140719134 +491 53 optimizer.lr 0.0724985475621412 +491 53 negative_sampler.num_negs_per_pos 11.0 +491 53 training.batch_size 0.0 +491 54 model.embedding_dim 1.0 +491 54 loss.margin 18.053978716334136 +491 54 loss.adversarial_temperature 0.6501617609737163 +491 54 optimizer.lr 0.007389506722930822 +491 54 negative_sampler.num_negs_per_pos 80.0 +491 54 training.batch_size 0.0 +491 55 model.embedding_dim 1.0 +491 55 loss.margin 17.169021546182947 +491 55 loss.adversarial_temperature 0.9272676583336446 +491 55 optimizer.lr 0.01639350938763996 +491 55 negative_sampler.num_negs_per_pos 73.0 +491 55 training.batch_size 0.0 +491 56 model.embedding_dim 2.0 +491 56 loss.margin 25.550377395604205 +491 56 
loss.adversarial_temperature 0.5034596845014778 +491 56 optimizer.lr 0.0015411281384212013 +491 56 negative_sampler.num_negs_per_pos 39.0 +491 56 training.batch_size 2.0 +491 57 model.embedding_dim 2.0 +491 57 loss.margin 16.569585653689323 +491 57 loss.adversarial_temperature 0.5095081424248881 +491 57 optimizer.lr 0.07301271593705454 +491 57 negative_sampler.num_negs_per_pos 43.0 +491 57 training.batch_size 0.0 +491 58 model.embedding_dim 0.0 +491 58 loss.margin 28.409994584182645 +491 58 loss.adversarial_temperature 0.6053965810166971 +491 58 optimizer.lr 0.004623811701792822 +491 58 negative_sampler.num_negs_per_pos 52.0 +491 58 training.batch_size 0.0 +491 59 model.embedding_dim 0.0 +491 59 loss.margin 2.2134326581183568 +491 59 loss.adversarial_temperature 0.8386216700421335 +491 59 optimizer.lr 0.011285076949402817 +491 59 negative_sampler.num_negs_per_pos 95.0 +491 59 training.batch_size 2.0 +491 60 model.embedding_dim 2.0 +491 60 loss.margin 11.567585920645103 +491 60 loss.adversarial_temperature 0.4269129199117736 +491 60 optimizer.lr 0.008996202048503519 +491 60 negative_sampler.num_negs_per_pos 23.0 +491 60 training.batch_size 1.0 +491 61 model.embedding_dim 2.0 +491 61 loss.margin 24.967242808086205 +491 61 loss.adversarial_temperature 0.7174665661862255 +491 61 optimizer.lr 0.0019789440139866914 +491 61 negative_sampler.num_negs_per_pos 47.0 +491 61 training.batch_size 1.0 +491 62 model.embedding_dim 2.0 +491 62 loss.margin 15.064045012861037 +491 62 loss.adversarial_temperature 0.7626497594202251 +491 62 optimizer.lr 0.0211360664279962 +491 62 negative_sampler.num_negs_per_pos 62.0 +491 62 training.batch_size 0.0 +491 63 model.embedding_dim 1.0 +491 63 loss.margin 1.8977244581745378 +491 63 loss.adversarial_temperature 0.46284904713541863 +491 63 optimizer.lr 0.02542401431996878 +491 63 negative_sampler.num_negs_per_pos 8.0 +491 63 training.batch_size 1.0 +491 64 model.embedding_dim 2.0 +491 64 loss.margin 16.265503759718587 +491 64 
loss.adversarial_temperature 0.7475207858975906 +491 64 optimizer.lr 0.0010170738179912874 +491 64 negative_sampler.num_negs_per_pos 80.0 +491 64 training.batch_size 2.0 +491 65 model.embedding_dim 0.0 +491 65 loss.margin 15.192050539657613 +491 65 loss.adversarial_temperature 0.7104149760652106 +491 65 optimizer.lr 0.030928357295238396 +491 65 negative_sampler.num_negs_per_pos 49.0 +491 65 training.batch_size 2.0 +491 66 model.embedding_dim 2.0 +491 66 loss.margin 10.85908354453215 +491 66 loss.adversarial_temperature 0.9637053119945328 +491 66 optimizer.lr 0.0011329512902651215 +491 66 negative_sampler.num_negs_per_pos 83.0 +491 66 training.batch_size 0.0 +491 67 model.embedding_dim 2.0 +491 67 loss.margin 22.214591459607988 +491 67 loss.adversarial_temperature 0.8156828259300589 +491 67 optimizer.lr 0.0013541709555255513 +491 67 negative_sampler.num_negs_per_pos 8.0 +491 67 training.batch_size 2.0 +491 68 model.embedding_dim 2.0 +491 68 loss.margin 17.77755106452655 +491 68 loss.adversarial_temperature 0.9613104400980751 +491 68 optimizer.lr 0.005800878248785863 +491 68 negative_sampler.num_negs_per_pos 22.0 +491 68 training.batch_size 0.0 +491 69 model.embedding_dim 2.0 +491 69 loss.margin 24.638758517454328 +491 69 loss.adversarial_temperature 0.9349172582892536 +491 69 optimizer.lr 0.021949620004491795 +491 69 negative_sampler.num_negs_per_pos 42.0 +491 69 training.batch_size 2.0 +491 70 model.embedding_dim 0.0 +491 70 loss.margin 8.003309732694097 +491 70 loss.adversarial_temperature 0.5494393092622022 +491 70 optimizer.lr 0.007612400138944663 +491 70 negative_sampler.num_negs_per_pos 0.0 +491 70 training.batch_size 0.0 +491 71 model.embedding_dim 2.0 +491 71 loss.margin 1.274701792356489 +491 71 loss.adversarial_temperature 0.7141009241275476 +491 71 optimizer.lr 0.0014529674767318206 +491 71 negative_sampler.num_negs_per_pos 23.0 +491 71 training.batch_size 1.0 +491 72 model.embedding_dim 2.0 +491 72 loss.margin 7.04461416587983 +491 72 
loss.adversarial_temperature 0.8811888153359554 +491 72 optimizer.lr 0.08849723794504223 +491 72 negative_sampler.num_negs_per_pos 35.0 +491 72 training.batch_size 0.0 +491 73 model.embedding_dim 2.0 +491 73 loss.margin 24.396924669126072 +491 73 loss.adversarial_temperature 0.5427626679398322 +491 73 optimizer.lr 0.006181465100195181 +491 73 negative_sampler.num_negs_per_pos 81.0 +491 73 training.batch_size 0.0 +491 74 model.embedding_dim 0.0 +491 74 loss.margin 2.384961868455277 +491 74 loss.adversarial_temperature 0.3634884035260595 +491 74 optimizer.lr 0.007748893081669583 +491 74 negative_sampler.num_negs_per_pos 28.0 +491 74 training.batch_size 0.0 +491 75 model.embedding_dim 2.0 +491 75 loss.margin 8.773570670810672 +491 75 loss.adversarial_temperature 0.9142348288518061 +491 75 optimizer.lr 0.012959541904887236 +491 75 negative_sampler.num_negs_per_pos 81.0 +491 75 training.batch_size 0.0 +491 76 model.embedding_dim 0.0 +491 76 loss.margin 8.414742049697871 +491 76 loss.adversarial_temperature 0.9563115867171796 +491 76 optimizer.lr 0.06176426603122685 +491 76 negative_sampler.num_negs_per_pos 2.0 +491 76 training.batch_size 2.0 +491 77 model.embedding_dim 0.0 +491 77 loss.margin 17.048085684604963 +491 77 loss.adversarial_temperature 0.8025485325676548 +491 77 optimizer.lr 0.04139536279818327 +491 77 negative_sampler.num_negs_per_pos 47.0 +491 77 training.batch_size 0.0 +491 78 model.embedding_dim 0.0 +491 78 loss.margin 24.139477698526964 +491 78 loss.adversarial_temperature 0.5676320563815577 +491 78 optimizer.lr 0.0156738321886044 +491 78 negative_sampler.num_negs_per_pos 89.0 +491 78 training.batch_size 1.0 +491 79 model.embedding_dim 0.0 +491 79 loss.margin 21.19609854599692 +491 79 loss.adversarial_temperature 0.909975195688921 +491 79 optimizer.lr 0.0032649404002210135 +491 79 negative_sampler.num_negs_per_pos 1.0 +491 79 training.batch_size 2.0 +491 80 model.embedding_dim 1.0 +491 80 loss.margin 15.894721755754503 +491 80 
loss.adversarial_temperature 0.1588294150358461 +491 80 optimizer.lr 0.06760566035907747 +491 80 negative_sampler.num_negs_per_pos 90.0 +491 80 training.batch_size 2.0 +491 81 model.embedding_dim 0.0 +491 81 loss.margin 14.514700574225232 +491 81 loss.adversarial_temperature 0.20805644137596754 +491 81 optimizer.lr 0.02630801330261895 +491 81 negative_sampler.num_negs_per_pos 36.0 +491 81 training.batch_size 1.0 +491 82 model.embedding_dim 1.0 +491 82 loss.margin 5.176004580888761 +491 82 loss.adversarial_temperature 0.8465863872815154 +491 82 optimizer.lr 0.015857574675411584 +491 82 negative_sampler.num_negs_per_pos 37.0 +491 82 training.batch_size 0.0 +491 83 model.embedding_dim 0.0 +491 83 loss.margin 10.919431790988655 +491 83 loss.adversarial_temperature 0.3115237908748023 +491 83 optimizer.lr 0.021759333725156742 +491 83 negative_sampler.num_negs_per_pos 69.0 +491 83 training.batch_size 0.0 +491 84 model.embedding_dim 2.0 +491 84 loss.margin 4.564131360322473 +491 84 loss.adversarial_temperature 0.2386945000737086 +491 84 optimizer.lr 0.01901388720762037 +491 84 negative_sampler.num_negs_per_pos 71.0 +491 84 training.batch_size 1.0 +491 85 model.embedding_dim 0.0 +491 85 loss.margin 8.37950504208348 +491 85 loss.adversarial_temperature 0.666988534944671 +491 85 optimizer.lr 0.014401637317632138 +491 85 negative_sampler.num_negs_per_pos 61.0 +491 85 training.batch_size 1.0 +491 86 model.embedding_dim 0.0 +491 86 loss.margin 10.2004606164267 +491 86 loss.adversarial_temperature 0.342098779159543 +491 86 optimizer.lr 0.03674071791405826 +491 86 negative_sampler.num_negs_per_pos 61.0 +491 86 training.batch_size 1.0 +491 87 model.embedding_dim 2.0 +491 87 loss.margin 3.6158788765170753 +491 87 loss.adversarial_temperature 0.602718637454559 +491 87 optimizer.lr 0.07973229404711057 +491 87 negative_sampler.num_negs_per_pos 11.0 +491 87 training.batch_size 1.0 +491 88 model.embedding_dim 1.0 +491 88 loss.margin 26.733560228346025 +491 88 loss.adversarial_temperature 
0.34831337125633655 +491 88 optimizer.lr 0.0010262430322797405 +491 88 negative_sampler.num_negs_per_pos 36.0 +491 88 training.batch_size 1.0 +491 89 model.embedding_dim 2.0 +491 89 loss.margin 1.2819828384486769 +491 89 loss.adversarial_temperature 0.2050715231843972 +491 89 optimizer.lr 0.001917440126508904 +491 89 negative_sampler.num_negs_per_pos 34.0 +491 89 training.batch_size 0.0 +491 90 model.embedding_dim 2.0 +491 90 loss.margin 18.81884094227561 +491 90 loss.adversarial_temperature 0.6503611218997025 +491 90 optimizer.lr 0.006216787856173087 +491 90 negative_sampler.num_negs_per_pos 29.0 +491 90 training.batch_size 1.0 +491 91 model.embedding_dim 0.0 +491 91 loss.margin 15.381550268902975 +491 91 loss.adversarial_temperature 0.7526275674354618 +491 91 optimizer.lr 0.009630184072169125 +491 91 negative_sampler.num_negs_per_pos 97.0 +491 91 training.batch_size 1.0 +491 92 model.embedding_dim 0.0 +491 92 loss.margin 27.797843594970107 +491 92 loss.adversarial_temperature 0.829966226065155 +491 92 optimizer.lr 0.004139690001310888 +491 92 negative_sampler.num_negs_per_pos 3.0 +491 92 training.batch_size 2.0 +491 93 model.embedding_dim 0.0 +491 93 loss.margin 1.1437642888807673 +491 93 loss.adversarial_temperature 0.8410677781290784 +491 93 optimizer.lr 0.02729375545659094 +491 93 negative_sampler.num_negs_per_pos 45.0 +491 93 training.batch_size 0.0 +491 94 model.embedding_dim 0.0 +491 94 loss.margin 23.047415180939833 +491 94 loss.adversarial_temperature 0.2815060978469373 +491 94 optimizer.lr 0.04516260219979548 +491 94 negative_sampler.num_negs_per_pos 57.0 +491 94 training.batch_size 1.0 +491 95 model.embedding_dim 1.0 +491 95 loss.margin 1.2442006204068785 +491 95 loss.adversarial_temperature 0.8025145130460888 +491 95 optimizer.lr 0.007067760717931374 +491 95 negative_sampler.num_negs_per_pos 24.0 +491 95 training.batch_size 1.0 +491 96 model.embedding_dim 1.0 +491 96 loss.margin 14.817613001712905 +491 96 loss.adversarial_temperature 0.9070048521717892 
+491 96 optimizer.lr 0.04450655099733743 +491 96 negative_sampler.num_negs_per_pos 30.0 +491 96 training.batch_size 0.0 +491 97 model.embedding_dim 2.0 +491 97 loss.margin 6.140965197510062 +491 97 loss.adversarial_temperature 0.6015373846691708 +491 97 optimizer.lr 0.0011121175007603313 +491 97 negative_sampler.num_negs_per_pos 70.0 +491 97 training.batch_size 1.0 +491 98 model.embedding_dim 1.0 +491 98 loss.margin 6.716956419602999 +491 98 loss.adversarial_temperature 0.4335369638664567 +491 98 optimizer.lr 0.0018093446436708307 +491 98 negative_sampler.num_negs_per_pos 12.0 +491 98 training.batch_size 2.0 +491 99 model.embedding_dim 0.0 +491 99 loss.margin 13.852737550000006 +491 99 loss.adversarial_temperature 0.3999518623707602 +491 99 optimizer.lr 0.004793797464504677 +491 99 negative_sampler.num_negs_per_pos 52.0 +491 99 training.batch_size 1.0 +491 100 model.embedding_dim 0.0 +491 100 loss.margin 1.3939074323965333 +491 100 loss.adversarial_temperature 0.9639656390261432 +491 100 optimizer.lr 0.004108908366127583 +491 100 negative_sampler.num_negs_per_pos 86.0 +491 100 training.batch_size 1.0 +491 1 dataset """kinships""" +491 1 model """proje""" +491 1 loss """nssa""" +491 1 regularizer """no""" +491 1 optimizer """adam""" +491 1 training_loop """owa""" +491 1 negative_sampler """basic""" +491 1 evaluator """rankbased""" +491 2 dataset """kinships""" +491 2 model """proje""" +491 2 loss """nssa""" +491 2 regularizer """no""" +491 2 optimizer """adam""" +491 2 training_loop """owa""" +491 2 negative_sampler """basic""" +491 2 evaluator """rankbased""" +491 3 dataset """kinships""" +491 3 model """proje""" +491 3 loss """nssa""" +491 3 regularizer """no""" +491 3 optimizer """adam""" +491 3 training_loop """owa""" +491 3 negative_sampler """basic""" +491 3 evaluator """rankbased""" +491 4 dataset """kinships""" +491 4 model """proje""" +491 4 loss """nssa""" +491 4 regularizer """no""" +491 4 optimizer """adam""" +491 4 training_loop """owa""" +491 4 
negative_sampler """basic""" +491 4 evaluator """rankbased""" +491 5 dataset """kinships""" +491 5 model """proje""" +491 5 loss """nssa""" +491 5 regularizer """no""" +491 5 optimizer """adam""" +491 5 training_loop """owa""" +491 5 negative_sampler """basic""" +491 5 evaluator """rankbased""" +491 6 dataset """kinships""" +491 6 model """proje""" +491 6 loss """nssa""" +491 6 regularizer """no""" +491 6 optimizer """adam""" +491 6 training_loop """owa""" +491 6 negative_sampler """basic""" +491 6 evaluator """rankbased""" +491 7 dataset """kinships""" +491 7 model """proje""" +491 7 loss """nssa""" +491 7 regularizer """no""" +491 7 optimizer """adam""" +491 7 training_loop """owa""" +491 7 negative_sampler """basic""" +491 7 evaluator """rankbased""" +491 8 dataset """kinships""" +491 8 model """proje""" +491 8 loss """nssa""" +491 8 regularizer """no""" +491 8 optimizer """adam""" +491 8 training_loop """owa""" +491 8 negative_sampler """basic""" +491 8 evaluator """rankbased""" +491 9 dataset """kinships""" +491 9 model """proje""" +491 9 loss """nssa""" +491 9 regularizer """no""" +491 9 optimizer """adam""" +491 9 training_loop """owa""" +491 9 negative_sampler """basic""" +491 9 evaluator """rankbased""" +491 10 dataset """kinships""" +491 10 model """proje""" +491 10 loss """nssa""" +491 10 regularizer """no""" +491 10 optimizer """adam""" +491 10 training_loop """owa""" +491 10 negative_sampler """basic""" +491 10 evaluator """rankbased""" +491 11 dataset """kinships""" +491 11 model """proje""" +491 11 loss """nssa""" +491 11 regularizer """no""" +491 11 optimizer """adam""" +491 11 training_loop """owa""" +491 11 negative_sampler """basic""" +491 11 evaluator """rankbased""" +491 12 dataset """kinships""" +491 12 model """proje""" +491 12 loss """nssa""" +491 12 regularizer """no""" +491 12 optimizer """adam""" +491 12 training_loop """owa""" +491 12 negative_sampler """basic""" +491 12 evaluator """rankbased""" +491 13 dataset """kinships""" +491 13 
model """proje""" +491 13 loss """nssa""" +491 13 regularizer """no""" +491 13 optimizer """adam""" +491 13 training_loop """owa""" +491 13 negative_sampler """basic""" +491 13 evaluator """rankbased""" +491 14 dataset """kinships""" +491 14 model """proje""" +491 14 loss """nssa""" +491 14 regularizer """no""" +491 14 optimizer """adam""" +491 14 training_loop """owa""" +491 14 negative_sampler """basic""" +491 14 evaluator """rankbased""" +491 15 dataset """kinships""" +491 15 model """proje""" +491 15 loss """nssa""" +491 15 regularizer """no""" +491 15 optimizer """adam""" +491 15 training_loop """owa""" +491 15 negative_sampler """basic""" +491 15 evaluator """rankbased""" +491 16 dataset """kinships""" +491 16 model """proje""" +491 16 loss """nssa""" +491 16 regularizer """no""" +491 16 optimizer """adam""" +491 16 training_loop """owa""" +491 16 negative_sampler """basic""" +491 16 evaluator """rankbased""" +491 17 dataset """kinships""" +491 17 model """proje""" +491 17 loss """nssa""" +491 17 regularizer """no""" +491 17 optimizer """adam""" +491 17 training_loop """owa""" +491 17 negative_sampler """basic""" +491 17 evaluator """rankbased""" +491 18 dataset """kinships""" +491 18 model """proje""" +491 18 loss """nssa""" +491 18 regularizer """no""" +491 18 optimizer """adam""" +491 18 training_loop """owa""" +491 18 negative_sampler """basic""" +491 18 evaluator """rankbased""" +491 19 dataset """kinships""" +491 19 model """proje""" +491 19 loss """nssa""" +491 19 regularizer """no""" +491 19 optimizer """adam""" +491 19 training_loop """owa""" +491 19 negative_sampler """basic""" +491 19 evaluator """rankbased""" +491 20 dataset """kinships""" +491 20 model """proje""" +491 20 loss """nssa""" +491 20 regularizer """no""" +491 20 optimizer """adam""" +491 20 training_loop """owa""" +491 20 negative_sampler """basic""" +491 20 evaluator """rankbased""" +491 21 dataset """kinships""" +491 21 model """proje""" +491 21 loss """nssa""" +491 21 regularizer 
"""no""" +491 21 optimizer """adam""" +491 21 training_loop """owa""" +491 21 negative_sampler """basic""" +491 21 evaluator """rankbased""" +491 22 dataset """kinships""" +491 22 model """proje""" +491 22 loss """nssa""" +491 22 regularizer """no""" +491 22 optimizer """adam""" +491 22 training_loop """owa""" +491 22 negative_sampler """basic""" +491 22 evaluator """rankbased""" +491 23 dataset """kinships""" +491 23 model """proje""" +491 23 loss """nssa""" +491 23 regularizer """no""" +491 23 optimizer """adam""" +491 23 training_loop """owa""" +491 23 negative_sampler """basic""" +491 23 evaluator """rankbased""" +491 24 dataset """kinships""" +491 24 model """proje""" +491 24 loss """nssa""" +491 24 regularizer """no""" +491 24 optimizer """adam""" +491 24 training_loop """owa""" +491 24 negative_sampler """basic""" +491 24 evaluator """rankbased""" +491 25 dataset """kinships""" +491 25 model """proje""" +491 25 loss """nssa""" +491 25 regularizer """no""" +491 25 optimizer """adam""" +491 25 training_loop """owa""" +491 25 negative_sampler """basic""" +491 25 evaluator """rankbased""" +491 26 dataset """kinships""" +491 26 model """proje""" +491 26 loss """nssa""" +491 26 regularizer """no""" +491 26 optimizer """adam""" +491 26 training_loop """owa""" +491 26 negative_sampler """basic""" +491 26 evaluator """rankbased""" +491 27 dataset """kinships""" +491 27 model """proje""" +491 27 loss """nssa""" +491 27 regularizer """no""" +491 27 optimizer """adam""" +491 27 training_loop """owa""" +491 27 negative_sampler """basic""" +491 27 evaluator """rankbased""" +491 28 dataset """kinships""" +491 28 model """proje""" +491 28 loss """nssa""" +491 28 regularizer """no""" +491 28 optimizer """adam""" +491 28 training_loop """owa""" +491 28 negative_sampler """basic""" +491 28 evaluator """rankbased""" +491 29 dataset """kinships""" +491 29 model """proje""" +491 29 loss """nssa""" +491 29 regularizer """no""" +491 29 optimizer """adam""" +491 29 training_loop 
"""owa""" +491 29 negative_sampler """basic""" +491 29 evaluator """rankbased""" +491 30 dataset """kinships""" +491 30 model """proje""" +491 30 loss """nssa""" +491 30 regularizer """no""" +491 30 optimizer """adam""" +491 30 training_loop """owa""" +491 30 negative_sampler """basic""" +491 30 evaluator """rankbased""" +491 31 dataset """kinships""" +491 31 model """proje""" +491 31 loss """nssa""" +491 31 regularizer """no""" +491 31 optimizer """adam""" +491 31 training_loop """owa""" +491 31 negative_sampler """basic""" +491 31 evaluator """rankbased""" +491 32 dataset """kinships""" +491 32 model """proje""" +491 32 loss """nssa""" +491 32 regularizer """no""" +491 32 optimizer """adam""" +491 32 training_loop """owa""" +491 32 negative_sampler """basic""" +491 32 evaluator """rankbased""" +491 33 dataset """kinships""" +491 33 model """proje""" +491 33 loss """nssa""" +491 33 regularizer """no""" +491 33 optimizer """adam""" +491 33 training_loop """owa""" +491 33 negative_sampler """basic""" +491 33 evaluator """rankbased""" +491 34 dataset """kinships""" +491 34 model """proje""" +491 34 loss """nssa""" +491 34 regularizer """no""" +491 34 optimizer """adam""" +491 34 training_loop """owa""" +491 34 negative_sampler """basic""" +491 34 evaluator """rankbased""" +491 35 dataset """kinships""" +491 35 model """proje""" +491 35 loss """nssa""" +491 35 regularizer """no""" +491 35 optimizer """adam""" +491 35 training_loop """owa""" +491 35 negative_sampler """basic""" +491 35 evaluator """rankbased""" +491 36 dataset """kinships""" +491 36 model """proje""" +491 36 loss """nssa""" +491 36 regularizer """no""" +491 36 optimizer """adam""" +491 36 training_loop """owa""" +491 36 negative_sampler """basic""" +491 36 evaluator """rankbased""" +491 37 dataset """kinships""" +491 37 model """proje""" +491 37 loss """nssa""" +491 37 regularizer """no""" +491 37 optimizer """adam""" +491 37 training_loop """owa""" +491 37 negative_sampler """basic""" +491 37 
evaluator """rankbased""" +491 38 dataset """kinships""" +491 38 model """proje""" +491 38 loss """nssa""" +491 38 regularizer """no""" +491 38 optimizer """adam""" +491 38 training_loop """owa""" +491 38 negative_sampler """basic""" +491 38 evaluator """rankbased""" +491 39 dataset """kinships""" +491 39 model """proje""" +491 39 loss """nssa""" +491 39 regularizer """no""" +491 39 optimizer """adam""" +491 39 training_loop """owa""" +491 39 negative_sampler """basic""" +491 39 evaluator """rankbased""" +491 40 dataset """kinships""" +491 40 model """proje""" +491 40 loss """nssa""" +491 40 regularizer """no""" +491 40 optimizer """adam""" +491 40 training_loop """owa""" +491 40 negative_sampler """basic""" +491 40 evaluator """rankbased""" +491 41 dataset """kinships""" +491 41 model """proje""" +491 41 loss """nssa""" +491 41 regularizer """no""" +491 41 optimizer """adam""" +491 41 training_loop """owa""" +491 41 negative_sampler """basic""" +491 41 evaluator """rankbased""" +491 42 dataset """kinships""" +491 42 model """proje""" +491 42 loss """nssa""" +491 42 regularizer """no""" +491 42 optimizer """adam""" +491 42 training_loop """owa""" +491 42 negative_sampler """basic""" +491 42 evaluator """rankbased""" +491 43 dataset """kinships""" +491 43 model """proje""" +491 43 loss """nssa""" +491 43 regularizer """no""" +491 43 optimizer """adam""" +491 43 training_loop """owa""" +491 43 negative_sampler """basic""" +491 43 evaluator """rankbased""" +491 44 dataset """kinships""" +491 44 model """proje""" +491 44 loss """nssa""" +491 44 regularizer """no""" +491 44 optimizer """adam""" +491 44 training_loop """owa""" +491 44 negative_sampler """basic""" +491 44 evaluator """rankbased""" +491 45 dataset """kinships""" +491 45 model """proje""" +491 45 loss """nssa""" +491 45 regularizer """no""" +491 45 optimizer """adam""" +491 45 training_loop """owa""" +491 45 negative_sampler """basic""" +491 45 evaluator """rankbased""" +491 46 dataset """kinships""" +491 
46 model """proje""" +491 46 loss """nssa""" +491 46 regularizer """no""" +491 46 optimizer """adam""" +491 46 training_loop """owa""" +491 46 negative_sampler """basic""" +491 46 evaluator """rankbased""" +491 47 dataset """kinships""" +491 47 model """proje""" +491 47 loss """nssa""" +491 47 regularizer """no""" +491 47 optimizer """adam""" +491 47 training_loop """owa""" +491 47 negative_sampler """basic""" +491 47 evaluator """rankbased""" +491 48 dataset """kinships""" +491 48 model """proje""" +491 48 loss """nssa""" +491 48 regularizer """no""" +491 48 optimizer """adam""" +491 48 training_loop """owa""" +491 48 negative_sampler """basic""" +491 48 evaluator """rankbased""" +491 49 dataset """kinships""" +491 49 model """proje""" +491 49 loss """nssa""" +491 49 regularizer """no""" +491 49 optimizer """adam""" +491 49 training_loop """owa""" +491 49 negative_sampler """basic""" +491 49 evaluator """rankbased""" +491 50 dataset """kinships""" +491 50 model """proje""" +491 50 loss """nssa""" +491 50 regularizer """no""" +491 50 optimizer """adam""" +491 50 training_loop """owa""" +491 50 negative_sampler """basic""" +491 50 evaluator """rankbased""" +491 51 dataset """kinships""" +491 51 model """proje""" +491 51 loss """nssa""" +491 51 regularizer """no""" +491 51 optimizer """adam""" +491 51 training_loop """owa""" +491 51 negative_sampler """basic""" +491 51 evaluator """rankbased""" +491 52 dataset """kinships""" +491 52 model """proje""" +491 52 loss """nssa""" +491 52 regularizer """no""" +491 52 optimizer """adam""" +491 52 training_loop """owa""" +491 52 negative_sampler """basic""" +491 52 evaluator """rankbased""" +491 53 dataset """kinships""" +491 53 model """proje""" +491 53 loss """nssa""" +491 53 regularizer """no""" +491 53 optimizer """adam""" +491 53 training_loop """owa""" +491 53 negative_sampler """basic""" +491 53 evaluator """rankbased""" +491 54 dataset """kinships""" +491 54 model """proje""" +491 54 loss """nssa""" +491 54 
regularizer """no""" +491 54 optimizer """adam""" +491 54 training_loop """owa""" +491 54 negative_sampler """basic""" +491 54 evaluator """rankbased""" +491 55 dataset """kinships""" +491 55 model """proje""" +491 55 loss """nssa""" +491 55 regularizer """no""" +491 55 optimizer """adam""" +491 55 training_loop """owa""" +491 55 negative_sampler """basic""" +491 55 evaluator """rankbased""" +491 56 dataset """kinships""" +491 56 model """proje""" +491 56 loss """nssa""" +491 56 regularizer """no""" +491 56 optimizer """adam""" +491 56 training_loop """owa""" +491 56 negative_sampler """basic""" +491 56 evaluator """rankbased""" +491 57 dataset """kinships""" +491 57 model """proje""" +491 57 loss """nssa""" +491 57 regularizer """no""" +491 57 optimizer """adam""" +491 57 training_loop """owa""" +491 57 negative_sampler """basic""" +491 57 evaluator """rankbased""" +491 58 dataset """kinships""" +491 58 model """proje""" +491 58 loss """nssa""" +491 58 regularizer """no""" +491 58 optimizer """adam""" +491 58 training_loop """owa""" +491 58 negative_sampler """basic""" +491 58 evaluator """rankbased""" +491 59 dataset """kinships""" +491 59 model """proje""" +491 59 loss """nssa""" +491 59 regularizer """no""" +491 59 optimizer """adam""" +491 59 training_loop """owa""" +491 59 negative_sampler """basic""" +491 59 evaluator """rankbased""" +491 60 dataset """kinships""" +491 60 model """proje""" +491 60 loss """nssa""" +491 60 regularizer """no""" +491 60 optimizer """adam""" +491 60 training_loop """owa""" +491 60 negative_sampler """basic""" +491 60 evaluator """rankbased""" +491 61 dataset """kinships""" +491 61 model """proje""" +491 61 loss """nssa""" +491 61 regularizer """no""" +491 61 optimizer """adam""" +491 61 training_loop """owa""" +491 61 negative_sampler """basic""" +491 61 evaluator """rankbased""" +491 62 dataset """kinships""" +491 62 model """proje""" +491 62 loss """nssa""" +491 62 regularizer """no""" +491 62 optimizer """adam""" +491 62 
training_loop """owa""" +491 62 negative_sampler """basic""" +491 62 evaluator """rankbased""" +491 63 dataset """kinships""" +491 63 model """proje""" +491 63 loss """nssa""" +491 63 regularizer """no""" +491 63 optimizer """adam""" +491 63 training_loop """owa""" +491 63 negative_sampler """basic""" +491 63 evaluator """rankbased""" +491 64 dataset """kinships""" +491 64 model """proje""" +491 64 loss """nssa""" +491 64 regularizer """no""" +491 64 optimizer """adam""" +491 64 training_loop """owa""" +491 64 negative_sampler """basic""" +491 64 evaluator """rankbased""" +491 65 dataset """kinships""" +491 65 model """proje""" +491 65 loss """nssa""" +491 65 regularizer """no""" +491 65 optimizer """adam""" +491 65 training_loop """owa""" +491 65 negative_sampler """basic""" +491 65 evaluator """rankbased""" +491 66 dataset """kinships""" +491 66 model """proje""" +491 66 loss """nssa""" +491 66 regularizer """no""" +491 66 optimizer """adam""" +491 66 training_loop """owa""" +491 66 negative_sampler """basic""" +491 66 evaluator """rankbased""" +491 67 dataset """kinships""" +491 67 model """proje""" +491 67 loss """nssa""" +491 67 regularizer """no""" +491 67 optimizer """adam""" +491 67 training_loop """owa""" +491 67 negative_sampler """basic""" +491 67 evaluator """rankbased""" +491 68 dataset """kinships""" +491 68 model """proje""" +491 68 loss """nssa""" +491 68 regularizer """no""" +491 68 optimizer """adam""" +491 68 training_loop """owa""" +491 68 negative_sampler """basic""" +491 68 evaluator """rankbased""" +491 69 dataset """kinships""" +491 69 model """proje""" +491 69 loss """nssa""" +491 69 regularizer """no""" +491 69 optimizer """adam""" +491 69 training_loop """owa""" +491 69 negative_sampler """basic""" +491 69 evaluator """rankbased""" +491 70 dataset """kinships""" +491 70 model """proje""" +491 70 loss """nssa""" +491 70 regularizer """no""" +491 70 optimizer """adam""" +491 70 training_loop """owa""" +491 70 negative_sampler """basic""" 
+491 70 evaluator """rankbased""" +491 71 dataset """kinships""" +491 71 model """proje""" +491 71 loss """nssa""" +491 71 regularizer """no""" +491 71 optimizer """adam""" +491 71 training_loop """owa""" +491 71 negative_sampler """basic""" +491 71 evaluator """rankbased""" +491 72 dataset """kinships""" +491 72 model """proje""" +491 72 loss """nssa""" +491 72 regularizer """no""" +491 72 optimizer """adam""" +491 72 training_loop """owa""" +491 72 negative_sampler """basic""" +491 72 evaluator """rankbased""" +491 73 dataset """kinships""" +491 73 model """proje""" +491 73 loss """nssa""" +491 73 regularizer """no""" +491 73 optimizer """adam""" +491 73 training_loop """owa""" +491 73 negative_sampler """basic""" +491 73 evaluator """rankbased""" +491 74 dataset """kinships""" +491 74 model """proje""" +491 74 loss """nssa""" +491 74 regularizer """no""" +491 74 optimizer """adam""" +491 74 training_loop """owa""" +491 74 negative_sampler """basic""" +491 74 evaluator """rankbased""" +491 75 dataset """kinships""" +491 75 model """proje""" +491 75 loss """nssa""" +491 75 regularizer """no""" +491 75 optimizer """adam""" +491 75 training_loop """owa""" +491 75 negative_sampler """basic""" +491 75 evaluator """rankbased""" +491 76 dataset """kinships""" +491 76 model """proje""" +491 76 loss """nssa""" +491 76 regularizer """no""" +491 76 optimizer """adam""" +491 76 training_loop """owa""" +491 76 negative_sampler """basic""" +491 76 evaluator """rankbased""" +491 77 dataset """kinships""" +491 77 model """proje""" +491 77 loss """nssa""" +491 77 regularizer """no""" +491 77 optimizer """adam""" +491 77 training_loop """owa""" +491 77 negative_sampler """basic""" +491 77 evaluator """rankbased""" +491 78 dataset """kinships""" +491 78 model """proje""" +491 78 loss """nssa""" +491 78 regularizer """no""" +491 78 optimizer """adam""" +491 78 training_loop """owa""" +491 78 negative_sampler """basic""" +491 78 evaluator """rankbased""" +491 79 dataset 
"""kinships""" +491 79 model """proje""" +491 79 loss """nssa""" +491 79 regularizer """no""" +491 79 optimizer """adam""" +491 79 training_loop """owa""" +491 79 negative_sampler """basic""" +491 79 evaluator """rankbased""" +491 80 dataset """kinships""" +491 80 model """proje""" +491 80 loss """nssa""" +491 80 regularizer """no""" +491 80 optimizer """adam""" +491 80 training_loop """owa""" +491 80 negative_sampler """basic""" +491 80 evaluator """rankbased""" +491 81 dataset """kinships""" +491 81 model """proje""" +491 81 loss """nssa""" +491 81 regularizer """no""" +491 81 optimizer """adam""" +491 81 training_loop """owa""" +491 81 negative_sampler """basic""" +491 81 evaluator """rankbased""" +491 82 dataset """kinships""" +491 82 model """proje""" +491 82 loss """nssa""" +491 82 regularizer """no""" +491 82 optimizer """adam""" +491 82 training_loop """owa""" +491 82 negative_sampler """basic""" +491 82 evaluator """rankbased""" +491 83 dataset """kinships""" +491 83 model """proje""" +491 83 loss """nssa""" +491 83 regularizer """no""" +491 83 optimizer """adam""" +491 83 training_loop """owa""" +491 83 negative_sampler """basic""" +491 83 evaluator """rankbased""" +491 84 dataset """kinships""" +491 84 model """proje""" +491 84 loss """nssa""" +491 84 regularizer """no""" +491 84 optimizer """adam""" +491 84 training_loop """owa""" +491 84 negative_sampler """basic""" +491 84 evaluator """rankbased""" +491 85 dataset """kinships""" +491 85 model """proje""" +491 85 loss """nssa""" +491 85 regularizer """no""" +491 85 optimizer """adam""" +491 85 training_loop """owa""" +491 85 negative_sampler """basic""" +491 85 evaluator """rankbased""" +491 86 dataset """kinships""" +491 86 model """proje""" +491 86 loss """nssa""" +491 86 regularizer """no""" +491 86 optimizer """adam""" +491 86 training_loop """owa""" +491 86 negative_sampler """basic""" +491 86 evaluator """rankbased""" +491 87 dataset """kinships""" +491 87 model """proje""" +491 87 loss 
"""nssa""" +491 87 regularizer """no""" +491 87 optimizer """adam""" +491 87 training_loop """owa""" +491 87 negative_sampler """basic""" +491 87 evaluator """rankbased""" +491 88 dataset """kinships""" +491 88 model """proje""" +491 88 loss """nssa""" +491 88 regularizer """no""" +491 88 optimizer """adam""" +491 88 training_loop """owa""" +491 88 negative_sampler """basic""" +491 88 evaluator """rankbased""" +491 89 dataset """kinships""" +491 89 model """proje""" +491 89 loss """nssa""" +491 89 regularizer """no""" +491 89 optimizer """adam""" +491 89 training_loop """owa""" +491 89 negative_sampler """basic""" +491 89 evaluator """rankbased""" +491 90 dataset """kinships""" +491 90 model """proje""" +491 90 loss """nssa""" +491 90 regularizer """no""" +491 90 optimizer """adam""" +491 90 training_loop """owa""" +491 90 negative_sampler """basic""" +491 90 evaluator """rankbased""" +491 91 dataset """kinships""" +491 91 model """proje""" +491 91 loss """nssa""" +491 91 regularizer """no""" +491 91 optimizer """adam""" +491 91 training_loop """owa""" +491 91 negative_sampler """basic""" +491 91 evaluator """rankbased""" +491 92 dataset """kinships""" +491 92 model """proje""" +491 92 loss """nssa""" +491 92 regularizer """no""" +491 92 optimizer """adam""" +491 92 training_loop """owa""" +491 92 negative_sampler """basic""" +491 92 evaluator """rankbased""" +491 93 dataset """kinships""" +491 93 model """proje""" +491 93 loss """nssa""" +491 93 regularizer """no""" +491 93 optimizer """adam""" +491 93 training_loop """owa""" +491 93 negative_sampler """basic""" +491 93 evaluator """rankbased""" +491 94 dataset """kinships""" +491 94 model """proje""" +491 94 loss """nssa""" +491 94 regularizer """no""" +491 94 optimizer """adam""" +491 94 training_loop """owa""" +491 94 negative_sampler """basic""" +491 94 evaluator """rankbased""" +491 95 dataset """kinships""" +491 95 model """proje""" +491 95 loss """nssa""" +491 95 regularizer """no""" +491 95 optimizer 
"""adam""" +491 95 training_loop """owa""" +491 95 negative_sampler """basic""" +491 95 evaluator """rankbased""" +491 96 dataset """kinships""" +491 96 model """proje""" +491 96 loss """nssa""" +491 96 regularizer """no""" +491 96 optimizer """adam""" +491 96 training_loop """owa""" +491 96 negative_sampler """basic""" +491 96 evaluator """rankbased""" +491 97 dataset """kinships""" +491 97 model """proje""" +491 97 loss """nssa""" +491 97 regularizer """no""" +491 97 optimizer """adam""" +491 97 training_loop """owa""" +491 97 negative_sampler """basic""" +491 97 evaluator """rankbased""" +491 98 dataset """kinships""" +491 98 model """proje""" +491 98 loss """nssa""" +491 98 regularizer """no""" +491 98 optimizer """adam""" +491 98 training_loop """owa""" +491 98 negative_sampler """basic""" +491 98 evaluator """rankbased""" +491 99 dataset """kinships""" +491 99 model """proje""" +491 99 loss """nssa""" +491 99 regularizer """no""" +491 99 optimizer """adam""" +491 99 training_loop """owa""" +491 99 negative_sampler """basic""" +491 99 evaluator """rankbased""" +491 100 dataset """kinships""" +491 100 model """proje""" +491 100 loss """nssa""" +491 100 regularizer """no""" +491 100 optimizer """adam""" +491 100 training_loop """owa""" +491 100 negative_sampler """basic""" +491 100 evaluator """rankbased""" +492 1 model.embedding_dim 1.0 +492 1 loss.margin 3.789511833659073 +492 1 loss.adversarial_temperature 0.9079400952004009 +492 1 optimizer.lr 0.029546321641434108 +492 1 negative_sampler.num_negs_per_pos 64.0 +492 1 training.batch_size 1.0 +492 2 model.embedding_dim 0.0 +492 2 loss.margin 29.08519056699959 +492 2 loss.adversarial_temperature 0.46029095667893116 +492 2 optimizer.lr 0.007751225218442169 +492 2 negative_sampler.num_negs_per_pos 49.0 +492 2 training.batch_size 0.0 +492 3 model.embedding_dim 0.0 +492 3 loss.margin 6.6269785025385355 +492 3 loss.adversarial_temperature 0.4254021064969813 +492 3 optimizer.lr 0.0014604327177227007 +492 3 
negative_sampler.num_negs_per_pos 10.0 +492 3 training.batch_size 0.0 +492 4 model.embedding_dim 0.0 +492 4 loss.margin 2.7960376660075545 +492 4 loss.adversarial_temperature 0.47975315749696956 +492 4 optimizer.lr 0.018971750275936148 +492 4 negative_sampler.num_negs_per_pos 56.0 +492 4 training.batch_size 0.0 +492 5 model.embedding_dim 2.0 +492 5 loss.margin 10.7619235626866 +492 5 loss.adversarial_temperature 0.5417906872065391 +492 5 optimizer.lr 0.005906057129643456 +492 5 negative_sampler.num_negs_per_pos 2.0 +492 5 training.batch_size 1.0 +492 6 model.embedding_dim 2.0 +492 6 loss.margin 16.163962789935315 +492 6 loss.adversarial_temperature 0.4933739781202464 +492 6 optimizer.lr 0.001341930263341074 +492 6 negative_sampler.num_negs_per_pos 80.0 +492 6 training.batch_size 1.0 +492 7 model.embedding_dim 1.0 +492 7 loss.margin 13.03164565329742 +492 7 loss.adversarial_temperature 0.770862432400381 +492 7 optimizer.lr 0.02386649041543838 +492 7 negative_sampler.num_negs_per_pos 29.0 +492 7 training.batch_size 2.0 +492 8 model.embedding_dim 1.0 +492 8 loss.margin 12.195153242624263 +492 8 loss.adversarial_temperature 0.34896944699623755 +492 8 optimizer.lr 0.00591244699986579 +492 8 negative_sampler.num_negs_per_pos 48.0 +492 8 training.batch_size 1.0 +492 9 model.embedding_dim 1.0 +492 9 loss.margin 27.192162285434332 +492 9 loss.adversarial_temperature 0.3359810187411694 +492 9 optimizer.lr 0.007129826172335404 +492 9 negative_sampler.num_negs_per_pos 76.0 +492 9 training.batch_size 1.0 +492 10 model.embedding_dim 1.0 +492 10 loss.margin 12.899734217130376 +492 10 loss.adversarial_temperature 0.8960563672004908 +492 10 optimizer.lr 0.058575085180303944 +492 10 negative_sampler.num_negs_per_pos 8.0 +492 10 training.batch_size 1.0 +492 11 model.embedding_dim 1.0 +492 11 loss.margin 3.57028096788541 +492 11 loss.adversarial_temperature 0.21179903161570168 +492 11 optimizer.lr 0.010354992824353646 +492 11 negative_sampler.num_negs_per_pos 98.0 +492 11 
training.batch_size 0.0 +492 12 model.embedding_dim 0.0 +492 12 loss.margin 5.072077456478252 +492 12 loss.adversarial_temperature 0.9354100323231545 +492 12 optimizer.lr 0.002969583959082023 +492 12 negative_sampler.num_negs_per_pos 37.0 +492 12 training.batch_size 1.0 +492 13 model.embedding_dim 0.0 +492 13 loss.margin 4.874139301296834 +492 13 loss.adversarial_temperature 0.25443694107992765 +492 13 optimizer.lr 0.030061127995432587 +492 13 negative_sampler.num_negs_per_pos 17.0 +492 13 training.batch_size 2.0 +492 14 model.embedding_dim 1.0 +492 14 loss.margin 15.838559415345783 +492 14 loss.adversarial_temperature 0.17124732128840403 +492 14 optimizer.lr 0.06979320023875328 +492 14 negative_sampler.num_negs_per_pos 89.0 +492 14 training.batch_size 2.0 +492 15 model.embedding_dim 0.0 +492 15 loss.margin 15.11302126815844 +492 15 loss.adversarial_temperature 0.556765969045939 +492 15 optimizer.lr 0.08267063844546607 +492 15 negative_sampler.num_negs_per_pos 59.0 +492 15 training.batch_size 1.0 +492 16 model.embedding_dim 0.0 +492 16 loss.margin 17.15824951970077 +492 16 loss.adversarial_temperature 0.26224221637927503 +492 16 optimizer.lr 0.0754603986661717 +492 16 negative_sampler.num_negs_per_pos 53.0 +492 16 training.batch_size 2.0 +492 17 model.embedding_dim 1.0 +492 17 loss.margin 22.001520194852688 +492 17 loss.adversarial_temperature 0.3783770716685372 +492 17 optimizer.lr 0.03799260592811892 +492 17 negative_sampler.num_negs_per_pos 62.0 +492 17 training.batch_size 1.0 +492 18 model.embedding_dim 1.0 +492 18 loss.margin 5.7278477652139 +492 18 loss.adversarial_temperature 0.6226837667675266 +492 18 optimizer.lr 0.002205896978187014 +492 18 negative_sampler.num_negs_per_pos 38.0 +492 18 training.batch_size 2.0 +492 19 model.embedding_dim 0.0 +492 19 loss.margin 21.972815590600046 +492 19 loss.adversarial_temperature 0.972915116717856 +492 19 optimizer.lr 0.0033027111860405513 +492 19 negative_sampler.num_negs_per_pos 26.0 +492 19 training.batch_size 2.0 
+492 20 model.embedding_dim 2.0 +492 20 loss.margin 4.534974573929467 +492 20 loss.adversarial_temperature 0.9475316364161245 +492 20 optimizer.lr 0.007107020503365727 +492 20 negative_sampler.num_negs_per_pos 8.0 +492 20 training.batch_size 2.0 +492 21 model.embedding_dim 2.0 +492 21 loss.margin 13.207598944626437 +492 21 loss.adversarial_temperature 0.1283893349910741 +492 21 optimizer.lr 0.016512174873482183 +492 21 negative_sampler.num_negs_per_pos 36.0 +492 21 training.batch_size 1.0 +492 22 model.embedding_dim 1.0 +492 22 loss.margin 29.986548789252137 +492 22 loss.adversarial_temperature 0.7389554507988376 +492 22 optimizer.lr 0.031215720861594645 +492 22 negative_sampler.num_negs_per_pos 71.0 +492 22 training.batch_size 0.0 +492 23 model.embedding_dim 1.0 +492 23 loss.margin 17.014126361398397 +492 23 loss.adversarial_temperature 0.8756073465186671 +492 23 optimizer.lr 0.02119078379104267 +492 23 negative_sampler.num_negs_per_pos 96.0 +492 23 training.batch_size 0.0 +492 24 model.embedding_dim 1.0 +492 24 loss.margin 19.645921523552584 +492 24 loss.adversarial_temperature 0.36762921737073817 +492 24 optimizer.lr 0.024967999470090447 +492 24 negative_sampler.num_negs_per_pos 82.0 +492 24 training.batch_size 0.0 +492 25 model.embedding_dim 0.0 +492 25 loss.margin 20.655053283552164 +492 25 loss.adversarial_temperature 0.8433111298802276 +492 25 optimizer.lr 0.07382867741595442 +492 25 negative_sampler.num_negs_per_pos 35.0 +492 25 training.batch_size 2.0 +492 26 model.embedding_dim 0.0 +492 26 loss.margin 4.434331492238538 +492 26 loss.adversarial_temperature 0.2817929576056652 +492 26 optimizer.lr 0.001815883239630447 +492 26 negative_sampler.num_negs_per_pos 83.0 +492 26 training.batch_size 2.0 +492 27 model.embedding_dim 0.0 +492 27 loss.margin 23.443673160798642 +492 27 loss.adversarial_temperature 0.8131797325095211 +492 27 optimizer.lr 0.08889191025099627 +492 27 negative_sampler.num_negs_per_pos 35.0 +492 27 training.batch_size 1.0 +492 28 
model.embedding_dim 0.0 +492 28 loss.margin 15.21486098253329 +492 28 loss.adversarial_temperature 0.5753069990302483 +492 28 optimizer.lr 0.06748057809306159 +492 28 negative_sampler.num_negs_per_pos 54.0 +492 28 training.batch_size 0.0 +492 29 model.embedding_dim 0.0 +492 29 loss.margin 22.228925244570814 +492 29 loss.adversarial_temperature 0.6481882895279145 +492 29 optimizer.lr 0.014325558632222929 +492 29 negative_sampler.num_negs_per_pos 51.0 +492 29 training.batch_size 1.0 +492 30 model.embedding_dim 2.0 +492 30 loss.margin 13.507943434243487 +492 30 loss.adversarial_temperature 0.4503145808318142 +492 30 optimizer.lr 0.009551711814903382 +492 30 negative_sampler.num_negs_per_pos 4.0 +492 30 training.batch_size 0.0 +492 31 model.embedding_dim 2.0 +492 31 loss.margin 24.612952805417567 +492 31 loss.adversarial_temperature 0.9812002551180727 +492 31 optimizer.lr 0.0013962994977667466 +492 31 negative_sampler.num_negs_per_pos 13.0 +492 31 training.batch_size 0.0 +492 32 model.embedding_dim 2.0 +492 32 loss.margin 3.9040401362032644 +492 32 loss.adversarial_temperature 0.26119493346955963 +492 32 optimizer.lr 0.03360226190950418 +492 32 negative_sampler.num_negs_per_pos 68.0 +492 32 training.batch_size 0.0 +492 33 model.embedding_dim 2.0 +492 33 loss.margin 14.39078936812283 +492 33 loss.adversarial_temperature 0.51658051351026 +492 33 optimizer.lr 0.003065547390786133 +492 33 negative_sampler.num_negs_per_pos 82.0 +492 33 training.batch_size 1.0 +492 34 model.embedding_dim 1.0 +492 34 loss.margin 17.977601499909373 +492 34 loss.adversarial_temperature 0.5838421506887609 +492 34 optimizer.lr 0.004426982226859801 +492 34 negative_sampler.num_negs_per_pos 93.0 +492 34 training.batch_size 0.0 +492 35 model.embedding_dim 2.0 +492 35 loss.margin 7.468818433647269 +492 35 loss.adversarial_temperature 0.43921874214709644 +492 35 optimizer.lr 0.0028717933304692527 +492 35 negative_sampler.num_negs_per_pos 12.0 +492 35 training.batch_size 1.0 +492 36 model.embedding_dim 
2.0 +492 36 loss.margin 27.372327175325733 +492 36 loss.adversarial_temperature 0.49303195890341545 +492 36 optimizer.lr 0.0019700913136322055 +492 36 negative_sampler.num_negs_per_pos 39.0 +492 36 training.batch_size 0.0 +492 37 model.embedding_dim 0.0 +492 37 loss.margin 13.0381881271288 +492 37 loss.adversarial_temperature 0.7502790787799442 +492 37 optimizer.lr 0.054339117351907065 +492 37 negative_sampler.num_negs_per_pos 19.0 +492 37 training.batch_size 0.0 +492 38 model.embedding_dim 1.0 +492 38 loss.margin 11.80640967210694 +492 38 loss.adversarial_temperature 0.7058092059399599 +492 38 optimizer.lr 0.003205826932491882 +492 38 negative_sampler.num_negs_per_pos 25.0 +492 38 training.batch_size 2.0 +492 39 model.embedding_dim 0.0 +492 39 loss.margin 2.261971548576356 +492 39 loss.adversarial_temperature 0.9128369831732639 +492 39 optimizer.lr 0.0014138299157476512 +492 39 negative_sampler.num_negs_per_pos 7.0 +492 39 training.batch_size 1.0 +492 40 model.embedding_dim 1.0 +492 40 loss.margin 1.0983309954772569 +492 40 loss.adversarial_temperature 0.4522571523992577 +492 40 optimizer.lr 0.04235420703544382 +492 40 negative_sampler.num_negs_per_pos 97.0 +492 40 training.batch_size 0.0 +492 41 model.embedding_dim 1.0 +492 41 loss.margin 7.489930918686033 +492 41 loss.adversarial_temperature 0.5125796177239562 +492 41 optimizer.lr 0.017032076312592955 +492 41 negative_sampler.num_negs_per_pos 99.0 +492 41 training.batch_size 0.0 +492 42 model.embedding_dim 2.0 +492 42 loss.margin 25.051159089459155 +492 42 loss.adversarial_temperature 0.9652192975314878 +492 42 optimizer.lr 0.026430563827622367 +492 42 negative_sampler.num_negs_per_pos 51.0 +492 42 training.batch_size 2.0 +492 43 model.embedding_dim 1.0 +492 43 loss.margin 15.255694175304962 +492 43 loss.adversarial_temperature 0.4033658446686329 +492 43 optimizer.lr 0.08843349369158908 +492 43 negative_sampler.num_negs_per_pos 27.0 +492 43 training.batch_size 2.0 +492 44 model.embedding_dim 1.0 +492 44 
loss.margin 15.872250645081007 +492 44 loss.adversarial_temperature 0.17580409548171574 +492 44 optimizer.lr 0.006568870184990566 +492 44 negative_sampler.num_negs_per_pos 37.0 +492 44 training.batch_size 1.0 +492 45 model.embedding_dim 2.0 +492 45 loss.margin 25.35129229648726 +492 45 loss.adversarial_temperature 0.703881844177374 +492 45 optimizer.lr 0.014167221849311754 +492 45 negative_sampler.num_negs_per_pos 25.0 +492 45 training.batch_size 1.0 +492 46 model.embedding_dim 2.0 +492 46 loss.margin 20.607515545603718 +492 46 loss.adversarial_temperature 0.2492144445855593 +492 46 optimizer.lr 0.001163417194122994 +492 46 negative_sampler.num_negs_per_pos 12.0 +492 46 training.batch_size 1.0 +492 47 model.embedding_dim 2.0 +492 47 loss.margin 14.746015715372625 +492 47 loss.adversarial_temperature 0.7345396826413854 +492 47 optimizer.lr 0.0983787060717296 +492 47 negative_sampler.num_negs_per_pos 62.0 +492 47 training.batch_size 2.0 +492 48 model.embedding_dim 2.0 +492 48 loss.margin 17.551698312607876 +492 48 loss.adversarial_temperature 0.12811397938899244 +492 48 optimizer.lr 0.006819499401645731 +492 48 negative_sampler.num_negs_per_pos 36.0 +492 48 training.batch_size 0.0 +492 49 model.embedding_dim 0.0 +492 49 loss.margin 23.505693913153273 +492 49 loss.adversarial_temperature 0.20137777903990536 +492 49 optimizer.lr 0.0024949779290727465 +492 49 negative_sampler.num_negs_per_pos 50.0 +492 49 training.batch_size 1.0 +492 50 model.embedding_dim 2.0 +492 50 loss.margin 12.87416170044508 +492 50 loss.adversarial_temperature 0.1625636203385442 +492 50 optimizer.lr 0.01571364825601213 +492 50 negative_sampler.num_negs_per_pos 1.0 +492 50 training.batch_size 0.0 +492 51 model.embedding_dim 1.0 +492 51 loss.margin 19.39876507693797 +492 51 loss.adversarial_temperature 0.3575574493878476 +492 51 optimizer.lr 0.0013799285082264445 +492 51 negative_sampler.num_negs_per_pos 36.0 +492 51 training.batch_size 0.0 +492 52 model.embedding_dim 0.0 +492 52 loss.margin 
3.6936625893582997 +492 52 loss.adversarial_temperature 0.5866990443198967 +492 52 optimizer.lr 0.022101630849353016 +492 52 negative_sampler.num_negs_per_pos 79.0 +492 52 training.batch_size 0.0 +492 53 model.embedding_dim 1.0 +492 53 loss.margin 20.222007454211415 +492 53 loss.adversarial_temperature 0.493603961541096 +492 53 optimizer.lr 0.03649910293112231 +492 53 negative_sampler.num_negs_per_pos 9.0 +492 53 training.batch_size 0.0 +492 54 model.embedding_dim 0.0 +492 54 loss.margin 1.274534895530753 +492 54 loss.adversarial_temperature 0.47946217206690833 +492 54 optimizer.lr 0.0023491470657327 +492 54 negative_sampler.num_negs_per_pos 83.0 +492 54 training.batch_size 0.0 +492 55 model.embedding_dim 2.0 +492 55 loss.margin 20.661670072462098 +492 55 loss.adversarial_temperature 0.4627451465049649 +492 55 optimizer.lr 0.00219965306923416 +492 55 negative_sampler.num_negs_per_pos 26.0 +492 55 training.batch_size 0.0 +492 56 model.embedding_dim 1.0 +492 56 loss.margin 14.25741715366749 +492 56 loss.adversarial_temperature 0.8632951619320849 +492 56 optimizer.lr 0.01963424406843624 +492 56 negative_sampler.num_negs_per_pos 69.0 +492 56 training.batch_size 1.0 +492 57 model.embedding_dim 2.0 +492 57 loss.margin 22.593509902679305 +492 57 loss.adversarial_temperature 0.6989240150680994 +492 57 optimizer.lr 0.016493802625755633 +492 57 negative_sampler.num_negs_per_pos 59.0 +492 57 training.batch_size 0.0 +492 58 model.embedding_dim 0.0 +492 58 loss.margin 4.481875487622414 +492 58 loss.adversarial_temperature 0.8152032029874983 +492 58 optimizer.lr 0.0022837868546755225 +492 58 negative_sampler.num_negs_per_pos 2.0 +492 58 training.batch_size 2.0 +492 59 model.embedding_dim 1.0 +492 59 loss.margin 2.381396801110224 +492 59 loss.adversarial_temperature 0.6228377278358178 +492 59 optimizer.lr 0.006753302938945743 +492 59 negative_sampler.num_negs_per_pos 37.0 +492 59 training.batch_size 1.0 +492 60 model.embedding_dim 1.0 +492 60 loss.margin 11.555572975054115 +492 
60 loss.adversarial_temperature 0.42482242968733375 +492 60 optimizer.lr 0.011653138007039127 +492 60 negative_sampler.num_negs_per_pos 87.0 +492 60 training.batch_size 1.0 +492 61 model.embedding_dim 2.0 +492 61 loss.margin 16.094994849674663 +492 61 loss.adversarial_temperature 0.7868766430104438 +492 61 optimizer.lr 0.015651932309340942 +492 61 negative_sampler.num_negs_per_pos 31.0 +492 61 training.batch_size 1.0 +492 62 model.embedding_dim 2.0 +492 62 loss.margin 11.674093141284922 +492 62 loss.adversarial_temperature 0.8542885606155984 +492 62 optimizer.lr 0.04691424009553766 +492 62 negative_sampler.num_negs_per_pos 41.0 +492 62 training.batch_size 1.0 +492 63 model.embedding_dim 0.0 +492 63 loss.margin 7.158796744195098 +492 63 loss.adversarial_temperature 0.30629581158478325 +492 63 optimizer.lr 0.0074753165044751345 +492 63 negative_sampler.num_negs_per_pos 75.0 +492 63 training.batch_size 1.0 +492 64 model.embedding_dim 2.0 +492 64 loss.margin 28.914240678451687 +492 64 loss.adversarial_temperature 0.3929053205125349 +492 64 optimizer.lr 0.0034842400838538978 +492 64 negative_sampler.num_negs_per_pos 11.0 +492 64 training.batch_size 0.0 +492 65 model.embedding_dim 1.0 +492 65 loss.margin 14.34967223166246 +492 65 loss.adversarial_temperature 0.7178598092163478 +492 65 optimizer.lr 0.008263941025727709 +492 65 negative_sampler.num_negs_per_pos 90.0 +492 65 training.batch_size 1.0 +492 66 model.embedding_dim 0.0 +492 66 loss.margin 14.090585140285443 +492 66 loss.adversarial_temperature 0.2266176500716304 +492 66 optimizer.lr 0.0203304020810872 +492 66 negative_sampler.num_negs_per_pos 67.0 +492 66 training.batch_size 2.0 +492 67 model.embedding_dim 1.0 +492 67 loss.margin 29.796302598418087 +492 67 loss.adversarial_temperature 0.6980199175486894 +492 67 optimizer.lr 0.07330709859463092 +492 67 negative_sampler.num_negs_per_pos 89.0 +492 67 training.batch_size 1.0 +492 68 model.embedding_dim 2.0 +492 68 loss.margin 22.954246956970046 +492 68 
loss.adversarial_temperature 0.7461267740071952 +492 68 optimizer.lr 0.09464514374494139 +492 68 negative_sampler.num_negs_per_pos 47.0 +492 68 training.batch_size 0.0 +492 69 model.embedding_dim 2.0 +492 69 loss.margin 12.244774771406727 +492 69 loss.adversarial_temperature 0.648315298206254 +492 69 optimizer.lr 0.0019553499821370174 +492 69 negative_sampler.num_negs_per_pos 32.0 +492 69 training.batch_size 2.0 +492 70 model.embedding_dim 0.0 +492 70 loss.margin 24.006374581158504 +492 70 loss.adversarial_temperature 0.3006607941589303 +492 70 optimizer.lr 0.006873675833473997 +492 70 negative_sampler.num_negs_per_pos 47.0 +492 70 training.batch_size 2.0 +492 71 model.embedding_dim 0.0 +492 71 loss.margin 16.98403951020883 +492 71 loss.adversarial_temperature 0.9976408779490195 +492 71 optimizer.lr 0.06690031109349263 +492 71 negative_sampler.num_negs_per_pos 97.0 +492 71 training.batch_size 1.0 +492 72 model.embedding_dim 2.0 +492 72 loss.margin 22.528124760717784 +492 72 loss.adversarial_temperature 0.9141729560803277 +492 72 optimizer.lr 0.010988681650115954 +492 72 negative_sampler.num_negs_per_pos 76.0 +492 72 training.batch_size 0.0 +492 73 model.embedding_dim 0.0 +492 73 loss.margin 8.723811519393932 +492 73 loss.adversarial_temperature 0.2852109039343642 +492 73 optimizer.lr 0.012642814171941425 +492 73 negative_sampler.num_negs_per_pos 75.0 +492 73 training.batch_size 2.0 +492 74 model.embedding_dim 2.0 +492 74 loss.margin 11.310055000013524 +492 74 loss.adversarial_temperature 0.7506767057809568 +492 74 optimizer.lr 0.0031107037074385013 +492 74 negative_sampler.num_negs_per_pos 15.0 +492 74 training.batch_size 2.0 +492 75 model.embedding_dim 1.0 +492 75 loss.margin 1.2728742057077147 +492 75 loss.adversarial_temperature 0.3263775853145405 +492 75 optimizer.lr 0.026207704469756194 +492 75 negative_sampler.num_negs_per_pos 38.0 +492 75 training.batch_size 1.0 +492 76 model.embedding_dim 2.0 +492 76 loss.margin 19.726866861488634 +492 76 
loss.adversarial_temperature 0.8106851709029294 +492 76 optimizer.lr 0.054379073836231515 +492 76 negative_sampler.num_negs_per_pos 98.0 +492 76 training.batch_size 0.0 +492 77 model.embedding_dim 2.0 +492 77 loss.margin 20.021613376979694 +492 77 loss.adversarial_temperature 0.8117558325861384 +492 77 optimizer.lr 0.05936192873241638 +492 77 negative_sampler.num_negs_per_pos 22.0 +492 77 training.batch_size 0.0 +492 78 model.embedding_dim 2.0 +492 78 loss.margin 27.79674127330942 +492 78 loss.adversarial_temperature 0.48236947820167697 +492 78 optimizer.lr 0.03813199046604184 +492 78 negative_sampler.num_negs_per_pos 40.0 +492 78 training.batch_size 2.0 +492 79 model.embedding_dim 0.0 +492 79 loss.margin 28.070901018351123 +492 79 loss.adversarial_temperature 0.22946988520564035 +492 79 optimizer.lr 0.009245295259557782 +492 79 negative_sampler.num_negs_per_pos 33.0 +492 79 training.batch_size 0.0 +492 80 model.embedding_dim 2.0 +492 80 loss.margin 8.843986454844293 +492 80 loss.adversarial_temperature 0.3857039256210877 +492 80 optimizer.lr 0.002926158043384339 +492 80 negative_sampler.num_negs_per_pos 33.0 +492 80 training.batch_size 2.0 +492 81 model.embedding_dim 2.0 +492 81 loss.margin 13.104148833313952 +492 81 loss.adversarial_temperature 0.9976056041527906 +492 81 optimizer.lr 0.0022606715438901873 +492 81 negative_sampler.num_negs_per_pos 55.0 +492 81 training.batch_size 0.0 +492 82 model.embedding_dim 0.0 +492 82 loss.margin 22.673883046078913 +492 82 loss.adversarial_temperature 0.518949673811142 +492 82 optimizer.lr 0.09792213170956134 +492 82 negative_sampler.num_negs_per_pos 28.0 +492 82 training.batch_size 0.0 +492 83 model.embedding_dim 1.0 +492 83 loss.margin 6.230268020739412 +492 83 loss.adversarial_temperature 0.9507282724909499 +492 83 optimizer.lr 0.005930208342356504 +492 83 negative_sampler.num_negs_per_pos 18.0 +492 83 training.batch_size 2.0 +492 84 model.embedding_dim 2.0 +492 84 loss.margin 19.85902303600424 +492 84 
loss.adversarial_temperature 0.15658314323752603 +492 84 optimizer.lr 0.005310013826161946 +492 84 negative_sampler.num_negs_per_pos 20.0 +492 84 training.batch_size 0.0 +492 85 model.embedding_dim 1.0 +492 85 loss.margin 24.904992101018745 +492 85 loss.adversarial_temperature 0.11386735376329987 +492 85 optimizer.lr 0.01223575178907781 +492 85 negative_sampler.num_negs_per_pos 29.0 +492 85 training.batch_size 1.0 +492 86 model.embedding_dim 2.0 +492 86 loss.margin 17.26596522821079 +492 86 loss.adversarial_temperature 0.5671759382766922 +492 86 optimizer.lr 0.0029909596852661672 +492 86 negative_sampler.num_negs_per_pos 31.0 +492 86 training.batch_size 1.0 +492 87 model.embedding_dim 1.0 +492 87 loss.margin 11.009337909378575 +492 87 loss.adversarial_temperature 0.8796997490844853 +492 87 optimizer.lr 0.006340906483391852 +492 87 negative_sampler.num_negs_per_pos 49.0 +492 87 training.batch_size 1.0 +492 88 model.embedding_dim 0.0 +492 88 loss.margin 19.102495766750934 +492 88 loss.adversarial_temperature 0.426447616829265 +492 88 optimizer.lr 0.024135790527148756 +492 88 negative_sampler.num_negs_per_pos 60.0 +492 88 training.batch_size 2.0 +492 89 model.embedding_dim 1.0 +492 89 loss.margin 11.631426480149232 +492 89 loss.adversarial_temperature 0.7951681542427057 +492 89 optimizer.lr 0.0012360670749789505 +492 89 negative_sampler.num_negs_per_pos 86.0 +492 89 training.batch_size 1.0 +492 90 model.embedding_dim 2.0 +492 90 loss.margin 20.796715476081495 +492 90 loss.adversarial_temperature 0.5234761591628699 +492 90 optimizer.lr 0.017138427455481716 +492 90 negative_sampler.num_negs_per_pos 19.0 +492 90 training.batch_size 0.0 +492 91 model.embedding_dim 0.0 +492 91 loss.margin 27.43459136918613 +492 91 loss.adversarial_temperature 0.28070281170079775 +492 91 optimizer.lr 0.014993569022478318 +492 91 negative_sampler.num_negs_per_pos 21.0 +492 91 training.batch_size 2.0 +492 92 model.embedding_dim 1.0 +492 92 loss.margin 26.955152314123342 +492 92 
loss.adversarial_temperature 0.5120012712796799 +492 92 optimizer.lr 0.003677275042976174 +492 92 negative_sampler.num_negs_per_pos 30.0 +492 92 training.batch_size 0.0 +492 93 model.embedding_dim 1.0 +492 93 loss.margin 29.416086512062172 +492 93 loss.adversarial_temperature 0.4641640626023481 +492 93 optimizer.lr 0.011013132573984303 +492 93 negative_sampler.num_negs_per_pos 93.0 +492 93 training.batch_size 1.0 +492 94 model.embedding_dim 0.0 +492 94 loss.margin 22.377762240442095 +492 94 loss.adversarial_temperature 0.24358215201590136 +492 94 optimizer.lr 0.0032445315465579588 +492 94 negative_sampler.num_negs_per_pos 66.0 +492 94 training.batch_size 0.0 +492 95 model.embedding_dim 1.0 +492 95 loss.margin 12.19212600456217 +492 95 loss.adversarial_temperature 0.16257980197608024 +492 95 optimizer.lr 0.014112477526814198 +492 95 negative_sampler.num_negs_per_pos 84.0 +492 95 training.batch_size 2.0 +492 96 model.embedding_dim 2.0 +492 96 loss.margin 2.421402579577572 +492 96 loss.adversarial_temperature 0.40859055901081376 +492 96 optimizer.lr 0.0018789410304528241 +492 96 negative_sampler.num_negs_per_pos 86.0 +492 96 training.batch_size 0.0 +492 97 model.embedding_dim 0.0 +492 97 loss.margin 26.28905133665473 +492 97 loss.adversarial_temperature 0.3749756857368365 +492 97 optimizer.lr 0.02423527596959086 +492 97 negative_sampler.num_negs_per_pos 52.0 +492 97 training.batch_size 0.0 +492 98 model.embedding_dim 0.0 +492 98 loss.margin 13.814370175632938 +492 98 loss.adversarial_temperature 0.637309365275195 +492 98 optimizer.lr 0.04553266663749283 +492 98 negative_sampler.num_negs_per_pos 5.0 +492 98 training.batch_size 1.0 +492 99 model.embedding_dim 0.0 +492 99 loss.margin 14.816139439527234 +492 99 loss.adversarial_temperature 0.5754436841185401 +492 99 optimizer.lr 0.014580768445739143 +492 99 negative_sampler.num_negs_per_pos 33.0 +492 99 training.batch_size 0.0 +492 100 model.embedding_dim 0.0 +492 100 loss.margin 23.49964496499713 +492 100 
loss.adversarial_temperature 0.8583759094788381 +492 100 optimizer.lr 0.00932525181855281 +492 100 negative_sampler.num_negs_per_pos 55.0 +492 100 training.batch_size 0.0 +492 1 dataset """kinships""" +492 1 model """proje""" +492 1 loss """nssa""" +492 1 regularizer """no""" +492 1 optimizer """adam""" +492 1 training_loop """owa""" +492 1 negative_sampler """basic""" +492 1 evaluator """rankbased""" +492 2 dataset """kinships""" +492 2 model """proje""" +492 2 loss """nssa""" +492 2 regularizer """no""" +492 2 optimizer """adam""" +492 2 training_loop """owa""" +492 2 negative_sampler """basic""" +492 2 evaluator """rankbased""" +492 3 dataset """kinships""" +492 3 model """proje""" +492 3 loss """nssa""" +492 3 regularizer """no""" +492 3 optimizer """adam""" +492 3 training_loop """owa""" +492 3 negative_sampler """basic""" +492 3 evaluator """rankbased""" +492 4 dataset """kinships""" +492 4 model """proje""" +492 4 loss """nssa""" +492 4 regularizer """no""" +492 4 optimizer """adam""" +492 4 training_loop """owa""" +492 4 negative_sampler """basic""" +492 4 evaluator """rankbased""" +492 5 dataset """kinships""" +492 5 model """proje""" +492 5 loss """nssa""" +492 5 regularizer """no""" +492 5 optimizer """adam""" +492 5 training_loop """owa""" +492 5 negative_sampler """basic""" +492 5 evaluator """rankbased""" +492 6 dataset """kinships""" +492 6 model """proje""" +492 6 loss """nssa""" +492 6 regularizer """no""" +492 6 optimizer """adam""" +492 6 training_loop """owa""" +492 6 negative_sampler """basic""" +492 6 evaluator """rankbased""" +492 7 dataset """kinships""" +492 7 model """proje""" +492 7 loss """nssa""" +492 7 regularizer """no""" +492 7 optimizer """adam""" +492 7 training_loop """owa""" +492 7 negative_sampler """basic""" +492 7 evaluator """rankbased""" +492 8 dataset """kinships""" +492 8 model """proje""" +492 8 loss """nssa""" +492 8 regularizer """no""" +492 8 optimizer """adam""" +492 8 training_loop """owa""" +492 8 negative_sampler 
"""basic""" +492 8 evaluator """rankbased""" +492 9 dataset """kinships""" +492 9 model """proje""" +492 9 loss """nssa""" +492 9 regularizer """no""" +492 9 optimizer """adam""" +492 9 training_loop """owa""" +492 9 negative_sampler """basic""" +492 9 evaluator """rankbased""" +492 10 dataset """kinships""" +492 10 model """proje""" +492 10 loss """nssa""" +492 10 regularizer """no""" +492 10 optimizer """adam""" +492 10 training_loop """owa""" +492 10 negative_sampler """basic""" +492 10 evaluator """rankbased""" +492 11 dataset """kinships""" +492 11 model """proje""" +492 11 loss """nssa""" +492 11 regularizer """no""" +492 11 optimizer """adam""" +492 11 training_loop """owa""" +492 11 negative_sampler """basic""" +492 11 evaluator """rankbased""" +492 12 dataset """kinships""" +492 12 model """proje""" +492 12 loss """nssa""" +492 12 regularizer """no""" +492 12 optimizer """adam""" +492 12 training_loop """owa""" +492 12 negative_sampler """basic""" +492 12 evaluator """rankbased""" +492 13 dataset """kinships""" +492 13 model """proje""" +492 13 loss """nssa""" +492 13 regularizer """no""" +492 13 optimizer """adam""" +492 13 training_loop """owa""" +492 13 negative_sampler """basic""" +492 13 evaluator """rankbased""" +492 14 dataset """kinships""" +492 14 model """proje""" +492 14 loss """nssa""" +492 14 regularizer """no""" +492 14 optimizer """adam""" +492 14 training_loop """owa""" +492 14 negative_sampler """basic""" +492 14 evaluator """rankbased""" +492 15 dataset """kinships""" +492 15 model """proje""" +492 15 loss """nssa""" +492 15 regularizer """no""" +492 15 optimizer """adam""" +492 15 training_loop """owa""" +492 15 negative_sampler """basic""" +492 15 evaluator """rankbased""" +492 16 dataset """kinships""" +492 16 model """proje""" +492 16 loss """nssa""" +492 16 regularizer """no""" +492 16 optimizer """adam""" +492 16 training_loop """owa""" +492 16 negative_sampler """basic""" +492 16 evaluator """rankbased""" +492 17 dataset 
"""kinships""" +492 17 model """proje""" +492 17 loss """nssa""" +492 17 regularizer """no""" +492 17 optimizer """adam""" +492 17 training_loop """owa""" +492 17 negative_sampler """basic""" +492 17 evaluator """rankbased""" +492 18 dataset """kinships""" +492 18 model """proje""" +492 18 loss """nssa""" +492 18 regularizer """no""" +492 18 optimizer """adam""" +492 18 training_loop """owa""" +492 18 negative_sampler """basic""" +492 18 evaluator """rankbased""" +492 19 dataset """kinships""" +492 19 model """proje""" +492 19 loss """nssa""" +492 19 regularizer """no""" +492 19 optimizer """adam""" +492 19 training_loop """owa""" +492 19 negative_sampler """basic""" +492 19 evaluator """rankbased""" +492 20 dataset """kinships""" +492 20 model """proje""" +492 20 loss """nssa""" +492 20 regularizer """no""" +492 20 optimizer """adam""" +492 20 training_loop """owa""" +492 20 negative_sampler """basic""" +492 20 evaluator """rankbased""" +492 21 dataset """kinships""" +492 21 model """proje""" +492 21 loss """nssa""" +492 21 regularizer """no""" +492 21 optimizer """adam""" +492 21 training_loop """owa""" +492 21 negative_sampler """basic""" +492 21 evaluator """rankbased""" +492 22 dataset """kinships""" +492 22 model """proje""" +492 22 loss """nssa""" +492 22 regularizer """no""" +492 22 optimizer """adam""" +492 22 training_loop """owa""" +492 22 negative_sampler """basic""" +492 22 evaluator """rankbased""" +492 23 dataset """kinships""" +492 23 model """proje""" +492 23 loss """nssa""" +492 23 regularizer """no""" +492 23 optimizer """adam""" +492 23 training_loop """owa""" +492 23 negative_sampler """basic""" +492 23 evaluator """rankbased""" +492 24 dataset """kinships""" +492 24 model """proje""" +492 24 loss """nssa""" +492 24 regularizer """no""" +492 24 optimizer """adam""" +492 24 training_loop """owa""" +492 24 negative_sampler """basic""" +492 24 evaluator """rankbased""" +492 25 dataset """kinships""" +492 25 model """proje""" +492 25 loss 
"""nssa""" +492 25 regularizer """no""" +492 25 optimizer """adam""" +492 25 training_loop """owa""" +492 25 negative_sampler """basic""" +492 25 evaluator """rankbased""" +492 26 dataset """kinships""" +492 26 model """proje""" +492 26 loss """nssa""" +492 26 regularizer """no""" +492 26 optimizer """adam""" +492 26 training_loop """owa""" +492 26 negative_sampler """basic""" +492 26 evaluator """rankbased""" +492 27 dataset """kinships""" +492 27 model """proje""" +492 27 loss """nssa""" +492 27 regularizer """no""" +492 27 optimizer """adam""" +492 27 training_loop """owa""" +492 27 negative_sampler """basic""" +492 27 evaluator """rankbased""" +492 28 dataset """kinships""" +492 28 model """proje""" +492 28 loss """nssa""" +492 28 regularizer """no""" +492 28 optimizer """adam""" +492 28 training_loop """owa""" +492 28 negative_sampler """basic""" +492 28 evaluator """rankbased""" +492 29 dataset """kinships""" +492 29 model """proje""" +492 29 loss """nssa""" +492 29 regularizer """no""" +492 29 optimizer """adam""" +492 29 training_loop """owa""" +492 29 negative_sampler """basic""" +492 29 evaluator """rankbased""" +492 30 dataset """kinships""" +492 30 model """proje""" +492 30 loss """nssa""" +492 30 regularizer """no""" +492 30 optimizer """adam""" +492 30 training_loop """owa""" +492 30 negative_sampler """basic""" +492 30 evaluator """rankbased""" +492 31 dataset """kinships""" +492 31 model """proje""" +492 31 loss """nssa""" +492 31 regularizer """no""" +492 31 optimizer """adam""" +492 31 training_loop """owa""" +492 31 negative_sampler """basic""" +492 31 evaluator """rankbased""" +492 32 dataset """kinships""" +492 32 model """proje""" +492 32 loss """nssa""" +492 32 regularizer """no""" +492 32 optimizer """adam""" +492 32 training_loop """owa""" +492 32 negative_sampler """basic""" +492 32 evaluator """rankbased""" +492 33 dataset """kinships""" +492 33 model """proje""" +492 33 loss """nssa""" +492 33 regularizer """no""" +492 33 optimizer 
"""adam""" +492 33 training_loop """owa""" +492 33 negative_sampler """basic""" +492 33 evaluator """rankbased""" +492 34 dataset """kinships""" +492 34 model """proje""" +492 34 loss """nssa""" +492 34 regularizer """no""" +492 34 optimizer """adam""" +492 34 training_loop """owa""" +492 34 negative_sampler """basic""" +492 34 evaluator """rankbased""" +492 35 dataset """kinships""" +492 35 model """proje""" +492 35 loss """nssa""" +492 35 regularizer """no""" +492 35 optimizer """adam""" +492 35 training_loop """owa""" +492 35 negative_sampler """basic""" +492 35 evaluator """rankbased""" +492 36 dataset """kinships""" +492 36 model """proje""" +492 36 loss """nssa""" +492 36 regularizer """no""" +492 36 optimizer """adam""" +492 36 training_loop """owa""" +492 36 negative_sampler """basic""" +492 36 evaluator """rankbased""" +492 37 dataset """kinships""" +492 37 model """proje""" +492 37 loss """nssa""" +492 37 regularizer """no""" +492 37 optimizer """adam""" +492 37 training_loop """owa""" +492 37 negative_sampler """basic""" +492 37 evaluator """rankbased""" +492 38 dataset """kinships""" +492 38 model """proje""" +492 38 loss """nssa""" +492 38 regularizer """no""" +492 38 optimizer """adam""" +492 38 training_loop """owa""" +492 38 negative_sampler """basic""" +492 38 evaluator """rankbased""" +492 39 dataset """kinships""" +492 39 model """proje""" +492 39 loss """nssa""" +492 39 regularizer """no""" +492 39 optimizer """adam""" +492 39 training_loop """owa""" +492 39 negative_sampler """basic""" +492 39 evaluator """rankbased""" +492 40 dataset """kinships""" +492 40 model """proje""" +492 40 loss """nssa""" +492 40 regularizer """no""" +492 40 optimizer """adam""" +492 40 training_loop """owa""" +492 40 negative_sampler """basic""" +492 40 evaluator """rankbased""" +492 41 dataset """kinships""" +492 41 model """proje""" +492 41 loss """nssa""" +492 41 regularizer """no""" +492 41 optimizer """adam""" +492 41 training_loop """owa""" +492 41 
negative_sampler """basic""" +492 41 evaluator """rankbased""" +492 42 dataset """kinships""" +492 42 model """proje""" +492 42 loss """nssa""" +492 42 regularizer """no""" +492 42 optimizer """adam""" +492 42 training_loop """owa""" +492 42 negative_sampler """basic""" +492 42 evaluator """rankbased""" +492 43 dataset """kinships""" +492 43 model """proje""" +492 43 loss """nssa""" +492 43 regularizer """no""" +492 43 optimizer """adam""" +492 43 training_loop """owa""" +492 43 negative_sampler """basic""" +492 43 evaluator """rankbased""" +492 44 dataset """kinships""" +492 44 model """proje""" +492 44 loss """nssa""" +492 44 regularizer """no""" +492 44 optimizer """adam""" +492 44 training_loop """owa""" +492 44 negative_sampler """basic""" +492 44 evaluator """rankbased""" +492 45 dataset """kinships""" +492 45 model """proje""" +492 45 loss """nssa""" +492 45 regularizer """no""" +492 45 optimizer """adam""" +492 45 training_loop """owa""" +492 45 negative_sampler """basic""" +492 45 evaluator """rankbased""" +492 46 dataset """kinships""" +492 46 model """proje""" +492 46 loss """nssa""" +492 46 regularizer """no""" +492 46 optimizer """adam""" +492 46 training_loop """owa""" +492 46 negative_sampler """basic""" +492 46 evaluator """rankbased""" +492 47 dataset """kinships""" +492 47 model """proje""" +492 47 loss """nssa""" +492 47 regularizer """no""" +492 47 optimizer """adam""" +492 47 training_loop """owa""" +492 47 negative_sampler """basic""" +492 47 evaluator """rankbased""" +492 48 dataset """kinships""" +492 48 model """proje""" +492 48 loss """nssa""" +492 48 regularizer """no""" +492 48 optimizer """adam""" +492 48 training_loop """owa""" +492 48 negative_sampler """basic""" +492 48 evaluator """rankbased""" +492 49 dataset """kinships""" +492 49 model """proje""" +492 49 loss """nssa""" +492 49 regularizer """no""" +492 49 optimizer """adam""" +492 49 training_loop """owa""" +492 49 negative_sampler """basic""" +492 49 evaluator """rankbased""" 
+492 50 dataset """kinships""" +492 50 model """proje""" +492 50 loss """nssa""" +492 50 regularizer """no""" +492 50 optimizer """adam""" +492 50 training_loop """owa""" +492 50 negative_sampler """basic""" +492 50 evaluator """rankbased""" +492 51 dataset """kinships""" +492 51 model """proje""" +492 51 loss """nssa""" +492 51 regularizer """no""" +492 51 optimizer """adam""" +492 51 training_loop """owa""" +492 51 negative_sampler """basic""" +492 51 evaluator """rankbased""" +492 52 dataset """kinships""" +492 52 model """proje""" +492 52 loss """nssa""" +492 52 regularizer """no""" +492 52 optimizer """adam""" +492 52 training_loop """owa""" +492 52 negative_sampler """basic""" +492 52 evaluator """rankbased""" +492 53 dataset """kinships""" +492 53 model """proje""" +492 53 loss """nssa""" +492 53 regularizer """no""" +492 53 optimizer """adam""" +492 53 training_loop """owa""" +492 53 negative_sampler """basic""" +492 53 evaluator """rankbased""" +492 54 dataset """kinships""" +492 54 model """proje""" +492 54 loss """nssa""" +492 54 regularizer """no""" +492 54 optimizer """adam""" +492 54 training_loop """owa""" +492 54 negative_sampler """basic""" +492 54 evaluator """rankbased""" +492 55 dataset """kinships""" +492 55 model """proje""" +492 55 loss """nssa""" +492 55 regularizer """no""" +492 55 optimizer """adam""" +492 55 training_loop """owa""" +492 55 negative_sampler """basic""" +492 55 evaluator """rankbased""" +492 56 dataset """kinships""" +492 56 model """proje""" +492 56 loss """nssa""" +492 56 regularizer """no""" +492 56 optimizer """adam""" +492 56 training_loop """owa""" +492 56 negative_sampler """basic""" +492 56 evaluator """rankbased""" +492 57 dataset """kinships""" +492 57 model """proje""" +492 57 loss """nssa""" +492 57 regularizer """no""" +492 57 optimizer """adam""" +492 57 training_loop """owa""" +492 57 negative_sampler """basic""" +492 57 evaluator """rankbased""" +492 58 dataset """kinships""" +492 58 model """proje""" +492 
58 loss """nssa""" +492 58 regularizer """no""" +492 58 optimizer """adam""" +492 58 training_loop """owa""" +492 58 negative_sampler """basic""" +492 58 evaluator """rankbased""" +492 59 dataset """kinships""" +492 59 model """proje""" +492 59 loss """nssa""" +492 59 regularizer """no""" +492 59 optimizer """adam""" +492 59 training_loop """owa""" +492 59 negative_sampler """basic""" +492 59 evaluator """rankbased""" +492 60 dataset """kinships""" +492 60 model """proje""" +492 60 loss """nssa""" +492 60 regularizer """no""" +492 60 optimizer """adam""" +492 60 training_loop """owa""" +492 60 negative_sampler """basic""" +492 60 evaluator """rankbased""" +492 61 dataset """kinships""" +492 61 model """proje""" +492 61 loss """nssa""" +492 61 regularizer """no""" +492 61 optimizer """adam""" +492 61 training_loop """owa""" +492 61 negative_sampler """basic""" +492 61 evaluator """rankbased""" +492 62 dataset """kinships""" +492 62 model """proje""" +492 62 loss """nssa""" +492 62 regularizer """no""" +492 62 optimizer """adam""" +492 62 training_loop """owa""" +492 62 negative_sampler """basic""" +492 62 evaluator """rankbased""" +492 63 dataset """kinships""" +492 63 model """proje""" +492 63 loss """nssa""" +492 63 regularizer """no""" +492 63 optimizer """adam""" +492 63 training_loop """owa""" +492 63 negative_sampler """basic""" +492 63 evaluator """rankbased""" +492 64 dataset """kinships""" +492 64 model """proje""" +492 64 loss """nssa""" +492 64 regularizer """no""" +492 64 optimizer """adam""" +492 64 training_loop """owa""" +492 64 negative_sampler """basic""" +492 64 evaluator """rankbased""" +492 65 dataset """kinships""" +492 65 model """proje""" +492 65 loss """nssa""" +492 65 regularizer """no""" +492 65 optimizer """adam""" +492 65 training_loop """owa""" +492 65 negative_sampler """basic""" +492 65 evaluator """rankbased""" +492 66 dataset """kinships""" +492 66 model """proje""" +492 66 loss """nssa""" +492 66 regularizer """no""" +492 66 
optimizer """adam""" +492 66 training_loop """owa""" +492 66 negative_sampler """basic""" +492 66 evaluator """rankbased""" +492 67 dataset """kinships""" +492 67 model """proje""" +492 67 loss """nssa""" +492 67 regularizer """no""" +492 67 optimizer """adam""" +492 67 training_loop """owa""" +492 67 negative_sampler """basic""" +492 67 evaluator """rankbased""" +492 68 dataset """kinships""" +492 68 model """proje""" +492 68 loss """nssa""" +492 68 regularizer """no""" +492 68 optimizer """adam""" +492 68 training_loop """owa""" +492 68 negative_sampler """basic""" +492 68 evaluator """rankbased""" +492 69 dataset """kinships""" +492 69 model """proje""" +492 69 loss """nssa""" +492 69 regularizer """no""" +492 69 optimizer """adam""" +492 69 training_loop """owa""" +492 69 negative_sampler """basic""" +492 69 evaluator """rankbased""" +492 70 dataset """kinships""" +492 70 model """proje""" +492 70 loss """nssa""" +492 70 regularizer """no""" +492 70 optimizer """adam""" +492 70 training_loop """owa""" +492 70 negative_sampler """basic""" +492 70 evaluator """rankbased""" +492 71 dataset """kinships""" +492 71 model """proje""" +492 71 loss """nssa""" +492 71 regularizer """no""" +492 71 optimizer """adam""" +492 71 training_loop """owa""" +492 71 negative_sampler """basic""" +492 71 evaluator """rankbased""" +492 72 dataset """kinships""" +492 72 model """proje""" +492 72 loss """nssa""" +492 72 regularizer """no""" +492 72 optimizer """adam""" +492 72 training_loop """owa""" +492 72 negative_sampler """basic""" +492 72 evaluator """rankbased""" +492 73 dataset """kinships""" +492 73 model """proje""" +492 73 loss """nssa""" +492 73 regularizer """no""" +492 73 optimizer """adam""" +492 73 training_loop """owa""" +492 73 negative_sampler """basic""" +492 73 evaluator """rankbased""" +492 74 dataset """kinships""" +492 74 model """proje""" +492 74 loss """nssa""" +492 74 regularizer """no""" +492 74 optimizer """adam""" +492 74 training_loop """owa""" +492 74 
negative_sampler """basic""" +492 74 evaluator """rankbased""" +492 75 dataset """kinships""" +492 75 model """proje""" +492 75 loss """nssa""" +492 75 regularizer """no""" +492 75 optimizer """adam""" +492 75 training_loop """owa""" +492 75 negative_sampler """basic""" +492 75 evaluator """rankbased""" +492 76 dataset """kinships""" +492 76 model """proje""" +492 76 loss """nssa""" +492 76 regularizer """no""" +492 76 optimizer """adam""" +492 76 training_loop """owa""" +492 76 negative_sampler """basic""" +492 76 evaluator """rankbased""" +492 77 dataset """kinships""" +492 77 model """proje""" +492 77 loss """nssa""" +492 77 regularizer """no""" +492 77 optimizer """adam""" +492 77 training_loop """owa""" +492 77 negative_sampler """basic""" +492 77 evaluator """rankbased""" +492 78 dataset """kinships""" +492 78 model """proje""" +492 78 loss """nssa""" +492 78 regularizer """no""" +492 78 optimizer """adam""" +492 78 training_loop """owa""" +492 78 negative_sampler """basic""" +492 78 evaluator """rankbased""" +492 79 dataset """kinships""" +492 79 model """proje""" +492 79 loss """nssa""" +492 79 regularizer """no""" +492 79 optimizer """adam""" +492 79 training_loop """owa""" +492 79 negative_sampler """basic""" +492 79 evaluator """rankbased""" +492 80 dataset """kinships""" +492 80 model """proje""" +492 80 loss """nssa""" +492 80 regularizer """no""" +492 80 optimizer """adam""" +492 80 training_loop """owa""" +492 80 negative_sampler """basic""" +492 80 evaluator """rankbased""" +492 81 dataset """kinships""" +492 81 model """proje""" +492 81 loss """nssa""" +492 81 regularizer """no""" +492 81 optimizer """adam""" +492 81 training_loop """owa""" +492 81 negative_sampler """basic""" +492 81 evaluator """rankbased""" +492 82 dataset """kinships""" +492 82 model """proje""" +492 82 loss """nssa""" +492 82 regularizer """no""" +492 82 optimizer """adam""" +492 82 training_loop """owa""" +492 82 negative_sampler """basic""" +492 82 evaluator """rankbased""" 
+492 83 dataset """kinships""" +492 83 model """proje""" +492 83 loss """nssa""" +492 83 regularizer """no""" +492 83 optimizer """adam""" +492 83 training_loop """owa""" +492 83 negative_sampler """basic""" +492 83 evaluator """rankbased""" +492 84 dataset """kinships""" +492 84 model """proje""" +492 84 loss """nssa""" +492 84 regularizer """no""" +492 84 optimizer """adam""" +492 84 training_loop """owa""" +492 84 negative_sampler """basic""" +492 84 evaluator """rankbased""" +492 85 dataset """kinships""" +492 85 model """proje""" +492 85 loss """nssa""" +492 85 regularizer """no""" +492 85 optimizer """adam""" +492 85 training_loop """owa""" +492 85 negative_sampler """basic""" +492 85 evaluator """rankbased""" +492 86 dataset """kinships""" +492 86 model """proje""" +492 86 loss """nssa""" +492 86 regularizer """no""" +492 86 optimizer """adam""" +492 86 training_loop """owa""" +492 86 negative_sampler """basic""" +492 86 evaluator """rankbased""" +492 87 dataset """kinships""" +492 87 model """proje""" +492 87 loss """nssa""" +492 87 regularizer """no""" +492 87 optimizer """adam""" +492 87 training_loop """owa""" +492 87 negative_sampler """basic""" +492 87 evaluator """rankbased""" +492 88 dataset """kinships""" +492 88 model """proje""" +492 88 loss """nssa""" +492 88 regularizer """no""" +492 88 optimizer """adam""" +492 88 training_loop """owa""" +492 88 negative_sampler """basic""" +492 88 evaluator """rankbased""" +492 89 dataset """kinships""" +492 89 model """proje""" +492 89 loss """nssa""" +492 89 regularizer """no""" +492 89 optimizer """adam""" +492 89 training_loop """owa""" +492 89 negative_sampler """basic""" +492 89 evaluator """rankbased""" +492 90 dataset """kinships""" +492 90 model """proje""" +492 90 loss """nssa""" +492 90 regularizer """no""" +492 90 optimizer """adam""" +492 90 training_loop """owa""" +492 90 negative_sampler """basic""" +492 90 evaluator """rankbased""" +492 91 dataset """kinships""" +492 91 model """proje""" +492 
91 loss """nssa""" +492 91 regularizer """no""" +492 91 optimizer """adam""" +492 91 training_loop """owa""" +492 91 negative_sampler """basic""" +492 91 evaluator """rankbased""" +492 92 dataset """kinships""" +492 92 model """proje""" +492 92 loss """nssa""" +492 92 regularizer """no""" +492 92 optimizer """adam""" +492 92 training_loop """owa""" +492 92 negative_sampler """basic""" +492 92 evaluator """rankbased""" +492 93 dataset """kinships""" +492 93 model """proje""" +492 93 loss """nssa""" +492 93 regularizer """no""" +492 93 optimizer """adam""" +492 93 training_loop """owa""" +492 93 negative_sampler """basic""" +492 93 evaluator """rankbased""" +492 94 dataset """kinships""" +492 94 model """proje""" +492 94 loss """nssa""" +492 94 regularizer """no""" +492 94 optimizer """adam""" +492 94 training_loop """owa""" +492 94 negative_sampler """basic""" +492 94 evaluator """rankbased""" +492 95 dataset """kinships""" +492 95 model """proje""" +492 95 loss """nssa""" +492 95 regularizer """no""" +492 95 optimizer """adam""" +492 95 training_loop """owa""" +492 95 negative_sampler """basic""" +492 95 evaluator """rankbased""" +492 96 dataset """kinships""" +492 96 model """proje""" +492 96 loss """nssa""" +492 96 regularizer """no""" +492 96 optimizer """adam""" +492 96 training_loop """owa""" +492 96 negative_sampler """basic""" +492 96 evaluator """rankbased""" +492 97 dataset """kinships""" +492 97 model """proje""" +492 97 loss """nssa""" +492 97 regularizer """no""" +492 97 optimizer """adam""" +492 97 training_loop """owa""" +492 97 negative_sampler """basic""" +492 97 evaluator """rankbased""" +492 98 dataset """kinships""" +492 98 model """proje""" +492 98 loss """nssa""" +492 98 regularizer """no""" +492 98 optimizer """adam""" +492 98 training_loop """owa""" +492 98 negative_sampler """basic""" +492 98 evaluator """rankbased""" +492 99 dataset """kinships""" +492 99 model """proje""" +492 99 loss """nssa""" +492 99 regularizer """no""" +492 99 
optimizer """adam""" +492 99 training_loop """owa""" +492 99 negative_sampler """basic""" +492 99 evaluator """rankbased""" +492 100 dataset """kinships""" +492 100 model """proje""" +492 100 loss """nssa""" +492 100 regularizer """no""" +492 100 optimizer """adam""" +492 100 training_loop """owa""" +492 100 negative_sampler """basic""" +492 100 evaluator """rankbased""" +493 1 model.embedding_dim 1.0 +493 1 optimizer.lr 0.04079958392830749 +493 1 training.batch_size 1.0 +493 1 training.label_smoothing 0.03373918826368084 +493 2 model.embedding_dim 1.0 +493 2 optimizer.lr 0.0011041658174154902 +493 2 training.batch_size 1.0 +493 2 training.label_smoothing 0.0034514011532198532 +493 3 model.embedding_dim 1.0 +493 3 optimizer.lr 0.018039695964341015 +493 3 training.batch_size 0.0 +493 3 training.label_smoothing 0.12279330756258632 +493 4 model.embedding_dim 0.0 +493 4 optimizer.lr 0.04470282748636926 +493 4 training.batch_size 0.0 +493 4 training.label_smoothing 0.2826817954293936 +493 5 model.embedding_dim 0.0 +493 5 optimizer.lr 0.002163057429548066 +493 5 training.batch_size 0.0 +493 5 training.label_smoothing 0.06198921726020781 +493 6 model.embedding_dim 2.0 +493 6 optimizer.lr 0.03255624301345676 +493 6 training.batch_size 2.0 +493 6 training.label_smoothing 0.004850913074183162 +493 7 model.embedding_dim 0.0 +493 7 optimizer.lr 0.002998095593683735 +493 7 training.batch_size 2.0 +493 7 training.label_smoothing 0.003405959594326519 +493 8 model.embedding_dim 0.0 +493 8 optimizer.lr 0.012140526454621988 +493 8 training.batch_size 2.0 +493 8 training.label_smoothing 0.003954651635849745 +493 9 model.embedding_dim 2.0 +493 9 optimizer.lr 0.003378499989125594 +493 9 training.batch_size 1.0 +493 9 training.label_smoothing 0.03215281347025269 +493 1 dataset """wn18rr""" +493 1 model """proje""" +493 1 loss """crossentropy""" +493 1 regularizer """no""" +493 1 optimizer """adam""" +493 1 training_loop """lcwa""" +493 1 evaluator """rankbased""" +493 2 dataset 
"""wn18rr""" +493 2 model """proje""" +493 2 loss """crossentropy""" +493 2 regularizer """no""" +493 2 optimizer """adam""" +493 2 training_loop """lcwa""" +493 2 evaluator """rankbased""" +493 3 dataset """wn18rr""" +493 3 model """proje""" +493 3 loss """crossentropy""" +493 3 regularizer """no""" +493 3 optimizer """adam""" +493 3 training_loop """lcwa""" +493 3 evaluator """rankbased""" +493 4 dataset """wn18rr""" +493 4 model """proje""" +493 4 loss """crossentropy""" +493 4 regularizer """no""" +493 4 optimizer """adam""" +493 4 training_loop """lcwa""" +493 4 evaluator """rankbased""" +493 5 dataset """wn18rr""" +493 5 model """proje""" +493 5 loss """crossentropy""" +493 5 regularizer """no""" +493 5 optimizer """adam""" +493 5 training_loop """lcwa""" +493 5 evaluator """rankbased""" +493 6 dataset """wn18rr""" +493 6 model """proje""" +493 6 loss """crossentropy""" +493 6 regularizer """no""" +493 6 optimizer """adam""" +493 6 training_loop """lcwa""" +493 6 evaluator """rankbased""" +493 7 dataset """wn18rr""" +493 7 model """proje""" +493 7 loss """crossentropy""" +493 7 regularizer """no""" +493 7 optimizer """adam""" +493 7 training_loop """lcwa""" +493 7 evaluator """rankbased""" +493 8 dataset """wn18rr""" +493 8 model """proje""" +493 8 loss """crossentropy""" +493 8 regularizer """no""" +493 8 optimizer """adam""" +493 8 training_loop """lcwa""" +493 8 evaluator """rankbased""" +493 9 dataset """wn18rr""" +493 9 model """proje""" +493 9 loss """crossentropy""" +493 9 regularizer """no""" +493 9 optimizer """adam""" +493 9 training_loop """lcwa""" +493 9 evaluator """rankbased""" +494 1 model.embedding_dim 2.0 +494 1 optimizer.lr 0.07367262583662837 +494 1 training.batch_size 0.0 +494 1 training.label_smoothing 0.007762266567048834 +494 2 model.embedding_dim 2.0 +494 2 optimizer.lr 0.0599467331492327 +494 2 training.batch_size 2.0 +494 2 training.label_smoothing 0.007806117582360713 +494 3 model.embedding_dim 0.0 +494 3 optimizer.lr 
0.001275104035548806 +494 3 training.batch_size 1.0 +494 3 training.label_smoothing 0.02274906066036175 +494 4 model.embedding_dim 1.0 +494 4 optimizer.lr 0.00914985743476624 +494 4 training.batch_size 2.0 +494 4 training.label_smoothing 0.029221495129236226 +494 5 model.embedding_dim 1.0 +494 5 optimizer.lr 0.0022452586623068703 +494 5 training.batch_size 1.0 +494 5 training.label_smoothing 0.5085953492917623 +494 6 model.embedding_dim 1.0 +494 6 optimizer.lr 0.015066137197800757 +494 6 training.batch_size 2.0 +494 6 training.label_smoothing 0.020020735768757607 +494 7 model.embedding_dim 2.0 +494 7 optimizer.lr 0.02109468201536105 +494 7 training.batch_size 0.0 +494 7 training.label_smoothing 0.2702050061594712 +494 8 model.embedding_dim 0.0 +494 8 optimizer.lr 0.0015441550027217473 +494 8 training.batch_size 0.0 +494 8 training.label_smoothing 0.04024696436320059 +494 9 model.embedding_dim 1.0 +494 9 optimizer.lr 0.0017370135193211782 +494 9 training.batch_size 1.0 +494 9 training.label_smoothing 0.2519657376022511 +494 10 model.embedding_dim 2.0 +494 10 optimizer.lr 0.040655565130313695 +494 10 training.batch_size 2.0 +494 10 training.label_smoothing 0.08048390759448501 +494 11 model.embedding_dim 2.0 +494 11 optimizer.lr 0.0027594337276102467 +494 11 training.batch_size 0.0 +494 11 training.label_smoothing 0.06401454935832385 +494 12 model.embedding_dim 2.0 +494 12 optimizer.lr 0.0021222693468428582 +494 12 training.batch_size 2.0 +494 12 training.label_smoothing 0.052512721147543795 +494 13 model.embedding_dim 1.0 +494 13 optimizer.lr 0.01123674643622012 +494 13 training.batch_size 0.0 +494 13 training.label_smoothing 0.034278146390238046 +494 1 dataset """wn18rr""" +494 1 model """proje""" +494 1 loss """crossentropy""" +494 1 regularizer """no""" +494 1 optimizer """adam""" +494 1 training_loop """lcwa""" +494 1 evaluator """rankbased""" +494 2 dataset """wn18rr""" +494 2 model """proje""" +494 2 loss """crossentropy""" +494 2 regularizer """no""" +494 2 
optimizer """adam""" +494 2 training_loop """lcwa""" +494 2 evaluator """rankbased""" +494 3 dataset """wn18rr""" +494 3 model """proje""" +494 3 loss """crossentropy""" +494 3 regularizer """no""" +494 3 optimizer """adam""" +494 3 training_loop """lcwa""" +494 3 evaluator """rankbased""" +494 4 dataset """wn18rr""" +494 4 model """proje""" +494 4 loss """crossentropy""" +494 4 regularizer """no""" +494 4 optimizer """adam""" +494 4 training_loop """lcwa""" +494 4 evaluator """rankbased""" +494 5 dataset """wn18rr""" +494 5 model """proje""" +494 5 loss """crossentropy""" +494 5 regularizer """no""" +494 5 optimizer """adam""" +494 5 training_loop """lcwa""" +494 5 evaluator """rankbased""" +494 6 dataset """wn18rr""" +494 6 model """proje""" +494 6 loss """crossentropy""" +494 6 regularizer """no""" +494 6 optimizer """adam""" +494 6 training_loop """lcwa""" +494 6 evaluator """rankbased""" +494 7 dataset """wn18rr""" +494 7 model """proje""" +494 7 loss """crossentropy""" +494 7 regularizer """no""" +494 7 optimizer """adam""" +494 7 training_loop """lcwa""" +494 7 evaluator """rankbased""" +494 8 dataset """wn18rr""" +494 8 model """proje""" +494 8 loss """crossentropy""" +494 8 regularizer """no""" +494 8 optimizer """adam""" +494 8 training_loop """lcwa""" +494 8 evaluator """rankbased""" +494 9 dataset """wn18rr""" +494 9 model """proje""" +494 9 loss """crossentropy""" +494 9 regularizer """no""" +494 9 optimizer """adam""" +494 9 training_loop """lcwa""" +494 9 evaluator """rankbased""" +494 10 dataset """wn18rr""" +494 10 model """proje""" +494 10 loss """crossentropy""" +494 10 regularizer """no""" +494 10 optimizer """adam""" +494 10 training_loop """lcwa""" +494 10 evaluator """rankbased""" +494 11 dataset """wn18rr""" +494 11 model """proje""" +494 11 loss """crossentropy""" +494 11 regularizer """no""" +494 11 optimizer """adam""" +494 11 training_loop """lcwa""" +494 11 evaluator """rankbased""" +494 12 dataset """wn18rr""" +494 12 model """proje""" 
+494 12 loss """crossentropy""" +494 12 regularizer """no""" +494 12 optimizer """adam""" +494 12 training_loop """lcwa""" +494 12 evaluator """rankbased""" +494 13 dataset """wn18rr""" +494 13 model """proje""" +494 13 loss """crossentropy""" +494 13 regularizer """no""" +494 13 optimizer """adam""" +494 13 training_loop """lcwa""" +494 13 evaluator """rankbased""" +495 1 model.embedding_dim 0.0 +495 1 loss.margin 6.843394962963038 +495 1 optimizer.lr 0.004007031521866651 +495 1 negative_sampler.num_negs_per_pos 12.0 +495 1 training.batch_size 0.0 +495 2 model.embedding_dim 2.0 +495 2 loss.margin 4.489970452891271 +495 2 optimizer.lr 0.006836413063395384 +495 2 negative_sampler.num_negs_per_pos 68.0 +495 2 training.batch_size 0.0 +495 3 model.embedding_dim 1.0 +495 3 loss.margin 6.654628972171464 +495 3 optimizer.lr 0.01906089216778971 +495 3 negative_sampler.num_negs_per_pos 89.0 +495 3 training.batch_size 2.0 +495 4 model.embedding_dim 0.0 +495 4 loss.margin 4.58442424137603 +495 4 optimizer.lr 0.08204148388905858 +495 4 negative_sampler.num_negs_per_pos 17.0 +495 4 training.batch_size 2.0 +495 5 model.embedding_dim 2.0 +495 5 loss.margin 1.1969891652452649 +495 5 optimizer.lr 0.00575269123257397 +495 5 negative_sampler.num_negs_per_pos 93.0 +495 5 training.batch_size 1.0 +495 6 model.embedding_dim 0.0 +495 6 loss.margin 7.830064121481744 +495 6 optimizer.lr 0.006196511567489536 +495 6 negative_sampler.num_negs_per_pos 28.0 +495 6 training.batch_size 1.0 +495 7 model.embedding_dim 2.0 +495 7 loss.margin 7.626162000239446 +495 7 optimizer.lr 0.00445400435578295 +495 7 negative_sampler.num_negs_per_pos 12.0 +495 7 training.batch_size 0.0 +495 8 model.embedding_dim 1.0 +495 8 loss.margin 6.058113951611022 +495 8 optimizer.lr 0.004938807560147226 +495 8 negative_sampler.num_negs_per_pos 81.0 +495 8 training.batch_size 0.0 +495 9 model.embedding_dim 2.0 +495 9 loss.margin 7.193399861658023 +495 9 optimizer.lr 0.05712522654611176 +495 9 
negative_sampler.num_negs_per_pos 79.0 +495 9 training.batch_size 1.0 +495 10 model.embedding_dim 1.0 +495 10 loss.margin 6.907770876735676 +495 10 optimizer.lr 0.018313395363967686 +495 10 negative_sampler.num_negs_per_pos 26.0 +495 10 training.batch_size 0.0 +495 11 model.embedding_dim 2.0 +495 11 loss.margin 8.721904428277004 +495 11 optimizer.lr 0.05948213063914318 +495 11 negative_sampler.num_negs_per_pos 41.0 +495 11 training.batch_size 1.0 +495 12 model.embedding_dim 0.0 +495 12 loss.margin 3.2131229736678786 +495 12 optimizer.lr 0.0023458428984339864 +495 12 negative_sampler.num_negs_per_pos 41.0 +495 12 training.batch_size 1.0 +495 13 model.embedding_dim 1.0 +495 13 loss.margin 4.446014673146039 +495 13 optimizer.lr 0.03804898440963133 +495 13 negative_sampler.num_negs_per_pos 77.0 +495 13 training.batch_size 0.0 +495 14 model.embedding_dim 1.0 +495 14 loss.margin 1.9980524688642762 +495 14 optimizer.lr 0.008041034728519825 +495 14 negative_sampler.num_negs_per_pos 94.0 +495 14 training.batch_size 2.0 +495 15 model.embedding_dim 2.0 +495 15 loss.margin 3.801459954350847 +495 15 optimizer.lr 0.0037163029217545476 +495 15 negative_sampler.num_negs_per_pos 33.0 +495 15 training.batch_size 0.0 +495 16 model.embedding_dim 0.0 +495 16 loss.margin 2.0530080637977646 +495 16 optimizer.lr 0.0011055048228562226 +495 16 negative_sampler.num_negs_per_pos 10.0 +495 16 training.batch_size 2.0 +495 17 model.embedding_dim 0.0 +495 17 loss.margin 5.061265846922868 +495 17 optimizer.lr 0.002557634819590086 +495 17 negative_sampler.num_negs_per_pos 80.0 +495 17 training.batch_size 0.0 +495 18 model.embedding_dim 0.0 +495 18 loss.margin 2.263657033601075 +495 18 optimizer.lr 0.0011088597742835947 +495 18 negative_sampler.num_negs_per_pos 30.0 +495 18 training.batch_size 2.0 +495 19 model.embedding_dim 1.0 +495 19 loss.margin 1.8198516224239645 +495 19 optimizer.lr 0.009014385970248775 +495 19 negative_sampler.num_negs_per_pos 9.0 +495 19 training.batch_size 2.0 +495 20 
model.embedding_dim 2.0 +495 20 loss.margin 5.803660523714975 +495 20 optimizer.lr 0.05912038290574097 +495 20 negative_sampler.num_negs_per_pos 46.0 +495 20 training.batch_size 1.0 +495 21 model.embedding_dim 2.0 +495 21 loss.margin 7.316317891871999 +495 21 optimizer.lr 0.06133891876566004 +495 21 negative_sampler.num_negs_per_pos 15.0 +495 21 training.batch_size 1.0 +495 22 model.embedding_dim 2.0 +495 22 loss.margin 7.617299622655917 +495 22 optimizer.lr 0.008132289262285312 +495 22 negative_sampler.num_negs_per_pos 40.0 +495 22 training.batch_size 2.0 +495 23 model.embedding_dim 0.0 +495 23 loss.margin 6.017571594826534 +495 23 optimizer.lr 0.010428765226515667 +495 23 negative_sampler.num_negs_per_pos 0.0 +495 23 training.batch_size 1.0 +495 24 model.embedding_dim 2.0 +495 24 loss.margin 5.22281978753712 +495 24 optimizer.lr 0.02546671260642847 +495 24 negative_sampler.num_negs_per_pos 9.0 +495 24 training.batch_size 0.0 +495 25 model.embedding_dim 2.0 +495 25 loss.margin 2.304016123590323 +495 25 optimizer.lr 0.0010211569808843376 +495 25 negative_sampler.num_negs_per_pos 0.0 +495 25 training.batch_size 1.0 +495 26 model.embedding_dim 1.0 +495 26 loss.margin 3.729150061815996 +495 26 optimizer.lr 0.07431252355579991 +495 26 negative_sampler.num_negs_per_pos 72.0 +495 26 training.batch_size 1.0 +495 27 model.embedding_dim 2.0 +495 27 loss.margin 7.062429688933742 +495 27 optimizer.lr 0.049520171873222045 +495 27 negative_sampler.num_negs_per_pos 24.0 +495 27 training.batch_size 2.0 +495 28 model.embedding_dim 0.0 +495 28 loss.margin 1.427268309174195 +495 28 optimizer.lr 0.0053090774680802436 +495 28 negative_sampler.num_negs_per_pos 84.0 +495 28 training.batch_size 1.0 +495 29 model.embedding_dim 2.0 +495 29 loss.margin 7.735889803445169 +495 29 optimizer.lr 0.001486001053010697 +495 29 negative_sampler.num_negs_per_pos 62.0 +495 29 training.batch_size 0.0 +495 30 model.embedding_dim 0.0 +495 30 loss.margin 9.736007355792603 +495 30 optimizer.lr 
0.009388673380117207 +495 30 negative_sampler.num_negs_per_pos 2.0 +495 30 training.batch_size 1.0 +495 31 model.embedding_dim 2.0 +495 31 loss.margin 1.2339554547776448 +495 31 optimizer.lr 0.005432632745468849 +495 31 negative_sampler.num_negs_per_pos 74.0 +495 31 training.batch_size 0.0 +495 32 model.embedding_dim 2.0 +495 32 loss.margin 2.374623851295657 +495 32 optimizer.lr 0.0013568522056224878 +495 32 negative_sampler.num_negs_per_pos 91.0 +495 32 training.batch_size 1.0 +495 33 model.embedding_dim 2.0 +495 33 loss.margin 8.836669723038236 +495 33 optimizer.lr 0.07451504282501001 +495 33 negative_sampler.num_negs_per_pos 95.0 +495 33 training.batch_size 2.0 +495 34 model.embedding_dim 1.0 +495 34 loss.margin 3.065379655453961 +495 34 optimizer.lr 0.018950017835363757 +495 34 negative_sampler.num_negs_per_pos 24.0 +495 34 training.batch_size 1.0 +495 1 dataset """wn18rr""" +495 1 model """proje""" +495 1 loss """marginranking""" +495 1 regularizer """no""" +495 1 optimizer """adam""" +495 1 training_loop """owa""" +495 1 negative_sampler """basic""" +495 1 evaluator """rankbased""" +495 2 dataset """wn18rr""" +495 2 model """proje""" +495 2 loss """marginranking""" +495 2 regularizer """no""" +495 2 optimizer """adam""" +495 2 training_loop """owa""" +495 2 negative_sampler """basic""" +495 2 evaluator """rankbased""" +495 3 dataset """wn18rr""" +495 3 model """proje""" +495 3 loss """marginranking""" +495 3 regularizer """no""" +495 3 optimizer """adam""" +495 3 training_loop """owa""" +495 3 negative_sampler """basic""" +495 3 evaluator """rankbased""" +495 4 dataset """wn18rr""" +495 4 model """proje""" +495 4 loss """marginranking""" +495 4 regularizer """no""" +495 4 optimizer """adam""" +495 4 training_loop """owa""" +495 4 negative_sampler """basic""" +495 4 evaluator """rankbased""" +495 5 dataset """wn18rr""" +495 5 model """proje""" +495 5 loss """marginranking""" +495 5 regularizer """no""" +495 5 optimizer """adam""" +495 5 training_loop """owa""" 
+495 5 negative_sampler """basic""" +495 5 evaluator """rankbased""" +495 6 dataset """wn18rr""" +495 6 model """proje""" +495 6 loss """marginranking""" +495 6 regularizer """no""" +495 6 optimizer """adam""" +495 6 training_loop """owa""" +495 6 negative_sampler """basic""" +495 6 evaluator """rankbased""" +495 7 dataset """wn18rr""" +495 7 model """proje""" +495 7 loss """marginranking""" +495 7 regularizer """no""" +495 7 optimizer """adam""" +495 7 training_loop """owa""" +495 7 negative_sampler """basic""" +495 7 evaluator """rankbased""" +495 8 dataset """wn18rr""" +495 8 model """proje""" +495 8 loss """marginranking""" +495 8 regularizer """no""" +495 8 optimizer """adam""" +495 8 training_loop """owa""" +495 8 negative_sampler """basic""" +495 8 evaluator """rankbased""" +495 9 dataset """wn18rr""" +495 9 model """proje""" +495 9 loss """marginranking""" +495 9 regularizer """no""" +495 9 optimizer """adam""" +495 9 training_loop """owa""" +495 9 negative_sampler """basic""" +495 9 evaluator """rankbased""" +495 10 dataset """wn18rr""" +495 10 model """proje""" +495 10 loss """marginranking""" +495 10 regularizer """no""" +495 10 optimizer """adam""" +495 10 training_loop """owa""" +495 10 negative_sampler """basic""" +495 10 evaluator """rankbased""" +495 11 dataset """wn18rr""" +495 11 model """proje""" +495 11 loss """marginranking""" +495 11 regularizer """no""" +495 11 optimizer """adam""" +495 11 training_loop """owa""" +495 11 negative_sampler """basic""" +495 11 evaluator """rankbased""" +495 12 dataset """wn18rr""" +495 12 model """proje""" +495 12 loss """marginranking""" +495 12 regularizer """no""" +495 12 optimizer """adam""" +495 12 training_loop """owa""" +495 12 negative_sampler """basic""" +495 12 evaluator """rankbased""" +495 13 dataset """wn18rr""" +495 13 model """proje""" +495 13 loss """marginranking""" +495 13 regularizer """no""" +495 13 optimizer """adam""" +495 13 training_loop """owa""" +495 13 negative_sampler """basic""" +495 
13 evaluator """rankbased""" +495 14 dataset """wn18rr""" +495 14 model """proje""" +495 14 loss """marginranking""" +495 14 regularizer """no""" +495 14 optimizer """adam""" +495 14 training_loop """owa""" +495 14 negative_sampler """basic""" +495 14 evaluator """rankbased""" +495 15 dataset """wn18rr""" +495 15 model """proje""" +495 15 loss """marginranking""" +495 15 regularizer """no""" +495 15 optimizer """adam""" +495 15 training_loop """owa""" +495 15 negative_sampler """basic""" +495 15 evaluator """rankbased""" +495 16 dataset """wn18rr""" +495 16 model """proje""" +495 16 loss """marginranking""" +495 16 regularizer """no""" +495 16 optimizer """adam""" +495 16 training_loop """owa""" +495 16 negative_sampler """basic""" +495 16 evaluator """rankbased""" +495 17 dataset """wn18rr""" +495 17 model """proje""" +495 17 loss """marginranking""" +495 17 regularizer """no""" +495 17 optimizer """adam""" +495 17 training_loop """owa""" +495 17 negative_sampler """basic""" +495 17 evaluator """rankbased""" +495 18 dataset """wn18rr""" +495 18 model """proje""" +495 18 loss """marginranking""" +495 18 regularizer """no""" +495 18 optimizer """adam""" +495 18 training_loop """owa""" +495 18 negative_sampler """basic""" +495 18 evaluator """rankbased""" +495 19 dataset """wn18rr""" +495 19 model """proje""" +495 19 loss """marginranking""" +495 19 regularizer """no""" +495 19 optimizer """adam""" +495 19 training_loop """owa""" +495 19 negative_sampler """basic""" +495 19 evaluator """rankbased""" +495 20 dataset """wn18rr""" +495 20 model """proje""" +495 20 loss """marginranking""" +495 20 regularizer """no""" +495 20 optimizer """adam""" +495 20 training_loop """owa""" +495 20 negative_sampler """basic""" +495 20 evaluator """rankbased""" +495 21 dataset """wn18rr""" +495 21 model """proje""" +495 21 loss """marginranking""" +495 21 regularizer """no""" +495 21 optimizer """adam""" +495 21 training_loop """owa""" +495 21 negative_sampler """basic""" +495 21 
evaluator """rankbased""" +495 22 dataset """wn18rr""" +495 22 model """proje""" +495 22 loss """marginranking""" +495 22 regularizer """no""" +495 22 optimizer """adam""" +495 22 training_loop """owa""" +495 22 negative_sampler """basic""" +495 22 evaluator """rankbased""" +495 23 dataset """wn18rr""" +495 23 model """proje""" +495 23 loss """marginranking""" +495 23 regularizer """no""" +495 23 optimizer """adam""" +495 23 training_loop """owa""" +495 23 negative_sampler """basic""" +495 23 evaluator """rankbased""" +495 24 dataset """wn18rr""" +495 24 model """proje""" +495 24 loss """marginranking""" +495 24 regularizer """no""" +495 24 optimizer """adam""" +495 24 training_loop """owa""" +495 24 negative_sampler """basic""" +495 24 evaluator """rankbased""" +495 25 dataset """wn18rr""" +495 25 model """proje""" +495 25 loss """marginranking""" +495 25 regularizer """no""" +495 25 optimizer """adam""" +495 25 training_loop """owa""" +495 25 negative_sampler """basic""" +495 25 evaluator """rankbased""" +495 26 dataset """wn18rr""" +495 26 model """proje""" +495 26 loss """marginranking""" +495 26 regularizer """no""" +495 26 optimizer """adam""" +495 26 training_loop """owa""" +495 26 negative_sampler """basic""" +495 26 evaluator """rankbased""" +495 27 dataset """wn18rr""" +495 27 model """proje""" +495 27 loss """marginranking""" +495 27 regularizer """no""" +495 27 optimizer """adam""" +495 27 training_loop """owa""" +495 27 negative_sampler """basic""" +495 27 evaluator """rankbased""" +495 28 dataset """wn18rr""" +495 28 model """proje""" +495 28 loss """marginranking""" +495 28 regularizer """no""" +495 28 optimizer """adam""" +495 28 training_loop """owa""" +495 28 negative_sampler """basic""" +495 28 evaluator """rankbased""" +495 29 dataset """wn18rr""" +495 29 model """proje""" +495 29 loss """marginranking""" +495 29 regularizer """no""" +495 29 optimizer """adam""" +495 29 training_loop """owa""" +495 29 negative_sampler """basic""" +495 29 
evaluator """rankbased""" +495 30 dataset """wn18rr""" +495 30 model """proje""" +495 30 loss """marginranking""" +495 30 regularizer """no""" +495 30 optimizer """adam""" +495 30 training_loop """owa""" +495 30 negative_sampler """basic""" +495 30 evaluator """rankbased""" +495 31 dataset """wn18rr""" +495 31 model """proje""" +495 31 loss """marginranking""" +495 31 regularizer """no""" +495 31 optimizer """adam""" +495 31 training_loop """owa""" +495 31 negative_sampler """basic""" +495 31 evaluator """rankbased""" +495 32 dataset """wn18rr""" +495 32 model """proje""" +495 32 loss """marginranking""" +495 32 regularizer """no""" +495 32 optimizer """adam""" +495 32 training_loop """owa""" +495 32 negative_sampler """basic""" +495 32 evaluator """rankbased""" +495 33 dataset """wn18rr""" +495 33 model """proje""" +495 33 loss """marginranking""" +495 33 regularizer """no""" +495 33 optimizer """adam""" +495 33 training_loop """owa""" +495 33 negative_sampler """basic""" +495 33 evaluator """rankbased""" +495 34 dataset """wn18rr""" +495 34 model """proje""" +495 34 loss """marginranking""" +495 34 regularizer """no""" +495 34 optimizer """adam""" +495 34 training_loop """owa""" +495 34 negative_sampler """basic""" +495 34 evaluator """rankbased""" +496 1 model.embedding_dim 2.0 +496 1 loss.margin 3.0319395001999814 +496 1 optimizer.lr 0.004429175561522929 +496 1 negative_sampler.num_negs_per_pos 50.0 +496 1 training.batch_size 0.0 +496 2 model.embedding_dim 1.0 +496 2 loss.margin 5.981821755962278 +496 2 optimizer.lr 0.0011478036943571312 +496 2 negative_sampler.num_negs_per_pos 65.0 +496 2 training.batch_size 0.0 +496 3 model.embedding_dim 1.0 +496 3 loss.margin 7.0743721259374 +496 3 optimizer.lr 0.0010840666312353655 +496 3 negative_sampler.num_negs_per_pos 77.0 +496 3 training.batch_size 1.0 +496 4 model.embedding_dim 2.0 +496 4 loss.margin 3.615860827056634 +496 4 optimizer.lr 0.017550473951066573 +496 4 negative_sampler.num_negs_per_pos 90.0 +496 4 
training.batch_size 2.0 +496 5 model.embedding_dim 2.0 +496 5 loss.margin 1.71947346906009 +496 5 optimizer.lr 0.00418176573736859 +496 5 negative_sampler.num_negs_per_pos 15.0 +496 5 training.batch_size 2.0 +496 6 model.embedding_dim 0.0 +496 6 loss.margin 3.2246302512305607 +496 6 optimizer.lr 0.0015460014130943655 +496 6 negative_sampler.num_negs_per_pos 66.0 +496 6 training.batch_size 1.0 +496 7 model.embedding_dim 0.0 +496 7 loss.margin 5.974769970903513 +496 7 optimizer.lr 0.00836084056074773 +496 7 negative_sampler.num_negs_per_pos 89.0 +496 7 training.batch_size 1.0 +496 8 model.embedding_dim 0.0 +496 8 loss.margin 8.011570846667032 +496 8 optimizer.lr 0.0018315553283942713 +496 8 negative_sampler.num_negs_per_pos 99.0 +496 8 training.batch_size 0.0 +496 9 model.embedding_dim 0.0 +496 9 loss.margin 4.84428777145327 +496 9 optimizer.lr 0.04504238606717124 +496 9 negative_sampler.num_negs_per_pos 31.0 +496 9 training.batch_size 0.0 +496 10 model.embedding_dim 0.0 +496 10 loss.margin 6.1701808436834815 +496 10 optimizer.lr 0.04236274228022916 +496 10 negative_sampler.num_negs_per_pos 9.0 +496 10 training.batch_size 2.0 +496 11 model.embedding_dim 1.0 +496 11 loss.margin 8.629589999187134 +496 11 optimizer.lr 0.009393541170620504 +496 11 negative_sampler.num_negs_per_pos 51.0 +496 11 training.batch_size 2.0 +496 12 model.embedding_dim 2.0 +496 12 loss.margin 3.698126120309783 +496 12 optimizer.lr 0.08795452526031537 +496 12 negative_sampler.num_negs_per_pos 30.0 +496 12 training.batch_size 1.0 +496 13 model.embedding_dim 1.0 +496 13 loss.margin 8.409069924290874 +496 13 optimizer.lr 0.0016073622777889075 +496 13 negative_sampler.num_negs_per_pos 64.0 +496 13 training.batch_size 0.0 +496 14 model.embedding_dim 0.0 +496 14 loss.margin 5.599596076298987 +496 14 optimizer.lr 0.002531941082391163 +496 14 negative_sampler.num_negs_per_pos 51.0 +496 14 training.batch_size 1.0 +496 15 model.embedding_dim 2.0 +496 15 loss.margin 9.90616572202168 +496 15 optimizer.lr 
0.07725203549221288 +496 15 negative_sampler.num_negs_per_pos 54.0 +496 15 training.batch_size 1.0 +496 16 model.embedding_dim 0.0 +496 16 loss.margin 3.062521334341953 +496 16 optimizer.lr 0.0049628409038775766 +496 16 negative_sampler.num_negs_per_pos 98.0 +496 16 training.batch_size 1.0 +496 17 model.embedding_dim 2.0 +496 17 loss.margin 0.9731747415187753 +496 17 optimizer.lr 0.0024147982128044023 +496 17 negative_sampler.num_negs_per_pos 42.0 +496 17 training.batch_size 0.0 +496 18 model.embedding_dim 2.0 +496 18 loss.margin 7.059968464709393 +496 18 optimizer.lr 0.08471399708575897 +496 18 negative_sampler.num_negs_per_pos 17.0 +496 18 training.batch_size 2.0 +496 19 model.embedding_dim 2.0 +496 19 loss.margin 6.7587435027391 +496 19 optimizer.lr 0.009618806093389252 +496 19 negative_sampler.num_negs_per_pos 46.0 +496 19 training.batch_size 0.0 +496 20 model.embedding_dim 0.0 +496 20 loss.margin 1.3387558959002868 +496 20 optimizer.lr 0.06397579582582053 +496 20 negative_sampler.num_negs_per_pos 16.0 +496 20 training.batch_size 1.0 +496 21 model.embedding_dim 0.0 +496 21 loss.margin 7.518653720200095 +496 21 optimizer.lr 0.0017336386582735323 +496 21 negative_sampler.num_negs_per_pos 48.0 +496 21 training.batch_size 0.0 +496 22 model.embedding_dim 0.0 +496 22 loss.margin 1.1451962124415642 +496 22 optimizer.lr 0.002931565653883023 +496 22 negative_sampler.num_negs_per_pos 49.0 +496 22 training.batch_size 1.0 +496 23 model.embedding_dim 2.0 +496 23 loss.margin 4.8980031269145305 +496 23 optimizer.lr 0.0022933024416190227 +496 23 negative_sampler.num_negs_per_pos 34.0 +496 23 training.batch_size 0.0 +496 24 model.embedding_dim 1.0 +496 24 loss.margin 1.2364861076710458 +496 24 optimizer.lr 0.009413179356295489 +496 24 negative_sampler.num_negs_per_pos 78.0 +496 24 training.batch_size 0.0 +496 25 model.embedding_dim 0.0 +496 25 loss.margin 5.71838993137051 +496 25 optimizer.lr 0.004192178109270676 +496 25 negative_sampler.num_negs_per_pos 30.0 +496 25 
training.batch_size 1.0 +496 26 model.embedding_dim 2.0 +496 26 loss.margin 0.9735793799703246 +496 26 optimizer.lr 0.007710331249347707 +496 26 negative_sampler.num_negs_per_pos 17.0 +496 26 training.batch_size 0.0 +496 27 model.embedding_dim 0.0 +496 27 loss.margin 7.778692160672862 +496 27 optimizer.lr 0.07991905754404094 +496 27 negative_sampler.num_negs_per_pos 52.0 +496 27 training.batch_size 2.0 +496 28 model.embedding_dim 0.0 +496 28 loss.margin 8.5506486893957 +496 28 optimizer.lr 0.0035152713032415425 +496 28 negative_sampler.num_negs_per_pos 96.0 +496 28 training.batch_size 2.0 +496 29 model.embedding_dim 2.0 +496 29 loss.margin 2.628090709198561 +496 29 optimizer.lr 0.038554358789783034 +496 29 negative_sampler.num_negs_per_pos 17.0 +496 29 training.batch_size 1.0 +496 30 model.embedding_dim 0.0 +496 30 loss.margin 1.2012221478098428 +496 30 optimizer.lr 0.011720244140655308 +496 30 negative_sampler.num_negs_per_pos 38.0 +496 30 training.batch_size 0.0 +496 31 model.embedding_dim 0.0 +496 31 loss.margin 8.441589834150397 +496 31 optimizer.lr 0.0436599172160161 +496 31 negative_sampler.num_negs_per_pos 67.0 +496 31 training.batch_size 2.0 +496 32 model.embedding_dim 0.0 +496 32 loss.margin 3.7330463768633866 +496 32 optimizer.lr 0.01061299676391656 +496 32 negative_sampler.num_negs_per_pos 49.0 +496 32 training.batch_size 1.0 +496 33 model.embedding_dim 2.0 +496 33 loss.margin 2.5517828188020166 +496 33 optimizer.lr 0.0020619952126777565 +496 33 negative_sampler.num_negs_per_pos 42.0 +496 33 training.batch_size 1.0 +496 34 model.embedding_dim 1.0 +496 34 loss.margin 5.387762933477117 +496 34 optimizer.lr 0.0030037638273301827 +496 34 negative_sampler.num_negs_per_pos 13.0 +496 34 training.batch_size 1.0 +496 35 model.embedding_dim 0.0 +496 35 loss.margin 2.4664186042256464 +496 35 optimizer.lr 0.010926298681984513 +496 35 negative_sampler.num_negs_per_pos 86.0 +496 35 training.batch_size 1.0 +496 36 model.embedding_dim 1.0 +496 36 loss.margin 
1.421514782082594 +496 36 optimizer.lr 0.04476520389161643 +496 36 negative_sampler.num_negs_per_pos 12.0 +496 36 training.batch_size 1.0 +496 37 model.embedding_dim 2.0 +496 37 loss.margin 3.4991817805958236 +496 37 optimizer.lr 0.01927637340929967 +496 37 negative_sampler.num_negs_per_pos 9.0 +496 37 training.batch_size 1.0 +496 38 model.embedding_dim 0.0 +496 38 loss.margin 8.4333368778388 +496 38 optimizer.lr 0.003976201833699384 +496 38 negative_sampler.num_negs_per_pos 2.0 +496 38 training.batch_size 2.0 +496 39 model.embedding_dim 2.0 +496 39 loss.margin 1.3857381570746705 +496 39 optimizer.lr 0.0076154794673221286 +496 39 negative_sampler.num_negs_per_pos 2.0 +496 39 training.batch_size 1.0 +496 40 model.embedding_dim 1.0 +496 40 loss.margin 1.464158390435844 +496 40 optimizer.lr 0.0010311831938356684 +496 40 negative_sampler.num_negs_per_pos 8.0 +496 40 training.batch_size 0.0 +496 41 model.embedding_dim 0.0 +496 41 loss.margin 2.1468806771761355 +496 41 optimizer.lr 0.035070402506259576 +496 41 negative_sampler.num_negs_per_pos 1.0 +496 41 training.batch_size 1.0 +496 42 model.embedding_dim 2.0 +496 42 loss.margin 7.572982787165122 +496 42 optimizer.lr 0.001107784616492418 +496 42 negative_sampler.num_negs_per_pos 89.0 +496 42 training.batch_size 2.0 +496 43 model.embedding_dim 1.0 +496 43 loss.margin 7.454603586102743 +496 43 optimizer.lr 0.0821034775778311 +496 43 negative_sampler.num_negs_per_pos 37.0 +496 43 training.batch_size 0.0 +496 44 model.embedding_dim 0.0 +496 44 loss.margin 3.987661406784297 +496 44 optimizer.lr 0.074240017336469 +496 44 negative_sampler.num_negs_per_pos 88.0 +496 44 training.batch_size 2.0 +496 45 model.embedding_dim 2.0 +496 45 loss.margin 9.697037327418515 +496 45 optimizer.lr 0.011916340492731787 +496 45 negative_sampler.num_negs_per_pos 53.0 +496 45 training.batch_size 0.0 +496 46 model.embedding_dim 0.0 +496 46 loss.margin 1.6847469713690026 +496 46 optimizer.lr 0.01134386840703188 +496 46 
negative_sampler.num_negs_per_pos 58.0 +496 46 training.batch_size 2.0 +496 47 model.embedding_dim 1.0 +496 47 loss.margin 6.6470894550331225 +496 47 optimizer.lr 0.0010471927799447273 +496 47 negative_sampler.num_negs_per_pos 48.0 +496 47 training.batch_size 1.0 +496 48 model.embedding_dim 0.0 +496 48 loss.margin 6.578209991144327 +496 48 optimizer.lr 0.036448090038273886 +496 48 negative_sampler.num_negs_per_pos 21.0 +496 48 training.batch_size 0.0 +496 49 model.embedding_dim 2.0 +496 49 loss.margin 2.124124694319336 +496 49 optimizer.lr 0.0010079363524229216 +496 49 negative_sampler.num_negs_per_pos 62.0 +496 49 training.batch_size 2.0 +496 50 model.embedding_dim 2.0 +496 50 loss.margin 5.13706668312213 +496 50 optimizer.lr 0.020586203619747466 +496 50 negative_sampler.num_negs_per_pos 9.0 +496 50 training.batch_size 2.0 +496 51 model.embedding_dim 0.0 +496 51 loss.margin 3.7432841211723358 +496 51 optimizer.lr 0.026875038903145534 +496 51 negative_sampler.num_negs_per_pos 88.0 +496 51 training.batch_size 0.0 +496 52 model.embedding_dim 1.0 +496 52 loss.margin 5.913468477434703 +496 52 optimizer.lr 0.005730731207878783 +496 52 negative_sampler.num_negs_per_pos 69.0 +496 52 training.batch_size 1.0 +496 53 model.embedding_dim 1.0 +496 53 loss.margin 9.457088296878537 +496 53 optimizer.lr 0.06423805350341306 +496 53 negative_sampler.num_negs_per_pos 52.0 +496 53 training.batch_size 0.0 +496 54 model.embedding_dim 2.0 +496 54 loss.margin 4.219064363772892 +496 54 optimizer.lr 0.0016295245020008875 +496 54 negative_sampler.num_negs_per_pos 6.0 +496 54 training.batch_size 2.0 +496 55 model.embedding_dim 0.0 +496 55 loss.margin 0.6642924919364377 +496 55 optimizer.lr 0.04506783691952606 +496 55 negative_sampler.num_negs_per_pos 77.0 +496 55 training.batch_size 2.0 +496 56 model.embedding_dim 2.0 +496 56 loss.margin 2.910749005682368 +496 56 optimizer.lr 0.09611873758346641 +496 56 negative_sampler.num_negs_per_pos 7.0 +496 56 training.batch_size 2.0 +496 57 
model.embedding_dim 1.0 +496 57 loss.margin 7.44935324889398 +496 57 optimizer.lr 0.0017905148322883493 +496 57 negative_sampler.num_negs_per_pos 63.0 +496 57 training.batch_size 2.0 +496 58 model.embedding_dim 1.0 +496 58 loss.margin 9.124513311476662 +496 58 optimizer.lr 0.002091579097058853 +496 58 negative_sampler.num_negs_per_pos 72.0 +496 58 training.batch_size 1.0 +496 59 model.embedding_dim 1.0 +496 59 loss.margin 3.7726105426706242 +496 59 optimizer.lr 0.0645916593423754 +496 59 negative_sampler.num_negs_per_pos 0.0 +496 59 training.batch_size 0.0 +496 60 model.embedding_dim 0.0 +496 60 loss.margin 9.305276400251556 +496 60 optimizer.lr 0.005016856486034736 +496 60 negative_sampler.num_negs_per_pos 38.0 +496 60 training.batch_size 2.0 +496 61 model.embedding_dim 2.0 +496 61 loss.margin 5.230299423506829 +496 61 optimizer.lr 0.05457737485832433 +496 61 negative_sampler.num_negs_per_pos 49.0 +496 61 training.batch_size 2.0 +496 62 model.embedding_dim 2.0 +496 62 loss.margin 6.283117944230264 +496 62 optimizer.lr 0.025251425191842623 +496 62 negative_sampler.num_negs_per_pos 32.0 +496 62 training.batch_size 1.0 +496 63 model.embedding_dim 1.0 +496 63 loss.margin 8.000218896948335 +496 63 optimizer.lr 0.08269727920039296 +496 63 negative_sampler.num_negs_per_pos 63.0 +496 63 training.batch_size 1.0 +496 64 model.embedding_dim 2.0 +496 64 loss.margin 8.828842258163943 +496 64 optimizer.lr 0.016802189618608757 +496 64 negative_sampler.num_negs_per_pos 42.0 +496 64 training.batch_size 0.0 +496 65 model.embedding_dim 1.0 +496 65 loss.margin 5.475831230657916 +496 65 optimizer.lr 0.0019043106246738118 +496 65 negative_sampler.num_negs_per_pos 45.0 +496 65 training.batch_size 0.0 +496 66 model.embedding_dim 0.0 +496 66 loss.margin 8.321848136593374 +496 66 optimizer.lr 0.012388249231927374 +496 66 negative_sampler.num_negs_per_pos 32.0 +496 66 training.batch_size 2.0 +496 67 model.embedding_dim 1.0 +496 67 loss.margin 4.728334134949462 +496 67 optimizer.lr 
0.004490993280762038 +496 67 negative_sampler.num_negs_per_pos 7.0 +496 67 training.batch_size 0.0 +496 68 model.embedding_dim 2.0 +496 68 loss.margin 6.566388389703332 +496 68 optimizer.lr 0.08019123913909357 +496 68 negative_sampler.num_negs_per_pos 85.0 +496 68 training.batch_size 2.0 +496 69 model.embedding_dim 1.0 +496 69 loss.margin 8.731226461909834 +496 69 optimizer.lr 0.021998921976668407 +496 69 negative_sampler.num_negs_per_pos 37.0 +496 69 training.batch_size 0.0 +496 70 model.embedding_dim 2.0 +496 70 loss.margin 9.074113601601129 +496 70 optimizer.lr 0.0017679908367372525 +496 70 negative_sampler.num_negs_per_pos 25.0 +496 70 training.batch_size 2.0 +496 1 dataset """wn18rr""" +496 1 model """proje""" +496 1 loss """marginranking""" +496 1 regularizer """no""" +496 1 optimizer """adam""" +496 1 training_loop """owa""" +496 1 negative_sampler """basic""" +496 1 evaluator """rankbased""" +496 2 dataset """wn18rr""" +496 2 model """proje""" +496 2 loss """marginranking""" +496 2 regularizer """no""" +496 2 optimizer """adam""" +496 2 training_loop """owa""" +496 2 negative_sampler """basic""" +496 2 evaluator """rankbased""" +496 3 dataset """wn18rr""" +496 3 model """proje""" +496 3 loss """marginranking""" +496 3 regularizer """no""" +496 3 optimizer """adam""" +496 3 training_loop """owa""" +496 3 negative_sampler """basic""" +496 3 evaluator """rankbased""" +496 4 dataset """wn18rr""" +496 4 model """proje""" +496 4 loss """marginranking""" +496 4 regularizer """no""" +496 4 optimizer """adam""" +496 4 training_loop """owa""" +496 4 negative_sampler """basic""" +496 4 evaluator """rankbased""" +496 5 dataset """wn18rr""" +496 5 model """proje""" +496 5 loss """marginranking""" +496 5 regularizer """no""" +496 5 optimizer """adam""" +496 5 training_loop """owa""" +496 5 negative_sampler """basic""" +496 5 evaluator """rankbased""" +496 6 dataset """wn18rr""" +496 6 model """proje""" +496 6 loss """marginranking""" +496 6 regularizer """no""" +496 6 
optimizer """adam""" +496 6 training_loop """owa""" +496 6 negative_sampler """basic""" +496 6 evaluator """rankbased""" +496 7 dataset """wn18rr""" +496 7 model """proje""" +496 7 loss """marginranking""" +496 7 regularizer """no""" +496 7 optimizer """adam""" +496 7 training_loop """owa""" +496 7 negative_sampler """basic""" +496 7 evaluator """rankbased""" +496 8 dataset """wn18rr""" +496 8 model """proje""" +496 8 loss """marginranking""" +496 8 regularizer """no""" +496 8 optimizer """adam""" +496 8 training_loop """owa""" +496 8 negative_sampler """basic""" +496 8 evaluator """rankbased""" +496 9 dataset """wn18rr""" +496 9 model """proje""" +496 9 loss """marginranking""" +496 9 regularizer """no""" +496 9 optimizer """adam""" +496 9 training_loop """owa""" +496 9 negative_sampler """basic""" +496 9 evaluator """rankbased""" +496 10 dataset """wn18rr""" +496 10 model """proje""" +496 10 loss """marginranking""" +496 10 regularizer """no""" +496 10 optimizer """adam""" +496 10 training_loop """owa""" +496 10 negative_sampler """basic""" +496 10 evaluator """rankbased""" +496 11 dataset """wn18rr""" +496 11 model """proje""" +496 11 loss """marginranking""" +496 11 regularizer """no""" +496 11 optimizer """adam""" +496 11 training_loop """owa""" +496 11 negative_sampler """basic""" +496 11 evaluator """rankbased""" +496 12 dataset """wn18rr""" +496 12 model """proje""" +496 12 loss """marginranking""" +496 12 regularizer """no""" +496 12 optimizer """adam""" +496 12 training_loop """owa""" +496 12 negative_sampler """basic""" +496 12 evaluator """rankbased""" +496 13 dataset """wn18rr""" +496 13 model """proje""" +496 13 loss """marginranking""" +496 13 regularizer """no""" +496 13 optimizer """adam""" +496 13 training_loop """owa""" +496 13 negative_sampler """basic""" +496 13 evaluator """rankbased""" +496 14 dataset """wn18rr""" +496 14 model """proje""" +496 14 loss """marginranking""" +496 14 regularizer """no""" +496 14 optimizer """adam""" +496 14 
training_loop """owa""" +496 14 negative_sampler """basic""" +496 14 evaluator """rankbased""" +496 15 dataset """wn18rr""" +496 15 model """proje""" +496 15 loss """marginranking""" +496 15 regularizer """no""" +496 15 optimizer """adam""" +496 15 training_loop """owa""" +496 15 negative_sampler """basic""" +496 15 evaluator """rankbased""" +496 16 dataset """wn18rr""" +496 16 model """proje""" +496 16 loss """marginranking""" +496 16 regularizer """no""" +496 16 optimizer """adam""" +496 16 training_loop """owa""" +496 16 negative_sampler """basic""" +496 16 evaluator """rankbased""" +496 17 dataset """wn18rr""" +496 17 model """proje""" +496 17 loss """marginranking""" +496 17 regularizer """no""" +496 17 optimizer """adam""" +496 17 training_loop """owa""" +496 17 negative_sampler """basic""" +496 17 evaluator """rankbased""" +496 18 dataset """wn18rr""" +496 18 model """proje""" +496 18 loss """marginranking""" +496 18 regularizer """no""" +496 18 optimizer """adam""" +496 18 training_loop """owa""" +496 18 negative_sampler """basic""" +496 18 evaluator """rankbased""" +496 19 dataset """wn18rr""" +496 19 model """proje""" +496 19 loss """marginranking""" +496 19 regularizer """no""" +496 19 optimizer """adam""" +496 19 training_loop """owa""" +496 19 negative_sampler """basic""" +496 19 evaluator """rankbased""" +496 20 dataset """wn18rr""" +496 20 model """proje""" +496 20 loss """marginranking""" +496 20 regularizer """no""" +496 20 optimizer """adam""" +496 20 training_loop """owa""" +496 20 negative_sampler """basic""" +496 20 evaluator """rankbased""" +496 21 dataset """wn18rr""" +496 21 model """proje""" +496 21 loss """marginranking""" +496 21 regularizer """no""" +496 21 optimizer """adam""" +496 21 training_loop """owa""" +496 21 negative_sampler """basic""" +496 21 evaluator """rankbased""" +496 22 dataset """wn18rr""" +496 22 model """proje""" +496 22 loss """marginranking""" +496 22 regularizer """no""" +496 22 optimizer """adam""" +496 22 
training_loop """owa""" +496 22 negative_sampler """basic""" +496 22 evaluator """rankbased""" +496 23 dataset """wn18rr""" +496 23 model """proje""" +496 23 loss """marginranking""" +496 23 regularizer """no""" +496 23 optimizer """adam""" +496 23 training_loop """owa""" +496 23 negative_sampler """basic""" +496 23 evaluator """rankbased""" +496 24 dataset """wn18rr""" +496 24 model """proje""" +496 24 loss """marginranking""" +496 24 regularizer """no""" +496 24 optimizer """adam""" +496 24 training_loop """owa""" +496 24 negative_sampler """basic""" +496 24 evaluator """rankbased""" +496 25 dataset """wn18rr""" +496 25 model """proje""" +496 25 loss """marginranking""" +496 25 regularizer """no""" +496 25 optimizer """adam""" +496 25 training_loop """owa""" +496 25 negative_sampler """basic""" +496 25 evaluator """rankbased""" +496 26 dataset """wn18rr""" +496 26 model """proje""" +496 26 loss """marginranking""" +496 26 regularizer """no""" +496 26 optimizer """adam""" +496 26 training_loop """owa""" +496 26 negative_sampler """basic""" +496 26 evaluator """rankbased""" +496 27 dataset """wn18rr""" +496 27 model """proje""" +496 27 loss """marginranking""" +496 27 regularizer """no""" +496 27 optimizer """adam""" +496 27 training_loop """owa""" +496 27 negative_sampler """basic""" +496 27 evaluator """rankbased""" +496 28 dataset """wn18rr""" +496 28 model """proje""" +496 28 loss """marginranking""" +496 28 regularizer """no""" +496 28 optimizer """adam""" +496 28 training_loop """owa""" +496 28 negative_sampler """basic""" +496 28 evaluator """rankbased""" +496 29 dataset """wn18rr""" +496 29 model """proje""" +496 29 loss """marginranking""" +496 29 regularizer """no""" +496 29 optimizer """adam""" +496 29 training_loop """owa""" +496 29 negative_sampler """basic""" +496 29 evaluator """rankbased""" +496 30 dataset """wn18rr""" +496 30 model """proje""" +496 30 loss """marginranking""" +496 30 regularizer """no""" +496 30 optimizer """adam""" +496 30 
training_loop """owa""" +496 30 negative_sampler """basic""" +496 30 evaluator """rankbased""" +496 31 dataset """wn18rr""" +496 31 model """proje""" +496 31 loss """marginranking""" +496 31 regularizer """no""" +496 31 optimizer """adam""" +496 31 training_loop """owa""" +496 31 negative_sampler """basic""" +496 31 evaluator """rankbased""" +496 32 dataset """wn18rr""" +496 32 model """proje""" +496 32 loss """marginranking""" +496 32 regularizer """no""" +496 32 optimizer """adam""" +496 32 training_loop """owa""" +496 32 negative_sampler """basic""" +496 32 evaluator """rankbased""" +496 33 dataset """wn18rr""" +496 33 model """proje""" +496 33 loss """marginranking""" +496 33 regularizer """no""" +496 33 optimizer """adam""" +496 33 training_loop """owa""" +496 33 negative_sampler """basic""" +496 33 evaluator """rankbased""" +496 34 dataset """wn18rr""" +496 34 model """proje""" +496 34 loss """marginranking""" +496 34 regularizer """no""" +496 34 optimizer """adam""" +496 34 training_loop """owa""" +496 34 negative_sampler """basic""" +496 34 evaluator """rankbased""" +496 35 dataset """wn18rr""" +496 35 model """proje""" +496 35 loss """marginranking""" +496 35 regularizer """no""" +496 35 optimizer """adam""" +496 35 training_loop """owa""" +496 35 negative_sampler """basic""" +496 35 evaluator """rankbased""" +496 36 dataset """wn18rr""" +496 36 model """proje""" +496 36 loss """marginranking""" +496 36 regularizer """no""" +496 36 optimizer """adam""" +496 36 training_loop """owa""" +496 36 negative_sampler """basic""" +496 36 evaluator """rankbased""" +496 37 dataset """wn18rr""" +496 37 model """proje""" +496 37 loss """marginranking""" +496 37 regularizer """no""" +496 37 optimizer """adam""" +496 37 training_loop """owa""" +496 37 negative_sampler """basic""" +496 37 evaluator """rankbased""" +496 38 dataset """wn18rr""" +496 38 model """proje""" +496 38 loss """marginranking""" +496 38 regularizer """no""" +496 38 optimizer """adam""" +496 38 
training_loop """owa""" +496 38 negative_sampler """basic""" +496 38 evaluator """rankbased""" +496 39 dataset """wn18rr""" +496 39 model """proje""" +496 39 loss """marginranking""" +496 39 regularizer """no""" +496 39 optimizer """adam""" +496 39 training_loop """owa""" +496 39 negative_sampler """basic""" +496 39 evaluator """rankbased""" +496 40 dataset """wn18rr""" +496 40 model """proje""" +496 40 loss """marginranking""" +496 40 regularizer """no""" +496 40 optimizer """adam""" +496 40 training_loop """owa""" +496 40 negative_sampler """basic""" +496 40 evaluator """rankbased""" +496 41 dataset """wn18rr""" +496 41 model """proje""" +496 41 loss """marginranking""" +496 41 regularizer """no""" +496 41 optimizer """adam""" +496 41 training_loop """owa""" +496 41 negative_sampler """basic""" +496 41 evaluator """rankbased""" +496 42 dataset """wn18rr""" +496 42 model """proje""" +496 42 loss """marginranking""" +496 42 regularizer """no""" +496 42 optimizer """adam""" +496 42 training_loop """owa""" +496 42 negative_sampler """basic""" +496 42 evaluator """rankbased""" +496 43 dataset """wn18rr""" +496 43 model """proje""" +496 43 loss """marginranking""" +496 43 regularizer """no""" +496 43 optimizer """adam""" +496 43 training_loop """owa""" +496 43 negative_sampler """basic""" +496 43 evaluator """rankbased""" +496 44 dataset """wn18rr""" +496 44 model """proje""" +496 44 loss """marginranking""" +496 44 regularizer """no""" +496 44 optimizer """adam""" +496 44 training_loop """owa""" +496 44 negative_sampler """basic""" +496 44 evaluator """rankbased""" +496 45 dataset """wn18rr""" +496 45 model """proje""" +496 45 loss """marginranking""" +496 45 regularizer """no""" +496 45 optimizer """adam""" +496 45 training_loop """owa""" +496 45 negative_sampler """basic""" +496 45 evaluator """rankbased""" +496 46 dataset """wn18rr""" +496 46 model """proje""" +496 46 loss """marginranking""" +496 46 regularizer """no""" +496 46 optimizer """adam""" +496 46 
training_loop """owa""" +496 46 negative_sampler """basic""" +496 46 evaluator """rankbased""" +496 47 dataset """wn18rr""" +496 47 model """proje""" +496 47 loss """marginranking""" +496 47 regularizer """no""" +496 47 optimizer """adam""" +496 47 training_loop """owa""" +496 47 negative_sampler """basic""" +496 47 evaluator """rankbased""" +496 48 dataset """wn18rr""" +496 48 model """proje""" +496 48 loss """marginranking""" +496 48 regularizer """no""" +496 48 optimizer """adam""" +496 48 training_loop """owa""" +496 48 negative_sampler """basic""" +496 48 evaluator """rankbased""" +496 49 dataset """wn18rr""" +496 49 model """proje""" +496 49 loss """marginranking""" +496 49 regularizer """no""" +496 49 optimizer """adam""" +496 49 training_loop """owa""" +496 49 negative_sampler """basic""" +496 49 evaluator """rankbased""" +496 50 dataset """wn18rr""" +496 50 model """proje""" +496 50 loss """marginranking""" +496 50 regularizer """no""" +496 50 optimizer """adam""" +496 50 training_loop """owa""" +496 50 negative_sampler """basic""" +496 50 evaluator """rankbased""" +496 51 dataset """wn18rr""" +496 51 model """proje""" +496 51 loss """marginranking""" +496 51 regularizer """no""" +496 51 optimizer """adam""" +496 51 training_loop """owa""" +496 51 negative_sampler """basic""" +496 51 evaluator """rankbased""" +496 52 dataset """wn18rr""" +496 52 model """proje""" +496 52 loss """marginranking""" +496 52 regularizer """no""" +496 52 optimizer """adam""" +496 52 training_loop """owa""" +496 52 negative_sampler """basic""" +496 52 evaluator """rankbased""" +496 53 dataset """wn18rr""" +496 53 model """proje""" +496 53 loss """marginranking""" +496 53 regularizer """no""" +496 53 optimizer """adam""" +496 53 training_loop """owa""" +496 53 negative_sampler """basic""" +496 53 evaluator """rankbased""" +496 54 dataset """wn18rr""" +496 54 model """proje""" +496 54 loss """marginranking""" +496 54 regularizer """no""" +496 54 optimizer """adam""" +496 54 
training_loop """owa""" +496 54 negative_sampler """basic""" +496 54 evaluator """rankbased""" +496 55 dataset """wn18rr""" +496 55 model """proje""" +496 55 loss """marginranking""" +496 55 regularizer """no""" +496 55 optimizer """adam""" +496 55 training_loop """owa""" +496 55 negative_sampler """basic""" +496 55 evaluator """rankbased""" +496 56 dataset """wn18rr""" +496 56 model """proje""" +496 56 loss """marginranking""" +496 56 regularizer """no""" +496 56 optimizer """adam""" +496 56 training_loop """owa""" +496 56 negative_sampler """basic""" +496 56 evaluator """rankbased""" +496 57 dataset """wn18rr""" +496 57 model """proje""" +496 57 loss """marginranking""" +496 57 regularizer """no""" +496 57 optimizer """adam""" +496 57 training_loop """owa""" +496 57 negative_sampler """basic""" +496 57 evaluator """rankbased""" +496 58 dataset """wn18rr""" +496 58 model """proje""" +496 58 loss """marginranking""" +496 58 regularizer """no""" +496 58 optimizer """adam""" +496 58 training_loop """owa""" +496 58 negative_sampler """basic""" +496 58 evaluator """rankbased""" +496 59 dataset """wn18rr""" +496 59 model """proje""" +496 59 loss """marginranking""" +496 59 regularizer """no""" +496 59 optimizer """adam""" +496 59 training_loop """owa""" +496 59 negative_sampler """basic""" +496 59 evaluator """rankbased""" +496 60 dataset """wn18rr""" +496 60 model """proje""" +496 60 loss """marginranking""" +496 60 regularizer """no""" +496 60 optimizer """adam""" +496 60 training_loop """owa""" +496 60 negative_sampler """basic""" +496 60 evaluator """rankbased""" +496 61 dataset """wn18rr""" +496 61 model """proje""" +496 61 loss """marginranking""" +496 61 regularizer """no""" +496 61 optimizer """adam""" +496 61 training_loop """owa""" +496 61 negative_sampler """basic""" +496 61 evaluator """rankbased""" +496 62 dataset """wn18rr""" +496 62 model """proje""" +496 62 loss """marginranking""" +496 62 regularizer """no""" +496 62 optimizer """adam""" +496 62 
training_loop """owa""" +496 62 negative_sampler """basic""" +496 62 evaluator """rankbased""" +496 63 dataset """wn18rr""" +496 63 model """proje""" +496 63 loss """marginranking""" +496 63 regularizer """no""" +496 63 optimizer """adam""" +496 63 training_loop """owa""" +496 63 negative_sampler """basic""" +496 63 evaluator """rankbased""" +496 64 dataset """wn18rr""" +496 64 model """proje""" +496 64 loss """marginranking""" +496 64 regularizer """no""" +496 64 optimizer """adam""" +496 64 training_loop """owa""" +496 64 negative_sampler """basic""" +496 64 evaluator """rankbased""" +496 65 dataset """wn18rr""" +496 65 model """proje""" +496 65 loss """marginranking""" +496 65 regularizer """no""" +496 65 optimizer """adam""" +496 65 training_loop """owa""" +496 65 negative_sampler """basic""" +496 65 evaluator """rankbased""" +496 66 dataset """wn18rr""" +496 66 model """proje""" +496 66 loss """marginranking""" +496 66 regularizer """no""" +496 66 optimizer """adam""" +496 66 training_loop """owa""" +496 66 negative_sampler """basic""" +496 66 evaluator """rankbased""" +496 67 dataset """wn18rr""" +496 67 model """proje""" +496 67 loss """marginranking""" +496 67 regularizer """no""" +496 67 optimizer """adam""" +496 67 training_loop """owa""" +496 67 negative_sampler """basic""" +496 67 evaluator """rankbased""" +496 68 dataset """wn18rr""" +496 68 model """proje""" +496 68 loss """marginranking""" +496 68 regularizer """no""" +496 68 optimizer """adam""" +496 68 training_loop """owa""" +496 68 negative_sampler """basic""" +496 68 evaluator """rankbased""" +496 69 dataset """wn18rr""" +496 69 model """proje""" +496 69 loss """marginranking""" +496 69 regularizer """no""" +496 69 optimizer """adam""" +496 69 training_loop """owa""" +496 69 negative_sampler """basic""" +496 69 evaluator """rankbased""" +496 70 dataset """wn18rr""" +496 70 model """proje""" +496 70 loss """marginranking""" +496 70 regularizer """no""" +496 70 optimizer """adam""" +496 70 
training_loop """owa""" +496 70 negative_sampler """basic""" +496 70 evaluator """rankbased""" +497 1 model.embedding_dim 0.0 +497 1 optimizer.lr 0.059059692935511936 +497 1 training.batch_size 0.0 +497 1 training.label_smoothing 0.19605065317211146 +497 2 model.embedding_dim 1.0 +497 2 optimizer.lr 0.018573447268142872 +497 2 training.batch_size 2.0 +497 2 training.label_smoothing 0.013307409089004404 +497 3 model.embedding_dim 2.0 +497 3 optimizer.lr 0.002892563808886985 +497 3 training.batch_size 2.0 +497 3 training.label_smoothing 0.02905150707857636 +497 4 model.embedding_dim 2.0 +497 4 optimizer.lr 0.027399531648668336 +497 4 training.batch_size 2.0 +497 4 training.label_smoothing 0.001904412525373033 +497 5 model.embedding_dim 2.0 +497 5 optimizer.lr 0.008216894527334024 +497 5 training.batch_size 2.0 +497 5 training.label_smoothing 0.029638458264529387 +497 6 model.embedding_dim 1.0 +497 6 optimizer.lr 0.0800519199074386 +497 6 training.batch_size 2.0 +497 6 training.label_smoothing 0.05093919580911456 +497 7 model.embedding_dim 1.0 +497 7 optimizer.lr 0.0010487358509621856 +497 7 training.batch_size 1.0 +497 7 training.label_smoothing 0.9277749228571124 +497 8 model.embedding_dim 1.0 +497 8 optimizer.lr 0.01735938129244268 +497 8 training.batch_size 2.0 +497 8 training.label_smoothing 0.1076708230403858 +497 9 model.embedding_dim 0.0 +497 9 optimizer.lr 0.06411431907301375 +497 9 training.batch_size 2.0 +497 9 training.label_smoothing 0.07066913438230701 +497 10 model.embedding_dim 1.0 +497 10 optimizer.lr 0.008082194956955826 +497 10 training.batch_size 2.0 +497 10 training.label_smoothing 0.0015476120861582993 +497 11 model.embedding_dim 0.0 +497 11 optimizer.lr 0.0014678498083584527 +497 11 training.batch_size 2.0 +497 11 training.label_smoothing 0.011658080996530906 +497 1 dataset """wn18rr""" +497 1 model """proje""" +497 1 loss """bceaftersigmoid""" +497 1 regularizer """no""" +497 1 optimizer """adam""" +497 1 training_loop """lcwa""" +497 1 
evaluator """rankbased""" +497 2 dataset """wn18rr""" +497 2 model """proje""" +497 2 loss """bceaftersigmoid""" +497 2 regularizer """no""" +497 2 optimizer """adam""" +497 2 training_loop """lcwa""" +497 2 evaluator """rankbased""" +497 3 dataset """wn18rr""" +497 3 model """proje""" +497 3 loss """bceaftersigmoid""" +497 3 regularizer """no""" +497 3 optimizer """adam""" +497 3 training_loop """lcwa""" +497 3 evaluator """rankbased""" +497 4 dataset """wn18rr""" +497 4 model """proje""" +497 4 loss """bceaftersigmoid""" +497 4 regularizer """no""" +497 4 optimizer """adam""" +497 4 training_loop """lcwa""" +497 4 evaluator """rankbased""" +497 5 dataset """wn18rr""" +497 5 model """proje""" +497 5 loss """bceaftersigmoid""" +497 5 regularizer """no""" +497 5 optimizer """adam""" +497 5 training_loop """lcwa""" +497 5 evaluator """rankbased""" +497 6 dataset """wn18rr""" +497 6 model """proje""" +497 6 loss """bceaftersigmoid""" +497 6 regularizer """no""" +497 6 optimizer """adam""" +497 6 training_loop """lcwa""" +497 6 evaluator """rankbased""" +497 7 dataset """wn18rr""" +497 7 model """proje""" +497 7 loss """bceaftersigmoid""" +497 7 regularizer """no""" +497 7 optimizer """adam""" +497 7 training_loop """lcwa""" +497 7 evaluator """rankbased""" +497 8 dataset """wn18rr""" +497 8 model """proje""" +497 8 loss """bceaftersigmoid""" +497 8 regularizer """no""" +497 8 optimizer """adam""" +497 8 training_loop """lcwa""" +497 8 evaluator """rankbased""" +497 9 dataset """wn18rr""" +497 9 model """proje""" +497 9 loss """bceaftersigmoid""" +497 9 regularizer """no""" +497 9 optimizer """adam""" +497 9 training_loop """lcwa""" +497 9 evaluator """rankbased""" +497 10 dataset """wn18rr""" +497 10 model """proje""" +497 10 loss """bceaftersigmoid""" +497 10 regularizer """no""" +497 10 optimizer """adam""" +497 10 training_loop """lcwa""" +497 10 evaluator """rankbased""" +497 11 dataset """wn18rr""" +497 11 model """proje""" +497 11 loss """bceaftersigmoid""" +497 
11 regularizer """no""" +497 11 optimizer """adam""" +497 11 training_loop """lcwa""" +497 11 evaluator """rankbased""" +498 1 model.embedding_dim 1.0 +498 1 optimizer.lr 0.024006064074472085 +498 1 training.batch_size 2.0 +498 1 training.label_smoothing 0.02416118910318939 +498 2 model.embedding_dim 0.0 +498 2 optimizer.lr 0.0041618072455109805 +498 2 training.batch_size 1.0 +498 2 training.label_smoothing 0.03637100563252474 +498 3 model.embedding_dim 0.0 +498 3 optimizer.lr 0.007403962338210308 +498 3 training.batch_size 0.0 +498 3 training.label_smoothing 0.1129252158509464 +498 4 model.embedding_dim 0.0 +498 4 optimizer.lr 0.016485280190706074 +498 4 training.batch_size 0.0 +498 4 training.label_smoothing 0.014692529913415292 +498 5 model.embedding_dim 1.0 +498 5 optimizer.lr 0.006621432621319247 +498 5 training.batch_size 1.0 +498 5 training.label_smoothing 0.33955112546636224 +498 6 model.embedding_dim 2.0 +498 6 optimizer.lr 0.0014843777934230695 +498 6 training.batch_size 2.0 +498 6 training.label_smoothing 0.8024945375006437 +498 7 model.embedding_dim 1.0 +498 7 optimizer.lr 0.09012487662313332 +498 7 training.batch_size 2.0 +498 7 training.label_smoothing 0.09814863961610736 +498 8 model.embedding_dim 0.0 +498 8 optimizer.lr 0.06523803027558715 +498 8 training.batch_size 1.0 +498 8 training.label_smoothing 0.6232107517964302 +498 9 model.embedding_dim 0.0 +498 9 optimizer.lr 0.002585807378706368 +498 9 training.batch_size 0.0 +498 9 training.label_smoothing 0.16593285101863212 +498 10 model.embedding_dim 1.0 +498 10 optimizer.lr 0.02226623901522321 +498 10 training.batch_size 2.0 +498 10 training.label_smoothing 0.026419471155162227 +498 11 model.embedding_dim 0.0 +498 11 optimizer.lr 0.01319980618202647 +498 11 training.batch_size 2.0 +498 11 training.label_smoothing 0.38101115956894327 +498 12 model.embedding_dim 1.0 +498 12 optimizer.lr 0.042983561212263294 +498 12 training.batch_size 0.0 +498 12 training.label_smoothing 0.09992701653442247 +498 13 
model.embedding_dim 2.0 +498 13 optimizer.lr 0.018575584910669945 +498 13 training.batch_size 0.0 +498 13 training.label_smoothing 0.023959876385928773 +498 1 dataset """wn18rr""" +498 1 model """proje""" +498 1 loss """softplus""" +498 1 regularizer """no""" +498 1 optimizer """adam""" +498 1 training_loop """lcwa""" +498 1 evaluator """rankbased""" +498 2 dataset """wn18rr""" +498 2 model """proje""" +498 2 loss """softplus""" +498 2 regularizer """no""" +498 2 optimizer """adam""" +498 2 training_loop """lcwa""" +498 2 evaluator """rankbased""" +498 3 dataset """wn18rr""" +498 3 model """proje""" +498 3 loss """softplus""" +498 3 regularizer """no""" +498 3 optimizer """adam""" +498 3 training_loop """lcwa""" +498 3 evaluator """rankbased""" +498 4 dataset """wn18rr""" +498 4 model """proje""" +498 4 loss """softplus""" +498 4 regularizer """no""" +498 4 optimizer """adam""" +498 4 training_loop """lcwa""" +498 4 evaluator """rankbased""" +498 5 dataset """wn18rr""" +498 5 model """proje""" +498 5 loss """softplus""" +498 5 regularizer """no""" +498 5 optimizer """adam""" +498 5 training_loop """lcwa""" +498 5 evaluator """rankbased""" +498 6 dataset """wn18rr""" +498 6 model """proje""" +498 6 loss """softplus""" +498 6 regularizer """no""" +498 6 optimizer """adam""" +498 6 training_loop """lcwa""" +498 6 evaluator """rankbased""" +498 7 dataset """wn18rr""" +498 7 model """proje""" +498 7 loss """softplus""" +498 7 regularizer """no""" +498 7 optimizer """adam""" +498 7 training_loop """lcwa""" +498 7 evaluator """rankbased""" +498 8 dataset """wn18rr""" +498 8 model """proje""" +498 8 loss """softplus""" +498 8 regularizer """no""" +498 8 optimizer """adam""" +498 8 training_loop """lcwa""" +498 8 evaluator """rankbased""" +498 9 dataset """wn18rr""" +498 9 model """proje""" +498 9 loss """softplus""" +498 9 regularizer """no""" +498 9 optimizer """adam""" +498 9 training_loop """lcwa""" +498 9 evaluator """rankbased""" +498 10 dataset """wn18rr""" +498 10 
model """proje""" +498 10 loss """softplus""" +498 10 regularizer """no""" +498 10 optimizer """adam""" +498 10 training_loop """lcwa""" +498 10 evaluator """rankbased""" +498 11 dataset """wn18rr""" +498 11 model """proje""" +498 11 loss """softplus""" +498 11 regularizer """no""" +498 11 optimizer """adam""" +498 11 training_loop """lcwa""" +498 11 evaluator """rankbased""" +498 12 dataset """wn18rr""" +498 12 model """proje""" +498 12 loss """softplus""" +498 12 regularizer """no""" +498 12 optimizer """adam""" +498 12 training_loop """lcwa""" +498 12 evaluator """rankbased""" +498 13 dataset """wn18rr""" +498 13 model """proje""" +498 13 loss """softplus""" +498 13 regularizer """no""" +498 13 optimizer """adam""" +498 13 training_loop """lcwa""" +498 13 evaluator """rankbased""" +499 1 model.embedding_dim 0.0 +499 1 optimizer.lr 0.02017738214527381 +499 1 training.batch_size 2.0 +499 1 training.label_smoothing 0.0016939354474949024 +499 2 model.embedding_dim 2.0 +499 2 optimizer.lr 0.024638795298680717 +499 2 training.batch_size 2.0 +499 2 training.label_smoothing 0.13414996255689032 +499 3 model.embedding_dim 0.0 +499 3 optimizer.lr 0.007376424411134554 +499 3 training.batch_size 1.0 +499 3 training.label_smoothing 0.02500509306571593 +499 4 model.embedding_dim 2.0 +499 4 optimizer.lr 0.0011261516756040986 +499 4 training.batch_size 2.0 +499 4 training.label_smoothing 0.001064662837042075 +499 5 model.embedding_dim 1.0 +499 5 optimizer.lr 0.06622059207951683 +499 5 training.batch_size 1.0 +499 5 training.label_smoothing 0.02771139986755482 +499 6 model.embedding_dim 0.0 +499 6 optimizer.lr 0.001189777395120071 +499 6 training.batch_size 2.0 +499 6 training.label_smoothing 0.003182848130394069 +499 7 model.embedding_dim 0.0 +499 7 optimizer.lr 0.006358598526626673 +499 7 training.batch_size 1.0 +499 7 training.label_smoothing 0.00570919470895258 +499 8 model.embedding_dim 2.0 +499 8 optimizer.lr 0.002259205247304951 +499 8 training.batch_size 0.0 +499 8 
training.label_smoothing 0.8408757214292399 +499 9 model.embedding_dim 1.0 +499 9 optimizer.lr 0.0023837335416539247 +499 9 training.batch_size 2.0 +499 9 training.label_smoothing 0.0190822935296604 +499 10 model.embedding_dim 2.0 +499 10 optimizer.lr 0.0021828716030196996 +499 10 training.batch_size 2.0 +499 10 training.label_smoothing 0.05232633114073844 +499 11 model.embedding_dim 1.0 +499 11 optimizer.lr 0.005851642148692733 +499 11 training.batch_size 1.0 +499 11 training.label_smoothing 0.19388870545253792 +499 12 model.embedding_dim 1.0 +499 12 optimizer.lr 0.07514987363608584 +499 12 training.batch_size 2.0 +499 12 training.label_smoothing 0.015014868728513191 +499 13 model.embedding_dim 2.0 +499 13 optimizer.lr 0.013399761396956257 +499 13 training.batch_size 1.0 +499 13 training.label_smoothing 0.005458735477783536 +499 14 model.embedding_dim 1.0 +499 14 optimizer.lr 0.001795084462999239 +499 14 training.batch_size 1.0 +499 14 training.label_smoothing 0.0010122366922804445 +499 15 model.embedding_dim 2.0 +499 15 optimizer.lr 0.003414446414982168 +499 15 training.batch_size 2.0 +499 15 training.label_smoothing 0.010859032058762166 +499 16 model.embedding_dim 0.0 +499 16 optimizer.lr 0.0011569794134374384 +499 16 training.batch_size 0.0 +499 16 training.label_smoothing 0.3038043538072891 +499 1 dataset """wn18rr""" +499 1 model """proje""" +499 1 loss """bceaftersigmoid""" +499 1 regularizer """no""" +499 1 optimizer """adam""" +499 1 training_loop """lcwa""" +499 1 evaluator """rankbased""" +499 2 dataset """wn18rr""" +499 2 model """proje""" +499 2 loss """bceaftersigmoid""" +499 2 regularizer """no""" +499 2 optimizer """adam""" +499 2 training_loop """lcwa""" +499 2 evaluator """rankbased""" +499 3 dataset """wn18rr""" +499 3 model """proje""" +499 3 loss """bceaftersigmoid""" +499 3 regularizer """no""" +499 3 optimizer """adam""" +499 3 training_loop """lcwa""" +499 3 evaluator """rankbased""" +499 4 dataset """wn18rr""" +499 4 model """proje""" +499 
4 loss """bceaftersigmoid""" +499 4 regularizer """no""" +499 4 optimizer """adam""" +499 4 training_loop """lcwa""" +499 4 evaluator """rankbased""" +499 5 dataset """wn18rr""" +499 5 model """proje""" +499 5 loss """bceaftersigmoid""" +499 5 regularizer """no""" +499 5 optimizer """adam""" +499 5 training_loop """lcwa""" +499 5 evaluator """rankbased""" +499 6 dataset """wn18rr""" +499 6 model """proje""" +499 6 loss """bceaftersigmoid""" +499 6 regularizer """no""" +499 6 optimizer """adam""" +499 6 training_loop """lcwa""" +499 6 evaluator """rankbased""" +499 7 dataset """wn18rr""" +499 7 model """proje""" +499 7 loss """bceaftersigmoid""" +499 7 regularizer """no""" +499 7 optimizer """adam""" +499 7 training_loop """lcwa""" +499 7 evaluator """rankbased""" +499 8 dataset """wn18rr""" +499 8 model """proje""" +499 8 loss """bceaftersigmoid""" +499 8 regularizer """no""" +499 8 optimizer """adam""" +499 8 training_loop """lcwa""" +499 8 evaluator """rankbased""" +499 9 dataset """wn18rr""" +499 9 model """proje""" +499 9 loss """bceaftersigmoid""" +499 9 regularizer """no""" +499 9 optimizer """adam""" +499 9 training_loop """lcwa""" +499 9 evaluator """rankbased""" +499 10 dataset """wn18rr""" +499 10 model """proje""" +499 10 loss """bceaftersigmoid""" +499 10 regularizer """no""" +499 10 optimizer """adam""" +499 10 training_loop """lcwa""" +499 10 evaluator """rankbased""" +499 11 dataset """wn18rr""" +499 11 model """proje""" +499 11 loss """bceaftersigmoid""" +499 11 regularizer """no""" +499 11 optimizer """adam""" +499 11 training_loop """lcwa""" +499 11 evaluator """rankbased""" +499 12 dataset """wn18rr""" +499 12 model """proje""" +499 12 loss """bceaftersigmoid""" +499 12 regularizer """no""" +499 12 optimizer """adam""" +499 12 training_loop """lcwa""" +499 12 evaluator """rankbased""" +499 13 dataset """wn18rr""" +499 13 model """proje""" +499 13 loss """bceaftersigmoid""" +499 13 regularizer """no""" +499 13 optimizer """adam""" +499 13 
training_loop """lcwa""" +499 13 evaluator """rankbased""" +499 14 dataset """wn18rr""" +499 14 model """proje""" +499 14 loss """bceaftersigmoid""" +499 14 regularizer """no""" +499 14 optimizer """adam""" +499 14 training_loop """lcwa""" +499 14 evaluator """rankbased""" +499 15 dataset """wn18rr""" +499 15 model """proje""" +499 15 loss """bceaftersigmoid""" +499 15 regularizer """no""" +499 15 optimizer """adam""" +499 15 training_loop """lcwa""" +499 15 evaluator """rankbased""" +499 16 dataset """wn18rr""" +499 16 model """proje""" +499 16 loss """bceaftersigmoid""" +499 16 regularizer """no""" +499 16 optimizer """adam""" +499 16 training_loop """lcwa""" +499 16 evaluator """rankbased""" +500 1 model.embedding_dim 1.0 +500 1 optimizer.lr 0.016666192761763097 +500 1 training.batch_size 0.0 +500 1 training.label_smoothing 0.3977526298231557 +500 2 model.embedding_dim 1.0 +500 2 optimizer.lr 0.042748090077299496 +500 2 training.batch_size 1.0 +500 2 training.label_smoothing 0.012560224171621916 +500 3 model.embedding_dim 0.0 +500 3 optimizer.lr 0.008542661374569601 +500 3 training.batch_size 1.0 +500 3 training.label_smoothing 0.0035305938648134603 +500 4 model.embedding_dim 0.0 +500 4 optimizer.lr 0.05656548925263516 +500 4 training.batch_size 1.0 +500 4 training.label_smoothing 0.002533876078827759 +500 5 model.embedding_dim 1.0 +500 5 optimizer.lr 0.016177086371716422 +500 5 training.batch_size 1.0 +500 5 training.label_smoothing 0.628148501312623 +500 6 model.embedding_dim 0.0 +500 6 optimizer.lr 0.0014722879323838403 +500 6 training.batch_size 2.0 +500 6 training.label_smoothing 0.9843616575623083 +500 7 model.embedding_dim 1.0 +500 7 optimizer.lr 0.0030856409605541524 +500 7 training.batch_size 1.0 +500 7 training.label_smoothing 0.34616029021642736 +500 8 model.embedding_dim 1.0 +500 8 optimizer.lr 0.006767598975713448 +500 8 training.batch_size 2.0 +500 8 training.label_smoothing 0.5908798234231052 +500 9 model.embedding_dim 1.0 +500 9 optimizer.lr 
0.014719438008246155 +500 9 training.batch_size 2.0 +500 9 training.label_smoothing 0.03320101671625737 +500 10 model.embedding_dim 2.0 +500 10 optimizer.lr 0.07041941656784177 +500 10 training.batch_size 1.0 +500 10 training.label_smoothing 0.009071406755604085 +500 11 model.embedding_dim 2.0 +500 11 optimizer.lr 0.018066966580678596 +500 11 training.batch_size 0.0 +500 11 training.label_smoothing 0.004276860148357339 +500 12 model.embedding_dim 1.0 +500 12 optimizer.lr 0.024485503612748396 +500 12 training.batch_size 2.0 +500 12 training.label_smoothing 0.008214671715195136 +500 13 model.embedding_dim 2.0 +500 13 optimizer.lr 0.0025158990741100578 +500 13 training.batch_size 1.0 +500 13 training.label_smoothing 0.7263333107067722 +500 14 model.embedding_dim 0.0 +500 14 optimizer.lr 0.00401478492718683 +500 14 training.batch_size 0.0 +500 14 training.label_smoothing 0.044356192304844624 +500 15 model.embedding_dim 2.0 +500 15 optimizer.lr 0.029195032937987683 +500 15 training.batch_size 0.0 +500 15 training.label_smoothing 0.05942920510768243 +500 16 model.embedding_dim 0.0 +500 16 optimizer.lr 0.0013298194950443485 +500 16 training.batch_size 0.0 +500 16 training.label_smoothing 0.001458564571734761 +500 17 model.embedding_dim 0.0 +500 17 optimizer.lr 0.03903537007005075 +500 17 training.batch_size 2.0 +500 17 training.label_smoothing 0.08333481576586649 +500 18 model.embedding_dim 1.0 +500 18 optimizer.lr 0.05314353919919903 +500 18 training.batch_size 2.0 +500 18 training.label_smoothing 0.14323951203624077 +500 19 model.embedding_dim 2.0 +500 19 optimizer.lr 0.0028755056844820335 +500 19 training.batch_size 2.0 +500 19 training.label_smoothing 0.014818811587857458 +500 20 model.embedding_dim 2.0 +500 20 optimizer.lr 0.0265872028806474 +500 20 training.batch_size 1.0 +500 20 training.label_smoothing 0.02649039977938421 +500 21 model.embedding_dim 2.0 +500 21 optimizer.lr 0.009987730268788204 +500 21 training.batch_size 2.0 +500 21 training.label_smoothing 
0.0014638434558417914 +500 22 model.embedding_dim 0.0 +500 22 optimizer.lr 0.0028241152014370184 +500 22 training.batch_size 0.0 +500 22 training.label_smoothing 0.021679329182350434 +500 23 model.embedding_dim 2.0 +500 23 optimizer.lr 0.0033012173607913737 +500 23 training.batch_size 2.0 +500 23 training.label_smoothing 0.7838455513243779 +500 24 model.embedding_dim 2.0 +500 24 optimizer.lr 0.0030358110596460926 +500 24 training.batch_size 0.0 +500 24 training.label_smoothing 0.0012383748320706767 +500 1 dataset """wn18rr""" +500 1 model """proje""" +500 1 loss """softplus""" +500 1 regularizer """no""" +500 1 optimizer """adam""" +500 1 training_loop """lcwa""" +500 1 evaluator """rankbased""" +500 2 dataset """wn18rr""" +500 2 model """proje""" +500 2 loss """softplus""" +500 2 regularizer """no""" +500 2 optimizer """adam""" +500 2 training_loop """lcwa""" +500 2 evaluator """rankbased""" +500 3 dataset """wn18rr""" +500 3 model """proje""" +500 3 loss """softplus""" +500 3 regularizer """no""" +500 3 optimizer """adam""" +500 3 training_loop """lcwa""" +500 3 evaluator """rankbased""" +500 4 dataset """wn18rr""" +500 4 model """proje""" +500 4 loss """softplus""" +500 4 regularizer """no""" +500 4 optimizer """adam""" +500 4 training_loop """lcwa""" +500 4 evaluator """rankbased""" +500 5 dataset """wn18rr""" +500 5 model """proje""" +500 5 loss """softplus""" +500 5 regularizer """no""" +500 5 optimizer """adam""" +500 5 training_loop """lcwa""" +500 5 evaluator """rankbased""" +500 6 dataset """wn18rr""" +500 6 model """proje""" +500 6 loss """softplus""" +500 6 regularizer """no""" +500 6 optimizer """adam""" +500 6 training_loop """lcwa""" +500 6 evaluator """rankbased""" +500 7 dataset """wn18rr""" +500 7 model """proje""" +500 7 loss """softplus""" +500 7 regularizer """no""" +500 7 optimizer """adam""" +500 7 training_loop """lcwa""" +500 7 evaluator """rankbased""" +500 8 dataset """wn18rr""" +500 8 model """proje""" +500 8 loss """softplus""" +500 8 
regularizer """no""" +500 8 optimizer """adam""" +500 8 training_loop """lcwa""" +500 8 evaluator """rankbased""" +500 9 dataset """wn18rr""" +500 9 model """proje""" +500 9 loss """softplus""" +500 9 regularizer """no""" +500 9 optimizer """adam""" +500 9 training_loop """lcwa""" +500 9 evaluator """rankbased""" +500 10 dataset """wn18rr""" +500 10 model """proje""" +500 10 loss """softplus""" +500 10 regularizer """no""" +500 10 optimizer """adam""" +500 10 training_loop """lcwa""" +500 10 evaluator """rankbased""" +500 11 dataset """wn18rr""" +500 11 model """proje""" +500 11 loss """softplus""" +500 11 regularizer """no""" +500 11 optimizer """adam""" +500 11 training_loop """lcwa""" +500 11 evaluator """rankbased""" +500 12 dataset """wn18rr""" +500 12 model """proje""" +500 12 loss """softplus""" +500 12 regularizer """no""" +500 12 optimizer """adam""" +500 12 training_loop """lcwa""" +500 12 evaluator """rankbased""" +500 13 dataset """wn18rr""" +500 13 model """proje""" +500 13 loss """softplus""" +500 13 regularizer """no""" +500 13 optimizer """adam""" +500 13 training_loop """lcwa""" +500 13 evaluator """rankbased""" +500 14 dataset """wn18rr""" +500 14 model """proje""" +500 14 loss """softplus""" +500 14 regularizer """no""" +500 14 optimizer """adam""" +500 14 training_loop """lcwa""" +500 14 evaluator """rankbased""" +500 15 dataset """wn18rr""" +500 15 model """proje""" +500 15 loss """softplus""" +500 15 regularizer """no""" +500 15 optimizer """adam""" +500 15 training_loop """lcwa""" +500 15 evaluator """rankbased""" +500 16 dataset """wn18rr""" +500 16 model """proje""" +500 16 loss """softplus""" +500 16 regularizer """no""" +500 16 optimizer """adam""" +500 16 training_loop """lcwa""" +500 16 evaluator """rankbased""" +500 17 dataset """wn18rr""" +500 17 model """proje""" +500 17 loss """softplus""" +500 17 regularizer """no""" +500 17 optimizer """adam""" +500 17 training_loop """lcwa""" +500 17 evaluator """rankbased""" +500 18 dataset 
"""wn18rr""" +500 18 model """proje""" +500 18 loss """softplus""" +500 18 regularizer """no""" +500 18 optimizer """adam""" +500 18 training_loop """lcwa""" +500 18 evaluator """rankbased""" +500 19 dataset """wn18rr""" +500 19 model """proje""" +500 19 loss """softplus""" +500 19 regularizer """no""" +500 19 optimizer """adam""" +500 19 training_loop """lcwa""" +500 19 evaluator """rankbased""" +500 20 dataset """wn18rr""" +500 20 model """proje""" +500 20 loss """softplus""" +500 20 regularizer """no""" +500 20 optimizer """adam""" +500 20 training_loop """lcwa""" +500 20 evaluator """rankbased""" +500 21 dataset """wn18rr""" +500 21 model """proje""" +500 21 loss """softplus""" +500 21 regularizer """no""" +500 21 optimizer """adam""" +500 21 training_loop """lcwa""" +500 21 evaluator """rankbased""" +500 22 dataset """wn18rr""" +500 22 model """proje""" +500 22 loss """softplus""" +500 22 regularizer """no""" +500 22 optimizer """adam""" +500 22 training_loop """lcwa""" +500 22 evaluator """rankbased""" +500 23 dataset """wn18rr""" +500 23 model """proje""" +500 23 loss """softplus""" +500 23 regularizer """no""" +500 23 optimizer """adam""" +500 23 training_loop """lcwa""" +500 23 evaluator """rankbased""" +500 24 dataset """wn18rr""" +500 24 model """proje""" +500 24 loss """softplus""" +500 24 regularizer """no""" +500 24 optimizer """adam""" +500 24 training_loop """lcwa""" +500 24 evaluator """rankbased""" +501 1 model.embedding_dim 1.0 +501 1 optimizer.lr 0.0014797950334083149 +501 1 negative_sampler.num_negs_per_pos 13.0 +501 1 training.batch_size 2.0 +501 2 model.embedding_dim 2.0 +501 2 optimizer.lr 0.004487031185973273 +501 2 negative_sampler.num_negs_per_pos 99.0 +501 2 training.batch_size 0.0 +501 3 model.embedding_dim 0.0 +501 3 optimizer.lr 0.006100232735940625 +501 3 negative_sampler.num_negs_per_pos 75.0 +501 3 training.batch_size 0.0 +501 4 model.embedding_dim 2.0 +501 4 optimizer.lr 0.001523459543346777 +501 4 
negative_sampler.num_negs_per_pos 2.0 +501 4 training.batch_size 0.0 +501 5 model.embedding_dim 1.0 +501 5 optimizer.lr 0.0018540456316424082 +501 5 negative_sampler.num_negs_per_pos 49.0 +501 5 training.batch_size 1.0 +501 6 model.embedding_dim 1.0 +501 6 optimizer.lr 0.002682889006208557 +501 6 negative_sampler.num_negs_per_pos 91.0 +501 6 training.batch_size 1.0 +501 7 model.embedding_dim 2.0 +501 7 optimizer.lr 0.011846520119187128 +501 7 negative_sampler.num_negs_per_pos 13.0 +501 7 training.batch_size 0.0 +501 8 model.embedding_dim 0.0 +501 8 optimizer.lr 0.007611913450046238 +501 8 negative_sampler.num_negs_per_pos 87.0 +501 8 training.batch_size 1.0 +501 9 model.embedding_dim 2.0 +501 9 optimizer.lr 0.003365890844914689 +501 9 negative_sampler.num_negs_per_pos 55.0 +501 9 training.batch_size 1.0 +501 10 model.embedding_dim 2.0 +501 10 optimizer.lr 0.0058295893764256385 +501 10 negative_sampler.num_negs_per_pos 82.0 +501 10 training.batch_size 1.0 +501 11 model.embedding_dim 2.0 +501 11 optimizer.lr 0.0020355318446924186 +501 11 negative_sampler.num_negs_per_pos 56.0 +501 11 training.batch_size 0.0 +501 12 model.embedding_dim 2.0 +501 12 optimizer.lr 0.022997043854674194 +501 12 negative_sampler.num_negs_per_pos 79.0 +501 12 training.batch_size 0.0 +501 13 model.embedding_dim 1.0 +501 13 optimizer.lr 0.09034370810993946 +501 13 negative_sampler.num_negs_per_pos 83.0 +501 13 training.batch_size 1.0 +501 14 model.embedding_dim 0.0 +501 14 optimizer.lr 0.042886993660324856 +501 14 negative_sampler.num_negs_per_pos 57.0 +501 14 training.batch_size 0.0 +501 15 model.embedding_dim 2.0 +501 15 optimizer.lr 0.001957931424079789 +501 15 negative_sampler.num_negs_per_pos 67.0 +501 15 training.batch_size 1.0 +501 16 model.embedding_dim 2.0 +501 16 optimizer.lr 0.013047036547084305 +501 16 negative_sampler.num_negs_per_pos 44.0 +501 16 training.batch_size 2.0 +501 17 model.embedding_dim 2.0 +501 17 optimizer.lr 0.004778484737692416 +501 17 
negative_sampler.num_negs_per_pos 85.0 +501 17 training.batch_size 0.0 +501 18 model.embedding_dim 2.0 +501 18 optimizer.lr 0.04366007760604547 +501 18 negative_sampler.num_negs_per_pos 95.0 +501 18 training.batch_size 2.0 +501 19 model.embedding_dim 1.0 +501 19 optimizer.lr 0.0029642044747585964 +501 19 negative_sampler.num_negs_per_pos 67.0 +501 19 training.batch_size 0.0 +501 20 model.embedding_dim 1.0 +501 20 optimizer.lr 0.028806667465325543 +501 20 negative_sampler.num_negs_per_pos 10.0 +501 20 training.batch_size 0.0 +501 21 model.embedding_dim 0.0 +501 21 optimizer.lr 0.004029451938648359 +501 21 negative_sampler.num_negs_per_pos 30.0 +501 21 training.batch_size 2.0 +501 22 model.embedding_dim 2.0 +501 22 optimizer.lr 0.0018604814798113785 +501 22 negative_sampler.num_negs_per_pos 27.0 +501 22 training.batch_size 1.0 +501 23 model.embedding_dim 1.0 +501 23 optimizer.lr 0.08556455983518886 +501 23 negative_sampler.num_negs_per_pos 20.0 +501 23 training.batch_size 1.0 +501 24 model.embedding_dim 1.0 +501 24 optimizer.lr 0.0018293048538384133 +501 24 negative_sampler.num_negs_per_pos 75.0 +501 24 training.batch_size 1.0 +501 25 model.embedding_dim 1.0 +501 25 optimizer.lr 0.09252911415678022 +501 25 negative_sampler.num_negs_per_pos 66.0 +501 25 training.batch_size 1.0 +501 26 model.embedding_dim 2.0 +501 26 optimizer.lr 0.05590671517597378 +501 26 negative_sampler.num_negs_per_pos 17.0 +501 26 training.batch_size 1.0 +501 27 model.embedding_dim 2.0 +501 27 optimizer.lr 0.0011754821340898257 +501 27 negative_sampler.num_negs_per_pos 14.0 +501 27 training.batch_size 1.0 +501 28 model.embedding_dim 2.0 +501 28 optimizer.lr 0.01886988121902845 +501 28 negative_sampler.num_negs_per_pos 19.0 +501 28 training.batch_size 1.0 +501 29 model.embedding_dim 2.0 +501 29 optimizer.lr 0.08214730645292725 +501 29 negative_sampler.num_negs_per_pos 59.0 +501 29 training.batch_size 0.0 +501 30 model.embedding_dim 0.0 +501 30 optimizer.lr 0.028402161407410198 +501 30 
negative_sampler.num_negs_per_pos 63.0 +501 30 training.batch_size 2.0 +501 31 model.embedding_dim 2.0 +501 31 optimizer.lr 0.052667479037435495 +501 31 negative_sampler.num_negs_per_pos 81.0 +501 31 training.batch_size 2.0 +501 32 model.embedding_dim 2.0 +501 32 optimizer.lr 0.0066419479840818295 +501 32 negative_sampler.num_negs_per_pos 13.0 +501 32 training.batch_size 1.0 +501 33 model.embedding_dim 0.0 +501 33 optimizer.lr 0.08930183692433244 +501 33 negative_sampler.num_negs_per_pos 48.0 +501 33 training.batch_size 1.0 +501 34 model.embedding_dim 2.0 +501 34 optimizer.lr 0.0037403189351746953 +501 34 negative_sampler.num_negs_per_pos 61.0 +501 34 training.batch_size 2.0 +501 35 model.embedding_dim 2.0 +501 35 optimizer.lr 0.006883798959361775 +501 35 negative_sampler.num_negs_per_pos 53.0 +501 35 training.batch_size 2.0 +501 36 model.embedding_dim 1.0 +501 36 optimizer.lr 0.010603736220784922 +501 36 negative_sampler.num_negs_per_pos 68.0 +501 36 training.batch_size 2.0 +501 37 model.embedding_dim 2.0 +501 37 optimizer.lr 0.028389985175123575 +501 37 negative_sampler.num_negs_per_pos 14.0 +501 37 training.batch_size 0.0 +501 38 model.embedding_dim 1.0 +501 38 optimizer.lr 0.01230209700760984 +501 38 negative_sampler.num_negs_per_pos 18.0 +501 38 training.batch_size 1.0 +501 39 model.embedding_dim 1.0 +501 39 optimizer.lr 0.013643800272511851 +501 39 negative_sampler.num_negs_per_pos 99.0 +501 39 training.batch_size 1.0 +501 40 model.embedding_dim 0.0 +501 40 optimizer.lr 0.0060139643673108424 +501 40 negative_sampler.num_negs_per_pos 29.0 +501 40 training.batch_size 2.0 +501 41 model.embedding_dim 0.0 +501 41 optimizer.lr 0.0555614553697163 +501 41 negative_sampler.num_negs_per_pos 69.0 +501 41 training.batch_size 2.0 +501 42 model.embedding_dim 0.0 +501 42 optimizer.lr 0.0740905109529569 +501 42 negative_sampler.num_negs_per_pos 46.0 +501 42 training.batch_size 1.0 +501 43 model.embedding_dim 2.0 +501 43 optimizer.lr 0.02415296095361537 +501 43 
negative_sampler.num_negs_per_pos 14.0 +501 43 training.batch_size 0.0 +501 44 model.embedding_dim 1.0 +501 44 optimizer.lr 0.04972894914441681 +501 44 negative_sampler.num_negs_per_pos 96.0 +501 44 training.batch_size 0.0 +501 45 model.embedding_dim 1.0 +501 45 optimizer.lr 0.003062146591210975 +501 45 negative_sampler.num_negs_per_pos 87.0 +501 45 training.batch_size 2.0 +501 46 model.embedding_dim 1.0 +501 46 optimizer.lr 0.008942415797763087 +501 46 negative_sampler.num_negs_per_pos 59.0 +501 46 training.batch_size 1.0 +501 47 model.embedding_dim 2.0 +501 47 optimizer.lr 0.008561654850086678 +501 47 negative_sampler.num_negs_per_pos 56.0 +501 47 training.batch_size 0.0 +501 48 model.embedding_dim 0.0 +501 48 optimizer.lr 0.01274486323351774 +501 48 negative_sampler.num_negs_per_pos 61.0 +501 48 training.batch_size 0.0 +501 49 model.embedding_dim 1.0 +501 49 optimizer.lr 0.0037887065646772505 +501 49 negative_sampler.num_negs_per_pos 37.0 +501 49 training.batch_size 1.0 +501 50 model.embedding_dim 2.0 +501 50 optimizer.lr 0.003453688483637904 +501 50 negative_sampler.num_negs_per_pos 99.0 +501 50 training.batch_size 1.0 +501 51 model.embedding_dim 2.0 +501 51 optimizer.lr 0.08368259742389311 +501 51 negative_sampler.num_negs_per_pos 37.0 +501 51 training.batch_size 2.0 +501 52 model.embedding_dim 1.0 +501 52 optimizer.lr 0.007640846028323766 +501 52 negative_sampler.num_negs_per_pos 97.0 +501 52 training.batch_size 1.0 +501 53 model.embedding_dim 0.0 +501 53 optimizer.lr 0.006373698776949877 +501 53 negative_sampler.num_negs_per_pos 80.0 +501 53 training.batch_size 0.0 +501 54 model.embedding_dim 2.0 +501 54 optimizer.lr 0.011943217905433326 +501 54 negative_sampler.num_negs_per_pos 36.0 +501 54 training.batch_size 1.0 +501 55 model.embedding_dim 2.0 +501 55 optimizer.lr 0.041351817014475545 +501 55 negative_sampler.num_negs_per_pos 33.0 +501 55 training.batch_size 1.0 +501 56 model.embedding_dim 0.0 +501 56 optimizer.lr 0.009426705388566005 +501 56 
negative_sampler.num_negs_per_pos 17.0 +501 56 training.batch_size 2.0 +501 57 model.embedding_dim 1.0 +501 57 optimizer.lr 0.03973737166598922 +501 57 negative_sampler.num_negs_per_pos 44.0 +501 57 training.batch_size 2.0 +501 58 model.embedding_dim 0.0 +501 58 optimizer.lr 0.004935433180198822 +501 58 negative_sampler.num_negs_per_pos 2.0 +501 58 training.batch_size 0.0 +501 59 model.embedding_dim 1.0 +501 59 optimizer.lr 0.08078369284778371 +501 59 negative_sampler.num_negs_per_pos 59.0 +501 59 training.batch_size 0.0 +501 60 model.embedding_dim 2.0 +501 60 optimizer.lr 0.056807317302730434 +501 60 negative_sampler.num_negs_per_pos 16.0 +501 60 training.batch_size 1.0 +501 61 model.embedding_dim 0.0 +501 61 optimizer.lr 0.00497177077482697 +501 61 negative_sampler.num_negs_per_pos 67.0 +501 61 training.batch_size 1.0 +501 62 model.embedding_dim 2.0 +501 62 optimizer.lr 0.06302091367930802 +501 62 negative_sampler.num_negs_per_pos 55.0 +501 62 training.batch_size 0.0 +501 63 model.embedding_dim 0.0 +501 63 optimizer.lr 0.006899581628016454 +501 63 negative_sampler.num_negs_per_pos 41.0 +501 63 training.batch_size 0.0 +501 64 model.embedding_dim 1.0 +501 64 optimizer.lr 0.00326018565279824 +501 64 negative_sampler.num_negs_per_pos 44.0 +501 64 training.batch_size 2.0 +501 65 model.embedding_dim 1.0 +501 65 optimizer.lr 0.0681471801057132 +501 65 negative_sampler.num_negs_per_pos 68.0 +501 65 training.batch_size 0.0 +501 1 dataset """wn18rr""" +501 1 model """proje""" +501 1 loss """bceaftersigmoid""" +501 1 regularizer """no""" +501 1 optimizer """adam""" +501 1 training_loop """owa""" +501 1 negative_sampler """basic""" +501 1 evaluator """rankbased""" +501 2 dataset """wn18rr""" +501 2 model """proje""" +501 2 loss """bceaftersigmoid""" +501 2 regularizer """no""" +501 2 optimizer """adam""" +501 2 training_loop """owa""" +501 2 negative_sampler """basic""" +501 2 evaluator """rankbased""" +501 3 dataset """wn18rr""" +501 3 model """proje""" +501 3 loss 
"""bceaftersigmoid""" +501 3 regularizer """no""" +501 3 optimizer """adam""" +501 3 training_loop """owa""" +501 3 negative_sampler """basic""" +501 3 evaluator """rankbased""" +501 4 dataset """wn18rr""" +501 4 model """proje""" +501 4 loss """bceaftersigmoid""" +501 4 regularizer """no""" +501 4 optimizer """adam""" +501 4 training_loop """owa""" +501 4 negative_sampler """basic""" +501 4 evaluator """rankbased""" +501 5 dataset """wn18rr""" +501 5 model """proje""" +501 5 loss """bceaftersigmoid""" +501 5 regularizer """no""" +501 5 optimizer """adam""" +501 5 training_loop """owa""" +501 5 negative_sampler """basic""" +501 5 evaluator """rankbased""" +501 6 dataset """wn18rr""" +501 6 model """proje""" +501 6 loss """bceaftersigmoid""" +501 6 regularizer """no""" +501 6 optimizer """adam""" +501 6 training_loop """owa""" +501 6 negative_sampler """basic""" +501 6 evaluator """rankbased""" +501 7 dataset """wn18rr""" +501 7 model """proje""" +501 7 loss """bceaftersigmoid""" +501 7 regularizer """no""" +501 7 optimizer """adam""" +501 7 training_loop """owa""" +501 7 negative_sampler """basic""" +501 7 evaluator """rankbased""" +501 8 dataset """wn18rr""" +501 8 model """proje""" +501 8 loss """bceaftersigmoid""" +501 8 regularizer """no""" +501 8 optimizer """adam""" +501 8 training_loop """owa""" +501 8 negative_sampler """basic""" +501 8 evaluator """rankbased""" +501 9 dataset """wn18rr""" +501 9 model """proje""" +501 9 loss """bceaftersigmoid""" +501 9 regularizer """no""" +501 9 optimizer """adam""" +501 9 training_loop """owa""" +501 9 negative_sampler """basic""" +501 9 evaluator """rankbased""" +501 10 dataset """wn18rr""" +501 10 model """proje""" +501 10 loss """bceaftersigmoid""" +501 10 regularizer """no""" +501 10 optimizer """adam""" +501 10 training_loop """owa""" +501 10 negative_sampler """basic""" +501 10 evaluator """rankbased""" +501 11 dataset """wn18rr""" +501 11 model """proje""" +501 11 loss """bceaftersigmoid""" +501 11 regularizer 
"""no""" +501 11 optimizer """adam""" +501 11 training_loop """owa""" +501 11 negative_sampler """basic""" +501 11 evaluator """rankbased""" +501 12 dataset """wn18rr""" +501 12 model """proje""" +501 12 loss """bceaftersigmoid""" +501 12 regularizer """no""" +501 12 optimizer """adam""" +501 12 training_loop """owa""" +501 12 negative_sampler """basic""" +501 12 evaluator """rankbased""" +501 13 dataset """wn18rr""" +501 13 model """proje""" +501 13 loss """bceaftersigmoid""" +501 13 regularizer """no""" +501 13 optimizer """adam""" +501 13 training_loop """owa""" +501 13 negative_sampler """basic""" +501 13 evaluator """rankbased""" +501 14 dataset """wn18rr""" +501 14 model """proje""" +501 14 loss """bceaftersigmoid""" +501 14 regularizer """no""" +501 14 optimizer """adam""" +501 14 training_loop """owa""" +501 14 negative_sampler """basic""" +501 14 evaluator """rankbased""" +501 15 dataset """wn18rr""" +501 15 model """proje""" +501 15 loss """bceaftersigmoid""" +501 15 regularizer """no""" +501 15 optimizer """adam""" +501 15 training_loop """owa""" +501 15 negative_sampler """basic""" +501 15 evaluator """rankbased""" +501 16 dataset """wn18rr""" +501 16 model """proje""" +501 16 loss """bceaftersigmoid""" +501 16 regularizer """no""" +501 16 optimizer """adam""" +501 16 training_loop """owa""" +501 16 negative_sampler """basic""" +501 16 evaluator """rankbased""" +501 17 dataset """wn18rr""" +501 17 model """proje""" +501 17 loss """bceaftersigmoid""" +501 17 regularizer """no""" +501 17 optimizer """adam""" +501 17 training_loop """owa""" +501 17 negative_sampler """basic""" +501 17 evaluator """rankbased""" +501 18 dataset """wn18rr""" +501 18 model """proje""" +501 18 loss """bceaftersigmoid""" +501 18 regularizer """no""" +501 18 optimizer """adam""" +501 18 training_loop """owa""" +501 18 negative_sampler """basic""" +501 18 evaluator """rankbased""" +501 19 dataset """wn18rr""" +501 19 model """proje""" +501 19 loss """bceaftersigmoid""" +501 19 
regularizer """no""" +501 19 optimizer """adam""" +501 19 training_loop """owa""" +501 19 negative_sampler """basic""" +501 19 evaluator """rankbased""" +501 20 dataset """wn18rr""" +501 20 model """proje""" +501 20 loss """bceaftersigmoid""" +501 20 regularizer """no""" +501 20 optimizer """adam""" +501 20 training_loop """owa""" +501 20 negative_sampler """basic""" +501 20 evaluator """rankbased""" +501 21 dataset """wn18rr""" +501 21 model """proje""" +501 21 loss """bceaftersigmoid""" +501 21 regularizer """no""" +501 21 optimizer """adam""" +501 21 training_loop """owa""" +501 21 negative_sampler """basic""" +501 21 evaluator """rankbased""" +501 22 dataset """wn18rr""" +501 22 model """proje""" +501 22 loss """bceaftersigmoid""" +501 22 regularizer """no""" +501 22 optimizer """adam""" +501 22 training_loop """owa""" +501 22 negative_sampler """basic""" +501 22 evaluator """rankbased""" +501 23 dataset """wn18rr""" +501 23 model """proje""" +501 23 loss """bceaftersigmoid""" +501 23 regularizer """no""" +501 23 optimizer """adam""" +501 23 training_loop """owa""" +501 23 negative_sampler """basic""" +501 23 evaluator """rankbased""" +501 24 dataset """wn18rr""" +501 24 model """proje""" +501 24 loss """bceaftersigmoid""" +501 24 regularizer """no""" +501 24 optimizer """adam""" +501 24 training_loop """owa""" +501 24 negative_sampler """basic""" +501 24 evaluator """rankbased""" +501 25 dataset """wn18rr""" +501 25 model """proje""" +501 25 loss """bceaftersigmoid""" +501 25 regularizer """no""" +501 25 optimizer """adam""" +501 25 training_loop """owa""" +501 25 negative_sampler """basic""" +501 25 evaluator """rankbased""" +501 26 dataset """wn18rr""" +501 26 model """proje""" +501 26 loss """bceaftersigmoid""" +501 26 regularizer """no""" +501 26 optimizer """adam""" +501 26 training_loop """owa""" +501 26 negative_sampler """basic""" +501 26 evaluator """rankbased""" +501 27 dataset """wn18rr""" +501 27 model """proje""" +501 27 loss """bceaftersigmoid""" 
+501 27 regularizer """no""" +501 27 optimizer """adam""" +501 27 training_loop """owa""" +501 27 negative_sampler """basic""" +501 27 evaluator """rankbased""" +501 28 dataset """wn18rr""" +501 28 model """proje""" +501 28 loss """bceaftersigmoid""" +501 28 regularizer """no""" +501 28 optimizer """adam""" +501 28 training_loop """owa""" +501 28 negative_sampler """basic""" +501 28 evaluator """rankbased""" +501 29 dataset """wn18rr""" +501 29 model """proje""" +501 29 loss """bceaftersigmoid""" +501 29 regularizer """no""" +501 29 optimizer """adam""" +501 29 training_loop """owa""" +501 29 negative_sampler """basic""" +501 29 evaluator """rankbased""" +501 30 dataset """wn18rr""" +501 30 model """proje""" +501 30 loss """bceaftersigmoid""" +501 30 regularizer """no""" +501 30 optimizer """adam""" +501 30 training_loop """owa""" +501 30 negative_sampler """basic""" +501 30 evaluator """rankbased""" +501 31 dataset """wn18rr""" +501 31 model """proje""" +501 31 loss """bceaftersigmoid""" +501 31 regularizer """no""" +501 31 optimizer """adam""" +501 31 training_loop """owa""" +501 31 negative_sampler """basic""" +501 31 evaluator """rankbased""" +501 32 dataset """wn18rr""" +501 32 model """proje""" +501 32 loss """bceaftersigmoid""" +501 32 regularizer """no""" +501 32 optimizer """adam""" +501 32 training_loop """owa""" +501 32 negative_sampler """basic""" +501 32 evaluator """rankbased""" +501 33 dataset """wn18rr""" +501 33 model """proje""" +501 33 loss """bceaftersigmoid""" +501 33 regularizer """no""" +501 33 optimizer """adam""" +501 33 training_loop """owa""" +501 33 negative_sampler """basic""" +501 33 evaluator """rankbased""" +501 34 dataset """wn18rr""" +501 34 model """proje""" +501 34 loss """bceaftersigmoid""" +501 34 regularizer """no""" +501 34 optimizer """adam""" +501 34 training_loop """owa""" +501 34 negative_sampler """basic""" +501 34 evaluator """rankbased""" +501 35 dataset """wn18rr""" +501 35 model """proje""" +501 35 loss 
"""bceaftersigmoid""" +501 35 regularizer """no""" +501 35 optimizer """adam""" +501 35 training_loop """owa""" +501 35 negative_sampler """basic""" +501 35 evaluator """rankbased""" +501 36 dataset """wn18rr""" +501 36 model """proje""" +501 36 loss """bceaftersigmoid""" +501 36 regularizer """no""" +501 36 optimizer """adam""" +501 36 training_loop """owa""" +501 36 negative_sampler """basic""" +501 36 evaluator """rankbased""" +501 37 dataset """wn18rr""" +501 37 model """proje""" +501 37 loss """bceaftersigmoid""" +501 37 regularizer """no""" +501 37 optimizer """adam""" +501 37 training_loop """owa""" +501 37 negative_sampler """basic""" +501 37 evaluator """rankbased""" +501 38 dataset """wn18rr""" +501 38 model """proje""" +501 38 loss """bceaftersigmoid""" +501 38 regularizer """no""" +501 38 optimizer """adam""" +501 38 training_loop """owa""" +501 38 negative_sampler """basic""" +501 38 evaluator """rankbased""" +501 39 dataset """wn18rr""" +501 39 model """proje""" +501 39 loss """bceaftersigmoid""" +501 39 regularizer """no""" +501 39 optimizer """adam""" +501 39 training_loop """owa""" +501 39 negative_sampler """basic""" +501 39 evaluator """rankbased""" +501 40 dataset """wn18rr""" +501 40 model """proje""" +501 40 loss """bceaftersigmoid""" +501 40 regularizer """no""" +501 40 optimizer """adam""" +501 40 training_loop """owa""" +501 40 negative_sampler """basic""" +501 40 evaluator """rankbased""" +501 41 dataset """wn18rr""" +501 41 model """proje""" +501 41 loss """bceaftersigmoid""" +501 41 regularizer """no""" +501 41 optimizer """adam""" +501 41 training_loop """owa""" +501 41 negative_sampler """basic""" +501 41 evaluator """rankbased""" +501 42 dataset """wn18rr""" +501 42 model """proje""" +501 42 loss """bceaftersigmoid""" +501 42 regularizer """no""" +501 42 optimizer """adam""" +501 42 training_loop """owa""" +501 42 negative_sampler """basic""" +501 42 evaluator """rankbased""" +501 43 dataset """wn18rr""" +501 43 model """proje""" +501 
43 loss """bceaftersigmoid""" +501 43 regularizer """no""" +501 43 optimizer """adam""" +501 43 training_loop """owa""" +501 43 negative_sampler """basic""" +501 43 evaluator """rankbased""" +501 44 dataset """wn18rr""" +501 44 model """proje""" +501 44 loss """bceaftersigmoid""" +501 44 regularizer """no""" +501 44 optimizer """adam""" +501 44 training_loop """owa""" +501 44 negative_sampler """basic""" +501 44 evaluator """rankbased""" +501 45 dataset """wn18rr""" +501 45 model """proje""" +501 45 loss """bceaftersigmoid""" +501 45 regularizer """no""" +501 45 optimizer """adam""" +501 45 training_loop """owa""" +501 45 negative_sampler """basic""" +501 45 evaluator """rankbased""" +501 46 dataset """wn18rr""" +501 46 model """proje""" +501 46 loss """bceaftersigmoid""" +501 46 regularizer """no""" +501 46 optimizer """adam""" +501 46 training_loop """owa""" +501 46 negative_sampler """basic""" +501 46 evaluator """rankbased""" +501 47 dataset """wn18rr""" +501 47 model """proje""" +501 47 loss """bceaftersigmoid""" +501 47 regularizer """no""" +501 47 optimizer """adam""" +501 47 training_loop """owa""" +501 47 negative_sampler """basic""" +501 47 evaluator """rankbased""" +501 48 dataset """wn18rr""" +501 48 model """proje""" +501 48 loss """bceaftersigmoid""" +501 48 regularizer """no""" +501 48 optimizer """adam""" +501 48 training_loop """owa""" +501 48 negative_sampler """basic""" +501 48 evaluator """rankbased""" +501 49 dataset """wn18rr""" +501 49 model """proje""" +501 49 loss """bceaftersigmoid""" +501 49 regularizer """no""" +501 49 optimizer """adam""" +501 49 training_loop """owa""" +501 49 negative_sampler """basic""" +501 49 evaluator """rankbased""" +501 50 dataset """wn18rr""" +501 50 model """proje""" +501 50 loss """bceaftersigmoid""" +501 50 regularizer """no""" +501 50 optimizer """adam""" +501 50 training_loop """owa""" +501 50 negative_sampler """basic""" +501 50 evaluator """rankbased""" +501 51 dataset """wn18rr""" +501 51 model 
"""proje""" +501 51 loss """bceaftersigmoid""" +501 51 regularizer """no""" +501 51 optimizer """adam""" +501 51 training_loop """owa""" +501 51 negative_sampler """basic""" +501 51 evaluator """rankbased""" +501 52 dataset """wn18rr""" +501 52 model """proje""" +501 52 loss """bceaftersigmoid""" +501 52 regularizer """no""" +501 52 optimizer """adam""" +501 52 training_loop """owa""" +501 52 negative_sampler """basic""" +501 52 evaluator """rankbased""" +501 53 dataset """wn18rr""" +501 53 model """proje""" +501 53 loss """bceaftersigmoid""" +501 53 regularizer """no""" +501 53 optimizer """adam""" +501 53 training_loop """owa""" +501 53 negative_sampler """basic""" +501 53 evaluator """rankbased""" +501 54 dataset """wn18rr""" +501 54 model """proje""" +501 54 loss """bceaftersigmoid""" +501 54 regularizer """no""" +501 54 optimizer """adam""" +501 54 training_loop """owa""" +501 54 negative_sampler """basic""" +501 54 evaluator """rankbased""" +501 55 dataset """wn18rr""" +501 55 model """proje""" +501 55 loss """bceaftersigmoid""" +501 55 regularizer """no""" +501 55 optimizer """adam""" +501 55 training_loop """owa""" +501 55 negative_sampler """basic""" +501 55 evaluator """rankbased""" +501 56 dataset """wn18rr""" +501 56 model """proje""" +501 56 loss """bceaftersigmoid""" +501 56 regularizer """no""" +501 56 optimizer """adam""" +501 56 training_loop """owa""" +501 56 negative_sampler """basic""" +501 56 evaluator """rankbased""" +501 57 dataset """wn18rr""" +501 57 model """proje""" +501 57 loss """bceaftersigmoid""" +501 57 regularizer """no""" +501 57 optimizer """adam""" +501 57 training_loop """owa""" +501 57 negative_sampler """basic""" +501 57 evaluator """rankbased""" +501 58 dataset """wn18rr""" +501 58 model """proje""" +501 58 loss """bceaftersigmoid""" +501 58 regularizer """no""" +501 58 optimizer """adam""" +501 58 training_loop """owa""" +501 58 negative_sampler """basic""" +501 58 evaluator """rankbased""" +501 59 dataset """wn18rr""" +501 
59 model """proje""" +501 59 loss """bceaftersigmoid""" +501 59 regularizer """no""" +501 59 optimizer """adam""" +501 59 training_loop """owa""" +501 59 negative_sampler """basic""" +501 59 evaluator """rankbased""" +501 60 dataset """wn18rr""" +501 60 model """proje""" +501 60 loss """bceaftersigmoid""" +501 60 regularizer """no""" +501 60 optimizer """adam""" +501 60 training_loop """owa""" +501 60 negative_sampler """basic""" +501 60 evaluator """rankbased""" +501 61 dataset """wn18rr""" +501 61 model """proje""" +501 61 loss """bceaftersigmoid""" +501 61 regularizer """no""" +501 61 optimizer """adam""" +501 61 training_loop """owa""" +501 61 negative_sampler """basic""" +501 61 evaluator """rankbased""" +501 62 dataset """wn18rr""" +501 62 model """proje""" +501 62 loss """bceaftersigmoid""" +501 62 regularizer """no""" +501 62 optimizer """adam""" +501 62 training_loop """owa""" +501 62 negative_sampler """basic""" +501 62 evaluator """rankbased""" +501 63 dataset """wn18rr""" +501 63 model """proje""" +501 63 loss """bceaftersigmoid""" +501 63 regularizer """no""" +501 63 optimizer """adam""" +501 63 training_loop """owa""" +501 63 negative_sampler """basic""" +501 63 evaluator """rankbased""" +501 64 dataset """wn18rr""" +501 64 model """proje""" +501 64 loss """bceaftersigmoid""" +501 64 regularizer """no""" +501 64 optimizer """adam""" +501 64 training_loop """owa""" +501 64 negative_sampler """basic""" +501 64 evaluator """rankbased""" +501 65 dataset """wn18rr""" +501 65 model """proje""" +501 65 loss """bceaftersigmoid""" +501 65 regularizer """no""" +501 65 optimizer """adam""" +501 65 training_loop """owa""" +501 65 negative_sampler """basic""" +501 65 evaluator """rankbased""" +502 1 model.embedding_dim 1.0 +502 1 optimizer.lr 0.01568432067610921 +502 1 negative_sampler.num_negs_per_pos 35.0 +502 1 training.batch_size 1.0 +502 2 model.embedding_dim 0.0 +502 2 optimizer.lr 0.017588513499361153 +502 2 negative_sampler.num_negs_per_pos 70.0 +502 2 
training.batch_size 2.0 +502 3 model.embedding_dim 1.0 +502 3 optimizer.lr 0.008832104146245866 +502 3 negative_sampler.num_negs_per_pos 98.0 +502 3 training.batch_size 0.0 +502 4 model.embedding_dim 1.0 +502 4 optimizer.lr 0.009607109881444252 +502 4 negative_sampler.num_negs_per_pos 93.0 +502 4 training.batch_size 1.0 +502 5 model.embedding_dim 1.0 +502 5 optimizer.lr 0.04889789034153391 +502 5 negative_sampler.num_negs_per_pos 94.0 +502 5 training.batch_size 0.0 +502 6 model.embedding_dim 0.0 +502 6 optimizer.lr 0.06068649671941948 +502 6 negative_sampler.num_negs_per_pos 84.0 +502 6 training.batch_size 0.0 +502 7 model.embedding_dim 1.0 +502 7 optimizer.lr 0.0027154551358121682 +502 7 negative_sampler.num_negs_per_pos 1.0 +502 7 training.batch_size 2.0 +502 8 model.embedding_dim 2.0 +502 8 optimizer.lr 0.004338954681779291 +502 8 negative_sampler.num_negs_per_pos 22.0 +502 8 training.batch_size 1.0 +502 9 model.embedding_dim 2.0 +502 9 optimizer.lr 0.03964168846293331 +502 9 negative_sampler.num_negs_per_pos 33.0 +502 9 training.batch_size 2.0 +502 10 model.embedding_dim 1.0 +502 10 optimizer.lr 0.0019257781190916356 +502 10 negative_sampler.num_negs_per_pos 50.0 +502 10 training.batch_size 0.0 +502 11 model.embedding_dim 0.0 +502 11 optimizer.lr 0.0028813832447252283 +502 11 negative_sampler.num_negs_per_pos 46.0 +502 11 training.batch_size 1.0 +502 12 model.embedding_dim 2.0 +502 12 optimizer.lr 0.006160084289599982 +502 12 negative_sampler.num_negs_per_pos 75.0 +502 12 training.batch_size 0.0 +502 13 model.embedding_dim 2.0 +502 13 optimizer.lr 0.023190886003301485 +502 13 negative_sampler.num_negs_per_pos 98.0 +502 13 training.batch_size 2.0 +502 14 model.embedding_dim 2.0 +502 14 optimizer.lr 0.0054058576770054205 +502 14 negative_sampler.num_negs_per_pos 55.0 +502 14 training.batch_size 1.0 +502 15 model.embedding_dim 2.0 +502 15 optimizer.lr 0.02494103787505113 +502 15 negative_sampler.num_negs_per_pos 38.0 +502 15 training.batch_size 1.0 +502 16 
model.embedding_dim 2.0 +502 16 optimizer.lr 0.0014374409231225028 +502 16 negative_sampler.num_negs_per_pos 45.0 +502 16 training.batch_size 0.0 +502 17 model.embedding_dim 0.0 +502 17 optimizer.lr 0.001996613993403452 +502 17 negative_sampler.num_negs_per_pos 16.0 +502 17 training.batch_size 0.0 +502 18 model.embedding_dim 1.0 +502 18 optimizer.lr 0.001865407248287456 +502 18 negative_sampler.num_negs_per_pos 63.0 +502 18 training.batch_size 1.0 +502 19 model.embedding_dim 1.0 +502 19 optimizer.lr 0.0252179416508926 +502 19 negative_sampler.num_negs_per_pos 40.0 +502 19 training.batch_size 1.0 +502 20 model.embedding_dim 0.0 +502 20 optimizer.lr 0.0013578251225117846 +502 20 negative_sampler.num_negs_per_pos 88.0 +502 20 training.batch_size 2.0 +502 21 model.embedding_dim 2.0 +502 21 optimizer.lr 0.0018742138527369802 +502 21 negative_sampler.num_negs_per_pos 77.0 +502 21 training.batch_size 0.0 +502 22 model.embedding_dim 0.0 +502 22 optimizer.lr 0.02426380810640241 +502 22 negative_sampler.num_negs_per_pos 77.0 +502 22 training.batch_size 0.0 +502 23 model.embedding_dim 1.0 +502 23 optimizer.lr 0.0026453992600924737 +502 23 negative_sampler.num_negs_per_pos 77.0 +502 23 training.batch_size 0.0 +502 24 model.embedding_dim 2.0 +502 24 optimizer.lr 0.007651926025612568 +502 24 negative_sampler.num_negs_per_pos 9.0 +502 24 training.batch_size 0.0 +502 25 model.embedding_dim 1.0 +502 25 optimizer.lr 0.07318357945332149 +502 25 negative_sampler.num_negs_per_pos 93.0 +502 25 training.batch_size 0.0 +502 26 model.embedding_dim 1.0 +502 26 optimizer.lr 0.09735630465958427 +502 26 negative_sampler.num_negs_per_pos 57.0 +502 26 training.batch_size 1.0 +502 27 model.embedding_dim 0.0 +502 27 optimizer.lr 0.0899804060883506 +502 27 negative_sampler.num_negs_per_pos 31.0 +502 27 training.batch_size 1.0 +502 28 model.embedding_dim 0.0 +502 28 optimizer.lr 0.012203834274657842 +502 28 negative_sampler.num_negs_per_pos 10.0 +502 28 training.batch_size 1.0 +502 29 
model.embedding_dim 1.0 +502 29 optimizer.lr 0.03643175910281521 +502 29 negative_sampler.num_negs_per_pos 55.0 +502 29 training.batch_size 1.0 +502 30 model.embedding_dim 1.0 +502 30 optimizer.lr 0.003843049460370667 +502 30 negative_sampler.num_negs_per_pos 12.0 +502 30 training.batch_size 1.0 +502 31 model.embedding_dim 1.0 +502 31 optimizer.lr 0.07178696143468946 +502 31 negative_sampler.num_negs_per_pos 51.0 +502 31 training.batch_size 0.0 +502 32 model.embedding_dim 2.0 +502 32 optimizer.lr 0.0010436714134999963 +502 32 negative_sampler.num_negs_per_pos 29.0 +502 32 training.batch_size 1.0 +502 33 model.embedding_dim 1.0 +502 33 optimizer.lr 0.012930057255541881 +502 33 negative_sampler.num_negs_per_pos 11.0 +502 33 training.batch_size 2.0 +502 34 model.embedding_dim 0.0 +502 34 optimizer.lr 0.08165219418544822 +502 34 negative_sampler.num_negs_per_pos 32.0 +502 34 training.batch_size 0.0 +502 35 model.embedding_dim 2.0 +502 35 optimizer.lr 0.006146058814479175 +502 35 negative_sampler.num_negs_per_pos 15.0 +502 35 training.batch_size 0.0 +502 36 model.embedding_dim 0.0 +502 36 optimizer.lr 0.01727331489845985 +502 36 negative_sampler.num_negs_per_pos 63.0 +502 36 training.batch_size 0.0 +502 37 model.embedding_dim 2.0 +502 37 optimizer.lr 0.01901029719289047 +502 37 negative_sampler.num_negs_per_pos 58.0 +502 37 training.batch_size 2.0 +502 38 model.embedding_dim 2.0 +502 38 optimizer.lr 0.07576073501710001 +502 38 negative_sampler.num_negs_per_pos 61.0 +502 38 training.batch_size 2.0 +502 39 model.embedding_dim 1.0 +502 39 optimizer.lr 0.01288163986552936 +502 39 negative_sampler.num_negs_per_pos 96.0 +502 39 training.batch_size 1.0 +502 40 model.embedding_dim 0.0 +502 40 optimizer.lr 0.0030126268489963606 +502 40 negative_sampler.num_negs_per_pos 52.0 +502 40 training.batch_size 0.0 +502 41 model.embedding_dim 0.0 +502 41 optimizer.lr 0.004621846121623995 +502 41 negative_sampler.num_negs_per_pos 18.0 +502 41 training.batch_size 0.0 +502 42 
model.embedding_dim 0.0 +502 42 optimizer.lr 0.09426682149616815 +502 42 negative_sampler.num_negs_per_pos 39.0 +502 42 training.batch_size 2.0 +502 43 model.embedding_dim 2.0 +502 43 optimizer.lr 0.02967954995865288 +502 43 negative_sampler.num_negs_per_pos 43.0 +502 43 training.batch_size 0.0 +502 44 model.embedding_dim 1.0 +502 44 optimizer.lr 0.005687458306637927 +502 44 negative_sampler.num_negs_per_pos 88.0 +502 44 training.batch_size 2.0 +502 45 model.embedding_dim 2.0 +502 45 optimizer.lr 0.019116496512891677 +502 45 negative_sampler.num_negs_per_pos 58.0 +502 45 training.batch_size 2.0 +502 46 model.embedding_dim 1.0 +502 46 optimizer.lr 0.01973983843845851 +502 46 negative_sampler.num_negs_per_pos 82.0 +502 46 training.batch_size 1.0 +502 47 model.embedding_dim 0.0 +502 47 optimizer.lr 0.019432730213448487 +502 47 negative_sampler.num_negs_per_pos 14.0 +502 47 training.batch_size 1.0 +502 48 model.embedding_dim 1.0 +502 48 optimizer.lr 0.017332029206632512 +502 48 negative_sampler.num_negs_per_pos 91.0 +502 48 training.batch_size 1.0 +502 49 model.embedding_dim 1.0 +502 49 optimizer.lr 0.013880547804697033 +502 49 negative_sampler.num_negs_per_pos 32.0 +502 49 training.batch_size 2.0 +502 50 model.embedding_dim 2.0 +502 50 optimizer.lr 0.05333144750635121 +502 50 negative_sampler.num_negs_per_pos 94.0 +502 50 training.batch_size 0.0 +502 51 model.embedding_dim 1.0 +502 51 optimizer.lr 0.0029853639274996582 +502 51 negative_sampler.num_negs_per_pos 18.0 +502 51 training.batch_size 2.0 +502 52 model.embedding_dim 2.0 +502 52 optimizer.lr 0.018730240500302656 +502 52 negative_sampler.num_negs_per_pos 93.0 +502 52 training.batch_size 1.0 +502 53 model.embedding_dim 1.0 +502 53 optimizer.lr 0.020292222027137246 +502 53 negative_sampler.num_negs_per_pos 16.0 +502 53 training.batch_size 2.0 +502 54 model.embedding_dim 2.0 +502 54 optimizer.lr 0.030855980642151025 +502 54 negative_sampler.num_negs_per_pos 13.0 +502 54 training.batch_size 2.0 +502 55 
model.embedding_dim 0.0 +502 55 optimizer.lr 0.004126985091543252 +502 55 negative_sampler.num_negs_per_pos 49.0 +502 55 training.batch_size 2.0 +502 56 model.embedding_dim 0.0 +502 56 optimizer.lr 0.043085874271463744 +502 56 negative_sampler.num_negs_per_pos 26.0 +502 56 training.batch_size 1.0 +502 57 model.embedding_dim 2.0 +502 57 optimizer.lr 0.04903271639625876 +502 57 negative_sampler.num_negs_per_pos 75.0 +502 57 training.batch_size 2.0 +502 58 model.embedding_dim 1.0 +502 58 optimizer.lr 0.012081491972478205 +502 58 negative_sampler.num_negs_per_pos 71.0 +502 58 training.batch_size 0.0 +502 59 model.embedding_dim 1.0 +502 59 optimizer.lr 0.0038367854212656943 +502 59 negative_sampler.num_negs_per_pos 4.0 +502 59 training.batch_size 0.0 +502 60 model.embedding_dim 0.0 +502 60 optimizer.lr 0.007722635253987698 +502 60 negative_sampler.num_negs_per_pos 88.0 +502 60 training.batch_size 2.0 +502 61 model.embedding_dim 1.0 +502 61 optimizer.lr 0.004065557861907827 +502 61 negative_sampler.num_negs_per_pos 23.0 +502 61 training.batch_size 0.0 +502 62 model.embedding_dim 1.0 +502 62 optimizer.lr 0.08931502649269979 +502 62 negative_sampler.num_negs_per_pos 87.0 +502 62 training.batch_size 2.0 +502 63 model.embedding_dim 2.0 +502 63 optimizer.lr 0.003009586893611828 +502 63 negative_sampler.num_negs_per_pos 37.0 +502 63 training.batch_size 0.0 +502 1 dataset """wn18rr""" +502 1 model """proje""" +502 1 loss """softplus""" +502 1 regularizer """no""" +502 1 optimizer """adam""" +502 1 training_loop """owa""" +502 1 negative_sampler """basic""" +502 1 evaluator """rankbased""" +502 2 dataset """wn18rr""" +502 2 model """proje""" +502 2 loss """softplus""" +502 2 regularizer """no""" +502 2 optimizer """adam""" +502 2 training_loop """owa""" +502 2 negative_sampler """basic""" +502 2 evaluator """rankbased""" +502 3 dataset """wn18rr""" +502 3 model """proje""" +502 3 loss """softplus""" +502 3 regularizer """no""" +502 3 optimizer """adam""" +502 3 training_loop 
"""owa""" +502 3 negative_sampler """basic""" +502 3 evaluator """rankbased""" +502 4 dataset """wn18rr""" +502 4 model """proje""" +502 4 loss """softplus""" +502 4 regularizer """no""" +502 4 optimizer """adam""" +502 4 training_loop """owa""" +502 4 negative_sampler """basic""" +502 4 evaluator """rankbased""" +502 5 dataset """wn18rr""" +502 5 model """proje""" +502 5 loss """softplus""" +502 5 regularizer """no""" +502 5 optimizer """adam""" +502 5 training_loop """owa""" +502 5 negative_sampler """basic""" +502 5 evaluator """rankbased""" +502 6 dataset """wn18rr""" +502 6 model """proje""" +502 6 loss """softplus""" +502 6 regularizer """no""" +502 6 optimizer """adam""" +502 6 training_loop """owa""" +502 6 negative_sampler """basic""" +502 6 evaluator """rankbased""" +502 7 dataset """wn18rr""" +502 7 model """proje""" +502 7 loss """softplus""" +502 7 regularizer """no""" +502 7 optimizer """adam""" +502 7 training_loop """owa""" +502 7 negative_sampler """basic""" +502 7 evaluator """rankbased""" +502 8 dataset """wn18rr""" +502 8 model """proje""" +502 8 loss """softplus""" +502 8 regularizer """no""" +502 8 optimizer """adam""" +502 8 training_loop """owa""" +502 8 negative_sampler """basic""" +502 8 evaluator """rankbased""" +502 9 dataset """wn18rr""" +502 9 model """proje""" +502 9 loss """softplus""" +502 9 regularizer """no""" +502 9 optimizer """adam""" +502 9 training_loop """owa""" +502 9 negative_sampler """basic""" +502 9 evaluator """rankbased""" +502 10 dataset """wn18rr""" +502 10 model """proje""" +502 10 loss """softplus""" +502 10 regularizer """no""" +502 10 optimizer """adam""" +502 10 training_loop """owa""" +502 10 negative_sampler """basic""" +502 10 evaluator """rankbased""" +502 11 dataset """wn18rr""" +502 11 model """proje""" +502 11 loss """softplus""" +502 11 regularizer """no""" +502 11 optimizer """adam""" +502 11 training_loop """owa""" +502 11 negative_sampler """basic""" +502 11 evaluator """rankbased""" +502 12 dataset 
"""wn18rr""" +502 12 model """proje""" +502 12 loss """softplus""" +502 12 regularizer """no""" +502 12 optimizer """adam""" +502 12 training_loop """owa""" +502 12 negative_sampler """basic""" +502 12 evaluator """rankbased""" +502 13 dataset """wn18rr""" +502 13 model """proje""" +502 13 loss """softplus""" +502 13 regularizer """no""" +502 13 optimizer """adam""" +502 13 training_loop """owa""" +502 13 negative_sampler """basic""" +502 13 evaluator """rankbased""" +502 14 dataset """wn18rr""" +502 14 model """proje""" +502 14 loss """softplus""" +502 14 regularizer """no""" +502 14 optimizer """adam""" +502 14 training_loop """owa""" +502 14 negative_sampler """basic""" +502 14 evaluator """rankbased""" +502 15 dataset """wn18rr""" +502 15 model """proje""" +502 15 loss """softplus""" +502 15 regularizer """no""" +502 15 optimizer """adam""" +502 15 training_loop """owa""" +502 15 negative_sampler """basic""" +502 15 evaluator """rankbased""" +502 16 dataset """wn18rr""" +502 16 model """proje""" +502 16 loss """softplus""" +502 16 regularizer """no""" +502 16 optimizer """adam""" +502 16 training_loop """owa""" +502 16 negative_sampler """basic""" +502 16 evaluator """rankbased""" +502 17 dataset """wn18rr""" +502 17 model """proje""" +502 17 loss """softplus""" +502 17 regularizer """no""" +502 17 optimizer """adam""" +502 17 training_loop """owa""" +502 17 negative_sampler """basic""" +502 17 evaluator """rankbased""" +502 18 dataset """wn18rr""" +502 18 model """proje""" +502 18 loss """softplus""" +502 18 regularizer """no""" +502 18 optimizer """adam""" +502 18 training_loop """owa""" +502 18 negative_sampler """basic""" +502 18 evaluator """rankbased""" +502 19 dataset """wn18rr""" +502 19 model """proje""" +502 19 loss """softplus""" +502 19 regularizer """no""" +502 19 optimizer """adam""" +502 19 training_loop """owa""" +502 19 negative_sampler """basic""" +502 19 evaluator """rankbased""" +502 20 dataset """wn18rr""" +502 20 model """proje""" +502 20 
loss """softplus""" +502 20 regularizer """no""" +502 20 optimizer """adam""" +502 20 training_loop """owa""" +502 20 negative_sampler """basic""" +502 20 evaluator """rankbased""" +502 21 dataset """wn18rr""" +502 21 model """proje""" +502 21 loss """softplus""" +502 21 regularizer """no""" +502 21 optimizer """adam""" +502 21 training_loop """owa""" +502 21 negative_sampler """basic""" +502 21 evaluator """rankbased""" +502 22 dataset """wn18rr""" +502 22 model """proje""" +502 22 loss """softplus""" +502 22 regularizer """no""" +502 22 optimizer """adam""" +502 22 training_loop """owa""" +502 22 negative_sampler """basic""" +502 22 evaluator """rankbased""" +502 23 dataset """wn18rr""" +502 23 model """proje""" +502 23 loss """softplus""" +502 23 regularizer """no""" +502 23 optimizer """adam""" +502 23 training_loop """owa""" +502 23 negative_sampler """basic""" +502 23 evaluator """rankbased""" +502 24 dataset """wn18rr""" +502 24 model """proje""" +502 24 loss """softplus""" +502 24 regularizer """no""" +502 24 optimizer """adam""" +502 24 training_loop """owa""" +502 24 negative_sampler """basic""" +502 24 evaluator """rankbased""" +502 25 dataset """wn18rr""" +502 25 model """proje""" +502 25 loss """softplus""" +502 25 regularizer """no""" +502 25 optimizer """adam""" +502 25 training_loop """owa""" +502 25 negative_sampler """basic""" +502 25 evaluator """rankbased""" +502 26 dataset """wn18rr""" +502 26 model """proje""" +502 26 loss """softplus""" +502 26 regularizer """no""" +502 26 optimizer """adam""" +502 26 training_loop """owa""" +502 26 negative_sampler """basic""" +502 26 evaluator """rankbased""" +502 27 dataset """wn18rr""" +502 27 model """proje""" +502 27 loss """softplus""" +502 27 regularizer """no""" +502 27 optimizer """adam""" +502 27 training_loop """owa""" +502 27 negative_sampler """basic""" +502 27 evaluator """rankbased""" +502 28 dataset """wn18rr""" +502 28 model """proje""" +502 28 loss """softplus""" +502 28 regularizer 
"""no""" +502 28 optimizer """adam""" +502 28 training_loop """owa""" +502 28 negative_sampler """basic""" +502 28 evaluator """rankbased""" +502 29 dataset """wn18rr""" +502 29 model """proje""" +502 29 loss """softplus""" +502 29 regularizer """no""" +502 29 optimizer """adam""" +502 29 training_loop """owa""" +502 29 negative_sampler """basic""" +502 29 evaluator """rankbased""" +502 30 dataset """wn18rr""" +502 30 model """proje""" +502 30 loss """softplus""" +502 30 regularizer """no""" +502 30 optimizer """adam""" +502 30 training_loop """owa""" +502 30 negative_sampler """basic""" +502 30 evaluator """rankbased""" +502 31 dataset """wn18rr""" +502 31 model """proje""" +502 31 loss """softplus""" +502 31 regularizer """no""" +502 31 optimizer """adam""" +502 31 training_loop """owa""" +502 31 negative_sampler """basic""" +502 31 evaluator """rankbased""" +502 32 dataset """wn18rr""" +502 32 model """proje""" +502 32 loss """softplus""" +502 32 regularizer """no""" +502 32 optimizer """adam""" +502 32 training_loop """owa""" +502 32 negative_sampler """basic""" +502 32 evaluator """rankbased""" +502 33 dataset """wn18rr""" +502 33 model """proje""" +502 33 loss """softplus""" +502 33 regularizer """no""" +502 33 optimizer """adam""" +502 33 training_loop """owa""" +502 33 negative_sampler """basic""" +502 33 evaluator """rankbased""" +502 34 dataset """wn18rr""" +502 34 model """proje""" +502 34 loss """softplus""" +502 34 regularizer """no""" +502 34 optimizer """adam""" +502 34 training_loop """owa""" +502 34 negative_sampler """basic""" +502 34 evaluator """rankbased""" +502 35 dataset """wn18rr""" +502 35 model """proje""" +502 35 loss """softplus""" +502 35 regularizer """no""" +502 35 optimizer """adam""" +502 35 training_loop """owa""" +502 35 negative_sampler """basic""" +502 35 evaluator """rankbased""" +502 36 dataset """wn18rr""" +502 36 model """proje""" +502 36 loss """softplus""" +502 36 regularizer """no""" +502 36 optimizer """adam""" +502 36 
training_loop """owa""" +502 36 negative_sampler """basic""" +502 36 evaluator """rankbased""" +502 37 dataset """wn18rr""" +502 37 model """proje""" +502 37 loss """softplus""" +502 37 regularizer """no""" +502 37 optimizer """adam""" +502 37 training_loop """owa""" +502 37 negative_sampler """basic""" +502 37 evaluator """rankbased""" +502 38 dataset """wn18rr""" +502 38 model """proje""" +502 38 loss """softplus""" +502 38 regularizer """no""" +502 38 optimizer """adam""" +502 38 training_loop """owa""" +502 38 negative_sampler """basic""" +502 38 evaluator """rankbased""" +502 39 dataset """wn18rr""" +502 39 model """proje""" +502 39 loss """softplus""" +502 39 regularizer """no""" +502 39 optimizer """adam""" +502 39 training_loop """owa""" +502 39 negative_sampler """basic""" +502 39 evaluator """rankbased""" +502 40 dataset """wn18rr""" +502 40 model """proje""" +502 40 loss """softplus""" +502 40 regularizer """no""" +502 40 optimizer """adam""" +502 40 training_loop """owa""" +502 40 negative_sampler """basic""" +502 40 evaluator """rankbased""" +502 41 dataset """wn18rr""" +502 41 model """proje""" +502 41 loss """softplus""" +502 41 regularizer """no""" +502 41 optimizer """adam""" +502 41 training_loop """owa""" +502 41 negative_sampler """basic""" +502 41 evaluator """rankbased""" +502 42 dataset """wn18rr""" +502 42 model """proje""" +502 42 loss """softplus""" +502 42 regularizer """no""" +502 42 optimizer """adam""" +502 42 training_loop """owa""" +502 42 negative_sampler """basic""" +502 42 evaluator """rankbased""" +502 43 dataset """wn18rr""" +502 43 model """proje""" +502 43 loss """softplus""" +502 43 regularizer """no""" +502 43 optimizer """adam""" +502 43 training_loop """owa""" +502 43 negative_sampler """basic""" +502 43 evaluator """rankbased""" +502 44 dataset """wn18rr""" +502 44 model """proje""" +502 44 loss """softplus""" +502 44 regularizer """no""" +502 44 optimizer """adam""" +502 44 training_loop """owa""" +502 44 
negative_sampler """basic""" +502 44 evaluator """rankbased""" +502 45 dataset """wn18rr""" +502 45 model """proje""" +502 45 loss """softplus""" +502 45 regularizer """no""" +502 45 optimizer """adam""" +502 45 training_loop """owa""" +502 45 negative_sampler """basic""" +502 45 evaluator """rankbased""" +502 46 dataset """wn18rr""" +502 46 model """proje""" +502 46 loss """softplus""" +502 46 regularizer """no""" +502 46 optimizer """adam""" +502 46 training_loop """owa""" +502 46 negative_sampler """basic""" +502 46 evaluator """rankbased""" +502 47 dataset """wn18rr""" +502 47 model """proje""" +502 47 loss """softplus""" +502 47 regularizer """no""" +502 47 optimizer """adam""" +502 47 training_loop """owa""" +502 47 negative_sampler """basic""" +502 47 evaluator """rankbased""" +502 48 dataset """wn18rr""" +502 48 model """proje""" +502 48 loss """softplus""" +502 48 regularizer """no""" +502 48 optimizer """adam""" +502 48 training_loop """owa""" +502 48 negative_sampler """basic""" +502 48 evaluator """rankbased""" +502 49 dataset """wn18rr""" +502 49 model """proje""" +502 49 loss """softplus""" +502 49 regularizer """no""" +502 49 optimizer """adam""" +502 49 training_loop """owa""" +502 49 negative_sampler """basic""" +502 49 evaluator """rankbased""" +502 50 dataset """wn18rr""" +502 50 model """proje""" +502 50 loss """softplus""" +502 50 regularizer """no""" +502 50 optimizer """adam""" +502 50 training_loop """owa""" +502 50 negative_sampler """basic""" +502 50 evaluator """rankbased""" +502 51 dataset """wn18rr""" +502 51 model """proje""" +502 51 loss """softplus""" +502 51 regularizer """no""" +502 51 optimizer """adam""" +502 51 training_loop """owa""" +502 51 negative_sampler """basic""" +502 51 evaluator """rankbased""" +502 52 dataset """wn18rr""" +502 52 model """proje""" +502 52 loss """softplus""" +502 52 regularizer """no""" +502 52 optimizer """adam""" +502 52 training_loop """owa""" +502 52 negative_sampler """basic""" +502 52 evaluator 
"""rankbased""" +502 53 dataset """wn18rr""" +502 53 model """proje""" +502 53 loss """softplus""" +502 53 regularizer """no""" +502 53 optimizer """adam""" +502 53 training_loop """owa""" +502 53 negative_sampler """basic""" +502 53 evaluator """rankbased""" +502 54 dataset """wn18rr""" +502 54 model """proje""" +502 54 loss """softplus""" +502 54 regularizer """no""" +502 54 optimizer """adam""" +502 54 training_loop """owa""" +502 54 negative_sampler """basic""" +502 54 evaluator """rankbased""" +502 55 dataset """wn18rr""" +502 55 model """proje""" +502 55 loss """softplus""" +502 55 regularizer """no""" +502 55 optimizer """adam""" +502 55 training_loop """owa""" +502 55 negative_sampler """basic""" +502 55 evaluator """rankbased""" +502 56 dataset """wn18rr""" +502 56 model """proje""" +502 56 loss """softplus""" +502 56 regularizer """no""" +502 56 optimizer """adam""" +502 56 training_loop """owa""" +502 56 negative_sampler """basic""" +502 56 evaluator """rankbased""" +502 57 dataset """wn18rr""" +502 57 model """proje""" +502 57 loss """softplus""" +502 57 regularizer """no""" +502 57 optimizer """adam""" +502 57 training_loop """owa""" +502 57 negative_sampler """basic""" +502 57 evaluator """rankbased""" +502 58 dataset """wn18rr""" +502 58 model """proje""" +502 58 loss """softplus""" +502 58 regularizer """no""" +502 58 optimizer """adam""" +502 58 training_loop """owa""" +502 58 negative_sampler """basic""" +502 58 evaluator """rankbased""" +502 59 dataset """wn18rr""" +502 59 model """proje""" +502 59 loss """softplus""" +502 59 regularizer """no""" +502 59 optimizer """adam""" +502 59 training_loop """owa""" +502 59 negative_sampler """basic""" +502 59 evaluator """rankbased""" +502 60 dataset """wn18rr""" +502 60 model """proje""" +502 60 loss """softplus""" +502 60 regularizer """no""" +502 60 optimizer """adam""" +502 60 training_loop """owa""" +502 60 negative_sampler """basic""" +502 60 evaluator """rankbased""" +502 61 dataset """wn18rr""" 
+502 61 model """proje""" +502 61 loss """softplus""" +502 61 regularizer """no""" +502 61 optimizer """adam""" +502 61 training_loop """owa""" +502 61 negative_sampler """basic""" +502 61 evaluator """rankbased""" +502 62 dataset """wn18rr""" +502 62 model """proje""" +502 62 loss """softplus""" +502 62 regularizer """no""" +502 62 optimizer """adam""" +502 62 training_loop """owa""" +502 62 negative_sampler """basic""" +502 62 evaluator """rankbased""" +502 63 dataset """wn18rr""" +502 63 model """proje""" +502 63 loss """softplus""" +502 63 regularizer """no""" +502 63 optimizer """adam""" +502 63 training_loop """owa""" +502 63 negative_sampler """basic""" +502 63 evaluator """rankbased""" +503 1 model.embedding_dim 0.0 +503 1 optimizer.lr 0.0042891705183781105 +503 1 negative_sampler.num_negs_per_pos 54.0 +503 1 training.batch_size 0.0 +503 2 model.embedding_dim 1.0 +503 2 optimizer.lr 0.016908324836029328 +503 2 negative_sampler.num_negs_per_pos 39.0 +503 2 training.batch_size 0.0 +503 3 model.embedding_dim 1.0 +503 3 optimizer.lr 0.030835488905750028 +503 3 negative_sampler.num_negs_per_pos 29.0 +503 3 training.batch_size 1.0 +503 4 model.embedding_dim 2.0 +503 4 optimizer.lr 0.001095643557696043 +503 4 negative_sampler.num_negs_per_pos 38.0 +503 4 training.batch_size 2.0 +503 5 model.embedding_dim 2.0 +503 5 optimizer.lr 0.0040214112722269935 +503 5 negative_sampler.num_negs_per_pos 44.0 +503 5 training.batch_size 2.0 +503 6 model.embedding_dim 0.0 +503 6 optimizer.lr 0.0010341363187112674 +503 6 negative_sampler.num_negs_per_pos 63.0 +503 6 training.batch_size 0.0 +503 7 model.embedding_dim 2.0 +503 7 optimizer.lr 0.002935467921259322 +503 7 negative_sampler.num_negs_per_pos 88.0 +503 7 training.batch_size 0.0 +503 8 model.embedding_dim 0.0 +503 8 optimizer.lr 0.012960094722507495 +503 8 negative_sampler.num_negs_per_pos 41.0 +503 8 training.batch_size 1.0 +503 9 model.embedding_dim 1.0 +503 9 optimizer.lr 0.05179878617993986 +503 9 
negative_sampler.num_negs_per_pos 6.0 +503 9 training.batch_size 0.0 +503 10 model.embedding_dim 0.0 +503 10 optimizer.lr 0.022195484763944 +503 10 negative_sampler.num_negs_per_pos 23.0 +503 10 training.batch_size 2.0 +503 11 model.embedding_dim 0.0 +503 11 optimizer.lr 0.05791554535050637 +503 11 negative_sampler.num_negs_per_pos 85.0 +503 11 training.batch_size 1.0 +503 12 model.embedding_dim 1.0 +503 12 optimizer.lr 0.0034481341474495763 +503 12 negative_sampler.num_negs_per_pos 21.0 +503 12 training.batch_size 1.0 +503 13 model.embedding_dim 0.0 +503 13 optimizer.lr 0.08536544608017041 +503 13 negative_sampler.num_negs_per_pos 7.0 +503 13 training.batch_size 0.0 +503 14 model.embedding_dim 2.0 +503 14 optimizer.lr 0.00606488924931917 +503 14 negative_sampler.num_negs_per_pos 6.0 +503 14 training.batch_size 0.0 +503 15 model.embedding_dim 0.0 +503 15 optimizer.lr 0.0016854512429379251 +503 15 negative_sampler.num_negs_per_pos 8.0 +503 15 training.batch_size 2.0 +503 16 model.embedding_dim 1.0 +503 16 optimizer.lr 0.0022931587253955496 +503 16 negative_sampler.num_negs_per_pos 15.0 +503 16 training.batch_size 0.0 +503 17 model.embedding_dim 1.0 +503 17 optimizer.lr 0.05642569430069589 +503 17 negative_sampler.num_negs_per_pos 52.0 +503 17 training.batch_size 1.0 +503 18 model.embedding_dim 2.0 +503 18 optimizer.lr 0.02236904122422282 +503 18 negative_sampler.num_negs_per_pos 2.0 +503 18 training.batch_size 0.0 +503 19 model.embedding_dim 2.0 +503 19 optimizer.lr 0.0021767494864643527 +503 19 negative_sampler.num_negs_per_pos 89.0 +503 19 training.batch_size 0.0 +503 20 model.embedding_dim 2.0 +503 20 optimizer.lr 0.0026318819494491096 +503 20 negative_sampler.num_negs_per_pos 52.0 +503 20 training.batch_size 0.0 +503 21 model.embedding_dim 2.0 +503 21 optimizer.lr 0.0011180709972569184 +503 21 negative_sampler.num_negs_per_pos 1.0 +503 21 training.batch_size 1.0 +503 22 model.embedding_dim 1.0 +503 22 optimizer.lr 0.003344010194029017 +503 22 
negative_sampler.num_negs_per_pos 36.0 +503 22 training.batch_size 2.0 +503 23 model.embedding_dim 0.0 +503 23 optimizer.lr 0.006618081163955916 +503 23 negative_sampler.num_negs_per_pos 1.0 +503 23 training.batch_size 0.0 +503 24 model.embedding_dim 0.0 +503 24 optimizer.lr 0.0032127166954857784 +503 24 negative_sampler.num_negs_per_pos 45.0 +503 24 training.batch_size 2.0 +503 25 model.embedding_dim 2.0 +503 25 optimizer.lr 0.007431449590295555 +503 25 negative_sampler.num_negs_per_pos 80.0 +503 25 training.batch_size 2.0 +503 26 model.embedding_dim 1.0 +503 26 optimizer.lr 0.04862977736770684 +503 26 negative_sampler.num_negs_per_pos 0.0 +503 26 training.batch_size 2.0 +503 27 model.embedding_dim 0.0 +503 27 optimizer.lr 0.01041245824347478 +503 27 negative_sampler.num_negs_per_pos 32.0 +503 27 training.batch_size 0.0 +503 28 model.embedding_dim 2.0 +503 28 optimizer.lr 0.0070717828174550225 +503 28 negative_sampler.num_negs_per_pos 13.0 +503 28 training.batch_size 0.0 +503 29 model.embedding_dim 0.0 +503 29 optimizer.lr 0.011407226347713623 +503 29 negative_sampler.num_negs_per_pos 65.0 +503 29 training.batch_size 0.0 +503 30 model.embedding_dim 2.0 +503 30 optimizer.lr 0.06019863908593379 +503 30 negative_sampler.num_negs_per_pos 79.0 +503 30 training.batch_size 0.0 +503 31 model.embedding_dim 0.0 +503 31 optimizer.lr 0.003179645864799511 +503 31 negative_sampler.num_negs_per_pos 16.0 +503 31 training.batch_size 2.0 +503 32 model.embedding_dim 0.0 +503 32 optimizer.lr 0.008322874953217435 +503 32 negative_sampler.num_negs_per_pos 9.0 +503 32 training.batch_size 2.0 +503 33 model.embedding_dim 1.0 +503 33 optimizer.lr 0.006181521704937695 +503 33 negative_sampler.num_negs_per_pos 46.0 +503 33 training.batch_size 1.0 +503 34 model.embedding_dim 0.0 +503 34 optimizer.lr 0.05460014689459308 +503 34 negative_sampler.num_negs_per_pos 68.0 +503 34 training.batch_size 0.0 +503 35 model.embedding_dim 1.0 +503 35 optimizer.lr 0.051955233489335395 +503 35 
negative_sampler.num_negs_per_pos 13.0 +503 35 training.batch_size 0.0 +503 36 model.embedding_dim 1.0 +503 36 optimizer.lr 0.0337450300531701 +503 36 negative_sampler.num_negs_per_pos 12.0 +503 36 training.batch_size 0.0 +503 37 model.embedding_dim 0.0 +503 37 optimizer.lr 0.006819056796297028 +503 37 negative_sampler.num_negs_per_pos 70.0 +503 37 training.batch_size 1.0 +503 38 model.embedding_dim 2.0 +503 38 optimizer.lr 0.0038607823287653683 +503 38 negative_sampler.num_negs_per_pos 98.0 +503 38 training.batch_size 0.0 +503 39 model.embedding_dim 2.0 +503 39 optimizer.lr 0.005477227778082579 +503 39 negative_sampler.num_negs_per_pos 59.0 +503 39 training.batch_size 1.0 +503 40 model.embedding_dim 2.0 +503 40 optimizer.lr 0.015330446979851893 +503 40 negative_sampler.num_negs_per_pos 45.0 +503 40 training.batch_size 1.0 +503 41 model.embedding_dim 1.0 +503 41 optimizer.lr 0.053224349568878226 +503 41 negative_sampler.num_negs_per_pos 47.0 +503 41 training.batch_size 0.0 +503 42 model.embedding_dim 0.0 +503 42 optimizer.lr 0.06633111475854155 +503 42 negative_sampler.num_negs_per_pos 89.0 +503 42 training.batch_size 2.0 +503 43 model.embedding_dim 1.0 +503 43 optimizer.lr 0.003810103231233096 +503 43 negative_sampler.num_negs_per_pos 20.0 +503 43 training.batch_size 1.0 +503 44 model.embedding_dim 2.0 +503 44 optimizer.lr 0.016466819973732266 +503 44 negative_sampler.num_negs_per_pos 32.0 +503 44 training.batch_size 1.0 +503 45 model.embedding_dim 1.0 +503 45 optimizer.lr 0.02276624476905062 +503 45 negative_sampler.num_negs_per_pos 3.0 +503 45 training.batch_size 2.0 +503 46 model.embedding_dim 1.0 +503 46 optimizer.lr 0.002218965074969312 +503 46 negative_sampler.num_negs_per_pos 1.0 +503 46 training.batch_size 0.0 +503 47 model.embedding_dim 0.0 +503 47 optimizer.lr 0.020804504710377706 +503 47 negative_sampler.num_negs_per_pos 73.0 +503 47 training.batch_size 2.0 +503 48 model.embedding_dim 1.0 +503 48 optimizer.lr 0.013246663831991662 +503 48 
negative_sampler.num_negs_per_pos 19.0 +503 48 training.batch_size 2.0 +503 49 model.embedding_dim 1.0 +503 49 optimizer.lr 0.030371846427330235 +503 49 negative_sampler.num_negs_per_pos 66.0 +503 49 training.batch_size 1.0 +503 50 model.embedding_dim 2.0 +503 50 optimizer.lr 0.005801851985599896 +503 50 negative_sampler.num_negs_per_pos 7.0 +503 50 training.batch_size 2.0 +503 51 model.embedding_dim 2.0 +503 51 optimizer.lr 0.0015028535378095232 +503 51 negative_sampler.num_negs_per_pos 3.0 +503 51 training.batch_size 1.0 +503 52 model.embedding_dim 1.0 +503 52 optimizer.lr 0.0664203526473642 +503 52 negative_sampler.num_negs_per_pos 28.0 +503 52 training.batch_size 2.0 +503 53 model.embedding_dim 0.0 +503 53 optimizer.lr 0.019966224104149943 +503 53 negative_sampler.num_negs_per_pos 59.0 +503 53 training.batch_size 1.0 +503 54 model.embedding_dim 2.0 +503 54 optimizer.lr 0.006173769264781625 +503 54 negative_sampler.num_negs_per_pos 72.0 +503 54 training.batch_size 1.0 +503 55 model.embedding_dim 2.0 +503 55 optimizer.lr 0.03681314226693361 +503 55 negative_sampler.num_negs_per_pos 33.0 +503 55 training.batch_size 1.0 +503 56 model.embedding_dim 1.0 +503 56 optimizer.lr 0.07855496735933591 +503 56 negative_sampler.num_negs_per_pos 98.0 +503 56 training.batch_size 1.0 +503 57 model.embedding_dim 1.0 +503 57 optimizer.lr 0.005029713453142386 +503 57 negative_sampler.num_negs_per_pos 87.0 +503 57 training.batch_size 1.0 +503 58 model.embedding_dim 0.0 +503 58 optimizer.lr 0.006716546046301851 +503 58 negative_sampler.num_negs_per_pos 54.0 +503 58 training.batch_size 2.0 +503 59 model.embedding_dim 2.0 +503 59 optimizer.lr 0.004542664151506099 +503 59 negative_sampler.num_negs_per_pos 37.0 +503 59 training.batch_size 1.0 +503 60 model.embedding_dim 1.0 +503 60 optimizer.lr 0.09290968740891103 +503 60 negative_sampler.num_negs_per_pos 48.0 +503 60 training.batch_size 2.0 +503 61 model.embedding_dim 2.0 +503 61 optimizer.lr 0.0016756528298584315 +503 61 
negative_sampler.num_negs_per_pos 38.0 +503 61 training.batch_size 2.0 +503 62 model.embedding_dim 0.0 +503 62 optimizer.lr 0.002662732024324989 +503 62 negative_sampler.num_negs_per_pos 52.0 +503 62 training.batch_size 0.0 +503 63 model.embedding_dim 0.0 +503 63 optimizer.lr 0.009171058699443268 +503 63 negative_sampler.num_negs_per_pos 36.0 +503 63 training.batch_size 2.0 +503 64 model.embedding_dim 0.0 +503 64 optimizer.lr 0.07032282398812796 +503 64 negative_sampler.num_negs_per_pos 67.0 +503 64 training.batch_size 1.0 +503 65 model.embedding_dim 1.0 +503 65 optimizer.lr 0.025316340590459153 +503 65 negative_sampler.num_negs_per_pos 65.0 +503 65 training.batch_size 1.0 +503 66 model.embedding_dim 1.0 +503 66 optimizer.lr 0.017120327479468372 +503 66 negative_sampler.num_negs_per_pos 21.0 +503 66 training.batch_size 1.0 +503 67 model.embedding_dim 1.0 +503 67 optimizer.lr 0.0022194799849632953 +503 67 negative_sampler.num_negs_per_pos 79.0 +503 67 training.batch_size 0.0 +503 68 model.embedding_dim 0.0 +503 68 optimizer.lr 0.0048971917438566555 +503 68 negative_sampler.num_negs_per_pos 75.0 +503 68 training.batch_size 0.0 +503 69 model.embedding_dim 2.0 +503 69 optimizer.lr 0.0037917398051268655 +503 69 negative_sampler.num_negs_per_pos 58.0 +503 69 training.batch_size 2.0 +503 70 model.embedding_dim 2.0 +503 70 optimizer.lr 0.004385696434256153 +503 70 negative_sampler.num_negs_per_pos 37.0 +503 70 training.batch_size 2.0 +503 71 model.embedding_dim 1.0 +503 71 optimizer.lr 0.01315546944532434 +503 71 negative_sampler.num_negs_per_pos 0.0 +503 71 training.batch_size 1.0 +503 72 model.embedding_dim 1.0 +503 72 optimizer.lr 0.005018870433489182 +503 72 negative_sampler.num_negs_per_pos 61.0 +503 72 training.batch_size 1.0 +503 73 model.embedding_dim 2.0 +503 73 optimizer.lr 0.020572630755078907 +503 73 negative_sampler.num_negs_per_pos 76.0 +503 73 training.batch_size 2.0 +503 74 model.embedding_dim 1.0 +503 74 optimizer.lr 0.0019946999614048385 +503 74 
negative_sampler.num_negs_per_pos 53.0 +503 74 training.batch_size 0.0 +503 75 model.embedding_dim 2.0 +503 75 optimizer.lr 0.03756461068076383 +503 75 negative_sampler.num_negs_per_pos 20.0 +503 75 training.batch_size 1.0 +503 76 model.embedding_dim 2.0 +503 76 optimizer.lr 0.07222959473323821 +503 76 negative_sampler.num_negs_per_pos 82.0 +503 76 training.batch_size 0.0 +503 77 model.embedding_dim 0.0 +503 77 optimizer.lr 0.007971247386975473 +503 77 negative_sampler.num_negs_per_pos 29.0 +503 77 training.batch_size 0.0 +503 78 model.embedding_dim 0.0 +503 78 optimizer.lr 0.006713643572543873 +503 78 negative_sampler.num_negs_per_pos 81.0 +503 78 training.batch_size 2.0 +503 79 model.embedding_dim 0.0 +503 79 optimizer.lr 0.025598977329720708 +503 79 negative_sampler.num_negs_per_pos 34.0 +503 79 training.batch_size 1.0 +503 80 model.embedding_dim 0.0 +503 80 optimizer.lr 0.0019067098121322188 +503 80 negative_sampler.num_negs_per_pos 90.0 +503 80 training.batch_size 1.0 +503 81 model.embedding_dim 1.0 +503 81 optimizer.lr 0.03925997977316976 +503 81 negative_sampler.num_negs_per_pos 59.0 +503 81 training.batch_size 0.0 +503 82 model.embedding_dim 1.0 +503 82 optimizer.lr 0.0012035884404053361 +503 82 negative_sampler.num_negs_per_pos 23.0 +503 82 training.batch_size 1.0 +503 83 model.embedding_dim 1.0 +503 83 optimizer.lr 0.009785158822158482 +503 83 negative_sampler.num_negs_per_pos 82.0 +503 83 training.batch_size 1.0 +503 84 model.embedding_dim 2.0 +503 84 optimizer.lr 0.023167405376491104 +503 84 negative_sampler.num_negs_per_pos 57.0 +503 84 training.batch_size 0.0 +503 85 model.embedding_dim 0.0 +503 85 optimizer.lr 0.0017259934642241176 +503 85 negative_sampler.num_negs_per_pos 96.0 +503 85 training.batch_size 2.0 +503 86 model.embedding_dim 2.0 +503 86 optimizer.lr 0.0020058453256089474 +503 86 negative_sampler.num_negs_per_pos 80.0 +503 86 training.batch_size 1.0 +503 87 model.embedding_dim 0.0 +503 87 optimizer.lr 0.0031410827874011164 +503 87 
negative_sampler.num_negs_per_pos 98.0 +503 87 training.batch_size 1.0 +503 88 model.embedding_dim 2.0 +503 88 optimizer.lr 0.014366016559627593 +503 88 negative_sampler.num_negs_per_pos 33.0 +503 88 training.batch_size 1.0 +503 89 model.embedding_dim 1.0 +503 89 optimizer.lr 0.0014599663102386267 +503 89 negative_sampler.num_negs_per_pos 61.0 +503 89 training.batch_size 2.0 +503 90 model.embedding_dim 1.0 +503 90 optimizer.lr 0.013298155962531418 +503 90 negative_sampler.num_negs_per_pos 64.0 +503 90 training.batch_size 1.0 +503 91 model.embedding_dim 0.0 +503 91 optimizer.lr 0.00312590322709908 +503 91 negative_sampler.num_negs_per_pos 70.0 +503 91 training.batch_size 1.0 +503 92 model.embedding_dim 2.0 +503 92 optimizer.lr 0.045402933403011964 +503 92 negative_sampler.num_negs_per_pos 45.0 +503 92 training.batch_size 1.0 +503 93 model.embedding_dim 2.0 +503 93 optimizer.lr 0.0014272965329169827 +503 93 negative_sampler.num_negs_per_pos 30.0 +503 93 training.batch_size 0.0 +503 94 model.embedding_dim 2.0 +503 94 optimizer.lr 0.03120408495396281 +503 94 negative_sampler.num_negs_per_pos 78.0 +503 94 training.batch_size 2.0 +503 95 model.embedding_dim 1.0 +503 95 optimizer.lr 0.0019944797438693615 +503 95 negative_sampler.num_negs_per_pos 32.0 +503 95 training.batch_size 1.0 +503 96 model.embedding_dim 1.0 +503 96 optimizer.lr 0.03525356311127513 +503 96 negative_sampler.num_negs_per_pos 86.0 +503 96 training.batch_size 0.0 +503 97 model.embedding_dim 1.0 +503 97 optimizer.lr 0.0036154497176756053 +503 97 negative_sampler.num_negs_per_pos 50.0 +503 97 training.batch_size 2.0 +503 98 model.embedding_dim 1.0 +503 98 optimizer.lr 0.0011748678055044407 +503 98 negative_sampler.num_negs_per_pos 25.0 +503 98 training.batch_size 2.0 +503 99 model.embedding_dim 2.0 +503 99 optimizer.lr 0.016621471597877142 +503 99 negative_sampler.num_negs_per_pos 33.0 +503 99 training.batch_size 1.0 +503 100 model.embedding_dim 2.0 +503 100 optimizer.lr 0.0014547746454757972 +503 100 
negative_sampler.num_negs_per_pos 79.0 +503 100 training.batch_size 0.0 +503 1 dataset """wn18rr""" +503 1 model """proje""" +503 1 loss """bceaftersigmoid""" +503 1 regularizer """no""" +503 1 optimizer """adam""" +503 1 training_loop """owa""" +503 1 negative_sampler """basic""" +503 1 evaluator """rankbased""" +503 2 dataset """wn18rr""" +503 2 model """proje""" +503 2 loss """bceaftersigmoid""" +503 2 regularizer """no""" +503 2 optimizer """adam""" +503 2 training_loop """owa""" +503 2 negative_sampler """basic""" +503 2 evaluator """rankbased""" +503 3 dataset """wn18rr""" +503 3 model """proje""" +503 3 loss """bceaftersigmoid""" +503 3 regularizer """no""" +503 3 optimizer """adam""" +503 3 training_loop """owa""" +503 3 negative_sampler """basic""" +503 3 evaluator """rankbased""" +503 4 dataset """wn18rr""" +503 4 model """proje""" +503 4 loss """bceaftersigmoid""" +503 4 regularizer """no""" +503 4 optimizer """adam""" +503 4 training_loop """owa""" +503 4 negative_sampler """basic""" +503 4 evaluator """rankbased""" +503 5 dataset """wn18rr""" +503 5 model """proje""" +503 5 loss """bceaftersigmoid""" +503 5 regularizer """no""" +503 5 optimizer """adam""" +503 5 training_loop """owa""" +503 5 negative_sampler """basic""" +503 5 evaluator """rankbased""" +503 6 dataset """wn18rr""" +503 6 model """proje""" +503 6 loss """bceaftersigmoid""" +503 6 regularizer """no""" +503 6 optimizer """adam""" +503 6 training_loop """owa""" +503 6 negative_sampler """basic""" +503 6 evaluator """rankbased""" +503 7 dataset """wn18rr""" +503 7 model """proje""" +503 7 loss """bceaftersigmoid""" +503 7 regularizer """no""" +503 7 optimizer """adam""" +503 7 training_loop """owa""" +503 7 negative_sampler """basic""" +503 7 evaluator """rankbased""" +503 8 dataset """wn18rr""" +503 8 model """proje""" +503 8 loss """bceaftersigmoid""" +503 8 regularizer """no""" +503 8 optimizer """adam""" +503 8 training_loop """owa""" +503 8 negative_sampler """basic""" +503 8 evaluator 
"""rankbased""" +503 9 dataset """wn18rr""" +503 9 model """proje""" +503 9 loss """bceaftersigmoid""" +503 9 regularizer """no""" +503 9 optimizer """adam""" +503 9 training_loop """owa""" +503 9 negative_sampler """basic""" +503 9 evaluator """rankbased""" +503 10 dataset """wn18rr""" +503 10 model """proje""" +503 10 loss """bceaftersigmoid""" +503 10 regularizer """no""" +503 10 optimizer """adam""" +503 10 training_loop """owa""" +503 10 negative_sampler """basic""" +503 10 evaluator """rankbased""" +503 11 dataset """wn18rr""" +503 11 model """proje""" +503 11 loss """bceaftersigmoid""" +503 11 regularizer """no""" +503 11 optimizer """adam""" +503 11 training_loop """owa""" +503 11 negative_sampler """basic""" +503 11 evaluator """rankbased""" +503 12 dataset """wn18rr""" +503 12 model """proje""" +503 12 loss """bceaftersigmoid""" +503 12 regularizer """no""" +503 12 optimizer """adam""" +503 12 training_loop """owa""" +503 12 negative_sampler """basic""" +503 12 evaluator """rankbased""" +503 13 dataset """wn18rr""" +503 13 model """proje""" +503 13 loss """bceaftersigmoid""" +503 13 regularizer """no""" +503 13 optimizer """adam""" +503 13 training_loop """owa""" +503 13 negative_sampler """basic""" +503 13 evaluator """rankbased""" +503 14 dataset """wn18rr""" +503 14 model """proje""" +503 14 loss """bceaftersigmoid""" +503 14 regularizer """no""" +503 14 optimizer """adam""" +503 14 training_loop """owa""" +503 14 negative_sampler """basic""" +503 14 evaluator """rankbased""" +503 15 dataset """wn18rr""" +503 15 model """proje""" +503 15 loss """bceaftersigmoid""" +503 15 regularizer """no""" +503 15 optimizer """adam""" +503 15 training_loop """owa""" +503 15 negative_sampler """basic""" +503 15 evaluator """rankbased""" +503 16 dataset """wn18rr""" +503 16 model """proje""" +503 16 loss """bceaftersigmoid""" +503 16 regularizer """no""" +503 16 optimizer """adam""" +503 16 training_loop """owa""" +503 16 negative_sampler """basic""" +503 16 evaluator 
"""rankbased""" +503 17 dataset """wn18rr""" +503 17 model """proje""" +503 17 loss """bceaftersigmoid""" +503 17 regularizer """no""" +503 17 optimizer """adam""" +503 17 training_loop """owa""" +503 17 negative_sampler """basic""" +503 17 evaluator """rankbased""" +503 18 dataset """wn18rr""" +503 18 model """proje""" +503 18 loss """bceaftersigmoid""" +503 18 regularizer """no""" +503 18 optimizer """adam""" +503 18 training_loop """owa""" +503 18 negative_sampler """basic""" +503 18 evaluator """rankbased""" +503 19 dataset """wn18rr""" +503 19 model """proje""" +503 19 loss """bceaftersigmoid""" +503 19 regularizer """no""" +503 19 optimizer """adam""" +503 19 training_loop """owa""" +503 19 negative_sampler """basic""" +503 19 evaluator """rankbased""" +503 20 dataset """wn18rr""" +503 20 model """proje""" +503 20 loss """bceaftersigmoid""" +503 20 regularizer """no""" +503 20 optimizer """adam""" +503 20 training_loop """owa""" +503 20 negative_sampler """basic""" +503 20 evaluator """rankbased""" +503 21 dataset """wn18rr""" +503 21 model """proje""" +503 21 loss """bceaftersigmoid""" +503 21 regularizer """no""" +503 21 optimizer """adam""" +503 21 training_loop """owa""" +503 21 negative_sampler """basic""" +503 21 evaluator """rankbased""" +503 22 dataset """wn18rr""" +503 22 model """proje""" +503 22 loss """bceaftersigmoid""" +503 22 regularizer """no""" +503 22 optimizer """adam""" +503 22 training_loop """owa""" +503 22 negative_sampler """basic""" +503 22 evaluator """rankbased""" +503 23 dataset """wn18rr""" +503 23 model """proje""" +503 23 loss """bceaftersigmoid""" +503 23 regularizer """no""" +503 23 optimizer """adam""" +503 23 training_loop """owa""" +503 23 negative_sampler """basic""" +503 23 evaluator """rankbased""" +503 24 dataset """wn18rr""" +503 24 model """proje""" +503 24 loss """bceaftersigmoid""" +503 24 regularizer """no""" +503 24 optimizer """adam""" +503 24 training_loop """owa""" +503 24 negative_sampler """basic""" +503 24 
evaluator """rankbased""" +503 25 dataset """wn18rr""" +503 25 model """proje""" +503 25 loss """bceaftersigmoid""" +503 25 regularizer """no""" +503 25 optimizer """adam""" +503 25 training_loop """owa""" +503 25 negative_sampler """basic""" +503 25 evaluator """rankbased""" +503 26 dataset """wn18rr""" +503 26 model """proje""" +503 26 loss """bceaftersigmoid""" +503 26 regularizer """no""" +503 26 optimizer """adam""" +503 26 training_loop """owa""" +503 26 negative_sampler """basic""" +503 26 evaluator """rankbased""" +503 27 dataset """wn18rr""" +503 27 model """proje""" +503 27 loss """bceaftersigmoid""" +503 27 regularizer """no""" +503 27 optimizer """adam""" +503 27 training_loop """owa""" +503 27 negative_sampler """basic""" +503 27 evaluator """rankbased""" +503 28 dataset """wn18rr""" +503 28 model """proje""" +503 28 loss """bceaftersigmoid""" +503 28 regularizer """no""" +503 28 optimizer """adam""" +503 28 training_loop """owa""" +503 28 negative_sampler """basic""" +503 28 evaluator """rankbased""" +503 29 dataset """wn18rr""" +503 29 model """proje""" +503 29 loss """bceaftersigmoid""" +503 29 regularizer """no""" +503 29 optimizer """adam""" +503 29 training_loop """owa""" +503 29 negative_sampler """basic""" +503 29 evaluator """rankbased""" +503 30 dataset """wn18rr""" +503 30 model """proje""" +503 30 loss """bceaftersigmoid""" +503 30 regularizer """no""" +503 30 optimizer """adam""" +503 30 training_loop """owa""" +503 30 negative_sampler """basic""" +503 30 evaluator """rankbased""" +503 31 dataset """wn18rr""" +503 31 model """proje""" +503 31 loss """bceaftersigmoid""" +503 31 regularizer """no""" +503 31 optimizer """adam""" +503 31 training_loop """owa""" +503 31 negative_sampler """basic""" +503 31 evaluator """rankbased""" +503 32 dataset """wn18rr""" +503 32 model """proje""" +503 32 loss """bceaftersigmoid""" +503 32 regularizer """no""" +503 32 optimizer """adam""" +503 32 training_loop """owa""" +503 32 negative_sampler """basic""" 
+503 32 evaluator """rankbased""" +503 33 dataset """wn18rr""" +503 33 model """proje""" +503 33 loss """bceaftersigmoid""" +503 33 regularizer """no""" +503 33 optimizer """adam""" +503 33 training_loop """owa""" +503 33 negative_sampler """basic""" +503 33 evaluator """rankbased""" +503 34 dataset """wn18rr""" +503 34 model """proje""" +503 34 loss """bceaftersigmoid""" +503 34 regularizer """no""" +503 34 optimizer """adam""" +503 34 training_loop """owa""" +503 34 negative_sampler """basic""" +503 34 evaluator """rankbased""" +503 35 dataset """wn18rr""" +503 35 model """proje""" +503 35 loss """bceaftersigmoid""" +503 35 regularizer """no""" +503 35 optimizer """adam""" +503 35 training_loop """owa""" +503 35 negative_sampler """basic""" +503 35 evaluator """rankbased""" +503 36 dataset """wn18rr""" +503 36 model """proje""" +503 36 loss """bceaftersigmoid""" +503 36 regularizer """no""" +503 36 optimizer """adam""" +503 36 training_loop """owa""" +503 36 negative_sampler """basic""" +503 36 evaluator """rankbased""" +503 37 dataset """wn18rr""" +503 37 model """proje""" +503 37 loss """bceaftersigmoid""" +503 37 regularizer """no""" +503 37 optimizer """adam""" +503 37 training_loop """owa""" +503 37 negative_sampler """basic""" +503 37 evaluator """rankbased""" +503 38 dataset """wn18rr""" +503 38 model """proje""" +503 38 loss """bceaftersigmoid""" +503 38 regularizer """no""" +503 38 optimizer """adam""" +503 38 training_loop """owa""" +503 38 negative_sampler """basic""" +503 38 evaluator """rankbased""" +503 39 dataset """wn18rr""" +503 39 model """proje""" +503 39 loss """bceaftersigmoid""" +503 39 regularizer """no""" +503 39 optimizer """adam""" +503 39 training_loop """owa""" +503 39 negative_sampler """basic""" +503 39 evaluator """rankbased""" +503 40 dataset """wn18rr""" +503 40 model """proje""" +503 40 loss """bceaftersigmoid""" +503 40 regularizer """no""" +503 40 optimizer """adam""" +503 40 training_loop """owa""" +503 40 negative_sampler 
"""basic""" +503 40 evaluator """rankbased""" +503 41 dataset """wn18rr""" +503 41 model """proje""" +503 41 loss """bceaftersigmoid""" +503 41 regularizer """no""" +503 41 optimizer """adam""" +503 41 training_loop """owa""" +503 41 negative_sampler """basic""" +503 41 evaluator """rankbased""" +503 42 dataset """wn18rr""" +503 42 model """proje""" +503 42 loss """bceaftersigmoid""" +503 42 regularizer """no""" +503 42 optimizer """adam""" +503 42 training_loop """owa""" +503 42 negative_sampler """basic""" +503 42 evaluator """rankbased""" +503 43 dataset """wn18rr""" +503 43 model """proje""" +503 43 loss """bceaftersigmoid""" +503 43 regularizer """no""" +503 43 optimizer """adam""" +503 43 training_loop """owa""" +503 43 negative_sampler """basic""" +503 43 evaluator """rankbased""" +503 44 dataset """wn18rr""" +503 44 model """proje""" +503 44 loss """bceaftersigmoid""" +503 44 regularizer """no""" +503 44 optimizer """adam""" +503 44 training_loop """owa""" +503 44 negative_sampler """basic""" +503 44 evaluator """rankbased""" +503 45 dataset """wn18rr""" +503 45 model """proje""" +503 45 loss """bceaftersigmoid""" +503 45 regularizer """no""" +503 45 optimizer """adam""" +503 45 training_loop """owa""" +503 45 negative_sampler """basic""" +503 45 evaluator """rankbased""" +503 46 dataset """wn18rr""" +503 46 model """proje""" +503 46 loss """bceaftersigmoid""" +503 46 regularizer """no""" +503 46 optimizer """adam""" +503 46 training_loop """owa""" +503 46 negative_sampler """basic""" +503 46 evaluator """rankbased""" +503 47 dataset """wn18rr""" +503 47 model """proje""" +503 47 loss """bceaftersigmoid""" +503 47 regularizer """no""" +503 47 optimizer """adam""" +503 47 training_loop """owa""" +503 47 negative_sampler """basic""" +503 47 evaluator """rankbased""" +503 48 dataset """wn18rr""" +503 48 model """proje""" +503 48 loss """bceaftersigmoid""" +503 48 regularizer """no""" +503 48 optimizer """adam""" +503 48 training_loop """owa""" +503 48 
negative_sampler """basic""" +503 48 evaluator """rankbased""" +503 49 dataset """wn18rr""" +503 49 model """proje""" +503 49 loss """bceaftersigmoid""" +503 49 regularizer """no""" +503 49 optimizer """adam""" +503 49 training_loop """owa""" +503 49 negative_sampler """basic""" +503 49 evaluator """rankbased""" +503 50 dataset """wn18rr""" +503 50 model """proje""" +503 50 loss """bceaftersigmoid""" +503 50 regularizer """no""" +503 50 optimizer """adam""" +503 50 training_loop """owa""" +503 50 negative_sampler """basic""" +503 50 evaluator """rankbased""" +503 51 dataset """wn18rr""" +503 51 model """proje""" +503 51 loss """bceaftersigmoid""" +503 51 regularizer """no""" +503 51 optimizer """adam""" +503 51 training_loop """owa""" +503 51 negative_sampler """basic""" +503 51 evaluator """rankbased""" +503 52 dataset """wn18rr""" +503 52 model """proje""" +503 52 loss """bceaftersigmoid""" +503 52 regularizer """no""" +503 52 optimizer """adam""" +503 52 training_loop """owa""" +503 52 negative_sampler """basic""" +503 52 evaluator """rankbased""" +503 53 dataset """wn18rr""" +503 53 model """proje""" +503 53 loss """bceaftersigmoid""" +503 53 regularizer """no""" +503 53 optimizer """adam""" +503 53 training_loop """owa""" +503 53 negative_sampler """basic""" +503 53 evaluator """rankbased""" +503 54 dataset """wn18rr""" +503 54 model """proje""" +503 54 loss """bceaftersigmoid""" +503 54 regularizer """no""" +503 54 optimizer """adam""" +503 54 training_loop """owa""" +503 54 negative_sampler """basic""" +503 54 evaluator """rankbased""" +503 55 dataset """wn18rr""" +503 55 model """proje""" +503 55 loss """bceaftersigmoid""" +503 55 regularizer """no""" +503 55 optimizer """adam""" +503 55 training_loop """owa""" +503 55 negative_sampler """basic""" +503 55 evaluator """rankbased""" +503 56 dataset """wn18rr""" +503 56 model """proje""" +503 56 loss """bceaftersigmoid""" +503 56 regularizer """no""" +503 56 optimizer """adam""" +503 56 training_loop """owa""" 
+503 56 negative_sampler """basic""" +503 56 evaluator """rankbased""" +503 57 dataset """wn18rr""" +503 57 model """proje""" +503 57 loss """bceaftersigmoid""" +503 57 regularizer """no""" +503 57 optimizer """adam""" +503 57 training_loop """owa""" +503 57 negative_sampler """basic""" +503 57 evaluator """rankbased""" +503 58 dataset """wn18rr""" +503 58 model """proje""" +503 58 loss """bceaftersigmoid""" +503 58 regularizer """no""" +503 58 optimizer """adam""" +503 58 training_loop """owa""" +503 58 negative_sampler """basic""" +503 58 evaluator """rankbased""" +503 59 dataset """wn18rr""" +503 59 model """proje""" +503 59 loss """bceaftersigmoid""" +503 59 regularizer """no""" +503 59 optimizer """adam""" +503 59 training_loop """owa""" +503 59 negative_sampler """basic""" +503 59 evaluator """rankbased""" +503 60 dataset """wn18rr""" +503 60 model """proje""" +503 60 loss """bceaftersigmoid""" +503 60 regularizer """no""" +503 60 optimizer """adam""" +503 60 training_loop """owa""" +503 60 negative_sampler """basic""" +503 60 evaluator """rankbased""" +503 61 dataset """wn18rr""" +503 61 model """proje""" +503 61 loss """bceaftersigmoid""" +503 61 regularizer """no""" +503 61 optimizer """adam""" +503 61 training_loop """owa""" +503 61 negative_sampler """basic""" +503 61 evaluator """rankbased""" +503 62 dataset """wn18rr""" +503 62 model """proje""" +503 62 loss """bceaftersigmoid""" +503 62 regularizer """no""" +503 62 optimizer """adam""" +503 62 training_loop """owa""" +503 62 negative_sampler """basic""" +503 62 evaluator """rankbased""" +503 63 dataset """wn18rr""" +503 63 model """proje""" +503 63 loss """bceaftersigmoid""" +503 63 regularizer """no""" +503 63 optimizer """adam""" +503 63 training_loop """owa""" +503 63 negative_sampler """basic""" +503 63 evaluator """rankbased""" +503 64 dataset """wn18rr""" +503 64 model """proje""" +503 64 loss """bceaftersigmoid""" +503 64 regularizer """no""" +503 64 optimizer """adam""" +503 64 training_loop 
"""owa""" +503 64 negative_sampler """basic""" +503 64 evaluator """rankbased""" +503 65 dataset """wn18rr""" +503 65 model """proje""" +503 65 loss """bceaftersigmoid""" +503 65 regularizer """no""" +503 65 optimizer """adam""" +503 65 training_loop """owa""" +503 65 negative_sampler """basic""" +503 65 evaluator """rankbased""" +503 66 dataset """wn18rr""" +503 66 model """proje""" +503 66 loss """bceaftersigmoid""" +503 66 regularizer """no""" +503 66 optimizer """adam""" +503 66 training_loop """owa""" +503 66 negative_sampler """basic""" +503 66 evaluator """rankbased""" +503 67 dataset """wn18rr""" +503 67 model """proje""" +503 67 loss """bceaftersigmoid""" +503 67 regularizer """no""" +503 67 optimizer """adam""" +503 67 training_loop """owa""" +503 67 negative_sampler """basic""" +503 67 evaluator """rankbased""" +503 68 dataset """wn18rr""" +503 68 model """proje""" +503 68 loss """bceaftersigmoid""" +503 68 regularizer """no""" +503 68 optimizer """adam""" +503 68 training_loop """owa""" +503 68 negative_sampler """basic""" +503 68 evaluator """rankbased""" +503 69 dataset """wn18rr""" +503 69 model """proje""" +503 69 loss """bceaftersigmoid""" +503 69 regularizer """no""" +503 69 optimizer """adam""" +503 69 training_loop """owa""" +503 69 negative_sampler """basic""" +503 69 evaluator """rankbased""" +503 70 dataset """wn18rr""" +503 70 model """proje""" +503 70 loss """bceaftersigmoid""" +503 70 regularizer """no""" +503 70 optimizer """adam""" +503 70 training_loop """owa""" +503 70 negative_sampler """basic""" +503 70 evaluator """rankbased""" +503 71 dataset """wn18rr""" +503 71 model """proje""" +503 71 loss """bceaftersigmoid""" +503 71 regularizer """no""" +503 71 optimizer """adam""" +503 71 training_loop """owa""" +503 71 negative_sampler """basic""" +503 71 evaluator """rankbased""" +503 72 dataset """wn18rr""" +503 72 model """proje""" +503 72 loss """bceaftersigmoid""" +503 72 regularizer """no""" +503 72 optimizer """adam""" +503 72 
training_loop """owa""" +503 72 negative_sampler """basic""" +503 72 evaluator """rankbased""" +503 73 dataset """wn18rr""" +503 73 model """proje""" +503 73 loss """bceaftersigmoid""" +503 73 regularizer """no""" +503 73 optimizer """adam""" +503 73 training_loop """owa""" +503 73 negative_sampler """basic""" +503 73 evaluator """rankbased""" +503 74 dataset """wn18rr""" +503 74 model """proje""" +503 74 loss """bceaftersigmoid""" +503 74 regularizer """no""" +503 74 optimizer """adam""" +503 74 training_loop """owa""" +503 74 negative_sampler """basic""" +503 74 evaluator """rankbased""" +503 75 dataset """wn18rr""" +503 75 model """proje""" +503 75 loss """bceaftersigmoid""" +503 75 regularizer """no""" +503 75 optimizer """adam""" +503 75 training_loop """owa""" +503 75 negative_sampler """basic""" +503 75 evaluator """rankbased""" +503 76 dataset """wn18rr""" +503 76 model """proje""" +503 76 loss """bceaftersigmoid""" +503 76 regularizer """no""" +503 76 optimizer """adam""" +503 76 training_loop """owa""" +503 76 negative_sampler """basic""" +503 76 evaluator """rankbased""" +503 77 dataset """wn18rr""" +503 77 model """proje""" +503 77 loss """bceaftersigmoid""" +503 77 regularizer """no""" +503 77 optimizer """adam""" +503 77 training_loop """owa""" +503 77 negative_sampler """basic""" +503 77 evaluator """rankbased""" +503 78 dataset """wn18rr""" +503 78 model """proje""" +503 78 loss """bceaftersigmoid""" +503 78 regularizer """no""" +503 78 optimizer """adam""" +503 78 training_loop """owa""" +503 78 negative_sampler """basic""" +503 78 evaluator """rankbased""" +503 79 dataset """wn18rr""" +503 79 model """proje""" +503 79 loss """bceaftersigmoid""" +503 79 regularizer """no""" +503 79 optimizer """adam""" +503 79 training_loop """owa""" +503 79 negative_sampler """basic""" +503 79 evaluator """rankbased""" +503 80 dataset """wn18rr""" +503 80 model """proje""" +503 80 loss """bceaftersigmoid""" +503 80 regularizer """no""" +503 80 optimizer """adam""" 
+503 80 training_loop """owa""" +503 80 negative_sampler """basic""" +503 80 evaluator """rankbased""" +503 81 dataset """wn18rr""" +503 81 model """proje""" +503 81 loss """bceaftersigmoid""" +503 81 regularizer """no""" +503 81 optimizer """adam""" +503 81 training_loop """owa""" +503 81 negative_sampler """basic""" +503 81 evaluator """rankbased""" +503 82 dataset """wn18rr""" +503 82 model """proje""" +503 82 loss """bceaftersigmoid""" +503 82 regularizer """no""" +503 82 optimizer """adam""" +503 82 training_loop """owa""" +503 82 negative_sampler """basic""" +503 82 evaluator """rankbased""" +503 83 dataset """wn18rr""" +503 83 model """proje""" +503 83 loss """bceaftersigmoid""" +503 83 regularizer """no""" +503 83 optimizer """adam""" +503 83 training_loop """owa""" +503 83 negative_sampler """basic""" +503 83 evaluator """rankbased""" +503 84 dataset """wn18rr""" +503 84 model """proje""" +503 84 loss """bceaftersigmoid""" +503 84 regularizer """no""" +503 84 optimizer """adam""" +503 84 training_loop """owa""" +503 84 negative_sampler """basic""" +503 84 evaluator """rankbased""" +503 85 dataset """wn18rr""" +503 85 model """proje""" +503 85 loss """bceaftersigmoid""" +503 85 regularizer """no""" +503 85 optimizer """adam""" +503 85 training_loop """owa""" +503 85 negative_sampler """basic""" +503 85 evaluator """rankbased""" +503 86 dataset """wn18rr""" +503 86 model """proje""" +503 86 loss """bceaftersigmoid""" +503 86 regularizer """no""" +503 86 optimizer """adam""" +503 86 training_loop """owa""" +503 86 negative_sampler """basic""" +503 86 evaluator """rankbased""" +503 87 dataset """wn18rr""" +503 87 model """proje""" +503 87 loss """bceaftersigmoid""" +503 87 regularizer """no""" +503 87 optimizer """adam""" +503 87 training_loop """owa""" +503 87 negative_sampler """basic""" +503 87 evaluator """rankbased""" +503 88 dataset """wn18rr""" +503 88 model """proje""" +503 88 loss """bceaftersigmoid""" +503 88 regularizer """no""" +503 88 optimizer 
"""adam""" +503 88 training_loop """owa""" +503 88 negative_sampler """basic""" +503 88 evaluator """rankbased""" +503 89 dataset """wn18rr""" +503 89 model """proje""" +503 89 loss """bceaftersigmoid""" +503 89 regularizer """no""" +503 89 optimizer """adam""" +503 89 training_loop """owa""" +503 89 negative_sampler """basic""" +503 89 evaluator """rankbased""" +503 90 dataset """wn18rr""" +503 90 model """proje""" +503 90 loss """bceaftersigmoid""" +503 90 regularizer """no""" +503 90 optimizer """adam""" +503 90 training_loop """owa""" +503 90 negative_sampler """basic""" +503 90 evaluator """rankbased""" +503 91 dataset """wn18rr""" +503 91 model """proje""" +503 91 loss """bceaftersigmoid""" +503 91 regularizer """no""" +503 91 optimizer """adam""" +503 91 training_loop """owa""" +503 91 negative_sampler """basic""" +503 91 evaluator """rankbased""" +503 92 dataset """wn18rr""" +503 92 model """proje""" +503 92 loss """bceaftersigmoid""" +503 92 regularizer """no""" +503 92 optimizer """adam""" +503 92 training_loop """owa""" +503 92 negative_sampler """basic""" +503 92 evaluator """rankbased""" +503 93 dataset """wn18rr""" +503 93 model """proje""" +503 93 loss """bceaftersigmoid""" +503 93 regularizer """no""" +503 93 optimizer """adam""" +503 93 training_loop """owa""" +503 93 negative_sampler """basic""" +503 93 evaluator """rankbased""" +503 94 dataset """wn18rr""" +503 94 model """proje""" +503 94 loss """bceaftersigmoid""" +503 94 regularizer """no""" +503 94 optimizer """adam""" +503 94 training_loop """owa""" +503 94 negative_sampler """basic""" +503 94 evaluator """rankbased""" +503 95 dataset """wn18rr""" +503 95 model """proje""" +503 95 loss """bceaftersigmoid""" +503 95 regularizer """no""" +503 95 optimizer """adam""" +503 95 training_loop """owa""" +503 95 negative_sampler """basic""" +503 95 evaluator """rankbased""" +503 96 dataset """wn18rr""" +503 96 model """proje""" +503 96 loss """bceaftersigmoid""" +503 96 regularizer """no""" +503 96 
optimizer """adam""" +503 96 training_loop """owa""" +503 96 negative_sampler """basic""" +503 96 evaluator """rankbased""" +503 97 dataset """wn18rr""" +503 97 model """proje""" +503 97 loss """bceaftersigmoid""" +503 97 regularizer """no""" +503 97 optimizer """adam""" +503 97 training_loop """owa""" +503 97 negative_sampler """basic""" +503 97 evaluator """rankbased""" +503 98 dataset """wn18rr""" +503 98 model """proje""" +503 98 loss """bceaftersigmoid""" +503 98 regularizer """no""" +503 98 optimizer """adam""" +503 98 training_loop """owa""" +503 98 negative_sampler """basic""" +503 98 evaluator """rankbased""" +503 99 dataset """wn18rr""" +503 99 model """proje""" +503 99 loss """bceaftersigmoid""" +503 99 regularizer """no""" +503 99 optimizer """adam""" +503 99 training_loop """owa""" +503 99 negative_sampler """basic""" +503 99 evaluator """rankbased""" +503 100 dataset """wn18rr""" +503 100 model """proje""" +503 100 loss """bceaftersigmoid""" +503 100 regularizer """no""" +503 100 optimizer """adam""" +503 100 training_loop """owa""" +503 100 negative_sampler """basic""" +503 100 evaluator """rankbased""" +504 1 model.embedding_dim 2.0 +504 1 optimizer.lr 0.018918114040607482 +504 1 negative_sampler.num_negs_per_pos 65.0 +504 1 training.batch_size 2.0 +504 2 model.embedding_dim 0.0 +504 2 optimizer.lr 0.013011432553786381 +504 2 negative_sampler.num_negs_per_pos 54.0 +504 2 training.batch_size 1.0 +504 3 model.embedding_dim 0.0 +504 3 optimizer.lr 0.004292892964168103 +504 3 negative_sampler.num_negs_per_pos 22.0 +504 3 training.batch_size 2.0 +504 4 model.embedding_dim 2.0 +504 4 optimizer.lr 0.0028737264480608294 +504 4 negative_sampler.num_negs_per_pos 72.0 +504 4 training.batch_size 0.0 +504 5 model.embedding_dim 1.0 +504 5 optimizer.lr 0.0037359765426450663 +504 5 negative_sampler.num_negs_per_pos 54.0 +504 5 training.batch_size 0.0 +504 6 model.embedding_dim 0.0 +504 6 optimizer.lr 0.012358256744819688 +504 6 negative_sampler.num_negs_per_pos 
76.0 +504 6 training.batch_size 0.0 +504 7 model.embedding_dim 2.0 +504 7 optimizer.lr 0.020607622582970872 +504 7 negative_sampler.num_negs_per_pos 59.0 +504 7 training.batch_size 1.0 +504 8 model.embedding_dim 2.0 +504 8 optimizer.lr 0.025469736872097786 +504 8 negative_sampler.num_negs_per_pos 69.0 +504 8 training.batch_size 2.0 +504 9 model.embedding_dim 1.0 +504 9 optimizer.lr 0.006358144965927867 +504 9 negative_sampler.num_negs_per_pos 3.0 +504 9 training.batch_size 1.0 +504 10 model.embedding_dim 1.0 +504 10 optimizer.lr 0.007216377040759872 +504 10 negative_sampler.num_negs_per_pos 92.0 +504 10 training.batch_size 2.0 +504 11 model.embedding_dim 0.0 +504 11 optimizer.lr 0.00975826527068137 +504 11 negative_sampler.num_negs_per_pos 97.0 +504 11 training.batch_size 2.0 +504 12 model.embedding_dim 2.0 +504 12 optimizer.lr 0.0326717236647391 +504 12 negative_sampler.num_negs_per_pos 82.0 +504 12 training.batch_size 0.0 +504 13 model.embedding_dim 0.0 +504 13 optimizer.lr 0.015314136933826071 +504 13 negative_sampler.num_negs_per_pos 73.0 +504 13 training.batch_size 2.0 +504 14 model.embedding_dim 0.0 +504 14 optimizer.lr 0.008453342432040442 +504 14 negative_sampler.num_negs_per_pos 18.0 +504 14 training.batch_size 1.0 +504 15 model.embedding_dim 0.0 +504 15 optimizer.lr 0.0032681773675133376 +504 15 negative_sampler.num_negs_per_pos 15.0 +504 15 training.batch_size 1.0 +504 16 model.embedding_dim 0.0 +504 16 optimizer.lr 0.0014960972749911076 +504 16 negative_sampler.num_negs_per_pos 21.0 +504 16 training.batch_size 0.0 +504 17 model.embedding_dim 1.0 +504 17 optimizer.lr 0.001649291028083365 +504 17 negative_sampler.num_negs_per_pos 3.0 +504 17 training.batch_size 0.0 +504 18 model.embedding_dim 0.0 +504 18 optimizer.lr 0.005815966221826761 +504 18 negative_sampler.num_negs_per_pos 11.0 +504 18 training.batch_size 2.0 +504 19 model.embedding_dim 0.0 +504 19 optimizer.lr 0.01408001393485615 +504 19 negative_sampler.num_negs_per_pos 26.0 +504 19 
training.batch_size 1.0 +504 20 model.embedding_dim 1.0 +504 20 optimizer.lr 0.019425135511705778 +504 20 negative_sampler.num_negs_per_pos 67.0 +504 20 training.batch_size 2.0 +504 21 model.embedding_dim 1.0 +504 21 optimizer.lr 0.011265889817108798 +504 21 negative_sampler.num_negs_per_pos 30.0 +504 21 training.batch_size 0.0 +504 22 model.embedding_dim 1.0 +504 22 optimizer.lr 0.0035058884812943854 +504 22 negative_sampler.num_negs_per_pos 69.0 +504 22 training.batch_size 2.0 +504 23 model.embedding_dim 2.0 +504 23 optimizer.lr 0.09786468617038578 +504 23 negative_sampler.num_negs_per_pos 37.0 +504 23 training.batch_size 1.0 +504 24 model.embedding_dim 0.0 +504 24 optimizer.lr 0.0020383663891653052 +504 24 negative_sampler.num_negs_per_pos 94.0 +504 24 training.batch_size 2.0 +504 25 model.embedding_dim 0.0 +504 25 optimizer.lr 0.025487812284193653 +504 25 negative_sampler.num_negs_per_pos 73.0 +504 25 training.batch_size 1.0 +504 26 model.embedding_dim 0.0 +504 26 optimizer.lr 0.00395205417014214 +504 26 negative_sampler.num_negs_per_pos 62.0 +504 26 training.batch_size 2.0 +504 27 model.embedding_dim 0.0 +504 27 optimizer.lr 0.002935007382595698 +504 27 negative_sampler.num_negs_per_pos 71.0 +504 27 training.batch_size 1.0 +504 28 model.embedding_dim 0.0 +504 28 optimizer.lr 0.057376116647251806 +504 28 negative_sampler.num_negs_per_pos 86.0 +504 28 training.batch_size 1.0 +504 29 model.embedding_dim 1.0 +504 29 optimizer.lr 0.007933820720470325 +504 29 negative_sampler.num_negs_per_pos 52.0 +504 29 training.batch_size 0.0 +504 30 model.embedding_dim 0.0 +504 30 optimizer.lr 0.005346275113313267 +504 30 negative_sampler.num_negs_per_pos 42.0 +504 30 training.batch_size 0.0 +504 31 model.embedding_dim 2.0 +504 31 optimizer.lr 0.04380996385546998 +504 31 negative_sampler.num_negs_per_pos 24.0 +504 31 training.batch_size 0.0 +504 32 model.embedding_dim 2.0 +504 32 optimizer.lr 0.019270066493873077 +504 32 negative_sampler.num_negs_per_pos 37.0 +504 32 
training.batch_size 1.0 +504 33 model.embedding_dim 0.0 +504 33 optimizer.lr 0.0042938058699766 +504 33 negative_sampler.num_negs_per_pos 30.0 +504 33 training.batch_size 1.0 +504 34 model.embedding_dim 1.0 +504 34 optimizer.lr 0.002505685822839045 +504 34 negative_sampler.num_negs_per_pos 76.0 +504 34 training.batch_size 2.0 +504 35 model.embedding_dim 1.0 +504 35 optimizer.lr 0.001355258922534618 +504 35 negative_sampler.num_negs_per_pos 41.0 +504 35 training.batch_size 2.0 +504 36 model.embedding_dim 0.0 +504 36 optimizer.lr 0.0025265778340793593 +504 36 negative_sampler.num_negs_per_pos 46.0 +504 36 training.batch_size 2.0 +504 37 model.embedding_dim 2.0 +504 37 optimizer.lr 0.0019777159583941875 +504 37 negative_sampler.num_negs_per_pos 61.0 +504 37 training.batch_size 0.0 +504 38 model.embedding_dim 0.0 +504 38 optimizer.lr 0.021055351656530552 +504 38 negative_sampler.num_negs_per_pos 72.0 +504 38 training.batch_size 2.0 +504 39 model.embedding_dim 1.0 +504 39 optimizer.lr 0.0038821407448304274 +504 39 negative_sampler.num_negs_per_pos 67.0 +504 39 training.batch_size 2.0 +504 40 model.embedding_dim 0.0 +504 40 optimizer.lr 0.0115023564728091 +504 40 negative_sampler.num_negs_per_pos 11.0 +504 40 training.batch_size 1.0 +504 41 model.embedding_dim 2.0 +504 41 optimizer.lr 0.050780789447191096 +504 41 negative_sampler.num_negs_per_pos 38.0 +504 41 training.batch_size 0.0 +504 42 model.embedding_dim 1.0 +504 42 optimizer.lr 0.0013179045686821601 +504 42 negative_sampler.num_negs_per_pos 89.0 +504 42 training.batch_size 2.0 +504 43 model.embedding_dim 2.0 +504 43 optimizer.lr 0.013091193826623111 +504 43 negative_sampler.num_negs_per_pos 70.0 +504 43 training.batch_size 1.0 +504 44 model.embedding_dim 2.0 +504 44 optimizer.lr 0.0013399484811980468 +504 44 negative_sampler.num_negs_per_pos 52.0 +504 44 training.batch_size 1.0 +504 45 model.embedding_dim 1.0 +504 45 optimizer.lr 0.08948254120612185 +504 45 negative_sampler.num_negs_per_pos 91.0 +504 45 
training.batch_size 2.0 +504 46 model.embedding_dim 2.0 +504 46 optimizer.lr 0.0187956418131583 +504 46 negative_sampler.num_negs_per_pos 37.0 +504 46 training.batch_size 0.0 +504 47 model.embedding_dim 0.0 +504 47 optimizer.lr 0.008343813074902291 +504 47 negative_sampler.num_negs_per_pos 52.0 +504 47 training.batch_size 2.0 +504 48 model.embedding_dim 1.0 +504 48 optimizer.lr 0.002583225772284203 +504 48 negative_sampler.num_negs_per_pos 91.0 +504 48 training.batch_size 1.0 +504 49 model.embedding_dim 2.0 +504 49 optimizer.lr 0.001356806339230425 +504 49 negative_sampler.num_negs_per_pos 63.0 +504 49 training.batch_size 0.0 +504 50 model.embedding_dim 2.0 +504 50 optimizer.lr 0.07485390316169963 +504 50 negative_sampler.num_negs_per_pos 90.0 +504 50 training.batch_size 0.0 +504 51 model.embedding_dim 1.0 +504 51 optimizer.lr 0.005009326133808019 +504 51 negative_sampler.num_negs_per_pos 99.0 +504 51 training.batch_size 1.0 +504 52 model.embedding_dim 1.0 +504 52 optimizer.lr 0.0015801476725995766 +504 52 negative_sampler.num_negs_per_pos 90.0 +504 52 training.batch_size 1.0 +504 53 model.embedding_dim 0.0 +504 53 optimizer.lr 0.0010216528216005721 +504 53 negative_sampler.num_negs_per_pos 84.0 +504 53 training.batch_size 1.0 +504 54 model.embedding_dim 2.0 +504 54 optimizer.lr 0.005340494246744546 +504 54 negative_sampler.num_negs_per_pos 56.0 +504 54 training.batch_size 2.0 +504 55 model.embedding_dim 0.0 +504 55 optimizer.lr 0.0021810986254968494 +504 55 negative_sampler.num_negs_per_pos 45.0 +504 55 training.batch_size 0.0 +504 56 model.embedding_dim 1.0 +504 56 optimizer.lr 0.007848950789791835 +504 56 negative_sampler.num_negs_per_pos 57.0 +504 56 training.batch_size 2.0 +504 57 model.embedding_dim 1.0 +504 57 optimizer.lr 0.09739045246483581 +504 57 negative_sampler.num_negs_per_pos 30.0 +504 57 training.batch_size 0.0 +504 58 model.embedding_dim 1.0 +504 58 optimizer.lr 0.0039566714887246295 +504 58 negative_sampler.num_negs_per_pos 98.0 +504 58 
training.batch_size 0.0 +504 59 model.embedding_dim 0.0 +504 59 optimizer.lr 0.09589120006228485 +504 59 negative_sampler.num_negs_per_pos 24.0 +504 59 training.batch_size 0.0 +504 60 model.embedding_dim 1.0 +504 60 optimizer.lr 0.005712301462923786 +504 60 negative_sampler.num_negs_per_pos 29.0 +504 60 training.batch_size 1.0 +504 61 model.embedding_dim 1.0 +504 61 optimizer.lr 0.0062976052769304946 +504 61 negative_sampler.num_negs_per_pos 13.0 +504 61 training.batch_size 1.0 +504 62 model.embedding_dim 1.0 +504 62 optimizer.lr 0.08250233156006997 +504 62 negative_sampler.num_negs_per_pos 97.0 +504 62 training.batch_size 0.0 +504 63 model.embedding_dim 2.0 +504 63 optimizer.lr 0.031383544715526984 +504 63 negative_sampler.num_negs_per_pos 25.0 +504 63 training.batch_size 0.0 +504 64 model.embedding_dim 0.0 +504 64 optimizer.lr 0.026066304435207632 +504 64 negative_sampler.num_negs_per_pos 73.0 +504 64 training.batch_size 2.0 +504 65 model.embedding_dim 2.0 +504 65 optimizer.lr 0.042675367181186565 +504 65 negative_sampler.num_negs_per_pos 15.0 +504 65 training.batch_size 1.0 +504 66 model.embedding_dim 2.0 +504 66 optimizer.lr 0.07288545935586926 +504 66 negative_sampler.num_negs_per_pos 9.0 +504 66 training.batch_size 2.0 +504 67 model.embedding_dim 2.0 +504 67 optimizer.lr 0.0036340602333980163 +504 67 negative_sampler.num_negs_per_pos 52.0 +504 67 training.batch_size 1.0 +504 68 model.embedding_dim 2.0 +504 68 optimizer.lr 0.015917495197648773 +504 68 negative_sampler.num_negs_per_pos 72.0 +504 68 training.batch_size 1.0 +504 69 model.embedding_dim 2.0 +504 69 optimizer.lr 0.004427714131126168 +504 69 negative_sampler.num_negs_per_pos 94.0 +504 69 training.batch_size 2.0 +504 70 model.embedding_dim 1.0 +504 70 optimizer.lr 0.008409190859855331 +504 70 negative_sampler.num_negs_per_pos 83.0 +504 70 training.batch_size 0.0 +504 71 model.embedding_dim 2.0 +504 71 optimizer.lr 0.0012028801711819047 +504 71 negative_sampler.num_negs_per_pos 27.0 +504 71 
training.batch_size 0.0 +504 72 model.embedding_dim 2.0 +504 72 optimizer.lr 0.005224285697189091 +504 72 negative_sampler.num_negs_per_pos 22.0 +504 72 training.batch_size 2.0 +504 73 model.embedding_dim 0.0 +504 73 optimizer.lr 0.003945652092680945 +504 73 negative_sampler.num_negs_per_pos 66.0 +504 73 training.batch_size 1.0 +504 74 model.embedding_dim 0.0 +504 74 optimizer.lr 0.022227490857244403 +504 74 negative_sampler.num_negs_per_pos 73.0 +504 74 training.batch_size 2.0 +504 75 model.embedding_dim 1.0 +504 75 optimizer.lr 0.07309060088432233 +504 75 negative_sampler.num_negs_per_pos 67.0 +504 75 training.batch_size 1.0 +504 76 model.embedding_dim 2.0 +504 76 optimizer.lr 0.00624885524620145 +504 76 negative_sampler.num_negs_per_pos 95.0 +504 76 training.batch_size 0.0 +504 77 model.embedding_dim 1.0 +504 77 optimizer.lr 0.012981564274251017 +504 77 negative_sampler.num_negs_per_pos 9.0 +504 77 training.batch_size 2.0 +504 78 model.embedding_dim 0.0 +504 78 optimizer.lr 0.005562384659484608 +504 78 negative_sampler.num_negs_per_pos 27.0 +504 78 training.batch_size 2.0 +504 79 model.embedding_dim 0.0 +504 79 optimizer.lr 0.005227356555841166 +504 79 negative_sampler.num_negs_per_pos 77.0 +504 79 training.batch_size 0.0 +504 80 model.embedding_dim 2.0 +504 80 optimizer.lr 0.001634067957126276 +504 80 negative_sampler.num_negs_per_pos 57.0 +504 80 training.batch_size 2.0 +504 81 model.embedding_dim 1.0 +504 81 optimizer.lr 0.023141507535945367 +504 81 negative_sampler.num_negs_per_pos 66.0 +504 81 training.batch_size 0.0 +504 82 model.embedding_dim 0.0 +504 82 optimizer.lr 0.05016149126364376 +504 82 negative_sampler.num_negs_per_pos 27.0 +504 82 training.batch_size 2.0 +504 83 model.embedding_dim 0.0 +504 83 optimizer.lr 0.012936806871783885 +504 83 negative_sampler.num_negs_per_pos 23.0 +504 83 training.batch_size 0.0 +504 84 model.embedding_dim 2.0 +504 84 optimizer.lr 0.003910896636868264 +504 84 negative_sampler.num_negs_per_pos 55.0 +504 84 
training.batch_size 2.0 +504 85 model.embedding_dim 0.0 +504 85 optimizer.lr 0.001949536956573617 +504 85 negative_sampler.num_negs_per_pos 80.0 +504 85 training.batch_size 2.0 +504 86 model.embedding_dim 2.0 +504 86 optimizer.lr 0.0714890212956895 +504 86 negative_sampler.num_negs_per_pos 14.0 +504 86 training.batch_size 1.0 +504 87 model.embedding_dim 1.0 +504 87 optimizer.lr 0.021203905796160133 +504 87 negative_sampler.num_negs_per_pos 35.0 +504 87 training.batch_size 0.0 +504 88 model.embedding_dim 0.0 +504 88 optimizer.lr 0.058456938462852714 +504 88 negative_sampler.num_negs_per_pos 49.0 +504 88 training.batch_size 0.0 +504 89 model.embedding_dim 0.0 +504 89 optimizer.lr 0.002670934139275975 +504 89 negative_sampler.num_negs_per_pos 84.0 +504 89 training.batch_size 1.0 +504 90 model.embedding_dim 2.0 +504 90 optimizer.lr 0.022771984916590715 +504 90 negative_sampler.num_negs_per_pos 86.0 +504 90 training.batch_size 0.0 +504 91 model.embedding_dim 0.0 +504 91 optimizer.lr 0.017105424211615507 +504 91 negative_sampler.num_negs_per_pos 35.0 +504 91 training.batch_size 1.0 +504 92 model.embedding_dim 2.0 +504 92 optimizer.lr 0.0015922640086093868 +504 92 negative_sampler.num_negs_per_pos 49.0 +504 92 training.batch_size 2.0 +504 93 model.embedding_dim 2.0 +504 93 optimizer.lr 0.003696024626077287 +504 93 negative_sampler.num_negs_per_pos 69.0 +504 93 training.batch_size 2.0 +504 94 model.embedding_dim 0.0 +504 94 optimizer.lr 0.014363768873767266 +504 94 negative_sampler.num_negs_per_pos 93.0 +504 94 training.batch_size 1.0 +504 95 model.embedding_dim 0.0 +504 95 optimizer.lr 0.01501640551763929 +504 95 negative_sampler.num_negs_per_pos 44.0 +504 95 training.batch_size 2.0 +504 96 model.embedding_dim 1.0 +504 96 optimizer.lr 0.0018237579871135969 +504 96 negative_sampler.num_negs_per_pos 88.0 +504 96 training.batch_size 1.0 +504 97 model.embedding_dim 0.0 +504 97 optimizer.lr 0.016534040371442484 +504 97 negative_sampler.num_negs_per_pos 80.0 +504 97 
training.batch_size 0.0 +504 98 model.embedding_dim 0.0 +504 98 optimizer.lr 0.01437761498774279 +504 98 negative_sampler.num_negs_per_pos 37.0 +504 98 training.batch_size 0.0 +504 99 model.embedding_dim 1.0 +504 99 optimizer.lr 0.01225514195517183 +504 99 negative_sampler.num_negs_per_pos 58.0 +504 99 training.batch_size 1.0 +504 100 model.embedding_dim 2.0 +504 100 optimizer.lr 0.013217230861497273 +504 100 negative_sampler.num_negs_per_pos 98.0 +504 100 training.batch_size 0.0 +504 1 dataset """wn18rr""" +504 1 model """proje""" +504 1 loss """softplus""" +504 1 regularizer """no""" +504 1 optimizer """adam""" +504 1 training_loop """owa""" +504 1 negative_sampler """basic""" +504 1 evaluator """rankbased""" +504 2 dataset """wn18rr""" +504 2 model """proje""" +504 2 loss """softplus""" +504 2 regularizer """no""" +504 2 optimizer """adam""" +504 2 training_loop """owa""" +504 2 negative_sampler """basic""" +504 2 evaluator """rankbased""" +504 3 dataset """wn18rr""" +504 3 model """proje""" +504 3 loss """softplus""" +504 3 regularizer """no""" +504 3 optimizer """adam""" +504 3 training_loop """owa""" +504 3 negative_sampler """basic""" +504 3 evaluator """rankbased""" +504 4 dataset """wn18rr""" +504 4 model """proje""" +504 4 loss """softplus""" +504 4 regularizer """no""" +504 4 optimizer """adam""" +504 4 training_loop """owa""" +504 4 negative_sampler """basic""" +504 4 evaluator """rankbased""" +504 5 dataset """wn18rr""" +504 5 model """proje""" +504 5 loss """softplus""" +504 5 regularizer """no""" +504 5 optimizer """adam""" +504 5 training_loop """owa""" +504 5 negative_sampler """basic""" +504 5 evaluator """rankbased""" +504 6 dataset """wn18rr""" +504 6 model """proje""" +504 6 loss """softplus""" +504 6 regularizer """no""" +504 6 optimizer """adam""" +504 6 training_loop """owa""" +504 6 negative_sampler """basic""" +504 6 evaluator """rankbased""" +504 7 dataset """wn18rr""" +504 7 model """proje""" +504 7 loss """softplus""" +504 7 regularizer 
"""no""" +504 7 optimizer """adam""" +504 7 training_loop """owa""" +504 7 negative_sampler """basic""" +504 7 evaluator """rankbased""" +504 8 dataset """wn18rr""" +504 8 model """proje""" +504 8 loss """softplus""" +504 8 regularizer """no""" +504 8 optimizer """adam""" +504 8 training_loop """owa""" +504 8 negative_sampler """basic""" +504 8 evaluator """rankbased""" +504 9 dataset """wn18rr""" +504 9 model """proje""" +504 9 loss """softplus""" +504 9 regularizer """no""" +504 9 optimizer """adam""" +504 9 training_loop """owa""" +504 9 negative_sampler """basic""" +504 9 evaluator """rankbased""" +504 10 dataset """wn18rr""" +504 10 model """proje""" +504 10 loss """softplus""" +504 10 regularizer """no""" +504 10 optimizer """adam""" +504 10 training_loop """owa""" +504 10 negative_sampler """basic""" +504 10 evaluator """rankbased""" +504 11 dataset """wn18rr""" +504 11 model """proje""" +504 11 loss """softplus""" +504 11 regularizer """no""" +504 11 optimizer """adam""" +504 11 training_loop """owa""" +504 11 negative_sampler """basic""" +504 11 evaluator """rankbased""" +504 12 dataset """wn18rr""" +504 12 model """proje""" +504 12 loss """softplus""" +504 12 regularizer """no""" +504 12 optimizer """adam""" +504 12 training_loop """owa""" +504 12 negative_sampler """basic""" +504 12 evaluator """rankbased""" +504 13 dataset """wn18rr""" +504 13 model """proje""" +504 13 loss """softplus""" +504 13 regularizer """no""" +504 13 optimizer """adam""" +504 13 training_loop """owa""" +504 13 negative_sampler """basic""" +504 13 evaluator """rankbased""" +504 14 dataset """wn18rr""" +504 14 model """proje""" +504 14 loss """softplus""" +504 14 regularizer """no""" +504 14 optimizer """adam""" +504 14 training_loop """owa""" +504 14 negative_sampler """basic""" +504 14 evaluator """rankbased""" +504 15 dataset """wn18rr""" +504 15 model """proje""" +504 15 loss """softplus""" +504 15 regularizer """no""" +504 15 optimizer """adam""" +504 15 training_loop 
"""owa""" +504 15 negative_sampler """basic""" +504 15 evaluator """rankbased""" +504 16 dataset """wn18rr""" +504 16 model """proje""" +504 16 loss """softplus""" +504 16 regularizer """no""" +504 16 optimizer """adam""" +504 16 training_loop """owa""" +504 16 negative_sampler """basic""" +504 16 evaluator """rankbased""" +504 17 dataset """wn18rr""" +504 17 model """proje""" +504 17 loss """softplus""" +504 17 regularizer """no""" +504 17 optimizer """adam""" +504 17 training_loop """owa""" +504 17 negative_sampler """basic""" +504 17 evaluator """rankbased""" +504 18 dataset """wn18rr""" +504 18 model """proje""" +504 18 loss """softplus""" +504 18 regularizer """no""" +504 18 optimizer """adam""" +504 18 training_loop """owa""" +504 18 negative_sampler """basic""" +504 18 evaluator """rankbased""" +504 19 dataset """wn18rr""" +504 19 model """proje""" +504 19 loss """softplus""" +504 19 regularizer """no""" +504 19 optimizer """adam""" +504 19 training_loop """owa""" +504 19 negative_sampler """basic""" +504 19 evaluator """rankbased""" +504 20 dataset """wn18rr""" +504 20 model """proje""" +504 20 loss """softplus""" +504 20 regularizer """no""" +504 20 optimizer """adam""" +504 20 training_loop """owa""" +504 20 negative_sampler """basic""" +504 20 evaluator """rankbased""" +504 21 dataset """wn18rr""" +504 21 model """proje""" +504 21 loss """softplus""" +504 21 regularizer """no""" +504 21 optimizer """adam""" +504 21 training_loop """owa""" +504 21 negative_sampler """basic""" +504 21 evaluator """rankbased""" +504 22 dataset """wn18rr""" +504 22 model """proje""" +504 22 loss """softplus""" +504 22 regularizer """no""" +504 22 optimizer """adam""" +504 22 training_loop """owa""" +504 22 negative_sampler """basic""" +504 22 evaluator """rankbased""" +504 23 dataset """wn18rr""" +504 23 model """proje""" +504 23 loss """softplus""" +504 23 regularizer """no""" +504 23 optimizer """adam""" +504 23 training_loop """owa""" +504 23 negative_sampler """basic""" 
+504 23 evaluator """rankbased""" +504 24 dataset """wn18rr""" +504 24 model """proje""" +504 24 loss """softplus""" +504 24 regularizer """no""" +504 24 optimizer """adam""" +504 24 training_loop """owa""" +504 24 negative_sampler """basic""" +504 24 evaluator """rankbased""" +504 25 dataset """wn18rr""" +504 25 model """proje""" +504 25 loss """softplus""" +504 25 regularizer """no""" +504 25 optimizer """adam""" +504 25 training_loop """owa""" +504 25 negative_sampler """basic""" +504 25 evaluator """rankbased""" +504 26 dataset """wn18rr""" +504 26 model """proje""" +504 26 loss """softplus""" +504 26 regularizer """no""" +504 26 optimizer """adam""" +504 26 training_loop """owa""" +504 26 negative_sampler """basic""" +504 26 evaluator """rankbased""" +504 27 dataset """wn18rr""" +504 27 model """proje""" +504 27 loss """softplus""" +504 27 regularizer """no""" +504 27 optimizer """adam""" +504 27 training_loop """owa""" +504 27 negative_sampler """basic""" +504 27 evaluator """rankbased""" +504 28 dataset """wn18rr""" +504 28 model """proje""" +504 28 loss """softplus""" +504 28 regularizer """no""" +504 28 optimizer """adam""" +504 28 training_loop """owa""" +504 28 negative_sampler """basic""" +504 28 evaluator """rankbased""" +504 29 dataset """wn18rr""" +504 29 model """proje""" +504 29 loss """softplus""" +504 29 regularizer """no""" +504 29 optimizer """adam""" +504 29 training_loop """owa""" +504 29 negative_sampler """basic""" +504 29 evaluator """rankbased""" +504 30 dataset """wn18rr""" +504 30 model """proje""" +504 30 loss """softplus""" +504 30 regularizer """no""" +504 30 optimizer """adam""" +504 30 training_loop """owa""" +504 30 negative_sampler """basic""" +504 30 evaluator """rankbased""" +504 31 dataset """wn18rr""" +504 31 model """proje""" +504 31 loss """softplus""" +504 31 regularizer """no""" +504 31 optimizer """adam""" +504 31 training_loop """owa""" +504 31 negative_sampler """basic""" +504 31 evaluator """rankbased""" +504 32 
dataset """wn18rr""" +504 32 model """proje""" +504 32 loss """softplus""" +504 32 regularizer """no""" +504 32 optimizer """adam""" +504 32 training_loop """owa""" +504 32 negative_sampler """basic""" +504 32 evaluator """rankbased""" +504 33 dataset """wn18rr""" +504 33 model """proje""" +504 33 loss """softplus""" +504 33 regularizer """no""" +504 33 optimizer """adam""" +504 33 training_loop """owa""" +504 33 negative_sampler """basic""" +504 33 evaluator """rankbased""" +504 34 dataset """wn18rr""" +504 34 model """proje""" +504 34 loss """softplus""" +504 34 regularizer """no""" +504 34 optimizer """adam""" +504 34 training_loop """owa""" +504 34 negative_sampler """basic""" +504 34 evaluator """rankbased""" +504 35 dataset """wn18rr""" +504 35 model """proje""" +504 35 loss """softplus""" +504 35 regularizer """no""" +504 35 optimizer """adam""" +504 35 training_loop """owa""" +504 35 negative_sampler """basic""" +504 35 evaluator """rankbased""" +504 36 dataset """wn18rr""" +504 36 model """proje""" +504 36 loss """softplus""" +504 36 regularizer """no""" +504 36 optimizer """adam""" +504 36 training_loop """owa""" +504 36 negative_sampler """basic""" +504 36 evaluator """rankbased""" +504 37 dataset """wn18rr""" +504 37 model """proje""" +504 37 loss """softplus""" +504 37 regularizer """no""" +504 37 optimizer """adam""" +504 37 training_loop """owa""" +504 37 negative_sampler """basic""" +504 37 evaluator """rankbased""" +504 38 dataset """wn18rr""" +504 38 model """proje""" +504 38 loss """softplus""" +504 38 regularizer """no""" +504 38 optimizer """adam""" +504 38 training_loop """owa""" +504 38 negative_sampler """basic""" +504 38 evaluator """rankbased""" +504 39 dataset """wn18rr""" +504 39 model """proje""" +504 39 loss """softplus""" +504 39 regularizer """no""" +504 39 optimizer """adam""" +504 39 training_loop """owa""" +504 39 negative_sampler """basic""" +504 39 evaluator """rankbased""" +504 40 dataset """wn18rr""" +504 40 model """proje""" 
+504 40 loss """softplus""" +504 40 regularizer """no""" +504 40 optimizer """adam""" +504 40 training_loop """owa""" +504 40 negative_sampler """basic""" +504 40 evaluator """rankbased""" +504 41 dataset """wn18rr""" +504 41 model """proje""" +504 41 loss """softplus""" +504 41 regularizer """no""" +504 41 optimizer """adam""" +504 41 training_loop """owa""" +504 41 negative_sampler """basic""" +504 41 evaluator """rankbased""" +504 42 dataset """wn18rr""" +504 42 model """proje""" +504 42 loss """softplus""" +504 42 regularizer """no""" +504 42 optimizer """adam""" +504 42 training_loop """owa""" +504 42 negative_sampler """basic""" +504 42 evaluator """rankbased""" +504 43 dataset """wn18rr""" +504 43 model """proje""" +504 43 loss """softplus""" +504 43 regularizer """no""" +504 43 optimizer """adam""" +504 43 training_loop """owa""" +504 43 negative_sampler """basic""" +504 43 evaluator """rankbased""" +504 44 dataset """wn18rr""" +504 44 model """proje""" +504 44 loss """softplus""" +504 44 regularizer """no""" +504 44 optimizer """adam""" +504 44 training_loop """owa""" +504 44 negative_sampler """basic""" +504 44 evaluator """rankbased""" +504 45 dataset """wn18rr""" +504 45 model """proje""" +504 45 loss """softplus""" +504 45 regularizer """no""" +504 45 optimizer """adam""" +504 45 training_loop """owa""" +504 45 negative_sampler """basic""" +504 45 evaluator """rankbased""" +504 46 dataset """wn18rr""" +504 46 model """proje""" +504 46 loss """softplus""" +504 46 regularizer """no""" +504 46 optimizer """adam""" +504 46 training_loop """owa""" +504 46 negative_sampler """basic""" +504 46 evaluator """rankbased""" +504 47 dataset """wn18rr""" +504 47 model """proje""" +504 47 loss """softplus""" +504 47 regularizer """no""" +504 47 optimizer """adam""" +504 47 training_loop """owa""" +504 47 negative_sampler """basic""" +504 47 evaluator """rankbased""" +504 48 dataset """wn18rr""" +504 48 model """proje""" +504 48 loss """softplus""" +504 48 regularizer 
"""no""" +504 48 optimizer """adam""" +504 48 training_loop """owa""" +504 48 negative_sampler """basic""" +504 48 evaluator """rankbased""" +504 49 dataset """wn18rr""" +504 49 model """proje""" +504 49 loss """softplus""" +504 49 regularizer """no""" +504 49 optimizer """adam""" +504 49 training_loop """owa""" +504 49 negative_sampler """basic""" +504 49 evaluator """rankbased""" +504 50 dataset """wn18rr""" +504 50 model """proje""" +504 50 loss """softplus""" +504 50 regularizer """no""" +504 50 optimizer """adam""" +504 50 training_loop """owa""" +504 50 negative_sampler """basic""" +504 50 evaluator """rankbased""" +504 51 dataset """wn18rr""" +504 51 model """proje""" +504 51 loss """softplus""" +504 51 regularizer """no""" +504 51 optimizer """adam""" +504 51 training_loop """owa""" +504 51 negative_sampler """basic""" +504 51 evaluator """rankbased""" +504 52 dataset """wn18rr""" +504 52 model """proje""" +504 52 loss """softplus""" +504 52 regularizer """no""" +504 52 optimizer """adam""" +504 52 training_loop """owa""" +504 52 negative_sampler """basic""" +504 52 evaluator """rankbased""" +504 53 dataset """wn18rr""" +504 53 model """proje""" +504 53 loss """softplus""" +504 53 regularizer """no""" +504 53 optimizer """adam""" +504 53 training_loop """owa""" +504 53 negative_sampler """basic""" +504 53 evaluator """rankbased""" +504 54 dataset """wn18rr""" +504 54 model """proje""" +504 54 loss """softplus""" +504 54 regularizer """no""" +504 54 optimizer """adam""" +504 54 training_loop """owa""" +504 54 negative_sampler """basic""" +504 54 evaluator """rankbased""" +504 55 dataset """wn18rr""" +504 55 model """proje""" +504 55 loss """softplus""" +504 55 regularizer """no""" +504 55 optimizer """adam""" +504 55 training_loop """owa""" +504 55 negative_sampler """basic""" +504 55 evaluator """rankbased""" +504 56 dataset """wn18rr""" +504 56 model """proje""" +504 56 loss """softplus""" +504 56 regularizer """no""" +504 56 optimizer """adam""" +504 56 
training_loop """owa""" +504 56 negative_sampler """basic""" +504 56 evaluator """rankbased""" +504 57 dataset """wn18rr""" +504 57 model """proje""" +504 57 loss """softplus""" +504 57 regularizer """no""" +504 57 optimizer """adam""" +504 57 training_loop """owa""" +504 57 negative_sampler """basic""" +504 57 evaluator """rankbased""" +504 58 dataset """wn18rr""" +504 58 model """proje""" +504 58 loss """softplus""" +504 58 regularizer """no""" +504 58 optimizer """adam""" +504 58 training_loop """owa""" +504 58 negative_sampler """basic""" +504 58 evaluator """rankbased""" +504 59 dataset """wn18rr""" +504 59 model """proje""" +504 59 loss """softplus""" +504 59 regularizer """no""" +504 59 optimizer """adam""" +504 59 training_loop """owa""" +504 59 negative_sampler """basic""" +504 59 evaluator """rankbased""" +504 60 dataset """wn18rr""" +504 60 model """proje""" +504 60 loss """softplus""" +504 60 regularizer """no""" +504 60 optimizer """adam""" +504 60 training_loop """owa""" +504 60 negative_sampler """basic""" +504 60 evaluator """rankbased""" +504 61 dataset """wn18rr""" +504 61 model """proje""" +504 61 loss """softplus""" +504 61 regularizer """no""" +504 61 optimizer """adam""" +504 61 training_loop """owa""" +504 61 negative_sampler """basic""" +504 61 evaluator """rankbased""" +504 62 dataset """wn18rr""" +504 62 model """proje""" +504 62 loss """softplus""" +504 62 regularizer """no""" +504 62 optimizer """adam""" +504 62 training_loop """owa""" +504 62 negative_sampler """basic""" +504 62 evaluator """rankbased""" +504 63 dataset """wn18rr""" +504 63 model """proje""" +504 63 loss """softplus""" +504 63 regularizer """no""" +504 63 optimizer """adam""" +504 63 training_loop """owa""" +504 63 negative_sampler """basic""" +504 63 evaluator """rankbased""" +504 64 dataset """wn18rr""" +504 64 model """proje""" +504 64 loss """softplus""" +504 64 regularizer """no""" +504 64 optimizer """adam""" +504 64 training_loop """owa""" +504 64 
negative_sampler """basic""" +504 64 evaluator """rankbased""" +504 65 dataset """wn18rr""" +504 65 model """proje""" +504 65 loss """softplus""" +504 65 regularizer """no""" +504 65 optimizer """adam""" +504 65 training_loop """owa""" +504 65 negative_sampler """basic""" +504 65 evaluator """rankbased""" +504 66 dataset """wn18rr""" +504 66 model """proje""" +504 66 loss """softplus""" +504 66 regularizer """no""" +504 66 optimizer """adam""" +504 66 training_loop """owa""" +504 66 negative_sampler """basic""" +504 66 evaluator """rankbased""" +504 67 dataset """wn18rr""" +504 67 model """proje""" +504 67 loss """softplus""" +504 67 regularizer """no""" +504 67 optimizer """adam""" +504 67 training_loop """owa""" +504 67 negative_sampler """basic""" +504 67 evaluator """rankbased""" +504 68 dataset """wn18rr""" +504 68 model """proje""" +504 68 loss """softplus""" +504 68 regularizer """no""" +504 68 optimizer """adam""" +504 68 training_loop """owa""" +504 68 negative_sampler """basic""" +504 68 evaluator """rankbased""" +504 69 dataset """wn18rr""" +504 69 model """proje""" +504 69 loss """softplus""" +504 69 regularizer """no""" +504 69 optimizer """adam""" +504 69 training_loop """owa""" +504 69 negative_sampler """basic""" +504 69 evaluator """rankbased""" +504 70 dataset """wn18rr""" +504 70 model """proje""" +504 70 loss """softplus""" +504 70 regularizer """no""" +504 70 optimizer """adam""" +504 70 training_loop """owa""" +504 70 negative_sampler """basic""" +504 70 evaluator """rankbased""" +504 71 dataset """wn18rr""" +504 71 model """proje""" +504 71 loss """softplus""" +504 71 regularizer """no""" +504 71 optimizer """adam""" +504 71 training_loop """owa""" +504 71 negative_sampler """basic""" +504 71 evaluator """rankbased""" +504 72 dataset """wn18rr""" +504 72 model """proje""" +504 72 loss """softplus""" +504 72 regularizer """no""" +504 72 optimizer """adam""" +504 72 training_loop """owa""" +504 72 negative_sampler """basic""" +504 72 evaluator 
"""rankbased""" +504 73 dataset """wn18rr""" +504 73 model """proje""" +504 73 loss """softplus""" +504 73 regularizer """no""" +504 73 optimizer """adam""" +504 73 training_loop """owa""" +504 73 negative_sampler """basic""" +504 73 evaluator """rankbased""" +504 74 dataset """wn18rr""" +504 74 model """proje""" +504 74 loss """softplus""" +504 74 regularizer """no""" +504 74 optimizer """adam""" +504 74 training_loop """owa""" +504 74 negative_sampler """basic""" +504 74 evaluator """rankbased""" +504 75 dataset """wn18rr""" +504 75 model """proje""" +504 75 loss """softplus""" +504 75 regularizer """no""" +504 75 optimizer """adam""" +504 75 training_loop """owa""" +504 75 negative_sampler """basic""" +504 75 evaluator """rankbased""" +504 76 dataset """wn18rr""" +504 76 model """proje""" +504 76 loss """softplus""" +504 76 regularizer """no""" +504 76 optimizer """adam""" +504 76 training_loop """owa""" +504 76 negative_sampler """basic""" +504 76 evaluator """rankbased""" +504 77 dataset """wn18rr""" +504 77 model """proje""" +504 77 loss """softplus""" +504 77 regularizer """no""" +504 77 optimizer """adam""" +504 77 training_loop """owa""" +504 77 negative_sampler """basic""" +504 77 evaluator """rankbased""" +504 78 dataset """wn18rr""" +504 78 model """proje""" +504 78 loss """softplus""" +504 78 regularizer """no""" +504 78 optimizer """adam""" +504 78 training_loop """owa""" +504 78 negative_sampler """basic""" +504 78 evaluator """rankbased""" +504 79 dataset """wn18rr""" +504 79 model """proje""" +504 79 loss """softplus""" +504 79 regularizer """no""" +504 79 optimizer """adam""" +504 79 training_loop """owa""" +504 79 negative_sampler """basic""" +504 79 evaluator """rankbased""" +504 80 dataset """wn18rr""" +504 80 model """proje""" +504 80 loss """softplus""" +504 80 regularizer """no""" +504 80 optimizer """adam""" +504 80 training_loop """owa""" +504 80 negative_sampler """basic""" +504 80 evaluator """rankbased""" +504 81 dataset """wn18rr""" 
+504 81 model """proje""" +504 81 loss """softplus""" +504 81 regularizer """no""" +504 81 optimizer """adam""" +504 81 training_loop """owa""" +504 81 negative_sampler """basic""" +504 81 evaluator """rankbased""" +504 82 dataset """wn18rr""" +504 82 model """proje""" +504 82 loss """softplus""" +504 82 regularizer """no""" +504 82 optimizer """adam""" +504 82 training_loop """owa""" +504 82 negative_sampler """basic""" +504 82 evaluator """rankbased""" +504 83 dataset """wn18rr""" +504 83 model """proje""" +504 83 loss """softplus""" +504 83 regularizer """no""" +504 83 optimizer """adam""" +504 83 training_loop """owa""" +504 83 negative_sampler """basic""" +504 83 evaluator """rankbased""" +504 84 dataset """wn18rr""" +504 84 model """proje""" +504 84 loss """softplus""" +504 84 regularizer """no""" +504 84 optimizer """adam""" +504 84 training_loop """owa""" +504 84 negative_sampler """basic""" +504 84 evaluator """rankbased""" +504 85 dataset """wn18rr""" +504 85 model """proje""" +504 85 loss """softplus""" +504 85 regularizer """no""" +504 85 optimizer """adam""" +504 85 training_loop """owa""" +504 85 negative_sampler """basic""" +504 85 evaluator """rankbased""" +504 86 dataset """wn18rr""" +504 86 model """proje""" +504 86 loss """softplus""" +504 86 regularizer """no""" +504 86 optimizer """adam""" +504 86 training_loop """owa""" +504 86 negative_sampler """basic""" +504 86 evaluator """rankbased""" +504 87 dataset """wn18rr""" +504 87 model """proje""" +504 87 loss """softplus""" +504 87 regularizer """no""" +504 87 optimizer """adam""" +504 87 training_loop """owa""" +504 87 negative_sampler """basic""" +504 87 evaluator """rankbased""" +504 88 dataset """wn18rr""" +504 88 model """proje""" +504 88 loss """softplus""" +504 88 regularizer """no""" +504 88 optimizer """adam""" +504 88 training_loop """owa""" +504 88 negative_sampler """basic""" +504 88 evaluator """rankbased""" +504 89 dataset """wn18rr""" +504 89 model """proje""" +504 89 loss 
"""softplus""" +504 89 regularizer """no""" +504 89 optimizer """adam""" +504 89 training_loop """owa""" +504 89 negative_sampler """basic""" +504 89 evaluator """rankbased""" +504 90 dataset """wn18rr""" +504 90 model """proje""" +504 90 loss """softplus""" +504 90 regularizer """no""" +504 90 optimizer """adam""" +504 90 training_loop """owa""" +504 90 negative_sampler """basic""" +504 90 evaluator """rankbased""" +504 91 dataset """wn18rr""" +504 91 model """proje""" +504 91 loss """softplus""" +504 91 regularizer """no""" +504 91 optimizer """adam""" +504 91 training_loop """owa""" +504 91 negative_sampler """basic""" +504 91 evaluator """rankbased""" +504 92 dataset """wn18rr""" +504 92 model """proje""" +504 92 loss """softplus""" +504 92 regularizer """no""" +504 92 optimizer """adam""" +504 92 training_loop """owa""" +504 92 negative_sampler """basic""" +504 92 evaluator """rankbased""" +504 93 dataset """wn18rr""" +504 93 model """proje""" +504 93 loss """softplus""" +504 93 regularizer """no""" +504 93 optimizer """adam""" +504 93 training_loop """owa""" +504 93 negative_sampler """basic""" +504 93 evaluator """rankbased""" +504 94 dataset """wn18rr""" +504 94 model """proje""" +504 94 loss """softplus""" +504 94 regularizer """no""" +504 94 optimizer """adam""" +504 94 training_loop """owa""" +504 94 negative_sampler """basic""" +504 94 evaluator """rankbased""" +504 95 dataset """wn18rr""" +504 95 model """proje""" +504 95 loss """softplus""" +504 95 regularizer """no""" +504 95 optimizer """adam""" +504 95 training_loop """owa""" +504 95 negative_sampler """basic""" +504 95 evaluator """rankbased""" +504 96 dataset """wn18rr""" +504 96 model """proje""" +504 96 loss """softplus""" +504 96 regularizer """no""" +504 96 optimizer """adam""" +504 96 training_loop """owa""" +504 96 negative_sampler """basic""" +504 96 evaluator """rankbased""" +504 97 dataset """wn18rr""" +504 97 model """proje""" +504 97 loss """softplus""" +504 97 regularizer """no""" 
+504 97 optimizer """adam""" +504 97 training_loop """owa""" +504 97 negative_sampler """basic""" +504 97 evaluator """rankbased""" +504 98 dataset """wn18rr""" +504 98 model """proje""" +504 98 loss """softplus""" +504 98 regularizer """no""" +504 98 optimizer """adam""" +504 98 training_loop """owa""" +504 98 negative_sampler """basic""" +504 98 evaluator """rankbased""" +504 99 dataset """wn18rr""" +504 99 model """proje""" +504 99 loss """softplus""" +504 99 regularizer """no""" +504 99 optimizer """adam""" +504 99 training_loop """owa""" +504 99 negative_sampler """basic""" +504 99 evaluator """rankbased""" +504 100 dataset """wn18rr""" +504 100 model """proje""" +504 100 loss """softplus""" +504 100 regularizer """no""" +504 100 optimizer """adam""" +504 100 training_loop """owa""" +504 100 negative_sampler """basic""" +504 100 evaluator """rankbased""" +505 1 model.embedding_dim 0.0 +505 1 loss.margin 18.137834966322654 +505 1 loss.adversarial_temperature 0.512661660051774 +505 1 optimizer.lr 0.01079628569054215 +505 1 negative_sampler.num_negs_per_pos 84.0 +505 1 training.batch_size 0.0 +505 2 model.embedding_dim 0.0 +505 2 loss.margin 16.297456465447326 +505 2 loss.adversarial_temperature 0.9739618829845393 +505 2 optimizer.lr 0.013060203885730037 +505 2 negative_sampler.num_negs_per_pos 75.0 +505 2 training.batch_size 2.0 +505 3 model.embedding_dim 1.0 +505 3 loss.margin 28.630216497799797 +505 3 loss.adversarial_temperature 0.24580100145165834 +505 3 optimizer.lr 0.06518652805604928 +505 3 negative_sampler.num_negs_per_pos 0.0 +505 3 training.batch_size 2.0 +505 4 model.embedding_dim 2.0 +505 4 loss.margin 14.36845739695829 +505 4 loss.adversarial_temperature 0.8854395415355932 +505 4 optimizer.lr 0.030360833992318998 +505 4 negative_sampler.num_negs_per_pos 78.0 +505 4 training.batch_size 1.0 +505 5 model.embedding_dim 2.0 +505 5 loss.margin 5.914967459126193 +505 5 loss.adversarial_temperature 0.9649957009196911 +505 5 optimizer.lr 0.008936188801462575 
+505 5 negative_sampler.num_negs_per_pos 35.0 +505 5 training.batch_size 1.0 +505 6 model.embedding_dim 0.0 +505 6 loss.margin 17.589850538927198 +505 6 loss.adversarial_temperature 0.7003710651852172 +505 6 optimizer.lr 0.002181359461854772 +505 6 negative_sampler.num_negs_per_pos 90.0 +505 6 training.batch_size 1.0 +505 7 model.embedding_dim 1.0 +505 7 loss.margin 16.294226055586936 +505 7 loss.adversarial_temperature 0.14077761162965 +505 7 optimizer.lr 0.001439651264549224 +505 7 negative_sampler.num_negs_per_pos 58.0 +505 7 training.batch_size 0.0 +505 8 model.embedding_dim 1.0 +505 8 loss.margin 12.75001855480556 +505 8 loss.adversarial_temperature 0.1770024218625057 +505 8 optimizer.lr 0.002227753210436054 +505 8 negative_sampler.num_negs_per_pos 76.0 +505 8 training.batch_size 0.0 +505 9 model.embedding_dim 1.0 +505 9 loss.margin 23.299189664090648 +505 9 loss.adversarial_temperature 0.5528634159117485 +505 9 optimizer.lr 0.08647031212371105 +505 9 negative_sampler.num_negs_per_pos 45.0 +505 9 training.batch_size 0.0 +505 10 model.embedding_dim 0.0 +505 10 loss.margin 10.745310913441719 +505 10 loss.adversarial_temperature 0.8008227318050519 +505 10 optimizer.lr 0.0601094323123795 +505 10 negative_sampler.num_negs_per_pos 46.0 +505 10 training.batch_size 2.0 +505 11 model.embedding_dim 2.0 +505 11 loss.margin 10.637272114562014 +505 11 loss.adversarial_temperature 0.6711834545432204 +505 11 optimizer.lr 0.003147033169417749 +505 11 negative_sampler.num_negs_per_pos 10.0 +505 11 training.batch_size 2.0 +505 12 model.embedding_dim 2.0 +505 12 loss.margin 28.823032710587835 +505 12 loss.adversarial_temperature 0.7462451244763502 +505 12 optimizer.lr 0.03641870070310834 +505 12 negative_sampler.num_negs_per_pos 47.0 +505 12 training.batch_size 0.0 +505 13 model.embedding_dim 1.0 +505 13 loss.margin 28.832774238002603 +505 13 loss.adversarial_temperature 0.49486846193106265 +505 13 optimizer.lr 0.011863299933114102 +505 13 negative_sampler.num_negs_per_pos 60.0 
+505 13 training.batch_size 1.0 +505 14 model.embedding_dim 1.0 +505 14 loss.margin 15.666947733425552 +505 14 loss.adversarial_temperature 0.19134909406369846 +505 14 optimizer.lr 0.010922219338498199 +505 14 negative_sampler.num_negs_per_pos 58.0 +505 14 training.batch_size 1.0 +505 15 model.embedding_dim 2.0 +505 15 loss.margin 11.932657927705959 +505 15 loss.adversarial_temperature 0.18232851723032137 +505 15 optimizer.lr 0.01497755552676103 +505 15 negative_sampler.num_negs_per_pos 85.0 +505 15 training.batch_size 1.0 +505 16 model.embedding_dim 2.0 +505 16 loss.margin 2.230995430043535 +505 16 loss.adversarial_temperature 0.8660244984269694 +505 16 optimizer.lr 0.012201262209621172 +505 16 negative_sampler.num_negs_per_pos 22.0 +505 16 training.batch_size 2.0 +505 17 model.embedding_dim 1.0 +505 17 loss.margin 3.3149705138577934 +505 17 loss.adversarial_temperature 0.3296872216305055 +505 17 optimizer.lr 0.002272986660179814 +505 17 negative_sampler.num_negs_per_pos 78.0 +505 17 training.batch_size 2.0 +505 18 model.embedding_dim 1.0 +505 18 loss.margin 22.3499856618611 +505 18 loss.adversarial_temperature 0.8571823929905074 +505 18 optimizer.lr 0.0020567438194981844 +505 18 negative_sampler.num_negs_per_pos 82.0 +505 18 training.batch_size 0.0 +505 19 model.embedding_dim 0.0 +505 19 loss.margin 19.742363124207575 +505 19 loss.adversarial_temperature 0.9559245898463589 +505 19 optimizer.lr 0.003632455649603355 +505 19 negative_sampler.num_negs_per_pos 90.0 +505 19 training.batch_size 2.0 +505 20 model.embedding_dim 0.0 +505 20 loss.margin 17.20034682242829 +505 20 loss.adversarial_temperature 0.8472409820464493 +505 20 optimizer.lr 0.013870952172134325 +505 20 negative_sampler.num_negs_per_pos 17.0 +505 20 training.batch_size 1.0 +505 21 model.embedding_dim 0.0 +505 21 loss.margin 9.879121327944329 +505 21 loss.adversarial_temperature 0.25743270272466645 +505 21 optimizer.lr 0.040805566524367266 +505 21 negative_sampler.num_negs_per_pos 93.0 +505 21 
training.batch_size 0.0 +505 22 model.embedding_dim 2.0 +505 22 loss.margin 29.179730742955567 +505 22 loss.adversarial_temperature 0.2592174731396162 +505 22 optimizer.lr 0.014177717165482098 +505 22 negative_sampler.num_negs_per_pos 58.0 +505 22 training.batch_size 1.0 +505 23 model.embedding_dim 1.0 +505 23 loss.margin 13.835545157072902 +505 23 loss.adversarial_temperature 0.6071258864101443 +505 23 optimizer.lr 0.014034020460055512 +505 23 negative_sampler.num_negs_per_pos 35.0 +505 23 training.batch_size 0.0 +505 24 model.embedding_dim 0.0 +505 24 loss.margin 22.077986884068505 +505 24 loss.adversarial_temperature 0.9824850687729564 +505 24 optimizer.lr 0.00849303436075876 +505 24 negative_sampler.num_negs_per_pos 7.0 +505 24 training.batch_size 0.0 +505 25 model.embedding_dim 2.0 +505 25 loss.margin 25.262337975584735 +505 25 loss.adversarial_temperature 0.48802149443332743 +505 25 optimizer.lr 0.07328667929827203 +505 25 negative_sampler.num_negs_per_pos 33.0 +505 25 training.batch_size 2.0 +505 26 model.embedding_dim 0.0 +505 26 loss.margin 29.35756333645713 +505 26 loss.adversarial_temperature 0.8714628154459743 +505 26 optimizer.lr 0.02575195496808639 +505 26 negative_sampler.num_negs_per_pos 3.0 +505 26 training.batch_size 1.0 +505 27 model.embedding_dim 2.0 +505 27 loss.margin 21.946033432193055 +505 27 loss.adversarial_temperature 0.3083047132416109 +505 27 optimizer.lr 0.0012660657721778002 +505 27 negative_sampler.num_negs_per_pos 71.0 +505 27 training.batch_size 0.0 +505 28 model.embedding_dim 2.0 +505 28 loss.margin 26.05498811385089 +505 28 loss.adversarial_temperature 0.7058984648818833 +505 28 optimizer.lr 0.00369355808639354 +505 28 negative_sampler.num_negs_per_pos 60.0 +505 28 training.batch_size 0.0 +505 29 model.embedding_dim 1.0 +505 29 loss.margin 24.48301115902671 +505 29 loss.adversarial_temperature 0.8629817622082779 +505 29 optimizer.lr 0.03470016185421827 +505 29 negative_sampler.num_negs_per_pos 13.0 +505 29 training.batch_size 2.0 
+505 30 model.embedding_dim 0.0 +505 30 loss.margin 3.412814634479411 +505 30 loss.adversarial_temperature 0.1729094100444628 +505 30 optimizer.lr 0.033365145572534755 +505 30 negative_sampler.num_negs_per_pos 0.0 +505 30 training.batch_size 2.0 +505 31 model.embedding_dim 0.0 +505 31 loss.margin 28.200662564412795 +505 31 loss.adversarial_temperature 0.9368099907160714 +505 31 optimizer.lr 0.0028586789942512247 +505 31 negative_sampler.num_negs_per_pos 14.0 +505 31 training.batch_size 2.0 +505 32 model.embedding_dim 0.0 +505 32 loss.margin 2.570479787695535 +505 32 loss.adversarial_temperature 0.41621284817777515 +505 32 optimizer.lr 0.005014682302533847 +505 32 negative_sampler.num_negs_per_pos 8.0 +505 32 training.batch_size 0.0 +505 33 model.embedding_dim 2.0 +505 33 loss.margin 13.47187093355746 +505 33 loss.adversarial_temperature 0.34107375966166886 +505 33 optimizer.lr 0.005111470560772455 +505 33 negative_sampler.num_negs_per_pos 75.0 +505 33 training.batch_size 2.0 +505 34 model.embedding_dim 0.0 +505 34 loss.margin 8.568699327973233 +505 34 loss.adversarial_temperature 0.9335090652765692 +505 34 optimizer.lr 0.03504653295814432 +505 34 negative_sampler.num_negs_per_pos 51.0 +505 34 training.batch_size 0.0 +505 35 model.embedding_dim 0.0 +505 35 loss.margin 20.425005468240016 +505 35 loss.adversarial_temperature 0.7992484222925953 +505 35 optimizer.lr 0.01191349057290876 +505 35 negative_sampler.num_negs_per_pos 80.0 +505 35 training.batch_size 2.0 +505 36 model.embedding_dim 0.0 +505 36 loss.margin 10.86324600241274 +505 36 loss.adversarial_temperature 0.3575193229128135 +505 36 optimizer.lr 0.022327287978167392 +505 36 negative_sampler.num_negs_per_pos 66.0 +505 36 training.batch_size 1.0 +505 37 model.embedding_dim 0.0 +505 37 loss.margin 14.220083282072729 +505 37 loss.adversarial_temperature 0.7001121004438198 +505 37 optimizer.lr 0.06407447996471778 +505 37 negative_sampler.num_negs_per_pos 3.0 +505 37 training.batch_size 1.0 +505 38 
model.embedding_dim 2.0 +505 38 loss.margin 9.919154157941952 +505 38 loss.adversarial_temperature 0.3476049896133579 +505 38 optimizer.lr 0.01841948598956572 +505 38 negative_sampler.num_negs_per_pos 15.0 +505 38 training.batch_size 0.0 +505 39 model.embedding_dim 2.0 +505 39 loss.margin 3.986559725092414 +505 39 loss.adversarial_temperature 0.26023270709545765 +505 39 optimizer.lr 0.06630063733510624 +505 39 negative_sampler.num_negs_per_pos 89.0 +505 39 training.batch_size 1.0 +505 40 model.embedding_dim 2.0 +505 40 loss.margin 24.945191966722952 +505 40 loss.adversarial_temperature 0.8253433292485084 +505 40 optimizer.lr 0.001704080078338333 +505 40 negative_sampler.num_negs_per_pos 21.0 +505 40 training.batch_size 0.0 +505 41 model.embedding_dim 0.0 +505 41 loss.margin 16.739726673082473 +505 41 loss.adversarial_temperature 0.27988797610062205 +505 41 optimizer.lr 0.027670442824965585 +505 41 negative_sampler.num_negs_per_pos 30.0 +505 41 training.batch_size 1.0 +505 42 model.embedding_dim 1.0 +505 42 loss.margin 1.3314087650793045 +505 42 loss.adversarial_temperature 0.7870498964219612 +505 42 optimizer.lr 0.04915996254297261 +505 42 negative_sampler.num_negs_per_pos 74.0 +505 42 training.batch_size 2.0 +505 43 model.embedding_dim 2.0 +505 43 loss.margin 26.43999528528893 +505 43 loss.adversarial_temperature 0.6895696688173294 +505 43 optimizer.lr 0.0022630681566770633 +505 43 negative_sampler.num_negs_per_pos 46.0 +505 43 training.batch_size 2.0 +505 44 model.embedding_dim 2.0 +505 44 loss.margin 9.145451197670452 +505 44 loss.adversarial_temperature 0.877807344237933 +505 44 optimizer.lr 0.0030229607553243555 +505 44 negative_sampler.num_negs_per_pos 68.0 +505 44 training.batch_size 0.0 +505 45 model.embedding_dim 2.0 +505 45 loss.margin 13.069008718190188 +505 45 loss.adversarial_temperature 0.24684508547576725 +505 45 optimizer.lr 0.03478869158842677 +505 45 negative_sampler.num_negs_per_pos 29.0 +505 45 training.batch_size 1.0 +505 46 model.embedding_dim 
0.0 +505 46 loss.margin 28.409411614809585 +505 46 loss.adversarial_temperature 0.7403122422738142 +505 46 optimizer.lr 0.03462165128624588 +505 46 negative_sampler.num_negs_per_pos 14.0 +505 46 training.batch_size 2.0 +505 47 model.embedding_dim 1.0 +505 47 loss.margin 2.67538202594977 +505 47 loss.adversarial_temperature 0.8967235067427143 +505 47 optimizer.lr 0.0021578584890051956 +505 47 negative_sampler.num_negs_per_pos 69.0 +505 47 training.batch_size 0.0 +505 48 model.embedding_dim 0.0 +505 48 loss.margin 2.260983968613031 +505 48 loss.adversarial_temperature 0.690379331288081 +505 48 optimizer.lr 0.0038529214035078452 +505 48 negative_sampler.num_negs_per_pos 8.0 +505 48 training.batch_size 0.0 +505 49 model.embedding_dim 1.0 +505 49 loss.margin 10.69421592014929 +505 49 loss.adversarial_temperature 0.29592527889845693 +505 49 optimizer.lr 0.09055930507208153 +505 49 negative_sampler.num_negs_per_pos 10.0 +505 49 training.batch_size 0.0 +505 50 model.embedding_dim 1.0 +505 50 loss.margin 23.135734546648482 +505 50 loss.adversarial_temperature 0.9989454179801598 +505 50 optimizer.lr 0.027394415512762088 +505 50 negative_sampler.num_negs_per_pos 48.0 +505 50 training.batch_size 2.0 +505 51 model.embedding_dim 0.0 +505 51 loss.margin 6.394046520348036 +505 51 loss.adversarial_temperature 0.46215378530457424 +505 51 optimizer.lr 0.021820924231583542 +505 51 negative_sampler.num_negs_per_pos 34.0 +505 51 training.batch_size 1.0 +505 52 model.embedding_dim 1.0 +505 52 loss.margin 10.458669903878686 +505 52 loss.adversarial_temperature 0.4750313930151512 +505 52 optimizer.lr 0.03417063115892338 +505 52 negative_sampler.num_negs_per_pos 58.0 +505 52 training.batch_size 2.0 +505 53 model.embedding_dim 1.0 +505 53 loss.margin 18.699743033166538 +505 53 loss.adversarial_temperature 0.23164767992565582 +505 53 optimizer.lr 0.06175739792719972 +505 53 negative_sampler.num_negs_per_pos 0.0 +505 53 training.batch_size 2.0 +505 54 model.embedding_dim 0.0 +505 54 
loss.margin 15.055557040938556 +505 54 loss.adversarial_temperature 0.1602943869844851 +505 54 optimizer.lr 0.002526044682696697 +505 54 negative_sampler.num_negs_per_pos 47.0 +505 54 training.batch_size 2.0 +505 55 model.embedding_dim 2.0 +505 55 loss.margin 23.49505466207344 +505 55 loss.adversarial_temperature 0.7630029407827631 +505 55 optimizer.lr 0.07541385669480499 +505 55 negative_sampler.num_negs_per_pos 64.0 +505 55 training.batch_size 1.0 +505 56 model.embedding_dim 1.0 +505 56 loss.margin 15.567523975336739 +505 56 loss.adversarial_temperature 0.27845013952128733 +505 56 optimizer.lr 0.011947088749910763 +505 56 negative_sampler.num_negs_per_pos 98.0 +505 56 training.batch_size 0.0 +505 57 model.embedding_dim 1.0 +505 57 loss.margin 27.572116143837246 +505 57 loss.adversarial_temperature 0.3132338532620483 +505 57 optimizer.lr 0.005840805935556094 +505 57 negative_sampler.num_negs_per_pos 74.0 +505 57 training.batch_size 0.0 +505 1 dataset """wn18rr""" +505 1 model """proje""" +505 1 loss """nssa""" +505 1 regularizer """no""" +505 1 optimizer """adam""" +505 1 training_loop """owa""" +505 1 negative_sampler """basic""" +505 1 evaluator """rankbased""" +505 2 dataset """wn18rr""" +505 2 model """proje""" +505 2 loss """nssa""" +505 2 regularizer """no""" +505 2 optimizer """adam""" +505 2 training_loop """owa""" +505 2 negative_sampler """basic""" +505 2 evaluator """rankbased""" +505 3 dataset """wn18rr""" +505 3 model """proje""" +505 3 loss """nssa""" +505 3 regularizer """no""" +505 3 optimizer """adam""" +505 3 training_loop """owa""" +505 3 negative_sampler """basic""" +505 3 evaluator """rankbased""" +505 4 dataset """wn18rr""" +505 4 model """proje""" +505 4 loss """nssa""" +505 4 regularizer """no""" +505 4 optimizer """adam""" +505 4 training_loop """owa""" +505 4 negative_sampler """basic""" +505 4 evaluator """rankbased""" +505 5 dataset """wn18rr""" +505 5 model """proje""" +505 5 loss """nssa""" +505 5 regularizer """no""" +505 5 optimizer 
"""adam""" +505 5 training_loop """owa""" +505 5 negative_sampler """basic""" +505 5 evaluator """rankbased""" +505 6 dataset """wn18rr""" +505 6 model """proje""" +505 6 loss """nssa""" +505 6 regularizer """no""" +505 6 optimizer """adam""" +505 6 training_loop """owa""" +505 6 negative_sampler """basic""" +505 6 evaluator """rankbased""" +505 7 dataset """wn18rr""" +505 7 model """proje""" +505 7 loss """nssa""" +505 7 regularizer """no""" +505 7 optimizer """adam""" +505 7 training_loop """owa""" +505 7 negative_sampler """basic""" +505 7 evaluator """rankbased""" +505 8 dataset """wn18rr""" +505 8 model """proje""" +505 8 loss """nssa""" +505 8 regularizer """no""" +505 8 optimizer """adam""" +505 8 training_loop """owa""" +505 8 negative_sampler """basic""" +505 8 evaluator """rankbased""" +505 9 dataset """wn18rr""" +505 9 model """proje""" +505 9 loss """nssa""" +505 9 regularizer """no""" +505 9 optimizer """adam""" +505 9 training_loop """owa""" +505 9 negative_sampler """basic""" +505 9 evaluator """rankbased""" +505 10 dataset """wn18rr""" +505 10 model """proje""" +505 10 loss """nssa""" +505 10 regularizer """no""" +505 10 optimizer """adam""" +505 10 training_loop """owa""" +505 10 negative_sampler """basic""" +505 10 evaluator """rankbased""" +505 11 dataset """wn18rr""" +505 11 model """proje""" +505 11 loss """nssa""" +505 11 regularizer """no""" +505 11 optimizer """adam""" +505 11 training_loop """owa""" +505 11 negative_sampler """basic""" +505 11 evaluator """rankbased""" +505 12 dataset """wn18rr""" +505 12 model """proje""" +505 12 loss """nssa""" +505 12 regularizer """no""" +505 12 optimizer """adam""" +505 12 training_loop """owa""" +505 12 negative_sampler """basic""" +505 12 evaluator """rankbased""" +505 13 dataset """wn18rr""" +505 13 model """proje""" +505 13 loss """nssa""" +505 13 regularizer """no""" +505 13 optimizer """adam""" +505 13 training_loop """owa""" +505 13 negative_sampler """basic""" +505 13 evaluator """rankbased""" 
+505 14 dataset """wn18rr""" +505 14 model """proje""" +505 14 loss """nssa""" +505 14 regularizer """no""" +505 14 optimizer """adam""" +505 14 training_loop """owa""" +505 14 negative_sampler """basic""" +505 14 evaluator """rankbased""" +505 15 dataset """wn18rr""" +505 15 model """proje""" +505 15 loss """nssa""" +505 15 regularizer """no""" +505 15 optimizer """adam""" +505 15 training_loop """owa""" +505 15 negative_sampler """basic""" +505 15 evaluator """rankbased""" +505 16 dataset """wn18rr""" +505 16 model """proje""" +505 16 loss """nssa""" +505 16 regularizer """no""" +505 16 optimizer """adam""" +505 16 training_loop """owa""" +505 16 negative_sampler """basic""" +505 16 evaluator """rankbased""" +505 17 dataset """wn18rr""" +505 17 model """proje""" +505 17 loss """nssa""" +505 17 regularizer """no""" +505 17 optimizer """adam""" +505 17 training_loop """owa""" +505 17 negative_sampler """basic""" +505 17 evaluator """rankbased""" +505 18 dataset """wn18rr""" +505 18 model """proje""" +505 18 loss """nssa""" +505 18 regularizer """no""" +505 18 optimizer """adam""" +505 18 training_loop """owa""" +505 18 negative_sampler """basic""" +505 18 evaluator """rankbased""" +505 19 dataset """wn18rr""" +505 19 model """proje""" +505 19 loss """nssa""" +505 19 regularizer """no""" +505 19 optimizer """adam""" +505 19 training_loop """owa""" +505 19 negative_sampler """basic""" +505 19 evaluator """rankbased""" +505 20 dataset """wn18rr""" +505 20 model """proje""" +505 20 loss """nssa""" +505 20 regularizer """no""" +505 20 optimizer """adam""" +505 20 training_loop """owa""" +505 20 negative_sampler """basic""" +505 20 evaluator """rankbased""" +505 21 dataset """wn18rr""" +505 21 model """proje""" +505 21 loss """nssa""" +505 21 regularizer """no""" +505 21 optimizer """adam""" +505 21 training_loop """owa""" +505 21 negative_sampler """basic""" +505 21 evaluator """rankbased""" +505 22 dataset """wn18rr""" +505 22 model """proje""" +505 22 loss """nssa""" 
+505 22 regularizer """no""" +505 22 optimizer """adam""" +505 22 training_loop """owa""" +505 22 negative_sampler """basic""" +505 22 evaluator """rankbased""" +505 23 dataset """wn18rr""" +505 23 model """proje""" +505 23 loss """nssa""" +505 23 regularizer """no""" +505 23 optimizer """adam""" +505 23 training_loop """owa""" +505 23 negative_sampler """basic""" +505 23 evaluator """rankbased""" +505 24 dataset """wn18rr""" +505 24 model """proje""" +505 24 loss """nssa""" +505 24 regularizer """no""" +505 24 optimizer """adam""" +505 24 training_loop """owa""" +505 24 negative_sampler """basic""" +505 24 evaluator """rankbased""" +505 25 dataset """wn18rr""" +505 25 model """proje""" +505 25 loss """nssa""" +505 25 regularizer """no""" +505 25 optimizer """adam""" +505 25 training_loop """owa""" +505 25 negative_sampler """basic""" +505 25 evaluator """rankbased""" +505 26 dataset """wn18rr""" +505 26 model """proje""" +505 26 loss """nssa""" +505 26 regularizer """no""" +505 26 optimizer """adam""" +505 26 training_loop """owa""" +505 26 negative_sampler """basic""" +505 26 evaluator """rankbased""" +505 27 dataset """wn18rr""" +505 27 model """proje""" +505 27 loss """nssa""" +505 27 regularizer """no""" +505 27 optimizer """adam""" +505 27 training_loop """owa""" +505 27 negative_sampler """basic""" +505 27 evaluator """rankbased""" +505 28 dataset """wn18rr""" +505 28 model """proje""" +505 28 loss """nssa""" +505 28 regularizer """no""" +505 28 optimizer """adam""" +505 28 training_loop """owa""" +505 28 negative_sampler """basic""" +505 28 evaluator """rankbased""" +505 29 dataset """wn18rr""" +505 29 model """proje""" +505 29 loss """nssa""" +505 29 regularizer """no""" +505 29 optimizer """adam""" +505 29 training_loop """owa""" +505 29 negative_sampler """basic""" +505 29 evaluator """rankbased""" +505 30 dataset """wn18rr""" +505 30 model """proje""" +505 30 loss """nssa""" +505 30 regularizer """no""" +505 30 optimizer """adam""" +505 30 training_loop 
"""owa""" +505 30 negative_sampler """basic""" +505 30 evaluator """rankbased""" +505 31 dataset """wn18rr""" +505 31 model """proje""" +505 31 loss """nssa""" +505 31 regularizer """no""" +505 31 optimizer """adam""" +505 31 training_loop """owa""" +505 31 negative_sampler """basic""" +505 31 evaluator """rankbased""" +505 32 dataset """wn18rr""" +505 32 model """proje""" +505 32 loss """nssa""" +505 32 regularizer """no""" +505 32 optimizer """adam""" +505 32 training_loop """owa""" +505 32 negative_sampler """basic""" +505 32 evaluator """rankbased""" +505 33 dataset """wn18rr""" +505 33 model """proje""" +505 33 loss """nssa""" +505 33 regularizer """no""" +505 33 optimizer """adam""" +505 33 training_loop """owa""" +505 33 negative_sampler """basic""" +505 33 evaluator """rankbased""" +505 34 dataset """wn18rr""" +505 34 model """proje""" +505 34 loss """nssa""" +505 34 regularizer """no""" +505 34 optimizer """adam""" +505 34 training_loop """owa""" +505 34 negative_sampler """basic""" +505 34 evaluator """rankbased""" +505 35 dataset """wn18rr""" +505 35 model """proje""" +505 35 loss """nssa""" +505 35 regularizer """no""" +505 35 optimizer """adam""" +505 35 training_loop """owa""" +505 35 negative_sampler """basic""" +505 35 evaluator """rankbased""" +505 36 dataset """wn18rr""" +505 36 model """proje""" +505 36 loss """nssa""" +505 36 regularizer """no""" +505 36 optimizer """adam""" +505 36 training_loop """owa""" +505 36 negative_sampler """basic""" +505 36 evaluator """rankbased""" +505 37 dataset """wn18rr""" +505 37 model """proje""" +505 37 loss """nssa""" +505 37 regularizer """no""" +505 37 optimizer """adam""" +505 37 training_loop """owa""" +505 37 negative_sampler """basic""" +505 37 evaluator """rankbased""" +505 38 dataset """wn18rr""" +505 38 model """proje""" +505 38 loss """nssa""" +505 38 regularizer """no""" +505 38 optimizer """adam""" +505 38 training_loop """owa""" +505 38 negative_sampler """basic""" +505 38 evaluator 
"""rankbased""" +505 39 dataset """wn18rr""" +505 39 model """proje""" +505 39 loss """nssa""" +505 39 regularizer """no""" +505 39 optimizer """adam""" +505 39 training_loop """owa""" +505 39 negative_sampler """basic""" +505 39 evaluator """rankbased""" +505 40 dataset """wn18rr""" +505 40 model """proje""" +505 40 loss """nssa""" +505 40 regularizer """no""" +505 40 optimizer """adam""" +505 40 training_loop """owa""" +505 40 negative_sampler """basic""" +505 40 evaluator """rankbased""" +505 41 dataset """wn18rr""" +505 41 model """proje""" +505 41 loss """nssa""" +505 41 regularizer """no""" +505 41 optimizer """adam""" +505 41 training_loop """owa""" +505 41 negative_sampler """basic""" +505 41 evaluator """rankbased""" +505 42 dataset """wn18rr""" +505 42 model """proje""" +505 42 loss """nssa""" +505 42 regularizer """no""" +505 42 optimizer """adam""" +505 42 training_loop """owa""" +505 42 negative_sampler """basic""" +505 42 evaluator """rankbased""" +505 43 dataset """wn18rr""" +505 43 model """proje""" +505 43 loss """nssa""" +505 43 regularizer """no""" +505 43 optimizer """adam""" +505 43 training_loop """owa""" +505 43 negative_sampler """basic""" +505 43 evaluator """rankbased""" +505 44 dataset """wn18rr""" +505 44 model """proje""" +505 44 loss """nssa""" +505 44 regularizer """no""" +505 44 optimizer """adam""" +505 44 training_loop """owa""" +505 44 negative_sampler """basic""" +505 44 evaluator """rankbased""" +505 45 dataset """wn18rr""" +505 45 model """proje""" +505 45 loss """nssa""" +505 45 regularizer """no""" +505 45 optimizer """adam""" +505 45 training_loop """owa""" +505 45 negative_sampler """basic""" +505 45 evaluator """rankbased""" +505 46 dataset """wn18rr""" +505 46 model """proje""" +505 46 loss """nssa""" +505 46 regularizer """no""" +505 46 optimizer """adam""" +505 46 training_loop """owa""" +505 46 negative_sampler """basic""" +505 46 evaluator """rankbased""" +505 47 dataset """wn18rr""" +505 47 model """proje""" +505 47 
loss """nssa""" +505 47 regularizer """no""" +505 47 optimizer """adam""" +505 47 training_loop """owa""" +505 47 negative_sampler """basic""" +505 47 evaluator """rankbased""" +505 48 dataset """wn18rr""" +505 48 model """proje""" +505 48 loss """nssa""" +505 48 regularizer """no""" +505 48 optimizer """adam""" +505 48 training_loop """owa""" +505 48 negative_sampler """basic""" +505 48 evaluator """rankbased""" +505 49 dataset """wn18rr""" +505 49 model """proje""" +505 49 loss """nssa""" +505 49 regularizer """no""" +505 49 optimizer """adam""" +505 49 training_loop """owa""" +505 49 negative_sampler """basic""" +505 49 evaluator """rankbased""" +505 50 dataset """wn18rr""" +505 50 model """proje""" +505 50 loss """nssa""" +505 50 regularizer """no""" +505 50 optimizer """adam""" +505 50 training_loop """owa""" +505 50 negative_sampler """basic""" +505 50 evaluator """rankbased""" +505 51 dataset """wn18rr""" +505 51 model """proje""" +505 51 loss """nssa""" +505 51 regularizer """no""" +505 51 optimizer """adam""" +505 51 training_loop """owa""" +505 51 negative_sampler """basic""" +505 51 evaluator """rankbased""" +505 52 dataset """wn18rr""" +505 52 model """proje""" +505 52 loss """nssa""" +505 52 regularizer """no""" +505 52 optimizer """adam""" +505 52 training_loop """owa""" +505 52 negative_sampler """basic""" +505 52 evaluator """rankbased""" +505 53 dataset """wn18rr""" +505 53 model """proje""" +505 53 loss """nssa""" +505 53 regularizer """no""" +505 53 optimizer """adam""" +505 53 training_loop """owa""" +505 53 negative_sampler """basic""" +505 53 evaluator """rankbased""" +505 54 dataset """wn18rr""" +505 54 model """proje""" +505 54 loss """nssa""" +505 54 regularizer """no""" +505 54 optimizer """adam""" +505 54 training_loop """owa""" +505 54 negative_sampler """basic""" +505 54 evaluator """rankbased""" +505 55 dataset """wn18rr""" +505 55 model """proje""" +505 55 loss """nssa""" +505 55 regularizer """no""" +505 55 optimizer """adam""" +505 
55 training_loop """owa""" +505 55 negative_sampler """basic""" +505 55 evaluator """rankbased""" +505 56 dataset """wn18rr""" +505 56 model """proje""" +505 56 loss """nssa""" +505 56 regularizer """no""" +505 56 optimizer """adam""" +505 56 training_loop """owa""" +505 56 negative_sampler """basic""" +505 56 evaluator """rankbased""" +505 57 dataset """wn18rr""" +505 57 model """proje""" +505 57 loss """nssa""" +505 57 regularizer """no""" +505 57 optimizer """adam""" +505 57 training_loop """owa""" +505 57 negative_sampler """basic""" +505 57 evaluator """rankbased""" +506 1 model.embedding_dim 0.0 +506 1 loss.margin 9.735259008630894 +506 1 loss.adversarial_temperature 0.4145331580547407 +506 1 optimizer.lr 0.0022070122824623256 +506 1 negative_sampler.num_negs_per_pos 0.0 +506 1 training.batch_size 0.0 +506 2 model.embedding_dim 2.0 +506 2 loss.margin 27.05701953588313 +506 2 loss.adversarial_temperature 0.8849839673810894 +506 2 optimizer.lr 0.001743609486939379 +506 2 negative_sampler.num_negs_per_pos 94.0 +506 2 training.batch_size 0.0 +506 3 model.embedding_dim 2.0 +506 3 loss.margin 3.7635811897136957 +506 3 loss.adversarial_temperature 0.9704954889783572 +506 3 optimizer.lr 0.004893904653774196 +506 3 negative_sampler.num_negs_per_pos 48.0 +506 3 training.batch_size 2.0 +506 4 model.embedding_dim 1.0 +506 4 loss.margin 14.29213412661376 +506 4 loss.adversarial_temperature 0.3000083891533135 +506 4 optimizer.lr 0.0049922834686866825 +506 4 negative_sampler.num_negs_per_pos 1.0 +506 4 training.batch_size 2.0 +506 5 model.embedding_dim 1.0 +506 5 loss.margin 23.122367933114898 +506 5 loss.adversarial_temperature 0.9453014909638752 +506 5 optimizer.lr 0.014618140705582502 +506 5 negative_sampler.num_negs_per_pos 64.0 +506 5 training.batch_size 1.0 +506 6 model.embedding_dim 0.0 +506 6 loss.margin 14.227239783482476 +506 6 loss.adversarial_temperature 0.8057386516211601 +506 6 optimizer.lr 0.0018203335415460196 +506 6 negative_sampler.num_negs_per_pos 0.0 
+506 6 training.batch_size 1.0 +506 7 model.embedding_dim 1.0 +506 7 loss.margin 7.5282250049653365 +506 7 loss.adversarial_temperature 0.11405400036967583 +506 7 optimizer.lr 0.0010779031640917446 +506 7 negative_sampler.num_negs_per_pos 65.0 +506 7 training.batch_size 1.0 +506 8 model.embedding_dim 0.0 +506 8 loss.margin 10.690905672766725 +506 8 loss.adversarial_temperature 0.5569303726327459 +506 8 optimizer.lr 0.016527993941154787 +506 8 negative_sampler.num_negs_per_pos 47.0 +506 8 training.batch_size 1.0 +506 9 model.embedding_dim 0.0 +506 9 loss.margin 15.34522817595632 +506 9 loss.adversarial_temperature 0.9905945858656695 +506 9 optimizer.lr 0.0013386921629184253 +506 9 negative_sampler.num_negs_per_pos 44.0 +506 9 training.batch_size 2.0 +506 10 model.embedding_dim 2.0 +506 10 loss.margin 11.186329911328984 +506 10 loss.adversarial_temperature 0.46401209442235625 +506 10 optimizer.lr 0.004312381482902282 +506 10 negative_sampler.num_negs_per_pos 96.0 +506 10 training.batch_size 2.0 +506 11 model.embedding_dim 2.0 +506 11 loss.margin 13.028848591277658 +506 11 loss.adversarial_temperature 0.8497996667883831 +506 11 optimizer.lr 0.02315528317208813 +506 11 negative_sampler.num_negs_per_pos 86.0 +506 11 training.batch_size 1.0 +506 12 model.embedding_dim 0.0 +506 12 loss.margin 7.735343482951475 +506 12 loss.adversarial_temperature 0.4345102795576935 +506 12 optimizer.lr 0.021120716267193775 +506 12 negative_sampler.num_negs_per_pos 73.0 +506 12 training.batch_size 1.0 +506 13 model.embedding_dim 1.0 +506 13 loss.margin 2.556038262614571 +506 13 loss.adversarial_temperature 0.8640949060019424 +506 13 optimizer.lr 0.0032188929712161866 +506 13 negative_sampler.num_negs_per_pos 57.0 +506 13 training.batch_size 0.0 +506 14 model.embedding_dim 0.0 +506 14 loss.margin 19.78358194824179 +506 14 loss.adversarial_temperature 0.459590611198081 +506 14 optimizer.lr 0.001983360847594203 +506 14 negative_sampler.num_negs_per_pos 83.0 +506 14 training.batch_size 1.0 
+506 15 model.embedding_dim 2.0 +506 15 loss.margin 19.972964931472987 +506 15 loss.adversarial_temperature 0.5843976057915024 +506 15 optimizer.lr 0.0025517412568561416 +506 15 negative_sampler.num_negs_per_pos 67.0 +506 15 training.batch_size 0.0 +506 16 model.embedding_dim 2.0 +506 16 loss.margin 28.83124277644994 +506 16 loss.adversarial_temperature 0.19130945123388543 +506 16 optimizer.lr 0.002201668079642978 +506 16 negative_sampler.num_negs_per_pos 41.0 +506 16 training.batch_size 2.0 +506 17 model.embedding_dim 1.0 +506 17 loss.margin 24.44123702078936 +506 17 loss.adversarial_temperature 0.3459709408450288 +506 17 optimizer.lr 0.022799886423793086 +506 17 negative_sampler.num_negs_per_pos 67.0 +506 17 training.batch_size 2.0 +506 18 model.embedding_dim 2.0 +506 18 loss.margin 23.936971401884936 +506 18 loss.adversarial_temperature 0.252444945740501 +506 18 optimizer.lr 0.02139974300841005 +506 18 negative_sampler.num_negs_per_pos 31.0 +506 18 training.batch_size 0.0 +506 19 model.embedding_dim 2.0 +506 19 loss.margin 28.548283614824996 +506 19 loss.adversarial_temperature 0.4174473462062222 +506 19 optimizer.lr 0.010770847222812695 +506 19 negative_sampler.num_negs_per_pos 90.0 +506 19 training.batch_size 0.0 +506 20 model.embedding_dim 1.0 +506 20 loss.margin 27.688701678500646 +506 20 loss.adversarial_temperature 0.3717843943672708 +506 20 optimizer.lr 0.039753591349776796 +506 20 negative_sampler.num_negs_per_pos 26.0 +506 20 training.batch_size 2.0 +506 21 model.embedding_dim 1.0 +506 21 loss.margin 26.833296992470174 +506 21 loss.adversarial_temperature 0.3322640031998801 +506 21 optimizer.lr 0.0017897143725507734 +506 21 negative_sampler.num_negs_per_pos 4.0 +506 21 training.batch_size 0.0 +506 22 model.embedding_dim 0.0 +506 22 loss.margin 14.3988367367278 +506 22 loss.adversarial_temperature 0.7982052727687707 +506 22 optimizer.lr 0.09637140511909342 +506 22 negative_sampler.num_negs_per_pos 83.0 +506 22 training.batch_size 1.0 +506 23 
model.embedding_dim 2.0 +506 23 loss.margin 22.72302090077697 +506 23 loss.adversarial_temperature 0.15816802249974116 +506 23 optimizer.lr 0.059553342053522806 +506 23 negative_sampler.num_negs_per_pos 25.0 +506 23 training.batch_size 1.0 +506 24 model.embedding_dim 1.0 +506 24 loss.margin 10.99285002813456 +506 24 loss.adversarial_temperature 0.14222906413693953 +506 24 optimizer.lr 0.007888734667974458 +506 24 negative_sampler.num_negs_per_pos 24.0 +506 24 training.batch_size 2.0 +506 25 model.embedding_dim 2.0 +506 25 loss.margin 3.846319188497217 +506 25 loss.adversarial_temperature 0.46114408523095635 +506 25 optimizer.lr 0.0015313889047894082 +506 25 negative_sampler.num_negs_per_pos 49.0 +506 25 training.batch_size 2.0 +506 26 model.embedding_dim 0.0 +506 26 loss.margin 12.87458743765865 +506 26 loss.adversarial_temperature 0.5967438980054517 +506 26 optimizer.lr 0.0026998821773440446 +506 26 negative_sampler.num_negs_per_pos 5.0 +506 26 training.batch_size 2.0 +506 27 model.embedding_dim 1.0 +506 27 loss.margin 6.510690059136512 +506 27 loss.adversarial_temperature 0.5069810158236758 +506 27 optimizer.lr 0.01944201339741335 +506 27 negative_sampler.num_negs_per_pos 37.0 +506 27 training.batch_size 1.0 +506 28 model.embedding_dim 0.0 +506 28 loss.margin 9.393084669948701 +506 28 loss.adversarial_temperature 0.4475559976184145 +506 28 optimizer.lr 0.00894238809806268 +506 28 negative_sampler.num_negs_per_pos 13.0 +506 28 training.batch_size 2.0 +506 29 model.embedding_dim 2.0 +506 29 loss.margin 3.4319290105095375 +506 29 loss.adversarial_temperature 0.4593630969318132 +506 29 optimizer.lr 0.00978325757122418 +506 29 negative_sampler.num_negs_per_pos 80.0 +506 29 training.batch_size 2.0 +506 30 model.embedding_dim 0.0 +506 30 loss.margin 18.09161254655252 +506 30 loss.adversarial_temperature 0.7289772506234028 +506 30 optimizer.lr 0.0073097103692337335 +506 30 negative_sampler.num_negs_per_pos 70.0 +506 30 training.batch_size 0.0 +506 31 model.embedding_dim 
1.0 +506 31 loss.margin 7.7857785513287725 +506 31 loss.adversarial_temperature 0.3462880904372474 +506 31 optimizer.lr 0.04586537836982426 +506 31 negative_sampler.num_negs_per_pos 44.0 +506 31 training.batch_size 0.0 +506 32 model.embedding_dim 2.0 +506 32 loss.margin 8.34852774441464 +506 32 loss.adversarial_temperature 0.6500750069233623 +506 32 optimizer.lr 0.0015354273033735609 +506 32 negative_sampler.num_negs_per_pos 94.0 +506 32 training.batch_size 1.0 +506 33 model.embedding_dim 1.0 +506 33 loss.margin 3.3944279658531116 +506 33 loss.adversarial_temperature 0.9513221154013015 +506 33 optimizer.lr 0.04678258998278137 +506 33 negative_sampler.num_negs_per_pos 36.0 +506 33 training.batch_size 0.0 +506 34 model.embedding_dim 2.0 +506 34 loss.margin 22.944631474492088 +506 34 loss.adversarial_temperature 0.4116428843788357 +506 34 optimizer.lr 0.05030660645321444 +506 34 negative_sampler.num_negs_per_pos 68.0 +506 34 training.batch_size 2.0 +506 35 model.embedding_dim 1.0 +506 35 loss.margin 29.268366869862884 +506 35 loss.adversarial_temperature 0.6235756353782592 +506 35 optimizer.lr 0.014344846415018864 +506 35 negative_sampler.num_negs_per_pos 91.0 +506 35 training.batch_size 2.0 +506 36 model.embedding_dim 2.0 +506 36 loss.margin 8.02360763722375 +506 36 loss.adversarial_temperature 0.4460330231334034 +506 36 optimizer.lr 0.0015305842773171133 +506 36 negative_sampler.num_negs_per_pos 69.0 +506 36 training.batch_size 1.0 +506 37 model.embedding_dim 1.0 +506 37 loss.margin 21.403291821202714 +506 37 loss.adversarial_temperature 0.46977493800741765 +506 37 optimizer.lr 0.00121117398212315 +506 37 negative_sampler.num_negs_per_pos 8.0 +506 37 training.batch_size 0.0 +506 38 model.embedding_dim 1.0 +506 38 loss.margin 1.0451564927245034 +506 38 loss.adversarial_temperature 0.21919158681788717 +506 38 optimizer.lr 0.006238653476116203 +506 38 negative_sampler.num_negs_per_pos 78.0 +506 38 training.batch_size 1.0 +506 39 model.embedding_dim 1.0 +506 39 
loss.margin 8.880094330990662 +506 39 loss.adversarial_temperature 0.16084419656873972 +506 39 optimizer.lr 0.02196743040575112 +506 39 negative_sampler.num_negs_per_pos 45.0 +506 39 training.batch_size 1.0 +506 40 model.embedding_dim 1.0 +506 40 loss.margin 25.639891342029753 +506 40 loss.adversarial_temperature 0.17921976409255672 +506 40 optimizer.lr 0.010933361804190167 +506 40 negative_sampler.num_negs_per_pos 61.0 +506 40 training.batch_size 2.0 +506 41 model.embedding_dim 0.0 +506 41 loss.margin 18.027107951810127 +506 41 loss.adversarial_temperature 0.9898875711611179 +506 41 optimizer.lr 0.044019650353536624 +506 41 negative_sampler.num_negs_per_pos 26.0 +506 41 training.batch_size 1.0 +506 42 model.embedding_dim 0.0 +506 42 loss.margin 15.054869055274079 +506 42 loss.adversarial_temperature 0.6525432562095925 +506 42 optimizer.lr 0.009563879760399163 +506 42 negative_sampler.num_negs_per_pos 51.0 +506 42 training.batch_size 2.0 +506 43 model.embedding_dim 1.0 +506 43 loss.margin 6.805121079282731 +506 43 loss.adversarial_temperature 0.3335286971035947 +506 43 optimizer.lr 0.001249106401589474 +506 43 negative_sampler.num_negs_per_pos 48.0 +506 43 training.batch_size 2.0 +506 44 model.embedding_dim 2.0 +506 44 loss.margin 8.954260596341193 +506 44 loss.adversarial_temperature 0.2266572697972039 +506 44 optimizer.lr 0.04727375641385785 +506 44 negative_sampler.num_negs_per_pos 82.0 +506 44 training.batch_size 2.0 +506 45 model.embedding_dim 2.0 +506 45 loss.margin 28.843189714249203 +506 45 loss.adversarial_temperature 0.6093457261309001 +506 45 optimizer.lr 0.08727936404898974 +506 45 negative_sampler.num_negs_per_pos 96.0 +506 45 training.batch_size 2.0 +506 46 model.embedding_dim 0.0 +506 46 loss.margin 8.073557099206955 +506 46 loss.adversarial_temperature 0.4060834910264108 +506 46 optimizer.lr 0.043813451155219715 +506 46 negative_sampler.num_negs_per_pos 73.0 +506 46 training.batch_size 0.0 +506 47 model.embedding_dim 0.0 +506 47 loss.margin 
11.859858348032846 +506 47 loss.adversarial_temperature 0.8172807280895045 +506 47 optimizer.lr 0.008627932285971451 +506 47 negative_sampler.num_negs_per_pos 80.0 +506 47 training.batch_size 1.0 +506 48 model.embedding_dim 1.0 +506 48 loss.margin 7.414919731093571 +506 48 loss.adversarial_temperature 0.11603458705227902 +506 48 optimizer.lr 0.0019004193617024475 +506 48 negative_sampler.num_negs_per_pos 60.0 +506 48 training.batch_size 2.0 +506 49 model.embedding_dim 2.0 +506 49 loss.margin 6.702680595972369 +506 49 loss.adversarial_temperature 0.3543786816620751 +506 49 optimizer.lr 0.05634614536352782 +506 49 negative_sampler.num_negs_per_pos 28.0 +506 49 training.batch_size 2.0 +506 50 model.embedding_dim 2.0 +506 50 loss.margin 28.307655791067255 +506 50 loss.adversarial_temperature 0.3353638445513781 +506 50 optimizer.lr 0.019595101569315477 +506 50 negative_sampler.num_negs_per_pos 10.0 +506 50 training.batch_size 0.0 +506 51 model.embedding_dim 0.0 +506 51 loss.margin 27.55448460047407 +506 51 loss.adversarial_temperature 0.9940782500632561 +506 51 optimizer.lr 0.02443484843494027 +506 51 negative_sampler.num_negs_per_pos 18.0 +506 51 training.batch_size 1.0 +506 52 model.embedding_dim 0.0 +506 52 loss.margin 22.667599949310173 +506 52 loss.adversarial_temperature 0.7188836042490607 +506 52 optimizer.lr 0.009507840720316286 +506 52 negative_sampler.num_negs_per_pos 45.0 +506 52 training.batch_size 1.0 +506 53 model.embedding_dim 1.0 +506 53 loss.margin 23.99819019733248 +506 53 loss.adversarial_temperature 0.9208473877178209 +506 53 optimizer.lr 0.006242446490267849 +506 53 negative_sampler.num_negs_per_pos 3.0 +506 53 training.batch_size 1.0 +506 54 model.embedding_dim 2.0 +506 54 loss.margin 1.4659619549404808 +506 54 loss.adversarial_temperature 0.25028999130849316 +506 54 optimizer.lr 0.010744063142404293 +506 54 negative_sampler.num_negs_per_pos 3.0 +506 54 training.batch_size 0.0 +506 55 model.embedding_dim 2.0 +506 55 loss.margin 20.058360198467817 
+506 55 loss.adversarial_temperature 0.8619870011057007 +506 55 optimizer.lr 0.0030475913324556013 +506 55 negative_sampler.num_negs_per_pos 78.0 +506 55 training.batch_size 0.0 +506 56 model.embedding_dim 2.0 +506 56 loss.margin 29.282120263371134 +506 56 loss.adversarial_temperature 0.7013624927455184 +506 56 optimizer.lr 0.0010555306579494584 +506 56 negative_sampler.num_negs_per_pos 57.0 +506 56 training.batch_size 0.0 +506 57 model.embedding_dim 0.0 +506 57 loss.margin 20.4576284028405 +506 57 loss.adversarial_temperature 0.31642998833803665 +506 57 optimizer.lr 0.009127983191926963 +506 57 negative_sampler.num_negs_per_pos 53.0 +506 57 training.batch_size 1.0 +506 58 model.embedding_dim 1.0 +506 58 loss.margin 2.8503555593682375 +506 58 loss.adversarial_temperature 0.7459366519642864 +506 58 optimizer.lr 0.0031476500595792626 +506 58 negative_sampler.num_negs_per_pos 74.0 +506 58 training.batch_size 0.0 +506 59 model.embedding_dim 1.0 +506 59 loss.margin 6.280304826897228 +506 59 loss.adversarial_temperature 0.8643637854493135 +506 59 optimizer.lr 0.004521557969156381 +506 59 negative_sampler.num_negs_per_pos 43.0 +506 59 training.batch_size 1.0 +506 60 model.embedding_dim 1.0 +506 60 loss.margin 24.634629596570583 +506 60 loss.adversarial_temperature 0.4847384454879087 +506 60 optimizer.lr 0.00271052569911477 +506 60 negative_sampler.num_negs_per_pos 74.0 +506 60 training.batch_size 2.0 +506 61 model.embedding_dim 1.0 +506 61 loss.margin 21.065225798536467 +506 61 loss.adversarial_temperature 0.1810126221447237 +506 61 optimizer.lr 0.026398452987327087 +506 61 negative_sampler.num_negs_per_pos 58.0 +506 61 training.batch_size 1.0 +506 62 model.embedding_dim 0.0 +506 62 loss.margin 20.964169085318236 +506 62 loss.adversarial_temperature 0.7930258143957165 +506 62 optimizer.lr 0.008687296997613853 +506 62 negative_sampler.num_negs_per_pos 41.0 +506 62 training.batch_size 1.0 +506 63 model.embedding_dim 0.0 +506 63 loss.margin 16.593838799552493 +506 63 
loss.adversarial_temperature 0.23028291700150122 +506 63 optimizer.lr 0.028969681444162825 +506 63 negative_sampler.num_negs_per_pos 38.0 +506 63 training.batch_size 0.0 +506 64 model.embedding_dim 1.0 +506 64 loss.margin 14.210167604264198 +506 64 loss.adversarial_temperature 0.9489641280133543 +506 64 optimizer.lr 0.09463920924161114 +506 64 negative_sampler.num_negs_per_pos 6.0 +506 64 training.batch_size 2.0 +506 65 model.embedding_dim 0.0 +506 65 loss.margin 29.379170224898786 +506 65 loss.adversarial_temperature 0.943374534796459 +506 65 optimizer.lr 0.014167115371065651 +506 65 negative_sampler.num_negs_per_pos 5.0 +506 65 training.batch_size 1.0 +506 66 model.embedding_dim 2.0 +506 66 loss.margin 2.299789345972981 +506 66 loss.adversarial_temperature 0.19890024884601135 +506 66 optimizer.lr 0.0012683626947716249 +506 66 negative_sampler.num_negs_per_pos 75.0 +506 66 training.batch_size 1.0 +506 67 model.embedding_dim 2.0 +506 67 loss.margin 8.323617342765854 +506 67 loss.adversarial_temperature 0.7020970508024146 +506 67 optimizer.lr 0.026681756902615875 +506 67 negative_sampler.num_negs_per_pos 14.0 +506 67 training.batch_size 0.0 +506 68 model.embedding_dim 2.0 +506 68 loss.margin 28.786512392003868 +506 68 loss.adversarial_temperature 0.5326582999969063 +506 68 optimizer.lr 0.015201344376850178 +506 68 negative_sampler.num_negs_per_pos 45.0 +506 68 training.batch_size 1.0 +506 69 model.embedding_dim 2.0 +506 69 loss.margin 3.715710535782089 +506 69 loss.adversarial_temperature 0.10596614552581801 +506 69 optimizer.lr 0.007073003602372565 +506 69 negative_sampler.num_negs_per_pos 1.0 +506 69 training.batch_size 1.0 +506 70 model.embedding_dim 0.0 +506 70 loss.margin 16.347563153669405 +506 70 loss.adversarial_temperature 0.8163482836371305 +506 70 optimizer.lr 0.002299879305672349 +506 70 negative_sampler.num_negs_per_pos 21.0 +506 70 training.batch_size 1.0 +506 71 model.embedding_dim 2.0 +506 71 loss.margin 3.2563124925536426 +506 71 
loss.adversarial_temperature 0.9561839649316102 +506 71 optimizer.lr 0.012246870136748047 +506 71 negative_sampler.num_negs_per_pos 34.0 +506 71 training.batch_size 0.0 +506 72 model.embedding_dim 1.0 +506 72 loss.margin 19.039620957154334 +506 72 loss.adversarial_temperature 0.22190880144139363 +506 72 optimizer.lr 0.03482433498448914 +506 72 negative_sampler.num_negs_per_pos 91.0 +506 72 training.batch_size 0.0 +506 73 model.embedding_dim 1.0 +506 73 loss.margin 25.054288727000856 +506 73 loss.adversarial_temperature 0.5086395147463869 +506 73 optimizer.lr 0.06260623073048568 +506 73 negative_sampler.num_negs_per_pos 83.0 +506 73 training.batch_size 0.0 +506 74 model.embedding_dim 1.0 +506 74 loss.margin 28.068907770856196 +506 74 loss.adversarial_temperature 0.7103675717392023 +506 74 optimizer.lr 0.0014312950997842331 +506 74 negative_sampler.num_negs_per_pos 20.0 +506 74 training.batch_size 2.0 +506 75 model.embedding_dim 1.0 +506 75 loss.margin 18.776935258065613 +506 75 loss.adversarial_temperature 0.6892599546089824 +506 75 optimizer.lr 0.0031042990407760956 +506 75 negative_sampler.num_negs_per_pos 21.0 +506 75 training.batch_size 1.0 +506 76 model.embedding_dim 2.0 +506 76 loss.margin 18.224050800857224 +506 76 loss.adversarial_temperature 0.43090468968325596 +506 76 optimizer.lr 0.0037947569814224024 +506 76 negative_sampler.num_negs_per_pos 73.0 +506 76 training.batch_size 2.0 +506 77 model.embedding_dim 2.0 +506 77 loss.margin 2.3530270106659463 +506 77 loss.adversarial_temperature 0.5641023669902012 +506 77 optimizer.lr 0.007330998794362508 +506 77 negative_sampler.num_negs_per_pos 60.0 +506 77 training.batch_size 2.0 +506 78 model.embedding_dim 0.0 +506 78 loss.margin 11.832080465314103 +506 78 loss.adversarial_temperature 0.32968521917849897 +506 78 optimizer.lr 0.001643507102524116 +506 78 negative_sampler.num_negs_per_pos 89.0 +506 78 training.batch_size 0.0 +506 79 model.embedding_dim 1.0 +506 79 loss.margin 21.074621436168123 +506 79 
loss.adversarial_temperature 0.8254646865424401 +506 79 optimizer.lr 0.0731716546044694 +506 79 negative_sampler.num_negs_per_pos 4.0 +506 79 training.batch_size 2.0 +506 80 model.embedding_dim 2.0 +506 80 loss.margin 14.893615048818845 +506 80 loss.adversarial_temperature 0.4853393157424529 +506 80 optimizer.lr 0.0143005673935328 +506 80 negative_sampler.num_negs_per_pos 13.0 +506 80 training.batch_size 1.0 +506 81 model.embedding_dim 0.0 +506 81 loss.margin 23.05248435319421 +506 81 loss.adversarial_temperature 0.7295517021160456 +506 81 optimizer.lr 0.04034755796858827 +506 81 negative_sampler.num_negs_per_pos 89.0 +506 81 training.batch_size 1.0 +506 82 model.embedding_dim 1.0 +506 82 loss.margin 22.104702779477673 +506 82 loss.adversarial_temperature 0.45727045605272393 +506 82 optimizer.lr 0.04164874268371746 +506 82 negative_sampler.num_negs_per_pos 51.0 +506 82 training.batch_size 2.0 +506 83 model.embedding_dim 2.0 +506 83 loss.margin 14.461582147026519 +506 83 loss.adversarial_temperature 0.4851042948506725 +506 83 optimizer.lr 0.009001015850150302 +506 83 negative_sampler.num_negs_per_pos 56.0 +506 83 training.batch_size 1.0 +506 84 model.embedding_dim 2.0 +506 84 loss.margin 6.122089918543801 +506 84 loss.adversarial_temperature 0.8350479186251998 +506 84 optimizer.lr 0.0017813687845701306 +506 84 negative_sampler.num_negs_per_pos 55.0 +506 84 training.batch_size 1.0 +506 85 model.embedding_dim 0.0 +506 85 loss.margin 17.328166789232792 +506 85 loss.adversarial_temperature 0.33483921613620393 +506 85 optimizer.lr 0.020096195885443834 +506 85 negative_sampler.num_negs_per_pos 62.0 +506 85 training.batch_size 0.0 +506 86 model.embedding_dim 1.0 +506 86 loss.margin 18.921398981017198 +506 86 loss.adversarial_temperature 0.5719927477192868 +506 86 optimizer.lr 0.016127003744844343 +506 86 negative_sampler.num_negs_per_pos 38.0 +506 86 training.batch_size 0.0 +506 87 model.embedding_dim 2.0 +506 87 loss.margin 28.950082987119757 +506 87 
loss.adversarial_temperature 0.27242278771422 +506 87 optimizer.lr 0.0031413364705094454 +506 87 negative_sampler.num_negs_per_pos 21.0 +506 87 training.batch_size 2.0 +506 88 model.embedding_dim 2.0 +506 88 loss.margin 3.4611004168318766 +506 88 loss.adversarial_temperature 0.7811582108553881 +506 88 optimizer.lr 0.05532478899733124 +506 88 negative_sampler.num_negs_per_pos 65.0 +506 88 training.batch_size 2.0 +506 89 model.embedding_dim 2.0 +506 89 loss.margin 24.377736100062414 +506 89 loss.adversarial_temperature 0.10685238785952193 +506 89 optimizer.lr 0.027247051277003467 +506 89 negative_sampler.num_negs_per_pos 79.0 +506 89 training.batch_size 2.0 +506 90 model.embedding_dim 0.0 +506 90 loss.margin 8.898283078504793 +506 90 loss.adversarial_temperature 0.2265804656210964 +506 90 optimizer.lr 0.021069655843511007 +506 90 negative_sampler.num_negs_per_pos 93.0 +506 90 training.batch_size 1.0 +506 91 model.embedding_dim 0.0 +506 91 loss.margin 25.580525700095542 +506 91 loss.adversarial_temperature 0.7579964998476074 +506 91 optimizer.lr 0.004168570204563654 +506 91 negative_sampler.num_negs_per_pos 9.0 +506 91 training.batch_size 1.0 +506 92 model.embedding_dim 0.0 +506 92 loss.margin 26.804270990972277 +506 92 loss.adversarial_temperature 0.1420105761649678 +506 92 optimizer.lr 0.055005884299161124 +506 92 negative_sampler.num_negs_per_pos 57.0 +506 92 training.batch_size 2.0 +506 93 model.embedding_dim 0.0 +506 93 loss.margin 22.332388643818952 +506 93 loss.adversarial_temperature 0.7093182823342842 +506 93 optimizer.lr 0.06671036782063773 +506 93 negative_sampler.num_negs_per_pos 24.0 +506 93 training.batch_size 1.0 +506 94 model.embedding_dim 0.0 +506 94 loss.margin 22.612481972452365 +506 94 loss.adversarial_temperature 0.6515831496478414 +506 94 optimizer.lr 0.0057627752491507395 +506 94 negative_sampler.num_negs_per_pos 31.0 +506 94 training.batch_size 2.0 +506 95 model.embedding_dim 1.0 +506 95 loss.margin 27.333218649698384 +506 95 
loss.adversarial_temperature 0.29565106160042726 +506 95 optimizer.lr 0.010357063414744534 +506 95 negative_sampler.num_negs_per_pos 20.0 +506 95 training.batch_size 0.0 +506 96 model.embedding_dim 2.0 +506 96 loss.margin 29.79046456179417 +506 96 loss.adversarial_temperature 0.3015683100271054 +506 96 optimizer.lr 0.05346915191193786 +506 96 negative_sampler.num_negs_per_pos 42.0 +506 96 training.batch_size 1.0 +506 97 model.embedding_dim 0.0 +506 97 loss.margin 13.329522344105458 +506 97 loss.adversarial_temperature 0.8244276210774225 +506 97 optimizer.lr 0.07418990349377395 +506 97 negative_sampler.num_negs_per_pos 22.0 +506 97 training.batch_size 1.0 +506 98 model.embedding_dim 1.0 +506 98 loss.margin 25.152472245230214 +506 98 loss.adversarial_temperature 0.16429884376759477 +506 98 optimizer.lr 0.02854447497581652 +506 98 negative_sampler.num_negs_per_pos 58.0 +506 98 training.batch_size 0.0 +506 99 model.embedding_dim 2.0 +506 99 loss.margin 9.321910424955986 +506 99 loss.adversarial_temperature 0.5256370043136122 +506 99 optimizer.lr 0.006827157746072533 +506 99 negative_sampler.num_negs_per_pos 45.0 +506 99 training.batch_size 1.0 +506 100 model.embedding_dim 1.0 +506 100 loss.margin 5.421068330635132 +506 100 loss.adversarial_temperature 0.6671643630229475 +506 100 optimizer.lr 0.0026487869052451624 +506 100 negative_sampler.num_negs_per_pos 98.0 +506 100 training.batch_size 1.0 +506 1 dataset """wn18rr""" +506 1 model """proje""" +506 1 loss """nssa""" +506 1 regularizer """no""" +506 1 optimizer """adam""" +506 1 training_loop """owa""" +506 1 negative_sampler """basic""" +506 1 evaluator """rankbased""" +506 2 dataset """wn18rr""" +506 2 model """proje""" +506 2 loss """nssa""" +506 2 regularizer """no""" +506 2 optimizer """adam""" +506 2 training_loop """owa""" +506 2 negative_sampler """basic""" +506 2 evaluator """rankbased""" +506 3 dataset """wn18rr""" +506 3 model """proje""" +506 3 loss """nssa""" +506 3 regularizer """no""" +506 3 optimizer 
"""adam""" +506 3 training_loop """owa""" +506 3 negative_sampler """basic""" +506 3 evaluator """rankbased""" +506 4 dataset """wn18rr""" +506 4 model """proje""" +506 4 loss """nssa""" +506 4 regularizer """no""" +506 4 optimizer """adam""" +506 4 training_loop """owa""" +506 4 negative_sampler """basic""" +506 4 evaluator """rankbased""" +506 5 dataset """wn18rr""" +506 5 model """proje""" +506 5 loss """nssa""" +506 5 regularizer """no""" +506 5 optimizer """adam""" +506 5 training_loop """owa""" +506 5 negative_sampler """basic""" +506 5 evaluator """rankbased""" +506 6 dataset """wn18rr""" +506 6 model """proje""" +506 6 loss """nssa""" +506 6 regularizer """no""" +506 6 optimizer """adam""" +506 6 training_loop """owa""" +506 6 negative_sampler """basic""" +506 6 evaluator """rankbased""" +506 7 dataset """wn18rr""" +506 7 model """proje""" +506 7 loss """nssa""" +506 7 regularizer """no""" +506 7 optimizer """adam""" +506 7 training_loop """owa""" +506 7 negative_sampler """basic""" +506 7 evaluator """rankbased""" +506 8 dataset """wn18rr""" +506 8 model """proje""" +506 8 loss """nssa""" +506 8 regularizer """no""" +506 8 optimizer """adam""" +506 8 training_loop """owa""" +506 8 negative_sampler """basic""" +506 8 evaluator """rankbased""" +506 9 dataset """wn18rr""" +506 9 model """proje""" +506 9 loss """nssa""" +506 9 regularizer """no""" +506 9 optimizer """adam""" +506 9 training_loop """owa""" +506 9 negative_sampler """basic""" +506 9 evaluator """rankbased""" +506 10 dataset """wn18rr""" +506 10 model """proje""" +506 10 loss """nssa""" +506 10 regularizer """no""" +506 10 optimizer """adam""" +506 10 training_loop """owa""" +506 10 negative_sampler """basic""" +506 10 evaluator """rankbased""" +506 11 dataset """wn18rr""" +506 11 model """proje""" +506 11 loss """nssa""" +506 11 regularizer """no""" +506 11 optimizer """adam""" +506 11 training_loop """owa""" +506 11 negative_sampler """basic""" +506 11 evaluator """rankbased""" +506 12 dataset 
"""wn18rr""" +506 12 model """proje""" +506 12 loss """nssa""" +506 12 regularizer """no""" +506 12 optimizer """adam""" +506 12 training_loop """owa""" +506 12 negative_sampler """basic""" +506 12 evaluator """rankbased""" +506 13 dataset """wn18rr""" +506 13 model """proje""" +506 13 loss """nssa""" +506 13 regularizer """no""" +506 13 optimizer """adam""" +506 13 training_loop """owa""" +506 13 negative_sampler """basic""" +506 13 evaluator """rankbased""" +506 14 dataset """wn18rr""" +506 14 model """proje""" +506 14 loss """nssa""" +506 14 regularizer """no""" +506 14 optimizer """adam""" +506 14 training_loop """owa""" +506 14 negative_sampler """basic""" +506 14 evaluator """rankbased""" +506 15 dataset """wn18rr""" +506 15 model """proje""" +506 15 loss """nssa""" +506 15 regularizer """no""" +506 15 optimizer """adam""" +506 15 training_loop """owa""" +506 15 negative_sampler """basic""" +506 15 evaluator """rankbased""" +506 16 dataset """wn18rr""" +506 16 model """proje""" +506 16 loss """nssa""" +506 16 regularizer """no""" +506 16 optimizer """adam""" +506 16 training_loop """owa""" +506 16 negative_sampler """basic""" +506 16 evaluator """rankbased""" +506 17 dataset """wn18rr""" +506 17 model """proje""" +506 17 loss """nssa""" +506 17 regularizer """no""" +506 17 optimizer """adam""" +506 17 training_loop """owa""" +506 17 negative_sampler """basic""" +506 17 evaluator """rankbased""" +506 18 dataset """wn18rr""" +506 18 model """proje""" +506 18 loss """nssa""" +506 18 regularizer """no""" +506 18 optimizer """adam""" +506 18 training_loop """owa""" +506 18 negative_sampler """basic""" +506 18 evaluator """rankbased""" +506 19 dataset """wn18rr""" +506 19 model """proje""" +506 19 loss """nssa""" +506 19 regularizer """no""" +506 19 optimizer """adam""" +506 19 training_loop """owa""" +506 19 negative_sampler """basic""" +506 19 evaluator """rankbased""" +506 20 dataset """wn18rr""" +506 20 model """proje""" +506 20 loss """nssa""" +506 20 
regularizer """no""" +506 20 optimizer """adam""" +506 20 training_loop """owa""" +506 20 negative_sampler """basic""" +506 20 evaluator """rankbased""" +506 21 dataset """wn18rr""" +506 21 model """proje""" +506 21 loss """nssa""" +506 21 regularizer """no""" +506 21 optimizer """adam""" +506 21 training_loop """owa""" +506 21 negative_sampler """basic""" +506 21 evaluator """rankbased""" +506 22 dataset """wn18rr""" +506 22 model """proje""" +506 22 loss """nssa""" +506 22 regularizer """no""" +506 22 optimizer """adam""" +506 22 training_loop """owa""" +506 22 negative_sampler """basic""" +506 22 evaluator """rankbased""" +506 23 dataset """wn18rr""" +506 23 model """proje""" +506 23 loss """nssa""" +506 23 regularizer """no""" +506 23 optimizer """adam""" +506 23 training_loop """owa""" +506 23 negative_sampler """basic""" +506 23 evaluator """rankbased""" +506 24 dataset """wn18rr""" +506 24 model """proje""" +506 24 loss """nssa""" +506 24 regularizer """no""" +506 24 optimizer """adam""" +506 24 training_loop """owa""" +506 24 negative_sampler """basic""" +506 24 evaluator """rankbased""" +506 25 dataset """wn18rr""" +506 25 model """proje""" +506 25 loss """nssa""" +506 25 regularizer """no""" +506 25 optimizer """adam""" +506 25 training_loop """owa""" +506 25 negative_sampler """basic""" +506 25 evaluator """rankbased""" +506 26 dataset """wn18rr""" +506 26 model """proje""" +506 26 loss """nssa""" +506 26 regularizer """no""" +506 26 optimizer """adam""" +506 26 training_loop """owa""" +506 26 negative_sampler """basic""" +506 26 evaluator """rankbased""" +506 27 dataset """wn18rr""" +506 27 model """proje""" +506 27 loss """nssa""" +506 27 regularizer """no""" +506 27 optimizer """adam""" +506 27 training_loop """owa""" +506 27 negative_sampler """basic""" +506 27 evaluator """rankbased""" +506 28 dataset """wn18rr""" +506 28 model """proje""" +506 28 loss """nssa""" +506 28 regularizer """no""" +506 28 optimizer """adam""" +506 28 training_loop 
"""owa""" +506 28 negative_sampler """basic""" +506 28 evaluator """rankbased""" +506 29 dataset """wn18rr""" +506 29 model """proje""" +506 29 loss """nssa""" +506 29 regularizer """no""" +506 29 optimizer """adam""" +506 29 training_loop """owa""" +506 29 negative_sampler """basic""" +506 29 evaluator """rankbased""" +506 30 dataset """wn18rr""" +506 30 model """proje""" +506 30 loss """nssa""" +506 30 regularizer """no""" +506 30 optimizer """adam""" +506 30 training_loop """owa""" +506 30 negative_sampler """basic""" +506 30 evaluator """rankbased""" +506 31 dataset """wn18rr""" +506 31 model """proje""" +506 31 loss """nssa""" +506 31 regularizer """no""" +506 31 optimizer """adam""" +506 31 training_loop """owa""" +506 31 negative_sampler """basic""" +506 31 evaluator """rankbased""" +506 32 dataset """wn18rr""" +506 32 model """proje""" +506 32 loss """nssa""" +506 32 regularizer """no""" +506 32 optimizer """adam""" +506 32 training_loop """owa""" +506 32 negative_sampler """basic""" +506 32 evaluator """rankbased""" +506 33 dataset """wn18rr""" +506 33 model """proje""" +506 33 loss """nssa""" +506 33 regularizer """no""" +506 33 optimizer """adam""" +506 33 training_loop """owa""" +506 33 negative_sampler """basic""" +506 33 evaluator """rankbased""" +506 34 dataset """wn18rr""" +506 34 model """proje""" +506 34 loss """nssa""" +506 34 regularizer """no""" +506 34 optimizer """adam""" +506 34 training_loop """owa""" +506 34 negative_sampler """basic""" +506 34 evaluator """rankbased""" +506 35 dataset """wn18rr""" +506 35 model """proje""" +506 35 loss """nssa""" +506 35 regularizer """no""" +506 35 optimizer """adam""" +506 35 training_loop """owa""" +506 35 negative_sampler """basic""" +506 35 evaluator """rankbased""" +506 36 dataset """wn18rr""" +506 36 model """proje""" +506 36 loss """nssa""" +506 36 regularizer """no""" +506 36 optimizer """adam""" +506 36 training_loop """owa""" +506 36 negative_sampler """basic""" +506 36 evaluator 
"""rankbased""" +506 37 dataset """wn18rr""" +506 37 model """proje""" +506 37 loss """nssa""" +506 37 regularizer """no""" +506 37 optimizer """adam""" +506 37 training_loop """owa""" +506 37 negative_sampler """basic""" +506 37 evaluator """rankbased""" +506 38 dataset """wn18rr""" +506 38 model """proje""" +506 38 loss """nssa""" +506 38 regularizer """no""" +506 38 optimizer """adam""" +506 38 training_loop """owa""" +506 38 negative_sampler """basic""" +506 38 evaluator """rankbased""" +506 39 dataset """wn18rr""" +506 39 model """proje""" +506 39 loss """nssa""" +506 39 regularizer """no""" +506 39 optimizer """adam""" +506 39 training_loop """owa""" +506 39 negative_sampler """basic""" +506 39 evaluator """rankbased""" +506 40 dataset """wn18rr""" +506 40 model """proje""" +506 40 loss """nssa""" +506 40 regularizer """no""" +506 40 optimizer """adam""" +506 40 training_loop """owa""" +506 40 negative_sampler """basic""" +506 40 evaluator """rankbased""" +506 41 dataset """wn18rr""" +506 41 model """proje""" +506 41 loss """nssa""" +506 41 regularizer """no""" +506 41 optimizer """adam""" +506 41 training_loop """owa""" +506 41 negative_sampler """basic""" +506 41 evaluator """rankbased""" +506 42 dataset """wn18rr""" +506 42 model """proje""" +506 42 loss """nssa""" +506 42 regularizer """no""" +506 42 optimizer """adam""" +506 42 training_loop """owa""" +506 42 negative_sampler """basic""" +506 42 evaluator """rankbased""" +506 43 dataset """wn18rr""" +506 43 model """proje""" +506 43 loss """nssa""" +506 43 regularizer """no""" +506 43 optimizer """adam""" +506 43 training_loop """owa""" +506 43 negative_sampler """basic""" +506 43 evaluator """rankbased""" +506 44 dataset """wn18rr""" +506 44 model """proje""" +506 44 loss """nssa""" +506 44 regularizer """no""" +506 44 optimizer """adam""" +506 44 training_loop """owa""" +506 44 negative_sampler """basic""" +506 44 evaluator """rankbased""" +506 45 dataset """wn18rr""" +506 45 model """proje""" +506 45 
loss """nssa""" +506 45 regularizer """no""" +506 45 optimizer """adam""" +506 45 training_loop """owa""" +506 45 negative_sampler """basic""" +506 45 evaluator """rankbased""" +506 46 dataset """wn18rr""" +506 46 model """proje""" +506 46 loss """nssa""" +506 46 regularizer """no""" +506 46 optimizer """adam""" +506 46 training_loop """owa""" +506 46 negative_sampler """basic""" +506 46 evaluator """rankbased""" +506 47 dataset """wn18rr""" +506 47 model """proje""" +506 47 loss """nssa""" +506 47 regularizer """no""" +506 47 optimizer """adam""" +506 47 training_loop """owa""" +506 47 negative_sampler """basic""" +506 47 evaluator """rankbased""" +506 48 dataset """wn18rr""" +506 48 model """proje""" +506 48 loss """nssa""" +506 48 regularizer """no""" +506 48 optimizer """adam""" +506 48 training_loop """owa""" +506 48 negative_sampler """basic""" +506 48 evaluator """rankbased""" +506 49 dataset """wn18rr""" +506 49 model """proje""" +506 49 loss """nssa""" +506 49 regularizer """no""" +506 49 optimizer """adam""" +506 49 training_loop """owa""" +506 49 negative_sampler """basic""" +506 49 evaluator """rankbased""" +506 50 dataset """wn18rr""" +506 50 model """proje""" +506 50 loss """nssa""" +506 50 regularizer """no""" +506 50 optimizer """adam""" +506 50 training_loop """owa""" +506 50 negative_sampler """basic""" +506 50 evaluator """rankbased""" +506 51 dataset """wn18rr""" +506 51 model """proje""" +506 51 loss """nssa""" +506 51 regularizer """no""" +506 51 optimizer """adam""" +506 51 training_loop """owa""" +506 51 negative_sampler """basic""" +506 51 evaluator """rankbased""" +506 52 dataset """wn18rr""" +506 52 model """proje""" +506 52 loss """nssa""" +506 52 regularizer """no""" +506 52 optimizer """adam""" +506 52 training_loop """owa""" +506 52 negative_sampler """basic""" +506 52 evaluator """rankbased""" +506 53 dataset """wn18rr""" +506 53 model """proje""" +506 53 loss """nssa""" +506 53 regularizer """no""" +506 53 optimizer """adam""" +506 
53 training_loop """owa""" +506 53 negative_sampler """basic""" +506 53 evaluator """rankbased""" +506 54 dataset """wn18rr""" +506 54 model """proje""" +506 54 loss """nssa""" +506 54 regularizer """no""" +506 54 optimizer """adam""" +506 54 training_loop """owa""" +506 54 negative_sampler """basic""" +506 54 evaluator """rankbased""" +506 55 dataset """wn18rr""" +506 55 model """proje""" +506 55 loss """nssa""" +506 55 regularizer """no""" +506 55 optimizer """adam""" +506 55 training_loop """owa""" +506 55 negative_sampler """basic""" +506 55 evaluator """rankbased""" +506 56 dataset """wn18rr""" +506 56 model """proje""" +506 56 loss """nssa""" +506 56 regularizer """no""" +506 56 optimizer """adam""" +506 56 training_loop """owa""" +506 56 negative_sampler """basic""" +506 56 evaluator """rankbased""" +506 57 dataset """wn18rr""" +506 57 model """proje""" +506 57 loss """nssa""" +506 57 regularizer """no""" +506 57 optimizer """adam""" +506 57 training_loop """owa""" +506 57 negative_sampler """basic""" +506 57 evaluator """rankbased""" +506 58 dataset """wn18rr""" +506 58 model """proje""" +506 58 loss """nssa""" +506 58 regularizer """no""" +506 58 optimizer """adam""" +506 58 training_loop """owa""" +506 58 negative_sampler """basic""" +506 58 evaluator """rankbased""" +506 59 dataset """wn18rr""" +506 59 model """proje""" +506 59 loss """nssa""" +506 59 regularizer """no""" +506 59 optimizer """adam""" +506 59 training_loop """owa""" +506 59 negative_sampler """basic""" +506 59 evaluator """rankbased""" +506 60 dataset """wn18rr""" +506 60 model """proje""" +506 60 loss """nssa""" +506 60 regularizer """no""" +506 60 optimizer """adam""" +506 60 training_loop """owa""" +506 60 negative_sampler """basic""" +506 60 evaluator """rankbased""" +506 61 dataset """wn18rr""" +506 61 model """proje""" +506 61 loss """nssa""" +506 61 regularizer """no""" +506 61 optimizer """adam""" +506 61 training_loop """owa""" +506 61 negative_sampler """basic""" +506 61 
evaluator """rankbased""" +506 62 dataset """wn18rr""" +506 62 model """proje""" +506 62 loss """nssa""" +506 62 regularizer """no""" +506 62 optimizer """adam""" +506 62 training_loop """owa""" +506 62 negative_sampler """basic""" +506 62 evaluator """rankbased""" +506 63 dataset """wn18rr""" +506 63 model """proje""" +506 63 loss """nssa""" +506 63 regularizer """no""" +506 63 optimizer """adam""" +506 63 training_loop """owa""" +506 63 negative_sampler """basic""" +506 63 evaluator """rankbased""" +506 64 dataset """wn18rr""" +506 64 model """proje""" +506 64 loss """nssa""" +506 64 regularizer """no""" +506 64 optimizer """adam""" +506 64 training_loop """owa""" +506 64 negative_sampler """basic""" +506 64 evaluator """rankbased""" +506 65 dataset """wn18rr""" +506 65 model """proje""" +506 65 loss """nssa""" +506 65 regularizer """no""" +506 65 optimizer """adam""" +506 65 training_loop """owa""" +506 65 negative_sampler """basic""" +506 65 evaluator """rankbased""" +506 66 dataset """wn18rr""" +506 66 model """proje""" +506 66 loss """nssa""" +506 66 regularizer """no""" +506 66 optimizer """adam""" +506 66 training_loop """owa""" +506 66 negative_sampler """basic""" +506 66 evaluator """rankbased""" +506 67 dataset """wn18rr""" +506 67 model """proje""" +506 67 loss """nssa""" +506 67 regularizer """no""" +506 67 optimizer """adam""" +506 67 training_loop """owa""" +506 67 negative_sampler """basic""" +506 67 evaluator """rankbased""" +506 68 dataset """wn18rr""" +506 68 model """proje""" +506 68 loss """nssa""" +506 68 regularizer """no""" +506 68 optimizer """adam""" +506 68 training_loop """owa""" +506 68 negative_sampler """basic""" +506 68 evaluator """rankbased""" +506 69 dataset """wn18rr""" +506 69 model """proje""" +506 69 loss """nssa""" +506 69 regularizer """no""" +506 69 optimizer """adam""" +506 69 training_loop """owa""" +506 69 negative_sampler """basic""" +506 69 evaluator """rankbased""" +506 70 dataset """wn18rr""" +506 70 model 
"""proje""" +506 70 loss """nssa""" +506 70 regularizer """no""" +506 70 optimizer """adam""" +506 70 training_loop """owa""" +506 70 negative_sampler """basic""" +506 70 evaluator """rankbased""" +506 71 dataset """wn18rr""" +506 71 model """proje""" +506 71 loss """nssa""" +506 71 regularizer """no""" +506 71 optimizer """adam""" +506 71 training_loop """owa""" +506 71 negative_sampler """basic""" +506 71 evaluator """rankbased""" +506 72 dataset """wn18rr""" +506 72 model """proje""" +506 72 loss """nssa""" +506 72 regularizer """no""" +506 72 optimizer """adam""" +506 72 training_loop """owa""" +506 72 negative_sampler """basic""" +506 72 evaluator """rankbased""" +506 73 dataset """wn18rr""" +506 73 model """proje""" +506 73 loss """nssa""" +506 73 regularizer """no""" +506 73 optimizer """adam""" +506 73 training_loop """owa""" +506 73 negative_sampler """basic""" +506 73 evaluator """rankbased""" +506 74 dataset """wn18rr""" +506 74 model """proje""" +506 74 loss """nssa""" +506 74 regularizer """no""" +506 74 optimizer """adam""" +506 74 training_loop """owa""" +506 74 negative_sampler """basic""" +506 74 evaluator """rankbased""" +506 75 dataset """wn18rr""" +506 75 model """proje""" +506 75 loss """nssa""" +506 75 regularizer """no""" +506 75 optimizer """adam""" +506 75 training_loop """owa""" +506 75 negative_sampler """basic""" +506 75 evaluator """rankbased""" +506 76 dataset """wn18rr""" +506 76 model """proje""" +506 76 loss """nssa""" +506 76 regularizer """no""" +506 76 optimizer """adam""" +506 76 training_loop """owa""" +506 76 negative_sampler """basic""" +506 76 evaluator """rankbased""" +506 77 dataset """wn18rr""" +506 77 model """proje""" +506 77 loss """nssa""" +506 77 regularizer """no""" +506 77 optimizer """adam""" +506 77 training_loop """owa""" +506 77 negative_sampler """basic""" +506 77 evaluator """rankbased""" +506 78 dataset """wn18rr""" +506 78 model """proje""" +506 78 loss """nssa""" +506 78 regularizer """no""" +506 78 
optimizer """adam""" +506 78 training_loop """owa""" +506 78 negative_sampler """basic""" +506 78 evaluator """rankbased""" +506 79 dataset """wn18rr""" +506 79 model """proje""" +506 79 loss """nssa""" +506 79 regularizer """no""" +506 79 optimizer """adam""" +506 79 training_loop """owa""" +506 79 negative_sampler """basic""" +506 79 evaluator """rankbased""" +506 80 dataset """wn18rr""" +506 80 model """proje""" +506 80 loss """nssa""" +506 80 regularizer """no""" +506 80 optimizer """adam""" +506 80 training_loop """owa""" +506 80 negative_sampler """basic""" +506 80 evaluator """rankbased""" +506 81 dataset """wn18rr""" +506 81 model """proje""" +506 81 loss """nssa""" +506 81 regularizer """no""" +506 81 optimizer """adam""" +506 81 training_loop """owa""" +506 81 negative_sampler """basic""" +506 81 evaluator """rankbased""" +506 82 dataset """wn18rr""" +506 82 model """proje""" +506 82 loss """nssa""" +506 82 regularizer """no""" +506 82 optimizer """adam""" +506 82 training_loop """owa""" +506 82 negative_sampler """basic""" +506 82 evaluator """rankbased""" +506 83 dataset """wn18rr""" +506 83 model """proje""" +506 83 loss """nssa""" +506 83 regularizer """no""" +506 83 optimizer """adam""" +506 83 training_loop """owa""" +506 83 negative_sampler """basic""" +506 83 evaluator """rankbased""" +506 84 dataset """wn18rr""" +506 84 model """proje""" +506 84 loss """nssa""" +506 84 regularizer """no""" +506 84 optimizer """adam""" +506 84 training_loop """owa""" +506 84 negative_sampler """basic""" +506 84 evaluator """rankbased""" +506 85 dataset """wn18rr""" +506 85 model """proje""" +506 85 loss """nssa""" +506 85 regularizer """no""" +506 85 optimizer """adam""" +506 85 training_loop """owa""" +506 85 negative_sampler """basic""" +506 85 evaluator """rankbased""" +506 86 dataset """wn18rr""" +506 86 model """proje""" +506 86 loss """nssa""" +506 86 regularizer """no""" +506 86 optimizer """adam""" +506 86 training_loop """owa""" +506 86 negative_sampler 
"""basic""" +506 86 evaluator """rankbased""" +506 87 dataset """wn18rr""" +506 87 model """proje""" +506 87 loss """nssa""" +506 87 regularizer """no""" +506 87 optimizer """adam""" +506 87 training_loop """owa""" +506 87 negative_sampler """basic""" +506 87 evaluator """rankbased""" +506 88 dataset """wn18rr""" +506 88 model """proje""" +506 88 loss """nssa""" +506 88 regularizer """no""" +506 88 optimizer """adam""" +506 88 training_loop """owa""" +506 88 negative_sampler """basic""" +506 88 evaluator """rankbased""" +506 89 dataset """wn18rr""" +506 89 model """proje""" +506 89 loss """nssa""" +506 89 regularizer """no""" +506 89 optimizer """adam""" +506 89 training_loop """owa""" +506 89 negative_sampler """basic""" +506 89 evaluator """rankbased""" +506 90 dataset """wn18rr""" +506 90 model """proje""" +506 90 loss """nssa""" +506 90 regularizer """no""" +506 90 optimizer """adam""" +506 90 training_loop """owa""" +506 90 negative_sampler """basic""" +506 90 evaluator """rankbased""" +506 91 dataset """wn18rr""" +506 91 model """proje""" +506 91 loss """nssa""" +506 91 regularizer """no""" +506 91 optimizer """adam""" +506 91 training_loop """owa""" +506 91 negative_sampler """basic""" +506 91 evaluator """rankbased""" +506 92 dataset """wn18rr""" +506 92 model """proje""" +506 92 loss """nssa""" +506 92 regularizer """no""" +506 92 optimizer """adam""" +506 92 training_loop """owa""" +506 92 negative_sampler """basic""" +506 92 evaluator """rankbased""" +506 93 dataset """wn18rr""" +506 93 model """proje""" +506 93 loss """nssa""" +506 93 regularizer """no""" +506 93 optimizer """adam""" +506 93 training_loop """owa""" +506 93 negative_sampler """basic""" +506 93 evaluator """rankbased""" +506 94 dataset """wn18rr""" +506 94 model """proje""" +506 94 loss """nssa""" +506 94 regularizer """no""" +506 94 optimizer """adam""" +506 94 training_loop """owa""" +506 94 negative_sampler """basic""" +506 94 evaluator """rankbased""" +506 95 dataset """wn18rr""" +506 
95 model """proje""" +506 95 loss """nssa""" +506 95 regularizer """no""" +506 95 optimizer """adam""" +506 95 training_loop """owa""" +506 95 negative_sampler """basic""" +506 95 evaluator """rankbased""" +506 96 dataset """wn18rr""" +506 96 model """proje""" +506 96 loss """nssa""" +506 96 regularizer """no""" +506 96 optimizer """adam""" +506 96 training_loop """owa""" +506 96 negative_sampler """basic""" +506 96 evaluator """rankbased""" +506 97 dataset """wn18rr""" +506 97 model """proje""" +506 97 loss """nssa""" +506 97 regularizer """no""" +506 97 optimizer """adam""" +506 97 training_loop """owa""" +506 97 negative_sampler """basic""" +506 97 evaluator """rankbased""" +506 98 dataset """wn18rr""" +506 98 model """proje""" +506 98 loss """nssa""" +506 98 regularizer """no""" +506 98 optimizer """adam""" +506 98 training_loop """owa""" +506 98 negative_sampler """basic""" +506 98 evaluator """rankbased""" +506 99 dataset """wn18rr""" +506 99 model """proje""" +506 99 loss """nssa""" +506 99 regularizer """no""" +506 99 optimizer """adam""" +506 99 training_loop """owa""" +506 99 negative_sampler """basic""" +506 99 evaluator """rankbased""" +506 100 dataset """wn18rr""" +506 100 model """proje""" +506 100 loss """nssa""" +506 100 regularizer """no""" +506 100 optimizer """adam""" +506 100 training_loop """owa""" +506 100 negative_sampler """basic""" +506 100 evaluator """rankbased""" +507 1 model.embedding_dim 2.0 +507 1 optimizer.lr 0.08307258879622831 +507 1 training.batch_size 2.0 +507 1 training.label_smoothing 0.012171181655199087 +507 2 model.embedding_dim 1.0 +507 2 optimizer.lr 0.035670117426543836 +507 2 training.batch_size 1.0 +507 2 training.label_smoothing 0.06943936360496691 +507 3 model.embedding_dim 0.0 +507 3 optimizer.lr 0.0284141956194794 +507 3 training.batch_size 2.0 +507 3 training.label_smoothing 0.0011330663757753898 +507 4 model.embedding_dim 1.0 +507 4 optimizer.lr 0.010285970979311202 +507 4 training.batch_size 0.0 +507 4 
training.label_smoothing 0.0028171422957943733 +507 5 model.embedding_dim 2.0 +507 5 optimizer.lr 0.09419498140681397 +507 5 training.batch_size 1.0 +507 5 training.label_smoothing 0.14657189967346237 +507 6 model.embedding_dim 2.0 +507 6 optimizer.lr 0.0026423384808815847 +507 6 training.batch_size 0.0 +507 6 training.label_smoothing 0.04347787872730137 +507 7 model.embedding_dim 0.0 +507 7 optimizer.lr 0.008787524375440715 +507 7 training.batch_size 1.0 +507 7 training.label_smoothing 0.06636328215076115 +507 8 model.embedding_dim 0.0 +507 8 optimizer.lr 0.004795924483436824 +507 8 training.batch_size 1.0 +507 8 training.label_smoothing 0.001957196317561079 +507 9 model.embedding_dim 1.0 +507 9 optimizer.lr 0.0039876294690500075 +507 9 training.batch_size 1.0 +507 9 training.label_smoothing 0.08755431306729705 +507 10 model.embedding_dim 1.0 +507 10 optimizer.lr 0.05897621319049904 +507 10 training.batch_size 0.0 +507 10 training.label_smoothing 0.0147073955731912 +507 11 model.embedding_dim 0.0 +507 11 optimizer.lr 0.06904323123338721 +507 11 training.batch_size 1.0 +507 11 training.label_smoothing 0.001984746061616479 +507 12 model.embedding_dim 1.0 +507 12 optimizer.lr 0.0033378245670948564 +507 12 training.batch_size 1.0 +507 12 training.label_smoothing 0.027536294560855688 +507 13 model.embedding_dim 1.0 +507 13 optimizer.lr 0.0038270402768809204 +507 13 training.batch_size 0.0 +507 13 training.label_smoothing 0.0010541124412257166 +507 14 model.embedding_dim 0.0 +507 14 optimizer.lr 0.0075045141355148445 +507 14 training.batch_size 0.0 +507 14 training.label_smoothing 0.011352372002366945 +507 15 model.embedding_dim 0.0 +507 15 optimizer.lr 0.005537718823315627 +507 15 training.batch_size 2.0 +507 15 training.label_smoothing 0.7345100387527252 +507 16 model.embedding_dim 1.0 +507 16 optimizer.lr 0.014401987759966506 +507 16 training.batch_size 1.0 +507 16 training.label_smoothing 0.014032859712705691 +507 17 model.embedding_dim 1.0 +507 17 optimizer.lr 
0.007948641988812878 +507 17 training.batch_size 0.0 +507 17 training.label_smoothing 0.7831272281598555 +507 18 model.embedding_dim 0.0 +507 18 optimizer.lr 0.001773096092827264 +507 18 training.batch_size 2.0 +507 18 training.label_smoothing 0.011345187610844068 +507 19 model.embedding_dim 2.0 +507 19 optimizer.lr 0.003168387414301214 +507 19 training.batch_size 0.0 +507 19 training.label_smoothing 0.2555132030129535 +507 20 model.embedding_dim 2.0 +507 20 optimizer.lr 0.00707205251550175 +507 20 training.batch_size 2.0 +507 20 training.label_smoothing 0.42169952177852305 +507 21 model.embedding_dim 2.0 +507 21 optimizer.lr 0.06591286131020167 +507 21 training.batch_size 1.0 +507 21 training.label_smoothing 0.01443799619526026 +507 22 model.embedding_dim 2.0 +507 22 optimizer.lr 0.03648585610223868 +507 22 training.batch_size 1.0 +507 22 training.label_smoothing 0.28857223064885806 +507 23 model.embedding_dim 0.0 +507 23 optimizer.lr 0.0015757341647215947 +507 23 training.batch_size 2.0 +507 23 training.label_smoothing 0.008420355006055234 +507 24 model.embedding_dim 0.0 +507 24 optimizer.lr 0.05620975897702213 +507 24 training.batch_size 0.0 +507 24 training.label_smoothing 0.009799860820817635 +507 25 model.embedding_dim 2.0 +507 25 optimizer.lr 0.013959391085201948 +507 25 training.batch_size 1.0 +507 25 training.label_smoothing 0.0015797317082448116 +507 26 model.embedding_dim 0.0 +507 26 optimizer.lr 0.05496526293685942 +507 26 training.batch_size 2.0 +507 26 training.label_smoothing 0.01154755066713451 +507 27 model.embedding_dim 2.0 +507 27 optimizer.lr 0.00344663524695048 +507 27 training.batch_size 1.0 +507 27 training.label_smoothing 0.009936082623130175 +507 28 model.embedding_dim 0.0 +507 28 optimizer.lr 0.0029205019488713468 +507 28 training.batch_size 0.0 +507 28 training.label_smoothing 0.06589781720553121 +507 29 model.embedding_dim 1.0 +507 29 optimizer.lr 0.03285387168557862 +507 29 training.batch_size 1.0 +507 29 training.label_smoothing 
0.26824620981607333 +507 1 dataset """fb15k237""" +507 1 model """rescal""" +507 1 loss """bceaftersigmoid""" +507 1 regularizer """no""" +507 1 optimizer """adam""" +507 1 training_loop """lcwa""" +507 1 evaluator """rankbased""" +507 2 dataset """fb15k237""" +507 2 model """rescal""" +507 2 loss """bceaftersigmoid""" +507 2 regularizer """no""" +507 2 optimizer """adam""" +507 2 training_loop """lcwa""" +507 2 evaluator """rankbased""" +507 3 dataset """fb15k237""" +507 3 model """rescal""" +507 3 loss """bceaftersigmoid""" +507 3 regularizer """no""" +507 3 optimizer """adam""" +507 3 training_loop """lcwa""" +507 3 evaluator """rankbased""" +507 4 dataset """fb15k237""" +507 4 model """rescal""" +507 4 loss """bceaftersigmoid""" +507 4 regularizer """no""" +507 4 optimizer """adam""" +507 4 training_loop """lcwa""" +507 4 evaluator """rankbased""" +507 5 dataset """fb15k237""" +507 5 model """rescal""" +507 5 loss """bceaftersigmoid""" +507 5 regularizer """no""" +507 5 optimizer """adam""" +507 5 training_loop """lcwa""" +507 5 evaluator """rankbased""" +507 6 dataset """fb15k237""" +507 6 model """rescal""" +507 6 loss """bceaftersigmoid""" +507 6 regularizer """no""" +507 6 optimizer """adam""" +507 6 training_loop """lcwa""" +507 6 evaluator """rankbased""" +507 7 dataset """fb15k237""" +507 7 model """rescal""" +507 7 loss """bceaftersigmoid""" +507 7 regularizer """no""" +507 7 optimizer """adam""" +507 7 training_loop """lcwa""" +507 7 evaluator """rankbased""" +507 8 dataset """fb15k237""" +507 8 model """rescal""" +507 8 loss """bceaftersigmoid""" +507 8 regularizer """no""" +507 8 optimizer """adam""" +507 8 training_loop """lcwa""" +507 8 evaluator """rankbased""" +507 9 dataset """fb15k237""" +507 9 model """rescal""" +507 9 loss """bceaftersigmoid""" +507 9 regularizer """no""" +507 9 optimizer """adam""" +507 9 training_loop """lcwa""" +507 9 evaluator """rankbased""" +507 10 dataset """fb15k237""" +507 10 model """rescal""" +507 10 loss 
"""bceaftersigmoid""" +507 10 regularizer """no""" +507 10 optimizer """adam""" +507 10 training_loop """lcwa""" +507 10 evaluator """rankbased""" +507 11 dataset """fb15k237""" +507 11 model """rescal""" +507 11 loss """bceaftersigmoid""" +507 11 regularizer """no""" +507 11 optimizer """adam""" +507 11 training_loop """lcwa""" +507 11 evaluator """rankbased""" +507 12 dataset """fb15k237""" +507 12 model """rescal""" +507 12 loss """bceaftersigmoid""" +507 12 regularizer """no""" +507 12 optimizer """adam""" +507 12 training_loop """lcwa""" +507 12 evaluator """rankbased""" +507 13 dataset """fb15k237""" +507 13 model """rescal""" +507 13 loss """bceaftersigmoid""" +507 13 regularizer """no""" +507 13 optimizer """adam""" +507 13 training_loop """lcwa""" +507 13 evaluator """rankbased""" +507 14 dataset """fb15k237""" +507 14 model """rescal""" +507 14 loss """bceaftersigmoid""" +507 14 regularizer """no""" +507 14 optimizer """adam""" +507 14 training_loop """lcwa""" +507 14 evaluator """rankbased""" +507 15 dataset """fb15k237""" +507 15 model """rescal""" +507 15 loss """bceaftersigmoid""" +507 15 regularizer """no""" +507 15 optimizer """adam""" +507 15 training_loop """lcwa""" +507 15 evaluator """rankbased""" +507 16 dataset """fb15k237""" +507 16 model """rescal""" +507 16 loss """bceaftersigmoid""" +507 16 regularizer """no""" +507 16 optimizer """adam""" +507 16 training_loop """lcwa""" +507 16 evaluator """rankbased""" +507 17 dataset """fb15k237""" +507 17 model """rescal""" +507 17 loss """bceaftersigmoid""" +507 17 regularizer """no""" +507 17 optimizer """adam""" +507 17 training_loop """lcwa""" +507 17 evaluator """rankbased""" +507 18 dataset """fb15k237""" +507 18 model """rescal""" +507 18 loss """bceaftersigmoid""" +507 18 regularizer """no""" +507 18 optimizer """adam""" +507 18 training_loop """lcwa""" +507 18 evaluator """rankbased""" +507 19 dataset """fb15k237""" +507 19 model """rescal""" +507 19 loss """bceaftersigmoid""" +507 19 
regularizer """no""" +507 19 optimizer """adam""" +507 19 training_loop """lcwa""" +507 19 evaluator """rankbased""" +507 20 dataset """fb15k237""" +507 20 model """rescal""" +507 20 loss """bceaftersigmoid""" +507 20 regularizer """no""" +507 20 optimizer """adam""" +507 20 training_loop """lcwa""" +507 20 evaluator """rankbased""" +507 21 dataset """fb15k237""" +507 21 model """rescal""" +507 21 loss """bceaftersigmoid""" +507 21 regularizer """no""" +507 21 optimizer """adam""" +507 21 training_loop """lcwa""" +507 21 evaluator """rankbased""" +507 22 dataset """fb15k237""" +507 22 model """rescal""" +507 22 loss """bceaftersigmoid""" +507 22 regularizer """no""" +507 22 optimizer """adam""" +507 22 training_loop """lcwa""" +507 22 evaluator """rankbased""" +507 23 dataset """fb15k237""" +507 23 model """rescal""" +507 23 loss """bceaftersigmoid""" +507 23 regularizer """no""" +507 23 optimizer """adam""" +507 23 training_loop """lcwa""" +507 23 evaluator """rankbased""" +507 24 dataset """fb15k237""" +507 24 model """rescal""" +507 24 loss """bceaftersigmoid""" +507 24 regularizer """no""" +507 24 optimizer """adam""" +507 24 training_loop """lcwa""" +507 24 evaluator """rankbased""" +507 25 dataset """fb15k237""" +507 25 model """rescal""" +507 25 loss """bceaftersigmoid""" +507 25 regularizer """no""" +507 25 optimizer """adam""" +507 25 training_loop """lcwa""" +507 25 evaluator """rankbased""" +507 26 dataset """fb15k237""" +507 26 model """rescal""" +507 26 loss """bceaftersigmoid""" +507 26 regularizer """no""" +507 26 optimizer """adam""" +507 26 training_loop """lcwa""" +507 26 evaluator """rankbased""" +507 27 dataset """fb15k237""" +507 27 model """rescal""" +507 27 loss """bceaftersigmoid""" +507 27 regularizer """no""" +507 27 optimizer """adam""" +507 27 training_loop """lcwa""" +507 27 evaluator """rankbased""" +507 28 dataset """fb15k237""" +507 28 model """rescal""" +507 28 loss """bceaftersigmoid""" +507 28 regularizer """no""" +507 28 
optimizer """adam""" +507 28 training_loop """lcwa""" +507 28 evaluator """rankbased""" +507 29 dataset """fb15k237""" +507 29 model """rescal""" +507 29 loss """bceaftersigmoid""" +507 29 regularizer """no""" +507 29 optimizer """adam""" +507 29 training_loop """lcwa""" +507 29 evaluator """rankbased""" +508 1 model.embedding_dim 0.0 +508 1 optimizer.lr 0.0361159706072012 +508 1 training.batch_size 2.0 +508 1 training.label_smoothing 0.008170273909157954 +508 2 model.embedding_dim 0.0 +508 2 optimizer.lr 0.001844519492357775 +508 2 training.batch_size 1.0 +508 2 training.label_smoothing 0.38579007549679645 +508 3 model.embedding_dim 0.0 +508 3 optimizer.lr 0.02139304000006067 +508 3 training.batch_size 1.0 +508 3 training.label_smoothing 0.06862017386907411 +508 4 model.embedding_dim 1.0 +508 4 optimizer.lr 0.032561524077948906 +508 4 training.batch_size 0.0 +508 4 training.label_smoothing 0.0033622681017809763 +508 5 model.embedding_dim 1.0 +508 5 optimizer.lr 0.09447033290796779 +508 5 training.batch_size 0.0 +508 5 training.label_smoothing 0.6075713362354576 +508 6 model.embedding_dim 2.0 +508 6 optimizer.lr 0.0058414968568266095 +508 6 training.batch_size 2.0 +508 6 training.label_smoothing 0.005164957094672922 +508 7 model.embedding_dim 2.0 +508 7 optimizer.lr 0.09007680931768065 +508 7 training.batch_size 1.0 +508 7 training.label_smoothing 0.033511873460600014 +508 8 model.embedding_dim 1.0 +508 8 optimizer.lr 0.006854537900803598 +508 8 training.batch_size 2.0 +508 8 training.label_smoothing 0.004509293524366851 +508 9 model.embedding_dim 2.0 +508 9 optimizer.lr 0.054314261652070484 +508 9 training.batch_size 2.0 +508 9 training.label_smoothing 0.003508321887398925 +508 10 model.embedding_dim 1.0 +508 10 optimizer.lr 0.019594086941144836 +508 10 training.batch_size 0.0 +508 10 training.label_smoothing 0.007203387405951352 +508 11 model.embedding_dim 1.0 +508 11 optimizer.lr 0.0013607552528823457 +508 11 training.batch_size 1.0 +508 11 
training.label_smoothing 0.2903251390706724 +508 12 model.embedding_dim 1.0 +508 12 optimizer.lr 0.06554085722138657 +508 12 training.batch_size 2.0 +508 12 training.label_smoothing 0.1595540606676515 +508 13 model.embedding_dim 2.0 +508 13 optimizer.lr 0.014240751753523814 +508 13 training.batch_size 0.0 +508 13 training.label_smoothing 0.004418745710542849 +508 14 model.embedding_dim 0.0 +508 14 optimizer.lr 0.003467652002984796 +508 14 training.batch_size 2.0 +508 14 training.label_smoothing 0.005586882734009682 +508 15 model.embedding_dim 0.0 +508 15 optimizer.lr 0.08852176108896449 +508 15 training.batch_size 2.0 +508 15 training.label_smoothing 0.0029993107887912526 +508 16 model.embedding_dim 0.0 +508 16 optimizer.lr 0.07973891830475716 +508 16 training.batch_size 0.0 +508 16 training.label_smoothing 0.02256805430401855 +508 17 model.embedding_dim 1.0 +508 17 optimizer.lr 0.0957894310412 +508 17 training.batch_size 1.0 +508 17 training.label_smoothing 0.0016615238087985134 +508 18 model.embedding_dim 1.0 +508 18 optimizer.lr 0.0010178070206244613 +508 18 training.batch_size 1.0 +508 18 training.label_smoothing 0.025252925800501615 +508 19 model.embedding_dim 2.0 +508 19 optimizer.lr 0.005407472412139369 +508 19 training.batch_size 0.0 +508 19 training.label_smoothing 0.008782109152447367 +508 1 dataset """fb15k237""" +508 1 model """rescal""" +508 1 loss """softplus""" +508 1 regularizer """no""" +508 1 optimizer """adam""" +508 1 training_loop """lcwa""" +508 1 evaluator """rankbased""" +508 2 dataset """fb15k237""" +508 2 model """rescal""" +508 2 loss """softplus""" +508 2 regularizer """no""" +508 2 optimizer """adam""" +508 2 training_loop """lcwa""" +508 2 evaluator """rankbased""" +508 3 dataset """fb15k237""" +508 3 model """rescal""" +508 3 loss """softplus""" +508 3 regularizer """no""" +508 3 optimizer """adam""" +508 3 training_loop """lcwa""" +508 3 evaluator """rankbased""" +508 4 dataset """fb15k237""" +508 4 model """rescal""" +508 4 loss 
"""softplus""" +508 4 regularizer """no""" +508 4 optimizer """adam""" +508 4 training_loop """lcwa""" +508 4 evaluator """rankbased""" +508 5 dataset """fb15k237""" +508 5 model """rescal""" +508 5 loss """softplus""" +508 5 regularizer """no""" +508 5 optimizer """adam""" +508 5 training_loop """lcwa""" +508 5 evaluator """rankbased""" +508 6 dataset """fb15k237""" +508 6 model """rescal""" +508 6 loss """softplus""" +508 6 regularizer """no""" +508 6 optimizer """adam""" +508 6 training_loop """lcwa""" +508 6 evaluator """rankbased""" +508 7 dataset """fb15k237""" +508 7 model """rescal""" +508 7 loss """softplus""" +508 7 regularizer """no""" +508 7 optimizer """adam""" +508 7 training_loop """lcwa""" +508 7 evaluator """rankbased""" +508 8 dataset """fb15k237""" +508 8 model """rescal""" +508 8 loss """softplus""" +508 8 regularizer """no""" +508 8 optimizer """adam""" +508 8 training_loop """lcwa""" +508 8 evaluator """rankbased""" +508 9 dataset """fb15k237""" +508 9 model """rescal""" +508 9 loss """softplus""" +508 9 regularizer """no""" +508 9 optimizer """adam""" +508 9 training_loop """lcwa""" +508 9 evaluator """rankbased""" +508 10 dataset """fb15k237""" +508 10 model """rescal""" +508 10 loss """softplus""" +508 10 regularizer """no""" +508 10 optimizer """adam""" +508 10 training_loop """lcwa""" +508 10 evaluator """rankbased""" +508 11 dataset """fb15k237""" +508 11 model """rescal""" +508 11 loss """softplus""" +508 11 regularizer """no""" +508 11 optimizer """adam""" +508 11 training_loop """lcwa""" +508 11 evaluator """rankbased""" +508 12 dataset """fb15k237""" +508 12 model """rescal""" +508 12 loss """softplus""" +508 12 regularizer """no""" +508 12 optimizer """adam""" +508 12 training_loop """lcwa""" +508 12 evaluator """rankbased""" +508 13 dataset """fb15k237""" +508 13 model """rescal""" +508 13 loss """softplus""" +508 13 regularizer """no""" +508 13 optimizer """adam""" +508 13 training_loop """lcwa""" +508 13 evaluator """rankbased""" 
+508 14 dataset """fb15k237""" +508 14 model """rescal""" +508 14 loss """softplus""" +508 14 regularizer """no""" +508 14 optimizer """adam""" +508 14 training_loop """lcwa""" +508 14 evaluator """rankbased""" +508 15 dataset """fb15k237""" +508 15 model """rescal""" +508 15 loss """softplus""" +508 15 regularizer """no""" +508 15 optimizer """adam""" +508 15 training_loop """lcwa""" +508 15 evaluator """rankbased""" +508 16 dataset """fb15k237""" +508 16 model """rescal""" +508 16 loss """softplus""" +508 16 regularizer """no""" +508 16 optimizer """adam""" +508 16 training_loop """lcwa""" +508 16 evaluator """rankbased""" +508 17 dataset """fb15k237""" +508 17 model """rescal""" +508 17 loss """softplus""" +508 17 regularizer """no""" +508 17 optimizer """adam""" +508 17 training_loop """lcwa""" +508 17 evaluator """rankbased""" +508 18 dataset """fb15k237""" +508 18 model """rescal""" +508 18 loss """softplus""" +508 18 regularizer """no""" +508 18 optimizer """adam""" +508 18 training_loop """lcwa""" +508 18 evaluator """rankbased""" +508 19 dataset """fb15k237""" +508 19 model """rescal""" +508 19 loss """softplus""" +508 19 regularizer """no""" +508 19 optimizer """adam""" +508 19 training_loop """lcwa""" +508 19 evaluator """rankbased""" +509 1 model.embedding_dim 2.0 +509 1 optimizer.lr 0.051564911464462716 +509 1 training.batch_size 1.0 +509 1 training.label_smoothing 0.008603272943891297 +509 2 model.embedding_dim 1.0 +509 2 optimizer.lr 0.037535350082619115 +509 2 training.batch_size 2.0 +509 2 training.label_smoothing 0.022955661346031615 +509 3 model.embedding_dim 1.0 +509 3 optimizer.lr 0.0322279180659139 +509 3 training.batch_size 0.0 +509 3 training.label_smoothing 0.11674878471771674 +509 4 model.embedding_dim 1.0 +509 4 optimizer.lr 0.06878635453100596 +509 4 training.batch_size 0.0 +509 4 training.label_smoothing 0.9546589758441413 +509 5 model.embedding_dim 0.0 +509 5 optimizer.lr 0.003598366790247957 +509 5 training.batch_size 0.0 +509 5 
training.label_smoothing 0.0028441994847628444 +509 6 model.embedding_dim 0.0 +509 6 optimizer.lr 0.09763175383712731 +509 6 training.batch_size 0.0 +509 6 training.label_smoothing 0.013131457380734873 +509 7 model.embedding_dim 0.0 +509 7 optimizer.lr 0.020918367609585035 +509 7 training.batch_size 0.0 +509 7 training.label_smoothing 0.992998690104497 +509 8 model.embedding_dim 2.0 +509 8 optimizer.lr 0.02030069093805317 +509 8 training.batch_size 1.0 +509 8 training.label_smoothing 0.1572225267013157 +509 9 model.embedding_dim 0.0 +509 9 optimizer.lr 0.021047561531259366 +509 9 training.batch_size 1.0 +509 9 training.label_smoothing 0.37855340038091373 +509 10 model.embedding_dim 2.0 +509 10 optimizer.lr 0.009856510256664794 +509 10 training.batch_size 0.0 +509 10 training.label_smoothing 0.0016948981026622 +509 11 model.embedding_dim 0.0 +509 11 optimizer.lr 0.0015828600072590746 +509 11 training.batch_size 1.0 +509 11 training.label_smoothing 0.44042989787457754 +509 12 model.embedding_dim 0.0 +509 12 optimizer.lr 0.0043384636042170365 +509 12 training.batch_size 2.0 +509 12 training.label_smoothing 0.006893252090483305 +509 13 model.embedding_dim 1.0 +509 13 optimizer.lr 0.022998079702973784 +509 13 training.batch_size 2.0 +509 13 training.label_smoothing 0.012556794529261258 +509 14 model.embedding_dim 2.0 +509 14 optimizer.lr 0.0014101063516283076 +509 14 training.batch_size 2.0 +509 14 training.label_smoothing 0.005606002040482964 +509 15 model.embedding_dim 0.0 +509 15 optimizer.lr 0.0012636594528572776 +509 15 training.batch_size 1.0 +509 15 training.label_smoothing 0.42766611941716054 +509 16 model.embedding_dim 1.0 +509 16 optimizer.lr 0.002090442337084917 +509 16 training.batch_size 2.0 +509 16 training.label_smoothing 0.007472258767610211 +509 17 model.embedding_dim 0.0 +509 17 optimizer.lr 0.07887585724264046 +509 17 training.batch_size 2.0 +509 17 training.label_smoothing 0.004122882180615781 +509 18 model.embedding_dim 1.0 +509 18 optimizer.lr 
0.008582900594846093 +509 18 training.batch_size 1.0 +509 18 training.label_smoothing 0.24777112315788458 +509 19 model.embedding_dim 2.0 +509 19 optimizer.lr 0.017248170157217864 +509 19 training.batch_size 2.0 +509 19 training.label_smoothing 0.19071459128734794 +509 20 model.embedding_dim 0.0 +509 20 optimizer.lr 0.010055419782918434 +509 20 training.batch_size 1.0 +509 20 training.label_smoothing 0.018625071225839684 +509 21 model.embedding_dim 0.0 +509 21 optimizer.lr 0.0012741879451087519 +509 21 training.batch_size 2.0 +509 21 training.label_smoothing 0.004045329726921841 +509 22 model.embedding_dim 0.0 +509 22 optimizer.lr 0.0023089384337153253 +509 22 training.batch_size 2.0 +509 22 training.label_smoothing 0.00981511934860684 +509 23 model.embedding_dim 2.0 +509 23 optimizer.lr 0.006168514802717607 +509 23 training.batch_size 1.0 +509 23 training.label_smoothing 0.010600997104165902 +509 24 model.embedding_dim 1.0 +509 24 optimizer.lr 0.09049113538929783 +509 24 training.batch_size 0.0 +509 24 training.label_smoothing 0.42293264037085915 +509 25 model.embedding_dim 2.0 +509 25 optimizer.lr 0.0037566355172549315 +509 25 training.batch_size 0.0 +509 25 training.label_smoothing 0.011169353035398519 +509 26 model.embedding_dim 0.0 +509 26 optimizer.lr 0.003280905197116093 +509 26 training.batch_size 0.0 +509 26 training.label_smoothing 0.34517449944361195 +509 27 model.embedding_dim 0.0 +509 27 optimizer.lr 0.001554732213920331 +509 27 training.batch_size 1.0 +509 27 training.label_smoothing 0.005683925262326561 +509 28 model.embedding_dim 1.0 +509 28 optimizer.lr 0.005136667734572087 +509 28 training.batch_size 0.0 +509 28 training.label_smoothing 0.04459275891078381 +509 29 model.embedding_dim 2.0 +509 29 optimizer.lr 0.007202684559312293 +509 29 training.batch_size 1.0 +509 29 training.label_smoothing 0.03889128095914461 +509 30 model.embedding_dim 0.0 +509 30 optimizer.lr 0.001117912069868748 +509 30 training.batch_size 1.0 +509 30 
training.label_smoothing 0.008156321313746544 +509 31 model.embedding_dim 2.0 +509 31 optimizer.lr 0.0026267484740164922 +509 31 training.batch_size 1.0 +509 31 training.label_smoothing 0.4741950192383661 +509 32 model.embedding_dim 2.0 +509 32 optimizer.lr 0.001406740581663974 +509 32 training.batch_size 0.0 +509 32 training.label_smoothing 0.014801745770801435 +509 33 model.embedding_dim 1.0 +509 33 optimizer.lr 0.03433596985572723 +509 33 training.batch_size 1.0 +509 33 training.label_smoothing 0.0064240432324223565 +509 34 model.embedding_dim 2.0 +509 34 optimizer.lr 0.0062717059372979864 +509 34 training.batch_size 0.0 +509 34 training.label_smoothing 0.022692012685169797 +509 35 model.embedding_dim 2.0 +509 35 optimizer.lr 0.0017206804748369242 +509 35 training.batch_size 2.0 +509 35 training.label_smoothing 0.0032199565035126994 +509 36 model.embedding_dim 2.0 +509 36 optimizer.lr 0.0012428054017918004 +509 36 training.batch_size 0.0 +509 36 training.label_smoothing 0.01849649834525364 +509 37 model.embedding_dim 1.0 +509 37 optimizer.lr 0.0881439036535793 +509 37 training.batch_size 2.0 +509 37 training.label_smoothing 0.22228225213831926 +509 38 model.embedding_dim 0.0 +509 38 optimizer.lr 0.004554273536387435 +509 38 training.batch_size 1.0 +509 38 training.label_smoothing 0.0068728824871147525 +509 39 model.embedding_dim 1.0 +509 39 optimizer.lr 0.010658696417728286 +509 39 training.batch_size 0.0 +509 39 training.label_smoothing 0.004237246808376452 +509 40 model.embedding_dim 1.0 +509 40 optimizer.lr 0.04906179148915874 +509 40 training.batch_size 1.0 +509 40 training.label_smoothing 0.4504841928019649 +509 1 dataset """fb15k237""" +509 1 model """rescal""" +509 1 loss """bceaftersigmoid""" +509 1 regularizer """no""" +509 1 optimizer """adam""" +509 1 training_loop """lcwa""" +509 1 evaluator """rankbased""" +509 2 dataset """fb15k237""" +509 2 model """rescal""" +509 2 loss """bceaftersigmoid""" +509 2 regularizer """no""" +509 2 optimizer """adam""" 
+509 2 training_loop """lcwa""" +509 2 evaluator """rankbased""" +509 3 dataset """fb15k237""" +509 3 model """rescal""" +509 3 loss """bceaftersigmoid""" +509 3 regularizer """no""" +509 3 optimizer """adam""" +509 3 training_loop """lcwa""" +509 3 evaluator """rankbased""" +509 4 dataset """fb15k237""" +509 4 model """rescal""" +509 4 loss """bceaftersigmoid""" +509 4 regularizer """no""" +509 4 optimizer """adam""" +509 4 training_loop """lcwa""" +509 4 evaluator """rankbased""" +509 5 dataset """fb15k237""" +509 5 model """rescal""" +509 5 loss """bceaftersigmoid""" +509 5 regularizer """no""" +509 5 optimizer """adam""" +509 5 training_loop """lcwa""" +509 5 evaluator """rankbased""" +509 6 dataset """fb15k237""" +509 6 model """rescal""" +509 6 loss """bceaftersigmoid""" +509 6 regularizer """no""" +509 6 optimizer """adam""" +509 6 training_loop """lcwa""" +509 6 evaluator """rankbased""" +509 7 dataset """fb15k237""" +509 7 model """rescal""" +509 7 loss """bceaftersigmoid""" +509 7 regularizer """no""" +509 7 optimizer """adam""" +509 7 training_loop """lcwa""" +509 7 evaluator """rankbased""" +509 8 dataset """fb15k237""" +509 8 model """rescal""" +509 8 loss """bceaftersigmoid""" +509 8 regularizer """no""" +509 8 optimizer """adam""" +509 8 training_loop """lcwa""" +509 8 evaluator """rankbased""" +509 9 dataset """fb15k237""" +509 9 model """rescal""" +509 9 loss """bceaftersigmoid""" +509 9 regularizer """no""" +509 9 optimizer """adam""" +509 9 training_loop """lcwa""" +509 9 evaluator """rankbased""" +509 10 dataset """fb15k237""" +509 10 model """rescal""" +509 10 loss """bceaftersigmoid""" +509 10 regularizer """no""" +509 10 optimizer """adam""" +509 10 training_loop """lcwa""" +509 10 evaluator """rankbased""" +509 11 dataset """fb15k237""" +509 11 model """rescal""" +509 11 loss """bceaftersigmoid""" +509 11 regularizer """no""" +509 11 optimizer """adam""" +509 11 training_loop """lcwa""" +509 11 evaluator """rankbased""" +509 12 dataset 
"""fb15k237""" +509 12 model """rescal""" +509 12 loss """bceaftersigmoid""" +509 12 regularizer """no""" +509 12 optimizer """adam""" +509 12 training_loop """lcwa""" +509 12 evaluator """rankbased""" +509 13 dataset """fb15k237""" +509 13 model """rescal""" +509 13 loss """bceaftersigmoid""" +509 13 regularizer """no""" +509 13 optimizer """adam""" +509 13 training_loop """lcwa""" +509 13 evaluator """rankbased""" +509 14 dataset """fb15k237""" +509 14 model """rescal""" +509 14 loss """bceaftersigmoid""" +509 14 regularizer """no""" +509 14 optimizer """adam""" +509 14 training_loop """lcwa""" +509 14 evaluator """rankbased""" +509 15 dataset """fb15k237""" +509 15 model """rescal""" +509 15 loss """bceaftersigmoid""" +509 15 regularizer """no""" +509 15 optimizer """adam""" +509 15 training_loop """lcwa""" +509 15 evaluator """rankbased""" +509 16 dataset """fb15k237""" +509 16 model """rescal""" +509 16 loss """bceaftersigmoid""" +509 16 regularizer """no""" +509 16 optimizer """adam""" +509 16 training_loop """lcwa""" +509 16 evaluator """rankbased""" +509 17 dataset """fb15k237""" +509 17 model """rescal""" +509 17 loss """bceaftersigmoid""" +509 17 regularizer """no""" +509 17 optimizer """adam""" +509 17 training_loop """lcwa""" +509 17 evaluator """rankbased""" +509 18 dataset """fb15k237""" +509 18 model """rescal""" +509 18 loss """bceaftersigmoid""" +509 18 regularizer """no""" +509 18 optimizer """adam""" +509 18 training_loop """lcwa""" +509 18 evaluator """rankbased""" +509 19 dataset """fb15k237""" +509 19 model """rescal""" +509 19 loss """bceaftersigmoid""" +509 19 regularizer """no""" +509 19 optimizer """adam""" +509 19 training_loop """lcwa""" +509 19 evaluator """rankbased""" +509 20 dataset """fb15k237""" +509 20 model """rescal""" +509 20 loss """bceaftersigmoid""" +509 20 regularizer """no""" +509 20 optimizer """adam""" +509 20 training_loop """lcwa""" +509 20 evaluator """rankbased""" +509 21 dataset """fb15k237""" +509 21 model 
"""rescal""" +509 21 loss """bceaftersigmoid""" +509 21 regularizer """no""" +509 21 optimizer """adam""" +509 21 training_loop """lcwa""" +509 21 evaluator """rankbased""" +509 22 dataset """fb15k237""" +509 22 model """rescal""" +509 22 loss """bceaftersigmoid""" +509 22 regularizer """no""" +509 22 optimizer """adam""" +509 22 training_loop """lcwa""" +509 22 evaluator """rankbased""" +509 23 dataset """fb15k237""" +509 23 model """rescal""" +509 23 loss """bceaftersigmoid""" +509 23 regularizer """no""" +509 23 optimizer """adam""" +509 23 training_loop """lcwa""" +509 23 evaluator """rankbased""" +509 24 dataset """fb15k237""" +509 24 model """rescal""" +509 24 loss """bceaftersigmoid""" +509 24 regularizer """no""" +509 24 optimizer """adam""" +509 24 training_loop """lcwa""" +509 24 evaluator """rankbased""" +509 25 dataset """fb15k237""" +509 25 model """rescal""" +509 25 loss """bceaftersigmoid""" +509 25 regularizer """no""" +509 25 optimizer """adam""" +509 25 training_loop """lcwa""" +509 25 evaluator """rankbased""" +509 26 dataset """fb15k237""" +509 26 model """rescal""" +509 26 loss """bceaftersigmoid""" +509 26 regularizer """no""" +509 26 optimizer """adam""" +509 26 training_loop """lcwa""" +509 26 evaluator """rankbased""" +509 27 dataset """fb15k237""" +509 27 model """rescal""" +509 27 loss """bceaftersigmoid""" +509 27 regularizer """no""" +509 27 optimizer """adam""" +509 27 training_loop """lcwa""" +509 27 evaluator """rankbased""" +509 28 dataset """fb15k237""" +509 28 model """rescal""" +509 28 loss """bceaftersigmoid""" +509 28 regularizer """no""" +509 28 optimizer """adam""" +509 28 training_loop """lcwa""" +509 28 evaluator """rankbased""" +509 29 dataset """fb15k237""" +509 29 model """rescal""" +509 29 loss """bceaftersigmoid""" +509 29 regularizer """no""" +509 29 optimizer """adam""" +509 29 training_loop """lcwa""" +509 29 evaluator """rankbased""" +509 30 dataset """fb15k237""" +509 30 model """rescal""" +509 30 loss 
"""bceaftersigmoid""" +509 30 regularizer """no""" +509 30 optimizer """adam""" +509 30 training_loop """lcwa""" +509 30 evaluator """rankbased""" +509 31 dataset """fb15k237""" +509 31 model """rescal""" +509 31 loss """bceaftersigmoid""" +509 31 regularizer """no""" +509 31 optimizer """adam""" +509 31 training_loop """lcwa""" +509 31 evaluator """rankbased""" +509 32 dataset """fb15k237""" +509 32 model """rescal""" +509 32 loss """bceaftersigmoid""" +509 32 regularizer """no""" +509 32 optimizer """adam""" +509 32 training_loop """lcwa""" +509 32 evaluator """rankbased""" +509 33 dataset """fb15k237""" +509 33 model """rescal""" +509 33 loss """bceaftersigmoid""" +509 33 regularizer """no""" +509 33 optimizer """adam""" +509 33 training_loop """lcwa""" +509 33 evaluator """rankbased""" +509 34 dataset """fb15k237""" +509 34 model """rescal""" +509 34 loss """bceaftersigmoid""" +509 34 regularizer """no""" +509 34 optimizer """adam""" +509 34 training_loop """lcwa""" +509 34 evaluator """rankbased""" +509 35 dataset """fb15k237""" +509 35 model """rescal""" +509 35 loss """bceaftersigmoid""" +509 35 regularizer """no""" +509 35 optimizer """adam""" +509 35 training_loop """lcwa""" +509 35 evaluator """rankbased""" +509 36 dataset """fb15k237""" +509 36 model """rescal""" +509 36 loss """bceaftersigmoid""" +509 36 regularizer """no""" +509 36 optimizer """adam""" +509 36 training_loop """lcwa""" +509 36 evaluator """rankbased""" +509 37 dataset """fb15k237""" +509 37 model """rescal""" +509 37 loss """bceaftersigmoid""" +509 37 regularizer """no""" +509 37 optimizer """adam""" +509 37 training_loop """lcwa""" +509 37 evaluator """rankbased""" +509 38 dataset """fb15k237""" +509 38 model """rescal""" +509 38 loss """bceaftersigmoid""" +509 38 regularizer """no""" +509 38 optimizer """adam""" +509 38 training_loop """lcwa""" +509 38 evaluator """rankbased""" +509 39 dataset """fb15k237""" +509 39 model """rescal""" +509 39 loss """bceaftersigmoid""" +509 39 
regularizer """no""" +509 39 optimizer """adam""" +509 39 training_loop """lcwa""" +509 39 evaluator """rankbased""" +509 40 dataset """fb15k237""" +509 40 model """rescal""" +509 40 loss """bceaftersigmoid""" +509 40 regularizer """no""" +509 40 optimizer """adam""" +509 40 training_loop """lcwa""" +509 40 evaluator """rankbased""" +510 1 model.embedding_dim 1.0 +510 1 optimizer.lr 0.0020962750863358424 +510 1 training.batch_size 1.0 +510 1 training.label_smoothing 0.01598963296745453 +510 2 model.embedding_dim 0.0 +510 2 optimizer.lr 0.032865774701629165 +510 2 training.batch_size 1.0 +510 2 training.label_smoothing 0.2717052903500444 +510 3 model.embedding_dim 2.0 +510 3 optimizer.lr 0.004153593333821549 +510 3 training.batch_size 0.0 +510 3 training.label_smoothing 0.02662764545832816 +510 4 model.embedding_dim 2.0 +510 4 optimizer.lr 0.019193619969244805 +510 4 training.batch_size 1.0 +510 4 training.label_smoothing 0.5787565131437806 +510 5 model.embedding_dim 1.0 +510 5 optimizer.lr 0.009037441122814389 +510 5 training.batch_size 0.0 +510 5 training.label_smoothing 0.0010227529705917461 +510 6 model.embedding_dim 0.0 +510 6 optimizer.lr 0.04833155746420292 +510 6 training.batch_size 1.0 +510 6 training.label_smoothing 0.0037093553816367557 +510 7 model.embedding_dim 0.0 +510 7 optimizer.lr 0.02025942371492991 +510 7 training.batch_size 2.0 +510 7 training.label_smoothing 0.6941046780729924 +510 8 model.embedding_dim 2.0 +510 8 optimizer.lr 0.055779303692225474 +510 8 training.batch_size 1.0 +510 8 training.label_smoothing 0.06687003397208445 +510 9 model.embedding_dim 1.0 +510 9 optimizer.lr 0.008835755668627288 +510 9 training.batch_size 0.0 +510 9 training.label_smoothing 0.005770202541984686 +510 10 model.embedding_dim 0.0 +510 10 optimizer.lr 0.04554028345239327 +510 10 training.batch_size 1.0 +510 10 training.label_smoothing 0.09945082919379339 +510 11 model.embedding_dim 2.0 +510 11 optimizer.lr 0.008520696768285294 +510 11 training.batch_size 0.0 +510 
11 training.label_smoothing 0.017317868384597958 +510 12 model.embedding_dim 1.0 +510 12 optimizer.lr 0.00521317078785544 +510 12 training.batch_size 1.0 +510 12 training.label_smoothing 0.033654729755192465 +510 13 model.embedding_dim 1.0 +510 13 optimizer.lr 0.024438076207262217 +510 13 training.batch_size 2.0 +510 13 training.label_smoothing 0.0011638515428673223 +510 14 model.embedding_dim 1.0 +510 14 optimizer.lr 0.08274828327280549 +510 14 training.batch_size 0.0 +510 14 training.label_smoothing 0.3979457634062097 +510 15 model.embedding_dim 1.0 +510 15 optimizer.lr 0.026816248196244015 +510 15 training.batch_size 0.0 +510 15 training.label_smoothing 0.7155601549619507 +510 16 model.embedding_dim 0.0 +510 16 optimizer.lr 0.003224939820997284 +510 16 training.batch_size 0.0 +510 16 training.label_smoothing 0.23113339425946358 +510 17 model.embedding_dim 1.0 +510 17 optimizer.lr 0.010862595623168316 +510 17 training.batch_size 0.0 +510 17 training.label_smoothing 0.002587801857011834 +510 18 model.embedding_dim 1.0 +510 18 optimizer.lr 0.023235622674079172 +510 18 training.batch_size 2.0 +510 18 training.label_smoothing 0.0041527694248888435 +510 19 model.embedding_dim 1.0 +510 19 optimizer.lr 0.002197385062793975 +510 19 training.batch_size 1.0 +510 19 training.label_smoothing 0.06966779060775102 +510 20 model.embedding_dim 1.0 +510 20 optimizer.lr 0.0032146144583678326 +510 20 training.batch_size 1.0 +510 20 training.label_smoothing 0.05370982862056819 +510 21 model.embedding_dim 0.0 +510 21 optimizer.lr 0.0028615831035070814 +510 21 training.batch_size 0.0 +510 21 training.label_smoothing 0.04160107803587902 +510 22 model.embedding_dim 2.0 +510 22 optimizer.lr 0.008378439716748254 +510 22 training.batch_size 1.0 +510 22 training.label_smoothing 0.35250818500023473 +510 23 model.embedding_dim 0.0 +510 23 optimizer.lr 0.03989724404379235 +510 23 training.batch_size 2.0 +510 23 training.label_smoothing 0.02316119193449329 +510 24 model.embedding_dim 0.0 +510 24 
optimizer.lr 0.027840492530853127 +510 24 training.batch_size 1.0 +510 24 training.label_smoothing 0.0024658339665082282 +510 25 model.embedding_dim 2.0 +510 25 optimizer.lr 0.03293938010209219 +510 25 training.batch_size 2.0 +510 25 training.label_smoothing 0.0027536121848196634 +510 26 model.embedding_dim 1.0 +510 26 optimizer.lr 0.002152348912112037 +510 26 training.batch_size 2.0 +510 26 training.label_smoothing 0.008434679379511749 +510 27 model.embedding_dim 1.0 +510 27 optimizer.lr 0.029888336245622923 +510 27 training.batch_size 0.0 +510 27 training.label_smoothing 0.11549243111052346 +510 28 model.embedding_dim 1.0 +510 28 optimizer.lr 0.011377437085731105 +510 28 training.batch_size 1.0 +510 28 training.label_smoothing 0.0015011242470326436 +510 29 model.embedding_dim 1.0 +510 29 optimizer.lr 0.0017805541439170043 +510 29 training.batch_size 1.0 +510 29 training.label_smoothing 0.06709121694827516 +510 30 model.embedding_dim 2.0 +510 30 optimizer.lr 0.06228127722729227 +510 30 training.batch_size 2.0 +510 30 training.label_smoothing 0.01249410212109769 +510 31 model.embedding_dim 2.0 +510 31 optimizer.lr 0.0016355278894896151 +510 31 training.batch_size 1.0 +510 31 training.label_smoothing 0.5363360580077419 +510 32 model.embedding_dim 0.0 +510 32 optimizer.lr 0.05160780278387329 +510 32 training.batch_size 2.0 +510 32 training.label_smoothing 0.21656775156722577 +510 33 model.embedding_dim 1.0 +510 33 optimizer.lr 0.0038291251002019223 +510 33 training.batch_size 0.0 +510 33 training.label_smoothing 0.013173739487299576 +510 34 model.embedding_dim 0.0 +510 34 optimizer.lr 0.0312000802745569 +510 34 training.batch_size 0.0 +510 34 training.label_smoothing 0.0032280220065814126 +510 35 model.embedding_dim 0.0 +510 35 optimizer.lr 0.039291822272719434 +510 35 training.batch_size 0.0 +510 35 training.label_smoothing 0.0917368267038751 +510 36 model.embedding_dim 1.0 +510 36 optimizer.lr 0.035788842819936606 +510 36 training.batch_size 1.0 +510 36 
training.label_smoothing 0.4892776134855516 +510 37 model.embedding_dim 1.0 +510 37 optimizer.lr 0.004321067558127995 +510 37 training.batch_size 0.0 +510 37 training.label_smoothing 0.12837973166600827 +510 1 dataset """fb15k237""" +510 1 model """rescal""" +510 1 loss """softplus""" +510 1 regularizer """no""" +510 1 optimizer """adam""" +510 1 training_loop """lcwa""" +510 1 evaluator """rankbased""" +510 2 dataset """fb15k237""" +510 2 model """rescal""" +510 2 loss """softplus""" +510 2 regularizer """no""" +510 2 optimizer """adam""" +510 2 training_loop """lcwa""" +510 2 evaluator """rankbased""" +510 3 dataset """fb15k237""" +510 3 model """rescal""" +510 3 loss """softplus""" +510 3 regularizer """no""" +510 3 optimizer """adam""" +510 3 training_loop """lcwa""" +510 3 evaluator """rankbased""" +510 4 dataset """fb15k237""" +510 4 model """rescal""" +510 4 loss """softplus""" +510 4 regularizer """no""" +510 4 optimizer """adam""" +510 4 training_loop """lcwa""" +510 4 evaluator """rankbased""" +510 5 dataset """fb15k237""" +510 5 model """rescal""" +510 5 loss """softplus""" +510 5 regularizer """no""" +510 5 optimizer """adam""" +510 5 training_loop """lcwa""" +510 5 evaluator """rankbased""" +510 6 dataset """fb15k237""" +510 6 model """rescal""" +510 6 loss """softplus""" +510 6 regularizer """no""" +510 6 optimizer """adam""" +510 6 training_loop """lcwa""" +510 6 evaluator """rankbased""" +510 7 dataset """fb15k237""" +510 7 model """rescal""" +510 7 loss """softplus""" +510 7 regularizer """no""" +510 7 optimizer """adam""" +510 7 training_loop """lcwa""" +510 7 evaluator """rankbased""" +510 8 dataset """fb15k237""" +510 8 model """rescal""" +510 8 loss """softplus""" +510 8 regularizer """no""" +510 8 optimizer """adam""" +510 8 training_loop """lcwa""" +510 8 evaluator """rankbased""" +510 9 dataset """fb15k237""" +510 9 model """rescal""" +510 9 loss """softplus""" +510 9 regularizer """no""" +510 9 optimizer """adam""" +510 9 training_loop 
"""lcwa""" +510 9 evaluator """rankbased""" +510 10 dataset """fb15k237""" +510 10 model """rescal""" +510 10 loss """softplus""" +510 10 regularizer """no""" +510 10 optimizer """adam""" +510 10 training_loop """lcwa""" +510 10 evaluator """rankbased""" +510 11 dataset """fb15k237""" +510 11 model """rescal""" +510 11 loss """softplus""" +510 11 regularizer """no""" +510 11 optimizer """adam""" +510 11 training_loop """lcwa""" +510 11 evaluator """rankbased""" +510 12 dataset """fb15k237""" +510 12 model """rescal""" +510 12 loss """softplus""" +510 12 regularizer """no""" +510 12 optimizer """adam""" +510 12 training_loop """lcwa""" +510 12 evaluator """rankbased""" +510 13 dataset """fb15k237""" +510 13 model """rescal""" +510 13 loss """softplus""" +510 13 regularizer """no""" +510 13 optimizer """adam""" +510 13 training_loop """lcwa""" +510 13 evaluator """rankbased""" +510 14 dataset """fb15k237""" +510 14 model """rescal""" +510 14 loss """softplus""" +510 14 regularizer """no""" +510 14 optimizer """adam""" +510 14 training_loop """lcwa""" +510 14 evaluator """rankbased""" +510 15 dataset """fb15k237""" +510 15 model """rescal""" +510 15 loss """softplus""" +510 15 regularizer """no""" +510 15 optimizer """adam""" +510 15 training_loop """lcwa""" +510 15 evaluator """rankbased""" +510 16 dataset """fb15k237""" +510 16 model """rescal""" +510 16 loss """softplus""" +510 16 regularizer """no""" +510 16 optimizer """adam""" +510 16 training_loop """lcwa""" +510 16 evaluator """rankbased""" +510 17 dataset """fb15k237""" +510 17 model """rescal""" +510 17 loss """softplus""" +510 17 regularizer """no""" +510 17 optimizer """adam""" +510 17 training_loop """lcwa""" +510 17 evaluator """rankbased""" +510 18 dataset """fb15k237""" +510 18 model """rescal""" +510 18 loss """softplus""" +510 18 regularizer """no""" +510 18 optimizer """adam""" +510 18 training_loop """lcwa""" +510 18 evaluator """rankbased""" +510 19 dataset """fb15k237""" +510 19 model 
"""rescal""" +510 19 loss """softplus""" +510 19 regularizer """no""" +510 19 optimizer """adam""" +510 19 training_loop """lcwa""" +510 19 evaluator """rankbased""" +510 20 dataset """fb15k237""" +510 20 model """rescal""" +510 20 loss """softplus""" +510 20 regularizer """no""" +510 20 optimizer """adam""" +510 20 training_loop """lcwa""" +510 20 evaluator """rankbased""" +510 21 dataset """fb15k237""" +510 21 model """rescal""" +510 21 loss """softplus""" +510 21 regularizer """no""" +510 21 optimizer """adam""" +510 21 training_loop """lcwa""" +510 21 evaluator """rankbased""" +510 22 dataset """fb15k237""" +510 22 model """rescal""" +510 22 loss """softplus""" +510 22 regularizer """no""" +510 22 optimizer """adam""" +510 22 training_loop """lcwa""" +510 22 evaluator """rankbased""" +510 23 dataset """fb15k237""" +510 23 model """rescal""" +510 23 loss """softplus""" +510 23 regularizer """no""" +510 23 optimizer """adam""" +510 23 training_loop """lcwa""" +510 23 evaluator """rankbased""" +510 24 dataset """fb15k237""" +510 24 model """rescal""" +510 24 loss """softplus""" +510 24 regularizer """no""" +510 24 optimizer """adam""" +510 24 training_loop """lcwa""" +510 24 evaluator """rankbased""" +510 25 dataset """fb15k237""" +510 25 model """rescal""" +510 25 loss """softplus""" +510 25 regularizer """no""" +510 25 optimizer """adam""" +510 25 training_loop """lcwa""" +510 25 evaluator """rankbased""" +510 26 dataset """fb15k237""" +510 26 model """rescal""" +510 26 loss """softplus""" +510 26 regularizer """no""" +510 26 optimizer """adam""" +510 26 training_loop """lcwa""" +510 26 evaluator """rankbased""" +510 27 dataset """fb15k237""" +510 27 model """rescal""" +510 27 loss """softplus""" +510 27 regularizer """no""" +510 27 optimizer """adam""" +510 27 training_loop """lcwa""" +510 27 evaluator """rankbased""" +510 28 dataset """fb15k237""" +510 28 model """rescal""" +510 28 loss """softplus""" +510 28 regularizer """no""" +510 28 optimizer """adam""" 
+510 28 training_loop """lcwa""" +510 28 evaluator """rankbased""" +510 29 dataset """fb15k237""" +510 29 model """rescal""" +510 29 loss """softplus""" +510 29 regularizer """no""" +510 29 optimizer """adam""" +510 29 training_loop """lcwa""" +510 29 evaluator """rankbased""" +510 30 dataset """fb15k237""" +510 30 model """rescal""" +510 30 loss """softplus""" +510 30 regularizer """no""" +510 30 optimizer """adam""" +510 30 training_loop """lcwa""" +510 30 evaluator """rankbased""" +510 31 dataset """fb15k237""" +510 31 model """rescal""" +510 31 loss """softplus""" +510 31 regularizer """no""" +510 31 optimizer """adam""" +510 31 training_loop """lcwa""" +510 31 evaluator """rankbased""" +510 32 dataset """fb15k237""" +510 32 model """rescal""" +510 32 loss """softplus""" +510 32 regularizer """no""" +510 32 optimizer """adam""" +510 32 training_loop """lcwa""" +510 32 evaluator """rankbased""" +510 33 dataset """fb15k237""" +510 33 model """rescal""" +510 33 loss """softplus""" +510 33 regularizer """no""" +510 33 optimizer """adam""" +510 33 training_loop """lcwa""" +510 33 evaluator """rankbased""" +510 34 dataset """fb15k237""" +510 34 model """rescal""" +510 34 loss """softplus""" +510 34 regularizer """no""" +510 34 optimizer """adam""" +510 34 training_loop """lcwa""" +510 34 evaluator """rankbased""" +510 35 dataset """fb15k237""" +510 35 model """rescal""" +510 35 loss """softplus""" +510 35 regularizer """no""" +510 35 optimizer """adam""" +510 35 training_loop """lcwa""" +510 35 evaluator """rankbased""" +510 36 dataset """fb15k237""" +510 36 model """rescal""" +510 36 loss """softplus""" +510 36 regularizer """no""" +510 36 optimizer """adam""" +510 36 training_loop """lcwa""" +510 36 evaluator """rankbased""" +510 37 dataset """fb15k237""" +510 37 model """rescal""" +510 37 loss """softplus""" +510 37 regularizer """no""" +510 37 optimizer """adam""" +510 37 training_loop """lcwa""" +510 37 evaluator """rankbased""" +511 1 model.embedding_dim 2.0 
+511 1 optimizer.lr 0.0017010383005289112 +511 1 negative_sampler.num_negs_per_pos 84.0 +511 1 training.batch_size 2.0 +511 2 model.embedding_dim 2.0 +511 2 optimizer.lr 0.010397869407424059 +511 2 negative_sampler.num_negs_per_pos 90.0 +511 2 training.batch_size 2.0 +511 3 model.embedding_dim 2.0 +511 3 optimizer.lr 0.027158555291100242 +511 3 negative_sampler.num_negs_per_pos 62.0 +511 3 training.batch_size 0.0 +511 4 model.embedding_dim 0.0 +511 4 optimizer.lr 0.010006317067606178 +511 4 negative_sampler.num_negs_per_pos 18.0 +511 4 training.batch_size 2.0 +511 5 model.embedding_dim 2.0 +511 5 optimizer.lr 0.01003278887178206 +511 5 negative_sampler.num_negs_per_pos 69.0 +511 5 training.batch_size 0.0 +511 6 model.embedding_dim 0.0 +511 6 optimizer.lr 0.009396442272205145 +511 6 negative_sampler.num_negs_per_pos 12.0 +511 6 training.batch_size 2.0 +511 7 model.embedding_dim 2.0 +511 7 optimizer.lr 0.04040800856568493 +511 7 negative_sampler.num_negs_per_pos 54.0 +511 7 training.batch_size 2.0 +511 8 model.embedding_dim 2.0 +511 8 optimizer.lr 0.0011431173475990773 +511 8 negative_sampler.num_negs_per_pos 54.0 +511 8 training.batch_size 2.0 +511 9 model.embedding_dim 0.0 +511 9 optimizer.lr 0.0020303735446217507 +511 9 negative_sampler.num_negs_per_pos 25.0 +511 9 training.batch_size 2.0 +511 10 model.embedding_dim 1.0 +511 10 optimizer.lr 0.005398810673618992 +511 10 negative_sampler.num_negs_per_pos 8.0 +511 10 training.batch_size 2.0 +511 11 model.embedding_dim 0.0 +511 11 optimizer.lr 0.0022360553683963805 +511 11 negative_sampler.num_negs_per_pos 89.0 +511 11 training.batch_size 2.0 +511 1 dataset """fb15k237""" +511 1 model """rescal""" +511 1 loss """bceaftersigmoid""" +511 1 regularizer """no""" +511 1 optimizer """adam""" +511 1 training_loop """owa""" +511 1 negative_sampler """basic""" +511 1 evaluator """rankbased""" +511 2 dataset """fb15k237""" +511 2 model """rescal""" +511 2 loss """bceaftersigmoid""" +511 2 regularizer """no""" +511 2 optimizer 
"""adam""" +511 2 training_loop """owa""" +511 2 negative_sampler """basic""" +511 2 evaluator """rankbased""" +511 3 dataset """fb15k237""" +511 3 model """rescal""" +511 3 loss """bceaftersigmoid""" +511 3 regularizer """no""" +511 3 optimizer """adam""" +511 3 training_loop """owa""" +511 3 negative_sampler """basic""" +511 3 evaluator """rankbased""" +511 4 dataset """fb15k237""" +511 4 model """rescal""" +511 4 loss """bceaftersigmoid""" +511 4 regularizer """no""" +511 4 optimizer """adam""" +511 4 training_loop """owa""" +511 4 negative_sampler """basic""" +511 4 evaluator """rankbased""" +511 5 dataset """fb15k237""" +511 5 model """rescal""" +511 5 loss """bceaftersigmoid""" +511 5 regularizer """no""" +511 5 optimizer """adam""" +511 5 training_loop """owa""" +511 5 negative_sampler """basic""" +511 5 evaluator """rankbased""" +511 6 dataset """fb15k237""" +511 6 model """rescal""" +511 6 loss """bceaftersigmoid""" +511 6 regularizer """no""" +511 6 optimizer """adam""" +511 6 training_loop """owa""" +511 6 negative_sampler """basic""" +511 6 evaluator """rankbased""" +511 7 dataset """fb15k237""" +511 7 model """rescal""" +511 7 loss """bceaftersigmoid""" +511 7 regularizer """no""" +511 7 optimizer """adam""" +511 7 training_loop """owa""" +511 7 negative_sampler """basic""" +511 7 evaluator """rankbased""" +511 8 dataset """fb15k237""" +511 8 model """rescal""" +511 8 loss """bceaftersigmoid""" +511 8 regularizer """no""" +511 8 optimizer """adam""" +511 8 training_loop """owa""" +511 8 negative_sampler """basic""" +511 8 evaluator """rankbased""" +511 9 dataset """fb15k237""" +511 9 model """rescal""" +511 9 loss """bceaftersigmoid""" +511 9 regularizer """no""" +511 9 optimizer """adam""" +511 9 training_loop """owa""" +511 9 negative_sampler """basic""" +511 9 evaluator """rankbased""" +511 10 dataset """fb15k237""" +511 10 model """rescal""" +511 10 loss """bceaftersigmoid""" +511 10 regularizer """no""" +511 10 optimizer """adam""" +511 10 
training_loop """owa""" +511 10 negative_sampler """basic""" +511 10 evaluator """rankbased""" +511 11 dataset """fb15k237""" +511 11 model """rescal""" +511 11 loss """bceaftersigmoid""" +511 11 regularizer """no""" +511 11 optimizer """adam""" +511 11 training_loop """owa""" +511 11 negative_sampler """basic""" +511 11 evaluator """rankbased""" +512 1 model.embedding_dim 0.0 +512 1 optimizer.lr 0.0010575312625667273 +512 1 negative_sampler.num_negs_per_pos 37.0 +512 1 training.batch_size 0.0 +512 2 model.embedding_dim 1.0 +512 2 optimizer.lr 0.0022923086292470614 +512 2 negative_sampler.num_negs_per_pos 80.0 +512 2 training.batch_size 1.0 +512 3 model.embedding_dim 0.0 +512 3 optimizer.lr 0.03396060074560811 +512 3 negative_sampler.num_negs_per_pos 33.0 +512 3 training.batch_size 1.0 +512 4 model.embedding_dim 2.0 +512 4 optimizer.lr 0.04154984220134498 +512 4 negative_sampler.num_negs_per_pos 1.0 +512 4 training.batch_size 0.0 +512 5 model.embedding_dim 2.0 +512 5 optimizer.lr 0.005947463637121174 +512 5 negative_sampler.num_negs_per_pos 81.0 +512 5 training.batch_size 0.0 +512 6 model.embedding_dim 2.0 +512 6 optimizer.lr 0.001111775947151172 +512 6 negative_sampler.num_negs_per_pos 80.0 +512 6 training.batch_size 0.0 +512 1 dataset """fb15k237""" +512 1 model """rescal""" +512 1 loss """softplus""" +512 1 regularizer """no""" +512 1 optimizer """adam""" +512 1 training_loop """owa""" +512 1 negative_sampler """basic""" +512 1 evaluator """rankbased""" +512 2 dataset """fb15k237""" +512 2 model """rescal""" +512 2 loss """softplus""" +512 2 regularizer """no""" +512 2 optimizer """adam""" +512 2 training_loop """owa""" +512 2 negative_sampler """basic""" +512 2 evaluator """rankbased""" +512 3 dataset """fb15k237""" +512 3 model """rescal""" +512 3 loss """softplus""" +512 3 regularizer """no""" +512 3 optimizer """adam""" +512 3 training_loop """owa""" +512 3 negative_sampler """basic""" +512 3 evaluator """rankbased""" +512 4 dataset """fb15k237""" +512 4 
model """rescal""" +512 4 loss """softplus""" +512 4 regularizer """no""" +512 4 optimizer """adam""" +512 4 training_loop """owa""" +512 4 negative_sampler """basic""" +512 4 evaluator """rankbased""" +512 5 dataset """fb15k237""" +512 5 model """rescal""" +512 5 loss """softplus""" +512 5 regularizer """no""" +512 5 optimizer """adam""" +512 5 training_loop """owa""" +512 5 negative_sampler """basic""" +512 5 evaluator """rankbased""" +512 6 dataset """fb15k237""" +512 6 model """rescal""" +512 6 loss """softplus""" +512 6 regularizer """no""" +512 6 optimizer """adam""" +512 6 training_loop """owa""" +512 6 negative_sampler """basic""" +512 6 evaluator """rankbased""" +513 1 model.embedding_dim 1.0 +513 1 optimizer.lr 0.001221261050686078 +513 1 negative_sampler.num_negs_per_pos 7.0 +513 1 training.batch_size 2.0 +513 2 model.embedding_dim 2.0 +513 2 optimizer.lr 0.021692844559635473 +513 2 negative_sampler.num_negs_per_pos 45.0 +513 2 training.batch_size 1.0 +513 3 model.embedding_dim 0.0 +513 3 optimizer.lr 0.008010195335260694 +513 3 negative_sampler.num_negs_per_pos 11.0 +513 3 training.batch_size 1.0 +513 4 model.embedding_dim 2.0 +513 4 optimizer.lr 0.004768883938724851 +513 4 negative_sampler.num_negs_per_pos 71.0 +513 4 training.batch_size 0.0 +513 5 model.embedding_dim 2.0 +513 5 optimizer.lr 0.0023000419438278658 +513 5 negative_sampler.num_negs_per_pos 29.0 +513 5 training.batch_size 1.0 +513 6 model.embedding_dim 0.0 +513 6 optimizer.lr 0.04984323507202076 +513 6 negative_sampler.num_negs_per_pos 31.0 +513 6 training.batch_size 1.0 +513 7 model.embedding_dim 0.0 +513 7 optimizer.lr 0.01722238462901614 +513 7 negative_sampler.num_negs_per_pos 75.0 +513 7 training.batch_size 0.0 +513 8 model.embedding_dim 2.0 +513 8 optimizer.lr 0.0024057428923927645 +513 8 negative_sampler.num_negs_per_pos 94.0 +513 8 training.batch_size 0.0 +513 9 model.embedding_dim 2.0 +513 9 optimizer.lr 0.07703515720532846 +513 9 negative_sampler.num_negs_per_pos 68.0 +513 9 
training.batch_size 0.0 +513 10 model.embedding_dim 1.0 +513 10 optimizer.lr 0.06708629922003322 +513 10 negative_sampler.num_negs_per_pos 79.0 +513 10 training.batch_size 2.0 +513 11 model.embedding_dim 1.0 +513 11 optimizer.lr 0.02392941653663121 +513 11 negative_sampler.num_negs_per_pos 59.0 +513 11 training.batch_size 2.0 +513 12 model.embedding_dim 0.0 +513 12 optimizer.lr 0.0019434861719542856 +513 12 negative_sampler.num_negs_per_pos 46.0 +513 12 training.batch_size 1.0 +513 13 model.embedding_dim 1.0 +513 13 optimizer.lr 0.004950897006843956 +513 13 negative_sampler.num_negs_per_pos 36.0 +513 13 training.batch_size 0.0 +513 14 model.embedding_dim 1.0 +513 14 optimizer.lr 0.020816003468453866 +513 14 negative_sampler.num_negs_per_pos 11.0 +513 14 training.batch_size 1.0 +513 15 model.embedding_dim 1.0 +513 15 optimizer.lr 0.006053733024463235 +513 15 negative_sampler.num_negs_per_pos 74.0 +513 15 training.batch_size 2.0 +513 16 model.embedding_dim 1.0 +513 16 optimizer.lr 0.001766446780511333 +513 16 negative_sampler.num_negs_per_pos 20.0 +513 16 training.batch_size 1.0 +513 17 model.embedding_dim 1.0 +513 17 optimizer.lr 0.017518831502558587 +513 17 negative_sampler.num_negs_per_pos 48.0 +513 17 training.batch_size 1.0 +513 18 model.embedding_dim 2.0 +513 18 optimizer.lr 0.0023457275945171804 +513 18 negative_sampler.num_negs_per_pos 20.0 +513 18 training.batch_size 1.0 +513 19 model.embedding_dim 0.0 +513 19 optimizer.lr 0.04942952550108293 +513 19 negative_sampler.num_negs_per_pos 34.0 +513 19 training.batch_size 1.0 +513 20 model.embedding_dim 1.0 +513 20 optimizer.lr 0.0027247544641802886 +513 20 negative_sampler.num_negs_per_pos 63.0 +513 20 training.batch_size 0.0 +513 21 model.embedding_dim 2.0 +513 21 optimizer.lr 0.008442928797703968 +513 21 negative_sampler.num_negs_per_pos 54.0 +513 21 training.batch_size 2.0 +513 22 model.embedding_dim 1.0 +513 22 optimizer.lr 0.001024433384743088 +513 22 negative_sampler.num_negs_per_pos 5.0 +513 22 
training.batch_size 2.0 +513 23 model.embedding_dim 0.0 +513 23 optimizer.lr 0.07469098506704984 +513 23 negative_sampler.num_negs_per_pos 0.0 +513 23 training.batch_size 1.0 +513 24 model.embedding_dim 0.0 +513 24 optimizer.lr 0.04959428144922129 +513 24 negative_sampler.num_negs_per_pos 74.0 +513 24 training.batch_size 2.0 +513 25 model.embedding_dim 0.0 +513 25 optimizer.lr 0.02110457237113236 +513 25 negative_sampler.num_negs_per_pos 83.0 +513 25 training.batch_size 0.0 +513 26 model.embedding_dim 1.0 +513 26 optimizer.lr 0.021563234278825512 +513 26 negative_sampler.num_negs_per_pos 96.0 +513 26 training.batch_size 1.0 +513 27 model.embedding_dim 2.0 +513 27 optimizer.lr 0.022544412321451924 +513 27 negative_sampler.num_negs_per_pos 69.0 +513 27 training.batch_size 2.0 +513 28 model.embedding_dim 1.0 +513 28 optimizer.lr 0.0015515016984700787 +513 28 negative_sampler.num_negs_per_pos 58.0 +513 28 training.batch_size 2.0 +513 29 model.embedding_dim 1.0 +513 29 optimizer.lr 0.0014323481773880426 +513 29 negative_sampler.num_negs_per_pos 93.0 +513 29 training.batch_size 2.0 +513 1 dataset """fb15k237""" +513 1 model """rescal""" +513 1 loss """bceaftersigmoid""" +513 1 regularizer """no""" +513 1 optimizer """adam""" +513 1 training_loop """owa""" +513 1 negative_sampler """basic""" +513 1 evaluator """rankbased""" +513 2 dataset """fb15k237""" +513 2 model """rescal""" +513 2 loss """bceaftersigmoid""" +513 2 regularizer """no""" +513 2 optimizer """adam""" +513 2 training_loop """owa""" +513 2 negative_sampler """basic""" +513 2 evaluator """rankbased""" +513 3 dataset """fb15k237""" +513 3 model """rescal""" +513 3 loss """bceaftersigmoid""" +513 3 regularizer """no""" +513 3 optimizer """adam""" +513 3 training_loop """owa""" +513 3 negative_sampler """basic""" +513 3 evaluator """rankbased""" +513 4 dataset """fb15k237""" +513 4 model """rescal""" +513 4 loss """bceaftersigmoid""" +513 4 regularizer """no""" +513 4 optimizer """adam""" +513 4 training_loop 
"""owa""" +513 4 negative_sampler """basic""" +513 4 evaluator """rankbased""" +513 5 dataset """fb15k237""" +513 5 model """rescal""" +513 5 loss """bceaftersigmoid""" +513 5 regularizer """no""" +513 5 optimizer """adam""" +513 5 training_loop """owa""" +513 5 negative_sampler """basic""" +513 5 evaluator """rankbased""" +513 6 dataset """fb15k237""" +513 6 model """rescal""" +513 6 loss """bceaftersigmoid""" +513 6 regularizer """no""" +513 6 optimizer """adam""" +513 6 training_loop """owa""" +513 6 negative_sampler """basic""" +513 6 evaluator """rankbased""" +513 7 dataset """fb15k237""" +513 7 model """rescal""" +513 7 loss """bceaftersigmoid""" +513 7 regularizer """no""" +513 7 optimizer """adam""" +513 7 training_loop """owa""" +513 7 negative_sampler """basic""" +513 7 evaluator """rankbased""" +513 8 dataset """fb15k237""" +513 8 model """rescal""" +513 8 loss """bceaftersigmoid""" +513 8 regularizer """no""" +513 8 optimizer """adam""" +513 8 training_loop """owa""" +513 8 negative_sampler """basic""" +513 8 evaluator """rankbased""" +513 9 dataset """fb15k237""" +513 9 model """rescal""" +513 9 loss """bceaftersigmoid""" +513 9 regularizer """no""" +513 9 optimizer """adam""" +513 9 training_loop """owa""" +513 9 negative_sampler """basic""" +513 9 evaluator """rankbased""" +513 10 dataset """fb15k237""" +513 10 model """rescal""" +513 10 loss """bceaftersigmoid""" +513 10 regularizer """no""" +513 10 optimizer """adam""" +513 10 training_loop """owa""" +513 10 negative_sampler """basic""" +513 10 evaluator """rankbased""" +513 11 dataset """fb15k237""" +513 11 model """rescal""" +513 11 loss """bceaftersigmoid""" +513 11 regularizer """no""" +513 11 optimizer """adam""" +513 11 training_loop """owa""" +513 11 negative_sampler """basic""" +513 11 evaluator """rankbased""" +513 12 dataset """fb15k237""" +513 12 model """rescal""" +513 12 loss """bceaftersigmoid""" +513 12 regularizer """no""" +513 12 optimizer """adam""" +513 12 training_loop """owa""" 
+513 12 negative_sampler """basic""" +513 12 evaluator """rankbased""" +513 13 dataset """fb15k237""" +513 13 model """rescal""" +513 13 loss """bceaftersigmoid""" +513 13 regularizer """no""" +513 13 optimizer """adam""" +513 13 training_loop """owa""" +513 13 negative_sampler """basic""" +513 13 evaluator """rankbased""" +513 14 dataset """fb15k237""" +513 14 model """rescal""" +513 14 loss """bceaftersigmoid""" +513 14 regularizer """no""" +513 14 optimizer """adam""" +513 14 training_loop """owa""" +513 14 negative_sampler """basic""" +513 14 evaluator """rankbased""" +513 15 dataset """fb15k237""" +513 15 model """rescal""" +513 15 loss """bceaftersigmoid""" +513 15 regularizer """no""" +513 15 optimizer """adam""" +513 15 training_loop """owa""" +513 15 negative_sampler """basic""" +513 15 evaluator """rankbased""" +513 16 dataset """fb15k237""" +513 16 model """rescal""" +513 16 loss """bceaftersigmoid""" +513 16 regularizer """no""" +513 16 optimizer """adam""" +513 16 training_loop """owa""" +513 16 negative_sampler """basic""" +513 16 evaluator """rankbased""" +513 17 dataset """fb15k237""" +513 17 model """rescal""" +513 17 loss """bceaftersigmoid""" +513 17 regularizer """no""" +513 17 optimizer """adam""" +513 17 training_loop """owa""" +513 17 negative_sampler """basic""" +513 17 evaluator """rankbased""" +513 18 dataset """fb15k237""" +513 18 model """rescal""" +513 18 loss """bceaftersigmoid""" +513 18 regularizer """no""" +513 18 optimizer """adam""" +513 18 training_loop """owa""" +513 18 negative_sampler """basic""" +513 18 evaluator """rankbased""" +513 19 dataset """fb15k237""" +513 19 model """rescal""" +513 19 loss """bceaftersigmoid""" +513 19 regularizer """no""" +513 19 optimizer """adam""" +513 19 training_loop """owa""" +513 19 negative_sampler """basic""" +513 19 evaluator """rankbased""" +513 20 dataset """fb15k237""" +513 20 model """rescal""" +513 20 loss """bceaftersigmoid""" +513 20 regularizer """no""" +513 20 optimizer """adam""" 
+513 20 training_loop """owa""" +513 20 negative_sampler """basic""" +513 20 evaluator """rankbased""" +513 21 dataset """fb15k237""" +513 21 model """rescal""" +513 21 loss """bceaftersigmoid""" +513 21 regularizer """no""" +513 21 optimizer """adam""" +513 21 training_loop """owa""" +513 21 negative_sampler """basic""" +513 21 evaluator """rankbased""" +513 22 dataset """fb15k237""" +513 22 model """rescal""" +513 22 loss """bceaftersigmoid""" +513 22 regularizer """no""" +513 22 optimizer """adam""" +513 22 training_loop """owa""" +513 22 negative_sampler """basic""" +513 22 evaluator """rankbased""" +513 23 dataset """fb15k237""" +513 23 model """rescal""" +513 23 loss """bceaftersigmoid""" +513 23 regularizer """no""" +513 23 optimizer """adam""" +513 23 training_loop """owa""" +513 23 negative_sampler """basic""" +513 23 evaluator """rankbased""" +513 24 dataset """fb15k237""" +513 24 model """rescal""" +513 24 loss """bceaftersigmoid""" +513 24 regularizer """no""" +513 24 optimizer """adam""" +513 24 training_loop """owa""" +513 24 negative_sampler """basic""" +513 24 evaluator """rankbased""" +513 25 dataset """fb15k237""" +513 25 model """rescal""" +513 25 loss """bceaftersigmoid""" +513 25 regularizer """no""" +513 25 optimizer """adam""" +513 25 training_loop """owa""" +513 25 negative_sampler """basic""" +513 25 evaluator """rankbased""" +513 26 dataset """fb15k237""" +513 26 model """rescal""" +513 26 loss """bceaftersigmoid""" +513 26 regularizer """no""" +513 26 optimizer """adam""" +513 26 training_loop """owa""" +513 26 negative_sampler """basic""" +513 26 evaluator """rankbased""" +513 27 dataset """fb15k237""" +513 27 model """rescal""" +513 27 loss """bceaftersigmoid""" +513 27 regularizer """no""" +513 27 optimizer """adam""" +513 27 training_loop """owa""" +513 27 negative_sampler """basic""" +513 27 evaluator """rankbased""" +513 28 dataset """fb15k237""" +513 28 model """rescal""" +513 28 loss """bceaftersigmoid""" +513 28 regularizer 
"""no""" +513 28 optimizer """adam""" +513 28 training_loop """owa""" +513 28 negative_sampler """basic""" +513 28 evaluator """rankbased""" +513 29 dataset """fb15k237""" +513 29 model """rescal""" +513 29 loss """bceaftersigmoid""" +513 29 regularizer """no""" +513 29 optimizer """adam""" +513 29 training_loop """owa""" +513 29 negative_sampler """basic""" +513 29 evaluator """rankbased""" +514 1 model.embedding_dim 1.0 +514 1 optimizer.lr 0.05603711404334482 +514 1 negative_sampler.num_negs_per_pos 16.0 +514 1 training.batch_size 2.0 +514 2 model.embedding_dim 2.0 +514 2 optimizer.lr 0.015170789370038506 +514 2 negative_sampler.num_negs_per_pos 38.0 +514 2 training.batch_size 0.0 +514 3 model.embedding_dim 2.0 +514 3 optimizer.lr 0.01622560199799221 +514 3 negative_sampler.num_negs_per_pos 52.0 +514 3 training.batch_size 1.0 +514 4 model.embedding_dim 1.0 +514 4 optimizer.lr 0.0018655116220357097 +514 4 negative_sampler.num_negs_per_pos 89.0 +514 4 training.batch_size 2.0 +514 5 model.embedding_dim 0.0 +514 5 optimizer.lr 0.019420029084213395 +514 5 negative_sampler.num_negs_per_pos 88.0 +514 5 training.batch_size 0.0 +514 6 model.embedding_dim 2.0 +514 6 optimizer.lr 0.005499268307056495 +514 6 negative_sampler.num_negs_per_pos 99.0 +514 6 training.batch_size 0.0 +514 7 model.embedding_dim 0.0 +514 7 optimizer.lr 0.008724186180170361 +514 7 negative_sampler.num_negs_per_pos 49.0 +514 7 training.batch_size 0.0 +514 8 model.embedding_dim 1.0 +514 8 optimizer.lr 0.018595456512255085 +514 8 negative_sampler.num_negs_per_pos 62.0 +514 8 training.batch_size 1.0 +514 9 model.embedding_dim 2.0 +514 9 optimizer.lr 0.009477301665658452 +514 9 negative_sampler.num_negs_per_pos 32.0 +514 9 training.batch_size 0.0 +514 1 dataset """fb15k237""" +514 1 model """rescal""" +514 1 loss """softplus""" +514 1 regularizer """no""" +514 1 optimizer """adam""" +514 1 training_loop """owa""" +514 1 negative_sampler """basic""" +514 1 evaluator """rankbased""" +514 2 dataset 
"""fb15k237""" +514 2 model """rescal""" +514 2 loss """softplus""" +514 2 regularizer """no""" +514 2 optimizer """adam""" +514 2 training_loop """owa""" +514 2 negative_sampler """basic""" +514 2 evaluator """rankbased""" +514 3 dataset """fb15k237""" +514 3 model """rescal""" +514 3 loss """softplus""" +514 3 regularizer """no""" +514 3 optimizer """adam""" +514 3 training_loop """owa""" +514 3 negative_sampler """basic""" +514 3 evaluator """rankbased""" +514 4 dataset """fb15k237""" +514 4 model """rescal""" +514 4 loss """softplus""" +514 4 regularizer """no""" +514 4 optimizer """adam""" +514 4 training_loop """owa""" +514 4 negative_sampler """basic""" +514 4 evaluator """rankbased""" +514 5 dataset """fb15k237""" +514 5 model """rescal""" +514 5 loss """softplus""" +514 5 regularizer """no""" +514 5 optimizer """adam""" +514 5 training_loop """owa""" +514 5 negative_sampler """basic""" +514 5 evaluator """rankbased""" +514 6 dataset """fb15k237""" +514 6 model """rescal""" +514 6 loss """softplus""" +514 6 regularizer """no""" +514 6 optimizer """adam""" +514 6 training_loop """owa""" +514 6 negative_sampler """basic""" +514 6 evaluator """rankbased""" +514 7 dataset """fb15k237""" +514 7 model """rescal""" +514 7 loss """softplus""" +514 7 regularizer """no""" +514 7 optimizer """adam""" +514 7 training_loop """owa""" +514 7 negative_sampler """basic""" +514 7 evaluator """rankbased""" +514 8 dataset """fb15k237""" +514 8 model """rescal""" +514 8 loss """softplus""" +514 8 regularizer """no""" +514 8 optimizer """adam""" +514 8 training_loop """owa""" +514 8 negative_sampler """basic""" +514 8 evaluator """rankbased""" +514 9 dataset """fb15k237""" +514 9 model """rescal""" +514 9 loss """softplus""" +514 9 regularizer """no""" +514 9 optimizer """adam""" +514 9 training_loop """owa""" +514 9 negative_sampler """basic""" +514 9 evaluator """rankbased""" +515 1 model.embedding_dim 1.0 +515 1 optimizer.lr 0.007282442958914506 +515 1 training.batch_size 0.0 
+515 1 training.label_smoothing 0.010005830981132688 +515 2 model.embedding_dim 1.0 +515 2 optimizer.lr 0.03026115432004682 +515 2 training.batch_size 0.0 +515 2 training.label_smoothing 0.0016522287841370122 +515 3 model.embedding_dim 0.0 +515 3 optimizer.lr 0.009314115770111393 +515 3 training.batch_size 0.0 +515 3 training.label_smoothing 0.04506049066266433 +515 4 model.embedding_dim 2.0 +515 4 optimizer.lr 0.003442653313243005 +515 4 training.batch_size 2.0 +515 4 training.label_smoothing 0.011161012837964961 +515 5 model.embedding_dim 1.0 +515 5 optimizer.lr 0.003092363800552886 +515 5 training.batch_size 2.0 +515 5 training.label_smoothing 0.001467923829170377 +515 6 model.embedding_dim 0.0 +515 6 optimizer.lr 0.0211812659673684 +515 6 training.batch_size 2.0 +515 6 training.label_smoothing 0.0061899955932592145 +515 7 model.embedding_dim 1.0 +515 7 optimizer.lr 0.020181220347231268 +515 7 training.batch_size 2.0 +515 7 training.label_smoothing 0.0013655202110093672 +515 8 model.embedding_dim 0.0 +515 8 optimizer.lr 0.025768022690269034 +515 8 training.batch_size 1.0 +515 8 training.label_smoothing 0.05716021571741394 +515 9 model.embedding_dim 1.0 +515 9 optimizer.lr 0.021814488874600087 +515 9 training.batch_size 2.0 +515 9 training.label_smoothing 0.003787524725137225 +515 10 model.embedding_dim 0.0 +515 10 optimizer.lr 0.08231402844197834 +515 10 training.batch_size 1.0 +515 10 training.label_smoothing 0.021687547235911618 +515 11 model.embedding_dim 1.0 +515 11 optimizer.lr 0.0612598963591262 +515 11 training.batch_size 0.0 +515 11 training.label_smoothing 0.6388577800397873 +515 12 model.embedding_dim 2.0 +515 12 optimizer.lr 0.08435120604292022 +515 12 training.batch_size 2.0 +515 12 training.label_smoothing 0.8089796481342093 +515 1 dataset """fb15k237""" +515 1 model """rescal""" +515 1 loss """crossentropy""" +515 1 regularizer """no""" +515 1 optimizer """adam""" +515 1 training_loop """lcwa""" +515 1 evaluator """rankbased""" +515 2 dataset 
"""fb15k237""" +515 2 model """rescal""" +515 2 loss """crossentropy""" +515 2 regularizer """no""" +515 2 optimizer """adam""" +515 2 training_loop """lcwa""" +515 2 evaluator """rankbased""" +515 3 dataset """fb15k237""" +515 3 model """rescal""" +515 3 loss """crossentropy""" +515 3 regularizer """no""" +515 3 optimizer """adam""" +515 3 training_loop """lcwa""" +515 3 evaluator """rankbased""" +515 4 dataset """fb15k237""" +515 4 model """rescal""" +515 4 loss """crossentropy""" +515 4 regularizer """no""" +515 4 optimizer """adam""" +515 4 training_loop """lcwa""" +515 4 evaluator """rankbased""" +515 5 dataset """fb15k237""" +515 5 model """rescal""" +515 5 loss """crossentropy""" +515 5 regularizer """no""" +515 5 optimizer """adam""" +515 5 training_loop """lcwa""" +515 5 evaluator """rankbased""" +515 6 dataset """fb15k237""" +515 6 model """rescal""" +515 6 loss """crossentropy""" +515 6 regularizer """no""" +515 6 optimizer """adam""" +515 6 training_loop """lcwa""" +515 6 evaluator """rankbased""" +515 7 dataset """fb15k237""" +515 7 model """rescal""" +515 7 loss """crossentropy""" +515 7 regularizer """no""" +515 7 optimizer """adam""" +515 7 training_loop """lcwa""" +515 7 evaluator """rankbased""" +515 8 dataset """fb15k237""" +515 8 model """rescal""" +515 8 loss """crossentropy""" +515 8 regularizer """no""" +515 8 optimizer """adam""" +515 8 training_loop """lcwa""" +515 8 evaluator """rankbased""" +515 9 dataset """fb15k237""" +515 9 model """rescal""" +515 9 loss """crossentropy""" +515 9 regularizer """no""" +515 9 optimizer """adam""" +515 9 training_loop """lcwa""" +515 9 evaluator """rankbased""" +515 10 dataset """fb15k237""" +515 10 model """rescal""" +515 10 loss """crossentropy""" +515 10 regularizer """no""" +515 10 optimizer """adam""" +515 10 training_loop """lcwa""" +515 10 evaluator """rankbased""" +515 11 dataset """fb15k237""" +515 11 model """rescal""" +515 11 loss """crossentropy""" +515 11 regularizer """no""" +515 11 
optimizer """adam""" +515 11 training_loop """lcwa""" +515 11 evaluator """rankbased""" +515 12 dataset """fb15k237""" +515 12 model """rescal""" +515 12 loss """crossentropy""" +515 12 regularizer """no""" +515 12 optimizer """adam""" +515 12 training_loop """lcwa""" +515 12 evaluator """rankbased""" +516 1 model.embedding_dim 2.0 +516 1 optimizer.lr 0.03655091479539402 +516 1 training.batch_size 1.0 +516 1 training.label_smoothing 0.2248952631147477 +516 2 model.embedding_dim 1.0 +516 2 optimizer.lr 0.0031087258646201855 +516 2 training.batch_size 1.0 +516 2 training.label_smoothing 0.09221322757866234 +516 3 model.embedding_dim 1.0 +516 3 optimizer.lr 0.0011364659457703257 +516 3 training.batch_size 2.0 +516 3 training.label_smoothing 0.0018384828816853974 +516 4 model.embedding_dim 2.0 +516 4 optimizer.lr 0.05051592813815449 +516 4 training.batch_size 2.0 +516 4 training.label_smoothing 0.35123032177794533 +516 5 model.embedding_dim 0.0 +516 5 optimizer.lr 0.028164352585492106 +516 5 training.batch_size 2.0 +516 5 training.label_smoothing 0.03324433096561371 +516 6 model.embedding_dim 0.0 +516 6 optimizer.lr 0.05626932184648397 +516 6 training.batch_size 1.0 +516 6 training.label_smoothing 0.0018744560974178 +516 7 model.embedding_dim 2.0 +516 7 optimizer.lr 0.031995242736422014 +516 7 training.batch_size 2.0 +516 7 training.label_smoothing 0.04483945756840746 +516 8 model.embedding_dim 1.0 +516 8 optimizer.lr 0.008449153602431125 +516 8 training.batch_size 2.0 +516 8 training.label_smoothing 0.015210341503285764 +516 9 model.embedding_dim 1.0 +516 9 optimizer.lr 0.046644279406488624 +516 9 training.batch_size 0.0 +516 9 training.label_smoothing 0.001962308839563918 +516 10 model.embedding_dim 0.0 +516 10 optimizer.lr 0.043777705824256795 +516 10 training.batch_size 1.0 +516 10 training.label_smoothing 0.042210660039614864 +516 11 model.embedding_dim 0.0 +516 11 optimizer.lr 0.014457227998190526 +516 11 training.batch_size 1.0 +516 11 training.label_smoothing 
0.45893843053349664 +516 12 model.embedding_dim 0.0 +516 12 optimizer.lr 0.007782152578817511 +516 12 training.batch_size 1.0 +516 12 training.label_smoothing 0.04930454156484194 +516 13 model.embedding_dim 0.0 +516 13 optimizer.lr 0.07011251702129563 +516 13 training.batch_size 2.0 +516 13 training.label_smoothing 0.25968545923664393 +516 14 model.embedding_dim 2.0 +516 14 optimizer.lr 0.023254918529466283 +516 14 training.batch_size 0.0 +516 14 training.label_smoothing 0.002618993845275537 +516 15 model.embedding_dim 0.0 +516 15 optimizer.lr 0.005874294222898741 +516 15 training.batch_size 2.0 +516 15 training.label_smoothing 0.13157123294934253 +516 16 model.embedding_dim 0.0 +516 16 optimizer.lr 0.029472731931976946 +516 16 training.batch_size 0.0 +516 16 training.label_smoothing 0.017692306343479094 +516 17 model.embedding_dim 2.0 +516 17 optimizer.lr 0.010444461966306787 +516 17 training.batch_size 1.0 +516 17 training.label_smoothing 0.2804998523695637 +516 18 model.embedding_dim 1.0 +516 18 optimizer.lr 0.0019180485416366092 +516 18 training.batch_size 2.0 +516 18 training.label_smoothing 0.9406988843711626 +516 19 model.embedding_dim 0.0 +516 19 optimizer.lr 0.0712557682855686 +516 19 training.batch_size 2.0 +516 19 training.label_smoothing 0.6999698048909067 +516 20 model.embedding_dim 2.0 +516 20 optimizer.lr 0.0024416733371156983 +516 20 training.batch_size 1.0 +516 20 training.label_smoothing 0.0038283264479354283 +516 1 dataset """fb15k237""" +516 1 model """rescal""" +516 1 loss """crossentropy""" +516 1 regularizer """no""" +516 1 optimizer """adam""" +516 1 training_loop """lcwa""" +516 1 evaluator """rankbased""" +516 2 dataset """fb15k237""" +516 2 model """rescal""" +516 2 loss """crossentropy""" +516 2 regularizer """no""" +516 2 optimizer """adam""" +516 2 training_loop """lcwa""" +516 2 evaluator """rankbased""" +516 3 dataset """fb15k237""" +516 3 model """rescal""" +516 3 loss """crossentropy""" +516 3 regularizer """no""" +516 3 optimizer 
"""adam""" +516 3 training_loop """lcwa""" +516 3 evaluator """rankbased""" +516 4 dataset """fb15k237""" +516 4 model """rescal""" +516 4 loss """crossentropy""" +516 4 regularizer """no""" +516 4 optimizer """adam""" +516 4 training_loop """lcwa""" +516 4 evaluator """rankbased""" +516 5 dataset """fb15k237""" +516 5 model """rescal""" +516 5 loss """crossentropy""" +516 5 regularizer """no""" +516 5 optimizer """adam""" +516 5 training_loop """lcwa""" +516 5 evaluator """rankbased""" +516 6 dataset """fb15k237""" +516 6 model """rescal""" +516 6 loss """crossentropy""" +516 6 regularizer """no""" +516 6 optimizer """adam""" +516 6 training_loop """lcwa""" +516 6 evaluator """rankbased""" +516 7 dataset """fb15k237""" +516 7 model """rescal""" +516 7 loss """crossentropy""" +516 7 regularizer """no""" +516 7 optimizer """adam""" +516 7 training_loop """lcwa""" +516 7 evaluator """rankbased""" +516 8 dataset """fb15k237""" +516 8 model """rescal""" +516 8 loss """crossentropy""" +516 8 regularizer """no""" +516 8 optimizer """adam""" +516 8 training_loop """lcwa""" +516 8 evaluator """rankbased""" +516 9 dataset """fb15k237""" +516 9 model """rescal""" +516 9 loss """crossentropy""" +516 9 regularizer """no""" +516 9 optimizer """adam""" +516 9 training_loop """lcwa""" +516 9 evaluator """rankbased""" +516 10 dataset """fb15k237""" +516 10 model """rescal""" +516 10 loss """crossentropy""" +516 10 regularizer """no""" +516 10 optimizer """adam""" +516 10 training_loop """lcwa""" +516 10 evaluator """rankbased""" +516 11 dataset """fb15k237""" +516 11 model """rescal""" +516 11 loss """crossentropy""" +516 11 regularizer """no""" +516 11 optimizer """adam""" +516 11 training_loop """lcwa""" +516 11 evaluator """rankbased""" +516 12 dataset """fb15k237""" +516 12 model """rescal""" +516 12 loss """crossentropy""" +516 12 regularizer """no""" +516 12 optimizer """adam""" +516 12 training_loop """lcwa""" +516 12 evaluator """rankbased""" +516 13 dataset """fb15k237""" 
+516 13 model """rescal""" +516 13 loss """crossentropy""" +516 13 regularizer """no""" +516 13 optimizer """adam""" +516 13 training_loop """lcwa""" +516 13 evaluator """rankbased""" +516 14 dataset """fb15k237""" +516 14 model """rescal""" +516 14 loss """crossentropy""" +516 14 regularizer """no""" +516 14 optimizer """adam""" +516 14 training_loop """lcwa""" +516 14 evaluator """rankbased""" +516 15 dataset """fb15k237""" +516 15 model """rescal""" +516 15 loss """crossentropy""" +516 15 regularizer """no""" +516 15 optimizer """adam""" +516 15 training_loop """lcwa""" +516 15 evaluator """rankbased""" +516 16 dataset """fb15k237""" +516 16 model """rescal""" +516 16 loss """crossentropy""" +516 16 regularizer """no""" +516 16 optimizer """adam""" +516 16 training_loop """lcwa""" +516 16 evaluator """rankbased""" +516 17 dataset """fb15k237""" +516 17 model """rescal""" +516 17 loss """crossentropy""" +516 17 regularizer """no""" +516 17 optimizer """adam""" +516 17 training_loop """lcwa""" +516 17 evaluator """rankbased""" +516 18 dataset """fb15k237""" +516 18 model """rescal""" +516 18 loss """crossentropy""" +516 18 regularizer """no""" +516 18 optimizer """adam""" +516 18 training_loop """lcwa""" +516 18 evaluator """rankbased""" +516 19 dataset """fb15k237""" +516 19 model """rescal""" +516 19 loss """crossentropy""" +516 19 regularizer """no""" +516 19 optimizer """adam""" +516 19 training_loop """lcwa""" +516 19 evaluator """rankbased""" +516 20 dataset """fb15k237""" +516 20 model """rescal""" +516 20 loss """crossentropy""" +516 20 regularizer """no""" +516 20 optimizer """adam""" +516 20 training_loop """lcwa""" +516 20 evaluator """rankbased""" +517 1 model.embedding_dim 0.0 +517 1 loss.margin 6.784520594658973 +517 1 loss.adversarial_temperature 0.12221773787096141 +517 1 optimizer.lr 0.07979273398401483 +517 1 negative_sampler.num_negs_per_pos 70.0 +517 1 training.batch_size 1.0 +517 2 model.embedding_dim 2.0 +517 2 loss.margin 7.349105204237312 
+517 2 loss.adversarial_temperature 0.6675874484349947 +517 2 optimizer.lr 0.04889450500903714 +517 2 negative_sampler.num_negs_per_pos 88.0 +517 2 training.batch_size 0.0 +517 1 dataset """fb15k237""" +517 1 model """rescal""" +517 1 loss """nssa""" +517 1 regularizer """no""" +517 1 optimizer """adam""" +517 1 training_loop """owa""" +517 1 negative_sampler """basic""" +517 1 evaluator """rankbased""" +517 2 dataset """fb15k237""" +517 2 model """rescal""" +517 2 loss """nssa""" +517 2 regularizer """no""" +517 2 optimizer """adam""" +517 2 training_loop """owa""" +517 2 negative_sampler """basic""" +517 2 evaluator """rankbased""" +518 1 model.embedding_dim 2.0 +518 1 loss.margin 25.39174663916305 +518 1 loss.adversarial_temperature 0.49853094134692527 +518 1 optimizer.lr 0.0013873316688097397 +518 1 negative_sampler.num_negs_per_pos 67.0 +518 1 training.batch_size 0.0 +518 2 model.embedding_dim 1.0 +518 2 loss.margin 12.185703821469307 +518 2 loss.adversarial_temperature 0.9378215645189025 +518 2 optimizer.lr 0.006961175596941162 +518 2 negative_sampler.num_negs_per_pos 96.0 +518 2 training.batch_size 1.0 +518 3 model.embedding_dim 0.0 +518 3 loss.margin 6.04191989048168 +518 3 loss.adversarial_temperature 0.9851932971995493 +518 3 optimizer.lr 0.007255777287687147 +518 3 negative_sampler.num_negs_per_pos 38.0 +518 3 training.batch_size 0.0 +518 4 model.embedding_dim 1.0 +518 4 loss.margin 2.948432543137273 +518 4 loss.adversarial_temperature 0.9791853413349186 +518 4 optimizer.lr 0.08243451108004436 +518 4 negative_sampler.num_negs_per_pos 95.0 +518 4 training.batch_size 1.0 +518 5 model.embedding_dim 2.0 +518 5 loss.margin 12.938989244014595 +518 5 loss.adversarial_temperature 0.9686449999116832 +518 5 optimizer.lr 0.014494683114595477 +518 5 negative_sampler.num_negs_per_pos 88.0 +518 5 training.batch_size 2.0 +518 6 model.embedding_dim 0.0 +518 6 loss.margin 1.7496268634786722 +518 6 loss.adversarial_temperature 0.23809420280710122 +518 6 optimizer.lr 
0.0015162999901571638 +518 6 negative_sampler.num_negs_per_pos 58.0 +518 6 training.batch_size 2.0 +518 7 model.embedding_dim 1.0 +518 7 loss.margin 17.343704153917024 +518 7 loss.adversarial_temperature 0.8137507592597955 +518 7 optimizer.lr 0.0017157562471858574 +518 7 negative_sampler.num_negs_per_pos 69.0 +518 7 training.batch_size 1.0 +518 8 model.embedding_dim 2.0 +518 8 loss.margin 14.129499468384392 +518 8 loss.adversarial_temperature 0.5733919620683281 +518 8 optimizer.lr 0.01008731739943095 +518 8 negative_sampler.num_negs_per_pos 61.0 +518 8 training.batch_size 2.0 +518 1 dataset """fb15k237""" +518 1 model """rescal""" +518 1 loss """nssa""" +518 1 regularizer """no""" +518 1 optimizer """adam""" +518 1 training_loop """owa""" +518 1 negative_sampler """basic""" +518 1 evaluator """rankbased""" +518 2 dataset """fb15k237""" +518 2 model """rescal""" +518 2 loss """nssa""" +518 2 regularizer """no""" +518 2 optimizer """adam""" +518 2 training_loop """owa""" +518 2 negative_sampler """basic""" +518 2 evaluator """rankbased""" +518 3 dataset """fb15k237""" +518 3 model """rescal""" +518 3 loss """nssa""" +518 3 regularizer """no""" +518 3 optimizer """adam""" +518 3 training_loop """owa""" +518 3 negative_sampler """basic""" +518 3 evaluator """rankbased""" +518 4 dataset """fb15k237""" +518 4 model """rescal""" +518 4 loss """nssa""" +518 4 regularizer """no""" +518 4 optimizer """adam""" +518 4 training_loop """owa""" +518 4 negative_sampler """basic""" +518 4 evaluator """rankbased""" +518 5 dataset """fb15k237""" +518 5 model """rescal""" +518 5 loss """nssa""" +518 5 regularizer """no""" +518 5 optimizer """adam""" +518 5 training_loop """owa""" +518 5 negative_sampler """basic""" +518 5 evaluator """rankbased""" +518 6 dataset """fb15k237""" +518 6 model """rescal""" +518 6 loss """nssa""" +518 6 regularizer """no""" +518 6 optimizer """adam""" +518 6 training_loop """owa""" +518 6 negative_sampler """basic""" +518 6 evaluator """rankbased""" +518 7 
dataset """fb15k237""" +518 7 model """rescal""" +518 7 loss """nssa""" +518 7 regularizer """no""" +518 7 optimizer """adam""" +518 7 training_loop """owa""" +518 7 negative_sampler """basic""" +518 7 evaluator """rankbased""" +518 8 dataset """fb15k237""" +518 8 model """rescal""" +518 8 loss """nssa""" +518 8 regularizer """no""" +518 8 optimizer """adam""" +518 8 training_loop """owa""" +518 8 negative_sampler """basic""" +518 8 evaluator """rankbased""" +519 1 model.embedding_dim 2.0 +519 1 loss.margin 3.6154348960803935 +519 1 optimizer.lr 0.0032946721749157843 +519 1 negative_sampler.num_negs_per_pos 61.0 +519 1 training.batch_size 0.0 +519 2 model.embedding_dim 1.0 +519 2 loss.margin 8.80006102295683 +519 2 optimizer.lr 0.009522034301349387 +519 2 negative_sampler.num_negs_per_pos 45.0 +519 2 training.batch_size 0.0 +519 3 model.embedding_dim 0.0 +519 3 loss.margin 8.175777856099558 +519 3 optimizer.lr 0.045043520916149334 +519 3 negative_sampler.num_negs_per_pos 84.0 +519 3 training.batch_size 1.0 +519 4 model.embedding_dim 0.0 +519 4 loss.margin 8.995261706441315 +519 4 optimizer.lr 0.03105005495717649 +519 4 negative_sampler.num_negs_per_pos 67.0 +519 4 training.batch_size 1.0 +519 5 model.embedding_dim 1.0 +519 5 loss.margin 5.848037394349878 +519 5 optimizer.lr 0.00538874222967007 +519 5 negative_sampler.num_negs_per_pos 28.0 +519 5 training.batch_size 1.0 +519 6 model.embedding_dim 1.0 +519 6 loss.margin 2.3051545217716773 +519 6 optimizer.lr 0.011409721806129943 +519 6 negative_sampler.num_negs_per_pos 59.0 +519 6 training.batch_size 1.0 +519 1 dataset """fb15k237""" +519 1 model """rescal""" +519 1 loss """marginranking""" +519 1 regularizer """no""" +519 1 optimizer """adam""" +519 1 training_loop """owa""" +519 1 negative_sampler """basic""" +519 1 evaluator """rankbased""" +519 2 dataset """fb15k237""" +519 2 model """rescal""" +519 2 loss """marginranking""" +519 2 regularizer """no""" +519 2 optimizer """adam""" +519 2 training_loop """owa""" 
+519 2 negative_sampler """basic""" +519 2 evaluator """rankbased""" +519 3 dataset """fb15k237""" +519 3 model """rescal""" +519 3 loss """marginranking""" +519 3 regularizer """no""" +519 3 optimizer """adam""" +519 3 training_loop """owa""" +519 3 negative_sampler """basic""" +519 3 evaluator """rankbased""" +519 4 dataset """fb15k237""" +519 4 model """rescal""" +519 4 loss """marginranking""" +519 4 regularizer """no""" +519 4 optimizer """adam""" +519 4 training_loop """owa""" +519 4 negative_sampler """basic""" +519 4 evaluator """rankbased""" +519 5 dataset """fb15k237""" +519 5 model """rescal""" +519 5 loss """marginranking""" +519 5 regularizer """no""" +519 5 optimizer """adam""" +519 5 training_loop """owa""" +519 5 negative_sampler """basic""" +519 5 evaluator """rankbased""" +519 6 dataset """fb15k237""" +519 6 model """rescal""" +519 6 loss """marginranking""" +519 6 regularizer """no""" +519 6 optimizer """adam""" +519 6 training_loop """owa""" +519 6 negative_sampler """basic""" +519 6 evaluator """rankbased""" +520 1 model.embedding_dim 2.0 +520 1 loss.margin 8.648055250730847 +520 1 optimizer.lr 0.048614621228189416 +520 1 negative_sampler.num_negs_per_pos 40.0 +520 1 training.batch_size 0.0 +520 2 model.embedding_dim 1.0 +520 2 loss.margin 8.56044853660941 +520 2 optimizer.lr 0.0012388350833229976 +520 2 negative_sampler.num_negs_per_pos 66.0 +520 2 training.batch_size 1.0 +520 3 model.embedding_dim 0.0 +520 3 loss.margin 5.766652484287585 +520 3 optimizer.lr 0.00336417809395283 +520 3 negative_sampler.num_negs_per_pos 49.0 +520 3 training.batch_size 2.0 +520 4 model.embedding_dim 2.0 +520 4 loss.margin 3.1898664300732205 +520 4 optimizer.lr 0.0030985708885705003 +520 4 negative_sampler.num_negs_per_pos 97.0 +520 4 training.batch_size 2.0 +520 5 model.embedding_dim 1.0 +520 5 loss.margin 3.2422622067328444 +520 5 optimizer.lr 0.002990405523521489 +520 5 negative_sampler.num_negs_per_pos 71.0 +520 5 training.batch_size 2.0 +520 6 
model.embedding_dim 0.0 +520 6 loss.margin 3.1812635333045782 +520 6 optimizer.lr 0.0034715464939660294 +520 6 negative_sampler.num_negs_per_pos 76.0 +520 6 training.batch_size 2.0 +520 7 model.embedding_dim 2.0 +520 7 loss.margin 9.620401166977732 +520 7 optimizer.lr 0.0243295222124725 +520 7 negative_sampler.num_negs_per_pos 34.0 +520 7 training.batch_size 1.0 +520 1 dataset """fb15k237""" +520 1 model """rescal""" +520 1 loss """marginranking""" +520 1 regularizer """no""" +520 1 optimizer """adam""" +520 1 training_loop """owa""" +520 1 negative_sampler """basic""" +520 1 evaluator """rankbased""" +520 2 dataset """fb15k237""" +520 2 model """rescal""" +520 2 loss """marginranking""" +520 2 regularizer """no""" +520 2 optimizer """adam""" +520 2 training_loop """owa""" +520 2 negative_sampler """basic""" +520 2 evaluator """rankbased""" +520 3 dataset """fb15k237""" +520 3 model """rescal""" +520 3 loss """marginranking""" +520 3 regularizer """no""" +520 3 optimizer """adam""" +520 3 training_loop """owa""" +520 3 negative_sampler """basic""" +520 3 evaluator """rankbased""" +520 4 dataset """fb15k237""" +520 4 model """rescal""" +520 4 loss """marginranking""" +520 4 regularizer """no""" +520 4 optimizer """adam""" +520 4 training_loop """owa""" +520 4 negative_sampler """basic""" +520 4 evaluator """rankbased""" +520 5 dataset """fb15k237""" +520 5 model """rescal""" +520 5 loss """marginranking""" +520 5 regularizer """no""" +520 5 optimizer """adam""" +520 5 training_loop """owa""" +520 5 negative_sampler """basic""" +520 5 evaluator """rankbased""" +520 6 dataset """fb15k237""" +520 6 model """rescal""" +520 6 loss """marginranking""" +520 6 regularizer """no""" +520 6 optimizer """adam""" +520 6 training_loop """owa""" +520 6 negative_sampler """basic""" +520 6 evaluator """rankbased""" +520 7 dataset """fb15k237""" +520 7 model """rescal""" +520 7 loss """marginranking""" +520 7 regularizer """no""" +520 7 optimizer """adam""" +520 7 training_loop 
"""owa""" +520 7 negative_sampler """basic""" +520 7 evaluator """rankbased""" +521 1 model.embedding_dim 1.0 +521 1 training.batch_size 2.0 +521 1 training.label_smoothing 0.0013619120793602014 +521 2 model.embedding_dim 0.0 +521 2 training.batch_size 0.0 +521 2 training.label_smoothing 0.00720315077141967 +521 3 model.embedding_dim 2.0 +521 3 training.batch_size 0.0 +521 3 training.label_smoothing 0.33094511537678595 +521 4 model.embedding_dim 1.0 +521 4 training.batch_size 1.0 +521 4 training.label_smoothing 0.007046907974426491 +521 5 model.embedding_dim 0.0 +521 5 training.batch_size 0.0 +521 5 training.label_smoothing 0.14657115078724187 +521 6 model.embedding_dim 0.0 +521 6 training.batch_size 0.0 +521 6 training.label_smoothing 0.01773824491715766 +521 7 model.embedding_dim 2.0 +521 7 training.batch_size 0.0 +521 7 training.label_smoothing 0.040998090164255416 +521 8 model.embedding_dim 0.0 +521 8 training.batch_size 1.0 +521 8 training.label_smoothing 0.01433078379167068 +521 9 model.embedding_dim 2.0 +521 9 training.batch_size 2.0 +521 9 training.label_smoothing 0.005966002065605254 +521 10 model.embedding_dim 2.0 +521 10 training.batch_size 1.0 +521 10 training.label_smoothing 0.5384835738329681 +521 11 model.embedding_dim 0.0 +521 11 training.batch_size 0.0 +521 11 training.label_smoothing 0.0019855155765312266 +521 12 model.embedding_dim 2.0 +521 12 training.batch_size 0.0 +521 12 training.label_smoothing 0.9056908502214002 +521 13 model.embedding_dim 2.0 +521 13 training.batch_size 2.0 +521 13 training.label_smoothing 0.010208845704319973 +521 14 model.embedding_dim 2.0 +521 14 training.batch_size 2.0 +521 14 training.label_smoothing 0.5107781866988932 +521 15 model.embedding_dim 2.0 +521 15 training.batch_size 0.0 +521 15 training.label_smoothing 0.0457248063410635 +521 16 model.embedding_dim 0.0 +521 16 training.batch_size 2.0 +521 16 training.label_smoothing 0.09781681183973927 +521 17 model.embedding_dim 2.0 +521 17 training.batch_size 1.0 +521 17 
training.label_smoothing 0.005699443111187307 +521 18 model.embedding_dim 1.0 +521 18 training.batch_size 0.0 +521 18 training.label_smoothing 0.03234833987463688 +521 19 model.embedding_dim 0.0 +521 19 training.batch_size 1.0 +521 19 training.label_smoothing 0.06792554271276123 +521 20 model.embedding_dim 2.0 +521 20 training.batch_size 0.0 +521 20 training.label_smoothing 0.028787268136747136 +521 21 model.embedding_dim 0.0 +521 21 training.batch_size 1.0 +521 21 training.label_smoothing 0.4383743716765292 +521 22 model.embedding_dim 2.0 +521 22 training.batch_size 1.0 +521 22 training.label_smoothing 0.06255971739901325 +521 23 model.embedding_dim 0.0 +521 23 training.batch_size 0.0 +521 23 training.label_smoothing 0.17956499288523056 +521 24 model.embedding_dim 1.0 +521 24 training.batch_size 0.0 +521 24 training.label_smoothing 0.006366720822600999 +521 25 model.embedding_dim 2.0 +521 25 training.batch_size 2.0 +521 25 training.label_smoothing 0.05130775230421002 +521 26 model.embedding_dim 0.0 +521 26 training.batch_size 2.0 +521 26 training.label_smoothing 0.08858759367387589 +521 27 model.embedding_dim 1.0 +521 27 training.batch_size 2.0 +521 27 training.label_smoothing 0.01964078280519854 +521 28 model.embedding_dim 0.0 +521 28 training.batch_size 1.0 +521 28 training.label_smoothing 0.12050172670472718 +521 29 model.embedding_dim 0.0 +521 29 training.batch_size 2.0 +521 29 training.label_smoothing 0.010230122867860215 +521 30 model.embedding_dim 2.0 +521 30 training.batch_size 2.0 +521 30 training.label_smoothing 0.02184178522540556 +521 31 model.embedding_dim 0.0 +521 31 training.batch_size 1.0 +521 31 training.label_smoothing 0.12927994763878423 +521 32 model.embedding_dim 1.0 +521 32 training.batch_size 1.0 +521 32 training.label_smoothing 0.0040321225849105045 +521 33 model.embedding_dim 0.0 +521 33 training.batch_size 0.0 +521 33 training.label_smoothing 0.013728785812670525 +521 34 model.embedding_dim 0.0 +521 34 training.batch_size 1.0 +521 34 
training.label_smoothing 0.006282541171280856 +521 35 model.embedding_dim 1.0 +521 35 training.batch_size 0.0 +521 35 training.label_smoothing 0.9091851736319302 +521 36 model.embedding_dim 1.0 +521 36 training.batch_size 2.0 +521 36 training.label_smoothing 0.021042476705808763 +521 37 model.embedding_dim 0.0 +521 37 training.batch_size 2.0 +521 37 training.label_smoothing 0.001395504646467893 +521 38 model.embedding_dim 0.0 +521 38 training.batch_size 1.0 +521 38 training.label_smoothing 0.0260453154192271 +521 39 model.embedding_dim 0.0 +521 39 training.batch_size 2.0 +521 39 training.label_smoothing 0.4637767465233939 +521 40 model.embedding_dim 0.0 +521 40 training.batch_size 1.0 +521 40 training.label_smoothing 0.014718241576272757 +521 41 model.embedding_dim 0.0 +521 41 training.batch_size 0.0 +521 41 training.label_smoothing 0.0054394451346445535 +521 42 model.embedding_dim 0.0 +521 42 training.batch_size 2.0 +521 42 training.label_smoothing 0.005380147786877711 +521 43 model.embedding_dim 2.0 +521 43 training.batch_size 2.0 +521 43 training.label_smoothing 0.0014016027059135237 +521 44 model.embedding_dim 0.0 +521 44 training.batch_size 0.0 +521 44 training.label_smoothing 0.014637335288227622 +521 45 model.embedding_dim 0.0 +521 45 training.batch_size 2.0 +521 45 training.label_smoothing 0.2742297704410689 +521 46 model.embedding_dim 1.0 +521 46 training.batch_size 0.0 +521 46 training.label_smoothing 0.001238200228812448 +521 47 model.embedding_dim 2.0 +521 47 training.batch_size 0.0 +521 47 training.label_smoothing 0.0013826442477501979 +521 48 model.embedding_dim 1.0 +521 48 training.batch_size 2.0 +521 48 training.label_smoothing 0.04008629167961942 +521 49 model.embedding_dim 0.0 +521 49 training.batch_size 0.0 +521 49 training.label_smoothing 0.06019168782375423 +521 50 model.embedding_dim 1.0 +521 50 training.batch_size 2.0 +521 50 training.label_smoothing 0.009304471670098341 +521 51 model.embedding_dim 2.0 +521 51 training.batch_size 1.0 +521 51 
training.label_smoothing 0.005848065816508638 +521 52 model.embedding_dim 1.0 +521 52 training.batch_size 1.0 +521 52 training.label_smoothing 0.0028378877599334346 +521 53 model.embedding_dim 1.0 +521 53 training.batch_size 1.0 +521 53 training.label_smoothing 0.06019968173369842 +521 54 model.embedding_dim 0.0 +521 54 training.batch_size 2.0 +521 54 training.label_smoothing 0.0033259928775398553 +521 55 model.embedding_dim 2.0 +521 55 training.batch_size 1.0 +521 55 training.label_smoothing 0.6663229772027347 +521 56 model.embedding_dim 2.0 +521 56 training.batch_size 1.0 +521 56 training.label_smoothing 0.11222039668595601 +521 57 model.embedding_dim 0.0 +521 57 training.batch_size 1.0 +521 57 training.label_smoothing 0.07908804347389842 +521 58 model.embedding_dim 0.0 +521 58 training.batch_size 1.0 +521 58 training.label_smoothing 0.009284119767921107 +521 59 model.embedding_dim 1.0 +521 59 training.batch_size 2.0 +521 59 training.label_smoothing 0.18032289727562534 +521 60 model.embedding_dim 2.0 +521 60 training.batch_size 1.0 +521 60 training.label_smoothing 0.01381940403347298 +521 61 model.embedding_dim 0.0 +521 61 training.batch_size 1.0 +521 61 training.label_smoothing 0.002026370190822285 +521 62 model.embedding_dim 2.0 +521 62 training.batch_size 2.0 +521 62 training.label_smoothing 0.017156465609031318 +521 63 model.embedding_dim 1.0 +521 63 training.batch_size 2.0 +521 63 training.label_smoothing 0.07686864682901408 +521 64 model.embedding_dim 0.0 +521 64 training.batch_size 1.0 +521 64 training.label_smoothing 0.006121358698943577 +521 65 model.embedding_dim 1.0 +521 65 training.batch_size 1.0 +521 65 training.label_smoothing 0.0022508412038611102 +521 66 model.embedding_dim 1.0 +521 66 training.batch_size 0.0 +521 66 training.label_smoothing 0.28305567203672255 +521 67 model.embedding_dim 1.0 +521 67 training.batch_size 0.0 +521 67 training.label_smoothing 0.021828122085162636 +521 68 model.embedding_dim 2.0 +521 68 training.batch_size 2.0 +521 68 
training.label_smoothing 0.04955348754428441 +521 69 model.embedding_dim 2.0 +521 69 training.batch_size 1.0 +521 69 training.label_smoothing 0.11293051121922704 +521 70 model.embedding_dim 0.0 +521 70 training.batch_size 2.0 +521 70 training.label_smoothing 0.00244798649928079 +521 71 model.embedding_dim 1.0 +521 71 training.batch_size 2.0 +521 71 training.label_smoothing 0.002810750570287185 +521 72 model.embedding_dim 2.0 +521 72 training.batch_size 1.0 +521 72 training.label_smoothing 0.06533264646902998 +521 73 model.embedding_dim 2.0 +521 73 training.batch_size 2.0 +521 73 training.label_smoothing 0.006804140816008321 +521 74 model.embedding_dim 2.0 +521 74 training.batch_size 0.0 +521 74 training.label_smoothing 0.2683693910918898 +521 75 model.embedding_dim 2.0 +521 75 training.batch_size 1.0 +521 75 training.label_smoothing 0.06888403779689077 +521 76 model.embedding_dim 1.0 +521 76 training.batch_size 1.0 +521 76 training.label_smoothing 0.034414533792343015 +521 77 model.embedding_dim 0.0 +521 77 training.batch_size 1.0 +521 77 training.label_smoothing 0.019240278652736035 +521 78 model.embedding_dim 0.0 +521 78 training.batch_size 1.0 +521 78 training.label_smoothing 0.004228308048707785 +521 79 model.embedding_dim 0.0 +521 79 training.batch_size 2.0 +521 79 training.label_smoothing 0.03529518140154877 +521 80 model.embedding_dim 1.0 +521 80 training.batch_size 0.0 +521 80 training.label_smoothing 0.3555788465850457 +521 81 model.embedding_dim 2.0 +521 81 training.batch_size 0.0 +521 81 training.label_smoothing 0.03258815326898656 +521 82 model.embedding_dim 0.0 +521 82 training.batch_size 2.0 +521 82 training.label_smoothing 0.0045664849983548745 +521 83 model.embedding_dim 0.0 +521 83 training.batch_size 2.0 +521 83 training.label_smoothing 0.09624990153543979 +521 84 model.embedding_dim 0.0 +521 84 training.batch_size 0.0 +521 84 training.label_smoothing 0.015449864908270787 +521 85 model.embedding_dim 0.0 +521 85 training.batch_size 0.0 +521 85 
training.label_smoothing 0.0010649160194913328 +521 86 model.embedding_dim 1.0 +521 86 training.batch_size 2.0 +521 86 training.label_smoothing 0.001706607392938368 +521 87 model.embedding_dim 1.0 +521 87 training.batch_size 1.0 +521 87 training.label_smoothing 0.0024020872028711303 +521 88 model.embedding_dim 2.0 +521 88 training.batch_size 1.0 +521 88 training.label_smoothing 0.7531965299774069 +521 89 model.embedding_dim 0.0 +521 89 training.batch_size 2.0 +521 89 training.label_smoothing 0.5238307815236461 +521 90 model.embedding_dim 2.0 +521 90 training.batch_size 2.0 +521 90 training.label_smoothing 0.2637537427381588 +521 91 model.embedding_dim 1.0 +521 91 training.batch_size 2.0 +521 91 training.label_smoothing 0.0010146871926846627 +521 92 model.embedding_dim 2.0 +521 92 training.batch_size 0.0 +521 92 training.label_smoothing 0.40527781930434753 +521 93 model.embedding_dim 0.0 +521 93 training.batch_size 1.0 +521 93 training.label_smoothing 0.5790462732881945 +521 94 model.embedding_dim 2.0 +521 94 training.batch_size 1.0 +521 94 training.label_smoothing 0.030167455112668443 +521 95 model.embedding_dim 0.0 +521 95 training.batch_size 0.0 +521 95 training.label_smoothing 0.10259352989930337 +521 96 model.embedding_dim 0.0 +521 96 training.batch_size 0.0 +521 96 training.label_smoothing 0.0013935293529415828 +521 97 model.embedding_dim 2.0 +521 97 training.batch_size 0.0 +521 97 training.label_smoothing 0.003324198668240259 +521 98 model.embedding_dim 2.0 +521 98 training.batch_size 1.0 +521 98 training.label_smoothing 0.48491925701501865 +521 99 model.embedding_dim 1.0 +521 99 training.batch_size 1.0 +521 99 training.label_smoothing 0.1027336538967514 +521 100 model.embedding_dim 1.0 +521 100 training.batch_size 1.0 +521 100 training.label_smoothing 0.12228086663535544 +521 1 dataset """kinships""" +521 1 model """rescal""" +521 1 loss """bceaftersigmoid""" +521 1 regularizer """no""" +521 1 optimizer """adadelta""" +521 1 training_loop """lcwa""" +521 1 
evaluator """rankbased""" +521 2 dataset """kinships""" +521 2 model """rescal""" +521 2 loss """bceaftersigmoid""" +521 2 regularizer """no""" +521 2 optimizer """adadelta""" +521 2 training_loop """lcwa""" +521 2 evaluator """rankbased""" +521 3 dataset """kinships""" +521 3 model """rescal""" +521 3 loss """bceaftersigmoid""" +521 3 regularizer """no""" +521 3 optimizer """adadelta""" +521 3 training_loop """lcwa""" +521 3 evaluator """rankbased""" +521 4 dataset """kinships""" +521 4 model """rescal""" +521 4 loss """bceaftersigmoid""" +521 4 regularizer """no""" +521 4 optimizer """adadelta""" +521 4 training_loop """lcwa""" +521 4 evaluator """rankbased""" +521 5 dataset """kinships""" +521 5 model """rescal""" +521 5 loss """bceaftersigmoid""" +521 5 regularizer """no""" +521 5 optimizer """adadelta""" +521 5 training_loop """lcwa""" +521 5 evaluator """rankbased""" +521 6 dataset """kinships""" +521 6 model """rescal""" +521 6 loss """bceaftersigmoid""" +521 6 regularizer """no""" +521 6 optimizer """adadelta""" +521 6 training_loop """lcwa""" +521 6 evaluator """rankbased""" +521 7 dataset """kinships""" +521 7 model """rescal""" +521 7 loss """bceaftersigmoid""" +521 7 regularizer """no""" +521 7 optimizer """adadelta""" +521 7 training_loop """lcwa""" +521 7 evaluator """rankbased""" +521 8 dataset """kinships""" +521 8 model """rescal""" +521 8 loss """bceaftersigmoid""" +521 8 regularizer """no""" +521 8 optimizer """adadelta""" +521 8 training_loop """lcwa""" +521 8 evaluator """rankbased""" +521 9 dataset """kinships""" +521 9 model """rescal""" +521 9 loss """bceaftersigmoid""" +521 9 regularizer """no""" +521 9 optimizer """adadelta""" +521 9 training_loop """lcwa""" +521 9 evaluator """rankbased""" +521 10 dataset """kinships""" +521 10 model """rescal""" +521 10 loss """bceaftersigmoid""" +521 10 regularizer """no""" +521 10 optimizer """adadelta""" +521 10 training_loop """lcwa""" +521 10 evaluator """rankbased""" +521 11 dataset """kinships""" 
+521 11 model """rescal""" +521 11 loss """bceaftersigmoid""" +521 11 regularizer """no""" +521 11 optimizer """adadelta""" +521 11 training_loop """lcwa""" +521 11 evaluator """rankbased""" +521 12 dataset """kinships""" +521 12 model """rescal""" +521 12 loss """bceaftersigmoid""" +521 12 regularizer """no""" +521 12 optimizer """adadelta""" +521 12 training_loop """lcwa""" +521 12 evaluator """rankbased""" +521 13 dataset """kinships""" +521 13 model """rescal""" +521 13 loss """bceaftersigmoid""" +521 13 regularizer """no""" +521 13 optimizer """adadelta""" +521 13 training_loop """lcwa""" +521 13 evaluator """rankbased""" +521 14 dataset """kinships""" +521 14 model """rescal""" +521 14 loss """bceaftersigmoid""" +521 14 regularizer """no""" +521 14 optimizer """adadelta""" +521 14 training_loop """lcwa""" +521 14 evaluator """rankbased""" +521 15 dataset """kinships""" +521 15 model """rescal""" +521 15 loss """bceaftersigmoid""" +521 15 regularizer """no""" +521 15 optimizer """adadelta""" +521 15 training_loop """lcwa""" +521 15 evaluator """rankbased""" +521 16 dataset """kinships""" +521 16 model """rescal""" +521 16 loss """bceaftersigmoid""" +521 16 regularizer """no""" +521 16 optimizer """adadelta""" +521 16 training_loop """lcwa""" +521 16 evaluator """rankbased""" +521 17 dataset """kinships""" +521 17 model """rescal""" +521 17 loss """bceaftersigmoid""" +521 17 regularizer """no""" +521 17 optimizer """adadelta""" +521 17 training_loop """lcwa""" +521 17 evaluator """rankbased""" +521 18 dataset """kinships""" +521 18 model """rescal""" +521 18 loss """bceaftersigmoid""" +521 18 regularizer """no""" +521 18 optimizer """adadelta""" +521 18 training_loop """lcwa""" +521 18 evaluator """rankbased""" +521 19 dataset """kinships""" +521 19 model """rescal""" +521 19 loss """bceaftersigmoid""" +521 19 regularizer """no""" +521 19 optimizer """adadelta""" +521 19 training_loop """lcwa""" +521 19 evaluator """rankbased""" +521 20 dataset """kinships""" 
+521 20 model """rescal""" +521 20 loss """bceaftersigmoid""" +521 20 regularizer """no""" +521 20 optimizer """adadelta""" +521 20 training_loop """lcwa""" +521 20 evaluator """rankbased""" +521 21 dataset """kinships""" +521 21 model """rescal""" +521 21 loss """bceaftersigmoid""" +521 21 regularizer """no""" +521 21 optimizer """adadelta""" +521 21 training_loop """lcwa""" +521 21 evaluator """rankbased""" +521 22 dataset """kinships""" +521 22 model """rescal""" +521 22 loss """bceaftersigmoid""" +521 22 regularizer """no""" +521 22 optimizer """adadelta""" +521 22 training_loop """lcwa""" +521 22 evaluator """rankbased""" +521 23 dataset """kinships""" +521 23 model """rescal""" +521 23 loss """bceaftersigmoid""" +521 23 regularizer """no""" +521 23 optimizer """adadelta""" +521 23 training_loop """lcwa""" +521 23 evaluator """rankbased""" +521 24 dataset """kinships""" +521 24 model """rescal""" +521 24 loss """bceaftersigmoid""" +521 24 regularizer """no""" +521 24 optimizer """adadelta""" +521 24 training_loop """lcwa""" +521 24 evaluator """rankbased""" +521 25 dataset """kinships""" +521 25 model """rescal""" +521 25 loss """bceaftersigmoid""" +521 25 regularizer """no""" +521 25 optimizer """adadelta""" +521 25 training_loop """lcwa""" +521 25 evaluator """rankbased""" +521 26 dataset """kinships""" +521 26 model """rescal""" +521 26 loss """bceaftersigmoid""" +521 26 regularizer """no""" +521 26 optimizer """adadelta""" +521 26 training_loop """lcwa""" +521 26 evaluator """rankbased""" +521 27 dataset """kinships""" +521 27 model """rescal""" +521 27 loss """bceaftersigmoid""" +521 27 regularizer """no""" +521 27 optimizer """adadelta""" +521 27 training_loop """lcwa""" +521 27 evaluator """rankbased""" +521 28 dataset """kinships""" +521 28 model """rescal""" +521 28 loss """bceaftersigmoid""" +521 28 regularizer """no""" +521 28 optimizer """adadelta""" +521 28 training_loop """lcwa""" +521 28 evaluator """rankbased""" +521 29 dataset """kinships""" 
+521 29 model """rescal""" +521 29 loss """bceaftersigmoid""" +521 29 regularizer """no""" +521 29 optimizer """adadelta""" +521 29 training_loop """lcwa""" +521 29 evaluator """rankbased""" +521 30 dataset """kinships""" +521 30 model """rescal""" +521 30 loss """bceaftersigmoid""" +521 30 regularizer """no""" +521 30 optimizer """adadelta""" +521 30 training_loop """lcwa""" +521 30 evaluator """rankbased""" +521 31 dataset """kinships""" +521 31 model """rescal""" +521 31 loss """bceaftersigmoid""" +521 31 regularizer """no""" +521 31 optimizer """adadelta""" +521 31 training_loop """lcwa""" +521 31 evaluator """rankbased""" +521 32 dataset """kinships""" +521 32 model """rescal""" +521 32 loss """bceaftersigmoid""" +521 32 regularizer """no""" +521 32 optimizer """adadelta""" +521 32 training_loop """lcwa""" +521 32 evaluator """rankbased""" +521 33 dataset """kinships""" +521 33 model """rescal""" +521 33 loss """bceaftersigmoid""" +521 33 regularizer """no""" +521 33 optimizer """adadelta""" +521 33 training_loop """lcwa""" +521 33 evaluator """rankbased""" +521 34 dataset """kinships""" +521 34 model """rescal""" +521 34 loss """bceaftersigmoid""" +521 34 regularizer """no""" +521 34 optimizer """adadelta""" +521 34 training_loop """lcwa""" +521 34 evaluator """rankbased""" +521 35 dataset """kinships""" +521 35 model """rescal""" +521 35 loss """bceaftersigmoid""" +521 35 regularizer """no""" +521 35 optimizer """adadelta""" +521 35 training_loop """lcwa""" +521 35 evaluator """rankbased""" +521 36 dataset """kinships""" +521 36 model """rescal""" +521 36 loss """bceaftersigmoid""" +521 36 regularizer """no""" +521 36 optimizer """adadelta""" +521 36 training_loop """lcwa""" +521 36 evaluator """rankbased""" +521 37 dataset """kinships""" +521 37 model """rescal""" +521 37 loss """bceaftersigmoid""" +521 37 regularizer """no""" +521 37 optimizer """adadelta""" +521 37 training_loop """lcwa""" +521 37 evaluator """rankbased""" +521 38 dataset """kinships""" 
+521 38 model """rescal""" +521 38 loss """bceaftersigmoid""" +521 38 regularizer """no""" +521 38 optimizer """adadelta""" +521 38 training_loop """lcwa""" +521 38 evaluator """rankbased""" +521 39 dataset """kinships""" +521 39 model """rescal""" +521 39 loss """bceaftersigmoid""" +521 39 regularizer """no""" +521 39 optimizer """adadelta""" +521 39 training_loop """lcwa""" +521 39 evaluator """rankbased""" +521 40 dataset """kinships""" +521 40 model """rescal""" +521 40 loss """bceaftersigmoid""" +521 40 regularizer """no""" +521 40 optimizer """adadelta""" +521 40 training_loop """lcwa""" +521 40 evaluator """rankbased""" +521 41 dataset """kinships""" +521 41 model """rescal""" +521 41 loss """bceaftersigmoid""" +521 41 regularizer """no""" +521 41 optimizer """adadelta""" +521 41 training_loop """lcwa""" +521 41 evaluator """rankbased""" +521 42 dataset """kinships""" +521 42 model """rescal""" +521 42 loss """bceaftersigmoid""" +521 42 regularizer """no""" +521 42 optimizer """adadelta""" +521 42 training_loop """lcwa""" +521 42 evaluator """rankbased""" +521 43 dataset """kinships""" +521 43 model """rescal""" +521 43 loss """bceaftersigmoid""" +521 43 regularizer """no""" +521 43 optimizer """adadelta""" +521 43 training_loop """lcwa""" +521 43 evaluator """rankbased""" +521 44 dataset """kinships""" +521 44 model """rescal""" +521 44 loss """bceaftersigmoid""" +521 44 regularizer """no""" +521 44 optimizer """adadelta""" +521 44 training_loop """lcwa""" +521 44 evaluator """rankbased""" +521 45 dataset """kinships""" +521 45 model """rescal""" +521 45 loss """bceaftersigmoid""" +521 45 regularizer """no""" +521 45 optimizer """adadelta""" +521 45 training_loop """lcwa""" +521 45 evaluator """rankbased""" +521 46 dataset """kinships""" +521 46 model """rescal""" +521 46 loss """bceaftersigmoid""" +521 46 regularizer """no""" +521 46 optimizer """adadelta""" +521 46 training_loop """lcwa""" +521 46 evaluator """rankbased""" +521 47 dataset """kinships""" 
+521 47 model """rescal""" +521 47 loss """bceaftersigmoid""" +521 47 regularizer """no""" +521 47 optimizer """adadelta""" +521 47 training_loop """lcwa""" +521 47 evaluator """rankbased""" +521 48 dataset """kinships""" +521 48 model """rescal""" +521 48 loss """bceaftersigmoid""" +521 48 regularizer """no""" +521 48 optimizer """adadelta""" +521 48 training_loop """lcwa""" +521 48 evaluator """rankbased""" +521 49 dataset """kinships""" +521 49 model """rescal""" +521 49 loss """bceaftersigmoid""" +521 49 regularizer """no""" +521 49 optimizer """adadelta""" +521 49 training_loop """lcwa""" +521 49 evaluator """rankbased""" +521 50 dataset """kinships""" +521 50 model """rescal""" +521 50 loss """bceaftersigmoid""" +521 50 regularizer """no""" +521 50 optimizer """adadelta""" +521 50 training_loop """lcwa""" +521 50 evaluator """rankbased""" +521 51 dataset """kinships""" +521 51 model """rescal""" +521 51 loss """bceaftersigmoid""" +521 51 regularizer """no""" +521 51 optimizer """adadelta""" +521 51 training_loop """lcwa""" +521 51 evaluator """rankbased""" +521 52 dataset """kinships""" +521 52 model """rescal""" +521 52 loss """bceaftersigmoid""" +521 52 regularizer """no""" +521 52 optimizer """adadelta""" +521 52 training_loop """lcwa""" +521 52 evaluator """rankbased""" +521 53 dataset """kinships""" +521 53 model """rescal""" +521 53 loss """bceaftersigmoid""" +521 53 regularizer """no""" +521 53 optimizer """adadelta""" +521 53 training_loop """lcwa""" +521 53 evaluator """rankbased""" +521 54 dataset """kinships""" +521 54 model """rescal""" +521 54 loss """bceaftersigmoid""" +521 54 regularizer """no""" +521 54 optimizer """adadelta""" +521 54 training_loop """lcwa""" +521 54 evaluator """rankbased""" +521 55 dataset """kinships""" +521 55 model """rescal""" +521 55 loss """bceaftersigmoid""" +521 55 regularizer """no""" +521 55 optimizer """adadelta""" +521 55 training_loop """lcwa""" +521 55 evaluator """rankbased""" +521 56 dataset """kinships""" 
+521 56 model """rescal""" +521 56 loss """bceaftersigmoid""" +521 56 regularizer """no""" +521 56 optimizer """adadelta""" +521 56 training_loop """lcwa""" +521 56 evaluator """rankbased""" +521 57 dataset """kinships""" +521 57 model """rescal""" +521 57 loss """bceaftersigmoid""" +521 57 regularizer """no""" +521 57 optimizer """adadelta""" +521 57 training_loop """lcwa""" +521 57 evaluator """rankbased""" +521 58 dataset """kinships""" +521 58 model """rescal""" +521 58 loss """bceaftersigmoid""" +521 58 regularizer """no""" +521 58 optimizer """adadelta""" +521 58 training_loop """lcwa""" +521 58 evaluator """rankbased""" +521 59 dataset """kinships""" +521 59 model """rescal""" +521 59 loss """bceaftersigmoid""" +521 59 regularizer """no""" +521 59 optimizer """adadelta""" +521 59 training_loop """lcwa""" +521 59 evaluator """rankbased""" +521 60 dataset """kinships""" +521 60 model """rescal""" +521 60 loss """bceaftersigmoid""" +521 60 regularizer """no""" +521 60 optimizer """adadelta""" +521 60 training_loop """lcwa""" +521 60 evaluator """rankbased""" +521 61 dataset """kinships""" +521 61 model """rescal""" +521 61 loss """bceaftersigmoid""" +521 61 regularizer """no""" +521 61 optimizer """adadelta""" +521 61 training_loop """lcwa""" +521 61 evaluator """rankbased""" +521 62 dataset """kinships""" +521 62 model """rescal""" +521 62 loss """bceaftersigmoid""" +521 62 regularizer """no""" +521 62 optimizer """adadelta""" +521 62 training_loop """lcwa""" +521 62 evaluator """rankbased""" +521 63 dataset """kinships""" +521 63 model """rescal""" +521 63 loss """bceaftersigmoid""" +521 63 regularizer """no""" +521 63 optimizer """adadelta""" +521 63 training_loop """lcwa""" +521 63 evaluator """rankbased""" +521 64 dataset """kinships""" +521 64 model """rescal""" +521 64 loss """bceaftersigmoid""" +521 64 regularizer """no""" +521 64 optimizer """adadelta""" +521 64 training_loop """lcwa""" +521 64 evaluator """rankbased""" +521 65 dataset """kinships""" 
+521 65 model """rescal""" +521 65 loss """bceaftersigmoid""" +521 65 regularizer """no""" +521 65 optimizer """adadelta""" +521 65 training_loop """lcwa""" +521 65 evaluator """rankbased""" +521 66 dataset """kinships""" +521 66 model """rescal""" +521 66 loss """bceaftersigmoid""" +521 66 regularizer """no""" +521 66 optimizer """adadelta""" +521 66 training_loop """lcwa""" +521 66 evaluator """rankbased""" +521 67 dataset """kinships""" +521 67 model """rescal""" +521 67 loss """bceaftersigmoid""" +521 67 regularizer """no""" +521 67 optimizer """adadelta""" +521 67 training_loop """lcwa""" +521 67 evaluator """rankbased""" +521 68 dataset """kinships""" +521 68 model """rescal""" +521 68 loss """bceaftersigmoid""" +521 68 regularizer """no""" +521 68 optimizer """adadelta""" +521 68 training_loop """lcwa""" +521 68 evaluator """rankbased""" +521 69 dataset """kinships""" +521 69 model """rescal""" +521 69 loss """bceaftersigmoid""" +521 69 regularizer """no""" +521 69 optimizer """adadelta""" +521 69 training_loop """lcwa""" +521 69 evaluator """rankbased""" +521 70 dataset """kinships""" +521 70 model """rescal""" +521 70 loss """bceaftersigmoid""" +521 70 regularizer """no""" +521 70 optimizer """adadelta""" +521 70 training_loop """lcwa""" +521 70 evaluator """rankbased""" +521 71 dataset """kinships""" +521 71 model """rescal""" +521 71 loss """bceaftersigmoid""" +521 71 regularizer """no""" +521 71 optimizer """adadelta""" +521 71 training_loop """lcwa""" +521 71 evaluator """rankbased""" +521 72 dataset """kinships""" +521 72 model """rescal""" +521 72 loss """bceaftersigmoid""" +521 72 regularizer """no""" +521 72 optimizer """adadelta""" +521 72 training_loop """lcwa""" +521 72 evaluator """rankbased""" +521 73 dataset """kinships""" +521 73 model """rescal""" +521 73 loss """bceaftersigmoid""" +521 73 regularizer """no""" +521 73 optimizer """adadelta""" +521 73 training_loop """lcwa""" +521 73 evaluator """rankbased""" +521 74 dataset """kinships""" 
+521 74 model """rescal""" +521 74 loss """bceaftersigmoid""" +521 74 regularizer """no""" +521 74 optimizer """adadelta""" +521 74 training_loop """lcwa""" +521 74 evaluator """rankbased""" +521 75 dataset """kinships""" +521 75 model """rescal""" +521 75 loss """bceaftersigmoid""" +521 75 regularizer """no""" +521 75 optimizer """adadelta""" +521 75 training_loop """lcwa""" +521 75 evaluator """rankbased""" +521 76 dataset """kinships""" +521 76 model """rescal""" +521 76 loss """bceaftersigmoid""" +521 76 regularizer """no""" +521 76 optimizer """adadelta""" +521 76 training_loop """lcwa""" +521 76 evaluator """rankbased""" +521 77 dataset """kinships""" +521 77 model """rescal""" +521 77 loss """bceaftersigmoid""" +521 77 regularizer """no""" +521 77 optimizer """adadelta""" +521 77 training_loop """lcwa""" +521 77 evaluator """rankbased""" +521 78 dataset """kinships""" +521 78 model """rescal""" +521 78 loss """bceaftersigmoid""" +521 78 regularizer """no""" +521 78 optimizer """adadelta""" +521 78 training_loop """lcwa""" +521 78 evaluator """rankbased""" +521 79 dataset """kinships""" +521 79 model """rescal""" +521 79 loss """bceaftersigmoid""" +521 79 regularizer """no""" +521 79 optimizer """adadelta""" +521 79 training_loop """lcwa""" +521 79 evaluator """rankbased""" +521 80 dataset """kinships""" +521 80 model """rescal""" +521 80 loss """bceaftersigmoid""" +521 80 regularizer """no""" +521 80 optimizer """adadelta""" +521 80 training_loop """lcwa""" +521 80 evaluator """rankbased""" +521 81 dataset """kinships""" +521 81 model """rescal""" +521 81 loss """bceaftersigmoid""" +521 81 regularizer """no""" +521 81 optimizer """adadelta""" +521 81 training_loop """lcwa""" +521 81 evaluator """rankbased""" +521 82 dataset """kinships""" +521 82 model """rescal""" +521 82 loss """bceaftersigmoid""" +521 82 regularizer """no""" +521 82 optimizer """adadelta""" +521 82 training_loop """lcwa""" +521 82 evaluator """rankbased""" +521 83 dataset """kinships""" 
+521 83 model """rescal""" +521 83 loss """bceaftersigmoid""" +521 83 regularizer """no""" +521 83 optimizer """adadelta""" +521 83 training_loop """lcwa""" +521 83 evaluator """rankbased""" +521 84 dataset """kinships""" +521 84 model """rescal""" +521 84 loss """bceaftersigmoid""" +521 84 regularizer """no""" +521 84 optimizer """adadelta""" +521 84 training_loop """lcwa""" +521 84 evaluator """rankbased""" +521 85 dataset """kinships""" +521 85 model """rescal""" +521 85 loss """bceaftersigmoid""" +521 85 regularizer """no""" +521 85 optimizer """adadelta""" +521 85 training_loop """lcwa""" +521 85 evaluator """rankbased""" +521 86 dataset """kinships""" +521 86 model """rescal""" +521 86 loss """bceaftersigmoid""" +521 86 regularizer """no""" +521 86 optimizer """adadelta""" +521 86 training_loop """lcwa""" +521 86 evaluator """rankbased""" +521 87 dataset """kinships""" +521 87 model """rescal""" +521 87 loss """bceaftersigmoid""" +521 87 regularizer """no""" +521 87 optimizer """adadelta""" +521 87 training_loop """lcwa""" +521 87 evaluator """rankbased""" +521 88 dataset """kinships""" +521 88 model """rescal""" +521 88 loss """bceaftersigmoid""" +521 88 regularizer """no""" +521 88 optimizer """adadelta""" +521 88 training_loop """lcwa""" +521 88 evaluator """rankbased""" +521 89 dataset """kinships""" +521 89 model """rescal""" +521 89 loss """bceaftersigmoid""" +521 89 regularizer """no""" +521 89 optimizer """adadelta""" +521 89 training_loop """lcwa""" +521 89 evaluator """rankbased""" +521 90 dataset """kinships""" +521 90 model """rescal""" +521 90 loss """bceaftersigmoid""" +521 90 regularizer """no""" +521 90 optimizer """adadelta""" +521 90 training_loop """lcwa""" +521 90 evaluator """rankbased""" +521 91 dataset """kinships""" +521 91 model """rescal""" +521 91 loss """bceaftersigmoid""" +521 91 regularizer """no""" +521 91 optimizer """adadelta""" +521 91 training_loop """lcwa""" +521 91 evaluator """rankbased""" +521 92 dataset """kinships""" 
+521 92 model """rescal""" +521 92 loss """bceaftersigmoid""" +521 92 regularizer """no""" +521 92 optimizer """adadelta""" +521 92 training_loop """lcwa""" +521 92 evaluator """rankbased""" +521 93 dataset """kinships""" +521 93 model """rescal""" +521 93 loss """bceaftersigmoid""" +521 93 regularizer """no""" +521 93 optimizer """adadelta""" +521 93 training_loop """lcwa""" +521 93 evaluator """rankbased""" +521 94 dataset """kinships""" +521 94 model """rescal""" +521 94 loss """bceaftersigmoid""" +521 94 regularizer """no""" +521 94 optimizer """adadelta""" +521 94 training_loop """lcwa""" +521 94 evaluator """rankbased""" +521 95 dataset """kinships""" +521 95 model """rescal""" +521 95 loss """bceaftersigmoid""" +521 95 regularizer """no""" +521 95 optimizer """adadelta""" +521 95 training_loop """lcwa""" +521 95 evaluator """rankbased""" +521 96 dataset """kinships""" +521 96 model """rescal""" +521 96 loss """bceaftersigmoid""" +521 96 regularizer """no""" +521 96 optimizer """adadelta""" +521 96 training_loop """lcwa""" +521 96 evaluator """rankbased""" +521 97 dataset """kinships""" +521 97 model """rescal""" +521 97 loss """bceaftersigmoid""" +521 97 regularizer """no""" +521 97 optimizer """adadelta""" +521 97 training_loop """lcwa""" +521 97 evaluator """rankbased""" +521 98 dataset """kinships""" +521 98 model """rescal""" +521 98 loss """bceaftersigmoid""" +521 98 regularizer """no""" +521 98 optimizer """adadelta""" +521 98 training_loop """lcwa""" +521 98 evaluator """rankbased""" +521 99 dataset """kinships""" +521 99 model """rescal""" +521 99 loss """bceaftersigmoid""" +521 99 regularizer """no""" +521 99 optimizer """adadelta""" +521 99 training_loop """lcwa""" +521 99 evaluator """rankbased""" +521 100 dataset """kinships""" +521 100 model """rescal""" +521 100 loss """bceaftersigmoid""" +521 100 regularizer """no""" +521 100 optimizer """adadelta""" +521 100 training_loop """lcwa""" +521 100 evaluator """rankbased""" +522 1 
model.embedding_dim 2.0 +522 1 training.batch_size 0.0 +522 1 training.label_smoothing 0.003968975061520053 +522 2 model.embedding_dim 0.0 +522 2 training.batch_size 1.0 +522 2 training.label_smoothing 0.0059772282088046 +522 3 model.embedding_dim 1.0 +522 3 training.batch_size 0.0 +522 3 training.label_smoothing 0.04071126887630764 +522 4 model.embedding_dim 0.0 +522 4 training.batch_size 0.0 +522 4 training.label_smoothing 0.0022738359076208874 +522 5 model.embedding_dim 1.0 +522 5 training.batch_size 2.0 +522 5 training.label_smoothing 0.0040154583405705695 +522 6 model.embedding_dim 0.0 +522 6 training.batch_size 1.0 +522 6 training.label_smoothing 0.2125634054494719 +522 7 model.embedding_dim 0.0 +522 7 training.batch_size 1.0 +522 7 training.label_smoothing 0.01265463258781004 +522 8 model.embedding_dim 0.0 +522 8 training.batch_size 2.0 +522 8 training.label_smoothing 0.0181010584676329 +522 9 model.embedding_dim 0.0 +522 9 training.batch_size 2.0 +522 9 training.label_smoothing 0.03577273568985174 +522 10 model.embedding_dim 2.0 +522 10 training.batch_size 0.0 +522 10 training.label_smoothing 0.027381298358695516 +522 11 model.embedding_dim 0.0 +522 11 training.batch_size 2.0 +522 11 training.label_smoothing 0.1315552303315061 +522 12 model.embedding_dim 0.0 +522 12 training.batch_size 0.0 +522 12 training.label_smoothing 0.1591075261951009 +522 13 model.embedding_dim 1.0 +522 13 training.batch_size 1.0 +522 13 training.label_smoothing 0.18789231798013054 +522 14 model.embedding_dim 1.0 +522 14 training.batch_size 0.0 +522 14 training.label_smoothing 0.0025533056040554893 +522 15 model.embedding_dim 1.0 +522 15 training.batch_size 0.0 +522 15 training.label_smoothing 0.0012755319760784922 +522 16 model.embedding_dim 1.0 +522 16 training.batch_size 1.0 +522 16 training.label_smoothing 0.005629427669398177 +522 17 model.embedding_dim 2.0 +522 17 training.batch_size 2.0 +522 17 training.label_smoothing 0.4352635749096461 +522 18 model.embedding_dim 1.0 +522 18 
training.batch_size 2.0 +522 18 training.label_smoothing 0.035817617668620565 +522 19 model.embedding_dim 1.0 +522 19 training.batch_size 0.0 +522 19 training.label_smoothing 0.008589058573976125 +522 20 model.embedding_dim 0.0 +522 20 training.batch_size 2.0 +522 20 training.label_smoothing 0.869792454679498 +522 21 model.embedding_dim 2.0 +522 21 training.batch_size 1.0 +522 21 training.label_smoothing 0.0011068001804289398 +522 22 model.embedding_dim 2.0 +522 22 training.batch_size 1.0 +522 22 training.label_smoothing 0.00516002298310237 +522 23 model.embedding_dim 0.0 +522 23 training.batch_size 2.0 +522 23 training.label_smoothing 0.007695604422980697 +522 24 model.embedding_dim 1.0 +522 24 training.batch_size 2.0 +522 24 training.label_smoothing 0.10212151476271462 +522 25 model.embedding_dim 1.0 +522 25 training.batch_size 0.0 +522 25 training.label_smoothing 0.024554796174968786 +522 26 model.embedding_dim 1.0 +522 26 training.batch_size 0.0 +522 26 training.label_smoothing 0.5301493226942157 +522 27 model.embedding_dim 0.0 +522 27 training.batch_size 2.0 +522 27 training.label_smoothing 0.0029767408157362445 +522 28 model.embedding_dim 0.0 +522 28 training.batch_size 2.0 +522 28 training.label_smoothing 0.46626615663002524 +522 29 model.embedding_dim 2.0 +522 29 training.batch_size 2.0 +522 29 training.label_smoothing 0.032490603916150676 +522 30 model.embedding_dim 2.0 +522 30 training.batch_size 1.0 +522 30 training.label_smoothing 0.46524510861478113 +522 31 model.embedding_dim 0.0 +522 31 training.batch_size 1.0 +522 31 training.label_smoothing 0.7184424301281048 +522 32 model.embedding_dim 0.0 +522 32 training.batch_size 1.0 +522 32 training.label_smoothing 0.0883391719770518 +522 33 model.embedding_dim 0.0 +522 33 training.batch_size 0.0 +522 33 training.label_smoothing 0.0016624508021627152 +522 34 model.embedding_dim 0.0 +522 34 training.batch_size 2.0 +522 34 training.label_smoothing 0.025587656142991088 +522 35 model.embedding_dim 2.0 +522 35 
training.batch_size 1.0 +522 35 training.label_smoothing 0.17505167486190454 +522 36 model.embedding_dim 0.0 +522 36 training.batch_size 1.0 +522 36 training.label_smoothing 0.3451065978788446 +522 37 model.embedding_dim 2.0 +522 37 training.batch_size 0.0 +522 37 training.label_smoothing 0.018622075178788364 +522 38 model.embedding_dim 2.0 +522 38 training.batch_size 1.0 +522 38 training.label_smoothing 0.008071014322127017 +522 39 model.embedding_dim 0.0 +522 39 training.batch_size 2.0 +522 39 training.label_smoothing 0.008865065475313657 +522 40 model.embedding_dim 0.0 +522 40 training.batch_size 1.0 +522 40 training.label_smoothing 0.24751708018558063 +522 41 model.embedding_dim 2.0 +522 41 training.batch_size 1.0 +522 41 training.label_smoothing 0.009488478956724784 +522 42 model.embedding_dim 1.0 +522 42 training.batch_size 1.0 +522 42 training.label_smoothing 0.03249671388478114 +522 43 model.embedding_dim 0.0 +522 43 training.batch_size 2.0 +522 43 training.label_smoothing 0.0010880083125733824 +522 44 model.embedding_dim 2.0 +522 44 training.batch_size 0.0 +522 44 training.label_smoothing 0.00881871478215324 +522 45 model.embedding_dim 0.0 +522 45 training.batch_size 1.0 +522 45 training.label_smoothing 0.21070862346072564 +522 46 model.embedding_dim 1.0 +522 46 training.batch_size 1.0 +522 46 training.label_smoothing 0.015220253512110786 +522 47 model.embedding_dim 0.0 +522 47 training.batch_size 0.0 +522 47 training.label_smoothing 0.007768508725033785 +522 48 model.embedding_dim 2.0 +522 48 training.batch_size 2.0 +522 48 training.label_smoothing 0.19262597215429225 +522 49 model.embedding_dim 1.0 +522 49 training.batch_size 2.0 +522 49 training.label_smoothing 0.0012146086474518114 +522 50 model.embedding_dim 1.0 +522 50 training.batch_size 1.0 +522 50 training.label_smoothing 0.004839107342148144 +522 51 model.embedding_dim 0.0 +522 51 training.batch_size 0.0 +522 51 training.label_smoothing 0.10642433996770921 +522 52 model.embedding_dim 2.0 +522 52 
training.batch_size 0.0 +522 52 training.label_smoothing 0.013226603989075013 +522 53 model.embedding_dim 0.0 +522 53 training.batch_size 1.0 +522 53 training.label_smoothing 0.015891351154777328 +522 54 model.embedding_dim 0.0 +522 54 training.batch_size 1.0 +522 54 training.label_smoothing 0.02762163291340774 +522 55 model.embedding_dim 1.0 +522 55 training.batch_size 2.0 +522 55 training.label_smoothing 0.01680316057452617 +522 56 model.embedding_dim 2.0 +522 56 training.batch_size 0.0 +522 56 training.label_smoothing 0.038812966400859454 +522 57 model.embedding_dim 1.0 +522 57 training.batch_size 0.0 +522 57 training.label_smoothing 0.16381281613679888 +522 58 model.embedding_dim 2.0 +522 58 training.batch_size 0.0 +522 58 training.label_smoothing 0.0013943592729071866 +522 59 model.embedding_dim 0.0 +522 59 training.batch_size 0.0 +522 59 training.label_smoothing 0.0015456665816064906 +522 60 model.embedding_dim 2.0 +522 60 training.batch_size 1.0 +522 60 training.label_smoothing 0.03797244567584157 +522 61 model.embedding_dim 1.0 +522 61 training.batch_size 1.0 +522 61 training.label_smoothing 0.00571657038410244 +522 62 model.embedding_dim 0.0 +522 62 training.batch_size 1.0 +522 62 training.label_smoothing 0.5722552504695547 +522 63 model.embedding_dim 0.0 +522 63 training.batch_size 0.0 +522 63 training.label_smoothing 0.056420742875675535 +522 64 model.embedding_dim 2.0 +522 64 training.batch_size 0.0 +522 64 training.label_smoothing 0.020379562695430457 +522 65 model.embedding_dim 0.0 +522 65 training.batch_size 0.0 +522 65 training.label_smoothing 0.0021945487233312427 +522 66 model.embedding_dim 0.0 +522 66 training.batch_size 1.0 +522 66 training.label_smoothing 0.0451746332607732 +522 67 model.embedding_dim 2.0 +522 67 training.batch_size 0.0 +522 67 training.label_smoothing 0.22242529969854022 +522 68 model.embedding_dim 2.0 +522 68 training.batch_size 0.0 +522 68 training.label_smoothing 0.17021148764161487 +522 69 model.embedding_dim 2.0 +522 69 
training.batch_size 1.0 +522 69 training.label_smoothing 0.05765656168558504 +522 70 model.embedding_dim 0.0 +522 70 training.batch_size 1.0 +522 70 training.label_smoothing 0.004762132526780676 +522 71 model.embedding_dim 2.0 +522 71 training.batch_size 0.0 +522 71 training.label_smoothing 0.7501400843397524 +522 72 model.embedding_dim 1.0 +522 72 training.batch_size 2.0 +522 72 training.label_smoothing 0.03351983404942192 +522 73 model.embedding_dim 2.0 +522 73 training.batch_size 0.0 +522 73 training.label_smoothing 0.7815514883122268 +522 74 model.embedding_dim 1.0 +522 74 training.batch_size 2.0 +522 74 training.label_smoothing 0.04855775718379493 +522 75 model.embedding_dim 1.0 +522 75 training.batch_size 2.0 +522 75 training.label_smoothing 0.0168138298906786 +522 76 model.embedding_dim 0.0 +522 76 training.batch_size 1.0 +522 76 training.label_smoothing 0.053055844611613744 +522 77 model.embedding_dim 2.0 +522 77 training.batch_size 1.0 +522 77 training.label_smoothing 0.007696900052624658 +522 78 model.embedding_dim 2.0 +522 78 training.batch_size 2.0 +522 78 training.label_smoothing 0.015432302499111298 +522 79 model.embedding_dim 2.0 +522 79 training.batch_size 0.0 +522 79 training.label_smoothing 0.008909299867369627 +522 80 model.embedding_dim 2.0 +522 80 training.batch_size 1.0 +522 80 training.label_smoothing 0.006561182282498729 +522 81 model.embedding_dim 1.0 +522 81 training.batch_size 2.0 +522 81 training.label_smoothing 0.3186960300765486 +522 82 model.embedding_dim 2.0 +522 82 training.batch_size 2.0 +522 82 training.label_smoothing 0.0011356962179756068 +522 83 model.embedding_dim 2.0 +522 83 training.batch_size 0.0 +522 83 training.label_smoothing 0.07926312953457777 +522 84 model.embedding_dim 1.0 +522 84 training.batch_size 1.0 +522 84 training.label_smoothing 0.3348021556959317 +522 85 model.embedding_dim 2.0 +522 85 training.batch_size 0.0 +522 85 training.label_smoothing 0.37360113032189807 +522 86 model.embedding_dim 1.0 +522 86 
training.batch_size 2.0 +522 86 training.label_smoothing 0.2049715608461004 +522 87 model.embedding_dim 0.0 +522 87 training.batch_size 0.0 +522 87 training.label_smoothing 0.0019711952446745656 +522 88 model.embedding_dim 0.0 +522 88 training.batch_size 1.0 +522 88 training.label_smoothing 0.04331173963080601 +522 89 model.embedding_dim 2.0 +522 89 training.batch_size 1.0 +522 89 training.label_smoothing 0.013701068418369109 +522 90 model.embedding_dim 1.0 +522 90 training.batch_size 2.0 +522 90 training.label_smoothing 0.6029465343047848 +522 91 model.embedding_dim 0.0 +522 91 training.batch_size 0.0 +522 91 training.label_smoothing 0.011632875670889851 +522 92 model.embedding_dim 0.0 +522 92 training.batch_size 0.0 +522 92 training.label_smoothing 0.02778506860371784 +522 93 model.embedding_dim 2.0 +522 93 training.batch_size 1.0 +522 93 training.label_smoothing 0.02234168894173275 +522 94 model.embedding_dim 1.0 +522 94 training.batch_size 1.0 +522 94 training.label_smoothing 0.051152058120481333 +522 95 model.embedding_dim 2.0 +522 95 training.batch_size 1.0 +522 95 training.label_smoothing 0.025882559180610488 +522 96 model.embedding_dim 1.0 +522 96 training.batch_size 0.0 +522 96 training.label_smoothing 0.23600152318488515 +522 97 model.embedding_dim 2.0 +522 97 training.batch_size 0.0 +522 97 training.label_smoothing 0.0019998426106970103 +522 98 model.embedding_dim 1.0 +522 98 training.batch_size 0.0 +522 98 training.label_smoothing 0.3585028002858198 +522 99 model.embedding_dim 0.0 +522 99 training.batch_size 0.0 +522 99 training.label_smoothing 0.09817838872251163 +522 100 model.embedding_dim 0.0 +522 100 training.batch_size 2.0 +522 100 training.label_smoothing 0.0031101181758621576 +522 1 dataset """kinships""" +522 1 model """rescal""" +522 1 loss """softplus""" +522 1 regularizer """no""" +522 1 optimizer """adadelta""" +522 1 training_loop """lcwa""" +522 1 evaluator """rankbased""" +522 2 dataset """kinships""" +522 2 model """rescal""" +522 2 
loss """softplus""" +522 2 regularizer """no""" +522 2 optimizer """adadelta""" +522 2 training_loop """lcwa""" +522 2 evaluator """rankbased""" +522 3 dataset """kinships""" +522 3 model """rescal""" +522 3 loss """softplus""" +522 3 regularizer """no""" +522 3 optimizer """adadelta""" +522 3 training_loop """lcwa""" +522 3 evaluator """rankbased""" +522 4 dataset """kinships""" +522 4 model """rescal""" +522 4 loss """softplus""" +522 4 regularizer """no""" +522 4 optimizer """adadelta""" +522 4 training_loop """lcwa""" +522 4 evaluator """rankbased""" +522 5 dataset """kinships""" +522 5 model """rescal""" +522 5 loss """softplus""" +522 5 regularizer """no""" +522 5 optimizer """adadelta""" +522 5 training_loop """lcwa""" +522 5 evaluator """rankbased""" +522 6 dataset """kinships""" +522 6 model """rescal""" +522 6 loss """softplus""" +522 6 regularizer """no""" +522 6 optimizer """adadelta""" +522 6 training_loop """lcwa""" +522 6 evaluator """rankbased""" +522 7 dataset """kinships""" +522 7 model """rescal""" +522 7 loss """softplus""" +522 7 regularizer """no""" +522 7 optimizer """adadelta""" +522 7 training_loop """lcwa""" +522 7 evaluator """rankbased""" +522 8 dataset """kinships""" +522 8 model """rescal""" +522 8 loss """softplus""" +522 8 regularizer """no""" +522 8 optimizer """adadelta""" +522 8 training_loop """lcwa""" +522 8 evaluator """rankbased""" +522 9 dataset """kinships""" +522 9 model """rescal""" +522 9 loss """softplus""" +522 9 regularizer """no""" +522 9 optimizer """adadelta""" +522 9 training_loop """lcwa""" +522 9 evaluator """rankbased""" +522 10 dataset """kinships""" +522 10 model """rescal""" +522 10 loss """softplus""" +522 10 regularizer """no""" +522 10 optimizer """adadelta""" +522 10 training_loop """lcwa""" +522 10 evaluator """rankbased""" +522 11 dataset """kinships""" +522 11 model """rescal""" +522 11 loss """softplus""" +522 11 regularizer """no""" +522 11 optimizer """adadelta""" +522 11 training_loop """lcwa""" 
+522 11 evaluator """rankbased""" +522 12 dataset """kinships""" +522 12 model """rescal""" +522 12 loss """softplus""" +522 12 regularizer """no""" +522 12 optimizer """adadelta""" +522 12 training_loop """lcwa""" +522 12 evaluator """rankbased""" +522 13 dataset """kinships""" +522 13 model """rescal""" +522 13 loss """softplus""" +522 13 regularizer """no""" +522 13 optimizer """adadelta""" +522 13 training_loop """lcwa""" +522 13 evaluator """rankbased""" +522 14 dataset """kinships""" +522 14 model """rescal""" +522 14 loss """softplus""" +522 14 regularizer """no""" +522 14 optimizer """adadelta""" +522 14 training_loop """lcwa""" +522 14 evaluator """rankbased""" +522 15 dataset """kinships""" +522 15 model """rescal""" +522 15 loss """softplus""" +522 15 regularizer """no""" +522 15 optimizer """adadelta""" +522 15 training_loop """lcwa""" +522 15 evaluator """rankbased""" +522 16 dataset """kinships""" +522 16 model """rescal""" +522 16 loss """softplus""" +522 16 regularizer """no""" +522 16 optimizer """adadelta""" +522 16 training_loop """lcwa""" +522 16 evaluator """rankbased""" +522 17 dataset """kinships""" +522 17 model """rescal""" +522 17 loss """softplus""" +522 17 regularizer """no""" +522 17 optimizer """adadelta""" +522 17 training_loop """lcwa""" +522 17 evaluator """rankbased""" +522 18 dataset """kinships""" +522 18 model """rescal""" +522 18 loss """softplus""" +522 18 regularizer """no""" +522 18 optimizer """adadelta""" +522 18 training_loop """lcwa""" +522 18 evaluator """rankbased""" +522 19 dataset """kinships""" +522 19 model """rescal""" +522 19 loss """softplus""" +522 19 regularizer """no""" +522 19 optimizer """adadelta""" +522 19 training_loop """lcwa""" +522 19 evaluator """rankbased""" +522 20 dataset """kinships""" +522 20 model """rescal""" +522 20 loss """softplus""" +522 20 regularizer """no""" +522 20 optimizer """adadelta""" +522 20 training_loop """lcwa""" +522 20 evaluator """rankbased""" +522 21 dataset """kinships""" 
+522 21 model """rescal""" +522 21 loss """softplus""" +522 21 regularizer """no""" +522 21 optimizer """adadelta""" +522 21 training_loop """lcwa""" +522 21 evaluator """rankbased""" +522 22 dataset """kinships""" +522 22 model """rescal""" +522 22 loss """softplus""" +522 22 regularizer """no""" +522 22 optimizer """adadelta""" +522 22 training_loop """lcwa""" +522 22 evaluator """rankbased""" +522 23 dataset """kinships""" +522 23 model """rescal""" +522 23 loss """softplus""" +522 23 regularizer """no""" +522 23 optimizer """adadelta""" +522 23 training_loop """lcwa""" +522 23 evaluator """rankbased""" +522 24 dataset """kinships""" +522 24 model """rescal""" +522 24 loss """softplus""" +522 24 regularizer """no""" +522 24 optimizer """adadelta""" +522 24 training_loop """lcwa""" +522 24 evaluator """rankbased""" +522 25 dataset """kinships""" +522 25 model """rescal""" +522 25 loss """softplus""" +522 25 regularizer """no""" +522 25 optimizer """adadelta""" +522 25 training_loop """lcwa""" +522 25 evaluator """rankbased""" +522 26 dataset """kinships""" +522 26 model """rescal""" +522 26 loss """softplus""" +522 26 regularizer """no""" +522 26 optimizer """adadelta""" +522 26 training_loop """lcwa""" +522 26 evaluator """rankbased""" +522 27 dataset """kinships""" +522 27 model """rescal""" +522 27 loss """softplus""" +522 27 regularizer """no""" +522 27 optimizer """adadelta""" +522 27 training_loop """lcwa""" +522 27 evaluator """rankbased""" +522 28 dataset """kinships""" +522 28 model """rescal""" +522 28 loss """softplus""" +522 28 regularizer """no""" +522 28 optimizer """adadelta""" +522 28 training_loop """lcwa""" +522 28 evaluator """rankbased""" +522 29 dataset """kinships""" +522 29 model """rescal""" +522 29 loss """softplus""" +522 29 regularizer """no""" +522 29 optimizer """adadelta""" +522 29 training_loop """lcwa""" +522 29 evaluator """rankbased""" +522 30 dataset """kinships""" +522 30 model """rescal""" +522 30 loss """softplus""" +522 30 
regularizer """no""" +522 30 optimizer """adadelta""" +522 30 training_loop """lcwa""" +522 30 evaluator """rankbased""" +522 31 dataset """kinships""" +522 31 model """rescal""" +522 31 loss """softplus""" +522 31 regularizer """no""" +522 31 optimizer """adadelta""" +522 31 training_loop """lcwa""" +522 31 evaluator """rankbased""" +522 32 dataset """kinships""" +522 32 model """rescal""" +522 32 loss """softplus""" +522 32 regularizer """no""" +522 32 optimizer """adadelta""" +522 32 training_loop """lcwa""" +522 32 evaluator """rankbased""" +522 33 dataset """kinships""" +522 33 model """rescal""" +522 33 loss """softplus""" +522 33 regularizer """no""" +522 33 optimizer """adadelta""" +522 33 training_loop """lcwa""" +522 33 evaluator """rankbased""" +522 34 dataset """kinships""" +522 34 model """rescal""" +522 34 loss """softplus""" +522 34 regularizer """no""" +522 34 optimizer """adadelta""" +522 34 training_loop """lcwa""" +522 34 evaluator """rankbased""" +522 35 dataset """kinships""" +522 35 model """rescal""" +522 35 loss """softplus""" +522 35 regularizer """no""" +522 35 optimizer """adadelta""" +522 35 training_loop """lcwa""" +522 35 evaluator """rankbased""" +522 36 dataset """kinships""" +522 36 model """rescal""" +522 36 loss """softplus""" +522 36 regularizer """no""" +522 36 optimizer """adadelta""" +522 36 training_loop """lcwa""" +522 36 evaluator """rankbased""" +522 37 dataset """kinships""" +522 37 model """rescal""" +522 37 loss """softplus""" +522 37 regularizer """no""" +522 37 optimizer """adadelta""" +522 37 training_loop """lcwa""" +522 37 evaluator """rankbased""" +522 38 dataset """kinships""" +522 38 model """rescal""" +522 38 loss """softplus""" +522 38 regularizer """no""" +522 38 optimizer """adadelta""" +522 38 training_loop """lcwa""" +522 38 evaluator """rankbased""" +522 39 dataset """kinships""" +522 39 model """rescal""" +522 39 loss """softplus""" +522 39 regularizer """no""" +522 39 optimizer """adadelta""" +522 39 
training_loop """lcwa""" +522 39 evaluator """rankbased""" +522 40 dataset """kinships""" +522 40 model """rescal""" +522 40 loss """softplus""" +522 40 regularizer """no""" +522 40 optimizer """adadelta""" +522 40 training_loop """lcwa""" +522 40 evaluator """rankbased""" +522 41 dataset """kinships""" +522 41 model """rescal""" +522 41 loss """softplus""" +522 41 regularizer """no""" +522 41 optimizer """adadelta""" +522 41 training_loop """lcwa""" +522 41 evaluator """rankbased""" +522 42 dataset """kinships""" +522 42 model """rescal""" +522 42 loss """softplus""" +522 42 regularizer """no""" +522 42 optimizer """adadelta""" +522 42 training_loop """lcwa""" +522 42 evaluator """rankbased""" +522 43 dataset """kinships""" +522 43 model """rescal""" +522 43 loss """softplus""" +522 43 regularizer """no""" +522 43 optimizer """adadelta""" +522 43 training_loop """lcwa""" +522 43 evaluator """rankbased""" +522 44 dataset """kinships""" +522 44 model """rescal""" +522 44 loss """softplus""" +522 44 regularizer """no""" +522 44 optimizer """adadelta""" +522 44 training_loop """lcwa""" +522 44 evaluator """rankbased""" +522 45 dataset """kinships""" +522 45 model """rescal""" +522 45 loss """softplus""" +522 45 regularizer """no""" +522 45 optimizer """adadelta""" +522 45 training_loop """lcwa""" +522 45 evaluator """rankbased""" +522 46 dataset """kinships""" +522 46 model """rescal""" +522 46 loss """softplus""" +522 46 regularizer """no""" +522 46 optimizer """adadelta""" +522 46 training_loop """lcwa""" +522 46 evaluator """rankbased""" +522 47 dataset """kinships""" +522 47 model """rescal""" +522 47 loss """softplus""" +522 47 regularizer """no""" +522 47 optimizer """adadelta""" +522 47 training_loop """lcwa""" +522 47 evaluator """rankbased""" +522 48 dataset """kinships""" +522 48 model """rescal""" +522 48 loss """softplus""" +522 48 regularizer """no""" +522 48 optimizer """adadelta""" +522 48 training_loop """lcwa""" +522 48 evaluator """rankbased""" +522 
49 dataset """kinships""" +522 49 model """rescal""" +522 49 loss """softplus""" +522 49 regularizer """no""" +522 49 optimizer """adadelta""" +522 49 training_loop """lcwa""" +522 49 evaluator """rankbased""" +522 50 dataset """kinships""" +522 50 model """rescal""" +522 50 loss """softplus""" +522 50 regularizer """no""" +522 50 optimizer """adadelta""" +522 50 training_loop """lcwa""" +522 50 evaluator """rankbased""" +522 51 dataset """kinships""" +522 51 model """rescal""" +522 51 loss """softplus""" +522 51 regularizer """no""" +522 51 optimizer """adadelta""" +522 51 training_loop """lcwa""" +522 51 evaluator """rankbased""" +522 52 dataset """kinships""" +522 52 model """rescal""" +522 52 loss """softplus""" +522 52 regularizer """no""" +522 52 optimizer """adadelta""" +522 52 training_loop """lcwa""" +522 52 evaluator """rankbased""" +522 53 dataset """kinships""" +522 53 model """rescal""" +522 53 loss """softplus""" +522 53 regularizer """no""" +522 53 optimizer """adadelta""" +522 53 training_loop """lcwa""" +522 53 evaluator """rankbased""" +522 54 dataset """kinships""" +522 54 model """rescal""" +522 54 loss """softplus""" +522 54 regularizer """no""" +522 54 optimizer """adadelta""" +522 54 training_loop """lcwa""" +522 54 evaluator """rankbased""" +522 55 dataset """kinships""" +522 55 model """rescal""" +522 55 loss """softplus""" +522 55 regularizer """no""" +522 55 optimizer """adadelta""" +522 55 training_loop """lcwa""" +522 55 evaluator """rankbased""" +522 56 dataset """kinships""" +522 56 model """rescal""" +522 56 loss """softplus""" +522 56 regularizer """no""" +522 56 optimizer """adadelta""" +522 56 training_loop """lcwa""" +522 56 evaluator """rankbased""" +522 57 dataset """kinships""" +522 57 model """rescal""" +522 57 loss """softplus""" +522 57 regularizer """no""" +522 57 optimizer """adadelta""" +522 57 training_loop """lcwa""" +522 57 evaluator """rankbased""" +522 58 dataset """kinships""" +522 58 model """rescal""" +522 58 
loss """softplus""" +522 58 regularizer """no""" +522 58 optimizer """adadelta""" +522 58 training_loop """lcwa""" +522 58 evaluator """rankbased""" +522 59 dataset """kinships""" +522 59 model """rescal""" +522 59 loss """softplus""" +522 59 regularizer """no""" +522 59 optimizer """adadelta""" +522 59 training_loop """lcwa""" +522 59 evaluator """rankbased""" +522 60 dataset """kinships""" +522 60 model """rescal""" +522 60 loss """softplus""" +522 60 regularizer """no""" +522 60 optimizer """adadelta""" +522 60 training_loop """lcwa""" +522 60 evaluator """rankbased""" +522 61 dataset """kinships""" +522 61 model """rescal""" +522 61 loss """softplus""" +522 61 regularizer """no""" +522 61 optimizer """adadelta""" +522 61 training_loop """lcwa""" +522 61 evaluator """rankbased""" +522 62 dataset """kinships""" +522 62 model """rescal""" +522 62 loss """softplus""" +522 62 regularizer """no""" +522 62 optimizer """adadelta""" +522 62 training_loop """lcwa""" +522 62 evaluator """rankbased""" +522 63 dataset """kinships""" +522 63 model """rescal""" +522 63 loss """softplus""" +522 63 regularizer """no""" +522 63 optimizer """adadelta""" +522 63 training_loop """lcwa""" +522 63 evaluator """rankbased""" +522 64 dataset """kinships""" +522 64 model """rescal""" +522 64 loss """softplus""" +522 64 regularizer """no""" +522 64 optimizer """adadelta""" +522 64 training_loop """lcwa""" +522 64 evaluator """rankbased""" +522 65 dataset """kinships""" +522 65 model """rescal""" +522 65 loss """softplus""" +522 65 regularizer """no""" +522 65 optimizer """adadelta""" +522 65 training_loop """lcwa""" +522 65 evaluator """rankbased""" +522 66 dataset """kinships""" +522 66 model """rescal""" +522 66 loss """softplus""" +522 66 regularizer """no""" +522 66 optimizer """adadelta""" +522 66 training_loop """lcwa""" +522 66 evaluator """rankbased""" +522 67 dataset """kinships""" +522 67 model """rescal""" +522 67 loss """softplus""" +522 67 regularizer """no""" +522 67 
optimizer """adadelta""" +522 67 training_loop """lcwa""" +522 67 evaluator """rankbased""" +522 68 dataset """kinships""" +522 68 model """rescal""" +522 68 loss """softplus""" +522 68 regularizer """no""" +522 68 optimizer """adadelta""" +522 68 training_loop """lcwa""" +522 68 evaluator """rankbased""" +522 69 dataset """kinships""" +522 69 model """rescal""" +522 69 loss """softplus""" +522 69 regularizer """no""" +522 69 optimizer """adadelta""" +522 69 training_loop """lcwa""" +522 69 evaluator """rankbased""" +522 70 dataset """kinships""" +522 70 model """rescal""" +522 70 loss """softplus""" +522 70 regularizer """no""" +522 70 optimizer """adadelta""" +522 70 training_loop """lcwa""" +522 70 evaluator """rankbased""" +522 71 dataset """kinships""" +522 71 model """rescal""" +522 71 loss """softplus""" +522 71 regularizer """no""" +522 71 optimizer """adadelta""" +522 71 training_loop """lcwa""" +522 71 evaluator """rankbased""" +522 72 dataset """kinships""" +522 72 model """rescal""" +522 72 loss """softplus""" +522 72 regularizer """no""" +522 72 optimizer """adadelta""" +522 72 training_loop """lcwa""" +522 72 evaluator """rankbased""" +522 73 dataset """kinships""" +522 73 model """rescal""" +522 73 loss """softplus""" +522 73 regularizer """no""" +522 73 optimizer """adadelta""" +522 73 training_loop """lcwa""" +522 73 evaluator """rankbased""" +522 74 dataset """kinships""" +522 74 model """rescal""" +522 74 loss """softplus""" +522 74 regularizer """no""" +522 74 optimizer """adadelta""" +522 74 training_loop """lcwa""" +522 74 evaluator """rankbased""" +522 75 dataset """kinships""" +522 75 model """rescal""" +522 75 loss """softplus""" +522 75 regularizer """no""" +522 75 optimizer """adadelta""" +522 75 training_loop """lcwa""" +522 75 evaluator """rankbased""" +522 76 dataset """kinships""" +522 76 model """rescal""" +522 76 loss """softplus""" +522 76 regularizer """no""" +522 76 optimizer """adadelta""" +522 76 training_loop """lcwa""" +522 
76 evaluator """rankbased""" +522 77 dataset """kinships""" +522 77 model """rescal""" +522 77 loss """softplus""" +522 77 regularizer """no""" +522 77 optimizer """adadelta""" +522 77 training_loop """lcwa""" +522 77 evaluator """rankbased""" +522 78 dataset """kinships""" +522 78 model """rescal""" +522 78 loss """softplus""" +522 78 regularizer """no""" +522 78 optimizer """adadelta""" +522 78 training_loop """lcwa""" +522 78 evaluator """rankbased""" +522 79 dataset """kinships""" +522 79 model """rescal""" +522 79 loss """softplus""" +522 79 regularizer """no""" +522 79 optimizer """adadelta""" +522 79 training_loop """lcwa""" +522 79 evaluator """rankbased""" +522 80 dataset """kinships""" +522 80 model """rescal""" +522 80 loss """softplus""" +522 80 regularizer """no""" +522 80 optimizer """adadelta""" +522 80 training_loop """lcwa""" +522 80 evaluator """rankbased""" +522 81 dataset """kinships""" +522 81 model """rescal""" +522 81 loss """softplus""" +522 81 regularizer """no""" +522 81 optimizer """adadelta""" +522 81 training_loop """lcwa""" +522 81 evaluator """rankbased""" +522 82 dataset """kinships""" +522 82 model """rescal""" +522 82 loss """softplus""" +522 82 regularizer """no""" +522 82 optimizer """adadelta""" +522 82 training_loop """lcwa""" +522 82 evaluator """rankbased""" +522 83 dataset """kinships""" +522 83 model """rescal""" +522 83 loss """softplus""" +522 83 regularizer """no""" +522 83 optimizer """adadelta""" +522 83 training_loop """lcwa""" +522 83 evaluator """rankbased""" +522 84 dataset """kinships""" +522 84 model """rescal""" +522 84 loss """softplus""" +522 84 regularizer """no""" +522 84 optimizer """adadelta""" +522 84 training_loop """lcwa""" +522 84 evaluator """rankbased""" +522 85 dataset """kinships""" +522 85 model """rescal""" +522 85 loss """softplus""" +522 85 regularizer """no""" +522 85 optimizer """adadelta""" +522 85 training_loop """lcwa""" +522 85 evaluator """rankbased""" +522 86 dataset """kinships""" +522 
86 model """rescal""" +522 86 loss """softplus""" +522 86 regularizer """no""" +522 86 optimizer """adadelta""" +522 86 training_loop """lcwa""" +522 86 evaluator """rankbased""" +522 87 dataset """kinships""" +522 87 model """rescal""" +522 87 loss """softplus""" +522 87 regularizer """no""" +522 87 optimizer """adadelta""" +522 87 training_loop """lcwa""" +522 87 evaluator """rankbased""" +522 88 dataset """kinships""" +522 88 model """rescal""" +522 88 loss """softplus""" +522 88 regularizer """no""" +522 88 optimizer """adadelta""" +522 88 training_loop """lcwa""" +522 88 evaluator """rankbased""" +522 89 dataset """kinships""" +522 89 model """rescal""" +522 89 loss """softplus""" +522 89 regularizer """no""" +522 89 optimizer """adadelta""" +522 89 training_loop """lcwa""" +522 89 evaluator """rankbased""" +522 90 dataset """kinships""" +522 90 model """rescal""" +522 90 loss """softplus""" +522 90 regularizer """no""" +522 90 optimizer """adadelta""" +522 90 training_loop """lcwa""" +522 90 evaluator """rankbased""" +522 91 dataset """kinships""" +522 91 model """rescal""" +522 91 loss """softplus""" +522 91 regularizer """no""" +522 91 optimizer """adadelta""" +522 91 training_loop """lcwa""" +522 91 evaluator """rankbased""" +522 92 dataset """kinships""" +522 92 model """rescal""" +522 92 loss """softplus""" +522 92 regularizer """no""" +522 92 optimizer """adadelta""" +522 92 training_loop """lcwa""" +522 92 evaluator """rankbased""" +522 93 dataset """kinships""" +522 93 model """rescal""" +522 93 loss """softplus""" +522 93 regularizer """no""" +522 93 optimizer """adadelta""" +522 93 training_loop """lcwa""" +522 93 evaluator """rankbased""" +522 94 dataset """kinships""" +522 94 model """rescal""" +522 94 loss """softplus""" +522 94 regularizer """no""" +522 94 optimizer """adadelta""" +522 94 training_loop """lcwa""" +522 94 evaluator """rankbased""" +522 95 dataset """kinships""" +522 95 model """rescal""" +522 95 loss """softplus""" +522 95 
regularizer """no""" +522 95 optimizer """adadelta""" +522 95 training_loop """lcwa""" +522 95 evaluator """rankbased""" +522 96 dataset """kinships""" +522 96 model """rescal""" +522 96 loss """softplus""" +522 96 regularizer """no""" +522 96 optimizer """adadelta""" +522 96 training_loop """lcwa""" +522 96 evaluator """rankbased""" +522 97 dataset """kinships""" +522 97 model """rescal""" +522 97 loss """softplus""" +522 97 regularizer """no""" +522 97 optimizer """adadelta""" +522 97 training_loop """lcwa""" +522 97 evaluator """rankbased""" +522 98 dataset """kinships""" +522 98 model """rescal""" +522 98 loss """softplus""" +522 98 regularizer """no""" +522 98 optimizer """adadelta""" +522 98 training_loop """lcwa""" +522 98 evaluator """rankbased""" +522 99 dataset """kinships""" +522 99 model """rescal""" +522 99 loss """softplus""" +522 99 regularizer """no""" +522 99 optimizer """adadelta""" +522 99 training_loop """lcwa""" +522 99 evaluator """rankbased""" +522 100 dataset """kinships""" +522 100 model """rescal""" +522 100 loss """softplus""" +522 100 regularizer """no""" +522 100 optimizer """adadelta""" +522 100 training_loop """lcwa""" +522 100 evaluator """rankbased""" +523 1 model.embedding_dim 2.0 +523 1 training.batch_size 0.0 +523 1 training.label_smoothing 0.04145749711542683 +523 2 model.embedding_dim 0.0 +523 2 training.batch_size 1.0 +523 2 training.label_smoothing 0.013538363911289736 +523 3 model.embedding_dim 1.0 +523 3 training.batch_size 1.0 +523 3 training.label_smoothing 0.050641393287190965 +523 4 model.embedding_dim 2.0 +523 4 training.batch_size 1.0 +523 4 training.label_smoothing 0.001077349731016298 +523 5 model.embedding_dim 1.0 +523 5 training.batch_size 2.0 +523 5 training.label_smoothing 0.001949538775046857 +523 6 model.embedding_dim 1.0 +523 6 training.batch_size 0.0 +523 6 training.label_smoothing 0.1136058656069017 +523 7 model.embedding_dim 0.0 +523 7 training.batch_size 0.0 +523 7 training.label_smoothing 
0.05075443539585116 +523 8 model.embedding_dim 0.0 +523 8 training.batch_size 1.0 +523 8 training.label_smoothing 0.012633328575223701 +523 9 model.embedding_dim 1.0 +523 9 training.batch_size 0.0 +523 9 training.label_smoothing 0.2059671184153918 +523 10 model.embedding_dim 0.0 +523 10 training.batch_size 2.0 +523 10 training.label_smoothing 0.0206435666146649 +523 11 model.embedding_dim 1.0 +523 11 training.batch_size 2.0 +523 11 training.label_smoothing 0.010864855781034717 +523 12 model.embedding_dim 0.0 +523 12 training.batch_size 2.0 +523 12 training.label_smoothing 0.039136737599688144 +523 13 model.embedding_dim 1.0 +523 13 training.batch_size 2.0 +523 13 training.label_smoothing 0.0056239714532818315 +523 14 model.embedding_dim 0.0 +523 14 training.batch_size 1.0 +523 14 training.label_smoothing 0.020772089046931046 +523 15 model.embedding_dim 2.0 +523 15 training.batch_size 1.0 +523 15 training.label_smoothing 0.2853168685896347 +523 16 model.embedding_dim 2.0 +523 16 training.batch_size 1.0 +523 16 training.label_smoothing 0.18593837886023698 +523 17 model.embedding_dim 0.0 +523 17 training.batch_size 0.0 +523 17 training.label_smoothing 0.7962812897388792 +523 18 model.embedding_dim 1.0 +523 18 training.batch_size 0.0 +523 18 training.label_smoothing 0.001713012634654024 +523 19 model.embedding_dim 0.0 +523 19 training.batch_size 2.0 +523 19 training.label_smoothing 0.0018882604878177826 +523 20 model.embedding_dim 1.0 +523 20 training.batch_size 0.0 +523 20 training.label_smoothing 0.006105206412029357 +523 21 model.embedding_dim 0.0 +523 21 training.batch_size 1.0 +523 21 training.label_smoothing 0.0863290094550124 +523 22 model.embedding_dim 2.0 +523 22 training.batch_size 2.0 +523 22 training.label_smoothing 0.012405268819049629 +523 23 model.embedding_dim 0.0 +523 23 training.batch_size 0.0 +523 23 training.label_smoothing 0.002381383495703773 +523 24 model.embedding_dim 2.0 +523 24 training.batch_size 1.0 +523 24 training.label_smoothing 
0.7438852417150179 +523 25 model.embedding_dim 1.0 +523 25 training.batch_size 0.0 +523 25 training.label_smoothing 0.02164765014446494 +523 26 model.embedding_dim 0.0 +523 26 training.batch_size 1.0 +523 26 training.label_smoothing 0.01627737134634218 +523 27 model.embedding_dim 0.0 +523 27 training.batch_size 2.0 +523 27 training.label_smoothing 0.19190155214401405 +523 28 model.embedding_dim 0.0 +523 28 training.batch_size 2.0 +523 28 training.label_smoothing 0.0013628434297386371 +523 29 model.embedding_dim 0.0 +523 29 training.batch_size 0.0 +523 29 training.label_smoothing 0.03946438773396858 +523 30 model.embedding_dim 2.0 +523 30 training.batch_size 1.0 +523 30 training.label_smoothing 0.12495378944198424 +523 31 model.embedding_dim 0.0 +523 31 training.batch_size 1.0 +523 31 training.label_smoothing 0.07514534027134097 +523 32 model.embedding_dim 1.0 +523 32 training.batch_size 1.0 +523 32 training.label_smoothing 0.0011109430718910785 +523 33 model.embedding_dim 2.0 +523 33 training.batch_size 0.0 +523 33 training.label_smoothing 0.01317937896782894 +523 34 model.embedding_dim 2.0 +523 34 training.batch_size 0.0 +523 34 training.label_smoothing 0.01577494381658869 +523 35 model.embedding_dim 2.0 +523 35 training.batch_size 1.0 +523 35 training.label_smoothing 0.007573442482452967 +523 36 model.embedding_dim 1.0 +523 36 training.batch_size 1.0 +523 36 training.label_smoothing 0.16428931352288764 +523 37 model.embedding_dim 2.0 +523 37 training.batch_size 0.0 +523 37 training.label_smoothing 0.20637186573695473 +523 38 model.embedding_dim 1.0 +523 38 training.batch_size 2.0 +523 38 training.label_smoothing 0.4022625265157261 +523 39 model.embedding_dim 2.0 +523 39 training.batch_size 0.0 +523 39 training.label_smoothing 0.2131274857440034 +523 40 model.embedding_dim 0.0 +523 40 training.batch_size 1.0 +523 40 training.label_smoothing 0.0016262271014478651 +523 41 model.embedding_dim 1.0 +523 41 training.batch_size 0.0 +523 41 training.label_smoothing 
0.4114970708949202 +523 42 model.embedding_dim 2.0 +523 42 training.batch_size 1.0 +523 42 training.label_smoothing 0.011948498749255427 +523 43 model.embedding_dim 1.0 +523 43 training.batch_size 1.0 +523 43 training.label_smoothing 0.13561740063214667 +523 44 model.embedding_dim 0.0 +523 44 training.batch_size 0.0 +523 44 training.label_smoothing 0.6744319052846922 +523 45 model.embedding_dim 1.0 +523 45 training.batch_size 0.0 +523 45 training.label_smoothing 0.007918816695541698 +523 46 model.embedding_dim 0.0 +523 46 training.batch_size 0.0 +523 46 training.label_smoothing 0.03202919704866866 +523 47 model.embedding_dim 2.0 +523 47 training.batch_size 2.0 +523 47 training.label_smoothing 0.0014287401786993274 +523 48 model.embedding_dim 0.0 +523 48 training.batch_size 1.0 +523 48 training.label_smoothing 0.03205366210586217 +523 49 model.embedding_dim 2.0 +523 49 training.batch_size 0.0 +523 49 training.label_smoothing 0.08334159755640141 +523 50 model.embedding_dim 0.0 +523 50 training.batch_size 1.0 +523 50 training.label_smoothing 0.020198147677254915 +523 51 model.embedding_dim 0.0 +523 51 training.batch_size 2.0 +523 51 training.label_smoothing 0.001825086972958989 +523 52 model.embedding_dim 2.0 +523 52 training.batch_size 0.0 +523 52 training.label_smoothing 0.001465458824073953 +523 53 model.embedding_dim 0.0 +523 53 training.batch_size 2.0 +523 53 training.label_smoothing 0.002214281767576213 +523 54 model.embedding_dim 0.0 +523 54 training.batch_size 0.0 +523 54 training.label_smoothing 0.001065996810451822 +523 55 model.embedding_dim 2.0 +523 55 training.batch_size 2.0 +523 55 training.label_smoothing 0.005116472145040696 +523 56 model.embedding_dim 2.0 +523 56 training.batch_size 0.0 +523 56 training.label_smoothing 0.6179808256288334 +523 57 model.embedding_dim 0.0 +523 57 training.batch_size 1.0 +523 57 training.label_smoothing 0.004678496793675456 +523 58 model.embedding_dim 2.0 +523 58 training.batch_size 1.0 +523 58 training.label_smoothing 
0.0012436174304182523 +523 59 model.embedding_dim 1.0 +523 59 training.batch_size 2.0 +523 59 training.label_smoothing 0.09632889773007657 +523 60 model.embedding_dim 0.0 +523 60 training.batch_size 0.0 +523 60 training.label_smoothing 0.004583151398913424 +523 61 model.embedding_dim 2.0 +523 61 training.batch_size 2.0 +523 61 training.label_smoothing 0.004685274874833689 +523 62 model.embedding_dim 0.0 +523 62 training.batch_size 2.0 +523 62 training.label_smoothing 0.0018009443497435625 +523 63 model.embedding_dim 0.0 +523 63 training.batch_size 2.0 +523 63 training.label_smoothing 0.030666025145397707 +523 64 model.embedding_dim 1.0 +523 64 training.batch_size 0.0 +523 64 training.label_smoothing 0.0012515226363508927 +523 65 model.embedding_dim 0.0 +523 65 training.batch_size 2.0 +523 65 training.label_smoothing 0.07841582537771297 +523 66 model.embedding_dim 0.0 +523 66 training.batch_size 0.0 +523 66 training.label_smoothing 0.5189831114162692 +523 67 model.embedding_dim 1.0 +523 67 training.batch_size 1.0 +523 67 training.label_smoothing 0.02756779642960354 +523 68 model.embedding_dim 0.0 +523 68 training.batch_size 1.0 +523 68 training.label_smoothing 0.0803200103400882 +523 69 model.embedding_dim 2.0 +523 69 training.batch_size 2.0 +523 69 training.label_smoothing 0.0017425312115528853 +523 70 model.embedding_dim 2.0 +523 70 training.batch_size 0.0 +523 70 training.label_smoothing 0.0016033425189937204 +523 71 model.embedding_dim 0.0 +523 71 training.batch_size 0.0 +523 71 training.label_smoothing 0.1127275204919839 +523 72 model.embedding_dim 1.0 +523 72 training.batch_size 2.0 +523 72 training.label_smoothing 0.031013907817197844 +523 73 model.embedding_dim 0.0 +523 73 training.batch_size 2.0 +523 73 training.label_smoothing 0.013544240164428858 +523 74 model.embedding_dim 2.0 +523 74 training.batch_size 2.0 +523 74 training.label_smoothing 0.15206311588278001 +523 75 model.embedding_dim 1.0 +523 75 training.batch_size 1.0 +523 75 
training.label_smoothing 0.2934794581851962 +523 76 model.embedding_dim 0.0 +523 76 training.batch_size 2.0 +523 76 training.label_smoothing 0.01853150743230906 +523 77 model.embedding_dim 0.0 +523 77 training.batch_size 0.0 +523 77 training.label_smoothing 0.1456899673813407 +523 78 model.embedding_dim 2.0 +523 78 training.batch_size 2.0 +523 78 training.label_smoothing 0.027137916068814542 +523 79 model.embedding_dim 2.0 +523 79 training.batch_size 0.0 +523 79 training.label_smoothing 0.03404475219534327 +523 80 model.embedding_dim 1.0 +523 80 training.batch_size 2.0 +523 80 training.label_smoothing 0.003248284330895406 +523 81 model.embedding_dim 0.0 +523 81 training.batch_size 0.0 +523 81 training.label_smoothing 0.320936599437872 +523 82 model.embedding_dim 1.0 +523 82 training.batch_size 1.0 +523 82 training.label_smoothing 0.726278606145441 +523 83 model.embedding_dim 0.0 +523 83 training.batch_size 0.0 +523 83 training.label_smoothing 0.2666752440551826 +523 84 model.embedding_dim 2.0 +523 84 training.batch_size 2.0 +523 84 training.label_smoothing 0.00484092615645586 +523 85 model.embedding_dim 1.0 +523 85 training.batch_size 0.0 +523 85 training.label_smoothing 0.043828679887968444 +523 86 model.embedding_dim 0.0 +523 86 training.batch_size 0.0 +523 86 training.label_smoothing 0.30651723605346054 +523 87 model.embedding_dim 2.0 +523 87 training.batch_size 1.0 +523 87 training.label_smoothing 0.1959248152111306 +523 88 model.embedding_dim 2.0 +523 88 training.batch_size 0.0 +523 88 training.label_smoothing 0.0032033630805472857 +523 89 model.embedding_dim 2.0 +523 89 training.batch_size 0.0 +523 89 training.label_smoothing 0.0018482217392326312 +523 90 model.embedding_dim 1.0 +523 90 training.batch_size 0.0 +523 90 training.label_smoothing 0.08126736830495306 +523 91 model.embedding_dim 0.0 +523 91 training.batch_size 1.0 +523 91 training.label_smoothing 0.016428797101711856 +523 92 model.embedding_dim 1.0 +523 92 training.batch_size 2.0 +523 92 
training.label_smoothing 0.1507757425109563 +523 93 model.embedding_dim 1.0 +523 93 training.batch_size 0.0 +523 93 training.label_smoothing 0.04635437695078401 +523 94 model.embedding_dim 0.0 +523 94 training.batch_size 2.0 +523 94 training.label_smoothing 0.004515177427870297 +523 95 model.embedding_dim 1.0 +523 95 training.batch_size 2.0 +523 95 training.label_smoothing 0.06770603576351875 +523 96 model.embedding_dim 1.0 +523 96 training.batch_size 1.0 +523 96 training.label_smoothing 0.01418468714822026 +523 97 model.embedding_dim 1.0 +523 97 training.batch_size 1.0 +523 97 training.label_smoothing 0.032967729531683906 +523 98 model.embedding_dim 1.0 +523 98 training.batch_size 1.0 +523 98 training.label_smoothing 0.011953766921275977 +523 99 model.embedding_dim 1.0 +523 99 training.batch_size 0.0 +523 99 training.label_smoothing 0.015375583173190873 +523 100 model.embedding_dim 0.0 +523 100 training.batch_size 0.0 +523 100 training.label_smoothing 0.2894924472693568 +523 1 dataset """kinships""" +523 1 model """rescal""" +523 1 loss """bceaftersigmoid""" +523 1 regularizer """no""" +523 1 optimizer """adadelta""" +523 1 training_loop """lcwa""" +523 1 evaluator """rankbased""" +523 2 dataset """kinships""" +523 2 model """rescal""" +523 2 loss """bceaftersigmoid""" +523 2 regularizer """no""" +523 2 optimizer """adadelta""" +523 2 training_loop """lcwa""" +523 2 evaluator """rankbased""" +523 3 dataset """kinships""" +523 3 model """rescal""" +523 3 loss """bceaftersigmoid""" +523 3 regularizer """no""" +523 3 optimizer """adadelta""" +523 3 training_loop """lcwa""" +523 3 evaluator """rankbased""" +523 4 dataset """kinships""" +523 4 model """rescal""" +523 4 loss """bceaftersigmoid""" +523 4 regularizer """no""" +523 4 optimizer """adadelta""" +523 4 training_loop """lcwa""" +523 4 evaluator """rankbased""" +523 5 dataset """kinships""" +523 5 model """rescal""" +523 5 loss """bceaftersigmoid""" +523 5 regularizer """no""" +523 5 optimizer """adadelta""" 
+523 5 training_loop """lcwa""" +523 5 evaluator """rankbased""" +523 6 dataset """kinships""" +523 6 model """rescal""" +523 6 loss """bceaftersigmoid""" +523 6 regularizer """no""" +523 6 optimizer """adadelta""" +523 6 training_loop """lcwa""" +523 6 evaluator """rankbased""" +523 7 dataset """kinships""" +523 7 model """rescal""" +523 7 loss """bceaftersigmoid""" +523 7 regularizer """no""" +523 7 optimizer """adadelta""" +523 7 training_loop """lcwa""" +523 7 evaluator """rankbased""" +523 8 dataset """kinships""" +523 8 model """rescal""" +523 8 loss """bceaftersigmoid""" +523 8 regularizer """no""" +523 8 optimizer """adadelta""" +523 8 training_loop """lcwa""" +523 8 evaluator """rankbased""" +523 9 dataset """kinships""" +523 9 model """rescal""" +523 9 loss """bceaftersigmoid""" +523 9 regularizer """no""" +523 9 optimizer """adadelta""" +523 9 training_loop """lcwa""" +523 9 evaluator """rankbased""" +523 10 dataset """kinships""" +523 10 model """rescal""" +523 10 loss """bceaftersigmoid""" +523 10 regularizer """no""" +523 10 optimizer """adadelta""" +523 10 training_loop """lcwa""" +523 10 evaluator """rankbased""" +523 11 dataset """kinships""" +523 11 model """rescal""" +523 11 loss """bceaftersigmoid""" +523 11 regularizer """no""" +523 11 optimizer """adadelta""" +523 11 training_loop """lcwa""" +523 11 evaluator """rankbased""" +523 12 dataset """kinships""" +523 12 model """rescal""" +523 12 loss """bceaftersigmoid""" +523 12 regularizer """no""" +523 12 optimizer """adadelta""" +523 12 training_loop """lcwa""" +523 12 evaluator """rankbased""" +523 13 dataset """kinships""" +523 13 model """rescal""" +523 13 loss """bceaftersigmoid""" +523 13 regularizer """no""" +523 13 optimizer """adadelta""" +523 13 training_loop """lcwa""" +523 13 evaluator """rankbased""" +523 14 dataset """kinships""" +523 14 model """rescal""" +523 14 loss """bceaftersigmoid""" +523 14 regularizer """no""" +523 14 optimizer """adadelta""" +523 14 training_loop 
"""lcwa""" +523 14 evaluator """rankbased""" +523 15 dataset """kinships""" +523 15 model """rescal""" +523 15 loss """bceaftersigmoid""" +523 15 regularizer """no""" +523 15 optimizer """adadelta""" +523 15 training_loop """lcwa""" +523 15 evaluator """rankbased""" +523 16 dataset """kinships""" +523 16 model """rescal""" +523 16 loss """bceaftersigmoid""" +523 16 regularizer """no""" +523 16 optimizer """adadelta""" +523 16 training_loop """lcwa""" +523 16 evaluator """rankbased""" +523 17 dataset """kinships""" +523 17 model """rescal""" +523 17 loss """bceaftersigmoid""" +523 17 regularizer """no""" +523 17 optimizer """adadelta""" +523 17 training_loop """lcwa""" +523 17 evaluator """rankbased""" +523 18 dataset """kinships""" +523 18 model """rescal""" +523 18 loss """bceaftersigmoid""" +523 18 regularizer """no""" +523 18 optimizer """adadelta""" +523 18 training_loop """lcwa""" +523 18 evaluator """rankbased""" +523 19 dataset """kinships""" +523 19 model """rescal""" +523 19 loss """bceaftersigmoid""" +523 19 regularizer """no""" +523 19 optimizer """adadelta""" +523 19 training_loop """lcwa""" +523 19 evaluator """rankbased""" +523 20 dataset """kinships""" +523 20 model """rescal""" +523 20 loss """bceaftersigmoid""" +523 20 regularizer """no""" +523 20 optimizer """adadelta""" +523 20 training_loop """lcwa""" +523 20 evaluator """rankbased""" +523 21 dataset """kinships""" +523 21 model """rescal""" +523 21 loss """bceaftersigmoid""" +523 21 regularizer """no""" +523 21 optimizer """adadelta""" +523 21 training_loop """lcwa""" +523 21 evaluator """rankbased""" +523 22 dataset """kinships""" +523 22 model """rescal""" +523 22 loss """bceaftersigmoid""" +523 22 regularizer """no""" +523 22 optimizer """adadelta""" +523 22 training_loop """lcwa""" +523 22 evaluator """rankbased""" +523 23 dataset """kinships""" +523 23 model """rescal""" +523 23 loss """bceaftersigmoid""" +523 23 regularizer """no""" +523 23 optimizer """adadelta""" +523 23 training_loop 
"""lcwa""" +523 23 evaluator """rankbased""" +523 24 dataset """kinships""" +523 24 model """rescal""" +523 24 loss """bceaftersigmoid""" +523 24 regularizer """no""" +523 24 optimizer """adadelta""" +523 24 training_loop """lcwa""" +523 24 evaluator """rankbased""" +523 25 dataset """kinships""" +523 25 model """rescal""" +523 25 loss """bceaftersigmoid""" +523 25 regularizer """no""" +523 25 optimizer """adadelta""" +523 25 training_loop """lcwa""" +523 25 evaluator """rankbased""" +523 26 dataset """kinships""" +523 26 model """rescal""" +523 26 loss """bceaftersigmoid""" +523 26 regularizer """no""" +523 26 optimizer """adadelta""" +523 26 training_loop """lcwa""" +523 26 evaluator """rankbased""" +523 27 dataset """kinships""" +523 27 model """rescal""" +523 27 loss """bceaftersigmoid""" +523 27 regularizer """no""" +523 27 optimizer """adadelta""" +523 27 training_loop """lcwa""" +523 27 evaluator """rankbased""" +523 28 dataset """kinships""" +523 28 model """rescal""" +523 28 loss """bceaftersigmoid""" +523 28 regularizer """no""" +523 28 optimizer """adadelta""" +523 28 training_loop """lcwa""" +523 28 evaluator """rankbased""" +523 29 dataset """kinships""" +523 29 model """rescal""" +523 29 loss """bceaftersigmoid""" +523 29 regularizer """no""" +523 29 optimizer """adadelta""" +523 29 training_loop """lcwa""" +523 29 evaluator """rankbased""" +523 30 dataset """kinships""" +523 30 model """rescal""" +523 30 loss """bceaftersigmoid""" +523 30 regularizer """no""" +523 30 optimizer """adadelta""" +523 30 training_loop """lcwa""" +523 30 evaluator """rankbased""" +523 31 dataset """kinships""" +523 31 model """rescal""" +523 31 loss """bceaftersigmoid""" +523 31 regularizer """no""" +523 31 optimizer """adadelta""" +523 31 training_loop """lcwa""" +523 31 evaluator """rankbased""" +523 32 dataset """kinships""" +523 32 model """rescal""" +523 32 loss """bceaftersigmoid""" +523 32 regularizer """no""" +523 32 optimizer """adadelta""" +523 32 training_loop 
"""lcwa""" +523 32 evaluator """rankbased""" +523 33 dataset """kinships""" +523 33 model """rescal""" +523 33 loss """bceaftersigmoid""" +523 33 regularizer """no""" +523 33 optimizer """adadelta""" +523 33 training_loop """lcwa""" +523 33 evaluator """rankbased""" +523 34 dataset """kinships""" +523 34 model """rescal""" +523 34 loss """bceaftersigmoid""" +523 34 regularizer """no""" +523 34 optimizer """adadelta""" +523 34 training_loop """lcwa""" +523 34 evaluator """rankbased""" +523 35 dataset """kinships""" +523 35 model """rescal""" +523 35 loss """bceaftersigmoid""" +523 35 regularizer """no""" +523 35 optimizer """adadelta""" +523 35 training_loop """lcwa""" +523 35 evaluator """rankbased""" +523 36 dataset """kinships""" +523 36 model """rescal""" +523 36 loss """bceaftersigmoid""" +523 36 regularizer """no""" +523 36 optimizer """adadelta""" +523 36 training_loop """lcwa""" +523 36 evaluator """rankbased""" +523 37 dataset """kinships""" +523 37 model """rescal""" +523 37 loss """bceaftersigmoid""" +523 37 regularizer """no""" +523 37 optimizer """adadelta""" +523 37 training_loop """lcwa""" +523 37 evaluator """rankbased""" +523 38 dataset """kinships""" +523 38 model """rescal""" +523 38 loss """bceaftersigmoid""" +523 38 regularizer """no""" +523 38 optimizer """adadelta""" +523 38 training_loop """lcwa""" +523 38 evaluator """rankbased""" +523 39 dataset """kinships""" +523 39 model """rescal""" +523 39 loss """bceaftersigmoid""" +523 39 regularizer """no""" +523 39 optimizer """adadelta""" +523 39 training_loop """lcwa""" +523 39 evaluator """rankbased""" +523 40 dataset """kinships""" +523 40 model """rescal""" +523 40 loss """bceaftersigmoid""" +523 40 regularizer """no""" +523 40 optimizer """adadelta""" +523 40 training_loop """lcwa""" +523 40 evaluator """rankbased""" +523 41 dataset """kinships""" +523 41 model """rescal""" +523 41 loss """bceaftersigmoid""" +523 41 regularizer """no""" +523 41 optimizer """adadelta""" +523 41 training_loop 
"""lcwa""" +523 41 evaluator """rankbased""" +523 42 dataset """kinships""" +523 42 model """rescal""" +523 42 loss """bceaftersigmoid""" +523 42 regularizer """no""" +523 42 optimizer """adadelta""" +523 42 training_loop """lcwa""" +523 42 evaluator """rankbased""" +523 43 dataset """kinships""" +523 43 model """rescal""" +523 43 loss """bceaftersigmoid""" +523 43 regularizer """no""" +523 43 optimizer """adadelta""" +523 43 training_loop """lcwa""" +523 43 evaluator """rankbased""" +523 44 dataset """kinships""" +523 44 model """rescal""" +523 44 loss """bceaftersigmoid""" +523 44 regularizer """no""" +523 44 optimizer """adadelta""" +523 44 training_loop """lcwa""" +523 44 evaluator """rankbased""" +523 45 dataset """kinships""" +523 45 model """rescal""" +523 45 loss """bceaftersigmoid""" +523 45 regularizer """no""" +523 45 optimizer """adadelta""" +523 45 training_loop """lcwa""" +523 45 evaluator """rankbased""" +523 46 dataset """kinships""" +523 46 model """rescal""" +523 46 loss """bceaftersigmoid""" +523 46 regularizer """no""" +523 46 optimizer """adadelta""" +523 46 training_loop """lcwa""" +523 46 evaluator """rankbased""" +523 47 dataset """kinships""" +523 47 model """rescal""" +523 47 loss """bceaftersigmoid""" +523 47 regularizer """no""" +523 47 optimizer """adadelta""" +523 47 training_loop """lcwa""" +523 47 evaluator """rankbased""" +523 48 dataset """kinships""" +523 48 model """rescal""" +523 48 loss """bceaftersigmoid""" +523 48 regularizer """no""" +523 48 optimizer """adadelta""" +523 48 training_loop """lcwa""" +523 48 evaluator """rankbased""" +523 49 dataset """kinships""" +523 49 model """rescal""" +523 49 loss """bceaftersigmoid""" +523 49 regularizer """no""" +523 49 optimizer """adadelta""" +523 49 training_loop """lcwa""" +523 49 evaluator """rankbased""" +523 50 dataset """kinships""" +523 50 model """rescal""" +523 50 loss """bceaftersigmoid""" +523 50 regularizer """no""" +523 50 optimizer """adadelta""" +523 50 training_loop 
"""lcwa""" +523 50 evaluator """rankbased""" +523 51 dataset """kinships""" +523 51 model """rescal""" +523 51 loss """bceaftersigmoid""" +523 51 regularizer """no""" +523 51 optimizer """adadelta""" +523 51 training_loop """lcwa""" +523 51 evaluator """rankbased""" +523 52 dataset """kinships""" +523 52 model """rescal""" +523 52 loss """bceaftersigmoid""" +523 52 regularizer """no""" +523 52 optimizer """adadelta""" +523 52 training_loop """lcwa""" +523 52 evaluator """rankbased""" +523 53 dataset """kinships""" +523 53 model """rescal""" +523 53 loss """bceaftersigmoid""" +523 53 regularizer """no""" +523 53 optimizer """adadelta""" +523 53 training_loop """lcwa""" +523 53 evaluator """rankbased""" +523 54 dataset """kinships""" +523 54 model """rescal""" +523 54 loss """bceaftersigmoid""" +523 54 regularizer """no""" +523 54 optimizer """adadelta""" +523 54 training_loop """lcwa""" +523 54 evaluator """rankbased""" +523 55 dataset """kinships""" +523 55 model """rescal""" +523 55 loss """bceaftersigmoid""" +523 55 regularizer """no""" +523 55 optimizer """adadelta""" +523 55 training_loop """lcwa""" +523 55 evaluator """rankbased""" +523 56 dataset """kinships""" +523 56 model """rescal""" +523 56 loss """bceaftersigmoid""" +523 56 regularizer """no""" +523 56 optimizer """adadelta""" +523 56 training_loop """lcwa""" +523 56 evaluator """rankbased""" +523 57 dataset """kinships""" +523 57 model """rescal""" +523 57 loss """bceaftersigmoid""" +523 57 regularizer """no""" +523 57 optimizer """adadelta""" +523 57 training_loop """lcwa""" +523 57 evaluator """rankbased""" +523 58 dataset """kinships""" +523 58 model """rescal""" +523 58 loss """bceaftersigmoid""" +523 58 regularizer """no""" +523 58 optimizer """adadelta""" +523 58 training_loop """lcwa""" +523 58 evaluator """rankbased""" +523 59 dataset """kinships""" +523 59 model """rescal""" +523 59 loss """bceaftersigmoid""" +523 59 regularizer """no""" +523 59 optimizer """adadelta""" +523 59 training_loop 
"""lcwa""" +523 59 evaluator """rankbased""" +523 60 dataset """kinships""" +523 60 model """rescal""" +523 60 loss """bceaftersigmoid""" +523 60 regularizer """no""" +523 60 optimizer """adadelta""" +523 60 training_loop """lcwa""" +523 60 evaluator """rankbased""" +523 61 dataset """kinships""" +523 61 model """rescal""" +523 61 loss """bceaftersigmoid""" +523 61 regularizer """no""" +523 61 optimizer """adadelta""" +523 61 training_loop """lcwa""" +523 61 evaluator """rankbased""" +523 62 dataset """kinships""" +523 62 model """rescal""" +523 62 loss """bceaftersigmoid""" +523 62 regularizer """no""" +523 62 optimizer """adadelta""" +523 62 training_loop """lcwa""" +523 62 evaluator """rankbased""" +523 63 dataset """kinships""" +523 63 model """rescal""" +523 63 loss """bceaftersigmoid""" +523 63 regularizer """no""" +523 63 optimizer """adadelta""" +523 63 training_loop """lcwa""" +523 63 evaluator """rankbased""" +523 64 dataset """kinships""" +523 64 model """rescal""" +523 64 loss """bceaftersigmoid""" +523 64 regularizer """no""" +523 64 optimizer """adadelta""" +523 64 training_loop """lcwa""" +523 64 evaluator """rankbased""" +523 65 dataset """kinships""" +523 65 model """rescal""" +523 65 loss """bceaftersigmoid""" +523 65 regularizer """no""" +523 65 optimizer """adadelta""" +523 65 training_loop """lcwa""" +523 65 evaluator """rankbased""" +523 66 dataset """kinships""" +523 66 model """rescal""" +523 66 loss """bceaftersigmoid""" +523 66 regularizer """no""" +523 66 optimizer """adadelta""" +523 66 training_loop """lcwa""" +523 66 evaluator """rankbased""" +523 67 dataset """kinships""" +523 67 model """rescal""" +523 67 loss """bceaftersigmoid""" +523 67 regularizer """no""" +523 67 optimizer """adadelta""" +523 67 training_loop """lcwa""" +523 67 evaluator """rankbased""" +523 68 dataset """kinships""" +523 68 model """rescal""" +523 68 loss """bceaftersigmoid""" +523 68 regularizer """no""" +523 68 optimizer """adadelta""" +523 68 training_loop 
"""lcwa""" +523 68 evaluator """rankbased""" +523 69 dataset """kinships""" +523 69 model """rescal""" +523 69 loss """bceaftersigmoid""" +523 69 regularizer """no""" +523 69 optimizer """adadelta""" +523 69 training_loop """lcwa""" +523 69 evaluator """rankbased""" +523 70 dataset """kinships""" +523 70 model """rescal""" +523 70 loss """bceaftersigmoid""" +523 70 regularizer """no""" +523 70 optimizer """adadelta""" +523 70 training_loop """lcwa""" +523 70 evaluator """rankbased""" +523 71 dataset """kinships""" +523 71 model """rescal""" +523 71 loss """bceaftersigmoid""" +523 71 regularizer """no""" +523 71 optimizer """adadelta""" +523 71 training_loop """lcwa""" +523 71 evaluator """rankbased""" +523 72 dataset """kinships""" +523 72 model """rescal""" +523 72 loss """bceaftersigmoid""" +523 72 regularizer """no""" +523 72 optimizer """adadelta""" +523 72 training_loop """lcwa""" +523 72 evaluator """rankbased""" +523 73 dataset """kinships""" +523 73 model """rescal""" +523 73 loss """bceaftersigmoid""" +523 73 regularizer """no""" +523 73 optimizer """adadelta""" +523 73 training_loop """lcwa""" +523 73 evaluator """rankbased""" +523 74 dataset """kinships""" +523 74 model """rescal""" +523 74 loss """bceaftersigmoid""" +523 74 regularizer """no""" +523 74 optimizer """adadelta""" +523 74 training_loop """lcwa""" +523 74 evaluator """rankbased""" +523 75 dataset """kinships""" +523 75 model """rescal""" +523 75 loss """bceaftersigmoid""" +523 75 regularizer """no""" +523 75 optimizer """adadelta""" +523 75 training_loop """lcwa""" +523 75 evaluator """rankbased""" +523 76 dataset """kinships""" +523 76 model """rescal""" +523 76 loss """bceaftersigmoid""" +523 76 regularizer """no""" +523 76 optimizer """adadelta""" +523 76 training_loop """lcwa""" +523 76 evaluator """rankbased""" +523 77 dataset """kinships""" +523 77 model """rescal""" +523 77 loss """bceaftersigmoid""" +523 77 regularizer """no""" +523 77 optimizer """adadelta""" +523 77 training_loop 
"""lcwa""" +523 77 evaluator """rankbased""" +523 78 dataset """kinships""" +523 78 model """rescal""" +523 78 loss """bceaftersigmoid""" +523 78 regularizer """no""" +523 78 optimizer """adadelta""" +523 78 training_loop """lcwa""" +523 78 evaluator """rankbased""" +523 79 dataset """kinships""" +523 79 model """rescal""" +523 79 loss """bceaftersigmoid""" +523 79 regularizer """no""" +523 79 optimizer """adadelta""" +523 79 training_loop """lcwa""" +523 79 evaluator """rankbased""" +523 80 dataset """kinships""" +523 80 model """rescal""" +523 80 loss """bceaftersigmoid""" +523 80 regularizer """no""" +523 80 optimizer """adadelta""" +523 80 training_loop """lcwa""" +523 80 evaluator """rankbased""" +523 81 dataset """kinships""" +523 81 model """rescal""" +523 81 loss """bceaftersigmoid""" +523 81 regularizer """no""" +523 81 optimizer """adadelta""" +523 81 training_loop """lcwa""" +523 81 evaluator """rankbased""" +523 82 dataset """kinships""" +523 82 model """rescal""" +523 82 loss """bceaftersigmoid""" +523 82 regularizer """no""" +523 82 optimizer """adadelta""" +523 82 training_loop """lcwa""" +523 82 evaluator """rankbased""" +523 83 dataset """kinships""" +523 83 model """rescal""" +523 83 loss """bceaftersigmoid""" +523 83 regularizer """no""" +523 83 optimizer """adadelta""" +523 83 training_loop """lcwa""" +523 83 evaluator """rankbased""" +523 84 dataset """kinships""" +523 84 model """rescal""" +523 84 loss """bceaftersigmoid""" +523 84 regularizer """no""" +523 84 optimizer """adadelta""" +523 84 training_loop """lcwa""" +523 84 evaluator """rankbased""" +523 85 dataset """kinships""" +523 85 model """rescal""" +523 85 loss """bceaftersigmoid""" +523 85 regularizer """no""" +523 85 optimizer """adadelta""" +523 85 training_loop """lcwa""" +523 85 evaluator """rankbased""" +523 86 dataset """kinships""" +523 86 model """rescal""" +523 86 loss """bceaftersigmoid""" +523 86 regularizer """no""" +523 86 optimizer """adadelta""" +523 86 training_loop 
"""lcwa""" +523 86 evaluator """rankbased""" +523 87 dataset """kinships""" +523 87 model """rescal""" +523 87 loss """bceaftersigmoid""" +523 87 regularizer """no""" +523 87 optimizer """adadelta""" +523 87 training_loop """lcwa""" +523 87 evaluator """rankbased""" +523 88 dataset """kinships""" +523 88 model """rescal""" +523 88 loss """bceaftersigmoid""" +523 88 regularizer """no""" +523 88 optimizer """adadelta""" +523 88 training_loop """lcwa""" +523 88 evaluator """rankbased""" +523 89 dataset """kinships""" +523 89 model """rescal""" +523 89 loss """bceaftersigmoid""" +523 89 regularizer """no""" +523 89 optimizer """adadelta""" +523 89 training_loop """lcwa""" +523 89 evaluator """rankbased""" +523 90 dataset """kinships""" +523 90 model """rescal""" +523 90 loss """bceaftersigmoid""" +523 90 regularizer """no""" +523 90 optimizer """adadelta""" +523 90 training_loop """lcwa""" +523 90 evaluator """rankbased""" +523 91 dataset """kinships""" +523 91 model """rescal""" +523 91 loss """bceaftersigmoid""" +523 91 regularizer """no""" +523 91 optimizer """adadelta""" +523 91 training_loop """lcwa""" +523 91 evaluator """rankbased""" +523 92 dataset """kinships""" +523 92 model """rescal""" +523 92 loss """bceaftersigmoid""" +523 92 regularizer """no""" +523 92 optimizer """adadelta""" +523 92 training_loop """lcwa""" +523 92 evaluator """rankbased""" +523 93 dataset """kinships""" +523 93 model """rescal""" +523 93 loss """bceaftersigmoid""" +523 93 regularizer """no""" +523 93 optimizer """adadelta""" +523 93 training_loop """lcwa""" +523 93 evaluator """rankbased""" +523 94 dataset """kinships""" +523 94 model """rescal""" +523 94 loss """bceaftersigmoid""" +523 94 regularizer """no""" +523 94 optimizer """adadelta""" +523 94 training_loop """lcwa""" +523 94 evaluator """rankbased""" +523 95 dataset """kinships""" +523 95 model """rescal""" +523 95 loss """bceaftersigmoid""" +523 95 regularizer """no""" +523 95 optimizer """adadelta""" +523 95 training_loop 
"""lcwa""" +523 95 evaluator """rankbased""" +523 96 dataset """kinships""" +523 96 model """rescal""" +523 96 loss """bceaftersigmoid""" +523 96 regularizer """no""" +523 96 optimizer """adadelta""" +523 96 training_loop """lcwa""" +523 96 evaluator """rankbased""" +523 97 dataset """kinships""" +523 97 model """rescal""" +523 97 loss """bceaftersigmoid""" +523 97 regularizer """no""" +523 97 optimizer """adadelta""" +523 97 training_loop """lcwa""" +523 97 evaluator """rankbased""" +523 98 dataset """kinships""" +523 98 model """rescal""" +523 98 loss """bceaftersigmoid""" +523 98 regularizer """no""" +523 98 optimizer """adadelta""" +523 98 training_loop """lcwa""" +523 98 evaluator """rankbased""" +523 99 dataset """kinships""" +523 99 model """rescal""" +523 99 loss """bceaftersigmoid""" +523 99 regularizer """no""" +523 99 optimizer """adadelta""" +523 99 training_loop """lcwa""" +523 99 evaluator """rankbased""" +523 100 dataset """kinships""" +523 100 model """rescal""" +523 100 loss """bceaftersigmoid""" +523 100 regularizer """no""" +523 100 optimizer """adadelta""" +523 100 training_loop """lcwa""" +523 100 evaluator """rankbased""" +524 1 model.embedding_dim 1.0 +524 1 training.batch_size 0.0 +524 1 training.label_smoothing 0.005795850931743916 +524 2 model.embedding_dim 2.0 +524 2 training.batch_size 1.0 +524 2 training.label_smoothing 0.0022201511459634974 +524 3 model.embedding_dim 1.0 +524 3 training.batch_size 0.0 +524 3 training.label_smoothing 0.2481385847883945 +524 4 model.embedding_dim 0.0 +524 4 training.batch_size 2.0 +524 4 training.label_smoothing 0.12078398894602836 +524 5 model.embedding_dim 0.0 +524 5 training.batch_size 2.0 +524 5 training.label_smoothing 0.0017625737174320887 +524 6 model.embedding_dim 2.0 +524 6 training.batch_size 0.0 +524 6 training.label_smoothing 0.002088616407465633 +524 7 model.embedding_dim 0.0 +524 7 training.batch_size 2.0 +524 7 training.label_smoothing 0.013729305423640216 +524 8 model.embedding_dim 2.0 
+524 8 training.batch_size 1.0 +524 8 training.label_smoothing 0.05425101027411753 +524 9 model.embedding_dim 1.0 +524 9 training.batch_size 1.0 +524 9 training.label_smoothing 0.06036168813667578 +524 10 model.embedding_dim 0.0 +524 10 training.batch_size 0.0 +524 10 training.label_smoothing 0.005806927888463078 +524 11 model.embedding_dim 2.0 +524 11 training.batch_size 2.0 +524 11 training.label_smoothing 0.09977598918854255 +524 12 model.embedding_dim 1.0 +524 12 training.batch_size 2.0 +524 12 training.label_smoothing 0.0010481104865908534 +524 13 model.embedding_dim 2.0 +524 13 training.batch_size 1.0 +524 13 training.label_smoothing 0.001209896108437825 +524 14 model.embedding_dim 2.0 +524 14 training.batch_size 0.0 +524 14 training.label_smoothing 0.7465076997176643 +524 15 model.embedding_dim 1.0 +524 15 training.batch_size 0.0 +524 15 training.label_smoothing 0.16392910240178654 +524 16 model.embedding_dim 2.0 +524 16 training.batch_size 1.0 +524 16 training.label_smoothing 0.4862148237679031 +524 17 model.embedding_dim 2.0 +524 17 training.batch_size 2.0 +524 17 training.label_smoothing 0.004604385480599308 +524 18 model.embedding_dim 2.0 +524 18 training.batch_size 2.0 +524 18 training.label_smoothing 0.39323429826214135 +524 19 model.embedding_dim 2.0 +524 19 training.batch_size 2.0 +524 19 training.label_smoothing 0.03113649394983798 +524 20 model.embedding_dim 2.0 +524 20 training.batch_size 0.0 +524 20 training.label_smoothing 0.003756257578812393 +524 21 model.embedding_dim 0.0 +524 21 training.batch_size 2.0 +524 21 training.label_smoothing 0.09294910104481187 +524 22 model.embedding_dim 1.0 +524 22 training.batch_size 1.0 +524 22 training.label_smoothing 0.03873476213730959 +524 23 model.embedding_dim 1.0 +524 23 training.batch_size 0.0 +524 23 training.label_smoothing 0.0012069350237767053 +524 24 model.embedding_dim 2.0 +524 24 training.batch_size 1.0 +524 24 training.label_smoothing 0.004028997809065523 +524 25 model.embedding_dim 2.0 +524 25 
training.batch_size 0.0 +524 25 training.label_smoothing 0.15489984540884666 +524 26 model.embedding_dim 2.0 +524 26 training.batch_size 1.0 +524 26 training.label_smoothing 0.002653759988205904 +524 27 model.embedding_dim 2.0 +524 27 training.batch_size 2.0 +524 27 training.label_smoothing 0.0865741502410339 +524 28 model.embedding_dim 2.0 +524 28 training.batch_size 2.0 +524 28 training.label_smoothing 0.051072072708494495 +524 29 model.embedding_dim 2.0 +524 29 training.batch_size 2.0 +524 29 training.label_smoothing 0.005098191185129361 +524 30 model.embedding_dim 2.0 +524 30 training.batch_size 1.0 +524 30 training.label_smoothing 0.010717015340012449 +524 31 model.embedding_dim 2.0 +524 31 training.batch_size 2.0 +524 31 training.label_smoothing 0.001643493616014504 +524 32 model.embedding_dim 1.0 +524 32 training.batch_size 0.0 +524 32 training.label_smoothing 0.015576541278322892 +524 33 model.embedding_dim 0.0 +524 33 training.batch_size 2.0 +524 33 training.label_smoothing 0.05520645891097497 +524 34 model.embedding_dim 1.0 +524 34 training.batch_size 0.0 +524 34 training.label_smoothing 0.005240281247102671 +524 35 model.embedding_dim 2.0 +524 35 training.batch_size 0.0 +524 35 training.label_smoothing 0.0012453286462153774 +524 36 model.embedding_dim 1.0 +524 36 training.batch_size 1.0 +524 36 training.label_smoothing 0.001890790791963393 +524 37 model.embedding_dim 2.0 +524 37 training.batch_size 2.0 +524 37 training.label_smoothing 0.417337070281493 +524 38 model.embedding_dim 2.0 +524 38 training.batch_size 2.0 +524 38 training.label_smoothing 0.10455458874395841 +524 39 model.embedding_dim 1.0 +524 39 training.batch_size 1.0 +524 39 training.label_smoothing 0.9947829507257221 +524 40 model.embedding_dim 2.0 +524 40 training.batch_size 1.0 +524 40 training.label_smoothing 0.003285186613699555 +524 41 model.embedding_dim 0.0 +524 41 training.batch_size 2.0 +524 41 training.label_smoothing 0.06346493521344457 +524 42 model.embedding_dim 2.0 +524 42 
training.batch_size 2.0 +524 42 training.label_smoothing 0.015843668292487262 +524 43 model.embedding_dim 2.0 +524 43 training.batch_size 1.0 +524 43 training.label_smoothing 0.010996752956366877 +524 44 model.embedding_dim 0.0 +524 44 training.batch_size 0.0 +524 44 training.label_smoothing 0.15822290491527893 +524 45 model.embedding_dim 1.0 +524 45 training.batch_size 1.0 +524 45 training.label_smoothing 0.27241573448057177 +524 46 model.embedding_dim 1.0 +524 46 training.batch_size 2.0 +524 46 training.label_smoothing 0.505567610010049 +524 47 model.embedding_dim 0.0 +524 47 training.batch_size 0.0 +524 47 training.label_smoothing 0.0343820186690576 +524 48 model.embedding_dim 1.0 +524 48 training.batch_size 1.0 +524 48 training.label_smoothing 0.7005381247661933 +524 49 model.embedding_dim 2.0 +524 49 training.batch_size 0.0 +524 49 training.label_smoothing 0.14264139849201088 +524 50 model.embedding_dim 0.0 +524 50 training.batch_size 1.0 +524 50 training.label_smoothing 0.032895419432931776 +524 51 model.embedding_dim 1.0 +524 51 training.batch_size 2.0 +524 51 training.label_smoothing 0.008229046481924653 +524 52 model.embedding_dim 0.0 +524 52 training.batch_size 2.0 +524 52 training.label_smoothing 0.0029810815223178665 +524 53 model.embedding_dim 2.0 +524 53 training.batch_size 2.0 +524 53 training.label_smoothing 0.013882391060978787 +524 54 model.embedding_dim 0.0 +524 54 training.batch_size 1.0 +524 54 training.label_smoothing 0.01795049132391683 +524 55 model.embedding_dim 0.0 +524 55 training.batch_size 2.0 +524 55 training.label_smoothing 0.47515811828397153 +524 56 model.embedding_dim 1.0 +524 56 training.batch_size 0.0 +524 56 training.label_smoothing 0.04635490534597906 +524 57 model.embedding_dim 1.0 +524 57 training.batch_size 1.0 +524 57 training.label_smoothing 0.8000876179759538 +524 58 model.embedding_dim 2.0 +524 58 training.batch_size 2.0 +524 58 training.label_smoothing 0.044836374225731036 +524 59 model.embedding_dim 2.0 +524 59 
training.batch_size 0.0 +524 59 training.label_smoothing 0.17157613048327972 +524 60 model.embedding_dim 0.0 +524 60 training.batch_size 1.0 +524 60 training.label_smoothing 0.0022738720254055706 +524 61 model.embedding_dim 1.0 +524 61 training.batch_size 0.0 +524 61 training.label_smoothing 0.009061713846434625 +524 62 model.embedding_dim 0.0 +524 62 training.batch_size 0.0 +524 62 training.label_smoothing 0.634832707840407 +524 63 model.embedding_dim 2.0 +524 63 training.batch_size 0.0 +524 63 training.label_smoothing 0.4864167142689021 +524 64 model.embedding_dim 2.0 +524 64 training.batch_size 1.0 +524 64 training.label_smoothing 0.7671683859024196 +524 65 model.embedding_dim 2.0 +524 65 training.batch_size 0.0 +524 65 training.label_smoothing 0.0030462216198314336 +524 66 model.embedding_dim 2.0 +524 66 training.batch_size 1.0 +524 66 training.label_smoothing 0.014180551499759915 +524 67 model.embedding_dim 0.0 +524 67 training.batch_size 0.0 +524 67 training.label_smoothing 0.0012846154259412144 +524 68 model.embedding_dim 1.0 +524 68 training.batch_size 1.0 +524 68 training.label_smoothing 0.038317796226055176 +524 69 model.embedding_dim 1.0 +524 69 training.batch_size 0.0 +524 69 training.label_smoothing 0.1783628806761415 +524 70 model.embedding_dim 0.0 +524 70 training.batch_size 2.0 +524 70 training.label_smoothing 0.17406613630005727 +524 71 model.embedding_dim 2.0 +524 71 training.batch_size 0.0 +524 71 training.label_smoothing 0.0010196354949287825 +524 72 model.embedding_dim 2.0 +524 72 training.batch_size 0.0 +524 72 training.label_smoothing 0.0013899418767094904 +524 73 model.embedding_dim 0.0 +524 73 training.batch_size 0.0 +524 73 training.label_smoothing 0.03220268153692559 +524 74 model.embedding_dim 0.0 +524 74 training.batch_size 1.0 +524 74 training.label_smoothing 0.12298128334657092 +524 75 model.embedding_dim 2.0 +524 75 training.batch_size 0.0 +524 75 training.label_smoothing 0.1418007792827441 +524 76 model.embedding_dim 2.0 +524 76 
training.batch_size 0.0 +524 76 training.label_smoothing 0.002450050993384166 +524 77 model.embedding_dim 0.0 +524 77 training.batch_size 2.0 +524 77 training.label_smoothing 0.0038974487364424695 +524 78 model.embedding_dim 2.0 +524 78 training.batch_size 1.0 +524 78 training.label_smoothing 0.001095953891218856 +524 79 model.embedding_dim 0.0 +524 79 training.batch_size 1.0 +524 79 training.label_smoothing 0.0096746365503211 +524 80 model.embedding_dim 0.0 +524 80 training.batch_size 0.0 +524 80 training.label_smoothing 0.6775182287972756 +524 81 model.embedding_dim 1.0 +524 81 training.batch_size 1.0 +524 81 training.label_smoothing 0.0172789541926223 +524 82 model.embedding_dim 1.0 +524 82 training.batch_size 1.0 +524 82 training.label_smoothing 0.008861323932796651 +524 83 model.embedding_dim 1.0 +524 83 training.batch_size 0.0 +524 83 training.label_smoothing 0.007867464526370441 +524 84 model.embedding_dim 0.0 +524 84 training.batch_size 2.0 +524 84 training.label_smoothing 0.0031349732524153925 +524 85 model.embedding_dim 2.0 +524 85 training.batch_size 1.0 +524 85 training.label_smoothing 0.0032246235237785954 +524 86 model.embedding_dim 1.0 +524 86 training.batch_size 2.0 +524 86 training.label_smoothing 0.9874204283369581 +524 87 model.embedding_dim 0.0 +524 87 training.batch_size 1.0 +524 87 training.label_smoothing 0.02394856551044029 +524 88 model.embedding_dim 1.0 +524 88 training.batch_size 0.0 +524 88 training.label_smoothing 0.0034245318124023726 +524 89 model.embedding_dim 0.0 +524 89 training.batch_size 2.0 +524 89 training.label_smoothing 0.015533370481695803 +524 90 model.embedding_dim 2.0 +524 90 training.batch_size 1.0 +524 90 training.label_smoothing 0.0064786199251837635 +524 91 model.embedding_dim 1.0 +524 91 training.batch_size 0.0 +524 91 training.label_smoothing 0.0065790253867369156 +524 92 model.embedding_dim 0.0 +524 92 training.batch_size 2.0 +524 92 training.label_smoothing 0.6449596115176215 +524 93 model.embedding_dim 0.0 +524 
93 training.batch_size 1.0 +524 93 training.label_smoothing 0.9644095560986848 +524 94 model.embedding_dim 2.0 +524 94 training.batch_size 2.0 +524 94 training.label_smoothing 0.007903716092758062 +524 95 model.embedding_dim 1.0 +524 95 training.batch_size 0.0 +524 95 training.label_smoothing 0.0011009039797211357 +524 96 model.embedding_dim 2.0 +524 96 training.batch_size 1.0 +524 96 training.label_smoothing 0.5739163370227631 +524 97 model.embedding_dim 2.0 +524 97 training.batch_size 2.0 +524 97 training.label_smoothing 0.10975679220787912 +524 98 model.embedding_dim 0.0 +524 98 training.batch_size 0.0 +524 98 training.label_smoothing 0.07804733859097225 +524 99 model.embedding_dim 0.0 +524 99 training.batch_size 0.0 +524 99 training.label_smoothing 0.0013669089416810706 +524 100 model.embedding_dim 2.0 +524 100 training.batch_size 1.0 +524 100 training.label_smoothing 0.03589420972529194 +524 1 dataset """kinships""" +524 1 model """rescal""" +524 1 loss """softplus""" +524 1 regularizer """no""" +524 1 optimizer """adadelta""" +524 1 training_loop """lcwa""" +524 1 evaluator """rankbased""" +524 2 dataset """kinships""" +524 2 model """rescal""" +524 2 loss """softplus""" +524 2 regularizer """no""" +524 2 optimizer """adadelta""" +524 2 training_loop """lcwa""" +524 2 evaluator """rankbased""" +524 3 dataset """kinships""" +524 3 model """rescal""" +524 3 loss """softplus""" +524 3 regularizer """no""" +524 3 optimizer """adadelta""" +524 3 training_loop """lcwa""" +524 3 evaluator """rankbased""" +524 4 dataset """kinships""" +524 4 model """rescal""" +524 4 loss """softplus""" +524 4 regularizer """no""" +524 4 optimizer """adadelta""" +524 4 training_loop """lcwa""" +524 4 evaluator """rankbased""" +524 5 dataset """kinships""" +524 5 model """rescal""" +524 5 loss """softplus""" +524 5 regularizer """no""" +524 5 optimizer """adadelta""" +524 5 training_loop """lcwa""" +524 5 evaluator """rankbased""" +524 6 dataset """kinships""" +524 6 model 
"""rescal""" +524 6 loss """softplus""" +524 6 regularizer """no""" +524 6 optimizer """adadelta""" +524 6 training_loop """lcwa""" +524 6 evaluator """rankbased""" +524 7 dataset """kinships""" +524 7 model """rescal""" +524 7 loss """softplus""" +524 7 regularizer """no""" +524 7 optimizer """adadelta""" +524 7 training_loop """lcwa""" +524 7 evaluator """rankbased""" +524 8 dataset """kinships""" +524 8 model """rescal""" +524 8 loss """softplus""" +524 8 regularizer """no""" +524 8 optimizer """adadelta""" +524 8 training_loop """lcwa""" +524 8 evaluator """rankbased""" +524 9 dataset """kinships""" +524 9 model """rescal""" +524 9 loss """softplus""" +524 9 regularizer """no""" +524 9 optimizer """adadelta""" +524 9 training_loop """lcwa""" +524 9 evaluator """rankbased""" +524 10 dataset """kinships""" +524 10 model """rescal""" +524 10 loss """softplus""" +524 10 regularizer """no""" +524 10 optimizer """adadelta""" +524 10 training_loop """lcwa""" +524 10 evaluator """rankbased""" +524 11 dataset """kinships""" +524 11 model """rescal""" +524 11 loss """softplus""" +524 11 regularizer """no""" +524 11 optimizer """adadelta""" +524 11 training_loop """lcwa""" +524 11 evaluator """rankbased""" +524 12 dataset """kinships""" +524 12 model """rescal""" +524 12 loss """softplus""" +524 12 regularizer """no""" +524 12 optimizer """adadelta""" +524 12 training_loop """lcwa""" +524 12 evaluator """rankbased""" +524 13 dataset """kinships""" +524 13 model """rescal""" +524 13 loss """softplus""" +524 13 regularizer """no""" +524 13 optimizer """adadelta""" +524 13 training_loop """lcwa""" +524 13 evaluator """rankbased""" +524 14 dataset """kinships""" +524 14 model """rescal""" +524 14 loss """softplus""" +524 14 regularizer """no""" +524 14 optimizer """adadelta""" +524 14 training_loop """lcwa""" +524 14 evaluator """rankbased""" +524 15 dataset """kinships""" +524 15 model """rescal""" +524 15 loss """softplus""" +524 15 regularizer """no""" +524 15 optimizer 
"""adadelta""" +524 15 training_loop """lcwa""" +524 15 evaluator """rankbased""" +524 16 dataset """kinships""" +524 16 model """rescal""" +524 16 loss """softplus""" +524 16 regularizer """no""" +524 16 optimizer """adadelta""" +524 16 training_loop """lcwa""" +524 16 evaluator """rankbased""" +524 17 dataset """kinships""" +524 17 model """rescal""" +524 17 loss """softplus""" +524 17 regularizer """no""" +524 17 optimizer """adadelta""" +524 17 training_loop """lcwa""" +524 17 evaluator """rankbased""" +524 18 dataset """kinships""" +524 18 model """rescal""" +524 18 loss """softplus""" +524 18 regularizer """no""" +524 18 optimizer """adadelta""" +524 18 training_loop """lcwa""" +524 18 evaluator """rankbased""" +524 19 dataset """kinships""" +524 19 model """rescal""" +524 19 loss """softplus""" +524 19 regularizer """no""" +524 19 optimizer """adadelta""" +524 19 training_loop """lcwa""" +524 19 evaluator """rankbased""" +524 20 dataset """kinships""" +524 20 model """rescal""" +524 20 loss """softplus""" +524 20 regularizer """no""" +524 20 optimizer """adadelta""" +524 20 training_loop """lcwa""" +524 20 evaluator """rankbased""" +524 21 dataset """kinships""" +524 21 model """rescal""" +524 21 loss """softplus""" +524 21 regularizer """no""" +524 21 optimizer """adadelta""" +524 21 training_loop """lcwa""" +524 21 evaluator """rankbased""" +524 22 dataset """kinships""" +524 22 model """rescal""" +524 22 loss """softplus""" +524 22 regularizer """no""" +524 22 optimizer """adadelta""" +524 22 training_loop """lcwa""" +524 22 evaluator """rankbased""" +524 23 dataset """kinships""" +524 23 model """rescal""" +524 23 loss """softplus""" +524 23 regularizer """no""" +524 23 optimizer """adadelta""" +524 23 training_loop """lcwa""" +524 23 evaluator """rankbased""" +524 24 dataset """kinships""" +524 24 model """rescal""" +524 24 loss """softplus""" +524 24 regularizer """no""" +524 24 optimizer """adadelta""" +524 24 training_loop """lcwa""" +524 24 
evaluator """rankbased""" +524 25 dataset """kinships""" +524 25 model """rescal""" +524 25 loss """softplus""" +524 25 regularizer """no""" +524 25 optimizer """adadelta""" +524 25 training_loop """lcwa""" +524 25 evaluator """rankbased""" +524 26 dataset """kinships""" +524 26 model """rescal""" +524 26 loss """softplus""" +524 26 regularizer """no""" +524 26 optimizer """adadelta""" +524 26 training_loop """lcwa""" +524 26 evaluator """rankbased""" +524 27 dataset """kinships""" +524 27 model """rescal""" +524 27 loss """softplus""" +524 27 regularizer """no""" +524 27 optimizer """adadelta""" +524 27 training_loop """lcwa""" +524 27 evaluator """rankbased""" +524 28 dataset """kinships""" +524 28 model """rescal""" +524 28 loss """softplus""" +524 28 regularizer """no""" +524 28 optimizer """adadelta""" +524 28 training_loop """lcwa""" +524 28 evaluator """rankbased""" +524 29 dataset """kinships""" +524 29 model """rescal""" +524 29 loss """softplus""" +524 29 regularizer """no""" +524 29 optimizer """adadelta""" +524 29 training_loop """lcwa""" +524 29 evaluator """rankbased""" +524 30 dataset """kinships""" +524 30 model """rescal""" +524 30 loss """softplus""" +524 30 regularizer """no""" +524 30 optimizer """adadelta""" +524 30 training_loop """lcwa""" +524 30 evaluator """rankbased""" +524 31 dataset """kinships""" +524 31 model """rescal""" +524 31 loss """softplus""" +524 31 regularizer """no""" +524 31 optimizer """adadelta""" +524 31 training_loop """lcwa""" +524 31 evaluator """rankbased""" +524 32 dataset """kinships""" +524 32 model """rescal""" +524 32 loss """softplus""" +524 32 regularizer """no""" +524 32 optimizer """adadelta""" +524 32 training_loop """lcwa""" +524 32 evaluator """rankbased""" +524 33 dataset """kinships""" +524 33 model """rescal""" +524 33 loss """softplus""" +524 33 regularizer """no""" +524 33 optimizer """adadelta""" +524 33 training_loop """lcwa""" +524 33 evaluator """rankbased""" +524 34 dataset """kinships""" +524 34 
model """rescal""" +524 34 loss """softplus""" +524 34 regularizer """no""" +524 34 optimizer """adadelta""" +524 34 training_loop """lcwa""" +524 34 evaluator """rankbased""" +524 35 dataset """kinships""" +524 35 model """rescal""" +524 35 loss """softplus""" +524 35 regularizer """no""" +524 35 optimizer """adadelta""" +524 35 training_loop """lcwa""" +524 35 evaluator """rankbased""" +524 36 dataset """kinships""" +524 36 model """rescal""" +524 36 loss """softplus""" +524 36 regularizer """no""" +524 36 optimizer """adadelta""" +524 36 training_loop """lcwa""" +524 36 evaluator """rankbased""" +524 37 dataset """kinships""" +524 37 model """rescal""" +524 37 loss """softplus""" +524 37 regularizer """no""" +524 37 optimizer """adadelta""" +524 37 training_loop """lcwa""" +524 37 evaluator """rankbased""" +524 38 dataset """kinships""" +524 38 model """rescal""" +524 38 loss """softplus""" +524 38 regularizer """no""" +524 38 optimizer """adadelta""" +524 38 training_loop """lcwa""" +524 38 evaluator """rankbased""" +524 39 dataset """kinships""" +524 39 model """rescal""" +524 39 loss """softplus""" +524 39 regularizer """no""" +524 39 optimizer """adadelta""" +524 39 training_loop """lcwa""" +524 39 evaluator """rankbased""" +524 40 dataset """kinships""" +524 40 model """rescal""" +524 40 loss """softplus""" +524 40 regularizer """no""" +524 40 optimizer """adadelta""" +524 40 training_loop """lcwa""" +524 40 evaluator """rankbased""" +524 41 dataset """kinships""" +524 41 model """rescal""" +524 41 loss """softplus""" +524 41 regularizer """no""" +524 41 optimizer """adadelta""" +524 41 training_loop """lcwa""" +524 41 evaluator """rankbased""" +524 42 dataset """kinships""" +524 42 model """rescal""" +524 42 loss """softplus""" +524 42 regularizer """no""" +524 42 optimizer """adadelta""" +524 42 training_loop """lcwa""" +524 42 evaluator """rankbased""" +524 43 dataset """kinships""" +524 43 model """rescal""" +524 43 loss """softplus""" +524 43 
regularizer """no""" +524 43 optimizer """adadelta""" +524 43 training_loop """lcwa""" +524 43 evaluator """rankbased""" +524 44 dataset """kinships""" +524 44 model """rescal""" +524 44 loss """softplus""" +524 44 regularizer """no""" +524 44 optimizer """adadelta""" +524 44 training_loop """lcwa""" +524 44 evaluator """rankbased""" +524 45 dataset """kinships""" +524 45 model """rescal""" +524 45 loss """softplus""" +524 45 regularizer """no""" +524 45 optimizer """adadelta""" +524 45 training_loop """lcwa""" +524 45 evaluator """rankbased""" +524 46 dataset """kinships""" +524 46 model """rescal""" +524 46 loss """softplus""" +524 46 regularizer """no""" +524 46 optimizer """adadelta""" +524 46 training_loop """lcwa""" +524 46 evaluator """rankbased""" +524 47 dataset """kinships""" +524 47 model """rescal""" +524 47 loss """softplus""" +524 47 regularizer """no""" +524 47 optimizer """adadelta""" +524 47 training_loop """lcwa""" +524 47 evaluator """rankbased""" +524 48 dataset """kinships""" +524 48 model """rescal""" +524 48 loss """softplus""" +524 48 regularizer """no""" +524 48 optimizer """adadelta""" +524 48 training_loop """lcwa""" +524 48 evaluator """rankbased""" +524 49 dataset """kinships""" +524 49 model """rescal""" +524 49 loss """softplus""" +524 49 regularizer """no""" +524 49 optimizer """adadelta""" +524 49 training_loop """lcwa""" +524 49 evaluator """rankbased""" +524 50 dataset """kinships""" +524 50 model """rescal""" +524 50 loss """softplus""" +524 50 regularizer """no""" +524 50 optimizer """adadelta""" +524 50 training_loop """lcwa""" +524 50 evaluator """rankbased""" +524 51 dataset """kinships""" +524 51 model """rescal""" +524 51 loss """softplus""" +524 51 regularizer """no""" +524 51 optimizer """adadelta""" +524 51 training_loop """lcwa""" +524 51 evaluator """rankbased""" +524 52 dataset """kinships""" +524 52 model """rescal""" +524 52 loss """softplus""" +524 52 regularizer """no""" +524 52 optimizer """adadelta""" +524 52 
training_loop """lcwa""" +524 52 evaluator """rankbased""" +524 53 dataset """kinships""" +524 53 model """rescal""" +524 53 loss """softplus""" +524 53 regularizer """no""" +524 53 optimizer """adadelta""" +524 53 training_loop """lcwa""" +524 53 evaluator """rankbased""" +524 54 dataset """kinships""" +524 54 model """rescal""" +524 54 loss """softplus""" +524 54 regularizer """no""" +524 54 optimizer """adadelta""" +524 54 training_loop """lcwa""" +524 54 evaluator """rankbased""" +524 55 dataset """kinships""" +524 55 model """rescal""" +524 55 loss """softplus""" +524 55 regularizer """no""" +524 55 optimizer """adadelta""" +524 55 training_loop """lcwa""" +524 55 evaluator """rankbased""" +524 56 dataset """kinships""" +524 56 model """rescal""" +524 56 loss """softplus""" +524 56 regularizer """no""" +524 56 optimizer """adadelta""" +524 56 training_loop """lcwa""" +524 56 evaluator """rankbased""" +524 57 dataset """kinships""" +524 57 model """rescal""" +524 57 loss """softplus""" +524 57 regularizer """no""" +524 57 optimizer """adadelta""" +524 57 training_loop """lcwa""" +524 57 evaluator """rankbased""" +524 58 dataset """kinships""" +524 58 model """rescal""" +524 58 loss """softplus""" +524 58 regularizer """no""" +524 58 optimizer """adadelta""" +524 58 training_loop """lcwa""" +524 58 evaluator """rankbased""" +524 59 dataset """kinships""" +524 59 model """rescal""" +524 59 loss """softplus""" +524 59 regularizer """no""" +524 59 optimizer """adadelta""" +524 59 training_loop """lcwa""" +524 59 evaluator """rankbased""" +524 60 dataset """kinships""" +524 60 model """rescal""" +524 60 loss """softplus""" +524 60 regularizer """no""" +524 60 optimizer """adadelta""" +524 60 training_loop """lcwa""" +524 60 evaluator """rankbased""" +524 61 dataset """kinships""" +524 61 model """rescal""" +524 61 loss """softplus""" +524 61 regularizer """no""" +524 61 optimizer """adadelta""" +524 61 training_loop """lcwa""" +524 61 evaluator """rankbased""" +524 
62 dataset """kinships""" +524 62 model """rescal""" +524 62 loss """softplus""" +524 62 regularizer """no""" +524 62 optimizer """adadelta""" +524 62 training_loop """lcwa""" +524 62 evaluator """rankbased""" +524 63 dataset """kinships""" +524 63 model """rescal""" +524 63 loss """softplus""" +524 63 regularizer """no""" +524 63 optimizer """adadelta""" +524 63 training_loop """lcwa""" +524 63 evaluator """rankbased""" +524 64 dataset """kinships""" +524 64 model """rescal""" +524 64 loss """softplus""" +524 64 regularizer """no""" +524 64 optimizer """adadelta""" +524 64 training_loop """lcwa""" +524 64 evaluator """rankbased""" +524 65 dataset """kinships""" +524 65 model """rescal""" +524 65 loss """softplus""" +524 65 regularizer """no""" +524 65 optimizer """adadelta""" +524 65 training_loop """lcwa""" +524 65 evaluator """rankbased""" +524 66 dataset """kinships""" +524 66 model """rescal""" +524 66 loss """softplus""" +524 66 regularizer """no""" +524 66 optimizer """adadelta""" +524 66 training_loop """lcwa""" +524 66 evaluator """rankbased""" +524 67 dataset """kinships""" +524 67 model """rescal""" +524 67 loss """softplus""" +524 67 regularizer """no""" +524 67 optimizer """adadelta""" +524 67 training_loop """lcwa""" +524 67 evaluator """rankbased""" +524 68 dataset """kinships""" +524 68 model """rescal""" +524 68 loss """softplus""" +524 68 regularizer """no""" +524 68 optimizer """adadelta""" +524 68 training_loop """lcwa""" +524 68 evaluator """rankbased""" +524 69 dataset """kinships""" +524 69 model """rescal""" +524 69 loss """softplus""" +524 69 regularizer """no""" +524 69 optimizer """adadelta""" +524 69 training_loop """lcwa""" +524 69 evaluator """rankbased""" +524 70 dataset """kinships""" +524 70 model """rescal""" +524 70 loss """softplus""" +524 70 regularizer """no""" +524 70 optimizer """adadelta""" +524 70 training_loop """lcwa""" +524 70 evaluator """rankbased""" +524 71 dataset """kinships""" +524 71 model """rescal""" +524 71 
loss """softplus""" +524 71 regularizer """no""" +524 71 optimizer """adadelta""" +524 71 training_loop """lcwa""" +524 71 evaluator """rankbased""" +524 72 dataset """kinships""" +524 72 model """rescal""" +524 72 loss """softplus""" +524 72 regularizer """no""" +524 72 optimizer """adadelta""" +524 72 training_loop """lcwa""" +524 72 evaluator """rankbased""" +524 73 dataset """kinships""" +524 73 model """rescal""" +524 73 loss """softplus""" +524 73 regularizer """no""" +524 73 optimizer """adadelta""" +524 73 training_loop """lcwa""" +524 73 evaluator """rankbased""" +524 74 dataset """kinships""" +524 74 model """rescal""" +524 74 loss """softplus""" +524 74 regularizer """no""" +524 74 optimizer """adadelta""" +524 74 training_loop """lcwa""" +524 74 evaluator """rankbased""" +524 75 dataset """kinships""" +524 75 model """rescal""" +524 75 loss """softplus""" +524 75 regularizer """no""" +524 75 optimizer """adadelta""" +524 75 training_loop """lcwa""" +524 75 evaluator """rankbased""" +524 76 dataset """kinships""" +524 76 model """rescal""" +524 76 loss """softplus""" +524 76 regularizer """no""" +524 76 optimizer """adadelta""" +524 76 training_loop """lcwa""" +524 76 evaluator """rankbased""" +524 77 dataset """kinships""" +524 77 model """rescal""" +524 77 loss """softplus""" +524 77 regularizer """no""" +524 77 optimizer """adadelta""" +524 77 training_loop """lcwa""" +524 77 evaluator """rankbased""" +524 78 dataset """kinships""" +524 78 model """rescal""" +524 78 loss """softplus""" +524 78 regularizer """no""" +524 78 optimizer """adadelta""" +524 78 training_loop """lcwa""" +524 78 evaluator """rankbased""" +524 79 dataset """kinships""" +524 79 model """rescal""" +524 79 loss """softplus""" +524 79 regularizer """no""" +524 79 optimizer """adadelta""" +524 79 training_loop """lcwa""" +524 79 evaluator """rankbased""" +524 80 dataset """kinships""" +524 80 model """rescal""" +524 80 loss """softplus""" +524 80 regularizer """no""" +524 80 
optimizer """adadelta""" +524 80 training_loop """lcwa""" +524 80 evaluator """rankbased""" +524 81 dataset """kinships""" +524 81 model """rescal""" +524 81 loss """softplus""" +524 81 regularizer """no""" +524 81 optimizer """adadelta""" +524 81 training_loop """lcwa""" +524 81 evaluator """rankbased""" +524 82 dataset """kinships""" +524 82 model """rescal""" +524 82 loss """softplus""" +524 82 regularizer """no""" +524 82 optimizer """adadelta""" +524 82 training_loop """lcwa""" +524 82 evaluator """rankbased""" +524 83 dataset """kinships""" +524 83 model """rescal""" +524 83 loss """softplus""" +524 83 regularizer """no""" +524 83 optimizer """adadelta""" +524 83 training_loop """lcwa""" +524 83 evaluator """rankbased""" +524 84 dataset """kinships""" +524 84 model """rescal""" +524 84 loss """softplus""" +524 84 regularizer """no""" +524 84 optimizer """adadelta""" +524 84 training_loop """lcwa""" +524 84 evaluator """rankbased""" +524 85 dataset """kinships""" +524 85 model """rescal""" +524 85 loss """softplus""" +524 85 regularizer """no""" +524 85 optimizer """adadelta""" +524 85 training_loop """lcwa""" +524 85 evaluator """rankbased""" +524 86 dataset """kinships""" +524 86 model """rescal""" +524 86 loss """softplus""" +524 86 regularizer """no""" +524 86 optimizer """adadelta""" +524 86 training_loop """lcwa""" +524 86 evaluator """rankbased""" +524 87 dataset """kinships""" +524 87 model """rescal""" +524 87 loss """softplus""" +524 87 regularizer """no""" +524 87 optimizer """adadelta""" +524 87 training_loop """lcwa""" +524 87 evaluator """rankbased""" +524 88 dataset """kinships""" +524 88 model """rescal""" +524 88 loss """softplus""" +524 88 regularizer """no""" +524 88 optimizer """adadelta""" +524 88 training_loop """lcwa""" +524 88 evaluator """rankbased""" +524 89 dataset """kinships""" +524 89 model """rescal""" +524 89 loss """softplus""" +524 89 regularizer """no""" +524 89 optimizer """adadelta""" +524 89 training_loop """lcwa""" +524 
89 evaluator """rankbased""" +524 90 dataset """kinships""" +524 90 model """rescal""" +524 90 loss """softplus""" +524 90 regularizer """no""" +524 90 optimizer """adadelta""" +524 90 training_loop """lcwa""" +524 90 evaluator """rankbased""" +524 91 dataset """kinships""" +524 91 model """rescal""" +524 91 loss """softplus""" +524 91 regularizer """no""" +524 91 optimizer """adadelta""" +524 91 training_loop """lcwa""" +524 91 evaluator """rankbased""" +524 92 dataset """kinships""" +524 92 model """rescal""" +524 92 loss """softplus""" +524 92 regularizer """no""" +524 92 optimizer """adadelta""" +524 92 training_loop """lcwa""" +524 92 evaluator """rankbased""" +524 93 dataset """kinships""" +524 93 model """rescal""" +524 93 loss """softplus""" +524 93 regularizer """no""" +524 93 optimizer """adadelta""" +524 93 training_loop """lcwa""" +524 93 evaluator """rankbased""" +524 94 dataset """kinships""" +524 94 model """rescal""" +524 94 loss """softplus""" +524 94 regularizer """no""" +524 94 optimizer """adadelta""" +524 94 training_loop """lcwa""" +524 94 evaluator """rankbased""" +524 95 dataset """kinships""" +524 95 model """rescal""" +524 95 loss """softplus""" +524 95 regularizer """no""" +524 95 optimizer """adadelta""" +524 95 training_loop """lcwa""" +524 95 evaluator """rankbased""" +524 96 dataset """kinships""" +524 96 model """rescal""" +524 96 loss """softplus""" +524 96 regularizer """no""" +524 96 optimizer """adadelta""" +524 96 training_loop """lcwa""" +524 96 evaluator """rankbased""" +524 97 dataset """kinships""" +524 97 model """rescal""" +524 97 loss """softplus""" +524 97 regularizer """no""" +524 97 optimizer """adadelta""" +524 97 training_loop """lcwa""" +524 97 evaluator """rankbased""" +524 98 dataset """kinships""" +524 98 model """rescal""" +524 98 loss """softplus""" +524 98 regularizer """no""" +524 98 optimizer """adadelta""" +524 98 training_loop """lcwa""" +524 98 evaluator """rankbased""" +524 99 dataset """kinships""" +524 
99 model """rescal""" +524 99 loss """softplus""" +524 99 regularizer """no""" +524 99 optimizer """adadelta""" +524 99 training_loop """lcwa""" +524 99 evaluator """rankbased""" +524 100 dataset """kinships""" +524 100 model """rescal""" +524 100 loss """softplus""" +524 100 regularizer """no""" +524 100 optimizer """adadelta""" +524 100 training_loop """lcwa""" +524 100 evaluator """rankbased""" +525 1 model.embedding_dim 1.0 +525 1 training.batch_size 1.0 +525 1 training.label_smoothing 0.07041970916205617 +525 2 model.embedding_dim 1.0 +525 2 training.batch_size 2.0 +525 2 training.label_smoothing 0.600493143611246 +525 3 model.embedding_dim 0.0 +525 3 training.batch_size 0.0 +525 3 training.label_smoothing 0.009290663965560922 +525 4 model.embedding_dim 2.0 +525 4 training.batch_size 1.0 +525 4 training.label_smoothing 0.06798477725463666 +525 5 model.embedding_dim 2.0 +525 5 training.batch_size 0.0 +525 5 training.label_smoothing 0.15173810076458558 +525 6 model.embedding_dim 0.0 +525 6 training.batch_size 1.0 +525 6 training.label_smoothing 0.959512021556033 +525 7 model.embedding_dim 2.0 +525 7 training.batch_size 2.0 +525 7 training.label_smoothing 0.0011227879370531947 +525 8 model.embedding_dim 0.0 +525 8 training.batch_size 2.0 +525 8 training.label_smoothing 0.24800702224059035 +525 9 model.embedding_dim 0.0 +525 9 training.batch_size 1.0 +525 9 training.label_smoothing 0.009526152211053981 +525 10 model.embedding_dim 2.0 +525 10 training.batch_size 0.0 +525 10 training.label_smoothing 0.23570601767430047 +525 11 model.embedding_dim 2.0 +525 11 training.batch_size 1.0 +525 11 training.label_smoothing 0.018575719171472934 +525 12 model.embedding_dim 1.0 +525 12 training.batch_size 2.0 +525 12 training.label_smoothing 0.09096682899150721 +525 13 model.embedding_dim 2.0 +525 13 training.batch_size 2.0 +525 13 training.label_smoothing 0.7593403178359677 +525 14 model.embedding_dim 2.0 +525 14 training.batch_size 0.0 +525 14 training.label_smoothing 
0.17083645624674804 +525 15 model.embedding_dim 2.0 +525 15 training.batch_size 2.0 +525 15 training.label_smoothing 0.007665616931172034 +525 16 model.embedding_dim 1.0 +525 16 training.batch_size 2.0 +525 16 training.label_smoothing 0.03281106042308429 +525 17 model.embedding_dim 2.0 +525 17 training.batch_size 2.0 +525 17 training.label_smoothing 0.017559307942063326 +525 18 model.embedding_dim 1.0 +525 18 training.batch_size 1.0 +525 18 training.label_smoothing 0.012909714006600164 +525 19 model.embedding_dim 1.0 +525 19 training.batch_size 2.0 +525 19 training.label_smoothing 0.6930972160588467 +525 20 model.embedding_dim 2.0 +525 20 training.batch_size 2.0 +525 20 training.label_smoothing 0.006710128858179135 +525 21 model.embedding_dim 0.0 +525 21 training.batch_size 0.0 +525 21 training.label_smoothing 0.5776029778168246 +525 22 model.embedding_dim 0.0 +525 22 training.batch_size 1.0 +525 22 training.label_smoothing 0.0014612251181479266 +525 23 model.embedding_dim 2.0 +525 23 training.batch_size 2.0 +525 23 training.label_smoothing 0.011016923652263439 +525 24 model.embedding_dim 2.0 +525 24 training.batch_size 0.0 +525 24 training.label_smoothing 0.007176770951245223 +525 25 model.embedding_dim 2.0 +525 25 training.batch_size 1.0 +525 25 training.label_smoothing 0.038654918167656725 +525 26 model.embedding_dim 1.0 +525 26 training.batch_size 0.0 +525 26 training.label_smoothing 0.5306199087097421 +525 27 model.embedding_dim 2.0 +525 27 training.batch_size 0.0 +525 27 training.label_smoothing 0.09899256887434697 +525 28 model.embedding_dim 0.0 +525 28 training.batch_size 1.0 +525 28 training.label_smoothing 0.02268496742781358 +525 29 model.embedding_dim 2.0 +525 29 training.batch_size 0.0 +525 29 training.label_smoothing 0.9479280902989982 +525 30 model.embedding_dim 2.0 +525 30 training.batch_size 0.0 +525 30 training.label_smoothing 0.42351544678756126 +525 31 model.embedding_dim 1.0 +525 31 training.batch_size 0.0 +525 31 training.label_smoothing 
0.036527837544472246 +525 32 model.embedding_dim 1.0 +525 32 training.batch_size 0.0 +525 32 training.label_smoothing 0.7726507802818785 +525 33 model.embedding_dim 2.0 +525 33 training.batch_size 0.0 +525 33 training.label_smoothing 0.33610348381027494 +525 34 model.embedding_dim 1.0 +525 34 training.batch_size 1.0 +525 34 training.label_smoothing 0.1488594740611729 +525 35 model.embedding_dim 0.0 +525 35 training.batch_size 1.0 +525 35 training.label_smoothing 0.3047014928017193 +525 36 model.embedding_dim 0.0 +525 36 training.batch_size 1.0 +525 36 training.label_smoothing 0.0012440882173410121 +525 37 model.embedding_dim 1.0 +525 37 training.batch_size 1.0 +525 37 training.label_smoothing 0.007439151402924184 +525 38 model.embedding_dim 0.0 +525 38 training.batch_size 2.0 +525 38 training.label_smoothing 0.9877565178859345 +525 39 model.embedding_dim 2.0 +525 39 training.batch_size 2.0 +525 39 training.label_smoothing 0.3186359600684416 +525 40 model.embedding_dim 0.0 +525 40 training.batch_size 1.0 +525 40 training.label_smoothing 0.057048848934428985 +525 41 model.embedding_dim 1.0 +525 41 training.batch_size 2.0 +525 41 training.label_smoothing 0.004380507655287718 +525 42 model.embedding_dim 1.0 +525 42 training.batch_size 0.0 +525 42 training.label_smoothing 0.004530053410694761 +525 43 model.embedding_dim 0.0 +525 43 training.batch_size 0.0 +525 43 training.label_smoothing 0.008391452986903834 +525 44 model.embedding_dim 2.0 +525 44 training.batch_size 1.0 +525 44 training.label_smoothing 0.1794427945428912 +525 45 model.embedding_dim 2.0 +525 45 training.batch_size 0.0 +525 45 training.label_smoothing 0.0028658901324421717 +525 46 model.embedding_dim 1.0 +525 46 training.batch_size 2.0 +525 46 training.label_smoothing 0.001374156127195988 +525 47 model.embedding_dim 2.0 +525 47 training.batch_size 1.0 +525 47 training.label_smoothing 0.009418182174786445 +525 48 model.embedding_dim 1.0 +525 48 training.batch_size 0.0 +525 48 training.label_smoothing 
0.0016366924841142987 +525 49 model.embedding_dim 1.0 +525 49 training.batch_size 2.0 +525 49 training.label_smoothing 0.02342849060431843 +525 50 model.embedding_dim 2.0 +525 50 training.batch_size 2.0 +525 50 training.label_smoothing 0.08959562213229927 +525 51 model.embedding_dim 0.0 +525 51 training.batch_size 1.0 +525 51 training.label_smoothing 0.13728676368820367 +525 52 model.embedding_dim 2.0 +525 52 training.batch_size 0.0 +525 52 training.label_smoothing 0.0014543973886742762 +525 53 model.embedding_dim 1.0 +525 53 training.batch_size 0.0 +525 53 training.label_smoothing 0.04688174707155518 +525 54 model.embedding_dim 0.0 +525 54 training.batch_size 1.0 +525 54 training.label_smoothing 0.0035734505450167285 +525 55 model.embedding_dim 0.0 +525 55 training.batch_size 2.0 +525 55 training.label_smoothing 0.003103870879093544 +525 56 model.embedding_dim 1.0 +525 56 training.batch_size 1.0 +525 56 training.label_smoothing 0.8411634570650981 +525 57 model.embedding_dim 1.0 +525 57 training.batch_size 1.0 +525 57 training.label_smoothing 0.43926846340114417 +525 58 model.embedding_dim 1.0 +525 58 training.batch_size 1.0 +525 58 training.label_smoothing 0.16480408108361833 +525 59 model.embedding_dim 1.0 +525 59 training.batch_size 1.0 +525 59 training.label_smoothing 0.001126900169331157 +525 60 model.embedding_dim 1.0 +525 60 training.batch_size 0.0 +525 60 training.label_smoothing 0.050821273753824205 +525 61 model.embedding_dim 2.0 +525 61 training.batch_size 0.0 +525 61 training.label_smoothing 0.9491192059833696 +525 62 model.embedding_dim 2.0 +525 62 training.batch_size 0.0 +525 62 training.label_smoothing 0.002730332230898198 +525 63 model.embedding_dim 0.0 +525 63 training.batch_size 2.0 +525 63 training.label_smoothing 0.036971680933795464 +525 64 model.embedding_dim 1.0 +525 64 training.batch_size 2.0 +525 64 training.label_smoothing 0.17096830645578118 +525 65 model.embedding_dim 0.0 +525 65 training.batch_size 1.0 +525 65 training.label_smoothing 
0.0540294897777174 +525 66 model.embedding_dim 0.0 +525 66 training.batch_size 2.0 +525 66 training.label_smoothing 0.0022540008640861437 +525 67 model.embedding_dim 2.0 +525 67 training.batch_size 0.0 +525 67 training.label_smoothing 0.0019654873074356214 +525 68 model.embedding_dim 0.0 +525 68 training.batch_size 2.0 +525 68 training.label_smoothing 0.7839200613411644 +525 69 model.embedding_dim 1.0 +525 69 training.batch_size 1.0 +525 69 training.label_smoothing 0.001041035720061862 +525 70 model.embedding_dim 2.0 +525 70 training.batch_size 0.0 +525 70 training.label_smoothing 0.04613278397262788 +525 71 model.embedding_dim 1.0 +525 71 training.batch_size 1.0 +525 71 training.label_smoothing 0.03221687968743913 +525 72 model.embedding_dim 2.0 +525 72 training.batch_size 0.0 +525 72 training.label_smoothing 0.3644788351133577 +525 73 model.embedding_dim 1.0 +525 73 training.batch_size 0.0 +525 73 training.label_smoothing 0.10285219681056751 +525 74 model.embedding_dim 1.0 +525 74 training.batch_size 1.0 +525 74 training.label_smoothing 0.4035372067145077 +525 75 model.embedding_dim 1.0 +525 75 training.batch_size 1.0 +525 75 training.label_smoothing 0.00415230669399924 +525 76 model.embedding_dim 2.0 +525 76 training.batch_size 1.0 +525 76 training.label_smoothing 0.0163738012594822 +525 77 model.embedding_dim 0.0 +525 77 training.batch_size 0.0 +525 77 training.label_smoothing 0.37117055263602944 +525 78 model.embedding_dim 1.0 +525 78 training.batch_size 2.0 +525 78 training.label_smoothing 0.019179702307167094 +525 79 model.embedding_dim 1.0 +525 79 training.batch_size 2.0 +525 79 training.label_smoothing 0.003773628502946669 +525 80 model.embedding_dim 0.0 +525 80 training.batch_size 1.0 +525 80 training.label_smoothing 0.010102229305021374 +525 81 model.embedding_dim 0.0 +525 81 training.batch_size 2.0 +525 81 training.label_smoothing 0.18925912115431823 +525 82 model.embedding_dim 0.0 +525 82 training.batch_size 1.0 +525 82 training.label_smoothing 
0.016317369524214195 +525 83 model.embedding_dim 2.0 +525 83 training.batch_size 1.0 +525 83 training.label_smoothing 0.0015271937415614405 +525 84 model.embedding_dim 0.0 +525 84 training.batch_size 1.0 +525 84 training.label_smoothing 0.005885121166235579 +525 85 model.embedding_dim 0.0 +525 85 training.batch_size 1.0 +525 85 training.label_smoothing 0.27583600219337484 +525 86 model.embedding_dim 2.0 +525 86 training.batch_size 0.0 +525 86 training.label_smoothing 0.31948692895649106 +525 87 model.embedding_dim 1.0 +525 87 training.batch_size 2.0 +525 87 training.label_smoothing 0.03840118528771057 +525 88 model.embedding_dim 0.0 +525 88 training.batch_size 0.0 +525 88 training.label_smoothing 0.10545027301999364 +525 89 model.embedding_dim 1.0 +525 89 training.batch_size 0.0 +525 89 training.label_smoothing 0.005585694304476844 +525 90 model.embedding_dim 2.0 +525 90 training.batch_size 1.0 +525 90 training.label_smoothing 0.03306295889527217 +525 91 model.embedding_dim 0.0 +525 91 training.batch_size 1.0 +525 91 training.label_smoothing 0.3396550536556588 +525 92 model.embedding_dim 1.0 +525 92 training.batch_size 0.0 +525 92 training.label_smoothing 0.0019291033838882115 +525 93 model.embedding_dim 0.0 +525 93 training.batch_size 2.0 +525 93 training.label_smoothing 0.053023320441289205 +525 94 model.embedding_dim 2.0 +525 94 training.batch_size 2.0 +525 94 training.label_smoothing 0.002096312823825687 +525 95 model.embedding_dim 2.0 +525 95 training.batch_size 2.0 +525 95 training.label_smoothing 0.00311514942862082 +525 96 model.embedding_dim 1.0 +525 96 training.batch_size 2.0 +525 96 training.label_smoothing 0.039450193610383114 +525 97 model.embedding_dim 2.0 +525 97 training.batch_size 1.0 +525 97 training.label_smoothing 0.11448716655006423 +525 98 model.embedding_dim 1.0 +525 98 training.batch_size 0.0 +525 98 training.label_smoothing 0.5010129843898852 +525 99 model.embedding_dim 0.0 +525 99 training.batch_size 2.0 +525 99 training.label_smoothing 
0.19900176548333842 +525 100 model.embedding_dim 2.0 +525 100 training.batch_size 0.0 +525 100 training.label_smoothing 0.05142809054827752 +525 1 dataset """kinships""" +525 1 model """rescal""" +525 1 loss """crossentropy""" +525 1 regularizer """no""" +525 1 optimizer """adadelta""" +525 1 training_loop """lcwa""" +525 1 evaluator """rankbased""" +525 2 dataset """kinships""" +525 2 model """rescal""" +525 2 loss """crossentropy""" +525 2 regularizer """no""" +525 2 optimizer """adadelta""" +525 2 training_loop """lcwa""" +525 2 evaluator """rankbased""" +525 3 dataset """kinships""" +525 3 model """rescal""" +525 3 loss """crossentropy""" +525 3 regularizer """no""" +525 3 optimizer """adadelta""" +525 3 training_loop """lcwa""" +525 3 evaluator """rankbased""" +525 4 dataset """kinships""" +525 4 model """rescal""" +525 4 loss """crossentropy""" +525 4 regularizer """no""" +525 4 optimizer """adadelta""" +525 4 training_loop """lcwa""" +525 4 evaluator """rankbased""" +525 5 dataset """kinships""" +525 5 model """rescal""" +525 5 loss """crossentropy""" +525 5 regularizer """no""" +525 5 optimizer """adadelta""" +525 5 training_loop """lcwa""" +525 5 evaluator """rankbased""" +525 6 dataset """kinships""" +525 6 model """rescal""" +525 6 loss """crossentropy""" +525 6 regularizer """no""" +525 6 optimizer """adadelta""" +525 6 training_loop """lcwa""" +525 6 evaluator """rankbased""" +525 7 dataset """kinships""" +525 7 model """rescal""" +525 7 loss """crossentropy""" +525 7 regularizer """no""" +525 7 optimizer """adadelta""" +525 7 training_loop """lcwa""" +525 7 evaluator """rankbased""" +525 8 dataset """kinships""" +525 8 model """rescal""" +525 8 loss """crossentropy""" +525 8 regularizer """no""" +525 8 optimizer """adadelta""" +525 8 training_loop """lcwa""" +525 8 evaluator """rankbased""" +525 9 dataset """kinships""" +525 9 model """rescal""" +525 9 loss """crossentropy""" +525 9 regularizer """no""" +525 9 optimizer """adadelta""" +525 9 
training_loop """lcwa""" +525 9 evaluator """rankbased""" +525 10 dataset """kinships""" +525 10 model """rescal""" +525 10 loss """crossentropy""" +525 10 regularizer """no""" +525 10 optimizer """adadelta""" +525 10 training_loop """lcwa""" +525 10 evaluator """rankbased""" +525 11 dataset """kinships""" +525 11 model """rescal""" +525 11 loss """crossentropy""" +525 11 regularizer """no""" +525 11 optimizer """adadelta""" +525 11 training_loop """lcwa""" +525 11 evaluator """rankbased""" +525 12 dataset """kinships""" +525 12 model """rescal""" +525 12 loss """crossentropy""" +525 12 regularizer """no""" +525 12 optimizer """adadelta""" +525 12 training_loop """lcwa""" +525 12 evaluator """rankbased""" +525 13 dataset """kinships""" +525 13 model """rescal""" +525 13 loss """crossentropy""" +525 13 regularizer """no""" +525 13 optimizer """adadelta""" +525 13 training_loop """lcwa""" +525 13 evaluator """rankbased""" +525 14 dataset """kinships""" +525 14 model """rescal""" +525 14 loss """crossentropy""" +525 14 regularizer """no""" +525 14 optimizer """adadelta""" +525 14 training_loop """lcwa""" +525 14 evaluator """rankbased""" +525 15 dataset """kinships""" +525 15 model """rescal""" +525 15 loss """crossentropy""" +525 15 regularizer """no""" +525 15 optimizer """adadelta""" +525 15 training_loop """lcwa""" +525 15 evaluator """rankbased""" +525 16 dataset """kinships""" +525 16 model """rescal""" +525 16 loss """crossentropy""" +525 16 regularizer """no""" +525 16 optimizer """adadelta""" +525 16 training_loop """lcwa""" +525 16 evaluator """rankbased""" +525 17 dataset """kinships""" +525 17 model """rescal""" +525 17 loss """crossentropy""" +525 17 regularizer """no""" +525 17 optimizer """adadelta""" +525 17 training_loop """lcwa""" +525 17 evaluator """rankbased""" +525 18 dataset """kinships""" +525 18 model """rescal""" +525 18 loss """crossentropy""" +525 18 regularizer """no""" +525 18 optimizer """adadelta""" +525 18 training_loop """lcwa""" +525 
18 evaluator """rankbased""" +525 19 dataset """kinships""" +525 19 model """rescal""" +525 19 loss """crossentropy""" +525 19 regularizer """no""" +525 19 optimizer """adadelta""" +525 19 training_loop """lcwa""" +525 19 evaluator """rankbased""" +525 20 dataset """kinships""" +525 20 model """rescal""" +525 20 loss """crossentropy""" +525 20 regularizer """no""" +525 20 optimizer """adadelta""" +525 20 training_loop """lcwa""" +525 20 evaluator """rankbased""" +525 21 dataset """kinships""" +525 21 model """rescal""" +525 21 loss """crossentropy""" +525 21 regularizer """no""" +525 21 optimizer """adadelta""" +525 21 training_loop """lcwa""" +525 21 evaluator """rankbased""" +525 22 dataset """kinships""" +525 22 model """rescal""" +525 22 loss """crossentropy""" +525 22 regularizer """no""" +525 22 optimizer """adadelta""" +525 22 training_loop """lcwa""" +525 22 evaluator """rankbased""" +525 23 dataset """kinships""" +525 23 model """rescal""" +525 23 loss """crossentropy""" +525 23 regularizer """no""" +525 23 optimizer """adadelta""" +525 23 training_loop """lcwa""" +525 23 evaluator """rankbased""" +525 24 dataset """kinships""" +525 24 model """rescal""" +525 24 loss """crossentropy""" +525 24 regularizer """no""" +525 24 optimizer """adadelta""" +525 24 training_loop """lcwa""" +525 24 evaluator """rankbased""" +525 25 dataset """kinships""" +525 25 model """rescal""" +525 25 loss """crossentropy""" +525 25 regularizer """no""" +525 25 optimizer """adadelta""" +525 25 training_loop """lcwa""" +525 25 evaluator """rankbased""" +525 26 dataset """kinships""" +525 26 model """rescal""" +525 26 loss """crossentropy""" +525 26 regularizer """no""" +525 26 optimizer """adadelta""" +525 26 training_loop """lcwa""" +525 26 evaluator """rankbased""" +525 27 dataset """kinships""" +525 27 model """rescal""" +525 27 loss """crossentropy""" +525 27 regularizer """no""" +525 27 optimizer """adadelta""" +525 27 training_loop """lcwa""" +525 27 evaluator """rankbased""" 
+525 28 dataset """kinships""" +525 28 model """rescal""" +525 28 loss """crossentropy""" +525 28 regularizer """no""" +525 28 optimizer """adadelta""" +525 28 training_loop """lcwa""" +525 28 evaluator """rankbased""" +525 29 dataset """kinships""" +525 29 model """rescal""" +525 29 loss """crossentropy""" +525 29 regularizer """no""" +525 29 optimizer """adadelta""" +525 29 training_loop """lcwa""" +525 29 evaluator """rankbased""" +525 30 dataset """kinships""" +525 30 model """rescal""" +525 30 loss """crossentropy""" +525 30 regularizer """no""" +525 30 optimizer """adadelta""" +525 30 training_loop """lcwa""" +525 30 evaluator """rankbased""" +525 31 dataset """kinships""" +525 31 model """rescal""" +525 31 loss """crossentropy""" +525 31 regularizer """no""" +525 31 optimizer """adadelta""" +525 31 training_loop """lcwa""" +525 31 evaluator """rankbased""" +525 32 dataset """kinships""" +525 32 model """rescal""" +525 32 loss """crossentropy""" +525 32 regularizer """no""" +525 32 optimizer """adadelta""" +525 32 training_loop """lcwa""" +525 32 evaluator """rankbased""" +525 33 dataset """kinships""" +525 33 model """rescal""" +525 33 loss """crossentropy""" +525 33 regularizer """no""" +525 33 optimizer """adadelta""" +525 33 training_loop """lcwa""" +525 33 evaluator """rankbased""" +525 34 dataset """kinships""" +525 34 model """rescal""" +525 34 loss """crossentropy""" +525 34 regularizer """no""" +525 34 optimizer """adadelta""" +525 34 training_loop """lcwa""" +525 34 evaluator """rankbased""" +525 35 dataset """kinships""" +525 35 model """rescal""" +525 35 loss """crossentropy""" +525 35 regularizer """no""" +525 35 optimizer """adadelta""" +525 35 training_loop """lcwa""" +525 35 evaluator """rankbased""" +525 36 dataset """kinships""" +525 36 model """rescal""" +525 36 loss """crossentropy""" +525 36 regularizer """no""" +525 36 optimizer """adadelta""" +525 36 training_loop """lcwa""" +525 36 evaluator """rankbased""" +525 37 dataset 
"""kinships""" +525 37 model """rescal""" +525 37 loss """crossentropy""" +525 37 regularizer """no""" +525 37 optimizer """adadelta""" +525 37 training_loop """lcwa""" +525 37 evaluator """rankbased""" +525 38 dataset """kinships""" +525 38 model """rescal""" +525 38 loss """crossentropy""" +525 38 regularizer """no""" +525 38 optimizer """adadelta""" +525 38 training_loop """lcwa""" +525 38 evaluator """rankbased""" +525 39 dataset """kinships""" +525 39 model """rescal""" +525 39 loss """crossentropy""" +525 39 regularizer """no""" +525 39 optimizer """adadelta""" +525 39 training_loop """lcwa""" +525 39 evaluator """rankbased""" +525 40 dataset """kinships""" +525 40 model """rescal""" +525 40 loss """crossentropy""" +525 40 regularizer """no""" +525 40 optimizer """adadelta""" +525 40 training_loop """lcwa""" +525 40 evaluator """rankbased""" +525 41 dataset """kinships""" +525 41 model """rescal""" +525 41 loss """crossentropy""" +525 41 regularizer """no""" +525 41 optimizer """adadelta""" +525 41 training_loop """lcwa""" +525 41 evaluator """rankbased""" +525 42 dataset """kinships""" +525 42 model """rescal""" +525 42 loss """crossentropy""" +525 42 regularizer """no""" +525 42 optimizer """adadelta""" +525 42 training_loop """lcwa""" +525 42 evaluator """rankbased""" +525 43 dataset """kinships""" +525 43 model """rescal""" +525 43 loss """crossentropy""" +525 43 regularizer """no""" +525 43 optimizer """adadelta""" +525 43 training_loop """lcwa""" +525 43 evaluator """rankbased""" +525 44 dataset """kinships""" +525 44 model """rescal""" +525 44 loss """crossentropy""" +525 44 regularizer """no""" +525 44 optimizer """adadelta""" +525 44 training_loop """lcwa""" +525 44 evaluator """rankbased""" +525 45 dataset """kinships""" +525 45 model """rescal""" +525 45 loss """crossentropy""" +525 45 regularizer """no""" +525 45 optimizer """adadelta""" +525 45 training_loop """lcwa""" +525 45 evaluator """rankbased""" +525 46 dataset """kinships""" +525 46 model 
"""rescal""" +525 46 loss """crossentropy""" +525 46 regularizer """no""" +525 46 optimizer """adadelta""" +525 46 training_loop """lcwa""" +525 46 evaluator """rankbased""" +525 47 dataset """kinships""" +525 47 model """rescal""" +525 47 loss """crossentropy""" +525 47 regularizer """no""" +525 47 optimizer """adadelta""" +525 47 training_loop """lcwa""" +525 47 evaluator """rankbased""" +525 48 dataset """kinships""" +525 48 model """rescal""" +525 48 loss """crossentropy""" +525 48 regularizer """no""" +525 48 optimizer """adadelta""" +525 48 training_loop """lcwa""" +525 48 evaluator """rankbased""" +525 49 dataset """kinships""" +525 49 model """rescal""" +525 49 loss """crossentropy""" +525 49 regularizer """no""" +525 49 optimizer """adadelta""" +525 49 training_loop """lcwa""" +525 49 evaluator """rankbased""" +525 50 dataset """kinships""" +525 50 model """rescal""" +525 50 loss """crossentropy""" +525 50 regularizer """no""" +525 50 optimizer """adadelta""" +525 50 training_loop """lcwa""" +525 50 evaluator """rankbased""" +525 51 dataset """kinships""" +525 51 model """rescal""" +525 51 loss """crossentropy""" +525 51 regularizer """no""" +525 51 optimizer """adadelta""" +525 51 training_loop """lcwa""" +525 51 evaluator """rankbased""" +525 52 dataset """kinships""" +525 52 model """rescal""" +525 52 loss """crossentropy""" +525 52 regularizer """no""" +525 52 optimizer """adadelta""" +525 52 training_loop """lcwa""" +525 52 evaluator """rankbased""" +525 53 dataset """kinships""" +525 53 model """rescal""" +525 53 loss """crossentropy""" +525 53 regularizer """no""" +525 53 optimizer """adadelta""" +525 53 training_loop """lcwa""" +525 53 evaluator """rankbased""" +525 54 dataset """kinships""" +525 54 model """rescal""" +525 54 loss """crossentropy""" +525 54 regularizer """no""" +525 54 optimizer """adadelta""" +525 54 training_loop """lcwa""" +525 54 evaluator """rankbased""" +525 55 dataset """kinships""" +525 55 model """rescal""" +525 55 loss 
"""crossentropy""" +525 55 regularizer """no""" +525 55 optimizer """adadelta""" +525 55 training_loop """lcwa""" +525 55 evaluator """rankbased""" +525 56 dataset """kinships""" +525 56 model """rescal""" +525 56 loss """crossentropy""" +525 56 regularizer """no""" +525 56 optimizer """adadelta""" +525 56 training_loop """lcwa""" +525 56 evaluator """rankbased""" +525 57 dataset """kinships""" +525 57 model """rescal""" +525 57 loss """crossentropy""" +525 57 regularizer """no""" +525 57 optimizer """adadelta""" +525 57 training_loop """lcwa""" +525 57 evaluator """rankbased""" +525 58 dataset """kinships""" +525 58 model """rescal""" +525 58 loss """crossentropy""" +525 58 regularizer """no""" +525 58 optimizer """adadelta""" +525 58 training_loop """lcwa""" +525 58 evaluator """rankbased""" +525 59 dataset """kinships""" +525 59 model """rescal""" +525 59 loss """crossentropy""" +525 59 regularizer """no""" +525 59 optimizer """adadelta""" +525 59 training_loop """lcwa""" +525 59 evaluator """rankbased""" +525 60 dataset """kinships""" +525 60 model """rescal""" +525 60 loss """crossentropy""" +525 60 regularizer """no""" +525 60 optimizer """adadelta""" +525 60 training_loop """lcwa""" +525 60 evaluator """rankbased""" +525 61 dataset """kinships""" +525 61 model """rescal""" +525 61 loss """crossentropy""" +525 61 regularizer """no""" +525 61 optimizer """adadelta""" +525 61 training_loop """lcwa""" +525 61 evaluator """rankbased""" +525 62 dataset """kinships""" +525 62 model """rescal""" +525 62 loss """crossentropy""" +525 62 regularizer """no""" +525 62 optimizer """adadelta""" +525 62 training_loop """lcwa""" +525 62 evaluator """rankbased""" +525 63 dataset """kinships""" +525 63 model """rescal""" +525 63 loss """crossentropy""" +525 63 regularizer """no""" +525 63 optimizer """adadelta""" +525 63 training_loop """lcwa""" +525 63 evaluator """rankbased""" +525 64 dataset """kinships""" +525 64 model """rescal""" +525 64 loss """crossentropy""" +525 64 
regularizer """no""" +525 64 optimizer """adadelta""" +525 64 training_loop """lcwa""" +525 64 evaluator """rankbased""" +525 65 dataset """kinships""" +525 65 model """rescal""" +525 65 loss """crossentropy""" +525 65 regularizer """no""" +525 65 optimizer """adadelta""" +525 65 training_loop """lcwa""" +525 65 evaluator """rankbased""" +525 66 dataset """kinships""" +525 66 model """rescal""" +525 66 loss """crossentropy""" +525 66 regularizer """no""" +525 66 optimizer """adadelta""" +525 66 training_loop """lcwa""" +525 66 evaluator """rankbased""" +525 67 dataset """kinships""" +525 67 model """rescal""" +525 67 loss """crossentropy""" +525 67 regularizer """no""" +525 67 optimizer """adadelta""" +525 67 training_loop """lcwa""" +525 67 evaluator """rankbased""" +525 68 dataset """kinships""" +525 68 model """rescal""" +525 68 loss """crossentropy""" +525 68 regularizer """no""" +525 68 optimizer """adadelta""" +525 68 training_loop """lcwa""" +525 68 evaluator """rankbased""" +525 69 dataset """kinships""" +525 69 model """rescal""" +525 69 loss """crossentropy""" +525 69 regularizer """no""" +525 69 optimizer """adadelta""" +525 69 training_loop """lcwa""" +525 69 evaluator """rankbased""" +525 70 dataset """kinships""" +525 70 model """rescal""" +525 70 loss """crossentropy""" +525 70 regularizer """no""" +525 70 optimizer """adadelta""" +525 70 training_loop """lcwa""" +525 70 evaluator """rankbased""" +525 71 dataset """kinships""" +525 71 model """rescal""" +525 71 loss """crossentropy""" +525 71 regularizer """no""" +525 71 optimizer """adadelta""" +525 71 training_loop """lcwa""" +525 71 evaluator """rankbased""" +525 72 dataset """kinships""" +525 72 model """rescal""" +525 72 loss """crossentropy""" +525 72 regularizer """no""" +525 72 optimizer """adadelta""" +525 72 training_loop """lcwa""" +525 72 evaluator """rankbased""" +525 73 dataset """kinships""" +525 73 model """rescal""" +525 73 loss """crossentropy""" +525 73 regularizer """no""" +525 73 
optimizer """adadelta""" +525 73 training_loop """lcwa""" +525 73 evaluator """rankbased""" +525 74 dataset """kinships""" +525 74 model """rescal""" +525 74 loss """crossentropy""" +525 74 regularizer """no""" +525 74 optimizer """adadelta""" +525 74 training_loop """lcwa""" +525 74 evaluator """rankbased""" +525 75 dataset """kinships""" +525 75 model """rescal""" +525 75 loss """crossentropy""" +525 75 regularizer """no""" +525 75 optimizer """adadelta""" +525 75 training_loop """lcwa""" +525 75 evaluator """rankbased""" +525 76 dataset """kinships""" +525 76 model """rescal""" +525 76 loss """crossentropy""" +525 76 regularizer """no""" +525 76 optimizer """adadelta""" +525 76 training_loop """lcwa""" +525 76 evaluator """rankbased""" +525 77 dataset """kinships""" +525 77 model """rescal""" +525 77 loss """crossentropy""" +525 77 regularizer """no""" +525 77 optimizer """adadelta""" +525 77 training_loop """lcwa""" +525 77 evaluator """rankbased""" +525 78 dataset """kinships""" +525 78 model """rescal""" +525 78 loss """crossentropy""" +525 78 regularizer """no""" +525 78 optimizer """adadelta""" +525 78 training_loop """lcwa""" +525 78 evaluator """rankbased""" +525 79 dataset """kinships""" +525 79 model """rescal""" +525 79 loss """crossentropy""" +525 79 regularizer """no""" +525 79 optimizer """adadelta""" +525 79 training_loop """lcwa""" +525 79 evaluator """rankbased""" +525 80 dataset """kinships""" +525 80 model """rescal""" +525 80 loss """crossentropy""" +525 80 regularizer """no""" +525 80 optimizer """adadelta""" +525 80 training_loop """lcwa""" +525 80 evaluator """rankbased""" +525 81 dataset """kinships""" +525 81 model """rescal""" +525 81 loss """crossentropy""" +525 81 regularizer """no""" +525 81 optimizer """adadelta""" +525 81 training_loop """lcwa""" +525 81 evaluator """rankbased""" +525 82 dataset """kinships""" +525 82 model """rescal""" +525 82 loss """crossentropy""" +525 82 regularizer """no""" +525 82 optimizer """adadelta""" 
+525 82 training_loop """lcwa""" +525 82 evaluator """rankbased""" +525 83 dataset """kinships""" +525 83 model """rescal""" +525 83 loss """crossentropy""" +525 83 regularizer """no""" +525 83 optimizer """adadelta""" +525 83 training_loop """lcwa""" +525 83 evaluator """rankbased""" +525 84 dataset """kinships""" +525 84 model """rescal""" +525 84 loss """crossentropy""" +525 84 regularizer """no""" +525 84 optimizer """adadelta""" +525 84 training_loop """lcwa""" +525 84 evaluator """rankbased""" +525 85 dataset """kinships""" +525 85 model """rescal""" +525 85 loss """crossentropy""" +525 85 regularizer """no""" +525 85 optimizer """adadelta""" +525 85 training_loop """lcwa""" +525 85 evaluator """rankbased""" +525 86 dataset """kinships""" +525 86 model """rescal""" +525 86 loss """crossentropy""" +525 86 regularizer """no""" +525 86 optimizer """adadelta""" +525 86 training_loop """lcwa""" +525 86 evaluator """rankbased""" +525 87 dataset """kinships""" +525 87 model """rescal""" +525 87 loss """crossentropy""" +525 87 regularizer """no""" +525 87 optimizer """adadelta""" +525 87 training_loop """lcwa""" +525 87 evaluator """rankbased""" +525 88 dataset """kinships""" +525 88 model """rescal""" +525 88 loss """crossentropy""" +525 88 regularizer """no""" +525 88 optimizer """adadelta""" +525 88 training_loop """lcwa""" +525 88 evaluator """rankbased""" +525 89 dataset """kinships""" +525 89 model """rescal""" +525 89 loss """crossentropy""" +525 89 regularizer """no""" +525 89 optimizer """adadelta""" +525 89 training_loop """lcwa""" +525 89 evaluator """rankbased""" +525 90 dataset """kinships""" +525 90 model """rescal""" +525 90 loss """crossentropy""" +525 90 regularizer """no""" +525 90 optimizer """adadelta""" +525 90 training_loop """lcwa""" +525 90 evaluator """rankbased""" +525 91 dataset """kinships""" +525 91 model """rescal""" +525 91 loss """crossentropy""" +525 91 regularizer """no""" +525 91 optimizer """adadelta""" +525 91 training_loop 
"""lcwa""" +525 91 evaluator """rankbased""" +525 92 dataset """kinships""" +525 92 model """rescal""" +525 92 loss """crossentropy""" +525 92 regularizer """no""" +525 92 optimizer """adadelta""" +525 92 training_loop """lcwa""" +525 92 evaluator """rankbased""" +525 93 dataset """kinships""" +525 93 model """rescal""" +525 93 loss """crossentropy""" +525 93 regularizer """no""" +525 93 optimizer """adadelta""" +525 93 training_loop """lcwa""" +525 93 evaluator """rankbased""" +525 94 dataset """kinships""" +525 94 model """rescal""" +525 94 loss """crossentropy""" +525 94 regularizer """no""" +525 94 optimizer """adadelta""" +525 94 training_loop """lcwa""" +525 94 evaluator """rankbased""" +525 95 dataset """kinships""" +525 95 model """rescal""" +525 95 loss """crossentropy""" +525 95 regularizer """no""" +525 95 optimizer """adadelta""" +525 95 training_loop """lcwa""" +525 95 evaluator """rankbased""" +525 96 dataset """kinships""" +525 96 model """rescal""" +525 96 loss """crossentropy""" +525 96 regularizer """no""" +525 96 optimizer """adadelta""" +525 96 training_loop """lcwa""" +525 96 evaluator """rankbased""" +525 97 dataset """kinships""" +525 97 model """rescal""" +525 97 loss """crossentropy""" +525 97 regularizer """no""" +525 97 optimizer """adadelta""" +525 97 training_loop """lcwa""" +525 97 evaluator """rankbased""" +525 98 dataset """kinships""" +525 98 model """rescal""" +525 98 loss """crossentropy""" +525 98 regularizer """no""" +525 98 optimizer """adadelta""" +525 98 training_loop """lcwa""" +525 98 evaluator """rankbased""" +525 99 dataset """kinships""" +525 99 model """rescal""" +525 99 loss """crossentropy""" +525 99 regularizer """no""" +525 99 optimizer """adadelta""" +525 99 training_loop """lcwa""" +525 99 evaluator """rankbased""" +525 100 dataset """kinships""" +525 100 model """rescal""" +525 100 loss """crossentropy""" +525 100 regularizer """no""" +525 100 optimizer """adadelta""" +525 100 training_loop """lcwa""" +525 100 
evaluator """rankbased""" +526 1 model.embedding_dim 0.0 +526 1 training.batch_size 0.0 +526 1 training.label_smoothing 0.2843699281238013 +526 2 model.embedding_dim 2.0 +526 2 training.batch_size 2.0 +526 2 training.label_smoothing 0.005299397936543122 +526 3 model.embedding_dim 1.0 +526 3 training.batch_size 0.0 +526 3 training.label_smoothing 0.00704431978862956 +526 4 model.embedding_dim 1.0 +526 4 training.batch_size 1.0 +526 4 training.label_smoothing 0.14443413354012744 +526 5 model.embedding_dim 1.0 +526 5 training.batch_size 2.0 +526 5 training.label_smoothing 0.039028761338917244 +526 6 model.embedding_dim 0.0 +526 6 training.batch_size 1.0 +526 6 training.label_smoothing 0.05361656109226899 +526 7 model.embedding_dim 1.0 +526 7 training.batch_size 1.0 +526 7 training.label_smoothing 0.005510117764465802 +526 8 model.embedding_dim 1.0 +526 8 training.batch_size 0.0 +526 8 training.label_smoothing 0.006204606957278139 +526 9 model.embedding_dim 0.0 +526 9 training.batch_size 2.0 +526 9 training.label_smoothing 0.009432908919591629 +526 10 model.embedding_dim 0.0 +526 10 training.batch_size 0.0 +526 10 training.label_smoothing 0.11657447184416292 +526 11 model.embedding_dim 0.0 +526 11 training.batch_size 1.0 +526 11 training.label_smoothing 0.03532104633537296 +526 12 model.embedding_dim 2.0 +526 12 training.batch_size 1.0 +526 12 training.label_smoothing 0.005760940532885107 +526 13 model.embedding_dim 1.0 +526 13 training.batch_size 1.0 +526 13 training.label_smoothing 0.1700781554721873 +526 14 model.embedding_dim 1.0 +526 14 training.batch_size 0.0 +526 14 training.label_smoothing 0.02565281553845892 +526 15 model.embedding_dim 1.0 +526 15 training.batch_size 2.0 +526 15 training.label_smoothing 0.32597753567787624 +526 16 model.embedding_dim 0.0 +526 16 training.batch_size 1.0 +526 16 training.label_smoothing 0.021552279307623465 +526 17 model.embedding_dim 1.0 +526 17 training.batch_size 2.0 +526 17 training.label_smoothing 0.0011996217846392617 +526 
18 model.embedding_dim 1.0 +526 18 training.batch_size 2.0 +526 18 training.label_smoothing 0.015358512229664462 +526 19 model.embedding_dim 1.0 +526 19 training.batch_size 1.0 +526 19 training.label_smoothing 0.10008484152690277 +526 20 model.embedding_dim 1.0 +526 20 training.batch_size 2.0 +526 20 training.label_smoothing 0.010740730351932211 +526 21 model.embedding_dim 0.0 +526 21 training.batch_size 2.0 +526 21 training.label_smoothing 0.16385992538056243 +526 22 model.embedding_dim 2.0 +526 22 training.batch_size 1.0 +526 22 training.label_smoothing 0.008157981063633638 +526 23 model.embedding_dim 2.0 +526 23 training.batch_size 0.0 +526 23 training.label_smoothing 0.19491090367126632 +526 24 model.embedding_dim 0.0 +526 24 training.batch_size 2.0 +526 24 training.label_smoothing 0.9752120706986858 +526 25 model.embedding_dim 2.0 +526 25 training.batch_size 1.0 +526 25 training.label_smoothing 0.09933482844263292 +526 26 model.embedding_dim 1.0 +526 26 training.batch_size 1.0 +526 26 training.label_smoothing 0.022514679645162468 +526 27 model.embedding_dim 1.0 +526 27 training.batch_size 0.0 +526 27 training.label_smoothing 0.0036956123243498742 +526 28 model.embedding_dim 1.0 +526 28 training.batch_size 0.0 +526 28 training.label_smoothing 0.23605523662305886 +526 29 model.embedding_dim 2.0 +526 29 training.batch_size 2.0 +526 29 training.label_smoothing 0.005831484997809468 +526 30 model.embedding_dim 1.0 +526 30 training.batch_size 2.0 +526 30 training.label_smoothing 0.005569451592631457 +526 31 model.embedding_dim 2.0 +526 31 training.batch_size 2.0 +526 31 training.label_smoothing 0.06899432551472895 +526 32 model.embedding_dim 1.0 +526 32 training.batch_size 2.0 +526 32 training.label_smoothing 0.08893241544965087 +526 33 model.embedding_dim 0.0 +526 33 training.batch_size 0.0 +526 33 training.label_smoothing 0.14122444321720323 +526 34 model.embedding_dim 1.0 +526 34 training.batch_size 0.0 +526 34 training.label_smoothing 0.0014582066460074612 +526 
35 model.embedding_dim 2.0 +526 35 training.batch_size 1.0 +526 35 training.label_smoothing 0.004592390082791119 +526 36 model.embedding_dim 0.0 +526 36 training.batch_size 1.0 +526 36 training.label_smoothing 0.5466501729666357 +526 37 model.embedding_dim 1.0 +526 37 training.batch_size 1.0 +526 37 training.label_smoothing 0.1384487342913257 +526 38 model.embedding_dim 1.0 +526 38 training.batch_size 0.0 +526 38 training.label_smoothing 0.012223578619012231 +526 39 model.embedding_dim 1.0 +526 39 training.batch_size 1.0 +526 39 training.label_smoothing 0.023395885623050245 +526 40 model.embedding_dim 1.0 +526 40 training.batch_size 1.0 +526 40 training.label_smoothing 0.001317565580953397 +526 41 model.embedding_dim 2.0 +526 41 training.batch_size 0.0 +526 41 training.label_smoothing 0.0020218693623045942 +526 42 model.embedding_dim 1.0 +526 42 training.batch_size 2.0 +526 42 training.label_smoothing 0.005758132478885045 +526 43 model.embedding_dim 0.0 +526 43 training.batch_size 0.0 +526 43 training.label_smoothing 0.002094484960700816 +526 44 model.embedding_dim 0.0 +526 44 training.batch_size 0.0 +526 44 training.label_smoothing 0.6660691684413668 +526 45 model.embedding_dim 1.0 +526 45 training.batch_size 2.0 +526 45 training.label_smoothing 0.3070499659508204 +526 46 model.embedding_dim 1.0 +526 46 training.batch_size 1.0 +526 46 training.label_smoothing 0.7921479372480956 +526 47 model.embedding_dim 1.0 +526 47 training.batch_size 1.0 +526 47 training.label_smoothing 0.0052334601703775425 +526 48 model.embedding_dim 0.0 +526 48 training.batch_size 0.0 +526 48 training.label_smoothing 0.517258372951125 +526 49 model.embedding_dim 2.0 +526 49 training.batch_size 1.0 +526 49 training.label_smoothing 0.05936374820394808 +526 50 model.embedding_dim 0.0 +526 50 training.batch_size 1.0 +526 50 training.label_smoothing 0.029206205638397213 +526 51 model.embedding_dim 1.0 +526 51 training.batch_size 0.0 +526 51 training.label_smoothing 0.00518439353671199 +526 52 
model.embedding_dim 2.0 +526 52 training.batch_size 2.0 +526 52 training.label_smoothing 0.005578120064525815 +526 53 model.embedding_dim 1.0 +526 53 training.batch_size 1.0 +526 53 training.label_smoothing 0.010340317419608539 +526 54 model.embedding_dim 2.0 +526 54 training.batch_size 1.0 +526 54 training.label_smoothing 0.33770848893231575 +526 55 model.embedding_dim 1.0 +526 55 training.batch_size 1.0 +526 55 training.label_smoothing 0.0012029829734807461 +526 56 model.embedding_dim 0.0 +526 56 training.batch_size 1.0 +526 56 training.label_smoothing 0.01155692473503683 +526 57 model.embedding_dim 0.0 +526 57 training.batch_size 2.0 +526 57 training.label_smoothing 0.1808771241833568 +526 58 model.embedding_dim 1.0 +526 58 training.batch_size 1.0 +526 58 training.label_smoothing 0.0072769800428614875 +526 59 model.embedding_dim 0.0 +526 59 training.batch_size 0.0 +526 59 training.label_smoothing 0.050205228035145794 +526 60 model.embedding_dim 2.0 +526 60 training.batch_size 0.0 +526 60 training.label_smoothing 0.03951351587764483 +526 61 model.embedding_dim 1.0 +526 61 training.batch_size 2.0 +526 61 training.label_smoothing 0.02381825814357849 +526 62 model.embedding_dim 1.0 +526 62 training.batch_size 2.0 +526 62 training.label_smoothing 0.0011530841215448885 +526 63 model.embedding_dim 1.0 +526 63 training.batch_size 2.0 +526 63 training.label_smoothing 0.14412806697789307 +526 64 model.embedding_dim 2.0 +526 64 training.batch_size 2.0 +526 64 training.label_smoothing 0.5764897882153854 +526 65 model.embedding_dim 1.0 +526 65 training.batch_size 0.0 +526 65 training.label_smoothing 0.304855699315953 +526 66 model.embedding_dim 2.0 +526 66 training.batch_size 1.0 +526 66 training.label_smoothing 0.8378641400455961 +526 67 model.embedding_dim 2.0 +526 67 training.batch_size 1.0 +526 67 training.label_smoothing 0.36269475147873637 +526 68 model.embedding_dim 2.0 +526 68 training.batch_size 2.0 +526 68 training.label_smoothing 0.07464430059089348 +526 69 
model.embedding_dim 1.0 +526 69 training.batch_size 1.0 +526 69 training.label_smoothing 0.01991563502506204 +526 70 model.embedding_dim 1.0 +526 70 training.batch_size 2.0 +526 70 training.label_smoothing 0.06139392748593404 +526 71 model.embedding_dim 1.0 +526 71 training.batch_size 0.0 +526 71 training.label_smoothing 0.9809483656210023 +526 72 model.embedding_dim 1.0 +526 72 training.batch_size 2.0 +526 72 training.label_smoothing 0.6025359069950896 +526 73 model.embedding_dim 2.0 +526 73 training.batch_size 2.0 +526 73 training.label_smoothing 0.14799454107564589 +526 74 model.embedding_dim 1.0 +526 74 training.batch_size 2.0 +526 74 training.label_smoothing 0.0017939151223021093 +526 75 model.embedding_dim 1.0 +526 75 training.batch_size 2.0 +526 75 training.label_smoothing 0.10985511942803151 +526 76 model.embedding_dim 2.0 +526 76 training.batch_size 2.0 +526 76 training.label_smoothing 0.03669007258974266 +526 77 model.embedding_dim 1.0 +526 77 training.batch_size 2.0 +526 77 training.label_smoothing 0.3302938693658026 +526 78 model.embedding_dim 2.0 +526 78 training.batch_size 0.0 +526 78 training.label_smoothing 0.4703496103963278 +526 79 model.embedding_dim 0.0 +526 79 training.batch_size 1.0 +526 79 training.label_smoothing 0.009077218754018007 +526 80 model.embedding_dim 2.0 +526 80 training.batch_size 2.0 +526 80 training.label_smoothing 0.5816880750751932 +526 81 model.embedding_dim 2.0 +526 81 training.batch_size 0.0 +526 81 training.label_smoothing 0.1511508899737515 +526 82 model.embedding_dim 0.0 +526 82 training.batch_size 0.0 +526 82 training.label_smoothing 0.015114397108054933 +526 83 model.embedding_dim 1.0 +526 83 training.batch_size 1.0 +526 83 training.label_smoothing 0.0040530052963257185 +526 84 model.embedding_dim 0.0 +526 84 training.batch_size 2.0 +526 84 training.label_smoothing 0.0048174328253465385 +526 85 model.embedding_dim 0.0 +526 85 training.batch_size 2.0 +526 85 training.label_smoothing 0.4130456752507569 +526 86 
model.embedding_dim 1.0 +526 86 training.batch_size 2.0 +526 86 training.label_smoothing 0.005567437029631226 +526 87 model.embedding_dim 0.0 +526 87 training.batch_size 2.0 +526 87 training.label_smoothing 0.14643363638896395 +526 88 model.embedding_dim 1.0 +526 88 training.batch_size 1.0 +526 88 training.label_smoothing 0.07851866607253685 +526 89 model.embedding_dim 2.0 +526 89 training.batch_size 1.0 +526 89 training.label_smoothing 0.47779486773275537 +526 90 model.embedding_dim 1.0 +526 90 training.batch_size 2.0 +526 90 training.label_smoothing 0.018674476868349315 +526 91 model.embedding_dim 0.0 +526 91 training.batch_size 2.0 +526 91 training.label_smoothing 0.0032135884488234787 +526 92 model.embedding_dim 1.0 +526 92 training.batch_size 2.0 +526 92 training.label_smoothing 0.0037009808582355776 +526 93 model.embedding_dim 1.0 +526 93 training.batch_size 2.0 +526 93 training.label_smoothing 0.547553727074118 +526 94 model.embedding_dim 2.0 +526 94 training.batch_size 2.0 +526 94 training.label_smoothing 0.01409984086961399 +526 95 model.embedding_dim 2.0 +526 95 training.batch_size 0.0 +526 95 training.label_smoothing 0.004011548361050195 +526 96 model.embedding_dim 1.0 +526 96 training.batch_size 2.0 +526 96 training.label_smoothing 0.8004698487462003 +526 97 model.embedding_dim 2.0 +526 97 training.batch_size 1.0 +526 97 training.label_smoothing 0.039533250113628876 +526 98 model.embedding_dim 0.0 +526 98 training.batch_size 0.0 +526 98 training.label_smoothing 0.004560027555595362 +526 99 model.embedding_dim 0.0 +526 99 training.batch_size 0.0 +526 99 training.label_smoothing 0.0011422776408004482 +526 100 model.embedding_dim 0.0 +526 100 training.batch_size 0.0 +526 100 training.label_smoothing 0.036857216766404466 +526 1 dataset """kinships""" +526 1 model """rescal""" +526 1 loss """crossentropy""" +526 1 regularizer """no""" +526 1 optimizer """adadelta""" +526 1 training_loop """lcwa""" +526 1 evaluator """rankbased""" +526 2 dataset 
"""kinships""" +526 2 model """rescal""" +526 2 loss """crossentropy""" +526 2 regularizer """no""" +526 2 optimizer """adadelta""" +526 2 training_loop """lcwa""" +526 2 evaluator """rankbased""" +526 3 dataset """kinships""" +526 3 model """rescal""" +526 3 loss """crossentropy""" +526 3 regularizer """no""" +526 3 optimizer """adadelta""" +526 3 training_loop """lcwa""" +526 3 evaluator """rankbased""" +526 4 dataset """kinships""" +526 4 model """rescal""" +526 4 loss """crossentropy""" +526 4 regularizer """no""" +526 4 optimizer """adadelta""" +526 4 training_loop """lcwa""" +526 4 evaluator """rankbased""" +526 5 dataset """kinships""" +526 5 model """rescal""" +526 5 loss """crossentropy""" +526 5 regularizer """no""" +526 5 optimizer """adadelta""" +526 5 training_loop """lcwa""" +526 5 evaluator """rankbased""" +526 6 dataset """kinships""" +526 6 model """rescal""" +526 6 loss """crossentropy""" +526 6 regularizer """no""" +526 6 optimizer """adadelta""" +526 6 training_loop """lcwa""" +526 6 evaluator """rankbased""" +526 7 dataset """kinships""" +526 7 model """rescal""" +526 7 loss """crossentropy""" +526 7 regularizer """no""" +526 7 optimizer """adadelta""" +526 7 training_loop """lcwa""" +526 7 evaluator """rankbased""" +526 8 dataset """kinships""" +526 8 model """rescal""" +526 8 loss """crossentropy""" +526 8 regularizer """no""" +526 8 optimizer """adadelta""" +526 8 training_loop """lcwa""" +526 8 evaluator """rankbased""" +526 9 dataset """kinships""" +526 9 model """rescal""" +526 9 loss """crossentropy""" +526 9 regularizer """no""" +526 9 optimizer """adadelta""" +526 9 training_loop """lcwa""" +526 9 evaluator """rankbased""" +526 10 dataset """kinships""" +526 10 model """rescal""" +526 10 loss """crossentropy""" +526 10 regularizer """no""" +526 10 optimizer """adadelta""" +526 10 training_loop """lcwa""" +526 10 evaluator """rankbased""" +526 11 dataset """kinships""" +526 11 model """rescal""" +526 11 loss """crossentropy""" +526 11 
regularizer """no""" +526 11 optimizer """adadelta""" +526 11 training_loop """lcwa""" +526 11 evaluator """rankbased""" +526 12 dataset """kinships""" +526 12 model """rescal""" +526 12 loss """crossentropy""" +526 12 regularizer """no""" +526 12 optimizer """adadelta""" +526 12 training_loop """lcwa""" +526 12 evaluator """rankbased""" +526 13 dataset """kinships""" +526 13 model """rescal""" +526 13 loss """crossentropy""" +526 13 regularizer """no""" +526 13 optimizer """adadelta""" +526 13 training_loop """lcwa""" +526 13 evaluator """rankbased""" +526 14 dataset """kinships""" +526 14 model """rescal""" +526 14 loss """crossentropy""" +526 14 regularizer """no""" +526 14 optimizer """adadelta""" +526 14 training_loop """lcwa""" +526 14 evaluator """rankbased""" +526 15 dataset """kinships""" +526 15 model """rescal""" +526 15 loss """crossentropy""" +526 15 regularizer """no""" +526 15 optimizer """adadelta""" +526 15 training_loop """lcwa""" +526 15 evaluator """rankbased""" +526 16 dataset """kinships""" +526 16 model """rescal""" +526 16 loss """crossentropy""" +526 16 regularizer """no""" +526 16 optimizer """adadelta""" +526 16 training_loop """lcwa""" +526 16 evaluator """rankbased""" +526 17 dataset """kinships""" +526 17 model """rescal""" +526 17 loss """crossentropy""" +526 17 regularizer """no""" +526 17 optimizer """adadelta""" +526 17 training_loop """lcwa""" +526 17 evaluator """rankbased""" +526 18 dataset """kinships""" +526 18 model """rescal""" +526 18 loss """crossentropy""" +526 18 regularizer """no""" +526 18 optimizer """adadelta""" +526 18 training_loop """lcwa""" +526 18 evaluator """rankbased""" +526 19 dataset """kinships""" +526 19 model """rescal""" +526 19 loss """crossentropy""" +526 19 regularizer """no""" +526 19 optimizer """adadelta""" +526 19 training_loop """lcwa""" +526 19 evaluator """rankbased""" +526 20 dataset """kinships""" +526 20 model """rescal""" +526 20 loss """crossentropy""" +526 20 regularizer """no""" +526 20 
optimizer """adadelta""" +526 20 training_loop """lcwa""" +526 20 evaluator """rankbased""" +526 21 dataset """kinships""" +526 21 model """rescal""" +526 21 loss """crossentropy""" +526 21 regularizer """no""" +526 21 optimizer """adadelta""" +526 21 training_loop """lcwa""" +526 21 evaluator """rankbased""" +526 22 dataset """kinships""" +526 22 model """rescal""" +526 22 loss """crossentropy""" +526 22 regularizer """no""" +526 22 optimizer """adadelta""" +526 22 training_loop """lcwa""" +526 22 evaluator """rankbased""" +526 23 dataset """kinships""" +526 23 model """rescal""" +526 23 loss """crossentropy""" +526 23 regularizer """no""" +526 23 optimizer """adadelta""" +526 23 training_loop """lcwa""" +526 23 evaluator """rankbased""" +526 24 dataset """kinships""" +526 24 model """rescal""" +526 24 loss """crossentropy""" +526 24 regularizer """no""" +526 24 optimizer """adadelta""" +526 24 training_loop """lcwa""" +526 24 evaluator """rankbased""" +526 25 dataset """kinships""" +526 25 model """rescal""" +526 25 loss """crossentropy""" +526 25 regularizer """no""" +526 25 optimizer """adadelta""" +526 25 training_loop """lcwa""" +526 25 evaluator """rankbased""" +526 26 dataset """kinships""" +526 26 model """rescal""" +526 26 loss """crossentropy""" +526 26 regularizer """no""" +526 26 optimizer """adadelta""" +526 26 training_loop """lcwa""" +526 26 evaluator """rankbased""" +526 27 dataset """kinships""" +526 27 model """rescal""" +526 27 loss """crossentropy""" +526 27 regularizer """no""" +526 27 optimizer """adadelta""" +526 27 training_loop """lcwa""" +526 27 evaluator """rankbased""" +526 28 dataset """kinships""" +526 28 model """rescal""" +526 28 loss """crossentropy""" +526 28 regularizer """no""" +526 28 optimizer """adadelta""" +526 28 training_loop """lcwa""" +526 28 evaluator """rankbased""" +526 29 dataset """kinships""" +526 29 model """rescal""" +526 29 loss """crossentropy""" +526 29 regularizer """no""" +526 29 optimizer """adadelta""" 
+526 29 training_loop """lcwa""" +526 29 evaluator """rankbased""" +526 30 dataset """kinships""" +526 30 model """rescal""" +526 30 loss """crossentropy""" +526 30 regularizer """no""" +526 30 optimizer """adadelta""" +526 30 training_loop """lcwa""" +526 30 evaluator """rankbased""" +526 31 dataset """kinships""" +526 31 model """rescal""" +526 31 loss """crossentropy""" +526 31 regularizer """no""" +526 31 optimizer """adadelta""" +526 31 training_loop """lcwa""" +526 31 evaluator """rankbased""" +526 32 dataset """kinships""" +526 32 model """rescal""" +526 32 loss """crossentropy""" +526 32 regularizer """no""" +526 32 optimizer """adadelta""" +526 32 training_loop """lcwa""" +526 32 evaluator """rankbased""" +526 33 dataset """kinships""" +526 33 model """rescal""" +526 33 loss """crossentropy""" +526 33 regularizer """no""" +526 33 optimizer """adadelta""" +526 33 training_loop """lcwa""" +526 33 evaluator """rankbased""" +526 34 dataset """kinships""" +526 34 model """rescal""" +526 34 loss """crossentropy""" +526 34 regularizer """no""" +526 34 optimizer """adadelta""" +526 34 training_loop """lcwa""" +526 34 evaluator """rankbased""" +526 35 dataset """kinships""" +526 35 model """rescal""" +526 35 loss """crossentropy""" +526 35 regularizer """no""" +526 35 optimizer """adadelta""" +526 35 training_loop """lcwa""" +526 35 evaluator """rankbased""" +526 36 dataset """kinships""" +526 36 model """rescal""" +526 36 loss """crossentropy""" +526 36 regularizer """no""" +526 36 optimizer """adadelta""" +526 36 training_loop """lcwa""" +526 36 evaluator """rankbased""" +526 37 dataset """kinships""" +526 37 model """rescal""" +526 37 loss """crossentropy""" +526 37 regularizer """no""" +526 37 optimizer """adadelta""" +526 37 training_loop """lcwa""" +526 37 evaluator """rankbased""" +526 38 dataset """kinships""" +526 38 model """rescal""" +526 38 loss """crossentropy""" +526 38 regularizer """no""" +526 38 optimizer """adadelta""" +526 38 training_loop 
"""lcwa""" +526 38 evaluator """rankbased""" +526 39 dataset """kinships""" +526 39 model """rescal""" +526 39 loss """crossentropy""" +526 39 regularizer """no""" +526 39 optimizer """adadelta""" +526 39 training_loop """lcwa""" +526 39 evaluator """rankbased""" +526 40 dataset """kinships""" +526 40 model """rescal""" +526 40 loss """crossentropy""" +526 40 regularizer """no""" +526 40 optimizer """adadelta""" +526 40 training_loop """lcwa""" +526 40 evaluator """rankbased""" +526 41 dataset """kinships""" +526 41 model """rescal""" +526 41 loss """crossentropy""" +526 41 regularizer """no""" +526 41 optimizer """adadelta""" +526 41 training_loop """lcwa""" +526 41 evaluator """rankbased""" +526 42 dataset """kinships""" +526 42 model """rescal""" +526 42 loss """crossentropy""" +526 42 regularizer """no""" +526 42 optimizer """adadelta""" +526 42 training_loop """lcwa""" +526 42 evaluator """rankbased""" +526 43 dataset """kinships""" +526 43 model """rescal""" +526 43 loss """crossentropy""" +526 43 regularizer """no""" +526 43 optimizer """adadelta""" +526 43 training_loop """lcwa""" +526 43 evaluator """rankbased""" +526 44 dataset """kinships""" +526 44 model """rescal""" +526 44 loss """crossentropy""" +526 44 regularizer """no""" +526 44 optimizer """adadelta""" +526 44 training_loop """lcwa""" +526 44 evaluator """rankbased""" +526 45 dataset """kinships""" +526 45 model """rescal""" +526 45 loss """crossentropy""" +526 45 regularizer """no""" +526 45 optimizer """adadelta""" +526 45 training_loop """lcwa""" +526 45 evaluator """rankbased""" +526 46 dataset """kinships""" +526 46 model """rescal""" +526 46 loss """crossentropy""" +526 46 regularizer """no""" +526 46 optimizer """adadelta""" +526 46 training_loop """lcwa""" +526 46 evaluator """rankbased""" +526 47 dataset """kinships""" +526 47 model """rescal""" +526 47 loss """crossentropy""" +526 47 regularizer """no""" +526 47 optimizer """adadelta""" +526 47 training_loop """lcwa""" +526 47 evaluator 
"""rankbased""" +526 48 dataset """kinships""" +526 48 model """rescal""" +526 48 loss """crossentropy""" +526 48 regularizer """no""" +526 48 optimizer """adadelta""" +526 48 training_loop """lcwa""" +526 48 evaluator """rankbased""" +526 49 dataset """kinships""" +526 49 model """rescal""" +526 49 loss """crossentropy""" +526 49 regularizer """no""" +526 49 optimizer """adadelta""" +526 49 training_loop """lcwa""" +526 49 evaluator """rankbased""" +526 50 dataset """kinships""" +526 50 model """rescal""" +526 50 loss """crossentropy""" +526 50 regularizer """no""" +526 50 optimizer """adadelta""" +526 50 training_loop """lcwa""" +526 50 evaluator """rankbased""" +526 51 dataset """kinships""" +526 51 model """rescal""" +526 51 loss """crossentropy""" +526 51 regularizer """no""" +526 51 optimizer """adadelta""" +526 51 training_loop """lcwa""" +526 51 evaluator """rankbased""" +526 52 dataset """kinships""" +526 52 model """rescal""" +526 52 loss """crossentropy""" +526 52 regularizer """no""" +526 52 optimizer """adadelta""" +526 52 training_loop """lcwa""" +526 52 evaluator """rankbased""" +526 53 dataset """kinships""" +526 53 model """rescal""" +526 53 loss """crossentropy""" +526 53 regularizer """no""" +526 53 optimizer """adadelta""" +526 53 training_loop """lcwa""" +526 53 evaluator """rankbased""" +526 54 dataset """kinships""" +526 54 model """rescal""" +526 54 loss """crossentropy""" +526 54 regularizer """no""" +526 54 optimizer """adadelta""" +526 54 training_loop """lcwa""" +526 54 evaluator """rankbased""" +526 55 dataset """kinships""" +526 55 model """rescal""" +526 55 loss """crossentropy""" +526 55 regularizer """no""" +526 55 optimizer """adadelta""" +526 55 training_loop """lcwa""" +526 55 evaluator """rankbased""" +526 56 dataset """kinships""" +526 56 model """rescal""" +526 56 loss """crossentropy""" +526 56 regularizer """no""" +526 56 optimizer """adadelta""" +526 56 training_loop """lcwa""" +526 56 evaluator """rankbased""" +526 57 
dataset """kinships""" +526 57 model """rescal""" +526 57 loss """crossentropy""" +526 57 regularizer """no""" +526 57 optimizer """adadelta""" +526 57 training_loop """lcwa""" +526 57 evaluator """rankbased""" +526 58 dataset """kinships""" +526 58 model """rescal""" +526 58 loss """crossentropy""" +526 58 regularizer """no""" +526 58 optimizer """adadelta""" +526 58 training_loop """lcwa""" +526 58 evaluator """rankbased""" +526 59 dataset """kinships""" +526 59 model """rescal""" +526 59 loss """crossentropy""" +526 59 regularizer """no""" +526 59 optimizer """adadelta""" +526 59 training_loop """lcwa""" +526 59 evaluator """rankbased""" +526 60 dataset """kinships""" +526 60 model """rescal""" +526 60 loss """crossentropy""" +526 60 regularizer """no""" +526 60 optimizer """adadelta""" +526 60 training_loop """lcwa""" +526 60 evaluator """rankbased""" +526 61 dataset """kinships""" +526 61 model """rescal""" +526 61 loss """crossentropy""" +526 61 regularizer """no""" +526 61 optimizer """adadelta""" +526 61 training_loop """lcwa""" +526 61 evaluator """rankbased""" +526 62 dataset """kinships""" +526 62 model """rescal""" +526 62 loss """crossentropy""" +526 62 regularizer """no""" +526 62 optimizer """adadelta""" +526 62 training_loop """lcwa""" +526 62 evaluator """rankbased""" +526 63 dataset """kinships""" +526 63 model """rescal""" +526 63 loss """crossentropy""" +526 63 regularizer """no""" +526 63 optimizer """adadelta""" +526 63 training_loop """lcwa""" +526 63 evaluator """rankbased""" +526 64 dataset """kinships""" +526 64 model """rescal""" +526 64 loss """crossentropy""" +526 64 regularizer """no""" +526 64 optimizer """adadelta""" +526 64 training_loop """lcwa""" +526 64 evaluator """rankbased""" +526 65 dataset """kinships""" +526 65 model """rescal""" +526 65 loss """crossentropy""" +526 65 regularizer """no""" +526 65 optimizer """adadelta""" +526 65 training_loop """lcwa""" +526 65 evaluator """rankbased""" +526 66 dataset """kinships""" +526 
66 model """rescal""" +526 66 loss """crossentropy""" +526 66 regularizer """no""" +526 66 optimizer """adadelta""" +526 66 training_loop """lcwa""" +526 66 evaluator """rankbased""" +526 67 dataset """kinships""" +526 67 model """rescal""" +526 67 loss """crossentropy""" +526 67 regularizer """no""" +526 67 optimizer """adadelta""" +526 67 training_loop """lcwa""" +526 67 evaluator """rankbased""" +526 68 dataset """kinships""" +526 68 model """rescal""" +526 68 loss """crossentropy""" +526 68 regularizer """no""" +526 68 optimizer """adadelta""" +526 68 training_loop """lcwa""" +526 68 evaluator """rankbased""" +526 69 dataset """kinships""" +526 69 model """rescal""" +526 69 loss """crossentropy""" +526 69 regularizer """no""" +526 69 optimizer """adadelta""" +526 69 training_loop """lcwa""" +526 69 evaluator """rankbased""" +526 70 dataset """kinships""" +526 70 model """rescal""" +526 70 loss """crossentropy""" +526 70 regularizer """no""" +526 70 optimizer """adadelta""" +526 70 training_loop """lcwa""" +526 70 evaluator """rankbased""" +526 71 dataset """kinships""" +526 71 model """rescal""" +526 71 loss """crossentropy""" +526 71 regularizer """no""" +526 71 optimizer """adadelta""" +526 71 training_loop """lcwa""" +526 71 evaluator """rankbased""" +526 72 dataset """kinships""" +526 72 model """rescal""" +526 72 loss """crossentropy""" +526 72 regularizer """no""" +526 72 optimizer """adadelta""" +526 72 training_loop """lcwa""" +526 72 evaluator """rankbased""" +526 73 dataset """kinships""" +526 73 model """rescal""" +526 73 loss """crossentropy""" +526 73 regularizer """no""" +526 73 optimizer """adadelta""" +526 73 training_loop """lcwa""" +526 73 evaluator """rankbased""" +526 74 dataset """kinships""" +526 74 model """rescal""" +526 74 loss """crossentropy""" +526 74 regularizer """no""" +526 74 optimizer """adadelta""" +526 74 training_loop """lcwa""" +526 74 evaluator """rankbased""" +526 75 dataset """kinships""" +526 75 model """rescal""" +526 
75 loss """crossentropy""" +526 75 regularizer """no""" +526 75 optimizer """adadelta""" +526 75 training_loop """lcwa""" +526 75 evaluator """rankbased""" +526 76 dataset """kinships""" +526 76 model """rescal""" +526 76 loss """crossentropy""" +526 76 regularizer """no""" +526 76 optimizer """adadelta""" +526 76 training_loop """lcwa""" +526 76 evaluator """rankbased""" +526 77 dataset """kinships""" +526 77 model """rescal""" +526 77 loss """crossentropy""" +526 77 regularizer """no""" +526 77 optimizer """adadelta""" +526 77 training_loop """lcwa""" +526 77 evaluator """rankbased""" +526 78 dataset """kinships""" +526 78 model """rescal""" +526 78 loss """crossentropy""" +526 78 regularizer """no""" +526 78 optimizer """adadelta""" +526 78 training_loop """lcwa""" +526 78 evaluator """rankbased""" +526 79 dataset """kinships""" +526 79 model """rescal""" +526 79 loss """crossentropy""" +526 79 regularizer """no""" +526 79 optimizer """adadelta""" +526 79 training_loop """lcwa""" +526 79 evaluator """rankbased""" +526 80 dataset """kinships""" +526 80 model """rescal""" +526 80 loss """crossentropy""" +526 80 regularizer """no""" +526 80 optimizer """adadelta""" +526 80 training_loop """lcwa""" +526 80 evaluator """rankbased""" +526 81 dataset """kinships""" +526 81 model """rescal""" +526 81 loss """crossentropy""" +526 81 regularizer """no""" +526 81 optimizer """adadelta""" +526 81 training_loop """lcwa""" +526 81 evaluator """rankbased""" +526 82 dataset """kinships""" +526 82 model """rescal""" +526 82 loss """crossentropy""" +526 82 regularizer """no""" +526 82 optimizer """adadelta""" +526 82 training_loop """lcwa""" +526 82 evaluator """rankbased""" +526 83 dataset """kinships""" +526 83 model """rescal""" +526 83 loss """crossentropy""" +526 83 regularizer """no""" +526 83 optimizer """adadelta""" +526 83 training_loop """lcwa""" +526 83 evaluator """rankbased""" +526 84 dataset """kinships""" +526 84 model """rescal""" +526 84 loss """crossentropy""" 
+526 84 regularizer """no""" +526 84 optimizer """adadelta""" +526 84 training_loop """lcwa""" +526 84 evaluator """rankbased""" +526 85 dataset """kinships""" +526 85 model """rescal""" +526 85 loss """crossentropy""" +526 85 regularizer """no""" +526 85 optimizer """adadelta""" +526 85 training_loop """lcwa""" +526 85 evaluator """rankbased""" +526 86 dataset """kinships""" +526 86 model """rescal""" +526 86 loss """crossentropy""" +526 86 regularizer """no""" +526 86 optimizer """adadelta""" +526 86 training_loop """lcwa""" +526 86 evaluator """rankbased""" +526 87 dataset """kinships""" +526 87 model """rescal""" +526 87 loss """crossentropy""" +526 87 regularizer """no""" +526 87 optimizer """adadelta""" +526 87 training_loop """lcwa""" +526 87 evaluator """rankbased""" +526 88 dataset """kinships""" +526 88 model """rescal""" +526 88 loss """crossentropy""" +526 88 regularizer """no""" +526 88 optimizer """adadelta""" +526 88 training_loop """lcwa""" +526 88 evaluator """rankbased""" +526 89 dataset """kinships""" +526 89 model """rescal""" +526 89 loss """crossentropy""" +526 89 regularizer """no""" +526 89 optimizer """adadelta""" +526 89 training_loop """lcwa""" +526 89 evaluator """rankbased""" +526 90 dataset """kinships""" +526 90 model """rescal""" +526 90 loss """crossentropy""" +526 90 regularizer """no""" +526 90 optimizer """adadelta""" +526 90 training_loop """lcwa""" +526 90 evaluator """rankbased""" +526 91 dataset """kinships""" +526 91 model """rescal""" +526 91 loss """crossentropy""" +526 91 regularizer """no""" +526 91 optimizer """adadelta""" +526 91 training_loop """lcwa""" +526 91 evaluator """rankbased""" +526 92 dataset """kinships""" +526 92 model """rescal""" +526 92 loss """crossentropy""" +526 92 regularizer """no""" +526 92 optimizer """adadelta""" +526 92 training_loop """lcwa""" +526 92 evaluator """rankbased""" +526 93 dataset """kinships""" +526 93 model """rescal""" +526 93 loss """crossentropy""" +526 93 regularizer """no""" 
+526 93 optimizer """adadelta""" +526 93 training_loop """lcwa""" +526 93 evaluator """rankbased""" +526 94 dataset """kinships""" +526 94 model """rescal""" +526 94 loss """crossentropy""" +526 94 regularizer """no""" +526 94 optimizer """adadelta""" +526 94 training_loop """lcwa""" +526 94 evaluator """rankbased""" +526 95 dataset """kinships""" +526 95 model """rescal""" +526 95 loss """crossentropy""" +526 95 regularizer """no""" +526 95 optimizer """adadelta""" +526 95 training_loop """lcwa""" +526 95 evaluator """rankbased""" +526 96 dataset """kinships""" +526 96 model """rescal""" +526 96 loss """crossentropy""" +526 96 regularizer """no""" +526 96 optimizer """adadelta""" +526 96 training_loop """lcwa""" +526 96 evaluator """rankbased""" +526 97 dataset """kinships""" +526 97 model """rescal""" +526 97 loss """crossentropy""" +526 97 regularizer """no""" +526 97 optimizer """adadelta""" +526 97 training_loop """lcwa""" +526 97 evaluator """rankbased""" +526 98 dataset """kinships""" +526 98 model """rescal""" +526 98 loss """crossentropy""" +526 98 regularizer """no""" +526 98 optimizer """adadelta""" +526 98 training_loop """lcwa""" +526 98 evaluator """rankbased""" +526 99 dataset """kinships""" +526 99 model """rescal""" +526 99 loss """crossentropy""" +526 99 regularizer """no""" +526 99 optimizer """adadelta""" +526 99 training_loop """lcwa""" +526 99 evaluator """rankbased""" +526 100 dataset """kinships""" +526 100 model """rescal""" +526 100 loss """crossentropy""" +526 100 regularizer """no""" +526 100 optimizer """adadelta""" +526 100 training_loop """lcwa""" +526 100 evaluator """rankbased""" +527 1 model.embedding_dim 0.0 +527 1 loss.margin 3.598115634698659 +527 1 negative_sampler.num_negs_per_pos 50.0 +527 1 training.batch_size 2.0 +527 2 model.embedding_dim 2.0 +527 2 loss.margin 5.248993057499536 +527 2 negative_sampler.num_negs_per_pos 55.0 +527 2 training.batch_size 1.0 +527 3 model.embedding_dim 1.0 +527 3 loss.margin 5.0668628734275405 
+527 3 negative_sampler.num_negs_per_pos 85.0 +527 3 training.batch_size 2.0 +527 4 model.embedding_dim 1.0 +527 4 loss.margin 0.5555430712574966 +527 4 negative_sampler.num_negs_per_pos 91.0 +527 4 training.batch_size 0.0 +527 5 model.embedding_dim 0.0 +527 5 loss.margin 2.487936311014943 +527 5 negative_sampler.num_negs_per_pos 49.0 +527 5 training.batch_size 0.0 +527 6 model.embedding_dim 0.0 +527 6 loss.margin 5.569850333420147 +527 6 negative_sampler.num_negs_per_pos 16.0 +527 6 training.batch_size 2.0 +527 7 model.embedding_dim 2.0 +527 7 loss.margin 6.319905932567728 +527 7 negative_sampler.num_negs_per_pos 75.0 +527 7 training.batch_size 0.0 +527 8 model.embedding_dim 2.0 +527 8 loss.margin 2.690644807678808 +527 8 negative_sampler.num_negs_per_pos 65.0 +527 8 training.batch_size 1.0 +527 9 model.embedding_dim 0.0 +527 9 loss.margin 3.8652926023158463 +527 9 negative_sampler.num_negs_per_pos 9.0 +527 9 training.batch_size 2.0 +527 10 model.embedding_dim 0.0 +527 10 loss.margin 5.263798392894099 +527 10 negative_sampler.num_negs_per_pos 79.0 +527 10 training.batch_size 0.0 +527 11 model.embedding_dim 0.0 +527 11 loss.margin 9.687464636739476 +527 11 negative_sampler.num_negs_per_pos 73.0 +527 11 training.batch_size 0.0 +527 12 model.embedding_dim 0.0 +527 12 loss.margin 9.187234594529212 +527 12 negative_sampler.num_negs_per_pos 49.0 +527 12 training.batch_size 0.0 +527 13 model.embedding_dim 2.0 +527 13 loss.margin 4.900044393650344 +527 13 negative_sampler.num_negs_per_pos 66.0 +527 13 training.batch_size 0.0 +527 14 model.embedding_dim 0.0 +527 14 loss.margin 1.7323382120865336 +527 14 negative_sampler.num_negs_per_pos 29.0 +527 14 training.batch_size 0.0 +527 15 model.embedding_dim 2.0 +527 15 loss.margin 7.900441175265719 +527 15 negative_sampler.num_negs_per_pos 89.0 +527 15 training.batch_size 1.0 +527 16 model.embedding_dim 0.0 +527 16 loss.margin 1.1613723610515612 +527 16 negative_sampler.num_negs_per_pos 89.0 +527 16 training.batch_size 2.0 +527 
17 model.embedding_dim 0.0 +527 17 loss.margin 0.6086422663223103 +527 17 negative_sampler.num_negs_per_pos 33.0 +527 17 training.batch_size 1.0 +527 18 model.embedding_dim 0.0 +527 18 loss.margin 2.6473764612300505 +527 18 negative_sampler.num_negs_per_pos 69.0 +527 18 training.batch_size 2.0 +527 19 model.embedding_dim 1.0 +527 19 loss.margin 9.052455995736855 +527 19 negative_sampler.num_negs_per_pos 39.0 +527 19 training.batch_size 1.0 +527 20 model.embedding_dim 1.0 +527 20 loss.margin 4.000553197375893 +527 20 negative_sampler.num_negs_per_pos 28.0 +527 20 training.batch_size 0.0 +527 21 model.embedding_dim 0.0 +527 21 loss.margin 4.627584180274056 +527 21 negative_sampler.num_negs_per_pos 91.0 +527 21 training.batch_size 2.0 +527 22 model.embedding_dim 0.0 +527 22 loss.margin 8.916172549809733 +527 22 negative_sampler.num_negs_per_pos 13.0 +527 22 training.batch_size 2.0 +527 23 model.embedding_dim 2.0 +527 23 loss.margin 8.2731694893023 +527 23 negative_sampler.num_negs_per_pos 36.0 +527 23 training.batch_size 1.0 +527 24 model.embedding_dim 2.0 +527 24 loss.margin 7.784223267121429 +527 24 negative_sampler.num_negs_per_pos 89.0 +527 24 training.batch_size 2.0 +527 25 model.embedding_dim 2.0 +527 25 loss.margin 5.131607839002026 +527 25 negative_sampler.num_negs_per_pos 49.0 +527 25 training.batch_size 2.0 +527 26 model.embedding_dim 1.0 +527 26 loss.margin 6.410419103120969 +527 26 negative_sampler.num_negs_per_pos 97.0 +527 26 training.batch_size 1.0 +527 27 model.embedding_dim 1.0 +527 27 loss.margin 0.678075131555785 +527 27 negative_sampler.num_negs_per_pos 15.0 +527 27 training.batch_size 0.0 +527 28 model.embedding_dim 1.0 +527 28 loss.margin 9.01859911426913 +527 28 negative_sampler.num_negs_per_pos 95.0 +527 28 training.batch_size 0.0 +527 29 model.embedding_dim 1.0 +527 29 loss.margin 6.865660734051174 +527 29 negative_sampler.num_negs_per_pos 68.0 +527 29 training.batch_size 1.0 +527 30 model.embedding_dim 0.0 +527 30 loss.margin 
2.7687168322604476 +527 30 negative_sampler.num_negs_per_pos 91.0 +527 30 training.batch_size 1.0 +527 31 model.embedding_dim 2.0 +527 31 loss.margin 3.0039742536342526 +527 31 negative_sampler.num_negs_per_pos 79.0 +527 31 training.batch_size 0.0 +527 32 model.embedding_dim 1.0 +527 32 loss.margin 9.645630251103054 +527 32 negative_sampler.num_negs_per_pos 66.0 +527 32 training.batch_size 1.0 +527 33 model.embedding_dim 0.0 +527 33 loss.margin 8.339669258066184 +527 33 negative_sampler.num_negs_per_pos 37.0 +527 33 training.batch_size 1.0 +527 34 model.embedding_dim 0.0 +527 34 loss.margin 2.2892934700459415 +527 34 negative_sampler.num_negs_per_pos 59.0 +527 34 training.batch_size 2.0 +527 35 model.embedding_dim 1.0 +527 35 loss.margin 7.647318106539521 +527 35 negative_sampler.num_negs_per_pos 36.0 +527 35 training.batch_size 1.0 +527 36 model.embedding_dim 0.0 +527 36 loss.margin 5.943396856700103 +527 36 negative_sampler.num_negs_per_pos 85.0 +527 36 training.batch_size 1.0 +527 37 model.embedding_dim 2.0 +527 37 loss.margin 2.3397208016464788 +527 37 negative_sampler.num_negs_per_pos 76.0 +527 37 training.batch_size 1.0 +527 38 model.embedding_dim 0.0 +527 38 loss.margin 5.162726218606978 +527 38 negative_sampler.num_negs_per_pos 36.0 +527 38 training.batch_size 1.0 +527 39 model.embedding_dim 2.0 +527 39 loss.margin 5.45089753620701 +527 39 negative_sampler.num_negs_per_pos 65.0 +527 39 training.batch_size 2.0 +527 40 model.embedding_dim 2.0 +527 40 loss.margin 6.712268942454885 +527 40 negative_sampler.num_negs_per_pos 97.0 +527 40 training.batch_size 1.0 +527 41 model.embedding_dim 1.0 +527 41 loss.margin 6.80677947698586 +527 41 negative_sampler.num_negs_per_pos 27.0 +527 41 training.batch_size 0.0 +527 42 model.embedding_dim 2.0 +527 42 loss.margin 0.7167759651784864 +527 42 negative_sampler.num_negs_per_pos 85.0 +527 42 training.batch_size 1.0 +527 43 model.embedding_dim 0.0 +527 43 loss.margin 2.8981733567687162 +527 43 
negative_sampler.num_negs_per_pos 2.0 +527 43 training.batch_size 1.0 +527 44 model.embedding_dim 1.0 +527 44 loss.margin 7.85990095678414 +527 44 negative_sampler.num_negs_per_pos 86.0 +527 44 training.batch_size 1.0 +527 45 model.embedding_dim 1.0 +527 45 loss.margin 2.791304610778844 +527 45 negative_sampler.num_negs_per_pos 53.0 +527 45 training.batch_size 2.0 +527 46 model.embedding_dim 1.0 +527 46 loss.margin 9.191536828887987 +527 46 negative_sampler.num_negs_per_pos 21.0 +527 46 training.batch_size 0.0 +527 47 model.embedding_dim 2.0 +527 47 loss.margin 2.2652718283374833 +527 47 negative_sampler.num_negs_per_pos 41.0 +527 47 training.batch_size 1.0 +527 48 model.embedding_dim 1.0 +527 48 loss.margin 8.626287561503432 +527 48 negative_sampler.num_negs_per_pos 92.0 +527 48 training.batch_size 2.0 +527 49 model.embedding_dim 1.0 +527 49 loss.margin 1.3868914862330413 +527 49 negative_sampler.num_negs_per_pos 75.0 +527 49 training.batch_size 0.0 +527 50 model.embedding_dim 2.0 +527 50 loss.margin 9.833727853533507 +527 50 negative_sampler.num_negs_per_pos 44.0 +527 50 training.batch_size 0.0 +527 51 model.embedding_dim 1.0 +527 51 loss.margin 4.285364990637335 +527 51 negative_sampler.num_negs_per_pos 81.0 +527 51 training.batch_size 1.0 +527 52 model.embedding_dim 0.0 +527 52 loss.margin 0.5168728433748773 +527 52 negative_sampler.num_negs_per_pos 40.0 +527 52 training.batch_size 1.0 +527 53 model.embedding_dim 1.0 +527 53 loss.margin 0.5662506622157257 +527 53 negative_sampler.num_negs_per_pos 19.0 +527 53 training.batch_size 0.0 +527 54 model.embedding_dim 0.0 +527 54 loss.margin 4.583764191465538 +527 54 negative_sampler.num_negs_per_pos 70.0 +527 54 training.batch_size 0.0 +527 55 model.embedding_dim 0.0 +527 55 loss.margin 1.6372763837352369 +527 55 negative_sampler.num_negs_per_pos 99.0 +527 55 training.batch_size 1.0 +527 56 model.embedding_dim 2.0 +527 56 loss.margin 2.636978521924817 +527 56 negative_sampler.num_negs_per_pos 28.0 +527 56 
training.batch_size 2.0 +527 57 model.embedding_dim 1.0 +527 57 loss.margin 3.070254201831152 +527 57 negative_sampler.num_negs_per_pos 57.0 +527 57 training.batch_size 0.0 +527 58 model.embedding_dim 0.0 +527 58 loss.margin 5.407992348791575 +527 58 negative_sampler.num_negs_per_pos 21.0 +527 58 training.batch_size 2.0 +527 59 model.embedding_dim 0.0 +527 59 loss.margin 9.563165220049925 +527 59 negative_sampler.num_negs_per_pos 17.0 +527 59 training.batch_size 0.0 +527 60 model.embedding_dim 2.0 +527 60 loss.margin 7.54077798842406 +527 60 negative_sampler.num_negs_per_pos 81.0 +527 60 training.batch_size 2.0 +527 61 model.embedding_dim 1.0 +527 61 loss.margin 7.7857880438545 +527 61 negative_sampler.num_negs_per_pos 90.0 +527 61 training.batch_size 2.0 +527 62 model.embedding_dim 1.0 +527 62 loss.margin 0.6251959757238819 +527 62 negative_sampler.num_negs_per_pos 54.0 +527 62 training.batch_size 0.0 +527 63 model.embedding_dim 1.0 +527 63 loss.margin 6.954408213684449 +527 63 negative_sampler.num_negs_per_pos 75.0 +527 63 training.batch_size 0.0 +527 64 model.embedding_dim 1.0 +527 64 loss.margin 1.9699524269430109 +527 64 negative_sampler.num_negs_per_pos 64.0 +527 64 training.batch_size 0.0 +527 65 model.embedding_dim 1.0 +527 65 loss.margin 6.843510662895983 +527 65 negative_sampler.num_negs_per_pos 56.0 +527 65 training.batch_size 0.0 +527 66 model.embedding_dim 2.0 +527 66 loss.margin 1.8367999306731075 +527 66 negative_sampler.num_negs_per_pos 77.0 +527 66 training.batch_size 0.0 +527 67 model.embedding_dim 2.0 +527 67 loss.margin 1.8169730125271562 +527 67 negative_sampler.num_negs_per_pos 81.0 +527 67 training.batch_size 2.0 +527 68 model.embedding_dim 0.0 +527 68 loss.margin 9.084589150826453 +527 68 negative_sampler.num_negs_per_pos 13.0 +527 68 training.batch_size 2.0 +527 69 model.embedding_dim 0.0 +527 69 loss.margin 5.4372853781159876 +527 69 negative_sampler.num_negs_per_pos 81.0 +527 69 training.batch_size 2.0 +527 70 model.embedding_dim 0.0 +527 
70 loss.margin 3.500121376055858 +527 70 negative_sampler.num_negs_per_pos 74.0 +527 70 training.batch_size 2.0 +527 71 model.embedding_dim 0.0 +527 71 loss.margin 1.3823209792478235 +527 71 negative_sampler.num_negs_per_pos 93.0 +527 71 training.batch_size 0.0 +527 72 model.embedding_dim 1.0 +527 72 loss.margin 1.6156778161125434 +527 72 negative_sampler.num_negs_per_pos 31.0 +527 72 training.batch_size 0.0 +527 73 model.embedding_dim 1.0 +527 73 loss.margin 3.4569631807788492 +527 73 negative_sampler.num_negs_per_pos 76.0 +527 73 training.batch_size 1.0 +527 74 model.embedding_dim 2.0 +527 74 loss.margin 4.719282027522408 +527 74 negative_sampler.num_negs_per_pos 27.0 +527 74 training.batch_size 0.0 +527 75 model.embedding_dim 1.0 +527 75 loss.margin 8.271965484598045 +527 75 negative_sampler.num_negs_per_pos 15.0 +527 75 training.batch_size 1.0 +527 76 model.embedding_dim 0.0 +527 76 loss.margin 7.395469608637531 +527 76 negative_sampler.num_negs_per_pos 46.0 +527 76 training.batch_size 2.0 +527 77 model.embedding_dim 0.0 +527 77 loss.margin 5.051704715067273 +527 77 negative_sampler.num_negs_per_pos 50.0 +527 77 training.batch_size 1.0 +527 78 model.embedding_dim 0.0 +527 78 loss.margin 7.878652267212783 +527 78 negative_sampler.num_negs_per_pos 94.0 +527 78 training.batch_size 1.0 +527 79 model.embedding_dim 1.0 +527 79 loss.margin 3.3176826781154847 +527 79 negative_sampler.num_negs_per_pos 43.0 +527 79 training.batch_size 0.0 +527 80 model.embedding_dim 0.0 +527 80 loss.margin 5.274783163877626 +527 80 negative_sampler.num_negs_per_pos 90.0 +527 80 training.batch_size 0.0 +527 81 model.embedding_dim 2.0 +527 81 loss.margin 0.9819200166203961 +527 81 negative_sampler.num_negs_per_pos 45.0 +527 81 training.batch_size 2.0 +527 82 model.embedding_dim 2.0 +527 82 loss.margin 5.431300639406753 +527 82 negative_sampler.num_negs_per_pos 2.0 +527 82 training.batch_size 2.0 +527 83 model.embedding_dim 0.0 +527 83 loss.margin 5.011152311043954 +527 83 
negative_sampler.num_negs_per_pos 93.0 +527 83 training.batch_size 1.0 +527 84 model.embedding_dim 2.0 +527 84 loss.margin 8.8033482531348 +527 84 negative_sampler.num_negs_per_pos 34.0 +527 84 training.batch_size 2.0 +527 85 model.embedding_dim 2.0 +527 85 loss.margin 7.876539520157806 +527 85 negative_sampler.num_negs_per_pos 46.0 +527 85 training.batch_size 1.0 +527 86 model.embedding_dim 2.0 +527 86 loss.margin 9.693281150664916 +527 86 negative_sampler.num_negs_per_pos 28.0 +527 86 training.batch_size 1.0 +527 87 model.embedding_dim 2.0 +527 87 loss.margin 9.856093072910602 +527 87 negative_sampler.num_negs_per_pos 40.0 +527 87 training.batch_size 0.0 +527 88 model.embedding_dim 1.0 +527 88 loss.margin 1.3012367985573312 +527 88 negative_sampler.num_negs_per_pos 4.0 +527 88 training.batch_size 1.0 +527 89 model.embedding_dim 1.0 +527 89 loss.margin 3.6964825822081204 +527 89 negative_sampler.num_negs_per_pos 25.0 +527 89 training.batch_size 2.0 +527 90 model.embedding_dim 1.0 +527 90 loss.margin 9.229619564463414 +527 90 negative_sampler.num_negs_per_pos 19.0 +527 90 training.batch_size 0.0 +527 91 model.embedding_dim 1.0 +527 91 loss.margin 0.7238105879844304 +527 91 negative_sampler.num_negs_per_pos 29.0 +527 91 training.batch_size 0.0 +527 92 model.embedding_dim 1.0 +527 92 loss.margin 0.9210963901154652 +527 92 negative_sampler.num_negs_per_pos 4.0 +527 92 training.batch_size 1.0 +527 93 model.embedding_dim 0.0 +527 93 loss.margin 1.542640454371535 +527 93 negative_sampler.num_negs_per_pos 38.0 +527 93 training.batch_size 1.0 +527 94 model.embedding_dim 0.0 +527 94 loss.margin 8.166492312221923 +527 94 negative_sampler.num_negs_per_pos 31.0 +527 94 training.batch_size 0.0 +527 95 model.embedding_dim 0.0 +527 95 loss.margin 9.47640707199623 +527 95 negative_sampler.num_negs_per_pos 43.0 +527 95 training.batch_size 1.0 +527 96 model.embedding_dim 0.0 +527 96 loss.margin 7.262969362362281 +527 96 negative_sampler.num_negs_per_pos 54.0 +527 96 
training.batch_size 2.0 +527 97 model.embedding_dim 0.0 +527 97 loss.margin 6.435449809015222 +527 97 negative_sampler.num_negs_per_pos 19.0 +527 97 training.batch_size 0.0 +527 98 model.embedding_dim 2.0 +527 98 loss.margin 8.020992317102833 +527 98 negative_sampler.num_negs_per_pos 18.0 +527 98 training.batch_size 1.0 +527 99 model.embedding_dim 2.0 +527 99 loss.margin 0.8471128216872781 +527 99 negative_sampler.num_negs_per_pos 21.0 +527 99 training.batch_size 0.0 +527 100 model.embedding_dim 2.0 +527 100 loss.margin 2.6161652412108687 +527 100 negative_sampler.num_negs_per_pos 71.0 +527 100 training.batch_size 1.0 +527 1 dataset """kinships""" +527 1 model """rescal""" +527 1 loss """marginranking""" +527 1 regularizer """no""" +527 1 optimizer """adadelta""" +527 1 training_loop """owa""" +527 1 negative_sampler """basic""" +527 1 evaluator """rankbased""" +527 2 dataset """kinships""" +527 2 model """rescal""" +527 2 loss """marginranking""" +527 2 regularizer """no""" +527 2 optimizer """adadelta""" +527 2 training_loop """owa""" +527 2 negative_sampler """basic""" +527 2 evaluator """rankbased""" +527 3 dataset """kinships""" +527 3 model """rescal""" +527 3 loss """marginranking""" +527 3 regularizer """no""" +527 3 optimizer """adadelta""" +527 3 training_loop """owa""" +527 3 negative_sampler """basic""" +527 3 evaluator """rankbased""" +527 4 dataset """kinships""" +527 4 model """rescal""" +527 4 loss """marginranking""" +527 4 regularizer """no""" +527 4 optimizer """adadelta""" +527 4 training_loop """owa""" +527 4 negative_sampler """basic""" +527 4 evaluator """rankbased""" +527 5 dataset """kinships""" +527 5 model """rescal""" +527 5 loss """marginranking""" +527 5 regularizer """no""" +527 5 optimizer """adadelta""" +527 5 training_loop """owa""" +527 5 negative_sampler """basic""" +527 5 evaluator """rankbased""" +527 6 dataset """kinships""" +527 6 model """rescal""" +527 6 loss """marginranking""" +527 6 regularizer """no""" +527 6 optimizer 
"""adadelta""" +527 6 training_loop """owa""" +527 6 negative_sampler """basic""" +527 6 evaluator """rankbased""" +527 7 dataset """kinships""" +527 7 model """rescal""" +527 7 loss """marginranking""" +527 7 regularizer """no""" +527 7 optimizer """adadelta""" +527 7 training_loop """owa""" +527 7 negative_sampler """basic""" +527 7 evaluator """rankbased""" +527 8 dataset """kinships""" +527 8 model """rescal""" +527 8 loss """marginranking""" +527 8 regularizer """no""" +527 8 optimizer """adadelta""" +527 8 training_loop """owa""" +527 8 negative_sampler """basic""" +527 8 evaluator """rankbased""" +527 9 dataset """kinships""" +527 9 model """rescal""" +527 9 loss """marginranking""" +527 9 regularizer """no""" +527 9 optimizer """adadelta""" +527 9 training_loop """owa""" +527 9 negative_sampler """basic""" +527 9 evaluator """rankbased""" +527 10 dataset """kinships""" +527 10 model """rescal""" +527 10 loss """marginranking""" +527 10 regularizer """no""" +527 10 optimizer """adadelta""" +527 10 training_loop """owa""" +527 10 negative_sampler """basic""" +527 10 evaluator """rankbased""" +527 11 dataset """kinships""" +527 11 model """rescal""" +527 11 loss """marginranking""" +527 11 regularizer """no""" +527 11 optimizer """adadelta""" +527 11 training_loop """owa""" +527 11 negative_sampler """basic""" +527 11 evaluator """rankbased""" +527 12 dataset """kinships""" +527 12 model """rescal""" +527 12 loss """marginranking""" +527 12 regularizer """no""" +527 12 optimizer """adadelta""" +527 12 training_loop """owa""" +527 12 negative_sampler """basic""" +527 12 evaluator """rankbased""" +527 13 dataset """kinships""" +527 13 model """rescal""" +527 13 loss """marginranking""" +527 13 regularizer """no""" +527 13 optimizer """adadelta""" +527 13 training_loop """owa""" +527 13 negative_sampler """basic""" +527 13 evaluator """rankbased""" +527 14 dataset """kinships""" +527 14 model """rescal""" +527 14 loss """marginranking""" +527 14 regularizer 
"""no""" +527 14 optimizer """adadelta""" +527 14 training_loop """owa""" +527 14 negative_sampler """basic""" +527 14 evaluator """rankbased""" +527 15 dataset """kinships""" +527 15 model """rescal""" +527 15 loss """marginranking""" +527 15 regularizer """no""" +527 15 optimizer """adadelta""" +527 15 training_loop """owa""" +527 15 negative_sampler """basic""" +527 15 evaluator """rankbased""" +527 16 dataset """kinships""" +527 16 model """rescal""" +527 16 loss """marginranking""" +527 16 regularizer """no""" +527 16 optimizer """adadelta""" +527 16 training_loop """owa""" +527 16 negative_sampler """basic""" +527 16 evaluator """rankbased""" +527 17 dataset """kinships""" +527 17 model """rescal""" +527 17 loss """marginranking""" +527 17 regularizer """no""" +527 17 optimizer """adadelta""" +527 17 training_loop """owa""" +527 17 negative_sampler """basic""" +527 17 evaluator """rankbased""" +527 18 dataset """kinships""" +527 18 model """rescal""" +527 18 loss """marginranking""" +527 18 regularizer """no""" +527 18 optimizer """adadelta""" +527 18 training_loop """owa""" +527 18 negative_sampler """basic""" +527 18 evaluator """rankbased""" +527 19 dataset """kinships""" +527 19 model """rescal""" +527 19 loss """marginranking""" +527 19 regularizer """no""" +527 19 optimizer """adadelta""" +527 19 training_loop """owa""" +527 19 negative_sampler """basic""" +527 19 evaluator """rankbased""" +527 20 dataset """kinships""" +527 20 model """rescal""" +527 20 loss """marginranking""" +527 20 regularizer """no""" +527 20 optimizer """adadelta""" +527 20 training_loop """owa""" +527 20 negative_sampler """basic""" +527 20 evaluator """rankbased""" +527 21 dataset """kinships""" +527 21 model """rescal""" +527 21 loss """marginranking""" +527 21 regularizer """no""" +527 21 optimizer """adadelta""" +527 21 training_loop """owa""" +527 21 negative_sampler """basic""" +527 21 evaluator """rankbased""" +527 22 dataset """kinships""" +527 22 model """rescal""" +527 
22 loss """marginranking""" +527 22 regularizer """no""" +527 22 optimizer """adadelta""" +527 22 training_loop """owa""" +527 22 negative_sampler """basic""" +527 22 evaluator """rankbased""" +527 23 dataset """kinships""" +527 23 model """rescal""" +527 23 loss """marginranking""" +527 23 regularizer """no""" +527 23 optimizer """adadelta""" +527 23 training_loop """owa""" +527 23 negative_sampler """basic""" +527 23 evaluator """rankbased""" +527 24 dataset """kinships""" +527 24 model """rescal""" +527 24 loss """marginranking""" +527 24 regularizer """no""" +527 24 optimizer """adadelta""" +527 24 training_loop """owa""" +527 24 negative_sampler """basic""" +527 24 evaluator """rankbased""" +527 25 dataset """kinships""" +527 25 model """rescal""" +527 25 loss """marginranking""" +527 25 regularizer """no""" +527 25 optimizer """adadelta""" +527 25 training_loop """owa""" +527 25 negative_sampler """basic""" +527 25 evaluator """rankbased""" +527 26 dataset """kinships""" +527 26 model """rescal""" +527 26 loss """marginranking""" +527 26 regularizer """no""" +527 26 optimizer """adadelta""" +527 26 training_loop """owa""" +527 26 negative_sampler """basic""" +527 26 evaluator """rankbased""" +527 27 dataset """kinships""" +527 27 model """rescal""" +527 27 loss """marginranking""" +527 27 regularizer """no""" +527 27 optimizer """adadelta""" +527 27 training_loop """owa""" +527 27 negative_sampler """basic""" +527 27 evaluator """rankbased""" +527 28 dataset """kinships""" +527 28 model """rescal""" +527 28 loss """marginranking""" +527 28 regularizer """no""" +527 28 optimizer """adadelta""" +527 28 training_loop """owa""" +527 28 negative_sampler """basic""" +527 28 evaluator """rankbased""" +527 29 dataset """kinships""" +527 29 model """rescal""" +527 29 loss """marginranking""" +527 29 regularizer """no""" +527 29 optimizer """adadelta""" +527 29 training_loop """owa""" +527 29 negative_sampler """basic""" +527 29 evaluator """rankbased""" +527 30 
dataset """kinships""" +527 30 model """rescal""" +527 30 loss """marginranking""" +527 30 regularizer """no""" +527 30 optimizer """adadelta""" +527 30 training_loop """owa""" +527 30 negative_sampler """basic""" +527 30 evaluator """rankbased""" +527 31 dataset """kinships""" +527 31 model """rescal""" +527 31 loss """marginranking""" +527 31 regularizer """no""" +527 31 optimizer """adadelta""" +527 31 training_loop """owa""" +527 31 negative_sampler """basic""" +527 31 evaluator """rankbased""" +527 32 dataset """kinships""" +527 32 model """rescal""" +527 32 loss """marginranking""" +527 32 regularizer """no""" +527 32 optimizer """adadelta""" +527 32 training_loop """owa""" +527 32 negative_sampler """basic""" +527 32 evaluator """rankbased""" +527 33 dataset """kinships""" +527 33 model """rescal""" +527 33 loss """marginranking""" +527 33 regularizer """no""" +527 33 optimizer """adadelta""" +527 33 training_loop """owa""" +527 33 negative_sampler """basic""" +527 33 evaluator """rankbased""" +527 34 dataset """kinships""" +527 34 model """rescal""" +527 34 loss """marginranking""" +527 34 regularizer """no""" +527 34 optimizer """adadelta""" +527 34 training_loop """owa""" +527 34 negative_sampler """basic""" +527 34 evaluator """rankbased""" +527 35 dataset """kinships""" +527 35 model """rescal""" +527 35 loss """marginranking""" +527 35 regularizer """no""" +527 35 optimizer """adadelta""" +527 35 training_loop """owa""" +527 35 negative_sampler """basic""" +527 35 evaluator """rankbased""" +527 36 dataset """kinships""" +527 36 model """rescal""" +527 36 loss """marginranking""" +527 36 regularizer """no""" +527 36 optimizer """adadelta""" +527 36 training_loop """owa""" +527 36 negative_sampler """basic""" +527 36 evaluator """rankbased""" +527 37 dataset """kinships""" +527 37 model """rescal""" +527 37 loss """marginranking""" +527 37 regularizer """no""" +527 37 optimizer """adadelta""" +527 37 training_loop """owa""" +527 37 negative_sampler 
"""basic""" +527 37 evaluator """rankbased""" +527 38 dataset """kinships""" +527 38 model """rescal""" +527 38 loss """marginranking""" +527 38 regularizer """no""" +527 38 optimizer """adadelta""" +527 38 training_loop """owa""" +527 38 negative_sampler """basic""" +527 38 evaluator """rankbased""" +527 39 dataset """kinships""" +527 39 model """rescal""" +527 39 loss """marginranking""" +527 39 regularizer """no""" +527 39 optimizer """adadelta""" +527 39 training_loop """owa""" +527 39 negative_sampler """basic""" +527 39 evaluator """rankbased""" +527 40 dataset """kinships""" +527 40 model """rescal""" +527 40 loss """marginranking""" +527 40 regularizer """no""" +527 40 optimizer """adadelta""" +527 40 training_loop """owa""" +527 40 negative_sampler """basic""" +527 40 evaluator """rankbased""" +527 41 dataset """kinships""" +527 41 model """rescal""" +527 41 loss """marginranking""" +527 41 regularizer """no""" +527 41 optimizer """adadelta""" +527 41 training_loop """owa""" +527 41 negative_sampler """basic""" +527 41 evaluator """rankbased""" +527 42 dataset """kinships""" +527 42 model """rescal""" +527 42 loss """marginranking""" +527 42 regularizer """no""" +527 42 optimizer """adadelta""" +527 42 training_loop """owa""" +527 42 negative_sampler """basic""" +527 42 evaluator """rankbased""" +527 43 dataset """kinships""" +527 43 model """rescal""" +527 43 loss """marginranking""" +527 43 regularizer """no""" +527 43 optimizer """adadelta""" +527 43 training_loop """owa""" +527 43 negative_sampler """basic""" +527 43 evaluator """rankbased""" +527 44 dataset """kinships""" +527 44 model """rescal""" +527 44 loss """marginranking""" +527 44 regularizer """no""" +527 44 optimizer """adadelta""" +527 44 training_loop """owa""" +527 44 negative_sampler """basic""" +527 44 evaluator """rankbased""" +527 45 dataset """kinships""" +527 45 model """rescal""" +527 45 loss """marginranking""" +527 45 regularizer """no""" +527 45 optimizer """adadelta""" +527 45 
training_loop """owa""" +527 45 negative_sampler """basic""" +527 45 evaluator """rankbased""" +527 46 dataset """kinships""" +527 46 model """rescal""" +527 46 loss """marginranking""" +527 46 regularizer """no""" +527 46 optimizer """adadelta""" +527 46 training_loop """owa""" +527 46 negative_sampler """basic""" +527 46 evaluator """rankbased""" +527 47 dataset """kinships""" +527 47 model """rescal""" +527 47 loss """marginranking""" +527 47 regularizer """no""" +527 47 optimizer """adadelta""" +527 47 training_loop """owa""" +527 47 negative_sampler """basic""" +527 47 evaluator """rankbased""" +527 48 dataset """kinships""" +527 48 model """rescal""" +527 48 loss """marginranking""" +527 48 regularizer """no""" +527 48 optimizer """adadelta""" +527 48 training_loop """owa""" +527 48 negative_sampler """basic""" +527 48 evaluator """rankbased""" +527 49 dataset """kinships""" +527 49 model """rescal""" +527 49 loss """marginranking""" +527 49 regularizer """no""" +527 49 optimizer """adadelta""" +527 49 training_loop """owa""" +527 49 negative_sampler """basic""" +527 49 evaluator """rankbased""" +527 50 dataset """kinships""" +527 50 model """rescal""" +527 50 loss """marginranking""" +527 50 regularizer """no""" +527 50 optimizer """adadelta""" +527 50 training_loop """owa""" +527 50 negative_sampler """basic""" +527 50 evaluator """rankbased""" +527 51 dataset """kinships""" +527 51 model """rescal""" +527 51 loss """marginranking""" +527 51 regularizer """no""" +527 51 optimizer """adadelta""" +527 51 training_loop """owa""" +527 51 negative_sampler """basic""" +527 51 evaluator """rankbased""" +527 52 dataset """kinships""" +527 52 model """rescal""" +527 52 loss """marginranking""" +527 52 regularizer """no""" +527 52 optimizer """adadelta""" +527 52 training_loop """owa""" +527 52 negative_sampler """basic""" +527 52 evaluator """rankbased""" +527 53 dataset """kinships""" +527 53 model """rescal""" +527 53 loss """marginranking""" +527 53 regularizer 
"""no""" +527 53 optimizer """adadelta""" +527 53 training_loop """owa""" +527 53 negative_sampler """basic""" +527 53 evaluator """rankbased""" +527 54 dataset """kinships""" +527 54 model """rescal""" +527 54 loss """marginranking""" +527 54 regularizer """no""" +527 54 optimizer """adadelta""" +527 54 training_loop """owa""" +527 54 negative_sampler """basic""" +527 54 evaluator """rankbased""" +527 55 dataset """kinships""" +527 55 model """rescal""" +527 55 loss """marginranking""" +527 55 regularizer """no""" +527 55 optimizer """adadelta""" +527 55 training_loop """owa""" +527 55 negative_sampler """basic""" +527 55 evaluator """rankbased""" +527 56 dataset """kinships""" +527 56 model """rescal""" +527 56 loss """marginranking""" +527 56 regularizer """no""" +527 56 optimizer """adadelta""" +527 56 training_loop """owa""" +527 56 negative_sampler """basic""" +527 56 evaluator """rankbased""" +527 57 dataset """kinships""" +527 57 model """rescal""" +527 57 loss """marginranking""" +527 57 regularizer """no""" +527 57 optimizer """adadelta""" +527 57 training_loop """owa""" +527 57 negative_sampler """basic""" +527 57 evaluator """rankbased""" +527 58 dataset """kinships""" +527 58 model """rescal""" +527 58 loss """marginranking""" +527 58 regularizer """no""" +527 58 optimizer """adadelta""" +527 58 training_loop """owa""" +527 58 negative_sampler """basic""" +527 58 evaluator """rankbased""" +527 59 dataset """kinships""" +527 59 model """rescal""" +527 59 loss """marginranking""" +527 59 regularizer """no""" +527 59 optimizer """adadelta""" +527 59 training_loop """owa""" +527 59 negative_sampler """basic""" +527 59 evaluator """rankbased""" +527 60 dataset """kinships""" +527 60 model """rescal""" +527 60 loss """marginranking""" +527 60 regularizer """no""" +527 60 optimizer """adadelta""" +527 60 training_loop """owa""" +527 60 negative_sampler """basic""" +527 60 evaluator """rankbased""" +527 61 dataset """kinships""" +527 61 model """rescal""" +527 
61 loss """marginranking""" +527 61 regularizer """no""" +527 61 optimizer """adadelta""" +527 61 training_loop """owa""" +527 61 negative_sampler """basic""" +527 61 evaluator """rankbased""" +527 62 dataset """kinships""" +527 62 model """rescal""" +527 62 loss """marginranking""" +527 62 regularizer """no""" +527 62 optimizer """adadelta""" +527 62 training_loop """owa""" +527 62 negative_sampler """basic""" +527 62 evaluator """rankbased""" +527 63 dataset """kinships""" +527 63 model """rescal""" +527 63 loss """marginranking""" +527 63 regularizer """no""" +527 63 optimizer """adadelta""" +527 63 training_loop """owa""" +527 63 negative_sampler """basic""" +527 63 evaluator """rankbased""" +527 64 dataset """kinships""" +527 64 model """rescal""" +527 64 loss """marginranking""" +527 64 regularizer """no""" +527 64 optimizer """adadelta""" +527 64 training_loop """owa""" +527 64 negative_sampler """basic""" +527 64 evaluator """rankbased""" +527 65 dataset """kinships""" +527 65 model """rescal""" +527 65 loss """marginranking""" +527 65 regularizer """no""" +527 65 optimizer """adadelta""" +527 65 training_loop """owa""" +527 65 negative_sampler """basic""" +527 65 evaluator """rankbased""" +527 66 dataset """kinships""" +527 66 model """rescal""" +527 66 loss """marginranking""" +527 66 regularizer """no""" +527 66 optimizer """adadelta""" +527 66 training_loop """owa""" +527 66 negative_sampler """basic""" +527 66 evaluator """rankbased""" +527 67 dataset """kinships""" +527 67 model """rescal""" +527 67 loss """marginranking""" +527 67 regularizer """no""" +527 67 optimizer """adadelta""" +527 67 training_loop """owa""" +527 67 negative_sampler """basic""" +527 67 evaluator """rankbased""" +527 68 dataset """kinships""" +527 68 model """rescal""" +527 68 loss """marginranking""" +527 68 regularizer """no""" +527 68 optimizer """adadelta""" +527 68 training_loop """owa""" +527 68 negative_sampler """basic""" +527 68 evaluator """rankbased""" +527 69 
dataset """kinships""" +527 69 model """rescal""" +527 69 loss """marginranking""" +527 69 regularizer """no""" +527 69 optimizer """adadelta""" +527 69 training_loop """owa""" +527 69 negative_sampler """basic""" +527 69 evaluator """rankbased""" +527 70 dataset """kinships""" +527 70 model """rescal""" +527 70 loss """marginranking""" +527 70 regularizer """no""" +527 70 optimizer """adadelta""" +527 70 training_loop """owa""" +527 70 negative_sampler """basic""" +527 70 evaluator """rankbased""" +527 71 dataset """kinships""" +527 71 model """rescal""" +527 71 loss """marginranking""" +527 71 regularizer """no""" +527 71 optimizer """adadelta""" +527 71 training_loop """owa""" +527 71 negative_sampler """basic""" +527 71 evaluator """rankbased""" +527 72 dataset """kinships""" +527 72 model """rescal""" +527 72 loss """marginranking""" +527 72 regularizer """no""" +527 72 optimizer """adadelta""" +527 72 training_loop """owa""" +527 72 negative_sampler """basic""" +527 72 evaluator """rankbased""" +527 73 dataset """kinships""" +527 73 model """rescal""" +527 73 loss """marginranking""" +527 73 regularizer """no""" +527 73 optimizer """adadelta""" +527 73 training_loop """owa""" +527 73 negative_sampler """basic""" +527 73 evaluator """rankbased""" +527 74 dataset """kinships""" +527 74 model """rescal""" +527 74 loss """marginranking""" +527 74 regularizer """no""" +527 74 optimizer """adadelta""" +527 74 training_loop """owa""" +527 74 negative_sampler """basic""" +527 74 evaluator """rankbased""" +527 75 dataset """kinships""" +527 75 model """rescal""" +527 75 loss """marginranking""" +527 75 regularizer """no""" +527 75 optimizer """adadelta""" +527 75 training_loop """owa""" +527 75 negative_sampler """basic""" +527 75 evaluator """rankbased""" +527 76 dataset """kinships""" +527 76 model """rescal""" +527 76 loss """marginranking""" +527 76 regularizer """no""" +527 76 optimizer """adadelta""" +527 76 training_loop """owa""" +527 76 negative_sampler 
"""basic""" +527 76 evaluator """rankbased""" +527 77 dataset """kinships""" +527 77 model """rescal""" +527 77 loss """marginranking""" +527 77 regularizer """no""" +527 77 optimizer """adadelta""" +527 77 training_loop """owa""" +527 77 negative_sampler """basic""" +527 77 evaluator """rankbased""" +527 78 dataset """kinships""" +527 78 model """rescal""" +527 78 loss """marginranking""" +527 78 regularizer """no""" +527 78 optimizer """adadelta""" +527 78 training_loop """owa""" +527 78 negative_sampler """basic""" +527 78 evaluator """rankbased""" +527 79 dataset """kinships""" +527 79 model """rescal""" +527 79 loss """marginranking""" +527 79 regularizer """no""" +527 79 optimizer """adadelta""" +527 79 training_loop """owa""" +527 79 negative_sampler """basic""" +527 79 evaluator """rankbased""" +527 80 dataset """kinships""" +527 80 model """rescal""" +527 80 loss """marginranking""" +527 80 regularizer """no""" +527 80 optimizer """adadelta""" +527 80 training_loop """owa""" +527 80 negative_sampler """basic""" +527 80 evaluator """rankbased""" +527 81 dataset """kinships""" +527 81 model """rescal""" +527 81 loss """marginranking""" +527 81 regularizer """no""" +527 81 optimizer """adadelta""" +527 81 training_loop """owa""" +527 81 negative_sampler """basic""" +527 81 evaluator """rankbased""" +527 82 dataset """kinships""" +527 82 model """rescal""" +527 82 loss """marginranking""" +527 82 regularizer """no""" +527 82 optimizer """adadelta""" +527 82 training_loop """owa""" +527 82 negative_sampler """basic""" +527 82 evaluator """rankbased""" +527 83 dataset """kinships""" +527 83 model """rescal""" +527 83 loss """marginranking""" +527 83 regularizer """no""" +527 83 optimizer """adadelta""" +527 83 training_loop """owa""" +527 83 negative_sampler """basic""" +527 83 evaluator """rankbased""" +527 84 dataset """kinships""" +527 84 model """rescal""" +527 84 loss """marginranking""" +527 84 regularizer """no""" +527 84 optimizer """adadelta""" +527 84 
training_loop """owa""" +527 84 negative_sampler """basic""" +527 84 evaluator """rankbased""" +527 85 dataset """kinships""" +527 85 model """rescal""" +527 85 loss """marginranking""" +527 85 regularizer """no""" +527 85 optimizer """adadelta""" +527 85 training_loop """owa""" +527 85 negative_sampler """basic""" +527 85 evaluator """rankbased""" +527 86 dataset """kinships""" +527 86 model """rescal""" +527 86 loss """marginranking""" +527 86 regularizer """no""" +527 86 optimizer """adadelta""" +527 86 training_loop """owa""" +527 86 negative_sampler """basic""" +527 86 evaluator """rankbased""" +527 87 dataset """kinships""" +527 87 model """rescal""" +527 87 loss """marginranking""" +527 87 regularizer """no""" +527 87 optimizer """adadelta""" +527 87 training_loop """owa""" +527 87 negative_sampler """basic""" +527 87 evaluator """rankbased""" +527 88 dataset """kinships""" +527 88 model """rescal""" +527 88 loss """marginranking""" +527 88 regularizer """no""" +527 88 optimizer """adadelta""" +527 88 training_loop """owa""" +527 88 negative_sampler """basic""" +527 88 evaluator """rankbased""" +527 89 dataset """kinships""" +527 89 model """rescal""" +527 89 loss """marginranking""" +527 89 regularizer """no""" +527 89 optimizer """adadelta""" +527 89 training_loop """owa""" +527 89 negative_sampler """basic""" +527 89 evaluator """rankbased""" +527 90 dataset """kinships""" +527 90 model """rescal""" +527 90 loss """marginranking""" +527 90 regularizer """no""" +527 90 optimizer """adadelta""" +527 90 training_loop """owa""" +527 90 negative_sampler """basic""" +527 90 evaluator """rankbased""" +527 91 dataset """kinships""" +527 91 model """rescal""" +527 91 loss """marginranking""" +527 91 regularizer """no""" +527 91 optimizer """adadelta""" +527 91 training_loop """owa""" +527 91 negative_sampler """basic""" +527 91 evaluator """rankbased""" +527 92 dataset """kinships""" +527 92 model """rescal""" +527 92 loss """marginranking""" +527 92 regularizer 
"""no""" +527 92 optimizer """adadelta""" +527 92 training_loop """owa""" +527 92 negative_sampler """basic""" +527 92 evaluator """rankbased""" +527 93 dataset """kinships""" +527 93 model """rescal""" +527 93 loss """marginranking""" +527 93 regularizer """no""" +527 93 optimizer """adadelta""" +527 93 training_loop """owa""" +527 93 negative_sampler """basic""" +527 93 evaluator """rankbased""" +527 94 dataset """kinships""" +527 94 model """rescal""" +527 94 loss """marginranking""" +527 94 regularizer """no""" +527 94 optimizer """adadelta""" +527 94 training_loop """owa""" +527 94 negative_sampler """basic""" +527 94 evaluator """rankbased""" +527 95 dataset """kinships""" +527 95 model """rescal""" +527 95 loss """marginranking""" +527 95 regularizer """no""" +527 95 optimizer """adadelta""" +527 95 training_loop """owa""" +527 95 negative_sampler """basic""" +527 95 evaluator """rankbased""" +527 96 dataset """kinships""" +527 96 model """rescal""" +527 96 loss """marginranking""" +527 96 regularizer """no""" +527 96 optimizer """adadelta""" +527 96 training_loop """owa""" +527 96 negative_sampler """basic""" +527 96 evaluator """rankbased""" +527 97 dataset """kinships""" +527 97 model """rescal""" +527 97 loss """marginranking""" +527 97 regularizer """no""" +527 97 optimizer """adadelta""" +527 97 training_loop """owa""" +527 97 negative_sampler """basic""" +527 97 evaluator """rankbased""" +527 98 dataset """kinships""" +527 98 model """rescal""" +527 98 loss """marginranking""" +527 98 regularizer """no""" +527 98 optimizer """adadelta""" +527 98 training_loop """owa""" +527 98 negative_sampler """basic""" +527 98 evaluator """rankbased""" +527 99 dataset """kinships""" +527 99 model """rescal""" +527 99 loss """marginranking""" +527 99 regularizer """no""" +527 99 optimizer """adadelta""" +527 99 training_loop """owa""" +527 99 negative_sampler """basic""" +527 99 evaluator """rankbased""" +527 100 dataset """kinships""" +527 100 model """rescal""" 
+527 100 loss """marginranking""" +527 100 regularizer """no""" +527 100 optimizer """adadelta""" +527 100 training_loop """owa""" +527 100 negative_sampler """basic""" +527 100 evaluator """rankbased""" +528 1 model.embedding_dim 0.0 +528 1 loss.margin 0.866120051303073 +528 1 negative_sampler.num_negs_per_pos 13.0 +528 1 training.batch_size 2.0 +528 2 model.embedding_dim 0.0 +528 2 loss.margin 5.210148730115793 +528 2 negative_sampler.num_negs_per_pos 44.0 +528 2 training.batch_size 0.0 +528 3 model.embedding_dim 0.0 +528 3 loss.margin 4.342659283661443 +528 3 negative_sampler.num_negs_per_pos 61.0 +528 3 training.batch_size 1.0 +528 4 model.embedding_dim 2.0 +528 4 loss.margin 8.507865236822978 +528 4 negative_sampler.num_negs_per_pos 50.0 +528 4 training.batch_size 1.0 +528 5 model.embedding_dim 2.0 +528 5 loss.margin 1.697810553054952 +528 5 negative_sampler.num_negs_per_pos 3.0 +528 5 training.batch_size 1.0 +528 6 model.embedding_dim 0.0 +528 6 loss.margin 9.596446993603232 +528 6 negative_sampler.num_negs_per_pos 72.0 +528 6 training.batch_size 0.0 +528 7 model.embedding_dim 2.0 +528 7 loss.margin 7.639775246831558 +528 7 negative_sampler.num_negs_per_pos 39.0 +528 7 training.batch_size 1.0 +528 8 model.embedding_dim 0.0 +528 8 loss.margin 0.8087818674064358 +528 8 negative_sampler.num_negs_per_pos 39.0 +528 8 training.batch_size 2.0 +528 9 model.embedding_dim 2.0 +528 9 loss.margin 3.9599381680290446 +528 9 negative_sampler.num_negs_per_pos 3.0 +528 9 training.batch_size 0.0 +528 10 model.embedding_dim 0.0 +528 10 loss.margin 8.542058668987265 +528 10 negative_sampler.num_negs_per_pos 31.0 +528 10 training.batch_size 1.0 +528 11 model.embedding_dim 2.0 +528 11 loss.margin 8.457849765693263 +528 11 negative_sampler.num_negs_per_pos 48.0 +528 11 training.batch_size 0.0 +528 12 model.embedding_dim 0.0 +528 12 loss.margin 5.418180958647093 +528 12 negative_sampler.num_negs_per_pos 23.0 +528 12 training.batch_size 1.0 +528 13 model.embedding_dim 2.0 +528 13 
loss.margin 2.082733368483386 +528 13 negative_sampler.num_negs_per_pos 60.0 +528 13 training.batch_size 0.0 +528 14 model.embedding_dim 0.0 +528 14 loss.margin 5.58350401218653 +528 14 negative_sampler.num_negs_per_pos 98.0 +528 14 training.batch_size 0.0 +528 15 model.embedding_dim 1.0 +528 15 loss.margin 2.215228432976235 +528 15 negative_sampler.num_negs_per_pos 50.0 +528 15 training.batch_size 0.0 +528 16 model.embedding_dim 1.0 +528 16 loss.margin 5.215009344618496 +528 16 negative_sampler.num_negs_per_pos 79.0 +528 16 training.batch_size 0.0 +528 17 model.embedding_dim 2.0 +528 17 loss.margin 7.619749253348046 +528 17 negative_sampler.num_negs_per_pos 64.0 +528 17 training.batch_size 0.0 +528 18 model.embedding_dim 1.0 +528 18 loss.margin 1.4081301178817827 +528 18 negative_sampler.num_negs_per_pos 10.0 +528 18 training.batch_size 0.0 +528 19 model.embedding_dim 1.0 +528 19 loss.margin 8.210114161258364 +528 19 negative_sampler.num_negs_per_pos 30.0 +528 19 training.batch_size 2.0 +528 20 model.embedding_dim 1.0 +528 20 loss.margin 8.881095146629983 +528 20 negative_sampler.num_negs_per_pos 23.0 +528 20 training.batch_size 1.0 +528 21 model.embedding_dim 2.0 +528 21 loss.margin 6.056524065581468 +528 21 negative_sampler.num_negs_per_pos 53.0 +528 21 training.batch_size 1.0 +528 22 model.embedding_dim 0.0 +528 22 loss.margin 4.982496513468654 +528 22 negative_sampler.num_negs_per_pos 14.0 +528 22 training.batch_size 2.0 +528 23 model.embedding_dim 1.0 +528 23 loss.margin 1.923604669969552 +528 23 negative_sampler.num_negs_per_pos 7.0 +528 23 training.batch_size 2.0 +528 24 model.embedding_dim 2.0 +528 24 loss.margin 1.6543731333541194 +528 24 negative_sampler.num_negs_per_pos 62.0 +528 24 training.batch_size 0.0 +528 25 model.embedding_dim 0.0 +528 25 loss.margin 9.108928997030686 +528 25 negative_sampler.num_negs_per_pos 1.0 +528 25 training.batch_size 2.0 +528 26 model.embedding_dim 2.0 +528 26 loss.margin 2.1424628952411053 +528 26 
negative_sampler.num_negs_per_pos 64.0 +528 26 training.batch_size 2.0 +528 27 model.embedding_dim 0.0 +528 27 loss.margin 3.570139081807364 +528 27 negative_sampler.num_negs_per_pos 51.0 +528 27 training.batch_size 1.0 +528 28 model.embedding_dim 1.0 +528 28 loss.margin 4.390617405233903 +528 28 negative_sampler.num_negs_per_pos 86.0 +528 28 training.batch_size 1.0 +528 29 model.embedding_dim 0.0 +528 29 loss.margin 9.430424265667043 +528 29 negative_sampler.num_negs_per_pos 78.0 +528 29 training.batch_size 0.0 +528 30 model.embedding_dim 0.0 +528 30 loss.margin 7.568169451450065 +528 30 negative_sampler.num_negs_per_pos 64.0 +528 30 training.batch_size 2.0 +528 31 model.embedding_dim 0.0 +528 31 loss.margin 4.4199064864625575 +528 31 negative_sampler.num_negs_per_pos 60.0 +528 31 training.batch_size 2.0 +528 32 model.embedding_dim 0.0 +528 32 loss.margin 5.59082261012544 +528 32 negative_sampler.num_negs_per_pos 14.0 +528 32 training.batch_size 1.0 +528 33 model.embedding_dim 0.0 +528 33 loss.margin 5.91431964813214 +528 33 negative_sampler.num_negs_per_pos 56.0 +528 33 training.batch_size 2.0 +528 34 model.embedding_dim 2.0 +528 34 loss.margin 9.845435413740177 +528 34 negative_sampler.num_negs_per_pos 46.0 +528 34 training.batch_size 0.0 +528 35 model.embedding_dim 2.0 +528 35 loss.margin 3.609499543271551 +528 35 negative_sampler.num_negs_per_pos 57.0 +528 35 training.batch_size 0.0 +528 36 model.embedding_dim 0.0 +528 36 loss.margin 2.18821259944193 +528 36 negative_sampler.num_negs_per_pos 98.0 +528 36 training.batch_size 1.0 +528 37 model.embedding_dim 2.0 +528 37 loss.margin 3.266742674742234 +528 37 negative_sampler.num_negs_per_pos 4.0 +528 37 training.batch_size 0.0 +528 38 model.embedding_dim 2.0 +528 38 loss.margin 6.712931319897956 +528 38 negative_sampler.num_negs_per_pos 39.0 +528 38 training.batch_size 2.0 +528 39 model.embedding_dim 2.0 +528 39 loss.margin 5.521861181288049 +528 39 negative_sampler.num_negs_per_pos 1.0 +528 39 training.batch_size 
2.0 +528 40 model.embedding_dim 0.0 +528 40 loss.margin 4.6623642780158105 +528 40 negative_sampler.num_negs_per_pos 87.0 +528 40 training.batch_size 1.0 +528 41 model.embedding_dim 1.0 +528 41 loss.margin 1.5535549844494234 +528 41 negative_sampler.num_negs_per_pos 23.0 +528 41 training.batch_size 2.0 +528 42 model.embedding_dim 0.0 +528 42 loss.margin 5.360272507478726 +528 42 negative_sampler.num_negs_per_pos 44.0 +528 42 training.batch_size 0.0 +528 43 model.embedding_dim 0.0 +528 43 loss.margin 4.5807730188759415 +528 43 negative_sampler.num_negs_per_pos 92.0 +528 43 training.batch_size 2.0 +528 44 model.embedding_dim 0.0 +528 44 loss.margin 6.475856088485962 +528 44 negative_sampler.num_negs_per_pos 87.0 +528 44 training.batch_size 2.0 +528 45 model.embedding_dim 2.0 +528 45 loss.margin 3.6978699708704994 +528 45 negative_sampler.num_negs_per_pos 51.0 +528 45 training.batch_size 1.0 +528 46 model.embedding_dim 2.0 +528 46 loss.margin 5.668686225193145 +528 46 negative_sampler.num_negs_per_pos 83.0 +528 46 training.batch_size 0.0 +528 47 model.embedding_dim 2.0 +528 47 loss.margin 1.3212601690880519 +528 47 negative_sampler.num_negs_per_pos 31.0 +528 47 training.batch_size 2.0 +528 48 model.embedding_dim 2.0 +528 48 loss.margin 9.155744942000863 +528 48 negative_sampler.num_negs_per_pos 28.0 +528 48 training.batch_size 2.0 +528 49 model.embedding_dim 1.0 +528 49 loss.margin 0.7418753813087218 +528 49 negative_sampler.num_negs_per_pos 61.0 +528 49 training.batch_size 1.0 +528 50 model.embedding_dim 0.0 +528 50 loss.margin 8.266583797079793 +528 50 negative_sampler.num_negs_per_pos 46.0 +528 50 training.batch_size 1.0 +528 51 model.embedding_dim 1.0 +528 51 loss.margin 9.184726828437684 +528 51 negative_sampler.num_negs_per_pos 63.0 +528 51 training.batch_size 0.0 +528 52 model.embedding_dim 0.0 +528 52 loss.margin 4.949391339486084 +528 52 negative_sampler.num_negs_per_pos 87.0 +528 52 training.batch_size 2.0 +528 53 model.embedding_dim 1.0 +528 53 loss.margin 
6.1381847016577735 +528 53 negative_sampler.num_negs_per_pos 89.0 +528 53 training.batch_size 2.0 +528 54 model.embedding_dim 1.0 +528 54 loss.margin 9.4064592556476 +528 54 negative_sampler.num_negs_per_pos 58.0 +528 54 training.batch_size 1.0 +528 55 model.embedding_dim 2.0 +528 55 loss.margin 1.3146613816084287 +528 55 negative_sampler.num_negs_per_pos 55.0 +528 55 training.batch_size 2.0 +528 56 model.embedding_dim 0.0 +528 56 loss.margin 1.4963661159302246 +528 56 negative_sampler.num_negs_per_pos 25.0 +528 56 training.batch_size 2.0 +528 57 model.embedding_dim 2.0 +528 57 loss.margin 7.122788823018904 +528 57 negative_sampler.num_negs_per_pos 68.0 +528 57 training.batch_size 2.0 +528 58 model.embedding_dim 2.0 +528 58 loss.margin 2.2400003909065482 +528 58 negative_sampler.num_negs_per_pos 47.0 +528 58 training.batch_size 1.0 +528 59 model.embedding_dim 2.0 +528 59 loss.margin 3.3187743752540437 +528 59 negative_sampler.num_negs_per_pos 32.0 +528 59 training.batch_size 2.0 +528 60 model.embedding_dim 2.0 +528 60 loss.margin 5.553109966131783 +528 60 negative_sampler.num_negs_per_pos 8.0 +528 60 training.batch_size 1.0 +528 61 model.embedding_dim 0.0 +528 61 loss.margin 9.75481939201728 +528 61 negative_sampler.num_negs_per_pos 62.0 +528 61 training.batch_size 0.0 +528 62 model.embedding_dim 2.0 +528 62 loss.margin 5.698447965464929 +528 62 negative_sampler.num_negs_per_pos 57.0 +528 62 training.batch_size 2.0 +528 63 model.embedding_dim 1.0 +528 63 loss.margin 3.584896753154888 +528 63 negative_sampler.num_negs_per_pos 40.0 +528 63 training.batch_size 0.0 +528 64 model.embedding_dim 1.0 +528 64 loss.margin 8.911192814605396 +528 64 negative_sampler.num_negs_per_pos 2.0 +528 64 training.batch_size 1.0 +528 65 model.embedding_dim 0.0 +528 65 loss.margin 4.2480166545258475 +528 65 negative_sampler.num_negs_per_pos 70.0 +528 65 training.batch_size 1.0 +528 66 model.embedding_dim 2.0 +528 66 loss.margin 6.692335993771727 +528 66 negative_sampler.num_negs_per_pos 
34.0 +528 66 training.batch_size 0.0 +528 67 model.embedding_dim 1.0 +528 67 loss.margin 3.6441123653040917 +528 67 negative_sampler.num_negs_per_pos 20.0 +528 67 training.batch_size 2.0 +528 68 model.embedding_dim 2.0 +528 68 loss.margin 2.2388047152852963 +528 68 negative_sampler.num_negs_per_pos 48.0 +528 68 training.batch_size 0.0 +528 69 model.embedding_dim 1.0 +528 69 loss.margin 4.386988460319287 +528 69 negative_sampler.num_negs_per_pos 37.0 +528 69 training.batch_size 0.0 +528 70 model.embedding_dim 2.0 +528 70 loss.margin 0.6016388262482448 +528 70 negative_sampler.num_negs_per_pos 76.0 +528 70 training.batch_size 2.0 +528 71 model.embedding_dim 2.0 +528 71 loss.margin 8.128170479527732 +528 71 negative_sampler.num_negs_per_pos 54.0 +528 71 training.batch_size 0.0 +528 72 model.embedding_dim 1.0 +528 72 loss.margin 2.021836633922067 +528 72 negative_sampler.num_negs_per_pos 13.0 +528 72 training.batch_size 2.0 +528 73 model.embedding_dim 0.0 +528 73 loss.margin 3.7738217361857065 +528 73 negative_sampler.num_negs_per_pos 37.0 +528 73 training.batch_size 2.0 +528 74 model.embedding_dim 0.0 +528 74 loss.margin 0.8904169135490674 +528 74 negative_sampler.num_negs_per_pos 18.0 +528 74 training.batch_size 2.0 +528 75 model.embedding_dim 0.0 +528 75 loss.margin 5.10139412741198 +528 75 negative_sampler.num_negs_per_pos 49.0 +528 75 training.batch_size 2.0 +528 76 model.embedding_dim 1.0 +528 76 loss.margin 7.727832891632364 +528 76 negative_sampler.num_negs_per_pos 29.0 +528 76 training.batch_size 0.0 +528 77 model.embedding_dim 1.0 +528 77 loss.margin 8.460276112379113 +528 77 negative_sampler.num_negs_per_pos 71.0 +528 77 training.batch_size 2.0 +528 78 model.embedding_dim 0.0 +528 78 loss.margin 5.43009675963675 +528 78 negative_sampler.num_negs_per_pos 75.0 +528 78 training.batch_size 2.0 +528 79 model.embedding_dim 2.0 +528 79 loss.margin 1.0898288229485698 +528 79 negative_sampler.num_negs_per_pos 22.0 +528 79 training.batch_size 2.0 +528 80 
model.embedding_dim 1.0 +528 80 loss.margin 6.248056279437442 +528 80 negative_sampler.num_negs_per_pos 63.0 +528 80 training.batch_size 1.0 +528 81 model.embedding_dim 2.0 +528 81 loss.margin 6.795548481767473 +528 81 negative_sampler.num_negs_per_pos 10.0 +528 81 training.batch_size 2.0 +528 82 model.embedding_dim 0.0 +528 82 loss.margin 8.702419209354836 +528 82 negative_sampler.num_negs_per_pos 80.0 +528 82 training.batch_size 0.0 +528 83 model.embedding_dim 1.0 +528 83 loss.margin 3.959756909570765 +528 83 negative_sampler.num_negs_per_pos 29.0 +528 83 training.batch_size 2.0 +528 84 model.embedding_dim 2.0 +528 84 loss.margin 6.321697281282065 +528 84 negative_sampler.num_negs_per_pos 54.0 +528 84 training.batch_size 1.0 +528 85 model.embedding_dim 1.0 +528 85 loss.margin 3.9811429467695825 +528 85 negative_sampler.num_negs_per_pos 64.0 +528 85 training.batch_size 0.0 +528 86 model.embedding_dim 0.0 +528 86 loss.margin 4.028638131063374 +528 86 negative_sampler.num_negs_per_pos 93.0 +528 86 training.batch_size 1.0 +528 87 model.embedding_dim 2.0 +528 87 loss.margin 3.2224560073458277 +528 87 negative_sampler.num_negs_per_pos 14.0 +528 87 training.batch_size 0.0 +528 88 model.embedding_dim 0.0 +528 88 loss.margin 7.460207713103047 +528 88 negative_sampler.num_negs_per_pos 82.0 +528 88 training.batch_size 0.0 +528 89 model.embedding_dim 1.0 +528 89 loss.margin 1.2965145280147483 +528 89 negative_sampler.num_negs_per_pos 78.0 +528 89 training.batch_size 2.0 +528 90 model.embedding_dim 1.0 +528 90 loss.margin 3.3860019979349056 +528 90 negative_sampler.num_negs_per_pos 50.0 +528 90 training.batch_size 0.0 +528 91 model.embedding_dim 1.0 +528 91 loss.margin 1.578927907818542 +528 91 negative_sampler.num_negs_per_pos 57.0 +528 91 training.batch_size 0.0 +528 92 model.embedding_dim 2.0 +528 92 loss.margin 7.174518621092729 +528 92 negative_sampler.num_negs_per_pos 45.0 +528 92 training.batch_size 2.0 +528 93 model.embedding_dim 0.0 +528 93 loss.margin 
2.7544268118122424 +528 93 negative_sampler.num_negs_per_pos 74.0 +528 93 training.batch_size 1.0 +528 94 model.embedding_dim 2.0 +528 94 loss.margin 9.20521791225256 +528 94 negative_sampler.num_negs_per_pos 91.0 +528 94 training.batch_size 2.0 +528 95 model.embedding_dim 1.0 +528 95 loss.margin 2.1039040651263465 +528 95 negative_sampler.num_negs_per_pos 62.0 +528 95 training.batch_size 2.0 +528 96 model.embedding_dim 2.0 +528 96 loss.margin 5.464196627495143 +528 96 negative_sampler.num_negs_per_pos 23.0 +528 96 training.batch_size 2.0 +528 97 model.embedding_dim 0.0 +528 97 loss.margin 8.6408742298884 +528 97 negative_sampler.num_negs_per_pos 65.0 +528 97 training.batch_size 0.0 +528 98 model.embedding_dim 0.0 +528 98 loss.margin 1.1360432944763719 +528 98 negative_sampler.num_negs_per_pos 46.0 +528 98 training.batch_size 2.0 +528 99 model.embedding_dim 2.0 +528 99 loss.margin 6.136319058771016 +528 99 negative_sampler.num_negs_per_pos 48.0 +528 99 training.batch_size 1.0 +528 100 model.embedding_dim 1.0 +528 100 loss.margin 0.5365485945755841 +528 100 negative_sampler.num_negs_per_pos 96.0 +528 100 training.batch_size 1.0 +528 1 dataset """kinships""" +528 1 model """rescal""" +528 1 loss """marginranking""" +528 1 regularizer """no""" +528 1 optimizer """adadelta""" +528 1 training_loop """owa""" +528 1 negative_sampler """basic""" +528 1 evaluator """rankbased""" +528 2 dataset """kinships""" +528 2 model """rescal""" +528 2 loss """marginranking""" +528 2 regularizer """no""" +528 2 optimizer """adadelta""" +528 2 training_loop """owa""" +528 2 negative_sampler """basic""" +528 2 evaluator """rankbased""" +528 3 dataset """kinships""" +528 3 model """rescal""" +528 3 loss """marginranking""" +528 3 regularizer """no""" +528 3 optimizer """adadelta""" +528 3 training_loop """owa""" +528 3 negative_sampler """basic""" +528 3 evaluator """rankbased""" +528 4 dataset """kinships""" +528 4 model """rescal""" +528 4 loss """marginranking""" +528 4 regularizer 
"""no""" +528 4 optimizer """adadelta""" +528 4 training_loop """owa""" +528 4 negative_sampler """basic""" +528 4 evaluator """rankbased""" +528 5 dataset """kinships""" +528 5 model """rescal""" +528 5 loss """marginranking""" +528 5 regularizer """no""" +528 5 optimizer """adadelta""" +528 5 training_loop """owa""" +528 5 negative_sampler """basic""" +528 5 evaluator """rankbased""" +528 6 dataset """kinships""" +528 6 model """rescal""" +528 6 loss """marginranking""" +528 6 regularizer """no""" +528 6 optimizer """adadelta""" +528 6 training_loop """owa""" +528 6 negative_sampler """basic""" +528 6 evaluator """rankbased""" +528 7 dataset """kinships""" +528 7 model """rescal""" +528 7 loss """marginranking""" +528 7 regularizer """no""" +528 7 optimizer """adadelta""" +528 7 training_loop """owa""" +528 7 negative_sampler """basic""" +528 7 evaluator """rankbased""" +528 8 dataset """kinships""" +528 8 model """rescal""" +528 8 loss """marginranking""" +528 8 regularizer """no""" +528 8 optimizer """adadelta""" +528 8 training_loop """owa""" +528 8 negative_sampler """basic""" +528 8 evaluator """rankbased""" +528 9 dataset """kinships""" +528 9 model """rescal""" +528 9 loss """marginranking""" +528 9 regularizer """no""" +528 9 optimizer """adadelta""" +528 9 training_loop """owa""" +528 9 negative_sampler """basic""" +528 9 evaluator """rankbased""" +528 10 dataset """kinships""" +528 10 model """rescal""" +528 10 loss """marginranking""" +528 10 regularizer """no""" +528 10 optimizer """adadelta""" +528 10 training_loop """owa""" +528 10 negative_sampler """basic""" +528 10 evaluator """rankbased""" +528 11 dataset """kinships""" +528 11 model """rescal""" +528 11 loss """marginranking""" +528 11 regularizer """no""" +528 11 optimizer """adadelta""" +528 11 training_loop """owa""" +528 11 negative_sampler """basic""" +528 11 evaluator """rankbased""" +528 12 dataset """kinships""" +528 12 model """rescal""" +528 12 loss """marginranking""" +528 12 
regularizer """no""" +528 12 optimizer """adadelta""" +528 12 training_loop """owa""" +528 12 negative_sampler """basic""" +528 12 evaluator """rankbased""" +528 13 dataset """kinships""" +528 13 model """rescal""" +528 13 loss """marginranking""" +528 13 regularizer """no""" +528 13 optimizer """adadelta""" +528 13 training_loop """owa""" +528 13 negative_sampler """basic""" +528 13 evaluator """rankbased""" +528 14 dataset """kinships""" +528 14 model """rescal""" +528 14 loss """marginranking""" +528 14 regularizer """no""" +528 14 optimizer """adadelta""" +528 14 training_loop """owa""" +528 14 negative_sampler """basic""" +528 14 evaluator """rankbased""" +528 15 dataset """kinships""" +528 15 model """rescal""" +528 15 loss """marginranking""" +528 15 regularizer """no""" +528 15 optimizer """adadelta""" +528 15 training_loop """owa""" +528 15 negative_sampler """basic""" +528 15 evaluator """rankbased""" +528 16 dataset """kinships""" +528 16 model """rescal""" +528 16 loss """marginranking""" +528 16 regularizer """no""" +528 16 optimizer """adadelta""" +528 16 training_loop """owa""" +528 16 negative_sampler """basic""" +528 16 evaluator """rankbased""" +528 17 dataset """kinships""" +528 17 model """rescal""" +528 17 loss """marginranking""" +528 17 regularizer """no""" +528 17 optimizer """adadelta""" +528 17 training_loop """owa""" +528 17 negative_sampler """basic""" +528 17 evaluator """rankbased""" +528 18 dataset """kinships""" +528 18 model """rescal""" +528 18 loss """marginranking""" +528 18 regularizer """no""" +528 18 optimizer """adadelta""" +528 18 training_loop """owa""" +528 18 negative_sampler """basic""" +528 18 evaluator """rankbased""" +528 19 dataset """kinships""" +528 19 model """rescal""" +528 19 loss """marginranking""" +528 19 regularizer """no""" +528 19 optimizer """adadelta""" +528 19 training_loop """owa""" +528 19 negative_sampler """basic""" +528 19 evaluator """rankbased""" +528 20 dataset """kinships""" +528 20 model 
"""rescal""" +528 20 loss """marginranking""" +528 20 regularizer """no""" +528 20 optimizer """adadelta""" +528 20 training_loop """owa""" +528 20 negative_sampler """basic""" +528 20 evaluator """rankbased""" +528 21 dataset """kinships""" +528 21 model """rescal""" +528 21 loss """marginranking""" +528 21 regularizer """no""" +528 21 optimizer """adadelta""" +528 21 training_loop """owa""" +528 21 negative_sampler """basic""" +528 21 evaluator """rankbased""" +528 22 dataset """kinships""" +528 22 model """rescal""" +528 22 loss """marginranking""" +528 22 regularizer """no""" +528 22 optimizer """adadelta""" +528 22 training_loop """owa""" +528 22 negative_sampler """basic""" +528 22 evaluator """rankbased""" +528 23 dataset """kinships""" +528 23 model """rescal""" +528 23 loss """marginranking""" +528 23 regularizer """no""" +528 23 optimizer """adadelta""" +528 23 training_loop """owa""" +528 23 negative_sampler """basic""" +528 23 evaluator """rankbased""" +528 24 dataset """kinships""" +528 24 model """rescal""" +528 24 loss """marginranking""" +528 24 regularizer """no""" +528 24 optimizer """adadelta""" +528 24 training_loop """owa""" +528 24 negative_sampler """basic""" +528 24 evaluator """rankbased""" +528 25 dataset """kinships""" +528 25 model """rescal""" +528 25 loss """marginranking""" +528 25 regularizer """no""" +528 25 optimizer """adadelta""" +528 25 training_loop """owa""" +528 25 negative_sampler """basic""" +528 25 evaluator """rankbased""" +528 26 dataset """kinships""" +528 26 model """rescal""" +528 26 loss """marginranking""" +528 26 regularizer """no""" +528 26 optimizer """adadelta""" +528 26 training_loop """owa""" +528 26 negative_sampler """basic""" +528 26 evaluator """rankbased""" +528 27 dataset """kinships""" +528 27 model """rescal""" +528 27 loss """marginranking""" +528 27 regularizer """no""" +528 27 optimizer """adadelta""" +528 27 training_loop """owa""" +528 27 negative_sampler """basic""" +528 27 evaluator 
"""rankbased""" +528 28 dataset """kinships""" +528 28 model """rescal""" +528 28 loss """marginranking""" +528 28 regularizer """no""" +528 28 optimizer """adadelta""" +528 28 training_loop """owa""" +528 28 negative_sampler """basic""" +528 28 evaluator """rankbased""" +528 29 dataset """kinships""" +528 29 model """rescal""" +528 29 loss """marginranking""" +528 29 regularizer """no""" +528 29 optimizer """adadelta""" +528 29 training_loop """owa""" +528 29 negative_sampler """basic""" +528 29 evaluator """rankbased""" +528 30 dataset """kinships""" +528 30 model """rescal""" +528 30 loss """marginranking""" +528 30 regularizer """no""" +528 30 optimizer """adadelta""" +528 30 training_loop """owa""" +528 30 negative_sampler """basic""" +528 30 evaluator """rankbased""" +528 31 dataset """kinships""" +528 31 model """rescal""" +528 31 loss """marginranking""" +528 31 regularizer """no""" +528 31 optimizer """adadelta""" +528 31 training_loop """owa""" +528 31 negative_sampler """basic""" +528 31 evaluator """rankbased""" +528 32 dataset """kinships""" +528 32 model """rescal""" +528 32 loss """marginranking""" +528 32 regularizer """no""" +528 32 optimizer """adadelta""" +528 32 training_loop """owa""" +528 32 negative_sampler """basic""" +528 32 evaluator """rankbased""" +528 33 dataset """kinships""" +528 33 model """rescal""" +528 33 loss """marginranking""" +528 33 regularizer """no""" +528 33 optimizer """adadelta""" +528 33 training_loop """owa""" +528 33 negative_sampler """basic""" +528 33 evaluator """rankbased""" +528 34 dataset """kinships""" +528 34 model """rescal""" +528 34 loss """marginranking""" +528 34 regularizer """no""" +528 34 optimizer """adadelta""" +528 34 training_loop """owa""" +528 34 negative_sampler """basic""" +528 34 evaluator """rankbased""" +528 35 dataset """kinships""" +528 35 model """rescal""" +528 35 loss """marginranking""" +528 35 regularizer """no""" +528 35 optimizer """adadelta""" +528 35 training_loop """owa""" +528 
35 negative_sampler """basic""" +528 35 evaluator """rankbased""" +528 36 dataset """kinships""" +528 36 model """rescal""" +528 36 loss """marginranking""" +528 36 regularizer """no""" +528 36 optimizer """adadelta""" +528 36 training_loop """owa""" +528 36 negative_sampler """basic""" +528 36 evaluator """rankbased""" +528 37 dataset """kinships""" +528 37 model """rescal""" +528 37 loss """marginranking""" +528 37 regularizer """no""" +528 37 optimizer """adadelta""" +528 37 training_loop """owa""" +528 37 negative_sampler """basic""" +528 37 evaluator """rankbased""" +528 38 dataset """kinships""" +528 38 model """rescal""" +528 38 loss """marginranking""" +528 38 regularizer """no""" +528 38 optimizer """adadelta""" +528 38 training_loop """owa""" +528 38 negative_sampler """basic""" +528 38 evaluator """rankbased""" +528 39 dataset """kinships""" +528 39 model """rescal""" +528 39 loss """marginranking""" +528 39 regularizer """no""" +528 39 optimizer """adadelta""" +528 39 training_loop """owa""" +528 39 negative_sampler """basic""" +528 39 evaluator """rankbased""" +528 40 dataset """kinships""" +528 40 model """rescal""" +528 40 loss """marginranking""" +528 40 regularizer """no""" +528 40 optimizer """adadelta""" +528 40 training_loop """owa""" +528 40 negative_sampler """basic""" +528 40 evaluator """rankbased""" +528 41 dataset """kinships""" +528 41 model """rescal""" +528 41 loss """marginranking""" +528 41 regularizer """no""" +528 41 optimizer """adadelta""" +528 41 training_loop """owa""" +528 41 negative_sampler """basic""" +528 41 evaluator """rankbased""" +528 42 dataset """kinships""" +528 42 model """rescal""" +528 42 loss """marginranking""" +528 42 regularizer """no""" +528 42 optimizer """adadelta""" +528 42 training_loop """owa""" +528 42 negative_sampler """basic""" +528 42 evaluator """rankbased""" +528 43 dataset """kinships""" +528 43 model """rescal""" +528 43 loss """marginranking""" +528 43 regularizer """no""" +528 43 optimizer 
"""adadelta""" +528 43 training_loop """owa""" +528 43 negative_sampler """basic""" +528 43 evaluator """rankbased""" +528 44 dataset """kinships""" +528 44 model """rescal""" +528 44 loss """marginranking""" +528 44 regularizer """no""" +528 44 optimizer """adadelta""" +528 44 training_loop """owa""" +528 44 negative_sampler """basic""" +528 44 evaluator """rankbased""" +528 45 dataset """kinships""" +528 45 model """rescal""" +528 45 loss """marginranking""" +528 45 regularizer """no""" +528 45 optimizer """adadelta""" +528 45 training_loop """owa""" +528 45 negative_sampler """basic""" +528 45 evaluator """rankbased""" +528 46 dataset """kinships""" +528 46 model """rescal""" +528 46 loss """marginranking""" +528 46 regularizer """no""" +528 46 optimizer """adadelta""" +528 46 training_loop """owa""" +528 46 negative_sampler """basic""" +528 46 evaluator """rankbased""" +528 47 dataset """kinships""" +528 47 model """rescal""" +528 47 loss """marginranking""" +528 47 regularizer """no""" +528 47 optimizer """adadelta""" +528 47 training_loop """owa""" +528 47 negative_sampler """basic""" +528 47 evaluator """rankbased""" +528 48 dataset """kinships""" +528 48 model """rescal""" +528 48 loss """marginranking""" +528 48 regularizer """no""" +528 48 optimizer """adadelta""" +528 48 training_loop """owa""" +528 48 negative_sampler """basic""" +528 48 evaluator """rankbased""" +528 49 dataset """kinships""" +528 49 model """rescal""" +528 49 loss """marginranking""" +528 49 regularizer """no""" +528 49 optimizer """adadelta""" +528 49 training_loop """owa""" +528 49 negative_sampler """basic""" +528 49 evaluator """rankbased""" +528 50 dataset """kinships""" +528 50 model """rescal""" +528 50 loss """marginranking""" +528 50 regularizer """no""" +528 50 optimizer """adadelta""" +528 50 training_loop """owa""" +528 50 negative_sampler """basic""" +528 50 evaluator """rankbased""" +528 51 dataset """kinships""" +528 51 model """rescal""" +528 51 loss 
"""marginranking""" +528 51 regularizer """no""" +528 51 optimizer """adadelta""" +528 51 training_loop """owa""" +528 51 negative_sampler """basic""" +528 51 evaluator """rankbased""" +528 52 dataset """kinships""" +528 52 model """rescal""" +528 52 loss """marginranking""" +528 52 regularizer """no""" +528 52 optimizer """adadelta""" +528 52 training_loop """owa""" +528 52 negative_sampler """basic""" +528 52 evaluator """rankbased""" +528 53 dataset """kinships""" +528 53 model """rescal""" +528 53 loss """marginranking""" +528 53 regularizer """no""" +528 53 optimizer """adadelta""" +528 53 training_loop """owa""" +528 53 negative_sampler """basic""" +528 53 evaluator """rankbased""" +528 54 dataset """kinships""" +528 54 model """rescal""" +528 54 loss """marginranking""" +528 54 regularizer """no""" +528 54 optimizer """adadelta""" +528 54 training_loop """owa""" +528 54 negative_sampler """basic""" +528 54 evaluator """rankbased""" +528 55 dataset """kinships""" +528 55 model """rescal""" +528 55 loss """marginranking""" +528 55 regularizer """no""" +528 55 optimizer """adadelta""" +528 55 training_loop """owa""" +528 55 negative_sampler """basic""" +528 55 evaluator """rankbased""" +528 56 dataset """kinships""" +528 56 model """rescal""" +528 56 loss """marginranking""" +528 56 regularizer """no""" +528 56 optimizer """adadelta""" +528 56 training_loop """owa""" +528 56 negative_sampler """basic""" +528 56 evaluator """rankbased""" +528 57 dataset """kinships""" +528 57 model """rescal""" +528 57 loss """marginranking""" +528 57 regularizer """no""" +528 57 optimizer """adadelta""" +528 57 training_loop """owa""" +528 57 negative_sampler """basic""" +528 57 evaluator """rankbased""" +528 58 dataset """kinships""" +528 58 model """rescal""" +528 58 loss """marginranking""" +528 58 regularizer """no""" +528 58 optimizer """adadelta""" +528 58 training_loop """owa""" +528 58 negative_sampler """basic""" +528 58 evaluator """rankbased""" +528 59 dataset 
"""kinships""" +528 59 model """rescal""" +528 59 loss """marginranking""" +528 59 regularizer """no""" +528 59 optimizer """adadelta""" +528 59 training_loop """owa""" +528 59 negative_sampler """basic""" +528 59 evaluator """rankbased""" +528 60 dataset """kinships""" +528 60 model """rescal""" +528 60 loss """marginranking""" +528 60 regularizer """no""" +528 60 optimizer """adadelta""" +528 60 training_loop """owa""" +528 60 negative_sampler """basic""" +528 60 evaluator """rankbased""" +528 61 dataset """kinships""" +528 61 model """rescal""" +528 61 loss """marginranking""" +528 61 regularizer """no""" +528 61 optimizer """adadelta""" +528 61 training_loop """owa""" +528 61 negative_sampler """basic""" +528 61 evaluator """rankbased""" +528 62 dataset """kinships""" +528 62 model """rescal""" +528 62 loss """marginranking""" +528 62 regularizer """no""" +528 62 optimizer """adadelta""" +528 62 training_loop """owa""" +528 62 negative_sampler """basic""" +528 62 evaluator """rankbased""" +528 63 dataset """kinships""" +528 63 model """rescal""" +528 63 loss """marginranking""" +528 63 regularizer """no""" +528 63 optimizer """adadelta""" +528 63 training_loop """owa""" +528 63 negative_sampler """basic""" +528 63 evaluator """rankbased""" +528 64 dataset """kinships""" +528 64 model """rescal""" +528 64 loss """marginranking""" +528 64 regularizer """no""" +528 64 optimizer """adadelta""" +528 64 training_loop """owa""" +528 64 negative_sampler """basic""" +528 64 evaluator """rankbased""" +528 65 dataset """kinships""" +528 65 model """rescal""" +528 65 loss """marginranking""" +528 65 regularizer """no""" +528 65 optimizer """adadelta""" +528 65 training_loop """owa""" +528 65 negative_sampler """basic""" +528 65 evaluator """rankbased""" +528 66 dataset """kinships""" +528 66 model """rescal""" +528 66 loss """marginranking""" +528 66 regularizer """no""" +528 66 optimizer """adadelta""" +528 66 training_loop """owa""" +528 66 negative_sampler """basic""" 
+528 66 evaluator """rankbased""" +528 67 dataset """kinships""" +528 67 model """rescal""" +528 67 loss """marginranking""" +528 67 regularizer """no""" +528 67 optimizer """adadelta""" +528 67 training_loop """owa""" +528 67 negative_sampler """basic""" +528 67 evaluator """rankbased""" +528 68 dataset """kinships""" +528 68 model """rescal""" +528 68 loss """marginranking""" +528 68 regularizer """no""" +528 68 optimizer """adadelta""" +528 68 training_loop """owa""" +528 68 negative_sampler """basic""" +528 68 evaluator """rankbased""" +528 69 dataset """kinships""" +528 69 model """rescal""" +528 69 loss """marginranking""" +528 69 regularizer """no""" +528 69 optimizer """adadelta""" +528 69 training_loop """owa""" +528 69 negative_sampler """basic""" +528 69 evaluator """rankbased""" +528 70 dataset """kinships""" +528 70 model """rescal""" +528 70 loss """marginranking""" +528 70 regularizer """no""" +528 70 optimizer """adadelta""" +528 70 training_loop """owa""" +528 70 negative_sampler """basic""" +528 70 evaluator """rankbased""" +528 71 dataset """kinships""" +528 71 model """rescal""" +528 71 loss """marginranking""" +528 71 regularizer """no""" +528 71 optimizer """adadelta""" +528 71 training_loop """owa""" +528 71 negative_sampler """basic""" +528 71 evaluator """rankbased""" +528 72 dataset """kinships""" +528 72 model """rescal""" +528 72 loss """marginranking""" +528 72 regularizer """no""" +528 72 optimizer """adadelta""" +528 72 training_loop """owa""" +528 72 negative_sampler """basic""" +528 72 evaluator """rankbased""" +528 73 dataset """kinships""" +528 73 model """rescal""" +528 73 loss """marginranking""" +528 73 regularizer """no""" +528 73 optimizer """adadelta""" +528 73 training_loop """owa""" +528 73 negative_sampler """basic""" +528 73 evaluator """rankbased""" +528 74 dataset """kinships""" +528 74 model """rescal""" +528 74 loss """marginranking""" +528 74 regularizer """no""" +528 74 optimizer """adadelta""" +528 74 
training_loop """owa""" +528 74 negative_sampler """basic""" +528 74 evaluator """rankbased""" +528 75 dataset """kinships""" +528 75 model """rescal""" +528 75 loss """marginranking""" +528 75 regularizer """no""" +528 75 optimizer """adadelta""" +528 75 training_loop """owa""" +528 75 negative_sampler """basic""" +528 75 evaluator """rankbased""" +528 76 dataset """kinships""" +528 76 model """rescal""" +528 76 loss """marginranking""" +528 76 regularizer """no""" +528 76 optimizer """adadelta""" +528 76 training_loop """owa""" +528 76 negative_sampler """basic""" +528 76 evaluator """rankbased""" +528 77 dataset """kinships""" +528 77 model """rescal""" +528 77 loss """marginranking""" +528 77 regularizer """no""" +528 77 optimizer """adadelta""" +528 77 training_loop """owa""" +528 77 negative_sampler """basic""" +528 77 evaluator """rankbased""" +528 78 dataset """kinships""" +528 78 model """rescal""" +528 78 loss """marginranking""" +528 78 regularizer """no""" +528 78 optimizer """adadelta""" +528 78 training_loop """owa""" +528 78 negative_sampler """basic""" +528 78 evaluator """rankbased""" +528 79 dataset """kinships""" +528 79 model """rescal""" +528 79 loss """marginranking""" +528 79 regularizer """no""" +528 79 optimizer """adadelta""" +528 79 training_loop """owa""" +528 79 negative_sampler """basic""" +528 79 evaluator """rankbased""" +528 80 dataset """kinships""" +528 80 model """rescal""" +528 80 loss """marginranking""" +528 80 regularizer """no""" +528 80 optimizer """adadelta""" +528 80 training_loop """owa""" +528 80 negative_sampler """basic""" +528 80 evaluator """rankbased""" +528 81 dataset """kinships""" +528 81 model """rescal""" +528 81 loss """marginranking""" +528 81 regularizer """no""" +528 81 optimizer """adadelta""" +528 81 training_loop """owa""" +528 81 negative_sampler """basic""" +528 81 evaluator """rankbased""" +528 82 dataset """kinships""" +528 82 model """rescal""" +528 82 loss """marginranking""" +528 82 regularizer 
"""no""" +528 82 optimizer """adadelta""" +528 82 training_loop """owa""" +528 82 negative_sampler """basic""" +528 82 evaluator """rankbased""" +528 83 dataset """kinships""" +528 83 model """rescal""" +528 83 loss """marginranking""" +528 83 regularizer """no""" +528 83 optimizer """adadelta""" +528 83 training_loop """owa""" +528 83 negative_sampler """basic""" +528 83 evaluator """rankbased""" +528 84 dataset """kinships""" +528 84 model """rescal""" +528 84 loss """marginranking""" +528 84 regularizer """no""" +528 84 optimizer """adadelta""" +528 84 training_loop """owa""" +528 84 negative_sampler """basic""" +528 84 evaluator """rankbased""" +528 85 dataset """kinships""" +528 85 model """rescal""" +528 85 loss """marginranking""" +528 85 regularizer """no""" +528 85 optimizer """adadelta""" +528 85 training_loop """owa""" +528 85 negative_sampler """basic""" +528 85 evaluator """rankbased""" +528 86 dataset """kinships""" +528 86 model """rescal""" +528 86 loss """marginranking""" +528 86 regularizer """no""" +528 86 optimizer """adadelta""" +528 86 training_loop """owa""" +528 86 negative_sampler """basic""" +528 86 evaluator """rankbased""" +528 87 dataset """kinships""" +528 87 model """rescal""" +528 87 loss """marginranking""" +528 87 regularizer """no""" +528 87 optimizer """adadelta""" +528 87 training_loop """owa""" +528 87 negative_sampler """basic""" +528 87 evaluator """rankbased""" +528 88 dataset """kinships""" +528 88 model """rescal""" +528 88 loss """marginranking""" +528 88 regularizer """no""" +528 88 optimizer """adadelta""" +528 88 training_loop """owa""" +528 88 negative_sampler """basic""" +528 88 evaluator """rankbased""" +528 89 dataset """kinships""" +528 89 model """rescal""" +528 89 loss """marginranking""" +528 89 regularizer """no""" +528 89 optimizer """adadelta""" +528 89 training_loop """owa""" +528 89 negative_sampler """basic""" +528 89 evaluator """rankbased""" +528 90 dataset """kinships""" +528 90 model """rescal""" +528 
90 loss """marginranking""" +528 90 regularizer """no""" +528 90 optimizer """adadelta""" +528 90 training_loop """owa""" +528 90 negative_sampler """basic""" +528 90 evaluator """rankbased""" +528 91 dataset """kinships""" +528 91 model """rescal""" +528 91 loss """marginranking""" +528 91 regularizer """no""" +528 91 optimizer """adadelta""" +528 91 training_loop """owa""" +528 91 negative_sampler """basic""" +528 91 evaluator """rankbased""" +528 92 dataset """kinships""" +528 92 model """rescal""" +528 92 loss """marginranking""" +528 92 regularizer """no""" +528 92 optimizer """adadelta""" +528 92 training_loop """owa""" +528 92 negative_sampler """basic""" +528 92 evaluator """rankbased""" +528 93 dataset """kinships""" +528 93 model """rescal""" +528 93 loss """marginranking""" +528 93 regularizer """no""" +528 93 optimizer """adadelta""" +528 93 training_loop """owa""" +528 93 negative_sampler """basic""" +528 93 evaluator """rankbased""" +528 94 dataset """kinships""" +528 94 model """rescal""" +528 94 loss """marginranking""" +528 94 regularizer """no""" +528 94 optimizer """adadelta""" +528 94 training_loop """owa""" +528 94 negative_sampler """basic""" +528 94 evaluator """rankbased""" +528 95 dataset """kinships""" +528 95 model """rescal""" +528 95 loss """marginranking""" +528 95 regularizer """no""" +528 95 optimizer """adadelta""" +528 95 training_loop """owa""" +528 95 negative_sampler """basic""" +528 95 evaluator """rankbased""" +528 96 dataset """kinships""" +528 96 model """rescal""" +528 96 loss """marginranking""" +528 96 regularizer """no""" +528 96 optimizer """adadelta""" +528 96 training_loop """owa""" +528 96 negative_sampler """basic""" +528 96 evaluator """rankbased""" +528 97 dataset """kinships""" +528 97 model """rescal""" +528 97 loss """marginranking""" +528 97 regularizer """no""" +528 97 optimizer """adadelta""" +528 97 training_loop """owa""" +528 97 negative_sampler """basic""" +528 97 evaluator """rankbased""" +528 98 
dataset """kinships""" +528 98 model """rescal""" +528 98 loss """marginranking""" +528 98 regularizer """no""" +528 98 optimizer """adadelta""" +528 98 training_loop """owa""" +528 98 negative_sampler """basic""" +528 98 evaluator """rankbased""" +528 99 dataset """kinships""" +528 99 model """rescal""" +528 99 loss """marginranking""" +528 99 regularizer """no""" +528 99 optimizer """adadelta""" +528 99 training_loop """owa""" +528 99 negative_sampler """basic""" +528 99 evaluator """rankbased""" +528 100 dataset """kinships""" +528 100 model """rescal""" +528 100 loss """marginranking""" +528 100 regularizer """no""" +528 100 optimizer """adadelta""" +528 100 training_loop """owa""" +528 100 negative_sampler """basic""" +528 100 evaluator """rankbased""" +529 1 model.embedding_dim 0.0 +529 1 loss.margin 11.830746693079039 +529 1 loss.adversarial_temperature 0.1858583629242393 +529 1 negative_sampler.num_negs_per_pos 59.0 +529 1 training.batch_size 2.0 +529 2 model.embedding_dim 1.0 +529 2 loss.margin 8.201491433479918 +529 2 loss.adversarial_temperature 0.8249915428549457 +529 2 negative_sampler.num_negs_per_pos 97.0 +529 2 training.batch_size 1.0 +529 3 model.embedding_dim 0.0 +529 3 loss.margin 24.549628353442344 +529 3 loss.adversarial_temperature 0.6764660512182727 +529 3 negative_sampler.num_negs_per_pos 82.0 +529 3 training.batch_size 2.0 +529 4 model.embedding_dim 1.0 +529 4 loss.margin 21.853337770322018 +529 4 loss.adversarial_temperature 0.23176351892415276 +529 4 negative_sampler.num_negs_per_pos 51.0 +529 4 training.batch_size 0.0 +529 5 model.embedding_dim 1.0 +529 5 loss.margin 17.94106272460366 +529 5 loss.adversarial_temperature 0.6357295838885344 +529 5 negative_sampler.num_negs_per_pos 1.0 +529 5 training.batch_size 2.0 +529 6 model.embedding_dim 0.0 +529 6 loss.margin 18.9526749977327 +529 6 loss.adversarial_temperature 0.7227677363643212 +529 6 negative_sampler.num_negs_per_pos 68.0 +529 6 training.batch_size 0.0 +529 7 model.embedding_dim 
2.0 +529 7 loss.margin 8.197214732489613 +529 7 loss.adversarial_temperature 0.40960526651880136 +529 7 negative_sampler.num_negs_per_pos 20.0 +529 7 training.batch_size 2.0 +529 8 model.embedding_dim 0.0 +529 8 loss.margin 18.406374664951464 +529 8 loss.adversarial_temperature 0.8687106850129729 +529 8 negative_sampler.num_negs_per_pos 27.0 +529 8 training.batch_size 2.0 +529 9 model.embedding_dim 1.0 +529 9 loss.margin 27.82272822497625 +529 9 loss.adversarial_temperature 0.8109105099275717 +529 9 negative_sampler.num_negs_per_pos 15.0 +529 9 training.batch_size 1.0 +529 10 model.embedding_dim 0.0 +529 10 loss.margin 6.13654460694813 +529 10 loss.adversarial_temperature 0.5806533321987487 +529 10 negative_sampler.num_negs_per_pos 60.0 +529 10 training.batch_size 1.0 +529 11 model.embedding_dim 2.0 +529 11 loss.margin 11.315534647122508 +529 11 loss.adversarial_temperature 0.6278295422772004 +529 11 negative_sampler.num_negs_per_pos 48.0 +529 11 training.batch_size 0.0 +529 12 model.embedding_dim 1.0 +529 12 loss.margin 6.524614068288852 +529 12 loss.adversarial_temperature 0.8492107528096281 +529 12 negative_sampler.num_negs_per_pos 34.0 +529 12 training.batch_size 0.0 +529 13 model.embedding_dim 1.0 +529 13 loss.margin 8.94614746472444 +529 13 loss.adversarial_temperature 0.6598539961246869 +529 13 negative_sampler.num_negs_per_pos 61.0 +529 13 training.batch_size 1.0 +529 14 model.embedding_dim 0.0 +529 14 loss.margin 18.242836186956353 +529 14 loss.adversarial_temperature 0.7392466825483253 +529 14 negative_sampler.num_negs_per_pos 48.0 +529 14 training.batch_size 0.0 +529 15 model.embedding_dim 2.0 +529 15 loss.margin 14.088724282547197 +529 15 loss.adversarial_temperature 0.3320266124886228 +529 15 negative_sampler.num_negs_per_pos 5.0 +529 15 training.batch_size 2.0 +529 16 model.embedding_dim 0.0 +529 16 loss.margin 11.472359501641801 +529 16 loss.adversarial_temperature 0.7751409827920834 +529 16 negative_sampler.num_negs_per_pos 59.0 +529 16 
training.batch_size 1.0 +529 17 model.embedding_dim 2.0 +529 17 loss.margin 15.558292441800393 +529 17 loss.adversarial_temperature 0.8371430194915161 +529 17 negative_sampler.num_negs_per_pos 74.0 +529 17 training.batch_size 2.0 +529 18 model.embedding_dim 0.0 +529 18 loss.margin 19.155477354184146 +529 18 loss.adversarial_temperature 0.9522669424844159 +529 18 negative_sampler.num_negs_per_pos 6.0 +529 18 training.batch_size 0.0 +529 19 model.embedding_dim 1.0 +529 19 loss.margin 14.180538501893857 +529 19 loss.adversarial_temperature 0.37091755010971816 +529 19 negative_sampler.num_negs_per_pos 78.0 +529 19 training.batch_size 0.0 +529 20 model.embedding_dim 1.0 +529 20 loss.margin 2.2006980310621556 +529 20 loss.adversarial_temperature 0.17711143180419078 +529 20 negative_sampler.num_negs_per_pos 3.0 +529 20 training.batch_size 2.0 +529 21 model.embedding_dim 0.0 +529 21 loss.margin 2.577212729464464 +529 21 loss.adversarial_temperature 0.20209833671181943 +529 21 negative_sampler.num_negs_per_pos 1.0 +529 21 training.batch_size 1.0 +529 22 model.embedding_dim 2.0 +529 22 loss.margin 21.091154641259973 +529 22 loss.adversarial_temperature 0.3259535198232687 +529 22 negative_sampler.num_negs_per_pos 94.0 +529 22 training.batch_size 2.0 +529 23 model.embedding_dim 2.0 +529 23 loss.margin 21.90148068143036 +529 23 loss.adversarial_temperature 0.7403348008539179 +529 23 negative_sampler.num_negs_per_pos 52.0 +529 23 training.batch_size 1.0 +529 24 model.embedding_dim 0.0 +529 24 loss.margin 9.930238133008068 +529 24 loss.adversarial_temperature 0.11308805456363813 +529 24 negative_sampler.num_negs_per_pos 2.0 +529 24 training.batch_size 2.0 +529 25 model.embedding_dim 1.0 +529 25 loss.margin 11.863230330472838 +529 25 loss.adversarial_temperature 0.92343456091845 +529 25 negative_sampler.num_negs_per_pos 23.0 +529 25 training.batch_size 1.0 +529 26 model.embedding_dim 0.0 +529 26 loss.margin 16.75434535012019 +529 26 loss.adversarial_temperature 0.7211159967416443 
+529 26 negative_sampler.num_negs_per_pos 7.0 +529 26 training.batch_size 1.0 +529 27 model.embedding_dim 0.0 +529 27 loss.margin 23.747228547894853 +529 27 loss.adversarial_temperature 0.26171760900423086 +529 27 negative_sampler.num_negs_per_pos 74.0 +529 27 training.batch_size 0.0 +529 28 model.embedding_dim 0.0 +529 28 loss.margin 15.731281369983284 +529 28 loss.adversarial_temperature 0.4316723168069587 +529 28 negative_sampler.num_negs_per_pos 13.0 +529 28 training.batch_size 2.0 +529 29 model.embedding_dim 2.0 +529 29 loss.margin 25.63334901957811 +529 29 loss.adversarial_temperature 0.7659818902293074 +529 29 negative_sampler.num_negs_per_pos 98.0 +529 29 training.batch_size 2.0 +529 30 model.embedding_dim 0.0 +529 30 loss.margin 13.472501063165215 +529 30 loss.adversarial_temperature 0.7016054898679628 +529 30 negative_sampler.num_negs_per_pos 0.0 +529 30 training.batch_size 2.0 +529 31 model.embedding_dim 0.0 +529 31 loss.margin 9.55747117554172 +529 31 loss.adversarial_temperature 0.1550567404997483 +529 31 negative_sampler.num_negs_per_pos 63.0 +529 31 training.batch_size 1.0 +529 32 model.embedding_dim 1.0 +529 32 loss.margin 27.555352448276814 +529 32 loss.adversarial_temperature 0.47725150552530116 +529 32 negative_sampler.num_negs_per_pos 8.0 +529 32 training.batch_size 2.0 +529 33 model.embedding_dim 0.0 +529 33 loss.margin 4.138482262728401 +529 33 loss.adversarial_temperature 0.7497516158740671 +529 33 negative_sampler.num_negs_per_pos 65.0 +529 33 training.batch_size 1.0 +529 34 model.embedding_dim 1.0 +529 34 loss.margin 5.7049498013400735 +529 34 loss.adversarial_temperature 0.8633031565619812 +529 34 negative_sampler.num_negs_per_pos 98.0 +529 34 training.batch_size 2.0 +529 35 model.embedding_dim 0.0 +529 35 loss.margin 18.208359806612822 +529 35 loss.adversarial_temperature 0.3038526907433947 +529 35 negative_sampler.num_negs_per_pos 27.0 +529 35 training.batch_size 1.0 +529 36 model.embedding_dim 2.0 +529 36 loss.margin 9.793904069155632 
+529 36 loss.adversarial_temperature 0.984835211917847 +529 36 negative_sampler.num_negs_per_pos 76.0 +529 36 training.batch_size 2.0 +529 37 model.embedding_dim 0.0 +529 37 loss.margin 12.696044601902459 +529 37 loss.adversarial_temperature 0.8291348691223728 +529 37 negative_sampler.num_negs_per_pos 71.0 +529 37 training.batch_size 0.0 +529 38 model.embedding_dim 0.0 +529 38 loss.margin 27.76552789664973 +529 38 loss.adversarial_temperature 0.8840843736341186 +529 38 negative_sampler.num_negs_per_pos 38.0 +529 38 training.batch_size 1.0 +529 39 model.embedding_dim 2.0 +529 39 loss.margin 27.02913667760457 +529 39 loss.adversarial_temperature 0.5594366833585482 +529 39 negative_sampler.num_negs_per_pos 39.0 +529 39 training.batch_size 0.0 +529 40 model.embedding_dim 2.0 +529 40 loss.margin 14.912284076129515 +529 40 loss.adversarial_temperature 0.2212993627052674 +529 40 negative_sampler.num_negs_per_pos 54.0 +529 40 training.batch_size 1.0 +529 41 model.embedding_dim 2.0 +529 41 loss.margin 10.473888165359812 +529 41 loss.adversarial_temperature 0.3317301702255613 +529 41 negative_sampler.num_negs_per_pos 32.0 +529 41 training.batch_size 1.0 +529 42 model.embedding_dim 0.0 +529 42 loss.margin 26.408973600381493 +529 42 loss.adversarial_temperature 0.8374333065390269 +529 42 negative_sampler.num_negs_per_pos 75.0 +529 42 training.batch_size 1.0 +529 43 model.embedding_dim 1.0 +529 43 loss.margin 7.485241026227328 +529 43 loss.adversarial_temperature 0.8565595272007268 +529 43 negative_sampler.num_negs_per_pos 51.0 +529 43 training.batch_size 1.0 +529 44 model.embedding_dim 0.0 +529 44 loss.margin 26.3797619144032 +529 44 loss.adversarial_temperature 0.6674303291155671 +529 44 negative_sampler.num_negs_per_pos 82.0 +529 44 training.batch_size 2.0 +529 45 model.embedding_dim 2.0 +529 45 loss.margin 29.395740821400555 +529 45 loss.adversarial_temperature 0.1642000489733912 +529 45 negative_sampler.num_negs_per_pos 31.0 +529 45 training.batch_size 1.0 +529 46 
model.embedding_dim 0.0 +529 46 loss.margin 8.55041058668051 +529 46 loss.adversarial_temperature 0.7127036211538851 +529 46 negative_sampler.num_negs_per_pos 9.0 +529 46 training.batch_size 1.0 +529 47 model.embedding_dim 1.0 +529 47 loss.margin 5.541278760714111 +529 47 loss.adversarial_temperature 0.5284818834887409 +529 47 negative_sampler.num_negs_per_pos 80.0 +529 47 training.batch_size 1.0 +529 48 model.embedding_dim 2.0 +529 48 loss.margin 21.072034136990453 +529 48 loss.adversarial_temperature 0.5048728252080759 +529 48 negative_sampler.num_negs_per_pos 65.0 +529 48 training.batch_size 2.0 +529 49 model.embedding_dim 1.0 +529 49 loss.margin 26.067674532042542 +529 49 loss.adversarial_temperature 0.18901851440765843 +529 49 negative_sampler.num_negs_per_pos 32.0 +529 49 training.batch_size 2.0 +529 50 model.embedding_dim 2.0 +529 50 loss.margin 3.0643203856463384 +529 50 loss.adversarial_temperature 0.26920671685573466 +529 50 negative_sampler.num_negs_per_pos 24.0 +529 50 training.batch_size 1.0 +529 51 model.embedding_dim 2.0 +529 51 loss.margin 8.130355743768993 +529 51 loss.adversarial_temperature 0.3032669176291154 +529 51 negative_sampler.num_negs_per_pos 94.0 +529 51 training.batch_size 0.0 +529 52 model.embedding_dim 2.0 +529 52 loss.margin 7.544700311401673 +529 52 loss.adversarial_temperature 0.281433184803988 +529 52 negative_sampler.num_negs_per_pos 20.0 +529 52 training.batch_size 1.0 +529 53 model.embedding_dim 0.0 +529 53 loss.margin 9.21748022714445 +529 53 loss.adversarial_temperature 0.691427300320076 +529 53 negative_sampler.num_negs_per_pos 37.0 +529 53 training.batch_size 0.0 +529 54 model.embedding_dim 2.0 +529 54 loss.margin 17.554397204785644 +529 54 loss.adversarial_temperature 0.6198529926359255 +529 54 negative_sampler.num_negs_per_pos 42.0 +529 54 training.batch_size 0.0 +529 55 model.embedding_dim 1.0 +529 55 loss.margin 19.552986913937332 +529 55 loss.adversarial_temperature 0.15777618116259234 +529 55 
negative_sampler.num_negs_per_pos 31.0 +529 55 training.batch_size 2.0 +529 56 model.embedding_dim 0.0 +529 56 loss.margin 1.611734343385784 +529 56 loss.adversarial_temperature 0.27894652633025663 +529 56 negative_sampler.num_negs_per_pos 13.0 +529 56 training.batch_size 0.0 +529 57 model.embedding_dim 1.0 +529 57 loss.margin 28.009101568371893 +529 57 loss.adversarial_temperature 0.4087099127261039 +529 57 negative_sampler.num_negs_per_pos 68.0 +529 57 training.batch_size 2.0 +529 58 model.embedding_dim 1.0 +529 58 loss.margin 28.532422535737513 +529 58 loss.adversarial_temperature 0.3960536275496386 +529 58 negative_sampler.num_negs_per_pos 35.0 +529 58 training.batch_size 1.0 +529 59 model.embedding_dim 1.0 +529 59 loss.margin 28.123045921920973 +529 59 loss.adversarial_temperature 0.8838024944968658 +529 59 negative_sampler.num_negs_per_pos 59.0 +529 59 training.batch_size 0.0 +529 60 model.embedding_dim 0.0 +529 60 loss.margin 19.154943449431123 +529 60 loss.adversarial_temperature 0.9388167205738837 +529 60 negative_sampler.num_negs_per_pos 30.0 +529 60 training.batch_size 0.0 +529 61 model.embedding_dim 0.0 +529 61 loss.margin 14.15375583638664 +529 61 loss.adversarial_temperature 0.7756595475745957 +529 61 negative_sampler.num_negs_per_pos 68.0 +529 61 training.batch_size 0.0 +529 62 model.embedding_dim 0.0 +529 62 loss.margin 4.396290566885845 +529 62 loss.adversarial_temperature 0.44363250961173184 +529 62 negative_sampler.num_negs_per_pos 59.0 +529 62 training.batch_size 2.0 +529 63 model.embedding_dim 1.0 +529 63 loss.margin 27.623514622608933 +529 63 loss.adversarial_temperature 0.6228132306051712 +529 63 negative_sampler.num_negs_per_pos 26.0 +529 63 training.batch_size 0.0 +529 64 model.embedding_dim 1.0 +529 64 loss.margin 22.346068711393805 +529 64 loss.adversarial_temperature 0.4487057910974007 +529 64 negative_sampler.num_negs_per_pos 24.0 +529 64 training.batch_size 0.0 +529 65 model.embedding_dim 2.0 +529 65 loss.margin 8.256994129492071 +529 
65 loss.adversarial_temperature 0.635841300366122 +529 65 negative_sampler.num_negs_per_pos 19.0 +529 65 training.batch_size 1.0 +529 66 model.embedding_dim 2.0 +529 66 loss.margin 1.7128261266792975 +529 66 loss.adversarial_temperature 0.17589101980419117 +529 66 negative_sampler.num_negs_per_pos 32.0 +529 66 training.batch_size 2.0 +529 67 model.embedding_dim 1.0 +529 67 loss.margin 29.27279889301178 +529 67 loss.adversarial_temperature 0.3298896090192294 +529 67 negative_sampler.num_negs_per_pos 70.0 +529 67 training.batch_size 0.0 +529 68 model.embedding_dim 0.0 +529 68 loss.margin 5.23705367151138 +529 68 loss.adversarial_temperature 0.29433788041129605 +529 68 negative_sampler.num_negs_per_pos 17.0 +529 68 training.batch_size 0.0 +529 69 model.embedding_dim 2.0 +529 69 loss.margin 14.678327833690961 +529 69 loss.adversarial_temperature 0.5514569502121528 +529 69 negative_sampler.num_negs_per_pos 53.0 +529 69 training.batch_size 1.0 +529 70 model.embedding_dim 1.0 +529 70 loss.margin 26.937523069617313 +529 70 loss.adversarial_temperature 0.7186024094273881 +529 70 negative_sampler.num_negs_per_pos 5.0 +529 70 training.batch_size 1.0 +529 71 model.embedding_dim 1.0 +529 71 loss.margin 12.97036109113599 +529 71 loss.adversarial_temperature 0.19506738878369223 +529 71 negative_sampler.num_negs_per_pos 46.0 +529 71 training.batch_size 2.0 +529 72 model.embedding_dim 2.0 +529 72 loss.margin 1.5886540608850477 +529 72 loss.adversarial_temperature 0.7655638090117797 +529 72 negative_sampler.num_negs_per_pos 84.0 +529 72 training.batch_size 2.0 +529 73 model.embedding_dim 1.0 +529 73 loss.margin 23.024120621987095 +529 73 loss.adversarial_temperature 0.7740644576318647 +529 73 negative_sampler.num_negs_per_pos 11.0 +529 73 training.batch_size 2.0 +529 74 model.embedding_dim 0.0 +529 74 loss.margin 16.81177790276719 +529 74 loss.adversarial_temperature 0.5116811657661529 +529 74 negative_sampler.num_negs_per_pos 19.0 +529 74 training.batch_size 2.0 +529 75 
model.embedding_dim 2.0 +529 75 loss.margin 12.576020814346103 +529 75 loss.adversarial_temperature 0.91885220960753 +529 75 negative_sampler.num_negs_per_pos 37.0 +529 75 training.batch_size 1.0 +529 76 model.embedding_dim 2.0 +529 76 loss.margin 16.215579212116495 +529 76 loss.adversarial_temperature 0.7827092870954034 +529 76 negative_sampler.num_negs_per_pos 74.0 +529 76 training.batch_size 1.0 +529 77 model.embedding_dim 2.0 +529 77 loss.margin 8.189048861616886 +529 77 loss.adversarial_temperature 0.9151127818707784 +529 77 negative_sampler.num_negs_per_pos 46.0 +529 77 training.batch_size 1.0 +529 78 model.embedding_dim 0.0 +529 78 loss.margin 21.704657001401493 +529 78 loss.adversarial_temperature 0.3646463269590482 +529 78 negative_sampler.num_negs_per_pos 18.0 +529 78 training.batch_size 2.0 +529 79 model.embedding_dim 2.0 +529 79 loss.margin 28.5274935264444 +529 79 loss.adversarial_temperature 0.2557632741958251 +529 79 negative_sampler.num_negs_per_pos 21.0 +529 79 training.batch_size 2.0 +529 80 model.embedding_dim 0.0 +529 80 loss.margin 5.216477609312672 +529 80 loss.adversarial_temperature 0.27440899976145994 +529 80 negative_sampler.num_negs_per_pos 64.0 +529 80 training.batch_size 0.0 +529 81 model.embedding_dim 1.0 +529 81 loss.margin 24.90203040665188 +529 81 loss.adversarial_temperature 0.3924408457666366 +529 81 negative_sampler.num_negs_per_pos 13.0 +529 81 training.batch_size 2.0 +529 82 model.embedding_dim 2.0 +529 82 loss.margin 23.210285637590218 +529 82 loss.adversarial_temperature 0.4120948224127997 +529 82 negative_sampler.num_negs_per_pos 25.0 +529 82 training.batch_size 0.0 +529 83 model.embedding_dim 0.0 +529 83 loss.margin 2.7700860741717666 +529 83 loss.adversarial_temperature 0.5787648443492626 +529 83 negative_sampler.num_negs_per_pos 69.0 +529 83 training.batch_size 2.0 +529 84 model.embedding_dim 0.0 +529 84 loss.margin 2.952773975150493 +529 84 loss.adversarial_temperature 0.16162842441195205 +529 84 
negative_sampler.num_negs_per_pos 44.0 +529 84 training.batch_size 1.0 +529 85 model.embedding_dim 0.0 +529 85 loss.margin 4.259635146686171 +529 85 loss.adversarial_temperature 0.2374957271721305 +529 85 negative_sampler.num_negs_per_pos 95.0 +529 85 training.batch_size 1.0 +529 86 model.embedding_dim 1.0 +529 86 loss.margin 20.690875804311663 +529 86 loss.adversarial_temperature 0.3532897916431478 +529 86 negative_sampler.num_negs_per_pos 85.0 +529 86 training.batch_size 2.0 +529 87 model.embedding_dim 0.0 +529 87 loss.margin 12.541122708561408 +529 87 loss.adversarial_temperature 0.14087756860580097 +529 87 negative_sampler.num_negs_per_pos 19.0 +529 87 training.batch_size 2.0 +529 88 model.embedding_dim 2.0 +529 88 loss.margin 6.337057869711444 +529 88 loss.adversarial_temperature 0.46407320346912595 +529 88 negative_sampler.num_negs_per_pos 0.0 +529 88 training.batch_size 0.0 +529 89 model.embedding_dim 2.0 +529 89 loss.margin 19.31454478229126 +529 89 loss.adversarial_temperature 0.48567258360631915 +529 89 negative_sampler.num_negs_per_pos 74.0 +529 89 training.batch_size 2.0 +529 90 model.embedding_dim 2.0 +529 90 loss.margin 24.447018692824138 +529 90 loss.adversarial_temperature 0.7790491066903577 +529 90 negative_sampler.num_negs_per_pos 15.0 +529 90 training.batch_size 2.0 +529 91 model.embedding_dim 1.0 +529 91 loss.margin 13.284852500007124 +529 91 loss.adversarial_temperature 0.13285535481017066 +529 91 negative_sampler.num_negs_per_pos 63.0 +529 91 training.batch_size 0.0 +529 92 model.embedding_dim 0.0 +529 92 loss.margin 19.84638311307977 +529 92 loss.adversarial_temperature 0.2112550670754778 +529 92 negative_sampler.num_negs_per_pos 3.0 +529 92 training.batch_size 1.0 +529 93 model.embedding_dim 2.0 +529 93 loss.margin 16.517457066328884 +529 93 loss.adversarial_temperature 0.967062620819182 +529 93 negative_sampler.num_negs_per_pos 64.0 +529 93 training.batch_size 1.0 +529 94 model.embedding_dim 0.0 +529 94 loss.margin 13.811542807178318 +529 
94 loss.adversarial_temperature 0.6469331805055056 +529 94 negative_sampler.num_negs_per_pos 19.0 +529 94 training.batch_size 1.0 +529 95 model.embedding_dim 2.0 +529 95 loss.margin 6.962102078727171 +529 95 loss.adversarial_temperature 0.5609173999159233 +529 95 negative_sampler.num_negs_per_pos 29.0 +529 95 training.batch_size 0.0 +529 96 model.embedding_dim 2.0 +529 96 loss.margin 5.852965188134694 +529 96 loss.adversarial_temperature 0.5444466118569837 +529 96 negative_sampler.num_negs_per_pos 95.0 +529 96 training.batch_size 1.0 +529 97 model.embedding_dim 1.0 +529 97 loss.margin 24.193607991855888 +529 97 loss.adversarial_temperature 0.4573106253800412 +529 97 negative_sampler.num_negs_per_pos 23.0 +529 97 training.batch_size 0.0 +529 98 model.embedding_dim 0.0 +529 98 loss.margin 9.76861633080162 +529 98 loss.adversarial_temperature 0.32285424846734584 +529 98 negative_sampler.num_negs_per_pos 45.0 +529 98 training.batch_size 2.0 +529 99 model.embedding_dim 0.0 +529 99 loss.margin 19.40241411130493 +529 99 loss.adversarial_temperature 0.36404570175132667 +529 99 negative_sampler.num_negs_per_pos 33.0 +529 99 training.batch_size 1.0 +529 100 model.embedding_dim 1.0 +529 100 loss.margin 6.540746219281827 +529 100 loss.adversarial_temperature 0.3398547018933792 +529 100 negative_sampler.num_negs_per_pos 44.0 +529 100 training.batch_size 0.0 +529 1 dataset """kinships""" +529 1 model """rescal""" +529 1 loss """nssa""" +529 1 regularizer """no""" +529 1 optimizer """adadelta""" +529 1 training_loop """owa""" +529 1 negative_sampler """basic""" +529 1 evaluator """rankbased""" +529 2 dataset """kinships""" +529 2 model """rescal""" +529 2 loss """nssa""" +529 2 regularizer """no""" +529 2 optimizer """adadelta""" +529 2 training_loop """owa""" +529 2 negative_sampler """basic""" +529 2 evaluator """rankbased""" +529 3 dataset """kinships""" +529 3 model """rescal""" +529 3 loss """nssa""" +529 3 regularizer """no""" +529 3 optimizer """adadelta""" +529 3 
training_loop """owa""" +529 3 negative_sampler """basic""" +529 3 evaluator """rankbased""" +529 4 dataset """kinships""" +529 4 model """rescal""" +529 4 loss """nssa""" +529 4 regularizer """no""" +529 4 optimizer """adadelta""" +529 4 training_loop """owa""" +529 4 negative_sampler """basic""" +529 4 evaluator """rankbased""" +529 5 dataset """kinships""" +529 5 model """rescal""" +529 5 loss """nssa""" +529 5 regularizer """no""" +529 5 optimizer """adadelta""" +529 5 training_loop """owa""" +529 5 negative_sampler """basic""" +529 5 evaluator """rankbased""" +529 6 dataset """kinships""" +529 6 model """rescal""" +529 6 loss """nssa""" +529 6 regularizer """no""" +529 6 optimizer """adadelta""" +529 6 training_loop """owa""" +529 6 negative_sampler """basic""" +529 6 evaluator """rankbased""" +529 7 dataset """kinships""" +529 7 model """rescal""" +529 7 loss """nssa""" +529 7 regularizer """no""" +529 7 optimizer """adadelta""" +529 7 training_loop """owa""" +529 7 negative_sampler """basic""" +529 7 evaluator """rankbased""" +529 8 dataset """kinships""" +529 8 model """rescal""" +529 8 loss """nssa""" +529 8 regularizer """no""" +529 8 optimizer """adadelta""" +529 8 training_loop """owa""" +529 8 negative_sampler """basic""" +529 8 evaluator """rankbased""" +529 9 dataset """kinships""" +529 9 model """rescal""" +529 9 loss """nssa""" +529 9 regularizer """no""" +529 9 optimizer """adadelta""" +529 9 training_loop """owa""" +529 9 negative_sampler """basic""" +529 9 evaluator """rankbased""" +529 10 dataset """kinships""" +529 10 model """rescal""" +529 10 loss """nssa""" +529 10 regularizer """no""" +529 10 optimizer """adadelta""" +529 10 training_loop """owa""" +529 10 negative_sampler """basic""" +529 10 evaluator """rankbased""" +529 11 dataset """kinships""" +529 11 model """rescal""" +529 11 loss """nssa""" +529 11 regularizer """no""" +529 11 optimizer """adadelta""" +529 11 training_loop """owa""" +529 11 negative_sampler """basic""" +529 11 
evaluator """rankbased""" +529 12 dataset """kinships""" +529 12 model """rescal""" +529 12 loss """nssa""" +529 12 regularizer """no""" +529 12 optimizer """adadelta""" +529 12 training_loop """owa""" +529 12 negative_sampler """basic""" +529 12 evaluator """rankbased""" +529 13 dataset """kinships""" +529 13 model """rescal""" +529 13 loss """nssa""" +529 13 regularizer """no""" +529 13 optimizer """adadelta""" +529 13 training_loop """owa""" +529 13 negative_sampler """basic""" +529 13 evaluator """rankbased""" +529 14 dataset """kinships""" +529 14 model """rescal""" +529 14 loss """nssa""" +529 14 regularizer """no""" +529 14 optimizer """adadelta""" +529 14 training_loop """owa""" +529 14 negative_sampler """basic""" +529 14 evaluator """rankbased""" +529 15 dataset """kinships""" +529 15 model """rescal""" +529 15 loss """nssa""" +529 15 regularizer """no""" +529 15 optimizer """adadelta""" +529 15 training_loop """owa""" +529 15 negative_sampler """basic""" +529 15 evaluator """rankbased""" +529 16 dataset """kinships""" +529 16 model """rescal""" +529 16 loss """nssa""" +529 16 regularizer """no""" +529 16 optimizer """adadelta""" +529 16 training_loop """owa""" +529 16 negative_sampler """basic""" +529 16 evaluator """rankbased""" +529 17 dataset """kinships""" +529 17 model """rescal""" +529 17 loss """nssa""" +529 17 regularizer """no""" +529 17 optimizer """adadelta""" +529 17 training_loop """owa""" +529 17 negative_sampler """basic""" +529 17 evaluator """rankbased""" +529 18 dataset """kinships""" +529 18 model """rescal""" +529 18 loss """nssa""" +529 18 regularizer """no""" +529 18 optimizer """adadelta""" +529 18 training_loop """owa""" +529 18 negative_sampler """basic""" +529 18 evaluator """rankbased""" +529 19 dataset """kinships""" +529 19 model """rescal""" +529 19 loss """nssa""" +529 19 regularizer """no""" +529 19 optimizer """adadelta""" +529 19 training_loop """owa""" +529 19 negative_sampler """basic""" +529 19 evaluator 
"""rankbased""" +529 20 dataset """kinships""" +529 20 model """rescal""" +529 20 loss """nssa""" +529 20 regularizer """no""" +529 20 optimizer """adadelta""" +529 20 training_loop """owa""" +529 20 negative_sampler """basic""" +529 20 evaluator """rankbased""" +529 21 dataset """kinships""" +529 21 model """rescal""" +529 21 loss """nssa""" +529 21 regularizer """no""" +529 21 optimizer """adadelta""" +529 21 training_loop """owa""" +529 21 negative_sampler """basic""" +529 21 evaluator """rankbased""" +529 22 dataset """kinships""" +529 22 model """rescal""" +529 22 loss """nssa""" +529 22 regularizer """no""" +529 22 optimizer """adadelta""" +529 22 training_loop """owa""" +529 22 negative_sampler """basic""" +529 22 evaluator """rankbased""" +529 23 dataset """kinships""" +529 23 model """rescal""" +529 23 loss """nssa""" +529 23 regularizer """no""" +529 23 optimizer """adadelta""" +529 23 training_loop """owa""" +529 23 negative_sampler """basic""" +529 23 evaluator """rankbased""" +529 24 dataset """kinships""" +529 24 model """rescal""" +529 24 loss """nssa""" +529 24 regularizer """no""" +529 24 optimizer """adadelta""" +529 24 training_loop """owa""" +529 24 negative_sampler """basic""" +529 24 evaluator """rankbased""" +529 25 dataset """kinships""" +529 25 model """rescal""" +529 25 loss """nssa""" +529 25 regularizer """no""" +529 25 optimizer """adadelta""" +529 25 training_loop """owa""" +529 25 negative_sampler """basic""" +529 25 evaluator """rankbased""" +529 26 dataset """kinships""" +529 26 model """rescal""" +529 26 loss """nssa""" +529 26 regularizer """no""" +529 26 optimizer """adadelta""" +529 26 training_loop """owa""" +529 26 negative_sampler """basic""" +529 26 evaluator """rankbased""" +529 27 dataset """kinships""" +529 27 model """rescal""" +529 27 loss """nssa""" +529 27 regularizer """no""" +529 27 optimizer """adadelta""" +529 27 training_loop """owa""" +529 27 negative_sampler """basic""" +529 27 evaluator """rankbased""" +529 28 
dataset """kinships""" +529 28 model """rescal""" +529 28 loss """nssa""" +529 28 regularizer """no""" +529 28 optimizer """adadelta""" +529 28 training_loop """owa""" +529 28 negative_sampler """basic""" +529 28 evaluator """rankbased""" +529 29 dataset """kinships""" +529 29 model """rescal""" +529 29 loss """nssa""" +529 29 regularizer """no""" +529 29 optimizer """adadelta""" +529 29 training_loop """owa""" +529 29 negative_sampler """basic""" +529 29 evaluator """rankbased""" +529 30 dataset """kinships""" +529 30 model """rescal""" +529 30 loss """nssa""" +529 30 regularizer """no""" +529 30 optimizer """adadelta""" +529 30 training_loop """owa""" +529 30 negative_sampler """basic""" +529 30 evaluator """rankbased""" +529 31 dataset """kinships""" +529 31 model """rescal""" +529 31 loss """nssa""" +529 31 regularizer """no""" +529 31 optimizer """adadelta""" +529 31 training_loop """owa""" +529 31 negative_sampler """basic""" +529 31 evaluator """rankbased""" +529 32 dataset """kinships""" +529 32 model """rescal""" +529 32 loss """nssa""" +529 32 regularizer """no""" +529 32 optimizer """adadelta""" +529 32 training_loop """owa""" +529 32 negative_sampler """basic""" +529 32 evaluator """rankbased""" +529 33 dataset """kinships""" +529 33 model """rescal""" +529 33 loss """nssa""" +529 33 regularizer """no""" +529 33 optimizer """adadelta""" +529 33 training_loop """owa""" +529 33 negative_sampler """basic""" +529 33 evaluator """rankbased""" +529 34 dataset """kinships""" +529 34 model """rescal""" +529 34 loss """nssa""" +529 34 regularizer """no""" +529 34 optimizer """adadelta""" +529 34 training_loop """owa""" +529 34 negative_sampler """basic""" +529 34 evaluator """rankbased""" +529 35 dataset """kinships""" +529 35 model """rescal""" +529 35 loss """nssa""" +529 35 regularizer """no""" +529 35 optimizer """adadelta""" +529 35 training_loop """owa""" +529 35 negative_sampler """basic""" +529 35 evaluator """rankbased""" +529 36 dataset """kinships""" 
+529 36 model """rescal""" +529 36 loss """nssa""" +529 36 regularizer """no""" +529 36 optimizer """adadelta""" +529 36 training_loop """owa""" +529 36 negative_sampler """basic""" +529 36 evaluator """rankbased""" +529 37 dataset """kinships""" +529 37 model """rescal""" +529 37 loss """nssa""" +529 37 regularizer """no""" +529 37 optimizer """adadelta""" +529 37 training_loop """owa""" +529 37 negative_sampler """basic""" +529 37 evaluator """rankbased""" +529 38 dataset """kinships""" +529 38 model """rescal""" +529 38 loss """nssa""" +529 38 regularizer """no""" +529 38 optimizer """adadelta""" +529 38 training_loop """owa""" +529 38 negative_sampler """basic""" +529 38 evaluator """rankbased""" +529 39 dataset """kinships""" +529 39 model """rescal""" +529 39 loss """nssa""" +529 39 regularizer """no""" +529 39 optimizer """adadelta""" +529 39 training_loop """owa""" +529 39 negative_sampler """basic""" +529 39 evaluator """rankbased""" +529 40 dataset """kinships""" +529 40 model """rescal""" +529 40 loss """nssa""" +529 40 regularizer """no""" +529 40 optimizer """adadelta""" +529 40 training_loop """owa""" +529 40 negative_sampler """basic""" +529 40 evaluator """rankbased""" +529 41 dataset """kinships""" +529 41 model """rescal""" +529 41 loss """nssa""" +529 41 regularizer """no""" +529 41 optimizer """adadelta""" +529 41 training_loop """owa""" +529 41 negative_sampler """basic""" +529 41 evaluator """rankbased""" +529 42 dataset """kinships""" +529 42 model """rescal""" +529 42 loss """nssa""" +529 42 regularizer """no""" +529 42 optimizer """adadelta""" +529 42 training_loop """owa""" +529 42 negative_sampler """basic""" +529 42 evaluator """rankbased""" +529 43 dataset """kinships""" +529 43 model """rescal""" +529 43 loss """nssa""" +529 43 regularizer """no""" +529 43 optimizer """adadelta""" +529 43 training_loop """owa""" +529 43 negative_sampler """basic""" +529 43 evaluator """rankbased""" +529 44 dataset """kinships""" +529 44 model 
"""rescal""" +529 44 loss """nssa""" +529 44 regularizer """no""" +529 44 optimizer """adadelta""" +529 44 training_loop """owa""" +529 44 negative_sampler """basic""" +529 44 evaluator """rankbased""" +529 45 dataset """kinships""" +529 45 model """rescal""" +529 45 loss """nssa""" +529 45 regularizer """no""" +529 45 optimizer """adadelta""" +529 45 training_loop """owa""" +529 45 negative_sampler """basic""" +529 45 evaluator """rankbased""" +529 46 dataset """kinships""" +529 46 model """rescal""" +529 46 loss """nssa""" +529 46 regularizer """no""" +529 46 optimizer """adadelta""" +529 46 training_loop """owa""" +529 46 negative_sampler """basic""" +529 46 evaluator """rankbased""" +529 47 dataset """kinships""" +529 47 model """rescal""" +529 47 loss """nssa""" +529 47 regularizer """no""" +529 47 optimizer """adadelta""" +529 47 training_loop """owa""" +529 47 negative_sampler """basic""" +529 47 evaluator """rankbased""" +529 48 dataset """kinships""" +529 48 model """rescal""" +529 48 loss """nssa""" +529 48 regularizer """no""" +529 48 optimizer """adadelta""" +529 48 training_loop """owa""" +529 48 negative_sampler """basic""" +529 48 evaluator """rankbased""" +529 49 dataset """kinships""" +529 49 model """rescal""" +529 49 loss """nssa""" +529 49 regularizer """no""" +529 49 optimizer """adadelta""" +529 49 training_loop """owa""" +529 49 negative_sampler """basic""" +529 49 evaluator """rankbased""" +529 50 dataset """kinships""" +529 50 model """rescal""" +529 50 loss """nssa""" +529 50 regularizer """no""" +529 50 optimizer """adadelta""" +529 50 training_loop """owa""" +529 50 negative_sampler """basic""" +529 50 evaluator """rankbased""" +529 51 dataset """kinships""" +529 51 model """rescal""" +529 51 loss """nssa""" +529 51 regularizer """no""" +529 51 optimizer """adadelta""" +529 51 training_loop """owa""" +529 51 negative_sampler """basic""" +529 51 evaluator """rankbased""" +529 52 dataset """kinships""" +529 52 model """rescal""" +529 52 
loss """nssa""" +529 52 regularizer """no""" +529 52 optimizer """adadelta""" +529 52 training_loop """owa""" +529 52 negative_sampler """basic""" +529 52 evaluator """rankbased""" +529 53 dataset """kinships""" +529 53 model """rescal""" +529 53 loss """nssa""" +529 53 regularizer """no""" +529 53 optimizer """adadelta""" +529 53 training_loop """owa""" +529 53 negative_sampler """basic""" +529 53 evaluator """rankbased""" +529 54 dataset """kinships""" +529 54 model """rescal""" +529 54 loss """nssa""" +529 54 regularizer """no""" +529 54 optimizer """adadelta""" +529 54 training_loop """owa""" +529 54 negative_sampler """basic""" +529 54 evaluator """rankbased""" +529 55 dataset """kinships""" +529 55 model """rescal""" +529 55 loss """nssa""" +529 55 regularizer """no""" +529 55 optimizer """adadelta""" +529 55 training_loop """owa""" +529 55 negative_sampler """basic""" +529 55 evaluator """rankbased""" +529 56 dataset """kinships""" +529 56 model """rescal""" +529 56 loss """nssa""" +529 56 regularizer """no""" +529 56 optimizer """adadelta""" +529 56 training_loop """owa""" +529 56 negative_sampler """basic""" +529 56 evaluator """rankbased""" +529 57 dataset """kinships""" +529 57 model """rescal""" +529 57 loss """nssa""" +529 57 regularizer """no""" +529 57 optimizer """adadelta""" +529 57 training_loop """owa""" +529 57 negative_sampler """basic""" +529 57 evaluator """rankbased""" +529 58 dataset """kinships""" +529 58 model """rescal""" +529 58 loss """nssa""" +529 58 regularizer """no""" +529 58 optimizer """adadelta""" +529 58 training_loop """owa""" +529 58 negative_sampler """basic""" +529 58 evaluator """rankbased""" +529 59 dataset """kinships""" +529 59 model """rescal""" +529 59 loss """nssa""" +529 59 regularizer """no""" +529 59 optimizer """adadelta""" +529 59 training_loop """owa""" +529 59 negative_sampler """basic""" +529 59 evaluator """rankbased""" +529 60 dataset """kinships""" +529 60 model """rescal""" +529 60 loss """nssa""" +529 60 
regularizer """no""" +529 60 optimizer """adadelta""" +529 60 training_loop """owa""" +529 60 negative_sampler """basic""" +529 60 evaluator """rankbased""" +529 61 dataset """kinships""" +529 61 model """rescal""" +529 61 loss """nssa""" +529 61 regularizer """no""" +529 61 optimizer """adadelta""" +529 61 training_loop """owa""" +529 61 negative_sampler """basic""" +529 61 evaluator """rankbased""" +529 62 dataset """kinships""" +529 62 model """rescal""" +529 62 loss """nssa""" +529 62 regularizer """no""" +529 62 optimizer """adadelta""" +529 62 training_loop """owa""" +529 62 negative_sampler """basic""" +529 62 evaluator """rankbased""" +529 63 dataset """kinships""" +529 63 model """rescal""" +529 63 loss """nssa""" +529 63 regularizer """no""" +529 63 optimizer """adadelta""" +529 63 training_loop """owa""" +529 63 negative_sampler """basic""" +529 63 evaluator """rankbased""" +529 64 dataset """kinships""" +529 64 model """rescal""" +529 64 loss """nssa""" +529 64 regularizer """no""" +529 64 optimizer """adadelta""" +529 64 training_loop """owa""" +529 64 negative_sampler """basic""" +529 64 evaluator """rankbased""" +529 65 dataset """kinships""" +529 65 model """rescal""" +529 65 loss """nssa""" +529 65 regularizer """no""" +529 65 optimizer """adadelta""" +529 65 training_loop """owa""" +529 65 negative_sampler """basic""" +529 65 evaluator """rankbased""" +529 66 dataset """kinships""" +529 66 model """rescal""" +529 66 loss """nssa""" +529 66 regularizer """no""" +529 66 optimizer """adadelta""" +529 66 training_loop """owa""" +529 66 negative_sampler """basic""" +529 66 evaluator """rankbased""" +529 67 dataset """kinships""" +529 67 model """rescal""" +529 67 loss """nssa""" +529 67 regularizer """no""" +529 67 optimizer """adadelta""" +529 67 training_loop """owa""" +529 67 negative_sampler """basic""" +529 67 evaluator """rankbased""" +529 68 dataset """kinships""" +529 68 model """rescal""" +529 68 loss """nssa""" +529 68 regularizer """no""" 
+529 68 optimizer """adadelta""" +529 68 training_loop """owa""" +529 68 negative_sampler """basic""" +529 68 evaluator """rankbased""" +529 69 dataset """kinships""" +529 69 model """rescal""" +529 69 loss """nssa""" +529 69 regularizer """no""" +529 69 optimizer """adadelta""" +529 69 training_loop """owa""" +529 69 negative_sampler """basic""" +529 69 evaluator """rankbased""" +529 70 dataset """kinships""" +529 70 model """rescal""" +529 70 loss """nssa""" +529 70 regularizer """no""" +529 70 optimizer """adadelta""" +529 70 training_loop """owa""" +529 70 negative_sampler """basic""" +529 70 evaluator """rankbased""" +529 71 dataset """kinships""" +529 71 model """rescal""" +529 71 loss """nssa""" +529 71 regularizer """no""" +529 71 optimizer """adadelta""" +529 71 training_loop """owa""" +529 71 negative_sampler """basic""" +529 71 evaluator """rankbased""" +529 72 dataset """kinships""" +529 72 model """rescal""" +529 72 loss """nssa""" +529 72 regularizer """no""" +529 72 optimizer """adadelta""" +529 72 training_loop """owa""" +529 72 negative_sampler """basic""" +529 72 evaluator """rankbased""" +529 73 dataset """kinships""" +529 73 model """rescal""" +529 73 loss """nssa""" +529 73 regularizer """no""" +529 73 optimizer """adadelta""" +529 73 training_loop """owa""" +529 73 negative_sampler """basic""" +529 73 evaluator """rankbased""" +529 74 dataset """kinships""" +529 74 model """rescal""" +529 74 loss """nssa""" +529 74 regularizer """no""" +529 74 optimizer """adadelta""" +529 74 training_loop """owa""" +529 74 negative_sampler """basic""" +529 74 evaluator """rankbased""" +529 75 dataset """kinships""" +529 75 model """rescal""" +529 75 loss """nssa""" +529 75 regularizer """no""" +529 75 optimizer """adadelta""" +529 75 training_loop """owa""" +529 75 negative_sampler """basic""" +529 75 evaluator """rankbased""" +529 76 dataset """kinships""" +529 76 model """rescal""" +529 76 loss """nssa""" +529 76 regularizer """no""" +529 76 optimizer 
"""adadelta""" +529 76 training_loop """owa""" +529 76 negative_sampler """basic""" +529 76 evaluator """rankbased""" +529 77 dataset """kinships""" +529 77 model """rescal""" +529 77 loss """nssa""" +529 77 regularizer """no""" +529 77 optimizer """adadelta""" +529 77 training_loop """owa""" +529 77 negative_sampler """basic""" +529 77 evaluator """rankbased""" +529 78 dataset """kinships""" +529 78 model """rescal""" +529 78 loss """nssa""" +529 78 regularizer """no""" +529 78 optimizer """adadelta""" +529 78 training_loop """owa""" +529 78 negative_sampler """basic""" +529 78 evaluator """rankbased""" +529 79 dataset """kinships""" +529 79 model """rescal""" +529 79 loss """nssa""" +529 79 regularizer """no""" +529 79 optimizer """adadelta""" +529 79 training_loop """owa""" +529 79 negative_sampler """basic""" +529 79 evaluator """rankbased""" +529 80 dataset """kinships""" +529 80 model """rescal""" +529 80 loss """nssa""" +529 80 regularizer """no""" +529 80 optimizer """adadelta""" +529 80 training_loop """owa""" +529 80 negative_sampler """basic""" +529 80 evaluator """rankbased""" +529 81 dataset """kinships""" +529 81 model """rescal""" +529 81 loss """nssa""" +529 81 regularizer """no""" +529 81 optimizer """adadelta""" +529 81 training_loop """owa""" +529 81 negative_sampler """basic""" +529 81 evaluator """rankbased""" +529 82 dataset """kinships""" +529 82 model """rescal""" +529 82 loss """nssa""" +529 82 regularizer """no""" +529 82 optimizer """adadelta""" +529 82 training_loop """owa""" +529 82 negative_sampler """basic""" +529 82 evaluator """rankbased""" +529 83 dataset """kinships""" +529 83 model """rescal""" +529 83 loss """nssa""" +529 83 regularizer """no""" +529 83 optimizer """adadelta""" +529 83 training_loop """owa""" +529 83 negative_sampler """basic""" +529 83 evaluator """rankbased""" +529 84 dataset """kinships""" +529 84 model """rescal""" +529 84 loss """nssa""" +529 84 regularizer """no""" +529 84 optimizer """adadelta""" +529 84 
training_loop """owa""" +529 84 negative_sampler """basic""" +529 84 evaluator """rankbased""" +529 85 dataset """kinships""" +529 85 model """rescal""" +529 85 loss """nssa""" +529 85 regularizer """no""" +529 85 optimizer """adadelta""" +529 85 training_loop """owa""" +529 85 negative_sampler """basic""" +529 85 evaluator """rankbased""" +529 86 dataset """kinships""" +529 86 model """rescal""" +529 86 loss """nssa""" +529 86 regularizer """no""" +529 86 optimizer """adadelta""" +529 86 training_loop """owa""" +529 86 negative_sampler """basic""" +529 86 evaluator """rankbased""" +529 87 dataset """kinships""" +529 87 model """rescal""" +529 87 loss """nssa""" +529 87 regularizer """no""" +529 87 optimizer """adadelta""" +529 87 training_loop """owa""" +529 87 negative_sampler """basic""" +529 87 evaluator """rankbased""" +529 88 dataset """kinships""" +529 88 model """rescal""" +529 88 loss """nssa""" +529 88 regularizer """no""" +529 88 optimizer """adadelta""" +529 88 training_loop """owa""" +529 88 negative_sampler """basic""" +529 88 evaluator """rankbased""" +529 89 dataset """kinships""" +529 89 model """rescal""" +529 89 loss """nssa""" +529 89 regularizer """no""" +529 89 optimizer """adadelta""" +529 89 training_loop """owa""" +529 89 negative_sampler """basic""" +529 89 evaluator """rankbased""" +529 90 dataset """kinships""" +529 90 model """rescal""" +529 90 loss """nssa""" +529 90 regularizer """no""" +529 90 optimizer """adadelta""" +529 90 training_loop """owa""" +529 90 negative_sampler """basic""" +529 90 evaluator """rankbased""" +529 91 dataset """kinships""" +529 91 model """rescal""" +529 91 loss """nssa""" +529 91 regularizer """no""" +529 91 optimizer """adadelta""" +529 91 training_loop """owa""" +529 91 negative_sampler """basic""" +529 91 evaluator """rankbased""" +529 92 dataset """kinships""" +529 92 model """rescal""" +529 92 loss """nssa""" +529 92 regularizer """no""" +529 92 optimizer """adadelta""" +529 92 training_loop """owa""" 
+529 92 negative_sampler """basic""" +529 92 evaluator """rankbased""" +529 93 dataset """kinships""" +529 93 model """rescal""" +529 93 loss """nssa""" +529 93 regularizer """no""" +529 93 optimizer """adadelta""" +529 93 training_loop """owa""" +529 93 negative_sampler """basic""" +529 93 evaluator """rankbased""" +529 94 dataset """kinships""" +529 94 model """rescal""" +529 94 loss """nssa""" +529 94 regularizer """no""" +529 94 optimizer """adadelta""" +529 94 training_loop """owa""" +529 94 negative_sampler """basic""" +529 94 evaluator """rankbased""" +529 95 dataset """kinships""" +529 95 model """rescal""" +529 95 loss """nssa""" +529 95 regularizer """no""" +529 95 optimizer """adadelta""" +529 95 training_loop """owa""" +529 95 negative_sampler """basic""" +529 95 evaluator """rankbased""" +529 96 dataset """kinships""" +529 96 model """rescal""" +529 96 loss """nssa""" +529 96 regularizer """no""" +529 96 optimizer """adadelta""" +529 96 training_loop """owa""" +529 96 negative_sampler """basic""" +529 96 evaluator """rankbased""" +529 97 dataset """kinships""" +529 97 model """rescal""" +529 97 loss """nssa""" +529 97 regularizer """no""" +529 97 optimizer """adadelta""" +529 97 training_loop """owa""" +529 97 negative_sampler """basic""" +529 97 evaluator """rankbased""" +529 98 dataset """kinships""" +529 98 model """rescal""" +529 98 loss """nssa""" +529 98 regularizer """no""" +529 98 optimizer """adadelta""" +529 98 training_loop """owa""" +529 98 negative_sampler """basic""" +529 98 evaluator """rankbased""" +529 99 dataset """kinships""" +529 99 model """rescal""" +529 99 loss """nssa""" +529 99 regularizer """no""" +529 99 optimizer """adadelta""" +529 99 training_loop """owa""" +529 99 negative_sampler """basic""" +529 99 evaluator """rankbased""" +529 100 dataset """kinships""" +529 100 model """rescal""" +529 100 loss """nssa""" +529 100 regularizer """no""" +529 100 optimizer """adadelta""" +529 100 training_loop """owa""" +529 100 
negative_sampler """basic""" +529 100 evaluator """rankbased""" +530 1 model.embedding_dim 0.0 +530 1 loss.margin 12.90800031277018 +530 1 loss.adversarial_temperature 0.4769910553787624 +530 1 negative_sampler.num_negs_per_pos 62.0 +530 1 training.batch_size 2.0 +530 2 model.embedding_dim 1.0 +530 2 loss.margin 14.461639375717251 +530 2 loss.adversarial_temperature 0.5555257649514646 +530 2 negative_sampler.num_negs_per_pos 65.0 +530 2 training.batch_size 1.0 +530 3 model.embedding_dim 2.0 +530 3 loss.margin 25.407257758464688 +530 3 loss.adversarial_temperature 0.22014259003551456 +530 3 negative_sampler.num_negs_per_pos 97.0 +530 3 training.batch_size 2.0 +530 4 model.embedding_dim 1.0 +530 4 loss.margin 28.95385762327501 +530 4 loss.adversarial_temperature 0.6827203796885815 +530 4 negative_sampler.num_negs_per_pos 19.0 +530 4 training.batch_size 0.0 +530 5 model.embedding_dim 0.0 +530 5 loss.margin 15.760013409385087 +530 5 loss.adversarial_temperature 0.5879988757441919 +530 5 negative_sampler.num_negs_per_pos 0.0 +530 5 training.batch_size 2.0 +530 6 model.embedding_dim 1.0 +530 6 loss.margin 21.99309473026903 +530 6 loss.adversarial_temperature 0.5895179447043527 +530 6 negative_sampler.num_negs_per_pos 64.0 +530 6 training.batch_size 2.0 +530 7 model.embedding_dim 0.0 +530 7 loss.margin 1.033817948729878 +530 7 loss.adversarial_temperature 0.5836271493772401 +530 7 negative_sampler.num_negs_per_pos 15.0 +530 7 training.batch_size 2.0 +530 8 model.embedding_dim 1.0 +530 8 loss.margin 12.579309959089327 +530 8 loss.adversarial_temperature 0.8751609965272255 +530 8 negative_sampler.num_negs_per_pos 77.0 +530 8 training.batch_size 1.0 +530 9 model.embedding_dim 0.0 +530 9 loss.margin 1.8806375428997582 +530 9 loss.adversarial_temperature 0.5705770028945223 +530 9 negative_sampler.num_negs_per_pos 41.0 +530 9 training.batch_size 1.0 +530 10 model.embedding_dim 2.0 +530 10 loss.margin 8.803471952957612 +530 10 loss.adversarial_temperature 0.193676996174975 +530 
10 negative_sampler.num_negs_per_pos 87.0 +530 10 training.batch_size 1.0 +530 11 model.embedding_dim 1.0 +530 11 loss.margin 22.99208055085363 +530 11 loss.adversarial_temperature 0.5948576704989453 +530 11 negative_sampler.num_negs_per_pos 77.0 +530 11 training.batch_size 1.0 +530 12 model.embedding_dim 1.0 +530 12 loss.margin 2.499671009504206 +530 12 loss.adversarial_temperature 0.4078401263756797 +530 12 negative_sampler.num_negs_per_pos 47.0 +530 12 training.batch_size 2.0 +530 13 model.embedding_dim 2.0 +530 13 loss.margin 2.858352288493491 +530 13 loss.adversarial_temperature 0.36064428892425615 +530 13 negative_sampler.num_negs_per_pos 93.0 +530 13 training.batch_size 0.0 +530 14 model.embedding_dim 2.0 +530 14 loss.margin 9.889824360775533 +530 14 loss.adversarial_temperature 0.8753601732716311 +530 14 negative_sampler.num_negs_per_pos 92.0 +530 14 training.batch_size 0.0 +530 15 model.embedding_dim 0.0 +530 15 loss.margin 26.57951220279238 +530 15 loss.adversarial_temperature 0.43320471973666197 +530 15 negative_sampler.num_negs_per_pos 38.0 +530 15 training.batch_size 0.0 +530 16 model.embedding_dim 1.0 +530 16 loss.margin 3.326863228831483 +530 16 loss.adversarial_temperature 0.5342597689641639 +530 16 negative_sampler.num_negs_per_pos 18.0 +530 16 training.batch_size 1.0 +530 17 model.embedding_dim 1.0 +530 17 loss.margin 9.03821521582191 +530 17 loss.adversarial_temperature 0.3723158820244584 +530 17 negative_sampler.num_negs_per_pos 80.0 +530 17 training.batch_size 0.0 +530 18 model.embedding_dim 1.0 +530 18 loss.margin 21.762960510713377 +530 18 loss.adversarial_temperature 0.23566735549215523 +530 18 negative_sampler.num_negs_per_pos 31.0 +530 18 training.batch_size 0.0 +530 19 model.embedding_dim 2.0 +530 19 loss.margin 11.189820891603187 +530 19 loss.adversarial_temperature 0.3515669390176147 +530 19 negative_sampler.num_negs_per_pos 47.0 +530 19 training.batch_size 0.0 +530 20 model.embedding_dim 1.0 +530 20 loss.margin 17.406558977539035 +530 
20 loss.adversarial_temperature 0.46186053416422734 +530 20 negative_sampler.num_negs_per_pos 71.0 +530 20 training.batch_size 2.0 +530 21 model.embedding_dim 2.0 +530 21 loss.margin 5.208317324047204 +530 21 loss.adversarial_temperature 0.4538607607206062 +530 21 negative_sampler.num_negs_per_pos 32.0 +530 21 training.batch_size 2.0 +530 22 model.embedding_dim 2.0 +530 22 loss.margin 22.57980640728393 +530 22 loss.adversarial_temperature 0.793905581531712 +530 22 negative_sampler.num_negs_per_pos 75.0 +530 22 training.batch_size 0.0 +530 23 model.embedding_dim 0.0 +530 23 loss.margin 15.662119847952965 +530 23 loss.adversarial_temperature 0.45489078221510815 +530 23 negative_sampler.num_negs_per_pos 92.0 +530 23 training.batch_size 0.0 +530 24 model.embedding_dim 0.0 +530 24 loss.margin 11.146749074500185 +530 24 loss.adversarial_temperature 0.35765918703284205 +530 24 negative_sampler.num_negs_per_pos 45.0 +530 24 training.batch_size 2.0 +530 25 model.embedding_dim 1.0 +530 25 loss.margin 10.941220323906395 +530 25 loss.adversarial_temperature 0.9972579349121469 +530 25 negative_sampler.num_negs_per_pos 25.0 +530 25 training.batch_size 1.0 +530 26 model.embedding_dim 1.0 +530 26 loss.margin 25.26400194628006 +530 26 loss.adversarial_temperature 0.6045410238446125 +530 26 negative_sampler.num_negs_per_pos 4.0 +530 26 training.batch_size 0.0 +530 27 model.embedding_dim 0.0 +530 27 loss.margin 18.626167321369003 +530 27 loss.adversarial_temperature 0.7142563606059754 +530 27 negative_sampler.num_negs_per_pos 79.0 +530 27 training.batch_size 0.0 +530 28 model.embedding_dim 0.0 +530 28 loss.margin 13.897932835013092 +530 28 loss.adversarial_temperature 0.5409685516489827 +530 28 negative_sampler.num_negs_per_pos 63.0 +530 28 training.batch_size 0.0 +530 29 model.embedding_dim 1.0 +530 29 loss.margin 28.73924797559552 +530 29 loss.adversarial_temperature 0.49707650639479684 +530 29 negative_sampler.num_negs_per_pos 97.0 +530 29 training.batch_size 2.0 +530 30 
model.embedding_dim 2.0 +530 30 loss.margin 17.1673932031222 +530 30 loss.adversarial_temperature 0.7455988598312124 +530 30 negative_sampler.num_negs_per_pos 94.0 +530 30 training.batch_size 2.0 +530 31 model.embedding_dim 0.0 +530 31 loss.margin 1.795003825205222 +530 31 loss.adversarial_temperature 0.8641895994053009 +530 31 negative_sampler.num_negs_per_pos 99.0 +530 31 training.batch_size 1.0 +530 32 model.embedding_dim 0.0 +530 32 loss.margin 16.54956822822527 +530 32 loss.adversarial_temperature 0.5021309312718446 +530 32 negative_sampler.num_negs_per_pos 94.0 +530 32 training.batch_size 2.0 +530 33 model.embedding_dim 1.0 +530 33 loss.margin 1.7526670822922767 +530 33 loss.adversarial_temperature 0.418677325972315 +530 33 negative_sampler.num_negs_per_pos 93.0 +530 33 training.batch_size 0.0 +530 34 model.embedding_dim 2.0 +530 34 loss.margin 15.905184703643021 +530 34 loss.adversarial_temperature 0.8765783849252368 +530 34 negative_sampler.num_negs_per_pos 74.0 +530 34 training.batch_size 0.0 +530 35 model.embedding_dim 1.0 +530 35 loss.margin 28.80019531422673 +530 35 loss.adversarial_temperature 0.17762866625277382 +530 35 negative_sampler.num_negs_per_pos 86.0 +530 35 training.batch_size 0.0 +530 36 model.embedding_dim 0.0 +530 36 loss.margin 13.621670748945311 +530 36 loss.adversarial_temperature 0.6303365471087753 +530 36 negative_sampler.num_negs_per_pos 7.0 +530 36 training.batch_size 0.0 +530 37 model.embedding_dim 0.0 +530 37 loss.margin 7.9953317250344735 +530 37 loss.adversarial_temperature 0.8797119354809195 +530 37 negative_sampler.num_negs_per_pos 77.0 +530 37 training.batch_size 2.0 +530 38 model.embedding_dim 1.0 +530 38 loss.margin 7.711620952228053 +530 38 loss.adversarial_temperature 0.9294697314024065 +530 38 negative_sampler.num_negs_per_pos 65.0 +530 38 training.batch_size 2.0 +530 39 model.embedding_dim 0.0 +530 39 loss.margin 9.411521608371782 +530 39 loss.adversarial_temperature 0.4007843575267258 +530 39 
negative_sampler.num_negs_per_pos 12.0 +530 39 training.batch_size 0.0 +530 40 model.embedding_dim 1.0 +530 40 loss.margin 20.858083697312306 +530 40 loss.adversarial_temperature 0.4812640273492672 +530 40 negative_sampler.num_negs_per_pos 7.0 +530 40 training.batch_size 2.0 +530 41 model.embedding_dim 2.0 +530 41 loss.margin 5.874316541756157 +530 41 loss.adversarial_temperature 0.6845451323015923 +530 41 negative_sampler.num_negs_per_pos 0.0 +530 41 training.batch_size 1.0 +530 42 model.embedding_dim 0.0 +530 42 loss.margin 4.140015442053374 +530 42 loss.adversarial_temperature 0.8388610794153294 +530 42 negative_sampler.num_negs_per_pos 69.0 +530 42 training.batch_size 0.0 +530 43 model.embedding_dim 0.0 +530 43 loss.margin 8.539914103039326 +530 43 loss.adversarial_temperature 0.503691149229781 +530 43 negative_sampler.num_negs_per_pos 89.0 +530 43 training.batch_size 0.0 +530 44 model.embedding_dim 1.0 +530 44 loss.margin 3.8462483995406433 +530 44 loss.adversarial_temperature 0.3489222374385173 +530 44 negative_sampler.num_negs_per_pos 5.0 +530 44 training.batch_size 1.0 +530 45 model.embedding_dim 1.0 +530 45 loss.margin 18.998553251994778 +530 45 loss.adversarial_temperature 0.5253499988316687 +530 45 negative_sampler.num_negs_per_pos 48.0 +530 45 training.batch_size 0.0 +530 46 model.embedding_dim 0.0 +530 46 loss.margin 20.833135271933944 +530 46 loss.adversarial_temperature 0.34873184402236923 +530 46 negative_sampler.num_negs_per_pos 90.0 +530 46 training.batch_size 0.0 +530 47 model.embedding_dim 1.0 +530 47 loss.margin 23.166464462214215 +530 47 loss.adversarial_temperature 0.7331902946538399 +530 47 negative_sampler.num_negs_per_pos 53.0 +530 47 training.batch_size 2.0 +530 48 model.embedding_dim 0.0 +530 48 loss.margin 29.632923243779274 +530 48 loss.adversarial_temperature 0.24962104350255027 +530 48 negative_sampler.num_negs_per_pos 65.0 +530 48 training.batch_size 1.0 +530 49 model.embedding_dim 0.0 +530 49 loss.margin 5.589742176618106 +530 49 
loss.adversarial_temperature 0.6864748733221008 +530 49 negative_sampler.num_negs_per_pos 20.0 +530 49 training.batch_size 0.0 +530 50 model.embedding_dim 0.0 +530 50 loss.margin 26.803846699468394 +530 50 loss.adversarial_temperature 0.23421990849360824 +530 50 negative_sampler.num_negs_per_pos 44.0 +530 50 training.batch_size 2.0 +530 51 model.embedding_dim 1.0 +530 51 loss.margin 15.474444670677883 +530 51 loss.adversarial_temperature 0.7103652579290586 +530 51 negative_sampler.num_negs_per_pos 92.0 +530 51 training.batch_size 1.0 +530 52 model.embedding_dim 0.0 +530 52 loss.margin 17.89791729514592 +530 52 loss.adversarial_temperature 0.8243575153803073 +530 52 negative_sampler.num_negs_per_pos 8.0 +530 52 training.batch_size 2.0 +530 53 model.embedding_dim 0.0 +530 53 loss.margin 8.455751913054701 +530 53 loss.adversarial_temperature 0.8800922735383713 +530 53 negative_sampler.num_negs_per_pos 79.0 +530 53 training.batch_size 2.0 +530 54 model.embedding_dim 0.0 +530 54 loss.margin 20.33501102367894 +530 54 loss.adversarial_temperature 0.6594556454998892 +530 54 negative_sampler.num_negs_per_pos 1.0 +530 54 training.batch_size 2.0 +530 55 model.embedding_dim 0.0 +530 55 loss.margin 11.044442826584666 +530 55 loss.adversarial_temperature 0.6601876431407497 +530 55 negative_sampler.num_negs_per_pos 66.0 +530 55 training.batch_size 0.0 +530 56 model.embedding_dim 1.0 +530 56 loss.margin 25.44152714451953 +530 56 loss.adversarial_temperature 0.2838296206423725 +530 56 negative_sampler.num_negs_per_pos 64.0 +530 56 training.batch_size 0.0 +530 57 model.embedding_dim 1.0 +530 57 loss.margin 11.84004057283412 +530 57 loss.adversarial_temperature 0.3644135559169137 +530 57 negative_sampler.num_negs_per_pos 44.0 +530 57 training.batch_size 2.0 +530 58 model.embedding_dim 0.0 +530 58 loss.margin 10.502055686916346 +530 58 loss.adversarial_temperature 0.6438444578155286 +530 58 negative_sampler.num_negs_per_pos 82.0 +530 58 training.batch_size 1.0 +530 59 
model.embedding_dim 0.0 +530 59 loss.margin 1.0922088734259907 +530 59 loss.adversarial_temperature 0.5769055314435187 +530 59 negative_sampler.num_negs_per_pos 81.0 +530 59 training.batch_size 2.0 +530 60 model.embedding_dim 1.0 +530 60 loss.margin 5.789682016312081 +530 60 loss.adversarial_temperature 0.4266710301888269 +530 60 negative_sampler.num_negs_per_pos 66.0 +530 60 training.batch_size 0.0 +530 61 model.embedding_dim 2.0 +530 61 loss.margin 27.705165056214362 +530 61 loss.adversarial_temperature 0.7665745878995035 +530 61 negative_sampler.num_negs_per_pos 2.0 +530 61 training.batch_size 2.0 +530 62 model.embedding_dim 0.0 +530 62 loss.margin 17.562563523869947 +530 62 loss.adversarial_temperature 0.38433465287210067 +530 62 negative_sampler.num_negs_per_pos 49.0 +530 62 training.batch_size 1.0 +530 63 model.embedding_dim 2.0 +530 63 loss.margin 13.275028327312333 +530 63 loss.adversarial_temperature 0.6264045328499546 +530 63 negative_sampler.num_negs_per_pos 41.0 +530 63 training.batch_size 1.0 +530 64 model.embedding_dim 0.0 +530 64 loss.margin 16.954313697443563 +530 64 loss.adversarial_temperature 0.5718706032243486 +530 64 negative_sampler.num_negs_per_pos 91.0 +530 64 training.batch_size 1.0 +530 65 model.embedding_dim 1.0 +530 65 loss.margin 19.809794675277885 +530 65 loss.adversarial_temperature 0.8562565886041107 +530 65 negative_sampler.num_negs_per_pos 23.0 +530 65 training.batch_size 2.0 +530 66 model.embedding_dim 0.0 +530 66 loss.margin 25.30029096040417 +530 66 loss.adversarial_temperature 0.48317028158223796 +530 66 negative_sampler.num_negs_per_pos 26.0 +530 66 training.batch_size 1.0 +530 67 model.embedding_dim 0.0 +530 67 loss.margin 20.10121824708519 +530 67 loss.adversarial_temperature 0.7340953466789314 +530 67 negative_sampler.num_negs_per_pos 72.0 +530 67 training.batch_size 2.0 +530 68 model.embedding_dim 1.0 +530 68 loss.margin 4.059036725881725 +530 68 loss.adversarial_temperature 0.9297602107041896 +530 68 
negative_sampler.num_negs_per_pos 54.0 +530 68 training.batch_size 1.0 +530 69 model.embedding_dim 0.0 +530 69 loss.margin 13.84734057627242 +530 69 loss.adversarial_temperature 0.4655733282181358 +530 69 negative_sampler.num_negs_per_pos 60.0 +530 69 training.batch_size 0.0 +530 70 model.embedding_dim 0.0 +530 70 loss.margin 7.43324910358079 +530 70 loss.adversarial_temperature 0.5872704828601504 +530 70 negative_sampler.num_negs_per_pos 48.0 +530 70 training.batch_size 1.0 +530 71 model.embedding_dim 2.0 +530 71 loss.margin 15.985685773124198 +530 71 loss.adversarial_temperature 0.637741642084627 +530 71 negative_sampler.num_negs_per_pos 21.0 +530 71 training.batch_size 0.0 +530 72 model.embedding_dim 0.0 +530 72 loss.margin 12.598280775896809 +530 72 loss.adversarial_temperature 0.15668929392086645 +530 72 negative_sampler.num_negs_per_pos 44.0 +530 72 training.batch_size 2.0 +530 73 model.embedding_dim 1.0 +530 73 loss.margin 17.076713144240514 +530 73 loss.adversarial_temperature 0.21310884910183903 +530 73 negative_sampler.num_negs_per_pos 42.0 +530 73 training.batch_size 0.0 +530 74 model.embedding_dim 1.0 +530 74 loss.margin 26.709507850064238 +530 74 loss.adversarial_temperature 0.11211227017037059 +530 74 negative_sampler.num_negs_per_pos 95.0 +530 74 training.batch_size 1.0 +530 75 model.embedding_dim 1.0 +530 75 loss.margin 6.27663805288025 +530 75 loss.adversarial_temperature 0.3032032618838273 +530 75 negative_sampler.num_negs_per_pos 97.0 +530 75 training.batch_size 2.0 +530 76 model.embedding_dim 0.0 +530 76 loss.margin 29.3504628158519 +530 76 loss.adversarial_temperature 0.61422349541009 +530 76 negative_sampler.num_negs_per_pos 71.0 +530 76 training.batch_size 2.0 +530 77 model.embedding_dim 1.0 +530 77 loss.margin 11.314343140565093 +530 77 loss.adversarial_temperature 0.3529706390373051 +530 77 negative_sampler.num_negs_per_pos 9.0 +530 77 training.batch_size 1.0 +530 78 model.embedding_dim 2.0 +530 78 loss.margin 19.520495673503707 +530 78 
loss.adversarial_temperature 0.8688703708639739 +530 78 negative_sampler.num_negs_per_pos 54.0 +530 78 training.batch_size 2.0 +530 79 model.embedding_dim 0.0 +530 79 loss.margin 22.06176066147423 +530 79 loss.adversarial_temperature 0.6338600822643508 +530 79 negative_sampler.num_negs_per_pos 87.0 +530 79 training.batch_size 2.0 +530 80 model.embedding_dim 2.0 +530 80 loss.margin 16.661793801693985 +530 80 loss.adversarial_temperature 0.5418483055948234 +530 80 negative_sampler.num_negs_per_pos 20.0 +530 80 training.batch_size 0.0 +530 81 model.embedding_dim 1.0 +530 81 loss.margin 3.2495060095175665 +530 81 loss.adversarial_temperature 0.17035776239574643 +530 81 negative_sampler.num_negs_per_pos 16.0 +530 81 training.batch_size 0.0 +530 82 model.embedding_dim 1.0 +530 82 loss.margin 16.026568613636318 +530 82 loss.adversarial_temperature 0.4090871508289343 +530 82 negative_sampler.num_negs_per_pos 60.0 +530 82 training.batch_size 0.0 +530 83 model.embedding_dim 1.0 +530 83 loss.margin 23.902627882533807 +530 83 loss.adversarial_temperature 0.9603414307462418 +530 83 negative_sampler.num_negs_per_pos 51.0 +530 83 training.batch_size 0.0 +530 84 model.embedding_dim 2.0 +530 84 loss.margin 25.796816148499992 +530 84 loss.adversarial_temperature 0.8568968471682592 +530 84 negative_sampler.num_negs_per_pos 56.0 +530 84 training.batch_size 2.0 +530 85 model.embedding_dim 1.0 +530 85 loss.margin 29.34190418860354 +530 85 loss.adversarial_temperature 0.26498096534854576 +530 85 negative_sampler.num_negs_per_pos 1.0 +530 85 training.batch_size 1.0 +530 86 model.embedding_dim 0.0 +530 86 loss.margin 20.322391057517216 +530 86 loss.adversarial_temperature 0.9870745207352913 +530 86 negative_sampler.num_negs_per_pos 21.0 +530 86 training.batch_size 1.0 +530 87 model.embedding_dim 2.0 +530 87 loss.margin 24.771912115305376 +530 87 loss.adversarial_temperature 0.6772605367891038 +530 87 negative_sampler.num_negs_per_pos 23.0 +530 87 training.batch_size 0.0 +530 88 
model.embedding_dim 2.0 +530 88 loss.margin 4.172619372124215 +530 88 loss.adversarial_temperature 0.4427960710888904 +530 88 negative_sampler.num_negs_per_pos 14.0 +530 88 training.batch_size 1.0 +530 89 model.embedding_dim 2.0 +530 89 loss.margin 3.2165006805290606 +530 89 loss.adversarial_temperature 0.336483302618627 +530 89 negative_sampler.num_negs_per_pos 61.0 +530 89 training.batch_size 2.0 +530 90 model.embedding_dim 1.0 +530 90 loss.margin 28.215715887133534 +530 90 loss.adversarial_temperature 0.1944259945061677 +530 90 negative_sampler.num_negs_per_pos 28.0 +530 90 training.batch_size 0.0 +530 91 model.embedding_dim 1.0 +530 91 loss.margin 22.85590711674208 +530 91 loss.adversarial_temperature 0.9980680460436435 +530 91 negative_sampler.num_negs_per_pos 81.0 +530 91 training.batch_size 2.0 +530 92 model.embedding_dim 0.0 +530 92 loss.margin 1.5442037446476609 +530 92 loss.adversarial_temperature 0.8077261996952785 +530 92 negative_sampler.num_negs_per_pos 55.0 +530 92 training.batch_size 1.0 +530 93 model.embedding_dim 2.0 +530 93 loss.margin 13.318195098051628 +530 93 loss.adversarial_temperature 0.1437023065145137 +530 93 negative_sampler.num_negs_per_pos 15.0 +530 93 training.batch_size 2.0 +530 94 model.embedding_dim 1.0 +530 94 loss.margin 8.604398655308827 +530 94 loss.adversarial_temperature 0.837823838638485 +530 94 negative_sampler.num_negs_per_pos 39.0 +530 94 training.batch_size 2.0 +530 95 model.embedding_dim 2.0 +530 95 loss.margin 16.88596622362874 +530 95 loss.adversarial_temperature 0.30105557316364284 +530 95 negative_sampler.num_negs_per_pos 62.0 +530 95 training.batch_size 1.0 +530 96 model.embedding_dim 2.0 +530 96 loss.margin 5.055361730919365 +530 96 loss.adversarial_temperature 0.7047173723942645 +530 96 negative_sampler.num_negs_per_pos 18.0 +530 96 training.batch_size 1.0 +530 97 model.embedding_dim 2.0 +530 97 loss.margin 11.665143829867809 +530 97 loss.adversarial_temperature 0.1735127505180378 +530 97 
negative_sampler.num_negs_per_pos 52.0 +530 97 training.batch_size 1.0 +530 98 model.embedding_dim 2.0 +530 98 loss.margin 12.781865965837802 +530 98 loss.adversarial_temperature 0.21664417054242907 +530 98 negative_sampler.num_negs_per_pos 65.0 +530 98 training.batch_size 0.0 +530 99 model.embedding_dim 2.0 +530 99 loss.margin 22.774325195710244 +530 99 loss.adversarial_temperature 0.25401015544841354 +530 99 negative_sampler.num_negs_per_pos 16.0 +530 99 training.batch_size 0.0 +530 100 model.embedding_dim 1.0 +530 100 loss.margin 16.360108499818708 +530 100 loss.adversarial_temperature 0.86237976823443 +530 100 negative_sampler.num_negs_per_pos 96.0 +530 100 training.batch_size 2.0 +530 1 dataset """kinships""" +530 1 model """rescal""" +530 1 loss """nssa""" +530 1 regularizer """no""" +530 1 optimizer """adadelta""" +530 1 training_loop """owa""" +530 1 negative_sampler """basic""" +530 1 evaluator """rankbased""" +530 2 dataset """kinships""" +530 2 model """rescal""" +530 2 loss """nssa""" +530 2 regularizer """no""" +530 2 optimizer """adadelta""" +530 2 training_loop """owa""" +530 2 negative_sampler """basic""" +530 2 evaluator """rankbased""" +530 3 dataset """kinships""" +530 3 model """rescal""" +530 3 loss """nssa""" +530 3 regularizer """no""" +530 3 optimizer """adadelta""" +530 3 training_loop """owa""" +530 3 negative_sampler """basic""" +530 3 evaluator """rankbased""" +530 4 dataset """kinships""" +530 4 model """rescal""" +530 4 loss """nssa""" +530 4 regularizer """no""" +530 4 optimizer """adadelta""" +530 4 training_loop """owa""" +530 4 negative_sampler """basic""" +530 4 evaluator """rankbased""" +530 5 dataset """kinships""" +530 5 model """rescal""" +530 5 loss """nssa""" +530 5 regularizer """no""" +530 5 optimizer """adadelta""" +530 5 training_loop """owa""" +530 5 negative_sampler """basic""" +530 5 evaluator """rankbased""" +530 6 dataset """kinships""" +530 6 model """rescal""" +530 6 loss """nssa""" +530 6 regularizer """no""" 
+530 6 optimizer """adadelta""" +530 6 training_loop """owa""" +530 6 negative_sampler """basic""" +530 6 evaluator """rankbased""" +530 7 dataset """kinships""" +530 7 model """rescal""" +530 7 loss """nssa""" +530 7 regularizer """no""" +530 7 optimizer """adadelta""" +530 7 training_loop """owa""" +530 7 negative_sampler """basic""" +530 7 evaluator """rankbased""" +530 8 dataset """kinships""" +530 8 model """rescal""" +530 8 loss """nssa""" +530 8 regularizer """no""" +530 8 optimizer """adadelta""" +530 8 training_loop """owa""" +530 8 negative_sampler """basic""" +530 8 evaluator """rankbased""" +530 9 dataset """kinships""" +530 9 model """rescal""" +530 9 loss """nssa""" +530 9 regularizer """no""" +530 9 optimizer """adadelta""" +530 9 training_loop """owa""" +530 9 negative_sampler """basic""" +530 9 evaluator """rankbased""" +530 10 dataset """kinships""" +530 10 model """rescal""" +530 10 loss """nssa""" +530 10 regularizer """no""" +530 10 optimizer """adadelta""" +530 10 training_loop """owa""" +530 10 negative_sampler """basic""" +530 10 evaluator """rankbased""" +530 11 dataset """kinships""" +530 11 model """rescal""" +530 11 loss """nssa""" +530 11 regularizer """no""" +530 11 optimizer """adadelta""" +530 11 training_loop """owa""" +530 11 negative_sampler """basic""" +530 11 evaluator """rankbased""" +530 12 dataset """kinships""" +530 12 model """rescal""" +530 12 loss """nssa""" +530 12 regularizer """no""" +530 12 optimizer """adadelta""" +530 12 training_loop """owa""" +530 12 negative_sampler """basic""" +530 12 evaluator """rankbased""" +530 13 dataset """kinships""" +530 13 model """rescal""" +530 13 loss """nssa""" +530 13 regularizer """no""" +530 13 optimizer """adadelta""" +530 13 training_loop """owa""" +530 13 negative_sampler """basic""" +530 13 evaluator """rankbased""" +530 14 dataset """kinships""" +530 14 model """rescal""" +530 14 loss """nssa""" +530 14 regularizer """no""" +530 14 optimizer """adadelta""" +530 14 
training_loop """owa""" +530 14 negative_sampler """basic""" +530 14 evaluator """rankbased""" +530 15 dataset """kinships""" +530 15 model """rescal""" +530 15 loss """nssa""" +530 15 regularizer """no""" +530 15 optimizer """adadelta""" +530 15 training_loop """owa""" +530 15 negative_sampler """basic""" +530 15 evaluator """rankbased""" +530 16 dataset """kinships""" +530 16 model """rescal""" +530 16 loss """nssa""" +530 16 regularizer """no""" +530 16 optimizer """adadelta""" +530 16 training_loop """owa""" +530 16 negative_sampler """basic""" +530 16 evaluator """rankbased""" +530 17 dataset """kinships""" +530 17 model """rescal""" +530 17 loss """nssa""" +530 17 regularizer """no""" +530 17 optimizer """adadelta""" +530 17 training_loop """owa""" +530 17 negative_sampler """basic""" +530 17 evaluator """rankbased""" +530 18 dataset """kinships""" +530 18 model """rescal""" +530 18 loss """nssa""" +530 18 regularizer """no""" +530 18 optimizer """adadelta""" +530 18 training_loop """owa""" +530 18 negative_sampler """basic""" +530 18 evaluator """rankbased""" +530 19 dataset """kinships""" +530 19 model """rescal""" +530 19 loss """nssa""" +530 19 regularizer """no""" +530 19 optimizer """adadelta""" +530 19 training_loop """owa""" +530 19 negative_sampler """basic""" +530 19 evaluator """rankbased""" +530 20 dataset """kinships""" +530 20 model """rescal""" +530 20 loss """nssa""" +530 20 regularizer """no""" +530 20 optimizer """adadelta""" +530 20 training_loop """owa""" +530 20 negative_sampler """basic""" +530 20 evaluator """rankbased""" +530 21 dataset """kinships""" +530 21 model """rescal""" +530 21 loss """nssa""" +530 21 regularizer """no""" +530 21 optimizer """adadelta""" +530 21 training_loop """owa""" +530 21 negative_sampler """basic""" +530 21 evaluator """rankbased""" +530 22 dataset """kinships""" +530 22 model """rescal""" +530 22 loss """nssa""" +530 22 regularizer """no""" +530 22 optimizer """adadelta""" +530 22 training_loop """owa""" 
+530 22 negative_sampler """basic""" +530 22 evaluator """rankbased""" +530 23 dataset """kinships""" +530 23 model """rescal""" +530 23 loss """nssa""" +530 23 regularizer """no""" +530 23 optimizer """adadelta""" +530 23 training_loop """owa""" +530 23 negative_sampler """basic""" +530 23 evaluator """rankbased""" +530 24 dataset """kinships""" +530 24 model """rescal""" +530 24 loss """nssa""" +530 24 regularizer """no""" +530 24 optimizer """adadelta""" +530 24 training_loop """owa""" +530 24 negative_sampler """basic""" +530 24 evaluator """rankbased""" +530 25 dataset """kinships""" +530 25 model """rescal""" +530 25 loss """nssa""" +530 25 regularizer """no""" +530 25 optimizer """adadelta""" +530 25 training_loop """owa""" +530 25 negative_sampler """basic""" +530 25 evaluator """rankbased""" +530 26 dataset """kinships""" +530 26 model """rescal""" +530 26 loss """nssa""" +530 26 regularizer """no""" +530 26 optimizer """adadelta""" +530 26 training_loop """owa""" +530 26 negative_sampler """basic""" +530 26 evaluator """rankbased""" +530 27 dataset """kinships""" +530 27 model """rescal""" +530 27 loss """nssa""" +530 27 regularizer """no""" +530 27 optimizer """adadelta""" +530 27 training_loop """owa""" +530 27 negative_sampler """basic""" +530 27 evaluator """rankbased""" +530 28 dataset """kinships""" +530 28 model """rescal""" +530 28 loss """nssa""" +530 28 regularizer """no""" +530 28 optimizer """adadelta""" +530 28 training_loop """owa""" +530 28 negative_sampler """basic""" +530 28 evaluator """rankbased""" +530 29 dataset """kinships""" +530 29 model """rescal""" +530 29 loss """nssa""" +530 29 regularizer """no""" +530 29 optimizer """adadelta""" +530 29 training_loop """owa""" +530 29 negative_sampler """basic""" +530 29 evaluator """rankbased""" +530 30 dataset """kinships""" +530 30 model """rescal""" +530 30 loss """nssa""" +530 30 regularizer """no""" +530 30 optimizer """adadelta""" +530 30 training_loop """owa""" +530 30 
negative_sampler """basic""" +530 30 evaluator """rankbased""" +530 31 dataset """kinships""" +530 31 model """rescal""" +530 31 loss """nssa""" +530 31 regularizer """no""" +530 31 optimizer """adadelta""" +530 31 training_loop """owa""" +530 31 negative_sampler """basic""" +530 31 evaluator """rankbased""" +530 32 dataset """kinships""" +530 32 model """rescal""" +530 32 loss """nssa""" +530 32 regularizer """no""" +530 32 optimizer """adadelta""" +530 32 training_loop """owa""" +530 32 negative_sampler """basic""" +530 32 evaluator """rankbased""" +530 33 dataset """kinships""" +530 33 model """rescal""" +530 33 loss """nssa""" +530 33 regularizer """no""" +530 33 optimizer """adadelta""" +530 33 training_loop """owa""" +530 33 negative_sampler """basic""" +530 33 evaluator """rankbased""" +530 34 dataset """kinships""" +530 34 model """rescal""" +530 34 loss """nssa""" +530 34 regularizer """no""" +530 34 optimizer """adadelta""" +530 34 training_loop """owa""" +530 34 negative_sampler """basic""" +530 34 evaluator """rankbased""" +530 35 dataset """kinships""" +530 35 model """rescal""" +530 35 loss """nssa""" +530 35 regularizer """no""" +530 35 optimizer """adadelta""" +530 35 training_loop """owa""" +530 35 negative_sampler """basic""" +530 35 evaluator """rankbased""" +530 36 dataset """kinships""" +530 36 model """rescal""" +530 36 loss """nssa""" +530 36 regularizer """no""" +530 36 optimizer """adadelta""" +530 36 training_loop """owa""" +530 36 negative_sampler """basic""" +530 36 evaluator """rankbased""" +530 37 dataset """kinships""" +530 37 model """rescal""" +530 37 loss """nssa""" +530 37 regularizer """no""" +530 37 optimizer """adadelta""" +530 37 training_loop """owa""" +530 37 negative_sampler """basic""" +530 37 evaluator """rankbased""" +530 38 dataset """kinships""" +530 38 model """rescal""" +530 38 loss """nssa""" +530 38 regularizer """no""" +530 38 optimizer """adadelta""" +530 38 training_loop """owa""" +530 38 negative_sampler 
"""basic""" +530 38 evaluator """rankbased""" +530 39 dataset """kinships""" +530 39 model """rescal""" +530 39 loss """nssa""" +530 39 regularizer """no""" +530 39 optimizer """adadelta""" +530 39 training_loop """owa""" +530 39 negative_sampler """basic""" +530 39 evaluator """rankbased""" +530 40 dataset """kinships""" +530 40 model """rescal""" +530 40 loss """nssa""" +530 40 regularizer """no""" +530 40 optimizer """adadelta""" +530 40 training_loop """owa""" +530 40 negative_sampler """basic""" +530 40 evaluator """rankbased""" +530 41 dataset """kinships""" +530 41 model """rescal""" +530 41 loss """nssa""" +530 41 regularizer """no""" +530 41 optimizer """adadelta""" +530 41 training_loop """owa""" +530 41 negative_sampler """basic""" +530 41 evaluator """rankbased""" +530 42 dataset """kinships""" +530 42 model """rescal""" +530 42 loss """nssa""" +530 42 regularizer """no""" +530 42 optimizer """adadelta""" +530 42 training_loop """owa""" +530 42 negative_sampler """basic""" +530 42 evaluator """rankbased""" +530 43 dataset """kinships""" +530 43 model """rescal""" +530 43 loss """nssa""" +530 43 regularizer """no""" +530 43 optimizer """adadelta""" +530 43 training_loop """owa""" +530 43 negative_sampler """basic""" +530 43 evaluator """rankbased""" +530 44 dataset """kinships""" +530 44 model """rescal""" +530 44 loss """nssa""" +530 44 regularizer """no""" +530 44 optimizer """adadelta""" +530 44 training_loop """owa""" +530 44 negative_sampler """basic""" +530 44 evaluator """rankbased""" +530 45 dataset """kinships""" +530 45 model """rescal""" +530 45 loss """nssa""" +530 45 regularizer """no""" +530 45 optimizer """adadelta""" +530 45 training_loop """owa""" +530 45 negative_sampler """basic""" +530 45 evaluator """rankbased""" +530 46 dataset """kinships""" +530 46 model """rescal""" +530 46 loss """nssa""" +530 46 regularizer """no""" +530 46 optimizer """adadelta""" +530 46 training_loop """owa""" +530 46 negative_sampler """basic""" +530 46 
evaluator """rankbased""" +530 47 dataset """kinships""" +530 47 model """rescal""" +530 47 loss """nssa""" +530 47 regularizer """no""" +530 47 optimizer """adadelta""" +530 47 training_loop """owa""" +530 47 negative_sampler """basic""" +530 47 evaluator """rankbased""" +530 48 dataset """kinships""" +530 48 model """rescal""" +530 48 loss """nssa""" +530 48 regularizer """no""" +530 48 optimizer """adadelta""" +530 48 training_loop """owa""" +530 48 negative_sampler """basic""" +530 48 evaluator """rankbased""" +530 49 dataset """kinships""" +530 49 model """rescal""" +530 49 loss """nssa""" +530 49 regularizer """no""" +530 49 optimizer """adadelta""" +530 49 training_loop """owa""" +530 49 negative_sampler """basic""" +530 49 evaluator """rankbased""" +530 50 dataset """kinships""" +530 50 model """rescal""" +530 50 loss """nssa""" +530 50 regularizer """no""" +530 50 optimizer """adadelta""" +530 50 training_loop """owa""" +530 50 negative_sampler """basic""" +530 50 evaluator """rankbased""" +530 51 dataset """kinships""" +530 51 model """rescal""" +530 51 loss """nssa""" +530 51 regularizer """no""" +530 51 optimizer """adadelta""" +530 51 training_loop """owa""" +530 51 negative_sampler """basic""" +530 51 evaluator """rankbased""" +530 52 dataset """kinships""" +530 52 model """rescal""" +530 52 loss """nssa""" +530 52 regularizer """no""" +530 52 optimizer """adadelta""" +530 52 training_loop """owa""" +530 52 negative_sampler """basic""" +530 52 evaluator """rankbased""" +530 53 dataset """kinships""" +530 53 model """rescal""" +530 53 loss """nssa""" +530 53 regularizer """no""" +530 53 optimizer """adadelta""" +530 53 training_loop """owa""" +530 53 negative_sampler """basic""" +530 53 evaluator """rankbased""" +530 54 dataset """kinships""" +530 54 model """rescal""" +530 54 loss """nssa""" +530 54 regularizer """no""" +530 54 optimizer """adadelta""" +530 54 training_loop """owa""" +530 54 negative_sampler """basic""" +530 54 evaluator 
"""rankbased""" +530 55 dataset """kinships""" +530 55 model """rescal""" +530 55 loss """nssa""" +530 55 regularizer """no""" +530 55 optimizer """adadelta""" +530 55 training_loop """owa""" +530 55 negative_sampler """basic""" +530 55 evaluator """rankbased""" +530 56 dataset """kinships""" +530 56 model """rescal""" +530 56 loss """nssa""" +530 56 regularizer """no""" +530 56 optimizer """adadelta""" +530 56 training_loop """owa""" +530 56 negative_sampler """basic""" +530 56 evaluator """rankbased""" +530 57 dataset """kinships""" +530 57 model """rescal""" +530 57 loss """nssa""" +530 57 regularizer """no""" +530 57 optimizer """adadelta""" +530 57 training_loop """owa""" +530 57 negative_sampler """basic""" +530 57 evaluator """rankbased""" +530 58 dataset """kinships""" +530 58 model """rescal""" +530 58 loss """nssa""" +530 58 regularizer """no""" +530 58 optimizer """adadelta""" +530 58 training_loop """owa""" +530 58 negative_sampler """basic""" +530 58 evaluator """rankbased""" +530 59 dataset """kinships""" +530 59 model """rescal""" +530 59 loss """nssa""" +530 59 regularizer """no""" +530 59 optimizer """adadelta""" +530 59 training_loop """owa""" +530 59 negative_sampler """basic""" +530 59 evaluator """rankbased""" +530 60 dataset """kinships""" +530 60 model """rescal""" +530 60 loss """nssa""" +530 60 regularizer """no""" +530 60 optimizer """adadelta""" +530 60 training_loop """owa""" +530 60 negative_sampler """basic""" +530 60 evaluator """rankbased""" +530 61 dataset """kinships""" +530 61 model """rescal""" +530 61 loss """nssa""" +530 61 regularizer """no""" +530 61 optimizer """adadelta""" +530 61 training_loop """owa""" +530 61 negative_sampler """basic""" +530 61 evaluator """rankbased""" +530 62 dataset """kinships""" +530 62 model """rescal""" +530 62 loss """nssa""" +530 62 regularizer """no""" +530 62 optimizer """adadelta""" +530 62 training_loop """owa""" +530 62 negative_sampler """basic""" +530 62 evaluator """rankbased""" +530 63 
dataset """kinships""" +530 63 model """rescal""" +530 63 loss """nssa""" +530 63 regularizer """no""" +530 63 optimizer """adadelta""" +530 63 training_loop """owa""" +530 63 negative_sampler """basic""" +530 63 evaluator """rankbased""" +530 64 dataset """kinships""" +530 64 model """rescal""" +530 64 loss """nssa""" +530 64 regularizer """no""" +530 64 optimizer """adadelta""" +530 64 training_loop """owa""" +530 64 negative_sampler """basic""" +530 64 evaluator """rankbased""" +530 65 dataset """kinships""" +530 65 model """rescal""" +530 65 loss """nssa""" +530 65 regularizer """no""" +530 65 optimizer """adadelta""" +530 65 training_loop """owa""" +530 65 negative_sampler """basic""" +530 65 evaluator """rankbased""" +530 66 dataset """kinships""" +530 66 model """rescal""" +530 66 loss """nssa""" +530 66 regularizer """no""" +530 66 optimizer """adadelta""" +530 66 training_loop """owa""" +530 66 negative_sampler """basic""" +530 66 evaluator """rankbased""" +530 67 dataset """kinships""" +530 67 model """rescal""" +530 67 loss """nssa""" +530 67 regularizer """no""" +530 67 optimizer """adadelta""" +530 67 training_loop """owa""" +530 67 negative_sampler """basic""" +530 67 evaluator """rankbased""" +530 68 dataset """kinships""" +530 68 model """rescal""" +530 68 loss """nssa""" +530 68 regularizer """no""" +530 68 optimizer """adadelta""" +530 68 training_loop """owa""" +530 68 negative_sampler """basic""" +530 68 evaluator """rankbased""" +530 69 dataset """kinships""" +530 69 model """rescal""" +530 69 loss """nssa""" +530 69 regularizer """no""" +530 69 optimizer """adadelta""" +530 69 training_loop """owa""" +530 69 negative_sampler """basic""" +530 69 evaluator """rankbased""" +530 70 dataset """kinships""" +530 70 model """rescal""" +530 70 loss """nssa""" +530 70 regularizer """no""" +530 70 optimizer """adadelta""" +530 70 training_loop """owa""" +530 70 negative_sampler """basic""" +530 70 evaluator """rankbased""" +530 71 dataset """kinships""" 
+530 71 model """rescal""" +530 71 loss """nssa""" +530 71 regularizer """no""" +530 71 optimizer """adadelta""" +530 71 training_loop """owa""" +530 71 negative_sampler """basic""" +530 71 evaluator """rankbased""" +530 72 dataset """kinships""" +530 72 model """rescal""" +530 72 loss """nssa""" +530 72 regularizer """no""" +530 72 optimizer """adadelta""" +530 72 training_loop """owa""" +530 72 negative_sampler """basic""" +530 72 evaluator """rankbased""" +530 73 dataset """kinships""" +530 73 model """rescal""" +530 73 loss """nssa""" +530 73 regularizer """no""" +530 73 optimizer """adadelta""" +530 73 training_loop """owa""" +530 73 negative_sampler """basic""" +530 73 evaluator """rankbased""" +530 74 dataset """kinships""" +530 74 model """rescal""" +530 74 loss """nssa""" +530 74 regularizer """no""" +530 74 optimizer """adadelta""" +530 74 training_loop """owa""" +530 74 negative_sampler """basic""" +530 74 evaluator """rankbased""" +530 75 dataset """kinships""" +530 75 model """rescal""" +530 75 loss """nssa""" +530 75 regularizer """no""" +530 75 optimizer """adadelta""" +530 75 training_loop """owa""" +530 75 negative_sampler """basic""" +530 75 evaluator """rankbased""" +530 76 dataset """kinships""" +530 76 model """rescal""" +530 76 loss """nssa""" +530 76 regularizer """no""" +530 76 optimizer """adadelta""" +530 76 training_loop """owa""" +530 76 negative_sampler """basic""" +530 76 evaluator """rankbased""" +530 77 dataset """kinships""" +530 77 model """rescal""" +530 77 loss """nssa""" +530 77 regularizer """no""" +530 77 optimizer """adadelta""" +530 77 training_loop """owa""" +530 77 negative_sampler """basic""" +530 77 evaluator """rankbased""" +530 78 dataset """kinships""" +530 78 model """rescal""" +530 78 loss """nssa""" +530 78 regularizer """no""" +530 78 optimizer """adadelta""" +530 78 training_loop """owa""" +530 78 negative_sampler """basic""" +530 78 evaluator """rankbased""" +530 79 dataset """kinships""" +530 79 model 
"""rescal""" +530 79 loss """nssa""" +530 79 regularizer """no""" +530 79 optimizer """adadelta""" +530 79 training_loop """owa""" +530 79 negative_sampler """basic""" +530 79 evaluator """rankbased""" +530 80 dataset """kinships""" +530 80 model """rescal""" +530 80 loss """nssa""" +530 80 regularizer """no""" +530 80 optimizer """adadelta""" +530 80 training_loop """owa""" +530 80 negative_sampler """basic""" +530 80 evaluator """rankbased""" +530 81 dataset """kinships""" +530 81 model """rescal""" +530 81 loss """nssa""" +530 81 regularizer """no""" +530 81 optimizer """adadelta""" +530 81 training_loop """owa""" +530 81 negative_sampler """basic""" +530 81 evaluator """rankbased""" +530 82 dataset """kinships""" +530 82 model """rescal""" +530 82 loss """nssa""" +530 82 regularizer """no""" +530 82 optimizer """adadelta""" +530 82 training_loop """owa""" +530 82 negative_sampler """basic""" +530 82 evaluator """rankbased""" +530 83 dataset """kinships""" +530 83 model """rescal""" +530 83 loss """nssa""" +530 83 regularizer """no""" +530 83 optimizer """adadelta""" +530 83 training_loop """owa""" +530 83 negative_sampler """basic""" +530 83 evaluator """rankbased""" +530 84 dataset """kinships""" +530 84 model """rescal""" +530 84 loss """nssa""" +530 84 regularizer """no""" +530 84 optimizer """adadelta""" +530 84 training_loop """owa""" +530 84 negative_sampler """basic""" +530 84 evaluator """rankbased""" +530 85 dataset """kinships""" +530 85 model """rescal""" +530 85 loss """nssa""" +530 85 regularizer """no""" +530 85 optimizer """adadelta""" +530 85 training_loop """owa""" +530 85 negative_sampler """basic""" +530 85 evaluator """rankbased""" +530 86 dataset """kinships""" +530 86 model """rescal""" +530 86 loss """nssa""" +530 86 regularizer """no""" +530 86 optimizer """adadelta""" +530 86 training_loop """owa""" +530 86 negative_sampler """basic""" +530 86 evaluator """rankbased""" +530 87 dataset """kinships""" +530 87 model """rescal""" +530 87 
loss """nssa""" +530 87 regularizer """no""" +530 87 optimizer """adadelta""" +530 87 training_loop """owa""" +530 87 negative_sampler """basic""" +530 87 evaluator """rankbased""" +530 88 dataset """kinships""" +530 88 model """rescal""" +530 88 loss """nssa""" +530 88 regularizer """no""" +530 88 optimizer """adadelta""" +530 88 training_loop """owa""" +530 88 negative_sampler """basic""" +530 88 evaluator """rankbased""" +530 89 dataset """kinships""" +530 89 model """rescal""" +530 89 loss """nssa""" +530 89 regularizer """no""" +530 89 optimizer """adadelta""" +530 89 training_loop """owa""" +530 89 negative_sampler """basic""" +530 89 evaluator """rankbased""" +530 90 dataset """kinships""" +530 90 model """rescal""" +530 90 loss """nssa""" +530 90 regularizer """no""" +530 90 optimizer """adadelta""" +530 90 training_loop """owa""" +530 90 negative_sampler """basic""" +530 90 evaluator """rankbased""" +530 91 dataset """kinships""" +530 91 model """rescal""" +530 91 loss """nssa""" +530 91 regularizer """no""" +530 91 optimizer """adadelta""" +530 91 training_loop """owa""" +530 91 negative_sampler """basic""" +530 91 evaluator """rankbased""" +530 92 dataset """kinships""" +530 92 model """rescal""" +530 92 loss """nssa""" +530 92 regularizer """no""" +530 92 optimizer """adadelta""" +530 92 training_loop """owa""" +530 92 negative_sampler """basic""" +530 92 evaluator """rankbased""" +530 93 dataset """kinships""" +530 93 model """rescal""" +530 93 loss """nssa""" +530 93 regularizer """no""" +530 93 optimizer """adadelta""" +530 93 training_loop """owa""" +530 93 negative_sampler """basic""" +530 93 evaluator """rankbased""" +530 94 dataset """kinships""" +530 94 model """rescal""" +530 94 loss """nssa""" +530 94 regularizer """no""" +530 94 optimizer """adadelta""" +530 94 training_loop """owa""" +530 94 negative_sampler """basic""" +530 94 evaluator """rankbased""" +530 95 dataset """kinships""" +530 95 model """rescal""" +530 95 loss """nssa""" +530 95 
regularizer """no""" +530 95 optimizer """adadelta""" +530 95 training_loop """owa""" +530 95 negative_sampler """basic""" +530 95 evaluator """rankbased""" +530 96 dataset """kinships""" +530 96 model """rescal""" +530 96 loss """nssa""" +530 96 regularizer """no""" +530 96 optimizer """adadelta""" +530 96 training_loop """owa""" +530 96 negative_sampler """basic""" +530 96 evaluator """rankbased""" +530 97 dataset """kinships""" +530 97 model """rescal""" +530 97 loss """nssa""" +530 97 regularizer """no""" +530 97 optimizer """adadelta""" +530 97 training_loop """owa""" +530 97 negative_sampler """basic""" +530 97 evaluator """rankbased""" +530 98 dataset """kinships""" +530 98 model """rescal""" +530 98 loss """nssa""" +530 98 regularizer """no""" +530 98 optimizer """adadelta""" +530 98 training_loop """owa""" +530 98 negative_sampler """basic""" +530 98 evaluator """rankbased""" +530 99 dataset """kinships""" +530 99 model """rescal""" +530 99 loss """nssa""" +530 99 regularizer """no""" +530 99 optimizer """adadelta""" +530 99 training_loop """owa""" +530 99 negative_sampler """basic""" +530 99 evaluator """rankbased""" +530 100 dataset """kinships""" +530 100 model """rescal""" +530 100 loss """nssa""" +530 100 regularizer """no""" +530 100 optimizer """adadelta""" +530 100 training_loop """owa""" +530 100 negative_sampler """basic""" +530 100 evaluator """rankbased""" +531 1 model.embedding_dim 2.0 +531 1 training.batch_size 2.0 +531 1 training.label_smoothing 0.011244557362068449 +531 2 model.embedding_dim 1.0 +531 2 training.batch_size 0.0 +531 2 training.label_smoothing 0.006577333758550003 +531 3 model.embedding_dim 0.0 +531 3 training.batch_size 1.0 +531 3 training.label_smoothing 0.01081763771833341 +531 4 model.embedding_dim 0.0 +531 4 training.batch_size 2.0 +531 4 training.label_smoothing 0.19300265574373404 +531 5 model.embedding_dim 1.0 +531 5 training.batch_size 1.0 +531 5 training.label_smoothing 0.5072375003327915 +531 6 model.embedding_dim 
1.0 +531 6 training.batch_size 0.0 +531 6 training.label_smoothing 0.024978126677734767 +531 7 model.embedding_dim 2.0 +531 7 training.batch_size 1.0 +531 7 training.label_smoothing 0.024998406238303822 +531 8 model.embedding_dim 1.0 +531 8 training.batch_size 1.0 +531 8 training.label_smoothing 0.7474878942380205 +531 9 model.embedding_dim 2.0 +531 9 training.batch_size 1.0 +531 9 training.label_smoothing 0.0011580258988941874 +531 10 model.embedding_dim 1.0 +531 10 training.batch_size 0.0 +531 10 training.label_smoothing 0.00825875021722668 +531 11 model.embedding_dim 0.0 +531 11 training.batch_size 1.0 +531 11 training.label_smoothing 0.07857553470106231 +531 12 model.embedding_dim 2.0 +531 12 training.batch_size 1.0 +531 12 training.label_smoothing 0.045259890637387654 +531 13 model.embedding_dim 2.0 +531 13 training.batch_size 1.0 +531 13 training.label_smoothing 0.6571171330837158 +531 14 model.embedding_dim 2.0 +531 14 training.batch_size 0.0 +531 14 training.label_smoothing 0.3537537593179906 +531 15 model.embedding_dim 2.0 +531 15 training.batch_size 0.0 +531 15 training.label_smoothing 0.014946297171215369 +531 16 model.embedding_dim 0.0 +531 16 training.batch_size 0.0 +531 16 training.label_smoothing 0.02008662166771631 +531 17 model.embedding_dim 1.0 +531 17 training.batch_size 2.0 +531 17 training.label_smoothing 0.010241145495916568 +531 18 model.embedding_dim 2.0 +531 18 training.batch_size 0.0 +531 18 training.label_smoothing 0.007793619048958243 +531 19 model.embedding_dim 1.0 +531 19 training.batch_size 1.0 +531 19 training.label_smoothing 0.04092044409213881 +531 20 model.embedding_dim 1.0 +531 20 training.batch_size 2.0 +531 20 training.label_smoothing 0.0287121500341507 +531 21 model.embedding_dim 2.0 +531 21 training.batch_size 1.0 +531 21 training.label_smoothing 0.0036269389579356485 +531 22 model.embedding_dim 0.0 +531 22 training.batch_size 2.0 +531 22 training.label_smoothing 0.7711267139780904 +531 23 model.embedding_dim 2.0 +531 23 
training.batch_size 0.0 +531 23 training.label_smoothing 0.1481675233241484 +531 24 model.embedding_dim 2.0 +531 24 training.batch_size 1.0 +531 24 training.label_smoothing 0.8205092384956907 +531 25 model.embedding_dim 2.0 +531 25 training.batch_size 0.0 +531 25 training.label_smoothing 0.0018030846169369312 +531 26 model.embedding_dim 0.0 +531 26 training.batch_size 2.0 +531 26 training.label_smoothing 0.0048582710694069125 +531 27 model.embedding_dim 2.0 +531 27 training.batch_size 1.0 +531 27 training.label_smoothing 0.009862637366025957 +531 28 model.embedding_dim 0.0 +531 28 training.batch_size 1.0 +531 28 training.label_smoothing 0.017453190285017586 +531 29 model.embedding_dim 2.0 +531 29 training.batch_size 2.0 +531 29 training.label_smoothing 0.0650453034793245 +531 30 model.embedding_dim 1.0 +531 30 training.batch_size 1.0 +531 30 training.label_smoothing 0.006796646472328912 +531 31 model.embedding_dim 2.0 +531 31 training.batch_size 2.0 +531 31 training.label_smoothing 0.002427450862129064 +531 32 model.embedding_dim 2.0 +531 32 training.batch_size 2.0 +531 32 training.label_smoothing 0.06887237440617804 +531 33 model.embedding_dim 1.0 +531 33 training.batch_size 0.0 +531 33 training.label_smoothing 0.00771882812078353 +531 34 model.embedding_dim 1.0 +531 34 training.batch_size 2.0 +531 34 training.label_smoothing 0.6622704763674595 +531 35 model.embedding_dim 0.0 +531 35 training.batch_size 2.0 +531 35 training.label_smoothing 0.019606915632252964 +531 36 model.embedding_dim 1.0 +531 36 training.batch_size 2.0 +531 36 training.label_smoothing 0.06599784060890103 +531 37 model.embedding_dim 2.0 +531 37 training.batch_size 1.0 +531 37 training.label_smoothing 0.8189909136612104 +531 38 model.embedding_dim 2.0 +531 38 training.batch_size 1.0 +531 38 training.label_smoothing 0.009693796813423774 +531 39 model.embedding_dim 0.0 +531 39 training.batch_size 1.0 +531 39 training.label_smoothing 0.0012168941137444514 +531 40 model.embedding_dim 2.0 +531 40 
training.batch_size 2.0 +531 40 training.label_smoothing 0.021073895704010852 +531 41 model.embedding_dim 0.0 +531 41 training.batch_size 1.0 +531 41 training.label_smoothing 0.28751560972609363 +531 42 model.embedding_dim 0.0 +531 42 training.batch_size 1.0 +531 42 training.label_smoothing 0.005663048191283871 +531 43 model.embedding_dim 2.0 +531 43 training.batch_size 0.0 +531 43 training.label_smoothing 0.008748369586918365 +531 44 model.embedding_dim 2.0 +531 44 training.batch_size 1.0 +531 44 training.label_smoothing 0.2972793990158651 +531 45 model.embedding_dim 0.0 +531 45 training.batch_size 2.0 +531 45 training.label_smoothing 0.1190212783356228 +531 46 model.embedding_dim 0.0 +531 46 training.batch_size 2.0 +531 46 training.label_smoothing 0.004039033888895785 +531 47 model.embedding_dim 2.0 +531 47 training.batch_size 1.0 +531 47 training.label_smoothing 0.008700981653617568 +531 48 model.embedding_dim 1.0 +531 48 training.batch_size 1.0 +531 48 training.label_smoothing 0.5907142644148623 +531 49 model.embedding_dim 2.0 +531 49 training.batch_size 1.0 +531 49 training.label_smoothing 0.3187823850356051 +531 50 model.embedding_dim 2.0 +531 50 training.batch_size 1.0 +531 50 training.label_smoothing 0.0050922840280447695 +531 51 model.embedding_dim 1.0 +531 51 training.batch_size 0.0 +531 51 training.label_smoothing 0.06693881189975437 +531 52 model.embedding_dim 2.0 +531 52 training.batch_size 0.0 +531 52 training.label_smoothing 0.033425884788907664 +531 53 model.embedding_dim 0.0 +531 53 training.batch_size 2.0 +531 53 training.label_smoothing 0.02238771834254262 +531 54 model.embedding_dim 1.0 +531 54 training.batch_size 0.0 +531 54 training.label_smoothing 0.03511704606659178 +531 55 model.embedding_dim 1.0 +531 55 training.batch_size 2.0 +531 55 training.label_smoothing 0.09611376105749704 +531 56 model.embedding_dim 0.0 +531 56 training.batch_size 1.0 +531 56 training.label_smoothing 0.024402496653151255 +531 57 model.embedding_dim 2.0 +531 57 
training.batch_size 0.0 +531 57 training.label_smoothing 0.2613799007202012 +531 58 model.embedding_dim 1.0 +531 58 training.batch_size 1.0 +531 58 training.label_smoothing 0.19533345948241235 +531 59 model.embedding_dim 1.0 +531 59 training.batch_size 1.0 +531 59 training.label_smoothing 0.0023644343427370144 +531 60 model.embedding_dim 1.0 +531 60 training.batch_size 0.0 +531 60 training.label_smoothing 0.004608664189422255 +531 61 model.embedding_dim 1.0 +531 61 training.batch_size 1.0 +531 61 training.label_smoothing 0.2223464837289241 +531 62 model.embedding_dim 2.0 +531 62 training.batch_size 2.0 +531 62 training.label_smoothing 0.4311871569043313 +531 63 model.embedding_dim 2.0 +531 63 training.batch_size 0.0 +531 63 training.label_smoothing 0.04875303715840863 +531 64 model.embedding_dim 1.0 +531 64 training.batch_size 0.0 +531 64 training.label_smoothing 0.04309152127703057 +531 65 model.embedding_dim 1.0 +531 65 training.batch_size 0.0 +531 65 training.label_smoothing 0.01498893945618264 +531 66 model.embedding_dim 2.0 +531 66 training.batch_size 0.0 +531 66 training.label_smoothing 0.011681093879937262 +531 67 model.embedding_dim 0.0 +531 67 training.batch_size 0.0 +531 67 training.label_smoothing 0.004518825548817715 +531 68 model.embedding_dim 1.0 +531 68 training.batch_size 1.0 +531 68 training.label_smoothing 0.008710030238613017 +531 69 model.embedding_dim 1.0 +531 69 training.batch_size 2.0 +531 69 training.label_smoothing 0.36795967298931975 +531 70 model.embedding_dim 2.0 +531 70 training.batch_size 1.0 +531 70 training.label_smoothing 0.13724190768011696 +531 71 model.embedding_dim 1.0 +531 71 training.batch_size 1.0 +531 71 training.label_smoothing 0.002474826801421625 +531 72 model.embedding_dim 1.0 +531 72 training.batch_size 1.0 +531 72 training.label_smoothing 0.00463531805130263 +531 73 model.embedding_dim 0.0 +531 73 training.batch_size 2.0 +531 73 training.label_smoothing 0.06811069761419321 +531 74 model.embedding_dim 0.0 +531 74 
training.batch_size 0.0 +531 74 training.label_smoothing 0.0038703957448784077 +531 75 model.embedding_dim 2.0 +531 75 training.batch_size 1.0 +531 75 training.label_smoothing 0.0018861375071964391 +531 76 model.embedding_dim 0.0 +531 76 training.batch_size 0.0 +531 76 training.label_smoothing 0.28578657424307335 +531 77 model.embedding_dim 1.0 +531 77 training.batch_size 0.0 +531 77 training.label_smoothing 0.006159303465261356 +531 78 model.embedding_dim 2.0 +531 78 training.batch_size 2.0 +531 78 training.label_smoothing 0.002744043730149866 +531 79 model.embedding_dim 2.0 +531 79 training.batch_size 1.0 +531 79 training.label_smoothing 0.38108130023630726 +531 80 model.embedding_dim 2.0 +531 80 training.batch_size 0.0 +531 80 training.label_smoothing 0.011967605463185522 +531 81 model.embedding_dim 0.0 +531 81 training.batch_size 2.0 +531 81 training.label_smoothing 0.06325517693300219 +531 82 model.embedding_dim 2.0 +531 82 training.batch_size 0.0 +531 82 training.label_smoothing 0.005964663339801901 +531 83 model.embedding_dim 0.0 +531 83 training.batch_size 1.0 +531 83 training.label_smoothing 0.0011118390723125011 +531 84 model.embedding_dim 2.0 +531 84 training.batch_size 2.0 +531 84 training.label_smoothing 0.1452871685879696 +531 85 model.embedding_dim 1.0 +531 85 training.batch_size 2.0 +531 85 training.label_smoothing 0.00406126730327771 +531 86 model.embedding_dim 2.0 +531 86 training.batch_size 2.0 +531 86 training.label_smoothing 0.0016328061664114338 +531 87 model.embedding_dim 1.0 +531 87 training.batch_size 0.0 +531 87 training.label_smoothing 0.02980560334789444 +531 88 model.embedding_dim 1.0 +531 88 training.batch_size 2.0 +531 88 training.label_smoothing 0.0017414125058776782 +531 89 model.embedding_dim 1.0 +531 89 training.batch_size 1.0 +531 89 training.label_smoothing 0.005091588889422208 +531 90 model.embedding_dim 2.0 +531 90 training.batch_size 0.0 +531 90 training.label_smoothing 0.2030917909221146 +531 91 model.embedding_dim 1.0 +531 
91 training.batch_size 1.0 +531 91 training.label_smoothing 0.18598377843038172 +531 92 model.embedding_dim 1.0 +531 92 training.batch_size 0.0 +531 92 training.label_smoothing 0.013928892861629185 +531 93 model.embedding_dim 2.0 +531 93 training.batch_size 1.0 +531 93 training.label_smoothing 0.002902035542759902 +531 94 model.embedding_dim 2.0 +531 94 training.batch_size 1.0 +531 94 training.label_smoothing 0.0058081459328756775 +531 95 model.embedding_dim 0.0 +531 95 training.batch_size 0.0 +531 95 training.label_smoothing 0.01561439403501381 +531 96 model.embedding_dim 1.0 +531 96 training.batch_size 0.0 +531 96 training.label_smoothing 0.0030367492657545005 +531 97 model.embedding_dim 2.0 +531 97 training.batch_size 2.0 +531 97 training.label_smoothing 0.25074222536625684 +531 98 model.embedding_dim 2.0 +531 98 training.batch_size 1.0 +531 98 training.label_smoothing 0.0014139855414441096 +531 99 model.embedding_dim 2.0 +531 99 training.batch_size 0.0 +531 99 training.label_smoothing 0.012862623953253952 +531 100 model.embedding_dim 1.0 +531 100 training.batch_size 0.0 +531 100 training.label_smoothing 0.040188486043635736 +531 1 dataset """kinships""" +531 1 model """rescal""" +531 1 loss """crossentropy""" +531 1 regularizer """no""" +531 1 optimizer """adadelta""" +531 1 training_loop """lcwa""" +531 1 evaluator """rankbased""" +531 2 dataset """kinships""" +531 2 model """rescal""" +531 2 loss """crossentropy""" +531 2 regularizer """no""" +531 2 optimizer """adadelta""" +531 2 training_loop """lcwa""" +531 2 evaluator """rankbased""" +531 3 dataset """kinships""" +531 3 model """rescal""" +531 3 loss """crossentropy""" +531 3 regularizer """no""" +531 3 optimizer """adadelta""" +531 3 training_loop """lcwa""" +531 3 evaluator """rankbased""" +531 4 dataset """kinships""" +531 4 model """rescal""" +531 4 loss """crossentropy""" +531 4 regularizer """no""" +531 4 optimizer """adadelta""" +531 4 training_loop """lcwa""" +531 4 evaluator """rankbased""" +531 
5 dataset """kinships""" +531 5 model """rescal""" +531 5 loss """crossentropy""" +531 5 regularizer """no""" +531 5 optimizer """adadelta""" +531 5 training_loop """lcwa""" +531 5 evaluator """rankbased""" +531 6 dataset """kinships""" +531 6 model """rescal""" +531 6 loss """crossentropy""" +531 6 regularizer """no""" +531 6 optimizer """adadelta""" +531 6 training_loop """lcwa""" +531 6 evaluator """rankbased""" +531 7 dataset """kinships""" +531 7 model """rescal""" +531 7 loss """crossentropy""" +531 7 regularizer """no""" +531 7 optimizer """adadelta""" +531 7 training_loop """lcwa""" +531 7 evaluator """rankbased""" +531 8 dataset """kinships""" +531 8 model """rescal""" +531 8 loss """crossentropy""" +531 8 regularizer """no""" +531 8 optimizer """adadelta""" +531 8 training_loop """lcwa""" +531 8 evaluator """rankbased""" +531 9 dataset """kinships""" +531 9 model """rescal""" +531 9 loss """crossentropy""" +531 9 regularizer """no""" +531 9 optimizer """adadelta""" +531 9 training_loop """lcwa""" +531 9 evaluator """rankbased""" +531 10 dataset """kinships""" +531 10 model """rescal""" +531 10 loss """crossentropy""" +531 10 regularizer """no""" +531 10 optimizer """adadelta""" +531 10 training_loop """lcwa""" +531 10 evaluator """rankbased""" +531 11 dataset """kinships""" +531 11 model """rescal""" +531 11 loss """crossentropy""" +531 11 regularizer """no""" +531 11 optimizer """adadelta""" +531 11 training_loop """lcwa""" +531 11 evaluator """rankbased""" +531 12 dataset """kinships""" +531 12 model """rescal""" +531 12 loss """crossentropy""" +531 12 regularizer """no""" +531 12 optimizer """adadelta""" +531 12 training_loop """lcwa""" +531 12 evaluator """rankbased""" +531 13 dataset """kinships""" +531 13 model """rescal""" +531 13 loss """crossentropy""" +531 13 regularizer """no""" +531 13 optimizer """adadelta""" +531 13 training_loop """lcwa""" +531 13 evaluator """rankbased""" +531 14 dataset """kinships""" +531 14 model """rescal""" +531 14 
loss """crossentropy""" +531 14 regularizer """no""" +531 14 optimizer """adadelta""" +531 14 training_loop """lcwa""" +531 14 evaluator """rankbased""" +531 15 dataset """kinships""" +531 15 model """rescal""" +531 15 loss """crossentropy""" +531 15 regularizer """no""" +531 15 optimizer """adadelta""" +531 15 training_loop """lcwa""" +531 15 evaluator """rankbased""" +531 16 dataset """kinships""" +531 16 model """rescal""" +531 16 loss """crossentropy""" +531 16 regularizer """no""" +531 16 optimizer """adadelta""" +531 16 training_loop """lcwa""" +531 16 evaluator """rankbased""" +531 17 dataset """kinships""" +531 17 model """rescal""" +531 17 loss """crossentropy""" +531 17 regularizer """no""" +531 17 optimizer """adadelta""" +531 17 training_loop """lcwa""" +531 17 evaluator """rankbased""" +531 18 dataset """kinships""" +531 18 model """rescal""" +531 18 loss """crossentropy""" +531 18 regularizer """no""" +531 18 optimizer """adadelta""" +531 18 training_loop """lcwa""" +531 18 evaluator """rankbased""" +531 19 dataset """kinships""" +531 19 model """rescal""" +531 19 loss """crossentropy""" +531 19 regularizer """no""" +531 19 optimizer """adadelta""" +531 19 training_loop """lcwa""" +531 19 evaluator """rankbased""" +531 20 dataset """kinships""" +531 20 model """rescal""" +531 20 loss """crossentropy""" +531 20 regularizer """no""" +531 20 optimizer """adadelta""" +531 20 training_loop """lcwa""" +531 20 evaluator """rankbased""" +531 21 dataset """kinships""" +531 21 model """rescal""" +531 21 loss """crossentropy""" +531 21 regularizer """no""" +531 21 optimizer """adadelta""" +531 21 training_loop """lcwa""" +531 21 evaluator """rankbased""" +531 22 dataset """kinships""" +531 22 model """rescal""" +531 22 loss """crossentropy""" +531 22 regularizer """no""" +531 22 optimizer """adadelta""" +531 22 training_loop """lcwa""" +531 22 evaluator """rankbased""" +531 23 dataset """kinships""" +531 23 model """rescal""" +531 23 loss """crossentropy""" +531 
23 regularizer """no""" +531 23 optimizer """adadelta""" +531 23 training_loop """lcwa""" +531 23 evaluator """rankbased""" +531 24 dataset """kinships""" +531 24 model """rescal""" +531 24 loss """crossentropy""" +531 24 regularizer """no""" +531 24 optimizer """adadelta""" +531 24 training_loop """lcwa""" +531 24 evaluator """rankbased""" +531 25 dataset """kinships""" +531 25 model """rescal""" +531 25 loss """crossentropy""" +531 25 regularizer """no""" +531 25 optimizer """adadelta""" +531 25 training_loop """lcwa""" +531 25 evaluator """rankbased""" +531 26 dataset """kinships""" +531 26 model """rescal""" +531 26 loss """crossentropy""" +531 26 regularizer """no""" +531 26 optimizer """adadelta""" +531 26 training_loop """lcwa""" +531 26 evaluator """rankbased""" +531 27 dataset """kinships""" +531 27 model """rescal""" +531 27 loss """crossentropy""" +531 27 regularizer """no""" +531 27 optimizer """adadelta""" +531 27 training_loop """lcwa""" +531 27 evaluator """rankbased""" +531 28 dataset """kinships""" +531 28 model """rescal""" +531 28 loss """crossentropy""" +531 28 regularizer """no""" +531 28 optimizer """adadelta""" +531 28 training_loop """lcwa""" +531 28 evaluator """rankbased""" +531 29 dataset """kinships""" +531 29 model """rescal""" +531 29 loss """crossentropy""" +531 29 regularizer """no""" +531 29 optimizer """adadelta""" +531 29 training_loop """lcwa""" +531 29 evaluator """rankbased""" +531 30 dataset """kinships""" +531 30 model """rescal""" +531 30 loss """crossentropy""" +531 30 regularizer """no""" +531 30 optimizer """adadelta""" +531 30 training_loop """lcwa""" +531 30 evaluator """rankbased""" +531 31 dataset """kinships""" +531 31 model """rescal""" +531 31 loss """crossentropy""" +531 31 regularizer """no""" +531 31 optimizer """adadelta""" +531 31 training_loop """lcwa""" +531 31 evaluator """rankbased""" +531 32 dataset """kinships""" +531 32 model """rescal""" +531 32 loss """crossentropy""" +531 32 regularizer """no""" +531 
32 optimizer """adadelta""" +531 32 training_loop """lcwa""" +531 32 evaluator """rankbased""" +531 33 dataset """kinships""" +531 33 model """rescal""" +531 33 loss """crossentropy""" +531 33 regularizer """no""" +531 33 optimizer """adadelta""" +531 33 training_loop """lcwa""" +531 33 evaluator """rankbased""" +531 34 dataset """kinships""" +531 34 model """rescal""" +531 34 loss """crossentropy""" +531 34 regularizer """no""" +531 34 optimizer """adadelta""" +531 34 training_loop """lcwa""" +531 34 evaluator """rankbased""" +531 35 dataset """kinships""" +531 35 model """rescal""" +531 35 loss """crossentropy""" +531 35 regularizer """no""" +531 35 optimizer """adadelta""" +531 35 training_loop """lcwa""" +531 35 evaluator """rankbased""" +531 36 dataset """kinships""" +531 36 model """rescal""" +531 36 loss """crossentropy""" +531 36 regularizer """no""" +531 36 optimizer """adadelta""" +531 36 training_loop """lcwa""" +531 36 evaluator """rankbased""" +531 37 dataset """kinships""" +531 37 model """rescal""" +531 37 loss """crossentropy""" +531 37 regularizer """no""" +531 37 optimizer """adadelta""" +531 37 training_loop """lcwa""" +531 37 evaluator """rankbased""" +531 38 dataset """kinships""" +531 38 model """rescal""" +531 38 loss """crossentropy""" +531 38 regularizer """no""" +531 38 optimizer """adadelta""" +531 38 training_loop """lcwa""" +531 38 evaluator """rankbased""" +531 39 dataset """kinships""" +531 39 model """rescal""" +531 39 loss """crossentropy""" +531 39 regularizer """no""" +531 39 optimizer """adadelta""" +531 39 training_loop """lcwa""" +531 39 evaluator """rankbased""" +531 40 dataset """kinships""" +531 40 model """rescal""" +531 40 loss """crossentropy""" +531 40 regularizer """no""" +531 40 optimizer """adadelta""" +531 40 training_loop """lcwa""" +531 40 evaluator """rankbased""" +531 41 dataset """kinships""" +531 41 model """rescal""" +531 41 loss """crossentropy""" +531 41 regularizer """no""" +531 41 optimizer """adadelta""" 
+531 41 training_loop """lcwa""" +531 41 evaluator """rankbased""" +531 42 dataset """kinships""" +531 42 model """rescal""" +531 42 loss """crossentropy""" +531 42 regularizer """no""" +531 42 optimizer """adadelta""" +531 42 training_loop """lcwa""" +531 42 evaluator """rankbased""" +531 43 dataset """kinships""" +531 43 model """rescal""" +531 43 loss """crossentropy""" +531 43 regularizer """no""" +531 43 optimizer """adadelta""" +531 43 training_loop """lcwa""" +531 43 evaluator """rankbased""" +531 44 dataset """kinships""" +531 44 model """rescal""" +531 44 loss """crossentropy""" +531 44 regularizer """no""" +531 44 optimizer """adadelta""" +531 44 training_loop """lcwa""" +531 44 evaluator """rankbased""" +531 45 dataset """kinships""" +531 45 model """rescal""" +531 45 loss """crossentropy""" +531 45 regularizer """no""" +531 45 optimizer """adadelta""" +531 45 training_loop """lcwa""" +531 45 evaluator """rankbased""" +531 46 dataset """kinships""" +531 46 model """rescal""" +531 46 loss """crossentropy""" +531 46 regularizer """no""" +531 46 optimizer """adadelta""" +531 46 training_loop """lcwa""" +531 46 evaluator """rankbased""" +531 47 dataset """kinships""" +531 47 model """rescal""" +531 47 loss """crossentropy""" +531 47 regularizer """no""" +531 47 optimizer """adadelta""" +531 47 training_loop """lcwa""" +531 47 evaluator """rankbased""" +531 48 dataset """kinships""" +531 48 model """rescal""" +531 48 loss """crossentropy""" +531 48 regularizer """no""" +531 48 optimizer """adadelta""" +531 48 training_loop """lcwa""" +531 48 evaluator """rankbased""" +531 49 dataset """kinships""" +531 49 model """rescal""" +531 49 loss """crossentropy""" +531 49 regularizer """no""" +531 49 optimizer """adadelta""" +531 49 training_loop """lcwa""" +531 49 evaluator """rankbased""" +531 50 dataset """kinships""" +531 50 model """rescal""" +531 50 loss """crossentropy""" +531 50 regularizer """no""" +531 50 optimizer """adadelta""" +531 50 training_loop 
"""lcwa""" +531 50 evaluator """rankbased""" +531 51 dataset """kinships""" +531 51 model """rescal""" +531 51 loss """crossentropy""" +531 51 regularizer """no""" +531 51 optimizer """adadelta""" +531 51 training_loop """lcwa""" +531 51 evaluator """rankbased""" +531 52 dataset """kinships""" +531 52 model """rescal""" +531 52 loss """crossentropy""" +531 52 regularizer """no""" +531 52 optimizer """adadelta""" +531 52 training_loop """lcwa""" +531 52 evaluator """rankbased""" +531 53 dataset """kinships""" +531 53 model """rescal""" +531 53 loss """crossentropy""" +531 53 regularizer """no""" +531 53 optimizer """adadelta""" +531 53 training_loop """lcwa""" +531 53 evaluator """rankbased""" +531 54 dataset """kinships""" +531 54 model """rescal""" +531 54 loss """crossentropy""" +531 54 regularizer """no""" +531 54 optimizer """adadelta""" +531 54 training_loop """lcwa""" +531 54 evaluator """rankbased""" +531 55 dataset """kinships""" +531 55 model """rescal""" +531 55 loss """crossentropy""" +531 55 regularizer """no""" +531 55 optimizer """adadelta""" +531 55 training_loop """lcwa""" +531 55 evaluator """rankbased""" +531 56 dataset """kinships""" +531 56 model """rescal""" +531 56 loss """crossentropy""" +531 56 regularizer """no""" +531 56 optimizer """adadelta""" +531 56 training_loop """lcwa""" +531 56 evaluator """rankbased""" +531 57 dataset """kinships""" +531 57 model """rescal""" +531 57 loss """crossentropy""" +531 57 regularizer """no""" +531 57 optimizer """adadelta""" +531 57 training_loop """lcwa""" +531 57 evaluator """rankbased""" +531 58 dataset """kinships""" +531 58 model """rescal""" +531 58 loss """crossentropy""" +531 58 regularizer """no""" +531 58 optimizer """adadelta""" +531 58 training_loop """lcwa""" +531 58 evaluator """rankbased""" +531 59 dataset """kinships""" +531 59 model """rescal""" +531 59 loss """crossentropy""" +531 59 regularizer """no""" +531 59 optimizer """adadelta""" +531 59 training_loop """lcwa""" +531 59 evaluator 
"""rankbased""" +531 60 dataset """kinships""" +531 60 model """rescal""" +531 60 loss """crossentropy""" +531 60 regularizer """no""" +531 60 optimizer """adadelta""" +531 60 training_loop """lcwa""" +531 60 evaluator """rankbased""" +531 61 dataset """kinships""" +531 61 model """rescal""" +531 61 loss """crossentropy""" +531 61 regularizer """no""" +531 61 optimizer """adadelta""" +531 61 training_loop """lcwa""" +531 61 evaluator """rankbased""" +531 62 dataset """kinships""" +531 62 model """rescal""" +531 62 loss """crossentropy""" +531 62 regularizer """no""" +531 62 optimizer """adadelta""" +531 62 training_loop """lcwa""" +531 62 evaluator """rankbased""" +531 63 dataset """kinships""" +531 63 model """rescal""" +531 63 loss """crossentropy""" +531 63 regularizer """no""" +531 63 optimizer """adadelta""" +531 63 training_loop """lcwa""" +531 63 evaluator """rankbased""" +531 64 dataset """kinships""" +531 64 model """rescal""" +531 64 loss """crossentropy""" +531 64 regularizer """no""" +531 64 optimizer """adadelta""" +531 64 training_loop """lcwa""" +531 64 evaluator """rankbased""" +531 65 dataset """kinships""" +531 65 model """rescal""" +531 65 loss """crossentropy""" +531 65 regularizer """no""" +531 65 optimizer """adadelta""" +531 65 training_loop """lcwa""" +531 65 evaluator """rankbased""" +531 66 dataset """kinships""" +531 66 model """rescal""" +531 66 loss """crossentropy""" +531 66 regularizer """no""" +531 66 optimizer """adadelta""" +531 66 training_loop """lcwa""" +531 66 evaluator """rankbased""" +531 67 dataset """kinships""" +531 67 model """rescal""" +531 67 loss """crossentropy""" +531 67 regularizer """no""" +531 67 optimizer """adadelta""" +531 67 training_loop """lcwa""" +531 67 evaluator """rankbased""" +531 68 dataset """kinships""" +531 68 model """rescal""" +531 68 loss """crossentropy""" +531 68 regularizer """no""" +531 68 optimizer """adadelta""" +531 68 training_loop """lcwa""" +531 68 evaluator """rankbased""" +531 69 
dataset """kinships""" +531 69 model """rescal""" +531 69 loss """crossentropy""" +531 69 regularizer """no""" +531 69 optimizer """adadelta""" +531 69 training_loop """lcwa""" +531 69 evaluator """rankbased""" +531 70 dataset """kinships""" +531 70 model """rescal""" +531 70 loss """crossentropy""" +531 70 regularizer """no""" +531 70 optimizer """adadelta""" +531 70 training_loop """lcwa""" +531 70 evaluator """rankbased""" +531 71 dataset """kinships""" +531 71 model """rescal""" +531 71 loss """crossentropy""" +531 71 regularizer """no""" +531 71 optimizer """adadelta""" +531 71 training_loop """lcwa""" +531 71 evaluator """rankbased""" +531 72 dataset """kinships""" +531 72 model """rescal""" +531 72 loss """crossentropy""" +531 72 regularizer """no""" +531 72 optimizer """adadelta""" +531 72 training_loop """lcwa""" +531 72 evaluator """rankbased""" +531 73 dataset """kinships""" +531 73 model """rescal""" +531 73 loss """crossentropy""" +531 73 regularizer """no""" +531 73 optimizer """adadelta""" +531 73 training_loop """lcwa""" +531 73 evaluator """rankbased""" +531 74 dataset """kinships""" +531 74 model """rescal""" +531 74 loss """crossentropy""" +531 74 regularizer """no""" +531 74 optimizer """adadelta""" +531 74 training_loop """lcwa""" +531 74 evaluator """rankbased""" +531 75 dataset """kinships""" +531 75 model """rescal""" +531 75 loss """crossentropy""" +531 75 regularizer """no""" +531 75 optimizer """adadelta""" +531 75 training_loop """lcwa""" +531 75 evaluator """rankbased""" +531 76 dataset """kinships""" +531 76 model """rescal""" +531 76 loss """crossentropy""" +531 76 regularizer """no""" +531 76 optimizer """adadelta""" +531 76 training_loop """lcwa""" +531 76 evaluator """rankbased""" +531 77 dataset """kinships""" +531 77 model """rescal""" +531 77 loss """crossentropy""" +531 77 regularizer """no""" +531 77 optimizer """adadelta""" +531 77 training_loop """lcwa""" +531 77 evaluator """rankbased""" +531 78 dataset """kinships""" +531 
78 model """rescal""" +531 78 loss """crossentropy""" +531 78 regularizer """no""" +531 78 optimizer """adadelta""" +531 78 training_loop """lcwa""" +531 78 evaluator """rankbased""" +531 79 dataset """kinships""" +531 79 model """rescal""" +531 79 loss """crossentropy""" +531 79 regularizer """no""" +531 79 optimizer """adadelta""" +531 79 training_loop """lcwa""" +531 79 evaluator """rankbased""" +531 80 dataset """kinships""" +531 80 model """rescal""" +531 80 loss """crossentropy""" +531 80 regularizer """no""" +531 80 optimizer """adadelta""" +531 80 training_loop """lcwa""" +531 80 evaluator """rankbased""" +531 81 dataset """kinships""" +531 81 model """rescal""" +531 81 loss """crossentropy""" +531 81 regularizer """no""" +531 81 optimizer """adadelta""" +531 81 training_loop """lcwa""" +531 81 evaluator """rankbased""" +531 82 dataset """kinships""" +531 82 model """rescal""" +531 82 loss """crossentropy""" +531 82 regularizer """no""" +531 82 optimizer """adadelta""" +531 82 training_loop """lcwa""" +531 82 evaluator """rankbased""" +531 83 dataset """kinships""" +531 83 model """rescal""" +531 83 loss """crossentropy""" +531 83 regularizer """no""" +531 83 optimizer """adadelta""" +531 83 training_loop """lcwa""" +531 83 evaluator """rankbased""" +531 84 dataset """kinships""" +531 84 model """rescal""" +531 84 loss """crossentropy""" +531 84 regularizer """no""" +531 84 optimizer """adadelta""" +531 84 training_loop """lcwa""" +531 84 evaluator """rankbased""" +531 85 dataset """kinships""" +531 85 model """rescal""" +531 85 loss """crossentropy""" +531 85 regularizer """no""" +531 85 optimizer """adadelta""" +531 85 training_loop """lcwa""" +531 85 evaluator """rankbased""" +531 86 dataset """kinships""" +531 86 model """rescal""" +531 86 loss """crossentropy""" +531 86 regularizer """no""" +531 86 optimizer """adadelta""" +531 86 training_loop """lcwa""" +531 86 evaluator """rankbased""" +531 87 dataset """kinships""" +531 87 model """rescal""" +531 
87 loss """crossentropy""" +531 87 regularizer """no""" +531 87 optimizer """adadelta""" +531 87 training_loop """lcwa""" +531 87 evaluator """rankbased""" +531 88 dataset """kinships""" +531 88 model """rescal""" +531 88 loss """crossentropy""" +531 88 regularizer """no""" +531 88 optimizer """adadelta""" +531 88 training_loop """lcwa""" +531 88 evaluator """rankbased""" +531 89 dataset """kinships""" +531 89 model """rescal""" +531 89 loss """crossentropy""" +531 89 regularizer """no""" +531 89 optimizer """adadelta""" +531 89 training_loop """lcwa""" +531 89 evaluator """rankbased""" +531 90 dataset """kinships""" +531 90 model """rescal""" +531 90 loss """crossentropy""" +531 90 regularizer """no""" +531 90 optimizer """adadelta""" +531 90 training_loop """lcwa""" +531 90 evaluator """rankbased""" +531 91 dataset """kinships""" +531 91 model """rescal""" +531 91 loss """crossentropy""" +531 91 regularizer """no""" +531 91 optimizer """adadelta""" +531 91 training_loop """lcwa""" +531 91 evaluator """rankbased""" +531 92 dataset """kinships""" +531 92 model """rescal""" +531 92 loss """crossentropy""" +531 92 regularizer """no""" +531 92 optimizer """adadelta""" +531 92 training_loop """lcwa""" +531 92 evaluator """rankbased""" +531 93 dataset """kinships""" +531 93 model """rescal""" +531 93 loss """crossentropy""" +531 93 regularizer """no""" +531 93 optimizer """adadelta""" +531 93 training_loop """lcwa""" +531 93 evaluator """rankbased""" +531 94 dataset """kinships""" +531 94 model """rescal""" +531 94 loss """crossentropy""" +531 94 regularizer """no""" +531 94 optimizer """adadelta""" +531 94 training_loop """lcwa""" +531 94 evaluator """rankbased""" +531 95 dataset """kinships""" +531 95 model """rescal""" +531 95 loss """crossentropy""" +531 95 regularizer """no""" +531 95 optimizer """adadelta""" +531 95 training_loop """lcwa""" +531 95 evaluator """rankbased""" +531 96 dataset """kinships""" +531 96 model """rescal""" +531 96 loss """crossentropy""" 
+531 96 regularizer """no""" +531 96 optimizer """adadelta""" +531 96 training_loop """lcwa""" +531 96 evaluator """rankbased""" +531 97 dataset """kinships""" +531 97 model """rescal""" +531 97 loss """crossentropy""" +531 97 regularizer """no""" +531 97 optimizer """adadelta""" +531 97 training_loop """lcwa""" +531 97 evaluator """rankbased""" +531 98 dataset """kinships""" +531 98 model """rescal""" +531 98 loss """crossentropy""" +531 98 regularizer """no""" +531 98 optimizer """adadelta""" +531 98 training_loop """lcwa""" +531 98 evaluator """rankbased""" +531 99 dataset """kinships""" +531 99 model """rescal""" +531 99 loss """crossentropy""" +531 99 regularizer """no""" +531 99 optimizer """adadelta""" +531 99 training_loop """lcwa""" +531 99 evaluator """rankbased""" +531 100 dataset """kinships""" +531 100 model """rescal""" +531 100 loss """crossentropy""" +531 100 regularizer """no""" +531 100 optimizer """adadelta""" +531 100 training_loop """lcwa""" +531 100 evaluator """rankbased""" +532 1 model.embedding_dim 2.0 +532 1 training.batch_size 1.0 +532 1 training.label_smoothing 0.017702494321886874 +532 2 model.embedding_dim 1.0 +532 2 training.batch_size 1.0 +532 2 training.label_smoothing 0.10851363245039135 +532 3 model.embedding_dim 2.0 +532 3 training.batch_size 1.0 +532 3 training.label_smoothing 0.05280738450255521 +532 4 model.embedding_dim 0.0 +532 4 training.batch_size 0.0 +532 4 training.label_smoothing 0.0037895386767121445 +532 5 model.embedding_dim 0.0 +532 5 training.batch_size 1.0 +532 5 training.label_smoothing 0.1696382598100454 +532 6 model.embedding_dim 0.0 +532 6 training.batch_size 1.0 +532 6 training.label_smoothing 0.45845745039210245 +532 7 model.embedding_dim 0.0 +532 7 training.batch_size 2.0 +532 7 training.label_smoothing 0.25492541292060306 +532 8 model.embedding_dim 1.0 +532 8 training.batch_size 1.0 +532 8 training.label_smoothing 0.0020014090265296574 +532 9 model.embedding_dim 1.0 +532 9 training.batch_size 1.0 +532 9 
training.label_smoothing 0.017388861665700223 +532 10 model.embedding_dim 1.0 +532 10 training.batch_size 2.0 +532 10 training.label_smoothing 0.7624994603848015 +532 11 model.embedding_dim 1.0 +532 11 training.batch_size 0.0 +532 11 training.label_smoothing 0.001902750193285326 +532 12 model.embedding_dim 0.0 +532 12 training.batch_size 1.0 +532 12 training.label_smoothing 0.026919094397596162 +532 13 model.embedding_dim 1.0 +532 13 training.batch_size 0.0 +532 13 training.label_smoothing 0.037433664575763574 +532 14 model.embedding_dim 0.0 +532 14 training.batch_size 0.0 +532 14 training.label_smoothing 0.005552824781961341 +532 15 model.embedding_dim 2.0 +532 15 training.batch_size 1.0 +532 15 training.label_smoothing 0.001627569675163309 +532 16 model.embedding_dim 0.0 +532 16 training.batch_size 2.0 +532 16 training.label_smoothing 0.008120657944139232 +532 17 model.embedding_dim 1.0 +532 17 training.batch_size 1.0 +532 17 training.label_smoothing 0.013924374000742938 +532 18 model.embedding_dim 2.0 +532 18 training.batch_size 2.0 +532 18 training.label_smoothing 0.002857296134074363 +532 19 model.embedding_dim 0.0 +532 19 training.batch_size 2.0 +532 19 training.label_smoothing 0.039903337406209174 +532 20 model.embedding_dim 1.0 +532 20 training.batch_size 1.0 +532 20 training.label_smoothing 0.016931335609953923 +532 21 model.embedding_dim 2.0 +532 21 training.batch_size 0.0 +532 21 training.label_smoothing 0.0034787541121720235 +532 22 model.embedding_dim 1.0 +532 22 training.batch_size 1.0 +532 22 training.label_smoothing 0.014530987551914258 +532 23 model.embedding_dim 0.0 +532 23 training.batch_size 1.0 +532 23 training.label_smoothing 0.0032650659687850784 +532 24 model.embedding_dim 1.0 +532 24 training.batch_size 1.0 +532 24 training.label_smoothing 0.0618620245670856 +532 25 model.embedding_dim 1.0 +532 25 training.batch_size 1.0 +532 25 training.label_smoothing 0.0011242022565113926 +532 26 model.embedding_dim 0.0 +532 26 training.batch_size 1.0 
+532 26 training.label_smoothing 0.019312764808582815 +532 27 model.embedding_dim 0.0 +532 27 training.batch_size 1.0 +532 27 training.label_smoothing 0.05182794739661422 +532 28 model.embedding_dim 1.0 +532 28 training.batch_size 1.0 +532 28 training.label_smoothing 0.013046749215841265 +532 29 model.embedding_dim 1.0 +532 29 training.batch_size 0.0 +532 29 training.label_smoothing 0.007619173315322902 +532 30 model.embedding_dim 1.0 +532 30 training.batch_size 2.0 +532 30 training.label_smoothing 0.19649130918283267 +532 31 model.embedding_dim 1.0 +532 31 training.batch_size 1.0 +532 31 training.label_smoothing 0.10813907231459409 +532 32 model.embedding_dim 0.0 +532 32 training.batch_size 1.0 +532 32 training.label_smoothing 0.008513253702808447 +532 33 model.embedding_dim 2.0 +532 33 training.batch_size 0.0 +532 33 training.label_smoothing 0.012078586909109377 +532 34 model.embedding_dim 1.0 +532 34 training.batch_size 2.0 +532 34 training.label_smoothing 0.007474031135767796 +532 35 model.embedding_dim 1.0 +532 35 training.batch_size 2.0 +532 35 training.label_smoothing 0.2852413847979274 +532 36 model.embedding_dim 1.0 +532 36 training.batch_size 0.0 +532 36 training.label_smoothing 0.09177135126944465 +532 37 model.embedding_dim 0.0 +532 37 training.batch_size 1.0 +532 37 training.label_smoothing 0.4545249109454283 +532 38 model.embedding_dim 0.0 +532 38 training.batch_size 2.0 +532 38 training.label_smoothing 0.17908102700380102 +532 39 model.embedding_dim 0.0 +532 39 training.batch_size 1.0 +532 39 training.label_smoothing 0.1278649243854946 +532 40 model.embedding_dim 1.0 +532 40 training.batch_size 1.0 +532 40 training.label_smoothing 0.0016473273440227006 +532 41 model.embedding_dim 0.0 +532 41 training.batch_size 1.0 +532 41 training.label_smoothing 0.014660285118028714 +532 42 model.embedding_dim 0.0 +532 42 training.batch_size 1.0 +532 42 training.label_smoothing 0.013172831269432164 +532 43 model.embedding_dim 1.0 +532 43 training.batch_size 0.0 
+532 43 training.label_smoothing 0.0013607458353433217 +532 44 model.embedding_dim 0.0 +532 44 training.batch_size 0.0 +532 44 training.label_smoothing 0.026168840540806187 +532 45 model.embedding_dim 1.0 +532 45 training.batch_size 2.0 +532 45 training.label_smoothing 0.32350558058409784 +532 46 model.embedding_dim 0.0 +532 46 training.batch_size 1.0 +532 46 training.label_smoothing 0.0940577990353695 +532 47 model.embedding_dim 2.0 +532 47 training.batch_size 2.0 +532 47 training.label_smoothing 0.0011535662042599021 +532 48 model.embedding_dim 0.0 +532 48 training.batch_size 0.0 +532 48 training.label_smoothing 0.05559660677940045 +532 49 model.embedding_dim 1.0 +532 49 training.batch_size 1.0 +532 49 training.label_smoothing 0.030242981963653107 +532 50 model.embedding_dim 2.0 +532 50 training.batch_size 0.0 +532 50 training.label_smoothing 0.09907883962176672 +532 51 model.embedding_dim 1.0 +532 51 training.batch_size 0.0 +532 51 training.label_smoothing 0.3945532777277569 +532 52 model.embedding_dim 1.0 +532 52 training.batch_size 0.0 +532 52 training.label_smoothing 0.0012219592137174401 +532 53 model.embedding_dim 2.0 +532 53 training.batch_size 0.0 +532 53 training.label_smoothing 0.21917957680524372 +532 54 model.embedding_dim 0.0 +532 54 training.batch_size 2.0 +532 54 training.label_smoothing 0.11532560394199541 +532 55 model.embedding_dim 1.0 +532 55 training.batch_size 1.0 +532 55 training.label_smoothing 0.11140291075536687 +532 56 model.embedding_dim 2.0 +532 56 training.batch_size 2.0 +532 56 training.label_smoothing 0.8795103421459609 +532 57 model.embedding_dim 2.0 +532 57 training.batch_size 0.0 +532 57 training.label_smoothing 0.04832345969277898 +532 58 model.embedding_dim 1.0 +532 58 training.batch_size 1.0 +532 58 training.label_smoothing 0.004071283222559056 +532 59 model.embedding_dim 1.0 +532 59 training.batch_size 0.0 +532 59 training.label_smoothing 0.0022904788771473 +532 60 model.embedding_dim 1.0 +532 60 training.batch_size 2.0 +532 
60 training.label_smoothing 0.021326437015695046 +532 61 model.embedding_dim 2.0 +532 61 training.batch_size 2.0 +532 61 training.label_smoothing 0.016001434451996432 +532 62 model.embedding_dim 2.0 +532 62 training.batch_size 0.0 +532 62 training.label_smoothing 0.03189424333572942 +532 63 model.embedding_dim 1.0 +532 63 training.batch_size 2.0 +532 63 training.label_smoothing 0.005639959302314379 +532 64 model.embedding_dim 2.0 +532 64 training.batch_size 2.0 +532 64 training.label_smoothing 0.3205328448539356 +532 65 model.embedding_dim 2.0 +532 65 training.batch_size 2.0 +532 65 training.label_smoothing 0.008860721380482103 +532 66 model.embedding_dim 2.0 +532 66 training.batch_size 1.0 +532 66 training.label_smoothing 0.002733371924740235 +532 67 model.embedding_dim 1.0 +532 67 training.batch_size 2.0 +532 67 training.label_smoothing 0.13149845084254508 +532 68 model.embedding_dim 1.0 +532 68 training.batch_size 0.0 +532 68 training.label_smoothing 0.17949015797904244 +532 69 model.embedding_dim 0.0 +532 69 training.batch_size 2.0 +532 69 training.label_smoothing 0.05404467058129169 +532 70 model.embedding_dim 2.0 +532 70 training.batch_size 2.0 +532 70 training.label_smoothing 0.0061822978789508355 +532 71 model.embedding_dim 2.0 +532 71 training.batch_size 2.0 +532 71 training.label_smoothing 0.0012446438145204985 +532 72 model.embedding_dim 0.0 +532 72 training.batch_size 2.0 +532 72 training.label_smoothing 0.029983503679772518 +532 73 model.embedding_dim 0.0 +532 73 training.batch_size 2.0 +532 73 training.label_smoothing 0.6841522433716688 +532 74 model.embedding_dim 0.0 +532 74 training.batch_size 0.0 +532 74 training.label_smoothing 0.8444890486105268 +532 75 model.embedding_dim 2.0 +532 75 training.batch_size 1.0 +532 75 training.label_smoothing 0.014039883112149747 +532 76 model.embedding_dim 1.0 +532 76 training.batch_size 1.0 +532 76 training.label_smoothing 0.892542106652513 +532 77 model.embedding_dim 2.0 +532 77 training.batch_size 2.0 +532 77 
training.label_smoothing 0.2894352001321121 +532 78 model.embedding_dim 0.0 +532 78 training.batch_size 0.0 +532 78 training.label_smoothing 0.021989720600900733 +532 79 model.embedding_dim 2.0 +532 79 training.batch_size 1.0 +532 79 training.label_smoothing 0.9724671629855172 +532 80 model.embedding_dim 1.0 +532 80 training.batch_size 0.0 +532 80 training.label_smoothing 0.003601699730044209 +532 81 model.embedding_dim 2.0 +532 81 training.batch_size 0.0 +532 81 training.label_smoothing 0.0013379829911787176 +532 82 model.embedding_dim 0.0 +532 82 training.batch_size 2.0 +532 82 training.label_smoothing 0.0019384368735212093 +532 83 model.embedding_dim 1.0 +532 83 training.batch_size 2.0 +532 83 training.label_smoothing 0.026061955991086305 +532 84 model.embedding_dim 0.0 +532 84 training.batch_size 0.0 +532 84 training.label_smoothing 0.3995236125702926 +532 85 model.embedding_dim 1.0 +532 85 training.batch_size 1.0 +532 85 training.label_smoothing 0.008336037945888504 +532 86 model.embedding_dim 0.0 +532 86 training.batch_size 0.0 +532 86 training.label_smoothing 0.16154549162199125 +532 87 model.embedding_dim 1.0 +532 87 training.batch_size 1.0 +532 87 training.label_smoothing 0.20190392193780898 +532 88 model.embedding_dim 0.0 +532 88 training.batch_size 0.0 +532 88 training.label_smoothing 0.19102165209574454 +532 89 model.embedding_dim 1.0 +532 89 training.batch_size 0.0 +532 89 training.label_smoothing 0.030948287905646823 +532 90 model.embedding_dim 2.0 +532 90 training.batch_size 0.0 +532 90 training.label_smoothing 0.425306922692915 +532 91 model.embedding_dim 0.0 +532 91 training.batch_size 1.0 +532 91 training.label_smoothing 0.38601086684314084 +532 92 model.embedding_dim 0.0 +532 92 training.batch_size 0.0 +532 92 training.label_smoothing 0.2883720897036785 +532 93 model.embedding_dim 1.0 +532 93 training.batch_size 2.0 +532 93 training.label_smoothing 0.0022005787553758802 +532 94 model.embedding_dim 1.0 +532 94 training.batch_size 1.0 +532 94 
training.label_smoothing 0.0014561940970094784 +532 95 model.embedding_dim 1.0 +532 95 training.batch_size 0.0 +532 95 training.label_smoothing 0.30199279168563437 +532 96 model.embedding_dim 0.0 +532 96 training.batch_size 2.0 +532 96 training.label_smoothing 0.011210904196130399 +532 97 model.embedding_dim 0.0 +532 97 training.batch_size 2.0 +532 97 training.label_smoothing 0.8421966988250448 +532 98 model.embedding_dim 0.0 +532 98 training.batch_size 1.0 +532 98 training.label_smoothing 0.0038095696466735576 +532 99 model.embedding_dim 1.0 +532 99 training.batch_size 1.0 +532 99 training.label_smoothing 0.0028693042899027676 +532 100 model.embedding_dim 1.0 +532 100 training.batch_size 0.0 +532 100 training.label_smoothing 0.03773411776278806 +532 1 dataset """kinships""" +532 1 model """rescal""" +532 1 loss """crossentropy""" +532 1 regularizer """no""" +532 1 optimizer """adadelta""" +532 1 training_loop """lcwa""" +532 1 evaluator """rankbased""" +532 2 dataset """kinships""" +532 2 model """rescal""" +532 2 loss """crossentropy""" +532 2 regularizer """no""" +532 2 optimizer """adadelta""" +532 2 training_loop """lcwa""" +532 2 evaluator """rankbased""" +532 3 dataset """kinships""" +532 3 model """rescal""" +532 3 loss """crossentropy""" +532 3 regularizer """no""" +532 3 optimizer """adadelta""" +532 3 training_loop """lcwa""" +532 3 evaluator """rankbased""" +532 4 dataset """kinships""" +532 4 model """rescal""" +532 4 loss """crossentropy""" +532 4 regularizer """no""" +532 4 optimizer """adadelta""" +532 4 training_loop """lcwa""" +532 4 evaluator """rankbased""" +532 5 dataset """kinships""" +532 5 model """rescal""" +532 5 loss """crossentropy""" +532 5 regularizer """no""" +532 5 optimizer """adadelta""" +532 5 training_loop """lcwa""" +532 5 evaluator """rankbased""" +532 6 dataset """kinships""" +532 6 model """rescal""" +532 6 loss """crossentropy""" +532 6 regularizer """no""" +532 6 optimizer """adadelta""" +532 6 training_loop """lcwa""" +532 
6 evaluator """rankbased""" +532 7 dataset """kinships""" +532 7 model """rescal""" +532 7 loss """crossentropy""" +532 7 regularizer """no""" +532 7 optimizer """adadelta""" +532 7 training_loop """lcwa""" +532 7 evaluator """rankbased""" +532 8 dataset """kinships""" +532 8 model """rescal""" +532 8 loss """crossentropy""" +532 8 regularizer """no""" +532 8 optimizer """adadelta""" +532 8 training_loop """lcwa""" +532 8 evaluator """rankbased""" +532 9 dataset """kinships""" +532 9 model """rescal""" +532 9 loss """crossentropy""" +532 9 regularizer """no""" +532 9 optimizer """adadelta""" +532 9 training_loop """lcwa""" +532 9 evaluator """rankbased""" +532 10 dataset """kinships""" +532 10 model """rescal""" +532 10 loss """crossentropy""" +532 10 regularizer """no""" +532 10 optimizer """adadelta""" +532 10 training_loop """lcwa""" +532 10 evaluator """rankbased""" +532 11 dataset """kinships""" +532 11 model """rescal""" +532 11 loss """crossentropy""" +532 11 regularizer """no""" +532 11 optimizer """adadelta""" +532 11 training_loop """lcwa""" +532 11 evaluator """rankbased""" +532 12 dataset """kinships""" +532 12 model """rescal""" +532 12 loss """crossentropy""" +532 12 regularizer """no""" +532 12 optimizer """adadelta""" +532 12 training_loop """lcwa""" +532 12 evaluator """rankbased""" +532 13 dataset """kinships""" +532 13 model """rescal""" +532 13 loss """crossentropy""" +532 13 regularizer """no""" +532 13 optimizer """adadelta""" +532 13 training_loop """lcwa""" +532 13 evaluator """rankbased""" +532 14 dataset """kinships""" +532 14 model """rescal""" +532 14 loss """crossentropy""" +532 14 regularizer """no""" +532 14 optimizer """adadelta""" +532 14 training_loop """lcwa""" +532 14 evaluator """rankbased""" +532 15 dataset """kinships""" +532 15 model """rescal""" +532 15 loss """crossentropy""" +532 15 regularizer """no""" +532 15 optimizer """adadelta""" +532 15 training_loop """lcwa""" +532 15 evaluator """rankbased""" +532 16 dataset 
"""kinships""" +532 16 model """rescal""" +532 16 loss """crossentropy""" +532 16 regularizer """no""" +532 16 optimizer """adadelta""" +532 16 training_loop """lcwa""" +532 16 evaluator """rankbased""" +532 17 dataset """kinships""" +532 17 model """rescal""" +532 17 loss """crossentropy""" +532 17 regularizer """no""" +532 17 optimizer """adadelta""" +532 17 training_loop """lcwa""" +532 17 evaluator """rankbased""" +532 18 dataset """kinships""" +532 18 model """rescal""" +532 18 loss """crossentropy""" +532 18 regularizer """no""" +532 18 optimizer """adadelta""" +532 18 training_loop """lcwa""" +532 18 evaluator """rankbased""" +532 19 dataset """kinships""" +532 19 model """rescal""" +532 19 loss """crossentropy""" +532 19 regularizer """no""" +532 19 optimizer """adadelta""" +532 19 training_loop """lcwa""" +532 19 evaluator """rankbased""" +532 20 dataset """kinships""" +532 20 model """rescal""" +532 20 loss """crossentropy""" +532 20 regularizer """no""" +532 20 optimizer """adadelta""" +532 20 training_loop """lcwa""" +532 20 evaluator """rankbased""" +532 21 dataset """kinships""" +532 21 model """rescal""" +532 21 loss """crossentropy""" +532 21 regularizer """no""" +532 21 optimizer """adadelta""" +532 21 training_loop """lcwa""" +532 21 evaluator """rankbased""" +532 22 dataset """kinships""" +532 22 model """rescal""" +532 22 loss """crossentropy""" +532 22 regularizer """no""" +532 22 optimizer """adadelta""" +532 22 training_loop """lcwa""" +532 22 evaluator """rankbased""" +532 23 dataset """kinships""" +532 23 model """rescal""" +532 23 loss """crossentropy""" +532 23 regularizer """no""" +532 23 optimizer """adadelta""" +532 23 training_loop """lcwa""" +532 23 evaluator """rankbased""" +532 24 dataset """kinships""" +532 24 model """rescal""" +532 24 loss """crossentropy""" +532 24 regularizer """no""" +532 24 optimizer """adadelta""" +532 24 training_loop """lcwa""" +532 24 evaluator """rankbased""" +532 25 dataset """kinships""" +532 25 model 
"""rescal""" +532 25 loss """crossentropy""" +532 25 regularizer """no""" +532 25 optimizer """adadelta""" +532 25 training_loop """lcwa""" +532 25 evaluator """rankbased""" +532 26 dataset """kinships""" +532 26 model """rescal""" +532 26 loss """crossentropy""" +532 26 regularizer """no""" +532 26 optimizer """adadelta""" +532 26 training_loop """lcwa""" +532 26 evaluator """rankbased""" +532 27 dataset """kinships""" +532 27 model """rescal""" +532 27 loss """crossentropy""" +532 27 regularizer """no""" +532 27 optimizer """adadelta""" +532 27 training_loop """lcwa""" +532 27 evaluator """rankbased""" +532 28 dataset """kinships""" +532 28 model """rescal""" +532 28 loss """crossentropy""" +532 28 regularizer """no""" +532 28 optimizer """adadelta""" +532 28 training_loop """lcwa""" +532 28 evaluator """rankbased""" +532 29 dataset """kinships""" +532 29 model """rescal""" +532 29 loss """crossentropy""" +532 29 regularizer """no""" +532 29 optimizer """adadelta""" +532 29 training_loop """lcwa""" +532 29 evaluator """rankbased""" +532 30 dataset """kinships""" +532 30 model """rescal""" +532 30 loss """crossentropy""" +532 30 regularizer """no""" +532 30 optimizer """adadelta""" +532 30 training_loop """lcwa""" +532 30 evaluator """rankbased""" +532 31 dataset """kinships""" +532 31 model """rescal""" +532 31 loss """crossentropy""" +532 31 regularizer """no""" +532 31 optimizer """adadelta""" +532 31 training_loop """lcwa""" +532 31 evaluator """rankbased""" +532 32 dataset """kinships""" +532 32 model """rescal""" +532 32 loss """crossentropy""" +532 32 regularizer """no""" +532 32 optimizer """adadelta""" +532 32 training_loop """lcwa""" +532 32 evaluator """rankbased""" +532 33 dataset """kinships""" +532 33 model """rescal""" +532 33 loss """crossentropy""" +532 33 regularizer """no""" +532 33 optimizer """adadelta""" +532 33 training_loop """lcwa""" +532 33 evaluator """rankbased""" +532 34 dataset """kinships""" +532 34 model """rescal""" +532 34 loss 
"""crossentropy""" +532 34 regularizer """no""" +532 34 optimizer """adadelta""" +532 34 training_loop """lcwa""" +532 34 evaluator """rankbased""" +532 35 dataset """kinships""" +532 35 model """rescal""" +532 35 loss """crossentropy""" +532 35 regularizer """no""" +532 35 optimizer """adadelta""" +532 35 training_loop """lcwa""" +532 35 evaluator """rankbased""" +532 36 dataset """kinships""" +532 36 model """rescal""" +532 36 loss """crossentropy""" +532 36 regularizer """no""" +532 36 optimizer """adadelta""" +532 36 training_loop """lcwa""" +532 36 evaluator """rankbased""" +532 37 dataset """kinships""" +532 37 model """rescal""" +532 37 loss """crossentropy""" +532 37 regularizer """no""" +532 37 optimizer """adadelta""" +532 37 training_loop """lcwa""" +532 37 evaluator """rankbased""" +532 38 dataset """kinships""" +532 38 model """rescal""" +532 38 loss """crossentropy""" +532 38 regularizer """no""" +532 38 optimizer """adadelta""" +532 38 training_loop """lcwa""" +532 38 evaluator """rankbased""" +532 39 dataset """kinships""" +532 39 model """rescal""" +532 39 loss """crossentropy""" +532 39 regularizer """no""" +532 39 optimizer """adadelta""" +532 39 training_loop """lcwa""" +532 39 evaluator """rankbased""" +532 40 dataset """kinships""" +532 40 model """rescal""" +532 40 loss """crossentropy""" +532 40 regularizer """no""" +532 40 optimizer """adadelta""" +532 40 training_loop """lcwa""" +532 40 evaluator """rankbased""" +532 41 dataset """kinships""" +532 41 model """rescal""" +532 41 loss """crossentropy""" +532 41 regularizer """no""" +532 41 optimizer """adadelta""" +532 41 training_loop """lcwa""" +532 41 evaluator """rankbased""" +532 42 dataset """kinships""" +532 42 model """rescal""" +532 42 loss """crossentropy""" +532 42 regularizer """no""" +532 42 optimizer """adadelta""" +532 42 training_loop """lcwa""" +532 42 evaluator """rankbased""" +532 43 dataset """kinships""" +532 43 model """rescal""" +532 43 loss """crossentropy""" +532 43 
regularizer """no""" +532 43 optimizer """adadelta""" +532 43 training_loop """lcwa""" +532 43 evaluator """rankbased""" +532 44 dataset """kinships""" +532 44 model """rescal""" +532 44 loss """crossentropy""" +532 44 regularizer """no""" +532 44 optimizer """adadelta""" +532 44 training_loop """lcwa""" +532 44 evaluator """rankbased""" +532 45 dataset """kinships""" +532 45 model """rescal""" +532 45 loss """crossentropy""" +532 45 regularizer """no""" +532 45 optimizer """adadelta""" +532 45 training_loop """lcwa""" +532 45 evaluator """rankbased""" +532 46 dataset """kinships""" +532 46 model """rescal""" +532 46 loss """crossentropy""" +532 46 regularizer """no""" +532 46 optimizer """adadelta""" +532 46 training_loop """lcwa""" +532 46 evaluator """rankbased""" +532 47 dataset """kinships""" +532 47 model """rescal""" +532 47 loss """crossentropy""" +532 47 regularizer """no""" +532 47 optimizer """adadelta""" +532 47 training_loop """lcwa""" +532 47 evaluator """rankbased""" +532 48 dataset """kinships""" +532 48 model """rescal""" +532 48 loss """crossentropy""" +532 48 regularizer """no""" +532 48 optimizer """adadelta""" +532 48 training_loop """lcwa""" +532 48 evaluator """rankbased""" +532 49 dataset """kinships""" +532 49 model """rescal""" +532 49 loss """crossentropy""" +532 49 regularizer """no""" +532 49 optimizer """adadelta""" +532 49 training_loop """lcwa""" +532 49 evaluator """rankbased""" +532 50 dataset """kinships""" +532 50 model """rescal""" +532 50 loss """crossentropy""" +532 50 regularizer """no""" +532 50 optimizer """adadelta""" +532 50 training_loop """lcwa""" +532 50 evaluator """rankbased""" +532 51 dataset """kinships""" +532 51 model """rescal""" +532 51 loss """crossentropy""" +532 51 regularizer """no""" +532 51 optimizer """adadelta""" +532 51 training_loop """lcwa""" +532 51 evaluator """rankbased""" +532 52 dataset """kinships""" +532 52 model """rescal""" +532 52 loss """crossentropy""" +532 52 regularizer """no""" +532 52 
optimizer """adadelta""" +532 52 training_loop """lcwa""" +532 52 evaluator """rankbased""" +532 53 dataset """kinships""" +532 53 model """rescal""" +532 53 loss """crossentropy""" +532 53 regularizer """no""" +532 53 optimizer """adadelta""" +532 53 training_loop """lcwa""" +532 53 evaluator """rankbased""" +532 54 dataset """kinships""" +532 54 model """rescal""" +532 54 loss """crossentropy""" +532 54 regularizer """no""" +532 54 optimizer """adadelta""" +532 54 training_loop """lcwa""" +532 54 evaluator """rankbased""" +532 55 dataset """kinships""" +532 55 model """rescal""" +532 55 loss """crossentropy""" +532 55 regularizer """no""" +532 55 optimizer """adadelta""" +532 55 training_loop """lcwa""" +532 55 evaluator """rankbased""" +532 56 dataset """kinships""" +532 56 model """rescal""" +532 56 loss """crossentropy""" +532 56 regularizer """no""" +532 56 optimizer """adadelta""" +532 56 training_loop """lcwa""" +532 56 evaluator """rankbased""" +532 57 dataset """kinships""" +532 57 model """rescal""" +532 57 loss """crossentropy""" +532 57 regularizer """no""" +532 57 optimizer """adadelta""" +532 57 training_loop """lcwa""" +532 57 evaluator """rankbased""" +532 58 dataset """kinships""" +532 58 model """rescal""" +532 58 loss """crossentropy""" +532 58 regularizer """no""" +532 58 optimizer """adadelta""" +532 58 training_loop """lcwa""" +532 58 evaluator """rankbased""" +532 59 dataset """kinships""" +532 59 model """rescal""" +532 59 loss """crossentropy""" +532 59 regularizer """no""" +532 59 optimizer """adadelta""" +532 59 training_loop """lcwa""" +532 59 evaluator """rankbased""" +532 60 dataset """kinships""" +532 60 model """rescal""" +532 60 loss """crossentropy""" +532 60 regularizer """no""" +532 60 optimizer """adadelta""" +532 60 training_loop """lcwa""" +532 60 evaluator """rankbased""" +532 61 dataset """kinships""" +532 61 model """rescal""" +532 61 loss """crossentropy""" +532 61 regularizer """no""" +532 61 optimizer """adadelta""" 
+532 61 training_loop """lcwa""" +532 61 evaluator """rankbased""" +532 62 dataset """kinships""" +532 62 model """rescal""" +532 62 loss """crossentropy""" +532 62 regularizer """no""" +532 62 optimizer """adadelta""" +532 62 training_loop """lcwa""" +532 62 evaluator """rankbased""" +532 63 dataset """kinships""" +532 63 model """rescal""" +532 63 loss """crossentropy""" +532 63 regularizer """no""" +532 63 optimizer """adadelta""" +532 63 training_loop """lcwa""" +532 63 evaluator """rankbased""" +532 64 dataset """kinships""" +532 64 model """rescal""" +532 64 loss """crossentropy""" +532 64 regularizer """no""" +532 64 optimizer """adadelta""" +532 64 training_loop """lcwa""" +532 64 evaluator """rankbased""" +532 65 dataset """kinships""" +532 65 model """rescal""" +532 65 loss """crossentropy""" +532 65 regularizer """no""" +532 65 optimizer """adadelta""" +532 65 training_loop """lcwa""" +532 65 evaluator """rankbased""" +532 66 dataset """kinships""" +532 66 model """rescal""" +532 66 loss """crossentropy""" +532 66 regularizer """no""" +532 66 optimizer """adadelta""" +532 66 training_loop """lcwa""" +532 66 evaluator """rankbased""" +532 67 dataset """kinships""" +532 67 model """rescal""" +532 67 loss """crossentropy""" +532 67 regularizer """no""" +532 67 optimizer """adadelta""" +532 67 training_loop """lcwa""" +532 67 evaluator """rankbased""" +532 68 dataset """kinships""" +532 68 model """rescal""" +532 68 loss """crossentropy""" +532 68 regularizer """no""" +532 68 optimizer """adadelta""" +532 68 training_loop """lcwa""" +532 68 evaluator """rankbased""" +532 69 dataset """kinships""" +532 69 model """rescal""" +532 69 loss """crossentropy""" +532 69 regularizer """no""" +532 69 optimizer """adadelta""" +532 69 training_loop """lcwa""" +532 69 evaluator """rankbased""" +532 70 dataset """kinships""" +532 70 model """rescal""" +532 70 loss """crossentropy""" +532 70 regularizer """no""" +532 70 optimizer """adadelta""" +532 70 training_loop 
"""lcwa""" +532 70 evaluator """rankbased""" +532 71 dataset """kinships""" +532 71 model """rescal""" +532 71 loss """crossentropy""" +532 71 regularizer """no""" +532 71 optimizer """adadelta""" +532 71 training_loop """lcwa""" +532 71 evaluator """rankbased""" +532 72 dataset """kinships""" +532 72 model """rescal""" +532 72 loss """crossentropy""" +532 72 regularizer """no""" +532 72 optimizer """adadelta""" +532 72 training_loop """lcwa""" +532 72 evaluator """rankbased""" +532 73 dataset """kinships""" +532 73 model """rescal""" +532 73 loss """crossentropy""" +532 73 regularizer """no""" +532 73 optimizer """adadelta""" +532 73 training_loop """lcwa""" +532 73 evaluator """rankbased""" +532 74 dataset """kinships""" +532 74 model """rescal""" +532 74 loss """crossentropy""" +532 74 regularizer """no""" +532 74 optimizer """adadelta""" +532 74 training_loop """lcwa""" +532 74 evaluator """rankbased""" +532 75 dataset """kinships""" +532 75 model """rescal""" +532 75 loss """crossentropy""" +532 75 regularizer """no""" +532 75 optimizer """adadelta""" +532 75 training_loop """lcwa""" +532 75 evaluator """rankbased""" +532 76 dataset """kinships""" +532 76 model """rescal""" +532 76 loss """crossentropy""" +532 76 regularizer """no""" +532 76 optimizer """adadelta""" +532 76 training_loop """lcwa""" +532 76 evaluator """rankbased""" +532 77 dataset """kinships""" +532 77 model """rescal""" +532 77 loss """crossentropy""" +532 77 regularizer """no""" +532 77 optimizer """adadelta""" +532 77 training_loop """lcwa""" +532 77 evaluator """rankbased""" +532 78 dataset """kinships""" +532 78 model """rescal""" +532 78 loss """crossentropy""" +532 78 regularizer """no""" +532 78 optimizer """adadelta""" +532 78 training_loop """lcwa""" +532 78 evaluator """rankbased""" +532 79 dataset """kinships""" +532 79 model """rescal""" +532 79 loss """crossentropy""" +532 79 regularizer """no""" +532 79 optimizer """adadelta""" +532 79 training_loop """lcwa""" +532 79 evaluator 
"""rankbased""" +532 80 dataset """kinships""" +532 80 model """rescal""" +532 80 loss """crossentropy""" +532 80 regularizer """no""" +532 80 optimizer """adadelta""" +532 80 training_loop """lcwa""" +532 80 evaluator """rankbased""" +532 81 dataset """kinships""" +532 81 model """rescal""" +532 81 loss """crossentropy""" +532 81 regularizer """no""" +532 81 optimizer """adadelta""" +532 81 training_loop """lcwa""" +532 81 evaluator """rankbased""" +532 82 dataset """kinships""" +532 82 model """rescal""" +532 82 loss """crossentropy""" +532 82 regularizer """no""" +532 82 optimizer """adadelta""" +532 82 training_loop """lcwa""" +532 82 evaluator """rankbased""" +532 83 dataset """kinships""" +532 83 model """rescal""" +532 83 loss """crossentropy""" +532 83 regularizer """no""" +532 83 optimizer """adadelta""" +532 83 training_loop """lcwa""" +532 83 evaluator """rankbased""" +532 84 dataset """kinships""" +532 84 model """rescal""" +532 84 loss """crossentropy""" +532 84 regularizer """no""" +532 84 optimizer """adadelta""" +532 84 training_loop """lcwa""" +532 84 evaluator """rankbased""" +532 85 dataset """kinships""" +532 85 model """rescal""" +532 85 loss """crossentropy""" +532 85 regularizer """no""" +532 85 optimizer """adadelta""" +532 85 training_loop """lcwa""" +532 85 evaluator """rankbased""" +532 86 dataset """kinships""" +532 86 model """rescal""" +532 86 loss """crossentropy""" +532 86 regularizer """no""" +532 86 optimizer """adadelta""" +532 86 training_loop """lcwa""" +532 86 evaluator """rankbased""" +532 87 dataset """kinships""" +532 87 model """rescal""" +532 87 loss """crossentropy""" +532 87 regularizer """no""" +532 87 optimizer """adadelta""" +532 87 training_loop """lcwa""" +532 87 evaluator """rankbased""" +532 88 dataset """kinships""" +532 88 model """rescal""" +532 88 loss """crossentropy""" +532 88 regularizer """no""" +532 88 optimizer """adadelta""" +532 88 training_loop """lcwa""" +532 88 evaluator """rankbased""" +532 89 
dataset """kinships""" +532 89 model """rescal""" +532 89 loss """crossentropy""" +532 89 regularizer """no""" +532 89 optimizer """adadelta""" +532 89 training_loop """lcwa""" +532 89 evaluator """rankbased""" +532 90 dataset """kinships""" +532 90 model """rescal""" +532 90 loss """crossentropy""" +532 90 regularizer """no""" +532 90 optimizer """adadelta""" +532 90 training_loop """lcwa""" +532 90 evaluator """rankbased""" +532 91 dataset """kinships""" +532 91 model """rescal""" +532 91 loss """crossentropy""" +532 91 regularizer """no""" +532 91 optimizer """adadelta""" +532 91 training_loop """lcwa""" +532 91 evaluator """rankbased""" +532 92 dataset """kinships""" +532 92 model """rescal""" +532 92 loss """crossentropy""" +532 92 regularizer """no""" +532 92 optimizer """adadelta""" +532 92 training_loop """lcwa""" +532 92 evaluator """rankbased""" +532 93 dataset """kinships""" +532 93 model """rescal""" +532 93 loss """crossentropy""" +532 93 regularizer """no""" +532 93 optimizer """adadelta""" +532 93 training_loop """lcwa""" +532 93 evaluator """rankbased""" +532 94 dataset """kinships""" +532 94 model """rescal""" +532 94 loss """crossentropy""" +532 94 regularizer """no""" +532 94 optimizer """adadelta""" +532 94 training_loop """lcwa""" +532 94 evaluator """rankbased""" +532 95 dataset """kinships""" +532 95 model """rescal""" +532 95 loss """crossentropy""" +532 95 regularizer """no""" +532 95 optimizer """adadelta""" +532 95 training_loop """lcwa""" +532 95 evaluator """rankbased""" +532 96 dataset """kinships""" +532 96 model """rescal""" +532 96 loss """crossentropy""" +532 96 regularizer """no""" +532 96 optimizer """adadelta""" +532 96 training_loop """lcwa""" +532 96 evaluator """rankbased""" +532 97 dataset """kinships""" +532 97 model """rescal""" +532 97 loss """crossentropy""" +532 97 regularizer """no""" +532 97 optimizer """adadelta""" +532 97 training_loop """lcwa""" +532 97 evaluator """rankbased""" +532 98 dataset """kinships""" +532 
98 model """rescal""" +532 98 loss """crossentropy""" +532 98 regularizer """no""" +532 98 optimizer """adadelta""" +532 98 training_loop """lcwa""" +532 98 evaluator """rankbased""" +532 99 dataset """kinships""" +532 99 model """rescal""" +532 99 loss """crossentropy""" +532 99 regularizer """no""" +532 99 optimizer """adadelta""" +532 99 training_loop """lcwa""" +532 99 evaluator """rankbased""" +532 100 dataset """kinships""" +532 100 model """rescal""" +532 100 loss """crossentropy""" +532 100 regularizer """no""" +532 100 optimizer """adadelta""" +532 100 training_loop """lcwa""" +532 100 evaluator """rankbased""" +533 1 model.embedding_dim 0.0 +533 1 optimizer.lr 0.012095226004341783 +533 1 training.batch_size 0.0 +533 1 training.label_smoothing 0.20417433130294757 +533 2 model.embedding_dim 1.0 +533 2 optimizer.lr 0.0027898305669813313 +533 2 training.batch_size 1.0 +533 2 training.label_smoothing 0.04631942016458797 +533 3 model.embedding_dim 1.0 +533 3 optimizer.lr 0.042959297023660366 +533 3 training.batch_size 0.0 +533 3 training.label_smoothing 0.02710491822556775 +533 4 model.embedding_dim 1.0 +533 4 optimizer.lr 0.07580481499345716 +533 4 training.batch_size 2.0 +533 4 training.label_smoothing 0.012157587758588963 +533 5 model.embedding_dim 0.0 +533 5 optimizer.lr 0.00960327727964594 +533 5 training.batch_size 1.0 +533 5 training.label_smoothing 0.0025909619865563155 +533 6 model.embedding_dim 0.0 +533 6 optimizer.lr 0.0010740411811299689 +533 6 training.batch_size 0.0 +533 6 training.label_smoothing 0.6333201149705084 +533 7 model.embedding_dim 2.0 +533 7 optimizer.lr 0.03911680950514843 +533 7 training.batch_size 2.0 +533 7 training.label_smoothing 0.06421669006464074 +533 8 model.embedding_dim 2.0 +533 8 optimizer.lr 0.01109787143834889 +533 8 training.batch_size 0.0 +533 8 training.label_smoothing 0.17312942114984886 +533 9 model.embedding_dim 1.0 +533 9 optimizer.lr 0.020695981425590076 +533 9 training.batch_size 2.0 +533 9 
training.label_smoothing 0.0045130465163487395 +533 10 model.embedding_dim 1.0 +533 10 optimizer.lr 0.005953986276918755 +533 10 training.batch_size 2.0 +533 10 training.label_smoothing 0.23476245572500104 +533 11 model.embedding_dim 1.0 +533 11 optimizer.lr 0.0086992134580647 +533 11 training.batch_size 1.0 +533 11 training.label_smoothing 0.0010796488923472676 +533 12 model.embedding_dim 1.0 +533 12 optimizer.lr 0.0035522623723773837 +533 12 training.batch_size 2.0 +533 12 training.label_smoothing 0.001685395142678497 +533 13 model.embedding_dim 0.0 +533 13 optimizer.lr 0.0033421530603060723 +533 13 training.batch_size 0.0 +533 13 training.label_smoothing 0.06304368800769938 +533 14 model.embedding_dim 0.0 +533 14 optimizer.lr 0.0034666640658425874 +533 14 training.batch_size 2.0 +533 14 training.label_smoothing 0.017276621882465554 +533 15 model.embedding_dim 2.0 +533 15 optimizer.lr 0.024341619041683513 +533 15 training.batch_size 1.0 +533 15 training.label_smoothing 0.0036432015748789816 +533 16 model.embedding_dim 0.0 +533 16 optimizer.lr 0.018698073018765193 +533 16 training.batch_size 2.0 +533 16 training.label_smoothing 0.01804817201082706 +533 17 model.embedding_dim 2.0 +533 17 optimizer.lr 0.0019284892319894002 +533 17 training.batch_size 2.0 +533 17 training.label_smoothing 0.1681284257112028 +533 18 model.embedding_dim 1.0 +533 18 optimizer.lr 0.0014509749268972417 +533 18 training.batch_size 1.0 +533 18 training.label_smoothing 0.0030009477335171996 +533 19 model.embedding_dim 1.0 +533 19 optimizer.lr 0.011898916534181595 +533 19 training.batch_size 2.0 +533 19 training.label_smoothing 0.01859476593050209 +533 20 model.embedding_dim 2.0 +533 20 optimizer.lr 0.014225738946563577 +533 20 training.batch_size 2.0 +533 20 training.label_smoothing 0.012214816540527189 +533 21 model.embedding_dim 0.0 +533 21 optimizer.lr 0.027504973364282776 +533 21 training.batch_size 0.0 +533 21 training.label_smoothing 0.0024566659152514586 +533 22 model.embedding_dim 0.0 
+533 22 optimizer.lr 0.041481313439409244 +533 22 training.batch_size 2.0 +533 22 training.label_smoothing 0.0253686316383632 +533 23 model.embedding_dim 0.0 +533 23 optimizer.lr 0.02944524265170036 +533 23 training.batch_size 1.0 +533 23 training.label_smoothing 0.027389090409087342 +533 24 model.embedding_dim 2.0 +533 24 optimizer.lr 0.030142641896111717 +533 24 training.batch_size 0.0 +533 24 training.label_smoothing 0.001329865568541052 +533 25 model.embedding_dim 1.0 +533 25 optimizer.lr 0.05992322114590503 +533 25 training.batch_size 1.0 +533 25 training.label_smoothing 0.28805250462250415 +533 26 model.embedding_dim 0.0 +533 26 optimizer.lr 0.013698666088488456 +533 26 training.batch_size 0.0 +533 26 training.label_smoothing 0.08136818425895548 +533 27 model.embedding_dim 1.0 +533 27 optimizer.lr 0.02213190902686881 +533 27 training.batch_size 2.0 +533 27 training.label_smoothing 0.017051278056376427 +533 28 model.embedding_dim 1.0 +533 28 optimizer.lr 0.08849548582063382 +533 28 training.batch_size 2.0 +533 28 training.label_smoothing 0.46093760897163044 +533 29 model.embedding_dim 2.0 +533 29 optimizer.lr 0.04743140842220419 +533 29 training.batch_size 2.0 +533 29 training.label_smoothing 0.001249395670246538 +533 30 model.embedding_dim 2.0 +533 30 optimizer.lr 0.018758268942581004 +533 30 training.batch_size 2.0 +533 30 training.label_smoothing 0.28235910445568535 +533 31 model.embedding_dim 0.0 +533 31 optimizer.lr 0.0038092230120184262 +533 31 training.batch_size 1.0 +533 31 training.label_smoothing 0.010702888551868128 +533 32 model.embedding_dim 1.0 +533 32 optimizer.lr 0.002716823954903817 +533 32 training.batch_size 0.0 +533 32 training.label_smoothing 0.0053790020901546415 +533 33 model.embedding_dim 2.0 +533 33 optimizer.lr 0.08653064366796044 +533 33 training.batch_size 2.0 +533 33 training.label_smoothing 0.012545564462576634 +533 34 model.embedding_dim 1.0 +533 34 optimizer.lr 0.03984461687999232 +533 34 training.batch_size 2.0 +533 34 
training.label_smoothing 0.045216968310138224 +533 35 model.embedding_dim 1.0 +533 35 optimizer.lr 0.045797246541451536 +533 35 training.batch_size 1.0 +533 35 training.label_smoothing 0.6704209643151747 +533 36 model.embedding_dim 0.0 +533 36 optimizer.lr 0.0019385668726958156 +533 36 training.batch_size 2.0 +533 36 training.label_smoothing 0.12933364089748722 +533 37 model.embedding_dim 0.0 +533 37 optimizer.lr 0.0027578722493223026 +533 37 training.batch_size 0.0 +533 37 training.label_smoothing 0.003062707472727504 +533 38 model.embedding_dim 1.0 +533 38 optimizer.lr 0.007100598805002851 +533 38 training.batch_size 2.0 +533 38 training.label_smoothing 0.016724312441633256 +533 39 model.embedding_dim 1.0 +533 39 optimizer.lr 0.0407648813007558 +533 39 training.batch_size 2.0 +533 39 training.label_smoothing 0.001016210999285316 +533 40 model.embedding_dim 0.0 +533 40 optimizer.lr 0.01095261080820989 +533 40 training.batch_size 2.0 +533 40 training.label_smoothing 0.09752108358537223 +533 41 model.embedding_dim 1.0 +533 41 optimizer.lr 0.0010209678197291832 +533 41 training.batch_size 2.0 +533 41 training.label_smoothing 0.002099826666766335 +533 42 model.embedding_dim 1.0 +533 42 optimizer.lr 0.0012267550732693701 +533 42 training.batch_size 1.0 +533 42 training.label_smoothing 0.0015426545113430895 +533 43 model.embedding_dim 2.0 +533 43 optimizer.lr 0.04614663198083673 +533 43 training.batch_size 2.0 +533 43 training.label_smoothing 0.0011316259980889145 +533 44 model.embedding_dim 2.0 +533 44 optimizer.lr 0.004724619055977173 +533 44 training.batch_size 0.0 +533 44 training.label_smoothing 0.9309225691128725 +533 45 model.embedding_dim 1.0 +533 45 optimizer.lr 0.045121554041406595 +533 45 training.batch_size 1.0 +533 45 training.label_smoothing 0.03276103793358722 +533 46 model.embedding_dim 2.0 +533 46 optimizer.lr 0.0025239900426724565 +533 46 training.batch_size 0.0 +533 46 training.label_smoothing 0.005295257549706763 +533 47 model.embedding_dim 2.0 +533 
47 optimizer.lr 0.05520687161306957 +533 47 training.batch_size 1.0 +533 47 training.label_smoothing 0.0011862237583217681 +533 48 model.embedding_dim 2.0 +533 48 optimizer.lr 0.024566507450040652 +533 48 training.batch_size 1.0 +533 48 training.label_smoothing 0.030343543736958467 +533 49 model.embedding_dim 1.0 +533 49 optimizer.lr 0.009045217994108507 +533 49 training.batch_size 1.0 +533 49 training.label_smoothing 0.0825690609396495 +533 50 model.embedding_dim 1.0 +533 50 optimizer.lr 0.001249572329605871 +533 50 training.batch_size 1.0 +533 50 training.label_smoothing 0.7946524900311772 +533 51 model.embedding_dim 1.0 +533 51 optimizer.lr 0.0038311649060558334 +533 51 training.batch_size 1.0 +533 51 training.label_smoothing 0.08846214378575423 +533 52 model.embedding_dim 1.0 +533 52 optimizer.lr 0.019506716001339516 +533 52 training.batch_size 2.0 +533 52 training.label_smoothing 0.04290037623860974 +533 53 model.embedding_dim 1.0 +533 53 optimizer.lr 0.009314291126019081 +533 53 training.batch_size 2.0 +533 53 training.label_smoothing 0.0538139211522597 +533 54 model.embedding_dim 2.0 +533 54 optimizer.lr 0.011921764932969854 +533 54 training.batch_size 2.0 +533 54 training.label_smoothing 0.0023715825763738077 +533 55 model.embedding_dim 2.0 +533 55 optimizer.lr 0.00901368660403851 +533 55 training.batch_size 0.0 +533 55 training.label_smoothing 0.0010025974925188827 +533 56 model.embedding_dim 0.0 +533 56 optimizer.lr 0.0013175023061184551 +533 56 training.batch_size 2.0 +533 56 training.label_smoothing 0.0014916552792064068 +533 57 model.embedding_dim 2.0 +533 57 optimizer.lr 0.024706103157622755 +533 57 training.batch_size 2.0 +533 57 training.label_smoothing 0.7754139183537376 +533 58 model.embedding_dim 2.0 +533 58 optimizer.lr 0.026032182540169344 +533 58 training.batch_size 1.0 +533 58 training.label_smoothing 0.0045816566056553956 +533 59 model.embedding_dim 1.0 +533 59 optimizer.lr 0.0114793467698584 +533 59 training.batch_size 1.0 +533 59 
training.label_smoothing 0.002859963181071527 +533 60 model.embedding_dim 1.0 +533 60 optimizer.lr 0.05772536333367072 +533 60 training.batch_size 0.0 +533 60 training.label_smoothing 0.03763683363182437 +533 61 model.embedding_dim 2.0 +533 61 optimizer.lr 0.023964781231707536 +533 61 training.batch_size 2.0 +533 61 training.label_smoothing 0.0070624076502103115 +533 62 model.embedding_dim 2.0 +533 62 optimizer.lr 0.023449218715792083 +533 62 training.batch_size 1.0 +533 62 training.label_smoothing 0.03949574157947315 +533 63 model.embedding_dim 0.0 +533 63 optimizer.lr 0.001864035906328303 +533 63 training.batch_size 2.0 +533 63 training.label_smoothing 0.0023898506066611517 +533 64 model.embedding_dim 0.0 +533 64 optimizer.lr 0.029593635610002212 +533 64 training.batch_size 2.0 +533 64 training.label_smoothing 0.09245813631273711 +533 65 model.embedding_dim 2.0 +533 65 optimizer.lr 0.004058182018008193 +533 65 training.batch_size 1.0 +533 65 training.label_smoothing 0.010021476320188533 +533 66 model.embedding_dim 1.0 +533 66 optimizer.lr 0.02265034273194106 +533 66 training.batch_size 2.0 +533 66 training.label_smoothing 0.0701002457989744 +533 67 model.embedding_dim 1.0 +533 67 optimizer.lr 0.04115596764002959 +533 67 training.batch_size 0.0 +533 67 training.label_smoothing 0.3805686953783208 +533 68 model.embedding_dim 1.0 +533 68 optimizer.lr 0.008352481799253903 +533 68 training.batch_size 2.0 +533 68 training.label_smoothing 0.019721540305548537 +533 69 model.embedding_dim 1.0 +533 69 optimizer.lr 0.0030114405135921785 +533 69 training.batch_size 2.0 +533 69 training.label_smoothing 0.9954434214698283 +533 70 model.embedding_dim 1.0 +533 70 optimizer.lr 0.002696793464426872 +533 70 training.batch_size 2.0 +533 70 training.label_smoothing 0.003374118147414521 +533 71 model.embedding_dim 1.0 +533 71 optimizer.lr 0.024523743702535745 +533 71 training.batch_size 0.0 +533 71 training.label_smoothing 0.08949207990160211 +533 72 model.embedding_dim 0.0 +533 72 
optimizer.lr 0.0029242483949136837 +533 72 training.batch_size 1.0 +533 72 training.label_smoothing 0.9912771293899613 +533 73 model.embedding_dim 0.0 +533 73 optimizer.lr 0.0013740799371481805 +533 73 training.batch_size 2.0 +533 73 training.label_smoothing 0.16022111596972305 +533 74 model.embedding_dim 1.0 +533 74 optimizer.lr 0.001289888952227435 +533 74 training.batch_size 0.0 +533 74 training.label_smoothing 0.04014266304699073 +533 75 model.embedding_dim 2.0 +533 75 optimizer.lr 0.04883001638733061 +533 75 training.batch_size 1.0 +533 75 training.label_smoothing 0.019789702565306967 +533 76 model.embedding_dim 2.0 +533 76 optimizer.lr 0.0012304418067815997 +533 76 training.batch_size 1.0 +533 76 training.label_smoothing 0.04372290472316368 +533 77 model.embedding_dim 0.0 +533 77 optimizer.lr 0.006452459036364105 +533 77 training.batch_size 0.0 +533 77 training.label_smoothing 0.19244594857137537 +533 78 model.embedding_dim 1.0 +533 78 optimizer.lr 0.05580569980755936 +533 78 training.batch_size 0.0 +533 78 training.label_smoothing 0.0013697388510974977 +533 79 model.embedding_dim 0.0 +533 79 optimizer.lr 0.07682979185814096 +533 79 training.batch_size 2.0 +533 79 training.label_smoothing 0.010812695600390927 +533 80 model.embedding_dim 2.0 +533 80 optimizer.lr 0.027218931305389153 +533 80 training.batch_size 2.0 +533 80 training.label_smoothing 0.2566730591871493 +533 81 model.embedding_dim 1.0 +533 81 optimizer.lr 0.005987008405495947 +533 81 training.batch_size 2.0 +533 81 training.label_smoothing 0.5203124302922213 +533 82 model.embedding_dim 1.0 +533 82 optimizer.lr 0.004024961032943815 +533 82 training.batch_size 1.0 +533 82 training.label_smoothing 0.157761865200516 +533 83 model.embedding_dim 2.0 +533 83 optimizer.lr 0.04438962544489151 +533 83 training.batch_size 2.0 +533 83 training.label_smoothing 0.013317790941361166 +533 84 model.embedding_dim 1.0 +533 84 optimizer.lr 0.016171258542404244 +533 84 training.batch_size 1.0 +533 84 
training.label_smoothing 0.0011059822077770928 +533 85 model.embedding_dim 0.0 +533 85 optimizer.lr 0.061831236349161066 +533 85 training.batch_size 1.0 +533 85 training.label_smoothing 0.0014577644339553908 +533 86 model.embedding_dim 0.0 +533 86 optimizer.lr 0.001821630996882687 +533 86 training.batch_size 2.0 +533 86 training.label_smoothing 0.013480681191442601 +533 87 model.embedding_dim 0.0 +533 87 optimizer.lr 0.0041910256668450056 +533 87 training.batch_size 1.0 +533 87 training.label_smoothing 0.22370774421979783 +533 88 model.embedding_dim 1.0 +533 88 optimizer.lr 0.009054572852475986 +533 88 training.batch_size 0.0 +533 88 training.label_smoothing 0.3222638573389117 +533 89 model.embedding_dim 1.0 +533 89 optimizer.lr 0.011115527932015406 +533 89 training.batch_size 0.0 +533 89 training.label_smoothing 0.05208746387599771 +533 90 model.embedding_dim 1.0 +533 90 optimizer.lr 0.043750845204075024 +533 90 training.batch_size 2.0 +533 90 training.label_smoothing 0.06603946633333875 +533 91 model.embedding_dim 1.0 +533 91 optimizer.lr 0.030589787058662752 +533 91 training.batch_size 2.0 +533 91 training.label_smoothing 0.10132143952273512 +533 92 model.embedding_dim 1.0 +533 92 optimizer.lr 0.03185460090178953 +533 92 training.batch_size 0.0 +533 92 training.label_smoothing 0.5175156634798364 +533 93 model.embedding_dim 1.0 +533 93 optimizer.lr 0.001513019488385874 +533 93 training.batch_size 1.0 +533 93 training.label_smoothing 0.05644485041391812 +533 94 model.embedding_dim 0.0 +533 94 optimizer.lr 0.04901670497721334 +533 94 training.batch_size 0.0 +533 94 training.label_smoothing 0.011379146935781473 +533 95 model.embedding_dim 2.0 +533 95 optimizer.lr 0.013947148601891739 +533 95 training.batch_size 2.0 +533 95 training.label_smoothing 0.01035370187239688 +533 96 model.embedding_dim 1.0 +533 96 optimizer.lr 0.0016337846215478812 +533 96 training.batch_size 1.0 +533 96 training.label_smoothing 0.2744503733669082 +533 97 model.embedding_dim 2.0 +533 97 
optimizer.lr 0.003949848772846881 +533 97 training.batch_size 1.0 +533 97 training.label_smoothing 0.2406297381214074 +533 98 model.embedding_dim 2.0 +533 98 optimizer.lr 0.004989820293972355 +533 98 training.batch_size 1.0 +533 98 training.label_smoothing 0.8610642087646242 +533 99 model.embedding_dim 0.0 +533 99 optimizer.lr 0.0975948869882425 +533 99 training.batch_size 2.0 +533 99 training.label_smoothing 0.0322825928150018 +533 100 model.embedding_dim 0.0 +533 100 optimizer.lr 0.0743214638067884 +533 100 training.batch_size 2.0 +533 100 training.label_smoothing 0.1488875205163014 +533 1 dataset """kinships""" +533 1 model """rescal""" +533 1 loss """bceaftersigmoid""" +533 1 regularizer """no""" +533 1 optimizer """adam""" +533 1 training_loop """lcwa""" +533 1 evaluator """rankbased""" +533 2 dataset """kinships""" +533 2 model """rescal""" +533 2 loss """bceaftersigmoid""" +533 2 regularizer """no""" +533 2 optimizer """adam""" +533 2 training_loop """lcwa""" +533 2 evaluator """rankbased""" +533 3 dataset """kinships""" +533 3 model """rescal""" +533 3 loss """bceaftersigmoid""" +533 3 regularizer """no""" +533 3 optimizer """adam""" +533 3 training_loop """lcwa""" +533 3 evaluator """rankbased""" +533 4 dataset """kinships""" +533 4 model """rescal""" +533 4 loss """bceaftersigmoid""" +533 4 regularizer """no""" +533 4 optimizer """adam""" +533 4 training_loop """lcwa""" +533 4 evaluator """rankbased""" +533 5 dataset """kinships""" +533 5 model """rescal""" +533 5 loss """bceaftersigmoid""" +533 5 regularizer """no""" +533 5 optimizer """adam""" +533 5 training_loop """lcwa""" +533 5 evaluator """rankbased""" +533 6 dataset """kinships""" +533 6 model """rescal""" +533 6 loss """bceaftersigmoid""" +533 6 regularizer """no""" +533 6 optimizer """adam""" +533 6 training_loop """lcwa""" +533 6 evaluator """rankbased""" +533 7 dataset """kinships""" +533 7 model """rescal""" +533 7 loss """bceaftersigmoid""" +533 7 regularizer """no""" +533 7 optimizer 
"""adam""" +533 7 training_loop """lcwa""" +533 7 evaluator """rankbased""" +533 8 dataset """kinships""" +533 8 model """rescal""" +533 8 loss """bceaftersigmoid""" +533 8 regularizer """no""" +533 8 optimizer """adam""" +533 8 training_loop """lcwa""" +533 8 evaluator """rankbased""" +533 9 dataset """kinships""" +533 9 model """rescal""" +533 9 loss """bceaftersigmoid""" +533 9 regularizer """no""" +533 9 optimizer """adam""" +533 9 training_loop """lcwa""" +533 9 evaluator """rankbased""" +533 10 dataset """kinships""" +533 10 model """rescal""" +533 10 loss """bceaftersigmoid""" +533 10 regularizer """no""" +533 10 optimizer """adam""" +533 10 training_loop """lcwa""" +533 10 evaluator """rankbased""" +533 11 dataset """kinships""" +533 11 model """rescal""" +533 11 loss """bceaftersigmoid""" +533 11 regularizer """no""" +533 11 optimizer """adam""" +533 11 training_loop """lcwa""" +533 11 evaluator """rankbased""" +533 12 dataset """kinships""" +533 12 model """rescal""" +533 12 loss """bceaftersigmoid""" +533 12 regularizer """no""" +533 12 optimizer """adam""" +533 12 training_loop """lcwa""" +533 12 evaluator """rankbased""" +533 13 dataset """kinships""" +533 13 model """rescal""" +533 13 loss """bceaftersigmoid""" +533 13 regularizer """no""" +533 13 optimizer """adam""" +533 13 training_loop """lcwa""" +533 13 evaluator """rankbased""" +533 14 dataset """kinships""" +533 14 model """rescal""" +533 14 loss """bceaftersigmoid""" +533 14 regularizer """no""" +533 14 optimizer """adam""" +533 14 training_loop """lcwa""" +533 14 evaluator """rankbased""" +533 15 dataset """kinships""" +533 15 model """rescal""" +533 15 loss """bceaftersigmoid""" +533 15 regularizer """no""" +533 15 optimizer """adam""" +533 15 training_loop """lcwa""" +533 15 evaluator """rankbased""" +533 16 dataset """kinships""" +533 16 model """rescal""" +533 16 loss """bceaftersigmoid""" +533 16 regularizer """no""" +533 16 optimizer """adam""" +533 16 training_loop """lcwa""" +533 16 
evaluator """rankbased""" +533 17 dataset """kinships""" +533 17 model """rescal""" +533 17 loss """bceaftersigmoid""" +533 17 regularizer """no""" +533 17 optimizer """adam""" +533 17 training_loop """lcwa""" +533 17 evaluator """rankbased""" +533 18 dataset """kinships""" +533 18 model """rescal""" +533 18 loss """bceaftersigmoid""" +533 18 regularizer """no""" +533 18 optimizer """adam""" +533 18 training_loop """lcwa""" +533 18 evaluator """rankbased""" +533 19 dataset """kinships""" +533 19 model """rescal""" +533 19 loss """bceaftersigmoid""" +533 19 regularizer """no""" +533 19 optimizer """adam""" +533 19 training_loop """lcwa""" +533 19 evaluator """rankbased""" +533 20 dataset """kinships""" +533 20 model """rescal""" +533 20 loss """bceaftersigmoid""" +533 20 regularizer """no""" +533 20 optimizer """adam""" +533 20 training_loop """lcwa""" +533 20 evaluator """rankbased""" +533 21 dataset """kinships""" +533 21 model """rescal""" +533 21 loss """bceaftersigmoid""" +533 21 regularizer """no""" +533 21 optimizer """adam""" +533 21 training_loop """lcwa""" +533 21 evaluator """rankbased""" +533 22 dataset """kinships""" +533 22 model """rescal""" +533 22 loss """bceaftersigmoid""" +533 22 regularizer """no""" +533 22 optimizer """adam""" +533 22 training_loop """lcwa""" +533 22 evaluator """rankbased""" +533 23 dataset """kinships""" +533 23 model """rescal""" +533 23 loss """bceaftersigmoid""" +533 23 regularizer """no""" +533 23 optimizer """adam""" +533 23 training_loop """lcwa""" +533 23 evaluator """rankbased""" +533 24 dataset """kinships""" +533 24 model """rescal""" +533 24 loss """bceaftersigmoid""" +533 24 regularizer """no""" +533 24 optimizer """adam""" +533 24 training_loop """lcwa""" +533 24 evaluator """rankbased""" +533 25 dataset """kinships""" +533 25 model """rescal""" +533 25 loss """bceaftersigmoid""" +533 25 regularizer """no""" +533 25 optimizer """adam""" +533 25 training_loop """lcwa""" +533 25 evaluator """rankbased""" +533 26 
dataset """kinships""" +533 26 model """rescal""" +533 26 loss """bceaftersigmoid""" +533 26 regularizer """no""" +533 26 optimizer """adam""" +533 26 training_loop """lcwa""" +533 26 evaluator """rankbased""" +533 27 dataset """kinships""" +533 27 model """rescal""" +533 27 loss """bceaftersigmoid""" +533 27 regularizer """no""" +533 27 optimizer """adam""" +533 27 training_loop """lcwa""" +533 27 evaluator """rankbased""" +533 28 dataset """kinships""" +533 28 model """rescal""" +533 28 loss """bceaftersigmoid""" +533 28 regularizer """no""" +533 28 optimizer """adam""" +533 28 training_loop """lcwa""" +533 28 evaluator """rankbased""" +533 29 dataset """kinships""" +533 29 model """rescal""" +533 29 loss """bceaftersigmoid""" +533 29 regularizer """no""" +533 29 optimizer """adam""" +533 29 training_loop """lcwa""" +533 29 evaluator """rankbased""" +533 30 dataset """kinships""" +533 30 model """rescal""" +533 30 loss """bceaftersigmoid""" +533 30 regularizer """no""" +533 30 optimizer """adam""" +533 30 training_loop """lcwa""" +533 30 evaluator """rankbased""" +533 31 dataset """kinships""" +533 31 model """rescal""" +533 31 loss """bceaftersigmoid""" +533 31 regularizer """no""" +533 31 optimizer """adam""" +533 31 training_loop """lcwa""" +533 31 evaluator """rankbased""" +533 32 dataset """kinships""" +533 32 model """rescal""" +533 32 loss """bceaftersigmoid""" +533 32 regularizer """no""" +533 32 optimizer """adam""" +533 32 training_loop """lcwa""" +533 32 evaluator """rankbased""" +533 33 dataset """kinships""" +533 33 model """rescal""" +533 33 loss """bceaftersigmoid""" +533 33 regularizer """no""" +533 33 optimizer """adam""" +533 33 training_loop """lcwa""" +533 33 evaluator """rankbased""" +533 34 dataset """kinships""" +533 34 model """rescal""" +533 34 loss """bceaftersigmoid""" +533 34 regularizer """no""" +533 34 optimizer """adam""" +533 34 training_loop """lcwa""" +533 34 evaluator """rankbased""" +533 35 dataset """kinships""" +533 35 model 
"""rescal""" +533 35 loss """bceaftersigmoid""" +533 35 regularizer """no""" +533 35 optimizer """adam""" +533 35 training_loop """lcwa""" +533 35 evaluator """rankbased""" +533 36 dataset """kinships""" +533 36 model """rescal""" +533 36 loss """bceaftersigmoid""" +533 36 regularizer """no""" +533 36 optimizer """adam""" +533 36 training_loop """lcwa""" +533 36 evaluator """rankbased""" +533 37 dataset """kinships""" +533 37 model """rescal""" +533 37 loss """bceaftersigmoid""" +533 37 regularizer """no""" +533 37 optimizer """adam""" +533 37 training_loop """lcwa""" +533 37 evaluator """rankbased""" +533 38 dataset """kinships""" +533 38 model """rescal""" +533 38 loss """bceaftersigmoid""" +533 38 regularizer """no""" +533 38 optimizer """adam""" +533 38 training_loop """lcwa""" +533 38 evaluator """rankbased""" +533 39 dataset """kinships""" +533 39 model """rescal""" +533 39 loss """bceaftersigmoid""" +533 39 regularizer """no""" +533 39 optimizer """adam""" +533 39 training_loop """lcwa""" +533 39 evaluator """rankbased""" +533 40 dataset """kinships""" +533 40 model """rescal""" +533 40 loss """bceaftersigmoid""" +533 40 regularizer """no""" +533 40 optimizer """adam""" +533 40 training_loop """lcwa""" +533 40 evaluator """rankbased""" +533 41 dataset """kinships""" +533 41 model """rescal""" +533 41 loss """bceaftersigmoid""" +533 41 regularizer """no""" +533 41 optimizer """adam""" +533 41 training_loop """lcwa""" +533 41 evaluator """rankbased""" +533 42 dataset """kinships""" +533 42 model """rescal""" +533 42 loss """bceaftersigmoid""" +533 42 regularizer """no""" +533 42 optimizer """adam""" +533 42 training_loop """lcwa""" +533 42 evaluator """rankbased""" +533 43 dataset """kinships""" +533 43 model """rescal""" +533 43 loss """bceaftersigmoid""" +533 43 regularizer """no""" +533 43 optimizer """adam""" +533 43 training_loop """lcwa""" +533 43 evaluator """rankbased""" +533 44 dataset """kinships""" +533 44 model """rescal""" +533 44 loss 
"""bceaftersigmoid""" +533 44 regularizer """no""" +533 44 optimizer """adam""" +533 44 training_loop """lcwa""" +533 44 evaluator """rankbased""" +533 45 dataset """kinships""" +533 45 model """rescal""" +533 45 loss """bceaftersigmoid""" +533 45 regularizer """no""" +533 45 optimizer """adam""" +533 45 training_loop """lcwa""" +533 45 evaluator """rankbased""" +533 46 dataset """kinships""" +533 46 model """rescal""" +533 46 loss """bceaftersigmoid""" +533 46 regularizer """no""" +533 46 optimizer """adam""" +533 46 training_loop """lcwa""" +533 46 evaluator """rankbased""" +533 47 dataset """kinships""" +533 47 model """rescal""" +533 47 loss """bceaftersigmoid""" +533 47 regularizer """no""" +533 47 optimizer """adam""" +533 47 training_loop """lcwa""" +533 47 evaluator """rankbased""" +533 48 dataset """kinships""" +533 48 model """rescal""" +533 48 loss """bceaftersigmoid""" +533 48 regularizer """no""" +533 48 optimizer """adam""" +533 48 training_loop """lcwa""" +533 48 evaluator """rankbased""" +533 49 dataset """kinships""" +533 49 model """rescal""" +533 49 loss """bceaftersigmoid""" +533 49 regularizer """no""" +533 49 optimizer """adam""" +533 49 training_loop """lcwa""" +533 49 evaluator """rankbased""" +533 50 dataset """kinships""" +533 50 model """rescal""" +533 50 loss """bceaftersigmoid""" +533 50 regularizer """no""" +533 50 optimizer """adam""" +533 50 training_loop """lcwa""" +533 50 evaluator """rankbased""" +533 51 dataset """kinships""" +533 51 model """rescal""" +533 51 loss """bceaftersigmoid""" +533 51 regularizer """no""" +533 51 optimizer """adam""" +533 51 training_loop """lcwa""" +533 51 evaluator """rankbased""" +533 52 dataset """kinships""" +533 52 model """rescal""" +533 52 loss """bceaftersigmoid""" +533 52 regularizer """no""" +533 52 optimizer """adam""" +533 52 training_loop """lcwa""" +533 52 evaluator """rankbased""" +533 53 dataset """kinships""" +533 53 model """rescal""" +533 53 loss """bceaftersigmoid""" +533 53 
regularizer """no""" +533 53 optimizer """adam""" +533 53 training_loop """lcwa""" +533 53 evaluator """rankbased""" +533 54 dataset """kinships""" +533 54 model """rescal""" +533 54 loss """bceaftersigmoid""" +533 54 regularizer """no""" +533 54 optimizer """adam""" +533 54 training_loop """lcwa""" +533 54 evaluator """rankbased""" +533 55 dataset """kinships""" +533 55 model """rescal""" +533 55 loss """bceaftersigmoid""" +533 55 regularizer """no""" +533 55 optimizer """adam""" +533 55 training_loop """lcwa""" +533 55 evaluator """rankbased""" +533 56 dataset """kinships""" +533 56 model """rescal""" +533 56 loss """bceaftersigmoid""" +533 56 regularizer """no""" +533 56 optimizer """adam""" +533 56 training_loop """lcwa""" +533 56 evaluator """rankbased""" +533 57 dataset """kinships""" +533 57 model """rescal""" +533 57 loss """bceaftersigmoid""" +533 57 regularizer """no""" +533 57 optimizer """adam""" +533 57 training_loop """lcwa""" +533 57 evaluator """rankbased""" +533 58 dataset """kinships""" +533 58 model """rescal""" +533 58 loss """bceaftersigmoid""" +533 58 regularizer """no""" +533 58 optimizer """adam""" +533 58 training_loop """lcwa""" +533 58 evaluator """rankbased""" +533 59 dataset """kinships""" +533 59 model """rescal""" +533 59 loss """bceaftersigmoid""" +533 59 regularizer """no""" +533 59 optimizer """adam""" +533 59 training_loop """lcwa""" +533 59 evaluator """rankbased""" +533 60 dataset """kinships""" +533 60 model """rescal""" +533 60 loss """bceaftersigmoid""" +533 60 regularizer """no""" +533 60 optimizer """adam""" +533 60 training_loop """lcwa""" +533 60 evaluator """rankbased""" +533 61 dataset """kinships""" +533 61 model """rescal""" +533 61 loss """bceaftersigmoid""" +533 61 regularizer """no""" +533 61 optimizer """adam""" +533 61 training_loop """lcwa""" +533 61 evaluator """rankbased""" +533 62 dataset """kinships""" +533 62 model """rescal""" +533 62 loss """bceaftersigmoid""" +533 62 regularizer """no""" +533 62 
optimizer """adam""" +533 62 training_loop """lcwa""" +533 62 evaluator """rankbased""" +533 63 dataset """kinships""" +533 63 model """rescal""" +533 63 loss """bceaftersigmoid""" +533 63 regularizer """no""" +533 63 optimizer """adam""" +533 63 training_loop """lcwa""" +533 63 evaluator """rankbased""" +533 64 dataset """kinships""" +533 64 model """rescal""" +533 64 loss """bceaftersigmoid""" +533 64 regularizer """no""" +533 64 optimizer """adam""" +533 64 training_loop """lcwa""" +533 64 evaluator """rankbased""" +533 65 dataset """kinships""" +533 65 model """rescal""" +533 65 loss """bceaftersigmoid""" +533 65 regularizer """no""" +533 65 optimizer """adam""" +533 65 training_loop """lcwa""" +533 65 evaluator """rankbased""" +533 66 dataset """kinships""" +533 66 model """rescal""" +533 66 loss """bceaftersigmoid""" +533 66 regularizer """no""" +533 66 optimizer """adam""" +533 66 training_loop """lcwa""" +533 66 evaluator """rankbased""" +533 67 dataset """kinships""" +533 67 model """rescal""" +533 67 loss """bceaftersigmoid""" +533 67 regularizer """no""" +533 67 optimizer """adam""" +533 67 training_loop """lcwa""" +533 67 evaluator """rankbased""" +533 68 dataset """kinships""" +533 68 model """rescal""" +533 68 loss """bceaftersigmoid""" +533 68 regularizer """no""" +533 68 optimizer """adam""" +533 68 training_loop """lcwa""" +533 68 evaluator """rankbased""" +533 69 dataset """kinships""" +533 69 model """rescal""" +533 69 loss """bceaftersigmoid""" +533 69 regularizer """no""" +533 69 optimizer """adam""" +533 69 training_loop """lcwa""" +533 69 evaluator """rankbased""" +533 70 dataset """kinships""" +533 70 model """rescal""" +533 70 loss """bceaftersigmoid""" +533 70 regularizer """no""" +533 70 optimizer """adam""" +533 70 training_loop """lcwa""" +533 70 evaluator """rankbased""" +533 71 dataset """kinships""" +533 71 model """rescal""" +533 71 loss """bceaftersigmoid""" +533 71 regularizer """no""" +533 71 optimizer """adam""" +533 71 
training_loop """lcwa""" +533 71 evaluator """rankbased""" +533 72 dataset """kinships""" +533 72 model """rescal""" +533 72 loss """bceaftersigmoid""" +533 72 regularizer """no""" +533 72 optimizer """adam""" +533 72 training_loop """lcwa""" +533 72 evaluator """rankbased""" +533 73 dataset """kinships""" +533 73 model """rescal""" +533 73 loss """bceaftersigmoid""" +533 73 regularizer """no""" +533 73 optimizer """adam""" +533 73 training_loop """lcwa""" +533 73 evaluator """rankbased""" +533 74 dataset """kinships""" +533 74 model """rescal""" +533 74 loss """bceaftersigmoid""" +533 74 regularizer """no""" +533 74 optimizer """adam""" +533 74 training_loop """lcwa""" +533 74 evaluator """rankbased""" +533 75 dataset """kinships""" +533 75 model """rescal""" +533 75 loss """bceaftersigmoid""" +533 75 regularizer """no""" +533 75 optimizer """adam""" +533 75 training_loop """lcwa""" +533 75 evaluator """rankbased""" +533 76 dataset """kinships""" +533 76 model """rescal""" +533 76 loss """bceaftersigmoid""" +533 76 regularizer """no""" +533 76 optimizer """adam""" +533 76 training_loop """lcwa""" +533 76 evaluator """rankbased""" +533 77 dataset """kinships""" +533 77 model """rescal""" +533 77 loss """bceaftersigmoid""" +533 77 regularizer """no""" +533 77 optimizer """adam""" +533 77 training_loop """lcwa""" +533 77 evaluator """rankbased""" +533 78 dataset """kinships""" +533 78 model """rescal""" +533 78 loss """bceaftersigmoid""" +533 78 regularizer """no""" +533 78 optimizer """adam""" +533 78 training_loop """lcwa""" +533 78 evaluator """rankbased""" +533 79 dataset """kinships""" +533 79 model """rescal""" +533 79 loss """bceaftersigmoid""" +533 79 regularizer """no""" +533 79 optimizer """adam""" +533 79 training_loop """lcwa""" +533 79 evaluator """rankbased""" +533 80 dataset """kinships""" +533 80 model """rescal""" +533 80 loss """bceaftersigmoid""" +533 80 regularizer """no""" +533 80 optimizer """adam""" +533 80 training_loop """lcwa""" +533 80 
evaluator """rankbased""" +533 81 dataset """kinships""" +533 81 model """rescal""" +533 81 loss """bceaftersigmoid""" +533 81 regularizer """no""" +533 81 optimizer """adam""" +533 81 training_loop """lcwa""" +533 81 evaluator """rankbased""" +533 82 dataset """kinships""" +533 82 model """rescal""" +533 82 loss """bceaftersigmoid""" +533 82 regularizer """no""" +533 82 optimizer """adam""" +533 82 training_loop """lcwa""" +533 82 evaluator """rankbased""" +533 83 dataset """kinships""" +533 83 model """rescal""" +533 83 loss """bceaftersigmoid""" +533 83 regularizer """no""" +533 83 optimizer """adam""" +533 83 training_loop """lcwa""" +533 83 evaluator """rankbased""" +533 84 dataset """kinships""" +533 84 model """rescal""" +533 84 loss """bceaftersigmoid""" +533 84 regularizer """no""" +533 84 optimizer """adam""" +533 84 training_loop """lcwa""" +533 84 evaluator """rankbased""" +533 85 dataset """kinships""" +533 85 model """rescal""" +533 85 loss """bceaftersigmoid""" +533 85 regularizer """no""" +533 85 optimizer """adam""" +533 85 training_loop """lcwa""" +533 85 evaluator """rankbased""" +533 86 dataset """kinships""" +533 86 model """rescal""" +533 86 loss """bceaftersigmoid""" +533 86 regularizer """no""" +533 86 optimizer """adam""" +533 86 training_loop """lcwa""" +533 86 evaluator """rankbased""" +533 87 dataset """kinships""" +533 87 model """rescal""" +533 87 loss """bceaftersigmoid""" +533 87 regularizer """no""" +533 87 optimizer """adam""" +533 87 training_loop """lcwa""" +533 87 evaluator """rankbased""" +533 88 dataset """kinships""" +533 88 model """rescal""" +533 88 loss """bceaftersigmoid""" +533 88 regularizer """no""" +533 88 optimizer """adam""" +533 88 training_loop """lcwa""" +533 88 evaluator """rankbased""" +533 89 dataset """kinships""" +533 89 model """rescal""" +533 89 loss """bceaftersigmoid""" +533 89 regularizer """no""" +533 89 optimizer """adam""" +533 89 training_loop """lcwa""" +533 89 evaluator """rankbased""" +533 90 
dataset """kinships""" +533 90 model """rescal""" +533 90 loss """bceaftersigmoid""" +533 90 regularizer """no""" +533 90 optimizer """adam""" +533 90 training_loop """lcwa""" +533 90 evaluator """rankbased""" +533 91 dataset """kinships""" +533 91 model """rescal""" +533 91 loss """bceaftersigmoid""" +533 91 regularizer """no""" +533 91 optimizer """adam""" +533 91 training_loop """lcwa""" +533 91 evaluator """rankbased""" +533 92 dataset """kinships""" +533 92 model """rescal""" +533 92 loss """bceaftersigmoid""" +533 92 regularizer """no""" +533 92 optimizer """adam""" +533 92 training_loop """lcwa""" +533 92 evaluator """rankbased""" +533 93 dataset """kinships""" +533 93 model """rescal""" +533 93 loss """bceaftersigmoid""" +533 93 regularizer """no""" +533 93 optimizer """adam""" +533 93 training_loop """lcwa""" +533 93 evaluator """rankbased""" +533 94 dataset """kinships""" +533 94 model """rescal""" +533 94 loss """bceaftersigmoid""" +533 94 regularizer """no""" +533 94 optimizer """adam""" +533 94 training_loop """lcwa""" +533 94 evaluator """rankbased""" +533 95 dataset """kinships""" +533 95 model """rescal""" +533 95 loss """bceaftersigmoid""" +533 95 regularizer """no""" +533 95 optimizer """adam""" +533 95 training_loop """lcwa""" +533 95 evaluator """rankbased""" +533 96 dataset """kinships""" +533 96 model """rescal""" +533 96 loss """bceaftersigmoid""" +533 96 regularizer """no""" +533 96 optimizer """adam""" +533 96 training_loop """lcwa""" +533 96 evaluator """rankbased""" +533 97 dataset """kinships""" +533 97 model """rescal""" +533 97 loss """bceaftersigmoid""" +533 97 regularizer """no""" +533 97 optimizer """adam""" +533 97 training_loop """lcwa""" +533 97 evaluator """rankbased""" +533 98 dataset """kinships""" +533 98 model """rescal""" +533 98 loss """bceaftersigmoid""" +533 98 regularizer """no""" +533 98 optimizer """adam""" +533 98 training_loop """lcwa""" +533 98 evaluator """rankbased""" +533 99 dataset """kinships""" +533 99 model 
"""rescal""" +533 99 loss """bceaftersigmoid""" +533 99 regularizer """no""" +533 99 optimizer """adam""" +533 99 training_loop """lcwa""" +533 99 evaluator """rankbased""" +533 100 dataset """kinships""" +533 100 model """rescal""" +533 100 loss """bceaftersigmoid""" +533 100 regularizer """no""" +533 100 optimizer """adam""" +533 100 training_loop """lcwa""" +533 100 evaluator """rankbased""" +534 1 model.embedding_dim 2.0 +534 1 optimizer.lr 0.005607843710969107 +534 1 training.batch_size 1.0 +534 1 training.label_smoothing 0.07594874558991692 +534 2 model.embedding_dim 2.0 +534 2 optimizer.lr 0.05817717170040513 +534 2 training.batch_size 0.0 +534 2 training.label_smoothing 0.27900306881242715 +534 3 model.embedding_dim 0.0 +534 3 optimizer.lr 0.005845493118893011 +534 3 training.batch_size 0.0 +534 3 training.label_smoothing 0.16756919005350854 +534 4 model.embedding_dim 1.0 +534 4 optimizer.lr 0.0010535943275427804 +534 4 training.batch_size 2.0 +534 4 training.label_smoothing 0.6619837360806649 +534 5 model.embedding_dim 0.0 +534 5 optimizer.lr 0.03717461537688782 +534 5 training.batch_size 2.0 +534 5 training.label_smoothing 0.13645132352021108 +534 6 model.embedding_dim 2.0 +534 6 optimizer.lr 0.004288067765995562 +534 6 training.batch_size 2.0 +534 6 training.label_smoothing 0.19202804513731453 +534 7 model.embedding_dim 1.0 +534 7 optimizer.lr 0.05448467860640054 +534 7 training.batch_size 1.0 +534 7 training.label_smoothing 0.0010446584331143592 +534 8 model.embedding_dim 1.0 +534 8 optimizer.lr 0.0011458478175931546 +534 8 training.batch_size 1.0 +534 8 training.label_smoothing 0.8922959172525258 +534 9 model.embedding_dim 2.0 +534 9 optimizer.lr 0.00433605963199991 +534 9 training.batch_size 0.0 +534 9 training.label_smoothing 0.013661976628536025 +534 10 model.embedding_dim 2.0 +534 10 optimizer.lr 0.045319919869312615 +534 10 training.batch_size 0.0 +534 10 training.label_smoothing 0.2912984331473811 +534 11 model.embedding_dim 2.0 +534 11 
optimizer.lr 0.004384292685641759 +534 11 training.batch_size 1.0 +534 11 training.label_smoothing 0.001267801128800315 +534 12 model.embedding_dim 0.0 +534 12 optimizer.lr 0.0010450977339982088 +534 12 training.batch_size 2.0 +534 12 training.label_smoothing 0.07883767455981663 +534 13 model.embedding_dim 2.0 +534 13 optimizer.lr 0.09159149739552402 +534 13 training.batch_size 0.0 +534 13 training.label_smoothing 0.03457346228162068 +534 14 model.embedding_dim 1.0 +534 14 optimizer.lr 0.03781574218093062 +534 14 training.batch_size 1.0 +534 14 training.label_smoothing 0.25733469399182946 +534 15 model.embedding_dim 0.0 +534 15 optimizer.lr 0.003701863325871557 +534 15 training.batch_size 2.0 +534 15 training.label_smoothing 0.36617771400641336 +534 16 model.embedding_dim 1.0 +534 16 optimizer.lr 0.002549313258468862 +534 16 training.batch_size 2.0 +534 16 training.label_smoothing 0.86304922293529 +534 17 model.embedding_dim 1.0 +534 17 optimizer.lr 0.00470335885794916 +534 17 training.batch_size 2.0 +534 17 training.label_smoothing 0.03227518078055571 +534 18 model.embedding_dim 2.0 +534 18 optimizer.lr 0.008421022476595402 +534 18 training.batch_size 1.0 +534 18 training.label_smoothing 0.3061263869769232 +534 19 model.embedding_dim 1.0 +534 19 optimizer.lr 0.002227972054417977 +534 19 training.batch_size 1.0 +534 19 training.label_smoothing 0.001286779867429954 +534 20 model.embedding_dim 1.0 +534 20 optimizer.lr 0.08493921432367765 +534 20 training.batch_size 2.0 +534 20 training.label_smoothing 0.030933229571161607 +534 21 model.embedding_dim 0.0 +534 21 optimizer.lr 0.0838818748332764 +534 21 training.batch_size 0.0 +534 21 training.label_smoothing 0.5080567638053343 +534 22 model.embedding_dim 2.0 +534 22 optimizer.lr 0.022143999599682013 +534 22 training.batch_size 0.0 +534 22 training.label_smoothing 0.005403452308421987 +534 23 model.embedding_dim 2.0 +534 23 optimizer.lr 0.009496527618552745 +534 23 training.batch_size 0.0 +534 23 
training.label_smoothing 0.019237658278039912 +534 24 model.embedding_dim 1.0 +534 24 optimizer.lr 0.04303582409902111 +534 24 training.batch_size 0.0 +534 24 training.label_smoothing 0.7109830256882546 +534 25 model.embedding_dim 1.0 +534 25 optimizer.lr 0.002980213947515392 +534 25 training.batch_size 0.0 +534 25 training.label_smoothing 0.019977040899791354 +534 26 model.embedding_dim 0.0 +534 26 optimizer.lr 0.0018678000297984714 +534 26 training.batch_size 1.0 +534 26 training.label_smoothing 0.9125461334243814 +534 27 model.embedding_dim 2.0 +534 27 optimizer.lr 0.0014333066461655246 +534 27 training.batch_size 2.0 +534 27 training.label_smoothing 0.005654899834811046 +534 28 model.embedding_dim 1.0 +534 28 optimizer.lr 0.019510201820810154 +534 28 training.batch_size 1.0 +534 28 training.label_smoothing 0.06821239855868318 +534 29 model.embedding_dim 1.0 +534 29 optimizer.lr 0.024359653226745586 +534 29 training.batch_size 0.0 +534 29 training.label_smoothing 0.028560941765910976 +534 30 model.embedding_dim 1.0 +534 30 optimizer.lr 0.005606760499035263 +534 30 training.batch_size 0.0 +534 30 training.label_smoothing 0.013819797379827272 +534 31 model.embedding_dim 0.0 +534 31 optimizer.lr 0.0323038810790975 +534 31 training.batch_size 1.0 +534 31 training.label_smoothing 0.2668700845982406 +534 32 model.embedding_dim 0.0 +534 32 optimizer.lr 0.06159824980259217 +534 32 training.batch_size 2.0 +534 32 training.label_smoothing 0.38643851027557957 +534 33 model.embedding_dim 0.0 +534 33 optimizer.lr 0.004230635010464439 +534 33 training.batch_size 2.0 +534 33 training.label_smoothing 0.43634319224698515 +534 34 model.embedding_dim 1.0 +534 34 optimizer.lr 0.05801968096091151 +534 34 training.batch_size 2.0 +534 34 training.label_smoothing 0.3651408608462482 +534 35 model.embedding_dim 0.0 +534 35 optimizer.lr 0.02308214035115419 +534 35 training.batch_size 1.0 +534 35 training.label_smoothing 0.020588940878975247 +534 36 model.embedding_dim 2.0 +534 36 
optimizer.lr 0.009810893051174524 +534 36 training.batch_size 0.0 +534 36 training.label_smoothing 0.14233847635366753 +534 37 model.embedding_dim 1.0 +534 37 optimizer.lr 0.0021580656499985123 +534 37 training.batch_size 0.0 +534 37 training.label_smoothing 0.023253597809446854 +534 38 model.embedding_dim 2.0 +534 38 optimizer.lr 0.0038913279436371853 +534 38 training.batch_size 2.0 +534 38 training.label_smoothing 0.0017480535828561827 +534 39 model.embedding_dim 2.0 +534 39 optimizer.lr 0.026561064123754145 +534 39 training.batch_size 1.0 +534 39 training.label_smoothing 0.007865572793853887 +534 40 model.embedding_dim 2.0 +534 40 optimizer.lr 0.010297566557357458 +534 40 training.batch_size 0.0 +534 40 training.label_smoothing 0.49749542658680995 +534 41 model.embedding_dim 1.0 +534 41 optimizer.lr 0.09749023355346761 +534 41 training.batch_size 1.0 +534 41 training.label_smoothing 0.20813153886037686 +534 42 model.embedding_dim 1.0 +534 42 optimizer.lr 0.07777231817715267 +534 42 training.batch_size 2.0 +534 42 training.label_smoothing 0.006644730861191341 +534 43 model.embedding_dim 1.0 +534 43 optimizer.lr 0.008994553183351087 +534 43 training.batch_size 1.0 +534 43 training.label_smoothing 0.001535238368519801 +534 44 model.embedding_dim 0.0 +534 44 optimizer.lr 0.010495130815693995 +534 44 training.batch_size 0.0 +534 44 training.label_smoothing 0.0012433834947569375 +534 45 model.embedding_dim 1.0 +534 45 optimizer.lr 0.015374308165396517 +534 45 training.batch_size 2.0 +534 45 training.label_smoothing 0.06257255940689972 +534 46 model.embedding_dim 0.0 +534 46 optimizer.lr 0.0014712706860394117 +534 46 training.batch_size 0.0 +534 46 training.label_smoothing 0.00411190768582538 +534 47 model.embedding_dim 1.0 +534 47 optimizer.lr 0.0013061347665630331 +534 47 training.batch_size 2.0 +534 47 training.label_smoothing 0.6624399232908506 +534 48 model.embedding_dim 1.0 +534 48 optimizer.lr 0.006228280249737445 +534 48 training.batch_size 0.0 +534 48 
training.label_smoothing 0.0011089987082165459 +534 49 model.embedding_dim 2.0 +534 49 optimizer.lr 0.023502054554682943 +534 49 training.batch_size 2.0 +534 49 training.label_smoothing 0.7060692389458771 +534 50 model.embedding_dim 2.0 +534 50 optimizer.lr 0.002199711465627046 +534 50 training.batch_size 1.0 +534 50 training.label_smoothing 0.4163454191266039 +534 51 model.embedding_dim 1.0 +534 51 optimizer.lr 0.0034415758103905477 +534 51 training.batch_size 1.0 +534 51 training.label_smoothing 0.003913981754935537 +534 52 model.embedding_dim 0.0 +534 52 optimizer.lr 0.0014901161967550197 +534 52 training.batch_size 2.0 +534 52 training.label_smoothing 0.005190701428977206 +534 53 model.embedding_dim 0.0 +534 53 optimizer.lr 0.003528701064502035 +534 53 training.batch_size 0.0 +534 53 training.label_smoothing 0.5434626688313825 +534 54 model.embedding_dim 2.0 +534 54 optimizer.lr 0.05222739588114746 +534 54 training.batch_size 1.0 +534 54 training.label_smoothing 0.004776065625213242 +534 55 model.embedding_dim 1.0 +534 55 optimizer.lr 0.025867114528474337 +534 55 training.batch_size 2.0 +534 55 training.label_smoothing 0.004752074139283467 +534 56 model.embedding_dim 2.0 +534 56 optimizer.lr 0.0037752414690703897 +534 56 training.batch_size 2.0 +534 56 training.label_smoothing 0.03388039682530058 +534 57 model.embedding_dim 0.0 +534 57 optimizer.lr 0.009859995681279092 +534 57 training.batch_size 2.0 +534 57 training.label_smoothing 0.002481886790250221 +534 58 model.embedding_dim 1.0 +534 58 optimizer.lr 0.0020167259501395647 +534 58 training.batch_size 2.0 +534 58 training.label_smoothing 0.01672478258098495 +534 59 model.embedding_dim 1.0 +534 59 optimizer.lr 0.0011317345738628996 +534 59 training.batch_size 0.0 +534 59 training.label_smoothing 0.30502955390751346 +534 60 model.embedding_dim 1.0 +534 60 optimizer.lr 0.0033128328931260084 +534 60 training.batch_size 2.0 +534 60 training.label_smoothing 0.018476042252574716 +534 61 model.embedding_dim 0.0 +534 
61 optimizer.lr 0.0020991888487625196 +534 61 training.batch_size 1.0 +534 61 training.label_smoothing 0.019968734661575366 +534 62 model.embedding_dim 0.0 +534 62 optimizer.lr 0.00803123552488386 +534 62 training.batch_size 2.0 +534 62 training.label_smoothing 0.2028865022550146 +534 63 model.embedding_dim 1.0 +534 63 optimizer.lr 0.0023845307660463363 +534 63 training.batch_size 1.0 +534 63 training.label_smoothing 0.0475808597852413 +534 64 model.embedding_dim 1.0 +534 64 optimizer.lr 0.018026149551342123 +534 64 training.batch_size 2.0 +534 64 training.label_smoothing 0.01611932775170877 +534 65 model.embedding_dim 1.0 +534 65 optimizer.lr 0.0645554992343145 +534 65 training.batch_size 0.0 +534 65 training.label_smoothing 0.9184041768013257 +534 66 model.embedding_dim 2.0 +534 66 optimizer.lr 0.03295966085229943 +534 66 training.batch_size 1.0 +534 66 training.label_smoothing 0.015226389100443442 +534 67 model.embedding_dim 2.0 +534 67 optimizer.lr 0.022679860446499753 +534 67 training.batch_size 2.0 +534 67 training.label_smoothing 0.048573180765176846 +534 68 model.embedding_dim 1.0 +534 68 optimizer.lr 0.002801274843730282 +534 68 training.batch_size 1.0 +534 68 training.label_smoothing 0.03461097146911058 +534 69 model.embedding_dim 1.0 +534 69 optimizer.lr 0.03227063766335272 +534 69 training.batch_size 2.0 +534 69 training.label_smoothing 0.06052310479475979 +534 70 model.embedding_dim 0.0 +534 70 optimizer.lr 0.005795863221475528 +534 70 training.batch_size 0.0 +534 70 training.label_smoothing 0.4188307222915531 +534 71 model.embedding_dim 1.0 +534 71 optimizer.lr 0.07197938313468903 +534 71 training.batch_size 0.0 +534 71 training.label_smoothing 0.6732199137770617 +534 72 model.embedding_dim 1.0 +534 72 optimizer.lr 0.0011726931007541607 +534 72 training.batch_size 0.0 +534 72 training.label_smoothing 0.001788366209395508 +534 73 model.embedding_dim 1.0 +534 73 optimizer.lr 0.007359278346321189 +534 73 training.batch_size 0.0 +534 73 
training.label_smoothing 0.011907255730201212 +534 74 model.embedding_dim 1.0 +534 74 optimizer.lr 0.005352783468457611 +534 74 training.batch_size 0.0 +534 74 training.label_smoothing 0.011721338063782671 +534 75 model.embedding_dim 1.0 +534 75 optimizer.lr 0.04053283489964909 +534 75 training.batch_size 2.0 +534 75 training.label_smoothing 0.0011129619574033805 +534 76 model.embedding_dim 1.0 +534 76 optimizer.lr 0.0013732582623764847 +534 76 training.batch_size 0.0 +534 76 training.label_smoothing 0.04240532846360773 +534 77 model.embedding_dim 1.0 +534 77 optimizer.lr 0.027594803807443034 +534 77 training.batch_size 1.0 +534 77 training.label_smoothing 0.34297854001098205 +534 78 model.embedding_dim 2.0 +534 78 optimizer.lr 0.0015246845754559367 +534 78 training.batch_size 2.0 +534 78 training.label_smoothing 0.0052477141492520175 +534 79 model.embedding_dim 2.0 +534 79 optimizer.lr 0.028308667069790478 +534 79 training.batch_size 0.0 +534 79 training.label_smoothing 0.007894725989403691 +534 80 model.embedding_dim 1.0 +534 80 optimizer.lr 0.018410120078486303 +534 80 training.batch_size 0.0 +534 80 training.label_smoothing 0.1008972158485089 +534 81 model.embedding_dim 1.0 +534 81 optimizer.lr 0.0015224828728571086 +534 81 training.batch_size 1.0 +534 81 training.label_smoothing 0.3793459216966272 +534 82 model.embedding_dim 2.0 +534 82 optimizer.lr 0.010456050347613868 +534 82 training.batch_size 0.0 +534 82 training.label_smoothing 0.011130434218512179 +534 83 model.embedding_dim 2.0 +534 83 optimizer.lr 0.011374729517818365 +534 83 training.batch_size 2.0 +534 83 training.label_smoothing 0.06695857535487793 +534 84 model.embedding_dim 1.0 +534 84 optimizer.lr 0.0033576619722385133 +534 84 training.batch_size 2.0 +534 84 training.label_smoothing 0.6597140966575459 +534 85 model.embedding_dim 0.0 +534 85 optimizer.lr 0.00797505524499275 +534 85 training.batch_size 1.0 +534 85 training.label_smoothing 0.05490175713715837 +534 86 model.embedding_dim 0.0 +534 86 
optimizer.lr 0.0441476273224181 +534 86 training.batch_size 0.0 +534 86 training.label_smoothing 0.17687028351415185 +534 87 model.embedding_dim 2.0 +534 87 optimizer.lr 0.06771902501972006 +534 87 training.batch_size 1.0 +534 87 training.label_smoothing 0.005769065585193262 +534 88 model.embedding_dim 0.0 +534 88 optimizer.lr 0.0012483409736927884 +534 88 training.batch_size 1.0 +534 88 training.label_smoothing 0.04640167620443019 +534 89 model.embedding_dim 2.0 +534 89 optimizer.lr 0.02529394969593218 +534 89 training.batch_size 1.0 +534 89 training.label_smoothing 0.9931694731906302 +534 90 model.embedding_dim 0.0 +534 90 optimizer.lr 0.029850946926844805 +534 90 training.batch_size 0.0 +534 90 training.label_smoothing 0.006226998293890654 +534 91 model.embedding_dim 2.0 +534 91 optimizer.lr 0.06167998576528045 +534 91 training.batch_size 2.0 +534 91 training.label_smoothing 0.03833059951481852 +534 92 model.embedding_dim 0.0 +534 92 optimizer.lr 0.007202195497169567 +534 92 training.batch_size 2.0 +534 92 training.label_smoothing 0.007698296746789611 +534 93 model.embedding_dim 1.0 +534 93 optimizer.lr 0.0037133625455704317 +534 93 training.batch_size 0.0 +534 93 training.label_smoothing 0.0036095192518924737 +534 94 model.embedding_dim 0.0 +534 94 optimizer.lr 0.012151295758920844 +534 94 training.batch_size 0.0 +534 94 training.label_smoothing 0.05690854840858887 +534 95 model.embedding_dim 0.0 +534 95 optimizer.lr 0.04556261527288902 +534 95 training.batch_size 0.0 +534 95 training.label_smoothing 0.022647260016938123 +534 96 model.embedding_dim 0.0 +534 96 optimizer.lr 0.002869499935687855 +534 96 training.batch_size 1.0 +534 96 training.label_smoothing 0.1555254467085179 +534 97 model.embedding_dim 0.0 +534 97 optimizer.lr 0.0020659490866203876 +534 97 training.batch_size 1.0 +534 97 training.label_smoothing 0.04401806322044228 +534 98 model.embedding_dim 0.0 +534 98 optimizer.lr 0.039262864970995404 +534 98 training.batch_size 1.0 +534 98 
training.label_smoothing 0.27776952429880286 +534 99 model.embedding_dim 0.0 +534 99 optimizer.lr 0.02148028118661824 +534 99 training.batch_size 0.0 +534 99 training.label_smoothing 0.026497048519066962 +534 100 model.embedding_dim 2.0 +534 100 optimizer.lr 0.07105873975149715 +534 100 training.batch_size 1.0 +534 100 training.label_smoothing 0.0398974028270588 +534 1 dataset """kinships""" +534 1 model """rescal""" +534 1 loss """softplus""" +534 1 regularizer """no""" +534 1 optimizer """adam""" +534 1 training_loop """lcwa""" +534 1 evaluator """rankbased""" +534 2 dataset """kinships""" +534 2 model """rescal""" +534 2 loss """softplus""" +534 2 regularizer """no""" +534 2 optimizer """adam""" +534 2 training_loop """lcwa""" +534 2 evaluator """rankbased""" +534 3 dataset """kinships""" +534 3 model """rescal""" +534 3 loss """softplus""" +534 3 regularizer """no""" +534 3 optimizer """adam""" +534 3 training_loop """lcwa""" +534 3 evaluator """rankbased""" +534 4 dataset """kinships""" +534 4 model """rescal""" +534 4 loss """softplus""" +534 4 regularizer """no""" +534 4 optimizer """adam""" +534 4 training_loop """lcwa""" +534 4 evaluator """rankbased""" +534 5 dataset """kinships""" +534 5 model """rescal""" +534 5 loss """softplus""" +534 5 regularizer """no""" +534 5 optimizer """adam""" +534 5 training_loop """lcwa""" +534 5 evaluator """rankbased""" +534 6 dataset """kinships""" +534 6 model """rescal""" +534 6 loss """softplus""" +534 6 regularizer """no""" +534 6 optimizer """adam""" +534 6 training_loop """lcwa""" +534 6 evaluator """rankbased""" +534 7 dataset """kinships""" +534 7 model """rescal""" +534 7 loss """softplus""" +534 7 regularizer """no""" +534 7 optimizer """adam""" +534 7 training_loop """lcwa""" +534 7 evaluator """rankbased""" +534 8 dataset """kinships""" +534 8 model """rescal""" +534 8 loss """softplus""" +534 8 regularizer """no""" +534 8 optimizer """adam""" +534 8 training_loop """lcwa""" +534 8 evaluator """rankbased""" 
+534 9 dataset """kinships""" +534 9 model """rescal""" +534 9 loss """softplus""" +534 9 regularizer """no""" +534 9 optimizer """adam""" +534 9 training_loop """lcwa""" +534 9 evaluator """rankbased""" +534 10 dataset """kinships""" +534 10 model """rescal""" +534 10 loss """softplus""" +534 10 regularizer """no""" +534 10 optimizer """adam""" +534 10 training_loop """lcwa""" +534 10 evaluator """rankbased""" +534 11 dataset """kinships""" +534 11 model """rescal""" +534 11 loss """softplus""" +534 11 regularizer """no""" +534 11 optimizer """adam""" +534 11 training_loop """lcwa""" +534 11 evaluator """rankbased""" +534 12 dataset """kinships""" +534 12 model """rescal""" +534 12 loss """softplus""" +534 12 regularizer """no""" +534 12 optimizer """adam""" +534 12 training_loop """lcwa""" +534 12 evaluator """rankbased""" +534 13 dataset """kinships""" +534 13 model """rescal""" +534 13 loss """softplus""" +534 13 regularizer """no""" +534 13 optimizer """adam""" +534 13 training_loop """lcwa""" +534 13 evaluator """rankbased""" +534 14 dataset """kinships""" +534 14 model """rescal""" +534 14 loss """softplus""" +534 14 regularizer """no""" +534 14 optimizer """adam""" +534 14 training_loop """lcwa""" +534 14 evaluator """rankbased""" +534 15 dataset """kinships""" +534 15 model """rescal""" +534 15 loss """softplus""" +534 15 regularizer """no""" +534 15 optimizer """adam""" +534 15 training_loop """lcwa""" +534 15 evaluator """rankbased""" +534 16 dataset """kinships""" +534 16 model """rescal""" +534 16 loss """softplus""" +534 16 regularizer """no""" +534 16 optimizer """adam""" +534 16 training_loop """lcwa""" +534 16 evaluator """rankbased""" +534 17 dataset """kinships""" +534 17 model """rescal""" +534 17 loss """softplus""" +534 17 regularizer """no""" +534 17 optimizer """adam""" +534 17 training_loop """lcwa""" +534 17 evaluator """rankbased""" +534 18 dataset """kinships""" +534 18 model """rescal""" +534 18 loss """softplus""" +534 18 regularizer 
"""no""" +534 18 optimizer """adam""" +534 18 training_loop """lcwa""" +534 18 evaluator """rankbased""" +534 19 dataset """kinships""" +534 19 model """rescal""" +534 19 loss """softplus""" +534 19 regularizer """no""" +534 19 optimizer """adam""" +534 19 training_loop """lcwa""" +534 19 evaluator """rankbased""" +534 20 dataset """kinships""" +534 20 model """rescal""" +534 20 loss """softplus""" +534 20 regularizer """no""" +534 20 optimizer """adam""" +534 20 training_loop """lcwa""" +534 20 evaluator """rankbased""" +534 21 dataset """kinships""" +534 21 model """rescal""" +534 21 loss """softplus""" +534 21 regularizer """no""" +534 21 optimizer """adam""" +534 21 training_loop """lcwa""" +534 21 evaluator """rankbased""" +534 22 dataset """kinships""" +534 22 model """rescal""" +534 22 loss """softplus""" +534 22 regularizer """no""" +534 22 optimizer """adam""" +534 22 training_loop """lcwa""" +534 22 evaluator """rankbased""" +534 23 dataset """kinships""" +534 23 model """rescal""" +534 23 loss """softplus""" +534 23 regularizer """no""" +534 23 optimizer """adam""" +534 23 training_loop """lcwa""" +534 23 evaluator """rankbased""" +534 24 dataset """kinships""" +534 24 model """rescal""" +534 24 loss """softplus""" +534 24 regularizer """no""" +534 24 optimizer """adam""" +534 24 training_loop """lcwa""" +534 24 evaluator """rankbased""" +534 25 dataset """kinships""" +534 25 model """rescal""" +534 25 loss """softplus""" +534 25 regularizer """no""" +534 25 optimizer """adam""" +534 25 training_loop """lcwa""" +534 25 evaluator """rankbased""" +534 26 dataset """kinships""" +534 26 model """rescal""" +534 26 loss """softplus""" +534 26 regularizer """no""" +534 26 optimizer """adam""" +534 26 training_loop """lcwa""" +534 26 evaluator """rankbased""" +534 27 dataset """kinships""" +534 27 model """rescal""" +534 27 loss """softplus""" +534 27 regularizer """no""" +534 27 optimizer """adam""" +534 27 training_loop """lcwa""" +534 27 evaluator 
"""rankbased""" +534 28 dataset """kinships""" +534 28 model """rescal""" +534 28 loss """softplus""" +534 28 regularizer """no""" +534 28 optimizer """adam""" +534 28 training_loop """lcwa""" +534 28 evaluator """rankbased""" +534 29 dataset """kinships""" +534 29 model """rescal""" +534 29 loss """softplus""" +534 29 regularizer """no""" +534 29 optimizer """adam""" +534 29 training_loop """lcwa""" +534 29 evaluator """rankbased""" +534 30 dataset """kinships""" +534 30 model """rescal""" +534 30 loss """softplus""" +534 30 regularizer """no""" +534 30 optimizer """adam""" +534 30 training_loop """lcwa""" +534 30 evaluator """rankbased""" +534 31 dataset """kinships""" +534 31 model """rescal""" +534 31 loss """softplus""" +534 31 regularizer """no""" +534 31 optimizer """adam""" +534 31 training_loop """lcwa""" +534 31 evaluator """rankbased""" +534 32 dataset """kinships""" +534 32 model """rescal""" +534 32 loss """softplus""" +534 32 regularizer """no""" +534 32 optimizer """adam""" +534 32 training_loop """lcwa""" +534 32 evaluator """rankbased""" +534 33 dataset """kinships""" +534 33 model """rescal""" +534 33 loss """softplus""" +534 33 regularizer """no""" +534 33 optimizer """adam""" +534 33 training_loop """lcwa""" +534 33 evaluator """rankbased""" +534 34 dataset """kinships""" +534 34 model """rescal""" +534 34 loss """softplus""" +534 34 regularizer """no""" +534 34 optimizer """adam""" +534 34 training_loop """lcwa""" +534 34 evaluator """rankbased""" +534 35 dataset """kinships""" +534 35 model """rescal""" +534 35 loss """softplus""" +534 35 regularizer """no""" +534 35 optimizer """adam""" +534 35 training_loop """lcwa""" +534 35 evaluator """rankbased""" +534 36 dataset """kinships""" +534 36 model """rescal""" +534 36 loss """softplus""" +534 36 regularizer """no""" +534 36 optimizer """adam""" +534 36 training_loop """lcwa""" +534 36 evaluator """rankbased""" +534 37 dataset """kinships""" +534 37 model """rescal""" +534 37 loss 
"""softplus""" +534 37 regularizer """no""" +534 37 optimizer """adam""" +534 37 training_loop """lcwa""" +534 37 evaluator """rankbased""" +534 38 dataset """kinships""" +534 38 model """rescal""" +534 38 loss """softplus""" +534 38 regularizer """no""" +534 38 optimizer """adam""" +534 38 training_loop """lcwa""" +534 38 evaluator """rankbased""" +534 39 dataset """kinships""" +534 39 model """rescal""" +534 39 loss """softplus""" +534 39 regularizer """no""" +534 39 optimizer """adam""" +534 39 training_loop """lcwa""" +534 39 evaluator """rankbased""" +534 40 dataset """kinships""" +534 40 model """rescal""" +534 40 loss """softplus""" +534 40 regularizer """no""" +534 40 optimizer """adam""" +534 40 training_loop """lcwa""" +534 40 evaluator """rankbased""" +534 41 dataset """kinships""" +534 41 model """rescal""" +534 41 loss """softplus""" +534 41 regularizer """no""" +534 41 optimizer """adam""" +534 41 training_loop """lcwa""" +534 41 evaluator """rankbased""" +534 42 dataset """kinships""" +534 42 model """rescal""" +534 42 loss """softplus""" +534 42 regularizer """no""" +534 42 optimizer """adam""" +534 42 training_loop """lcwa""" +534 42 evaluator """rankbased""" +534 43 dataset """kinships""" +534 43 model """rescal""" +534 43 loss """softplus""" +534 43 regularizer """no""" +534 43 optimizer """adam""" +534 43 training_loop """lcwa""" +534 43 evaluator """rankbased""" +534 44 dataset """kinships""" +534 44 model """rescal""" +534 44 loss """softplus""" +534 44 regularizer """no""" +534 44 optimizer """adam""" +534 44 training_loop """lcwa""" +534 44 evaluator """rankbased""" +534 45 dataset """kinships""" +534 45 model """rescal""" +534 45 loss """softplus""" +534 45 regularizer """no""" +534 45 optimizer """adam""" +534 45 training_loop """lcwa""" +534 45 evaluator """rankbased""" +534 46 dataset """kinships""" +534 46 model """rescal""" +534 46 loss """softplus""" +534 46 regularizer """no""" +534 46 optimizer """adam""" +534 46 training_loop 
"""lcwa""" +534 46 evaluator """rankbased""" +534 47 dataset """kinships""" +534 47 model """rescal""" +534 47 loss """softplus""" +534 47 regularizer """no""" +534 47 optimizer """adam""" +534 47 training_loop """lcwa""" +534 47 evaluator """rankbased""" +534 48 dataset """kinships""" +534 48 model """rescal""" +534 48 loss """softplus""" +534 48 regularizer """no""" +534 48 optimizer """adam""" +534 48 training_loop """lcwa""" +534 48 evaluator """rankbased""" +534 49 dataset """kinships""" +534 49 model """rescal""" +534 49 loss """softplus""" +534 49 regularizer """no""" +534 49 optimizer """adam""" +534 49 training_loop """lcwa""" +534 49 evaluator """rankbased""" +534 50 dataset """kinships""" +534 50 model """rescal""" +534 50 loss """softplus""" +534 50 regularizer """no""" +534 50 optimizer """adam""" +534 50 training_loop """lcwa""" +534 50 evaluator """rankbased""" +534 51 dataset """kinships""" +534 51 model """rescal""" +534 51 loss """softplus""" +534 51 regularizer """no""" +534 51 optimizer """adam""" +534 51 training_loop """lcwa""" +534 51 evaluator """rankbased""" +534 52 dataset """kinships""" +534 52 model """rescal""" +534 52 loss """softplus""" +534 52 regularizer """no""" +534 52 optimizer """adam""" +534 52 training_loop """lcwa""" +534 52 evaluator """rankbased""" +534 53 dataset """kinships""" +534 53 model """rescal""" +534 53 loss """softplus""" +534 53 regularizer """no""" +534 53 optimizer """adam""" +534 53 training_loop """lcwa""" +534 53 evaluator """rankbased""" +534 54 dataset """kinships""" +534 54 model """rescal""" +534 54 loss """softplus""" +534 54 regularizer """no""" +534 54 optimizer """adam""" +534 54 training_loop """lcwa""" +534 54 evaluator """rankbased""" +534 55 dataset """kinships""" +534 55 model """rescal""" +534 55 loss """softplus""" +534 55 regularizer """no""" +534 55 optimizer """adam""" +534 55 training_loop """lcwa""" +534 55 evaluator """rankbased""" +534 56 dataset """kinships""" +534 56 model 
"""rescal""" +534 56 loss """softplus""" +534 56 regularizer """no""" +534 56 optimizer """adam""" +534 56 training_loop """lcwa""" +534 56 evaluator """rankbased""" +534 57 dataset """kinships""" +534 57 model """rescal""" +534 57 loss """softplus""" +534 57 regularizer """no""" +534 57 optimizer """adam""" +534 57 training_loop """lcwa""" +534 57 evaluator """rankbased""" +534 58 dataset """kinships""" +534 58 model """rescal""" +534 58 loss """softplus""" +534 58 regularizer """no""" +534 58 optimizer """adam""" +534 58 training_loop """lcwa""" +534 58 evaluator """rankbased""" +534 59 dataset """kinships""" +534 59 model """rescal""" +534 59 loss """softplus""" +534 59 regularizer """no""" +534 59 optimizer """adam""" +534 59 training_loop """lcwa""" +534 59 evaluator """rankbased""" +534 60 dataset """kinships""" +534 60 model """rescal""" +534 60 loss """softplus""" +534 60 regularizer """no""" +534 60 optimizer """adam""" +534 60 training_loop """lcwa""" +534 60 evaluator """rankbased""" +534 61 dataset """kinships""" +534 61 model """rescal""" +534 61 loss """softplus""" +534 61 regularizer """no""" +534 61 optimizer """adam""" +534 61 training_loop """lcwa""" +534 61 evaluator """rankbased""" +534 62 dataset """kinships""" +534 62 model """rescal""" +534 62 loss """softplus""" +534 62 regularizer """no""" +534 62 optimizer """adam""" +534 62 training_loop """lcwa""" +534 62 evaluator """rankbased""" +534 63 dataset """kinships""" +534 63 model """rescal""" +534 63 loss """softplus""" +534 63 regularizer """no""" +534 63 optimizer """adam""" +534 63 training_loop """lcwa""" +534 63 evaluator """rankbased""" +534 64 dataset """kinships""" +534 64 model """rescal""" +534 64 loss """softplus""" +534 64 regularizer """no""" +534 64 optimizer """adam""" +534 64 training_loop """lcwa""" +534 64 evaluator """rankbased""" +534 65 dataset """kinships""" +534 65 model """rescal""" +534 65 loss """softplus""" +534 65 regularizer """no""" +534 65 optimizer """adam""" 
+534 65 training_loop """lcwa""" +534 65 evaluator """rankbased""" +534 66 dataset """kinships""" +534 66 model """rescal""" +534 66 loss """softplus""" +534 66 regularizer """no""" +534 66 optimizer """adam""" +534 66 training_loop """lcwa""" +534 66 evaluator """rankbased""" +534 67 dataset """kinships""" +534 67 model """rescal""" +534 67 loss """softplus""" +534 67 regularizer """no""" +534 67 optimizer """adam""" +534 67 training_loop """lcwa""" +534 67 evaluator """rankbased""" +534 68 dataset """kinships""" +534 68 model """rescal""" +534 68 loss """softplus""" +534 68 regularizer """no""" +534 68 optimizer """adam""" +534 68 training_loop """lcwa""" +534 68 evaluator """rankbased""" +534 69 dataset """kinships""" +534 69 model """rescal""" +534 69 loss """softplus""" +534 69 regularizer """no""" +534 69 optimizer """adam""" +534 69 training_loop """lcwa""" +534 69 evaluator """rankbased""" +534 70 dataset """kinships""" +534 70 model """rescal""" +534 70 loss """softplus""" +534 70 regularizer """no""" +534 70 optimizer """adam""" +534 70 training_loop """lcwa""" +534 70 evaluator """rankbased""" +534 71 dataset """kinships""" +534 71 model """rescal""" +534 71 loss """softplus""" +534 71 regularizer """no""" +534 71 optimizer """adam""" +534 71 training_loop """lcwa""" +534 71 evaluator """rankbased""" +534 72 dataset """kinships""" +534 72 model """rescal""" +534 72 loss """softplus""" +534 72 regularizer """no""" +534 72 optimizer """adam""" +534 72 training_loop """lcwa""" +534 72 evaluator """rankbased""" +534 73 dataset """kinships""" +534 73 model """rescal""" +534 73 loss """softplus""" +534 73 regularizer """no""" +534 73 optimizer """adam""" +534 73 training_loop """lcwa""" +534 73 evaluator """rankbased""" +534 74 dataset """kinships""" +534 74 model """rescal""" +534 74 loss """softplus""" +534 74 regularizer """no""" +534 74 optimizer """adam""" +534 74 training_loop """lcwa""" +534 74 evaluator """rankbased""" +534 75 dataset """kinships""" 
+534 75 model """rescal""" +534 75 loss """softplus""" +534 75 regularizer """no""" +534 75 optimizer """adam""" +534 75 training_loop """lcwa""" +534 75 evaluator """rankbased""" +534 76 dataset """kinships""" +534 76 model """rescal""" +534 76 loss """softplus""" +534 76 regularizer """no""" +534 76 optimizer """adam""" +534 76 training_loop """lcwa""" +534 76 evaluator """rankbased""" +534 77 dataset """kinships""" +534 77 model """rescal""" +534 77 loss """softplus""" +534 77 regularizer """no""" +534 77 optimizer """adam""" +534 77 training_loop """lcwa""" +534 77 evaluator """rankbased""" +534 78 dataset """kinships""" +534 78 model """rescal""" +534 78 loss """softplus""" +534 78 regularizer """no""" +534 78 optimizer """adam""" +534 78 training_loop """lcwa""" +534 78 evaluator """rankbased""" +534 79 dataset """kinships""" +534 79 model """rescal""" +534 79 loss """softplus""" +534 79 regularizer """no""" +534 79 optimizer """adam""" +534 79 training_loop """lcwa""" +534 79 evaluator """rankbased""" +534 80 dataset """kinships""" +534 80 model """rescal""" +534 80 loss """softplus""" +534 80 regularizer """no""" +534 80 optimizer """adam""" +534 80 training_loop """lcwa""" +534 80 evaluator """rankbased""" +534 81 dataset """kinships""" +534 81 model """rescal""" +534 81 loss """softplus""" +534 81 regularizer """no""" +534 81 optimizer """adam""" +534 81 training_loop """lcwa""" +534 81 evaluator """rankbased""" +534 82 dataset """kinships""" +534 82 model """rescal""" +534 82 loss """softplus""" +534 82 regularizer """no""" +534 82 optimizer """adam""" +534 82 training_loop """lcwa""" +534 82 evaluator """rankbased""" +534 83 dataset """kinships""" +534 83 model """rescal""" +534 83 loss """softplus""" +534 83 regularizer """no""" +534 83 optimizer """adam""" +534 83 training_loop """lcwa""" +534 83 evaluator """rankbased""" +534 84 dataset """kinships""" +534 84 model """rescal""" +534 84 loss """softplus""" +534 84 regularizer """no""" +534 84 
optimizer """adam""" +534 84 training_loop """lcwa""" +534 84 evaluator """rankbased""" +534 85 dataset """kinships""" +534 85 model """rescal""" +534 85 loss """softplus""" +534 85 regularizer """no""" +534 85 optimizer """adam""" +534 85 training_loop """lcwa""" +534 85 evaluator """rankbased""" +534 86 dataset """kinships""" +534 86 model """rescal""" +534 86 loss """softplus""" +534 86 regularizer """no""" +534 86 optimizer """adam""" +534 86 training_loop """lcwa""" +534 86 evaluator """rankbased""" +534 87 dataset """kinships""" +534 87 model """rescal""" +534 87 loss """softplus""" +534 87 regularizer """no""" +534 87 optimizer """adam""" +534 87 training_loop """lcwa""" +534 87 evaluator """rankbased""" +534 88 dataset """kinships""" +534 88 model """rescal""" +534 88 loss """softplus""" +534 88 regularizer """no""" +534 88 optimizer """adam""" +534 88 training_loop """lcwa""" +534 88 evaluator """rankbased""" +534 89 dataset """kinships""" +534 89 model """rescal""" +534 89 loss """softplus""" +534 89 regularizer """no""" +534 89 optimizer """adam""" +534 89 training_loop """lcwa""" +534 89 evaluator """rankbased""" +534 90 dataset """kinships""" +534 90 model """rescal""" +534 90 loss """softplus""" +534 90 regularizer """no""" +534 90 optimizer """adam""" +534 90 training_loop """lcwa""" +534 90 evaluator """rankbased""" +534 91 dataset """kinships""" +534 91 model """rescal""" +534 91 loss """softplus""" +534 91 regularizer """no""" +534 91 optimizer """adam""" +534 91 training_loop """lcwa""" +534 91 evaluator """rankbased""" +534 92 dataset """kinships""" +534 92 model """rescal""" +534 92 loss """softplus""" +534 92 regularizer """no""" +534 92 optimizer """adam""" +534 92 training_loop """lcwa""" +534 92 evaluator """rankbased""" +534 93 dataset """kinships""" +534 93 model """rescal""" +534 93 loss """softplus""" +534 93 regularizer """no""" +534 93 optimizer """adam""" +534 93 training_loop """lcwa""" +534 93 evaluator """rankbased""" +534 94 
dataset """kinships""" +534 94 model """rescal""" +534 94 loss """softplus""" +534 94 regularizer """no""" +534 94 optimizer """adam""" +534 94 training_loop """lcwa""" +534 94 evaluator """rankbased""" +534 95 dataset """kinships""" +534 95 model """rescal""" +534 95 loss """softplus""" +534 95 regularizer """no""" +534 95 optimizer """adam""" +534 95 training_loop """lcwa""" +534 95 evaluator """rankbased""" +534 96 dataset """kinships""" +534 96 model """rescal""" +534 96 loss """softplus""" +534 96 regularizer """no""" +534 96 optimizer """adam""" +534 96 training_loop """lcwa""" +534 96 evaluator """rankbased""" +534 97 dataset """kinships""" +534 97 model """rescal""" +534 97 loss """softplus""" +534 97 regularizer """no""" +534 97 optimizer """adam""" +534 97 training_loop """lcwa""" +534 97 evaluator """rankbased""" +534 98 dataset """kinships""" +534 98 model """rescal""" +534 98 loss """softplus""" +534 98 regularizer """no""" +534 98 optimizer """adam""" +534 98 training_loop """lcwa""" +534 98 evaluator """rankbased""" +534 99 dataset """kinships""" +534 99 model """rescal""" +534 99 loss """softplus""" +534 99 regularizer """no""" +534 99 optimizer """adam""" +534 99 training_loop """lcwa""" +534 99 evaluator """rankbased""" +534 100 dataset """kinships""" +534 100 model """rescal""" +534 100 loss """softplus""" +534 100 regularizer """no""" +534 100 optimizer """adam""" +534 100 training_loop """lcwa""" +534 100 evaluator """rankbased""" +535 1 model.embedding_dim 2.0 +535 1 optimizer.lr 0.002301848097438397 +535 1 training.batch_size 0.0 +535 1 training.label_smoothing 0.13321971064564717 +535 2 model.embedding_dim 0.0 +535 2 optimizer.lr 0.03156905684526708 +535 2 training.batch_size 0.0 +535 2 training.label_smoothing 0.20758371976947226 +535 3 model.embedding_dim 1.0 +535 3 optimizer.lr 0.0012917327285595088 +535 3 training.batch_size 0.0 +535 3 training.label_smoothing 0.8435454970086571 +535 4 model.embedding_dim 0.0 +535 4 optimizer.lr 
0.009251713617953041 +535 4 training.batch_size 2.0 +535 4 training.label_smoothing 0.48548886476112113 +535 5 model.embedding_dim 0.0 +535 5 optimizer.lr 0.003609269180946066 +535 5 training.batch_size 1.0 +535 5 training.label_smoothing 0.5430192464786324 +535 6 model.embedding_dim 2.0 +535 6 optimizer.lr 0.012981585829927183 +535 6 training.batch_size 1.0 +535 6 training.label_smoothing 0.05767480352700142 +535 7 model.embedding_dim 1.0 +535 7 optimizer.lr 0.059255357794695554 +535 7 training.batch_size 0.0 +535 7 training.label_smoothing 0.02270924674201308 +535 8 model.embedding_dim 1.0 +535 8 optimizer.lr 0.0014015991311152427 +535 8 training.batch_size 0.0 +535 8 training.label_smoothing 0.02675667372495286 +535 9 model.embedding_dim 2.0 +535 9 optimizer.lr 0.008709361421483418 +535 9 training.batch_size 2.0 +535 9 training.label_smoothing 0.4139670786714383 +535 10 model.embedding_dim 2.0 +535 10 optimizer.lr 0.03009309225407681 +535 10 training.batch_size 2.0 +535 10 training.label_smoothing 0.0070077731906678115 +535 11 model.embedding_dim 2.0 +535 11 optimizer.lr 0.011327137404901993 +535 11 training.batch_size 0.0 +535 11 training.label_smoothing 0.02074491772629613 +535 12 model.embedding_dim 2.0 +535 12 optimizer.lr 0.00517741333887278 +535 12 training.batch_size 1.0 +535 12 training.label_smoothing 0.1050269312492195 +535 13 model.embedding_dim 2.0 +535 13 optimizer.lr 0.011996313915716364 +535 13 training.batch_size 1.0 +535 13 training.label_smoothing 0.8772820766140671 +535 14 model.embedding_dim 0.0 +535 14 optimizer.lr 0.0027585122470919904 +535 14 training.batch_size 1.0 +535 14 training.label_smoothing 0.013684678528754615 +535 15 model.embedding_dim 2.0 +535 15 optimizer.lr 0.001386929111365706 +535 15 training.batch_size 2.0 +535 15 training.label_smoothing 0.18338064130778406 +535 16 model.embedding_dim 2.0 +535 16 optimizer.lr 0.023251979977798872 +535 16 training.batch_size 0.0 +535 16 training.label_smoothing 0.011672649115378278 +535 17 
model.embedding_dim 1.0 +535 17 optimizer.lr 0.002707408053581748 +535 17 training.batch_size 2.0 +535 17 training.label_smoothing 0.12861436204330498 +535 18 model.embedding_dim 1.0 +535 18 optimizer.lr 0.004601593593107427 +535 18 training.batch_size 1.0 +535 18 training.label_smoothing 0.09397890946750675 +535 19 model.embedding_dim 0.0 +535 19 optimizer.lr 0.02109302248543011 +535 19 training.batch_size 1.0 +535 19 training.label_smoothing 0.17849229788306634 +535 20 model.embedding_dim 1.0 +535 20 optimizer.lr 0.022895361393471075 +535 20 training.batch_size 0.0 +535 20 training.label_smoothing 0.13177794433135426 +535 21 model.embedding_dim 2.0 +535 21 optimizer.lr 0.024616746941261883 +535 21 training.batch_size 2.0 +535 21 training.label_smoothing 0.9506059009645764 +535 22 model.embedding_dim 1.0 +535 22 optimizer.lr 0.010260262706058978 +535 22 training.batch_size 0.0 +535 22 training.label_smoothing 0.005181132794520717 +535 23 model.embedding_dim 0.0 +535 23 optimizer.lr 0.024383050219182637 +535 23 training.batch_size 2.0 +535 23 training.label_smoothing 0.32866458164171164 +535 24 model.embedding_dim 1.0 +535 24 optimizer.lr 0.0225676851239501 +535 24 training.batch_size 0.0 +535 24 training.label_smoothing 0.007521618667241177 +535 25 model.embedding_dim 2.0 +535 25 optimizer.lr 0.004980091928406036 +535 25 training.batch_size 2.0 +535 25 training.label_smoothing 0.8027119447666831 +535 26 model.embedding_dim 2.0 +535 26 optimizer.lr 0.0014706099174748566 +535 26 training.batch_size 0.0 +535 26 training.label_smoothing 0.006198544429434667 +535 27 model.embedding_dim 2.0 +535 27 optimizer.lr 0.006509184817363878 +535 27 training.batch_size 1.0 +535 27 training.label_smoothing 0.006981136377939617 +535 28 model.embedding_dim 2.0 +535 28 optimizer.lr 0.027853558858510933 +535 28 training.batch_size 0.0 +535 28 training.label_smoothing 0.010144431252525805 +535 29 model.embedding_dim 1.0 +535 29 optimizer.lr 0.003358926168392451 +535 29 
training.batch_size 2.0 +535 29 training.label_smoothing 0.2936297935338379 +535 30 model.embedding_dim 1.0 +535 30 optimizer.lr 0.02501197138074878 +535 30 training.batch_size 1.0 +535 30 training.label_smoothing 0.058289369741251255 +535 31 model.embedding_dim 0.0 +535 31 optimizer.lr 0.0027106776567852823 +535 31 training.batch_size 2.0 +535 31 training.label_smoothing 0.1607076037083986 +535 32 model.embedding_dim 0.0 +535 32 optimizer.lr 0.0913312441046735 +535 32 training.batch_size 2.0 +535 32 training.label_smoothing 0.001659888599656707 +535 33 model.embedding_dim 1.0 +535 33 optimizer.lr 0.0022944862220611547 +535 33 training.batch_size 1.0 +535 33 training.label_smoothing 0.013987471235827047 +535 34 model.embedding_dim 1.0 +535 34 optimizer.lr 0.04144016808733492 +535 34 training.batch_size 0.0 +535 34 training.label_smoothing 0.0022945896642379907 +535 35 model.embedding_dim 2.0 +535 35 optimizer.lr 0.008460049996965814 +535 35 training.batch_size 1.0 +535 35 training.label_smoothing 0.5475921083629371 +535 36 model.embedding_dim 0.0 +535 36 optimizer.lr 0.002964995972727849 +535 36 training.batch_size 2.0 +535 36 training.label_smoothing 0.8023461166732692 +535 37 model.embedding_dim 0.0 +535 37 optimizer.lr 0.03351829505284367 +535 37 training.batch_size 1.0 +535 37 training.label_smoothing 0.41791200958225366 +535 38 model.embedding_dim 2.0 +535 38 optimizer.lr 0.09358979209654736 +535 38 training.batch_size 0.0 +535 38 training.label_smoothing 0.0033780173845906836 +535 39 model.embedding_dim 0.0 +535 39 optimizer.lr 0.006207264497721388 +535 39 training.batch_size 0.0 +535 39 training.label_smoothing 0.0015136930635794204 +535 40 model.embedding_dim 0.0 +535 40 optimizer.lr 0.004650654371904966 +535 40 training.batch_size 1.0 +535 40 training.label_smoothing 0.0010519196859434415 +535 41 model.embedding_dim 2.0 +535 41 optimizer.lr 0.08115409812250374 +535 41 training.batch_size 1.0 +535 41 training.label_smoothing 0.17741956825200275 +535 42 
model.embedding_dim 2.0 +535 42 optimizer.lr 0.004389174758522446 +535 42 training.batch_size 0.0 +535 42 training.label_smoothing 0.04232684556597678 +535 43 model.embedding_dim 0.0 +535 43 optimizer.lr 0.015235362613963507 +535 43 training.batch_size 0.0 +535 43 training.label_smoothing 0.010128761859037577 +535 44 model.embedding_dim 2.0 +535 44 optimizer.lr 0.006232758341166919 +535 44 training.batch_size 2.0 +535 44 training.label_smoothing 0.0010442376319609063 +535 45 model.embedding_dim 1.0 +535 45 optimizer.lr 0.0015000954854410171 +535 45 training.batch_size 1.0 +535 45 training.label_smoothing 0.013498493678210263 +535 46 model.embedding_dim 0.0 +535 46 optimizer.lr 0.03549210624450894 +535 46 training.batch_size 0.0 +535 46 training.label_smoothing 0.7547311703803647 +535 47 model.embedding_dim 0.0 +535 47 optimizer.lr 0.05269806475306216 +535 47 training.batch_size 2.0 +535 47 training.label_smoothing 0.0028355669219730963 +535 48 model.embedding_dim 2.0 +535 48 optimizer.lr 0.037834799230324984 +535 48 training.batch_size 1.0 +535 48 training.label_smoothing 0.014729045139553473 +535 49 model.embedding_dim 1.0 +535 49 optimizer.lr 0.04811918518458934 +535 49 training.batch_size 0.0 +535 49 training.label_smoothing 0.021766214962888 +535 50 model.embedding_dim 1.0 +535 50 optimizer.lr 0.006132793663088854 +535 50 training.batch_size 2.0 +535 50 training.label_smoothing 0.00835813627429255 +535 51 model.embedding_dim 1.0 +535 51 optimizer.lr 0.028735466669631695 +535 51 training.batch_size 1.0 +535 51 training.label_smoothing 0.0015212323515842504 +535 52 model.embedding_dim 0.0 +535 52 optimizer.lr 0.019986092391816774 +535 52 training.batch_size 0.0 +535 52 training.label_smoothing 0.015446754058378684 +535 53 model.embedding_dim 2.0 +535 53 optimizer.lr 0.03775230492805156 +535 53 training.batch_size 2.0 +535 53 training.label_smoothing 0.0031861713326909475 +535 54 model.embedding_dim 2.0 +535 54 optimizer.lr 0.00568574054788703 +535 54 
training.batch_size 1.0 +535 54 training.label_smoothing 0.0013862350621750646 +535 55 model.embedding_dim 2.0 +535 55 optimizer.lr 0.0038961934884231032 +535 55 training.batch_size 1.0 +535 55 training.label_smoothing 0.5400933374683264 +535 56 model.embedding_dim 1.0 +535 56 optimizer.lr 0.001966723802611245 +535 56 training.batch_size 2.0 +535 56 training.label_smoothing 0.004272122421698557 +535 57 model.embedding_dim 2.0 +535 57 optimizer.lr 0.042599150837710445 +535 57 training.batch_size 0.0 +535 57 training.label_smoothing 0.03592830290540395 +535 58 model.embedding_dim 0.0 +535 58 optimizer.lr 0.019562561397097535 +535 58 training.batch_size 0.0 +535 58 training.label_smoothing 0.0018358697572384952 +535 59 model.embedding_dim 1.0 +535 59 optimizer.lr 0.0102796882254479 +535 59 training.batch_size 0.0 +535 59 training.label_smoothing 0.0026306831350741203 +535 60 model.embedding_dim 1.0 +535 60 optimizer.lr 0.034209349620505244 +535 60 training.batch_size 0.0 +535 60 training.label_smoothing 0.10183122006191868 +535 61 model.embedding_dim 0.0 +535 61 optimizer.lr 0.06736956358163335 +535 61 training.batch_size 1.0 +535 61 training.label_smoothing 0.0016137080161365937 +535 62 model.embedding_dim 1.0 +535 62 optimizer.lr 0.0510346869345445 +535 62 training.batch_size 1.0 +535 62 training.label_smoothing 0.36800302408782193 +535 63 model.embedding_dim 2.0 +535 63 optimizer.lr 0.0033092910337420285 +535 63 training.batch_size 0.0 +535 63 training.label_smoothing 0.00432883222406012 +535 64 model.embedding_dim 2.0 +535 64 optimizer.lr 0.002393084355920078 +535 64 training.batch_size 1.0 +535 64 training.label_smoothing 0.04949295177031511 +535 65 model.embedding_dim 0.0 +535 65 optimizer.lr 0.005640016785279008 +535 65 training.batch_size 0.0 +535 65 training.label_smoothing 0.024757522152892203 +535 66 model.embedding_dim 1.0 +535 66 optimizer.lr 0.03509785090948805 +535 66 training.batch_size 2.0 +535 66 training.label_smoothing 0.06499795035386419 +535 67 
model.embedding_dim 1.0 +535 67 optimizer.lr 0.011806067338948838 +535 67 training.batch_size 1.0 +535 67 training.label_smoothing 0.009888001692435885 +535 68 model.embedding_dim 2.0 +535 68 optimizer.lr 0.007027894752527894 +535 68 training.batch_size 0.0 +535 68 training.label_smoothing 0.03710168420661398 +535 69 model.embedding_dim 2.0 +535 69 optimizer.lr 0.032397399940454874 +535 69 training.batch_size 2.0 +535 69 training.label_smoothing 0.0011267300662040408 +535 70 model.embedding_dim 1.0 +535 70 optimizer.lr 0.001198798352508145 +535 70 training.batch_size 2.0 +535 70 training.label_smoothing 0.06102525090665276 +535 71 model.embedding_dim 2.0 +535 71 optimizer.lr 0.0021405415460923134 +535 71 training.batch_size 1.0 +535 71 training.label_smoothing 0.010205543021801837 +535 72 model.embedding_dim 0.0 +535 72 optimizer.lr 0.0052407604755306545 +535 72 training.batch_size 2.0 +535 72 training.label_smoothing 0.5422844719154543 +535 73 model.embedding_dim 2.0 +535 73 optimizer.lr 0.022586169942452446 +535 73 training.batch_size 0.0 +535 73 training.label_smoothing 0.24987471581316475 +535 74 model.embedding_dim 0.0 +535 74 optimizer.lr 0.0779057175855974 +535 74 training.batch_size 2.0 +535 74 training.label_smoothing 0.13590604756310037 +535 75 model.embedding_dim 2.0 +535 75 optimizer.lr 0.0510397700675203 +535 75 training.batch_size 2.0 +535 75 training.label_smoothing 0.0010602229298076458 +535 76 model.embedding_dim 0.0 +535 76 optimizer.lr 0.0024715208985818865 +535 76 training.batch_size 2.0 +535 76 training.label_smoothing 0.005477853439221142 +535 77 model.embedding_dim 2.0 +535 77 optimizer.lr 0.07252782490176889 +535 77 training.batch_size 1.0 +535 77 training.label_smoothing 0.0036414800741188065 +535 78 model.embedding_dim 0.0 +535 78 optimizer.lr 0.004744229245770807 +535 78 training.batch_size 1.0 +535 78 training.label_smoothing 0.020699262223913762 +535 79 model.embedding_dim 0.0 +535 79 optimizer.lr 0.0419486313255229 +535 79 
training.batch_size 0.0 +535 79 training.label_smoothing 0.016674031189447765 +535 80 model.embedding_dim 1.0 +535 80 optimizer.lr 0.026892667497508734 +535 80 training.batch_size 2.0 +535 80 training.label_smoothing 0.726118096971959 +535 81 model.embedding_dim 2.0 +535 81 optimizer.lr 0.0012632944005633738 +535 81 training.batch_size 1.0 +535 81 training.label_smoothing 0.04240938648566403 +535 82 model.embedding_dim 1.0 +535 82 optimizer.lr 0.09111117473887397 +535 82 training.batch_size 1.0 +535 82 training.label_smoothing 0.04758718818532137 +535 83 model.embedding_dim 1.0 +535 83 optimizer.lr 0.030470895710870114 +535 83 training.batch_size 1.0 +535 83 training.label_smoothing 0.34682317984744354 +535 84 model.embedding_dim 1.0 +535 84 optimizer.lr 0.007604450510683012 +535 84 training.batch_size 1.0 +535 84 training.label_smoothing 0.00434600700016525 +535 85 model.embedding_dim 2.0 +535 85 optimizer.lr 0.004121308048306908 +535 85 training.batch_size 0.0 +535 85 training.label_smoothing 0.07437530219311089 +535 86 model.embedding_dim 2.0 +535 86 optimizer.lr 0.005283016975115665 +535 86 training.batch_size 1.0 +535 86 training.label_smoothing 0.014208879180924242 +535 87 model.embedding_dim 2.0 +535 87 optimizer.lr 0.09582975765191373 +535 87 training.batch_size 0.0 +535 87 training.label_smoothing 0.0023130745083217233 +535 88 model.embedding_dim 1.0 +535 88 optimizer.lr 0.008035027630437676 +535 88 training.batch_size 1.0 +535 88 training.label_smoothing 0.009291692219884531 +535 89 model.embedding_dim 0.0 +535 89 optimizer.lr 0.002582955471737508 +535 89 training.batch_size 2.0 +535 89 training.label_smoothing 0.011582201857759251 +535 90 model.embedding_dim 2.0 +535 90 optimizer.lr 0.0018375074505157713 +535 90 training.batch_size 1.0 +535 90 training.label_smoothing 0.31377656189640085 +535 91 model.embedding_dim 2.0 +535 91 optimizer.lr 0.029548394475712295 +535 91 training.batch_size 1.0 +535 91 training.label_smoothing 0.045017610281765257 +535 92 
model.embedding_dim 2.0 +535 92 optimizer.lr 0.0016456938881235356 +535 92 training.batch_size 2.0 +535 92 training.label_smoothing 0.41835845816881084 +535 93 model.embedding_dim 1.0 +535 93 optimizer.lr 0.0017981043273083332 +535 93 training.batch_size 2.0 +535 93 training.label_smoothing 0.013530378675900494 +535 94 model.embedding_dim 0.0 +535 94 optimizer.lr 0.007773776991902738 +535 94 training.batch_size 0.0 +535 94 training.label_smoothing 0.002147937153645249 +535 95 model.embedding_dim 1.0 +535 95 optimizer.lr 0.029454052610500506 +535 95 training.batch_size 2.0 +535 95 training.label_smoothing 0.3157463225240225 +535 96 model.embedding_dim 0.0 +535 96 optimizer.lr 0.02038658453098625 +535 96 training.batch_size 0.0 +535 96 training.label_smoothing 0.0018046455837853576 +535 97 model.embedding_dim 1.0 +535 97 optimizer.lr 0.0010941275104788257 +535 97 training.batch_size 0.0 +535 97 training.label_smoothing 0.0023581999054300066 +535 98 model.embedding_dim 0.0 +535 98 optimizer.lr 0.04546634624647766 +535 98 training.batch_size 1.0 +535 98 training.label_smoothing 0.0031637218969011215 +535 99 model.embedding_dim 0.0 +535 99 optimizer.lr 0.07900229686590668 +535 99 training.batch_size 0.0 +535 99 training.label_smoothing 0.0014759681616995368 +535 100 model.embedding_dim 0.0 +535 100 optimizer.lr 0.0020454081263358856 +535 100 training.batch_size 0.0 +535 100 training.label_smoothing 0.01950821398996706 +535 1 dataset """kinships""" +535 1 model """rescal""" +535 1 loss """bceaftersigmoid""" +535 1 regularizer """no""" +535 1 optimizer """adam""" +535 1 training_loop """lcwa""" +535 1 evaluator """rankbased""" +535 2 dataset """kinships""" +535 2 model """rescal""" +535 2 loss """bceaftersigmoid""" +535 2 regularizer """no""" +535 2 optimizer """adam""" +535 2 training_loop """lcwa""" +535 2 evaluator """rankbased""" +535 3 dataset """kinships""" +535 3 model """rescal""" +535 3 loss """bceaftersigmoid""" +535 3 regularizer """no""" +535 3 optimizer 
"""adam""" +535 3 training_loop """lcwa""" +535 3 evaluator """rankbased""" +535 4 dataset """kinships""" +535 4 model """rescal""" +535 4 loss """bceaftersigmoid""" +535 4 regularizer """no""" +535 4 optimizer """adam""" +535 4 training_loop """lcwa""" +535 4 evaluator """rankbased""" +535 5 dataset """kinships""" +535 5 model """rescal""" +535 5 loss """bceaftersigmoid""" +535 5 regularizer """no""" +535 5 optimizer """adam""" +535 5 training_loop """lcwa""" +535 5 evaluator """rankbased""" +535 6 dataset """kinships""" +535 6 model """rescal""" +535 6 loss """bceaftersigmoid""" +535 6 regularizer """no""" +535 6 optimizer """adam""" +535 6 training_loop """lcwa""" +535 6 evaluator """rankbased""" +535 7 dataset """kinships""" +535 7 model """rescal""" +535 7 loss """bceaftersigmoid""" +535 7 regularizer """no""" +535 7 optimizer """adam""" +535 7 training_loop """lcwa""" +535 7 evaluator """rankbased""" +535 8 dataset """kinships""" +535 8 model """rescal""" +535 8 loss """bceaftersigmoid""" +535 8 regularizer """no""" +535 8 optimizer """adam""" +535 8 training_loop """lcwa""" +535 8 evaluator """rankbased""" +535 9 dataset """kinships""" +535 9 model """rescal""" +535 9 loss """bceaftersigmoid""" +535 9 regularizer """no""" +535 9 optimizer """adam""" +535 9 training_loop """lcwa""" +535 9 evaluator """rankbased""" +535 10 dataset """kinships""" +535 10 model """rescal""" +535 10 loss """bceaftersigmoid""" +535 10 regularizer """no""" +535 10 optimizer """adam""" +535 10 training_loop """lcwa""" +535 10 evaluator """rankbased""" +535 11 dataset """kinships""" +535 11 model """rescal""" +535 11 loss """bceaftersigmoid""" +535 11 regularizer """no""" +535 11 optimizer """adam""" +535 11 training_loop """lcwa""" +535 11 evaluator """rankbased""" +535 12 dataset """kinships""" +535 12 model """rescal""" +535 12 loss """bceaftersigmoid""" +535 12 regularizer """no""" +535 12 optimizer """adam""" +535 12 training_loop """lcwa""" +535 12 evaluator """rankbased""" 
+535 13 dataset """kinships""" +535 13 model """rescal""" +535 13 loss """bceaftersigmoid""" +535 13 regularizer """no""" +535 13 optimizer """adam""" +535 13 training_loop """lcwa""" +535 13 evaluator """rankbased""" +535 14 dataset """kinships""" +535 14 model """rescal""" +535 14 loss """bceaftersigmoid""" +535 14 regularizer """no""" +535 14 optimizer """adam""" +535 14 training_loop """lcwa""" +535 14 evaluator """rankbased""" +535 15 dataset """kinships""" +535 15 model """rescal""" +535 15 loss """bceaftersigmoid""" +535 15 regularizer """no""" +535 15 optimizer """adam""" +535 15 training_loop """lcwa""" +535 15 evaluator """rankbased""" +535 16 dataset """kinships""" +535 16 model """rescal""" +535 16 loss """bceaftersigmoid""" +535 16 regularizer """no""" +535 16 optimizer """adam""" +535 16 training_loop """lcwa""" +535 16 evaluator """rankbased""" +535 17 dataset """kinships""" +535 17 model """rescal""" +535 17 loss """bceaftersigmoid""" +535 17 regularizer """no""" +535 17 optimizer """adam""" +535 17 training_loop """lcwa""" +535 17 evaluator """rankbased""" +535 18 dataset """kinships""" +535 18 model """rescal""" +535 18 loss """bceaftersigmoid""" +535 18 regularizer """no""" +535 18 optimizer """adam""" +535 18 training_loop """lcwa""" +535 18 evaluator """rankbased""" +535 19 dataset """kinships""" +535 19 model """rescal""" +535 19 loss """bceaftersigmoid""" +535 19 regularizer """no""" +535 19 optimizer """adam""" +535 19 training_loop """lcwa""" +535 19 evaluator """rankbased""" +535 20 dataset """kinships""" +535 20 model """rescal""" +535 20 loss """bceaftersigmoid""" +535 20 regularizer """no""" +535 20 optimizer """adam""" +535 20 training_loop """lcwa""" +535 20 evaluator """rankbased""" +535 21 dataset """kinships""" +535 21 model """rescal""" +535 21 loss """bceaftersigmoid""" +535 21 regularizer """no""" +535 21 optimizer """adam""" +535 21 training_loop """lcwa""" +535 21 evaluator """rankbased""" +535 22 dataset """kinships""" +535 
22 model """rescal""" +535 22 loss """bceaftersigmoid""" +535 22 regularizer """no""" +535 22 optimizer """adam""" +535 22 training_loop """lcwa""" +535 22 evaluator """rankbased""" +535 23 dataset """kinships""" +535 23 model """rescal""" +535 23 loss """bceaftersigmoid""" +535 23 regularizer """no""" +535 23 optimizer """adam""" +535 23 training_loop """lcwa""" +535 23 evaluator """rankbased""" +535 24 dataset """kinships""" +535 24 model """rescal""" +535 24 loss """bceaftersigmoid""" +535 24 regularizer """no""" +535 24 optimizer """adam""" +535 24 training_loop """lcwa""" +535 24 evaluator """rankbased""" +535 25 dataset """kinships""" +535 25 model """rescal""" +535 25 loss """bceaftersigmoid""" +535 25 regularizer """no""" +535 25 optimizer """adam""" +535 25 training_loop """lcwa""" +535 25 evaluator """rankbased""" +535 26 dataset """kinships""" +535 26 model """rescal""" +535 26 loss """bceaftersigmoid""" +535 26 regularizer """no""" +535 26 optimizer """adam""" +535 26 training_loop """lcwa""" +535 26 evaluator """rankbased""" +535 27 dataset """kinships""" +535 27 model """rescal""" +535 27 loss """bceaftersigmoid""" +535 27 regularizer """no""" +535 27 optimizer """adam""" +535 27 training_loop """lcwa""" +535 27 evaluator """rankbased""" +535 28 dataset """kinships""" +535 28 model """rescal""" +535 28 loss """bceaftersigmoid""" +535 28 regularizer """no""" +535 28 optimizer """adam""" +535 28 training_loop """lcwa""" +535 28 evaluator """rankbased""" +535 29 dataset """kinships""" +535 29 model """rescal""" +535 29 loss """bceaftersigmoid""" +535 29 regularizer """no""" +535 29 optimizer """adam""" +535 29 training_loop """lcwa""" +535 29 evaluator """rankbased""" +535 30 dataset """kinships""" +535 30 model """rescal""" +535 30 loss """bceaftersigmoid""" +535 30 regularizer """no""" +535 30 optimizer """adam""" +535 30 training_loop """lcwa""" +535 30 evaluator """rankbased""" +535 31 dataset """kinships""" +535 31 model """rescal""" +535 31 loss 
"""bceaftersigmoid""" +535 31 regularizer """no""" +535 31 optimizer """adam""" +535 31 training_loop """lcwa""" +535 31 evaluator """rankbased""" +535 32 dataset """kinships""" +535 32 model """rescal""" +535 32 loss """bceaftersigmoid""" +535 32 regularizer """no""" +535 32 optimizer """adam""" +535 32 training_loop """lcwa""" +535 32 evaluator """rankbased""" +535 33 dataset """kinships""" +535 33 model """rescal""" +535 33 loss """bceaftersigmoid""" +535 33 regularizer """no""" +535 33 optimizer """adam""" +535 33 training_loop """lcwa""" +535 33 evaluator """rankbased""" +535 34 dataset """kinships""" +535 34 model """rescal""" +535 34 loss """bceaftersigmoid""" +535 34 regularizer """no""" +535 34 optimizer """adam""" +535 34 training_loop """lcwa""" +535 34 evaluator """rankbased""" +535 35 dataset """kinships""" +535 35 model """rescal""" +535 35 loss """bceaftersigmoid""" +535 35 regularizer """no""" +535 35 optimizer """adam""" +535 35 training_loop """lcwa""" +535 35 evaluator """rankbased""" +535 36 dataset """kinships""" +535 36 model """rescal""" +535 36 loss """bceaftersigmoid""" +535 36 regularizer """no""" +535 36 optimizer """adam""" +535 36 training_loop """lcwa""" +535 36 evaluator """rankbased""" +535 37 dataset """kinships""" +535 37 model """rescal""" +535 37 loss """bceaftersigmoid""" +535 37 regularizer """no""" +535 37 optimizer """adam""" +535 37 training_loop """lcwa""" +535 37 evaluator """rankbased""" +535 38 dataset """kinships""" +535 38 model """rescal""" +535 38 loss """bceaftersigmoid""" +535 38 regularizer """no""" +535 38 optimizer """adam""" +535 38 training_loop """lcwa""" +535 38 evaluator """rankbased""" +535 39 dataset """kinships""" +535 39 model """rescal""" +535 39 loss """bceaftersigmoid""" +535 39 regularizer """no""" +535 39 optimizer """adam""" +535 39 training_loop """lcwa""" +535 39 evaluator """rankbased""" +535 40 dataset """kinships""" +535 40 model """rescal""" +535 40 loss """bceaftersigmoid""" +535 40 
regularizer """no""" +535 40 optimizer """adam""" +535 40 training_loop """lcwa""" +535 40 evaluator """rankbased""" +535 41 dataset """kinships""" +535 41 model """rescal""" +535 41 loss """bceaftersigmoid""" +535 41 regularizer """no""" +535 41 optimizer """adam""" +535 41 training_loop """lcwa""" +535 41 evaluator """rankbased""" +535 42 dataset """kinships""" +535 42 model """rescal""" +535 42 loss """bceaftersigmoid""" +535 42 regularizer """no""" +535 42 optimizer """adam""" +535 42 training_loop """lcwa""" +535 42 evaluator """rankbased""" +535 43 dataset """kinships""" +535 43 model """rescal""" +535 43 loss """bceaftersigmoid""" +535 43 regularizer """no""" +535 43 optimizer """adam""" +535 43 training_loop """lcwa""" +535 43 evaluator """rankbased""" +535 44 dataset """kinships""" +535 44 model """rescal""" +535 44 loss """bceaftersigmoid""" +535 44 regularizer """no""" +535 44 optimizer """adam""" +535 44 training_loop """lcwa""" +535 44 evaluator """rankbased""" +535 45 dataset """kinships""" +535 45 model """rescal""" +535 45 loss """bceaftersigmoid""" +535 45 regularizer """no""" +535 45 optimizer """adam""" +535 45 training_loop """lcwa""" +535 45 evaluator """rankbased""" +535 46 dataset """kinships""" +535 46 model """rescal""" +535 46 loss """bceaftersigmoid""" +535 46 regularizer """no""" +535 46 optimizer """adam""" +535 46 training_loop """lcwa""" +535 46 evaluator """rankbased""" +535 47 dataset """kinships""" +535 47 model """rescal""" +535 47 loss """bceaftersigmoid""" +535 47 regularizer """no""" +535 47 optimizer """adam""" +535 47 training_loop """lcwa""" +535 47 evaluator """rankbased""" +535 48 dataset """kinships""" +535 48 model """rescal""" +535 48 loss """bceaftersigmoid""" +535 48 regularizer """no""" +535 48 optimizer """adam""" +535 48 training_loop """lcwa""" +535 48 evaluator """rankbased""" +535 49 dataset """kinships""" +535 49 model """rescal""" +535 49 loss """bceaftersigmoid""" +535 49 regularizer """no""" +535 49 
optimizer """adam""" +535 49 training_loop """lcwa""" +535 49 evaluator """rankbased""" +535 50 dataset """kinships""" +535 50 model """rescal""" +535 50 loss """bceaftersigmoid""" +535 50 regularizer """no""" +535 50 optimizer """adam""" +535 50 training_loop """lcwa""" +535 50 evaluator """rankbased""" +535 51 dataset """kinships""" +535 51 model """rescal""" +535 51 loss """bceaftersigmoid""" +535 51 regularizer """no""" +535 51 optimizer """adam""" +535 51 training_loop """lcwa""" +535 51 evaluator """rankbased""" +535 52 dataset """kinships""" +535 52 model """rescal""" +535 52 loss """bceaftersigmoid""" +535 52 regularizer """no""" +535 52 optimizer """adam""" +535 52 training_loop """lcwa""" +535 52 evaluator """rankbased""" +535 53 dataset """kinships""" +535 53 model """rescal""" +535 53 loss """bceaftersigmoid""" +535 53 regularizer """no""" +535 53 optimizer """adam""" +535 53 training_loop """lcwa""" +535 53 evaluator """rankbased""" +535 54 dataset """kinships""" +535 54 model """rescal""" +535 54 loss """bceaftersigmoid""" +535 54 regularizer """no""" +535 54 optimizer """adam""" +535 54 training_loop """lcwa""" +535 54 evaluator """rankbased""" +535 55 dataset """kinships""" +535 55 model """rescal""" +535 55 loss """bceaftersigmoid""" +535 55 regularizer """no""" +535 55 optimizer """adam""" +535 55 training_loop """lcwa""" +535 55 evaluator """rankbased""" +535 56 dataset """kinships""" +535 56 model """rescal""" +535 56 loss """bceaftersigmoid""" +535 56 regularizer """no""" +535 56 optimizer """adam""" +535 56 training_loop """lcwa""" +535 56 evaluator """rankbased""" +535 57 dataset """kinships""" +535 57 model """rescal""" +535 57 loss """bceaftersigmoid""" +535 57 regularizer """no""" +535 57 optimizer """adam""" +535 57 training_loop """lcwa""" +535 57 evaluator """rankbased""" +535 58 dataset """kinships""" +535 58 model """rescal""" +535 58 loss """bceaftersigmoid""" +535 58 regularizer """no""" +535 58 optimizer """adam""" +535 58 
training_loop """lcwa""" +535 58 evaluator """rankbased""" +535 59 dataset """kinships""" +535 59 model """rescal""" +535 59 loss """bceaftersigmoid""" +535 59 regularizer """no""" +535 59 optimizer """adam""" +535 59 training_loop """lcwa""" +535 59 evaluator """rankbased""" +535 60 dataset """kinships""" +535 60 model """rescal""" +535 60 loss """bceaftersigmoid""" +535 60 regularizer """no""" +535 60 optimizer """adam""" +535 60 training_loop """lcwa""" +535 60 evaluator """rankbased""" +535 61 dataset """kinships""" +535 61 model """rescal""" +535 61 loss """bceaftersigmoid""" +535 61 regularizer """no""" +535 61 optimizer """adam""" +535 61 training_loop """lcwa""" +535 61 evaluator """rankbased""" +535 62 dataset """kinships""" +535 62 model """rescal""" +535 62 loss """bceaftersigmoid""" +535 62 regularizer """no""" +535 62 optimizer """adam""" +535 62 training_loop """lcwa""" +535 62 evaluator """rankbased""" +535 63 dataset """kinships""" +535 63 model """rescal""" +535 63 loss """bceaftersigmoid""" +535 63 regularizer """no""" +535 63 optimizer """adam""" +535 63 training_loop """lcwa""" +535 63 evaluator """rankbased""" +535 64 dataset """kinships""" +535 64 model """rescal""" +535 64 loss """bceaftersigmoid""" +535 64 regularizer """no""" +535 64 optimizer """adam""" +535 64 training_loop """lcwa""" +535 64 evaluator """rankbased""" +535 65 dataset """kinships""" +535 65 model """rescal""" +535 65 loss """bceaftersigmoid""" +535 65 regularizer """no""" +535 65 optimizer """adam""" +535 65 training_loop """lcwa""" +535 65 evaluator """rankbased""" +535 66 dataset """kinships""" +535 66 model """rescal""" +535 66 loss """bceaftersigmoid""" +535 66 regularizer """no""" +535 66 optimizer """adam""" +535 66 training_loop """lcwa""" +535 66 evaluator """rankbased""" +535 67 dataset """kinships""" +535 67 model """rescal""" +535 67 loss """bceaftersigmoid""" +535 67 regularizer """no""" +535 67 optimizer """adam""" +535 67 training_loop """lcwa""" +535 67 
evaluator """rankbased""" +535 68 dataset """kinships""" +535 68 model """rescal""" +535 68 loss """bceaftersigmoid""" +535 68 regularizer """no""" +535 68 optimizer """adam""" +535 68 training_loop """lcwa""" +535 68 evaluator """rankbased""" +535 69 dataset """kinships""" +535 69 model """rescal""" +535 69 loss """bceaftersigmoid""" +535 69 regularizer """no""" +535 69 optimizer """adam""" +535 69 training_loop """lcwa""" +535 69 evaluator """rankbased""" +535 70 dataset """kinships""" +535 70 model """rescal""" +535 70 loss """bceaftersigmoid""" +535 70 regularizer """no""" +535 70 optimizer """adam""" +535 70 training_loop """lcwa""" +535 70 evaluator """rankbased""" +535 71 dataset """kinships""" +535 71 model """rescal""" +535 71 loss """bceaftersigmoid""" +535 71 regularizer """no""" +535 71 optimizer """adam""" +535 71 training_loop """lcwa""" +535 71 evaluator """rankbased""" +535 72 dataset """kinships""" +535 72 model """rescal""" +535 72 loss """bceaftersigmoid""" +535 72 regularizer """no""" +535 72 optimizer """adam""" +535 72 training_loop """lcwa""" +535 72 evaluator """rankbased""" +535 73 dataset """kinships""" +535 73 model """rescal""" +535 73 loss """bceaftersigmoid""" +535 73 regularizer """no""" +535 73 optimizer """adam""" +535 73 training_loop """lcwa""" +535 73 evaluator """rankbased""" +535 74 dataset """kinships""" +535 74 model """rescal""" +535 74 loss """bceaftersigmoid""" +535 74 regularizer """no""" +535 74 optimizer """adam""" +535 74 training_loop """lcwa""" +535 74 evaluator """rankbased""" +535 75 dataset """kinships""" +535 75 model """rescal""" +535 75 loss """bceaftersigmoid""" +535 75 regularizer """no""" +535 75 optimizer """adam""" +535 75 training_loop """lcwa""" +535 75 evaluator """rankbased""" +535 76 dataset """kinships""" +535 76 model """rescal""" +535 76 loss """bceaftersigmoid""" +535 76 regularizer """no""" +535 76 optimizer """adam""" +535 76 training_loop """lcwa""" +535 76 evaluator """rankbased""" +535 77 
dataset """kinships""" +535 77 model """rescal""" +535 77 loss """bceaftersigmoid""" +535 77 regularizer """no""" +535 77 optimizer """adam""" +535 77 training_loop """lcwa""" +535 77 evaluator """rankbased""" +535 78 dataset """kinships""" +535 78 model """rescal""" +535 78 loss """bceaftersigmoid""" +535 78 regularizer """no""" +535 78 optimizer """adam""" +535 78 training_loop """lcwa""" +535 78 evaluator """rankbased""" +535 79 dataset """kinships""" +535 79 model """rescal""" +535 79 loss """bceaftersigmoid""" +535 79 regularizer """no""" +535 79 optimizer """adam""" +535 79 training_loop """lcwa""" +535 79 evaluator """rankbased""" +535 80 dataset """kinships""" +535 80 model """rescal""" +535 80 loss """bceaftersigmoid""" +535 80 regularizer """no""" +535 80 optimizer """adam""" +535 80 training_loop """lcwa""" +535 80 evaluator """rankbased""" +535 81 dataset """kinships""" +535 81 model """rescal""" +535 81 loss """bceaftersigmoid""" +535 81 regularizer """no""" +535 81 optimizer """adam""" +535 81 training_loop """lcwa""" +535 81 evaluator """rankbased""" +535 82 dataset """kinships""" +535 82 model """rescal""" +535 82 loss """bceaftersigmoid""" +535 82 regularizer """no""" +535 82 optimizer """adam""" +535 82 training_loop """lcwa""" +535 82 evaluator """rankbased""" +535 83 dataset """kinships""" +535 83 model """rescal""" +535 83 loss """bceaftersigmoid""" +535 83 regularizer """no""" +535 83 optimizer """adam""" +535 83 training_loop """lcwa""" +535 83 evaluator """rankbased""" +535 84 dataset """kinships""" +535 84 model """rescal""" +535 84 loss """bceaftersigmoid""" +535 84 regularizer """no""" +535 84 optimizer """adam""" +535 84 training_loop """lcwa""" +535 84 evaluator """rankbased""" +535 85 dataset """kinships""" +535 85 model """rescal""" +535 85 loss """bceaftersigmoid""" +535 85 regularizer """no""" +535 85 optimizer """adam""" +535 85 training_loop """lcwa""" +535 85 evaluator """rankbased""" +535 86 dataset """kinships""" +535 86 model 
"""rescal""" +535 86 loss """bceaftersigmoid""" +535 86 regularizer """no""" +535 86 optimizer """adam""" +535 86 training_loop """lcwa""" +535 86 evaluator """rankbased""" +535 87 dataset """kinships""" +535 87 model """rescal""" +535 87 loss """bceaftersigmoid""" +535 87 regularizer """no""" +535 87 optimizer """adam""" +535 87 training_loop """lcwa""" +535 87 evaluator """rankbased""" +535 88 dataset """kinships""" +535 88 model """rescal""" +535 88 loss """bceaftersigmoid""" +535 88 regularizer """no""" +535 88 optimizer """adam""" +535 88 training_loop """lcwa""" +535 88 evaluator """rankbased""" +535 89 dataset """kinships""" +535 89 model """rescal""" +535 89 loss """bceaftersigmoid""" +535 89 regularizer """no""" +535 89 optimizer """adam""" +535 89 training_loop """lcwa""" +535 89 evaluator """rankbased""" +535 90 dataset """kinships""" +535 90 model """rescal""" +535 90 loss """bceaftersigmoid""" +535 90 regularizer """no""" +535 90 optimizer """adam""" +535 90 training_loop """lcwa""" +535 90 evaluator """rankbased""" +535 91 dataset """kinships""" +535 91 model """rescal""" +535 91 loss """bceaftersigmoid""" +535 91 regularizer """no""" +535 91 optimizer """adam""" +535 91 training_loop """lcwa""" +535 91 evaluator """rankbased""" +535 92 dataset """kinships""" +535 92 model """rescal""" +535 92 loss """bceaftersigmoid""" +535 92 regularizer """no""" +535 92 optimizer """adam""" +535 92 training_loop """lcwa""" +535 92 evaluator """rankbased""" +535 93 dataset """kinships""" +535 93 model """rescal""" +535 93 loss """bceaftersigmoid""" +535 93 regularizer """no""" +535 93 optimizer """adam""" +535 93 training_loop """lcwa""" +535 93 evaluator """rankbased""" +535 94 dataset """kinships""" +535 94 model """rescal""" +535 94 loss """bceaftersigmoid""" +535 94 regularizer """no""" +535 94 optimizer """adam""" +535 94 training_loop """lcwa""" +535 94 evaluator """rankbased""" +535 95 dataset """kinships""" +535 95 model """rescal""" +535 95 loss 
"""bceaftersigmoid""" +535 95 regularizer """no""" +535 95 optimizer """adam""" +535 95 training_loop """lcwa""" +535 95 evaluator """rankbased""" +535 96 dataset """kinships""" +535 96 model """rescal""" +535 96 loss """bceaftersigmoid""" +535 96 regularizer """no""" +535 96 optimizer """adam""" +535 96 training_loop """lcwa""" +535 96 evaluator """rankbased""" +535 97 dataset """kinships""" +535 97 model """rescal""" +535 97 loss """bceaftersigmoid""" +535 97 regularizer """no""" +535 97 optimizer """adam""" +535 97 training_loop """lcwa""" +535 97 evaluator """rankbased""" +535 98 dataset """kinships""" +535 98 model """rescal""" +535 98 loss """bceaftersigmoid""" +535 98 regularizer """no""" +535 98 optimizer """adam""" +535 98 training_loop """lcwa""" +535 98 evaluator """rankbased""" +535 99 dataset """kinships""" +535 99 model """rescal""" +535 99 loss """bceaftersigmoid""" +535 99 regularizer """no""" +535 99 optimizer """adam""" +535 99 training_loop """lcwa""" +535 99 evaluator """rankbased""" +535 100 dataset """kinships""" +535 100 model """rescal""" +535 100 loss """bceaftersigmoid""" +535 100 regularizer """no""" +535 100 optimizer """adam""" +535 100 training_loop """lcwa""" +535 100 evaluator """rankbased""" +536 1 model.embedding_dim 2.0 +536 1 optimizer.lr 0.007338328126410634 +536 1 training.batch_size 2.0 +536 1 training.label_smoothing 0.812440675665538 +536 2 model.embedding_dim 2.0 +536 2 optimizer.lr 0.06791839518434001 +536 2 training.batch_size 0.0 +536 2 training.label_smoothing 0.21261540746688126 +536 3 model.embedding_dim 0.0 +536 3 optimizer.lr 0.06083634095246046 +536 3 training.batch_size 0.0 +536 3 training.label_smoothing 0.034343687041869055 +536 4 model.embedding_dim 1.0 +536 4 optimizer.lr 0.004309029154361849 +536 4 training.batch_size 2.0 +536 4 training.label_smoothing 0.0018642700511058803 +536 5 model.embedding_dim 1.0 +536 5 optimizer.lr 0.0012714384637377712 +536 5 training.batch_size 1.0 +536 5 training.label_smoothing 
0.2243198339961382 +536 6 model.embedding_dim 0.0 +536 6 optimizer.lr 0.0037688283995394177 +536 6 training.batch_size 1.0 +536 6 training.label_smoothing 0.013200326285758651 +536 7 model.embedding_dim 0.0 +536 7 optimizer.lr 0.01004396301414112 +536 7 training.batch_size 2.0 +536 7 training.label_smoothing 0.6024957058103282 +536 8 model.embedding_dim 1.0 +536 8 optimizer.lr 0.07118006612148915 +536 8 training.batch_size 0.0 +536 8 training.label_smoothing 0.017478403597353545 +536 9 model.embedding_dim 1.0 +536 9 optimizer.lr 0.004008135042691589 +536 9 training.batch_size 2.0 +536 9 training.label_smoothing 0.0027719691615294426 +536 10 model.embedding_dim 2.0 +536 10 optimizer.lr 0.0179930103355407 +536 10 training.batch_size 0.0 +536 10 training.label_smoothing 0.14982571747754353 +536 11 model.embedding_dim 0.0 +536 11 optimizer.lr 0.007812310778364333 +536 11 training.batch_size 1.0 +536 11 training.label_smoothing 0.042858422491060984 +536 12 model.embedding_dim 0.0 +536 12 optimizer.lr 0.005280866930712051 +536 12 training.batch_size 1.0 +536 12 training.label_smoothing 0.1600712458973212 +536 13 model.embedding_dim 1.0 +536 13 optimizer.lr 0.009846184347772768 +536 13 training.batch_size 1.0 +536 13 training.label_smoothing 0.002546961170956089 +536 14 model.embedding_dim 0.0 +536 14 optimizer.lr 0.004416235921968436 +536 14 training.batch_size 1.0 +536 14 training.label_smoothing 0.04282819353914536 +536 15 model.embedding_dim 1.0 +536 15 optimizer.lr 0.07070574532938524 +536 15 training.batch_size 2.0 +536 15 training.label_smoothing 0.030891918162348368 +536 16 model.embedding_dim 2.0 +536 16 optimizer.lr 0.008127320741306766 +536 16 training.batch_size 0.0 +536 16 training.label_smoothing 0.00759998155128433 +536 17 model.embedding_dim 1.0 +536 17 optimizer.lr 0.021043276894775437 +536 17 training.batch_size 2.0 +536 17 training.label_smoothing 0.09758257913454613 +536 18 model.embedding_dim 1.0 +536 18 optimizer.lr 0.003912368854661202 +536 18 
training.batch_size 2.0 +536 18 training.label_smoothing 0.02112322327469432 +536 19 model.embedding_dim 0.0 +536 19 optimizer.lr 0.00450090285523116 +536 19 training.batch_size 0.0 +536 19 training.label_smoothing 0.029648649429890025 +536 20 model.embedding_dim 0.0 +536 20 optimizer.lr 0.001040127531550519 +536 20 training.batch_size 0.0 +536 20 training.label_smoothing 0.0013826573326787982 +536 21 model.embedding_dim 0.0 +536 21 optimizer.lr 0.007686799178859489 +536 21 training.batch_size 1.0 +536 21 training.label_smoothing 0.23425273461753823 +536 22 model.embedding_dim 1.0 +536 22 optimizer.lr 0.04291328399111738 +536 22 training.batch_size 1.0 +536 22 training.label_smoothing 0.016147378327471724 +536 23 model.embedding_dim 2.0 +536 23 optimizer.lr 0.007807893336147033 +536 23 training.batch_size 0.0 +536 23 training.label_smoothing 0.1250152350361854 +536 24 model.embedding_dim 1.0 +536 24 optimizer.lr 0.006656658790064458 +536 24 training.batch_size 1.0 +536 24 training.label_smoothing 0.012593309334629032 +536 25 model.embedding_dim 1.0 +536 25 optimizer.lr 0.0012493830535020916 +536 25 training.batch_size 0.0 +536 25 training.label_smoothing 0.0016908552180563098 +536 26 model.embedding_dim 0.0 +536 26 optimizer.lr 0.06200709786247477 +536 26 training.batch_size 0.0 +536 26 training.label_smoothing 0.010308729268393801 +536 27 model.embedding_dim 2.0 +536 27 optimizer.lr 0.027873321131099683 +536 27 training.batch_size 2.0 +536 27 training.label_smoothing 0.07711547061570481 +536 28 model.embedding_dim 0.0 +536 28 optimizer.lr 0.03587267317590232 +536 28 training.batch_size 0.0 +536 28 training.label_smoothing 0.4704521490092184 +536 29 model.embedding_dim 0.0 +536 29 optimizer.lr 0.025850254781719224 +536 29 training.batch_size 1.0 +536 29 training.label_smoothing 0.02934113930246265 +536 30 model.embedding_dim 1.0 +536 30 optimizer.lr 0.0014967502113941986 +536 30 training.batch_size 1.0 +536 30 training.label_smoothing 0.018350190658164303 +536 31 
model.embedding_dim 0.0 +536 31 optimizer.lr 0.036069219038866306 +536 31 training.batch_size 1.0 +536 31 training.label_smoothing 0.0010959234037032461 +536 32 model.embedding_dim 1.0 +536 32 optimizer.lr 0.01359182124329144 +536 32 training.batch_size 2.0 +536 32 training.label_smoothing 0.2577756646693424 +536 33 model.embedding_dim 0.0 +536 33 optimizer.lr 0.049209178920012316 +536 33 training.batch_size 0.0 +536 33 training.label_smoothing 0.3956557236791953 +536 34 model.embedding_dim 0.0 +536 34 optimizer.lr 0.08175144758285684 +536 34 training.batch_size 2.0 +536 34 training.label_smoothing 0.01247090657242307 +536 35 model.embedding_dim 2.0 +536 35 optimizer.lr 0.05834941992219872 +536 35 training.batch_size 1.0 +536 35 training.label_smoothing 0.05274231071116688 +536 36 model.embedding_dim 2.0 +536 36 optimizer.lr 0.002103215129921451 +536 36 training.batch_size 0.0 +536 36 training.label_smoothing 0.0010355725605123256 +536 37 model.embedding_dim 1.0 +536 37 optimizer.lr 0.08325912177587771 +536 37 training.batch_size 0.0 +536 37 training.label_smoothing 0.2979145532451214 +536 38 model.embedding_dim 2.0 +536 38 optimizer.lr 0.007105600363274807 +536 38 training.batch_size 1.0 +536 38 training.label_smoothing 0.04970492301345128 +536 39 model.embedding_dim 1.0 +536 39 optimizer.lr 0.04886045377079623 +536 39 training.batch_size 1.0 +536 39 training.label_smoothing 0.006288317394579925 +536 40 model.embedding_dim 2.0 +536 40 optimizer.lr 0.08098339012834022 +536 40 training.batch_size 0.0 +536 40 training.label_smoothing 0.07128095997742319 +536 41 model.embedding_dim 1.0 +536 41 optimizer.lr 0.021940517852578393 +536 41 training.batch_size 0.0 +536 41 training.label_smoothing 0.03346896429440139 +536 42 model.embedding_dim 2.0 +536 42 optimizer.lr 0.007443215539189273 +536 42 training.batch_size 0.0 +536 42 training.label_smoothing 0.15280843391157978 +536 43 model.embedding_dim 2.0 +536 43 optimizer.lr 0.07815066266916769 +536 43 training.batch_size 
1.0 +536 43 training.label_smoothing 0.010470985537697304 +536 44 model.embedding_dim 0.0 +536 44 optimizer.lr 0.0323701857082593 +536 44 training.batch_size 0.0 +536 44 training.label_smoothing 0.11773482615877015 +536 45 model.embedding_dim 2.0 +536 45 optimizer.lr 0.0015150126733889672 +536 45 training.batch_size 0.0 +536 45 training.label_smoothing 0.08829361320665867 +536 46 model.embedding_dim 0.0 +536 46 optimizer.lr 0.03432969938090362 +536 46 training.batch_size 1.0 +536 46 training.label_smoothing 0.05453888456097135 +536 47 model.embedding_dim 2.0 +536 47 optimizer.lr 0.05454478352859166 +536 47 training.batch_size 1.0 +536 47 training.label_smoothing 0.002826079584718101 +536 48 model.embedding_dim 2.0 +536 48 optimizer.lr 0.0024559771056637163 +536 48 training.batch_size 2.0 +536 48 training.label_smoothing 0.061762744595011236 +536 49 model.embedding_dim 1.0 +536 49 optimizer.lr 0.001768027864931709 +536 49 training.batch_size 1.0 +536 49 training.label_smoothing 0.004348099319063884 +536 50 model.embedding_dim 0.0 +536 50 optimizer.lr 0.011414880573371061 +536 50 training.batch_size 0.0 +536 50 training.label_smoothing 0.41447055666669763 +536 51 model.embedding_dim 2.0 +536 51 optimizer.lr 0.04985520231959399 +536 51 training.batch_size 0.0 +536 51 training.label_smoothing 0.1099830057730797 +536 52 model.embedding_dim 1.0 +536 52 optimizer.lr 0.00890638960806565 +536 52 training.batch_size 2.0 +536 52 training.label_smoothing 0.0015581079184984207 +536 53 model.embedding_dim 0.0 +536 53 optimizer.lr 0.015148611570403512 +536 53 training.batch_size 1.0 +536 53 training.label_smoothing 0.7169444349091151 +536 54 model.embedding_dim 0.0 +536 54 optimizer.lr 0.04959546204364804 +536 54 training.batch_size 1.0 +536 54 training.label_smoothing 0.057820255887556675 +536 55 model.embedding_dim 0.0 +536 55 optimizer.lr 0.030813102397803698 +536 55 training.batch_size 0.0 +536 55 training.label_smoothing 0.012205883835797618 +536 56 model.embedding_dim 2.0 
+536 56 optimizer.lr 0.015206262381305095 +536 56 training.batch_size 0.0 +536 56 training.label_smoothing 0.0010783587531319656 +536 57 model.embedding_dim 1.0 +536 57 optimizer.lr 0.008516250390136457 +536 57 training.batch_size 0.0 +536 57 training.label_smoothing 0.07646962549171808 +536 58 model.embedding_dim 2.0 +536 58 optimizer.lr 0.025240910390871184 +536 58 training.batch_size 0.0 +536 58 training.label_smoothing 0.8011646413871746 +536 59 model.embedding_dim 1.0 +536 59 optimizer.lr 0.05764105549454789 +536 59 training.batch_size 2.0 +536 59 training.label_smoothing 0.09741958438051403 +536 60 model.embedding_dim 2.0 +536 60 optimizer.lr 0.019751567729523564 +536 60 training.batch_size 1.0 +536 60 training.label_smoothing 0.012568077470104036 +536 61 model.embedding_dim 1.0 +536 61 optimizer.lr 0.02262455893518158 +536 61 training.batch_size 2.0 +536 61 training.label_smoothing 0.03769930798092256 +536 62 model.embedding_dim 0.0 +536 62 optimizer.lr 0.007264957732708173 +536 62 training.batch_size 2.0 +536 62 training.label_smoothing 0.014832606969368544 +536 63 model.embedding_dim 2.0 +536 63 optimizer.lr 0.03766470082850014 +536 63 training.batch_size 2.0 +536 63 training.label_smoothing 0.012543498252504398 +536 64 model.embedding_dim 0.0 +536 64 optimizer.lr 0.02926620956973513 +536 64 training.batch_size 0.0 +536 64 training.label_smoothing 0.877039572561546 +536 65 model.embedding_dim 0.0 +536 65 optimizer.lr 0.024708374591341398 +536 65 training.batch_size 2.0 +536 65 training.label_smoothing 0.004683783383150697 +536 66 model.embedding_dim 1.0 +536 66 optimizer.lr 0.0010368849341132315 +536 66 training.batch_size 1.0 +536 66 training.label_smoothing 0.009370489269748875 +536 67 model.embedding_dim 2.0 +536 67 optimizer.lr 0.006308952077195763 +536 67 training.batch_size 0.0 +536 67 training.label_smoothing 0.048357180011350025 +536 68 model.embedding_dim 1.0 +536 68 optimizer.lr 0.057844140027973265 +536 68 training.batch_size 0.0 +536 68 
training.label_smoothing 0.12993323903399664 +536 69 model.embedding_dim 1.0 +536 69 optimizer.lr 0.004132580110447178 +536 69 training.batch_size 1.0 +536 69 training.label_smoothing 0.12416977541331885 +536 70 model.embedding_dim 2.0 +536 70 optimizer.lr 0.007864837082910567 +536 70 training.batch_size 0.0 +536 70 training.label_smoothing 0.23372970634393916 +536 71 model.embedding_dim 0.0 +536 71 optimizer.lr 0.0011629543784932026 +536 71 training.batch_size 0.0 +536 71 training.label_smoothing 0.12817064234611036 +536 72 model.embedding_dim 1.0 +536 72 optimizer.lr 0.0024049371723897984 +536 72 training.batch_size 0.0 +536 72 training.label_smoothing 0.17166274030955547 +536 73 model.embedding_dim 1.0 +536 73 optimizer.lr 0.0748599072819081 +536 73 training.batch_size 2.0 +536 73 training.label_smoothing 0.04319639021909528 +536 74 model.embedding_dim 2.0 +536 74 optimizer.lr 0.0029011753715172768 +536 74 training.batch_size 1.0 +536 74 training.label_smoothing 0.0984481825030487 +536 75 model.embedding_dim 1.0 +536 75 optimizer.lr 0.04949438825010924 +536 75 training.batch_size 2.0 +536 75 training.label_smoothing 0.04212334468961388 +536 76 model.embedding_dim 1.0 +536 76 optimizer.lr 0.04843774397643903 +536 76 training.batch_size 0.0 +536 76 training.label_smoothing 0.07435068328398751 +536 77 model.embedding_dim 2.0 +536 77 optimizer.lr 0.00201035909708084 +536 77 training.batch_size 2.0 +536 77 training.label_smoothing 0.028685622880016425 +536 78 model.embedding_dim 2.0 +536 78 optimizer.lr 0.019894144750205407 +536 78 training.batch_size 1.0 +536 78 training.label_smoothing 0.0666485074555861 +536 79 model.embedding_dim 0.0 +536 79 optimizer.lr 0.009949182027065882 +536 79 training.batch_size 1.0 +536 79 training.label_smoothing 0.0063447199983224425 +536 80 model.embedding_dim 1.0 +536 80 optimizer.lr 0.011114629544292402 +536 80 training.batch_size 0.0 +536 80 training.label_smoothing 0.00992225790693841 +536 81 model.embedding_dim 2.0 +536 81 
optimizer.lr 0.07223036004144125 +536 81 training.batch_size 2.0 +536 81 training.label_smoothing 0.09039571833413752 +536 82 model.embedding_dim 1.0 +536 82 optimizer.lr 0.005544060208188337 +536 82 training.batch_size 0.0 +536 82 training.label_smoothing 0.0016401364405888804 +536 83 model.embedding_dim 2.0 +536 83 optimizer.lr 0.0025299850426429805 +536 83 training.batch_size 2.0 +536 83 training.label_smoothing 0.0013335197024327411 +536 84 model.embedding_dim 2.0 +536 84 optimizer.lr 0.06410479022825684 +536 84 training.batch_size 1.0 +536 84 training.label_smoothing 0.3281112504404946 +536 85 model.embedding_dim 2.0 +536 85 optimizer.lr 0.0015083884318408526 +536 85 training.batch_size 0.0 +536 85 training.label_smoothing 0.08550568316679481 +536 86 model.embedding_dim 2.0 +536 86 optimizer.lr 0.003957780679623048 +536 86 training.batch_size 0.0 +536 86 training.label_smoothing 0.1863369984426588 +536 87 model.embedding_dim 0.0 +536 87 optimizer.lr 0.0061028146450528425 +536 87 training.batch_size 0.0 +536 87 training.label_smoothing 0.13056137210348076 +536 88 model.embedding_dim 1.0 +536 88 optimizer.lr 0.0019998042055234485 +536 88 training.batch_size 0.0 +536 88 training.label_smoothing 0.028401879479004653 +536 89 model.embedding_dim 2.0 +536 89 optimizer.lr 0.029646568500275566 +536 89 training.batch_size 2.0 +536 89 training.label_smoothing 0.02612890588024815 +536 90 model.embedding_dim 0.0 +536 90 optimizer.lr 0.012862818146516049 +536 90 training.batch_size 2.0 +536 90 training.label_smoothing 0.14489541332293598 +536 91 model.embedding_dim 2.0 +536 91 optimizer.lr 0.024558463081653176 +536 91 training.batch_size 0.0 +536 91 training.label_smoothing 0.3500650519920759 +536 92 model.embedding_dim 0.0 +536 92 optimizer.lr 0.008705699898960303 +536 92 training.batch_size 1.0 +536 92 training.label_smoothing 0.003004008976887877 +536 93 model.embedding_dim 1.0 +536 93 optimizer.lr 0.02080215345691346 +536 93 training.batch_size 1.0 +536 93 
training.label_smoothing 0.4401630379862336 +536 94 model.embedding_dim 2.0 +536 94 optimizer.lr 0.03752949250039601 +536 94 training.batch_size 0.0 +536 94 training.label_smoothing 0.0025326601445335105 +536 95 model.embedding_dim 1.0 +536 95 optimizer.lr 0.014915388653521839 +536 95 training.batch_size 1.0 +536 95 training.label_smoothing 0.048344266586669335 +536 96 model.embedding_dim 1.0 +536 96 optimizer.lr 0.007667808387731779 +536 96 training.batch_size 1.0 +536 96 training.label_smoothing 0.08382711261307305 +536 97 model.embedding_dim 0.0 +536 97 optimizer.lr 0.08152211513816968 +536 97 training.batch_size 2.0 +536 97 training.label_smoothing 0.0011131743231296042 +536 98 model.embedding_dim 0.0 +536 98 optimizer.lr 0.0013104676294708297 +536 98 training.batch_size 1.0 +536 98 training.label_smoothing 0.05909449423414605 +536 99 model.embedding_dim 2.0 +536 99 optimizer.lr 0.039084241774621505 +536 99 training.batch_size 1.0 +536 99 training.label_smoothing 0.004241063409325657 +536 100 model.embedding_dim 1.0 +536 100 optimizer.lr 0.009176640324781962 +536 100 training.batch_size 2.0 +536 100 training.label_smoothing 0.18165795141007016 +536 1 dataset """kinships""" +536 1 model """rescal""" +536 1 loss """softplus""" +536 1 regularizer """no""" +536 1 optimizer """adam""" +536 1 training_loop """lcwa""" +536 1 evaluator """rankbased""" +536 2 dataset """kinships""" +536 2 model """rescal""" +536 2 loss """softplus""" +536 2 regularizer """no""" +536 2 optimizer """adam""" +536 2 training_loop """lcwa""" +536 2 evaluator """rankbased""" +536 3 dataset """kinships""" +536 3 model """rescal""" +536 3 loss """softplus""" +536 3 regularizer """no""" +536 3 optimizer """adam""" +536 3 training_loop """lcwa""" +536 3 evaluator """rankbased""" +536 4 dataset """kinships""" +536 4 model """rescal""" +536 4 loss """softplus""" +536 4 regularizer """no""" +536 4 optimizer """adam""" +536 4 training_loop """lcwa""" +536 4 evaluator """rankbased""" +536 5 dataset 
"""kinships""" +536 5 model """rescal""" +536 5 loss """softplus""" +536 5 regularizer """no""" +536 5 optimizer """adam""" +536 5 training_loop """lcwa""" +536 5 evaluator """rankbased""" +536 6 dataset """kinships""" +536 6 model """rescal""" +536 6 loss """softplus""" +536 6 regularizer """no""" +536 6 optimizer """adam""" +536 6 training_loop """lcwa""" +536 6 evaluator """rankbased""" +536 7 dataset """kinships""" +536 7 model """rescal""" +536 7 loss """softplus""" +536 7 regularizer """no""" +536 7 optimizer """adam""" +536 7 training_loop """lcwa""" +536 7 evaluator """rankbased""" +536 8 dataset """kinships""" +536 8 model """rescal""" +536 8 loss """softplus""" +536 8 regularizer """no""" +536 8 optimizer """adam""" +536 8 training_loop """lcwa""" +536 8 evaluator """rankbased""" +536 9 dataset """kinships""" +536 9 model """rescal""" +536 9 loss """softplus""" +536 9 regularizer """no""" +536 9 optimizer """adam""" +536 9 training_loop """lcwa""" +536 9 evaluator """rankbased""" +536 10 dataset """kinships""" +536 10 model """rescal""" +536 10 loss """softplus""" +536 10 regularizer """no""" +536 10 optimizer """adam""" +536 10 training_loop """lcwa""" +536 10 evaluator """rankbased""" +536 11 dataset """kinships""" +536 11 model """rescal""" +536 11 loss """softplus""" +536 11 regularizer """no""" +536 11 optimizer """adam""" +536 11 training_loop """lcwa""" +536 11 evaluator """rankbased""" +536 12 dataset """kinships""" +536 12 model """rescal""" +536 12 loss """softplus""" +536 12 regularizer """no""" +536 12 optimizer """adam""" +536 12 training_loop """lcwa""" +536 12 evaluator """rankbased""" +536 13 dataset """kinships""" +536 13 model """rescal""" +536 13 loss """softplus""" +536 13 regularizer """no""" +536 13 optimizer """adam""" +536 13 training_loop """lcwa""" +536 13 evaluator """rankbased""" +536 14 dataset """kinships""" +536 14 model """rescal""" +536 14 loss """softplus""" +536 14 regularizer """no""" +536 14 optimizer """adam""" +536 
14 training_loop """lcwa""" +536 14 evaluator """rankbased""" +536 15 dataset """kinships""" +536 15 model """rescal""" +536 15 loss """softplus""" +536 15 regularizer """no""" +536 15 optimizer """adam""" +536 15 training_loop """lcwa""" +536 15 evaluator """rankbased""" +536 16 dataset """kinships""" +536 16 model """rescal""" +536 16 loss """softplus""" +536 16 regularizer """no""" +536 16 optimizer """adam""" +536 16 training_loop """lcwa""" +536 16 evaluator """rankbased""" +536 17 dataset """kinships""" +536 17 model """rescal""" +536 17 loss """softplus""" +536 17 regularizer """no""" +536 17 optimizer """adam""" +536 17 training_loop """lcwa""" +536 17 evaluator """rankbased""" +536 18 dataset """kinships""" +536 18 model """rescal""" +536 18 loss """softplus""" +536 18 regularizer """no""" +536 18 optimizer """adam""" +536 18 training_loop """lcwa""" +536 18 evaluator """rankbased""" +536 19 dataset """kinships""" +536 19 model """rescal""" +536 19 loss """softplus""" +536 19 regularizer """no""" +536 19 optimizer """adam""" +536 19 training_loop """lcwa""" +536 19 evaluator """rankbased""" +536 20 dataset """kinships""" +536 20 model """rescal""" +536 20 loss """softplus""" +536 20 regularizer """no""" +536 20 optimizer """adam""" +536 20 training_loop """lcwa""" +536 20 evaluator """rankbased""" +536 21 dataset """kinships""" +536 21 model """rescal""" +536 21 loss """softplus""" +536 21 regularizer """no""" +536 21 optimizer """adam""" +536 21 training_loop """lcwa""" +536 21 evaluator """rankbased""" +536 22 dataset """kinships""" +536 22 model """rescal""" +536 22 loss """softplus""" +536 22 regularizer """no""" +536 22 optimizer """adam""" +536 22 training_loop """lcwa""" +536 22 evaluator """rankbased""" +536 23 dataset """kinships""" +536 23 model """rescal""" +536 23 loss """softplus""" +536 23 regularizer """no""" +536 23 optimizer """adam""" +536 23 training_loop """lcwa""" +536 23 evaluator """rankbased""" +536 24 dataset """kinships""" +536 24 
model """rescal""" +536 24 loss """softplus""" +536 24 regularizer """no""" +536 24 optimizer """adam""" +536 24 training_loop """lcwa""" +536 24 evaluator """rankbased""" +536 25 dataset """kinships""" +536 25 model """rescal""" +536 25 loss """softplus""" +536 25 regularizer """no""" +536 25 optimizer """adam""" +536 25 training_loop """lcwa""" +536 25 evaluator """rankbased""" +536 26 dataset """kinships""" +536 26 model """rescal""" +536 26 loss """softplus""" +536 26 regularizer """no""" +536 26 optimizer """adam""" +536 26 training_loop """lcwa""" +536 26 evaluator """rankbased""" +536 27 dataset """kinships""" +536 27 model """rescal""" +536 27 loss """softplus""" +536 27 regularizer """no""" +536 27 optimizer """adam""" +536 27 training_loop """lcwa""" +536 27 evaluator """rankbased""" +536 28 dataset """kinships""" +536 28 model """rescal""" +536 28 loss """softplus""" +536 28 regularizer """no""" +536 28 optimizer """adam""" +536 28 training_loop """lcwa""" +536 28 evaluator """rankbased""" +536 29 dataset """kinships""" +536 29 model """rescal""" +536 29 loss """softplus""" +536 29 regularizer """no""" +536 29 optimizer """adam""" +536 29 training_loop """lcwa""" +536 29 evaluator """rankbased""" +536 30 dataset """kinships""" +536 30 model """rescal""" +536 30 loss """softplus""" +536 30 regularizer """no""" +536 30 optimizer """adam""" +536 30 training_loop """lcwa""" +536 30 evaluator """rankbased""" +536 31 dataset """kinships""" +536 31 model """rescal""" +536 31 loss """softplus""" +536 31 regularizer """no""" +536 31 optimizer """adam""" +536 31 training_loop """lcwa""" +536 31 evaluator """rankbased""" +536 32 dataset """kinships""" +536 32 model """rescal""" +536 32 loss """softplus""" +536 32 regularizer """no""" +536 32 optimizer """adam""" +536 32 training_loop """lcwa""" +536 32 evaluator """rankbased""" +536 33 dataset """kinships""" +536 33 model """rescal""" +536 33 loss """softplus""" +536 33 regularizer """no""" +536 33 optimizer 
"""adam""" +536 33 training_loop """lcwa""" +536 33 evaluator """rankbased""" +536 34 dataset """kinships""" +536 34 model """rescal""" +536 34 loss """softplus""" +536 34 regularizer """no""" +536 34 optimizer """adam""" +536 34 training_loop """lcwa""" +536 34 evaluator """rankbased""" +536 35 dataset """kinships""" +536 35 model """rescal""" +536 35 loss """softplus""" +536 35 regularizer """no""" +536 35 optimizer """adam""" +536 35 training_loop """lcwa""" +536 35 evaluator """rankbased""" +536 36 dataset """kinships""" +536 36 model """rescal""" +536 36 loss """softplus""" +536 36 regularizer """no""" +536 36 optimizer """adam""" +536 36 training_loop """lcwa""" +536 36 evaluator """rankbased""" +536 37 dataset """kinships""" +536 37 model """rescal""" +536 37 loss """softplus""" +536 37 regularizer """no""" +536 37 optimizer """adam""" +536 37 training_loop """lcwa""" +536 37 evaluator """rankbased""" +536 38 dataset """kinships""" +536 38 model """rescal""" +536 38 loss """softplus""" +536 38 regularizer """no""" +536 38 optimizer """adam""" +536 38 training_loop """lcwa""" +536 38 evaluator """rankbased""" +536 39 dataset """kinships""" +536 39 model """rescal""" +536 39 loss """softplus""" +536 39 regularizer """no""" +536 39 optimizer """adam""" +536 39 training_loop """lcwa""" +536 39 evaluator """rankbased""" +536 40 dataset """kinships""" +536 40 model """rescal""" +536 40 loss """softplus""" +536 40 regularizer """no""" +536 40 optimizer """adam""" +536 40 training_loop """lcwa""" +536 40 evaluator """rankbased""" +536 41 dataset """kinships""" +536 41 model """rescal""" +536 41 loss """softplus""" +536 41 regularizer """no""" +536 41 optimizer """adam""" +536 41 training_loop """lcwa""" +536 41 evaluator """rankbased""" +536 42 dataset """kinships""" +536 42 model """rescal""" +536 42 loss """softplus""" +536 42 regularizer """no""" +536 42 optimizer """adam""" +536 42 training_loop """lcwa""" +536 42 evaluator """rankbased""" +536 43 dataset 
"""kinships""" +536 43 model """rescal""" +536 43 loss """softplus""" +536 43 regularizer """no""" +536 43 optimizer """adam""" +536 43 training_loop """lcwa""" +536 43 evaluator """rankbased""" +536 44 dataset """kinships""" +536 44 model """rescal""" +536 44 loss """softplus""" +536 44 regularizer """no""" +536 44 optimizer """adam""" +536 44 training_loop """lcwa""" +536 44 evaluator """rankbased""" +536 45 dataset """kinships""" +536 45 model """rescal""" +536 45 loss """softplus""" +536 45 regularizer """no""" +536 45 optimizer """adam""" +536 45 training_loop """lcwa""" +536 45 evaluator """rankbased""" +536 46 dataset """kinships""" +536 46 model """rescal""" +536 46 loss """softplus""" +536 46 regularizer """no""" +536 46 optimizer """adam""" +536 46 training_loop """lcwa""" +536 46 evaluator """rankbased""" +536 47 dataset """kinships""" +536 47 model """rescal""" +536 47 loss """softplus""" +536 47 regularizer """no""" +536 47 optimizer """adam""" +536 47 training_loop """lcwa""" +536 47 evaluator """rankbased""" +536 48 dataset """kinships""" +536 48 model """rescal""" +536 48 loss """softplus""" +536 48 regularizer """no""" +536 48 optimizer """adam""" +536 48 training_loop """lcwa""" +536 48 evaluator """rankbased""" +536 49 dataset """kinships""" +536 49 model """rescal""" +536 49 loss """softplus""" +536 49 regularizer """no""" +536 49 optimizer """adam""" +536 49 training_loop """lcwa""" +536 49 evaluator """rankbased""" +536 50 dataset """kinships""" +536 50 model """rescal""" +536 50 loss """softplus""" +536 50 regularizer """no""" +536 50 optimizer """adam""" +536 50 training_loop """lcwa""" +536 50 evaluator """rankbased""" +536 51 dataset """kinships""" +536 51 model """rescal""" +536 51 loss """softplus""" +536 51 regularizer """no""" +536 51 optimizer """adam""" +536 51 training_loop """lcwa""" +536 51 evaluator """rankbased""" +536 52 dataset """kinships""" +536 52 model """rescal""" +536 52 loss """softplus""" +536 52 regularizer """no""" 
+536 52 optimizer """adam""" +536 52 training_loop """lcwa""" +536 52 evaluator """rankbased""" +536 53 dataset """kinships""" +536 53 model """rescal""" +536 53 loss """softplus""" +536 53 regularizer """no""" +536 53 optimizer """adam""" +536 53 training_loop """lcwa""" +536 53 evaluator """rankbased""" +536 54 dataset """kinships""" +536 54 model """rescal""" +536 54 loss """softplus""" +536 54 regularizer """no""" +536 54 optimizer """adam""" +536 54 training_loop """lcwa""" +536 54 evaluator """rankbased""" +536 55 dataset """kinships""" +536 55 model """rescal""" +536 55 loss """softplus""" +536 55 regularizer """no""" +536 55 optimizer """adam""" +536 55 training_loop """lcwa""" +536 55 evaluator """rankbased""" +536 56 dataset """kinships""" +536 56 model """rescal""" +536 56 loss """softplus""" +536 56 regularizer """no""" +536 56 optimizer """adam""" +536 56 training_loop """lcwa""" +536 56 evaluator """rankbased""" +536 57 dataset """kinships""" +536 57 model """rescal""" +536 57 loss """softplus""" +536 57 regularizer """no""" +536 57 optimizer """adam""" +536 57 training_loop """lcwa""" +536 57 evaluator """rankbased""" +536 58 dataset """kinships""" +536 58 model """rescal""" +536 58 loss """softplus""" +536 58 regularizer """no""" +536 58 optimizer """adam""" +536 58 training_loop """lcwa""" +536 58 evaluator """rankbased""" +536 59 dataset """kinships""" +536 59 model """rescal""" +536 59 loss """softplus""" +536 59 regularizer """no""" +536 59 optimizer """adam""" +536 59 training_loop """lcwa""" +536 59 evaluator """rankbased""" +536 60 dataset """kinships""" +536 60 model """rescal""" +536 60 loss """softplus""" +536 60 regularizer """no""" +536 60 optimizer """adam""" +536 60 training_loop """lcwa""" +536 60 evaluator """rankbased""" +536 61 dataset """kinships""" +536 61 model """rescal""" +536 61 loss """softplus""" +536 61 regularizer """no""" +536 61 optimizer """adam""" +536 61 training_loop """lcwa""" +536 61 evaluator """rankbased""" +536 
62 dataset """kinships""" +536 62 model """rescal""" +536 62 loss """softplus""" +536 62 regularizer """no""" +536 62 optimizer """adam""" +536 62 training_loop """lcwa""" +536 62 evaluator """rankbased""" +536 63 dataset """kinships""" +536 63 model """rescal""" +536 63 loss """softplus""" +536 63 regularizer """no""" +536 63 optimizer """adam""" +536 63 training_loop """lcwa""" +536 63 evaluator """rankbased""" +536 64 dataset """kinships""" +536 64 model """rescal""" +536 64 loss """softplus""" +536 64 regularizer """no""" +536 64 optimizer """adam""" +536 64 training_loop """lcwa""" +536 64 evaluator """rankbased""" +536 65 dataset """kinships""" +536 65 model """rescal""" +536 65 loss """softplus""" +536 65 regularizer """no""" +536 65 optimizer """adam""" +536 65 training_loop """lcwa""" +536 65 evaluator """rankbased""" +536 66 dataset """kinships""" +536 66 model """rescal""" +536 66 loss """softplus""" +536 66 regularizer """no""" +536 66 optimizer """adam""" +536 66 training_loop """lcwa""" +536 66 evaluator """rankbased""" +536 67 dataset """kinships""" +536 67 model """rescal""" +536 67 loss """softplus""" +536 67 regularizer """no""" +536 67 optimizer """adam""" +536 67 training_loop """lcwa""" +536 67 evaluator """rankbased""" +536 68 dataset """kinships""" +536 68 model """rescal""" +536 68 loss """softplus""" +536 68 regularizer """no""" +536 68 optimizer """adam""" +536 68 training_loop """lcwa""" +536 68 evaluator """rankbased""" +536 69 dataset """kinships""" +536 69 model """rescal""" +536 69 loss """softplus""" +536 69 regularizer """no""" +536 69 optimizer """adam""" +536 69 training_loop """lcwa""" +536 69 evaluator """rankbased""" +536 70 dataset """kinships""" +536 70 model """rescal""" +536 70 loss """softplus""" +536 70 regularizer """no""" +536 70 optimizer """adam""" +536 70 training_loop """lcwa""" +536 70 evaluator """rankbased""" +536 71 dataset """kinships""" +536 71 model """rescal""" +536 71 loss """softplus""" +536 71 regularizer 
"""no""" +536 71 optimizer """adam""" +536 71 training_loop """lcwa""" +536 71 evaluator """rankbased""" +536 72 dataset """kinships""" +536 72 model """rescal""" +536 72 loss """softplus""" +536 72 regularizer """no""" +536 72 optimizer """adam""" +536 72 training_loop """lcwa""" +536 72 evaluator """rankbased""" +536 73 dataset """kinships""" +536 73 model """rescal""" +536 73 loss """softplus""" +536 73 regularizer """no""" +536 73 optimizer """adam""" +536 73 training_loop """lcwa""" +536 73 evaluator """rankbased""" +536 74 dataset """kinships""" +536 74 model """rescal""" +536 74 loss """softplus""" +536 74 regularizer """no""" +536 74 optimizer """adam""" +536 74 training_loop """lcwa""" +536 74 evaluator """rankbased""" +536 75 dataset """kinships""" +536 75 model """rescal""" +536 75 loss """softplus""" +536 75 regularizer """no""" +536 75 optimizer """adam""" +536 75 training_loop """lcwa""" +536 75 evaluator """rankbased""" +536 76 dataset """kinships""" +536 76 model """rescal""" +536 76 loss """softplus""" +536 76 regularizer """no""" +536 76 optimizer """adam""" +536 76 training_loop """lcwa""" +536 76 evaluator """rankbased""" +536 77 dataset """kinships""" +536 77 model """rescal""" +536 77 loss """softplus""" +536 77 regularizer """no""" +536 77 optimizer """adam""" +536 77 training_loop """lcwa""" +536 77 evaluator """rankbased""" +536 78 dataset """kinships""" +536 78 model """rescal""" +536 78 loss """softplus""" +536 78 regularizer """no""" +536 78 optimizer """adam""" +536 78 training_loop """lcwa""" +536 78 evaluator """rankbased""" +536 79 dataset """kinships""" +536 79 model """rescal""" +536 79 loss """softplus""" +536 79 regularizer """no""" +536 79 optimizer """adam""" +536 79 training_loop """lcwa""" +536 79 evaluator """rankbased""" +536 80 dataset """kinships""" +536 80 model """rescal""" +536 80 loss """softplus""" +536 80 regularizer """no""" +536 80 optimizer """adam""" +536 80 training_loop """lcwa""" +536 80 evaluator 
"""rankbased""" +536 81 dataset """kinships""" +536 81 model """rescal""" +536 81 loss """softplus""" +536 81 regularizer """no""" +536 81 optimizer """adam""" +536 81 training_loop """lcwa""" +536 81 evaluator """rankbased""" +536 82 dataset """kinships""" +536 82 model """rescal""" +536 82 loss """softplus""" +536 82 regularizer """no""" +536 82 optimizer """adam""" +536 82 training_loop """lcwa""" +536 82 evaluator """rankbased""" +536 83 dataset """kinships""" +536 83 model """rescal""" +536 83 loss """softplus""" +536 83 regularizer """no""" +536 83 optimizer """adam""" +536 83 training_loop """lcwa""" +536 83 evaluator """rankbased""" +536 84 dataset """kinships""" +536 84 model """rescal""" +536 84 loss """softplus""" +536 84 regularizer """no""" +536 84 optimizer """adam""" +536 84 training_loop """lcwa""" +536 84 evaluator """rankbased""" +536 85 dataset """kinships""" +536 85 model """rescal""" +536 85 loss """softplus""" +536 85 regularizer """no""" +536 85 optimizer """adam""" +536 85 training_loop """lcwa""" +536 85 evaluator """rankbased""" +536 86 dataset """kinships""" +536 86 model """rescal""" +536 86 loss """softplus""" +536 86 regularizer """no""" +536 86 optimizer """adam""" +536 86 training_loop """lcwa""" +536 86 evaluator """rankbased""" +536 87 dataset """kinships""" +536 87 model """rescal""" +536 87 loss """softplus""" +536 87 regularizer """no""" +536 87 optimizer """adam""" +536 87 training_loop """lcwa""" +536 87 evaluator """rankbased""" +536 88 dataset """kinships""" +536 88 model """rescal""" +536 88 loss """softplus""" +536 88 regularizer """no""" +536 88 optimizer """adam""" +536 88 training_loop """lcwa""" +536 88 evaluator """rankbased""" +536 89 dataset """kinships""" +536 89 model """rescal""" +536 89 loss """softplus""" +536 89 regularizer """no""" +536 89 optimizer """adam""" +536 89 training_loop """lcwa""" +536 89 evaluator """rankbased""" +536 90 dataset """kinships""" +536 90 model """rescal""" +536 90 loss 
"""softplus""" +536 90 regularizer """no""" +536 90 optimizer """adam""" +536 90 training_loop """lcwa""" +536 90 evaluator """rankbased""" +536 91 dataset """kinships""" +536 91 model """rescal""" +536 91 loss """softplus""" +536 91 regularizer """no""" +536 91 optimizer """adam""" +536 91 training_loop """lcwa""" +536 91 evaluator """rankbased""" +536 92 dataset """kinships""" +536 92 model """rescal""" +536 92 loss """softplus""" +536 92 regularizer """no""" +536 92 optimizer """adam""" +536 92 training_loop """lcwa""" +536 92 evaluator """rankbased""" +536 93 dataset """kinships""" +536 93 model """rescal""" +536 93 loss """softplus""" +536 93 regularizer """no""" +536 93 optimizer """adam""" +536 93 training_loop """lcwa""" +536 93 evaluator """rankbased""" +536 94 dataset """kinships""" +536 94 model """rescal""" +536 94 loss """softplus""" +536 94 regularizer """no""" +536 94 optimizer """adam""" +536 94 training_loop """lcwa""" +536 94 evaluator """rankbased""" +536 95 dataset """kinships""" +536 95 model """rescal""" +536 95 loss """softplus""" +536 95 regularizer """no""" +536 95 optimizer """adam""" +536 95 training_loop """lcwa""" +536 95 evaluator """rankbased""" +536 96 dataset """kinships""" +536 96 model """rescal""" +536 96 loss """softplus""" +536 96 regularizer """no""" +536 96 optimizer """adam""" +536 96 training_loop """lcwa""" +536 96 evaluator """rankbased""" +536 97 dataset """kinships""" +536 97 model """rescal""" +536 97 loss """softplus""" +536 97 regularizer """no""" +536 97 optimizer """adam""" +536 97 training_loop """lcwa""" +536 97 evaluator """rankbased""" +536 98 dataset """kinships""" +536 98 model """rescal""" +536 98 loss """softplus""" +536 98 regularizer """no""" +536 98 optimizer """adam""" +536 98 training_loop """lcwa""" +536 98 evaluator """rankbased""" +536 99 dataset """kinships""" +536 99 model """rescal""" +536 99 loss """softplus""" +536 99 regularizer """no""" +536 99 optimizer """adam""" +536 99 training_loop 
"""lcwa""" +536 99 evaluator """rankbased""" +536 100 dataset """kinships""" +536 100 model """rescal""" +536 100 loss """softplus""" +536 100 regularizer """no""" +536 100 optimizer """adam""" +536 100 training_loop """lcwa""" +536 100 evaluator """rankbased""" +537 1 model.embedding_dim 1.0 +537 1 optimizer.lr 0.001532348637167663 +537 1 training.batch_size 1.0 +537 1 training.label_smoothing 0.0017341188172489263 +537 2 model.embedding_dim 0.0 +537 2 optimizer.lr 0.047496172515501534 +537 2 training.batch_size 1.0 +537 2 training.label_smoothing 0.04007192600827425 +537 3 model.embedding_dim 2.0 +537 3 optimizer.lr 0.0022301401273275416 +537 3 training.batch_size 0.0 +537 3 training.label_smoothing 0.057034329231490884 +537 4 model.embedding_dim 0.0 +537 4 optimizer.lr 0.013098249705070113 +537 4 training.batch_size 1.0 +537 4 training.label_smoothing 0.10161265687728405 +537 5 model.embedding_dim 1.0 +537 5 optimizer.lr 0.0014773090782110308 +537 5 training.batch_size 1.0 +537 5 training.label_smoothing 0.033215121542838406 +537 6 model.embedding_dim 1.0 +537 6 optimizer.lr 0.032179077078071196 +537 6 training.batch_size 0.0 +537 6 training.label_smoothing 0.2891542867245639 +537 7 model.embedding_dim 1.0 +537 7 optimizer.lr 0.024730542122412012 +537 7 training.batch_size 2.0 +537 7 training.label_smoothing 0.0349945659703861 +537 8 model.embedding_dim 1.0 +537 8 optimizer.lr 0.0014240375578396041 +537 8 training.batch_size 1.0 +537 8 training.label_smoothing 0.008976927826823267 +537 9 model.embedding_dim 2.0 +537 9 optimizer.lr 0.004393757148136735 +537 9 training.batch_size 1.0 +537 9 training.label_smoothing 0.19637304832558788 +537 10 model.embedding_dim 2.0 +537 10 optimizer.lr 0.0034755675965004564 +537 10 training.batch_size 0.0 +537 10 training.label_smoothing 0.22309586553642458 +537 11 model.embedding_dim 2.0 +537 11 optimizer.lr 0.0053213519039184455 +537 11 training.batch_size 0.0 +537 11 training.label_smoothing 0.0021603232727880986 +537 12 
model.embedding_dim 0.0 +537 12 optimizer.lr 0.029706463173524794 +537 12 training.batch_size 2.0 +537 12 training.label_smoothing 0.0012272638478933544 +537 13 model.embedding_dim 0.0 +537 13 optimizer.lr 0.034584369211790485 +537 13 training.batch_size 2.0 +537 13 training.label_smoothing 0.737206804322989 +537 14 model.embedding_dim 1.0 +537 14 optimizer.lr 0.021664872108883324 +537 14 training.batch_size 0.0 +537 14 training.label_smoothing 0.0022350441967188455 +537 15 model.embedding_dim 0.0 +537 15 optimizer.lr 0.0029298618472352214 +537 15 training.batch_size 2.0 +537 15 training.label_smoothing 0.0638533462792371 +537 16 model.embedding_dim 2.0 +537 16 optimizer.lr 0.09476733250414746 +537 16 training.batch_size 2.0 +537 16 training.label_smoothing 0.3547843263464019 +537 17 model.embedding_dim 0.0 +537 17 optimizer.lr 0.0521848786955505 +537 17 training.batch_size 2.0 +537 17 training.label_smoothing 0.04369202422744232 +537 18 model.embedding_dim 2.0 +537 18 optimizer.lr 0.0024962600827121914 +537 18 training.batch_size 2.0 +537 18 training.label_smoothing 0.047695106925357326 +537 19 model.embedding_dim 2.0 +537 19 optimizer.lr 0.0016968337056237882 +537 19 training.batch_size 0.0 +537 19 training.label_smoothing 0.009333721512306659 +537 20 model.embedding_dim 0.0 +537 20 optimizer.lr 0.0587106226394028 +537 20 training.batch_size 1.0 +537 20 training.label_smoothing 0.0012548196678984068 +537 21 model.embedding_dim 0.0 +537 21 optimizer.lr 0.0038312814237978846 +537 21 training.batch_size 2.0 +537 21 training.label_smoothing 0.016842542768234582 +537 22 model.embedding_dim 1.0 +537 22 optimizer.lr 0.002042523879046654 +537 22 training.batch_size 0.0 +537 22 training.label_smoothing 0.9965045277163433 +537 23 model.embedding_dim 1.0 +537 23 optimizer.lr 0.0036987213522972585 +537 23 training.batch_size 1.0 +537 23 training.label_smoothing 0.0029752444172260395 +537 24 model.embedding_dim 0.0 +537 24 optimizer.lr 0.00796209685679076 +537 24 
training.batch_size 1.0 +537 24 training.label_smoothing 0.007376888116675361 +537 25 model.embedding_dim 0.0 +537 25 optimizer.lr 0.02022296163927475 +537 25 training.batch_size 2.0 +537 25 training.label_smoothing 0.001911661930381586 +537 26 model.embedding_dim 1.0 +537 26 optimizer.lr 0.06764867813758065 +537 26 training.batch_size 1.0 +537 26 training.label_smoothing 0.012227302667063608 +537 27 model.embedding_dim 2.0 +537 27 optimizer.lr 0.02845424878339421 +537 27 training.batch_size 0.0 +537 27 training.label_smoothing 0.14213320499119966 +537 28 model.embedding_dim 0.0 +537 28 optimizer.lr 0.025955150696613364 +537 28 training.batch_size 0.0 +537 28 training.label_smoothing 0.0012551903911150212 +537 29 model.embedding_dim 1.0 +537 29 optimizer.lr 0.0027946287101178525 +537 29 training.batch_size 2.0 +537 29 training.label_smoothing 0.001043293021496622 +537 30 model.embedding_dim 2.0 +537 30 optimizer.lr 0.0014645607731869792 +537 30 training.batch_size 2.0 +537 30 training.label_smoothing 0.0026827399904271595 +537 31 model.embedding_dim 0.0 +537 31 optimizer.lr 0.0012876259159666943 +537 31 training.batch_size 1.0 +537 31 training.label_smoothing 0.022262659053496354 +537 32 model.embedding_dim 1.0 +537 32 optimizer.lr 0.001609801629217417 +537 32 training.batch_size 2.0 +537 32 training.label_smoothing 0.10458167049736268 +537 33 model.embedding_dim 2.0 +537 33 optimizer.lr 0.007281992642037993 +537 33 training.batch_size 1.0 +537 33 training.label_smoothing 0.7833199953170377 +537 34 model.embedding_dim 0.0 +537 34 optimizer.lr 0.0037033945725574537 +537 34 training.batch_size 1.0 +537 34 training.label_smoothing 0.03314272426276942 +537 35 model.embedding_dim 0.0 +537 35 optimizer.lr 0.03649422244367715 +537 35 training.batch_size 2.0 +537 35 training.label_smoothing 0.0035127523678353885 +537 36 model.embedding_dim 1.0 +537 36 optimizer.lr 0.029307493624025464 +537 36 training.batch_size 0.0 +537 36 training.label_smoothing 0.7946861676460306 +537 
37 model.embedding_dim 0.0 +537 37 optimizer.lr 0.0026753192006250034 +537 37 training.batch_size 1.0 +537 37 training.label_smoothing 0.03129133479600505 +537 38 model.embedding_dim 1.0 +537 38 optimizer.lr 0.002315390017985454 +537 38 training.batch_size 0.0 +537 38 training.label_smoothing 0.12244610343743413 +537 39 model.embedding_dim 2.0 +537 39 optimizer.lr 0.030380280227628235 +537 39 training.batch_size 2.0 +537 39 training.label_smoothing 0.0034414945914623004 +537 40 model.embedding_dim 0.0 +537 40 optimizer.lr 0.010193608149968955 +537 40 training.batch_size 0.0 +537 40 training.label_smoothing 0.0787447368949364 +537 41 model.embedding_dim 0.0 +537 41 optimizer.lr 0.006632837134811688 +537 41 training.batch_size 1.0 +537 41 training.label_smoothing 0.053573710379851903 +537 42 model.embedding_dim 2.0 +537 42 optimizer.lr 0.0017820589460968439 +537 42 training.batch_size 2.0 +537 42 training.label_smoothing 0.001170476136608433 +537 43 model.embedding_dim 0.0 +537 43 optimizer.lr 0.062209165885743344 +537 43 training.batch_size 0.0 +537 43 training.label_smoothing 0.08739342895445264 +537 44 model.embedding_dim 0.0 +537 44 optimizer.lr 0.0016988837158859625 +537 44 training.batch_size 2.0 +537 44 training.label_smoothing 0.001022228261395463 +537 45 model.embedding_dim 1.0 +537 45 optimizer.lr 0.0028363724125832273 +537 45 training.batch_size 2.0 +537 45 training.label_smoothing 0.03394149884382905 +537 46 model.embedding_dim 2.0 +537 46 optimizer.lr 0.004059588514937274 +537 46 training.batch_size 1.0 +537 46 training.label_smoothing 0.0011993768033683213 +537 47 model.embedding_dim 1.0 +537 47 optimizer.lr 0.0010566589809537813 +537 47 training.batch_size 0.0 +537 47 training.label_smoothing 0.002380398787961279 +537 48 model.embedding_dim 2.0 +537 48 optimizer.lr 0.0056151759990220144 +537 48 training.batch_size 0.0 +537 48 training.label_smoothing 0.2170690287711703 +537 49 model.embedding_dim 2.0 +537 49 optimizer.lr 0.08744058198588868 +537 49 
training.batch_size 1.0 +537 49 training.label_smoothing 0.03571453942128159 +537 50 model.embedding_dim 1.0 +537 50 optimizer.lr 0.0029953415255966572 +537 50 training.batch_size 1.0 +537 50 training.label_smoothing 0.03926418961236267 +537 51 model.embedding_dim 1.0 +537 51 optimizer.lr 0.09849026903422685 +537 51 training.batch_size 0.0 +537 51 training.label_smoothing 0.23362161668838596 +537 52 model.embedding_dim 0.0 +537 52 optimizer.lr 0.035247952403095945 +537 52 training.batch_size 1.0 +537 52 training.label_smoothing 0.003139711797764473 +537 53 model.embedding_dim 1.0 +537 53 optimizer.lr 0.015046649908066116 +537 53 training.batch_size 0.0 +537 53 training.label_smoothing 0.0074956125557849975 +537 54 model.embedding_dim 2.0 +537 54 optimizer.lr 0.003359497869535337 +537 54 training.batch_size 0.0 +537 54 training.label_smoothing 0.005545722972625362 +537 55 model.embedding_dim 0.0 +537 55 optimizer.lr 0.008624442036119442 +537 55 training.batch_size 1.0 +537 55 training.label_smoothing 0.0044846258245572 +537 56 model.embedding_dim 2.0 +537 56 optimizer.lr 0.045310452072480245 +537 56 training.batch_size 1.0 +537 56 training.label_smoothing 0.02054754063878136 +537 57 model.embedding_dim 1.0 +537 57 optimizer.lr 0.02755503542678474 +537 57 training.batch_size 0.0 +537 57 training.label_smoothing 0.610528454299686 +537 58 model.embedding_dim 2.0 +537 58 optimizer.lr 0.0020975234630635545 +537 58 training.batch_size 1.0 +537 58 training.label_smoothing 0.3958037949290221 +537 59 model.embedding_dim 0.0 +537 59 optimizer.lr 0.007830711692586607 +537 59 training.batch_size 2.0 +537 59 training.label_smoothing 0.22538805843696547 +537 60 model.embedding_dim 0.0 +537 60 optimizer.lr 0.0043393040004243745 +537 60 training.batch_size 2.0 +537 60 training.label_smoothing 0.003377603076000989 +537 61 model.embedding_dim 1.0 +537 61 optimizer.lr 0.012991097120884466 +537 61 training.batch_size 1.0 +537 61 training.label_smoothing 0.0010076886420387044 +537 62 
model.embedding_dim 0.0 +537 62 optimizer.lr 0.0818805402426129 +537 62 training.batch_size 0.0 +537 62 training.label_smoothing 0.021199367668739467 +537 63 model.embedding_dim 0.0 +537 63 optimizer.lr 0.001700592088745538 +537 63 training.batch_size 1.0 +537 63 training.label_smoothing 0.17261023336960837 +537 64 model.embedding_dim 1.0 +537 64 optimizer.lr 0.06095486134070562 +537 64 training.batch_size 1.0 +537 64 training.label_smoothing 0.00473073333366634 +537 65 model.embedding_dim 1.0 +537 65 optimizer.lr 0.010715150739189647 +537 65 training.batch_size 0.0 +537 65 training.label_smoothing 0.0025462989165881402 +537 66 model.embedding_dim 1.0 +537 66 optimizer.lr 0.00600178466099437 +537 66 training.batch_size 0.0 +537 66 training.label_smoothing 0.0708530376759968 +537 67 model.embedding_dim 1.0 +537 67 optimizer.lr 0.0034196561214661384 +537 67 training.batch_size 1.0 +537 67 training.label_smoothing 0.05672524776648614 +537 68 model.embedding_dim 1.0 +537 68 optimizer.lr 0.016460671003936684 +537 68 training.batch_size 2.0 +537 68 training.label_smoothing 0.01525147565927921 +537 69 model.embedding_dim 0.0 +537 69 optimizer.lr 0.0052832520683026764 +537 69 training.batch_size 0.0 +537 69 training.label_smoothing 0.01982577072026388 +537 70 model.embedding_dim 0.0 +537 70 optimizer.lr 0.0513437237556511 +537 70 training.batch_size 1.0 +537 70 training.label_smoothing 0.28066230247760904 +537 71 model.embedding_dim 2.0 +537 71 optimizer.lr 0.001249718884894952 +537 71 training.batch_size 0.0 +537 71 training.label_smoothing 0.005881381710018801 +537 72 model.embedding_dim 2.0 +537 72 optimizer.lr 0.05867656816784473 +537 72 training.batch_size 2.0 +537 72 training.label_smoothing 0.10166638641407212 +537 73 model.embedding_dim 0.0 +537 73 optimizer.lr 0.027323241385085836 +537 73 training.batch_size 1.0 +537 73 training.label_smoothing 0.06619359611966519 +537 74 model.embedding_dim 2.0 +537 74 optimizer.lr 0.037908503237844095 +537 74 training.batch_size 
2.0 +537 74 training.label_smoothing 0.0036710252572173478 +537 75 model.embedding_dim 0.0 +537 75 optimizer.lr 0.005976131336188887 +537 75 training.batch_size 1.0 +537 75 training.label_smoothing 0.03950859254009897 +537 76 model.embedding_dim 0.0 +537 76 optimizer.lr 0.018816814584697982 +537 76 training.batch_size 0.0 +537 76 training.label_smoothing 0.6203041738294368 +537 77 model.embedding_dim 2.0 +537 77 optimizer.lr 0.04278078437906788 +537 77 training.batch_size 1.0 +537 77 training.label_smoothing 0.0013864142527087158 +537 78 model.embedding_dim 2.0 +537 78 optimizer.lr 0.0023677836555098227 +537 78 training.batch_size 1.0 +537 78 training.label_smoothing 0.001359815981755894 +537 79 model.embedding_dim 0.0 +537 79 optimizer.lr 0.05084385583984757 +537 79 training.batch_size 1.0 +537 79 training.label_smoothing 0.6317278682209407 +537 80 model.embedding_dim 0.0 +537 80 optimizer.lr 0.0011176998573344595 +537 80 training.batch_size 1.0 +537 80 training.label_smoothing 0.8737217732589447 +537 81 model.embedding_dim 2.0 +537 81 optimizer.lr 0.02031595258683604 +537 81 training.batch_size 0.0 +537 81 training.label_smoothing 0.058826631523924186 +537 82 model.embedding_dim 1.0 +537 82 optimizer.lr 0.00277115505197314 +537 82 training.batch_size 0.0 +537 82 training.label_smoothing 0.0014938380429871405 +537 83 model.embedding_dim 2.0 +537 83 optimizer.lr 0.033015639259170844 +537 83 training.batch_size 2.0 +537 83 training.label_smoothing 0.0023412754505932453 +537 84 model.embedding_dim 1.0 +537 84 optimizer.lr 0.06993186984305665 +537 84 training.batch_size 2.0 +537 84 training.label_smoothing 0.008653490433573567 +537 85 model.embedding_dim 1.0 +537 85 optimizer.lr 0.03525693227064536 +537 85 training.batch_size 1.0 +537 85 training.label_smoothing 0.1783464072750346 +537 86 model.embedding_dim 1.0 +537 86 optimizer.lr 0.0917623934280304 +537 86 training.batch_size 0.0 +537 86 training.label_smoothing 0.003771038969697372 +537 87 model.embedding_dim 1.0 
+537 87 optimizer.lr 0.0064159288363936475 +537 87 training.batch_size 0.0 +537 87 training.label_smoothing 0.0424582087368488 +537 88 model.embedding_dim 2.0 +537 88 optimizer.lr 0.0503572534708636 +537 88 training.batch_size 2.0 +537 88 training.label_smoothing 0.04284645626813474 +537 89 model.embedding_dim 2.0 +537 89 optimizer.lr 0.002168249689097942 +537 89 training.batch_size 0.0 +537 89 training.label_smoothing 0.28179805093816473 +537 90 model.embedding_dim 2.0 +537 90 optimizer.lr 0.008846955666282715 +537 90 training.batch_size 2.0 +537 90 training.label_smoothing 0.14434347217988325 +537 91 model.embedding_dim 0.0 +537 91 optimizer.lr 0.05237620298571108 +537 91 training.batch_size 2.0 +537 91 training.label_smoothing 0.0011826681211544 +537 92 model.embedding_dim 2.0 +537 92 optimizer.lr 0.03589403765922952 +537 92 training.batch_size 1.0 +537 92 training.label_smoothing 0.3173312408985918 +537 93 model.embedding_dim 1.0 +537 93 optimizer.lr 0.03417414458988101 +537 93 training.batch_size 0.0 +537 93 training.label_smoothing 0.06077628691888448 +537 94 model.embedding_dim 0.0 +537 94 optimizer.lr 0.02463358471245089 +537 94 training.batch_size 0.0 +537 94 training.label_smoothing 0.02119285309229675 +537 95 model.embedding_dim 1.0 +537 95 optimizer.lr 0.007371105922417158 +537 95 training.batch_size 0.0 +537 95 training.label_smoothing 0.009887380863789033 +537 96 model.embedding_dim 2.0 +537 96 optimizer.lr 0.02090853035114602 +537 96 training.batch_size 0.0 +537 96 training.label_smoothing 0.6395919359876647 +537 97 model.embedding_dim 1.0 +537 97 optimizer.lr 0.011509546874197253 +537 97 training.batch_size 2.0 +537 97 training.label_smoothing 0.0023063385204940335 +537 98 model.embedding_dim 1.0 +537 98 optimizer.lr 0.003151539422166404 +537 98 training.batch_size 2.0 +537 98 training.label_smoothing 0.002608175479571708 +537 99 model.embedding_dim 0.0 +537 99 optimizer.lr 0.006196144499142878 +537 99 training.batch_size 0.0 +537 99 
training.label_smoothing 0.4846285695168163 +537 100 model.embedding_dim 0.0 +537 100 optimizer.lr 0.03919922416651981 +537 100 training.batch_size 2.0 +537 100 training.label_smoothing 0.006662848120646227 +537 1 dataset """kinships""" +537 1 model """rescal""" +537 1 loss """crossentropy""" +537 1 regularizer """no""" +537 1 optimizer """adam""" +537 1 training_loop """lcwa""" +537 1 evaluator """rankbased""" +537 2 dataset """kinships""" +537 2 model """rescal""" +537 2 loss """crossentropy""" +537 2 regularizer """no""" +537 2 optimizer """adam""" +537 2 training_loop """lcwa""" +537 2 evaluator """rankbased""" +537 3 dataset """kinships""" +537 3 model """rescal""" +537 3 loss """crossentropy""" +537 3 regularizer """no""" +537 3 optimizer """adam""" +537 3 training_loop """lcwa""" +537 3 evaluator """rankbased""" +537 4 dataset """kinships""" +537 4 model """rescal""" +537 4 loss """crossentropy""" +537 4 regularizer """no""" +537 4 optimizer """adam""" +537 4 training_loop """lcwa""" +537 4 evaluator """rankbased""" +537 5 dataset """kinships""" +537 5 model """rescal""" +537 5 loss """crossentropy""" +537 5 regularizer """no""" +537 5 optimizer """adam""" +537 5 training_loop """lcwa""" +537 5 evaluator """rankbased""" +537 6 dataset """kinships""" +537 6 model """rescal""" +537 6 loss """crossentropy""" +537 6 regularizer """no""" +537 6 optimizer """adam""" +537 6 training_loop """lcwa""" +537 6 evaluator """rankbased""" +537 7 dataset """kinships""" +537 7 model """rescal""" +537 7 loss """crossentropy""" +537 7 regularizer """no""" +537 7 optimizer """adam""" +537 7 training_loop """lcwa""" +537 7 evaluator """rankbased""" +537 8 dataset """kinships""" +537 8 model """rescal""" +537 8 loss """crossentropy""" +537 8 regularizer """no""" +537 8 optimizer """adam""" +537 8 training_loop """lcwa""" +537 8 evaluator """rankbased""" +537 9 dataset """kinships""" +537 9 model """rescal""" +537 9 loss """crossentropy""" +537 9 regularizer """no""" +537 9 
optimizer """adam""" +537 9 training_loop """lcwa""" +537 9 evaluator """rankbased""" +537 10 dataset """kinships""" +537 10 model """rescal""" +537 10 loss """crossentropy""" +537 10 regularizer """no""" +537 10 optimizer """adam""" +537 10 training_loop """lcwa""" +537 10 evaluator """rankbased""" +537 11 dataset """kinships""" +537 11 model """rescal""" +537 11 loss """crossentropy""" +537 11 regularizer """no""" +537 11 optimizer """adam""" +537 11 training_loop """lcwa""" +537 11 evaluator """rankbased""" +537 12 dataset """kinships""" +537 12 model """rescal""" +537 12 loss """crossentropy""" +537 12 regularizer """no""" +537 12 optimizer """adam""" +537 12 training_loop """lcwa""" +537 12 evaluator """rankbased""" +537 13 dataset """kinships""" +537 13 model """rescal""" +537 13 loss """crossentropy""" +537 13 regularizer """no""" +537 13 optimizer """adam""" +537 13 training_loop """lcwa""" +537 13 evaluator """rankbased""" +537 14 dataset """kinships""" +537 14 model """rescal""" +537 14 loss """crossentropy""" +537 14 regularizer """no""" +537 14 optimizer """adam""" +537 14 training_loop """lcwa""" +537 14 evaluator """rankbased""" +537 15 dataset """kinships""" +537 15 model """rescal""" +537 15 loss """crossentropy""" +537 15 regularizer """no""" +537 15 optimizer """adam""" +537 15 training_loop """lcwa""" +537 15 evaluator """rankbased""" +537 16 dataset """kinships""" +537 16 model """rescal""" +537 16 loss """crossentropy""" +537 16 regularizer """no""" +537 16 optimizer """adam""" +537 16 training_loop """lcwa""" +537 16 evaluator """rankbased""" +537 17 dataset """kinships""" +537 17 model """rescal""" +537 17 loss """crossentropy""" +537 17 regularizer """no""" +537 17 optimizer """adam""" +537 17 training_loop """lcwa""" +537 17 evaluator """rankbased""" +537 18 dataset """kinships""" +537 18 model """rescal""" +537 18 loss """crossentropy""" +537 18 regularizer """no""" +537 18 optimizer """adam""" +537 18 training_loop """lcwa""" +537 18 
evaluator """rankbased""" +537 19 dataset """kinships""" +537 19 model """rescal""" +537 19 loss """crossentropy""" +537 19 regularizer """no""" +537 19 optimizer """adam""" +537 19 training_loop """lcwa""" +537 19 evaluator """rankbased""" +537 20 dataset """kinships""" +537 20 model """rescal""" +537 20 loss """crossentropy""" +537 20 regularizer """no""" +537 20 optimizer """adam""" +537 20 training_loop """lcwa""" +537 20 evaluator """rankbased""" +537 21 dataset """kinships""" +537 21 model """rescal""" +537 21 loss """crossentropy""" +537 21 regularizer """no""" +537 21 optimizer """adam""" +537 21 training_loop """lcwa""" +537 21 evaluator """rankbased""" +537 22 dataset """kinships""" +537 22 model """rescal""" +537 22 loss """crossentropy""" +537 22 regularizer """no""" +537 22 optimizer """adam""" +537 22 training_loop """lcwa""" +537 22 evaluator """rankbased""" +537 23 dataset """kinships""" +537 23 model """rescal""" +537 23 loss """crossentropy""" +537 23 regularizer """no""" +537 23 optimizer """adam""" +537 23 training_loop """lcwa""" +537 23 evaluator """rankbased""" +537 24 dataset """kinships""" +537 24 model """rescal""" +537 24 loss """crossentropy""" +537 24 regularizer """no""" +537 24 optimizer """adam""" +537 24 training_loop """lcwa""" +537 24 evaluator """rankbased""" +537 25 dataset """kinships""" +537 25 model """rescal""" +537 25 loss """crossentropy""" +537 25 regularizer """no""" +537 25 optimizer """adam""" +537 25 training_loop """lcwa""" +537 25 evaluator """rankbased""" +537 26 dataset """kinships""" +537 26 model """rescal""" +537 26 loss """crossentropy""" +537 26 regularizer """no""" +537 26 optimizer """adam""" +537 26 training_loop """lcwa""" +537 26 evaluator """rankbased""" +537 27 dataset """kinships""" +537 27 model """rescal""" +537 27 loss """crossentropy""" +537 27 regularizer """no""" +537 27 optimizer """adam""" +537 27 training_loop """lcwa""" +537 27 evaluator """rankbased""" +537 28 dataset """kinships""" +537 28 
model """rescal""" +537 28 loss """crossentropy""" +537 28 regularizer """no""" +537 28 optimizer """adam""" +537 28 training_loop """lcwa""" +537 28 evaluator """rankbased""" +537 29 dataset """kinships""" +537 29 model """rescal""" +537 29 loss """crossentropy""" +537 29 regularizer """no""" +537 29 optimizer """adam""" +537 29 training_loop """lcwa""" +537 29 evaluator """rankbased""" +537 30 dataset """kinships""" +537 30 model """rescal""" +537 30 loss """crossentropy""" +537 30 regularizer """no""" +537 30 optimizer """adam""" +537 30 training_loop """lcwa""" +537 30 evaluator """rankbased""" +537 31 dataset """kinships""" +537 31 model """rescal""" +537 31 loss """crossentropy""" +537 31 regularizer """no""" +537 31 optimizer """adam""" +537 31 training_loop """lcwa""" +537 31 evaluator """rankbased""" +537 32 dataset """kinships""" +537 32 model """rescal""" +537 32 loss """crossentropy""" +537 32 regularizer """no""" +537 32 optimizer """adam""" +537 32 training_loop """lcwa""" +537 32 evaluator """rankbased""" +537 33 dataset """kinships""" +537 33 model """rescal""" +537 33 loss """crossentropy""" +537 33 regularizer """no""" +537 33 optimizer """adam""" +537 33 training_loop """lcwa""" +537 33 evaluator """rankbased""" +537 34 dataset """kinships""" +537 34 model """rescal""" +537 34 loss """crossentropy""" +537 34 regularizer """no""" +537 34 optimizer """adam""" +537 34 training_loop """lcwa""" +537 34 evaluator """rankbased""" +537 35 dataset """kinships""" +537 35 model """rescal""" +537 35 loss """crossentropy""" +537 35 regularizer """no""" +537 35 optimizer """adam""" +537 35 training_loop """lcwa""" +537 35 evaluator """rankbased""" +537 36 dataset """kinships""" +537 36 model """rescal""" +537 36 loss """crossentropy""" +537 36 regularizer """no""" +537 36 optimizer """adam""" +537 36 training_loop """lcwa""" +537 36 evaluator """rankbased""" +537 37 dataset """kinships""" +537 37 model """rescal""" +537 37 loss """crossentropy""" +537 37 
regularizer """no""" +537 37 optimizer """adam""" +537 37 training_loop """lcwa""" +537 37 evaluator """rankbased""" +537 38 dataset """kinships""" +537 38 model """rescal""" +537 38 loss """crossentropy""" +537 38 regularizer """no""" +537 38 optimizer """adam""" +537 38 training_loop """lcwa""" +537 38 evaluator """rankbased""" +537 39 dataset """kinships""" +537 39 model """rescal""" +537 39 loss """crossentropy""" +537 39 regularizer """no""" +537 39 optimizer """adam""" +537 39 training_loop """lcwa""" +537 39 evaluator """rankbased""" +537 40 dataset """kinships""" +537 40 model """rescal""" +537 40 loss """crossentropy""" +537 40 regularizer """no""" +537 40 optimizer """adam""" +537 40 training_loop """lcwa""" +537 40 evaluator """rankbased""" +537 41 dataset """kinships""" +537 41 model """rescal""" +537 41 loss """crossentropy""" +537 41 regularizer """no""" +537 41 optimizer """adam""" +537 41 training_loop """lcwa""" +537 41 evaluator """rankbased""" +537 42 dataset """kinships""" +537 42 model """rescal""" +537 42 loss """crossentropy""" +537 42 regularizer """no""" +537 42 optimizer """adam""" +537 42 training_loop """lcwa""" +537 42 evaluator """rankbased""" +537 43 dataset """kinships""" +537 43 model """rescal""" +537 43 loss """crossentropy""" +537 43 regularizer """no""" +537 43 optimizer """adam""" +537 43 training_loop """lcwa""" +537 43 evaluator """rankbased""" +537 44 dataset """kinships""" +537 44 model """rescal""" +537 44 loss """crossentropy""" +537 44 regularizer """no""" +537 44 optimizer """adam""" +537 44 training_loop """lcwa""" +537 44 evaluator """rankbased""" +537 45 dataset """kinships""" +537 45 model """rescal""" +537 45 loss """crossentropy""" +537 45 regularizer """no""" +537 45 optimizer """adam""" +537 45 training_loop """lcwa""" +537 45 evaluator """rankbased""" +537 46 dataset """kinships""" +537 46 model """rescal""" +537 46 loss """crossentropy""" +537 46 regularizer """no""" +537 46 optimizer """adam""" +537 46 
training_loop """lcwa""" +537 46 evaluator """rankbased""" +537 47 dataset """kinships""" +537 47 model """rescal""" +537 47 loss """crossentropy""" +537 47 regularizer """no""" +537 47 optimizer """adam""" +537 47 training_loop """lcwa""" +537 47 evaluator """rankbased""" +537 48 dataset """kinships""" +537 48 model """rescal""" +537 48 loss """crossentropy""" +537 48 regularizer """no""" +537 48 optimizer """adam""" +537 48 training_loop """lcwa""" +537 48 evaluator """rankbased""" +537 49 dataset """kinships""" +537 49 model """rescal""" +537 49 loss """crossentropy""" +537 49 regularizer """no""" +537 49 optimizer """adam""" +537 49 training_loop """lcwa""" +537 49 evaluator """rankbased""" +537 50 dataset """kinships""" +537 50 model """rescal""" +537 50 loss """crossentropy""" +537 50 regularizer """no""" +537 50 optimizer """adam""" +537 50 training_loop """lcwa""" +537 50 evaluator """rankbased""" +537 51 dataset """kinships""" +537 51 model """rescal""" +537 51 loss """crossentropy""" +537 51 regularizer """no""" +537 51 optimizer """adam""" +537 51 training_loop """lcwa""" +537 51 evaluator """rankbased""" +537 52 dataset """kinships""" +537 52 model """rescal""" +537 52 loss """crossentropy""" +537 52 regularizer """no""" +537 52 optimizer """adam""" +537 52 training_loop """lcwa""" +537 52 evaluator """rankbased""" +537 53 dataset """kinships""" +537 53 model """rescal""" +537 53 loss """crossentropy""" +537 53 regularizer """no""" +537 53 optimizer """adam""" +537 53 training_loop """lcwa""" +537 53 evaluator """rankbased""" +537 54 dataset """kinships""" +537 54 model """rescal""" +537 54 loss """crossentropy""" +537 54 regularizer """no""" +537 54 optimizer """adam""" +537 54 training_loop """lcwa""" +537 54 evaluator """rankbased""" +537 55 dataset """kinships""" +537 55 model """rescal""" +537 55 loss """crossentropy""" +537 55 regularizer """no""" +537 55 optimizer """adam""" +537 55 training_loop """lcwa""" +537 55 evaluator """rankbased""" +537 
56 dataset """kinships""" +537 56 model """rescal""" +537 56 loss """crossentropy""" +537 56 regularizer """no""" +537 56 optimizer """adam""" +537 56 training_loop """lcwa""" +537 56 evaluator """rankbased""" +537 57 dataset """kinships""" +537 57 model """rescal""" +537 57 loss """crossentropy""" +537 57 regularizer """no""" +537 57 optimizer """adam""" +537 57 training_loop """lcwa""" +537 57 evaluator """rankbased""" +537 58 dataset """kinships""" +537 58 model """rescal""" +537 58 loss """crossentropy""" +537 58 regularizer """no""" +537 58 optimizer """adam""" +537 58 training_loop """lcwa""" +537 58 evaluator """rankbased""" +537 59 dataset """kinships""" +537 59 model """rescal""" +537 59 loss """crossentropy""" +537 59 regularizer """no""" +537 59 optimizer """adam""" +537 59 training_loop """lcwa""" +537 59 evaluator """rankbased""" +537 60 dataset """kinships""" +537 60 model """rescal""" +537 60 loss """crossentropy""" +537 60 regularizer """no""" +537 60 optimizer """adam""" +537 60 training_loop """lcwa""" +537 60 evaluator """rankbased""" +537 61 dataset """kinships""" +537 61 model """rescal""" +537 61 loss """crossentropy""" +537 61 regularizer """no""" +537 61 optimizer """adam""" +537 61 training_loop """lcwa""" +537 61 evaluator """rankbased""" +537 62 dataset """kinships""" +537 62 model """rescal""" +537 62 loss """crossentropy""" +537 62 regularizer """no""" +537 62 optimizer """adam""" +537 62 training_loop """lcwa""" +537 62 evaluator """rankbased""" +537 63 dataset """kinships""" +537 63 model """rescal""" +537 63 loss """crossentropy""" +537 63 regularizer """no""" +537 63 optimizer """adam""" +537 63 training_loop """lcwa""" +537 63 evaluator """rankbased""" +537 64 dataset """kinships""" +537 64 model """rescal""" +537 64 loss """crossentropy""" +537 64 regularizer """no""" +537 64 optimizer """adam""" +537 64 training_loop """lcwa""" +537 64 evaluator """rankbased""" +537 65 dataset """kinships""" +537 65 model """rescal""" +537 65 
loss """crossentropy""" +537 65 regularizer """no""" +537 65 optimizer """adam""" +537 65 training_loop """lcwa""" +537 65 evaluator """rankbased""" +537 66 dataset """kinships""" +537 66 model """rescal""" +537 66 loss """crossentropy""" +537 66 regularizer """no""" +537 66 optimizer """adam""" +537 66 training_loop """lcwa""" +537 66 evaluator """rankbased""" +537 67 dataset """kinships""" +537 67 model """rescal""" +537 67 loss """crossentropy""" +537 67 regularizer """no""" +537 67 optimizer """adam""" +537 67 training_loop """lcwa""" +537 67 evaluator """rankbased""" +537 68 dataset """kinships""" +537 68 model """rescal""" +537 68 loss """crossentropy""" +537 68 regularizer """no""" +537 68 optimizer """adam""" +537 68 training_loop """lcwa""" +537 68 evaluator """rankbased""" +537 69 dataset """kinships""" +537 69 model """rescal""" +537 69 loss """crossentropy""" +537 69 regularizer """no""" +537 69 optimizer """adam""" +537 69 training_loop """lcwa""" +537 69 evaluator """rankbased""" +537 70 dataset """kinships""" +537 70 model """rescal""" +537 70 loss """crossentropy""" +537 70 regularizer """no""" +537 70 optimizer """adam""" +537 70 training_loop """lcwa""" +537 70 evaluator """rankbased""" +537 71 dataset """kinships""" +537 71 model """rescal""" +537 71 loss """crossentropy""" +537 71 regularizer """no""" +537 71 optimizer """adam""" +537 71 training_loop """lcwa""" +537 71 evaluator """rankbased""" +537 72 dataset """kinships""" +537 72 model """rescal""" +537 72 loss """crossentropy""" +537 72 regularizer """no""" +537 72 optimizer """adam""" +537 72 training_loop """lcwa""" +537 72 evaluator """rankbased""" +537 73 dataset """kinships""" +537 73 model """rescal""" +537 73 loss """crossentropy""" +537 73 regularizer """no""" +537 73 optimizer """adam""" +537 73 training_loop """lcwa""" +537 73 evaluator """rankbased""" +537 74 dataset """kinships""" +537 74 model """rescal""" +537 74 loss """crossentropy""" +537 74 regularizer """no""" +537 74 
optimizer """adam""" +537 74 training_loop """lcwa""" +537 74 evaluator """rankbased""" +537 75 dataset """kinships""" +537 75 model """rescal""" +537 75 loss """crossentropy""" +537 75 regularizer """no""" +537 75 optimizer """adam""" +537 75 training_loop """lcwa""" +537 75 evaluator """rankbased""" +537 76 dataset """kinships""" +537 76 model """rescal""" +537 76 loss """crossentropy""" +537 76 regularizer """no""" +537 76 optimizer """adam""" +537 76 training_loop """lcwa""" +537 76 evaluator """rankbased""" +537 77 dataset """kinships""" +537 77 model """rescal""" +537 77 loss """crossentropy""" +537 77 regularizer """no""" +537 77 optimizer """adam""" +537 77 training_loop """lcwa""" +537 77 evaluator """rankbased""" +537 78 dataset """kinships""" +537 78 model """rescal""" +537 78 loss """crossentropy""" +537 78 regularizer """no""" +537 78 optimizer """adam""" +537 78 training_loop """lcwa""" +537 78 evaluator """rankbased""" +537 79 dataset """kinships""" +537 79 model """rescal""" +537 79 loss """crossentropy""" +537 79 regularizer """no""" +537 79 optimizer """adam""" +537 79 training_loop """lcwa""" +537 79 evaluator """rankbased""" +537 80 dataset """kinships""" +537 80 model """rescal""" +537 80 loss """crossentropy""" +537 80 regularizer """no""" +537 80 optimizer """adam""" +537 80 training_loop """lcwa""" +537 80 evaluator """rankbased""" +537 81 dataset """kinships""" +537 81 model """rescal""" +537 81 loss """crossentropy""" +537 81 regularizer """no""" +537 81 optimizer """adam""" +537 81 training_loop """lcwa""" +537 81 evaluator """rankbased""" +537 82 dataset """kinships""" +537 82 model """rescal""" +537 82 loss """crossentropy""" +537 82 regularizer """no""" +537 82 optimizer """adam""" +537 82 training_loop """lcwa""" +537 82 evaluator """rankbased""" +537 83 dataset """kinships""" +537 83 model """rescal""" +537 83 loss """crossentropy""" +537 83 regularizer """no""" +537 83 optimizer """adam""" +537 83 training_loop """lcwa""" +537 83 
evaluator """rankbased""" +537 84 dataset """kinships""" +537 84 model """rescal""" +537 84 loss """crossentropy""" +537 84 regularizer """no""" +537 84 optimizer """adam""" +537 84 training_loop """lcwa""" +537 84 evaluator """rankbased""" +537 85 dataset """kinships""" +537 85 model """rescal""" +537 85 loss """crossentropy""" +537 85 regularizer """no""" +537 85 optimizer """adam""" +537 85 training_loop """lcwa""" +537 85 evaluator """rankbased""" +537 86 dataset """kinships""" +537 86 model """rescal""" +537 86 loss """crossentropy""" +537 86 regularizer """no""" +537 86 optimizer """adam""" +537 86 training_loop """lcwa""" +537 86 evaluator """rankbased""" +537 87 dataset """kinships""" +537 87 model """rescal""" +537 87 loss """crossentropy""" +537 87 regularizer """no""" +537 87 optimizer """adam""" +537 87 training_loop """lcwa""" +537 87 evaluator """rankbased""" +537 88 dataset """kinships""" +537 88 model """rescal""" +537 88 loss """crossentropy""" +537 88 regularizer """no""" +537 88 optimizer """adam""" +537 88 training_loop """lcwa""" +537 88 evaluator """rankbased""" +537 89 dataset """kinships""" +537 89 model """rescal""" +537 89 loss """crossentropy""" +537 89 regularizer """no""" +537 89 optimizer """adam""" +537 89 training_loop """lcwa""" +537 89 evaluator """rankbased""" +537 90 dataset """kinships""" +537 90 model """rescal""" +537 90 loss """crossentropy""" +537 90 regularizer """no""" +537 90 optimizer """adam""" +537 90 training_loop """lcwa""" +537 90 evaluator """rankbased""" +537 91 dataset """kinships""" +537 91 model """rescal""" +537 91 loss """crossentropy""" +537 91 regularizer """no""" +537 91 optimizer """adam""" +537 91 training_loop """lcwa""" +537 91 evaluator """rankbased""" +537 92 dataset """kinships""" +537 92 model """rescal""" +537 92 loss """crossentropy""" +537 92 regularizer """no""" +537 92 optimizer """adam""" +537 92 training_loop """lcwa""" +537 92 evaluator """rankbased""" +537 93 dataset """kinships""" +537 93 
model """rescal""" +537 93 loss """crossentropy""" +537 93 regularizer """no""" +537 93 optimizer """adam""" +537 93 training_loop """lcwa""" +537 93 evaluator """rankbased""" +537 94 dataset """kinships""" +537 94 model """rescal""" +537 94 loss """crossentropy""" +537 94 regularizer """no""" +537 94 optimizer """adam""" +537 94 training_loop """lcwa""" +537 94 evaluator """rankbased""" +537 95 dataset """kinships""" +537 95 model """rescal""" +537 95 loss """crossentropy""" +537 95 regularizer """no""" +537 95 optimizer """adam""" +537 95 training_loop """lcwa""" +537 95 evaluator """rankbased""" +537 96 dataset """kinships""" +537 96 model """rescal""" +537 96 loss """crossentropy""" +537 96 regularizer """no""" +537 96 optimizer """adam""" +537 96 training_loop """lcwa""" +537 96 evaluator """rankbased""" +537 97 dataset """kinships""" +537 97 model """rescal""" +537 97 loss """crossentropy""" +537 97 regularizer """no""" +537 97 optimizer """adam""" +537 97 training_loop """lcwa""" +537 97 evaluator """rankbased""" +537 98 dataset """kinships""" +537 98 model """rescal""" +537 98 loss """crossentropy""" +537 98 regularizer """no""" +537 98 optimizer """adam""" +537 98 training_loop """lcwa""" +537 98 evaluator """rankbased""" +537 99 dataset """kinships""" +537 99 model """rescal""" +537 99 loss """crossentropy""" +537 99 regularizer """no""" +537 99 optimizer """adam""" +537 99 training_loop """lcwa""" +537 99 evaluator """rankbased""" +537 100 dataset """kinships""" +537 100 model """rescal""" +537 100 loss """crossentropy""" +537 100 regularizer """no""" +537 100 optimizer """adam""" +537 100 training_loop """lcwa""" +537 100 evaluator """rankbased""" +538 1 model.embedding_dim 0.0 +538 1 optimizer.lr 0.001685030925292249 +538 1 training.batch_size 1.0 +538 1 training.label_smoothing 0.5314417835799539 +538 2 model.embedding_dim 0.0 +538 2 optimizer.lr 0.011016369274808237 +538 2 training.batch_size 2.0 +538 2 training.label_smoothing 0.5289502276256433 
+538 3 model.embedding_dim 0.0 +538 3 optimizer.lr 0.009680427436859493 +538 3 training.batch_size 1.0 +538 3 training.label_smoothing 0.008334103796925884 +538 4 model.embedding_dim 0.0 +538 4 optimizer.lr 0.005848192369419903 +538 4 training.batch_size 0.0 +538 4 training.label_smoothing 0.015968672296409615 +538 5 model.embedding_dim 0.0 +538 5 optimizer.lr 0.08526429477319598 +538 5 training.batch_size 1.0 +538 5 training.label_smoothing 0.01488767562552607 +538 6 model.embedding_dim 1.0 +538 6 optimizer.lr 0.0028290966958823493 +538 6 training.batch_size 2.0 +538 6 training.label_smoothing 0.15503468823196437 +538 7 model.embedding_dim 0.0 +538 7 optimizer.lr 0.026468679052957786 +538 7 training.batch_size 0.0 +538 7 training.label_smoothing 0.004758226344777446 +538 8 model.embedding_dim 1.0 +538 8 optimizer.lr 0.006724740641624506 +538 8 training.batch_size 1.0 +538 8 training.label_smoothing 0.013452932678336105 +538 9 model.embedding_dim 2.0 +538 9 optimizer.lr 0.059616073558343266 +538 9 training.batch_size 1.0 +538 9 training.label_smoothing 0.09455714029154481 +538 10 model.embedding_dim 1.0 +538 10 optimizer.lr 0.09134761282996609 +538 10 training.batch_size 0.0 +538 10 training.label_smoothing 0.007253894965122906 +538 11 model.embedding_dim 1.0 +538 11 optimizer.lr 0.0014006557185013955 +538 11 training.batch_size 2.0 +538 11 training.label_smoothing 0.31134663022997056 +538 12 model.embedding_dim 1.0 +538 12 optimizer.lr 0.010706600733812192 +538 12 training.batch_size 1.0 +538 12 training.label_smoothing 0.0030065470205201546 +538 13 model.embedding_dim 2.0 +538 13 optimizer.lr 0.00981986510431172 +538 13 training.batch_size 0.0 +538 13 training.label_smoothing 0.03585126253811579 +538 14 model.embedding_dim 2.0 +538 14 optimizer.lr 0.006839361728210946 +538 14 training.batch_size 2.0 +538 14 training.label_smoothing 0.24531319610264615 +538 15 model.embedding_dim 1.0 +538 15 optimizer.lr 0.007312326643998531 +538 15 training.batch_size 2.0 +538 15 
training.label_smoothing 0.007249157522234924 +538 16 model.embedding_dim 0.0 +538 16 optimizer.lr 0.005394677115088896 +538 16 training.batch_size 2.0 +538 16 training.label_smoothing 0.11647794365210357 +538 17 model.embedding_dim 1.0 +538 17 optimizer.lr 0.011726761506489148 +538 17 training.batch_size 2.0 +538 17 training.label_smoothing 0.44328538869742723 +538 18 model.embedding_dim 0.0 +538 18 optimizer.lr 0.0016275430756314446 +538 18 training.batch_size 2.0 +538 18 training.label_smoothing 0.03565133506547142 +538 19 model.embedding_dim 0.0 +538 19 optimizer.lr 0.008434494565679752 +538 19 training.batch_size 0.0 +538 19 training.label_smoothing 0.003841204276518392 +538 20 model.embedding_dim 1.0 +538 20 optimizer.lr 0.0328492360341889 +538 20 training.batch_size 2.0 +538 20 training.label_smoothing 0.07484237177815002 +538 21 model.embedding_dim 2.0 +538 21 optimizer.lr 0.030575583687723087 +538 21 training.batch_size 0.0 +538 21 training.label_smoothing 0.05828572714036553 +538 22 model.embedding_dim 2.0 +538 22 optimizer.lr 0.002220913743616372 +538 22 training.batch_size 1.0 +538 22 training.label_smoothing 0.7184285048234361 +538 23 model.embedding_dim 1.0 +538 23 optimizer.lr 0.023910086739761128 +538 23 training.batch_size 1.0 +538 23 training.label_smoothing 0.030367818479135802 +538 24 model.embedding_dim 2.0 +538 24 optimizer.lr 0.0022707690494739876 +538 24 training.batch_size 2.0 +538 24 training.label_smoothing 0.34854845292882236 +538 25 model.embedding_dim 2.0 +538 25 optimizer.lr 0.08578418822901053 +538 25 training.batch_size 2.0 +538 25 training.label_smoothing 0.0032621080228258832 +538 26 model.embedding_dim 2.0 +538 26 optimizer.lr 0.0010640512875327841 +538 26 training.batch_size 0.0 +538 26 training.label_smoothing 0.019866180560275976 +538 27 model.embedding_dim 2.0 +538 27 optimizer.lr 0.0013576846494966568 +538 27 training.batch_size 2.0 +538 27 training.label_smoothing 0.009943312285861735 +538 28 model.embedding_dim 1.0 +538 28 
optimizer.lr 0.005245101367082459 +538 28 training.batch_size 0.0 +538 28 training.label_smoothing 0.6408094520310866 +538 29 model.embedding_dim 2.0 +538 29 optimizer.lr 0.0070059241926434725 +538 29 training.batch_size 1.0 +538 29 training.label_smoothing 0.05276641884381344 +538 30 model.embedding_dim 2.0 +538 30 optimizer.lr 0.001517111274268413 +538 30 training.batch_size 0.0 +538 30 training.label_smoothing 0.03850870954444256 +538 31 model.embedding_dim 0.0 +538 31 optimizer.lr 0.001170861802129874 +538 31 training.batch_size 0.0 +538 31 training.label_smoothing 0.16096409332509148 +538 32 model.embedding_dim 1.0 +538 32 optimizer.lr 0.0024213071558026604 +538 32 training.batch_size 0.0 +538 32 training.label_smoothing 0.0025531230103677267 +538 33 model.embedding_dim 0.0 +538 33 optimizer.lr 0.004407015286821462 +538 33 training.batch_size 1.0 +538 33 training.label_smoothing 0.13280376362283183 +538 34 model.embedding_dim 1.0 +538 34 optimizer.lr 0.022022462304477804 +538 34 training.batch_size 0.0 +538 34 training.label_smoothing 0.004451322146850076 +538 35 model.embedding_dim 0.0 +538 35 optimizer.lr 0.038298323302802834 +538 35 training.batch_size 2.0 +538 35 training.label_smoothing 0.072085098114063 +538 36 model.embedding_dim 1.0 +538 36 optimizer.lr 0.04530527164594624 +538 36 training.batch_size 1.0 +538 36 training.label_smoothing 0.031567723201801526 +538 37 model.embedding_dim 0.0 +538 37 optimizer.lr 0.015186170147340678 +538 37 training.batch_size 1.0 +538 37 training.label_smoothing 0.3349243907291002 +538 38 model.embedding_dim 0.0 +538 38 optimizer.lr 0.026986982796676513 +538 38 training.batch_size 1.0 +538 38 training.label_smoothing 0.0018204866808220691 +538 39 model.embedding_dim 0.0 +538 39 optimizer.lr 0.036997781236907924 +538 39 training.batch_size 1.0 +538 39 training.label_smoothing 0.2971829536789951 +538 40 model.embedding_dim 1.0 +538 40 optimizer.lr 0.018989707878955757 +538 40 training.batch_size 0.0 +538 40 
training.label_smoothing 0.0863964830655686 +538 41 model.embedding_dim 2.0 +538 41 optimizer.lr 0.044872817231348484 +538 41 training.batch_size 2.0 +538 41 training.label_smoothing 0.008962077982011447 +538 42 model.embedding_dim 1.0 +538 42 optimizer.lr 0.0021139123543504213 +538 42 training.batch_size 1.0 +538 42 training.label_smoothing 0.0017083752194881916 +538 43 model.embedding_dim 0.0 +538 43 optimizer.lr 0.00883880818093843 +538 43 training.batch_size 2.0 +538 43 training.label_smoothing 0.01371091379289551 +538 44 model.embedding_dim 0.0 +538 44 optimizer.lr 0.006202652415685371 +538 44 training.batch_size 2.0 +538 44 training.label_smoothing 0.03430404227519377 +538 45 model.embedding_dim 2.0 +538 45 optimizer.lr 0.07897588759871334 +538 45 training.batch_size 0.0 +538 45 training.label_smoothing 0.14322010908154018 +538 46 model.embedding_dim 0.0 +538 46 optimizer.lr 0.042489659893874945 +538 46 training.batch_size 1.0 +538 46 training.label_smoothing 0.004106983228481689 +538 47 model.embedding_dim 0.0 +538 47 optimizer.lr 0.07981750746486274 +538 47 training.batch_size 2.0 +538 47 training.label_smoothing 0.006032018674660803 +538 48 model.embedding_dim 1.0 +538 48 optimizer.lr 0.03284590200038909 +538 48 training.batch_size 1.0 +538 48 training.label_smoothing 0.009742170192088588 +538 49 model.embedding_dim 0.0 +538 49 optimizer.lr 0.006025076803420858 +538 49 training.batch_size 1.0 +538 49 training.label_smoothing 0.11321135897840731 +538 50 model.embedding_dim 1.0 +538 50 optimizer.lr 0.007289087707412221 +538 50 training.batch_size 1.0 +538 50 training.label_smoothing 0.053887309030510026 +538 51 model.embedding_dim 0.0 +538 51 optimizer.lr 0.026260897825301326 +538 51 training.batch_size 0.0 +538 51 training.label_smoothing 0.18499094984063122 +538 52 model.embedding_dim 1.0 +538 52 optimizer.lr 0.006123185142855062 +538 52 training.batch_size 1.0 +538 52 training.label_smoothing 0.6833701506287613 +538 53 model.embedding_dim 0.0 +538 53 
optimizer.lr 0.00667018528757746 +538 53 training.batch_size 1.0 +538 53 training.label_smoothing 0.0091142414488517 +538 54 model.embedding_dim 1.0 +538 54 optimizer.lr 0.001219362617432552 +538 54 training.batch_size 2.0 +538 54 training.label_smoothing 0.9829717594093946 +538 55 model.embedding_dim 1.0 +538 55 optimizer.lr 0.024999214505975902 +538 55 training.batch_size 2.0 +538 55 training.label_smoothing 0.005907786682230584 +538 56 model.embedding_dim 2.0 +538 56 optimizer.lr 0.001511363552854182 +538 56 training.batch_size 1.0 +538 56 training.label_smoothing 0.006782434048766728 +538 57 model.embedding_dim 2.0 +538 57 optimizer.lr 0.012850803941781643 +538 57 training.batch_size 1.0 +538 57 training.label_smoothing 0.11139485774216583 +538 58 model.embedding_dim 2.0 +538 58 optimizer.lr 0.009626158170967167 +538 58 training.batch_size 0.0 +538 58 training.label_smoothing 0.363825722220047 +538 59 model.embedding_dim 0.0 +538 59 optimizer.lr 0.014970776775385396 +538 59 training.batch_size 0.0 +538 59 training.label_smoothing 0.011315560523706988 +538 60 model.embedding_dim 0.0 +538 60 optimizer.lr 0.03371077843270627 +538 60 training.batch_size 0.0 +538 60 training.label_smoothing 0.0018249122476487992 +538 61 model.embedding_dim 1.0 +538 61 optimizer.lr 0.003959019929797573 +538 61 training.batch_size 1.0 +538 61 training.label_smoothing 0.0010832690133820116 +538 62 model.embedding_dim 0.0 +538 62 optimizer.lr 0.0011068687626026922 +538 62 training.batch_size 2.0 +538 62 training.label_smoothing 0.025383950384911196 +538 63 model.embedding_dim 2.0 +538 63 optimizer.lr 0.0019227536039008362 +538 63 training.batch_size 2.0 +538 63 training.label_smoothing 0.7726027404034422 +538 64 model.embedding_dim 2.0 +538 64 optimizer.lr 0.06806268702313303 +538 64 training.batch_size 1.0 +538 64 training.label_smoothing 0.019509756328368138 +538 65 model.embedding_dim 0.0 +538 65 optimizer.lr 0.005101701153770653 +538 65 training.batch_size 2.0 +538 65 
training.label_smoothing 0.14370860114242043 +538 66 model.embedding_dim 0.0 +538 66 optimizer.lr 0.06582273008201689 +538 66 training.batch_size 0.0 +538 66 training.label_smoothing 0.0023971534955063993 +538 67 model.embedding_dim 2.0 +538 67 optimizer.lr 0.03579115856237021 +538 67 training.batch_size 1.0 +538 67 training.label_smoothing 0.0010019144493180873 +538 68 model.embedding_dim 2.0 +538 68 optimizer.lr 0.015372452776420785 +538 68 training.batch_size 0.0 +538 68 training.label_smoothing 0.002939618165723796 +538 69 model.embedding_dim 1.0 +538 69 optimizer.lr 0.06466274139268165 +538 69 training.batch_size 2.0 +538 69 training.label_smoothing 0.8627102865771665 +538 70 model.embedding_dim 0.0 +538 70 optimizer.lr 0.016411029592131667 +538 70 training.batch_size 1.0 +538 70 training.label_smoothing 0.002633495862320687 +538 71 model.embedding_dim 1.0 +538 71 optimizer.lr 0.047434846669535125 +538 71 training.batch_size 1.0 +538 71 training.label_smoothing 0.003778250998063756 +538 72 model.embedding_dim 1.0 +538 72 optimizer.lr 0.014055947268892241 +538 72 training.batch_size 2.0 +538 72 training.label_smoothing 0.0282605720272386 +538 73 model.embedding_dim 2.0 +538 73 optimizer.lr 0.03174570680800897 +538 73 training.batch_size 2.0 +538 73 training.label_smoothing 0.3783281158566806 +538 74 model.embedding_dim 1.0 +538 74 optimizer.lr 0.0014153391020647814 +538 74 training.batch_size 1.0 +538 74 training.label_smoothing 0.4967992494833312 +538 75 model.embedding_dim 2.0 +538 75 optimizer.lr 0.0064835314087398005 +538 75 training.batch_size 1.0 +538 75 training.label_smoothing 0.007714225525358735 +538 76 model.embedding_dim 0.0 +538 76 optimizer.lr 0.027253879969378548 +538 76 training.batch_size 0.0 +538 76 training.label_smoothing 0.30006297996790027 +538 77 model.embedding_dim 2.0 +538 77 optimizer.lr 0.04747195584212002 +538 77 training.batch_size 0.0 +538 77 training.label_smoothing 0.003607497966551193 +538 78 model.embedding_dim 0.0 +538 78 
optimizer.lr 0.007946500267401768 +538 78 training.batch_size 2.0 +538 78 training.label_smoothing 0.37302919108688903 +538 79 model.embedding_dim 1.0 +538 79 optimizer.lr 0.0016447442786434118 +538 79 training.batch_size 0.0 +538 79 training.label_smoothing 0.001313742492411004 +538 80 model.embedding_dim 0.0 +538 80 optimizer.lr 0.00656592374195981 +538 80 training.batch_size 1.0 +538 80 training.label_smoothing 0.21199419289723584 +538 81 model.embedding_dim 1.0 +538 81 optimizer.lr 0.0358628952992689 +538 81 training.batch_size 2.0 +538 81 training.label_smoothing 0.552868788530163 +538 82 model.embedding_dim 0.0 +538 82 optimizer.lr 0.03582010165446078 +538 82 training.batch_size 2.0 +538 82 training.label_smoothing 0.020027144614358647 +538 83 model.embedding_dim 2.0 +538 83 optimizer.lr 0.0021105044561597814 +538 83 training.batch_size 1.0 +538 83 training.label_smoothing 0.6633137516789818 +538 84 model.embedding_dim 2.0 +538 84 optimizer.lr 0.019747027744705558 +538 84 training.batch_size 0.0 +538 84 training.label_smoothing 0.0019016503401300135 +538 85 model.embedding_dim 0.0 +538 85 optimizer.lr 0.01767767641348394 +538 85 training.batch_size 0.0 +538 85 training.label_smoothing 0.23352997851574409 +538 86 model.embedding_dim 2.0 +538 86 optimizer.lr 0.0014543631609726811 +538 86 training.batch_size 2.0 +538 86 training.label_smoothing 0.03329108600203612 +538 87 model.embedding_dim 1.0 +538 87 optimizer.lr 0.019287515695335787 +538 87 training.batch_size 1.0 +538 87 training.label_smoothing 0.016619614626674523 +538 88 model.embedding_dim 2.0 +538 88 optimizer.lr 0.01734754497659526 +538 88 training.batch_size 2.0 +538 88 training.label_smoothing 0.0082778992162612 +538 89 model.embedding_dim 2.0 +538 89 optimizer.lr 0.002476980421114508 +538 89 training.batch_size 2.0 +538 89 training.label_smoothing 0.7980980605062373 +538 90 model.embedding_dim 0.0 +538 90 optimizer.lr 0.003287260552541233 +538 90 training.batch_size 1.0 +538 90 
training.label_smoothing 0.0010100515940661635 +538 91 model.embedding_dim 1.0 +538 91 optimizer.lr 0.005414281862757348 +538 91 training.batch_size 1.0 +538 91 training.label_smoothing 0.1471314222750012 +538 92 model.embedding_dim 1.0 +538 92 optimizer.lr 0.007867015146338217 +538 92 training.batch_size 2.0 +538 92 training.label_smoothing 0.17907890098938703 +538 93 model.embedding_dim 1.0 +538 93 optimizer.lr 0.030003689994484287 +538 93 training.batch_size 1.0 +538 93 training.label_smoothing 0.0012820173667408808 +538 94 model.embedding_dim 0.0 +538 94 optimizer.lr 0.09704716929745882 +538 94 training.batch_size 0.0 +538 94 training.label_smoothing 0.3530331020878989 +538 95 model.embedding_dim 0.0 +538 95 optimizer.lr 0.0013415884274393416 +538 95 training.batch_size 2.0 +538 95 training.label_smoothing 0.20571658829950226 +538 96 model.embedding_dim 0.0 +538 96 optimizer.lr 0.004919540264409842 +538 96 training.batch_size 1.0 +538 96 training.label_smoothing 0.0047312945011546564 +538 97 model.embedding_dim 2.0 +538 97 optimizer.lr 0.001540082072342059 +538 97 training.batch_size 1.0 +538 97 training.label_smoothing 0.9235172480069614 +538 98 model.embedding_dim 0.0 +538 98 optimizer.lr 0.0029540909263702938 +538 98 training.batch_size 1.0 +538 98 training.label_smoothing 0.03171668537107689 +538 99 model.embedding_dim 1.0 +538 99 optimizer.lr 0.019075787060554454 +538 99 training.batch_size 2.0 +538 99 training.label_smoothing 0.03048268237317864 +538 100 model.embedding_dim 2.0 +538 100 optimizer.lr 0.0016754037761139466 +538 100 training.batch_size 2.0 +538 100 training.label_smoothing 0.20740562677459726 +538 1 dataset """kinships""" +538 1 model """rescal""" +538 1 loss """crossentropy""" +538 1 regularizer """no""" +538 1 optimizer """adam""" +538 1 training_loop """lcwa""" +538 1 evaluator """rankbased""" +538 2 dataset """kinships""" +538 2 model """rescal""" +538 2 loss """crossentropy""" +538 2 regularizer """no""" +538 2 optimizer """adam""" +538 
2 training_loop """lcwa""" +538 2 evaluator """rankbased""" +538 3 dataset """kinships""" +538 3 model """rescal""" +538 3 loss """crossentropy""" +538 3 regularizer """no""" +538 3 optimizer """adam""" +538 3 training_loop """lcwa""" +538 3 evaluator """rankbased""" +538 4 dataset """kinships""" +538 4 model """rescal""" +538 4 loss """crossentropy""" +538 4 regularizer """no""" +538 4 optimizer """adam""" +538 4 training_loop """lcwa""" +538 4 evaluator """rankbased""" +538 5 dataset """kinships""" +538 5 model """rescal""" +538 5 loss """crossentropy""" +538 5 regularizer """no""" +538 5 optimizer """adam""" +538 5 training_loop """lcwa""" +538 5 evaluator """rankbased""" +538 6 dataset """kinships""" +538 6 model """rescal""" +538 6 loss """crossentropy""" +538 6 regularizer """no""" +538 6 optimizer """adam""" +538 6 training_loop """lcwa""" +538 6 evaluator """rankbased""" +538 7 dataset """kinships""" +538 7 model """rescal""" +538 7 loss """crossentropy""" +538 7 regularizer """no""" +538 7 optimizer """adam""" +538 7 training_loop """lcwa""" +538 7 evaluator """rankbased""" +538 8 dataset """kinships""" +538 8 model """rescal""" +538 8 loss """crossentropy""" +538 8 regularizer """no""" +538 8 optimizer """adam""" +538 8 training_loop """lcwa""" +538 8 evaluator """rankbased""" +538 9 dataset """kinships""" +538 9 model """rescal""" +538 9 loss """crossentropy""" +538 9 regularizer """no""" +538 9 optimizer """adam""" +538 9 training_loop """lcwa""" +538 9 evaluator """rankbased""" +538 10 dataset """kinships""" +538 10 model """rescal""" +538 10 loss """crossentropy""" +538 10 regularizer """no""" +538 10 optimizer """adam""" +538 10 training_loop """lcwa""" +538 10 evaluator """rankbased""" +538 11 dataset """kinships""" +538 11 model """rescal""" +538 11 loss """crossentropy""" +538 11 regularizer """no""" +538 11 optimizer """adam""" +538 11 training_loop """lcwa""" +538 11 evaluator """rankbased""" +538 12 dataset """kinships""" +538 12 model 
"""rescal""" +538 12 loss """crossentropy""" +538 12 regularizer """no""" +538 12 optimizer """adam""" +538 12 training_loop """lcwa""" +538 12 evaluator """rankbased""" +538 13 dataset """kinships""" +538 13 model """rescal""" +538 13 loss """crossentropy""" +538 13 regularizer """no""" +538 13 optimizer """adam""" +538 13 training_loop """lcwa""" +538 13 evaluator """rankbased""" +538 14 dataset """kinships""" +538 14 model """rescal""" +538 14 loss """crossentropy""" +538 14 regularizer """no""" +538 14 optimizer """adam""" +538 14 training_loop """lcwa""" +538 14 evaluator """rankbased""" +538 15 dataset """kinships""" +538 15 model """rescal""" +538 15 loss """crossentropy""" +538 15 regularizer """no""" +538 15 optimizer """adam""" +538 15 training_loop """lcwa""" +538 15 evaluator """rankbased""" +538 16 dataset """kinships""" +538 16 model """rescal""" +538 16 loss """crossentropy""" +538 16 regularizer """no""" +538 16 optimizer """adam""" +538 16 training_loop """lcwa""" +538 16 evaluator """rankbased""" +538 17 dataset """kinships""" +538 17 model """rescal""" +538 17 loss """crossentropy""" +538 17 regularizer """no""" +538 17 optimizer """adam""" +538 17 training_loop """lcwa""" +538 17 evaluator """rankbased""" +538 18 dataset """kinships""" +538 18 model """rescal""" +538 18 loss """crossentropy""" +538 18 regularizer """no""" +538 18 optimizer """adam""" +538 18 training_loop """lcwa""" +538 18 evaluator """rankbased""" +538 19 dataset """kinships""" +538 19 model """rescal""" +538 19 loss """crossentropy""" +538 19 regularizer """no""" +538 19 optimizer """adam""" +538 19 training_loop """lcwa""" +538 19 evaluator """rankbased""" +538 20 dataset """kinships""" +538 20 model """rescal""" +538 20 loss """crossentropy""" +538 20 regularizer """no""" +538 20 optimizer """adam""" +538 20 training_loop """lcwa""" +538 20 evaluator """rankbased""" +538 21 dataset """kinships""" +538 21 model """rescal""" +538 21 loss """crossentropy""" +538 21 regularizer 
"""no""" +538 21 optimizer """adam""" +538 21 training_loop """lcwa""" +538 21 evaluator """rankbased""" +538 22 dataset """kinships""" +538 22 model """rescal""" +538 22 loss """crossentropy""" +538 22 regularizer """no""" +538 22 optimizer """adam""" +538 22 training_loop """lcwa""" +538 22 evaluator """rankbased""" +538 23 dataset """kinships""" +538 23 model """rescal""" +538 23 loss """crossentropy""" +538 23 regularizer """no""" +538 23 optimizer """adam""" +538 23 training_loop """lcwa""" +538 23 evaluator """rankbased""" +538 24 dataset """kinships""" +538 24 model """rescal""" +538 24 loss """crossentropy""" +538 24 regularizer """no""" +538 24 optimizer """adam""" +538 24 training_loop """lcwa""" +538 24 evaluator """rankbased""" +538 25 dataset """kinships""" +538 25 model """rescal""" +538 25 loss """crossentropy""" +538 25 regularizer """no""" +538 25 optimizer """adam""" +538 25 training_loop """lcwa""" +538 25 evaluator """rankbased""" +538 26 dataset """kinships""" +538 26 model """rescal""" +538 26 loss """crossentropy""" +538 26 regularizer """no""" +538 26 optimizer """adam""" +538 26 training_loop """lcwa""" +538 26 evaluator """rankbased""" +538 27 dataset """kinships""" +538 27 model """rescal""" +538 27 loss """crossentropy""" +538 27 regularizer """no""" +538 27 optimizer """adam""" +538 27 training_loop """lcwa""" +538 27 evaluator """rankbased""" +538 28 dataset """kinships""" +538 28 model """rescal""" +538 28 loss """crossentropy""" +538 28 regularizer """no""" +538 28 optimizer """adam""" +538 28 training_loop """lcwa""" +538 28 evaluator """rankbased""" +538 29 dataset """kinships""" +538 29 model """rescal""" +538 29 loss """crossentropy""" +538 29 regularizer """no""" +538 29 optimizer """adam""" +538 29 training_loop """lcwa""" +538 29 evaluator """rankbased""" +538 30 dataset """kinships""" +538 30 model """rescal""" +538 30 loss """crossentropy""" +538 30 regularizer """no""" +538 30 optimizer """adam""" +538 30 training_loop 
"""lcwa""" +538 30 evaluator """rankbased""" +538 31 dataset """kinships""" +538 31 model """rescal""" +538 31 loss """crossentropy""" +538 31 regularizer """no""" +538 31 optimizer """adam""" +538 31 training_loop """lcwa""" +538 31 evaluator """rankbased""" +538 32 dataset """kinships""" +538 32 model """rescal""" +538 32 loss """crossentropy""" +538 32 regularizer """no""" +538 32 optimizer """adam""" +538 32 training_loop """lcwa""" +538 32 evaluator """rankbased""" +538 33 dataset """kinships""" +538 33 model """rescal""" +538 33 loss """crossentropy""" +538 33 regularizer """no""" +538 33 optimizer """adam""" +538 33 training_loop """lcwa""" +538 33 evaluator """rankbased""" +538 34 dataset """kinships""" +538 34 model """rescal""" +538 34 loss """crossentropy""" +538 34 regularizer """no""" +538 34 optimizer """adam""" +538 34 training_loop """lcwa""" +538 34 evaluator """rankbased""" +538 35 dataset """kinships""" +538 35 model """rescal""" +538 35 loss """crossentropy""" +538 35 regularizer """no""" +538 35 optimizer """adam""" +538 35 training_loop """lcwa""" +538 35 evaluator """rankbased""" +538 36 dataset """kinships""" +538 36 model """rescal""" +538 36 loss """crossentropy""" +538 36 regularizer """no""" +538 36 optimizer """adam""" +538 36 training_loop """lcwa""" +538 36 evaluator """rankbased""" +538 37 dataset """kinships""" +538 37 model """rescal""" +538 37 loss """crossentropy""" +538 37 regularizer """no""" +538 37 optimizer """adam""" +538 37 training_loop """lcwa""" +538 37 evaluator """rankbased""" +538 38 dataset """kinships""" +538 38 model """rescal""" +538 38 loss """crossentropy""" +538 38 regularizer """no""" +538 38 optimizer """adam""" +538 38 training_loop """lcwa""" +538 38 evaluator """rankbased""" +538 39 dataset """kinships""" +538 39 model """rescal""" +538 39 loss """crossentropy""" +538 39 regularizer """no""" +538 39 optimizer """adam""" +538 39 training_loop """lcwa""" +538 39 evaluator """rankbased""" +538 40 dataset 
"""kinships""" +538 40 model """rescal""" +538 40 loss """crossentropy""" +538 40 regularizer """no""" +538 40 optimizer """adam""" +538 40 training_loop """lcwa""" +538 40 evaluator """rankbased""" +538 41 dataset """kinships""" +538 41 model """rescal""" +538 41 loss """crossentropy""" +538 41 regularizer """no""" +538 41 optimizer """adam""" +538 41 training_loop """lcwa""" +538 41 evaluator """rankbased""" +538 42 dataset """kinships""" +538 42 model """rescal""" +538 42 loss """crossentropy""" +538 42 regularizer """no""" +538 42 optimizer """adam""" +538 42 training_loop """lcwa""" +538 42 evaluator """rankbased""" +538 43 dataset """kinships""" +538 43 model """rescal""" +538 43 loss """crossentropy""" +538 43 regularizer """no""" +538 43 optimizer """adam""" +538 43 training_loop """lcwa""" +538 43 evaluator """rankbased""" +538 44 dataset """kinships""" +538 44 model """rescal""" +538 44 loss """crossentropy""" +538 44 regularizer """no""" +538 44 optimizer """adam""" +538 44 training_loop """lcwa""" +538 44 evaluator """rankbased""" +538 45 dataset """kinships""" +538 45 model """rescal""" +538 45 loss """crossentropy""" +538 45 regularizer """no""" +538 45 optimizer """adam""" +538 45 training_loop """lcwa""" +538 45 evaluator """rankbased""" +538 46 dataset """kinships""" +538 46 model """rescal""" +538 46 loss """crossentropy""" +538 46 regularizer """no""" +538 46 optimizer """adam""" +538 46 training_loop """lcwa""" +538 46 evaluator """rankbased""" +538 47 dataset """kinships""" +538 47 model """rescal""" +538 47 loss """crossentropy""" +538 47 regularizer """no""" +538 47 optimizer """adam""" +538 47 training_loop """lcwa""" +538 47 evaluator """rankbased""" +538 48 dataset """kinships""" +538 48 model """rescal""" +538 48 loss """crossentropy""" +538 48 regularizer """no""" +538 48 optimizer """adam""" +538 48 training_loop """lcwa""" +538 48 evaluator """rankbased""" +538 49 dataset """kinships""" +538 49 model """rescal""" +538 49 loss 
"""crossentropy""" +538 49 regularizer """no""" +538 49 optimizer """adam""" +538 49 training_loop """lcwa""" +538 49 evaluator """rankbased""" +538 50 dataset """kinships""" +538 50 model """rescal""" +538 50 loss """crossentropy""" +538 50 regularizer """no""" +538 50 optimizer """adam""" +538 50 training_loop """lcwa""" +538 50 evaluator """rankbased""" +538 51 dataset """kinships""" +538 51 model """rescal""" +538 51 loss """crossentropy""" +538 51 regularizer """no""" +538 51 optimizer """adam""" +538 51 training_loop """lcwa""" +538 51 evaluator """rankbased""" +538 52 dataset """kinships""" +538 52 model """rescal""" +538 52 loss """crossentropy""" +538 52 regularizer """no""" +538 52 optimizer """adam""" +538 52 training_loop """lcwa""" +538 52 evaluator """rankbased""" +538 53 dataset """kinships""" +538 53 model """rescal""" +538 53 loss """crossentropy""" +538 53 regularizer """no""" +538 53 optimizer """adam""" +538 53 training_loop """lcwa""" +538 53 evaluator """rankbased""" +538 54 dataset """kinships""" +538 54 model """rescal""" +538 54 loss """crossentropy""" +538 54 regularizer """no""" +538 54 optimizer """adam""" +538 54 training_loop """lcwa""" +538 54 evaluator """rankbased""" +538 55 dataset """kinships""" +538 55 model """rescal""" +538 55 loss """crossentropy""" +538 55 regularizer """no""" +538 55 optimizer """adam""" +538 55 training_loop """lcwa""" +538 55 evaluator """rankbased""" +538 56 dataset """kinships""" +538 56 model """rescal""" +538 56 loss """crossentropy""" +538 56 regularizer """no""" +538 56 optimizer """adam""" +538 56 training_loop """lcwa""" +538 56 evaluator """rankbased""" +538 57 dataset """kinships""" +538 57 model """rescal""" +538 57 loss """crossentropy""" +538 57 regularizer """no""" +538 57 optimizer """adam""" +538 57 training_loop """lcwa""" +538 57 evaluator """rankbased""" +538 58 dataset """kinships""" +538 58 model """rescal""" +538 58 loss """crossentropy""" +538 58 regularizer """no""" +538 58 
optimizer """adam""" +538 58 training_loop """lcwa""" +538 58 evaluator """rankbased""" +538 59 dataset """kinships""" +538 59 model """rescal""" +538 59 loss """crossentropy""" +538 59 regularizer """no""" +538 59 optimizer """adam""" +538 59 training_loop """lcwa""" +538 59 evaluator """rankbased""" +538 60 dataset """kinships""" +538 60 model """rescal""" +538 60 loss """crossentropy""" +538 60 regularizer """no""" +538 60 optimizer """adam""" +538 60 training_loop """lcwa""" +538 60 evaluator """rankbased""" +538 61 dataset """kinships""" +538 61 model """rescal""" +538 61 loss """crossentropy""" +538 61 regularizer """no""" +538 61 optimizer """adam""" +538 61 training_loop """lcwa""" +538 61 evaluator """rankbased""" +538 62 dataset """kinships""" +538 62 model """rescal""" +538 62 loss """crossentropy""" +538 62 regularizer """no""" +538 62 optimizer """adam""" +538 62 training_loop """lcwa""" +538 62 evaluator """rankbased""" +538 63 dataset """kinships""" +538 63 model """rescal""" +538 63 loss """crossentropy""" +538 63 regularizer """no""" +538 63 optimizer """adam""" +538 63 training_loop """lcwa""" +538 63 evaluator """rankbased""" +538 64 dataset """kinships""" +538 64 model """rescal""" +538 64 loss """crossentropy""" +538 64 regularizer """no""" +538 64 optimizer """adam""" +538 64 training_loop """lcwa""" +538 64 evaluator """rankbased""" +538 65 dataset """kinships""" +538 65 model """rescal""" +538 65 loss """crossentropy""" +538 65 regularizer """no""" +538 65 optimizer """adam""" +538 65 training_loop """lcwa""" +538 65 evaluator """rankbased""" +538 66 dataset """kinships""" +538 66 model """rescal""" +538 66 loss """crossentropy""" +538 66 regularizer """no""" +538 66 optimizer """adam""" +538 66 training_loop """lcwa""" +538 66 evaluator """rankbased""" +538 67 dataset """kinships""" +538 67 model """rescal""" +538 67 loss """crossentropy""" +538 67 regularizer """no""" +538 67 optimizer """adam""" +538 67 training_loop """lcwa""" +538 67 
evaluator """rankbased""" +538 68 dataset """kinships""" +538 68 model """rescal""" +538 68 loss """crossentropy""" +538 68 regularizer """no""" +538 68 optimizer """adam""" +538 68 training_loop """lcwa""" +538 68 evaluator """rankbased""" +538 69 dataset """kinships""" +538 69 model """rescal""" +538 69 loss """crossentropy""" +538 69 regularizer """no""" +538 69 optimizer """adam""" +538 69 training_loop """lcwa""" +538 69 evaluator """rankbased""" +538 70 dataset """kinships""" +538 70 model """rescal""" +538 70 loss """crossentropy""" +538 70 regularizer """no""" +538 70 optimizer """adam""" +538 70 training_loop """lcwa""" +538 70 evaluator """rankbased""" +538 71 dataset """kinships""" +538 71 model """rescal""" +538 71 loss """crossentropy""" +538 71 regularizer """no""" +538 71 optimizer """adam""" +538 71 training_loop """lcwa""" +538 71 evaluator """rankbased""" +538 72 dataset """kinships""" +538 72 model """rescal""" +538 72 loss """crossentropy""" +538 72 regularizer """no""" +538 72 optimizer """adam""" +538 72 training_loop """lcwa""" +538 72 evaluator """rankbased""" +538 73 dataset """kinships""" +538 73 model """rescal""" +538 73 loss """crossentropy""" +538 73 regularizer """no""" +538 73 optimizer """adam""" +538 73 training_loop """lcwa""" +538 73 evaluator """rankbased""" +538 74 dataset """kinships""" +538 74 model """rescal""" +538 74 loss """crossentropy""" +538 74 regularizer """no""" +538 74 optimizer """adam""" +538 74 training_loop """lcwa""" +538 74 evaluator """rankbased""" +538 75 dataset """kinships""" +538 75 model """rescal""" +538 75 loss """crossentropy""" +538 75 regularizer """no""" +538 75 optimizer """adam""" +538 75 training_loop """lcwa""" +538 75 evaluator """rankbased""" +538 76 dataset """kinships""" +538 76 model """rescal""" +538 76 loss """crossentropy""" +538 76 regularizer """no""" +538 76 optimizer """adam""" +538 76 training_loop """lcwa""" +538 76 evaluator """rankbased""" +538 77 dataset """kinships""" +538 77 
model """rescal""" +538 77 loss """crossentropy""" +538 77 regularizer """no""" +538 77 optimizer """adam""" +538 77 training_loop """lcwa""" +538 77 evaluator """rankbased""" +538 78 dataset """kinships""" +538 78 model """rescal""" +538 78 loss """crossentropy""" +538 78 regularizer """no""" +538 78 optimizer """adam""" +538 78 training_loop """lcwa""" +538 78 evaluator """rankbased""" +538 79 dataset """kinships""" +538 79 model """rescal""" +538 79 loss """crossentropy""" +538 79 regularizer """no""" +538 79 optimizer """adam""" +538 79 training_loop """lcwa""" +538 79 evaluator """rankbased""" +538 80 dataset """kinships""" +538 80 model """rescal""" +538 80 loss """crossentropy""" +538 80 regularizer """no""" +538 80 optimizer """adam""" +538 80 training_loop """lcwa""" +538 80 evaluator """rankbased""" +538 81 dataset """kinships""" +538 81 model """rescal""" +538 81 loss """crossentropy""" +538 81 regularizer """no""" +538 81 optimizer """adam""" +538 81 training_loop """lcwa""" +538 81 evaluator """rankbased""" +538 82 dataset """kinships""" +538 82 model """rescal""" +538 82 loss """crossentropy""" +538 82 regularizer """no""" +538 82 optimizer """adam""" +538 82 training_loop """lcwa""" +538 82 evaluator """rankbased""" +538 83 dataset """kinships""" +538 83 model """rescal""" +538 83 loss """crossentropy""" +538 83 regularizer """no""" +538 83 optimizer """adam""" +538 83 training_loop """lcwa""" +538 83 evaluator """rankbased""" +538 84 dataset """kinships""" +538 84 model """rescal""" +538 84 loss """crossentropy""" +538 84 regularizer """no""" +538 84 optimizer """adam""" +538 84 training_loop """lcwa""" +538 84 evaluator """rankbased""" +538 85 dataset """kinships""" +538 85 model """rescal""" +538 85 loss """crossentropy""" +538 85 regularizer """no""" +538 85 optimizer """adam""" +538 85 training_loop """lcwa""" +538 85 evaluator """rankbased""" +538 86 dataset """kinships""" +538 86 model """rescal""" +538 86 loss """crossentropy""" +538 86 
regularizer """no""" +538 86 optimizer """adam""" +538 86 training_loop """lcwa""" +538 86 evaluator """rankbased""" +538 87 dataset """kinships""" +538 87 model """rescal""" +538 87 loss """crossentropy""" +538 87 regularizer """no""" +538 87 optimizer """adam""" +538 87 training_loop """lcwa""" +538 87 evaluator """rankbased""" +538 88 dataset """kinships""" +538 88 model """rescal""" +538 88 loss """crossentropy""" +538 88 regularizer """no""" +538 88 optimizer """adam""" +538 88 training_loop """lcwa""" +538 88 evaluator """rankbased""" +538 89 dataset """kinships""" +538 89 model """rescal""" +538 89 loss """crossentropy""" +538 89 regularizer """no""" +538 89 optimizer """adam""" +538 89 training_loop """lcwa""" +538 89 evaluator """rankbased""" +538 90 dataset """kinships""" +538 90 model """rescal""" +538 90 loss """crossentropy""" +538 90 regularizer """no""" +538 90 optimizer """adam""" +538 90 training_loop """lcwa""" +538 90 evaluator """rankbased""" +538 91 dataset """kinships""" +538 91 model """rescal""" +538 91 loss """crossentropy""" +538 91 regularizer """no""" +538 91 optimizer """adam""" +538 91 training_loop """lcwa""" +538 91 evaluator """rankbased""" +538 92 dataset """kinships""" +538 92 model """rescal""" +538 92 loss """crossentropy""" +538 92 regularizer """no""" +538 92 optimizer """adam""" +538 92 training_loop """lcwa""" +538 92 evaluator """rankbased""" +538 93 dataset """kinships""" +538 93 model """rescal""" +538 93 loss """crossentropy""" +538 93 regularizer """no""" +538 93 optimizer """adam""" +538 93 training_loop """lcwa""" +538 93 evaluator """rankbased""" +538 94 dataset """kinships""" +538 94 model """rescal""" +538 94 loss """crossentropy""" +538 94 regularizer """no""" +538 94 optimizer """adam""" +538 94 training_loop """lcwa""" +538 94 evaluator """rankbased""" +538 95 dataset """kinships""" +538 95 model """rescal""" +538 95 loss """crossentropy""" +538 95 regularizer """no""" +538 95 optimizer """adam""" +538 95 
training_loop """lcwa""" +538 95 evaluator """rankbased""" +538 96 dataset """kinships""" +538 96 model """rescal""" +538 96 loss """crossentropy""" +538 96 regularizer """no""" +538 96 optimizer """adam""" +538 96 training_loop """lcwa""" +538 96 evaluator """rankbased""" +538 97 dataset """kinships""" +538 97 model """rescal""" +538 97 loss """crossentropy""" +538 97 regularizer """no""" +538 97 optimizer """adam""" +538 97 training_loop """lcwa""" +538 97 evaluator """rankbased""" +538 98 dataset """kinships""" +538 98 model """rescal""" +538 98 loss """crossentropy""" +538 98 regularizer """no""" +538 98 optimizer """adam""" +538 98 training_loop """lcwa""" +538 98 evaluator """rankbased""" +538 99 dataset """kinships""" +538 99 model """rescal""" +538 99 loss """crossentropy""" +538 99 regularizer """no""" +538 99 optimizer """adam""" +538 99 training_loop """lcwa""" +538 99 evaluator """rankbased""" +538 100 dataset """kinships""" +538 100 model """rescal""" +538 100 loss """crossentropy""" +538 100 regularizer """no""" +538 100 optimizer """adam""" +538 100 training_loop """lcwa""" +538 100 evaluator """rankbased""" +539 1 model.embedding_dim 1.0 +539 1 optimizer.lr 0.03191336242008494 +539 1 negative_sampler.num_negs_per_pos 30.0 +539 1 training.batch_size 2.0 +539 2 model.embedding_dim 0.0 +539 2 optimizer.lr 0.010069351740297168 +539 2 negative_sampler.num_negs_per_pos 14.0 +539 2 training.batch_size 2.0 +539 3 model.embedding_dim 0.0 +539 3 optimizer.lr 0.08684944778639772 +539 3 negative_sampler.num_negs_per_pos 27.0 +539 3 training.batch_size 0.0 +539 4 model.embedding_dim 0.0 +539 4 optimizer.lr 0.025770363873438215 +539 4 negative_sampler.num_negs_per_pos 60.0 +539 4 training.batch_size 1.0 +539 5 model.embedding_dim 0.0 +539 5 optimizer.lr 0.02378022453116513 +539 5 negative_sampler.num_negs_per_pos 34.0 +539 5 training.batch_size 2.0 +539 6 model.embedding_dim 1.0 +539 6 optimizer.lr 0.0012443167978090223 +539 6 negative_sampler.num_negs_per_pos 
43.0 +539 6 training.batch_size 0.0 +539 7 model.embedding_dim 1.0 +539 7 optimizer.lr 0.019844538499907655 +539 7 negative_sampler.num_negs_per_pos 70.0 +539 7 training.batch_size 1.0 +539 8 model.embedding_dim 1.0 +539 8 optimizer.lr 0.00413938861428959 +539 8 negative_sampler.num_negs_per_pos 92.0 +539 8 training.batch_size 0.0 +539 9 model.embedding_dim 2.0 +539 9 optimizer.lr 0.001271578526081361 +539 9 negative_sampler.num_negs_per_pos 84.0 +539 9 training.batch_size 2.0 +539 10 model.embedding_dim 0.0 +539 10 optimizer.lr 0.0020297662566077586 +539 10 negative_sampler.num_negs_per_pos 17.0 +539 10 training.batch_size 2.0 +539 11 model.embedding_dim 1.0 +539 11 optimizer.lr 0.013617932291190467 +539 11 negative_sampler.num_negs_per_pos 41.0 +539 11 training.batch_size 2.0 +539 12 model.embedding_dim 1.0 +539 12 optimizer.lr 0.002643880544859917 +539 12 negative_sampler.num_negs_per_pos 52.0 +539 12 training.batch_size 2.0 +539 13 model.embedding_dim 0.0 +539 13 optimizer.lr 0.0014373918879062191 +539 13 negative_sampler.num_negs_per_pos 42.0 +539 13 training.batch_size 2.0 +539 14 model.embedding_dim 0.0 +539 14 optimizer.lr 0.0014102655102247168 +539 14 negative_sampler.num_negs_per_pos 73.0 +539 14 training.batch_size 1.0 +539 15 model.embedding_dim 0.0 +539 15 optimizer.lr 0.016326188072788874 +539 15 negative_sampler.num_negs_per_pos 96.0 +539 15 training.batch_size 1.0 +539 16 model.embedding_dim 2.0 +539 16 optimizer.lr 0.001255541282943468 +539 16 negative_sampler.num_negs_per_pos 51.0 +539 16 training.batch_size 1.0 +539 17 model.embedding_dim 1.0 +539 17 optimizer.lr 0.007553756899605252 +539 17 negative_sampler.num_negs_per_pos 61.0 +539 17 training.batch_size 2.0 +539 18 model.embedding_dim 1.0 +539 18 optimizer.lr 0.031119059750791232 +539 18 negative_sampler.num_negs_per_pos 69.0 +539 18 training.batch_size 2.0 +539 19 model.embedding_dim 2.0 +539 19 optimizer.lr 0.001899122882121256 +539 19 negative_sampler.num_negs_per_pos 11.0 +539 19 
training.batch_size 0.0 +539 20 model.embedding_dim 1.0 +539 20 optimizer.lr 0.0011315727531938096 +539 20 negative_sampler.num_negs_per_pos 77.0 +539 20 training.batch_size 0.0 +539 21 model.embedding_dim 2.0 +539 21 optimizer.lr 0.09733068973909496 +539 21 negative_sampler.num_negs_per_pos 59.0 +539 21 training.batch_size 1.0 +539 22 model.embedding_dim 1.0 +539 22 optimizer.lr 0.03657150686004551 +539 22 negative_sampler.num_negs_per_pos 47.0 +539 22 training.batch_size 1.0 +539 23 model.embedding_dim 2.0 +539 23 optimizer.lr 0.0027498824091034824 +539 23 negative_sampler.num_negs_per_pos 14.0 +539 23 training.batch_size 2.0 +539 24 model.embedding_dim 1.0 +539 24 optimizer.lr 0.002394970739288038 +539 24 negative_sampler.num_negs_per_pos 92.0 +539 24 training.batch_size 1.0 +539 25 model.embedding_dim 0.0 +539 25 optimizer.lr 0.018891712925017807 +539 25 negative_sampler.num_negs_per_pos 85.0 +539 25 training.batch_size 1.0 +539 26 model.embedding_dim 1.0 +539 26 optimizer.lr 0.00182161588383729 +539 26 negative_sampler.num_negs_per_pos 66.0 +539 26 training.batch_size 1.0 +539 27 model.embedding_dim 2.0 +539 27 optimizer.lr 0.008862455876213435 +539 27 negative_sampler.num_negs_per_pos 32.0 +539 27 training.batch_size 0.0 +539 28 model.embedding_dim 2.0 +539 28 optimizer.lr 0.001747716783286952 +539 28 negative_sampler.num_negs_per_pos 51.0 +539 28 training.batch_size 1.0 +539 29 model.embedding_dim 0.0 +539 29 optimizer.lr 0.003310006498790351 +539 29 negative_sampler.num_negs_per_pos 37.0 +539 29 training.batch_size 1.0 +539 30 model.embedding_dim 0.0 +539 30 optimizer.lr 0.01110034647623201 +539 30 negative_sampler.num_negs_per_pos 47.0 +539 30 training.batch_size 1.0 +539 31 model.embedding_dim 1.0 +539 31 optimizer.lr 0.0020801999537117897 +539 31 negative_sampler.num_negs_per_pos 84.0 +539 31 training.batch_size 0.0 +539 32 model.embedding_dim 0.0 +539 32 optimizer.lr 0.08554324831371074 +539 32 negative_sampler.num_negs_per_pos 72.0 +539 32 
training.batch_size 1.0 +539 33 model.embedding_dim 2.0 +539 33 optimizer.lr 0.012885567593982199 +539 33 negative_sampler.num_negs_per_pos 67.0 +539 33 training.batch_size 0.0 +539 34 model.embedding_dim 0.0 +539 34 optimizer.lr 0.002031221029327623 +539 34 negative_sampler.num_negs_per_pos 49.0 +539 34 training.batch_size 2.0 +539 35 model.embedding_dim 2.0 +539 35 optimizer.lr 0.0011800909561409884 +539 35 negative_sampler.num_negs_per_pos 45.0 +539 35 training.batch_size 2.0 +539 36 model.embedding_dim 1.0 +539 36 optimizer.lr 0.0013571210233952894 +539 36 negative_sampler.num_negs_per_pos 12.0 +539 36 training.batch_size 0.0 +539 37 model.embedding_dim 0.0 +539 37 optimizer.lr 0.011892877941673066 +539 37 negative_sampler.num_negs_per_pos 78.0 +539 37 training.batch_size 2.0 +539 38 model.embedding_dim 2.0 +539 38 optimizer.lr 0.007573691138407031 +539 38 negative_sampler.num_negs_per_pos 86.0 +539 38 training.batch_size 1.0 +539 39 model.embedding_dim 2.0 +539 39 optimizer.lr 0.039306452664204655 +539 39 negative_sampler.num_negs_per_pos 67.0 +539 39 training.batch_size 0.0 +539 40 model.embedding_dim 0.0 +539 40 optimizer.lr 0.0026192017078497617 +539 40 negative_sampler.num_negs_per_pos 30.0 +539 40 training.batch_size 0.0 +539 41 model.embedding_dim 1.0 +539 41 optimizer.lr 0.007124598201158338 +539 41 negative_sampler.num_negs_per_pos 63.0 +539 41 training.batch_size 0.0 +539 42 model.embedding_dim 2.0 +539 42 optimizer.lr 0.0014373589763047446 +539 42 negative_sampler.num_negs_per_pos 49.0 +539 42 training.batch_size 0.0 +539 43 model.embedding_dim 0.0 +539 43 optimizer.lr 0.011123213244163065 +539 43 negative_sampler.num_negs_per_pos 57.0 +539 43 training.batch_size 0.0 +539 44 model.embedding_dim 1.0 +539 44 optimizer.lr 0.001755960767962629 +539 44 negative_sampler.num_negs_per_pos 16.0 +539 44 training.batch_size 2.0 +539 45 model.embedding_dim 1.0 +539 45 optimizer.lr 0.011402465979369105 +539 45 negative_sampler.num_negs_per_pos 70.0 +539 45 
training.batch_size 2.0 +539 46 model.embedding_dim 0.0 +539 46 optimizer.lr 0.001123107773184353 +539 46 negative_sampler.num_negs_per_pos 6.0 +539 46 training.batch_size 2.0 +539 47 model.embedding_dim 2.0 +539 47 optimizer.lr 0.0434003319657009 +539 47 negative_sampler.num_negs_per_pos 57.0 +539 47 training.batch_size 1.0 +539 48 model.embedding_dim 2.0 +539 48 optimizer.lr 0.0016679587690655473 +539 48 negative_sampler.num_negs_per_pos 0.0 +539 48 training.batch_size 1.0 +539 49 model.embedding_dim 0.0 +539 49 optimizer.lr 0.02028094995970078 +539 49 negative_sampler.num_negs_per_pos 57.0 +539 49 training.batch_size 2.0 +539 50 model.embedding_dim 1.0 +539 50 optimizer.lr 0.02114108980235589 +539 50 negative_sampler.num_negs_per_pos 82.0 +539 50 training.batch_size 1.0 +539 51 model.embedding_dim 1.0 +539 51 optimizer.lr 0.023790010198752865 +539 51 negative_sampler.num_negs_per_pos 32.0 +539 51 training.batch_size 2.0 +539 52 model.embedding_dim 1.0 +539 52 optimizer.lr 0.0014916526187904982 +539 52 negative_sampler.num_negs_per_pos 39.0 +539 52 training.batch_size 1.0 +539 53 model.embedding_dim 0.0 +539 53 optimizer.lr 0.01920929927473013 +539 53 negative_sampler.num_negs_per_pos 80.0 +539 53 training.batch_size 1.0 +539 54 model.embedding_dim 1.0 +539 54 optimizer.lr 0.02303646717875832 +539 54 negative_sampler.num_negs_per_pos 15.0 +539 54 training.batch_size 0.0 +539 55 model.embedding_dim 1.0 +539 55 optimizer.lr 0.007828264540378988 +539 55 negative_sampler.num_negs_per_pos 34.0 +539 55 training.batch_size 1.0 +539 56 model.embedding_dim 2.0 +539 56 optimizer.lr 0.006055634806591392 +539 56 negative_sampler.num_negs_per_pos 85.0 +539 56 training.batch_size 1.0 +539 57 model.embedding_dim 1.0 +539 57 optimizer.lr 0.007578940138750655 +539 57 negative_sampler.num_negs_per_pos 86.0 +539 57 training.batch_size 2.0 +539 58 model.embedding_dim 2.0 +539 58 optimizer.lr 0.005110808805352956 +539 58 negative_sampler.num_negs_per_pos 87.0 +539 58 
training.batch_size 0.0 +539 59 model.embedding_dim 2.0 +539 59 optimizer.lr 0.06343304449684113 +539 59 negative_sampler.num_negs_per_pos 99.0 +539 59 training.batch_size 2.0 +539 60 model.embedding_dim 1.0 +539 60 optimizer.lr 0.001987505939027053 +539 60 negative_sampler.num_negs_per_pos 27.0 +539 60 training.batch_size 0.0 +539 61 model.embedding_dim 1.0 +539 61 optimizer.lr 0.0024501468720904253 +539 61 negative_sampler.num_negs_per_pos 57.0 +539 61 training.batch_size 1.0 +539 62 model.embedding_dim 1.0 +539 62 optimizer.lr 0.0031155597737952128 +539 62 negative_sampler.num_negs_per_pos 34.0 +539 62 training.batch_size 2.0 +539 63 model.embedding_dim 2.0 +539 63 optimizer.lr 0.00547165541889616 +539 63 negative_sampler.num_negs_per_pos 22.0 +539 63 training.batch_size 0.0 +539 64 model.embedding_dim 2.0 +539 64 optimizer.lr 0.020338216259262378 +539 64 negative_sampler.num_negs_per_pos 73.0 +539 64 training.batch_size 2.0 +539 65 model.embedding_dim 1.0 +539 65 optimizer.lr 0.0021253399799931964 +539 65 negative_sampler.num_negs_per_pos 96.0 +539 65 training.batch_size 0.0 +539 66 model.embedding_dim 2.0 +539 66 optimizer.lr 0.02673157217683634 +539 66 negative_sampler.num_negs_per_pos 42.0 +539 66 training.batch_size 1.0 +539 67 model.embedding_dim 0.0 +539 67 optimizer.lr 0.010596145405748603 +539 67 negative_sampler.num_negs_per_pos 24.0 +539 67 training.batch_size 1.0 +539 68 model.embedding_dim 0.0 +539 68 optimizer.lr 0.07264590356513972 +539 68 negative_sampler.num_negs_per_pos 68.0 +539 68 training.batch_size 0.0 +539 69 model.embedding_dim 0.0 +539 69 optimizer.lr 0.05007357201920251 +539 69 negative_sampler.num_negs_per_pos 68.0 +539 69 training.batch_size 1.0 +539 70 model.embedding_dim 2.0 +539 70 optimizer.lr 0.0021001527950585182 +539 70 negative_sampler.num_negs_per_pos 61.0 +539 70 training.batch_size 2.0 +539 71 model.embedding_dim 0.0 +539 71 optimizer.lr 0.07050641071566965 +539 71 negative_sampler.num_negs_per_pos 52.0 +539 71 
training.batch_size 1.0 +539 72 model.embedding_dim 1.0 +539 72 optimizer.lr 0.0031162935195614616 +539 72 negative_sampler.num_negs_per_pos 54.0 +539 72 training.batch_size 1.0 +539 73 model.embedding_dim 0.0 +539 73 optimizer.lr 0.00909513180727187 +539 73 negative_sampler.num_negs_per_pos 86.0 +539 73 training.batch_size 2.0 +539 74 model.embedding_dim 1.0 +539 74 optimizer.lr 0.002832551434342331 +539 74 negative_sampler.num_negs_per_pos 19.0 +539 74 training.batch_size 1.0 +539 75 model.embedding_dim 2.0 +539 75 optimizer.lr 0.0031544163782102953 +539 75 negative_sampler.num_negs_per_pos 14.0 +539 75 training.batch_size 0.0 +539 76 model.embedding_dim 0.0 +539 76 optimizer.lr 0.012420743665027862 +539 76 negative_sampler.num_negs_per_pos 47.0 +539 76 training.batch_size 0.0 +539 77 model.embedding_dim 0.0 +539 77 optimizer.lr 0.010869977230038614 +539 77 negative_sampler.num_negs_per_pos 28.0 +539 77 training.batch_size 1.0 +539 78 model.embedding_dim 0.0 +539 78 optimizer.lr 0.002078293927565951 +539 78 negative_sampler.num_negs_per_pos 80.0 +539 78 training.batch_size 0.0 +539 79 model.embedding_dim 0.0 +539 79 optimizer.lr 0.0011020857615705495 +539 79 negative_sampler.num_negs_per_pos 3.0 +539 79 training.batch_size 2.0 +539 80 model.embedding_dim 1.0 +539 80 optimizer.lr 0.008343816908581375 +539 80 negative_sampler.num_negs_per_pos 26.0 +539 80 training.batch_size 2.0 +539 81 model.embedding_dim 0.0 +539 81 optimizer.lr 0.045957920203659006 +539 81 negative_sampler.num_negs_per_pos 70.0 +539 81 training.batch_size 2.0 +539 82 model.embedding_dim 2.0 +539 82 optimizer.lr 0.026425596814826056 +539 82 negative_sampler.num_negs_per_pos 63.0 +539 82 training.batch_size 0.0 +539 83 model.embedding_dim 0.0 +539 83 optimizer.lr 0.0017810138839277495 +539 83 negative_sampler.num_negs_per_pos 43.0 +539 83 training.batch_size 2.0 +539 84 model.embedding_dim 1.0 +539 84 optimizer.lr 0.005642811565963115 +539 84 negative_sampler.num_negs_per_pos 67.0 +539 84 
training.batch_size 0.0 +539 85 model.embedding_dim 2.0 +539 85 optimizer.lr 0.037209737375726304 +539 85 negative_sampler.num_negs_per_pos 92.0 +539 85 training.batch_size 1.0 +539 86 model.embedding_dim 0.0 +539 86 optimizer.lr 0.001154935403303705 +539 86 negative_sampler.num_negs_per_pos 60.0 +539 86 training.batch_size 0.0 +539 87 model.embedding_dim 1.0 +539 87 optimizer.lr 0.0024997074256418225 +539 87 negative_sampler.num_negs_per_pos 37.0 +539 87 training.batch_size 1.0 +539 88 model.embedding_dim 1.0 +539 88 optimizer.lr 0.017572858796225742 +539 88 negative_sampler.num_negs_per_pos 48.0 +539 88 training.batch_size 1.0 +539 89 model.embedding_dim 2.0 +539 89 optimizer.lr 0.018532645278701767 +539 89 negative_sampler.num_negs_per_pos 34.0 +539 89 training.batch_size 2.0 +539 90 model.embedding_dim 1.0 +539 90 optimizer.lr 0.006143854162567564 +539 90 negative_sampler.num_negs_per_pos 30.0 +539 90 training.batch_size 2.0 +539 91 model.embedding_dim 0.0 +539 91 optimizer.lr 0.009271314976993652 +539 91 negative_sampler.num_negs_per_pos 79.0 +539 91 training.batch_size 2.0 +539 92 model.embedding_dim 0.0 +539 92 optimizer.lr 0.00357809057079277 +539 92 negative_sampler.num_negs_per_pos 34.0 +539 92 training.batch_size 2.0 +539 93 model.embedding_dim 1.0 +539 93 optimizer.lr 0.05448330785344421 +539 93 negative_sampler.num_negs_per_pos 76.0 +539 93 training.batch_size 2.0 +539 94 model.embedding_dim 0.0 +539 94 optimizer.lr 0.0013676117623955537 +539 94 negative_sampler.num_negs_per_pos 61.0 +539 94 training.batch_size 0.0 +539 95 model.embedding_dim 2.0 +539 95 optimizer.lr 0.0015600012704633656 +539 95 negative_sampler.num_negs_per_pos 42.0 +539 95 training.batch_size 0.0 +539 96 model.embedding_dim 0.0 +539 96 optimizer.lr 0.03100218764022625 +539 96 negative_sampler.num_negs_per_pos 36.0 +539 96 training.batch_size 1.0 +539 97 model.embedding_dim 1.0 +539 97 optimizer.lr 0.056829439539422585 +539 97 negative_sampler.num_negs_per_pos 78.0 +539 97 
training.batch_size 1.0 +539 98 model.embedding_dim 1.0 +539 98 optimizer.lr 0.04131942226259186 +539 98 negative_sampler.num_negs_per_pos 99.0 +539 98 training.batch_size 2.0 +539 99 model.embedding_dim 0.0 +539 99 optimizer.lr 0.005743408008348855 +539 99 negative_sampler.num_negs_per_pos 54.0 +539 99 training.batch_size 1.0 +539 100 model.embedding_dim 1.0 +539 100 optimizer.lr 0.0012456810842634434 +539 100 negative_sampler.num_negs_per_pos 47.0 +539 100 training.batch_size 2.0 +539 1 dataset """kinships""" +539 1 model """rescal""" +539 1 loss """bceaftersigmoid""" +539 1 regularizer """no""" +539 1 optimizer """adam""" +539 1 training_loop """owa""" +539 1 negative_sampler """basic""" +539 1 evaluator """rankbased""" +539 2 dataset """kinships""" +539 2 model """rescal""" +539 2 loss """bceaftersigmoid""" +539 2 regularizer """no""" +539 2 optimizer """adam""" +539 2 training_loop """owa""" +539 2 negative_sampler """basic""" +539 2 evaluator """rankbased""" +539 3 dataset """kinships""" +539 3 model """rescal""" +539 3 loss """bceaftersigmoid""" +539 3 regularizer """no""" +539 3 optimizer """adam""" +539 3 training_loop """owa""" +539 3 negative_sampler """basic""" +539 3 evaluator """rankbased""" +539 4 dataset """kinships""" +539 4 model """rescal""" +539 4 loss """bceaftersigmoid""" +539 4 regularizer """no""" +539 4 optimizer """adam""" +539 4 training_loop """owa""" +539 4 negative_sampler """basic""" +539 4 evaluator """rankbased""" +539 5 dataset """kinships""" +539 5 model """rescal""" +539 5 loss """bceaftersigmoid""" +539 5 regularizer """no""" +539 5 optimizer """adam""" +539 5 training_loop """owa""" +539 5 negative_sampler """basic""" +539 5 evaluator """rankbased""" +539 6 dataset """kinships""" +539 6 model """rescal""" +539 6 loss """bceaftersigmoid""" +539 6 regularizer """no""" +539 6 optimizer """adam""" +539 6 training_loop """owa""" +539 6 negative_sampler """basic""" +539 6 evaluator """rankbased""" +539 7 dataset """kinships""" +539 7 
model """rescal""" +539 7 loss """bceaftersigmoid""" +539 7 regularizer """no""" +539 7 optimizer """adam""" +539 7 training_loop """owa""" +539 7 negative_sampler """basic""" +539 7 evaluator """rankbased""" +539 8 dataset """kinships""" +539 8 model """rescal""" +539 8 loss """bceaftersigmoid""" +539 8 regularizer """no""" +539 8 optimizer """adam""" +539 8 training_loop """owa""" +539 8 negative_sampler """basic""" +539 8 evaluator """rankbased""" +539 9 dataset """kinships""" +539 9 model """rescal""" +539 9 loss """bceaftersigmoid""" +539 9 regularizer """no""" +539 9 optimizer """adam""" +539 9 training_loop """owa""" +539 9 negative_sampler """basic""" +539 9 evaluator """rankbased""" +539 10 dataset """kinships""" +539 10 model """rescal""" +539 10 loss """bceaftersigmoid""" +539 10 regularizer """no""" +539 10 optimizer """adam""" +539 10 training_loop """owa""" +539 10 negative_sampler """basic""" +539 10 evaluator """rankbased""" +539 11 dataset """kinships""" +539 11 model """rescal""" +539 11 loss """bceaftersigmoid""" +539 11 regularizer """no""" +539 11 optimizer """adam""" +539 11 training_loop """owa""" +539 11 negative_sampler """basic""" +539 11 evaluator """rankbased""" +539 12 dataset """kinships""" +539 12 model """rescal""" +539 12 loss """bceaftersigmoid""" +539 12 regularizer """no""" +539 12 optimizer """adam""" +539 12 training_loop """owa""" +539 12 negative_sampler """basic""" +539 12 evaluator """rankbased""" +539 13 dataset """kinships""" +539 13 model """rescal""" +539 13 loss """bceaftersigmoid""" +539 13 regularizer """no""" +539 13 optimizer """adam""" +539 13 training_loop """owa""" +539 13 negative_sampler """basic""" +539 13 evaluator """rankbased""" +539 14 dataset """kinships""" +539 14 model """rescal""" +539 14 loss """bceaftersigmoid""" +539 14 regularizer """no""" +539 14 optimizer """adam""" +539 14 training_loop """owa""" +539 14 negative_sampler """basic""" +539 14 evaluator """rankbased""" +539 15 dataset 
"""kinships""" +539 15 model """rescal""" +539 15 loss """bceaftersigmoid""" +539 15 regularizer """no""" +539 15 optimizer """adam""" +539 15 training_loop """owa""" +539 15 negative_sampler """basic""" +539 15 evaluator """rankbased""" +539 16 dataset """kinships""" +539 16 model """rescal""" +539 16 loss """bceaftersigmoid""" +539 16 regularizer """no""" +539 16 optimizer """adam""" +539 16 training_loop """owa""" +539 16 negative_sampler """basic""" +539 16 evaluator """rankbased""" +539 17 dataset """kinships""" +539 17 model """rescal""" +539 17 loss """bceaftersigmoid""" +539 17 regularizer """no""" +539 17 optimizer """adam""" +539 17 training_loop """owa""" +539 17 negative_sampler """basic""" +539 17 evaluator """rankbased""" +539 18 dataset """kinships""" +539 18 model """rescal""" +539 18 loss """bceaftersigmoid""" +539 18 regularizer """no""" +539 18 optimizer """adam""" +539 18 training_loop """owa""" +539 18 negative_sampler """basic""" +539 18 evaluator """rankbased""" +539 19 dataset """kinships""" +539 19 model """rescal""" +539 19 loss """bceaftersigmoid""" +539 19 regularizer """no""" +539 19 optimizer """adam""" +539 19 training_loop """owa""" +539 19 negative_sampler """basic""" +539 19 evaluator """rankbased""" +539 20 dataset """kinships""" +539 20 model """rescal""" +539 20 loss """bceaftersigmoid""" +539 20 regularizer """no""" +539 20 optimizer """adam""" +539 20 training_loop """owa""" +539 20 negative_sampler """basic""" +539 20 evaluator """rankbased""" +539 21 dataset """kinships""" +539 21 model """rescal""" +539 21 loss """bceaftersigmoid""" +539 21 regularizer """no""" +539 21 optimizer """adam""" +539 21 training_loop """owa""" +539 21 negative_sampler """basic""" +539 21 evaluator """rankbased""" +539 22 dataset """kinships""" +539 22 model """rescal""" +539 22 loss """bceaftersigmoid""" +539 22 regularizer """no""" +539 22 optimizer """adam""" +539 22 training_loop """owa""" +539 22 negative_sampler """basic""" +539 22 evaluator 
"""rankbased""" +539 23 dataset """kinships""" +539 23 model """rescal""" +539 23 loss """bceaftersigmoid""" +539 23 regularizer """no""" +539 23 optimizer """adam""" +539 23 training_loop """owa""" +539 23 negative_sampler """basic""" +539 23 evaluator """rankbased""" +539 24 dataset """kinships""" +539 24 model """rescal""" +539 24 loss """bceaftersigmoid""" +539 24 regularizer """no""" +539 24 optimizer """adam""" +539 24 training_loop """owa""" +539 24 negative_sampler """basic""" +539 24 evaluator """rankbased""" +539 25 dataset """kinships""" +539 25 model """rescal""" +539 25 loss """bceaftersigmoid""" +539 25 regularizer """no""" +539 25 optimizer """adam""" +539 25 training_loop """owa""" +539 25 negative_sampler """basic""" +539 25 evaluator """rankbased""" +539 26 dataset """kinships""" +539 26 model """rescal""" +539 26 loss """bceaftersigmoid""" +539 26 regularizer """no""" +539 26 optimizer """adam""" +539 26 training_loop """owa""" +539 26 negative_sampler """basic""" +539 26 evaluator """rankbased""" +539 27 dataset """kinships""" +539 27 model """rescal""" +539 27 loss """bceaftersigmoid""" +539 27 regularizer """no""" +539 27 optimizer """adam""" +539 27 training_loop """owa""" +539 27 negative_sampler """basic""" +539 27 evaluator """rankbased""" +539 28 dataset """kinships""" +539 28 model """rescal""" +539 28 loss """bceaftersigmoid""" +539 28 regularizer """no""" +539 28 optimizer """adam""" +539 28 training_loop """owa""" +539 28 negative_sampler """basic""" +539 28 evaluator """rankbased""" +539 29 dataset """kinships""" +539 29 model """rescal""" +539 29 loss """bceaftersigmoid""" +539 29 regularizer """no""" +539 29 optimizer """adam""" +539 29 training_loop """owa""" +539 29 negative_sampler """basic""" +539 29 evaluator """rankbased""" +539 30 dataset """kinships""" +539 30 model """rescal""" +539 30 loss """bceaftersigmoid""" +539 30 regularizer """no""" +539 30 optimizer """adam""" +539 30 training_loop """owa""" +539 30 
negative_sampler """basic""" +539 30 evaluator """rankbased""" +539 31 dataset """kinships""" +539 31 model """rescal""" +539 31 loss """bceaftersigmoid""" +539 31 regularizer """no""" +539 31 optimizer """adam""" +539 31 training_loop """owa""" +539 31 negative_sampler """basic""" +539 31 evaluator """rankbased""" +539 32 dataset """kinships""" +539 32 model """rescal""" +539 32 loss """bceaftersigmoid""" +539 32 regularizer """no""" +539 32 optimizer """adam""" +539 32 training_loop """owa""" +539 32 negative_sampler """basic""" +539 32 evaluator """rankbased""" +539 33 dataset """kinships""" +539 33 model """rescal""" +539 33 loss """bceaftersigmoid""" +539 33 regularizer """no""" +539 33 optimizer """adam""" +539 33 training_loop """owa""" +539 33 negative_sampler """basic""" +539 33 evaluator """rankbased""" +539 34 dataset """kinships""" +539 34 model """rescal""" +539 34 loss """bceaftersigmoid""" +539 34 regularizer """no""" +539 34 optimizer """adam""" +539 34 training_loop """owa""" +539 34 negative_sampler """basic""" +539 34 evaluator """rankbased""" +539 35 dataset """kinships""" +539 35 model """rescal""" +539 35 loss """bceaftersigmoid""" +539 35 regularizer """no""" +539 35 optimizer """adam""" +539 35 training_loop """owa""" +539 35 negative_sampler """basic""" +539 35 evaluator """rankbased""" +539 36 dataset """kinships""" +539 36 model """rescal""" +539 36 loss """bceaftersigmoid""" +539 36 regularizer """no""" +539 36 optimizer """adam""" +539 36 training_loop """owa""" +539 36 negative_sampler """basic""" +539 36 evaluator """rankbased""" +539 37 dataset """kinships""" +539 37 model """rescal""" +539 37 loss """bceaftersigmoid""" +539 37 regularizer """no""" +539 37 optimizer """adam""" +539 37 training_loop """owa""" +539 37 negative_sampler """basic""" +539 37 evaluator """rankbased""" +539 38 dataset """kinships""" +539 38 model """rescal""" +539 38 loss """bceaftersigmoid""" +539 38 regularizer """no""" +539 38 optimizer """adam""" +539 38 
training_loop """owa""" +539 38 negative_sampler """basic""" +539 38 evaluator """rankbased""" +539 39 dataset """kinships""" +539 39 model """rescal""" +539 39 loss """bceaftersigmoid""" +539 39 regularizer """no""" +539 39 optimizer """adam""" +539 39 training_loop """owa""" +539 39 negative_sampler """basic""" +539 39 evaluator """rankbased""" +539 40 dataset """kinships""" +539 40 model """rescal""" +539 40 loss """bceaftersigmoid""" +539 40 regularizer """no""" +539 40 optimizer """adam""" +539 40 training_loop """owa""" +539 40 negative_sampler """basic""" +539 40 evaluator """rankbased""" +539 41 dataset """kinships""" +539 41 model """rescal""" +539 41 loss """bceaftersigmoid""" +539 41 regularizer """no""" +539 41 optimizer """adam""" +539 41 training_loop """owa""" +539 41 negative_sampler """basic""" +539 41 evaluator """rankbased""" +539 42 dataset """kinships""" +539 42 model """rescal""" +539 42 loss """bceaftersigmoid""" +539 42 regularizer """no""" +539 42 optimizer """adam""" +539 42 training_loop """owa""" +539 42 negative_sampler """basic""" +539 42 evaluator """rankbased""" +539 43 dataset """kinships""" +539 43 model """rescal""" +539 43 loss """bceaftersigmoid""" +539 43 regularizer """no""" +539 43 optimizer """adam""" +539 43 training_loop """owa""" +539 43 negative_sampler """basic""" +539 43 evaluator """rankbased""" +539 44 dataset """kinships""" +539 44 model """rescal""" +539 44 loss """bceaftersigmoid""" +539 44 regularizer """no""" +539 44 optimizer """adam""" +539 44 training_loop """owa""" +539 44 negative_sampler """basic""" +539 44 evaluator """rankbased""" +539 45 dataset """kinships""" +539 45 model """rescal""" +539 45 loss """bceaftersigmoid""" +539 45 regularizer """no""" +539 45 optimizer """adam""" +539 45 training_loop """owa""" +539 45 negative_sampler """basic""" +539 45 evaluator """rankbased""" +539 46 dataset """kinships""" +539 46 model """rescal""" +539 46 loss """bceaftersigmoid""" +539 46 regularizer """no""" +539 
46 optimizer """adam""" +539 46 training_loop """owa""" +539 46 negative_sampler """basic""" +539 46 evaluator """rankbased""" +539 47 dataset """kinships""" +539 47 model """rescal""" +539 47 loss """bceaftersigmoid""" +539 47 regularizer """no""" +539 47 optimizer """adam""" +539 47 training_loop """owa""" +539 47 negative_sampler """basic""" +539 47 evaluator """rankbased""" +539 48 dataset """kinships""" +539 48 model """rescal""" +539 48 loss """bceaftersigmoid""" +539 48 regularizer """no""" +539 48 optimizer """adam""" +539 48 training_loop """owa""" +539 48 negative_sampler """basic""" +539 48 evaluator """rankbased""" +539 49 dataset """kinships""" +539 49 model """rescal""" +539 49 loss """bceaftersigmoid""" +539 49 regularizer """no""" +539 49 optimizer """adam""" +539 49 training_loop """owa""" +539 49 negative_sampler """basic""" +539 49 evaluator """rankbased""" +539 50 dataset """kinships""" +539 50 model """rescal""" +539 50 loss """bceaftersigmoid""" +539 50 regularizer """no""" +539 50 optimizer """adam""" +539 50 training_loop """owa""" +539 50 negative_sampler """basic""" +539 50 evaluator """rankbased""" +539 51 dataset """kinships""" +539 51 model """rescal""" +539 51 loss """bceaftersigmoid""" +539 51 regularizer """no""" +539 51 optimizer """adam""" +539 51 training_loop """owa""" +539 51 negative_sampler """basic""" +539 51 evaluator """rankbased""" +539 52 dataset """kinships""" +539 52 model """rescal""" +539 52 loss """bceaftersigmoid""" +539 52 regularizer """no""" +539 52 optimizer """adam""" +539 52 training_loop """owa""" +539 52 negative_sampler """basic""" +539 52 evaluator """rankbased""" +539 53 dataset """kinships""" +539 53 model """rescal""" +539 53 loss """bceaftersigmoid""" +539 53 regularizer """no""" +539 53 optimizer """adam""" +539 53 training_loop """owa""" +539 53 negative_sampler """basic""" +539 53 evaluator """rankbased""" +539 54 dataset """kinships""" +539 54 model """rescal""" +539 54 loss """bceaftersigmoid""" 
+539 54 regularizer """no""" +539 54 optimizer """adam""" +539 54 training_loop """owa""" +539 54 negative_sampler """basic""" +539 54 evaluator """rankbased""" +539 55 dataset """kinships""" +539 55 model """rescal""" +539 55 loss """bceaftersigmoid""" +539 55 regularizer """no""" +539 55 optimizer """adam""" +539 55 training_loop """owa""" +539 55 negative_sampler """basic""" +539 55 evaluator """rankbased""" +539 56 dataset """kinships""" +539 56 model """rescal""" +539 56 loss """bceaftersigmoid""" +539 56 regularizer """no""" +539 56 optimizer """adam""" +539 56 training_loop """owa""" +539 56 negative_sampler """basic""" +539 56 evaluator """rankbased""" +539 57 dataset """kinships""" +539 57 model """rescal""" +539 57 loss """bceaftersigmoid""" +539 57 regularizer """no""" +539 57 optimizer """adam""" +539 57 training_loop """owa""" +539 57 negative_sampler """basic""" +539 57 evaluator """rankbased""" +539 58 dataset """kinships""" +539 58 model """rescal""" +539 58 loss """bceaftersigmoid""" +539 58 regularizer """no""" +539 58 optimizer """adam""" +539 58 training_loop """owa""" +539 58 negative_sampler """basic""" +539 58 evaluator """rankbased""" +539 59 dataset """kinships""" +539 59 model """rescal""" +539 59 loss """bceaftersigmoid""" +539 59 regularizer """no""" +539 59 optimizer """adam""" +539 59 training_loop """owa""" +539 59 negative_sampler """basic""" +539 59 evaluator """rankbased""" +539 60 dataset """kinships""" +539 60 model """rescal""" +539 60 loss """bceaftersigmoid""" +539 60 regularizer """no""" +539 60 optimizer """adam""" +539 60 training_loop """owa""" +539 60 negative_sampler """basic""" +539 60 evaluator """rankbased""" +539 61 dataset """kinships""" +539 61 model """rescal""" +539 61 loss """bceaftersigmoid""" +539 61 regularizer """no""" +539 61 optimizer """adam""" +539 61 training_loop """owa""" +539 61 negative_sampler """basic""" +539 61 evaluator """rankbased""" +539 62 dataset """kinships""" +539 62 model """rescal""" 
+539 62 loss """bceaftersigmoid""" +539 62 regularizer """no""" +539 62 optimizer """adam""" +539 62 training_loop """owa""" +539 62 negative_sampler """basic""" +539 62 evaluator """rankbased""" +539 63 dataset """kinships""" +539 63 model """rescal""" +539 63 loss """bceaftersigmoid""" +539 63 regularizer """no""" +539 63 optimizer """adam""" +539 63 training_loop """owa""" +539 63 negative_sampler """basic""" +539 63 evaluator """rankbased""" +539 64 dataset """kinships""" +539 64 model """rescal""" +539 64 loss """bceaftersigmoid""" +539 64 regularizer """no""" +539 64 optimizer """adam""" +539 64 training_loop """owa""" +539 64 negative_sampler """basic""" +539 64 evaluator """rankbased""" +539 65 dataset """kinships""" +539 65 model """rescal""" +539 65 loss """bceaftersigmoid""" +539 65 regularizer """no""" +539 65 optimizer """adam""" +539 65 training_loop """owa""" +539 65 negative_sampler """basic""" +539 65 evaluator """rankbased""" +539 66 dataset """kinships""" +539 66 model """rescal""" +539 66 loss """bceaftersigmoid""" +539 66 regularizer """no""" +539 66 optimizer """adam""" +539 66 training_loop """owa""" +539 66 negative_sampler """basic""" +539 66 evaluator """rankbased""" +539 67 dataset """kinships""" +539 67 model """rescal""" +539 67 loss """bceaftersigmoid""" +539 67 regularizer """no""" +539 67 optimizer """adam""" +539 67 training_loop """owa""" +539 67 negative_sampler """basic""" +539 67 evaluator """rankbased""" +539 68 dataset """kinships""" +539 68 model """rescal""" +539 68 loss """bceaftersigmoid""" +539 68 regularizer """no""" +539 68 optimizer """adam""" +539 68 training_loop """owa""" +539 68 negative_sampler """basic""" +539 68 evaluator """rankbased""" +539 69 dataset """kinships""" +539 69 model """rescal""" +539 69 loss """bceaftersigmoid""" +539 69 regularizer """no""" +539 69 optimizer """adam""" +539 69 training_loop """owa""" +539 69 negative_sampler """basic""" +539 69 evaluator """rankbased""" +539 70 dataset 
"""kinships""" +539 70 model """rescal""" +539 70 loss """bceaftersigmoid""" +539 70 regularizer """no""" +539 70 optimizer """adam""" +539 70 training_loop """owa""" +539 70 negative_sampler """basic""" +539 70 evaluator """rankbased""" +539 71 dataset """kinships""" +539 71 model """rescal""" +539 71 loss """bceaftersigmoid""" +539 71 regularizer """no""" +539 71 optimizer """adam""" +539 71 training_loop """owa""" +539 71 negative_sampler """basic""" +539 71 evaluator """rankbased""" +539 72 dataset """kinships""" +539 72 model """rescal""" +539 72 loss """bceaftersigmoid""" +539 72 regularizer """no""" +539 72 optimizer """adam""" +539 72 training_loop """owa""" +539 72 negative_sampler """basic""" +539 72 evaluator """rankbased""" +539 73 dataset """kinships""" +539 73 model """rescal""" +539 73 loss """bceaftersigmoid""" +539 73 regularizer """no""" +539 73 optimizer """adam""" +539 73 training_loop """owa""" +539 73 negative_sampler """basic""" +539 73 evaluator """rankbased""" +539 74 dataset """kinships""" +539 74 model """rescal""" +539 74 loss """bceaftersigmoid""" +539 74 regularizer """no""" +539 74 optimizer """adam""" +539 74 training_loop """owa""" +539 74 negative_sampler """basic""" +539 74 evaluator """rankbased""" +539 75 dataset """kinships""" +539 75 model """rescal""" +539 75 loss """bceaftersigmoid""" +539 75 regularizer """no""" +539 75 optimizer """adam""" +539 75 training_loop """owa""" +539 75 negative_sampler """basic""" +539 75 evaluator """rankbased""" +539 76 dataset """kinships""" +539 76 model """rescal""" +539 76 loss """bceaftersigmoid""" +539 76 regularizer """no""" +539 76 optimizer """adam""" +539 76 training_loop """owa""" +539 76 negative_sampler """basic""" +539 76 evaluator """rankbased""" +539 77 dataset """kinships""" +539 77 model """rescal""" +539 77 loss """bceaftersigmoid""" +539 77 regularizer """no""" +539 77 optimizer """adam""" +539 77 training_loop """owa""" +539 77 negative_sampler """basic""" +539 77 evaluator 
"""rankbased""" +539 78 dataset """kinships""" +539 78 model """rescal""" +539 78 loss """bceaftersigmoid""" +539 78 regularizer """no""" +539 78 optimizer """adam""" +539 78 training_loop """owa""" +539 78 negative_sampler """basic""" +539 78 evaluator """rankbased""" +539 79 dataset """kinships""" +539 79 model """rescal""" +539 79 loss """bceaftersigmoid""" +539 79 regularizer """no""" +539 79 optimizer """adam""" +539 79 training_loop """owa""" +539 79 negative_sampler """basic""" +539 79 evaluator """rankbased""" +539 80 dataset """kinships""" +539 80 model """rescal""" +539 80 loss """bceaftersigmoid""" +539 80 regularizer """no""" +539 80 optimizer """adam""" +539 80 training_loop """owa""" +539 80 negative_sampler """basic""" +539 80 evaluator """rankbased""" +539 81 dataset """kinships""" +539 81 model """rescal""" +539 81 loss """bceaftersigmoid""" +539 81 regularizer """no""" +539 81 optimizer """adam""" +539 81 training_loop """owa""" +539 81 negative_sampler """basic""" +539 81 evaluator """rankbased""" +539 82 dataset """kinships""" +539 82 model """rescal""" +539 82 loss """bceaftersigmoid""" +539 82 regularizer """no""" +539 82 optimizer """adam""" +539 82 training_loop """owa""" +539 82 negative_sampler """basic""" +539 82 evaluator """rankbased""" +539 83 dataset """kinships""" +539 83 model """rescal""" +539 83 loss """bceaftersigmoid""" +539 83 regularizer """no""" +539 83 optimizer """adam""" +539 83 training_loop """owa""" +539 83 negative_sampler """basic""" +539 83 evaluator """rankbased""" +539 84 dataset """kinships""" +539 84 model """rescal""" +539 84 loss """bceaftersigmoid""" +539 84 regularizer """no""" +539 84 optimizer """adam""" +539 84 training_loop """owa""" +539 84 negative_sampler """basic""" +539 84 evaluator """rankbased""" +539 85 dataset """kinships""" +539 85 model """rescal""" +539 85 loss """bceaftersigmoid""" +539 85 regularizer """no""" +539 85 optimizer """adam""" +539 85 training_loop """owa""" +539 85 
negative_sampler """basic""" +539 85 evaluator """rankbased""" +539 86 dataset """kinships""" +539 86 model """rescal""" +539 86 loss """bceaftersigmoid""" +539 86 regularizer """no""" +539 86 optimizer """adam""" +539 86 training_loop """owa""" +539 86 negative_sampler """basic""" +539 86 evaluator """rankbased""" +539 87 dataset """kinships""" +539 87 model """rescal""" +539 87 loss """bceaftersigmoid""" +539 87 regularizer """no""" +539 87 optimizer """adam""" +539 87 training_loop """owa""" +539 87 negative_sampler """basic""" +539 87 evaluator """rankbased""" +539 88 dataset """kinships""" +539 88 model """rescal""" +539 88 loss """bceaftersigmoid""" +539 88 regularizer """no""" +539 88 optimizer """adam""" +539 88 training_loop """owa""" +539 88 negative_sampler """basic""" +539 88 evaluator """rankbased""" +539 89 dataset """kinships""" +539 89 model """rescal""" +539 89 loss """bceaftersigmoid""" +539 89 regularizer """no""" +539 89 optimizer """adam""" +539 89 training_loop """owa""" +539 89 negative_sampler """basic""" +539 89 evaluator """rankbased""" +539 90 dataset """kinships""" +539 90 model """rescal""" +539 90 loss """bceaftersigmoid""" +539 90 regularizer """no""" +539 90 optimizer """adam""" +539 90 training_loop """owa""" +539 90 negative_sampler """basic""" +539 90 evaluator """rankbased""" +539 91 dataset """kinships""" +539 91 model """rescal""" +539 91 loss """bceaftersigmoid""" +539 91 regularizer """no""" +539 91 optimizer """adam""" +539 91 training_loop """owa""" +539 91 negative_sampler """basic""" +539 91 evaluator """rankbased""" +539 92 dataset """kinships""" +539 92 model """rescal""" +539 92 loss """bceaftersigmoid""" +539 92 regularizer """no""" +539 92 optimizer """adam""" +539 92 training_loop """owa""" +539 92 negative_sampler """basic""" +539 92 evaluator """rankbased""" +539 93 dataset """kinships""" +539 93 model """rescal""" +539 93 loss """bceaftersigmoid""" +539 93 regularizer """no""" +539 93 optimizer """adam""" +539 93 
training_loop """owa""" +539 93 negative_sampler """basic""" +539 93 evaluator """rankbased""" +539 94 dataset """kinships""" +539 94 model """rescal""" +539 94 loss """bceaftersigmoid""" +539 94 regularizer """no""" +539 94 optimizer """adam""" +539 94 training_loop """owa""" +539 94 negative_sampler """basic""" +539 94 evaluator """rankbased""" +539 95 dataset """kinships""" +539 95 model """rescal""" +539 95 loss """bceaftersigmoid""" +539 95 regularizer """no""" +539 95 optimizer """adam""" +539 95 training_loop """owa""" +539 95 negative_sampler """basic""" +539 95 evaluator """rankbased""" +539 96 dataset """kinships""" +539 96 model """rescal""" +539 96 loss """bceaftersigmoid""" +539 96 regularizer """no""" +539 96 optimizer """adam""" +539 96 training_loop """owa""" +539 96 negative_sampler """basic""" +539 96 evaluator """rankbased""" +539 97 dataset """kinships""" +539 97 model """rescal""" +539 97 loss """bceaftersigmoid""" +539 97 regularizer """no""" +539 97 optimizer """adam""" +539 97 training_loop """owa""" +539 97 negative_sampler """basic""" +539 97 evaluator """rankbased""" +539 98 dataset """kinships""" +539 98 model """rescal""" +539 98 loss """bceaftersigmoid""" +539 98 regularizer """no""" +539 98 optimizer """adam""" +539 98 training_loop """owa""" +539 98 negative_sampler """basic""" +539 98 evaluator """rankbased""" +539 99 dataset """kinships""" +539 99 model """rescal""" +539 99 loss """bceaftersigmoid""" +539 99 regularizer """no""" +539 99 optimizer """adam""" +539 99 training_loop """owa""" +539 99 negative_sampler """basic""" +539 99 evaluator """rankbased""" +539 100 dataset """kinships""" +539 100 model """rescal""" +539 100 loss """bceaftersigmoid""" +539 100 regularizer """no""" +539 100 optimizer """adam""" +539 100 training_loop """owa""" +539 100 negative_sampler """basic""" +539 100 evaluator """rankbased""" +540 1 model.embedding_dim 0.0 +540 1 optimizer.lr 0.048146330280705266 +540 1 negative_sampler.num_negs_per_pos 7.0 
+540 1 training.batch_size 2.0 +540 2 model.embedding_dim 2.0 +540 2 optimizer.lr 0.018066253222434988 +540 2 negative_sampler.num_negs_per_pos 31.0 +540 2 training.batch_size 2.0 +540 3 model.embedding_dim 0.0 +540 3 optimizer.lr 0.011973797497942792 +540 3 negative_sampler.num_negs_per_pos 0.0 +540 3 training.batch_size 1.0 +540 4 model.embedding_dim 0.0 +540 4 optimizer.lr 0.009176617295656242 +540 4 negative_sampler.num_negs_per_pos 96.0 +540 4 training.batch_size 1.0 +540 5 model.embedding_dim 0.0 +540 5 optimizer.lr 0.09845760949502468 +540 5 negative_sampler.num_negs_per_pos 25.0 +540 5 training.batch_size 0.0 +540 6 model.embedding_dim 1.0 +540 6 optimizer.lr 0.006135828626939266 +540 6 negative_sampler.num_negs_per_pos 33.0 +540 6 training.batch_size 2.0 +540 7 model.embedding_dim 0.0 +540 7 optimizer.lr 0.06093522832574654 +540 7 negative_sampler.num_negs_per_pos 55.0 +540 7 training.batch_size 0.0 +540 8 model.embedding_dim 0.0 +540 8 optimizer.lr 0.06926186789313393 +540 8 negative_sampler.num_negs_per_pos 43.0 +540 8 training.batch_size 0.0 +540 9 model.embedding_dim 0.0 +540 9 optimizer.lr 0.001784311737161828 +540 9 negative_sampler.num_negs_per_pos 14.0 +540 9 training.batch_size 0.0 +540 10 model.embedding_dim 1.0 +540 10 optimizer.lr 0.05456715921414968 +540 10 negative_sampler.num_negs_per_pos 24.0 +540 10 training.batch_size 2.0 +540 11 model.embedding_dim 2.0 +540 11 optimizer.lr 0.013319515976154285 +540 11 negative_sampler.num_negs_per_pos 5.0 +540 11 training.batch_size 2.0 +540 12 model.embedding_dim 0.0 +540 12 optimizer.lr 0.009696094780612861 +540 12 negative_sampler.num_negs_per_pos 78.0 +540 12 training.batch_size 1.0 +540 13 model.embedding_dim 0.0 +540 13 optimizer.lr 0.0012112266419410575 +540 13 negative_sampler.num_negs_per_pos 55.0 +540 13 training.batch_size 0.0 +540 14 model.embedding_dim 2.0 +540 14 optimizer.lr 0.002360861612819698 +540 14 negative_sampler.num_negs_per_pos 30.0 +540 14 training.batch_size 2.0 +540 15 
model.embedding_dim 2.0 +540 15 optimizer.lr 0.0026100590178987984 +540 15 negative_sampler.num_negs_per_pos 45.0 +540 15 training.batch_size 2.0 +540 16 model.embedding_dim 0.0 +540 16 optimizer.lr 0.002693174380326495 +540 16 negative_sampler.num_negs_per_pos 17.0 +540 16 training.batch_size 0.0 +540 17 model.embedding_dim 1.0 +540 17 optimizer.lr 0.001019782441091343 +540 17 negative_sampler.num_negs_per_pos 47.0 +540 17 training.batch_size 1.0 +540 18 model.embedding_dim 0.0 +540 18 optimizer.lr 0.008203379557773111 +540 18 negative_sampler.num_negs_per_pos 49.0 +540 18 training.batch_size 1.0 +540 19 model.embedding_dim 1.0 +540 19 optimizer.lr 0.019483129545364172 +540 19 negative_sampler.num_negs_per_pos 37.0 +540 19 training.batch_size 0.0 +540 20 model.embedding_dim 0.0 +540 20 optimizer.lr 0.015270086739059488 +540 20 negative_sampler.num_negs_per_pos 77.0 +540 20 training.batch_size 2.0 +540 21 model.embedding_dim 2.0 +540 21 optimizer.lr 0.004589332798134848 +540 21 negative_sampler.num_negs_per_pos 36.0 +540 21 training.batch_size 0.0 +540 22 model.embedding_dim 1.0 +540 22 optimizer.lr 0.005769314759683418 +540 22 negative_sampler.num_negs_per_pos 10.0 +540 22 training.batch_size 0.0 +540 23 model.embedding_dim 2.0 +540 23 optimizer.lr 0.06978061092030355 +540 23 negative_sampler.num_negs_per_pos 57.0 +540 23 training.batch_size 1.0 +540 24 model.embedding_dim 1.0 +540 24 optimizer.lr 0.002279960410253889 +540 24 negative_sampler.num_negs_per_pos 96.0 +540 24 training.batch_size 2.0 +540 25 model.embedding_dim 2.0 +540 25 optimizer.lr 0.0015955578246332207 +540 25 negative_sampler.num_negs_per_pos 79.0 +540 25 training.batch_size 1.0 +540 26 model.embedding_dim 0.0 +540 26 optimizer.lr 0.016081756257044808 +540 26 negative_sampler.num_negs_per_pos 92.0 +540 26 training.batch_size 0.0 +540 27 model.embedding_dim 1.0 +540 27 optimizer.lr 0.03661842399104317 +540 27 negative_sampler.num_negs_per_pos 80.0 +540 27 training.batch_size 0.0 +540 28 
model.embedding_dim 0.0 +540 28 optimizer.lr 0.0018420700266232372 +540 28 negative_sampler.num_negs_per_pos 35.0 +540 28 training.batch_size 1.0 +540 29 model.embedding_dim 2.0 +540 29 optimizer.lr 0.08503825099715277 +540 29 negative_sampler.num_negs_per_pos 46.0 +540 29 training.batch_size 2.0 +540 30 model.embedding_dim 1.0 +540 30 optimizer.lr 0.0036241697683495762 +540 30 negative_sampler.num_negs_per_pos 41.0 +540 30 training.batch_size 2.0 +540 31 model.embedding_dim 2.0 +540 31 optimizer.lr 0.02887007388430242 +540 31 negative_sampler.num_negs_per_pos 19.0 +540 31 training.batch_size 1.0 +540 32 model.embedding_dim 2.0 +540 32 optimizer.lr 0.08792153032297044 +540 32 negative_sampler.num_negs_per_pos 19.0 +540 32 training.batch_size 2.0 +540 33 model.embedding_dim 0.0 +540 33 optimizer.lr 0.0025371949002859187 +540 33 negative_sampler.num_negs_per_pos 89.0 +540 33 training.batch_size 2.0 +540 34 model.embedding_dim 2.0 +540 34 optimizer.lr 0.003921542835372619 +540 34 negative_sampler.num_negs_per_pos 96.0 +540 34 training.batch_size 1.0 +540 35 model.embedding_dim 0.0 +540 35 optimizer.lr 0.001665338107807097 +540 35 negative_sampler.num_negs_per_pos 40.0 +540 35 training.batch_size 1.0 +540 36 model.embedding_dim 0.0 +540 36 optimizer.lr 0.017443057157504608 +540 36 negative_sampler.num_negs_per_pos 66.0 +540 36 training.batch_size 0.0 +540 37 model.embedding_dim 2.0 +540 37 optimizer.lr 0.016816109681599807 +540 37 negative_sampler.num_negs_per_pos 98.0 +540 37 training.batch_size 1.0 +540 38 model.embedding_dim 2.0 +540 38 optimizer.lr 0.001354051225708207 +540 38 negative_sampler.num_negs_per_pos 67.0 +540 38 training.batch_size 2.0 +540 39 model.embedding_dim 2.0 +540 39 optimizer.lr 0.03969136991307343 +540 39 negative_sampler.num_negs_per_pos 75.0 +540 39 training.batch_size 1.0 +540 40 model.embedding_dim 0.0 +540 40 optimizer.lr 0.0032259382348512793 +540 40 negative_sampler.num_negs_per_pos 70.0 +540 40 training.batch_size 2.0 +540 41 
model.embedding_dim 2.0 +540 41 optimizer.lr 0.0015686881593392876 +540 41 negative_sampler.num_negs_per_pos 59.0 +540 41 training.batch_size 0.0 +540 42 model.embedding_dim 1.0 +540 42 optimizer.lr 0.00562862718020085 +540 42 negative_sampler.num_negs_per_pos 39.0 +540 42 training.batch_size 2.0 +540 43 model.embedding_dim 0.0 +540 43 optimizer.lr 0.08760271059985707 +540 43 negative_sampler.num_negs_per_pos 4.0 +540 43 training.batch_size 0.0 +540 44 model.embedding_dim 0.0 +540 44 optimizer.lr 0.0017629330506058099 +540 44 negative_sampler.num_negs_per_pos 0.0 +540 44 training.batch_size 0.0 +540 45 model.embedding_dim 0.0 +540 45 optimizer.lr 0.0062217595657728855 +540 45 negative_sampler.num_negs_per_pos 66.0 +540 45 training.batch_size 0.0 +540 46 model.embedding_dim 1.0 +540 46 optimizer.lr 0.014329235238250663 +540 46 negative_sampler.num_negs_per_pos 9.0 +540 46 training.batch_size 1.0 +540 47 model.embedding_dim 1.0 +540 47 optimizer.lr 0.007930248772868825 +540 47 negative_sampler.num_negs_per_pos 29.0 +540 47 training.batch_size 2.0 +540 48 model.embedding_dim 1.0 +540 48 optimizer.lr 0.002066911104800519 +540 48 negative_sampler.num_negs_per_pos 75.0 +540 48 training.batch_size 0.0 +540 49 model.embedding_dim 1.0 +540 49 optimizer.lr 0.005608006664893212 +540 49 negative_sampler.num_negs_per_pos 9.0 +540 49 training.batch_size 2.0 +540 50 model.embedding_dim 2.0 +540 50 optimizer.lr 0.05559298075861851 +540 50 negative_sampler.num_negs_per_pos 99.0 +540 50 training.batch_size 1.0 +540 51 model.embedding_dim 2.0 +540 51 optimizer.lr 0.023057315645536623 +540 51 negative_sampler.num_negs_per_pos 21.0 +540 51 training.batch_size 2.0 +540 52 model.embedding_dim 1.0 +540 52 optimizer.lr 0.012577054035964175 +540 52 negative_sampler.num_negs_per_pos 33.0 +540 52 training.batch_size 2.0 +540 53 model.embedding_dim 1.0 +540 53 optimizer.lr 0.005850800709266845 +540 53 negative_sampler.num_negs_per_pos 47.0 +540 53 training.batch_size 2.0 +540 54 
model.embedding_dim 1.0 +540 54 optimizer.lr 0.0018887454409594047 +540 54 negative_sampler.num_negs_per_pos 4.0 +540 54 training.batch_size 1.0 +540 55 model.embedding_dim 1.0 +540 55 optimizer.lr 0.001476402782317665 +540 55 negative_sampler.num_negs_per_pos 80.0 +540 55 training.batch_size 0.0 +540 56 model.embedding_dim 0.0 +540 56 optimizer.lr 0.03065738077857134 +540 56 negative_sampler.num_negs_per_pos 42.0 +540 56 training.batch_size 2.0 +540 57 model.embedding_dim 1.0 +540 57 optimizer.lr 0.011995861483928593 +540 57 negative_sampler.num_negs_per_pos 32.0 +540 57 training.batch_size 0.0 +540 58 model.embedding_dim 2.0 +540 58 optimizer.lr 0.0027592778714500854 +540 58 negative_sampler.num_negs_per_pos 58.0 +540 58 training.batch_size 2.0 +540 59 model.embedding_dim 1.0 +540 59 optimizer.lr 0.05225788405123253 +540 59 negative_sampler.num_negs_per_pos 41.0 +540 59 training.batch_size 2.0 +540 60 model.embedding_dim 1.0 +540 60 optimizer.lr 0.012249578679263141 +540 60 negative_sampler.num_negs_per_pos 14.0 +540 60 training.batch_size 1.0 +540 61 model.embedding_dim 0.0 +540 61 optimizer.lr 0.023750106902539408 +540 61 negative_sampler.num_negs_per_pos 3.0 +540 61 training.batch_size 1.0 +540 62 model.embedding_dim 2.0 +540 62 optimizer.lr 0.01243301748828255 +540 62 negative_sampler.num_negs_per_pos 89.0 +540 62 training.batch_size 0.0 +540 63 model.embedding_dim 1.0 +540 63 optimizer.lr 0.010744971890009133 +540 63 negative_sampler.num_negs_per_pos 55.0 +540 63 training.batch_size 1.0 +540 64 model.embedding_dim 0.0 +540 64 optimizer.lr 0.0012183568656953237 +540 64 negative_sampler.num_negs_per_pos 32.0 +540 64 training.batch_size 2.0 +540 65 model.embedding_dim 1.0 +540 65 optimizer.lr 0.011589146580375187 +540 65 negative_sampler.num_negs_per_pos 28.0 +540 65 training.batch_size 2.0 +540 66 model.embedding_dim 2.0 +540 66 optimizer.lr 0.0016131784616405305 +540 66 negative_sampler.num_negs_per_pos 1.0 +540 66 training.batch_size 1.0 +540 67 
model.embedding_dim 2.0 +540 67 optimizer.lr 0.0362014546109159 +540 67 negative_sampler.num_negs_per_pos 33.0 +540 67 training.batch_size 2.0 +540 68 model.embedding_dim 2.0 +540 68 optimizer.lr 0.0031498981581509415 +540 68 negative_sampler.num_negs_per_pos 52.0 +540 68 training.batch_size 2.0 +540 69 model.embedding_dim 2.0 +540 69 optimizer.lr 0.005653630496907869 +540 69 negative_sampler.num_negs_per_pos 15.0 +540 69 training.batch_size 2.0 +540 70 model.embedding_dim 0.0 +540 70 optimizer.lr 0.005985346080420111 +540 70 negative_sampler.num_negs_per_pos 44.0 +540 70 training.batch_size 2.0 +540 71 model.embedding_dim 1.0 +540 71 optimizer.lr 0.05834873236344681 +540 71 negative_sampler.num_negs_per_pos 7.0 +540 71 training.batch_size 2.0 +540 72 model.embedding_dim 2.0 +540 72 optimizer.lr 0.0032916773374171743 +540 72 negative_sampler.num_negs_per_pos 40.0 +540 72 training.batch_size 0.0 +540 73 model.embedding_dim 2.0 +540 73 optimizer.lr 0.00940415174558496 +540 73 negative_sampler.num_negs_per_pos 31.0 +540 73 training.batch_size 1.0 +540 74 model.embedding_dim 2.0 +540 74 optimizer.lr 0.0029129266635327025 +540 74 negative_sampler.num_negs_per_pos 28.0 +540 74 training.batch_size 2.0 +540 75 model.embedding_dim 0.0 +540 75 optimizer.lr 0.004786618322616688 +540 75 negative_sampler.num_negs_per_pos 62.0 +540 75 training.batch_size 1.0 +540 76 model.embedding_dim 0.0 +540 76 optimizer.lr 0.004814726754248139 +540 76 negative_sampler.num_negs_per_pos 68.0 +540 76 training.batch_size 0.0 +540 77 model.embedding_dim 0.0 +540 77 optimizer.lr 0.01594645798108079 +540 77 negative_sampler.num_negs_per_pos 26.0 +540 77 training.batch_size 1.0 +540 78 model.embedding_dim 1.0 +540 78 optimizer.lr 0.006437754257506568 +540 78 negative_sampler.num_negs_per_pos 74.0 +540 78 training.batch_size 1.0 +540 79 model.embedding_dim 1.0 +540 79 optimizer.lr 0.026379862805201087 +540 79 negative_sampler.num_negs_per_pos 86.0 +540 79 training.batch_size 0.0 +540 80 
model.embedding_dim 1.0 +540 80 optimizer.lr 0.0037034972185574135 +540 80 negative_sampler.num_negs_per_pos 55.0 +540 80 training.batch_size 1.0 +540 81 model.embedding_dim 0.0 +540 81 optimizer.lr 0.008653512419551502 +540 81 negative_sampler.num_negs_per_pos 88.0 +540 81 training.batch_size 1.0 +540 82 model.embedding_dim 2.0 +540 82 optimizer.lr 0.0036052716676059538 +540 82 negative_sampler.num_negs_per_pos 93.0 +540 82 training.batch_size 1.0 +540 83 model.embedding_dim 2.0 +540 83 optimizer.lr 0.0013898005548201853 +540 83 negative_sampler.num_negs_per_pos 0.0 +540 83 training.batch_size 0.0 +540 84 model.embedding_dim 0.0 +540 84 optimizer.lr 0.020502141440535342 +540 84 negative_sampler.num_negs_per_pos 51.0 +540 84 training.batch_size 1.0 +540 85 model.embedding_dim 2.0 +540 85 optimizer.lr 0.0012190092618966535 +540 85 negative_sampler.num_negs_per_pos 65.0 +540 85 training.batch_size 1.0 +540 86 model.embedding_dim 0.0 +540 86 optimizer.lr 0.07766589640117863 +540 86 negative_sampler.num_negs_per_pos 6.0 +540 86 training.batch_size 1.0 +540 87 model.embedding_dim 1.0 +540 87 optimizer.lr 0.0029510472225503384 +540 87 negative_sampler.num_negs_per_pos 68.0 +540 87 training.batch_size 2.0 +540 88 model.embedding_dim 0.0 +540 88 optimizer.lr 0.07923204222730684 +540 88 negative_sampler.num_negs_per_pos 83.0 +540 88 training.batch_size 0.0 +540 89 model.embedding_dim 1.0 +540 89 optimizer.lr 0.04093507518490548 +540 89 negative_sampler.num_negs_per_pos 74.0 +540 89 training.batch_size 2.0 +540 90 model.embedding_dim 2.0 +540 90 optimizer.lr 0.00982436802355275 +540 90 negative_sampler.num_negs_per_pos 16.0 +540 90 training.batch_size 0.0 +540 91 model.embedding_dim 0.0 +540 91 optimizer.lr 0.005912137169629886 +540 91 negative_sampler.num_negs_per_pos 3.0 +540 91 training.batch_size 2.0 +540 92 model.embedding_dim 2.0 +540 92 optimizer.lr 0.0034453348081374155 +540 92 negative_sampler.num_negs_per_pos 61.0 +540 92 training.batch_size 2.0 +540 93 
model.embedding_dim 0.0 +540 93 optimizer.lr 0.04341538799343801 +540 93 negative_sampler.num_negs_per_pos 97.0 +540 93 training.batch_size 2.0 +540 94 model.embedding_dim 0.0 +540 94 optimizer.lr 0.022963971911189183 +540 94 negative_sampler.num_negs_per_pos 81.0 +540 94 training.batch_size 0.0 +540 95 model.embedding_dim 0.0 +540 95 optimizer.lr 0.0020032940060173057 +540 95 negative_sampler.num_negs_per_pos 8.0 +540 95 training.batch_size 2.0 +540 96 model.embedding_dim 1.0 +540 96 optimizer.lr 0.07898478713492381 +540 96 negative_sampler.num_negs_per_pos 54.0 +540 96 training.batch_size 1.0 +540 97 model.embedding_dim 1.0 +540 97 optimizer.lr 0.055840229305117194 +540 97 negative_sampler.num_negs_per_pos 11.0 +540 97 training.batch_size 2.0 +540 98 model.embedding_dim 2.0 +540 98 optimizer.lr 0.02638413709942664 +540 98 negative_sampler.num_negs_per_pos 61.0 +540 98 training.batch_size 2.0 +540 99 model.embedding_dim 2.0 +540 99 optimizer.lr 0.02916627217804756 +540 99 negative_sampler.num_negs_per_pos 69.0 +540 99 training.batch_size 1.0 +540 100 model.embedding_dim 2.0 +540 100 optimizer.lr 0.007897434866664712 +540 100 negative_sampler.num_negs_per_pos 76.0 +540 100 training.batch_size 1.0 +540 1 dataset """kinships""" +540 1 model """rescal""" +540 1 loss """softplus""" +540 1 regularizer """no""" +540 1 optimizer """adam""" +540 1 training_loop """owa""" +540 1 negative_sampler """basic""" +540 1 evaluator """rankbased""" +540 2 dataset """kinships""" +540 2 model """rescal""" +540 2 loss """softplus""" +540 2 regularizer """no""" +540 2 optimizer """adam""" +540 2 training_loop """owa""" +540 2 negative_sampler """basic""" +540 2 evaluator """rankbased""" +540 3 dataset """kinships""" +540 3 model """rescal""" +540 3 loss """softplus""" +540 3 regularizer """no""" +540 3 optimizer """adam""" +540 3 training_loop """owa""" +540 3 negative_sampler """basic""" +540 3 evaluator """rankbased""" +540 4 dataset """kinships""" +540 4 model """rescal""" +540 4 
loss """softplus""" +540 4 regularizer """no""" +540 4 optimizer """adam""" +540 4 training_loop """owa""" +540 4 negative_sampler """basic""" +540 4 evaluator """rankbased""" +540 5 dataset """kinships""" +540 5 model """rescal""" +540 5 loss """softplus""" +540 5 regularizer """no""" +540 5 optimizer """adam""" +540 5 training_loop """owa""" +540 5 negative_sampler """basic""" +540 5 evaluator """rankbased""" +540 6 dataset """kinships""" +540 6 model """rescal""" +540 6 loss """softplus""" +540 6 regularizer """no""" +540 6 optimizer """adam""" +540 6 training_loop """owa""" +540 6 negative_sampler """basic""" +540 6 evaluator """rankbased""" +540 7 dataset """kinships""" +540 7 model """rescal""" +540 7 loss """softplus""" +540 7 regularizer """no""" +540 7 optimizer """adam""" +540 7 training_loop """owa""" +540 7 negative_sampler """basic""" +540 7 evaluator """rankbased""" +540 8 dataset """kinships""" +540 8 model """rescal""" +540 8 loss """softplus""" +540 8 regularizer """no""" +540 8 optimizer """adam""" +540 8 training_loop """owa""" +540 8 negative_sampler """basic""" +540 8 evaluator """rankbased""" +540 9 dataset """kinships""" +540 9 model """rescal""" +540 9 loss """softplus""" +540 9 regularizer """no""" +540 9 optimizer """adam""" +540 9 training_loop """owa""" +540 9 negative_sampler """basic""" +540 9 evaluator """rankbased""" +540 10 dataset """kinships""" +540 10 model """rescal""" +540 10 loss """softplus""" +540 10 regularizer """no""" +540 10 optimizer """adam""" +540 10 training_loop """owa""" +540 10 negative_sampler """basic""" +540 10 evaluator """rankbased""" +540 11 dataset """kinships""" +540 11 model """rescal""" +540 11 loss """softplus""" +540 11 regularizer """no""" +540 11 optimizer """adam""" +540 11 training_loop """owa""" +540 11 negative_sampler """basic""" +540 11 evaluator """rankbased""" +540 12 dataset """kinships""" +540 12 model """rescal""" +540 12 loss """softplus""" +540 12 regularizer """no""" +540 12 optimizer 
"""adam""" +540 12 training_loop """owa""" +540 12 negative_sampler """basic""" +540 12 evaluator """rankbased""" +540 13 dataset """kinships""" +540 13 model """rescal""" +540 13 loss """softplus""" +540 13 regularizer """no""" +540 13 optimizer """adam""" +540 13 training_loop """owa""" +540 13 negative_sampler """basic""" +540 13 evaluator """rankbased""" +540 14 dataset """kinships""" +540 14 model """rescal""" +540 14 loss """softplus""" +540 14 regularizer """no""" +540 14 optimizer """adam""" +540 14 training_loop """owa""" +540 14 negative_sampler """basic""" +540 14 evaluator """rankbased""" +540 15 dataset """kinships""" +540 15 model """rescal""" +540 15 loss """softplus""" +540 15 regularizer """no""" +540 15 optimizer """adam""" +540 15 training_loop """owa""" +540 15 negative_sampler """basic""" +540 15 evaluator """rankbased""" +540 16 dataset """kinships""" +540 16 model """rescal""" +540 16 loss """softplus""" +540 16 regularizer """no""" +540 16 optimizer """adam""" +540 16 training_loop """owa""" +540 16 negative_sampler """basic""" +540 16 evaluator """rankbased""" +540 17 dataset """kinships""" +540 17 model """rescal""" +540 17 loss """softplus""" +540 17 regularizer """no""" +540 17 optimizer """adam""" +540 17 training_loop """owa""" +540 17 negative_sampler """basic""" +540 17 evaluator """rankbased""" +540 18 dataset """kinships""" +540 18 model """rescal""" +540 18 loss """softplus""" +540 18 regularizer """no""" +540 18 optimizer """adam""" +540 18 training_loop """owa""" +540 18 negative_sampler """basic""" +540 18 evaluator """rankbased""" +540 19 dataset """kinships""" +540 19 model """rescal""" +540 19 loss """softplus""" +540 19 regularizer """no""" +540 19 optimizer """adam""" +540 19 training_loop """owa""" +540 19 negative_sampler """basic""" +540 19 evaluator """rankbased""" +540 20 dataset """kinships""" +540 20 model """rescal""" +540 20 loss """softplus""" +540 20 regularizer """no""" +540 20 optimizer """adam""" +540 20 
training_loop """owa""" +540 20 negative_sampler """basic""" +540 20 evaluator """rankbased""" +540 21 dataset """kinships""" +540 21 model """rescal""" +540 21 loss """softplus""" +540 21 regularizer """no""" +540 21 optimizer """adam""" +540 21 training_loop """owa""" +540 21 negative_sampler """basic""" +540 21 evaluator """rankbased""" +540 22 dataset """kinships""" +540 22 model """rescal""" +540 22 loss """softplus""" +540 22 regularizer """no""" +540 22 optimizer """adam""" +540 22 training_loop """owa""" +540 22 negative_sampler """basic""" +540 22 evaluator """rankbased""" +540 23 dataset """kinships""" +540 23 model """rescal""" +540 23 loss """softplus""" +540 23 regularizer """no""" +540 23 optimizer """adam""" +540 23 training_loop """owa""" +540 23 negative_sampler """basic""" +540 23 evaluator """rankbased""" +540 24 dataset """kinships""" +540 24 model """rescal""" +540 24 loss """softplus""" +540 24 regularizer """no""" +540 24 optimizer """adam""" +540 24 training_loop """owa""" +540 24 negative_sampler """basic""" +540 24 evaluator """rankbased""" +540 25 dataset """kinships""" +540 25 model """rescal""" +540 25 loss """softplus""" +540 25 regularizer """no""" +540 25 optimizer """adam""" +540 25 training_loop """owa""" +540 25 negative_sampler """basic""" +540 25 evaluator """rankbased""" +540 26 dataset """kinships""" +540 26 model """rescal""" +540 26 loss """softplus""" +540 26 regularizer """no""" +540 26 optimizer """adam""" +540 26 training_loop """owa""" +540 26 negative_sampler """basic""" +540 26 evaluator """rankbased""" +540 27 dataset """kinships""" +540 27 model """rescal""" +540 27 loss """softplus""" +540 27 regularizer """no""" +540 27 optimizer """adam""" +540 27 training_loop """owa""" +540 27 negative_sampler """basic""" +540 27 evaluator """rankbased""" +540 28 dataset """kinships""" +540 28 model """rescal""" +540 28 loss """softplus""" +540 28 regularizer """no""" +540 28 optimizer """adam""" +540 28 training_loop """owa""" 
+540 28 negative_sampler """basic""" +540 28 evaluator """rankbased""" +540 29 dataset """kinships""" +540 29 model """rescal""" +540 29 loss """softplus""" +540 29 regularizer """no""" +540 29 optimizer """adam""" +540 29 training_loop """owa""" +540 29 negative_sampler """basic""" +540 29 evaluator """rankbased""" +540 30 dataset """kinships""" +540 30 model """rescal""" +540 30 loss """softplus""" +540 30 regularizer """no""" +540 30 optimizer """adam""" +540 30 training_loop """owa""" +540 30 negative_sampler """basic""" +540 30 evaluator """rankbased""" +540 31 dataset """kinships""" +540 31 model """rescal""" +540 31 loss """softplus""" +540 31 regularizer """no""" +540 31 optimizer """adam""" +540 31 training_loop """owa""" +540 31 negative_sampler """basic""" +540 31 evaluator """rankbased""" +540 32 dataset """kinships""" +540 32 model """rescal""" +540 32 loss """softplus""" +540 32 regularizer """no""" +540 32 optimizer """adam""" +540 32 training_loop """owa""" +540 32 negative_sampler """basic""" +540 32 evaluator """rankbased""" +540 33 dataset """kinships""" +540 33 model """rescal""" +540 33 loss """softplus""" +540 33 regularizer """no""" +540 33 optimizer """adam""" +540 33 training_loop """owa""" +540 33 negative_sampler """basic""" +540 33 evaluator """rankbased""" +540 34 dataset """kinships""" +540 34 model """rescal""" +540 34 loss """softplus""" +540 34 regularizer """no""" +540 34 optimizer """adam""" +540 34 training_loop """owa""" +540 34 negative_sampler """basic""" +540 34 evaluator """rankbased""" +540 35 dataset """kinships""" +540 35 model """rescal""" +540 35 loss """softplus""" +540 35 regularizer """no""" +540 35 optimizer """adam""" +540 35 training_loop """owa""" +540 35 negative_sampler """basic""" +540 35 evaluator """rankbased""" +540 36 dataset """kinships""" +540 36 model """rescal""" +540 36 loss """softplus""" +540 36 regularizer """no""" +540 36 optimizer """adam""" +540 36 training_loop """owa""" +540 36 
negative_sampler """basic""" +540 36 evaluator """rankbased""" +540 37 dataset """kinships""" +540 37 model """rescal""" +540 37 loss """softplus""" +540 37 regularizer """no""" +540 37 optimizer """adam""" +540 37 training_loop """owa""" +540 37 negative_sampler """basic""" +540 37 evaluator """rankbased""" +540 38 dataset """kinships""" +540 38 model """rescal""" +540 38 loss """softplus""" +540 38 regularizer """no""" +540 38 optimizer """adam""" +540 38 training_loop """owa""" +540 38 negative_sampler """basic""" +540 38 evaluator """rankbased""" +540 39 dataset """kinships""" +540 39 model """rescal""" +540 39 loss """softplus""" +540 39 regularizer """no""" +540 39 optimizer """adam""" +540 39 training_loop """owa""" +540 39 negative_sampler """basic""" +540 39 evaluator """rankbased""" +540 40 dataset """kinships""" +540 40 model """rescal""" +540 40 loss """softplus""" +540 40 regularizer """no""" +540 40 optimizer """adam""" +540 40 training_loop """owa""" +540 40 negative_sampler """basic""" +540 40 evaluator """rankbased""" +540 41 dataset """kinships""" +540 41 model """rescal""" +540 41 loss """softplus""" +540 41 regularizer """no""" +540 41 optimizer """adam""" +540 41 training_loop """owa""" +540 41 negative_sampler """basic""" +540 41 evaluator """rankbased""" +540 42 dataset """kinships""" +540 42 model """rescal""" +540 42 loss """softplus""" +540 42 regularizer """no""" +540 42 optimizer """adam""" +540 42 training_loop """owa""" +540 42 negative_sampler """basic""" +540 42 evaluator """rankbased""" +540 43 dataset """kinships""" +540 43 model """rescal""" +540 43 loss """softplus""" +540 43 regularizer """no""" +540 43 optimizer """adam""" +540 43 training_loop """owa""" +540 43 negative_sampler """basic""" +540 43 evaluator """rankbased""" +540 44 dataset """kinships""" +540 44 model """rescal""" +540 44 loss """softplus""" +540 44 regularizer """no""" +540 44 optimizer """adam""" +540 44 training_loop """owa""" +540 44 negative_sampler 
"""basic""" +540 44 evaluator """rankbased""" +540 45 dataset """kinships""" +540 45 model """rescal""" +540 45 loss """softplus""" +540 45 regularizer """no""" +540 45 optimizer """adam""" +540 45 training_loop """owa""" +540 45 negative_sampler """basic""" +540 45 evaluator """rankbased""" +540 46 dataset """kinships""" +540 46 model """rescal""" +540 46 loss """softplus""" +540 46 regularizer """no""" +540 46 optimizer """adam""" +540 46 training_loop """owa""" +540 46 negative_sampler """basic""" +540 46 evaluator """rankbased""" +540 47 dataset """kinships""" +540 47 model """rescal""" +540 47 loss """softplus""" +540 47 regularizer """no""" +540 47 optimizer """adam""" +540 47 training_loop """owa""" +540 47 negative_sampler """basic""" +540 47 evaluator """rankbased""" +540 48 dataset """kinships""" +540 48 model """rescal""" +540 48 loss """softplus""" +540 48 regularizer """no""" +540 48 optimizer """adam""" +540 48 training_loop """owa""" +540 48 negative_sampler """basic""" +540 48 evaluator """rankbased""" +540 49 dataset """kinships""" +540 49 model """rescal""" +540 49 loss """softplus""" +540 49 regularizer """no""" +540 49 optimizer """adam""" +540 49 training_loop """owa""" +540 49 negative_sampler """basic""" +540 49 evaluator """rankbased""" +540 50 dataset """kinships""" +540 50 model """rescal""" +540 50 loss """softplus""" +540 50 regularizer """no""" +540 50 optimizer """adam""" +540 50 training_loop """owa""" +540 50 negative_sampler """basic""" +540 50 evaluator """rankbased""" +540 51 dataset """kinships""" +540 51 model """rescal""" +540 51 loss """softplus""" +540 51 regularizer """no""" +540 51 optimizer """adam""" +540 51 training_loop """owa""" +540 51 negative_sampler """basic""" +540 51 evaluator """rankbased""" +540 52 dataset """kinships""" +540 52 model """rescal""" +540 52 loss """softplus""" +540 52 regularizer """no""" +540 52 optimizer """adam""" +540 52 training_loop """owa""" +540 52 negative_sampler """basic""" +540 52 
evaluator """rankbased""" +540 53 dataset """kinships""" +540 53 model """rescal""" +540 53 loss """softplus""" +540 53 regularizer """no""" +540 53 optimizer """adam""" +540 53 training_loop """owa""" +540 53 negative_sampler """basic""" +540 53 evaluator """rankbased""" +540 54 dataset """kinships""" +540 54 model """rescal""" +540 54 loss """softplus""" +540 54 regularizer """no""" +540 54 optimizer """adam""" +540 54 training_loop """owa""" +540 54 negative_sampler """basic""" +540 54 evaluator """rankbased""" +540 55 dataset """kinships""" +540 55 model """rescal""" +540 55 loss """softplus""" +540 55 regularizer """no""" +540 55 optimizer """adam""" +540 55 training_loop """owa""" +540 55 negative_sampler """basic""" +540 55 evaluator """rankbased""" +540 56 dataset """kinships""" +540 56 model """rescal""" +540 56 loss """softplus""" +540 56 regularizer """no""" +540 56 optimizer """adam""" +540 56 training_loop """owa""" +540 56 negative_sampler """basic""" +540 56 evaluator """rankbased""" +540 57 dataset """kinships""" +540 57 model """rescal""" +540 57 loss """softplus""" +540 57 regularizer """no""" +540 57 optimizer """adam""" +540 57 training_loop """owa""" +540 57 negative_sampler """basic""" +540 57 evaluator """rankbased""" +540 58 dataset """kinships""" +540 58 model """rescal""" +540 58 loss """softplus""" +540 58 regularizer """no""" +540 58 optimizer """adam""" +540 58 training_loop """owa""" +540 58 negative_sampler """basic""" +540 58 evaluator """rankbased""" +540 59 dataset """kinships""" +540 59 model """rescal""" +540 59 loss """softplus""" +540 59 regularizer """no""" +540 59 optimizer """adam""" +540 59 training_loop """owa""" +540 59 negative_sampler """basic""" +540 59 evaluator """rankbased""" +540 60 dataset """kinships""" +540 60 model """rescal""" +540 60 loss """softplus""" +540 60 regularizer """no""" +540 60 optimizer """adam""" +540 60 training_loop """owa""" +540 60 negative_sampler """basic""" +540 60 evaluator 
"""rankbased""" +540 61 dataset """kinships""" +540 61 model """rescal""" +540 61 loss """softplus""" +540 61 regularizer """no""" +540 61 optimizer """adam""" +540 61 training_loop """owa""" +540 61 negative_sampler """basic""" +540 61 evaluator """rankbased""" +540 62 dataset """kinships""" +540 62 model """rescal""" +540 62 loss """softplus""" +540 62 regularizer """no""" +540 62 optimizer """adam""" +540 62 training_loop """owa""" +540 62 negative_sampler """basic""" +540 62 evaluator """rankbased""" +540 63 dataset """kinships""" +540 63 model """rescal""" +540 63 loss """softplus""" +540 63 regularizer """no""" +540 63 optimizer """adam""" +540 63 training_loop """owa""" +540 63 negative_sampler """basic""" +540 63 evaluator """rankbased""" +540 64 dataset """kinships""" +540 64 model """rescal""" +540 64 loss """softplus""" +540 64 regularizer """no""" +540 64 optimizer """adam""" +540 64 training_loop """owa""" +540 64 negative_sampler """basic""" +540 64 evaluator """rankbased""" +540 65 dataset """kinships""" +540 65 model """rescal""" +540 65 loss """softplus""" +540 65 regularizer """no""" +540 65 optimizer """adam""" +540 65 training_loop """owa""" +540 65 negative_sampler """basic""" +540 65 evaluator """rankbased""" +540 66 dataset """kinships""" +540 66 model """rescal""" +540 66 loss """softplus""" +540 66 regularizer """no""" +540 66 optimizer """adam""" +540 66 training_loop """owa""" +540 66 negative_sampler """basic""" +540 66 evaluator """rankbased""" +540 67 dataset """kinships""" +540 67 model """rescal""" +540 67 loss """softplus""" +540 67 regularizer """no""" +540 67 optimizer """adam""" +540 67 training_loop """owa""" +540 67 negative_sampler """basic""" +540 67 evaluator """rankbased""" +540 68 dataset """kinships""" +540 68 model """rescal""" +540 68 loss """softplus""" +540 68 regularizer """no""" +540 68 optimizer """adam""" +540 68 training_loop """owa""" +540 68 negative_sampler """basic""" +540 68 evaluator """rankbased""" +540 69 
dataset """kinships""" +540 69 model """rescal""" +540 69 loss """softplus""" +540 69 regularizer """no""" +540 69 optimizer """adam""" +540 69 training_loop """owa""" +540 69 negative_sampler """basic""" +540 69 evaluator """rankbased""" +540 70 dataset """kinships""" +540 70 model """rescal""" +540 70 loss """softplus""" +540 70 regularizer """no""" +540 70 optimizer """adam""" +540 70 training_loop """owa""" +540 70 negative_sampler """basic""" +540 70 evaluator """rankbased""" +540 71 dataset """kinships""" +540 71 model """rescal""" +540 71 loss """softplus""" +540 71 regularizer """no""" +540 71 optimizer """adam""" +540 71 training_loop """owa""" +540 71 negative_sampler """basic""" +540 71 evaluator """rankbased""" +540 72 dataset """kinships""" +540 72 model """rescal""" +540 72 loss """softplus""" +540 72 regularizer """no""" +540 72 optimizer """adam""" +540 72 training_loop """owa""" +540 72 negative_sampler """basic""" +540 72 evaluator """rankbased""" +540 73 dataset """kinships""" +540 73 model """rescal""" +540 73 loss """softplus""" +540 73 regularizer """no""" +540 73 optimizer """adam""" +540 73 training_loop """owa""" +540 73 negative_sampler """basic""" +540 73 evaluator """rankbased""" +540 74 dataset """kinships""" +540 74 model """rescal""" +540 74 loss """softplus""" +540 74 regularizer """no""" +540 74 optimizer """adam""" +540 74 training_loop """owa""" +540 74 negative_sampler """basic""" +540 74 evaluator """rankbased""" +540 75 dataset """kinships""" +540 75 model """rescal""" +540 75 loss """softplus""" +540 75 regularizer """no""" +540 75 optimizer """adam""" +540 75 training_loop """owa""" +540 75 negative_sampler """basic""" +540 75 evaluator """rankbased""" +540 76 dataset """kinships""" +540 76 model """rescal""" +540 76 loss """softplus""" +540 76 regularizer """no""" +540 76 optimizer """adam""" +540 76 training_loop """owa""" +540 76 negative_sampler """basic""" +540 76 evaluator """rankbased""" +540 77 dataset """kinships""" 
+540 77 model """rescal""" +540 77 loss """softplus""" +540 77 regularizer """no""" +540 77 optimizer """adam""" +540 77 training_loop """owa""" +540 77 negative_sampler """basic""" +540 77 evaluator """rankbased""" +540 78 dataset """kinships""" +540 78 model """rescal""" +540 78 loss """softplus""" +540 78 regularizer """no""" +540 78 optimizer """adam""" +540 78 training_loop """owa""" +540 78 negative_sampler """basic""" +540 78 evaluator """rankbased""" +540 79 dataset """kinships""" +540 79 model """rescal""" +540 79 loss """softplus""" +540 79 regularizer """no""" +540 79 optimizer """adam""" +540 79 training_loop """owa""" +540 79 negative_sampler """basic""" +540 79 evaluator """rankbased""" +540 80 dataset """kinships""" +540 80 model """rescal""" +540 80 loss """softplus""" +540 80 regularizer """no""" +540 80 optimizer """adam""" +540 80 training_loop """owa""" +540 80 negative_sampler """basic""" +540 80 evaluator """rankbased""" +540 81 dataset """kinships""" +540 81 model """rescal""" +540 81 loss """softplus""" +540 81 regularizer """no""" +540 81 optimizer """adam""" +540 81 training_loop """owa""" +540 81 negative_sampler """basic""" +540 81 evaluator """rankbased""" +540 82 dataset """kinships""" +540 82 model """rescal""" +540 82 loss """softplus""" +540 82 regularizer """no""" +540 82 optimizer """adam""" +540 82 training_loop """owa""" +540 82 negative_sampler """basic""" +540 82 evaluator """rankbased""" +540 83 dataset """kinships""" +540 83 model """rescal""" +540 83 loss """softplus""" +540 83 regularizer """no""" +540 83 optimizer """adam""" +540 83 training_loop """owa""" +540 83 negative_sampler """basic""" +540 83 evaluator """rankbased""" +540 84 dataset """kinships""" +540 84 model """rescal""" +540 84 loss """softplus""" +540 84 regularizer """no""" +540 84 optimizer """adam""" +540 84 training_loop """owa""" +540 84 negative_sampler """basic""" +540 84 evaluator """rankbased""" +540 85 dataset """kinships""" +540 85 model 
"""rescal""" +540 85 loss """softplus""" +540 85 regularizer """no""" +540 85 optimizer """adam""" +540 85 training_loop """owa""" +540 85 negative_sampler """basic""" +540 85 evaluator """rankbased""" +540 86 dataset """kinships""" +540 86 model """rescal""" +540 86 loss """softplus""" +540 86 regularizer """no""" +540 86 optimizer """adam""" +540 86 training_loop """owa""" +540 86 negative_sampler """basic""" +540 86 evaluator """rankbased""" +540 87 dataset """kinships""" +540 87 model """rescal""" +540 87 loss """softplus""" +540 87 regularizer """no""" +540 87 optimizer """adam""" +540 87 training_loop """owa""" +540 87 negative_sampler """basic""" +540 87 evaluator """rankbased""" +540 88 dataset """kinships""" +540 88 model """rescal""" +540 88 loss """softplus""" +540 88 regularizer """no""" +540 88 optimizer """adam""" +540 88 training_loop """owa""" +540 88 negative_sampler """basic""" +540 88 evaluator """rankbased""" +540 89 dataset """kinships""" +540 89 model """rescal""" +540 89 loss """softplus""" +540 89 regularizer """no""" +540 89 optimizer """adam""" +540 89 training_loop """owa""" +540 89 negative_sampler """basic""" +540 89 evaluator """rankbased""" +540 90 dataset """kinships""" +540 90 model """rescal""" +540 90 loss """softplus""" +540 90 regularizer """no""" +540 90 optimizer """adam""" +540 90 training_loop """owa""" +540 90 negative_sampler """basic""" +540 90 evaluator """rankbased""" +540 91 dataset """kinships""" +540 91 model """rescal""" +540 91 loss """softplus""" +540 91 regularizer """no""" +540 91 optimizer """adam""" +540 91 training_loop """owa""" +540 91 negative_sampler """basic""" +540 91 evaluator """rankbased""" +540 92 dataset """kinships""" +540 92 model """rescal""" +540 92 loss """softplus""" +540 92 regularizer """no""" +540 92 optimizer """adam""" +540 92 training_loop """owa""" +540 92 negative_sampler """basic""" +540 92 evaluator """rankbased""" +540 93 dataset """kinships""" +540 93 model """rescal""" +540 93 
loss """softplus""" +540 93 regularizer """no""" +540 93 optimizer """adam""" +540 93 training_loop """owa""" +540 93 negative_sampler """basic""" +540 93 evaluator """rankbased""" +540 94 dataset """kinships""" +540 94 model """rescal""" +540 94 loss """softplus""" +540 94 regularizer """no""" +540 94 optimizer """adam""" +540 94 training_loop """owa""" +540 94 negative_sampler """basic""" +540 94 evaluator """rankbased""" +540 95 dataset """kinships""" +540 95 model """rescal""" +540 95 loss """softplus""" +540 95 regularizer """no""" +540 95 optimizer """adam""" +540 95 training_loop """owa""" +540 95 negative_sampler """basic""" +540 95 evaluator """rankbased""" +540 96 dataset """kinships""" +540 96 model """rescal""" +540 96 loss """softplus""" +540 96 regularizer """no""" +540 96 optimizer """adam""" +540 96 training_loop """owa""" +540 96 negative_sampler """basic""" +540 96 evaluator """rankbased""" +540 97 dataset """kinships""" +540 97 model """rescal""" +540 97 loss """softplus""" +540 97 regularizer """no""" +540 97 optimizer """adam""" +540 97 training_loop """owa""" +540 97 negative_sampler """basic""" +540 97 evaluator """rankbased""" +540 98 dataset """kinships""" +540 98 model """rescal""" +540 98 loss """softplus""" +540 98 regularizer """no""" +540 98 optimizer """adam""" +540 98 training_loop """owa""" +540 98 negative_sampler """basic""" +540 98 evaluator """rankbased""" +540 99 dataset """kinships""" +540 99 model """rescal""" +540 99 loss """softplus""" +540 99 regularizer """no""" +540 99 optimizer """adam""" +540 99 training_loop """owa""" +540 99 negative_sampler """basic""" +540 99 evaluator """rankbased""" +540 100 dataset """kinships""" +540 100 model """rescal""" +540 100 loss """softplus""" +540 100 regularizer """no""" +540 100 optimizer """adam""" +540 100 training_loop """owa""" +540 100 negative_sampler """basic""" +540 100 evaluator """rankbased""" +541 1 model.embedding_dim 1.0 +541 1 optimizer.lr 0.024387998061993123 +541 1 
negative_sampler.num_negs_per_pos 83.0 +541 1 training.batch_size 2.0 +541 2 model.embedding_dim 0.0 +541 2 optimizer.lr 0.0380106440825024 +541 2 negative_sampler.num_negs_per_pos 59.0 +541 2 training.batch_size 0.0 +541 3 model.embedding_dim 1.0 +541 3 optimizer.lr 0.00666570063211687 +541 3 negative_sampler.num_negs_per_pos 65.0 +541 3 training.batch_size 2.0 +541 4 model.embedding_dim 1.0 +541 4 optimizer.lr 0.0953800597877768 +541 4 negative_sampler.num_negs_per_pos 16.0 +541 4 training.batch_size 0.0 +541 5 model.embedding_dim 2.0 +541 5 optimizer.lr 0.017575635863489758 +541 5 negative_sampler.num_negs_per_pos 7.0 +541 5 training.batch_size 2.0 +541 6 model.embedding_dim 1.0 +541 6 optimizer.lr 0.002411452496648649 +541 6 negative_sampler.num_negs_per_pos 64.0 +541 6 training.batch_size 2.0 +541 7 model.embedding_dim 0.0 +541 7 optimizer.lr 0.016821756563186102 +541 7 negative_sampler.num_negs_per_pos 34.0 +541 7 training.batch_size 1.0 +541 8 model.embedding_dim 1.0 +541 8 optimizer.lr 0.004822222526769372 +541 8 negative_sampler.num_negs_per_pos 65.0 +541 8 training.batch_size 1.0 +541 9 model.embedding_dim 1.0 +541 9 optimizer.lr 0.0010304950284436268 +541 9 negative_sampler.num_negs_per_pos 8.0 +541 9 training.batch_size 2.0 +541 10 model.embedding_dim 0.0 +541 10 optimizer.lr 0.015595551664692415 +541 10 negative_sampler.num_negs_per_pos 30.0 +541 10 training.batch_size 2.0 +541 11 model.embedding_dim 1.0 +541 11 optimizer.lr 0.023744796423473255 +541 11 negative_sampler.num_negs_per_pos 95.0 +541 11 training.batch_size 1.0 +541 12 model.embedding_dim 2.0 +541 12 optimizer.lr 0.0027830603384298327 +541 12 negative_sampler.num_negs_per_pos 54.0 +541 12 training.batch_size 2.0 +541 13 model.embedding_dim 2.0 +541 13 optimizer.lr 0.003770309884876163 +541 13 negative_sampler.num_negs_per_pos 94.0 +541 13 training.batch_size 1.0 +541 14 model.embedding_dim 2.0 +541 14 optimizer.lr 0.001553496176836476 +541 14 negative_sampler.num_negs_per_pos 76.0 +541 14 
training.batch_size 1.0 +541 15 model.embedding_dim 0.0 +541 15 optimizer.lr 0.03679294446959681 +541 15 negative_sampler.num_negs_per_pos 48.0 +541 15 training.batch_size 0.0 +541 16 model.embedding_dim 0.0 +541 16 optimizer.lr 0.026798978028679317 +541 16 negative_sampler.num_negs_per_pos 28.0 +541 16 training.batch_size 1.0 +541 17 model.embedding_dim 2.0 +541 17 optimizer.lr 0.0010306076111036135 +541 17 negative_sampler.num_negs_per_pos 51.0 +541 17 training.batch_size 1.0 +541 18 model.embedding_dim 0.0 +541 18 optimizer.lr 0.03307503035306477 +541 18 negative_sampler.num_negs_per_pos 63.0 +541 18 training.batch_size 0.0 +541 19 model.embedding_dim 2.0 +541 19 optimizer.lr 0.010270905508234758 +541 19 negative_sampler.num_negs_per_pos 8.0 +541 19 training.batch_size 0.0 +541 20 model.embedding_dim 0.0 +541 20 optimizer.lr 0.01865973317938183 +541 20 negative_sampler.num_negs_per_pos 71.0 +541 20 training.batch_size 0.0 +541 21 model.embedding_dim 2.0 +541 21 optimizer.lr 0.00576597720186446 +541 21 negative_sampler.num_negs_per_pos 44.0 +541 21 training.batch_size 0.0 +541 22 model.embedding_dim 2.0 +541 22 optimizer.lr 0.0023152517666692007 +541 22 negative_sampler.num_negs_per_pos 49.0 +541 22 training.batch_size 1.0 +541 23 model.embedding_dim 0.0 +541 23 optimizer.lr 0.0014902803203470751 +541 23 negative_sampler.num_negs_per_pos 98.0 +541 23 training.batch_size 1.0 +541 24 model.embedding_dim 1.0 +541 24 optimizer.lr 0.001082310849141818 +541 24 negative_sampler.num_negs_per_pos 38.0 +541 24 training.batch_size 0.0 +541 25 model.embedding_dim 2.0 +541 25 optimizer.lr 0.018487933377405546 +541 25 negative_sampler.num_negs_per_pos 55.0 +541 25 training.batch_size 0.0 +541 26 model.embedding_dim 1.0 +541 26 optimizer.lr 0.0012323571967479841 +541 26 negative_sampler.num_negs_per_pos 35.0 +541 26 training.batch_size 0.0 +541 27 model.embedding_dim 2.0 +541 27 optimizer.lr 0.015630609982662098 +541 27 negative_sampler.num_negs_per_pos 97.0 +541 27 
training.batch_size 1.0 +541 28 model.embedding_dim 0.0 +541 28 optimizer.lr 0.011549009800793847 +541 28 negative_sampler.num_negs_per_pos 92.0 +541 28 training.batch_size 2.0 +541 29 model.embedding_dim 1.0 +541 29 optimizer.lr 0.0023179151883505845 +541 29 negative_sampler.num_negs_per_pos 82.0 +541 29 training.batch_size 2.0 +541 30 model.embedding_dim 2.0 +541 30 optimizer.lr 0.056958399990500314 +541 30 negative_sampler.num_negs_per_pos 76.0 +541 30 training.batch_size 0.0 +541 31 model.embedding_dim 0.0 +541 31 optimizer.lr 0.002335289789006489 +541 31 negative_sampler.num_negs_per_pos 31.0 +541 31 training.batch_size 2.0 +541 32 model.embedding_dim 2.0 +541 32 optimizer.lr 0.0016003785113031157 +541 32 negative_sampler.num_negs_per_pos 89.0 +541 32 training.batch_size 0.0 +541 33 model.embedding_dim 2.0 +541 33 optimizer.lr 0.02629623620659034 +541 33 negative_sampler.num_negs_per_pos 99.0 +541 33 training.batch_size 1.0 +541 34 model.embedding_dim 0.0 +541 34 optimizer.lr 0.006946582144178121 +541 34 negative_sampler.num_negs_per_pos 30.0 +541 34 training.batch_size 2.0 +541 35 model.embedding_dim 1.0 +541 35 optimizer.lr 0.003125434310778727 +541 35 negative_sampler.num_negs_per_pos 17.0 +541 35 training.batch_size 2.0 +541 36 model.embedding_dim 0.0 +541 36 optimizer.lr 0.04766250166517534 +541 36 negative_sampler.num_negs_per_pos 29.0 +541 36 training.batch_size 0.0 +541 37 model.embedding_dim 1.0 +541 37 optimizer.lr 0.011840668594922445 +541 37 negative_sampler.num_negs_per_pos 13.0 +541 37 training.batch_size 2.0 +541 38 model.embedding_dim 2.0 +541 38 optimizer.lr 0.007465857980712495 +541 38 negative_sampler.num_negs_per_pos 90.0 +541 38 training.batch_size 2.0 +541 39 model.embedding_dim 2.0 +541 39 optimizer.lr 0.004492281832208789 +541 39 negative_sampler.num_negs_per_pos 67.0 +541 39 training.batch_size 1.0 +541 40 model.embedding_dim 0.0 +541 40 optimizer.lr 0.00854725502295239 +541 40 negative_sampler.num_negs_per_pos 17.0 +541 40 
training.batch_size 0.0 +541 41 model.embedding_dim 2.0 +541 41 optimizer.lr 0.018840386386845234 +541 41 negative_sampler.num_negs_per_pos 25.0 +541 41 training.batch_size 1.0 +541 42 model.embedding_dim 2.0 +541 42 optimizer.lr 0.0014479307738831755 +541 42 negative_sampler.num_negs_per_pos 31.0 +541 42 training.batch_size 2.0 +541 43 model.embedding_dim 1.0 +541 43 optimizer.lr 0.007890495640811417 +541 43 negative_sampler.num_negs_per_pos 96.0 +541 43 training.batch_size 1.0 +541 44 model.embedding_dim 0.0 +541 44 optimizer.lr 0.009090868944194109 +541 44 negative_sampler.num_negs_per_pos 44.0 +541 44 training.batch_size 2.0 +541 45 model.embedding_dim 1.0 +541 45 optimizer.lr 0.0031197273630590846 +541 45 negative_sampler.num_negs_per_pos 84.0 +541 45 training.batch_size 1.0 +541 46 model.embedding_dim 0.0 +541 46 optimizer.lr 0.005821231099505043 +541 46 negative_sampler.num_negs_per_pos 21.0 +541 46 training.batch_size 0.0 +541 47 model.embedding_dim 0.0 +541 47 optimizer.lr 0.0020704029359039245 +541 47 negative_sampler.num_negs_per_pos 58.0 +541 47 training.batch_size 0.0 +541 48 model.embedding_dim 1.0 +541 48 optimizer.lr 0.006896002656821564 +541 48 negative_sampler.num_negs_per_pos 2.0 +541 48 training.batch_size 1.0 +541 49 model.embedding_dim 2.0 +541 49 optimizer.lr 0.035293136295170446 +541 49 negative_sampler.num_negs_per_pos 86.0 +541 49 training.batch_size 0.0 +541 50 model.embedding_dim 1.0 +541 50 optimizer.lr 0.03123913759758485 +541 50 negative_sampler.num_negs_per_pos 56.0 +541 50 training.batch_size 1.0 +541 51 model.embedding_dim 0.0 +541 51 optimizer.lr 0.02449447012081379 +541 51 negative_sampler.num_negs_per_pos 7.0 +541 51 training.batch_size 2.0 +541 52 model.embedding_dim 0.0 +541 52 optimizer.lr 0.012010453230048361 +541 52 negative_sampler.num_negs_per_pos 5.0 +541 52 training.batch_size 2.0 +541 53 model.embedding_dim 1.0 +541 53 optimizer.lr 0.03550601897049021 +541 53 negative_sampler.num_negs_per_pos 88.0 +541 53 
training.batch_size 0.0 +541 54 model.embedding_dim 0.0 +541 54 optimizer.lr 0.0256160663720107 +541 54 negative_sampler.num_negs_per_pos 67.0 +541 54 training.batch_size 0.0 +541 55 model.embedding_dim 1.0 +541 55 optimizer.lr 0.07734518054431022 +541 55 negative_sampler.num_negs_per_pos 68.0 +541 55 training.batch_size 0.0 +541 56 model.embedding_dim 1.0 +541 56 optimizer.lr 0.008092256752261672 +541 56 negative_sampler.num_negs_per_pos 37.0 +541 56 training.batch_size 2.0 +541 57 model.embedding_dim 0.0 +541 57 optimizer.lr 0.07762819250680333 +541 57 negative_sampler.num_negs_per_pos 43.0 +541 57 training.batch_size 0.0 +541 58 model.embedding_dim 2.0 +541 58 optimizer.lr 0.0743566273074214 +541 58 negative_sampler.num_negs_per_pos 82.0 +541 58 training.batch_size 1.0 +541 59 model.embedding_dim 0.0 +541 59 optimizer.lr 0.04075615602961293 +541 59 negative_sampler.num_negs_per_pos 71.0 +541 59 training.batch_size 1.0 +541 60 model.embedding_dim 0.0 +541 60 optimizer.lr 0.01144286215156975 +541 60 negative_sampler.num_negs_per_pos 32.0 +541 60 training.batch_size 1.0 +541 61 model.embedding_dim 1.0 +541 61 optimizer.lr 0.07860694453298366 +541 61 negative_sampler.num_negs_per_pos 62.0 +541 61 training.batch_size 0.0 +541 62 model.embedding_dim 0.0 +541 62 optimizer.lr 0.03823674048466386 +541 62 negative_sampler.num_negs_per_pos 75.0 +541 62 training.batch_size 1.0 +541 63 model.embedding_dim 2.0 +541 63 optimizer.lr 0.013683346351025624 +541 63 negative_sampler.num_negs_per_pos 59.0 +541 63 training.batch_size 2.0 +541 64 model.embedding_dim 0.0 +541 64 optimizer.lr 0.002902209939012254 +541 64 negative_sampler.num_negs_per_pos 8.0 +541 64 training.batch_size 1.0 +541 65 model.embedding_dim 2.0 +541 65 optimizer.lr 0.0856580397213257 +541 65 negative_sampler.num_negs_per_pos 74.0 +541 65 training.batch_size 2.0 +541 66 model.embedding_dim 1.0 +541 66 optimizer.lr 0.006306323657517263 +541 66 negative_sampler.num_negs_per_pos 3.0 +541 66 training.batch_size 1.0 
+541 67 model.embedding_dim 1.0 +541 67 optimizer.lr 0.001584839024109288 +541 67 negative_sampler.num_negs_per_pos 24.0 +541 67 training.batch_size 1.0 +541 68 model.embedding_dim 0.0 +541 68 optimizer.lr 0.03799015979206247 +541 68 negative_sampler.num_negs_per_pos 15.0 +541 68 training.batch_size 0.0 +541 69 model.embedding_dim 1.0 +541 69 optimizer.lr 0.009018904179876166 +541 69 negative_sampler.num_negs_per_pos 11.0 +541 69 training.batch_size 0.0 +541 70 model.embedding_dim 0.0 +541 70 optimizer.lr 0.00887250240913626 +541 70 negative_sampler.num_negs_per_pos 63.0 +541 70 training.batch_size 1.0 +541 71 model.embedding_dim 0.0 +541 71 optimizer.lr 0.04443346232956683 +541 71 negative_sampler.num_negs_per_pos 64.0 +541 71 training.batch_size 0.0 +541 72 model.embedding_dim 0.0 +541 72 optimizer.lr 0.0032885122166622285 +541 72 negative_sampler.num_negs_per_pos 97.0 +541 72 training.batch_size 2.0 +541 73 model.embedding_dim 2.0 +541 73 optimizer.lr 0.014209727145522866 +541 73 negative_sampler.num_negs_per_pos 89.0 +541 73 training.batch_size 0.0 +541 74 model.embedding_dim 0.0 +541 74 optimizer.lr 0.014048393147700847 +541 74 negative_sampler.num_negs_per_pos 19.0 +541 74 training.batch_size 0.0 +541 75 model.embedding_dim 0.0 +541 75 optimizer.lr 0.0026721245698085663 +541 75 negative_sampler.num_negs_per_pos 68.0 +541 75 training.batch_size 1.0 +541 76 model.embedding_dim 0.0 +541 76 optimizer.lr 0.006156860393999338 +541 76 negative_sampler.num_negs_per_pos 45.0 +541 76 training.batch_size 0.0 +541 77 model.embedding_dim 2.0 +541 77 optimizer.lr 0.037445487860782466 +541 77 negative_sampler.num_negs_per_pos 94.0 +541 77 training.batch_size 2.0 +541 78 model.embedding_dim 1.0 +541 78 optimizer.lr 0.005282347878777605 +541 78 negative_sampler.num_negs_per_pos 95.0 +541 78 training.batch_size 2.0 +541 79 model.embedding_dim 0.0 +541 79 optimizer.lr 0.0038002993351943805 +541 79 negative_sampler.num_negs_per_pos 32.0 +541 79 training.batch_size 2.0 +541 80 
model.embedding_dim 1.0 +541 80 optimizer.lr 0.004131341161972224 +541 80 negative_sampler.num_negs_per_pos 67.0 +541 80 training.batch_size 1.0 +541 81 model.embedding_dim 2.0 +541 81 optimizer.lr 0.024142028388504853 +541 81 negative_sampler.num_negs_per_pos 82.0 +541 81 training.batch_size 2.0 +541 82 model.embedding_dim 0.0 +541 82 optimizer.lr 0.001632025622942707 +541 82 negative_sampler.num_negs_per_pos 72.0 +541 82 training.batch_size 2.0 +541 83 model.embedding_dim 2.0 +541 83 optimizer.lr 0.03842291281242151 +541 83 negative_sampler.num_negs_per_pos 13.0 +541 83 training.batch_size 1.0 +541 84 model.embedding_dim 2.0 +541 84 optimizer.lr 0.046026613109363484 +541 84 negative_sampler.num_negs_per_pos 1.0 +541 84 training.batch_size 1.0 +541 85 model.embedding_dim 0.0 +541 85 optimizer.lr 0.001225982165749135 +541 85 negative_sampler.num_negs_per_pos 58.0 +541 85 training.batch_size 0.0 +541 86 model.embedding_dim 1.0 +541 86 optimizer.lr 0.0548994186527667 +541 86 negative_sampler.num_negs_per_pos 70.0 +541 86 training.batch_size 2.0 +541 87 model.embedding_dim 1.0 +541 87 optimizer.lr 0.02540679987936382 +541 87 negative_sampler.num_negs_per_pos 13.0 +541 87 training.batch_size 0.0 +541 88 model.embedding_dim 0.0 +541 88 optimizer.lr 0.08893378724628659 +541 88 negative_sampler.num_negs_per_pos 31.0 +541 88 training.batch_size 2.0 +541 89 model.embedding_dim 0.0 +541 89 optimizer.lr 0.008646019715085274 +541 89 negative_sampler.num_negs_per_pos 99.0 +541 89 training.batch_size 1.0 +541 90 model.embedding_dim 0.0 +541 90 optimizer.lr 0.02651329087977377 +541 90 negative_sampler.num_negs_per_pos 59.0 +541 90 training.batch_size 1.0 +541 91 model.embedding_dim 1.0 +541 91 optimizer.lr 0.09707551520188243 +541 91 negative_sampler.num_negs_per_pos 2.0 +541 91 training.batch_size 1.0 +541 92 model.embedding_dim 0.0 +541 92 optimizer.lr 0.0018810083967085375 +541 92 negative_sampler.num_negs_per_pos 78.0 +541 92 training.batch_size 1.0 +541 93 
model.embedding_dim 2.0 +541 93 optimizer.lr 0.07481490026902934 +541 93 negative_sampler.num_negs_per_pos 20.0 +541 93 training.batch_size 1.0 +541 94 model.embedding_dim 2.0 +541 94 optimizer.lr 0.0015924213932858207 +541 94 negative_sampler.num_negs_per_pos 13.0 +541 94 training.batch_size 2.0 +541 95 model.embedding_dim 1.0 +541 95 optimizer.lr 0.021377061081250516 +541 95 negative_sampler.num_negs_per_pos 38.0 +541 95 training.batch_size 1.0 +541 96 model.embedding_dim 0.0 +541 96 optimizer.lr 0.060517113339594794 +541 96 negative_sampler.num_negs_per_pos 25.0 +541 96 training.batch_size 1.0 +541 97 model.embedding_dim 0.0 +541 97 optimizer.lr 0.0434383087185773 +541 97 negative_sampler.num_negs_per_pos 19.0 +541 97 training.batch_size 0.0 +541 98 model.embedding_dim 2.0 +541 98 optimizer.lr 0.01538392745377393 +541 98 negative_sampler.num_negs_per_pos 13.0 +541 98 training.batch_size 1.0 +541 99 model.embedding_dim 1.0 +541 99 optimizer.lr 0.011391126029542085 +541 99 negative_sampler.num_negs_per_pos 66.0 +541 99 training.batch_size 0.0 +541 100 model.embedding_dim 1.0 +541 100 optimizer.lr 0.007441117906296307 +541 100 negative_sampler.num_negs_per_pos 89.0 +541 100 training.batch_size 0.0 +541 1 dataset """kinships""" +541 1 model """rescal""" +541 1 loss """bceaftersigmoid""" +541 1 regularizer """no""" +541 1 optimizer """adam""" +541 1 training_loop """owa""" +541 1 negative_sampler """basic""" +541 1 evaluator """rankbased""" +541 2 dataset """kinships""" +541 2 model """rescal""" +541 2 loss """bceaftersigmoid""" +541 2 regularizer """no""" +541 2 optimizer """adam""" +541 2 training_loop """owa""" +541 2 negative_sampler """basic""" +541 2 evaluator """rankbased""" +541 3 dataset """kinships""" +541 3 model """rescal""" +541 3 loss """bceaftersigmoid""" +541 3 regularizer """no""" +541 3 optimizer """adam""" +541 3 training_loop """owa""" +541 3 negative_sampler """basic""" +541 3 evaluator """rankbased""" +541 4 dataset """kinships""" +541 4 model 
"""rescal""" +541 4 loss """bceaftersigmoid""" +541 4 regularizer """no""" +541 4 optimizer """adam""" +541 4 training_loop """owa""" +541 4 negative_sampler """basic""" +541 4 evaluator """rankbased""" +541 5 dataset """kinships""" +541 5 model """rescal""" +541 5 loss """bceaftersigmoid""" +541 5 regularizer """no""" +541 5 optimizer """adam""" +541 5 training_loop """owa""" +541 5 negative_sampler """basic""" +541 5 evaluator """rankbased""" +541 6 dataset """kinships""" +541 6 model """rescal""" +541 6 loss """bceaftersigmoid""" +541 6 regularizer """no""" +541 6 optimizer """adam""" +541 6 training_loop """owa""" +541 6 negative_sampler """basic""" +541 6 evaluator """rankbased""" +541 7 dataset """kinships""" +541 7 model """rescal""" +541 7 loss """bceaftersigmoid""" +541 7 regularizer """no""" +541 7 optimizer """adam""" +541 7 training_loop """owa""" +541 7 negative_sampler """basic""" +541 7 evaluator """rankbased""" +541 8 dataset """kinships""" +541 8 model """rescal""" +541 8 loss """bceaftersigmoid""" +541 8 regularizer """no""" +541 8 optimizer """adam""" +541 8 training_loop """owa""" +541 8 negative_sampler """basic""" +541 8 evaluator """rankbased""" +541 9 dataset """kinships""" +541 9 model """rescal""" +541 9 loss """bceaftersigmoid""" +541 9 regularizer """no""" +541 9 optimizer """adam""" +541 9 training_loop """owa""" +541 9 negative_sampler """basic""" +541 9 evaluator """rankbased""" +541 10 dataset """kinships""" +541 10 model """rescal""" +541 10 loss """bceaftersigmoid""" +541 10 regularizer """no""" +541 10 optimizer """adam""" +541 10 training_loop """owa""" +541 10 negative_sampler """basic""" +541 10 evaluator """rankbased""" +541 11 dataset """kinships""" +541 11 model """rescal""" +541 11 loss """bceaftersigmoid""" +541 11 regularizer """no""" +541 11 optimizer """adam""" +541 11 training_loop """owa""" +541 11 negative_sampler """basic""" +541 11 evaluator """rankbased""" +541 12 dataset """kinships""" +541 12 model """rescal""" 
+541 12 loss """bceaftersigmoid""" +541 12 regularizer """no""" +541 12 optimizer """adam""" +541 12 training_loop """owa""" +541 12 negative_sampler """basic""" +541 12 evaluator """rankbased""" +541 13 dataset """kinships""" +541 13 model """rescal""" +541 13 loss """bceaftersigmoid""" +541 13 regularizer """no""" +541 13 optimizer """adam""" +541 13 training_loop """owa""" +541 13 negative_sampler """basic""" +541 13 evaluator """rankbased""" +541 14 dataset """kinships""" +541 14 model """rescal""" +541 14 loss """bceaftersigmoid""" +541 14 regularizer """no""" +541 14 optimizer """adam""" +541 14 training_loop """owa""" +541 14 negative_sampler """basic""" +541 14 evaluator """rankbased""" +541 15 dataset """kinships""" +541 15 model """rescal""" +541 15 loss """bceaftersigmoid""" +541 15 regularizer """no""" +541 15 optimizer """adam""" +541 15 training_loop """owa""" +541 15 negative_sampler """basic""" +541 15 evaluator """rankbased""" +541 16 dataset """kinships""" +541 16 model """rescal""" +541 16 loss """bceaftersigmoid""" +541 16 regularizer """no""" +541 16 optimizer """adam""" +541 16 training_loop """owa""" +541 16 negative_sampler """basic""" +541 16 evaluator """rankbased""" +541 17 dataset """kinships""" +541 17 model """rescal""" +541 17 loss """bceaftersigmoid""" +541 17 regularizer """no""" +541 17 optimizer """adam""" +541 17 training_loop """owa""" +541 17 negative_sampler """basic""" +541 17 evaluator """rankbased""" +541 18 dataset """kinships""" +541 18 model """rescal""" +541 18 loss """bceaftersigmoid""" +541 18 regularizer """no""" +541 18 optimizer """adam""" +541 18 training_loop """owa""" +541 18 negative_sampler """basic""" +541 18 evaluator """rankbased""" +541 19 dataset """kinships""" +541 19 model """rescal""" +541 19 loss """bceaftersigmoid""" +541 19 regularizer """no""" +541 19 optimizer """adam""" +541 19 training_loop """owa""" +541 19 negative_sampler """basic""" +541 19 evaluator """rankbased""" +541 20 dataset 
"""kinships""" +541 20 model """rescal""" +541 20 loss """bceaftersigmoid""" +541 20 regularizer """no""" +541 20 optimizer """adam""" +541 20 training_loop """owa""" +541 20 negative_sampler """basic""" +541 20 evaluator """rankbased""" +541 21 dataset """kinships""" +541 21 model """rescal""" +541 21 loss """bceaftersigmoid""" +541 21 regularizer """no""" +541 21 optimizer """adam""" +541 21 training_loop """owa""" +541 21 negative_sampler """basic""" +541 21 evaluator """rankbased""" +541 22 dataset """kinships""" +541 22 model """rescal""" +541 22 loss """bceaftersigmoid""" +541 22 regularizer """no""" +541 22 optimizer """adam""" +541 22 training_loop """owa""" +541 22 negative_sampler """basic""" +541 22 evaluator """rankbased""" +541 23 dataset """kinships""" +541 23 model """rescal""" +541 23 loss """bceaftersigmoid""" +541 23 regularizer """no""" +541 23 optimizer """adam""" +541 23 training_loop """owa""" +541 23 negative_sampler """basic""" +541 23 evaluator """rankbased""" +541 24 dataset """kinships""" +541 24 model """rescal""" +541 24 loss """bceaftersigmoid""" +541 24 regularizer """no""" +541 24 optimizer """adam""" +541 24 training_loop """owa""" +541 24 negative_sampler """basic""" +541 24 evaluator """rankbased""" +541 25 dataset """kinships""" +541 25 model """rescal""" +541 25 loss """bceaftersigmoid""" +541 25 regularizer """no""" +541 25 optimizer """adam""" +541 25 training_loop """owa""" +541 25 negative_sampler """basic""" +541 25 evaluator """rankbased""" +541 26 dataset """kinships""" +541 26 model """rescal""" +541 26 loss """bceaftersigmoid""" +541 26 regularizer """no""" +541 26 optimizer """adam""" +541 26 training_loop """owa""" +541 26 negative_sampler """basic""" +541 26 evaluator """rankbased""" +541 27 dataset """kinships""" +541 27 model """rescal""" +541 27 loss """bceaftersigmoid""" +541 27 regularizer """no""" +541 27 optimizer """adam""" +541 27 training_loop """owa""" +541 27 negative_sampler """basic""" +541 27 evaluator 
"""rankbased""" +541 28 dataset """kinships""" +541 28 model """rescal""" +541 28 loss """bceaftersigmoid""" +541 28 regularizer """no""" +541 28 optimizer """adam""" +541 28 training_loop """owa""" +541 28 negative_sampler """basic""" +541 28 evaluator """rankbased""" +541 29 dataset """kinships""" +541 29 model """rescal""" +541 29 loss """bceaftersigmoid""" +541 29 regularizer """no""" +541 29 optimizer """adam""" +541 29 training_loop """owa""" +541 29 negative_sampler """basic""" +541 29 evaluator """rankbased""" +541 30 dataset """kinships""" +541 30 model """rescal""" +541 30 loss """bceaftersigmoid""" +541 30 regularizer """no""" +541 30 optimizer """adam""" +541 30 training_loop """owa""" +541 30 negative_sampler """basic""" +541 30 evaluator """rankbased""" +541 31 dataset """kinships""" +541 31 model """rescal""" +541 31 loss """bceaftersigmoid""" +541 31 regularizer """no""" +541 31 optimizer """adam""" +541 31 training_loop """owa""" +541 31 negative_sampler """basic""" +541 31 evaluator """rankbased""" +541 32 dataset """kinships""" +541 32 model """rescal""" +541 32 loss """bceaftersigmoid""" +541 32 regularizer """no""" +541 32 optimizer """adam""" +541 32 training_loop """owa""" +541 32 negative_sampler """basic""" +541 32 evaluator """rankbased""" +541 33 dataset """kinships""" +541 33 model """rescal""" +541 33 loss """bceaftersigmoid""" +541 33 regularizer """no""" +541 33 optimizer """adam""" +541 33 training_loop """owa""" +541 33 negative_sampler """basic""" +541 33 evaluator """rankbased""" +541 34 dataset """kinships""" +541 34 model """rescal""" +541 34 loss """bceaftersigmoid""" +541 34 regularizer """no""" +541 34 optimizer """adam""" +541 34 training_loop """owa""" +541 34 negative_sampler """basic""" +541 34 evaluator """rankbased""" +541 35 dataset """kinships""" +541 35 model """rescal""" +541 35 loss """bceaftersigmoid""" +541 35 regularizer """no""" +541 35 optimizer """adam""" +541 35 training_loop """owa""" +541 35 
negative_sampler """basic""" +541 35 evaluator """rankbased""" +541 36 dataset """kinships""" +541 36 model """rescal""" +541 36 loss """bceaftersigmoid""" +541 36 regularizer """no""" +541 36 optimizer """adam""" +541 36 training_loop """owa""" +541 36 negative_sampler """basic""" +541 36 evaluator """rankbased""" +541 37 dataset """kinships""" +541 37 model """rescal""" +541 37 loss """bceaftersigmoid""" +541 37 regularizer """no""" +541 37 optimizer """adam""" +541 37 training_loop """owa""" +541 37 negative_sampler """basic""" +541 37 evaluator """rankbased""" +541 38 dataset """kinships""" +541 38 model """rescal""" +541 38 loss """bceaftersigmoid""" +541 38 regularizer """no""" +541 38 optimizer """adam""" +541 38 training_loop """owa""" +541 38 negative_sampler """basic""" +541 38 evaluator """rankbased""" +541 39 dataset """kinships""" +541 39 model """rescal""" +541 39 loss """bceaftersigmoid""" +541 39 regularizer """no""" +541 39 optimizer """adam""" +541 39 training_loop """owa""" +541 39 negative_sampler """basic""" +541 39 evaluator """rankbased""" +541 40 dataset """kinships""" +541 40 model """rescal""" +541 40 loss """bceaftersigmoid""" +541 40 regularizer """no""" +541 40 optimizer """adam""" +541 40 training_loop """owa""" +541 40 negative_sampler """basic""" +541 40 evaluator """rankbased""" +541 41 dataset """kinships""" +541 41 model """rescal""" +541 41 loss """bceaftersigmoid""" +541 41 regularizer """no""" +541 41 optimizer """adam""" +541 41 training_loop """owa""" +541 41 negative_sampler """basic""" +541 41 evaluator """rankbased""" +541 42 dataset """kinships""" +541 42 model """rescal""" +541 42 loss """bceaftersigmoid""" +541 42 regularizer """no""" +541 42 optimizer """adam""" +541 42 training_loop """owa""" +541 42 negative_sampler """basic""" +541 42 evaluator """rankbased""" +541 43 dataset """kinships""" +541 43 model """rescal""" +541 43 loss """bceaftersigmoid""" +541 43 regularizer """no""" +541 43 optimizer """adam""" +541 43 
training_loop """owa""" +541 43 negative_sampler """basic""" +541 43 evaluator """rankbased""" +541 44 dataset """kinships""" +541 44 model """rescal""" +541 44 loss """bceaftersigmoid""" +541 44 regularizer """no""" +541 44 optimizer """adam""" +541 44 training_loop """owa""" +541 44 negative_sampler """basic""" +541 44 evaluator """rankbased""" +541 45 dataset """kinships""" +541 45 model """rescal""" +541 45 loss """bceaftersigmoid""" +541 45 regularizer """no""" +541 45 optimizer """adam""" +541 45 training_loop """owa""" +541 45 negative_sampler """basic""" +541 45 evaluator """rankbased""" +541 46 dataset """kinships""" +541 46 model """rescal""" +541 46 loss """bceaftersigmoid""" +541 46 regularizer """no""" +541 46 optimizer """adam""" +541 46 training_loop """owa""" +541 46 negative_sampler """basic""" +541 46 evaluator """rankbased""" +541 47 dataset """kinships""" +541 47 model """rescal""" +541 47 loss """bceaftersigmoid""" +541 47 regularizer """no""" +541 47 optimizer """adam""" +541 47 training_loop """owa""" +541 47 negative_sampler """basic""" +541 47 evaluator """rankbased""" +541 48 dataset """kinships""" +541 48 model """rescal""" +541 48 loss """bceaftersigmoid""" +541 48 regularizer """no""" +541 48 optimizer """adam""" +541 48 training_loop """owa""" +541 48 negative_sampler """basic""" +541 48 evaluator """rankbased""" +541 49 dataset """kinships""" +541 49 model """rescal""" +541 49 loss """bceaftersigmoid""" +541 49 regularizer """no""" +541 49 optimizer """adam""" +541 49 training_loop """owa""" +541 49 negative_sampler """basic""" +541 49 evaluator """rankbased""" +541 50 dataset """kinships""" +541 50 model """rescal""" +541 50 loss """bceaftersigmoid""" +541 50 regularizer """no""" +541 50 optimizer """adam""" +541 50 training_loop """owa""" +541 50 negative_sampler """basic""" +541 50 evaluator """rankbased""" +541 51 dataset """kinships""" +541 51 model """rescal""" +541 51 loss """bceaftersigmoid""" +541 51 regularizer """no""" +541 
51 optimizer """adam""" +541 51 training_loop """owa""" +541 51 negative_sampler """basic""" +541 51 evaluator """rankbased""" +541 52 dataset """kinships""" +541 52 model """rescal""" +541 52 loss """bceaftersigmoid""" +541 52 regularizer """no""" +541 52 optimizer """adam""" +541 52 training_loop """owa""" +541 52 negative_sampler """basic""" +541 52 evaluator """rankbased""" +541 53 dataset """kinships""" +541 53 model """rescal""" +541 53 loss """bceaftersigmoid""" +541 53 regularizer """no""" +541 53 optimizer """adam""" +541 53 training_loop """owa""" +541 53 negative_sampler """basic""" +541 53 evaluator """rankbased""" +541 54 dataset """kinships""" +541 54 model """rescal""" +541 54 loss """bceaftersigmoid""" +541 54 regularizer """no""" +541 54 optimizer """adam""" +541 54 training_loop """owa""" +541 54 negative_sampler """basic""" +541 54 evaluator """rankbased""" +541 55 dataset """kinships""" +541 55 model """rescal""" +541 55 loss """bceaftersigmoid""" +541 55 regularizer """no""" +541 55 optimizer """adam""" +541 55 training_loop """owa""" +541 55 negative_sampler """basic""" +541 55 evaluator """rankbased""" +541 56 dataset """kinships""" +541 56 model """rescal""" +541 56 loss """bceaftersigmoid""" +541 56 regularizer """no""" +541 56 optimizer """adam""" +541 56 training_loop """owa""" +541 56 negative_sampler """basic""" +541 56 evaluator """rankbased""" +541 57 dataset """kinships""" +541 57 model """rescal""" +541 57 loss """bceaftersigmoid""" +541 57 regularizer """no""" +541 57 optimizer """adam""" +541 57 training_loop """owa""" +541 57 negative_sampler """basic""" +541 57 evaluator """rankbased""" +541 58 dataset """kinships""" +541 58 model """rescal""" +541 58 loss """bceaftersigmoid""" +541 58 regularizer """no""" +541 58 optimizer """adam""" +541 58 training_loop """owa""" +541 58 negative_sampler """basic""" +541 58 evaluator """rankbased""" +541 59 dataset """kinships""" +541 59 model """rescal""" +541 59 loss """bceaftersigmoid""" 
+541 59 regularizer """no""" +541 59 optimizer """adam""" +541 59 training_loop """owa""" +541 59 negative_sampler """basic""" +541 59 evaluator """rankbased""" +541 60 dataset """kinships""" +541 60 model """rescal""" +541 60 loss """bceaftersigmoid""" +541 60 regularizer """no""" +541 60 optimizer """adam""" +541 60 training_loop """owa""" +541 60 negative_sampler """basic""" +541 60 evaluator """rankbased""" +541 61 dataset """kinships""" +541 61 model """rescal""" +541 61 loss """bceaftersigmoid""" +541 61 regularizer """no""" +541 61 optimizer """adam""" +541 61 training_loop """owa""" +541 61 negative_sampler """basic""" +541 61 evaluator """rankbased""" +541 62 dataset """kinships""" +541 62 model """rescal""" +541 62 loss """bceaftersigmoid""" +541 62 regularizer """no""" +541 62 optimizer """adam""" +541 62 training_loop """owa""" +541 62 negative_sampler """basic""" +541 62 evaluator """rankbased""" +541 63 dataset """kinships""" +541 63 model """rescal""" +541 63 loss """bceaftersigmoid""" +541 63 regularizer """no""" +541 63 optimizer """adam""" +541 63 training_loop """owa""" +541 63 negative_sampler """basic""" +541 63 evaluator """rankbased""" +541 64 dataset """kinships""" +541 64 model """rescal""" +541 64 loss """bceaftersigmoid""" +541 64 regularizer """no""" +541 64 optimizer """adam""" +541 64 training_loop """owa""" +541 64 negative_sampler """basic""" +541 64 evaluator """rankbased""" +541 65 dataset """kinships""" +541 65 model """rescal""" +541 65 loss """bceaftersigmoid""" +541 65 regularizer """no""" +541 65 optimizer """adam""" +541 65 training_loop """owa""" +541 65 negative_sampler """basic""" +541 65 evaluator """rankbased""" +541 66 dataset """kinships""" +541 66 model """rescal""" +541 66 loss """bceaftersigmoid""" +541 66 regularizer """no""" +541 66 optimizer """adam""" +541 66 training_loop """owa""" +541 66 negative_sampler """basic""" +541 66 evaluator """rankbased""" +541 67 dataset """kinships""" +541 67 model """rescal""" 
+541 67 loss """bceaftersigmoid""" +541 67 regularizer """no""" +541 67 optimizer """adam""" +541 67 training_loop """owa""" +541 67 negative_sampler """basic""" +541 67 evaluator """rankbased""" +541 68 dataset """kinships""" +541 68 model """rescal""" +541 68 loss """bceaftersigmoid""" +541 68 regularizer """no""" +541 68 optimizer """adam""" +541 68 training_loop """owa""" +541 68 negative_sampler """basic""" +541 68 evaluator """rankbased""" +541 69 dataset """kinships""" +541 69 model """rescal""" +541 69 loss """bceaftersigmoid""" +541 69 regularizer """no""" +541 69 optimizer """adam""" +541 69 training_loop """owa""" +541 69 negative_sampler """basic""" +541 69 evaluator """rankbased""" +541 70 dataset """kinships""" +541 70 model """rescal""" +541 70 loss """bceaftersigmoid""" +541 70 regularizer """no""" +541 70 optimizer """adam""" +541 70 training_loop """owa""" +541 70 negative_sampler """basic""" +541 70 evaluator """rankbased""" +541 71 dataset """kinships""" +541 71 model """rescal""" +541 71 loss """bceaftersigmoid""" +541 71 regularizer """no""" +541 71 optimizer """adam""" +541 71 training_loop """owa""" +541 71 negative_sampler """basic""" +541 71 evaluator """rankbased""" +541 72 dataset """kinships""" +541 72 model """rescal""" +541 72 loss """bceaftersigmoid""" +541 72 regularizer """no""" +541 72 optimizer """adam""" +541 72 training_loop """owa""" +541 72 negative_sampler """basic""" +541 72 evaluator """rankbased""" +541 73 dataset """kinships""" +541 73 model """rescal""" +541 73 loss """bceaftersigmoid""" +541 73 regularizer """no""" +541 73 optimizer """adam""" +541 73 training_loop """owa""" +541 73 negative_sampler """basic""" +541 73 evaluator """rankbased""" +541 74 dataset """kinships""" +541 74 model """rescal""" +541 74 loss """bceaftersigmoid""" +541 74 regularizer """no""" +541 74 optimizer """adam""" +541 74 training_loop """owa""" +541 74 negative_sampler """basic""" +541 74 evaluator """rankbased""" +541 75 dataset 
"""kinships""" +541 75 model """rescal""" +541 75 loss """bceaftersigmoid""" +541 75 regularizer """no""" +541 75 optimizer """adam""" +541 75 training_loop """owa""" +541 75 negative_sampler """basic""" +541 75 evaluator """rankbased""" +541 76 dataset """kinships""" +541 76 model """rescal""" +541 76 loss """bceaftersigmoid""" +541 76 regularizer """no""" +541 76 optimizer """adam""" +541 76 training_loop """owa""" +541 76 negative_sampler """basic""" +541 76 evaluator """rankbased""" +541 77 dataset """kinships""" +541 77 model """rescal""" +541 77 loss """bceaftersigmoid""" +541 77 regularizer """no""" +541 77 optimizer """adam""" +541 77 training_loop """owa""" +541 77 negative_sampler """basic""" +541 77 evaluator """rankbased""" +541 78 dataset """kinships""" +541 78 model """rescal""" +541 78 loss """bceaftersigmoid""" +541 78 regularizer """no""" +541 78 optimizer """adam""" +541 78 training_loop """owa""" +541 78 negative_sampler """basic""" +541 78 evaluator """rankbased""" +541 79 dataset """kinships""" +541 79 model """rescal""" +541 79 loss """bceaftersigmoid""" +541 79 regularizer """no""" +541 79 optimizer """adam""" +541 79 training_loop """owa""" +541 79 negative_sampler """basic""" +541 79 evaluator """rankbased""" +541 80 dataset """kinships""" +541 80 model """rescal""" +541 80 loss """bceaftersigmoid""" +541 80 regularizer """no""" +541 80 optimizer """adam""" +541 80 training_loop """owa""" +541 80 negative_sampler """basic""" +541 80 evaluator """rankbased""" +541 81 dataset """kinships""" +541 81 model """rescal""" +541 81 loss """bceaftersigmoid""" +541 81 regularizer """no""" +541 81 optimizer """adam""" +541 81 training_loop """owa""" +541 81 negative_sampler """basic""" +541 81 evaluator """rankbased""" +541 82 dataset """kinships""" +541 82 model """rescal""" +541 82 loss """bceaftersigmoid""" +541 82 regularizer """no""" +541 82 optimizer """adam""" +541 82 training_loop """owa""" +541 82 negative_sampler """basic""" +541 82 evaluator 
"""rankbased""" +541 83 dataset """kinships""" +541 83 model """rescal""" +541 83 loss """bceaftersigmoid""" +541 83 regularizer """no""" +541 83 optimizer """adam""" +541 83 training_loop """owa""" +541 83 negative_sampler """basic""" +541 83 evaluator """rankbased""" +541 84 dataset """kinships""" +541 84 model """rescal""" +541 84 loss """bceaftersigmoid""" +541 84 regularizer """no""" +541 84 optimizer """adam""" +541 84 training_loop """owa""" +541 84 negative_sampler """basic""" +541 84 evaluator """rankbased""" +541 85 dataset """kinships""" +541 85 model """rescal""" +541 85 loss """bceaftersigmoid""" +541 85 regularizer """no""" +541 85 optimizer """adam""" +541 85 training_loop """owa""" +541 85 negative_sampler """basic""" +541 85 evaluator """rankbased""" +541 86 dataset """kinships""" +541 86 model """rescal""" +541 86 loss """bceaftersigmoid""" +541 86 regularizer """no""" +541 86 optimizer """adam""" +541 86 training_loop """owa""" +541 86 negative_sampler """basic""" +541 86 evaluator """rankbased""" +541 87 dataset """kinships""" +541 87 model """rescal""" +541 87 loss """bceaftersigmoid""" +541 87 regularizer """no""" +541 87 optimizer """adam""" +541 87 training_loop """owa""" +541 87 negative_sampler """basic""" +541 87 evaluator """rankbased""" +541 88 dataset """kinships""" +541 88 model """rescal""" +541 88 loss """bceaftersigmoid""" +541 88 regularizer """no""" +541 88 optimizer """adam""" +541 88 training_loop """owa""" +541 88 negative_sampler """basic""" +541 88 evaluator """rankbased""" +541 89 dataset """kinships""" +541 89 model """rescal""" +541 89 loss """bceaftersigmoid""" +541 89 regularizer """no""" +541 89 optimizer """adam""" +541 89 training_loop """owa""" +541 89 negative_sampler """basic""" +541 89 evaluator """rankbased""" +541 90 dataset """kinships""" +541 90 model """rescal""" +541 90 loss """bceaftersigmoid""" +541 90 regularizer """no""" +541 90 optimizer """adam""" +541 90 training_loop """owa""" +541 90 
negative_sampler """basic""" +541 90 evaluator """rankbased""" +541 91 dataset """kinships""" +541 91 model """rescal""" +541 91 loss """bceaftersigmoid""" +541 91 regularizer """no""" +541 91 optimizer """adam""" +541 91 training_loop """owa""" +541 91 negative_sampler """basic""" +541 91 evaluator """rankbased""" +541 92 dataset """kinships""" +541 92 model """rescal""" +541 92 loss """bceaftersigmoid""" +541 92 regularizer """no""" +541 92 optimizer """adam""" +541 92 training_loop """owa""" +541 92 negative_sampler """basic""" +541 92 evaluator """rankbased""" +541 93 dataset """kinships""" +541 93 model """rescal""" +541 93 loss """bceaftersigmoid""" +541 93 regularizer """no""" +541 93 optimizer """adam""" +541 93 training_loop """owa""" +541 93 negative_sampler """basic""" +541 93 evaluator """rankbased""" +541 94 dataset """kinships""" +541 94 model """rescal""" +541 94 loss """bceaftersigmoid""" +541 94 regularizer """no""" +541 94 optimizer """adam""" +541 94 training_loop """owa""" +541 94 negative_sampler """basic""" +541 94 evaluator """rankbased""" +541 95 dataset """kinships""" +541 95 model """rescal""" +541 95 loss """bceaftersigmoid""" +541 95 regularizer """no""" +541 95 optimizer """adam""" +541 95 training_loop """owa""" +541 95 negative_sampler """basic""" +541 95 evaluator """rankbased""" +541 96 dataset """kinships""" +541 96 model """rescal""" +541 96 loss """bceaftersigmoid""" +541 96 regularizer """no""" +541 96 optimizer """adam""" +541 96 training_loop """owa""" +541 96 negative_sampler """basic""" +541 96 evaluator """rankbased""" +541 97 dataset """kinships""" +541 97 model """rescal""" +541 97 loss """bceaftersigmoid""" +541 97 regularizer """no""" +541 97 optimizer """adam""" +541 97 training_loop """owa""" +541 97 negative_sampler """basic""" +541 97 evaluator """rankbased""" +541 98 dataset """kinships""" +541 98 model """rescal""" +541 98 loss """bceaftersigmoid""" +541 98 regularizer """no""" +541 98 optimizer """adam""" +541 98 
training_loop """owa""" +541 98 negative_sampler """basic""" +541 98 evaluator """rankbased""" +541 99 dataset """kinships""" +541 99 model """rescal""" +541 99 loss """bceaftersigmoid""" +541 99 regularizer """no""" +541 99 optimizer """adam""" +541 99 training_loop """owa""" +541 99 negative_sampler """basic""" +541 99 evaluator """rankbased""" +541 100 dataset """kinships""" +541 100 model """rescal""" +541 100 loss """bceaftersigmoid""" +541 100 regularizer """no""" +541 100 optimizer """adam""" +541 100 training_loop """owa""" +541 100 negative_sampler """basic""" +541 100 evaluator """rankbased""" +542 1 model.embedding_dim 2.0 +542 1 optimizer.lr 0.08585719315036376 +542 1 negative_sampler.num_negs_per_pos 56.0 +542 1 training.batch_size 2.0 +542 2 model.embedding_dim 0.0 +542 2 optimizer.lr 0.03129395441107133 +542 2 negative_sampler.num_negs_per_pos 32.0 +542 2 training.batch_size 2.0 +542 3 model.embedding_dim 2.0 +542 3 optimizer.lr 0.023418543186211623 +542 3 negative_sampler.num_negs_per_pos 8.0 +542 3 training.batch_size 2.0 +542 4 model.embedding_dim 2.0 +542 4 optimizer.lr 0.0011305154939573404 +542 4 negative_sampler.num_negs_per_pos 54.0 +542 4 training.batch_size 2.0 +542 5 model.embedding_dim 0.0 +542 5 optimizer.lr 0.002011961613349735 +542 5 negative_sampler.num_negs_per_pos 50.0 +542 5 training.batch_size 0.0 +542 6 model.embedding_dim 1.0 +542 6 optimizer.lr 0.007257349324452044 +542 6 negative_sampler.num_negs_per_pos 52.0 +542 6 training.batch_size 1.0 +542 7 model.embedding_dim 1.0 +542 7 optimizer.lr 0.0754645649194669 +542 7 negative_sampler.num_negs_per_pos 2.0 +542 7 training.batch_size 1.0 +542 8 model.embedding_dim 2.0 +542 8 optimizer.lr 0.0012560342862272089 +542 8 negative_sampler.num_negs_per_pos 33.0 +542 8 training.batch_size 0.0 +542 9 model.embedding_dim 2.0 +542 9 optimizer.lr 0.03920320001276187 +542 9 negative_sampler.num_negs_per_pos 6.0 +542 9 training.batch_size 2.0 +542 10 model.embedding_dim 0.0 +542 10 optimizer.lr 
0.046005359987812494 +542 10 negative_sampler.num_negs_per_pos 79.0 +542 10 training.batch_size 1.0 +542 11 model.embedding_dim 1.0 +542 11 optimizer.lr 0.0017402909963206874 +542 11 negative_sampler.num_negs_per_pos 41.0 +542 11 training.batch_size 0.0 +542 12 model.embedding_dim 0.0 +542 12 optimizer.lr 0.0022792000306624946 +542 12 negative_sampler.num_negs_per_pos 64.0 +542 12 training.batch_size 1.0 +542 13 model.embedding_dim 0.0 +542 13 optimizer.lr 0.02027278601548196 +542 13 negative_sampler.num_negs_per_pos 26.0 +542 13 training.batch_size 0.0 +542 14 model.embedding_dim 2.0 +542 14 optimizer.lr 0.005078707002852688 +542 14 negative_sampler.num_negs_per_pos 78.0 +542 14 training.batch_size 1.0 +542 15 model.embedding_dim 1.0 +542 15 optimizer.lr 0.004482178845226761 +542 15 negative_sampler.num_negs_per_pos 56.0 +542 15 training.batch_size 1.0 +542 16 model.embedding_dim 1.0 +542 16 optimizer.lr 0.09875398544060096 +542 16 negative_sampler.num_negs_per_pos 6.0 +542 16 training.batch_size 0.0 +542 17 model.embedding_dim 1.0 +542 17 optimizer.lr 0.0015230716721701927 +542 17 negative_sampler.num_negs_per_pos 4.0 +542 17 training.batch_size 0.0 +542 18 model.embedding_dim 1.0 +542 18 optimizer.lr 0.0034168004266310072 +542 18 negative_sampler.num_negs_per_pos 92.0 +542 18 training.batch_size 1.0 +542 19 model.embedding_dim 0.0 +542 19 optimizer.lr 0.00356479833550095 +542 19 negative_sampler.num_negs_per_pos 31.0 +542 19 training.batch_size 1.0 +542 20 model.embedding_dim 2.0 +542 20 optimizer.lr 0.008839547072283615 +542 20 negative_sampler.num_negs_per_pos 88.0 +542 20 training.batch_size 0.0 +542 21 model.embedding_dim 1.0 +542 21 optimizer.lr 0.008282998551829755 +542 21 negative_sampler.num_negs_per_pos 31.0 +542 21 training.batch_size 0.0 +542 22 model.embedding_dim 2.0 +542 22 optimizer.lr 0.006756198353249621 +542 22 negative_sampler.num_negs_per_pos 74.0 +542 22 training.batch_size 0.0 +542 23 model.embedding_dim 0.0 +542 23 optimizer.lr 
0.05342224117944876 +542 23 negative_sampler.num_negs_per_pos 62.0 +542 23 training.batch_size 2.0 +542 24 model.embedding_dim 0.0 +542 24 optimizer.lr 0.018173236940883023 +542 24 negative_sampler.num_negs_per_pos 35.0 +542 24 training.batch_size 1.0 +542 25 model.embedding_dim 1.0 +542 25 optimizer.lr 0.07555298019456563 +542 25 negative_sampler.num_negs_per_pos 14.0 +542 25 training.batch_size 0.0 +542 26 model.embedding_dim 0.0 +542 26 optimizer.lr 0.022195658425871494 +542 26 negative_sampler.num_negs_per_pos 54.0 +542 26 training.batch_size 2.0 +542 27 model.embedding_dim 0.0 +542 27 optimizer.lr 0.001217229497159046 +542 27 negative_sampler.num_negs_per_pos 2.0 +542 27 training.batch_size 0.0 +542 28 model.embedding_dim 1.0 +542 28 optimizer.lr 0.001413984433358812 +542 28 negative_sampler.num_negs_per_pos 68.0 +542 28 training.batch_size 1.0 +542 29 model.embedding_dim 2.0 +542 29 optimizer.lr 0.001582794040987628 +542 29 negative_sampler.num_negs_per_pos 75.0 +542 29 training.batch_size 0.0 +542 30 model.embedding_dim 1.0 +542 30 optimizer.lr 0.016500339259268956 +542 30 negative_sampler.num_negs_per_pos 87.0 +542 30 training.batch_size 2.0 +542 31 model.embedding_dim 1.0 +542 31 optimizer.lr 0.024395943026458193 +542 31 negative_sampler.num_negs_per_pos 12.0 +542 31 training.batch_size 1.0 +542 32 model.embedding_dim 1.0 +542 32 optimizer.lr 0.0045035235596798655 +542 32 negative_sampler.num_negs_per_pos 77.0 +542 32 training.batch_size 1.0 +542 33 model.embedding_dim 0.0 +542 33 optimizer.lr 0.003595602427610575 +542 33 negative_sampler.num_negs_per_pos 79.0 +542 33 training.batch_size 1.0 +542 34 model.embedding_dim 2.0 +542 34 optimizer.lr 0.00255722501405987 +542 34 negative_sampler.num_negs_per_pos 49.0 +542 34 training.batch_size 1.0 +542 35 model.embedding_dim 0.0 +542 35 optimizer.lr 0.01987638185046772 +542 35 negative_sampler.num_negs_per_pos 89.0 +542 35 training.batch_size 1.0 +542 36 model.embedding_dim 1.0 +542 36 optimizer.lr 
0.004182013298886464 +542 36 negative_sampler.num_negs_per_pos 53.0 +542 36 training.batch_size 0.0 +542 37 model.embedding_dim 1.0 +542 37 optimizer.lr 0.013045073250031904 +542 37 negative_sampler.num_negs_per_pos 16.0 +542 37 training.batch_size 0.0 +542 38 model.embedding_dim 0.0 +542 38 optimizer.lr 0.05790465718025807 +542 38 negative_sampler.num_negs_per_pos 50.0 +542 38 training.batch_size 2.0 +542 39 model.embedding_dim 0.0 +542 39 optimizer.lr 0.01663817767899286 +542 39 negative_sampler.num_negs_per_pos 46.0 +542 39 training.batch_size 1.0 +542 40 model.embedding_dim 1.0 +542 40 optimizer.lr 0.026488506052323384 +542 40 negative_sampler.num_negs_per_pos 91.0 +542 40 training.batch_size 1.0 +542 41 model.embedding_dim 2.0 +542 41 optimizer.lr 0.0883930072830524 +542 41 negative_sampler.num_negs_per_pos 56.0 +542 41 training.batch_size 2.0 +542 42 model.embedding_dim 0.0 +542 42 optimizer.lr 0.010447023013383237 +542 42 negative_sampler.num_negs_per_pos 98.0 +542 42 training.batch_size 0.0 +542 43 model.embedding_dim 1.0 +542 43 optimizer.lr 0.057531530923569475 +542 43 negative_sampler.num_negs_per_pos 49.0 +542 43 training.batch_size 0.0 +542 44 model.embedding_dim 1.0 +542 44 optimizer.lr 0.013738403251909035 +542 44 negative_sampler.num_negs_per_pos 51.0 +542 44 training.batch_size 1.0 +542 45 model.embedding_dim 1.0 +542 45 optimizer.lr 0.0034422898192360872 +542 45 negative_sampler.num_negs_per_pos 76.0 +542 45 training.batch_size 2.0 +542 46 model.embedding_dim 2.0 +542 46 optimizer.lr 0.0030741286916522188 +542 46 negative_sampler.num_negs_per_pos 81.0 +542 46 training.batch_size 1.0 +542 47 model.embedding_dim 2.0 +542 47 optimizer.lr 0.03587525300890789 +542 47 negative_sampler.num_negs_per_pos 60.0 +542 47 training.batch_size 0.0 +542 48 model.embedding_dim 2.0 +542 48 optimizer.lr 0.0029897596316012486 +542 48 negative_sampler.num_negs_per_pos 95.0 +542 48 training.batch_size 1.0 +542 49 model.embedding_dim 2.0 +542 49 optimizer.lr 
0.026178187352428366 +542 49 negative_sampler.num_negs_per_pos 24.0 +542 49 training.batch_size 0.0 +542 50 model.embedding_dim 2.0 +542 50 optimizer.lr 0.01938921496450289 +542 50 negative_sampler.num_negs_per_pos 22.0 +542 50 training.batch_size 0.0 +542 51 model.embedding_dim 2.0 +542 51 optimizer.lr 0.010974353997092737 +542 51 negative_sampler.num_negs_per_pos 82.0 +542 51 training.batch_size 0.0 +542 52 model.embedding_dim 2.0 +542 52 optimizer.lr 0.022942924994881247 +542 52 negative_sampler.num_negs_per_pos 51.0 +542 52 training.batch_size 0.0 +542 53 model.embedding_dim 2.0 +542 53 optimizer.lr 0.005119645867218031 +542 53 negative_sampler.num_negs_per_pos 73.0 +542 53 training.batch_size 1.0 +542 54 model.embedding_dim 0.0 +542 54 optimizer.lr 0.008632959643658913 +542 54 negative_sampler.num_negs_per_pos 32.0 +542 54 training.batch_size 1.0 +542 55 model.embedding_dim 0.0 +542 55 optimizer.lr 0.012230432977278728 +542 55 negative_sampler.num_negs_per_pos 95.0 +542 55 training.batch_size 2.0 +542 56 model.embedding_dim 1.0 +542 56 optimizer.lr 0.005071886402723437 +542 56 negative_sampler.num_negs_per_pos 27.0 +542 56 training.batch_size 1.0 +542 57 model.embedding_dim 0.0 +542 57 optimizer.lr 0.00335074219476769 +542 57 negative_sampler.num_negs_per_pos 56.0 +542 57 training.batch_size 0.0 +542 58 model.embedding_dim 1.0 +542 58 optimizer.lr 0.008336824990255854 +542 58 negative_sampler.num_negs_per_pos 19.0 +542 58 training.batch_size 2.0 +542 59 model.embedding_dim 2.0 +542 59 optimizer.lr 0.028153786802427118 +542 59 negative_sampler.num_negs_per_pos 2.0 +542 59 training.batch_size 0.0 +542 60 model.embedding_dim 1.0 +542 60 optimizer.lr 0.004244979816605221 +542 60 negative_sampler.num_negs_per_pos 11.0 +542 60 training.batch_size 1.0 +542 61 model.embedding_dim 2.0 +542 61 optimizer.lr 0.005650861368039831 +542 61 negative_sampler.num_negs_per_pos 94.0 +542 61 training.batch_size 1.0 +542 62 model.embedding_dim 1.0 +542 62 optimizer.lr 
0.030675682158077554 +542 62 negative_sampler.num_negs_per_pos 33.0 +542 62 training.batch_size 2.0 +542 63 model.embedding_dim 1.0 +542 63 optimizer.lr 0.00106331782150605 +542 63 negative_sampler.num_negs_per_pos 42.0 +542 63 training.batch_size 0.0 +542 64 model.embedding_dim 1.0 +542 64 optimizer.lr 0.004658862673744397 +542 64 negative_sampler.num_negs_per_pos 65.0 +542 64 training.batch_size 2.0 +542 65 model.embedding_dim 2.0 +542 65 optimizer.lr 0.004095439140430071 +542 65 negative_sampler.num_negs_per_pos 62.0 +542 65 training.batch_size 1.0 +542 66 model.embedding_dim 1.0 +542 66 optimizer.lr 0.02545530606530037 +542 66 negative_sampler.num_negs_per_pos 88.0 +542 66 training.batch_size 1.0 +542 67 model.embedding_dim 0.0 +542 67 optimizer.lr 0.001108410814251131 +542 67 negative_sampler.num_negs_per_pos 68.0 +542 67 training.batch_size 1.0 +542 68 model.embedding_dim 2.0 +542 68 optimizer.lr 0.07815565304359434 +542 68 negative_sampler.num_negs_per_pos 34.0 +542 68 training.batch_size 1.0 +542 69 model.embedding_dim 2.0 +542 69 optimizer.lr 0.001411285061788978 +542 69 negative_sampler.num_negs_per_pos 23.0 +542 69 training.batch_size 2.0 +542 70 model.embedding_dim 2.0 +542 70 optimizer.lr 0.0011143083794785543 +542 70 negative_sampler.num_negs_per_pos 0.0 +542 70 training.batch_size 2.0 +542 71 model.embedding_dim 1.0 +542 71 optimizer.lr 0.004910919001559881 +542 71 negative_sampler.num_negs_per_pos 14.0 +542 71 training.batch_size 1.0 +542 72 model.embedding_dim 0.0 +542 72 optimizer.lr 0.003633923646602531 +542 72 negative_sampler.num_negs_per_pos 84.0 +542 72 training.batch_size 0.0 +542 73 model.embedding_dim 1.0 +542 73 optimizer.lr 0.04029440705204064 +542 73 negative_sampler.num_negs_per_pos 56.0 +542 73 training.batch_size 1.0 +542 74 model.embedding_dim 1.0 +542 74 optimizer.lr 0.06553739357802313 +542 74 negative_sampler.num_negs_per_pos 61.0 +542 74 training.batch_size 1.0 +542 75 model.embedding_dim 0.0 +542 75 optimizer.lr 
0.007902594124217769 +542 75 negative_sampler.num_negs_per_pos 33.0 +542 75 training.batch_size 1.0 +542 76 model.embedding_dim 0.0 +542 76 optimizer.lr 0.058507031960945574 +542 76 negative_sampler.num_negs_per_pos 74.0 +542 76 training.batch_size 1.0 +542 77 model.embedding_dim 2.0 +542 77 optimizer.lr 0.0023855222668084006 +542 77 negative_sampler.num_negs_per_pos 51.0 +542 77 training.batch_size 1.0 +542 78 model.embedding_dim 0.0 +542 78 optimizer.lr 0.013118384314410585 +542 78 negative_sampler.num_negs_per_pos 48.0 +542 78 training.batch_size 1.0 +542 79 model.embedding_dim 2.0 +542 79 optimizer.lr 0.07983694346349007 +542 79 negative_sampler.num_negs_per_pos 55.0 +542 79 training.batch_size 2.0 +542 80 model.embedding_dim 0.0 +542 80 optimizer.lr 0.0018350713846413092 +542 80 negative_sampler.num_negs_per_pos 6.0 +542 80 training.batch_size 1.0 +542 81 model.embedding_dim 1.0 +542 81 optimizer.lr 0.005227702776198579 +542 81 negative_sampler.num_negs_per_pos 17.0 +542 81 training.batch_size 1.0 +542 82 model.embedding_dim 0.0 +542 82 optimizer.lr 0.0013363743391070707 +542 82 negative_sampler.num_negs_per_pos 94.0 +542 82 training.batch_size 0.0 +542 83 model.embedding_dim 2.0 +542 83 optimizer.lr 0.0552697610048346 +542 83 negative_sampler.num_negs_per_pos 30.0 +542 83 training.batch_size 2.0 +542 84 model.embedding_dim 2.0 +542 84 optimizer.lr 0.009044310802552918 +542 84 negative_sampler.num_negs_per_pos 81.0 +542 84 training.batch_size 0.0 +542 85 model.embedding_dim 2.0 +542 85 optimizer.lr 0.002113633080321735 +542 85 negative_sampler.num_negs_per_pos 33.0 +542 85 training.batch_size 1.0 +542 86 model.embedding_dim 2.0 +542 86 optimizer.lr 0.07565022983913042 +542 86 negative_sampler.num_negs_per_pos 22.0 +542 86 training.batch_size 0.0 +542 87 model.embedding_dim 2.0 +542 87 optimizer.lr 0.0032195381870510774 +542 87 negative_sampler.num_negs_per_pos 62.0 +542 87 training.batch_size 2.0 +542 88 model.embedding_dim 2.0 +542 88 optimizer.lr 
0.04619092761160408 +542 88 negative_sampler.num_negs_per_pos 28.0 +542 88 training.batch_size 1.0 +542 89 model.embedding_dim 0.0 +542 89 optimizer.lr 0.0034957288840244357 +542 89 negative_sampler.num_negs_per_pos 40.0 +542 89 training.batch_size 0.0 +542 90 model.embedding_dim 2.0 +542 90 optimizer.lr 0.03380964942128759 +542 90 negative_sampler.num_negs_per_pos 58.0 +542 90 training.batch_size 2.0 +542 91 model.embedding_dim 1.0 +542 91 optimizer.lr 0.0039468142999175585 +542 91 negative_sampler.num_negs_per_pos 46.0 +542 91 training.batch_size 2.0 +542 92 model.embedding_dim 2.0 +542 92 optimizer.lr 0.0016112492839327971 +542 92 negative_sampler.num_negs_per_pos 63.0 +542 92 training.batch_size 2.0 +542 93 model.embedding_dim 2.0 +542 93 optimizer.lr 0.04790345571816047 +542 93 negative_sampler.num_negs_per_pos 61.0 +542 93 training.batch_size 2.0 +542 94 model.embedding_dim 2.0 +542 94 optimizer.lr 0.03243032908512398 +542 94 negative_sampler.num_negs_per_pos 57.0 +542 94 training.batch_size 2.0 +542 95 model.embedding_dim 0.0 +542 95 optimizer.lr 0.0010407340455165094 +542 95 negative_sampler.num_negs_per_pos 72.0 +542 95 training.batch_size 0.0 +542 96 model.embedding_dim 0.0 +542 96 optimizer.lr 0.047326562962578964 +542 96 negative_sampler.num_negs_per_pos 22.0 +542 96 training.batch_size 1.0 +542 97 model.embedding_dim 1.0 +542 97 optimizer.lr 0.0012943723845570032 +542 97 negative_sampler.num_negs_per_pos 61.0 +542 97 training.batch_size 1.0 +542 98 model.embedding_dim 2.0 +542 98 optimizer.lr 0.00362167009174116 +542 98 negative_sampler.num_negs_per_pos 41.0 +542 98 training.batch_size 2.0 +542 99 model.embedding_dim 1.0 +542 99 optimizer.lr 0.04036910368490924 +542 99 negative_sampler.num_negs_per_pos 3.0 +542 99 training.batch_size 1.0 +542 100 model.embedding_dim 2.0 +542 100 optimizer.lr 0.01937856894911993 +542 100 negative_sampler.num_negs_per_pos 58.0 +542 100 training.batch_size 2.0 +542 1 dataset """kinships""" +542 1 model """rescal""" +542 1 
loss """softplus""" +542 1 regularizer """no""" +542 1 optimizer """adam""" +542 1 training_loop """owa""" +542 1 negative_sampler """basic""" +542 1 evaluator """rankbased""" +542 2 dataset """kinships""" +542 2 model """rescal""" +542 2 loss """softplus""" +542 2 regularizer """no""" +542 2 optimizer """adam""" +542 2 training_loop """owa""" +542 2 negative_sampler """basic""" +542 2 evaluator """rankbased""" +542 3 dataset """kinships""" +542 3 model """rescal""" +542 3 loss """softplus""" +542 3 regularizer """no""" +542 3 optimizer """adam""" +542 3 training_loop """owa""" +542 3 negative_sampler """basic""" +542 3 evaluator """rankbased""" +542 4 dataset """kinships""" +542 4 model """rescal""" +542 4 loss """softplus""" +542 4 regularizer """no""" +542 4 optimizer """adam""" +542 4 training_loop """owa""" +542 4 negative_sampler """basic""" +542 4 evaluator """rankbased""" +542 5 dataset """kinships""" +542 5 model """rescal""" +542 5 loss """softplus""" +542 5 regularizer """no""" +542 5 optimizer """adam""" +542 5 training_loop """owa""" +542 5 negative_sampler """basic""" +542 5 evaluator """rankbased""" +542 6 dataset """kinships""" +542 6 model """rescal""" +542 6 loss """softplus""" +542 6 regularizer """no""" +542 6 optimizer """adam""" +542 6 training_loop """owa""" +542 6 negative_sampler """basic""" +542 6 evaluator """rankbased""" +542 7 dataset """kinships""" +542 7 model """rescal""" +542 7 loss """softplus""" +542 7 regularizer """no""" +542 7 optimizer """adam""" +542 7 training_loop """owa""" +542 7 negative_sampler """basic""" +542 7 evaluator """rankbased""" +542 8 dataset """kinships""" +542 8 model """rescal""" +542 8 loss """softplus""" +542 8 regularizer """no""" +542 8 optimizer """adam""" +542 8 training_loop """owa""" +542 8 negative_sampler """basic""" +542 8 evaluator """rankbased""" +542 9 dataset """kinships""" +542 9 model """rescal""" +542 9 loss """softplus""" +542 9 regularizer """no""" +542 9 optimizer """adam""" +542 9 
training_loop """owa""" +542 9 negative_sampler """basic""" +542 9 evaluator """rankbased""" +542 10 dataset """kinships""" +542 10 model """rescal""" +542 10 loss """softplus""" +542 10 regularizer """no""" +542 10 optimizer """adam""" +542 10 training_loop """owa""" +542 10 negative_sampler """basic""" +542 10 evaluator """rankbased""" +542 11 dataset """kinships""" +542 11 model """rescal""" +542 11 loss """softplus""" +542 11 regularizer """no""" +542 11 optimizer """adam""" +542 11 training_loop """owa""" +542 11 negative_sampler """basic""" +542 11 evaluator """rankbased""" +542 12 dataset """kinships""" +542 12 model """rescal""" +542 12 loss """softplus""" +542 12 regularizer """no""" +542 12 optimizer """adam""" +542 12 training_loop """owa""" +542 12 negative_sampler """basic""" +542 12 evaluator """rankbased""" +542 13 dataset """kinships""" +542 13 model """rescal""" +542 13 loss """softplus""" +542 13 regularizer """no""" +542 13 optimizer """adam""" +542 13 training_loop """owa""" +542 13 negative_sampler """basic""" +542 13 evaluator """rankbased""" +542 14 dataset """kinships""" +542 14 model """rescal""" +542 14 loss """softplus""" +542 14 regularizer """no""" +542 14 optimizer """adam""" +542 14 training_loop """owa""" +542 14 negative_sampler """basic""" +542 14 evaluator """rankbased""" +542 15 dataset """kinships""" +542 15 model """rescal""" +542 15 loss """softplus""" +542 15 regularizer """no""" +542 15 optimizer """adam""" +542 15 training_loop """owa""" +542 15 negative_sampler """basic""" +542 15 evaluator """rankbased""" +542 16 dataset """kinships""" +542 16 model """rescal""" +542 16 loss """softplus""" +542 16 regularizer """no""" +542 16 optimizer """adam""" +542 16 training_loop """owa""" +542 16 negative_sampler """basic""" +542 16 evaluator """rankbased""" +542 17 dataset """kinships""" +542 17 model """rescal""" +542 17 loss """softplus""" +542 17 regularizer """no""" +542 17 optimizer """adam""" +542 17 training_loop """owa""" 
+542 17 negative_sampler """basic""" +542 17 evaluator """rankbased""" +542 18 dataset """kinships""" +542 18 model """rescal""" +542 18 loss """softplus""" +542 18 regularizer """no""" +542 18 optimizer """adam""" +542 18 training_loop """owa""" +542 18 negative_sampler """basic""" +542 18 evaluator """rankbased""" +542 19 dataset """kinships""" +542 19 model """rescal""" +542 19 loss """softplus""" +542 19 regularizer """no""" +542 19 optimizer """adam""" +542 19 training_loop """owa""" +542 19 negative_sampler """basic""" +542 19 evaluator """rankbased""" +542 20 dataset """kinships""" +542 20 model """rescal""" +542 20 loss """softplus""" +542 20 regularizer """no""" +542 20 optimizer """adam""" +542 20 training_loop """owa""" +542 20 negative_sampler """basic""" +542 20 evaluator """rankbased""" +542 21 dataset """kinships""" +542 21 model """rescal""" +542 21 loss """softplus""" +542 21 regularizer """no""" +542 21 optimizer """adam""" +542 21 training_loop """owa""" +542 21 negative_sampler """basic""" +542 21 evaluator """rankbased""" +542 22 dataset """kinships""" +542 22 model """rescal""" +542 22 loss """softplus""" +542 22 regularizer """no""" +542 22 optimizer """adam""" +542 22 training_loop """owa""" +542 22 negative_sampler """basic""" +542 22 evaluator """rankbased""" +542 23 dataset """kinships""" +542 23 model """rescal""" +542 23 loss """softplus""" +542 23 regularizer """no""" +542 23 optimizer """adam""" +542 23 training_loop """owa""" +542 23 negative_sampler """basic""" +542 23 evaluator """rankbased""" +542 24 dataset """kinships""" +542 24 model """rescal""" +542 24 loss """softplus""" +542 24 regularizer """no""" +542 24 optimizer """adam""" +542 24 training_loop """owa""" +542 24 negative_sampler """basic""" +542 24 evaluator """rankbased""" +542 25 dataset """kinships""" +542 25 model """rescal""" +542 25 loss """softplus""" +542 25 regularizer """no""" +542 25 optimizer """adam""" +542 25 training_loop """owa""" +542 25 
negative_sampler """basic""" +542 25 evaluator """rankbased""" +542 26 dataset """kinships""" +542 26 model """rescal""" +542 26 loss """softplus""" +542 26 regularizer """no""" +542 26 optimizer """adam""" +542 26 training_loop """owa""" +542 26 negative_sampler """basic""" +542 26 evaluator """rankbased""" +542 27 dataset """kinships""" +542 27 model """rescal""" +542 27 loss """softplus""" +542 27 regularizer """no""" +542 27 optimizer """adam""" +542 27 training_loop """owa""" +542 27 negative_sampler """basic""" +542 27 evaluator """rankbased""" +542 28 dataset """kinships""" +542 28 model """rescal""" +542 28 loss """softplus""" +542 28 regularizer """no""" +542 28 optimizer """adam""" +542 28 training_loop """owa""" +542 28 negative_sampler """basic""" +542 28 evaluator """rankbased""" +542 29 dataset """kinships""" +542 29 model """rescal""" +542 29 loss """softplus""" +542 29 regularizer """no""" +542 29 optimizer """adam""" +542 29 training_loop """owa""" +542 29 negative_sampler """basic""" +542 29 evaluator """rankbased""" +542 30 dataset """kinships""" +542 30 model """rescal""" +542 30 loss """softplus""" +542 30 regularizer """no""" +542 30 optimizer """adam""" +542 30 training_loop """owa""" +542 30 negative_sampler """basic""" +542 30 evaluator """rankbased""" +542 31 dataset """kinships""" +542 31 model """rescal""" +542 31 loss """softplus""" +542 31 regularizer """no""" +542 31 optimizer """adam""" +542 31 training_loop """owa""" +542 31 negative_sampler """basic""" +542 31 evaluator """rankbased""" +542 32 dataset """kinships""" +542 32 model """rescal""" +542 32 loss """softplus""" +542 32 regularizer """no""" +542 32 optimizer """adam""" +542 32 training_loop """owa""" +542 32 negative_sampler """basic""" +542 32 evaluator """rankbased""" +542 33 dataset """kinships""" +542 33 model """rescal""" +542 33 loss """softplus""" +542 33 regularizer """no""" +542 33 optimizer """adam""" +542 33 training_loop """owa""" +542 33 negative_sampler 
"""basic""" +542 33 evaluator """rankbased""" +542 34 dataset """kinships""" +542 34 model """rescal""" +542 34 loss """softplus""" +542 34 regularizer """no""" +542 34 optimizer """adam""" +542 34 training_loop """owa""" +542 34 negative_sampler """basic""" +542 34 evaluator """rankbased""" +542 35 dataset """kinships""" +542 35 model """rescal""" +542 35 loss """softplus""" +542 35 regularizer """no""" +542 35 optimizer """adam""" +542 35 training_loop """owa""" +542 35 negative_sampler """basic""" +542 35 evaluator """rankbased""" +542 36 dataset """kinships""" +542 36 model """rescal""" +542 36 loss """softplus""" +542 36 regularizer """no""" +542 36 optimizer """adam""" +542 36 training_loop """owa""" +542 36 negative_sampler """basic""" +542 36 evaluator """rankbased""" +542 37 dataset """kinships""" +542 37 model """rescal""" +542 37 loss """softplus""" +542 37 regularizer """no""" +542 37 optimizer """adam""" +542 37 training_loop """owa""" +542 37 negative_sampler """basic""" +542 37 evaluator """rankbased""" +542 38 dataset """kinships""" +542 38 model """rescal""" +542 38 loss """softplus""" +542 38 regularizer """no""" +542 38 optimizer """adam""" +542 38 training_loop """owa""" +542 38 negative_sampler """basic""" +542 38 evaluator """rankbased""" +542 39 dataset """kinships""" +542 39 model """rescal""" +542 39 loss """softplus""" +542 39 regularizer """no""" +542 39 optimizer """adam""" +542 39 training_loop """owa""" +542 39 negative_sampler """basic""" +542 39 evaluator """rankbased""" +542 40 dataset """kinships""" +542 40 model """rescal""" +542 40 loss """softplus""" +542 40 regularizer """no""" +542 40 optimizer """adam""" +542 40 training_loop """owa""" +542 40 negative_sampler """basic""" +542 40 evaluator """rankbased""" +542 41 dataset """kinships""" +542 41 model """rescal""" +542 41 loss """softplus""" +542 41 regularizer """no""" +542 41 optimizer """adam""" +542 41 training_loop """owa""" +542 41 negative_sampler """basic""" +542 41 
evaluator """rankbased""" +542 42 dataset """kinships""" +542 42 model """rescal""" +542 42 loss """softplus""" +542 42 regularizer """no""" +542 42 optimizer """adam""" +542 42 training_loop """owa""" +542 42 negative_sampler """basic""" +542 42 evaluator """rankbased""" +542 43 dataset """kinships""" +542 43 model """rescal""" +542 43 loss """softplus""" +542 43 regularizer """no""" +542 43 optimizer """adam""" +542 43 training_loop """owa""" +542 43 negative_sampler """basic""" +542 43 evaluator """rankbased""" +542 44 dataset """kinships""" +542 44 model """rescal""" +542 44 loss """softplus""" +542 44 regularizer """no""" +542 44 optimizer """adam""" +542 44 training_loop """owa""" +542 44 negative_sampler """basic""" +542 44 evaluator """rankbased""" +542 45 dataset """kinships""" +542 45 model """rescal""" +542 45 loss """softplus""" +542 45 regularizer """no""" +542 45 optimizer """adam""" +542 45 training_loop """owa""" +542 45 negative_sampler """basic""" +542 45 evaluator """rankbased""" +542 46 dataset """kinships""" +542 46 model """rescal""" +542 46 loss """softplus""" +542 46 regularizer """no""" +542 46 optimizer """adam""" +542 46 training_loop """owa""" +542 46 negative_sampler """basic""" +542 46 evaluator """rankbased""" +542 47 dataset """kinships""" +542 47 model """rescal""" +542 47 loss """softplus""" +542 47 regularizer """no""" +542 47 optimizer """adam""" +542 47 training_loop """owa""" +542 47 negative_sampler """basic""" +542 47 evaluator """rankbased""" +542 48 dataset """kinships""" +542 48 model """rescal""" +542 48 loss """softplus""" +542 48 regularizer """no""" +542 48 optimizer """adam""" +542 48 training_loop """owa""" +542 48 negative_sampler """basic""" +542 48 evaluator """rankbased""" +542 49 dataset """kinships""" +542 49 model """rescal""" +542 49 loss """softplus""" +542 49 regularizer """no""" +542 49 optimizer """adam""" +542 49 training_loop """owa""" +542 49 negative_sampler """basic""" +542 49 evaluator 
"""rankbased""" +542 50 dataset """kinships""" +542 50 model """rescal""" +542 50 loss """softplus""" +542 50 regularizer """no""" +542 50 optimizer """adam""" +542 50 training_loop """owa""" +542 50 negative_sampler """basic""" +542 50 evaluator """rankbased""" +542 51 dataset """kinships""" +542 51 model """rescal""" +542 51 loss """softplus""" +542 51 regularizer """no""" +542 51 optimizer """adam""" +542 51 training_loop """owa""" +542 51 negative_sampler """basic""" +542 51 evaluator """rankbased""" +542 52 dataset """kinships""" +542 52 model """rescal""" +542 52 loss """softplus""" +542 52 regularizer """no""" +542 52 optimizer """adam""" +542 52 training_loop """owa""" +542 52 negative_sampler """basic""" +542 52 evaluator """rankbased""" +542 53 dataset """kinships""" +542 53 model """rescal""" +542 53 loss """softplus""" +542 53 regularizer """no""" +542 53 optimizer """adam""" +542 53 training_loop """owa""" +542 53 negative_sampler """basic""" +542 53 evaluator """rankbased""" +542 54 dataset """kinships""" +542 54 model """rescal""" +542 54 loss """softplus""" +542 54 regularizer """no""" +542 54 optimizer """adam""" +542 54 training_loop """owa""" +542 54 negative_sampler """basic""" +542 54 evaluator """rankbased""" +542 55 dataset """kinships""" +542 55 model """rescal""" +542 55 loss """softplus""" +542 55 regularizer """no""" +542 55 optimizer """adam""" +542 55 training_loop """owa""" +542 55 negative_sampler """basic""" +542 55 evaluator """rankbased""" +542 56 dataset """kinships""" +542 56 model """rescal""" +542 56 loss """softplus""" +542 56 regularizer """no""" +542 56 optimizer """adam""" +542 56 training_loop """owa""" +542 56 negative_sampler """basic""" +542 56 evaluator """rankbased""" +542 57 dataset """kinships""" +542 57 model """rescal""" +542 57 loss """softplus""" +542 57 regularizer """no""" +542 57 optimizer """adam""" +542 57 training_loop """owa""" +542 57 negative_sampler """basic""" +542 57 evaluator """rankbased""" +542 58 
dataset """kinships""" +542 58 model """rescal""" +542 58 loss """softplus""" +542 58 regularizer """no""" +542 58 optimizer """adam""" +542 58 training_loop """owa""" +542 58 negative_sampler """basic""" +542 58 evaluator """rankbased""" +542 59 dataset """kinships""" +542 59 model """rescal""" +542 59 loss """softplus""" +542 59 regularizer """no""" +542 59 optimizer """adam""" +542 59 training_loop """owa""" +542 59 negative_sampler """basic""" +542 59 evaluator """rankbased""" +542 60 dataset """kinships""" +542 60 model """rescal""" +542 60 loss """softplus""" +542 60 regularizer """no""" +542 60 optimizer """adam""" +542 60 training_loop """owa""" +542 60 negative_sampler """basic""" +542 60 evaluator """rankbased""" +542 61 dataset """kinships""" +542 61 model """rescal""" +542 61 loss """softplus""" +542 61 regularizer """no""" +542 61 optimizer """adam""" +542 61 training_loop """owa""" +542 61 negative_sampler """basic""" +542 61 evaluator """rankbased""" +542 62 dataset """kinships""" +542 62 model """rescal""" +542 62 loss """softplus""" +542 62 regularizer """no""" +542 62 optimizer """adam""" +542 62 training_loop """owa""" +542 62 negative_sampler """basic""" +542 62 evaluator """rankbased""" +542 63 dataset """kinships""" +542 63 model """rescal""" +542 63 loss """softplus""" +542 63 regularizer """no""" +542 63 optimizer """adam""" +542 63 training_loop """owa""" +542 63 negative_sampler """basic""" +542 63 evaluator """rankbased""" +542 64 dataset """kinships""" +542 64 model """rescal""" +542 64 loss """softplus""" +542 64 regularizer """no""" +542 64 optimizer """adam""" +542 64 training_loop """owa""" +542 64 negative_sampler """basic""" +542 64 evaluator """rankbased""" +542 65 dataset """kinships""" +542 65 model """rescal""" +542 65 loss """softplus""" +542 65 regularizer """no""" +542 65 optimizer """adam""" +542 65 training_loop """owa""" +542 65 negative_sampler """basic""" +542 65 evaluator """rankbased""" +542 66 dataset """kinships""" 
+542 66 model """rescal""" +542 66 loss """softplus""" +542 66 regularizer """no""" +542 66 optimizer """adam""" +542 66 training_loop """owa""" +542 66 negative_sampler """basic""" +542 66 evaluator """rankbased""" +542 67 dataset """kinships""" +542 67 model """rescal""" +542 67 loss """softplus""" +542 67 regularizer """no""" +542 67 optimizer """adam""" +542 67 training_loop """owa""" +542 67 negative_sampler """basic""" +542 67 evaluator """rankbased""" +542 68 dataset """kinships""" +542 68 model """rescal""" +542 68 loss """softplus""" +542 68 regularizer """no""" +542 68 optimizer """adam""" +542 68 training_loop """owa""" +542 68 negative_sampler """basic""" +542 68 evaluator """rankbased""" +542 69 dataset """kinships""" +542 69 model """rescal""" +542 69 loss """softplus""" +542 69 regularizer """no""" +542 69 optimizer """adam""" +542 69 training_loop """owa""" +542 69 negative_sampler """basic""" +542 69 evaluator """rankbased""" +542 70 dataset """kinships""" +542 70 model """rescal""" +542 70 loss """softplus""" +542 70 regularizer """no""" +542 70 optimizer """adam""" +542 70 training_loop """owa""" +542 70 negative_sampler """basic""" +542 70 evaluator """rankbased""" +542 71 dataset """kinships""" +542 71 model """rescal""" +542 71 loss """softplus""" +542 71 regularizer """no""" +542 71 optimizer """adam""" +542 71 training_loop """owa""" +542 71 negative_sampler """basic""" +542 71 evaluator """rankbased""" +542 72 dataset """kinships""" +542 72 model """rescal""" +542 72 loss """softplus""" +542 72 regularizer """no""" +542 72 optimizer """adam""" +542 72 training_loop """owa""" +542 72 negative_sampler """basic""" +542 72 evaluator """rankbased""" +542 73 dataset """kinships""" +542 73 model """rescal""" +542 73 loss """softplus""" +542 73 regularizer """no""" +542 73 optimizer """adam""" +542 73 training_loop """owa""" +542 73 negative_sampler """basic""" +542 73 evaluator """rankbased""" +542 74 dataset """kinships""" +542 74 model 
"""rescal""" +542 74 loss """softplus""" +542 74 regularizer """no""" +542 74 optimizer """adam""" +542 74 training_loop """owa""" +542 74 negative_sampler """basic""" +542 74 evaluator """rankbased""" +542 75 dataset """kinships""" +542 75 model """rescal""" +542 75 loss """softplus""" +542 75 regularizer """no""" +542 75 optimizer """adam""" +542 75 training_loop """owa""" +542 75 negative_sampler """basic""" +542 75 evaluator """rankbased""" +542 76 dataset """kinships""" +542 76 model """rescal""" +542 76 loss """softplus""" +542 76 regularizer """no""" +542 76 optimizer """adam""" +542 76 training_loop """owa""" +542 76 negative_sampler """basic""" +542 76 evaluator """rankbased""" +542 77 dataset """kinships""" +542 77 model """rescal""" +542 77 loss """softplus""" +542 77 regularizer """no""" +542 77 optimizer """adam""" +542 77 training_loop """owa""" +542 77 negative_sampler """basic""" +542 77 evaluator """rankbased""" +542 78 dataset """kinships""" +542 78 model """rescal""" +542 78 loss """softplus""" +542 78 regularizer """no""" +542 78 optimizer """adam""" +542 78 training_loop """owa""" +542 78 negative_sampler """basic""" +542 78 evaluator """rankbased""" +542 79 dataset """kinships""" +542 79 model """rescal""" +542 79 loss """softplus""" +542 79 regularizer """no""" +542 79 optimizer """adam""" +542 79 training_loop """owa""" +542 79 negative_sampler """basic""" +542 79 evaluator """rankbased""" +542 80 dataset """kinships""" +542 80 model """rescal""" +542 80 loss """softplus""" +542 80 regularizer """no""" +542 80 optimizer """adam""" +542 80 training_loop """owa""" +542 80 negative_sampler """basic""" +542 80 evaluator """rankbased""" +542 81 dataset """kinships""" +542 81 model """rescal""" +542 81 loss """softplus""" +542 81 regularizer """no""" +542 81 optimizer """adam""" +542 81 training_loop """owa""" +542 81 negative_sampler """basic""" +542 81 evaluator """rankbased""" +542 82 dataset """kinships""" +542 82 model """rescal""" +542 82 
loss """softplus""" +542 82 regularizer """no""" +542 82 optimizer """adam""" +542 82 training_loop """owa""" +542 82 negative_sampler """basic""" +542 82 evaluator """rankbased""" +542 83 dataset """kinships""" +542 83 model """rescal""" +542 83 loss """softplus""" +542 83 regularizer """no""" +542 83 optimizer """adam""" +542 83 training_loop """owa""" +542 83 negative_sampler """basic""" +542 83 evaluator """rankbased""" +542 84 dataset """kinships""" +542 84 model """rescal""" +542 84 loss """softplus""" +542 84 regularizer """no""" +542 84 optimizer """adam""" +542 84 training_loop """owa""" +542 84 negative_sampler """basic""" +542 84 evaluator """rankbased""" +542 85 dataset """kinships""" +542 85 model """rescal""" +542 85 loss """softplus""" +542 85 regularizer """no""" +542 85 optimizer """adam""" +542 85 training_loop """owa""" +542 85 negative_sampler """basic""" +542 85 evaluator """rankbased""" +542 86 dataset """kinships""" +542 86 model """rescal""" +542 86 loss """softplus""" +542 86 regularizer """no""" +542 86 optimizer """adam""" +542 86 training_loop """owa""" +542 86 negative_sampler """basic""" +542 86 evaluator """rankbased""" +542 87 dataset """kinships""" +542 87 model """rescal""" +542 87 loss """softplus""" +542 87 regularizer """no""" +542 87 optimizer """adam""" +542 87 training_loop """owa""" +542 87 negative_sampler """basic""" +542 87 evaluator """rankbased""" +542 88 dataset """kinships""" +542 88 model """rescal""" +542 88 loss """softplus""" +542 88 regularizer """no""" +542 88 optimizer """adam""" +542 88 training_loop """owa""" +542 88 negative_sampler """basic""" +542 88 evaluator """rankbased""" +542 89 dataset """kinships""" +542 89 model """rescal""" +542 89 loss """softplus""" +542 89 regularizer """no""" +542 89 optimizer """adam""" +542 89 training_loop """owa""" +542 89 negative_sampler """basic""" +542 89 evaluator """rankbased""" +542 90 dataset """kinships""" +542 90 model """rescal""" +542 90 loss """softplus""" 
+542 90 regularizer """no""" +542 90 optimizer """adam""" +542 90 training_loop """owa""" +542 90 negative_sampler """basic""" +542 90 evaluator """rankbased""" +542 91 dataset """kinships""" +542 91 model """rescal""" +542 91 loss """softplus""" +542 91 regularizer """no""" +542 91 optimizer """adam""" +542 91 training_loop """owa""" +542 91 negative_sampler """basic""" +542 91 evaluator """rankbased""" +542 92 dataset """kinships""" +542 92 model """rescal""" +542 92 loss """softplus""" +542 92 regularizer """no""" +542 92 optimizer """adam""" +542 92 training_loop """owa""" +542 92 negative_sampler """basic""" +542 92 evaluator """rankbased""" +542 93 dataset """kinships""" +542 93 model """rescal""" +542 93 loss """softplus""" +542 93 regularizer """no""" +542 93 optimizer """adam""" +542 93 training_loop """owa""" +542 93 negative_sampler """basic""" +542 93 evaluator """rankbased""" +542 94 dataset """kinships""" +542 94 model """rescal""" +542 94 loss """softplus""" +542 94 regularizer """no""" +542 94 optimizer """adam""" +542 94 training_loop """owa""" +542 94 negative_sampler """basic""" +542 94 evaluator """rankbased""" +542 95 dataset """kinships""" +542 95 model """rescal""" +542 95 loss """softplus""" +542 95 regularizer """no""" +542 95 optimizer """adam""" +542 95 training_loop """owa""" +542 95 negative_sampler """basic""" +542 95 evaluator """rankbased""" +542 96 dataset """kinships""" +542 96 model """rescal""" +542 96 loss """softplus""" +542 96 regularizer """no""" +542 96 optimizer """adam""" +542 96 training_loop """owa""" +542 96 negative_sampler """basic""" +542 96 evaluator """rankbased""" +542 97 dataset """kinships""" +542 97 model """rescal""" +542 97 loss """softplus""" +542 97 regularizer """no""" +542 97 optimizer """adam""" +542 97 training_loop """owa""" +542 97 negative_sampler """basic""" +542 97 evaluator """rankbased""" +542 98 dataset """kinships""" +542 98 model """rescal""" +542 98 loss """softplus""" +542 98 regularizer 
"""no""" +542 98 optimizer """adam""" +542 98 training_loop """owa""" +542 98 negative_sampler """basic""" +542 98 evaluator """rankbased""" +542 99 dataset """kinships""" +542 99 model """rescal""" +542 99 loss """softplus""" +542 99 regularizer """no""" +542 99 optimizer """adam""" +542 99 training_loop """owa""" +542 99 negative_sampler """basic""" +542 99 evaluator """rankbased""" +542 100 dataset """kinships""" +542 100 model """rescal""" +542 100 loss """softplus""" +542 100 regularizer """no""" +542 100 optimizer """adam""" +542 100 training_loop """owa""" +542 100 negative_sampler """basic""" +542 100 evaluator """rankbased""" +543 1 model.embedding_dim 0.0 +543 1 loss.margin 20.284747627636026 +543 1 loss.adversarial_temperature 0.6271120274898074 +543 1 optimizer.lr 0.005298667372156801 +543 1 negative_sampler.num_negs_per_pos 87.0 +543 1 training.batch_size 1.0 +543 2 model.embedding_dim 1.0 +543 2 loss.margin 10.112497870595101 +543 2 loss.adversarial_temperature 0.22844102425271987 +543 2 optimizer.lr 0.020768720851046855 +543 2 negative_sampler.num_negs_per_pos 91.0 +543 2 training.batch_size 2.0 +543 3 model.embedding_dim 2.0 +543 3 loss.margin 16.431769497445245 +543 3 loss.adversarial_temperature 0.2834386532217705 +543 3 optimizer.lr 0.09382808927141553 +543 3 negative_sampler.num_negs_per_pos 91.0 +543 3 training.batch_size 1.0 +543 4 model.embedding_dim 2.0 +543 4 loss.margin 8.504073543304937 +543 4 loss.adversarial_temperature 0.2789144229103294 +543 4 optimizer.lr 0.0625442617041356 +543 4 negative_sampler.num_negs_per_pos 45.0 +543 4 training.batch_size 2.0 +543 5 model.embedding_dim 2.0 +543 5 loss.margin 13.52453569435064 +543 5 loss.adversarial_temperature 0.33371661184947976 +543 5 optimizer.lr 0.05529788025871429 +543 5 negative_sampler.num_negs_per_pos 34.0 +543 5 training.batch_size 1.0 +543 6 model.embedding_dim 2.0 +543 6 loss.margin 23.015647640474118 +543 6 loss.adversarial_temperature 0.3247905590442384 +543 6 optimizer.lr 
0.004518652512436489 +543 6 negative_sampler.num_negs_per_pos 30.0 +543 6 training.batch_size 1.0 +543 7 model.embedding_dim 0.0 +543 7 loss.margin 14.377644045754224 +543 7 loss.adversarial_temperature 0.4749450324897221 +543 7 optimizer.lr 0.002255846735867982 +543 7 negative_sampler.num_negs_per_pos 62.0 +543 7 training.batch_size 0.0 +543 8 model.embedding_dim 2.0 +543 8 loss.margin 24.435137780728162 +543 8 loss.adversarial_temperature 0.9706219889437919 +543 8 optimizer.lr 0.019364208769107095 +543 8 negative_sampler.num_negs_per_pos 48.0 +543 8 training.batch_size 0.0 +543 9 model.embedding_dim 0.0 +543 9 loss.margin 12.202448278223569 +543 9 loss.adversarial_temperature 0.5237325006999235 +543 9 optimizer.lr 0.025499731483503728 +543 9 negative_sampler.num_negs_per_pos 27.0 +543 9 training.batch_size 1.0 +543 10 model.embedding_dim 0.0 +543 10 loss.margin 29.946660877591793 +543 10 loss.adversarial_temperature 0.18149141198718266 +543 10 optimizer.lr 0.008542561943558747 +543 10 negative_sampler.num_negs_per_pos 87.0 +543 10 training.batch_size 2.0 +543 11 model.embedding_dim 2.0 +543 11 loss.margin 9.225054078844009 +543 11 loss.adversarial_temperature 0.994894847502036 +543 11 optimizer.lr 0.021288877093379906 +543 11 negative_sampler.num_negs_per_pos 73.0 +543 11 training.batch_size 0.0 +543 12 model.embedding_dim 1.0 +543 12 loss.margin 18.8457285558002 +543 12 loss.adversarial_temperature 0.7132135077569152 +543 12 optimizer.lr 0.002081467527047467 +543 12 negative_sampler.num_negs_per_pos 72.0 +543 12 training.batch_size 2.0 +543 13 model.embedding_dim 2.0 +543 13 loss.margin 15.253526628132477 +543 13 loss.adversarial_temperature 0.2731647140034744 +543 13 optimizer.lr 0.010348859984261926 +543 13 negative_sampler.num_negs_per_pos 90.0 +543 13 training.batch_size 2.0 +543 14 model.embedding_dim 0.0 +543 14 loss.margin 8.217261169010715 +543 14 loss.adversarial_temperature 0.12800966295202942 +543 14 optimizer.lr 0.0011284295300925708 +543 14 
negative_sampler.num_negs_per_pos 70.0 +543 14 training.batch_size 2.0 +543 15 model.embedding_dim 2.0 +543 15 loss.margin 10.3133957220891 +543 15 loss.adversarial_temperature 0.17221442316470975 +543 15 optimizer.lr 0.012052482305469401 +543 15 negative_sampler.num_negs_per_pos 43.0 +543 15 training.batch_size 0.0 +543 16 model.embedding_dim 0.0 +543 16 loss.margin 8.331239676626284 +543 16 loss.adversarial_temperature 0.8212424778144058 +543 16 optimizer.lr 0.05169752261286916 +543 16 negative_sampler.num_negs_per_pos 35.0 +543 16 training.batch_size 2.0 +543 17 model.embedding_dim 1.0 +543 17 loss.margin 1.8783795784263186 +543 17 loss.adversarial_temperature 0.29423735965379555 +543 17 optimizer.lr 0.004349888565931731 +543 17 negative_sampler.num_negs_per_pos 58.0 +543 17 training.batch_size 2.0 +543 18 model.embedding_dim 2.0 +543 18 loss.margin 23.34273106669072 +543 18 loss.adversarial_temperature 0.9831155588768626 +543 18 optimizer.lr 0.08862489687425447 +543 18 negative_sampler.num_negs_per_pos 25.0 +543 18 training.batch_size 2.0 +543 19 model.embedding_dim 2.0 +543 19 loss.margin 17.020054878060737 +543 19 loss.adversarial_temperature 0.49082023549003473 +543 19 optimizer.lr 0.06065314727866177 +543 19 negative_sampler.num_negs_per_pos 50.0 +543 19 training.batch_size 0.0 +543 20 model.embedding_dim 2.0 +543 20 loss.margin 15.190163073732496 +543 20 loss.adversarial_temperature 0.20853947622511618 +543 20 optimizer.lr 0.0011222261157172295 +543 20 negative_sampler.num_negs_per_pos 89.0 +543 20 training.batch_size 1.0 +543 21 model.embedding_dim 1.0 +543 21 loss.margin 3.0635430696713684 +543 21 loss.adversarial_temperature 0.460546580051742 +543 21 optimizer.lr 0.023767449599771714 +543 21 negative_sampler.num_negs_per_pos 82.0 +543 21 training.batch_size 1.0 +543 22 model.embedding_dim 0.0 +543 22 loss.margin 6.665692802712332 +543 22 loss.adversarial_temperature 0.2692225933181409 +543 22 optimizer.lr 0.0026516764778558594 +543 22 
negative_sampler.num_negs_per_pos 3.0 +543 22 training.batch_size 0.0 +543 23 model.embedding_dim 2.0 +543 23 loss.margin 11.53545203659088 +543 23 loss.adversarial_temperature 0.15297828019775275 +543 23 optimizer.lr 0.010399933708447424 +543 23 negative_sampler.num_negs_per_pos 93.0 +543 23 training.batch_size 2.0 +543 24 model.embedding_dim 1.0 +543 24 loss.margin 17.657962827255766 +543 24 loss.adversarial_temperature 0.6041045494212591 +543 24 optimizer.lr 0.0019392202150588453 +543 24 negative_sampler.num_negs_per_pos 13.0 +543 24 training.batch_size 1.0 +543 25 model.embedding_dim 0.0 +543 25 loss.margin 9.209575475598793 +543 25 loss.adversarial_temperature 0.11171663145084104 +543 25 optimizer.lr 0.0032480797858353443 +543 25 negative_sampler.num_negs_per_pos 84.0 +543 25 training.batch_size 0.0 +543 26 model.embedding_dim 1.0 +543 26 loss.margin 12.398005143882036 +543 26 loss.adversarial_temperature 0.5790182392852314 +543 26 optimizer.lr 0.0010436978796265677 +543 26 negative_sampler.num_negs_per_pos 57.0 +543 26 training.batch_size 1.0 +543 27 model.embedding_dim 2.0 +543 27 loss.margin 11.935276152690829 +543 27 loss.adversarial_temperature 0.21873335285182513 +543 27 optimizer.lr 0.040391924245200876 +543 27 negative_sampler.num_negs_per_pos 78.0 +543 27 training.batch_size 1.0 +543 28 model.embedding_dim 1.0 +543 28 loss.margin 14.701437027938201 +543 28 loss.adversarial_temperature 0.4224907228930972 +543 28 optimizer.lr 0.07222667379004183 +543 28 negative_sampler.num_negs_per_pos 84.0 +543 28 training.batch_size 2.0 +543 29 model.embedding_dim 0.0 +543 29 loss.margin 14.217990600394986 +543 29 loss.adversarial_temperature 0.8436518964441172 +543 29 optimizer.lr 0.01917825064973955 +543 29 negative_sampler.num_negs_per_pos 71.0 +543 29 training.batch_size 0.0 +543 30 model.embedding_dim 0.0 +543 30 loss.margin 19.957855114210894 +543 30 loss.adversarial_temperature 0.911423522828907 +543 30 optimizer.lr 0.0026091929126619245 +543 30 
negative_sampler.num_negs_per_pos 21.0 +543 30 training.batch_size 1.0 +543 31 model.embedding_dim 1.0 +543 31 loss.margin 19.822460393455994 +543 31 loss.adversarial_temperature 0.5679974582830454 +543 31 optimizer.lr 0.009117641730869899 +543 31 negative_sampler.num_negs_per_pos 19.0 +543 31 training.batch_size 2.0 +543 32 model.embedding_dim 0.0 +543 32 loss.margin 2.8174175850616088 +543 32 loss.adversarial_temperature 0.9528350536447754 +543 32 optimizer.lr 0.0011071436463418718 +543 32 negative_sampler.num_negs_per_pos 77.0 +543 32 training.batch_size 2.0 +543 33 model.embedding_dim 0.0 +543 33 loss.margin 15.804522849629457 +543 33 loss.adversarial_temperature 0.15551087469303337 +543 33 optimizer.lr 0.008628486525392514 +543 33 negative_sampler.num_negs_per_pos 71.0 +543 33 training.batch_size 2.0 +543 34 model.embedding_dim 2.0 +543 34 loss.margin 8.723265425712587 +543 34 loss.adversarial_temperature 0.31573148993858247 +543 34 optimizer.lr 0.041138194410070654 +543 34 negative_sampler.num_negs_per_pos 1.0 +543 34 training.batch_size 2.0 +543 35 model.embedding_dim 2.0 +543 35 loss.margin 25.331278164360402 +543 35 loss.adversarial_temperature 0.988217109022294 +543 35 optimizer.lr 0.06035907879150265 +543 35 negative_sampler.num_negs_per_pos 86.0 +543 35 training.batch_size 1.0 +543 36 model.embedding_dim 1.0 +543 36 loss.margin 10.107081552603875 +543 36 loss.adversarial_temperature 0.40671436518689597 +543 36 optimizer.lr 0.08370229679970942 +543 36 negative_sampler.num_negs_per_pos 10.0 +543 36 training.batch_size 2.0 +543 37 model.embedding_dim 2.0 +543 37 loss.margin 15.540513606131572 +543 37 loss.adversarial_temperature 0.8359144774328803 +543 37 optimizer.lr 0.005820003504964883 +543 37 negative_sampler.num_negs_per_pos 53.0 +543 37 training.batch_size 1.0 +543 38 model.embedding_dim 0.0 +543 38 loss.margin 10.289888529656023 +543 38 loss.adversarial_temperature 0.6237291232195831 +543 38 optimizer.lr 0.0016287389966480946 +543 38 
negative_sampler.num_negs_per_pos 45.0 +543 38 training.batch_size 1.0 +543 39 model.embedding_dim 0.0 +543 39 loss.margin 18.542045344118367 +543 39 loss.adversarial_temperature 0.4284236213922531 +543 39 optimizer.lr 0.0011073191360263042 +543 39 negative_sampler.num_negs_per_pos 11.0 +543 39 training.batch_size 2.0 +543 40 model.embedding_dim 0.0 +543 40 loss.margin 1.2961551276024115 +543 40 loss.adversarial_temperature 0.8096599126416423 +543 40 optimizer.lr 0.014706764630108676 +543 40 negative_sampler.num_negs_per_pos 3.0 +543 40 training.batch_size 0.0 +543 41 model.embedding_dim 0.0 +543 41 loss.margin 14.683692305046987 +543 41 loss.adversarial_temperature 0.3847442181547248 +543 41 optimizer.lr 0.02368658908665967 +543 41 negative_sampler.num_negs_per_pos 72.0 +543 41 training.batch_size 0.0 +543 42 model.embedding_dim 1.0 +543 42 loss.margin 10.410805843972549 +543 42 loss.adversarial_temperature 0.3023981964754756 +543 42 optimizer.lr 0.0013478028656639816 +543 42 negative_sampler.num_negs_per_pos 35.0 +543 42 training.batch_size 2.0 +543 43 model.embedding_dim 1.0 +543 43 loss.margin 3.688908168032773 +543 43 loss.adversarial_temperature 0.19219492201567628 +543 43 optimizer.lr 0.023872099926865908 +543 43 negative_sampler.num_negs_per_pos 88.0 +543 43 training.batch_size 0.0 +543 44 model.embedding_dim 1.0 +543 44 loss.margin 26.7133652956521 +543 44 loss.adversarial_temperature 0.5072147173717957 +543 44 optimizer.lr 0.01059416194179993 +543 44 negative_sampler.num_negs_per_pos 81.0 +543 44 training.batch_size 0.0 +543 45 model.embedding_dim 1.0 +543 45 loss.margin 14.065587853185436 +543 45 loss.adversarial_temperature 0.8545961558568739 +543 45 optimizer.lr 0.06306118096435334 +543 45 negative_sampler.num_negs_per_pos 60.0 +543 45 training.batch_size 1.0 +543 46 model.embedding_dim 1.0 +543 46 loss.margin 15.350678811934676 +543 46 loss.adversarial_temperature 0.8382915819109165 +543 46 optimizer.lr 0.0013274469045057627 +543 46 
negative_sampler.num_negs_per_pos 0.0 +543 46 training.batch_size 1.0 +543 47 model.embedding_dim 1.0 +543 47 loss.margin 4.4058734004954525 +543 47 loss.adversarial_temperature 0.6645581620724489 +543 47 optimizer.lr 0.0012277252288832364 +543 47 negative_sampler.num_negs_per_pos 99.0 +543 47 training.batch_size 2.0 +543 48 model.embedding_dim 2.0 +543 48 loss.margin 17.431544233164562 +543 48 loss.adversarial_temperature 0.9113792426823216 +543 48 optimizer.lr 0.05460031334947168 +543 48 negative_sampler.num_negs_per_pos 36.0 +543 48 training.batch_size 1.0 +543 49 model.embedding_dim 2.0 +543 49 loss.margin 20.377947220143906 +543 49 loss.adversarial_temperature 0.3695529015175163 +543 49 optimizer.lr 0.0010009514380795329 +543 49 negative_sampler.num_negs_per_pos 9.0 +543 49 training.batch_size 1.0 +543 50 model.embedding_dim 1.0 +543 50 loss.margin 4.298703916087772 +543 50 loss.adversarial_temperature 0.869581001192897 +543 50 optimizer.lr 0.01114126791512339 +543 50 negative_sampler.num_negs_per_pos 14.0 +543 50 training.batch_size 1.0 +543 51 model.embedding_dim 1.0 +543 51 loss.margin 14.374074637712406 +543 51 loss.adversarial_temperature 0.11606895126239192 +543 51 optimizer.lr 0.0011426446064910494 +543 51 negative_sampler.num_negs_per_pos 96.0 +543 51 training.batch_size 1.0 +543 52 model.embedding_dim 0.0 +543 52 loss.margin 11.884665357853194 +543 52 loss.adversarial_temperature 0.7941518042519132 +543 52 optimizer.lr 0.005268197475045542 +543 52 negative_sampler.num_negs_per_pos 57.0 +543 52 training.batch_size 0.0 +543 53 model.embedding_dim 0.0 +543 53 loss.margin 25.98647055656315 +543 53 loss.adversarial_temperature 0.662549428888019 +543 53 optimizer.lr 0.0031935595279906155 +543 53 negative_sampler.num_negs_per_pos 0.0 +543 53 training.batch_size 1.0 +543 54 model.embedding_dim 0.0 +543 54 loss.margin 8.38599644918211 +543 54 loss.adversarial_temperature 0.77703405683628 +543 54 optimizer.lr 0.04200763013438745 +543 54 
negative_sampler.num_negs_per_pos 21.0 +543 54 training.batch_size 1.0 +543 55 model.embedding_dim 0.0 +543 55 loss.margin 3.8169156656068037 +543 55 loss.adversarial_temperature 0.5448693877470648 +543 55 optimizer.lr 0.009736222436592619 +543 55 negative_sampler.num_negs_per_pos 78.0 +543 55 training.batch_size 0.0 +543 56 model.embedding_dim 2.0 +543 56 loss.margin 4.564830415858521 +543 56 loss.adversarial_temperature 0.7618642659607632 +543 56 optimizer.lr 0.005597474163643973 +543 56 negative_sampler.num_negs_per_pos 56.0 +543 56 training.batch_size 0.0 +543 57 model.embedding_dim 1.0 +543 57 loss.margin 5.905973487782024 +543 57 loss.adversarial_temperature 0.10594723332503059 +543 57 optimizer.lr 0.002259636686992279 +543 57 negative_sampler.num_negs_per_pos 31.0 +543 57 training.batch_size 2.0 +543 58 model.embedding_dim 2.0 +543 58 loss.margin 17.91700002270163 +543 58 loss.adversarial_temperature 0.9761474954502936 +543 58 optimizer.lr 0.022403036332983132 +543 58 negative_sampler.num_negs_per_pos 55.0 +543 58 training.batch_size 0.0 +543 59 model.embedding_dim 1.0 +543 59 loss.margin 6.2892839474127875 +543 59 loss.adversarial_temperature 0.19980877546065068 +543 59 optimizer.lr 0.007885184382947425 +543 59 negative_sampler.num_negs_per_pos 95.0 +543 59 training.batch_size 1.0 +543 60 model.embedding_dim 0.0 +543 60 loss.margin 22.069457625338327 +543 60 loss.adversarial_temperature 0.18674783029455205 +543 60 optimizer.lr 0.003170832805516261 +543 60 negative_sampler.num_negs_per_pos 62.0 +543 60 training.batch_size 2.0 +543 61 model.embedding_dim 2.0 +543 61 loss.margin 8.287788362949524 +543 61 loss.adversarial_temperature 0.4504047385112231 +543 61 optimizer.lr 0.01558518803664623 +543 61 negative_sampler.num_negs_per_pos 23.0 +543 61 training.batch_size 1.0 +543 62 model.embedding_dim 2.0 +543 62 loss.margin 20.293778122032702 +543 62 loss.adversarial_temperature 0.34917061378453756 +543 62 optimizer.lr 0.0040671891099519795 +543 62 
negative_sampler.num_negs_per_pos 21.0 +543 62 training.batch_size 2.0 +543 63 model.embedding_dim 0.0 +543 63 loss.margin 18.83040075698483 +543 63 loss.adversarial_temperature 0.17585399473564178 +543 63 optimizer.lr 0.017653617922917877 +543 63 negative_sampler.num_negs_per_pos 46.0 +543 63 training.batch_size 2.0 +543 64 model.embedding_dim 0.0 +543 64 loss.margin 3.7192894590179115 +543 64 loss.adversarial_temperature 0.5045632730900593 +543 64 optimizer.lr 0.01086286350063557 +543 64 negative_sampler.num_negs_per_pos 45.0 +543 64 training.batch_size 0.0 +543 65 model.embedding_dim 2.0 +543 65 loss.margin 10.344862681423887 +543 65 loss.adversarial_temperature 0.5773099452383512 +543 65 optimizer.lr 0.0011190393243515578 +543 65 negative_sampler.num_negs_per_pos 96.0 +543 65 training.batch_size 2.0 +543 66 model.embedding_dim 1.0 +543 66 loss.margin 12.418858847753532 +543 66 loss.adversarial_temperature 0.15463463895463553 +543 66 optimizer.lr 0.0014471467793184685 +543 66 negative_sampler.num_negs_per_pos 66.0 +543 66 training.batch_size 1.0 +543 67 model.embedding_dim 2.0 +543 67 loss.margin 28.949313756909866 +543 67 loss.adversarial_temperature 0.2312954984083834 +543 67 optimizer.lr 0.011173446613499598 +543 67 negative_sampler.num_negs_per_pos 91.0 +543 67 training.batch_size 2.0 +543 68 model.embedding_dim 2.0 +543 68 loss.margin 5.5756606107795275 +543 68 loss.adversarial_temperature 0.8923334440993569 +543 68 optimizer.lr 0.07235716412600791 +543 68 negative_sampler.num_negs_per_pos 58.0 +543 68 training.batch_size 0.0 +543 69 model.embedding_dim 2.0 +543 69 loss.margin 20.673281796617736 +543 69 loss.adversarial_temperature 0.2393118568808116 +543 69 optimizer.lr 0.015287705308284525 +543 69 negative_sampler.num_negs_per_pos 90.0 +543 69 training.batch_size 0.0 +543 70 model.embedding_dim 0.0 +543 70 loss.margin 14.107692186665872 +543 70 loss.adversarial_temperature 0.7427087047128531 +543 70 optimizer.lr 0.0034412814703531996 +543 70 
negative_sampler.num_negs_per_pos 26.0 +543 70 training.batch_size 2.0 +543 71 model.embedding_dim 0.0 +543 71 loss.margin 24.19125860736061 +543 71 loss.adversarial_temperature 0.3511702009995632 +543 71 optimizer.lr 0.002741351428759333 +543 71 negative_sampler.num_negs_per_pos 82.0 +543 71 training.batch_size 1.0 +543 72 model.embedding_dim 2.0 +543 72 loss.margin 10.07154362738826 +543 72 loss.adversarial_temperature 0.19576449074949842 +543 72 optimizer.lr 0.0010775224300373 +543 72 negative_sampler.num_negs_per_pos 89.0 +543 72 training.batch_size 2.0 +543 73 model.embedding_dim 0.0 +543 73 loss.margin 4.639095833025825 +543 73 loss.adversarial_temperature 0.6844738582873161 +543 73 optimizer.lr 0.007023022164775397 +543 73 negative_sampler.num_negs_per_pos 42.0 +543 73 training.batch_size 0.0 +543 74 model.embedding_dim 0.0 +543 74 loss.margin 13.619519737312634 +543 74 loss.adversarial_temperature 0.3897973345168084 +543 74 optimizer.lr 0.001624748900304755 +543 74 negative_sampler.num_negs_per_pos 4.0 +543 74 training.batch_size 2.0 +543 75 model.embedding_dim 0.0 +543 75 loss.margin 11.976060124419945 +543 75 loss.adversarial_temperature 0.21952701710172223 +543 75 optimizer.lr 0.0053938107574825755 +543 75 negative_sampler.num_negs_per_pos 90.0 +543 75 training.batch_size 2.0 +543 76 model.embedding_dim 1.0 +543 76 loss.margin 4.328247239624811 +543 76 loss.adversarial_temperature 0.970951259576084 +543 76 optimizer.lr 0.0034948532691757186 +543 76 negative_sampler.num_negs_per_pos 53.0 +543 76 training.batch_size 2.0 +543 77 model.embedding_dim 0.0 +543 77 loss.margin 24.633198502580036 +543 77 loss.adversarial_temperature 0.5442317168812343 +543 77 optimizer.lr 0.011516737727278476 +543 77 negative_sampler.num_negs_per_pos 72.0 +543 77 training.batch_size 0.0 +543 78 model.embedding_dim 0.0 +543 78 loss.margin 19.99072947779672 +543 78 loss.adversarial_temperature 0.9542167647316635 +543 78 optimizer.lr 0.004273504502914403 +543 78 
negative_sampler.num_negs_per_pos 68.0 +543 78 training.batch_size 2.0 +543 79 model.embedding_dim 2.0 +543 79 loss.margin 16.263309945235182 +543 79 loss.adversarial_temperature 0.38615714899374987 +543 79 optimizer.lr 0.05834285065975265 +543 79 negative_sampler.num_negs_per_pos 46.0 +543 79 training.batch_size 2.0 +543 80 model.embedding_dim 2.0 +543 80 loss.margin 16.915925592103257 +543 80 loss.adversarial_temperature 0.7089504821493573 +543 80 optimizer.lr 0.038818928273113104 +543 80 negative_sampler.num_negs_per_pos 72.0 +543 80 training.batch_size 0.0 +543 81 model.embedding_dim 2.0 +543 81 loss.margin 11.789179116568285 +543 81 loss.adversarial_temperature 0.24604900247017614 +543 81 optimizer.lr 0.01128543507032026 +543 81 negative_sampler.num_negs_per_pos 49.0 +543 81 training.batch_size 1.0 +543 82 model.embedding_dim 1.0 +543 82 loss.margin 7.206737058417027 +543 82 loss.adversarial_temperature 0.44183193236256957 +543 82 optimizer.lr 0.03585293001805636 +543 82 negative_sampler.num_negs_per_pos 34.0 +543 82 training.batch_size 1.0 +543 83 model.embedding_dim 1.0 +543 83 loss.margin 25.197520350484925 +543 83 loss.adversarial_temperature 0.10423714396193756 +543 83 optimizer.lr 0.016257580135153847 +543 83 negative_sampler.num_negs_per_pos 62.0 +543 83 training.batch_size 2.0 +543 84 model.embedding_dim 2.0 +543 84 loss.margin 20.22990518407535 +543 84 loss.adversarial_temperature 0.7355404677508767 +543 84 optimizer.lr 0.0027076441809024943 +543 84 negative_sampler.num_negs_per_pos 23.0 +543 84 training.batch_size 2.0 +543 85 model.embedding_dim 1.0 +543 85 loss.margin 6.336894663390874 +543 85 loss.adversarial_temperature 0.16235387067598003 +543 85 optimizer.lr 0.0065014423733956985 +543 85 negative_sampler.num_negs_per_pos 32.0 +543 85 training.batch_size 1.0 +543 86 model.embedding_dim 1.0 +543 86 loss.margin 11.99299639573941 +543 86 loss.adversarial_temperature 0.8476621517101164 +543 86 optimizer.lr 0.0021594263205169406 +543 86 
negative_sampler.num_negs_per_pos 12.0 +543 86 training.batch_size 2.0 +543 87 model.embedding_dim 1.0 +543 87 loss.margin 11.328873981547257 +543 87 loss.adversarial_temperature 0.5305711969605186 +543 87 optimizer.lr 0.008289140975913684 +543 87 negative_sampler.num_negs_per_pos 5.0 +543 87 training.batch_size 0.0 +543 88 model.embedding_dim 1.0 +543 88 loss.margin 8.155304670625224 +543 88 loss.adversarial_temperature 0.4198035737257225 +543 88 optimizer.lr 0.0011652576867807443 +543 88 negative_sampler.num_negs_per_pos 86.0 +543 88 training.batch_size 1.0 +543 89 model.embedding_dim 2.0 +543 89 loss.margin 1.0008535456612064 +543 89 loss.adversarial_temperature 0.15113613917085142 +543 89 optimizer.lr 0.0021717552729188048 +543 89 negative_sampler.num_negs_per_pos 83.0 +543 89 training.batch_size 2.0 +543 90 model.embedding_dim 2.0 +543 90 loss.margin 10.493627136317052 +543 90 loss.adversarial_temperature 0.144037146017651 +543 90 optimizer.lr 0.0016467262356671284 +543 90 negative_sampler.num_negs_per_pos 18.0 +543 90 training.batch_size 0.0 +543 91 model.embedding_dim 0.0 +543 91 loss.margin 1.213093117519842 +543 91 loss.adversarial_temperature 0.12364232411009388 +543 91 optimizer.lr 0.004093200196044624 +543 91 negative_sampler.num_negs_per_pos 74.0 +543 91 training.batch_size 2.0 +543 92 model.embedding_dim 2.0 +543 92 loss.margin 17.69340824118323 +543 92 loss.adversarial_temperature 0.1839984078679806 +543 92 optimizer.lr 0.02784138533998029 +543 92 negative_sampler.num_negs_per_pos 9.0 +543 92 training.batch_size 2.0 +543 93 model.embedding_dim 0.0 +543 93 loss.margin 15.742582452648143 +543 93 loss.adversarial_temperature 0.6051879566851311 +543 93 optimizer.lr 0.0024856175761953014 +543 93 negative_sampler.num_negs_per_pos 1.0 +543 93 training.batch_size 1.0 +543 94 model.embedding_dim 1.0 +543 94 loss.margin 29.91439318744272 +543 94 loss.adversarial_temperature 0.9715899143301455 +543 94 optimizer.lr 0.001606080436848531 +543 94 
negative_sampler.num_negs_per_pos 55.0 +543 94 training.batch_size 0.0 +543 95 model.embedding_dim 2.0 +543 95 loss.margin 28.191105327683946 +543 95 loss.adversarial_temperature 0.4851255638284475 +543 95 optimizer.lr 0.003040504795251296 +543 95 negative_sampler.num_negs_per_pos 69.0 +543 95 training.batch_size 1.0 +543 96 model.embedding_dim 1.0 +543 96 loss.margin 11.591044131820825 +543 96 loss.adversarial_temperature 0.46873309406271324 +543 96 optimizer.lr 0.0023471471400571147 +543 96 negative_sampler.num_negs_per_pos 10.0 +543 96 training.batch_size 1.0 +543 97 model.embedding_dim 2.0 +543 97 loss.margin 4.203093340607402 +543 97 loss.adversarial_temperature 0.4369364007987716 +543 97 optimizer.lr 0.0033156905166977046 +543 97 negative_sampler.num_negs_per_pos 71.0 +543 97 training.batch_size 2.0 +543 98 model.embedding_dim 0.0 +543 98 loss.margin 21.986465663035045 +543 98 loss.adversarial_temperature 0.6741794824920527 +543 98 optimizer.lr 0.0015750186200138407 +543 98 negative_sampler.num_negs_per_pos 56.0 +543 98 training.batch_size 0.0 +543 99 model.embedding_dim 0.0 +543 99 loss.margin 13.77327358518895 +543 99 loss.adversarial_temperature 0.9919577430335474 +543 99 optimizer.lr 0.02758715500861825 +543 99 negative_sampler.num_negs_per_pos 25.0 +543 99 training.batch_size 0.0 +543 100 model.embedding_dim 2.0 +543 100 loss.margin 14.464448304013432 +543 100 loss.adversarial_temperature 0.2953748744659931 +543 100 optimizer.lr 0.001058589026307892 +543 100 negative_sampler.num_negs_per_pos 86.0 +543 100 training.batch_size 0.0 +543 1 dataset """kinships""" +543 1 model """rescal""" +543 1 loss """nssa""" +543 1 regularizer """no""" +543 1 optimizer """adam""" +543 1 training_loop """owa""" +543 1 negative_sampler """basic""" +543 1 evaluator """rankbased""" +543 2 dataset """kinships""" +543 2 model """rescal""" +543 2 loss """nssa""" +543 2 regularizer """no""" +543 2 optimizer """adam""" +543 2 training_loop """owa""" +543 2 negative_sampler 
"""basic""" +543 2 evaluator """rankbased""" +543 3 dataset """kinships""" +543 3 model """rescal""" +543 3 loss """nssa""" +543 3 regularizer """no""" +543 3 optimizer """adam""" +543 3 training_loop """owa""" +543 3 negative_sampler """basic""" +543 3 evaluator """rankbased""" +543 4 dataset """kinships""" +543 4 model """rescal""" +543 4 loss """nssa""" +543 4 regularizer """no""" +543 4 optimizer """adam""" +543 4 training_loop """owa""" +543 4 negative_sampler """basic""" +543 4 evaluator """rankbased""" +543 5 dataset """kinships""" +543 5 model """rescal""" +543 5 loss """nssa""" +543 5 regularizer """no""" +543 5 optimizer """adam""" +543 5 training_loop """owa""" +543 5 negative_sampler """basic""" +543 5 evaluator """rankbased""" +543 6 dataset """kinships""" +543 6 model """rescal""" +543 6 loss """nssa""" +543 6 regularizer """no""" +543 6 optimizer """adam""" +543 6 training_loop """owa""" +543 6 negative_sampler """basic""" +543 6 evaluator """rankbased""" +543 7 dataset """kinships""" +543 7 model """rescal""" +543 7 loss """nssa""" +543 7 regularizer """no""" +543 7 optimizer """adam""" +543 7 training_loop """owa""" +543 7 negative_sampler """basic""" +543 7 evaluator """rankbased""" +543 8 dataset """kinships""" +543 8 model """rescal""" +543 8 loss """nssa""" +543 8 regularizer """no""" +543 8 optimizer """adam""" +543 8 training_loop """owa""" +543 8 negative_sampler """basic""" +543 8 evaluator """rankbased""" +543 9 dataset """kinships""" +543 9 model """rescal""" +543 9 loss """nssa""" +543 9 regularizer """no""" +543 9 optimizer """adam""" +543 9 training_loop """owa""" +543 9 negative_sampler """basic""" +543 9 evaluator """rankbased""" +543 10 dataset """kinships""" +543 10 model """rescal""" +543 10 loss """nssa""" +543 10 regularizer """no""" +543 10 optimizer """adam""" +543 10 training_loop """owa""" +543 10 negative_sampler """basic""" +543 10 evaluator """rankbased""" +543 11 dataset """kinships""" +543 11 model """rescal""" +543 11 
loss """nssa""" +543 11 regularizer """no""" +543 11 optimizer """adam""" +543 11 training_loop """owa""" +543 11 negative_sampler """basic""" +543 11 evaluator """rankbased""" +543 12 dataset """kinships""" +543 12 model """rescal""" +543 12 loss """nssa""" +543 12 regularizer """no""" +543 12 optimizer """adam""" +543 12 training_loop """owa""" +543 12 negative_sampler """basic""" +543 12 evaluator """rankbased""" +543 13 dataset """kinships""" +543 13 model """rescal""" +543 13 loss """nssa""" +543 13 regularizer """no""" +543 13 optimizer """adam""" +543 13 training_loop """owa""" +543 13 negative_sampler """basic""" +543 13 evaluator """rankbased""" +543 14 dataset """kinships""" +543 14 model """rescal""" +543 14 loss """nssa""" +543 14 regularizer """no""" +543 14 optimizer """adam""" +543 14 training_loop """owa""" +543 14 negative_sampler """basic""" +543 14 evaluator """rankbased""" +543 15 dataset """kinships""" +543 15 model """rescal""" +543 15 loss """nssa""" +543 15 regularizer """no""" +543 15 optimizer """adam""" +543 15 training_loop """owa""" +543 15 negative_sampler """basic""" +543 15 evaluator """rankbased""" +543 16 dataset """kinships""" +543 16 model """rescal""" +543 16 loss """nssa""" +543 16 regularizer """no""" +543 16 optimizer """adam""" +543 16 training_loop """owa""" +543 16 negative_sampler """basic""" +543 16 evaluator """rankbased""" +543 17 dataset """kinships""" +543 17 model """rescal""" +543 17 loss """nssa""" +543 17 regularizer """no""" +543 17 optimizer """adam""" +543 17 training_loop """owa""" +543 17 negative_sampler """basic""" +543 17 evaluator """rankbased""" +543 18 dataset """kinships""" +543 18 model """rescal""" +543 18 loss """nssa""" +543 18 regularizer """no""" +543 18 optimizer """adam""" +543 18 training_loop """owa""" +543 18 negative_sampler """basic""" +543 18 evaluator """rankbased""" +543 19 dataset """kinships""" +543 19 model """rescal""" +543 19 loss """nssa""" +543 19 regularizer """no""" +543 19 
optimizer """adam""" +543 19 training_loop """owa""" +543 19 negative_sampler """basic""" +543 19 evaluator """rankbased""" +543 20 dataset """kinships""" +543 20 model """rescal""" +543 20 loss """nssa""" +543 20 regularizer """no""" +543 20 optimizer """adam""" +543 20 training_loop """owa""" +543 20 negative_sampler """basic""" +543 20 evaluator """rankbased""" +543 21 dataset """kinships""" +543 21 model """rescal""" +543 21 loss """nssa""" +543 21 regularizer """no""" +543 21 optimizer """adam""" +543 21 training_loop """owa""" +543 21 negative_sampler """basic""" +543 21 evaluator """rankbased""" +543 22 dataset """kinships""" +543 22 model """rescal""" +543 22 loss """nssa""" +543 22 regularizer """no""" +543 22 optimizer """adam""" +543 22 training_loop """owa""" +543 22 negative_sampler """basic""" +543 22 evaluator """rankbased""" +543 23 dataset """kinships""" +543 23 model """rescal""" +543 23 loss """nssa""" +543 23 regularizer """no""" +543 23 optimizer """adam""" +543 23 training_loop """owa""" +543 23 negative_sampler """basic""" +543 23 evaluator """rankbased""" +543 24 dataset """kinships""" +543 24 model """rescal""" +543 24 loss """nssa""" +543 24 regularizer """no""" +543 24 optimizer """adam""" +543 24 training_loop """owa""" +543 24 negative_sampler """basic""" +543 24 evaluator """rankbased""" +543 25 dataset """kinships""" +543 25 model """rescal""" +543 25 loss """nssa""" +543 25 regularizer """no""" +543 25 optimizer """adam""" +543 25 training_loop """owa""" +543 25 negative_sampler """basic""" +543 25 evaluator """rankbased""" +543 26 dataset """kinships""" +543 26 model """rescal""" +543 26 loss """nssa""" +543 26 regularizer """no""" +543 26 optimizer """adam""" +543 26 training_loop """owa""" +543 26 negative_sampler """basic""" +543 26 evaluator """rankbased""" +543 27 dataset """kinships""" +543 27 model """rescal""" +543 27 loss """nssa""" +543 27 regularizer """no""" +543 27 optimizer """adam""" +543 27 training_loop """owa""" 
+543 27 negative_sampler """basic""" +543 27 evaluator """rankbased""" +543 28 dataset """kinships""" +543 28 model """rescal""" +543 28 loss """nssa""" +543 28 regularizer """no""" +543 28 optimizer """adam""" +543 28 training_loop """owa""" +543 28 negative_sampler """basic""" +543 28 evaluator """rankbased""" +543 29 dataset """kinships""" +543 29 model """rescal""" +543 29 loss """nssa""" +543 29 regularizer """no""" +543 29 optimizer """adam""" +543 29 training_loop """owa""" +543 29 negative_sampler """basic""" +543 29 evaluator """rankbased""" +543 30 dataset """kinships""" +543 30 model """rescal""" +543 30 loss """nssa""" +543 30 regularizer """no""" +543 30 optimizer """adam""" +543 30 training_loop """owa""" +543 30 negative_sampler """basic""" +543 30 evaluator """rankbased""" +543 31 dataset """kinships""" +543 31 model """rescal""" +543 31 loss """nssa""" +543 31 regularizer """no""" +543 31 optimizer """adam""" +543 31 training_loop """owa""" +543 31 negative_sampler """basic""" +543 31 evaluator """rankbased""" +543 32 dataset """kinships""" +543 32 model """rescal""" +543 32 loss """nssa""" +543 32 regularizer """no""" +543 32 optimizer """adam""" +543 32 training_loop """owa""" +543 32 negative_sampler """basic""" +543 32 evaluator """rankbased""" +543 33 dataset """kinships""" +543 33 model """rescal""" +543 33 loss """nssa""" +543 33 regularizer """no""" +543 33 optimizer """adam""" +543 33 training_loop """owa""" +543 33 negative_sampler """basic""" +543 33 evaluator """rankbased""" +543 34 dataset """kinships""" +543 34 model """rescal""" +543 34 loss """nssa""" +543 34 regularizer """no""" +543 34 optimizer """adam""" +543 34 training_loop """owa""" +543 34 negative_sampler """basic""" +543 34 evaluator """rankbased""" +543 35 dataset """kinships""" +543 35 model """rescal""" +543 35 loss """nssa""" +543 35 regularizer """no""" +543 35 optimizer """adam""" +543 35 training_loop """owa""" +543 35 negative_sampler """basic""" +543 35 evaluator 
"""rankbased""" +543 36 dataset """kinships""" +543 36 model """rescal""" +543 36 loss """nssa""" +543 36 regularizer """no""" +543 36 optimizer """adam""" +543 36 training_loop """owa""" +543 36 negative_sampler """basic""" +543 36 evaluator """rankbased""" +543 37 dataset """kinships""" +543 37 model """rescal""" +543 37 loss """nssa""" +543 37 regularizer """no""" +543 37 optimizer """adam""" +543 37 training_loop """owa""" +543 37 negative_sampler """basic""" +543 37 evaluator """rankbased""" +543 38 dataset """kinships""" +543 38 model """rescal""" +543 38 loss """nssa""" +543 38 regularizer """no""" +543 38 optimizer """adam""" +543 38 training_loop """owa""" +543 38 negative_sampler """basic""" +543 38 evaluator """rankbased""" +543 39 dataset """kinships""" +543 39 model """rescal""" +543 39 loss """nssa""" +543 39 regularizer """no""" +543 39 optimizer """adam""" +543 39 training_loop """owa""" +543 39 negative_sampler """basic""" +543 39 evaluator """rankbased""" +543 40 dataset """kinships""" +543 40 model """rescal""" +543 40 loss """nssa""" +543 40 regularizer """no""" +543 40 optimizer """adam""" +543 40 training_loop """owa""" +543 40 negative_sampler """basic""" +543 40 evaluator """rankbased""" +543 41 dataset """kinships""" +543 41 model """rescal""" +543 41 loss """nssa""" +543 41 regularizer """no""" +543 41 optimizer """adam""" +543 41 training_loop """owa""" +543 41 negative_sampler """basic""" +543 41 evaluator """rankbased""" +543 42 dataset """kinships""" +543 42 model """rescal""" +543 42 loss """nssa""" +543 42 regularizer """no""" +543 42 optimizer """adam""" +543 42 training_loop """owa""" +543 42 negative_sampler """basic""" +543 42 evaluator """rankbased""" +543 43 dataset """kinships""" +543 43 model """rescal""" +543 43 loss """nssa""" +543 43 regularizer """no""" +543 43 optimizer """adam""" +543 43 training_loop """owa""" +543 43 negative_sampler """basic""" +543 43 evaluator """rankbased""" +543 44 dataset """kinships""" +543 44 
model """rescal""" +543 44 loss """nssa""" +543 44 regularizer """no""" +543 44 optimizer """adam""" +543 44 training_loop """owa""" +543 44 negative_sampler """basic""" +543 44 evaluator """rankbased""" +543 45 dataset """kinships""" +543 45 model """rescal""" +543 45 loss """nssa""" +543 45 regularizer """no""" +543 45 optimizer """adam""" +543 45 training_loop """owa""" +543 45 negative_sampler """basic""" +543 45 evaluator """rankbased""" +543 46 dataset """kinships""" +543 46 model """rescal""" +543 46 loss """nssa""" +543 46 regularizer """no""" +543 46 optimizer """adam""" +543 46 training_loop """owa""" +543 46 negative_sampler """basic""" +543 46 evaluator """rankbased""" +543 47 dataset """kinships""" +543 47 model """rescal""" +543 47 loss """nssa""" +543 47 regularizer """no""" +543 47 optimizer """adam""" +543 47 training_loop """owa""" +543 47 negative_sampler """basic""" +543 47 evaluator """rankbased""" +543 48 dataset """kinships""" +543 48 model """rescal""" +543 48 loss """nssa""" +543 48 regularizer """no""" +543 48 optimizer """adam""" +543 48 training_loop """owa""" +543 48 negative_sampler """basic""" +543 48 evaluator """rankbased""" +543 49 dataset """kinships""" +543 49 model """rescal""" +543 49 loss """nssa""" +543 49 regularizer """no""" +543 49 optimizer """adam""" +543 49 training_loop """owa""" +543 49 negative_sampler """basic""" +543 49 evaluator """rankbased""" +543 50 dataset """kinships""" +543 50 model """rescal""" +543 50 loss """nssa""" +543 50 regularizer """no""" +543 50 optimizer """adam""" +543 50 training_loop """owa""" +543 50 negative_sampler """basic""" +543 50 evaluator """rankbased""" +543 51 dataset """kinships""" +543 51 model """rescal""" +543 51 loss """nssa""" +543 51 regularizer """no""" +543 51 optimizer """adam""" +543 51 training_loop """owa""" +543 51 negative_sampler """basic""" +543 51 evaluator """rankbased""" +543 52 dataset """kinships""" +543 52 model """rescal""" +543 52 loss """nssa""" +543 52 
regularizer """no""" +543 52 optimizer """adam""" +543 52 training_loop """owa""" +543 52 negative_sampler """basic""" +543 52 evaluator """rankbased""" +543 53 dataset """kinships""" +543 53 model """rescal""" +543 53 loss """nssa""" +543 53 regularizer """no""" +543 53 optimizer """adam""" +543 53 training_loop """owa""" +543 53 negative_sampler """basic""" +543 53 evaluator """rankbased""" +543 54 dataset """kinships""" +543 54 model """rescal""" +543 54 loss """nssa""" +543 54 regularizer """no""" +543 54 optimizer """adam""" +543 54 training_loop """owa""" +543 54 negative_sampler """basic""" +543 54 evaluator """rankbased""" +543 55 dataset """kinships""" +543 55 model """rescal""" +543 55 loss """nssa""" +543 55 regularizer """no""" +543 55 optimizer """adam""" +543 55 training_loop """owa""" +543 55 negative_sampler """basic""" +543 55 evaluator """rankbased""" +543 56 dataset """kinships""" +543 56 model """rescal""" +543 56 loss """nssa""" +543 56 regularizer """no""" +543 56 optimizer """adam""" +543 56 training_loop """owa""" +543 56 negative_sampler """basic""" +543 56 evaluator """rankbased""" +543 57 dataset """kinships""" +543 57 model """rescal""" +543 57 loss """nssa""" +543 57 regularizer """no""" +543 57 optimizer """adam""" +543 57 training_loop """owa""" +543 57 negative_sampler """basic""" +543 57 evaluator """rankbased""" +543 58 dataset """kinships""" +543 58 model """rescal""" +543 58 loss """nssa""" +543 58 regularizer """no""" +543 58 optimizer """adam""" +543 58 training_loop """owa""" +543 58 negative_sampler """basic""" +543 58 evaluator """rankbased""" +543 59 dataset """kinships""" +543 59 model """rescal""" +543 59 loss """nssa""" +543 59 regularizer """no""" +543 59 optimizer """adam""" +543 59 training_loop """owa""" +543 59 negative_sampler """basic""" +543 59 evaluator """rankbased""" +543 60 dataset """kinships""" +543 60 model """rescal""" +543 60 loss """nssa""" +543 60 regularizer """no""" +543 60 optimizer """adam""" +543 
60 training_loop """owa""" +543 60 negative_sampler """basic""" +543 60 evaluator """rankbased""" +543 61 dataset """kinships""" +543 61 model """rescal""" +543 61 loss """nssa""" +543 61 regularizer """no""" +543 61 optimizer """adam""" +543 61 training_loop """owa""" +543 61 negative_sampler """basic""" +543 61 evaluator """rankbased""" +543 62 dataset """kinships""" +543 62 model """rescal""" +543 62 loss """nssa""" +543 62 regularizer """no""" +543 62 optimizer """adam""" +543 62 training_loop """owa""" +543 62 negative_sampler """basic""" +543 62 evaluator """rankbased""" +543 63 dataset """kinships""" +543 63 model """rescal""" +543 63 loss """nssa""" +543 63 regularizer """no""" +543 63 optimizer """adam""" +543 63 training_loop """owa""" +543 63 negative_sampler """basic""" +543 63 evaluator """rankbased""" +543 64 dataset """kinships""" +543 64 model """rescal""" +543 64 loss """nssa""" +543 64 regularizer """no""" +543 64 optimizer """adam""" +543 64 training_loop """owa""" +543 64 negative_sampler """basic""" +543 64 evaluator """rankbased""" +543 65 dataset """kinships""" +543 65 model """rescal""" +543 65 loss """nssa""" +543 65 regularizer """no""" +543 65 optimizer """adam""" +543 65 training_loop """owa""" +543 65 negative_sampler """basic""" +543 65 evaluator """rankbased""" +543 66 dataset """kinships""" +543 66 model """rescal""" +543 66 loss """nssa""" +543 66 regularizer """no""" +543 66 optimizer """adam""" +543 66 training_loop """owa""" +543 66 negative_sampler """basic""" +543 66 evaluator """rankbased""" +543 67 dataset """kinships""" +543 67 model """rescal""" +543 67 loss """nssa""" +543 67 regularizer """no""" +543 67 optimizer """adam""" +543 67 training_loop """owa""" +543 67 negative_sampler """basic""" +543 67 evaluator """rankbased""" +543 68 dataset """kinships""" +543 68 model """rescal""" +543 68 loss """nssa""" +543 68 regularizer """no""" +543 68 optimizer """adam""" +543 68 training_loop """owa""" +543 68 negative_sampler 
"""basic""" +543 68 evaluator """rankbased""" +543 69 dataset """kinships""" +543 69 model """rescal""" +543 69 loss """nssa""" +543 69 regularizer """no""" +543 69 optimizer """adam""" +543 69 training_loop """owa""" +543 69 negative_sampler """basic""" +543 69 evaluator """rankbased""" +543 70 dataset """kinships""" +543 70 model """rescal""" +543 70 loss """nssa""" +543 70 regularizer """no""" +543 70 optimizer """adam""" +543 70 training_loop """owa""" +543 70 negative_sampler """basic""" +543 70 evaluator """rankbased""" +543 71 dataset """kinships""" +543 71 model """rescal""" +543 71 loss """nssa""" +543 71 regularizer """no""" +543 71 optimizer """adam""" +543 71 training_loop """owa""" +543 71 negative_sampler """basic""" +543 71 evaluator """rankbased""" +543 72 dataset """kinships""" +543 72 model """rescal""" +543 72 loss """nssa""" +543 72 regularizer """no""" +543 72 optimizer """adam""" +543 72 training_loop """owa""" +543 72 negative_sampler """basic""" +543 72 evaluator """rankbased""" +543 73 dataset """kinships""" +543 73 model """rescal""" +543 73 loss """nssa""" +543 73 regularizer """no""" +543 73 optimizer """adam""" +543 73 training_loop """owa""" +543 73 negative_sampler """basic""" +543 73 evaluator """rankbased""" +543 74 dataset """kinships""" +543 74 model """rescal""" +543 74 loss """nssa""" +543 74 regularizer """no""" +543 74 optimizer """adam""" +543 74 training_loop """owa""" +543 74 negative_sampler """basic""" +543 74 evaluator """rankbased""" +543 75 dataset """kinships""" +543 75 model """rescal""" +543 75 loss """nssa""" +543 75 regularizer """no""" +543 75 optimizer """adam""" +543 75 training_loop """owa""" +543 75 negative_sampler """basic""" +543 75 evaluator """rankbased""" +543 76 dataset """kinships""" +543 76 model """rescal""" +543 76 loss """nssa""" +543 76 regularizer """no""" +543 76 optimizer """adam""" +543 76 training_loop """owa""" +543 76 negative_sampler """basic""" +543 76 evaluator """rankbased""" +543 77 
dataset """kinships""" +543 77 model """rescal""" +543 77 loss """nssa""" +543 77 regularizer """no""" +543 77 optimizer """adam""" +543 77 training_loop """owa""" +543 77 negative_sampler """basic""" +543 77 evaluator """rankbased""" +543 78 dataset """kinships""" +543 78 model """rescal""" +543 78 loss """nssa""" +543 78 regularizer """no""" +543 78 optimizer """adam""" +543 78 training_loop """owa""" +543 78 negative_sampler """basic""" +543 78 evaluator """rankbased""" +543 79 dataset """kinships""" +543 79 model """rescal""" +543 79 loss """nssa""" +543 79 regularizer """no""" +543 79 optimizer """adam""" +543 79 training_loop """owa""" +543 79 negative_sampler """basic""" +543 79 evaluator """rankbased""" +543 80 dataset """kinships""" +543 80 model """rescal""" +543 80 loss """nssa""" +543 80 regularizer """no""" +543 80 optimizer """adam""" +543 80 training_loop """owa""" +543 80 negative_sampler """basic""" +543 80 evaluator """rankbased""" +543 81 dataset """kinships""" +543 81 model """rescal""" +543 81 loss """nssa""" +543 81 regularizer """no""" +543 81 optimizer """adam""" +543 81 training_loop """owa""" +543 81 negative_sampler """basic""" +543 81 evaluator """rankbased""" +543 82 dataset """kinships""" +543 82 model """rescal""" +543 82 loss """nssa""" +543 82 regularizer """no""" +543 82 optimizer """adam""" +543 82 training_loop """owa""" +543 82 negative_sampler """basic""" +543 82 evaluator """rankbased""" +543 83 dataset """kinships""" +543 83 model """rescal""" +543 83 loss """nssa""" +543 83 regularizer """no""" +543 83 optimizer """adam""" +543 83 training_loop """owa""" +543 83 negative_sampler """basic""" +543 83 evaluator """rankbased""" +543 84 dataset """kinships""" +543 84 model """rescal""" +543 84 loss """nssa""" +543 84 regularizer """no""" +543 84 optimizer """adam""" +543 84 training_loop """owa""" +543 84 negative_sampler """basic""" +543 84 evaluator """rankbased""" +543 85 dataset """kinships""" +543 85 model """rescal""" +543 
85 loss """nssa""" +543 85 regularizer """no""" +543 85 optimizer """adam""" +543 85 training_loop """owa""" +543 85 negative_sampler """basic""" +543 85 evaluator """rankbased""" +543 86 dataset """kinships""" +543 86 model """rescal""" +543 86 loss """nssa""" +543 86 regularizer """no""" +543 86 optimizer """adam""" +543 86 training_loop """owa""" +543 86 negative_sampler """basic""" +543 86 evaluator """rankbased""" +543 87 dataset """kinships""" +543 87 model """rescal""" +543 87 loss """nssa""" +543 87 regularizer """no""" +543 87 optimizer """adam""" +543 87 training_loop """owa""" +543 87 negative_sampler """basic""" +543 87 evaluator """rankbased""" +543 88 dataset """kinships""" +543 88 model """rescal""" +543 88 loss """nssa""" +543 88 regularizer """no""" +543 88 optimizer """adam""" +543 88 training_loop """owa""" +543 88 negative_sampler """basic""" +543 88 evaluator """rankbased""" +543 89 dataset """kinships""" +543 89 model """rescal""" +543 89 loss """nssa""" +543 89 regularizer """no""" +543 89 optimizer """adam""" +543 89 training_loop """owa""" +543 89 negative_sampler """basic""" +543 89 evaluator """rankbased""" +543 90 dataset """kinships""" +543 90 model """rescal""" +543 90 loss """nssa""" +543 90 regularizer """no""" +543 90 optimizer """adam""" +543 90 training_loop """owa""" +543 90 negative_sampler """basic""" +543 90 evaluator """rankbased""" +543 91 dataset """kinships""" +543 91 model """rescal""" +543 91 loss """nssa""" +543 91 regularizer """no""" +543 91 optimizer """adam""" +543 91 training_loop """owa""" +543 91 negative_sampler """basic""" +543 91 evaluator """rankbased""" +543 92 dataset """kinships""" +543 92 model """rescal""" +543 92 loss """nssa""" +543 92 regularizer """no""" +543 92 optimizer """adam""" +543 92 training_loop """owa""" +543 92 negative_sampler """basic""" +543 92 evaluator """rankbased""" +543 93 dataset """kinships""" +543 93 model """rescal""" +543 93 loss """nssa""" +543 93 regularizer """no""" +543 93 
optimizer """adam""" +543 93 training_loop """owa""" +543 93 negative_sampler """basic""" +543 93 evaluator """rankbased""" +543 94 dataset """kinships""" +543 94 model """rescal""" +543 94 loss """nssa""" +543 94 regularizer """no""" +543 94 optimizer """adam""" +543 94 training_loop """owa""" +543 94 negative_sampler """basic""" +543 94 evaluator """rankbased""" +543 95 dataset """kinships""" +543 95 model """rescal""" +543 95 loss """nssa""" +543 95 regularizer """no""" +543 95 optimizer """adam""" +543 95 training_loop """owa""" +543 95 negative_sampler """basic""" +543 95 evaluator """rankbased""" +543 96 dataset """kinships""" +543 96 model """rescal""" +543 96 loss """nssa""" +543 96 regularizer """no""" +543 96 optimizer """adam""" +543 96 training_loop """owa""" +543 96 negative_sampler """basic""" +543 96 evaluator """rankbased""" +543 97 dataset """kinships""" +543 97 model """rescal""" +543 97 loss """nssa""" +543 97 regularizer """no""" +543 97 optimizer """adam""" +543 97 training_loop """owa""" +543 97 negative_sampler """basic""" +543 97 evaluator """rankbased""" +543 98 dataset """kinships""" +543 98 model """rescal""" +543 98 loss """nssa""" +543 98 regularizer """no""" +543 98 optimizer """adam""" +543 98 training_loop """owa""" +543 98 negative_sampler """basic""" +543 98 evaluator """rankbased""" +543 99 dataset """kinships""" +543 99 model """rescal""" +543 99 loss """nssa""" +543 99 regularizer """no""" +543 99 optimizer """adam""" +543 99 training_loop """owa""" +543 99 negative_sampler """basic""" +543 99 evaluator """rankbased""" +543 100 dataset """kinships""" +543 100 model """rescal""" +543 100 loss """nssa""" +543 100 regularizer """no""" +543 100 optimizer """adam""" +543 100 training_loop """owa""" +543 100 negative_sampler """basic""" +543 100 evaluator """rankbased""" +544 1 model.embedding_dim 2.0 +544 1 loss.margin 6.828260101733695 +544 1 loss.adversarial_temperature 0.6179974032677051 +544 1 optimizer.lr 0.013723746884163404 
+544 1 negative_sampler.num_negs_per_pos 8.0 +544 1 training.batch_size 1.0 +544 2 model.embedding_dim 2.0 +544 2 loss.margin 4.023815821133738 +544 2 loss.adversarial_temperature 0.5810748324715411 +544 2 optimizer.lr 0.04462748571230862 +544 2 negative_sampler.num_negs_per_pos 73.0 +544 2 training.batch_size 2.0 +544 3 model.embedding_dim 2.0 +544 3 loss.margin 29.87786838034609 +544 3 loss.adversarial_temperature 0.10887550968318825 +544 3 optimizer.lr 0.009654567422069962 +544 3 negative_sampler.num_negs_per_pos 56.0 +544 3 training.batch_size 1.0 +544 4 model.embedding_dim 2.0 +544 4 loss.margin 7.922795933741249 +544 4 loss.adversarial_temperature 0.8085038216657894 +544 4 optimizer.lr 0.06364548665047007 +544 4 negative_sampler.num_negs_per_pos 79.0 +544 4 training.batch_size 1.0 +544 5 model.embedding_dim 1.0 +544 5 loss.margin 4.219210071108711 +544 5 loss.adversarial_temperature 0.1500834227884988 +544 5 optimizer.lr 0.0026643465182844255 +544 5 negative_sampler.num_negs_per_pos 10.0 +544 5 training.batch_size 2.0 +544 6 model.embedding_dim 0.0 +544 6 loss.margin 27.507518338032042 +544 6 loss.adversarial_temperature 0.27781007784565437 +544 6 optimizer.lr 0.039187576540246655 +544 6 negative_sampler.num_negs_per_pos 12.0 +544 6 training.batch_size 2.0 +544 7 model.embedding_dim 0.0 +544 7 loss.margin 20.599392700868613 +544 7 loss.adversarial_temperature 0.25414478567934623 +544 7 optimizer.lr 0.0013527954971113202 +544 7 negative_sampler.num_negs_per_pos 18.0 +544 7 training.batch_size 2.0 +544 8 model.embedding_dim 2.0 +544 8 loss.margin 21.93680378984619 +544 8 loss.adversarial_temperature 0.327126243776397 +544 8 optimizer.lr 0.01611270518023362 +544 8 negative_sampler.num_negs_per_pos 14.0 +544 8 training.batch_size 2.0 +544 9 model.embedding_dim 1.0 +544 9 loss.margin 10.985492641907898 +544 9 loss.adversarial_temperature 0.3829087940337269 +544 9 optimizer.lr 0.002297980372399145 +544 9 negative_sampler.num_negs_per_pos 52.0 +544 9 
training.batch_size 0.0 +544 10 model.embedding_dim 1.0 +544 10 loss.margin 27.42561425872366 +544 10 loss.adversarial_temperature 0.38652734412844814 +544 10 optimizer.lr 0.01442664447621828 +544 10 negative_sampler.num_negs_per_pos 86.0 +544 10 training.batch_size 1.0 +544 11 model.embedding_dim 1.0 +544 11 loss.margin 10.241054889665406 +544 11 loss.adversarial_temperature 0.19988753572481022 +544 11 optimizer.lr 0.03481314626155785 +544 11 negative_sampler.num_negs_per_pos 14.0 +544 11 training.batch_size 2.0 +544 12 model.embedding_dim 0.0 +544 12 loss.margin 22.313467298283367 +544 12 loss.adversarial_temperature 0.6375545199451632 +544 12 optimizer.lr 0.03883792199629253 +544 12 negative_sampler.num_negs_per_pos 42.0 +544 12 training.batch_size 1.0 +544 13 model.embedding_dim 0.0 +544 13 loss.margin 19.554461436282047 +544 13 loss.adversarial_temperature 0.6684608335710889 +544 13 optimizer.lr 0.0021747775925116735 +544 13 negative_sampler.num_negs_per_pos 63.0 +544 13 training.batch_size 1.0 +544 14 model.embedding_dim 0.0 +544 14 loss.margin 24.88369450163798 +544 14 loss.adversarial_temperature 0.2973072138261964 +544 14 optimizer.lr 0.003532198233430629 +544 14 negative_sampler.num_negs_per_pos 41.0 +544 14 training.batch_size 0.0 +544 15 model.embedding_dim 2.0 +544 15 loss.margin 14.555825283333597 +544 15 loss.adversarial_temperature 0.507310033698556 +544 15 optimizer.lr 0.025241708502093427 +544 15 negative_sampler.num_negs_per_pos 51.0 +544 15 training.batch_size 0.0 +544 16 model.embedding_dim 0.0 +544 16 loss.margin 29.382966454395145 +544 16 loss.adversarial_temperature 0.7437880710102546 +544 16 optimizer.lr 0.005742175002363058 +544 16 negative_sampler.num_negs_per_pos 51.0 +544 16 training.batch_size 2.0 +544 17 model.embedding_dim 2.0 +544 17 loss.margin 3.7329708849460372 +544 17 loss.adversarial_temperature 0.2872932088267115 +544 17 optimizer.lr 0.01107824223844047 +544 17 negative_sampler.num_negs_per_pos 39.0 +544 17 training.batch_size 
0.0 +544 18 model.embedding_dim 2.0 +544 18 loss.margin 21.275190483688636 +544 18 loss.adversarial_temperature 0.5084731501837302 +544 18 optimizer.lr 0.0054609864635877005 +544 18 negative_sampler.num_negs_per_pos 38.0 +544 18 training.batch_size 2.0 +544 19 model.embedding_dim 0.0 +544 19 loss.margin 19.123392207876016 +544 19 loss.adversarial_temperature 0.9908900403679254 +544 19 optimizer.lr 0.005510579942773596 +544 19 negative_sampler.num_negs_per_pos 44.0 +544 19 training.batch_size 2.0 +544 20 model.embedding_dim 1.0 +544 20 loss.margin 27.834554898060905 +544 20 loss.adversarial_temperature 0.27625612703446506 +544 20 optimizer.lr 0.005254245132791764 +544 20 negative_sampler.num_negs_per_pos 44.0 +544 20 training.batch_size 1.0 +544 21 model.embedding_dim 0.0 +544 21 loss.margin 6.840740630065309 +544 21 loss.adversarial_temperature 0.6619674803347428 +544 21 optimizer.lr 0.006376635117026657 +544 21 negative_sampler.num_negs_per_pos 13.0 +544 21 training.batch_size 0.0 +544 22 model.embedding_dim 0.0 +544 22 loss.margin 23.968124171789448 +544 22 loss.adversarial_temperature 0.7354788969132543 +544 22 optimizer.lr 0.06791313904147783 +544 22 negative_sampler.num_negs_per_pos 91.0 +544 22 training.batch_size 1.0 +544 23 model.embedding_dim 1.0 +544 23 loss.margin 22.118106484872758 +544 23 loss.adversarial_temperature 0.5952609655930141 +544 23 optimizer.lr 0.0022329790860936938 +544 23 negative_sampler.num_negs_per_pos 79.0 +544 23 training.batch_size 2.0 +544 24 model.embedding_dim 2.0 +544 24 loss.margin 20.15793888639194 +544 24 loss.adversarial_temperature 0.8182422119914706 +544 24 optimizer.lr 0.019364986718954206 +544 24 negative_sampler.num_negs_per_pos 48.0 +544 24 training.batch_size 2.0 +544 25 model.embedding_dim 1.0 +544 25 loss.margin 26.435232578282257 +544 25 loss.adversarial_temperature 0.26949046406889476 +544 25 optimizer.lr 0.007547574169417477 +544 25 negative_sampler.num_negs_per_pos 40.0 +544 25 training.batch_size 1.0 +544 26 
model.embedding_dim 0.0 +544 26 loss.margin 26.249497873223852 +544 26 loss.adversarial_temperature 0.8106052265543213 +544 26 optimizer.lr 0.0016903248248373254 +544 26 negative_sampler.num_negs_per_pos 75.0 +544 26 training.batch_size 0.0 +544 27 model.embedding_dim 1.0 +544 27 loss.margin 1.6170057569978076 +544 27 loss.adversarial_temperature 0.3806611772561528 +544 27 optimizer.lr 0.0015376656967158373 +544 27 negative_sampler.num_negs_per_pos 93.0 +544 27 training.batch_size 0.0 +544 28 model.embedding_dim 1.0 +544 28 loss.margin 10.904297633035762 +544 28 loss.adversarial_temperature 0.9046412312550621 +544 28 optimizer.lr 0.005307156251353373 +544 28 negative_sampler.num_negs_per_pos 24.0 +544 28 training.batch_size 0.0 +544 29 model.embedding_dim 2.0 +544 29 loss.margin 16.233544913119907 +544 29 loss.adversarial_temperature 0.14532861575065836 +544 29 optimizer.lr 0.06342888302624487 +544 29 negative_sampler.num_negs_per_pos 10.0 +544 29 training.batch_size 1.0 +544 30 model.embedding_dim 0.0 +544 30 loss.margin 24.898258942577428 +544 30 loss.adversarial_temperature 0.6055735802085285 +544 30 optimizer.lr 0.040764240561410356 +544 30 negative_sampler.num_negs_per_pos 16.0 +544 30 training.batch_size 2.0 +544 31 model.embedding_dim 1.0 +544 31 loss.margin 8.90607635505296 +544 31 loss.adversarial_temperature 0.6394673131542027 +544 31 optimizer.lr 0.004248863432366676 +544 31 negative_sampler.num_negs_per_pos 84.0 +544 31 training.batch_size 1.0 +544 32 model.embedding_dim 1.0 +544 32 loss.margin 15.634632424992027 +544 32 loss.adversarial_temperature 0.7144764361815571 +544 32 optimizer.lr 0.0031450133509900286 +544 32 negative_sampler.num_negs_per_pos 23.0 +544 32 training.batch_size 2.0 +544 33 model.embedding_dim 0.0 +544 33 loss.margin 12.190753092270077 +544 33 loss.adversarial_temperature 0.4721197442864723 +544 33 optimizer.lr 0.003154405894699205 +544 33 negative_sampler.num_negs_per_pos 25.0 +544 33 training.batch_size 0.0 +544 34 
model.embedding_dim 2.0 +544 34 loss.margin 26.60347091046891 +544 34 loss.adversarial_temperature 0.8731773659584406 +544 34 optimizer.lr 0.020762435438209523 +544 34 negative_sampler.num_negs_per_pos 49.0 +544 34 training.batch_size 2.0 +544 35 model.embedding_dim 2.0 +544 35 loss.margin 5.35559905620452 +544 35 loss.adversarial_temperature 0.6179793052795252 +544 35 optimizer.lr 0.007825218281990481 +544 35 negative_sampler.num_negs_per_pos 34.0 +544 35 training.batch_size 1.0 +544 36 model.embedding_dim 0.0 +544 36 loss.margin 14.47408361083165 +544 36 loss.adversarial_temperature 0.35968958822964825 +544 36 optimizer.lr 0.007882539269164126 +544 36 negative_sampler.num_negs_per_pos 67.0 +544 36 training.batch_size 0.0 +544 37 model.embedding_dim 0.0 +544 37 loss.margin 12.956118596730159 +544 37 loss.adversarial_temperature 0.4570251884008766 +544 37 optimizer.lr 0.0123875722286274 +544 37 negative_sampler.num_negs_per_pos 73.0 +544 37 training.batch_size 2.0 +544 38 model.embedding_dim 0.0 +544 38 loss.margin 28.334981681803725 +544 38 loss.adversarial_temperature 0.663107031641124 +544 38 optimizer.lr 0.006085231170271953 +544 38 negative_sampler.num_negs_per_pos 85.0 +544 38 training.batch_size 2.0 +544 39 model.embedding_dim 1.0 +544 39 loss.margin 27.433055202327516 +544 39 loss.adversarial_temperature 0.16918848015306584 +544 39 optimizer.lr 0.006446863039231107 +544 39 negative_sampler.num_negs_per_pos 61.0 +544 39 training.batch_size 2.0 +544 40 model.embedding_dim 2.0 +544 40 loss.margin 4.3882587062989895 +544 40 loss.adversarial_temperature 0.44731695984208464 +544 40 optimizer.lr 0.004729951108691383 +544 40 negative_sampler.num_negs_per_pos 48.0 +544 40 training.batch_size 0.0 +544 41 model.embedding_dim 1.0 +544 41 loss.margin 22.591195547559295 +544 41 loss.adversarial_temperature 0.578664167753634 +544 41 optimizer.lr 0.002811251238948174 +544 41 negative_sampler.num_negs_per_pos 38.0 +544 41 training.batch_size 1.0 +544 42 model.embedding_dim 
1.0 +544 42 loss.margin 23.11266219222637 +544 42 loss.adversarial_temperature 0.4906146007311669 +544 42 optimizer.lr 0.004146241126377937 +544 42 negative_sampler.num_negs_per_pos 5.0 +544 42 training.batch_size 0.0 +544 43 model.embedding_dim 0.0 +544 43 loss.margin 28.17586645746021 +544 43 loss.adversarial_temperature 0.8624628112148635 +544 43 optimizer.lr 0.002249236548190741 +544 43 negative_sampler.num_negs_per_pos 96.0 +544 43 training.batch_size 1.0 +544 44 model.embedding_dim 2.0 +544 44 loss.margin 23.31912056542762 +544 44 loss.adversarial_temperature 0.22782624932999834 +544 44 optimizer.lr 0.0011923516028557704 +544 44 negative_sampler.num_negs_per_pos 60.0 +544 44 training.batch_size 1.0 +544 45 model.embedding_dim 0.0 +544 45 loss.margin 6.735054360826905 +544 45 loss.adversarial_temperature 0.4166077284792292 +544 45 optimizer.lr 0.020351226814585142 +544 45 negative_sampler.num_negs_per_pos 42.0 +544 45 training.batch_size 2.0 +544 46 model.embedding_dim 1.0 +544 46 loss.margin 7.137227160648645 +544 46 loss.adversarial_temperature 0.6934704819304937 +544 46 optimizer.lr 0.0028001621391260476 +544 46 negative_sampler.num_negs_per_pos 0.0 +544 46 training.batch_size 0.0 +544 47 model.embedding_dim 1.0 +544 47 loss.margin 25.055566189831072 +544 47 loss.adversarial_temperature 0.36313753033576424 +544 47 optimizer.lr 0.08905736429740532 +544 47 negative_sampler.num_negs_per_pos 99.0 +544 47 training.batch_size 1.0 +544 48 model.embedding_dim 0.0 +544 48 loss.margin 11.78052265528462 +544 48 loss.adversarial_temperature 0.13744148380138785 +544 48 optimizer.lr 0.012071982609584751 +544 48 negative_sampler.num_negs_per_pos 80.0 +544 48 training.batch_size 2.0 +544 49 model.embedding_dim 0.0 +544 49 loss.margin 28.074300953422572 +544 49 loss.adversarial_temperature 0.6851399396892334 +544 49 optimizer.lr 0.003193444014546305 +544 49 negative_sampler.num_negs_per_pos 58.0 +544 49 training.batch_size 1.0 +544 50 model.embedding_dim 1.0 +544 50 
loss.margin 21.832976866245403 +544 50 loss.adversarial_temperature 0.982852902431136 +544 50 optimizer.lr 0.0014206512737600396 +544 50 negative_sampler.num_negs_per_pos 25.0 +544 50 training.batch_size 0.0 +544 51 model.embedding_dim 2.0 +544 51 loss.margin 24.905802845323876 +544 51 loss.adversarial_temperature 0.847616596090181 +544 51 optimizer.lr 0.001015589068288211 +544 51 negative_sampler.num_negs_per_pos 2.0 +544 51 training.batch_size 2.0 +544 52 model.embedding_dim 0.0 +544 52 loss.margin 3.650739919310019 +544 52 loss.adversarial_temperature 0.617525722427088 +544 52 optimizer.lr 0.0018076165806615236 +544 52 negative_sampler.num_negs_per_pos 87.0 +544 52 training.batch_size 2.0 +544 53 model.embedding_dim 0.0 +544 53 loss.margin 13.17530729025064 +544 53 loss.adversarial_temperature 0.600087819644733 +544 53 optimizer.lr 0.019170600085316545 +544 53 negative_sampler.num_negs_per_pos 17.0 +544 53 training.batch_size 1.0 +544 54 model.embedding_dim 1.0 +544 54 loss.margin 5.801312222748327 +544 54 loss.adversarial_temperature 0.29934166692353414 +544 54 optimizer.lr 0.026628676429118987 +544 54 negative_sampler.num_negs_per_pos 85.0 +544 54 training.batch_size 0.0 +544 55 model.embedding_dim 1.0 +544 55 loss.margin 8.797131597824379 +544 55 loss.adversarial_temperature 0.3234197960450884 +544 55 optimizer.lr 0.005197520809039942 +544 55 negative_sampler.num_negs_per_pos 42.0 +544 55 training.batch_size 2.0 +544 56 model.embedding_dim 1.0 +544 56 loss.margin 25.45518790870764 +544 56 loss.adversarial_temperature 0.7771660824493546 +544 56 optimizer.lr 0.005956896092584641 +544 56 negative_sampler.num_negs_per_pos 83.0 +544 56 training.batch_size 2.0 +544 57 model.embedding_dim 0.0 +544 57 loss.margin 28.24006567561302 +544 57 loss.adversarial_temperature 0.672272836070707 +544 57 optimizer.lr 0.007701954587838511 +544 57 negative_sampler.num_negs_per_pos 75.0 +544 57 training.batch_size 2.0 +544 58 model.embedding_dim 2.0 +544 58 loss.margin 
24.516904730302702 +544 58 loss.adversarial_temperature 0.9842057996450451 +544 58 optimizer.lr 0.0010474120279889958 +544 58 negative_sampler.num_negs_per_pos 75.0 +544 58 training.batch_size 2.0 +544 59 model.embedding_dim 0.0 +544 59 loss.margin 17.61120581438318 +544 59 loss.adversarial_temperature 0.19483810072627372 +544 59 optimizer.lr 0.06480033540491775 +544 59 negative_sampler.num_negs_per_pos 81.0 +544 59 training.batch_size 2.0 +544 60 model.embedding_dim 2.0 +544 60 loss.margin 22.840520188125197 +544 60 loss.adversarial_temperature 0.26238290719186597 +544 60 optimizer.lr 0.09056397423575954 +544 60 negative_sampler.num_negs_per_pos 65.0 +544 60 training.batch_size 1.0 +544 61 model.embedding_dim 0.0 +544 61 loss.margin 25.468819994949587 +544 61 loss.adversarial_temperature 0.8861096127868968 +544 61 optimizer.lr 0.09108831359121965 +544 61 negative_sampler.num_negs_per_pos 20.0 +544 61 training.batch_size 2.0 +544 62 model.embedding_dim 2.0 +544 62 loss.margin 25.885146782632585 +544 62 loss.adversarial_temperature 0.8471541894834191 +544 62 optimizer.lr 0.002734262373457065 +544 62 negative_sampler.num_negs_per_pos 33.0 +544 62 training.batch_size 2.0 +544 63 model.embedding_dim 1.0 +544 63 loss.margin 12.526169837923872 +544 63 loss.adversarial_temperature 0.7600964065886019 +544 63 optimizer.lr 0.0019396920134405978 +544 63 negative_sampler.num_negs_per_pos 84.0 +544 63 training.batch_size 2.0 +544 64 model.embedding_dim 1.0 +544 64 loss.margin 29.891438943288023 +544 64 loss.adversarial_temperature 0.26381108809923753 +544 64 optimizer.lr 0.024102713650652526 +544 64 negative_sampler.num_negs_per_pos 79.0 +544 64 training.batch_size 2.0 +544 65 model.embedding_dim 2.0 +544 65 loss.margin 23.62840840327817 +544 65 loss.adversarial_temperature 0.7251255487398867 +544 65 optimizer.lr 0.007154088378448037 +544 65 negative_sampler.num_negs_per_pos 7.0 +544 65 training.batch_size 1.0 +544 66 model.embedding_dim 1.0 +544 66 loss.margin 
14.966119480941572 +544 66 loss.adversarial_temperature 0.6121607232782555 +544 66 optimizer.lr 0.005808754813274886 +544 66 negative_sampler.num_negs_per_pos 63.0 +544 66 training.batch_size 1.0 +544 67 model.embedding_dim 0.0 +544 67 loss.margin 2.062542153998292 +544 67 loss.adversarial_temperature 0.7095800867388427 +544 67 optimizer.lr 0.010407613983417576 +544 67 negative_sampler.num_negs_per_pos 95.0 +544 67 training.batch_size 1.0 +544 68 model.embedding_dim 1.0 +544 68 loss.margin 21.690784732167895 +544 68 loss.adversarial_temperature 0.7888816187618128 +544 68 optimizer.lr 0.04434202965873242 +544 68 negative_sampler.num_negs_per_pos 80.0 +544 68 training.batch_size 1.0 +544 69 model.embedding_dim 2.0 +544 69 loss.margin 2.2999761408024066 +544 69 loss.adversarial_temperature 0.20803645756707828 +544 69 optimizer.lr 0.034762572731578324 +544 69 negative_sampler.num_negs_per_pos 0.0 +544 69 training.batch_size 0.0 +544 70 model.embedding_dim 1.0 +544 70 loss.margin 3.779555522894925 +544 70 loss.adversarial_temperature 0.12133965471554689 +544 70 optimizer.lr 0.00724027481299983 +544 70 negative_sampler.num_negs_per_pos 7.0 +544 70 training.batch_size 2.0 +544 71 model.embedding_dim 2.0 +544 71 loss.margin 5.249844366615157 +544 71 loss.adversarial_temperature 0.9067217448275474 +544 71 optimizer.lr 0.0422605530017407 +544 71 negative_sampler.num_negs_per_pos 37.0 +544 71 training.batch_size 0.0 +544 72 model.embedding_dim 1.0 +544 72 loss.margin 19.264153196876137 +544 72 loss.adversarial_temperature 0.4344381153521396 +544 72 optimizer.lr 0.039927278893750334 +544 72 negative_sampler.num_negs_per_pos 59.0 +544 72 training.batch_size 1.0 +544 73 model.embedding_dim 2.0 +544 73 loss.margin 11.733478761674045 +544 73 loss.adversarial_temperature 0.7788416113934425 +544 73 optimizer.lr 0.08624893228356924 +544 73 negative_sampler.num_negs_per_pos 15.0 +544 73 training.batch_size 0.0 +544 74 model.embedding_dim 1.0 +544 74 loss.margin 28.09753689152201 +544 
74 loss.adversarial_temperature 0.9844643759738982 +544 74 optimizer.lr 0.014755102002163317 +544 74 negative_sampler.num_negs_per_pos 91.0 +544 74 training.batch_size 0.0 +544 75 model.embedding_dim 0.0 +544 75 loss.margin 26.12100942195825 +544 75 loss.adversarial_temperature 0.6601073509812807 +544 75 optimizer.lr 0.0011693995262701908 +544 75 negative_sampler.num_negs_per_pos 51.0 +544 75 training.batch_size 0.0 +544 76 model.embedding_dim 1.0 +544 76 loss.margin 11.343747575491099 +544 76 loss.adversarial_temperature 0.4814377939380644 +544 76 optimizer.lr 0.014487459175794103 +544 76 negative_sampler.num_negs_per_pos 57.0 +544 76 training.batch_size 1.0 +544 77 model.embedding_dim 0.0 +544 77 loss.margin 6.102269836784796 +544 77 loss.adversarial_temperature 0.6315051301534732 +544 77 optimizer.lr 0.0011105434035302625 +544 77 negative_sampler.num_negs_per_pos 99.0 +544 77 training.batch_size 1.0 +544 78 model.embedding_dim 1.0 +544 78 loss.margin 23.07092684895712 +544 78 loss.adversarial_temperature 0.16102136251069765 +544 78 optimizer.lr 0.04189819096097556 +544 78 negative_sampler.num_negs_per_pos 0.0 +544 78 training.batch_size 1.0 +544 79 model.embedding_dim 0.0 +544 79 loss.margin 29.015580043843755 +544 79 loss.adversarial_temperature 0.3975597750327555 +544 79 optimizer.lr 0.00848782175985996 +544 79 negative_sampler.num_negs_per_pos 30.0 +544 79 training.batch_size 1.0 +544 80 model.embedding_dim 2.0 +544 80 loss.margin 29.491066045873477 +544 80 loss.adversarial_temperature 0.6911043059666079 +544 80 optimizer.lr 0.001163076068911271 +544 80 negative_sampler.num_negs_per_pos 38.0 +544 80 training.batch_size 2.0 +544 81 model.embedding_dim 2.0 +544 81 loss.margin 4.316502662819954 +544 81 loss.adversarial_temperature 0.23523742042036241 +544 81 optimizer.lr 0.033676162083524024 +544 81 negative_sampler.num_negs_per_pos 56.0 +544 81 training.batch_size 1.0 +544 82 model.embedding_dim 0.0 +544 82 loss.margin 25.29747343274834 +544 82 
loss.adversarial_temperature 0.9893291851105095 +544 82 optimizer.lr 0.09188847050490825 +544 82 negative_sampler.num_negs_per_pos 55.0 +544 82 training.batch_size 0.0 +544 83 model.embedding_dim 2.0 +544 83 loss.margin 13.051324558186037 +544 83 loss.adversarial_temperature 0.459014219883495 +544 83 optimizer.lr 0.002396095115629638 +544 83 negative_sampler.num_negs_per_pos 58.0 +544 83 training.batch_size 0.0 +544 84 model.embedding_dim 0.0 +544 84 loss.margin 2.6190492340113654 +544 84 loss.adversarial_temperature 0.5168813547446237 +544 84 optimizer.lr 0.009154812706299099 +544 84 negative_sampler.num_negs_per_pos 66.0 +544 84 training.batch_size 1.0 +544 85 model.embedding_dim 1.0 +544 85 loss.margin 13.927216198625182 +544 85 loss.adversarial_temperature 0.10133444007576448 +544 85 optimizer.lr 0.011304650126385518 +544 85 negative_sampler.num_negs_per_pos 71.0 +544 85 training.batch_size 1.0 +544 86 model.embedding_dim 1.0 +544 86 loss.margin 1.8313146117051775 +544 86 loss.adversarial_temperature 0.7214649923716243 +544 86 optimizer.lr 0.0031963568680640976 +544 86 negative_sampler.num_negs_per_pos 50.0 +544 86 training.batch_size 2.0 +544 87 model.embedding_dim 2.0 +544 87 loss.margin 20.03578666833599 +544 87 loss.adversarial_temperature 0.8175785143574964 +544 87 optimizer.lr 0.0016609736308802975 +544 87 negative_sampler.num_negs_per_pos 45.0 +544 87 training.batch_size 0.0 +544 88 model.embedding_dim 1.0 +544 88 loss.margin 13.647600837912607 +544 88 loss.adversarial_temperature 0.5986100356712032 +544 88 optimizer.lr 0.015067037545264578 +544 88 negative_sampler.num_negs_per_pos 12.0 +544 88 training.batch_size 1.0 +544 89 model.embedding_dim 2.0 +544 89 loss.margin 9.783658102032708 +544 89 loss.adversarial_temperature 0.21717023537222052 +544 89 optimizer.lr 0.09365338733956179 +544 89 negative_sampler.num_negs_per_pos 69.0 +544 89 training.batch_size 0.0 +544 90 model.embedding_dim 0.0 +544 90 loss.margin 20.01285109917035 +544 90 
loss.adversarial_temperature 0.5069106012593602 +544 90 optimizer.lr 0.003119752571934877 +544 90 negative_sampler.num_negs_per_pos 73.0 +544 90 training.batch_size 0.0 +544 91 model.embedding_dim 1.0 +544 91 loss.margin 10.875947459623436 +544 91 loss.adversarial_temperature 0.7734596559840772 +544 91 optimizer.lr 0.02586506222558379 +544 91 negative_sampler.num_negs_per_pos 13.0 +544 91 training.batch_size 0.0 +544 92 model.embedding_dim 0.0 +544 92 loss.margin 18.76377578075399 +544 92 loss.adversarial_temperature 0.7396851273112734 +544 92 optimizer.lr 0.014745054786559074 +544 92 negative_sampler.num_negs_per_pos 15.0 +544 92 training.batch_size 1.0 +544 93 model.embedding_dim 0.0 +544 93 loss.margin 4.274095950292508 +544 93 loss.adversarial_temperature 0.41500727259223913 +544 93 optimizer.lr 0.04729657917553541 +544 93 negative_sampler.num_negs_per_pos 35.0 +544 93 training.batch_size 0.0 +544 94 model.embedding_dim 2.0 +544 94 loss.margin 4.399350524536376 +544 94 loss.adversarial_temperature 0.3146516262888853 +544 94 optimizer.lr 0.05055085076214296 +544 94 negative_sampler.num_negs_per_pos 18.0 +544 94 training.batch_size 2.0 +544 95 model.embedding_dim 0.0 +544 95 loss.margin 5.695407092165249 +544 95 loss.adversarial_temperature 0.7094831454819672 +544 95 optimizer.lr 0.0029880685377760355 +544 95 negative_sampler.num_negs_per_pos 5.0 +544 95 training.batch_size 2.0 +544 96 model.embedding_dim 1.0 +544 96 loss.margin 28.84301955736748 +544 96 loss.adversarial_temperature 0.6653171270594376 +544 96 optimizer.lr 0.05171761524520057 +544 96 negative_sampler.num_negs_per_pos 74.0 +544 96 training.batch_size 1.0 +544 97 model.embedding_dim 1.0 +544 97 loss.margin 18.403044398510982 +544 97 loss.adversarial_temperature 0.6698703091832462 +544 97 optimizer.lr 0.05757854452808561 +544 97 negative_sampler.num_negs_per_pos 89.0 +544 97 training.batch_size 0.0 +544 98 model.embedding_dim 2.0 +544 98 loss.margin 4.8972679855853505 +544 98 
loss.adversarial_temperature 0.9218668992010657 +544 98 optimizer.lr 0.05965363899220024 +544 98 negative_sampler.num_negs_per_pos 39.0 +544 98 training.batch_size 0.0 +544 99 model.embedding_dim 1.0 +544 99 loss.margin 3.0840352301047615 +544 99 loss.adversarial_temperature 0.2405465067601922 +544 99 optimizer.lr 0.020364849544495613 +544 99 negative_sampler.num_negs_per_pos 1.0 +544 99 training.batch_size 1.0 +544 100 model.embedding_dim 0.0 +544 100 loss.margin 29.86119068902866 +544 100 loss.adversarial_temperature 0.36066187678209094 +544 100 optimizer.lr 0.07055508284898519 +544 100 negative_sampler.num_negs_per_pos 45.0 +544 100 training.batch_size 1.0 +544 1 dataset """kinships""" +544 1 model """rescal""" +544 1 loss """nssa""" +544 1 regularizer """no""" +544 1 optimizer """adam""" +544 1 training_loop """owa""" +544 1 negative_sampler """basic""" +544 1 evaluator """rankbased""" +544 2 dataset """kinships""" +544 2 model """rescal""" +544 2 loss """nssa""" +544 2 regularizer """no""" +544 2 optimizer """adam""" +544 2 training_loop """owa""" +544 2 negative_sampler """basic""" +544 2 evaluator """rankbased""" +544 3 dataset """kinships""" +544 3 model """rescal""" +544 3 loss """nssa""" +544 3 regularizer """no""" +544 3 optimizer """adam""" +544 3 training_loop """owa""" +544 3 negative_sampler """basic""" +544 3 evaluator """rankbased""" +544 4 dataset """kinships""" +544 4 model """rescal""" +544 4 loss """nssa""" +544 4 regularizer """no""" +544 4 optimizer """adam""" +544 4 training_loop """owa""" +544 4 negative_sampler """basic""" +544 4 evaluator """rankbased""" +544 5 dataset """kinships""" +544 5 model """rescal""" +544 5 loss """nssa""" +544 5 regularizer """no""" +544 5 optimizer """adam""" +544 5 training_loop """owa""" +544 5 negative_sampler """basic""" +544 5 evaluator """rankbased""" +544 6 dataset """kinships""" +544 6 model """rescal""" +544 6 loss """nssa""" +544 6 regularizer """no""" +544 6 optimizer """adam""" +544 6 training_loop 
"""owa""" +544 6 negative_sampler """basic""" +544 6 evaluator """rankbased""" +544 7 dataset """kinships""" +544 7 model """rescal""" +544 7 loss """nssa""" +544 7 regularizer """no""" +544 7 optimizer """adam""" +544 7 training_loop """owa""" +544 7 negative_sampler """basic""" +544 7 evaluator """rankbased""" +544 8 dataset """kinships""" +544 8 model """rescal""" +544 8 loss """nssa""" +544 8 regularizer """no""" +544 8 optimizer """adam""" +544 8 training_loop """owa""" +544 8 negative_sampler """basic""" +544 8 evaluator """rankbased""" +544 9 dataset """kinships""" +544 9 model """rescal""" +544 9 loss """nssa""" +544 9 regularizer """no""" +544 9 optimizer """adam""" +544 9 training_loop """owa""" +544 9 negative_sampler """basic""" +544 9 evaluator """rankbased""" +544 10 dataset """kinships""" +544 10 model """rescal""" +544 10 loss """nssa""" +544 10 regularizer """no""" +544 10 optimizer """adam""" +544 10 training_loop """owa""" +544 10 negative_sampler """basic""" +544 10 evaluator """rankbased""" +544 11 dataset """kinships""" +544 11 model """rescal""" +544 11 loss """nssa""" +544 11 regularizer """no""" +544 11 optimizer """adam""" +544 11 training_loop """owa""" +544 11 negative_sampler """basic""" +544 11 evaluator """rankbased""" +544 12 dataset """kinships""" +544 12 model """rescal""" +544 12 loss """nssa""" +544 12 regularizer """no""" +544 12 optimizer """adam""" +544 12 training_loop """owa""" +544 12 negative_sampler """basic""" +544 12 evaluator """rankbased""" +544 13 dataset """kinships""" +544 13 model """rescal""" +544 13 loss """nssa""" +544 13 regularizer """no""" +544 13 optimizer """adam""" +544 13 training_loop """owa""" +544 13 negative_sampler """basic""" +544 13 evaluator """rankbased""" +544 14 dataset """kinships""" +544 14 model """rescal""" +544 14 loss """nssa""" +544 14 regularizer """no""" +544 14 optimizer """adam""" +544 14 training_loop """owa""" +544 14 negative_sampler """basic""" +544 14 evaluator """rankbased""" 
+544 15 dataset """kinships""" +544 15 model """rescal""" +544 15 loss """nssa""" +544 15 regularizer """no""" +544 15 optimizer """adam""" +544 15 training_loop """owa""" +544 15 negative_sampler """basic""" +544 15 evaluator """rankbased""" +544 16 dataset """kinships""" +544 16 model """rescal""" +544 16 loss """nssa""" +544 16 regularizer """no""" +544 16 optimizer """adam""" +544 16 training_loop """owa""" +544 16 negative_sampler """basic""" +544 16 evaluator """rankbased""" +544 17 dataset """kinships""" +544 17 model """rescal""" +544 17 loss """nssa""" +544 17 regularizer """no""" +544 17 optimizer """adam""" +544 17 training_loop """owa""" +544 17 negative_sampler """basic""" +544 17 evaluator """rankbased""" +544 18 dataset """kinships""" +544 18 model """rescal""" +544 18 loss """nssa""" +544 18 regularizer """no""" +544 18 optimizer """adam""" +544 18 training_loop """owa""" +544 18 negative_sampler """basic""" +544 18 evaluator """rankbased""" +544 19 dataset """kinships""" +544 19 model """rescal""" +544 19 loss """nssa""" +544 19 regularizer """no""" +544 19 optimizer """adam""" +544 19 training_loop """owa""" +544 19 negative_sampler """basic""" +544 19 evaluator """rankbased""" +544 20 dataset """kinships""" +544 20 model """rescal""" +544 20 loss """nssa""" +544 20 regularizer """no""" +544 20 optimizer """adam""" +544 20 training_loop """owa""" +544 20 negative_sampler """basic""" +544 20 evaluator """rankbased""" +544 21 dataset """kinships""" +544 21 model """rescal""" +544 21 loss """nssa""" +544 21 regularizer """no""" +544 21 optimizer """adam""" +544 21 training_loop """owa""" +544 21 negative_sampler """basic""" +544 21 evaluator """rankbased""" +544 22 dataset """kinships""" +544 22 model """rescal""" +544 22 loss """nssa""" +544 22 regularizer """no""" +544 22 optimizer """adam""" +544 22 training_loop """owa""" +544 22 negative_sampler """basic""" +544 22 evaluator """rankbased""" +544 23 dataset """kinships""" +544 23 model 
"""rescal""" +544 23 loss """nssa""" +544 23 regularizer """no""" +544 23 optimizer """adam""" +544 23 training_loop """owa""" +544 23 negative_sampler """basic""" +544 23 evaluator """rankbased""" +544 24 dataset """kinships""" +544 24 model """rescal""" +544 24 loss """nssa""" +544 24 regularizer """no""" +544 24 optimizer """adam""" +544 24 training_loop """owa""" +544 24 negative_sampler """basic""" +544 24 evaluator """rankbased""" +544 25 dataset """kinships""" +544 25 model """rescal""" +544 25 loss """nssa""" +544 25 regularizer """no""" +544 25 optimizer """adam""" +544 25 training_loop """owa""" +544 25 negative_sampler """basic""" +544 25 evaluator """rankbased""" +544 26 dataset """kinships""" +544 26 model """rescal""" +544 26 loss """nssa""" +544 26 regularizer """no""" +544 26 optimizer """adam""" +544 26 training_loop """owa""" +544 26 negative_sampler """basic""" +544 26 evaluator """rankbased""" +544 27 dataset """kinships""" +544 27 model """rescal""" +544 27 loss """nssa""" +544 27 regularizer """no""" +544 27 optimizer """adam""" +544 27 training_loop """owa""" +544 27 negative_sampler """basic""" +544 27 evaluator """rankbased""" +544 28 dataset """kinships""" +544 28 model """rescal""" +544 28 loss """nssa""" +544 28 regularizer """no""" +544 28 optimizer """adam""" +544 28 training_loop """owa""" +544 28 negative_sampler """basic""" +544 28 evaluator """rankbased""" +544 29 dataset """kinships""" +544 29 model """rescal""" +544 29 loss """nssa""" +544 29 regularizer """no""" +544 29 optimizer """adam""" +544 29 training_loop """owa""" +544 29 negative_sampler """basic""" +544 29 evaluator """rankbased""" +544 30 dataset """kinships""" +544 30 model """rescal""" +544 30 loss """nssa""" +544 30 regularizer """no""" +544 30 optimizer """adam""" +544 30 training_loop """owa""" +544 30 negative_sampler """basic""" +544 30 evaluator """rankbased""" +544 31 dataset """kinships""" +544 31 model """rescal""" +544 31 loss """nssa""" +544 31 
regularizer """no""" +544 31 optimizer """adam""" +544 31 training_loop """owa""" +544 31 negative_sampler """basic""" +544 31 evaluator """rankbased""" +544 32 dataset """kinships""" +544 32 model """rescal""" +544 32 loss """nssa""" +544 32 regularizer """no""" +544 32 optimizer """adam""" +544 32 training_loop """owa""" +544 32 negative_sampler """basic""" +544 32 evaluator """rankbased""" +544 33 dataset """kinships""" +544 33 model """rescal""" +544 33 loss """nssa""" +544 33 regularizer """no""" +544 33 optimizer """adam""" +544 33 training_loop """owa""" +544 33 negative_sampler """basic""" +544 33 evaluator """rankbased""" +544 34 dataset """kinships""" +544 34 model """rescal""" +544 34 loss """nssa""" +544 34 regularizer """no""" +544 34 optimizer """adam""" +544 34 training_loop """owa""" +544 34 negative_sampler """basic""" +544 34 evaluator """rankbased""" +544 35 dataset """kinships""" +544 35 model """rescal""" +544 35 loss """nssa""" +544 35 regularizer """no""" +544 35 optimizer """adam""" +544 35 training_loop """owa""" +544 35 negative_sampler """basic""" +544 35 evaluator """rankbased""" +544 36 dataset """kinships""" +544 36 model """rescal""" +544 36 loss """nssa""" +544 36 regularizer """no""" +544 36 optimizer """adam""" +544 36 training_loop """owa""" +544 36 negative_sampler """basic""" +544 36 evaluator """rankbased""" +544 37 dataset """kinships""" +544 37 model """rescal""" +544 37 loss """nssa""" +544 37 regularizer """no""" +544 37 optimizer """adam""" +544 37 training_loop """owa""" +544 37 negative_sampler """basic""" +544 37 evaluator """rankbased""" +544 38 dataset """kinships""" +544 38 model """rescal""" +544 38 loss """nssa""" +544 38 regularizer """no""" +544 38 optimizer """adam""" +544 38 training_loop """owa""" +544 38 negative_sampler """basic""" +544 38 evaluator """rankbased""" +544 39 dataset """kinships""" +544 39 model """rescal""" +544 39 loss """nssa""" +544 39 regularizer """no""" +544 39 optimizer """adam""" +544 
39 training_loop """owa""" +544 39 negative_sampler """basic""" +544 39 evaluator """rankbased""" +544 40 dataset """kinships""" +544 40 model """rescal""" +544 40 loss """nssa""" +544 40 regularizer """no""" +544 40 optimizer """adam""" +544 40 training_loop """owa""" +544 40 negative_sampler """basic""" +544 40 evaluator """rankbased""" +544 41 dataset """kinships""" +544 41 model """rescal""" +544 41 loss """nssa""" +544 41 regularizer """no""" +544 41 optimizer """adam""" +544 41 training_loop """owa""" +544 41 negative_sampler """basic""" +544 41 evaluator """rankbased""" +544 42 dataset """kinships""" +544 42 model """rescal""" +544 42 loss """nssa""" +544 42 regularizer """no""" +544 42 optimizer """adam""" +544 42 training_loop """owa""" +544 42 negative_sampler """basic""" +544 42 evaluator """rankbased""" +544 43 dataset """kinships""" +544 43 model """rescal""" +544 43 loss """nssa""" +544 43 regularizer """no""" +544 43 optimizer """adam""" +544 43 training_loop """owa""" +544 43 negative_sampler """basic""" +544 43 evaluator """rankbased""" +544 44 dataset """kinships""" +544 44 model """rescal""" +544 44 loss """nssa""" +544 44 regularizer """no""" +544 44 optimizer """adam""" +544 44 training_loop """owa""" +544 44 negative_sampler """basic""" +544 44 evaluator """rankbased""" +544 45 dataset """kinships""" +544 45 model """rescal""" +544 45 loss """nssa""" +544 45 regularizer """no""" +544 45 optimizer """adam""" +544 45 training_loop """owa""" +544 45 negative_sampler """basic""" +544 45 evaluator """rankbased""" +544 46 dataset """kinships""" +544 46 model """rescal""" +544 46 loss """nssa""" +544 46 regularizer """no""" +544 46 optimizer """adam""" +544 46 training_loop """owa""" +544 46 negative_sampler """basic""" +544 46 evaluator """rankbased""" +544 47 dataset """kinships""" +544 47 model """rescal""" +544 47 loss """nssa""" +544 47 regularizer """no""" +544 47 optimizer """adam""" +544 47 training_loop """owa""" +544 47 negative_sampler 
"""basic""" +544 47 evaluator """rankbased""" +544 48 dataset """kinships""" +544 48 model """rescal""" +544 48 loss """nssa""" +544 48 regularizer """no""" +544 48 optimizer """adam""" +544 48 training_loop """owa""" +544 48 negative_sampler """basic""" +544 48 evaluator """rankbased""" +544 49 dataset """kinships""" +544 49 model """rescal""" +544 49 loss """nssa""" +544 49 regularizer """no""" +544 49 optimizer """adam""" +544 49 training_loop """owa""" +544 49 negative_sampler """basic""" +544 49 evaluator """rankbased""" +544 50 dataset """kinships""" +544 50 model """rescal""" +544 50 loss """nssa""" +544 50 regularizer """no""" +544 50 optimizer """adam""" +544 50 training_loop """owa""" +544 50 negative_sampler """basic""" +544 50 evaluator """rankbased""" +544 51 dataset """kinships""" +544 51 model """rescal""" +544 51 loss """nssa""" +544 51 regularizer """no""" +544 51 optimizer """adam""" +544 51 training_loop """owa""" +544 51 negative_sampler """basic""" +544 51 evaluator """rankbased""" +544 52 dataset """kinships""" +544 52 model """rescal""" +544 52 loss """nssa""" +544 52 regularizer """no""" +544 52 optimizer """adam""" +544 52 training_loop """owa""" +544 52 negative_sampler """basic""" +544 52 evaluator """rankbased""" +544 53 dataset """kinships""" +544 53 model """rescal""" +544 53 loss """nssa""" +544 53 regularizer """no""" +544 53 optimizer """adam""" +544 53 training_loop """owa""" +544 53 negative_sampler """basic""" +544 53 evaluator """rankbased""" +544 54 dataset """kinships""" +544 54 model """rescal""" +544 54 loss """nssa""" +544 54 regularizer """no""" +544 54 optimizer """adam""" +544 54 training_loop """owa""" +544 54 negative_sampler """basic""" +544 54 evaluator """rankbased""" +544 55 dataset """kinships""" +544 55 model """rescal""" +544 55 loss """nssa""" +544 55 regularizer """no""" +544 55 optimizer """adam""" +544 55 training_loop """owa""" +544 55 negative_sampler """basic""" +544 55 evaluator """rankbased""" +544 56 
dataset """kinships""" +544 56 model """rescal""" +544 56 loss """nssa""" +544 56 regularizer """no""" +544 56 optimizer """adam""" +544 56 training_loop """owa""" +544 56 negative_sampler """basic""" +544 56 evaluator """rankbased""" +544 57 dataset """kinships""" +544 57 model """rescal""" +544 57 loss """nssa""" +544 57 regularizer """no""" +544 57 optimizer """adam""" +544 57 training_loop """owa""" +544 57 negative_sampler """basic""" +544 57 evaluator """rankbased""" +544 58 dataset """kinships""" +544 58 model """rescal""" +544 58 loss """nssa""" +544 58 regularizer """no""" +544 58 optimizer """adam""" +544 58 training_loop """owa""" +544 58 negative_sampler """basic""" +544 58 evaluator """rankbased""" +544 59 dataset """kinships""" +544 59 model """rescal""" +544 59 loss """nssa""" +544 59 regularizer """no""" +544 59 optimizer """adam""" +544 59 training_loop """owa""" +544 59 negative_sampler """basic""" +544 59 evaluator """rankbased""" +544 60 dataset """kinships""" +544 60 model """rescal""" +544 60 loss """nssa""" +544 60 regularizer """no""" +544 60 optimizer """adam""" +544 60 training_loop """owa""" +544 60 negative_sampler """basic""" +544 60 evaluator """rankbased""" +544 61 dataset """kinships""" +544 61 model """rescal""" +544 61 loss """nssa""" +544 61 regularizer """no""" +544 61 optimizer """adam""" +544 61 training_loop """owa""" +544 61 negative_sampler """basic""" +544 61 evaluator """rankbased""" +544 62 dataset """kinships""" +544 62 model """rescal""" +544 62 loss """nssa""" +544 62 regularizer """no""" +544 62 optimizer """adam""" +544 62 training_loop """owa""" +544 62 negative_sampler """basic""" +544 62 evaluator """rankbased""" +544 63 dataset """kinships""" +544 63 model """rescal""" +544 63 loss """nssa""" +544 63 regularizer """no""" +544 63 optimizer """adam""" +544 63 training_loop """owa""" +544 63 negative_sampler """basic""" +544 63 evaluator """rankbased""" +544 64 dataset """kinships""" +544 64 model """rescal""" +544 
64 loss """nssa""" +544 64 regularizer """no""" +544 64 optimizer """adam""" +544 64 training_loop """owa""" +544 64 negative_sampler """basic""" +544 64 evaluator """rankbased""" +544 65 dataset """kinships""" +544 65 model """rescal""" +544 65 loss """nssa""" +544 65 regularizer """no""" +544 65 optimizer """adam""" +544 65 training_loop """owa""" +544 65 negative_sampler """basic""" +544 65 evaluator """rankbased""" +544 66 dataset """kinships""" +544 66 model """rescal""" +544 66 loss """nssa""" +544 66 regularizer """no""" +544 66 optimizer """adam""" +544 66 training_loop """owa""" +544 66 negative_sampler """basic""" +544 66 evaluator """rankbased""" +544 67 dataset """kinships""" +544 67 model """rescal""" +544 67 loss """nssa""" +544 67 regularizer """no""" +544 67 optimizer """adam""" +544 67 training_loop """owa""" +544 67 negative_sampler """basic""" +544 67 evaluator """rankbased""" +544 68 dataset """kinships""" +544 68 model """rescal""" +544 68 loss """nssa""" +544 68 regularizer """no""" +544 68 optimizer """adam""" +544 68 training_loop """owa""" +544 68 negative_sampler """basic""" +544 68 evaluator """rankbased""" +544 69 dataset """kinships""" +544 69 model """rescal""" +544 69 loss """nssa""" +544 69 regularizer """no""" +544 69 optimizer """adam""" +544 69 training_loop """owa""" +544 69 negative_sampler """basic""" +544 69 evaluator """rankbased""" +544 70 dataset """kinships""" +544 70 model """rescal""" +544 70 loss """nssa""" +544 70 regularizer """no""" +544 70 optimizer """adam""" +544 70 training_loop """owa""" +544 70 negative_sampler """basic""" +544 70 evaluator """rankbased""" +544 71 dataset """kinships""" +544 71 model """rescal""" +544 71 loss """nssa""" +544 71 regularizer """no""" +544 71 optimizer """adam""" +544 71 training_loop """owa""" +544 71 negative_sampler """basic""" +544 71 evaluator """rankbased""" +544 72 dataset """kinships""" +544 72 model """rescal""" +544 72 loss """nssa""" +544 72 regularizer """no""" +544 72 
optimizer """adam""" +544 72 training_loop """owa""" +544 72 negative_sampler """basic""" +544 72 evaluator """rankbased""" +544 73 dataset """kinships""" +544 73 model """rescal""" +544 73 loss """nssa""" +544 73 regularizer """no""" +544 73 optimizer """adam""" +544 73 training_loop """owa""" +544 73 negative_sampler """basic""" +544 73 evaluator """rankbased""" +544 74 dataset """kinships""" +544 74 model """rescal""" +544 74 loss """nssa""" +544 74 regularizer """no""" +544 74 optimizer """adam""" +544 74 training_loop """owa""" +544 74 negative_sampler """basic""" +544 74 evaluator """rankbased""" +544 75 dataset """kinships""" +544 75 model """rescal""" +544 75 loss """nssa""" +544 75 regularizer """no""" +544 75 optimizer """adam""" +544 75 training_loop """owa""" +544 75 negative_sampler """basic""" +544 75 evaluator """rankbased""" +544 76 dataset """kinships""" +544 76 model """rescal""" +544 76 loss """nssa""" +544 76 regularizer """no""" +544 76 optimizer """adam""" +544 76 training_loop """owa""" +544 76 negative_sampler """basic""" +544 76 evaluator """rankbased""" +544 77 dataset """kinships""" +544 77 model """rescal""" +544 77 loss """nssa""" +544 77 regularizer """no""" +544 77 optimizer """adam""" +544 77 training_loop """owa""" +544 77 negative_sampler """basic""" +544 77 evaluator """rankbased""" +544 78 dataset """kinships""" +544 78 model """rescal""" +544 78 loss """nssa""" +544 78 regularizer """no""" +544 78 optimizer """adam""" +544 78 training_loop """owa""" +544 78 negative_sampler """basic""" +544 78 evaluator """rankbased""" +544 79 dataset """kinships""" +544 79 model """rescal""" +544 79 loss """nssa""" +544 79 regularizer """no""" +544 79 optimizer """adam""" +544 79 training_loop """owa""" +544 79 negative_sampler """basic""" +544 79 evaluator """rankbased""" +544 80 dataset """kinships""" +544 80 model """rescal""" +544 80 loss """nssa""" +544 80 regularizer """no""" +544 80 optimizer """adam""" +544 80 training_loop """owa""" 
+544 80 negative_sampler """basic""" +544 80 evaluator """rankbased""" +544 81 dataset """kinships""" +544 81 model """rescal""" +544 81 loss """nssa""" +544 81 regularizer """no""" +544 81 optimizer """adam""" +544 81 training_loop """owa""" +544 81 negative_sampler """basic""" +544 81 evaluator """rankbased""" +544 82 dataset """kinships""" +544 82 model """rescal""" +544 82 loss """nssa""" +544 82 regularizer """no""" +544 82 optimizer """adam""" +544 82 training_loop """owa""" +544 82 negative_sampler """basic""" +544 82 evaluator """rankbased""" +544 83 dataset """kinships""" +544 83 model """rescal""" +544 83 loss """nssa""" +544 83 regularizer """no""" +544 83 optimizer """adam""" +544 83 training_loop """owa""" +544 83 negative_sampler """basic""" +544 83 evaluator """rankbased""" +544 84 dataset """kinships""" +544 84 model """rescal""" +544 84 loss """nssa""" +544 84 regularizer """no""" +544 84 optimizer """adam""" +544 84 training_loop """owa""" +544 84 negative_sampler """basic""" +544 84 evaluator """rankbased""" +544 85 dataset """kinships""" +544 85 model """rescal""" +544 85 loss """nssa""" +544 85 regularizer """no""" +544 85 optimizer """adam""" +544 85 training_loop """owa""" +544 85 negative_sampler """basic""" +544 85 evaluator """rankbased""" +544 86 dataset """kinships""" +544 86 model """rescal""" +544 86 loss """nssa""" +544 86 regularizer """no""" +544 86 optimizer """adam""" +544 86 training_loop """owa""" +544 86 negative_sampler """basic""" +544 86 evaluator """rankbased""" +544 87 dataset """kinships""" +544 87 model """rescal""" +544 87 loss """nssa""" +544 87 regularizer """no""" +544 87 optimizer """adam""" +544 87 training_loop """owa""" +544 87 negative_sampler """basic""" +544 87 evaluator """rankbased""" +544 88 dataset """kinships""" +544 88 model """rescal""" +544 88 loss """nssa""" +544 88 regularizer """no""" +544 88 optimizer """adam""" +544 88 training_loop """owa""" +544 88 negative_sampler """basic""" +544 88 evaluator 
"""rankbased""" +544 89 dataset """kinships""" +544 89 model """rescal""" +544 89 loss """nssa""" +544 89 regularizer """no""" +544 89 optimizer """adam""" +544 89 training_loop """owa""" +544 89 negative_sampler """basic""" +544 89 evaluator """rankbased""" +544 90 dataset """kinships""" +544 90 model """rescal""" +544 90 loss """nssa""" +544 90 regularizer """no""" +544 90 optimizer """adam""" +544 90 training_loop """owa""" +544 90 negative_sampler """basic""" +544 90 evaluator """rankbased""" +544 91 dataset """kinships""" +544 91 model """rescal""" +544 91 loss """nssa""" +544 91 regularizer """no""" +544 91 optimizer """adam""" +544 91 training_loop """owa""" +544 91 negative_sampler """basic""" +544 91 evaluator """rankbased""" +544 92 dataset """kinships""" +544 92 model """rescal""" +544 92 loss """nssa""" +544 92 regularizer """no""" +544 92 optimizer """adam""" +544 92 training_loop """owa""" +544 92 negative_sampler """basic""" +544 92 evaluator """rankbased""" +544 93 dataset """kinships""" +544 93 model """rescal""" +544 93 loss """nssa""" +544 93 regularizer """no""" +544 93 optimizer """adam""" +544 93 training_loop """owa""" +544 93 negative_sampler """basic""" +544 93 evaluator """rankbased""" +544 94 dataset """kinships""" +544 94 model """rescal""" +544 94 loss """nssa""" +544 94 regularizer """no""" +544 94 optimizer """adam""" +544 94 training_loop """owa""" +544 94 negative_sampler """basic""" +544 94 evaluator """rankbased""" +544 95 dataset """kinships""" +544 95 model """rescal""" +544 95 loss """nssa""" +544 95 regularizer """no""" +544 95 optimizer """adam""" +544 95 training_loop """owa""" +544 95 negative_sampler """basic""" +544 95 evaluator """rankbased""" +544 96 dataset """kinships""" +544 96 model """rescal""" +544 96 loss """nssa""" +544 96 regularizer """no""" +544 96 optimizer """adam""" +544 96 training_loop """owa""" +544 96 negative_sampler """basic""" +544 96 evaluator """rankbased""" +544 97 dataset """kinships""" +544 97 
model """rescal""" +544 97 loss """nssa""" +544 97 regularizer """no""" +544 97 optimizer """adam""" +544 97 training_loop """owa""" +544 97 negative_sampler """basic""" +544 97 evaluator """rankbased""" +544 98 dataset """kinships""" +544 98 model """rescal""" +544 98 loss """nssa""" +544 98 regularizer """no""" +544 98 optimizer """adam""" +544 98 training_loop """owa""" +544 98 negative_sampler """basic""" +544 98 evaluator """rankbased""" +544 99 dataset """kinships""" +544 99 model """rescal""" +544 99 loss """nssa""" +544 99 regularizer """no""" +544 99 optimizer """adam""" +544 99 training_loop """owa""" +544 99 negative_sampler """basic""" +544 99 evaluator """rankbased""" +544 100 dataset """kinships""" +544 100 model """rescal""" +544 100 loss """nssa""" +544 100 regularizer """no""" +544 100 optimizer """adam""" +544 100 training_loop """owa""" +544 100 negative_sampler """basic""" +544 100 evaluator """rankbased""" +545 1 model.embedding_dim 0.0 +545 1 loss.margin 1.2320680802167452 +545 1 optimizer.lr 0.0019460928043832247 +545 1 negative_sampler.num_negs_per_pos 76.0 +545 1 training.batch_size 1.0 +545 2 model.embedding_dim 0.0 +545 2 loss.margin 9.53100326943885 +545 2 optimizer.lr 0.004840817034933441 +545 2 negative_sampler.num_negs_per_pos 82.0 +545 2 training.batch_size 2.0 +545 3 model.embedding_dim 2.0 +545 3 loss.margin 2.439571398699812 +545 3 optimizer.lr 0.0360834983407526 +545 3 negative_sampler.num_negs_per_pos 96.0 +545 3 training.batch_size 1.0 +545 4 model.embedding_dim 2.0 +545 4 loss.margin 5.492830967176026 +545 4 optimizer.lr 0.07758048351751626 +545 4 negative_sampler.num_negs_per_pos 42.0 +545 4 training.batch_size 0.0 +545 5 model.embedding_dim 2.0 +545 5 loss.margin 1.2722116871511062 +545 5 optimizer.lr 0.002663873170027923 +545 5 negative_sampler.num_negs_per_pos 63.0 +545 5 training.batch_size 1.0 +545 6 model.embedding_dim 0.0 +545 6 loss.margin 4.10186832558887 +545 6 optimizer.lr 0.009567942075847326 +545 6 
negative_sampler.num_negs_per_pos 8.0 +545 6 training.batch_size 2.0 +545 7 model.embedding_dim 0.0 +545 7 loss.margin 0.9394252942669901 +545 7 optimizer.lr 0.005855657247033722 +545 7 negative_sampler.num_negs_per_pos 93.0 +545 7 training.batch_size 0.0 +545 8 model.embedding_dim 0.0 +545 8 loss.margin 9.426896202386425 +545 8 optimizer.lr 0.014213041014427108 +545 8 negative_sampler.num_negs_per_pos 45.0 +545 8 training.batch_size 2.0 +545 9 model.embedding_dim 2.0 +545 9 loss.margin 6.273389843900211 +545 9 optimizer.lr 0.00794905117456208 +545 9 negative_sampler.num_negs_per_pos 89.0 +545 9 training.batch_size 1.0 +545 10 model.embedding_dim 1.0 +545 10 loss.margin 9.729969143281311 +545 10 optimizer.lr 0.0011444638173093494 +545 10 negative_sampler.num_negs_per_pos 19.0 +545 10 training.batch_size 0.0 +545 11 model.embedding_dim 1.0 +545 11 loss.margin 6.805467522990654 +545 11 optimizer.lr 0.004121227231638599 +545 11 negative_sampler.num_negs_per_pos 87.0 +545 11 training.batch_size 1.0 +545 12 model.embedding_dim 1.0 +545 12 loss.margin 6.647992738910572 +545 12 optimizer.lr 0.04714736583066203 +545 12 negative_sampler.num_negs_per_pos 66.0 +545 12 training.batch_size 1.0 +545 13 model.embedding_dim 2.0 +545 13 loss.margin 7.777445693586958 +545 13 optimizer.lr 0.07386565154929438 +545 13 negative_sampler.num_negs_per_pos 45.0 +545 13 training.batch_size 0.0 +545 14 model.embedding_dim 2.0 +545 14 loss.margin 0.9234813239124173 +545 14 optimizer.lr 0.02256012701892646 +545 14 negative_sampler.num_negs_per_pos 67.0 +545 14 training.batch_size 1.0 +545 15 model.embedding_dim 0.0 +545 15 loss.margin 9.597885484534618 +545 15 optimizer.lr 0.008710672039466518 +545 15 negative_sampler.num_negs_per_pos 77.0 +545 15 training.batch_size 0.0 +545 16 model.embedding_dim 1.0 +545 16 loss.margin 0.9099051865429613 +545 16 optimizer.lr 0.011468771739391362 +545 16 negative_sampler.num_negs_per_pos 35.0 +545 16 training.batch_size 2.0 +545 17 model.embedding_dim 1.0 
+545 17 loss.margin 4.089350571799072 +545 17 optimizer.lr 0.001091383026499983 +545 17 negative_sampler.num_negs_per_pos 80.0 +545 17 training.batch_size 1.0 +545 18 model.embedding_dim 0.0 +545 18 loss.margin 1.3123971979609959 +545 18 optimizer.lr 0.007888349651851236 +545 18 negative_sampler.num_negs_per_pos 33.0 +545 18 training.batch_size 1.0 +545 19 model.embedding_dim 1.0 +545 19 loss.margin 7.763627983852163 +545 19 optimizer.lr 0.0028991558584235146 +545 19 negative_sampler.num_negs_per_pos 24.0 +545 19 training.batch_size 0.0 +545 20 model.embedding_dim 2.0 +545 20 loss.margin 9.521369074825598 +545 20 optimizer.lr 0.014850932068574779 +545 20 negative_sampler.num_negs_per_pos 37.0 +545 20 training.batch_size 2.0 +545 21 model.embedding_dim 2.0 +545 21 loss.margin 5.276318646093116 +545 21 optimizer.lr 0.005155056995284247 +545 21 negative_sampler.num_negs_per_pos 19.0 +545 21 training.batch_size 2.0 +545 22 model.embedding_dim 1.0 +545 22 loss.margin 8.801602193862891 +545 22 optimizer.lr 0.017578181596665632 +545 22 negative_sampler.num_negs_per_pos 5.0 +545 22 training.batch_size 2.0 +545 23 model.embedding_dim 1.0 +545 23 loss.margin 6.111930175119853 +545 23 optimizer.lr 0.0014215233154481553 +545 23 negative_sampler.num_negs_per_pos 82.0 +545 23 training.batch_size 2.0 +545 24 model.embedding_dim 1.0 +545 24 loss.margin 6.554649125039391 +545 24 optimizer.lr 0.038878396147329596 +545 24 negative_sampler.num_negs_per_pos 40.0 +545 24 training.batch_size 2.0 +545 25 model.embedding_dim 1.0 +545 25 loss.margin 4.091838439454151 +545 25 optimizer.lr 0.08137289390363069 +545 25 negative_sampler.num_negs_per_pos 40.0 +545 25 training.batch_size 2.0 +545 26 model.embedding_dim 1.0 +545 26 loss.margin 7.491511809150409 +545 26 optimizer.lr 0.0368380628121968 +545 26 negative_sampler.num_negs_per_pos 34.0 +545 26 training.batch_size 2.0 +545 27 model.embedding_dim 0.0 +545 27 loss.margin 2.9019662020953283 +545 27 optimizer.lr 0.002155344960947882 +545 27 
negative_sampler.num_negs_per_pos 84.0 +545 27 training.batch_size 2.0 +545 28 model.embedding_dim 0.0 +545 28 loss.margin 7.272288154534781 +545 28 optimizer.lr 0.05775231244253179 +545 28 negative_sampler.num_negs_per_pos 18.0 +545 28 training.batch_size 1.0 +545 29 model.embedding_dim 1.0 +545 29 loss.margin 1.3078809051010016 +545 29 optimizer.lr 0.04475293872626788 +545 29 negative_sampler.num_negs_per_pos 68.0 +545 29 training.batch_size 1.0 +545 30 model.embedding_dim 0.0 +545 30 loss.margin 7.790336601352933 +545 30 optimizer.lr 0.03581080848068819 +545 30 negative_sampler.num_negs_per_pos 80.0 +545 30 training.batch_size 0.0 +545 31 model.embedding_dim 2.0 +545 31 loss.margin 3.257736221312613 +545 31 optimizer.lr 0.03222665401869238 +545 31 negative_sampler.num_negs_per_pos 72.0 +545 31 training.batch_size 1.0 +545 32 model.embedding_dim 2.0 +545 32 loss.margin 4.284766822218327 +545 32 optimizer.lr 0.001865339898987766 +545 32 negative_sampler.num_negs_per_pos 65.0 +545 32 training.batch_size 1.0 +545 33 model.embedding_dim 0.0 +545 33 loss.margin 7.119795480406866 +545 33 optimizer.lr 0.0602281978575315 +545 33 negative_sampler.num_negs_per_pos 20.0 +545 33 training.batch_size 2.0 +545 34 model.embedding_dim 0.0 +545 34 loss.margin 6.445311846133132 +545 34 optimizer.lr 0.0010966064695437793 +545 34 negative_sampler.num_negs_per_pos 50.0 +545 34 training.batch_size 0.0 +545 35 model.embedding_dim 2.0 +545 35 loss.margin 3.827799413495125 +545 35 optimizer.lr 0.04469714888438305 +545 35 negative_sampler.num_negs_per_pos 34.0 +545 35 training.batch_size 2.0 +545 36 model.embedding_dim 1.0 +545 36 loss.margin 2.2597817907839346 +545 36 optimizer.lr 0.0029319572522245566 +545 36 negative_sampler.num_negs_per_pos 70.0 +545 36 training.batch_size 1.0 +545 37 model.embedding_dim 1.0 +545 37 loss.margin 3.458403157085479 +545 37 optimizer.lr 0.006571817736073061 +545 37 negative_sampler.num_negs_per_pos 0.0 +545 37 training.batch_size 1.0 +545 38 
model.embedding_dim 1.0 +545 38 loss.margin 3.714799093509222 +545 38 optimizer.lr 0.005896175642199047 +545 38 negative_sampler.num_negs_per_pos 92.0 +545 38 training.batch_size 0.0 +545 39 model.embedding_dim 1.0 +545 39 loss.margin 9.560727248019498 +545 39 optimizer.lr 0.07910803244705837 +545 39 negative_sampler.num_negs_per_pos 47.0 +545 39 training.batch_size 0.0 +545 40 model.embedding_dim 1.0 +545 40 loss.margin 2.5406096633754665 +545 40 optimizer.lr 0.0015539271868176244 +545 40 negative_sampler.num_negs_per_pos 1.0 +545 40 training.batch_size 0.0 +545 41 model.embedding_dim 0.0 +545 41 loss.margin 9.138907300718703 +545 41 optimizer.lr 0.0013721846209828914 +545 41 negative_sampler.num_negs_per_pos 4.0 +545 41 training.batch_size 2.0 +545 42 model.embedding_dim 1.0 +545 42 loss.margin 6.707309899726213 +545 42 optimizer.lr 0.0060540379118722825 +545 42 negative_sampler.num_negs_per_pos 5.0 +545 42 training.batch_size 0.0 +545 43 model.embedding_dim 1.0 +545 43 loss.margin 7.121715454142749 +545 43 optimizer.lr 0.001411440699486918 +545 43 negative_sampler.num_negs_per_pos 75.0 +545 43 training.batch_size 2.0 +545 44 model.embedding_dim 1.0 +545 44 loss.margin 6.226519226251934 +545 44 optimizer.lr 0.09422886619537835 +545 44 negative_sampler.num_negs_per_pos 60.0 +545 44 training.batch_size 0.0 +545 45 model.embedding_dim 1.0 +545 45 loss.margin 9.933804506516713 +545 45 optimizer.lr 0.019432413021102178 +545 45 negative_sampler.num_negs_per_pos 3.0 +545 45 training.batch_size 0.0 +545 46 model.embedding_dim 1.0 +545 46 loss.margin 7.268941646897914 +545 46 optimizer.lr 0.0017814728431812179 +545 46 negative_sampler.num_negs_per_pos 62.0 +545 46 training.batch_size 1.0 +545 47 model.embedding_dim 0.0 +545 47 loss.margin 3.2882808079138024 +545 47 optimizer.lr 0.015219605218686157 +545 47 negative_sampler.num_negs_per_pos 52.0 +545 47 training.batch_size 1.0 +545 48 model.embedding_dim 2.0 +545 48 loss.margin 6.206269963127257 +545 48 optimizer.lr 
0.051733109690161865 +545 48 negative_sampler.num_negs_per_pos 46.0 +545 48 training.batch_size 2.0 +545 49 model.embedding_dim 1.0 +545 49 loss.margin 4.626806612994456 +545 49 optimizer.lr 0.004689536757782794 +545 49 negative_sampler.num_negs_per_pos 87.0 +545 49 training.batch_size 2.0 +545 50 model.embedding_dim 0.0 +545 50 loss.margin 9.045050791336056 +545 50 optimizer.lr 0.08278708949739906 +545 50 negative_sampler.num_negs_per_pos 76.0 +545 50 training.batch_size 0.0 +545 51 model.embedding_dim 0.0 +545 51 loss.margin 6.835938901551884 +545 51 optimizer.lr 0.05009805400549548 +545 51 negative_sampler.num_negs_per_pos 24.0 +545 51 training.batch_size 2.0 +545 52 model.embedding_dim 1.0 +545 52 loss.margin 5.192979499041842 +545 52 optimizer.lr 0.0013833462584805977 +545 52 negative_sampler.num_negs_per_pos 0.0 +545 52 training.batch_size 2.0 +545 53 model.embedding_dim 2.0 +545 53 loss.margin 7.767054100012369 +545 53 optimizer.lr 0.042099551768267275 +545 53 negative_sampler.num_negs_per_pos 33.0 +545 53 training.batch_size 2.0 +545 54 model.embedding_dim 2.0 +545 54 loss.margin 7.331210193946658 +545 54 optimizer.lr 0.03492820038000134 +545 54 negative_sampler.num_negs_per_pos 84.0 +545 54 training.batch_size 2.0 +545 55 model.embedding_dim 1.0 +545 55 loss.margin 2.3861958660107314 +545 55 optimizer.lr 0.0026592867211373164 +545 55 negative_sampler.num_negs_per_pos 70.0 +545 55 training.batch_size 2.0 +545 56 model.embedding_dim 2.0 +545 56 loss.margin 0.7330137162121986 +545 56 optimizer.lr 0.03117803357058892 +545 56 negative_sampler.num_negs_per_pos 80.0 +545 56 training.batch_size 1.0 +545 57 model.embedding_dim 2.0 +545 57 loss.margin 1.99508156944291 +545 57 optimizer.lr 0.004516360850656091 +545 57 negative_sampler.num_negs_per_pos 80.0 +545 57 training.batch_size 0.0 +545 58 model.embedding_dim 1.0 +545 58 loss.margin 6.927745394218521 +545 58 optimizer.lr 0.002161979911835837 +545 58 negative_sampler.num_negs_per_pos 41.0 +545 58 
training.batch_size 0.0 +545 59 model.embedding_dim 1.0 +545 59 loss.margin 7.284001725930459 +545 59 optimizer.lr 0.0027835994713662953 +545 59 negative_sampler.num_negs_per_pos 25.0 +545 59 training.batch_size 2.0 +545 60 model.embedding_dim 2.0 +545 60 loss.margin 7.148867461246077 +545 60 optimizer.lr 0.0010490217521789704 +545 60 negative_sampler.num_negs_per_pos 38.0 +545 60 training.batch_size 0.0 +545 61 model.embedding_dim 2.0 +545 61 loss.margin 7.664139984631935 +545 61 optimizer.lr 0.024910229851623167 +545 61 negative_sampler.num_negs_per_pos 34.0 +545 61 training.batch_size 2.0 +545 62 model.embedding_dim 1.0 +545 62 loss.margin 5.103050277655012 +545 62 optimizer.lr 0.0024104268978975725 +545 62 negative_sampler.num_negs_per_pos 7.0 +545 62 training.batch_size 1.0 +545 63 model.embedding_dim 2.0 +545 63 loss.margin 6.235952665698845 +545 63 optimizer.lr 0.0010259104232942001 +545 63 negative_sampler.num_negs_per_pos 62.0 +545 63 training.batch_size 2.0 +545 64 model.embedding_dim 2.0 +545 64 loss.margin 8.958379346867028 +545 64 optimizer.lr 0.017085028593002 +545 64 negative_sampler.num_negs_per_pos 81.0 +545 64 training.batch_size 1.0 +545 65 model.embedding_dim 0.0 +545 65 loss.margin 0.5327459807035184 +545 65 optimizer.lr 0.0036187370571222695 +545 65 negative_sampler.num_negs_per_pos 22.0 +545 65 training.batch_size 0.0 +545 66 model.embedding_dim 0.0 +545 66 loss.margin 7.7908625642748 +545 66 optimizer.lr 0.0028533620167217762 +545 66 negative_sampler.num_negs_per_pos 27.0 +545 66 training.batch_size 0.0 +545 67 model.embedding_dim 2.0 +545 67 loss.margin 4.820288162497569 +545 67 optimizer.lr 0.01633267831306854 +545 67 negative_sampler.num_negs_per_pos 19.0 +545 67 training.batch_size 0.0 +545 68 model.embedding_dim 2.0 +545 68 loss.margin 1.4406087122764548 +545 68 optimizer.lr 0.05390390251536494 +545 68 negative_sampler.num_negs_per_pos 74.0 +545 68 training.batch_size 2.0 +545 69 model.embedding_dim 0.0 +545 69 loss.margin 
4.491103266674025 +545 69 optimizer.lr 0.0347462127332862 +545 69 negative_sampler.num_negs_per_pos 71.0 +545 69 training.batch_size 2.0 +545 70 model.embedding_dim 0.0 +545 70 loss.margin 7.617663066784756 +545 70 optimizer.lr 0.0035599900065599577 +545 70 negative_sampler.num_negs_per_pos 73.0 +545 70 training.batch_size 0.0 +545 71 model.embedding_dim 0.0 +545 71 loss.margin 4.28027113872167 +545 71 optimizer.lr 0.0026573254810953596 +545 71 negative_sampler.num_negs_per_pos 5.0 +545 71 training.batch_size 1.0 +545 72 model.embedding_dim 0.0 +545 72 loss.margin 6.37903139994603 +545 72 optimizer.lr 0.06091347198673496 +545 72 negative_sampler.num_negs_per_pos 71.0 +545 72 training.batch_size 2.0 +545 73 model.embedding_dim 0.0 +545 73 loss.margin 4.168626905395847 +545 73 optimizer.lr 0.007936319736396118 +545 73 negative_sampler.num_negs_per_pos 14.0 +545 73 training.batch_size 2.0 +545 74 model.embedding_dim 1.0 +545 74 loss.margin 3.4991605350399824 +545 74 optimizer.lr 0.07422643465290903 +545 74 negative_sampler.num_negs_per_pos 8.0 +545 74 training.batch_size 0.0 +545 75 model.embedding_dim 0.0 +545 75 loss.margin 8.482932299829624 +545 75 optimizer.lr 0.0018963127773927986 +545 75 negative_sampler.num_negs_per_pos 56.0 +545 75 training.batch_size 1.0 +545 76 model.embedding_dim 1.0 +545 76 loss.margin 0.9945153701688476 +545 76 optimizer.lr 0.06097303838394554 +545 76 negative_sampler.num_negs_per_pos 26.0 +545 76 training.batch_size 2.0 +545 77 model.embedding_dim 1.0 +545 77 loss.margin 3.57423673685442 +545 77 optimizer.lr 0.002568901249832643 +545 77 negative_sampler.num_negs_per_pos 68.0 +545 77 training.batch_size 2.0 +545 78 model.embedding_dim 0.0 +545 78 loss.margin 3.1267956378234283 +545 78 optimizer.lr 0.0021320468577422265 +545 78 negative_sampler.num_negs_per_pos 62.0 +545 78 training.batch_size 0.0 +545 79 model.embedding_dim 2.0 +545 79 loss.margin 6.6328381318363405 +545 79 optimizer.lr 0.001970645819709318 +545 79 
negative_sampler.num_negs_per_pos 53.0 +545 79 training.batch_size 1.0 +545 80 model.embedding_dim 2.0 +545 80 loss.margin 1.3128911127560723 +545 80 optimizer.lr 0.015878290791534532 +545 80 negative_sampler.num_negs_per_pos 28.0 +545 80 training.batch_size 1.0 +545 81 model.embedding_dim 1.0 +545 81 loss.margin 6.230371420205337 +545 81 optimizer.lr 0.034139189483778004 +545 81 negative_sampler.num_negs_per_pos 71.0 +545 81 training.batch_size 2.0 +545 82 model.embedding_dim 0.0 +545 82 loss.margin 2.008006236014514 +545 82 optimizer.lr 0.007716600515281309 +545 82 negative_sampler.num_negs_per_pos 40.0 +545 82 training.batch_size 0.0 +545 83 model.embedding_dim 0.0 +545 83 loss.margin 2.772245280678629 +545 83 optimizer.lr 0.0011671326273035717 +545 83 negative_sampler.num_negs_per_pos 79.0 +545 83 training.batch_size 0.0 +545 84 model.embedding_dim 0.0 +545 84 loss.margin 5.595994902471568 +545 84 optimizer.lr 0.0026675293270370117 +545 84 negative_sampler.num_negs_per_pos 9.0 +545 84 training.batch_size 2.0 +545 85 model.embedding_dim 0.0 +545 85 loss.margin 9.743055930432503 +545 85 optimizer.lr 0.04843373648739637 +545 85 negative_sampler.num_negs_per_pos 22.0 +545 85 training.batch_size 2.0 +545 86 model.embedding_dim 1.0 +545 86 loss.margin 8.974725703358311 +545 86 optimizer.lr 0.01507225809599506 +545 86 negative_sampler.num_negs_per_pos 43.0 +545 86 training.batch_size 0.0 +545 87 model.embedding_dim 2.0 +545 87 loss.margin 1.0286712571100183 +545 87 optimizer.lr 0.0017183660964023505 +545 87 negative_sampler.num_negs_per_pos 71.0 +545 87 training.batch_size 2.0 +545 88 model.embedding_dim 1.0 +545 88 loss.margin 1.788294972456865 +545 88 optimizer.lr 0.003516518483046817 +545 88 negative_sampler.num_negs_per_pos 70.0 +545 88 training.batch_size 1.0 +545 89 model.embedding_dim 0.0 +545 89 loss.margin 6.891630002058031 +545 89 optimizer.lr 0.002517529425140371 +545 89 negative_sampler.num_negs_per_pos 49.0 +545 89 training.batch_size 0.0 +545 90 
model.embedding_dim 2.0 +545 90 loss.margin 4.064813244123725 +545 90 optimizer.lr 0.003323682627783477 +545 90 negative_sampler.num_negs_per_pos 35.0 +545 90 training.batch_size 2.0 +545 91 model.embedding_dim 1.0 +545 91 loss.margin 7.438437393600136 +545 91 optimizer.lr 0.016574201148640146 +545 91 negative_sampler.num_negs_per_pos 14.0 +545 91 training.batch_size 0.0 +545 92 model.embedding_dim 1.0 +545 92 loss.margin 1.9907746979069514 +545 92 optimizer.lr 0.00903221809297274 +545 92 negative_sampler.num_negs_per_pos 32.0 +545 92 training.batch_size 2.0 +545 93 model.embedding_dim 0.0 +545 93 loss.margin 7.95737030377666 +545 93 optimizer.lr 0.05889512095689791 +545 93 negative_sampler.num_negs_per_pos 88.0 +545 93 training.batch_size 1.0 +545 94 model.embedding_dim 2.0 +545 94 loss.margin 5.1136280765723585 +545 94 optimizer.lr 0.007868381817193123 +545 94 negative_sampler.num_negs_per_pos 59.0 +545 94 training.batch_size 0.0 +545 95 model.embedding_dim 2.0 +545 95 loss.margin 5.884022106840456 +545 95 optimizer.lr 0.0019945149665139728 +545 95 negative_sampler.num_negs_per_pos 80.0 +545 95 training.batch_size 2.0 +545 96 model.embedding_dim 0.0 +545 96 loss.margin 6.918695567918147 +545 96 optimizer.lr 0.002839277951553438 +545 96 negative_sampler.num_negs_per_pos 25.0 +545 96 training.batch_size 2.0 +545 97 model.embedding_dim 1.0 +545 97 loss.margin 5.08674047291317 +545 97 optimizer.lr 0.004843462894259024 +545 97 negative_sampler.num_negs_per_pos 91.0 +545 97 training.batch_size 1.0 +545 98 model.embedding_dim 2.0 +545 98 loss.margin 1.9222884445931636 +545 98 optimizer.lr 0.08694098827196374 +545 98 negative_sampler.num_negs_per_pos 53.0 +545 98 training.batch_size 1.0 +545 99 model.embedding_dim 1.0 +545 99 loss.margin 2.5372231429346015 +545 99 optimizer.lr 0.004298074850120638 +545 99 negative_sampler.num_negs_per_pos 8.0 +545 99 training.batch_size 0.0 +545 100 model.embedding_dim 2.0 +545 100 loss.margin 7.7672098822706905 +545 100 optimizer.lr 
0.026388303962627473 +545 100 negative_sampler.num_negs_per_pos 31.0 +545 100 training.batch_size 0.0 +545 1 dataset """kinships""" +545 1 model """rescal""" +545 1 loss """marginranking""" +545 1 regularizer """no""" +545 1 optimizer """adam""" +545 1 training_loop """owa""" +545 1 negative_sampler """basic""" +545 1 evaluator """rankbased""" +545 2 dataset """kinships""" +545 2 model """rescal""" +545 2 loss """marginranking""" +545 2 regularizer """no""" +545 2 optimizer """adam""" +545 2 training_loop """owa""" +545 2 negative_sampler """basic""" +545 2 evaluator """rankbased""" +545 3 dataset """kinships""" +545 3 model """rescal""" +545 3 loss """marginranking""" +545 3 regularizer """no""" +545 3 optimizer """adam""" +545 3 training_loop """owa""" +545 3 negative_sampler """basic""" +545 3 evaluator """rankbased""" +545 4 dataset """kinships""" +545 4 model """rescal""" +545 4 loss """marginranking""" +545 4 regularizer """no""" +545 4 optimizer """adam""" +545 4 training_loop """owa""" +545 4 negative_sampler """basic""" +545 4 evaluator """rankbased""" +545 5 dataset """kinships""" +545 5 model """rescal""" +545 5 loss """marginranking""" +545 5 regularizer """no""" +545 5 optimizer """adam""" +545 5 training_loop """owa""" +545 5 negative_sampler """basic""" +545 5 evaluator """rankbased""" +545 6 dataset """kinships""" +545 6 model """rescal""" +545 6 loss """marginranking""" +545 6 regularizer """no""" +545 6 optimizer """adam""" +545 6 training_loop """owa""" +545 6 negative_sampler """basic""" +545 6 evaluator """rankbased""" +545 7 dataset """kinships""" +545 7 model """rescal""" +545 7 loss """marginranking""" +545 7 regularizer """no""" +545 7 optimizer """adam""" +545 7 training_loop """owa""" +545 7 negative_sampler """basic""" +545 7 evaluator """rankbased""" +545 8 dataset """kinships""" +545 8 model """rescal""" +545 8 loss """marginranking""" +545 8 regularizer """no""" +545 8 optimizer """adam""" +545 8 training_loop """owa""" +545 8 
negative_sampler """basic""" +545 8 evaluator """rankbased""" +545 9 dataset """kinships""" +545 9 model """rescal""" +545 9 loss """marginranking""" +545 9 regularizer """no""" +545 9 optimizer """adam""" +545 9 training_loop """owa""" +545 9 negative_sampler """basic""" +545 9 evaluator """rankbased""" +545 10 dataset """kinships""" +545 10 model """rescal""" +545 10 loss """marginranking""" +545 10 regularizer """no""" +545 10 optimizer """adam""" +545 10 training_loop """owa""" +545 10 negative_sampler """basic""" +545 10 evaluator """rankbased""" +545 11 dataset """kinships""" +545 11 model """rescal""" +545 11 loss """marginranking""" +545 11 regularizer """no""" +545 11 optimizer """adam""" +545 11 training_loop """owa""" +545 11 negative_sampler """basic""" +545 11 evaluator """rankbased""" +545 12 dataset """kinships""" +545 12 model """rescal""" +545 12 loss """marginranking""" +545 12 regularizer """no""" +545 12 optimizer """adam""" +545 12 training_loop """owa""" +545 12 negative_sampler """basic""" +545 12 evaluator """rankbased""" +545 13 dataset """kinships""" +545 13 model """rescal""" +545 13 loss """marginranking""" +545 13 regularizer """no""" +545 13 optimizer """adam""" +545 13 training_loop """owa""" +545 13 negative_sampler """basic""" +545 13 evaluator """rankbased""" +545 14 dataset """kinships""" +545 14 model """rescal""" +545 14 loss """marginranking""" +545 14 regularizer """no""" +545 14 optimizer """adam""" +545 14 training_loop """owa""" +545 14 negative_sampler """basic""" +545 14 evaluator """rankbased""" +545 15 dataset """kinships""" +545 15 model """rescal""" +545 15 loss """marginranking""" +545 15 regularizer """no""" +545 15 optimizer """adam""" +545 15 training_loop """owa""" +545 15 negative_sampler """basic""" +545 15 evaluator """rankbased""" +545 16 dataset """kinships""" +545 16 model """rescal""" +545 16 loss """marginranking""" +545 16 regularizer """no""" +545 16 optimizer """adam""" +545 16 training_loop """owa""" 
+545 16 negative_sampler """basic""" +545 16 evaluator """rankbased""" +545 17 dataset """kinships""" +545 17 model """rescal""" +545 17 loss """marginranking""" +545 17 regularizer """no""" +545 17 optimizer """adam""" +545 17 training_loop """owa""" +545 17 negative_sampler """basic""" +545 17 evaluator """rankbased""" +545 18 dataset """kinships""" +545 18 model """rescal""" +545 18 loss """marginranking""" +545 18 regularizer """no""" +545 18 optimizer """adam""" +545 18 training_loop """owa""" +545 18 negative_sampler """basic""" +545 18 evaluator """rankbased""" +545 19 dataset """kinships""" +545 19 model """rescal""" +545 19 loss """marginranking""" +545 19 regularizer """no""" +545 19 optimizer """adam""" +545 19 training_loop """owa""" +545 19 negative_sampler """basic""" +545 19 evaluator """rankbased""" +545 20 dataset """kinships""" +545 20 model """rescal""" +545 20 loss """marginranking""" +545 20 regularizer """no""" +545 20 optimizer """adam""" +545 20 training_loop """owa""" +545 20 negative_sampler """basic""" +545 20 evaluator """rankbased""" +545 21 dataset """kinships""" +545 21 model """rescal""" +545 21 loss """marginranking""" +545 21 regularizer """no""" +545 21 optimizer """adam""" +545 21 training_loop """owa""" +545 21 negative_sampler """basic""" +545 21 evaluator """rankbased""" +545 22 dataset """kinships""" +545 22 model """rescal""" +545 22 loss """marginranking""" +545 22 regularizer """no""" +545 22 optimizer """adam""" +545 22 training_loop """owa""" +545 22 negative_sampler """basic""" +545 22 evaluator """rankbased""" +545 23 dataset """kinships""" +545 23 model """rescal""" +545 23 loss """marginranking""" +545 23 regularizer """no""" +545 23 optimizer """adam""" +545 23 training_loop """owa""" +545 23 negative_sampler """basic""" +545 23 evaluator """rankbased""" +545 24 dataset """kinships""" +545 24 model """rescal""" +545 24 loss """marginranking""" +545 24 regularizer """no""" +545 24 optimizer """adam""" +545 24 
training_loop """owa""" +545 24 negative_sampler """basic""" +545 24 evaluator """rankbased""" +545 25 dataset """kinships""" +545 25 model """rescal""" +545 25 loss """marginranking""" +545 25 regularizer """no""" +545 25 optimizer """adam""" +545 25 training_loop """owa""" +545 25 negative_sampler """basic""" +545 25 evaluator """rankbased""" +545 26 dataset """kinships""" +545 26 model """rescal""" +545 26 loss """marginranking""" +545 26 regularizer """no""" +545 26 optimizer """adam""" +545 26 training_loop """owa""" +545 26 negative_sampler """basic""" +545 26 evaluator """rankbased""" +545 27 dataset """kinships""" +545 27 model """rescal""" +545 27 loss """marginranking""" +545 27 regularizer """no""" +545 27 optimizer """adam""" +545 27 training_loop """owa""" +545 27 negative_sampler """basic""" +545 27 evaluator """rankbased""" +545 28 dataset """kinships""" +545 28 model """rescal""" +545 28 loss """marginranking""" +545 28 regularizer """no""" +545 28 optimizer """adam""" +545 28 training_loop """owa""" +545 28 negative_sampler """basic""" +545 28 evaluator """rankbased""" +545 29 dataset """kinships""" +545 29 model """rescal""" +545 29 loss """marginranking""" +545 29 regularizer """no""" +545 29 optimizer """adam""" +545 29 training_loop """owa""" +545 29 negative_sampler """basic""" +545 29 evaluator """rankbased""" +545 30 dataset """kinships""" +545 30 model """rescal""" +545 30 loss """marginranking""" +545 30 regularizer """no""" +545 30 optimizer """adam""" +545 30 training_loop """owa""" +545 30 negative_sampler """basic""" +545 30 evaluator """rankbased""" +545 31 dataset """kinships""" +545 31 model """rescal""" +545 31 loss """marginranking""" +545 31 regularizer """no""" +545 31 optimizer """adam""" +545 31 training_loop """owa""" +545 31 negative_sampler """basic""" +545 31 evaluator """rankbased""" +545 32 dataset """kinships""" +545 32 model """rescal""" +545 32 loss """marginranking""" +545 32 regularizer """no""" +545 32 optimizer 
"""adam""" +545 32 training_loop """owa""" +545 32 negative_sampler """basic""" +545 32 evaluator """rankbased""" +545 33 dataset """kinships""" +545 33 model """rescal""" +545 33 loss """marginranking""" +545 33 regularizer """no""" +545 33 optimizer """adam""" +545 33 training_loop """owa""" +545 33 negative_sampler """basic""" +545 33 evaluator """rankbased""" +545 34 dataset """kinships""" +545 34 model """rescal""" +545 34 loss """marginranking""" +545 34 regularizer """no""" +545 34 optimizer """adam""" +545 34 training_loop """owa""" +545 34 negative_sampler """basic""" +545 34 evaluator """rankbased""" +545 35 dataset """kinships""" +545 35 model """rescal""" +545 35 loss """marginranking""" +545 35 regularizer """no""" +545 35 optimizer """adam""" +545 35 training_loop """owa""" +545 35 negative_sampler """basic""" +545 35 evaluator """rankbased""" +545 36 dataset """kinships""" +545 36 model """rescal""" +545 36 loss """marginranking""" +545 36 regularizer """no""" +545 36 optimizer """adam""" +545 36 training_loop """owa""" +545 36 negative_sampler """basic""" +545 36 evaluator """rankbased""" +545 37 dataset """kinships""" +545 37 model """rescal""" +545 37 loss """marginranking""" +545 37 regularizer """no""" +545 37 optimizer """adam""" +545 37 training_loop """owa""" +545 37 negative_sampler """basic""" +545 37 evaluator """rankbased""" +545 38 dataset """kinships""" +545 38 model """rescal""" +545 38 loss """marginranking""" +545 38 regularizer """no""" +545 38 optimizer """adam""" +545 38 training_loop """owa""" +545 38 negative_sampler """basic""" +545 38 evaluator """rankbased""" +545 39 dataset """kinships""" +545 39 model """rescal""" +545 39 loss """marginranking""" +545 39 regularizer """no""" +545 39 optimizer """adam""" +545 39 training_loop """owa""" +545 39 negative_sampler """basic""" +545 39 evaluator """rankbased""" +545 40 dataset """kinships""" +545 40 model """rescal""" +545 40 loss """marginranking""" +545 40 regularizer """no""" 
+545 40 optimizer """adam""" +545 40 training_loop """owa""" +545 40 negative_sampler """basic""" +545 40 evaluator """rankbased""" +545 41 dataset """kinships""" +545 41 model """rescal""" +545 41 loss """marginranking""" +545 41 regularizer """no""" +545 41 optimizer """adam""" +545 41 training_loop """owa""" +545 41 negative_sampler """basic""" +545 41 evaluator """rankbased""" +545 42 dataset """kinships""" +545 42 model """rescal""" +545 42 loss """marginranking""" +545 42 regularizer """no""" +545 42 optimizer """adam""" +545 42 training_loop """owa""" +545 42 negative_sampler """basic""" +545 42 evaluator """rankbased""" +545 43 dataset """kinships""" +545 43 model """rescal""" +545 43 loss """marginranking""" +545 43 regularizer """no""" +545 43 optimizer """adam""" +545 43 training_loop """owa""" +545 43 negative_sampler """basic""" +545 43 evaluator """rankbased""" +545 44 dataset """kinships""" +545 44 model """rescal""" +545 44 loss """marginranking""" +545 44 regularizer """no""" +545 44 optimizer """adam""" +545 44 training_loop """owa""" +545 44 negative_sampler """basic""" +545 44 evaluator """rankbased""" +545 45 dataset """kinships""" +545 45 model """rescal""" +545 45 loss """marginranking""" +545 45 regularizer """no""" +545 45 optimizer """adam""" +545 45 training_loop """owa""" +545 45 negative_sampler """basic""" +545 45 evaluator """rankbased""" +545 46 dataset """kinships""" +545 46 model """rescal""" +545 46 loss """marginranking""" +545 46 regularizer """no""" +545 46 optimizer """adam""" +545 46 training_loop """owa""" +545 46 negative_sampler """basic""" +545 46 evaluator """rankbased""" +545 47 dataset """kinships""" +545 47 model """rescal""" +545 47 loss """marginranking""" +545 47 regularizer """no""" +545 47 optimizer """adam""" +545 47 training_loop """owa""" +545 47 negative_sampler """basic""" +545 47 evaluator """rankbased""" +545 48 dataset """kinships""" +545 48 model """rescal""" +545 48 loss """marginranking""" +545 48 
regularizer """no""" +545 48 optimizer """adam""" +545 48 training_loop """owa""" +545 48 negative_sampler """basic""" +545 48 evaluator """rankbased""" +545 49 dataset """kinships""" +545 49 model """rescal""" +545 49 loss """marginranking""" +545 49 regularizer """no""" +545 49 optimizer """adam""" +545 49 training_loop """owa""" +545 49 negative_sampler """basic""" +545 49 evaluator """rankbased""" +545 50 dataset """kinships""" +545 50 model """rescal""" +545 50 loss """marginranking""" +545 50 regularizer """no""" +545 50 optimizer """adam""" +545 50 training_loop """owa""" +545 50 negative_sampler """basic""" +545 50 evaluator """rankbased""" +545 51 dataset """kinships""" +545 51 model """rescal""" +545 51 loss """marginranking""" +545 51 regularizer """no""" +545 51 optimizer """adam""" +545 51 training_loop """owa""" +545 51 negative_sampler """basic""" +545 51 evaluator """rankbased""" +545 52 dataset """kinships""" +545 52 model """rescal""" +545 52 loss """marginranking""" +545 52 regularizer """no""" +545 52 optimizer """adam""" +545 52 training_loop """owa""" +545 52 negative_sampler """basic""" +545 52 evaluator """rankbased""" +545 53 dataset """kinships""" +545 53 model """rescal""" +545 53 loss """marginranking""" +545 53 regularizer """no""" +545 53 optimizer """adam""" +545 53 training_loop """owa""" +545 53 negative_sampler """basic""" +545 53 evaluator """rankbased""" +545 54 dataset """kinships""" +545 54 model """rescal""" +545 54 loss """marginranking""" +545 54 regularizer """no""" +545 54 optimizer """adam""" +545 54 training_loop """owa""" +545 54 negative_sampler """basic""" +545 54 evaluator """rankbased""" +545 55 dataset """kinships""" +545 55 model """rescal""" +545 55 loss """marginranking""" +545 55 regularizer """no""" +545 55 optimizer """adam""" +545 55 training_loop """owa""" +545 55 negative_sampler """basic""" +545 55 evaluator """rankbased""" +545 56 dataset """kinships""" +545 56 model """rescal""" +545 56 loss 
"""marginranking""" +545 56 regularizer """no""" +545 56 optimizer """adam""" +545 56 training_loop """owa""" +545 56 negative_sampler """basic""" +545 56 evaluator """rankbased""" +545 57 dataset """kinships""" +545 57 model """rescal""" +545 57 loss """marginranking""" +545 57 regularizer """no""" +545 57 optimizer """adam""" +545 57 training_loop """owa""" +545 57 negative_sampler """basic""" +545 57 evaluator """rankbased""" +545 58 dataset """kinships""" +545 58 model """rescal""" +545 58 loss """marginranking""" +545 58 regularizer """no""" +545 58 optimizer """adam""" +545 58 training_loop """owa""" +545 58 negative_sampler """basic""" +545 58 evaluator """rankbased""" +545 59 dataset """kinships""" +545 59 model """rescal""" +545 59 loss """marginranking""" +545 59 regularizer """no""" +545 59 optimizer """adam""" +545 59 training_loop """owa""" +545 59 negative_sampler """basic""" +545 59 evaluator """rankbased""" +545 60 dataset """kinships""" +545 60 model """rescal""" +545 60 loss """marginranking""" +545 60 regularizer """no""" +545 60 optimizer """adam""" +545 60 training_loop """owa""" +545 60 negative_sampler """basic""" +545 60 evaluator """rankbased""" +545 61 dataset """kinships""" +545 61 model """rescal""" +545 61 loss """marginranking""" +545 61 regularizer """no""" +545 61 optimizer """adam""" +545 61 training_loop """owa""" +545 61 negative_sampler """basic""" +545 61 evaluator """rankbased""" +545 62 dataset """kinships""" +545 62 model """rescal""" +545 62 loss """marginranking""" +545 62 regularizer """no""" +545 62 optimizer """adam""" +545 62 training_loop """owa""" +545 62 negative_sampler """basic""" +545 62 evaluator """rankbased""" +545 63 dataset """kinships""" +545 63 model """rescal""" +545 63 loss """marginranking""" +545 63 regularizer """no""" +545 63 optimizer """adam""" +545 63 training_loop """owa""" +545 63 negative_sampler """basic""" +545 63 evaluator """rankbased""" +545 64 dataset """kinships""" +545 64 model 
"""rescal""" +545 64 loss """marginranking""" +545 64 regularizer """no""" +545 64 optimizer """adam""" +545 64 training_loop """owa""" +545 64 negative_sampler """basic""" +545 64 evaluator """rankbased""" +545 65 dataset """kinships""" +545 65 model """rescal""" +545 65 loss """marginranking""" +545 65 regularizer """no""" +545 65 optimizer """adam""" +545 65 training_loop """owa""" +545 65 negative_sampler """basic""" +545 65 evaluator """rankbased""" +545 66 dataset """kinships""" +545 66 model """rescal""" +545 66 loss """marginranking""" +545 66 regularizer """no""" +545 66 optimizer """adam""" +545 66 training_loop """owa""" +545 66 negative_sampler """basic""" +545 66 evaluator """rankbased""" +545 67 dataset """kinships""" +545 67 model """rescal""" +545 67 loss """marginranking""" +545 67 regularizer """no""" +545 67 optimizer """adam""" +545 67 training_loop """owa""" +545 67 negative_sampler """basic""" +545 67 evaluator """rankbased""" +545 68 dataset """kinships""" +545 68 model """rescal""" +545 68 loss """marginranking""" +545 68 regularizer """no""" +545 68 optimizer """adam""" +545 68 training_loop """owa""" +545 68 negative_sampler """basic""" +545 68 evaluator """rankbased""" +545 69 dataset """kinships""" +545 69 model """rescal""" +545 69 loss """marginranking""" +545 69 regularizer """no""" +545 69 optimizer """adam""" +545 69 training_loop """owa""" +545 69 negative_sampler """basic""" +545 69 evaluator """rankbased""" +545 70 dataset """kinships""" +545 70 model """rescal""" +545 70 loss """marginranking""" +545 70 regularizer """no""" +545 70 optimizer """adam""" +545 70 training_loop """owa""" +545 70 negative_sampler """basic""" +545 70 evaluator """rankbased""" +545 71 dataset """kinships""" +545 71 model """rescal""" +545 71 loss """marginranking""" +545 71 regularizer """no""" +545 71 optimizer """adam""" +545 71 training_loop """owa""" +545 71 negative_sampler """basic""" +545 71 evaluator """rankbased""" +545 72 dataset 
"""kinships""" +545 72 model """rescal""" +545 72 loss """marginranking""" +545 72 regularizer """no""" +545 72 optimizer """adam""" +545 72 training_loop """owa""" +545 72 negative_sampler """basic""" +545 72 evaluator """rankbased""" +545 73 dataset """kinships""" +545 73 model """rescal""" +545 73 loss """marginranking""" +545 73 regularizer """no""" +545 73 optimizer """adam""" +545 73 training_loop """owa""" +545 73 negative_sampler """basic""" +545 73 evaluator """rankbased""" +545 74 dataset """kinships""" +545 74 model """rescal""" +545 74 loss """marginranking""" +545 74 regularizer """no""" +545 74 optimizer """adam""" +545 74 training_loop """owa""" +545 74 negative_sampler """basic""" +545 74 evaluator """rankbased""" +545 75 dataset """kinships""" +545 75 model """rescal""" +545 75 loss """marginranking""" +545 75 regularizer """no""" +545 75 optimizer """adam""" +545 75 training_loop """owa""" +545 75 negative_sampler """basic""" +545 75 evaluator """rankbased""" +545 76 dataset """kinships""" +545 76 model """rescal""" +545 76 loss """marginranking""" +545 76 regularizer """no""" +545 76 optimizer """adam""" +545 76 training_loop """owa""" +545 76 negative_sampler """basic""" +545 76 evaluator """rankbased""" +545 77 dataset """kinships""" +545 77 model """rescal""" +545 77 loss """marginranking""" +545 77 regularizer """no""" +545 77 optimizer """adam""" +545 77 training_loop """owa""" +545 77 negative_sampler """basic""" +545 77 evaluator """rankbased""" +545 78 dataset """kinships""" +545 78 model """rescal""" +545 78 loss """marginranking""" +545 78 regularizer """no""" +545 78 optimizer """adam""" +545 78 training_loop """owa""" +545 78 negative_sampler """basic""" +545 78 evaluator """rankbased""" +545 79 dataset """kinships""" +545 79 model """rescal""" +545 79 loss """marginranking""" +545 79 regularizer """no""" +545 79 optimizer """adam""" +545 79 training_loop """owa""" +545 79 negative_sampler """basic""" +545 79 evaluator """rankbased""" 
+545 80 dataset """kinships""" +545 80 model """rescal""" +545 80 loss """marginranking""" +545 80 regularizer """no""" +545 80 optimizer """adam""" +545 80 training_loop """owa""" +545 80 negative_sampler """basic""" +545 80 evaluator """rankbased""" +545 81 dataset """kinships""" +545 81 model """rescal""" +545 81 loss """marginranking""" +545 81 regularizer """no""" +545 81 optimizer """adam""" +545 81 training_loop """owa""" +545 81 negative_sampler """basic""" +545 81 evaluator """rankbased""" +545 82 dataset """kinships""" +545 82 model """rescal""" +545 82 loss """marginranking""" +545 82 regularizer """no""" +545 82 optimizer """adam""" +545 82 training_loop """owa""" +545 82 negative_sampler """basic""" +545 82 evaluator """rankbased""" +545 83 dataset """kinships""" +545 83 model """rescal""" +545 83 loss """marginranking""" +545 83 regularizer """no""" +545 83 optimizer """adam""" +545 83 training_loop """owa""" +545 83 negative_sampler """basic""" +545 83 evaluator """rankbased""" +545 84 dataset """kinships""" +545 84 model """rescal""" +545 84 loss """marginranking""" +545 84 regularizer """no""" +545 84 optimizer """adam""" +545 84 training_loop """owa""" +545 84 negative_sampler """basic""" +545 84 evaluator """rankbased""" +545 85 dataset """kinships""" +545 85 model """rescal""" +545 85 loss """marginranking""" +545 85 regularizer """no""" +545 85 optimizer """adam""" +545 85 training_loop """owa""" +545 85 negative_sampler """basic""" +545 85 evaluator """rankbased""" +545 86 dataset """kinships""" +545 86 model """rescal""" +545 86 loss """marginranking""" +545 86 regularizer """no""" +545 86 optimizer """adam""" +545 86 training_loop """owa""" +545 86 negative_sampler """basic""" +545 86 evaluator """rankbased""" +545 87 dataset """kinships""" +545 87 model """rescal""" +545 87 loss """marginranking""" +545 87 regularizer """no""" +545 87 optimizer """adam""" +545 87 training_loop """owa""" +545 87 negative_sampler """basic""" +545 87 evaluator 
"""rankbased""" +545 88 dataset """kinships""" +545 88 model """rescal""" +545 88 loss """marginranking""" +545 88 regularizer """no""" +545 88 optimizer """adam""" +545 88 training_loop """owa""" +545 88 negative_sampler """basic""" +545 88 evaluator """rankbased""" +545 89 dataset """kinships""" +545 89 model """rescal""" +545 89 loss """marginranking""" +545 89 regularizer """no""" +545 89 optimizer """adam""" +545 89 training_loop """owa""" +545 89 negative_sampler """basic""" +545 89 evaluator """rankbased""" +545 90 dataset """kinships""" +545 90 model """rescal""" +545 90 loss """marginranking""" +545 90 regularizer """no""" +545 90 optimizer """adam""" +545 90 training_loop """owa""" +545 90 negative_sampler """basic""" +545 90 evaluator """rankbased""" +545 91 dataset """kinships""" +545 91 model """rescal""" +545 91 loss """marginranking""" +545 91 regularizer """no""" +545 91 optimizer """adam""" +545 91 training_loop """owa""" +545 91 negative_sampler """basic""" +545 91 evaluator """rankbased""" +545 92 dataset """kinships""" +545 92 model """rescal""" +545 92 loss """marginranking""" +545 92 regularizer """no""" +545 92 optimizer """adam""" +545 92 training_loop """owa""" +545 92 negative_sampler """basic""" +545 92 evaluator """rankbased""" +545 93 dataset """kinships""" +545 93 model """rescal""" +545 93 loss """marginranking""" +545 93 regularizer """no""" +545 93 optimizer """adam""" +545 93 training_loop """owa""" +545 93 negative_sampler """basic""" +545 93 evaluator """rankbased""" +545 94 dataset """kinships""" +545 94 model """rescal""" +545 94 loss """marginranking""" +545 94 regularizer """no""" +545 94 optimizer """adam""" +545 94 training_loop """owa""" +545 94 negative_sampler """basic""" +545 94 evaluator """rankbased""" +545 95 dataset """kinships""" +545 95 model """rescal""" +545 95 loss """marginranking""" +545 95 regularizer """no""" +545 95 optimizer """adam""" +545 95 training_loop """owa""" +545 95 negative_sampler """basic""" 
+545 95 evaluator """rankbased""" +545 96 dataset """kinships""" +545 96 model """rescal""" +545 96 loss """marginranking""" +545 96 regularizer """no""" +545 96 optimizer """adam""" +545 96 training_loop """owa""" +545 96 negative_sampler """basic""" +545 96 evaluator """rankbased""" +545 97 dataset """kinships""" +545 97 model """rescal""" +545 97 loss """marginranking""" +545 97 regularizer """no""" +545 97 optimizer """adam""" +545 97 training_loop """owa""" +545 97 negative_sampler """basic""" +545 97 evaluator """rankbased""" +545 98 dataset """kinships""" +545 98 model """rescal""" +545 98 loss """marginranking""" +545 98 regularizer """no""" +545 98 optimizer """adam""" +545 98 training_loop """owa""" +545 98 negative_sampler """basic""" +545 98 evaluator """rankbased""" +545 99 dataset """kinships""" +545 99 model """rescal""" +545 99 loss """marginranking""" +545 99 regularizer """no""" +545 99 optimizer """adam""" +545 99 training_loop """owa""" +545 99 negative_sampler """basic""" +545 99 evaluator """rankbased""" +545 100 dataset """kinships""" +545 100 model """rescal""" +545 100 loss """marginranking""" +545 100 regularizer """no""" +545 100 optimizer """adam""" +545 100 training_loop """owa""" +545 100 negative_sampler """basic""" +545 100 evaluator """rankbased""" +546 1 model.embedding_dim 0.0 +546 1 loss.margin 6.926771691740346 +546 1 optimizer.lr 0.008280180546258944 +546 1 negative_sampler.num_negs_per_pos 91.0 +546 1 training.batch_size 0.0 +546 2 model.embedding_dim 1.0 +546 2 loss.margin 8.899251103629878 +546 2 optimizer.lr 0.002175099241128335 +546 2 negative_sampler.num_negs_per_pos 49.0 +546 2 training.batch_size 2.0 +546 3 model.embedding_dim 0.0 +546 3 loss.margin 7.651055891314136 +546 3 optimizer.lr 0.05486246857212951 +546 3 negative_sampler.num_negs_per_pos 71.0 +546 3 training.batch_size 2.0 +546 4 model.embedding_dim 1.0 +546 4 loss.margin 7.309901197519665 +546 4 optimizer.lr 0.054990739025750815 +546 4 
negative_sampler.num_negs_per_pos 17.0 +546 4 training.batch_size 1.0 +546 5 model.embedding_dim 2.0 +546 5 loss.margin 2.9580615747246997 +546 5 optimizer.lr 0.0013234290220891006 +546 5 negative_sampler.num_negs_per_pos 27.0 +546 5 training.batch_size 1.0 +546 6 model.embedding_dim 1.0 +546 6 loss.margin 9.148177226838136 +546 6 optimizer.lr 0.037800904187200356 +546 6 negative_sampler.num_negs_per_pos 43.0 +546 6 training.batch_size 0.0 +546 7 model.embedding_dim 2.0 +546 7 loss.margin 9.194496178484957 +546 7 optimizer.lr 0.0019022446846056857 +546 7 negative_sampler.num_negs_per_pos 69.0 +546 7 training.batch_size 1.0 +546 8 model.embedding_dim 0.0 +546 8 loss.margin 7.793996639909747 +546 8 optimizer.lr 0.0072216445568841275 +546 8 negative_sampler.num_negs_per_pos 62.0 +546 8 training.batch_size 0.0 +546 9 model.embedding_dim 1.0 +546 9 loss.margin 2.606637418367241 +546 9 optimizer.lr 0.09787790687126274 +546 9 negative_sampler.num_negs_per_pos 64.0 +546 9 training.batch_size 1.0 +546 10 model.embedding_dim 1.0 +546 10 loss.margin 6.53761488250587 +546 10 optimizer.lr 0.07559615300329209 +546 10 negative_sampler.num_negs_per_pos 14.0 +546 10 training.batch_size 2.0 +546 11 model.embedding_dim 0.0 +546 11 loss.margin 2.9169441874068607 +546 11 optimizer.lr 0.00823299197428241 +546 11 negative_sampler.num_negs_per_pos 73.0 +546 11 training.batch_size 2.0 +546 12 model.embedding_dim 1.0 +546 12 loss.margin 1.6787550348510716 +546 12 optimizer.lr 0.023129830983548844 +546 12 negative_sampler.num_negs_per_pos 96.0 +546 12 training.batch_size 0.0 +546 13 model.embedding_dim 2.0 +546 13 loss.margin 1.607753751697385 +546 13 optimizer.lr 0.0276682933434581 +546 13 negative_sampler.num_negs_per_pos 42.0 +546 13 training.batch_size 1.0 +546 14 model.embedding_dim 2.0 +546 14 loss.margin 0.6343975582956811 +546 14 optimizer.lr 0.01542871496418653 +546 14 negative_sampler.num_negs_per_pos 17.0 +546 14 training.batch_size 0.0 +546 15 model.embedding_dim 1.0 +546 15 
loss.margin 9.96445422862229 +546 15 optimizer.lr 0.0013933182111628044 +546 15 negative_sampler.num_negs_per_pos 50.0 +546 15 training.batch_size 0.0 +546 16 model.embedding_dim 2.0 +546 16 loss.margin 4.245562995616096 +546 16 optimizer.lr 0.0035165260640324693 +546 16 negative_sampler.num_negs_per_pos 80.0 +546 16 training.batch_size 0.0 +546 17 model.embedding_dim 0.0 +546 17 loss.margin 7.794406609512522 +546 17 optimizer.lr 0.014763144322914888 +546 17 negative_sampler.num_negs_per_pos 0.0 +546 17 training.batch_size 0.0 +546 18 model.embedding_dim 0.0 +546 18 loss.margin 1.5109028594433205 +546 18 optimizer.lr 0.07080705816865447 +546 18 negative_sampler.num_negs_per_pos 46.0 +546 18 training.batch_size 2.0 +546 19 model.embedding_dim 0.0 +546 19 loss.margin 9.444879936230208 +546 19 optimizer.lr 0.024597252464678237 +546 19 negative_sampler.num_negs_per_pos 41.0 +546 19 training.batch_size 0.0 +546 20 model.embedding_dim 0.0 +546 20 loss.margin 7.897849826991733 +546 20 optimizer.lr 0.006182730187422852 +546 20 negative_sampler.num_negs_per_pos 7.0 +546 20 training.batch_size 0.0 +546 21 model.embedding_dim 2.0 +546 21 loss.margin 9.506542006088242 +546 21 optimizer.lr 0.026930758084873063 +546 21 negative_sampler.num_negs_per_pos 57.0 +546 21 training.batch_size 2.0 +546 22 model.embedding_dim 1.0 +546 22 loss.margin 6.8371742772127755 +546 22 optimizer.lr 0.015624946487490082 +546 22 negative_sampler.num_negs_per_pos 59.0 +546 22 training.batch_size 2.0 +546 23 model.embedding_dim 0.0 +546 23 loss.margin 0.7987514609583468 +546 23 optimizer.lr 0.06324605480092321 +546 23 negative_sampler.num_negs_per_pos 48.0 +546 23 training.batch_size 0.0 +546 24 model.embedding_dim 1.0 +546 24 loss.margin 6.824028078685987 +546 24 optimizer.lr 0.03145060358625887 +546 24 negative_sampler.num_negs_per_pos 72.0 +546 24 training.batch_size 1.0 +546 25 model.embedding_dim 0.0 +546 25 loss.margin 7.18612873224378 +546 25 optimizer.lr 0.015353726077527951 +546 25 
negative_sampler.num_negs_per_pos 75.0 +546 25 training.batch_size 2.0 +546 26 model.embedding_dim 1.0 +546 26 loss.margin 3.2021217798924715 +546 26 optimizer.lr 0.007840116167494839 +546 26 negative_sampler.num_negs_per_pos 71.0 +546 26 training.batch_size 0.0 +546 27 model.embedding_dim 0.0 +546 27 loss.margin 7.210952845476856 +546 27 optimizer.lr 0.03253236981537494 +546 27 negative_sampler.num_negs_per_pos 96.0 +546 27 training.batch_size 2.0 +546 28 model.embedding_dim 0.0 +546 28 loss.margin 7.191299749969549 +546 28 optimizer.lr 0.009218908493186852 +546 28 negative_sampler.num_negs_per_pos 8.0 +546 28 training.batch_size 1.0 +546 29 model.embedding_dim 1.0 +546 29 loss.margin 1.8542742300736244 +546 29 optimizer.lr 0.0017900737147111482 +546 29 negative_sampler.num_negs_per_pos 72.0 +546 29 training.batch_size 1.0 +546 30 model.embedding_dim 1.0 +546 30 loss.margin 4.472074330923698 +546 30 optimizer.lr 0.04179128132128906 +546 30 negative_sampler.num_negs_per_pos 75.0 +546 30 training.batch_size 0.0 +546 31 model.embedding_dim 2.0 +546 31 loss.margin 1.9768831643885938 +546 31 optimizer.lr 0.0753310016600696 +546 31 negative_sampler.num_negs_per_pos 25.0 +546 31 training.batch_size 0.0 +546 32 model.embedding_dim 2.0 +546 32 loss.margin 5.446919170354816 +546 32 optimizer.lr 0.0014719484029096105 +546 32 negative_sampler.num_negs_per_pos 27.0 +546 32 training.batch_size 0.0 +546 33 model.embedding_dim 1.0 +546 33 loss.margin 0.8629624591646163 +546 33 optimizer.lr 0.03538657979342581 +546 33 negative_sampler.num_negs_per_pos 19.0 +546 33 training.batch_size 2.0 +546 34 model.embedding_dim 0.0 +546 34 loss.margin 5.827981055846782 +546 34 optimizer.lr 0.0027775407521101864 +546 34 negative_sampler.num_negs_per_pos 83.0 +546 34 training.batch_size 1.0 +546 35 model.embedding_dim 2.0 +546 35 loss.margin 3.481936632553001 +546 35 optimizer.lr 0.0032740306600730095 +546 35 negative_sampler.num_negs_per_pos 58.0 +546 35 training.batch_size 1.0 +546 36 
model.embedding_dim 1.0 +546 36 loss.margin 5.707491861063391 +546 36 optimizer.lr 0.010646485514909146 +546 36 negative_sampler.num_negs_per_pos 7.0 +546 36 training.batch_size 0.0 +546 37 model.embedding_dim 0.0 +546 37 loss.margin 8.168079990029494 +546 37 optimizer.lr 0.007673874185578503 +546 37 negative_sampler.num_negs_per_pos 91.0 +546 37 training.batch_size 2.0 +546 38 model.embedding_dim 0.0 +546 38 loss.margin 9.680747721225645 +546 38 optimizer.lr 0.07601584668466561 +546 38 negative_sampler.num_negs_per_pos 7.0 +546 38 training.batch_size 1.0 +546 39 model.embedding_dim 2.0 +546 39 loss.margin 2.8747762008887974 +546 39 optimizer.lr 0.01513585408008473 +546 39 negative_sampler.num_negs_per_pos 9.0 +546 39 training.batch_size 0.0 +546 40 model.embedding_dim 0.0 +546 40 loss.margin 2.723954339415127 +546 40 optimizer.lr 0.09017858500263984 +546 40 negative_sampler.num_negs_per_pos 43.0 +546 40 training.batch_size 1.0 +546 41 model.embedding_dim 0.0 +546 41 loss.margin 1.9906628523367895 +546 41 optimizer.lr 0.0340949432721453 +546 41 negative_sampler.num_negs_per_pos 88.0 +546 41 training.batch_size 0.0 +546 42 model.embedding_dim 1.0 +546 42 loss.margin 5.051571396996397 +546 42 optimizer.lr 0.007393011401615437 +546 42 negative_sampler.num_negs_per_pos 26.0 +546 42 training.batch_size 2.0 +546 43 model.embedding_dim 1.0 +546 43 loss.margin 2.147914985376276 +546 43 optimizer.lr 0.06860855278607744 +546 43 negative_sampler.num_negs_per_pos 54.0 +546 43 training.batch_size 2.0 +546 44 model.embedding_dim 0.0 +546 44 loss.margin 2.0817627397756384 +546 44 optimizer.lr 0.0074368625737011645 +546 44 negative_sampler.num_negs_per_pos 72.0 +546 44 training.batch_size 1.0 +546 45 model.embedding_dim 2.0 +546 45 loss.margin 3.3607902916799306 +546 45 optimizer.lr 0.036315569349824696 +546 45 negative_sampler.num_negs_per_pos 67.0 +546 45 training.batch_size 1.0 +546 46 model.embedding_dim 1.0 +546 46 loss.margin 3.381476108608447 +546 46 optimizer.lr 
0.028223105629501574 +546 46 negative_sampler.num_negs_per_pos 21.0 +546 46 training.batch_size 2.0 +546 47 model.embedding_dim 2.0 +546 47 loss.margin 2.1641831247016245 +546 47 optimizer.lr 0.0018149418553166045 +546 47 negative_sampler.num_negs_per_pos 54.0 +546 47 training.batch_size 2.0 +546 48 model.embedding_dim 0.0 +546 48 loss.margin 8.051314386068622 +546 48 optimizer.lr 0.0011540622651998603 +546 48 negative_sampler.num_negs_per_pos 12.0 +546 48 training.batch_size 0.0 +546 49 model.embedding_dim 1.0 +546 49 loss.margin 3.0325476241994234 +546 49 optimizer.lr 0.012115536254119603 +546 49 negative_sampler.num_negs_per_pos 85.0 +546 49 training.batch_size 0.0 +546 50 model.embedding_dim 2.0 +546 50 loss.margin 9.414556807669477 +546 50 optimizer.lr 0.0021983731389720496 +546 50 negative_sampler.num_negs_per_pos 85.0 +546 50 training.batch_size 0.0 +546 51 model.embedding_dim 2.0 +546 51 loss.margin 4.48353096773147 +546 51 optimizer.lr 0.0012016544153411442 +546 51 negative_sampler.num_negs_per_pos 26.0 +546 51 training.batch_size 1.0 +546 52 model.embedding_dim 2.0 +546 52 loss.margin 1.432045465293482 +546 52 optimizer.lr 0.013876050314324517 +546 52 negative_sampler.num_negs_per_pos 9.0 +546 52 training.batch_size 2.0 +546 53 model.embedding_dim 2.0 +546 53 loss.margin 5.086309598647979 +546 53 optimizer.lr 0.005924218854658534 +546 53 negative_sampler.num_negs_per_pos 60.0 +546 53 training.batch_size 1.0 +546 54 model.embedding_dim 0.0 +546 54 loss.margin 7.1977657467343095 +546 54 optimizer.lr 0.09992121544533106 +546 54 negative_sampler.num_negs_per_pos 34.0 +546 54 training.batch_size 0.0 +546 55 model.embedding_dim 0.0 +546 55 loss.margin 2.181150771270686 +546 55 optimizer.lr 0.030721632912426956 +546 55 negative_sampler.num_negs_per_pos 77.0 +546 55 training.batch_size 1.0 +546 56 model.embedding_dim 0.0 +546 56 loss.margin 7.6986284508814204 +546 56 optimizer.lr 0.03615139491079128 +546 56 negative_sampler.num_negs_per_pos 36.0 +546 56 
training.batch_size 0.0 +546 57 model.embedding_dim 0.0 +546 57 loss.margin 6.98593800301077 +546 57 optimizer.lr 0.015367994738188239 +546 57 negative_sampler.num_negs_per_pos 12.0 +546 57 training.batch_size 1.0 +546 58 model.embedding_dim 0.0 +546 58 loss.margin 7.966164443049839 +546 58 optimizer.lr 0.004055333236060628 +546 58 negative_sampler.num_negs_per_pos 92.0 +546 58 training.batch_size 1.0 +546 59 model.embedding_dim 1.0 +546 59 loss.margin 2.5393840038932733 +546 59 optimizer.lr 0.06880544934461497 +546 59 negative_sampler.num_negs_per_pos 55.0 +546 59 training.batch_size 1.0 +546 60 model.embedding_dim 2.0 +546 60 loss.margin 5.142730950115965 +546 60 optimizer.lr 0.010504161504132819 +546 60 negative_sampler.num_negs_per_pos 60.0 +546 60 training.batch_size 0.0 +546 61 model.embedding_dim 2.0 +546 61 loss.margin 3.9447359101193284 +546 61 optimizer.lr 0.003923161333678405 +546 61 negative_sampler.num_negs_per_pos 39.0 +546 61 training.batch_size 1.0 +546 62 model.embedding_dim 1.0 +546 62 loss.margin 2.5803578906729085 +546 62 optimizer.lr 0.012280084556535249 +546 62 negative_sampler.num_negs_per_pos 64.0 +546 62 training.batch_size 0.0 +546 63 model.embedding_dim 1.0 +546 63 loss.margin 2.6504537985214576 +546 63 optimizer.lr 0.007636162441978825 +546 63 negative_sampler.num_negs_per_pos 96.0 +546 63 training.batch_size 1.0 +546 64 model.embedding_dim 2.0 +546 64 loss.margin 1.1096136926889093 +546 64 optimizer.lr 0.014132529521062285 +546 64 negative_sampler.num_negs_per_pos 7.0 +546 64 training.batch_size 2.0 +546 65 model.embedding_dim 2.0 +546 65 loss.margin 8.217997113414386 +546 65 optimizer.lr 0.009632336648512263 +546 65 negative_sampler.num_negs_per_pos 32.0 +546 65 training.batch_size 1.0 +546 66 model.embedding_dim 0.0 +546 66 loss.margin 9.906319477887578 +546 66 optimizer.lr 0.0022198722742804915 +546 66 negative_sampler.num_negs_per_pos 69.0 +546 66 training.batch_size 2.0 +546 67 model.embedding_dim 1.0 +546 67 loss.margin 
5.7602388107469755 +546 67 optimizer.lr 0.018047113108773483 +546 67 negative_sampler.num_negs_per_pos 60.0 +546 67 training.batch_size 1.0 +546 68 model.embedding_dim 1.0 +546 68 loss.margin 4.790702317039442 +546 68 optimizer.lr 0.001981804092767634 +546 68 negative_sampler.num_negs_per_pos 4.0 +546 68 training.batch_size 0.0 +546 69 model.embedding_dim 1.0 +546 69 loss.margin 9.378874955303859 +546 69 optimizer.lr 0.001887890287100875 +546 69 negative_sampler.num_negs_per_pos 62.0 +546 69 training.batch_size 2.0 +546 70 model.embedding_dim 0.0 +546 70 loss.margin 1.0470117510266495 +546 70 optimizer.lr 0.04919419215668968 +546 70 negative_sampler.num_negs_per_pos 36.0 +546 70 training.batch_size 1.0 +546 71 model.embedding_dim 0.0 +546 71 loss.margin 8.418898382679059 +546 71 optimizer.lr 0.005964896204821649 +546 71 negative_sampler.num_negs_per_pos 11.0 +546 71 training.batch_size 0.0 +546 72 model.embedding_dim 0.0 +546 72 loss.margin 2.030075766869876 +546 72 optimizer.lr 0.0025177829442779194 +546 72 negative_sampler.num_negs_per_pos 70.0 +546 72 training.batch_size 2.0 +546 73 model.embedding_dim 1.0 +546 73 loss.margin 9.63682561642955 +546 73 optimizer.lr 0.05082580791603214 +546 73 negative_sampler.num_negs_per_pos 24.0 +546 73 training.batch_size 2.0 +546 74 model.embedding_dim 0.0 +546 74 loss.margin 7.4049870140742815 +546 74 optimizer.lr 0.003256889442631901 +546 74 negative_sampler.num_negs_per_pos 93.0 +546 74 training.batch_size 0.0 +546 75 model.embedding_dim 1.0 +546 75 loss.margin 7.610182267813866 +546 75 optimizer.lr 0.0024933098628803466 +546 75 negative_sampler.num_negs_per_pos 67.0 +546 75 training.batch_size 1.0 +546 76 model.embedding_dim 0.0 +546 76 loss.margin 4.6557316943142615 +546 76 optimizer.lr 0.021976223370408145 +546 76 negative_sampler.num_negs_per_pos 34.0 +546 76 training.batch_size 1.0 +546 77 model.embedding_dim 0.0 +546 77 loss.margin 8.962231550450126 +546 77 optimizer.lr 0.001245132129384065 +546 77 
negative_sampler.num_negs_per_pos 67.0 +546 77 training.batch_size 2.0 +546 78 model.embedding_dim 1.0 +546 78 loss.margin 5.671077572968587 +546 78 optimizer.lr 0.0021366189989459384 +546 78 negative_sampler.num_negs_per_pos 12.0 +546 78 training.batch_size 2.0 +546 79 model.embedding_dim 2.0 +546 79 loss.margin 7.225296537348015 +546 79 optimizer.lr 0.0011447114108938338 +546 79 negative_sampler.num_negs_per_pos 30.0 +546 79 training.batch_size 2.0 +546 80 model.embedding_dim 0.0 +546 80 loss.margin 6.376491237227247 +546 80 optimizer.lr 0.001215776263049668 +546 80 negative_sampler.num_negs_per_pos 63.0 +546 80 training.batch_size 1.0 +546 81 model.embedding_dim 1.0 +546 81 loss.margin 5.175230246954685 +546 81 optimizer.lr 0.03063773483309469 +546 81 negative_sampler.num_negs_per_pos 15.0 +546 81 training.batch_size 0.0 +546 82 model.embedding_dim 1.0 +546 82 loss.margin 9.484010938938445 +546 82 optimizer.lr 0.009520701364344129 +546 82 negative_sampler.num_negs_per_pos 27.0 +546 82 training.batch_size 0.0 +546 83 model.embedding_dim 0.0 +546 83 loss.margin 8.36496003330155 +546 83 optimizer.lr 0.0013186791683268506 +546 83 negative_sampler.num_negs_per_pos 70.0 +546 83 training.batch_size 0.0 +546 84 model.embedding_dim 1.0 +546 84 loss.margin 2.7099756853927315 +546 84 optimizer.lr 0.025485842619731115 +546 84 negative_sampler.num_negs_per_pos 64.0 +546 84 training.batch_size 0.0 +546 85 model.embedding_dim 2.0 +546 85 loss.margin 7.248075782273135 +546 85 optimizer.lr 0.003415030343858818 +546 85 negative_sampler.num_negs_per_pos 25.0 +546 85 training.batch_size 1.0 +546 86 model.embedding_dim 0.0 +546 86 loss.margin 9.25663601621309 +546 86 optimizer.lr 0.07232486607746118 +546 86 negative_sampler.num_negs_per_pos 51.0 +546 86 training.batch_size 1.0 +546 87 model.embedding_dim 0.0 +546 87 loss.margin 5.729728149219833 +546 87 optimizer.lr 0.0195477548449351 +546 87 negative_sampler.num_negs_per_pos 29.0 +546 87 training.batch_size 2.0 +546 88 
model.embedding_dim 1.0 +546 88 loss.margin 3.133116798343342 +546 88 optimizer.lr 0.019467611220996002 +546 88 negative_sampler.num_negs_per_pos 68.0 +546 88 training.batch_size 1.0 +546 89 model.embedding_dim 2.0 +546 89 loss.margin 7.288874284406471 +546 89 optimizer.lr 0.005913947428683 +546 89 negative_sampler.num_negs_per_pos 71.0 +546 89 training.batch_size 0.0 +546 90 model.embedding_dim 2.0 +546 90 loss.margin 0.7489612335107336 +546 90 optimizer.lr 0.023284525252021016 +546 90 negative_sampler.num_negs_per_pos 7.0 +546 90 training.batch_size 2.0 +546 91 model.embedding_dim 2.0 +546 91 loss.margin 6.352020701359245 +546 91 optimizer.lr 0.0014873739745705223 +546 91 negative_sampler.num_negs_per_pos 71.0 +546 91 training.batch_size 0.0 +546 92 model.embedding_dim 0.0 +546 92 loss.margin 6.83679147513332 +546 92 optimizer.lr 0.020819831105171424 +546 92 negative_sampler.num_negs_per_pos 58.0 +546 92 training.batch_size 2.0 +546 93 model.embedding_dim 2.0 +546 93 loss.margin 9.190957922976512 +546 93 optimizer.lr 0.005421567863920515 +546 93 negative_sampler.num_negs_per_pos 90.0 +546 93 training.batch_size 2.0 +546 94 model.embedding_dim 2.0 +546 94 loss.margin 4.190096624294128 +546 94 optimizer.lr 0.05402853548380452 +546 94 negative_sampler.num_negs_per_pos 60.0 +546 94 training.batch_size 2.0 +546 95 model.embedding_dim 0.0 +546 95 loss.margin 1.2750460779683377 +546 95 optimizer.lr 0.0033300600720250753 +546 95 negative_sampler.num_negs_per_pos 66.0 +546 95 training.batch_size 1.0 +546 96 model.embedding_dim 0.0 +546 96 loss.margin 5.001347288273818 +546 96 optimizer.lr 0.006051814732313706 +546 96 negative_sampler.num_negs_per_pos 15.0 +546 96 training.batch_size 0.0 +546 97 model.embedding_dim 0.0 +546 97 loss.margin 6.333904658651156 +546 97 optimizer.lr 0.0012993686328989598 +546 97 negative_sampler.num_negs_per_pos 19.0 +546 97 training.batch_size 2.0 +546 98 model.embedding_dim 1.0 +546 98 loss.margin 2.9864674886494202 +546 98 optimizer.lr 
0.018088200663563234 +546 98 negative_sampler.num_negs_per_pos 69.0 +546 98 training.batch_size 2.0 +546 99 model.embedding_dim 1.0 +546 99 loss.margin 2.6157757443085368 +546 99 optimizer.lr 0.01944172708656831 +546 99 negative_sampler.num_negs_per_pos 12.0 +546 99 training.batch_size 2.0 +546 100 model.embedding_dim 0.0 +546 100 loss.margin 1.0964772948876318 +546 100 optimizer.lr 0.08373832299094953 +546 100 negative_sampler.num_negs_per_pos 76.0 +546 100 training.batch_size 0.0 +546 1 dataset """kinships""" +546 1 model """rescal""" +546 1 loss """marginranking""" +546 1 regularizer """no""" +546 1 optimizer """adam""" +546 1 training_loop """owa""" +546 1 negative_sampler """basic""" +546 1 evaluator """rankbased""" +546 2 dataset """kinships""" +546 2 model """rescal""" +546 2 loss """marginranking""" +546 2 regularizer """no""" +546 2 optimizer """adam""" +546 2 training_loop """owa""" +546 2 negative_sampler """basic""" +546 2 evaluator """rankbased""" +546 3 dataset """kinships""" +546 3 model """rescal""" +546 3 loss """marginranking""" +546 3 regularizer """no""" +546 3 optimizer """adam""" +546 3 training_loop """owa""" +546 3 negative_sampler """basic""" +546 3 evaluator """rankbased""" +546 4 dataset """kinships""" +546 4 model """rescal""" +546 4 loss """marginranking""" +546 4 regularizer """no""" +546 4 optimizer """adam""" +546 4 training_loop """owa""" +546 4 negative_sampler """basic""" +546 4 evaluator """rankbased""" +546 5 dataset """kinships""" +546 5 model """rescal""" +546 5 loss """marginranking""" +546 5 regularizer """no""" +546 5 optimizer """adam""" +546 5 training_loop """owa""" +546 5 negative_sampler """basic""" +546 5 evaluator """rankbased""" +546 6 dataset """kinships""" +546 6 model """rescal""" +546 6 loss """marginranking""" +546 6 regularizer """no""" +546 6 optimizer """adam""" +546 6 training_loop """owa""" +546 6 negative_sampler """basic""" +546 6 evaluator """rankbased""" +546 7 dataset """kinships""" +546 7 model 
"""rescal""" +546 7 loss """marginranking""" +546 7 regularizer """no""" +546 7 optimizer """adam""" +546 7 training_loop """owa""" +546 7 negative_sampler """basic""" +546 7 evaluator """rankbased""" +546 8 dataset """kinships""" +546 8 model """rescal""" +546 8 loss """marginranking""" +546 8 regularizer """no""" +546 8 optimizer """adam""" +546 8 training_loop """owa""" +546 8 negative_sampler """basic""" +546 8 evaluator """rankbased""" +546 9 dataset """kinships""" +546 9 model """rescal""" +546 9 loss """marginranking""" +546 9 regularizer """no""" +546 9 optimizer """adam""" +546 9 training_loop """owa""" +546 9 negative_sampler """basic""" +546 9 evaluator """rankbased""" +546 10 dataset """kinships""" +546 10 model """rescal""" +546 10 loss """marginranking""" +546 10 regularizer """no""" +546 10 optimizer """adam""" +546 10 training_loop """owa""" +546 10 negative_sampler """basic""" +546 10 evaluator """rankbased""" +546 11 dataset """kinships""" +546 11 model """rescal""" +546 11 loss """marginranking""" +546 11 regularizer """no""" +546 11 optimizer """adam""" +546 11 training_loop """owa""" +546 11 negative_sampler """basic""" +546 11 evaluator """rankbased""" +546 12 dataset """kinships""" +546 12 model """rescal""" +546 12 loss """marginranking""" +546 12 regularizer """no""" +546 12 optimizer """adam""" +546 12 training_loop """owa""" +546 12 negative_sampler """basic""" +546 12 evaluator """rankbased""" +546 13 dataset """kinships""" +546 13 model """rescal""" +546 13 loss """marginranking""" +546 13 regularizer """no""" +546 13 optimizer """adam""" +546 13 training_loop """owa""" +546 13 negative_sampler """basic""" +546 13 evaluator """rankbased""" +546 14 dataset """kinships""" +546 14 model """rescal""" +546 14 loss """marginranking""" +546 14 regularizer """no""" +546 14 optimizer """adam""" +546 14 training_loop """owa""" +546 14 negative_sampler """basic""" +546 14 evaluator """rankbased""" +546 15 dataset """kinships""" +546 15 model 
"""rescal""" +546 15 loss """marginranking""" +546 15 regularizer """no""" +546 15 optimizer """adam""" +546 15 training_loop """owa""" +546 15 negative_sampler """basic""" +546 15 evaluator """rankbased""" +546 16 dataset """kinships""" +546 16 model """rescal""" +546 16 loss """marginranking""" +546 16 regularizer """no""" +546 16 optimizer """adam""" +546 16 training_loop """owa""" +546 16 negative_sampler """basic""" +546 16 evaluator """rankbased""" +546 17 dataset """kinships""" +546 17 model """rescal""" +546 17 loss """marginranking""" +546 17 regularizer """no""" +546 17 optimizer """adam""" +546 17 training_loop """owa""" +546 17 negative_sampler """basic""" +546 17 evaluator """rankbased""" +546 18 dataset """kinships""" +546 18 model """rescal""" +546 18 loss """marginranking""" +546 18 regularizer """no""" +546 18 optimizer """adam""" +546 18 training_loop """owa""" +546 18 negative_sampler """basic""" +546 18 evaluator """rankbased""" +546 19 dataset """kinships""" +546 19 model """rescal""" +546 19 loss """marginranking""" +546 19 regularizer """no""" +546 19 optimizer """adam""" +546 19 training_loop """owa""" +546 19 negative_sampler """basic""" +546 19 evaluator """rankbased""" +546 20 dataset """kinships""" +546 20 model """rescal""" +546 20 loss """marginranking""" +546 20 regularizer """no""" +546 20 optimizer """adam""" +546 20 training_loop """owa""" +546 20 negative_sampler """basic""" +546 20 evaluator """rankbased""" +546 21 dataset """kinships""" +546 21 model """rescal""" +546 21 loss """marginranking""" +546 21 regularizer """no""" +546 21 optimizer """adam""" +546 21 training_loop """owa""" +546 21 negative_sampler """basic""" +546 21 evaluator """rankbased""" +546 22 dataset """kinships""" +546 22 model """rescal""" +546 22 loss """marginranking""" +546 22 regularizer """no""" +546 22 optimizer """adam""" +546 22 training_loop """owa""" +546 22 negative_sampler """basic""" +546 22 evaluator """rankbased""" +546 23 dataset 
"""kinships""" +546 23 model """rescal""" +546 23 loss """marginranking""" +546 23 regularizer """no""" +546 23 optimizer """adam""" +546 23 training_loop """owa""" +546 23 negative_sampler """basic""" +546 23 evaluator """rankbased""" +546 24 dataset """kinships""" +546 24 model """rescal""" +546 24 loss """marginranking""" +546 24 regularizer """no""" +546 24 optimizer """adam""" +546 24 training_loop """owa""" +546 24 negative_sampler """basic""" +546 24 evaluator """rankbased""" +546 25 dataset """kinships""" +546 25 model """rescal""" +546 25 loss """marginranking""" +546 25 regularizer """no""" +546 25 optimizer """adam""" +546 25 training_loop """owa""" +546 25 negative_sampler """basic""" +546 25 evaluator """rankbased""" +546 26 dataset """kinships""" +546 26 model """rescal""" +546 26 loss """marginranking""" +546 26 regularizer """no""" +546 26 optimizer """adam""" +546 26 training_loop """owa""" +546 26 negative_sampler """basic""" +546 26 evaluator """rankbased""" +546 27 dataset """kinships""" +546 27 model """rescal""" +546 27 loss """marginranking""" +546 27 regularizer """no""" +546 27 optimizer """adam""" +546 27 training_loop """owa""" +546 27 negative_sampler """basic""" +546 27 evaluator """rankbased""" +546 28 dataset """kinships""" +546 28 model """rescal""" +546 28 loss """marginranking""" +546 28 regularizer """no""" +546 28 optimizer """adam""" +546 28 training_loop """owa""" +546 28 negative_sampler """basic""" +546 28 evaluator """rankbased""" +546 29 dataset """kinships""" +546 29 model """rescal""" +546 29 loss """marginranking""" +546 29 regularizer """no""" +546 29 optimizer """adam""" +546 29 training_loop """owa""" +546 29 negative_sampler """basic""" +546 29 evaluator """rankbased""" +546 30 dataset """kinships""" +546 30 model """rescal""" +546 30 loss """marginranking""" +546 30 regularizer """no""" +546 30 optimizer """adam""" +546 30 training_loop """owa""" +546 30 negative_sampler """basic""" +546 30 evaluator """rankbased""" 
+546 31 dataset """kinships""" +546 31 model """rescal""" +546 31 loss """marginranking""" +546 31 regularizer """no""" +546 31 optimizer """adam""" +546 31 training_loop """owa""" +546 31 negative_sampler """basic""" +546 31 evaluator """rankbased""" +546 32 dataset """kinships""" +546 32 model """rescal""" +546 32 loss """marginranking""" +546 32 regularizer """no""" +546 32 optimizer """adam""" +546 32 training_loop """owa""" +546 32 negative_sampler """basic""" +546 32 evaluator """rankbased""" +546 33 dataset """kinships""" +546 33 model """rescal""" +546 33 loss """marginranking""" +546 33 regularizer """no""" +546 33 optimizer """adam""" +546 33 training_loop """owa""" +546 33 negative_sampler """basic""" +546 33 evaluator """rankbased""" +546 34 dataset """kinships""" +546 34 model """rescal""" +546 34 loss """marginranking""" +546 34 regularizer """no""" +546 34 optimizer """adam""" +546 34 training_loop """owa""" +546 34 negative_sampler """basic""" +546 34 evaluator """rankbased""" +546 35 dataset """kinships""" +546 35 model """rescal""" +546 35 loss """marginranking""" +546 35 regularizer """no""" +546 35 optimizer """adam""" +546 35 training_loop """owa""" +546 35 negative_sampler """basic""" +546 35 evaluator """rankbased""" +546 36 dataset """kinships""" +546 36 model """rescal""" +546 36 loss """marginranking""" +546 36 regularizer """no""" +546 36 optimizer """adam""" +546 36 training_loop """owa""" +546 36 negative_sampler """basic""" +546 36 evaluator """rankbased""" +546 37 dataset """kinships""" +546 37 model """rescal""" +546 37 loss """marginranking""" +546 37 regularizer """no""" +546 37 optimizer """adam""" +546 37 training_loop """owa""" +546 37 negative_sampler """basic""" +546 37 evaluator """rankbased""" +546 38 dataset """kinships""" +546 38 model """rescal""" +546 38 loss """marginranking""" +546 38 regularizer """no""" +546 38 optimizer """adam""" +546 38 training_loop """owa""" +546 38 negative_sampler """basic""" +546 38 evaluator 
"""rankbased""" +546 39 dataset """kinships""" +546 39 model """rescal""" +546 39 loss """marginranking""" +546 39 regularizer """no""" +546 39 optimizer """adam""" +546 39 training_loop """owa""" +546 39 negative_sampler """basic""" +546 39 evaluator """rankbased""" +546 40 dataset """kinships""" +546 40 model """rescal""" +546 40 loss """marginranking""" +546 40 regularizer """no""" +546 40 optimizer """adam""" +546 40 training_loop """owa""" +546 40 negative_sampler """basic""" +546 40 evaluator """rankbased""" +546 41 dataset """kinships""" +546 41 model """rescal""" +546 41 loss """marginranking""" +546 41 regularizer """no""" +546 41 optimizer """adam""" +546 41 training_loop """owa""" +546 41 negative_sampler """basic""" +546 41 evaluator """rankbased""" +546 42 dataset """kinships""" +546 42 model """rescal""" +546 42 loss """marginranking""" +546 42 regularizer """no""" +546 42 optimizer """adam""" +546 42 training_loop """owa""" +546 42 negative_sampler """basic""" +546 42 evaluator """rankbased""" +546 43 dataset """kinships""" +546 43 model """rescal""" +546 43 loss """marginranking""" +546 43 regularizer """no""" +546 43 optimizer """adam""" +546 43 training_loop """owa""" +546 43 negative_sampler """basic""" +546 43 evaluator """rankbased""" +546 44 dataset """kinships""" +546 44 model """rescal""" +546 44 loss """marginranking""" +546 44 regularizer """no""" +546 44 optimizer """adam""" +546 44 training_loop """owa""" +546 44 negative_sampler """basic""" +546 44 evaluator """rankbased""" +546 45 dataset """kinships""" +546 45 model """rescal""" +546 45 loss """marginranking""" +546 45 regularizer """no""" +546 45 optimizer """adam""" +546 45 training_loop """owa""" +546 45 negative_sampler """basic""" +546 45 evaluator """rankbased""" +546 46 dataset """kinships""" +546 46 model """rescal""" +546 46 loss """marginranking""" +546 46 regularizer """no""" +546 46 optimizer """adam""" +546 46 training_loop """owa""" +546 46 negative_sampler """basic""" 
+546 46 evaluator """rankbased""" +546 47 dataset """kinships""" +546 47 model """rescal""" +546 47 loss """marginranking""" +546 47 regularizer """no""" +546 47 optimizer """adam""" +546 47 training_loop """owa""" +546 47 negative_sampler """basic""" +546 47 evaluator """rankbased""" +546 48 dataset """kinships""" +546 48 model """rescal""" +546 48 loss """marginranking""" +546 48 regularizer """no""" +546 48 optimizer """adam""" +546 48 training_loop """owa""" +546 48 negative_sampler """basic""" +546 48 evaluator """rankbased""" +546 49 dataset """kinships""" +546 49 model """rescal""" +546 49 loss """marginranking""" +546 49 regularizer """no""" +546 49 optimizer """adam""" +546 49 training_loop """owa""" +546 49 negative_sampler """basic""" +546 49 evaluator """rankbased""" +546 50 dataset """kinships""" +546 50 model """rescal""" +546 50 loss """marginranking""" +546 50 regularizer """no""" +546 50 optimizer """adam""" +546 50 training_loop """owa""" +546 50 negative_sampler """basic""" +546 50 evaluator """rankbased""" +546 51 dataset """kinships""" +546 51 model """rescal""" +546 51 loss """marginranking""" +546 51 regularizer """no""" +546 51 optimizer """adam""" +546 51 training_loop """owa""" +546 51 negative_sampler """basic""" +546 51 evaluator """rankbased""" +546 52 dataset """kinships""" +546 52 model """rescal""" +546 52 loss """marginranking""" +546 52 regularizer """no""" +546 52 optimizer """adam""" +546 52 training_loop """owa""" +546 52 negative_sampler """basic""" +546 52 evaluator """rankbased""" +546 53 dataset """kinships""" +546 53 model """rescal""" +546 53 loss """marginranking""" +546 53 regularizer """no""" +546 53 optimizer """adam""" +546 53 training_loop """owa""" +546 53 negative_sampler """basic""" +546 53 evaluator """rankbased""" +546 54 dataset """kinships""" +546 54 model """rescal""" +546 54 loss """marginranking""" +546 54 regularizer """no""" +546 54 optimizer """adam""" +546 54 training_loop """owa""" +546 54 
negative_sampler """basic""" +546 54 evaluator """rankbased""" +546 55 dataset """kinships""" +546 55 model """rescal""" +546 55 loss """marginranking""" +546 55 regularizer """no""" +546 55 optimizer """adam""" +546 55 training_loop """owa""" +546 55 negative_sampler """basic""" +546 55 evaluator """rankbased""" +546 56 dataset """kinships""" +546 56 model """rescal""" +546 56 loss """marginranking""" +546 56 regularizer """no""" +546 56 optimizer """adam""" +546 56 training_loop """owa""" +546 56 negative_sampler """basic""" +546 56 evaluator """rankbased""" +546 57 dataset """kinships""" +546 57 model """rescal""" +546 57 loss """marginranking""" +546 57 regularizer """no""" +546 57 optimizer """adam""" +546 57 training_loop """owa""" +546 57 negative_sampler """basic""" +546 57 evaluator """rankbased""" +546 58 dataset """kinships""" +546 58 model """rescal""" +546 58 loss """marginranking""" +546 58 regularizer """no""" +546 58 optimizer """adam""" +546 58 training_loop """owa""" +546 58 negative_sampler """basic""" +546 58 evaluator """rankbased""" +546 59 dataset """kinships""" +546 59 model """rescal""" +546 59 loss """marginranking""" +546 59 regularizer """no""" +546 59 optimizer """adam""" +546 59 training_loop """owa""" +546 59 negative_sampler """basic""" +546 59 evaluator """rankbased""" +546 60 dataset """kinships""" +546 60 model """rescal""" +546 60 loss """marginranking""" +546 60 regularizer """no""" +546 60 optimizer """adam""" +546 60 training_loop """owa""" +546 60 negative_sampler """basic""" +546 60 evaluator """rankbased""" +546 61 dataset """kinships""" +546 61 model """rescal""" +546 61 loss """marginranking""" +546 61 regularizer """no""" +546 61 optimizer """adam""" +546 61 training_loop """owa""" +546 61 negative_sampler """basic""" +546 61 evaluator """rankbased""" +546 62 dataset """kinships""" +546 62 model """rescal""" +546 62 loss """marginranking""" +546 62 regularizer """no""" +546 62 optimizer """adam""" +546 62 training_loop 
"""owa""" +546 62 negative_sampler """basic""" +546 62 evaluator """rankbased""" +546 63 dataset """kinships""" +546 63 model """rescal""" +546 63 loss """marginranking""" +546 63 regularizer """no""" +546 63 optimizer """adam""" +546 63 training_loop """owa""" +546 63 negative_sampler """basic""" +546 63 evaluator """rankbased""" +546 64 dataset """kinships""" +546 64 model """rescal""" +546 64 loss """marginranking""" +546 64 regularizer """no""" +546 64 optimizer """adam""" +546 64 training_loop """owa""" +546 64 negative_sampler """basic""" +546 64 evaluator """rankbased""" +546 65 dataset """kinships""" +546 65 model """rescal""" +546 65 loss """marginranking""" +546 65 regularizer """no""" +546 65 optimizer """adam""" +546 65 training_loop """owa""" +546 65 negative_sampler """basic""" +546 65 evaluator """rankbased""" +546 66 dataset """kinships""" +546 66 model """rescal""" +546 66 loss """marginranking""" +546 66 regularizer """no""" +546 66 optimizer """adam""" +546 66 training_loop """owa""" +546 66 negative_sampler """basic""" +546 66 evaluator """rankbased""" +546 67 dataset """kinships""" +546 67 model """rescal""" +546 67 loss """marginranking""" +546 67 regularizer """no""" +546 67 optimizer """adam""" +546 67 training_loop """owa""" +546 67 negative_sampler """basic""" +546 67 evaluator """rankbased""" +546 68 dataset """kinships""" +546 68 model """rescal""" +546 68 loss """marginranking""" +546 68 regularizer """no""" +546 68 optimizer """adam""" +546 68 training_loop """owa""" +546 68 negative_sampler """basic""" +546 68 evaluator """rankbased""" +546 69 dataset """kinships""" +546 69 model """rescal""" +546 69 loss """marginranking""" +546 69 regularizer """no""" +546 69 optimizer """adam""" +546 69 training_loop """owa""" +546 69 negative_sampler """basic""" +546 69 evaluator """rankbased""" +546 70 dataset """kinships""" +546 70 model """rescal""" +546 70 loss """marginranking""" +546 70 regularizer """no""" +546 70 optimizer """adam""" +546 
70 training_loop """owa""" +546 70 negative_sampler """basic""" +546 70 evaluator """rankbased""" +546 71 dataset """kinships""" +546 71 model """rescal""" +546 71 loss """marginranking""" +546 71 regularizer """no""" +546 71 optimizer """adam""" +546 71 training_loop """owa""" +546 71 negative_sampler """basic""" +546 71 evaluator """rankbased""" +546 72 dataset """kinships""" +546 72 model """rescal""" +546 72 loss """marginranking""" +546 72 regularizer """no""" +546 72 optimizer """adam""" +546 72 training_loop """owa""" +546 72 negative_sampler """basic""" +546 72 evaluator """rankbased""" +546 73 dataset """kinships""" +546 73 model """rescal""" +546 73 loss """marginranking""" +546 73 regularizer """no""" +546 73 optimizer """adam""" +546 73 training_loop """owa""" +546 73 negative_sampler """basic""" +546 73 evaluator """rankbased""" +546 74 dataset """kinships""" +546 74 model """rescal""" +546 74 loss """marginranking""" +546 74 regularizer """no""" +546 74 optimizer """adam""" +546 74 training_loop """owa""" +546 74 negative_sampler """basic""" +546 74 evaluator """rankbased""" +546 75 dataset """kinships""" +546 75 model """rescal""" +546 75 loss """marginranking""" +546 75 regularizer """no""" +546 75 optimizer """adam""" +546 75 training_loop """owa""" +546 75 negative_sampler """basic""" +546 75 evaluator """rankbased""" +546 76 dataset """kinships""" +546 76 model """rescal""" +546 76 loss """marginranking""" +546 76 regularizer """no""" +546 76 optimizer """adam""" +546 76 training_loop """owa""" +546 76 negative_sampler """basic""" +546 76 evaluator """rankbased""" +546 77 dataset """kinships""" +546 77 model """rescal""" +546 77 loss """marginranking""" +546 77 regularizer """no""" +546 77 optimizer """adam""" +546 77 training_loop """owa""" +546 77 negative_sampler """basic""" +546 77 evaluator """rankbased""" +546 78 dataset """kinships""" +546 78 model """rescal""" +546 78 loss """marginranking""" +546 78 regularizer """no""" +546 78 optimizer 
"""adam""" +546 78 training_loop """owa""" +546 78 negative_sampler """basic""" +546 78 evaluator """rankbased""" +546 79 dataset """kinships""" +546 79 model """rescal""" +546 79 loss """marginranking""" +546 79 regularizer """no""" +546 79 optimizer """adam""" +546 79 training_loop """owa""" +546 79 negative_sampler """basic""" +546 79 evaluator """rankbased""" +546 80 dataset """kinships""" +546 80 model """rescal""" +546 80 loss """marginranking""" +546 80 regularizer """no""" +546 80 optimizer """adam""" +546 80 training_loop """owa""" +546 80 negative_sampler """basic""" +546 80 evaluator """rankbased""" +546 81 dataset """kinships""" +546 81 model """rescal""" +546 81 loss """marginranking""" +546 81 regularizer """no""" +546 81 optimizer """adam""" +546 81 training_loop """owa""" +546 81 negative_sampler """basic""" +546 81 evaluator """rankbased""" +546 82 dataset """kinships""" +546 82 model """rescal""" +546 82 loss """marginranking""" +546 82 regularizer """no""" +546 82 optimizer """adam""" +546 82 training_loop """owa""" +546 82 negative_sampler """basic""" +546 82 evaluator """rankbased""" +546 83 dataset """kinships""" +546 83 model """rescal""" +546 83 loss """marginranking""" +546 83 regularizer """no""" +546 83 optimizer """adam""" +546 83 training_loop """owa""" +546 83 negative_sampler """basic""" +546 83 evaluator """rankbased""" +546 84 dataset """kinships""" +546 84 model """rescal""" +546 84 loss """marginranking""" +546 84 regularizer """no""" +546 84 optimizer """adam""" +546 84 training_loop """owa""" +546 84 negative_sampler """basic""" +546 84 evaluator """rankbased""" +546 85 dataset """kinships""" +546 85 model """rescal""" +546 85 loss """marginranking""" +546 85 regularizer """no""" +546 85 optimizer """adam""" +546 85 training_loop """owa""" +546 85 negative_sampler """basic""" +546 85 evaluator """rankbased""" +546 86 dataset """kinships""" +546 86 model """rescal""" +546 86 loss """marginranking""" +546 86 regularizer """no""" 
+546 86 optimizer """adam""" +546 86 training_loop """owa""" +546 86 negative_sampler """basic""" +546 86 evaluator """rankbased""" +546 87 dataset """kinships""" +546 87 model """rescal""" +546 87 loss """marginranking""" +546 87 regularizer """no""" +546 87 optimizer """adam""" +546 87 training_loop """owa""" +546 87 negative_sampler """basic""" +546 87 evaluator """rankbased""" +546 88 dataset """kinships""" +546 88 model """rescal""" +546 88 loss """marginranking""" +546 88 regularizer """no""" +546 88 optimizer """adam""" +546 88 training_loop """owa""" +546 88 negative_sampler """basic""" +546 88 evaluator """rankbased""" +546 89 dataset """kinships""" +546 89 model """rescal""" +546 89 loss """marginranking""" +546 89 regularizer """no""" +546 89 optimizer """adam""" +546 89 training_loop """owa""" +546 89 negative_sampler """basic""" +546 89 evaluator """rankbased""" +546 90 dataset """kinships""" +546 90 model """rescal""" +546 90 loss """marginranking""" +546 90 regularizer """no""" +546 90 optimizer """adam""" +546 90 training_loop """owa""" +546 90 negative_sampler """basic""" +546 90 evaluator """rankbased""" +546 91 dataset """kinships""" +546 91 model """rescal""" +546 91 loss """marginranking""" +546 91 regularizer """no""" +546 91 optimizer """adam""" +546 91 training_loop """owa""" +546 91 negative_sampler """basic""" +546 91 evaluator """rankbased""" +546 92 dataset """kinships""" +546 92 model """rescal""" +546 92 loss """marginranking""" +546 92 regularizer """no""" +546 92 optimizer """adam""" +546 92 training_loop """owa""" +546 92 negative_sampler """basic""" +546 92 evaluator """rankbased""" +546 93 dataset """kinships""" +546 93 model """rescal""" +546 93 loss """marginranking""" +546 93 regularizer """no""" +546 93 optimizer """adam""" +546 93 training_loop """owa""" +546 93 negative_sampler """basic""" +546 93 evaluator """rankbased""" +546 94 dataset """kinships""" +546 94 model """rescal""" +546 94 loss """marginranking""" +546 94 
regularizer """no""" +546 94 optimizer """adam""" +546 94 training_loop """owa""" +546 94 negative_sampler """basic""" +546 94 evaluator """rankbased""" +546 95 dataset """kinships""" +546 95 model """rescal""" +546 95 loss """marginranking""" +546 95 regularizer """no""" +546 95 optimizer """adam""" +546 95 training_loop """owa""" +546 95 negative_sampler """basic""" +546 95 evaluator """rankbased""" +546 96 dataset """kinships""" +546 96 model """rescal""" +546 96 loss """marginranking""" +546 96 regularizer """no""" +546 96 optimizer """adam""" +546 96 training_loop """owa""" +546 96 negative_sampler """basic""" +546 96 evaluator """rankbased""" +546 97 dataset """kinships""" +546 97 model """rescal""" +546 97 loss """marginranking""" +546 97 regularizer """no""" +546 97 optimizer """adam""" +546 97 training_loop """owa""" +546 97 negative_sampler """basic""" +546 97 evaluator """rankbased""" +546 98 dataset """kinships""" +546 98 model """rescal""" +546 98 loss """marginranking""" +546 98 regularizer """no""" +546 98 optimizer """adam""" +546 98 training_loop """owa""" +546 98 negative_sampler """basic""" +546 98 evaluator """rankbased""" +546 99 dataset """kinships""" +546 99 model """rescal""" +546 99 loss """marginranking""" +546 99 regularizer """no""" +546 99 optimizer """adam""" +546 99 training_loop """owa""" +546 99 negative_sampler """basic""" +546 99 evaluator """rankbased""" +546 100 dataset """kinships""" +546 100 model """rescal""" +546 100 loss """marginranking""" +546 100 regularizer """no""" +546 100 optimizer """adam""" +546 100 training_loop """owa""" +546 100 negative_sampler """basic""" +546 100 evaluator """rankbased""" +547 1 model.embedding_dim 1.0 +547 1 loss.margin 7.4151484515384825 +547 1 optimizer.lr 0.010467091205560683 +547 1 negative_sampler.num_negs_per_pos 82.0 +547 1 training.batch_size 0.0 +547 2 model.embedding_dim 1.0 +547 2 loss.margin 8.142778952697963 +547 2 optimizer.lr 0.004433383031177953 +547 2 
negative_sampler.num_negs_per_pos 77.0 +547 2 training.batch_size 0.0 +547 3 model.embedding_dim 0.0 +547 3 loss.margin 5.7401692633494426 +547 3 optimizer.lr 0.0016776730644082571 +547 3 negative_sampler.num_negs_per_pos 58.0 +547 3 training.batch_size 2.0 +547 4 model.embedding_dim 1.0 +547 4 loss.margin 0.9649553529038435 +547 4 optimizer.lr 0.0019078835573990167 +547 4 negative_sampler.num_negs_per_pos 57.0 +547 4 training.batch_size 1.0 +547 5 model.embedding_dim 1.0 +547 5 loss.margin 1.9683592988604701 +547 5 optimizer.lr 0.027415455744831126 +547 5 negative_sampler.num_negs_per_pos 45.0 +547 5 training.batch_size 0.0 +547 6 model.embedding_dim 2.0 +547 6 loss.margin 9.979527966736399 +547 6 optimizer.lr 0.030544067866196366 +547 6 negative_sampler.num_negs_per_pos 78.0 +547 6 training.batch_size 1.0 +547 7 model.embedding_dim 1.0 +547 7 loss.margin 7.867755958903726 +547 7 optimizer.lr 0.018124598788202086 +547 7 negative_sampler.num_negs_per_pos 50.0 +547 7 training.batch_size 0.0 +547 8 model.embedding_dim 0.0 +547 8 loss.margin 2.2767871041205114 +547 8 optimizer.lr 0.011201583601156017 +547 8 negative_sampler.num_negs_per_pos 50.0 +547 8 training.batch_size 1.0 +547 9 model.embedding_dim 0.0 +547 9 loss.margin 2.459546676756821 +547 9 optimizer.lr 0.006591824868195372 +547 9 negative_sampler.num_negs_per_pos 10.0 +547 9 training.batch_size 2.0 +547 10 model.embedding_dim 0.0 +547 10 loss.margin 9.645012765948778 +547 10 optimizer.lr 0.027444368453012007 +547 10 negative_sampler.num_negs_per_pos 84.0 +547 10 training.batch_size 2.0 +547 11 model.embedding_dim 1.0 +547 11 loss.margin 9.75740971863688 +547 11 optimizer.lr 0.04608943400983145 +547 11 negative_sampler.num_negs_per_pos 19.0 +547 11 training.batch_size 1.0 +547 12 model.embedding_dim 1.0 +547 12 loss.margin 5.336524546649729 +547 12 optimizer.lr 0.053095396680922995 +547 12 negative_sampler.num_negs_per_pos 86.0 +547 12 training.batch_size 0.0 +547 13 model.embedding_dim 2.0 +547 13 
loss.margin 6.779842328204996 +547 13 optimizer.lr 0.01120244452447973 +547 13 negative_sampler.num_negs_per_pos 45.0 +547 13 training.batch_size 2.0 +547 14 model.embedding_dim 0.0 +547 14 loss.margin 7.82781001835066 +547 14 optimizer.lr 0.003961912610192299 +547 14 negative_sampler.num_negs_per_pos 7.0 +547 14 training.batch_size 1.0 +547 15 model.embedding_dim 0.0 +547 15 loss.margin 7.524800115398629 +547 15 optimizer.lr 0.08160696467782586 +547 15 negative_sampler.num_negs_per_pos 16.0 +547 15 training.batch_size 2.0 +547 16 model.embedding_dim 0.0 +547 16 loss.margin 4.422356538997497 +547 16 optimizer.lr 0.033045377896505704 +547 16 negative_sampler.num_negs_per_pos 67.0 +547 16 training.batch_size 2.0 +547 17 model.embedding_dim 1.0 +547 17 loss.margin 8.98251582672977 +547 17 optimizer.lr 0.014135065543845382 +547 17 negative_sampler.num_negs_per_pos 9.0 +547 17 training.batch_size 1.0 +547 1 dataset """wn18rr""" +547 1 model """rescal""" +547 1 loss """marginranking""" +547 1 regularizer """no""" +547 1 optimizer """adam""" +547 1 training_loop """owa""" +547 1 negative_sampler """basic""" +547 1 evaluator """rankbased""" +547 2 dataset """wn18rr""" +547 2 model """rescal""" +547 2 loss """marginranking""" +547 2 regularizer """no""" +547 2 optimizer """adam""" +547 2 training_loop """owa""" +547 2 negative_sampler """basic""" +547 2 evaluator """rankbased""" +547 3 dataset """wn18rr""" +547 3 model """rescal""" +547 3 loss """marginranking""" +547 3 regularizer """no""" +547 3 optimizer """adam""" +547 3 training_loop """owa""" +547 3 negative_sampler """basic""" +547 3 evaluator """rankbased""" +547 4 dataset """wn18rr""" +547 4 model """rescal""" +547 4 loss """marginranking""" +547 4 regularizer """no""" +547 4 optimizer """adam""" +547 4 training_loop """owa""" +547 4 negative_sampler """basic""" +547 4 evaluator """rankbased""" +547 5 dataset """wn18rr""" +547 5 model """rescal""" +547 5 loss """marginranking""" +547 5 regularizer """no""" +547 5 
optimizer """adam""" +547 5 training_loop """owa""" +547 5 negative_sampler """basic""" +547 5 evaluator """rankbased""" +547 6 dataset """wn18rr""" +547 6 model """rescal""" +547 6 loss """marginranking""" +547 6 regularizer """no""" +547 6 optimizer """adam""" +547 6 training_loop """owa""" +547 6 negative_sampler """basic""" +547 6 evaluator """rankbased""" +547 7 dataset """wn18rr""" +547 7 model """rescal""" +547 7 loss """marginranking""" +547 7 regularizer """no""" +547 7 optimizer """adam""" +547 7 training_loop """owa""" +547 7 negative_sampler """basic""" +547 7 evaluator """rankbased""" +547 8 dataset """wn18rr""" +547 8 model """rescal""" +547 8 loss """marginranking""" +547 8 regularizer """no""" +547 8 optimizer """adam""" +547 8 training_loop """owa""" +547 8 negative_sampler """basic""" +547 8 evaluator """rankbased""" +547 9 dataset """wn18rr""" +547 9 model """rescal""" +547 9 loss """marginranking""" +547 9 regularizer """no""" +547 9 optimizer """adam""" +547 9 training_loop """owa""" +547 9 negative_sampler """basic""" +547 9 evaluator """rankbased""" +547 10 dataset """wn18rr""" +547 10 model """rescal""" +547 10 loss """marginranking""" +547 10 regularizer """no""" +547 10 optimizer """adam""" +547 10 training_loop """owa""" +547 10 negative_sampler """basic""" +547 10 evaluator """rankbased""" +547 11 dataset """wn18rr""" +547 11 model """rescal""" +547 11 loss """marginranking""" +547 11 regularizer """no""" +547 11 optimizer """adam""" +547 11 training_loop """owa""" +547 11 negative_sampler """basic""" +547 11 evaluator """rankbased""" +547 12 dataset """wn18rr""" +547 12 model """rescal""" +547 12 loss """marginranking""" +547 12 regularizer """no""" +547 12 optimizer """adam""" +547 12 training_loop """owa""" +547 12 negative_sampler """basic""" +547 12 evaluator """rankbased""" +547 13 dataset """wn18rr""" +547 13 model """rescal""" +547 13 loss """marginranking""" +547 13 regularizer """no""" +547 13 optimizer """adam""" +547 13 
training_loop """owa""" +547 13 negative_sampler """basic""" +547 13 evaluator """rankbased""" +547 14 dataset """wn18rr""" +547 14 model """rescal""" +547 14 loss """marginranking""" +547 14 regularizer """no""" +547 14 optimizer """adam""" +547 14 training_loop """owa""" +547 14 negative_sampler """basic""" +547 14 evaluator """rankbased""" +547 15 dataset """wn18rr""" +547 15 model """rescal""" +547 15 loss """marginranking""" +547 15 regularizer """no""" +547 15 optimizer """adam""" +547 15 training_loop """owa""" +547 15 negative_sampler """basic""" +547 15 evaluator """rankbased""" +547 16 dataset """wn18rr""" +547 16 model """rescal""" +547 16 loss """marginranking""" +547 16 regularizer """no""" +547 16 optimizer """adam""" +547 16 training_loop """owa""" +547 16 negative_sampler """basic""" +547 16 evaluator """rankbased""" +547 17 dataset """wn18rr""" +547 17 model """rescal""" +547 17 loss """marginranking""" +547 17 regularizer """no""" +547 17 optimizer """adam""" +547 17 training_loop """owa""" +547 17 negative_sampler """basic""" +547 17 evaluator """rankbased""" +548 1 model.embedding_dim 2.0 +548 1 loss.margin 8.566847035375295 +548 1 optimizer.lr 0.02059665164534936 +548 1 negative_sampler.num_negs_per_pos 27.0 +548 1 training.batch_size 1.0 +548 2 model.embedding_dim 2.0 +548 2 loss.margin 3.742996630482506 +548 2 optimizer.lr 0.0028003615694013276 +548 2 negative_sampler.num_negs_per_pos 28.0 +548 2 training.batch_size 0.0 +548 3 model.embedding_dim 0.0 +548 3 loss.margin 2.919201608117921 +548 3 optimizer.lr 0.004542769145814405 +548 3 negative_sampler.num_negs_per_pos 41.0 +548 3 training.batch_size 0.0 +548 4 model.embedding_dim 1.0 +548 4 loss.margin 1.748901614014959 +548 4 optimizer.lr 0.01203611669054361 +548 4 negative_sampler.num_negs_per_pos 67.0 +548 4 training.batch_size 0.0 +548 5 model.embedding_dim 0.0 +548 5 loss.margin 8.564498817355409 +548 5 optimizer.lr 0.043984274433899136 +548 5 negative_sampler.num_negs_per_pos 98.0 +548 5 
training.batch_size 0.0 +548 6 model.embedding_dim 1.0 +548 6 loss.margin 6.503971323020712 +548 6 optimizer.lr 0.0012418142476927716 +548 6 negative_sampler.num_negs_per_pos 96.0 +548 6 training.batch_size 2.0 +548 7 model.embedding_dim 0.0 +548 7 loss.margin 6.926155468501442 +548 7 optimizer.lr 0.0034485318615226613 +548 7 negative_sampler.num_negs_per_pos 44.0 +548 7 training.batch_size 0.0 +548 8 model.embedding_dim 1.0 +548 8 loss.margin 2.5469959762177607 +548 8 optimizer.lr 0.010965287787225383 +548 8 negative_sampler.num_negs_per_pos 59.0 +548 8 training.batch_size 2.0 +548 9 model.embedding_dim 1.0 +548 9 loss.margin 3.8962151989078286 +548 9 optimizer.lr 0.005538397722095896 +548 9 negative_sampler.num_negs_per_pos 81.0 +548 9 training.batch_size 2.0 +548 10 model.embedding_dim 2.0 +548 10 loss.margin 6.962260334472552 +548 10 optimizer.lr 0.0014390523943022923 +548 10 negative_sampler.num_negs_per_pos 0.0 +548 10 training.batch_size 1.0 +548 11 model.embedding_dim 2.0 +548 11 loss.margin 4.358391387007658 +548 11 optimizer.lr 0.0010737278035682731 +548 11 negative_sampler.num_negs_per_pos 11.0 +548 11 training.batch_size 1.0 +548 12 model.embedding_dim 0.0 +548 12 loss.margin 2.9852255979470104 +548 12 optimizer.lr 0.05332591104856162 +548 12 negative_sampler.num_negs_per_pos 55.0 +548 12 training.batch_size 1.0 +548 13 model.embedding_dim 0.0 +548 13 loss.margin 3.41487927945485 +548 13 optimizer.lr 0.022493289498961355 +548 13 negative_sampler.num_negs_per_pos 46.0 +548 13 training.batch_size 1.0 +548 14 model.embedding_dim 1.0 +548 14 loss.margin 5.299659035296464 +548 14 optimizer.lr 0.0821460507464882 +548 14 negative_sampler.num_negs_per_pos 26.0 +548 14 training.batch_size 1.0 +548 15 model.embedding_dim 1.0 +548 15 loss.margin 9.239749561996716 +548 15 optimizer.lr 0.016202214373242825 +548 15 negative_sampler.num_negs_per_pos 59.0 +548 15 training.batch_size 2.0 +548 16 model.embedding_dim 0.0 +548 16 loss.margin 0.6241553694702318 +548 16 
optimizer.lr 0.0011942484526532562 +548 16 negative_sampler.num_negs_per_pos 27.0 +548 16 training.batch_size 2.0 +548 17 model.embedding_dim 0.0 +548 17 loss.margin 4.3735955715805614 +548 17 optimizer.lr 0.027329462616817204 +548 17 negative_sampler.num_negs_per_pos 57.0 +548 17 training.batch_size 2.0 +548 18 model.embedding_dim 0.0 +548 18 loss.margin 1.4830785427903983 +548 18 optimizer.lr 0.0954855595312006 +548 18 negative_sampler.num_negs_per_pos 61.0 +548 18 training.batch_size 0.0 +548 19 model.embedding_dim 0.0 +548 19 loss.margin 8.885645970030156 +548 19 optimizer.lr 0.011664100814554105 +548 19 negative_sampler.num_negs_per_pos 31.0 +548 19 training.batch_size 1.0 +548 20 model.embedding_dim 1.0 +548 20 loss.margin 6.138374777553959 +548 20 optimizer.lr 0.017192542498483825 +548 20 negative_sampler.num_negs_per_pos 61.0 +548 20 training.batch_size 1.0 +548 21 model.embedding_dim 2.0 +548 21 loss.margin 2.22811624614615 +548 21 optimizer.lr 0.0026516688818504506 +548 21 negative_sampler.num_negs_per_pos 20.0 +548 21 training.batch_size 1.0 +548 22 model.embedding_dim 1.0 +548 22 loss.margin 4.658112311417795 +548 22 optimizer.lr 0.0023398777989800854 +548 22 negative_sampler.num_negs_per_pos 15.0 +548 22 training.batch_size 0.0 +548 23 model.embedding_dim 2.0 +548 23 loss.margin 6.972056370577198 +548 23 optimizer.lr 0.07684972413693554 +548 23 negative_sampler.num_negs_per_pos 39.0 +548 23 training.batch_size 0.0 +548 24 model.embedding_dim 2.0 +548 24 loss.margin 3.3352074109699825 +548 24 optimizer.lr 0.0029716195582762577 +548 24 negative_sampler.num_negs_per_pos 36.0 +548 24 training.batch_size 1.0 +548 25 model.embedding_dim 1.0 +548 25 loss.margin 7.208323576272846 +548 25 optimizer.lr 0.009655830961318272 +548 25 negative_sampler.num_negs_per_pos 98.0 +548 25 training.batch_size 0.0 +548 1 dataset """wn18rr""" +548 1 model """rescal""" +548 1 loss """marginranking""" +548 1 regularizer """no""" +548 1 optimizer """adam""" +548 1 training_loop 
"""owa""" +548 1 negative_sampler """basic""" +548 1 evaluator """rankbased""" +548 2 dataset """wn18rr""" +548 2 model """rescal""" +548 2 loss """marginranking""" +548 2 regularizer """no""" +548 2 optimizer """adam""" +548 2 training_loop """owa""" +548 2 negative_sampler """basic""" +548 2 evaluator """rankbased""" +548 3 dataset """wn18rr""" +548 3 model """rescal""" +548 3 loss """marginranking""" +548 3 regularizer """no""" +548 3 optimizer """adam""" +548 3 training_loop """owa""" +548 3 negative_sampler """basic""" +548 3 evaluator """rankbased""" +548 4 dataset """wn18rr""" +548 4 model """rescal""" +548 4 loss """marginranking""" +548 4 regularizer """no""" +548 4 optimizer """adam""" +548 4 training_loop """owa""" +548 4 negative_sampler """basic""" +548 4 evaluator """rankbased""" +548 5 dataset """wn18rr""" +548 5 model """rescal""" +548 5 loss """marginranking""" +548 5 regularizer """no""" +548 5 optimizer """adam""" +548 5 training_loop """owa""" +548 5 negative_sampler """basic""" +548 5 evaluator """rankbased""" +548 6 dataset """wn18rr""" +548 6 model """rescal""" +548 6 loss """marginranking""" +548 6 regularizer """no""" +548 6 optimizer """adam""" +548 6 training_loop """owa""" +548 6 negative_sampler """basic""" +548 6 evaluator """rankbased""" +548 7 dataset """wn18rr""" +548 7 model """rescal""" +548 7 loss """marginranking""" +548 7 regularizer """no""" +548 7 optimizer """adam""" +548 7 training_loop """owa""" +548 7 negative_sampler """basic""" +548 7 evaluator """rankbased""" +548 8 dataset """wn18rr""" +548 8 model """rescal""" +548 8 loss """marginranking""" +548 8 regularizer """no""" +548 8 optimizer """adam""" +548 8 training_loop """owa""" +548 8 negative_sampler """basic""" +548 8 evaluator """rankbased""" +548 9 dataset """wn18rr""" +548 9 model """rescal""" +548 9 loss """marginranking""" +548 9 regularizer """no""" +548 9 optimizer """adam""" +548 9 training_loop """owa""" +548 9 negative_sampler """basic""" +548 9 evaluator 
"""rankbased""" +548 10 dataset """wn18rr""" +548 10 model """rescal""" +548 10 loss """marginranking""" +548 10 regularizer """no""" +548 10 optimizer """adam""" +548 10 training_loop """owa""" +548 10 negative_sampler """basic""" +548 10 evaluator """rankbased""" +548 11 dataset """wn18rr""" +548 11 model """rescal""" +548 11 loss """marginranking""" +548 11 regularizer """no""" +548 11 optimizer """adam""" +548 11 training_loop """owa""" +548 11 negative_sampler """basic""" +548 11 evaluator """rankbased""" +548 12 dataset """wn18rr""" +548 12 model """rescal""" +548 12 loss """marginranking""" +548 12 regularizer """no""" +548 12 optimizer """adam""" +548 12 training_loop """owa""" +548 12 negative_sampler """basic""" +548 12 evaluator """rankbased""" +548 13 dataset """wn18rr""" +548 13 model """rescal""" +548 13 loss """marginranking""" +548 13 regularizer """no""" +548 13 optimizer """adam""" +548 13 training_loop """owa""" +548 13 negative_sampler """basic""" +548 13 evaluator """rankbased""" +548 14 dataset """wn18rr""" +548 14 model """rescal""" +548 14 loss """marginranking""" +548 14 regularizer """no""" +548 14 optimizer """adam""" +548 14 training_loop """owa""" +548 14 negative_sampler """basic""" +548 14 evaluator """rankbased""" +548 15 dataset """wn18rr""" +548 15 model """rescal""" +548 15 loss """marginranking""" +548 15 regularizer """no""" +548 15 optimizer """adam""" +548 15 training_loop """owa""" +548 15 negative_sampler """basic""" +548 15 evaluator """rankbased""" +548 16 dataset """wn18rr""" +548 16 model """rescal""" +548 16 loss """marginranking""" +548 16 regularizer """no""" +548 16 optimizer """adam""" +548 16 training_loop """owa""" +548 16 negative_sampler """basic""" +548 16 evaluator """rankbased""" +548 17 dataset """wn18rr""" +548 17 model """rescal""" +548 17 loss """marginranking""" +548 17 regularizer """no""" +548 17 optimizer """adam""" +548 17 training_loop """owa""" +548 17 negative_sampler """basic""" +548 17 evaluator 
"""rankbased""" +548 18 dataset """wn18rr""" +548 18 model """rescal""" +548 18 loss """marginranking""" +548 18 regularizer """no""" +548 18 optimizer """adam""" +548 18 training_loop """owa""" +548 18 negative_sampler """basic""" +548 18 evaluator """rankbased""" +548 19 dataset """wn18rr""" +548 19 model """rescal""" +548 19 loss """marginranking""" +548 19 regularizer """no""" +548 19 optimizer """adam""" +548 19 training_loop """owa""" +548 19 negative_sampler """basic""" +548 19 evaluator """rankbased""" +548 20 dataset """wn18rr""" +548 20 model """rescal""" +548 20 loss """marginranking""" +548 20 regularizer """no""" +548 20 optimizer """adam""" +548 20 training_loop """owa""" +548 20 negative_sampler """basic""" +548 20 evaluator """rankbased""" +548 21 dataset """wn18rr""" +548 21 model """rescal""" +548 21 loss """marginranking""" +548 21 regularizer """no""" +548 21 optimizer """adam""" +548 21 training_loop """owa""" +548 21 negative_sampler """basic""" +548 21 evaluator """rankbased""" +548 22 dataset """wn18rr""" +548 22 model """rescal""" +548 22 loss """marginranking""" +548 22 regularizer """no""" +548 22 optimizer """adam""" +548 22 training_loop """owa""" +548 22 negative_sampler """basic""" +548 22 evaluator """rankbased""" +548 23 dataset """wn18rr""" +548 23 model """rescal""" +548 23 loss """marginranking""" +548 23 regularizer """no""" +548 23 optimizer """adam""" +548 23 training_loop """owa""" +548 23 negative_sampler """basic""" +548 23 evaluator """rankbased""" +548 24 dataset """wn18rr""" +548 24 model """rescal""" +548 24 loss """marginranking""" +548 24 regularizer """no""" +548 24 optimizer """adam""" +548 24 training_loop """owa""" +548 24 negative_sampler """basic""" +548 24 evaluator """rankbased""" +548 25 dataset """wn18rr""" +548 25 model """rescal""" +548 25 loss """marginranking""" +548 25 regularizer """no""" +548 25 optimizer """adam""" +548 25 training_loop """owa""" +548 25 negative_sampler """basic""" +548 25 evaluator 
"""rankbased""" +549 1 model.embedding_dim 1.0 +549 1 loss.margin 14.961443378741649 +549 1 loss.adversarial_temperature 0.7758149615625857 +549 1 optimizer.lr 0.038425880598779 +549 1 negative_sampler.num_negs_per_pos 6.0 +549 1 training.batch_size 1.0 +549 2 model.embedding_dim 1.0 +549 2 loss.margin 14.336439505694813 +549 2 loss.adversarial_temperature 0.8602959631289208 +549 2 optimizer.lr 0.005323461496369252 +549 2 negative_sampler.num_negs_per_pos 2.0 +549 2 training.batch_size 0.0 +549 3 model.embedding_dim 2.0 +549 3 loss.margin 8.670314461507285 +549 3 loss.adversarial_temperature 0.53224770637371 +549 3 optimizer.lr 0.057109433129584955 +549 3 negative_sampler.num_negs_per_pos 20.0 +549 3 training.batch_size 2.0 +549 4 model.embedding_dim 1.0 +549 4 loss.margin 16.3778569902397 +549 4 loss.adversarial_temperature 0.27670610210248986 +549 4 optimizer.lr 0.024167640263282143 +549 4 negative_sampler.num_negs_per_pos 15.0 +549 4 training.batch_size 0.0 +549 5 model.embedding_dim 1.0 +549 5 loss.margin 23.309865050176867 +549 5 loss.adversarial_temperature 0.2950290564016398 +549 5 optimizer.lr 0.012187348661867907 +549 5 negative_sampler.num_negs_per_pos 77.0 +549 5 training.batch_size 0.0 +549 6 model.embedding_dim 1.0 +549 6 loss.margin 20.851180005328796 +549 6 loss.adversarial_temperature 0.15617777032381047 +549 6 optimizer.lr 0.0010459234465378065 +549 6 negative_sampler.num_negs_per_pos 33.0 +549 6 training.batch_size 0.0 +549 7 model.embedding_dim 1.0 +549 7 loss.margin 7.918591442951114 +549 7 loss.adversarial_temperature 0.4596966198963324 +549 7 optimizer.lr 0.014032297194400419 +549 7 negative_sampler.num_negs_per_pos 13.0 +549 7 training.batch_size 0.0 +549 8 model.embedding_dim 2.0 +549 8 loss.margin 19.84554898602942 +549 8 loss.adversarial_temperature 0.43771818825574255 +549 8 optimizer.lr 0.001083225506133376 +549 8 negative_sampler.num_negs_per_pos 77.0 +549 8 training.batch_size 2.0 +549 9 model.embedding_dim 1.0 +549 9 loss.margin 
7.565888953425365 +549 9 loss.adversarial_temperature 0.3897553336792453 +549 9 optimizer.lr 0.004695193753734542 +549 9 negative_sampler.num_negs_per_pos 73.0 +549 9 training.batch_size 1.0 +549 10 model.embedding_dim 0.0 +549 10 loss.margin 7.959070533427687 +549 10 loss.adversarial_temperature 0.9117734796294704 +549 10 optimizer.lr 0.06042477413140935 +549 10 negative_sampler.num_negs_per_pos 84.0 +549 10 training.batch_size 0.0 +549 11 model.embedding_dim 2.0 +549 11 loss.margin 8.338887812504288 +549 11 loss.adversarial_temperature 0.5250317124075173 +549 11 optimizer.lr 0.006635860037561176 +549 11 negative_sampler.num_negs_per_pos 13.0 +549 11 training.batch_size 0.0 +549 12 model.embedding_dim 1.0 +549 12 loss.margin 15.05862163071852 +549 12 loss.adversarial_temperature 0.3151650096866243 +549 12 optimizer.lr 0.0010264396650201619 +549 12 negative_sampler.num_negs_per_pos 93.0 +549 12 training.batch_size 0.0 +549 13 model.embedding_dim 2.0 +549 13 loss.margin 13.455431059499345 +549 13 loss.adversarial_temperature 0.8756265686250593 +549 13 optimizer.lr 0.00536419218413399 +549 13 negative_sampler.num_negs_per_pos 56.0 +549 13 training.batch_size 0.0 +549 14 model.embedding_dim 2.0 +549 14 loss.margin 20.249851320978667 +549 14 loss.adversarial_temperature 0.649436198780882 +549 14 optimizer.lr 0.005415927564371875 +549 14 negative_sampler.num_negs_per_pos 77.0 +549 14 training.batch_size 1.0 +549 1 dataset """wn18rr""" +549 1 model """rescal""" +549 1 loss """nssa""" +549 1 regularizer """no""" +549 1 optimizer """adam""" +549 1 training_loop """owa""" +549 1 negative_sampler """basic""" +549 1 evaluator """rankbased""" +549 2 dataset """wn18rr""" +549 2 model """rescal""" +549 2 loss """nssa""" +549 2 regularizer """no""" +549 2 optimizer """adam""" +549 2 training_loop """owa""" +549 2 negative_sampler """basic""" +549 2 evaluator """rankbased""" +549 3 dataset """wn18rr""" +549 3 model """rescal""" +549 3 loss """nssa""" +549 3 regularizer """no""" 
+549 3 optimizer """adam""" +549 3 training_loop """owa""" +549 3 negative_sampler """basic""" +549 3 evaluator """rankbased""" +549 4 dataset """wn18rr""" +549 4 model """rescal""" +549 4 loss """nssa""" +549 4 regularizer """no""" +549 4 optimizer """adam""" +549 4 training_loop """owa""" +549 4 negative_sampler """basic""" +549 4 evaluator """rankbased""" +549 5 dataset """wn18rr""" +549 5 model """rescal""" +549 5 loss """nssa""" +549 5 regularizer """no""" +549 5 optimizer """adam""" +549 5 training_loop """owa""" +549 5 negative_sampler """basic""" +549 5 evaluator """rankbased""" +549 6 dataset """wn18rr""" +549 6 model """rescal""" +549 6 loss """nssa""" +549 6 regularizer """no""" +549 6 optimizer """adam""" +549 6 training_loop """owa""" +549 6 negative_sampler """basic""" +549 6 evaluator """rankbased""" +549 7 dataset """wn18rr""" +549 7 model """rescal""" +549 7 loss """nssa""" +549 7 regularizer """no""" +549 7 optimizer """adam""" +549 7 training_loop """owa""" +549 7 negative_sampler """basic""" +549 7 evaluator """rankbased""" +549 8 dataset """wn18rr""" +549 8 model """rescal""" +549 8 loss """nssa""" +549 8 regularizer """no""" +549 8 optimizer """adam""" +549 8 training_loop """owa""" +549 8 negative_sampler """basic""" +549 8 evaluator """rankbased""" +549 9 dataset """wn18rr""" +549 9 model """rescal""" +549 9 loss """nssa""" +549 9 regularizer """no""" +549 9 optimizer """adam""" +549 9 training_loop """owa""" +549 9 negative_sampler """basic""" +549 9 evaluator """rankbased""" +549 10 dataset """wn18rr""" +549 10 model """rescal""" +549 10 loss """nssa""" +549 10 regularizer """no""" +549 10 optimizer """adam""" +549 10 training_loop """owa""" +549 10 negative_sampler """basic""" +549 10 evaluator """rankbased""" +549 11 dataset """wn18rr""" +549 11 model """rescal""" +549 11 loss """nssa""" +549 11 regularizer """no""" +549 11 optimizer """adam""" +549 11 training_loop """owa""" +549 11 negative_sampler """basic""" +549 11 evaluator 
"""rankbased""" +549 12 dataset """wn18rr""" +549 12 model """rescal""" +549 12 loss """nssa""" +549 12 regularizer """no""" +549 12 optimizer """adam""" +549 12 training_loop """owa""" +549 12 negative_sampler """basic""" +549 12 evaluator """rankbased""" +549 13 dataset """wn18rr""" +549 13 model """rescal""" +549 13 loss """nssa""" +549 13 regularizer """no""" +549 13 optimizer """adam""" +549 13 training_loop """owa""" +549 13 negative_sampler """basic""" +549 13 evaluator """rankbased""" +549 14 dataset """wn18rr""" +549 14 model """rescal""" +549 14 loss """nssa""" +549 14 regularizer """no""" +549 14 optimizer """adam""" +549 14 training_loop """owa""" +549 14 negative_sampler """basic""" +549 14 evaluator """rankbased""" +550 1 model.embedding_dim 1.0 +550 1 loss.margin 22.50677683587858 +550 1 loss.adversarial_temperature 0.9476060387411311 +550 1 optimizer.lr 0.00890839251813458 +550 1 negative_sampler.num_negs_per_pos 57.0 +550 1 training.batch_size 0.0 +550 2 model.embedding_dim 1.0 +550 2 loss.margin 21.372316681599692 +550 2 loss.adversarial_temperature 0.5227672561716016 +550 2 optimizer.lr 0.0083602830351979 +550 2 negative_sampler.num_negs_per_pos 7.0 +550 2 training.batch_size 0.0 +550 3 model.embedding_dim 2.0 +550 3 loss.margin 9.284905235941276 +550 3 loss.adversarial_temperature 0.512437449944982 +550 3 optimizer.lr 0.0038784912471718867 +550 3 negative_sampler.num_negs_per_pos 98.0 +550 3 training.batch_size 0.0 +550 4 model.embedding_dim 0.0 +550 4 loss.margin 29.648862897619455 +550 4 loss.adversarial_temperature 0.33874987276671303 +550 4 optimizer.lr 0.012189735429375051 +550 4 negative_sampler.num_negs_per_pos 55.0 +550 4 training.batch_size 0.0 +550 5 model.embedding_dim 2.0 +550 5 loss.margin 19.700397112774805 +550 5 loss.adversarial_temperature 0.3500510366933278 +550 5 optimizer.lr 0.001025579382633407 +550 5 negative_sampler.num_negs_per_pos 21.0 +550 5 training.batch_size 1.0 +550 6 model.embedding_dim 1.0 +550 6 loss.margin 
4.862478713346704 +550 6 loss.adversarial_temperature 0.3319342683768453 +550 6 optimizer.lr 0.036868188917520514 +550 6 negative_sampler.num_negs_per_pos 3.0 +550 6 training.batch_size 0.0 +550 7 model.embedding_dim 2.0 +550 7 loss.margin 28.86938174080256 +550 7 loss.adversarial_temperature 0.6114455651158681 +550 7 optimizer.lr 0.0011151471200711398 +550 7 negative_sampler.num_negs_per_pos 67.0 +550 7 training.batch_size 0.0 +550 8 model.embedding_dim 1.0 +550 8 loss.margin 8.420233632337899 +550 8 loss.adversarial_temperature 0.7383559895785542 +550 8 optimizer.lr 0.009402431328986242 +550 8 negative_sampler.num_negs_per_pos 81.0 +550 8 training.batch_size 1.0 +550 9 model.embedding_dim 1.0 +550 9 loss.margin 8.843358917165956 +550 9 loss.adversarial_temperature 0.49387324084623735 +550 9 optimizer.lr 0.009554805651406982 +550 9 negative_sampler.num_negs_per_pos 0.0 +550 9 training.batch_size 0.0 +550 10 model.embedding_dim 2.0 +550 10 loss.margin 10.302994032120726 +550 10 loss.adversarial_temperature 0.772909013260488 +550 10 optimizer.lr 0.0029399056919560667 +550 10 negative_sampler.num_negs_per_pos 4.0 +550 10 training.batch_size 0.0 +550 11 model.embedding_dim 2.0 +550 11 loss.margin 8.99622308079867 +550 11 loss.adversarial_temperature 0.4894347822214643 +550 11 optimizer.lr 0.03138047574058225 +550 11 negative_sampler.num_negs_per_pos 97.0 +550 11 training.batch_size 1.0 +550 12 model.embedding_dim 2.0 +550 12 loss.margin 7.492152042215402 +550 12 loss.adversarial_temperature 0.114825601595409 +550 12 optimizer.lr 0.003941430921035488 +550 12 negative_sampler.num_negs_per_pos 11.0 +550 12 training.batch_size 0.0 +550 13 model.embedding_dim 0.0 +550 13 loss.margin 28.166126437981134 +550 13 loss.adversarial_temperature 0.5172984180416836 +550 13 optimizer.lr 0.0795980651445508 +550 13 negative_sampler.num_negs_per_pos 74.0 +550 13 training.batch_size 2.0 +550 14 model.embedding_dim 1.0 +550 14 loss.margin 23.639258180102647 +550 14 
loss.adversarial_temperature 0.49175760539116964 +550 14 optimizer.lr 0.06658614515868055 +550 14 negative_sampler.num_negs_per_pos 22.0 +550 14 training.batch_size 1.0 +550 15 model.embedding_dim 1.0 +550 15 loss.margin 18.049797819142615 +550 15 loss.adversarial_temperature 0.7409294105328406 +550 15 optimizer.lr 0.03802634184092628 +550 15 negative_sampler.num_negs_per_pos 0.0 +550 15 training.batch_size 1.0 +550 16 model.embedding_dim 0.0 +550 16 loss.margin 21.587364044085675 +550 16 loss.adversarial_temperature 0.951324269934882 +550 16 optimizer.lr 0.0011383672492455232 +550 16 negative_sampler.num_negs_per_pos 43.0 +550 16 training.batch_size 1.0 +550 17 model.embedding_dim 1.0 +550 17 loss.margin 9.310655463897513 +550 17 loss.adversarial_temperature 0.7308602922860015 +550 17 optimizer.lr 0.0016889586926886015 +550 17 negative_sampler.num_negs_per_pos 70.0 +550 17 training.batch_size 2.0 +550 18 model.embedding_dim 1.0 +550 18 loss.margin 26.877764514517906 +550 18 loss.adversarial_temperature 0.593115519723395 +550 18 optimizer.lr 0.006262730981870748 +550 18 negative_sampler.num_negs_per_pos 32.0 +550 18 training.batch_size 1.0 +550 19 model.embedding_dim 2.0 +550 19 loss.margin 16.052407263827828 +550 19 loss.adversarial_temperature 0.8654239015402774 +550 19 optimizer.lr 0.07764874539628751 +550 19 negative_sampler.num_negs_per_pos 14.0 +550 19 training.batch_size 2.0 +550 20 model.embedding_dim 1.0 +550 20 loss.margin 16.732970581639485 +550 20 loss.adversarial_temperature 0.6925621979872701 +550 20 optimizer.lr 0.001379179676984226 +550 20 negative_sampler.num_negs_per_pos 60.0 +550 20 training.batch_size 0.0 +550 21 model.embedding_dim 2.0 +550 21 loss.margin 27.936000124081772 +550 21 loss.adversarial_temperature 0.7350769738479231 +550 21 optimizer.lr 0.004966776260186127 +550 21 negative_sampler.num_negs_per_pos 80.0 +550 21 training.batch_size 2.0 +550 22 model.embedding_dim 0.0 +550 22 loss.margin 16.9442285614473 +550 22 
loss.adversarial_temperature 0.9584314904839282 +550 22 optimizer.lr 0.0032730680496631187 +550 22 negative_sampler.num_negs_per_pos 20.0 +550 22 training.batch_size 2.0 +550 23 model.embedding_dim 0.0 +550 23 loss.margin 1.6678593239408004 +550 23 loss.adversarial_temperature 0.8689920988574741 +550 23 optimizer.lr 0.02236190265140117 +550 23 negative_sampler.num_negs_per_pos 27.0 +550 23 training.batch_size 2.0 +550 24 model.embedding_dim 1.0 +550 24 loss.margin 29.36521433631491 +550 24 loss.adversarial_temperature 0.34156220729377806 +550 24 optimizer.lr 0.0012222230531994266 +550 24 negative_sampler.num_negs_per_pos 82.0 +550 24 training.batch_size 0.0 +550 25 model.embedding_dim 0.0 +550 25 loss.margin 3.323065422910194 +550 25 loss.adversarial_temperature 0.7772598180592702 +550 25 optimizer.lr 0.020363542318519008 +550 25 negative_sampler.num_negs_per_pos 53.0 +550 25 training.batch_size 0.0 +550 26 model.embedding_dim 1.0 +550 26 loss.margin 24.914869266606985 +550 26 loss.adversarial_temperature 0.7790216738210624 +550 26 optimizer.lr 0.0015017882089774057 +550 26 negative_sampler.num_negs_per_pos 51.0 +550 26 training.batch_size 0.0 +550 1 dataset """wn18rr""" +550 1 model """rescal""" +550 1 loss """nssa""" +550 1 regularizer """no""" +550 1 optimizer """adam""" +550 1 training_loop """owa""" +550 1 negative_sampler """basic""" +550 1 evaluator """rankbased""" +550 2 dataset """wn18rr""" +550 2 model """rescal""" +550 2 loss """nssa""" +550 2 regularizer """no""" +550 2 optimizer """adam""" +550 2 training_loop """owa""" +550 2 negative_sampler """basic""" +550 2 evaluator """rankbased""" +550 3 dataset """wn18rr""" +550 3 model """rescal""" +550 3 loss """nssa""" +550 3 regularizer """no""" +550 3 optimizer """adam""" +550 3 training_loop """owa""" +550 3 negative_sampler """basic""" +550 3 evaluator """rankbased""" +550 4 dataset """wn18rr""" +550 4 model """rescal""" +550 4 loss """nssa""" +550 4 regularizer """no""" +550 4 optimizer """adam""" +550 
4 training_loop """owa""" +550 4 negative_sampler """basic""" +550 4 evaluator """rankbased""" +550 5 dataset """wn18rr""" +550 5 model """rescal""" +550 5 loss """nssa""" +550 5 regularizer """no""" +550 5 optimizer """adam""" +550 5 training_loop """owa""" +550 5 negative_sampler """basic""" +550 5 evaluator """rankbased""" +550 6 dataset """wn18rr""" +550 6 model """rescal""" +550 6 loss """nssa""" +550 6 regularizer """no""" +550 6 optimizer """adam""" +550 6 training_loop """owa""" +550 6 negative_sampler """basic""" +550 6 evaluator """rankbased""" +550 7 dataset """wn18rr""" +550 7 model """rescal""" +550 7 loss """nssa""" +550 7 regularizer """no""" +550 7 optimizer """adam""" +550 7 training_loop """owa""" +550 7 negative_sampler """basic""" +550 7 evaluator """rankbased""" +550 8 dataset """wn18rr""" +550 8 model """rescal""" +550 8 loss """nssa""" +550 8 regularizer """no""" +550 8 optimizer """adam""" +550 8 training_loop """owa""" +550 8 negative_sampler """basic""" +550 8 evaluator """rankbased""" +550 9 dataset """wn18rr""" +550 9 model """rescal""" +550 9 loss """nssa""" +550 9 regularizer """no""" +550 9 optimizer """adam""" +550 9 training_loop """owa""" +550 9 negative_sampler """basic""" +550 9 evaluator """rankbased""" +550 10 dataset """wn18rr""" +550 10 model """rescal""" +550 10 loss """nssa""" +550 10 regularizer """no""" +550 10 optimizer """adam""" +550 10 training_loop """owa""" +550 10 negative_sampler """basic""" +550 10 evaluator """rankbased""" +550 11 dataset """wn18rr""" +550 11 model """rescal""" +550 11 loss """nssa""" +550 11 regularizer """no""" +550 11 optimizer """adam""" +550 11 training_loop """owa""" +550 11 negative_sampler """basic""" +550 11 evaluator """rankbased""" +550 12 dataset """wn18rr""" +550 12 model """rescal""" +550 12 loss """nssa""" +550 12 regularizer """no""" +550 12 optimizer """adam""" +550 12 training_loop """owa""" +550 12 negative_sampler """basic""" +550 12 evaluator """rankbased""" +550 13 dataset 
"""wn18rr""" +550 13 model """rescal""" +550 13 loss """nssa""" +550 13 regularizer """no""" +550 13 optimizer """adam""" +550 13 training_loop """owa""" +550 13 negative_sampler """basic""" +550 13 evaluator """rankbased""" +550 14 dataset """wn18rr""" +550 14 model """rescal""" +550 14 loss """nssa""" +550 14 regularizer """no""" +550 14 optimizer """adam""" +550 14 training_loop """owa""" +550 14 negative_sampler """basic""" +550 14 evaluator """rankbased""" +550 15 dataset """wn18rr""" +550 15 model """rescal""" +550 15 loss """nssa""" +550 15 regularizer """no""" +550 15 optimizer """adam""" +550 15 training_loop """owa""" +550 15 negative_sampler """basic""" +550 15 evaluator """rankbased""" +550 16 dataset """wn18rr""" +550 16 model """rescal""" +550 16 loss """nssa""" +550 16 regularizer """no""" +550 16 optimizer """adam""" +550 16 training_loop """owa""" +550 16 negative_sampler """basic""" +550 16 evaluator """rankbased""" +550 17 dataset """wn18rr""" +550 17 model """rescal""" +550 17 loss """nssa""" +550 17 regularizer """no""" +550 17 optimizer """adam""" +550 17 training_loop """owa""" +550 17 negative_sampler """basic""" +550 17 evaluator """rankbased""" +550 18 dataset """wn18rr""" +550 18 model """rescal""" +550 18 loss """nssa""" +550 18 regularizer """no""" +550 18 optimizer """adam""" +550 18 training_loop """owa""" +550 18 negative_sampler """basic""" +550 18 evaluator """rankbased""" +550 19 dataset """wn18rr""" +550 19 model """rescal""" +550 19 loss """nssa""" +550 19 regularizer """no""" +550 19 optimizer """adam""" +550 19 training_loop """owa""" +550 19 negative_sampler """basic""" +550 19 evaluator """rankbased""" +550 20 dataset """wn18rr""" +550 20 model """rescal""" +550 20 loss """nssa""" +550 20 regularizer """no""" +550 20 optimizer """adam""" +550 20 training_loop """owa""" +550 20 negative_sampler """basic""" +550 20 evaluator """rankbased""" +550 21 dataset """wn18rr""" +550 21 model """rescal""" +550 21 loss """nssa""" +550 21 
regularizer """no""" +550 21 optimizer """adam""" +550 21 training_loop """owa""" +550 21 negative_sampler """basic""" +550 21 evaluator """rankbased""" +550 22 dataset """wn18rr""" +550 22 model """rescal""" +550 22 loss """nssa""" +550 22 regularizer """no""" +550 22 optimizer """adam""" +550 22 training_loop """owa""" +550 22 negative_sampler """basic""" +550 22 evaluator """rankbased""" +550 23 dataset """wn18rr""" +550 23 model """rescal""" +550 23 loss """nssa""" +550 23 regularizer """no""" +550 23 optimizer """adam""" +550 23 training_loop """owa""" +550 23 negative_sampler """basic""" +550 23 evaluator """rankbased""" +550 24 dataset """wn18rr""" +550 24 model """rescal""" +550 24 loss """nssa""" +550 24 regularizer """no""" +550 24 optimizer """adam""" +550 24 training_loop """owa""" +550 24 negative_sampler """basic""" +550 24 evaluator """rankbased""" +550 25 dataset """wn18rr""" +550 25 model """rescal""" +550 25 loss """nssa""" +550 25 regularizer """no""" +550 25 optimizer """adam""" +550 25 training_loop """owa""" +550 25 negative_sampler """basic""" +550 25 evaluator """rankbased""" +550 26 dataset """wn18rr""" +550 26 model """rescal""" +550 26 loss """nssa""" +550 26 regularizer """no""" +550 26 optimizer """adam""" +550 26 training_loop """owa""" +550 26 negative_sampler """basic""" +550 26 evaluator """rankbased""" +551 1 model.embedding_dim 0.0 +551 1 optimizer.lr 0.04238236600997483 +551 1 training.batch_size 1.0 +551 1 training.label_smoothing 0.0011995045650183877 +551 2 model.embedding_dim 1.0 +551 2 optimizer.lr 0.0055611931017982025 +551 2 training.batch_size 2.0 +551 2 training.label_smoothing 0.006141912291354259 +551 3 model.embedding_dim 0.0 +551 3 optimizer.lr 0.013300168044423008 +551 3 training.batch_size 1.0 +551 3 training.label_smoothing 0.015904737088973945 +551 4 model.embedding_dim 1.0 +551 4 optimizer.lr 0.005546382210878629 +551 4 training.batch_size 1.0 +551 4 training.label_smoothing 0.0026477432096570336 +551 5 
model.embedding_dim 2.0 +551 5 optimizer.lr 0.004257185957222482 +551 5 training.batch_size 1.0 +551 5 training.label_smoothing 0.15100247653411392 +551 6 model.embedding_dim 1.0 +551 6 optimizer.lr 0.010409507290176716 +551 6 training.batch_size 1.0 +551 6 training.label_smoothing 0.1423816145318938 +551 7 model.embedding_dim 0.0 +551 7 optimizer.lr 0.025767647366153962 +551 7 training.batch_size 0.0 +551 7 training.label_smoothing 0.27386987937113394 +551 8 model.embedding_dim 1.0 +551 8 optimizer.lr 0.02932978032206615 +551 8 training.batch_size 2.0 +551 8 training.label_smoothing 0.0036606582117221435 +551 9 model.embedding_dim 2.0 +551 9 optimizer.lr 0.0022533542694489267 +551 9 training.batch_size 0.0 +551 9 training.label_smoothing 0.00646672241206799 +551 10 model.embedding_dim 0.0 +551 10 optimizer.lr 0.034410500015091494 +551 10 training.batch_size 0.0 +551 10 training.label_smoothing 0.0010280647729054252 +551 11 model.embedding_dim 2.0 +551 11 optimizer.lr 0.010156650669156738 +551 11 training.batch_size 1.0 +551 11 training.label_smoothing 0.0026447238940312647 +551 12 model.embedding_dim 2.0 +551 12 optimizer.lr 0.02331058667436208 +551 12 training.batch_size 1.0 +551 12 training.label_smoothing 0.030498749182061146 +551 13 model.embedding_dim 1.0 +551 13 optimizer.lr 0.004605935950358249 +551 13 training.batch_size 0.0 +551 13 training.label_smoothing 0.5942292722812675 +551 14 model.embedding_dim 1.0 +551 14 optimizer.lr 0.004085642290559893 +551 14 training.batch_size 1.0 +551 14 training.label_smoothing 0.014149396659971959 +551 15 model.embedding_dim 0.0 +551 15 optimizer.lr 0.002334915050115927 +551 15 training.batch_size 1.0 +551 15 training.label_smoothing 0.0248079528320194 +551 16 model.embedding_dim 0.0 +551 16 optimizer.lr 0.004028827242336013 +551 16 training.batch_size 1.0 +551 16 training.label_smoothing 0.0026617729118020355 +551 17 model.embedding_dim 1.0 +551 17 optimizer.lr 0.004234983849397542 +551 17 training.batch_size 0.0 +551 
17 training.label_smoothing 0.013556207272064583 +551 1 dataset """wn18rr""" +551 1 model """rescal""" +551 1 loss """bceaftersigmoid""" +551 1 regularizer """no""" +551 1 optimizer """adam""" +551 1 training_loop """lcwa""" +551 1 evaluator """rankbased""" +551 2 dataset """wn18rr""" +551 2 model """rescal""" +551 2 loss """bceaftersigmoid""" +551 2 regularizer """no""" +551 2 optimizer """adam""" +551 2 training_loop """lcwa""" +551 2 evaluator """rankbased""" +551 3 dataset """wn18rr""" +551 3 model """rescal""" +551 3 loss """bceaftersigmoid""" +551 3 regularizer """no""" +551 3 optimizer """adam""" +551 3 training_loop """lcwa""" +551 3 evaluator """rankbased""" +551 4 dataset """wn18rr""" +551 4 model """rescal""" +551 4 loss """bceaftersigmoid""" +551 4 regularizer """no""" +551 4 optimizer """adam""" +551 4 training_loop """lcwa""" +551 4 evaluator """rankbased""" +551 5 dataset """wn18rr""" +551 5 model """rescal""" +551 5 loss """bceaftersigmoid""" +551 5 regularizer """no""" +551 5 optimizer """adam""" +551 5 training_loop """lcwa""" +551 5 evaluator """rankbased""" +551 6 dataset """wn18rr""" +551 6 model """rescal""" +551 6 loss """bceaftersigmoid""" +551 6 regularizer """no""" +551 6 optimizer """adam""" +551 6 training_loop """lcwa""" +551 6 evaluator """rankbased""" +551 7 dataset """wn18rr""" +551 7 model """rescal""" +551 7 loss """bceaftersigmoid""" +551 7 regularizer """no""" +551 7 optimizer """adam""" +551 7 training_loop """lcwa""" +551 7 evaluator """rankbased""" +551 8 dataset """wn18rr""" +551 8 model """rescal""" +551 8 loss """bceaftersigmoid""" +551 8 regularizer """no""" +551 8 optimizer """adam""" +551 8 training_loop """lcwa""" +551 8 evaluator """rankbased""" +551 9 dataset """wn18rr""" +551 9 model """rescal""" +551 9 loss """bceaftersigmoid""" +551 9 regularizer """no""" +551 9 optimizer """adam""" +551 9 training_loop """lcwa""" +551 9 evaluator """rankbased""" +551 10 dataset """wn18rr""" +551 10 model """rescal""" +551 10 loss 
"""bceaftersigmoid""" +551 10 regularizer """no""" +551 10 optimizer """adam""" +551 10 training_loop """lcwa""" +551 10 evaluator """rankbased""" +551 11 dataset """wn18rr""" +551 11 model """rescal""" +551 11 loss """bceaftersigmoid""" +551 11 regularizer """no""" +551 11 optimizer """adam""" +551 11 training_loop """lcwa""" +551 11 evaluator """rankbased""" +551 12 dataset """wn18rr""" +551 12 model """rescal""" +551 12 loss """bceaftersigmoid""" +551 12 regularizer """no""" +551 12 optimizer """adam""" +551 12 training_loop """lcwa""" +551 12 evaluator """rankbased""" +551 13 dataset """wn18rr""" +551 13 model """rescal""" +551 13 loss """bceaftersigmoid""" +551 13 regularizer """no""" +551 13 optimizer """adam""" +551 13 training_loop """lcwa""" +551 13 evaluator """rankbased""" +551 14 dataset """wn18rr""" +551 14 model """rescal""" +551 14 loss """bceaftersigmoid""" +551 14 regularizer """no""" +551 14 optimizer """adam""" +551 14 training_loop """lcwa""" +551 14 evaluator """rankbased""" +551 15 dataset """wn18rr""" +551 15 model """rescal""" +551 15 loss """bceaftersigmoid""" +551 15 regularizer """no""" +551 15 optimizer """adam""" +551 15 training_loop """lcwa""" +551 15 evaluator """rankbased""" +551 16 dataset """wn18rr""" +551 16 model """rescal""" +551 16 loss """bceaftersigmoid""" +551 16 regularizer """no""" +551 16 optimizer """adam""" +551 16 training_loop """lcwa""" +551 16 evaluator """rankbased""" +551 17 dataset """wn18rr""" +551 17 model """rescal""" +551 17 loss """bceaftersigmoid""" +551 17 regularizer """no""" +551 17 optimizer """adam""" +551 17 training_loop """lcwa""" +551 17 evaluator """rankbased""" +552 1 model.embedding_dim 0.0 +552 1 optimizer.lr 0.009068771324572603 +552 1 training.batch_size 2.0 +552 1 training.label_smoothing 0.3846452528327486 +552 2 model.embedding_dim 2.0 +552 2 optimizer.lr 0.008594818496606297 +552 2 training.batch_size 0.0 +552 2 training.label_smoothing 0.023715932281541244 +552 3 model.embedding_dim 2.0 
+552 3 optimizer.lr 0.009884909914815266 +552 3 training.batch_size 1.0 +552 3 training.label_smoothing 0.1874465275085692 +552 4 model.embedding_dim 1.0 +552 4 optimizer.lr 0.0032248363558977956 +552 4 training.batch_size 1.0 +552 4 training.label_smoothing 0.003619025789703144 +552 5 model.embedding_dim 2.0 +552 5 optimizer.lr 0.0057542794006677014 +552 5 training.batch_size 2.0 +552 5 training.label_smoothing 0.9861547394936429 +552 6 model.embedding_dim 2.0 +552 6 optimizer.lr 0.0019017880177522595 +552 6 training.batch_size 1.0 +552 6 training.label_smoothing 0.004643808746631112 +552 7 model.embedding_dim 2.0 +552 7 optimizer.lr 0.09718460445379719 +552 7 training.batch_size 2.0 +552 7 training.label_smoothing 0.016226402985541257 +552 1 dataset """wn18rr""" +552 1 model """rescal""" +552 1 loss """softplus""" +552 1 regularizer """no""" +552 1 optimizer """adam""" +552 1 training_loop """lcwa""" +552 1 evaluator """rankbased""" +552 2 dataset """wn18rr""" +552 2 model """rescal""" +552 2 loss """softplus""" +552 2 regularizer """no""" +552 2 optimizer """adam""" +552 2 training_loop """lcwa""" +552 2 evaluator """rankbased""" +552 3 dataset """wn18rr""" +552 3 model """rescal""" +552 3 loss """softplus""" +552 3 regularizer """no""" +552 3 optimizer """adam""" +552 3 training_loop """lcwa""" +552 3 evaluator """rankbased""" +552 4 dataset """wn18rr""" +552 4 model """rescal""" +552 4 loss """softplus""" +552 4 regularizer """no""" +552 4 optimizer """adam""" +552 4 training_loop """lcwa""" +552 4 evaluator """rankbased""" +552 5 dataset """wn18rr""" +552 5 model """rescal""" +552 5 loss """softplus""" +552 5 regularizer """no""" +552 5 optimizer """adam""" +552 5 training_loop """lcwa""" +552 5 evaluator """rankbased""" +552 6 dataset """wn18rr""" +552 6 model """rescal""" +552 6 loss """softplus""" +552 6 regularizer """no""" +552 6 optimizer """adam""" +552 6 training_loop """lcwa""" +552 6 evaluator """rankbased""" +552 7 dataset """wn18rr""" +552 7 model 
"""rescal""" +552 7 loss """softplus""" +552 7 regularizer """no""" +552 7 optimizer """adam""" +552 7 training_loop """lcwa""" +552 7 evaluator """rankbased""" +553 1 model.embedding_dim 0.0 +553 1 optimizer.lr 0.001274976497199893 +553 1 training.batch_size 0.0 +553 1 training.label_smoothing 0.6799804111041763 +553 2 model.embedding_dim 1.0 +553 2 optimizer.lr 0.0319340289985087 +553 2 training.batch_size 1.0 +553 2 training.label_smoothing 0.004150959024056524 +553 3 model.embedding_dim 1.0 +553 3 optimizer.lr 0.023403153973865748 +553 3 training.batch_size 1.0 +553 3 training.label_smoothing 0.1766877461720385 +553 4 model.embedding_dim 1.0 +553 4 optimizer.lr 0.095484003621536 +553 4 training.batch_size 2.0 +553 4 training.label_smoothing 0.10101314147335208 +553 5 model.embedding_dim 0.0 +553 5 optimizer.lr 0.009573124102681647 +553 5 training.batch_size 1.0 +553 5 training.label_smoothing 0.00948371820721187 +553 6 model.embedding_dim 1.0 +553 6 optimizer.lr 0.007745961298730914 +553 6 training.batch_size 2.0 +553 6 training.label_smoothing 0.12029765422212207 +553 7 model.embedding_dim 2.0 +553 7 optimizer.lr 0.009379361556343077 +553 7 training.batch_size 0.0 +553 7 training.label_smoothing 0.26964004906826916 +553 8 model.embedding_dim 2.0 +553 8 optimizer.lr 0.0014909738012675124 +553 8 training.batch_size 2.0 +553 8 training.label_smoothing 0.09540264773318204 +553 9 model.embedding_dim 2.0 +553 9 optimizer.lr 0.001550857783558854 +553 9 training.batch_size 2.0 +553 9 training.label_smoothing 0.1757309778820203 +553 10 model.embedding_dim 0.0 +553 10 optimizer.lr 0.01581550148787048 +553 10 training.batch_size 1.0 +553 10 training.label_smoothing 0.0065954857135170466 +553 11 model.embedding_dim 0.0 +553 11 optimizer.lr 0.001243002656472478 +553 11 training.batch_size 0.0 +553 11 training.label_smoothing 0.14704011993388086 +553 12 model.embedding_dim 0.0 +553 12 optimizer.lr 0.0011945366629554466 +553 12 training.batch_size 1.0 +553 12 
training.label_smoothing 0.005053743184652808 +553 13 model.embedding_dim 2.0 +553 13 optimizer.lr 0.02993451506539889 +553 13 training.batch_size 1.0 +553 13 training.label_smoothing 0.3948816124864084 +553 14 model.embedding_dim 1.0 +553 14 optimizer.lr 0.08976114253913194 +553 14 training.batch_size 2.0 +553 14 training.label_smoothing 0.11363626489094007 +553 15 model.embedding_dim 1.0 +553 15 optimizer.lr 0.06914721305637811 +553 15 training.batch_size 1.0 +553 15 training.label_smoothing 0.0020068810454309024 +553 16 model.embedding_dim 1.0 +553 16 optimizer.lr 0.001084561113504461 +553 16 training.batch_size 1.0 +553 16 training.label_smoothing 0.04565065799019173 +553 17 model.embedding_dim 0.0 +553 17 optimizer.lr 0.09328249929258789 +553 17 training.batch_size 1.0 +553 17 training.label_smoothing 0.017356762645021083 +553 18 model.embedding_dim 0.0 +553 18 optimizer.lr 0.08605515879314726 +553 18 training.batch_size 1.0 +553 18 training.label_smoothing 0.015861415026296333 +553 19 model.embedding_dim 0.0 +553 19 optimizer.lr 0.013973843869169975 +553 19 training.batch_size 0.0 +553 19 training.label_smoothing 0.006954864904220343 +553 20 model.embedding_dim 1.0 +553 20 optimizer.lr 0.002441431654870073 +553 20 training.batch_size 1.0 +553 20 training.label_smoothing 0.006266760722313739 +553 21 model.embedding_dim 2.0 +553 21 optimizer.lr 0.008721400843947368 +553 21 training.batch_size 0.0 +553 21 training.label_smoothing 0.34137740107383563 +553 22 model.embedding_dim 1.0 +553 22 optimizer.lr 0.01536864499422464 +553 22 training.batch_size 1.0 +553 22 training.label_smoothing 0.7696342457307439 +553 1 dataset """wn18rr""" +553 1 model """rescal""" +553 1 loss """bceaftersigmoid""" +553 1 regularizer """no""" +553 1 optimizer """adam""" +553 1 training_loop """lcwa""" +553 1 evaluator """rankbased""" +553 2 dataset """wn18rr""" +553 2 model """rescal""" +553 2 loss """bceaftersigmoid""" +553 2 regularizer """no""" +553 2 optimizer """adam""" +553 2 
training_loop """lcwa""" +553 2 evaluator """rankbased""" +553 3 dataset """wn18rr""" +553 3 model """rescal""" +553 3 loss """bceaftersigmoid""" +553 3 regularizer """no""" +553 3 optimizer """adam""" +553 3 training_loop """lcwa""" +553 3 evaluator """rankbased""" +553 4 dataset """wn18rr""" +553 4 model """rescal""" +553 4 loss """bceaftersigmoid""" +553 4 regularizer """no""" +553 4 optimizer """adam""" +553 4 training_loop """lcwa""" +553 4 evaluator """rankbased""" +553 5 dataset """wn18rr""" +553 5 model """rescal""" +553 5 loss """bceaftersigmoid""" +553 5 regularizer """no""" +553 5 optimizer """adam""" +553 5 training_loop """lcwa""" +553 5 evaluator """rankbased""" +553 6 dataset """wn18rr""" +553 6 model """rescal""" +553 6 loss """bceaftersigmoid""" +553 6 regularizer """no""" +553 6 optimizer """adam""" +553 6 training_loop """lcwa""" +553 6 evaluator """rankbased""" +553 7 dataset """wn18rr""" +553 7 model """rescal""" +553 7 loss """bceaftersigmoid""" +553 7 regularizer """no""" +553 7 optimizer """adam""" +553 7 training_loop """lcwa""" +553 7 evaluator """rankbased""" +553 8 dataset """wn18rr""" +553 8 model """rescal""" +553 8 loss """bceaftersigmoid""" +553 8 regularizer """no""" +553 8 optimizer """adam""" +553 8 training_loop """lcwa""" +553 8 evaluator """rankbased""" +553 9 dataset """wn18rr""" +553 9 model """rescal""" +553 9 loss """bceaftersigmoid""" +553 9 regularizer """no""" +553 9 optimizer """adam""" +553 9 training_loop """lcwa""" +553 9 evaluator """rankbased""" +553 10 dataset """wn18rr""" +553 10 model """rescal""" +553 10 loss """bceaftersigmoid""" +553 10 regularizer """no""" +553 10 optimizer """adam""" +553 10 training_loop """lcwa""" +553 10 evaluator """rankbased""" +553 11 dataset """wn18rr""" +553 11 model """rescal""" +553 11 loss """bceaftersigmoid""" +553 11 regularizer """no""" +553 11 optimizer """adam""" +553 11 training_loop """lcwa""" +553 11 evaluator """rankbased""" +553 12 dataset """wn18rr""" +553 12 model 
"""rescal""" +553 12 loss """bceaftersigmoid""" +553 12 regularizer """no""" +553 12 optimizer """adam""" +553 12 training_loop """lcwa""" +553 12 evaluator """rankbased""" +553 13 dataset """wn18rr""" +553 13 model """rescal""" +553 13 loss """bceaftersigmoid""" +553 13 regularizer """no""" +553 13 optimizer """adam""" +553 13 training_loop """lcwa""" +553 13 evaluator """rankbased""" +553 14 dataset """wn18rr""" +553 14 model """rescal""" +553 14 loss """bceaftersigmoid""" +553 14 regularizer """no""" +553 14 optimizer """adam""" +553 14 training_loop """lcwa""" +553 14 evaluator """rankbased""" +553 15 dataset """wn18rr""" +553 15 model """rescal""" +553 15 loss """bceaftersigmoid""" +553 15 regularizer """no""" +553 15 optimizer """adam""" +553 15 training_loop """lcwa""" +553 15 evaluator """rankbased""" +553 16 dataset """wn18rr""" +553 16 model """rescal""" +553 16 loss """bceaftersigmoid""" +553 16 regularizer """no""" +553 16 optimizer """adam""" +553 16 training_loop """lcwa""" +553 16 evaluator """rankbased""" +553 17 dataset """wn18rr""" +553 17 model """rescal""" +553 17 loss """bceaftersigmoid""" +553 17 regularizer """no""" +553 17 optimizer """adam""" +553 17 training_loop """lcwa""" +553 17 evaluator """rankbased""" +553 18 dataset """wn18rr""" +553 18 model """rescal""" +553 18 loss """bceaftersigmoid""" +553 18 regularizer """no""" +553 18 optimizer """adam""" +553 18 training_loop """lcwa""" +553 18 evaluator """rankbased""" +553 19 dataset """wn18rr""" +553 19 model """rescal""" +553 19 loss """bceaftersigmoid""" +553 19 regularizer """no""" +553 19 optimizer """adam""" +553 19 training_loop """lcwa""" +553 19 evaluator """rankbased""" +553 20 dataset """wn18rr""" +553 20 model """rescal""" +553 20 loss """bceaftersigmoid""" +553 20 regularizer """no""" +553 20 optimizer """adam""" +553 20 training_loop """lcwa""" +553 20 evaluator """rankbased""" +553 21 dataset """wn18rr""" +553 21 model """rescal""" +553 21 loss """bceaftersigmoid""" +553 21 
regularizer """no""" +553 21 optimizer """adam""" +553 21 training_loop """lcwa""" +553 21 evaluator """rankbased""" +553 22 dataset """wn18rr""" +553 22 model """rescal""" +553 22 loss """bceaftersigmoid""" +553 22 regularizer """no""" +553 22 optimizer """adam""" +553 22 training_loop """lcwa""" +553 22 evaluator """rankbased""" +554 1 model.embedding_dim 2.0 +554 1 optimizer.lr 0.03549622688043173 +554 1 training.batch_size 1.0 +554 1 training.label_smoothing 0.005650345466152887 +554 2 model.embedding_dim 1.0 +554 2 optimizer.lr 0.0017902119335500939 +554 2 training.batch_size 1.0 +554 2 training.label_smoothing 0.22308343825166563 +554 3 model.embedding_dim 0.0 +554 3 optimizer.lr 0.060794304766456794 +554 3 training.batch_size 1.0 +554 3 training.label_smoothing 0.6282503448740406 +554 4 model.embedding_dim 2.0 +554 4 optimizer.lr 0.0108908688063009 +554 4 training.batch_size 0.0 +554 4 training.label_smoothing 0.16654896694157303 +554 5 model.embedding_dim 0.0 +554 5 optimizer.lr 0.0027112191841804946 +554 5 training.batch_size 0.0 +554 5 training.label_smoothing 0.15784570890381855 +554 6 model.embedding_dim 2.0 +554 6 optimizer.lr 0.0013333266208972623 +554 6 training.batch_size 1.0 +554 6 training.label_smoothing 0.0516993538042116 +554 7 model.embedding_dim 0.0 +554 7 optimizer.lr 0.003481755585086227 +554 7 training.batch_size 2.0 +554 7 training.label_smoothing 0.26971153251979435 +554 8 model.embedding_dim 2.0 +554 8 optimizer.lr 0.008289675194289258 +554 8 training.batch_size 2.0 +554 8 training.label_smoothing 0.03732169059631493 +554 9 model.embedding_dim 2.0 +554 9 optimizer.lr 0.0043559627089591 +554 9 training.batch_size 0.0 +554 9 training.label_smoothing 0.006163753051782235 +554 1 dataset """wn18rr""" +554 1 model """rescal""" +554 1 loss """softplus""" +554 1 regularizer """no""" +554 1 optimizer """adam""" +554 1 training_loop """lcwa""" +554 1 evaluator """rankbased""" +554 2 dataset """wn18rr""" +554 2 model """rescal""" +554 2 loss 
"""softplus""" +554 2 regularizer """no""" +554 2 optimizer """adam""" +554 2 training_loop """lcwa""" +554 2 evaluator """rankbased""" +554 3 dataset """wn18rr""" +554 3 model """rescal""" +554 3 loss """softplus""" +554 3 regularizer """no""" +554 3 optimizer """adam""" +554 3 training_loop """lcwa""" +554 3 evaluator """rankbased""" +554 4 dataset """wn18rr""" +554 4 model """rescal""" +554 4 loss """softplus""" +554 4 regularizer """no""" +554 4 optimizer """adam""" +554 4 training_loop """lcwa""" +554 4 evaluator """rankbased""" +554 5 dataset """wn18rr""" +554 5 model """rescal""" +554 5 loss """softplus""" +554 5 regularizer """no""" +554 5 optimizer """adam""" +554 5 training_loop """lcwa""" +554 5 evaluator """rankbased""" +554 6 dataset """wn18rr""" +554 6 model """rescal""" +554 6 loss """softplus""" +554 6 regularizer """no""" +554 6 optimizer """adam""" +554 6 training_loop """lcwa""" +554 6 evaluator """rankbased""" +554 7 dataset """wn18rr""" +554 7 model """rescal""" +554 7 loss """softplus""" +554 7 regularizer """no""" +554 7 optimizer """adam""" +554 7 training_loop """lcwa""" +554 7 evaluator """rankbased""" +554 8 dataset """wn18rr""" +554 8 model """rescal""" +554 8 loss """softplus""" +554 8 regularizer """no""" +554 8 optimizer """adam""" +554 8 training_loop """lcwa""" +554 8 evaluator """rankbased""" +554 9 dataset """wn18rr""" +554 9 model """rescal""" +554 9 loss """softplus""" +554 9 regularizer """no""" +554 9 optimizer """adam""" +554 9 training_loop """lcwa""" +554 9 evaluator """rankbased""" +555 1 model.embedding_dim 0.0 +555 1 optimizer.lr 0.026805899827081807 +555 1 training.batch_size 1.0 +555 1 training.label_smoothing 0.03877475995534091 +555 2 model.embedding_dim 2.0 +555 2 optimizer.lr 0.0012522355273936001 +555 2 training.batch_size 0.0 +555 2 training.label_smoothing 0.08796828973423983 +555 3 model.embedding_dim 1.0 +555 3 optimizer.lr 0.002095012622709335 +555 3 training.batch_size 0.0 +555 3 training.label_smoothing 
0.0768317713822409 +555 4 model.embedding_dim 1.0 +555 4 optimizer.lr 0.05158114105670454 +555 4 training.batch_size 1.0 +555 4 training.label_smoothing 0.002269410988704251 +555 1 dataset """wn18rr""" +555 1 model """rescal""" +555 1 loss """crossentropy""" +555 1 regularizer """no""" +555 1 optimizer """adam""" +555 1 training_loop """lcwa""" +555 1 evaluator """rankbased""" +555 2 dataset """wn18rr""" +555 2 model """rescal""" +555 2 loss """crossentropy""" +555 2 regularizer """no""" +555 2 optimizer """adam""" +555 2 training_loop """lcwa""" +555 2 evaluator """rankbased""" +555 3 dataset """wn18rr""" +555 3 model """rescal""" +555 3 loss """crossentropy""" +555 3 regularizer """no""" +555 3 optimizer """adam""" +555 3 training_loop """lcwa""" +555 3 evaluator """rankbased""" +555 4 dataset """wn18rr""" +555 4 model """rescal""" +555 4 loss """crossentropy""" +555 4 regularizer """no""" +555 4 optimizer """adam""" +555 4 training_loop """lcwa""" +555 4 evaluator """rankbased""" +556 1 model.embedding_dim 2.0 +556 1 optimizer.lr 0.02315033216613753 +556 1 training.batch_size 0.0 +556 1 training.label_smoothing 0.3771937001412656 +556 2 model.embedding_dim 2.0 +556 2 optimizer.lr 0.010107160597317399 +556 2 training.batch_size 2.0 +556 2 training.label_smoothing 0.46232808539562165 +556 3 model.embedding_dim 0.0 +556 3 optimizer.lr 0.02529906021042848 +556 3 training.batch_size 2.0 +556 3 training.label_smoothing 0.8694660074798761 +556 4 model.embedding_dim 0.0 +556 4 optimizer.lr 0.010493491080483218 +556 4 training.batch_size 0.0 +556 4 training.label_smoothing 0.041384184772920926 +556 5 model.embedding_dim 0.0 +556 5 optimizer.lr 0.011172952010645667 +556 5 training.batch_size 2.0 +556 5 training.label_smoothing 0.01928454193796023 +556 6 model.embedding_dim 2.0 +556 6 optimizer.lr 0.001332455387430916 +556 6 training.batch_size 1.0 +556 6 training.label_smoothing 0.43600712136607944 +556 1 dataset """wn18rr""" +556 1 model """rescal""" +556 1 loss 
"""crossentropy""" +556 1 regularizer """no""" +556 1 optimizer """adam""" +556 1 training_loop """lcwa""" +556 1 evaluator """rankbased""" +556 2 dataset """wn18rr""" +556 2 model """rescal""" +556 2 loss """crossentropy""" +556 2 regularizer """no""" +556 2 optimizer """adam""" +556 2 training_loop """lcwa""" +556 2 evaluator """rankbased""" +556 3 dataset """wn18rr""" +556 3 model """rescal""" +556 3 loss """crossentropy""" +556 3 regularizer """no""" +556 3 optimizer """adam""" +556 3 training_loop """lcwa""" +556 3 evaluator """rankbased""" +556 4 dataset """wn18rr""" +556 4 model """rescal""" +556 4 loss """crossentropy""" +556 4 regularizer """no""" +556 4 optimizer """adam""" +556 4 training_loop """lcwa""" +556 4 evaluator """rankbased""" +556 5 dataset """wn18rr""" +556 5 model """rescal""" +556 5 loss """crossentropy""" +556 5 regularizer """no""" +556 5 optimizer """adam""" +556 5 training_loop """lcwa""" +556 5 evaluator """rankbased""" +556 6 dataset """wn18rr""" +556 6 model """rescal""" +556 6 loss """crossentropy""" +556 6 regularizer """no""" +556 6 optimizer """adam""" +556 6 training_loop """lcwa""" +556 6 evaluator """rankbased""" +557 1 model.embedding_dim 0.0 +557 1 optimizer.lr 0.0033136427016462024 +557 1 negative_sampler.num_negs_per_pos 9.0 +557 1 training.batch_size 2.0 +557 2 model.embedding_dim 2.0 +557 2 optimizer.lr 0.07103274245127665 +557 2 negative_sampler.num_negs_per_pos 57.0 +557 2 training.batch_size 2.0 +557 3 model.embedding_dim 2.0 +557 3 optimizer.lr 0.00586753558703391 +557 3 negative_sampler.num_negs_per_pos 71.0 +557 3 training.batch_size 2.0 +557 4 model.embedding_dim 2.0 +557 4 optimizer.lr 0.012760592509553696 +557 4 negative_sampler.num_negs_per_pos 96.0 +557 4 training.batch_size 2.0 +557 5 model.embedding_dim 0.0 +557 5 optimizer.lr 0.002239806236069333 +557 5 negative_sampler.num_negs_per_pos 9.0 +557 5 training.batch_size 0.0 +557 6 model.embedding_dim 2.0 +557 6 optimizer.lr 0.01326398226755423 +557 6 
negative_sampler.num_negs_per_pos 64.0 +557 6 training.batch_size 0.0 +557 7 model.embedding_dim 0.0 +557 7 optimizer.lr 0.028955699163771092 +557 7 negative_sampler.num_negs_per_pos 13.0 +557 7 training.batch_size 2.0 +557 8 model.embedding_dim 1.0 +557 8 optimizer.lr 0.04481713389777646 +557 8 negative_sampler.num_negs_per_pos 59.0 +557 8 training.batch_size 0.0 +557 9 model.embedding_dim 0.0 +557 9 optimizer.lr 0.04085631683742439 +557 9 negative_sampler.num_negs_per_pos 28.0 +557 9 training.batch_size 2.0 +557 10 model.embedding_dim 2.0 +557 10 optimizer.lr 0.004706478756493159 +557 10 negative_sampler.num_negs_per_pos 72.0 +557 10 training.batch_size 0.0 +557 11 model.embedding_dim 1.0 +557 11 optimizer.lr 0.002338226705348678 +557 11 negative_sampler.num_negs_per_pos 48.0 +557 11 training.batch_size 0.0 +557 12 model.embedding_dim 2.0 +557 12 optimizer.lr 0.006859835831026717 +557 12 negative_sampler.num_negs_per_pos 3.0 +557 12 training.batch_size 0.0 +557 13 model.embedding_dim 2.0 +557 13 optimizer.lr 0.025206793454876298 +557 13 negative_sampler.num_negs_per_pos 58.0 +557 13 training.batch_size 2.0 +557 14 model.embedding_dim 2.0 +557 14 optimizer.lr 0.0011976069592192428 +557 14 negative_sampler.num_negs_per_pos 27.0 +557 14 training.batch_size 2.0 +557 15 model.embedding_dim 0.0 +557 15 optimizer.lr 0.002946749545968406 +557 15 negative_sampler.num_negs_per_pos 34.0 +557 15 training.batch_size 0.0 +557 16 model.embedding_dim 1.0 +557 16 optimizer.lr 0.03242693257654563 +557 16 negative_sampler.num_negs_per_pos 97.0 +557 16 training.batch_size 0.0 +557 17 model.embedding_dim 2.0 +557 17 optimizer.lr 0.013104602634484841 +557 17 negative_sampler.num_negs_per_pos 66.0 +557 17 training.batch_size 2.0 +557 18 model.embedding_dim 1.0 +557 18 optimizer.lr 0.0028951766042112016 +557 18 negative_sampler.num_negs_per_pos 4.0 +557 18 training.batch_size 0.0 +557 19 model.embedding_dim 1.0 +557 19 optimizer.lr 0.04600228684331003 +557 19 
negative_sampler.num_negs_per_pos 98.0 +557 19 training.batch_size 1.0 +557 1 dataset """wn18rr""" +557 1 model """rescal""" +557 1 loss """softplus""" +557 1 regularizer """no""" +557 1 optimizer """adam""" +557 1 training_loop """owa""" +557 1 negative_sampler """basic""" +557 1 evaluator """rankbased""" +557 2 dataset """wn18rr""" +557 2 model """rescal""" +557 2 loss """softplus""" +557 2 regularizer """no""" +557 2 optimizer """adam""" +557 2 training_loop """owa""" +557 2 negative_sampler """basic""" +557 2 evaluator """rankbased""" +557 3 dataset """wn18rr""" +557 3 model """rescal""" +557 3 loss """softplus""" +557 3 regularizer """no""" +557 3 optimizer """adam""" +557 3 training_loop """owa""" +557 3 negative_sampler """basic""" +557 3 evaluator """rankbased""" +557 4 dataset """wn18rr""" +557 4 model """rescal""" +557 4 loss """softplus""" +557 4 regularizer """no""" +557 4 optimizer """adam""" +557 4 training_loop """owa""" +557 4 negative_sampler """basic""" +557 4 evaluator """rankbased""" +557 5 dataset """wn18rr""" +557 5 model """rescal""" +557 5 loss """softplus""" +557 5 regularizer """no""" +557 5 optimizer """adam""" +557 5 training_loop """owa""" +557 5 negative_sampler """basic""" +557 5 evaluator """rankbased""" +557 6 dataset """wn18rr""" +557 6 model """rescal""" +557 6 loss """softplus""" +557 6 regularizer """no""" +557 6 optimizer """adam""" +557 6 training_loop """owa""" +557 6 negative_sampler """basic""" +557 6 evaluator """rankbased""" +557 7 dataset """wn18rr""" +557 7 model """rescal""" +557 7 loss """softplus""" +557 7 regularizer """no""" +557 7 optimizer """adam""" +557 7 training_loop """owa""" +557 7 negative_sampler """basic""" +557 7 evaluator """rankbased""" +557 8 dataset """wn18rr""" +557 8 model """rescal""" +557 8 loss """softplus""" +557 8 regularizer """no""" +557 8 optimizer """adam""" +557 8 training_loop """owa""" +557 8 negative_sampler """basic""" +557 8 evaluator """rankbased""" +557 9 dataset """wn18rr""" +557 
9 model """rescal""" +557 9 loss """softplus""" +557 9 regularizer """no""" +557 9 optimizer """adam""" +557 9 training_loop """owa""" +557 9 negative_sampler """basic""" +557 9 evaluator """rankbased""" +557 10 dataset """wn18rr""" +557 10 model """rescal""" +557 10 loss """softplus""" +557 10 regularizer """no""" +557 10 optimizer """adam""" +557 10 training_loop """owa""" +557 10 negative_sampler """basic""" +557 10 evaluator """rankbased""" +557 11 dataset """wn18rr""" +557 11 model """rescal""" +557 11 loss """softplus""" +557 11 regularizer """no""" +557 11 optimizer """adam""" +557 11 training_loop """owa""" +557 11 negative_sampler """basic""" +557 11 evaluator """rankbased""" +557 12 dataset """wn18rr""" +557 12 model """rescal""" +557 12 loss """softplus""" +557 12 regularizer """no""" +557 12 optimizer """adam""" +557 12 training_loop """owa""" +557 12 negative_sampler """basic""" +557 12 evaluator """rankbased""" +557 13 dataset """wn18rr""" +557 13 model """rescal""" +557 13 loss """softplus""" +557 13 regularizer """no""" +557 13 optimizer """adam""" +557 13 training_loop """owa""" +557 13 negative_sampler """basic""" +557 13 evaluator """rankbased""" +557 14 dataset """wn18rr""" +557 14 model """rescal""" +557 14 loss """softplus""" +557 14 regularizer """no""" +557 14 optimizer """adam""" +557 14 training_loop """owa""" +557 14 negative_sampler """basic""" +557 14 evaluator """rankbased""" +557 15 dataset """wn18rr""" +557 15 model """rescal""" +557 15 loss """softplus""" +557 15 regularizer """no""" +557 15 optimizer """adam""" +557 15 training_loop """owa""" +557 15 negative_sampler """basic""" +557 15 evaluator """rankbased""" +557 16 dataset """wn18rr""" +557 16 model """rescal""" +557 16 loss """softplus""" +557 16 regularizer """no""" +557 16 optimizer """adam""" +557 16 training_loop """owa""" +557 16 negative_sampler """basic""" +557 16 evaluator """rankbased""" +557 17 dataset """wn18rr""" +557 17 model """rescal""" +557 17 loss 
"""softplus""" +557 17 regularizer """no""" +557 17 optimizer """adam""" +557 17 training_loop """owa""" +557 17 negative_sampler """basic""" +557 17 evaluator """rankbased""" +557 18 dataset """wn18rr""" +557 18 model """rescal""" +557 18 loss """softplus""" +557 18 regularizer """no""" +557 18 optimizer """adam""" +557 18 training_loop """owa""" +557 18 negative_sampler """basic""" +557 18 evaluator """rankbased""" +557 19 dataset """wn18rr""" +557 19 model """rescal""" +557 19 loss """softplus""" +557 19 regularizer """no""" +557 19 optimizer """adam""" +557 19 training_loop """owa""" +557 19 negative_sampler """basic""" +557 19 evaluator """rankbased""" +558 1 model.embedding_dim 0.0 +558 1 optimizer.lr 0.04054935958719266 +558 1 negative_sampler.num_negs_per_pos 69.0 +558 1 training.batch_size 1.0 +558 2 model.embedding_dim 0.0 +558 2 optimizer.lr 0.0034306138784086725 +558 2 negative_sampler.num_negs_per_pos 98.0 +558 2 training.batch_size 1.0 +558 3 model.embedding_dim 1.0 +558 3 optimizer.lr 0.0036509617572079585 +558 3 negative_sampler.num_negs_per_pos 48.0 +558 3 training.batch_size 2.0 +558 4 model.embedding_dim 2.0 +558 4 optimizer.lr 0.007220811364360398 +558 4 negative_sampler.num_negs_per_pos 24.0 +558 4 training.batch_size 1.0 +558 5 model.embedding_dim 0.0 +558 5 optimizer.lr 0.017353496957232574 +558 5 negative_sampler.num_negs_per_pos 1.0 +558 5 training.batch_size 1.0 +558 6 model.embedding_dim 1.0 +558 6 optimizer.lr 0.07614740519019311 +558 6 negative_sampler.num_negs_per_pos 29.0 +558 6 training.batch_size 1.0 +558 7 model.embedding_dim 1.0 +558 7 optimizer.lr 0.030845151265942065 +558 7 negative_sampler.num_negs_per_pos 12.0 +558 7 training.batch_size 0.0 +558 8 model.embedding_dim 0.0 +558 8 optimizer.lr 0.006129484660667338 +558 8 negative_sampler.num_negs_per_pos 84.0 +558 8 training.batch_size 1.0 +558 9 model.embedding_dim 1.0 +558 9 optimizer.lr 0.006256850797514773 +558 9 negative_sampler.num_negs_per_pos 33.0 +558 9 
training.batch_size 2.0 +558 10 model.embedding_dim 1.0 +558 10 optimizer.lr 0.004688659793594282 +558 10 negative_sampler.num_negs_per_pos 60.0 +558 10 training.batch_size 2.0 +558 11 model.embedding_dim 1.0 +558 11 optimizer.lr 0.011704399945269317 +558 11 negative_sampler.num_negs_per_pos 85.0 +558 11 training.batch_size 0.0 +558 12 model.embedding_dim 0.0 +558 12 optimizer.lr 0.003213411547810531 +558 12 negative_sampler.num_negs_per_pos 15.0 +558 12 training.batch_size 2.0 +558 13 model.embedding_dim 2.0 +558 13 optimizer.lr 0.027230348243449227 +558 13 negative_sampler.num_negs_per_pos 27.0 +558 13 training.batch_size 0.0 +558 14 model.embedding_dim 2.0 +558 14 optimizer.lr 0.05037599198661951 +558 14 negative_sampler.num_negs_per_pos 32.0 +558 14 training.batch_size 2.0 +558 15 model.embedding_dim 2.0 +558 15 optimizer.lr 0.01093969192106289 +558 15 negative_sampler.num_negs_per_pos 95.0 +558 15 training.batch_size 1.0 +558 16 model.embedding_dim 2.0 +558 16 optimizer.lr 0.011734728741954489 +558 16 negative_sampler.num_negs_per_pos 78.0 +558 16 training.batch_size 0.0 +558 17 model.embedding_dim 0.0 +558 17 optimizer.lr 0.007414015212435588 +558 17 negative_sampler.num_negs_per_pos 41.0 +558 17 training.batch_size 2.0 +558 18 model.embedding_dim 0.0 +558 18 optimizer.lr 0.00828223482920763 +558 18 negative_sampler.num_negs_per_pos 19.0 +558 18 training.batch_size 0.0 +558 19 model.embedding_dim 0.0 +558 19 optimizer.lr 0.05344861525253503 +558 19 negative_sampler.num_negs_per_pos 21.0 +558 19 training.batch_size 0.0 +558 20 model.embedding_dim 2.0 +558 20 optimizer.lr 0.0016874644547009575 +558 20 negative_sampler.num_negs_per_pos 24.0 +558 20 training.batch_size 2.0 +558 21 model.embedding_dim 2.0 +558 21 optimizer.lr 0.06843210218023765 +558 21 negative_sampler.num_negs_per_pos 8.0 +558 21 training.batch_size 1.0 +558 22 model.embedding_dim 0.0 +558 22 optimizer.lr 0.0249115325655338 +558 22 negative_sampler.num_negs_per_pos 74.0 +558 22 
training.batch_size 0.0 +558 23 model.embedding_dim 2.0 +558 23 optimizer.lr 0.001494238295163725 +558 23 negative_sampler.num_negs_per_pos 90.0 +558 23 training.batch_size 0.0 +558 24 model.embedding_dim 1.0 +558 24 optimizer.lr 0.022874277627438024 +558 24 negative_sampler.num_negs_per_pos 24.0 +558 24 training.batch_size 0.0 +558 25 model.embedding_dim 2.0 +558 25 optimizer.lr 0.01114630737435442 +558 25 negative_sampler.num_negs_per_pos 96.0 +558 25 training.batch_size 2.0 +558 26 model.embedding_dim 2.0 +558 26 optimizer.lr 0.00794568917990445 +558 26 negative_sampler.num_negs_per_pos 98.0 +558 26 training.batch_size 2.0 +558 27 model.embedding_dim 0.0 +558 27 optimizer.lr 0.015385020520051775 +558 27 negative_sampler.num_negs_per_pos 10.0 +558 27 training.batch_size 1.0 +558 28 model.embedding_dim 1.0 +558 28 optimizer.lr 0.0865640721639521 +558 28 negative_sampler.num_negs_per_pos 12.0 +558 28 training.batch_size 2.0 +558 29 model.embedding_dim 0.0 +558 29 optimizer.lr 0.012599663417662697 +558 29 negative_sampler.num_negs_per_pos 88.0 +558 29 training.batch_size 2.0 +558 30 model.embedding_dim 1.0 +558 30 optimizer.lr 0.005502973267554965 +558 30 negative_sampler.num_negs_per_pos 41.0 +558 30 training.batch_size 1.0 +558 31 model.embedding_dim 1.0 +558 31 optimizer.lr 0.0021412414075944026 +558 31 negative_sampler.num_negs_per_pos 40.0 +558 31 training.batch_size 1.0 +558 32 model.embedding_dim 0.0 +558 32 optimizer.lr 0.02035125780354228 +558 32 negative_sampler.num_negs_per_pos 22.0 +558 32 training.batch_size 1.0 +558 33 model.embedding_dim 1.0 +558 33 optimizer.lr 0.01199980682180503 +558 33 negative_sampler.num_negs_per_pos 53.0 +558 33 training.batch_size 1.0 +558 34 model.embedding_dim 0.0 +558 34 optimizer.lr 0.02261672691283472 +558 34 negative_sampler.num_negs_per_pos 79.0 +558 34 training.batch_size 2.0 +558 35 model.embedding_dim 1.0 +558 35 optimizer.lr 0.00828749233404647 +558 35 negative_sampler.num_negs_per_pos 30.0 +558 35 
training.batch_size 1.0 +558 36 model.embedding_dim 1.0 +558 36 optimizer.lr 0.022442331974829956 +558 36 negative_sampler.num_negs_per_pos 10.0 +558 36 training.batch_size 0.0 +558 1 dataset """wn18rr""" +558 1 model """rescal""" +558 1 loss """softplus""" +558 1 regularizer """no""" +558 1 optimizer """adam""" +558 1 training_loop """owa""" +558 1 negative_sampler """basic""" +558 1 evaluator """rankbased""" +558 2 dataset """wn18rr""" +558 2 model """rescal""" +558 2 loss """softplus""" +558 2 regularizer """no""" +558 2 optimizer """adam""" +558 2 training_loop """owa""" +558 2 negative_sampler """basic""" +558 2 evaluator """rankbased""" +558 3 dataset """wn18rr""" +558 3 model """rescal""" +558 3 loss """softplus""" +558 3 regularizer """no""" +558 3 optimizer """adam""" +558 3 training_loop """owa""" +558 3 negative_sampler """basic""" +558 3 evaluator """rankbased""" +558 4 dataset """wn18rr""" +558 4 model """rescal""" +558 4 loss """softplus""" +558 4 regularizer """no""" +558 4 optimizer """adam""" +558 4 training_loop """owa""" +558 4 negative_sampler """basic""" +558 4 evaluator """rankbased""" +558 5 dataset """wn18rr""" +558 5 model """rescal""" +558 5 loss """softplus""" +558 5 regularizer """no""" +558 5 optimizer """adam""" +558 5 training_loop """owa""" +558 5 negative_sampler """basic""" +558 5 evaluator """rankbased""" +558 6 dataset """wn18rr""" +558 6 model """rescal""" +558 6 loss """softplus""" +558 6 regularizer """no""" +558 6 optimizer """adam""" +558 6 training_loop """owa""" +558 6 negative_sampler """basic""" +558 6 evaluator """rankbased""" +558 7 dataset """wn18rr""" +558 7 model """rescal""" +558 7 loss """softplus""" +558 7 regularizer """no""" +558 7 optimizer """adam""" +558 7 training_loop """owa""" +558 7 negative_sampler """basic""" +558 7 evaluator """rankbased""" +558 8 dataset """wn18rr""" +558 8 model """rescal""" +558 8 loss """softplus""" +558 8 regularizer """no""" +558 8 optimizer """adam""" +558 8 training_loop 
"""owa""" +558 8 negative_sampler """basic""" +558 8 evaluator """rankbased""" +558 9 dataset """wn18rr""" +558 9 model """rescal""" +558 9 loss """softplus""" +558 9 regularizer """no""" +558 9 optimizer """adam""" +558 9 training_loop """owa""" +558 9 negative_sampler """basic""" +558 9 evaluator """rankbased""" +558 10 dataset """wn18rr""" +558 10 model """rescal""" +558 10 loss """softplus""" +558 10 regularizer """no""" +558 10 optimizer """adam""" +558 10 training_loop """owa""" +558 10 negative_sampler """basic""" +558 10 evaluator """rankbased""" +558 11 dataset """wn18rr""" +558 11 model """rescal""" +558 11 loss """softplus""" +558 11 regularizer """no""" +558 11 optimizer """adam""" +558 11 training_loop """owa""" +558 11 negative_sampler """basic""" +558 11 evaluator """rankbased""" +558 12 dataset """wn18rr""" +558 12 model """rescal""" +558 12 loss """softplus""" +558 12 regularizer """no""" +558 12 optimizer """adam""" +558 12 training_loop """owa""" +558 12 negative_sampler """basic""" +558 12 evaluator """rankbased""" +558 13 dataset """wn18rr""" +558 13 model """rescal""" +558 13 loss """softplus""" +558 13 regularizer """no""" +558 13 optimizer """adam""" +558 13 training_loop """owa""" +558 13 negative_sampler """basic""" +558 13 evaluator """rankbased""" +558 14 dataset """wn18rr""" +558 14 model """rescal""" +558 14 loss """softplus""" +558 14 regularizer """no""" +558 14 optimizer """adam""" +558 14 training_loop """owa""" +558 14 negative_sampler """basic""" +558 14 evaluator """rankbased""" +558 15 dataset """wn18rr""" +558 15 model """rescal""" +558 15 loss """softplus""" +558 15 regularizer """no""" +558 15 optimizer """adam""" +558 15 training_loop """owa""" +558 15 negative_sampler """basic""" +558 15 evaluator """rankbased""" +558 16 dataset """wn18rr""" +558 16 model """rescal""" +558 16 loss """softplus""" +558 16 regularizer """no""" +558 16 optimizer """adam""" +558 16 training_loop """owa""" +558 16 negative_sampler """basic""" 
+558 16 evaluator """rankbased""" +558 17 dataset """wn18rr""" +558 17 model """rescal""" +558 17 loss """softplus""" +558 17 regularizer """no""" +558 17 optimizer """adam""" +558 17 training_loop """owa""" +558 17 negative_sampler """basic""" +558 17 evaluator """rankbased""" +558 18 dataset """wn18rr""" +558 18 model """rescal""" +558 18 loss """softplus""" +558 18 regularizer """no""" +558 18 optimizer """adam""" +558 18 training_loop """owa""" +558 18 negative_sampler """basic""" +558 18 evaluator """rankbased""" +558 19 dataset """wn18rr""" +558 19 model """rescal""" +558 19 loss """softplus""" +558 19 regularizer """no""" +558 19 optimizer """adam""" +558 19 training_loop """owa""" +558 19 negative_sampler """basic""" +558 19 evaluator """rankbased""" +558 20 dataset """wn18rr""" +558 20 model """rescal""" +558 20 loss """softplus""" +558 20 regularizer """no""" +558 20 optimizer """adam""" +558 20 training_loop """owa""" +558 20 negative_sampler """basic""" +558 20 evaluator """rankbased""" +558 21 dataset """wn18rr""" +558 21 model """rescal""" +558 21 loss """softplus""" +558 21 regularizer """no""" +558 21 optimizer """adam""" +558 21 training_loop """owa""" +558 21 negative_sampler """basic""" +558 21 evaluator """rankbased""" +558 22 dataset """wn18rr""" +558 22 model """rescal""" +558 22 loss """softplus""" +558 22 regularizer """no""" +558 22 optimizer """adam""" +558 22 training_loop """owa""" +558 22 negative_sampler """basic""" +558 22 evaluator """rankbased""" +558 23 dataset """wn18rr""" +558 23 model """rescal""" +558 23 loss """softplus""" +558 23 regularizer """no""" +558 23 optimizer """adam""" +558 23 training_loop """owa""" +558 23 negative_sampler """basic""" +558 23 evaluator """rankbased""" +558 24 dataset """wn18rr""" +558 24 model """rescal""" +558 24 loss """softplus""" +558 24 regularizer """no""" +558 24 optimizer """adam""" +558 24 training_loop """owa""" +558 24 negative_sampler """basic""" +558 24 evaluator """rankbased""" +558 
25 dataset """wn18rr""" +558 25 model """rescal""" +558 25 loss """softplus""" +558 25 regularizer """no""" +558 25 optimizer """adam""" +558 25 training_loop """owa""" +558 25 negative_sampler """basic""" +558 25 evaluator """rankbased""" +558 26 dataset """wn18rr""" +558 26 model """rescal""" +558 26 loss """softplus""" +558 26 regularizer """no""" +558 26 optimizer """adam""" +558 26 training_loop """owa""" +558 26 negative_sampler """basic""" +558 26 evaluator """rankbased""" +558 27 dataset """wn18rr""" +558 27 model """rescal""" +558 27 loss """softplus""" +558 27 regularizer """no""" +558 27 optimizer """adam""" +558 27 training_loop """owa""" +558 27 negative_sampler """basic""" +558 27 evaluator """rankbased""" +558 28 dataset """wn18rr""" +558 28 model """rescal""" +558 28 loss """softplus""" +558 28 regularizer """no""" +558 28 optimizer """adam""" +558 28 training_loop """owa""" +558 28 negative_sampler """basic""" +558 28 evaluator """rankbased""" +558 29 dataset """wn18rr""" +558 29 model """rescal""" +558 29 loss """softplus""" +558 29 regularizer """no""" +558 29 optimizer """adam""" +558 29 training_loop """owa""" +558 29 negative_sampler """basic""" +558 29 evaluator """rankbased""" +558 30 dataset """wn18rr""" +558 30 model """rescal""" +558 30 loss """softplus""" +558 30 regularizer """no""" +558 30 optimizer """adam""" +558 30 training_loop """owa""" +558 30 negative_sampler """basic""" +558 30 evaluator """rankbased""" +558 31 dataset """wn18rr""" +558 31 model """rescal""" +558 31 loss """softplus""" +558 31 regularizer """no""" +558 31 optimizer """adam""" +558 31 training_loop """owa""" +558 31 negative_sampler """basic""" +558 31 evaluator """rankbased""" +558 32 dataset """wn18rr""" +558 32 model """rescal""" +558 32 loss """softplus""" +558 32 regularizer """no""" +558 32 optimizer """adam""" +558 32 training_loop """owa""" +558 32 negative_sampler """basic""" +558 32 evaluator """rankbased""" +558 33 dataset """wn18rr""" +558 33 model 
"""rescal""" +558 33 loss """softplus""" +558 33 regularizer """no""" +558 33 optimizer """adam""" +558 33 training_loop """owa""" +558 33 negative_sampler """basic""" +558 33 evaluator """rankbased""" +558 34 dataset """wn18rr""" +558 34 model """rescal""" +558 34 loss """softplus""" +558 34 regularizer """no""" +558 34 optimizer """adam""" +558 34 training_loop """owa""" +558 34 negative_sampler """basic""" +558 34 evaluator """rankbased""" +558 35 dataset """wn18rr""" +558 35 model """rescal""" +558 35 loss """softplus""" +558 35 regularizer """no""" +558 35 optimizer """adam""" +558 35 training_loop """owa""" +558 35 negative_sampler """basic""" +558 35 evaluator """rankbased""" +558 36 dataset """wn18rr""" +558 36 model """rescal""" +558 36 loss """softplus""" +558 36 regularizer """no""" +558 36 optimizer """adam""" +558 36 training_loop """owa""" +558 36 negative_sampler """basic""" +558 36 evaluator """rankbased""" +559 1 model.embedding_dim 1.0 +559 1 optimizer.lr 0.08030828316681549 +559 1 negative_sampler.num_negs_per_pos 15.0 +559 1 training.batch_size 1.0 +559 2 model.embedding_dim 0.0 +559 2 optimizer.lr 0.006705847843251955 +559 2 negative_sampler.num_negs_per_pos 60.0 +559 2 training.batch_size 1.0 +559 3 model.embedding_dim 2.0 +559 3 optimizer.lr 0.004356941029980218 +559 3 negative_sampler.num_negs_per_pos 11.0 +559 3 training.batch_size 0.0 +559 4 model.embedding_dim 1.0 +559 4 optimizer.lr 0.023083262652875417 +559 4 negative_sampler.num_negs_per_pos 67.0 +559 4 training.batch_size 0.0 +559 5 model.embedding_dim 2.0 +559 5 optimizer.lr 0.004070291171777253 +559 5 negative_sampler.num_negs_per_pos 5.0 +559 5 training.batch_size 0.0 +559 6 model.embedding_dim 2.0 +559 6 optimizer.lr 0.04245693164710665 +559 6 negative_sampler.num_negs_per_pos 6.0 +559 6 training.batch_size 1.0 +559 7 model.embedding_dim 1.0 +559 7 optimizer.lr 0.011298393375365135 +559 7 negative_sampler.num_negs_per_pos 17.0 +559 7 training.batch_size 1.0 +559 8 
model.embedding_dim 2.0 +559 8 optimizer.lr 0.06529703659696169 +559 8 negative_sampler.num_negs_per_pos 58.0 +559 8 training.batch_size 0.0 +559 9 model.embedding_dim 2.0 +559 9 optimizer.lr 0.01777689384498602 +559 9 negative_sampler.num_negs_per_pos 58.0 +559 9 training.batch_size 0.0 +559 10 model.embedding_dim 1.0 +559 10 optimizer.lr 0.0018841503764549432 +559 10 negative_sampler.num_negs_per_pos 92.0 +559 10 training.batch_size 1.0 +559 11 model.embedding_dim 2.0 +559 11 optimizer.lr 0.07820982204101638 +559 11 negative_sampler.num_negs_per_pos 60.0 +559 11 training.batch_size 2.0 +559 12 model.embedding_dim 1.0 +559 12 optimizer.lr 0.016509194940517866 +559 12 negative_sampler.num_negs_per_pos 93.0 +559 12 training.batch_size 0.0 +559 13 model.embedding_dim 2.0 +559 13 optimizer.lr 0.02242316856958945 +559 13 negative_sampler.num_negs_per_pos 33.0 +559 13 training.batch_size 1.0 +559 14 model.embedding_dim 1.0 +559 14 optimizer.lr 0.051691838923918904 +559 14 negative_sampler.num_negs_per_pos 93.0 +559 14 training.batch_size 2.0 +559 15 model.embedding_dim 2.0 +559 15 optimizer.lr 0.002663782349668834 +559 15 negative_sampler.num_negs_per_pos 53.0 +559 15 training.batch_size 2.0 +559 16 model.embedding_dim 1.0 +559 16 optimizer.lr 0.02577933803455369 +559 16 negative_sampler.num_negs_per_pos 3.0 +559 16 training.batch_size 0.0 +559 17 model.embedding_dim 2.0 +559 17 optimizer.lr 0.07714869554021155 +559 17 negative_sampler.num_negs_per_pos 67.0 +559 17 training.batch_size 1.0 +559 18 model.embedding_dim 1.0 +559 18 optimizer.lr 0.005052624421702235 +559 18 negative_sampler.num_negs_per_pos 54.0 +559 18 training.batch_size 0.0 +559 19 model.embedding_dim 0.0 +559 19 optimizer.lr 0.047526792699490825 +559 19 negative_sampler.num_negs_per_pos 93.0 +559 19 training.batch_size 2.0 +559 20 model.embedding_dim 1.0 +559 20 optimizer.lr 0.002845884918241365 +559 20 negative_sampler.num_negs_per_pos 5.0 +559 20 training.batch_size 2.0 +559 21 model.embedding_dim 1.0 
+559 21 optimizer.lr 0.0064979924360064716 +559 21 negative_sampler.num_negs_per_pos 34.0 +559 21 training.batch_size 2.0 +559 22 model.embedding_dim 1.0 +559 22 optimizer.lr 0.06568023906082837 +559 22 negative_sampler.num_negs_per_pos 98.0 +559 22 training.batch_size 2.0 +559 23 model.embedding_dim 0.0 +559 23 optimizer.lr 0.00482611028744372 +559 23 negative_sampler.num_negs_per_pos 33.0 +559 23 training.batch_size 2.0 +559 24 model.embedding_dim 2.0 +559 24 optimizer.lr 0.009200351208837701 +559 24 negative_sampler.num_negs_per_pos 76.0 +559 24 training.batch_size 2.0 +559 25 model.embedding_dim 0.0 +559 25 optimizer.lr 0.010143977063770763 +559 25 negative_sampler.num_negs_per_pos 34.0 +559 25 training.batch_size 0.0 +559 26 model.embedding_dim 0.0 +559 26 optimizer.lr 0.008592000807677929 +559 26 negative_sampler.num_negs_per_pos 71.0 +559 26 training.batch_size 0.0 +559 27 model.embedding_dim 0.0 +559 27 optimizer.lr 0.02812507710114175 +559 27 negative_sampler.num_negs_per_pos 47.0 +559 27 training.batch_size 2.0 +559 28 model.embedding_dim 2.0 +559 28 optimizer.lr 0.014376698019455876 +559 28 negative_sampler.num_negs_per_pos 56.0 +559 28 training.batch_size 2.0 +559 29 model.embedding_dim 2.0 +559 29 optimizer.lr 0.019027484876491373 +559 29 negative_sampler.num_negs_per_pos 59.0 +559 29 training.batch_size 0.0 +559 30 model.embedding_dim 1.0 +559 30 optimizer.lr 0.00849223661601396 +559 30 negative_sampler.num_negs_per_pos 52.0 +559 30 training.batch_size 1.0 +559 1 dataset """wn18rr""" +559 1 model """rescal""" +559 1 loss """bceaftersigmoid""" +559 1 regularizer """no""" +559 1 optimizer """adam""" +559 1 training_loop """owa""" +559 1 negative_sampler """basic""" +559 1 evaluator """rankbased""" +559 2 dataset """wn18rr""" +559 2 model """rescal""" +559 2 loss """bceaftersigmoid""" +559 2 regularizer """no""" +559 2 optimizer """adam""" +559 2 training_loop """owa""" +559 2 negative_sampler """basic""" +559 2 evaluator """rankbased""" +559 3 dataset 
"""wn18rr""" +559 3 model """rescal""" +559 3 loss """bceaftersigmoid""" +559 3 regularizer """no""" +559 3 optimizer """adam""" +559 3 training_loop """owa""" +559 3 negative_sampler """basic""" +559 3 evaluator """rankbased""" +559 4 dataset """wn18rr""" +559 4 model """rescal""" +559 4 loss """bceaftersigmoid""" +559 4 regularizer """no""" +559 4 optimizer """adam""" +559 4 training_loop """owa""" +559 4 negative_sampler """basic""" +559 4 evaluator """rankbased""" +559 5 dataset """wn18rr""" +559 5 model """rescal""" +559 5 loss """bceaftersigmoid""" +559 5 regularizer """no""" +559 5 optimizer """adam""" +559 5 training_loop """owa""" +559 5 negative_sampler """basic""" +559 5 evaluator """rankbased""" +559 6 dataset """wn18rr""" +559 6 model """rescal""" +559 6 loss """bceaftersigmoid""" +559 6 regularizer """no""" +559 6 optimizer """adam""" +559 6 training_loop """owa""" +559 6 negative_sampler """basic""" +559 6 evaluator """rankbased""" +559 7 dataset """wn18rr""" +559 7 model """rescal""" +559 7 loss """bceaftersigmoid""" +559 7 regularizer """no""" +559 7 optimizer """adam""" +559 7 training_loop """owa""" +559 7 negative_sampler """basic""" +559 7 evaluator """rankbased""" +559 8 dataset """wn18rr""" +559 8 model """rescal""" +559 8 loss """bceaftersigmoid""" +559 8 regularizer """no""" +559 8 optimizer """adam""" +559 8 training_loop """owa""" +559 8 negative_sampler """basic""" +559 8 evaluator """rankbased""" +559 9 dataset """wn18rr""" +559 9 model """rescal""" +559 9 loss """bceaftersigmoid""" +559 9 regularizer """no""" +559 9 optimizer """adam""" +559 9 training_loop """owa""" +559 9 negative_sampler """basic""" +559 9 evaluator """rankbased""" +559 10 dataset """wn18rr""" +559 10 model """rescal""" +559 10 loss """bceaftersigmoid""" +559 10 regularizer """no""" +559 10 optimizer """adam""" +559 10 training_loop """owa""" +559 10 negative_sampler """basic""" +559 10 evaluator """rankbased""" +559 11 dataset """wn18rr""" +559 11 model 
"""rescal""" +559 11 loss """bceaftersigmoid""" +559 11 regularizer """no""" +559 11 optimizer """adam""" +559 11 training_loop """owa""" +559 11 negative_sampler """basic""" +559 11 evaluator """rankbased""" +559 12 dataset """wn18rr""" +559 12 model """rescal""" +559 12 loss """bceaftersigmoid""" +559 12 regularizer """no""" +559 12 optimizer """adam""" +559 12 training_loop """owa""" +559 12 negative_sampler """basic""" +559 12 evaluator """rankbased""" +559 13 dataset """wn18rr""" +559 13 model """rescal""" +559 13 loss """bceaftersigmoid""" +559 13 regularizer """no""" +559 13 optimizer """adam""" +559 13 training_loop """owa""" +559 13 negative_sampler """basic""" +559 13 evaluator """rankbased""" +559 14 dataset """wn18rr""" +559 14 model """rescal""" +559 14 loss """bceaftersigmoid""" +559 14 regularizer """no""" +559 14 optimizer """adam""" +559 14 training_loop """owa""" +559 14 negative_sampler """basic""" +559 14 evaluator """rankbased""" +559 15 dataset """wn18rr""" +559 15 model """rescal""" +559 15 loss """bceaftersigmoid""" +559 15 regularizer """no""" +559 15 optimizer """adam""" +559 15 training_loop """owa""" +559 15 negative_sampler """basic""" +559 15 evaluator """rankbased""" +559 16 dataset """wn18rr""" +559 16 model """rescal""" +559 16 loss """bceaftersigmoid""" +559 16 regularizer """no""" +559 16 optimizer """adam""" +559 16 training_loop """owa""" +559 16 negative_sampler """basic""" +559 16 evaluator """rankbased""" +559 17 dataset """wn18rr""" +559 17 model """rescal""" +559 17 loss """bceaftersigmoid""" +559 17 regularizer """no""" +559 17 optimizer """adam""" +559 17 training_loop """owa""" +559 17 negative_sampler """basic""" +559 17 evaluator """rankbased""" +559 18 dataset """wn18rr""" +559 18 model """rescal""" +559 18 loss """bceaftersigmoid""" +559 18 regularizer """no""" +559 18 optimizer """adam""" +559 18 training_loop """owa""" +559 18 negative_sampler """basic""" +559 18 evaluator """rankbased""" +559 19 dataset 
"""wn18rr""" +559 19 model """rescal""" +559 19 loss """bceaftersigmoid""" +559 19 regularizer """no""" +559 19 optimizer """adam""" +559 19 training_loop """owa""" +559 19 negative_sampler """basic""" +559 19 evaluator """rankbased""" +559 20 dataset """wn18rr""" +559 20 model """rescal""" +559 20 loss """bceaftersigmoid""" +559 20 regularizer """no""" +559 20 optimizer """adam""" +559 20 training_loop """owa""" +559 20 negative_sampler """basic""" +559 20 evaluator """rankbased""" +559 21 dataset """wn18rr""" +559 21 model """rescal""" +559 21 loss """bceaftersigmoid""" +559 21 regularizer """no""" +559 21 optimizer """adam""" +559 21 training_loop """owa""" +559 21 negative_sampler """basic""" +559 21 evaluator """rankbased""" +559 22 dataset """wn18rr""" +559 22 model """rescal""" +559 22 loss """bceaftersigmoid""" +559 22 regularizer """no""" +559 22 optimizer """adam""" +559 22 training_loop """owa""" +559 22 negative_sampler """basic""" +559 22 evaluator """rankbased""" +559 23 dataset """wn18rr""" +559 23 model """rescal""" +559 23 loss """bceaftersigmoid""" +559 23 regularizer """no""" +559 23 optimizer """adam""" +559 23 training_loop """owa""" +559 23 negative_sampler """basic""" +559 23 evaluator """rankbased""" +559 24 dataset """wn18rr""" +559 24 model """rescal""" +559 24 loss """bceaftersigmoid""" +559 24 regularizer """no""" +559 24 optimizer """adam""" +559 24 training_loop """owa""" +559 24 negative_sampler """basic""" +559 24 evaluator """rankbased""" +559 25 dataset """wn18rr""" +559 25 model """rescal""" +559 25 loss """bceaftersigmoid""" +559 25 regularizer """no""" +559 25 optimizer """adam""" +559 25 training_loop """owa""" +559 25 negative_sampler """basic""" +559 25 evaluator """rankbased""" +559 26 dataset """wn18rr""" +559 26 model """rescal""" +559 26 loss """bceaftersigmoid""" +559 26 regularizer """no""" +559 26 optimizer """adam""" +559 26 training_loop """owa""" +559 26 negative_sampler """basic""" +559 26 evaluator """rankbased""" 
+559 27 dataset """wn18rr""" +559 27 model """rescal""" +559 27 loss """bceaftersigmoid""" +559 27 regularizer """no""" +559 27 optimizer """adam""" +559 27 training_loop """owa""" +559 27 negative_sampler """basic""" +559 27 evaluator """rankbased""" +559 28 dataset """wn18rr""" +559 28 model """rescal""" +559 28 loss """bceaftersigmoid""" +559 28 regularizer """no""" +559 28 optimizer """adam""" +559 28 training_loop """owa""" +559 28 negative_sampler """basic""" +559 28 evaluator """rankbased""" +559 29 dataset """wn18rr""" +559 29 model """rescal""" +559 29 loss """bceaftersigmoid""" +559 29 regularizer """no""" +559 29 optimizer """adam""" +559 29 training_loop """owa""" +559 29 negative_sampler """basic""" +559 29 evaluator """rankbased""" +559 30 dataset """wn18rr""" +559 30 model """rescal""" +559 30 loss """bceaftersigmoid""" +559 30 regularizer """no""" +559 30 optimizer """adam""" +559 30 training_loop """owa""" +559 30 negative_sampler """basic""" +559 30 evaluator """rankbased""" +560 1 model.embedding_dim 0.0 +560 1 optimizer.lr 0.001964334390594111 +560 1 negative_sampler.num_negs_per_pos 43.0 +560 1 training.batch_size 0.0 +560 2 model.embedding_dim 0.0 +560 2 optimizer.lr 0.005871727717133195 +560 2 negative_sampler.num_negs_per_pos 36.0 +560 2 training.batch_size 0.0 +560 3 model.embedding_dim 0.0 +560 3 optimizer.lr 0.0069617638588724065 +560 3 negative_sampler.num_negs_per_pos 54.0 +560 3 training.batch_size 1.0 +560 4 model.embedding_dim 1.0 +560 4 optimizer.lr 0.0020300941545961246 +560 4 negative_sampler.num_negs_per_pos 12.0 +560 4 training.batch_size 2.0 +560 5 model.embedding_dim 0.0 +560 5 optimizer.lr 0.020149935971179336 +560 5 negative_sampler.num_negs_per_pos 2.0 +560 5 training.batch_size 2.0 +560 6 model.embedding_dim 2.0 +560 6 optimizer.lr 0.011074977410310901 +560 6 negative_sampler.num_negs_per_pos 92.0 +560 6 training.batch_size 0.0 +560 7 model.embedding_dim 2.0 +560 7 optimizer.lr 0.012279993026067177 +560 7 
negative_sampler.num_negs_per_pos 33.0 +560 7 training.batch_size 0.0 +560 8 model.embedding_dim 0.0 +560 8 optimizer.lr 0.0035685929647889993 +560 8 negative_sampler.num_negs_per_pos 34.0 +560 8 training.batch_size 2.0 +560 9 model.embedding_dim 1.0 +560 9 optimizer.lr 0.0033177459797448686 +560 9 negative_sampler.num_negs_per_pos 32.0 +560 9 training.batch_size 2.0 +560 10 model.embedding_dim 1.0 +560 10 optimizer.lr 0.008442301518745629 +560 10 negative_sampler.num_negs_per_pos 2.0 +560 10 training.batch_size 2.0 +560 11 model.embedding_dim 0.0 +560 11 optimizer.lr 0.042082939050808406 +560 11 negative_sampler.num_negs_per_pos 99.0 +560 11 training.batch_size 0.0 +560 12 model.embedding_dim 1.0 +560 12 optimizer.lr 0.06343075015703241 +560 12 negative_sampler.num_negs_per_pos 38.0 +560 12 training.batch_size 0.0 +560 13 model.embedding_dim 1.0 +560 13 optimizer.lr 0.04625630336892578 +560 13 negative_sampler.num_negs_per_pos 78.0 +560 13 training.batch_size 2.0 +560 14 model.embedding_dim 2.0 +560 14 optimizer.lr 0.022636806858592994 +560 14 negative_sampler.num_negs_per_pos 85.0 +560 14 training.batch_size 2.0 +560 15 model.embedding_dim 1.0 +560 15 optimizer.lr 0.007372108400952798 +560 15 negative_sampler.num_negs_per_pos 86.0 +560 15 training.batch_size 1.0 +560 16 model.embedding_dim 0.0 +560 16 optimizer.lr 0.012261896351058734 +560 16 negative_sampler.num_negs_per_pos 15.0 +560 16 training.batch_size 0.0 +560 17 model.embedding_dim 1.0 +560 17 optimizer.lr 0.010303858073243131 +560 17 negative_sampler.num_negs_per_pos 63.0 +560 17 training.batch_size 2.0 +560 18 model.embedding_dim 0.0 +560 18 optimizer.lr 0.003055062946271991 +560 18 negative_sampler.num_negs_per_pos 92.0 +560 18 training.batch_size 1.0 +560 19 model.embedding_dim 1.0 +560 19 optimizer.lr 0.05766143523825392 +560 19 negative_sampler.num_negs_per_pos 33.0 +560 19 training.batch_size 1.0 +560 20 model.embedding_dim 2.0 +560 20 optimizer.lr 0.0018962102948770119 +560 20 
negative_sampler.num_negs_per_pos 44.0 +560 20 training.batch_size 0.0 +560 21 model.embedding_dim 0.0 +560 21 optimizer.lr 0.014302476523365507 +560 21 negative_sampler.num_negs_per_pos 8.0 +560 21 training.batch_size 1.0 +560 22 model.embedding_dim 0.0 +560 22 optimizer.lr 0.009805922819464215 +560 22 negative_sampler.num_negs_per_pos 9.0 +560 22 training.batch_size 2.0 +560 23 model.embedding_dim 2.0 +560 23 optimizer.lr 0.006300015289013828 +560 23 negative_sampler.num_negs_per_pos 51.0 +560 23 training.batch_size 2.0 +560 24 model.embedding_dim 0.0 +560 24 optimizer.lr 0.039741232439385134 +560 24 negative_sampler.num_negs_per_pos 53.0 +560 24 training.batch_size 1.0 +560 25 model.embedding_dim 0.0 +560 25 optimizer.lr 0.007495441575679795 +560 25 negative_sampler.num_negs_per_pos 78.0 +560 25 training.batch_size 2.0 +560 26 model.embedding_dim 1.0 +560 26 optimizer.lr 0.010304636274127435 +560 26 negative_sampler.num_negs_per_pos 96.0 +560 26 training.batch_size 0.0 +560 27 model.embedding_dim 1.0 +560 27 optimizer.lr 0.0201709291837824 +560 27 negative_sampler.num_negs_per_pos 40.0 +560 27 training.batch_size 1.0 +560 28 model.embedding_dim 2.0 +560 28 optimizer.lr 0.019786473791563813 +560 28 negative_sampler.num_negs_per_pos 90.0 +560 28 training.batch_size 0.0 +560 29 model.embedding_dim 1.0 +560 29 optimizer.lr 0.01109830117637511 +560 29 negative_sampler.num_negs_per_pos 39.0 +560 29 training.batch_size 1.0 +560 30 model.embedding_dim 2.0 +560 30 optimizer.lr 0.0021918014872728524 +560 30 negative_sampler.num_negs_per_pos 67.0 +560 30 training.batch_size 2.0 +560 31 model.embedding_dim 2.0 +560 31 optimizer.lr 0.09040550349975475 +560 31 negative_sampler.num_negs_per_pos 63.0 +560 31 training.batch_size 1.0 +560 32 model.embedding_dim 0.0 +560 32 optimizer.lr 0.0034727763131454266 +560 32 negative_sampler.num_negs_per_pos 19.0 +560 32 training.batch_size 1.0 +560 33 model.embedding_dim 2.0 +560 33 optimizer.lr 0.0762234127758579 +560 33 
negative_sampler.num_negs_per_pos 11.0 +560 33 training.batch_size 1.0 +560 34 model.embedding_dim 1.0 +560 34 optimizer.lr 0.036229217850580454 +560 34 negative_sampler.num_negs_per_pos 77.0 +560 34 training.batch_size 1.0 +560 35 model.embedding_dim 2.0 +560 35 optimizer.lr 0.054403582204528955 +560 35 negative_sampler.num_negs_per_pos 77.0 +560 35 training.batch_size 2.0 +560 36 model.embedding_dim 1.0 +560 36 optimizer.lr 0.020814633594718256 +560 36 negative_sampler.num_negs_per_pos 90.0 +560 36 training.batch_size 2.0 +560 37 model.embedding_dim 1.0 +560 37 optimizer.lr 0.006276065150489911 +560 37 negative_sampler.num_negs_per_pos 52.0 +560 37 training.batch_size 0.0 +560 38 model.embedding_dim 0.0 +560 38 optimizer.lr 0.020871977383175807 +560 38 negative_sampler.num_negs_per_pos 30.0 +560 38 training.batch_size 1.0 +560 39 model.embedding_dim 1.0 +560 39 optimizer.lr 0.030240690989347233 +560 39 negative_sampler.num_negs_per_pos 12.0 +560 39 training.batch_size 0.0 +560 40 model.embedding_dim 2.0 +560 40 optimizer.lr 0.0411452338074422 +560 40 negative_sampler.num_negs_per_pos 25.0 +560 40 training.batch_size 0.0 +560 41 model.embedding_dim 0.0 +560 41 optimizer.lr 0.001277494239419278 +560 41 negative_sampler.num_negs_per_pos 23.0 +560 41 training.batch_size 0.0 +560 42 model.embedding_dim 0.0 +560 42 optimizer.lr 0.0015732257672032288 +560 42 negative_sampler.num_negs_per_pos 54.0 +560 42 training.batch_size 0.0 +560 43 model.embedding_dim 0.0 +560 43 optimizer.lr 0.0010402325091497869 +560 43 negative_sampler.num_negs_per_pos 78.0 +560 43 training.batch_size 2.0 +560 44 model.embedding_dim 0.0 +560 44 optimizer.lr 0.0018235980970238022 +560 44 negative_sampler.num_negs_per_pos 26.0 +560 44 training.batch_size 1.0 +560 45 model.embedding_dim 2.0 +560 45 optimizer.lr 0.019500973766041527 +560 45 negative_sampler.num_negs_per_pos 32.0 +560 45 training.batch_size 1.0 +560 46 model.embedding_dim 2.0 +560 46 optimizer.lr 0.015448534989583417 +560 46 
negative_sampler.num_negs_per_pos 46.0 +560 46 training.batch_size 1.0 +560 47 model.embedding_dim 0.0 +560 47 optimizer.lr 0.01227828632823211 +560 47 negative_sampler.num_negs_per_pos 80.0 +560 47 training.batch_size 2.0 +560 48 model.embedding_dim 2.0 +560 48 optimizer.lr 0.007654227999261753 +560 48 negative_sampler.num_negs_per_pos 72.0 +560 48 training.batch_size 0.0 +560 49 model.embedding_dim 2.0 +560 49 optimizer.lr 0.056735296761667685 +560 49 negative_sampler.num_negs_per_pos 65.0 +560 49 training.batch_size 2.0 +560 50 model.embedding_dim 1.0 +560 50 optimizer.lr 0.07767656037037494 +560 50 negative_sampler.num_negs_per_pos 69.0 +560 50 training.batch_size 1.0 +560 51 model.embedding_dim 0.0 +560 51 optimizer.lr 0.01059827970775174 +560 51 negative_sampler.num_negs_per_pos 50.0 +560 51 training.batch_size 0.0 +560 52 model.embedding_dim 2.0 +560 52 optimizer.lr 0.009756151127564205 +560 52 negative_sampler.num_negs_per_pos 31.0 +560 52 training.batch_size 2.0 +560 53 model.embedding_dim 2.0 +560 53 optimizer.lr 0.0029132128588212564 +560 53 negative_sampler.num_negs_per_pos 70.0 +560 53 training.batch_size 2.0 +560 54 model.embedding_dim 1.0 +560 54 optimizer.lr 0.0015171621424046049 +560 54 negative_sampler.num_negs_per_pos 73.0 +560 54 training.batch_size 2.0 +560 55 model.embedding_dim 0.0 +560 55 optimizer.lr 0.013595442149201178 +560 55 negative_sampler.num_negs_per_pos 5.0 +560 55 training.batch_size 2.0 +560 56 model.embedding_dim 0.0 +560 56 optimizer.lr 0.021324086643958143 +560 56 negative_sampler.num_negs_per_pos 62.0 +560 56 training.batch_size 2.0 +560 57 model.embedding_dim 0.0 +560 57 optimizer.lr 0.0030296775483237733 +560 57 negative_sampler.num_negs_per_pos 44.0 +560 57 training.batch_size 2.0 +560 58 model.embedding_dim 1.0 +560 58 optimizer.lr 0.0027756081717222377 +560 58 negative_sampler.num_negs_per_pos 81.0 +560 58 training.batch_size 2.0 +560 59 model.embedding_dim 2.0 +560 59 optimizer.lr 0.0010035394080547904 +560 59 
negative_sampler.num_negs_per_pos 51.0 +560 59 training.batch_size 1.0 +560 60 model.embedding_dim 2.0 +560 60 optimizer.lr 0.018290218171037438 +560 60 negative_sampler.num_negs_per_pos 4.0 +560 60 training.batch_size 1.0 +560 61 model.embedding_dim 2.0 +560 61 optimizer.lr 0.01502930249780862 +560 61 negative_sampler.num_negs_per_pos 51.0 +560 61 training.batch_size 0.0 +560 62 model.embedding_dim 2.0 +560 62 optimizer.lr 0.011783482040852535 +560 62 negative_sampler.num_negs_per_pos 19.0 +560 62 training.batch_size 1.0 +560 63 model.embedding_dim 0.0 +560 63 optimizer.lr 0.010525590478431266 +560 63 negative_sampler.num_negs_per_pos 43.0 +560 63 training.batch_size 0.0 +560 64 model.embedding_dim 2.0 +560 64 optimizer.lr 0.07474755756697167 +560 64 negative_sampler.num_negs_per_pos 87.0 +560 64 training.batch_size 2.0 +560 65 model.embedding_dim 0.0 +560 65 optimizer.lr 0.02350444251828338 +560 65 negative_sampler.num_negs_per_pos 8.0 +560 65 training.batch_size 2.0 +560 1 dataset """wn18rr""" +560 1 model """rescal""" +560 1 loss """bceaftersigmoid""" +560 1 regularizer """no""" +560 1 optimizer """adam""" +560 1 training_loop """owa""" +560 1 negative_sampler """basic""" +560 1 evaluator """rankbased""" +560 2 dataset """wn18rr""" +560 2 model """rescal""" +560 2 loss """bceaftersigmoid""" +560 2 regularizer """no""" +560 2 optimizer """adam""" +560 2 training_loop """owa""" +560 2 negative_sampler """basic""" +560 2 evaluator """rankbased""" +560 3 dataset """wn18rr""" +560 3 model """rescal""" +560 3 loss """bceaftersigmoid""" +560 3 regularizer """no""" +560 3 optimizer """adam""" +560 3 training_loop """owa""" +560 3 negative_sampler """basic""" +560 3 evaluator """rankbased""" +560 4 dataset """wn18rr""" +560 4 model """rescal""" +560 4 loss """bceaftersigmoid""" +560 4 regularizer """no""" +560 4 optimizer """adam""" +560 4 training_loop """owa""" +560 4 negative_sampler """basic""" +560 4 evaluator """rankbased""" +560 5 dataset """wn18rr""" +560 5 
model """rescal""" +560 5 loss """bceaftersigmoid""" +560 5 regularizer """no""" +560 5 optimizer """adam""" +560 5 training_loop """owa""" +560 5 negative_sampler """basic""" +560 5 evaluator """rankbased""" +560 6 dataset """wn18rr""" +560 6 model """rescal""" +560 6 loss """bceaftersigmoid""" +560 6 regularizer """no""" +560 6 optimizer """adam""" +560 6 training_loop """owa""" +560 6 negative_sampler """basic""" +560 6 evaluator """rankbased""" +560 7 dataset """wn18rr""" +560 7 model """rescal""" +560 7 loss """bceaftersigmoid""" +560 7 regularizer """no""" +560 7 optimizer """adam""" +560 7 training_loop """owa""" +560 7 negative_sampler """basic""" +560 7 evaluator """rankbased""" +560 8 dataset """wn18rr""" +560 8 model """rescal""" +560 8 loss """bceaftersigmoid""" +560 8 regularizer """no""" +560 8 optimizer """adam""" +560 8 training_loop """owa""" +560 8 negative_sampler """basic""" +560 8 evaluator """rankbased""" +560 9 dataset """wn18rr""" +560 9 model """rescal""" +560 9 loss """bceaftersigmoid""" +560 9 regularizer """no""" +560 9 optimizer """adam""" +560 9 training_loop """owa""" +560 9 negative_sampler """basic""" +560 9 evaluator """rankbased""" +560 10 dataset """wn18rr""" +560 10 model """rescal""" +560 10 loss """bceaftersigmoid""" +560 10 regularizer """no""" +560 10 optimizer """adam""" +560 10 training_loop """owa""" +560 10 negative_sampler """basic""" +560 10 evaluator """rankbased""" +560 11 dataset """wn18rr""" +560 11 model """rescal""" +560 11 loss """bceaftersigmoid""" +560 11 regularizer """no""" +560 11 optimizer """adam""" +560 11 training_loop """owa""" +560 11 negative_sampler """basic""" +560 11 evaluator """rankbased""" +560 12 dataset """wn18rr""" +560 12 model """rescal""" +560 12 loss """bceaftersigmoid""" +560 12 regularizer """no""" +560 12 optimizer """adam""" +560 12 training_loop """owa""" +560 12 negative_sampler """basic""" +560 12 evaluator """rankbased""" +560 13 dataset """wn18rr""" +560 13 model """rescal""" 
+560 13 loss """bceaftersigmoid""" +560 13 regularizer """no""" +560 13 optimizer """adam""" +560 13 training_loop """owa""" +560 13 negative_sampler """basic""" +560 13 evaluator """rankbased""" +560 14 dataset """wn18rr""" +560 14 model """rescal""" +560 14 loss """bceaftersigmoid""" +560 14 regularizer """no""" +560 14 optimizer """adam""" +560 14 training_loop """owa""" +560 14 negative_sampler """basic""" +560 14 evaluator """rankbased""" +560 15 dataset """wn18rr""" +560 15 model """rescal""" +560 15 loss """bceaftersigmoid""" +560 15 regularizer """no""" +560 15 optimizer """adam""" +560 15 training_loop """owa""" +560 15 negative_sampler """basic""" +560 15 evaluator """rankbased""" +560 16 dataset """wn18rr""" +560 16 model """rescal""" +560 16 loss """bceaftersigmoid""" +560 16 regularizer """no""" +560 16 optimizer """adam""" +560 16 training_loop """owa""" +560 16 negative_sampler """basic""" +560 16 evaluator """rankbased""" +560 17 dataset """wn18rr""" +560 17 model """rescal""" +560 17 loss """bceaftersigmoid""" +560 17 regularizer """no""" +560 17 optimizer """adam""" +560 17 training_loop """owa""" +560 17 negative_sampler """basic""" +560 17 evaluator """rankbased""" +560 18 dataset """wn18rr""" +560 18 model """rescal""" +560 18 loss """bceaftersigmoid""" +560 18 regularizer """no""" +560 18 optimizer """adam""" +560 18 training_loop """owa""" +560 18 negative_sampler """basic""" +560 18 evaluator """rankbased""" +560 19 dataset """wn18rr""" +560 19 model """rescal""" +560 19 loss """bceaftersigmoid""" +560 19 regularizer """no""" +560 19 optimizer """adam""" +560 19 training_loop """owa""" +560 19 negative_sampler """basic""" +560 19 evaluator """rankbased""" +560 20 dataset """wn18rr""" +560 20 model """rescal""" +560 20 loss """bceaftersigmoid""" +560 20 regularizer """no""" +560 20 optimizer """adam""" +560 20 training_loop """owa""" +560 20 negative_sampler """basic""" +560 20 evaluator """rankbased""" +560 21 dataset """wn18rr""" +560 21 
model """rescal""" +560 21 loss """bceaftersigmoid""" +560 21 regularizer """no""" +560 21 optimizer """adam""" +560 21 training_loop """owa""" +560 21 negative_sampler """basic""" +560 21 evaluator """rankbased""" +560 22 dataset """wn18rr""" +560 22 model """rescal""" +560 22 loss """bceaftersigmoid""" +560 22 regularizer """no""" +560 22 optimizer """adam""" +560 22 training_loop """owa""" +560 22 negative_sampler """basic""" +560 22 evaluator """rankbased""" +560 23 dataset """wn18rr""" +560 23 model """rescal""" +560 23 loss """bceaftersigmoid""" +560 23 regularizer """no""" +560 23 optimizer """adam""" +560 23 training_loop """owa""" +560 23 negative_sampler """basic""" +560 23 evaluator """rankbased""" +560 24 dataset """wn18rr""" +560 24 model """rescal""" +560 24 loss """bceaftersigmoid""" +560 24 regularizer """no""" +560 24 optimizer """adam""" +560 24 training_loop """owa""" +560 24 negative_sampler """basic""" +560 24 evaluator """rankbased""" +560 25 dataset """wn18rr""" +560 25 model """rescal""" +560 25 loss """bceaftersigmoid""" +560 25 regularizer """no""" +560 25 optimizer """adam""" +560 25 training_loop """owa""" +560 25 negative_sampler """basic""" +560 25 evaluator """rankbased""" +560 26 dataset """wn18rr""" +560 26 model """rescal""" +560 26 loss """bceaftersigmoid""" +560 26 regularizer """no""" +560 26 optimizer """adam""" +560 26 training_loop """owa""" +560 26 negative_sampler """basic""" +560 26 evaluator """rankbased""" +560 27 dataset """wn18rr""" +560 27 model """rescal""" +560 27 loss """bceaftersigmoid""" +560 27 regularizer """no""" +560 27 optimizer """adam""" +560 27 training_loop """owa""" +560 27 negative_sampler """basic""" +560 27 evaluator """rankbased""" +560 28 dataset """wn18rr""" +560 28 model """rescal""" +560 28 loss """bceaftersigmoid""" +560 28 regularizer """no""" +560 28 optimizer """adam""" +560 28 training_loop """owa""" +560 28 negative_sampler """basic""" +560 28 evaluator """rankbased""" +560 29 dataset 
"""wn18rr""" +560 29 model """rescal""" +560 29 loss """bceaftersigmoid""" +560 29 regularizer """no""" +560 29 optimizer """adam""" +560 29 training_loop """owa""" +560 29 negative_sampler """basic""" +560 29 evaluator """rankbased""" +560 30 dataset """wn18rr""" +560 30 model """rescal""" +560 30 loss """bceaftersigmoid""" +560 30 regularizer """no""" +560 30 optimizer """adam""" +560 30 training_loop """owa""" +560 30 negative_sampler """basic""" +560 30 evaluator """rankbased""" +560 31 dataset """wn18rr""" +560 31 model """rescal""" +560 31 loss """bceaftersigmoid""" +560 31 regularizer """no""" +560 31 optimizer """adam""" +560 31 training_loop """owa""" +560 31 negative_sampler """basic""" +560 31 evaluator """rankbased""" +560 32 dataset """wn18rr""" +560 32 model """rescal""" +560 32 loss """bceaftersigmoid""" +560 32 regularizer """no""" +560 32 optimizer """adam""" +560 32 training_loop """owa""" +560 32 negative_sampler """basic""" +560 32 evaluator """rankbased""" +560 33 dataset """wn18rr""" +560 33 model """rescal""" +560 33 loss """bceaftersigmoid""" +560 33 regularizer """no""" +560 33 optimizer """adam""" +560 33 training_loop """owa""" +560 33 negative_sampler """basic""" +560 33 evaluator """rankbased""" +560 34 dataset """wn18rr""" +560 34 model """rescal""" +560 34 loss """bceaftersigmoid""" +560 34 regularizer """no""" +560 34 optimizer """adam""" +560 34 training_loop """owa""" +560 34 negative_sampler """basic""" +560 34 evaluator """rankbased""" +560 35 dataset """wn18rr""" +560 35 model """rescal""" +560 35 loss """bceaftersigmoid""" +560 35 regularizer """no""" +560 35 optimizer """adam""" +560 35 training_loop """owa""" +560 35 negative_sampler """basic""" +560 35 evaluator """rankbased""" +560 36 dataset """wn18rr""" +560 36 model """rescal""" +560 36 loss """bceaftersigmoid""" +560 36 regularizer """no""" +560 36 optimizer """adam""" +560 36 training_loop """owa""" +560 36 negative_sampler """basic""" +560 36 evaluator """rankbased""" 
+560 37 dataset """wn18rr""" +560 37 model """rescal""" +560 37 loss """bceaftersigmoid""" +560 37 regularizer """no""" +560 37 optimizer """adam""" +560 37 training_loop """owa""" +560 37 negative_sampler """basic""" +560 37 evaluator """rankbased""" +560 38 dataset """wn18rr""" +560 38 model """rescal""" +560 38 loss """bceaftersigmoid""" +560 38 regularizer """no""" +560 38 optimizer """adam""" +560 38 training_loop """owa""" +560 38 negative_sampler """basic""" +560 38 evaluator """rankbased""" +560 39 dataset """wn18rr""" +560 39 model """rescal""" +560 39 loss """bceaftersigmoid""" +560 39 regularizer """no""" +560 39 optimizer """adam""" +560 39 training_loop """owa""" +560 39 negative_sampler """basic""" +560 39 evaluator """rankbased""" +560 40 dataset """wn18rr""" +560 40 model """rescal""" +560 40 loss """bceaftersigmoid""" +560 40 regularizer """no""" +560 40 optimizer """adam""" +560 40 training_loop """owa""" +560 40 negative_sampler """basic""" +560 40 evaluator """rankbased""" +560 41 dataset """wn18rr""" +560 41 model """rescal""" +560 41 loss """bceaftersigmoid""" +560 41 regularizer """no""" +560 41 optimizer """adam""" +560 41 training_loop """owa""" +560 41 negative_sampler """basic""" +560 41 evaluator """rankbased""" +560 42 dataset """wn18rr""" +560 42 model """rescal""" +560 42 loss """bceaftersigmoid""" +560 42 regularizer """no""" +560 42 optimizer """adam""" +560 42 training_loop """owa""" +560 42 negative_sampler """basic""" +560 42 evaluator """rankbased""" +560 43 dataset """wn18rr""" +560 43 model """rescal""" +560 43 loss """bceaftersigmoid""" +560 43 regularizer """no""" +560 43 optimizer """adam""" +560 43 training_loop """owa""" +560 43 negative_sampler """basic""" +560 43 evaluator """rankbased""" +560 44 dataset """wn18rr""" +560 44 model """rescal""" +560 44 loss """bceaftersigmoid""" +560 44 regularizer """no""" +560 44 optimizer """adam""" +560 44 training_loop """owa""" +560 44 negative_sampler """basic""" +560 44 evaluator 
"""rankbased""" +560 45 dataset """wn18rr""" +560 45 model """rescal""" +560 45 loss """bceaftersigmoid""" +560 45 regularizer """no""" +560 45 optimizer """adam""" +560 45 training_loop """owa""" +560 45 negative_sampler """basic""" +560 45 evaluator """rankbased""" +560 46 dataset """wn18rr""" +560 46 model """rescal""" +560 46 loss """bceaftersigmoid""" +560 46 regularizer """no""" +560 46 optimizer """adam""" +560 46 training_loop """owa""" +560 46 negative_sampler """basic""" +560 46 evaluator """rankbased""" +560 47 dataset """wn18rr""" +560 47 model """rescal""" +560 47 loss """bceaftersigmoid""" +560 47 regularizer """no""" +560 47 optimizer """adam""" +560 47 training_loop """owa""" +560 47 negative_sampler """basic""" +560 47 evaluator """rankbased""" +560 48 dataset """wn18rr""" +560 48 model """rescal""" +560 48 loss """bceaftersigmoid""" +560 48 regularizer """no""" +560 48 optimizer """adam""" +560 48 training_loop """owa""" +560 48 negative_sampler """basic""" +560 48 evaluator """rankbased""" +560 49 dataset """wn18rr""" +560 49 model """rescal""" +560 49 loss """bceaftersigmoid""" +560 49 regularizer """no""" +560 49 optimizer """adam""" +560 49 training_loop """owa""" +560 49 negative_sampler """basic""" +560 49 evaluator """rankbased""" +560 50 dataset """wn18rr""" +560 50 model """rescal""" +560 50 loss """bceaftersigmoid""" +560 50 regularizer """no""" +560 50 optimizer """adam""" +560 50 training_loop """owa""" +560 50 negative_sampler """basic""" +560 50 evaluator """rankbased""" +560 51 dataset """wn18rr""" +560 51 model """rescal""" +560 51 loss """bceaftersigmoid""" +560 51 regularizer """no""" +560 51 optimizer """adam""" +560 51 training_loop """owa""" +560 51 negative_sampler """basic""" +560 51 evaluator """rankbased""" +560 52 dataset """wn18rr""" +560 52 model """rescal""" +560 52 loss """bceaftersigmoid""" +560 52 regularizer """no""" +560 52 optimizer """adam""" +560 52 training_loop """owa""" +560 52 negative_sampler """basic""" 
+560 52 evaluator """rankbased""" +560 53 dataset """wn18rr""" +560 53 model """rescal""" +560 53 loss """bceaftersigmoid""" +560 53 regularizer """no""" +560 53 optimizer """adam""" +560 53 training_loop """owa""" +560 53 negative_sampler """basic""" +560 53 evaluator """rankbased""" +560 54 dataset """wn18rr""" +560 54 model """rescal""" +560 54 loss """bceaftersigmoid""" +560 54 regularizer """no""" +560 54 optimizer """adam""" +560 54 training_loop """owa""" +560 54 negative_sampler """basic""" +560 54 evaluator """rankbased""" +560 55 dataset """wn18rr""" +560 55 model """rescal""" +560 55 loss """bceaftersigmoid""" +560 55 regularizer """no""" +560 55 optimizer """adam""" +560 55 training_loop """owa""" +560 55 negative_sampler """basic""" +560 55 evaluator """rankbased""" +560 56 dataset """wn18rr""" +560 56 model """rescal""" +560 56 loss """bceaftersigmoid""" +560 56 regularizer """no""" +560 56 optimizer """adam""" +560 56 training_loop """owa""" +560 56 negative_sampler """basic""" +560 56 evaluator """rankbased""" +560 57 dataset """wn18rr""" +560 57 model """rescal""" +560 57 loss """bceaftersigmoid""" +560 57 regularizer """no""" +560 57 optimizer """adam""" +560 57 training_loop """owa""" +560 57 negative_sampler """basic""" +560 57 evaluator """rankbased""" +560 58 dataset """wn18rr""" +560 58 model """rescal""" +560 58 loss """bceaftersigmoid""" +560 58 regularizer """no""" +560 58 optimizer """adam""" +560 58 training_loop """owa""" +560 58 negative_sampler """basic""" +560 58 evaluator """rankbased""" +560 59 dataset """wn18rr""" +560 59 model """rescal""" +560 59 loss """bceaftersigmoid""" +560 59 regularizer """no""" +560 59 optimizer """adam""" +560 59 training_loop """owa""" +560 59 negative_sampler """basic""" +560 59 evaluator """rankbased""" +560 60 dataset """wn18rr""" +560 60 model """rescal""" +560 60 loss """bceaftersigmoid""" +560 60 regularizer """no""" +560 60 optimizer """adam""" +560 60 training_loop """owa""" +560 60 
negative_sampler """basic""" +560 60 evaluator """rankbased""" +560 61 dataset """wn18rr""" +560 61 model """rescal""" +560 61 loss """bceaftersigmoid""" +560 61 regularizer """no""" +560 61 optimizer """adam""" +560 61 training_loop """owa""" +560 61 negative_sampler """basic""" +560 61 evaluator """rankbased""" +560 62 dataset """wn18rr""" +560 62 model """rescal""" +560 62 loss """bceaftersigmoid""" +560 62 regularizer """no""" +560 62 optimizer """adam""" +560 62 training_loop """owa""" +560 62 negative_sampler """basic""" +560 62 evaluator """rankbased""" +560 63 dataset """wn18rr""" +560 63 model """rescal""" +560 63 loss """bceaftersigmoid""" +560 63 regularizer """no""" +560 63 optimizer """adam""" +560 63 training_loop """owa""" +560 63 negative_sampler """basic""" +560 63 evaluator """rankbased""" +560 64 dataset """wn18rr""" +560 64 model """rescal""" +560 64 loss """bceaftersigmoid""" +560 64 regularizer """no""" +560 64 optimizer """adam""" +560 64 training_loop """owa""" +560 64 negative_sampler """basic""" +560 64 evaluator """rankbased""" +560 65 dataset """wn18rr""" +560 65 model """rescal""" +560 65 loss """bceaftersigmoid""" +560 65 regularizer """no""" +560 65 optimizer """adam""" +560 65 training_loop """owa""" +560 65 negative_sampler """basic""" +560 65 evaluator """rankbased""" +561 1 model.embedding_dim 1.0 +561 1 optimizer.lr 0.0015373277963290047 +561 1 negative_sampler.num_negs_per_pos 30.0 +561 1 training.batch_size 0.0 +561 2 model.embedding_dim 1.0 +561 2 optimizer.lr 0.010660240904506575 +561 2 negative_sampler.num_negs_per_pos 32.0 +561 2 training.batch_size 0.0 +561 3 model.embedding_dim 0.0 +561 3 optimizer.lr 0.03743980452010075 +561 3 negative_sampler.num_negs_per_pos 36.0 +561 3 training.batch_size 3.0 +561 1 dataset """yago310""" +561 1 model """rescal""" +561 1 loss """softplus""" +561 1 regularizer """no""" +561 1 optimizer """adam""" +561 1 training_loop """owa""" +561 1 negative_sampler """basic""" +561 1 evaluator 
"""rankbased""" +561 2 dataset """yago310""" +561 2 model """rescal""" +561 2 loss """softplus""" +561 2 regularizer """no""" +561 2 optimizer """adam""" +561 2 training_loop """owa""" +561 2 negative_sampler """basic""" +561 2 evaluator """rankbased""" +561 3 dataset """yago310""" +561 3 model """rescal""" +561 3 loss """softplus""" +561 3 regularizer """no""" +561 3 optimizer """adam""" +561 3 training_loop """owa""" +561 3 negative_sampler """basic""" +561 3 evaluator """rankbased""" +562 1 model.embedding_dim 2.0 +562 1 optimizer.lr 0.00169366709798595 +562 1 negative_sampler.num_negs_per_pos 35.0 +562 1 training.batch_size 0.0 +562 1 dataset """yago310""" +562 1 model """rescal""" +562 1 loss """softplus""" +562 1 regularizer """no""" +562 1 optimizer """adam""" +562 1 training_loop """owa""" +562 1 negative_sampler """basic""" +562 1 evaluator """rankbased""" +563 1 model.embedding_dim 2.0 +563 1 optimizer.lr 0.04519951137921542 +563 1 negative_sampler.num_negs_per_pos 36.0 +563 1 training.batch_size 2.0 +563 2 model.embedding_dim 0.0 +563 2 optimizer.lr 0.061159120965821295 +563 2 negative_sampler.num_negs_per_pos 44.0 +563 2 training.batch_size 0.0 +563 3 model.embedding_dim 1.0 +563 3 optimizer.lr 0.0010862963868709398 +563 3 negative_sampler.num_negs_per_pos 39.0 +563 3 training.batch_size 2.0 +563 4 model.embedding_dim 2.0 +563 4 optimizer.lr 0.001270964902439165 +563 4 negative_sampler.num_negs_per_pos 36.0 +563 4 training.batch_size 1.0 +563 1 dataset """yago310""" +563 1 model """rescal""" +563 1 loss """bceaftersigmoid""" +563 1 regularizer """no""" +563 1 optimizer """adam""" +563 1 training_loop """owa""" +563 1 negative_sampler """basic""" +563 1 evaluator """rankbased""" +563 2 dataset """yago310""" +563 2 model """rescal""" +563 2 loss """bceaftersigmoid""" +563 2 regularizer """no""" +563 2 optimizer """adam""" +563 2 training_loop """owa""" +563 2 negative_sampler """basic""" +563 2 evaluator """rankbased""" +563 3 dataset """yago310""" +563 3 
model """rescal""" +563 3 loss """bceaftersigmoid""" +563 3 regularizer """no""" +563 3 optimizer """adam""" +563 3 training_loop """owa""" +563 3 negative_sampler """basic""" +563 3 evaluator """rankbased""" +563 4 dataset """yago310""" +563 4 model """rescal""" +563 4 loss """bceaftersigmoid""" +563 4 regularizer """no""" +563 4 optimizer """adam""" +563 4 training_loop """owa""" +563 4 negative_sampler """basic""" +563 4 evaluator """rankbased""" +564 1 model.embedding_dim 0.0 +564 1 optimizer.lr 0.09017703706495944 +564 1 negative_sampler.num_negs_per_pos 31.0 +564 1 training.batch_size 1.0 +564 2 model.embedding_dim 0.0 +564 2 optimizer.lr 0.007920046431180609 +564 2 negative_sampler.num_negs_per_pos 19.0 +564 2 training.batch_size 1.0 +564 3 model.embedding_dim 0.0 +564 3 optimizer.lr 0.07033414255329745 +564 3 negative_sampler.num_negs_per_pos 5.0 +564 3 training.batch_size 0.0 +564 4 model.embedding_dim 2.0 +564 4 optimizer.lr 0.004268360144014684 +564 4 negative_sampler.num_negs_per_pos 40.0 +564 4 training.batch_size 0.0 +564 5 model.embedding_dim 0.0 +564 5 optimizer.lr 0.014442646977736007 +564 5 negative_sampler.num_negs_per_pos 17.0 +564 5 training.batch_size 2.0 +564 6 model.embedding_dim 1.0 +564 6 optimizer.lr 0.010192180560917922 +564 6 negative_sampler.num_negs_per_pos 20.0 +564 6 training.batch_size 1.0 +564 7 model.embedding_dim 2.0 +564 7 optimizer.lr 0.0048948748334286885 +564 7 negative_sampler.num_negs_per_pos 49.0 +564 7 training.batch_size 3.0 +564 8 model.embedding_dim 1.0 +564 8 optimizer.lr 0.03053181813793431 +564 8 negative_sampler.num_negs_per_pos 4.0 +564 8 training.batch_size 0.0 +564 9 model.embedding_dim 0.0 +564 9 optimizer.lr 0.00980222226384011 +564 9 negative_sampler.num_negs_per_pos 11.0 +564 9 training.batch_size 0.0 +564 10 model.embedding_dim 0.0 +564 10 optimizer.lr 0.03073644018206834 +564 10 negative_sampler.num_negs_per_pos 11.0 +564 10 training.batch_size 1.0 +564 11 model.embedding_dim 0.0 +564 11 optimizer.lr 
0.044369188203948466 +564 11 negative_sampler.num_negs_per_pos 25.0 +564 11 training.batch_size 1.0 +564 12 model.embedding_dim 1.0 +564 12 optimizer.lr 0.05552872272352212 +564 12 negative_sampler.num_negs_per_pos 19.0 +564 12 training.batch_size 0.0 +564 1 dataset """yago310""" +564 1 model """rescal""" +564 1 loss """bceaftersigmoid""" +564 1 regularizer """no""" +564 1 optimizer """adam""" +564 1 training_loop """owa""" +564 1 negative_sampler """basic""" +564 1 evaluator """rankbased""" +564 2 dataset """yago310""" +564 2 model """rescal""" +564 2 loss """bceaftersigmoid""" +564 2 regularizer """no""" +564 2 optimizer """adam""" +564 2 training_loop """owa""" +564 2 negative_sampler """basic""" +564 2 evaluator """rankbased""" +564 3 dataset """yago310""" +564 3 model """rescal""" +564 3 loss """bceaftersigmoid""" +564 3 regularizer """no""" +564 3 optimizer """adam""" +564 3 training_loop """owa""" +564 3 negative_sampler """basic""" +564 3 evaluator """rankbased""" +564 4 dataset """yago310""" +564 4 model """rescal""" +564 4 loss """bceaftersigmoid""" +564 4 regularizer """no""" +564 4 optimizer """adam""" +564 4 training_loop """owa""" +564 4 negative_sampler """basic""" +564 4 evaluator """rankbased""" +564 5 dataset """yago310""" +564 5 model """rescal""" +564 5 loss """bceaftersigmoid""" +564 5 regularizer """no""" +564 5 optimizer """adam""" +564 5 training_loop """owa""" +564 5 negative_sampler """basic""" +564 5 evaluator """rankbased""" +564 6 dataset """yago310""" +564 6 model """rescal""" +564 6 loss """bceaftersigmoid""" +564 6 regularizer """no""" +564 6 optimizer """adam""" +564 6 training_loop """owa""" +564 6 negative_sampler """basic""" +564 6 evaluator """rankbased""" +564 7 dataset """yago310""" +564 7 model """rescal""" +564 7 loss """bceaftersigmoid""" +564 7 regularizer """no""" +564 7 optimizer """adam""" +564 7 training_loop """owa""" +564 7 negative_sampler """basic""" +564 7 evaluator """rankbased""" +564 8 dataset """yago310""" 
+564 8 model """rescal""" +564 8 loss """bceaftersigmoid""" +564 8 regularizer """no""" +564 8 optimizer """adam""" +564 8 training_loop """owa""" +564 8 negative_sampler """basic""" +564 8 evaluator """rankbased""" +564 9 dataset """yago310""" +564 9 model """rescal""" +564 9 loss """bceaftersigmoid""" +564 9 regularizer """no""" +564 9 optimizer """adam""" +564 9 training_loop """owa""" +564 9 negative_sampler """basic""" +564 9 evaluator """rankbased""" +564 10 dataset """yago310""" +564 10 model """rescal""" +564 10 loss """bceaftersigmoid""" +564 10 regularizer """no""" +564 10 optimizer """adam""" +564 10 training_loop """owa""" +564 10 negative_sampler """basic""" +564 10 evaluator """rankbased""" +564 11 dataset """yago310""" +564 11 model """rescal""" +564 11 loss """bceaftersigmoid""" +564 11 regularizer """no""" +564 11 optimizer """adam""" +564 11 training_loop """owa""" +564 11 negative_sampler """basic""" +564 11 evaluator """rankbased""" +564 12 dataset """yago310""" +564 12 model """rescal""" +564 12 loss """bceaftersigmoid""" +564 12 regularizer """no""" +564 12 optimizer """adam""" +564 12 training_loop """owa""" +564 12 negative_sampler """basic""" +564 12 evaluator """rankbased""" +565 1 model.embedding_dim 1.0 +565 1 loss.margin 3.51082351888984 +565 1 optimizer.lr 0.028550331229818135 +565 1 negative_sampler.num_negs_per_pos 23.0 +565 1 training.batch_size 1.0 +565 2 model.embedding_dim 1.0 +565 2 loss.margin 7.832721928245996 +565 2 optimizer.lr 0.008125759202142433 +565 2 negative_sampler.num_negs_per_pos 5.0 +565 2 training.batch_size 1.0 +565 3 model.embedding_dim 0.0 +565 3 loss.margin 8.916234266242496 +565 3 optimizer.lr 0.05621036107045595 +565 3 negative_sampler.num_negs_per_pos 21.0 +565 3 training.batch_size 0.0 +565 4 model.embedding_dim 1.0 +565 4 loss.margin 6.40049643613348 +565 4 optimizer.lr 0.0011349597881831846 +565 4 negative_sampler.num_negs_per_pos 38.0 +565 4 training.batch_size 3.0 +565 1 dataset """yago310""" +565 1 
model """rescal""" +565 1 loss """marginranking""" +565 1 regularizer """no""" +565 1 optimizer """adam""" +565 1 training_loop """owa""" +565 1 negative_sampler """basic""" +565 1 evaluator """rankbased""" +565 2 dataset """yago310""" +565 2 model """rescal""" +565 2 loss """marginranking""" +565 2 regularizer """no""" +565 2 optimizer """adam""" +565 2 training_loop """owa""" +565 2 negative_sampler """basic""" +565 2 evaluator """rankbased""" +565 3 dataset """yago310""" +565 3 model """rescal""" +565 3 loss """marginranking""" +565 3 regularizer """no""" +565 3 optimizer """adam""" +565 3 training_loop """owa""" +565 3 negative_sampler """basic""" +565 3 evaluator """rankbased""" +565 4 dataset """yago310""" +565 4 model """rescal""" +565 4 loss """marginranking""" +565 4 regularizer """no""" +565 4 optimizer """adam""" +565 4 training_loop """owa""" +565 4 negative_sampler """basic""" +565 4 evaluator """rankbased""" +566 1 model.embedding_dim 0.0 +566 1 loss.margin 7.618037811455994 +566 1 optimizer.lr 0.044358456981678604 +566 1 negative_sampler.num_negs_per_pos 13.0 +566 1 training.batch_size 2.0 +566 2 model.embedding_dim 2.0 +566 2 loss.margin 5.195376118518908 +566 2 optimizer.lr 0.0017469836644945867 +566 2 negative_sampler.num_negs_per_pos 36.0 +566 2 training.batch_size 1.0 +566 1 dataset """yago310""" +566 1 model """rescal""" +566 1 loss """marginranking""" +566 1 regularizer """no""" +566 1 optimizer """adam""" +566 1 training_loop """owa""" +566 1 negative_sampler """basic""" +566 1 evaluator """rankbased""" +566 2 dataset """yago310""" +566 2 model """rescal""" +566 2 loss """marginranking""" +566 2 regularizer """no""" +566 2 optimizer """adam""" +566 2 training_loop """owa""" +566 2 negative_sampler """basic""" +566 2 evaluator """rankbased""" +567 1 model.embedding_dim 1.0 +567 1 loss.margin 1.199539877283218 +567 1 loss.adversarial_temperature 0.15466362979856502 +567 1 optimizer.lr 0.06674844615321014 +567 1 negative_sampler.num_negs_per_pos 
46.0 +567 1 training.batch_size 0.0 +567 2 model.embedding_dim 1.0 +567 2 loss.margin 28.35157868601485 +567 2 loss.adversarial_temperature 0.4671627679882493 +567 2 optimizer.lr 0.002527060195604448 +567 2 negative_sampler.num_negs_per_pos 4.0 +567 2 training.batch_size 1.0 +567 1 dataset """yago310""" +567 1 model """rescal""" +567 1 loss """nssa""" +567 1 regularizer """no""" +567 1 optimizer """adam""" +567 1 training_loop """owa""" +567 1 negative_sampler """basic""" +567 1 evaluator """rankbased""" +567 2 dataset """yago310""" +567 2 model """rescal""" +567 2 loss """nssa""" +567 2 regularizer """no""" +567 2 optimizer """adam""" +567 2 training_loop """owa""" +567 2 negative_sampler """basic""" +567 2 evaluator """rankbased""" +568 1 model.embedding_dim 0.0 +568 1 loss.margin 23.949605262019798 +568 1 loss.adversarial_temperature 0.19594818824713495 +568 1 optimizer.lr 0.001102379932951327 +568 1 negative_sampler.num_negs_per_pos 25.0 +568 1 training.batch_size 1.0 +568 2 model.embedding_dim 0.0 +568 2 loss.margin 27.739573703761117 +568 2 loss.adversarial_temperature 0.6469390455213958 +568 2 optimizer.lr 0.04496414030156673 +568 2 negative_sampler.num_negs_per_pos 42.0 +568 2 training.batch_size 2.0 +568 3 model.embedding_dim 0.0 +568 3 loss.margin 17.028981711437158 +568 3 loss.adversarial_temperature 0.9280119144675613 +568 3 optimizer.lr 0.0012285663639239565 +568 3 negative_sampler.num_negs_per_pos 3.0 +568 3 training.batch_size 0.0 +568 4 model.embedding_dim 1.0 +568 4 loss.margin 12.209392106093883 +568 4 loss.adversarial_temperature 0.9253642378080058 +568 4 optimizer.lr 0.0011200182716123042 +568 4 negative_sampler.num_negs_per_pos 44.0 +568 4 training.batch_size 0.0 +568 1 dataset """yago310""" +568 1 model """rescal""" +568 1 loss """nssa""" +568 1 regularizer """no""" +568 1 optimizer """adam""" +568 1 training_loop """owa""" +568 1 negative_sampler """basic""" +568 1 evaluator """rankbased""" +568 2 dataset """yago310""" +568 2 model 
"""rescal""" +568 2 loss """nssa""" +568 2 regularizer """no""" +568 2 optimizer """adam""" +568 2 training_loop """owa""" +568 2 negative_sampler """basic""" +568 2 evaluator """rankbased""" +568 3 dataset """yago310""" +568 3 model """rescal""" +568 3 loss """nssa""" +568 3 regularizer """no""" +568 3 optimizer """adam""" +568 3 training_loop """owa""" +568 3 negative_sampler """basic""" +568 3 evaluator """rankbased""" +568 4 dataset """yago310""" +568 4 model """rescal""" +568 4 loss """nssa""" +568 4 regularizer """no""" +568 4 optimizer """adam""" +568 4 training_loop """owa""" +568 4 negative_sampler """basic""" +568 4 evaluator """rankbased""" +569 1 model.embedding_dim 1.0 +569 1 optimizer.lr 0.006175502938889183 +569 1 training.batch_size 0.0 +569 1 training.label_smoothing 0.09668661226994531 +569 2 model.embedding_dim 2.0 +569 2 optimizer.lr 0.0857440988059519 +569 2 training.batch_size 2.0 +569 2 training.label_smoothing 0.8557073831663842 +569 3 model.embedding_dim 2.0 +569 3 optimizer.lr 0.09125277278502035 +569 3 training.batch_size 2.0 +569 3 training.label_smoothing 0.23632000911184678 +569 4 model.embedding_dim 2.0 +569 4 optimizer.lr 0.005204225025026803 +569 4 training.batch_size 0.0 +569 4 training.label_smoothing 0.005628276874889222 +569 1 dataset """fb15k237""" +569 1 model """rotate""" +569 1 loss """crossentropy""" +569 1 regularizer """no""" +569 1 optimizer """adam""" +569 1 training_loop """lcwa""" +569 1 evaluator """rankbased""" +569 2 dataset """fb15k237""" +569 2 model """rotate""" +569 2 loss """crossentropy""" +569 2 regularizer """no""" +569 2 optimizer """adam""" +569 2 training_loop """lcwa""" +569 2 evaluator """rankbased""" +569 3 dataset """fb15k237""" +569 3 model """rotate""" +569 3 loss """crossentropy""" +569 3 regularizer """no""" +569 3 optimizer """adam""" +569 3 training_loop """lcwa""" +569 3 evaluator """rankbased""" +569 4 dataset """fb15k237""" +569 4 model """rotate""" +569 4 loss """crossentropy""" +569 4 
regularizer """no""" +569 4 optimizer """adam""" +569 4 training_loop """lcwa""" +569 4 evaluator """rankbased""" +570 1 model.embedding_dim 0.0 +570 1 optimizer.lr 0.005506054655256781 +570 1 training.batch_size 2.0 +570 1 training.label_smoothing 0.0109479025485709 +570 2 model.embedding_dim 0.0 +570 2 optimizer.lr 0.0010740931869128486 +570 2 training.batch_size 0.0 +570 2 training.label_smoothing 0.0669569372947811 +570 3 model.embedding_dim 0.0 +570 3 optimizer.lr 0.09318119137596281 +570 3 training.batch_size 2.0 +570 3 training.label_smoothing 0.05996541253683333 +570 4 model.embedding_dim 2.0 +570 4 optimizer.lr 0.04026458935924484 +570 4 training.batch_size 2.0 +570 4 training.label_smoothing 0.05227108994508883 +570 5 model.embedding_dim 1.0 +570 5 optimizer.lr 0.0010798024849633901 +570 5 training.batch_size 2.0 +570 5 training.label_smoothing 0.021855438931483645 +570 6 model.embedding_dim 1.0 +570 6 optimizer.lr 0.0018913243754544661 +570 6 training.batch_size 0.0 +570 6 training.label_smoothing 0.40831244131440014 +570 7 model.embedding_dim 0.0 +570 7 optimizer.lr 0.0011399174940955252 +570 7 training.batch_size 0.0 +570 7 training.label_smoothing 0.008063050717025193 +570 8 model.embedding_dim 1.0 +570 8 optimizer.lr 0.02163436733215213 +570 8 training.batch_size 0.0 +570 8 training.label_smoothing 0.029346847688281958 +570 9 model.embedding_dim 2.0 +570 9 optimizer.lr 0.006907526013297905 +570 9 training.batch_size 2.0 +570 9 training.label_smoothing 0.009023970648199884 +570 10 model.embedding_dim 1.0 +570 10 optimizer.lr 0.0012534755700626008 +570 10 training.batch_size 2.0 +570 10 training.label_smoothing 0.46588217106004803 +570 11 model.embedding_dim 0.0 +570 11 optimizer.lr 0.017946550770198706 +570 11 training.batch_size 0.0 +570 11 training.label_smoothing 0.0990665815844784 +570 12 model.embedding_dim 2.0 +570 12 optimizer.lr 0.013649677945473115 +570 12 training.batch_size 0.0 +570 12 training.label_smoothing 0.0030995558223716217 +570 13 
model.embedding_dim 2.0 +570 13 optimizer.lr 0.010128160843977881 +570 13 training.batch_size 2.0 +570 13 training.label_smoothing 0.005922502140992592 +570 14 model.embedding_dim 1.0 +570 14 optimizer.lr 0.001493152527018863 +570 14 training.batch_size 1.0 +570 14 training.label_smoothing 0.0030420556393079847 +570 15 model.embedding_dim 2.0 +570 15 optimizer.lr 0.026357335378164076 +570 15 training.batch_size 0.0 +570 15 training.label_smoothing 0.3494001269375627 +570 1 dataset """fb15k237""" +570 1 model """rotate""" +570 1 loss """crossentropy""" +570 1 regularizer """no""" +570 1 optimizer """adam""" +570 1 training_loop """lcwa""" +570 1 evaluator """rankbased""" +570 2 dataset """fb15k237""" +570 2 model """rotate""" +570 2 loss """crossentropy""" +570 2 regularizer """no""" +570 2 optimizer """adam""" +570 2 training_loop """lcwa""" +570 2 evaluator """rankbased""" +570 3 dataset """fb15k237""" +570 3 model """rotate""" +570 3 loss """crossentropy""" +570 3 regularizer """no""" +570 3 optimizer """adam""" +570 3 training_loop """lcwa""" +570 3 evaluator """rankbased""" +570 4 dataset """fb15k237""" +570 4 model """rotate""" +570 4 loss """crossentropy""" +570 4 regularizer """no""" +570 4 optimizer """adam""" +570 4 training_loop """lcwa""" +570 4 evaluator """rankbased""" +570 5 dataset """fb15k237""" +570 5 model """rotate""" +570 5 loss """crossentropy""" +570 5 regularizer """no""" +570 5 optimizer """adam""" +570 5 training_loop """lcwa""" +570 5 evaluator """rankbased""" +570 6 dataset """fb15k237""" +570 6 model """rotate""" +570 6 loss """crossentropy""" +570 6 regularizer """no""" +570 6 optimizer """adam""" +570 6 training_loop """lcwa""" +570 6 evaluator """rankbased""" +570 7 dataset """fb15k237""" +570 7 model """rotate""" +570 7 loss """crossentropy""" +570 7 regularizer """no""" +570 7 optimizer """adam""" +570 7 training_loop """lcwa""" +570 7 evaluator """rankbased""" +570 8 dataset """fb15k237""" +570 8 model """rotate""" +570 8 loss 
"""crossentropy""" +570 8 regularizer """no""" +570 8 optimizer """adam""" +570 8 training_loop """lcwa""" +570 8 evaluator """rankbased""" +570 9 dataset """fb15k237""" +570 9 model """rotate""" +570 9 loss """crossentropy""" +570 9 regularizer """no""" +570 9 optimizer """adam""" +570 9 training_loop """lcwa""" +570 9 evaluator """rankbased""" +570 10 dataset """fb15k237""" +570 10 model """rotate""" +570 10 loss """crossentropy""" +570 10 regularizer """no""" +570 10 optimizer """adam""" +570 10 training_loop """lcwa""" +570 10 evaluator """rankbased""" +570 11 dataset """fb15k237""" +570 11 model """rotate""" +570 11 loss """crossentropy""" +570 11 regularizer """no""" +570 11 optimizer """adam""" +570 11 training_loop """lcwa""" +570 11 evaluator """rankbased""" +570 12 dataset """fb15k237""" +570 12 model """rotate""" +570 12 loss """crossentropy""" +570 12 regularizer """no""" +570 12 optimizer """adam""" +570 12 training_loop """lcwa""" +570 12 evaluator """rankbased""" +570 13 dataset """fb15k237""" +570 13 model """rotate""" +570 13 loss """crossentropy""" +570 13 regularizer """no""" +570 13 optimizer """adam""" +570 13 training_loop """lcwa""" +570 13 evaluator """rankbased""" +570 14 dataset """fb15k237""" +570 14 model """rotate""" +570 14 loss """crossentropy""" +570 14 regularizer """no""" +570 14 optimizer """adam""" +570 14 training_loop """lcwa""" +570 14 evaluator """rankbased""" +570 15 dataset """fb15k237""" +570 15 model """rotate""" +570 15 loss """crossentropy""" +570 15 regularizer """no""" +570 15 optimizer """adam""" +570 15 training_loop """lcwa""" +570 15 evaluator """rankbased""" +571 1 model.embedding_dim 0.0 +571 1 optimizer.lr 0.06804119864572891 +571 1 negative_sampler.num_negs_per_pos 54.0 +571 1 training.batch_size 2.0 +571 2 model.embedding_dim 2.0 +571 2 optimizer.lr 0.0012332119060241662 +571 2 negative_sampler.num_negs_per_pos 69.0 +571 2 training.batch_size 1.0 +571 3 model.embedding_dim 0.0 +571 3 optimizer.lr 
0.0049903659983328765 +571 3 negative_sampler.num_negs_per_pos 22.0 +571 3 training.batch_size 1.0 +571 4 model.embedding_dim 2.0 +571 4 optimizer.lr 0.004243790306710061 +571 4 negative_sampler.num_negs_per_pos 28.0 +571 4 training.batch_size 1.0 +571 5 model.embedding_dim 1.0 +571 5 optimizer.lr 0.004636400786998647 +571 5 negative_sampler.num_negs_per_pos 26.0 +571 5 training.batch_size 0.0 +571 6 model.embedding_dim 2.0 +571 6 optimizer.lr 0.0023888522277116155 +571 6 negative_sampler.num_negs_per_pos 73.0 +571 6 training.batch_size 0.0 +571 7 model.embedding_dim 0.0 +571 7 optimizer.lr 0.0174596950600549 +571 7 negative_sampler.num_negs_per_pos 2.0 +571 7 training.batch_size 2.0 +571 8 model.embedding_dim 2.0 +571 8 optimizer.lr 0.00730956073394665 +571 8 negative_sampler.num_negs_per_pos 92.0 +571 8 training.batch_size 0.0 +571 9 model.embedding_dim 1.0 +571 9 optimizer.lr 0.05220142513332015 +571 9 negative_sampler.num_negs_per_pos 0.0 +571 9 training.batch_size 0.0 +571 10 model.embedding_dim 0.0 +571 10 optimizer.lr 0.0010923886927455687 +571 10 negative_sampler.num_negs_per_pos 13.0 +571 10 training.batch_size 1.0 +571 11 model.embedding_dim 2.0 +571 11 optimizer.lr 0.003063563771261783 +571 11 negative_sampler.num_negs_per_pos 17.0 +571 11 training.batch_size 0.0 +571 12 model.embedding_dim 2.0 +571 12 optimizer.lr 0.011687005965566578 +571 12 negative_sampler.num_negs_per_pos 66.0 +571 12 training.batch_size 1.0 +571 13 model.embedding_dim 2.0 +571 13 optimizer.lr 0.0016610587038282188 +571 13 negative_sampler.num_negs_per_pos 45.0 +571 13 training.batch_size 2.0 +571 14 model.embedding_dim 1.0 +571 14 optimizer.lr 0.05363804264192744 +571 14 negative_sampler.num_negs_per_pos 91.0 +571 14 training.batch_size 1.0 +571 15 model.embedding_dim 0.0 +571 15 optimizer.lr 0.008378350943364562 +571 15 negative_sampler.num_negs_per_pos 90.0 +571 15 training.batch_size 0.0 +571 16 model.embedding_dim 2.0 +571 16 optimizer.lr 0.0036782918699596804 +571 16 
negative_sampler.num_negs_per_pos 18.0 +571 16 training.batch_size 2.0 +571 17 model.embedding_dim 0.0 +571 17 optimizer.lr 0.018257189693424488 +571 17 negative_sampler.num_negs_per_pos 40.0 +571 17 training.batch_size 0.0 +571 18 model.embedding_dim 1.0 +571 18 optimizer.lr 0.00865984712216863 +571 18 negative_sampler.num_negs_per_pos 37.0 +571 18 training.batch_size 0.0 +571 19 model.embedding_dim 1.0 +571 19 optimizer.lr 0.007472564512491061 +571 19 negative_sampler.num_negs_per_pos 97.0 +571 19 training.batch_size 0.0 +571 20 model.embedding_dim 0.0 +571 20 optimizer.lr 0.03311686209675845 +571 20 negative_sampler.num_negs_per_pos 69.0 +571 20 training.batch_size 1.0 +571 21 model.embedding_dim 2.0 +571 21 optimizer.lr 0.01756990846665831 +571 21 negative_sampler.num_negs_per_pos 91.0 +571 21 training.batch_size 0.0 +571 22 model.embedding_dim 0.0 +571 22 optimizer.lr 0.001682601105866401 +571 22 negative_sampler.num_negs_per_pos 51.0 +571 22 training.batch_size 1.0 +571 23 model.embedding_dim 2.0 +571 23 optimizer.lr 0.0017120758674952572 +571 23 negative_sampler.num_negs_per_pos 67.0 +571 23 training.batch_size 2.0 +571 24 model.embedding_dim 0.0 +571 24 optimizer.lr 0.014259679028748168 +571 24 negative_sampler.num_negs_per_pos 82.0 +571 24 training.batch_size 0.0 +571 25 model.embedding_dim 0.0 +571 25 optimizer.lr 0.0026678622422606317 +571 25 negative_sampler.num_negs_per_pos 79.0 +571 25 training.batch_size 1.0 +571 26 model.embedding_dim 0.0 +571 26 optimizer.lr 0.0024824340444796 +571 26 negative_sampler.num_negs_per_pos 16.0 +571 26 training.batch_size 2.0 +571 27 model.embedding_dim 2.0 +571 27 optimizer.lr 0.004019950836552587 +571 27 negative_sampler.num_negs_per_pos 37.0 +571 27 training.batch_size 0.0 +571 28 model.embedding_dim 1.0 +571 28 optimizer.lr 0.004469657154228499 +571 28 negative_sampler.num_negs_per_pos 83.0 +571 28 training.batch_size 2.0 +571 29 model.embedding_dim 1.0 +571 29 optimizer.lr 0.0036066874088002653 +571 29 
negative_sampler.num_negs_per_pos 77.0 +571 29 training.batch_size 0.0 +571 1 dataset """fb15k237""" +571 1 model """rotate""" +571 1 loss """bceaftersigmoid""" +571 1 regularizer """no""" +571 1 optimizer """adam""" +571 1 training_loop """owa""" +571 1 negative_sampler """basic""" +571 1 evaluator """rankbased""" +571 2 dataset """fb15k237""" +571 2 model """rotate""" +571 2 loss """bceaftersigmoid""" +571 2 regularizer """no""" +571 2 optimizer """adam""" +571 2 training_loop """owa""" +571 2 negative_sampler """basic""" +571 2 evaluator """rankbased""" +571 3 dataset """fb15k237""" +571 3 model """rotate""" +571 3 loss """bceaftersigmoid""" +571 3 regularizer """no""" +571 3 optimizer """adam""" +571 3 training_loop """owa""" +571 3 negative_sampler """basic""" +571 3 evaluator """rankbased""" +571 4 dataset """fb15k237""" +571 4 model """rotate""" +571 4 loss """bceaftersigmoid""" +571 4 regularizer """no""" +571 4 optimizer """adam""" +571 4 training_loop """owa""" +571 4 negative_sampler """basic""" +571 4 evaluator """rankbased""" +571 5 dataset """fb15k237""" +571 5 model """rotate""" +571 5 loss """bceaftersigmoid""" +571 5 regularizer """no""" +571 5 optimizer """adam""" +571 5 training_loop """owa""" +571 5 negative_sampler """basic""" +571 5 evaluator """rankbased""" +571 6 dataset """fb15k237""" +571 6 model """rotate""" +571 6 loss """bceaftersigmoid""" +571 6 regularizer """no""" +571 6 optimizer """adam""" +571 6 training_loop """owa""" +571 6 negative_sampler """basic""" +571 6 evaluator """rankbased""" +571 7 dataset """fb15k237""" +571 7 model """rotate""" +571 7 loss """bceaftersigmoid""" +571 7 regularizer """no""" +571 7 optimizer """adam""" +571 7 training_loop """owa""" +571 7 negative_sampler """basic""" +571 7 evaluator """rankbased""" +571 8 dataset """fb15k237""" +571 8 model """rotate""" +571 8 loss """bceaftersigmoid""" +571 8 regularizer """no""" +571 8 optimizer """adam""" +571 8 training_loop """owa""" +571 8 negative_sampler 
"""basic""" +571 8 evaluator """rankbased""" +571 9 dataset """fb15k237""" +571 9 model """rotate""" +571 9 loss """bceaftersigmoid""" +571 9 regularizer """no""" +571 9 optimizer """adam""" +571 9 training_loop """owa""" +571 9 negative_sampler """basic""" +571 9 evaluator """rankbased""" +571 10 dataset """fb15k237""" +571 10 model """rotate""" +571 10 loss """bceaftersigmoid""" +571 10 regularizer """no""" +571 10 optimizer """adam""" +571 10 training_loop """owa""" +571 10 negative_sampler """basic""" +571 10 evaluator """rankbased""" +571 11 dataset """fb15k237""" +571 11 model """rotate""" +571 11 loss """bceaftersigmoid""" +571 11 regularizer """no""" +571 11 optimizer """adam""" +571 11 training_loop """owa""" +571 11 negative_sampler """basic""" +571 11 evaluator """rankbased""" +571 12 dataset """fb15k237""" +571 12 model """rotate""" +571 12 loss """bceaftersigmoid""" +571 12 regularizer """no""" +571 12 optimizer """adam""" +571 12 training_loop """owa""" +571 12 negative_sampler """basic""" +571 12 evaluator """rankbased""" +571 13 dataset """fb15k237""" +571 13 model """rotate""" +571 13 loss """bceaftersigmoid""" +571 13 regularizer """no""" +571 13 optimizer """adam""" +571 13 training_loop """owa""" +571 13 negative_sampler """basic""" +571 13 evaluator """rankbased""" +571 14 dataset """fb15k237""" +571 14 model """rotate""" +571 14 loss """bceaftersigmoid""" +571 14 regularizer """no""" +571 14 optimizer """adam""" +571 14 training_loop """owa""" +571 14 negative_sampler """basic""" +571 14 evaluator """rankbased""" +571 15 dataset """fb15k237""" +571 15 model """rotate""" +571 15 loss """bceaftersigmoid""" +571 15 regularizer """no""" +571 15 optimizer """adam""" +571 15 training_loop """owa""" +571 15 negative_sampler """basic""" +571 15 evaluator """rankbased""" +571 16 dataset """fb15k237""" +571 16 model """rotate""" +571 16 loss """bceaftersigmoid""" +571 16 regularizer """no""" +571 16 optimizer """adam""" +571 16 training_loop """owa""" 
+571 16 negative_sampler """basic""" +571 16 evaluator """rankbased""" +571 17 dataset """fb15k237""" +571 17 model """rotate""" +571 17 loss """bceaftersigmoid""" +571 17 regularizer """no""" +571 17 optimizer """adam""" +571 17 training_loop """owa""" +571 17 negative_sampler """basic""" +571 17 evaluator """rankbased""" +571 18 dataset """fb15k237""" +571 18 model """rotate""" +571 18 loss """bceaftersigmoid""" +571 18 regularizer """no""" +571 18 optimizer """adam""" +571 18 training_loop """owa""" +571 18 negative_sampler """basic""" +571 18 evaluator """rankbased""" +571 19 dataset """fb15k237""" +571 19 model """rotate""" +571 19 loss """bceaftersigmoid""" +571 19 regularizer """no""" +571 19 optimizer """adam""" +571 19 training_loop """owa""" +571 19 negative_sampler """basic""" +571 19 evaluator """rankbased""" +571 20 dataset """fb15k237""" +571 20 model """rotate""" +571 20 loss """bceaftersigmoid""" +571 20 regularizer """no""" +571 20 optimizer """adam""" +571 20 training_loop """owa""" +571 20 negative_sampler """basic""" +571 20 evaluator """rankbased""" +571 21 dataset """fb15k237""" +571 21 model """rotate""" +571 21 loss """bceaftersigmoid""" +571 21 regularizer """no""" +571 21 optimizer """adam""" +571 21 training_loop """owa""" +571 21 negative_sampler """basic""" +571 21 evaluator """rankbased""" +571 22 dataset """fb15k237""" +571 22 model """rotate""" +571 22 loss """bceaftersigmoid""" +571 22 regularizer """no""" +571 22 optimizer """adam""" +571 22 training_loop """owa""" +571 22 negative_sampler """basic""" +571 22 evaluator """rankbased""" +571 23 dataset """fb15k237""" +571 23 model """rotate""" +571 23 loss """bceaftersigmoid""" +571 23 regularizer """no""" +571 23 optimizer """adam""" +571 23 training_loop """owa""" +571 23 negative_sampler """basic""" +571 23 evaluator """rankbased""" +571 24 dataset """fb15k237""" +571 24 model """rotate""" +571 24 loss """bceaftersigmoid""" +571 24 regularizer """no""" +571 24 optimizer """adam""" 
+571 24 training_loop """owa""" +571 24 negative_sampler """basic""" +571 24 evaluator """rankbased""" +571 25 dataset """fb15k237""" +571 25 model """rotate""" +571 25 loss """bceaftersigmoid""" +571 25 regularizer """no""" +571 25 optimizer """adam""" +571 25 training_loop """owa""" +571 25 negative_sampler """basic""" +571 25 evaluator """rankbased""" +571 26 dataset """fb15k237""" +571 26 model """rotate""" +571 26 loss """bceaftersigmoid""" +571 26 regularizer """no""" +571 26 optimizer """adam""" +571 26 training_loop """owa""" +571 26 negative_sampler """basic""" +571 26 evaluator """rankbased""" +571 27 dataset """fb15k237""" +571 27 model """rotate""" +571 27 loss """bceaftersigmoid""" +571 27 regularizer """no""" +571 27 optimizer """adam""" +571 27 training_loop """owa""" +571 27 negative_sampler """basic""" +571 27 evaluator """rankbased""" +571 28 dataset """fb15k237""" +571 28 model """rotate""" +571 28 loss """bceaftersigmoid""" +571 28 regularizer """no""" +571 28 optimizer """adam""" +571 28 training_loop """owa""" +571 28 negative_sampler """basic""" +571 28 evaluator """rankbased""" +571 29 dataset """fb15k237""" +571 29 model """rotate""" +571 29 loss """bceaftersigmoid""" +571 29 regularizer """no""" +571 29 optimizer """adam""" +571 29 training_loop """owa""" +571 29 negative_sampler """basic""" +571 29 evaluator """rankbased""" +572 1 model.embedding_dim 1.0 +572 1 optimizer.lr 0.027021487530281687 +572 1 negative_sampler.num_negs_per_pos 69.0 +572 1 training.batch_size 2.0 +572 2 model.embedding_dim 2.0 +572 2 optimizer.lr 0.001341543899399063 +572 2 negative_sampler.num_negs_per_pos 80.0 +572 2 training.batch_size 1.0 +572 3 model.embedding_dim 0.0 +572 3 optimizer.lr 0.0018194585225911656 +572 3 negative_sampler.num_negs_per_pos 33.0 +572 3 training.batch_size 0.0 +572 4 model.embedding_dim 0.0 +572 4 optimizer.lr 0.008656465628981666 +572 4 negative_sampler.num_negs_per_pos 35.0 +572 4 training.batch_size 0.0 +572 5 model.embedding_dim 
2.0 +572 5 optimizer.lr 0.00650821754865959 +572 5 negative_sampler.num_negs_per_pos 66.0 +572 5 training.batch_size 0.0 +572 6 model.embedding_dim 1.0 +572 6 optimizer.lr 0.0010468215682555013 +572 6 negative_sampler.num_negs_per_pos 50.0 +572 6 training.batch_size 2.0 +572 7 model.embedding_dim 0.0 +572 7 optimizer.lr 0.025227730997225205 +572 7 negative_sampler.num_negs_per_pos 86.0 +572 7 training.batch_size 0.0 +572 8 model.embedding_dim 1.0 +572 8 optimizer.lr 0.02196036408101259 +572 8 negative_sampler.num_negs_per_pos 93.0 +572 8 training.batch_size 0.0 +572 9 model.embedding_dim 2.0 +572 9 optimizer.lr 0.007419715493778117 +572 9 negative_sampler.num_negs_per_pos 28.0 +572 9 training.batch_size 1.0 +572 10 model.embedding_dim 2.0 +572 10 optimizer.lr 0.009456714751749027 +572 10 negative_sampler.num_negs_per_pos 38.0 +572 10 training.batch_size 0.0 +572 11 model.embedding_dim 0.0 +572 11 optimizer.lr 0.050718583994391934 +572 11 negative_sampler.num_negs_per_pos 18.0 +572 11 training.batch_size 0.0 +572 12 model.embedding_dim 2.0 +572 12 optimizer.lr 0.001070743782445868 +572 12 negative_sampler.num_negs_per_pos 10.0 +572 12 training.batch_size 1.0 +572 13 model.embedding_dim 1.0 +572 13 optimizer.lr 0.04442193400071099 +572 13 negative_sampler.num_negs_per_pos 31.0 +572 13 training.batch_size 0.0 +572 14 model.embedding_dim 1.0 +572 14 optimizer.lr 0.06661745319956919 +572 14 negative_sampler.num_negs_per_pos 70.0 +572 14 training.batch_size 2.0 +572 15 model.embedding_dim 1.0 +572 15 optimizer.lr 0.0013379214357157744 +572 15 negative_sampler.num_negs_per_pos 72.0 +572 15 training.batch_size 1.0 +572 16 model.embedding_dim 2.0 +572 16 optimizer.lr 0.0010088011560546177 +572 16 negative_sampler.num_negs_per_pos 71.0 +572 16 training.batch_size 0.0 +572 17 model.embedding_dim 0.0 +572 17 optimizer.lr 0.005439659800851662 +572 17 negative_sampler.num_negs_per_pos 10.0 +572 17 training.batch_size 2.0 +572 18 model.embedding_dim 0.0 +572 18 optimizer.lr 
0.006145093678669935 +572 18 negative_sampler.num_negs_per_pos 31.0 +572 18 training.batch_size 0.0 +572 19 model.embedding_dim 0.0 +572 19 optimizer.lr 0.040158193892815296 +572 19 negative_sampler.num_negs_per_pos 99.0 +572 19 training.batch_size 2.0 +572 20 model.embedding_dim 1.0 +572 20 optimizer.lr 0.017920931010418733 +572 20 negative_sampler.num_negs_per_pos 30.0 +572 20 training.batch_size 2.0 +572 21 model.embedding_dim 0.0 +572 21 optimizer.lr 0.0017497446489974774 +572 21 negative_sampler.num_negs_per_pos 83.0 +572 21 training.batch_size 1.0 +572 22 model.embedding_dim 2.0 +572 22 optimizer.lr 0.027994484418047534 +572 22 negative_sampler.num_negs_per_pos 3.0 +572 22 training.batch_size 2.0 +572 23 model.embedding_dim 2.0 +572 23 optimizer.lr 0.0328314214733794 +572 23 negative_sampler.num_negs_per_pos 96.0 +572 23 training.batch_size 2.0 +572 24 model.embedding_dim 1.0 +572 24 optimizer.lr 0.03884662747076956 +572 24 negative_sampler.num_negs_per_pos 9.0 +572 24 training.batch_size 2.0 +572 25 model.embedding_dim 1.0 +572 25 optimizer.lr 0.016142927823204638 +572 25 negative_sampler.num_negs_per_pos 64.0 +572 25 training.batch_size 1.0 +572 26 model.embedding_dim 0.0 +572 26 optimizer.lr 0.007339821992304162 +572 26 negative_sampler.num_negs_per_pos 54.0 +572 26 training.batch_size 0.0 +572 27 model.embedding_dim 2.0 +572 27 optimizer.lr 0.0015049301682640852 +572 27 negative_sampler.num_negs_per_pos 11.0 +572 27 training.batch_size 2.0 +572 28 model.embedding_dim 1.0 +572 28 optimizer.lr 0.0013688022937042063 +572 28 negative_sampler.num_negs_per_pos 13.0 +572 28 training.batch_size 2.0 +572 29 model.embedding_dim 0.0 +572 29 optimizer.lr 0.020459449439505895 +572 29 negative_sampler.num_negs_per_pos 21.0 +572 29 training.batch_size 2.0 +572 30 model.embedding_dim 2.0 +572 30 optimizer.lr 0.05186710399485781 +572 30 negative_sampler.num_negs_per_pos 99.0 +572 30 training.batch_size 1.0 +572 31 model.embedding_dim 2.0 +572 31 optimizer.lr 
0.002812799337191173 +572 31 negative_sampler.num_negs_per_pos 88.0 +572 31 training.batch_size 2.0 +572 1 dataset """fb15k237""" +572 1 model """rotate""" +572 1 loss """softplus""" +572 1 regularizer """no""" +572 1 optimizer """adam""" +572 1 training_loop """owa""" +572 1 negative_sampler """basic""" +572 1 evaluator """rankbased""" +572 2 dataset """fb15k237""" +572 2 model """rotate""" +572 2 loss """softplus""" +572 2 regularizer """no""" +572 2 optimizer """adam""" +572 2 training_loop """owa""" +572 2 negative_sampler """basic""" +572 2 evaluator """rankbased""" +572 3 dataset """fb15k237""" +572 3 model """rotate""" +572 3 loss """softplus""" +572 3 regularizer """no""" +572 3 optimizer """adam""" +572 3 training_loop """owa""" +572 3 negative_sampler """basic""" +572 3 evaluator """rankbased""" +572 4 dataset """fb15k237""" +572 4 model """rotate""" +572 4 loss """softplus""" +572 4 regularizer """no""" +572 4 optimizer """adam""" +572 4 training_loop """owa""" +572 4 negative_sampler """basic""" +572 4 evaluator """rankbased""" +572 5 dataset """fb15k237""" +572 5 model """rotate""" +572 5 loss """softplus""" +572 5 regularizer """no""" +572 5 optimizer """adam""" +572 5 training_loop """owa""" +572 5 negative_sampler """basic""" +572 5 evaluator """rankbased""" +572 6 dataset """fb15k237""" +572 6 model """rotate""" +572 6 loss """softplus""" +572 6 regularizer """no""" +572 6 optimizer """adam""" +572 6 training_loop """owa""" +572 6 negative_sampler """basic""" +572 6 evaluator """rankbased""" +572 7 dataset """fb15k237""" +572 7 model """rotate""" +572 7 loss """softplus""" +572 7 regularizer """no""" +572 7 optimizer """adam""" +572 7 training_loop """owa""" +572 7 negative_sampler """basic""" +572 7 evaluator """rankbased""" +572 8 dataset """fb15k237""" +572 8 model """rotate""" +572 8 loss """softplus""" +572 8 regularizer """no""" +572 8 optimizer """adam""" +572 8 training_loop """owa""" +572 8 negative_sampler """basic""" +572 8 evaluator 
"""rankbased""" +572 9 dataset """fb15k237""" +572 9 model """rotate""" +572 9 loss """softplus""" +572 9 regularizer """no""" +572 9 optimizer """adam""" +572 9 training_loop """owa""" +572 9 negative_sampler """basic""" +572 9 evaluator """rankbased""" +572 10 dataset """fb15k237""" +572 10 model """rotate""" +572 10 loss """softplus""" +572 10 regularizer """no""" +572 10 optimizer """adam""" +572 10 training_loop """owa""" +572 10 negative_sampler """basic""" +572 10 evaluator """rankbased""" +572 11 dataset """fb15k237""" +572 11 model """rotate""" +572 11 loss """softplus""" +572 11 regularizer """no""" +572 11 optimizer """adam""" +572 11 training_loop """owa""" +572 11 negative_sampler """basic""" +572 11 evaluator """rankbased""" +572 12 dataset """fb15k237""" +572 12 model """rotate""" +572 12 loss """softplus""" +572 12 regularizer """no""" +572 12 optimizer """adam""" +572 12 training_loop """owa""" +572 12 negative_sampler """basic""" +572 12 evaluator """rankbased""" +572 13 dataset """fb15k237""" +572 13 model """rotate""" +572 13 loss """softplus""" +572 13 regularizer """no""" +572 13 optimizer """adam""" +572 13 training_loop """owa""" +572 13 negative_sampler """basic""" +572 13 evaluator """rankbased""" +572 14 dataset """fb15k237""" +572 14 model """rotate""" +572 14 loss """softplus""" +572 14 regularizer """no""" +572 14 optimizer """adam""" +572 14 training_loop """owa""" +572 14 negative_sampler """basic""" +572 14 evaluator """rankbased""" +572 15 dataset """fb15k237""" +572 15 model """rotate""" +572 15 loss """softplus""" +572 15 regularizer """no""" +572 15 optimizer """adam""" +572 15 training_loop """owa""" +572 15 negative_sampler """basic""" +572 15 evaluator """rankbased""" +572 16 dataset """fb15k237""" +572 16 model """rotate""" +572 16 loss """softplus""" +572 16 regularizer """no""" +572 16 optimizer """adam""" +572 16 training_loop """owa""" +572 16 negative_sampler """basic""" +572 16 evaluator """rankbased""" +572 17 dataset 
"""fb15k237""" +572 17 model """rotate""" +572 17 loss """softplus""" +572 17 regularizer """no""" +572 17 optimizer """adam""" +572 17 training_loop """owa""" +572 17 negative_sampler """basic""" +572 17 evaluator """rankbased""" +572 18 dataset """fb15k237""" +572 18 model """rotate""" +572 18 loss """softplus""" +572 18 regularizer """no""" +572 18 optimizer """adam""" +572 18 training_loop """owa""" +572 18 negative_sampler """basic""" +572 18 evaluator """rankbased""" +572 19 dataset """fb15k237""" +572 19 model """rotate""" +572 19 loss """softplus""" +572 19 regularizer """no""" +572 19 optimizer """adam""" +572 19 training_loop """owa""" +572 19 negative_sampler """basic""" +572 19 evaluator """rankbased""" +572 20 dataset """fb15k237""" +572 20 model """rotate""" +572 20 loss """softplus""" +572 20 regularizer """no""" +572 20 optimizer """adam""" +572 20 training_loop """owa""" +572 20 negative_sampler """basic""" +572 20 evaluator """rankbased""" +572 21 dataset """fb15k237""" +572 21 model """rotate""" +572 21 loss """softplus""" +572 21 regularizer """no""" +572 21 optimizer """adam""" +572 21 training_loop """owa""" +572 21 negative_sampler """basic""" +572 21 evaluator """rankbased""" +572 22 dataset """fb15k237""" +572 22 model """rotate""" +572 22 loss """softplus""" +572 22 regularizer """no""" +572 22 optimizer """adam""" +572 22 training_loop """owa""" +572 22 negative_sampler """basic""" +572 22 evaluator """rankbased""" +572 23 dataset """fb15k237""" +572 23 model """rotate""" +572 23 loss """softplus""" +572 23 regularizer """no""" +572 23 optimizer """adam""" +572 23 training_loop """owa""" +572 23 negative_sampler """basic""" +572 23 evaluator """rankbased""" +572 24 dataset """fb15k237""" +572 24 model """rotate""" +572 24 loss """softplus""" +572 24 regularizer """no""" +572 24 optimizer """adam""" +572 24 training_loop """owa""" +572 24 negative_sampler """basic""" +572 24 evaluator """rankbased""" +572 25 dataset """fb15k237""" +572 25 
model """rotate""" +572 25 loss """softplus""" +572 25 regularizer """no""" +572 25 optimizer """adam""" +572 25 training_loop """owa""" +572 25 negative_sampler """basic""" +572 25 evaluator """rankbased""" +572 26 dataset """fb15k237""" +572 26 model """rotate""" +572 26 loss """softplus""" +572 26 regularizer """no""" +572 26 optimizer """adam""" +572 26 training_loop """owa""" +572 26 negative_sampler """basic""" +572 26 evaluator """rankbased""" +572 27 dataset """fb15k237""" +572 27 model """rotate""" +572 27 loss """softplus""" +572 27 regularizer """no""" +572 27 optimizer """adam""" +572 27 training_loop """owa""" +572 27 negative_sampler """basic""" +572 27 evaluator """rankbased""" +572 28 dataset """fb15k237""" +572 28 model """rotate""" +572 28 loss """softplus""" +572 28 regularizer """no""" +572 28 optimizer """adam""" +572 28 training_loop """owa""" +572 28 negative_sampler """basic""" +572 28 evaluator """rankbased""" +572 29 dataset """fb15k237""" +572 29 model """rotate""" +572 29 loss """softplus""" +572 29 regularizer """no""" +572 29 optimizer """adam""" +572 29 training_loop """owa""" +572 29 negative_sampler """basic""" +572 29 evaluator """rankbased""" +572 30 dataset """fb15k237""" +572 30 model """rotate""" +572 30 loss """softplus""" +572 30 regularizer """no""" +572 30 optimizer """adam""" +572 30 training_loop """owa""" +572 30 negative_sampler """basic""" +572 30 evaluator """rankbased""" +572 31 dataset """fb15k237""" +572 31 model """rotate""" +572 31 loss """softplus""" +572 31 regularizer """no""" +572 31 optimizer """adam""" +572 31 training_loop """owa""" +572 31 negative_sampler """basic""" +572 31 evaluator """rankbased""" +573 1 model.embedding_dim 0.0 +573 1 optimizer.lr 0.003821830315308667 +573 1 negative_sampler.num_negs_per_pos 91.0 +573 1 training.batch_size 2.0 +573 2 model.embedding_dim 1.0 +573 2 optimizer.lr 0.0022691171938971663 +573 2 negative_sampler.num_negs_per_pos 1.0 +573 2 training.batch_size 2.0 +573 3 
model.embedding_dim 0.0 +573 3 optimizer.lr 0.014511883437385405 +573 3 negative_sampler.num_negs_per_pos 3.0 +573 3 training.batch_size 0.0 +573 4 model.embedding_dim 1.0 +573 4 optimizer.lr 0.00292081693312295 +573 4 negative_sampler.num_negs_per_pos 27.0 +573 4 training.batch_size 1.0 +573 5 model.embedding_dim 2.0 +573 5 optimizer.lr 0.002701228703795191 +573 5 negative_sampler.num_negs_per_pos 97.0 +573 5 training.batch_size 1.0 +573 6 model.embedding_dim 1.0 +573 6 optimizer.lr 0.016869935051087553 +573 6 negative_sampler.num_negs_per_pos 73.0 +573 6 training.batch_size 0.0 +573 7 model.embedding_dim 1.0 +573 7 optimizer.lr 0.0012535852456965156 +573 7 negative_sampler.num_negs_per_pos 16.0 +573 7 training.batch_size 2.0 +573 8 model.embedding_dim 1.0 +573 8 optimizer.lr 0.0025107983376835485 +573 8 negative_sampler.num_negs_per_pos 96.0 +573 8 training.batch_size 0.0 +573 9 model.embedding_dim 2.0 +573 9 optimizer.lr 0.03946925118461664 +573 9 negative_sampler.num_negs_per_pos 14.0 +573 9 training.batch_size 0.0 +573 10 model.embedding_dim 0.0 +573 10 optimizer.lr 0.003880302789919885 +573 10 negative_sampler.num_negs_per_pos 39.0 +573 10 training.batch_size 0.0 +573 11 model.embedding_dim 0.0 +573 11 optimizer.lr 0.024421334477039525 +573 11 negative_sampler.num_negs_per_pos 80.0 +573 11 training.batch_size 1.0 +573 12 model.embedding_dim 1.0 +573 12 optimizer.lr 0.08079191462907277 +573 12 negative_sampler.num_negs_per_pos 41.0 +573 12 training.batch_size 2.0 +573 13 model.embedding_dim 1.0 +573 13 optimizer.lr 0.0021280429402202494 +573 13 negative_sampler.num_negs_per_pos 36.0 +573 13 training.batch_size 2.0 +573 14 model.embedding_dim 2.0 +573 14 optimizer.lr 0.06962264653277112 +573 14 negative_sampler.num_negs_per_pos 95.0 +573 14 training.batch_size 1.0 +573 15 model.embedding_dim 2.0 +573 15 optimizer.lr 0.004797427770610385 +573 15 negative_sampler.num_negs_per_pos 59.0 +573 15 training.batch_size 1.0 +573 16 model.embedding_dim 0.0 +573 16 
optimizer.lr 0.04484531200206708 +573 16 negative_sampler.num_negs_per_pos 78.0 +573 16 training.batch_size 1.0 +573 17 model.embedding_dim 2.0 +573 17 optimizer.lr 0.0052800059307278975 +573 17 negative_sampler.num_negs_per_pos 90.0 +573 17 training.batch_size 0.0 +573 18 model.embedding_dim 2.0 +573 18 optimizer.lr 0.012340524193651841 +573 18 negative_sampler.num_negs_per_pos 9.0 +573 18 training.batch_size 0.0 +573 19 model.embedding_dim 2.0 +573 19 optimizer.lr 0.0076202861141478105 +573 19 negative_sampler.num_negs_per_pos 44.0 +573 19 training.batch_size 2.0 +573 20 model.embedding_dim 0.0 +573 20 optimizer.lr 0.009419104849973766 +573 20 negative_sampler.num_negs_per_pos 55.0 +573 20 training.batch_size 1.0 +573 21 model.embedding_dim 1.0 +573 21 optimizer.lr 0.005084571054468086 +573 21 negative_sampler.num_negs_per_pos 14.0 +573 21 training.batch_size 0.0 +573 22 model.embedding_dim 1.0 +573 22 optimizer.lr 0.040706577104885906 +573 22 negative_sampler.num_negs_per_pos 46.0 +573 22 training.batch_size 2.0 +573 23 model.embedding_dim 1.0 +573 23 optimizer.lr 0.0016027914075522481 +573 23 negative_sampler.num_negs_per_pos 12.0 +573 23 training.batch_size 0.0 +573 24 model.embedding_dim 1.0 +573 24 optimizer.lr 0.005351547068211866 +573 24 negative_sampler.num_negs_per_pos 97.0 +573 24 training.batch_size 2.0 +573 25 model.embedding_dim 0.0 +573 25 optimizer.lr 0.0020116385231319523 +573 25 negative_sampler.num_negs_per_pos 27.0 +573 25 training.batch_size 2.0 +573 26 model.embedding_dim 1.0 +573 26 optimizer.lr 0.005803157403196935 +573 26 negative_sampler.num_negs_per_pos 89.0 +573 26 training.batch_size 1.0 +573 27 model.embedding_dim 0.0 +573 27 optimizer.lr 0.00510577685342985 +573 27 negative_sampler.num_negs_per_pos 96.0 +573 27 training.batch_size 1.0 +573 28 model.embedding_dim 2.0 +573 28 optimizer.lr 0.009722985462351848 +573 28 negative_sampler.num_negs_per_pos 75.0 +573 28 training.batch_size 2.0 +573 29 model.embedding_dim 0.0 +573 29 
optimizer.lr 0.0014660077743174245 +573 29 negative_sampler.num_negs_per_pos 36.0 +573 29 training.batch_size 2.0 +573 30 model.embedding_dim 0.0 +573 30 optimizer.lr 0.008624864339941064 +573 30 negative_sampler.num_negs_per_pos 95.0 +573 30 training.batch_size 2.0 +573 31 model.embedding_dim 1.0 +573 31 optimizer.lr 0.015939882235375877 +573 31 negative_sampler.num_negs_per_pos 80.0 +573 31 training.batch_size 1.0 +573 32 model.embedding_dim 2.0 +573 32 optimizer.lr 0.0012893506224134369 +573 32 negative_sampler.num_negs_per_pos 38.0 +573 32 training.batch_size 1.0 +573 33 model.embedding_dim 0.0 +573 33 optimizer.lr 0.0393567367243425 +573 33 negative_sampler.num_negs_per_pos 78.0 +573 33 training.batch_size 1.0 +573 34 model.embedding_dim 0.0 +573 34 optimizer.lr 0.07772143223363591 +573 34 negative_sampler.num_negs_per_pos 96.0 +573 34 training.batch_size 2.0 +573 35 model.embedding_dim 2.0 +573 35 optimizer.lr 0.002475577046964315 +573 35 negative_sampler.num_negs_per_pos 95.0 +573 35 training.batch_size 2.0 +573 36 model.embedding_dim 0.0 +573 36 optimizer.lr 0.05499559494436347 +573 36 negative_sampler.num_negs_per_pos 41.0 +573 36 training.batch_size 0.0 +573 37 model.embedding_dim 2.0 +573 37 optimizer.lr 0.006777983128813541 +573 37 negative_sampler.num_negs_per_pos 87.0 +573 37 training.batch_size 1.0 +573 38 model.embedding_dim 1.0 +573 38 optimizer.lr 0.09922840914690646 +573 38 negative_sampler.num_negs_per_pos 46.0 +573 38 training.batch_size 0.0 +573 39 model.embedding_dim 0.0 +573 39 optimizer.lr 0.025121293615096796 +573 39 negative_sampler.num_negs_per_pos 59.0 +573 39 training.batch_size 0.0 +573 40 model.embedding_dim 0.0 +573 40 optimizer.lr 0.01221004097702222 +573 40 negative_sampler.num_negs_per_pos 43.0 +573 40 training.batch_size 1.0 +573 41 model.embedding_dim 2.0 +573 41 optimizer.lr 0.012487418871472518 +573 41 negative_sampler.num_negs_per_pos 22.0 +573 41 training.batch_size 2.0 +573 42 model.embedding_dim 2.0 +573 42 optimizer.lr 
0.007063641095026693 +573 42 negative_sampler.num_negs_per_pos 97.0 +573 42 training.batch_size 2.0 +573 43 model.embedding_dim 2.0 +573 43 optimizer.lr 0.0011570540209977498 +573 43 negative_sampler.num_negs_per_pos 11.0 +573 43 training.batch_size 0.0 +573 44 model.embedding_dim 2.0 +573 44 optimizer.lr 0.004907668695639881 +573 44 negative_sampler.num_negs_per_pos 85.0 +573 44 training.batch_size 2.0 +573 45 model.embedding_dim 2.0 +573 45 optimizer.lr 0.008786845789697994 +573 45 negative_sampler.num_negs_per_pos 8.0 +573 45 training.batch_size 2.0 +573 46 model.embedding_dim 0.0 +573 46 optimizer.lr 0.005864060858732267 +573 46 negative_sampler.num_negs_per_pos 49.0 +573 46 training.batch_size 0.0 +573 47 model.embedding_dim 0.0 +573 47 optimizer.lr 0.001006401061708527 +573 47 negative_sampler.num_negs_per_pos 74.0 +573 47 training.batch_size 2.0 +573 48 model.embedding_dim 2.0 +573 48 optimizer.lr 0.021604406092487878 +573 48 negative_sampler.num_negs_per_pos 1.0 +573 48 training.batch_size 0.0 +573 49 model.embedding_dim 2.0 +573 49 optimizer.lr 0.08758602245073316 +573 49 negative_sampler.num_negs_per_pos 67.0 +573 49 training.batch_size 1.0 +573 50 model.embedding_dim 0.0 +573 50 optimizer.lr 0.08601148757313655 +573 50 negative_sampler.num_negs_per_pos 12.0 +573 50 training.batch_size 1.0 +573 51 model.embedding_dim 0.0 +573 51 optimizer.lr 0.0028185713245240255 +573 51 negative_sampler.num_negs_per_pos 73.0 +573 51 training.batch_size 1.0 +573 52 model.embedding_dim 2.0 +573 52 optimizer.lr 0.0015789884317286807 +573 52 negative_sampler.num_negs_per_pos 64.0 +573 52 training.batch_size 0.0 +573 53 model.embedding_dim 1.0 +573 53 optimizer.lr 0.006039661687739575 +573 53 negative_sampler.num_negs_per_pos 30.0 +573 53 training.batch_size 2.0 +573 1 dataset """fb15k237""" +573 1 model """rotate""" +573 1 loss """bceaftersigmoid""" +573 1 regularizer """no""" +573 1 optimizer """adam""" +573 1 training_loop """owa""" +573 1 negative_sampler """basic""" +573 
1 evaluator """rankbased""" +573 2 dataset """fb15k237""" +573 2 model """rotate""" +573 2 loss """bceaftersigmoid""" +573 2 regularizer """no""" +573 2 optimizer """adam""" +573 2 training_loop """owa""" +573 2 negative_sampler """basic""" +573 2 evaluator """rankbased""" +573 3 dataset """fb15k237""" +573 3 model """rotate""" +573 3 loss """bceaftersigmoid""" +573 3 regularizer """no""" +573 3 optimizer """adam""" +573 3 training_loop """owa""" +573 3 negative_sampler """basic""" +573 3 evaluator """rankbased""" +573 4 dataset """fb15k237""" +573 4 model """rotate""" +573 4 loss """bceaftersigmoid""" +573 4 regularizer """no""" +573 4 optimizer """adam""" +573 4 training_loop """owa""" +573 4 negative_sampler """basic""" +573 4 evaluator """rankbased""" +573 5 dataset """fb15k237""" +573 5 model """rotate""" +573 5 loss """bceaftersigmoid""" +573 5 regularizer """no""" +573 5 optimizer """adam""" +573 5 training_loop """owa""" +573 5 negative_sampler """basic""" +573 5 evaluator """rankbased""" +573 6 dataset """fb15k237""" +573 6 model """rotate""" +573 6 loss """bceaftersigmoid""" +573 6 regularizer """no""" +573 6 optimizer """adam""" +573 6 training_loop """owa""" +573 6 negative_sampler """basic""" +573 6 evaluator """rankbased""" +573 7 dataset """fb15k237""" +573 7 model """rotate""" +573 7 loss """bceaftersigmoid""" +573 7 regularizer """no""" +573 7 optimizer """adam""" +573 7 training_loop """owa""" +573 7 negative_sampler """basic""" +573 7 evaluator """rankbased""" +573 8 dataset """fb15k237""" +573 8 model """rotate""" +573 8 loss """bceaftersigmoid""" +573 8 regularizer """no""" +573 8 optimizer """adam""" +573 8 training_loop """owa""" +573 8 negative_sampler """basic""" +573 8 evaluator """rankbased""" +573 9 dataset """fb15k237""" +573 9 model """rotate""" +573 9 loss """bceaftersigmoid""" +573 9 regularizer """no""" +573 9 optimizer """adam""" +573 9 training_loop """owa""" +573 9 negative_sampler """basic""" +573 9 evaluator """rankbased""" 
+573 10 dataset """fb15k237""" +573 10 model """rotate""" +573 10 loss """bceaftersigmoid""" +573 10 regularizer """no""" +573 10 optimizer """adam""" +573 10 training_loop """owa""" +573 10 negative_sampler """basic""" +573 10 evaluator """rankbased""" +573 11 dataset """fb15k237""" +573 11 model """rotate""" +573 11 loss """bceaftersigmoid""" +573 11 regularizer """no""" +573 11 optimizer """adam""" +573 11 training_loop """owa""" +573 11 negative_sampler """basic""" +573 11 evaluator """rankbased""" +573 12 dataset """fb15k237""" +573 12 model """rotate""" +573 12 loss """bceaftersigmoid""" +573 12 regularizer """no""" +573 12 optimizer """adam""" +573 12 training_loop """owa""" +573 12 negative_sampler """basic""" +573 12 evaluator """rankbased""" +573 13 dataset """fb15k237""" +573 13 model """rotate""" +573 13 loss """bceaftersigmoid""" +573 13 regularizer """no""" +573 13 optimizer """adam""" +573 13 training_loop """owa""" +573 13 negative_sampler """basic""" +573 13 evaluator """rankbased""" +573 14 dataset """fb15k237""" +573 14 model """rotate""" +573 14 loss """bceaftersigmoid""" +573 14 regularizer """no""" +573 14 optimizer """adam""" +573 14 training_loop """owa""" +573 14 negative_sampler """basic""" +573 14 evaluator """rankbased""" +573 15 dataset """fb15k237""" +573 15 model """rotate""" +573 15 loss """bceaftersigmoid""" +573 15 regularizer """no""" +573 15 optimizer """adam""" +573 15 training_loop """owa""" +573 15 negative_sampler """basic""" +573 15 evaluator """rankbased""" +573 16 dataset """fb15k237""" +573 16 model """rotate""" +573 16 loss """bceaftersigmoid""" +573 16 regularizer """no""" +573 16 optimizer """adam""" +573 16 training_loop """owa""" +573 16 negative_sampler """basic""" +573 16 evaluator """rankbased""" +573 17 dataset """fb15k237""" +573 17 model """rotate""" +573 17 loss """bceaftersigmoid""" +573 17 regularizer """no""" +573 17 optimizer """adam""" +573 17 training_loop """owa""" +573 17 negative_sampler """basic""" 
+573 17 evaluator """rankbased""" +573 18 dataset """fb15k237""" +573 18 model """rotate""" +573 18 loss """bceaftersigmoid""" +573 18 regularizer """no""" +573 18 optimizer """adam""" +573 18 training_loop """owa""" +573 18 negative_sampler """basic""" +573 18 evaluator """rankbased""" +573 19 dataset """fb15k237""" +573 19 model """rotate""" +573 19 loss """bceaftersigmoid""" +573 19 regularizer """no""" +573 19 optimizer """adam""" +573 19 training_loop """owa""" +573 19 negative_sampler """basic""" +573 19 evaluator """rankbased""" +573 20 dataset """fb15k237""" +573 20 model """rotate""" +573 20 loss """bceaftersigmoid""" +573 20 regularizer """no""" +573 20 optimizer """adam""" +573 20 training_loop """owa""" +573 20 negative_sampler """basic""" +573 20 evaluator """rankbased""" +573 21 dataset """fb15k237""" +573 21 model """rotate""" +573 21 loss """bceaftersigmoid""" +573 21 regularizer """no""" +573 21 optimizer """adam""" +573 21 training_loop """owa""" +573 21 negative_sampler """basic""" +573 21 evaluator """rankbased""" +573 22 dataset """fb15k237""" +573 22 model """rotate""" +573 22 loss """bceaftersigmoid""" +573 22 regularizer """no""" +573 22 optimizer """adam""" +573 22 training_loop """owa""" +573 22 negative_sampler """basic""" +573 22 evaluator """rankbased""" +573 23 dataset """fb15k237""" +573 23 model """rotate""" +573 23 loss """bceaftersigmoid""" +573 23 regularizer """no""" +573 23 optimizer """adam""" +573 23 training_loop """owa""" +573 23 negative_sampler """basic""" +573 23 evaluator """rankbased""" +573 24 dataset """fb15k237""" +573 24 model """rotate""" +573 24 loss """bceaftersigmoid""" +573 24 regularizer """no""" +573 24 optimizer """adam""" +573 24 training_loop """owa""" +573 24 negative_sampler """basic""" +573 24 evaluator """rankbased""" +573 25 dataset """fb15k237""" +573 25 model """rotate""" +573 25 loss """bceaftersigmoid""" +573 25 regularizer """no""" +573 25 optimizer """adam""" +573 25 training_loop """owa""" +573 
25 negative_sampler """basic""" +573 25 evaluator """rankbased""" +573 26 dataset """fb15k237""" +573 26 model """rotate""" +573 26 loss """bceaftersigmoid""" +573 26 regularizer """no""" +573 26 optimizer """adam""" +573 26 training_loop """owa""" +573 26 negative_sampler """basic""" +573 26 evaluator """rankbased""" +573 27 dataset """fb15k237""" +573 27 model """rotate""" +573 27 loss """bceaftersigmoid""" +573 27 regularizer """no""" +573 27 optimizer """adam""" +573 27 training_loop """owa""" +573 27 negative_sampler """basic""" +573 27 evaluator """rankbased""" +573 28 dataset """fb15k237""" +573 28 model """rotate""" +573 28 loss """bceaftersigmoid""" +573 28 regularizer """no""" +573 28 optimizer """adam""" +573 28 training_loop """owa""" +573 28 negative_sampler """basic""" +573 28 evaluator """rankbased""" +573 29 dataset """fb15k237""" +573 29 model """rotate""" +573 29 loss """bceaftersigmoid""" +573 29 regularizer """no""" +573 29 optimizer """adam""" +573 29 training_loop """owa""" +573 29 negative_sampler """basic""" +573 29 evaluator """rankbased""" +573 30 dataset """fb15k237""" +573 30 model """rotate""" +573 30 loss """bceaftersigmoid""" +573 30 regularizer """no""" +573 30 optimizer """adam""" +573 30 training_loop """owa""" +573 30 negative_sampler """basic""" +573 30 evaluator """rankbased""" +573 31 dataset """fb15k237""" +573 31 model """rotate""" +573 31 loss """bceaftersigmoid""" +573 31 regularizer """no""" +573 31 optimizer """adam""" +573 31 training_loop """owa""" +573 31 negative_sampler """basic""" +573 31 evaluator """rankbased""" +573 32 dataset """fb15k237""" +573 32 model """rotate""" +573 32 loss """bceaftersigmoid""" +573 32 regularizer """no""" +573 32 optimizer """adam""" +573 32 training_loop """owa""" +573 32 negative_sampler """basic""" +573 32 evaluator """rankbased""" +573 33 dataset """fb15k237""" +573 33 model """rotate""" +573 33 loss """bceaftersigmoid""" +573 33 regularizer """no""" +573 33 optimizer """adam""" +573 
33 training_loop """owa""" +573 33 negative_sampler """basic""" +573 33 evaluator """rankbased""" +573 34 dataset """fb15k237""" +573 34 model """rotate""" +573 34 loss """bceaftersigmoid""" +573 34 regularizer """no""" +573 34 optimizer """adam""" +573 34 training_loop """owa""" +573 34 negative_sampler """basic""" +573 34 evaluator """rankbased""" +573 35 dataset """fb15k237""" +573 35 model """rotate""" +573 35 loss """bceaftersigmoid""" +573 35 regularizer """no""" +573 35 optimizer """adam""" +573 35 training_loop """owa""" +573 35 negative_sampler """basic""" +573 35 evaluator """rankbased""" +573 36 dataset """fb15k237""" +573 36 model """rotate""" +573 36 loss """bceaftersigmoid""" +573 36 regularizer """no""" +573 36 optimizer """adam""" +573 36 training_loop """owa""" +573 36 negative_sampler """basic""" +573 36 evaluator """rankbased""" +573 37 dataset """fb15k237""" +573 37 model """rotate""" +573 37 loss """bceaftersigmoid""" +573 37 regularizer """no""" +573 37 optimizer """adam""" +573 37 training_loop """owa""" +573 37 negative_sampler """basic""" +573 37 evaluator """rankbased""" +573 38 dataset """fb15k237""" +573 38 model """rotate""" +573 38 loss """bceaftersigmoid""" +573 38 regularizer """no""" +573 38 optimizer """adam""" +573 38 training_loop """owa""" +573 38 negative_sampler """basic""" +573 38 evaluator """rankbased""" +573 39 dataset """fb15k237""" +573 39 model """rotate""" +573 39 loss """bceaftersigmoid""" +573 39 regularizer """no""" +573 39 optimizer """adam""" +573 39 training_loop """owa""" +573 39 negative_sampler """basic""" +573 39 evaluator """rankbased""" +573 40 dataset """fb15k237""" +573 40 model """rotate""" +573 40 loss """bceaftersigmoid""" +573 40 regularizer """no""" +573 40 optimizer """adam""" +573 40 training_loop """owa""" +573 40 negative_sampler """basic""" +573 40 evaluator """rankbased""" +573 41 dataset """fb15k237""" +573 41 model """rotate""" +573 41 loss """bceaftersigmoid""" +573 41 regularizer """no""" 
+573 41 optimizer """adam""" +573 41 training_loop """owa""" +573 41 negative_sampler """basic""" +573 41 evaluator """rankbased""" +573 42 dataset """fb15k237""" +573 42 model """rotate""" +573 42 loss """bceaftersigmoid""" +573 42 regularizer """no""" +573 42 optimizer """adam""" +573 42 training_loop """owa""" +573 42 negative_sampler """basic""" +573 42 evaluator """rankbased""" +573 43 dataset """fb15k237""" +573 43 model """rotate""" +573 43 loss """bceaftersigmoid""" +573 43 regularizer """no""" +573 43 optimizer """adam""" +573 43 training_loop """owa""" +573 43 negative_sampler """basic""" +573 43 evaluator """rankbased""" +573 44 dataset """fb15k237""" +573 44 model """rotate""" +573 44 loss """bceaftersigmoid""" +573 44 regularizer """no""" +573 44 optimizer """adam""" +573 44 training_loop """owa""" +573 44 negative_sampler """basic""" +573 44 evaluator """rankbased""" +573 45 dataset """fb15k237""" +573 45 model """rotate""" +573 45 loss """bceaftersigmoid""" +573 45 regularizer """no""" +573 45 optimizer """adam""" +573 45 training_loop """owa""" +573 45 negative_sampler """basic""" +573 45 evaluator """rankbased""" +573 46 dataset """fb15k237""" +573 46 model """rotate""" +573 46 loss """bceaftersigmoid""" +573 46 regularizer """no""" +573 46 optimizer """adam""" +573 46 training_loop """owa""" +573 46 negative_sampler """basic""" +573 46 evaluator """rankbased""" +573 47 dataset """fb15k237""" +573 47 model """rotate""" +573 47 loss """bceaftersigmoid""" +573 47 regularizer """no""" +573 47 optimizer """adam""" +573 47 training_loop """owa""" +573 47 negative_sampler """basic""" +573 47 evaluator """rankbased""" +573 48 dataset """fb15k237""" +573 48 model """rotate""" +573 48 loss """bceaftersigmoid""" +573 48 regularizer """no""" +573 48 optimizer """adam""" +573 48 training_loop """owa""" +573 48 negative_sampler """basic""" +573 48 evaluator """rankbased""" +573 49 dataset """fb15k237""" +573 49 model """rotate""" +573 49 loss 
"""bceaftersigmoid""" +573 49 regularizer """no""" +573 49 optimizer """adam""" +573 49 training_loop """owa""" +573 49 negative_sampler """basic""" +573 49 evaluator """rankbased""" +573 50 dataset """fb15k237""" +573 50 model """rotate""" +573 50 loss """bceaftersigmoid""" +573 50 regularizer """no""" +573 50 optimizer """adam""" +573 50 training_loop """owa""" +573 50 negative_sampler """basic""" +573 50 evaluator """rankbased""" +573 51 dataset """fb15k237""" +573 51 model """rotate""" +573 51 loss """bceaftersigmoid""" +573 51 regularizer """no""" +573 51 optimizer """adam""" +573 51 training_loop """owa""" +573 51 negative_sampler """basic""" +573 51 evaluator """rankbased""" +573 52 dataset """fb15k237""" +573 52 model """rotate""" +573 52 loss """bceaftersigmoid""" +573 52 regularizer """no""" +573 52 optimizer """adam""" +573 52 training_loop """owa""" +573 52 negative_sampler """basic""" +573 52 evaluator """rankbased""" +573 53 dataset """fb15k237""" +573 53 model """rotate""" +573 53 loss """bceaftersigmoid""" +573 53 regularizer """no""" +573 53 optimizer """adam""" +573 53 training_loop """owa""" +573 53 negative_sampler """basic""" +573 53 evaluator """rankbased""" +574 1 model.embedding_dim 0.0 +574 1 optimizer.lr 0.023304486933897438 +574 1 negative_sampler.num_negs_per_pos 97.0 +574 1 training.batch_size 0.0 +574 2 model.embedding_dim 2.0 +574 2 optimizer.lr 0.001319566388632905 +574 2 negative_sampler.num_negs_per_pos 32.0 +574 2 training.batch_size 1.0 +574 3 model.embedding_dim 2.0 +574 3 optimizer.lr 0.004247611754279615 +574 3 negative_sampler.num_negs_per_pos 25.0 +574 3 training.batch_size 0.0 +574 4 model.embedding_dim 1.0 +574 4 optimizer.lr 0.01668086104023236 +574 4 negative_sampler.num_negs_per_pos 41.0 +574 4 training.batch_size 1.0 +574 5 model.embedding_dim 0.0 +574 5 optimizer.lr 0.004531671406705443 +574 5 negative_sampler.num_negs_per_pos 74.0 +574 5 training.batch_size 0.0 +574 6 model.embedding_dim 1.0 +574 6 optimizer.lr 
0.0035665217618732225 +574 6 negative_sampler.num_negs_per_pos 24.0 +574 6 training.batch_size 2.0 +574 7 model.embedding_dim 0.0 +574 7 optimizer.lr 0.0010803499268232971 +574 7 negative_sampler.num_negs_per_pos 6.0 +574 7 training.batch_size 1.0 +574 8 model.embedding_dim 1.0 +574 8 optimizer.lr 0.002489638465475708 +574 8 negative_sampler.num_negs_per_pos 7.0 +574 8 training.batch_size 2.0 +574 9 model.embedding_dim 2.0 +574 9 optimizer.lr 0.05757602409958129 +574 9 negative_sampler.num_negs_per_pos 95.0 +574 9 training.batch_size 2.0 +574 10 model.embedding_dim 2.0 +574 10 optimizer.lr 0.007679679571739059 +574 10 negative_sampler.num_negs_per_pos 95.0 +574 10 training.batch_size 0.0 +574 11 model.embedding_dim 1.0 +574 11 optimizer.lr 0.0036099648720815916 +574 11 negative_sampler.num_negs_per_pos 84.0 +574 11 training.batch_size 0.0 +574 12 model.embedding_dim 0.0 +574 12 optimizer.lr 0.059470358058603 +574 12 negative_sampler.num_negs_per_pos 38.0 +574 12 training.batch_size 2.0 +574 13 model.embedding_dim 2.0 +574 13 optimizer.lr 0.002469524479294928 +574 13 negative_sampler.num_negs_per_pos 77.0 +574 13 training.batch_size 2.0 +574 14 model.embedding_dim 2.0 +574 14 optimizer.lr 0.004617700208003361 +574 14 negative_sampler.num_negs_per_pos 35.0 +574 14 training.batch_size 1.0 +574 15 model.embedding_dim 1.0 +574 15 optimizer.lr 0.037433013759349994 +574 15 negative_sampler.num_negs_per_pos 23.0 +574 15 training.batch_size 0.0 +574 16 model.embedding_dim 1.0 +574 16 optimizer.lr 0.0010650956056716754 +574 16 negative_sampler.num_negs_per_pos 15.0 +574 16 training.batch_size 1.0 +574 17 model.embedding_dim 2.0 +574 17 optimizer.lr 0.0882441328087905 +574 17 negative_sampler.num_negs_per_pos 0.0 +574 17 training.batch_size 2.0 +574 18 model.embedding_dim 1.0 +574 18 optimizer.lr 0.0012151801603545462 +574 18 negative_sampler.num_negs_per_pos 67.0 +574 18 training.batch_size 0.0 +574 19 model.embedding_dim 0.0 +574 19 optimizer.lr 0.006108129027814618 +574 19 
negative_sampler.num_negs_per_pos 42.0 +574 19 training.batch_size 2.0 +574 20 model.embedding_dim 1.0 +574 20 optimizer.lr 0.0016714168397987464 +574 20 negative_sampler.num_negs_per_pos 23.0 +574 20 training.batch_size 1.0 +574 21 model.embedding_dim 1.0 +574 21 optimizer.lr 0.07553076069148842 +574 21 negative_sampler.num_negs_per_pos 62.0 +574 21 training.batch_size 1.0 +574 22 model.embedding_dim 1.0 +574 22 optimizer.lr 0.018399248090690417 +574 22 negative_sampler.num_negs_per_pos 69.0 +574 22 training.batch_size 0.0 +574 23 model.embedding_dim 0.0 +574 23 optimizer.lr 0.011152655962553607 +574 23 negative_sampler.num_negs_per_pos 17.0 +574 23 training.batch_size 2.0 +574 24 model.embedding_dim 2.0 +574 24 optimizer.lr 0.008668114768946938 +574 24 negative_sampler.num_negs_per_pos 37.0 +574 24 training.batch_size 2.0 +574 25 model.embedding_dim 0.0 +574 25 optimizer.lr 0.004525660735452896 +574 25 negative_sampler.num_negs_per_pos 69.0 +574 25 training.batch_size 1.0 +574 26 model.embedding_dim 0.0 +574 26 optimizer.lr 0.026542052244684332 +574 26 negative_sampler.num_negs_per_pos 21.0 +574 26 training.batch_size 1.0 +574 27 model.embedding_dim 2.0 +574 27 optimizer.lr 0.0020768539966175303 +574 27 negative_sampler.num_negs_per_pos 61.0 +574 27 training.batch_size 0.0 +574 28 model.embedding_dim 2.0 +574 28 optimizer.lr 0.0016953494353606825 +574 28 negative_sampler.num_negs_per_pos 4.0 +574 28 training.batch_size 0.0 +574 29 model.embedding_dim 0.0 +574 29 optimizer.lr 0.01062767608797416 +574 29 negative_sampler.num_negs_per_pos 87.0 +574 29 training.batch_size 0.0 +574 30 model.embedding_dim 1.0 +574 30 optimizer.lr 0.06626018160257928 +574 30 negative_sampler.num_negs_per_pos 17.0 +574 30 training.batch_size 1.0 +574 31 model.embedding_dim 1.0 +574 31 optimizer.lr 0.03867102303816664 +574 31 negative_sampler.num_negs_per_pos 67.0 +574 31 training.batch_size 2.0 +574 32 model.embedding_dim 2.0 +574 32 optimizer.lr 0.0030861090686230583 +574 32 
negative_sampler.num_negs_per_pos 70.0 +574 32 training.batch_size 1.0 +574 33 model.embedding_dim 2.0 +574 33 optimizer.lr 0.009030890567185874 +574 33 negative_sampler.num_negs_per_pos 76.0 +574 33 training.batch_size 0.0 +574 34 model.embedding_dim 0.0 +574 34 optimizer.lr 0.04226785486040054 +574 34 negative_sampler.num_negs_per_pos 76.0 +574 34 training.batch_size 1.0 +574 35 model.embedding_dim 1.0 +574 35 optimizer.lr 0.01648684960014973 +574 35 negative_sampler.num_negs_per_pos 96.0 +574 35 training.batch_size 1.0 +574 36 model.embedding_dim 0.0 +574 36 optimizer.lr 0.0023203922082907116 +574 36 negative_sampler.num_negs_per_pos 96.0 +574 36 training.batch_size 0.0 +574 37 model.embedding_dim 0.0 +574 37 optimizer.lr 0.014062394022340855 +574 37 negative_sampler.num_negs_per_pos 17.0 +574 37 training.batch_size 0.0 +574 38 model.embedding_dim 2.0 +574 38 optimizer.lr 0.006504181862933825 +574 38 negative_sampler.num_negs_per_pos 54.0 +574 38 training.batch_size 0.0 +574 39 model.embedding_dim 2.0 +574 39 optimizer.lr 0.001243522276035183 +574 39 negative_sampler.num_negs_per_pos 97.0 +574 39 training.batch_size 1.0 +574 40 model.embedding_dim 0.0 +574 40 optimizer.lr 0.008161390584833422 +574 40 negative_sampler.num_negs_per_pos 76.0 +574 40 training.batch_size 0.0 +574 41 model.embedding_dim 2.0 +574 41 optimizer.lr 0.0037396865302199867 +574 41 negative_sampler.num_negs_per_pos 98.0 +574 41 training.batch_size 0.0 +574 42 model.embedding_dim 1.0 +574 42 optimizer.lr 0.08368754907908182 +574 42 negative_sampler.num_negs_per_pos 2.0 +574 42 training.batch_size 0.0 +574 43 model.embedding_dim 2.0 +574 43 optimizer.lr 0.0027548404813102496 +574 43 negative_sampler.num_negs_per_pos 8.0 +574 43 training.batch_size 1.0 +574 44 model.embedding_dim 1.0 +574 44 optimizer.lr 0.031127824450012727 +574 44 negative_sampler.num_negs_per_pos 13.0 +574 44 training.batch_size 0.0 +574 45 model.embedding_dim 2.0 +574 45 optimizer.lr 0.004785698820092659 +574 45 
negative_sampler.num_negs_per_pos 54.0 +574 45 training.batch_size 0.0 +574 46 model.embedding_dim 2.0 +574 46 optimizer.lr 0.0308125464197466 +574 46 negative_sampler.num_negs_per_pos 9.0 +574 46 training.batch_size 1.0 +574 47 model.embedding_dim 0.0 +574 47 optimizer.lr 0.0029031541414023107 +574 47 negative_sampler.num_negs_per_pos 92.0 +574 47 training.batch_size 1.0 +574 48 model.embedding_dim 2.0 +574 48 optimizer.lr 0.0015142134828711085 +574 48 negative_sampler.num_negs_per_pos 76.0 +574 48 training.batch_size 2.0 +574 49 model.embedding_dim 2.0 +574 49 optimizer.lr 0.022387808028084617 +574 49 negative_sampler.num_negs_per_pos 97.0 +574 49 training.batch_size 2.0 +574 50 model.embedding_dim 0.0 +574 50 optimizer.lr 0.0026674580019916314 +574 50 negative_sampler.num_negs_per_pos 0.0 +574 50 training.batch_size 2.0 +574 1 dataset """fb15k237""" +574 1 model """rotate""" +574 1 loss """softplus""" +574 1 regularizer """no""" +574 1 optimizer """adam""" +574 1 training_loop """owa""" +574 1 negative_sampler """basic""" +574 1 evaluator """rankbased""" +574 2 dataset """fb15k237""" +574 2 model """rotate""" +574 2 loss """softplus""" +574 2 regularizer """no""" +574 2 optimizer """adam""" +574 2 training_loop """owa""" +574 2 negative_sampler """basic""" +574 2 evaluator """rankbased""" +574 3 dataset """fb15k237""" +574 3 model """rotate""" +574 3 loss """softplus""" +574 3 regularizer """no""" +574 3 optimizer """adam""" +574 3 training_loop """owa""" +574 3 negative_sampler """basic""" +574 3 evaluator """rankbased""" +574 4 dataset """fb15k237""" +574 4 model """rotate""" +574 4 loss """softplus""" +574 4 regularizer """no""" +574 4 optimizer """adam""" +574 4 training_loop """owa""" +574 4 negative_sampler """basic""" +574 4 evaluator """rankbased""" +574 5 dataset """fb15k237""" +574 5 model """rotate""" +574 5 loss """softplus""" +574 5 regularizer """no""" +574 5 optimizer """adam""" +574 5 training_loop """owa""" +574 5 negative_sampler """basic""" 
+574 5 evaluator """rankbased""" +574 6 dataset """fb15k237""" +574 6 model """rotate""" +574 6 loss """softplus""" +574 6 regularizer """no""" +574 6 optimizer """adam""" +574 6 training_loop """owa""" +574 6 negative_sampler """basic""" +574 6 evaluator """rankbased""" +574 7 dataset """fb15k237""" +574 7 model """rotate""" +574 7 loss """softplus""" +574 7 regularizer """no""" +574 7 optimizer """adam""" +574 7 training_loop """owa""" +574 7 negative_sampler """basic""" +574 7 evaluator """rankbased""" +574 8 dataset """fb15k237""" +574 8 model """rotate""" +574 8 loss """softplus""" +574 8 regularizer """no""" +574 8 optimizer """adam""" +574 8 training_loop """owa""" +574 8 negative_sampler """basic""" +574 8 evaluator """rankbased""" +574 9 dataset """fb15k237""" +574 9 model """rotate""" +574 9 loss """softplus""" +574 9 regularizer """no""" +574 9 optimizer """adam""" +574 9 training_loop """owa""" +574 9 negative_sampler """basic""" +574 9 evaluator """rankbased""" +574 10 dataset """fb15k237""" +574 10 model """rotate""" +574 10 loss """softplus""" +574 10 regularizer """no""" +574 10 optimizer """adam""" +574 10 training_loop """owa""" +574 10 negative_sampler """basic""" +574 10 evaluator """rankbased""" +574 11 dataset """fb15k237""" +574 11 model """rotate""" +574 11 loss """softplus""" +574 11 regularizer """no""" +574 11 optimizer """adam""" +574 11 training_loop """owa""" +574 11 negative_sampler """basic""" +574 11 evaluator """rankbased""" +574 12 dataset """fb15k237""" +574 12 model """rotate""" +574 12 loss """softplus""" +574 12 regularizer """no""" +574 12 optimizer """adam""" +574 12 training_loop """owa""" +574 12 negative_sampler """basic""" +574 12 evaluator """rankbased""" +574 13 dataset """fb15k237""" +574 13 model """rotate""" +574 13 loss """softplus""" +574 13 regularizer """no""" +574 13 optimizer """adam""" +574 13 training_loop """owa""" +574 13 negative_sampler """basic""" +574 13 evaluator """rankbased""" +574 14 dataset 
"""fb15k237""" +574 14 model """rotate""" +574 14 loss """softplus""" +574 14 regularizer """no""" +574 14 optimizer """adam""" +574 14 training_loop """owa""" +574 14 negative_sampler """basic""" +574 14 evaluator """rankbased""" +574 15 dataset """fb15k237""" +574 15 model """rotate""" +574 15 loss """softplus""" +574 15 regularizer """no""" +574 15 optimizer """adam""" +574 15 training_loop """owa""" +574 15 negative_sampler """basic""" +574 15 evaluator """rankbased""" +574 16 dataset """fb15k237""" +574 16 model """rotate""" +574 16 loss """softplus""" +574 16 regularizer """no""" +574 16 optimizer """adam""" +574 16 training_loop """owa""" +574 16 negative_sampler """basic""" +574 16 evaluator """rankbased""" +574 17 dataset """fb15k237""" +574 17 model """rotate""" +574 17 loss """softplus""" +574 17 regularizer """no""" +574 17 optimizer """adam""" +574 17 training_loop """owa""" +574 17 negative_sampler """basic""" +574 17 evaluator """rankbased""" +574 18 dataset """fb15k237""" +574 18 model """rotate""" +574 18 loss """softplus""" +574 18 regularizer """no""" +574 18 optimizer """adam""" +574 18 training_loop """owa""" +574 18 negative_sampler """basic""" +574 18 evaluator """rankbased""" +574 19 dataset """fb15k237""" +574 19 model """rotate""" +574 19 loss """softplus""" +574 19 regularizer """no""" +574 19 optimizer """adam""" +574 19 training_loop """owa""" +574 19 negative_sampler """basic""" +574 19 evaluator """rankbased""" +574 20 dataset """fb15k237""" +574 20 model """rotate""" +574 20 loss """softplus""" +574 20 regularizer """no""" +574 20 optimizer """adam""" +574 20 training_loop """owa""" +574 20 negative_sampler """basic""" +574 20 evaluator """rankbased""" +574 21 dataset """fb15k237""" +574 21 model """rotate""" +574 21 loss """softplus""" +574 21 regularizer """no""" +574 21 optimizer """adam""" +574 21 training_loop """owa""" +574 21 negative_sampler """basic""" +574 21 evaluator """rankbased""" +574 22 dataset """fb15k237""" +574 22 
model """rotate""" +574 22 loss """softplus""" +574 22 regularizer """no""" +574 22 optimizer """adam""" +574 22 training_loop """owa""" +574 22 negative_sampler """basic""" +574 22 evaluator """rankbased""" +574 23 dataset """fb15k237""" +574 23 model """rotate""" +574 23 loss """softplus""" +574 23 regularizer """no""" +574 23 optimizer """adam""" +574 23 training_loop """owa""" +574 23 negative_sampler """basic""" +574 23 evaluator """rankbased""" +574 24 dataset """fb15k237""" +574 24 model """rotate""" +574 24 loss """softplus""" +574 24 regularizer """no""" +574 24 optimizer """adam""" +574 24 training_loop """owa""" +574 24 negative_sampler """basic""" +574 24 evaluator """rankbased""" +574 25 dataset """fb15k237""" +574 25 model """rotate""" +574 25 loss """softplus""" +574 25 regularizer """no""" +574 25 optimizer """adam""" +574 25 training_loop """owa""" +574 25 negative_sampler """basic""" +574 25 evaluator """rankbased""" +574 26 dataset """fb15k237""" +574 26 model """rotate""" +574 26 loss """softplus""" +574 26 regularizer """no""" +574 26 optimizer """adam""" +574 26 training_loop """owa""" +574 26 negative_sampler """basic""" +574 26 evaluator """rankbased""" +574 27 dataset """fb15k237""" +574 27 model """rotate""" +574 27 loss """softplus""" +574 27 regularizer """no""" +574 27 optimizer """adam""" +574 27 training_loop """owa""" +574 27 negative_sampler """basic""" +574 27 evaluator """rankbased""" +574 28 dataset """fb15k237""" +574 28 model """rotate""" +574 28 loss """softplus""" +574 28 regularizer """no""" +574 28 optimizer """adam""" +574 28 training_loop """owa""" +574 28 negative_sampler """basic""" +574 28 evaluator """rankbased""" +574 29 dataset """fb15k237""" +574 29 model """rotate""" +574 29 loss """softplus""" +574 29 regularizer """no""" +574 29 optimizer """adam""" +574 29 training_loop """owa""" +574 29 negative_sampler """basic""" +574 29 evaluator """rankbased""" +574 30 dataset """fb15k237""" +574 30 model """rotate""" +574 
30 loss """softplus""" +574 30 regularizer """no""" +574 30 optimizer """adam""" +574 30 training_loop """owa""" +574 30 negative_sampler """basic""" +574 30 evaluator """rankbased""" +574 31 dataset """fb15k237""" +574 31 model """rotate""" +574 31 loss """softplus""" +574 31 regularizer """no""" +574 31 optimizer """adam""" +574 31 training_loop """owa""" +574 31 negative_sampler """basic""" +574 31 evaluator """rankbased""" +574 32 dataset """fb15k237""" +574 32 model """rotate""" +574 32 loss """softplus""" +574 32 regularizer """no""" +574 32 optimizer """adam""" +574 32 training_loop """owa""" +574 32 negative_sampler """basic""" +574 32 evaluator """rankbased""" +574 33 dataset """fb15k237""" +574 33 model """rotate""" +574 33 loss """softplus""" +574 33 regularizer """no""" +574 33 optimizer """adam""" +574 33 training_loop """owa""" +574 33 negative_sampler """basic""" +574 33 evaluator """rankbased""" +574 34 dataset """fb15k237""" +574 34 model """rotate""" +574 34 loss """softplus""" +574 34 regularizer """no""" +574 34 optimizer """adam""" +574 34 training_loop """owa""" +574 34 negative_sampler """basic""" +574 34 evaluator """rankbased""" +574 35 dataset """fb15k237""" +574 35 model """rotate""" +574 35 loss """softplus""" +574 35 regularizer """no""" +574 35 optimizer """adam""" +574 35 training_loop """owa""" +574 35 negative_sampler """basic""" +574 35 evaluator """rankbased""" +574 36 dataset """fb15k237""" +574 36 model """rotate""" +574 36 loss """softplus""" +574 36 regularizer """no""" +574 36 optimizer """adam""" +574 36 training_loop """owa""" +574 36 negative_sampler """basic""" +574 36 evaluator """rankbased""" +574 37 dataset """fb15k237""" +574 37 model """rotate""" +574 37 loss """softplus""" +574 37 regularizer """no""" +574 37 optimizer """adam""" +574 37 training_loop """owa""" +574 37 negative_sampler """basic""" +574 37 evaluator """rankbased""" +574 38 dataset """fb15k237""" +574 38 model """rotate""" +574 38 loss """softplus""" 
+574 38 regularizer """no""" +574 38 optimizer """adam""" +574 38 training_loop """owa""" +574 38 negative_sampler """basic""" +574 38 evaluator """rankbased""" +574 39 dataset """fb15k237""" +574 39 model """rotate""" +574 39 loss """softplus""" +574 39 regularizer """no""" +574 39 optimizer """adam""" +574 39 training_loop """owa""" +574 39 negative_sampler """basic""" +574 39 evaluator """rankbased""" +574 40 dataset """fb15k237""" +574 40 model """rotate""" +574 40 loss """softplus""" +574 40 regularizer """no""" +574 40 optimizer """adam""" +574 40 training_loop """owa""" +574 40 negative_sampler """basic""" +574 40 evaluator """rankbased""" +574 41 dataset """fb15k237""" +574 41 model """rotate""" +574 41 loss """softplus""" +574 41 regularizer """no""" +574 41 optimizer """adam""" +574 41 training_loop """owa""" +574 41 negative_sampler """basic""" +574 41 evaluator """rankbased""" +574 42 dataset """fb15k237""" +574 42 model """rotate""" +574 42 loss """softplus""" +574 42 regularizer """no""" +574 42 optimizer """adam""" +574 42 training_loop """owa""" +574 42 negative_sampler """basic""" +574 42 evaluator """rankbased""" +574 43 dataset """fb15k237""" +574 43 model """rotate""" +574 43 loss """softplus""" +574 43 regularizer """no""" +574 43 optimizer """adam""" +574 43 training_loop """owa""" +574 43 negative_sampler """basic""" +574 43 evaluator """rankbased""" +574 44 dataset """fb15k237""" +574 44 model """rotate""" +574 44 loss """softplus""" +574 44 regularizer """no""" +574 44 optimizer """adam""" +574 44 training_loop """owa""" +574 44 negative_sampler """basic""" +574 44 evaluator """rankbased""" +574 45 dataset """fb15k237""" +574 45 model """rotate""" +574 45 loss """softplus""" +574 45 regularizer """no""" +574 45 optimizer """adam""" +574 45 training_loop """owa""" +574 45 negative_sampler """basic""" +574 45 evaluator """rankbased""" +574 46 dataset """fb15k237""" +574 46 model """rotate""" +574 46 loss """softplus""" +574 46 regularizer 
"""no""" +574 46 optimizer """adam""" +574 46 training_loop """owa""" +574 46 negative_sampler """basic""" +574 46 evaluator """rankbased""" +574 47 dataset """fb15k237""" +574 47 model """rotate""" +574 47 loss """softplus""" +574 47 regularizer """no""" +574 47 optimizer """adam""" +574 47 training_loop """owa""" +574 47 negative_sampler """basic""" +574 47 evaluator """rankbased""" +574 48 dataset """fb15k237""" +574 48 model """rotate""" +574 48 loss """softplus""" +574 48 regularizer """no""" +574 48 optimizer """adam""" +574 48 training_loop """owa""" +574 48 negative_sampler """basic""" +574 48 evaluator """rankbased""" +574 49 dataset """fb15k237""" +574 49 model """rotate""" +574 49 loss """softplus""" +574 49 regularizer """no""" +574 49 optimizer """adam""" +574 49 training_loop """owa""" +574 49 negative_sampler """basic""" +574 49 evaluator """rankbased""" +574 50 dataset """fb15k237""" +574 50 model """rotate""" +574 50 loss """softplus""" +574 50 regularizer """no""" +574 50 optimizer """adam""" +574 50 training_loop """owa""" +574 50 negative_sampler """basic""" +574 50 evaluator """rankbased""" +575 1 model.embedding_dim 1.0 +575 1 loss.margin 14.60435870269219 +575 1 loss.adversarial_temperature 0.16272050269096247 +575 1 optimizer.lr 0.0013880172750040988 +575 1 negative_sampler.num_negs_per_pos 89.0 +575 1 training.batch_size 2.0 +575 2 model.embedding_dim 2.0 +575 2 loss.margin 25.508778865755133 +575 2 loss.adversarial_temperature 0.42311544938267565 +575 2 optimizer.lr 0.011226280549804688 +575 2 negative_sampler.num_negs_per_pos 56.0 +575 2 training.batch_size 1.0 +575 3 model.embedding_dim 0.0 +575 3 loss.margin 23.179447348804203 +575 3 loss.adversarial_temperature 0.3569791142394164 +575 3 optimizer.lr 0.005075023697099996 +575 3 negative_sampler.num_negs_per_pos 71.0 +575 3 training.batch_size 2.0 +575 4 model.embedding_dim 2.0 +575 4 loss.margin 8.204675942885649 +575 4 loss.adversarial_temperature 0.719443546596282 +575 4 optimizer.lr 
0.00797637610056539 +575 4 negative_sampler.num_negs_per_pos 15.0 +575 4 training.batch_size 2.0 +575 5 model.embedding_dim 1.0 +575 5 loss.margin 17.270023410867058 +575 5 loss.adversarial_temperature 0.6155506914121551 +575 5 optimizer.lr 0.038298957532171475 +575 5 negative_sampler.num_negs_per_pos 63.0 +575 5 training.batch_size 1.0 +575 6 model.embedding_dim 0.0 +575 6 loss.margin 5.544592445393534 +575 6 loss.adversarial_temperature 0.39308030405464234 +575 6 optimizer.lr 0.015789882781116357 +575 6 negative_sampler.num_negs_per_pos 51.0 +575 6 training.batch_size 1.0 +575 7 model.embedding_dim 1.0 +575 7 loss.margin 7.375364041278877 +575 7 loss.adversarial_temperature 0.3739743561482177 +575 7 optimizer.lr 0.0649272422468758 +575 7 negative_sampler.num_negs_per_pos 46.0 +575 7 training.batch_size 1.0 +575 8 model.embedding_dim 0.0 +575 8 loss.margin 28.655773209606387 +575 8 loss.adversarial_temperature 0.11587867656761204 +575 8 optimizer.lr 0.09833466487733002 +575 8 negative_sampler.num_negs_per_pos 64.0 +575 8 training.batch_size 1.0 +575 9 model.embedding_dim 0.0 +575 9 loss.margin 4.726795448444129 +575 9 loss.adversarial_temperature 0.2401540903536225 +575 9 optimizer.lr 0.01315184853729864 +575 9 negative_sampler.num_negs_per_pos 86.0 +575 9 training.batch_size 2.0 +575 10 model.embedding_dim 1.0 +575 10 loss.margin 20.177604782610793 +575 10 loss.adversarial_temperature 0.9239610817052188 +575 10 optimizer.lr 0.0011045421539895204 +575 10 negative_sampler.num_negs_per_pos 98.0 +575 10 training.batch_size 2.0 +575 11 model.embedding_dim 2.0 +575 11 loss.margin 23.331654468169788 +575 11 loss.adversarial_temperature 0.519240238118605 +575 11 optimizer.lr 0.013853372622136446 +575 11 negative_sampler.num_negs_per_pos 13.0 +575 11 training.batch_size 2.0 +575 12 model.embedding_dim 1.0 +575 12 loss.margin 6.856337656914941 +575 12 loss.adversarial_temperature 0.7824022937854856 +575 12 optimizer.lr 0.011098206816384047 +575 12 
negative_sampler.num_negs_per_pos 49.0 +575 12 training.batch_size 0.0 +575 13 model.embedding_dim 2.0 +575 13 loss.margin 15.176566311835394 +575 13 loss.adversarial_temperature 0.4401755801713651 +575 13 optimizer.lr 0.012773933602704084 +575 13 negative_sampler.num_negs_per_pos 79.0 +575 13 training.batch_size 2.0 +575 14 model.embedding_dim 1.0 +575 14 loss.margin 24.352457327500346 +575 14 loss.adversarial_temperature 0.3948576255943086 +575 14 optimizer.lr 0.002353220008177676 +575 14 negative_sampler.num_negs_per_pos 54.0 +575 14 training.batch_size 0.0 +575 15 model.embedding_dim 1.0 +575 15 loss.margin 5.008118104821977 +575 15 loss.adversarial_temperature 0.39334025875545503 +575 15 optimizer.lr 0.055178762269836076 +575 15 negative_sampler.num_negs_per_pos 28.0 +575 15 training.batch_size 1.0 +575 16 model.embedding_dim 1.0 +575 16 loss.margin 21.984612829014086 +575 16 loss.adversarial_temperature 0.5969461423296933 +575 16 optimizer.lr 0.002571934593330688 +575 16 negative_sampler.num_negs_per_pos 59.0 +575 16 training.batch_size 1.0 +575 17 model.embedding_dim 1.0 +575 17 loss.margin 19.61663242012387 +575 17 loss.adversarial_temperature 0.13503417106870672 +575 17 optimizer.lr 0.001912867128452039 +575 17 negative_sampler.num_negs_per_pos 66.0 +575 17 training.batch_size 0.0 +575 18 model.embedding_dim 1.0 +575 18 loss.margin 7.093228775142252 +575 18 loss.adversarial_temperature 0.11868808536945274 +575 18 optimizer.lr 0.028293014721802342 +575 18 negative_sampler.num_negs_per_pos 15.0 +575 18 training.batch_size 0.0 +575 19 model.embedding_dim 1.0 +575 19 loss.margin 23.058764453460252 +575 19 loss.adversarial_temperature 0.5447962317850515 +575 19 optimizer.lr 0.0681935238304474 +575 19 negative_sampler.num_negs_per_pos 38.0 +575 19 training.batch_size 2.0 +575 20 model.embedding_dim 2.0 +575 20 loss.margin 3.1827630119395183 +575 20 loss.adversarial_temperature 0.8207303513644818 +575 20 optimizer.lr 0.018990106324125684 +575 20 
negative_sampler.num_negs_per_pos 63.0 +575 20 training.batch_size 2.0 +575 21 model.embedding_dim 2.0 +575 21 loss.margin 25.36983349040499 +575 21 loss.adversarial_temperature 0.6765174136145182 +575 21 optimizer.lr 0.0751897084164841 +575 21 negative_sampler.num_negs_per_pos 94.0 +575 21 training.batch_size 2.0 +575 22 model.embedding_dim 1.0 +575 22 loss.margin 2.977076979307531 +575 22 loss.adversarial_temperature 0.1210484542711486 +575 22 optimizer.lr 0.07887419977788343 +575 22 negative_sampler.num_negs_per_pos 39.0 +575 22 training.batch_size 2.0 +575 23 model.embedding_dim 1.0 +575 23 loss.margin 6.236605033822437 +575 23 loss.adversarial_temperature 0.593300741596044 +575 23 optimizer.lr 0.08941596237296433 +575 23 negative_sampler.num_negs_per_pos 40.0 +575 23 training.batch_size 1.0 +575 24 model.embedding_dim 1.0 +575 24 loss.margin 4.4726507160239946 +575 24 loss.adversarial_temperature 0.6152818004496546 +575 24 optimizer.lr 0.010466388836523576 +575 24 negative_sampler.num_negs_per_pos 63.0 +575 24 training.batch_size 1.0 +575 25 model.embedding_dim 0.0 +575 25 loss.margin 10.190068256838362 +575 25 loss.adversarial_temperature 0.5157660821830442 +575 25 optimizer.lr 0.0041165660775472936 +575 25 negative_sampler.num_negs_per_pos 76.0 +575 25 training.batch_size 1.0 +575 26 model.embedding_dim 0.0 +575 26 loss.margin 1.8196563152214669 +575 26 loss.adversarial_temperature 0.7315571930430076 +575 26 optimizer.lr 0.012409122006390369 +575 26 negative_sampler.num_negs_per_pos 13.0 +575 26 training.batch_size 0.0 +575 27 model.embedding_dim 2.0 +575 27 loss.margin 25.010457022802285 +575 27 loss.adversarial_temperature 0.7717663340753802 +575 27 optimizer.lr 0.02977730452492093 +575 27 negative_sampler.num_negs_per_pos 60.0 +575 27 training.batch_size 1.0 +575 28 model.embedding_dim 2.0 +575 28 loss.margin 25.142974631495687 +575 28 loss.adversarial_temperature 0.5594577507001365 +575 28 optimizer.lr 0.004691214565735788 +575 28 
negative_sampler.num_negs_per_pos 39.0 +575 28 training.batch_size 2.0 +575 29 model.embedding_dim 1.0 +575 29 loss.margin 19.852676735610576 +575 29 loss.adversarial_temperature 0.20111868795065121 +575 29 optimizer.lr 0.017352632073727125 +575 29 negative_sampler.num_negs_per_pos 72.0 +575 29 training.batch_size 0.0 +575 1 dataset """fb15k237""" +575 1 model """rotate""" +575 1 loss """nssa""" +575 1 regularizer """no""" +575 1 optimizer """adam""" +575 1 training_loop """owa""" +575 1 negative_sampler """basic""" +575 1 evaluator """rankbased""" +575 2 dataset """fb15k237""" +575 2 model """rotate""" +575 2 loss """nssa""" +575 2 regularizer """no""" +575 2 optimizer """adam""" +575 2 training_loop """owa""" +575 2 negative_sampler """basic""" +575 2 evaluator """rankbased""" +575 3 dataset """fb15k237""" +575 3 model """rotate""" +575 3 loss """nssa""" +575 3 regularizer """no""" +575 3 optimizer """adam""" +575 3 training_loop """owa""" +575 3 negative_sampler """basic""" +575 3 evaluator """rankbased""" +575 4 dataset """fb15k237""" +575 4 model """rotate""" +575 4 loss """nssa""" +575 4 regularizer """no""" +575 4 optimizer """adam""" +575 4 training_loop """owa""" +575 4 negative_sampler """basic""" +575 4 evaluator """rankbased""" +575 5 dataset """fb15k237""" +575 5 model """rotate""" +575 5 loss """nssa""" +575 5 regularizer """no""" +575 5 optimizer """adam""" +575 5 training_loop """owa""" +575 5 negative_sampler """basic""" +575 5 evaluator """rankbased""" +575 6 dataset """fb15k237""" +575 6 model """rotate""" +575 6 loss """nssa""" +575 6 regularizer """no""" +575 6 optimizer """adam""" +575 6 training_loop """owa""" +575 6 negative_sampler """basic""" +575 6 evaluator """rankbased""" +575 7 dataset """fb15k237""" +575 7 model """rotate""" +575 7 loss """nssa""" +575 7 regularizer """no""" +575 7 optimizer """adam""" +575 7 training_loop """owa""" +575 7 negative_sampler """basic""" +575 7 evaluator """rankbased""" +575 8 dataset """fb15k237""" +575 
8 model """rotate""" +575 8 loss """nssa""" +575 8 regularizer """no""" +575 8 optimizer """adam""" +575 8 training_loop """owa""" +575 8 negative_sampler """basic""" +575 8 evaluator """rankbased""" +575 9 dataset """fb15k237""" +575 9 model """rotate""" +575 9 loss """nssa""" +575 9 regularizer """no""" +575 9 optimizer """adam""" +575 9 training_loop """owa""" +575 9 negative_sampler """basic""" +575 9 evaluator """rankbased""" +575 10 dataset """fb15k237""" +575 10 model """rotate""" +575 10 loss """nssa""" +575 10 regularizer """no""" +575 10 optimizer """adam""" +575 10 training_loop """owa""" +575 10 negative_sampler """basic""" +575 10 evaluator """rankbased""" +575 11 dataset """fb15k237""" +575 11 model """rotate""" +575 11 loss """nssa""" +575 11 regularizer """no""" +575 11 optimizer """adam""" +575 11 training_loop """owa""" +575 11 negative_sampler """basic""" +575 11 evaluator """rankbased""" +575 12 dataset """fb15k237""" +575 12 model """rotate""" +575 12 loss """nssa""" +575 12 regularizer """no""" +575 12 optimizer """adam""" +575 12 training_loop """owa""" +575 12 negative_sampler """basic""" +575 12 evaluator """rankbased""" +575 13 dataset """fb15k237""" +575 13 model """rotate""" +575 13 loss """nssa""" +575 13 regularizer """no""" +575 13 optimizer """adam""" +575 13 training_loop """owa""" +575 13 negative_sampler """basic""" +575 13 evaluator """rankbased""" +575 14 dataset """fb15k237""" +575 14 model """rotate""" +575 14 loss """nssa""" +575 14 regularizer """no""" +575 14 optimizer """adam""" +575 14 training_loop """owa""" +575 14 negative_sampler """basic""" +575 14 evaluator """rankbased""" +575 15 dataset """fb15k237""" +575 15 model """rotate""" +575 15 loss """nssa""" +575 15 regularizer """no""" +575 15 optimizer """adam""" +575 15 training_loop """owa""" +575 15 negative_sampler """basic""" +575 15 evaluator """rankbased""" +575 16 dataset """fb15k237""" +575 16 model """rotate""" +575 16 loss """nssa""" +575 16 regularizer 
"""no""" +575 16 optimizer """adam""" +575 16 training_loop """owa""" +575 16 negative_sampler """basic""" +575 16 evaluator """rankbased""" +575 17 dataset """fb15k237""" +575 17 model """rotate""" +575 17 loss """nssa""" +575 17 regularizer """no""" +575 17 optimizer """adam""" +575 17 training_loop """owa""" +575 17 negative_sampler """basic""" +575 17 evaluator """rankbased""" +575 18 dataset """fb15k237""" +575 18 model """rotate""" +575 18 loss """nssa""" +575 18 regularizer """no""" +575 18 optimizer """adam""" +575 18 training_loop """owa""" +575 18 negative_sampler """basic""" +575 18 evaluator """rankbased""" +575 19 dataset """fb15k237""" +575 19 model """rotate""" +575 19 loss """nssa""" +575 19 regularizer """no""" +575 19 optimizer """adam""" +575 19 training_loop """owa""" +575 19 negative_sampler """basic""" +575 19 evaluator """rankbased""" +575 20 dataset """fb15k237""" +575 20 model """rotate""" +575 20 loss """nssa""" +575 20 regularizer """no""" +575 20 optimizer """adam""" +575 20 training_loop """owa""" +575 20 negative_sampler """basic""" +575 20 evaluator """rankbased""" +575 21 dataset """fb15k237""" +575 21 model """rotate""" +575 21 loss """nssa""" +575 21 regularizer """no""" +575 21 optimizer """adam""" +575 21 training_loop """owa""" +575 21 negative_sampler """basic""" +575 21 evaluator """rankbased""" +575 22 dataset """fb15k237""" +575 22 model """rotate""" +575 22 loss """nssa""" +575 22 regularizer """no""" +575 22 optimizer """adam""" +575 22 training_loop """owa""" +575 22 negative_sampler """basic""" +575 22 evaluator """rankbased""" +575 23 dataset """fb15k237""" +575 23 model """rotate""" +575 23 loss """nssa""" +575 23 regularizer """no""" +575 23 optimizer """adam""" +575 23 training_loop """owa""" +575 23 negative_sampler """basic""" +575 23 evaluator """rankbased""" +575 24 dataset """fb15k237""" +575 24 model """rotate""" +575 24 loss """nssa""" +575 24 regularizer """no""" +575 24 optimizer """adam""" +575 24 
training_loop """owa""" +575 24 negative_sampler """basic""" +575 24 evaluator """rankbased""" +575 25 dataset """fb15k237""" +575 25 model """rotate""" +575 25 loss """nssa""" +575 25 regularizer """no""" +575 25 optimizer """adam""" +575 25 training_loop """owa""" +575 25 negative_sampler """basic""" +575 25 evaluator """rankbased""" +575 26 dataset """fb15k237""" +575 26 model """rotate""" +575 26 loss """nssa""" +575 26 regularizer """no""" +575 26 optimizer """adam""" +575 26 training_loop """owa""" +575 26 negative_sampler """basic""" +575 26 evaluator """rankbased""" +575 27 dataset """fb15k237""" +575 27 model """rotate""" +575 27 loss """nssa""" +575 27 regularizer """no""" +575 27 optimizer """adam""" +575 27 training_loop """owa""" +575 27 negative_sampler """basic""" +575 27 evaluator """rankbased""" +575 28 dataset """fb15k237""" +575 28 model """rotate""" +575 28 loss """nssa""" +575 28 regularizer """no""" +575 28 optimizer """adam""" +575 28 training_loop """owa""" +575 28 negative_sampler """basic""" +575 28 evaluator """rankbased""" +575 29 dataset """fb15k237""" +575 29 model """rotate""" +575 29 loss """nssa""" +575 29 regularizer """no""" +575 29 optimizer """adam""" +575 29 training_loop """owa""" +575 29 negative_sampler """basic""" +575 29 evaluator """rankbased""" +576 1 model.embedding_dim 0.0 +576 1 loss.margin 1.289448639134656 +576 1 loss.adversarial_temperature 0.6539038842406192 +576 1 optimizer.lr 0.02113894943449861 +576 1 negative_sampler.num_negs_per_pos 46.0 +576 1 training.batch_size 1.0 +576 2 model.embedding_dim 2.0 +576 2 loss.margin 21.596129843627427 +576 2 loss.adversarial_temperature 0.2255937889116001 +576 2 optimizer.lr 0.003782288171204389 +576 2 negative_sampler.num_negs_per_pos 11.0 +576 2 training.batch_size 2.0 +576 3 model.embedding_dim 0.0 +576 3 loss.margin 17.87397217608713 +576 3 loss.adversarial_temperature 0.30817615590412034 +576 3 optimizer.lr 0.0994002875944928 +576 3 negative_sampler.num_negs_per_pos 
31.0 +576 3 training.batch_size 1.0 +576 4 model.embedding_dim 1.0 +576 4 loss.margin 19.029809280715785 +576 4 loss.adversarial_temperature 0.4756541312672859 +576 4 optimizer.lr 0.0011747681077283834 +576 4 negative_sampler.num_negs_per_pos 35.0 +576 4 training.batch_size 1.0 +576 5 model.embedding_dim 0.0 +576 5 loss.margin 2.5379768625492516 +576 5 loss.adversarial_temperature 0.8181659911683836 +576 5 optimizer.lr 0.01135162866365257 +576 5 negative_sampler.num_negs_per_pos 84.0 +576 5 training.batch_size 2.0 +576 6 model.embedding_dim 2.0 +576 6 loss.margin 17.911810696187683 +576 6 loss.adversarial_temperature 0.964718162654127 +576 6 optimizer.lr 0.008391929985023902 +576 6 negative_sampler.num_negs_per_pos 58.0 +576 6 training.batch_size 1.0 +576 7 model.embedding_dim 0.0 +576 7 loss.margin 25.22744785658999 +576 7 loss.adversarial_temperature 0.13514951126378835 +576 7 optimizer.lr 0.007753333526697488 +576 7 negative_sampler.num_negs_per_pos 6.0 +576 7 training.batch_size 2.0 +576 8 model.embedding_dim 0.0 +576 8 loss.margin 22.475038486262033 +576 8 loss.adversarial_temperature 0.2096123336694777 +576 8 optimizer.lr 0.059932998167778674 +576 8 negative_sampler.num_negs_per_pos 69.0 +576 8 training.batch_size 0.0 +576 9 model.embedding_dim 2.0 +576 9 loss.margin 29.15755969963054 +576 9 loss.adversarial_temperature 0.8154035316965499 +576 9 optimizer.lr 0.001866313647591832 +576 9 negative_sampler.num_negs_per_pos 74.0 +576 9 training.batch_size 2.0 +576 10 model.embedding_dim 1.0 +576 10 loss.margin 23.82149711244835 +576 10 loss.adversarial_temperature 0.5199722899589392 +576 10 optimizer.lr 0.004482125905243541 +576 10 negative_sampler.num_negs_per_pos 89.0 +576 10 training.batch_size 1.0 +576 11 model.embedding_dim 2.0 +576 11 loss.margin 3.3174656684832953 +576 11 loss.adversarial_temperature 0.39357938379635304 +576 11 optimizer.lr 0.001332183326763608 +576 11 negative_sampler.num_negs_per_pos 85.0 +576 11 training.batch_size 0.0 +576 12 
model.embedding_dim 0.0 +576 12 loss.margin 25.691488646599183 +576 12 loss.adversarial_temperature 0.1276269051468376 +576 12 optimizer.lr 0.012987139748423943 +576 12 negative_sampler.num_negs_per_pos 16.0 +576 12 training.batch_size 1.0 +576 13 model.embedding_dim 0.0 +576 13 loss.margin 25.153155139220555 +576 13 loss.adversarial_temperature 0.8562472543945177 +576 13 optimizer.lr 0.0028659185867861317 +576 13 negative_sampler.num_negs_per_pos 88.0 +576 13 training.batch_size 2.0 +576 14 model.embedding_dim 0.0 +576 14 loss.margin 17.670144568362687 +576 14 loss.adversarial_temperature 0.9858144677158434 +576 14 optimizer.lr 0.001701859521066941 +576 14 negative_sampler.num_negs_per_pos 86.0 +576 14 training.batch_size 2.0 +576 15 model.embedding_dim 1.0 +576 15 loss.margin 10.353899097976454 +576 15 loss.adversarial_temperature 0.1833466056465089 +576 15 optimizer.lr 0.01780843474337544 +576 15 negative_sampler.num_negs_per_pos 72.0 +576 15 training.batch_size 1.0 +576 16 model.embedding_dim 1.0 +576 16 loss.margin 23.64496514673007 +576 16 loss.adversarial_temperature 0.8009507951891223 +576 16 optimizer.lr 0.0060197373833887225 +576 16 negative_sampler.num_negs_per_pos 32.0 +576 16 training.batch_size 0.0 +576 17 model.embedding_dim 2.0 +576 17 loss.margin 21.238962220866007 +576 17 loss.adversarial_temperature 0.5437812279803999 +576 17 optimizer.lr 0.007628725907488742 +576 17 negative_sampler.num_negs_per_pos 0.0 +576 17 training.batch_size 1.0 +576 18 model.embedding_dim 1.0 +576 18 loss.margin 2.7657508297603597 +576 18 loss.adversarial_temperature 0.7011117264315081 +576 18 optimizer.lr 0.001024668295330529 +576 18 negative_sampler.num_negs_per_pos 60.0 +576 18 training.batch_size 0.0 +576 19 model.embedding_dim 0.0 +576 19 loss.margin 16.988960076803135 +576 19 loss.adversarial_temperature 0.7366885401881685 +576 19 optimizer.lr 0.010370927628764081 +576 19 negative_sampler.num_negs_per_pos 22.0 +576 19 training.batch_size 0.0 +576 20 
model.embedding_dim 2.0 +576 20 loss.margin 16.35575405205966 +576 20 loss.adversarial_temperature 0.15238908435067952 +576 20 optimizer.lr 0.06641368283872356 +576 20 negative_sampler.num_negs_per_pos 69.0 +576 20 training.batch_size 0.0 +576 21 model.embedding_dim 2.0 +576 21 loss.margin 4.699530454649568 +576 21 loss.adversarial_temperature 0.9994219563806148 +576 21 optimizer.lr 0.03648988487396674 +576 21 negative_sampler.num_negs_per_pos 95.0 +576 21 training.batch_size 1.0 +576 22 model.embedding_dim 2.0 +576 22 loss.margin 28.72664607790055 +576 22 loss.adversarial_temperature 0.49716733909404787 +576 22 optimizer.lr 0.0017106230957739475 +576 22 negative_sampler.num_negs_per_pos 89.0 +576 22 training.batch_size 2.0 +576 23 model.embedding_dim 2.0 +576 23 loss.margin 2.1880107538626237 +576 23 loss.adversarial_temperature 0.5909741606845038 +576 23 optimizer.lr 0.0011035078739176757 +576 23 negative_sampler.num_negs_per_pos 87.0 +576 23 training.batch_size 2.0 +576 24 model.embedding_dim 0.0 +576 24 loss.margin 7.7201635216563815 +576 24 loss.adversarial_temperature 0.7665674330065427 +576 24 optimizer.lr 0.0695799662133248 +576 24 negative_sampler.num_negs_per_pos 1.0 +576 24 training.batch_size 1.0 +576 25 model.embedding_dim 0.0 +576 25 loss.margin 29.907524771283295 +576 25 loss.adversarial_temperature 0.45749942443892644 +576 25 optimizer.lr 0.012481242674633797 +576 25 negative_sampler.num_negs_per_pos 77.0 +576 25 training.batch_size 1.0 +576 26 model.embedding_dim 1.0 +576 26 loss.margin 8.128554804456982 +576 26 loss.adversarial_temperature 0.6286364863249172 +576 26 optimizer.lr 0.007538531207059488 +576 26 negative_sampler.num_negs_per_pos 64.0 +576 26 training.batch_size 0.0 +576 27 model.embedding_dim 2.0 +576 27 loss.margin 16.48043354283725 +576 27 loss.adversarial_temperature 0.1614090281444737 +576 27 optimizer.lr 0.003888338076576516 +576 27 negative_sampler.num_negs_per_pos 9.0 +576 27 training.batch_size 0.0 +576 28 model.embedding_dim 
2.0 +576 28 loss.margin 7.199075849617133 +576 28 loss.adversarial_temperature 0.5268644568403036 +576 28 optimizer.lr 0.007068801865014043 +576 28 negative_sampler.num_negs_per_pos 75.0 +576 28 training.batch_size 0.0 +576 29 model.embedding_dim 2.0 +576 29 loss.margin 10.41346809057903 +576 29 loss.adversarial_temperature 0.10276601981269912 +576 29 optimizer.lr 0.02447265980639368 +576 29 negative_sampler.num_negs_per_pos 35.0 +576 29 training.batch_size 1.0 +576 30 model.embedding_dim 1.0 +576 30 loss.margin 25.7747601475985 +576 30 loss.adversarial_temperature 0.18532480885565747 +576 30 optimizer.lr 0.011820496331079275 +576 30 negative_sampler.num_negs_per_pos 83.0 +576 30 training.batch_size 1.0 +576 31 model.embedding_dim 0.0 +576 31 loss.margin 15.525947882044619 +576 31 loss.adversarial_temperature 0.4825311131120348 +576 31 optimizer.lr 0.002120364598943091 +576 31 negative_sampler.num_negs_per_pos 11.0 +576 31 training.batch_size 0.0 +576 32 model.embedding_dim 0.0 +576 32 loss.margin 12.144479757023618 +576 32 loss.adversarial_temperature 0.19685876865371782 +576 32 optimizer.lr 0.012383096111109448 +576 32 negative_sampler.num_negs_per_pos 21.0 +576 32 training.batch_size 2.0 +576 33 model.embedding_dim 0.0 +576 33 loss.margin 7.542348167186814 +576 33 loss.adversarial_temperature 0.6661283691345343 +576 33 optimizer.lr 0.004957378625294025 +576 33 negative_sampler.num_negs_per_pos 55.0 +576 33 training.batch_size 2.0 +576 34 model.embedding_dim 2.0 +576 34 loss.margin 24.786304683402886 +576 34 loss.adversarial_temperature 0.39789257891183877 +576 34 optimizer.lr 0.009886117912580256 +576 34 negative_sampler.num_negs_per_pos 99.0 +576 34 training.batch_size 2.0 +576 35 model.embedding_dim 1.0 +576 35 loss.margin 9.296706559303804 +576 35 loss.adversarial_temperature 0.9748892289429548 +576 35 optimizer.lr 0.011872643995705107 +576 35 negative_sampler.num_negs_per_pos 5.0 +576 35 training.batch_size 2.0 +576 36 model.embedding_dim 2.0 +576 36 
loss.margin 22.959751393063474 +576 36 loss.adversarial_temperature 0.30783490353054044 +576 36 optimizer.lr 0.04907492087067384 +576 36 negative_sampler.num_negs_per_pos 79.0 +576 36 training.batch_size 2.0 +576 37 model.embedding_dim 0.0 +576 37 loss.margin 29.292661164952698 +576 37 loss.adversarial_temperature 0.14804305615610547 +576 37 optimizer.lr 0.06907337636678308 +576 37 negative_sampler.num_negs_per_pos 22.0 +576 37 training.batch_size 0.0 +576 38 model.embedding_dim 0.0 +576 38 loss.margin 19.33862152144451 +576 38 loss.adversarial_temperature 0.4760750666518396 +576 38 optimizer.lr 0.0013623942624659015 +576 38 negative_sampler.num_negs_per_pos 47.0 +576 38 training.batch_size 1.0 +576 39 model.embedding_dim 2.0 +576 39 loss.margin 7.674622731864604 +576 39 loss.adversarial_temperature 0.1818621821321489 +576 39 optimizer.lr 0.005996625621530835 +576 39 negative_sampler.num_negs_per_pos 76.0 +576 39 training.batch_size 0.0 +576 1 dataset """fb15k237""" +576 1 model """rotate""" +576 1 loss """nssa""" +576 1 regularizer """no""" +576 1 optimizer """adam""" +576 1 training_loop """owa""" +576 1 negative_sampler """basic""" +576 1 evaluator """rankbased""" +576 2 dataset """fb15k237""" +576 2 model """rotate""" +576 2 loss """nssa""" +576 2 regularizer """no""" +576 2 optimizer """adam""" +576 2 training_loop """owa""" +576 2 negative_sampler """basic""" +576 2 evaluator """rankbased""" +576 3 dataset """fb15k237""" +576 3 model """rotate""" +576 3 loss """nssa""" +576 3 regularizer """no""" +576 3 optimizer """adam""" +576 3 training_loop """owa""" +576 3 negative_sampler """basic""" +576 3 evaluator """rankbased""" +576 4 dataset """fb15k237""" +576 4 model """rotate""" +576 4 loss """nssa""" +576 4 regularizer """no""" +576 4 optimizer """adam""" +576 4 training_loop """owa""" +576 4 negative_sampler """basic""" +576 4 evaluator """rankbased""" +576 5 dataset """fb15k237""" +576 5 model """rotate""" +576 5 loss """nssa""" +576 5 regularizer """no""" 
+576 5 optimizer """adam""" +576 5 training_loop """owa""" +576 5 negative_sampler """basic""" +576 5 evaluator """rankbased""" +576 6 dataset """fb15k237""" +576 6 model """rotate""" +576 6 loss """nssa""" +576 6 regularizer """no""" +576 6 optimizer """adam""" +576 6 training_loop """owa""" +576 6 negative_sampler """basic""" +576 6 evaluator """rankbased""" +576 7 dataset """fb15k237""" +576 7 model """rotate""" +576 7 loss """nssa""" +576 7 regularizer """no""" +576 7 optimizer """adam""" +576 7 training_loop """owa""" +576 7 negative_sampler """basic""" +576 7 evaluator """rankbased""" +576 8 dataset """fb15k237""" +576 8 model """rotate""" +576 8 loss """nssa""" +576 8 regularizer """no""" +576 8 optimizer """adam""" +576 8 training_loop """owa""" +576 8 negative_sampler """basic""" +576 8 evaluator """rankbased""" +576 9 dataset """fb15k237""" +576 9 model """rotate""" +576 9 loss """nssa""" +576 9 regularizer """no""" +576 9 optimizer """adam""" +576 9 training_loop """owa""" +576 9 negative_sampler """basic""" +576 9 evaluator """rankbased""" +576 10 dataset """fb15k237""" +576 10 model """rotate""" +576 10 loss """nssa""" +576 10 regularizer """no""" +576 10 optimizer """adam""" +576 10 training_loop """owa""" +576 10 negative_sampler """basic""" +576 10 evaluator """rankbased""" +576 11 dataset """fb15k237""" +576 11 model """rotate""" +576 11 loss """nssa""" +576 11 regularizer """no""" +576 11 optimizer """adam""" +576 11 training_loop """owa""" +576 11 negative_sampler """basic""" +576 11 evaluator """rankbased""" +576 12 dataset """fb15k237""" +576 12 model """rotate""" +576 12 loss """nssa""" +576 12 regularizer """no""" +576 12 optimizer """adam""" +576 12 training_loop """owa""" +576 12 negative_sampler """basic""" +576 12 evaluator """rankbased""" +576 13 dataset """fb15k237""" +576 13 model """rotate""" +576 13 loss """nssa""" +576 13 regularizer """no""" +576 13 optimizer """adam""" +576 13 training_loop """owa""" +576 13 negative_sampler 
"""basic""" +576 13 evaluator """rankbased""" +576 14 dataset """fb15k237""" +576 14 model """rotate""" +576 14 loss """nssa""" +576 14 regularizer """no""" +576 14 optimizer """adam""" +576 14 training_loop """owa""" +576 14 negative_sampler """basic""" +576 14 evaluator """rankbased""" +576 15 dataset """fb15k237""" +576 15 model """rotate""" +576 15 loss """nssa""" +576 15 regularizer """no""" +576 15 optimizer """adam""" +576 15 training_loop """owa""" +576 15 negative_sampler """basic""" +576 15 evaluator """rankbased""" +576 16 dataset """fb15k237""" +576 16 model """rotate""" +576 16 loss """nssa""" +576 16 regularizer """no""" +576 16 optimizer """adam""" +576 16 training_loop """owa""" +576 16 negative_sampler """basic""" +576 16 evaluator """rankbased""" +576 17 dataset """fb15k237""" +576 17 model """rotate""" +576 17 loss """nssa""" +576 17 regularizer """no""" +576 17 optimizer """adam""" +576 17 training_loop """owa""" +576 17 negative_sampler """basic""" +576 17 evaluator """rankbased""" +576 18 dataset """fb15k237""" +576 18 model """rotate""" +576 18 loss """nssa""" +576 18 regularizer """no""" +576 18 optimizer """adam""" +576 18 training_loop """owa""" +576 18 negative_sampler """basic""" +576 18 evaluator """rankbased""" +576 19 dataset """fb15k237""" +576 19 model """rotate""" +576 19 loss """nssa""" +576 19 regularizer """no""" +576 19 optimizer """adam""" +576 19 training_loop """owa""" +576 19 negative_sampler """basic""" +576 19 evaluator """rankbased""" +576 20 dataset """fb15k237""" +576 20 model """rotate""" +576 20 loss """nssa""" +576 20 regularizer """no""" +576 20 optimizer """adam""" +576 20 training_loop """owa""" +576 20 negative_sampler """basic""" +576 20 evaluator """rankbased""" +576 21 dataset """fb15k237""" +576 21 model """rotate""" +576 21 loss """nssa""" +576 21 regularizer """no""" +576 21 optimizer """adam""" +576 21 training_loop """owa""" +576 21 negative_sampler """basic""" +576 21 evaluator """rankbased""" +576 22 
dataset """fb15k237""" +576 22 model """rotate""" +576 22 loss """nssa""" +576 22 regularizer """no""" +576 22 optimizer """adam""" +576 22 training_loop """owa""" +576 22 negative_sampler """basic""" +576 22 evaluator """rankbased""" +576 23 dataset """fb15k237""" +576 23 model """rotate""" +576 23 loss """nssa""" +576 23 regularizer """no""" +576 23 optimizer """adam""" +576 23 training_loop """owa""" +576 23 negative_sampler """basic""" +576 23 evaluator """rankbased""" +576 24 dataset """fb15k237""" +576 24 model """rotate""" +576 24 loss """nssa""" +576 24 regularizer """no""" +576 24 optimizer """adam""" +576 24 training_loop """owa""" +576 24 negative_sampler """basic""" +576 24 evaluator """rankbased""" +576 25 dataset """fb15k237""" +576 25 model """rotate""" +576 25 loss """nssa""" +576 25 regularizer """no""" +576 25 optimizer """adam""" +576 25 training_loop """owa""" +576 25 negative_sampler """basic""" +576 25 evaluator """rankbased""" +576 26 dataset """fb15k237""" +576 26 model """rotate""" +576 26 loss """nssa""" +576 26 regularizer """no""" +576 26 optimizer """adam""" +576 26 training_loop """owa""" +576 26 negative_sampler """basic""" +576 26 evaluator """rankbased""" +576 27 dataset """fb15k237""" +576 27 model """rotate""" +576 27 loss """nssa""" +576 27 regularizer """no""" +576 27 optimizer """adam""" +576 27 training_loop """owa""" +576 27 negative_sampler """basic""" +576 27 evaluator """rankbased""" +576 28 dataset """fb15k237""" +576 28 model """rotate""" +576 28 loss """nssa""" +576 28 regularizer """no""" +576 28 optimizer """adam""" +576 28 training_loop """owa""" +576 28 negative_sampler """basic""" +576 28 evaluator """rankbased""" +576 29 dataset """fb15k237""" +576 29 model """rotate""" +576 29 loss """nssa""" +576 29 regularizer """no""" +576 29 optimizer """adam""" +576 29 training_loop """owa""" +576 29 negative_sampler """basic""" +576 29 evaluator """rankbased""" +576 30 dataset """fb15k237""" +576 30 model """rotate""" +576 
30 loss """nssa""" +576 30 regularizer """no""" +576 30 optimizer """adam""" +576 30 training_loop """owa""" +576 30 negative_sampler """basic""" +576 30 evaluator """rankbased""" +576 31 dataset """fb15k237""" +576 31 model """rotate""" +576 31 loss """nssa""" +576 31 regularizer """no""" +576 31 optimizer """adam""" +576 31 training_loop """owa""" +576 31 negative_sampler """basic""" +576 31 evaluator """rankbased""" +576 32 dataset """fb15k237""" +576 32 model """rotate""" +576 32 loss """nssa""" +576 32 regularizer """no""" +576 32 optimizer """adam""" +576 32 training_loop """owa""" +576 32 negative_sampler """basic""" +576 32 evaluator """rankbased""" +576 33 dataset """fb15k237""" +576 33 model """rotate""" +576 33 loss """nssa""" +576 33 regularizer """no""" +576 33 optimizer """adam""" +576 33 training_loop """owa""" +576 33 negative_sampler """basic""" +576 33 evaluator """rankbased""" +576 34 dataset """fb15k237""" +576 34 model """rotate""" +576 34 loss """nssa""" +576 34 regularizer """no""" +576 34 optimizer """adam""" +576 34 training_loop """owa""" +576 34 negative_sampler """basic""" +576 34 evaluator """rankbased""" +576 35 dataset """fb15k237""" +576 35 model """rotate""" +576 35 loss """nssa""" +576 35 regularizer """no""" +576 35 optimizer """adam""" +576 35 training_loop """owa""" +576 35 negative_sampler """basic""" +576 35 evaluator """rankbased""" +576 36 dataset """fb15k237""" +576 36 model """rotate""" +576 36 loss """nssa""" +576 36 regularizer """no""" +576 36 optimizer """adam""" +576 36 training_loop """owa""" +576 36 negative_sampler """basic""" +576 36 evaluator """rankbased""" +576 37 dataset """fb15k237""" +576 37 model """rotate""" +576 37 loss """nssa""" +576 37 regularizer """no""" +576 37 optimizer """adam""" +576 37 training_loop """owa""" +576 37 negative_sampler """basic""" +576 37 evaluator """rankbased""" +576 38 dataset """fb15k237""" +576 38 model """rotate""" +576 38 loss """nssa""" +576 38 regularizer """no""" +576 38 
optimizer """adam""" +576 38 training_loop """owa""" +576 38 negative_sampler """basic""" +576 38 evaluator """rankbased""" +576 39 dataset """fb15k237""" +576 39 model """rotate""" +576 39 loss """nssa""" +576 39 regularizer """no""" +576 39 optimizer """adam""" +576 39 training_loop """owa""" +576 39 negative_sampler """basic""" +576 39 evaluator """rankbased""" +577 1 model.embedding_dim 1.0 +577 1 optimizer.lr 0.005728479397093786 +577 1 training.batch_size 1.0 +577 1 training.label_smoothing 0.058496747262221926 +577 2 model.embedding_dim 2.0 +577 2 optimizer.lr 0.08007732089889749 +577 2 training.batch_size 2.0 +577 2 training.label_smoothing 0.4844631480359347 +577 3 model.embedding_dim 1.0 +577 3 optimizer.lr 0.007369688733114629 +577 3 training.batch_size 2.0 +577 3 training.label_smoothing 0.027199440834661043 +577 4 model.embedding_dim 0.0 +577 4 optimizer.lr 0.003147912838278411 +577 4 training.batch_size 1.0 +577 4 training.label_smoothing 0.04291054148875049 +577 5 model.embedding_dim 0.0 +577 5 optimizer.lr 0.0023528803240737894 +577 5 training.batch_size 0.0 +577 5 training.label_smoothing 0.027138367586168752 +577 6 model.embedding_dim 2.0 +577 6 optimizer.lr 0.01624758754905317 +577 6 training.batch_size 2.0 +577 6 training.label_smoothing 0.011020390986296615 +577 7 model.embedding_dim 1.0 +577 7 optimizer.lr 0.08970274226859161 +577 7 training.batch_size 2.0 +577 7 training.label_smoothing 0.7927524574236374 +577 8 model.embedding_dim 2.0 +577 8 optimizer.lr 0.0058834497128998214 +577 8 training.batch_size 0.0 +577 8 training.label_smoothing 0.04375201516467836 +577 1 dataset """fb15k237""" +577 1 model """rotate""" +577 1 loss """bceaftersigmoid""" +577 1 regularizer """no""" +577 1 optimizer """adam""" +577 1 training_loop """lcwa""" +577 1 evaluator """rankbased""" +577 2 dataset """fb15k237""" +577 2 model """rotate""" +577 2 loss """bceaftersigmoid""" +577 2 regularizer """no""" +577 2 optimizer """adam""" +577 2 training_loop """lcwa""" 
+577 2 evaluator """rankbased""" +577 3 dataset """fb15k237""" +577 3 model """rotate""" +577 3 loss """bceaftersigmoid""" +577 3 regularizer """no""" +577 3 optimizer """adam""" +577 3 training_loop """lcwa""" +577 3 evaluator """rankbased""" +577 4 dataset """fb15k237""" +577 4 model """rotate""" +577 4 loss """bceaftersigmoid""" +577 4 regularizer """no""" +577 4 optimizer """adam""" +577 4 training_loop """lcwa""" +577 4 evaluator """rankbased""" +577 5 dataset """fb15k237""" +577 5 model """rotate""" +577 5 loss """bceaftersigmoid""" +577 5 regularizer """no""" +577 5 optimizer """adam""" +577 5 training_loop """lcwa""" +577 5 evaluator """rankbased""" +577 6 dataset """fb15k237""" +577 6 model """rotate""" +577 6 loss """bceaftersigmoid""" +577 6 regularizer """no""" +577 6 optimizer """adam""" +577 6 training_loop """lcwa""" +577 6 evaluator """rankbased""" +577 7 dataset """fb15k237""" +577 7 model """rotate""" +577 7 loss """bceaftersigmoid""" +577 7 regularizer """no""" +577 7 optimizer """adam""" +577 7 training_loop """lcwa""" +577 7 evaluator """rankbased""" +577 8 dataset """fb15k237""" +577 8 model """rotate""" +577 8 loss """bceaftersigmoid""" +577 8 regularizer """no""" +577 8 optimizer """adam""" +577 8 training_loop """lcwa""" +577 8 evaluator """rankbased""" +578 1 model.embedding_dim 2.0 +578 1 optimizer.lr 0.022871031924390148 +578 1 training.batch_size 2.0 +578 1 training.label_smoothing 0.10082142188349787 +578 2 model.embedding_dim 2.0 +578 2 optimizer.lr 0.001209997712755984 +578 2 training.batch_size 2.0 +578 2 training.label_smoothing 0.02590430496698439 +578 3 model.embedding_dim 2.0 +578 3 optimizer.lr 0.0024854472261513715 +578 3 training.batch_size 0.0 +578 3 training.label_smoothing 0.6917198731868732 +578 4 model.embedding_dim 0.0 +578 4 optimizer.lr 0.0062966943188976976 +578 4 training.batch_size 0.0 +578 4 training.label_smoothing 0.0029475167381470503 +578 5 model.embedding_dim 0.0 +578 5 optimizer.lr 0.01410266203722307 +578 5 
training.batch_size 2.0 +578 5 training.label_smoothing 0.3350004091580292 +578 6 model.embedding_dim 2.0 +578 6 optimizer.lr 0.006011835478805642 +578 6 training.batch_size 2.0 +578 6 training.label_smoothing 0.0034061460269631453 +578 1 dataset """fb15k237""" +578 1 model """rotate""" +578 1 loss """softplus""" +578 1 regularizer """no""" +578 1 optimizer """adam""" +578 1 training_loop """lcwa""" +578 1 evaluator """rankbased""" +578 2 dataset """fb15k237""" +578 2 model """rotate""" +578 2 loss """softplus""" +578 2 regularizer """no""" +578 2 optimizer """adam""" +578 2 training_loop """lcwa""" +578 2 evaluator """rankbased""" +578 3 dataset """fb15k237""" +578 3 model """rotate""" +578 3 loss """softplus""" +578 3 regularizer """no""" +578 3 optimizer """adam""" +578 3 training_loop """lcwa""" +578 3 evaluator """rankbased""" +578 4 dataset """fb15k237""" +578 4 model """rotate""" +578 4 loss """softplus""" +578 4 regularizer """no""" +578 4 optimizer """adam""" +578 4 training_loop """lcwa""" +578 4 evaluator """rankbased""" +578 5 dataset """fb15k237""" +578 5 model """rotate""" +578 5 loss """softplus""" +578 5 regularizer """no""" +578 5 optimizer """adam""" +578 5 training_loop """lcwa""" +578 5 evaluator """rankbased""" +578 6 dataset """fb15k237""" +578 6 model """rotate""" +578 6 loss """softplus""" +578 6 regularizer """no""" +578 6 optimizer """adam""" +578 6 training_loop """lcwa""" +578 6 evaluator """rankbased""" +579 1 model.embedding_dim 1.0 +579 1 optimizer.lr 0.0030671127008606025 +579 1 training.batch_size 2.0 +579 1 training.label_smoothing 0.010881854645023234 +579 2 model.embedding_dim 0.0 +579 2 optimizer.lr 0.015830347206627838 +579 2 training.batch_size 1.0 +579 2 training.label_smoothing 0.004404815120355902 +579 3 model.embedding_dim 2.0 +579 3 optimizer.lr 0.0020103790391999593 +579 3 training.batch_size 0.0 +579 3 training.label_smoothing 0.0015929526103623797 +579 4 model.embedding_dim 2.0 +579 4 optimizer.lr 0.002248637641266716 
+579 4 training.batch_size 1.0 +579 4 training.label_smoothing 0.0696537145347789 +579 5 model.embedding_dim 0.0 +579 5 optimizer.lr 0.03610003774982238 +579 5 training.batch_size 2.0 +579 5 training.label_smoothing 0.00901728235654063 +579 6 model.embedding_dim 2.0 +579 6 optimizer.lr 0.003896261584273595 +579 6 training.batch_size 0.0 +579 6 training.label_smoothing 0.00291813692547901 +579 7 model.embedding_dim 0.0 +579 7 optimizer.lr 0.0015363067970008072 +579 7 training.batch_size 1.0 +579 7 training.label_smoothing 0.007483719565997809 +579 8 model.embedding_dim 2.0 +579 8 optimizer.lr 0.028774834883759252 +579 8 training.batch_size 0.0 +579 8 training.label_smoothing 0.9229816537681591 +579 9 model.embedding_dim 1.0 +579 9 optimizer.lr 0.0012158017597236296 +579 9 training.batch_size 1.0 +579 9 training.label_smoothing 0.0011355860170004476 +579 10 model.embedding_dim 0.0 +579 10 optimizer.lr 0.0017324920955691884 +579 10 training.batch_size 2.0 +579 10 training.label_smoothing 0.5113661096706325 +579 1 dataset """fb15k237""" +579 1 model """rotate""" +579 1 loss """bceaftersigmoid""" +579 1 regularizer """no""" +579 1 optimizer """adam""" +579 1 training_loop """lcwa""" +579 1 evaluator """rankbased""" +579 2 dataset """fb15k237""" +579 2 model """rotate""" +579 2 loss """bceaftersigmoid""" +579 2 regularizer """no""" +579 2 optimizer """adam""" +579 2 training_loop """lcwa""" +579 2 evaluator """rankbased""" +579 3 dataset """fb15k237""" +579 3 model """rotate""" +579 3 loss """bceaftersigmoid""" +579 3 regularizer """no""" +579 3 optimizer """adam""" +579 3 training_loop """lcwa""" +579 3 evaluator """rankbased""" +579 4 dataset """fb15k237""" +579 4 model """rotate""" +579 4 loss """bceaftersigmoid""" +579 4 regularizer """no""" +579 4 optimizer """adam""" +579 4 training_loop """lcwa""" +579 4 evaluator """rankbased""" +579 5 dataset """fb15k237""" +579 5 model """rotate""" +579 5 loss """bceaftersigmoid""" +579 5 regularizer """no""" +579 5 optimizer 
"""adam""" +579 5 training_loop """lcwa""" +579 5 evaluator """rankbased""" +579 6 dataset """fb15k237""" +579 6 model """rotate""" +579 6 loss """bceaftersigmoid""" +579 6 regularizer """no""" +579 6 optimizer """adam""" +579 6 training_loop """lcwa""" +579 6 evaluator """rankbased""" +579 7 dataset """fb15k237""" +579 7 model """rotate""" +579 7 loss """bceaftersigmoid""" +579 7 regularizer """no""" +579 7 optimizer """adam""" +579 7 training_loop """lcwa""" +579 7 evaluator """rankbased""" +579 8 dataset """fb15k237""" +579 8 model """rotate""" +579 8 loss """bceaftersigmoid""" +579 8 regularizer """no""" +579 8 optimizer """adam""" +579 8 training_loop """lcwa""" +579 8 evaluator """rankbased""" +579 9 dataset """fb15k237""" +579 9 model """rotate""" +579 9 loss """bceaftersigmoid""" +579 9 regularizer """no""" +579 9 optimizer """adam""" +579 9 training_loop """lcwa""" +579 9 evaluator """rankbased""" +579 10 dataset """fb15k237""" +579 10 model """rotate""" +579 10 loss """bceaftersigmoid""" +579 10 regularizer """no""" +579 10 optimizer """adam""" +579 10 training_loop """lcwa""" +579 10 evaluator """rankbased""" +580 1 model.embedding_dim 1.0 +580 1 optimizer.lr 0.015716512395046265 +580 1 training.batch_size 2.0 +580 1 training.label_smoothing 0.870081622066777 +580 2 model.embedding_dim 1.0 +580 2 optimizer.lr 0.0036306089295137524 +580 2 training.batch_size 0.0 +580 2 training.label_smoothing 0.019528374661378762 +580 3 model.embedding_dim 1.0 +580 3 optimizer.lr 0.04984034145474348 +580 3 training.batch_size 0.0 +580 3 training.label_smoothing 0.14893434322534876 +580 4 model.embedding_dim 1.0 +580 4 optimizer.lr 0.07058060437347637 +580 4 training.batch_size 1.0 +580 4 training.label_smoothing 0.19039877042376646 +580 5 model.embedding_dim 1.0 +580 5 optimizer.lr 0.0071601063019005595 +580 5 training.batch_size 0.0 +580 5 training.label_smoothing 0.6253728141782641 +580 6 model.embedding_dim 0.0 +580 6 optimizer.lr 0.014819218437087951 +580 6 
training.batch_size 2.0 +580 6 training.label_smoothing 0.9965616118652951 +580 7 model.embedding_dim 2.0 +580 7 optimizer.lr 0.00833034940529255 +580 7 training.batch_size 2.0 +580 7 training.label_smoothing 0.08692417293013581 +580 8 model.embedding_dim 1.0 +580 8 optimizer.lr 0.011414058107378614 +580 8 training.batch_size 2.0 +580 8 training.label_smoothing 0.009264195066758784 +580 9 model.embedding_dim 2.0 +580 9 optimizer.lr 0.00794391905891209 +580 9 training.batch_size 0.0 +580 9 training.label_smoothing 0.004751228330200137 +580 10 model.embedding_dim 1.0 +580 10 optimizer.lr 0.015421779149474632 +580 10 training.batch_size 2.0 +580 10 training.label_smoothing 0.08101498196579196 +580 11 model.embedding_dim 1.0 +580 11 optimizer.lr 0.07760202535508973 +580 11 training.batch_size 2.0 +580 11 training.label_smoothing 0.0026280100671065287 +580 12 model.embedding_dim 2.0 +580 12 optimizer.lr 0.006427405665867997 +580 12 training.batch_size 0.0 +580 12 training.label_smoothing 0.010943211259810686 +580 13 model.embedding_dim 1.0 +580 13 optimizer.lr 0.0870230262553966 +580 13 training.batch_size 1.0 +580 13 training.label_smoothing 0.46236426379534656 +580 14 model.embedding_dim 0.0 +580 14 optimizer.lr 0.00846356299548283 +580 14 training.batch_size 0.0 +580 14 training.label_smoothing 0.6863355474740488 +580 1 dataset """fb15k237""" +580 1 model """rotate""" +580 1 loss """softplus""" +580 1 regularizer """no""" +580 1 optimizer """adam""" +580 1 training_loop """lcwa""" +580 1 evaluator """rankbased""" +580 2 dataset """fb15k237""" +580 2 model """rotate""" +580 2 loss """softplus""" +580 2 regularizer """no""" +580 2 optimizer """adam""" +580 2 training_loop """lcwa""" +580 2 evaluator """rankbased""" +580 3 dataset """fb15k237""" +580 3 model """rotate""" +580 3 loss """softplus""" +580 3 regularizer """no""" +580 3 optimizer """adam""" +580 3 training_loop """lcwa""" +580 3 evaluator """rankbased""" +580 4 dataset """fb15k237""" +580 4 model 
"""rotate""" +580 4 loss """softplus""" +580 4 regularizer """no""" +580 4 optimizer """adam""" +580 4 training_loop """lcwa""" +580 4 evaluator """rankbased""" +580 5 dataset """fb15k237""" +580 5 model """rotate""" +580 5 loss """softplus""" +580 5 regularizer """no""" +580 5 optimizer """adam""" +580 5 training_loop """lcwa""" +580 5 evaluator """rankbased""" +580 6 dataset """fb15k237""" +580 6 model """rotate""" +580 6 loss """softplus""" +580 6 regularizer """no""" +580 6 optimizer """adam""" +580 6 training_loop """lcwa""" +580 6 evaluator """rankbased""" +580 7 dataset """fb15k237""" +580 7 model """rotate""" +580 7 loss """softplus""" +580 7 regularizer """no""" +580 7 optimizer """adam""" +580 7 training_loop """lcwa""" +580 7 evaluator """rankbased""" +580 8 dataset """fb15k237""" +580 8 model """rotate""" +580 8 loss """softplus""" +580 8 regularizer """no""" +580 8 optimizer """adam""" +580 8 training_loop """lcwa""" +580 8 evaluator """rankbased""" +580 9 dataset """fb15k237""" +580 9 model """rotate""" +580 9 loss """softplus""" +580 9 regularizer """no""" +580 9 optimizer """adam""" +580 9 training_loop """lcwa""" +580 9 evaluator """rankbased""" +580 10 dataset """fb15k237""" +580 10 model """rotate""" +580 10 loss """softplus""" +580 10 regularizer """no""" +580 10 optimizer """adam""" +580 10 training_loop """lcwa""" +580 10 evaluator """rankbased""" +580 11 dataset """fb15k237""" +580 11 model """rotate""" +580 11 loss """softplus""" +580 11 regularizer """no""" +580 11 optimizer """adam""" +580 11 training_loop """lcwa""" +580 11 evaluator """rankbased""" +580 12 dataset """fb15k237""" +580 12 model """rotate""" +580 12 loss """softplus""" +580 12 regularizer """no""" +580 12 optimizer """adam""" +580 12 training_loop """lcwa""" +580 12 evaluator """rankbased""" +580 13 dataset """fb15k237""" +580 13 model """rotate""" +580 13 loss """softplus""" +580 13 regularizer """no""" +580 13 optimizer """adam""" +580 13 training_loop """lcwa""" +580 13 
evaluator """rankbased""" +580 14 dataset """fb15k237""" +580 14 model """rotate""" +580 14 loss """softplus""" +580 14 regularizer """no""" +580 14 optimizer """adam""" +580 14 training_loop """lcwa""" +580 14 evaluator """rankbased""" +581 1 model.embedding_dim 0.0 +581 1 loss.margin 3.2543678593414267 +581 1 optimizer.lr 0.004159936627685061 +581 1 negative_sampler.num_negs_per_pos 88.0 +581 1 training.batch_size 0.0 +581 2 model.embedding_dim 2.0 +581 2 loss.margin 2.7154008149849633 +581 2 optimizer.lr 0.02941192194688735 +581 2 negative_sampler.num_negs_per_pos 85.0 +581 2 training.batch_size 1.0 +581 3 model.embedding_dim 2.0 +581 3 loss.margin 7.084185552916891 +581 3 optimizer.lr 0.09255947145446392 +581 3 negative_sampler.num_negs_per_pos 37.0 +581 3 training.batch_size 0.0 +581 4 model.embedding_dim 0.0 +581 4 loss.margin 9.953054493102103 +581 4 optimizer.lr 0.08329514562533209 +581 4 negative_sampler.num_negs_per_pos 15.0 +581 4 training.batch_size 2.0 +581 5 model.embedding_dim 1.0 +581 5 loss.margin 1.2451906335164333 +581 5 optimizer.lr 0.07453236628119998 +581 5 negative_sampler.num_negs_per_pos 41.0 +581 5 training.batch_size 2.0 +581 6 model.embedding_dim 1.0 +581 6 loss.margin 5.2230205978824715 +581 6 optimizer.lr 0.04568589923241656 +581 6 negative_sampler.num_negs_per_pos 75.0 +581 6 training.batch_size 0.0 +581 7 model.embedding_dim 2.0 +581 7 loss.margin 1.3271081065069896 +581 7 optimizer.lr 0.0034698371039247806 +581 7 negative_sampler.num_negs_per_pos 15.0 +581 7 training.batch_size 1.0 +581 8 model.embedding_dim 2.0 +581 8 loss.margin 9.949017793799971 +581 8 optimizer.lr 0.031031362813028283 +581 8 negative_sampler.num_negs_per_pos 24.0 +581 8 training.batch_size 2.0 +581 9 model.embedding_dim 2.0 +581 9 loss.margin 1.5369698221989077 +581 9 optimizer.lr 0.008859992490058049 +581 9 negative_sampler.num_negs_per_pos 61.0 +581 9 training.batch_size 1.0 +581 10 model.embedding_dim 2.0 +581 10 loss.margin 4.09975424009925 +581 10 
optimizer.lr 0.03584111374576894 +581 10 negative_sampler.num_negs_per_pos 35.0 +581 10 training.batch_size 1.0 +581 11 model.embedding_dim 0.0 +581 11 loss.margin 6.88094311226726 +581 11 optimizer.lr 0.003928474941157123 +581 11 negative_sampler.num_negs_per_pos 73.0 +581 11 training.batch_size 1.0 +581 12 model.embedding_dim 2.0 +581 12 loss.margin 7.918432518542456 +581 12 optimizer.lr 0.034669168072604145 +581 12 negative_sampler.num_negs_per_pos 93.0 +581 12 training.batch_size 1.0 +581 13 model.embedding_dim 1.0 +581 13 loss.margin 5.327910230371512 +581 13 optimizer.lr 0.006446162392371466 +581 13 negative_sampler.num_negs_per_pos 63.0 +581 13 training.batch_size 2.0 +581 14 model.embedding_dim 0.0 +581 14 loss.margin 6.335980427062489 +581 14 optimizer.lr 0.08917543235407481 +581 14 negative_sampler.num_negs_per_pos 47.0 +581 14 training.batch_size 1.0 +581 15 model.embedding_dim 0.0 +581 15 loss.margin 5.608721407326515 +581 15 optimizer.lr 0.04998201470363312 +581 15 negative_sampler.num_negs_per_pos 88.0 +581 15 training.batch_size 2.0 +581 16 model.embedding_dim 1.0 +581 16 loss.margin 9.798581127074977 +581 16 optimizer.lr 0.005064986807006995 +581 16 negative_sampler.num_negs_per_pos 94.0 +581 16 training.batch_size 0.0 +581 17 model.embedding_dim 1.0 +581 17 loss.margin 7.325104897853047 +581 17 optimizer.lr 0.012579464389086692 +581 17 negative_sampler.num_negs_per_pos 24.0 +581 17 training.batch_size 2.0 +581 18 model.embedding_dim 0.0 +581 18 loss.margin 9.573787738158916 +581 18 optimizer.lr 0.014696029751359453 +581 18 negative_sampler.num_negs_per_pos 29.0 +581 18 training.batch_size 1.0 +581 19 model.embedding_dim 0.0 +581 19 loss.margin 4.38880416705791 +581 19 optimizer.lr 0.03917675602314012 +581 19 negative_sampler.num_negs_per_pos 71.0 +581 19 training.batch_size 0.0 +581 1 dataset """fb15k237""" +581 1 model """rotate""" +581 1 loss """marginranking""" +581 1 regularizer """no""" +581 1 optimizer """adam""" +581 1 training_loop 
"""owa""" +581 1 negative_sampler """basic""" +581 1 evaluator """rankbased""" +581 2 dataset """fb15k237""" +581 2 model """rotate""" +581 2 loss """marginranking""" +581 2 regularizer """no""" +581 2 optimizer """adam""" +581 2 training_loop """owa""" +581 2 negative_sampler """basic""" +581 2 evaluator """rankbased""" +581 3 dataset """fb15k237""" +581 3 model """rotate""" +581 3 loss """marginranking""" +581 3 regularizer """no""" +581 3 optimizer """adam""" +581 3 training_loop """owa""" +581 3 negative_sampler """basic""" +581 3 evaluator """rankbased""" +581 4 dataset """fb15k237""" +581 4 model """rotate""" +581 4 loss """marginranking""" +581 4 regularizer """no""" +581 4 optimizer """adam""" +581 4 training_loop """owa""" +581 4 negative_sampler """basic""" +581 4 evaluator """rankbased""" +581 5 dataset """fb15k237""" +581 5 model """rotate""" +581 5 loss """marginranking""" +581 5 regularizer """no""" +581 5 optimizer """adam""" +581 5 training_loop """owa""" +581 5 negative_sampler """basic""" +581 5 evaluator """rankbased""" +581 6 dataset """fb15k237""" +581 6 model """rotate""" +581 6 loss """marginranking""" +581 6 regularizer """no""" +581 6 optimizer """adam""" +581 6 training_loop """owa""" +581 6 negative_sampler """basic""" +581 6 evaluator """rankbased""" +581 7 dataset """fb15k237""" +581 7 model """rotate""" +581 7 loss """marginranking""" +581 7 regularizer """no""" +581 7 optimizer """adam""" +581 7 training_loop """owa""" +581 7 negative_sampler """basic""" +581 7 evaluator """rankbased""" +581 8 dataset """fb15k237""" +581 8 model """rotate""" +581 8 loss """marginranking""" +581 8 regularizer """no""" +581 8 optimizer """adam""" +581 8 training_loop """owa""" +581 8 negative_sampler """basic""" +581 8 evaluator """rankbased""" +581 9 dataset """fb15k237""" +581 9 model """rotate""" +581 9 loss """marginranking""" +581 9 regularizer """no""" +581 9 optimizer """adam""" +581 9 training_loop """owa""" +581 9 negative_sampler """basic""" 
+581 9 evaluator """rankbased""" +581 10 dataset """fb15k237""" +581 10 model """rotate""" +581 10 loss """marginranking""" +581 10 regularizer """no""" +581 10 optimizer """adam""" +581 10 training_loop """owa""" +581 10 negative_sampler """basic""" +581 10 evaluator """rankbased""" +581 11 dataset """fb15k237""" +581 11 model """rotate""" +581 11 loss """marginranking""" +581 11 regularizer """no""" +581 11 optimizer """adam""" +581 11 training_loop """owa""" +581 11 negative_sampler """basic""" +581 11 evaluator """rankbased""" +581 12 dataset """fb15k237""" +581 12 model """rotate""" +581 12 loss """marginranking""" +581 12 regularizer """no""" +581 12 optimizer """adam""" +581 12 training_loop """owa""" +581 12 negative_sampler """basic""" +581 12 evaluator """rankbased""" +581 13 dataset """fb15k237""" +581 13 model """rotate""" +581 13 loss """marginranking""" +581 13 regularizer """no""" +581 13 optimizer """adam""" +581 13 training_loop """owa""" +581 13 negative_sampler """basic""" +581 13 evaluator """rankbased""" +581 14 dataset """fb15k237""" +581 14 model """rotate""" +581 14 loss """marginranking""" +581 14 regularizer """no""" +581 14 optimizer """adam""" +581 14 training_loop """owa""" +581 14 negative_sampler """basic""" +581 14 evaluator """rankbased""" +581 15 dataset """fb15k237""" +581 15 model """rotate""" +581 15 loss """marginranking""" +581 15 regularizer """no""" +581 15 optimizer """adam""" +581 15 training_loop """owa""" +581 15 negative_sampler """basic""" +581 15 evaluator """rankbased""" +581 16 dataset """fb15k237""" +581 16 model """rotate""" +581 16 loss """marginranking""" +581 16 regularizer """no""" +581 16 optimizer """adam""" +581 16 training_loop """owa""" +581 16 negative_sampler """basic""" +581 16 evaluator """rankbased""" +581 17 dataset """fb15k237""" +581 17 model """rotate""" +581 17 loss """marginranking""" +581 17 regularizer """no""" +581 17 optimizer """adam""" +581 17 training_loop """owa""" +581 17 
negative_sampler """basic""" +581 17 evaluator """rankbased""" +581 18 dataset """fb15k237""" +581 18 model """rotate""" +581 18 loss """marginranking""" +581 18 regularizer """no""" +581 18 optimizer """adam""" +581 18 training_loop """owa""" +581 18 negative_sampler """basic""" +581 18 evaluator """rankbased""" +581 19 dataset """fb15k237""" +581 19 model """rotate""" +581 19 loss """marginranking""" +581 19 regularizer """no""" +581 19 optimizer """adam""" +581 19 training_loop """owa""" +581 19 negative_sampler """basic""" +581 19 evaluator """rankbased""" +582 1 model.embedding_dim 1.0 +582 1 loss.margin 6.669891391468208 +582 1 optimizer.lr 0.07832877033395343 +582 1 negative_sampler.num_negs_per_pos 80.0 +582 1 training.batch_size 0.0 +582 2 model.embedding_dim 1.0 +582 2 loss.margin 7.628977490243306 +582 2 optimizer.lr 0.015174251904309885 +582 2 negative_sampler.num_negs_per_pos 52.0 +582 2 training.batch_size 2.0 +582 3 model.embedding_dim 0.0 +582 3 loss.margin 1.9638030023494648 +582 3 optimizer.lr 0.032440611354532384 +582 3 negative_sampler.num_negs_per_pos 61.0 +582 3 training.batch_size 0.0 +582 4 model.embedding_dim 2.0 +582 4 loss.margin 3.1453599401125145 +582 4 optimizer.lr 0.01658576672773834 +582 4 negative_sampler.num_negs_per_pos 78.0 +582 4 training.batch_size 1.0 +582 5 model.embedding_dim 0.0 +582 5 loss.margin 9.27969639387598 +582 5 optimizer.lr 0.08842471411485162 +582 5 negative_sampler.num_negs_per_pos 9.0 +582 5 training.batch_size 1.0 +582 6 model.embedding_dim 2.0 +582 6 loss.margin 9.79287582383994 +582 6 optimizer.lr 0.0017087768837127806 +582 6 negative_sampler.num_negs_per_pos 64.0 +582 6 training.batch_size 2.0 +582 7 model.embedding_dim 1.0 +582 7 loss.margin 4.646970141436375 +582 7 optimizer.lr 0.001245970146996903 +582 7 negative_sampler.num_negs_per_pos 84.0 +582 7 training.batch_size 1.0 +582 8 model.embedding_dim 2.0 +582 8 loss.margin 7.357209455248156 +582 8 optimizer.lr 0.038034055966384046 +582 8 
negative_sampler.num_negs_per_pos 38.0 +582 8 training.batch_size 2.0 +582 9 model.embedding_dim 0.0 +582 9 loss.margin 5.054072922328166 +582 9 optimizer.lr 0.001220274180731212 +582 9 negative_sampler.num_negs_per_pos 21.0 +582 9 training.batch_size 2.0 +582 10 model.embedding_dim 1.0 +582 10 loss.margin 4.749659737517576 +582 10 optimizer.lr 0.08417788705555825 +582 10 negative_sampler.num_negs_per_pos 36.0 +582 10 training.batch_size 1.0 +582 11 model.embedding_dim 2.0 +582 11 loss.margin 0.8442664772013266 +582 11 optimizer.lr 0.0014132534238003552 +582 11 negative_sampler.num_negs_per_pos 57.0 +582 11 training.batch_size 2.0 +582 12 model.embedding_dim 0.0 +582 12 loss.margin 2.696504851388073 +582 12 optimizer.lr 0.007884745960584784 +582 12 negative_sampler.num_negs_per_pos 36.0 +582 12 training.batch_size 2.0 +582 13 model.embedding_dim 0.0 +582 13 loss.margin 0.9536260199544317 +582 13 optimizer.lr 0.03144483442259063 +582 13 negative_sampler.num_negs_per_pos 5.0 +582 13 training.batch_size 0.0 +582 14 model.embedding_dim 1.0 +582 14 loss.margin 5.675632859596086 +582 14 optimizer.lr 0.025665475328341122 +582 14 negative_sampler.num_negs_per_pos 75.0 +582 14 training.batch_size 0.0 +582 15 model.embedding_dim 1.0 +582 15 loss.margin 6.299388677809417 +582 15 optimizer.lr 0.036465955798312924 +582 15 negative_sampler.num_negs_per_pos 11.0 +582 15 training.batch_size 0.0 +582 16 model.embedding_dim 0.0 +582 16 loss.margin 9.157140579914438 +582 16 optimizer.lr 0.003025828231306783 +582 16 negative_sampler.num_negs_per_pos 88.0 +582 16 training.batch_size 1.0 +582 17 model.embedding_dim 0.0 +582 17 loss.margin 8.077583298254332 +582 17 optimizer.lr 0.016319228848563842 +582 17 negative_sampler.num_negs_per_pos 54.0 +582 17 training.batch_size 1.0 +582 18 model.embedding_dim 0.0 +582 18 loss.margin 1.3416606817522576 +582 18 optimizer.lr 0.0014104068444602423 +582 18 negative_sampler.num_negs_per_pos 43.0 +582 18 training.batch_size 2.0 +582 19 
model.embedding_dim 1.0 +582 19 loss.margin 7.743030979389044 +582 19 optimizer.lr 0.006811898780677011 +582 19 negative_sampler.num_negs_per_pos 15.0 +582 19 training.batch_size 2.0 +582 20 model.embedding_dim 1.0 +582 20 loss.margin 9.042860704052307 +582 20 optimizer.lr 0.04285919816760969 +582 20 negative_sampler.num_negs_per_pos 65.0 +582 20 training.batch_size 2.0 +582 21 model.embedding_dim 0.0 +582 21 loss.margin 6.755602478201084 +582 21 optimizer.lr 0.02150797930903759 +582 21 negative_sampler.num_negs_per_pos 75.0 +582 21 training.batch_size 0.0 +582 22 model.embedding_dim 0.0 +582 22 loss.margin 2.8182057526498254 +582 22 optimizer.lr 0.001782628074761089 +582 22 negative_sampler.num_negs_per_pos 60.0 +582 22 training.batch_size 2.0 +582 23 model.embedding_dim 1.0 +582 23 loss.margin 7.669915397950916 +582 23 optimizer.lr 0.020302638282894688 +582 23 negative_sampler.num_negs_per_pos 92.0 +582 23 training.batch_size 2.0 +582 24 model.embedding_dim 2.0 +582 24 loss.margin 8.266854352864826 +582 24 optimizer.lr 0.0014657952815249466 +582 24 negative_sampler.num_negs_per_pos 5.0 +582 24 training.batch_size 1.0 +582 25 model.embedding_dim 1.0 +582 25 loss.margin 1.686605594124043 +582 25 optimizer.lr 0.002259168245800565 +582 25 negative_sampler.num_negs_per_pos 56.0 +582 25 training.batch_size 0.0 +582 26 model.embedding_dim 1.0 +582 26 loss.margin 8.356268865123125 +582 26 optimizer.lr 0.040364334772952606 +582 26 negative_sampler.num_negs_per_pos 97.0 +582 26 training.batch_size 0.0 +582 27 model.embedding_dim 1.0 +582 27 loss.margin 5.548522843974114 +582 27 optimizer.lr 0.02485315153882824 +582 27 negative_sampler.num_negs_per_pos 57.0 +582 27 training.batch_size 2.0 +582 28 model.embedding_dim 2.0 +582 28 loss.margin 7.000383278216449 +582 28 optimizer.lr 0.012370988893315355 +582 28 negative_sampler.num_negs_per_pos 38.0 +582 28 training.batch_size 0.0 +582 29 model.embedding_dim 1.0 +582 29 loss.margin 3.113809594213772 +582 29 optimizer.lr 
0.04179098169543438 +582 29 negative_sampler.num_negs_per_pos 94.0 +582 29 training.batch_size 2.0 +582 30 model.embedding_dim 2.0 +582 30 loss.margin 2.0365892275229474 +582 30 optimizer.lr 0.005289570727725977 +582 30 negative_sampler.num_negs_per_pos 93.0 +582 30 training.batch_size 2.0 +582 31 model.embedding_dim 1.0 +582 31 loss.margin 8.60187657013442 +582 31 optimizer.lr 0.019098220024743665 +582 31 negative_sampler.num_negs_per_pos 86.0 +582 31 training.batch_size 0.0 +582 32 model.embedding_dim 2.0 +582 32 loss.margin 4.369211191546243 +582 32 optimizer.lr 0.009449001703048318 +582 32 negative_sampler.num_negs_per_pos 73.0 +582 32 training.batch_size 1.0 +582 33 model.embedding_dim 2.0 +582 33 loss.margin 7.490981453263231 +582 33 optimizer.lr 0.002562082595392778 +582 33 negative_sampler.num_negs_per_pos 56.0 +582 33 training.batch_size 2.0 +582 34 model.embedding_dim 0.0 +582 34 loss.margin 4.640070726101961 +582 34 optimizer.lr 0.011835003758391534 +582 34 negative_sampler.num_negs_per_pos 35.0 +582 34 training.batch_size 0.0 +582 35 model.embedding_dim 1.0 +582 35 loss.margin 6.751802683919469 +582 35 optimizer.lr 0.0029733324209594222 +582 35 negative_sampler.num_negs_per_pos 37.0 +582 35 training.batch_size 1.0 +582 36 model.embedding_dim 2.0 +582 36 loss.margin 6.708955137611978 +582 36 optimizer.lr 0.00478145171740956 +582 36 negative_sampler.num_negs_per_pos 39.0 +582 36 training.batch_size 0.0 +582 37 model.embedding_dim 0.0 +582 37 loss.margin 2.6733940756447847 +582 37 optimizer.lr 0.006860611642768573 +582 37 negative_sampler.num_negs_per_pos 46.0 +582 37 training.batch_size 1.0 +582 38 model.embedding_dim 2.0 +582 38 loss.margin 6.286362766631121 +582 38 optimizer.lr 0.002930383995084154 +582 38 negative_sampler.num_negs_per_pos 92.0 +582 38 training.batch_size 0.0 +582 39 model.embedding_dim 2.0 +582 39 loss.margin 3.9262327228144196 +582 39 optimizer.lr 0.02814941054236736 +582 39 negative_sampler.num_negs_per_pos 93.0 +582 39 
training.batch_size 2.0 +582 40 model.embedding_dim 2.0 +582 40 loss.margin 3.016618675367413 +582 40 optimizer.lr 0.0018599971329308384 +582 40 negative_sampler.num_negs_per_pos 51.0 +582 40 training.batch_size 1.0 +582 41 model.embedding_dim 0.0 +582 41 loss.margin 9.566991384177289 +582 41 optimizer.lr 0.002681378156227075 +582 41 negative_sampler.num_negs_per_pos 37.0 +582 41 training.batch_size 1.0 +582 42 model.embedding_dim 2.0 +582 42 loss.margin 9.062285271206477 +582 42 optimizer.lr 0.03186069574634522 +582 42 negative_sampler.num_negs_per_pos 96.0 +582 42 training.batch_size 2.0 +582 43 model.embedding_dim 1.0 +582 43 loss.margin 2.0641271834172605 +582 43 optimizer.lr 0.0058336211068175155 +582 43 negative_sampler.num_negs_per_pos 55.0 +582 43 training.batch_size 1.0 +582 44 model.embedding_dim 0.0 +582 44 loss.margin 7.42889069050176 +582 44 optimizer.lr 0.05467503391180229 +582 44 negative_sampler.num_negs_per_pos 86.0 +582 44 training.batch_size 0.0 +582 45 model.embedding_dim 1.0 +582 45 loss.margin 3.5452033591213237 +582 45 optimizer.lr 0.03614179529676588 +582 45 negative_sampler.num_negs_per_pos 45.0 +582 45 training.batch_size 2.0 +582 46 model.embedding_dim 2.0 +582 46 loss.margin 7.1227468825061635 +582 46 optimizer.lr 0.0023035181694511853 +582 46 negative_sampler.num_negs_per_pos 73.0 +582 46 training.batch_size 0.0 +582 47 model.embedding_dim 0.0 +582 47 loss.margin 6.450741021505405 +582 47 optimizer.lr 0.053950825833559216 +582 47 negative_sampler.num_negs_per_pos 79.0 +582 47 training.batch_size 1.0 +582 48 model.embedding_dim 0.0 +582 48 loss.margin 5.004425807115934 +582 48 optimizer.lr 0.001738462044747758 +582 48 negative_sampler.num_negs_per_pos 70.0 +582 48 training.batch_size 2.0 +582 49 model.embedding_dim 0.0 +582 49 loss.margin 0.8488672276281934 +582 49 optimizer.lr 0.002875377608671678 +582 49 negative_sampler.num_negs_per_pos 41.0 +582 49 training.batch_size 0.0 +582 50 model.embedding_dim 1.0 +582 50 loss.margin 
6.148164356239813 +582 50 optimizer.lr 0.001028480107934663 +582 50 negative_sampler.num_negs_per_pos 71.0 +582 50 training.batch_size 2.0 +582 51 model.embedding_dim 2.0 +582 51 loss.margin 9.552907314330104 +582 51 optimizer.lr 0.005959763447061917 +582 51 negative_sampler.num_negs_per_pos 13.0 +582 51 training.batch_size 2.0 +582 52 model.embedding_dim 1.0 +582 52 loss.margin 5.980789110296218 +582 52 optimizer.lr 0.07055118940271585 +582 52 negative_sampler.num_negs_per_pos 91.0 +582 52 training.batch_size 0.0 +582 53 model.embedding_dim 0.0 +582 53 loss.margin 3.4786523610212283 +582 53 optimizer.lr 0.04768507611610822 +582 53 negative_sampler.num_negs_per_pos 92.0 +582 53 training.batch_size 2.0 +582 54 model.embedding_dim 1.0 +582 54 loss.margin 4.9762779763874745 +582 54 optimizer.lr 0.08619665661795085 +582 54 negative_sampler.num_negs_per_pos 69.0 +582 54 training.batch_size 2.0 +582 55 model.embedding_dim 0.0 +582 55 loss.margin 2.4711230949253538 +582 55 optimizer.lr 0.007070614029055146 +582 55 negative_sampler.num_negs_per_pos 84.0 +582 55 training.batch_size 1.0 +582 56 model.embedding_dim 0.0 +582 56 loss.margin 4.4179902783885066 +582 56 optimizer.lr 0.043048643000680835 +582 56 negative_sampler.num_negs_per_pos 17.0 +582 56 training.batch_size 2.0 +582 57 model.embedding_dim 1.0 +582 57 loss.margin 3.3140506192100765 +582 57 optimizer.lr 0.00637237554369004 +582 57 negative_sampler.num_negs_per_pos 93.0 +582 57 training.batch_size 1.0 +582 58 model.embedding_dim 1.0 +582 58 loss.margin 7.7043319059368285 +582 58 optimizer.lr 0.002414066952572086 +582 58 negative_sampler.num_negs_per_pos 88.0 +582 58 training.batch_size 2.0 +582 59 model.embedding_dim 1.0 +582 59 loss.margin 4.8485319393536 +582 59 optimizer.lr 0.003185411293125889 +582 59 negative_sampler.num_negs_per_pos 82.0 +582 59 training.batch_size 2.0 +582 1 dataset """fb15k237""" +582 1 model """rotate""" +582 1 loss """marginranking""" +582 1 regularizer """no""" +582 1 optimizer 
"""adam""" +582 1 training_loop """owa""" +582 1 negative_sampler """basic""" +582 1 evaluator """rankbased""" +582 2 dataset """fb15k237""" +582 2 model """rotate""" +582 2 loss """marginranking""" +582 2 regularizer """no""" +582 2 optimizer """adam""" +582 2 training_loop """owa""" +582 2 negative_sampler """basic""" +582 2 evaluator """rankbased""" +582 3 dataset """fb15k237""" +582 3 model """rotate""" +582 3 loss """marginranking""" +582 3 regularizer """no""" +582 3 optimizer """adam""" +582 3 training_loop """owa""" +582 3 negative_sampler """basic""" +582 3 evaluator """rankbased""" +582 4 dataset """fb15k237""" +582 4 model """rotate""" +582 4 loss """marginranking""" +582 4 regularizer """no""" +582 4 optimizer """adam""" +582 4 training_loop """owa""" +582 4 negative_sampler """basic""" +582 4 evaluator """rankbased""" +582 5 dataset """fb15k237""" +582 5 model """rotate""" +582 5 loss """marginranking""" +582 5 regularizer """no""" +582 5 optimizer """adam""" +582 5 training_loop """owa""" +582 5 negative_sampler """basic""" +582 5 evaluator """rankbased""" +582 6 dataset """fb15k237""" +582 6 model """rotate""" +582 6 loss """marginranking""" +582 6 regularizer """no""" +582 6 optimizer """adam""" +582 6 training_loop """owa""" +582 6 negative_sampler """basic""" +582 6 evaluator """rankbased""" +582 7 dataset """fb15k237""" +582 7 model """rotate""" +582 7 loss """marginranking""" +582 7 regularizer """no""" +582 7 optimizer """adam""" +582 7 training_loop """owa""" +582 7 negative_sampler """basic""" +582 7 evaluator """rankbased""" +582 8 dataset """fb15k237""" +582 8 model """rotate""" +582 8 loss """marginranking""" +582 8 regularizer """no""" +582 8 optimizer """adam""" +582 8 training_loop """owa""" +582 8 negative_sampler """basic""" +582 8 evaluator """rankbased""" +582 9 dataset """fb15k237""" +582 9 model """rotate""" +582 9 loss """marginranking""" +582 9 regularizer """no""" +582 9 optimizer """adam""" +582 9 training_loop """owa""" +582 
9 negative_sampler """basic""" +582 9 evaluator """rankbased""" +582 10 dataset """fb15k237""" +582 10 model """rotate""" +582 10 loss """marginranking""" +582 10 regularizer """no""" +582 10 optimizer """adam""" +582 10 training_loop """owa""" +582 10 negative_sampler """basic""" +582 10 evaluator """rankbased""" +582 11 dataset """fb15k237""" +582 11 model """rotate""" +582 11 loss """marginranking""" +582 11 regularizer """no""" +582 11 optimizer """adam""" +582 11 training_loop """owa""" +582 11 negative_sampler """basic""" +582 11 evaluator """rankbased""" +582 12 dataset """fb15k237""" +582 12 model """rotate""" +582 12 loss """marginranking""" +582 12 regularizer """no""" +582 12 optimizer """adam""" +582 12 training_loop """owa""" +582 12 negative_sampler """basic""" +582 12 evaluator """rankbased""" +582 13 dataset """fb15k237""" +582 13 model """rotate""" +582 13 loss """marginranking""" +582 13 regularizer """no""" +582 13 optimizer """adam""" +582 13 training_loop """owa""" +582 13 negative_sampler """basic""" +582 13 evaluator """rankbased""" +582 14 dataset """fb15k237""" +582 14 model """rotate""" +582 14 loss """marginranking""" +582 14 regularizer """no""" +582 14 optimizer """adam""" +582 14 training_loop """owa""" +582 14 negative_sampler """basic""" +582 14 evaluator """rankbased""" +582 15 dataset """fb15k237""" +582 15 model """rotate""" +582 15 loss """marginranking""" +582 15 regularizer """no""" +582 15 optimizer """adam""" +582 15 training_loop """owa""" +582 15 negative_sampler """basic""" +582 15 evaluator """rankbased""" +582 16 dataset """fb15k237""" +582 16 model """rotate""" +582 16 loss """marginranking""" +582 16 regularizer """no""" +582 16 optimizer """adam""" +582 16 training_loop """owa""" +582 16 negative_sampler """basic""" +582 16 evaluator """rankbased""" +582 17 dataset """fb15k237""" +582 17 model """rotate""" +582 17 loss """marginranking""" +582 17 regularizer """no""" +582 17 optimizer """adam""" +582 17 training_loop 
"""owa""" +582 17 negative_sampler """basic""" +582 17 evaluator """rankbased""" +582 18 dataset """fb15k237""" +582 18 model """rotate""" +582 18 loss """marginranking""" +582 18 regularizer """no""" +582 18 optimizer """adam""" +582 18 training_loop """owa""" +582 18 negative_sampler """basic""" +582 18 evaluator """rankbased""" +582 19 dataset """fb15k237""" +582 19 model """rotate""" +582 19 loss """marginranking""" +582 19 regularizer """no""" +582 19 optimizer """adam""" +582 19 training_loop """owa""" +582 19 negative_sampler """basic""" +582 19 evaluator """rankbased""" +582 20 dataset """fb15k237""" +582 20 model """rotate""" +582 20 loss """marginranking""" +582 20 regularizer """no""" +582 20 optimizer """adam""" +582 20 training_loop """owa""" +582 20 negative_sampler """basic""" +582 20 evaluator """rankbased""" +582 21 dataset """fb15k237""" +582 21 model """rotate""" +582 21 loss """marginranking""" +582 21 regularizer """no""" +582 21 optimizer """adam""" +582 21 training_loop """owa""" +582 21 negative_sampler """basic""" +582 21 evaluator """rankbased""" +582 22 dataset """fb15k237""" +582 22 model """rotate""" +582 22 loss """marginranking""" +582 22 regularizer """no""" +582 22 optimizer """adam""" +582 22 training_loop """owa""" +582 22 negative_sampler """basic""" +582 22 evaluator """rankbased""" +582 23 dataset """fb15k237""" +582 23 model """rotate""" +582 23 loss """marginranking""" +582 23 regularizer """no""" +582 23 optimizer """adam""" +582 23 training_loop """owa""" +582 23 negative_sampler """basic""" +582 23 evaluator """rankbased""" +582 24 dataset """fb15k237""" +582 24 model """rotate""" +582 24 loss """marginranking""" +582 24 regularizer """no""" +582 24 optimizer """adam""" +582 24 training_loop """owa""" +582 24 negative_sampler """basic""" +582 24 evaluator """rankbased""" +582 25 dataset """fb15k237""" +582 25 model """rotate""" +582 25 loss """marginranking""" +582 25 regularizer """no""" +582 25 optimizer """adam""" +582 
25 training_loop """owa""" +582 25 negative_sampler """basic""" +582 25 evaluator """rankbased""" +582 26 dataset """fb15k237""" +582 26 model """rotate""" +582 26 loss """marginranking""" +582 26 regularizer """no""" +582 26 optimizer """adam""" +582 26 training_loop """owa""" +582 26 negative_sampler """basic""" +582 26 evaluator """rankbased""" +582 27 dataset """fb15k237""" +582 27 model """rotate""" +582 27 loss """marginranking""" +582 27 regularizer """no""" +582 27 optimizer """adam""" +582 27 training_loop """owa""" +582 27 negative_sampler """basic""" +582 27 evaluator """rankbased""" +582 28 dataset """fb15k237""" +582 28 model """rotate""" +582 28 loss """marginranking""" +582 28 regularizer """no""" +582 28 optimizer """adam""" +582 28 training_loop """owa""" +582 28 negative_sampler """basic""" +582 28 evaluator """rankbased""" +582 29 dataset """fb15k237""" +582 29 model """rotate""" +582 29 loss """marginranking""" +582 29 regularizer """no""" +582 29 optimizer """adam""" +582 29 training_loop """owa""" +582 29 negative_sampler """basic""" +582 29 evaluator """rankbased""" +582 30 dataset """fb15k237""" +582 30 model """rotate""" +582 30 loss """marginranking""" +582 30 regularizer """no""" +582 30 optimizer """adam""" +582 30 training_loop """owa""" +582 30 negative_sampler """basic""" +582 30 evaluator """rankbased""" +582 31 dataset """fb15k237""" +582 31 model """rotate""" +582 31 loss """marginranking""" +582 31 regularizer """no""" +582 31 optimizer """adam""" +582 31 training_loop """owa""" +582 31 negative_sampler """basic""" +582 31 evaluator """rankbased""" +582 32 dataset """fb15k237""" +582 32 model """rotate""" +582 32 loss """marginranking""" +582 32 regularizer """no""" +582 32 optimizer """adam""" +582 32 training_loop """owa""" +582 32 negative_sampler """basic""" +582 32 evaluator """rankbased""" +582 33 dataset """fb15k237""" +582 33 model """rotate""" +582 33 loss """marginranking""" +582 33 regularizer """no""" +582 33 optimizer 
"""adam""" +582 33 training_loop """owa""" +582 33 negative_sampler """basic""" +582 33 evaluator """rankbased""" +582 34 dataset """fb15k237""" +582 34 model """rotate""" +582 34 loss """marginranking""" +582 34 regularizer """no""" +582 34 optimizer """adam""" +582 34 training_loop """owa""" +582 34 negative_sampler """basic""" +582 34 evaluator """rankbased""" +582 35 dataset """fb15k237""" +582 35 model """rotate""" +582 35 loss """marginranking""" +582 35 regularizer """no""" +582 35 optimizer """adam""" +582 35 training_loop """owa""" +582 35 negative_sampler """basic""" +582 35 evaluator """rankbased""" +582 36 dataset """fb15k237""" +582 36 model """rotate""" +582 36 loss """marginranking""" +582 36 regularizer """no""" +582 36 optimizer """adam""" +582 36 training_loop """owa""" +582 36 negative_sampler """basic""" +582 36 evaluator """rankbased""" +582 37 dataset """fb15k237""" +582 37 model """rotate""" +582 37 loss """marginranking""" +582 37 regularizer """no""" +582 37 optimizer """adam""" +582 37 training_loop """owa""" +582 37 negative_sampler """basic""" +582 37 evaluator """rankbased""" +582 38 dataset """fb15k237""" +582 38 model """rotate""" +582 38 loss """marginranking""" +582 38 regularizer """no""" +582 38 optimizer """adam""" +582 38 training_loop """owa""" +582 38 negative_sampler """basic""" +582 38 evaluator """rankbased""" +582 39 dataset """fb15k237""" +582 39 model """rotate""" +582 39 loss """marginranking""" +582 39 regularizer """no""" +582 39 optimizer """adam""" +582 39 training_loop """owa""" +582 39 negative_sampler """basic""" +582 39 evaluator """rankbased""" +582 40 dataset """fb15k237""" +582 40 model """rotate""" +582 40 loss """marginranking""" +582 40 regularizer """no""" +582 40 optimizer """adam""" +582 40 training_loop """owa""" +582 40 negative_sampler """basic""" +582 40 evaluator """rankbased""" +582 41 dataset """fb15k237""" +582 41 model """rotate""" +582 41 loss """marginranking""" +582 41 regularizer """no""" 
+582 41 optimizer """adam""" +582 41 training_loop """owa""" +582 41 negative_sampler """basic""" +582 41 evaluator """rankbased""" +582 42 dataset """fb15k237""" +582 42 model """rotate""" +582 42 loss """marginranking""" +582 42 regularizer """no""" +582 42 optimizer """adam""" +582 42 training_loop """owa""" +582 42 negative_sampler """basic""" +582 42 evaluator """rankbased""" +582 43 dataset """fb15k237""" +582 43 model """rotate""" +582 43 loss """marginranking""" +582 43 regularizer """no""" +582 43 optimizer """adam""" +582 43 training_loop """owa""" +582 43 negative_sampler """basic""" +582 43 evaluator """rankbased""" +582 44 dataset """fb15k237""" +582 44 model """rotate""" +582 44 loss """marginranking""" +582 44 regularizer """no""" +582 44 optimizer """adam""" +582 44 training_loop """owa""" +582 44 negative_sampler """basic""" +582 44 evaluator """rankbased""" +582 45 dataset """fb15k237""" +582 45 model """rotate""" +582 45 loss """marginranking""" +582 45 regularizer """no""" +582 45 optimizer """adam""" +582 45 training_loop """owa""" +582 45 negative_sampler """basic""" +582 45 evaluator """rankbased""" +582 46 dataset """fb15k237""" +582 46 model """rotate""" +582 46 loss """marginranking""" +582 46 regularizer """no""" +582 46 optimizer """adam""" +582 46 training_loop """owa""" +582 46 negative_sampler """basic""" +582 46 evaluator """rankbased""" +582 47 dataset """fb15k237""" +582 47 model """rotate""" +582 47 loss """marginranking""" +582 47 regularizer """no""" +582 47 optimizer """adam""" +582 47 training_loop """owa""" +582 47 negative_sampler """basic""" +582 47 evaluator """rankbased""" +582 48 dataset """fb15k237""" +582 48 model """rotate""" +582 48 loss """marginranking""" +582 48 regularizer """no""" +582 48 optimizer """adam""" +582 48 training_loop """owa""" +582 48 negative_sampler """basic""" +582 48 evaluator """rankbased""" +582 49 dataset """fb15k237""" +582 49 model """rotate""" +582 49 loss """marginranking""" +582 49 
regularizer """no""" +582 49 optimizer """adam""" +582 49 training_loop """owa""" +582 49 negative_sampler """basic""" +582 49 evaluator """rankbased""" +582 50 dataset """fb15k237""" +582 50 model """rotate""" +582 50 loss """marginranking""" +582 50 regularizer """no""" +582 50 optimizer """adam""" +582 50 training_loop """owa""" +582 50 negative_sampler """basic""" +582 50 evaluator """rankbased""" +582 51 dataset """fb15k237""" +582 51 model """rotate""" +582 51 loss """marginranking""" +582 51 regularizer """no""" +582 51 optimizer """adam""" +582 51 training_loop """owa""" +582 51 negative_sampler """basic""" +582 51 evaluator """rankbased""" +582 52 dataset """fb15k237""" +582 52 model """rotate""" +582 52 loss """marginranking""" +582 52 regularizer """no""" +582 52 optimizer """adam""" +582 52 training_loop """owa""" +582 52 negative_sampler """basic""" +582 52 evaluator """rankbased""" +582 53 dataset """fb15k237""" +582 53 model """rotate""" +582 53 loss """marginranking""" +582 53 regularizer """no""" +582 53 optimizer """adam""" +582 53 training_loop """owa""" +582 53 negative_sampler """basic""" +582 53 evaluator """rankbased""" +582 54 dataset """fb15k237""" +582 54 model """rotate""" +582 54 loss """marginranking""" +582 54 regularizer """no""" +582 54 optimizer """adam""" +582 54 training_loop """owa""" +582 54 negative_sampler """basic""" +582 54 evaluator """rankbased""" +582 55 dataset """fb15k237""" +582 55 model """rotate""" +582 55 loss """marginranking""" +582 55 regularizer """no""" +582 55 optimizer """adam""" +582 55 training_loop """owa""" +582 55 negative_sampler """basic""" +582 55 evaluator """rankbased""" +582 56 dataset """fb15k237""" +582 56 model """rotate""" +582 56 loss """marginranking""" +582 56 regularizer """no""" +582 56 optimizer """adam""" +582 56 training_loop """owa""" +582 56 negative_sampler """basic""" +582 56 evaluator """rankbased""" +582 57 dataset """fb15k237""" +582 57 model """rotate""" +582 57 loss 
"""marginranking""" +582 57 regularizer """no""" +582 57 optimizer """adam""" +582 57 training_loop """owa""" +582 57 negative_sampler """basic""" +582 57 evaluator """rankbased""" +582 58 dataset """fb15k237""" +582 58 model """rotate""" +582 58 loss """marginranking""" +582 58 regularizer """no""" +582 58 optimizer """adam""" +582 58 training_loop """owa""" +582 58 negative_sampler """basic""" +582 58 evaluator """rankbased""" +582 59 dataset """fb15k237""" +582 59 model """rotate""" +582 59 loss """marginranking""" +582 59 regularizer """no""" +582 59 optimizer """adam""" +582 59 training_loop """owa""" +582 59 negative_sampler """basic""" +582 59 evaluator """rankbased""" +583 1 model.embedding_dim 2.0 +583 1 training.batch_size 2.0 +583 1 training.label_smoothing 0.09452399771214713 +583 2 model.embedding_dim 0.0 +583 2 training.batch_size 1.0 +583 2 training.label_smoothing 0.19372237071342663 +583 3 model.embedding_dim 2.0 +583 3 training.batch_size 0.0 +583 3 training.label_smoothing 0.017640687737265556 +583 4 model.embedding_dim 1.0 +583 4 training.batch_size 0.0 +583 4 training.label_smoothing 0.036385863560022255 +583 5 model.embedding_dim 1.0 +583 5 training.batch_size 2.0 +583 5 training.label_smoothing 0.010304003972353565 +583 6 model.embedding_dim 0.0 +583 6 training.batch_size 2.0 +583 6 training.label_smoothing 0.004078131542803431 +583 7 model.embedding_dim 1.0 +583 7 training.batch_size 2.0 +583 7 training.label_smoothing 0.010328569817018933 +583 8 model.embedding_dim 0.0 +583 8 training.batch_size 2.0 +583 8 training.label_smoothing 0.009337476803137547 +583 9 model.embedding_dim 0.0 +583 9 training.batch_size 0.0 +583 9 training.label_smoothing 0.003537803028779475 +583 10 model.embedding_dim 0.0 +583 10 training.batch_size 2.0 +583 10 training.label_smoothing 0.0069507691369019445 +583 11 model.embedding_dim 2.0 +583 11 training.batch_size 1.0 +583 11 training.label_smoothing 0.01828765234093335 +583 12 model.embedding_dim 2.0 +583 12 
training.batch_size 2.0 +583 12 training.label_smoothing 0.04234597123445174 +583 13 model.embedding_dim 2.0 +583 13 training.batch_size 1.0 +583 13 training.label_smoothing 0.6710661974681322 +583 14 model.embedding_dim 0.0 +583 14 training.batch_size 0.0 +583 14 training.label_smoothing 0.006002779110692728 +583 15 model.embedding_dim 0.0 +583 15 training.batch_size 0.0 +583 15 training.label_smoothing 0.003907665782053824 +583 16 model.embedding_dim 1.0 +583 16 training.batch_size 0.0 +583 16 training.label_smoothing 0.008449801356729762 +583 17 model.embedding_dim 0.0 +583 17 training.batch_size 2.0 +583 17 training.label_smoothing 0.9443268664851178 +583 18 model.embedding_dim 0.0 +583 18 training.batch_size 1.0 +583 18 training.label_smoothing 0.06923818248240098 +583 19 model.embedding_dim 2.0 +583 19 training.batch_size 2.0 +583 19 training.label_smoothing 0.5251242853480822 +583 20 model.embedding_dim 0.0 +583 20 training.batch_size 0.0 +583 20 training.label_smoothing 0.1517600489735959 +583 21 model.embedding_dim 2.0 +583 21 training.batch_size 2.0 +583 21 training.label_smoothing 0.06629391087578015 +583 22 model.embedding_dim 2.0 +583 22 training.batch_size 1.0 +583 22 training.label_smoothing 0.9092156043347591 +583 23 model.embedding_dim 2.0 +583 23 training.batch_size 0.0 +583 23 training.label_smoothing 0.33099342847555235 +583 24 model.embedding_dim 0.0 +583 24 training.batch_size 0.0 +583 24 training.label_smoothing 0.010410316269214324 +583 25 model.embedding_dim 0.0 +583 25 training.batch_size 0.0 +583 25 training.label_smoothing 0.0011296307090886103 +583 26 model.embedding_dim 1.0 +583 26 training.batch_size 2.0 +583 26 training.label_smoothing 0.8916575562295128 +583 27 model.embedding_dim 2.0 +583 27 training.batch_size 0.0 +583 27 training.label_smoothing 0.0013710053011310689 +583 28 model.embedding_dim 0.0 +583 28 training.batch_size 1.0 +583 28 training.label_smoothing 0.10465351019262671 +583 29 model.embedding_dim 2.0 +583 29 
training.batch_size 2.0 +583 29 training.label_smoothing 0.010256134786348894 +583 30 model.embedding_dim 2.0 +583 30 training.batch_size 2.0 +583 30 training.label_smoothing 0.10549398138989922 +583 31 model.embedding_dim 1.0 +583 31 training.batch_size 0.0 +583 31 training.label_smoothing 0.002249807092087758 +583 32 model.embedding_dim 1.0 +583 32 training.batch_size 1.0 +583 32 training.label_smoothing 0.0016217572227893466 +583 33 model.embedding_dim 2.0 +583 33 training.batch_size 0.0 +583 33 training.label_smoothing 0.003310957046373402 +583 34 model.embedding_dim 2.0 +583 34 training.batch_size 2.0 +583 34 training.label_smoothing 0.04972246818731498 +583 35 model.embedding_dim 0.0 +583 35 training.batch_size 2.0 +583 35 training.label_smoothing 0.004395975181875031 +583 36 model.embedding_dim 2.0 +583 36 training.batch_size 2.0 +583 36 training.label_smoothing 0.0066303938347264686 +583 37 model.embedding_dim 2.0 +583 37 training.batch_size 0.0 +583 37 training.label_smoothing 0.045864277389695327 +583 38 model.embedding_dim 1.0 +583 38 training.batch_size 1.0 +583 38 training.label_smoothing 0.01275943565687604 +583 39 model.embedding_dim 1.0 +583 39 training.batch_size 1.0 +583 39 training.label_smoothing 0.1514150127907455 +583 40 model.embedding_dim 0.0 +583 40 training.batch_size 1.0 +583 40 training.label_smoothing 0.0036987342376830976 +583 41 model.embedding_dim 0.0 +583 41 training.batch_size 0.0 +583 41 training.label_smoothing 0.11992123172032815 +583 42 model.embedding_dim 1.0 +583 42 training.batch_size 2.0 +583 42 training.label_smoothing 0.17435840052387352 +583 43 model.embedding_dim 2.0 +583 43 training.batch_size 1.0 +583 43 training.label_smoothing 0.05750513546074748 +583 44 model.embedding_dim 0.0 +583 44 training.batch_size 2.0 +583 44 training.label_smoothing 0.03411178888304166 +583 45 model.embedding_dim 0.0 +583 45 training.batch_size 0.0 +583 45 training.label_smoothing 0.0010311621388680477 +583 46 model.embedding_dim 0.0 +583 
46 training.batch_size 1.0 +583 46 training.label_smoothing 0.016072366297548813 +583 47 model.embedding_dim 0.0 +583 47 training.batch_size 2.0 +583 47 training.label_smoothing 0.2797051167460459 +583 48 model.embedding_dim 0.0 +583 48 training.batch_size 0.0 +583 48 training.label_smoothing 0.02768780663802107 +583 49 model.embedding_dim 2.0 +583 49 training.batch_size 0.0 +583 49 training.label_smoothing 0.0682725535462484 +583 50 model.embedding_dim 1.0 +583 50 training.batch_size 0.0 +583 50 training.label_smoothing 0.06429828885185357 +583 51 model.embedding_dim 1.0 +583 51 training.batch_size 1.0 +583 51 training.label_smoothing 0.007951975641700764 +583 52 model.embedding_dim 1.0 +583 52 training.batch_size 2.0 +583 52 training.label_smoothing 0.03943941242814003 +583 53 model.embedding_dim 2.0 +583 53 training.batch_size 0.0 +583 53 training.label_smoothing 0.6572985441022108 +583 54 model.embedding_dim 2.0 +583 54 training.batch_size 1.0 +583 54 training.label_smoothing 0.001872891989766592 +583 55 model.embedding_dim 0.0 +583 55 training.batch_size 1.0 +583 55 training.label_smoothing 0.01817135665745948 +583 56 model.embedding_dim 1.0 +583 56 training.batch_size 0.0 +583 56 training.label_smoothing 0.02302388921729086 +583 57 model.embedding_dim 2.0 +583 57 training.batch_size 1.0 +583 57 training.label_smoothing 0.0016743438286898195 +583 58 model.embedding_dim 1.0 +583 58 training.batch_size 1.0 +583 58 training.label_smoothing 0.02936738794673512 +583 59 model.embedding_dim 0.0 +583 59 training.batch_size 0.0 +583 59 training.label_smoothing 0.04496486849446595 +583 60 model.embedding_dim 2.0 +583 60 training.batch_size 0.0 +583 60 training.label_smoothing 0.26932152656457514 +583 61 model.embedding_dim 1.0 +583 61 training.batch_size 2.0 +583 61 training.label_smoothing 0.040421359771736214 +583 62 model.embedding_dim 0.0 +583 62 training.batch_size 0.0 +583 62 training.label_smoothing 0.06282558452949992 +583 63 model.embedding_dim 1.0 +583 63 
training.batch_size 2.0 +583 63 training.label_smoothing 0.003487100165207968 +583 64 model.embedding_dim 1.0 +583 64 training.batch_size 1.0 +583 64 training.label_smoothing 0.6887048845357878 +583 65 model.embedding_dim 0.0 +583 65 training.batch_size 2.0 +583 65 training.label_smoothing 0.005446051843935682 +583 66 model.embedding_dim 0.0 +583 66 training.batch_size 2.0 +583 66 training.label_smoothing 0.9858531150236973 +583 67 model.embedding_dim 1.0 +583 67 training.batch_size 0.0 +583 67 training.label_smoothing 0.0024149939501812707 +583 68 model.embedding_dim 0.0 +583 68 training.batch_size 1.0 +583 68 training.label_smoothing 0.004043113923013294 +583 69 model.embedding_dim 0.0 +583 69 training.batch_size 1.0 +583 69 training.label_smoothing 0.15827442364824082 +583 70 model.embedding_dim 2.0 +583 70 training.batch_size 0.0 +583 70 training.label_smoothing 0.020463621265807228 +583 71 model.embedding_dim 0.0 +583 71 training.batch_size 0.0 +583 71 training.label_smoothing 0.020492043538988827 +583 72 model.embedding_dim 2.0 +583 72 training.batch_size 1.0 +583 72 training.label_smoothing 0.08482360200956737 +583 73 model.embedding_dim 0.0 +583 73 training.batch_size 1.0 +583 73 training.label_smoothing 0.0261083942656603 +583 74 model.embedding_dim 0.0 +583 74 training.batch_size 2.0 +583 74 training.label_smoothing 0.16037266627528005 +583 75 model.embedding_dim 1.0 +583 75 training.batch_size 2.0 +583 75 training.label_smoothing 0.004073855127090261 +583 76 model.embedding_dim 2.0 +583 76 training.batch_size 2.0 +583 76 training.label_smoothing 0.012462981804485709 +583 77 model.embedding_dim 2.0 +583 77 training.batch_size 1.0 +583 77 training.label_smoothing 0.0016983844054681925 +583 78 model.embedding_dim 1.0 +583 78 training.batch_size 1.0 +583 78 training.label_smoothing 0.3866712414773535 +583 79 model.embedding_dim 2.0 +583 79 training.batch_size 0.0 +583 79 training.label_smoothing 0.0026501203648574053 +583 80 model.embedding_dim 2.0 +583 80 
training.batch_size 2.0 +583 80 training.label_smoothing 0.014125559399723424 +583 81 model.embedding_dim 0.0 +583 81 training.batch_size 2.0 +583 81 training.label_smoothing 0.0014234429595865285 +583 82 model.embedding_dim 1.0 +583 82 training.batch_size 2.0 +583 82 training.label_smoothing 0.5290966240615963 +583 83 model.embedding_dim 2.0 +583 83 training.batch_size 2.0 +583 83 training.label_smoothing 0.0028564504986297585 +583 84 model.embedding_dim 2.0 +583 84 training.batch_size 1.0 +583 84 training.label_smoothing 0.0015448638714663184 +583 85 model.embedding_dim 2.0 +583 85 training.batch_size 1.0 +583 85 training.label_smoothing 0.13205980628961383 +583 86 model.embedding_dim 1.0 +583 86 training.batch_size 1.0 +583 86 training.label_smoothing 0.022863010960453657 +583 87 model.embedding_dim 2.0 +583 87 training.batch_size 1.0 +583 87 training.label_smoothing 0.034372725865190225 +583 88 model.embedding_dim 1.0 +583 88 training.batch_size 1.0 +583 88 training.label_smoothing 0.019877123327792626 +583 89 model.embedding_dim 2.0 +583 89 training.batch_size 1.0 +583 89 training.label_smoothing 0.0047955489920539 +583 90 model.embedding_dim 2.0 +583 90 training.batch_size 1.0 +583 90 training.label_smoothing 0.13836054860966565 +583 91 model.embedding_dim 2.0 +583 91 training.batch_size 0.0 +583 91 training.label_smoothing 0.038089527851737866 +583 92 model.embedding_dim 1.0 +583 92 training.batch_size 0.0 +583 92 training.label_smoothing 0.5833513880609918 +583 93 model.embedding_dim 2.0 +583 93 training.batch_size 0.0 +583 93 training.label_smoothing 0.02145453917411328 +583 94 model.embedding_dim 2.0 +583 94 training.batch_size 2.0 +583 94 training.label_smoothing 0.08557383602311398 +583 95 model.embedding_dim 0.0 +583 95 training.batch_size 0.0 +583 95 training.label_smoothing 0.1712239698422781 +583 96 model.embedding_dim 1.0 +583 96 training.batch_size 2.0 +583 96 training.label_smoothing 0.056684978237492634 +583 97 model.embedding_dim 2.0 +583 97 
training.batch_size 0.0 +583 97 training.label_smoothing 0.02233165028863168 +583 98 model.embedding_dim 2.0 +583 98 training.batch_size 0.0 +583 98 training.label_smoothing 0.09325422100339398 +583 99 model.embedding_dim 1.0 +583 99 training.batch_size 2.0 +583 99 training.label_smoothing 0.21728077277214167 +583 100 model.embedding_dim 1.0 +583 100 training.batch_size 2.0 +583 100 training.label_smoothing 0.01091697265094162 +583 1 dataset """kinships""" +583 1 model """rotate""" +583 1 loss """bceaftersigmoid""" +583 1 regularizer """no""" +583 1 optimizer """adadelta""" +583 1 training_loop """lcwa""" +583 1 evaluator """rankbased""" +583 2 dataset """kinships""" +583 2 model """rotate""" +583 2 loss """bceaftersigmoid""" +583 2 regularizer """no""" +583 2 optimizer """adadelta""" +583 2 training_loop """lcwa""" +583 2 evaluator """rankbased""" +583 3 dataset """kinships""" +583 3 model """rotate""" +583 3 loss """bceaftersigmoid""" +583 3 regularizer """no""" +583 3 optimizer """adadelta""" +583 3 training_loop """lcwa""" +583 3 evaluator """rankbased""" +583 4 dataset """kinships""" +583 4 model """rotate""" +583 4 loss """bceaftersigmoid""" +583 4 regularizer """no""" +583 4 optimizer """adadelta""" +583 4 training_loop """lcwa""" +583 4 evaluator """rankbased""" +583 5 dataset """kinships""" +583 5 model """rotate""" +583 5 loss """bceaftersigmoid""" +583 5 regularizer """no""" +583 5 optimizer """adadelta""" +583 5 training_loop """lcwa""" +583 5 evaluator """rankbased""" +583 6 dataset """kinships""" +583 6 model """rotate""" +583 6 loss """bceaftersigmoid""" +583 6 regularizer """no""" +583 6 optimizer """adadelta""" +583 6 training_loop """lcwa""" +583 6 evaluator """rankbased""" +583 7 dataset """kinships""" +583 7 model """rotate""" +583 7 loss """bceaftersigmoid""" +583 7 regularizer """no""" +583 7 optimizer """adadelta""" +583 7 training_loop """lcwa""" +583 7 evaluator """rankbased""" +583 8 dataset """kinships""" +583 8 model """rotate""" +583 8 
loss """bceaftersigmoid""" +583 8 regularizer """no""" +583 8 optimizer """adadelta""" +583 8 training_loop """lcwa""" +583 8 evaluator """rankbased""" +583 9 dataset """kinships""" +583 9 model """rotate""" +583 9 loss """bceaftersigmoid""" +583 9 regularizer """no""" +583 9 optimizer """adadelta""" +583 9 training_loop """lcwa""" +583 9 evaluator """rankbased""" +583 10 dataset """kinships""" +583 10 model """rotate""" +583 10 loss """bceaftersigmoid""" +583 10 regularizer """no""" +583 10 optimizer """adadelta""" +583 10 training_loop """lcwa""" +583 10 evaluator """rankbased""" +583 11 dataset """kinships""" +583 11 model """rotate""" +583 11 loss """bceaftersigmoid""" +583 11 regularizer """no""" +583 11 optimizer """adadelta""" +583 11 training_loop """lcwa""" +583 11 evaluator """rankbased""" +583 12 dataset """kinships""" +583 12 model """rotate""" +583 12 loss """bceaftersigmoid""" +583 12 regularizer """no""" +583 12 optimizer """adadelta""" +583 12 training_loop """lcwa""" +583 12 evaluator """rankbased""" +583 13 dataset """kinships""" +583 13 model """rotate""" +583 13 loss """bceaftersigmoid""" +583 13 regularizer """no""" +583 13 optimizer """adadelta""" +583 13 training_loop """lcwa""" +583 13 evaluator """rankbased""" +583 14 dataset """kinships""" +583 14 model """rotate""" +583 14 loss """bceaftersigmoid""" +583 14 regularizer """no""" +583 14 optimizer """adadelta""" +583 14 training_loop """lcwa""" +583 14 evaluator """rankbased""" +583 15 dataset """kinships""" +583 15 model """rotate""" +583 15 loss """bceaftersigmoid""" +583 15 regularizer """no""" +583 15 optimizer """adadelta""" +583 15 training_loop """lcwa""" +583 15 evaluator """rankbased""" +583 16 dataset """kinships""" +583 16 model """rotate""" +583 16 loss """bceaftersigmoid""" +583 16 regularizer """no""" +583 16 optimizer """adadelta""" +583 16 training_loop """lcwa""" +583 16 evaluator """rankbased""" +583 17 dataset """kinships""" +583 17 model """rotate""" +583 17 loss 
"""bceaftersigmoid""" +583 17 regularizer """no""" +583 17 optimizer """adadelta""" +583 17 training_loop """lcwa""" +583 17 evaluator """rankbased""" +583 18 dataset """kinships""" +583 18 model """rotate""" +583 18 loss """bceaftersigmoid""" +583 18 regularizer """no""" +583 18 optimizer """adadelta""" +583 18 training_loop """lcwa""" +583 18 evaluator """rankbased""" +583 19 dataset """kinships""" +583 19 model """rotate""" +583 19 loss """bceaftersigmoid""" +583 19 regularizer """no""" +583 19 optimizer """adadelta""" +583 19 training_loop """lcwa""" +583 19 evaluator """rankbased""" +583 20 dataset """kinships""" +583 20 model """rotate""" +583 20 loss """bceaftersigmoid""" +583 20 regularizer """no""" +583 20 optimizer """adadelta""" +583 20 training_loop """lcwa""" +583 20 evaluator """rankbased""" +583 21 dataset """kinships""" +583 21 model """rotate""" +583 21 loss """bceaftersigmoid""" +583 21 regularizer """no""" +583 21 optimizer """adadelta""" +583 21 training_loop """lcwa""" +583 21 evaluator """rankbased""" +583 22 dataset """kinships""" +583 22 model """rotate""" +583 22 loss """bceaftersigmoid""" +583 22 regularizer """no""" +583 22 optimizer """adadelta""" +583 22 training_loop """lcwa""" +583 22 evaluator """rankbased""" +583 23 dataset """kinships""" +583 23 model """rotate""" +583 23 loss """bceaftersigmoid""" +583 23 regularizer """no""" +583 23 optimizer """adadelta""" +583 23 training_loop """lcwa""" +583 23 evaluator """rankbased""" +583 24 dataset """kinships""" +583 24 model """rotate""" +583 24 loss """bceaftersigmoid""" +583 24 regularizer """no""" +583 24 optimizer """adadelta""" +583 24 training_loop """lcwa""" +583 24 evaluator """rankbased""" +583 25 dataset """kinships""" +583 25 model """rotate""" +583 25 loss """bceaftersigmoid""" +583 25 regularizer """no""" +583 25 optimizer """adadelta""" +583 25 training_loop """lcwa""" +583 25 evaluator """rankbased""" +583 26 dataset """kinships""" +583 26 model """rotate""" +583 26 loss 
"""bceaftersigmoid""" +583 26 regularizer """no""" +583 26 optimizer """adadelta""" +583 26 training_loop """lcwa""" +583 26 evaluator """rankbased""" +583 27 dataset """kinships""" +583 27 model """rotate""" +583 27 loss """bceaftersigmoid""" +583 27 regularizer """no""" +583 27 optimizer """adadelta""" +583 27 training_loop """lcwa""" +583 27 evaluator """rankbased""" +583 28 dataset """kinships""" +583 28 model """rotate""" +583 28 loss """bceaftersigmoid""" +583 28 regularizer """no""" +583 28 optimizer """adadelta""" +583 28 training_loop """lcwa""" +583 28 evaluator """rankbased""" +583 29 dataset """kinships""" +583 29 model """rotate""" +583 29 loss """bceaftersigmoid""" +583 29 regularizer """no""" +583 29 optimizer """adadelta""" +583 29 training_loop """lcwa""" +583 29 evaluator """rankbased""" +583 30 dataset """kinships""" +583 30 model """rotate""" +583 30 loss """bceaftersigmoid""" +583 30 regularizer """no""" +583 30 optimizer """adadelta""" +583 30 training_loop """lcwa""" +583 30 evaluator """rankbased""" +583 31 dataset """kinships""" +583 31 model """rotate""" +583 31 loss """bceaftersigmoid""" +583 31 regularizer """no""" +583 31 optimizer """adadelta""" +583 31 training_loop """lcwa""" +583 31 evaluator """rankbased""" +583 32 dataset """kinships""" +583 32 model """rotate""" +583 32 loss """bceaftersigmoid""" +583 32 regularizer """no""" +583 32 optimizer """adadelta""" +583 32 training_loop """lcwa""" +583 32 evaluator """rankbased""" +583 33 dataset """kinships""" +583 33 model """rotate""" +583 33 loss """bceaftersigmoid""" +583 33 regularizer """no""" +583 33 optimizer """adadelta""" +583 33 training_loop """lcwa""" +583 33 evaluator """rankbased""" +583 34 dataset """kinships""" +583 34 model """rotate""" +583 34 loss """bceaftersigmoid""" +583 34 regularizer """no""" +583 34 optimizer """adadelta""" +583 34 training_loop """lcwa""" +583 34 evaluator """rankbased""" +583 35 dataset """kinships""" +583 35 model """rotate""" +583 35 loss 
"""bceaftersigmoid""" +583 35 regularizer """no""" +583 35 optimizer """adadelta""" +583 35 training_loop """lcwa""" +583 35 evaluator """rankbased""" +583 36 dataset """kinships""" +583 36 model """rotate""" +583 36 loss """bceaftersigmoid""" +583 36 regularizer """no""" +583 36 optimizer """adadelta""" +583 36 training_loop """lcwa""" +583 36 evaluator """rankbased""" +583 37 dataset """kinships""" +583 37 model """rotate""" +583 37 loss """bceaftersigmoid""" +583 37 regularizer """no""" +583 37 optimizer """adadelta""" +583 37 training_loop """lcwa""" +583 37 evaluator """rankbased""" +583 38 dataset """kinships""" +583 38 model """rotate""" +583 38 loss """bceaftersigmoid""" +583 38 regularizer """no""" +583 38 optimizer """adadelta""" +583 38 training_loop """lcwa""" +583 38 evaluator """rankbased""" +583 39 dataset """kinships""" +583 39 model """rotate""" +583 39 loss """bceaftersigmoid""" +583 39 regularizer """no""" +583 39 optimizer """adadelta""" +583 39 training_loop """lcwa""" +583 39 evaluator """rankbased""" +583 40 dataset """kinships""" +583 40 model """rotate""" +583 40 loss """bceaftersigmoid""" +583 40 regularizer """no""" +583 40 optimizer """adadelta""" +583 40 training_loop """lcwa""" +583 40 evaluator """rankbased""" +583 41 dataset """kinships""" +583 41 model """rotate""" +583 41 loss """bceaftersigmoid""" +583 41 regularizer """no""" +583 41 optimizer """adadelta""" +583 41 training_loop """lcwa""" +583 41 evaluator """rankbased""" +583 42 dataset """kinships""" +583 42 model """rotate""" +583 42 loss """bceaftersigmoid""" +583 42 regularizer """no""" +583 42 optimizer """adadelta""" +583 42 training_loop """lcwa""" +583 42 evaluator """rankbased""" +583 43 dataset """kinships""" +583 43 model """rotate""" +583 43 loss """bceaftersigmoid""" +583 43 regularizer """no""" +583 43 optimizer """adadelta""" +583 43 training_loop """lcwa""" +583 43 evaluator """rankbased""" +583 44 dataset """kinships""" +583 44 model """rotate""" +583 44 loss 
"""bceaftersigmoid""" +583 44 regularizer """no""" +583 44 optimizer """adadelta""" +583 44 training_loop """lcwa""" +583 44 evaluator """rankbased""" +583 45 dataset """kinships""" +583 45 model """rotate""" +583 45 loss """bceaftersigmoid""" +583 45 regularizer """no""" +583 45 optimizer """adadelta""" +583 45 training_loop """lcwa""" +583 45 evaluator """rankbased""" +583 46 dataset """kinships""" +583 46 model """rotate""" +583 46 loss """bceaftersigmoid""" +583 46 regularizer """no""" +583 46 optimizer """adadelta""" +583 46 training_loop """lcwa""" +583 46 evaluator """rankbased""" +583 47 dataset """kinships""" +583 47 model """rotate""" +583 47 loss """bceaftersigmoid""" +583 47 regularizer """no""" +583 47 optimizer """adadelta""" +583 47 training_loop """lcwa""" +583 47 evaluator """rankbased""" +583 48 dataset """kinships""" +583 48 model """rotate""" +583 48 loss """bceaftersigmoid""" +583 48 regularizer """no""" +583 48 optimizer """adadelta""" +583 48 training_loop """lcwa""" +583 48 evaluator """rankbased""" +583 49 dataset """kinships""" +583 49 model """rotate""" +583 49 loss """bceaftersigmoid""" +583 49 regularizer """no""" +583 49 optimizer """adadelta""" +583 49 training_loop """lcwa""" +583 49 evaluator """rankbased""" +583 50 dataset """kinships""" +583 50 model """rotate""" +583 50 loss """bceaftersigmoid""" +583 50 regularizer """no""" +583 50 optimizer """adadelta""" +583 50 training_loop """lcwa""" +583 50 evaluator """rankbased""" +583 51 dataset """kinships""" +583 51 model """rotate""" +583 51 loss """bceaftersigmoid""" +583 51 regularizer """no""" +583 51 optimizer """adadelta""" +583 51 training_loop """lcwa""" +583 51 evaluator """rankbased""" +583 52 dataset """kinships""" +583 52 model """rotate""" +583 52 loss """bceaftersigmoid""" +583 52 regularizer """no""" +583 52 optimizer """adadelta""" +583 52 training_loop """lcwa""" +583 52 evaluator """rankbased""" +583 53 dataset """kinships""" +583 53 model """rotate""" +583 53 loss 
"""bceaftersigmoid""" +583 53 regularizer """no""" +583 53 optimizer """adadelta""" +583 53 training_loop """lcwa""" +583 53 evaluator """rankbased""" +583 54 dataset """kinships""" +583 54 model """rotate""" +583 54 loss """bceaftersigmoid""" +583 54 regularizer """no""" +583 54 optimizer """adadelta""" +583 54 training_loop """lcwa""" +583 54 evaluator """rankbased""" +583 55 dataset """kinships""" +583 55 model """rotate""" +583 55 loss """bceaftersigmoid""" +583 55 regularizer """no""" +583 55 optimizer """adadelta""" +583 55 training_loop """lcwa""" +583 55 evaluator """rankbased""" +583 56 dataset """kinships""" +583 56 model """rotate""" +583 56 loss """bceaftersigmoid""" +583 56 regularizer """no""" +583 56 optimizer """adadelta""" +583 56 training_loop """lcwa""" +583 56 evaluator """rankbased""" +583 57 dataset """kinships""" +583 57 model """rotate""" +583 57 loss """bceaftersigmoid""" +583 57 regularizer """no""" +583 57 optimizer """adadelta""" +583 57 training_loop """lcwa""" +583 57 evaluator """rankbased""" +583 58 dataset """kinships""" +583 58 model """rotate""" +583 58 loss """bceaftersigmoid""" +583 58 regularizer """no""" +583 58 optimizer """adadelta""" +583 58 training_loop """lcwa""" +583 58 evaluator """rankbased""" +583 59 dataset """kinships""" +583 59 model """rotate""" +583 59 loss """bceaftersigmoid""" +583 59 regularizer """no""" +583 59 optimizer """adadelta""" +583 59 training_loop """lcwa""" +583 59 evaluator """rankbased""" +583 60 dataset """kinships""" +583 60 model """rotate""" +583 60 loss """bceaftersigmoid""" +583 60 regularizer """no""" +583 60 optimizer """adadelta""" +583 60 training_loop """lcwa""" +583 60 evaluator """rankbased""" +583 61 dataset """kinships""" +583 61 model """rotate""" +583 61 loss """bceaftersigmoid""" +583 61 regularizer """no""" +583 61 optimizer """adadelta""" +583 61 training_loop """lcwa""" +583 61 evaluator """rankbased""" +583 62 dataset """kinships""" +583 62 model """rotate""" +583 62 loss 
"""bceaftersigmoid""" +583 62 regularizer """no""" +583 62 optimizer """adadelta""" +583 62 training_loop """lcwa""" +583 62 evaluator """rankbased""" +583 63 dataset """kinships""" +583 63 model """rotate""" +583 63 loss """bceaftersigmoid""" +583 63 regularizer """no""" +583 63 optimizer """adadelta""" +583 63 training_loop """lcwa""" +583 63 evaluator """rankbased""" +583 64 dataset """kinships""" +583 64 model """rotate""" +583 64 loss """bceaftersigmoid""" +583 64 regularizer """no""" +583 64 optimizer """adadelta""" +583 64 training_loop """lcwa""" +583 64 evaluator """rankbased""" +583 65 dataset """kinships""" +583 65 model """rotate""" +583 65 loss """bceaftersigmoid""" +583 65 regularizer """no""" +583 65 optimizer """adadelta""" +583 65 training_loop """lcwa""" +583 65 evaluator """rankbased""" +583 66 dataset """kinships""" +583 66 model """rotate""" +583 66 loss """bceaftersigmoid""" +583 66 regularizer """no""" +583 66 optimizer """adadelta""" +583 66 training_loop """lcwa""" +583 66 evaluator """rankbased""" +583 67 dataset """kinships""" +583 67 model """rotate""" +583 67 loss """bceaftersigmoid""" +583 67 regularizer """no""" +583 67 optimizer """adadelta""" +583 67 training_loop """lcwa""" +583 67 evaluator """rankbased""" +583 68 dataset """kinships""" +583 68 model """rotate""" +583 68 loss """bceaftersigmoid""" +583 68 regularizer """no""" +583 68 optimizer """adadelta""" +583 68 training_loop """lcwa""" +583 68 evaluator """rankbased""" +583 69 dataset """kinships""" +583 69 model """rotate""" +583 69 loss """bceaftersigmoid""" +583 69 regularizer """no""" +583 69 optimizer """adadelta""" +583 69 training_loop """lcwa""" +583 69 evaluator """rankbased""" +583 70 dataset """kinships""" +583 70 model """rotate""" +583 70 loss """bceaftersigmoid""" +583 70 regularizer """no""" +583 70 optimizer """adadelta""" +583 70 training_loop """lcwa""" +583 70 evaluator """rankbased""" +583 71 dataset """kinships""" +583 71 model """rotate""" +583 71 loss 
"""bceaftersigmoid""" +583 71 regularizer """no""" +583 71 optimizer """adadelta""" +583 71 training_loop """lcwa""" +583 71 evaluator """rankbased""" +583 72 dataset """kinships""" +583 72 model """rotate""" +583 72 loss """bceaftersigmoid""" +583 72 regularizer """no""" +583 72 optimizer """adadelta""" +583 72 training_loop """lcwa""" +583 72 evaluator """rankbased""" +583 73 dataset """kinships""" +583 73 model """rotate""" +583 73 loss """bceaftersigmoid""" +583 73 regularizer """no""" +583 73 optimizer """adadelta""" +583 73 training_loop """lcwa""" +583 73 evaluator """rankbased""" +583 74 dataset """kinships""" +583 74 model """rotate""" +583 74 loss """bceaftersigmoid""" +583 74 regularizer """no""" +583 74 optimizer """adadelta""" +583 74 training_loop """lcwa""" +583 74 evaluator """rankbased""" +583 75 dataset """kinships""" +583 75 model """rotate""" +583 75 loss """bceaftersigmoid""" +583 75 regularizer """no""" +583 75 optimizer """adadelta""" +583 75 training_loop """lcwa""" +583 75 evaluator """rankbased""" +583 76 dataset """kinships""" +583 76 model """rotate""" +583 76 loss """bceaftersigmoid""" +583 76 regularizer """no""" +583 76 optimizer """adadelta""" +583 76 training_loop """lcwa""" +583 76 evaluator """rankbased""" +583 77 dataset """kinships""" +583 77 model """rotate""" +583 77 loss """bceaftersigmoid""" +583 77 regularizer """no""" +583 77 optimizer """adadelta""" +583 77 training_loop """lcwa""" +583 77 evaluator """rankbased""" +583 78 dataset """kinships""" +583 78 model """rotate""" +583 78 loss """bceaftersigmoid""" +583 78 regularizer """no""" +583 78 optimizer """adadelta""" +583 78 training_loop """lcwa""" +583 78 evaluator """rankbased""" +583 79 dataset """kinships""" +583 79 model """rotate""" +583 79 loss """bceaftersigmoid""" +583 79 regularizer """no""" +583 79 optimizer """adadelta""" +583 79 training_loop """lcwa""" +583 79 evaluator """rankbased""" +583 80 dataset """kinships""" +583 80 model """rotate""" +583 80 loss 
"""bceaftersigmoid""" +583 80 regularizer """no""" +583 80 optimizer """adadelta""" +583 80 training_loop """lcwa""" +583 80 evaluator """rankbased""" +583 81 dataset """kinships""" +583 81 model """rotate""" +583 81 loss """bceaftersigmoid""" +583 81 regularizer """no""" +583 81 optimizer """adadelta""" +583 81 training_loop """lcwa""" +583 81 evaluator """rankbased""" +583 82 dataset """kinships""" +583 82 model """rotate""" +583 82 loss """bceaftersigmoid""" +583 82 regularizer """no""" +583 82 optimizer """adadelta""" +583 82 training_loop """lcwa""" +583 82 evaluator """rankbased""" +583 83 dataset """kinships""" +583 83 model """rotate""" +583 83 loss """bceaftersigmoid""" +583 83 regularizer """no""" +583 83 optimizer """adadelta""" +583 83 training_loop """lcwa""" +583 83 evaluator """rankbased""" +583 84 dataset """kinships""" +583 84 model """rotate""" +583 84 loss """bceaftersigmoid""" +583 84 regularizer """no""" +583 84 optimizer """adadelta""" +583 84 training_loop """lcwa""" +583 84 evaluator """rankbased""" +583 85 dataset """kinships""" +583 85 model """rotate""" +583 85 loss """bceaftersigmoid""" +583 85 regularizer """no""" +583 85 optimizer """adadelta""" +583 85 training_loop """lcwa""" +583 85 evaluator """rankbased""" +583 86 dataset """kinships""" +583 86 model """rotate""" +583 86 loss """bceaftersigmoid""" +583 86 regularizer """no""" +583 86 optimizer """adadelta""" +583 86 training_loop """lcwa""" +583 86 evaluator """rankbased""" +583 87 dataset """kinships""" +583 87 model """rotate""" +583 87 loss """bceaftersigmoid""" +583 87 regularizer """no""" +583 87 optimizer """adadelta""" +583 87 training_loop """lcwa""" +583 87 evaluator """rankbased""" +583 88 dataset """kinships""" +583 88 model """rotate""" +583 88 loss """bceaftersigmoid""" +583 88 regularizer """no""" +583 88 optimizer """adadelta""" +583 88 training_loop """lcwa""" +583 88 evaluator """rankbased""" +583 89 dataset """kinships""" +583 89 model """rotate""" +583 89 loss 
"""bceaftersigmoid""" +583 89 regularizer """no""" +583 89 optimizer """adadelta""" +583 89 training_loop """lcwa""" +583 89 evaluator """rankbased""" +583 90 dataset """kinships""" +583 90 model """rotate""" +583 90 loss """bceaftersigmoid""" +583 90 regularizer """no""" +583 90 optimizer """adadelta""" +583 90 training_loop """lcwa""" +583 90 evaluator """rankbased""" +583 91 dataset """kinships""" +583 91 model """rotate""" +583 91 loss """bceaftersigmoid""" +583 91 regularizer """no""" +583 91 optimizer """adadelta""" +583 91 training_loop """lcwa""" +583 91 evaluator """rankbased""" +583 92 dataset """kinships""" +583 92 model """rotate""" +583 92 loss """bceaftersigmoid""" +583 92 regularizer """no""" +583 92 optimizer """adadelta""" +583 92 training_loop """lcwa""" +583 92 evaluator """rankbased""" +583 93 dataset """kinships""" +583 93 model """rotate""" +583 93 loss """bceaftersigmoid""" +583 93 regularizer """no""" +583 93 optimizer """adadelta""" +583 93 training_loop """lcwa""" +583 93 evaluator """rankbased""" +583 94 dataset """kinships""" +583 94 model """rotate""" +583 94 loss """bceaftersigmoid""" +583 94 regularizer """no""" +583 94 optimizer """adadelta""" +583 94 training_loop """lcwa""" +583 94 evaluator """rankbased""" +583 95 dataset """kinships""" +583 95 model """rotate""" +583 95 loss """bceaftersigmoid""" +583 95 regularizer """no""" +583 95 optimizer """adadelta""" +583 95 training_loop """lcwa""" +583 95 evaluator """rankbased""" +583 96 dataset """kinships""" +583 96 model """rotate""" +583 96 loss """bceaftersigmoid""" +583 96 regularizer """no""" +583 96 optimizer """adadelta""" +583 96 training_loop """lcwa""" +583 96 evaluator """rankbased""" +583 97 dataset """kinships""" +583 97 model """rotate""" +583 97 loss """bceaftersigmoid""" +583 97 regularizer """no""" +583 97 optimizer """adadelta""" +583 97 training_loop """lcwa""" +583 97 evaluator """rankbased""" +583 98 dataset """kinships""" +583 98 model """rotate""" +583 98 loss 
"""bceaftersigmoid""" +583 98 regularizer """no""" +583 98 optimizer """adadelta""" +583 98 training_loop """lcwa""" +583 98 evaluator """rankbased""" +583 99 dataset """kinships""" +583 99 model """rotate""" +583 99 loss """bceaftersigmoid""" +583 99 regularizer """no""" +583 99 optimizer """adadelta""" +583 99 training_loop """lcwa""" +583 99 evaluator """rankbased""" +583 100 dataset """kinships""" +583 100 model """rotate""" +583 100 loss """bceaftersigmoid""" +583 100 regularizer """no""" +583 100 optimizer """adadelta""" +583 100 training_loop """lcwa""" +583 100 evaluator """rankbased""" +584 1 model.embedding_dim 2.0 +584 1 training.batch_size 1.0 +584 1 training.label_smoothing 0.13686392162483585 +584 2 model.embedding_dim 2.0 +584 2 training.batch_size 1.0 +584 2 training.label_smoothing 0.600210248473369 +584 3 model.embedding_dim 1.0 +584 3 training.batch_size 2.0 +584 3 training.label_smoothing 0.0169758723089787 +584 4 model.embedding_dim 2.0 +584 4 training.batch_size 2.0 +584 4 training.label_smoothing 0.16203210314993988 +584 5 model.embedding_dim 2.0 +584 5 training.batch_size 1.0 +584 5 training.label_smoothing 0.0535786160796179 +584 6 model.embedding_dim 2.0 +584 6 training.batch_size 1.0 +584 6 training.label_smoothing 0.30324718826339486 +584 7 model.embedding_dim 2.0 +584 7 training.batch_size 1.0 +584 7 training.label_smoothing 0.8938719862146184 +584 8 model.embedding_dim 2.0 +584 8 training.batch_size 0.0 +584 8 training.label_smoothing 0.00919817633372234 +584 9 model.embedding_dim 2.0 +584 9 training.batch_size 1.0 +584 9 training.label_smoothing 0.0029734966677538313 +584 10 model.embedding_dim 2.0 +584 10 training.batch_size 1.0 +584 10 training.label_smoothing 0.007232557875624425 +584 11 model.embedding_dim 1.0 +584 11 training.batch_size 2.0 +584 11 training.label_smoothing 0.002239541892654369 +584 12 model.embedding_dim 2.0 +584 12 training.batch_size 0.0 +584 12 training.label_smoothing 0.0020636893585598607 +584 13 
model.embedding_dim 2.0 +584 13 training.batch_size 0.0 +584 13 training.label_smoothing 0.005673253365200486 +584 14 model.embedding_dim 0.0 +584 14 training.batch_size 1.0 +584 14 training.label_smoothing 0.4530853454700335 +584 15 model.embedding_dim 1.0 +584 15 training.batch_size 2.0 +584 15 training.label_smoothing 0.006014922365697832 +584 16 model.embedding_dim 2.0 +584 16 training.batch_size 0.0 +584 16 training.label_smoothing 0.1144451799881435 +584 17 model.embedding_dim 2.0 +584 17 training.batch_size 2.0 +584 17 training.label_smoothing 0.024908037933789078 +584 18 model.embedding_dim 0.0 +584 18 training.batch_size 2.0 +584 18 training.label_smoothing 0.8724917915429433 +584 19 model.embedding_dim 2.0 +584 19 training.batch_size 0.0 +584 19 training.label_smoothing 0.0011376872369026094 +584 20 model.embedding_dim 2.0 +584 20 training.batch_size 1.0 +584 20 training.label_smoothing 0.46801854972757234 +584 21 model.embedding_dim 1.0 +584 21 training.batch_size 1.0 +584 21 training.label_smoothing 0.0016321589828584143 +584 22 model.embedding_dim 2.0 +584 22 training.batch_size 2.0 +584 22 training.label_smoothing 0.09008656936609977 +584 23 model.embedding_dim 2.0 +584 23 training.batch_size 2.0 +584 23 training.label_smoothing 0.03733586306852269 +584 24 model.embedding_dim 1.0 +584 24 training.batch_size 0.0 +584 24 training.label_smoothing 0.003230214325331999 +584 25 model.embedding_dim 1.0 +584 25 training.batch_size 0.0 +584 25 training.label_smoothing 0.09001357262259119 +584 26 model.embedding_dim 1.0 +584 26 training.batch_size 1.0 +584 26 training.label_smoothing 0.0021999561521017694 +584 27 model.embedding_dim 0.0 +584 27 training.batch_size 0.0 +584 27 training.label_smoothing 0.05140677984664747 +584 28 model.embedding_dim 1.0 +584 28 training.batch_size 1.0 +584 28 training.label_smoothing 0.012347193475477237 +584 29 model.embedding_dim 1.0 +584 29 training.batch_size 0.0 +584 29 training.label_smoothing 0.2521043220659958 +584 30 
model.embedding_dim 1.0 +584 30 training.batch_size 0.0 +584 30 training.label_smoothing 0.034902737923024105 +584 31 model.embedding_dim 2.0 +584 31 training.batch_size 2.0 +584 31 training.label_smoothing 0.04218357704176074 +584 32 model.embedding_dim 1.0 +584 32 training.batch_size 2.0 +584 32 training.label_smoothing 0.8369942338584897 +584 33 model.embedding_dim 2.0 +584 33 training.batch_size 2.0 +584 33 training.label_smoothing 0.11268285349990997 +584 34 model.embedding_dim 2.0 +584 34 training.batch_size 0.0 +584 34 training.label_smoothing 0.02322593718089756 +584 35 model.embedding_dim 0.0 +584 35 training.batch_size 2.0 +584 35 training.label_smoothing 0.08221914653313235 +584 36 model.embedding_dim 2.0 +584 36 training.batch_size 0.0 +584 36 training.label_smoothing 0.0037549646311067224 +584 37 model.embedding_dim 0.0 +584 37 training.batch_size 0.0 +584 37 training.label_smoothing 0.02692978733554068 +584 38 model.embedding_dim 1.0 +584 38 training.batch_size 0.0 +584 38 training.label_smoothing 0.05998868199207791 +584 39 model.embedding_dim 0.0 +584 39 training.batch_size 1.0 +584 39 training.label_smoothing 0.004723560880367509 +584 40 model.embedding_dim 1.0 +584 40 training.batch_size 2.0 +584 40 training.label_smoothing 0.2039948542642954 +584 41 model.embedding_dim 2.0 +584 41 training.batch_size 2.0 +584 41 training.label_smoothing 0.8612646425513509 +584 42 model.embedding_dim 1.0 +584 42 training.batch_size 0.0 +584 42 training.label_smoothing 0.9421553629883773 +584 43 model.embedding_dim 1.0 +584 43 training.batch_size 1.0 +584 43 training.label_smoothing 0.13919598596152766 +584 44 model.embedding_dim 0.0 +584 44 training.batch_size 0.0 +584 44 training.label_smoothing 0.3625392192136577 +584 45 model.embedding_dim 2.0 +584 45 training.batch_size 0.0 +584 45 training.label_smoothing 0.006579768049877014 +584 46 model.embedding_dim 1.0 +584 46 training.batch_size 0.0 +584 46 training.label_smoothing 0.03675509143165387 +584 47 
model.embedding_dim 2.0 +584 47 training.batch_size 0.0 +584 47 training.label_smoothing 0.1085790111523876 +584 48 model.embedding_dim 1.0 +584 48 training.batch_size 1.0 +584 48 training.label_smoothing 0.007590572269636905 +584 49 model.embedding_dim 1.0 +584 49 training.batch_size 2.0 +584 49 training.label_smoothing 0.083098410666336 +584 50 model.embedding_dim 1.0 +584 50 training.batch_size 0.0 +584 50 training.label_smoothing 0.007814495291789033 +584 51 model.embedding_dim 2.0 +584 51 training.batch_size 1.0 +584 51 training.label_smoothing 0.004954726713900824 +584 52 model.embedding_dim 2.0 +584 52 training.batch_size 1.0 +584 52 training.label_smoothing 0.08923240221040143 +584 53 model.embedding_dim 0.0 +584 53 training.batch_size 2.0 +584 53 training.label_smoothing 0.16950761619783475 +584 54 model.embedding_dim 2.0 +584 54 training.batch_size 0.0 +584 54 training.label_smoothing 0.10218286093882274 +584 55 model.embedding_dim 0.0 +584 55 training.batch_size 1.0 +584 55 training.label_smoothing 0.07822957432983746 +584 56 model.embedding_dim 0.0 +584 56 training.batch_size 0.0 +584 56 training.label_smoothing 0.08041378842815214 +584 57 model.embedding_dim 1.0 +584 57 training.batch_size 1.0 +584 57 training.label_smoothing 0.3341791783229195 +584 58 model.embedding_dim 1.0 +584 58 training.batch_size 2.0 +584 58 training.label_smoothing 0.03952767908019012 +584 59 model.embedding_dim 1.0 +584 59 training.batch_size 2.0 +584 59 training.label_smoothing 0.15774251051856808 +584 60 model.embedding_dim 2.0 +584 60 training.batch_size 0.0 +584 60 training.label_smoothing 0.11285846248511769 +584 61 model.embedding_dim 0.0 +584 61 training.batch_size 1.0 +584 61 training.label_smoothing 0.003068341580502182 +584 62 model.embedding_dim 2.0 +584 62 training.batch_size 2.0 +584 62 training.label_smoothing 0.13787365710271468 +584 63 model.embedding_dim 2.0 +584 63 training.batch_size 0.0 +584 63 training.label_smoothing 0.01075925245778395 +584 64 
model.embedding_dim 2.0 +584 64 training.batch_size 0.0 +584 64 training.label_smoothing 0.15337200736006926 +584 65 model.embedding_dim 2.0 +584 65 training.batch_size 1.0 +584 65 training.label_smoothing 0.004833044918682455 +584 66 model.embedding_dim 2.0 +584 66 training.batch_size 0.0 +584 66 training.label_smoothing 0.008235386738015566 +584 67 model.embedding_dim 1.0 +584 67 training.batch_size 1.0 +584 67 training.label_smoothing 0.4260587876568405 +584 68 model.embedding_dim 2.0 +584 68 training.batch_size 1.0 +584 68 training.label_smoothing 0.001959973171974001 +584 69 model.embedding_dim 2.0 +584 69 training.batch_size 0.0 +584 69 training.label_smoothing 0.019717727167038443 +584 70 model.embedding_dim 0.0 +584 70 training.batch_size 1.0 +584 70 training.label_smoothing 0.3184098438340645 +584 71 model.embedding_dim 0.0 +584 71 training.batch_size 1.0 +584 71 training.label_smoothing 0.06187306997316437 +584 72 model.embedding_dim 0.0 +584 72 training.batch_size 0.0 +584 72 training.label_smoothing 0.012586983923359602 +584 73 model.embedding_dim 0.0 +584 73 training.batch_size 2.0 +584 73 training.label_smoothing 0.02423854719121821 +584 74 model.embedding_dim 0.0 +584 74 training.batch_size 1.0 +584 74 training.label_smoothing 0.6333570299858655 +584 75 model.embedding_dim 1.0 +584 75 training.batch_size 0.0 +584 75 training.label_smoothing 0.16677214996894393 +584 76 model.embedding_dim 1.0 +584 76 training.batch_size 2.0 +584 76 training.label_smoothing 0.42640475077237106 +584 77 model.embedding_dim 2.0 +584 77 training.batch_size 1.0 +584 77 training.label_smoothing 0.4372486092675511 +584 78 model.embedding_dim 1.0 +584 78 training.batch_size 2.0 +584 78 training.label_smoothing 0.6193262874790927 +584 79 model.embedding_dim 0.0 +584 79 training.batch_size 1.0 +584 79 training.label_smoothing 0.01865125657866826 +584 80 model.embedding_dim 2.0 +584 80 training.batch_size 0.0 +584 80 training.label_smoothing 0.012045990146008946 +584 81 
model.embedding_dim 1.0 +584 81 training.batch_size 0.0 +584 81 training.label_smoothing 0.4148271443982358 +584 82 model.embedding_dim 0.0 +584 82 training.batch_size 1.0 +584 82 training.label_smoothing 0.17088925094113047 +584 83 model.embedding_dim 0.0 +584 83 training.batch_size 0.0 +584 83 training.label_smoothing 0.001440061015857787 +584 84 model.embedding_dim 1.0 +584 84 training.batch_size 0.0 +584 84 training.label_smoothing 0.001958614182125306 +584 85 model.embedding_dim 2.0 +584 85 training.batch_size 1.0 +584 85 training.label_smoothing 0.0038447577287068035 +584 86 model.embedding_dim 0.0 +584 86 training.batch_size 2.0 +584 86 training.label_smoothing 0.051237346117140245 +584 87 model.embedding_dim 0.0 +584 87 training.batch_size 0.0 +584 87 training.label_smoothing 0.05476909379669979 +584 88 model.embedding_dim 2.0 +584 88 training.batch_size 0.0 +584 88 training.label_smoothing 0.0039708943793335515 +584 89 model.embedding_dim 0.0 +584 89 training.batch_size 0.0 +584 89 training.label_smoothing 0.2015626953747692 +584 90 model.embedding_dim 2.0 +584 90 training.batch_size 2.0 +584 90 training.label_smoothing 0.4189345546320066 +584 91 model.embedding_dim 1.0 +584 91 training.batch_size 2.0 +584 91 training.label_smoothing 0.7648305639760999 +584 92 model.embedding_dim 0.0 +584 92 training.batch_size 0.0 +584 92 training.label_smoothing 0.0015396777761286166 +584 93 model.embedding_dim 2.0 +584 93 training.batch_size 0.0 +584 93 training.label_smoothing 0.0017981959919828663 +584 94 model.embedding_dim 2.0 +584 94 training.batch_size 1.0 +584 94 training.label_smoothing 0.48031696331500046 +584 95 model.embedding_dim 1.0 +584 95 training.batch_size 1.0 +584 95 training.label_smoothing 0.0037971285377777393 +584 96 model.embedding_dim 1.0 +584 96 training.batch_size 1.0 +584 96 training.label_smoothing 0.0028733634024537833 +584 97 model.embedding_dim 1.0 +584 97 training.batch_size 1.0 +584 97 training.label_smoothing 0.051183717888161936 +584 
98 model.embedding_dim 0.0 +584 98 training.batch_size 2.0 +584 98 training.label_smoothing 0.230801882906837 +584 99 model.embedding_dim 0.0 +584 99 training.batch_size 0.0 +584 99 training.label_smoothing 0.00977335217348923 +584 100 model.embedding_dim 1.0 +584 100 training.batch_size 2.0 +584 100 training.label_smoothing 0.09647646331557534 +584 1 dataset """kinships""" +584 1 model """rotate""" +584 1 loss """softplus""" +584 1 regularizer """no""" +584 1 optimizer """adadelta""" +584 1 training_loop """lcwa""" +584 1 evaluator """rankbased""" +584 2 dataset """kinships""" +584 2 model """rotate""" +584 2 loss """softplus""" +584 2 regularizer """no""" +584 2 optimizer """adadelta""" +584 2 training_loop """lcwa""" +584 2 evaluator """rankbased""" +584 3 dataset """kinships""" +584 3 model """rotate""" +584 3 loss """softplus""" +584 3 regularizer """no""" +584 3 optimizer """adadelta""" +584 3 training_loop """lcwa""" +584 3 evaluator """rankbased""" +584 4 dataset """kinships""" +584 4 model """rotate""" +584 4 loss """softplus""" +584 4 regularizer """no""" +584 4 optimizer """adadelta""" +584 4 training_loop """lcwa""" +584 4 evaluator """rankbased""" +584 5 dataset """kinships""" +584 5 model """rotate""" +584 5 loss """softplus""" +584 5 regularizer """no""" +584 5 optimizer """adadelta""" +584 5 training_loop """lcwa""" +584 5 evaluator """rankbased""" +584 6 dataset """kinships""" +584 6 model """rotate""" +584 6 loss """softplus""" +584 6 regularizer """no""" +584 6 optimizer """adadelta""" +584 6 training_loop """lcwa""" +584 6 evaluator """rankbased""" +584 7 dataset """kinships""" +584 7 model """rotate""" +584 7 loss """softplus""" +584 7 regularizer """no""" +584 7 optimizer """adadelta""" +584 7 training_loop """lcwa""" +584 7 evaluator """rankbased""" +584 8 dataset """kinships""" +584 8 model """rotate""" +584 8 loss """softplus""" +584 8 regularizer """no""" +584 8 optimizer """adadelta""" +584 8 training_loop """lcwa""" +584 8 evaluator 
"""rankbased""" +584 9 dataset """kinships""" +584 9 model """rotate""" +584 9 loss """softplus""" +584 9 regularizer """no""" +584 9 optimizer """adadelta""" +584 9 training_loop """lcwa""" +584 9 evaluator """rankbased""" +584 10 dataset """kinships""" +584 10 model """rotate""" +584 10 loss """softplus""" +584 10 regularizer """no""" +584 10 optimizer """adadelta""" +584 10 training_loop """lcwa""" +584 10 evaluator """rankbased""" +584 11 dataset """kinships""" +584 11 model """rotate""" +584 11 loss """softplus""" +584 11 regularizer """no""" +584 11 optimizer """adadelta""" +584 11 training_loop """lcwa""" +584 11 evaluator """rankbased""" +584 12 dataset """kinships""" +584 12 model """rotate""" +584 12 loss """softplus""" +584 12 regularizer """no""" +584 12 optimizer """adadelta""" +584 12 training_loop """lcwa""" +584 12 evaluator """rankbased""" +584 13 dataset """kinships""" +584 13 model """rotate""" +584 13 loss """softplus""" +584 13 regularizer """no""" +584 13 optimizer """adadelta""" +584 13 training_loop """lcwa""" +584 13 evaluator """rankbased""" +584 14 dataset """kinships""" +584 14 model """rotate""" +584 14 loss """softplus""" +584 14 regularizer """no""" +584 14 optimizer """adadelta""" +584 14 training_loop """lcwa""" +584 14 evaluator """rankbased""" +584 15 dataset """kinships""" +584 15 model """rotate""" +584 15 loss """softplus""" +584 15 regularizer """no""" +584 15 optimizer """adadelta""" +584 15 training_loop """lcwa""" +584 15 evaluator """rankbased""" +584 16 dataset """kinships""" +584 16 model """rotate""" +584 16 loss """softplus""" +584 16 regularizer """no""" +584 16 optimizer """adadelta""" +584 16 training_loop """lcwa""" +584 16 evaluator """rankbased""" +584 17 dataset """kinships""" +584 17 model """rotate""" +584 17 loss """softplus""" +584 17 regularizer """no""" +584 17 optimizer """adadelta""" +584 17 training_loop """lcwa""" +584 17 evaluator """rankbased""" +584 18 dataset """kinships""" +584 18 model 
"""rotate""" +584 18 loss """softplus""" +584 18 regularizer """no""" +584 18 optimizer """adadelta""" +584 18 training_loop """lcwa""" +584 18 evaluator """rankbased""" +584 19 dataset """kinships""" +584 19 model """rotate""" +584 19 loss """softplus""" +584 19 regularizer """no""" +584 19 optimizer """adadelta""" +584 19 training_loop """lcwa""" +584 19 evaluator """rankbased""" +584 20 dataset """kinships""" +584 20 model """rotate""" +584 20 loss """softplus""" +584 20 regularizer """no""" +584 20 optimizer """adadelta""" +584 20 training_loop """lcwa""" +584 20 evaluator """rankbased""" +584 21 dataset """kinships""" +584 21 model """rotate""" +584 21 loss """softplus""" +584 21 regularizer """no""" +584 21 optimizer """adadelta""" +584 21 training_loop """lcwa""" +584 21 evaluator """rankbased""" +584 22 dataset """kinships""" +584 22 model """rotate""" +584 22 loss """softplus""" +584 22 regularizer """no""" +584 22 optimizer """adadelta""" +584 22 training_loop """lcwa""" +584 22 evaluator """rankbased""" +584 23 dataset """kinships""" +584 23 model """rotate""" +584 23 loss """softplus""" +584 23 regularizer """no""" +584 23 optimizer """adadelta""" +584 23 training_loop """lcwa""" +584 23 evaluator """rankbased""" +584 24 dataset """kinships""" +584 24 model """rotate""" +584 24 loss """softplus""" +584 24 regularizer """no""" +584 24 optimizer """adadelta""" +584 24 training_loop """lcwa""" +584 24 evaluator """rankbased""" +584 25 dataset """kinships""" +584 25 model """rotate""" +584 25 loss """softplus""" +584 25 regularizer """no""" +584 25 optimizer """adadelta""" +584 25 training_loop """lcwa""" +584 25 evaluator """rankbased""" +584 26 dataset """kinships""" +584 26 model """rotate""" +584 26 loss """softplus""" +584 26 regularizer """no""" +584 26 optimizer """adadelta""" +584 26 training_loop """lcwa""" +584 26 evaluator """rankbased""" +584 27 dataset """kinships""" +584 27 model """rotate""" +584 27 loss """softplus""" +584 27 regularizer 
"""no""" +584 27 optimizer """adadelta""" +584 27 training_loop """lcwa""" +584 27 evaluator """rankbased""" +584 28 dataset """kinships""" +584 28 model """rotate""" +584 28 loss """softplus""" +584 28 regularizer """no""" +584 28 optimizer """adadelta""" +584 28 training_loop """lcwa""" +584 28 evaluator """rankbased""" +584 29 dataset """kinships""" +584 29 model """rotate""" +584 29 loss """softplus""" +584 29 regularizer """no""" +584 29 optimizer """adadelta""" +584 29 training_loop """lcwa""" +584 29 evaluator """rankbased""" +584 30 dataset """kinships""" +584 30 model """rotate""" +584 30 loss """softplus""" +584 30 regularizer """no""" +584 30 optimizer """adadelta""" +584 30 training_loop """lcwa""" +584 30 evaluator """rankbased""" +584 31 dataset """kinships""" +584 31 model """rotate""" +584 31 loss """softplus""" +584 31 regularizer """no""" +584 31 optimizer """adadelta""" +584 31 training_loop """lcwa""" +584 31 evaluator """rankbased""" +584 32 dataset """kinships""" +584 32 model """rotate""" +584 32 loss """softplus""" +584 32 regularizer """no""" +584 32 optimizer """adadelta""" +584 32 training_loop """lcwa""" +584 32 evaluator """rankbased""" +584 33 dataset """kinships""" +584 33 model """rotate""" +584 33 loss """softplus""" +584 33 regularizer """no""" +584 33 optimizer """adadelta""" +584 33 training_loop """lcwa""" +584 33 evaluator """rankbased""" +584 34 dataset """kinships""" +584 34 model """rotate""" +584 34 loss """softplus""" +584 34 regularizer """no""" +584 34 optimizer """adadelta""" +584 34 training_loop """lcwa""" +584 34 evaluator """rankbased""" +584 35 dataset """kinships""" +584 35 model """rotate""" +584 35 loss """softplus""" +584 35 regularizer """no""" +584 35 optimizer """adadelta""" +584 35 training_loop """lcwa""" +584 35 evaluator """rankbased""" +584 36 dataset """kinships""" +584 36 model """rotate""" +584 36 loss """softplus""" +584 36 regularizer """no""" +584 36 optimizer """adadelta""" +584 36 training_loop 
"""lcwa""" +584 36 evaluator """rankbased""" +584 37 dataset """kinships""" +584 37 model """rotate""" +584 37 loss """softplus""" +584 37 regularizer """no""" +584 37 optimizer """adadelta""" +584 37 training_loop """lcwa""" +584 37 evaluator """rankbased""" +584 38 dataset """kinships""" +584 38 model """rotate""" +584 38 loss """softplus""" +584 38 regularizer """no""" +584 38 optimizer """adadelta""" +584 38 training_loop """lcwa""" +584 38 evaluator """rankbased""" +584 39 dataset """kinships""" +584 39 model """rotate""" +584 39 loss """softplus""" +584 39 regularizer """no""" +584 39 optimizer """adadelta""" +584 39 training_loop """lcwa""" +584 39 evaluator """rankbased""" +584 40 dataset """kinships""" +584 40 model """rotate""" +584 40 loss """softplus""" +584 40 regularizer """no""" +584 40 optimizer """adadelta""" +584 40 training_loop """lcwa""" +584 40 evaluator """rankbased""" +584 41 dataset """kinships""" +584 41 model """rotate""" +584 41 loss """softplus""" +584 41 regularizer """no""" +584 41 optimizer """adadelta""" +584 41 training_loop """lcwa""" +584 41 evaluator """rankbased""" +584 42 dataset """kinships""" +584 42 model """rotate""" +584 42 loss """softplus""" +584 42 regularizer """no""" +584 42 optimizer """adadelta""" +584 42 training_loop """lcwa""" +584 42 evaluator """rankbased""" +584 43 dataset """kinships""" +584 43 model """rotate""" +584 43 loss """softplus""" +584 43 regularizer """no""" +584 43 optimizer """adadelta""" +584 43 training_loop """lcwa""" +584 43 evaluator """rankbased""" +584 44 dataset """kinships""" +584 44 model """rotate""" +584 44 loss """softplus""" +584 44 regularizer """no""" +584 44 optimizer """adadelta""" +584 44 training_loop """lcwa""" +584 44 evaluator """rankbased""" +584 45 dataset """kinships""" +584 45 model """rotate""" +584 45 loss """softplus""" +584 45 regularizer """no""" +584 45 optimizer """adadelta""" +584 45 training_loop """lcwa""" +584 45 evaluator """rankbased""" +584 46 dataset 
"""kinships""" +584 46 model """rotate""" +584 46 loss """softplus""" +584 46 regularizer """no""" +584 46 optimizer """adadelta""" +584 46 training_loop """lcwa""" +584 46 evaluator """rankbased""" +584 47 dataset """kinships""" +584 47 model """rotate""" +584 47 loss """softplus""" +584 47 regularizer """no""" +584 47 optimizer """adadelta""" +584 47 training_loop """lcwa""" +584 47 evaluator """rankbased""" +584 48 dataset """kinships""" +584 48 model """rotate""" +584 48 loss """softplus""" +584 48 regularizer """no""" +584 48 optimizer """adadelta""" +584 48 training_loop """lcwa""" +584 48 evaluator """rankbased""" +584 49 dataset """kinships""" +584 49 model """rotate""" +584 49 loss """softplus""" +584 49 regularizer """no""" +584 49 optimizer """adadelta""" +584 49 training_loop """lcwa""" +584 49 evaluator """rankbased""" +584 50 dataset """kinships""" +584 50 model """rotate""" +584 50 loss """softplus""" +584 50 regularizer """no""" +584 50 optimizer """adadelta""" +584 50 training_loop """lcwa""" +584 50 evaluator """rankbased""" +584 51 dataset """kinships""" +584 51 model """rotate""" +584 51 loss """softplus""" +584 51 regularizer """no""" +584 51 optimizer """adadelta""" +584 51 training_loop """lcwa""" +584 51 evaluator """rankbased""" +584 52 dataset """kinships""" +584 52 model """rotate""" +584 52 loss """softplus""" +584 52 regularizer """no""" +584 52 optimizer """adadelta""" +584 52 training_loop """lcwa""" +584 52 evaluator """rankbased""" +584 53 dataset """kinships""" +584 53 model """rotate""" +584 53 loss """softplus""" +584 53 regularizer """no""" +584 53 optimizer """adadelta""" +584 53 training_loop """lcwa""" +584 53 evaluator """rankbased""" +584 54 dataset """kinships""" +584 54 model """rotate""" +584 54 loss """softplus""" +584 54 regularizer """no""" +584 54 optimizer """adadelta""" +584 54 training_loop """lcwa""" +584 54 evaluator """rankbased""" +584 55 dataset """kinships""" +584 55 model """rotate""" +584 55 loss 
"""softplus""" +584 55 regularizer """no""" +584 55 optimizer """adadelta""" +584 55 training_loop """lcwa""" +584 55 evaluator """rankbased""" +584 56 dataset """kinships""" +584 56 model """rotate""" +584 56 loss """softplus""" +584 56 regularizer """no""" +584 56 optimizer """adadelta""" +584 56 training_loop """lcwa""" +584 56 evaluator """rankbased""" +584 57 dataset """kinships""" +584 57 model """rotate""" +584 57 loss """softplus""" +584 57 regularizer """no""" +584 57 optimizer """adadelta""" +584 57 training_loop """lcwa""" +584 57 evaluator """rankbased""" +584 58 dataset """kinships""" +584 58 model """rotate""" +584 58 loss """softplus""" +584 58 regularizer """no""" +584 58 optimizer """adadelta""" +584 58 training_loop """lcwa""" +584 58 evaluator """rankbased""" +584 59 dataset """kinships""" +584 59 model """rotate""" +584 59 loss """softplus""" +584 59 regularizer """no""" +584 59 optimizer """adadelta""" +584 59 training_loop """lcwa""" +584 59 evaluator """rankbased""" +584 60 dataset """kinships""" +584 60 model """rotate""" +584 60 loss """softplus""" +584 60 regularizer """no""" +584 60 optimizer """adadelta""" +584 60 training_loop """lcwa""" +584 60 evaluator """rankbased""" +584 61 dataset """kinships""" +584 61 model """rotate""" +584 61 loss """softplus""" +584 61 regularizer """no""" +584 61 optimizer """adadelta""" +584 61 training_loop """lcwa""" +584 61 evaluator """rankbased""" +584 62 dataset """kinships""" +584 62 model """rotate""" +584 62 loss """softplus""" +584 62 regularizer """no""" +584 62 optimizer """adadelta""" +584 62 training_loop """lcwa""" +584 62 evaluator """rankbased""" +584 63 dataset """kinships""" +584 63 model """rotate""" +584 63 loss """softplus""" +584 63 regularizer """no""" +584 63 optimizer """adadelta""" +584 63 training_loop """lcwa""" +584 63 evaluator """rankbased""" +584 64 dataset """kinships""" +584 64 model """rotate""" +584 64 loss """softplus""" +584 64 regularizer """no""" +584 64 optimizer 
"""adadelta""" +584 64 training_loop """lcwa""" +584 64 evaluator """rankbased""" +584 65 dataset """kinships""" +584 65 model """rotate""" +584 65 loss """softplus""" +584 65 regularizer """no""" +584 65 optimizer """adadelta""" +584 65 training_loop """lcwa""" +584 65 evaluator """rankbased""" +584 66 dataset """kinships""" +584 66 model """rotate""" +584 66 loss """softplus""" +584 66 regularizer """no""" +584 66 optimizer """adadelta""" +584 66 training_loop """lcwa""" +584 66 evaluator """rankbased""" +584 67 dataset """kinships""" +584 67 model """rotate""" +584 67 loss """softplus""" +584 67 regularizer """no""" +584 67 optimizer """adadelta""" +584 67 training_loop """lcwa""" +584 67 evaluator """rankbased""" +584 68 dataset """kinships""" +584 68 model """rotate""" +584 68 loss """softplus""" +584 68 regularizer """no""" +584 68 optimizer """adadelta""" +584 68 training_loop """lcwa""" +584 68 evaluator """rankbased""" +584 69 dataset """kinships""" +584 69 model """rotate""" +584 69 loss """softplus""" +584 69 regularizer """no""" +584 69 optimizer """adadelta""" +584 69 training_loop """lcwa""" +584 69 evaluator """rankbased""" +584 70 dataset """kinships""" +584 70 model """rotate""" +584 70 loss """softplus""" +584 70 regularizer """no""" +584 70 optimizer """adadelta""" +584 70 training_loop """lcwa""" +584 70 evaluator """rankbased""" +584 71 dataset """kinships""" +584 71 model """rotate""" +584 71 loss """softplus""" +584 71 regularizer """no""" +584 71 optimizer """adadelta""" +584 71 training_loop """lcwa""" +584 71 evaluator """rankbased""" +584 72 dataset """kinships""" +584 72 model """rotate""" +584 72 loss """softplus""" +584 72 regularizer """no""" +584 72 optimizer """adadelta""" +584 72 training_loop """lcwa""" +584 72 evaluator """rankbased""" +584 73 dataset """kinships""" +584 73 model """rotate""" +584 73 loss """softplus""" +584 73 regularizer """no""" +584 73 optimizer """adadelta""" +584 73 training_loop """lcwa""" +584 73 
evaluator """rankbased""" +584 74 dataset """kinships""" +584 74 model """rotate""" +584 74 loss """softplus""" +584 74 regularizer """no""" +584 74 optimizer """adadelta""" +584 74 training_loop """lcwa""" +584 74 evaluator """rankbased""" +584 75 dataset """kinships""" +584 75 model """rotate""" +584 75 loss """softplus""" +584 75 regularizer """no""" +584 75 optimizer """adadelta""" +584 75 training_loop """lcwa""" +584 75 evaluator """rankbased""" +584 76 dataset """kinships""" +584 76 model """rotate""" +584 76 loss """softplus""" +584 76 regularizer """no""" +584 76 optimizer """adadelta""" +584 76 training_loop """lcwa""" +584 76 evaluator """rankbased""" +584 77 dataset """kinships""" +584 77 model """rotate""" +584 77 loss """softplus""" +584 77 regularizer """no""" +584 77 optimizer """adadelta""" +584 77 training_loop """lcwa""" +584 77 evaluator """rankbased""" +584 78 dataset """kinships""" +584 78 model """rotate""" +584 78 loss """softplus""" +584 78 regularizer """no""" +584 78 optimizer """adadelta""" +584 78 training_loop """lcwa""" +584 78 evaluator """rankbased""" +584 79 dataset """kinships""" +584 79 model """rotate""" +584 79 loss """softplus""" +584 79 regularizer """no""" +584 79 optimizer """adadelta""" +584 79 training_loop """lcwa""" +584 79 evaluator """rankbased""" +584 80 dataset """kinships""" +584 80 model """rotate""" +584 80 loss """softplus""" +584 80 regularizer """no""" +584 80 optimizer """adadelta""" +584 80 training_loop """lcwa""" +584 80 evaluator """rankbased""" +584 81 dataset """kinships""" +584 81 model """rotate""" +584 81 loss """softplus""" +584 81 regularizer """no""" +584 81 optimizer """adadelta""" +584 81 training_loop """lcwa""" +584 81 evaluator """rankbased""" +584 82 dataset """kinships""" +584 82 model """rotate""" +584 82 loss """softplus""" +584 82 regularizer """no""" +584 82 optimizer """adadelta""" +584 82 training_loop """lcwa""" +584 82 evaluator """rankbased""" +584 83 dataset """kinships""" +584 83 
model """rotate""" +584 83 loss """softplus""" +584 83 regularizer """no""" +584 83 optimizer """adadelta""" +584 83 training_loop """lcwa""" +584 83 evaluator """rankbased""" +584 84 dataset """kinships""" +584 84 model """rotate""" +584 84 loss """softplus""" +584 84 regularizer """no""" +584 84 optimizer """adadelta""" +584 84 training_loop """lcwa""" +584 84 evaluator """rankbased""" +584 85 dataset """kinships""" +584 85 model """rotate""" +584 85 loss """softplus""" +584 85 regularizer """no""" +584 85 optimizer """adadelta""" +584 85 training_loop """lcwa""" +584 85 evaluator """rankbased""" +584 86 dataset """kinships""" +584 86 model """rotate""" +584 86 loss """softplus""" +584 86 regularizer """no""" +584 86 optimizer """adadelta""" +584 86 training_loop """lcwa""" +584 86 evaluator """rankbased""" +584 87 dataset """kinships""" +584 87 model """rotate""" +584 87 loss """softplus""" +584 87 regularizer """no""" +584 87 optimizer """adadelta""" +584 87 training_loop """lcwa""" +584 87 evaluator """rankbased""" +584 88 dataset """kinships""" +584 88 model """rotate""" +584 88 loss """softplus""" +584 88 regularizer """no""" +584 88 optimizer """adadelta""" +584 88 training_loop """lcwa""" +584 88 evaluator """rankbased""" +584 89 dataset """kinships""" +584 89 model """rotate""" +584 89 loss """softplus""" +584 89 regularizer """no""" +584 89 optimizer """adadelta""" +584 89 training_loop """lcwa""" +584 89 evaluator """rankbased""" +584 90 dataset """kinships""" +584 90 model """rotate""" +584 90 loss """softplus""" +584 90 regularizer """no""" +584 90 optimizer """adadelta""" +584 90 training_loop """lcwa""" +584 90 evaluator """rankbased""" +584 91 dataset """kinships""" +584 91 model """rotate""" +584 91 loss """softplus""" +584 91 regularizer """no""" +584 91 optimizer """adadelta""" +584 91 training_loop """lcwa""" +584 91 evaluator """rankbased""" +584 92 dataset """kinships""" +584 92 model """rotate""" +584 92 loss """softplus""" +584 92 
regularizer """no""" +584 92 optimizer """adadelta""" +584 92 training_loop """lcwa""" +584 92 evaluator """rankbased""" +584 93 dataset """kinships""" +584 93 model """rotate""" +584 93 loss """softplus""" +584 93 regularizer """no""" +584 93 optimizer """adadelta""" +584 93 training_loop """lcwa""" +584 93 evaluator """rankbased""" +584 94 dataset """kinships""" +584 94 model """rotate""" +584 94 loss """softplus""" +584 94 regularizer """no""" +584 94 optimizer """adadelta""" +584 94 training_loop """lcwa""" +584 94 evaluator """rankbased""" +584 95 dataset """kinships""" +584 95 model """rotate""" +584 95 loss """softplus""" +584 95 regularizer """no""" +584 95 optimizer """adadelta""" +584 95 training_loop """lcwa""" +584 95 evaluator """rankbased""" +584 96 dataset """kinships""" +584 96 model """rotate""" +584 96 loss """softplus""" +584 96 regularizer """no""" +584 96 optimizer """adadelta""" +584 96 training_loop """lcwa""" +584 96 evaluator """rankbased""" +584 97 dataset """kinships""" +584 97 model """rotate""" +584 97 loss """softplus""" +584 97 regularizer """no""" +584 97 optimizer """adadelta""" +584 97 training_loop """lcwa""" +584 97 evaluator """rankbased""" +584 98 dataset """kinships""" +584 98 model """rotate""" +584 98 loss """softplus""" +584 98 regularizer """no""" +584 98 optimizer """adadelta""" +584 98 training_loop """lcwa""" +584 98 evaluator """rankbased""" +584 99 dataset """kinships""" +584 99 model """rotate""" +584 99 loss """softplus""" +584 99 regularizer """no""" +584 99 optimizer """adadelta""" +584 99 training_loop """lcwa""" +584 99 evaluator """rankbased""" +584 100 dataset """kinships""" +584 100 model """rotate""" +584 100 loss """softplus""" +584 100 regularizer """no""" +584 100 optimizer """adadelta""" +584 100 training_loop """lcwa""" +584 100 evaluator """rankbased""" +585 1 model.embedding_dim 0.0 +585 1 training.batch_size 2.0 +585 1 training.label_smoothing 0.0243249056987387 +585 2 model.embedding_dim 2.0 +585 2 
training.batch_size 0.0 +585 2 training.label_smoothing 0.002342086886380235 +585 3 model.embedding_dim 1.0 +585 3 training.batch_size 2.0 +585 3 training.label_smoothing 0.0012087677358698105 +585 4 model.embedding_dim 2.0 +585 4 training.batch_size 0.0 +585 4 training.label_smoothing 0.09868744887839574 +585 5 model.embedding_dim 2.0 +585 5 training.batch_size 1.0 +585 5 training.label_smoothing 0.0026456985751078677 +585 6 model.embedding_dim 1.0 +585 6 training.batch_size 0.0 +585 6 training.label_smoothing 0.010867301098756383 +585 7 model.embedding_dim 0.0 +585 7 training.batch_size 1.0 +585 7 training.label_smoothing 0.006177676123313228 +585 8 model.embedding_dim 1.0 +585 8 training.batch_size 0.0 +585 8 training.label_smoothing 0.023720791103528954 +585 9 model.embedding_dim 0.0 +585 9 training.batch_size 1.0 +585 9 training.label_smoothing 0.07760629693523775 +585 10 model.embedding_dim 0.0 +585 10 training.batch_size 1.0 +585 10 training.label_smoothing 0.0622532259780237 +585 11 model.embedding_dim 1.0 +585 11 training.batch_size 1.0 +585 11 training.label_smoothing 0.0037549025286178345 +585 12 model.embedding_dim 1.0 +585 12 training.batch_size 1.0 +585 12 training.label_smoothing 0.001865753629177396 +585 13 model.embedding_dim 2.0 +585 13 training.batch_size 2.0 +585 13 training.label_smoothing 0.005436717895531957 +585 14 model.embedding_dim 0.0 +585 14 training.batch_size 0.0 +585 14 training.label_smoothing 0.18373867541040254 +585 15 model.embedding_dim 2.0 +585 15 training.batch_size 0.0 +585 15 training.label_smoothing 0.0070016728118834455 +585 16 model.embedding_dim 0.0 +585 16 training.batch_size 0.0 +585 16 training.label_smoothing 0.461334321696857 +585 17 model.embedding_dim 1.0 +585 17 training.batch_size 2.0 +585 17 training.label_smoothing 0.006831912850989434 +585 18 model.embedding_dim 0.0 +585 18 training.batch_size 0.0 +585 18 training.label_smoothing 0.11009546601292822 +585 19 model.embedding_dim 1.0 +585 19 training.batch_size 
1.0 +585 19 training.label_smoothing 0.001718722669718414 +585 20 model.embedding_dim 2.0 +585 20 training.batch_size 2.0 +585 20 training.label_smoothing 0.012927442848132792 +585 21 model.embedding_dim 1.0 +585 21 training.batch_size 0.0 +585 21 training.label_smoothing 0.00533687955849494 +585 22 model.embedding_dim 2.0 +585 22 training.batch_size 0.0 +585 22 training.label_smoothing 0.00470649696083984 +585 23 model.embedding_dim 2.0 +585 23 training.batch_size 2.0 +585 23 training.label_smoothing 0.019270974885072588 +585 24 model.embedding_dim 0.0 +585 24 training.batch_size 0.0 +585 24 training.label_smoothing 0.002092218547675723 +585 25 model.embedding_dim 2.0 +585 25 training.batch_size 2.0 +585 25 training.label_smoothing 0.05195972589512354 +585 26 model.embedding_dim 2.0 +585 26 training.batch_size 2.0 +585 26 training.label_smoothing 0.012656769114367868 +585 27 model.embedding_dim 0.0 +585 27 training.batch_size 1.0 +585 27 training.label_smoothing 0.9113488129091809 +585 28 model.embedding_dim 0.0 +585 28 training.batch_size 1.0 +585 28 training.label_smoothing 0.0016108803645904426 +585 29 model.embedding_dim 2.0 +585 29 training.batch_size 2.0 +585 29 training.label_smoothing 0.11386811716754944 +585 30 model.embedding_dim 1.0 +585 30 training.batch_size 2.0 +585 30 training.label_smoothing 0.0012841531275149263 +585 31 model.embedding_dim 0.0 +585 31 training.batch_size 2.0 +585 31 training.label_smoothing 0.006221880367524064 +585 32 model.embedding_dim 0.0 +585 32 training.batch_size 2.0 +585 32 training.label_smoothing 0.1010682017847684 +585 33 model.embedding_dim 1.0 +585 33 training.batch_size 1.0 +585 33 training.label_smoothing 0.36421180392814645 +585 34 model.embedding_dim 2.0 +585 34 training.batch_size 2.0 +585 34 training.label_smoothing 0.008826484698938528 +585 35 model.embedding_dim 2.0 +585 35 training.batch_size 1.0 +585 35 training.label_smoothing 0.02610400958805124 +585 36 model.embedding_dim 0.0 +585 36 training.batch_size 
2.0 +585 36 training.label_smoothing 0.3333781021570115 +585 37 model.embedding_dim 1.0 +585 37 training.batch_size 1.0 +585 37 training.label_smoothing 0.16860633546728157 +585 38 model.embedding_dim 0.0 +585 38 training.batch_size 0.0 +585 38 training.label_smoothing 0.08060793969392809 +585 39 model.embedding_dim 2.0 +585 39 training.batch_size 2.0 +585 39 training.label_smoothing 0.04426437815493866 +585 40 model.embedding_dim 2.0 +585 40 training.batch_size 0.0 +585 40 training.label_smoothing 0.5036565198580496 +585 41 model.embedding_dim 0.0 +585 41 training.batch_size 2.0 +585 41 training.label_smoothing 0.07408580322123488 +585 42 model.embedding_dim 0.0 +585 42 training.batch_size 2.0 +585 42 training.label_smoothing 0.2865513894188899 +585 43 model.embedding_dim 1.0 +585 43 training.batch_size 2.0 +585 43 training.label_smoothing 0.5056501526109612 +585 44 model.embedding_dim 2.0 +585 44 training.batch_size 2.0 +585 44 training.label_smoothing 0.2173824505537865 +585 45 model.embedding_dim 2.0 +585 45 training.batch_size 0.0 +585 45 training.label_smoothing 0.00828380551698712 +585 46 model.embedding_dim 2.0 +585 46 training.batch_size 1.0 +585 46 training.label_smoothing 0.17259239096350212 +585 47 model.embedding_dim 0.0 +585 47 training.batch_size 2.0 +585 47 training.label_smoothing 0.0019362646180500255 +585 48 model.embedding_dim 1.0 +585 48 training.batch_size 0.0 +585 48 training.label_smoothing 0.004997054275902028 +585 49 model.embedding_dim 0.0 +585 49 training.batch_size 2.0 +585 49 training.label_smoothing 0.4518377297494014 +585 50 model.embedding_dim 1.0 +585 50 training.batch_size 0.0 +585 50 training.label_smoothing 0.010208631670260888 +585 51 model.embedding_dim 1.0 +585 51 training.batch_size 2.0 +585 51 training.label_smoothing 0.22981544903285908 +585 52 model.embedding_dim 0.0 +585 52 training.batch_size 0.0 +585 52 training.label_smoothing 0.17153392533214434 +585 53 model.embedding_dim 0.0 +585 53 training.batch_size 2.0 +585 53 
training.label_smoothing 0.09055515227097377 +585 54 model.embedding_dim 1.0 +585 54 training.batch_size 1.0 +585 54 training.label_smoothing 0.6997652328699601 +585 55 model.embedding_dim 1.0 +585 55 training.batch_size 2.0 +585 55 training.label_smoothing 0.5418546585111298 +585 56 model.embedding_dim 2.0 +585 56 training.batch_size 2.0 +585 56 training.label_smoothing 0.0284466925586326 +585 57 model.embedding_dim 1.0 +585 57 training.batch_size 0.0 +585 57 training.label_smoothing 0.006852053890194384 +585 58 model.embedding_dim 0.0 +585 58 training.batch_size 2.0 +585 58 training.label_smoothing 0.05226064943446814 +585 59 model.embedding_dim 1.0 +585 59 training.batch_size 0.0 +585 59 training.label_smoothing 0.002465093524840863 +585 60 model.embedding_dim 2.0 +585 60 training.batch_size 0.0 +585 60 training.label_smoothing 0.2254712141579525 +585 61 model.embedding_dim 0.0 +585 61 training.batch_size 0.0 +585 61 training.label_smoothing 0.016317980739968457 +585 62 model.embedding_dim 0.0 +585 62 training.batch_size 0.0 +585 62 training.label_smoothing 0.004528853829240604 +585 63 model.embedding_dim 1.0 +585 63 training.batch_size 0.0 +585 63 training.label_smoothing 0.00807214911413732 +585 64 model.embedding_dim 1.0 +585 64 training.batch_size 0.0 +585 64 training.label_smoothing 0.022457205005342314 +585 65 model.embedding_dim 0.0 +585 65 training.batch_size 1.0 +585 65 training.label_smoothing 0.019341488348021796 +585 66 model.embedding_dim 2.0 +585 66 training.batch_size 2.0 +585 66 training.label_smoothing 0.010465559852594394 +585 67 model.embedding_dim 0.0 +585 67 training.batch_size 2.0 +585 67 training.label_smoothing 0.005139316259572781 +585 68 model.embedding_dim 0.0 +585 68 training.batch_size 2.0 +585 68 training.label_smoothing 0.0036778126457956096 +585 69 model.embedding_dim 2.0 +585 69 training.batch_size 0.0 +585 69 training.label_smoothing 0.17925014137209383 +585 70 model.embedding_dim 2.0 +585 70 training.batch_size 1.0 +585 70 
training.label_smoothing 0.0098751355537188 +585 71 model.embedding_dim 0.0 +585 71 training.batch_size 1.0 +585 71 training.label_smoothing 0.022678240022842924 +585 72 model.embedding_dim 1.0 +585 72 training.batch_size 1.0 +585 72 training.label_smoothing 0.17844664563791784 +585 73 model.embedding_dim 1.0 +585 73 training.batch_size 1.0 +585 73 training.label_smoothing 0.010585226068422845 +585 74 model.embedding_dim 2.0 +585 74 training.batch_size 2.0 +585 74 training.label_smoothing 0.07359657796303987 +585 75 model.embedding_dim 0.0 +585 75 training.batch_size 2.0 +585 75 training.label_smoothing 0.022679653436482806 +585 76 model.embedding_dim 1.0 +585 76 training.batch_size 1.0 +585 76 training.label_smoothing 0.014688838626495433 +585 77 model.embedding_dim 0.0 +585 77 training.batch_size 0.0 +585 77 training.label_smoothing 0.06847248260792525 +585 78 model.embedding_dim 0.0 +585 78 training.batch_size 0.0 +585 78 training.label_smoothing 0.07220508946827511 +585 79 model.embedding_dim 1.0 +585 79 training.batch_size 2.0 +585 79 training.label_smoothing 0.002668594265957153 +585 80 model.embedding_dim 0.0 +585 80 training.batch_size 0.0 +585 80 training.label_smoothing 0.014521469461042356 +585 81 model.embedding_dim 0.0 +585 81 training.batch_size 2.0 +585 81 training.label_smoothing 0.9736218988370331 +585 82 model.embedding_dim 0.0 +585 82 training.batch_size 0.0 +585 82 training.label_smoothing 0.00793631289571575 +585 83 model.embedding_dim 2.0 +585 83 training.batch_size 1.0 +585 83 training.label_smoothing 0.006226688079530374 +585 84 model.embedding_dim 0.0 +585 84 training.batch_size 0.0 +585 84 training.label_smoothing 0.10056819852975146 +585 85 model.embedding_dim 2.0 +585 85 training.batch_size 2.0 +585 85 training.label_smoothing 0.0010300241989059315 +585 86 model.embedding_dim 0.0 +585 86 training.batch_size 1.0 +585 86 training.label_smoothing 0.03283676537797184 +585 87 model.embedding_dim 1.0 +585 87 training.batch_size 0.0 +585 87 
training.label_smoothing 0.002567853016379631 +585 88 model.embedding_dim 1.0 +585 88 training.batch_size 1.0 +585 88 training.label_smoothing 0.0035623061712728734 +585 89 model.embedding_dim 0.0 +585 89 training.batch_size 2.0 +585 89 training.label_smoothing 0.5299614329102942 +585 90 model.embedding_dim 2.0 +585 90 training.batch_size 1.0 +585 90 training.label_smoothing 0.7512355463151676 +585 91 model.embedding_dim 1.0 +585 91 training.batch_size 2.0 +585 91 training.label_smoothing 0.012833501776147786 +585 92 model.embedding_dim 0.0 +585 92 training.batch_size 2.0 +585 92 training.label_smoothing 0.10712045412830536 +585 93 model.embedding_dim 2.0 +585 93 training.batch_size 0.0 +585 93 training.label_smoothing 0.027735086523757176 +585 94 model.embedding_dim 2.0 +585 94 training.batch_size 0.0 +585 94 training.label_smoothing 0.034926289537139284 +585 95 model.embedding_dim 2.0 +585 95 training.batch_size 1.0 +585 95 training.label_smoothing 0.007387745337098994 +585 96 model.embedding_dim 0.0 +585 96 training.batch_size 1.0 +585 96 training.label_smoothing 0.0019467347421734287 +585 97 model.embedding_dim 2.0 +585 97 training.batch_size 2.0 +585 97 training.label_smoothing 0.009546167770613501 +585 98 model.embedding_dim 0.0 +585 98 training.batch_size 1.0 +585 98 training.label_smoothing 0.0038176708422732043 +585 99 model.embedding_dim 0.0 +585 99 training.batch_size 0.0 +585 99 training.label_smoothing 0.009222913324371028 +585 100 model.embedding_dim 1.0 +585 100 training.batch_size 0.0 +585 100 training.label_smoothing 0.007582244000450148 +585 1 dataset """kinships""" +585 1 model """rotate""" +585 1 loss """bceaftersigmoid""" +585 1 regularizer """no""" +585 1 optimizer """adadelta""" +585 1 training_loop """lcwa""" +585 1 evaluator """rankbased""" +585 2 dataset """kinships""" +585 2 model """rotate""" +585 2 loss """bceaftersigmoid""" +585 2 regularizer """no""" +585 2 optimizer """adadelta""" +585 2 training_loop """lcwa""" +585 2 evaluator 
"""rankbased""" +585 3 dataset """kinships""" +585 3 model """rotate""" +585 3 loss """bceaftersigmoid""" +585 3 regularizer """no""" +585 3 optimizer """adadelta""" +585 3 training_loop """lcwa""" +585 3 evaluator """rankbased""" +585 4 dataset """kinships""" +585 4 model """rotate""" +585 4 loss """bceaftersigmoid""" +585 4 regularizer """no""" +585 4 optimizer """adadelta""" +585 4 training_loop """lcwa""" +585 4 evaluator """rankbased""" +585 5 dataset """kinships""" +585 5 model """rotate""" +585 5 loss """bceaftersigmoid""" +585 5 regularizer """no""" +585 5 optimizer """adadelta""" +585 5 training_loop """lcwa""" +585 5 evaluator """rankbased""" +585 6 dataset """kinships""" +585 6 model """rotate""" +585 6 loss """bceaftersigmoid""" +585 6 regularizer """no""" +585 6 optimizer """adadelta""" +585 6 training_loop """lcwa""" +585 6 evaluator """rankbased""" +585 7 dataset """kinships""" +585 7 model """rotate""" +585 7 loss """bceaftersigmoid""" +585 7 regularizer """no""" +585 7 optimizer """adadelta""" +585 7 training_loop """lcwa""" +585 7 evaluator """rankbased""" +585 8 dataset """kinships""" +585 8 model """rotate""" +585 8 loss """bceaftersigmoid""" +585 8 regularizer """no""" +585 8 optimizer """adadelta""" +585 8 training_loop """lcwa""" +585 8 evaluator """rankbased""" +585 9 dataset """kinships""" +585 9 model """rotate""" +585 9 loss """bceaftersigmoid""" +585 9 regularizer """no""" +585 9 optimizer """adadelta""" +585 9 training_loop """lcwa""" +585 9 evaluator """rankbased""" +585 10 dataset """kinships""" +585 10 model """rotate""" +585 10 loss """bceaftersigmoid""" +585 10 regularizer """no""" +585 10 optimizer """adadelta""" +585 10 training_loop """lcwa""" +585 10 evaluator """rankbased""" +585 11 dataset """kinships""" +585 11 model """rotate""" +585 11 loss """bceaftersigmoid""" +585 11 regularizer """no""" +585 11 optimizer """adadelta""" +585 11 training_loop """lcwa""" +585 11 evaluator """rankbased""" +585 12 dataset """kinships""" 
+585 12 model """rotate""" +585 12 loss """bceaftersigmoid""" +585 12 regularizer """no""" +585 12 optimizer """adadelta""" +585 12 training_loop """lcwa""" +585 12 evaluator """rankbased""" +585 13 dataset """kinships""" +585 13 model """rotate""" +585 13 loss """bceaftersigmoid""" +585 13 regularizer """no""" +585 13 optimizer """adadelta""" +585 13 training_loop """lcwa""" +585 13 evaluator """rankbased""" +585 14 dataset """kinships""" +585 14 model """rotate""" +585 14 loss """bceaftersigmoid""" +585 14 regularizer """no""" +585 14 optimizer """adadelta""" +585 14 training_loop """lcwa""" +585 14 evaluator """rankbased""" +585 15 dataset """kinships""" +585 15 model """rotate""" +585 15 loss """bceaftersigmoid""" +585 15 regularizer """no""" +585 15 optimizer """adadelta""" +585 15 training_loop """lcwa""" +585 15 evaluator """rankbased""" +585 16 dataset """kinships""" +585 16 model """rotate""" +585 16 loss """bceaftersigmoid""" +585 16 regularizer """no""" +585 16 optimizer """adadelta""" +585 16 training_loop """lcwa""" +585 16 evaluator """rankbased""" +585 17 dataset """kinships""" +585 17 model """rotate""" +585 17 loss """bceaftersigmoid""" +585 17 regularizer """no""" +585 17 optimizer """adadelta""" +585 17 training_loop """lcwa""" +585 17 evaluator """rankbased""" +585 18 dataset """kinships""" +585 18 model """rotate""" +585 18 loss """bceaftersigmoid""" +585 18 regularizer """no""" +585 18 optimizer """adadelta""" +585 18 training_loop """lcwa""" +585 18 evaluator """rankbased""" +585 19 dataset """kinships""" +585 19 model """rotate""" +585 19 loss """bceaftersigmoid""" +585 19 regularizer """no""" +585 19 optimizer """adadelta""" +585 19 training_loop """lcwa""" +585 19 evaluator """rankbased""" +585 20 dataset """kinships""" +585 20 model """rotate""" +585 20 loss """bceaftersigmoid""" +585 20 regularizer """no""" +585 20 optimizer """adadelta""" +585 20 training_loop """lcwa""" +585 20 evaluator """rankbased""" +585 21 dataset """kinships""" 
+585 21 model """rotate""" +585 21 loss """bceaftersigmoid""" +585 21 regularizer """no""" +585 21 optimizer """adadelta""" +585 21 training_loop """lcwa""" +585 21 evaluator """rankbased""" +585 22 dataset """kinships""" +585 22 model """rotate""" +585 22 loss """bceaftersigmoid""" +585 22 regularizer """no""" +585 22 optimizer """adadelta""" +585 22 training_loop """lcwa""" +585 22 evaluator """rankbased""" +585 23 dataset """kinships""" +585 23 model """rotate""" +585 23 loss """bceaftersigmoid""" +585 23 regularizer """no""" +585 23 optimizer """adadelta""" +585 23 training_loop """lcwa""" +585 23 evaluator """rankbased""" +585 24 dataset """kinships""" +585 24 model """rotate""" +585 24 loss """bceaftersigmoid""" +585 24 regularizer """no""" +585 24 optimizer """adadelta""" +585 24 training_loop """lcwa""" +585 24 evaluator """rankbased""" +585 25 dataset """kinships""" +585 25 model """rotate""" +585 25 loss """bceaftersigmoid""" +585 25 regularizer """no""" +585 25 optimizer """adadelta""" +585 25 training_loop """lcwa""" +585 25 evaluator """rankbased""" +585 26 dataset """kinships""" +585 26 model """rotate""" +585 26 loss """bceaftersigmoid""" +585 26 regularizer """no""" +585 26 optimizer """adadelta""" +585 26 training_loop """lcwa""" +585 26 evaluator """rankbased""" +585 27 dataset """kinships""" +585 27 model """rotate""" +585 27 loss """bceaftersigmoid""" +585 27 regularizer """no""" +585 27 optimizer """adadelta""" +585 27 training_loop """lcwa""" +585 27 evaluator """rankbased""" +585 28 dataset """kinships""" +585 28 model """rotate""" +585 28 loss """bceaftersigmoid""" +585 28 regularizer """no""" +585 28 optimizer """adadelta""" +585 28 training_loop """lcwa""" +585 28 evaluator """rankbased""" +585 29 dataset """kinships""" +585 29 model """rotate""" +585 29 loss """bceaftersigmoid""" +585 29 regularizer """no""" +585 29 optimizer """adadelta""" +585 29 training_loop """lcwa""" +585 29 evaluator """rankbased""" +585 30 dataset """kinships""" 
+585 30 model """rotate""" +585 30 loss """bceaftersigmoid""" +585 30 regularizer """no""" +585 30 optimizer """adadelta""" +585 30 training_loop """lcwa""" +585 30 evaluator """rankbased""" +585 31 dataset """kinships""" +585 31 model """rotate""" +585 31 loss """bceaftersigmoid""" +585 31 regularizer """no""" +585 31 optimizer """adadelta""" +585 31 training_loop """lcwa""" +585 31 evaluator """rankbased""" +585 32 dataset """kinships""" +585 32 model """rotate""" +585 32 loss """bceaftersigmoid""" +585 32 regularizer """no""" +585 32 optimizer """adadelta""" +585 32 training_loop """lcwa""" +585 32 evaluator """rankbased""" +585 33 dataset """kinships""" +585 33 model """rotate""" +585 33 loss """bceaftersigmoid""" +585 33 regularizer """no""" +585 33 optimizer """adadelta""" +585 33 training_loop """lcwa""" +585 33 evaluator """rankbased""" +585 34 dataset """kinships""" +585 34 model """rotate""" +585 34 loss """bceaftersigmoid""" +585 34 regularizer """no""" +585 34 optimizer """adadelta""" +585 34 training_loop """lcwa""" +585 34 evaluator """rankbased""" +585 35 dataset """kinships""" +585 35 model """rotate""" +585 35 loss """bceaftersigmoid""" +585 35 regularizer """no""" +585 35 optimizer """adadelta""" +585 35 training_loop """lcwa""" +585 35 evaluator """rankbased""" +585 36 dataset """kinships""" +585 36 model """rotate""" +585 36 loss """bceaftersigmoid""" +585 36 regularizer """no""" +585 36 optimizer """adadelta""" +585 36 training_loop """lcwa""" +585 36 evaluator """rankbased""" +585 37 dataset """kinships""" +585 37 model """rotate""" +585 37 loss """bceaftersigmoid""" +585 37 regularizer """no""" +585 37 optimizer """adadelta""" +585 37 training_loop """lcwa""" +585 37 evaluator """rankbased""" +585 38 dataset """kinships""" +585 38 model """rotate""" +585 38 loss """bceaftersigmoid""" +585 38 regularizer """no""" +585 38 optimizer """adadelta""" +585 38 training_loop """lcwa""" +585 38 evaluator """rankbased""" +585 39 dataset """kinships""" 
+585 39 model """rotate""" +585 39 loss """bceaftersigmoid""" +585 39 regularizer """no""" +585 39 optimizer """adadelta""" +585 39 training_loop """lcwa""" +585 39 evaluator """rankbased""" +585 40 dataset """kinships""" +585 40 model """rotate""" +585 40 loss """bceaftersigmoid""" +585 40 regularizer """no""" +585 40 optimizer """adadelta""" +585 40 training_loop """lcwa""" +585 40 evaluator """rankbased""" +585 41 dataset """kinships""" +585 41 model """rotate""" +585 41 loss """bceaftersigmoid""" +585 41 regularizer """no""" +585 41 optimizer """adadelta""" +585 41 training_loop """lcwa""" +585 41 evaluator """rankbased""" +585 42 dataset """kinships""" +585 42 model """rotate""" +585 42 loss """bceaftersigmoid""" +585 42 regularizer """no""" +585 42 optimizer """adadelta""" +585 42 training_loop """lcwa""" +585 42 evaluator """rankbased""" +585 43 dataset """kinships""" +585 43 model """rotate""" +585 43 loss """bceaftersigmoid""" +585 43 regularizer """no""" +585 43 optimizer """adadelta""" +585 43 training_loop """lcwa""" +585 43 evaluator """rankbased""" +585 44 dataset """kinships""" +585 44 model """rotate""" +585 44 loss """bceaftersigmoid""" +585 44 regularizer """no""" +585 44 optimizer """adadelta""" +585 44 training_loop """lcwa""" +585 44 evaluator """rankbased""" +585 45 dataset """kinships""" +585 45 model """rotate""" +585 45 loss """bceaftersigmoid""" +585 45 regularizer """no""" +585 45 optimizer """adadelta""" +585 45 training_loop """lcwa""" +585 45 evaluator """rankbased""" +585 46 dataset """kinships""" +585 46 model """rotate""" +585 46 loss """bceaftersigmoid""" +585 46 regularizer """no""" +585 46 optimizer """adadelta""" +585 46 training_loop """lcwa""" +585 46 evaluator """rankbased""" +585 47 dataset """kinships""" +585 47 model """rotate""" +585 47 loss """bceaftersigmoid""" +585 47 regularizer """no""" +585 47 optimizer """adadelta""" +585 47 training_loop """lcwa""" +585 47 evaluator """rankbased""" +585 48 dataset """kinships""" 
+585 48 model """rotate""" +585 48 loss """bceaftersigmoid""" +585 48 regularizer """no""" +585 48 optimizer """adadelta""" +585 48 training_loop """lcwa""" +585 48 evaluator """rankbased""" +585 49 dataset """kinships""" +585 49 model """rotate""" +585 49 loss """bceaftersigmoid""" +585 49 regularizer """no""" +585 49 optimizer """adadelta""" +585 49 training_loop """lcwa""" +585 49 evaluator """rankbased""" +585 50 dataset """kinships""" +585 50 model """rotate""" +585 50 loss """bceaftersigmoid""" +585 50 regularizer """no""" +585 50 optimizer """adadelta""" +585 50 training_loop """lcwa""" +585 50 evaluator """rankbased""" +585 51 dataset """kinships""" +585 51 model """rotate""" +585 51 loss """bceaftersigmoid""" +585 51 regularizer """no""" +585 51 optimizer """adadelta""" +585 51 training_loop """lcwa""" +585 51 evaluator """rankbased""" +585 52 dataset """kinships""" +585 52 model """rotate""" +585 52 loss """bceaftersigmoid""" +585 52 regularizer """no""" +585 52 optimizer """adadelta""" +585 52 training_loop """lcwa""" +585 52 evaluator """rankbased""" +585 53 dataset """kinships""" +585 53 model """rotate""" +585 53 loss """bceaftersigmoid""" +585 53 regularizer """no""" +585 53 optimizer """adadelta""" +585 53 training_loop """lcwa""" +585 53 evaluator """rankbased""" +585 54 dataset """kinships""" +585 54 model """rotate""" +585 54 loss """bceaftersigmoid""" +585 54 regularizer """no""" +585 54 optimizer """adadelta""" +585 54 training_loop """lcwa""" +585 54 evaluator """rankbased""" +585 55 dataset """kinships""" +585 55 model """rotate""" +585 55 loss """bceaftersigmoid""" +585 55 regularizer """no""" +585 55 optimizer """adadelta""" +585 55 training_loop """lcwa""" +585 55 evaluator """rankbased""" +585 56 dataset """kinships""" +585 56 model """rotate""" +585 56 loss """bceaftersigmoid""" +585 56 regularizer """no""" +585 56 optimizer """adadelta""" +585 56 training_loop """lcwa""" +585 56 evaluator """rankbased""" +585 57 dataset """kinships""" 
+585 57 model """rotate""" +585 57 loss """bceaftersigmoid""" +585 57 regularizer """no""" +585 57 optimizer """adadelta""" +585 57 training_loop """lcwa""" +585 57 evaluator """rankbased""" +585 58 dataset """kinships""" +585 58 model """rotate""" +585 58 loss """bceaftersigmoid""" +585 58 regularizer """no""" +585 58 optimizer """adadelta""" +585 58 training_loop """lcwa""" +585 58 evaluator """rankbased""" +585 59 dataset """kinships""" +585 59 model """rotate""" +585 59 loss """bceaftersigmoid""" +585 59 regularizer """no""" +585 59 optimizer """adadelta""" +585 59 training_loop """lcwa""" +585 59 evaluator """rankbased""" +585 60 dataset """kinships""" +585 60 model """rotate""" +585 60 loss """bceaftersigmoid""" +585 60 regularizer """no""" +585 60 optimizer """adadelta""" +585 60 training_loop """lcwa""" +585 60 evaluator """rankbased""" +585 61 dataset """kinships""" +585 61 model """rotate""" +585 61 loss """bceaftersigmoid""" +585 61 regularizer """no""" +585 61 optimizer """adadelta""" +585 61 training_loop """lcwa""" +585 61 evaluator """rankbased""" +585 62 dataset """kinships""" +585 62 model """rotate""" +585 62 loss """bceaftersigmoid""" +585 62 regularizer """no""" +585 62 optimizer """adadelta""" +585 62 training_loop """lcwa""" +585 62 evaluator """rankbased""" +585 63 dataset """kinships""" +585 63 model """rotate""" +585 63 loss """bceaftersigmoid""" +585 63 regularizer """no""" +585 63 optimizer """adadelta""" +585 63 training_loop """lcwa""" +585 63 evaluator """rankbased""" +585 64 dataset """kinships""" +585 64 model """rotate""" +585 64 loss """bceaftersigmoid""" +585 64 regularizer """no""" +585 64 optimizer """adadelta""" +585 64 training_loop """lcwa""" +585 64 evaluator """rankbased""" +585 65 dataset """kinships""" +585 65 model """rotate""" +585 65 loss """bceaftersigmoid""" +585 65 regularizer """no""" +585 65 optimizer """adadelta""" +585 65 training_loop """lcwa""" +585 65 evaluator """rankbased""" +585 66 dataset """kinships""" 
+585 66 model """rotate""" +585 66 loss """bceaftersigmoid""" +585 66 regularizer """no""" +585 66 optimizer """adadelta""" +585 66 training_loop """lcwa""" +585 66 evaluator """rankbased""" +585 67 dataset """kinships""" +585 67 model """rotate""" +585 67 loss """bceaftersigmoid""" +585 67 regularizer """no""" +585 67 optimizer """adadelta""" +585 67 training_loop """lcwa""" +585 67 evaluator """rankbased""" +585 68 dataset """kinships""" +585 68 model """rotate""" +585 68 loss """bceaftersigmoid""" +585 68 regularizer """no""" +585 68 optimizer """adadelta""" +585 68 training_loop """lcwa""" +585 68 evaluator """rankbased""" +585 69 dataset """kinships""" +585 69 model """rotate""" +585 69 loss """bceaftersigmoid""" +585 69 regularizer """no""" +585 69 optimizer """adadelta""" +585 69 training_loop """lcwa""" +585 69 evaluator """rankbased""" +585 70 dataset """kinships""" +585 70 model """rotate""" +585 70 loss """bceaftersigmoid""" +585 70 regularizer """no""" +585 70 optimizer """adadelta""" +585 70 training_loop """lcwa""" +585 70 evaluator """rankbased""" +585 71 dataset """kinships""" +585 71 model """rotate""" +585 71 loss """bceaftersigmoid""" +585 71 regularizer """no""" +585 71 optimizer """adadelta""" +585 71 training_loop """lcwa""" +585 71 evaluator """rankbased""" +585 72 dataset """kinships""" +585 72 model """rotate""" +585 72 loss """bceaftersigmoid""" +585 72 regularizer """no""" +585 72 optimizer """adadelta""" +585 72 training_loop """lcwa""" +585 72 evaluator """rankbased""" +585 73 dataset """kinships""" +585 73 model """rotate""" +585 73 loss """bceaftersigmoid""" +585 73 regularizer """no""" +585 73 optimizer """adadelta""" +585 73 training_loop """lcwa""" +585 73 evaluator """rankbased""" +585 74 dataset """kinships""" +585 74 model """rotate""" +585 74 loss """bceaftersigmoid""" +585 74 regularizer """no""" +585 74 optimizer """adadelta""" +585 74 training_loop """lcwa""" +585 74 evaluator """rankbased""" +585 75 dataset """kinships""" 
+585 75 model """rotate""" +585 75 loss """bceaftersigmoid""" +585 75 regularizer """no""" +585 75 optimizer """adadelta""" +585 75 training_loop """lcwa""" +585 75 evaluator """rankbased""" +585 76 dataset """kinships""" +585 76 model """rotate""" +585 76 loss """bceaftersigmoid""" +585 76 regularizer """no""" +585 76 optimizer """adadelta""" +585 76 training_loop """lcwa""" +585 76 evaluator """rankbased""" +585 77 dataset """kinships""" +585 77 model """rotate""" +585 77 loss """bceaftersigmoid""" +585 77 regularizer """no""" +585 77 optimizer """adadelta""" +585 77 training_loop """lcwa""" +585 77 evaluator """rankbased""" +585 78 dataset """kinships""" +585 78 model """rotate""" +585 78 loss """bceaftersigmoid""" +585 78 regularizer """no""" +585 78 optimizer """adadelta""" +585 78 training_loop """lcwa""" +585 78 evaluator """rankbased""" +585 79 dataset """kinships""" +585 79 model """rotate""" +585 79 loss """bceaftersigmoid""" +585 79 regularizer """no""" +585 79 optimizer """adadelta""" +585 79 training_loop """lcwa""" +585 79 evaluator """rankbased""" +585 80 dataset """kinships""" +585 80 model """rotate""" +585 80 loss """bceaftersigmoid""" +585 80 regularizer """no""" +585 80 optimizer """adadelta""" +585 80 training_loop """lcwa""" +585 80 evaluator """rankbased""" +585 81 dataset """kinships""" +585 81 model """rotate""" +585 81 loss """bceaftersigmoid""" +585 81 regularizer """no""" +585 81 optimizer """adadelta""" +585 81 training_loop """lcwa""" +585 81 evaluator """rankbased""" +585 82 dataset """kinships""" +585 82 model """rotate""" +585 82 loss """bceaftersigmoid""" +585 82 regularizer """no""" +585 82 optimizer """adadelta""" +585 82 training_loop """lcwa""" +585 82 evaluator """rankbased""" +585 83 dataset """kinships""" +585 83 model """rotate""" +585 83 loss """bceaftersigmoid""" +585 83 regularizer """no""" +585 83 optimizer """adadelta""" +585 83 training_loop """lcwa""" +585 83 evaluator """rankbased""" +585 84 dataset """kinships""" 
+585 84 model """rotate""" +585 84 loss """bceaftersigmoid""" +585 84 regularizer """no""" +585 84 optimizer """adadelta""" +585 84 training_loop """lcwa""" +585 84 evaluator """rankbased""" +585 85 dataset """kinships""" +585 85 model """rotate""" +585 85 loss """bceaftersigmoid""" +585 85 regularizer """no""" +585 85 optimizer """adadelta""" +585 85 training_loop """lcwa""" +585 85 evaluator """rankbased""" +585 86 dataset """kinships""" +585 86 model """rotate""" +585 86 loss """bceaftersigmoid""" +585 86 regularizer """no""" +585 86 optimizer """adadelta""" +585 86 training_loop """lcwa""" +585 86 evaluator """rankbased""" +585 87 dataset """kinships""" +585 87 model """rotate""" +585 87 loss """bceaftersigmoid""" +585 87 regularizer """no""" +585 87 optimizer """adadelta""" +585 87 training_loop """lcwa""" +585 87 evaluator """rankbased""" +585 88 dataset """kinships""" +585 88 model """rotate""" +585 88 loss """bceaftersigmoid""" +585 88 regularizer """no""" +585 88 optimizer """adadelta""" +585 88 training_loop """lcwa""" +585 88 evaluator """rankbased""" +585 89 dataset """kinships""" +585 89 model """rotate""" +585 89 loss """bceaftersigmoid""" +585 89 regularizer """no""" +585 89 optimizer """adadelta""" +585 89 training_loop """lcwa""" +585 89 evaluator """rankbased""" +585 90 dataset """kinships""" +585 90 model """rotate""" +585 90 loss """bceaftersigmoid""" +585 90 regularizer """no""" +585 90 optimizer """adadelta""" +585 90 training_loop """lcwa""" +585 90 evaluator """rankbased""" +585 91 dataset """kinships""" +585 91 model """rotate""" +585 91 loss """bceaftersigmoid""" +585 91 regularizer """no""" +585 91 optimizer """adadelta""" +585 91 training_loop """lcwa""" +585 91 evaluator """rankbased""" +585 92 dataset """kinships""" +585 92 model """rotate""" +585 92 loss """bceaftersigmoid""" +585 92 regularizer """no""" +585 92 optimizer """adadelta""" +585 92 training_loop """lcwa""" +585 92 evaluator """rankbased""" +585 93 dataset """kinships""" 
+585 93 model """rotate""" +585 93 loss """bceaftersigmoid""" +585 93 regularizer """no""" +585 93 optimizer """adadelta""" +585 93 training_loop """lcwa""" +585 93 evaluator """rankbased""" +585 94 dataset """kinships""" +585 94 model """rotate""" +585 94 loss """bceaftersigmoid""" +585 94 regularizer """no""" +585 94 optimizer """adadelta""" +585 94 training_loop """lcwa""" +585 94 evaluator """rankbased""" +585 95 dataset """kinships""" +585 95 model """rotate""" +585 95 loss """bceaftersigmoid""" +585 95 regularizer """no""" +585 95 optimizer """adadelta""" +585 95 training_loop """lcwa""" +585 95 evaluator """rankbased""" +585 96 dataset """kinships""" +585 96 model """rotate""" +585 96 loss """bceaftersigmoid""" +585 96 regularizer """no""" +585 96 optimizer """adadelta""" +585 96 training_loop """lcwa""" +585 96 evaluator """rankbased""" +585 97 dataset """kinships""" +585 97 model """rotate""" +585 97 loss """bceaftersigmoid""" +585 97 regularizer """no""" +585 97 optimizer """adadelta""" +585 97 training_loop """lcwa""" +585 97 evaluator """rankbased""" +585 98 dataset """kinships""" +585 98 model """rotate""" +585 98 loss """bceaftersigmoid""" +585 98 regularizer """no""" +585 98 optimizer """adadelta""" +585 98 training_loop """lcwa""" +585 98 evaluator """rankbased""" +585 99 dataset """kinships""" +585 99 model """rotate""" +585 99 loss """bceaftersigmoid""" +585 99 regularizer """no""" +585 99 optimizer """adadelta""" +585 99 training_loop """lcwa""" +585 99 evaluator """rankbased""" +585 100 dataset """kinships""" +585 100 model """rotate""" +585 100 loss """bceaftersigmoid""" +585 100 regularizer """no""" +585 100 optimizer """adadelta""" +585 100 training_loop """lcwa""" +585 100 evaluator """rankbased""" +586 1 model.embedding_dim 0.0 +586 1 training.batch_size 0.0 +586 1 training.label_smoothing 0.09956425261563101 +586 2 model.embedding_dim 2.0 +586 2 training.batch_size 0.0 +586 2 training.label_smoothing 0.2700635939372088 +586 3 
model.embedding_dim 2.0 +586 3 training.batch_size 1.0 +586 3 training.label_smoothing 0.534042961939242 +586 4 model.embedding_dim 1.0 +586 4 training.batch_size 2.0 +586 4 training.label_smoothing 0.0021075494633317447 +586 5 model.embedding_dim 0.0 +586 5 training.batch_size 0.0 +586 5 training.label_smoothing 0.47562454045025726 +586 6 model.embedding_dim 2.0 +586 6 training.batch_size 0.0 +586 6 training.label_smoothing 0.08357611882957842 +586 7 model.embedding_dim 1.0 +586 7 training.batch_size 2.0 +586 7 training.label_smoothing 0.1192359293604575 +586 8 model.embedding_dim 1.0 +586 8 training.batch_size 2.0 +586 8 training.label_smoothing 0.0020590985463107207 +586 9 model.embedding_dim 1.0 +586 9 training.batch_size 2.0 +586 9 training.label_smoothing 0.5849958987643418 +586 10 model.embedding_dim 2.0 +586 10 training.batch_size 1.0 +586 10 training.label_smoothing 0.011067878745420742 +586 11 model.embedding_dim 0.0 +586 11 training.batch_size 0.0 +586 11 training.label_smoothing 0.02638307847461095 +586 12 model.embedding_dim 2.0 +586 12 training.batch_size 2.0 +586 12 training.label_smoothing 0.01991607681807485 +586 13 model.embedding_dim 1.0 +586 13 training.batch_size 2.0 +586 13 training.label_smoothing 0.00786303216019879 +586 14 model.embedding_dim 0.0 +586 14 training.batch_size 2.0 +586 14 training.label_smoothing 0.8401549780306488 +586 15 model.embedding_dim 0.0 +586 15 training.batch_size 2.0 +586 15 training.label_smoothing 0.014109228895048499 +586 16 model.embedding_dim 0.0 +586 16 training.batch_size 0.0 +586 16 training.label_smoothing 0.012438066576727426 +586 17 model.embedding_dim 2.0 +586 17 training.batch_size 0.0 +586 17 training.label_smoothing 0.10749187014405395 +586 18 model.embedding_dim 1.0 +586 18 training.batch_size 1.0 +586 18 training.label_smoothing 0.32694087735521554 +586 19 model.embedding_dim 1.0 +586 19 training.batch_size 2.0 +586 19 training.label_smoothing 0.25981265874552983 +586 20 model.embedding_dim 0.0 +586 
20 training.batch_size 2.0 +586 20 training.label_smoothing 0.3267650901987993 +586 21 model.embedding_dim 2.0 +586 21 training.batch_size 2.0 +586 21 training.label_smoothing 0.07057239523654237 +586 22 model.embedding_dim 0.0 +586 22 training.batch_size 0.0 +586 22 training.label_smoothing 0.0898899844309825 +586 23 model.embedding_dim 2.0 +586 23 training.batch_size 1.0 +586 23 training.label_smoothing 0.003745778410217825 +586 24 model.embedding_dim 2.0 +586 24 training.batch_size 2.0 +586 24 training.label_smoothing 0.26444679231574897 +586 25 model.embedding_dim 2.0 +586 25 training.batch_size 2.0 +586 25 training.label_smoothing 0.17547416734180396 +586 26 model.embedding_dim 1.0 +586 26 training.batch_size 0.0 +586 26 training.label_smoothing 0.01720665796177853 +586 27 model.embedding_dim 1.0 +586 27 training.batch_size 1.0 +586 27 training.label_smoothing 0.4069560819355874 +586 28 model.embedding_dim 0.0 +586 28 training.batch_size 1.0 +586 28 training.label_smoothing 0.10605390426775729 +586 29 model.embedding_dim 2.0 +586 29 training.batch_size 0.0 +586 29 training.label_smoothing 0.014847712845449263 +586 30 model.embedding_dim 1.0 +586 30 training.batch_size 0.0 +586 30 training.label_smoothing 0.04107455174273885 +586 31 model.embedding_dim 0.0 +586 31 training.batch_size 1.0 +586 31 training.label_smoothing 0.0017904221821280536 +586 32 model.embedding_dim 2.0 +586 32 training.batch_size 2.0 +586 32 training.label_smoothing 0.4944000740496356 +586 33 model.embedding_dim 0.0 +586 33 training.batch_size 2.0 +586 33 training.label_smoothing 0.0098436218730916 +586 34 model.embedding_dim 0.0 +586 34 training.batch_size 2.0 +586 34 training.label_smoothing 0.1820461596368209 +586 35 model.embedding_dim 2.0 +586 35 training.batch_size 1.0 +586 35 training.label_smoothing 0.12758703509931674 +586 36 model.embedding_dim 1.0 +586 36 training.batch_size 1.0 +586 36 training.label_smoothing 0.0707600027706294 +586 37 model.embedding_dim 2.0 +586 37 
training.batch_size 2.0 +586 37 training.label_smoothing 0.3163470139583502 +586 38 model.embedding_dim 1.0 +586 38 training.batch_size 1.0 +586 38 training.label_smoothing 0.7893416121231158 +586 39 model.embedding_dim 2.0 +586 39 training.batch_size 2.0 +586 39 training.label_smoothing 0.39644264836988913 +586 40 model.embedding_dim 1.0 +586 40 training.batch_size 0.0 +586 40 training.label_smoothing 0.41371458958068436 +586 41 model.embedding_dim 1.0 +586 41 training.batch_size 1.0 +586 41 training.label_smoothing 0.4897729872670283 +586 42 model.embedding_dim 0.0 +586 42 training.batch_size 2.0 +586 42 training.label_smoothing 0.012013927461635243 +586 43 model.embedding_dim 2.0 +586 43 training.batch_size 0.0 +586 43 training.label_smoothing 0.7507611688670591 +586 44 model.embedding_dim 1.0 +586 44 training.batch_size 0.0 +586 44 training.label_smoothing 0.001598900754582374 +586 45 model.embedding_dim 0.0 +586 45 training.batch_size 2.0 +586 45 training.label_smoothing 0.002168976668631165 +586 46 model.embedding_dim 0.0 +586 46 training.batch_size 1.0 +586 46 training.label_smoothing 0.16132728176847147 +586 47 model.embedding_dim 2.0 +586 47 training.batch_size 0.0 +586 47 training.label_smoothing 0.0015526617806553645 +586 48 model.embedding_dim 2.0 +586 48 training.batch_size 1.0 +586 48 training.label_smoothing 0.05826398860213994 +586 49 model.embedding_dim 1.0 +586 49 training.batch_size 2.0 +586 49 training.label_smoothing 0.05160272010695042 +586 50 model.embedding_dim 1.0 +586 50 training.batch_size 1.0 +586 50 training.label_smoothing 0.002005661000596243 +586 51 model.embedding_dim 2.0 +586 51 training.batch_size 2.0 +586 51 training.label_smoothing 0.30349516307360086 +586 52 model.embedding_dim 2.0 +586 52 training.batch_size 0.0 +586 52 training.label_smoothing 0.006207877481109751 +586 53 model.embedding_dim 0.0 +586 53 training.batch_size 2.0 +586 53 training.label_smoothing 0.0878343114642224 +586 54 model.embedding_dim 2.0 +586 54 
training.batch_size 1.0 +586 54 training.label_smoothing 0.20843395825508335 +586 55 model.embedding_dim 2.0 +586 55 training.batch_size 1.0 +586 55 training.label_smoothing 0.022933193872590508 +586 56 model.embedding_dim 2.0 +586 56 training.batch_size 0.0 +586 56 training.label_smoothing 0.7854690574775725 +586 57 model.embedding_dim 1.0 +586 57 training.batch_size 2.0 +586 57 training.label_smoothing 0.006539704954381495 +586 58 model.embedding_dim 1.0 +586 58 training.batch_size 0.0 +586 58 training.label_smoothing 0.023031366818575472 +586 59 model.embedding_dim 1.0 +586 59 training.batch_size 0.0 +586 59 training.label_smoothing 0.7209620786366977 +586 60 model.embedding_dim 1.0 +586 60 training.batch_size 0.0 +586 60 training.label_smoothing 0.010681685552745446 +586 61 model.embedding_dim 2.0 +586 61 training.batch_size 2.0 +586 61 training.label_smoothing 0.08164934484681098 +586 62 model.embedding_dim 0.0 +586 62 training.batch_size 1.0 +586 62 training.label_smoothing 0.014945578495082622 +586 63 model.embedding_dim 2.0 +586 63 training.batch_size 2.0 +586 63 training.label_smoothing 0.0029170585716849374 +586 64 model.embedding_dim 1.0 +586 64 training.batch_size 2.0 +586 64 training.label_smoothing 0.0737680533864999 +586 65 model.embedding_dim 0.0 +586 65 training.batch_size 1.0 +586 65 training.label_smoothing 0.004920332770356044 +586 66 model.embedding_dim 0.0 +586 66 training.batch_size 0.0 +586 66 training.label_smoothing 0.0034382824244689634 +586 67 model.embedding_dim 2.0 +586 67 training.batch_size 2.0 +586 67 training.label_smoothing 0.08464746865485707 +586 68 model.embedding_dim 1.0 +586 68 training.batch_size 1.0 +586 68 training.label_smoothing 0.018024135876272385 +586 69 model.embedding_dim 1.0 +586 69 training.batch_size 0.0 +586 69 training.label_smoothing 0.016310264748317815 +586 70 model.embedding_dim 1.0 +586 70 training.batch_size 0.0 +586 70 training.label_smoothing 0.4936397559699741 +586 71 model.embedding_dim 2.0 +586 71 
training.batch_size 1.0 +586 71 training.label_smoothing 0.025087019727953776 +586 72 model.embedding_dim 2.0 +586 72 training.batch_size 0.0 +586 72 training.label_smoothing 0.019259677445222407 +586 73 model.embedding_dim 1.0 +586 73 training.batch_size 2.0 +586 73 training.label_smoothing 0.25572627404713383 +586 74 model.embedding_dim 2.0 +586 74 training.batch_size 1.0 +586 74 training.label_smoothing 0.0038702802165807436 +586 75 model.embedding_dim 0.0 +586 75 training.batch_size 0.0 +586 75 training.label_smoothing 0.0035230793574904376 +586 76 model.embedding_dim 2.0 +586 76 training.batch_size 2.0 +586 76 training.label_smoothing 0.40682100771740826 +586 77 model.embedding_dim 0.0 +586 77 training.batch_size 1.0 +586 77 training.label_smoothing 0.007189247980781396 +586 78 model.embedding_dim 0.0 +586 78 training.batch_size 1.0 +586 78 training.label_smoothing 0.012314567972872304 +586 79 model.embedding_dim 2.0 +586 79 training.batch_size 1.0 +586 79 training.label_smoothing 0.37191455122755424 +586 80 model.embedding_dim 0.0 +586 80 training.batch_size 0.0 +586 80 training.label_smoothing 0.005255857516376065 +586 81 model.embedding_dim 2.0 +586 81 training.batch_size 2.0 +586 81 training.label_smoothing 0.07077970887358641 +586 82 model.embedding_dim 0.0 +586 82 training.batch_size 2.0 +586 82 training.label_smoothing 0.03765228818460453 +586 83 model.embedding_dim 0.0 +586 83 training.batch_size 0.0 +586 83 training.label_smoothing 0.005674872259686374 +586 84 model.embedding_dim 1.0 +586 84 training.batch_size 2.0 +586 84 training.label_smoothing 0.04247881193315329 +586 85 model.embedding_dim 1.0 +586 85 training.batch_size 0.0 +586 85 training.label_smoothing 0.019080229363462965 +586 86 model.embedding_dim 1.0 +586 86 training.batch_size 0.0 +586 86 training.label_smoothing 0.11978089940694936 +586 87 model.embedding_dim 0.0 +586 87 training.batch_size 1.0 +586 87 training.label_smoothing 0.01125078970368468 +586 88 model.embedding_dim 1.0 +586 88 
training.batch_size 1.0 +586 88 training.label_smoothing 0.002724206074597971 +586 89 model.embedding_dim 1.0 +586 89 training.batch_size 0.0 +586 89 training.label_smoothing 0.7341319782894461 +586 90 model.embedding_dim 1.0 +586 90 training.batch_size 1.0 +586 90 training.label_smoothing 0.00281806066380442 +586 91 model.embedding_dim 2.0 +586 91 training.batch_size 0.0 +586 91 training.label_smoothing 0.01643200112415561 +586 92 model.embedding_dim 1.0 +586 92 training.batch_size 2.0 +586 92 training.label_smoothing 0.4241831048089721 +586 93 model.embedding_dim 1.0 +586 93 training.batch_size 2.0 +586 93 training.label_smoothing 0.2567450736270992 +586 94 model.embedding_dim 1.0 +586 94 training.batch_size 2.0 +586 94 training.label_smoothing 0.03322067456357662 +586 95 model.embedding_dim 0.0 +586 95 training.batch_size 0.0 +586 95 training.label_smoothing 0.4169987668094156 +586 96 model.embedding_dim 1.0 +586 96 training.batch_size 0.0 +586 96 training.label_smoothing 0.0027573187692336906 +586 97 model.embedding_dim 2.0 +586 97 training.batch_size 2.0 +586 97 training.label_smoothing 0.15954178733652094 +586 98 model.embedding_dim 0.0 +586 98 training.batch_size 2.0 +586 98 training.label_smoothing 0.013469290471648337 +586 99 model.embedding_dim 2.0 +586 99 training.batch_size 0.0 +586 99 training.label_smoothing 0.10398366403527191 +586 100 model.embedding_dim 0.0 +586 100 training.batch_size 2.0 +586 100 training.label_smoothing 0.0020173090378447075 +586 1 dataset """kinships""" +586 1 model """rotate""" +586 1 loss """softplus""" +586 1 regularizer """no""" +586 1 optimizer """adadelta""" +586 1 training_loop """lcwa""" +586 1 evaluator """rankbased""" +586 2 dataset """kinships""" +586 2 model """rotate""" +586 2 loss """softplus""" +586 2 regularizer """no""" +586 2 optimizer """adadelta""" +586 2 training_loop """lcwa""" +586 2 evaluator """rankbased""" +586 3 dataset """kinships""" +586 3 model """rotate""" +586 3 loss """softplus""" +586 3 
regularizer """no""" +586 3 optimizer """adadelta""" +586 3 training_loop """lcwa""" +586 3 evaluator """rankbased""" +586 4 dataset """kinships""" +586 4 model """rotate""" +586 4 loss """softplus""" +586 4 regularizer """no""" +586 4 optimizer """adadelta""" +586 4 training_loop """lcwa""" +586 4 evaluator """rankbased""" +586 5 dataset """kinships""" +586 5 model """rotate""" +586 5 loss """softplus""" +586 5 regularizer """no""" +586 5 optimizer """adadelta""" +586 5 training_loop """lcwa""" +586 5 evaluator """rankbased""" +586 6 dataset """kinships""" +586 6 model """rotate""" +586 6 loss """softplus""" +586 6 regularizer """no""" +586 6 optimizer """adadelta""" +586 6 training_loop """lcwa""" +586 6 evaluator """rankbased""" +586 7 dataset """kinships""" +586 7 model """rotate""" +586 7 loss """softplus""" +586 7 regularizer """no""" +586 7 optimizer """adadelta""" +586 7 training_loop """lcwa""" +586 7 evaluator """rankbased""" +586 8 dataset """kinships""" +586 8 model """rotate""" +586 8 loss """softplus""" +586 8 regularizer """no""" +586 8 optimizer """adadelta""" +586 8 training_loop """lcwa""" +586 8 evaluator """rankbased""" +586 9 dataset """kinships""" +586 9 model """rotate""" +586 9 loss """softplus""" +586 9 regularizer """no""" +586 9 optimizer """adadelta""" +586 9 training_loop """lcwa""" +586 9 evaluator """rankbased""" +586 10 dataset """kinships""" +586 10 model """rotate""" +586 10 loss """softplus""" +586 10 regularizer """no""" +586 10 optimizer """adadelta""" +586 10 training_loop """lcwa""" +586 10 evaluator """rankbased""" +586 11 dataset """kinships""" +586 11 model """rotate""" +586 11 loss """softplus""" +586 11 regularizer """no""" +586 11 optimizer """adadelta""" +586 11 training_loop """lcwa""" +586 11 evaluator """rankbased""" +586 12 dataset """kinships""" +586 12 model """rotate""" +586 12 loss """softplus""" +586 12 regularizer """no""" +586 12 optimizer """adadelta""" +586 12 training_loop """lcwa""" +586 12 evaluator 
"""rankbased""" +586 13 dataset """kinships""" +586 13 model """rotate""" +586 13 loss """softplus""" +586 13 regularizer """no""" +586 13 optimizer """adadelta""" +586 13 training_loop """lcwa""" +586 13 evaluator """rankbased""" +586 14 dataset """kinships""" +586 14 model """rotate""" +586 14 loss """softplus""" +586 14 regularizer """no""" +586 14 optimizer """adadelta""" +586 14 training_loop """lcwa""" +586 14 evaluator """rankbased""" +586 15 dataset """kinships""" +586 15 model """rotate""" +586 15 loss """softplus""" +586 15 regularizer """no""" +586 15 optimizer """adadelta""" +586 15 training_loop """lcwa""" +586 15 evaluator """rankbased""" +586 16 dataset """kinships""" +586 16 model """rotate""" +586 16 loss """softplus""" +586 16 regularizer """no""" +586 16 optimizer """adadelta""" +586 16 training_loop """lcwa""" +586 16 evaluator """rankbased""" +586 17 dataset """kinships""" +586 17 model """rotate""" +586 17 loss """softplus""" +586 17 regularizer """no""" +586 17 optimizer """adadelta""" +586 17 training_loop """lcwa""" +586 17 evaluator """rankbased""" +586 18 dataset """kinships""" +586 18 model """rotate""" +586 18 loss """softplus""" +586 18 regularizer """no""" +586 18 optimizer """adadelta""" +586 18 training_loop """lcwa""" +586 18 evaluator """rankbased""" +586 19 dataset """kinships""" +586 19 model """rotate""" +586 19 loss """softplus""" +586 19 regularizer """no""" +586 19 optimizer """adadelta""" +586 19 training_loop """lcwa""" +586 19 evaluator """rankbased""" +586 20 dataset """kinships""" +586 20 model """rotate""" +586 20 loss """softplus""" +586 20 regularizer """no""" +586 20 optimizer """adadelta""" +586 20 training_loop """lcwa""" +586 20 evaluator """rankbased""" +586 21 dataset """kinships""" +586 21 model """rotate""" +586 21 loss """softplus""" +586 21 regularizer """no""" +586 21 optimizer """adadelta""" +586 21 training_loop """lcwa""" +586 21 evaluator """rankbased""" +586 22 dataset """kinships""" +586 22 model 
"""rotate""" +586 22 loss """softplus""" +586 22 regularizer """no""" +586 22 optimizer """adadelta""" +586 22 training_loop """lcwa""" +586 22 evaluator """rankbased""" +586 23 dataset """kinships""" +586 23 model """rotate""" +586 23 loss """softplus""" +586 23 regularizer """no""" +586 23 optimizer """adadelta""" +586 23 training_loop """lcwa""" +586 23 evaluator """rankbased""" +586 24 dataset """kinships""" +586 24 model """rotate""" +586 24 loss """softplus""" +586 24 regularizer """no""" +586 24 optimizer """adadelta""" +586 24 training_loop """lcwa""" +586 24 evaluator """rankbased""" +586 25 dataset """kinships""" +586 25 model """rotate""" +586 25 loss """softplus""" +586 25 regularizer """no""" +586 25 optimizer """adadelta""" +586 25 training_loop """lcwa""" +586 25 evaluator """rankbased""" +586 26 dataset """kinships""" +586 26 model """rotate""" +586 26 loss """softplus""" +586 26 regularizer """no""" +586 26 optimizer """adadelta""" +586 26 training_loop """lcwa""" +586 26 evaluator """rankbased""" +586 27 dataset """kinships""" +586 27 model """rotate""" +586 27 loss """softplus""" +586 27 regularizer """no""" +586 27 optimizer """adadelta""" +586 27 training_loop """lcwa""" +586 27 evaluator """rankbased""" +586 28 dataset """kinships""" +586 28 model """rotate""" +586 28 loss """softplus""" +586 28 regularizer """no""" +586 28 optimizer """adadelta""" +586 28 training_loop """lcwa""" +586 28 evaluator """rankbased""" +586 29 dataset """kinships""" +586 29 model """rotate""" +586 29 loss """softplus""" +586 29 regularizer """no""" +586 29 optimizer """adadelta""" +586 29 training_loop """lcwa""" +586 29 evaluator """rankbased""" +586 30 dataset """kinships""" +586 30 model """rotate""" +586 30 loss """softplus""" +586 30 regularizer """no""" +586 30 optimizer """adadelta""" +586 30 training_loop """lcwa""" +586 30 evaluator """rankbased""" +586 31 dataset """kinships""" +586 31 model """rotate""" +586 31 loss """softplus""" +586 31 regularizer 
"""no""" +586 31 optimizer """adadelta""" +586 31 training_loop """lcwa""" +586 31 evaluator """rankbased""" +586 32 dataset """kinships""" +586 32 model """rotate""" +586 32 loss """softplus""" +586 32 regularizer """no""" +586 32 optimizer """adadelta""" +586 32 training_loop """lcwa""" +586 32 evaluator """rankbased""" +586 33 dataset """kinships""" +586 33 model """rotate""" +586 33 loss """softplus""" +586 33 regularizer """no""" +586 33 optimizer """adadelta""" +586 33 training_loop """lcwa""" +586 33 evaluator """rankbased""" +586 34 dataset """kinships""" +586 34 model """rotate""" +586 34 loss """softplus""" +586 34 regularizer """no""" +586 34 optimizer """adadelta""" +586 34 training_loop """lcwa""" +586 34 evaluator """rankbased""" +586 35 dataset """kinships""" +586 35 model """rotate""" +586 35 loss """softplus""" +586 35 regularizer """no""" +586 35 optimizer """adadelta""" +586 35 training_loop """lcwa""" +586 35 evaluator """rankbased""" +586 36 dataset """kinships""" +586 36 model """rotate""" +586 36 loss """softplus""" +586 36 regularizer """no""" +586 36 optimizer """adadelta""" +586 36 training_loop """lcwa""" +586 36 evaluator """rankbased""" +586 37 dataset """kinships""" +586 37 model """rotate""" +586 37 loss """softplus""" +586 37 regularizer """no""" +586 37 optimizer """adadelta""" +586 37 training_loop """lcwa""" +586 37 evaluator """rankbased""" +586 38 dataset """kinships""" +586 38 model """rotate""" +586 38 loss """softplus""" +586 38 regularizer """no""" +586 38 optimizer """adadelta""" +586 38 training_loop """lcwa""" +586 38 evaluator """rankbased""" +586 39 dataset """kinships""" +586 39 model """rotate""" +586 39 loss """softplus""" +586 39 regularizer """no""" +586 39 optimizer """adadelta""" +586 39 training_loop """lcwa""" +586 39 evaluator """rankbased""" +586 40 dataset """kinships""" +586 40 model """rotate""" +586 40 loss """softplus""" +586 40 regularizer """no""" +586 40 optimizer """adadelta""" +586 40 training_loop 
"""lcwa""" +586 40 evaluator """rankbased""" +586 41 dataset """kinships""" +586 41 model """rotate""" +586 41 loss """softplus""" +586 41 regularizer """no""" +586 41 optimizer """adadelta""" +586 41 training_loop """lcwa""" +586 41 evaluator """rankbased""" +586 42 dataset """kinships""" +586 42 model """rotate""" +586 42 loss """softplus""" +586 42 regularizer """no""" +586 42 optimizer """adadelta""" +586 42 training_loop """lcwa""" +586 42 evaluator """rankbased""" +586 43 dataset """kinships""" +586 43 model """rotate""" +586 43 loss """softplus""" +586 43 regularizer """no""" +586 43 optimizer """adadelta""" +586 43 training_loop """lcwa""" +586 43 evaluator """rankbased""" +586 44 dataset """kinships""" +586 44 model """rotate""" +586 44 loss """softplus""" +586 44 regularizer """no""" +586 44 optimizer """adadelta""" +586 44 training_loop """lcwa""" +586 44 evaluator """rankbased""" +586 45 dataset """kinships""" +586 45 model """rotate""" +586 45 loss """softplus""" +586 45 regularizer """no""" +586 45 optimizer """adadelta""" +586 45 training_loop """lcwa""" +586 45 evaluator """rankbased""" +586 46 dataset """kinships""" +586 46 model """rotate""" +586 46 loss """softplus""" +586 46 regularizer """no""" +586 46 optimizer """adadelta""" +586 46 training_loop """lcwa""" +586 46 evaluator """rankbased""" +586 47 dataset """kinships""" +586 47 model """rotate""" +586 47 loss """softplus""" +586 47 regularizer """no""" +586 47 optimizer """adadelta""" +586 47 training_loop """lcwa""" +586 47 evaluator """rankbased""" +586 48 dataset """kinships""" +586 48 model """rotate""" +586 48 loss """softplus""" +586 48 regularizer """no""" +586 48 optimizer """adadelta""" +586 48 training_loop """lcwa""" +586 48 evaluator """rankbased""" +586 49 dataset """kinships""" +586 49 model """rotate""" +586 49 loss """softplus""" +586 49 regularizer """no""" +586 49 optimizer """adadelta""" +586 49 training_loop """lcwa""" +586 49 evaluator """rankbased""" +586 50 dataset 
"""kinships""" +586 50 model """rotate""" +586 50 loss """softplus""" +586 50 regularizer """no""" +586 50 optimizer """adadelta""" +586 50 training_loop """lcwa""" +586 50 evaluator """rankbased""" +586 51 dataset """kinships""" +586 51 model """rotate""" +586 51 loss """softplus""" +586 51 regularizer """no""" +586 51 optimizer """adadelta""" +586 51 training_loop """lcwa""" +586 51 evaluator """rankbased""" +586 52 dataset """kinships""" +586 52 model """rotate""" +586 52 loss """softplus""" +586 52 regularizer """no""" +586 52 optimizer """adadelta""" +586 52 training_loop """lcwa""" +586 52 evaluator """rankbased""" +586 53 dataset """kinships""" +586 53 model """rotate""" +586 53 loss """softplus""" +586 53 regularizer """no""" +586 53 optimizer """adadelta""" +586 53 training_loop """lcwa""" +586 53 evaluator """rankbased""" +586 54 dataset """kinships""" +586 54 model """rotate""" +586 54 loss """softplus""" +586 54 regularizer """no""" +586 54 optimizer """adadelta""" +586 54 training_loop """lcwa""" +586 54 evaluator """rankbased""" +586 55 dataset """kinships""" +586 55 model """rotate""" +586 55 loss """softplus""" +586 55 regularizer """no""" +586 55 optimizer """adadelta""" +586 55 training_loop """lcwa""" +586 55 evaluator """rankbased""" +586 56 dataset """kinships""" +586 56 model """rotate""" +586 56 loss """softplus""" +586 56 regularizer """no""" +586 56 optimizer """adadelta""" +586 56 training_loop """lcwa""" +586 56 evaluator """rankbased""" +586 57 dataset """kinships""" +586 57 model """rotate""" +586 57 loss """softplus""" +586 57 regularizer """no""" +586 57 optimizer """adadelta""" +586 57 training_loop """lcwa""" +586 57 evaluator """rankbased""" +586 58 dataset """kinships""" +586 58 model """rotate""" +586 58 loss """softplus""" +586 58 regularizer """no""" +586 58 optimizer """adadelta""" +586 58 training_loop """lcwa""" +586 58 evaluator """rankbased""" +586 59 dataset """kinships""" +586 59 model """rotate""" +586 59 loss 
"""softplus""" +586 59 regularizer """no""" +586 59 optimizer """adadelta""" +586 59 training_loop """lcwa""" +586 59 evaluator """rankbased""" +586 60 dataset """kinships""" +586 60 model """rotate""" +586 60 loss """softplus""" +586 60 regularizer """no""" +586 60 optimizer """adadelta""" +586 60 training_loop """lcwa""" +586 60 evaluator """rankbased""" +586 61 dataset """kinships""" +586 61 model """rotate""" +586 61 loss """softplus""" +586 61 regularizer """no""" +586 61 optimizer """adadelta""" +586 61 training_loop """lcwa""" +586 61 evaluator """rankbased""" +586 62 dataset """kinships""" +586 62 model """rotate""" +586 62 loss """softplus""" +586 62 regularizer """no""" +586 62 optimizer """adadelta""" +586 62 training_loop """lcwa""" +586 62 evaluator """rankbased""" +586 63 dataset """kinships""" +586 63 model """rotate""" +586 63 loss """softplus""" +586 63 regularizer """no""" +586 63 optimizer """adadelta""" +586 63 training_loop """lcwa""" +586 63 evaluator """rankbased""" +586 64 dataset """kinships""" +586 64 model """rotate""" +586 64 loss """softplus""" +586 64 regularizer """no""" +586 64 optimizer """adadelta""" +586 64 training_loop """lcwa""" +586 64 evaluator """rankbased""" +586 65 dataset """kinships""" +586 65 model """rotate""" +586 65 loss """softplus""" +586 65 regularizer """no""" +586 65 optimizer """adadelta""" +586 65 training_loop """lcwa""" +586 65 evaluator """rankbased""" +586 66 dataset """kinships""" +586 66 model """rotate""" +586 66 loss """softplus""" +586 66 regularizer """no""" +586 66 optimizer """adadelta""" +586 66 training_loop """lcwa""" +586 66 evaluator """rankbased""" +586 67 dataset """kinships""" +586 67 model """rotate""" +586 67 loss """softplus""" +586 67 regularizer """no""" +586 67 optimizer """adadelta""" +586 67 training_loop """lcwa""" +586 67 evaluator """rankbased""" +586 68 dataset """kinships""" +586 68 model """rotate""" +586 68 loss """softplus""" +586 68 regularizer """no""" +586 68 optimizer 
"""adadelta""" +586 68 training_loop """lcwa""" +586 68 evaluator """rankbased""" +586 69 dataset """kinships""" +586 69 model """rotate""" +586 69 loss """softplus""" +586 69 regularizer """no""" +586 69 optimizer """adadelta""" +586 69 training_loop """lcwa""" +586 69 evaluator """rankbased""" +586 70 dataset """kinships""" +586 70 model """rotate""" +586 70 loss """softplus""" +586 70 regularizer """no""" +586 70 optimizer """adadelta""" +586 70 training_loop """lcwa""" +586 70 evaluator """rankbased""" +586 71 dataset """kinships""" +586 71 model """rotate""" +586 71 loss """softplus""" +586 71 regularizer """no""" +586 71 optimizer """adadelta""" +586 71 training_loop """lcwa""" +586 71 evaluator """rankbased""" +586 72 dataset """kinships""" +586 72 model """rotate""" +586 72 loss """softplus""" +586 72 regularizer """no""" +586 72 optimizer """adadelta""" +586 72 training_loop """lcwa""" +586 72 evaluator """rankbased""" +586 73 dataset """kinships""" +586 73 model """rotate""" +586 73 loss """softplus""" +586 73 regularizer """no""" +586 73 optimizer """adadelta""" +586 73 training_loop """lcwa""" +586 73 evaluator """rankbased""" +586 74 dataset """kinships""" +586 74 model """rotate""" +586 74 loss """softplus""" +586 74 regularizer """no""" +586 74 optimizer """adadelta""" +586 74 training_loop """lcwa""" +586 74 evaluator """rankbased""" +586 75 dataset """kinships""" +586 75 model """rotate""" +586 75 loss """softplus""" +586 75 regularizer """no""" +586 75 optimizer """adadelta""" +586 75 training_loop """lcwa""" +586 75 evaluator """rankbased""" +586 76 dataset """kinships""" +586 76 model """rotate""" +586 76 loss """softplus""" +586 76 regularizer """no""" +586 76 optimizer """adadelta""" +586 76 training_loop """lcwa""" +586 76 evaluator """rankbased""" +586 77 dataset """kinships""" +586 77 model """rotate""" +586 77 loss """softplus""" +586 77 regularizer """no""" +586 77 optimizer """adadelta""" +586 77 training_loop """lcwa""" +586 77 
evaluator """rankbased""" +586 78 dataset """kinships""" +586 78 model """rotate""" +586 78 loss """softplus""" +586 78 regularizer """no""" +586 78 optimizer """adadelta""" +586 78 training_loop """lcwa""" +586 78 evaluator """rankbased""" +586 79 dataset """kinships""" +586 79 model """rotate""" +586 79 loss """softplus""" +586 79 regularizer """no""" +586 79 optimizer """adadelta""" +586 79 training_loop """lcwa""" +586 79 evaluator """rankbased""" +586 80 dataset """kinships""" +586 80 model """rotate""" +586 80 loss """softplus""" +586 80 regularizer """no""" +586 80 optimizer """adadelta""" +586 80 training_loop """lcwa""" +586 80 evaluator """rankbased""" +586 81 dataset """kinships""" +586 81 model """rotate""" +586 81 loss """softplus""" +586 81 regularizer """no""" +586 81 optimizer """adadelta""" +586 81 training_loop """lcwa""" +586 81 evaluator """rankbased""" +586 82 dataset """kinships""" +586 82 model """rotate""" +586 82 loss """softplus""" +586 82 regularizer """no""" +586 82 optimizer """adadelta""" +586 82 training_loop """lcwa""" +586 82 evaluator """rankbased""" +586 83 dataset """kinships""" +586 83 model """rotate""" +586 83 loss """softplus""" +586 83 regularizer """no""" +586 83 optimizer """adadelta""" +586 83 training_loop """lcwa""" +586 83 evaluator """rankbased""" +586 84 dataset """kinships""" +586 84 model """rotate""" +586 84 loss """softplus""" +586 84 regularizer """no""" +586 84 optimizer """adadelta""" +586 84 training_loop """lcwa""" +586 84 evaluator """rankbased""" +586 85 dataset """kinships""" +586 85 model """rotate""" +586 85 loss """softplus""" +586 85 regularizer """no""" +586 85 optimizer """adadelta""" +586 85 training_loop """lcwa""" +586 85 evaluator """rankbased""" +586 86 dataset """kinships""" +586 86 model """rotate""" +586 86 loss """softplus""" +586 86 regularizer """no""" +586 86 optimizer """adadelta""" +586 86 training_loop """lcwa""" +586 86 evaluator """rankbased""" +586 87 dataset """kinships""" +586 87 
model """rotate""" +586 87 loss """softplus""" +586 87 regularizer """no""" +586 87 optimizer """adadelta""" +586 87 training_loop """lcwa""" +586 87 evaluator """rankbased""" +586 88 dataset """kinships""" +586 88 model """rotate""" +586 88 loss """softplus""" +586 88 regularizer """no""" +586 88 optimizer """adadelta""" +586 88 training_loop """lcwa""" +586 88 evaluator """rankbased""" +586 89 dataset """kinships""" +586 89 model """rotate""" +586 89 loss """softplus""" +586 89 regularizer """no""" +586 89 optimizer """adadelta""" +586 89 training_loop """lcwa""" +586 89 evaluator """rankbased""" +586 90 dataset """kinships""" +586 90 model """rotate""" +586 90 loss """softplus""" +586 90 regularizer """no""" +586 90 optimizer """adadelta""" +586 90 training_loop """lcwa""" +586 90 evaluator """rankbased""" +586 91 dataset """kinships""" +586 91 model """rotate""" +586 91 loss """softplus""" +586 91 regularizer """no""" +586 91 optimizer """adadelta""" +586 91 training_loop """lcwa""" +586 91 evaluator """rankbased""" +586 92 dataset """kinships""" +586 92 model """rotate""" +586 92 loss """softplus""" +586 92 regularizer """no""" +586 92 optimizer """adadelta""" +586 92 training_loop """lcwa""" +586 92 evaluator """rankbased""" +586 93 dataset """kinships""" +586 93 model """rotate""" +586 93 loss """softplus""" +586 93 regularizer """no""" +586 93 optimizer """adadelta""" +586 93 training_loop """lcwa""" +586 93 evaluator """rankbased""" +586 94 dataset """kinships""" +586 94 model """rotate""" +586 94 loss """softplus""" +586 94 regularizer """no""" +586 94 optimizer """adadelta""" +586 94 training_loop """lcwa""" +586 94 evaluator """rankbased""" +586 95 dataset """kinships""" +586 95 model """rotate""" +586 95 loss """softplus""" +586 95 regularizer """no""" +586 95 optimizer """adadelta""" +586 95 training_loop """lcwa""" +586 95 evaluator """rankbased""" +586 96 dataset """kinships""" +586 96 model """rotate""" +586 96 loss """softplus""" +586 96 
regularizer """no""" +586 96 optimizer """adadelta""" +586 96 training_loop """lcwa""" +586 96 evaluator """rankbased""" +586 97 dataset """kinships""" +586 97 model """rotate""" +586 97 loss """softplus""" +586 97 regularizer """no""" +586 97 optimizer """adadelta""" +586 97 training_loop """lcwa""" +586 97 evaluator """rankbased""" +586 98 dataset """kinships""" +586 98 model """rotate""" +586 98 loss """softplus""" +586 98 regularizer """no""" +586 98 optimizer """adadelta""" +586 98 training_loop """lcwa""" +586 98 evaluator """rankbased""" +586 99 dataset """kinships""" +586 99 model """rotate""" +586 99 loss """softplus""" +586 99 regularizer """no""" +586 99 optimizer """adadelta""" +586 99 training_loop """lcwa""" +586 99 evaluator """rankbased""" +586 100 dataset """kinships""" +586 100 model """rotate""" +586 100 loss """softplus""" +586 100 regularizer """no""" +586 100 optimizer """adadelta""" +586 100 training_loop """lcwa""" +586 100 evaluator """rankbased""" +587 1 model.embedding_dim 2.0 +587 1 training.batch_size 1.0 +587 1 training.label_smoothing 0.034857555280359526 +587 2 model.embedding_dim 1.0 +587 2 training.batch_size 0.0 +587 2 training.label_smoothing 0.00488898981087145 +587 3 model.embedding_dim 2.0 +587 3 training.batch_size 0.0 +587 3 training.label_smoothing 0.10417973852295953 +587 4 model.embedding_dim 1.0 +587 4 training.batch_size 0.0 +587 4 training.label_smoothing 0.004470138499387118 +587 5 model.embedding_dim 2.0 +587 5 training.batch_size 0.0 +587 5 training.label_smoothing 0.004278016726637761 +587 6 model.embedding_dim 2.0 +587 6 training.batch_size 0.0 +587 6 training.label_smoothing 0.5867145286784591 +587 7 model.embedding_dim 0.0 +587 7 training.batch_size 2.0 +587 7 training.label_smoothing 0.003069722807640612 +587 8 model.embedding_dim 1.0 +587 8 training.batch_size 0.0 +587 8 training.label_smoothing 0.003510696326766208 +587 9 model.embedding_dim 2.0 +587 9 training.batch_size 0.0 +587 9 training.label_smoothing 
0.05741508699073427 +587 10 model.embedding_dim 0.0 +587 10 training.batch_size 1.0 +587 10 training.label_smoothing 0.12374757111836499 +587 11 model.embedding_dim 1.0 +587 11 training.batch_size 2.0 +587 11 training.label_smoothing 0.003358996648283624 +587 12 model.embedding_dim 0.0 +587 12 training.batch_size 1.0 +587 12 training.label_smoothing 0.027705755982142907 +587 13 model.embedding_dim 0.0 +587 13 training.batch_size 1.0 +587 13 training.label_smoothing 0.006253785221436981 +587 14 model.embedding_dim 2.0 +587 14 training.batch_size 2.0 +587 14 training.label_smoothing 0.14939880574862263 +587 15 model.embedding_dim 0.0 +587 15 training.batch_size 1.0 +587 15 training.label_smoothing 0.008630813581030313 +587 16 model.embedding_dim 2.0 +587 16 training.batch_size 0.0 +587 16 training.label_smoothing 0.16005296817731646 +587 17 model.embedding_dim 0.0 +587 17 training.batch_size 0.0 +587 17 training.label_smoothing 0.6495084721958169 +587 18 model.embedding_dim 1.0 +587 18 training.batch_size 2.0 +587 18 training.label_smoothing 0.01688110954143227 +587 19 model.embedding_dim 0.0 +587 19 training.batch_size 2.0 +587 19 training.label_smoothing 0.8437307590469976 +587 20 model.embedding_dim 1.0 +587 20 training.batch_size 0.0 +587 20 training.label_smoothing 0.2611179641375517 +587 21 model.embedding_dim 1.0 +587 21 training.batch_size 0.0 +587 21 training.label_smoothing 0.0016942960885649473 +587 22 model.embedding_dim 0.0 +587 22 training.batch_size 1.0 +587 22 training.label_smoothing 0.0702740611128896 +587 23 model.embedding_dim 0.0 +587 23 training.batch_size 1.0 +587 23 training.label_smoothing 0.7377249350561982 +587 24 model.embedding_dim 1.0 +587 24 training.batch_size 1.0 +587 24 training.label_smoothing 0.003317155699619687 +587 25 model.embedding_dim 2.0 +587 25 training.batch_size 1.0 +587 25 training.label_smoothing 0.005467800352328863 +587 26 model.embedding_dim 2.0 +587 26 training.batch_size 1.0 +587 26 training.label_smoothing 
0.23258593825432886 +587 27 model.embedding_dim 1.0 +587 27 training.batch_size 1.0 +587 27 training.label_smoothing 0.0411173079229078 +587 28 model.embedding_dim 1.0 +587 28 training.batch_size 1.0 +587 28 training.label_smoothing 0.002636267116483203 +587 29 model.embedding_dim 0.0 +587 29 training.batch_size 2.0 +587 29 training.label_smoothing 0.01570809668968744 +587 30 model.embedding_dim 2.0 +587 30 training.batch_size 2.0 +587 30 training.label_smoothing 0.03578088361732805 +587 31 model.embedding_dim 0.0 +587 31 training.batch_size 2.0 +587 31 training.label_smoothing 0.0011901499317895638 +587 32 model.embedding_dim 1.0 +587 32 training.batch_size 2.0 +587 32 training.label_smoothing 0.0035619371655858686 +587 33 model.embedding_dim 0.0 +587 33 training.batch_size 2.0 +587 33 training.label_smoothing 0.0716728129272495 +587 34 model.embedding_dim 2.0 +587 34 training.batch_size 1.0 +587 34 training.label_smoothing 0.3323318802290536 +587 35 model.embedding_dim 1.0 +587 35 training.batch_size 0.0 +587 35 training.label_smoothing 0.16695666215867053 +587 36 model.embedding_dim 2.0 +587 36 training.batch_size 0.0 +587 36 training.label_smoothing 0.13042180957507396 +587 37 model.embedding_dim 2.0 +587 37 training.batch_size 0.0 +587 37 training.label_smoothing 0.00905805136616577 +587 38 model.embedding_dim 0.0 +587 38 training.batch_size 0.0 +587 38 training.label_smoothing 0.06293286859462782 +587 39 model.embedding_dim 2.0 +587 39 training.batch_size 0.0 +587 39 training.label_smoothing 0.1842930857094376 +587 40 model.embedding_dim 2.0 +587 40 training.batch_size 2.0 +587 40 training.label_smoothing 0.46323520264439577 +587 41 model.embedding_dim 0.0 +587 41 training.batch_size 0.0 +587 41 training.label_smoothing 0.010892484414129931 +587 42 model.embedding_dim 0.0 +587 42 training.batch_size 1.0 +587 42 training.label_smoothing 0.0018009719073979942 +587 43 model.embedding_dim 0.0 +587 43 training.batch_size 2.0 +587 43 training.label_smoothing 
0.0037197542082460337 +587 44 model.embedding_dim 2.0 +587 44 training.batch_size 0.0 +587 44 training.label_smoothing 0.022148843793962075 +587 45 model.embedding_dim 1.0 +587 45 training.batch_size 1.0 +587 45 training.label_smoothing 0.04682755326873327 +587 46 model.embedding_dim 0.0 +587 46 training.batch_size 0.0 +587 46 training.label_smoothing 0.7929602471872071 +587 47 model.embedding_dim 0.0 +587 47 training.batch_size 2.0 +587 47 training.label_smoothing 0.004899151980578034 +587 48 model.embedding_dim 2.0 +587 48 training.batch_size 0.0 +587 48 training.label_smoothing 0.0012328072081904994 +587 49 model.embedding_dim 2.0 +587 49 training.batch_size 1.0 +587 49 training.label_smoothing 0.004715760746854681 +587 50 model.embedding_dim 2.0 +587 50 training.batch_size 0.0 +587 50 training.label_smoothing 0.12880730108025334 +587 51 model.embedding_dim 1.0 +587 51 training.batch_size 2.0 +587 51 training.label_smoothing 0.019045833903700748 +587 52 model.embedding_dim 0.0 +587 52 training.batch_size 2.0 +587 52 training.label_smoothing 0.0021409756560838785 +587 53 model.embedding_dim 1.0 +587 53 training.batch_size 1.0 +587 53 training.label_smoothing 0.18573635802587518 +587 54 model.embedding_dim 2.0 +587 54 training.batch_size 2.0 +587 54 training.label_smoothing 0.0012020444125297866 +587 55 model.embedding_dim 0.0 +587 55 training.batch_size 0.0 +587 55 training.label_smoothing 0.012221008543891665 +587 56 model.embedding_dim 2.0 +587 56 training.batch_size 1.0 +587 56 training.label_smoothing 0.04116278253246268 +587 57 model.embedding_dim 0.0 +587 57 training.batch_size 2.0 +587 57 training.label_smoothing 0.41075141093890677 +587 58 model.embedding_dim 1.0 +587 58 training.batch_size 1.0 +587 58 training.label_smoothing 0.2813278934107145 +587 59 model.embedding_dim 1.0 +587 59 training.batch_size 1.0 +587 59 training.label_smoothing 0.0011394167736078584 +587 60 model.embedding_dim 1.0 +587 60 training.batch_size 1.0 +587 60 
training.label_smoothing 0.7879686154489205 +587 61 model.embedding_dim 1.0 +587 61 training.batch_size 2.0 +587 61 training.label_smoothing 0.7969056952107914 +587 62 model.embedding_dim 2.0 +587 62 training.batch_size 2.0 +587 62 training.label_smoothing 0.1452314895479239 +587 63 model.embedding_dim 2.0 +587 63 training.batch_size 1.0 +587 63 training.label_smoothing 0.002174798584134859 +587 64 model.embedding_dim 1.0 +587 64 training.batch_size 1.0 +587 64 training.label_smoothing 0.22389288930227946 +587 65 model.embedding_dim 2.0 +587 65 training.batch_size 2.0 +587 65 training.label_smoothing 0.07020537451614356 +587 66 model.embedding_dim 0.0 +587 66 training.batch_size 0.0 +587 66 training.label_smoothing 0.0044319040666467085 +587 67 model.embedding_dim 0.0 +587 67 training.batch_size 1.0 +587 67 training.label_smoothing 0.2084876824947401 +587 68 model.embedding_dim 0.0 +587 68 training.batch_size 0.0 +587 68 training.label_smoothing 0.0010539153316195889 +587 69 model.embedding_dim 1.0 +587 69 training.batch_size 1.0 +587 69 training.label_smoothing 0.13564991366452175 +587 70 model.embedding_dim 0.0 +587 70 training.batch_size 1.0 +587 70 training.label_smoothing 0.16800268921952136 +587 71 model.embedding_dim 0.0 +587 71 training.batch_size 2.0 +587 71 training.label_smoothing 0.0025358242835803965 +587 72 model.embedding_dim 0.0 +587 72 training.batch_size 1.0 +587 72 training.label_smoothing 0.06305908012144926 +587 73 model.embedding_dim 0.0 +587 73 training.batch_size 0.0 +587 73 training.label_smoothing 0.009033719793454777 +587 74 model.embedding_dim 0.0 +587 74 training.batch_size 0.0 +587 74 training.label_smoothing 0.0010859186211140165 +587 75 model.embedding_dim 0.0 +587 75 training.batch_size 2.0 +587 75 training.label_smoothing 0.04438684230718748 +587 76 model.embedding_dim 2.0 +587 76 training.batch_size 2.0 +587 76 training.label_smoothing 0.4443366162813032 +587 77 model.embedding_dim 2.0 +587 77 training.batch_size 0.0 +587 77 
training.label_smoothing 0.00814226468837874 +587 78 model.embedding_dim 2.0 +587 78 training.batch_size 2.0 +587 78 training.label_smoothing 0.07516344612628943 +587 79 model.embedding_dim 1.0 +587 79 training.batch_size 1.0 +587 79 training.label_smoothing 0.638790133444016 +587 80 model.embedding_dim 0.0 +587 80 training.batch_size 1.0 +587 80 training.label_smoothing 0.38395207871295617 +587 81 model.embedding_dim 1.0 +587 81 training.batch_size 2.0 +587 81 training.label_smoothing 0.038613177983139735 +587 82 model.embedding_dim 0.0 +587 82 training.batch_size 2.0 +587 82 training.label_smoothing 0.00847590148522412 +587 83 model.embedding_dim 2.0 +587 83 training.batch_size 1.0 +587 83 training.label_smoothing 0.023979788414495332 +587 84 model.embedding_dim 0.0 +587 84 training.batch_size 2.0 +587 84 training.label_smoothing 0.002520007814987237 +587 85 model.embedding_dim 2.0 +587 85 training.batch_size 1.0 +587 85 training.label_smoothing 0.0677603011774845 +587 86 model.embedding_dim 1.0 +587 86 training.batch_size 1.0 +587 86 training.label_smoothing 0.022416137120694568 +587 87 model.embedding_dim 2.0 +587 87 training.batch_size 1.0 +587 87 training.label_smoothing 0.04773638092270713 +587 88 model.embedding_dim 1.0 +587 88 training.batch_size 2.0 +587 88 training.label_smoothing 0.029052862107282258 +587 89 model.embedding_dim 1.0 +587 89 training.batch_size 0.0 +587 89 training.label_smoothing 0.0019280477482773518 +587 90 model.embedding_dim 0.0 +587 90 training.batch_size 1.0 +587 90 training.label_smoothing 0.06686108669155823 +587 91 model.embedding_dim 2.0 +587 91 training.batch_size 1.0 +587 91 training.label_smoothing 0.002400021281385158 +587 92 model.embedding_dim 0.0 +587 92 training.batch_size 2.0 +587 92 training.label_smoothing 0.8606472738474028 +587 93 model.embedding_dim 2.0 +587 93 training.batch_size 0.0 +587 93 training.label_smoothing 0.2599585921023721 +587 94 model.embedding_dim 0.0 +587 94 training.batch_size 2.0 +587 94 
training.label_smoothing 0.003919592941538196 +587 95 model.embedding_dim 0.0 +587 95 training.batch_size 2.0 +587 95 training.label_smoothing 0.3382774555069951 +587 96 model.embedding_dim 0.0 +587 96 training.batch_size 2.0 +587 96 training.label_smoothing 0.9751664154350664 +587 97 model.embedding_dim 1.0 +587 97 training.batch_size 2.0 +587 97 training.label_smoothing 0.001290032984959911 +587 98 model.embedding_dim 1.0 +587 98 training.batch_size 0.0 +587 98 training.label_smoothing 0.3129107924412015 +587 99 model.embedding_dim 1.0 +587 99 training.batch_size 0.0 +587 99 training.label_smoothing 0.004318363749650559 +587 100 model.embedding_dim 2.0 +587 100 training.batch_size 0.0 +587 100 training.label_smoothing 0.0016221323185578323 +587 1 dataset """kinships""" +587 1 model """rotate""" +587 1 loss """crossentropy""" +587 1 regularizer """no""" +587 1 optimizer """adadelta""" +587 1 training_loop """lcwa""" +587 1 evaluator """rankbased""" +587 2 dataset """kinships""" +587 2 model """rotate""" +587 2 loss """crossentropy""" +587 2 regularizer """no""" +587 2 optimizer """adadelta""" +587 2 training_loop """lcwa""" +587 2 evaluator """rankbased""" +587 3 dataset """kinships""" +587 3 model """rotate""" +587 3 loss """crossentropy""" +587 3 regularizer """no""" +587 3 optimizer """adadelta""" +587 3 training_loop """lcwa""" +587 3 evaluator """rankbased""" +587 4 dataset """kinships""" +587 4 model """rotate""" +587 4 loss """crossentropy""" +587 4 regularizer """no""" +587 4 optimizer """adadelta""" +587 4 training_loop """lcwa""" +587 4 evaluator """rankbased""" +587 5 dataset """kinships""" +587 5 model """rotate""" +587 5 loss """crossentropy""" +587 5 regularizer """no""" +587 5 optimizer """adadelta""" +587 5 training_loop """lcwa""" +587 5 evaluator """rankbased""" +587 6 dataset """kinships""" +587 6 model """rotate""" +587 6 loss """crossentropy""" +587 6 regularizer """no""" +587 6 optimizer """adadelta""" +587 6 training_loop """lcwa""" +587 6 
evaluator """rankbased""" +587 7 dataset """kinships""" +587 7 model """rotate""" +587 7 loss """crossentropy""" +587 7 regularizer """no""" +587 7 optimizer """adadelta""" +587 7 training_loop """lcwa""" +587 7 evaluator """rankbased""" +587 8 dataset """kinships""" +587 8 model """rotate""" +587 8 loss """crossentropy""" +587 8 regularizer """no""" +587 8 optimizer """adadelta""" +587 8 training_loop """lcwa""" +587 8 evaluator """rankbased""" +587 9 dataset """kinships""" +587 9 model """rotate""" +587 9 loss """crossentropy""" +587 9 regularizer """no""" +587 9 optimizer """adadelta""" +587 9 training_loop """lcwa""" +587 9 evaluator """rankbased""" +587 10 dataset """kinships""" +587 10 model """rotate""" +587 10 loss """crossentropy""" +587 10 regularizer """no""" +587 10 optimizer """adadelta""" +587 10 training_loop """lcwa""" +587 10 evaluator """rankbased""" +587 11 dataset """kinships""" +587 11 model """rotate""" +587 11 loss """crossentropy""" +587 11 regularizer """no""" +587 11 optimizer """adadelta""" +587 11 training_loop """lcwa""" +587 11 evaluator """rankbased""" +587 12 dataset """kinships""" +587 12 model """rotate""" +587 12 loss """crossentropy""" +587 12 regularizer """no""" +587 12 optimizer """adadelta""" +587 12 training_loop """lcwa""" +587 12 evaluator """rankbased""" +587 13 dataset """kinships""" +587 13 model """rotate""" +587 13 loss """crossentropy""" +587 13 regularizer """no""" +587 13 optimizer """adadelta""" +587 13 training_loop """lcwa""" +587 13 evaluator """rankbased""" +587 14 dataset """kinships""" +587 14 model """rotate""" +587 14 loss """crossentropy""" +587 14 regularizer """no""" +587 14 optimizer """adadelta""" +587 14 training_loop """lcwa""" +587 14 evaluator """rankbased""" +587 15 dataset """kinships""" +587 15 model """rotate""" +587 15 loss """crossentropy""" +587 15 regularizer """no""" +587 15 optimizer """adadelta""" +587 15 training_loop """lcwa""" +587 15 evaluator """rankbased""" +587 16 dataset 
"""kinships""" +587 16 model """rotate""" +587 16 loss """crossentropy""" +587 16 regularizer """no""" +587 16 optimizer """adadelta""" +587 16 training_loop """lcwa""" +587 16 evaluator """rankbased""" +587 17 dataset """kinships""" +587 17 model """rotate""" +587 17 loss """crossentropy""" +587 17 regularizer """no""" +587 17 optimizer """adadelta""" +587 17 training_loop """lcwa""" +587 17 evaluator """rankbased""" +587 18 dataset """kinships""" +587 18 model """rotate""" +587 18 loss """crossentropy""" +587 18 regularizer """no""" +587 18 optimizer """adadelta""" +587 18 training_loop """lcwa""" +587 18 evaluator """rankbased""" +587 19 dataset """kinships""" +587 19 model """rotate""" +587 19 loss """crossentropy""" +587 19 regularizer """no""" +587 19 optimizer """adadelta""" +587 19 training_loop """lcwa""" +587 19 evaluator """rankbased""" +587 20 dataset """kinships""" +587 20 model """rotate""" +587 20 loss """crossentropy""" +587 20 regularizer """no""" +587 20 optimizer """adadelta""" +587 20 training_loop """lcwa""" +587 20 evaluator """rankbased""" +587 21 dataset """kinships""" +587 21 model """rotate""" +587 21 loss """crossentropy""" +587 21 regularizer """no""" +587 21 optimizer """adadelta""" +587 21 training_loop """lcwa""" +587 21 evaluator """rankbased""" +587 22 dataset """kinships""" +587 22 model """rotate""" +587 22 loss """crossentropy""" +587 22 regularizer """no""" +587 22 optimizer """adadelta""" +587 22 training_loop """lcwa""" +587 22 evaluator """rankbased""" +587 23 dataset """kinships""" +587 23 model """rotate""" +587 23 loss """crossentropy""" +587 23 regularizer """no""" +587 23 optimizer """adadelta""" +587 23 training_loop """lcwa""" +587 23 evaluator """rankbased""" +587 24 dataset """kinships""" +587 24 model """rotate""" +587 24 loss """crossentropy""" +587 24 regularizer """no""" +587 24 optimizer """adadelta""" +587 24 training_loop """lcwa""" +587 24 evaluator """rankbased""" +587 25 dataset """kinships""" +587 25 model 
"""rotate""" +587 25 loss """crossentropy""" +587 25 regularizer """no""" +587 25 optimizer """adadelta""" +587 25 training_loop """lcwa""" +587 25 evaluator """rankbased""" +587 26 dataset """kinships""" +587 26 model """rotate""" +587 26 loss """crossentropy""" +587 26 regularizer """no""" +587 26 optimizer """adadelta""" +587 26 training_loop """lcwa""" +587 26 evaluator """rankbased""" +587 27 dataset """kinships""" +587 27 model """rotate""" +587 27 loss """crossentropy""" +587 27 regularizer """no""" +587 27 optimizer """adadelta""" +587 27 training_loop """lcwa""" +587 27 evaluator """rankbased""" +587 28 dataset """kinships""" +587 28 model """rotate""" +587 28 loss """crossentropy""" +587 28 regularizer """no""" +587 28 optimizer """adadelta""" +587 28 training_loop """lcwa""" +587 28 evaluator """rankbased""" +587 29 dataset """kinships""" +587 29 model """rotate""" +587 29 loss """crossentropy""" +587 29 regularizer """no""" +587 29 optimizer """adadelta""" +587 29 training_loop """lcwa""" +587 29 evaluator """rankbased""" +587 30 dataset """kinships""" +587 30 model """rotate""" +587 30 loss """crossentropy""" +587 30 regularizer """no""" +587 30 optimizer """adadelta""" +587 30 training_loop """lcwa""" +587 30 evaluator """rankbased""" +587 31 dataset """kinships""" +587 31 model """rotate""" +587 31 loss """crossentropy""" +587 31 regularizer """no""" +587 31 optimizer """adadelta""" +587 31 training_loop """lcwa""" +587 31 evaluator """rankbased""" +587 32 dataset """kinships""" +587 32 model """rotate""" +587 32 loss """crossentropy""" +587 32 regularizer """no""" +587 32 optimizer """adadelta""" +587 32 training_loop """lcwa""" +587 32 evaluator """rankbased""" +587 33 dataset """kinships""" +587 33 model """rotate""" +587 33 loss """crossentropy""" +587 33 regularizer """no""" +587 33 optimizer """adadelta""" +587 33 training_loop """lcwa""" +587 33 evaluator """rankbased""" +587 34 dataset """kinships""" +587 34 model """rotate""" +587 34 loss 
"""crossentropy""" +587 34 regularizer """no""" +587 34 optimizer """adadelta""" +587 34 training_loop """lcwa""" +587 34 evaluator """rankbased""" +587 35 dataset """kinships""" +587 35 model """rotate""" +587 35 loss """crossentropy""" +587 35 regularizer """no""" +587 35 optimizer """adadelta""" +587 35 training_loop """lcwa""" +587 35 evaluator """rankbased""" +587 36 dataset """kinships""" +587 36 model """rotate""" +587 36 loss """crossentropy""" +587 36 regularizer """no""" +587 36 optimizer """adadelta""" +587 36 training_loop """lcwa""" +587 36 evaluator """rankbased""" +587 37 dataset """kinships""" +587 37 model """rotate""" +587 37 loss """crossentropy""" +587 37 regularizer """no""" +587 37 optimizer """adadelta""" +587 37 training_loop """lcwa""" +587 37 evaluator """rankbased""" +587 38 dataset """kinships""" +587 38 model """rotate""" +587 38 loss """crossentropy""" +587 38 regularizer """no""" +587 38 optimizer """adadelta""" +587 38 training_loop """lcwa""" +587 38 evaluator """rankbased""" +587 39 dataset """kinships""" +587 39 model """rotate""" +587 39 loss """crossentropy""" +587 39 regularizer """no""" +587 39 optimizer """adadelta""" +587 39 training_loop """lcwa""" +587 39 evaluator """rankbased""" +587 40 dataset """kinships""" +587 40 model """rotate""" +587 40 loss """crossentropy""" +587 40 regularizer """no""" +587 40 optimizer """adadelta""" +587 40 training_loop """lcwa""" +587 40 evaluator """rankbased""" +587 41 dataset """kinships""" +587 41 model """rotate""" +587 41 loss """crossentropy""" +587 41 regularizer """no""" +587 41 optimizer """adadelta""" +587 41 training_loop """lcwa""" +587 41 evaluator """rankbased""" +587 42 dataset """kinships""" +587 42 model """rotate""" +587 42 loss """crossentropy""" +587 42 regularizer """no""" +587 42 optimizer """adadelta""" +587 42 training_loop """lcwa""" +587 42 evaluator """rankbased""" +587 43 dataset """kinships""" +587 43 model """rotate""" +587 43 loss """crossentropy""" +587 43 
regularizer """no""" +587 43 optimizer """adadelta""" +587 43 training_loop """lcwa""" +587 43 evaluator """rankbased""" +587 44 dataset """kinships""" +587 44 model """rotate""" +587 44 loss """crossentropy""" +587 44 regularizer """no""" +587 44 optimizer """adadelta""" +587 44 training_loop """lcwa""" +587 44 evaluator """rankbased""" +587 45 dataset """kinships""" +587 45 model """rotate""" +587 45 loss """crossentropy""" +587 45 regularizer """no""" +587 45 optimizer """adadelta""" +587 45 training_loop """lcwa""" +587 45 evaluator """rankbased""" +587 46 dataset """kinships""" +587 46 model """rotate""" +587 46 loss """crossentropy""" +587 46 regularizer """no""" +587 46 optimizer """adadelta""" +587 46 training_loop """lcwa""" +587 46 evaluator """rankbased""" +587 47 dataset """kinships""" +587 47 model """rotate""" +587 47 loss """crossentropy""" +587 47 regularizer """no""" +587 47 optimizer """adadelta""" +587 47 training_loop """lcwa""" +587 47 evaluator """rankbased""" +587 48 dataset """kinships""" +587 48 model """rotate""" +587 48 loss """crossentropy""" +587 48 regularizer """no""" +587 48 optimizer """adadelta""" +587 48 training_loop """lcwa""" +587 48 evaluator """rankbased""" +587 49 dataset """kinships""" +587 49 model """rotate""" +587 49 loss """crossentropy""" +587 49 regularizer """no""" +587 49 optimizer """adadelta""" +587 49 training_loop """lcwa""" +587 49 evaluator """rankbased""" +587 50 dataset """kinships""" +587 50 model """rotate""" +587 50 loss """crossentropy""" +587 50 regularizer """no""" +587 50 optimizer """adadelta""" +587 50 training_loop """lcwa""" +587 50 evaluator """rankbased""" +587 51 dataset """kinships""" +587 51 model """rotate""" +587 51 loss """crossentropy""" +587 51 regularizer """no""" +587 51 optimizer """adadelta""" +587 51 training_loop """lcwa""" +587 51 evaluator """rankbased""" +587 52 dataset """kinships""" +587 52 model """rotate""" +587 52 loss """crossentropy""" +587 52 regularizer """no""" +587 52 
optimizer """adadelta""" +587 52 training_loop """lcwa""" +587 52 evaluator """rankbased""" +587 53 dataset """kinships""" +587 53 model """rotate""" +587 53 loss """crossentropy""" +587 53 regularizer """no""" +587 53 optimizer """adadelta""" +587 53 training_loop """lcwa""" +587 53 evaluator """rankbased""" +587 54 dataset """kinships""" +587 54 model """rotate""" +587 54 loss """crossentropy""" +587 54 regularizer """no""" +587 54 optimizer """adadelta""" +587 54 training_loop """lcwa""" +587 54 evaluator """rankbased""" +587 55 dataset """kinships""" +587 55 model """rotate""" +587 55 loss """crossentropy""" +587 55 regularizer """no""" +587 55 optimizer """adadelta""" +587 55 training_loop """lcwa""" +587 55 evaluator """rankbased""" +587 56 dataset """kinships""" +587 56 model """rotate""" +587 56 loss """crossentropy""" +587 56 regularizer """no""" +587 56 optimizer """adadelta""" +587 56 training_loop """lcwa""" +587 56 evaluator """rankbased""" +587 57 dataset """kinships""" +587 57 model """rotate""" +587 57 loss """crossentropy""" +587 57 regularizer """no""" +587 57 optimizer """adadelta""" +587 57 training_loop """lcwa""" +587 57 evaluator """rankbased""" +587 58 dataset """kinships""" +587 58 model """rotate""" +587 58 loss """crossentropy""" +587 58 regularizer """no""" +587 58 optimizer """adadelta""" +587 58 training_loop """lcwa""" +587 58 evaluator """rankbased""" +587 59 dataset """kinships""" +587 59 model """rotate""" +587 59 loss """crossentropy""" +587 59 regularizer """no""" +587 59 optimizer """adadelta""" +587 59 training_loop """lcwa""" +587 59 evaluator """rankbased""" +587 60 dataset """kinships""" +587 60 model """rotate""" +587 60 loss """crossentropy""" +587 60 regularizer """no""" +587 60 optimizer """adadelta""" +587 60 training_loop """lcwa""" +587 60 evaluator """rankbased""" +587 61 dataset """kinships""" +587 61 model """rotate""" +587 61 loss """crossentropy""" +587 61 regularizer """no""" +587 61 optimizer """adadelta""" 
+587 61 training_loop """lcwa""" +587 61 evaluator """rankbased""" +587 62 dataset """kinships""" +587 62 model """rotate""" +587 62 loss """crossentropy""" +587 62 regularizer """no""" +587 62 optimizer """adadelta""" +587 62 training_loop """lcwa""" +587 62 evaluator """rankbased""" +587 63 dataset """kinships""" +587 63 model """rotate""" +587 63 loss """crossentropy""" +587 63 regularizer """no""" +587 63 optimizer """adadelta""" +587 63 training_loop """lcwa""" +587 63 evaluator """rankbased""" +587 64 dataset """kinships""" +587 64 model """rotate""" +587 64 loss """crossentropy""" +587 64 regularizer """no""" +587 64 optimizer """adadelta""" +587 64 training_loop """lcwa""" +587 64 evaluator """rankbased""" +587 65 dataset """kinships""" +587 65 model """rotate""" +587 65 loss """crossentropy""" +587 65 regularizer """no""" +587 65 optimizer """adadelta""" +587 65 training_loop """lcwa""" +587 65 evaluator """rankbased""" +587 66 dataset """kinships""" +587 66 model """rotate""" +587 66 loss """crossentropy""" +587 66 regularizer """no""" +587 66 optimizer """adadelta""" +587 66 training_loop """lcwa""" +587 66 evaluator """rankbased""" +587 67 dataset """kinships""" +587 67 model """rotate""" +587 67 loss """crossentropy""" +587 67 regularizer """no""" +587 67 optimizer """adadelta""" +587 67 training_loop """lcwa""" +587 67 evaluator """rankbased""" +587 68 dataset """kinships""" +587 68 model """rotate""" +587 68 loss """crossentropy""" +587 68 regularizer """no""" +587 68 optimizer """adadelta""" +587 68 training_loop """lcwa""" +587 68 evaluator """rankbased""" +587 69 dataset """kinships""" +587 69 model """rotate""" +587 69 loss """crossentropy""" +587 69 regularizer """no""" +587 69 optimizer """adadelta""" +587 69 training_loop """lcwa""" +587 69 evaluator """rankbased""" +587 70 dataset """kinships""" +587 70 model """rotate""" +587 70 loss """crossentropy""" +587 70 regularizer """no""" +587 70 optimizer """adadelta""" +587 70 training_loop 
"""lcwa""" +587 70 evaluator """rankbased""" +587 71 dataset """kinships""" +587 71 model """rotate""" +587 71 loss """crossentropy""" +587 71 regularizer """no""" +587 71 optimizer """adadelta""" +587 71 training_loop """lcwa""" +587 71 evaluator """rankbased""" +587 72 dataset """kinships""" +587 72 model """rotate""" +587 72 loss """crossentropy""" +587 72 regularizer """no""" +587 72 optimizer """adadelta""" +587 72 training_loop """lcwa""" +587 72 evaluator """rankbased""" +587 73 dataset """kinships""" +587 73 model """rotate""" +587 73 loss """crossentropy""" +587 73 regularizer """no""" +587 73 optimizer """adadelta""" +587 73 training_loop """lcwa""" +587 73 evaluator """rankbased""" +587 74 dataset """kinships""" +587 74 model """rotate""" +587 74 loss """crossentropy""" +587 74 regularizer """no""" +587 74 optimizer """adadelta""" +587 74 training_loop """lcwa""" +587 74 evaluator """rankbased""" +587 75 dataset """kinships""" +587 75 model """rotate""" +587 75 loss """crossentropy""" +587 75 regularizer """no""" +587 75 optimizer """adadelta""" +587 75 training_loop """lcwa""" +587 75 evaluator """rankbased""" +587 76 dataset """kinships""" +587 76 model """rotate""" +587 76 loss """crossentropy""" +587 76 regularizer """no""" +587 76 optimizer """adadelta""" +587 76 training_loop """lcwa""" +587 76 evaluator """rankbased""" +587 77 dataset """kinships""" +587 77 model """rotate""" +587 77 loss """crossentropy""" +587 77 regularizer """no""" +587 77 optimizer """adadelta""" +587 77 training_loop """lcwa""" +587 77 evaluator """rankbased""" +587 78 dataset """kinships""" +587 78 model """rotate""" +587 78 loss """crossentropy""" +587 78 regularizer """no""" +587 78 optimizer """adadelta""" +587 78 training_loop """lcwa""" +587 78 evaluator """rankbased""" +587 79 dataset """kinships""" +587 79 model """rotate""" +587 79 loss """crossentropy""" +587 79 regularizer """no""" +587 79 optimizer """adadelta""" +587 79 training_loop """lcwa""" +587 79 evaluator 
"""rankbased""" +587 80 dataset """kinships""" +587 80 model """rotate""" +587 80 loss """crossentropy""" +587 80 regularizer """no""" +587 80 optimizer """adadelta""" +587 80 training_loop """lcwa""" +587 80 evaluator """rankbased""" +587 81 dataset """kinships""" +587 81 model """rotate""" +587 81 loss """crossentropy""" +587 81 regularizer """no""" +587 81 optimizer """adadelta""" +587 81 training_loop """lcwa""" +587 81 evaluator """rankbased""" +587 82 dataset """kinships""" +587 82 model """rotate""" +587 82 loss """crossentropy""" +587 82 regularizer """no""" +587 82 optimizer """adadelta""" +587 82 training_loop """lcwa""" +587 82 evaluator """rankbased""" +587 83 dataset """kinships""" +587 83 model """rotate""" +587 83 loss """crossentropy""" +587 83 regularizer """no""" +587 83 optimizer """adadelta""" +587 83 training_loop """lcwa""" +587 83 evaluator """rankbased""" +587 84 dataset """kinships""" +587 84 model """rotate""" +587 84 loss """crossentropy""" +587 84 regularizer """no""" +587 84 optimizer """adadelta""" +587 84 training_loop """lcwa""" +587 84 evaluator """rankbased""" +587 85 dataset """kinships""" +587 85 model """rotate""" +587 85 loss """crossentropy""" +587 85 regularizer """no""" +587 85 optimizer """adadelta""" +587 85 training_loop """lcwa""" +587 85 evaluator """rankbased""" +587 86 dataset """kinships""" +587 86 model """rotate""" +587 86 loss """crossentropy""" +587 86 regularizer """no""" +587 86 optimizer """adadelta""" +587 86 training_loop """lcwa""" +587 86 evaluator """rankbased""" +587 87 dataset """kinships""" +587 87 model """rotate""" +587 87 loss """crossentropy""" +587 87 regularizer """no""" +587 87 optimizer """adadelta""" +587 87 training_loop """lcwa""" +587 87 evaluator """rankbased""" +587 88 dataset """kinships""" +587 88 model """rotate""" +587 88 loss """crossentropy""" +587 88 regularizer """no""" +587 88 optimizer """adadelta""" +587 88 training_loop """lcwa""" +587 88 evaluator """rankbased""" +587 89 
dataset """kinships""" +587 89 model """rotate""" +587 89 loss """crossentropy""" +587 89 regularizer """no""" +587 89 optimizer """adadelta""" +587 89 training_loop """lcwa""" +587 89 evaluator """rankbased""" +587 90 dataset """kinships""" +587 90 model """rotate""" +587 90 loss """crossentropy""" +587 90 regularizer """no""" +587 90 optimizer """adadelta""" +587 90 training_loop """lcwa""" +587 90 evaluator """rankbased""" +587 91 dataset """kinships""" +587 91 model """rotate""" +587 91 loss """crossentropy""" +587 91 regularizer """no""" +587 91 optimizer """adadelta""" +587 91 training_loop """lcwa""" +587 91 evaluator """rankbased""" +587 92 dataset """kinships""" +587 92 model """rotate""" +587 92 loss """crossentropy""" +587 92 regularizer """no""" +587 92 optimizer """adadelta""" +587 92 training_loop """lcwa""" +587 92 evaluator """rankbased""" +587 93 dataset """kinships""" +587 93 model """rotate""" +587 93 loss """crossentropy""" +587 93 regularizer """no""" +587 93 optimizer """adadelta""" +587 93 training_loop """lcwa""" +587 93 evaluator """rankbased""" +587 94 dataset """kinships""" +587 94 model """rotate""" +587 94 loss """crossentropy""" +587 94 regularizer """no""" +587 94 optimizer """adadelta""" +587 94 training_loop """lcwa""" +587 94 evaluator """rankbased""" +587 95 dataset """kinships""" +587 95 model """rotate""" +587 95 loss """crossentropy""" +587 95 regularizer """no""" +587 95 optimizer """adadelta""" +587 95 training_loop """lcwa""" +587 95 evaluator """rankbased""" +587 96 dataset """kinships""" +587 96 model """rotate""" +587 96 loss """crossentropy""" +587 96 regularizer """no""" +587 96 optimizer """adadelta""" +587 96 training_loop """lcwa""" +587 96 evaluator """rankbased""" +587 97 dataset """kinships""" +587 97 model """rotate""" +587 97 loss """crossentropy""" +587 97 regularizer """no""" +587 97 optimizer """adadelta""" +587 97 training_loop """lcwa""" +587 97 evaluator """rankbased""" +587 98 dataset """kinships""" +587 
98 model """rotate""" +587 98 loss """crossentropy""" +587 98 regularizer """no""" +587 98 optimizer """adadelta""" +587 98 training_loop """lcwa""" +587 98 evaluator """rankbased""" +587 99 dataset """kinships""" +587 99 model """rotate""" +587 99 loss """crossentropy""" +587 99 regularizer """no""" +587 99 optimizer """adadelta""" +587 99 training_loop """lcwa""" +587 99 evaluator """rankbased""" +587 100 dataset """kinships""" +587 100 model """rotate""" +587 100 loss """crossentropy""" +587 100 regularizer """no""" +587 100 optimizer """adadelta""" +587 100 training_loop """lcwa""" +587 100 evaluator """rankbased""" +588 1 model.embedding_dim 1.0 +588 1 training.batch_size 0.0 +588 1 training.label_smoothing 0.3826393367041942 +588 2 model.embedding_dim 2.0 +588 2 training.batch_size 2.0 +588 2 training.label_smoothing 0.6648141187910906 +588 3 model.embedding_dim 2.0 +588 3 training.batch_size 1.0 +588 3 training.label_smoothing 0.20528799206744802 +588 4 model.embedding_dim 1.0 +588 4 training.batch_size 0.0 +588 4 training.label_smoothing 0.013528578151182536 +588 5 model.embedding_dim 2.0 +588 5 training.batch_size 1.0 +588 5 training.label_smoothing 0.8112223124559768 +588 6 model.embedding_dim 0.0 +588 6 training.batch_size 2.0 +588 6 training.label_smoothing 0.04796638696504403 +588 7 model.embedding_dim 2.0 +588 7 training.batch_size 1.0 +588 7 training.label_smoothing 0.009147116533979377 +588 8 model.embedding_dim 2.0 +588 8 training.batch_size 2.0 +588 8 training.label_smoothing 0.05059094984612097 +588 9 model.embedding_dim 0.0 +588 9 training.batch_size 2.0 +588 9 training.label_smoothing 0.14902452181203235 +588 10 model.embedding_dim 1.0 +588 10 training.batch_size 0.0 +588 10 training.label_smoothing 0.018194742869983797 +588 11 model.embedding_dim 1.0 +588 11 training.batch_size 1.0 +588 11 training.label_smoothing 0.011755346295507297 +588 12 model.embedding_dim 1.0 +588 12 training.batch_size 2.0 +588 12 training.label_smoothing 
0.14399375463195793 +588 13 model.embedding_dim 2.0 +588 13 training.batch_size 2.0 +588 13 training.label_smoothing 0.4504548947655077 +588 14 model.embedding_dim 0.0 +588 14 training.batch_size 2.0 +588 14 training.label_smoothing 0.20747525766298014 +588 15 model.embedding_dim 2.0 +588 15 training.batch_size 1.0 +588 15 training.label_smoothing 0.0012004182813891903 +588 16 model.embedding_dim 1.0 +588 16 training.batch_size 0.0 +588 16 training.label_smoothing 0.0017970075171939008 +588 17 model.embedding_dim 2.0 +588 17 training.batch_size 2.0 +588 17 training.label_smoothing 0.02254096927303598 +588 18 model.embedding_dim 1.0 +588 18 training.batch_size 0.0 +588 18 training.label_smoothing 0.5150063780669777 +588 19 model.embedding_dim 1.0 +588 19 training.batch_size 2.0 +588 19 training.label_smoothing 0.03871199331548581 +588 20 model.embedding_dim 1.0 +588 20 training.batch_size 2.0 +588 20 training.label_smoothing 0.001623967249883024 +588 21 model.embedding_dim 0.0 +588 21 training.batch_size 1.0 +588 21 training.label_smoothing 0.002823994386260244 +588 22 model.embedding_dim 2.0 +588 22 training.batch_size 0.0 +588 22 training.label_smoothing 0.6260986235657141 +588 23 model.embedding_dim 1.0 +588 23 training.batch_size 2.0 +588 23 training.label_smoothing 0.009186048085819243 +588 24 model.embedding_dim 1.0 +588 24 training.batch_size 2.0 +588 24 training.label_smoothing 0.0014316901842351575 +588 25 model.embedding_dim 2.0 +588 25 training.batch_size 0.0 +588 25 training.label_smoothing 0.019303874932062114 +588 26 model.embedding_dim 0.0 +588 26 training.batch_size 2.0 +588 26 training.label_smoothing 0.014194324163366507 +588 27 model.embedding_dim 0.0 +588 27 training.batch_size 0.0 +588 27 training.label_smoothing 0.021297199980242477 +588 28 model.embedding_dim 2.0 +588 28 training.batch_size 0.0 +588 28 training.label_smoothing 0.0012484092614537166 +588 29 model.embedding_dim 0.0 +588 29 training.batch_size 0.0 +588 29 training.label_smoothing 
0.37772872440858957 +588 30 model.embedding_dim 1.0 +588 30 training.batch_size 2.0 +588 30 training.label_smoothing 0.012439482502110065 +588 31 model.embedding_dim 1.0 +588 31 training.batch_size 0.0 +588 31 training.label_smoothing 0.2772452073757105 +588 32 model.embedding_dim 1.0 +588 32 training.batch_size 2.0 +588 32 training.label_smoothing 0.00276851259478147 +588 33 model.embedding_dim 0.0 +588 33 training.batch_size 0.0 +588 33 training.label_smoothing 0.004689269963737259 +588 34 model.embedding_dim 1.0 +588 34 training.batch_size 1.0 +588 34 training.label_smoothing 0.04158675789294083 +588 35 model.embedding_dim 1.0 +588 35 training.batch_size 0.0 +588 35 training.label_smoothing 0.0722619173069813 +588 36 model.embedding_dim 2.0 +588 36 training.batch_size 0.0 +588 36 training.label_smoothing 0.06120597535693924 +588 37 model.embedding_dim 2.0 +588 37 training.batch_size 2.0 +588 37 training.label_smoothing 0.006373798491501962 +588 38 model.embedding_dim 0.0 +588 38 training.batch_size 0.0 +588 38 training.label_smoothing 0.03805848379347331 +588 39 model.embedding_dim 1.0 +588 39 training.batch_size 1.0 +588 39 training.label_smoothing 0.3646868814829175 +588 40 model.embedding_dim 0.0 +588 40 training.batch_size 1.0 +588 40 training.label_smoothing 0.008245343640570087 +588 41 model.embedding_dim 2.0 +588 41 training.batch_size 0.0 +588 41 training.label_smoothing 0.07671069582023253 +588 42 model.embedding_dim 2.0 +588 42 training.batch_size 2.0 +588 42 training.label_smoothing 0.14593736054565026 +588 43 model.embedding_dim 1.0 +588 43 training.batch_size 1.0 +588 43 training.label_smoothing 0.011337906863548951 +588 44 model.embedding_dim 2.0 +588 44 training.batch_size 1.0 +588 44 training.label_smoothing 0.026695872193924376 +588 45 model.embedding_dim 1.0 +588 45 training.batch_size 0.0 +588 45 training.label_smoothing 0.005320292538219494 +588 46 model.embedding_dim 2.0 +588 46 training.batch_size 2.0 +588 46 training.label_smoothing 
0.006401732697009773 +588 47 model.embedding_dim 2.0 +588 47 training.batch_size 1.0 +588 47 training.label_smoothing 0.11031119693928873 +588 48 model.embedding_dim 2.0 +588 48 training.batch_size 2.0 +588 48 training.label_smoothing 0.49121132416489166 +588 49 model.embedding_dim 1.0 +588 49 training.batch_size 2.0 +588 49 training.label_smoothing 0.4859973637262203 +588 50 model.embedding_dim 1.0 +588 50 training.batch_size 1.0 +588 50 training.label_smoothing 0.029582114935181827 +588 51 model.embedding_dim 0.0 +588 51 training.batch_size 0.0 +588 51 training.label_smoothing 0.8532749248081336 +588 52 model.embedding_dim 2.0 +588 52 training.batch_size 1.0 +588 52 training.label_smoothing 0.010296467959044717 +588 53 model.embedding_dim 1.0 +588 53 training.batch_size 1.0 +588 53 training.label_smoothing 0.16684301510613064 +588 54 model.embedding_dim 0.0 +588 54 training.batch_size 1.0 +588 54 training.label_smoothing 0.017303450945158665 +588 55 model.embedding_dim 1.0 +588 55 training.batch_size 0.0 +588 55 training.label_smoothing 0.010135876244864521 +588 56 model.embedding_dim 1.0 +588 56 training.batch_size 1.0 +588 56 training.label_smoothing 0.025980519459969112 +588 57 model.embedding_dim 0.0 +588 57 training.batch_size 1.0 +588 57 training.label_smoothing 0.006929071256324684 +588 58 model.embedding_dim 2.0 +588 58 training.batch_size 1.0 +588 58 training.label_smoothing 0.14673588434991422 +588 59 model.embedding_dim 1.0 +588 59 training.batch_size 1.0 +588 59 training.label_smoothing 0.02087148420923627 +588 60 model.embedding_dim 1.0 +588 60 training.batch_size 1.0 +588 60 training.label_smoothing 0.09607570220773841 +588 61 model.embedding_dim 0.0 +588 61 training.batch_size 1.0 +588 61 training.label_smoothing 0.006634063697972749 +588 62 model.embedding_dim 1.0 +588 62 training.batch_size 1.0 +588 62 training.label_smoothing 0.006430993308360875 +588 63 model.embedding_dim 0.0 +588 63 training.batch_size 2.0 +588 63 training.label_smoothing 
0.11138623973232037 +588 64 model.embedding_dim 0.0 +588 64 training.batch_size 1.0 +588 64 training.label_smoothing 0.03232536635392182 +588 65 model.embedding_dim 0.0 +588 65 training.batch_size 0.0 +588 65 training.label_smoothing 0.0014507381469104118 +588 66 model.embedding_dim 0.0 +588 66 training.batch_size 2.0 +588 66 training.label_smoothing 0.13959861361267653 +588 67 model.embedding_dim 1.0 +588 67 training.batch_size 2.0 +588 67 training.label_smoothing 0.029822159492099043 +588 68 model.embedding_dim 0.0 +588 68 training.batch_size 2.0 +588 68 training.label_smoothing 0.003506551370823321 +588 69 model.embedding_dim 2.0 +588 69 training.batch_size 1.0 +588 69 training.label_smoothing 0.0839815638589262 +588 70 model.embedding_dim 2.0 +588 70 training.batch_size 0.0 +588 70 training.label_smoothing 0.0024174654318294135 +588 71 model.embedding_dim 1.0 +588 71 training.batch_size 1.0 +588 71 training.label_smoothing 0.004178519351821319 +588 72 model.embedding_dim 1.0 +588 72 training.batch_size 0.0 +588 72 training.label_smoothing 0.14801213980651076 +588 73 model.embedding_dim 2.0 +588 73 training.batch_size 2.0 +588 73 training.label_smoothing 0.003750816719777177 +588 74 model.embedding_dim 2.0 +588 74 training.batch_size 0.0 +588 74 training.label_smoothing 0.011567515898891082 +588 75 model.embedding_dim 0.0 +588 75 training.batch_size 1.0 +588 75 training.label_smoothing 0.5408769067942742 +588 76 model.embedding_dim 1.0 +588 76 training.batch_size 0.0 +588 76 training.label_smoothing 0.10519804396166818 +588 77 model.embedding_dim 0.0 +588 77 training.batch_size 1.0 +588 77 training.label_smoothing 0.007075024344325597 +588 78 model.embedding_dim 2.0 +588 78 training.batch_size 1.0 +588 78 training.label_smoothing 0.010735865126792405 +588 79 model.embedding_dim 2.0 +588 79 training.batch_size 0.0 +588 79 training.label_smoothing 0.28281468330502596 +588 80 model.embedding_dim 2.0 +588 80 training.batch_size 0.0 +588 80 training.label_smoothing 
0.09220170361524205 +588 81 model.embedding_dim 1.0 +588 81 training.batch_size 2.0 +588 81 training.label_smoothing 0.6579231701775242 +588 82 model.embedding_dim 0.0 +588 82 training.batch_size 0.0 +588 82 training.label_smoothing 0.05824260675748964 +588 83 model.embedding_dim 1.0 +588 83 training.batch_size 2.0 +588 83 training.label_smoothing 0.21783968538938478 +588 84 model.embedding_dim 2.0 +588 84 training.batch_size 1.0 +588 84 training.label_smoothing 0.0014327167294281673 +588 85 model.embedding_dim 1.0 +588 85 training.batch_size 1.0 +588 85 training.label_smoothing 0.0245262928214906 +588 86 model.embedding_dim 1.0 +588 86 training.batch_size 2.0 +588 86 training.label_smoothing 0.06603078439763595 +588 87 model.embedding_dim 1.0 +588 87 training.batch_size 0.0 +588 87 training.label_smoothing 0.08933899974105788 +588 88 model.embedding_dim 0.0 +588 88 training.batch_size 2.0 +588 88 training.label_smoothing 0.1194437416955954 +588 89 model.embedding_dim 0.0 +588 89 training.batch_size 2.0 +588 89 training.label_smoothing 0.0024828591361973833 +588 90 model.embedding_dim 2.0 +588 90 training.batch_size 0.0 +588 90 training.label_smoothing 0.12877285466112992 +588 91 model.embedding_dim 2.0 +588 91 training.batch_size 0.0 +588 91 training.label_smoothing 0.3053950911128349 +588 92 model.embedding_dim 0.0 +588 92 training.batch_size 0.0 +588 92 training.label_smoothing 0.009933127904543971 +588 93 model.embedding_dim 1.0 +588 93 training.batch_size 0.0 +588 93 training.label_smoothing 0.2998425827276183 +588 94 model.embedding_dim 2.0 +588 94 training.batch_size 2.0 +588 94 training.label_smoothing 0.008540909264486128 +588 95 model.embedding_dim 0.0 +588 95 training.batch_size 2.0 +588 95 training.label_smoothing 0.05306396310460394 +588 96 model.embedding_dim 2.0 +588 96 training.batch_size 1.0 +588 96 training.label_smoothing 0.001008457020793323 +588 97 model.embedding_dim 2.0 +588 97 training.batch_size 1.0 +588 97 training.label_smoothing 
0.38479581663342177 +588 98 model.embedding_dim 2.0 +588 98 training.batch_size 0.0 +588 98 training.label_smoothing 0.0020953298465321204 +588 99 model.embedding_dim 1.0 +588 99 training.batch_size 0.0 +588 99 training.label_smoothing 0.5159611903224157 +588 100 model.embedding_dim 1.0 +588 100 training.batch_size 2.0 +588 100 training.label_smoothing 0.001955633422706558 +588 1 dataset """kinships""" +588 1 model """rotate""" +588 1 loss """crossentropy""" +588 1 regularizer """no""" +588 1 optimizer """adadelta""" +588 1 training_loop """lcwa""" +588 1 evaluator """rankbased""" +588 2 dataset """kinships""" +588 2 model """rotate""" +588 2 loss """crossentropy""" +588 2 regularizer """no""" +588 2 optimizer """adadelta""" +588 2 training_loop """lcwa""" +588 2 evaluator """rankbased""" +588 3 dataset """kinships""" +588 3 model """rotate""" +588 3 loss """crossentropy""" +588 3 regularizer """no""" +588 3 optimizer """adadelta""" +588 3 training_loop """lcwa""" +588 3 evaluator """rankbased""" +588 4 dataset """kinships""" +588 4 model """rotate""" +588 4 loss """crossentropy""" +588 4 regularizer """no""" +588 4 optimizer """adadelta""" +588 4 training_loop """lcwa""" +588 4 evaluator """rankbased""" +588 5 dataset """kinships""" +588 5 model """rotate""" +588 5 loss """crossentropy""" +588 5 regularizer """no""" +588 5 optimizer """adadelta""" +588 5 training_loop """lcwa""" +588 5 evaluator """rankbased""" +588 6 dataset """kinships""" +588 6 model """rotate""" +588 6 loss """crossentropy""" +588 6 regularizer """no""" +588 6 optimizer """adadelta""" +588 6 training_loop """lcwa""" +588 6 evaluator """rankbased""" +588 7 dataset """kinships""" +588 7 model """rotate""" +588 7 loss """crossentropy""" +588 7 regularizer """no""" +588 7 optimizer """adadelta""" +588 7 training_loop """lcwa""" +588 7 evaluator """rankbased""" +588 8 dataset """kinships""" +588 8 model """rotate""" +588 8 loss """crossentropy""" +588 8 regularizer """no""" +588 8 optimizer 
"""adadelta""" +588 8 training_loop """lcwa""" +588 8 evaluator """rankbased""" +588 9 dataset """kinships""" +588 9 model """rotate""" +588 9 loss """crossentropy""" +588 9 regularizer """no""" +588 9 optimizer """adadelta""" +588 9 training_loop """lcwa""" +588 9 evaluator """rankbased""" +588 10 dataset """kinships""" +588 10 model """rotate""" +588 10 loss """crossentropy""" +588 10 regularizer """no""" +588 10 optimizer """adadelta""" +588 10 training_loop """lcwa""" +588 10 evaluator """rankbased""" +588 11 dataset """kinships""" +588 11 model """rotate""" +588 11 loss """crossentropy""" +588 11 regularizer """no""" +588 11 optimizer """adadelta""" +588 11 training_loop """lcwa""" +588 11 evaluator """rankbased""" +588 12 dataset """kinships""" +588 12 model """rotate""" +588 12 loss """crossentropy""" +588 12 regularizer """no""" +588 12 optimizer """adadelta""" +588 12 training_loop """lcwa""" +588 12 evaluator """rankbased""" +588 13 dataset """kinships""" +588 13 model """rotate""" +588 13 loss """crossentropy""" +588 13 regularizer """no""" +588 13 optimizer """adadelta""" +588 13 training_loop """lcwa""" +588 13 evaluator """rankbased""" +588 14 dataset """kinships""" +588 14 model """rotate""" +588 14 loss """crossentropy""" +588 14 regularizer """no""" +588 14 optimizer """adadelta""" +588 14 training_loop """lcwa""" +588 14 evaluator """rankbased""" +588 15 dataset """kinships""" +588 15 model """rotate""" +588 15 loss """crossentropy""" +588 15 regularizer """no""" +588 15 optimizer """adadelta""" +588 15 training_loop """lcwa""" +588 15 evaluator """rankbased""" +588 16 dataset """kinships""" +588 16 model """rotate""" +588 16 loss """crossentropy""" +588 16 regularizer """no""" +588 16 optimizer """adadelta""" +588 16 training_loop """lcwa""" +588 16 evaluator """rankbased""" +588 17 dataset """kinships""" +588 17 model """rotate""" +588 17 loss """crossentropy""" +588 17 regularizer """no""" +588 17 optimizer """adadelta""" +588 17 training_loop 
"""lcwa""" +588 17 evaluator """rankbased""" +588 18 dataset """kinships""" +588 18 model """rotate""" +588 18 loss """crossentropy""" +588 18 regularizer """no""" +588 18 optimizer """adadelta""" +588 18 training_loop """lcwa""" +588 18 evaluator """rankbased""" +588 19 dataset """kinships""" +588 19 model """rotate""" +588 19 loss """crossentropy""" +588 19 regularizer """no""" +588 19 optimizer """adadelta""" +588 19 training_loop """lcwa""" +588 19 evaluator """rankbased""" +588 20 dataset """kinships""" +588 20 model """rotate""" +588 20 loss """crossentropy""" +588 20 regularizer """no""" +588 20 optimizer """adadelta""" +588 20 training_loop """lcwa""" +588 20 evaluator """rankbased""" +588 21 dataset """kinships""" +588 21 model """rotate""" +588 21 loss """crossentropy""" +588 21 regularizer """no""" +588 21 optimizer """adadelta""" +588 21 training_loop """lcwa""" +588 21 evaluator """rankbased""" +588 22 dataset """kinships""" +588 22 model """rotate""" +588 22 loss """crossentropy""" +588 22 regularizer """no""" +588 22 optimizer """adadelta""" +588 22 training_loop """lcwa""" +588 22 evaluator """rankbased""" +588 23 dataset """kinships""" +588 23 model """rotate""" +588 23 loss """crossentropy""" +588 23 regularizer """no""" +588 23 optimizer """adadelta""" +588 23 training_loop """lcwa""" +588 23 evaluator """rankbased""" +588 24 dataset """kinships""" +588 24 model """rotate""" +588 24 loss """crossentropy""" +588 24 regularizer """no""" +588 24 optimizer """adadelta""" +588 24 training_loop """lcwa""" +588 24 evaluator """rankbased""" +588 25 dataset """kinships""" +588 25 model """rotate""" +588 25 loss """crossentropy""" +588 25 regularizer """no""" +588 25 optimizer """adadelta""" +588 25 training_loop """lcwa""" +588 25 evaluator """rankbased""" +588 26 dataset """kinships""" +588 26 model """rotate""" +588 26 loss """crossentropy""" +588 26 regularizer """no""" +588 26 optimizer """adadelta""" +588 26 training_loop """lcwa""" +588 26 evaluator 
"""rankbased""" +588 27 dataset """kinships""" +588 27 model """rotate""" +588 27 loss """crossentropy""" +588 27 regularizer """no""" +588 27 optimizer """adadelta""" +588 27 training_loop """lcwa""" +588 27 evaluator """rankbased""" +588 28 dataset """kinships""" +588 28 model """rotate""" +588 28 loss """crossentropy""" +588 28 regularizer """no""" +588 28 optimizer """adadelta""" +588 28 training_loop """lcwa""" +588 28 evaluator """rankbased""" +588 29 dataset """kinships""" +588 29 model """rotate""" +588 29 loss """crossentropy""" +588 29 regularizer """no""" +588 29 optimizer """adadelta""" +588 29 training_loop """lcwa""" +588 29 evaluator """rankbased""" +588 30 dataset """kinships""" +588 30 model """rotate""" +588 30 loss """crossentropy""" +588 30 regularizer """no""" +588 30 optimizer """adadelta""" +588 30 training_loop """lcwa""" +588 30 evaluator """rankbased""" +588 31 dataset """kinships""" +588 31 model """rotate""" +588 31 loss """crossentropy""" +588 31 regularizer """no""" +588 31 optimizer """adadelta""" +588 31 training_loop """lcwa""" +588 31 evaluator """rankbased""" +588 32 dataset """kinships""" +588 32 model """rotate""" +588 32 loss """crossentropy""" +588 32 regularizer """no""" +588 32 optimizer """adadelta""" +588 32 training_loop """lcwa""" +588 32 evaluator """rankbased""" +588 33 dataset """kinships""" +588 33 model """rotate""" +588 33 loss """crossentropy""" +588 33 regularizer """no""" +588 33 optimizer """adadelta""" +588 33 training_loop """lcwa""" +588 33 evaluator """rankbased""" +588 34 dataset """kinships""" +588 34 model """rotate""" +588 34 loss """crossentropy""" +588 34 regularizer """no""" +588 34 optimizer """adadelta""" +588 34 training_loop """lcwa""" +588 34 evaluator """rankbased""" +588 35 dataset """kinships""" +588 35 model """rotate""" +588 35 loss """crossentropy""" +588 35 regularizer """no""" +588 35 optimizer """adadelta""" +588 35 training_loop """lcwa""" +588 35 evaluator """rankbased""" +588 36 
dataset """kinships""" +588 36 model """rotate""" +588 36 loss """crossentropy""" +588 36 regularizer """no""" +588 36 optimizer """adadelta""" +588 36 training_loop """lcwa""" +588 36 evaluator """rankbased""" +588 37 dataset """kinships""" +588 37 model """rotate""" +588 37 loss """crossentropy""" +588 37 regularizer """no""" +588 37 optimizer """adadelta""" +588 37 training_loop """lcwa""" +588 37 evaluator """rankbased""" +588 38 dataset """kinships""" +588 38 model """rotate""" +588 38 loss """crossentropy""" +588 38 regularizer """no""" +588 38 optimizer """adadelta""" +588 38 training_loop """lcwa""" +588 38 evaluator """rankbased""" +588 39 dataset """kinships""" +588 39 model """rotate""" +588 39 loss """crossentropy""" +588 39 regularizer """no""" +588 39 optimizer """adadelta""" +588 39 training_loop """lcwa""" +588 39 evaluator """rankbased""" +588 40 dataset """kinships""" +588 40 model """rotate""" +588 40 loss """crossentropy""" +588 40 regularizer """no""" +588 40 optimizer """adadelta""" +588 40 training_loop """lcwa""" +588 40 evaluator """rankbased""" +588 41 dataset """kinships""" +588 41 model """rotate""" +588 41 loss """crossentropy""" +588 41 regularizer """no""" +588 41 optimizer """adadelta""" +588 41 training_loop """lcwa""" +588 41 evaluator """rankbased""" +588 42 dataset """kinships""" +588 42 model """rotate""" +588 42 loss """crossentropy""" +588 42 regularizer """no""" +588 42 optimizer """adadelta""" +588 42 training_loop """lcwa""" +588 42 evaluator """rankbased""" +588 43 dataset """kinships""" +588 43 model """rotate""" +588 43 loss """crossentropy""" +588 43 regularizer """no""" +588 43 optimizer """adadelta""" +588 43 training_loop """lcwa""" +588 43 evaluator """rankbased""" +588 44 dataset """kinships""" +588 44 model """rotate""" +588 44 loss """crossentropy""" +588 44 regularizer """no""" +588 44 optimizer """adadelta""" +588 44 training_loop """lcwa""" +588 44 evaluator """rankbased""" +588 45 dataset """kinships""" +588 
45 model """rotate""" +588 45 loss """crossentropy""" +588 45 regularizer """no""" +588 45 optimizer """adadelta""" +588 45 training_loop """lcwa""" +588 45 evaluator """rankbased""" +588 46 dataset """kinships""" +588 46 model """rotate""" +588 46 loss """crossentropy""" +588 46 regularizer """no""" +588 46 optimizer """adadelta""" +588 46 training_loop """lcwa""" +588 46 evaluator """rankbased""" +588 47 dataset """kinships""" +588 47 model """rotate""" +588 47 loss """crossentropy""" +588 47 regularizer """no""" +588 47 optimizer """adadelta""" +588 47 training_loop """lcwa""" +588 47 evaluator """rankbased""" +588 48 dataset """kinships""" +588 48 model """rotate""" +588 48 loss """crossentropy""" +588 48 regularizer """no""" +588 48 optimizer """adadelta""" +588 48 training_loop """lcwa""" +588 48 evaluator """rankbased""" +588 49 dataset """kinships""" +588 49 model """rotate""" +588 49 loss """crossentropy""" +588 49 regularizer """no""" +588 49 optimizer """adadelta""" +588 49 training_loop """lcwa""" +588 49 evaluator """rankbased""" +588 50 dataset """kinships""" +588 50 model """rotate""" +588 50 loss """crossentropy""" +588 50 regularizer """no""" +588 50 optimizer """adadelta""" +588 50 training_loop """lcwa""" +588 50 evaluator """rankbased""" +588 51 dataset """kinships""" +588 51 model """rotate""" +588 51 loss """crossentropy""" +588 51 regularizer """no""" +588 51 optimizer """adadelta""" +588 51 training_loop """lcwa""" +588 51 evaluator """rankbased""" +588 52 dataset """kinships""" +588 52 model """rotate""" +588 52 loss """crossentropy""" +588 52 regularizer """no""" +588 52 optimizer """adadelta""" +588 52 training_loop """lcwa""" +588 52 evaluator """rankbased""" +588 53 dataset """kinships""" +588 53 model """rotate""" +588 53 loss """crossentropy""" +588 53 regularizer """no""" +588 53 optimizer """adadelta""" +588 53 training_loop """lcwa""" +588 53 evaluator """rankbased""" +588 54 dataset """kinships""" +588 54 model """rotate""" +588 
54 loss """crossentropy""" +588 54 regularizer """no""" +588 54 optimizer """adadelta""" +588 54 training_loop """lcwa""" +588 54 evaluator """rankbased""" +588 55 dataset """kinships""" +588 55 model """rotate""" +588 55 loss """crossentropy""" +588 55 regularizer """no""" +588 55 optimizer """adadelta""" +588 55 training_loop """lcwa""" +588 55 evaluator """rankbased""" +588 56 dataset """kinships""" +588 56 model """rotate""" +588 56 loss """crossentropy""" +588 56 regularizer """no""" +588 56 optimizer """adadelta""" +588 56 training_loop """lcwa""" +588 56 evaluator """rankbased""" +588 57 dataset """kinships""" +588 57 model """rotate""" +588 57 loss """crossentropy""" +588 57 regularizer """no""" +588 57 optimizer """adadelta""" +588 57 training_loop """lcwa""" +588 57 evaluator """rankbased""" +588 58 dataset """kinships""" +588 58 model """rotate""" +588 58 loss """crossentropy""" +588 58 regularizer """no""" +588 58 optimizer """adadelta""" +588 58 training_loop """lcwa""" +588 58 evaluator """rankbased""" +588 59 dataset """kinships""" +588 59 model """rotate""" +588 59 loss """crossentropy""" +588 59 regularizer """no""" +588 59 optimizer """adadelta""" +588 59 training_loop """lcwa""" +588 59 evaluator """rankbased""" +588 60 dataset """kinships""" +588 60 model """rotate""" +588 60 loss """crossentropy""" +588 60 regularizer """no""" +588 60 optimizer """adadelta""" +588 60 training_loop """lcwa""" +588 60 evaluator """rankbased""" +588 61 dataset """kinships""" +588 61 model """rotate""" +588 61 loss """crossentropy""" +588 61 regularizer """no""" +588 61 optimizer """adadelta""" +588 61 training_loop """lcwa""" +588 61 evaluator """rankbased""" +588 62 dataset """kinships""" +588 62 model """rotate""" +588 62 loss """crossentropy""" +588 62 regularizer """no""" +588 62 optimizer """adadelta""" +588 62 training_loop """lcwa""" +588 62 evaluator """rankbased""" +588 63 dataset """kinships""" +588 63 model """rotate""" +588 63 loss """crossentropy""" 
+588 63 regularizer """no""" +588 63 optimizer """adadelta""" +588 63 training_loop """lcwa""" +588 63 evaluator """rankbased""" +588 64 dataset """kinships""" +588 64 model """rotate""" +588 64 loss """crossentropy""" +588 64 regularizer """no""" +588 64 optimizer """adadelta""" +588 64 training_loop """lcwa""" +588 64 evaluator """rankbased""" +588 65 dataset """kinships""" +588 65 model """rotate""" +588 65 loss """crossentropy""" +588 65 regularizer """no""" +588 65 optimizer """adadelta""" +588 65 training_loop """lcwa""" +588 65 evaluator """rankbased""" +588 66 dataset """kinships""" +588 66 model """rotate""" +588 66 loss """crossentropy""" +588 66 regularizer """no""" +588 66 optimizer """adadelta""" +588 66 training_loop """lcwa""" +588 66 evaluator """rankbased""" +588 67 dataset """kinships""" +588 67 model """rotate""" +588 67 loss """crossentropy""" +588 67 regularizer """no""" +588 67 optimizer """adadelta""" +588 67 training_loop """lcwa""" +588 67 evaluator """rankbased""" +588 68 dataset """kinships""" +588 68 model """rotate""" +588 68 loss """crossentropy""" +588 68 regularizer """no""" +588 68 optimizer """adadelta""" +588 68 training_loop """lcwa""" +588 68 evaluator """rankbased""" +588 69 dataset """kinships""" +588 69 model """rotate""" +588 69 loss """crossentropy""" +588 69 regularizer """no""" +588 69 optimizer """adadelta""" +588 69 training_loop """lcwa""" +588 69 evaluator """rankbased""" +588 70 dataset """kinships""" +588 70 model """rotate""" +588 70 loss """crossentropy""" +588 70 regularizer """no""" +588 70 optimizer """adadelta""" +588 70 training_loop """lcwa""" +588 70 evaluator """rankbased""" +588 71 dataset """kinships""" +588 71 model """rotate""" +588 71 loss """crossentropy""" +588 71 regularizer """no""" +588 71 optimizer """adadelta""" +588 71 training_loop """lcwa""" +588 71 evaluator """rankbased""" +588 72 dataset """kinships""" +588 72 model """rotate""" +588 72 loss """crossentropy""" +588 72 regularizer """no""" 
+588 72 optimizer """adadelta""" +588 72 training_loop """lcwa""" +588 72 evaluator """rankbased""" +588 73 dataset """kinships""" +588 73 model """rotate""" +588 73 loss """crossentropy""" +588 73 regularizer """no""" +588 73 optimizer """adadelta""" +588 73 training_loop """lcwa""" +588 73 evaluator """rankbased""" +588 74 dataset """kinships""" +588 74 model """rotate""" +588 74 loss """crossentropy""" +588 74 regularizer """no""" +588 74 optimizer """adadelta""" +588 74 training_loop """lcwa""" +588 74 evaluator """rankbased""" +588 75 dataset """kinships""" +588 75 model """rotate""" +588 75 loss """crossentropy""" +588 75 regularizer """no""" +588 75 optimizer """adadelta""" +588 75 training_loop """lcwa""" +588 75 evaluator """rankbased""" +588 76 dataset """kinships""" +588 76 model """rotate""" +588 76 loss """crossentropy""" +588 76 regularizer """no""" +588 76 optimizer """adadelta""" +588 76 training_loop """lcwa""" +588 76 evaluator """rankbased""" +588 77 dataset """kinships""" +588 77 model """rotate""" +588 77 loss """crossentropy""" +588 77 regularizer """no""" +588 77 optimizer """adadelta""" +588 77 training_loop """lcwa""" +588 77 evaluator """rankbased""" +588 78 dataset """kinships""" +588 78 model """rotate""" +588 78 loss """crossentropy""" +588 78 regularizer """no""" +588 78 optimizer """adadelta""" +588 78 training_loop """lcwa""" +588 78 evaluator """rankbased""" +588 79 dataset """kinships""" +588 79 model """rotate""" +588 79 loss """crossentropy""" +588 79 regularizer """no""" +588 79 optimizer """adadelta""" +588 79 training_loop """lcwa""" +588 79 evaluator """rankbased""" +588 80 dataset """kinships""" +588 80 model """rotate""" +588 80 loss """crossentropy""" +588 80 regularizer """no""" +588 80 optimizer """adadelta""" +588 80 training_loop """lcwa""" +588 80 evaluator """rankbased""" +588 81 dataset """kinships""" +588 81 model """rotate""" +588 81 loss """crossentropy""" +588 81 regularizer """no""" +588 81 optimizer 
"""adadelta""" +588 81 training_loop """lcwa""" +588 81 evaluator """rankbased""" +588 82 dataset """kinships""" +588 82 model """rotate""" +588 82 loss """crossentropy""" +588 82 regularizer """no""" +588 82 optimizer """adadelta""" +588 82 training_loop """lcwa""" +588 82 evaluator """rankbased""" +588 83 dataset """kinships""" +588 83 model """rotate""" +588 83 loss """crossentropy""" +588 83 regularizer """no""" +588 83 optimizer """adadelta""" +588 83 training_loop """lcwa""" +588 83 evaluator """rankbased""" +588 84 dataset """kinships""" +588 84 model """rotate""" +588 84 loss """crossentropy""" +588 84 regularizer """no""" +588 84 optimizer """adadelta""" +588 84 training_loop """lcwa""" +588 84 evaluator """rankbased""" +588 85 dataset """kinships""" +588 85 model """rotate""" +588 85 loss """crossentropy""" +588 85 regularizer """no""" +588 85 optimizer """adadelta""" +588 85 training_loop """lcwa""" +588 85 evaluator """rankbased""" +588 86 dataset """kinships""" +588 86 model """rotate""" +588 86 loss """crossentropy""" +588 86 regularizer """no""" +588 86 optimizer """adadelta""" +588 86 training_loop """lcwa""" +588 86 evaluator """rankbased""" +588 87 dataset """kinships""" +588 87 model """rotate""" +588 87 loss """crossentropy""" +588 87 regularizer """no""" +588 87 optimizer """adadelta""" +588 87 training_loop """lcwa""" +588 87 evaluator """rankbased""" +588 88 dataset """kinships""" +588 88 model """rotate""" +588 88 loss """crossentropy""" +588 88 regularizer """no""" +588 88 optimizer """adadelta""" +588 88 training_loop """lcwa""" +588 88 evaluator """rankbased""" +588 89 dataset """kinships""" +588 89 model """rotate""" +588 89 loss """crossentropy""" +588 89 regularizer """no""" +588 89 optimizer """adadelta""" +588 89 training_loop """lcwa""" +588 89 evaluator """rankbased""" +588 90 dataset """kinships""" +588 90 model """rotate""" +588 90 loss """crossentropy""" +588 90 regularizer """no""" +588 90 optimizer """adadelta""" +588 90 
training_loop """lcwa""" +588 90 evaluator """rankbased""" +588 91 dataset """kinships""" +588 91 model """rotate""" +588 91 loss """crossentropy""" +588 91 regularizer """no""" +588 91 optimizer """adadelta""" +588 91 training_loop """lcwa""" +588 91 evaluator """rankbased""" +588 92 dataset """kinships""" +588 92 model """rotate""" +588 92 loss """crossentropy""" +588 92 regularizer """no""" +588 92 optimizer """adadelta""" +588 92 training_loop """lcwa""" +588 92 evaluator """rankbased""" +588 93 dataset """kinships""" +588 93 model """rotate""" +588 93 loss """crossentropy""" +588 93 regularizer """no""" +588 93 optimizer """adadelta""" +588 93 training_loop """lcwa""" +588 93 evaluator """rankbased""" +588 94 dataset """kinships""" +588 94 model """rotate""" +588 94 loss """crossentropy""" +588 94 regularizer """no""" +588 94 optimizer """adadelta""" +588 94 training_loop """lcwa""" +588 94 evaluator """rankbased""" +588 95 dataset """kinships""" +588 95 model """rotate""" +588 95 loss """crossentropy""" +588 95 regularizer """no""" +588 95 optimizer """adadelta""" +588 95 training_loop """lcwa""" +588 95 evaluator """rankbased""" +588 96 dataset """kinships""" +588 96 model """rotate""" +588 96 loss """crossentropy""" +588 96 regularizer """no""" +588 96 optimizer """adadelta""" +588 96 training_loop """lcwa""" +588 96 evaluator """rankbased""" +588 97 dataset """kinships""" +588 97 model """rotate""" +588 97 loss """crossentropy""" +588 97 regularizer """no""" +588 97 optimizer """adadelta""" +588 97 training_loop """lcwa""" +588 97 evaluator """rankbased""" +588 98 dataset """kinships""" +588 98 model """rotate""" +588 98 loss """crossentropy""" +588 98 regularizer """no""" +588 98 optimizer """adadelta""" +588 98 training_loop """lcwa""" +588 98 evaluator """rankbased""" +588 99 dataset """kinships""" +588 99 model """rotate""" +588 99 loss """crossentropy""" +588 99 regularizer """no""" +588 99 optimizer """adadelta""" +588 99 training_loop """lcwa""" 
+588 99 evaluator """rankbased""" +588 100 dataset """kinships""" +588 100 model """rotate""" +588 100 loss """crossentropy""" +588 100 regularizer """no""" +588 100 optimizer """adadelta""" +588 100 training_loop """lcwa""" +588 100 evaluator """rankbased""" +589 1 model.embedding_dim 0.0 +589 1 negative_sampler.num_negs_per_pos 26.0 +589 1 training.batch_size 1.0 +589 2 model.embedding_dim 2.0 +589 2 negative_sampler.num_negs_per_pos 5.0 +589 2 training.batch_size 2.0 +589 3 model.embedding_dim 1.0 +589 3 negative_sampler.num_negs_per_pos 87.0 +589 3 training.batch_size 2.0 +589 4 model.embedding_dim 0.0 +589 4 negative_sampler.num_negs_per_pos 20.0 +589 4 training.batch_size 0.0 +589 5 model.embedding_dim 2.0 +589 5 negative_sampler.num_negs_per_pos 34.0 +589 5 training.batch_size 1.0 +589 6 model.embedding_dim 2.0 +589 6 negative_sampler.num_negs_per_pos 89.0 +589 6 training.batch_size 1.0 +589 7 model.embedding_dim 1.0 +589 7 negative_sampler.num_negs_per_pos 4.0 +589 7 training.batch_size 1.0 +589 8 model.embedding_dim 0.0 +589 8 negative_sampler.num_negs_per_pos 47.0 +589 8 training.batch_size 2.0 +589 9 model.embedding_dim 2.0 +589 9 negative_sampler.num_negs_per_pos 20.0 +589 9 training.batch_size 0.0 +589 10 model.embedding_dim 1.0 +589 10 negative_sampler.num_negs_per_pos 11.0 +589 10 training.batch_size 0.0 +589 11 model.embedding_dim 0.0 +589 11 negative_sampler.num_negs_per_pos 18.0 +589 11 training.batch_size 1.0 +589 12 model.embedding_dim 2.0 +589 12 negative_sampler.num_negs_per_pos 78.0 +589 12 training.batch_size 2.0 +589 13 model.embedding_dim 1.0 +589 13 negative_sampler.num_negs_per_pos 72.0 +589 13 training.batch_size 1.0 +589 14 model.embedding_dim 2.0 +589 14 negative_sampler.num_negs_per_pos 81.0 +589 14 training.batch_size 2.0 +589 15 model.embedding_dim 1.0 +589 15 negative_sampler.num_negs_per_pos 84.0 +589 15 training.batch_size 0.0 +589 16 model.embedding_dim 1.0 +589 16 negative_sampler.num_negs_per_pos 72.0 +589 16 
training.batch_size 1.0 +589 17 model.embedding_dim 0.0 +589 17 negative_sampler.num_negs_per_pos 9.0 +589 17 training.batch_size 2.0 +589 18 model.embedding_dim 2.0 +589 18 negative_sampler.num_negs_per_pos 44.0 +589 18 training.batch_size 0.0 +589 19 model.embedding_dim 1.0 +589 19 negative_sampler.num_negs_per_pos 17.0 +589 19 training.batch_size 0.0 +589 20 model.embedding_dim 2.0 +589 20 negative_sampler.num_negs_per_pos 41.0 +589 20 training.batch_size 1.0 +589 21 model.embedding_dim 1.0 +589 21 negative_sampler.num_negs_per_pos 72.0 +589 21 training.batch_size 0.0 +589 22 model.embedding_dim 1.0 +589 22 negative_sampler.num_negs_per_pos 45.0 +589 22 training.batch_size 1.0 +589 23 model.embedding_dim 2.0 +589 23 negative_sampler.num_negs_per_pos 72.0 +589 23 training.batch_size 1.0 +589 24 model.embedding_dim 2.0 +589 24 negative_sampler.num_negs_per_pos 81.0 +589 24 training.batch_size 0.0 +589 25 model.embedding_dim 2.0 +589 25 negative_sampler.num_negs_per_pos 95.0 +589 25 training.batch_size 1.0 +589 26 model.embedding_dim 0.0 +589 26 negative_sampler.num_negs_per_pos 6.0 +589 26 training.batch_size 2.0 +589 27 model.embedding_dim 2.0 +589 27 negative_sampler.num_negs_per_pos 19.0 +589 27 training.batch_size 0.0 +589 28 model.embedding_dim 1.0 +589 28 negative_sampler.num_negs_per_pos 21.0 +589 28 training.batch_size 0.0 +589 29 model.embedding_dim 2.0 +589 29 negative_sampler.num_negs_per_pos 73.0 +589 29 training.batch_size 1.0 +589 30 model.embedding_dim 2.0 +589 30 negative_sampler.num_negs_per_pos 44.0 +589 30 training.batch_size 2.0 +589 31 model.embedding_dim 1.0 +589 31 negative_sampler.num_negs_per_pos 52.0 +589 31 training.batch_size 1.0 +589 32 model.embedding_dim 2.0 +589 32 negative_sampler.num_negs_per_pos 10.0 +589 32 training.batch_size 1.0 +589 33 model.embedding_dim 2.0 +589 33 negative_sampler.num_negs_per_pos 79.0 +589 33 training.batch_size 0.0 +589 34 model.embedding_dim 2.0 +589 34 negative_sampler.num_negs_per_pos 89.0 +589 34 
training.batch_size 0.0 +589 35 model.embedding_dim 0.0 +589 35 negative_sampler.num_negs_per_pos 98.0 +589 35 training.batch_size 0.0 +589 36 model.embedding_dim 1.0 +589 36 negative_sampler.num_negs_per_pos 90.0 +589 36 training.batch_size 1.0 +589 37 model.embedding_dim 2.0 +589 37 negative_sampler.num_negs_per_pos 45.0 +589 37 training.batch_size 1.0 +589 38 model.embedding_dim 2.0 +589 38 negative_sampler.num_negs_per_pos 84.0 +589 38 training.batch_size 1.0 +589 39 model.embedding_dim 0.0 +589 39 negative_sampler.num_negs_per_pos 85.0 +589 39 training.batch_size 2.0 +589 40 model.embedding_dim 1.0 +589 40 negative_sampler.num_negs_per_pos 89.0 +589 40 training.batch_size 2.0 +589 41 model.embedding_dim 0.0 +589 41 negative_sampler.num_negs_per_pos 6.0 +589 41 training.batch_size 0.0 +589 42 model.embedding_dim 0.0 +589 42 negative_sampler.num_negs_per_pos 48.0 +589 42 training.batch_size 2.0 +589 43 model.embedding_dim 0.0 +589 43 negative_sampler.num_negs_per_pos 14.0 +589 43 training.batch_size 1.0 +589 44 model.embedding_dim 1.0 +589 44 negative_sampler.num_negs_per_pos 69.0 +589 44 training.batch_size 0.0 +589 45 model.embedding_dim 1.0 +589 45 negative_sampler.num_negs_per_pos 75.0 +589 45 training.batch_size 0.0 +589 46 model.embedding_dim 1.0 +589 46 negative_sampler.num_negs_per_pos 76.0 +589 46 training.batch_size 1.0 +589 47 model.embedding_dim 2.0 +589 47 negative_sampler.num_negs_per_pos 18.0 +589 47 training.batch_size 0.0 +589 48 model.embedding_dim 2.0 +589 48 negative_sampler.num_negs_per_pos 72.0 +589 48 training.batch_size 1.0 +589 49 model.embedding_dim 0.0 +589 49 negative_sampler.num_negs_per_pos 60.0 +589 49 training.batch_size 0.0 +589 50 model.embedding_dim 0.0 +589 50 negative_sampler.num_negs_per_pos 19.0 +589 50 training.batch_size 0.0 +589 51 model.embedding_dim 2.0 +589 51 negative_sampler.num_negs_per_pos 41.0 +589 51 training.batch_size 0.0 +589 52 model.embedding_dim 1.0 +589 52 negative_sampler.num_negs_per_pos 70.0 +589 52 
training.batch_size 1.0 +589 53 model.embedding_dim 1.0 +589 53 negative_sampler.num_negs_per_pos 37.0 +589 53 training.batch_size 2.0 +589 54 model.embedding_dim 2.0 +589 54 negative_sampler.num_negs_per_pos 45.0 +589 54 training.batch_size 2.0 +589 55 model.embedding_dim 1.0 +589 55 negative_sampler.num_negs_per_pos 49.0 +589 55 training.batch_size 1.0 +589 56 model.embedding_dim 1.0 +589 56 negative_sampler.num_negs_per_pos 47.0 +589 56 training.batch_size 0.0 +589 57 model.embedding_dim 1.0 +589 57 negative_sampler.num_negs_per_pos 85.0 +589 57 training.batch_size 2.0 +589 58 model.embedding_dim 1.0 +589 58 negative_sampler.num_negs_per_pos 53.0 +589 58 training.batch_size 0.0 +589 59 model.embedding_dim 1.0 +589 59 negative_sampler.num_negs_per_pos 88.0 +589 59 training.batch_size 0.0 +589 60 model.embedding_dim 2.0 +589 60 negative_sampler.num_negs_per_pos 3.0 +589 60 training.batch_size 1.0 +589 61 model.embedding_dim 2.0 +589 61 negative_sampler.num_negs_per_pos 66.0 +589 61 training.batch_size 0.0 +589 62 model.embedding_dim 2.0 +589 62 negative_sampler.num_negs_per_pos 78.0 +589 62 training.batch_size 2.0 +589 63 model.embedding_dim 1.0 +589 63 negative_sampler.num_negs_per_pos 67.0 +589 63 training.batch_size 0.0 +589 64 model.embedding_dim 1.0 +589 64 negative_sampler.num_negs_per_pos 93.0 +589 64 training.batch_size 0.0 +589 65 model.embedding_dim 2.0 +589 65 negative_sampler.num_negs_per_pos 46.0 +589 65 training.batch_size 2.0 +589 66 model.embedding_dim 1.0 +589 66 negative_sampler.num_negs_per_pos 86.0 +589 66 training.batch_size 1.0 +589 67 model.embedding_dim 0.0 +589 67 negative_sampler.num_negs_per_pos 25.0 +589 67 training.batch_size 2.0 +589 68 model.embedding_dim 1.0 +589 68 negative_sampler.num_negs_per_pos 41.0 +589 68 training.batch_size 1.0 +589 69 model.embedding_dim 0.0 +589 69 negative_sampler.num_negs_per_pos 49.0 +589 69 training.batch_size 1.0 +589 70 model.embedding_dim 0.0 +589 70 negative_sampler.num_negs_per_pos 43.0 +589 70 
training.batch_size 0.0 +589 71 model.embedding_dim 0.0 +589 71 negative_sampler.num_negs_per_pos 93.0 +589 71 training.batch_size 0.0 +589 72 model.embedding_dim 0.0 +589 72 negative_sampler.num_negs_per_pos 72.0 +589 72 training.batch_size 0.0 +589 73 model.embedding_dim 1.0 +589 73 negative_sampler.num_negs_per_pos 6.0 +589 73 training.batch_size 0.0 +589 74 model.embedding_dim 2.0 +589 74 negative_sampler.num_negs_per_pos 39.0 +589 74 training.batch_size 0.0 +589 75 model.embedding_dim 1.0 +589 75 negative_sampler.num_negs_per_pos 40.0 +589 75 training.batch_size 0.0 +589 76 model.embedding_dim 1.0 +589 76 negative_sampler.num_negs_per_pos 56.0 +589 76 training.batch_size 1.0 +589 77 model.embedding_dim 2.0 +589 77 negative_sampler.num_negs_per_pos 36.0 +589 77 training.batch_size 1.0 +589 78 model.embedding_dim 1.0 +589 78 negative_sampler.num_negs_per_pos 48.0 +589 78 training.batch_size 2.0 +589 79 model.embedding_dim 0.0 +589 79 negative_sampler.num_negs_per_pos 94.0 +589 79 training.batch_size 0.0 +589 80 model.embedding_dim 2.0 +589 80 negative_sampler.num_negs_per_pos 86.0 +589 80 training.batch_size 2.0 +589 81 model.embedding_dim 1.0 +589 81 negative_sampler.num_negs_per_pos 97.0 +589 81 training.batch_size 2.0 +589 82 model.embedding_dim 1.0 +589 82 negative_sampler.num_negs_per_pos 68.0 +589 82 training.batch_size 1.0 +589 83 model.embedding_dim 1.0 +589 83 negative_sampler.num_negs_per_pos 23.0 +589 83 training.batch_size 2.0 +589 84 model.embedding_dim 0.0 +589 84 negative_sampler.num_negs_per_pos 75.0 +589 84 training.batch_size 0.0 +589 85 model.embedding_dim 1.0 +589 85 negative_sampler.num_negs_per_pos 49.0 +589 85 training.batch_size 1.0 +589 86 model.embedding_dim 1.0 +589 86 negative_sampler.num_negs_per_pos 45.0 +589 86 training.batch_size 1.0 +589 87 model.embedding_dim 2.0 +589 87 negative_sampler.num_negs_per_pos 74.0 +589 87 training.batch_size 0.0 +589 88 model.embedding_dim 2.0 +589 88 negative_sampler.num_negs_per_pos 55.0 +589 88 
training.batch_size 2.0 +589 89 model.embedding_dim 2.0 +589 89 negative_sampler.num_negs_per_pos 44.0 +589 89 training.batch_size 0.0 +589 90 model.embedding_dim 0.0 +589 90 negative_sampler.num_negs_per_pos 48.0 +589 90 training.batch_size 0.0 +589 91 model.embedding_dim 2.0 +589 91 negative_sampler.num_negs_per_pos 45.0 +589 91 training.batch_size 1.0 +589 92 model.embedding_dim 2.0 +589 92 negative_sampler.num_negs_per_pos 63.0 +589 92 training.batch_size 0.0 +589 93 model.embedding_dim 1.0 +589 93 negative_sampler.num_negs_per_pos 21.0 +589 93 training.batch_size 0.0 +589 94 model.embedding_dim 1.0 +589 94 negative_sampler.num_negs_per_pos 2.0 +589 94 training.batch_size 1.0 +589 95 model.embedding_dim 2.0 +589 95 negative_sampler.num_negs_per_pos 64.0 +589 95 training.batch_size 2.0 +589 96 model.embedding_dim 2.0 +589 96 negative_sampler.num_negs_per_pos 91.0 +589 96 training.batch_size 0.0 +589 97 model.embedding_dim 1.0 +589 97 negative_sampler.num_negs_per_pos 52.0 +589 97 training.batch_size 2.0 +589 98 model.embedding_dim 0.0 +589 98 negative_sampler.num_negs_per_pos 41.0 +589 98 training.batch_size 0.0 +589 99 model.embedding_dim 2.0 +589 99 negative_sampler.num_negs_per_pos 61.0 +589 99 training.batch_size 0.0 +589 100 model.embedding_dim 1.0 +589 100 negative_sampler.num_negs_per_pos 61.0 +589 100 training.batch_size 0.0 +589 1 dataset """kinships""" +589 1 model """rotate""" +589 1 loss """bceaftersigmoid""" +589 1 regularizer """no""" +589 1 optimizer """adadelta""" +589 1 training_loop """owa""" +589 1 negative_sampler """basic""" +589 1 evaluator """rankbased""" +589 2 dataset """kinships""" +589 2 model """rotate""" +589 2 loss """bceaftersigmoid""" +589 2 regularizer """no""" +589 2 optimizer """adadelta""" +589 2 training_loop """owa""" +589 2 negative_sampler """basic""" +589 2 evaluator """rankbased""" +589 3 dataset """kinships""" +589 3 model """rotate""" +589 3 loss """bceaftersigmoid""" +589 3 regularizer """no""" +589 3 optimizer 
"""adadelta""" +589 3 training_loop """owa""" +589 3 negative_sampler """basic""" +589 3 evaluator """rankbased""" +589 4 dataset """kinships""" +589 4 model """rotate""" +589 4 loss """bceaftersigmoid""" +589 4 regularizer """no""" +589 4 optimizer """adadelta""" +589 4 training_loop """owa""" +589 4 negative_sampler """basic""" +589 4 evaluator """rankbased""" +589 5 dataset """kinships""" +589 5 model """rotate""" +589 5 loss """bceaftersigmoid""" +589 5 regularizer """no""" +589 5 optimizer """adadelta""" +589 5 training_loop """owa""" +589 5 negative_sampler """basic""" +589 5 evaluator """rankbased""" +589 6 dataset """kinships""" +589 6 model """rotate""" +589 6 loss """bceaftersigmoid""" +589 6 regularizer """no""" +589 6 optimizer """adadelta""" +589 6 training_loop """owa""" +589 6 negative_sampler """basic""" +589 6 evaluator """rankbased""" +589 7 dataset """kinships""" +589 7 model """rotate""" +589 7 loss """bceaftersigmoid""" +589 7 regularizer """no""" +589 7 optimizer """adadelta""" +589 7 training_loop """owa""" +589 7 negative_sampler """basic""" +589 7 evaluator """rankbased""" +589 8 dataset """kinships""" +589 8 model """rotate""" +589 8 loss """bceaftersigmoid""" +589 8 regularizer """no""" +589 8 optimizer """adadelta""" +589 8 training_loop """owa""" +589 8 negative_sampler """basic""" +589 8 evaluator """rankbased""" +589 9 dataset """kinships""" +589 9 model """rotate""" +589 9 loss """bceaftersigmoid""" +589 9 regularizer """no""" +589 9 optimizer """adadelta""" +589 9 training_loop """owa""" +589 9 negative_sampler """basic""" +589 9 evaluator """rankbased""" +589 10 dataset """kinships""" +589 10 model """rotate""" +589 10 loss """bceaftersigmoid""" +589 10 regularizer """no""" +589 10 optimizer """adadelta""" +589 10 training_loop """owa""" +589 10 negative_sampler """basic""" +589 10 evaluator """rankbased""" +589 11 dataset """kinships""" +589 11 model """rotate""" +589 11 loss """bceaftersigmoid""" +589 11 regularizer """no""" +589 
11 optimizer """adadelta""" +589 11 training_loop """owa""" +589 11 negative_sampler """basic""" +589 11 evaluator """rankbased""" +589 12 dataset """kinships""" +589 12 model """rotate""" +589 12 loss """bceaftersigmoid""" +589 12 regularizer """no""" +589 12 optimizer """adadelta""" +589 12 training_loop """owa""" +589 12 negative_sampler """basic""" +589 12 evaluator """rankbased""" +589 13 dataset """kinships""" +589 13 model """rotate""" +589 13 loss """bceaftersigmoid""" +589 13 regularizer """no""" +589 13 optimizer """adadelta""" +589 13 training_loop """owa""" +589 13 negative_sampler """basic""" +589 13 evaluator """rankbased""" +589 14 dataset """kinships""" +589 14 model """rotate""" +589 14 loss """bceaftersigmoid""" +589 14 regularizer """no""" +589 14 optimizer """adadelta""" +589 14 training_loop """owa""" +589 14 negative_sampler """basic""" +589 14 evaluator """rankbased""" +589 15 dataset """kinships""" +589 15 model """rotate""" +589 15 loss """bceaftersigmoid""" +589 15 regularizer """no""" +589 15 optimizer """adadelta""" +589 15 training_loop """owa""" +589 15 negative_sampler """basic""" +589 15 evaluator """rankbased""" +589 16 dataset """kinships""" +589 16 model """rotate""" +589 16 loss """bceaftersigmoid""" +589 16 regularizer """no""" +589 16 optimizer """adadelta""" +589 16 training_loop """owa""" +589 16 negative_sampler """basic""" +589 16 evaluator """rankbased""" +589 17 dataset """kinships""" +589 17 model """rotate""" +589 17 loss """bceaftersigmoid""" +589 17 regularizer """no""" +589 17 optimizer """adadelta""" +589 17 training_loop """owa""" +589 17 negative_sampler """basic""" +589 17 evaluator """rankbased""" +589 18 dataset """kinships""" +589 18 model """rotate""" +589 18 loss """bceaftersigmoid""" +589 18 regularizer """no""" +589 18 optimizer """adadelta""" +589 18 training_loop """owa""" +589 18 negative_sampler """basic""" +589 18 evaluator """rankbased""" +589 19 dataset """kinships""" +589 19 model """rotate""" +589 
19 loss """bceaftersigmoid""" +589 19 regularizer """no""" +589 19 optimizer """adadelta""" +589 19 training_loop """owa""" +589 19 negative_sampler """basic""" +589 19 evaluator """rankbased""" +589 20 dataset """kinships""" +589 20 model """rotate""" +589 20 loss """bceaftersigmoid""" +589 20 regularizer """no""" +589 20 optimizer """adadelta""" +589 20 training_loop """owa""" +589 20 negative_sampler """basic""" +589 20 evaluator """rankbased""" +589 21 dataset """kinships""" +589 21 model """rotate""" +589 21 loss """bceaftersigmoid""" +589 21 regularizer """no""" +589 21 optimizer """adadelta""" +589 21 training_loop """owa""" +589 21 negative_sampler """basic""" +589 21 evaluator """rankbased""" +589 22 dataset """kinships""" +589 22 model """rotate""" +589 22 loss """bceaftersigmoid""" +589 22 regularizer """no""" +589 22 optimizer """adadelta""" +589 22 training_loop """owa""" +589 22 negative_sampler """basic""" +589 22 evaluator """rankbased""" +589 23 dataset """kinships""" +589 23 model """rotate""" +589 23 loss """bceaftersigmoid""" +589 23 regularizer """no""" +589 23 optimizer """adadelta""" +589 23 training_loop """owa""" +589 23 negative_sampler """basic""" +589 23 evaluator """rankbased""" +589 24 dataset """kinships""" +589 24 model """rotate""" +589 24 loss """bceaftersigmoid""" +589 24 regularizer """no""" +589 24 optimizer """adadelta""" +589 24 training_loop """owa""" +589 24 negative_sampler """basic""" +589 24 evaluator """rankbased""" +589 25 dataset """kinships""" +589 25 model """rotate""" +589 25 loss """bceaftersigmoid""" +589 25 regularizer """no""" +589 25 optimizer """adadelta""" +589 25 training_loop """owa""" +589 25 negative_sampler """basic""" +589 25 evaluator """rankbased""" +589 26 dataset """kinships""" +589 26 model """rotate""" +589 26 loss """bceaftersigmoid""" +589 26 regularizer """no""" +589 26 optimizer """adadelta""" +589 26 training_loop """owa""" +589 26 negative_sampler """basic""" +589 26 evaluator 
"""rankbased""" +589 27 dataset """kinships""" +589 27 model """rotate""" +589 27 loss """bceaftersigmoid""" +589 27 regularizer """no""" +589 27 optimizer """adadelta""" +589 27 training_loop """owa""" +589 27 negative_sampler """basic""" +589 27 evaluator """rankbased""" +589 28 dataset """kinships""" +589 28 model """rotate""" +589 28 loss """bceaftersigmoid""" +589 28 regularizer """no""" +589 28 optimizer """adadelta""" +589 28 training_loop """owa""" +589 28 negative_sampler """basic""" +589 28 evaluator """rankbased""" +589 29 dataset """kinships""" +589 29 model """rotate""" +589 29 loss """bceaftersigmoid""" +589 29 regularizer """no""" +589 29 optimizer """adadelta""" +589 29 training_loop """owa""" +589 29 negative_sampler """basic""" +589 29 evaluator """rankbased""" +589 30 dataset """kinships""" +589 30 model """rotate""" +589 30 loss """bceaftersigmoid""" +589 30 regularizer """no""" +589 30 optimizer """adadelta""" +589 30 training_loop """owa""" +589 30 negative_sampler """basic""" +589 30 evaluator """rankbased""" +589 31 dataset """kinships""" +589 31 model """rotate""" +589 31 loss """bceaftersigmoid""" +589 31 regularizer """no""" +589 31 optimizer """adadelta""" +589 31 training_loop """owa""" +589 31 negative_sampler """basic""" +589 31 evaluator """rankbased""" +589 32 dataset """kinships""" +589 32 model """rotate""" +589 32 loss """bceaftersigmoid""" +589 32 regularizer """no""" +589 32 optimizer """adadelta""" +589 32 training_loop """owa""" +589 32 negative_sampler """basic""" +589 32 evaluator """rankbased""" +589 33 dataset """kinships""" +589 33 model """rotate""" +589 33 loss """bceaftersigmoid""" +589 33 regularizer """no""" +589 33 optimizer """adadelta""" +589 33 training_loop """owa""" +589 33 negative_sampler """basic""" +589 33 evaluator """rankbased""" +589 34 dataset """kinships""" +589 34 model """rotate""" +589 34 loss """bceaftersigmoid""" +589 34 regularizer """no""" +589 34 optimizer """adadelta""" +589 34 training_loop 
"""owa""" +589 34 negative_sampler """basic""" +589 34 evaluator """rankbased""" +589 35 dataset """kinships""" +589 35 model """rotate""" +589 35 loss """bceaftersigmoid""" +589 35 regularizer """no""" +589 35 optimizer """adadelta""" +589 35 training_loop """owa""" +589 35 negative_sampler """basic""" +589 35 evaluator """rankbased""" +589 36 dataset """kinships""" +589 36 model """rotate""" +589 36 loss """bceaftersigmoid""" +589 36 regularizer """no""" +589 36 optimizer """adadelta""" +589 36 training_loop """owa""" +589 36 negative_sampler """basic""" +589 36 evaluator """rankbased""" +589 37 dataset """kinships""" +589 37 model """rotate""" +589 37 loss """bceaftersigmoid""" +589 37 regularizer """no""" +589 37 optimizer """adadelta""" +589 37 training_loop """owa""" +589 37 negative_sampler """basic""" +589 37 evaluator """rankbased""" +589 38 dataset """kinships""" +589 38 model """rotate""" +589 38 loss """bceaftersigmoid""" +589 38 regularizer """no""" +589 38 optimizer """adadelta""" +589 38 training_loop """owa""" +589 38 negative_sampler """basic""" +589 38 evaluator """rankbased""" +589 39 dataset """kinships""" +589 39 model """rotate""" +589 39 loss """bceaftersigmoid""" +589 39 regularizer """no""" +589 39 optimizer """adadelta""" +589 39 training_loop """owa""" +589 39 negative_sampler """basic""" +589 39 evaluator """rankbased""" +589 40 dataset """kinships""" +589 40 model """rotate""" +589 40 loss """bceaftersigmoid""" +589 40 regularizer """no""" +589 40 optimizer """adadelta""" +589 40 training_loop """owa""" +589 40 negative_sampler """basic""" +589 40 evaluator """rankbased""" +589 41 dataset """kinships""" +589 41 model """rotate""" +589 41 loss """bceaftersigmoid""" +589 41 regularizer """no""" +589 41 optimizer """adadelta""" +589 41 training_loop """owa""" +589 41 negative_sampler """basic""" +589 41 evaluator """rankbased""" +589 42 dataset """kinships""" +589 42 model """rotate""" +589 42 loss """bceaftersigmoid""" +589 42 regularizer 
"""no""" +589 42 optimizer """adadelta""" +589 42 training_loop """owa""" +589 42 negative_sampler """basic""" +589 42 evaluator """rankbased""" +589 43 dataset """kinships""" +589 43 model """rotate""" +589 43 loss """bceaftersigmoid""" +589 43 regularizer """no""" +589 43 optimizer """adadelta""" +589 43 training_loop """owa""" +589 43 negative_sampler """basic""" +589 43 evaluator """rankbased""" +589 44 dataset """kinships""" +589 44 model """rotate""" +589 44 loss """bceaftersigmoid""" +589 44 regularizer """no""" +589 44 optimizer """adadelta""" +589 44 training_loop """owa""" +589 44 negative_sampler """basic""" +589 44 evaluator """rankbased""" +589 45 dataset """kinships""" +589 45 model """rotate""" +589 45 loss """bceaftersigmoid""" +589 45 regularizer """no""" +589 45 optimizer """adadelta""" +589 45 training_loop """owa""" +589 45 negative_sampler """basic""" +589 45 evaluator """rankbased""" +589 46 dataset """kinships""" +589 46 model """rotate""" +589 46 loss """bceaftersigmoid""" +589 46 regularizer """no""" +589 46 optimizer """adadelta""" +589 46 training_loop """owa""" +589 46 negative_sampler """basic""" +589 46 evaluator """rankbased""" +589 47 dataset """kinships""" +589 47 model """rotate""" +589 47 loss """bceaftersigmoid""" +589 47 regularizer """no""" +589 47 optimizer """adadelta""" +589 47 training_loop """owa""" +589 47 negative_sampler """basic""" +589 47 evaluator """rankbased""" +589 48 dataset """kinships""" +589 48 model """rotate""" +589 48 loss """bceaftersigmoid""" +589 48 regularizer """no""" +589 48 optimizer """adadelta""" +589 48 training_loop """owa""" +589 48 negative_sampler """basic""" +589 48 evaluator """rankbased""" +589 49 dataset """kinships""" +589 49 model """rotate""" +589 49 loss """bceaftersigmoid""" +589 49 regularizer """no""" +589 49 optimizer """adadelta""" +589 49 training_loop """owa""" +589 49 negative_sampler """basic""" +589 49 evaluator """rankbased""" +589 50 dataset """kinships""" +589 50 model 
"""rotate""" +589 50 loss """bceaftersigmoid""" +589 50 regularizer """no""" +589 50 optimizer """adadelta""" +589 50 training_loop """owa""" +589 50 negative_sampler """basic""" +589 50 evaluator """rankbased""" +589 51 dataset """kinships""" +589 51 model """rotate""" +589 51 loss """bceaftersigmoid""" +589 51 regularizer """no""" +589 51 optimizer """adadelta""" +589 51 training_loop """owa""" +589 51 negative_sampler """basic""" +589 51 evaluator """rankbased""" +589 52 dataset """kinships""" +589 52 model """rotate""" +589 52 loss """bceaftersigmoid""" +589 52 regularizer """no""" +589 52 optimizer """adadelta""" +589 52 training_loop """owa""" +589 52 negative_sampler """basic""" +589 52 evaluator """rankbased""" +589 53 dataset """kinships""" +589 53 model """rotate""" +589 53 loss """bceaftersigmoid""" +589 53 regularizer """no""" +589 53 optimizer """adadelta""" +589 53 training_loop """owa""" +589 53 negative_sampler """basic""" +589 53 evaluator """rankbased""" +589 54 dataset """kinships""" +589 54 model """rotate""" +589 54 loss """bceaftersigmoid""" +589 54 regularizer """no""" +589 54 optimizer """adadelta""" +589 54 training_loop """owa""" +589 54 negative_sampler """basic""" +589 54 evaluator """rankbased""" +589 55 dataset """kinships""" +589 55 model """rotate""" +589 55 loss """bceaftersigmoid""" +589 55 regularizer """no""" +589 55 optimizer """adadelta""" +589 55 training_loop """owa""" +589 55 negative_sampler """basic""" +589 55 evaluator """rankbased""" +589 56 dataset """kinships""" +589 56 model """rotate""" +589 56 loss """bceaftersigmoid""" +589 56 regularizer """no""" +589 56 optimizer """adadelta""" +589 56 training_loop """owa""" +589 56 negative_sampler """basic""" +589 56 evaluator """rankbased""" +589 57 dataset """kinships""" +589 57 model """rotate""" +589 57 loss """bceaftersigmoid""" +589 57 regularizer """no""" +589 57 optimizer """adadelta""" +589 57 training_loop """owa""" +589 57 negative_sampler """basic""" +589 57 
evaluator """rankbased""" +589 58 dataset """kinships""" +589 58 model """rotate""" +589 58 loss """bceaftersigmoid""" +589 58 regularizer """no""" +589 58 optimizer """adadelta""" +589 58 training_loop """owa""" +589 58 negative_sampler """basic""" +589 58 evaluator """rankbased""" +589 59 dataset """kinships""" +589 59 model """rotate""" +589 59 loss """bceaftersigmoid""" +589 59 regularizer """no""" +589 59 optimizer """adadelta""" +589 59 training_loop """owa""" +589 59 negative_sampler """basic""" +589 59 evaluator """rankbased""" +589 60 dataset """kinships""" +589 60 model """rotate""" +589 60 loss """bceaftersigmoid""" +589 60 regularizer """no""" +589 60 optimizer """adadelta""" +589 60 training_loop """owa""" +589 60 negative_sampler """basic""" +589 60 evaluator """rankbased""" +589 61 dataset """kinships""" +589 61 model """rotate""" +589 61 loss """bceaftersigmoid""" +589 61 regularizer """no""" +589 61 optimizer """adadelta""" +589 61 training_loop """owa""" +589 61 negative_sampler """basic""" +589 61 evaluator """rankbased""" +589 62 dataset """kinships""" +589 62 model """rotate""" +589 62 loss """bceaftersigmoid""" +589 62 regularizer """no""" +589 62 optimizer """adadelta""" +589 62 training_loop """owa""" +589 62 negative_sampler """basic""" +589 62 evaluator """rankbased""" +589 63 dataset """kinships""" +589 63 model """rotate""" +589 63 loss """bceaftersigmoid""" +589 63 regularizer """no""" +589 63 optimizer """adadelta""" +589 63 training_loop """owa""" +589 63 negative_sampler """basic""" +589 63 evaluator """rankbased""" +589 64 dataset """kinships""" +589 64 model """rotate""" +589 64 loss """bceaftersigmoid""" +589 64 regularizer """no""" +589 64 optimizer """adadelta""" +589 64 training_loop """owa""" +589 64 negative_sampler """basic""" +589 64 evaluator """rankbased""" +589 65 dataset """kinships""" +589 65 model """rotate""" +589 65 loss """bceaftersigmoid""" +589 65 regularizer """no""" +589 65 optimizer """adadelta""" +589 65 
training_loop """owa""" +589 65 negative_sampler """basic""" +589 65 evaluator """rankbased""" +589 66 dataset """kinships""" +589 66 model """rotate""" +589 66 loss """bceaftersigmoid""" +589 66 regularizer """no""" +589 66 optimizer """adadelta""" +589 66 training_loop """owa""" +589 66 negative_sampler """basic""" +589 66 evaluator """rankbased""" +589 67 dataset """kinships""" +589 67 model """rotate""" +589 67 loss """bceaftersigmoid""" +589 67 regularizer """no""" +589 67 optimizer """adadelta""" +589 67 training_loop """owa""" +589 67 negative_sampler """basic""" +589 67 evaluator """rankbased""" +589 68 dataset """kinships""" +589 68 model """rotate""" +589 68 loss """bceaftersigmoid""" +589 68 regularizer """no""" +589 68 optimizer """adadelta""" +589 68 training_loop """owa""" +589 68 negative_sampler """basic""" +589 68 evaluator """rankbased""" +589 69 dataset """kinships""" +589 69 model """rotate""" +589 69 loss """bceaftersigmoid""" +589 69 regularizer """no""" +589 69 optimizer """adadelta""" +589 69 training_loop """owa""" +589 69 negative_sampler """basic""" +589 69 evaluator """rankbased""" +589 70 dataset """kinships""" +589 70 model """rotate""" +589 70 loss """bceaftersigmoid""" +589 70 regularizer """no""" +589 70 optimizer """adadelta""" +589 70 training_loop """owa""" +589 70 negative_sampler """basic""" +589 70 evaluator """rankbased""" +589 71 dataset """kinships""" +589 71 model """rotate""" +589 71 loss """bceaftersigmoid""" +589 71 regularizer """no""" +589 71 optimizer """adadelta""" +589 71 training_loop """owa""" +589 71 negative_sampler """basic""" +589 71 evaluator """rankbased""" +589 72 dataset """kinships""" +589 72 model """rotate""" +589 72 loss """bceaftersigmoid""" +589 72 regularizer """no""" +589 72 optimizer """adadelta""" +589 72 training_loop """owa""" +589 72 negative_sampler """basic""" +589 72 evaluator """rankbased""" +589 73 dataset """kinships""" +589 73 model """rotate""" +589 73 loss """bceaftersigmoid""" +589 
73 regularizer """no""" +589 73 optimizer """adadelta""" +589 73 training_loop """owa""" +589 73 negative_sampler """basic""" +589 73 evaluator """rankbased""" +589 74 dataset """kinships""" +589 74 model """rotate""" +589 74 loss """bceaftersigmoid""" +589 74 regularizer """no""" +589 74 optimizer """adadelta""" +589 74 training_loop """owa""" +589 74 negative_sampler """basic""" +589 74 evaluator """rankbased""" +589 75 dataset """kinships""" +589 75 model """rotate""" +589 75 loss """bceaftersigmoid""" +589 75 regularizer """no""" +589 75 optimizer """adadelta""" +589 75 training_loop """owa""" +589 75 negative_sampler """basic""" +589 75 evaluator """rankbased""" +589 76 dataset """kinships""" +589 76 model """rotate""" +589 76 loss """bceaftersigmoid""" +589 76 regularizer """no""" +589 76 optimizer """adadelta""" +589 76 training_loop """owa""" +589 76 negative_sampler """basic""" +589 76 evaluator """rankbased""" +589 77 dataset """kinships""" +589 77 model """rotate""" +589 77 loss """bceaftersigmoid""" +589 77 regularizer """no""" +589 77 optimizer """adadelta""" +589 77 training_loop """owa""" +589 77 negative_sampler """basic""" +589 77 evaluator """rankbased""" +589 78 dataset """kinships""" +589 78 model """rotate""" +589 78 loss """bceaftersigmoid""" +589 78 regularizer """no""" +589 78 optimizer """adadelta""" +589 78 training_loop """owa""" +589 78 negative_sampler """basic""" +589 78 evaluator """rankbased""" +589 79 dataset """kinships""" +589 79 model """rotate""" +589 79 loss """bceaftersigmoid""" +589 79 regularizer """no""" +589 79 optimizer """adadelta""" +589 79 training_loop """owa""" +589 79 negative_sampler """basic""" +589 79 evaluator """rankbased""" +589 80 dataset """kinships""" +589 80 model """rotate""" +589 80 loss """bceaftersigmoid""" +589 80 regularizer """no""" +589 80 optimizer """adadelta""" +589 80 training_loop """owa""" +589 80 negative_sampler """basic""" +589 80 evaluator """rankbased""" +589 81 dataset """kinships""" 
+589 81 model """rotate""" +589 81 loss """bceaftersigmoid""" +589 81 regularizer """no""" +589 81 optimizer """adadelta""" +589 81 training_loop """owa""" +589 81 negative_sampler """basic""" +589 81 evaluator """rankbased""" +589 82 dataset """kinships""" +589 82 model """rotate""" +589 82 loss """bceaftersigmoid""" +589 82 regularizer """no""" +589 82 optimizer """adadelta""" +589 82 training_loop """owa""" +589 82 negative_sampler """basic""" +589 82 evaluator """rankbased""" +589 83 dataset """kinships""" +589 83 model """rotate""" +589 83 loss """bceaftersigmoid""" +589 83 regularizer """no""" +589 83 optimizer """adadelta""" +589 83 training_loop """owa""" +589 83 negative_sampler """basic""" +589 83 evaluator """rankbased""" +589 84 dataset """kinships""" +589 84 model """rotate""" +589 84 loss """bceaftersigmoid""" +589 84 regularizer """no""" +589 84 optimizer """adadelta""" +589 84 training_loop """owa""" +589 84 negative_sampler """basic""" +589 84 evaluator """rankbased""" +589 85 dataset """kinships""" +589 85 model """rotate""" +589 85 loss """bceaftersigmoid""" +589 85 regularizer """no""" +589 85 optimizer """adadelta""" +589 85 training_loop """owa""" +589 85 negative_sampler """basic""" +589 85 evaluator """rankbased""" +589 86 dataset """kinships""" +589 86 model """rotate""" +589 86 loss """bceaftersigmoid""" +589 86 regularizer """no""" +589 86 optimizer """adadelta""" +589 86 training_loop """owa""" +589 86 negative_sampler """basic""" +589 86 evaluator """rankbased""" +589 87 dataset """kinships""" +589 87 model """rotate""" +589 87 loss """bceaftersigmoid""" +589 87 regularizer """no""" +589 87 optimizer """adadelta""" +589 87 training_loop """owa""" +589 87 negative_sampler """basic""" +589 87 evaluator """rankbased""" +589 88 dataset """kinships""" +589 88 model """rotate""" +589 88 loss """bceaftersigmoid""" +589 88 regularizer """no""" +589 88 optimizer """adadelta""" +589 88 training_loop """owa""" +589 88 negative_sampler """basic""" 
+589 88 evaluator """rankbased""" +589 89 dataset """kinships""" +589 89 model """rotate""" +589 89 loss """bceaftersigmoid""" +589 89 regularizer """no""" +589 89 optimizer """adadelta""" +589 89 training_loop """owa""" +589 89 negative_sampler """basic""" +589 89 evaluator """rankbased""" +589 90 dataset """kinships""" +589 90 model """rotate""" +589 90 loss """bceaftersigmoid""" +589 90 regularizer """no""" +589 90 optimizer """adadelta""" +589 90 training_loop """owa""" +589 90 negative_sampler """basic""" +589 90 evaluator """rankbased""" +589 91 dataset """kinships""" +589 91 model """rotate""" +589 91 loss """bceaftersigmoid""" +589 91 regularizer """no""" +589 91 optimizer """adadelta""" +589 91 training_loop """owa""" +589 91 negative_sampler """basic""" +589 91 evaluator """rankbased""" +589 92 dataset """kinships""" +589 92 model """rotate""" +589 92 loss """bceaftersigmoid""" +589 92 regularizer """no""" +589 92 optimizer """adadelta""" +589 92 training_loop """owa""" +589 92 negative_sampler """basic""" +589 92 evaluator """rankbased""" +589 93 dataset """kinships""" +589 93 model """rotate""" +589 93 loss """bceaftersigmoid""" +589 93 regularizer """no""" +589 93 optimizer """adadelta""" +589 93 training_loop """owa""" +589 93 negative_sampler """basic""" +589 93 evaluator """rankbased""" +589 94 dataset """kinships""" +589 94 model """rotate""" +589 94 loss """bceaftersigmoid""" +589 94 regularizer """no""" +589 94 optimizer """adadelta""" +589 94 training_loop """owa""" +589 94 negative_sampler """basic""" +589 94 evaluator """rankbased""" +589 95 dataset """kinships""" +589 95 model """rotate""" +589 95 loss """bceaftersigmoid""" +589 95 regularizer """no""" +589 95 optimizer """adadelta""" +589 95 training_loop """owa""" +589 95 negative_sampler """basic""" +589 95 evaluator """rankbased""" +589 96 dataset """kinships""" +589 96 model """rotate""" +589 96 loss """bceaftersigmoid""" +589 96 regularizer """no""" +589 96 optimizer """adadelta""" +589 
96 training_loop """owa""" +589 96 negative_sampler """basic""" +589 96 evaluator """rankbased""" +589 97 dataset """kinships""" +589 97 model """rotate""" +589 97 loss """bceaftersigmoid""" +589 97 regularizer """no""" +589 97 optimizer """adadelta""" +589 97 training_loop """owa""" +589 97 negative_sampler """basic""" +589 97 evaluator """rankbased""" +589 98 dataset """kinships""" +589 98 model """rotate""" +589 98 loss """bceaftersigmoid""" +589 98 regularizer """no""" +589 98 optimizer """adadelta""" +589 98 training_loop """owa""" +589 98 negative_sampler """basic""" +589 98 evaluator """rankbased""" +589 99 dataset """kinships""" +589 99 model """rotate""" +589 99 loss """bceaftersigmoid""" +589 99 regularizer """no""" +589 99 optimizer """adadelta""" +589 99 training_loop """owa""" +589 99 negative_sampler """basic""" +589 99 evaluator """rankbased""" +589 100 dataset """kinships""" +589 100 model """rotate""" +589 100 loss """bceaftersigmoid""" +589 100 regularizer """no""" +589 100 optimizer """adadelta""" +589 100 training_loop """owa""" +589 100 negative_sampler """basic""" +589 100 evaluator """rankbased""" +590 1 model.embedding_dim 2.0 +590 1 negative_sampler.num_negs_per_pos 15.0 +590 1 training.batch_size 0.0 +590 2 model.embedding_dim 2.0 +590 2 negative_sampler.num_negs_per_pos 88.0 +590 2 training.batch_size 0.0 +590 3 model.embedding_dim 2.0 +590 3 negative_sampler.num_negs_per_pos 60.0 +590 3 training.batch_size 2.0 +590 4 model.embedding_dim 0.0 +590 4 negative_sampler.num_negs_per_pos 49.0 +590 4 training.batch_size 1.0 +590 5 model.embedding_dim 1.0 +590 5 negative_sampler.num_negs_per_pos 13.0 +590 5 training.batch_size 2.0 +590 6 model.embedding_dim 1.0 +590 6 negative_sampler.num_negs_per_pos 86.0 +590 6 training.batch_size 0.0 +590 7 model.embedding_dim 0.0 +590 7 negative_sampler.num_negs_per_pos 6.0 +590 7 training.batch_size 0.0 +590 8 model.embedding_dim 2.0 +590 8 negative_sampler.num_negs_per_pos 37.0 +590 8 training.batch_size 
0.0 +590 9 model.embedding_dim 1.0 +590 9 negative_sampler.num_negs_per_pos 20.0 +590 9 training.batch_size 2.0 +590 10 model.embedding_dim 2.0 +590 10 negative_sampler.num_negs_per_pos 94.0 +590 10 training.batch_size 2.0 +590 11 model.embedding_dim 1.0 +590 11 negative_sampler.num_negs_per_pos 10.0 +590 11 training.batch_size 1.0 +590 12 model.embedding_dim 1.0 +590 12 negative_sampler.num_negs_per_pos 46.0 +590 12 training.batch_size 2.0 +590 13 model.embedding_dim 2.0 +590 13 negative_sampler.num_negs_per_pos 99.0 +590 13 training.batch_size 2.0 +590 14 model.embedding_dim 1.0 +590 14 negative_sampler.num_negs_per_pos 77.0 +590 14 training.batch_size 1.0 +590 15 model.embedding_dim 2.0 +590 15 negative_sampler.num_negs_per_pos 87.0 +590 15 training.batch_size 1.0 +590 16 model.embedding_dim 1.0 +590 16 negative_sampler.num_negs_per_pos 28.0 +590 16 training.batch_size 2.0 +590 17 model.embedding_dim 0.0 +590 17 negative_sampler.num_negs_per_pos 97.0 +590 17 training.batch_size 2.0 +590 18 model.embedding_dim 1.0 +590 18 negative_sampler.num_negs_per_pos 96.0 +590 18 training.batch_size 0.0 +590 19 model.embedding_dim 1.0 +590 19 negative_sampler.num_negs_per_pos 10.0 +590 19 training.batch_size 2.0 +590 20 model.embedding_dim 0.0 +590 20 negative_sampler.num_negs_per_pos 86.0 +590 20 training.batch_size 0.0 +590 21 model.embedding_dim 0.0 +590 21 negative_sampler.num_negs_per_pos 0.0 +590 21 training.batch_size 1.0 +590 22 model.embedding_dim 0.0 +590 22 negative_sampler.num_negs_per_pos 87.0 +590 22 training.batch_size 2.0 +590 23 model.embedding_dim 1.0 +590 23 negative_sampler.num_negs_per_pos 59.0 +590 23 training.batch_size 1.0 +590 24 model.embedding_dim 2.0 +590 24 negative_sampler.num_negs_per_pos 20.0 +590 24 training.batch_size 2.0 +590 25 model.embedding_dim 2.0 +590 25 negative_sampler.num_negs_per_pos 28.0 +590 25 training.batch_size 0.0 +590 26 model.embedding_dim 1.0 +590 26 negative_sampler.num_negs_per_pos 20.0 +590 26 training.batch_size 1.0 
+590 27 model.embedding_dim 0.0 +590 27 negative_sampler.num_negs_per_pos 60.0 +590 27 training.batch_size 0.0 +590 28 model.embedding_dim 0.0 +590 28 negative_sampler.num_negs_per_pos 43.0 +590 28 training.batch_size 2.0 +590 29 model.embedding_dim 0.0 +590 29 negative_sampler.num_negs_per_pos 13.0 +590 29 training.batch_size 1.0 +590 30 model.embedding_dim 0.0 +590 30 negative_sampler.num_negs_per_pos 1.0 +590 30 training.batch_size 0.0 +590 31 model.embedding_dim 1.0 +590 31 negative_sampler.num_negs_per_pos 45.0 +590 31 training.batch_size 2.0 +590 32 model.embedding_dim 0.0 +590 32 negative_sampler.num_negs_per_pos 0.0 +590 32 training.batch_size 0.0 +590 33 model.embedding_dim 1.0 +590 33 negative_sampler.num_negs_per_pos 77.0 +590 33 training.batch_size 1.0 +590 34 model.embedding_dim 0.0 +590 34 negative_sampler.num_negs_per_pos 73.0 +590 34 training.batch_size 1.0 +590 35 model.embedding_dim 0.0 +590 35 negative_sampler.num_negs_per_pos 4.0 +590 35 training.batch_size 2.0 +590 36 model.embedding_dim 2.0 +590 36 negative_sampler.num_negs_per_pos 15.0 +590 36 training.batch_size 1.0 +590 37 model.embedding_dim 1.0 +590 37 negative_sampler.num_negs_per_pos 35.0 +590 37 training.batch_size 1.0 +590 38 model.embedding_dim 0.0 +590 38 negative_sampler.num_negs_per_pos 35.0 +590 38 training.batch_size 2.0 +590 39 model.embedding_dim 2.0 +590 39 negative_sampler.num_negs_per_pos 22.0 +590 39 training.batch_size 1.0 +590 40 model.embedding_dim 2.0 +590 40 negative_sampler.num_negs_per_pos 24.0 +590 40 training.batch_size 0.0 +590 41 model.embedding_dim 0.0 +590 41 negative_sampler.num_negs_per_pos 26.0 +590 41 training.batch_size 1.0 +590 42 model.embedding_dim 0.0 +590 42 negative_sampler.num_negs_per_pos 69.0 +590 42 training.batch_size 1.0 +590 43 model.embedding_dim 2.0 +590 43 negative_sampler.num_negs_per_pos 82.0 +590 43 training.batch_size 2.0 +590 44 model.embedding_dim 1.0 +590 44 negative_sampler.num_negs_per_pos 81.0 +590 44 training.batch_size 2.0 +590 
45 model.embedding_dim 0.0 +590 45 negative_sampler.num_negs_per_pos 80.0 +590 45 training.batch_size 1.0 +590 46 model.embedding_dim 2.0 +590 46 negative_sampler.num_negs_per_pos 10.0 +590 46 training.batch_size 2.0 +590 47 model.embedding_dim 0.0 +590 47 negative_sampler.num_negs_per_pos 44.0 +590 47 training.batch_size 2.0 +590 48 model.embedding_dim 2.0 +590 48 negative_sampler.num_negs_per_pos 12.0 +590 48 training.batch_size 1.0 +590 49 model.embedding_dim 0.0 +590 49 negative_sampler.num_negs_per_pos 37.0 +590 49 training.batch_size 1.0 +590 50 model.embedding_dim 2.0 +590 50 negative_sampler.num_negs_per_pos 2.0 +590 50 training.batch_size 0.0 +590 51 model.embedding_dim 2.0 +590 51 negative_sampler.num_negs_per_pos 32.0 +590 51 training.batch_size 2.0 +590 52 model.embedding_dim 2.0 +590 52 negative_sampler.num_negs_per_pos 1.0 +590 52 training.batch_size 0.0 +590 53 model.embedding_dim 2.0 +590 53 negative_sampler.num_negs_per_pos 28.0 +590 53 training.batch_size 0.0 +590 54 model.embedding_dim 1.0 +590 54 negative_sampler.num_negs_per_pos 94.0 +590 54 training.batch_size 2.0 +590 55 model.embedding_dim 0.0 +590 55 negative_sampler.num_negs_per_pos 62.0 +590 55 training.batch_size 1.0 +590 56 model.embedding_dim 1.0 +590 56 negative_sampler.num_negs_per_pos 29.0 +590 56 training.batch_size 2.0 +590 57 model.embedding_dim 0.0 +590 57 negative_sampler.num_negs_per_pos 91.0 +590 57 training.batch_size 1.0 +590 58 model.embedding_dim 1.0 +590 58 negative_sampler.num_negs_per_pos 69.0 +590 58 training.batch_size 0.0 +590 59 model.embedding_dim 0.0 +590 59 negative_sampler.num_negs_per_pos 26.0 +590 59 training.batch_size 1.0 +590 60 model.embedding_dim 2.0 +590 60 negative_sampler.num_negs_per_pos 72.0 +590 60 training.batch_size 1.0 +590 61 model.embedding_dim 1.0 +590 61 negative_sampler.num_negs_per_pos 16.0 +590 61 training.batch_size 0.0 +590 62 model.embedding_dim 1.0 +590 62 negative_sampler.num_negs_per_pos 0.0 +590 62 training.batch_size 0.0 +590 63 
model.embedding_dim 2.0 +590 63 negative_sampler.num_negs_per_pos 91.0 +590 63 training.batch_size 1.0 +590 64 model.embedding_dim 0.0 +590 64 negative_sampler.num_negs_per_pos 49.0 +590 64 training.batch_size 1.0 +590 65 model.embedding_dim 2.0 +590 65 negative_sampler.num_negs_per_pos 39.0 +590 65 training.batch_size 1.0 +590 66 model.embedding_dim 1.0 +590 66 negative_sampler.num_negs_per_pos 31.0 +590 66 training.batch_size 0.0 +590 67 model.embedding_dim 0.0 +590 67 negative_sampler.num_negs_per_pos 55.0 +590 67 training.batch_size 1.0 +590 68 model.embedding_dim 0.0 +590 68 negative_sampler.num_negs_per_pos 95.0 +590 68 training.batch_size 0.0 +590 69 model.embedding_dim 0.0 +590 69 negative_sampler.num_negs_per_pos 94.0 +590 69 training.batch_size 2.0 +590 70 model.embedding_dim 2.0 +590 70 negative_sampler.num_negs_per_pos 99.0 +590 70 training.batch_size 1.0 +590 71 model.embedding_dim 0.0 +590 71 negative_sampler.num_negs_per_pos 59.0 +590 71 training.batch_size 2.0 +590 72 model.embedding_dim 2.0 +590 72 negative_sampler.num_negs_per_pos 49.0 +590 72 training.batch_size 2.0 +590 73 model.embedding_dim 0.0 +590 73 negative_sampler.num_negs_per_pos 37.0 +590 73 training.batch_size 2.0 +590 74 model.embedding_dim 0.0 +590 74 negative_sampler.num_negs_per_pos 67.0 +590 74 training.batch_size 1.0 +590 75 model.embedding_dim 2.0 +590 75 negative_sampler.num_negs_per_pos 85.0 +590 75 training.batch_size 1.0 +590 76 model.embedding_dim 0.0 +590 76 negative_sampler.num_negs_per_pos 76.0 +590 76 training.batch_size 0.0 +590 77 model.embedding_dim 1.0 +590 77 negative_sampler.num_negs_per_pos 4.0 +590 77 training.batch_size 0.0 +590 78 model.embedding_dim 1.0 +590 78 negative_sampler.num_negs_per_pos 7.0 +590 78 training.batch_size 0.0 +590 79 model.embedding_dim 1.0 +590 79 negative_sampler.num_negs_per_pos 19.0 +590 79 training.batch_size 1.0 +590 80 model.embedding_dim 0.0 +590 80 negative_sampler.num_negs_per_pos 27.0 +590 80 training.batch_size 0.0 +590 81 
model.embedding_dim 2.0 +590 81 negative_sampler.num_negs_per_pos 28.0 +590 81 training.batch_size 2.0 +590 82 model.embedding_dim 0.0 +590 82 negative_sampler.num_negs_per_pos 78.0 +590 82 training.batch_size 2.0 +590 83 model.embedding_dim 2.0 +590 83 negative_sampler.num_negs_per_pos 66.0 +590 83 training.batch_size 0.0 +590 84 model.embedding_dim 2.0 +590 84 negative_sampler.num_negs_per_pos 49.0 +590 84 training.batch_size 0.0 +590 85 model.embedding_dim 0.0 +590 85 negative_sampler.num_negs_per_pos 44.0 +590 85 training.batch_size 1.0 +590 86 model.embedding_dim 1.0 +590 86 negative_sampler.num_negs_per_pos 26.0 +590 86 training.batch_size 2.0 +590 87 model.embedding_dim 0.0 +590 87 negative_sampler.num_negs_per_pos 65.0 +590 87 training.batch_size 2.0 +590 88 model.embedding_dim 1.0 +590 88 negative_sampler.num_negs_per_pos 3.0 +590 88 training.batch_size 1.0 +590 89 model.embedding_dim 0.0 +590 89 negative_sampler.num_negs_per_pos 90.0 +590 89 training.batch_size 0.0 +590 90 model.embedding_dim 0.0 +590 90 negative_sampler.num_negs_per_pos 53.0 +590 90 training.batch_size 2.0 +590 91 model.embedding_dim 0.0 +590 91 negative_sampler.num_negs_per_pos 47.0 +590 91 training.batch_size 2.0 +590 92 model.embedding_dim 2.0 +590 92 negative_sampler.num_negs_per_pos 92.0 +590 92 training.batch_size 2.0 +590 93 model.embedding_dim 2.0 +590 93 negative_sampler.num_negs_per_pos 39.0 +590 93 training.batch_size 1.0 +590 94 model.embedding_dim 1.0 +590 94 negative_sampler.num_negs_per_pos 3.0 +590 94 training.batch_size 0.0 +590 95 model.embedding_dim 2.0 +590 95 negative_sampler.num_negs_per_pos 56.0 +590 95 training.batch_size 1.0 +590 96 model.embedding_dim 2.0 +590 96 negative_sampler.num_negs_per_pos 8.0 +590 96 training.batch_size 1.0 +590 97 model.embedding_dim 2.0 +590 97 negative_sampler.num_negs_per_pos 14.0 +590 97 training.batch_size 1.0 +590 98 model.embedding_dim 0.0 +590 98 negative_sampler.num_negs_per_pos 28.0 +590 98 training.batch_size 1.0 +590 99 
model.embedding_dim 0.0 +590 99 negative_sampler.num_negs_per_pos 83.0 +590 99 training.batch_size 0.0 +590 100 model.embedding_dim 2.0 +590 100 negative_sampler.num_negs_per_pos 50.0 +590 100 training.batch_size 0.0 +590 1 dataset """kinships""" +590 1 model """rotate""" +590 1 loss """softplus""" +590 1 regularizer """no""" +590 1 optimizer """adadelta""" +590 1 training_loop """owa""" +590 1 negative_sampler """basic""" +590 1 evaluator """rankbased""" +590 2 dataset """kinships""" +590 2 model """rotate""" +590 2 loss """softplus""" +590 2 regularizer """no""" +590 2 optimizer """adadelta""" +590 2 training_loop """owa""" +590 2 negative_sampler """basic""" +590 2 evaluator """rankbased""" +590 3 dataset """kinships""" +590 3 model """rotate""" +590 3 loss """softplus""" +590 3 regularizer """no""" +590 3 optimizer """adadelta""" +590 3 training_loop """owa""" +590 3 negative_sampler """basic""" +590 3 evaluator """rankbased""" +590 4 dataset """kinships""" +590 4 model """rotate""" +590 4 loss """softplus""" +590 4 regularizer """no""" +590 4 optimizer """adadelta""" +590 4 training_loop """owa""" +590 4 negative_sampler """basic""" +590 4 evaluator """rankbased""" +590 5 dataset """kinships""" +590 5 model """rotate""" +590 5 loss """softplus""" +590 5 regularizer """no""" +590 5 optimizer """adadelta""" +590 5 training_loop """owa""" +590 5 negative_sampler """basic""" +590 5 evaluator """rankbased""" +590 6 dataset """kinships""" +590 6 model """rotate""" +590 6 loss """softplus""" +590 6 regularizer """no""" +590 6 optimizer """adadelta""" +590 6 training_loop """owa""" +590 6 negative_sampler """basic""" +590 6 evaluator """rankbased""" +590 7 dataset """kinships""" +590 7 model """rotate""" +590 7 loss """softplus""" +590 7 regularizer """no""" +590 7 optimizer """adadelta""" +590 7 training_loop """owa""" +590 7 negative_sampler """basic""" +590 7 evaluator """rankbased""" +590 8 dataset """kinships""" +590 8 model """rotate""" +590 8 loss 
"""softplus""" +590 8 regularizer """no""" +590 8 optimizer """adadelta""" +590 8 training_loop """owa""" +590 8 negative_sampler """basic""" +590 8 evaluator """rankbased""" +590 9 dataset """kinships""" +590 9 model """rotate""" +590 9 loss """softplus""" +590 9 regularizer """no""" +590 9 optimizer """adadelta""" +590 9 training_loop """owa""" +590 9 negative_sampler """basic""" +590 9 evaluator """rankbased""" +590 10 dataset """kinships""" +590 10 model """rotate""" +590 10 loss """softplus""" +590 10 regularizer """no""" +590 10 optimizer """adadelta""" +590 10 training_loop """owa""" +590 10 negative_sampler """basic""" +590 10 evaluator """rankbased""" +590 11 dataset """kinships""" +590 11 model """rotate""" +590 11 loss """softplus""" +590 11 regularizer """no""" +590 11 optimizer """adadelta""" +590 11 training_loop """owa""" +590 11 negative_sampler """basic""" +590 11 evaluator """rankbased""" +590 12 dataset """kinships""" +590 12 model """rotate""" +590 12 loss """softplus""" +590 12 regularizer """no""" +590 12 optimizer """adadelta""" +590 12 training_loop """owa""" +590 12 negative_sampler """basic""" +590 12 evaluator """rankbased""" +590 13 dataset """kinships""" +590 13 model """rotate""" +590 13 loss """softplus""" +590 13 regularizer """no""" +590 13 optimizer """adadelta""" +590 13 training_loop """owa""" +590 13 negative_sampler """basic""" +590 13 evaluator """rankbased""" +590 14 dataset """kinships""" +590 14 model """rotate""" +590 14 loss """softplus""" +590 14 regularizer """no""" +590 14 optimizer """adadelta""" +590 14 training_loop """owa""" +590 14 negative_sampler """basic""" +590 14 evaluator """rankbased""" +590 15 dataset """kinships""" +590 15 model """rotate""" +590 15 loss """softplus""" +590 15 regularizer """no""" +590 15 optimizer """adadelta""" +590 15 training_loop """owa""" +590 15 negative_sampler """basic""" +590 15 evaluator """rankbased""" +590 16 dataset """kinships""" +590 16 model """rotate""" +590 16 loss 
"""softplus""" +590 16 regularizer """no""" +590 16 optimizer """adadelta""" +590 16 training_loop """owa""" +590 16 negative_sampler """basic""" +590 16 evaluator """rankbased""" +590 17 dataset """kinships""" +590 17 model """rotate""" +590 17 loss """softplus""" +590 17 regularizer """no""" +590 17 optimizer """adadelta""" +590 17 training_loop """owa""" +590 17 negative_sampler """basic""" +590 17 evaluator """rankbased""" +590 18 dataset """kinships""" +590 18 model """rotate""" +590 18 loss """softplus""" +590 18 regularizer """no""" +590 18 optimizer """adadelta""" +590 18 training_loop """owa""" +590 18 negative_sampler """basic""" +590 18 evaluator """rankbased""" +590 19 dataset """kinships""" +590 19 model """rotate""" +590 19 loss """softplus""" +590 19 regularizer """no""" +590 19 optimizer """adadelta""" +590 19 training_loop """owa""" +590 19 negative_sampler """basic""" +590 19 evaluator """rankbased""" +590 20 dataset """kinships""" +590 20 model """rotate""" +590 20 loss """softplus""" +590 20 regularizer """no""" +590 20 optimizer """adadelta""" +590 20 training_loop """owa""" +590 20 negative_sampler """basic""" +590 20 evaluator """rankbased""" +590 21 dataset """kinships""" +590 21 model """rotate""" +590 21 loss """softplus""" +590 21 regularizer """no""" +590 21 optimizer """adadelta""" +590 21 training_loop """owa""" +590 21 negative_sampler """basic""" +590 21 evaluator """rankbased""" +590 22 dataset """kinships""" +590 22 model """rotate""" +590 22 loss """softplus""" +590 22 regularizer """no""" +590 22 optimizer """adadelta""" +590 22 training_loop """owa""" +590 22 negative_sampler """basic""" +590 22 evaluator """rankbased""" +590 23 dataset """kinships""" +590 23 model """rotate""" +590 23 loss """softplus""" +590 23 regularizer """no""" +590 23 optimizer """adadelta""" +590 23 training_loop """owa""" +590 23 negative_sampler """basic""" +590 23 evaluator """rankbased""" +590 24 dataset """kinships""" +590 24 model """rotate""" +590 
24 loss """softplus""" +590 24 regularizer """no""" +590 24 optimizer """adadelta""" +590 24 training_loop """owa""" +590 24 negative_sampler """basic""" +590 24 evaluator """rankbased""" +590 25 dataset """kinships""" +590 25 model """rotate""" +590 25 loss """softplus""" +590 25 regularizer """no""" +590 25 optimizer """adadelta""" +590 25 training_loop """owa""" +590 25 negative_sampler """basic""" +590 25 evaluator """rankbased""" +590 26 dataset """kinships""" +590 26 model """rotate""" +590 26 loss """softplus""" +590 26 regularizer """no""" +590 26 optimizer """adadelta""" +590 26 training_loop """owa""" +590 26 negative_sampler """basic""" +590 26 evaluator """rankbased""" +590 27 dataset """kinships""" +590 27 model """rotate""" +590 27 loss """softplus""" +590 27 regularizer """no""" +590 27 optimizer """adadelta""" +590 27 training_loop """owa""" +590 27 negative_sampler """basic""" +590 27 evaluator """rankbased""" +590 28 dataset """kinships""" +590 28 model """rotate""" +590 28 loss """softplus""" +590 28 regularizer """no""" +590 28 optimizer """adadelta""" +590 28 training_loop """owa""" +590 28 negative_sampler """basic""" +590 28 evaluator """rankbased""" +590 29 dataset """kinships""" +590 29 model """rotate""" +590 29 loss """softplus""" +590 29 regularizer """no""" +590 29 optimizer """adadelta""" +590 29 training_loop """owa""" +590 29 negative_sampler """basic""" +590 29 evaluator """rankbased""" +590 30 dataset """kinships""" +590 30 model """rotate""" +590 30 loss """softplus""" +590 30 regularizer """no""" +590 30 optimizer """adadelta""" +590 30 training_loop """owa""" +590 30 negative_sampler """basic""" +590 30 evaluator """rankbased""" +590 31 dataset """kinships""" +590 31 model """rotate""" +590 31 loss """softplus""" +590 31 regularizer """no""" +590 31 optimizer """adadelta""" +590 31 training_loop """owa""" +590 31 negative_sampler """basic""" +590 31 evaluator """rankbased""" +590 32 dataset """kinships""" +590 32 model 
"""rotate""" +590 32 loss """softplus""" +590 32 regularizer """no""" +590 32 optimizer """adadelta""" +590 32 training_loop """owa""" +590 32 negative_sampler """basic""" +590 32 evaluator """rankbased""" +590 33 dataset """kinships""" +590 33 model """rotate""" +590 33 loss """softplus""" +590 33 regularizer """no""" +590 33 optimizer """adadelta""" +590 33 training_loop """owa""" +590 33 negative_sampler """basic""" +590 33 evaluator """rankbased""" +590 34 dataset """kinships""" +590 34 model """rotate""" +590 34 loss """softplus""" +590 34 regularizer """no""" +590 34 optimizer """adadelta""" +590 34 training_loop """owa""" +590 34 negative_sampler """basic""" +590 34 evaluator """rankbased""" +590 35 dataset """kinships""" +590 35 model """rotate""" +590 35 loss """softplus""" +590 35 regularizer """no""" +590 35 optimizer """adadelta""" +590 35 training_loop """owa""" +590 35 negative_sampler """basic""" +590 35 evaluator """rankbased""" +590 36 dataset """kinships""" +590 36 model """rotate""" +590 36 loss """softplus""" +590 36 regularizer """no""" +590 36 optimizer """adadelta""" +590 36 training_loop """owa""" +590 36 negative_sampler """basic""" +590 36 evaluator """rankbased""" +590 37 dataset """kinships""" +590 37 model """rotate""" +590 37 loss """softplus""" +590 37 regularizer """no""" +590 37 optimizer """adadelta""" +590 37 training_loop """owa""" +590 37 negative_sampler """basic""" +590 37 evaluator """rankbased""" +590 38 dataset """kinships""" +590 38 model """rotate""" +590 38 loss """softplus""" +590 38 regularizer """no""" +590 38 optimizer """adadelta""" +590 38 training_loop """owa""" +590 38 negative_sampler """basic""" +590 38 evaluator """rankbased""" +590 39 dataset """kinships""" +590 39 model """rotate""" +590 39 loss """softplus""" +590 39 regularizer """no""" +590 39 optimizer """adadelta""" +590 39 training_loop """owa""" +590 39 negative_sampler """basic""" +590 39 evaluator """rankbased""" +590 40 dataset """kinships""" +590 
40 model """rotate""" +590 40 loss """softplus""" +590 40 regularizer """no""" +590 40 optimizer """adadelta""" +590 40 training_loop """owa""" +590 40 negative_sampler """basic""" +590 40 evaluator """rankbased""" +590 41 dataset """kinships""" +590 41 model """rotate""" +590 41 loss """softplus""" +590 41 regularizer """no""" +590 41 optimizer """adadelta""" +590 41 training_loop """owa""" +590 41 negative_sampler """basic""" +590 41 evaluator """rankbased""" +590 42 dataset """kinships""" +590 42 model """rotate""" +590 42 loss """softplus""" +590 42 regularizer """no""" +590 42 optimizer """adadelta""" +590 42 training_loop """owa""" +590 42 negative_sampler """basic""" +590 42 evaluator """rankbased""" +590 43 dataset """kinships""" +590 43 model """rotate""" +590 43 loss """softplus""" +590 43 regularizer """no""" +590 43 optimizer """adadelta""" +590 43 training_loop """owa""" +590 43 negative_sampler """basic""" +590 43 evaluator """rankbased""" +590 44 dataset """kinships""" +590 44 model """rotate""" +590 44 loss """softplus""" +590 44 regularizer """no""" +590 44 optimizer """adadelta""" +590 44 training_loop """owa""" +590 44 negative_sampler """basic""" +590 44 evaluator """rankbased""" +590 45 dataset """kinships""" +590 45 model """rotate""" +590 45 loss """softplus""" +590 45 regularizer """no""" +590 45 optimizer """adadelta""" +590 45 training_loop """owa""" +590 45 negative_sampler """basic""" +590 45 evaluator """rankbased""" +590 46 dataset """kinships""" +590 46 model """rotate""" +590 46 loss """softplus""" +590 46 regularizer """no""" +590 46 optimizer """adadelta""" +590 46 training_loop """owa""" +590 46 negative_sampler """basic""" +590 46 evaluator """rankbased""" +590 47 dataset """kinships""" +590 47 model """rotate""" +590 47 loss """softplus""" +590 47 regularizer """no""" +590 47 optimizer """adadelta""" +590 47 training_loop """owa""" +590 47 negative_sampler """basic""" +590 47 evaluator """rankbased""" +590 48 dataset 
"""kinships""" +590 48 model """rotate""" +590 48 loss """softplus""" +590 48 regularizer """no""" +590 48 optimizer """adadelta""" +590 48 training_loop """owa""" +590 48 negative_sampler """basic""" +590 48 evaluator """rankbased""" +590 49 dataset """kinships""" +590 49 model """rotate""" +590 49 loss """softplus""" +590 49 regularizer """no""" +590 49 optimizer """adadelta""" +590 49 training_loop """owa""" +590 49 negative_sampler """basic""" +590 49 evaluator """rankbased""" +590 50 dataset """kinships""" +590 50 model """rotate""" +590 50 loss """softplus""" +590 50 regularizer """no""" +590 50 optimizer """adadelta""" +590 50 training_loop """owa""" +590 50 negative_sampler """basic""" +590 50 evaluator """rankbased""" +590 51 dataset """kinships""" +590 51 model """rotate""" +590 51 loss """softplus""" +590 51 regularizer """no""" +590 51 optimizer """adadelta""" +590 51 training_loop """owa""" +590 51 negative_sampler """basic""" +590 51 evaluator """rankbased""" +590 52 dataset """kinships""" +590 52 model """rotate""" +590 52 loss """softplus""" +590 52 regularizer """no""" +590 52 optimizer """adadelta""" +590 52 training_loop """owa""" +590 52 negative_sampler """basic""" +590 52 evaluator """rankbased""" +590 53 dataset """kinships""" +590 53 model """rotate""" +590 53 loss """softplus""" +590 53 regularizer """no""" +590 53 optimizer """adadelta""" +590 53 training_loop """owa""" +590 53 negative_sampler """basic""" +590 53 evaluator """rankbased""" +590 54 dataset """kinships""" +590 54 model """rotate""" +590 54 loss """softplus""" +590 54 regularizer """no""" +590 54 optimizer """adadelta""" +590 54 training_loop """owa""" +590 54 negative_sampler """basic""" +590 54 evaluator """rankbased""" +590 55 dataset """kinships""" +590 55 model """rotate""" +590 55 loss """softplus""" +590 55 regularizer """no""" +590 55 optimizer """adadelta""" +590 55 training_loop """owa""" +590 55 negative_sampler """basic""" +590 55 evaluator """rankbased""" +590 56 
dataset """kinships""" +590 56 model """rotate""" +590 56 loss """softplus""" +590 56 regularizer """no""" +590 56 optimizer """adadelta""" +590 56 training_loop """owa""" +590 56 negative_sampler """basic""" +590 56 evaluator """rankbased""" +590 57 dataset """kinships""" +590 57 model """rotate""" +590 57 loss """softplus""" +590 57 regularizer """no""" +590 57 optimizer """adadelta""" +590 57 training_loop """owa""" +590 57 negative_sampler """basic""" +590 57 evaluator """rankbased""" +590 58 dataset """kinships""" +590 58 model """rotate""" +590 58 loss """softplus""" +590 58 regularizer """no""" +590 58 optimizer """adadelta""" +590 58 training_loop """owa""" +590 58 negative_sampler """basic""" +590 58 evaluator """rankbased""" +590 59 dataset """kinships""" +590 59 model """rotate""" +590 59 loss """softplus""" +590 59 regularizer """no""" +590 59 optimizer """adadelta""" +590 59 training_loop """owa""" +590 59 negative_sampler """basic""" +590 59 evaluator """rankbased""" +590 60 dataset """kinships""" +590 60 model """rotate""" +590 60 loss """softplus""" +590 60 regularizer """no""" +590 60 optimizer """adadelta""" +590 60 training_loop """owa""" +590 60 negative_sampler """basic""" +590 60 evaluator """rankbased""" +590 61 dataset """kinships""" +590 61 model """rotate""" +590 61 loss """softplus""" +590 61 regularizer """no""" +590 61 optimizer """adadelta""" +590 61 training_loop """owa""" +590 61 negative_sampler """basic""" +590 61 evaluator """rankbased""" +590 62 dataset """kinships""" +590 62 model """rotate""" +590 62 loss """softplus""" +590 62 regularizer """no""" +590 62 optimizer """adadelta""" +590 62 training_loop """owa""" +590 62 negative_sampler """basic""" +590 62 evaluator """rankbased""" +590 63 dataset """kinships""" +590 63 model """rotate""" +590 63 loss """softplus""" +590 63 regularizer """no""" +590 63 optimizer """adadelta""" +590 63 training_loop """owa""" +590 63 negative_sampler """basic""" +590 63 evaluator """rankbased""" 
+590 64 dataset """kinships""" +590 64 model """rotate""" +590 64 loss """softplus""" +590 64 regularizer """no""" +590 64 optimizer """adadelta""" +590 64 training_loop """owa""" +590 64 negative_sampler """basic""" +590 64 evaluator """rankbased""" +590 65 dataset """kinships""" +590 65 model """rotate""" +590 65 loss """softplus""" +590 65 regularizer """no""" +590 65 optimizer """adadelta""" +590 65 training_loop """owa""" +590 65 negative_sampler """basic""" +590 65 evaluator """rankbased""" +590 66 dataset """kinships""" +590 66 model """rotate""" +590 66 loss """softplus""" +590 66 regularizer """no""" +590 66 optimizer """adadelta""" +590 66 training_loop """owa""" +590 66 negative_sampler """basic""" +590 66 evaluator """rankbased""" +590 67 dataset """kinships""" +590 67 model """rotate""" +590 67 loss """softplus""" +590 67 regularizer """no""" +590 67 optimizer """adadelta""" +590 67 training_loop """owa""" +590 67 negative_sampler """basic""" +590 67 evaluator """rankbased""" +590 68 dataset """kinships""" +590 68 model """rotate""" +590 68 loss """softplus""" +590 68 regularizer """no""" +590 68 optimizer """adadelta""" +590 68 training_loop """owa""" +590 68 negative_sampler """basic""" +590 68 evaluator """rankbased""" +590 69 dataset """kinships""" +590 69 model """rotate""" +590 69 loss """softplus""" +590 69 regularizer """no""" +590 69 optimizer """adadelta""" +590 69 training_loop """owa""" +590 69 negative_sampler """basic""" +590 69 evaluator """rankbased""" +590 70 dataset """kinships""" +590 70 model """rotate""" +590 70 loss """softplus""" +590 70 regularizer """no""" +590 70 optimizer """adadelta""" +590 70 training_loop """owa""" +590 70 negative_sampler """basic""" +590 70 evaluator """rankbased""" +590 71 dataset """kinships""" +590 71 model """rotate""" +590 71 loss """softplus""" +590 71 regularizer """no""" +590 71 optimizer """adadelta""" +590 71 training_loop """owa""" +590 71 negative_sampler """basic""" +590 71 evaluator 
"""rankbased""" +590 72 dataset """kinships""" +590 72 model """rotate""" +590 72 loss """softplus""" +590 72 regularizer """no""" +590 72 optimizer """adadelta""" +590 72 training_loop """owa""" +590 72 negative_sampler """basic""" +590 72 evaluator """rankbased""" +590 73 dataset """kinships""" +590 73 model """rotate""" +590 73 loss """softplus""" +590 73 regularizer """no""" +590 73 optimizer """adadelta""" +590 73 training_loop """owa""" +590 73 negative_sampler """basic""" +590 73 evaluator """rankbased""" +590 74 dataset """kinships""" +590 74 model """rotate""" +590 74 loss """softplus""" +590 74 regularizer """no""" +590 74 optimizer """adadelta""" +590 74 training_loop """owa""" +590 74 negative_sampler """basic""" +590 74 evaluator """rankbased""" +590 75 dataset """kinships""" +590 75 model """rotate""" +590 75 loss """softplus""" +590 75 regularizer """no""" +590 75 optimizer """adadelta""" +590 75 training_loop """owa""" +590 75 negative_sampler """basic""" +590 75 evaluator """rankbased""" +590 76 dataset """kinships""" +590 76 model """rotate""" +590 76 loss """softplus""" +590 76 regularizer """no""" +590 76 optimizer """adadelta""" +590 76 training_loop """owa""" +590 76 negative_sampler """basic""" +590 76 evaluator """rankbased""" +590 77 dataset """kinships""" +590 77 model """rotate""" +590 77 loss """softplus""" +590 77 regularizer """no""" +590 77 optimizer """adadelta""" +590 77 training_loop """owa""" +590 77 negative_sampler """basic""" +590 77 evaluator """rankbased""" +590 78 dataset """kinships""" +590 78 model """rotate""" +590 78 loss """softplus""" +590 78 regularizer """no""" +590 78 optimizer """adadelta""" +590 78 training_loop """owa""" +590 78 negative_sampler """basic""" +590 78 evaluator """rankbased""" +590 79 dataset """kinships""" +590 79 model """rotate""" +590 79 loss """softplus""" +590 79 regularizer """no""" +590 79 optimizer """adadelta""" +590 79 training_loop """owa""" +590 79 negative_sampler """basic""" +590 79 
evaluator """rankbased""" +590 80 dataset """kinships""" +590 80 model """rotate""" +590 80 loss """softplus""" +590 80 regularizer """no""" +590 80 optimizer """adadelta""" +590 80 training_loop """owa""" +590 80 negative_sampler """basic""" +590 80 evaluator """rankbased""" +590 81 dataset """kinships""" +590 81 model """rotate""" +590 81 loss """softplus""" +590 81 regularizer """no""" +590 81 optimizer """adadelta""" +590 81 training_loop """owa""" +590 81 negative_sampler """basic""" +590 81 evaluator """rankbased""" +590 82 dataset """kinships""" +590 82 model """rotate""" +590 82 loss """softplus""" +590 82 regularizer """no""" +590 82 optimizer """adadelta""" +590 82 training_loop """owa""" +590 82 negative_sampler """basic""" +590 82 evaluator """rankbased""" +590 83 dataset """kinships""" +590 83 model """rotate""" +590 83 loss """softplus""" +590 83 regularizer """no""" +590 83 optimizer """adadelta""" +590 83 training_loop """owa""" +590 83 negative_sampler """basic""" +590 83 evaluator """rankbased""" +590 84 dataset """kinships""" +590 84 model """rotate""" +590 84 loss """softplus""" +590 84 regularizer """no""" +590 84 optimizer """adadelta""" +590 84 training_loop """owa""" +590 84 negative_sampler """basic""" +590 84 evaluator """rankbased""" +590 85 dataset """kinships""" +590 85 model """rotate""" +590 85 loss """softplus""" +590 85 regularizer """no""" +590 85 optimizer """adadelta""" +590 85 training_loop """owa""" +590 85 negative_sampler """basic""" +590 85 evaluator """rankbased""" +590 86 dataset """kinships""" +590 86 model """rotate""" +590 86 loss """softplus""" +590 86 regularizer """no""" +590 86 optimizer """adadelta""" +590 86 training_loop """owa""" +590 86 negative_sampler """basic""" +590 86 evaluator """rankbased""" +590 87 dataset """kinships""" +590 87 model """rotate""" +590 87 loss """softplus""" +590 87 regularizer """no""" +590 87 optimizer """adadelta""" +590 87 training_loop """owa""" +590 87 negative_sampler """basic""" 
+590 87 evaluator """rankbased""" +590 88 dataset """kinships""" +590 88 model """rotate""" +590 88 loss """softplus""" +590 88 regularizer """no""" +590 88 optimizer """adadelta""" +590 88 training_loop """owa""" +590 88 negative_sampler """basic""" +590 88 evaluator """rankbased""" +590 89 dataset """kinships""" +590 89 model """rotate""" +590 89 loss """softplus""" +590 89 regularizer """no""" +590 89 optimizer """adadelta""" +590 89 training_loop """owa""" +590 89 negative_sampler """basic""" +590 89 evaluator """rankbased""" +590 90 dataset """kinships""" +590 90 model """rotate""" +590 90 loss """softplus""" +590 90 regularizer """no""" +590 90 optimizer """adadelta""" +590 90 training_loop """owa""" +590 90 negative_sampler """basic""" +590 90 evaluator """rankbased""" +590 91 dataset """kinships""" +590 91 model """rotate""" +590 91 loss """softplus""" +590 91 regularizer """no""" +590 91 optimizer """adadelta""" +590 91 training_loop """owa""" +590 91 negative_sampler """basic""" +590 91 evaluator """rankbased""" +590 92 dataset """kinships""" +590 92 model """rotate""" +590 92 loss """softplus""" +590 92 regularizer """no""" +590 92 optimizer """adadelta""" +590 92 training_loop """owa""" +590 92 negative_sampler """basic""" +590 92 evaluator """rankbased""" +590 93 dataset """kinships""" +590 93 model """rotate""" +590 93 loss """softplus""" +590 93 regularizer """no""" +590 93 optimizer """adadelta""" +590 93 training_loop """owa""" +590 93 negative_sampler """basic""" +590 93 evaluator """rankbased""" +590 94 dataset """kinships""" +590 94 model """rotate""" +590 94 loss """softplus""" +590 94 regularizer """no""" +590 94 optimizer """adadelta""" +590 94 training_loop """owa""" +590 94 negative_sampler """basic""" +590 94 evaluator """rankbased""" +590 95 dataset """kinships""" +590 95 model """rotate""" +590 95 loss """softplus""" +590 95 regularizer """no""" +590 95 optimizer """adadelta""" +590 95 training_loop """owa""" +590 95 negative_sampler 
"""basic""" +590 95 evaluator """rankbased""" +590 96 dataset """kinships""" +590 96 model """rotate""" +590 96 loss """softplus""" +590 96 regularizer """no""" +590 96 optimizer """adadelta""" +590 96 training_loop """owa""" +590 96 negative_sampler """basic""" +590 96 evaluator """rankbased""" +590 97 dataset """kinships""" +590 97 model """rotate""" +590 97 loss """softplus""" +590 97 regularizer """no""" +590 97 optimizer """adadelta""" +590 97 training_loop """owa""" +590 97 negative_sampler """basic""" +590 97 evaluator """rankbased""" +590 98 dataset """kinships""" +590 98 model """rotate""" +590 98 loss """softplus""" +590 98 regularizer """no""" +590 98 optimizer """adadelta""" +590 98 training_loop """owa""" +590 98 negative_sampler """basic""" +590 98 evaluator """rankbased""" +590 99 dataset """kinships""" +590 99 model """rotate""" +590 99 loss """softplus""" +590 99 regularizer """no""" +590 99 optimizer """adadelta""" +590 99 training_loop """owa""" +590 99 negative_sampler """basic""" +590 99 evaluator """rankbased""" +590 100 dataset """kinships""" +590 100 model """rotate""" +590 100 loss """softplus""" +590 100 regularizer """no""" +590 100 optimizer """adadelta""" +590 100 training_loop """owa""" +590 100 negative_sampler """basic""" +590 100 evaluator """rankbased""" +591 1 model.embedding_dim 2.0 +591 1 negative_sampler.num_negs_per_pos 67.0 +591 1 training.batch_size 1.0 +591 2 model.embedding_dim 0.0 +591 2 negative_sampler.num_negs_per_pos 82.0 +591 2 training.batch_size 0.0 +591 3 model.embedding_dim 2.0 +591 3 negative_sampler.num_negs_per_pos 61.0 +591 3 training.batch_size 2.0 +591 4 model.embedding_dim 1.0 +591 4 negative_sampler.num_negs_per_pos 33.0 +591 4 training.batch_size 0.0 +591 5 model.embedding_dim 0.0 +591 5 negative_sampler.num_negs_per_pos 48.0 +591 5 training.batch_size 1.0 +591 6 model.embedding_dim 1.0 +591 6 negative_sampler.num_negs_per_pos 33.0 +591 6 training.batch_size 0.0 +591 7 model.embedding_dim 2.0 +591 7 
negative_sampler.num_negs_per_pos 25.0 +591 7 training.batch_size 0.0 +591 8 model.embedding_dim 0.0 +591 8 negative_sampler.num_negs_per_pos 42.0 +591 8 training.batch_size 1.0 +591 9 model.embedding_dim 2.0 +591 9 negative_sampler.num_negs_per_pos 25.0 +591 9 training.batch_size 1.0 +591 10 model.embedding_dim 2.0 +591 10 negative_sampler.num_negs_per_pos 14.0 +591 10 training.batch_size 1.0 +591 11 model.embedding_dim 1.0 +591 11 negative_sampler.num_negs_per_pos 26.0 +591 11 training.batch_size 0.0 +591 12 model.embedding_dim 2.0 +591 12 negative_sampler.num_negs_per_pos 53.0 +591 12 training.batch_size 0.0 +591 13 model.embedding_dim 2.0 +591 13 negative_sampler.num_negs_per_pos 67.0 +591 13 training.batch_size 2.0 +591 14 model.embedding_dim 1.0 +591 14 negative_sampler.num_negs_per_pos 94.0 +591 14 training.batch_size 2.0 +591 15 model.embedding_dim 0.0 +591 15 negative_sampler.num_negs_per_pos 14.0 +591 15 training.batch_size 1.0 +591 16 model.embedding_dim 1.0 +591 16 negative_sampler.num_negs_per_pos 75.0 +591 16 training.batch_size 2.0 +591 17 model.embedding_dim 0.0 +591 17 negative_sampler.num_negs_per_pos 30.0 +591 17 training.batch_size 1.0 +591 18 model.embedding_dim 0.0 +591 18 negative_sampler.num_negs_per_pos 23.0 +591 18 training.batch_size 0.0 +591 19 model.embedding_dim 1.0 +591 19 negative_sampler.num_negs_per_pos 42.0 +591 19 training.batch_size 1.0 +591 20 model.embedding_dim 2.0 +591 20 negative_sampler.num_negs_per_pos 39.0 +591 20 training.batch_size 1.0 +591 21 model.embedding_dim 2.0 +591 21 negative_sampler.num_negs_per_pos 25.0 +591 21 training.batch_size 0.0 +591 22 model.embedding_dim 2.0 +591 22 negative_sampler.num_negs_per_pos 61.0 +591 22 training.batch_size 0.0 +591 23 model.embedding_dim 0.0 +591 23 negative_sampler.num_negs_per_pos 26.0 +591 23 training.batch_size 1.0 +591 24 model.embedding_dim 0.0 +591 24 negative_sampler.num_negs_per_pos 88.0 +591 24 training.batch_size 0.0 +591 25 model.embedding_dim 2.0 +591 25 
negative_sampler.num_negs_per_pos 72.0 +591 25 training.batch_size 1.0 +591 26 model.embedding_dim 1.0 +591 26 negative_sampler.num_negs_per_pos 69.0 +591 26 training.batch_size 0.0 +591 27 model.embedding_dim 0.0 +591 27 negative_sampler.num_negs_per_pos 89.0 +591 27 training.batch_size 2.0 +591 28 model.embedding_dim 2.0 +591 28 negative_sampler.num_negs_per_pos 99.0 +591 28 training.batch_size 2.0 +591 29 model.embedding_dim 2.0 +591 29 negative_sampler.num_negs_per_pos 78.0 +591 29 training.batch_size 2.0 +591 30 model.embedding_dim 0.0 +591 30 negative_sampler.num_negs_per_pos 53.0 +591 30 training.batch_size 0.0 +591 31 model.embedding_dim 2.0 +591 31 negative_sampler.num_negs_per_pos 23.0 +591 31 training.batch_size 0.0 +591 32 model.embedding_dim 0.0 +591 32 negative_sampler.num_negs_per_pos 45.0 +591 32 training.batch_size 1.0 +591 33 model.embedding_dim 2.0 +591 33 negative_sampler.num_negs_per_pos 61.0 +591 33 training.batch_size 0.0 +591 34 model.embedding_dim 0.0 +591 34 negative_sampler.num_negs_per_pos 61.0 +591 34 training.batch_size 2.0 +591 35 model.embedding_dim 1.0 +591 35 negative_sampler.num_negs_per_pos 30.0 +591 35 training.batch_size 2.0 +591 36 model.embedding_dim 1.0 +591 36 negative_sampler.num_negs_per_pos 60.0 +591 36 training.batch_size 0.0 +591 37 model.embedding_dim 2.0 +591 37 negative_sampler.num_negs_per_pos 63.0 +591 37 training.batch_size 2.0 +591 38 model.embedding_dim 2.0 +591 38 negative_sampler.num_negs_per_pos 62.0 +591 38 training.batch_size 1.0 +591 39 model.embedding_dim 0.0 +591 39 negative_sampler.num_negs_per_pos 74.0 +591 39 training.batch_size 1.0 +591 40 model.embedding_dim 2.0 +591 40 negative_sampler.num_negs_per_pos 83.0 +591 40 training.batch_size 1.0 +591 41 model.embedding_dim 0.0 +591 41 negative_sampler.num_negs_per_pos 88.0 +591 41 training.batch_size 0.0 +591 42 model.embedding_dim 0.0 +591 42 negative_sampler.num_negs_per_pos 47.0 +591 42 training.batch_size 1.0 +591 43 model.embedding_dim 0.0 +591 43 
negative_sampler.num_negs_per_pos 68.0 +591 43 training.batch_size 2.0 +591 44 model.embedding_dim 2.0 +591 44 negative_sampler.num_negs_per_pos 54.0 +591 44 training.batch_size 2.0 +591 45 model.embedding_dim 1.0 +591 45 negative_sampler.num_negs_per_pos 0.0 +591 45 training.batch_size 2.0 +591 46 model.embedding_dim 0.0 +591 46 negative_sampler.num_negs_per_pos 35.0 +591 46 training.batch_size 1.0 +591 47 model.embedding_dim 1.0 +591 47 negative_sampler.num_negs_per_pos 64.0 +591 47 training.batch_size 2.0 +591 48 model.embedding_dim 0.0 +591 48 negative_sampler.num_negs_per_pos 39.0 +591 48 training.batch_size 2.0 +591 49 model.embedding_dim 2.0 +591 49 negative_sampler.num_negs_per_pos 60.0 +591 49 training.batch_size 1.0 +591 50 model.embedding_dim 1.0 +591 50 negative_sampler.num_negs_per_pos 22.0 +591 50 training.batch_size 2.0 +591 51 model.embedding_dim 0.0 +591 51 negative_sampler.num_negs_per_pos 58.0 +591 51 training.batch_size 2.0 +591 52 model.embedding_dim 0.0 +591 52 negative_sampler.num_negs_per_pos 30.0 +591 52 training.batch_size 0.0 +591 53 model.embedding_dim 0.0 +591 53 negative_sampler.num_negs_per_pos 24.0 +591 53 training.batch_size 0.0 +591 54 model.embedding_dim 2.0 +591 54 negative_sampler.num_negs_per_pos 89.0 +591 54 training.batch_size 2.0 +591 55 model.embedding_dim 1.0 +591 55 negative_sampler.num_negs_per_pos 73.0 +591 55 training.batch_size 1.0 +591 56 model.embedding_dim 1.0 +591 56 negative_sampler.num_negs_per_pos 58.0 +591 56 training.batch_size 2.0 +591 57 model.embedding_dim 2.0 +591 57 negative_sampler.num_negs_per_pos 71.0 +591 57 training.batch_size 1.0 +591 58 model.embedding_dim 1.0 +591 58 negative_sampler.num_negs_per_pos 72.0 +591 58 training.batch_size 2.0 +591 59 model.embedding_dim 0.0 +591 59 negative_sampler.num_negs_per_pos 28.0 +591 59 training.batch_size 0.0 +591 60 model.embedding_dim 1.0 +591 60 negative_sampler.num_negs_per_pos 70.0 +591 60 training.batch_size 2.0 +591 61 model.embedding_dim 0.0 +591 61 
negative_sampler.num_negs_per_pos 88.0 +591 61 training.batch_size 1.0 +591 62 model.embedding_dim 2.0 +591 62 negative_sampler.num_negs_per_pos 81.0 +591 62 training.batch_size 0.0 +591 63 model.embedding_dim 2.0 +591 63 negative_sampler.num_negs_per_pos 11.0 +591 63 training.batch_size 2.0 +591 64 model.embedding_dim 2.0 +591 64 negative_sampler.num_negs_per_pos 63.0 +591 64 training.batch_size 1.0 +591 65 model.embedding_dim 1.0 +591 65 negative_sampler.num_negs_per_pos 27.0 +591 65 training.batch_size 0.0 +591 66 model.embedding_dim 0.0 +591 66 negative_sampler.num_negs_per_pos 58.0 +591 66 training.batch_size 1.0 +591 67 model.embedding_dim 1.0 +591 67 negative_sampler.num_negs_per_pos 70.0 +591 67 training.batch_size 0.0 +591 68 model.embedding_dim 1.0 +591 68 negative_sampler.num_negs_per_pos 4.0 +591 68 training.batch_size 2.0 +591 69 model.embedding_dim 2.0 +591 69 negative_sampler.num_negs_per_pos 70.0 +591 69 training.batch_size 0.0 +591 70 model.embedding_dim 0.0 +591 70 negative_sampler.num_negs_per_pos 3.0 +591 70 training.batch_size 1.0 +591 71 model.embedding_dim 1.0 +591 71 negative_sampler.num_negs_per_pos 79.0 +591 71 training.batch_size 2.0 +591 72 model.embedding_dim 1.0 +591 72 negative_sampler.num_negs_per_pos 15.0 +591 72 training.batch_size 0.0 +591 73 model.embedding_dim 0.0 +591 73 negative_sampler.num_negs_per_pos 19.0 +591 73 training.batch_size 0.0 +591 74 model.embedding_dim 1.0 +591 74 negative_sampler.num_negs_per_pos 55.0 +591 74 training.batch_size 2.0 +591 75 model.embedding_dim 2.0 +591 75 negative_sampler.num_negs_per_pos 44.0 +591 75 training.batch_size 2.0 +591 76 model.embedding_dim 2.0 +591 76 negative_sampler.num_negs_per_pos 57.0 +591 76 training.batch_size 2.0 +591 77 model.embedding_dim 0.0 +591 77 negative_sampler.num_negs_per_pos 9.0 +591 77 training.batch_size 1.0 +591 78 model.embedding_dim 1.0 +591 78 negative_sampler.num_negs_per_pos 40.0 +591 78 training.batch_size 2.0 +591 79 model.embedding_dim 1.0 +591 79 
negative_sampler.num_negs_per_pos 64.0 +591 79 training.batch_size 1.0 +591 80 model.embedding_dim 0.0 +591 80 negative_sampler.num_negs_per_pos 74.0 +591 80 training.batch_size 2.0 +591 81 model.embedding_dim 1.0 +591 81 negative_sampler.num_negs_per_pos 13.0 +591 81 training.batch_size 0.0 +591 82 model.embedding_dim 0.0 +591 82 negative_sampler.num_negs_per_pos 22.0 +591 82 training.batch_size 1.0 +591 83 model.embedding_dim 0.0 +591 83 negative_sampler.num_negs_per_pos 96.0 +591 83 training.batch_size 0.0 +591 84 model.embedding_dim 0.0 +591 84 negative_sampler.num_negs_per_pos 14.0 +591 84 training.batch_size 0.0 +591 85 model.embedding_dim 2.0 +591 85 negative_sampler.num_negs_per_pos 92.0 +591 85 training.batch_size 1.0 +591 86 model.embedding_dim 1.0 +591 86 negative_sampler.num_negs_per_pos 50.0 +591 86 training.batch_size 2.0 +591 87 model.embedding_dim 1.0 +591 87 negative_sampler.num_negs_per_pos 93.0 +591 87 training.batch_size 0.0 +591 88 model.embedding_dim 1.0 +591 88 negative_sampler.num_negs_per_pos 6.0 +591 88 training.batch_size 0.0 +591 89 model.embedding_dim 0.0 +591 89 negative_sampler.num_negs_per_pos 21.0 +591 89 training.batch_size 1.0 +591 90 model.embedding_dim 0.0 +591 90 negative_sampler.num_negs_per_pos 51.0 +591 90 training.batch_size 0.0 +591 91 model.embedding_dim 1.0 +591 91 negative_sampler.num_negs_per_pos 51.0 +591 91 training.batch_size 1.0 +591 92 model.embedding_dim 0.0 +591 92 negative_sampler.num_negs_per_pos 52.0 +591 92 training.batch_size 1.0 +591 93 model.embedding_dim 1.0 +591 93 negative_sampler.num_negs_per_pos 35.0 +591 93 training.batch_size 1.0 +591 94 model.embedding_dim 2.0 +591 94 negative_sampler.num_negs_per_pos 83.0 +591 94 training.batch_size 2.0 +591 95 model.embedding_dim 1.0 +591 95 negative_sampler.num_negs_per_pos 51.0 +591 95 training.batch_size 2.0 +591 96 model.embedding_dim 2.0 +591 96 negative_sampler.num_negs_per_pos 92.0 +591 96 training.batch_size 0.0 +591 97 model.embedding_dim 2.0 +591 97 
negative_sampler.num_negs_per_pos 57.0 +591 97 training.batch_size 2.0 +591 98 model.embedding_dim 2.0 +591 98 negative_sampler.num_negs_per_pos 14.0 +591 98 training.batch_size 1.0 +591 99 model.embedding_dim 2.0 +591 99 negative_sampler.num_negs_per_pos 57.0 +591 99 training.batch_size 2.0 +591 100 model.embedding_dim 0.0 +591 100 negative_sampler.num_negs_per_pos 27.0 +591 100 training.batch_size 0.0 +591 1 dataset """kinships""" +591 1 model """rotate""" +591 1 loss """bceaftersigmoid""" +591 1 regularizer """no""" +591 1 optimizer """adadelta""" +591 1 training_loop """owa""" +591 1 negative_sampler """basic""" +591 1 evaluator """rankbased""" +591 2 dataset """kinships""" +591 2 model """rotate""" +591 2 loss """bceaftersigmoid""" +591 2 regularizer """no""" +591 2 optimizer """adadelta""" +591 2 training_loop """owa""" +591 2 negative_sampler """basic""" +591 2 evaluator """rankbased""" +591 3 dataset """kinships""" +591 3 model """rotate""" +591 3 loss """bceaftersigmoid""" +591 3 regularizer """no""" +591 3 optimizer """adadelta""" +591 3 training_loop """owa""" +591 3 negative_sampler """basic""" +591 3 evaluator """rankbased""" +591 4 dataset """kinships""" +591 4 model """rotate""" +591 4 loss """bceaftersigmoid""" +591 4 regularizer """no""" +591 4 optimizer """adadelta""" +591 4 training_loop """owa""" +591 4 negative_sampler """basic""" +591 4 evaluator """rankbased""" +591 5 dataset """kinships""" +591 5 model """rotate""" +591 5 loss """bceaftersigmoid""" +591 5 regularizer """no""" +591 5 optimizer """adadelta""" +591 5 training_loop """owa""" +591 5 negative_sampler """basic""" +591 5 evaluator """rankbased""" +591 6 dataset """kinships""" +591 6 model """rotate""" +591 6 loss """bceaftersigmoid""" +591 6 regularizer """no""" +591 6 optimizer """adadelta""" +591 6 training_loop """owa""" +591 6 negative_sampler """basic""" +591 6 evaluator """rankbased""" +591 7 dataset """kinships""" +591 7 model """rotate""" +591 7 loss """bceaftersigmoid""" 
+591 7 regularizer """no""" +591 7 optimizer """adadelta""" +591 7 training_loop """owa""" +591 7 negative_sampler """basic""" +591 7 evaluator """rankbased""" +591 8 dataset """kinships""" +591 8 model """rotate""" +591 8 loss """bceaftersigmoid""" +591 8 regularizer """no""" +591 8 optimizer """adadelta""" +591 8 training_loop """owa""" +591 8 negative_sampler """basic""" +591 8 evaluator """rankbased""" +591 9 dataset """kinships""" +591 9 model """rotate""" +591 9 loss """bceaftersigmoid""" +591 9 regularizer """no""" +591 9 optimizer """adadelta""" +591 9 training_loop """owa""" +591 9 negative_sampler """basic""" +591 9 evaluator """rankbased""" +591 10 dataset """kinships""" +591 10 model """rotate""" +591 10 loss """bceaftersigmoid""" +591 10 regularizer """no""" +591 10 optimizer """adadelta""" +591 10 training_loop """owa""" +591 10 negative_sampler """basic""" +591 10 evaluator """rankbased""" +591 11 dataset """kinships""" +591 11 model """rotate""" +591 11 loss """bceaftersigmoid""" +591 11 regularizer """no""" +591 11 optimizer """adadelta""" +591 11 training_loop """owa""" +591 11 negative_sampler """basic""" +591 11 evaluator """rankbased""" +591 12 dataset """kinships""" +591 12 model """rotate""" +591 12 loss """bceaftersigmoid""" +591 12 regularizer """no""" +591 12 optimizer """adadelta""" +591 12 training_loop """owa""" +591 12 negative_sampler """basic""" +591 12 evaluator """rankbased""" +591 13 dataset """kinships""" +591 13 model """rotate""" +591 13 loss """bceaftersigmoid""" +591 13 regularizer """no""" +591 13 optimizer """adadelta""" +591 13 training_loop """owa""" +591 13 negative_sampler """basic""" +591 13 evaluator """rankbased""" +591 14 dataset """kinships""" +591 14 model """rotate""" +591 14 loss """bceaftersigmoid""" +591 14 regularizer """no""" +591 14 optimizer """adadelta""" +591 14 training_loop """owa""" +591 14 negative_sampler """basic""" +591 14 evaluator """rankbased""" +591 15 dataset """kinships""" +591 15 model 
"""rotate""" +591 15 loss """bceaftersigmoid""" +591 15 regularizer """no""" +591 15 optimizer """adadelta""" +591 15 training_loop """owa""" +591 15 negative_sampler """basic""" +591 15 evaluator """rankbased""" +591 16 dataset """kinships""" +591 16 model """rotate""" +591 16 loss """bceaftersigmoid""" +591 16 regularizer """no""" +591 16 optimizer """adadelta""" +591 16 training_loop """owa""" +591 16 negative_sampler """basic""" +591 16 evaluator """rankbased""" +591 17 dataset """kinships""" +591 17 model """rotate""" +591 17 loss """bceaftersigmoid""" +591 17 regularizer """no""" +591 17 optimizer """adadelta""" +591 17 training_loop """owa""" +591 17 negative_sampler """basic""" +591 17 evaluator """rankbased""" +591 18 dataset """kinships""" +591 18 model """rotate""" +591 18 loss """bceaftersigmoid""" +591 18 regularizer """no""" +591 18 optimizer """adadelta""" +591 18 training_loop """owa""" +591 18 negative_sampler """basic""" +591 18 evaluator """rankbased""" +591 19 dataset """kinships""" +591 19 model """rotate""" +591 19 loss """bceaftersigmoid""" +591 19 regularizer """no""" +591 19 optimizer """adadelta""" +591 19 training_loop """owa""" +591 19 negative_sampler """basic""" +591 19 evaluator """rankbased""" +591 20 dataset """kinships""" +591 20 model """rotate""" +591 20 loss """bceaftersigmoid""" +591 20 regularizer """no""" +591 20 optimizer """adadelta""" +591 20 training_loop """owa""" +591 20 negative_sampler """basic""" +591 20 evaluator """rankbased""" +591 21 dataset """kinships""" +591 21 model """rotate""" +591 21 loss """bceaftersigmoid""" +591 21 regularizer """no""" +591 21 optimizer """adadelta""" +591 21 training_loop """owa""" +591 21 negative_sampler """basic""" +591 21 evaluator """rankbased""" +591 22 dataset """kinships""" +591 22 model """rotate""" +591 22 loss """bceaftersigmoid""" +591 22 regularizer """no""" +591 22 optimizer """adadelta""" +591 22 training_loop """owa""" +591 22 negative_sampler """basic""" +591 22 
evaluator """rankbased""" +591 23 dataset """kinships""" +591 23 model """rotate""" +591 23 loss """bceaftersigmoid""" +591 23 regularizer """no""" +591 23 optimizer """adadelta""" +591 23 training_loop """owa""" +591 23 negative_sampler """basic""" +591 23 evaluator """rankbased""" +591 24 dataset """kinships""" +591 24 model """rotate""" +591 24 loss """bceaftersigmoid""" +591 24 regularizer """no""" +591 24 optimizer """adadelta""" +591 24 training_loop """owa""" +591 24 negative_sampler """basic""" +591 24 evaluator """rankbased""" +591 25 dataset """kinships""" +591 25 model """rotate""" +591 25 loss """bceaftersigmoid""" +591 25 regularizer """no""" +591 25 optimizer """adadelta""" +591 25 training_loop """owa""" +591 25 negative_sampler """basic""" +591 25 evaluator """rankbased""" +591 26 dataset """kinships""" +591 26 model """rotate""" +591 26 loss """bceaftersigmoid""" +591 26 regularizer """no""" +591 26 optimizer """adadelta""" +591 26 training_loop """owa""" +591 26 negative_sampler """basic""" +591 26 evaluator """rankbased""" +591 27 dataset """kinships""" +591 27 model """rotate""" +591 27 loss """bceaftersigmoid""" +591 27 regularizer """no""" +591 27 optimizer """adadelta""" +591 27 training_loop """owa""" +591 27 negative_sampler """basic""" +591 27 evaluator """rankbased""" +591 28 dataset """kinships""" +591 28 model """rotate""" +591 28 loss """bceaftersigmoid""" +591 28 regularizer """no""" +591 28 optimizer """adadelta""" +591 28 training_loop """owa""" +591 28 negative_sampler """basic""" +591 28 evaluator """rankbased""" +591 29 dataset """kinships""" +591 29 model """rotate""" +591 29 loss """bceaftersigmoid""" +591 29 regularizer """no""" +591 29 optimizer """adadelta""" +591 29 training_loop """owa""" +591 29 negative_sampler """basic""" +591 29 evaluator """rankbased""" +591 30 dataset """kinships""" +591 30 model """rotate""" +591 30 loss """bceaftersigmoid""" +591 30 regularizer """no""" +591 30 optimizer """adadelta""" +591 30 
training_loop """owa""" +591 30 negative_sampler """basic""" +591 30 evaluator """rankbased""" +591 31 dataset """kinships""" +591 31 model """rotate""" +591 31 loss """bceaftersigmoid""" +591 31 regularizer """no""" +591 31 optimizer """adadelta""" +591 31 training_loop """owa""" +591 31 negative_sampler """basic""" +591 31 evaluator """rankbased""" +591 32 dataset """kinships""" +591 32 model """rotate""" +591 32 loss """bceaftersigmoid""" +591 32 regularizer """no""" +591 32 optimizer """adadelta""" +591 32 training_loop """owa""" +591 32 negative_sampler """basic""" +591 32 evaluator """rankbased""" +591 33 dataset """kinships""" +591 33 model """rotate""" +591 33 loss """bceaftersigmoid""" +591 33 regularizer """no""" +591 33 optimizer """adadelta""" +591 33 training_loop """owa""" +591 33 negative_sampler """basic""" +591 33 evaluator """rankbased""" +591 34 dataset """kinships""" +591 34 model """rotate""" +591 34 loss """bceaftersigmoid""" +591 34 regularizer """no""" +591 34 optimizer """adadelta""" +591 34 training_loop """owa""" +591 34 negative_sampler """basic""" +591 34 evaluator """rankbased""" +591 35 dataset """kinships""" +591 35 model """rotate""" +591 35 loss """bceaftersigmoid""" +591 35 regularizer """no""" +591 35 optimizer """adadelta""" +591 35 training_loop """owa""" +591 35 negative_sampler """basic""" +591 35 evaluator """rankbased""" +591 36 dataset """kinships""" +591 36 model """rotate""" +591 36 loss """bceaftersigmoid""" +591 36 regularizer """no""" +591 36 optimizer """adadelta""" +591 36 training_loop """owa""" +591 36 negative_sampler """basic""" +591 36 evaluator """rankbased""" +591 37 dataset """kinships""" +591 37 model """rotate""" +591 37 loss """bceaftersigmoid""" +591 37 regularizer """no""" +591 37 optimizer """adadelta""" +591 37 training_loop """owa""" +591 37 negative_sampler """basic""" +591 37 evaluator """rankbased""" +591 38 dataset """kinships""" +591 38 model """rotate""" +591 38 loss """bceaftersigmoid""" +591 
38 regularizer """no""" +591 38 optimizer """adadelta""" +591 38 training_loop """owa""" +591 38 negative_sampler """basic""" +591 38 evaluator """rankbased""" +591 39 dataset """kinships""" +591 39 model """rotate""" +591 39 loss """bceaftersigmoid""" +591 39 regularizer """no""" +591 39 optimizer """adadelta""" +591 39 training_loop """owa""" +591 39 negative_sampler """basic""" +591 39 evaluator """rankbased""" +591 40 dataset """kinships""" +591 40 model """rotate""" +591 40 loss """bceaftersigmoid""" +591 40 regularizer """no""" +591 40 optimizer """adadelta""" +591 40 training_loop """owa""" +591 40 negative_sampler """basic""" +591 40 evaluator """rankbased""" +591 41 dataset """kinships""" +591 41 model """rotate""" +591 41 loss """bceaftersigmoid""" +591 41 regularizer """no""" +591 41 optimizer """adadelta""" +591 41 training_loop """owa""" +591 41 negative_sampler """basic""" +591 41 evaluator """rankbased""" +591 42 dataset """kinships""" +591 42 model """rotate""" +591 42 loss """bceaftersigmoid""" +591 42 regularizer """no""" +591 42 optimizer """adadelta""" +591 42 training_loop """owa""" +591 42 negative_sampler """basic""" +591 42 evaluator """rankbased""" +591 43 dataset """kinships""" +591 43 model """rotate""" +591 43 loss """bceaftersigmoid""" +591 43 regularizer """no""" +591 43 optimizer """adadelta""" +591 43 training_loop """owa""" +591 43 negative_sampler """basic""" +591 43 evaluator """rankbased""" +591 44 dataset """kinships""" +591 44 model """rotate""" +591 44 loss """bceaftersigmoid""" +591 44 regularizer """no""" +591 44 optimizer """adadelta""" +591 44 training_loop """owa""" +591 44 negative_sampler """basic""" +591 44 evaluator """rankbased""" +591 45 dataset """kinships""" +591 45 model """rotate""" +591 45 loss """bceaftersigmoid""" +591 45 regularizer """no""" +591 45 optimizer """adadelta""" +591 45 training_loop """owa""" +591 45 negative_sampler """basic""" +591 45 evaluator """rankbased""" +591 46 dataset """kinships""" 
+591 46 model """rotate""" +591 46 loss """bceaftersigmoid""" +591 46 regularizer """no""" +591 46 optimizer """adadelta""" +591 46 training_loop """owa""" +591 46 negative_sampler """basic""" +591 46 evaluator """rankbased""" +591 47 dataset """kinships""" +591 47 model """rotate""" +591 47 loss """bceaftersigmoid""" +591 47 regularizer """no""" +591 47 optimizer """adadelta""" +591 47 training_loop """owa""" +591 47 negative_sampler """basic""" +591 47 evaluator """rankbased""" +591 48 dataset """kinships""" +591 48 model """rotate""" +591 48 loss """bceaftersigmoid""" +591 48 regularizer """no""" +591 48 optimizer """adadelta""" +591 48 training_loop """owa""" +591 48 negative_sampler """basic""" +591 48 evaluator """rankbased""" +591 49 dataset """kinships""" +591 49 model """rotate""" +591 49 loss """bceaftersigmoid""" +591 49 regularizer """no""" +591 49 optimizer """adadelta""" +591 49 training_loop """owa""" +591 49 negative_sampler """basic""" +591 49 evaluator """rankbased""" +591 50 dataset """kinships""" +591 50 model """rotate""" +591 50 loss """bceaftersigmoid""" +591 50 regularizer """no""" +591 50 optimizer """adadelta""" +591 50 training_loop """owa""" +591 50 negative_sampler """basic""" +591 50 evaluator """rankbased""" +591 51 dataset """kinships""" +591 51 model """rotate""" +591 51 loss """bceaftersigmoid""" +591 51 regularizer """no""" +591 51 optimizer """adadelta""" +591 51 training_loop """owa""" +591 51 negative_sampler """basic""" +591 51 evaluator """rankbased""" +591 52 dataset """kinships""" +591 52 model """rotate""" +591 52 loss """bceaftersigmoid""" +591 52 regularizer """no""" +591 52 optimizer """adadelta""" +591 52 training_loop """owa""" +591 52 negative_sampler """basic""" +591 52 evaluator """rankbased""" +591 53 dataset """kinships""" +591 53 model """rotate""" +591 53 loss """bceaftersigmoid""" +591 53 regularizer """no""" +591 53 optimizer """adadelta""" +591 53 training_loop """owa""" +591 53 negative_sampler """basic""" 
+591 53 evaluator """rankbased""" +591 54 dataset """kinships""" +591 54 model """rotate""" +591 54 loss """bceaftersigmoid""" +591 54 regularizer """no""" +591 54 optimizer """adadelta""" +591 54 training_loop """owa""" +591 54 negative_sampler """basic""" +591 54 evaluator """rankbased""" +591 55 dataset """kinships""" +591 55 model """rotate""" +591 55 loss """bceaftersigmoid""" +591 55 regularizer """no""" +591 55 optimizer """adadelta""" +591 55 training_loop """owa""" +591 55 negative_sampler """basic""" +591 55 evaluator """rankbased""" +591 56 dataset """kinships""" +591 56 model """rotate""" +591 56 loss """bceaftersigmoid""" +591 56 regularizer """no""" +591 56 optimizer """adadelta""" +591 56 training_loop """owa""" +591 56 negative_sampler """basic""" +591 56 evaluator """rankbased""" +591 57 dataset """kinships""" +591 57 model """rotate""" +591 57 loss """bceaftersigmoid""" +591 57 regularizer """no""" +591 57 optimizer """adadelta""" +591 57 training_loop """owa""" +591 57 negative_sampler """basic""" +591 57 evaluator """rankbased""" +591 58 dataset """kinships""" +591 58 model """rotate""" +591 58 loss """bceaftersigmoid""" +591 58 regularizer """no""" +591 58 optimizer """adadelta""" +591 58 training_loop """owa""" +591 58 negative_sampler """basic""" +591 58 evaluator """rankbased""" +591 59 dataset """kinships""" +591 59 model """rotate""" +591 59 loss """bceaftersigmoid""" +591 59 regularizer """no""" +591 59 optimizer """adadelta""" +591 59 training_loop """owa""" +591 59 negative_sampler """basic""" +591 59 evaluator """rankbased""" +591 60 dataset """kinships""" +591 60 model """rotate""" +591 60 loss """bceaftersigmoid""" +591 60 regularizer """no""" +591 60 optimizer """adadelta""" +591 60 training_loop """owa""" +591 60 negative_sampler """basic""" +591 60 evaluator """rankbased""" +591 61 dataset """kinships""" +591 61 model """rotate""" +591 61 loss """bceaftersigmoid""" +591 61 regularizer """no""" +591 61 optimizer """adadelta""" +591 
61 training_loop """owa""" +591 61 negative_sampler """basic""" +591 61 evaluator """rankbased""" +591 62 dataset """kinships""" +591 62 model """rotate""" +591 62 loss """bceaftersigmoid""" +591 62 regularizer """no""" +591 62 optimizer """adadelta""" +591 62 training_loop """owa""" +591 62 negative_sampler """basic""" +591 62 evaluator """rankbased""" +591 63 dataset """kinships""" +591 63 model """rotate""" +591 63 loss """bceaftersigmoid""" +591 63 regularizer """no""" +591 63 optimizer """adadelta""" +591 63 training_loop """owa""" +591 63 negative_sampler """basic""" +591 63 evaluator """rankbased""" +591 64 dataset """kinships""" +591 64 model """rotate""" +591 64 loss """bceaftersigmoid""" +591 64 regularizer """no""" +591 64 optimizer """adadelta""" +591 64 training_loop """owa""" +591 64 negative_sampler """basic""" +591 64 evaluator """rankbased""" +591 65 dataset """kinships""" +591 65 model """rotate""" +591 65 loss """bceaftersigmoid""" +591 65 regularizer """no""" +591 65 optimizer """adadelta""" +591 65 training_loop """owa""" +591 65 negative_sampler """basic""" +591 65 evaluator """rankbased""" +591 66 dataset """kinships""" +591 66 model """rotate""" +591 66 loss """bceaftersigmoid""" +591 66 regularizer """no""" +591 66 optimizer """adadelta""" +591 66 training_loop """owa""" +591 66 negative_sampler """basic""" +591 66 evaluator """rankbased""" +591 67 dataset """kinships""" +591 67 model """rotate""" +591 67 loss """bceaftersigmoid""" +591 67 regularizer """no""" +591 67 optimizer """adadelta""" +591 67 training_loop """owa""" +591 67 negative_sampler """basic""" +591 67 evaluator """rankbased""" +591 68 dataset """kinships""" +591 68 model """rotate""" +591 68 loss """bceaftersigmoid""" +591 68 regularizer """no""" +591 68 optimizer """adadelta""" +591 68 training_loop """owa""" +591 68 negative_sampler """basic""" +591 68 evaluator """rankbased""" +591 69 dataset """kinships""" +591 69 model """rotate""" +591 69 loss """bceaftersigmoid""" 
+591 69 regularizer """no""" +591 69 optimizer """adadelta""" +591 69 training_loop """owa""" +591 69 negative_sampler """basic""" +591 69 evaluator """rankbased""" +591 70 dataset """kinships""" +591 70 model """rotate""" +591 70 loss """bceaftersigmoid""" +591 70 regularizer """no""" +591 70 optimizer """adadelta""" +591 70 training_loop """owa""" +591 70 negative_sampler """basic""" +591 70 evaluator """rankbased""" +591 71 dataset """kinships""" +591 71 model """rotate""" +591 71 loss """bceaftersigmoid""" +591 71 regularizer """no""" +591 71 optimizer """adadelta""" +591 71 training_loop """owa""" +591 71 negative_sampler """basic""" +591 71 evaluator """rankbased""" +591 72 dataset """kinships""" +591 72 model """rotate""" +591 72 loss """bceaftersigmoid""" +591 72 regularizer """no""" +591 72 optimizer """adadelta""" +591 72 training_loop """owa""" +591 72 negative_sampler """basic""" +591 72 evaluator """rankbased""" +591 73 dataset """kinships""" +591 73 model """rotate""" +591 73 loss """bceaftersigmoid""" +591 73 regularizer """no""" +591 73 optimizer """adadelta""" +591 73 training_loop """owa""" +591 73 negative_sampler """basic""" +591 73 evaluator """rankbased""" +591 74 dataset """kinships""" +591 74 model """rotate""" +591 74 loss """bceaftersigmoid""" +591 74 regularizer """no""" +591 74 optimizer """adadelta""" +591 74 training_loop """owa""" +591 74 negative_sampler """basic""" +591 74 evaluator """rankbased""" +591 75 dataset """kinships""" +591 75 model """rotate""" +591 75 loss """bceaftersigmoid""" +591 75 regularizer """no""" +591 75 optimizer """adadelta""" +591 75 training_loop """owa""" +591 75 negative_sampler """basic""" +591 75 evaluator """rankbased""" +591 76 dataset """kinships""" +591 76 model """rotate""" +591 76 loss """bceaftersigmoid""" +591 76 regularizer """no""" +591 76 optimizer """adadelta""" +591 76 training_loop """owa""" +591 76 negative_sampler """basic""" +591 76 evaluator """rankbased""" +591 77 dataset 
"""kinships""" +591 77 model """rotate""" +591 77 loss """bceaftersigmoid""" +591 77 regularizer """no""" +591 77 optimizer """adadelta""" +591 77 training_loop """owa""" +591 77 negative_sampler """basic""" +591 77 evaluator """rankbased""" +591 78 dataset """kinships""" +591 78 model """rotate""" +591 78 loss """bceaftersigmoid""" +591 78 regularizer """no""" +591 78 optimizer """adadelta""" +591 78 training_loop """owa""" +591 78 negative_sampler """basic""" +591 78 evaluator """rankbased""" +591 79 dataset """kinships""" +591 79 model """rotate""" +591 79 loss """bceaftersigmoid""" +591 79 regularizer """no""" +591 79 optimizer """adadelta""" +591 79 training_loop """owa""" +591 79 negative_sampler """basic""" +591 79 evaluator """rankbased""" +591 80 dataset """kinships""" +591 80 model """rotate""" +591 80 loss """bceaftersigmoid""" +591 80 regularizer """no""" +591 80 optimizer """adadelta""" +591 80 training_loop """owa""" +591 80 negative_sampler """basic""" +591 80 evaluator """rankbased""" +591 81 dataset """kinships""" +591 81 model """rotate""" +591 81 loss """bceaftersigmoid""" +591 81 regularizer """no""" +591 81 optimizer """adadelta""" +591 81 training_loop """owa""" +591 81 negative_sampler """basic""" +591 81 evaluator """rankbased""" +591 82 dataset """kinships""" +591 82 model """rotate""" +591 82 loss """bceaftersigmoid""" +591 82 regularizer """no""" +591 82 optimizer """adadelta""" +591 82 training_loop """owa""" +591 82 negative_sampler """basic""" +591 82 evaluator """rankbased""" +591 83 dataset """kinships""" +591 83 model """rotate""" +591 83 loss """bceaftersigmoid""" +591 83 regularizer """no""" +591 83 optimizer """adadelta""" +591 83 training_loop """owa""" +591 83 negative_sampler """basic""" +591 83 evaluator """rankbased""" +591 84 dataset """kinships""" +591 84 model """rotate""" +591 84 loss """bceaftersigmoid""" +591 84 regularizer """no""" +591 84 optimizer """adadelta""" +591 84 training_loop """owa""" +591 84 
negative_sampler """basic""" +591 84 evaluator """rankbased""" +591 85 dataset """kinships""" +591 85 model """rotate""" +591 85 loss """bceaftersigmoid""" +591 85 regularizer """no""" +591 85 optimizer """adadelta""" +591 85 training_loop """owa""" +591 85 negative_sampler """basic""" +591 85 evaluator """rankbased""" +591 86 dataset """kinships""" +591 86 model """rotate""" +591 86 loss """bceaftersigmoid""" +591 86 regularizer """no""" +591 86 optimizer """adadelta""" +591 86 training_loop """owa""" +591 86 negative_sampler """basic""" +591 86 evaluator """rankbased""" +591 87 dataset """kinships""" +591 87 model """rotate""" +591 87 loss """bceaftersigmoid""" +591 87 regularizer """no""" +591 87 optimizer """adadelta""" +591 87 training_loop """owa""" +591 87 negative_sampler """basic""" +591 87 evaluator """rankbased""" +591 88 dataset """kinships""" +591 88 model """rotate""" +591 88 loss """bceaftersigmoid""" +591 88 regularizer """no""" +591 88 optimizer """adadelta""" +591 88 training_loop """owa""" +591 88 negative_sampler """basic""" +591 88 evaluator """rankbased""" +591 89 dataset """kinships""" +591 89 model """rotate""" +591 89 loss """bceaftersigmoid""" +591 89 regularizer """no""" +591 89 optimizer """adadelta""" +591 89 training_loop """owa""" +591 89 negative_sampler """basic""" +591 89 evaluator """rankbased""" +591 90 dataset """kinships""" +591 90 model """rotate""" +591 90 loss """bceaftersigmoid""" +591 90 regularizer """no""" +591 90 optimizer """adadelta""" +591 90 training_loop """owa""" +591 90 negative_sampler """basic""" +591 90 evaluator """rankbased""" +591 91 dataset """kinships""" +591 91 model """rotate""" +591 91 loss """bceaftersigmoid""" +591 91 regularizer """no""" +591 91 optimizer """adadelta""" +591 91 training_loop """owa""" +591 91 negative_sampler """basic""" +591 91 evaluator """rankbased""" +591 92 dataset """kinships""" +591 92 model """rotate""" +591 92 loss """bceaftersigmoid""" +591 92 regularizer """no""" +591 92 
optimizer """adadelta""" +591 92 training_loop """owa""" +591 92 negative_sampler """basic""" +591 92 evaluator """rankbased""" +591 93 dataset """kinships""" +591 93 model """rotate""" +591 93 loss """bceaftersigmoid""" +591 93 regularizer """no""" +591 93 optimizer """adadelta""" +591 93 training_loop """owa""" +591 93 negative_sampler """basic""" +591 93 evaluator """rankbased""" +591 94 dataset """kinships""" +591 94 model """rotate""" +591 94 loss """bceaftersigmoid""" +591 94 regularizer """no""" +591 94 optimizer """adadelta""" +591 94 training_loop """owa""" +591 94 negative_sampler """basic""" +591 94 evaluator """rankbased""" +591 95 dataset """kinships""" +591 95 model """rotate""" +591 95 loss """bceaftersigmoid""" +591 95 regularizer """no""" +591 95 optimizer """adadelta""" +591 95 training_loop """owa""" +591 95 negative_sampler """basic""" +591 95 evaluator """rankbased""" +591 96 dataset """kinships""" +591 96 model """rotate""" +591 96 loss """bceaftersigmoid""" +591 96 regularizer """no""" +591 96 optimizer """adadelta""" +591 96 training_loop """owa""" +591 96 negative_sampler """basic""" +591 96 evaluator """rankbased""" +591 97 dataset """kinships""" +591 97 model """rotate""" +591 97 loss """bceaftersigmoid""" +591 97 regularizer """no""" +591 97 optimizer """adadelta""" +591 97 training_loop """owa""" +591 97 negative_sampler """basic""" +591 97 evaluator """rankbased""" +591 98 dataset """kinships""" +591 98 model """rotate""" +591 98 loss """bceaftersigmoid""" +591 98 regularizer """no""" +591 98 optimizer """adadelta""" +591 98 training_loop """owa""" +591 98 negative_sampler """basic""" +591 98 evaluator """rankbased""" +591 99 dataset """kinships""" +591 99 model """rotate""" +591 99 loss """bceaftersigmoid""" +591 99 regularizer """no""" +591 99 optimizer """adadelta""" +591 99 training_loop """owa""" +591 99 negative_sampler """basic""" +591 99 evaluator """rankbased""" +591 100 dataset """kinships""" +591 100 model """rotate""" +591 
100 loss """bceaftersigmoid""" +591 100 regularizer """no""" +591 100 optimizer """adadelta""" +591 100 training_loop """owa""" +591 100 negative_sampler """basic""" +591 100 evaluator """rankbased""" +592 1 model.embedding_dim 0.0 +592 1 negative_sampler.num_negs_per_pos 90.0 +592 1 training.batch_size 1.0 +592 2 model.embedding_dim 1.0 +592 2 negative_sampler.num_negs_per_pos 76.0 +592 2 training.batch_size 0.0 +592 3 model.embedding_dim 0.0 +592 3 negative_sampler.num_negs_per_pos 97.0 +592 3 training.batch_size 1.0 +592 4 model.embedding_dim 1.0 +592 4 negative_sampler.num_negs_per_pos 55.0 +592 4 training.batch_size 1.0 +592 5 model.embedding_dim 0.0 +592 5 negative_sampler.num_negs_per_pos 32.0 +592 5 training.batch_size 0.0 +592 6 model.embedding_dim 1.0 +592 6 negative_sampler.num_negs_per_pos 24.0 +592 6 training.batch_size 2.0 +592 7 model.embedding_dim 0.0 +592 7 negative_sampler.num_negs_per_pos 63.0 +592 7 training.batch_size 1.0 +592 8 model.embedding_dim 1.0 +592 8 negative_sampler.num_negs_per_pos 4.0 +592 8 training.batch_size 0.0 +592 9 model.embedding_dim 0.0 +592 9 negative_sampler.num_negs_per_pos 33.0 +592 9 training.batch_size 1.0 +592 10 model.embedding_dim 2.0 +592 10 negative_sampler.num_negs_per_pos 99.0 +592 10 training.batch_size 1.0 +592 11 model.embedding_dim 1.0 +592 11 negative_sampler.num_negs_per_pos 74.0 +592 11 training.batch_size 0.0 +592 12 model.embedding_dim 1.0 +592 12 negative_sampler.num_negs_per_pos 45.0 +592 12 training.batch_size 1.0 +592 13 model.embedding_dim 1.0 +592 13 negative_sampler.num_negs_per_pos 47.0 +592 13 training.batch_size 1.0 +592 14 model.embedding_dim 2.0 +592 14 negative_sampler.num_negs_per_pos 89.0 +592 14 training.batch_size 2.0 +592 15 model.embedding_dim 1.0 +592 15 negative_sampler.num_negs_per_pos 82.0 +592 15 training.batch_size 1.0 +592 16 model.embedding_dim 2.0 +592 16 negative_sampler.num_negs_per_pos 50.0 +592 16 training.batch_size 0.0 +592 17 model.embedding_dim 2.0 +592 17 
negative_sampler.num_negs_per_pos 69.0 +592 17 training.batch_size 2.0 +592 18 model.embedding_dim 2.0 +592 18 negative_sampler.num_negs_per_pos 83.0 +592 18 training.batch_size 0.0 +592 19 model.embedding_dim 0.0 +592 19 negative_sampler.num_negs_per_pos 98.0 +592 19 training.batch_size 2.0 +592 20 model.embedding_dim 0.0 +592 20 negative_sampler.num_negs_per_pos 3.0 +592 20 training.batch_size 0.0 +592 21 model.embedding_dim 1.0 +592 21 negative_sampler.num_negs_per_pos 90.0 +592 21 training.batch_size 1.0 +592 22 model.embedding_dim 2.0 +592 22 negative_sampler.num_negs_per_pos 34.0 +592 22 training.batch_size 2.0 +592 23 model.embedding_dim 1.0 +592 23 negative_sampler.num_negs_per_pos 45.0 +592 23 training.batch_size 2.0 +592 24 model.embedding_dim 0.0 +592 24 negative_sampler.num_negs_per_pos 34.0 +592 24 training.batch_size 1.0 +592 25 model.embedding_dim 0.0 +592 25 negative_sampler.num_negs_per_pos 40.0 +592 25 training.batch_size 0.0 +592 26 model.embedding_dim 0.0 +592 26 negative_sampler.num_negs_per_pos 28.0 +592 26 training.batch_size 0.0 +592 27 model.embedding_dim 2.0 +592 27 negative_sampler.num_negs_per_pos 38.0 +592 27 training.batch_size 1.0 +592 28 model.embedding_dim 1.0 +592 28 negative_sampler.num_negs_per_pos 82.0 +592 28 training.batch_size 2.0 +592 29 model.embedding_dim 0.0 +592 29 negative_sampler.num_negs_per_pos 89.0 +592 29 training.batch_size 1.0 +592 30 model.embedding_dim 0.0 +592 30 negative_sampler.num_negs_per_pos 73.0 +592 30 training.batch_size 0.0 +592 31 model.embedding_dim 0.0 +592 31 negative_sampler.num_negs_per_pos 29.0 +592 31 training.batch_size 2.0 +592 32 model.embedding_dim 1.0 +592 32 negative_sampler.num_negs_per_pos 31.0 +592 32 training.batch_size 0.0 +592 33 model.embedding_dim 1.0 +592 33 negative_sampler.num_negs_per_pos 64.0 +592 33 training.batch_size 0.0 +592 34 model.embedding_dim 1.0 +592 34 negative_sampler.num_negs_per_pos 91.0 +592 34 training.batch_size 2.0 +592 35 model.embedding_dim 1.0 +592 35 
negative_sampler.num_negs_per_pos 91.0 +592 35 training.batch_size 1.0 +592 36 model.embedding_dim 1.0 +592 36 negative_sampler.num_negs_per_pos 32.0 +592 36 training.batch_size 2.0 +592 37 model.embedding_dim 0.0 +592 37 negative_sampler.num_negs_per_pos 23.0 +592 37 training.batch_size 0.0 +592 38 model.embedding_dim 0.0 +592 38 negative_sampler.num_negs_per_pos 4.0 +592 38 training.batch_size 0.0 +592 39 model.embedding_dim 1.0 +592 39 negative_sampler.num_negs_per_pos 89.0 +592 39 training.batch_size 1.0 +592 40 model.embedding_dim 1.0 +592 40 negative_sampler.num_negs_per_pos 25.0 +592 40 training.batch_size 2.0 +592 41 model.embedding_dim 2.0 +592 41 negative_sampler.num_negs_per_pos 40.0 +592 41 training.batch_size 2.0 +592 42 model.embedding_dim 1.0 +592 42 negative_sampler.num_negs_per_pos 94.0 +592 42 training.batch_size 0.0 +592 43 model.embedding_dim 2.0 +592 43 negative_sampler.num_negs_per_pos 50.0 +592 43 training.batch_size 1.0 +592 44 model.embedding_dim 0.0 +592 44 negative_sampler.num_negs_per_pos 19.0 +592 44 training.batch_size 1.0 +592 45 model.embedding_dim 1.0 +592 45 negative_sampler.num_negs_per_pos 95.0 +592 45 training.batch_size 1.0 +592 46 model.embedding_dim 0.0 +592 46 negative_sampler.num_negs_per_pos 63.0 +592 46 training.batch_size 0.0 +592 47 model.embedding_dim 1.0 +592 47 negative_sampler.num_negs_per_pos 11.0 +592 47 training.batch_size 2.0 +592 48 model.embedding_dim 1.0 +592 48 negative_sampler.num_negs_per_pos 78.0 +592 48 training.batch_size 2.0 +592 49 model.embedding_dim 2.0 +592 49 negative_sampler.num_negs_per_pos 25.0 +592 49 training.batch_size 2.0 +592 50 model.embedding_dim 1.0 +592 50 negative_sampler.num_negs_per_pos 41.0 +592 50 training.batch_size 0.0 +592 51 model.embedding_dim 0.0 +592 51 negative_sampler.num_negs_per_pos 33.0 +592 51 training.batch_size 2.0 +592 52 model.embedding_dim 0.0 +592 52 negative_sampler.num_negs_per_pos 75.0 +592 52 training.batch_size 1.0 +592 53 model.embedding_dim 0.0 +592 53 
negative_sampler.num_negs_per_pos 25.0 +592 53 training.batch_size 1.0 +592 54 model.embedding_dim 0.0 +592 54 negative_sampler.num_negs_per_pos 57.0 +592 54 training.batch_size 1.0 +592 55 model.embedding_dim 2.0 +592 55 negative_sampler.num_negs_per_pos 73.0 +592 55 training.batch_size 1.0 +592 56 model.embedding_dim 0.0 +592 56 negative_sampler.num_negs_per_pos 49.0 +592 56 training.batch_size 1.0 +592 57 model.embedding_dim 1.0 +592 57 negative_sampler.num_negs_per_pos 15.0 +592 57 training.batch_size 0.0 +592 58 model.embedding_dim 0.0 +592 58 negative_sampler.num_negs_per_pos 53.0 +592 58 training.batch_size 0.0 +592 59 model.embedding_dim 2.0 +592 59 negative_sampler.num_negs_per_pos 95.0 +592 59 training.batch_size 2.0 +592 60 model.embedding_dim 0.0 +592 60 negative_sampler.num_negs_per_pos 51.0 +592 60 training.batch_size 0.0 +592 61 model.embedding_dim 0.0 +592 61 negative_sampler.num_negs_per_pos 52.0 +592 61 training.batch_size 0.0 +592 62 model.embedding_dim 2.0 +592 62 negative_sampler.num_negs_per_pos 72.0 +592 62 training.batch_size 2.0 +592 63 model.embedding_dim 1.0 +592 63 negative_sampler.num_negs_per_pos 71.0 +592 63 training.batch_size 1.0 +592 64 model.embedding_dim 2.0 +592 64 negative_sampler.num_negs_per_pos 12.0 +592 64 training.batch_size 1.0 +592 65 model.embedding_dim 1.0 +592 65 negative_sampler.num_negs_per_pos 78.0 +592 65 training.batch_size 1.0 +592 66 model.embedding_dim 2.0 +592 66 negative_sampler.num_negs_per_pos 62.0 +592 66 training.batch_size 0.0 +592 67 model.embedding_dim 2.0 +592 67 negative_sampler.num_negs_per_pos 35.0 +592 67 training.batch_size 1.0 +592 68 model.embedding_dim 1.0 +592 68 negative_sampler.num_negs_per_pos 53.0 +592 68 training.batch_size 2.0 +592 69 model.embedding_dim 0.0 +592 69 negative_sampler.num_negs_per_pos 61.0 +592 69 training.batch_size 1.0 +592 70 model.embedding_dim 0.0 +592 70 negative_sampler.num_negs_per_pos 5.0 +592 70 training.batch_size 0.0 +592 71 model.embedding_dim 1.0 +592 71 
negative_sampler.num_negs_per_pos 71.0 +592 71 training.batch_size 2.0 +592 72 model.embedding_dim 1.0 +592 72 negative_sampler.num_negs_per_pos 10.0 +592 72 training.batch_size 2.0 +592 73 model.embedding_dim 2.0 +592 73 negative_sampler.num_negs_per_pos 71.0 +592 73 training.batch_size 0.0 +592 74 model.embedding_dim 1.0 +592 74 negative_sampler.num_negs_per_pos 39.0 +592 74 training.batch_size 2.0 +592 75 model.embedding_dim 0.0 +592 75 negative_sampler.num_negs_per_pos 86.0 +592 75 training.batch_size 2.0 +592 76 model.embedding_dim 2.0 +592 76 negative_sampler.num_negs_per_pos 31.0 +592 76 training.batch_size 0.0 +592 77 model.embedding_dim 1.0 +592 77 negative_sampler.num_negs_per_pos 72.0 +592 77 training.batch_size 1.0 +592 78 model.embedding_dim 1.0 +592 78 negative_sampler.num_negs_per_pos 68.0 +592 78 training.batch_size 1.0 +592 79 model.embedding_dim 1.0 +592 79 negative_sampler.num_negs_per_pos 55.0 +592 79 training.batch_size 2.0 +592 80 model.embedding_dim 0.0 +592 80 negative_sampler.num_negs_per_pos 82.0 +592 80 training.batch_size 1.0 +592 81 model.embedding_dim 1.0 +592 81 negative_sampler.num_negs_per_pos 45.0 +592 81 training.batch_size 1.0 +592 82 model.embedding_dim 0.0 +592 82 negative_sampler.num_negs_per_pos 52.0 +592 82 training.batch_size 2.0 +592 83 model.embedding_dim 1.0 +592 83 negative_sampler.num_negs_per_pos 15.0 +592 83 training.batch_size 1.0 +592 84 model.embedding_dim 0.0 +592 84 negative_sampler.num_negs_per_pos 87.0 +592 84 training.batch_size 0.0 +592 85 model.embedding_dim 2.0 +592 85 negative_sampler.num_negs_per_pos 32.0 +592 85 training.batch_size 2.0 +592 86 model.embedding_dim 0.0 +592 86 negative_sampler.num_negs_per_pos 82.0 +592 86 training.batch_size 0.0 +592 87 model.embedding_dim 2.0 +592 87 negative_sampler.num_negs_per_pos 1.0 +592 87 training.batch_size 0.0 +592 88 model.embedding_dim 1.0 +592 88 negative_sampler.num_negs_per_pos 86.0 +592 88 training.batch_size 1.0 +592 89 model.embedding_dim 2.0 +592 89 
negative_sampler.num_negs_per_pos 89.0 +592 89 training.batch_size 0.0 +592 90 model.embedding_dim 1.0 +592 90 negative_sampler.num_negs_per_pos 67.0 +592 90 training.batch_size 0.0 +592 91 model.embedding_dim 0.0 +592 91 negative_sampler.num_negs_per_pos 3.0 +592 91 training.batch_size 0.0 +592 92 model.embedding_dim 0.0 +592 92 negative_sampler.num_negs_per_pos 18.0 +592 92 training.batch_size 0.0 +592 93 model.embedding_dim 1.0 +592 93 negative_sampler.num_negs_per_pos 28.0 +592 93 training.batch_size 1.0 +592 94 model.embedding_dim 1.0 +592 94 negative_sampler.num_negs_per_pos 58.0 +592 94 training.batch_size 1.0 +592 95 model.embedding_dim 0.0 +592 95 negative_sampler.num_negs_per_pos 62.0 +592 95 training.batch_size 0.0 +592 96 model.embedding_dim 0.0 +592 96 negative_sampler.num_negs_per_pos 11.0 +592 96 training.batch_size 0.0 +592 97 model.embedding_dim 2.0 +592 97 negative_sampler.num_negs_per_pos 2.0 +592 97 training.batch_size 1.0 +592 98 model.embedding_dim 0.0 +592 98 negative_sampler.num_negs_per_pos 20.0 +592 98 training.batch_size 2.0 +592 99 model.embedding_dim 2.0 +592 99 negative_sampler.num_negs_per_pos 13.0 +592 99 training.batch_size 2.0 +592 100 model.embedding_dim 2.0 +592 100 negative_sampler.num_negs_per_pos 48.0 +592 100 training.batch_size 2.0 +592 1 dataset """kinships""" +592 1 model """rotate""" +592 1 loss """softplus""" +592 1 regularizer """no""" +592 1 optimizer """adadelta""" +592 1 training_loop """owa""" +592 1 negative_sampler """basic""" +592 1 evaluator """rankbased""" +592 2 dataset """kinships""" +592 2 model """rotate""" +592 2 loss """softplus""" +592 2 regularizer """no""" +592 2 optimizer """adadelta""" +592 2 training_loop """owa""" +592 2 negative_sampler """basic""" +592 2 evaluator """rankbased""" +592 3 dataset """kinships""" +592 3 model """rotate""" +592 3 loss """softplus""" +592 3 regularizer """no""" +592 3 optimizer """adadelta""" +592 3 training_loop """owa""" +592 3 negative_sampler """basic""" +592 3 
evaluator """rankbased""" +592 4 dataset """kinships""" +592 4 model """rotate""" +592 4 loss """softplus""" +592 4 regularizer """no""" +592 4 optimizer """adadelta""" +592 4 training_loop """owa""" +592 4 negative_sampler """basic""" +592 4 evaluator """rankbased""" +592 5 dataset """kinships""" +592 5 model """rotate""" +592 5 loss """softplus""" +592 5 regularizer """no""" +592 5 optimizer """adadelta""" +592 5 training_loop """owa""" +592 5 negative_sampler """basic""" +592 5 evaluator """rankbased""" +592 6 dataset """kinships""" +592 6 model """rotate""" +592 6 loss """softplus""" +592 6 regularizer """no""" +592 6 optimizer """adadelta""" +592 6 training_loop """owa""" +592 6 negative_sampler """basic""" +592 6 evaluator """rankbased""" +592 7 dataset """kinships""" +592 7 model """rotate""" +592 7 loss """softplus""" +592 7 regularizer """no""" +592 7 optimizer """adadelta""" +592 7 training_loop """owa""" +592 7 negative_sampler """basic""" +592 7 evaluator """rankbased""" +592 8 dataset """kinships""" +592 8 model """rotate""" +592 8 loss """softplus""" +592 8 regularizer """no""" +592 8 optimizer """adadelta""" +592 8 training_loop """owa""" +592 8 negative_sampler """basic""" +592 8 evaluator """rankbased""" +592 9 dataset """kinships""" +592 9 model """rotate""" +592 9 loss """softplus""" +592 9 regularizer """no""" +592 9 optimizer """adadelta""" +592 9 training_loop """owa""" +592 9 negative_sampler """basic""" +592 9 evaluator """rankbased""" +592 10 dataset """kinships""" +592 10 model """rotate""" +592 10 loss """softplus""" +592 10 regularizer """no""" +592 10 optimizer """adadelta""" +592 10 training_loop """owa""" +592 10 negative_sampler """basic""" +592 10 evaluator """rankbased""" +592 11 dataset """kinships""" +592 11 model """rotate""" +592 11 loss """softplus""" +592 11 regularizer """no""" +592 11 optimizer """adadelta""" +592 11 training_loop """owa""" +592 11 negative_sampler """basic""" +592 11 evaluator """rankbased""" +592 12 
dataset """kinships""" +592 12 model """rotate""" +592 12 loss """softplus""" +592 12 regularizer """no""" +592 12 optimizer """adadelta""" +592 12 training_loop """owa""" +592 12 negative_sampler """basic""" +592 12 evaluator """rankbased""" +592 13 dataset """kinships""" +592 13 model """rotate""" +592 13 loss """softplus""" +592 13 regularizer """no""" +592 13 optimizer """adadelta""" +592 13 training_loop """owa""" +592 13 negative_sampler """basic""" +592 13 evaluator """rankbased""" +592 14 dataset """kinships""" +592 14 model """rotate""" +592 14 loss """softplus""" +592 14 regularizer """no""" +592 14 optimizer """adadelta""" +592 14 training_loop """owa""" +592 14 negative_sampler """basic""" +592 14 evaluator """rankbased""" +592 15 dataset """kinships""" +592 15 model """rotate""" +592 15 loss """softplus""" +592 15 regularizer """no""" +592 15 optimizer """adadelta""" +592 15 training_loop """owa""" +592 15 negative_sampler """basic""" +592 15 evaluator """rankbased""" +592 16 dataset """kinships""" +592 16 model """rotate""" +592 16 loss """softplus""" +592 16 regularizer """no""" +592 16 optimizer """adadelta""" +592 16 training_loop """owa""" +592 16 negative_sampler """basic""" +592 16 evaluator """rankbased""" +592 17 dataset """kinships""" +592 17 model """rotate""" +592 17 loss """softplus""" +592 17 regularizer """no""" +592 17 optimizer """adadelta""" +592 17 training_loop """owa""" +592 17 negative_sampler """basic""" +592 17 evaluator """rankbased""" +592 18 dataset """kinships""" +592 18 model """rotate""" +592 18 loss """softplus""" +592 18 regularizer """no""" +592 18 optimizer """adadelta""" +592 18 training_loop """owa""" +592 18 negative_sampler """basic""" +592 18 evaluator """rankbased""" +592 19 dataset """kinships""" +592 19 model """rotate""" +592 19 loss """softplus""" +592 19 regularizer """no""" +592 19 optimizer """adadelta""" +592 19 training_loop """owa""" +592 19 negative_sampler """basic""" +592 19 evaluator """rankbased""" 
+592 20 dataset """kinships""" +592 20 model """rotate""" +592 20 loss """softplus""" +592 20 regularizer """no""" +592 20 optimizer """adadelta""" +592 20 training_loop """owa""" +592 20 negative_sampler """basic""" +592 20 evaluator """rankbased""" +592 21 dataset """kinships""" +592 21 model """rotate""" +592 21 loss """softplus""" +592 21 regularizer """no""" +592 21 optimizer """adadelta""" +592 21 training_loop """owa""" +592 21 negative_sampler """basic""" +592 21 evaluator """rankbased""" +592 22 dataset """kinships""" +592 22 model """rotate""" +592 22 loss """softplus""" +592 22 regularizer """no""" +592 22 optimizer """adadelta""" +592 22 training_loop """owa""" +592 22 negative_sampler """basic""" +592 22 evaluator """rankbased""" +592 23 dataset """kinships""" +592 23 model """rotate""" +592 23 loss """softplus""" +592 23 regularizer """no""" +592 23 optimizer """adadelta""" +592 23 training_loop """owa""" +592 23 negative_sampler """basic""" +592 23 evaluator """rankbased""" +592 24 dataset """kinships""" +592 24 model """rotate""" +592 24 loss """softplus""" +592 24 regularizer """no""" +592 24 optimizer """adadelta""" +592 24 training_loop """owa""" +592 24 negative_sampler """basic""" +592 24 evaluator """rankbased""" +592 25 dataset """kinships""" +592 25 model """rotate""" +592 25 loss """softplus""" +592 25 regularizer """no""" +592 25 optimizer """adadelta""" +592 25 training_loop """owa""" +592 25 negative_sampler """basic""" +592 25 evaluator """rankbased""" +592 26 dataset """kinships""" +592 26 model """rotate""" +592 26 loss """softplus""" +592 26 regularizer """no""" +592 26 optimizer """adadelta""" +592 26 training_loop """owa""" +592 26 negative_sampler """basic""" +592 26 evaluator """rankbased""" +592 27 dataset """kinships""" +592 27 model """rotate""" +592 27 loss """softplus""" +592 27 regularizer """no""" +592 27 optimizer """adadelta""" +592 27 training_loop """owa""" +592 27 negative_sampler """basic""" +592 27 evaluator 
"""rankbased""" +592 28 dataset """kinships""" +592 28 model """rotate""" +592 28 loss """softplus""" +592 28 regularizer """no""" +592 28 optimizer """adadelta""" +592 28 training_loop """owa""" +592 28 negative_sampler """basic""" +592 28 evaluator """rankbased""" +592 29 dataset """kinships""" +592 29 model """rotate""" +592 29 loss """softplus""" +592 29 regularizer """no""" +592 29 optimizer """adadelta""" +592 29 training_loop """owa""" +592 29 negative_sampler """basic""" +592 29 evaluator """rankbased""" +592 30 dataset """kinships""" +592 30 model """rotate""" +592 30 loss """softplus""" +592 30 regularizer """no""" +592 30 optimizer """adadelta""" +592 30 training_loop """owa""" +592 30 negative_sampler """basic""" +592 30 evaluator """rankbased""" +592 31 dataset """kinships""" +592 31 model """rotate""" +592 31 loss """softplus""" +592 31 regularizer """no""" +592 31 optimizer """adadelta""" +592 31 training_loop """owa""" +592 31 negative_sampler """basic""" +592 31 evaluator """rankbased""" +592 32 dataset """kinships""" +592 32 model """rotate""" +592 32 loss """softplus""" +592 32 regularizer """no""" +592 32 optimizer """adadelta""" +592 32 training_loop """owa""" +592 32 negative_sampler """basic""" +592 32 evaluator """rankbased""" +592 33 dataset """kinships""" +592 33 model """rotate""" +592 33 loss """softplus""" +592 33 regularizer """no""" +592 33 optimizer """adadelta""" +592 33 training_loop """owa""" +592 33 negative_sampler """basic""" +592 33 evaluator """rankbased""" +592 34 dataset """kinships""" +592 34 model """rotate""" +592 34 loss """softplus""" +592 34 regularizer """no""" +592 34 optimizer """adadelta""" +592 34 training_loop """owa""" +592 34 negative_sampler """basic""" +592 34 evaluator """rankbased""" +592 35 dataset """kinships""" +592 35 model """rotate""" +592 35 loss """softplus""" +592 35 regularizer """no""" +592 35 optimizer """adadelta""" +592 35 training_loop """owa""" +592 35 negative_sampler """basic""" +592 35 
evaluator """rankbased""" +592 36 dataset """kinships""" +592 36 model """rotate""" +592 36 loss """softplus""" +592 36 regularizer """no""" +592 36 optimizer """adadelta""" +592 36 training_loop """owa""" +592 36 negative_sampler """basic""" +592 36 evaluator """rankbased""" +592 37 dataset """kinships""" +592 37 model """rotate""" +592 37 loss """softplus""" +592 37 regularizer """no""" +592 37 optimizer """adadelta""" +592 37 training_loop """owa""" +592 37 negative_sampler """basic""" +592 37 evaluator """rankbased""" +592 38 dataset """kinships""" +592 38 model """rotate""" +592 38 loss """softplus""" +592 38 regularizer """no""" +592 38 optimizer """adadelta""" +592 38 training_loop """owa""" +592 38 negative_sampler """basic""" +592 38 evaluator """rankbased""" +592 39 dataset """kinships""" +592 39 model """rotate""" +592 39 loss """softplus""" +592 39 regularizer """no""" +592 39 optimizer """adadelta""" +592 39 training_loop """owa""" +592 39 negative_sampler """basic""" +592 39 evaluator """rankbased""" +592 40 dataset """kinships""" +592 40 model """rotate""" +592 40 loss """softplus""" +592 40 regularizer """no""" +592 40 optimizer """adadelta""" +592 40 training_loop """owa""" +592 40 negative_sampler """basic""" +592 40 evaluator """rankbased""" +592 41 dataset """kinships""" +592 41 model """rotate""" +592 41 loss """softplus""" +592 41 regularizer """no""" +592 41 optimizer """adadelta""" +592 41 training_loop """owa""" +592 41 negative_sampler """basic""" +592 41 evaluator """rankbased""" +592 42 dataset """kinships""" +592 42 model """rotate""" +592 42 loss """softplus""" +592 42 regularizer """no""" +592 42 optimizer """adadelta""" +592 42 training_loop """owa""" +592 42 negative_sampler """basic""" +592 42 evaluator """rankbased""" +592 43 dataset """kinships""" +592 43 model """rotate""" +592 43 loss """softplus""" +592 43 regularizer """no""" +592 43 optimizer """adadelta""" +592 43 training_loop """owa""" +592 43 negative_sampler """basic""" 
+592 43 evaluator """rankbased""" +592 44 dataset """kinships""" +592 44 model """rotate""" +592 44 loss """softplus""" +592 44 regularizer """no""" +592 44 optimizer """adadelta""" +592 44 training_loop """owa""" +592 44 negative_sampler """basic""" +592 44 evaluator """rankbased""" +592 45 dataset """kinships""" +592 45 model """rotate""" +592 45 loss """softplus""" +592 45 regularizer """no""" +592 45 optimizer """adadelta""" +592 45 training_loop """owa""" +592 45 negative_sampler """basic""" +592 45 evaluator """rankbased""" +592 46 dataset """kinships""" +592 46 model """rotate""" +592 46 loss """softplus""" +592 46 regularizer """no""" +592 46 optimizer """adadelta""" +592 46 training_loop """owa""" +592 46 negative_sampler """basic""" +592 46 evaluator """rankbased""" +592 47 dataset """kinships""" +592 47 model """rotate""" +592 47 loss """softplus""" +592 47 regularizer """no""" +592 47 optimizer """adadelta""" +592 47 training_loop """owa""" +592 47 negative_sampler """basic""" +592 47 evaluator """rankbased""" +592 48 dataset """kinships""" +592 48 model """rotate""" +592 48 loss """softplus""" +592 48 regularizer """no""" +592 48 optimizer """adadelta""" +592 48 training_loop """owa""" +592 48 negative_sampler """basic""" +592 48 evaluator """rankbased""" +592 49 dataset """kinships""" +592 49 model """rotate""" +592 49 loss """softplus""" +592 49 regularizer """no""" +592 49 optimizer """adadelta""" +592 49 training_loop """owa""" +592 49 negative_sampler """basic""" +592 49 evaluator """rankbased""" +592 50 dataset """kinships""" +592 50 model """rotate""" +592 50 loss """softplus""" +592 50 regularizer """no""" +592 50 optimizer """adadelta""" +592 50 training_loop """owa""" +592 50 negative_sampler """basic""" +592 50 evaluator """rankbased""" +592 51 dataset """kinships""" +592 51 model """rotate""" +592 51 loss """softplus""" +592 51 regularizer """no""" +592 51 optimizer """adadelta""" +592 51 training_loop """owa""" +592 51 negative_sampler 
"""basic""" +592 51 evaluator """rankbased""" +592 52 dataset """kinships""" +592 52 model """rotate""" +592 52 loss """softplus""" +592 52 regularizer """no""" +592 52 optimizer """adadelta""" +592 52 training_loop """owa""" +592 52 negative_sampler """basic""" +592 52 evaluator """rankbased""" +592 53 dataset """kinships""" +592 53 model """rotate""" +592 53 loss """softplus""" +592 53 regularizer """no""" +592 53 optimizer """adadelta""" +592 53 training_loop """owa""" +592 53 negative_sampler """basic""" +592 53 evaluator """rankbased""" +592 54 dataset """kinships""" +592 54 model """rotate""" +592 54 loss """softplus""" +592 54 regularizer """no""" +592 54 optimizer """adadelta""" +592 54 training_loop """owa""" +592 54 negative_sampler """basic""" +592 54 evaluator """rankbased""" +592 55 dataset """kinships""" +592 55 model """rotate""" +592 55 loss """softplus""" +592 55 regularizer """no""" +592 55 optimizer """adadelta""" +592 55 training_loop """owa""" +592 55 negative_sampler """basic""" +592 55 evaluator """rankbased""" +592 56 dataset """kinships""" +592 56 model """rotate""" +592 56 loss """softplus""" +592 56 regularizer """no""" +592 56 optimizer """adadelta""" +592 56 training_loop """owa""" +592 56 negative_sampler """basic""" +592 56 evaluator """rankbased""" +592 57 dataset """kinships""" +592 57 model """rotate""" +592 57 loss """softplus""" +592 57 regularizer """no""" +592 57 optimizer """adadelta""" +592 57 training_loop """owa""" +592 57 negative_sampler """basic""" +592 57 evaluator """rankbased""" +592 58 dataset """kinships""" +592 58 model """rotate""" +592 58 loss """softplus""" +592 58 regularizer """no""" +592 58 optimizer """adadelta""" +592 58 training_loop """owa""" +592 58 negative_sampler """basic""" +592 58 evaluator """rankbased""" +592 59 dataset """kinships""" +592 59 model """rotate""" +592 59 loss """softplus""" +592 59 regularizer """no""" +592 59 optimizer """adadelta""" +592 59 training_loop """owa""" +592 59 
negative_sampler """basic""" +592 59 evaluator """rankbased""" +592 60 dataset """kinships""" +592 60 model """rotate""" +592 60 loss """softplus""" +592 60 regularizer """no""" +592 60 optimizer """adadelta""" +592 60 training_loop """owa""" +592 60 negative_sampler """basic""" +592 60 evaluator """rankbased""" +592 61 dataset """kinships""" +592 61 model """rotate""" +592 61 loss """softplus""" +592 61 regularizer """no""" +592 61 optimizer """adadelta""" +592 61 training_loop """owa""" +592 61 negative_sampler """basic""" +592 61 evaluator """rankbased""" +592 62 dataset """kinships""" +592 62 model """rotate""" +592 62 loss """softplus""" +592 62 regularizer """no""" +592 62 optimizer """adadelta""" +592 62 training_loop """owa""" +592 62 negative_sampler """basic""" +592 62 evaluator """rankbased""" +592 63 dataset """kinships""" +592 63 model """rotate""" +592 63 loss """softplus""" +592 63 regularizer """no""" +592 63 optimizer """adadelta""" +592 63 training_loop """owa""" +592 63 negative_sampler """basic""" +592 63 evaluator """rankbased""" +592 64 dataset """kinships""" +592 64 model """rotate""" +592 64 loss """softplus""" +592 64 regularizer """no""" +592 64 optimizer """adadelta""" +592 64 training_loop """owa""" +592 64 negative_sampler """basic""" +592 64 evaluator """rankbased""" +592 65 dataset """kinships""" +592 65 model """rotate""" +592 65 loss """softplus""" +592 65 regularizer """no""" +592 65 optimizer """adadelta""" +592 65 training_loop """owa""" +592 65 negative_sampler """basic""" +592 65 evaluator """rankbased""" +592 66 dataset """kinships""" +592 66 model """rotate""" +592 66 loss """softplus""" +592 66 regularizer """no""" +592 66 optimizer """adadelta""" +592 66 training_loop """owa""" +592 66 negative_sampler """basic""" +592 66 evaluator """rankbased""" +592 67 dataset """kinships""" +592 67 model """rotate""" +592 67 loss """softplus""" +592 67 regularizer """no""" +592 67 optimizer """adadelta""" +592 67 training_loop """owa""" 
+592 67 negative_sampler """basic""" +592 67 evaluator """rankbased""" +592 68 dataset """kinships""" +592 68 model """rotate""" +592 68 loss """softplus""" +592 68 regularizer """no""" +592 68 optimizer """adadelta""" +592 68 training_loop """owa""" +592 68 negative_sampler """basic""" +592 68 evaluator """rankbased""" +592 69 dataset """kinships""" +592 69 model """rotate""" +592 69 loss """softplus""" +592 69 regularizer """no""" +592 69 optimizer """adadelta""" +592 69 training_loop """owa""" +592 69 negative_sampler """basic""" +592 69 evaluator """rankbased""" +592 70 dataset """kinships""" +592 70 model """rotate""" +592 70 loss """softplus""" +592 70 regularizer """no""" +592 70 optimizer """adadelta""" +592 70 training_loop """owa""" +592 70 negative_sampler """basic""" +592 70 evaluator """rankbased""" +592 71 dataset """kinships""" +592 71 model """rotate""" +592 71 loss """softplus""" +592 71 regularizer """no""" +592 71 optimizer """adadelta""" +592 71 training_loop """owa""" +592 71 negative_sampler """basic""" +592 71 evaluator """rankbased""" +592 72 dataset """kinships""" +592 72 model """rotate""" +592 72 loss """softplus""" +592 72 regularizer """no""" +592 72 optimizer """adadelta""" +592 72 training_loop """owa""" +592 72 negative_sampler """basic""" +592 72 evaluator """rankbased""" +592 73 dataset """kinships""" +592 73 model """rotate""" +592 73 loss """softplus""" +592 73 regularizer """no""" +592 73 optimizer """adadelta""" +592 73 training_loop """owa""" +592 73 negative_sampler """basic""" +592 73 evaluator """rankbased""" +592 74 dataset """kinships""" +592 74 model """rotate""" +592 74 loss """softplus""" +592 74 regularizer """no""" +592 74 optimizer """adadelta""" +592 74 training_loop """owa""" +592 74 negative_sampler """basic""" +592 74 evaluator """rankbased""" +592 75 dataset """kinships""" +592 75 model """rotate""" +592 75 loss """softplus""" +592 75 regularizer """no""" +592 75 optimizer """adadelta""" +592 75 training_loop 
"""owa""" +592 75 negative_sampler """basic""" +592 75 evaluator """rankbased""" +592 76 dataset """kinships""" +592 76 model """rotate""" +592 76 loss """softplus""" +592 76 regularizer """no""" +592 76 optimizer """adadelta""" +592 76 training_loop """owa""" +592 76 negative_sampler """basic""" +592 76 evaluator """rankbased""" +592 77 dataset """kinships""" +592 77 model """rotate""" +592 77 loss """softplus""" +592 77 regularizer """no""" +592 77 optimizer """adadelta""" +592 77 training_loop """owa""" +592 77 negative_sampler """basic""" +592 77 evaluator """rankbased""" +592 78 dataset """kinships""" +592 78 model """rotate""" +592 78 loss """softplus""" +592 78 regularizer """no""" +592 78 optimizer """adadelta""" +592 78 training_loop """owa""" +592 78 negative_sampler """basic""" +592 78 evaluator """rankbased""" +592 79 dataset """kinships""" +592 79 model """rotate""" +592 79 loss """softplus""" +592 79 regularizer """no""" +592 79 optimizer """adadelta""" +592 79 training_loop """owa""" +592 79 negative_sampler """basic""" +592 79 evaluator """rankbased""" +592 80 dataset """kinships""" +592 80 model """rotate""" +592 80 loss """softplus""" +592 80 regularizer """no""" +592 80 optimizer """adadelta""" +592 80 training_loop """owa""" +592 80 negative_sampler """basic""" +592 80 evaluator """rankbased""" +592 81 dataset """kinships""" +592 81 model """rotate""" +592 81 loss """softplus""" +592 81 regularizer """no""" +592 81 optimizer """adadelta""" +592 81 training_loop """owa""" +592 81 negative_sampler """basic""" +592 81 evaluator """rankbased""" +592 82 dataset """kinships""" +592 82 model """rotate""" +592 82 loss """softplus""" +592 82 regularizer """no""" +592 82 optimizer """adadelta""" +592 82 training_loop """owa""" +592 82 negative_sampler """basic""" +592 82 evaluator """rankbased""" +592 83 dataset """kinships""" +592 83 model """rotate""" +592 83 loss """softplus""" +592 83 regularizer """no""" +592 83 optimizer """adadelta""" +592 83 
training_loop """owa""" +592 83 negative_sampler """basic""" +592 83 evaluator """rankbased""" +592 84 dataset """kinships""" +592 84 model """rotate""" +592 84 loss """softplus""" +592 84 regularizer """no""" +592 84 optimizer """adadelta""" +592 84 training_loop """owa""" +592 84 negative_sampler """basic""" +592 84 evaluator """rankbased""" +592 85 dataset """kinships""" +592 85 model """rotate""" +592 85 loss """softplus""" +592 85 regularizer """no""" +592 85 optimizer """adadelta""" +592 85 training_loop """owa""" +592 85 negative_sampler """basic""" +592 85 evaluator """rankbased""" +592 86 dataset """kinships""" +592 86 model """rotate""" +592 86 loss """softplus""" +592 86 regularizer """no""" +592 86 optimizer """adadelta""" +592 86 training_loop """owa""" +592 86 negative_sampler """basic""" +592 86 evaluator """rankbased""" +592 87 dataset """kinships""" +592 87 model """rotate""" +592 87 loss """softplus""" +592 87 regularizer """no""" +592 87 optimizer """adadelta""" +592 87 training_loop """owa""" +592 87 negative_sampler """basic""" +592 87 evaluator """rankbased""" +592 88 dataset """kinships""" +592 88 model """rotate""" +592 88 loss """softplus""" +592 88 regularizer """no""" +592 88 optimizer """adadelta""" +592 88 training_loop """owa""" +592 88 negative_sampler """basic""" +592 88 evaluator """rankbased""" +592 89 dataset """kinships""" +592 89 model """rotate""" +592 89 loss """softplus""" +592 89 regularizer """no""" +592 89 optimizer """adadelta""" +592 89 training_loop """owa""" +592 89 negative_sampler """basic""" +592 89 evaluator """rankbased""" +592 90 dataset """kinships""" +592 90 model """rotate""" +592 90 loss """softplus""" +592 90 regularizer """no""" +592 90 optimizer """adadelta""" +592 90 training_loop """owa""" +592 90 negative_sampler """basic""" +592 90 evaluator """rankbased""" +592 91 dataset """kinships""" +592 91 model """rotate""" +592 91 loss """softplus""" +592 91 regularizer """no""" +592 91 optimizer """adadelta""" 
+592 91 training_loop """owa""" +592 91 negative_sampler """basic""" +592 91 evaluator """rankbased""" +592 92 dataset """kinships""" +592 92 model """rotate""" +592 92 loss """softplus""" +592 92 regularizer """no""" +592 92 optimizer """adadelta""" +592 92 training_loop """owa""" +592 92 negative_sampler """basic""" +592 92 evaluator """rankbased""" +592 93 dataset """kinships""" +592 93 model """rotate""" +592 93 loss """softplus""" +592 93 regularizer """no""" +592 93 optimizer """adadelta""" +592 93 training_loop """owa""" +592 93 negative_sampler """basic""" +592 93 evaluator """rankbased""" +592 94 dataset """kinships""" +592 94 model """rotate""" +592 94 loss """softplus""" +592 94 regularizer """no""" +592 94 optimizer """adadelta""" +592 94 training_loop """owa""" +592 94 negative_sampler """basic""" +592 94 evaluator """rankbased""" +592 95 dataset """kinships""" +592 95 model """rotate""" +592 95 loss """softplus""" +592 95 regularizer """no""" +592 95 optimizer """adadelta""" +592 95 training_loop """owa""" +592 95 negative_sampler """basic""" +592 95 evaluator """rankbased""" +592 96 dataset """kinships""" +592 96 model """rotate""" +592 96 loss """softplus""" +592 96 regularizer """no""" +592 96 optimizer """adadelta""" +592 96 training_loop """owa""" +592 96 negative_sampler """basic""" +592 96 evaluator """rankbased""" +592 97 dataset """kinships""" +592 97 model """rotate""" +592 97 loss """softplus""" +592 97 regularizer """no""" +592 97 optimizer """adadelta""" +592 97 training_loop """owa""" +592 97 negative_sampler """basic""" +592 97 evaluator """rankbased""" +592 98 dataset """kinships""" +592 98 model """rotate""" +592 98 loss """softplus""" +592 98 regularizer """no""" +592 98 optimizer """adadelta""" +592 98 training_loop """owa""" +592 98 negative_sampler """basic""" +592 98 evaluator """rankbased""" +592 99 dataset """kinships""" +592 99 model """rotate""" +592 99 loss """softplus""" +592 99 regularizer """no""" +592 99 optimizer 
"""adadelta""" +592 99 training_loop """owa""" +592 99 negative_sampler """basic""" +592 99 evaluator """rankbased""" +592 100 dataset """kinships""" +592 100 model """rotate""" +592 100 loss """softplus""" +592 100 regularizer """no""" +592 100 optimizer """adadelta""" +592 100 training_loop """owa""" +592 100 negative_sampler """basic""" +592 100 evaluator """rankbased""" +593 1 model.embedding_dim 1.0 +593 1 loss.margin 4.479676365040902 +593 1 negative_sampler.num_negs_per_pos 55.0 +593 1 training.batch_size 0.0 +593 2 model.embedding_dim 0.0 +593 2 loss.margin 4.9955772115805095 +593 2 negative_sampler.num_negs_per_pos 41.0 +593 2 training.batch_size 2.0 +593 3 model.embedding_dim 1.0 +593 3 loss.margin 5.665188255378319 +593 3 negative_sampler.num_negs_per_pos 74.0 +593 3 training.batch_size 0.0 +593 4 model.embedding_dim 2.0 +593 4 loss.margin 3.1454026951360774 +593 4 negative_sampler.num_negs_per_pos 23.0 +593 4 training.batch_size 1.0 +593 5 model.embedding_dim 2.0 +593 5 loss.margin 2.221494766360294 +593 5 negative_sampler.num_negs_per_pos 3.0 +593 5 training.batch_size 2.0 +593 6 model.embedding_dim 2.0 +593 6 loss.margin 4.452916952930998 +593 6 negative_sampler.num_negs_per_pos 92.0 +593 6 training.batch_size 0.0 +593 7 model.embedding_dim 1.0 +593 7 loss.margin 4.271392707860692 +593 7 negative_sampler.num_negs_per_pos 71.0 +593 7 training.batch_size 1.0 +593 8 model.embedding_dim 0.0 +593 8 loss.margin 0.8321743954208636 +593 8 negative_sampler.num_negs_per_pos 64.0 +593 8 training.batch_size 1.0 +593 9 model.embedding_dim 1.0 +593 9 loss.margin 9.359409489593196 +593 9 negative_sampler.num_negs_per_pos 6.0 +593 9 training.batch_size 0.0 +593 10 model.embedding_dim 2.0 +593 10 loss.margin 8.522020422459546 +593 10 negative_sampler.num_negs_per_pos 43.0 +593 10 training.batch_size 2.0 +593 11 model.embedding_dim 0.0 +593 11 loss.margin 0.9833487122635203 +593 11 negative_sampler.num_negs_per_pos 1.0 +593 11 training.batch_size 1.0 +593 12 
model.embedding_dim 0.0 +593 12 loss.margin 1.693861311233099 +593 12 negative_sampler.num_negs_per_pos 20.0 +593 12 training.batch_size 0.0 +593 13 model.embedding_dim 2.0 +593 13 loss.margin 7.816021794155798 +593 13 negative_sampler.num_negs_per_pos 28.0 +593 13 training.batch_size 1.0 +593 14 model.embedding_dim 1.0 +593 14 loss.margin 5.217786623034786 +593 14 negative_sampler.num_negs_per_pos 58.0 +593 14 training.batch_size 0.0 +593 15 model.embedding_dim 2.0 +593 15 loss.margin 0.5814696832928736 +593 15 negative_sampler.num_negs_per_pos 60.0 +593 15 training.batch_size 1.0 +593 16 model.embedding_dim 2.0 +593 16 loss.margin 4.963228084490462 +593 16 negative_sampler.num_negs_per_pos 56.0 +593 16 training.batch_size 0.0 +593 17 model.embedding_dim 2.0 +593 17 loss.margin 6.137231268420606 +593 17 negative_sampler.num_negs_per_pos 59.0 +593 17 training.batch_size 0.0 +593 18 model.embedding_dim 0.0 +593 18 loss.margin 4.05700300462078 +593 18 negative_sampler.num_negs_per_pos 74.0 +593 18 training.batch_size 2.0 +593 19 model.embedding_dim 2.0 +593 19 loss.margin 2.3398643817818567 +593 19 negative_sampler.num_negs_per_pos 79.0 +593 19 training.batch_size 2.0 +593 20 model.embedding_dim 0.0 +593 20 loss.margin 2.991354182239378 +593 20 negative_sampler.num_negs_per_pos 27.0 +593 20 training.batch_size 1.0 +593 21 model.embedding_dim 0.0 +593 21 loss.margin 4.7986643263123945 +593 21 negative_sampler.num_negs_per_pos 42.0 +593 21 training.batch_size 2.0 +593 22 model.embedding_dim 1.0 +593 22 loss.margin 5.98423752416838 +593 22 negative_sampler.num_negs_per_pos 46.0 +593 22 training.batch_size 0.0 +593 23 model.embedding_dim 2.0 +593 23 loss.margin 9.56186070174636 +593 23 negative_sampler.num_negs_per_pos 16.0 +593 23 training.batch_size 2.0 +593 24 model.embedding_dim 1.0 +593 24 loss.margin 9.861860784809142 +593 24 negative_sampler.num_negs_per_pos 78.0 +593 24 training.batch_size 1.0 +593 25 model.embedding_dim 1.0 +593 25 loss.margin 2.5012708859522927 
+593 25 negative_sampler.num_negs_per_pos 67.0 +593 25 training.batch_size 0.0 +593 26 model.embedding_dim 1.0 +593 26 loss.margin 6.812608750549609 +593 26 negative_sampler.num_negs_per_pos 77.0 +593 26 training.batch_size 0.0 +593 27 model.embedding_dim 2.0 +593 27 loss.margin 6.710433976870869 +593 27 negative_sampler.num_negs_per_pos 91.0 +593 27 training.batch_size 1.0 +593 28 model.embedding_dim 0.0 +593 28 loss.margin 4.74875884488978 +593 28 negative_sampler.num_negs_per_pos 56.0 +593 28 training.batch_size 0.0 +593 29 model.embedding_dim 0.0 +593 29 loss.margin 0.5294732289150617 +593 29 negative_sampler.num_negs_per_pos 54.0 +593 29 training.batch_size 2.0 +593 30 model.embedding_dim 0.0 +593 30 loss.margin 3.91335918781962 +593 30 negative_sampler.num_negs_per_pos 87.0 +593 30 training.batch_size 1.0 +593 31 model.embedding_dim 0.0 +593 31 loss.margin 4.663299492557123 +593 31 negative_sampler.num_negs_per_pos 26.0 +593 31 training.batch_size 2.0 +593 32 model.embedding_dim 0.0 +593 32 loss.margin 0.9803484197062693 +593 32 negative_sampler.num_negs_per_pos 28.0 +593 32 training.batch_size 0.0 +593 33 model.embedding_dim 1.0 +593 33 loss.margin 2.6591838427166214 +593 33 negative_sampler.num_negs_per_pos 66.0 +593 33 training.batch_size 0.0 +593 34 model.embedding_dim 0.0 +593 34 loss.margin 4.738884107233926 +593 34 negative_sampler.num_negs_per_pos 50.0 +593 34 training.batch_size 2.0 +593 35 model.embedding_dim 0.0 +593 35 loss.margin 4.156257367017945 +593 35 negative_sampler.num_negs_per_pos 11.0 +593 35 training.batch_size 0.0 +593 36 model.embedding_dim 1.0 +593 36 loss.margin 1.5872738103934476 +593 36 negative_sampler.num_negs_per_pos 22.0 +593 36 training.batch_size 2.0 +593 37 model.embedding_dim 1.0 +593 37 loss.margin 6.531626599317522 +593 37 negative_sampler.num_negs_per_pos 30.0 +593 37 training.batch_size 2.0 +593 38 model.embedding_dim 2.0 +593 38 loss.margin 2.6998182278242666 +593 38 negative_sampler.num_negs_per_pos 97.0 +593 38 
training.batch_size 2.0 +593 39 model.embedding_dim 2.0 +593 39 loss.margin 1.1879798215945108 +593 39 negative_sampler.num_negs_per_pos 97.0 +593 39 training.batch_size 2.0 +593 40 model.embedding_dim 2.0 +593 40 loss.margin 8.098890054638618 +593 40 negative_sampler.num_negs_per_pos 12.0 +593 40 training.batch_size 1.0 +593 41 model.embedding_dim 1.0 +593 41 loss.margin 4.764074542863946 +593 41 negative_sampler.num_negs_per_pos 50.0 +593 41 training.batch_size 0.0 +593 42 model.embedding_dim 2.0 +593 42 loss.margin 8.396134285414176 +593 42 negative_sampler.num_negs_per_pos 53.0 +593 42 training.batch_size 0.0 +593 43 model.embedding_dim 1.0 +593 43 loss.margin 9.82667494306014 +593 43 negative_sampler.num_negs_per_pos 11.0 +593 43 training.batch_size 0.0 +593 44 model.embedding_dim 2.0 +593 44 loss.margin 0.5563924608573703 +593 44 negative_sampler.num_negs_per_pos 42.0 +593 44 training.batch_size 1.0 +593 45 model.embedding_dim 1.0 +593 45 loss.margin 6.051529803507205 +593 45 negative_sampler.num_negs_per_pos 80.0 +593 45 training.batch_size 0.0 +593 46 model.embedding_dim 1.0 +593 46 loss.margin 9.269108908051418 +593 46 negative_sampler.num_negs_per_pos 54.0 +593 46 training.batch_size 0.0 +593 47 model.embedding_dim 1.0 +593 47 loss.margin 2.3019520244047547 +593 47 negative_sampler.num_negs_per_pos 81.0 +593 47 training.batch_size 0.0 +593 48 model.embedding_dim 0.0 +593 48 loss.margin 0.7390793445365866 +593 48 negative_sampler.num_negs_per_pos 62.0 +593 48 training.batch_size 1.0 +593 49 model.embedding_dim 0.0 +593 49 loss.margin 8.641846362811467 +593 49 negative_sampler.num_negs_per_pos 60.0 +593 49 training.batch_size 0.0 +593 50 model.embedding_dim 2.0 +593 50 loss.margin 4.472577413228034 +593 50 negative_sampler.num_negs_per_pos 73.0 +593 50 training.batch_size 1.0 +593 51 model.embedding_dim 1.0 +593 51 loss.margin 3.6411233727090484 +593 51 negative_sampler.num_negs_per_pos 13.0 +593 51 training.batch_size 1.0 +593 52 model.embedding_dim 2.0 
+593 52 loss.margin 1.9781161756693622 +593 52 negative_sampler.num_negs_per_pos 91.0 +593 52 training.batch_size 1.0 +593 53 model.embedding_dim 0.0 +593 53 loss.margin 1.3945875982552818 +593 53 negative_sampler.num_negs_per_pos 13.0 +593 53 training.batch_size 0.0 +593 54 model.embedding_dim 0.0 +593 54 loss.margin 4.160955119557058 +593 54 negative_sampler.num_negs_per_pos 32.0 +593 54 training.batch_size 1.0 +593 55 model.embedding_dim 1.0 +593 55 loss.margin 8.748152363257047 +593 55 negative_sampler.num_negs_per_pos 55.0 +593 55 training.batch_size 1.0 +593 56 model.embedding_dim 2.0 +593 56 loss.margin 9.763484165994797 +593 56 negative_sampler.num_negs_per_pos 18.0 +593 56 training.batch_size 1.0 +593 57 model.embedding_dim 1.0 +593 57 loss.margin 1.5723896598554363 +593 57 negative_sampler.num_negs_per_pos 59.0 +593 57 training.batch_size 0.0 +593 58 model.embedding_dim 1.0 +593 58 loss.margin 4.094493507225176 +593 58 negative_sampler.num_negs_per_pos 16.0 +593 58 training.batch_size 2.0 +593 59 model.embedding_dim 0.0 +593 59 loss.margin 3.5802250331973533 +593 59 negative_sampler.num_negs_per_pos 13.0 +593 59 training.batch_size 0.0 +593 60 model.embedding_dim 2.0 +593 60 loss.margin 0.8125384579329538 +593 60 negative_sampler.num_negs_per_pos 40.0 +593 60 training.batch_size 2.0 +593 61 model.embedding_dim 1.0 +593 61 loss.margin 8.826697888222776 +593 61 negative_sampler.num_negs_per_pos 94.0 +593 61 training.batch_size 0.0 +593 62 model.embedding_dim 1.0 +593 62 loss.margin 9.072064684947179 +593 62 negative_sampler.num_negs_per_pos 32.0 +593 62 training.batch_size 2.0 +593 63 model.embedding_dim 2.0 +593 63 loss.margin 1.070202552283118 +593 63 negative_sampler.num_negs_per_pos 57.0 +593 63 training.batch_size 1.0 +593 64 model.embedding_dim 1.0 +593 64 loss.margin 6.621375732286395 +593 64 negative_sampler.num_negs_per_pos 74.0 +593 64 training.batch_size 0.0 +593 65 model.embedding_dim 0.0 +593 65 loss.margin 9.89548185014087 +593 65 
negative_sampler.num_negs_per_pos 0.0 +593 65 training.batch_size 2.0 +593 66 model.embedding_dim 0.0 +593 66 loss.margin 1.6645029743646695 +593 66 negative_sampler.num_negs_per_pos 27.0 +593 66 training.batch_size 1.0 +593 67 model.embedding_dim 1.0 +593 67 loss.margin 2.7373595320903834 +593 67 negative_sampler.num_negs_per_pos 0.0 +593 67 training.batch_size 0.0 +593 68 model.embedding_dim 2.0 +593 68 loss.margin 7.257233470277578 +593 68 negative_sampler.num_negs_per_pos 1.0 +593 68 training.batch_size 0.0 +593 69 model.embedding_dim 0.0 +593 69 loss.margin 0.9972141772278336 +593 69 negative_sampler.num_negs_per_pos 81.0 +593 69 training.batch_size 1.0 +593 70 model.embedding_dim 1.0 +593 70 loss.margin 3.920071998790461 +593 70 negative_sampler.num_negs_per_pos 69.0 +593 70 training.batch_size 2.0 +593 71 model.embedding_dim 0.0 +593 71 loss.margin 0.8750171232474292 +593 71 negative_sampler.num_negs_per_pos 36.0 +593 71 training.batch_size 0.0 +593 72 model.embedding_dim 2.0 +593 72 loss.margin 1.1211671852108522 +593 72 negative_sampler.num_negs_per_pos 4.0 +593 72 training.batch_size 2.0 +593 73 model.embedding_dim 0.0 +593 73 loss.margin 9.061470671437167 +593 73 negative_sampler.num_negs_per_pos 38.0 +593 73 training.batch_size 1.0 +593 74 model.embedding_dim 2.0 +593 74 loss.margin 3.7933602143163623 +593 74 negative_sampler.num_negs_per_pos 88.0 +593 74 training.batch_size 1.0 +593 75 model.embedding_dim 2.0 +593 75 loss.margin 0.9317460000788933 +593 75 negative_sampler.num_negs_per_pos 80.0 +593 75 training.batch_size 2.0 +593 76 model.embedding_dim 1.0 +593 76 loss.margin 4.557957707642065 +593 76 negative_sampler.num_negs_per_pos 86.0 +593 76 training.batch_size 2.0 +593 77 model.embedding_dim 1.0 +593 77 loss.margin 5.007903220199474 +593 77 negative_sampler.num_negs_per_pos 61.0 +593 77 training.batch_size 1.0 +593 78 model.embedding_dim 2.0 +593 78 loss.margin 0.9629275546703461 +593 78 negative_sampler.num_negs_per_pos 28.0 +593 78 
training.batch_size 1.0 +593 79 model.embedding_dim 2.0 +593 79 loss.margin 9.177254104375852 +593 79 negative_sampler.num_negs_per_pos 95.0 +593 79 training.batch_size 1.0 +593 80 model.embedding_dim 2.0 +593 80 loss.margin 4.740066745079332 +593 80 negative_sampler.num_negs_per_pos 47.0 +593 80 training.batch_size 1.0 +593 81 model.embedding_dim 0.0 +593 81 loss.margin 3.9209898622531063 +593 81 negative_sampler.num_negs_per_pos 88.0 +593 81 training.batch_size 1.0 +593 82 model.embedding_dim 0.0 +593 82 loss.margin 7.368845461930032 +593 82 negative_sampler.num_negs_per_pos 47.0 +593 82 training.batch_size 1.0 +593 83 model.embedding_dim 2.0 +593 83 loss.margin 4.208429935793675 +593 83 negative_sampler.num_negs_per_pos 24.0 +593 83 training.batch_size 0.0 +593 84 model.embedding_dim 1.0 +593 84 loss.margin 1.4808808607709498 +593 84 negative_sampler.num_negs_per_pos 27.0 +593 84 training.batch_size 0.0 +593 85 model.embedding_dim 1.0 +593 85 loss.margin 2.1732682621765584 +593 85 negative_sampler.num_negs_per_pos 56.0 +593 85 training.batch_size 1.0 +593 86 model.embedding_dim 0.0 +593 86 loss.margin 7.166711658846578 +593 86 negative_sampler.num_negs_per_pos 11.0 +593 86 training.batch_size 0.0 +593 87 model.embedding_dim 0.0 +593 87 loss.margin 3.645507599263215 +593 87 negative_sampler.num_negs_per_pos 58.0 +593 87 training.batch_size 2.0 +593 88 model.embedding_dim 1.0 +593 88 loss.margin 8.932435983450707 +593 88 negative_sampler.num_negs_per_pos 0.0 +593 88 training.batch_size 1.0 +593 89 model.embedding_dim 0.0 +593 89 loss.margin 2.745347593110173 +593 89 negative_sampler.num_negs_per_pos 71.0 +593 89 training.batch_size 2.0 +593 90 model.embedding_dim 2.0 +593 90 loss.margin 5.351108770545972 +593 90 negative_sampler.num_negs_per_pos 48.0 +593 90 training.batch_size 1.0 +593 91 model.embedding_dim 2.0 +593 91 loss.margin 6.045391907949414 +593 91 negative_sampler.num_negs_per_pos 63.0 +593 91 training.batch_size 0.0 +593 92 model.embedding_dim 2.0 +593 
92 loss.margin 4.2857157944406605 +593 92 negative_sampler.num_negs_per_pos 82.0 +593 92 training.batch_size 1.0 +593 93 model.embedding_dim 2.0 +593 93 loss.margin 4.690009714228611 +593 93 negative_sampler.num_negs_per_pos 71.0 +593 93 training.batch_size 0.0 +593 94 model.embedding_dim 1.0 +593 94 loss.margin 3.0595455451353657 +593 94 negative_sampler.num_negs_per_pos 25.0 +593 94 training.batch_size 0.0 +593 95 model.embedding_dim 2.0 +593 95 loss.margin 4.375081793086439 +593 95 negative_sampler.num_negs_per_pos 14.0 +593 95 training.batch_size 1.0 +593 96 model.embedding_dim 2.0 +593 96 loss.margin 0.9759821978537297 +593 96 negative_sampler.num_negs_per_pos 51.0 +593 96 training.batch_size 1.0 +593 97 model.embedding_dim 1.0 +593 97 loss.margin 8.366634322936097 +593 97 negative_sampler.num_negs_per_pos 79.0 +593 97 training.batch_size 0.0 +593 98 model.embedding_dim 2.0 +593 98 loss.margin 8.041956847861101 +593 98 negative_sampler.num_negs_per_pos 41.0 +593 98 training.batch_size 0.0 +593 99 model.embedding_dim 2.0 +593 99 loss.margin 8.546442015826726 +593 99 negative_sampler.num_negs_per_pos 44.0 +593 99 training.batch_size 1.0 +593 100 model.embedding_dim 1.0 +593 100 loss.margin 8.93685134303167 +593 100 negative_sampler.num_negs_per_pos 91.0 +593 100 training.batch_size 0.0 +593 1 dataset """kinships""" +593 1 model """rotate""" +593 1 loss """marginranking""" +593 1 regularizer """no""" +593 1 optimizer """adadelta""" +593 1 training_loop """owa""" +593 1 negative_sampler """basic""" +593 1 evaluator """rankbased""" +593 2 dataset """kinships""" +593 2 model """rotate""" +593 2 loss """marginranking""" +593 2 regularizer """no""" +593 2 optimizer """adadelta""" +593 2 training_loop """owa""" +593 2 negative_sampler """basic""" +593 2 evaluator """rankbased""" +593 3 dataset """kinships""" +593 3 model """rotate""" +593 3 loss """marginranking""" +593 3 regularizer """no""" +593 3 optimizer """adadelta""" +593 3 training_loop """owa""" +593 3 
negative_sampler """basic""" +593 3 evaluator """rankbased""" +593 4 dataset """kinships""" +593 4 model """rotate""" +593 4 loss """marginranking""" +593 4 regularizer """no""" +593 4 optimizer """adadelta""" +593 4 training_loop """owa""" +593 4 negative_sampler """basic""" +593 4 evaluator """rankbased""" +593 5 dataset """kinships""" +593 5 model """rotate""" +593 5 loss """marginranking""" +593 5 regularizer """no""" +593 5 optimizer """adadelta""" +593 5 training_loop """owa""" +593 5 negative_sampler """basic""" +593 5 evaluator """rankbased""" +593 6 dataset """kinships""" +593 6 model """rotate""" +593 6 loss """marginranking""" +593 6 regularizer """no""" +593 6 optimizer """adadelta""" +593 6 training_loop """owa""" +593 6 negative_sampler """basic""" +593 6 evaluator """rankbased""" +593 7 dataset """kinships""" +593 7 model """rotate""" +593 7 loss """marginranking""" +593 7 regularizer """no""" +593 7 optimizer """adadelta""" +593 7 training_loop """owa""" +593 7 negative_sampler """basic""" +593 7 evaluator """rankbased""" +593 8 dataset """kinships""" +593 8 model """rotate""" +593 8 loss """marginranking""" +593 8 regularizer """no""" +593 8 optimizer """adadelta""" +593 8 training_loop """owa""" +593 8 negative_sampler """basic""" +593 8 evaluator """rankbased""" +593 9 dataset """kinships""" +593 9 model """rotate""" +593 9 loss """marginranking""" +593 9 regularizer """no""" +593 9 optimizer """adadelta""" +593 9 training_loop """owa""" +593 9 negative_sampler """basic""" +593 9 evaluator """rankbased""" +593 10 dataset """kinships""" +593 10 model """rotate""" +593 10 loss """marginranking""" +593 10 regularizer """no""" +593 10 optimizer """adadelta""" +593 10 training_loop """owa""" +593 10 negative_sampler """basic""" +593 10 evaluator """rankbased""" +593 11 dataset """kinships""" +593 11 model """rotate""" +593 11 loss """marginranking""" +593 11 regularizer """no""" +593 11 optimizer """adadelta""" +593 11 training_loop """owa""" +593 11 
negative_sampler """basic""" +593 11 evaluator """rankbased""" +593 12 dataset """kinships""" +593 12 model """rotate""" +593 12 loss """marginranking""" +593 12 regularizer """no""" +593 12 optimizer """adadelta""" +593 12 training_loop """owa""" +593 12 negative_sampler """basic""" +593 12 evaluator """rankbased""" +593 13 dataset """kinships""" +593 13 model """rotate""" +593 13 loss """marginranking""" +593 13 regularizer """no""" +593 13 optimizer """adadelta""" +593 13 training_loop """owa""" +593 13 negative_sampler """basic""" +593 13 evaluator """rankbased""" +593 14 dataset """kinships""" +593 14 model """rotate""" +593 14 loss """marginranking""" +593 14 regularizer """no""" +593 14 optimizer """adadelta""" +593 14 training_loop """owa""" +593 14 negative_sampler """basic""" +593 14 evaluator """rankbased""" +593 15 dataset """kinships""" +593 15 model """rotate""" +593 15 loss """marginranking""" +593 15 regularizer """no""" +593 15 optimizer """adadelta""" +593 15 training_loop """owa""" +593 15 negative_sampler """basic""" +593 15 evaluator """rankbased""" +593 16 dataset """kinships""" +593 16 model """rotate""" +593 16 loss """marginranking""" +593 16 regularizer """no""" +593 16 optimizer """adadelta""" +593 16 training_loop """owa""" +593 16 negative_sampler """basic""" +593 16 evaluator """rankbased""" +593 17 dataset """kinships""" +593 17 model """rotate""" +593 17 loss """marginranking""" +593 17 regularizer """no""" +593 17 optimizer """adadelta""" +593 17 training_loop """owa""" +593 17 negative_sampler """basic""" +593 17 evaluator """rankbased""" +593 18 dataset """kinships""" +593 18 model """rotate""" +593 18 loss """marginranking""" +593 18 regularizer """no""" +593 18 optimizer """adadelta""" +593 18 training_loop """owa""" +593 18 negative_sampler """basic""" +593 18 evaluator """rankbased""" +593 19 dataset """kinships""" +593 19 model """rotate""" +593 19 loss """marginranking""" +593 19 regularizer """no""" +593 19 optimizer 
"""adadelta""" +593 19 training_loop """owa""" +593 19 negative_sampler """basic""" +593 19 evaluator """rankbased""" +593 20 dataset """kinships""" +593 20 model """rotate""" +593 20 loss """marginranking""" +593 20 regularizer """no""" +593 20 optimizer """adadelta""" +593 20 training_loop """owa""" +593 20 negative_sampler """basic""" +593 20 evaluator """rankbased""" +593 21 dataset """kinships""" +593 21 model """rotate""" +593 21 loss """marginranking""" +593 21 regularizer """no""" +593 21 optimizer """adadelta""" +593 21 training_loop """owa""" +593 21 negative_sampler """basic""" +593 21 evaluator """rankbased""" +593 22 dataset """kinships""" +593 22 model """rotate""" +593 22 loss """marginranking""" +593 22 regularizer """no""" +593 22 optimizer """adadelta""" +593 22 training_loop """owa""" +593 22 negative_sampler """basic""" +593 22 evaluator """rankbased""" +593 23 dataset """kinships""" +593 23 model """rotate""" +593 23 loss """marginranking""" +593 23 regularizer """no""" +593 23 optimizer """adadelta""" +593 23 training_loop """owa""" +593 23 negative_sampler """basic""" +593 23 evaluator """rankbased""" +593 24 dataset """kinships""" +593 24 model """rotate""" +593 24 loss """marginranking""" +593 24 regularizer """no""" +593 24 optimizer """adadelta""" +593 24 training_loop """owa""" +593 24 negative_sampler """basic""" +593 24 evaluator """rankbased""" +593 25 dataset """kinships""" +593 25 model """rotate""" +593 25 loss """marginranking""" +593 25 regularizer """no""" +593 25 optimizer """adadelta""" +593 25 training_loop """owa""" +593 25 negative_sampler """basic""" +593 25 evaluator """rankbased""" +593 26 dataset """kinships""" +593 26 model """rotate""" +593 26 loss """marginranking""" +593 26 regularizer """no""" +593 26 optimizer """adadelta""" +593 26 training_loop """owa""" +593 26 negative_sampler """basic""" +593 26 evaluator """rankbased""" +593 27 dataset """kinships""" +593 27 model """rotate""" +593 27 loss 
"""marginranking""" +593 27 regularizer """no""" +593 27 optimizer """adadelta""" +593 27 training_loop """owa""" +593 27 negative_sampler """basic""" +593 27 evaluator """rankbased""" +593 28 dataset """kinships""" +593 28 model """rotate""" +593 28 loss """marginranking""" +593 28 regularizer """no""" +593 28 optimizer """adadelta""" +593 28 training_loop """owa""" +593 28 negative_sampler """basic""" +593 28 evaluator """rankbased""" +593 29 dataset """kinships""" +593 29 model """rotate""" +593 29 loss """marginranking""" +593 29 regularizer """no""" +593 29 optimizer """adadelta""" +593 29 training_loop """owa""" +593 29 negative_sampler """basic""" +593 29 evaluator """rankbased""" +593 30 dataset """kinships""" +593 30 model """rotate""" +593 30 loss """marginranking""" +593 30 regularizer """no""" +593 30 optimizer """adadelta""" +593 30 training_loop """owa""" +593 30 negative_sampler """basic""" +593 30 evaluator """rankbased""" +593 31 dataset """kinships""" +593 31 model """rotate""" +593 31 loss """marginranking""" +593 31 regularizer """no""" +593 31 optimizer """adadelta""" +593 31 training_loop """owa""" +593 31 negative_sampler """basic""" +593 31 evaluator """rankbased""" +593 32 dataset """kinships""" +593 32 model """rotate""" +593 32 loss """marginranking""" +593 32 regularizer """no""" +593 32 optimizer """adadelta""" +593 32 training_loop """owa""" +593 32 negative_sampler """basic""" +593 32 evaluator """rankbased""" +593 33 dataset """kinships""" +593 33 model """rotate""" +593 33 loss """marginranking""" +593 33 regularizer """no""" +593 33 optimizer """adadelta""" +593 33 training_loop """owa""" +593 33 negative_sampler """basic""" +593 33 evaluator """rankbased""" +593 34 dataset """kinships""" +593 34 model """rotate""" +593 34 loss """marginranking""" +593 34 regularizer """no""" +593 34 optimizer """adadelta""" +593 34 training_loop """owa""" +593 34 negative_sampler """basic""" +593 34 evaluator """rankbased""" +593 35 dataset 
"""kinships""" +593 35 model """rotate""" +593 35 loss """marginranking""" +593 35 regularizer """no""" +593 35 optimizer """adadelta""" +593 35 training_loop """owa""" +593 35 negative_sampler """basic""" +593 35 evaluator """rankbased""" +593 36 dataset """kinships""" +593 36 model """rotate""" +593 36 loss """marginranking""" +593 36 regularizer """no""" +593 36 optimizer """adadelta""" +593 36 training_loop """owa""" +593 36 negative_sampler """basic""" +593 36 evaluator """rankbased""" +593 37 dataset """kinships""" +593 37 model """rotate""" +593 37 loss """marginranking""" +593 37 regularizer """no""" +593 37 optimizer """adadelta""" +593 37 training_loop """owa""" +593 37 negative_sampler """basic""" +593 37 evaluator """rankbased""" +593 38 dataset """kinships""" +593 38 model """rotate""" +593 38 loss """marginranking""" +593 38 regularizer """no""" +593 38 optimizer """adadelta""" +593 38 training_loop """owa""" +593 38 negative_sampler """basic""" +593 38 evaluator """rankbased""" +593 39 dataset """kinships""" +593 39 model """rotate""" +593 39 loss """marginranking""" +593 39 regularizer """no""" +593 39 optimizer """adadelta""" +593 39 training_loop """owa""" +593 39 negative_sampler """basic""" +593 39 evaluator """rankbased""" +593 40 dataset """kinships""" +593 40 model """rotate""" +593 40 loss """marginranking""" +593 40 regularizer """no""" +593 40 optimizer """adadelta""" +593 40 training_loop """owa""" +593 40 negative_sampler """basic""" +593 40 evaluator """rankbased""" +593 41 dataset """kinships""" +593 41 model """rotate""" +593 41 loss """marginranking""" +593 41 regularizer """no""" +593 41 optimizer """adadelta""" +593 41 training_loop """owa""" +593 41 negative_sampler """basic""" +593 41 evaluator """rankbased""" +593 42 dataset """kinships""" +593 42 model """rotate""" +593 42 loss """marginranking""" +593 42 regularizer """no""" +593 42 optimizer """adadelta""" +593 42 training_loop """owa""" +593 42 negative_sampler """basic""" 
+593 42 evaluator """rankbased""" +593 43 dataset """kinships""" +593 43 model """rotate""" +593 43 loss """marginranking""" +593 43 regularizer """no""" +593 43 optimizer """adadelta""" +593 43 training_loop """owa""" +593 43 negative_sampler """basic""" +593 43 evaluator """rankbased""" +593 44 dataset """kinships""" +593 44 model """rotate""" +593 44 loss """marginranking""" +593 44 regularizer """no""" +593 44 optimizer """adadelta""" +593 44 training_loop """owa""" +593 44 negative_sampler """basic""" +593 44 evaluator """rankbased""" +593 45 dataset """kinships""" +593 45 model """rotate""" +593 45 loss """marginranking""" +593 45 regularizer """no""" +593 45 optimizer """adadelta""" +593 45 training_loop """owa""" +593 45 negative_sampler """basic""" +593 45 evaluator """rankbased""" +593 46 dataset """kinships""" +593 46 model """rotate""" +593 46 loss """marginranking""" +593 46 regularizer """no""" +593 46 optimizer """adadelta""" +593 46 training_loop """owa""" +593 46 negative_sampler """basic""" +593 46 evaluator """rankbased""" +593 47 dataset """kinships""" +593 47 model """rotate""" +593 47 loss """marginranking""" +593 47 regularizer """no""" +593 47 optimizer """adadelta""" +593 47 training_loop """owa""" +593 47 negative_sampler """basic""" +593 47 evaluator """rankbased""" +593 48 dataset """kinships""" +593 48 model """rotate""" +593 48 loss """marginranking""" +593 48 regularizer """no""" +593 48 optimizer """adadelta""" +593 48 training_loop """owa""" +593 48 negative_sampler """basic""" +593 48 evaluator """rankbased""" +593 49 dataset """kinships""" +593 49 model """rotate""" +593 49 loss """marginranking""" +593 49 regularizer """no""" +593 49 optimizer """adadelta""" +593 49 training_loop """owa""" +593 49 negative_sampler """basic""" +593 49 evaluator """rankbased""" +593 50 dataset """kinships""" +593 50 model """rotate""" +593 50 loss """marginranking""" +593 50 regularizer """no""" +593 50 optimizer """adadelta""" +593 50 
training_loop """owa""" +593 50 negative_sampler """basic""" +593 50 evaluator """rankbased""" +593 51 dataset """kinships""" +593 51 model """rotate""" +593 51 loss """marginranking""" +593 51 regularizer """no""" +593 51 optimizer """adadelta""" +593 51 training_loop """owa""" +593 51 negative_sampler """basic""" +593 51 evaluator """rankbased""" +593 52 dataset """kinships""" +593 52 model """rotate""" +593 52 loss """marginranking""" +593 52 regularizer """no""" +593 52 optimizer """adadelta""" +593 52 training_loop """owa""" +593 52 negative_sampler """basic""" +593 52 evaluator """rankbased""" +593 53 dataset """kinships""" +593 53 model """rotate""" +593 53 loss """marginranking""" +593 53 regularizer """no""" +593 53 optimizer """adadelta""" +593 53 training_loop """owa""" +593 53 negative_sampler """basic""" +593 53 evaluator """rankbased""" +593 54 dataset """kinships""" +593 54 model """rotate""" +593 54 loss """marginranking""" +593 54 regularizer """no""" +593 54 optimizer """adadelta""" +593 54 training_loop """owa""" +593 54 negative_sampler """basic""" +593 54 evaluator """rankbased""" +593 55 dataset """kinships""" +593 55 model """rotate""" +593 55 loss """marginranking""" +593 55 regularizer """no""" +593 55 optimizer """adadelta""" +593 55 training_loop """owa""" +593 55 negative_sampler """basic""" +593 55 evaluator """rankbased""" +593 56 dataset """kinships""" +593 56 model """rotate""" +593 56 loss """marginranking""" +593 56 regularizer """no""" +593 56 optimizer """adadelta""" +593 56 training_loop """owa""" +593 56 negative_sampler """basic""" +593 56 evaluator """rankbased""" +593 57 dataset """kinships""" +593 57 model """rotate""" +593 57 loss """marginranking""" +593 57 regularizer """no""" +593 57 optimizer """adadelta""" +593 57 training_loop """owa""" +593 57 negative_sampler """basic""" +593 57 evaluator """rankbased""" +593 58 dataset """kinships""" +593 58 model """rotate""" +593 58 loss """marginranking""" +593 58 regularizer 
"""no""" +593 58 optimizer """adadelta""" +593 58 training_loop """owa""" +593 58 negative_sampler """basic""" +593 58 evaluator """rankbased""" +593 59 dataset """kinships""" +593 59 model """rotate""" +593 59 loss """marginranking""" +593 59 regularizer """no""" +593 59 optimizer """adadelta""" +593 59 training_loop """owa""" +593 59 negative_sampler """basic""" +593 59 evaluator """rankbased""" +593 60 dataset """kinships""" +593 60 model """rotate""" +593 60 loss """marginranking""" +593 60 regularizer """no""" +593 60 optimizer """adadelta""" +593 60 training_loop """owa""" +593 60 negative_sampler """basic""" +593 60 evaluator """rankbased""" +593 61 dataset """kinships""" +593 61 model """rotate""" +593 61 loss """marginranking""" +593 61 regularizer """no""" +593 61 optimizer """adadelta""" +593 61 training_loop """owa""" +593 61 negative_sampler """basic""" +593 61 evaluator """rankbased""" +593 62 dataset """kinships""" +593 62 model """rotate""" +593 62 loss """marginranking""" +593 62 regularizer """no""" +593 62 optimizer """adadelta""" +593 62 training_loop """owa""" +593 62 negative_sampler """basic""" +593 62 evaluator """rankbased""" +593 63 dataset """kinships""" +593 63 model """rotate""" +593 63 loss """marginranking""" +593 63 regularizer """no""" +593 63 optimizer """adadelta""" +593 63 training_loop """owa""" +593 63 negative_sampler """basic""" +593 63 evaluator """rankbased""" +593 64 dataset """kinships""" +593 64 model """rotate""" +593 64 loss """marginranking""" +593 64 regularizer """no""" +593 64 optimizer """adadelta""" +593 64 training_loop """owa""" +593 64 negative_sampler """basic""" +593 64 evaluator """rankbased""" +593 65 dataset """kinships""" +593 65 model """rotate""" +593 65 loss """marginranking""" +593 65 regularizer """no""" +593 65 optimizer """adadelta""" +593 65 training_loop """owa""" +593 65 negative_sampler """basic""" +593 65 evaluator """rankbased""" +593 66 dataset """kinships""" +593 66 model """rotate""" +593 
66 loss """marginranking""" +593 66 regularizer """no""" +593 66 optimizer """adadelta""" +593 66 training_loop """owa""" +593 66 negative_sampler """basic""" +593 66 evaluator """rankbased""" +593 67 dataset """kinships""" +593 67 model """rotate""" +593 67 loss """marginranking""" +593 67 regularizer """no""" +593 67 optimizer """adadelta""" +593 67 training_loop """owa""" +593 67 negative_sampler """basic""" +593 67 evaluator """rankbased""" +593 68 dataset """kinships""" +593 68 model """rotate""" +593 68 loss """marginranking""" +593 68 regularizer """no""" +593 68 optimizer """adadelta""" +593 68 training_loop """owa""" +593 68 negative_sampler """basic""" +593 68 evaluator """rankbased""" +593 69 dataset """kinships""" +593 69 model """rotate""" +593 69 loss """marginranking""" +593 69 regularizer """no""" +593 69 optimizer """adadelta""" +593 69 training_loop """owa""" +593 69 negative_sampler """basic""" +593 69 evaluator """rankbased""" +593 70 dataset """kinships""" +593 70 model """rotate""" +593 70 loss """marginranking""" +593 70 regularizer """no""" +593 70 optimizer """adadelta""" +593 70 training_loop """owa""" +593 70 negative_sampler """basic""" +593 70 evaluator """rankbased""" +593 71 dataset """kinships""" +593 71 model """rotate""" +593 71 loss """marginranking""" +593 71 regularizer """no""" +593 71 optimizer """adadelta""" +593 71 training_loop """owa""" +593 71 negative_sampler """basic""" +593 71 evaluator """rankbased""" +593 72 dataset """kinships""" +593 72 model """rotate""" +593 72 loss """marginranking""" +593 72 regularizer """no""" +593 72 optimizer """adadelta""" +593 72 training_loop """owa""" +593 72 negative_sampler """basic""" +593 72 evaluator """rankbased""" +593 73 dataset """kinships""" +593 73 model """rotate""" +593 73 loss """marginranking""" +593 73 regularizer """no""" +593 73 optimizer """adadelta""" +593 73 training_loop """owa""" +593 73 negative_sampler """basic""" +593 73 evaluator """rankbased""" +593 74 
dataset """kinships""" +593 74 model """rotate""" +593 74 loss """marginranking""" +593 74 regularizer """no""" +593 74 optimizer """adadelta""" +593 74 training_loop """owa""" +593 74 negative_sampler """basic""" +593 74 evaluator """rankbased""" +593 75 dataset """kinships""" +593 75 model """rotate""" +593 75 loss """marginranking""" +593 75 regularizer """no""" +593 75 optimizer """adadelta""" +593 75 training_loop """owa""" +593 75 negative_sampler """basic""" +593 75 evaluator """rankbased""" +593 76 dataset """kinships""" +593 76 model """rotate""" +593 76 loss """marginranking""" +593 76 regularizer """no""" +593 76 optimizer """adadelta""" +593 76 training_loop """owa""" +593 76 negative_sampler """basic""" +593 76 evaluator """rankbased""" +593 77 dataset """kinships""" +593 77 model """rotate""" +593 77 loss """marginranking""" +593 77 regularizer """no""" +593 77 optimizer """adadelta""" +593 77 training_loop """owa""" +593 77 negative_sampler """basic""" +593 77 evaluator """rankbased""" +593 78 dataset """kinships""" +593 78 model """rotate""" +593 78 loss """marginranking""" +593 78 regularizer """no""" +593 78 optimizer """adadelta""" +593 78 training_loop """owa""" +593 78 negative_sampler """basic""" +593 78 evaluator """rankbased""" +593 79 dataset """kinships""" +593 79 model """rotate""" +593 79 loss """marginranking""" +593 79 regularizer """no""" +593 79 optimizer """adadelta""" +593 79 training_loop """owa""" +593 79 negative_sampler """basic""" +593 79 evaluator """rankbased""" +593 80 dataset """kinships""" +593 80 model """rotate""" +593 80 loss """marginranking""" +593 80 regularizer """no""" +593 80 optimizer """adadelta""" +593 80 training_loop """owa""" +593 80 negative_sampler """basic""" +593 80 evaluator """rankbased""" +593 81 dataset """kinships""" +593 81 model """rotate""" +593 81 loss """marginranking""" +593 81 regularizer """no""" +593 81 optimizer """adadelta""" +593 81 training_loop """owa""" +593 81 negative_sampler 
"""basic""" +593 81 evaluator """rankbased""" +593 82 dataset """kinships""" +593 82 model """rotate""" +593 82 loss """marginranking""" +593 82 regularizer """no""" +593 82 optimizer """adadelta""" +593 82 training_loop """owa""" +593 82 negative_sampler """basic""" +593 82 evaluator """rankbased""" +593 83 dataset """kinships""" +593 83 model """rotate""" +593 83 loss """marginranking""" +593 83 regularizer """no""" +593 83 optimizer """adadelta""" +593 83 training_loop """owa""" +593 83 negative_sampler """basic""" +593 83 evaluator """rankbased""" +593 84 dataset """kinships""" +593 84 model """rotate""" +593 84 loss """marginranking""" +593 84 regularizer """no""" +593 84 optimizer """adadelta""" +593 84 training_loop """owa""" +593 84 negative_sampler """basic""" +593 84 evaluator """rankbased""" +593 85 dataset """kinships""" +593 85 model """rotate""" +593 85 loss """marginranking""" +593 85 regularizer """no""" +593 85 optimizer """adadelta""" +593 85 training_loop """owa""" +593 85 negative_sampler """basic""" +593 85 evaluator """rankbased""" +593 86 dataset """kinships""" +593 86 model """rotate""" +593 86 loss """marginranking""" +593 86 regularizer """no""" +593 86 optimizer """adadelta""" +593 86 training_loop """owa""" +593 86 negative_sampler """basic""" +593 86 evaluator """rankbased""" +593 87 dataset """kinships""" +593 87 model """rotate""" +593 87 loss """marginranking""" +593 87 regularizer """no""" +593 87 optimizer """adadelta""" +593 87 training_loop """owa""" +593 87 negative_sampler """basic""" +593 87 evaluator """rankbased""" +593 88 dataset """kinships""" +593 88 model """rotate""" +593 88 loss """marginranking""" +593 88 regularizer """no""" +593 88 optimizer """adadelta""" +593 88 training_loop """owa""" +593 88 negative_sampler """basic""" +593 88 evaluator """rankbased""" +593 89 dataset """kinships""" +593 89 model """rotate""" +593 89 loss """marginranking""" +593 89 regularizer """no""" +593 89 optimizer """adadelta""" +593 89 
training_loop """owa""" +593 89 negative_sampler """basic""" +593 89 evaluator """rankbased""" +593 90 dataset """kinships""" +593 90 model """rotate""" +593 90 loss """marginranking""" +593 90 regularizer """no""" +593 90 optimizer """adadelta""" +593 90 training_loop """owa""" +593 90 negative_sampler """basic""" +593 90 evaluator """rankbased""" +593 91 dataset """kinships""" +593 91 model """rotate""" +593 91 loss """marginranking""" +593 91 regularizer """no""" +593 91 optimizer """adadelta""" +593 91 training_loop """owa""" +593 91 negative_sampler """basic""" +593 91 evaluator """rankbased""" +593 92 dataset """kinships""" +593 92 model """rotate""" +593 92 loss """marginranking""" +593 92 regularizer """no""" +593 92 optimizer """adadelta""" +593 92 training_loop """owa""" +593 92 negative_sampler """basic""" +593 92 evaluator """rankbased""" +593 93 dataset """kinships""" +593 93 model """rotate""" +593 93 loss """marginranking""" +593 93 regularizer """no""" +593 93 optimizer """adadelta""" +593 93 training_loop """owa""" +593 93 negative_sampler """basic""" +593 93 evaluator """rankbased""" +593 94 dataset """kinships""" +593 94 model """rotate""" +593 94 loss """marginranking""" +593 94 regularizer """no""" +593 94 optimizer """adadelta""" +593 94 training_loop """owa""" +593 94 negative_sampler """basic""" +593 94 evaluator """rankbased""" +593 95 dataset """kinships""" +593 95 model """rotate""" +593 95 loss """marginranking""" +593 95 regularizer """no""" +593 95 optimizer """adadelta""" +593 95 training_loop """owa""" +593 95 negative_sampler """basic""" +593 95 evaluator """rankbased""" +593 96 dataset """kinships""" +593 96 model """rotate""" +593 96 loss """marginranking""" +593 96 regularizer """no""" +593 96 optimizer """adadelta""" +593 96 training_loop """owa""" +593 96 negative_sampler """basic""" +593 96 evaluator """rankbased""" +593 97 dataset """kinships""" +593 97 model """rotate""" +593 97 loss """marginranking""" +593 97 regularizer 
"""no""" +593 97 optimizer """adadelta""" +593 97 training_loop """owa""" +593 97 negative_sampler """basic""" +593 97 evaluator """rankbased""" +593 98 dataset """kinships""" +593 98 model """rotate""" +593 98 loss """marginranking""" +593 98 regularizer """no""" +593 98 optimizer """adadelta""" +593 98 training_loop """owa""" +593 98 negative_sampler """basic""" +593 98 evaluator """rankbased""" +593 99 dataset """kinships""" +593 99 model """rotate""" +593 99 loss """marginranking""" +593 99 regularizer """no""" +593 99 optimizer """adadelta""" +593 99 training_loop """owa""" +593 99 negative_sampler """basic""" +593 99 evaluator """rankbased""" +593 100 dataset """kinships""" +593 100 model """rotate""" +593 100 loss """marginranking""" +593 100 regularizer """no""" +593 100 optimizer """adadelta""" +593 100 training_loop """owa""" +593 100 negative_sampler """basic""" +593 100 evaluator """rankbased""" +594 1 model.embedding_dim 2.0 +594 1 loss.margin 6.27465511099168 +594 1 negative_sampler.num_negs_per_pos 22.0 +594 1 training.batch_size 1.0 +594 2 model.embedding_dim 0.0 +594 2 loss.margin 6.016121559700661 +594 2 negative_sampler.num_negs_per_pos 6.0 +594 2 training.batch_size 1.0 +594 3 model.embedding_dim 2.0 +594 3 loss.margin 4.640019956302629 +594 3 negative_sampler.num_negs_per_pos 58.0 +594 3 training.batch_size 2.0 +594 4 model.embedding_dim 1.0 +594 4 loss.margin 1.3977674387319432 +594 4 negative_sampler.num_negs_per_pos 99.0 +594 4 training.batch_size 0.0 +594 5 model.embedding_dim 2.0 +594 5 loss.margin 5.8944148802643905 +594 5 negative_sampler.num_negs_per_pos 9.0 +594 5 training.batch_size 0.0 +594 6 model.embedding_dim 1.0 +594 6 loss.margin 2.6741556598028584 +594 6 negative_sampler.num_negs_per_pos 62.0 +594 6 training.batch_size 0.0 +594 7 model.embedding_dim 1.0 +594 7 loss.margin 5.508190810435038 +594 7 negative_sampler.num_negs_per_pos 58.0 +594 7 training.batch_size 2.0 +594 8 model.embedding_dim 0.0 +594 8 loss.margin 
6.9132737779398274 +594 8 negative_sampler.num_negs_per_pos 79.0 +594 8 training.batch_size 1.0 +594 9 model.embedding_dim 0.0 +594 9 loss.margin 5.49416737665793 +594 9 negative_sampler.num_negs_per_pos 95.0 +594 9 training.batch_size 2.0 +594 10 model.embedding_dim 2.0 +594 10 loss.margin 8.747161737398851 +594 10 negative_sampler.num_negs_per_pos 38.0 +594 10 training.batch_size 2.0 +594 11 model.embedding_dim 0.0 +594 11 loss.margin 3.77797307395628 +594 11 negative_sampler.num_negs_per_pos 62.0 +594 11 training.batch_size 2.0 +594 12 model.embedding_dim 1.0 +594 12 loss.margin 4.802884530519517 +594 12 negative_sampler.num_negs_per_pos 1.0 +594 12 training.batch_size 1.0 +594 13 model.embedding_dim 0.0 +594 13 loss.margin 8.483190317359494 +594 13 negative_sampler.num_negs_per_pos 51.0 +594 13 training.batch_size 1.0 +594 14 model.embedding_dim 1.0 +594 14 loss.margin 3.6891344292881763 +594 14 negative_sampler.num_negs_per_pos 15.0 +594 14 training.batch_size 2.0 +594 15 model.embedding_dim 1.0 +594 15 loss.margin 9.15352130506427 +594 15 negative_sampler.num_negs_per_pos 27.0 +594 15 training.batch_size 2.0 +594 16 model.embedding_dim 1.0 +594 16 loss.margin 2.152623866538332 +594 16 negative_sampler.num_negs_per_pos 30.0 +594 16 training.batch_size 2.0 +594 17 model.embedding_dim 1.0 +594 17 loss.margin 6.747923819857173 +594 17 negative_sampler.num_negs_per_pos 22.0 +594 17 training.batch_size 1.0 +594 18 model.embedding_dim 2.0 +594 18 loss.margin 9.124306377798487 +594 18 negative_sampler.num_negs_per_pos 96.0 +594 18 training.batch_size 1.0 +594 19 model.embedding_dim 0.0 +594 19 loss.margin 5.081932134724527 +594 19 negative_sampler.num_negs_per_pos 63.0 +594 19 training.batch_size 0.0 +594 20 model.embedding_dim 1.0 +594 20 loss.margin 1.7567284997450368 +594 20 negative_sampler.num_negs_per_pos 47.0 +594 20 training.batch_size 1.0 +594 21 model.embedding_dim 2.0 +594 21 loss.margin 9.462738520303898 +594 21 negative_sampler.num_negs_per_pos 40.0 +594 
21 training.batch_size 1.0 +594 22 model.embedding_dim 2.0 +594 22 loss.margin 8.094372797941787 +594 22 negative_sampler.num_negs_per_pos 92.0 +594 22 training.batch_size 1.0 +594 23 model.embedding_dim 1.0 +594 23 loss.margin 4.082183028508082 +594 23 negative_sampler.num_negs_per_pos 99.0 +594 23 training.batch_size 0.0 +594 24 model.embedding_dim 1.0 +594 24 loss.margin 2.8085760001733093 +594 24 negative_sampler.num_negs_per_pos 27.0 +594 24 training.batch_size 0.0 +594 25 model.embedding_dim 2.0 +594 25 loss.margin 3.3573116118987847 +594 25 negative_sampler.num_negs_per_pos 3.0 +594 25 training.batch_size 1.0 +594 26 model.embedding_dim 1.0 +594 26 loss.margin 7.553199905495846 +594 26 negative_sampler.num_negs_per_pos 1.0 +594 26 training.batch_size 2.0 +594 27 model.embedding_dim 2.0 +594 27 loss.margin 7.396226023308707 +594 27 negative_sampler.num_negs_per_pos 0.0 +594 27 training.batch_size 1.0 +594 28 model.embedding_dim 1.0 +594 28 loss.margin 2.968627662299777 +594 28 negative_sampler.num_negs_per_pos 42.0 +594 28 training.batch_size 0.0 +594 29 model.embedding_dim 0.0 +594 29 loss.margin 5.715879916668804 +594 29 negative_sampler.num_negs_per_pos 37.0 +594 29 training.batch_size 0.0 +594 30 model.embedding_dim 1.0 +594 30 loss.margin 8.442942180655258 +594 30 negative_sampler.num_negs_per_pos 38.0 +594 30 training.batch_size 1.0 +594 31 model.embedding_dim 0.0 +594 31 loss.margin 5.269140724911235 +594 31 negative_sampler.num_negs_per_pos 36.0 +594 31 training.batch_size 2.0 +594 32 model.embedding_dim 0.0 +594 32 loss.margin 2.916785681605685 +594 32 negative_sampler.num_negs_per_pos 3.0 +594 32 training.batch_size 0.0 +594 33 model.embedding_dim 2.0 +594 33 loss.margin 5.893656103540853 +594 33 negative_sampler.num_negs_per_pos 45.0 +594 33 training.batch_size 1.0 +594 34 model.embedding_dim 0.0 +594 34 loss.margin 7.907472398392742 +594 34 negative_sampler.num_negs_per_pos 57.0 +594 34 training.batch_size 2.0 +594 35 model.embedding_dim 0.0 +594 
35 loss.margin 5.296900399851487 +594 35 negative_sampler.num_negs_per_pos 86.0 +594 35 training.batch_size 2.0 +594 36 model.embedding_dim 0.0 +594 36 loss.margin 1.2873442382544973 +594 36 negative_sampler.num_negs_per_pos 54.0 +594 36 training.batch_size 0.0 +594 37 model.embedding_dim 1.0 +594 37 loss.margin 8.326051875116498 +594 37 negative_sampler.num_negs_per_pos 80.0 +594 37 training.batch_size 1.0 +594 38 model.embedding_dim 2.0 +594 38 loss.margin 7.839791247237158 +594 38 negative_sampler.num_negs_per_pos 75.0 +594 38 training.batch_size 2.0 +594 39 model.embedding_dim 1.0 +594 39 loss.margin 0.5723659380251038 +594 39 negative_sampler.num_negs_per_pos 72.0 +594 39 training.batch_size 2.0 +594 40 model.embedding_dim 1.0 +594 40 loss.margin 3.3279129352662116 +594 40 negative_sampler.num_negs_per_pos 53.0 +594 40 training.batch_size 2.0 +594 41 model.embedding_dim 0.0 +594 41 loss.margin 6.294467367683137 +594 41 negative_sampler.num_negs_per_pos 81.0 +594 41 training.batch_size 1.0 +594 42 model.embedding_dim 2.0 +594 42 loss.margin 3.290668628168371 +594 42 negative_sampler.num_negs_per_pos 1.0 +594 42 training.batch_size 2.0 +594 43 model.embedding_dim 1.0 +594 43 loss.margin 8.901906627107413 +594 43 negative_sampler.num_negs_per_pos 62.0 +594 43 training.batch_size 2.0 +594 44 model.embedding_dim 2.0 +594 44 loss.margin 6.26171234807729 +594 44 negative_sampler.num_negs_per_pos 86.0 +594 44 training.batch_size 2.0 +594 45 model.embedding_dim 2.0 +594 45 loss.margin 5.686946708735667 +594 45 negative_sampler.num_negs_per_pos 28.0 +594 45 training.batch_size 2.0 +594 46 model.embedding_dim 2.0 +594 46 loss.margin 5.05571992871965 +594 46 negative_sampler.num_negs_per_pos 84.0 +594 46 training.batch_size 1.0 +594 47 model.embedding_dim 2.0 +594 47 loss.margin 5.734942119478613 +594 47 negative_sampler.num_negs_per_pos 40.0 +594 47 training.batch_size 0.0 +594 48 model.embedding_dim 1.0 +594 48 loss.margin 4.967374144164271 +594 48 
negative_sampler.num_negs_per_pos 6.0 +594 48 training.batch_size 2.0 +594 49 model.embedding_dim 1.0 +594 49 loss.margin 6.236523277162277 +594 49 negative_sampler.num_negs_per_pos 68.0 +594 49 training.batch_size 2.0 +594 50 model.embedding_dim 0.0 +594 50 loss.margin 8.333537438283416 +594 50 negative_sampler.num_negs_per_pos 76.0 +594 50 training.batch_size 0.0 +594 51 model.embedding_dim 2.0 +594 51 loss.margin 4.764914726799021 +594 51 negative_sampler.num_negs_per_pos 20.0 +594 51 training.batch_size 1.0 +594 52 model.embedding_dim 0.0 +594 52 loss.margin 2.500903509996864 +594 52 negative_sampler.num_negs_per_pos 86.0 +594 52 training.batch_size 0.0 +594 53 model.embedding_dim 1.0 +594 53 loss.margin 7.56849908114979 +594 53 negative_sampler.num_negs_per_pos 16.0 +594 53 training.batch_size 1.0 +594 54 model.embedding_dim 1.0 +594 54 loss.margin 1.9117261853112042 +594 54 negative_sampler.num_negs_per_pos 34.0 +594 54 training.batch_size 1.0 +594 55 model.embedding_dim 2.0 +594 55 loss.margin 2.736409259214968 +594 55 negative_sampler.num_negs_per_pos 48.0 +594 55 training.batch_size 1.0 +594 56 model.embedding_dim 2.0 +594 56 loss.margin 9.93675750207575 +594 56 negative_sampler.num_negs_per_pos 36.0 +594 56 training.batch_size 0.0 +594 57 model.embedding_dim 0.0 +594 57 loss.margin 3.3745157158390393 +594 57 negative_sampler.num_negs_per_pos 18.0 +594 57 training.batch_size 0.0 +594 58 model.embedding_dim 1.0 +594 58 loss.margin 5.292901775837123 +594 58 negative_sampler.num_negs_per_pos 69.0 +594 58 training.batch_size 2.0 +594 59 model.embedding_dim 0.0 +594 59 loss.margin 9.179314195996149 +594 59 negative_sampler.num_negs_per_pos 79.0 +594 59 training.batch_size 2.0 +594 60 model.embedding_dim 2.0 +594 60 loss.margin 0.8948562491876206 +594 60 negative_sampler.num_negs_per_pos 10.0 +594 60 training.batch_size 2.0 +594 61 model.embedding_dim 0.0 +594 61 loss.margin 5.611782485581299 +594 61 negative_sampler.num_negs_per_pos 89.0 +594 61 
training.batch_size 2.0 +594 62 model.embedding_dim 0.0 +594 62 loss.margin 3.611496554368406 +594 62 negative_sampler.num_negs_per_pos 8.0 +594 62 training.batch_size 0.0 +594 63 model.embedding_dim 1.0 +594 63 loss.margin 7.175423652576658 +594 63 negative_sampler.num_negs_per_pos 66.0 +594 63 training.batch_size 2.0 +594 64 model.embedding_dim 2.0 +594 64 loss.margin 7.108564051266095 +594 64 negative_sampler.num_negs_per_pos 49.0 +594 64 training.batch_size 0.0 +594 65 model.embedding_dim 0.0 +594 65 loss.margin 1.1512041892175833 +594 65 negative_sampler.num_negs_per_pos 9.0 +594 65 training.batch_size 2.0 +594 66 model.embedding_dim 2.0 +594 66 loss.margin 5.971285832155977 +594 66 negative_sampler.num_negs_per_pos 22.0 +594 66 training.batch_size 0.0 +594 67 model.embedding_dim 1.0 +594 67 loss.margin 6.835566951917581 +594 67 negative_sampler.num_negs_per_pos 65.0 +594 67 training.batch_size 2.0 +594 68 model.embedding_dim 1.0 +594 68 loss.margin 7.852288065657348 +594 68 negative_sampler.num_negs_per_pos 11.0 +594 68 training.batch_size 2.0 +594 69 model.embedding_dim 1.0 +594 69 loss.margin 4.141487434952889 +594 69 negative_sampler.num_negs_per_pos 60.0 +594 69 training.batch_size 2.0 +594 70 model.embedding_dim 0.0 +594 70 loss.margin 0.9511759457513653 +594 70 negative_sampler.num_negs_per_pos 13.0 +594 70 training.batch_size 2.0 +594 71 model.embedding_dim 2.0 +594 71 loss.margin 5.095043303054849 +594 71 negative_sampler.num_negs_per_pos 56.0 +594 71 training.batch_size 0.0 +594 72 model.embedding_dim 2.0 +594 72 loss.margin 8.045540736417399 +594 72 negative_sampler.num_negs_per_pos 61.0 +594 72 training.batch_size 2.0 +594 73 model.embedding_dim 0.0 +594 73 loss.margin 6.995871676557966 +594 73 negative_sampler.num_negs_per_pos 4.0 +594 73 training.batch_size 0.0 +594 74 model.embedding_dim 2.0 +594 74 loss.margin 1.8664593977296957 +594 74 negative_sampler.num_negs_per_pos 28.0 +594 74 training.batch_size 1.0 +594 75 model.embedding_dim 2.0 +594 
75 loss.margin 3.5448068805894373 +594 75 negative_sampler.num_negs_per_pos 65.0 +594 75 training.batch_size 2.0 +594 76 model.embedding_dim 2.0 +594 76 loss.margin 3.1222794979551356 +594 76 negative_sampler.num_negs_per_pos 60.0 +594 76 training.batch_size 0.0 +594 77 model.embedding_dim 2.0 +594 77 loss.margin 3.7340752407049753 +594 77 negative_sampler.num_negs_per_pos 10.0 +594 77 training.batch_size 1.0 +594 78 model.embedding_dim 0.0 +594 78 loss.margin 4.014761585685779 +594 78 negative_sampler.num_negs_per_pos 8.0 +594 78 training.batch_size 0.0 +594 79 model.embedding_dim 0.0 +594 79 loss.margin 8.862935833480604 +594 79 negative_sampler.num_negs_per_pos 26.0 +594 79 training.batch_size 2.0 +594 80 model.embedding_dim 0.0 +594 80 loss.margin 9.671396043652113 +594 80 negative_sampler.num_negs_per_pos 33.0 +594 80 training.batch_size 2.0 +594 81 model.embedding_dim 1.0 +594 81 loss.margin 3.3927596048553963 +594 81 negative_sampler.num_negs_per_pos 82.0 +594 81 training.batch_size 0.0 +594 82 model.embedding_dim 2.0 +594 82 loss.margin 1.1240640424372796 +594 82 negative_sampler.num_negs_per_pos 38.0 +594 82 training.batch_size 0.0 +594 83 model.embedding_dim 2.0 +594 83 loss.margin 2.4437305370882396 +594 83 negative_sampler.num_negs_per_pos 87.0 +594 83 training.batch_size 2.0 +594 84 model.embedding_dim 2.0 +594 84 loss.margin 9.226007842671423 +594 84 negative_sampler.num_negs_per_pos 64.0 +594 84 training.batch_size 0.0 +594 85 model.embedding_dim 1.0 +594 85 loss.margin 6.528804639467971 +594 85 negative_sampler.num_negs_per_pos 94.0 +594 85 training.batch_size 2.0 +594 86 model.embedding_dim 2.0 +594 86 loss.margin 1.2631769317630042 +594 86 negative_sampler.num_negs_per_pos 15.0 +594 86 training.batch_size 1.0 +594 87 model.embedding_dim 1.0 +594 87 loss.margin 3.317401673775438 +594 87 negative_sampler.num_negs_per_pos 23.0 +594 87 training.batch_size 0.0 +594 88 model.embedding_dim 0.0 +594 88 loss.margin 3.6836658817486545 +594 88 
negative_sampler.num_negs_per_pos 98.0 +594 88 training.batch_size 2.0 +594 89 model.embedding_dim 1.0 +594 89 loss.margin 8.570629199953599 +594 89 negative_sampler.num_negs_per_pos 47.0 +594 89 training.batch_size 2.0 +594 90 model.embedding_dim 2.0 +594 90 loss.margin 3.043128018032651 +594 90 negative_sampler.num_negs_per_pos 77.0 +594 90 training.batch_size 0.0 +594 91 model.embedding_dim 1.0 +594 91 loss.margin 1.842227622213941 +594 91 negative_sampler.num_negs_per_pos 40.0 +594 91 training.batch_size 1.0 +594 92 model.embedding_dim 1.0 +594 92 loss.margin 0.9932291039890687 +594 92 negative_sampler.num_negs_per_pos 6.0 +594 92 training.batch_size 2.0 +594 93 model.embedding_dim 1.0 +594 93 loss.margin 6.353374951688717 +594 93 negative_sampler.num_negs_per_pos 47.0 +594 93 training.batch_size 1.0 +594 94 model.embedding_dim 2.0 +594 94 loss.margin 3.0482342791985904 +594 94 negative_sampler.num_negs_per_pos 85.0 +594 94 training.batch_size 1.0 +594 95 model.embedding_dim 1.0 +594 95 loss.margin 6.777857509135489 +594 95 negative_sampler.num_negs_per_pos 0.0 +594 95 training.batch_size 2.0 +594 96 model.embedding_dim 2.0 +594 96 loss.margin 4.357871949256204 +594 96 negative_sampler.num_negs_per_pos 93.0 +594 96 training.batch_size 1.0 +594 97 model.embedding_dim 1.0 +594 97 loss.margin 7.626409342140922 +594 97 negative_sampler.num_negs_per_pos 89.0 +594 97 training.batch_size 1.0 +594 98 model.embedding_dim 2.0 +594 98 loss.margin 0.7762449954156823 +594 98 negative_sampler.num_negs_per_pos 13.0 +594 98 training.batch_size 2.0 +594 99 model.embedding_dim 2.0 +594 99 loss.margin 5.196982622531838 +594 99 negative_sampler.num_negs_per_pos 1.0 +594 99 training.batch_size 2.0 +594 100 model.embedding_dim 0.0 +594 100 loss.margin 8.1731933824613 +594 100 negative_sampler.num_negs_per_pos 43.0 +594 100 training.batch_size 2.0 +594 1 dataset """kinships""" +594 1 model """rotate""" +594 1 loss """marginranking""" +594 1 regularizer """no""" +594 1 optimizer 
"""adadelta""" +594 1 training_loop """owa""" +594 1 negative_sampler """basic""" +594 1 evaluator """rankbased""" +594 2 dataset """kinships""" +594 2 model """rotate""" +594 2 loss """marginranking""" +594 2 regularizer """no""" +594 2 optimizer """adadelta""" +594 2 training_loop """owa""" +594 2 negative_sampler """basic""" +594 2 evaluator """rankbased""" +594 3 dataset """kinships""" +594 3 model """rotate""" +594 3 loss """marginranking""" +594 3 regularizer """no""" +594 3 optimizer """adadelta""" +594 3 training_loop """owa""" +594 3 negative_sampler """basic""" +594 3 evaluator """rankbased""" +594 4 dataset """kinships""" +594 4 model """rotate""" +594 4 loss """marginranking""" +594 4 regularizer """no""" +594 4 optimizer """adadelta""" +594 4 training_loop """owa""" +594 4 negative_sampler """basic""" +594 4 evaluator """rankbased""" +594 5 dataset """kinships""" +594 5 model """rotate""" +594 5 loss """marginranking""" +594 5 regularizer """no""" +594 5 optimizer """adadelta""" +594 5 training_loop """owa""" +594 5 negative_sampler """basic""" +594 5 evaluator """rankbased""" +594 6 dataset """kinships""" +594 6 model """rotate""" +594 6 loss """marginranking""" +594 6 regularizer """no""" +594 6 optimizer """adadelta""" +594 6 training_loop """owa""" +594 6 negative_sampler """basic""" +594 6 evaluator """rankbased""" +594 7 dataset """kinships""" +594 7 model """rotate""" +594 7 loss """marginranking""" +594 7 regularizer """no""" +594 7 optimizer """adadelta""" +594 7 training_loop """owa""" +594 7 negative_sampler """basic""" +594 7 evaluator """rankbased""" +594 8 dataset """kinships""" +594 8 model """rotate""" +594 8 loss """marginranking""" +594 8 regularizer """no""" +594 8 optimizer """adadelta""" +594 8 training_loop """owa""" +594 8 negative_sampler """basic""" +594 8 evaluator """rankbased""" +594 9 dataset """kinships""" +594 9 model """rotate""" +594 9 loss """marginranking""" +594 9 regularizer """no""" +594 9 optimizer """adadelta""" 
+594 9 training_loop """owa""" +594 9 negative_sampler """basic""" +594 9 evaluator """rankbased""" +594 10 dataset """kinships""" +594 10 model """rotate""" +594 10 loss """marginranking""" +594 10 regularizer """no""" +594 10 optimizer """adadelta""" +594 10 training_loop """owa""" +594 10 negative_sampler """basic""" +594 10 evaluator """rankbased""" +594 11 dataset """kinships""" +594 11 model """rotate""" +594 11 loss """marginranking""" +594 11 regularizer """no""" +594 11 optimizer """adadelta""" +594 11 training_loop """owa""" +594 11 negative_sampler """basic""" +594 11 evaluator """rankbased""" +594 12 dataset """kinships""" +594 12 model """rotate""" +594 12 loss """marginranking""" +594 12 regularizer """no""" +594 12 optimizer """adadelta""" +594 12 training_loop """owa""" +594 12 negative_sampler """basic""" +594 12 evaluator """rankbased""" +594 13 dataset """kinships""" +594 13 model """rotate""" +594 13 loss """marginranking""" +594 13 regularizer """no""" +594 13 optimizer """adadelta""" +594 13 training_loop """owa""" +594 13 negative_sampler """basic""" +594 13 evaluator """rankbased""" +594 14 dataset """kinships""" +594 14 model """rotate""" +594 14 loss """marginranking""" +594 14 regularizer """no""" +594 14 optimizer """adadelta""" +594 14 training_loop """owa""" +594 14 negative_sampler """basic""" +594 14 evaluator """rankbased""" +594 15 dataset """kinships""" +594 15 model """rotate""" +594 15 loss """marginranking""" +594 15 regularizer """no""" +594 15 optimizer """adadelta""" +594 15 training_loop """owa""" +594 15 negative_sampler """basic""" +594 15 evaluator """rankbased""" +594 16 dataset """kinships""" +594 16 model """rotate""" +594 16 loss """marginranking""" +594 16 regularizer """no""" +594 16 optimizer """adadelta""" +594 16 training_loop """owa""" +594 16 negative_sampler """basic""" +594 16 evaluator """rankbased""" +594 17 dataset """kinships""" +594 17 model """rotate""" +594 17 loss """marginranking""" +594 17 
regularizer """no""" +594 17 optimizer """adadelta""" +594 17 training_loop """owa""" +594 17 negative_sampler """basic""" +594 17 evaluator """rankbased""" +594 18 dataset """kinships""" +594 18 model """rotate""" +594 18 loss """marginranking""" +594 18 regularizer """no""" +594 18 optimizer """adadelta""" +594 18 training_loop """owa""" +594 18 negative_sampler """basic""" +594 18 evaluator """rankbased""" +594 19 dataset """kinships""" +594 19 model """rotate""" +594 19 loss """marginranking""" +594 19 regularizer """no""" +594 19 optimizer """adadelta""" +594 19 training_loop """owa""" +594 19 negative_sampler """basic""" +594 19 evaluator """rankbased""" +594 20 dataset """kinships""" +594 20 model """rotate""" +594 20 loss """marginranking""" +594 20 regularizer """no""" +594 20 optimizer """adadelta""" +594 20 training_loop """owa""" +594 20 negative_sampler """basic""" +594 20 evaluator """rankbased""" +594 21 dataset """kinships""" +594 21 model """rotate""" +594 21 loss """marginranking""" +594 21 regularizer """no""" +594 21 optimizer """adadelta""" +594 21 training_loop """owa""" +594 21 negative_sampler """basic""" +594 21 evaluator """rankbased""" +594 22 dataset """kinships""" +594 22 model """rotate""" +594 22 loss """marginranking""" +594 22 regularizer """no""" +594 22 optimizer """adadelta""" +594 22 training_loop """owa""" +594 22 negative_sampler """basic""" +594 22 evaluator """rankbased""" +594 23 dataset """kinships""" +594 23 model """rotate""" +594 23 loss """marginranking""" +594 23 regularizer """no""" +594 23 optimizer """adadelta""" +594 23 training_loop """owa""" +594 23 negative_sampler """basic""" +594 23 evaluator """rankbased""" +594 24 dataset """kinships""" +594 24 model """rotate""" +594 24 loss """marginranking""" +594 24 regularizer """no""" +594 24 optimizer """adadelta""" +594 24 training_loop """owa""" +594 24 negative_sampler """basic""" +594 24 evaluator """rankbased""" +594 25 dataset """kinships""" +594 25 model 
"""rotate""" +594 25 loss """marginranking""" +594 25 regularizer """no""" +594 25 optimizer """adadelta""" +594 25 training_loop """owa""" +594 25 negative_sampler """basic""" +594 25 evaluator """rankbased""" +594 26 dataset """kinships""" +594 26 model """rotate""" +594 26 loss """marginranking""" +594 26 regularizer """no""" +594 26 optimizer """adadelta""" +594 26 training_loop """owa""" +594 26 negative_sampler """basic""" +594 26 evaluator """rankbased""" +594 27 dataset """kinships""" +594 27 model """rotate""" +594 27 loss """marginranking""" +594 27 regularizer """no""" +594 27 optimizer """adadelta""" +594 27 training_loop """owa""" +594 27 negative_sampler """basic""" +594 27 evaluator """rankbased""" +594 28 dataset """kinships""" +594 28 model """rotate""" +594 28 loss """marginranking""" +594 28 regularizer """no""" +594 28 optimizer """adadelta""" +594 28 training_loop """owa""" +594 28 negative_sampler """basic""" +594 28 evaluator """rankbased""" +594 29 dataset """kinships""" +594 29 model """rotate""" +594 29 loss """marginranking""" +594 29 regularizer """no""" +594 29 optimizer """adadelta""" +594 29 training_loop """owa""" +594 29 negative_sampler """basic""" +594 29 evaluator """rankbased""" +594 30 dataset """kinships""" +594 30 model """rotate""" +594 30 loss """marginranking""" +594 30 regularizer """no""" +594 30 optimizer """adadelta""" +594 30 training_loop """owa""" +594 30 negative_sampler """basic""" +594 30 evaluator """rankbased""" +594 31 dataset """kinships""" +594 31 model """rotate""" +594 31 loss """marginranking""" +594 31 regularizer """no""" +594 31 optimizer """adadelta""" +594 31 training_loop """owa""" +594 31 negative_sampler """basic""" +594 31 evaluator """rankbased""" +594 32 dataset """kinships""" +594 32 model """rotate""" +594 32 loss """marginranking""" +594 32 regularizer """no""" +594 32 optimizer """adadelta""" +594 32 training_loop """owa""" +594 32 negative_sampler """basic""" +594 32 evaluator 
"""rankbased""" +594 33 dataset """kinships""" +594 33 model """rotate""" +594 33 loss """marginranking""" +594 33 regularizer """no""" +594 33 optimizer """adadelta""" +594 33 training_loop """owa""" +594 33 negative_sampler """basic""" +594 33 evaluator """rankbased""" +594 34 dataset """kinships""" +594 34 model """rotate""" +594 34 loss """marginranking""" +594 34 regularizer """no""" +594 34 optimizer """adadelta""" +594 34 training_loop """owa""" +594 34 negative_sampler """basic""" +594 34 evaluator """rankbased""" +594 35 dataset """kinships""" +594 35 model """rotate""" +594 35 loss """marginranking""" +594 35 regularizer """no""" +594 35 optimizer """adadelta""" +594 35 training_loop """owa""" +594 35 negative_sampler """basic""" +594 35 evaluator """rankbased""" +594 36 dataset """kinships""" +594 36 model """rotate""" +594 36 loss """marginranking""" +594 36 regularizer """no""" +594 36 optimizer """adadelta""" +594 36 training_loop """owa""" +594 36 negative_sampler """basic""" +594 36 evaluator """rankbased""" +594 37 dataset """kinships""" +594 37 model """rotate""" +594 37 loss """marginranking""" +594 37 regularizer """no""" +594 37 optimizer """adadelta""" +594 37 training_loop """owa""" +594 37 negative_sampler """basic""" +594 37 evaluator """rankbased""" +594 38 dataset """kinships""" +594 38 model """rotate""" +594 38 loss """marginranking""" +594 38 regularizer """no""" +594 38 optimizer """adadelta""" +594 38 training_loop """owa""" +594 38 negative_sampler """basic""" +594 38 evaluator """rankbased""" +594 39 dataset """kinships""" +594 39 model """rotate""" +594 39 loss """marginranking""" +594 39 regularizer """no""" +594 39 optimizer """adadelta""" +594 39 training_loop """owa""" +594 39 negative_sampler """basic""" +594 39 evaluator """rankbased""" +594 40 dataset """kinships""" +594 40 model """rotate""" +594 40 loss """marginranking""" +594 40 regularizer """no""" +594 40 optimizer """adadelta""" +594 40 training_loop """owa""" +594 
40 negative_sampler """basic""" +594 40 evaluator """rankbased""" +594 41 dataset """kinships""" +594 41 model """rotate""" +594 41 loss """marginranking""" +594 41 regularizer """no""" +594 41 optimizer """adadelta""" +594 41 training_loop """owa""" +594 41 negative_sampler """basic""" +594 41 evaluator """rankbased""" +594 42 dataset """kinships""" +594 42 model """rotate""" +594 42 loss """marginranking""" +594 42 regularizer """no""" +594 42 optimizer """adadelta""" +594 42 training_loop """owa""" +594 42 negative_sampler """basic""" +594 42 evaluator """rankbased""" +594 43 dataset """kinships""" +594 43 model """rotate""" +594 43 loss """marginranking""" +594 43 regularizer """no""" +594 43 optimizer """adadelta""" +594 43 training_loop """owa""" +594 43 negative_sampler """basic""" +594 43 evaluator """rankbased""" +594 44 dataset """kinships""" +594 44 model """rotate""" +594 44 loss """marginranking""" +594 44 regularizer """no""" +594 44 optimizer """adadelta""" +594 44 training_loop """owa""" +594 44 negative_sampler """basic""" +594 44 evaluator """rankbased""" +594 45 dataset """kinships""" +594 45 model """rotate""" +594 45 loss """marginranking""" +594 45 regularizer """no""" +594 45 optimizer """adadelta""" +594 45 training_loop """owa""" +594 45 negative_sampler """basic""" +594 45 evaluator """rankbased""" +594 46 dataset """kinships""" +594 46 model """rotate""" +594 46 loss """marginranking""" +594 46 regularizer """no""" +594 46 optimizer """adadelta""" +594 46 training_loop """owa""" +594 46 negative_sampler """basic""" +594 46 evaluator """rankbased""" +594 47 dataset """kinships""" +594 47 model """rotate""" +594 47 loss """marginranking""" +594 47 regularizer """no""" +594 47 optimizer """adadelta""" +594 47 training_loop """owa""" +594 47 negative_sampler """basic""" +594 47 evaluator """rankbased""" +594 48 dataset """kinships""" +594 48 model """rotate""" +594 48 loss """marginranking""" +594 48 regularizer """no""" +594 48 optimizer 
"""adadelta""" +594 48 training_loop """owa""" +594 48 negative_sampler """basic""" +594 48 evaluator """rankbased""" +594 49 dataset """kinships""" +594 49 model """rotate""" +594 49 loss """marginranking""" +594 49 regularizer """no""" +594 49 optimizer """adadelta""" +594 49 training_loop """owa""" +594 49 negative_sampler """basic""" +594 49 evaluator """rankbased""" +594 50 dataset """kinships""" +594 50 model """rotate""" +594 50 loss """marginranking""" +594 50 regularizer """no""" +594 50 optimizer """adadelta""" +594 50 training_loop """owa""" +594 50 negative_sampler """basic""" +594 50 evaluator """rankbased""" +594 51 dataset """kinships""" +594 51 model """rotate""" +594 51 loss """marginranking""" +594 51 regularizer """no""" +594 51 optimizer """adadelta""" +594 51 training_loop """owa""" +594 51 negative_sampler """basic""" +594 51 evaluator """rankbased""" +594 52 dataset """kinships""" +594 52 model """rotate""" +594 52 loss """marginranking""" +594 52 regularizer """no""" +594 52 optimizer """adadelta""" +594 52 training_loop """owa""" +594 52 negative_sampler """basic""" +594 52 evaluator """rankbased""" +594 53 dataset """kinships""" +594 53 model """rotate""" +594 53 loss """marginranking""" +594 53 regularizer """no""" +594 53 optimizer """adadelta""" +594 53 training_loop """owa""" +594 53 negative_sampler """basic""" +594 53 evaluator """rankbased""" +594 54 dataset """kinships""" +594 54 model """rotate""" +594 54 loss """marginranking""" +594 54 regularizer """no""" +594 54 optimizer """adadelta""" +594 54 training_loop """owa""" +594 54 negative_sampler """basic""" +594 54 evaluator """rankbased""" +594 55 dataset """kinships""" +594 55 model """rotate""" +594 55 loss """marginranking""" +594 55 regularizer """no""" +594 55 optimizer """adadelta""" +594 55 training_loop """owa""" +594 55 negative_sampler """basic""" +594 55 evaluator """rankbased""" +594 56 dataset """kinships""" +594 56 model """rotate""" +594 56 loss 
"""marginranking""" +594 56 regularizer """no""" +594 56 optimizer """adadelta""" +594 56 training_loop """owa""" +594 56 negative_sampler """basic""" +594 56 evaluator """rankbased""" +594 57 dataset """kinships""" +594 57 model """rotate""" +594 57 loss """marginranking""" +594 57 regularizer """no""" +594 57 optimizer """adadelta""" +594 57 training_loop """owa""" +594 57 negative_sampler """basic""" +594 57 evaluator """rankbased""" +594 58 dataset """kinships""" +594 58 model """rotate""" +594 58 loss """marginranking""" +594 58 regularizer """no""" +594 58 optimizer """adadelta""" +594 58 training_loop """owa""" +594 58 negative_sampler """basic""" +594 58 evaluator """rankbased""" +594 59 dataset """kinships""" +594 59 model """rotate""" +594 59 loss """marginranking""" +594 59 regularizer """no""" +594 59 optimizer """adadelta""" +594 59 training_loop """owa""" +594 59 negative_sampler """basic""" +594 59 evaluator """rankbased""" +594 60 dataset """kinships""" +594 60 model """rotate""" +594 60 loss """marginranking""" +594 60 regularizer """no""" +594 60 optimizer """adadelta""" +594 60 training_loop """owa""" +594 60 negative_sampler """basic""" +594 60 evaluator """rankbased""" +594 61 dataset """kinships""" +594 61 model """rotate""" +594 61 loss """marginranking""" +594 61 regularizer """no""" +594 61 optimizer """adadelta""" +594 61 training_loop """owa""" +594 61 negative_sampler """basic""" +594 61 evaluator """rankbased""" +594 62 dataset """kinships""" +594 62 model """rotate""" +594 62 loss """marginranking""" +594 62 regularizer """no""" +594 62 optimizer """adadelta""" +594 62 training_loop """owa""" +594 62 negative_sampler """basic""" +594 62 evaluator """rankbased""" +594 63 dataset """kinships""" +594 63 model """rotate""" +594 63 loss """marginranking""" +594 63 regularizer """no""" +594 63 optimizer """adadelta""" +594 63 training_loop """owa""" +594 63 negative_sampler """basic""" +594 63 evaluator """rankbased""" +594 64 dataset 
"""kinships""" +594 64 model """rotate""" +594 64 loss """marginranking""" +594 64 regularizer """no""" +594 64 optimizer """adadelta""" +594 64 training_loop """owa""" +594 64 negative_sampler """basic""" +594 64 evaluator """rankbased""" +594 65 dataset """kinships""" +594 65 model """rotate""" +594 65 loss """marginranking""" +594 65 regularizer """no""" +594 65 optimizer """adadelta""" +594 65 training_loop """owa""" +594 65 negative_sampler """basic""" +594 65 evaluator """rankbased""" +594 66 dataset """kinships""" +594 66 model """rotate""" +594 66 loss """marginranking""" +594 66 regularizer """no""" +594 66 optimizer """adadelta""" +594 66 training_loop """owa""" +594 66 negative_sampler """basic""" +594 66 evaluator """rankbased""" +594 67 dataset """kinships""" +594 67 model """rotate""" +594 67 loss """marginranking""" +594 67 regularizer """no""" +594 67 optimizer """adadelta""" +594 67 training_loop """owa""" +594 67 negative_sampler """basic""" +594 67 evaluator """rankbased""" +594 68 dataset """kinships""" +594 68 model """rotate""" +594 68 loss """marginranking""" +594 68 regularizer """no""" +594 68 optimizer """adadelta""" +594 68 training_loop """owa""" +594 68 negative_sampler """basic""" +594 68 evaluator """rankbased""" +594 69 dataset """kinships""" +594 69 model """rotate""" +594 69 loss """marginranking""" +594 69 regularizer """no""" +594 69 optimizer """adadelta""" +594 69 training_loop """owa""" +594 69 negative_sampler """basic""" +594 69 evaluator """rankbased""" +594 70 dataset """kinships""" +594 70 model """rotate""" +594 70 loss """marginranking""" +594 70 regularizer """no""" +594 70 optimizer """adadelta""" +594 70 training_loop """owa""" +594 70 negative_sampler """basic""" +594 70 evaluator """rankbased""" +594 71 dataset """kinships""" +594 71 model """rotate""" +594 71 loss """marginranking""" +594 71 regularizer """no""" +594 71 optimizer """adadelta""" +594 71 training_loop """owa""" +594 71 negative_sampler """basic""" 
+594 71 evaluator """rankbased""" +594 72 dataset """kinships""" +594 72 model """rotate""" +594 72 loss """marginranking""" +594 72 regularizer """no""" +594 72 optimizer """adadelta""" +594 72 training_loop """owa""" +594 72 negative_sampler """basic""" +594 72 evaluator """rankbased""" +594 73 dataset """kinships""" +594 73 model """rotate""" +594 73 loss """marginranking""" +594 73 regularizer """no""" +594 73 optimizer """adadelta""" +594 73 training_loop """owa""" +594 73 negative_sampler """basic""" +594 73 evaluator """rankbased""" +594 74 dataset """kinships""" +594 74 model """rotate""" +594 74 loss """marginranking""" +594 74 regularizer """no""" +594 74 optimizer """adadelta""" +594 74 training_loop """owa""" +594 74 negative_sampler """basic""" +594 74 evaluator """rankbased""" +594 75 dataset """kinships""" +594 75 model """rotate""" +594 75 loss """marginranking""" +594 75 regularizer """no""" +594 75 optimizer """adadelta""" +594 75 training_loop """owa""" +594 75 negative_sampler """basic""" +594 75 evaluator """rankbased""" +594 76 dataset """kinships""" +594 76 model """rotate""" +594 76 loss """marginranking""" +594 76 regularizer """no""" +594 76 optimizer """adadelta""" +594 76 training_loop """owa""" +594 76 negative_sampler """basic""" +594 76 evaluator """rankbased""" +594 77 dataset """kinships""" +594 77 model """rotate""" +594 77 loss """marginranking""" +594 77 regularizer """no""" +594 77 optimizer """adadelta""" +594 77 training_loop """owa""" +594 77 negative_sampler """basic""" +594 77 evaluator """rankbased""" +594 78 dataset """kinships""" +594 78 model """rotate""" +594 78 loss """marginranking""" +594 78 regularizer """no""" +594 78 optimizer """adadelta""" +594 78 training_loop """owa""" +594 78 negative_sampler """basic""" +594 78 evaluator """rankbased""" +594 79 dataset """kinships""" +594 79 model """rotate""" +594 79 loss """marginranking""" +594 79 regularizer """no""" +594 79 optimizer """adadelta""" +594 79 
training_loop """owa""" +594 79 negative_sampler """basic""" +594 79 evaluator """rankbased""" +594 80 dataset """kinships""" +594 80 model """rotate""" +594 80 loss """marginranking""" +594 80 regularizer """no""" +594 80 optimizer """adadelta""" +594 80 training_loop """owa""" +594 80 negative_sampler """basic""" +594 80 evaluator """rankbased""" +594 81 dataset """kinships""" +594 81 model """rotate""" +594 81 loss """marginranking""" +594 81 regularizer """no""" +594 81 optimizer """adadelta""" +594 81 training_loop """owa""" +594 81 negative_sampler """basic""" +594 81 evaluator """rankbased""" +594 82 dataset """kinships""" +594 82 model """rotate""" +594 82 loss """marginranking""" +594 82 regularizer """no""" +594 82 optimizer """adadelta""" +594 82 training_loop """owa""" +594 82 negative_sampler """basic""" +594 82 evaluator """rankbased""" +594 83 dataset """kinships""" +594 83 model """rotate""" +594 83 loss """marginranking""" +594 83 regularizer """no""" +594 83 optimizer """adadelta""" +594 83 training_loop """owa""" +594 83 negative_sampler """basic""" +594 83 evaluator """rankbased""" +594 84 dataset """kinships""" +594 84 model """rotate""" +594 84 loss """marginranking""" +594 84 regularizer """no""" +594 84 optimizer """adadelta""" +594 84 training_loop """owa""" +594 84 negative_sampler """basic""" +594 84 evaluator """rankbased""" +594 85 dataset """kinships""" +594 85 model """rotate""" +594 85 loss """marginranking""" +594 85 regularizer """no""" +594 85 optimizer """adadelta""" +594 85 training_loop """owa""" +594 85 negative_sampler """basic""" +594 85 evaluator """rankbased""" +594 86 dataset """kinships""" +594 86 model """rotate""" +594 86 loss """marginranking""" +594 86 regularizer """no""" +594 86 optimizer """adadelta""" +594 86 training_loop """owa""" +594 86 negative_sampler """basic""" +594 86 evaluator """rankbased""" +594 87 dataset """kinships""" +594 87 model """rotate""" +594 87 loss """marginranking""" +594 87 regularizer 
"""no""" +594 87 optimizer """adadelta""" +594 87 training_loop """owa""" +594 87 negative_sampler """basic""" +594 87 evaluator """rankbased""" +594 88 dataset """kinships""" +594 88 model """rotate""" +594 88 loss """marginranking""" +594 88 regularizer """no""" +594 88 optimizer """adadelta""" +594 88 training_loop """owa""" +594 88 negative_sampler """basic""" +594 88 evaluator """rankbased""" +594 89 dataset """kinships""" +594 89 model """rotate""" +594 89 loss """marginranking""" +594 89 regularizer """no""" +594 89 optimizer """adadelta""" +594 89 training_loop """owa""" +594 89 negative_sampler """basic""" +594 89 evaluator """rankbased""" +594 90 dataset """kinships""" +594 90 model """rotate""" +594 90 loss """marginranking""" +594 90 regularizer """no""" +594 90 optimizer """adadelta""" +594 90 training_loop """owa""" +594 90 negative_sampler """basic""" +594 90 evaluator """rankbased""" +594 91 dataset """kinships""" +594 91 model """rotate""" +594 91 loss """marginranking""" +594 91 regularizer """no""" +594 91 optimizer """adadelta""" +594 91 training_loop """owa""" +594 91 negative_sampler """basic""" +594 91 evaluator """rankbased""" +594 92 dataset """kinships""" +594 92 model """rotate""" +594 92 loss """marginranking""" +594 92 regularizer """no""" +594 92 optimizer """adadelta""" +594 92 training_loop """owa""" +594 92 negative_sampler """basic""" +594 92 evaluator """rankbased""" +594 93 dataset """kinships""" +594 93 model """rotate""" +594 93 loss """marginranking""" +594 93 regularizer """no""" +594 93 optimizer """adadelta""" +594 93 training_loop """owa""" +594 93 negative_sampler """basic""" +594 93 evaluator """rankbased""" +594 94 dataset """kinships""" +594 94 model """rotate""" +594 94 loss """marginranking""" +594 94 regularizer """no""" +594 94 optimizer """adadelta""" +594 94 training_loop """owa""" +594 94 negative_sampler """basic""" +594 94 evaluator """rankbased""" +594 95 dataset """kinships""" +594 95 model """rotate""" +594 
95 loss """marginranking""" +594 95 regularizer """no""" +594 95 optimizer """adadelta""" +594 95 training_loop """owa""" +594 95 negative_sampler """basic""" +594 95 evaluator """rankbased""" +594 96 dataset """kinships""" +594 96 model """rotate""" +594 96 loss """marginranking""" +594 96 regularizer """no""" +594 96 optimizer """adadelta""" +594 96 training_loop """owa""" +594 96 negative_sampler """basic""" +594 96 evaluator """rankbased""" +594 97 dataset """kinships""" +594 97 model """rotate""" +594 97 loss """marginranking""" +594 97 regularizer """no""" +594 97 optimizer """adadelta""" +594 97 training_loop """owa""" +594 97 negative_sampler """basic""" +594 97 evaluator """rankbased""" +594 98 dataset """kinships""" +594 98 model """rotate""" +594 98 loss """marginranking""" +594 98 regularizer """no""" +594 98 optimizer """adadelta""" +594 98 training_loop """owa""" +594 98 negative_sampler """basic""" +594 98 evaluator """rankbased""" +594 99 dataset """kinships""" +594 99 model """rotate""" +594 99 loss """marginranking""" +594 99 regularizer """no""" +594 99 optimizer """adadelta""" +594 99 training_loop """owa""" +594 99 negative_sampler """basic""" +594 99 evaluator """rankbased""" +594 100 dataset """kinships""" +594 100 model """rotate""" +594 100 loss """marginranking""" +594 100 regularizer """no""" +594 100 optimizer """adadelta""" +594 100 training_loop """owa""" +594 100 negative_sampler """basic""" +594 100 evaluator """rankbased""" +595 1 model.embedding_dim 0.0 +595 1 loss.margin 28.977784880842307 +595 1 loss.adversarial_temperature 0.5419759449382076 +595 1 negative_sampler.num_negs_per_pos 76.0 +595 1 training.batch_size 0.0 +595 2 model.embedding_dim 2.0 +595 2 loss.margin 5.703869260652968 +595 2 loss.adversarial_temperature 0.6371159165628848 +595 2 negative_sampler.num_negs_per_pos 35.0 +595 2 training.batch_size 2.0 +595 3 model.embedding_dim 2.0 +595 3 loss.margin 8.267772428070788 +595 3 loss.adversarial_temperature 
0.8855845898090688 +595 3 negative_sampler.num_negs_per_pos 69.0 +595 3 training.batch_size 1.0 +595 4 model.embedding_dim 2.0 +595 4 loss.margin 29.275630154721632 +595 4 loss.adversarial_temperature 0.7917124223509338 +595 4 negative_sampler.num_negs_per_pos 52.0 +595 4 training.batch_size 2.0 +595 5 model.embedding_dim 1.0 +595 5 loss.margin 1.929308844879466 +595 5 loss.adversarial_temperature 0.7774717000595447 +595 5 negative_sampler.num_negs_per_pos 45.0 +595 5 training.batch_size 1.0 +595 6 model.embedding_dim 0.0 +595 6 loss.margin 9.983697123209291 +595 6 loss.adversarial_temperature 0.118951473967639 +595 6 negative_sampler.num_negs_per_pos 95.0 +595 6 training.batch_size 0.0 +595 7 model.embedding_dim 0.0 +595 7 loss.margin 5.596144330596076 +595 7 loss.adversarial_temperature 0.9582990856567181 +595 7 negative_sampler.num_negs_per_pos 58.0 +595 7 training.batch_size 0.0 +595 8 model.embedding_dim 1.0 +595 8 loss.margin 20.142340192848117 +595 8 loss.adversarial_temperature 0.21616765486791856 +595 8 negative_sampler.num_negs_per_pos 69.0 +595 8 training.batch_size 1.0 +595 9 model.embedding_dim 0.0 +595 9 loss.margin 1.6529717940778377 +595 9 loss.adversarial_temperature 0.964328504674612 +595 9 negative_sampler.num_negs_per_pos 42.0 +595 9 training.batch_size 2.0 +595 10 model.embedding_dim 1.0 +595 10 loss.margin 16.767664812705014 +595 10 loss.adversarial_temperature 0.3405808117456918 +595 10 negative_sampler.num_negs_per_pos 56.0 +595 10 training.batch_size 0.0 +595 11 model.embedding_dim 2.0 +595 11 loss.margin 21.388327918009832 +595 11 loss.adversarial_temperature 0.6874864524374608 +595 11 negative_sampler.num_negs_per_pos 35.0 +595 11 training.batch_size 0.0 +595 12 model.embedding_dim 0.0 +595 12 loss.margin 24.60524461441211 +595 12 loss.adversarial_temperature 0.5564383729918877 +595 12 negative_sampler.num_negs_per_pos 63.0 +595 12 training.batch_size 0.0 +595 13 model.embedding_dim 0.0 +595 13 loss.margin 1.8455535855040177 +595 13 
loss.adversarial_temperature 0.2188437513071514 +595 13 negative_sampler.num_negs_per_pos 46.0 +595 13 training.batch_size 0.0 +595 14 model.embedding_dim 1.0 +595 14 loss.margin 23.298855404425478 +595 14 loss.adversarial_temperature 0.20391827874005386 +595 14 negative_sampler.num_negs_per_pos 89.0 +595 14 training.batch_size 0.0 +595 15 model.embedding_dim 0.0 +595 15 loss.margin 24.616322785682893 +595 15 loss.adversarial_temperature 0.5907929004254239 +595 15 negative_sampler.num_negs_per_pos 54.0 +595 15 training.batch_size 0.0 +595 16 model.embedding_dim 1.0 +595 16 loss.margin 29.548027308401526 +595 16 loss.adversarial_temperature 0.1666140653053173 +595 16 negative_sampler.num_negs_per_pos 50.0 +595 16 training.batch_size 0.0 +595 17 model.embedding_dim 2.0 +595 17 loss.margin 19.228381262844586 +595 17 loss.adversarial_temperature 0.2830045188351872 +595 17 negative_sampler.num_negs_per_pos 24.0 +595 17 training.batch_size 1.0 +595 18 model.embedding_dim 0.0 +595 18 loss.margin 7.838637839076654 +595 18 loss.adversarial_temperature 0.5314115711616102 +595 18 negative_sampler.num_negs_per_pos 60.0 +595 18 training.batch_size 2.0 +595 19 model.embedding_dim 1.0 +595 19 loss.margin 20.48068322420939 +595 19 loss.adversarial_temperature 0.3039348350775175 +595 19 negative_sampler.num_negs_per_pos 71.0 +595 19 training.batch_size 2.0 +595 20 model.embedding_dim 0.0 +595 20 loss.margin 14.69024029611557 +595 20 loss.adversarial_temperature 0.18220623487350834 +595 20 negative_sampler.num_negs_per_pos 20.0 +595 20 training.batch_size 2.0 +595 21 model.embedding_dim 0.0 +595 21 loss.margin 27.725952167257002 +595 21 loss.adversarial_temperature 0.9905967592701389 +595 21 negative_sampler.num_negs_per_pos 0.0 +595 21 training.batch_size 2.0 +595 22 model.embedding_dim 1.0 +595 22 loss.margin 20.455113744603278 +595 22 loss.adversarial_temperature 0.40114354454355394 +595 22 negative_sampler.num_negs_per_pos 5.0 +595 22 training.batch_size 0.0 +595 23 
model.embedding_dim 2.0 +595 23 loss.margin 21.719919184404073 +595 23 loss.adversarial_temperature 0.3962739225727818 +595 23 negative_sampler.num_negs_per_pos 14.0 +595 23 training.batch_size 2.0 +595 24 model.embedding_dim 1.0 +595 24 loss.margin 18.979247524382576 +595 24 loss.adversarial_temperature 0.4738050824901535 +595 24 negative_sampler.num_negs_per_pos 29.0 +595 24 training.batch_size 0.0 +595 25 model.embedding_dim 1.0 +595 25 loss.margin 14.545432410284059 +595 25 loss.adversarial_temperature 0.20754816306572937 +595 25 negative_sampler.num_negs_per_pos 50.0 +595 25 training.batch_size 2.0 +595 26 model.embedding_dim 0.0 +595 26 loss.margin 15.216022649602332 +595 26 loss.adversarial_temperature 0.9333364960291749 +595 26 negative_sampler.num_negs_per_pos 70.0 +595 26 training.batch_size 2.0 +595 27 model.embedding_dim 1.0 +595 27 loss.margin 21.41962797397325 +595 27 loss.adversarial_temperature 0.18157636831286528 +595 27 negative_sampler.num_negs_per_pos 80.0 +595 27 training.batch_size 2.0 +595 28 model.embedding_dim 2.0 +595 28 loss.margin 12.481588673417509 +595 28 loss.adversarial_temperature 0.27895347879075405 +595 28 negative_sampler.num_negs_per_pos 69.0 +595 28 training.batch_size 2.0 +595 29 model.embedding_dim 0.0 +595 29 loss.margin 11.253640642577025 +595 29 loss.adversarial_temperature 0.5293319503371554 +595 29 negative_sampler.num_negs_per_pos 29.0 +595 29 training.batch_size 0.0 +595 30 model.embedding_dim 1.0 +595 30 loss.margin 15.871852520104234 +595 30 loss.adversarial_temperature 0.6978440799783426 +595 30 negative_sampler.num_negs_per_pos 92.0 +595 30 training.batch_size 0.0 +595 31 model.embedding_dim 0.0 +595 31 loss.margin 1.274143327778306 +595 31 loss.adversarial_temperature 0.9705207827093725 +595 31 negative_sampler.num_negs_per_pos 94.0 +595 31 training.batch_size 1.0 +595 32 model.embedding_dim 1.0 +595 32 loss.margin 28.225344852980204 +595 32 loss.adversarial_temperature 0.2268481218304565 +595 32 
negative_sampler.num_negs_per_pos 99.0 +595 32 training.batch_size 2.0 +595 33 model.embedding_dim 2.0 +595 33 loss.margin 28.515976238438334 +595 33 loss.adversarial_temperature 0.8170931491839107 +595 33 negative_sampler.num_negs_per_pos 16.0 +595 33 training.batch_size 2.0 +595 34 model.embedding_dim 2.0 +595 34 loss.margin 5.54447614916381 +595 34 loss.adversarial_temperature 0.46260330673026917 +595 34 negative_sampler.num_negs_per_pos 3.0 +595 34 training.batch_size 2.0 +595 35 model.embedding_dim 0.0 +595 35 loss.margin 22.895584699115584 +595 35 loss.adversarial_temperature 0.6137722851300372 +595 35 negative_sampler.num_negs_per_pos 25.0 +595 35 training.batch_size 0.0 +595 36 model.embedding_dim 1.0 +595 36 loss.margin 5.754621716357807 +595 36 loss.adversarial_temperature 0.6993188023721265 +595 36 negative_sampler.num_negs_per_pos 46.0 +595 36 training.batch_size 1.0 +595 37 model.embedding_dim 2.0 +595 37 loss.margin 18.73876638915944 +595 37 loss.adversarial_temperature 0.1855698023652345 +595 37 negative_sampler.num_negs_per_pos 89.0 +595 37 training.batch_size 2.0 +595 38 model.embedding_dim 2.0 +595 38 loss.margin 12.32678559137992 +595 38 loss.adversarial_temperature 0.9269608308547226 +595 38 negative_sampler.num_negs_per_pos 84.0 +595 38 training.batch_size 0.0 +595 39 model.embedding_dim 2.0 +595 39 loss.margin 5.008324859803579 +595 39 loss.adversarial_temperature 0.8431493604944442 +595 39 negative_sampler.num_negs_per_pos 38.0 +595 39 training.batch_size 0.0 +595 40 model.embedding_dim 2.0 +595 40 loss.margin 9.32669431428149 +595 40 loss.adversarial_temperature 0.7827949558644711 +595 40 negative_sampler.num_negs_per_pos 85.0 +595 40 training.batch_size 2.0 +595 41 model.embedding_dim 1.0 +595 41 loss.margin 24.929393503162224 +595 41 loss.adversarial_temperature 0.472337413025937 +595 41 negative_sampler.num_negs_per_pos 44.0 +595 41 training.batch_size 1.0 +595 42 model.embedding_dim 1.0 +595 42 loss.margin 9.556869051159843 +595 42 
loss.adversarial_temperature 0.2532465036570363 +595 42 negative_sampler.num_negs_per_pos 21.0 +595 42 training.batch_size 0.0 +595 43 model.embedding_dim 2.0 +595 43 loss.margin 18.62424677117892 +595 43 loss.adversarial_temperature 0.3442304094116959 +595 43 negative_sampler.num_negs_per_pos 15.0 +595 43 training.batch_size 2.0 +595 44 model.embedding_dim 2.0 +595 44 loss.margin 24.112249224927833 +595 44 loss.adversarial_temperature 0.6307215594339818 +595 44 negative_sampler.num_negs_per_pos 85.0 +595 44 training.batch_size 0.0 +595 45 model.embedding_dim 2.0 +595 45 loss.margin 13.346171733602283 +595 45 loss.adversarial_temperature 0.816945805366508 +595 45 negative_sampler.num_negs_per_pos 18.0 +595 45 training.batch_size 2.0 +595 46 model.embedding_dim 1.0 +595 46 loss.margin 23.44678733067011 +595 46 loss.adversarial_temperature 0.5895864572210902 +595 46 negative_sampler.num_negs_per_pos 77.0 +595 46 training.batch_size 0.0 +595 47 model.embedding_dim 1.0 +595 47 loss.margin 1.2185282808692182 +595 47 loss.adversarial_temperature 0.134752480986976 +595 47 negative_sampler.num_negs_per_pos 96.0 +595 47 training.batch_size 0.0 +595 48 model.embedding_dim 0.0 +595 48 loss.margin 11.665280912004079 +595 48 loss.adversarial_temperature 0.15155824448977817 +595 48 negative_sampler.num_negs_per_pos 71.0 +595 48 training.batch_size 2.0 +595 49 model.embedding_dim 2.0 +595 49 loss.margin 13.338740644290388 +595 49 loss.adversarial_temperature 0.29946910111856645 +595 49 negative_sampler.num_negs_per_pos 58.0 +595 49 training.batch_size 1.0 +595 50 model.embedding_dim 1.0 +595 50 loss.margin 19.132620537549506 +595 50 loss.adversarial_temperature 0.5483850377703582 +595 50 negative_sampler.num_negs_per_pos 9.0 +595 50 training.batch_size 0.0 +595 51 model.embedding_dim 0.0 +595 51 loss.margin 11.245589902503136 +595 51 loss.adversarial_temperature 0.7873110331687605 +595 51 negative_sampler.num_negs_per_pos 73.0 +595 51 training.batch_size 1.0 +595 52 
model.embedding_dim 2.0 +595 52 loss.margin 12.816653524238749 +595 52 loss.adversarial_temperature 0.41984096517606484 +595 52 negative_sampler.num_negs_per_pos 26.0 +595 52 training.batch_size 0.0 +595 53 model.embedding_dim 1.0 +595 53 loss.margin 24.930899389110156 +595 53 loss.adversarial_temperature 0.15103404671845622 +595 53 negative_sampler.num_negs_per_pos 90.0 +595 53 training.batch_size 1.0 +595 54 model.embedding_dim 2.0 +595 54 loss.margin 13.60613494021138 +595 54 loss.adversarial_temperature 0.2945993486682458 +595 54 negative_sampler.num_negs_per_pos 70.0 +595 54 training.batch_size 0.0 +595 55 model.embedding_dim 1.0 +595 55 loss.margin 7.239209554626973 +595 55 loss.adversarial_temperature 0.5219793332978993 +595 55 negative_sampler.num_negs_per_pos 65.0 +595 55 training.batch_size 1.0 +595 56 model.embedding_dim 2.0 +595 56 loss.margin 3.877578109797564 +595 56 loss.adversarial_temperature 0.9749667456825257 +595 56 negative_sampler.num_negs_per_pos 59.0 +595 56 training.batch_size 0.0 +595 57 model.embedding_dim 1.0 +595 57 loss.margin 14.703325082243603 +595 57 loss.adversarial_temperature 0.554094415339317 +595 57 negative_sampler.num_negs_per_pos 98.0 +595 57 training.batch_size 1.0 +595 58 model.embedding_dim 1.0 +595 58 loss.margin 2.597940718051519 +595 58 loss.adversarial_temperature 0.2673290067245961 +595 58 negative_sampler.num_negs_per_pos 27.0 +595 58 training.batch_size 1.0 +595 59 model.embedding_dim 0.0 +595 59 loss.margin 25.16692432173018 +595 59 loss.adversarial_temperature 0.634145706016403 +595 59 negative_sampler.num_negs_per_pos 74.0 +595 59 training.batch_size 2.0 +595 60 model.embedding_dim 0.0 +595 60 loss.margin 5.291008004255285 +595 60 loss.adversarial_temperature 0.5996020075707317 +595 60 negative_sampler.num_negs_per_pos 49.0 +595 60 training.batch_size 0.0 +595 61 model.embedding_dim 0.0 +595 61 loss.margin 2.059475044025105 +595 61 loss.adversarial_temperature 0.10375983253488907 +595 61 
negative_sampler.num_negs_per_pos 40.0 +595 61 training.batch_size 0.0 +595 62 model.embedding_dim 1.0 +595 62 loss.margin 18.681935630970052 +595 62 loss.adversarial_temperature 0.3582150895435564 +595 62 negative_sampler.num_negs_per_pos 43.0 +595 62 training.batch_size 2.0 +595 63 model.embedding_dim 0.0 +595 63 loss.margin 29.21333988548499 +595 63 loss.adversarial_temperature 0.12190274569920569 +595 63 negative_sampler.num_negs_per_pos 58.0 +595 63 training.batch_size 0.0 +595 64 model.embedding_dim 0.0 +595 64 loss.margin 13.900823075509528 +595 64 loss.adversarial_temperature 0.2539025555386737 +595 64 negative_sampler.num_negs_per_pos 29.0 +595 64 training.batch_size 0.0 +595 65 model.embedding_dim 0.0 +595 65 loss.margin 25.458249662327137 +595 65 loss.adversarial_temperature 0.6261800360866155 +595 65 negative_sampler.num_negs_per_pos 81.0 +595 65 training.batch_size 0.0 +595 66 model.embedding_dim 2.0 +595 66 loss.margin 21.92060073059274 +595 66 loss.adversarial_temperature 0.3222936649789192 +595 66 negative_sampler.num_negs_per_pos 16.0 +595 66 training.batch_size 0.0 +595 67 model.embedding_dim 2.0 +595 67 loss.margin 26.279351444824126 +595 67 loss.adversarial_temperature 0.5461698529163806 +595 67 negative_sampler.num_negs_per_pos 65.0 +595 67 training.batch_size 0.0 +595 68 model.embedding_dim 2.0 +595 68 loss.margin 27.52504741753074 +595 68 loss.adversarial_temperature 0.3886438531265971 +595 68 negative_sampler.num_negs_per_pos 54.0 +595 68 training.batch_size 2.0 +595 69 model.embedding_dim 0.0 +595 69 loss.margin 23.39165058294342 +595 69 loss.adversarial_temperature 0.7668873064408904 +595 69 negative_sampler.num_negs_per_pos 38.0 +595 69 training.batch_size 0.0 +595 70 model.embedding_dim 1.0 +595 70 loss.margin 11.770540465818957 +595 70 loss.adversarial_temperature 0.19252846533679308 +595 70 negative_sampler.num_negs_per_pos 81.0 +595 70 training.batch_size 0.0 +595 71 model.embedding_dim 1.0 +595 71 loss.margin 16.49631148003524 +595 
71 loss.adversarial_temperature 0.19845592124680178 +595 71 negative_sampler.num_negs_per_pos 53.0 +595 71 training.batch_size 1.0 +595 72 model.embedding_dim 0.0 +595 72 loss.margin 27.183797040296344 +595 72 loss.adversarial_temperature 0.37952406652793425 +595 72 negative_sampler.num_negs_per_pos 25.0 +595 72 training.batch_size 0.0 +595 73 model.embedding_dim 0.0 +595 73 loss.margin 11.33615714474657 +595 73 loss.adversarial_temperature 0.24813094925855916 +595 73 negative_sampler.num_negs_per_pos 81.0 +595 73 training.batch_size 0.0 +595 74 model.embedding_dim 0.0 +595 74 loss.margin 25.670462358045583 +595 74 loss.adversarial_temperature 0.678529315547654 +595 74 negative_sampler.num_negs_per_pos 60.0 +595 74 training.batch_size 2.0 +595 75 model.embedding_dim 1.0 +595 75 loss.margin 7.7322856326978044 +595 75 loss.adversarial_temperature 0.9659929779670363 +595 75 negative_sampler.num_negs_per_pos 87.0 +595 75 training.batch_size 2.0 +595 76 model.embedding_dim 1.0 +595 76 loss.margin 20.63688804950544 +595 76 loss.adversarial_temperature 0.20835665726405733 +595 76 negative_sampler.num_negs_per_pos 66.0 +595 76 training.batch_size 2.0 +595 77 model.embedding_dim 1.0 +595 77 loss.margin 26.96441163262179 +595 77 loss.adversarial_temperature 0.41407120649782725 +595 77 negative_sampler.num_negs_per_pos 57.0 +595 77 training.batch_size 1.0 +595 78 model.embedding_dim 2.0 +595 78 loss.margin 18.262824811198527 +595 78 loss.adversarial_temperature 0.7840556206595964 +595 78 negative_sampler.num_negs_per_pos 89.0 +595 78 training.batch_size 0.0 +595 79 model.embedding_dim 1.0 +595 79 loss.margin 28.834420799752387 +595 79 loss.adversarial_temperature 0.6013250092949878 +595 79 negative_sampler.num_negs_per_pos 73.0 +595 79 training.batch_size 1.0 +595 80 model.embedding_dim 0.0 +595 80 loss.margin 2.4711608844764763 +595 80 loss.adversarial_temperature 0.1557608727323453 +595 80 negative_sampler.num_negs_per_pos 46.0 +595 80 training.batch_size 2.0 +595 81 
model.embedding_dim 0.0 +595 81 loss.margin 6.709906360010262 +595 81 loss.adversarial_temperature 0.4640089745373175 +595 81 negative_sampler.num_negs_per_pos 73.0 +595 81 training.batch_size 2.0 +595 82 model.embedding_dim 1.0 +595 82 loss.margin 8.439085353926018 +595 82 loss.adversarial_temperature 0.6265027014200413 +595 82 negative_sampler.num_negs_per_pos 74.0 +595 82 training.batch_size 2.0 +595 83 model.embedding_dim 2.0 +595 83 loss.margin 27.569119229026672 +595 83 loss.adversarial_temperature 0.26268061738300985 +595 83 negative_sampler.num_negs_per_pos 29.0 +595 83 training.batch_size 2.0 +595 84 model.embedding_dim 0.0 +595 84 loss.margin 12.44535446445024 +595 84 loss.adversarial_temperature 0.5968573760671926 +595 84 negative_sampler.num_negs_per_pos 6.0 +595 84 training.batch_size 2.0 +595 85 model.embedding_dim 1.0 +595 85 loss.margin 10.91266199796194 +595 85 loss.adversarial_temperature 0.3606114609890625 +595 85 negative_sampler.num_negs_per_pos 2.0 +595 85 training.batch_size 1.0 +595 86 model.embedding_dim 0.0 +595 86 loss.margin 12.664589606740993 +595 86 loss.adversarial_temperature 0.10368571229815653 +595 86 negative_sampler.num_negs_per_pos 20.0 +595 86 training.batch_size 0.0 +595 87 model.embedding_dim 2.0 +595 87 loss.margin 14.888819874895342 +595 87 loss.adversarial_temperature 0.8480838330397141 +595 87 negative_sampler.num_negs_per_pos 11.0 +595 87 training.batch_size 1.0 +595 88 model.embedding_dim 1.0 +595 88 loss.margin 18.755783292362047 +595 88 loss.adversarial_temperature 0.7296787828402964 +595 88 negative_sampler.num_negs_per_pos 41.0 +595 88 training.batch_size 1.0 +595 89 model.embedding_dim 2.0 +595 89 loss.margin 1.4983878756216769 +595 89 loss.adversarial_temperature 0.6522648609530274 +595 89 negative_sampler.num_negs_per_pos 67.0 +595 89 training.batch_size 0.0 +595 90 model.embedding_dim 0.0 +595 90 loss.margin 21.123675758490428 +595 90 loss.adversarial_temperature 0.9630029407978383 +595 90 
negative_sampler.num_negs_per_pos 27.0 +595 90 training.batch_size 0.0 +595 91 model.embedding_dim 2.0 +595 91 loss.margin 29.894217236930256 +595 91 loss.adversarial_temperature 0.6700744125660257 +595 91 negative_sampler.num_negs_per_pos 43.0 +595 91 training.batch_size 0.0 +595 92 model.embedding_dim 1.0 +595 92 loss.margin 15.13772953231174 +595 92 loss.adversarial_temperature 0.5906469477215097 +595 92 negative_sampler.num_negs_per_pos 47.0 +595 92 training.batch_size 2.0 +595 93 model.embedding_dim 1.0 +595 93 loss.margin 22.198553227653772 +595 93 loss.adversarial_temperature 0.4254461692330652 +595 93 negative_sampler.num_negs_per_pos 51.0 +595 93 training.batch_size 1.0 +595 94 model.embedding_dim 2.0 +595 94 loss.margin 19.00386119344804 +595 94 loss.adversarial_temperature 0.5846811346406551 +595 94 negative_sampler.num_negs_per_pos 16.0 +595 94 training.batch_size 2.0 +595 95 model.embedding_dim 1.0 +595 95 loss.margin 7.839597266618456 +595 95 loss.adversarial_temperature 0.9956419232656831 +595 95 negative_sampler.num_negs_per_pos 83.0 +595 95 training.batch_size 0.0 +595 96 model.embedding_dim 2.0 +595 96 loss.margin 7.241205788455102 +595 96 loss.adversarial_temperature 0.3304742176645783 +595 96 negative_sampler.num_negs_per_pos 57.0 +595 96 training.batch_size 1.0 +595 97 model.embedding_dim 0.0 +595 97 loss.margin 6.9487733400138785 +595 97 loss.adversarial_temperature 0.9794435292272505 +595 97 negative_sampler.num_negs_per_pos 92.0 +595 97 training.batch_size 2.0 +595 98 model.embedding_dim 1.0 +595 98 loss.margin 1.0052105242936813 +595 98 loss.adversarial_temperature 0.7343491372361188 +595 98 negative_sampler.num_negs_per_pos 5.0 +595 98 training.batch_size 0.0 +595 99 model.embedding_dim 0.0 +595 99 loss.margin 19.664094776253854 +595 99 loss.adversarial_temperature 0.36986500795807564 +595 99 negative_sampler.num_negs_per_pos 77.0 +595 99 training.batch_size 0.0 +595 100 model.embedding_dim 0.0 +595 100 loss.margin 15.105466479360372 +595 
100 loss.adversarial_temperature 0.8575632291372547 +595 100 negative_sampler.num_negs_per_pos 4.0 +595 100 training.batch_size 1.0 +595 1 dataset """kinships""" +595 1 model """rotate""" +595 1 loss """nssa""" +595 1 regularizer """no""" +595 1 optimizer """adadelta""" +595 1 training_loop """owa""" +595 1 negative_sampler """basic""" +595 1 evaluator """rankbased""" +595 2 dataset """kinships""" +595 2 model """rotate""" +595 2 loss """nssa""" +595 2 regularizer """no""" +595 2 optimizer """adadelta""" +595 2 training_loop """owa""" +595 2 negative_sampler """basic""" +595 2 evaluator """rankbased""" +595 3 dataset """kinships""" +595 3 model """rotate""" +595 3 loss """nssa""" +595 3 regularizer """no""" +595 3 optimizer """adadelta""" +595 3 training_loop """owa""" +595 3 negative_sampler """basic""" +595 3 evaluator """rankbased""" +595 4 dataset """kinships""" +595 4 model """rotate""" +595 4 loss """nssa""" +595 4 regularizer """no""" +595 4 optimizer """adadelta""" +595 4 training_loop """owa""" +595 4 negative_sampler """basic""" +595 4 evaluator """rankbased""" +595 5 dataset """kinships""" +595 5 model """rotate""" +595 5 loss """nssa""" +595 5 regularizer """no""" +595 5 optimizer """adadelta""" +595 5 training_loop """owa""" +595 5 negative_sampler """basic""" +595 5 evaluator """rankbased""" +595 6 dataset """kinships""" +595 6 model """rotate""" +595 6 loss """nssa""" +595 6 regularizer """no""" +595 6 optimizer """adadelta""" +595 6 training_loop """owa""" +595 6 negative_sampler """basic""" +595 6 evaluator """rankbased""" +595 7 dataset """kinships""" +595 7 model """rotate""" +595 7 loss """nssa""" +595 7 regularizer """no""" +595 7 optimizer """adadelta""" +595 7 training_loop """owa""" +595 7 negative_sampler """basic""" +595 7 evaluator """rankbased""" +595 8 dataset """kinships""" +595 8 model """rotate""" +595 8 loss """nssa""" +595 8 regularizer """no""" +595 8 optimizer """adadelta""" +595 8 training_loop """owa""" +595 8 negative_sampler 
"""basic""" +595 8 evaluator """rankbased""" +595 9 dataset """kinships""" +595 9 model """rotate""" +595 9 loss """nssa""" +595 9 regularizer """no""" +595 9 optimizer """adadelta""" +595 9 training_loop """owa""" +595 9 negative_sampler """basic""" +595 9 evaluator """rankbased""" +595 10 dataset """kinships""" +595 10 model """rotate""" +595 10 loss """nssa""" +595 10 regularizer """no""" +595 10 optimizer """adadelta""" +595 10 training_loop """owa""" +595 10 negative_sampler """basic""" +595 10 evaluator """rankbased""" +595 11 dataset """kinships""" +595 11 model """rotate""" +595 11 loss """nssa""" +595 11 regularizer """no""" +595 11 optimizer """adadelta""" +595 11 training_loop """owa""" +595 11 negative_sampler """basic""" +595 11 evaluator """rankbased""" +595 12 dataset """kinships""" +595 12 model """rotate""" +595 12 loss """nssa""" +595 12 regularizer """no""" +595 12 optimizer """adadelta""" +595 12 training_loop """owa""" +595 12 negative_sampler """basic""" +595 12 evaluator """rankbased""" +595 13 dataset """kinships""" +595 13 model """rotate""" +595 13 loss """nssa""" +595 13 regularizer """no""" +595 13 optimizer """adadelta""" +595 13 training_loop """owa""" +595 13 negative_sampler """basic""" +595 13 evaluator """rankbased""" +595 14 dataset """kinships""" +595 14 model """rotate""" +595 14 loss """nssa""" +595 14 regularizer """no""" +595 14 optimizer """adadelta""" +595 14 training_loop """owa""" +595 14 negative_sampler """basic""" +595 14 evaluator """rankbased""" +595 15 dataset """kinships""" +595 15 model """rotate""" +595 15 loss """nssa""" +595 15 regularizer """no""" +595 15 optimizer """adadelta""" +595 15 training_loop """owa""" +595 15 negative_sampler """basic""" +595 15 evaluator """rankbased""" +595 16 dataset """kinships""" +595 16 model """rotate""" +595 16 loss """nssa""" +595 16 regularizer """no""" +595 16 optimizer """adadelta""" +595 16 training_loop """owa""" +595 16 negative_sampler """basic""" +595 16 evaluator 
"""rankbased""" +595 17 dataset """kinships""" +595 17 model """rotate""" +595 17 loss """nssa""" +595 17 regularizer """no""" +595 17 optimizer """adadelta""" +595 17 training_loop """owa""" +595 17 negative_sampler """basic""" +595 17 evaluator """rankbased""" +595 18 dataset """kinships""" +595 18 model """rotate""" +595 18 loss """nssa""" +595 18 regularizer """no""" +595 18 optimizer """adadelta""" +595 18 training_loop """owa""" +595 18 negative_sampler """basic""" +595 18 evaluator """rankbased""" +595 19 dataset """kinships""" +595 19 model """rotate""" +595 19 loss """nssa""" +595 19 regularizer """no""" +595 19 optimizer """adadelta""" +595 19 training_loop """owa""" +595 19 negative_sampler """basic""" +595 19 evaluator """rankbased""" +595 20 dataset """kinships""" +595 20 model """rotate""" +595 20 loss """nssa""" +595 20 regularizer """no""" +595 20 optimizer """adadelta""" +595 20 training_loop """owa""" +595 20 negative_sampler """basic""" +595 20 evaluator """rankbased""" +595 21 dataset """kinships""" +595 21 model """rotate""" +595 21 loss """nssa""" +595 21 regularizer """no""" +595 21 optimizer """adadelta""" +595 21 training_loop """owa""" +595 21 negative_sampler """basic""" +595 21 evaluator """rankbased""" +595 22 dataset """kinships""" +595 22 model """rotate""" +595 22 loss """nssa""" +595 22 regularizer """no""" +595 22 optimizer """adadelta""" +595 22 training_loop """owa""" +595 22 negative_sampler """basic""" +595 22 evaluator """rankbased""" +595 23 dataset """kinships""" +595 23 model """rotate""" +595 23 loss """nssa""" +595 23 regularizer """no""" +595 23 optimizer """adadelta""" +595 23 training_loop """owa""" +595 23 negative_sampler """basic""" +595 23 evaluator """rankbased""" +595 24 dataset """kinships""" +595 24 model """rotate""" +595 24 loss """nssa""" +595 24 regularizer """no""" +595 24 optimizer """adadelta""" +595 24 training_loop """owa""" +595 24 negative_sampler """basic""" +595 24 evaluator """rankbased""" +595 25 
dataset """kinships""" +595 25 model """rotate""" +595 25 loss """nssa""" +595 25 regularizer """no""" +595 25 optimizer """adadelta""" +595 25 training_loop """owa""" +595 25 negative_sampler """basic""" +595 25 evaluator """rankbased""" +595 26 dataset """kinships""" +595 26 model """rotate""" +595 26 loss """nssa""" +595 26 regularizer """no""" +595 26 optimizer """adadelta""" +595 26 training_loop """owa""" +595 26 negative_sampler """basic""" +595 26 evaluator """rankbased""" +595 27 dataset """kinships""" +595 27 model """rotate""" +595 27 loss """nssa""" +595 27 regularizer """no""" +595 27 optimizer """adadelta""" +595 27 training_loop """owa""" +595 27 negative_sampler """basic""" +595 27 evaluator """rankbased""" +595 28 dataset """kinships""" +595 28 model """rotate""" +595 28 loss """nssa""" +595 28 regularizer """no""" +595 28 optimizer """adadelta""" +595 28 training_loop """owa""" +595 28 negative_sampler """basic""" +595 28 evaluator """rankbased""" +595 29 dataset """kinships""" +595 29 model """rotate""" +595 29 loss """nssa""" +595 29 regularizer """no""" +595 29 optimizer """adadelta""" +595 29 training_loop """owa""" +595 29 negative_sampler """basic""" +595 29 evaluator """rankbased""" +595 30 dataset """kinships""" +595 30 model """rotate""" +595 30 loss """nssa""" +595 30 regularizer """no""" +595 30 optimizer """adadelta""" +595 30 training_loop """owa""" +595 30 negative_sampler """basic""" +595 30 evaluator """rankbased""" +595 31 dataset """kinships""" +595 31 model """rotate""" +595 31 loss """nssa""" +595 31 regularizer """no""" +595 31 optimizer """adadelta""" +595 31 training_loop """owa""" +595 31 negative_sampler """basic""" +595 31 evaluator """rankbased""" +595 32 dataset """kinships""" +595 32 model """rotate""" +595 32 loss """nssa""" +595 32 regularizer """no""" +595 32 optimizer """adadelta""" +595 32 training_loop """owa""" +595 32 negative_sampler """basic""" +595 32 evaluator """rankbased""" +595 33 dataset """kinships""" 
+595 33 model """rotate""" +595 33 loss """nssa""" +595 33 regularizer """no""" +595 33 optimizer """adadelta""" +595 33 training_loop """owa""" +595 33 negative_sampler """basic""" +595 33 evaluator """rankbased""" +595 34 dataset """kinships""" +595 34 model """rotate""" +595 34 loss """nssa""" +595 34 regularizer """no""" +595 34 optimizer """adadelta""" +595 34 training_loop """owa""" +595 34 negative_sampler """basic""" +595 34 evaluator """rankbased""" +595 35 dataset """kinships""" +595 35 model """rotate""" +595 35 loss """nssa""" +595 35 regularizer """no""" +595 35 optimizer """adadelta""" +595 35 training_loop """owa""" +595 35 negative_sampler """basic""" +595 35 evaluator """rankbased""" +595 36 dataset """kinships""" +595 36 model """rotate""" +595 36 loss """nssa""" +595 36 regularizer """no""" +595 36 optimizer """adadelta""" +595 36 training_loop """owa""" +595 36 negative_sampler """basic""" +595 36 evaluator """rankbased""" +595 37 dataset """kinships""" +595 37 model """rotate""" +595 37 loss """nssa""" +595 37 regularizer """no""" +595 37 optimizer """adadelta""" +595 37 training_loop """owa""" +595 37 negative_sampler """basic""" +595 37 evaluator """rankbased""" +595 38 dataset """kinships""" +595 38 model """rotate""" +595 38 loss """nssa""" +595 38 regularizer """no""" +595 38 optimizer """adadelta""" +595 38 training_loop """owa""" +595 38 negative_sampler """basic""" +595 38 evaluator """rankbased""" +595 39 dataset """kinships""" +595 39 model """rotate""" +595 39 loss """nssa""" +595 39 regularizer """no""" +595 39 optimizer """adadelta""" +595 39 training_loop """owa""" +595 39 negative_sampler """basic""" +595 39 evaluator """rankbased""" +595 40 dataset """kinships""" +595 40 model """rotate""" +595 40 loss """nssa""" +595 40 regularizer """no""" +595 40 optimizer """adadelta""" +595 40 training_loop """owa""" +595 40 negative_sampler """basic""" +595 40 evaluator """rankbased""" +595 41 dataset """kinships""" +595 41 model 
"""rotate""" +595 41 loss """nssa""" +595 41 regularizer """no""" +595 41 optimizer """adadelta""" +595 41 training_loop """owa""" +595 41 negative_sampler """basic""" +595 41 evaluator """rankbased""" +595 42 dataset """kinships""" +595 42 model """rotate""" +595 42 loss """nssa""" +595 42 regularizer """no""" +595 42 optimizer """adadelta""" +595 42 training_loop """owa""" +595 42 negative_sampler """basic""" +595 42 evaluator """rankbased""" +595 43 dataset """kinships""" +595 43 model """rotate""" +595 43 loss """nssa""" +595 43 regularizer """no""" +595 43 optimizer """adadelta""" +595 43 training_loop """owa""" +595 43 negative_sampler """basic""" +595 43 evaluator """rankbased""" +595 44 dataset """kinships""" +595 44 model """rotate""" +595 44 loss """nssa""" +595 44 regularizer """no""" +595 44 optimizer """adadelta""" +595 44 training_loop """owa""" +595 44 negative_sampler """basic""" +595 44 evaluator """rankbased""" +595 45 dataset """kinships""" +595 45 model """rotate""" +595 45 loss """nssa""" +595 45 regularizer """no""" +595 45 optimizer """adadelta""" +595 45 training_loop """owa""" +595 45 negative_sampler """basic""" +595 45 evaluator """rankbased""" +595 46 dataset """kinships""" +595 46 model """rotate""" +595 46 loss """nssa""" +595 46 regularizer """no""" +595 46 optimizer """adadelta""" +595 46 training_loop """owa""" +595 46 negative_sampler """basic""" +595 46 evaluator """rankbased""" +595 47 dataset """kinships""" +595 47 model """rotate""" +595 47 loss """nssa""" +595 47 regularizer """no""" +595 47 optimizer """adadelta""" +595 47 training_loop """owa""" +595 47 negative_sampler """basic""" +595 47 evaluator """rankbased""" +595 48 dataset """kinships""" +595 48 model """rotate""" +595 48 loss """nssa""" +595 48 regularizer """no""" +595 48 optimizer """adadelta""" +595 48 training_loop """owa""" +595 48 negative_sampler """basic""" +595 48 evaluator """rankbased""" +595 49 dataset """kinships""" +595 49 model """rotate""" +595 49 
loss """nssa""" +595 49 regularizer """no""" +595 49 optimizer """adadelta""" +595 49 training_loop """owa""" +595 49 negative_sampler """basic""" +595 49 evaluator """rankbased""" +595 50 dataset """kinships""" +595 50 model """rotate""" +595 50 loss """nssa""" +595 50 regularizer """no""" +595 50 optimizer """adadelta""" +595 50 training_loop """owa""" +595 50 negative_sampler """basic""" +595 50 evaluator """rankbased""" +595 51 dataset """kinships""" +595 51 model """rotate""" +595 51 loss """nssa""" +595 51 regularizer """no""" +595 51 optimizer """adadelta""" +595 51 training_loop """owa""" +595 51 negative_sampler """basic""" +595 51 evaluator """rankbased""" +595 52 dataset """kinships""" +595 52 model """rotate""" +595 52 loss """nssa""" +595 52 regularizer """no""" +595 52 optimizer """adadelta""" +595 52 training_loop """owa""" +595 52 negative_sampler """basic""" +595 52 evaluator """rankbased""" +595 53 dataset """kinships""" +595 53 model """rotate""" +595 53 loss """nssa""" +595 53 regularizer """no""" +595 53 optimizer """adadelta""" +595 53 training_loop """owa""" +595 53 negative_sampler """basic""" +595 53 evaluator """rankbased""" +595 54 dataset """kinships""" +595 54 model """rotate""" +595 54 loss """nssa""" +595 54 regularizer """no""" +595 54 optimizer """adadelta""" +595 54 training_loop """owa""" +595 54 negative_sampler """basic""" +595 54 evaluator """rankbased""" +595 55 dataset """kinships""" +595 55 model """rotate""" +595 55 loss """nssa""" +595 55 regularizer """no""" +595 55 optimizer """adadelta""" +595 55 training_loop """owa""" +595 55 negative_sampler """basic""" +595 55 evaluator """rankbased""" +595 56 dataset """kinships""" +595 56 model """rotate""" +595 56 loss """nssa""" +595 56 regularizer """no""" +595 56 optimizer """adadelta""" +595 56 training_loop """owa""" +595 56 negative_sampler """basic""" +595 56 evaluator """rankbased""" +595 57 dataset """kinships""" +595 57 model """rotate""" +595 57 loss """nssa""" +595 57 
regularizer """no""" +595 57 optimizer """adadelta""" +595 57 training_loop """owa""" +595 57 negative_sampler """basic""" +595 57 evaluator """rankbased""" +595 58 dataset """kinships""" +595 58 model """rotate""" +595 58 loss """nssa""" +595 58 regularizer """no""" +595 58 optimizer """adadelta""" +595 58 training_loop """owa""" +595 58 negative_sampler """basic""" +595 58 evaluator """rankbased""" +595 59 dataset """kinships""" +595 59 model """rotate""" +595 59 loss """nssa""" +595 59 regularizer """no""" +595 59 optimizer """adadelta""" +595 59 training_loop """owa""" +595 59 negative_sampler """basic""" +595 59 evaluator """rankbased""" +595 60 dataset """kinships""" +595 60 model """rotate""" +595 60 loss """nssa""" +595 60 regularizer """no""" +595 60 optimizer """adadelta""" +595 60 training_loop """owa""" +595 60 negative_sampler """basic""" +595 60 evaluator """rankbased""" +595 61 dataset """kinships""" +595 61 model """rotate""" +595 61 loss """nssa""" +595 61 regularizer """no""" +595 61 optimizer """adadelta""" +595 61 training_loop """owa""" +595 61 negative_sampler """basic""" +595 61 evaluator """rankbased""" +595 62 dataset """kinships""" +595 62 model """rotate""" +595 62 loss """nssa""" +595 62 regularizer """no""" +595 62 optimizer """adadelta""" +595 62 training_loop """owa""" +595 62 negative_sampler """basic""" +595 62 evaluator """rankbased""" +595 63 dataset """kinships""" +595 63 model """rotate""" +595 63 loss """nssa""" +595 63 regularizer """no""" +595 63 optimizer """adadelta""" +595 63 training_loop """owa""" +595 63 negative_sampler """basic""" +595 63 evaluator """rankbased""" +595 64 dataset """kinships""" +595 64 model """rotate""" +595 64 loss """nssa""" +595 64 regularizer """no""" +595 64 optimizer """adadelta""" +595 64 training_loop """owa""" +595 64 negative_sampler """basic""" +595 64 evaluator """rankbased""" +595 65 dataset """kinships""" +595 65 model """rotate""" +595 65 loss """nssa""" +595 65 regularizer """no""" 
+595 65 optimizer """adadelta""" +595 65 training_loop """owa""" +595 65 negative_sampler """basic""" +595 65 evaluator """rankbased""" +595 66 dataset """kinships""" +595 66 model """rotate""" +595 66 loss """nssa""" +595 66 regularizer """no""" +595 66 optimizer """adadelta""" +595 66 training_loop """owa""" +595 66 negative_sampler """basic""" +595 66 evaluator """rankbased""" +595 67 dataset """kinships""" +595 67 model """rotate""" +595 67 loss """nssa""" +595 67 regularizer """no""" +595 67 optimizer """adadelta""" +595 67 training_loop """owa""" +595 67 negative_sampler """basic""" +595 67 evaluator """rankbased""" +595 68 dataset """kinships""" +595 68 model """rotate""" +595 68 loss """nssa""" +595 68 regularizer """no""" +595 68 optimizer """adadelta""" +595 68 training_loop """owa""" +595 68 negative_sampler """basic""" +595 68 evaluator """rankbased""" +595 69 dataset """kinships""" +595 69 model """rotate""" +595 69 loss """nssa""" +595 69 regularizer """no""" +595 69 optimizer """adadelta""" +595 69 training_loop """owa""" +595 69 negative_sampler """basic""" +595 69 evaluator """rankbased""" +595 70 dataset """kinships""" +595 70 model """rotate""" +595 70 loss """nssa""" +595 70 regularizer """no""" +595 70 optimizer """adadelta""" +595 70 training_loop """owa""" +595 70 negative_sampler """basic""" +595 70 evaluator """rankbased""" +595 71 dataset """kinships""" +595 71 model """rotate""" +595 71 loss """nssa""" +595 71 regularizer """no""" +595 71 optimizer """adadelta""" +595 71 training_loop """owa""" +595 71 negative_sampler """basic""" +595 71 evaluator """rankbased""" +595 72 dataset """kinships""" +595 72 model """rotate""" +595 72 loss """nssa""" +595 72 regularizer """no""" +595 72 optimizer """adadelta""" +595 72 training_loop """owa""" +595 72 negative_sampler """basic""" +595 72 evaluator """rankbased""" +595 73 dataset """kinships""" +595 73 model """rotate""" +595 73 loss """nssa""" +595 73 regularizer """no""" +595 73 optimizer 
"""adadelta""" +595 73 training_loop """owa""" +595 73 negative_sampler """basic""" +595 73 evaluator """rankbased""" +595 74 dataset """kinships""" +595 74 model """rotate""" +595 74 loss """nssa""" +595 74 regularizer """no""" +595 74 optimizer """adadelta""" +595 74 training_loop """owa""" +595 74 negative_sampler """basic""" +595 74 evaluator """rankbased""" +595 75 dataset """kinships""" +595 75 model """rotate""" +595 75 loss """nssa""" +595 75 regularizer """no""" +595 75 optimizer """adadelta""" +595 75 training_loop """owa""" +595 75 negative_sampler """basic""" +595 75 evaluator """rankbased""" +595 76 dataset """kinships""" +595 76 model """rotate""" +595 76 loss """nssa""" +595 76 regularizer """no""" +595 76 optimizer """adadelta""" +595 76 training_loop """owa""" +595 76 negative_sampler """basic""" +595 76 evaluator """rankbased""" +595 77 dataset """kinships""" +595 77 model """rotate""" +595 77 loss """nssa""" +595 77 regularizer """no""" +595 77 optimizer """adadelta""" +595 77 training_loop """owa""" +595 77 negative_sampler """basic""" +595 77 evaluator """rankbased""" +595 78 dataset """kinships""" +595 78 model """rotate""" +595 78 loss """nssa""" +595 78 regularizer """no""" +595 78 optimizer """adadelta""" +595 78 training_loop """owa""" +595 78 negative_sampler """basic""" +595 78 evaluator """rankbased""" +595 79 dataset """kinships""" +595 79 model """rotate""" +595 79 loss """nssa""" +595 79 regularizer """no""" +595 79 optimizer """adadelta""" +595 79 training_loop """owa""" +595 79 negative_sampler """basic""" +595 79 evaluator """rankbased""" +595 80 dataset """kinships""" +595 80 model """rotate""" +595 80 loss """nssa""" +595 80 regularizer """no""" +595 80 optimizer """adadelta""" +595 80 training_loop """owa""" +595 80 negative_sampler """basic""" +595 80 evaluator """rankbased""" +595 81 dataset """kinships""" +595 81 model """rotate""" +595 81 loss """nssa""" +595 81 regularizer """no""" +595 81 optimizer """adadelta""" +595 81 
training_loop """owa""" +595 81 negative_sampler """basic""" +595 81 evaluator """rankbased""" +595 82 dataset """kinships""" +595 82 model """rotate""" +595 82 loss """nssa""" +595 82 regularizer """no""" +595 82 optimizer """adadelta""" +595 82 training_loop """owa""" +595 82 negative_sampler """basic""" +595 82 evaluator """rankbased""" +595 83 dataset """kinships""" +595 83 model """rotate""" +595 83 loss """nssa""" +595 83 regularizer """no""" +595 83 optimizer """adadelta""" +595 83 training_loop """owa""" +595 83 negative_sampler """basic""" +595 83 evaluator """rankbased""" +595 84 dataset """kinships""" +595 84 model """rotate""" +595 84 loss """nssa""" +595 84 regularizer """no""" +595 84 optimizer """adadelta""" +595 84 training_loop """owa""" +595 84 negative_sampler """basic""" +595 84 evaluator """rankbased""" +595 85 dataset """kinships""" +595 85 model """rotate""" +595 85 loss """nssa""" +595 85 regularizer """no""" +595 85 optimizer """adadelta""" +595 85 training_loop """owa""" +595 85 negative_sampler """basic""" +595 85 evaluator """rankbased""" +595 86 dataset """kinships""" +595 86 model """rotate""" +595 86 loss """nssa""" +595 86 regularizer """no""" +595 86 optimizer """adadelta""" +595 86 training_loop """owa""" +595 86 negative_sampler """basic""" +595 86 evaluator """rankbased""" +595 87 dataset """kinships""" +595 87 model """rotate""" +595 87 loss """nssa""" +595 87 regularizer """no""" +595 87 optimizer """adadelta""" +595 87 training_loop """owa""" +595 87 negative_sampler """basic""" +595 87 evaluator """rankbased""" +595 88 dataset """kinships""" +595 88 model """rotate""" +595 88 loss """nssa""" +595 88 regularizer """no""" +595 88 optimizer """adadelta""" +595 88 training_loop """owa""" +595 88 negative_sampler """basic""" +595 88 evaluator """rankbased""" +595 89 dataset """kinships""" +595 89 model """rotate""" +595 89 loss """nssa""" +595 89 regularizer """no""" +595 89 optimizer """adadelta""" +595 89 training_loop """owa""" 
+595 89 negative_sampler """basic""" +595 89 evaluator """rankbased""" +595 90 dataset """kinships""" +595 90 model """rotate""" +595 90 loss """nssa""" +595 90 regularizer """no""" +595 90 optimizer """adadelta""" +595 90 training_loop """owa""" +595 90 negative_sampler """basic""" +595 90 evaluator """rankbased""" +595 91 dataset """kinships""" +595 91 model """rotate""" +595 91 loss """nssa""" +595 91 regularizer """no""" +595 91 optimizer """adadelta""" +595 91 training_loop """owa""" +595 91 negative_sampler """basic""" +595 91 evaluator """rankbased""" +595 92 dataset """kinships""" +595 92 model """rotate""" +595 92 loss """nssa""" +595 92 regularizer """no""" +595 92 optimizer """adadelta""" +595 92 training_loop """owa""" +595 92 negative_sampler """basic""" +595 92 evaluator """rankbased""" +595 93 dataset """kinships""" +595 93 model """rotate""" +595 93 loss """nssa""" +595 93 regularizer """no""" +595 93 optimizer """adadelta""" +595 93 training_loop """owa""" +595 93 negative_sampler """basic""" +595 93 evaluator """rankbased""" +595 94 dataset """kinships""" +595 94 model """rotate""" +595 94 loss """nssa""" +595 94 regularizer """no""" +595 94 optimizer """adadelta""" +595 94 training_loop """owa""" +595 94 negative_sampler """basic""" +595 94 evaluator """rankbased""" +595 95 dataset """kinships""" +595 95 model """rotate""" +595 95 loss """nssa""" +595 95 regularizer """no""" +595 95 optimizer """adadelta""" +595 95 training_loop """owa""" +595 95 negative_sampler """basic""" +595 95 evaluator """rankbased""" +595 96 dataset """kinships""" +595 96 model """rotate""" +595 96 loss """nssa""" +595 96 regularizer """no""" +595 96 optimizer """adadelta""" +595 96 training_loop """owa""" +595 96 negative_sampler """basic""" +595 96 evaluator """rankbased""" +595 97 dataset """kinships""" +595 97 model """rotate""" +595 97 loss """nssa""" +595 97 regularizer """no""" +595 97 optimizer """adadelta""" +595 97 training_loop """owa""" +595 97 
negative_sampler """basic""" +595 97 evaluator """rankbased""" +595 98 dataset """kinships""" +595 98 model """rotate""" +595 98 loss """nssa""" +595 98 regularizer """no""" +595 98 optimizer """adadelta""" +595 98 training_loop """owa""" +595 98 negative_sampler """basic""" +595 98 evaluator """rankbased""" +595 99 dataset """kinships""" +595 99 model """rotate""" +595 99 loss """nssa""" +595 99 regularizer """no""" +595 99 optimizer """adadelta""" +595 99 training_loop """owa""" +595 99 negative_sampler """basic""" +595 99 evaluator """rankbased""" +595 100 dataset """kinships""" +595 100 model """rotate""" +595 100 loss """nssa""" +595 100 regularizer """no""" +595 100 optimizer """adadelta""" +595 100 training_loop """owa""" +595 100 negative_sampler """basic""" +595 100 evaluator """rankbased""" +596 1 model.embedding_dim 2.0 +596 1 loss.margin 11.406142974042735 +596 1 loss.adversarial_temperature 0.7954733620612794 +596 1 negative_sampler.num_negs_per_pos 76.0 +596 1 training.batch_size 1.0 +596 2 model.embedding_dim 2.0 +596 2 loss.margin 27.129798355962645 +596 2 loss.adversarial_temperature 0.7631833773707587 +596 2 negative_sampler.num_negs_per_pos 80.0 +596 2 training.batch_size 0.0 +596 3 model.embedding_dim 0.0 +596 3 loss.margin 11.487762048853837 +596 3 loss.adversarial_temperature 0.9224741235276537 +596 3 negative_sampler.num_negs_per_pos 21.0 +596 3 training.batch_size 0.0 +596 4 model.embedding_dim 1.0 +596 4 loss.margin 25.162044558296575 +596 4 loss.adversarial_temperature 0.2271105078222176 +596 4 negative_sampler.num_negs_per_pos 20.0 +596 4 training.batch_size 0.0 +596 5 model.embedding_dim 1.0 +596 5 loss.margin 5.164260118638045 +596 5 loss.adversarial_temperature 0.8139918960509126 +596 5 negative_sampler.num_negs_per_pos 70.0 +596 5 training.batch_size 0.0 +596 6 model.embedding_dim 2.0 +596 6 loss.margin 4.761397013515715 +596 6 loss.adversarial_temperature 0.7709141889363753 +596 6 negative_sampler.num_negs_per_pos 82.0 +596 6 
training.batch_size 0.0 +596 7 model.embedding_dim 2.0 +596 7 loss.margin 27.885433547503077 +596 7 loss.adversarial_temperature 0.6932750941029103 +596 7 negative_sampler.num_negs_per_pos 64.0 +596 7 training.batch_size 0.0 +596 8 model.embedding_dim 2.0 +596 8 loss.margin 18.452637740161745 +596 8 loss.adversarial_temperature 0.5459628391517805 +596 8 negative_sampler.num_negs_per_pos 9.0 +596 8 training.batch_size 1.0 +596 9 model.embedding_dim 2.0 +596 9 loss.margin 1.7489464177750003 +596 9 loss.adversarial_temperature 0.1538610253970394 +596 9 negative_sampler.num_negs_per_pos 36.0 +596 9 training.batch_size 1.0 +596 10 model.embedding_dim 1.0 +596 10 loss.margin 4.769512408173005 +596 10 loss.adversarial_temperature 0.11324960172628097 +596 10 negative_sampler.num_negs_per_pos 48.0 +596 10 training.batch_size 1.0 +596 11 model.embedding_dim 2.0 +596 11 loss.margin 25.14154831518626 +596 11 loss.adversarial_temperature 0.9036480040591768 +596 11 negative_sampler.num_negs_per_pos 41.0 +596 11 training.batch_size 1.0 +596 12 model.embedding_dim 2.0 +596 12 loss.margin 14.791206220536218 +596 12 loss.adversarial_temperature 0.9407282607594776 +596 12 negative_sampler.num_negs_per_pos 15.0 +596 12 training.batch_size 2.0 +596 13 model.embedding_dim 0.0 +596 13 loss.margin 4.4837971379291215 +596 13 loss.adversarial_temperature 0.13632898173289756 +596 13 negative_sampler.num_negs_per_pos 36.0 +596 13 training.batch_size 0.0 +596 14 model.embedding_dim 2.0 +596 14 loss.margin 3.6316201576801705 +596 14 loss.adversarial_temperature 0.9410219391406899 +596 14 negative_sampler.num_negs_per_pos 29.0 +596 14 training.batch_size 0.0 +596 15 model.embedding_dim 0.0 +596 15 loss.margin 20.444733180506887 +596 15 loss.adversarial_temperature 0.9845012836970747 +596 15 negative_sampler.num_negs_per_pos 27.0 +596 15 training.batch_size 1.0 +596 16 model.embedding_dim 2.0 +596 16 loss.margin 1.82229688809803 +596 16 loss.adversarial_temperature 0.8286636906122168 +596 16 
negative_sampler.num_negs_per_pos 61.0 +596 16 training.batch_size 0.0 +596 17 model.embedding_dim 1.0 +596 17 loss.margin 28.08500113006743 +596 17 loss.adversarial_temperature 0.9331562000591629 +596 17 negative_sampler.num_negs_per_pos 11.0 +596 17 training.batch_size 1.0 +596 18 model.embedding_dim 1.0 +596 18 loss.margin 17.06457169511753 +596 18 loss.adversarial_temperature 0.540088447460103 +596 18 negative_sampler.num_negs_per_pos 87.0 +596 18 training.batch_size 0.0 +596 19 model.embedding_dim 2.0 +596 19 loss.margin 23.135065651761128 +596 19 loss.adversarial_temperature 0.5108015863537534 +596 19 negative_sampler.num_negs_per_pos 83.0 +596 19 training.batch_size 0.0 +596 20 model.embedding_dim 1.0 +596 20 loss.margin 4.920502832320373 +596 20 loss.adversarial_temperature 0.5830145269684619 +596 20 negative_sampler.num_negs_per_pos 30.0 +596 20 training.batch_size 1.0 +596 21 model.embedding_dim 2.0 +596 21 loss.margin 5.632339247772356 +596 21 loss.adversarial_temperature 0.6183564801323093 +596 21 negative_sampler.num_negs_per_pos 80.0 +596 21 training.batch_size 0.0 +596 22 model.embedding_dim 1.0 +596 22 loss.margin 20.949742336586883 +596 22 loss.adversarial_temperature 0.9772670671414149 +596 22 negative_sampler.num_negs_per_pos 30.0 +596 22 training.batch_size 0.0 +596 23 model.embedding_dim 2.0 +596 23 loss.margin 27.558923771081993 +596 23 loss.adversarial_temperature 0.9049333150389979 +596 23 negative_sampler.num_negs_per_pos 61.0 +596 23 training.batch_size 1.0 +596 24 model.embedding_dim 1.0 +596 24 loss.margin 18.91612623854733 +596 24 loss.adversarial_temperature 0.30942225810227014 +596 24 negative_sampler.num_negs_per_pos 20.0 +596 24 training.batch_size 0.0 +596 25 model.embedding_dim 0.0 +596 25 loss.margin 6.2936490434750985 +596 25 loss.adversarial_temperature 0.348405535456606 +596 25 negative_sampler.num_negs_per_pos 87.0 +596 25 training.batch_size 2.0 +596 26 model.embedding_dim 0.0 +596 26 loss.margin 28.89087569891432 +596 26 
loss.adversarial_temperature 0.21805313224878214 +596 26 negative_sampler.num_negs_per_pos 26.0 +596 26 training.batch_size 1.0 +596 27 model.embedding_dim 2.0 +596 27 loss.margin 17.343387777786088 +596 27 loss.adversarial_temperature 0.8318490917842434 +596 27 negative_sampler.num_negs_per_pos 70.0 +596 27 training.batch_size 0.0 +596 28 model.embedding_dim 1.0 +596 28 loss.margin 13.16596376090885 +596 28 loss.adversarial_temperature 0.47893312897650075 +596 28 negative_sampler.num_negs_per_pos 58.0 +596 28 training.batch_size 1.0 +596 29 model.embedding_dim 2.0 +596 29 loss.margin 19.06416186092005 +596 29 loss.adversarial_temperature 0.453050319766261 +596 29 negative_sampler.num_negs_per_pos 97.0 +596 29 training.batch_size 2.0 +596 30 model.embedding_dim 2.0 +596 30 loss.margin 26.577081754552005 +596 30 loss.adversarial_temperature 0.913154807947705 +596 30 negative_sampler.num_negs_per_pos 76.0 +596 30 training.batch_size 2.0 +596 31 model.embedding_dim 2.0 +596 31 loss.margin 9.363917016742338 +596 31 loss.adversarial_temperature 0.8538581865634434 +596 31 negative_sampler.num_negs_per_pos 29.0 +596 31 training.batch_size 1.0 +596 32 model.embedding_dim 2.0 +596 32 loss.margin 17.673696933886106 +596 32 loss.adversarial_temperature 0.5450762626808826 +596 32 negative_sampler.num_negs_per_pos 74.0 +596 32 training.batch_size 0.0 +596 33 model.embedding_dim 2.0 +596 33 loss.margin 21.869550589228435 +596 33 loss.adversarial_temperature 0.14855236570635943 +596 33 negative_sampler.num_negs_per_pos 38.0 +596 33 training.batch_size 2.0 +596 34 model.embedding_dim 2.0 +596 34 loss.margin 22.74604482840141 +596 34 loss.adversarial_temperature 0.17773211046166276 +596 34 negative_sampler.num_negs_per_pos 32.0 +596 34 training.batch_size 0.0 +596 35 model.embedding_dim 1.0 +596 35 loss.margin 16.817780179654136 +596 35 loss.adversarial_temperature 0.12947013894142978 +596 35 negative_sampler.num_negs_per_pos 81.0 +596 35 training.batch_size 1.0 +596 36 
model.embedding_dim 2.0 +596 36 loss.margin 19.84543112071314 +596 36 loss.adversarial_temperature 0.3696080244002077 +596 36 negative_sampler.num_negs_per_pos 58.0 +596 36 training.batch_size 2.0 +596 37 model.embedding_dim 1.0 +596 37 loss.margin 14.167449846508346 +596 37 loss.adversarial_temperature 0.2814401226679024 +596 37 negative_sampler.num_negs_per_pos 44.0 +596 37 training.batch_size 0.0 +596 38 model.embedding_dim 2.0 +596 38 loss.margin 5.598427915389851 +596 38 loss.adversarial_temperature 0.12670009106170585 +596 38 negative_sampler.num_negs_per_pos 75.0 +596 38 training.batch_size 0.0 +596 39 model.embedding_dim 2.0 +596 39 loss.margin 22.142681223490637 +596 39 loss.adversarial_temperature 0.6628382743296386 +596 39 negative_sampler.num_negs_per_pos 45.0 +596 39 training.batch_size 1.0 +596 40 model.embedding_dim 2.0 +596 40 loss.margin 11.590746125235905 +596 40 loss.adversarial_temperature 0.7742456908068229 +596 40 negative_sampler.num_negs_per_pos 26.0 +596 40 training.batch_size 1.0 +596 41 model.embedding_dim 0.0 +596 41 loss.margin 8.684887734473204 +596 41 loss.adversarial_temperature 0.16715916306408676 +596 41 negative_sampler.num_negs_per_pos 1.0 +596 41 training.batch_size 2.0 +596 42 model.embedding_dim 1.0 +596 42 loss.margin 19.822008161883996 +596 42 loss.adversarial_temperature 0.988757165598143 +596 42 negative_sampler.num_negs_per_pos 62.0 +596 42 training.batch_size 0.0 +596 43 model.embedding_dim 1.0 +596 43 loss.margin 20.053850047797496 +596 43 loss.adversarial_temperature 0.5934846047958782 +596 43 negative_sampler.num_negs_per_pos 12.0 +596 43 training.batch_size 1.0 +596 44 model.embedding_dim 0.0 +596 44 loss.margin 24.286705219373186 +596 44 loss.adversarial_temperature 0.5327986611575539 +596 44 negative_sampler.num_negs_per_pos 48.0 +596 44 training.batch_size 0.0 +596 45 model.embedding_dim 1.0 +596 45 loss.margin 24.280681906817396 +596 45 loss.adversarial_temperature 0.24383706509894232 +596 45 
negative_sampler.num_negs_per_pos 71.0 +596 45 training.batch_size 1.0 +596 46 model.embedding_dim 2.0 +596 46 loss.margin 14.850712357009442 +596 46 loss.adversarial_temperature 0.1814841819757831 +596 46 negative_sampler.num_negs_per_pos 19.0 +596 46 training.batch_size 1.0 +596 47 model.embedding_dim 2.0 +596 47 loss.margin 12.964740341684223 +596 47 loss.adversarial_temperature 0.40375006429543236 +596 47 negative_sampler.num_negs_per_pos 73.0 +596 47 training.batch_size 0.0 +596 48 model.embedding_dim 0.0 +596 48 loss.margin 6.934888784089296 +596 48 loss.adversarial_temperature 0.7813314070926354 +596 48 negative_sampler.num_negs_per_pos 12.0 +596 48 training.batch_size 1.0 +596 49 model.embedding_dim 1.0 +596 49 loss.margin 22.458757271510212 +596 49 loss.adversarial_temperature 0.8281078985294127 +596 49 negative_sampler.num_negs_per_pos 32.0 +596 49 training.batch_size 1.0 +596 50 model.embedding_dim 2.0 +596 50 loss.margin 10.430447338339112 +596 50 loss.adversarial_temperature 0.9887108566554685 +596 50 negative_sampler.num_negs_per_pos 91.0 +596 50 training.batch_size 2.0 +596 51 model.embedding_dim 2.0 +596 51 loss.margin 29.69085781272379 +596 51 loss.adversarial_temperature 0.11984697029218352 +596 51 negative_sampler.num_negs_per_pos 57.0 +596 51 training.batch_size 2.0 +596 52 model.embedding_dim 1.0 +596 52 loss.margin 25.0779795930507 +596 52 loss.adversarial_temperature 0.8789388180791888 +596 52 negative_sampler.num_negs_per_pos 37.0 +596 52 training.batch_size 0.0 +596 53 model.embedding_dim 1.0 +596 53 loss.margin 16.266757757399834 +596 53 loss.adversarial_temperature 0.924989337360085 +596 53 negative_sampler.num_negs_per_pos 43.0 +596 53 training.batch_size 1.0 +596 54 model.embedding_dim 1.0 +596 54 loss.margin 17.435965241753586 +596 54 loss.adversarial_temperature 0.7794862250017173 +596 54 negative_sampler.num_negs_per_pos 8.0 +596 54 training.batch_size 0.0 +596 55 model.embedding_dim 2.0 +596 55 loss.margin 21.684503738834447 +596 55 
loss.adversarial_temperature 0.45957472195226545 +596 55 negative_sampler.num_negs_per_pos 36.0 +596 55 training.batch_size 2.0 +596 56 model.embedding_dim 0.0 +596 56 loss.margin 19.40246488564807 +596 56 loss.adversarial_temperature 0.15725662513051047 +596 56 negative_sampler.num_negs_per_pos 46.0 +596 56 training.batch_size 1.0 +596 57 model.embedding_dim 2.0 +596 57 loss.margin 23.23892884026933 +596 57 loss.adversarial_temperature 0.23386259387975195 +596 57 negative_sampler.num_negs_per_pos 12.0 +596 57 training.batch_size 0.0 +596 58 model.embedding_dim 2.0 +596 58 loss.margin 22.83561568220819 +596 58 loss.adversarial_temperature 0.46638616192134486 +596 58 negative_sampler.num_negs_per_pos 16.0 +596 58 training.batch_size 1.0 +596 59 model.embedding_dim 1.0 +596 59 loss.margin 5.448995403874267 +596 59 loss.adversarial_temperature 0.40115820886595477 +596 59 negative_sampler.num_negs_per_pos 68.0 +596 59 training.batch_size 2.0 +596 60 model.embedding_dim 1.0 +596 60 loss.margin 6.416961821328327 +596 60 loss.adversarial_temperature 0.639818517259826 +596 60 negative_sampler.num_negs_per_pos 60.0 +596 60 training.batch_size 1.0 +596 61 model.embedding_dim 2.0 +596 61 loss.margin 1.0973819299183827 +596 61 loss.adversarial_temperature 0.8968083169044845 +596 61 negative_sampler.num_negs_per_pos 42.0 +596 61 training.batch_size 1.0 +596 62 model.embedding_dim 0.0 +596 62 loss.margin 15.57040478626777 +596 62 loss.adversarial_temperature 0.6753197694342051 +596 62 negative_sampler.num_negs_per_pos 67.0 +596 62 training.batch_size 0.0 +596 63 model.embedding_dim 0.0 +596 63 loss.margin 19.305435325923717 +596 63 loss.adversarial_temperature 0.8364528430277808 +596 63 negative_sampler.num_negs_per_pos 68.0 +596 63 training.batch_size 1.0 +596 64 model.embedding_dim 2.0 +596 64 loss.margin 3.3594604940190296 +596 64 loss.adversarial_temperature 0.7052613747651412 +596 64 negative_sampler.num_negs_per_pos 71.0 +596 64 training.batch_size 2.0 +596 65 
model.embedding_dim 2.0 +596 65 loss.margin 7.578644794731641 +596 65 loss.adversarial_temperature 0.7123405193354511 +596 65 negative_sampler.num_negs_per_pos 1.0 +596 65 training.batch_size 1.0 +596 66 model.embedding_dim 2.0 +596 66 loss.margin 4.732588174724832 +596 66 loss.adversarial_temperature 0.7812448450476265 +596 66 negative_sampler.num_negs_per_pos 97.0 +596 66 training.batch_size 1.0 +596 67 model.embedding_dim 2.0 +596 67 loss.margin 29.197417359161438 +596 67 loss.adversarial_temperature 0.6013607168615199 +596 67 negative_sampler.num_negs_per_pos 98.0 +596 67 training.batch_size 0.0 +596 68 model.embedding_dim 0.0 +596 68 loss.margin 2.2950293468527736 +596 68 loss.adversarial_temperature 0.10727831329712156 +596 68 negative_sampler.num_negs_per_pos 4.0 +596 68 training.batch_size 1.0 +596 69 model.embedding_dim 2.0 +596 69 loss.margin 1.5576784166437632 +596 69 loss.adversarial_temperature 0.17535775737834453 +596 69 negative_sampler.num_negs_per_pos 84.0 +596 69 training.batch_size 0.0 +596 70 model.embedding_dim 0.0 +596 70 loss.margin 28.61511339769396 +596 70 loss.adversarial_temperature 0.37956621732453555 +596 70 negative_sampler.num_negs_per_pos 5.0 +596 70 training.batch_size 0.0 +596 71 model.embedding_dim 0.0 +596 71 loss.margin 19.65898722137904 +596 71 loss.adversarial_temperature 0.1834324046719022 +596 71 negative_sampler.num_negs_per_pos 49.0 +596 71 training.batch_size 0.0 +596 72 model.embedding_dim 0.0 +596 72 loss.margin 22.277198278517112 +596 72 loss.adversarial_temperature 0.6363556094378099 +596 72 negative_sampler.num_negs_per_pos 69.0 +596 72 training.batch_size 2.0 +596 73 model.embedding_dim 2.0 +596 73 loss.margin 13.297736096347021 +596 73 loss.adversarial_temperature 0.7615554529836204 +596 73 negative_sampler.num_negs_per_pos 95.0 +596 73 training.batch_size 0.0 +596 74 model.embedding_dim 1.0 +596 74 loss.margin 24.350973337654324 +596 74 loss.adversarial_temperature 0.9446937905846865 +596 74 
negative_sampler.num_negs_per_pos 80.0 +596 74 training.batch_size 1.0 +596 75 model.embedding_dim 1.0 +596 75 loss.margin 24.27882065504538 +596 75 loss.adversarial_temperature 0.18264444841109823 +596 75 negative_sampler.num_negs_per_pos 46.0 +596 75 training.batch_size 2.0 +596 76 model.embedding_dim 2.0 +596 76 loss.margin 21.859630525149555 +596 76 loss.adversarial_temperature 0.956204545864277 +596 76 negative_sampler.num_negs_per_pos 46.0 +596 76 training.batch_size 1.0 +596 77 model.embedding_dim 0.0 +596 77 loss.margin 29.882600534949155 +596 77 loss.adversarial_temperature 0.6185471986838021 +596 77 negative_sampler.num_negs_per_pos 87.0 +596 77 training.batch_size 1.0 +596 78 model.embedding_dim 0.0 +596 78 loss.margin 18.990166870415752 +596 78 loss.adversarial_temperature 0.38521421417008594 +596 78 negative_sampler.num_negs_per_pos 26.0 +596 78 training.batch_size 1.0 +596 79 model.embedding_dim 2.0 +596 79 loss.margin 7.757546400594815 +596 79 loss.adversarial_temperature 0.7096810260418278 +596 79 negative_sampler.num_negs_per_pos 77.0 +596 79 training.batch_size 1.0 +596 80 model.embedding_dim 2.0 +596 80 loss.margin 8.69815948698342 +596 80 loss.adversarial_temperature 0.48666882568655523 +596 80 negative_sampler.num_negs_per_pos 92.0 +596 80 training.batch_size 1.0 +596 81 model.embedding_dim 2.0 +596 81 loss.margin 1.1702461652732918 +596 81 loss.adversarial_temperature 0.6870780007705865 +596 81 negative_sampler.num_negs_per_pos 11.0 +596 81 training.batch_size 1.0 +596 82 model.embedding_dim 1.0 +596 82 loss.margin 9.339952617730292 +596 82 loss.adversarial_temperature 0.7959919310943401 +596 82 negative_sampler.num_negs_per_pos 92.0 +596 82 training.batch_size 2.0 +596 83 model.embedding_dim 0.0 +596 83 loss.margin 2.5641671859467827 +596 83 loss.adversarial_temperature 0.9721433705571455 +596 83 negative_sampler.num_negs_per_pos 47.0 +596 83 training.batch_size 0.0 +596 84 model.embedding_dim 2.0 +596 84 loss.margin 13.440705647750606 +596 
84 loss.adversarial_temperature 0.1299873429588697 +596 84 negative_sampler.num_negs_per_pos 8.0 +596 84 training.batch_size 1.0 +596 85 model.embedding_dim 2.0 +596 85 loss.margin 22.317301253584453 +596 85 loss.adversarial_temperature 0.4942155118439001 +596 85 negative_sampler.num_negs_per_pos 1.0 +596 85 training.batch_size 2.0 +596 86 model.embedding_dim 0.0 +596 86 loss.margin 9.560595085236146 +596 86 loss.adversarial_temperature 0.5182364725484616 +596 86 negative_sampler.num_negs_per_pos 40.0 +596 86 training.batch_size 2.0 +596 87 model.embedding_dim 1.0 +596 87 loss.margin 14.426118930888025 +596 87 loss.adversarial_temperature 0.9788571379738903 +596 87 negative_sampler.num_negs_per_pos 24.0 +596 87 training.batch_size 1.0 +596 88 model.embedding_dim 2.0 +596 88 loss.margin 25.12334603382068 +596 88 loss.adversarial_temperature 0.24173251396094086 +596 88 negative_sampler.num_negs_per_pos 76.0 +596 88 training.batch_size 2.0 +596 89 model.embedding_dim 0.0 +596 89 loss.margin 4.380550283116685 +596 89 loss.adversarial_temperature 0.8444888497603001 +596 89 negative_sampler.num_negs_per_pos 48.0 +596 89 training.batch_size 2.0 +596 90 model.embedding_dim 1.0 +596 90 loss.margin 12.801536021175915 +596 90 loss.adversarial_temperature 0.234900137139671 +596 90 negative_sampler.num_negs_per_pos 13.0 +596 90 training.batch_size 1.0 +596 91 model.embedding_dim 0.0 +596 91 loss.margin 25.09863460182232 +596 91 loss.adversarial_temperature 0.14884390546987752 +596 91 negative_sampler.num_negs_per_pos 51.0 +596 91 training.batch_size 0.0 +596 92 model.embedding_dim 2.0 +596 92 loss.margin 28.496578862041225 +596 92 loss.adversarial_temperature 0.9146820174983085 +596 92 negative_sampler.num_negs_per_pos 1.0 +596 92 training.batch_size 2.0 +596 93 model.embedding_dim 0.0 +596 93 loss.margin 20.497231267428365 +596 93 loss.adversarial_temperature 0.12300841557730742 +596 93 negative_sampler.num_negs_per_pos 96.0 +596 93 training.batch_size 0.0 +596 94 
model.embedding_dim 2.0 +596 94 loss.margin 28.667286935571365 +596 94 loss.adversarial_temperature 0.24410210825179943 +596 94 negative_sampler.num_negs_per_pos 32.0 +596 94 training.batch_size 2.0 +596 95 model.embedding_dim 1.0 +596 95 loss.margin 2.2580660668968684 +596 95 loss.adversarial_temperature 0.6801298784230788 +596 95 negative_sampler.num_negs_per_pos 94.0 +596 95 training.batch_size 2.0 +596 96 model.embedding_dim 0.0 +596 96 loss.margin 21.75159387945691 +596 96 loss.adversarial_temperature 0.5372725672467515 +596 96 negative_sampler.num_negs_per_pos 47.0 +596 96 training.batch_size 0.0 +596 97 model.embedding_dim 1.0 +596 97 loss.margin 6.005265879184684 +596 97 loss.adversarial_temperature 0.32110370965587764 +596 97 negative_sampler.num_negs_per_pos 57.0 +596 97 training.batch_size 0.0 +596 98 model.embedding_dim 0.0 +596 98 loss.margin 19.72690278715949 +596 98 loss.adversarial_temperature 0.48545271156592595 +596 98 negative_sampler.num_negs_per_pos 56.0 +596 98 training.batch_size 0.0 +596 99 model.embedding_dim 0.0 +596 99 loss.margin 16.240490405181973 +596 99 loss.adversarial_temperature 0.9539285481721881 +596 99 negative_sampler.num_negs_per_pos 55.0 +596 99 training.batch_size 0.0 +596 100 model.embedding_dim 2.0 +596 100 loss.margin 18.140565250355024 +596 100 loss.adversarial_temperature 0.7672327337727475 +596 100 negative_sampler.num_negs_per_pos 58.0 +596 100 training.batch_size 0.0 +596 1 dataset """kinships""" +596 1 model """rotate""" +596 1 loss """nssa""" +596 1 regularizer """no""" +596 1 optimizer """adadelta""" +596 1 training_loop """owa""" +596 1 negative_sampler """basic""" +596 1 evaluator """rankbased""" +596 2 dataset """kinships""" +596 2 model """rotate""" +596 2 loss """nssa""" +596 2 regularizer """no""" +596 2 optimizer """adadelta""" +596 2 training_loop """owa""" +596 2 negative_sampler """basic""" +596 2 evaluator """rankbased""" +596 3 dataset """kinships""" +596 3 model """rotate""" +596 3 loss """nssa""" 
+596 3 regularizer """no""" +596 3 optimizer """adadelta""" +596 3 training_loop """owa""" +596 3 negative_sampler """basic""" +596 3 evaluator """rankbased""" +596 4 dataset """kinships""" +596 4 model """rotate""" +596 4 loss """nssa""" +596 4 regularizer """no""" +596 4 optimizer """adadelta""" +596 4 training_loop """owa""" +596 4 negative_sampler """basic""" +596 4 evaluator """rankbased""" +596 5 dataset """kinships""" +596 5 model """rotate""" +596 5 loss """nssa""" +596 5 regularizer """no""" +596 5 optimizer """adadelta""" +596 5 training_loop """owa""" +596 5 negative_sampler """basic""" +596 5 evaluator """rankbased""" +596 6 dataset """kinships""" +596 6 model """rotate""" +596 6 loss """nssa""" +596 6 regularizer """no""" +596 6 optimizer """adadelta""" +596 6 training_loop """owa""" +596 6 negative_sampler """basic""" +596 6 evaluator """rankbased""" +596 7 dataset """kinships""" +596 7 model """rotate""" +596 7 loss """nssa""" +596 7 regularizer """no""" +596 7 optimizer """adadelta""" +596 7 training_loop """owa""" +596 7 negative_sampler """basic""" +596 7 evaluator """rankbased""" +596 8 dataset """kinships""" +596 8 model """rotate""" +596 8 loss """nssa""" +596 8 regularizer """no""" +596 8 optimizer """adadelta""" +596 8 training_loop """owa""" +596 8 negative_sampler """basic""" +596 8 evaluator """rankbased""" +596 9 dataset """kinships""" +596 9 model """rotate""" +596 9 loss """nssa""" +596 9 regularizer """no""" +596 9 optimizer """adadelta""" +596 9 training_loop """owa""" +596 9 negative_sampler """basic""" +596 9 evaluator """rankbased""" +596 10 dataset """kinships""" +596 10 model """rotate""" +596 10 loss """nssa""" +596 10 regularizer """no""" +596 10 optimizer """adadelta""" +596 10 training_loop """owa""" +596 10 negative_sampler """basic""" +596 10 evaluator """rankbased""" +596 11 dataset """kinships""" +596 11 model """rotate""" +596 11 loss """nssa""" +596 11 regularizer """no""" +596 11 optimizer """adadelta""" +596 11 
training_loop """owa""" +596 11 negative_sampler """basic""" +596 11 evaluator """rankbased""" +596 12 dataset """kinships""" +596 12 model """rotate""" +596 12 loss """nssa""" +596 12 regularizer """no""" +596 12 optimizer """adadelta""" +596 12 training_loop """owa""" +596 12 negative_sampler """basic""" +596 12 evaluator """rankbased""" +596 13 dataset """kinships""" +596 13 model """rotate""" +596 13 loss """nssa""" +596 13 regularizer """no""" +596 13 optimizer """adadelta""" +596 13 training_loop """owa""" +596 13 negative_sampler """basic""" +596 13 evaluator """rankbased""" +596 14 dataset """kinships""" +596 14 model """rotate""" +596 14 loss """nssa""" +596 14 regularizer """no""" +596 14 optimizer """adadelta""" +596 14 training_loop """owa""" +596 14 negative_sampler """basic""" +596 14 evaluator """rankbased""" +596 15 dataset """kinships""" +596 15 model """rotate""" +596 15 loss """nssa""" +596 15 regularizer """no""" +596 15 optimizer """adadelta""" +596 15 training_loop """owa""" +596 15 negative_sampler """basic""" +596 15 evaluator """rankbased""" +596 16 dataset """kinships""" +596 16 model """rotate""" +596 16 loss """nssa""" +596 16 regularizer """no""" +596 16 optimizer """adadelta""" +596 16 training_loop """owa""" +596 16 negative_sampler """basic""" +596 16 evaluator """rankbased""" +596 17 dataset """kinships""" +596 17 model """rotate""" +596 17 loss """nssa""" +596 17 regularizer """no""" +596 17 optimizer """adadelta""" +596 17 training_loop """owa""" +596 17 negative_sampler """basic""" +596 17 evaluator """rankbased""" +596 18 dataset """kinships""" +596 18 model """rotate""" +596 18 loss """nssa""" +596 18 regularizer """no""" +596 18 optimizer """adadelta""" +596 18 training_loop """owa""" +596 18 negative_sampler """basic""" +596 18 evaluator """rankbased""" +596 19 dataset """kinships""" +596 19 model """rotate""" +596 19 loss """nssa""" +596 19 regularizer """no""" +596 19 optimizer """adadelta""" +596 19 training_loop """owa""" 
+596 19 negative_sampler """basic""" +596 19 evaluator """rankbased""" +596 20 dataset """kinships""" +596 20 model """rotate""" +596 20 loss """nssa""" +596 20 regularizer """no""" +596 20 optimizer """adadelta""" +596 20 training_loop """owa""" +596 20 negative_sampler """basic""" +596 20 evaluator """rankbased""" +596 21 dataset """kinships""" +596 21 model """rotate""" +596 21 loss """nssa""" +596 21 regularizer """no""" +596 21 optimizer """adadelta""" +596 21 training_loop """owa""" +596 21 negative_sampler """basic""" +596 21 evaluator """rankbased""" +596 22 dataset """kinships""" +596 22 model """rotate""" +596 22 loss """nssa""" +596 22 regularizer """no""" +596 22 optimizer """adadelta""" +596 22 training_loop """owa""" +596 22 negative_sampler """basic""" +596 22 evaluator """rankbased""" +596 23 dataset """kinships""" +596 23 model """rotate""" +596 23 loss """nssa""" +596 23 regularizer """no""" +596 23 optimizer """adadelta""" +596 23 training_loop """owa""" +596 23 negative_sampler """basic""" +596 23 evaluator """rankbased""" +596 24 dataset """kinships""" +596 24 model """rotate""" +596 24 loss """nssa""" +596 24 regularizer """no""" +596 24 optimizer """adadelta""" +596 24 training_loop """owa""" +596 24 negative_sampler """basic""" +596 24 evaluator """rankbased""" +596 25 dataset """kinships""" +596 25 model """rotate""" +596 25 loss """nssa""" +596 25 regularizer """no""" +596 25 optimizer """adadelta""" +596 25 training_loop """owa""" +596 25 negative_sampler """basic""" +596 25 evaluator """rankbased""" +596 26 dataset """kinships""" +596 26 model """rotate""" +596 26 loss """nssa""" +596 26 regularizer """no""" +596 26 optimizer """adadelta""" +596 26 training_loop """owa""" +596 26 negative_sampler """basic""" +596 26 evaluator """rankbased""" +596 27 dataset """kinships""" +596 27 model """rotate""" +596 27 loss """nssa""" +596 27 regularizer """no""" +596 27 optimizer """adadelta""" +596 27 training_loop """owa""" +596 27 
negative_sampler """basic""" +596 27 evaluator """rankbased""" +596 28 dataset """kinships""" +596 28 model """rotate""" +596 28 loss """nssa""" +596 28 regularizer """no""" +596 28 optimizer """adadelta""" +596 28 training_loop """owa""" +596 28 negative_sampler """basic""" +596 28 evaluator """rankbased""" +596 29 dataset """kinships""" +596 29 model """rotate""" +596 29 loss """nssa""" +596 29 regularizer """no""" +596 29 optimizer """adadelta""" +596 29 training_loop """owa""" +596 29 negative_sampler """basic""" +596 29 evaluator """rankbased""" +596 30 dataset """kinships""" +596 30 model """rotate""" +596 30 loss """nssa""" +596 30 regularizer """no""" +596 30 optimizer """adadelta""" +596 30 training_loop """owa""" +596 30 negative_sampler """basic""" +596 30 evaluator """rankbased""" +596 31 dataset """kinships""" +596 31 model """rotate""" +596 31 loss """nssa""" +596 31 regularizer """no""" +596 31 optimizer """adadelta""" +596 31 training_loop """owa""" +596 31 negative_sampler """basic""" +596 31 evaluator """rankbased""" +596 32 dataset """kinships""" +596 32 model """rotate""" +596 32 loss """nssa""" +596 32 regularizer """no""" +596 32 optimizer """adadelta""" +596 32 training_loop """owa""" +596 32 negative_sampler """basic""" +596 32 evaluator """rankbased""" +596 33 dataset """kinships""" +596 33 model """rotate""" +596 33 loss """nssa""" +596 33 regularizer """no""" +596 33 optimizer """adadelta""" +596 33 training_loop """owa""" +596 33 negative_sampler """basic""" +596 33 evaluator """rankbased""" +596 34 dataset """kinships""" +596 34 model """rotate""" +596 34 loss """nssa""" +596 34 regularizer """no""" +596 34 optimizer """adadelta""" +596 34 training_loop """owa""" +596 34 negative_sampler """basic""" +596 34 evaluator """rankbased""" +596 35 dataset """kinships""" +596 35 model """rotate""" +596 35 loss """nssa""" +596 35 regularizer """no""" +596 35 optimizer """adadelta""" +596 35 training_loop """owa""" +596 35 negative_sampler 
"""basic""" +596 35 evaluator """rankbased""" +596 36 dataset """kinships""" +596 36 model """rotate""" +596 36 loss """nssa""" +596 36 regularizer """no""" +596 36 optimizer """adadelta""" +596 36 training_loop """owa""" +596 36 negative_sampler """basic""" +596 36 evaluator """rankbased""" +596 37 dataset """kinships""" +596 37 model """rotate""" +596 37 loss """nssa""" +596 37 regularizer """no""" +596 37 optimizer """adadelta""" +596 37 training_loop """owa""" +596 37 negative_sampler """basic""" +596 37 evaluator """rankbased""" +596 38 dataset """kinships""" +596 38 model """rotate""" +596 38 loss """nssa""" +596 38 regularizer """no""" +596 38 optimizer """adadelta""" +596 38 training_loop """owa""" +596 38 negative_sampler """basic""" +596 38 evaluator """rankbased""" +596 39 dataset """kinships""" +596 39 model """rotate""" +596 39 loss """nssa""" +596 39 regularizer """no""" +596 39 optimizer """adadelta""" +596 39 training_loop """owa""" +596 39 negative_sampler """basic""" +596 39 evaluator """rankbased""" +596 40 dataset """kinships""" +596 40 model """rotate""" +596 40 loss """nssa""" +596 40 regularizer """no""" +596 40 optimizer """adadelta""" +596 40 training_loop """owa""" +596 40 negative_sampler """basic""" +596 40 evaluator """rankbased""" +596 41 dataset """kinships""" +596 41 model """rotate""" +596 41 loss """nssa""" +596 41 regularizer """no""" +596 41 optimizer """adadelta""" +596 41 training_loop """owa""" +596 41 negative_sampler """basic""" +596 41 evaluator """rankbased""" +596 42 dataset """kinships""" +596 42 model """rotate""" +596 42 loss """nssa""" +596 42 regularizer """no""" +596 42 optimizer """adadelta""" +596 42 training_loop """owa""" +596 42 negative_sampler """basic""" +596 42 evaluator """rankbased""" +596 43 dataset """kinships""" +596 43 model """rotate""" +596 43 loss """nssa""" +596 43 regularizer """no""" +596 43 optimizer """adadelta""" +596 43 training_loop """owa""" +596 43 negative_sampler """basic""" +596 43 
evaluator """rankbased""" +596 44 dataset """kinships""" +596 44 model """rotate""" +596 44 loss """nssa""" +596 44 regularizer """no""" +596 44 optimizer """adadelta""" +596 44 training_loop """owa""" +596 44 negative_sampler """basic""" +596 44 evaluator """rankbased""" +596 45 dataset """kinships""" +596 45 model """rotate""" +596 45 loss """nssa""" +596 45 regularizer """no""" +596 45 optimizer """adadelta""" +596 45 training_loop """owa""" +596 45 negative_sampler """basic""" +596 45 evaluator """rankbased""" +596 46 dataset """kinships""" +596 46 model """rotate""" +596 46 loss """nssa""" +596 46 regularizer """no""" +596 46 optimizer """adadelta""" +596 46 training_loop """owa""" +596 46 negative_sampler """basic""" +596 46 evaluator """rankbased""" +596 47 dataset """kinships""" +596 47 model """rotate""" +596 47 loss """nssa""" +596 47 regularizer """no""" +596 47 optimizer """adadelta""" +596 47 training_loop """owa""" +596 47 negative_sampler """basic""" +596 47 evaluator """rankbased""" +596 48 dataset """kinships""" +596 48 model """rotate""" +596 48 loss """nssa""" +596 48 regularizer """no""" +596 48 optimizer """adadelta""" +596 48 training_loop """owa""" +596 48 negative_sampler """basic""" +596 48 evaluator """rankbased""" +596 49 dataset """kinships""" +596 49 model """rotate""" +596 49 loss """nssa""" +596 49 regularizer """no""" +596 49 optimizer """adadelta""" +596 49 training_loop """owa""" +596 49 negative_sampler """basic""" +596 49 evaluator """rankbased""" +596 50 dataset """kinships""" +596 50 model """rotate""" +596 50 loss """nssa""" +596 50 regularizer """no""" +596 50 optimizer """adadelta""" +596 50 training_loop """owa""" +596 50 negative_sampler """basic""" +596 50 evaluator """rankbased""" +596 51 dataset """kinships""" +596 51 model """rotate""" +596 51 loss """nssa""" +596 51 regularizer """no""" +596 51 optimizer """adadelta""" +596 51 training_loop """owa""" +596 51 negative_sampler """basic""" +596 51 evaluator 
"""rankbased""" +596 52 dataset """kinships""" +596 52 model """rotate""" +596 52 loss """nssa""" +596 52 regularizer """no""" +596 52 optimizer """adadelta""" +596 52 training_loop """owa""" +596 52 negative_sampler """basic""" +596 52 evaluator """rankbased""" +596 53 dataset """kinships""" +596 53 model """rotate""" +596 53 loss """nssa""" +596 53 regularizer """no""" +596 53 optimizer """adadelta""" +596 53 training_loop """owa""" +596 53 negative_sampler """basic""" +596 53 evaluator """rankbased""" +596 54 dataset """kinships""" +596 54 model """rotate""" +596 54 loss """nssa""" +596 54 regularizer """no""" +596 54 optimizer """adadelta""" +596 54 training_loop """owa""" +596 54 negative_sampler """basic""" +596 54 evaluator """rankbased""" +596 55 dataset """kinships""" +596 55 model """rotate""" +596 55 loss """nssa""" +596 55 regularizer """no""" +596 55 optimizer """adadelta""" +596 55 training_loop """owa""" +596 55 negative_sampler """basic""" +596 55 evaluator """rankbased""" +596 56 dataset """kinships""" +596 56 model """rotate""" +596 56 loss """nssa""" +596 56 regularizer """no""" +596 56 optimizer """adadelta""" +596 56 training_loop """owa""" +596 56 negative_sampler """basic""" +596 56 evaluator """rankbased""" +596 57 dataset """kinships""" +596 57 model """rotate""" +596 57 loss """nssa""" +596 57 regularizer """no""" +596 57 optimizer """adadelta""" +596 57 training_loop """owa""" +596 57 negative_sampler """basic""" +596 57 evaluator """rankbased""" +596 58 dataset """kinships""" +596 58 model """rotate""" +596 58 loss """nssa""" +596 58 regularizer """no""" +596 58 optimizer """adadelta""" +596 58 training_loop """owa""" +596 58 negative_sampler """basic""" +596 58 evaluator """rankbased""" +596 59 dataset """kinships""" +596 59 model """rotate""" +596 59 loss """nssa""" +596 59 regularizer """no""" +596 59 optimizer """adadelta""" +596 59 training_loop """owa""" +596 59 negative_sampler """basic""" +596 59 evaluator """rankbased""" +596 60 
dataset """kinships""" +596 60 model """rotate""" +596 60 loss """nssa""" +596 60 regularizer """no""" +596 60 optimizer """adadelta""" +596 60 training_loop """owa""" +596 60 negative_sampler """basic""" +596 60 evaluator """rankbased""" +596 61 dataset """kinships""" +596 61 model """rotate""" +596 61 loss """nssa""" +596 61 regularizer """no""" +596 61 optimizer """adadelta""" +596 61 training_loop """owa""" +596 61 negative_sampler """basic""" +596 61 evaluator """rankbased""" +596 62 dataset """kinships""" +596 62 model """rotate""" +596 62 loss """nssa""" +596 62 regularizer """no""" +596 62 optimizer """adadelta""" +596 62 training_loop """owa""" +596 62 negative_sampler """basic""" +596 62 evaluator """rankbased""" +596 63 dataset """kinships""" +596 63 model """rotate""" +596 63 loss """nssa""" +596 63 regularizer """no""" +596 63 optimizer """adadelta""" +596 63 training_loop """owa""" +596 63 negative_sampler """basic""" +596 63 evaluator """rankbased""" +596 64 dataset """kinships""" +596 64 model """rotate""" +596 64 loss """nssa""" +596 64 regularizer """no""" +596 64 optimizer """adadelta""" +596 64 training_loop """owa""" +596 64 negative_sampler """basic""" +596 64 evaluator """rankbased""" +596 65 dataset """kinships""" +596 65 model """rotate""" +596 65 loss """nssa""" +596 65 regularizer """no""" +596 65 optimizer """adadelta""" +596 65 training_loop """owa""" +596 65 negative_sampler """basic""" +596 65 evaluator """rankbased""" +596 66 dataset """kinships""" +596 66 model """rotate""" +596 66 loss """nssa""" +596 66 regularizer """no""" +596 66 optimizer """adadelta""" +596 66 training_loop """owa""" +596 66 negative_sampler """basic""" +596 66 evaluator """rankbased""" +596 67 dataset """kinships""" +596 67 model """rotate""" +596 67 loss """nssa""" +596 67 regularizer """no""" +596 67 optimizer """adadelta""" +596 67 training_loop """owa""" +596 67 negative_sampler """basic""" +596 67 evaluator """rankbased""" +596 68 dataset """kinships""" 
+596 68 model """rotate""" +596 68 loss """nssa""" +596 68 regularizer """no""" +596 68 optimizer """adadelta""" +596 68 training_loop """owa""" +596 68 negative_sampler """basic""" +596 68 evaluator """rankbased""" +596 69 dataset """kinships""" +596 69 model """rotate""" +596 69 loss """nssa""" +596 69 regularizer """no""" +596 69 optimizer """adadelta""" +596 69 training_loop """owa""" +596 69 negative_sampler """basic""" +596 69 evaluator """rankbased""" +596 70 dataset """kinships""" +596 70 model """rotate""" +596 70 loss """nssa""" +596 70 regularizer """no""" +596 70 optimizer """adadelta""" +596 70 training_loop """owa""" +596 70 negative_sampler """basic""" +596 70 evaluator """rankbased""" +596 71 dataset """kinships""" +596 71 model """rotate""" +596 71 loss """nssa""" +596 71 regularizer """no""" +596 71 optimizer """adadelta""" +596 71 training_loop """owa""" +596 71 negative_sampler """basic""" +596 71 evaluator """rankbased""" +596 72 dataset """kinships""" +596 72 model """rotate""" +596 72 loss """nssa""" +596 72 regularizer """no""" +596 72 optimizer """adadelta""" +596 72 training_loop """owa""" +596 72 negative_sampler """basic""" +596 72 evaluator """rankbased""" +596 73 dataset """kinships""" +596 73 model """rotate""" +596 73 loss """nssa""" +596 73 regularizer """no""" +596 73 optimizer """adadelta""" +596 73 training_loop """owa""" +596 73 negative_sampler """basic""" +596 73 evaluator """rankbased""" +596 74 dataset """kinships""" +596 74 model """rotate""" +596 74 loss """nssa""" +596 74 regularizer """no""" +596 74 optimizer """adadelta""" +596 74 training_loop """owa""" +596 74 negative_sampler """basic""" +596 74 evaluator """rankbased""" +596 75 dataset """kinships""" +596 75 model """rotate""" +596 75 loss """nssa""" +596 75 regularizer """no""" +596 75 optimizer """adadelta""" +596 75 training_loop """owa""" +596 75 negative_sampler """basic""" +596 75 evaluator """rankbased""" +596 76 dataset """kinships""" +596 76 model 
"""rotate""" +596 76 loss """nssa""" +596 76 regularizer """no""" +596 76 optimizer """adadelta""" +596 76 training_loop """owa""" +596 76 negative_sampler """basic""" +596 76 evaluator """rankbased""" +596 77 dataset """kinships""" +596 77 model """rotate""" +596 77 loss """nssa""" +596 77 regularizer """no""" +596 77 optimizer """adadelta""" +596 77 training_loop """owa""" +596 77 negative_sampler """basic""" +596 77 evaluator """rankbased""" +596 78 dataset """kinships""" +596 78 model """rotate""" +596 78 loss """nssa""" +596 78 regularizer """no""" +596 78 optimizer """adadelta""" +596 78 training_loop """owa""" +596 78 negative_sampler """basic""" +596 78 evaluator """rankbased""" +596 79 dataset """kinships""" +596 79 model """rotate""" +596 79 loss """nssa""" +596 79 regularizer """no""" +596 79 optimizer """adadelta""" +596 79 training_loop """owa""" +596 79 negative_sampler """basic""" +596 79 evaluator """rankbased""" +596 80 dataset """kinships""" +596 80 model """rotate""" +596 80 loss """nssa""" +596 80 regularizer """no""" +596 80 optimizer """adadelta""" +596 80 training_loop """owa""" +596 80 negative_sampler """basic""" +596 80 evaluator """rankbased""" +596 81 dataset """kinships""" +596 81 model """rotate""" +596 81 loss """nssa""" +596 81 regularizer """no""" +596 81 optimizer """adadelta""" +596 81 training_loop """owa""" +596 81 negative_sampler """basic""" +596 81 evaluator """rankbased""" +596 82 dataset """kinships""" +596 82 model """rotate""" +596 82 loss """nssa""" +596 82 regularizer """no""" +596 82 optimizer """adadelta""" +596 82 training_loop """owa""" +596 82 negative_sampler """basic""" +596 82 evaluator """rankbased""" +596 83 dataset """kinships""" +596 83 model """rotate""" +596 83 loss """nssa""" +596 83 regularizer """no""" +596 83 optimizer """adadelta""" +596 83 training_loop """owa""" +596 83 negative_sampler """basic""" +596 83 evaluator """rankbased""" +596 84 dataset """kinships""" +596 84 model """rotate""" +596 84 
loss """nssa""" +596 84 regularizer """no""" +596 84 optimizer """adadelta""" +596 84 training_loop """owa""" +596 84 negative_sampler """basic""" +596 84 evaluator """rankbased""" +596 85 dataset """kinships""" +596 85 model """rotate""" +596 85 loss """nssa""" +596 85 regularizer """no""" +596 85 optimizer """adadelta""" +596 85 training_loop """owa""" +596 85 negative_sampler """basic""" +596 85 evaluator """rankbased""" +596 86 dataset """kinships""" +596 86 model """rotate""" +596 86 loss """nssa""" +596 86 regularizer """no""" +596 86 optimizer """adadelta""" +596 86 training_loop """owa""" +596 86 negative_sampler """basic""" +596 86 evaluator """rankbased""" +596 87 dataset """kinships""" +596 87 model """rotate""" +596 87 loss """nssa""" +596 87 regularizer """no""" +596 87 optimizer """adadelta""" +596 87 training_loop """owa""" +596 87 negative_sampler """basic""" +596 87 evaluator """rankbased""" +596 88 dataset """kinships""" +596 88 model """rotate""" +596 88 loss """nssa""" +596 88 regularizer """no""" +596 88 optimizer """adadelta""" +596 88 training_loop """owa""" +596 88 negative_sampler """basic""" +596 88 evaluator """rankbased""" +596 89 dataset """kinships""" +596 89 model """rotate""" +596 89 loss """nssa""" +596 89 regularizer """no""" +596 89 optimizer """adadelta""" +596 89 training_loop """owa""" +596 89 negative_sampler """basic""" +596 89 evaluator """rankbased""" +596 90 dataset """kinships""" +596 90 model """rotate""" +596 90 loss """nssa""" +596 90 regularizer """no""" +596 90 optimizer """adadelta""" +596 90 training_loop """owa""" +596 90 negative_sampler """basic""" +596 90 evaluator """rankbased""" +596 91 dataset """kinships""" +596 91 model """rotate""" +596 91 loss """nssa""" +596 91 regularizer """no""" +596 91 optimizer """adadelta""" +596 91 training_loop """owa""" +596 91 negative_sampler """basic""" +596 91 evaluator """rankbased""" +596 92 dataset """kinships""" +596 92 model """rotate""" +596 92 loss """nssa""" +596 92 
regularizer """no""" +596 92 optimizer """adadelta""" +596 92 training_loop """owa""" +596 92 negative_sampler """basic""" +596 92 evaluator """rankbased""" +596 93 dataset """kinships""" +596 93 model """rotate""" +596 93 loss """nssa""" +596 93 regularizer """no""" +596 93 optimizer """adadelta""" +596 93 training_loop """owa""" +596 93 negative_sampler """basic""" +596 93 evaluator """rankbased""" +596 94 dataset """kinships""" +596 94 model """rotate""" +596 94 loss """nssa""" +596 94 regularizer """no""" +596 94 optimizer """adadelta""" +596 94 training_loop """owa""" +596 94 negative_sampler """basic""" +596 94 evaluator """rankbased""" +596 95 dataset """kinships""" +596 95 model """rotate""" +596 95 loss """nssa""" +596 95 regularizer """no""" +596 95 optimizer """adadelta""" +596 95 training_loop """owa""" +596 95 negative_sampler """basic""" +596 95 evaluator """rankbased""" +596 96 dataset """kinships""" +596 96 model """rotate""" +596 96 loss """nssa""" +596 96 regularizer """no""" +596 96 optimizer """adadelta""" +596 96 training_loop """owa""" +596 96 negative_sampler """basic""" +596 96 evaluator """rankbased""" +596 97 dataset """kinships""" +596 97 model """rotate""" +596 97 loss """nssa""" +596 97 regularizer """no""" +596 97 optimizer """adadelta""" +596 97 training_loop """owa""" +596 97 negative_sampler """basic""" +596 97 evaluator """rankbased""" +596 98 dataset """kinships""" +596 98 model """rotate""" +596 98 loss """nssa""" +596 98 regularizer """no""" +596 98 optimizer """adadelta""" +596 98 training_loop """owa""" +596 98 negative_sampler """basic""" +596 98 evaluator """rankbased""" +596 99 dataset """kinships""" +596 99 model """rotate""" +596 99 loss """nssa""" +596 99 regularizer """no""" +596 99 optimizer """adadelta""" +596 99 training_loop """owa""" +596 99 negative_sampler """basic""" +596 99 evaluator """rankbased""" +596 100 dataset """kinships""" +596 100 model """rotate""" +596 100 loss """nssa""" +596 100 regularizer 
"""no""" +596 100 optimizer """adadelta""" +596 100 training_loop """owa""" +596 100 negative_sampler """basic""" +596 100 evaluator """rankbased""" +597 1 model.embedding_dim 1.0 +597 1 loss.margin 19.860245366205078 +597 1 loss.adversarial_temperature 0.3473258902243305 +597 1 optimizer.lr 0.0018484714991919456 +597 1 negative_sampler.num_negs_per_pos 0.0 +597 1 training.batch_size 2.0 +597 2 model.embedding_dim 2.0 +597 2 loss.margin 24.892395040904386 +597 2 loss.adversarial_temperature 0.469047939578541 +597 2 optimizer.lr 0.001967006377193805 +597 2 negative_sampler.num_negs_per_pos 26.0 +597 2 training.batch_size 1.0 +597 3 model.embedding_dim 2.0 +597 3 loss.margin 14.758125026488726 +597 3 loss.adversarial_temperature 0.22369418828138338 +597 3 optimizer.lr 0.0022199114016990366 +597 3 negative_sampler.num_negs_per_pos 25.0 +597 3 training.batch_size 2.0 +597 4 model.embedding_dim 1.0 +597 4 loss.margin 4.103414588822976 +597 4 loss.adversarial_temperature 0.1862985635569512 +597 4 optimizer.lr 0.004596151978198088 +597 4 negative_sampler.num_negs_per_pos 15.0 +597 4 training.batch_size 1.0 +597 5 model.embedding_dim 2.0 +597 5 loss.margin 10.151607625534336 +597 5 loss.adversarial_temperature 0.22695838779962121 +597 5 optimizer.lr 0.0013731569539753036 +597 5 negative_sampler.num_negs_per_pos 97.0 +597 5 training.batch_size 1.0 +597 6 model.embedding_dim 2.0 +597 6 loss.margin 5.193115848402933 +597 6 loss.adversarial_temperature 0.995237332881329 +597 6 optimizer.lr 0.05248970369871745 +597 6 negative_sampler.num_negs_per_pos 53.0 +597 6 training.batch_size 2.0 +597 7 model.embedding_dim 2.0 +597 7 loss.margin 27.862709616533976 +597 7 loss.adversarial_temperature 0.35673954948234654 +597 7 optimizer.lr 0.06813368124464915 +597 7 negative_sampler.num_negs_per_pos 44.0 +597 7 training.batch_size 0.0 +597 8 model.embedding_dim 0.0 +597 8 loss.margin 19.357403454674547 +597 8 loss.adversarial_temperature 0.9065250501026153 +597 8 optimizer.lr 
0.0603562998528133 +597 8 negative_sampler.num_negs_per_pos 75.0 +597 8 training.batch_size 1.0 +597 9 model.embedding_dim 2.0 +597 9 loss.margin 15.299753074715898 +597 9 loss.adversarial_temperature 0.5056530546494398 +597 9 optimizer.lr 0.005710394929209385 +597 9 negative_sampler.num_negs_per_pos 64.0 +597 9 training.batch_size 0.0 +597 10 model.embedding_dim 2.0 +597 10 loss.margin 19.299483891451697 +597 10 loss.adversarial_temperature 0.7601054327888774 +597 10 optimizer.lr 0.007414901809078512 +597 10 negative_sampler.num_negs_per_pos 33.0 +597 10 training.batch_size 0.0 +597 11 model.embedding_dim 1.0 +597 11 loss.margin 29.391322499280914 +597 11 loss.adversarial_temperature 0.6512210369981167 +597 11 optimizer.lr 0.0017181117762865826 +597 11 negative_sampler.num_negs_per_pos 22.0 +597 11 training.batch_size 0.0 +597 12 model.embedding_dim 1.0 +597 12 loss.margin 9.105849813068378 +597 12 loss.adversarial_temperature 0.13425343718466295 +597 12 optimizer.lr 0.0680492043506938 +597 12 negative_sampler.num_negs_per_pos 67.0 +597 12 training.batch_size 2.0 +597 13 model.embedding_dim 0.0 +597 13 loss.margin 29.28939684352111 +597 13 loss.adversarial_temperature 0.8962383080169772 +597 13 optimizer.lr 0.0335585989777517 +597 13 negative_sampler.num_negs_per_pos 87.0 +597 13 training.batch_size 2.0 +597 14 model.embedding_dim 0.0 +597 14 loss.margin 14.985249308139865 +597 14 loss.adversarial_temperature 0.8378256330664162 +597 14 optimizer.lr 0.016806967452606885 +597 14 negative_sampler.num_negs_per_pos 2.0 +597 14 training.batch_size 1.0 +597 15 model.embedding_dim 2.0 +597 15 loss.margin 19.003034389251432 +597 15 loss.adversarial_temperature 0.488734876915716 +597 15 optimizer.lr 0.033372556103388636 +597 15 negative_sampler.num_negs_per_pos 77.0 +597 15 training.batch_size 0.0 +597 16 model.embedding_dim 2.0 +597 16 loss.margin 9.96983411452006 +597 16 loss.adversarial_temperature 0.9401151551982844 +597 16 optimizer.lr 0.003016377876034944 +597 16 
negative_sampler.num_negs_per_pos 89.0 +597 16 training.batch_size 0.0 +597 17 model.embedding_dim 0.0 +597 17 loss.margin 12.569955212913547 +597 17 loss.adversarial_temperature 0.10922281426045344 +597 17 optimizer.lr 0.08590225536948076 +597 17 negative_sampler.num_negs_per_pos 53.0 +597 17 training.batch_size 1.0 +597 18 model.embedding_dim 2.0 +597 18 loss.margin 8.80948902256746 +597 18 loss.adversarial_temperature 0.8049926882855089 +597 18 optimizer.lr 0.005615104973323972 +597 18 negative_sampler.num_negs_per_pos 86.0 +597 18 training.batch_size 1.0 +597 19 model.embedding_dim 0.0 +597 19 loss.margin 2.18013647429442 +597 19 loss.adversarial_temperature 0.4590762880310214 +597 19 optimizer.lr 0.007123628774052106 +597 19 negative_sampler.num_negs_per_pos 58.0 +597 19 training.batch_size 0.0 +597 20 model.embedding_dim 2.0 +597 20 loss.margin 20.36181642524099 +597 20 loss.adversarial_temperature 0.8048548336133283 +597 20 optimizer.lr 0.0012290095287437312 +597 20 negative_sampler.num_negs_per_pos 93.0 +597 20 training.batch_size 1.0 +597 21 model.embedding_dim 2.0 +597 21 loss.margin 8.288525137638805 +597 21 loss.adversarial_temperature 0.8433847830505727 +597 21 optimizer.lr 0.01670241196812479 +597 21 negative_sampler.num_negs_per_pos 37.0 +597 21 training.batch_size 0.0 +597 22 model.embedding_dim 2.0 +597 22 loss.margin 5.7893611389074 +597 22 loss.adversarial_temperature 0.2678767776547044 +597 22 optimizer.lr 0.0034245841030741144 +597 22 negative_sampler.num_negs_per_pos 16.0 +597 22 training.batch_size 1.0 +597 23 model.embedding_dim 2.0 +597 23 loss.margin 1.3338624170489766 +597 23 loss.adversarial_temperature 0.6804224393671838 +597 23 optimizer.lr 0.002347352901048851 +597 23 negative_sampler.num_negs_per_pos 91.0 +597 23 training.batch_size 2.0 +597 24 model.embedding_dim 2.0 +597 24 loss.margin 18.477560765317577 +597 24 loss.adversarial_temperature 0.8838443105707782 +597 24 optimizer.lr 0.017557445825385026 +597 24 
negative_sampler.num_negs_per_pos 36.0 +597 24 training.batch_size 0.0 +597 25 model.embedding_dim 2.0 +597 25 loss.margin 12.742363791983772 +597 25 loss.adversarial_temperature 0.7872838487459903 +597 25 optimizer.lr 0.006707444980780342 +597 25 negative_sampler.num_negs_per_pos 69.0 +597 25 training.batch_size 2.0 +597 26 model.embedding_dim 0.0 +597 26 loss.margin 26.311523747900203 +597 26 loss.adversarial_temperature 0.9458640548351925 +597 26 optimizer.lr 0.008109030452263619 +597 26 negative_sampler.num_negs_per_pos 71.0 +597 26 training.batch_size 2.0 +597 27 model.embedding_dim 0.0 +597 27 loss.margin 26.050543286081854 +597 27 loss.adversarial_temperature 0.254334011238066 +597 27 optimizer.lr 0.02242522227058723 +597 27 negative_sampler.num_negs_per_pos 49.0 +597 27 training.batch_size 1.0 +597 28 model.embedding_dim 0.0 +597 28 loss.margin 7.412438411575719 +597 28 loss.adversarial_temperature 0.7397111986874896 +597 28 optimizer.lr 0.005129094730618952 +597 28 negative_sampler.num_negs_per_pos 65.0 +597 28 training.batch_size 0.0 +597 29 model.embedding_dim 0.0 +597 29 loss.margin 15.276847309290616 +597 29 loss.adversarial_temperature 0.14583435922673318 +597 29 optimizer.lr 0.03635409625706015 +597 29 negative_sampler.num_negs_per_pos 65.0 +597 29 training.batch_size 0.0 +597 30 model.embedding_dim 1.0 +597 30 loss.margin 21.80982568003943 +597 30 loss.adversarial_temperature 0.533330467325263 +597 30 optimizer.lr 0.001052346979167275 +597 30 negative_sampler.num_negs_per_pos 54.0 +597 30 training.batch_size 0.0 +597 31 model.embedding_dim 2.0 +597 31 loss.margin 24.286030549204668 +597 31 loss.adversarial_temperature 0.9394300765036692 +597 31 optimizer.lr 0.00426076762081301 +597 31 negative_sampler.num_negs_per_pos 0.0 +597 31 training.batch_size 0.0 +597 32 model.embedding_dim 0.0 +597 32 loss.margin 5.1847315641795655 +597 32 loss.adversarial_temperature 0.6955561252226914 +597 32 optimizer.lr 0.055405015328837785 +597 32 
negative_sampler.num_negs_per_pos 54.0 +597 32 training.batch_size 0.0 +597 33 model.embedding_dim 0.0 +597 33 loss.margin 23.08720061811062 +597 33 loss.adversarial_temperature 0.24516315291122584 +597 33 optimizer.lr 0.022192609831427037 +597 33 negative_sampler.num_negs_per_pos 58.0 +597 33 training.batch_size 0.0 +597 34 model.embedding_dim 2.0 +597 34 loss.margin 20.012465983194176 +597 34 loss.adversarial_temperature 0.10029976428619669 +597 34 optimizer.lr 0.001332384136652758 +597 34 negative_sampler.num_negs_per_pos 8.0 +597 34 training.batch_size 2.0 +597 35 model.embedding_dim 1.0 +597 35 loss.margin 25.22884830029148 +597 35 loss.adversarial_temperature 0.9198001336004551 +597 35 optimizer.lr 0.003574882453585489 +597 35 negative_sampler.num_negs_per_pos 31.0 +597 35 training.batch_size 2.0 +597 36 model.embedding_dim 0.0 +597 36 loss.margin 27.657005525472716 +597 36 loss.adversarial_temperature 0.43258216190400706 +597 36 optimizer.lr 0.08821279667866258 +597 36 negative_sampler.num_negs_per_pos 28.0 +597 36 training.batch_size 2.0 +597 37 model.embedding_dim 2.0 +597 37 loss.margin 23.359781390344263 +597 37 loss.adversarial_temperature 0.1302533059386169 +597 37 optimizer.lr 0.09879181694163325 +597 37 negative_sampler.num_negs_per_pos 60.0 +597 37 training.batch_size 0.0 +597 38 model.embedding_dim 1.0 +597 38 loss.margin 23.335393057004254 +597 38 loss.adversarial_temperature 0.3575353715941318 +597 38 optimizer.lr 0.010915956360295055 +597 38 negative_sampler.num_negs_per_pos 77.0 +597 38 training.batch_size 2.0 +597 39 model.embedding_dim 1.0 +597 39 loss.margin 27.82359294379462 +597 39 loss.adversarial_temperature 0.22751830377814522 +597 39 optimizer.lr 0.044157503929164524 +597 39 negative_sampler.num_negs_per_pos 34.0 +597 39 training.batch_size 2.0 +597 40 model.embedding_dim 0.0 +597 40 loss.margin 15.573926586848696 +597 40 loss.adversarial_temperature 0.249323623959754 +597 40 optimizer.lr 0.001557150775489484 +597 40 
negative_sampler.num_negs_per_pos 25.0 +597 40 training.batch_size 0.0 +597 41 model.embedding_dim 2.0 +597 41 loss.margin 25.870214229722887 +597 41 loss.adversarial_temperature 0.507484596390903 +597 41 optimizer.lr 0.0485923233755261 +597 41 negative_sampler.num_negs_per_pos 67.0 +597 41 training.batch_size 0.0 +597 42 model.embedding_dim 2.0 +597 42 loss.margin 22.324725413642636 +597 42 loss.adversarial_temperature 0.3244201744434684 +597 42 optimizer.lr 0.0030601281199677164 +597 42 negative_sampler.num_negs_per_pos 54.0 +597 42 training.batch_size 1.0 +597 43 model.embedding_dim 0.0 +597 43 loss.margin 8.290164171910359 +597 43 loss.adversarial_temperature 0.46941777241719795 +597 43 optimizer.lr 0.06324578124422323 +597 43 negative_sampler.num_negs_per_pos 26.0 +597 43 training.batch_size 2.0 +597 44 model.embedding_dim 1.0 +597 44 loss.margin 10.294323472455577 +597 44 loss.adversarial_temperature 0.242680092310785 +597 44 optimizer.lr 0.03393492278086166 +597 44 negative_sampler.num_negs_per_pos 33.0 +597 44 training.batch_size 0.0 +597 45 model.embedding_dim 0.0 +597 45 loss.margin 29.72656527095195 +597 45 loss.adversarial_temperature 0.24021231993071743 +597 45 optimizer.lr 0.010069229017838957 +597 45 negative_sampler.num_negs_per_pos 7.0 +597 45 training.batch_size 2.0 +597 46 model.embedding_dim 1.0 +597 46 loss.margin 26.015471101746336 +597 46 loss.adversarial_temperature 0.5486557120911166 +597 46 optimizer.lr 0.011807738900965254 +597 46 negative_sampler.num_negs_per_pos 45.0 +597 46 training.batch_size 1.0 +597 47 model.embedding_dim 1.0 +597 47 loss.margin 5.971721605234015 +597 47 loss.adversarial_temperature 0.5384719306550726 +597 47 optimizer.lr 0.01759386876258224 +597 47 negative_sampler.num_negs_per_pos 1.0 +597 47 training.batch_size 1.0 +597 48 model.embedding_dim 2.0 +597 48 loss.margin 26.346801702947946 +597 48 loss.adversarial_temperature 0.7801224732259733 +597 48 optimizer.lr 0.03500945248735984 +597 48 
negative_sampler.num_negs_per_pos 25.0 +597 48 training.batch_size 0.0 +597 49 model.embedding_dim 2.0 +597 49 loss.margin 14.163473873271489 +597 49 loss.adversarial_temperature 0.39387118926987535 +597 49 optimizer.lr 0.0040006235898062065 +597 49 negative_sampler.num_negs_per_pos 44.0 +597 49 training.batch_size 0.0 +597 50 model.embedding_dim 1.0 +597 50 loss.margin 11.586227160766839 +597 50 loss.adversarial_temperature 0.6276816160023012 +597 50 optimizer.lr 0.0026480948360693853 +597 50 negative_sampler.num_negs_per_pos 17.0 +597 50 training.batch_size 1.0 +597 51 model.embedding_dim 0.0 +597 51 loss.margin 2.958559672380916 +597 51 loss.adversarial_temperature 0.1398700951943467 +597 51 optimizer.lr 0.0012997406729030577 +597 51 negative_sampler.num_negs_per_pos 99.0 +597 51 training.batch_size 1.0 +597 52 model.embedding_dim 0.0 +597 52 loss.margin 22.968903880507447 +597 52 loss.adversarial_temperature 0.38360766046377154 +597 52 optimizer.lr 0.026492108012786442 +597 52 negative_sampler.num_negs_per_pos 54.0 +597 52 training.batch_size 0.0 +597 53 model.embedding_dim 0.0 +597 53 loss.margin 27.14703814965153 +597 53 loss.adversarial_temperature 0.7537123311829036 +597 53 optimizer.lr 0.001059937786474991 +597 53 negative_sampler.num_negs_per_pos 42.0 +597 53 training.batch_size 1.0 +597 54 model.embedding_dim 1.0 +597 54 loss.margin 13.352212775671621 +597 54 loss.adversarial_temperature 0.3158316226139742 +597 54 optimizer.lr 0.00131352903740439 +597 54 negative_sampler.num_negs_per_pos 40.0 +597 54 training.batch_size 0.0 +597 55 model.embedding_dim 2.0 +597 55 loss.margin 1.6997615142515916 +597 55 loss.adversarial_temperature 0.7433625618790717 +597 55 optimizer.lr 0.007501817627301184 +597 55 negative_sampler.num_negs_per_pos 75.0 +597 55 training.batch_size 0.0 +597 56 model.embedding_dim 1.0 +597 56 loss.margin 10.0699924946056 +597 56 loss.adversarial_temperature 0.2200416620003507 +597 56 optimizer.lr 0.0016513487698469984 +597 56 
negative_sampler.num_negs_per_pos 87.0 +597 56 training.batch_size 2.0 +597 57 model.embedding_dim 2.0 +597 57 loss.margin 8.08870225443158 +597 57 loss.adversarial_temperature 0.39949283650740114 +597 57 optimizer.lr 0.025865342837582312 +597 57 negative_sampler.num_negs_per_pos 60.0 +597 57 training.batch_size 1.0 +597 58 model.embedding_dim 0.0 +597 58 loss.margin 23.214874617185473 +597 58 loss.adversarial_temperature 0.39254303415254266 +597 58 optimizer.lr 0.0048941889272823705 +597 58 negative_sampler.num_negs_per_pos 84.0 +597 58 training.batch_size 1.0 +597 59 model.embedding_dim 1.0 +597 59 loss.margin 6.206544170957066 +597 59 loss.adversarial_temperature 0.9263301073500086 +597 59 optimizer.lr 0.001759335424206204 +597 59 negative_sampler.num_negs_per_pos 41.0 +597 59 training.batch_size 2.0 +597 60 model.embedding_dim 2.0 +597 60 loss.margin 27.79411550972122 +597 60 loss.adversarial_temperature 0.6035575222401539 +597 60 optimizer.lr 0.05868334760675399 +597 60 negative_sampler.num_negs_per_pos 46.0 +597 60 training.batch_size 0.0 +597 61 model.embedding_dim 2.0 +597 61 loss.margin 16.120598590121883 +597 61 loss.adversarial_temperature 0.31716303159589554 +597 61 optimizer.lr 0.007966930794340205 +597 61 negative_sampler.num_negs_per_pos 3.0 +597 61 training.batch_size 0.0 +597 62 model.embedding_dim 1.0 +597 62 loss.margin 20.724894862266154 +597 62 loss.adversarial_temperature 0.4303857347743678 +597 62 optimizer.lr 0.009181973786076556 +597 62 negative_sampler.num_negs_per_pos 14.0 +597 62 training.batch_size 1.0 +597 63 model.embedding_dim 1.0 +597 63 loss.margin 11.110507599563977 +597 63 loss.adversarial_temperature 0.6645129946586001 +597 63 optimizer.lr 0.0038409627973799996 +597 63 negative_sampler.num_negs_per_pos 24.0 +597 63 training.batch_size 2.0 +597 64 model.embedding_dim 0.0 +597 64 loss.margin 20.0052068678171 +597 64 loss.adversarial_temperature 0.12598563472343818 +597 64 optimizer.lr 0.0023410917485357056 +597 64 
negative_sampler.num_negs_per_pos 47.0 +597 64 training.batch_size 0.0 +597 65 model.embedding_dim 2.0 +597 65 loss.margin 2.9753829654879347 +597 65 loss.adversarial_temperature 0.7550079842784134 +597 65 optimizer.lr 0.059720558295439825 +597 65 negative_sampler.num_negs_per_pos 85.0 +597 65 training.batch_size 1.0 +597 66 model.embedding_dim 2.0 +597 66 loss.margin 17.699759287537503 +597 66 loss.adversarial_temperature 0.9099132573379718 +597 66 optimizer.lr 0.04166742808755055 +597 66 negative_sampler.num_negs_per_pos 5.0 +597 66 training.batch_size 2.0 +597 67 model.embedding_dim 0.0 +597 67 loss.margin 14.445038374537837 +597 67 loss.adversarial_temperature 0.6159433332085246 +597 67 optimizer.lr 0.006300257862424754 +597 67 negative_sampler.num_negs_per_pos 95.0 +597 67 training.batch_size 0.0 +597 68 model.embedding_dim 0.0 +597 68 loss.margin 24.857235127790293 +597 68 loss.adversarial_temperature 0.2588036244473575 +597 68 optimizer.lr 0.0012771970200731715 +597 68 negative_sampler.num_negs_per_pos 0.0 +597 68 training.batch_size 2.0 +597 69 model.embedding_dim 2.0 +597 69 loss.margin 20.88518767257268 +597 69 loss.adversarial_temperature 0.6851598890751953 +597 69 optimizer.lr 0.018808913900373488 +597 69 negative_sampler.num_negs_per_pos 32.0 +597 69 training.batch_size 1.0 +597 70 model.embedding_dim 0.0 +597 70 loss.margin 10.502437629933901 +597 70 loss.adversarial_temperature 0.3575793411396205 +597 70 optimizer.lr 0.013043082457552996 +597 70 negative_sampler.num_negs_per_pos 40.0 +597 70 training.batch_size 2.0 +597 71 model.embedding_dim 1.0 +597 71 loss.margin 19.445483603105878 +597 71 loss.adversarial_temperature 0.367258712631514 +597 71 optimizer.lr 0.016055286559462877 +597 71 negative_sampler.num_negs_per_pos 86.0 +597 71 training.batch_size 0.0 +597 72 model.embedding_dim 2.0 +597 72 loss.margin 8.839101845581135 +597 72 loss.adversarial_temperature 0.7516327870080662 +597 72 optimizer.lr 0.034968896308772604 +597 72 
negative_sampler.num_negs_per_pos 3.0 +597 72 training.batch_size 2.0 +597 73 model.embedding_dim 0.0 +597 73 loss.margin 26.663065993534772 +597 73 loss.adversarial_temperature 0.6092384156263099 +597 73 optimizer.lr 0.0019012506613636539 +597 73 negative_sampler.num_negs_per_pos 6.0 +597 73 training.batch_size 2.0 +597 74 model.embedding_dim 0.0 +597 74 loss.margin 6.226884779142839 +597 74 loss.adversarial_temperature 0.7460806372988176 +597 74 optimizer.lr 0.0014769470770981457 +597 74 negative_sampler.num_negs_per_pos 26.0 +597 74 training.batch_size 2.0 +597 75 model.embedding_dim 0.0 +597 75 loss.margin 11.302454444841874 +597 75 loss.adversarial_temperature 0.6531074044408994 +597 75 optimizer.lr 0.02788066925574076 +597 75 negative_sampler.num_negs_per_pos 73.0 +597 75 training.batch_size 0.0 +597 76 model.embedding_dim 0.0 +597 76 loss.margin 3.9570899781118403 +597 76 loss.adversarial_temperature 0.4640057170441929 +597 76 optimizer.lr 0.005979028024157932 +597 76 negative_sampler.num_negs_per_pos 34.0 +597 76 training.batch_size 2.0 +597 77 model.embedding_dim 0.0 +597 77 loss.margin 24.494548750431708 +597 77 loss.adversarial_temperature 0.7643284958880303 +597 77 optimizer.lr 0.07956843062817445 +597 77 negative_sampler.num_negs_per_pos 19.0 +597 77 training.batch_size 2.0 +597 78 model.embedding_dim 2.0 +597 78 loss.margin 22.668494552596027 +597 78 loss.adversarial_temperature 0.4026022845810692 +597 78 optimizer.lr 0.03348103262878129 +597 78 negative_sampler.num_negs_per_pos 92.0 +597 78 training.batch_size 1.0 +597 79 model.embedding_dim 0.0 +597 79 loss.margin 16.486876464673436 +597 79 loss.adversarial_temperature 0.8027060421924975 +597 79 optimizer.lr 0.0011722525485186683 +597 79 negative_sampler.num_negs_per_pos 70.0 +597 79 training.batch_size 0.0 +597 80 model.embedding_dim 1.0 +597 80 loss.margin 3.047499195117049 +597 80 loss.adversarial_temperature 0.22063302993211362 +597 80 optimizer.lr 0.010553024019515782 +597 80 
negative_sampler.num_negs_per_pos 78.0 +597 80 training.batch_size 0.0 +597 81 model.embedding_dim 0.0 +597 81 loss.margin 5.296110304098339 +597 81 loss.adversarial_temperature 0.5114097037353681 +597 81 optimizer.lr 0.07757244741753005 +597 81 negative_sampler.num_negs_per_pos 30.0 +597 81 training.batch_size 1.0 +597 82 model.embedding_dim 0.0 +597 82 loss.margin 21.066843448820713 +597 82 loss.adversarial_temperature 0.37279348426119 +597 82 optimizer.lr 0.00920459396351957 +597 82 negative_sampler.num_negs_per_pos 99.0 +597 82 training.batch_size 1.0 +597 83 model.embedding_dim 1.0 +597 83 loss.margin 8.71031975346565 +597 83 loss.adversarial_temperature 0.9895742415260071 +597 83 optimizer.lr 0.001789980516782037 +597 83 negative_sampler.num_negs_per_pos 20.0 +597 83 training.batch_size 2.0 +597 84 model.embedding_dim 0.0 +597 84 loss.margin 19.939399351941223 +597 84 loss.adversarial_temperature 0.37205606546113495 +597 84 optimizer.lr 0.0432964899412437 +597 84 negative_sampler.num_negs_per_pos 11.0 +597 84 training.batch_size 0.0 +597 85 model.embedding_dim 2.0 +597 85 loss.margin 21.25408852795682 +597 85 loss.adversarial_temperature 0.723581459519285 +597 85 optimizer.lr 0.001352530468332793 +597 85 negative_sampler.num_negs_per_pos 90.0 +597 85 training.batch_size 0.0 +597 86 model.embedding_dim 2.0 +597 86 loss.margin 26.08580310436411 +597 86 loss.adversarial_temperature 0.14029605924903668 +597 86 optimizer.lr 0.0014049624945180008 +597 86 negative_sampler.num_negs_per_pos 46.0 +597 86 training.batch_size 2.0 +597 87 model.embedding_dim 1.0 +597 87 loss.margin 17.757547286932805 +597 87 loss.adversarial_temperature 0.32714812013495886 +597 87 optimizer.lr 0.007714808002396771 +597 87 negative_sampler.num_negs_per_pos 73.0 +597 87 training.batch_size 1.0 +597 88 model.embedding_dim 2.0 +597 88 loss.margin 4.259609829663241 +597 88 loss.adversarial_temperature 0.8951561035365861 +597 88 optimizer.lr 0.021821379082864578 +597 88 
negative_sampler.num_negs_per_pos 24.0 +597 88 training.batch_size 1.0 +597 89 model.embedding_dim 0.0 +597 89 loss.margin 6.7526228437397195 +597 89 loss.adversarial_temperature 0.8139576781774703 +597 89 optimizer.lr 0.023281282999812313 +597 89 negative_sampler.num_negs_per_pos 41.0 +597 89 training.batch_size 1.0 +597 90 model.embedding_dim 0.0 +597 90 loss.margin 25.325226910028444 +597 90 loss.adversarial_temperature 0.4541833893205226 +597 90 optimizer.lr 0.006360842851575633 +597 90 negative_sampler.num_negs_per_pos 25.0 +597 90 training.batch_size 2.0 +597 91 model.embedding_dim 1.0 +597 91 loss.margin 24.590683176348655 +597 91 loss.adversarial_temperature 0.22032165826835412 +597 91 optimizer.lr 0.007949428132219438 +597 91 negative_sampler.num_negs_per_pos 61.0 +597 91 training.batch_size 1.0 +597 92 model.embedding_dim 1.0 +597 92 loss.margin 16.85789577200593 +597 92 loss.adversarial_temperature 0.4352167451793253 +597 92 optimizer.lr 0.09620286270812502 +597 92 negative_sampler.num_negs_per_pos 44.0 +597 92 training.batch_size 1.0 +597 93 model.embedding_dim 2.0 +597 93 loss.margin 27.180910090301914 +597 93 loss.adversarial_temperature 0.2805833993573986 +597 93 optimizer.lr 0.06053285133978452 +597 93 negative_sampler.num_negs_per_pos 92.0 +597 93 training.batch_size 0.0 +597 94 model.embedding_dim 0.0 +597 94 loss.margin 28.416189318233855 +597 94 loss.adversarial_temperature 0.439926304018747 +597 94 optimizer.lr 0.015709475332621046 +597 94 negative_sampler.num_negs_per_pos 64.0 +597 94 training.batch_size 0.0 +597 95 model.embedding_dim 2.0 +597 95 loss.margin 15.179491672250427 +597 95 loss.adversarial_temperature 0.294499878322959 +597 95 optimizer.lr 0.008601164276409288 +597 95 negative_sampler.num_negs_per_pos 76.0 +597 95 training.batch_size 1.0 +597 96 model.embedding_dim 0.0 +597 96 loss.margin 20.650219638376694 +597 96 loss.adversarial_temperature 0.9221987183295327 +597 96 optimizer.lr 0.01259294675295957 +597 96 
negative_sampler.num_negs_per_pos 84.0 +597 96 training.batch_size 2.0 +597 97 model.embedding_dim 1.0 +597 97 loss.margin 18.629808711507412 +597 97 loss.adversarial_temperature 0.7868711882484833 +597 97 optimizer.lr 0.015540329148755225 +597 97 negative_sampler.num_negs_per_pos 15.0 +597 97 training.batch_size 0.0 +597 98 model.embedding_dim 1.0 +597 98 loss.margin 16.54440096818498 +597 98 loss.adversarial_temperature 0.7605407942846387 +597 98 optimizer.lr 0.0595153622985403 +597 98 negative_sampler.num_negs_per_pos 92.0 +597 98 training.batch_size 2.0 +597 99 model.embedding_dim 0.0 +597 99 loss.margin 7.984849954613094 +597 99 loss.adversarial_temperature 0.6746294226883846 +597 99 optimizer.lr 0.010063207036663153 +597 99 negative_sampler.num_negs_per_pos 99.0 +597 99 training.batch_size 0.0 +597 100 model.embedding_dim 1.0 +597 100 loss.margin 9.862126562903654 +597 100 loss.adversarial_temperature 0.4823319485040589 +597 100 optimizer.lr 0.0047089201101640854 +597 100 negative_sampler.num_negs_per_pos 1.0 +597 100 training.batch_size 2.0 +597 1 dataset """kinships""" +597 1 model """rotate""" +597 1 loss """nssa""" +597 1 regularizer """no""" +597 1 optimizer """adam""" +597 1 training_loop """owa""" +597 1 negative_sampler """basic""" +597 1 evaluator """rankbased""" +597 2 dataset """kinships""" +597 2 model """rotate""" +597 2 loss """nssa""" +597 2 regularizer """no""" +597 2 optimizer """adam""" +597 2 training_loop """owa""" +597 2 negative_sampler """basic""" +597 2 evaluator """rankbased""" +597 3 dataset """kinships""" +597 3 model """rotate""" +597 3 loss """nssa""" +597 3 regularizer """no""" +597 3 optimizer """adam""" +597 3 training_loop """owa""" +597 3 negative_sampler """basic""" +597 3 evaluator """rankbased""" +597 4 dataset """kinships""" +597 4 model """rotate""" +597 4 loss """nssa""" +597 4 regularizer """no""" +597 4 optimizer """adam""" +597 4 training_loop """owa""" +597 4 negative_sampler """basic""" +597 4 evaluator 
"""rankbased""" +597 5 dataset """kinships""" +597 5 model """rotate""" +597 5 loss """nssa""" +597 5 regularizer """no""" +597 5 optimizer """adam""" +597 5 training_loop """owa""" +597 5 negative_sampler """basic""" +597 5 evaluator """rankbased""" +597 6 dataset """kinships""" +597 6 model """rotate""" +597 6 loss """nssa""" +597 6 regularizer """no""" +597 6 optimizer """adam""" +597 6 training_loop """owa""" +597 6 negative_sampler """basic""" +597 6 evaluator """rankbased""" +597 7 dataset """kinships""" +597 7 model """rotate""" +597 7 loss """nssa""" +597 7 regularizer """no""" +597 7 optimizer """adam""" +597 7 training_loop """owa""" +597 7 negative_sampler """basic""" +597 7 evaluator """rankbased""" +597 8 dataset """kinships""" +597 8 model """rotate""" +597 8 loss """nssa""" +597 8 regularizer """no""" +597 8 optimizer """adam""" +597 8 training_loop """owa""" +597 8 negative_sampler """basic""" +597 8 evaluator """rankbased""" +597 9 dataset """kinships""" +597 9 model """rotate""" +597 9 loss """nssa""" +597 9 regularizer """no""" +597 9 optimizer """adam""" +597 9 training_loop """owa""" +597 9 negative_sampler """basic""" +597 9 evaluator """rankbased""" +597 10 dataset """kinships""" +597 10 model """rotate""" +597 10 loss """nssa""" +597 10 regularizer """no""" +597 10 optimizer """adam""" +597 10 training_loop """owa""" +597 10 negative_sampler """basic""" +597 10 evaluator """rankbased""" +597 11 dataset """kinships""" +597 11 model """rotate""" +597 11 loss """nssa""" +597 11 regularizer """no""" +597 11 optimizer """adam""" +597 11 training_loop """owa""" +597 11 negative_sampler """basic""" +597 11 evaluator """rankbased""" +597 12 dataset """kinships""" +597 12 model """rotate""" +597 12 loss """nssa""" +597 12 regularizer """no""" +597 12 optimizer """adam""" +597 12 training_loop """owa""" +597 12 negative_sampler """basic""" +597 12 evaluator """rankbased""" +597 13 dataset """kinships""" +597 13 model """rotate""" +597 13 loss 
"""nssa""" +597 13 regularizer """no""" +597 13 optimizer """adam""" +597 13 training_loop """owa""" +597 13 negative_sampler """basic""" +597 13 evaluator """rankbased""" +597 14 dataset """kinships""" +597 14 model """rotate""" +597 14 loss """nssa""" +597 14 regularizer """no""" +597 14 optimizer """adam""" +597 14 training_loop """owa""" +597 14 negative_sampler """basic""" +597 14 evaluator """rankbased""" +597 15 dataset """kinships""" +597 15 model """rotate""" +597 15 loss """nssa""" +597 15 regularizer """no""" +597 15 optimizer """adam""" +597 15 training_loop """owa""" +597 15 negative_sampler """basic""" +597 15 evaluator """rankbased""" +597 16 dataset """kinships""" +597 16 model """rotate""" +597 16 loss """nssa""" +597 16 regularizer """no""" +597 16 optimizer """adam""" +597 16 training_loop """owa""" +597 16 negative_sampler """basic""" +597 16 evaluator """rankbased""" +597 17 dataset """kinships""" +597 17 model """rotate""" +597 17 loss """nssa""" +597 17 regularizer """no""" +597 17 optimizer """adam""" +597 17 training_loop """owa""" +597 17 negative_sampler """basic""" +597 17 evaluator """rankbased""" +597 18 dataset """kinships""" +597 18 model """rotate""" +597 18 loss """nssa""" +597 18 regularizer """no""" +597 18 optimizer """adam""" +597 18 training_loop """owa""" +597 18 negative_sampler """basic""" +597 18 evaluator """rankbased""" +597 19 dataset """kinships""" +597 19 model """rotate""" +597 19 loss """nssa""" +597 19 regularizer """no""" +597 19 optimizer """adam""" +597 19 training_loop """owa""" +597 19 negative_sampler """basic""" +597 19 evaluator """rankbased""" +597 20 dataset """kinships""" +597 20 model """rotate""" +597 20 loss """nssa""" +597 20 regularizer """no""" +597 20 optimizer """adam""" +597 20 training_loop """owa""" +597 20 negative_sampler """basic""" +597 20 evaluator """rankbased""" +597 21 dataset """kinships""" +597 21 model """rotate""" +597 21 loss """nssa""" +597 21 regularizer """no""" +597 21 
optimizer """adam""" +597 21 training_loop """owa""" +597 21 negative_sampler """basic""" +597 21 evaluator """rankbased""" +597 22 dataset """kinships""" +597 22 model """rotate""" +597 22 loss """nssa""" +597 22 regularizer """no""" +597 22 optimizer """adam""" +597 22 training_loop """owa""" +597 22 negative_sampler """basic""" +597 22 evaluator """rankbased""" +597 23 dataset """kinships""" +597 23 model """rotate""" +597 23 loss """nssa""" +597 23 regularizer """no""" +597 23 optimizer """adam""" +597 23 training_loop """owa""" +597 23 negative_sampler """basic""" +597 23 evaluator """rankbased""" +597 24 dataset """kinships""" +597 24 model """rotate""" +597 24 loss """nssa""" +597 24 regularizer """no""" +597 24 optimizer """adam""" +597 24 training_loop """owa""" +597 24 negative_sampler """basic""" +597 24 evaluator """rankbased""" +597 25 dataset """kinships""" +597 25 model """rotate""" +597 25 loss """nssa""" +597 25 regularizer """no""" +597 25 optimizer """adam""" +597 25 training_loop """owa""" +597 25 negative_sampler """basic""" +597 25 evaluator """rankbased""" +597 26 dataset """kinships""" +597 26 model """rotate""" +597 26 loss """nssa""" +597 26 regularizer """no""" +597 26 optimizer """adam""" +597 26 training_loop """owa""" +597 26 negative_sampler """basic""" +597 26 evaluator """rankbased""" +597 27 dataset """kinships""" +597 27 model """rotate""" +597 27 loss """nssa""" +597 27 regularizer """no""" +597 27 optimizer """adam""" +597 27 training_loop """owa""" +597 27 negative_sampler """basic""" +597 27 evaluator """rankbased""" +597 28 dataset """kinships""" +597 28 model """rotate""" +597 28 loss """nssa""" +597 28 regularizer """no""" +597 28 optimizer """adam""" +597 28 training_loop """owa""" +597 28 negative_sampler """basic""" +597 28 evaluator """rankbased""" +597 29 dataset """kinships""" +597 29 model """rotate""" +597 29 loss """nssa""" +597 29 regularizer """no""" +597 29 optimizer """adam""" +597 29 training_loop """owa""" 
+597 29 negative_sampler """basic""" +597 29 evaluator """rankbased""" +597 30 dataset """kinships""" +597 30 model """rotate""" +597 30 loss """nssa""" +597 30 regularizer """no""" +597 30 optimizer """adam""" +597 30 training_loop """owa""" +597 30 negative_sampler """basic""" +597 30 evaluator """rankbased""" +597 31 dataset """kinships""" +597 31 model """rotate""" +597 31 loss """nssa""" +597 31 regularizer """no""" +597 31 optimizer """adam""" +597 31 training_loop """owa""" +597 31 negative_sampler """basic""" +597 31 evaluator """rankbased""" +597 32 dataset """kinships""" +597 32 model """rotate""" +597 32 loss """nssa""" +597 32 regularizer """no""" +597 32 optimizer """adam""" +597 32 training_loop """owa""" +597 32 negative_sampler """basic""" +597 32 evaluator """rankbased""" +597 33 dataset """kinships""" +597 33 model """rotate""" +597 33 loss """nssa""" +597 33 regularizer """no""" +597 33 optimizer """adam""" +597 33 training_loop """owa""" +597 33 negative_sampler """basic""" +597 33 evaluator """rankbased""" +597 34 dataset """kinships""" +597 34 model """rotate""" +597 34 loss """nssa""" +597 34 regularizer """no""" +597 34 optimizer """adam""" +597 34 training_loop """owa""" +597 34 negative_sampler """basic""" +597 34 evaluator """rankbased""" +597 35 dataset """kinships""" +597 35 model """rotate""" +597 35 loss """nssa""" +597 35 regularizer """no""" +597 35 optimizer """adam""" +597 35 training_loop """owa""" +597 35 negative_sampler """basic""" +597 35 evaluator """rankbased""" +597 36 dataset """kinships""" +597 36 model """rotate""" +597 36 loss """nssa""" +597 36 regularizer """no""" +597 36 optimizer """adam""" +597 36 training_loop """owa""" +597 36 negative_sampler """basic""" +597 36 evaluator """rankbased""" +597 37 dataset """kinships""" +597 37 model """rotate""" +597 37 loss """nssa""" +597 37 regularizer """no""" +597 37 optimizer """adam""" +597 37 training_loop """owa""" +597 37 negative_sampler """basic""" +597 37 evaluator 
"""rankbased""" +597 38 dataset """kinships""" +597 38 model """rotate""" +597 38 loss """nssa""" +597 38 regularizer """no""" +597 38 optimizer """adam""" +597 38 training_loop """owa""" +597 38 negative_sampler """basic""" +597 38 evaluator """rankbased""" +597 39 dataset """kinships""" +597 39 model """rotate""" +597 39 loss """nssa""" +597 39 regularizer """no""" +597 39 optimizer """adam""" +597 39 training_loop """owa""" +597 39 negative_sampler """basic""" +597 39 evaluator """rankbased""" +597 40 dataset """kinships""" +597 40 model """rotate""" +597 40 loss """nssa""" +597 40 regularizer """no""" +597 40 optimizer """adam""" +597 40 training_loop """owa""" +597 40 negative_sampler """basic""" +597 40 evaluator """rankbased""" +597 41 dataset """kinships""" +597 41 model """rotate""" +597 41 loss """nssa""" +597 41 regularizer """no""" +597 41 optimizer """adam""" +597 41 training_loop """owa""" +597 41 negative_sampler """basic""" +597 41 evaluator """rankbased""" +597 42 dataset """kinships""" +597 42 model """rotate""" +597 42 loss """nssa""" +597 42 regularizer """no""" +597 42 optimizer """adam""" +597 42 training_loop """owa""" +597 42 negative_sampler """basic""" +597 42 evaluator """rankbased""" +597 43 dataset """kinships""" +597 43 model """rotate""" +597 43 loss """nssa""" +597 43 regularizer """no""" +597 43 optimizer """adam""" +597 43 training_loop """owa""" +597 43 negative_sampler """basic""" +597 43 evaluator """rankbased""" +597 44 dataset """kinships""" +597 44 model """rotate""" +597 44 loss """nssa""" +597 44 regularizer """no""" +597 44 optimizer """adam""" +597 44 training_loop """owa""" +597 44 negative_sampler """basic""" +597 44 evaluator """rankbased""" +597 45 dataset """kinships""" +597 45 model """rotate""" +597 45 loss """nssa""" +597 45 regularizer """no""" +597 45 optimizer """adam""" +597 45 training_loop """owa""" +597 45 negative_sampler """basic""" +597 45 evaluator """rankbased""" +597 46 dataset """kinships""" +597 46 
model """rotate""" +597 46 loss """nssa""" +597 46 regularizer """no""" +597 46 optimizer """adam""" +597 46 training_loop """owa""" +597 46 negative_sampler """basic""" +597 46 evaluator """rankbased""" +597 47 dataset """kinships""" +597 47 model """rotate""" +597 47 loss """nssa""" +597 47 regularizer """no""" +597 47 optimizer """adam""" +597 47 training_loop """owa""" +597 47 negative_sampler """basic""" +597 47 evaluator """rankbased""" +597 48 dataset """kinships""" +597 48 model """rotate""" +597 48 loss """nssa""" +597 48 regularizer """no""" +597 48 optimizer """adam""" +597 48 training_loop """owa""" +597 48 negative_sampler """basic""" +597 48 evaluator """rankbased""" +597 49 dataset """kinships""" +597 49 model """rotate""" +597 49 loss """nssa""" +597 49 regularizer """no""" +597 49 optimizer """adam""" +597 49 training_loop """owa""" +597 49 negative_sampler """basic""" +597 49 evaluator """rankbased""" +597 50 dataset """kinships""" +597 50 model """rotate""" +597 50 loss """nssa""" +597 50 regularizer """no""" +597 50 optimizer """adam""" +597 50 training_loop """owa""" +597 50 negative_sampler """basic""" +597 50 evaluator """rankbased""" +597 51 dataset """kinships""" +597 51 model """rotate""" +597 51 loss """nssa""" +597 51 regularizer """no""" +597 51 optimizer """adam""" +597 51 training_loop """owa""" +597 51 negative_sampler """basic""" +597 51 evaluator """rankbased""" +597 52 dataset """kinships""" +597 52 model """rotate""" +597 52 loss """nssa""" +597 52 regularizer """no""" +597 52 optimizer """adam""" +597 52 training_loop """owa""" +597 52 negative_sampler """basic""" +597 52 evaluator """rankbased""" +597 53 dataset """kinships""" +597 53 model """rotate""" +597 53 loss """nssa""" +597 53 regularizer """no""" +597 53 optimizer """adam""" +597 53 training_loop """owa""" +597 53 negative_sampler """basic""" +597 53 evaluator """rankbased""" +597 54 dataset """kinships""" +597 54 model """rotate""" +597 54 loss """nssa""" +597 54 
regularizer """no""" +597 54 optimizer """adam""" +597 54 training_loop """owa""" +597 54 negative_sampler """basic""" +597 54 evaluator """rankbased""" +597 55 dataset """kinships""" +597 55 model """rotate""" +597 55 loss """nssa""" +597 55 regularizer """no""" +597 55 optimizer """adam""" +597 55 training_loop """owa""" +597 55 negative_sampler """basic""" +597 55 evaluator """rankbased""" +597 56 dataset """kinships""" +597 56 model """rotate""" +597 56 loss """nssa""" +597 56 regularizer """no""" +597 56 optimizer """adam""" +597 56 training_loop """owa""" +597 56 negative_sampler """basic""" +597 56 evaluator """rankbased""" +597 57 dataset """kinships""" +597 57 model """rotate""" +597 57 loss """nssa""" +597 57 regularizer """no""" +597 57 optimizer """adam""" +597 57 training_loop """owa""" +597 57 negative_sampler """basic""" +597 57 evaluator """rankbased""" +597 58 dataset """kinships""" +597 58 model """rotate""" +597 58 loss """nssa""" +597 58 regularizer """no""" +597 58 optimizer """adam""" +597 58 training_loop """owa""" +597 58 negative_sampler """basic""" +597 58 evaluator """rankbased""" +597 59 dataset """kinships""" +597 59 model """rotate""" +597 59 loss """nssa""" +597 59 regularizer """no""" +597 59 optimizer """adam""" +597 59 training_loop """owa""" +597 59 negative_sampler """basic""" +597 59 evaluator """rankbased""" +597 60 dataset """kinships""" +597 60 model """rotate""" +597 60 loss """nssa""" +597 60 regularizer """no""" +597 60 optimizer """adam""" +597 60 training_loop """owa""" +597 60 negative_sampler """basic""" +597 60 evaluator """rankbased""" +597 61 dataset """kinships""" +597 61 model """rotate""" +597 61 loss """nssa""" +597 61 regularizer """no""" +597 61 optimizer """adam""" +597 61 training_loop """owa""" +597 61 negative_sampler """basic""" +597 61 evaluator """rankbased""" +597 62 dataset """kinships""" +597 62 model """rotate""" +597 62 loss """nssa""" +597 62 regularizer """no""" +597 62 optimizer """adam""" +597 
62 training_loop """owa""" +597 62 negative_sampler """basic""" +597 62 evaluator """rankbased""" +597 63 dataset """kinships""" +597 63 model """rotate""" +597 63 loss """nssa""" +597 63 regularizer """no""" +597 63 optimizer """adam""" +597 63 training_loop """owa""" +597 63 negative_sampler """basic""" +597 63 evaluator """rankbased""" +597 64 dataset """kinships""" +597 64 model """rotate""" +597 64 loss """nssa""" +597 64 regularizer """no""" +597 64 optimizer """adam""" +597 64 training_loop """owa""" +597 64 negative_sampler """basic""" +597 64 evaluator """rankbased""" +597 65 dataset """kinships""" +597 65 model """rotate""" +597 65 loss """nssa""" +597 65 regularizer """no""" +597 65 optimizer """adam""" +597 65 training_loop """owa""" +597 65 negative_sampler """basic""" +597 65 evaluator """rankbased""" +597 66 dataset """kinships""" +597 66 model """rotate""" +597 66 loss """nssa""" +597 66 regularizer """no""" +597 66 optimizer """adam""" +597 66 training_loop """owa""" +597 66 negative_sampler """basic""" +597 66 evaluator """rankbased""" +597 67 dataset """kinships""" +597 67 model """rotate""" +597 67 loss """nssa""" +597 67 regularizer """no""" +597 67 optimizer """adam""" +597 67 training_loop """owa""" +597 67 negative_sampler """basic""" +597 67 evaluator """rankbased""" +597 68 dataset """kinships""" +597 68 model """rotate""" +597 68 loss """nssa""" +597 68 regularizer """no""" +597 68 optimizer """adam""" +597 68 training_loop """owa""" +597 68 negative_sampler """basic""" +597 68 evaluator """rankbased""" +597 69 dataset """kinships""" +597 69 model """rotate""" +597 69 loss """nssa""" +597 69 regularizer """no""" +597 69 optimizer """adam""" +597 69 training_loop """owa""" +597 69 negative_sampler """basic""" +597 69 evaluator """rankbased""" +597 70 dataset """kinships""" +597 70 model """rotate""" +597 70 loss """nssa""" +597 70 regularizer """no""" +597 70 optimizer """adam""" +597 70 training_loop """owa""" +597 70 negative_sampler 
"""basic""" +597 70 evaluator """rankbased""" +597 71 dataset """kinships""" +597 71 model """rotate""" +597 71 loss """nssa""" +597 71 regularizer """no""" +597 71 optimizer """adam""" +597 71 training_loop """owa""" +597 71 negative_sampler """basic""" +597 71 evaluator """rankbased""" +597 72 dataset """kinships""" +597 72 model """rotate""" +597 72 loss """nssa""" +597 72 regularizer """no""" +597 72 optimizer """adam""" +597 72 training_loop """owa""" +597 72 negative_sampler """basic""" +597 72 evaluator """rankbased""" +597 73 dataset """kinships""" +597 73 model """rotate""" +597 73 loss """nssa""" +597 73 regularizer """no""" +597 73 optimizer """adam""" +597 73 training_loop """owa""" +597 73 negative_sampler """basic""" +597 73 evaluator """rankbased""" +597 74 dataset """kinships""" +597 74 model """rotate""" +597 74 loss """nssa""" +597 74 regularizer """no""" +597 74 optimizer """adam""" +597 74 training_loop """owa""" +597 74 negative_sampler """basic""" +597 74 evaluator """rankbased""" +597 75 dataset """kinships""" +597 75 model """rotate""" +597 75 loss """nssa""" +597 75 regularizer """no""" +597 75 optimizer """adam""" +597 75 training_loop """owa""" +597 75 negative_sampler """basic""" +597 75 evaluator """rankbased""" +597 76 dataset """kinships""" +597 76 model """rotate""" +597 76 loss """nssa""" +597 76 regularizer """no""" +597 76 optimizer """adam""" +597 76 training_loop """owa""" +597 76 negative_sampler """basic""" +597 76 evaluator """rankbased""" +597 77 dataset """kinships""" +597 77 model """rotate""" +597 77 loss """nssa""" +597 77 regularizer """no""" +597 77 optimizer """adam""" +597 77 training_loop """owa""" +597 77 negative_sampler """basic""" +597 77 evaluator """rankbased""" +597 78 dataset """kinships""" +597 78 model """rotate""" +597 78 loss """nssa""" +597 78 regularizer """no""" +597 78 optimizer """adam""" +597 78 training_loop """owa""" +597 78 negative_sampler """basic""" +597 78 evaluator """rankbased""" +597 79 
dataset """kinships""" +597 79 model """rotate""" +597 79 loss """nssa""" +597 79 regularizer """no""" +597 79 optimizer """adam""" +597 79 training_loop """owa""" +597 79 negative_sampler """basic""" +597 79 evaluator """rankbased""" +597 80 dataset """kinships""" +597 80 model """rotate""" +597 80 loss """nssa""" +597 80 regularizer """no""" +597 80 optimizer """adam""" +597 80 training_loop """owa""" +597 80 negative_sampler """basic""" +597 80 evaluator """rankbased""" +597 81 dataset """kinships""" +597 81 model """rotate""" +597 81 loss """nssa""" +597 81 regularizer """no""" +597 81 optimizer """adam""" +597 81 training_loop """owa""" +597 81 negative_sampler """basic""" +597 81 evaluator """rankbased""" +597 82 dataset """kinships""" +597 82 model """rotate""" +597 82 loss """nssa""" +597 82 regularizer """no""" +597 82 optimizer """adam""" +597 82 training_loop """owa""" +597 82 negative_sampler """basic""" +597 82 evaluator """rankbased""" +597 83 dataset """kinships""" +597 83 model """rotate""" +597 83 loss """nssa""" +597 83 regularizer """no""" +597 83 optimizer """adam""" +597 83 training_loop """owa""" +597 83 negative_sampler """basic""" +597 83 evaluator """rankbased""" +597 84 dataset """kinships""" +597 84 model """rotate""" +597 84 loss """nssa""" +597 84 regularizer """no""" +597 84 optimizer """adam""" +597 84 training_loop """owa""" +597 84 negative_sampler """basic""" +597 84 evaluator """rankbased""" +597 85 dataset """kinships""" +597 85 model """rotate""" +597 85 loss """nssa""" +597 85 regularizer """no""" +597 85 optimizer """adam""" +597 85 training_loop """owa""" +597 85 negative_sampler """basic""" +597 85 evaluator """rankbased""" +597 86 dataset """kinships""" +597 86 model """rotate""" +597 86 loss """nssa""" +597 86 regularizer """no""" +597 86 optimizer """adam""" +597 86 training_loop """owa""" +597 86 negative_sampler """basic""" +597 86 evaluator """rankbased""" +597 87 dataset """kinships""" +597 87 model """rotate""" +597 
87 loss """nssa""" +597 87 regularizer """no""" +597 87 optimizer """adam""" +597 87 training_loop """owa""" +597 87 negative_sampler """basic""" +597 87 evaluator """rankbased""" +597 88 dataset """kinships""" +597 88 model """rotate""" +597 88 loss """nssa""" +597 88 regularizer """no""" +597 88 optimizer """adam""" +597 88 training_loop """owa""" +597 88 negative_sampler """basic""" +597 88 evaluator """rankbased""" +597 89 dataset """kinships""" +597 89 model """rotate""" +597 89 loss """nssa""" +597 89 regularizer """no""" +597 89 optimizer """adam""" +597 89 training_loop """owa""" +597 89 negative_sampler """basic""" +597 89 evaluator """rankbased""" +597 90 dataset """kinships""" +597 90 model """rotate""" +597 90 loss """nssa""" +597 90 regularizer """no""" +597 90 optimizer """adam""" +597 90 training_loop """owa""" +597 90 negative_sampler """basic""" +597 90 evaluator """rankbased""" +597 91 dataset """kinships""" +597 91 model """rotate""" +597 91 loss """nssa""" +597 91 regularizer """no""" +597 91 optimizer """adam""" +597 91 training_loop """owa""" +597 91 negative_sampler """basic""" +597 91 evaluator """rankbased""" +597 92 dataset """kinships""" +597 92 model """rotate""" +597 92 loss """nssa""" +597 92 regularizer """no""" +597 92 optimizer """adam""" +597 92 training_loop """owa""" +597 92 negative_sampler """basic""" +597 92 evaluator """rankbased""" +597 93 dataset """kinships""" +597 93 model """rotate""" +597 93 loss """nssa""" +597 93 regularizer """no""" +597 93 optimizer """adam""" +597 93 training_loop """owa""" +597 93 negative_sampler """basic""" +597 93 evaluator """rankbased""" +597 94 dataset """kinships""" +597 94 model """rotate""" +597 94 loss """nssa""" +597 94 regularizer """no""" +597 94 optimizer """adam""" +597 94 training_loop """owa""" +597 94 negative_sampler """basic""" +597 94 evaluator """rankbased""" +597 95 dataset """kinships""" +597 95 model """rotate""" +597 95 loss """nssa""" +597 95 regularizer """no""" +597 95 
optimizer """adam""" +597 95 training_loop """owa""" +597 95 negative_sampler """basic""" +597 95 evaluator """rankbased""" +597 96 dataset """kinships""" +597 96 model """rotate""" +597 96 loss """nssa""" +597 96 regularizer """no""" +597 96 optimizer """adam""" +597 96 training_loop """owa""" +597 96 negative_sampler """basic""" +597 96 evaluator """rankbased""" +597 97 dataset """kinships""" +597 97 model """rotate""" +597 97 loss """nssa""" +597 97 regularizer """no""" +597 97 optimizer """adam""" +597 97 training_loop """owa""" +597 97 negative_sampler """basic""" +597 97 evaluator """rankbased""" +597 98 dataset """kinships""" +597 98 model """rotate""" +597 98 loss """nssa""" +597 98 regularizer """no""" +597 98 optimizer """adam""" +597 98 training_loop """owa""" +597 98 negative_sampler """basic""" +597 98 evaluator """rankbased""" +597 99 dataset """kinships""" +597 99 model """rotate""" +597 99 loss """nssa""" +597 99 regularizer """no""" +597 99 optimizer """adam""" +597 99 training_loop """owa""" +597 99 negative_sampler """basic""" +597 99 evaluator """rankbased""" +597 100 dataset """kinships""" +597 100 model """rotate""" +597 100 loss """nssa""" +597 100 regularizer """no""" +597 100 optimizer """adam""" +597 100 training_loop """owa""" +597 100 negative_sampler """basic""" +597 100 evaluator """rankbased""" +598 1 model.embedding_dim 0.0 +598 1 loss.margin 4.794606938653193 +598 1 loss.adversarial_temperature 0.5975403605064097 +598 1 optimizer.lr 0.004809723577756274 +598 1 negative_sampler.num_negs_per_pos 34.0 +598 1 training.batch_size 0.0 +598 2 model.embedding_dim 0.0 +598 2 loss.margin 3.3118541729668767 +598 2 loss.adversarial_temperature 0.6763762003309135 +598 2 optimizer.lr 0.010167222465627965 +598 2 negative_sampler.num_negs_per_pos 94.0 +598 2 training.batch_size 0.0 +598 3 model.embedding_dim 1.0 +598 3 loss.margin 25.588097356728873 +598 3 loss.adversarial_temperature 0.508724406987016 +598 3 optimizer.lr 0.05445640399595801 +598 3 
negative_sampler.num_negs_per_pos 57.0 +598 3 training.batch_size 0.0 +598 4 model.embedding_dim 0.0 +598 4 loss.margin 13.989265476812529 +598 4 loss.adversarial_temperature 0.834157151681136 +598 4 optimizer.lr 0.009122555284780587 +598 4 negative_sampler.num_negs_per_pos 57.0 +598 4 training.batch_size 2.0 +598 5 model.embedding_dim 1.0 +598 5 loss.margin 22.406078426225672 +598 5 loss.adversarial_temperature 0.4610362389823434 +598 5 optimizer.lr 0.0561027297631212 +598 5 negative_sampler.num_negs_per_pos 7.0 +598 5 training.batch_size 0.0 +598 6 model.embedding_dim 1.0 +598 6 loss.margin 9.707828701280052 +598 6 loss.adversarial_temperature 0.1947563768921129 +598 6 optimizer.lr 0.005647477551925382 +598 6 negative_sampler.num_negs_per_pos 14.0 +598 6 training.batch_size 2.0 +598 7 model.embedding_dim 1.0 +598 7 loss.margin 4.129606779583581 +598 7 loss.adversarial_temperature 0.4406570293832921 +598 7 optimizer.lr 0.059309117952272145 +598 7 negative_sampler.num_negs_per_pos 3.0 +598 7 training.batch_size 2.0 +598 8 model.embedding_dim 0.0 +598 8 loss.margin 29.838465010686487 +598 8 loss.adversarial_temperature 0.30075366774549334 +598 8 optimizer.lr 0.00295197903960119 +598 8 negative_sampler.num_negs_per_pos 26.0 +598 8 training.batch_size 1.0 +598 9 model.embedding_dim 1.0 +598 9 loss.margin 19.428249558763802 +598 9 loss.adversarial_temperature 0.19092886251316357 +598 9 optimizer.lr 0.014581984076595174 +598 9 negative_sampler.num_negs_per_pos 68.0 +598 9 training.batch_size 2.0 +598 10 model.embedding_dim 0.0 +598 10 loss.margin 14.158111031419734 +598 10 loss.adversarial_temperature 0.6154535284529988 +598 10 optimizer.lr 0.0548437158187703 +598 10 negative_sampler.num_negs_per_pos 54.0 +598 10 training.batch_size 1.0 +598 11 model.embedding_dim 2.0 +598 11 loss.margin 28.62149876818131 +598 11 loss.adversarial_temperature 0.440940975967457 +598 11 optimizer.lr 0.0019107627566602955 +598 11 negative_sampler.num_negs_per_pos 83.0 +598 11 
training.batch_size 1.0 +598 12 model.embedding_dim 2.0 +598 12 loss.margin 24.18597079231721 +598 12 loss.adversarial_temperature 0.5710664959827366 +598 12 optimizer.lr 0.002128266571839051 +598 12 negative_sampler.num_negs_per_pos 3.0 +598 12 training.batch_size 0.0 +598 13 model.embedding_dim 1.0 +598 13 loss.margin 20.959993424043887 +598 13 loss.adversarial_temperature 0.8095830160135952 +598 13 optimizer.lr 0.010804980650573212 +598 13 negative_sampler.num_negs_per_pos 78.0 +598 13 training.batch_size 1.0 +598 14 model.embedding_dim 1.0 +598 14 loss.margin 2.745188909901319 +598 14 loss.adversarial_temperature 0.49593605794050405 +598 14 optimizer.lr 0.004450450519072702 +598 14 negative_sampler.num_negs_per_pos 80.0 +598 14 training.batch_size 0.0 +598 15 model.embedding_dim 0.0 +598 15 loss.margin 18.498394537192713 +598 15 loss.adversarial_temperature 0.5162262115490179 +598 15 optimizer.lr 0.026507611758210634 +598 15 negative_sampler.num_negs_per_pos 46.0 +598 15 training.batch_size 1.0 +598 16 model.embedding_dim 0.0 +598 16 loss.margin 1.2834122324997534 +598 16 loss.adversarial_temperature 0.999645615608627 +598 16 optimizer.lr 0.0014677462361436116 +598 16 negative_sampler.num_negs_per_pos 38.0 +598 16 training.batch_size 0.0 +598 17 model.embedding_dim 0.0 +598 17 loss.margin 26.417722827082873 +598 17 loss.adversarial_temperature 0.6316834928186844 +598 17 optimizer.lr 0.07853905561373917 +598 17 negative_sampler.num_negs_per_pos 97.0 +598 17 training.batch_size 1.0 +598 18 model.embedding_dim 0.0 +598 18 loss.margin 10.43420826288995 +598 18 loss.adversarial_temperature 0.49761822356494734 +598 18 optimizer.lr 0.0129333636893821 +598 18 negative_sampler.num_negs_per_pos 46.0 +598 18 training.batch_size 1.0 +598 19 model.embedding_dim 2.0 +598 19 loss.margin 3.254801813305108 +598 19 loss.adversarial_temperature 0.5876329954181871 +598 19 optimizer.lr 0.05748406537313077 +598 19 negative_sampler.num_negs_per_pos 45.0 +598 19 training.batch_size 
1.0 +598 20 model.embedding_dim 1.0 +598 20 loss.margin 21.823159769480743 +598 20 loss.adversarial_temperature 0.9651060144320472 +598 20 optimizer.lr 0.027197077958832992 +598 20 negative_sampler.num_negs_per_pos 88.0 +598 20 training.batch_size 1.0 +598 21 model.embedding_dim 0.0 +598 21 loss.margin 21.632892418340198 +598 21 loss.adversarial_temperature 0.9139625741379029 +598 21 optimizer.lr 0.0015011866722583402 +598 21 negative_sampler.num_negs_per_pos 86.0 +598 21 training.batch_size 0.0 +598 22 model.embedding_dim 2.0 +598 22 loss.margin 9.899928166336787 +598 22 loss.adversarial_temperature 0.2381989922008343 +598 22 optimizer.lr 0.04589672004934083 +598 22 negative_sampler.num_negs_per_pos 59.0 +598 22 training.batch_size 1.0 +598 23 model.embedding_dim 1.0 +598 23 loss.margin 8.65989373028224 +598 23 loss.adversarial_temperature 0.17726409988242323 +598 23 optimizer.lr 0.001675085692413867 +598 23 negative_sampler.num_negs_per_pos 46.0 +598 23 training.batch_size 0.0 +598 24 model.embedding_dim 0.0 +598 24 loss.margin 18.48693176617134 +598 24 loss.adversarial_temperature 0.2115683475312082 +598 24 optimizer.lr 0.074203119237085 +598 24 negative_sampler.num_negs_per_pos 19.0 +598 24 training.batch_size 1.0 +598 25 model.embedding_dim 2.0 +598 25 loss.margin 16.56056755997996 +598 25 loss.adversarial_temperature 0.6545231238919716 +598 25 optimizer.lr 0.052925410537107115 +598 25 negative_sampler.num_negs_per_pos 44.0 +598 25 training.batch_size 0.0 +598 26 model.embedding_dim 0.0 +598 26 loss.margin 15.620974849007258 +598 26 loss.adversarial_temperature 0.9383754482734068 +598 26 optimizer.lr 0.09427742695236316 +598 26 negative_sampler.num_negs_per_pos 43.0 +598 26 training.batch_size 2.0 +598 27 model.embedding_dim 2.0 +598 27 loss.margin 13.514187095711657 +598 27 loss.adversarial_temperature 0.26629973376573773 +598 27 optimizer.lr 0.009424799657347221 +598 27 negative_sampler.num_negs_per_pos 90.0 +598 27 training.batch_size 0.0 +598 28 
model.embedding_dim 1.0 +598 28 loss.margin 13.084102445458786 +598 28 loss.adversarial_temperature 0.7439426202865935 +598 28 optimizer.lr 0.00519752641987619 +598 28 negative_sampler.num_negs_per_pos 9.0 +598 28 training.batch_size 0.0 +598 29 model.embedding_dim 2.0 +598 29 loss.margin 13.320655141011017 +598 29 loss.adversarial_temperature 0.9080624492365705 +598 29 optimizer.lr 0.001560903522536335 +598 29 negative_sampler.num_negs_per_pos 22.0 +598 29 training.batch_size 1.0 +598 30 model.embedding_dim 1.0 +598 30 loss.margin 28.126161730640952 +598 30 loss.adversarial_temperature 0.745683504581704 +598 30 optimizer.lr 0.003697597408808109 +598 30 negative_sampler.num_negs_per_pos 1.0 +598 30 training.batch_size 0.0 +598 31 model.embedding_dim 1.0 +598 31 loss.margin 5.348109601072797 +598 31 loss.adversarial_temperature 0.5451930094513556 +598 31 optimizer.lr 0.0027965824918878735 +598 31 negative_sampler.num_negs_per_pos 5.0 +598 31 training.batch_size 2.0 +598 32 model.embedding_dim 1.0 +598 32 loss.margin 28.486649940720337 +598 32 loss.adversarial_temperature 0.4064493570060191 +598 32 optimizer.lr 0.0035384956570959957 +598 32 negative_sampler.num_negs_per_pos 93.0 +598 32 training.batch_size 0.0 +598 33 model.embedding_dim 1.0 +598 33 loss.margin 26.788897189793765 +598 33 loss.adversarial_temperature 0.564781249315713 +598 33 optimizer.lr 0.0635227740982256 +598 33 negative_sampler.num_negs_per_pos 63.0 +598 33 training.batch_size 1.0 +598 34 model.embedding_dim 2.0 +598 34 loss.margin 3.77880371541794 +598 34 loss.adversarial_temperature 0.6819300737157017 +598 34 optimizer.lr 0.0030673124248670317 +598 34 negative_sampler.num_negs_per_pos 14.0 +598 34 training.batch_size 0.0 +598 35 model.embedding_dim 1.0 +598 35 loss.margin 10.054625730573608 +598 35 loss.adversarial_temperature 0.5934473323179933 +598 35 optimizer.lr 0.010505868997171172 +598 35 negative_sampler.num_negs_per_pos 24.0 +598 35 training.batch_size 0.0 +598 36 model.embedding_dim 0.0 
+598 36 loss.margin 4.5485272816366304 +598 36 loss.adversarial_temperature 0.37700202821167517 +598 36 optimizer.lr 0.006003055898518131 +598 36 negative_sampler.num_negs_per_pos 29.0 +598 36 training.batch_size 1.0 +598 37 model.embedding_dim 0.0 +598 37 loss.margin 17.91947104166884 +598 37 loss.adversarial_temperature 0.7554873956651275 +598 37 optimizer.lr 0.04087201781964132 +598 37 negative_sampler.num_negs_per_pos 20.0 +598 37 training.batch_size 2.0 +598 38 model.embedding_dim 2.0 +598 38 loss.margin 3.2865740952251317 +598 38 loss.adversarial_temperature 0.7037389199406825 +598 38 optimizer.lr 0.07015862431518707 +598 38 negative_sampler.num_negs_per_pos 65.0 +598 38 training.batch_size 1.0 +598 39 model.embedding_dim 1.0 +598 39 loss.margin 27.35341995432977 +598 39 loss.adversarial_temperature 0.1475569253692951 +598 39 optimizer.lr 0.015985984270064587 +598 39 negative_sampler.num_negs_per_pos 38.0 +598 39 training.batch_size 1.0 +598 40 model.embedding_dim 2.0 +598 40 loss.margin 3.5448903425935367 +598 40 loss.adversarial_temperature 0.41723337056820264 +598 40 optimizer.lr 0.014503048141024006 +598 40 negative_sampler.num_negs_per_pos 27.0 +598 40 training.batch_size 1.0 +598 41 model.embedding_dim 0.0 +598 41 loss.margin 15.512020245566886 +598 41 loss.adversarial_temperature 0.1594073644149914 +598 41 optimizer.lr 0.012280860511995033 +598 41 negative_sampler.num_negs_per_pos 69.0 +598 41 training.batch_size 1.0 +598 42 model.embedding_dim 1.0 +598 42 loss.margin 7.264168935857386 +598 42 loss.adversarial_temperature 0.5789888426319493 +598 42 optimizer.lr 0.02783878205161466 +598 42 negative_sampler.num_negs_per_pos 73.0 +598 42 training.batch_size 1.0 +598 43 model.embedding_dim 1.0 +598 43 loss.margin 1.962574192552899 +598 43 loss.adversarial_temperature 0.9040555857326321 +598 43 optimizer.lr 0.02497237673123023 +598 43 negative_sampler.num_negs_per_pos 38.0 +598 43 training.batch_size 1.0 +598 44 model.embedding_dim 0.0 +598 44 loss.margin 
9.557983438727463 +598 44 loss.adversarial_temperature 0.13280582914523567 +598 44 optimizer.lr 0.001371548016030485 +598 44 negative_sampler.num_negs_per_pos 2.0 +598 44 training.batch_size 2.0 +598 45 model.embedding_dim 1.0 +598 45 loss.margin 17.74679698574868 +598 45 loss.adversarial_temperature 0.8030256400283476 +598 45 optimizer.lr 0.04550937833920533 +598 45 negative_sampler.num_negs_per_pos 2.0 +598 45 training.batch_size 0.0 +598 46 model.embedding_dim 0.0 +598 46 loss.margin 18.409006835386865 +598 46 loss.adversarial_temperature 0.48397230066170793 +598 46 optimizer.lr 0.0016761393410907434 +598 46 negative_sampler.num_negs_per_pos 45.0 +598 46 training.batch_size 1.0 +598 47 model.embedding_dim 0.0 +598 47 loss.margin 3.338514397156433 +598 47 loss.adversarial_temperature 0.563792542942786 +598 47 optimizer.lr 0.0043933956797586726 +598 47 negative_sampler.num_negs_per_pos 38.0 +598 47 training.batch_size 0.0 +598 48 model.embedding_dim 0.0 +598 48 loss.margin 4.52471530785913 +598 48 loss.adversarial_temperature 0.6160467632212294 +598 48 optimizer.lr 0.0011378109918925923 +598 48 negative_sampler.num_negs_per_pos 38.0 +598 48 training.batch_size 0.0 +598 49 model.embedding_dim 1.0 +598 49 loss.margin 5.495735977417348 +598 49 loss.adversarial_temperature 0.31957540729142514 +598 49 optimizer.lr 0.009038591168250417 +598 49 negative_sampler.num_negs_per_pos 16.0 +598 49 training.batch_size 2.0 +598 50 model.embedding_dim 1.0 +598 50 loss.margin 9.248723676198656 +598 50 loss.adversarial_temperature 0.726057326857613 +598 50 optimizer.lr 0.0261113459511893 +598 50 negative_sampler.num_negs_per_pos 4.0 +598 50 training.batch_size 2.0 +598 51 model.embedding_dim 1.0 +598 51 loss.margin 25.906726461456817 +598 51 loss.adversarial_temperature 0.2906764492281873 +598 51 optimizer.lr 0.0011337072154008678 +598 51 negative_sampler.num_negs_per_pos 37.0 +598 51 training.batch_size 1.0 +598 52 model.embedding_dim 0.0 +598 52 loss.margin 3.925274944737028 +598 
52 loss.adversarial_temperature 0.7828695626417286 +598 52 optimizer.lr 0.001014023706532439 +598 52 negative_sampler.num_negs_per_pos 61.0 +598 52 training.batch_size 2.0 +598 53 model.embedding_dim 1.0 +598 53 loss.margin 8.190771365013802 +598 53 loss.adversarial_temperature 0.26443550216379946 +598 53 optimizer.lr 0.01058639529872236 +598 53 negative_sampler.num_negs_per_pos 57.0 +598 53 training.batch_size 2.0 +598 54 model.embedding_dim 0.0 +598 54 loss.margin 15.684092717101056 +598 54 loss.adversarial_temperature 0.9493493169837127 +598 54 optimizer.lr 0.04956731470465614 +598 54 negative_sampler.num_negs_per_pos 94.0 +598 54 training.batch_size 0.0 +598 55 model.embedding_dim 2.0 +598 55 loss.margin 19.582765174017023 +598 55 loss.adversarial_temperature 0.8038282689640095 +598 55 optimizer.lr 0.005925675554460562 +598 55 negative_sampler.num_negs_per_pos 62.0 +598 55 training.batch_size 1.0 +598 56 model.embedding_dim 1.0 +598 56 loss.margin 12.7497956905811 +598 56 loss.adversarial_temperature 0.8709192596027218 +598 56 optimizer.lr 0.005283390173379351 +598 56 negative_sampler.num_negs_per_pos 86.0 +598 56 training.batch_size 1.0 +598 57 model.embedding_dim 2.0 +598 57 loss.margin 1.7451879738138159 +598 57 loss.adversarial_temperature 0.3919929038165142 +598 57 optimizer.lr 0.0013775261860462826 +598 57 negative_sampler.num_negs_per_pos 51.0 +598 57 training.batch_size 2.0 +598 58 model.embedding_dim 0.0 +598 58 loss.margin 20.358067722618546 +598 58 loss.adversarial_temperature 0.3749156299300455 +598 58 optimizer.lr 0.0033830875588049957 +598 58 negative_sampler.num_negs_per_pos 7.0 +598 58 training.batch_size 0.0 +598 59 model.embedding_dim 0.0 +598 59 loss.margin 12.5144788446361 +598 59 loss.adversarial_temperature 0.12492138628952316 +598 59 optimizer.lr 0.011960748493507556 +598 59 negative_sampler.num_negs_per_pos 19.0 +598 59 training.batch_size 1.0 +598 60 model.embedding_dim 1.0 +598 60 loss.margin 29.788851276544047 +598 60 
loss.adversarial_temperature 0.6988189587974095 +598 60 optimizer.lr 0.004960943398206026 +598 60 negative_sampler.num_negs_per_pos 6.0 +598 60 training.batch_size 0.0 +598 61 model.embedding_dim 2.0 +598 61 loss.margin 3.9881576637284253 +598 61 loss.adversarial_temperature 0.2439729669132686 +598 61 optimizer.lr 0.00516627724947666 +598 61 negative_sampler.num_negs_per_pos 64.0 +598 61 training.batch_size 1.0 +598 62 model.embedding_dim 1.0 +598 62 loss.margin 27.952112931473 +598 62 loss.adversarial_temperature 0.7564047914517149 +598 62 optimizer.lr 0.029525889289812472 +598 62 negative_sampler.num_negs_per_pos 42.0 +598 62 training.batch_size 0.0 +598 63 model.embedding_dim 1.0 +598 63 loss.margin 9.28582168508229 +598 63 loss.adversarial_temperature 0.7286319938741592 +598 63 optimizer.lr 0.001042411225395646 +598 63 negative_sampler.num_negs_per_pos 30.0 +598 63 training.batch_size 0.0 +598 64 model.embedding_dim 1.0 +598 64 loss.margin 22.61106414946569 +598 64 loss.adversarial_temperature 0.5802482600863865 +598 64 optimizer.lr 0.08330971520719062 +598 64 negative_sampler.num_negs_per_pos 98.0 +598 64 training.batch_size 2.0 +598 65 model.embedding_dim 2.0 +598 65 loss.margin 22.72292911355334 +598 65 loss.adversarial_temperature 0.6264253660786138 +598 65 optimizer.lr 0.013730524030519917 +598 65 negative_sampler.num_negs_per_pos 32.0 +598 65 training.batch_size 0.0 +598 66 model.embedding_dim 2.0 +598 66 loss.margin 17.725708816998427 +598 66 loss.adversarial_temperature 0.27288593893498336 +598 66 optimizer.lr 0.008405677310787334 +598 66 negative_sampler.num_negs_per_pos 92.0 +598 66 training.batch_size 2.0 +598 67 model.embedding_dim 1.0 +598 67 loss.margin 19.682558245793675 +598 67 loss.adversarial_temperature 0.9769028360925388 +598 67 optimizer.lr 0.00734051956440726 +598 67 negative_sampler.num_negs_per_pos 22.0 +598 67 training.batch_size 2.0 +598 68 model.embedding_dim 1.0 +598 68 loss.margin 7.883326375303089 +598 68 
loss.adversarial_temperature 0.6974567659495097 +598 68 optimizer.lr 0.039359694536640136 +598 68 negative_sampler.num_negs_per_pos 85.0 +598 68 training.batch_size 2.0 +598 69 model.embedding_dim 2.0 +598 69 loss.margin 24.900573504775444 +598 69 loss.adversarial_temperature 0.993030203080219 +598 69 optimizer.lr 0.0054264987348559965 +598 69 negative_sampler.num_negs_per_pos 40.0 +598 69 training.batch_size 1.0 +598 70 model.embedding_dim 1.0 +598 70 loss.margin 2.265615237819733 +598 70 loss.adversarial_temperature 0.16316769902282208 +598 70 optimizer.lr 0.045989758270608784 +598 70 negative_sampler.num_negs_per_pos 84.0 +598 70 training.batch_size 1.0 +598 71 model.embedding_dim 2.0 +598 71 loss.margin 6.323463344946951 +598 71 loss.adversarial_temperature 0.4328933776377481 +598 71 optimizer.lr 0.011603467059347223 +598 71 negative_sampler.num_negs_per_pos 96.0 +598 71 training.batch_size 2.0 +598 72 model.embedding_dim 2.0 +598 72 loss.margin 6.445012457336395 +598 72 loss.adversarial_temperature 0.3333770749446719 +598 72 optimizer.lr 0.00969324792051592 +598 72 negative_sampler.num_negs_per_pos 42.0 +598 72 training.batch_size 2.0 +598 73 model.embedding_dim 0.0 +598 73 loss.margin 6.751699309089069 +598 73 loss.adversarial_temperature 0.631499758372541 +598 73 optimizer.lr 0.010233263870012242 +598 73 negative_sampler.num_negs_per_pos 64.0 +598 73 training.batch_size 2.0 +598 74 model.embedding_dim 1.0 +598 74 loss.margin 23.993029329409218 +598 74 loss.adversarial_temperature 0.4747755892569601 +598 74 optimizer.lr 0.018793202591286483 +598 74 negative_sampler.num_negs_per_pos 29.0 +598 74 training.batch_size 0.0 +598 75 model.embedding_dim 0.0 +598 75 loss.margin 16.146320540732148 +598 75 loss.adversarial_temperature 0.2712931706207636 +598 75 optimizer.lr 0.0014290548733523047 +598 75 negative_sampler.num_negs_per_pos 34.0 +598 75 training.batch_size 2.0 +598 76 model.embedding_dim 1.0 +598 76 loss.margin 28.17760395323423 +598 76 
loss.adversarial_temperature 0.7137739988079786 +598 76 optimizer.lr 0.017554390210392155 +598 76 negative_sampler.num_negs_per_pos 50.0 +598 76 training.batch_size 1.0 +598 77 model.embedding_dim 2.0 +598 77 loss.margin 1.8966867560579868 +598 77 loss.adversarial_temperature 0.886036407999496 +598 77 optimizer.lr 0.09768872021957348 +598 77 negative_sampler.num_negs_per_pos 37.0 +598 77 training.batch_size 0.0 +598 78 model.embedding_dim 2.0 +598 78 loss.margin 2.96933761249404 +598 78 loss.adversarial_temperature 0.3331066541169153 +598 78 optimizer.lr 0.0011663267027812768 +598 78 negative_sampler.num_negs_per_pos 64.0 +598 78 training.batch_size 2.0 +598 79 model.embedding_dim 0.0 +598 79 loss.margin 9.029358937056738 +598 79 loss.adversarial_temperature 0.30208678546747975 +598 79 optimizer.lr 0.006045525929394068 +598 79 negative_sampler.num_negs_per_pos 75.0 +598 79 training.batch_size 2.0 +598 80 model.embedding_dim 2.0 +598 80 loss.margin 29.179389509638238 +598 80 loss.adversarial_temperature 0.5321613401159275 +598 80 optimizer.lr 0.03339969679326058 +598 80 negative_sampler.num_negs_per_pos 15.0 +598 80 training.batch_size 1.0 +598 81 model.embedding_dim 1.0 +598 81 loss.margin 18.365190105751708 +598 81 loss.adversarial_temperature 0.7903292385462866 +598 81 optimizer.lr 0.011945721671219624 +598 81 negative_sampler.num_negs_per_pos 12.0 +598 81 training.batch_size 1.0 +598 82 model.embedding_dim 1.0 +598 82 loss.margin 22.92127207851953 +598 82 loss.adversarial_temperature 0.7847501267741382 +598 82 optimizer.lr 0.010617509766686567 +598 82 negative_sampler.num_negs_per_pos 84.0 +598 82 training.batch_size 0.0 +598 83 model.embedding_dim 0.0 +598 83 loss.margin 26.34411101784118 +598 83 loss.adversarial_temperature 0.7884932953709104 +598 83 optimizer.lr 0.001259535648423249 +598 83 negative_sampler.num_negs_per_pos 13.0 +598 83 training.batch_size 1.0 +598 84 model.embedding_dim 0.0 +598 84 loss.margin 13.809866897820452 +598 84 
loss.adversarial_temperature 0.9596117602159666 +598 84 optimizer.lr 0.010631465153309521 +598 84 negative_sampler.num_negs_per_pos 32.0 +598 84 training.batch_size 0.0 +598 85 model.embedding_dim 1.0 +598 85 loss.margin 8.850713905468615 +598 85 loss.adversarial_temperature 0.9294310343546565 +598 85 optimizer.lr 0.051605281428747665 +598 85 negative_sampler.num_negs_per_pos 10.0 +598 85 training.batch_size 0.0 +598 86 model.embedding_dim 2.0 +598 86 loss.margin 25.736696893209253 +598 86 loss.adversarial_temperature 0.534917702695216 +598 86 optimizer.lr 0.00252331246168689 +598 86 negative_sampler.num_negs_per_pos 81.0 +598 86 training.batch_size 1.0 +598 87 model.embedding_dim 1.0 +598 87 loss.margin 23.07274978120827 +598 87 loss.adversarial_temperature 0.11539024444319 +598 87 optimizer.lr 0.001347649976091729 +598 87 negative_sampler.num_negs_per_pos 86.0 +598 87 training.batch_size 2.0 +598 88 model.embedding_dim 2.0 +598 88 loss.margin 10.229845048231107 +598 88 loss.adversarial_temperature 0.17983167415730758 +598 88 optimizer.lr 0.07544478446269491 +598 88 negative_sampler.num_negs_per_pos 14.0 +598 88 training.batch_size 2.0 +598 89 model.embedding_dim 1.0 +598 89 loss.margin 3.8161092238329215 +598 89 loss.adversarial_temperature 0.6465290514098805 +598 89 optimizer.lr 0.009716893022031015 +598 89 negative_sampler.num_negs_per_pos 75.0 +598 89 training.batch_size 1.0 +598 90 model.embedding_dim 2.0 +598 90 loss.margin 19.094789789328807 +598 90 loss.adversarial_temperature 0.370762520531426 +598 90 optimizer.lr 0.04667062679795166 +598 90 negative_sampler.num_negs_per_pos 20.0 +598 90 training.batch_size 1.0 +598 91 model.embedding_dim 1.0 +598 91 loss.margin 5.277556322966057 +598 91 loss.adversarial_temperature 0.2098357353132997 +598 91 optimizer.lr 0.003366662492207176 +598 91 negative_sampler.num_negs_per_pos 22.0 +598 91 training.batch_size 2.0 +598 92 model.embedding_dim 2.0 +598 92 loss.margin 27.351873613462878 +598 92 
loss.adversarial_temperature 0.982077033600819 +598 92 optimizer.lr 0.06881086804630629 +598 92 negative_sampler.num_negs_per_pos 4.0 +598 92 training.batch_size 1.0 +598 93 model.embedding_dim 1.0 +598 93 loss.margin 15.218867583532198 +598 93 loss.adversarial_temperature 0.7077937413524045 +598 93 optimizer.lr 0.08159227078627547 +598 93 negative_sampler.num_negs_per_pos 80.0 +598 93 training.batch_size 0.0 +598 94 model.embedding_dim 1.0 +598 94 loss.margin 6.728969485366599 +598 94 loss.adversarial_temperature 0.3672612326899623 +598 94 optimizer.lr 0.016511833404324265 +598 94 negative_sampler.num_negs_per_pos 75.0 +598 94 training.batch_size 1.0 +598 95 model.embedding_dim 1.0 +598 95 loss.margin 19.08362995267028 +598 95 loss.adversarial_temperature 0.3757308943315001 +598 95 optimizer.lr 0.0030543586719671894 +598 95 negative_sampler.num_negs_per_pos 40.0 +598 95 training.batch_size 1.0 +598 96 model.embedding_dim 1.0 +598 96 loss.margin 25.12531085876812 +598 96 loss.adversarial_temperature 0.4486458090274442 +598 96 optimizer.lr 0.004626653704778477 +598 96 negative_sampler.num_negs_per_pos 0.0 +598 96 training.batch_size 1.0 +598 97 model.embedding_dim 1.0 +598 97 loss.margin 27.395368087621147 +598 97 loss.adversarial_temperature 0.7854981528896972 +598 97 optimizer.lr 0.010349236862741114 +598 97 negative_sampler.num_negs_per_pos 70.0 +598 97 training.batch_size 0.0 +598 98 model.embedding_dim 2.0 +598 98 loss.margin 17.871455567777254 +598 98 loss.adversarial_temperature 0.6602355429700165 +598 98 optimizer.lr 0.0200276304540923 +598 98 negative_sampler.num_negs_per_pos 65.0 +598 98 training.batch_size 1.0 +598 99 model.embedding_dim 2.0 +598 99 loss.margin 19.230791835938565 +598 99 loss.adversarial_temperature 0.3369884898764312 +598 99 optimizer.lr 0.002388561917661029 +598 99 negative_sampler.num_negs_per_pos 82.0 +598 99 training.batch_size 1.0 +598 100 model.embedding_dim 1.0 +598 100 loss.margin 24.27841909010346 +598 100 
loss.adversarial_temperature 0.131734995458144 +598 100 optimizer.lr 0.006882008500088629 +598 100 negative_sampler.num_negs_per_pos 94.0 +598 100 training.batch_size 1.0 +598 1 dataset """kinships""" +598 1 model """rotate""" +598 1 loss """nssa""" +598 1 regularizer """no""" +598 1 optimizer """adam""" +598 1 training_loop """owa""" +598 1 negative_sampler """basic""" +598 1 evaluator """rankbased""" +598 2 dataset """kinships""" +598 2 model """rotate""" +598 2 loss """nssa""" +598 2 regularizer """no""" +598 2 optimizer """adam""" +598 2 training_loop """owa""" +598 2 negative_sampler """basic""" +598 2 evaluator """rankbased""" +598 3 dataset """kinships""" +598 3 model """rotate""" +598 3 loss """nssa""" +598 3 regularizer """no""" +598 3 optimizer """adam""" +598 3 training_loop """owa""" +598 3 negative_sampler """basic""" +598 3 evaluator """rankbased""" +598 4 dataset """kinships""" +598 4 model """rotate""" +598 4 loss """nssa""" +598 4 regularizer """no""" +598 4 optimizer """adam""" +598 4 training_loop """owa""" +598 4 negative_sampler """basic""" +598 4 evaluator """rankbased""" +598 5 dataset """kinships""" +598 5 model """rotate""" +598 5 loss """nssa""" +598 5 regularizer """no""" +598 5 optimizer """adam""" +598 5 training_loop """owa""" +598 5 negative_sampler """basic""" +598 5 evaluator """rankbased""" +598 6 dataset """kinships""" +598 6 model """rotate""" +598 6 loss """nssa""" +598 6 regularizer """no""" +598 6 optimizer """adam""" +598 6 training_loop """owa""" +598 6 negative_sampler """basic""" +598 6 evaluator """rankbased""" +598 7 dataset """kinships""" +598 7 model """rotate""" +598 7 loss """nssa""" +598 7 regularizer """no""" +598 7 optimizer """adam""" +598 7 training_loop """owa""" +598 7 negative_sampler """basic""" +598 7 evaluator """rankbased""" +598 8 dataset """kinships""" +598 8 model """rotate""" +598 8 loss """nssa""" +598 8 regularizer """no""" +598 8 optimizer """adam""" +598 8 training_loop """owa""" +598 8 
negative_sampler """basic""" +598 8 evaluator """rankbased""" +598 9 dataset """kinships""" +598 9 model """rotate""" +598 9 loss """nssa""" +598 9 regularizer """no""" +598 9 optimizer """adam""" +598 9 training_loop """owa""" +598 9 negative_sampler """basic""" +598 9 evaluator """rankbased""" +598 10 dataset """kinships""" +598 10 model """rotate""" +598 10 loss """nssa""" +598 10 regularizer """no""" +598 10 optimizer """adam""" +598 10 training_loop """owa""" +598 10 negative_sampler """basic""" +598 10 evaluator """rankbased""" +598 11 dataset """kinships""" +598 11 model """rotate""" +598 11 loss """nssa""" +598 11 regularizer """no""" +598 11 optimizer """adam""" +598 11 training_loop """owa""" +598 11 negative_sampler """basic""" +598 11 evaluator """rankbased""" +598 12 dataset """kinships""" +598 12 model """rotate""" +598 12 loss """nssa""" +598 12 regularizer """no""" +598 12 optimizer """adam""" +598 12 training_loop """owa""" +598 12 negative_sampler """basic""" +598 12 evaluator """rankbased""" +598 13 dataset """kinships""" +598 13 model """rotate""" +598 13 loss """nssa""" +598 13 regularizer """no""" +598 13 optimizer """adam""" +598 13 training_loop """owa""" +598 13 negative_sampler """basic""" +598 13 evaluator """rankbased""" +598 14 dataset """kinships""" +598 14 model """rotate""" +598 14 loss """nssa""" +598 14 regularizer """no""" +598 14 optimizer """adam""" +598 14 training_loop """owa""" +598 14 negative_sampler """basic""" +598 14 evaluator """rankbased""" +598 15 dataset """kinships""" +598 15 model """rotate""" +598 15 loss """nssa""" +598 15 regularizer """no""" +598 15 optimizer """adam""" +598 15 training_loop """owa""" +598 15 negative_sampler """basic""" +598 15 evaluator """rankbased""" +598 16 dataset """kinships""" +598 16 model """rotate""" +598 16 loss """nssa""" +598 16 regularizer """no""" +598 16 optimizer """adam""" +598 16 training_loop """owa""" +598 16 negative_sampler """basic""" +598 16 evaluator """rankbased""" 
+598 17 dataset """kinships""" +598 17 model """rotate""" +598 17 loss """nssa""" +598 17 regularizer """no""" +598 17 optimizer """adam""" +598 17 training_loop """owa""" +598 17 negative_sampler """basic""" +598 17 evaluator """rankbased""" +598 18 dataset """kinships""" +598 18 model """rotate""" +598 18 loss """nssa""" +598 18 regularizer """no""" +598 18 optimizer """adam""" +598 18 training_loop """owa""" +598 18 negative_sampler """basic""" +598 18 evaluator """rankbased""" +598 19 dataset """kinships""" +598 19 model """rotate""" +598 19 loss """nssa""" +598 19 regularizer """no""" +598 19 optimizer """adam""" +598 19 training_loop """owa""" +598 19 negative_sampler """basic""" +598 19 evaluator """rankbased""" +598 20 dataset """kinships""" +598 20 model """rotate""" +598 20 loss """nssa""" +598 20 regularizer """no""" +598 20 optimizer """adam""" +598 20 training_loop """owa""" +598 20 negative_sampler """basic""" +598 20 evaluator """rankbased""" +598 21 dataset """kinships""" +598 21 model """rotate""" +598 21 loss """nssa""" +598 21 regularizer """no""" +598 21 optimizer """adam""" +598 21 training_loop """owa""" +598 21 negative_sampler """basic""" +598 21 evaluator """rankbased""" +598 22 dataset """kinships""" +598 22 model """rotate""" +598 22 loss """nssa""" +598 22 regularizer """no""" +598 22 optimizer """adam""" +598 22 training_loop """owa""" +598 22 negative_sampler """basic""" +598 22 evaluator """rankbased""" +598 23 dataset """kinships""" +598 23 model """rotate""" +598 23 loss """nssa""" +598 23 regularizer """no""" +598 23 optimizer """adam""" +598 23 training_loop """owa""" +598 23 negative_sampler """basic""" +598 23 evaluator """rankbased""" +598 24 dataset """kinships""" +598 24 model """rotate""" +598 24 loss """nssa""" +598 24 regularizer """no""" +598 24 optimizer """adam""" +598 24 training_loop """owa""" +598 24 negative_sampler """basic""" +598 24 evaluator """rankbased""" +598 25 dataset """kinships""" +598 25 model 
"""rotate""" +598 25 loss """nssa""" +598 25 regularizer """no""" +598 25 optimizer """adam""" +598 25 training_loop """owa""" +598 25 negative_sampler """basic""" +598 25 evaluator """rankbased""" +598 26 dataset """kinships""" +598 26 model """rotate""" +598 26 loss """nssa""" +598 26 regularizer """no""" +598 26 optimizer """adam""" +598 26 training_loop """owa""" +598 26 negative_sampler """basic""" +598 26 evaluator """rankbased""" +598 27 dataset """kinships""" +598 27 model """rotate""" +598 27 loss """nssa""" +598 27 regularizer """no""" +598 27 optimizer """adam""" +598 27 training_loop """owa""" +598 27 negative_sampler """basic""" +598 27 evaluator """rankbased""" +598 28 dataset """kinships""" +598 28 model """rotate""" +598 28 loss """nssa""" +598 28 regularizer """no""" +598 28 optimizer """adam""" +598 28 training_loop """owa""" +598 28 negative_sampler """basic""" +598 28 evaluator """rankbased""" +598 29 dataset """kinships""" +598 29 model """rotate""" +598 29 loss """nssa""" +598 29 regularizer """no""" +598 29 optimizer """adam""" +598 29 training_loop """owa""" +598 29 negative_sampler """basic""" +598 29 evaluator """rankbased""" +598 30 dataset """kinships""" +598 30 model """rotate""" +598 30 loss """nssa""" +598 30 regularizer """no""" +598 30 optimizer """adam""" +598 30 training_loop """owa""" +598 30 negative_sampler """basic""" +598 30 evaluator """rankbased""" +598 31 dataset """kinships""" +598 31 model """rotate""" +598 31 loss """nssa""" +598 31 regularizer """no""" +598 31 optimizer """adam""" +598 31 training_loop """owa""" +598 31 negative_sampler """basic""" +598 31 evaluator """rankbased""" +598 32 dataset """kinships""" +598 32 model """rotate""" +598 32 loss """nssa""" +598 32 regularizer """no""" +598 32 optimizer """adam""" +598 32 training_loop """owa""" +598 32 negative_sampler """basic""" +598 32 evaluator """rankbased""" +598 33 dataset """kinships""" +598 33 model """rotate""" +598 33 loss """nssa""" +598 33 
regularizer """no""" +598 33 optimizer """adam""" +598 33 training_loop """owa""" +598 33 negative_sampler """basic""" +598 33 evaluator """rankbased""" +598 34 dataset """kinships""" +598 34 model """rotate""" +598 34 loss """nssa""" +598 34 regularizer """no""" +598 34 optimizer """adam""" +598 34 training_loop """owa""" +598 34 negative_sampler """basic""" +598 34 evaluator """rankbased""" +598 35 dataset """kinships""" +598 35 model """rotate""" +598 35 loss """nssa""" +598 35 regularizer """no""" +598 35 optimizer """adam""" +598 35 training_loop """owa""" +598 35 negative_sampler """basic""" +598 35 evaluator """rankbased""" +598 36 dataset """kinships""" +598 36 model """rotate""" +598 36 loss """nssa""" +598 36 regularizer """no""" +598 36 optimizer """adam""" +598 36 training_loop """owa""" +598 36 negative_sampler """basic""" +598 36 evaluator """rankbased""" +598 37 dataset """kinships""" +598 37 model """rotate""" +598 37 loss """nssa""" +598 37 regularizer """no""" +598 37 optimizer """adam""" +598 37 training_loop """owa""" +598 37 negative_sampler """basic""" +598 37 evaluator """rankbased""" +598 38 dataset """kinships""" +598 38 model """rotate""" +598 38 loss """nssa""" +598 38 regularizer """no""" +598 38 optimizer """adam""" +598 38 training_loop """owa""" +598 38 negative_sampler """basic""" +598 38 evaluator """rankbased""" +598 39 dataset """kinships""" +598 39 model """rotate""" +598 39 loss """nssa""" +598 39 regularizer """no""" +598 39 optimizer """adam""" +598 39 training_loop """owa""" +598 39 negative_sampler """basic""" +598 39 evaluator """rankbased""" +598 40 dataset """kinships""" +598 40 model """rotate""" +598 40 loss """nssa""" +598 40 regularizer """no""" +598 40 optimizer """adam""" +598 40 training_loop """owa""" +598 40 negative_sampler """basic""" +598 40 evaluator """rankbased""" +598 41 dataset """kinships""" +598 41 model """rotate""" +598 41 loss """nssa""" +598 41 regularizer """no""" +598 41 optimizer """adam""" +598 
41 training_loop """owa""" +598 41 negative_sampler """basic""" +598 41 evaluator """rankbased""" +598 42 dataset """kinships""" +598 42 model """rotate""" +598 42 loss """nssa""" +598 42 regularizer """no""" +598 42 optimizer """adam""" +598 42 training_loop """owa""" +598 42 negative_sampler """basic""" +598 42 evaluator """rankbased""" +598 43 dataset """kinships""" +598 43 model """rotate""" +598 43 loss """nssa""" +598 43 regularizer """no""" +598 43 optimizer """adam""" +598 43 training_loop """owa""" +598 43 negative_sampler """basic""" +598 43 evaluator """rankbased""" +598 44 dataset """kinships""" +598 44 model """rotate""" +598 44 loss """nssa""" +598 44 regularizer """no""" +598 44 optimizer """adam""" +598 44 training_loop """owa""" +598 44 negative_sampler """basic""" +598 44 evaluator """rankbased""" +598 45 dataset """kinships""" +598 45 model """rotate""" +598 45 loss """nssa""" +598 45 regularizer """no""" +598 45 optimizer """adam""" +598 45 training_loop """owa""" +598 45 negative_sampler """basic""" +598 45 evaluator """rankbased""" +598 46 dataset """kinships""" +598 46 model """rotate""" +598 46 loss """nssa""" +598 46 regularizer """no""" +598 46 optimizer """adam""" +598 46 training_loop """owa""" +598 46 negative_sampler """basic""" +598 46 evaluator """rankbased""" +598 47 dataset """kinships""" +598 47 model """rotate""" +598 47 loss """nssa""" +598 47 regularizer """no""" +598 47 optimizer """adam""" +598 47 training_loop """owa""" +598 47 negative_sampler """basic""" +598 47 evaluator """rankbased""" +598 48 dataset """kinships""" +598 48 model """rotate""" +598 48 loss """nssa""" +598 48 regularizer """no""" +598 48 optimizer """adam""" +598 48 training_loop """owa""" +598 48 negative_sampler """basic""" +598 48 evaluator """rankbased""" +598 49 dataset """kinships""" +598 49 model """rotate""" +598 49 loss """nssa""" +598 49 regularizer """no""" +598 49 optimizer """adam""" +598 49 training_loop """owa""" +598 49 negative_sampler 
"""basic""" +598 49 evaluator """rankbased""" +598 50 dataset """kinships""" +598 50 model """rotate""" +598 50 loss """nssa""" +598 50 regularizer """no""" +598 50 optimizer """adam""" +598 50 training_loop """owa""" +598 50 negative_sampler """basic""" +598 50 evaluator """rankbased""" +598 51 dataset """kinships""" +598 51 model """rotate""" +598 51 loss """nssa""" +598 51 regularizer """no""" +598 51 optimizer """adam""" +598 51 training_loop """owa""" +598 51 negative_sampler """basic""" +598 51 evaluator """rankbased""" +598 52 dataset """kinships""" +598 52 model """rotate""" +598 52 loss """nssa""" +598 52 regularizer """no""" +598 52 optimizer """adam""" +598 52 training_loop """owa""" +598 52 negative_sampler """basic""" +598 52 evaluator """rankbased""" +598 53 dataset """kinships""" +598 53 model """rotate""" +598 53 loss """nssa""" +598 53 regularizer """no""" +598 53 optimizer """adam""" +598 53 training_loop """owa""" +598 53 negative_sampler """basic""" +598 53 evaluator """rankbased""" +598 54 dataset """kinships""" +598 54 model """rotate""" +598 54 loss """nssa""" +598 54 regularizer """no""" +598 54 optimizer """adam""" +598 54 training_loop """owa""" +598 54 negative_sampler """basic""" +598 54 evaluator """rankbased""" +598 55 dataset """kinships""" +598 55 model """rotate""" +598 55 loss """nssa""" +598 55 regularizer """no""" +598 55 optimizer """adam""" +598 55 training_loop """owa""" +598 55 negative_sampler """basic""" +598 55 evaluator """rankbased""" +598 56 dataset """kinships""" +598 56 model """rotate""" +598 56 loss """nssa""" +598 56 regularizer """no""" +598 56 optimizer """adam""" +598 56 training_loop """owa""" +598 56 negative_sampler """basic""" +598 56 evaluator """rankbased""" +598 57 dataset """kinships""" +598 57 model """rotate""" +598 57 loss """nssa""" +598 57 regularizer """no""" +598 57 optimizer """adam""" +598 57 training_loop """owa""" +598 57 negative_sampler """basic""" +598 57 evaluator """rankbased""" +598 58 
dataset """kinships""" +598 58 model """rotate""" +598 58 loss """nssa""" +598 58 regularizer """no""" +598 58 optimizer """adam""" +598 58 training_loop """owa""" +598 58 negative_sampler """basic""" +598 58 evaluator """rankbased""" +598 59 dataset """kinships""" +598 59 model """rotate""" +598 59 loss """nssa""" +598 59 regularizer """no""" +598 59 optimizer """adam""" +598 59 training_loop """owa""" +598 59 negative_sampler """basic""" +598 59 evaluator """rankbased""" +598 60 dataset """kinships""" +598 60 model """rotate""" +598 60 loss """nssa""" +598 60 regularizer """no""" +598 60 optimizer """adam""" +598 60 training_loop """owa""" +598 60 negative_sampler """basic""" +598 60 evaluator """rankbased""" +598 61 dataset """kinships""" +598 61 model """rotate""" +598 61 loss """nssa""" +598 61 regularizer """no""" +598 61 optimizer """adam""" +598 61 training_loop """owa""" +598 61 negative_sampler """basic""" +598 61 evaluator """rankbased""" +598 62 dataset """kinships""" +598 62 model """rotate""" +598 62 loss """nssa""" +598 62 regularizer """no""" +598 62 optimizer """adam""" +598 62 training_loop """owa""" +598 62 negative_sampler """basic""" +598 62 evaluator """rankbased""" +598 63 dataset """kinships""" +598 63 model """rotate""" +598 63 loss """nssa""" +598 63 regularizer """no""" +598 63 optimizer """adam""" +598 63 training_loop """owa""" +598 63 negative_sampler """basic""" +598 63 evaluator """rankbased""" +598 64 dataset """kinships""" +598 64 model """rotate""" +598 64 loss """nssa""" +598 64 regularizer """no""" +598 64 optimizer """adam""" +598 64 training_loop """owa""" +598 64 negative_sampler """basic""" +598 64 evaluator """rankbased""" +598 65 dataset """kinships""" +598 65 model """rotate""" +598 65 loss """nssa""" +598 65 regularizer """no""" +598 65 optimizer """adam""" +598 65 training_loop """owa""" +598 65 negative_sampler """basic""" +598 65 evaluator """rankbased""" +598 66 dataset """kinships""" +598 66 model """rotate""" +598 
66 loss """nssa""" +598 66 regularizer """no""" +598 66 optimizer """adam""" +598 66 training_loop """owa""" +598 66 negative_sampler """basic""" +598 66 evaluator """rankbased""" +598 67 dataset """kinships""" +598 67 model """rotate""" +598 67 loss """nssa""" +598 67 regularizer """no""" +598 67 optimizer """adam""" +598 67 training_loop """owa""" +598 67 negative_sampler """basic""" +598 67 evaluator """rankbased""" +598 68 dataset """kinships""" +598 68 model """rotate""" +598 68 loss """nssa""" +598 68 regularizer """no""" +598 68 optimizer """adam""" +598 68 training_loop """owa""" +598 68 negative_sampler """basic""" +598 68 evaluator """rankbased""" +598 69 dataset """kinships""" +598 69 model """rotate""" +598 69 loss """nssa""" +598 69 regularizer """no""" +598 69 optimizer """adam""" +598 69 training_loop """owa""" +598 69 negative_sampler """basic""" +598 69 evaluator """rankbased""" +598 70 dataset """kinships""" +598 70 model """rotate""" +598 70 loss """nssa""" +598 70 regularizer """no""" +598 70 optimizer """adam""" +598 70 training_loop """owa""" +598 70 negative_sampler """basic""" +598 70 evaluator """rankbased""" +598 71 dataset """kinships""" +598 71 model """rotate""" +598 71 loss """nssa""" +598 71 regularizer """no""" +598 71 optimizer """adam""" +598 71 training_loop """owa""" +598 71 negative_sampler """basic""" +598 71 evaluator """rankbased""" +598 72 dataset """kinships""" +598 72 model """rotate""" +598 72 loss """nssa""" +598 72 regularizer """no""" +598 72 optimizer """adam""" +598 72 training_loop """owa""" +598 72 negative_sampler """basic""" +598 72 evaluator """rankbased""" +598 73 dataset """kinships""" +598 73 model """rotate""" +598 73 loss """nssa""" +598 73 regularizer """no""" +598 73 optimizer """adam""" +598 73 training_loop """owa""" +598 73 negative_sampler """basic""" +598 73 evaluator """rankbased""" +598 74 dataset """kinships""" +598 74 model """rotate""" +598 74 loss """nssa""" +598 74 regularizer """no""" +598 74 
optimizer """adam""" +598 74 training_loop """owa""" +598 74 negative_sampler """basic""" +598 74 evaluator """rankbased""" +598 75 dataset """kinships""" +598 75 model """rotate""" +598 75 loss """nssa""" +598 75 regularizer """no""" +598 75 optimizer """adam""" +598 75 training_loop """owa""" +598 75 negative_sampler """basic""" +598 75 evaluator """rankbased""" +598 76 dataset """kinships""" +598 76 model """rotate""" +598 76 loss """nssa""" +598 76 regularizer """no""" +598 76 optimizer """adam""" +598 76 training_loop """owa""" +598 76 negative_sampler """basic""" +598 76 evaluator """rankbased""" +598 77 dataset """kinships""" +598 77 model """rotate""" +598 77 loss """nssa""" +598 77 regularizer """no""" +598 77 optimizer """adam""" +598 77 training_loop """owa""" +598 77 negative_sampler """basic""" +598 77 evaluator """rankbased""" +598 78 dataset """kinships""" +598 78 model """rotate""" +598 78 loss """nssa""" +598 78 regularizer """no""" +598 78 optimizer """adam""" +598 78 training_loop """owa""" +598 78 negative_sampler """basic""" +598 78 evaluator """rankbased""" +598 79 dataset """kinships""" +598 79 model """rotate""" +598 79 loss """nssa""" +598 79 regularizer """no""" +598 79 optimizer """adam""" +598 79 training_loop """owa""" +598 79 negative_sampler """basic""" +598 79 evaluator """rankbased""" +598 80 dataset """kinships""" +598 80 model """rotate""" +598 80 loss """nssa""" +598 80 regularizer """no""" +598 80 optimizer """adam""" +598 80 training_loop """owa""" +598 80 negative_sampler """basic""" +598 80 evaluator """rankbased""" +598 81 dataset """kinships""" +598 81 model """rotate""" +598 81 loss """nssa""" +598 81 regularizer """no""" +598 81 optimizer """adam""" +598 81 training_loop """owa""" +598 81 negative_sampler """basic""" +598 81 evaluator """rankbased""" +598 82 dataset """kinships""" +598 82 model """rotate""" +598 82 loss """nssa""" +598 82 regularizer """no""" +598 82 optimizer """adam""" +598 82 training_loop """owa""" 
+598 82 negative_sampler """basic""" +598 82 evaluator """rankbased""" +598 83 dataset """kinships""" +598 83 model """rotate""" +598 83 loss """nssa""" +598 83 regularizer """no""" +598 83 optimizer """adam""" +598 83 training_loop """owa""" +598 83 negative_sampler """basic""" +598 83 evaluator """rankbased""" +598 84 dataset """kinships""" +598 84 model """rotate""" +598 84 loss """nssa""" +598 84 regularizer """no""" +598 84 optimizer """adam""" +598 84 training_loop """owa""" +598 84 negative_sampler """basic""" +598 84 evaluator """rankbased""" +598 85 dataset """kinships""" +598 85 model """rotate""" +598 85 loss """nssa""" +598 85 regularizer """no""" +598 85 optimizer """adam""" +598 85 training_loop """owa""" +598 85 negative_sampler """basic""" +598 85 evaluator """rankbased""" +598 86 dataset """kinships""" +598 86 model """rotate""" +598 86 loss """nssa""" +598 86 regularizer """no""" +598 86 optimizer """adam""" +598 86 training_loop """owa""" +598 86 negative_sampler """basic""" +598 86 evaluator """rankbased""" +598 87 dataset """kinships""" +598 87 model """rotate""" +598 87 loss """nssa""" +598 87 regularizer """no""" +598 87 optimizer """adam""" +598 87 training_loop """owa""" +598 87 negative_sampler """basic""" +598 87 evaluator """rankbased""" +598 88 dataset """kinships""" +598 88 model """rotate""" +598 88 loss """nssa""" +598 88 regularizer """no""" +598 88 optimizer """adam""" +598 88 training_loop """owa""" +598 88 negative_sampler """basic""" +598 88 evaluator """rankbased""" +598 89 dataset """kinships""" +598 89 model """rotate""" +598 89 loss """nssa""" +598 89 regularizer """no""" +598 89 optimizer """adam""" +598 89 training_loop """owa""" +598 89 negative_sampler """basic""" +598 89 evaluator """rankbased""" +598 90 dataset """kinships""" +598 90 model """rotate""" +598 90 loss """nssa""" +598 90 regularizer """no""" +598 90 optimizer """adam""" +598 90 training_loop """owa""" +598 90 negative_sampler """basic""" +598 90 evaluator 
"""rankbased""" +598 91 dataset """kinships""" +598 91 model """rotate""" +598 91 loss """nssa""" +598 91 regularizer """no""" +598 91 optimizer """adam""" +598 91 training_loop """owa""" +598 91 negative_sampler """basic""" +598 91 evaluator """rankbased""" +598 92 dataset """kinships""" +598 92 model """rotate""" +598 92 loss """nssa""" +598 92 regularizer """no""" +598 92 optimizer """adam""" +598 92 training_loop """owa""" +598 92 negative_sampler """basic""" +598 92 evaluator """rankbased""" +598 93 dataset """kinships""" +598 93 model """rotate""" +598 93 loss """nssa""" +598 93 regularizer """no""" +598 93 optimizer """adam""" +598 93 training_loop """owa""" +598 93 negative_sampler """basic""" +598 93 evaluator """rankbased""" +598 94 dataset """kinships""" +598 94 model """rotate""" +598 94 loss """nssa""" +598 94 regularizer """no""" +598 94 optimizer """adam""" +598 94 training_loop """owa""" +598 94 negative_sampler """basic""" +598 94 evaluator """rankbased""" +598 95 dataset """kinships""" +598 95 model """rotate""" +598 95 loss """nssa""" +598 95 regularizer """no""" +598 95 optimizer """adam""" +598 95 training_loop """owa""" +598 95 negative_sampler """basic""" +598 95 evaluator """rankbased""" +598 96 dataset """kinships""" +598 96 model """rotate""" +598 96 loss """nssa""" +598 96 regularizer """no""" +598 96 optimizer """adam""" +598 96 training_loop """owa""" +598 96 negative_sampler """basic""" +598 96 evaluator """rankbased""" +598 97 dataset """kinships""" +598 97 model """rotate""" +598 97 loss """nssa""" +598 97 regularizer """no""" +598 97 optimizer """adam""" +598 97 training_loop """owa""" +598 97 negative_sampler """basic""" +598 97 evaluator """rankbased""" +598 98 dataset """kinships""" +598 98 model """rotate""" +598 98 loss """nssa""" +598 98 regularizer """no""" +598 98 optimizer """adam""" +598 98 training_loop """owa""" +598 98 negative_sampler """basic""" +598 98 evaluator """rankbased""" +598 99 dataset """kinships""" +598 99 
model """rotate""" +598 99 loss """nssa""" +598 99 regularizer """no""" +598 99 optimizer """adam""" +598 99 training_loop """owa""" +598 99 negative_sampler """basic""" +598 99 evaluator """rankbased""" +598 100 dataset """kinships""" +598 100 model """rotate""" +598 100 loss """nssa""" +598 100 regularizer """no""" +598 100 optimizer """adam""" +598 100 training_loop """owa""" +598 100 negative_sampler """basic""" +598 100 evaluator """rankbased""" +599 1 model.embedding_dim 1.0 +599 1 optimizer.lr 0.0017593228786756915 +599 1 negative_sampler.num_negs_per_pos 66.0 +599 1 training.batch_size 1.0 +599 2 model.embedding_dim 0.0 +599 2 optimizer.lr 0.010951862564026079 +599 2 negative_sampler.num_negs_per_pos 68.0 +599 2 training.batch_size 0.0 +599 3 model.embedding_dim 0.0 +599 3 optimizer.lr 0.0025037586801894577 +599 3 negative_sampler.num_negs_per_pos 15.0 +599 3 training.batch_size 1.0 +599 4 model.embedding_dim 1.0 +599 4 optimizer.lr 0.004346044635707743 +599 4 negative_sampler.num_negs_per_pos 68.0 +599 4 training.batch_size 0.0 +599 5 model.embedding_dim 0.0 +599 5 optimizer.lr 0.011977104901880522 +599 5 negative_sampler.num_negs_per_pos 3.0 +599 5 training.batch_size 2.0 +599 6 model.embedding_dim 0.0 +599 6 optimizer.lr 0.03674920148024535 +599 6 negative_sampler.num_negs_per_pos 57.0 +599 6 training.batch_size 2.0 +599 7 model.embedding_dim 2.0 +599 7 optimizer.lr 0.017492094535569623 +599 7 negative_sampler.num_negs_per_pos 88.0 +599 7 training.batch_size 0.0 +599 8 model.embedding_dim 1.0 +599 8 optimizer.lr 0.0011287748683124375 +599 8 negative_sampler.num_negs_per_pos 91.0 +599 8 training.batch_size 1.0 +599 9 model.embedding_dim 1.0 +599 9 optimizer.lr 0.014782695673531127 +599 9 negative_sampler.num_negs_per_pos 1.0 +599 9 training.batch_size 1.0 +599 10 model.embedding_dim 0.0 +599 10 optimizer.lr 0.007867909705362169 +599 10 negative_sampler.num_negs_per_pos 57.0 +599 10 training.batch_size 2.0 +599 11 model.embedding_dim 1.0 +599 11 
optimizer.lr 0.035586177876506093 +599 11 negative_sampler.num_negs_per_pos 77.0 +599 11 training.batch_size 1.0 +599 12 model.embedding_dim 2.0 +599 12 optimizer.lr 0.03217748984155315 +599 12 negative_sampler.num_negs_per_pos 80.0 +599 12 training.batch_size 0.0 +599 13 model.embedding_dim 2.0 +599 13 optimizer.lr 0.08703287258951029 +599 13 negative_sampler.num_negs_per_pos 31.0 +599 13 training.batch_size 1.0 +599 14 model.embedding_dim 1.0 +599 14 optimizer.lr 0.09465414649784068 +599 14 negative_sampler.num_negs_per_pos 7.0 +599 14 training.batch_size 1.0 +599 15 model.embedding_dim 0.0 +599 15 optimizer.lr 0.002126304626338622 +599 15 negative_sampler.num_negs_per_pos 38.0 +599 15 training.batch_size 1.0 +599 16 model.embedding_dim 0.0 +599 16 optimizer.lr 0.007273820524354545 +599 16 negative_sampler.num_negs_per_pos 55.0 +599 16 training.batch_size 0.0 +599 17 model.embedding_dim 0.0 +599 17 optimizer.lr 0.007964430856977579 +599 17 negative_sampler.num_negs_per_pos 21.0 +599 17 training.batch_size 2.0 +599 18 model.embedding_dim 0.0 +599 18 optimizer.lr 0.00118273941473739 +599 18 negative_sampler.num_negs_per_pos 98.0 +599 18 training.batch_size 2.0 +599 19 model.embedding_dim 0.0 +599 19 optimizer.lr 0.006637730877426436 +599 19 negative_sampler.num_negs_per_pos 23.0 +599 19 training.batch_size 2.0 +599 20 model.embedding_dim 2.0 +599 20 optimizer.lr 0.034006894723854354 +599 20 negative_sampler.num_negs_per_pos 82.0 +599 20 training.batch_size 1.0 +599 21 model.embedding_dim 1.0 +599 21 optimizer.lr 0.08239090869516241 +599 21 negative_sampler.num_negs_per_pos 3.0 +599 21 training.batch_size 1.0 +599 22 model.embedding_dim 1.0 +599 22 optimizer.lr 0.002906755247240763 +599 22 negative_sampler.num_negs_per_pos 14.0 +599 22 training.batch_size 0.0 +599 23 model.embedding_dim 2.0 +599 23 optimizer.lr 0.028697341678075198 +599 23 negative_sampler.num_negs_per_pos 16.0 +599 23 training.batch_size 2.0 +599 24 model.embedding_dim 0.0 +599 24 optimizer.lr 
0.08195410307025597 +599 24 negative_sampler.num_negs_per_pos 48.0 +599 24 training.batch_size 0.0 +599 25 model.embedding_dim 1.0 +599 25 optimizer.lr 0.062093637973699774 +599 25 negative_sampler.num_negs_per_pos 8.0 +599 25 training.batch_size 2.0 +599 26 model.embedding_dim 0.0 +599 26 optimizer.lr 0.0016727983667364534 +599 26 negative_sampler.num_negs_per_pos 87.0 +599 26 training.batch_size 2.0 +599 27 model.embedding_dim 0.0 +599 27 optimizer.lr 0.005753664138029765 +599 27 negative_sampler.num_negs_per_pos 29.0 +599 27 training.batch_size 2.0 +599 28 model.embedding_dim 1.0 +599 28 optimizer.lr 0.0022264011015179193 +599 28 negative_sampler.num_negs_per_pos 72.0 +599 28 training.batch_size 2.0 +599 29 model.embedding_dim 1.0 +599 29 optimizer.lr 0.011849523061630807 +599 29 negative_sampler.num_negs_per_pos 73.0 +599 29 training.batch_size 2.0 +599 30 model.embedding_dim 0.0 +599 30 optimizer.lr 0.0010020771871846266 +599 30 negative_sampler.num_negs_per_pos 28.0 +599 30 training.batch_size 0.0 +599 31 model.embedding_dim 1.0 +599 31 optimizer.lr 0.009181168563069985 +599 31 negative_sampler.num_negs_per_pos 23.0 +599 31 training.batch_size 2.0 +599 32 model.embedding_dim 1.0 +599 32 optimizer.lr 0.010009596263586435 +599 32 negative_sampler.num_negs_per_pos 31.0 +599 32 training.batch_size 1.0 +599 33 model.embedding_dim 1.0 +599 33 optimizer.lr 0.0026660041867536685 +599 33 negative_sampler.num_negs_per_pos 13.0 +599 33 training.batch_size 2.0 +599 34 model.embedding_dim 1.0 +599 34 optimizer.lr 0.08449520662653677 +599 34 negative_sampler.num_negs_per_pos 72.0 +599 34 training.batch_size 0.0 +599 35 model.embedding_dim 0.0 +599 35 optimizer.lr 0.0013200171703345127 +599 35 negative_sampler.num_negs_per_pos 83.0 +599 35 training.batch_size 1.0 +599 36 model.embedding_dim 1.0 +599 36 optimizer.lr 0.05499991824813294 +599 36 negative_sampler.num_negs_per_pos 21.0 +599 36 training.batch_size 2.0 +599 37 model.embedding_dim 2.0 +599 37 optimizer.lr 
0.07008561352833566 +599 37 negative_sampler.num_negs_per_pos 63.0 +599 37 training.batch_size 1.0 +599 38 model.embedding_dim 2.0 +599 38 optimizer.lr 0.0028662594135925173 +599 38 negative_sampler.num_negs_per_pos 31.0 +599 38 training.batch_size 0.0 +599 39 model.embedding_dim 1.0 +599 39 optimizer.lr 0.002300444950488771 +599 39 negative_sampler.num_negs_per_pos 18.0 +599 39 training.batch_size 1.0 +599 40 model.embedding_dim 0.0 +599 40 optimizer.lr 0.02717160871088947 +599 40 negative_sampler.num_negs_per_pos 13.0 +599 40 training.batch_size 0.0 +599 41 model.embedding_dim 1.0 +599 41 optimizer.lr 0.017954104754901883 +599 41 negative_sampler.num_negs_per_pos 51.0 +599 41 training.batch_size 0.0 +599 42 model.embedding_dim 1.0 +599 42 optimizer.lr 0.004051831193475994 +599 42 negative_sampler.num_negs_per_pos 20.0 +599 42 training.batch_size 0.0 +599 43 model.embedding_dim 1.0 +599 43 optimizer.lr 0.0011999747358460116 +599 43 negative_sampler.num_negs_per_pos 44.0 +599 43 training.batch_size 2.0 +599 44 model.embedding_dim 2.0 +599 44 optimizer.lr 0.013686862032258202 +599 44 negative_sampler.num_negs_per_pos 30.0 +599 44 training.batch_size 0.0 +599 45 model.embedding_dim 2.0 +599 45 optimizer.lr 0.051023578423170436 +599 45 negative_sampler.num_negs_per_pos 71.0 +599 45 training.batch_size 0.0 +599 46 model.embedding_dim 1.0 +599 46 optimizer.lr 0.00396758951440925 +599 46 negative_sampler.num_negs_per_pos 15.0 +599 46 training.batch_size 2.0 +599 47 model.embedding_dim 0.0 +599 47 optimizer.lr 0.0012612153688619052 +599 47 negative_sampler.num_negs_per_pos 84.0 +599 47 training.batch_size 2.0 +599 48 model.embedding_dim 0.0 +599 48 optimizer.lr 0.0014362355256776966 +599 48 negative_sampler.num_negs_per_pos 19.0 +599 48 training.batch_size 1.0 +599 49 model.embedding_dim 2.0 +599 49 optimizer.lr 0.005146130601370675 +599 49 negative_sampler.num_negs_per_pos 22.0 +599 49 training.batch_size 0.0 +599 50 model.embedding_dim 1.0 +599 50 optimizer.lr 
0.02403683418533282 +599 50 negative_sampler.num_negs_per_pos 44.0 +599 50 training.batch_size 2.0 +599 51 model.embedding_dim 2.0 +599 51 optimizer.lr 0.01671899080663395 +599 51 negative_sampler.num_negs_per_pos 97.0 +599 51 training.batch_size 2.0 +599 52 model.embedding_dim 0.0 +599 52 optimizer.lr 0.0639837998920565 +599 52 negative_sampler.num_negs_per_pos 19.0 +599 52 training.batch_size 1.0 +599 53 model.embedding_dim 0.0 +599 53 optimizer.lr 0.02112043656083099 +599 53 negative_sampler.num_negs_per_pos 10.0 +599 53 training.batch_size 2.0 +599 54 model.embedding_dim 0.0 +599 54 optimizer.lr 0.07546866500225562 +599 54 negative_sampler.num_negs_per_pos 83.0 +599 54 training.batch_size 2.0 +599 55 model.embedding_dim 2.0 +599 55 optimizer.lr 0.004804291391859382 +599 55 negative_sampler.num_negs_per_pos 9.0 +599 55 training.batch_size 1.0 +599 56 model.embedding_dim 0.0 +599 56 optimizer.lr 0.0027939208964482738 +599 56 negative_sampler.num_negs_per_pos 68.0 +599 56 training.batch_size 1.0 +599 57 model.embedding_dim 0.0 +599 57 optimizer.lr 0.0023247893543128792 +599 57 negative_sampler.num_negs_per_pos 68.0 +599 57 training.batch_size 0.0 +599 58 model.embedding_dim 1.0 +599 58 optimizer.lr 0.020110944226965955 +599 58 negative_sampler.num_negs_per_pos 12.0 +599 58 training.batch_size 0.0 +599 59 model.embedding_dim 1.0 +599 59 optimizer.lr 0.004034031839091761 +599 59 negative_sampler.num_negs_per_pos 71.0 +599 59 training.batch_size 2.0 +599 60 model.embedding_dim 0.0 +599 60 optimizer.lr 0.02814607932554424 +599 60 negative_sampler.num_negs_per_pos 63.0 +599 60 training.batch_size 0.0 +599 61 model.embedding_dim 0.0 +599 61 optimizer.lr 0.007025493398380923 +599 61 negative_sampler.num_negs_per_pos 34.0 +599 61 training.batch_size 0.0 +599 62 model.embedding_dim 1.0 +599 62 optimizer.lr 0.001784566977045629 +599 62 negative_sampler.num_negs_per_pos 46.0 +599 62 training.batch_size 2.0 +599 63 model.embedding_dim 0.0 +599 63 optimizer.lr 
0.013390199613886355 +599 63 negative_sampler.num_negs_per_pos 31.0 +599 63 training.batch_size 0.0 +599 64 model.embedding_dim 0.0 +599 64 optimizer.lr 0.07997305148828207 +599 64 negative_sampler.num_negs_per_pos 98.0 +599 64 training.batch_size 2.0 +599 65 model.embedding_dim 2.0 +599 65 optimizer.lr 0.00219989976610006 +599 65 negative_sampler.num_negs_per_pos 39.0 +599 65 training.batch_size 0.0 +599 66 model.embedding_dim 1.0 +599 66 optimizer.lr 0.069274933140351 +599 66 negative_sampler.num_negs_per_pos 21.0 +599 66 training.batch_size 2.0 +599 67 model.embedding_dim 2.0 +599 67 optimizer.lr 0.03338692136871155 +599 67 negative_sampler.num_negs_per_pos 24.0 +599 67 training.batch_size 1.0 +599 68 model.embedding_dim 1.0 +599 68 optimizer.lr 0.004902948347500537 +599 68 negative_sampler.num_negs_per_pos 25.0 +599 68 training.batch_size 1.0 +599 69 model.embedding_dim 2.0 +599 69 optimizer.lr 0.009777890495897406 +599 69 negative_sampler.num_negs_per_pos 56.0 +599 69 training.batch_size 2.0 +599 70 model.embedding_dim 0.0 +599 70 optimizer.lr 0.004850412100643826 +599 70 negative_sampler.num_negs_per_pos 21.0 +599 70 training.batch_size 2.0 +599 71 model.embedding_dim 1.0 +599 71 optimizer.lr 0.003962029985877375 +599 71 negative_sampler.num_negs_per_pos 81.0 +599 71 training.batch_size 2.0 +599 72 model.embedding_dim 0.0 +599 72 optimizer.lr 0.016322320876410265 +599 72 negative_sampler.num_negs_per_pos 49.0 +599 72 training.batch_size 2.0 +599 73 model.embedding_dim 0.0 +599 73 optimizer.lr 0.009113753565942942 +599 73 negative_sampler.num_negs_per_pos 22.0 +599 73 training.batch_size 0.0 +599 74 model.embedding_dim 0.0 +599 74 optimizer.lr 0.015858670337693614 +599 74 negative_sampler.num_negs_per_pos 68.0 +599 74 training.batch_size 2.0 +599 75 model.embedding_dim 0.0 +599 75 optimizer.lr 0.02176099647473902 +599 75 negative_sampler.num_negs_per_pos 62.0 +599 75 training.batch_size 2.0 +599 76 model.embedding_dim 2.0 +599 76 optimizer.lr 
0.003976649133175131 +599 76 negative_sampler.num_negs_per_pos 49.0 +599 76 training.batch_size 2.0 +599 77 model.embedding_dim 1.0 +599 77 optimizer.lr 0.008373513898052893 +599 77 negative_sampler.num_negs_per_pos 90.0 +599 77 training.batch_size 0.0 +599 78 model.embedding_dim 0.0 +599 78 optimizer.lr 0.0014878582307003329 +599 78 negative_sampler.num_negs_per_pos 54.0 +599 78 training.batch_size 0.0 +599 79 model.embedding_dim 0.0 +599 79 optimizer.lr 0.08828028109108177 +599 79 negative_sampler.num_negs_per_pos 71.0 +599 79 training.batch_size 2.0 +599 80 model.embedding_dim 2.0 +599 80 optimizer.lr 0.09174841805709134 +599 80 negative_sampler.num_negs_per_pos 62.0 +599 80 training.batch_size 2.0 +599 81 model.embedding_dim 1.0 +599 81 optimizer.lr 0.007276202028025563 +599 81 negative_sampler.num_negs_per_pos 13.0 +599 81 training.batch_size 2.0 +599 82 model.embedding_dim 2.0 +599 82 optimizer.lr 0.04278761754763914 +599 82 negative_sampler.num_negs_per_pos 62.0 +599 82 training.batch_size 0.0 +599 83 model.embedding_dim 0.0 +599 83 optimizer.lr 0.007203598255602224 +599 83 negative_sampler.num_negs_per_pos 12.0 +599 83 training.batch_size 2.0 +599 84 model.embedding_dim 2.0 +599 84 optimizer.lr 0.054920479739323534 +599 84 negative_sampler.num_negs_per_pos 0.0 +599 84 training.batch_size 0.0 +599 85 model.embedding_dim 1.0 +599 85 optimizer.lr 0.004350441389966286 +599 85 negative_sampler.num_negs_per_pos 42.0 +599 85 training.batch_size 1.0 +599 86 model.embedding_dim 0.0 +599 86 optimizer.lr 0.01428079960871787 +599 86 negative_sampler.num_negs_per_pos 35.0 +599 86 training.batch_size 0.0 +599 87 model.embedding_dim 1.0 +599 87 optimizer.lr 0.00103195897129582 +599 87 negative_sampler.num_negs_per_pos 44.0 +599 87 training.batch_size 0.0 +599 88 model.embedding_dim 0.0 +599 88 optimizer.lr 0.06181609761647583 +599 88 negative_sampler.num_negs_per_pos 78.0 +599 88 training.batch_size 2.0 +599 89 model.embedding_dim 0.0 +599 89 optimizer.lr 
0.006921432782137589 +599 89 negative_sampler.num_negs_per_pos 73.0 +599 89 training.batch_size 1.0 +599 90 model.embedding_dim 2.0 +599 90 optimizer.lr 0.013034730458672386 +599 90 negative_sampler.num_negs_per_pos 5.0 +599 90 training.batch_size 0.0 +599 91 model.embedding_dim 2.0 +599 91 optimizer.lr 0.0011049624440351692 +599 91 negative_sampler.num_negs_per_pos 27.0 +599 91 training.batch_size 1.0 +599 92 model.embedding_dim 2.0 +599 92 optimizer.lr 0.004152182785241637 +599 92 negative_sampler.num_negs_per_pos 2.0 +599 92 training.batch_size 2.0 +599 93 model.embedding_dim 1.0 +599 93 optimizer.lr 0.004018469496773896 +599 93 negative_sampler.num_negs_per_pos 2.0 +599 93 training.batch_size 0.0 +599 94 model.embedding_dim 0.0 +599 94 optimizer.lr 0.009052624825543384 +599 94 negative_sampler.num_negs_per_pos 69.0 +599 94 training.batch_size 0.0 +599 95 model.embedding_dim 2.0 +599 95 optimizer.lr 0.014964678346899178 +599 95 negative_sampler.num_negs_per_pos 53.0 +599 95 training.batch_size 1.0 +599 96 model.embedding_dim 1.0 +599 96 optimizer.lr 0.0019060937040075421 +599 96 negative_sampler.num_negs_per_pos 43.0 +599 96 training.batch_size 2.0 +599 97 model.embedding_dim 1.0 +599 97 optimizer.lr 0.014945973817887117 +599 97 negative_sampler.num_negs_per_pos 7.0 +599 97 training.batch_size 0.0 +599 98 model.embedding_dim 1.0 +599 98 optimizer.lr 0.05276420766586669 +599 98 negative_sampler.num_negs_per_pos 1.0 +599 98 training.batch_size 1.0 +599 99 model.embedding_dim 2.0 +599 99 optimizer.lr 0.0019813393216864955 +599 99 negative_sampler.num_negs_per_pos 91.0 +599 99 training.batch_size 1.0 +599 100 model.embedding_dim 2.0 +599 100 optimizer.lr 0.02600394107983868 +599 100 negative_sampler.num_negs_per_pos 23.0 +599 100 training.batch_size 2.0 +599 1 dataset """kinships""" +599 1 model """rotate""" +599 1 loss """bceaftersigmoid""" +599 1 regularizer """no""" +599 1 optimizer """adam""" +599 1 training_loop """owa""" +599 1 negative_sampler """basic""" 
+599 1 evaluator """rankbased""" +599 2 dataset """kinships""" +599 2 model """rotate""" +599 2 loss """bceaftersigmoid""" +599 2 regularizer """no""" +599 2 optimizer """adam""" +599 2 training_loop """owa""" +599 2 negative_sampler """basic""" +599 2 evaluator """rankbased""" +599 3 dataset """kinships""" +599 3 model """rotate""" +599 3 loss """bceaftersigmoid""" +599 3 regularizer """no""" +599 3 optimizer """adam""" +599 3 training_loop """owa""" +599 3 negative_sampler """basic""" +599 3 evaluator """rankbased""" +599 4 dataset """kinships""" +599 4 model """rotate""" +599 4 loss """bceaftersigmoid""" +599 4 regularizer """no""" +599 4 optimizer """adam""" +599 4 training_loop """owa""" +599 4 negative_sampler """basic""" +599 4 evaluator """rankbased""" +599 5 dataset """kinships""" +599 5 model """rotate""" +599 5 loss """bceaftersigmoid""" +599 5 regularizer """no""" +599 5 optimizer """adam""" +599 5 training_loop """owa""" +599 5 negative_sampler """basic""" +599 5 evaluator """rankbased""" +599 6 dataset """kinships""" +599 6 model """rotate""" +599 6 loss """bceaftersigmoid""" +599 6 regularizer """no""" +599 6 optimizer """adam""" +599 6 training_loop """owa""" +599 6 negative_sampler """basic""" +599 6 evaluator """rankbased""" +599 7 dataset """kinships""" +599 7 model """rotate""" +599 7 loss """bceaftersigmoid""" +599 7 regularizer """no""" +599 7 optimizer """adam""" +599 7 training_loop """owa""" +599 7 negative_sampler """basic""" +599 7 evaluator """rankbased""" +599 8 dataset """kinships""" +599 8 model """rotate""" +599 8 loss """bceaftersigmoid""" +599 8 regularizer """no""" +599 8 optimizer """adam""" +599 8 training_loop """owa""" +599 8 negative_sampler """basic""" +599 8 evaluator """rankbased""" +599 9 dataset """kinships""" +599 9 model """rotate""" +599 9 loss """bceaftersigmoid""" +599 9 regularizer """no""" +599 9 optimizer """adam""" +599 9 training_loop """owa""" +599 9 negative_sampler """basic""" +599 9 evaluator 
"""rankbased""" +599 10 dataset """kinships""" +599 10 model """rotate""" +599 10 loss """bceaftersigmoid""" +599 10 regularizer """no""" +599 10 optimizer """adam""" +599 10 training_loop """owa""" +599 10 negative_sampler """basic""" +599 10 evaluator """rankbased""" +599 11 dataset """kinships""" +599 11 model """rotate""" +599 11 loss """bceaftersigmoid""" +599 11 regularizer """no""" +599 11 optimizer """adam""" +599 11 training_loop """owa""" +599 11 negative_sampler """basic""" +599 11 evaluator """rankbased""" +599 12 dataset """kinships""" +599 12 model """rotate""" +599 12 loss """bceaftersigmoid""" +599 12 regularizer """no""" +599 12 optimizer """adam""" +599 12 training_loop """owa""" +599 12 negative_sampler """basic""" +599 12 evaluator """rankbased""" +599 13 dataset """kinships""" +599 13 model """rotate""" +599 13 loss """bceaftersigmoid""" +599 13 regularizer """no""" +599 13 optimizer """adam""" +599 13 training_loop """owa""" +599 13 negative_sampler """basic""" +599 13 evaluator """rankbased""" +599 14 dataset """kinships""" +599 14 model """rotate""" +599 14 loss """bceaftersigmoid""" +599 14 regularizer """no""" +599 14 optimizer """adam""" +599 14 training_loop """owa""" +599 14 negative_sampler """basic""" +599 14 evaluator """rankbased""" +599 15 dataset """kinships""" +599 15 model """rotate""" +599 15 loss """bceaftersigmoid""" +599 15 regularizer """no""" +599 15 optimizer """adam""" +599 15 training_loop """owa""" +599 15 negative_sampler """basic""" +599 15 evaluator """rankbased""" +599 16 dataset """kinships""" +599 16 model """rotate""" +599 16 loss """bceaftersigmoid""" +599 16 regularizer """no""" +599 16 optimizer """adam""" +599 16 training_loop """owa""" +599 16 negative_sampler """basic""" +599 16 evaluator """rankbased""" +599 17 dataset """kinships""" +599 17 model """rotate""" +599 17 loss """bceaftersigmoid""" +599 17 regularizer """no""" +599 17 optimizer """adam""" +599 17 training_loop """owa""" +599 17 
negative_sampler """basic""" +599 17 evaluator """rankbased""" +599 18 dataset """kinships""" +599 18 model """rotate""" +599 18 loss """bceaftersigmoid""" +599 18 regularizer """no""" +599 18 optimizer """adam""" +599 18 training_loop """owa""" +599 18 negative_sampler """basic""" +599 18 evaluator """rankbased""" +599 19 dataset """kinships""" +599 19 model """rotate""" +599 19 loss """bceaftersigmoid""" +599 19 regularizer """no""" +599 19 optimizer """adam""" +599 19 training_loop """owa""" +599 19 negative_sampler """basic""" +599 19 evaluator """rankbased""" +599 20 dataset """kinships""" +599 20 model """rotate""" +599 20 loss """bceaftersigmoid""" +599 20 regularizer """no""" +599 20 optimizer """adam""" +599 20 training_loop """owa""" +599 20 negative_sampler """basic""" +599 20 evaluator """rankbased""" +599 21 dataset """kinships""" +599 21 model """rotate""" +599 21 loss """bceaftersigmoid""" +599 21 regularizer """no""" +599 21 optimizer """adam""" +599 21 training_loop """owa""" +599 21 negative_sampler """basic""" +599 21 evaluator """rankbased""" +599 22 dataset """kinships""" +599 22 model """rotate""" +599 22 loss """bceaftersigmoid""" +599 22 regularizer """no""" +599 22 optimizer """adam""" +599 22 training_loop """owa""" +599 22 negative_sampler """basic""" +599 22 evaluator """rankbased""" +599 23 dataset """kinships""" +599 23 model """rotate""" +599 23 loss """bceaftersigmoid""" +599 23 regularizer """no""" +599 23 optimizer """adam""" +599 23 training_loop """owa""" +599 23 negative_sampler """basic""" +599 23 evaluator """rankbased""" +599 24 dataset """kinships""" +599 24 model """rotate""" +599 24 loss """bceaftersigmoid""" +599 24 regularizer """no""" +599 24 optimizer """adam""" +599 24 training_loop """owa""" +599 24 negative_sampler """basic""" +599 24 evaluator """rankbased""" +599 25 dataset """kinships""" +599 25 model """rotate""" +599 25 loss """bceaftersigmoid""" +599 25 regularizer """no""" +599 25 optimizer """adam""" +599 25 
training_loop """owa""" +599 25 negative_sampler """basic""" +599 25 evaluator """rankbased""" +599 26 dataset """kinships""" +599 26 model """rotate""" +599 26 loss """bceaftersigmoid""" +599 26 regularizer """no""" +599 26 optimizer """adam""" +599 26 training_loop """owa""" +599 26 negative_sampler """basic""" +599 26 evaluator """rankbased""" +599 27 dataset """kinships""" +599 27 model """rotate""" +599 27 loss """bceaftersigmoid""" +599 27 regularizer """no""" +599 27 optimizer """adam""" +599 27 training_loop """owa""" +599 27 negative_sampler """basic""" +599 27 evaluator """rankbased""" +599 28 dataset """kinships""" +599 28 model """rotate""" +599 28 loss """bceaftersigmoid""" +599 28 regularizer """no""" +599 28 optimizer """adam""" +599 28 training_loop """owa""" +599 28 negative_sampler """basic""" +599 28 evaluator """rankbased""" +599 29 dataset """kinships""" +599 29 model """rotate""" +599 29 loss """bceaftersigmoid""" +599 29 regularizer """no""" +599 29 optimizer """adam""" +599 29 training_loop """owa""" +599 29 negative_sampler """basic""" +599 29 evaluator """rankbased""" +599 30 dataset """kinships""" +599 30 model """rotate""" +599 30 loss """bceaftersigmoid""" +599 30 regularizer """no""" +599 30 optimizer """adam""" +599 30 training_loop """owa""" +599 30 negative_sampler """basic""" +599 30 evaluator """rankbased""" +599 31 dataset """kinships""" +599 31 model """rotate""" +599 31 loss """bceaftersigmoid""" +599 31 regularizer """no""" +599 31 optimizer """adam""" +599 31 training_loop """owa""" +599 31 negative_sampler """basic""" +599 31 evaluator """rankbased""" +599 32 dataset """kinships""" +599 32 model """rotate""" +599 32 loss """bceaftersigmoid""" +599 32 regularizer """no""" +599 32 optimizer """adam""" +599 32 training_loop """owa""" +599 32 negative_sampler """basic""" +599 32 evaluator """rankbased""" +599 33 dataset """kinships""" +599 33 model """rotate""" +599 33 loss """bceaftersigmoid""" +599 33 regularizer """no""" +599 
33 optimizer """adam""" +599 33 training_loop """owa""" +599 33 negative_sampler """basic""" +599 33 evaluator """rankbased""" +599 34 dataset """kinships""" +599 34 model """rotate""" +599 34 loss """bceaftersigmoid""" +599 34 regularizer """no""" +599 34 optimizer """adam""" +599 34 training_loop """owa""" +599 34 negative_sampler """basic""" +599 34 evaluator """rankbased""" +599 35 dataset """kinships""" +599 35 model """rotate""" +599 35 loss """bceaftersigmoid""" +599 35 regularizer """no""" +599 35 optimizer """adam""" +599 35 training_loop """owa""" +599 35 negative_sampler """basic""" +599 35 evaluator """rankbased""" +599 36 dataset """kinships""" +599 36 model """rotate""" +599 36 loss """bceaftersigmoid""" +599 36 regularizer """no""" +599 36 optimizer """adam""" +599 36 training_loop """owa""" +599 36 negative_sampler """basic""" +599 36 evaluator """rankbased""" +599 37 dataset """kinships""" +599 37 model """rotate""" +599 37 loss """bceaftersigmoid""" +599 37 regularizer """no""" +599 37 optimizer """adam""" +599 37 training_loop """owa""" +599 37 negative_sampler """basic""" +599 37 evaluator """rankbased""" +599 38 dataset """kinships""" +599 38 model """rotate""" +599 38 loss """bceaftersigmoid""" +599 38 regularizer """no""" +599 38 optimizer """adam""" +599 38 training_loop """owa""" +599 38 negative_sampler """basic""" +599 38 evaluator """rankbased""" +599 39 dataset """kinships""" +599 39 model """rotate""" +599 39 loss """bceaftersigmoid""" +599 39 regularizer """no""" +599 39 optimizer """adam""" +599 39 training_loop """owa""" +599 39 negative_sampler """basic""" +599 39 evaluator """rankbased""" +599 40 dataset """kinships""" +599 40 model """rotate""" +599 40 loss """bceaftersigmoid""" +599 40 regularizer """no""" +599 40 optimizer """adam""" +599 40 training_loop """owa""" +599 40 negative_sampler """basic""" +599 40 evaluator """rankbased""" +599 41 dataset """kinships""" +599 41 model """rotate""" +599 41 loss """bceaftersigmoid""" 
+599 41 regularizer """no""" +599 41 optimizer """adam""" +599 41 training_loop """owa""" +599 41 negative_sampler """basic""" +599 41 evaluator """rankbased""" +599 42 dataset """kinships""" +599 42 model """rotate""" +599 42 loss """bceaftersigmoid""" +599 42 regularizer """no""" +599 42 optimizer """adam""" +599 42 training_loop """owa""" +599 42 negative_sampler """basic""" +599 42 evaluator """rankbased""" +599 43 dataset """kinships""" +599 43 model """rotate""" +599 43 loss """bceaftersigmoid""" +599 43 regularizer """no""" +599 43 optimizer """adam""" +599 43 training_loop """owa""" +599 43 negative_sampler """basic""" +599 43 evaluator """rankbased""" +599 44 dataset """kinships""" +599 44 model """rotate""" +599 44 loss """bceaftersigmoid""" +599 44 regularizer """no""" +599 44 optimizer """adam""" +599 44 training_loop """owa""" +599 44 negative_sampler """basic""" +599 44 evaluator """rankbased""" +599 45 dataset """kinships""" +599 45 model """rotate""" +599 45 loss """bceaftersigmoid""" +599 45 regularizer """no""" +599 45 optimizer """adam""" +599 45 training_loop """owa""" +599 45 negative_sampler """basic""" +599 45 evaluator """rankbased""" +599 46 dataset """kinships""" +599 46 model """rotate""" +599 46 loss """bceaftersigmoid""" +599 46 regularizer """no""" +599 46 optimizer """adam""" +599 46 training_loop """owa""" +599 46 negative_sampler """basic""" +599 46 evaluator """rankbased""" +599 47 dataset """kinships""" +599 47 model """rotate""" +599 47 loss """bceaftersigmoid""" +599 47 regularizer """no""" +599 47 optimizer """adam""" +599 47 training_loop """owa""" +599 47 negative_sampler """basic""" +599 47 evaluator """rankbased""" +599 48 dataset """kinships""" +599 48 model """rotate""" +599 48 loss """bceaftersigmoid""" +599 48 regularizer """no""" +599 48 optimizer """adam""" +599 48 training_loop """owa""" +599 48 negative_sampler """basic""" +599 48 evaluator """rankbased""" +599 49 dataset """kinships""" +599 49 model """rotate""" 
+599 49 loss """bceaftersigmoid""" +599 49 regularizer """no""" +599 49 optimizer """adam""" +599 49 training_loop """owa""" +599 49 negative_sampler """basic""" +599 49 evaluator """rankbased""" +599 50 dataset """kinships""" +599 50 model """rotate""" +599 50 loss """bceaftersigmoid""" +599 50 regularizer """no""" +599 50 optimizer """adam""" +599 50 training_loop """owa""" +599 50 negative_sampler """basic""" +599 50 evaluator """rankbased""" +599 51 dataset """kinships""" +599 51 model """rotate""" +599 51 loss """bceaftersigmoid""" +599 51 regularizer """no""" +599 51 optimizer """adam""" +599 51 training_loop """owa""" +599 51 negative_sampler """basic""" +599 51 evaluator """rankbased""" +599 52 dataset """kinships""" +599 52 model """rotate""" +599 52 loss """bceaftersigmoid""" +599 52 regularizer """no""" +599 52 optimizer """adam""" +599 52 training_loop """owa""" +599 52 negative_sampler """basic""" +599 52 evaluator """rankbased""" +599 53 dataset """kinships""" +599 53 model """rotate""" +599 53 loss """bceaftersigmoid""" +599 53 regularizer """no""" +599 53 optimizer """adam""" +599 53 training_loop """owa""" +599 53 negative_sampler """basic""" +599 53 evaluator """rankbased""" +599 54 dataset """kinships""" +599 54 model """rotate""" +599 54 loss """bceaftersigmoid""" +599 54 regularizer """no""" +599 54 optimizer """adam""" +599 54 training_loop """owa""" +599 54 negative_sampler """basic""" +599 54 evaluator """rankbased""" +599 55 dataset """kinships""" +599 55 model """rotate""" +599 55 loss """bceaftersigmoid""" +599 55 regularizer """no""" +599 55 optimizer """adam""" +599 55 training_loop """owa""" +599 55 negative_sampler """basic""" +599 55 evaluator """rankbased""" +599 56 dataset """kinships""" +599 56 model """rotate""" +599 56 loss """bceaftersigmoid""" +599 56 regularizer """no""" +599 56 optimizer """adam""" +599 56 training_loop """owa""" +599 56 negative_sampler """basic""" +599 56 evaluator """rankbased""" +599 57 dataset 
"""kinships""" +599 57 model """rotate""" +599 57 loss """bceaftersigmoid""" +599 57 regularizer """no""" +599 57 optimizer """adam""" +599 57 training_loop """owa""" +599 57 negative_sampler """basic""" +599 57 evaluator """rankbased""" +599 58 dataset """kinships""" +599 58 model """rotate""" +599 58 loss """bceaftersigmoid""" +599 58 regularizer """no""" +599 58 optimizer """adam""" +599 58 training_loop """owa""" +599 58 negative_sampler """basic""" +599 58 evaluator """rankbased""" +599 59 dataset """kinships""" +599 59 model """rotate""" +599 59 loss """bceaftersigmoid""" +599 59 regularizer """no""" +599 59 optimizer """adam""" +599 59 training_loop """owa""" +599 59 negative_sampler """basic""" +599 59 evaluator """rankbased""" +599 60 dataset """kinships""" +599 60 model """rotate""" +599 60 loss """bceaftersigmoid""" +599 60 regularizer """no""" +599 60 optimizer """adam""" +599 60 training_loop """owa""" +599 60 negative_sampler """basic""" +599 60 evaluator """rankbased""" +599 61 dataset """kinships""" +599 61 model """rotate""" +599 61 loss """bceaftersigmoid""" +599 61 regularizer """no""" +599 61 optimizer """adam""" +599 61 training_loop """owa""" +599 61 negative_sampler """basic""" +599 61 evaluator """rankbased""" +599 62 dataset """kinships""" +599 62 model """rotate""" +599 62 loss """bceaftersigmoid""" +599 62 regularizer """no""" +599 62 optimizer """adam""" +599 62 training_loop """owa""" +599 62 negative_sampler """basic""" +599 62 evaluator """rankbased""" +599 63 dataset """kinships""" +599 63 model """rotate""" +599 63 loss """bceaftersigmoid""" +599 63 regularizer """no""" +599 63 optimizer """adam""" +599 63 training_loop """owa""" +599 63 negative_sampler """basic""" +599 63 evaluator """rankbased""" +599 64 dataset """kinships""" +599 64 model """rotate""" +599 64 loss """bceaftersigmoid""" +599 64 regularizer """no""" +599 64 optimizer """adam""" +599 64 training_loop """owa""" +599 64 negative_sampler """basic""" +599 64 evaluator 
"""rankbased""" +599 65 dataset """kinships""" +599 65 model """rotate""" +599 65 loss """bceaftersigmoid""" +599 65 regularizer """no""" +599 65 optimizer """adam""" +599 65 training_loop """owa""" +599 65 negative_sampler """basic""" +599 65 evaluator """rankbased""" +599 66 dataset """kinships""" +599 66 model """rotate""" +599 66 loss """bceaftersigmoid""" +599 66 regularizer """no""" +599 66 optimizer """adam""" +599 66 training_loop """owa""" +599 66 negative_sampler """basic""" +599 66 evaluator """rankbased""" +599 67 dataset """kinships""" +599 67 model """rotate""" +599 67 loss """bceaftersigmoid""" +599 67 regularizer """no""" +599 67 optimizer """adam""" +599 67 training_loop """owa""" +599 67 negative_sampler """basic""" +599 67 evaluator """rankbased""" +599 68 dataset """kinships""" +599 68 model """rotate""" +599 68 loss """bceaftersigmoid""" +599 68 regularizer """no""" +599 68 optimizer """adam""" +599 68 training_loop """owa""" +599 68 negative_sampler """basic""" +599 68 evaluator """rankbased""" +599 69 dataset """kinships""" +599 69 model """rotate""" +599 69 loss """bceaftersigmoid""" +599 69 regularizer """no""" +599 69 optimizer """adam""" +599 69 training_loop """owa""" +599 69 negative_sampler """basic""" +599 69 evaluator """rankbased""" +599 70 dataset """kinships""" +599 70 model """rotate""" +599 70 loss """bceaftersigmoid""" +599 70 regularizer """no""" +599 70 optimizer """adam""" +599 70 training_loop """owa""" +599 70 negative_sampler """basic""" +599 70 evaluator """rankbased""" +599 71 dataset """kinships""" +599 71 model """rotate""" +599 71 loss """bceaftersigmoid""" +599 71 regularizer """no""" +599 71 optimizer """adam""" +599 71 training_loop """owa""" +599 71 negative_sampler """basic""" +599 71 evaluator """rankbased""" +599 72 dataset """kinships""" +599 72 model """rotate""" +599 72 loss """bceaftersigmoid""" +599 72 regularizer """no""" +599 72 optimizer """adam""" +599 72 training_loop """owa""" +599 72 
negative_sampler """basic""" +599 72 evaluator """rankbased""" +599 73 dataset """kinships""" +599 73 model """rotate""" +599 73 loss """bceaftersigmoid""" +599 73 regularizer """no""" +599 73 optimizer """adam""" +599 73 training_loop """owa""" +599 73 negative_sampler """basic""" +599 73 evaluator """rankbased""" +599 74 dataset """kinships""" +599 74 model """rotate""" +599 74 loss """bceaftersigmoid""" +599 74 regularizer """no""" +599 74 optimizer """adam""" +599 74 training_loop """owa""" +599 74 negative_sampler """basic""" +599 74 evaluator """rankbased""" +599 75 dataset """kinships""" +599 75 model """rotate""" +599 75 loss """bceaftersigmoid""" +599 75 regularizer """no""" +599 75 optimizer """adam""" +599 75 training_loop """owa""" +599 75 negative_sampler """basic""" +599 75 evaluator """rankbased""" +599 76 dataset """kinships""" +599 76 model """rotate""" +599 76 loss """bceaftersigmoid""" +599 76 regularizer """no""" +599 76 optimizer """adam""" +599 76 training_loop """owa""" +599 76 negative_sampler """basic""" +599 76 evaluator """rankbased""" +599 77 dataset """kinships""" +599 77 model """rotate""" +599 77 loss """bceaftersigmoid""" +599 77 regularizer """no""" +599 77 optimizer """adam""" +599 77 training_loop """owa""" +599 77 negative_sampler """basic""" +599 77 evaluator """rankbased""" +599 78 dataset """kinships""" +599 78 model """rotate""" +599 78 loss """bceaftersigmoid""" +599 78 regularizer """no""" +599 78 optimizer """adam""" +599 78 training_loop """owa""" +599 78 negative_sampler """basic""" +599 78 evaluator """rankbased""" +599 79 dataset """kinships""" +599 79 model """rotate""" +599 79 loss """bceaftersigmoid""" +599 79 regularizer """no""" +599 79 optimizer """adam""" +599 79 training_loop """owa""" +599 79 negative_sampler """basic""" +599 79 evaluator """rankbased""" +599 80 dataset """kinships""" +599 80 model """rotate""" +599 80 loss """bceaftersigmoid""" +599 80 regularizer """no""" +599 80 optimizer """adam""" +599 80 
training_loop """owa""" +599 80 negative_sampler """basic""" +599 80 evaluator """rankbased""" +599 81 dataset """kinships""" +599 81 model """rotate""" +599 81 loss """bceaftersigmoid""" +599 81 regularizer """no""" +599 81 optimizer """adam""" +599 81 training_loop """owa""" +599 81 negative_sampler """basic""" +599 81 evaluator """rankbased""" +599 82 dataset """kinships""" +599 82 model """rotate""" +599 82 loss """bceaftersigmoid""" +599 82 regularizer """no""" +599 82 optimizer """adam""" +599 82 training_loop """owa""" +599 82 negative_sampler """basic""" +599 82 evaluator """rankbased""" +599 83 dataset """kinships""" +599 83 model """rotate""" +599 83 loss """bceaftersigmoid""" +599 83 regularizer """no""" +599 83 optimizer """adam""" +599 83 training_loop """owa""" +599 83 negative_sampler """basic""" +599 83 evaluator """rankbased""" +599 84 dataset """kinships""" +599 84 model """rotate""" +599 84 loss """bceaftersigmoid""" +599 84 regularizer """no""" +599 84 optimizer """adam""" +599 84 training_loop """owa""" +599 84 negative_sampler """basic""" +599 84 evaluator """rankbased""" +599 85 dataset """kinships""" +599 85 model """rotate""" +599 85 loss """bceaftersigmoid""" +599 85 regularizer """no""" +599 85 optimizer """adam""" +599 85 training_loop """owa""" +599 85 negative_sampler """basic""" +599 85 evaluator """rankbased""" +599 86 dataset """kinships""" +599 86 model """rotate""" +599 86 loss """bceaftersigmoid""" +599 86 regularizer """no""" +599 86 optimizer """adam""" +599 86 training_loop """owa""" +599 86 negative_sampler """basic""" +599 86 evaluator """rankbased""" +599 87 dataset """kinships""" +599 87 model """rotate""" +599 87 loss """bceaftersigmoid""" +599 87 regularizer """no""" +599 87 optimizer """adam""" +599 87 training_loop """owa""" +599 87 negative_sampler """basic""" +599 87 evaluator """rankbased""" +599 88 dataset """kinships""" +599 88 model """rotate""" +599 88 loss """bceaftersigmoid""" +599 88 regularizer """no""" +599 
88 optimizer """adam""" +599 88 training_loop """owa""" +599 88 negative_sampler """basic""" +599 88 evaluator """rankbased""" +599 89 dataset """kinships""" +599 89 model """rotate""" +599 89 loss """bceaftersigmoid""" +599 89 regularizer """no""" +599 89 optimizer """adam""" +599 89 training_loop """owa""" +599 89 negative_sampler """basic""" +599 89 evaluator """rankbased""" +599 90 dataset """kinships""" +599 90 model """rotate""" +599 90 loss """bceaftersigmoid""" +599 90 regularizer """no""" +599 90 optimizer """adam""" +599 90 training_loop """owa""" +599 90 negative_sampler """basic""" +599 90 evaluator """rankbased""" +599 91 dataset """kinships""" +599 91 model """rotate""" +599 91 loss """bceaftersigmoid""" +599 91 regularizer """no""" +599 91 optimizer """adam""" +599 91 training_loop """owa""" +599 91 negative_sampler """basic""" +599 91 evaluator """rankbased""" +599 92 dataset """kinships""" +599 92 model """rotate""" +599 92 loss """bceaftersigmoid""" +599 92 regularizer """no""" +599 92 optimizer """adam""" +599 92 training_loop """owa""" +599 92 negative_sampler """basic""" +599 92 evaluator """rankbased""" +599 93 dataset """kinships""" +599 93 model """rotate""" +599 93 loss """bceaftersigmoid""" +599 93 regularizer """no""" +599 93 optimizer """adam""" +599 93 training_loop """owa""" +599 93 negative_sampler """basic""" +599 93 evaluator """rankbased""" +599 94 dataset """kinships""" +599 94 model """rotate""" +599 94 loss """bceaftersigmoid""" +599 94 regularizer """no""" +599 94 optimizer """adam""" +599 94 training_loop """owa""" +599 94 negative_sampler """basic""" +599 94 evaluator """rankbased""" +599 95 dataset """kinships""" +599 95 model """rotate""" +599 95 loss """bceaftersigmoid""" +599 95 regularizer """no""" +599 95 optimizer """adam""" +599 95 training_loop """owa""" +599 95 negative_sampler """basic""" +599 95 evaluator """rankbased""" +599 96 dataset """kinships""" +599 96 model """rotate""" +599 96 loss """bceaftersigmoid""" 
+599 96 regularizer """no""" +599 96 optimizer """adam""" +599 96 training_loop """owa""" +599 96 negative_sampler """basic""" +599 96 evaluator """rankbased""" +599 97 dataset """kinships""" +599 97 model """rotate""" +599 97 loss """bceaftersigmoid""" +599 97 regularizer """no""" +599 97 optimizer """adam""" +599 97 training_loop """owa""" +599 97 negative_sampler """basic""" +599 97 evaluator """rankbased""" +599 98 dataset """kinships""" +599 98 model """rotate""" +599 98 loss """bceaftersigmoid""" +599 98 regularizer """no""" +599 98 optimizer """adam""" +599 98 training_loop """owa""" +599 98 negative_sampler """basic""" +599 98 evaluator """rankbased""" +599 99 dataset """kinships""" +599 99 model """rotate""" +599 99 loss """bceaftersigmoid""" +599 99 regularizer """no""" +599 99 optimizer """adam""" +599 99 training_loop """owa""" +599 99 negative_sampler """basic""" +599 99 evaluator """rankbased""" +599 100 dataset """kinships""" +599 100 model """rotate""" +599 100 loss """bceaftersigmoid""" +599 100 regularizer """no""" +599 100 optimizer """adam""" +599 100 training_loop """owa""" +599 100 negative_sampler """basic""" +599 100 evaluator """rankbased""" +600 1 model.embedding_dim 2.0 +600 1 optimizer.lr 0.07365080296671174 +600 1 negative_sampler.num_negs_per_pos 36.0 +600 1 training.batch_size 0.0 +600 2 model.embedding_dim 0.0 +600 2 optimizer.lr 0.019094943620049536 +600 2 negative_sampler.num_negs_per_pos 45.0 +600 2 training.batch_size 0.0 +600 3 model.embedding_dim 1.0 +600 3 optimizer.lr 0.023124102246560946 +600 3 negative_sampler.num_negs_per_pos 33.0 +600 3 training.batch_size 0.0 +600 4 model.embedding_dim 0.0 +600 4 optimizer.lr 0.01695091679654676 +600 4 negative_sampler.num_negs_per_pos 7.0 +600 4 training.batch_size 2.0 +600 5 model.embedding_dim 1.0 +600 5 optimizer.lr 0.017780694009170636 +600 5 negative_sampler.num_negs_per_pos 70.0 +600 5 training.batch_size 2.0 +600 6 model.embedding_dim 1.0 +600 6 optimizer.lr 0.003330285864792524 
+600 6 negative_sampler.num_negs_per_pos 89.0 +600 6 training.batch_size 1.0 +600 7 model.embedding_dim 0.0 +600 7 optimizer.lr 0.004010852341351494 +600 7 negative_sampler.num_negs_per_pos 26.0 +600 7 training.batch_size 1.0 +600 8 model.embedding_dim 2.0 +600 8 optimizer.lr 0.008139301623364835 +600 8 negative_sampler.num_negs_per_pos 57.0 +600 8 training.batch_size 1.0 +600 9 model.embedding_dim 2.0 +600 9 optimizer.lr 0.0018961750244540878 +600 9 negative_sampler.num_negs_per_pos 14.0 +600 9 training.batch_size 2.0 +600 10 model.embedding_dim 1.0 +600 10 optimizer.lr 0.020431150897778864 +600 10 negative_sampler.num_negs_per_pos 81.0 +600 10 training.batch_size 0.0 +600 11 model.embedding_dim 2.0 +600 11 optimizer.lr 0.09723678832828063 +600 11 negative_sampler.num_negs_per_pos 45.0 +600 11 training.batch_size 0.0 +600 12 model.embedding_dim 2.0 +600 12 optimizer.lr 0.0063460416992132725 +600 12 negative_sampler.num_negs_per_pos 58.0 +600 12 training.batch_size 0.0 +600 13 model.embedding_dim 1.0 +600 13 optimizer.lr 0.054065506501933 +600 13 negative_sampler.num_negs_per_pos 75.0 +600 13 training.batch_size 1.0 +600 14 model.embedding_dim 2.0 +600 14 optimizer.lr 0.012790319416829652 +600 14 negative_sampler.num_negs_per_pos 36.0 +600 14 training.batch_size 1.0 +600 15 model.embedding_dim 1.0 +600 15 optimizer.lr 0.06225655019465578 +600 15 negative_sampler.num_negs_per_pos 78.0 +600 15 training.batch_size 2.0 +600 16 model.embedding_dim 0.0 +600 16 optimizer.lr 0.01275714580761368 +600 16 negative_sampler.num_negs_per_pos 79.0 +600 16 training.batch_size 2.0 +600 17 model.embedding_dim 2.0 +600 17 optimizer.lr 0.012347067110369153 +600 17 negative_sampler.num_negs_per_pos 39.0 +600 17 training.batch_size 1.0 +600 18 model.embedding_dim 1.0 +600 18 optimizer.lr 0.004735910889038219 +600 18 negative_sampler.num_negs_per_pos 53.0 +600 18 training.batch_size 1.0 +600 19 model.embedding_dim 1.0 +600 19 optimizer.lr 0.04294773896973922 +600 19 
negative_sampler.num_negs_per_pos 95.0 +600 19 training.batch_size 1.0 +600 20 model.embedding_dim 1.0 +600 20 optimizer.lr 0.001647732066846133 +600 20 negative_sampler.num_negs_per_pos 78.0 +600 20 training.batch_size 1.0 +600 21 model.embedding_dim 1.0 +600 21 optimizer.lr 0.020105721832248263 +600 21 negative_sampler.num_negs_per_pos 72.0 +600 21 training.batch_size 0.0 +600 22 model.embedding_dim 1.0 +600 22 optimizer.lr 0.0011546153023978733 +600 22 negative_sampler.num_negs_per_pos 45.0 +600 22 training.batch_size 2.0 +600 23 model.embedding_dim 2.0 +600 23 optimizer.lr 0.04729246843921347 +600 23 negative_sampler.num_negs_per_pos 66.0 +600 23 training.batch_size 2.0 +600 24 model.embedding_dim 1.0 +600 24 optimizer.lr 0.060533373749008905 +600 24 negative_sampler.num_negs_per_pos 31.0 +600 24 training.batch_size 0.0 +600 25 model.embedding_dim 0.0 +600 25 optimizer.lr 0.019713159401966426 +600 25 negative_sampler.num_negs_per_pos 94.0 +600 25 training.batch_size 1.0 +600 26 model.embedding_dim 0.0 +600 26 optimizer.lr 0.03193883758964762 +600 26 negative_sampler.num_negs_per_pos 62.0 +600 26 training.batch_size 2.0 +600 27 model.embedding_dim 2.0 +600 27 optimizer.lr 0.09115926505103143 +600 27 negative_sampler.num_negs_per_pos 62.0 +600 27 training.batch_size 0.0 +600 28 model.embedding_dim 2.0 +600 28 optimizer.lr 0.09137121107723924 +600 28 negative_sampler.num_negs_per_pos 47.0 +600 28 training.batch_size 2.0 +600 29 model.embedding_dim 0.0 +600 29 optimizer.lr 0.001039155414887367 +600 29 negative_sampler.num_negs_per_pos 24.0 +600 29 training.batch_size 0.0 +600 30 model.embedding_dim 1.0 +600 30 optimizer.lr 0.00585008565222039 +600 30 negative_sampler.num_negs_per_pos 62.0 +600 30 training.batch_size 0.0 +600 31 model.embedding_dim 0.0 +600 31 optimizer.lr 0.00837541127842221 +600 31 negative_sampler.num_negs_per_pos 64.0 +600 31 training.batch_size 1.0 +600 32 model.embedding_dim 0.0 +600 32 optimizer.lr 0.021957529482963027 +600 32 
negative_sampler.num_negs_per_pos 18.0 +600 32 training.batch_size 1.0 +600 33 model.embedding_dim 0.0 +600 33 optimizer.lr 0.0018231039705157849 +600 33 negative_sampler.num_negs_per_pos 8.0 +600 33 training.batch_size 1.0 +600 34 model.embedding_dim 2.0 +600 34 optimizer.lr 0.035070046016540624 +600 34 negative_sampler.num_negs_per_pos 49.0 +600 34 training.batch_size 1.0 +600 35 model.embedding_dim 0.0 +600 35 optimizer.lr 0.005808349853241563 +600 35 negative_sampler.num_negs_per_pos 2.0 +600 35 training.batch_size 2.0 +600 36 model.embedding_dim 0.0 +600 36 optimizer.lr 0.0036101281861314095 +600 36 negative_sampler.num_negs_per_pos 48.0 +600 36 training.batch_size 1.0 +600 37 model.embedding_dim 2.0 +600 37 optimizer.lr 0.02708559415977699 +600 37 negative_sampler.num_negs_per_pos 86.0 +600 37 training.batch_size 1.0 +600 38 model.embedding_dim 2.0 +600 38 optimizer.lr 0.002183979791141389 +600 38 negative_sampler.num_negs_per_pos 85.0 +600 38 training.batch_size 2.0 +600 39 model.embedding_dim 1.0 +600 39 optimizer.lr 0.0016618779861063463 +600 39 negative_sampler.num_negs_per_pos 93.0 +600 39 training.batch_size 2.0 +600 40 model.embedding_dim 0.0 +600 40 optimizer.lr 0.008040362674536807 +600 40 negative_sampler.num_negs_per_pos 50.0 +600 40 training.batch_size 2.0 +600 41 model.embedding_dim 1.0 +600 41 optimizer.lr 0.0019003322153579331 +600 41 negative_sampler.num_negs_per_pos 47.0 +600 41 training.batch_size 0.0 +600 42 model.embedding_dim 0.0 +600 42 optimizer.lr 0.0012522535593286884 +600 42 negative_sampler.num_negs_per_pos 36.0 +600 42 training.batch_size 0.0 +600 43 model.embedding_dim 2.0 +600 43 optimizer.lr 0.0012959948612224826 +600 43 negative_sampler.num_negs_per_pos 71.0 +600 43 training.batch_size 0.0 +600 44 model.embedding_dim 0.0 +600 44 optimizer.lr 0.006323535405355042 +600 44 negative_sampler.num_negs_per_pos 96.0 +600 44 training.batch_size 1.0 +600 45 model.embedding_dim 0.0 +600 45 optimizer.lr 0.0051998074785273565 +600 45 
negative_sampler.num_negs_per_pos 51.0 +600 45 training.batch_size 1.0 +600 46 model.embedding_dim 2.0 +600 46 optimizer.lr 0.07202916007448573 +600 46 negative_sampler.num_negs_per_pos 94.0 +600 46 training.batch_size 0.0 +600 47 model.embedding_dim 1.0 +600 47 optimizer.lr 0.0041898420499498195 +600 47 negative_sampler.num_negs_per_pos 56.0 +600 47 training.batch_size 1.0 +600 48 model.embedding_dim 2.0 +600 48 optimizer.lr 0.012221530970291519 +600 48 negative_sampler.num_negs_per_pos 9.0 +600 48 training.batch_size 0.0 +600 49 model.embedding_dim 0.0 +600 49 optimizer.lr 0.09830310261518611 +600 49 negative_sampler.num_negs_per_pos 46.0 +600 49 training.batch_size 0.0 +600 50 model.embedding_dim 0.0 +600 50 optimizer.lr 0.0016539544891890857 +600 50 negative_sampler.num_negs_per_pos 88.0 +600 50 training.batch_size 1.0 +600 51 model.embedding_dim 0.0 +600 51 optimizer.lr 0.007347852566627222 +600 51 negative_sampler.num_negs_per_pos 42.0 +600 51 training.batch_size 1.0 +600 52 model.embedding_dim 0.0 +600 52 optimizer.lr 0.016076889649645716 +600 52 negative_sampler.num_negs_per_pos 56.0 +600 52 training.batch_size 1.0 +600 53 model.embedding_dim 2.0 +600 53 optimizer.lr 0.0022507314716676483 +600 53 negative_sampler.num_negs_per_pos 98.0 +600 53 training.batch_size 2.0 +600 54 model.embedding_dim 1.0 +600 54 optimizer.lr 0.006613245514909438 +600 54 negative_sampler.num_negs_per_pos 9.0 +600 54 training.batch_size 1.0 +600 55 model.embedding_dim 2.0 +600 55 optimizer.lr 0.053254897106607205 +600 55 negative_sampler.num_negs_per_pos 67.0 +600 55 training.batch_size 2.0 +600 56 model.embedding_dim 1.0 +600 56 optimizer.lr 0.025073810307756034 +600 56 negative_sampler.num_negs_per_pos 16.0 +600 56 training.batch_size 0.0 +600 57 model.embedding_dim 0.0 +600 57 optimizer.lr 0.01853620664780098 +600 57 negative_sampler.num_negs_per_pos 8.0 +600 57 training.batch_size 1.0 +600 58 model.embedding_dim 1.0 +600 58 optimizer.lr 0.004505585137010427 +600 58 
negative_sampler.num_negs_per_pos 40.0 +600 58 training.batch_size 1.0 +600 59 model.embedding_dim 1.0 +600 59 optimizer.lr 0.07831392682604052 +600 59 negative_sampler.num_negs_per_pos 31.0 +600 59 training.batch_size 0.0 +600 60 model.embedding_dim 1.0 +600 60 optimizer.lr 0.003072008519967635 +600 60 negative_sampler.num_negs_per_pos 18.0 +600 60 training.batch_size 2.0 +600 61 model.embedding_dim 2.0 +600 61 optimizer.lr 0.010282237689284223 +600 61 negative_sampler.num_negs_per_pos 95.0 +600 61 training.batch_size 1.0 +600 62 model.embedding_dim 0.0 +600 62 optimizer.lr 0.004146828326575554 +600 62 negative_sampler.num_negs_per_pos 44.0 +600 62 training.batch_size 0.0 +600 63 model.embedding_dim 2.0 +600 63 optimizer.lr 0.006312727632723687 +600 63 negative_sampler.num_negs_per_pos 84.0 +600 63 training.batch_size 0.0 +600 64 model.embedding_dim 1.0 +600 64 optimizer.lr 0.004797432752461688 +600 64 negative_sampler.num_negs_per_pos 40.0 +600 64 training.batch_size 1.0 +600 65 model.embedding_dim 2.0 +600 65 optimizer.lr 0.08777686410317406 +600 65 negative_sampler.num_negs_per_pos 99.0 +600 65 training.batch_size 0.0 +600 66 model.embedding_dim 0.0 +600 66 optimizer.lr 0.012007129713308022 +600 66 negative_sampler.num_negs_per_pos 21.0 +600 66 training.batch_size 1.0 +600 67 model.embedding_dim 2.0 +600 67 optimizer.lr 0.08293670747075804 +600 67 negative_sampler.num_negs_per_pos 44.0 +600 67 training.batch_size 0.0 +600 68 model.embedding_dim 2.0 +600 68 optimizer.lr 0.010584055617845459 +600 68 negative_sampler.num_negs_per_pos 37.0 +600 68 training.batch_size 2.0 +600 69 model.embedding_dim 1.0 +600 69 optimizer.lr 0.06745966059112939 +600 69 negative_sampler.num_negs_per_pos 80.0 +600 69 training.batch_size 1.0 +600 70 model.embedding_dim 1.0 +600 70 optimizer.lr 0.06188470293315406 +600 70 negative_sampler.num_negs_per_pos 4.0 +600 70 training.batch_size 1.0 +600 71 model.embedding_dim 0.0 +600 71 optimizer.lr 0.0328446533858536 +600 71 
negative_sampler.num_negs_per_pos 66.0 +600 71 training.batch_size 0.0 +600 72 model.embedding_dim 0.0 +600 72 optimizer.lr 0.004362429719882685 +600 72 negative_sampler.num_negs_per_pos 88.0 +600 72 training.batch_size 2.0 +600 73 model.embedding_dim 0.0 +600 73 optimizer.lr 0.022519914954863704 +600 73 negative_sampler.num_negs_per_pos 87.0 +600 73 training.batch_size 0.0 +600 74 model.embedding_dim 2.0 +600 74 optimizer.lr 0.0941665778364374 +600 74 negative_sampler.num_negs_per_pos 32.0 +600 74 training.batch_size 1.0 +600 75 model.embedding_dim 2.0 +600 75 optimizer.lr 0.001033393933885892 +600 75 negative_sampler.num_negs_per_pos 36.0 +600 75 training.batch_size 0.0 +600 76 model.embedding_dim 1.0 +600 76 optimizer.lr 0.027805824416446846 +600 76 negative_sampler.num_negs_per_pos 65.0 +600 76 training.batch_size 1.0 +600 77 model.embedding_dim 1.0 +600 77 optimizer.lr 0.002299901925557335 +600 77 negative_sampler.num_negs_per_pos 49.0 +600 77 training.batch_size 2.0 +600 78 model.embedding_dim 2.0 +600 78 optimizer.lr 0.0010224742035268837 +600 78 negative_sampler.num_negs_per_pos 98.0 +600 78 training.batch_size 0.0 +600 79 model.embedding_dim 2.0 +600 79 optimizer.lr 0.002342872997141906 +600 79 negative_sampler.num_negs_per_pos 85.0 +600 79 training.batch_size 0.0 +600 80 model.embedding_dim 1.0 +600 80 optimizer.lr 0.05849602629507466 +600 80 negative_sampler.num_negs_per_pos 61.0 +600 80 training.batch_size 2.0 +600 81 model.embedding_dim 0.0 +600 81 optimizer.lr 0.0017874980395712668 +600 81 negative_sampler.num_negs_per_pos 41.0 +600 81 training.batch_size 0.0 +600 82 model.embedding_dim 0.0 +600 82 optimizer.lr 0.008612454057450704 +600 82 negative_sampler.num_negs_per_pos 34.0 +600 82 training.batch_size 2.0 +600 83 model.embedding_dim 1.0 +600 83 optimizer.lr 0.0012456628599039072 +600 83 negative_sampler.num_negs_per_pos 40.0 +600 83 training.batch_size 2.0 +600 84 model.embedding_dim 0.0 +600 84 optimizer.lr 0.09070593296526368 +600 84 
negative_sampler.num_negs_per_pos 3.0 +600 84 training.batch_size 0.0 +600 85 model.embedding_dim 1.0 +600 85 optimizer.lr 0.06558865707627393 +600 85 negative_sampler.num_negs_per_pos 3.0 +600 85 training.batch_size 1.0 +600 86 model.embedding_dim 2.0 +600 86 optimizer.lr 0.025687616531617736 +600 86 negative_sampler.num_negs_per_pos 17.0 +600 86 training.batch_size 0.0 +600 87 model.embedding_dim 1.0 +600 87 optimizer.lr 0.09858127775145821 +600 87 negative_sampler.num_negs_per_pos 16.0 +600 87 training.batch_size 1.0 +600 88 model.embedding_dim 0.0 +600 88 optimizer.lr 0.006157151034429455 +600 88 negative_sampler.num_negs_per_pos 29.0 +600 88 training.batch_size 0.0 +600 89 model.embedding_dim 2.0 +600 89 optimizer.lr 0.05186669743622535 +600 89 negative_sampler.num_negs_per_pos 11.0 +600 89 training.batch_size 1.0 +600 90 model.embedding_dim 1.0 +600 90 optimizer.lr 0.022329222679215423 +600 90 negative_sampler.num_negs_per_pos 77.0 +600 90 training.batch_size 2.0 +600 91 model.embedding_dim 2.0 +600 91 optimizer.lr 0.0016067397818882523 +600 91 negative_sampler.num_negs_per_pos 43.0 +600 91 training.batch_size 1.0 +600 92 model.embedding_dim 0.0 +600 92 optimizer.lr 0.03869394901989017 +600 92 negative_sampler.num_negs_per_pos 20.0 +600 92 training.batch_size 2.0 +600 93 model.embedding_dim 2.0 +600 93 optimizer.lr 0.001897457167057591 +600 93 negative_sampler.num_negs_per_pos 82.0 +600 93 training.batch_size 0.0 +600 94 model.embedding_dim 0.0 +600 94 optimizer.lr 0.012873514581933945 +600 94 negative_sampler.num_negs_per_pos 58.0 +600 94 training.batch_size 1.0 +600 95 model.embedding_dim 1.0 +600 95 optimizer.lr 0.006010001763524926 +600 95 negative_sampler.num_negs_per_pos 43.0 +600 95 training.batch_size 1.0 +600 96 model.embedding_dim 2.0 +600 96 optimizer.lr 0.006265046962648607 +600 96 negative_sampler.num_negs_per_pos 20.0 +600 96 training.batch_size 0.0 +600 97 model.embedding_dim 0.0 +600 97 optimizer.lr 0.002084556165995489 +600 97 
negative_sampler.num_negs_per_pos 1.0 +600 97 training.batch_size 1.0 +600 98 model.embedding_dim 2.0 +600 98 optimizer.lr 0.0020576209537374913 +600 98 negative_sampler.num_negs_per_pos 37.0 +600 98 training.batch_size 2.0 +600 99 model.embedding_dim 1.0 +600 99 optimizer.lr 0.001256360004005139 +600 99 negative_sampler.num_negs_per_pos 2.0 +600 99 training.batch_size 0.0 +600 100 model.embedding_dim 0.0 +600 100 optimizer.lr 0.03447189754186775 +600 100 negative_sampler.num_negs_per_pos 0.0 +600 100 training.batch_size 2.0 +600 1 dataset """kinships""" +600 1 model """rotate""" +600 1 loss """softplus""" +600 1 regularizer """no""" +600 1 optimizer """adam""" +600 1 training_loop """owa""" +600 1 negative_sampler """basic""" +600 1 evaluator """rankbased""" +600 2 dataset """kinships""" +600 2 model """rotate""" +600 2 loss """softplus""" +600 2 regularizer """no""" +600 2 optimizer """adam""" +600 2 training_loop """owa""" +600 2 negative_sampler """basic""" +600 2 evaluator """rankbased""" +600 3 dataset """kinships""" +600 3 model """rotate""" +600 3 loss """softplus""" +600 3 regularizer """no""" +600 3 optimizer """adam""" +600 3 training_loop """owa""" +600 3 negative_sampler """basic""" +600 3 evaluator """rankbased""" +600 4 dataset """kinships""" +600 4 model """rotate""" +600 4 loss """softplus""" +600 4 regularizer """no""" +600 4 optimizer """adam""" +600 4 training_loop """owa""" +600 4 negative_sampler """basic""" +600 4 evaluator """rankbased""" +600 5 dataset """kinships""" +600 5 model """rotate""" +600 5 loss """softplus""" +600 5 regularizer """no""" +600 5 optimizer """adam""" +600 5 training_loop """owa""" +600 5 negative_sampler """basic""" +600 5 evaluator """rankbased""" +600 6 dataset """kinships""" +600 6 model """rotate""" +600 6 loss """softplus""" +600 6 regularizer """no""" +600 6 optimizer """adam""" +600 6 training_loop """owa""" +600 6 negative_sampler """basic""" +600 6 evaluator """rankbased""" +600 7 dataset """kinships""" +600 
7 model """rotate""" +600 7 loss """softplus""" +600 7 regularizer """no""" +600 7 optimizer """adam""" +600 7 training_loop """owa""" +600 7 negative_sampler """basic""" +600 7 evaluator """rankbased""" +600 8 dataset """kinships""" +600 8 model """rotate""" +600 8 loss """softplus""" +600 8 regularizer """no""" +600 8 optimizer """adam""" +600 8 training_loop """owa""" +600 8 negative_sampler """basic""" +600 8 evaluator """rankbased""" +600 9 dataset """kinships""" +600 9 model """rotate""" +600 9 loss """softplus""" +600 9 regularizer """no""" +600 9 optimizer """adam""" +600 9 training_loop """owa""" +600 9 negative_sampler """basic""" +600 9 evaluator """rankbased""" +600 10 dataset """kinships""" +600 10 model """rotate""" +600 10 loss """softplus""" +600 10 regularizer """no""" +600 10 optimizer """adam""" +600 10 training_loop """owa""" +600 10 negative_sampler """basic""" +600 10 evaluator """rankbased""" +600 11 dataset """kinships""" +600 11 model """rotate""" +600 11 loss """softplus""" +600 11 regularizer """no""" +600 11 optimizer """adam""" +600 11 training_loop """owa""" +600 11 negative_sampler """basic""" +600 11 evaluator """rankbased""" +600 12 dataset """kinships""" +600 12 model """rotate""" +600 12 loss """softplus""" +600 12 regularizer """no""" +600 12 optimizer """adam""" +600 12 training_loop """owa""" +600 12 negative_sampler """basic""" +600 12 evaluator """rankbased""" +600 13 dataset """kinships""" +600 13 model """rotate""" +600 13 loss """softplus""" +600 13 regularizer """no""" +600 13 optimizer """adam""" +600 13 training_loop """owa""" +600 13 negative_sampler """basic""" +600 13 evaluator """rankbased""" +600 14 dataset """kinships""" +600 14 model """rotate""" +600 14 loss """softplus""" +600 14 regularizer """no""" +600 14 optimizer """adam""" +600 14 training_loop """owa""" +600 14 negative_sampler """basic""" +600 14 evaluator """rankbased""" +600 15 dataset """kinships""" +600 15 model """rotate""" +600 15 loss 
"""softplus""" +600 15 regularizer """no""" +600 15 optimizer """adam""" +600 15 training_loop """owa""" +600 15 negative_sampler """basic""" +600 15 evaluator """rankbased""" +600 16 dataset """kinships""" +600 16 model """rotate""" +600 16 loss """softplus""" +600 16 regularizer """no""" +600 16 optimizer """adam""" +600 16 training_loop """owa""" +600 16 negative_sampler """basic""" +600 16 evaluator """rankbased""" +600 17 dataset """kinships""" +600 17 model """rotate""" +600 17 loss """softplus""" +600 17 regularizer """no""" +600 17 optimizer """adam""" +600 17 training_loop """owa""" +600 17 negative_sampler """basic""" +600 17 evaluator """rankbased""" +600 18 dataset """kinships""" +600 18 model """rotate""" +600 18 loss """softplus""" +600 18 regularizer """no""" +600 18 optimizer """adam""" +600 18 training_loop """owa""" +600 18 negative_sampler """basic""" +600 18 evaluator """rankbased""" +600 19 dataset """kinships""" +600 19 model """rotate""" +600 19 loss """softplus""" +600 19 regularizer """no""" +600 19 optimizer """adam""" +600 19 training_loop """owa""" +600 19 negative_sampler """basic""" +600 19 evaluator """rankbased""" +600 20 dataset """kinships""" +600 20 model """rotate""" +600 20 loss """softplus""" +600 20 regularizer """no""" +600 20 optimizer """adam""" +600 20 training_loop """owa""" +600 20 negative_sampler """basic""" +600 20 evaluator """rankbased""" +600 21 dataset """kinships""" +600 21 model """rotate""" +600 21 loss """softplus""" +600 21 regularizer """no""" +600 21 optimizer """adam""" +600 21 training_loop """owa""" +600 21 negative_sampler """basic""" +600 21 evaluator """rankbased""" +600 22 dataset """kinships""" +600 22 model """rotate""" +600 22 loss """softplus""" +600 22 regularizer """no""" +600 22 optimizer """adam""" +600 22 training_loop """owa""" +600 22 negative_sampler """basic""" +600 22 evaluator """rankbased""" +600 23 dataset """kinships""" +600 23 model """rotate""" +600 23 loss """softplus""" +600 23 
regularizer """no""" +600 23 optimizer """adam""" +600 23 training_loop """owa""" +600 23 negative_sampler """basic""" +600 23 evaluator """rankbased""" +600 24 dataset """kinships""" +600 24 model """rotate""" +600 24 loss """softplus""" +600 24 regularizer """no""" +600 24 optimizer """adam""" +600 24 training_loop """owa""" +600 24 negative_sampler """basic""" +600 24 evaluator """rankbased""" +600 25 dataset """kinships""" +600 25 model """rotate""" +600 25 loss """softplus""" +600 25 regularizer """no""" +600 25 optimizer """adam""" +600 25 training_loop """owa""" +600 25 negative_sampler """basic""" +600 25 evaluator """rankbased""" +600 26 dataset """kinships""" +600 26 model """rotate""" +600 26 loss """softplus""" +600 26 regularizer """no""" +600 26 optimizer """adam""" +600 26 training_loop """owa""" +600 26 negative_sampler """basic""" +600 26 evaluator """rankbased""" +600 27 dataset """kinships""" +600 27 model """rotate""" +600 27 loss """softplus""" +600 27 regularizer """no""" +600 27 optimizer """adam""" +600 27 training_loop """owa""" +600 27 negative_sampler """basic""" +600 27 evaluator """rankbased""" +600 28 dataset """kinships""" +600 28 model """rotate""" +600 28 loss """softplus""" +600 28 regularizer """no""" +600 28 optimizer """adam""" +600 28 training_loop """owa""" +600 28 negative_sampler """basic""" +600 28 evaluator """rankbased""" +600 29 dataset """kinships""" +600 29 model """rotate""" +600 29 loss """softplus""" +600 29 regularizer """no""" +600 29 optimizer """adam""" +600 29 training_loop """owa""" +600 29 negative_sampler """basic""" +600 29 evaluator """rankbased""" +600 30 dataset """kinships""" +600 30 model """rotate""" +600 30 loss """softplus""" +600 30 regularizer """no""" +600 30 optimizer """adam""" +600 30 training_loop """owa""" +600 30 negative_sampler """basic""" +600 30 evaluator """rankbased""" +600 31 dataset """kinships""" +600 31 model """rotate""" +600 31 loss """softplus""" +600 31 regularizer """no""" 
+600 31 optimizer """adam""" +600 31 training_loop """owa""" +600 31 negative_sampler """basic""" +600 31 evaluator """rankbased""" +600 32 dataset """kinships""" +600 32 model """rotate""" +600 32 loss """softplus""" +600 32 regularizer """no""" +600 32 optimizer """adam""" +600 32 training_loop """owa""" +600 32 negative_sampler """basic""" +600 32 evaluator """rankbased""" +600 33 dataset """kinships""" +600 33 model """rotate""" +600 33 loss """softplus""" +600 33 regularizer """no""" +600 33 optimizer """adam""" +600 33 training_loop """owa""" +600 33 negative_sampler """basic""" +600 33 evaluator """rankbased""" +600 34 dataset """kinships""" +600 34 model """rotate""" +600 34 loss """softplus""" +600 34 regularizer """no""" +600 34 optimizer """adam""" +600 34 training_loop """owa""" +600 34 negative_sampler """basic""" +600 34 evaluator """rankbased""" +600 35 dataset """kinships""" +600 35 model """rotate""" +600 35 loss """softplus""" +600 35 regularizer """no""" +600 35 optimizer """adam""" +600 35 training_loop """owa""" +600 35 negative_sampler """basic""" +600 35 evaluator """rankbased""" +600 36 dataset """kinships""" +600 36 model """rotate""" +600 36 loss """softplus""" +600 36 regularizer """no""" +600 36 optimizer """adam""" +600 36 training_loop """owa""" +600 36 negative_sampler """basic""" +600 36 evaluator """rankbased""" +600 37 dataset """kinships""" +600 37 model """rotate""" +600 37 loss """softplus""" +600 37 regularizer """no""" +600 37 optimizer """adam""" +600 37 training_loop """owa""" +600 37 negative_sampler """basic""" +600 37 evaluator """rankbased""" +600 38 dataset """kinships""" +600 38 model """rotate""" +600 38 loss """softplus""" +600 38 regularizer """no""" +600 38 optimizer """adam""" +600 38 training_loop """owa""" +600 38 negative_sampler """basic""" +600 38 evaluator """rankbased""" +600 39 dataset """kinships""" +600 39 model """rotate""" +600 39 loss """softplus""" +600 39 regularizer """no""" +600 39 optimizer 
"""adam""" +600 39 training_loop """owa""" +600 39 negative_sampler """basic""" +600 39 evaluator """rankbased""" +600 40 dataset """kinships""" +600 40 model """rotate""" +600 40 loss """softplus""" +600 40 regularizer """no""" +600 40 optimizer """adam""" +600 40 training_loop """owa""" +600 40 negative_sampler """basic""" +600 40 evaluator """rankbased""" +600 41 dataset """kinships""" +600 41 model """rotate""" +600 41 loss """softplus""" +600 41 regularizer """no""" +600 41 optimizer """adam""" +600 41 training_loop """owa""" +600 41 negative_sampler """basic""" +600 41 evaluator """rankbased""" +600 42 dataset """kinships""" +600 42 model """rotate""" +600 42 loss """softplus""" +600 42 regularizer """no""" +600 42 optimizer """adam""" +600 42 training_loop """owa""" +600 42 negative_sampler """basic""" +600 42 evaluator """rankbased""" +600 43 dataset """kinships""" +600 43 model """rotate""" +600 43 loss """softplus""" +600 43 regularizer """no""" +600 43 optimizer """adam""" +600 43 training_loop """owa""" +600 43 negative_sampler """basic""" +600 43 evaluator """rankbased""" +600 44 dataset """kinships""" +600 44 model """rotate""" +600 44 loss """softplus""" +600 44 regularizer """no""" +600 44 optimizer """adam""" +600 44 training_loop """owa""" +600 44 negative_sampler """basic""" +600 44 evaluator """rankbased""" +600 45 dataset """kinships""" +600 45 model """rotate""" +600 45 loss """softplus""" +600 45 regularizer """no""" +600 45 optimizer """adam""" +600 45 training_loop """owa""" +600 45 negative_sampler """basic""" +600 45 evaluator """rankbased""" +600 46 dataset """kinships""" +600 46 model """rotate""" +600 46 loss """softplus""" +600 46 regularizer """no""" +600 46 optimizer """adam""" +600 46 training_loop """owa""" +600 46 negative_sampler """basic""" +600 46 evaluator """rankbased""" +600 47 dataset """kinships""" +600 47 model """rotate""" +600 47 loss """softplus""" +600 47 regularizer """no""" +600 47 optimizer """adam""" +600 47 
training_loop """owa""" +600 47 negative_sampler """basic""" +600 47 evaluator """rankbased""" +600 48 dataset """kinships""" +600 48 model """rotate""" +600 48 loss """softplus""" +600 48 regularizer """no""" +600 48 optimizer """adam""" +600 48 training_loop """owa""" +600 48 negative_sampler """basic""" +600 48 evaluator """rankbased""" +600 49 dataset """kinships""" +600 49 model """rotate""" +600 49 loss """softplus""" +600 49 regularizer """no""" +600 49 optimizer """adam""" +600 49 training_loop """owa""" +600 49 negative_sampler """basic""" +600 49 evaluator """rankbased""" +600 50 dataset """kinships""" +600 50 model """rotate""" +600 50 loss """softplus""" +600 50 regularizer """no""" +600 50 optimizer """adam""" +600 50 training_loop """owa""" +600 50 negative_sampler """basic""" +600 50 evaluator """rankbased""" +600 51 dataset """kinships""" +600 51 model """rotate""" +600 51 loss """softplus""" +600 51 regularizer """no""" +600 51 optimizer """adam""" +600 51 training_loop """owa""" +600 51 negative_sampler """basic""" +600 51 evaluator """rankbased""" +600 52 dataset """kinships""" +600 52 model """rotate""" +600 52 loss """softplus""" +600 52 regularizer """no""" +600 52 optimizer """adam""" +600 52 training_loop """owa""" +600 52 negative_sampler """basic""" +600 52 evaluator """rankbased""" +600 53 dataset """kinships""" +600 53 model """rotate""" +600 53 loss """softplus""" +600 53 regularizer """no""" +600 53 optimizer """adam""" +600 53 training_loop """owa""" +600 53 negative_sampler """basic""" +600 53 evaluator """rankbased""" +600 54 dataset """kinships""" +600 54 model """rotate""" +600 54 loss """softplus""" +600 54 regularizer """no""" +600 54 optimizer """adam""" +600 54 training_loop """owa""" +600 54 negative_sampler """basic""" +600 54 evaluator """rankbased""" +600 55 dataset """kinships""" +600 55 model """rotate""" +600 55 loss """softplus""" +600 55 regularizer """no""" +600 55 optimizer """adam""" +600 55 training_loop """owa""" 
+600 55 negative_sampler """basic""" +600 55 evaluator """rankbased""" +600 56 dataset """kinships""" +600 56 model """rotate""" +600 56 loss """softplus""" +600 56 regularizer """no""" +600 56 optimizer """adam""" +600 56 training_loop """owa""" +600 56 negative_sampler """basic""" +600 56 evaluator """rankbased""" +600 57 dataset """kinships""" +600 57 model """rotate""" +600 57 loss """softplus""" +600 57 regularizer """no""" +600 57 optimizer """adam""" +600 57 training_loop """owa""" +600 57 negative_sampler """basic""" +600 57 evaluator """rankbased""" +600 58 dataset """kinships""" +600 58 model """rotate""" +600 58 loss """softplus""" +600 58 regularizer """no""" +600 58 optimizer """adam""" +600 58 training_loop """owa""" +600 58 negative_sampler """basic""" +600 58 evaluator """rankbased""" +600 59 dataset """kinships""" +600 59 model """rotate""" +600 59 loss """softplus""" +600 59 regularizer """no""" +600 59 optimizer """adam""" +600 59 training_loop """owa""" +600 59 negative_sampler """basic""" +600 59 evaluator """rankbased""" +600 60 dataset """kinships""" +600 60 model """rotate""" +600 60 loss """softplus""" +600 60 regularizer """no""" +600 60 optimizer """adam""" +600 60 training_loop """owa""" +600 60 negative_sampler """basic""" +600 60 evaluator """rankbased""" +600 61 dataset """kinships""" +600 61 model """rotate""" +600 61 loss """softplus""" +600 61 regularizer """no""" +600 61 optimizer """adam""" +600 61 training_loop """owa""" +600 61 negative_sampler """basic""" +600 61 evaluator """rankbased""" +600 62 dataset """kinships""" +600 62 model """rotate""" +600 62 loss """softplus""" +600 62 regularizer """no""" +600 62 optimizer """adam""" +600 62 training_loop """owa""" +600 62 negative_sampler """basic""" +600 62 evaluator """rankbased""" +600 63 dataset """kinships""" +600 63 model """rotate""" +600 63 loss """softplus""" +600 63 regularizer """no""" +600 63 optimizer """adam""" +600 63 training_loop """owa""" +600 63 
negative_sampler """basic""" +600 63 evaluator """rankbased""" +600 64 dataset """kinships""" +600 64 model """rotate""" +600 64 loss """softplus""" +600 64 regularizer """no""" +600 64 optimizer """adam""" +600 64 training_loop """owa""" +600 64 negative_sampler """basic""" +600 64 evaluator """rankbased""" +600 65 dataset """kinships""" +600 65 model """rotate""" +600 65 loss """softplus""" +600 65 regularizer """no""" +600 65 optimizer """adam""" +600 65 training_loop """owa""" +600 65 negative_sampler """basic""" +600 65 evaluator """rankbased""" +600 66 dataset """kinships""" +600 66 model """rotate""" +600 66 loss """softplus""" +600 66 regularizer """no""" +600 66 optimizer """adam""" +600 66 training_loop """owa""" +600 66 negative_sampler """basic""" +600 66 evaluator """rankbased""" +600 67 dataset """kinships""" +600 67 model """rotate""" +600 67 loss """softplus""" +600 67 regularizer """no""" +600 67 optimizer """adam""" +600 67 training_loop """owa""" +600 67 negative_sampler """basic""" +600 67 evaluator """rankbased""" +600 68 dataset """kinships""" +600 68 model """rotate""" +600 68 loss """softplus""" +600 68 regularizer """no""" +600 68 optimizer """adam""" +600 68 training_loop """owa""" +600 68 negative_sampler """basic""" +600 68 evaluator """rankbased""" +600 69 dataset """kinships""" +600 69 model """rotate""" +600 69 loss """softplus""" +600 69 regularizer """no""" +600 69 optimizer """adam""" +600 69 training_loop """owa""" +600 69 negative_sampler """basic""" +600 69 evaluator """rankbased""" +600 70 dataset """kinships""" +600 70 model """rotate""" +600 70 loss """softplus""" +600 70 regularizer """no""" +600 70 optimizer """adam""" +600 70 training_loop """owa""" +600 70 negative_sampler """basic""" +600 70 evaluator """rankbased""" +600 71 dataset """kinships""" +600 71 model """rotate""" +600 71 loss """softplus""" +600 71 regularizer """no""" +600 71 optimizer """adam""" +600 71 training_loop """owa""" +600 71 negative_sampler 
"""basic""" +600 71 evaluator """rankbased""" +600 72 dataset """kinships""" +600 72 model """rotate""" +600 72 loss """softplus""" +600 72 regularizer """no""" +600 72 optimizer """adam""" +600 72 training_loop """owa""" +600 72 negative_sampler """basic""" +600 72 evaluator """rankbased""" +600 73 dataset """kinships""" +600 73 model """rotate""" +600 73 loss """softplus""" +600 73 regularizer """no""" +600 73 optimizer """adam""" +600 73 training_loop """owa""" +600 73 negative_sampler """basic""" +600 73 evaluator """rankbased""" +600 74 dataset """kinships""" +600 74 model """rotate""" +600 74 loss """softplus""" +600 74 regularizer """no""" +600 74 optimizer """adam""" +600 74 training_loop """owa""" +600 74 negative_sampler """basic""" +600 74 evaluator """rankbased""" +600 75 dataset """kinships""" +600 75 model """rotate""" +600 75 loss """softplus""" +600 75 regularizer """no""" +600 75 optimizer """adam""" +600 75 training_loop """owa""" +600 75 negative_sampler """basic""" +600 75 evaluator """rankbased""" +600 76 dataset """kinships""" +600 76 model """rotate""" +600 76 loss """softplus""" +600 76 regularizer """no""" +600 76 optimizer """adam""" +600 76 training_loop """owa""" +600 76 negative_sampler """basic""" +600 76 evaluator """rankbased""" +600 77 dataset """kinships""" +600 77 model """rotate""" +600 77 loss """softplus""" +600 77 regularizer """no""" +600 77 optimizer """adam""" +600 77 training_loop """owa""" +600 77 negative_sampler """basic""" +600 77 evaluator """rankbased""" +600 78 dataset """kinships""" +600 78 model """rotate""" +600 78 loss """softplus""" +600 78 regularizer """no""" +600 78 optimizer """adam""" +600 78 training_loop """owa""" +600 78 negative_sampler """basic""" +600 78 evaluator """rankbased""" +600 79 dataset """kinships""" +600 79 model """rotate""" +600 79 loss """softplus""" +600 79 regularizer """no""" +600 79 optimizer """adam""" +600 79 training_loop """owa""" +600 79 negative_sampler """basic""" +600 79 
evaluator """rankbased""" +600 80 dataset """kinships""" +600 80 model """rotate""" +600 80 loss """softplus""" +600 80 regularizer """no""" +600 80 optimizer """adam""" +600 80 training_loop """owa""" +600 80 negative_sampler """basic""" +600 80 evaluator """rankbased""" +600 81 dataset """kinships""" +600 81 model """rotate""" +600 81 loss """softplus""" +600 81 regularizer """no""" +600 81 optimizer """adam""" +600 81 training_loop """owa""" +600 81 negative_sampler """basic""" +600 81 evaluator """rankbased""" +600 82 dataset """kinships""" +600 82 model """rotate""" +600 82 loss """softplus""" +600 82 regularizer """no""" +600 82 optimizer """adam""" +600 82 training_loop """owa""" +600 82 negative_sampler """basic""" +600 82 evaluator """rankbased""" +600 83 dataset """kinships""" +600 83 model """rotate""" +600 83 loss """softplus""" +600 83 regularizer """no""" +600 83 optimizer """adam""" +600 83 training_loop """owa""" +600 83 negative_sampler """basic""" +600 83 evaluator """rankbased""" +600 84 dataset """kinships""" +600 84 model """rotate""" +600 84 loss """softplus""" +600 84 regularizer """no""" +600 84 optimizer """adam""" +600 84 training_loop """owa""" +600 84 negative_sampler """basic""" +600 84 evaluator """rankbased""" +600 85 dataset """kinships""" +600 85 model """rotate""" +600 85 loss """softplus""" +600 85 regularizer """no""" +600 85 optimizer """adam""" +600 85 training_loop """owa""" +600 85 negative_sampler """basic""" +600 85 evaluator """rankbased""" +600 86 dataset """kinships""" +600 86 model """rotate""" +600 86 loss """softplus""" +600 86 regularizer """no""" +600 86 optimizer """adam""" +600 86 training_loop """owa""" +600 86 negative_sampler """basic""" +600 86 evaluator """rankbased""" +600 87 dataset """kinships""" +600 87 model """rotate""" +600 87 loss """softplus""" +600 87 regularizer """no""" +600 87 optimizer """adam""" +600 87 training_loop """owa""" +600 87 negative_sampler """basic""" +600 87 evaluator 
"""rankbased""" +600 88 dataset """kinships""" +600 88 model """rotate""" +600 88 loss """softplus""" +600 88 regularizer """no""" +600 88 optimizer """adam""" +600 88 training_loop """owa""" +600 88 negative_sampler """basic""" +600 88 evaluator """rankbased""" +600 89 dataset """kinships""" +600 89 model """rotate""" +600 89 loss """softplus""" +600 89 regularizer """no""" +600 89 optimizer """adam""" +600 89 training_loop """owa""" +600 89 negative_sampler """basic""" +600 89 evaluator """rankbased""" +600 90 dataset """kinships""" +600 90 model """rotate""" +600 90 loss """softplus""" +600 90 regularizer """no""" +600 90 optimizer """adam""" +600 90 training_loop """owa""" +600 90 negative_sampler """basic""" +600 90 evaluator """rankbased""" +600 91 dataset """kinships""" +600 91 model """rotate""" +600 91 loss """softplus""" +600 91 regularizer """no""" +600 91 optimizer """adam""" +600 91 training_loop """owa""" +600 91 negative_sampler """basic""" +600 91 evaluator """rankbased""" +600 92 dataset """kinships""" +600 92 model """rotate""" +600 92 loss """softplus""" +600 92 regularizer """no""" +600 92 optimizer """adam""" +600 92 training_loop """owa""" +600 92 negative_sampler """basic""" +600 92 evaluator """rankbased""" +600 93 dataset """kinships""" +600 93 model """rotate""" +600 93 loss """softplus""" +600 93 regularizer """no""" +600 93 optimizer """adam""" +600 93 training_loop """owa""" +600 93 negative_sampler """basic""" +600 93 evaluator """rankbased""" +600 94 dataset """kinships""" +600 94 model """rotate""" +600 94 loss """softplus""" +600 94 regularizer """no""" +600 94 optimizer """adam""" +600 94 training_loop """owa""" +600 94 negative_sampler """basic""" +600 94 evaluator """rankbased""" +600 95 dataset """kinships""" +600 95 model """rotate""" +600 95 loss """softplus""" +600 95 regularizer """no""" +600 95 optimizer """adam""" +600 95 training_loop """owa""" +600 95 negative_sampler """basic""" +600 95 evaluator """rankbased""" +600 96 
dataset """kinships""" +600 96 model """rotate""" +600 96 loss """softplus""" +600 96 regularizer """no""" +600 96 optimizer """adam""" +600 96 training_loop """owa""" +600 96 negative_sampler """basic""" +600 96 evaluator """rankbased""" +600 97 dataset """kinships""" +600 97 model """rotate""" +600 97 loss """softplus""" +600 97 regularizer """no""" +600 97 optimizer """adam""" +600 97 training_loop """owa""" +600 97 negative_sampler """basic""" +600 97 evaluator """rankbased""" +600 98 dataset """kinships""" +600 98 model """rotate""" +600 98 loss """softplus""" +600 98 regularizer """no""" +600 98 optimizer """adam""" +600 98 training_loop """owa""" +600 98 negative_sampler """basic""" +600 98 evaluator """rankbased""" +600 99 dataset """kinships""" +600 99 model """rotate""" +600 99 loss """softplus""" +600 99 regularizer """no""" +600 99 optimizer """adam""" +600 99 training_loop """owa""" +600 99 negative_sampler """basic""" +600 99 evaluator """rankbased""" +600 100 dataset """kinships""" +600 100 model """rotate""" +600 100 loss """softplus""" +600 100 regularizer """no""" +600 100 optimizer """adam""" +600 100 training_loop """owa""" +600 100 negative_sampler """basic""" +600 100 evaluator """rankbased""" +601 1 model.embedding_dim 0.0 +601 1 optimizer.lr 0.009816662342165503 +601 1 negative_sampler.num_negs_per_pos 15.0 +601 1 training.batch_size 2.0 +601 2 model.embedding_dim 2.0 +601 2 optimizer.lr 0.0011175251635151121 +601 2 negative_sampler.num_negs_per_pos 0.0 +601 2 training.batch_size 1.0 +601 3 model.embedding_dim 2.0 +601 3 optimizer.lr 0.08268854178563419 +601 3 negative_sampler.num_negs_per_pos 19.0 +601 3 training.batch_size 2.0 +601 4 model.embedding_dim 2.0 +601 4 optimizer.lr 0.006096572907427587 +601 4 negative_sampler.num_negs_per_pos 21.0 +601 4 training.batch_size 1.0 +601 5 model.embedding_dim 0.0 +601 5 optimizer.lr 0.09401388341167932 +601 5 negative_sampler.num_negs_per_pos 81.0 +601 5 training.batch_size 0.0 +601 6 
model.embedding_dim 1.0 +601 6 optimizer.lr 0.0025482014937553404 +601 6 negative_sampler.num_negs_per_pos 80.0 +601 6 training.batch_size 1.0 +601 7 model.embedding_dim 2.0 +601 7 optimizer.lr 0.006110712640737079 +601 7 negative_sampler.num_negs_per_pos 31.0 +601 7 training.batch_size 0.0 +601 8 model.embedding_dim 0.0 +601 8 optimizer.lr 0.011706325936754866 +601 8 negative_sampler.num_negs_per_pos 3.0 +601 8 training.batch_size 0.0 +601 9 model.embedding_dim 2.0 +601 9 optimizer.lr 0.011944302039020775 +601 9 negative_sampler.num_negs_per_pos 0.0 +601 9 training.batch_size 0.0 +601 10 model.embedding_dim 0.0 +601 10 optimizer.lr 0.005008253952154614 +601 10 negative_sampler.num_negs_per_pos 7.0 +601 10 training.batch_size 0.0 +601 11 model.embedding_dim 1.0 +601 11 optimizer.lr 0.0021076064870216484 +601 11 negative_sampler.num_negs_per_pos 92.0 +601 11 training.batch_size 2.0 +601 12 model.embedding_dim 2.0 +601 12 optimizer.lr 0.09264834057378978 +601 12 negative_sampler.num_negs_per_pos 52.0 +601 12 training.batch_size 1.0 +601 13 model.embedding_dim 2.0 +601 13 optimizer.lr 0.018719375930314967 +601 13 negative_sampler.num_negs_per_pos 75.0 +601 13 training.batch_size 0.0 +601 14 model.embedding_dim 2.0 +601 14 optimizer.lr 0.03284627579296454 +601 14 negative_sampler.num_negs_per_pos 2.0 +601 14 training.batch_size 0.0 +601 15 model.embedding_dim 0.0 +601 15 optimizer.lr 0.01549807989339275 +601 15 negative_sampler.num_negs_per_pos 28.0 +601 15 training.batch_size 1.0 +601 16 model.embedding_dim 0.0 +601 16 optimizer.lr 0.01666374443644733 +601 16 negative_sampler.num_negs_per_pos 94.0 +601 16 training.batch_size 1.0 +601 17 model.embedding_dim 1.0 +601 17 optimizer.lr 0.07179341535126511 +601 17 negative_sampler.num_negs_per_pos 36.0 +601 17 training.batch_size 1.0 +601 18 model.embedding_dim 2.0 +601 18 optimizer.lr 0.0014955858647880721 +601 18 negative_sampler.num_negs_per_pos 37.0 +601 18 training.batch_size 2.0 +601 19 model.embedding_dim 2.0 +601 19 
optimizer.lr 0.006380346084170756 +601 19 negative_sampler.num_negs_per_pos 93.0 +601 19 training.batch_size 0.0 +601 20 model.embedding_dim 2.0 +601 20 optimizer.lr 0.025280136762463594 +601 20 negative_sampler.num_negs_per_pos 42.0 +601 20 training.batch_size 2.0 +601 21 model.embedding_dim 2.0 +601 21 optimizer.lr 0.0025354140339523554 +601 21 negative_sampler.num_negs_per_pos 50.0 +601 21 training.batch_size 2.0 +601 22 model.embedding_dim 0.0 +601 22 optimizer.lr 0.010763856859329676 +601 22 negative_sampler.num_negs_per_pos 55.0 +601 22 training.batch_size 0.0 +601 23 model.embedding_dim 0.0 +601 23 optimizer.lr 0.003765513971339845 +601 23 negative_sampler.num_negs_per_pos 84.0 +601 23 training.batch_size 0.0 +601 24 model.embedding_dim 0.0 +601 24 optimizer.lr 0.0027555394574286162 +601 24 negative_sampler.num_negs_per_pos 86.0 +601 24 training.batch_size 1.0 +601 25 model.embedding_dim 2.0 +601 25 optimizer.lr 0.0013237098538101402 +601 25 negative_sampler.num_negs_per_pos 98.0 +601 25 training.batch_size 1.0 +601 26 model.embedding_dim 2.0 +601 26 optimizer.lr 0.001067240224360337 +601 26 negative_sampler.num_negs_per_pos 39.0 +601 26 training.batch_size 2.0 +601 27 model.embedding_dim 2.0 +601 27 optimizer.lr 0.08052376977698293 +601 27 negative_sampler.num_negs_per_pos 46.0 +601 27 training.batch_size 1.0 +601 28 model.embedding_dim 1.0 +601 28 optimizer.lr 0.008589546126141543 +601 28 negative_sampler.num_negs_per_pos 60.0 +601 28 training.batch_size 0.0 +601 29 model.embedding_dim 1.0 +601 29 optimizer.lr 0.0020743475377236658 +601 29 negative_sampler.num_negs_per_pos 62.0 +601 29 training.batch_size 0.0 +601 30 model.embedding_dim 2.0 +601 30 optimizer.lr 0.09210824991109867 +601 30 negative_sampler.num_negs_per_pos 12.0 +601 30 training.batch_size 2.0 +601 31 model.embedding_dim 2.0 +601 31 optimizer.lr 0.005779588955999666 +601 31 negative_sampler.num_negs_per_pos 93.0 +601 31 training.batch_size 0.0 +601 32 model.embedding_dim 1.0 +601 32 
optimizer.lr 0.022923426809770064 +601 32 negative_sampler.num_negs_per_pos 18.0 +601 32 training.batch_size 2.0 +601 33 model.embedding_dim 1.0 +601 33 optimizer.lr 0.045055827065887684 +601 33 negative_sampler.num_negs_per_pos 26.0 +601 33 training.batch_size 0.0 +601 34 model.embedding_dim 0.0 +601 34 optimizer.lr 0.00820521330591299 +601 34 negative_sampler.num_negs_per_pos 57.0 +601 34 training.batch_size 0.0 +601 35 model.embedding_dim 0.0 +601 35 optimizer.lr 0.0033024330353850356 +601 35 negative_sampler.num_negs_per_pos 30.0 +601 35 training.batch_size 2.0 +601 36 model.embedding_dim 1.0 +601 36 optimizer.lr 0.0037418834935442685 +601 36 negative_sampler.num_negs_per_pos 21.0 +601 36 training.batch_size 2.0 +601 37 model.embedding_dim 2.0 +601 37 optimizer.lr 0.003908277915382899 +601 37 negative_sampler.num_negs_per_pos 40.0 +601 37 training.batch_size 2.0 +601 38 model.embedding_dim 1.0 +601 38 optimizer.lr 0.0014350711336548116 +601 38 negative_sampler.num_negs_per_pos 39.0 +601 38 training.batch_size 0.0 +601 39 model.embedding_dim 2.0 +601 39 optimizer.lr 0.0019436926058551247 +601 39 negative_sampler.num_negs_per_pos 93.0 +601 39 training.batch_size 2.0 +601 40 model.embedding_dim 2.0 +601 40 optimizer.lr 0.0013701075871817843 +601 40 negative_sampler.num_negs_per_pos 31.0 +601 40 training.batch_size 0.0 +601 41 model.embedding_dim 2.0 +601 41 optimizer.lr 0.0156636759629304 +601 41 negative_sampler.num_negs_per_pos 99.0 +601 41 training.batch_size 1.0 +601 42 model.embedding_dim 1.0 +601 42 optimizer.lr 0.08460634338896238 +601 42 negative_sampler.num_negs_per_pos 3.0 +601 42 training.batch_size 1.0 +601 43 model.embedding_dim 1.0 +601 43 optimizer.lr 0.013564960219657859 +601 43 negative_sampler.num_negs_per_pos 81.0 +601 43 training.batch_size 0.0 +601 44 model.embedding_dim 0.0 +601 44 optimizer.lr 0.015097793219218603 +601 44 negative_sampler.num_negs_per_pos 70.0 +601 44 training.batch_size 0.0 +601 45 model.embedding_dim 1.0 +601 45 
optimizer.lr 0.028561435673873318 +601 45 negative_sampler.num_negs_per_pos 39.0 +601 45 training.batch_size 2.0 +601 46 model.embedding_dim 0.0 +601 46 optimizer.lr 0.007923760678260765 +601 46 negative_sampler.num_negs_per_pos 77.0 +601 46 training.batch_size 1.0 +601 47 model.embedding_dim 0.0 +601 47 optimizer.lr 0.010000181011300742 +601 47 negative_sampler.num_negs_per_pos 2.0 +601 47 training.batch_size 2.0 +601 48 model.embedding_dim 2.0 +601 48 optimizer.lr 0.0023460075874250546 +601 48 negative_sampler.num_negs_per_pos 77.0 +601 48 training.batch_size 0.0 +601 49 model.embedding_dim 0.0 +601 49 optimizer.lr 0.009131408516059224 +601 49 negative_sampler.num_negs_per_pos 72.0 +601 49 training.batch_size 1.0 +601 50 model.embedding_dim 0.0 +601 50 optimizer.lr 0.05648995422194497 +601 50 negative_sampler.num_negs_per_pos 69.0 +601 50 training.batch_size 1.0 +601 51 model.embedding_dim 0.0 +601 51 optimizer.lr 0.01723780570443887 +601 51 negative_sampler.num_negs_per_pos 72.0 +601 51 training.batch_size 2.0 +601 52 model.embedding_dim 2.0 +601 52 optimizer.lr 0.01199233623810282 +601 52 negative_sampler.num_negs_per_pos 82.0 +601 52 training.batch_size 0.0 +601 53 model.embedding_dim 1.0 +601 53 optimizer.lr 0.01905830166958447 +601 53 negative_sampler.num_negs_per_pos 70.0 +601 53 training.batch_size 1.0 +601 54 model.embedding_dim 0.0 +601 54 optimizer.lr 0.0012628841468814108 +601 54 negative_sampler.num_negs_per_pos 74.0 +601 54 training.batch_size 0.0 +601 55 model.embedding_dim 2.0 +601 55 optimizer.lr 0.0020846593716260155 +601 55 negative_sampler.num_negs_per_pos 17.0 +601 55 training.batch_size 2.0 +601 56 model.embedding_dim 0.0 +601 56 optimizer.lr 0.0192469704445103 +601 56 negative_sampler.num_negs_per_pos 99.0 +601 56 training.batch_size 1.0 +601 57 model.embedding_dim 2.0 +601 57 optimizer.lr 0.02346023757085456 +601 57 negative_sampler.num_negs_per_pos 58.0 +601 57 training.batch_size 1.0 +601 58 model.embedding_dim 0.0 +601 58 optimizer.lr 
0.001905187112852255 +601 58 negative_sampler.num_negs_per_pos 23.0 +601 58 training.batch_size 1.0 +601 59 model.embedding_dim 2.0 +601 59 optimizer.lr 0.005707590134761922 +601 59 negative_sampler.num_negs_per_pos 25.0 +601 59 training.batch_size 0.0 +601 60 model.embedding_dim 2.0 +601 60 optimizer.lr 0.0028930799418833615 +601 60 negative_sampler.num_negs_per_pos 28.0 +601 60 training.batch_size 1.0 +601 61 model.embedding_dim 0.0 +601 61 optimizer.lr 0.01303973393641091 +601 61 negative_sampler.num_negs_per_pos 13.0 +601 61 training.batch_size 2.0 +601 62 model.embedding_dim 1.0 +601 62 optimizer.lr 0.045791334685281286 +601 62 negative_sampler.num_negs_per_pos 17.0 +601 62 training.batch_size 2.0 +601 63 model.embedding_dim 0.0 +601 63 optimizer.lr 0.005261529572655501 +601 63 negative_sampler.num_negs_per_pos 30.0 +601 63 training.batch_size 2.0 +601 64 model.embedding_dim 2.0 +601 64 optimizer.lr 0.025956437276348915 +601 64 negative_sampler.num_negs_per_pos 63.0 +601 64 training.batch_size 1.0 +601 65 model.embedding_dim 0.0 +601 65 optimizer.lr 0.0033516799574509364 +601 65 negative_sampler.num_negs_per_pos 85.0 +601 65 training.batch_size 2.0 +601 66 model.embedding_dim 1.0 +601 66 optimizer.lr 0.056568349176923896 +601 66 negative_sampler.num_negs_per_pos 16.0 +601 66 training.batch_size 1.0 +601 67 model.embedding_dim 1.0 +601 67 optimizer.lr 0.04431678932040203 +601 67 negative_sampler.num_negs_per_pos 89.0 +601 67 training.batch_size 1.0 +601 68 model.embedding_dim 2.0 +601 68 optimizer.lr 0.002892685588725064 +601 68 negative_sampler.num_negs_per_pos 11.0 +601 68 training.batch_size 1.0 +601 69 model.embedding_dim 2.0 +601 69 optimizer.lr 0.003794359189916189 +601 69 negative_sampler.num_negs_per_pos 7.0 +601 69 training.batch_size 0.0 +601 70 model.embedding_dim 2.0 +601 70 optimizer.lr 0.07471176094779172 +601 70 negative_sampler.num_negs_per_pos 59.0 +601 70 training.batch_size 1.0 +601 71 model.embedding_dim 2.0 +601 71 optimizer.lr 
0.03153145792739437 +601 71 negative_sampler.num_negs_per_pos 27.0 +601 71 training.batch_size 2.0 +601 72 model.embedding_dim 2.0 +601 72 optimizer.lr 0.0014724006759235181 +601 72 negative_sampler.num_negs_per_pos 83.0 +601 72 training.batch_size 1.0 +601 73 model.embedding_dim 2.0 +601 73 optimizer.lr 0.002365273975079644 +601 73 negative_sampler.num_negs_per_pos 1.0 +601 73 training.batch_size 0.0 +601 74 model.embedding_dim 1.0 +601 74 optimizer.lr 0.0010809207633320078 +601 74 negative_sampler.num_negs_per_pos 82.0 +601 74 training.batch_size 1.0 +601 75 model.embedding_dim 0.0 +601 75 optimizer.lr 0.011860819704015313 +601 75 negative_sampler.num_negs_per_pos 11.0 +601 75 training.batch_size 1.0 +601 76 model.embedding_dim 0.0 +601 76 optimizer.lr 0.04330616047672967 +601 76 negative_sampler.num_negs_per_pos 84.0 +601 76 training.batch_size 2.0 +601 77 model.embedding_dim 1.0 +601 77 optimizer.lr 0.048389435193109086 +601 77 negative_sampler.num_negs_per_pos 18.0 +601 77 training.batch_size 2.0 +601 78 model.embedding_dim 2.0 +601 78 optimizer.lr 0.04256097843666183 +601 78 negative_sampler.num_negs_per_pos 93.0 +601 78 training.batch_size 0.0 +601 79 model.embedding_dim 0.0 +601 79 optimizer.lr 0.003924392470787395 +601 79 negative_sampler.num_negs_per_pos 85.0 +601 79 training.batch_size 2.0 +601 80 model.embedding_dim 2.0 +601 80 optimizer.lr 0.09434522666684104 +601 80 negative_sampler.num_negs_per_pos 71.0 +601 80 training.batch_size 0.0 +601 81 model.embedding_dim 0.0 +601 81 optimizer.lr 0.0012491511760101127 +601 81 negative_sampler.num_negs_per_pos 71.0 +601 81 training.batch_size 2.0 +601 82 model.embedding_dim 1.0 +601 82 optimizer.lr 0.0019550023771137395 +601 82 negative_sampler.num_negs_per_pos 60.0 +601 82 training.batch_size 1.0 +601 83 model.embedding_dim 2.0 +601 83 optimizer.lr 0.013737874808277876 +601 83 negative_sampler.num_negs_per_pos 44.0 +601 83 training.batch_size 1.0 +601 84 model.embedding_dim 0.0 +601 84 optimizer.lr 
0.01297817267767028 +601 84 negative_sampler.num_negs_per_pos 89.0 +601 84 training.batch_size 2.0 +601 85 model.embedding_dim 0.0 +601 85 optimizer.lr 0.03370124958919001 +601 85 negative_sampler.num_negs_per_pos 34.0 +601 85 training.batch_size 2.0 +601 86 model.embedding_dim 2.0 +601 86 optimizer.lr 0.011528949924423682 +601 86 negative_sampler.num_negs_per_pos 59.0 +601 86 training.batch_size 0.0 +601 87 model.embedding_dim 0.0 +601 87 optimizer.lr 0.0031194246592788142 +601 87 negative_sampler.num_negs_per_pos 69.0 +601 87 training.batch_size 1.0 +601 88 model.embedding_dim 2.0 +601 88 optimizer.lr 0.02159083182814742 +601 88 negative_sampler.num_negs_per_pos 95.0 +601 88 training.batch_size 1.0 +601 89 model.embedding_dim 1.0 +601 89 optimizer.lr 0.0021706263326941523 +601 89 negative_sampler.num_negs_per_pos 13.0 +601 89 training.batch_size 1.0 +601 90 model.embedding_dim 0.0 +601 90 optimizer.lr 0.0024489864058502425 +601 90 negative_sampler.num_negs_per_pos 63.0 +601 90 training.batch_size 2.0 +601 91 model.embedding_dim 0.0 +601 91 optimizer.lr 0.001627242888811675 +601 91 negative_sampler.num_negs_per_pos 43.0 +601 91 training.batch_size 2.0 +601 92 model.embedding_dim 2.0 +601 92 optimizer.lr 0.08777956792333441 +601 92 negative_sampler.num_negs_per_pos 7.0 +601 92 training.batch_size 1.0 +601 93 model.embedding_dim 0.0 +601 93 optimizer.lr 0.08719795600243803 +601 93 negative_sampler.num_negs_per_pos 52.0 +601 93 training.batch_size 2.0 +601 94 model.embedding_dim 2.0 +601 94 optimizer.lr 0.028912283697255983 +601 94 negative_sampler.num_negs_per_pos 79.0 +601 94 training.batch_size 1.0 +601 95 model.embedding_dim 2.0 +601 95 optimizer.lr 0.00699323942755091 +601 95 negative_sampler.num_negs_per_pos 5.0 +601 95 training.batch_size 1.0 +601 96 model.embedding_dim 2.0 +601 96 optimizer.lr 0.004549892134816864 +601 96 negative_sampler.num_negs_per_pos 97.0 +601 96 training.batch_size 0.0 +601 97 model.embedding_dim 0.0 +601 97 optimizer.lr 
0.028940549570849936 +601 97 negative_sampler.num_negs_per_pos 94.0 +601 97 training.batch_size 1.0 +601 98 model.embedding_dim 2.0 +601 98 optimizer.lr 0.06258740378256418 +601 98 negative_sampler.num_negs_per_pos 22.0 +601 98 training.batch_size 2.0 +601 99 model.embedding_dim 0.0 +601 99 optimizer.lr 0.0013938192360709045 +601 99 negative_sampler.num_negs_per_pos 48.0 +601 99 training.batch_size 0.0 +601 100 model.embedding_dim 2.0 +601 100 optimizer.lr 0.005082304245952691 +601 100 negative_sampler.num_negs_per_pos 34.0 +601 100 training.batch_size 1.0 +601 1 dataset """kinships""" +601 1 model """rotate""" +601 1 loss """bceaftersigmoid""" +601 1 regularizer """no""" +601 1 optimizer """adam""" +601 1 training_loop """owa""" +601 1 negative_sampler """basic""" +601 1 evaluator """rankbased""" +601 2 dataset """kinships""" +601 2 model """rotate""" +601 2 loss """bceaftersigmoid""" +601 2 regularizer """no""" +601 2 optimizer """adam""" +601 2 training_loop """owa""" +601 2 negative_sampler """basic""" +601 2 evaluator """rankbased""" +601 3 dataset """kinships""" +601 3 model """rotate""" +601 3 loss """bceaftersigmoid""" +601 3 regularizer """no""" +601 3 optimizer """adam""" +601 3 training_loop """owa""" +601 3 negative_sampler """basic""" +601 3 evaluator """rankbased""" +601 4 dataset """kinships""" +601 4 model """rotate""" +601 4 loss """bceaftersigmoid""" +601 4 regularizer """no""" +601 4 optimizer """adam""" +601 4 training_loop """owa""" +601 4 negative_sampler """basic""" +601 4 evaluator """rankbased""" +601 5 dataset """kinships""" +601 5 model """rotate""" +601 5 loss """bceaftersigmoid""" +601 5 regularizer """no""" +601 5 optimizer """adam""" +601 5 training_loop """owa""" +601 5 negative_sampler """basic""" +601 5 evaluator """rankbased""" +601 6 dataset """kinships""" +601 6 model """rotate""" +601 6 loss """bceaftersigmoid""" +601 6 regularizer """no""" +601 6 optimizer """adam""" +601 6 training_loop """owa""" +601 6 negative_sampler 
"""basic""" +601 6 evaluator """rankbased""" +601 7 dataset """kinships""" +601 7 model """rotate""" +601 7 loss """bceaftersigmoid""" +601 7 regularizer """no""" +601 7 optimizer """adam""" +601 7 training_loop """owa""" +601 7 negative_sampler """basic""" +601 7 evaluator """rankbased""" +601 8 dataset """kinships""" +601 8 model """rotate""" +601 8 loss """bceaftersigmoid""" +601 8 regularizer """no""" +601 8 optimizer """adam""" +601 8 training_loop """owa""" +601 8 negative_sampler """basic""" +601 8 evaluator """rankbased""" +601 9 dataset """kinships""" +601 9 model """rotate""" +601 9 loss """bceaftersigmoid""" +601 9 regularizer """no""" +601 9 optimizer """adam""" +601 9 training_loop """owa""" +601 9 negative_sampler """basic""" +601 9 evaluator """rankbased""" +601 10 dataset """kinships""" +601 10 model """rotate""" +601 10 loss """bceaftersigmoid""" +601 10 regularizer """no""" +601 10 optimizer """adam""" +601 10 training_loop """owa""" +601 10 negative_sampler """basic""" +601 10 evaluator """rankbased""" +601 11 dataset """kinships""" +601 11 model """rotate""" +601 11 loss """bceaftersigmoid""" +601 11 regularizer """no""" +601 11 optimizer """adam""" +601 11 training_loop """owa""" +601 11 negative_sampler """basic""" +601 11 evaluator """rankbased""" +601 12 dataset """kinships""" +601 12 model """rotate""" +601 12 loss """bceaftersigmoid""" +601 12 regularizer """no""" +601 12 optimizer """adam""" +601 12 training_loop """owa""" +601 12 negative_sampler """basic""" +601 12 evaluator """rankbased""" +601 13 dataset """kinships""" +601 13 model """rotate""" +601 13 loss """bceaftersigmoid""" +601 13 regularizer """no""" +601 13 optimizer """adam""" +601 13 training_loop """owa""" +601 13 negative_sampler """basic""" +601 13 evaluator """rankbased""" +601 14 dataset """kinships""" +601 14 model """rotate""" +601 14 loss """bceaftersigmoid""" +601 14 regularizer """no""" +601 14 optimizer """adam""" +601 14 training_loop """owa""" +601 14 
negative_sampler """basic""" +601 14 evaluator """rankbased""" +601 15 dataset """kinships""" +601 15 model """rotate""" +601 15 loss """bceaftersigmoid""" +601 15 regularizer """no""" +601 15 optimizer """adam""" +601 15 training_loop """owa""" +601 15 negative_sampler """basic""" +601 15 evaluator """rankbased""" +601 16 dataset """kinships""" +601 16 model """rotate""" +601 16 loss """bceaftersigmoid""" +601 16 regularizer """no""" +601 16 optimizer """adam""" +601 16 training_loop """owa""" +601 16 negative_sampler """basic""" +601 16 evaluator """rankbased""" +601 17 dataset """kinships""" +601 17 model """rotate""" +601 17 loss """bceaftersigmoid""" +601 17 regularizer """no""" +601 17 optimizer """adam""" +601 17 training_loop """owa""" +601 17 negative_sampler """basic""" +601 17 evaluator """rankbased""" +601 18 dataset """kinships""" +601 18 model """rotate""" +601 18 loss """bceaftersigmoid""" +601 18 regularizer """no""" +601 18 optimizer """adam""" +601 18 training_loop """owa""" +601 18 negative_sampler """basic""" +601 18 evaluator """rankbased""" +601 19 dataset """kinships""" +601 19 model """rotate""" +601 19 loss """bceaftersigmoid""" +601 19 regularizer """no""" +601 19 optimizer """adam""" +601 19 training_loop """owa""" +601 19 negative_sampler """basic""" +601 19 evaluator """rankbased""" +601 20 dataset """kinships""" +601 20 model """rotate""" +601 20 loss """bceaftersigmoid""" +601 20 regularizer """no""" +601 20 optimizer """adam""" +601 20 training_loop """owa""" +601 20 negative_sampler """basic""" +601 20 evaluator """rankbased""" +601 21 dataset """kinships""" +601 21 model """rotate""" +601 21 loss """bceaftersigmoid""" +601 21 regularizer """no""" +601 21 optimizer """adam""" +601 21 training_loop """owa""" +601 21 negative_sampler """basic""" +601 21 evaluator """rankbased""" +601 22 dataset """kinships""" +601 22 model """rotate""" +601 22 loss """bceaftersigmoid""" +601 22 regularizer """no""" +601 22 optimizer """adam""" +601 22 
training_loop """owa""" +601 22 negative_sampler """basic""" +601 22 evaluator """rankbased""" +601 23 dataset """kinships""" +601 23 model """rotate""" +601 23 loss """bceaftersigmoid""" +601 23 regularizer """no""" +601 23 optimizer """adam""" +601 23 training_loop """owa""" +601 23 negative_sampler """basic""" +601 23 evaluator """rankbased""" +601 24 dataset """kinships""" +601 24 model """rotate""" +601 24 loss """bceaftersigmoid""" +601 24 regularizer """no""" +601 24 optimizer """adam""" +601 24 training_loop """owa""" +601 24 negative_sampler """basic""" +601 24 evaluator """rankbased""" +601 25 dataset """kinships""" +601 25 model """rotate""" +601 25 loss """bceaftersigmoid""" +601 25 regularizer """no""" +601 25 optimizer """adam""" +601 25 training_loop """owa""" +601 25 negative_sampler """basic""" +601 25 evaluator """rankbased""" +601 26 dataset """kinships""" +601 26 model """rotate""" +601 26 loss """bceaftersigmoid""" +601 26 regularizer """no""" +601 26 optimizer """adam""" +601 26 training_loop """owa""" +601 26 negative_sampler """basic""" +601 26 evaluator """rankbased""" +601 27 dataset """kinships""" +601 27 model """rotate""" +601 27 loss """bceaftersigmoid""" +601 27 regularizer """no""" +601 27 optimizer """adam""" +601 27 training_loop """owa""" +601 27 negative_sampler """basic""" +601 27 evaluator """rankbased""" +601 28 dataset """kinships""" +601 28 model """rotate""" +601 28 loss """bceaftersigmoid""" +601 28 regularizer """no""" +601 28 optimizer """adam""" +601 28 training_loop """owa""" +601 28 negative_sampler """basic""" +601 28 evaluator """rankbased""" +601 29 dataset """kinships""" +601 29 model """rotate""" +601 29 loss """bceaftersigmoid""" +601 29 regularizer """no""" +601 29 optimizer """adam""" +601 29 training_loop """owa""" +601 29 negative_sampler """basic""" +601 29 evaluator """rankbased""" +601 30 dataset """kinships""" +601 30 model """rotate""" +601 30 loss """bceaftersigmoid""" +601 30 regularizer """no""" +601 
30 optimizer """adam""" +601 30 training_loop """owa""" +601 30 negative_sampler """basic""" +601 30 evaluator """rankbased""" +601 31 dataset """kinships""" +601 31 model """rotate""" +601 31 loss """bceaftersigmoid""" +601 31 regularizer """no""" +601 31 optimizer """adam""" +601 31 training_loop """owa""" +601 31 negative_sampler """basic""" +601 31 evaluator """rankbased""" +601 32 dataset """kinships""" +601 32 model """rotate""" +601 32 loss """bceaftersigmoid""" +601 32 regularizer """no""" +601 32 optimizer """adam""" +601 32 training_loop """owa""" +601 32 negative_sampler """basic""" +601 32 evaluator """rankbased""" +601 33 dataset """kinships""" +601 33 model """rotate""" +601 33 loss """bceaftersigmoid""" +601 33 regularizer """no""" +601 33 optimizer """adam""" +601 33 training_loop """owa""" +601 33 negative_sampler """basic""" +601 33 evaluator """rankbased""" +601 34 dataset """kinships""" +601 34 model """rotate""" +601 34 loss """bceaftersigmoid""" +601 34 regularizer """no""" +601 34 optimizer """adam""" +601 34 training_loop """owa""" +601 34 negative_sampler """basic""" +601 34 evaluator """rankbased""" +601 35 dataset """kinships""" +601 35 model """rotate""" +601 35 loss """bceaftersigmoid""" +601 35 regularizer """no""" +601 35 optimizer """adam""" +601 35 training_loop """owa""" +601 35 negative_sampler """basic""" +601 35 evaluator """rankbased""" +601 36 dataset """kinships""" +601 36 model """rotate""" +601 36 loss """bceaftersigmoid""" +601 36 regularizer """no""" +601 36 optimizer """adam""" +601 36 training_loop """owa""" +601 36 negative_sampler """basic""" +601 36 evaluator """rankbased""" +601 37 dataset """kinships""" +601 37 model """rotate""" +601 37 loss """bceaftersigmoid""" +601 37 regularizer """no""" +601 37 optimizer """adam""" +601 37 training_loop """owa""" +601 37 negative_sampler """basic""" +601 37 evaluator """rankbased""" +601 38 dataset """kinships""" +601 38 model """rotate""" +601 38 loss """bceaftersigmoid""" 
+601 38 regularizer """no""" +601 38 optimizer """adam""" +601 38 training_loop """owa""" +601 38 negative_sampler """basic""" +601 38 evaluator """rankbased""" +601 39 dataset """kinships""" +601 39 model """rotate""" +601 39 loss """bceaftersigmoid""" +601 39 regularizer """no""" +601 39 optimizer """adam""" +601 39 training_loop """owa""" +601 39 negative_sampler """basic""" +601 39 evaluator """rankbased""" +601 40 dataset """kinships""" +601 40 model """rotate""" +601 40 loss """bceaftersigmoid""" +601 40 regularizer """no""" +601 40 optimizer """adam""" +601 40 training_loop """owa""" +601 40 negative_sampler """basic""" +601 40 evaluator """rankbased""" +601 41 dataset """kinships""" +601 41 model """rotate""" +601 41 loss """bceaftersigmoid""" +601 41 regularizer """no""" +601 41 optimizer """adam""" +601 41 training_loop """owa""" +601 41 negative_sampler """basic""" +601 41 evaluator """rankbased""" +601 42 dataset """kinships""" +601 42 model """rotate""" +601 42 loss """bceaftersigmoid""" +601 42 regularizer """no""" +601 42 optimizer """adam""" +601 42 training_loop """owa""" +601 42 negative_sampler """basic""" +601 42 evaluator """rankbased""" +601 43 dataset """kinships""" +601 43 model """rotate""" +601 43 loss """bceaftersigmoid""" +601 43 regularizer """no""" +601 43 optimizer """adam""" +601 43 training_loop """owa""" +601 43 negative_sampler """basic""" +601 43 evaluator """rankbased""" +601 44 dataset """kinships""" +601 44 model """rotate""" +601 44 loss """bceaftersigmoid""" +601 44 regularizer """no""" +601 44 optimizer """adam""" +601 44 training_loop """owa""" +601 44 negative_sampler """basic""" +601 44 evaluator """rankbased""" +601 45 dataset """kinships""" +601 45 model """rotate""" +601 45 loss """bceaftersigmoid""" +601 45 regularizer """no""" +601 45 optimizer """adam""" +601 45 training_loop """owa""" +601 45 negative_sampler """basic""" +601 45 evaluator """rankbased""" +601 46 dataset """kinships""" +601 46 model """rotate""" 
+601 46 loss """bceaftersigmoid""" +601 46 regularizer """no""" +601 46 optimizer """adam""" +601 46 training_loop """owa""" +601 46 negative_sampler """basic""" +601 46 evaluator """rankbased""" +601 47 dataset """kinships""" +601 47 model """rotate""" +601 47 loss """bceaftersigmoid""" +601 47 regularizer """no""" +601 47 optimizer """adam""" +601 47 training_loop """owa""" +601 47 negative_sampler """basic""" +601 47 evaluator """rankbased""" +601 48 dataset """kinships""" +601 48 model """rotate""" +601 48 loss """bceaftersigmoid""" +601 48 regularizer """no""" +601 48 optimizer """adam""" +601 48 training_loop """owa""" +601 48 negative_sampler """basic""" +601 48 evaluator """rankbased""" +601 49 dataset """kinships""" +601 49 model """rotate""" +601 49 loss """bceaftersigmoid""" +601 49 regularizer """no""" +601 49 optimizer """adam""" +601 49 training_loop """owa""" +601 49 negative_sampler """basic""" +601 49 evaluator """rankbased""" +601 50 dataset """kinships""" +601 50 model """rotate""" +601 50 loss """bceaftersigmoid""" +601 50 regularizer """no""" +601 50 optimizer """adam""" +601 50 training_loop """owa""" +601 50 negative_sampler """basic""" +601 50 evaluator """rankbased""" +601 51 dataset """kinships""" +601 51 model """rotate""" +601 51 loss """bceaftersigmoid""" +601 51 regularizer """no""" +601 51 optimizer """adam""" +601 51 training_loop """owa""" +601 51 negative_sampler """basic""" +601 51 evaluator """rankbased""" +601 52 dataset """kinships""" +601 52 model """rotate""" +601 52 loss """bceaftersigmoid""" +601 52 regularizer """no""" +601 52 optimizer """adam""" +601 52 training_loop """owa""" +601 52 negative_sampler """basic""" +601 52 evaluator """rankbased""" +601 53 dataset """kinships""" +601 53 model """rotate""" +601 53 loss """bceaftersigmoid""" +601 53 regularizer """no""" +601 53 optimizer """adam""" +601 53 training_loop """owa""" +601 53 negative_sampler """basic""" +601 53 evaluator """rankbased""" +601 54 dataset 
"""kinships""" +601 54 model """rotate""" +601 54 loss """bceaftersigmoid""" +601 54 regularizer """no""" +601 54 optimizer """adam""" +601 54 training_loop """owa""" +601 54 negative_sampler """basic""" +601 54 evaluator """rankbased""" +601 55 dataset """kinships""" +601 55 model """rotate""" +601 55 loss """bceaftersigmoid""" +601 55 regularizer """no""" +601 55 optimizer """adam""" +601 55 training_loop """owa""" +601 55 negative_sampler """basic""" +601 55 evaluator """rankbased""" +601 56 dataset """kinships""" +601 56 model """rotate""" +601 56 loss """bceaftersigmoid""" +601 56 regularizer """no""" +601 56 optimizer """adam""" +601 56 training_loop """owa""" +601 56 negative_sampler """basic""" +601 56 evaluator """rankbased""" +601 57 dataset """kinships""" +601 57 model """rotate""" +601 57 loss """bceaftersigmoid""" +601 57 regularizer """no""" +601 57 optimizer """adam""" +601 57 training_loop """owa""" +601 57 negative_sampler """basic""" +601 57 evaluator """rankbased""" +601 58 dataset """kinships""" +601 58 model """rotate""" +601 58 loss """bceaftersigmoid""" +601 58 regularizer """no""" +601 58 optimizer """adam""" +601 58 training_loop """owa""" +601 58 negative_sampler """basic""" +601 58 evaluator """rankbased""" +601 59 dataset """kinships""" +601 59 model """rotate""" +601 59 loss """bceaftersigmoid""" +601 59 regularizer """no""" +601 59 optimizer """adam""" +601 59 training_loop """owa""" +601 59 negative_sampler """basic""" +601 59 evaluator """rankbased""" +601 60 dataset """kinships""" +601 60 model """rotate""" +601 60 loss """bceaftersigmoid""" +601 60 regularizer """no""" +601 60 optimizer """adam""" +601 60 training_loop """owa""" +601 60 negative_sampler """basic""" +601 60 evaluator """rankbased""" +601 61 dataset """kinships""" +601 61 model """rotate""" +601 61 loss """bceaftersigmoid""" +601 61 regularizer """no""" +601 61 optimizer """adam""" +601 61 training_loop """owa""" +601 61 negative_sampler """basic""" +601 61 evaluator 
"""rankbased""" +601 62 dataset """kinships""" +601 62 model """rotate""" +601 62 loss """bceaftersigmoid""" +601 62 regularizer """no""" +601 62 optimizer """adam""" +601 62 training_loop """owa""" +601 62 negative_sampler """basic""" +601 62 evaluator """rankbased""" +601 63 dataset """kinships""" +601 63 model """rotate""" +601 63 loss """bceaftersigmoid""" +601 63 regularizer """no""" +601 63 optimizer """adam""" +601 63 training_loop """owa""" +601 63 negative_sampler """basic""" +601 63 evaluator """rankbased""" +601 64 dataset """kinships""" +601 64 model """rotate""" +601 64 loss """bceaftersigmoid""" +601 64 regularizer """no""" +601 64 optimizer """adam""" +601 64 training_loop """owa""" +601 64 negative_sampler """basic""" +601 64 evaluator """rankbased""" +601 65 dataset """kinships""" +601 65 model """rotate""" +601 65 loss """bceaftersigmoid""" +601 65 regularizer """no""" +601 65 optimizer """adam""" +601 65 training_loop """owa""" +601 65 negative_sampler """basic""" +601 65 evaluator """rankbased""" +601 66 dataset """kinships""" +601 66 model """rotate""" +601 66 loss """bceaftersigmoid""" +601 66 regularizer """no""" +601 66 optimizer """adam""" +601 66 training_loop """owa""" +601 66 negative_sampler """basic""" +601 66 evaluator """rankbased""" +601 67 dataset """kinships""" +601 67 model """rotate""" +601 67 loss """bceaftersigmoid""" +601 67 regularizer """no""" +601 67 optimizer """adam""" +601 67 training_loop """owa""" +601 67 negative_sampler """basic""" +601 67 evaluator """rankbased""" +601 68 dataset """kinships""" +601 68 model """rotate""" +601 68 loss """bceaftersigmoid""" +601 68 regularizer """no""" +601 68 optimizer """adam""" +601 68 training_loop """owa""" +601 68 negative_sampler """basic""" +601 68 evaluator """rankbased""" +601 69 dataset """kinships""" +601 69 model """rotate""" +601 69 loss """bceaftersigmoid""" +601 69 regularizer """no""" +601 69 optimizer """adam""" +601 69 training_loop """owa""" +601 69 
negative_sampler """basic""" +601 69 evaluator """rankbased""" +601 70 dataset """kinships""" +601 70 model """rotate""" +601 70 loss """bceaftersigmoid""" +601 70 regularizer """no""" +601 70 optimizer """adam""" +601 70 training_loop """owa""" +601 70 negative_sampler """basic""" +601 70 evaluator """rankbased""" +601 71 dataset """kinships""" +601 71 model """rotate""" +601 71 loss """bceaftersigmoid""" +601 71 regularizer """no""" +601 71 optimizer """adam""" +601 71 training_loop """owa""" +601 71 negative_sampler """basic""" +601 71 evaluator """rankbased""" +601 72 dataset """kinships""" +601 72 model """rotate""" +601 72 loss """bceaftersigmoid""" +601 72 regularizer """no""" +601 72 optimizer """adam""" +601 72 training_loop """owa""" +601 72 negative_sampler """basic""" +601 72 evaluator """rankbased""" +601 73 dataset """kinships""" +601 73 model """rotate""" +601 73 loss """bceaftersigmoid""" +601 73 regularizer """no""" +601 73 optimizer """adam""" +601 73 training_loop """owa""" +601 73 negative_sampler """basic""" +601 73 evaluator """rankbased""" +601 74 dataset """kinships""" +601 74 model """rotate""" +601 74 loss """bceaftersigmoid""" +601 74 regularizer """no""" +601 74 optimizer """adam""" +601 74 training_loop """owa""" +601 74 negative_sampler """basic""" +601 74 evaluator """rankbased""" +601 75 dataset """kinships""" +601 75 model """rotate""" +601 75 loss """bceaftersigmoid""" +601 75 regularizer """no""" +601 75 optimizer """adam""" +601 75 training_loop """owa""" +601 75 negative_sampler """basic""" +601 75 evaluator """rankbased""" +601 76 dataset """kinships""" +601 76 model """rotate""" +601 76 loss """bceaftersigmoid""" +601 76 regularizer """no""" +601 76 optimizer """adam""" +601 76 training_loop """owa""" +601 76 negative_sampler """basic""" +601 76 evaluator """rankbased""" +601 77 dataset """kinships""" +601 77 model """rotate""" +601 77 loss """bceaftersigmoid""" +601 77 regularizer """no""" +601 77 optimizer """adam""" +601 77 
training_loop """owa""" +601 77 negative_sampler """basic""" +601 77 evaluator """rankbased""" +601 78 dataset """kinships""" +601 78 model """rotate""" +601 78 loss """bceaftersigmoid""" +601 78 regularizer """no""" +601 78 optimizer """adam""" +601 78 training_loop """owa""" +601 78 negative_sampler """basic""" +601 78 evaluator """rankbased""" +601 79 dataset """kinships""" +601 79 model """rotate""" +601 79 loss """bceaftersigmoid""" +601 79 regularizer """no""" +601 79 optimizer """adam""" +601 79 training_loop """owa""" +601 79 negative_sampler """basic""" +601 79 evaluator """rankbased""" +601 80 dataset """kinships""" +601 80 model """rotate""" +601 80 loss """bceaftersigmoid""" +601 80 regularizer """no""" +601 80 optimizer """adam""" +601 80 training_loop """owa""" +601 80 negative_sampler """basic""" +601 80 evaluator """rankbased""" +601 81 dataset """kinships""" +601 81 model """rotate""" +601 81 loss """bceaftersigmoid""" +601 81 regularizer """no""" +601 81 optimizer """adam""" +601 81 training_loop """owa""" +601 81 negative_sampler """basic""" +601 81 evaluator """rankbased""" +601 82 dataset """kinships""" +601 82 model """rotate""" +601 82 loss """bceaftersigmoid""" +601 82 regularizer """no""" +601 82 optimizer """adam""" +601 82 training_loop """owa""" +601 82 negative_sampler """basic""" +601 82 evaluator """rankbased""" +601 83 dataset """kinships""" +601 83 model """rotate""" +601 83 loss """bceaftersigmoid""" +601 83 regularizer """no""" +601 83 optimizer """adam""" +601 83 training_loop """owa""" +601 83 negative_sampler """basic""" +601 83 evaluator """rankbased""" +601 84 dataset """kinships""" +601 84 model """rotate""" +601 84 loss """bceaftersigmoid""" +601 84 regularizer """no""" +601 84 optimizer """adam""" +601 84 training_loop """owa""" +601 84 negative_sampler """basic""" +601 84 evaluator """rankbased""" +601 85 dataset """kinships""" +601 85 model """rotate""" +601 85 loss """bceaftersigmoid""" +601 85 regularizer """no""" +601 
85 optimizer """adam""" +601 85 training_loop """owa""" +601 85 negative_sampler """basic""" +601 85 evaluator """rankbased""" +601 86 dataset """kinships""" +601 86 model """rotate""" +601 86 loss """bceaftersigmoid""" +601 86 regularizer """no""" +601 86 optimizer """adam""" +601 86 training_loop """owa""" +601 86 negative_sampler """basic""" +601 86 evaluator """rankbased""" +601 87 dataset """kinships""" +601 87 model """rotate""" +601 87 loss """bceaftersigmoid""" +601 87 regularizer """no""" +601 87 optimizer """adam""" +601 87 training_loop """owa""" +601 87 negative_sampler """basic""" +601 87 evaluator """rankbased""" +601 88 dataset """kinships""" +601 88 model """rotate""" +601 88 loss """bceaftersigmoid""" +601 88 regularizer """no""" +601 88 optimizer """adam""" +601 88 training_loop """owa""" +601 88 negative_sampler """basic""" +601 88 evaluator """rankbased""" +601 89 dataset """kinships""" +601 89 model """rotate""" +601 89 loss """bceaftersigmoid""" +601 89 regularizer """no""" +601 89 optimizer """adam""" +601 89 training_loop """owa""" +601 89 negative_sampler """basic""" +601 89 evaluator """rankbased""" +601 90 dataset """kinships""" +601 90 model """rotate""" +601 90 loss """bceaftersigmoid""" +601 90 regularizer """no""" +601 90 optimizer """adam""" +601 90 training_loop """owa""" +601 90 negative_sampler """basic""" +601 90 evaluator """rankbased""" +601 91 dataset """kinships""" +601 91 model """rotate""" +601 91 loss """bceaftersigmoid""" +601 91 regularizer """no""" +601 91 optimizer """adam""" +601 91 training_loop """owa""" +601 91 negative_sampler """basic""" +601 91 evaluator """rankbased""" +601 92 dataset """kinships""" +601 92 model """rotate""" +601 92 loss """bceaftersigmoid""" +601 92 regularizer """no""" +601 92 optimizer """adam""" +601 92 training_loop """owa""" +601 92 negative_sampler """basic""" +601 92 evaluator """rankbased""" +601 93 dataset """kinships""" +601 93 model """rotate""" +601 93 loss """bceaftersigmoid""" 
+601 93 regularizer """no""" +601 93 optimizer """adam""" +601 93 training_loop """owa""" +601 93 negative_sampler """basic""" +601 93 evaluator """rankbased""" +601 94 dataset """kinships""" +601 94 model """rotate""" +601 94 loss """bceaftersigmoid""" +601 94 regularizer """no""" +601 94 optimizer """adam""" +601 94 training_loop """owa""" +601 94 negative_sampler """basic""" +601 94 evaluator """rankbased""" +601 95 dataset """kinships""" +601 95 model """rotate""" +601 95 loss """bceaftersigmoid""" +601 95 regularizer """no""" +601 95 optimizer """adam""" +601 95 training_loop """owa""" +601 95 negative_sampler """basic""" +601 95 evaluator """rankbased""" +601 96 dataset """kinships""" +601 96 model """rotate""" +601 96 loss """bceaftersigmoid""" +601 96 regularizer """no""" +601 96 optimizer """adam""" +601 96 training_loop """owa""" +601 96 negative_sampler """basic""" +601 96 evaluator """rankbased""" +601 97 dataset """kinships""" +601 97 model """rotate""" +601 97 loss """bceaftersigmoid""" +601 97 regularizer """no""" +601 97 optimizer """adam""" +601 97 training_loop """owa""" +601 97 negative_sampler """basic""" +601 97 evaluator """rankbased""" +601 98 dataset """kinships""" +601 98 model """rotate""" +601 98 loss """bceaftersigmoid""" +601 98 regularizer """no""" +601 98 optimizer """adam""" +601 98 training_loop """owa""" +601 98 negative_sampler """basic""" +601 98 evaluator """rankbased""" +601 99 dataset """kinships""" +601 99 model """rotate""" +601 99 loss """bceaftersigmoid""" +601 99 regularizer """no""" +601 99 optimizer """adam""" +601 99 training_loop """owa""" +601 99 negative_sampler """basic""" +601 99 evaluator """rankbased""" +601 100 dataset """kinships""" +601 100 model """rotate""" +601 100 loss """bceaftersigmoid""" +601 100 regularizer """no""" +601 100 optimizer """adam""" +601 100 training_loop """owa""" +601 100 negative_sampler """basic""" +601 100 evaluator """rankbased""" +602 1 model.embedding_dim 2.0 +602 1 optimizer.lr 
0.07524090593212975 +602 1 negative_sampler.num_negs_per_pos 59.0 +602 1 training.batch_size 1.0 +602 2 model.embedding_dim 1.0 +602 2 optimizer.lr 0.04401727994507467 +602 2 negative_sampler.num_negs_per_pos 61.0 +602 2 training.batch_size 2.0 +602 3 model.embedding_dim 0.0 +602 3 optimizer.lr 0.0028279559486591066 +602 3 negative_sampler.num_negs_per_pos 54.0 +602 3 training.batch_size 0.0 +602 4 model.embedding_dim 0.0 +602 4 optimizer.lr 0.020024767692121092 +602 4 negative_sampler.num_negs_per_pos 83.0 +602 4 training.batch_size 2.0 +602 5 model.embedding_dim 2.0 +602 5 optimizer.lr 0.002266050236265723 +602 5 negative_sampler.num_negs_per_pos 84.0 +602 5 training.batch_size 1.0 +602 6 model.embedding_dim 2.0 +602 6 optimizer.lr 0.004797867419901482 +602 6 negative_sampler.num_negs_per_pos 90.0 +602 6 training.batch_size 0.0 +602 7 model.embedding_dim 1.0 +602 7 optimizer.lr 0.0019277957244299814 +602 7 negative_sampler.num_negs_per_pos 96.0 +602 7 training.batch_size 2.0 +602 8 model.embedding_dim 1.0 +602 8 optimizer.lr 0.019020635796765804 +602 8 negative_sampler.num_negs_per_pos 21.0 +602 8 training.batch_size 1.0 +602 9 model.embedding_dim 2.0 +602 9 optimizer.lr 0.0805484240586964 +602 9 negative_sampler.num_negs_per_pos 89.0 +602 9 training.batch_size 2.0 +602 10 model.embedding_dim 1.0 +602 10 optimizer.lr 0.0518364878392664 +602 10 negative_sampler.num_negs_per_pos 84.0 +602 10 training.batch_size 1.0 +602 11 model.embedding_dim 1.0 +602 11 optimizer.lr 0.0010157381096943278 +602 11 negative_sampler.num_negs_per_pos 51.0 +602 11 training.batch_size 2.0 +602 12 model.embedding_dim 0.0 +602 12 optimizer.lr 0.0037822192759750027 +602 12 negative_sampler.num_negs_per_pos 56.0 +602 12 training.batch_size 1.0 +602 13 model.embedding_dim 2.0 +602 13 optimizer.lr 0.028161998766845237 +602 13 negative_sampler.num_negs_per_pos 10.0 +602 13 training.batch_size 2.0 +602 14 model.embedding_dim 2.0 +602 14 optimizer.lr 0.027177714731436296 +602 14 
negative_sampler.num_negs_per_pos 94.0 +602 14 training.batch_size 1.0 +602 15 model.embedding_dim 0.0 +602 15 optimizer.lr 0.001822710027216686 +602 15 negative_sampler.num_negs_per_pos 42.0 +602 15 training.batch_size 1.0 +602 16 model.embedding_dim 2.0 +602 16 optimizer.lr 0.015254059437223065 +602 16 negative_sampler.num_negs_per_pos 11.0 +602 16 training.batch_size 1.0 +602 17 model.embedding_dim 0.0 +602 17 optimizer.lr 0.0015495121106914446 +602 17 negative_sampler.num_negs_per_pos 20.0 +602 17 training.batch_size 2.0 +602 18 model.embedding_dim 2.0 +602 18 optimizer.lr 0.005427527295825451 +602 18 negative_sampler.num_negs_per_pos 51.0 +602 18 training.batch_size 1.0 +602 19 model.embedding_dim 0.0 +602 19 optimizer.lr 0.031686304109452355 +602 19 negative_sampler.num_negs_per_pos 80.0 +602 19 training.batch_size 2.0 +602 20 model.embedding_dim 1.0 +602 20 optimizer.lr 0.0062924889660978195 +602 20 negative_sampler.num_negs_per_pos 98.0 +602 20 training.batch_size 0.0 +602 21 model.embedding_dim 0.0 +602 21 optimizer.lr 0.012518526075935392 +602 21 negative_sampler.num_negs_per_pos 73.0 +602 21 training.batch_size 2.0 +602 22 model.embedding_dim 1.0 +602 22 optimizer.lr 0.0012226048678789592 +602 22 negative_sampler.num_negs_per_pos 1.0 +602 22 training.batch_size 0.0 +602 23 model.embedding_dim 1.0 +602 23 optimizer.lr 0.007314727817703831 +602 23 negative_sampler.num_negs_per_pos 1.0 +602 23 training.batch_size 2.0 +602 24 model.embedding_dim 2.0 +602 24 optimizer.lr 0.0014929853793859452 +602 24 negative_sampler.num_negs_per_pos 37.0 +602 24 training.batch_size 0.0 +602 25 model.embedding_dim 1.0 +602 25 optimizer.lr 0.008709542330234667 +602 25 negative_sampler.num_negs_per_pos 95.0 +602 25 training.batch_size 0.0 +602 26 model.embedding_dim 1.0 +602 26 optimizer.lr 0.010464358567939666 +602 26 negative_sampler.num_negs_per_pos 46.0 +602 26 training.batch_size 1.0 +602 27 model.embedding_dim 2.0 +602 27 optimizer.lr 0.006324492237501668 +602 27 
negative_sampler.num_negs_per_pos 21.0 +602 27 training.batch_size 2.0 +602 28 model.embedding_dim 0.0 +602 28 optimizer.lr 0.01752854949946935 +602 28 negative_sampler.num_negs_per_pos 18.0 +602 28 training.batch_size 1.0 +602 29 model.embedding_dim 1.0 +602 29 optimizer.lr 0.0010896100770321133 +602 29 negative_sampler.num_negs_per_pos 36.0 +602 29 training.batch_size 2.0 +602 30 model.embedding_dim 0.0 +602 30 optimizer.lr 0.004594621795865982 +602 30 negative_sampler.num_negs_per_pos 71.0 +602 30 training.batch_size 1.0 +602 31 model.embedding_dim 1.0 +602 31 optimizer.lr 0.009252821401004651 +602 31 negative_sampler.num_negs_per_pos 22.0 +602 31 training.batch_size 2.0 +602 32 model.embedding_dim 1.0 +602 32 optimizer.lr 0.0018411956039923103 +602 32 negative_sampler.num_negs_per_pos 63.0 +602 32 training.batch_size 0.0 +602 33 model.embedding_dim 1.0 +602 33 optimizer.lr 0.0019880985623545663 +602 33 negative_sampler.num_negs_per_pos 8.0 +602 33 training.batch_size 1.0 +602 34 model.embedding_dim 1.0 +602 34 optimizer.lr 0.0035591170939883086 +602 34 negative_sampler.num_negs_per_pos 76.0 +602 34 training.batch_size 2.0 +602 35 model.embedding_dim 2.0 +602 35 optimizer.lr 0.007176342688983065 +602 35 negative_sampler.num_negs_per_pos 29.0 +602 35 training.batch_size 1.0 +602 36 model.embedding_dim 2.0 +602 36 optimizer.lr 0.003304633881049429 +602 36 negative_sampler.num_negs_per_pos 5.0 +602 36 training.batch_size 0.0 +602 37 model.embedding_dim 2.0 +602 37 optimizer.lr 0.0012355099915249818 +602 37 negative_sampler.num_negs_per_pos 32.0 +602 37 training.batch_size 2.0 +602 38 model.embedding_dim 1.0 +602 38 optimizer.lr 0.02076971584112027 +602 38 negative_sampler.num_negs_per_pos 9.0 +602 38 training.batch_size 0.0 +602 39 model.embedding_dim 1.0 +602 39 optimizer.lr 0.04389249960148817 +602 39 negative_sampler.num_negs_per_pos 16.0 +602 39 training.batch_size 2.0 +602 40 model.embedding_dim 2.0 +602 40 optimizer.lr 0.006707739904462682 +602 40 
negative_sampler.num_negs_per_pos 81.0 +602 40 training.batch_size 1.0 +602 41 model.embedding_dim 0.0 +602 41 optimizer.lr 0.04284483777928888 +602 41 negative_sampler.num_negs_per_pos 31.0 +602 41 training.batch_size 1.0 +602 42 model.embedding_dim 1.0 +602 42 optimizer.lr 0.0356291792054364 +602 42 negative_sampler.num_negs_per_pos 12.0 +602 42 training.batch_size 2.0 +602 43 model.embedding_dim 1.0 +602 43 optimizer.lr 0.04281206154092624 +602 43 negative_sampler.num_negs_per_pos 56.0 +602 43 training.batch_size 2.0 +602 44 model.embedding_dim 0.0 +602 44 optimizer.lr 0.032900306176335624 +602 44 negative_sampler.num_negs_per_pos 34.0 +602 44 training.batch_size 1.0 +602 45 model.embedding_dim 1.0 +602 45 optimizer.lr 0.003804885673749946 +602 45 negative_sampler.num_negs_per_pos 50.0 +602 45 training.batch_size 0.0 +602 46 model.embedding_dim 2.0 +602 46 optimizer.lr 0.0076139429421421 +602 46 negative_sampler.num_negs_per_pos 96.0 +602 46 training.batch_size 1.0 +602 47 model.embedding_dim 1.0 +602 47 optimizer.lr 0.002890090605726868 +602 47 negative_sampler.num_negs_per_pos 72.0 +602 47 training.batch_size 0.0 +602 48 model.embedding_dim 0.0 +602 48 optimizer.lr 0.03618311187709761 +602 48 negative_sampler.num_negs_per_pos 46.0 +602 48 training.batch_size 0.0 +602 49 model.embedding_dim 1.0 +602 49 optimizer.lr 0.039899243618240224 +602 49 negative_sampler.num_negs_per_pos 21.0 +602 49 training.batch_size 2.0 +602 50 model.embedding_dim 2.0 +602 50 optimizer.lr 0.05043786209483472 +602 50 negative_sampler.num_negs_per_pos 79.0 +602 50 training.batch_size 0.0 +602 51 model.embedding_dim 1.0 +602 51 optimizer.lr 0.0012152362768804088 +602 51 negative_sampler.num_negs_per_pos 34.0 +602 51 training.batch_size 1.0 +602 52 model.embedding_dim 1.0 +602 52 optimizer.lr 0.02857701105616498 +602 52 negative_sampler.num_negs_per_pos 98.0 +602 52 training.batch_size 2.0 +602 53 model.embedding_dim 2.0 +602 53 optimizer.lr 0.08682773842716981 +602 53 
negative_sampler.num_negs_per_pos 7.0 +602 53 training.batch_size 2.0 +602 54 model.embedding_dim 1.0 +602 54 optimizer.lr 0.012284553807869313 +602 54 negative_sampler.num_negs_per_pos 78.0 +602 54 training.batch_size 2.0 +602 55 model.embedding_dim 1.0 +602 55 optimizer.lr 0.0016479376034344278 +602 55 negative_sampler.num_negs_per_pos 90.0 +602 55 training.batch_size 0.0 +602 56 model.embedding_dim 2.0 +602 56 optimizer.lr 0.02008779182669543 +602 56 negative_sampler.num_negs_per_pos 19.0 +602 56 training.batch_size 2.0 +602 57 model.embedding_dim 2.0 +602 57 optimizer.lr 0.03630028287793379 +602 57 negative_sampler.num_negs_per_pos 83.0 +602 57 training.batch_size 2.0 +602 58 model.embedding_dim 2.0 +602 58 optimizer.lr 0.016903855101464773 +602 58 negative_sampler.num_negs_per_pos 24.0 +602 58 training.batch_size 1.0 +602 59 model.embedding_dim 2.0 +602 59 optimizer.lr 0.05365257648878262 +602 59 negative_sampler.num_negs_per_pos 61.0 +602 59 training.batch_size 1.0 +602 60 model.embedding_dim 2.0 +602 60 optimizer.lr 0.005450625705878201 +602 60 negative_sampler.num_negs_per_pos 57.0 +602 60 training.batch_size 0.0 +602 61 model.embedding_dim 0.0 +602 61 optimizer.lr 0.030225400333500883 +602 61 negative_sampler.num_negs_per_pos 45.0 +602 61 training.batch_size 1.0 +602 62 model.embedding_dim 0.0 +602 62 optimizer.lr 0.07728127092227688 +602 62 negative_sampler.num_negs_per_pos 8.0 +602 62 training.batch_size 1.0 +602 63 model.embedding_dim 1.0 +602 63 optimizer.lr 0.04114320607144195 +602 63 negative_sampler.num_negs_per_pos 41.0 +602 63 training.batch_size 1.0 +602 64 model.embedding_dim 1.0 +602 64 optimizer.lr 0.005074671917502472 +602 64 negative_sampler.num_negs_per_pos 27.0 +602 64 training.batch_size 0.0 +602 65 model.embedding_dim 1.0 +602 65 optimizer.lr 0.006211480949453034 +602 65 negative_sampler.num_negs_per_pos 50.0 +602 65 training.batch_size 1.0 +602 66 model.embedding_dim 2.0 +602 66 optimizer.lr 0.020504951461220992 +602 66 
negative_sampler.num_negs_per_pos 8.0 +602 66 training.batch_size 0.0 +602 67 model.embedding_dim 0.0 +602 67 optimizer.lr 0.00570484577279204 +602 67 negative_sampler.num_negs_per_pos 87.0 +602 67 training.batch_size 2.0 +602 68 model.embedding_dim 1.0 +602 68 optimizer.lr 0.07240163921572186 +602 68 negative_sampler.num_negs_per_pos 58.0 +602 68 training.batch_size 2.0 +602 69 model.embedding_dim 2.0 +602 69 optimizer.lr 0.0784404574192546 +602 69 negative_sampler.num_negs_per_pos 45.0 +602 69 training.batch_size 0.0 +602 70 model.embedding_dim 2.0 +602 70 optimizer.lr 0.01656631432719847 +602 70 negative_sampler.num_negs_per_pos 0.0 +602 70 training.batch_size 1.0 +602 71 model.embedding_dim 1.0 +602 71 optimizer.lr 0.0056401595587213214 +602 71 negative_sampler.num_negs_per_pos 24.0 +602 71 training.batch_size 0.0 +602 72 model.embedding_dim 1.0 +602 72 optimizer.lr 0.013827965872196792 +602 72 negative_sampler.num_negs_per_pos 19.0 +602 72 training.batch_size 0.0 +602 73 model.embedding_dim 1.0 +602 73 optimizer.lr 0.03601093967214927 +602 73 negative_sampler.num_negs_per_pos 32.0 +602 73 training.batch_size 0.0 +602 74 model.embedding_dim 0.0 +602 74 optimizer.lr 0.06334375810522509 +602 74 negative_sampler.num_negs_per_pos 26.0 +602 74 training.batch_size 2.0 +602 75 model.embedding_dim 1.0 +602 75 optimizer.lr 0.002191856473269201 +602 75 negative_sampler.num_negs_per_pos 68.0 +602 75 training.batch_size 0.0 +602 76 model.embedding_dim 1.0 +602 76 optimizer.lr 0.030777653159768024 +602 76 negative_sampler.num_negs_per_pos 47.0 +602 76 training.batch_size 0.0 +602 77 model.embedding_dim 2.0 +602 77 optimizer.lr 0.013319991141733345 +602 77 negative_sampler.num_negs_per_pos 60.0 +602 77 training.batch_size 0.0 +602 78 model.embedding_dim 0.0 +602 78 optimizer.lr 0.01730964308543201 +602 78 negative_sampler.num_negs_per_pos 79.0 +602 78 training.batch_size 1.0 +602 79 model.embedding_dim 0.0 +602 79 optimizer.lr 0.007229549620164165 +602 79 
negative_sampler.num_negs_per_pos 49.0 +602 79 training.batch_size 2.0 +602 80 model.embedding_dim 2.0 +602 80 optimizer.lr 0.016500737764591707 +602 80 negative_sampler.num_negs_per_pos 99.0 +602 80 training.batch_size 2.0 +602 81 model.embedding_dim 2.0 +602 81 optimizer.lr 0.035054472305460986 +602 81 negative_sampler.num_negs_per_pos 62.0 +602 81 training.batch_size 1.0 +602 82 model.embedding_dim 2.0 +602 82 optimizer.lr 0.0661165341573203 +602 82 negative_sampler.num_negs_per_pos 90.0 +602 82 training.batch_size 2.0 +602 83 model.embedding_dim 1.0 +602 83 optimizer.lr 0.0017160991820023305 +602 83 negative_sampler.num_negs_per_pos 34.0 +602 83 training.batch_size 2.0 +602 84 model.embedding_dim 1.0 +602 84 optimizer.lr 0.0013004455905297157 +602 84 negative_sampler.num_negs_per_pos 24.0 +602 84 training.batch_size 0.0 +602 85 model.embedding_dim 0.0 +602 85 optimizer.lr 0.0069517733066286665 +602 85 negative_sampler.num_negs_per_pos 82.0 +602 85 training.batch_size 0.0 +602 86 model.embedding_dim 2.0 +602 86 optimizer.lr 0.005716946798495513 +602 86 negative_sampler.num_negs_per_pos 31.0 +602 86 training.batch_size 1.0 +602 87 model.embedding_dim 1.0 +602 87 optimizer.lr 0.0010697128218356124 +602 87 negative_sampler.num_negs_per_pos 6.0 +602 87 training.batch_size 1.0 +602 88 model.embedding_dim 2.0 +602 88 optimizer.lr 0.010601207392140034 +602 88 negative_sampler.num_negs_per_pos 93.0 +602 88 training.batch_size 2.0 +602 89 model.embedding_dim 1.0 +602 89 optimizer.lr 0.04695576232220832 +602 89 negative_sampler.num_negs_per_pos 3.0 +602 89 training.batch_size 2.0 +602 90 model.embedding_dim 2.0 +602 90 optimizer.lr 0.001427522827010056 +602 90 negative_sampler.num_negs_per_pos 85.0 +602 90 training.batch_size 1.0 +602 91 model.embedding_dim 1.0 +602 91 optimizer.lr 0.08285723340546318 +602 91 negative_sampler.num_negs_per_pos 1.0 +602 91 training.batch_size 0.0 +602 92 model.embedding_dim 2.0 +602 92 optimizer.lr 0.08631264755089375 +602 92 
negative_sampler.num_negs_per_pos 29.0 +602 92 training.batch_size 0.0 +602 93 model.embedding_dim 0.0 +602 93 optimizer.lr 0.07602087478932139 +602 93 negative_sampler.num_negs_per_pos 51.0 +602 93 training.batch_size 1.0 +602 94 model.embedding_dim 2.0 +602 94 optimizer.lr 0.03817711691371266 +602 94 negative_sampler.num_negs_per_pos 74.0 +602 94 training.batch_size 0.0 +602 95 model.embedding_dim 0.0 +602 95 optimizer.lr 0.013611794134907493 +602 95 negative_sampler.num_negs_per_pos 61.0 +602 95 training.batch_size 2.0 +602 96 model.embedding_dim 0.0 +602 96 optimizer.lr 0.030915601014483183 +602 96 negative_sampler.num_negs_per_pos 48.0 +602 96 training.batch_size 2.0 +602 97 model.embedding_dim 0.0 +602 97 optimizer.lr 0.0042386844266760326 +602 97 negative_sampler.num_negs_per_pos 39.0 +602 97 training.batch_size 2.0 +602 98 model.embedding_dim 0.0 +602 98 optimizer.lr 0.02518273084232888 +602 98 negative_sampler.num_negs_per_pos 85.0 +602 98 training.batch_size 0.0 +602 99 model.embedding_dim 2.0 +602 99 optimizer.lr 0.02155350027252173 +602 99 negative_sampler.num_negs_per_pos 8.0 +602 99 training.batch_size 2.0 +602 100 model.embedding_dim 0.0 +602 100 optimizer.lr 0.0327743333979184 +602 100 negative_sampler.num_negs_per_pos 16.0 +602 100 training.batch_size 2.0 +602 1 dataset """kinships""" +602 1 model """rotate""" +602 1 loss """softplus""" +602 1 regularizer """no""" +602 1 optimizer """adam""" +602 1 training_loop """owa""" +602 1 negative_sampler """basic""" +602 1 evaluator """rankbased""" +602 2 dataset """kinships""" +602 2 model """rotate""" +602 2 loss """softplus""" +602 2 regularizer """no""" +602 2 optimizer """adam""" +602 2 training_loop """owa""" +602 2 negative_sampler """basic""" +602 2 evaluator """rankbased""" +602 3 dataset """kinships""" +602 3 model """rotate""" +602 3 loss """softplus""" +602 3 regularizer """no""" +602 3 optimizer """adam""" +602 3 training_loop """owa""" +602 3 negative_sampler """basic""" +602 3 evaluator 
"""rankbased""" +602 4 dataset """kinships""" +602 4 model """rotate""" +602 4 loss """softplus""" +602 4 regularizer """no""" +602 4 optimizer """adam""" +602 4 training_loop """owa""" +602 4 negative_sampler """basic""" +602 4 evaluator """rankbased""" +602 5 dataset """kinships""" +602 5 model """rotate""" +602 5 loss """softplus""" +602 5 regularizer """no""" +602 5 optimizer """adam""" +602 5 training_loop """owa""" +602 5 negative_sampler """basic""" +602 5 evaluator """rankbased""" +602 6 dataset """kinships""" +602 6 model """rotate""" +602 6 loss """softplus""" +602 6 regularizer """no""" +602 6 optimizer """adam""" +602 6 training_loop """owa""" +602 6 negative_sampler """basic""" +602 6 evaluator """rankbased""" +602 7 dataset """kinships""" +602 7 model """rotate""" +602 7 loss """softplus""" +602 7 regularizer """no""" +602 7 optimizer """adam""" +602 7 training_loop """owa""" +602 7 negative_sampler """basic""" +602 7 evaluator """rankbased""" +602 8 dataset """kinships""" +602 8 model """rotate""" +602 8 loss """softplus""" +602 8 regularizer """no""" +602 8 optimizer """adam""" +602 8 training_loop """owa""" +602 8 negative_sampler """basic""" +602 8 evaluator """rankbased""" +602 9 dataset """kinships""" +602 9 model """rotate""" +602 9 loss """softplus""" +602 9 regularizer """no""" +602 9 optimizer """adam""" +602 9 training_loop """owa""" +602 9 negative_sampler """basic""" +602 9 evaluator """rankbased""" +602 10 dataset """kinships""" +602 10 model """rotate""" +602 10 loss """softplus""" +602 10 regularizer """no""" +602 10 optimizer """adam""" +602 10 training_loop """owa""" +602 10 negative_sampler """basic""" +602 10 evaluator """rankbased""" +602 11 dataset """kinships""" +602 11 model """rotate""" +602 11 loss """softplus""" +602 11 regularizer """no""" +602 11 optimizer """adam""" +602 11 training_loop """owa""" +602 11 negative_sampler """basic""" +602 11 evaluator """rankbased""" +602 12 dataset """kinships""" +602 12 model 
"""rotate""" +602 12 loss """softplus""" +602 12 regularizer """no""" +602 12 optimizer """adam""" +602 12 training_loop """owa""" +602 12 negative_sampler """basic""" +602 12 evaluator """rankbased""" +602 13 dataset """kinships""" +602 13 model """rotate""" +602 13 loss """softplus""" +602 13 regularizer """no""" +602 13 optimizer """adam""" +602 13 training_loop """owa""" +602 13 negative_sampler """basic""" +602 13 evaluator """rankbased""" +602 14 dataset """kinships""" +602 14 model """rotate""" +602 14 loss """softplus""" +602 14 regularizer """no""" +602 14 optimizer """adam""" +602 14 training_loop """owa""" +602 14 negative_sampler """basic""" +602 14 evaluator """rankbased""" +602 15 dataset """kinships""" +602 15 model """rotate""" +602 15 loss """softplus""" +602 15 regularizer """no""" +602 15 optimizer """adam""" +602 15 training_loop """owa""" +602 15 negative_sampler """basic""" +602 15 evaluator """rankbased""" +602 16 dataset """kinships""" +602 16 model """rotate""" +602 16 loss """softplus""" +602 16 regularizer """no""" +602 16 optimizer """adam""" +602 16 training_loop """owa""" +602 16 negative_sampler """basic""" +602 16 evaluator """rankbased""" +602 17 dataset """kinships""" +602 17 model """rotate""" +602 17 loss """softplus""" +602 17 regularizer """no""" +602 17 optimizer """adam""" +602 17 training_loop """owa""" +602 17 negative_sampler """basic""" +602 17 evaluator """rankbased""" +602 18 dataset """kinships""" +602 18 model """rotate""" +602 18 loss """softplus""" +602 18 regularizer """no""" +602 18 optimizer """adam""" +602 18 training_loop """owa""" +602 18 negative_sampler """basic""" +602 18 evaluator """rankbased""" +602 19 dataset """kinships""" +602 19 model """rotate""" +602 19 loss """softplus""" +602 19 regularizer """no""" +602 19 optimizer """adam""" +602 19 training_loop """owa""" +602 19 negative_sampler """basic""" +602 19 evaluator """rankbased""" +602 20 dataset """kinships""" +602 20 model """rotate""" +602 20 
loss """softplus""" +602 20 regularizer """no""" +602 20 optimizer """adam""" +602 20 training_loop """owa""" +602 20 negative_sampler """basic""" +602 20 evaluator """rankbased""" +602 21 dataset """kinships""" +602 21 model """rotate""" +602 21 loss """softplus""" +602 21 regularizer """no""" +602 21 optimizer """adam""" +602 21 training_loop """owa""" +602 21 negative_sampler """basic""" +602 21 evaluator """rankbased""" +602 22 dataset """kinships""" +602 22 model """rotate""" +602 22 loss """softplus""" +602 22 regularizer """no""" +602 22 optimizer """adam""" +602 22 training_loop """owa""" +602 22 negative_sampler """basic""" +602 22 evaluator """rankbased""" +602 23 dataset """kinships""" +602 23 model """rotate""" +602 23 loss """softplus""" +602 23 regularizer """no""" +602 23 optimizer """adam""" +602 23 training_loop """owa""" +602 23 negative_sampler """basic""" +602 23 evaluator """rankbased""" +602 24 dataset """kinships""" +602 24 model """rotate""" +602 24 loss """softplus""" +602 24 regularizer """no""" +602 24 optimizer """adam""" +602 24 training_loop """owa""" +602 24 negative_sampler """basic""" +602 24 evaluator """rankbased""" +602 25 dataset """kinships""" +602 25 model """rotate""" +602 25 loss """softplus""" +602 25 regularizer """no""" +602 25 optimizer """adam""" +602 25 training_loop """owa""" +602 25 negative_sampler """basic""" +602 25 evaluator """rankbased""" +602 26 dataset """kinships""" +602 26 model """rotate""" +602 26 loss """softplus""" +602 26 regularizer """no""" +602 26 optimizer """adam""" +602 26 training_loop """owa""" +602 26 negative_sampler """basic""" +602 26 evaluator """rankbased""" +602 27 dataset """kinships""" +602 27 model """rotate""" +602 27 loss """softplus""" +602 27 regularizer """no""" +602 27 optimizer """adam""" +602 27 training_loop """owa""" +602 27 negative_sampler """basic""" +602 27 evaluator """rankbased""" +602 28 dataset """kinships""" +602 28 model """rotate""" +602 28 loss """softplus""" 
+602 28 regularizer """no""" +602 28 optimizer """adam""" +602 28 training_loop """owa""" +602 28 negative_sampler """basic""" +602 28 evaluator """rankbased""" +602 29 dataset """kinships""" +602 29 model """rotate""" +602 29 loss """softplus""" +602 29 regularizer """no""" +602 29 optimizer """adam""" +602 29 training_loop """owa""" +602 29 negative_sampler """basic""" +602 29 evaluator """rankbased""" +602 30 dataset """kinships""" +602 30 model """rotate""" +602 30 loss """softplus""" +602 30 regularizer """no""" +602 30 optimizer """adam""" +602 30 training_loop """owa""" +602 30 negative_sampler """basic""" +602 30 evaluator """rankbased""" +602 31 dataset """kinships""" +602 31 model """rotate""" +602 31 loss """softplus""" +602 31 regularizer """no""" +602 31 optimizer """adam""" +602 31 training_loop """owa""" +602 31 negative_sampler """basic""" +602 31 evaluator """rankbased""" +602 32 dataset """kinships""" +602 32 model """rotate""" +602 32 loss """softplus""" +602 32 regularizer """no""" +602 32 optimizer """adam""" +602 32 training_loop """owa""" +602 32 negative_sampler """basic""" +602 32 evaluator """rankbased""" +602 33 dataset """kinships""" +602 33 model """rotate""" +602 33 loss """softplus""" +602 33 regularizer """no""" +602 33 optimizer """adam""" +602 33 training_loop """owa""" +602 33 negative_sampler """basic""" +602 33 evaluator """rankbased""" +602 34 dataset """kinships""" +602 34 model """rotate""" +602 34 loss """softplus""" +602 34 regularizer """no""" +602 34 optimizer """adam""" +602 34 training_loop """owa""" +602 34 negative_sampler """basic""" +602 34 evaluator """rankbased""" +602 35 dataset """kinships""" +602 35 model """rotate""" +602 35 loss """softplus""" +602 35 regularizer """no""" +602 35 optimizer """adam""" +602 35 training_loop """owa""" +602 35 negative_sampler """basic""" +602 35 evaluator """rankbased""" +602 36 dataset """kinships""" +602 36 model """rotate""" +602 36 loss """softplus""" +602 36 regularizer 
"""no""" +602 36 optimizer """adam""" +602 36 training_loop """owa""" +602 36 negative_sampler """basic""" +602 36 evaluator """rankbased""" +602 37 dataset """kinships""" +602 37 model """rotate""" +602 37 loss """softplus""" +602 37 regularizer """no""" +602 37 optimizer """adam""" +602 37 training_loop """owa""" +602 37 negative_sampler """basic""" +602 37 evaluator """rankbased""" +602 38 dataset """kinships""" +602 38 model """rotate""" +602 38 loss """softplus""" +602 38 regularizer """no""" +602 38 optimizer """adam""" +602 38 training_loop """owa""" +602 38 negative_sampler """basic""" +602 38 evaluator """rankbased""" +602 39 dataset """kinships""" +602 39 model """rotate""" +602 39 loss """softplus""" +602 39 regularizer """no""" +602 39 optimizer """adam""" +602 39 training_loop """owa""" +602 39 negative_sampler """basic""" +602 39 evaluator """rankbased""" +602 40 dataset """kinships""" +602 40 model """rotate""" +602 40 loss """softplus""" +602 40 regularizer """no""" +602 40 optimizer """adam""" +602 40 training_loop """owa""" +602 40 negative_sampler """basic""" +602 40 evaluator """rankbased""" +602 41 dataset """kinships""" +602 41 model """rotate""" +602 41 loss """softplus""" +602 41 regularizer """no""" +602 41 optimizer """adam""" +602 41 training_loop """owa""" +602 41 negative_sampler """basic""" +602 41 evaluator """rankbased""" +602 42 dataset """kinships""" +602 42 model """rotate""" +602 42 loss """softplus""" +602 42 regularizer """no""" +602 42 optimizer """adam""" +602 42 training_loop """owa""" +602 42 negative_sampler """basic""" +602 42 evaluator """rankbased""" +602 43 dataset """kinships""" +602 43 model """rotate""" +602 43 loss """softplus""" +602 43 regularizer """no""" +602 43 optimizer """adam""" +602 43 training_loop """owa""" +602 43 negative_sampler """basic""" +602 43 evaluator """rankbased""" +602 44 dataset """kinships""" +602 44 model """rotate""" +602 44 loss """softplus""" +602 44 regularizer """no""" +602 44 
optimizer """adam""" +602 44 training_loop """owa""" +602 44 negative_sampler """basic""" +602 44 evaluator """rankbased""" +602 45 dataset """kinships""" +602 45 model """rotate""" +602 45 loss """softplus""" +602 45 regularizer """no""" +602 45 optimizer """adam""" +602 45 training_loop """owa""" +602 45 negative_sampler """basic""" +602 45 evaluator """rankbased""" +602 46 dataset """kinships""" +602 46 model """rotate""" +602 46 loss """softplus""" +602 46 regularizer """no""" +602 46 optimizer """adam""" +602 46 training_loop """owa""" +602 46 negative_sampler """basic""" +602 46 evaluator """rankbased""" +602 47 dataset """kinships""" +602 47 model """rotate""" +602 47 loss """softplus""" +602 47 regularizer """no""" +602 47 optimizer """adam""" +602 47 training_loop """owa""" +602 47 negative_sampler """basic""" +602 47 evaluator """rankbased""" +602 48 dataset """kinships""" +602 48 model """rotate""" +602 48 loss """softplus""" +602 48 regularizer """no""" +602 48 optimizer """adam""" +602 48 training_loop """owa""" +602 48 negative_sampler """basic""" +602 48 evaluator """rankbased""" +602 49 dataset """kinships""" +602 49 model """rotate""" +602 49 loss """softplus""" +602 49 regularizer """no""" +602 49 optimizer """adam""" +602 49 training_loop """owa""" +602 49 negative_sampler """basic""" +602 49 evaluator """rankbased""" +602 50 dataset """kinships""" +602 50 model """rotate""" +602 50 loss """softplus""" +602 50 regularizer """no""" +602 50 optimizer """adam""" +602 50 training_loop """owa""" +602 50 negative_sampler """basic""" +602 50 evaluator """rankbased""" +602 51 dataset """kinships""" +602 51 model """rotate""" +602 51 loss """softplus""" +602 51 regularizer """no""" +602 51 optimizer """adam""" +602 51 training_loop """owa""" +602 51 negative_sampler """basic""" +602 51 evaluator """rankbased""" +602 52 dataset """kinships""" +602 52 model """rotate""" +602 52 loss """softplus""" +602 52 regularizer """no""" +602 52 optimizer """adam""" 
+602 52 training_loop """owa""" +602 52 negative_sampler """basic""" +602 52 evaluator """rankbased""" +602 53 dataset """kinships""" +602 53 model """rotate""" +602 53 loss """softplus""" +602 53 regularizer """no""" +602 53 optimizer """adam""" +602 53 training_loop """owa""" +602 53 negative_sampler """basic""" +602 53 evaluator """rankbased""" +602 54 dataset """kinships""" +602 54 model """rotate""" +602 54 loss """softplus""" +602 54 regularizer """no""" +602 54 optimizer """adam""" +602 54 training_loop """owa""" +602 54 negative_sampler """basic""" +602 54 evaluator """rankbased""" +602 55 dataset """kinships""" +602 55 model """rotate""" +602 55 loss """softplus""" +602 55 regularizer """no""" +602 55 optimizer """adam""" +602 55 training_loop """owa""" +602 55 negative_sampler """basic""" +602 55 evaluator """rankbased""" +602 56 dataset """kinships""" +602 56 model """rotate""" +602 56 loss """softplus""" +602 56 regularizer """no""" +602 56 optimizer """adam""" +602 56 training_loop """owa""" +602 56 negative_sampler """basic""" +602 56 evaluator """rankbased""" +602 57 dataset """kinships""" +602 57 model """rotate""" +602 57 loss """softplus""" +602 57 regularizer """no""" +602 57 optimizer """adam""" +602 57 training_loop """owa""" +602 57 negative_sampler """basic""" +602 57 evaluator """rankbased""" +602 58 dataset """kinships""" +602 58 model """rotate""" +602 58 loss """softplus""" +602 58 regularizer """no""" +602 58 optimizer """adam""" +602 58 training_loop """owa""" +602 58 negative_sampler """basic""" +602 58 evaluator """rankbased""" +602 59 dataset """kinships""" +602 59 model """rotate""" +602 59 loss """softplus""" +602 59 regularizer """no""" +602 59 optimizer """adam""" +602 59 training_loop """owa""" +602 59 negative_sampler """basic""" +602 59 evaluator """rankbased""" +602 60 dataset """kinships""" +602 60 model """rotate""" +602 60 loss """softplus""" +602 60 regularizer """no""" +602 60 optimizer """adam""" +602 60 training_loop 
"""owa""" +602 60 negative_sampler """basic""" +602 60 evaluator """rankbased""" +602 61 dataset """kinships""" +602 61 model """rotate""" +602 61 loss """softplus""" +602 61 regularizer """no""" +602 61 optimizer """adam""" +602 61 training_loop """owa""" +602 61 negative_sampler """basic""" +602 61 evaluator """rankbased""" +602 62 dataset """kinships""" +602 62 model """rotate""" +602 62 loss """softplus""" +602 62 regularizer """no""" +602 62 optimizer """adam""" +602 62 training_loop """owa""" +602 62 negative_sampler """basic""" +602 62 evaluator """rankbased""" +602 63 dataset """kinships""" +602 63 model """rotate""" +602 63 loss """softplus""" +602 63 regularizer """no""" +602 63 optimizer """adam""" +602 63 training_loop """owa""" +602 63 negative_sampler """basic""" +602 63 evaluator """rankbased""" +602 64 dataset """kinships""" +602 64 model """rotate""" +602 64 loss """softplus""" +602 64 regularizer """no""" +602 64 optimizer """adam""" +602 64 training_loop """owa""" +602 64 negative_sampler """basic""" +602 64 evaluator """rankbased""" +602 65 dataset """kinships""" +602 65 model """rotate""" +602 65 loss """softplus""" +602 65 regularizer """no""" +602 65 optimizer """adam""" +602 65 training_loop """owa""" +602 65 negative_sampler """basic""" +602 65 evaluator """rankbased""" +602 66 dataset """kinships""" +602 66 model """rotate""" +602 66 loss """softplus""" +602 66 regularizer """no""" +602 66 optimizer """adam""" +602 66 training_loop """owa""" +602 66 negative_sampler """basic""" +602 66 evaluator """rankbased""" +602 67 dataset """kinships""" +602 67 model """rotate""" +602 67 loss """softplus""" +602 67 regularizer """no""" +602 67 optimizer """adam""" +602 67 training_loop """owa""" +602 67 negative_sampler """basic""" +602 67 evaluator """rankbased""" +602 68 dataset """kinships""" +602 68 model """rotate""" +602 68 loss """softplus""" +602 68 regularizer """no""" +602 68 optimizer """adam""" +602 68 training_loop """owa""" +602 68 
negative_sampler """basic""" +602 68 evaluator """rankbased""" +602 69 dataset """kinships""" +602 69 model """rotate""" +602 69 loss """softplus""" +602 69 regularizer """no""" +602 69 optimizer """adam""" +602 69 training_loop """owa""" +602 69 negative_sampler """basic""" +602 69 evaluator """rankbased""" +602 70 dataset """kinships""" +602 70 model """rotate""" +602 70 loss """softplus""" +602 70 regularizer """no""" +602 70 optimizer """adam""" +602 70 training_loop """owa""" +602 70 negative_sampler """basic""" +602 70 evaluator """rankbased""" +602 71 dataset """kinships""" +602 71 model """rotate""" +602 71 loss """softplus""" +602 71 regularizer """no""" +602 71 optimizer """adam""" +602 71 training_loop """owa""" +602 71 negative_sampler """basic""" +602 71 evaluator """rankbased""" +602 72 dataset """kinships""" +602 72 model """rotate""" +602 72 loss """softplus""" +602 72 regularizer """no""" +602 72 optimizer """adam""" +602 72 training_loop """owa""" +602 72 negative_sampler """basic""" +602 72 evaluator """rankbased""" +602 73 dataset """kinships""" +602 73 model """rotate""" +602 73 loss """softplus""" +602 73 regularizer """no""" +602 73 optimizer """adam""" +602 73 training_loop """owa""" +602 73 negative_sampler """basic""" +602 73 evaluator """rankbased""" +602 74 dataset """kinships""" +602 74 model """rotate""" +602 74 loss """softplus""" +602 74 regularizer """no""" +602 74 optimizer """adam""" +602 74 training_loop """owa""" +602 74 negative_sampler """basic""" +602 74 evaluator """rankbased""" +602 75 dataset """kinships""" +602 75 model """rotate""" +602 75 loss """softplus""" +602 75 regularizer """no""" +602 75 optimizer """adam""" +602 75 training_loop """owa""" +602 75 negative_sampler """basic""" +602 75 evaluator """rankbased""" +602 76 dataset """kinships""" +602 76 model """rotate""" +602 76 loss """softplus""" +602 76 regularizer """no""" +602 76 optimizer """adam""" +602 76 training_loop """owa""" +602 76 negative_sampler 
"""basic""" +602 76 evaluator """rankbased""" +602 77 dataset """kinships""" +602 77 model """rotate""" +602 77 loss """softplus""" +602 77 regularizer """no""" +602 77 optimizer """adam""" +602 77 training_loop """owa""" +602 77 negative_sampler """basic""" +602 77 evaluator """rankbased""" +602 78 dataset """kinships""" +602 78 model """rotate""" +602 78 loss """softplus""" +602 78 regularizer """no""" +602 78 optimizer """adam""" +602 78 training_loop """owa""" +602 78 negative_sampler """basic""" +602 78 evaluator """rankbased""" +602 79 dataset """kinships""" +602 79 model """rotate""" +602 79 loss """softplus""" +602 79 regularizer """no""" +602 79 optimizer """adam""" +602 79 training_loop """owa""" +602 79 negative_sampler """basic""" +602 79 evaluator """rankbased""" +602 80 dataset """kinships""" +602 80 model """rotate""" +602 80 loss """softplus""" +602 80 regularizer """no""" +602 80 optimizer """adam""" +602 80 training_loop """owa""" +602 80 negative_sampler """basic""" +602 80 evaluator """rankbased""" +602 81 dataset """kinships""" +602 81 model """rotate""" +602 81 loss """softplus""" +602 81 regularizer """no""" +602 81 optimizer """adam""" +602 81 training_loop """owa""" +602 81 negative_sampler """basic""" +602 81 evaluator """rankbased""" +602 82 dataset """kinships""" +602 82 model """rotate""" +602 82 loss """softplus""" +602 82 regularizer """no""" +602 82 optimizer """adam""" +602 82 training_loop """owa""" +602 82 negative_sampler """basic""" +602 82 evaluator """rankbased""" +602 83 dataset """kinships""" +602 83 model """rotate""" +602 83 loss """softplus""" +602 83 regularizer """no""" +602 83 optimizer """adam""" +602 83 training_loop """owa""" +602 83 negative_sampler """basic""" +602 83 evaluator """rankbased""" +602 84 dataset """kinships""" +602 84 model """rotate""" +602 84 loss """softplus""" +602 84 regularizer """no""" +602 84 optimizer """adam""" +602 84 training_loop """owa""" +602 84 negative_sampler """basic""" +602 84 
evaluator """rankbased""" +602 85 dataset """kinships""" +602 85 model """rotate""" +602 85 loss """softplus""" +602 85 regularizer """no""" +602 85 optimizer """adam""" +602 85 training_loop """owa""" +602 85 negative_sampler """basic""" +602 85 evaluator """rankbased""" +602 86 dataset """kinships""" +602 86 model """rotate""" +602 86 loss """softplus""" +602 86 regularizer """no""" +602 86 optimizer """adam""" +602 86 training_loop """owa""" +602 86 negative_sampler """basic""" +602 86 evaluator """rankbased""" +602 87 dataset """kinships""" +602 87 model """rotate""" +602 87 loss """softplus""" +602 87 regularizer """no""" +602 87 optimizer """adam""" +602 87 training_loop """owa""" +602 87 negative_sampler """basic""" +602 87 evaluator """rankbased""" +602 88 dataset """kinships""" +602 88 model """rotate""" +602 88 loss """softplus""" +602 88 regularizer """no""" +602 88 optimizer """adam""" +602 88 training_loop """owa""" +602 88 negative_sampler """basic""" +602 88 evaluator """rankbased""" +602 89 dataset """kinships""" +602 89 model """rotate""" +602 89 loss """softplus""" +602 89 regularizer """no""" +602 89 optimizer """adam""" +602 89 training_loop """owa""" +602 89 negative_sampler """basic""" +602 89 evaluator """rankbased""" +602 90 dataset """kinships""" +602 90 model """rotate""" +602 90 loss """softplus""" +602 90 regularizer """no""" +602 90 optimizer """adam""" +602 90 training_loop """owa""" +602 90 negative_sampler """basic""" +602 90 evaluator """rankbased""" +602 91 dataset """kinships""" +602 91 model """rotate""" +602 91 loss """softplus""" +602 91 regularizer """no""" +602 91 optimizer """adam""" +602 91 training_loop """owa""" +602 91 negative_sampler """basic""" +602 91 evaluator """rankbased""" +602 92 dataset """kinships""" +602 92 model """rotate""" +602 92 loss """softplus""" +602 92 regularizer """no""" +602 92 optimizer """adam""" +602 92 training_loop """owa""" +602 92 negative_sampler """basic""" +602 92 evaluator 
"""rankbased""" +602 93 dataset """kinships""" +602 93 model """rotate""" +602 93 loss """softplus""" +602 93 regularizer """no""" +602 93 optimizer """adam""" +602 93 training_loop """owa""" +602 93 negative_sampler """basic""" +602 93 evaluator """rankbased""" +602 94 dataset """kinships""" +602 94 model """rotate""" +602 94 loss """softplus""" +602 94 regularizer """no""" +602 94 optimizer """adam""" +602 94 training_loop """owa""" +602 94 negative_sampler """basic""" +602 94 evaluator """rankbased""" +602 95 dataset """kinships""" +602 95 model """rotate""" +602 95 loss """softplus""" +602 95 regularizer """no""" +602 95 optimizer """adam""" +602 95 training_loop """owa""" +602 95 negative_sampler """basic""" +602 95 evaluator """rankbased""" +602 96 dataset """kinships""" +602 96 model """rotate""" +602 96 loss """softplus""" +602 96 regularizer """no""" +602 96 optimizer """adam""" +602 96 training_loop """owa""" +602 96 negative_sampler """basic""" +602 96 evaluator """rankbased""" +602 97 dataset """kinships""" +602 97 model """rotate""" +602 97 loss """softplus""" +602 97 regularizer """no""" +602 97 optimizer """adam""" +602 97 training_loop """owa""" +602 97 negative_sampler """basic""" +602 97 evaluator """rankbased""" +602 98 dataset """kinships""" +602 98 model """rotate""" +602 98 loss """softplus""" +602 98 regularizer """no""" +602 98 optimizer """adam""" +602 98 training_loop """owa""" +602 98 negative_sampler """basic""" +602 98 evaluator """rankbased""" +602 99 dataset """kinships""" +602 99 model """rotate""" +602 99 loss """softplus""" +602 99 regularizer """no""" +602 99 optimizer """adam""" +602 99 training_loop """owa""" +602 99 negative_sampler """basic""" +602 99 evaluator """rankbased""" +602 100 dataset """kinships""" +602 100 model """rotate""" +602 100 loss """softplus""" +602 100 regularizer """no""" +602 100 optimizer """adam""" +602 100 training_loop """owa""" +602 100 negative_sampler """basic""" +602 100 evaluator """rankbased""" 
+603 1 model.embedding_dim 2.0 +603 1 loss.margin 1.6820558074870238 +603 1 optimizer.lr 0.002582720521591747 +603 1 negative_sampler.num_negs_per_pos 57.0 +603 1 training.batch_size 1.0 +603 2 model.embedding_dim 0.0 +603 2 loss.margin 9.059806419116958 +603 2 optimizer.lr 0.03022793220198008 +603 2 negative_sampler.num_negs_per_pos 39.0 +603 2 training.batch_size 0.0 +603 3 model.embedding_dim 1.0 +603 3 loss.margin 1.572852895462075 +603 3 optimizer.lr 0.0016236800644572665 +603 3 negative_sampler.num_negs_per_pos 22.0 +603 3 training.batch_size 0.0 +603 4 model.embedding_dim 2.0 +603 4 loss.margin 7.437975474685172 +603 4 optimizer.lr 0.0016570575329743096 +603 4 negative_sampler.num_negs_per_pos 7.0 +603 4 training.batch_size 1.0 +603 5 model.embedding_dim 0.0 +603 5 loss.margin 8.538261338336552 +603 5 optimizer.lr 0.012306668271811766 +603 5 negative_sampler.num_negs_per_pos 61.0 +603 5 training.batch_size 0.0 +603 6 model.embedding_dim 2.0 +603 6 loss.margin 7.408746317903788 +603 6 optimizer.lr 0.04357422912754314 +603 6 negative_sampler.num_negs_per_pos 0.0 +603 6 training.batch_size 1.0 +603 7 model.embedding_dim 1.0 +603 7 loss.margin 3.6806358196795252 +603 7 optimizer.lr 0.03620667336780345 +603 7 negative_sampler.num_negs_per_pos 56.0 +603 7 training.batch_size 1.0 +603 8 model.embedding_dim 0.0 +603 8 loss.margin 2.0476195640965273 +603 8 optimizer.lr 0.018170645591647487 +603 8 negative_sampler.num_negs_per_pos 46.0 +603 8 training.batch_size 2.0 +603 9 model.embedding_dim 0.0 +603 9 loss.margin 7.98330466224173 +603 9 optimizer.lr 0.038283319716467354 +603 9 negative_sampler.num_negs_per_pos 65.0 +603 9 training.batch_size 1.0 +603 10 model.embedding_dim 0.0 +603 10 loss.margin 6.650417094758346 +603 10 optimizer.lr 0.019496822356690275 +603 10 negative_sampler.num_negs_per_pos 31.0 +603 10 training.batch_size 2.0 +603 11 model.embedding_dim 2.0 +603 11 loss.margin 5.615810566998138 +603 11 optimizer.lr 0.01627103586690864 +603 11 
negative_sampler.num_negs_per_pos 31.0 +603 11 training.batch_size 1.0 +603 12 model.embedding_dim 2.0 +603 12 loss.margin 9.366361504910223 +603 12 optimizer.lr 0.0032350711235084964 +603 12 negative_sampler.num_negs_per_pos 51.0 +603 12 training.batch_size 0.0 +603 13 model.embedding_dim 2.0 +603 13 loss.margin 8.587558130004794 +603 13 optimizer.lr 0.011244593507494774 +603 13 negative_sampler.num_negs_per_pos 9.0 +603 13 training.batch_size 1.0 +603 14 model.embedding_dim 0.0 +603 14 loss.margin 9.665351774858415 +603 14 optimizer.lr 0.01261434485603327 +603 14 negative_sampler.num_negs_per_pos 71.0 +603 14 training.batch_size 1.0 +603 15 model.embedding_dim 2.0 +603 15 loss.margin 0.5342902524787737 +603 15 optimizer.lr 0.004946316396080272 +603 15 negative_sampler.num_negs_per_pos 45.0 +603 15 training.batch_size 1.0 +603 16 model.embedding_dim 2.0 +603 16 loss.margin 4.077394905887525 +603 16 optimizer.lr 0.0014890689810607143 +603 16 negative_sampler.num_negs_per_pos 79.0 +603 16 training.batch_size 2.0 +603 17 model.embedding_dim 1.0 +603 17 loss.margin 7.047686800753792 +603 17 optimizer.lr 0.07836054574197335 +603 17 negative_sampler.num_negs_per_pos 82.0 +603 17 training.batch_size 0.0 +603 18 model.embedding_dim 2.0 +603 18 loss.margin 1.964315876827647 +603 18 optimizer.lr 0.002659092909478412 +603 18 negative_sampler.num_negs_per_pos 32.0 +603 18 training.batch_size 1.0 +603 19 model.embedding_dim 1.0 +603 19 loss.margin 2.8879949049658973 +603 19 optimizer.lr 0.026594711036479673 +603 19 negative_sampler.num_negs_per_pos 40.0 +603 19 training.batch_size 0.0 +603 20 model.embedding_dim 2.0 +603 20 loss.margin 1.1721450361629862 +603 20 optimizer.lr 0.006693229354202206 +603 20 negative_sampler.num_negs_per_pos 64.0 +603 20 training.batch_size 0.0 +603 21 model.embedding_dim 1.0 +603 21 loss.margin 6.2644928814922896 +603 21 optimizer.lr 0.01188150719948214 +603 21 negative_sampler.num_negs_per_pos 43.0 +603 21 training.batch_size 1.0 +603 22 
model.embedding_dim 2.0 +603 22 loss.margin 2.419451802876618 +603 22 optimizer.lr 0.001901453260451045 +603 22 negative_sampler.num_negs_per_pos 52.0 +603 22 training.batch_size 1.0 +603 23 model.embedding_dim 2.0 +603 23 loss.margin 7.8956380910425 +603 23 optimizer.lr 0.06454762769356079 +603 23 negative_sampler.num_negs_per_pos 37.0 +603 23 training.batch_size 0.0 +603 24 model.embedding_dim 1.0 +603 24 loss.margin 6.748526508151129 +603 24 optimizer.lr 0.006084034045255063 +603 24 negative_sampler.num_negs_per_pos 58.0 +603 24 training.batch_size 2.0 +603 25 model.embedding_dim 1.0 +603 25 loss.margin 6.752210786801407 +603 25 optimizer.lr 0.0044741997584118245 +603 25 negative_sampler.num_negs_per_pos 65.0 +603 25 training.batch_size 2.0 +603 26 model.embedding_dim 1.0 +603 26 loss.margin 1.1066409884811974 +603 26 optimizer.lr 0.013640075014047839 +603 26 negative_sampler.num_negs_per_pos 89.0 +603 26 training.batch_size 0.0 +603 27 model.embedding_dim 2.0 +603 27 loss.margin 3.5796509760349764 +603 27 optimizer.lr 0.0038827339244421858 +603 27 negative_sampler.num_negs_per_pos 5.0 +603 27 training.batch_size 1.0 +603 28 model.embedding_dim 0.0 +603 28 loss.margin 8.936505042546449 +603 28 optimizer.lr 0.0023227676337825858 +603 28 negative_sampler.num_negs_per_pos 5.0 +603 28 training.batch_size 2.0 +603 29 model.embedding_dim 2.0 +603 29 loss.margin 2.1969009265022335 +603 29 optimizer.lr 0.0018333909356122147 +603 29 negative_sampler.num_negs_per_pos 71.0 +603 29 training.batch_size 0.0 +603 30 model.embedding_dim 0.0 +603 30 loss.margin 8.750259285841707 +603 30 optimizer.lr 0.04951447211970652 +603 30 negative_sampler.num_negs_per_pos 69.0 +603 30 training.batch_size 1.0 +603 31 model.embedding_dim 0.0 +603 31 loss.margin 3.0085676495906677 +603 31 optimizer.lr 0.008683225700472853 +603 31 negative_sampler.num_negs_per_pos 81.0 +603 31 training.batch_size 2.0 +603 32 model.embedding_dim 0.0 +603 32 loss.margin 8.995233046589346 +603 32 optimizer.lr 
0.0016173716884010868 +603 32 negative_sampler.num_negs_per_pos 6.0 +603 32 training.batch_size 1.0 +603 33 model.embedding_dim 1.0 +603 33 loss.margin 5.893251248737065 +603 33 optimizer.lr 0.01550187912365185 +603 33 negative_sampler.num_negs_per_pos 50.0 +603 33 training.batch_size 2.0 +603 34 model.embedding_dim 1.0 +603 34 loss.margin 6.007648932303627 +603 34 optimizer.lr 0.013520714376555498 +603 34 negative_sampler.num_negs_per_pos 4.0 +603 34 training.batch_size 2.0 +603 35 model.embedding_dim 0.0 +603 35 loss.margin 8.475128891806687 +603 35 optimizer.lr 0.0011836132979767286 +603 35 negative_sampler.num_negs_per_pos 34.0 +603 35 training.batch_size 1.0 +603 36 model.embedding_dim 1.0 +603 36 loss.margin 4.86966104511877 +603 36 optimizer.lr 0.004616999414773538 +603 36 negative_sampler.num_negs_per_pos 84.0 +603 36 training.batch_size 1.0 +603 37 model.embedding_dim 2.0 +603 37 loss.margin 7.788378712820975 +603 37 optimizer.lr 0.016568679927278042 +603 37 negative_sampler.num_negs_per_pos 93.0 +603 37 training.batch_size 2.0 +603 38 model.embedding_dim 1.0 +603 38 loss.margin 1.686545860430147 +603 38 optimizer.lr 0.01625593279823945 +603 38 negative_sampler.num_negs_per_pos 10.0 +603 38 training.batch_size 1.0 +603 39 model.embedding_dim 1.0 +603 39 loss.margin 5.48749515738548 +603 39 optimizer.lr 0.012374293761038992 +603 39 negative_sampler.num_negs_per_pos 76.0 +603 39 training.batch_size 1.0 +603 40 model.embedding_dim 0.0 +603 40 loss.margin 3.1182906706135616 +603 40 optimizer.lr 0.0014606702047223682 +603 40 negative_sampler.num_negs_per_pos 28.0 +603 40 training.batch_size 1.0 +603 41 model.embedding_dim 1.0 +603 41 loss.margin 5.730722330766929 +603 41 optimizer.lr 0.00825979453055697 +603 41 negative_sampler.num_negs_per_pos 85.0 +603 41 training.batch_size 2.0 +603 42 model.embedding_dim 2.0 +603 42 loss.margin 5.756376086916305 +603 42 optimizer.lr 0.0025858662494961963 +603 42 negative_sampler.num_negs_per_pos 41.0 +603 42 
training.batch_size 1.0 +603 43 model.embedding_dim 1.0 +603 43 loss.margin 7.896275733362518 +603 43 optimizer.lr 0.0025300251032611526 +603 43 negative_sampler.num_negs_per_pos 45.0 +603 43 training.batch_size 0.0 +603 44 model.embedding_dim 2.0 +603 44 loss.margin 2.4063563136243196 +603 44 optimizer.lr 0.014609365772480032 +603 44 negative_sampler.num_negs_per_pos 7.0 +603 44 training.batch_size 1.0 +603 45 model.embedding_dim 2.0 +603 45 loss.margin 9.58711852963894 +603 45 optimizer.lr 0.04454522661872962 +603 45 negative_sampler.num_negs_per_pos 29.0 +603 45 training.batch_size 1.0 +603 46 model.embedding_dim 1.0 +603 46 loss.margin 2.4021944463030462 +603 46 optimizer.lr 0.017376118141383064 +603 46 negative_sampler.num_negs_per_pos 55.0 +603 46 training.batch_size 2.0 +603 47 model.embedding_dim 2.0 +603 47 loss.margin 4.347363263493862 +603 47 optimizer.lr 0.08394255767845074 +603 47 negative_sampler.num_negs_per_pos 34.0 +603 47 training.batch_size 0.0 +603 48 model.embedding_dim 0.0 +603 48 loss.margin 0.9105426344839473 +603 48 optimizer.lr 0.001289664601262844 +603 48 negative_sampler.num_negs_per_pos 95.0 +603 48 training.batch_size 0.0 +603 49 model.embedding_dim 2.0 +603 49 loss.margin 4.941282404701896 +603 49 optimizer.lr 0.040705565735999774 +603 49 negative_sampler.num_negs_per_pos 48.0 +603 49 training.batch_size 0.0 +603 50 model.embedding_dim 1.0 +603 50 loss.margin 3.2213232977367046 +603 50 optimizer.lr 0.004447645848521952 +603 50 negative_sampler.num_negs_per_pos 44.0 +603 50 training.batch_size 0.0 +603 51 model.embedding_dim 2.0 +603 51 loss.margin 4.07818669645649 +603 51 optimizer.lr 0.007875767617988092 +603 51 negative_sampler.num_negs_per_pos 0.0 +603 51 training.batch_size 1.0 +603 52 model.embedding_dim 0.0 +603 52 loss.margin 3.8094494984978557 +603 52 optimizer.lr 0.001744794290890727 +603 52 negative_sampler.num_negs_per_pos 63.0 +603 52 training.batch_size 1.0 +603 53 model.embedding_dim 2.0 +603 53 loss.margin 
5.364472533701058 +603 53 optimizer.lr 0.0014028022941578082 +603 53 negative_sampler.num_negs_per_pos 92.0 +603 53 training.batch_size 0.0 +603 54 model.embedding_dim 2.0 +603 54 loss.margin 9.8740502089473 +603 54 optimizer.lr 0.015950258703975956 +603 54 negative_sampler.num_negs_per_pos 15.0 +603 54 training.batch_size 1.0 +603 55 model.embedding_dim 0.0 +603 55 loss.margin 8.885006567562582 +603 55 optimizer.lr 0.03330346460537049 +603 55 negative_sampler.num_negs_per_pos 94.0 +603 55 training.batch_size 0.0 +603 56 model.embedding_dim 2.0 +603 56 loss.margin 4.1283505483483856 +603 56 optimizer.lr 0.002409381599762729 +603 56 negative_sampler.num_negs_per_pos 59.0 +603 56 training.batch_size 1.0 +603 57 model.embedding_dim 2.0 +603 57 loss.margin 4.57669448648361 +603 57 optimizer.lr 0.0021095462207301654 +603 57 negative_sampler.num_negs_per_pos 36.0 +603 57 training.batch_size 2.0 +603 58 model.embedding_dim 2.0 +603 58 loss.margin 2.466170272668954 +603 58 optimizer.lr 0.001053740433144946 +603 58 negative_sampler.num_negs_per_pos 84.0 +603 58 training.batch_size 1.0 +603 59 model.embedding_dim 2.0 +603 59 loss.margin 2.379156231652665 +603 59 optimizer.lr 0.0025862374731284794 +603 59 negative_sampler.num_negs_per_pos 34.0 +603 59 training.batch_size 0.0 +603 60 model.embedding_dim 2.0 +603 60 loss.margin 1.2252023623786619 +603 60 optimizer.lr 0.006619210025452276 +603 60 negative_sampler.num_negs_per_pos 51.0 +603 60 training.batch_size 1.0 +603 61 model.embedding_dim 2.0 +603 61 loss.margin 5.110991851201934 +603 61 optimizer.lr 0.002895150474298981 +603 61 negative_sampler.num_negs_per_pos 95.0 +603 61 training.batch_size 2.0 +603 62 model.embedding_dim 2.0 +603 62 loss.margin 7.174226455432535 +603 62 optimizer.lr 0.017504233549780254 +603 62 negative_sampler.num_negs_per_pos 41.0 +603 62 training.batch_size 2.0 +603 63 model.embedding_dim 0.0 +603 63 loss.margin 7.36258127388722 +603 63 optimizer.lr 0.05192791919088044 +603 63 
negative_sampler.num_negs_per_pos 28.0 +603 63 training.batch_size 2.0 +603 64 model.embedding_dim 0.0 +603 64 loss.margin 7.504283489836751 +603 64 optimizer.lr 0.009576047114830212 +603 64 negative_sampler.num_negs_per_pos 16.0 +603 64 training.batch_size 0.0 +603 65 model.embedding_dim 1.0 +603 65 loss.margin 5.414235296559168 +603 65 optimizer.lr 0.00855333410270147 +603 65 negative_sampler.num_negs_per_pos 91.0 +603 65 training.batch_size 0.0 +603 66 model.embedding_dim 1.0 +603 66 loss.margin 6.453184436107208 +603 66 optimizer.lr 0.04975062170352552 +603 66 negative_sampler.num_negs_per_pos 20.0 +603 66 training.batch_size 1.0 +603 67 model.embedding_dim 0.0 +603 67 loss.margin 6.938451575627584 +603 67 optimizer.lr 0.07618577576567521 +603 67 negative_sampler.num_negs_per_pos 8.0 +603 67 training.batch_size 2.0 +603 68 model.embedding_dim 0.0 +603 68 loss.margin 1.9442367412981374 +603 68 optimizer.lr 0.0022911021279500886 +603 68 negative_sampler.num_negs_per_pos 82.0 +603 68 training.batch_size 2.0 +603 69 model.embedding_dim 0.0 +603 69 loss.margin 4.985199298117001 +603 69 optimizer.lr 0.002395235339636853 +603 69 negative_sampler.num_negs_per_pos 32.0 +603 69 training.batch_size 2.0 +603 70 model.embedding_dim 2.0 +603 70 loss.margin 1.981437703894005 +603 70 optimizer.lr 0.003891308024919231 +603 70 negative_sampler.num_negs_per_pos 41.0 +603 70 training.batch_size 1.0 +603 71 model.embedding_dim 2.0 +603 71 loss.margin 9.36960277358635 +603 71 optimizer.lr 0.004297002073060959 +603 71 negative_sampler.num_negs_per_pos 54.0 +603 71 training.batch_size 2.0 +603 72 model.embedding_dim 0.0 +603 72 loss.margin 6.750905860633645 +603 72 optimizer.lr 0.009455267772441007 +603 72 negative_sampler.num_negs_per_pos 25.0 +603 72 training.batch_size 2.0 +603 73 model.embedding_dim 0.0 +603 73 loss.margin 2.86134727627441 +603 73 optimizer.lr 0.0016794544377797915 +603 73 negative_sampler.num_negs_per_pos 41.0 +603 73 training.batch_size 1.0 +603 74 
model.embedding_dim 1.0 +603 74 loss.margin 5.963850395663574 +603 74 optimizer.lr 0.0020331252731847745 +603 74 negative_sampler.num_negs_per_pos 36.0 +603 74 training.batch_size 1.0 +603 75 model.embedding_dim 2.0 +603 75 loss.margin 8.557074896716232 +603 75 optimizer.lr 0.001917378129404161 +603 75 negative_sampler.num_negs_per_pos 59.0 +603 75 training.batch_size 0.0 +603 76 model.embedding_dim 2.0 +603 76 loss.margin 2.0286952360088226 +603 76 optimizer.lr 0.007880940755634867 +603 76 negative_sampler.num_negs_per_pos 16.0 +603 76 training.batch_size 2.0 +603 77 model.embedding_dim 2.0 +603 77 loss.margin 5.737889827564856 +603 77 optimizer.lr 0.010909251493077483 +603 77 negative_sampler.num_negs_per_pos 7.0 +603 77 training.batch_size 2.0 +603 78 model.embedding_dim 2.0 +603 78 loss.margin 6.005642783754205 +603 78 optimizer.lr 0.05126658097261532 +603 78 negative_sampler.num_negs_per_pos 31.0 +603 78 training.batch_size 0.0 +603 79 model.embedding_dim 2.0 +603 79 loss.margin 2.04459669745048 +603 79 optimizer.lr 0.03245670501846773 +603 79 negative_sampler.num_negs_per_pos 28.0 +603 79 training.batch_size 1.0 +603 80 model.embedding_dim 0.0 +603 80 loss.margin 2.4673760033457577 +603 80 optimizer.lr 0.0029715467837430508 +603 80 negative_sampler.num_negs_per_pos 91.0 +603 80 training.batch_size 0.0 +603 81 model.embedding_dim 0.0 +603 81 loss.margin 7.975028754416546 +603 81 optimizer.lr 0.009400791929742039 +603 81 negative_sampler.num_negs_per_pos 46.0 +603 81 training.batch_size 0.0 +603 82 model.embedding_dim 1.0 +603 82 loss.margin 9.910413569796633 +603 82 optimizer.lr 0.0022369227854762024 +603 82 negative_sampler.num_negs_per_pos 39.0 +603 82 training.batch_size 2.0 +603 83 model.embedding_dim 1.0 +603 83 loss.margin 4.520739325900847 +603 83 optimizer.lr 0.008052361684016551 +603 83 negative_sampler.num_negs_per_pos 66.0 +603 83 training.batch_size 2.0 +603 84 model.embedding_dim 0.0 +603 84 loss.margin 6.706616264982139 +603 84 optimizer.lr 
0.0053587005202081865 +603 84 negative_sampler.num_negs_per_pos 4.0 +603 84 training.batch_size 0.0 +603 85 model.embedding_dim 0.0 +603 85 loss.margin 8.343491068999722 +603 85 optimizer.lr 0.012989172258766992 +603 85 negative_sampler.num_negs_per_pos 24.0 +603 85 training.batch_size 0.0 +603 86 model.embedding_dim 2.0 +603 86 loss.margin 9.787636778257657 +603 86 optimizer.lr 0.045250359824038 +603 86 negative_sampler.num_negs_per_pos 90.0 +603 86 training.batch_size 0.0 +603 87 model.embedding_dim 0.0 +603 87 loss.margin 9.07918473700252 +603 87 optimizer.lr 0.06711551544930802 +603 87 negative_sampler.num_negs_per_pos 41.0 +603 87 training.batch_size 2.0 +603 88 model.embedding_dim 0.0 +603 88 loss.margin 4.09025459109012 +603 88 optimizer.lr 0.013007942875254684 +603 88 negative_sampler.num_negs_per_pos 35.0 +603 88 training.batch_size 0.0 +603 89 model.embedding_dim 1.0 +603 89 loss.margin 9.84539238442606 +603 89 optimizer.lr 0.0850130577203199 +603 89 negative_sampler.num_negs_per_pos 39.0 +603 89 training.batch_size 1.0 +603 90 model.embedding_dim 0.0 +603 90 loss.margin 5.08669440003622 +603 90 optimizer.lr 0.09460139701599739 +603 90 negative_sampler.num_negs_per_pos 80.0 +603 90 training.batch_size 0.0 +603 91 model.embedding_dim 2.0 +603 91 loss.margin 2.306607783674355 +603 91 optimizer.lr 0.003326201118661118 +603 91 negative_sampler.num_negs_per_pos 63.0 +603 91 training.batch_size 0.0 +603 92 model.embedding_dim 1.0 +603 92 loss.margin 0.5893879998432557 +603 92 optimizer.lr 0.03230498406119277 +603 92 negative_sampler.num_negs_per_pos 26.0 +603 92 training.batch_size 2.0 +603 93 model.embedding_dim 2.0 +603 93 loss.margin 6.394968535423013 +603 93 optimizer.lr 0.006040395890897063 +603 93 negative_sampler.num_negs_per_pos 7.0 +603 93 training.batch_size 0.0 +603 94 model.embedding_dim 1.0 +603 94 loss.margin 8.683346177330217 +603 94 optimizer.lr 0.015639734179838286 +603 94 negative_sampler.num_negs_per_pos 58.0 +603 94 training.batch_size 0.0 
+603 95 model.embedding_dim 0.0 +603 95 loss.margin 8.216317701917873 +603 95 optimizer.lr 0.003805466172788886 +603 95 negative_sampler.num_negs_per_pos 51.0 +603 95 training.batch_size 0.0 +603 96 model.embedding_dim 1.0 +603 96 loss.margin 8.902431197044226 +603 96 optimizer.lr 0.014868671069923563 +603 96 negative_sampler.num_negs_per_pos 34.0 +603 96 training.batch_size 1.0 +603 97 model.embedding_dim 0.0 +603 97 loss.margin 2.970229610184389 +603 97 optimizer.lr 0.004957829044159929 +603 97 negative_sampler.num_negs_per_pos 98.0 +603 97 training.batch_size 0.0 +603 98 model.embedding_dim 0.0 +603 98 loss.margin 8.897492229351297 +603 98 optimizer.lr 0.07907552347325181 +603 98 negative_sampler.num_negs_per_pos 42.0 +603 98 training.batch_size 1.0 +603 99 model.embedding_dim 0.0 +603 99 loss.margin 6.910726226647798 +603 99 optimizer.lr 0.09171861128832347 +603 99 negative_sampler.num_negs_per_pos 56.0 +603 99 training.batch_size 0.0 +603 100 model.embedding_dim 1.0 +603 100 loss.margin 4.925727736888339 +603 100 optimizer.lr 0.02751166929521793 +603 100 negative_sampler.num_negs_per_pos 35.0 +603 100 training.batch_size 2.0 +603 1 dataset """kinships""" +603 1 model """rotate""" +603 1 loss """marginranking""" +603 1 regularizer """no""" +603 1 optimizer """adam""" +603 1 training_loop """owa""" +603 1 negative_sampler """basic""" +603 1 evaluator """rankbased""" +603 2 dataset """kinships""" +603 2 model """rotate""" +603 2 loss """marginranking""" +603 2 regularizer """no""" +603 2 optimizer """adam""" +603 2 training_loop """owa""" +603 2 negative_sampler """basic""" +603 2 evaluator """rankbased""" +603 3 dataset """kinships""" +603 3 model """rotate""" +603 3 loss """marginranking""" +603 3 regularizer """no""" +603 3 optimizer """adam""" +603 3 training_loop """owa""" +603 3 negative_sampler """basic""" +603 3 evaluator """rankbased""" +603 4 dataset """kinships""" +603 4 model """rotate""" +603 4 loss """marginranking""" +603 4 regularizer """no""" 
+603 4 optimizer """adam""" +603 4 training_loop """owa""" +603 4 negative_sampler """basic""" +603 4 evaluator """rankbased""" +603 5 dataset """kinships""" +603 5 model """rotate""" +603 5 loss """marginranking""" +603 5 regularizer """no""" +603 5 optimizer """adam""" +603 5 training_loop """owa""" +603 5 negative_sampler """basic""" +603 5 evaluator """rankbased""" +603 6 dataset """kinships""" +603 6 model """rotate""" +603 6 loss """marginranking""" +603 6 regularizer """no""" +603 6 optimizer """adam""" +603 6 training_loop """owa""" +603 6 negative_sampler """basic""" +603 6 evaluator """rankbased""" +603 7 dataset """kinships""" +603 7 model """rotate""" +603 7 loss """marginranking""" +603 7 regularizer """no""" +603 7 optimizer """adam""" +603 7 training_loop """owa""" +603 7 negative_sampler """basic""" +603 7 evaluator """rankbased""" +603 8 dataset """kinships""" +603 8 model """rotate""" +603 8 loss """marginranking""" +603 8 regularizer """no""" +603 8 optimizer """adam""" +603 8 training_loop """owa""" +603 8 negative_sampler """basic""" +603 8 evaluator """rankbased""" +603 9 dataset """kinships""" +603 9 model """rotate""" +603 9 loss """marginranking""" +603 9 regularizer """no""" +603 9 optimizer """adam""" +603 9 training_loop """owa""" +603 9 negative_sampler """basic""" +603 9 evaluator """rankbased""" +603 10 dataset """kinships""" +603 10 model """rotate""" +603 10 loss """marginranking""" +603 10 regularizer """no""" +603 10 optimizer """adam""" +603 10 training_loop """owa""" +603 10 negative_sampler """basic""" +603 10 evaluator """rankbased""" +603 11 dataset """kinships""" +603 11 model """rotate""" +603 11 loss """marginranking""" +603 11 regularizer """no""" +603 11 optimizer """adam""" +603 11 training_loop """owa""" +603 11 negative_sampler """basic""" +603 11 evaluator """rankbased""" +603 12 dataset """kinships""" +603 12 model """rotate""" +603 12 loss """marginranking""" +603 12 regularizer """no""" +603 12 optimizer 
"""adam""" +603 12 training_loop """owa""" +603 12 negative_sampler """basic""" +603 12 evaluator """rankbased""" +603 13 dataset """kinships""" +603 13 model """rotate""" +603 13 loss """marginranking""" +603 13 regularizer """no""" +603 13 optimizer """adam""" +603 13 training_loop """owa""" +603 13 negative_sampler """basic""" +603 13 evaluator """rankbased""" +603 14 dataset """kinships""" +603 14 model """rotate""" +603 14 loss """marginranking""" +603 14 regularizer """no""" +603 14 optimizer """adam""" +603 14 training_loop """owa""" +603 14 negative_sampler """basic""" +603 14 evaluator """rankbased""" +603 15 dataset """kinships""" +603 15 model """rotate""" +603 15 loss """marginranking""" +603 15 regularizer """no""" +603 15 optimizer """adam""" +603 15 training_loop """owa""" +603 15 negative_sampler """basic""" +603 15 evaluator """rankbased""" +603 16 dataset """kinships""" +603 16 model """rotate""" +603 16 loss """marginranking""" +603 16 regularizer """no""" +603 16 optimizer """adam""" +603 16 training_loop """owa""" +603 16 negative_sampler """basic""" +603 16 evaluator """rankbased""" +603 17 dataset """kinships""" +603 17 model """rotate""" +603 17 loss """marginranking""" +603 17 regularizer """no""" +603 17 optimizer """adam""" +603 17 training_loop """owa""" +603 17 negative_sampler """basic""" +603 17 evaluator """rankbased""" +603 18 dataset """kinships""" +603 18 model """rotate""" +603 18 loss """marginranking""" +603 18 regularizer """no""" +603 18 optimizer """adam""" +603 18 training_loop """owa""" +603 18 negative_sampler """basic""" +603 18 evaluator """rankbased""" +603 19 dataset """kinships""" +603 19 model """rotate""" +603 19 loss """marginranking""" +603 19 regularizer """no""" +603 19 optimizer """adam""" +603 19 training_loop """owa""" +603 19 negative_sampler """basic""" +603 19 evaluator """rankbased""" +603 20 dataset """kinships""" +603 20 model """rotate""" +603 20 loss """marginranking""" +603 20 regularizer """no""" 
+603 20 optimizer """adam""" +603 20 training_loop """owa""" +603 20 negative_sampler """basic""" +603 20 evaluator """rankbased""" +603 21 dataset """kinships""" +603 21 model """rotate""" +603 21 loss """marginranking""" +603 21 regularizer """no""" +603 21 optimizer """adam""" +603 21 training_loop """owa""" +603 21 negative_sampler """basic""" +603 21 evaluator """rankbased""" +603 22 dataset """kinships""" +603 22 model """rotate""" +603 22 loss """marginranking""" +603 22 regularizer """no""" +603 22 optimizer """adam""" +603 22 training_loop """owa""" +603 22 negative_sampler """basic""" +603 22 evaluator """rankbased""" +603 23 dataset """kinships""" +603 23 model """rotate""" +603 23 loss """marginranking""" +603 23 regularizer """no""" +603 23 optimizer """adam""" +603 23 training_loop """owa""" +603 23 negative_sampler """basic""" +603 23 evaluator """rankbased""" +603 24 dataset """kinships""" +603 24 model """rotate""" +603 24 loss """marginranking""" +603 24 regularizer """no""" +603 24 optimizer """adam""" +603 24 training_loop """owa""" +603 24 negative_sampler """basic""" +603 24 evaluator """rankbased""" +603 25 dataset """kinships""" +603 25 model """rotate""" +603 25 loss """marginranking""" +603 25 regularizer """no""" +603 25 optimizer """adam""" +603 25 training_loop """owa""" +603 25 negative_sampler """basic""" +603 25 evaluator """rankbased""" +603 26 dataset """kinships""" +603 26 model """rotate""" +603 26 loss """marginranking""" +603 26 regularizer """no""" +603 26 optimizer """adam""" +603 26 training_loop """owa""" +603 26 negative_sampler """basic""" +603 26 evaluator """rankbased""" +603 27 dataset """kinships""" +603 27 model """rotate""" +603 27 loss """marginranking""" +603 27 regularizer """no""" +603 27 optimizer """adam""" +603 27 training_loop """owa""" +603 27 negative_sampler """basic""" +603 27 evaluator """rankbased""" +603 28 dataset """kinships""" +603 28 model """rotate""" +603 28 loss """marginranking""" +603 28 
regularizer """no""" +603 28 optimizer """adam""" +603 28 training_loop """owa""" +603 28 negative_sampler """basic""" +603 28 evaluator """rankbased""" +603 29 dataset """kinships""" +603 29 model """rotate""" +603 29 loss """marginranking""" +603 29 regularizer """no""" +603 29 optimizer """adam""" +603 29 training_loop """owa""" +603 29 negative_sampler """basic""" +603 29 evaluator """rankbased""" +603 30 dataset """kinships""" +603 30 model """rotate""" +603 30 loss """marginranking""" +603 30 regularizer """no""" +603 30 optimizer """adam""" +603 30 training_loop """owa""" +603 30 negative_sampler """basic""" +603 30 evaluator """rankbased""" +603 31 dataset """kinships""" +603 31 model """rotate""" +603 31 loss """marginranking""" +603 31 regularizer """no""" +603 31 optimizer """adam""" +603 31 training_loop """owa""" +603 31 negative_sampler """basic""" +603 31 evaluator """rankbased""" +603 32 dataset """kinships""" +603 32 model """rotate""" +603 32 loss """marginranking""" +603 32 regularizer """no""" +603 32 optimizer """adam""" +603 32 training_loop """owa""" +603 32 negative_sampler """basic""" +603 32 evaluator """rankbased""" +603 33 dataset """kinships""" +603 33 model """rotate""" +603 33 loss """marginranking""" +603 33 regularizer """no""" +603 33 optimizer """adam""" +603 33 training_loop """owa""" +603 33 negative_sampler """basic""" +603 33 evaluator """rankbased""" +603 34 dataset """kinships""" +603 34 model """rotate""" +603 34 loss """marginranking""" +603 34 regularizer """no""" +603 34 optimizer """adam""" +603 34 training_loop """owa""" +603 34 negative_sampler """basic""" +603 34 evaluator """rankbased""" +603 35 dataset """kinships""" +603 35 model """rotate""" +603 35 loss """marginranking""" +603 35 regularizer """no""" +603 35 optimizer """adam""" +603 35 training_loop """owa""" +603 35 negative_sampler """basic""" +603 35 evaluator """rankbased""" +603 36 dataset """kinships""" +603 36 model """rotate""" +603 36 loss 
"""marginranking""" +603 36 regularizer """no""" +603 36 optimizer """adam""" +603 36 training_loop """owa""" +603 36 negative_sampler """basic""" +603 36 evaluator """rankbased""" +603 37 dataset """kinships""" +603 37 model """rotate""" +603 37 loss """marginranking""" +603 37 regularizer """no""" +603 37 optimizer """adam""" +603 37 training_loop """owa""" +603 37 negative_sampler """basic""" +603 37 evaluator """rankbased""" +603 38 dataset """kinships""" +603 38 model """rotate""" +603 38 loss """marginranking""" +603 38 regularizer """no""" +603 38 optimizer """adam""" +603 38 training_loop """owa""" +603 38 negative_sampler """basic""" +603 38 evaluator """rankbased""" +603 39 dataset """kinships""" +603 39 model """rotate""" +603 39 loss """marginranking""" +603 39 regularizer """no""" +603 39 optimizer """adam""" +603 39 training_loop """owa""" +603 39 negative_sampler """basic""" +603 39 evaluator """rankbased""" +603 40 dataset """kinships""" +603 40 model """rotate""" +603 40 loss """marginranking""" +603 40 regularizer """no""" +603 40 optimizer """adam""" +603 40 training_loop """owa""" +603 40 negative_sampler """basic""" +603 40 evaluator """rankbased""" +603 41 dataset """kinships""" +603 41 model """rotate""" +603 41 loss """marginranking""" +603 41 regularizer """no""" +603 41 optimizer """adam""" +603 41 training_loop """owa""" +603 41 negative_sampler """basic""" +603 41 evaluator """rankbased""" +603 42 dataset """kinships""" +603 42 model """rotate""" +603 42 loss """marginranking""" +603 42 regularizer """no""" +603 42 optimizer """adam""" +603 42 training_loop """owa""" +603 42 negative_sampler """basic""" +603 42 evaluator """rankbased""" +603 43 dataset """kinships""" +603 43 model """rotate""" +603 43 loss """marginranking""" +603 43 regularizer """no""" +603 43 optimizer """adam""" +603 43 training_loop """owa""" +603 43 negative_sampler """basic""" +603 43 evaluator """rankbased""" +603 44 dataset """kinships""" +603 44 model 
"""rotate""" +603 44 loss """marginranking""" +603 44 regularizer """no""" +603 44 optimizer """adam""" +603 44 training_loop """owa""" +603 44 negative_sampler """basic""" +603 44 evaluator """rankbased""" +603 45 dataset """kinships""" +603 45 model """rotate""" +603 45 loss """marginranking""" +603 45 regularizer """no""" +603 45 optimizer """adam""" +603 45 training_loop """owa""" +603 45 negative_sampler """basic""" +603 45 evaluator """rankbased""" +603 46 dataset """kinships""" +603 46 model """rotate""" +603 46 loss """marginranking""" +603 46 regularizer """no""" +603 46 optimizer """adam""" +603 46 training_loop """owa""" +603 46 negative_sampler """basic""" +603 46 evaluator """rankbased""" +603 47 dataset """kinships""" +603 47 model """rotate""" +603 47 loss """marginranking""" +603 47 regularizer """no""" +603 47 optimizer """adam""" +603 47 training_loop """owa""" +603 47 negative_sampler """basic""" +603 47 evaluator """rankbased""" +603 48 dataset """kinships""" +603 48 model """rotate""" +603 48 loss """marginranking""" +603 48 regularizer """no""" +603 48 optimizer """adam""" +603 48 training_loop """owa""" +603 48 negative_sampler """basic""" +603 48 evaluator """rankbased""" +603 49 dataset """kinships""" +603 49 model """rotate""" +603 49 loss """marginranking""" +603 49 regularizer """no""" +603 49 optimizer """adam""" +603 49 training_loop """owa""" +603 49 negative_sampler """basic""" +603 49 evaluator """rankbased""" +603 50 dataset """kinships""" +603 50 model """rotate""" +603 50 loss """marginranking""" +603 50 regularizer """no""" +603 50 optimizer """adam""" +603 50 training_loop """owa""" +603 50 negative_sampler """basic""" +603 50 evaluator """rankbased""" +603 51 dataset """kinships""" +603 51 model """rotate""" +603 51 loss """marginranking""" +603 51 regularizer """no""" +603 51 optimizer """adam""" +603 51 training_loop """owa""" +603 51 negative_sampler """basic""" +603 51 evaluator """rankbased""" +603 52 dataset 
"""kinships""" +603 52 model """rotate""" +603 52 loss """marginranking""" +603 52 regularizer """no""" +603 52 optimizer """adam""" +603 52 training_loop """owa""" +603 52 negative_sampler """basic""" +603 52 evaluator """rankbased""" +603 53 dataset """kinships""" +603 53 model """rotate""" +603 53 loss """marginranking""" +603 53 regularizer """no""" +603 53 optimizer """adam""" +603 53 training_loop """owa""" +603 53 negative_sampler """basic""" +603 53 evaluator """rankbased""" +603 54 dataset """kinships""" +603 54 model """rotate""" +603 54 loss """marginranking""" +603 54 regularizer """no""" +603 54 optimizer """adam""" +603 54 training_loop """owa""" +603 54 negative_sampler """basic""" +603 54 evaluator """rankbased""" +603 55 dataset """kinships""" +603 55 model """rotate""" +603 55 loss """marginranking""" +603 55 regularizer """no""" +603 55 optimizer """adam""" +603 55 training_loop """owa""" +603 55 negative_sampler """basic""" +603 55 evaluator """rankbased""" +603 56 dataset """kinships""" +603 56 model """rotate""" +603 56 loss """marginranking""" +603 56 regularizer """no""" +603 56 optimizer """adam""" +603 56 training_loop """owa""" +603 56 negative_sampler """basic""" +603 56 evaluator """rankbased""" +603 57 dataset """kinships""" +603 57 model """rotate""" +603 57 loss """marginranking""" +603 57 regularizer """no""" +603 57 optimizer """adam""" +603 57 training_loop """owa""" +603 57 negative_sampler """basic""" +603 57 evaluator """rankbased""" +603 58 dataset """kinships""" +603 58 model """rotate""" +603 58 loss """marginranking""" +603 58 regularizer """no""" +603 58 optimizer """adam""" +603 58 training_loop """owa""" +603 58 negative_sampler """basic""" +603 58 evaluator """rankbased""" +603 59 dataset """kinships""" +603 59 model """rotate""" +603 59 loss """marginranking""" +603 59 regularizer """no""" +603 59 optimizer """adam""" +603 59 training_loop """owa""" +603 59 negative_sampler """basic""" +603 59 evaluator """rankbased""" 
+603 60 dataset """kinships""" +603 60 model """rotate""" +603 60 loss """marginranking""" +603 60 regularizer """no""" +603 60 optimizer """adam""" +603 60 training_loop """owa""" +603 60 negative_sampler """basic""" +603 60 evaluator """rankbased""" +603 61 dataset """kinships""" +603 61 model """rotate""" +603 61 loss """marginranking""" +603 61 regularizer """no""" +603 61 optimizer """adam""" +603 61 training_loop """owa""" +603 61 negative_sampler """basic""" +603 61 evaluator """rankbased""" +603 62 dataset """kinships""" +603 62 model """rotate""" +603 62 loss """marginranking""" +603 62 regularizer """no""" +603 62 optimizer """adam""" +603 62 training_loop """owa""" +603 62 negative_sampler """basic""" +603 62 evaluator """rankbased""" +603 63 dataset """kinships""" +603 63 model """rotate""" +603 63 loss """marginranking""" +603 63 regularizer """no""" +603 63 optimizer """adam""" +603 63 training_loop """owa""" +603 63 negative_sampler """basic""" +603 63 evaluator """rankbased""" +603 64 dataset """kinships""" +603 64 model """rotate""" +603 64 loss """marginranking""" +603 64 regularizer """no""" +603 64 optimizer """adam""" +603 64 training_loop """owa""" +603 64 negative_sampler """basic""" +603 64 evaluator """rankbased""" +603 65 dataset """kinships""" +603 65 model """rotate""" +603 65 loss """marginranking""" +603 65 regularizer """no""" +603 65 optimizer """adam""" +603 65 training_loop """owa""" +603 65 negative_sampler """basic""" +603 65 evaluator """rankbased""" +603 66 dataset """kinships""" +603 66 model """rotate""" +603 66 loss """marginranking""" +603 66 regularizer """no""" +603 66 optimizer """adam""" +603 66 training_loop """owa""" +603 66 negative_sampler """basic""" +603 66 evaluator """rankbased""" +603 67 dataset """kinships""" +603 67 model """rotate""" +603 67 loss """marginranking""" +603 67 regularizer """no""" +603 67 optimizer """adam""" +603 67 training_loop """owa""" +603 67 negative_sampler """basic""" +603 67 evaluator 
"""rankbased""" +603 68 dataset """kinships""" +603 68 model """rotate""" +603 68 loss """marginranking""" +603 68 regularizer """no""" +603 68 optimizer """adam""" +603 68 training_loop """owa""" +603 68 negative_sampler """basic""" +603 68 evaluator """rankbased""" +603 69 dataset """kinships""" +603 69 model """rotate""" +603 69 loss """marginranking""" +603 69 regularizer """no""" +603 69 optimizer """adam""" +603 69 training_loop """owa""" +603 69 negative_sampler """basic""" +603 69 evaluator """rankbased""" +603 70 dataset """kinships""" +603 70 model """rotate""" +603 70 loss """marginranking""" +603 70 regularizer """no""" +603 70 optimizer """adam""" +603 70 training_loop """owa""" +603 70 negative_sampler """basic""" +603 70 evaluator """rankbased""" +603 71 dataset """kinships""" +603 71 model """rotate""" +603 71 loss """marginranking""" +603 71 regularizer """no""" +603 71 optimizer """adam""" +603 71 training_loop """owa""" +603 71 negative_sampler """basic""" +603 71 evaluator """rankbased""" +603 72 dataset """kinships""" +603 72 model """rotate""" +603 72 loss """marginranking""" +603 72 regularizer """no""" +603 72 optimizer """adam""" +603 72 training_loop """owa""" +603 72 negative_sampler """basic""" +603 72 evaluator """rankbased""" +603 73 dataset """kinships""" +603 73 model """rotate""" +603 73 loss """marginranking""" +603 73 regularizer """no""" +603 73 optimizer """adam""" +603 73 training_loop """owa""" +603 73 negative_sampler """basic""" +603 73 evaluator """rankbased""" +603 74 dataset """kinships""" +603 74 model """rotate""" +603 74 loss """marginranking""" +603 74 regularizer """no""" +603 74 optimizer """adam""" +603 74 training_loop """owa""" +603 74 negative_sampler """basic""" +603 74 evaluator """rankbased""" +603 75 dataset """kinships""" +603 75 model """rotate""" +603 75 loss """marginranking""" +603 75 regularizer """no""" +603 75 optimizer """adam""" +603 75 training_loop """owa""" +603 75 negative_sampler """basic""" 
+603 75 evaluator """rankbased""" +603 76 dataset """kinships""" +603 76 model """rotate""" +603 76 loss """marginranking""" +603 76 regularizer """no""" +603 76 optimizer """adam""" +603 76 training_loop """owa""" +603 76 negative_sampler """basic""" +603 76 evaluator """rankbased""" +603 77 dataset """kinships""" +603 77 model """rotate""" +603 77 loss """marginranking""" +603 77 regularizer """no""" +603 77 optimizer """adam""" +603 77 training_loop """owa""" +603 77 negative_sampler """basic""" +603 77 evaluator """rankbased""" +603 78 dataset """kinships""" +603 78 model """rotate""" +603 78 loss """marginranking""" +603 78 regularizer """no""" +603 78 optimizer """adam""" +603 78 training_loop """owa""" +603 78 negative_sampler """basic""" +603 78 evaluator """rankbased""" +603 79 dataset """kinships""" +603 79 model """rotate""" +603 79 loss """marginranking""" +603 79 regularizer """no""" +603 79 optimizer """adam""" +603 79 training_loop """owa""" +603 79 negative_sampler """basic""" +603 79 evaluator """rankbased""" +603 80 dataset """kinships""" +603 80 model """rotate""" +603 80 loss """marginranking""" +603 80 regularizer """no""" +603 80 optimizer """adam""" +603 80 training_loop """owa""" +603 80 negative_sampler """basic""" +603 80 evaluator """rankbased""" +603 81 dataset """kinships""" +603 81 model """rotate""" +603 81 loss """marginranking""" +603 81 regularizer """no""" +603 81 optimizer """adam""" +603 81 training_loop """owa""" +603 81 negative_sampler """basic""" +603 81 evaluator """rankbased""" +603 82 dataset """kinships""" +603 82 model """rotate""" +603 82 loss """marginranking""" +603 82 regularizer """no""" +603 82 optimizer """adam""" +603 82 training_loop """owa""" +603 82 negative_sampler """basic""" +603 82 evaluator """rankbased""" +603 83 dataset """kinships""" +603 83 model """rotate""" +603 83 loss """marginranking""" +603 83 regularizer """no""" +603 83 optimizer """adam""" +603 83 training_loop """owa""" +603 83 
negative_sampler """basic""" +603 83 evaluator """rankbased""" +603 84 dataset """kinships""" +603 84 model """rotate""" +603 84 loss """marginranking""" +603 84 regularizer """no""" +603 84 optimizer """adam""" +603 84 training_loop """owa""" +603 84 negative_sampler """basic""" +603 84 evaluator """rankbased""" +603 85 dataset """kinships""" +603 85 model """rotate""" +603 85 loss """marginranking""" +603 85 regularizer """no""" +603 85 optimizer """adam""" +603 85 training_loop """owa""" +603 85 negative_sampler """basic""" +603 85 evaluator """rankbased""" +603 86 dataset """kinships""" +603 86 model """rotate""" +603 86 loss """marginranking""" +603 86 regularizer """no""" +603 86 optimizer """adam""" +603 86 training_loop """owa""" +603 86 negative_sampler """basic""" +603 86 evaluator """rankbased""" +603 87 dataset """kinships""" +603 87 model """rotate""" +603 87 loss """marginranking""" +603 87 regularizer """no""" +603 87 optimizer """adam""" +603 87 training_loop """owa""" +603 87 negative_sampler """basic""" +603 87 evaluator """rankbased""" +603 88 dataset """kinships""" +603 88 model """rotate""" +603 88 loss """marginranking""" +603 88 regularizer """no""" +603 88 optimizer """adam""" +603 88 training_loop """owa""" +603 88 negative_sampler """basic""" +603 88 evaluator """rankbased""" +603 89 dataset """kinships""" +603 89 model """rotate""" +603 89 loss """marginranking""" +603 89 regularizer """no""" +603 89 optimizer """adam""" +603 89 training_loop """owa""" +603 89 negative_sampler """basic""" +603 89 evaluator """rankbased""" +603 90 dataset """kinships""" +603 90 model """rotate""" +603 90 loss """marginranking""" +603 90 regularizer """no""" +603 90 optimizer """adam""" +603 90 training_loop """owa""" +603 90 negative_sampler """basic""" +603 90 evaluator """rankbased""" +603 91 dataset """kinships""" +603 91 model """rotate""" +603 91 loss """marginranking""" +603 91 regularizer """no""" +603 91 optimizer """adam""" +603 91 training_loop 
"""owa""" +603 91 negative_sampler """basic""" +603 91 evaluator """rankbased""" +603 92 dataset """kinships""" +603 92 model """rotate""" +603 92 loss """marginranking""" +603 92 regularizer """no""" +603 92 optimizer """adam""" +603 92 training_loop """owa""" +603 92 negative_sampler """basic""" +603 92 evaluator """rankbased""" +603 93 dataset """kinships""" +603 93 model """rotate""" +603 93 loss """marginranking""" +603 93 regularizer """no""" +603 93 optimizer """adam""" +603 93 training_loop """owa""" +603 93 negative_sampler """basic""" +603 93 evaluator """rankbased""" +603 94 dataset """kinships""" +603 94 model """rotate""" +603 94 loss """marginranking""" +603 94 regularizer """no""" +603 94 optimizer """adam""" +603 94 training_loop """owa""" +603 94 negative_sampler """basic""" +603 94 evaluator """rankbased""" +603 95 dataset """kinships""" +603 95 model """rotate""" +603 95 loss """marginranking""" +603 95 regularizer """no""" +603 95 optimizer """adam""" +603 95 training_loop """owa""" +603 95 negative_sampler """basic""" +603 95 evaluator """rankbased""" +603 96 dataset """kinships""" +603 96 model """rotate""" +603 96 loss """marginranking""" +603 96 regularizer """no""" +603 96 optimizer """adam""" +603 96 training_loop """owa""" +603 96 negative_sampler """basic""" +603 96 evaluator """rankbased""" +603 97 dataset """kinships""" +603 97 model """rotate""" +603 97 loss """marginranking""" +603 97 regularizer """no""" +603 97 optimizer """adam""" +603 97 training_loop """owa""" +603 97 negative_sampler """basic""" +603 97 evaluator """rankbased""" +603 98 dataset """kinships""" +603 98 model """rotate""" +603 98 loss """marginranking""" +603 98 regularizer """no""" +603 98 optimizer """adam""" +603 98 training_loop """owa""" +603 98 negative_sampler """basic""" +603 98 evaluator """rankbased""" +603 99 dataset """kinships""" +603 99 model """rotate""" +603 99 loss """marginranking""" +603 99 regularizer """no""" +603 99 optimizer """adam""" +603 
99 training_loop """owa""" +603 99 negative_sampler """basic""" +603 99 evaluator """rankbased""" +603 100 dataset """kinships""" +603 100 model """rotate""" +603 100 loss """marginranking""" +603 100 regularizer """no""" +603 100 optimizer """adam""" +603 100 training_loop """owa""" +603 100 negative_sampler """basic""" +603 100 evaluator """rankbased""" +604 1 model.embedding_dim 2.0 +604 1 loss.margin 8.566470712960038 +604 1 optimizer.lr 0.0021152450771590343 +604 1 negative_sampler.num_negs_per_pos 15.0 +604 1 training.batch_size 0.0 +604 2 model.embedding_dim 0.0 +604 2 loss.margin 4.339298175568539 +604 2 optimizer.lr 0.00905239092354017 +604 2 negative_sampler.num_negs_per_pos 56.0 +604 2 training.batch_size 0.0 +604 3 model.embedding_dim 1.0 +604 3 loss.margin 2.3037506762792104 +604 3 optimizer.lr 0.010192960521080523 +604 3 negative_sampler.num_negs_per_pos 95.0 +604 3 training.batch_size 1.0 +604 4 model.embedding_dim 2.0 +604 4 loss.margin 3.980202917275755 +604 4 optimizer.lr 0.012584974141205558 +604 4 negative_sampler.num_negs_per_pos 59.0 +604 4 training.batch_size 0.0 +604 5 model.embedding_dim 1.0 +604 5 loss.margin 8.106208156639022 +604 5 optimizer.lr 0.006907803699856493 +604 5 negative_sampler.num_negs_per_pos 80.0 +604 5 training.batch_size 1.0 +604 6 model.embedding_dim 2.0 +604 6 loss.margin 1.275281990398864 +604 6 optimizer.lr 0.013781720381559323 +604 6 negative_sampler.num_negs_per_pos 78.0 +604 6 training.batch_size 0.0 +604 7 model.embedding_dim 0.0 +604 7 loss.margin 8.678942576784598 +604 7 optimizer.lr 0.004064997467997795 +604 7 negative_sampler.num_negs_per_pos 26.0 +604 7 training.batch_size 2.0 +604 8 model.embedding_dim 0.0 +604 8 loss.margin 1.2608628108888587 +604 8 optimizer.lr 0.013815933278727007 +604 8 negative_sampler.num_negs_per_pos 74.0 +604 8 training.batch_size 1.0 +604 9 model.embedding_dim 0.0 +604 9 loss.margin 3.5327281018498304 +604 9 optimizer.lr 0.05753418794313399 +604 9 negative_sampler.num_negs_per_pos 
41.0 +604 9 training.batch_size 2.0 +604 10 model.embedding_dim 1.0 +604 10 loss.margin 6.495103075509424 +604 10 optimizer.lr 0.0026471181815436373 +604 10 negative_sampler.num_negs_per_pos 18.0 +604 10 training.batch_size 1.0 +604 11 model.embedding_dim 2.0 +604 11 loss.margin 2.795687210820117 +604 11 optimizer.lr 0.049838821901936316 +604 11 negative_sampler.num_negs_per_pos 39.0 +604 11 training.batch_size 1.0 +604 12 model.embedding_dim 1.0 +604 12 loss.margin 5.31568669544779 +604 12 optimizer.lr 0.0010467899168617129 +604 12 negative_sampler.num_negs_per_pos 79.0 +604 12 training.batch_size 1.0 +604 13 model.embedding_dim 2.0 +604 13 loss.margin 5.614482743761412 +604 13 optimizer.lr 0.01586750777642311 +604 13 negative_sampler.num_negs_per_pos 99.0 +604 13 training.batch_size 0.0 +604 14 model.embedding_dim 0.0 +604 14 loss.margin 9.759348510435995 +604 14 optimizer.lr 0.057535321525979635 +604 14 negative_sampler.num_negs_per_pos 5.0 +604 14 training.batch_size 2.0 +604 15 model.embedding_dim 0.0 +604 15 loss.margin 0.9611716306803553 +604 15 optimizer.lr 0.0012968893047903111 +604 15 negative_sampler.num_negs_per_pos 3.0 +604 15 training.batch_size 1.0 +604 16 model.embedding_dim 0.0 +604 16 loss.margin 5.3358561467313885 +604 16 optimizer.lr 0.0023181271449207045 +604 16 negative_sampler.num_negs_per_pos 17.0 +604 16 training.batch_size 2.0 +604 17 model.embedding_dim 2.0 +604 17 loss.margin 3.889169870268163 +604 17 optimizer.lr 0.08052869097713038 +604 17 negative_sampler.num_negs_per_pos 13.0 +604 17 training.batch_size 1.0 +604 18 model.embedding_dim 2.0 +604 18 loss.margin 5.700973297419672 +604 18 optimizer.lr 0.017156200203604907 +604 18 negative_sampler.num_negs_per_pos 93.0 +604 18 training.batch_size 1.0 +604 19 model.embedding_dim 0.0 +604 19 loss.margin 4.469436130718582 +604 19 optimizer.lr 0.004861520727136359 +604 19 negative_sampler.num_negs_per_pos 62.0 +604 19 training.batch_size 2.0 +604 20 model.embedding_dim 2.0 +604 20 loss.margin 
4.1064289350628576 +604 20 optimizer.lr 0.0014494976060182194 +604 20 negative_sampler.num_negs_per_pos 38.0 +604 20 training.batch_size 0.0 +604 21 model.embedding_dim 2.0 +604 21 loss.margin 6.113866550521395 +604 21 optimizer.lr 0.0010401021655580018 +604 21 negative_sampler.num_negs_per_pos 89.0 +604 21 training.batch_size 0.0 +604 22 model.embedding_dim 0.0 +604 22 loss.margin 4.1970894632663684 +604 22 optimizer.lr 0.003204650977102323 +604 22 negative_sampler.num_negs_per_pos 17.0 +604 22 training.batch_size 0.0 +604 23 model.embedding_dim 0.0 +604 23 loss.margin 1.7165503398227955 +604 23 optimizer.lr 0.00316883174400821 +604 23 negative_sampler.num_negs_per_pos 55.0 +604 23 training.batch_size 1.0 +604 24 model.embedding_dim 0.0 +604 24 loss.margin 9.179444845088222 +604 24 optimizer.lr 0.040014422184637695 +604 24 negative_sampler.num_negs_per_pos 62.0 +604 24 training.batch_size 2.0 +604 25 model.embedding_dim 2.0 +604 25 loss.margin 3.8440279547699325 +604 25 optimizer.lr 0.034931717124622945 +604 25 negative_sampler.num_negs_per_pos 41.0 +604 25 training.batch_size 0.0 +604 26 model.embedding_dim 0.0 +604 26 loss.margin 5.823348710441514 +604 26 optimizer.lr 0.012123416979622999 +604 26 negative_sampler.num_negs_per_pos 85.0 +604 26 training.batch_size 1.0 +604 27 model.embedding_dim 0.0 +604 27 loss.margin 7.7169000626767 +604 27 optimizer.lr 0.06844920817812927 +604 27 negative_sampler.num_negs_per_pos 53.0 +604 27 training.batch_size 0.0 +604 28 model.embedding_dim 0.0 +604 28 loss.margin 6.406986028239062 +604 28 optimizer.lr 0.002782952747549295 +604 28 negative_sampler.num_negs_per_pos 21.0 +604 28 training.batch_size 0.0 +604 29 model.embedding_dim 2.0 +604 29 loss.margin 1.6263509311866131 +604 29 optimizer.lr 0.001031918718803844 +604 29 negative_sampler.num_negs_per_pos 20.0 +604 29 training.batch_size 0.0 +604 30 model.embedding_dim 1.0 +604 30 loss.margin 7.553646224183211 +604 30 optimizer.lr 0.006580294756213028 +604 30 
negative_sampler.num_negs_per_pos 11.0 +604 30 training.batch_size 1.0 +604 31 model.embedding_dim 2.0 +604 31 loss.margin 7.154482157556799 +604 31 optimizer.lr 0.05895331383577419 +604 31 negative_sampler.num_negs_per_pos 23.0 +604 31 training.batch_size 2.0 +604 32 model.embedding_dim 0.0 +604 32 loss.margin 3.4607452838723405 +604 32 optimizer.lr 0.0057686086338274885 +604 32 negative_sampler.num_negs_per_pos 56.0 +604 32 training.batch_size 1.0 +604 33 model.embedding_dim 0.0 +604 33 loss.margin 8.795458332234613 +604 33 optimizer.lr 0.0013286235122853486 +604 33 negative_sampler.num_negs_per_pos 70.0 +604 33 training.batch_size 2.0 +604 34 model.embedding_dim 2.0 +604 34 loss.margin 6.55281346374857 +604 34 optimizer.lr 0.00478392292515947 +604 34 negative_sampler.num_negs_per_pos 11.0 +604 34 training.batch_size 2.0 +604 35 model.embedding_dim 1.0 +604 35 loss.margin 7.607098474405934 +604 35 optimizer.lr 0.0015930316040285854 +604 35 negative_sampler.num_negs_per_pos 76.0 +604 35 training.batch_size 0.0 +604 36 model.embedding_dim 1.0 +604 36 loss.margin 8.28806820379012 +604 36 optimizer.lr 0.0014167062395486828 +604 36 negative_sampler.num_negs_per_pos 70.0 +604 36 training.batch_size 1.0 +604 37 model.embedding_dim 2.0 +604 37 loss.margin 5.479680964860934 +604 37 optimizer.lr 0.010288924184199685 +604 37 negative_sampler.num_negs_per_pos 96.0 +604 37 training.batch_size 2.0 +604 38 model.embedding_dim 0.0 +604 38 loss.margin 3.652775912493908 +604 38 optimizer.lr 0.014565182869436324 +604 38 negative_sampler.num_negs_per_pos 85.0 +604 38 training.batch_size 2.0 +604 39 model.embedding_dim 2.0 +604 39 loss.margin 1.0206292727540243 +604 39 optimizer.lr 0.013016475048066975 +604 39 negative_sampler.num_negs_per_pos 79.0 +604 39 training.batch_size 0.0 +604 40 model.embedding_dim 1.0 +604 40 loss.margin 4.813602769871975 +604 40 optimizer.lr 0.04635341893693369 +604 40 negative_sampler.num_negs_per_pos 17.0 +604 40 training.batch_size 2.0 +604 41 
model.embedding_dim 0.0 +604 41 loss.margin 7.912238246235191 +604 41 optimizer.lr 0.020410873143488297 +604 41 negative_sampler.num_negs_per_pos 64.0 +604 41 training.batch_size 2.0 +604 42 model.embedding_dim 2.0 +604 42 loss.margin 3.9851392404454145 +604 42 optimizer.lr 0.02518665703651905 +604 42 negative_sampler.num_negs_per_pos 49.0 +604 42 training.batch_size 1.0 +604 43 model.embedding_dim 1.0 +604 43 loss.margin 7.157994035597994 +604 43 optimizer.lr 0.09626092234630307 +604 43 negative_sampler.num_negs_per_pos 19.0 +604 43 training.batch_size 2.0 +604 44 model.embedding_dim 1.0 +604 44 loss.margin 8.998668899875652 +604 44 optimizer.lr 0.0010982940842041958 +604 44 negative_sampler.num_negs_per_pos 9.0 +604 44 training.batch_size 0.0 +604 45 model.embedding_dim 1.0 +604 45 loss.margin 3.082831917003582 +604 45 optimizer.lr 0.09882119270847042 +604 45 negative_sampler.num_negs_per_pos 14.0 +604 45 training.batch_size 1.0 +604 46 model.embedding_dim 1.0 +604 46 loss.margin 2.4982690282331155 +604 46 optimizer.lr 0.004640438680435851 +604 46 negative_sampler.num_negs_per_pos 91.0 +604 46 training.batch_size 1.0 +604 47 model.embedding_dim 2.0 +604 47 loss.margin 2.498563304664059 +604 47 optimizer.lr 0.027931078343443518 +604 47 negative_sampler.num_negs_per_pos 51.0 +604 47 training.batch_size 2.0 +604 48 model.embedding_dim 0.0 +604 48 loss.margin 9.722832459126845 +604 48 optimizer.lr 0.001113747555745269 +604 48 negative_sampler.num_negs_per_pos 57.0 +604 48 training.batch_size 0.0 +604 49 model.embedding_dim 2.0 +604 49 loss.margin 3.303877658531574 +604 49 optimizer.lr 0.07511930896718881 +604 49 negative_sampler.num_negs_per_pos 22.0 +604 49 training.batch_size 0.0 +604 50 model.embedding_dim 2.0 +604 50 loss.margin 1.3903645570920815 +604 50 optimizer.lr 0.09738048555868546 +604 50 negative_sampler.num_negs_per_pos 73.0 +604 50 training.batch_size 1.0 +604 51 model.embedding_dim 2.0 +604 51 loss.margin 2.7066861788446928 +604 51 optimizer.lr 
0.002053265656542475 +604 51 negative_sampler.num_negs_per_pos 54.0 +604 51 training.batch_size 2.0 +604 52 model.embedding_dim 2.0 +604 52 loss.margin 5.853260909176689 +604 52 optimizer.lr 0.003711683666870976 +604 52 negative_sampler.num_negs_per_pos 84.0 +604 52 training.batch_size 0.0 +604 53 model.embedding_dim 0.0 +604 53 loss.margin 4.336334444976944 +604 53 optimizer.lr 0.0035008370269961242 +604 53 negative_sampler.num_negs_per_pos 64.0 +604 53 training.batch_size 1.0 +604 54 model.embedding_dim 2.0 +604 54 loss.margin 1.611952536646711 +604 54 optimizer.lr 0.0047789409916153425 +604 54 negative_sampler.num_negs_per_pos 41.0 +604 54 training.batch_size 2.0 +604 55 model.embedding_dim 0.0 +604 55 loss.margin 2.3213527872993116 +604 55 optimizer.lr 0.06233915649318894 +604 55 negative_sampler.num_negs_per_pos 56.0 +604 55 training.batch_size 2.0 +604 56 model.embedding_dim 2.0 +604 56 loss.margin 5.469169508555016 +604 56 optimizer.lr 0.0017000857761024508 +604 56 negative_sampler.num_negs_per_pos 51.0 +604 56 training.batch_size 2.0 +604 57 model.embedding_dim 1.0 +604 57 loss.margin 6.802983703968129 +604 57 optimizer.lr 0.001827109788080899 +604 57 negative_sampler.num_negs_per_pos 44.0 +604 57 training.batch_size 1.0 +604 58 model.embedding_dim 0.0 +604 58 loss.margin 2.9632179233582296 +604 58 optimizer.lr 0.08577410403824688 +604 58 negative_sampler.num_negs_per_pos 86.0 +604 58 training.batch_size 0.0 +604 59 model.embedding_dim 2.0 +604 59 loss.margin 8.151121085960886 +604 59 optimizer.lr 0.07666654794680537 +604 59 negative_sampler.num_negs_per_pos 32.0 +604 59 training.batch_size 2.0 +604 60 model.embedding_dim 0.0 +604 60 loss.margin 6.276697288453323 +604 60 optimizer.lr 0.02129907524115654 +604 60 negative_sampler.num_negs_per_pos 89.0 +604 60 training.batch_size 0.0 +604 61 model.embedding_dim 1.0 +604 61 loss.margin 5.0398835387863725 +604 61 optimizer.lr 0.02061880651442332 +604 61 negative_sampler.num_negs_per_pos 52.0 +604 61 
training.batch_size 2.0 +604 62 model.embedding_dim 0.0 +604 62 loss.margin 3.5598215562597053 +604 62 optimizer.lr 0.0019514228779998022 +604 62 negative_sampler.num_negs_per_pos 92.0 +604 62 training.batch_size 0.0 +604 63 model.embedding_dim 1.0 +604 63 loss.margin 4.6659324877594415 +604 63 optimizer.lr 0.0024644212568584486 +604 63 negative_sampler.num_negs_per_pos 95.0 +604 63 training.batch_size 2.0 +604 64 model.embedding_dim 1.0 +604 64 loss.margin 8.936499362353024 +604 64 optimizer.lr 0.004339872886382583 +604 64 negative_sampler.num_negs_per_pos 88.0 +604 64 training.batch_size 0.0 +604 65 model.embedding_dim 1.0 +604 65 loss.margin 6.010867589339681 +604 65 optimizer.lr 0.08043578541775168 +604 65 negative_sampler.num_negs_per_pos 56.0 +604 65 training.batch_size 0.0 +604 66 model.embedding_dim 1.0 +604 66 loss.margin 4.783499605419267 +604 66 optimizer.lr 0.021171540277349698 +604 66 negative_sampler.num_negs_per_pos 53.0 +604 66 training.batch_size 2.0 +604 67 model.embedding_dim 2.0 +604 67 loss.margin 7.901013805910967 +604 67 optimizer.lr 0.00738882452953608 +604 67 negative_sampler.num_negs_per_pos 61.0 +604 67 training.batch_size 2.0 +604 68 model.embedding_dim 0.0 +604 68 loss.margin 2.759371769912139 +604 68 optimizer.lr 0.004203705888668493 +604 68 negative_sampler.num_negs_per_pos 62.0 +604 68 training.batch_size 1.0 +604 69 model.embedding_dim 1.0 +604 69 loss.margin 5.425238848837408 +604 69 optimizer.lr 0.0023590989724318542 +604 69 negative_sampler.num_negs_per_pos 63.0 +604 69 training.batch_size 1.0 +604 70 model.embedding_dim 1.0 +604 70 loss.margin 6.845203582746202 +604 70 optimizer.lr 0.006849055505902215 +604 70 negative_sampler.num_negs_per_pos 42.0 +604 70 training.batch_size 1.0 +604 71 model.embedding_dim 0.0 +604 71 loss.margin 8.16393640019528 +604 71 optimizer.lr 0.029921265759504233 +604 71 negative_sampler.num_negs_per_pos 66.0 +604 71 training.batch_size 0.0 +604 72 model.embedding_dim 1.0 +604 72 loss.margin 
4.900996481340063 +604 72 optimizer.lr 0.016488255378992008 +604 72 negative_sampler.num_negs_per_pos 74.0 +604 72 training.batch_size 0.0 +604 73 model.embedding_dim 2.0 +604 73 loss.margin 5.792086604018587 +604 73 optimizer.lr 0.0012653522951258803 +604 73 negative_sampler.num_negs_per_pos 19.0 +604 73 training.batch_size 0.0 +604 74 model.embedding_dim 1.0 +604 74 loss.margin 2.7710489663619007 +604 74 optimizer.lr 0.0013963482739447633 +604 74 negative_sampler.num_negs_per_pos 63.0 +604 74 training.batch_size 1.0 +604 75 model.embedding_dim 0.0 +604 75 loss.margin 3.9505299768365973 +604 75 optimizer.lr 0.0018073444618924444 +604 75 negative_sampler.num_negs_per_pos 33.0 +604 75 training.batch_size 0.0 +604 76 model.embedding_dim 1.0 +604 76 loss.margin 7.288216166337787 +604 76 optimizer.lr 0.011488875257312991 +604 76 negative_sampler.num_negs_per_pos 17.0 +604 76 training.batch_size 2.0 +604 77 model.embedding_dim 0.0 +604 77 loss.margin 4.108311338415353 +604 77 optimizer.lr 0.004421336391108655 +604 77 negative_sampler.num_negs_per_pos 86.0 +604 77 training.batch_size 2.0 +604 78 model.embedding_dim 2.0 +604 78 loss.margin 1.0652175338577314 +604 78 optimizer.lr 0.006661701345022681 +604 78 negative_sampler.num_negs_per_pos 55.0 +604 78 training.batch_size 2.0 +604 79 model.embedding_dim 1.0 +604 79 loss.margin 4.6604560567089335 +604 79 optimizer.lr 0.03307484158482314 +604 79 negative_sampler.num_negs_per_pos 12.0 +604 79 training.batch_size 0.0 +604 80 model.embedding_dim 0.0 +604 80 loss.margin 9.63009594433809 +604 80 optimizer.lr 0.07780682983122147 +604 80 negative_sampler.num_negs_per_pos 61.0 +604 80 training.batch_size 0.0 +604 81 model.embedding_dim 1.0 +604 81 loss.margin 8.461572688259334 +604 81 optimizer.lr 0.001012564836905796 +604 81 negative_sampler.num_negs_per_pos 14.0 +604 81 training.batch_size 2.0 +604 82 model.embedding_dim 0.0 +604 82 loss.margin 8.136371786789864 +604 82 optimizer.lr 0.006506885666330856 +604 82 
negative_sampler.num_negs_per_pos 20.0 +604 82 training.batch_size 0.0 +604 83 model.embedding_dim 2.0 +604 83 loss.margin 7.098514435297622 +604 83 optimizer.lr 0.007186376805905431 +604 83 negative_sampler.num_negs_per_pos 0.0 +604 83 training.batch_size 1.0 +604 84 model.embedding_dim 0.0 +604 84 loss.margin 1.243550688349572 +604 84 optimizer.lr 0.0025152621456337705 +604 84 negative_sampler.num_negs_per_pos 99.0 +604 84 training.batch_size 1.0 +604 85 model.embedding_dim 1.0 +604 85 loss.margin 1.6231525076454736 +604 85 optimizer.lr 0.009918590955636294 +604 85 negative_sampler.num_negs_per_pos 4.0 +604 85 training.batch_size 1.0 +604 86 model.embedding_dim 1.0 +604 86 loss.margin 8.182747622943998 +604 86 optimizer.lr 0.0041023649295582046 +604 86 negative_sampler.num_negs_per_pos 35.0 +604 86 training.batch_size 2.0 +604 87 model.embedding_dim 0.0 +604 87 loss.margin 8.646709271122239 +604 87 optimizer.lr 0.03585806984330624 +604 87 negative_sampler.num_negs_per_pos 10.0 +604 87 training.batch_size 0.0 +604 88 model.embedding_dim 2.0 +604 88 loss.margin 7.420229553987342 +604 88 optimizer.lr 0.008753153244580664 +604 88 negative_sampler.num_negs_per_pos 71.0 +604 88 training.batch_size 1.0 +604 89 model.embedding_dim 2.0 +604 89 loss.margin 6.960256952526308 +604 89 optimizer.lr 0.001030327747192347 +604 89 negative_sampler.num_negs_per_pos 37.0 +604 89 training.batch_size 1.0 +604 90 model.embedding_dim 2.0 +604 90 loss.margin 7.167062485686047 +604 90 optimizer.lr 0.0028600305660418427 +604 90 negative_sampler.num_negs_per_pos 22.0 +604 90 training.batch_size 2.0 +604 91 model.embedding_dim 0.0 +604 91 loss.margin 5.151740972434969 +604 91 optimizer.lr 0.011287242206577322 +604 91 negative_sampler.num_negs_per_pos 24.0 +604 91 training.batch_size 1.0 +604 92 model.embedding_dim 2.0 +604 92 loss.margin 8.3205053433837 +604 92 optimizer.lr 0.06101865622148856 +604 92 negative_sampler.num_negs_per_pos 44.0 +604 92 training.batch_size 1.0 +604 93 
model.embedding_dim 0.0 +604 93 loss.margin 4.4854643556037 +604 93 optimizer.lr 0.0011760476956173857 +604 93 negative_sampler.num_negs_per_pos 20.0 +604 93 training.batch_size 0.0 +604 94 model.embedding_dim 0.0 +604 94 loss.margin 5.627227274584155 +604 94 optimizer.lr 0.05564500096271908 +604 94 negative_sampler.num_negs_per_pos 10.0 +604 94 training.batch_size 0.0 +604 95 model.embedding_dim 0.0 +604 95 loss.margin 7.104484404724156 +604 95 optimizer.lr 0.04239041731895688 +604 95 negative_sampler.num_negs_per_pos 94.0 +604 95 training.batch_size 0.0 +604 96 model.embedding_dim 2.0 +604 96 loss.margin 7.932314780748465 +604 96 optimizer.lr 0.005300239733728274 +604 96 negative_sampler.num_negs_per_pos 5.0 +604 96 training.batch_size 0.0 +604 97 model.embedding_dim 2.0 +604 97 loss.margin 1.1981346541352746 +604 97 optimizer.lr 0.02508164379701198 +604 97 negative_sampler.num_negs_per_pos 38.0 +604 97 training.batch_size 1.0 +604 98 model.embedding_dim 2.0 +604 98 loss.margin 2.128653712101049 +604 98 optimizer.lr 0.0017561741091964957 +604 98 negative_sampler.num_negs_per_pos 74.0 +604 98 training.batch_size 0.0 +604 99 model.embedding_dim 1.0 +604 99 loss.margin 8.762252155114753 +604 99 optimizer.lr 0.033638841800728625 +604 99 negative_sampler.num_negs_per_pos 81.0 +604 99 training.batch_size 0.0 +604 100 model.embedding_dim 2.0 +604 100 loss.margin 0.8636595532590752 +604 100 optimizer.lr 0.05712685398495597 +604 100 negative_sampler.num_negs_per_pos 80.0 +604 100 training.batch_size 0.0 +604 1 dataset """kinships""" +604 1 model """rotate""" +604 1 loss """marginranking""" +604 1 regularizer """no""" +604 1 optimizer """adam""" +604 1 training_loop """owa""" +604 1 negative_sampler """basic""" +604 1 evaluator """rankbased""" +604 2 dataset """kinships""" +604 2 model """rotate""" +604 2 loss """marginranking""" +604 2 regularizer """no""" +604 2 optimizer """adam""" +604 2 training_loop """owa""" +604 2 negative_sampler """basic""" +604 2 evaluator 
"""rankbased""" +604 3 dataset """kinships""" +604 3 model """rotate""" +604 3 loss """marginranking""" +604 3 regularizer """no""" +604 3 optimizer """adam""" +604 3 training_loop """owa""" +604 3 negative_sampler """basic""" +604 3 evaluator """rankbased""" +604 4 dataset """kinships""" +604 4 model """rotate""" +604 4 loss """marginranking""" +604 4 regularizer """no""" +604 4 optimizer """adam""" +604 4 training_loop """owa""" +604 4 negative_sampler """basic""" +604 4 evaluator """rankbased""" +604 5 dataset """kinships""" +604 5 model """rotate""" +604 5 loss """marginranking""" +604 5 regularizer """no""" +604 5 optimizer """adam""" +604 5 training_loop """owa""" +604 5 negative_sampler """basic""" +604 5 evaluator """rankbased""" +604 6 dataset """kinships""" +604 6 model """rotate""" +604 6 loss """marginranking""" +604 6 regularizer """no""" +604 6 optimizer """adam""" +604 6 training_loop """owa""" +604 6 negative_sampler """basic""" +604 6 evaluator """rankbased""" +604 7 dataset """kinships""" +604 7 model """rotate""" +604 7 loss """marginranking""" +604 7 regularizer """no""" +604 7 optimizer """adam""" +604 7 training_loop """owa""" +604 7 negative_sampler """basic""" +604 7 evaluator """rankbased""" +604 8 dataset """kinships""" +604 8 model """rotate""" +604 8 loss """marginranking""" +604 8 regularizer """no""" +604 8 optimizer """adam""" +604 8 training_loop """owa""" +604 8 negative_sampler """basic""" +604 8 evaluator """rankbased""" +604 9 dataset """kinships""" +604 9 model """rotate""" +604 9 loss """marginranking""" +604 9 regularizer """no""" +604 9 optimizer """adam""" +604 9 training_loop """owa""" +604 9 negative_sampler """basic""" +604 9 evaluator """rankbased""" +604 10 dataset """kinships""" +604 10 model """rotate""" +604 10 loss """marginranking""" +604 10 regularizer """no""" +604 10 optimizer """adam""" +604 10 training_loop """owa""" +604 10 negative_sampler """basic""" +604 10 evaluator """rankbased""" +604 11 dataset 
"""kinships""" +604 11 model """rotate""" +604 11 loss """marginranking""" +604 11 regularizer """no""" +604 11 optimizer """adam""" +604 11 training_loop """owa""" +604 11 negative_sampler """basic""" +604 11 evaluator """rankbased""" +604 12 dataset """kinships""" +604 12 model """rotate""" +604 12 loss """marginranking""" +604 12 regularizer """no""" +604 12 optimizer """adam""" +604 12 training_loop """owa""" +604 12 negative_sampler """basic""" +604 12 evaluator """rankbased""" +604 13 dataset """kinships""" +604 13 model """rotate""" +604 13 loss """marginranking""" +604 13 regularizer """no""" +604 13 optimizer """adam""" +604 13 training_loop """owa""" +604 13 negative_sampler """basic""" +604 13 evaluator """rankbased""" +604 14 dataset """kinships""" +604 14 model """rotate""" +604 14 loss """marginranking""" +604 14 regularizer """no""" +604 14 optimizer """adam""" +604 14 training_loop """owa""" +604 14 negative_sampler """basic""" +604 14 evaluator """rankbased""" +604 15 dataset """kinships""" +604 15 model """rotate""" +604 15 loss """marginranking""" +604 15 regularizer """no""" +604 15 optimizer """adam""" +604 15 training_loop """owa""" +604 15 negative_sampler """basic""" +604 15 evaluator """rankbased""" +604 16 dataset """kinships""" +604 16 model """rotate""" +604 16 loss """marginranking""" +604 16 regularizer """no""" +604 16 optimizer """adam""" +604 16 training_loop """owa""" +604 16 negative_sampler """basic""" +604 16 evaluator """rankbased""" +604 17 dataset """kinships""" +604 17 model """rotate""" +604 17 loss """marginranking""" +604 17 regularizer """no""" +604 17 optimizer """adam""" +604 17 training_loop """owa""" +604 17 negative_sampler """basic""" +604 17 evaluator """rankbased""" +604 18 dataset """kinships""" +604 18 model """rotate""" +604 18 loss """marginranking""" +604 18 regularizer """no""" +604 18 optimizer """adam""" +604 18 training_loop """owa""" +604 18 negative_sampler """basic""" +604 18 evaluator """rankbased""" 
+604 19 dataset """kinships""" +604 19 model """rotate""" +604 19 loss """marginranking""" +604 19 regularizer """no""" +604 19 optimizer """adam""" +604 19 training_loop """owa""" +604 19 negative_sampler """basic""" +604 19 evaluator """rankbased""" +604 20 dataset """kinships""" +604 20 model """rotate""" +604 20 loss """marginranking""" +604 20 regularizer """no""" +604 20 optimizer """adam""" +604 20 training_loop """owa""" +604 20 negative_sampler """basic""" +604 20 evaluator """rankbased""" +604 21 dataset """kinships""" +604 21 model """rotate""" +604 21 loss """marginranking""" +604 21 regularizer """no""" +604 21 optimizer """adam""" +604 21 training_loop """owa""" +604 21 negative_sampler """basic""" +604 21 evaluator """rankbased""" +604 22 dataset """kinships""" +604 22 model """rotate""" +604 22 loss """marginranking""" +604 22 regularizer """no""" +604 22 optimizer """adam""" +604 22 training_loop """owa""" +604 22 negative_sampler """basic""" +604 22 evaluator """rankbased""" +604 23 dataset """kinships""" +604 23 model """rotate""" +604 23 loss """marginranking""" +604 23 regularizer """no""" +604 23 optimizer """adam""" +604 23 training_loop """owa""" +604 23 negative_sampler """basic""" +604 23 evaluator """rankbased""" +604 24 dataset """kinships""" +604 24 model """rotate""" +604 24 loss """marginranking""" +604 24 regularizer """no""" +604 24 optimizer """adam""" +604 24 training_loop """owa""" +604 24 negative_sampler """basic""" +604 24 evaluator """rankbased""" +604 25 dataset """kinships""" +604 25 model """rotate""" +604 25 loss """marginranking""" +604 25 regularizer """no""" +604 25 optimizer """adam""" +604 25 training_loop """owa""" +604 25 negative_sampler """basic""" +604 25 evaluator """rankbased""" +604 26 dataset """kinships""" +604 26 model """rotate""" +604 26 loss """marginranking""" +604 26 regularizer """no""" +604 26 optimizer """adam""" +604 26 training_loop """owa""" +604 26 negative_sampler """basic""" +604 26 evaluator 
"""rankbased""" +604 27 dataset """kinships""" +604 27 model """rotate""" +604 27 loss """marginranking""" +604 27 regularizer """no""" +604 27 optimizer """adam""" +604 27 training_loop """owa""" +604 27 negative_sampler """basic""" +604 27 evaluator """rankbased""" +604 28 dataset """kinships""" +604 28 model """rotate""" +604 28 loss """marginranking""" +604 28 regularizer """no""" +604 28 optimizer """adam""" +604 28 training_loop """owa""" +604 28 negative_sampler """basic""" +604 28 evaluator """rankbased""" +604 29 dataset """kinships""" +604 29 model """rotate""" +604 29 loss """marginranking""" +604 29 regularizer """no""" +604 29 optimizer """adam""" +604 29 training_loop """owa""" +604 29 negative_sampler """basic""" +604 29 evaluator """rankbased""" +604 30 dataset """kinships""" +604 30 model """rotate""" +604 30 loss """marginranking""" +604 30 regularizer """no""" +604 30 optimizer """adam""" +604 30 training_loop """owa""" +604 30 negative_sampler """basic""" +604 30 evaluator """rankbased""" +604 31 dataset """kinships""" +604 31 model """rotate""" +604 31 loss """marginranking""" +604 31 regularizer """no""" +604 31 optimizer """adam""" +604 31 training_loop """owa""" +604 31 negative_sampler """basic""" +604 31 evaluator """rankbased""" +604 32 dataset """kinships""" +604 32 model """rotate""" +604 32 loss """marginranking""" +604 32 regularizer """no""" +604 32 optimizer """adam""" +604 32 training_loop """owa""" +604 32 negative_sampler """basic""" +604 32 evaluator """rankbased""" +604 33 dataset """kinships""" +604 33 model """rotate""" +604 33 loss """marginranking""" +604 33 regularizer """no""" +604 33 optimizer """adam""" +604 33 training_loop """owa""" +604 33 negative_sampler """basic""" +604 33 evaluator """rankbased""" +604 34 dataset """kinships""" +604 34 model """rotate""" +604 34 loss """marginranking""" +604 34 regularizer """no""" +604 34 optimizer """adam""" +604 34 training_loop """owa""" +604 34 negative_sampler """basic""" 
+604 34 evaluator """rankbased""" +604 35 dataset """kinships""" +604 35 model """rotate""" +604 35 loss """marginranking""" +604 35 regularizer """no""" +604 35 optimizer """adam""" +604 35 training_loop """owa""" +604 35 negative_sampler """basic""" +604 35 evaluator """rankbased""" +604 36 dataset """kinships""" +604 36 model """rotate""" +604 36 loss """marginranking""" +604 36 regularizer """no""" +604 36 optimizer """adam""" +604 36 training_loop """owa""" +604 36 negative_sampler """basic""" +604 36 evaluator """rankbased""" +604 37 dataset """kinships""" +604 37 model """rotate""" +604 37 loss """marginranking""" +604 37 regularizer """no""" +604 37 optimizer """adam""" +604 37 training_loop """owa""" +604 37 negative_sampler """basic""" +604 37 evaluator """rankbased""" +604 38 dataset """kinships""" +604 38 model """rotate""" +604 38 loss """marginranking""" +604 38 regularizer """no""" +604 38 optimizer """adam""" +604 38 training_loop """owa""" +604 38 negative_sampler """basic""" +604 38 evaluator """rankbased""" +604 39 dataset """kinships""" +604 39 model """rotate""" +604 39 loss """marginranking""" +604 39 regularizer """no""" +604 39 optimizer """adam""" +604 39 training_loop """owa""" +604 39 negative_sampler """basic""" +604 39 evaluator """rankbased""" +604 40 dataset """kinships""" +604 40 model """rotate""" +604 40 loss """marginranking""" +604 40 regularizer """no""" +604 40 optimizer """adam""" +604 40 training_loop """owa""" +604 40 negative_sampler """basic""" +604 40 evaluator """rankbased""" +604 41 dataset """kinships""" +604 41 model """rotate""" +604 41 loss """marginranking""" +604 41 regularizer """no""" +604 41 optimizer """adam""" +604 41 training_loop """owa""" +604 41 negative_sampler """basic""" +604 41 evaluator """rankbased""" +604 42 dataset """kinships""" +604 42 model """rotate""" +604 42 loss """marginranking""" +604 42 regularizer """no""" +604 42 optimizer """adam""" +604 42 training_loop """owa""" +604 42 
negative_sampler """basic""" +604 42 evaluator """rankbased""" +604 43 dataset """kinships""" +604 43 model """rotate""" +604 43 loss """marginranking""" +604 43 regularizer """no""" +604 43 optimizer """adam""" +604 43 training_loop """owa""" +604 43 negative_sampler """basic""" +604 43 evaluator """rankbased""" +604 44 dataset """kinships""" +604 44 model """rotate""" +604 44 loss """marginranking""" +604 44 regularizer """no""" +604 44 optimizer """adam""" +604 44 training_loop """owa""" +604 44 negative_sampler """basic""" +604 44 evaluator """rankbased""" +604 45 dataset """kinships""" +604 45 model """rotate""" +604 45 loss """marginranking""" +604 45 regularizer """no""" +604 45 optimizer """adam""" +604 45 training_loop """owa""" +604 45 negative_sampler """basic""" +604 45 evaluator """rankbased""" +604 46 dataset """kinships""" +604 46 model """rotate""" +604 46 loss """marginranking""" +604 46 regularizer """no""" +604 46 optimizer """adam""" +604 46 training_loop """owa""" +604 46 negative_sampler """basic""" +604 46 evaluator """rankbased""" +604 47 dataset """kinships""" +604 47 model """rotate""" +604 47 loss """marginranking""" +604 47 regularizer """no""" +604 47 optimizer """adam""" +604 47 training_loop """owa""" +604 47 negative_sampler """basic""" +604 47 evaluator """rankbased""" +604 48 dataset """kinships""" +604 48 model """rotate""" +604 48 loss """marginranking""" +604 48 regularizer """no""" +604 48 optimizer """adam""" +604 48 training_loop """owa""" +604 48 negative_sampler """basic""" +604 48 evaluator """rankbased""" +604 49 dataset """kinships""" +604 49 model """rotate""" +604 49 loss """marginranking""" +604 49 regularizer """no""" +604 49 optimizer """adam""" +604 49 training_loop """owa""" +604 49 negative_sampler """basic""" +604 49 evaluator """rankbased""" +604 50 dataset """kinships""" +604 50 model """rotate""" +604 50 loss """marginranking""" +604 50 regularizer """no""" +604 50 optimizer """adam""" +604 50 training_loop 
"""owa""" +604 50 negative_sampler """basic""" +604 50 evaluator """rankbased""" +604 51 dataset """kinships""" +604 51 model """rotate""" +604 51 loss """marginranking""" +604 51 regularizer """no""" +604 51 optimizer """adam""" +604 51 training_loop """owa""" +604 51 negative_sampler """basic""" +604 51 evaluator """rankbased""" +604 52 dataset """kinships""" +604 52 model """rotate""" +604 52 loss """marginranking""" +604 52 regularizer """no""" +604 52 optimizer """adam""" +604 52 training_loop """owa""" +604 52 negative_sampler """basic""" +604 52 evaluator """rankbased""" +604 53 dataset """kinships""" +604 53 model """rotate""" +604 53 loss """marginranking""" +604 53 regularizer """no""" +604 53 optimizer """adam""" +604 53 training_loop """owa""" +604 53 negative_sampler """basic""" +604 53 evaluator """rankbased""" +604 54 dataset """kinships""" +604 54 model """rotate""" +604 54 loss """marginranking""" +604 54 regularizer """no""" +604 54 optimizer """adam""" +604 54 training_loop """owa""" +604 54 negative_sampler """basic""" +604 54 evaluator """rankbased""" +604 55 dataset """kinships""" +604 55 model """rotate""" +604 55 loss """marginranking""" +604 55 regularizer """no""" +604 55 optimizer """adam""" +604 55 training_loop """owa""" +604 55 negative_sampler """basic""" +604 55 evaluator """rankbased""" +604 56 dataset """kinships""" +604 56 model """rotate""" +604 56 loss """marginranking""" +604 56 regularizer """no""" +604 56 optimizer """adam""" +604 56 training_loop """owa""" +604 56 negative_sampler """basic""" +604 56 evaluator """rankbased""" +604 57 dataset """kinships""" +604 57 model """rotate""" +604 57 loss """marginranking""" +604 57 regularizer """no""" +604 57 optimizer """adam""" +604 57 training_loop """owa""" +604 57 negative_sampler """basic""" +604 57 evaluator """rankbased""" +604 58 dataset """kinships""" +604 58 model """rotate""" +604 58 loss """marginranking""" +604 58 regularizer """no""" +604 58 optimizer """adam""" +604 
58 training_loop """owa""" +604 58 negative_sampler """basic""" +604 58 evaluator """rankbased""" +604 59 dataset """kinships""" +604 59 model """rotate""" +604 59 loss """marginranking""" +604 59 regularizer """no""" +604 59 optimizer """adam""" +604 59 training_loop """owa""" +604 59 negative_sampler """basic""" +604 59 evaluator """rankbased""" +604 60 dataset """kinships""" +604 60 model """rotate""" +604 60 loss """marginranking""" +604 60 regularizer """no""" +604 60 optimizer """adam""" +604 60 training_loop """owa""" +604 60 negative_sampler """basic""" +604 60 evaluator """rankbased""" +604 61 dataset """kinships""" +604 61 model """rotate""" +604 61 loss """marginranking""" +604 61 regularizer """no""" +604 61 optimizer """adam""" +604 61 training_loop """owa""" +604 61 negative_sampler """basic""" +604 61 evaluator """rankbased""" +604 62 dataset """kinships""" +604 62 model """rotate""" +604 62 loss """marginranking""" +604 62 regularizer """no""" +604 62 optimizer """adam""" +604 62 training_loop """owa""" +604 62 negative_sampler """basic""" +604 62 evaluator """rankbased""" +604 63 dataset """kinships""" +604 63 model """rotate""" +604 63 loss """marginranking""" +604 63 regularizer """no""" +604 63 optimizer """adam""" +604 63 training_loop """owa""" +604 63 negative_sampler """basic""" +604 63 evaluator """rankbased""" +604 64 dataset """kinships""" +604 64 model """rotate""" +604 64 loss """marginranking""" +604 64 regularizer """no""" +604 64 optimizer """adam""" +604 64 training_loop """owa""" +604 64 negative_sampler """basic""" +604 64 evaluator """rankbased""" +604 65 dataset """kinships""" +604 65 model """rotate""" +604 65 loss """marginranking""" +604 65 regularizer """no""" +604 65 optimizer """adam""" +604 65 training_loop """owa""" +604 65 negative_sampler """basic""" +604 65 evaluator """rankbased""" +604 66 dataset """kinships""" +604 66 model """rotate""" +604 66 loss """marginranking""" +604 66 regularizer """no""" +604 66 optimizer 
"""adam""" +604 66 training_loop """owa""" +604 66 negative_sampler """basic""" +604 66 evaluator """rankbased""" +604 67 dataset """kinships""" +604 67 model """rotate""" +604 67 loss """marginranking""" +604 67 regularizer """no""" +604 67 optimizer """adam""" +604 67 training_loop """owa""" +604 67 negative_sampler """basic""" +604 67 evaluator """rankbased""" +604 68 dataset """kinships""" +604 68 model """rotate""" +604 68 loss """marginranking""" +604 68 regularizer """no""" +604 68 optimizer """adam""" +604 68 training_loop """owa""" +604 68 negative_sampler """basic""" +604 68 evaluator """rankbased""" +604 69 dataset """kinships""" +604 69 model """rotate""" +604 69 loss """marginranking""" +604 69 regularizer """no""" +604 69 optimizer """adam""" +604 69 training_loop """owa""" +604 69 negative_sampler """basic""" +604 69 evaluator """rankbased""" +604 70 dataset """kinships""" +604 70 model """rotate""" +604 70 loss """marginranking""" +604 70 regularizer """no""" +604 70 optimizer """adam""" +604 70 training_loop """owa""" +604 70 negative_sampler """basic""" +604 70 evaluator """rankbased""" +604 71 dataset """kinships""" +604 71 model """rotate""" +604 71 loss """marginranking""" +604 71 regularizer """no""" +604 71 optimizer """adam""" +604 71 training_loop """owa""" +604 71 negative_sampler """basic""" +604 71 evaluator """rankbased""" +604 72 dataset """kinships""" +604 72 model """rotate""" +604 72 loss """marginranking""" +604 72 regularizer """no""" +604 72 optimizer """adam""" +604 72 training_loop """owa""" +604 72 negative_sampler """basic""" +604 72 evaluator """rankbased""" +604 73 dataset """kinships""" +604 73 model """rotate""" +604 73 loss """marginranking""" +604 73 regularizer """no""" +604 73 optimizer """adam""" +604 73 training_loop """owa""" +604 73 negative_sampler """basic""" +604 73 evaluator """rankbased""" +604 74 dataset """kinships""" +604 74 model """rotate""" +604 74 loss """marginranking""" +604 74 regularizer """no""" 
+604 74 optimizer """adam""" +604 74 training_loop """owa""" +604 74 negative_sampler """basic""" +604 74 evaluator """rankbased""" +604 75 dataset """kinships""" +604 75 model """rotate""" +604 75 loss """marginranking""" +604 75 regularizer """no""" +604 75 optimizer """adam""" +604 75 training_loop """owa""" +604 75 negative_sampler """basic""" +604 75 evaluator """rankbased""" +604 76 dataset """kinships""" +604 76 model """rotate""" +604 76 loss """marginranking""" +604 76 regularizer """no""" +604 76 optimizer """adam""" +604 76 training_loop """owa""" +604 76 negative_sampler """basic""" +604 76 evaluator """rankbased""" +604 77 dataset """kinships""" +604 77 model """rotate""" +604 77 loss """marginranking""" +604 77 regularizer """no""" +604 77 optimizer """adam""" +604 77 training_loop """owa""" +604 77 negative_sampler """basic""" +604 77 evaluator """rankbased""" +604 78 dataset """kinships""" +604 78 model """rotate""" +604 78 loss """marginranking""" +604 78 regularizer """no""" +604 78 optimizer """adam""" +604 78 training_loop """owa""" +604 78 negative_sampler """basic""" +604 78 evaluator """rankbased""" +604 79 dataset """kinships""" +604 79 model """rotate""" +604 79 loss """marginranking""" +604 79 regularizer """no""" +604 79 optimizer """adam""" +604 79 training_loop """owa""" +604 79 negative_sampler """basic""" +604 79 evaluator """rankbased""" +604 80 dataset """kinships""" +604 80 model """rotate""" +604 80 loss """marginranking""" +604 80 regularizer """no""" +604 80 optimizer """adam""" +604 80 training_loop """owa""" +604 80 negative_sampler """basic""" +604 80 evaluator """rankbased""" +604 81 dataset """kinships""" +604 81 model """rotate""" +604 81 loss """marginranking""" +604 81 regularizer """no""" +604 81 optimizer """adam""" +604 81 training_loop """owa""" +604 81 negative_sampler """basic""" +604 81 evaluator """rankbased""" +604 82 dataset """kinships""" +604 82 model """rotate""" +604 82 loss """marginranking""" +604 82 
regularizer """no""" +604 82 optimizer """adam""" +604 82 training_loop """owa""" +604 82 negative_sampler """basic""" +604 82 evaluator """rankbased""" +604 83 dataset """kinships""" +604 83 model """rotate""" +604 83 loss """marginranking""" +604 83 regularizer """no""" +604 83 optimizer """adam""" +604 83 training_loop """owa""" +604 83 negative_sampler """basic""" +604 83 evaluator """rankbased""" +604 84 dataset """kinships""" +604 84 model """rotate""" +604 84 loss """marginranking""" +604 84 regularizer """no""" +604 84 optimizer """adam""" +604 84 training_loop """owa""" +604 84 negative_sampler """basic""" +604 84 evaluator """rankbased""" +604 85 dataset """kinships""" +604 85 model """rotate""" +604 85 loss """marginranking""" +604 85 regularizer """no""" +604 85 optimizer """adam""" +604 85 training_loop """owa""" +604 85 negative_sampler """basic""" +604 85 evaluator """rankbased""" +604 86 dataset """kinships""" +604 86 model """rotate""" +604 86 loss """marginranking""" +604 86 regularizer """no""" +604 86 optimizer """adam""" +604 86 training_loop """owa""" +604 86 negative_sampler """basic""" +604 86 evaluator """rankbased""" +604 87 dataset """kinships""" +604 87 model """rotate""" +604 87 loss """marginranking""" +604 87 regularizer """no""" +604 87 optimizer """adam""" +604 87 training_loop """owa""" +604 87 negative_sampler """basic""" +604 87 evaluator """rankbased""" +604 88 dataset """kinships""" +604 88 model """rotate""" +604 88 loss """marginranking""" +604 88 regularizer """no""" +604 88 optimizer """adam""" +604 88 training_loop """owa""" +604 88 negative_sampler """basic""" +604 88 evaluator """rankbased""" +604 89 dataset """kinships""" +604 89 model """rotate""" +604 89 loss """marginranking""" +604 89 regularizer """no""" +604 89 optimizer """adam""" +604 89 training_loop """owa""" +604 89 negative_sampler """basic""" +604 89 evaluator """rankbased""" +604 90 dataset """kinships""" +604 90 model """rotate""" +604 90 loss 
"""marginranking""" +604 90 regularizer """no""" +604 90 optimizer """adam""" +604 90 training_loop """owa""" +604 90 negative_sampler """basic""" +604 90 evaluator """rankbased""" +604 91 dataset """kinships""" +604 91 model """rotate""" +604 91 loss """marginranking""" +604 91 regularizer """no""" +604 91 optimizer """adam""" +604 91 training_loop """owa""" +604 91 negative_sampler """basic""" +604 91 evaluator """rankbased""" +604 92 dataset """kinships""" +604 92 model """rotate""" +604 92 loss """marginranking""" +604 92 regularizer """no""" +604 92 optimizer """adam""" +604 92 training_loop """owa""" +604 92 negative_sampler """basic""" +604 92 evaluator """rankbased""" +604 93 dataset """kinships""" +604 93 model """rotate""" +604 93 loss """marginranking""" +604 93 regularizer """no""" +604 93 optimizer """adam""" +604 93 training_loop """owa""" +604 93 negative_sampler """basic""" +604 93 evaluator """rankbased""" +604 94 dataset """kinships""" +604 94 model """rotate""" +604 94 loss """marginranking""" +604 94 regularizer """no""" +604 94 optimizer """adam""" +604 94 training_loop """owa""" +604 94 negative_sampler """basic""" +604 94 evaluator """rankbased""" +604 95 dataset """kinships""" +604 95 model """rotate""" +604 95 loss """marginranking""" +604 95 regularizer """no""" +604 95 optimizer """adam""" +604 95 training_loop """owa""" +604 95 negative_sampler """basic""" +604 95 evaluator """rankbased""" +604 96 dataset """kinships""" +604 96 model """rotate""" +604 96 loss """marginranking""" +604 96 regularizer """no""" +604 96 optimizer """adam""" +604 96 training_loop """owa""" +604 96 negative_sampler """basic""" +604 96 evaluator """rankbased""" +604 97 dataset """kinships""" +604 97 model """rotate""" +604 97 loss """marginranking""" +604 97 regularizer """no""" +604 97 optimizer """adam""" +604 97 training_loop """owa""" +604 97 negative_sampler """basic""" +604 97 evaluator """rankbased""" +604 98 dataset """kinships""" +604 98 model 
"""rotate""" +604 98 loss """marginranking""" +604 98 regularizer """no""" +604 98 optimizer """adam""" +604 98 training_loop """owa""" +604 98 negative_sampler """basic""" +604 98 evaluator """rankbased""" +604 99 dataset """kinships""" +604 99 model """rotate""" +604 99 loss """marginranking""" +604 99 regularizer """no""" +604 99 optimizer """adam""" +604 99 training_loop """owa""" +604 99 negative_sampler """basic""" +604 99 evaluator """rankbased""" +604 100 dataset """kinships""" +604 100 model """rotate""" +604 100 loss """marginranking""" +604 100 regularizer """no""" +604 100 optimizer """adam""" +604 100 training_loop """owa""" +604 100 negative_sampler """basic""" +604 100 evaluator """rankbased""" +605 1 model.embedding_dim 2.0 +605 1 optimizer.lr 0.0018047041447954695 +605 1 training.batch_size 0.0 +605 1 training.label_smoothing 0.7723031202495156 +605 2 model.embedding_dim 1.0 +605 2 optimizer.lr 0.005706404123916034 +605 2 training.batch_size 2.0 +605 2 training.label_smoothing 0.05074164799970397 +605 3 model.embedding_dim 0.0 +605 3 optimizer.lr 0.022891606386150707 +605 3 training.batch_size 0.0 +605 3 training.label_smoothing 0.09223763101772528 +605 4 model.embedding_dim 2.0 +605 4 optimizer.lr 0.02111348549094274 +605 4 training.batch_size 2.0 +605 4 training.label_smoothing 0.016118096462717387 +605 5 model.embedding_dim 0.0 +605 5 optimizer.lr 0.0036635505882890757 +605 5 training.batch_size 0.0 +605 5 training.label_smoothing 0.007298209730208984 +605 6 model.embedding_dim 1.0 +605 6 optimizer.lr 0.0038433088214398768 +605 6 training.batch_size 1.0 +605 6 training.label_smoothing 0.012256139532016552 +605 7 model.embedding_dim 1.0 +605 7 optimizer.lr 0.014112096216074682 +605 7 training.batch_size 2.0 +605 7 training.label_smoothing 0.004078724995615097 +605 8 model.embedding_dim 2.0 +605 8 optimizer.lr 0.0013057436716423752 +605 8 training.batch_size 1.0 +605 8 training.label_smoothing 0.009557984763506708 +605 9 model.embedding_dim 2.0 
+605 9 optimizer.lr 0.03751563094473671 +605 9 training.batch_size 0.0 +605 9 training.label_smoothing 0.13519504199293336 +605 10 model.embedding_dim 1.0 +605 10 optimizer.lr 0.057697529550852875 +605 10 training.batch_size 1.0 +605 10 training.label_smoothing 0.6568835354304616 +605 11 model.embedding_dim 1.0 +605 11 optimizer.lr 0.006924694904321727 +605 11 training.batch_size 0.0 +605 11 training.label_smoothing 0.027388156719279154 +605 12 model.embedding_dim 2.0 +605 12 optimizer.lr 0.0047657645452084604 +605 12 training.batch_size 2.0 +605 12 training.label_smoothing 0.00925749136014103 +605 13 model.embedding_dim 1.0 +605 13 optimizer.lr 0.004148128606343678 +605 13 training.batch_size 1.0 +605 13 training.label_smoothing 0.005785024493232255 +605 14 model.embedding_dim 2.0 +605 14 optimizer.lr 0.019850171319091652 +605 14 training.batch_size 2.0 +605 14 training.label_smoothing 0.0036724119992702633 +605 15 model.embedding_dim 1.0 +605 15 optimizer.lr 0.00880504300702057 +605 15 training.batch_size 0.0 +605 15 training.label_smoothing 0.03109479793218533 +605 16 model.embedding_dim 2.0 +605 16 optimizer.lr 0.0017917434285267453 +605 16 training.batch_size 1.0 +605 16 training.label_smoothing 0.017725556281377886 +605 17 model.embedding_dim 2.0 +605 17 optimizer.lr 0.0018787129406004914 +605 17 training.batch_size 2.0 +605 17 training.label_smoothing 0.003963390576026357 +605 18 model.embedding_dim 1.0 +605 18 optimizer.lr 0.05108986291748364 +605 18 training.batch_size 1.0 +605 18 training.label_smoothing 0.04638872877420472 +605 19 model.embedding_dim 0.0 +605 19 optimizer.lr 0.004551274239612205 +605 19 training.batch_size 0.0 +605 19 training.label_smoothing 0.07095270542623484 +605 20 model.embedding_dim 2.0 +605 20 optimizer.lr 0.022304931205937358 +605 20 training.batch_size 0.0 +605 20 training.label_smoothing 0.012398817560076462 +605 21 model.embedding_dim 2.0 +605 21 optimizer.lr 0.0010586911532792754 +605 21 training.batch_size 2.0 +605 21 
training.label_smoothing 0.004625306623802653 +605 22 model.embedding_dim 1.0 +605 22 optimizer.lr 0.0018247782769198616 +605 22 training.batch_size 2.0 +605 22 training.label_smoothing 0.0011044773147090348 +605 23 model.embedding_dim 2.0 +605 23 optimizer.lr 0.002994396184522418 +605 23 training.batch_size 1.0 +605 23 training.label_smoothing 0.01021524928131058 +605 24 model.embedding_dim 0.0 +605 24 optimizer.lr 0.07580522715470549 +605 24 training.batch_size 1.0 +605 24 training.label_smoothing 0.009405652373757642 +605 25 model.embedding_dim 1.0 +605 25 optimizer.lr 0.02171328962202908 +605 25 training.batch_size 2.0 +605 25 training.label_smoothing 0.028381609825223197 +605 26 model.embedding_dim 2.0 +605 26 optimizer.lr 0.004619179583872861 +605 26 training.batch_size 2.0 +605 26 training.label_smoothing 0.0016845754237918716 +605 27 model.embedding_dim 0.0 +605 27 optimizer.lr 0.05918058480013498 +605 27 training.batch_size 2.0 +605 27 training.label_smoothing 0.6847437310756646 +605 28 model.embedding_dim 1.0 +605 28 optimizer.lr 0.001577796476221272 +605 28 training.batch_size 2.0 +605 28 training.label_smoothing 0.009165298528483143 +605 29 model.embedding_dim 1.0 +605 29 optimizer.lr 0.0028024910090312188 +605 29 training.batch_size 2.0 +605 29 training.label_smoothing 0.08594471674256864 +605 30 model.embedding_dim 2.0 +605 30 optimizer.lr 0.002586715609643933 +605 30 training.batch_size 1.0 +605 30 training.label_smoothing 0.8944384930810846 +605 31 model.embedding_dim 0.0 +605 31 optimizer.lr 0.0034294500337779883 +605 31 training.batch_size 1.0 +605 31 training.label_smoothing 0.04153842266103457 +605 32 model.embedding_dim 0.0 +605 32 optimizer.lr 0.01340726595488278 +605 32 training.batch_size 1.0 +605 32 training.label_smoothing 0.01081097340748691 +605 33 model.embedding_dim 2.0 +605 33 optimizer.lr 0.0042580821354678165 +605 33 training.batch_size 0.0 +605 33 training.label_smoothing 0.00971058098522717 +605 34 model.embedding_dim 1.0 +605 34 
optimizer.lr 0.05670090958792125 +605 34 training.batch_size 2.0 +605 34 training.label_smoothing 0.007954174724942177 +605 35 model.embedding_dim 1.0 +605 35 optimizer.lr 0.011536714388791856 +605 35 training.batch_size 2.0 +605 35 training.label_smoothing 0.08592099116268703 +605 36 model.embedding_dim 0.0 +605 36 optimizer.lr 0.0191989397973863 +605 36 training.batch_size 0.0 +605 36 training.label_smoothing 0.09504242794577294 +605 37 model.embedding_dim 0.0 +605 37 optimizer.lr 0.07889403858946543 +605 37 training.batch_size 1.0 +605 37 training.label_smoothing 0.20502415215932623 +605 38 model.embedding_dim 1.0 +605 38 optimizer.lr 0.01656328519541511 +605 38 training.batch_size 0.0 +605 38 training.label_smoothing 0.004028483349090441 +605 39 model.embedding_dim 0.0 +605 39 optimizer.lr 0.08737890985107136 +605 39 training.batch_size 1.0 +605 39 training.label_smoothing 0.8790225438353644 +605 40 model.embedding_dim 1.0 +605 40 optimizer.lr 0.002619477471915499 +605 40 training.batch_size 1.0 +605 40 training.label_smoothing 0.0011363091323600178 +605 41 model.embedding_dim 1.0 +605 41 optimizer.lr 0.020443310014315916 +605 41 training.batch_size 1.0 +605 41 training.label_smoothing 0.10329626497270376 +605 42 model.embedding_dim 2.0 +605 42 optimizer.lr 0.0014701615292445778 +605 42 training.batch_size 2.0 +605 42 training.label_smoothing 0.004419465525418232 +605 43 model.embedding_dim 2.0 +605 43 optimizer.lr 0.0016491365102122056 +605 43 training.batch_size 2.0 +605 43 training.label_smoothing 0.0028324639625323528 +605 44 model.embedding_dim 2.0 +605 44 optimizer.lr 0.0027777047927253964 +605 44 training.batch_size 0.0 +605 44 training.label_smoothing 0.00276773661625285 +605 45 model.embedding_dim 0.0 +605 45 optimizer.lr 0.0016189526454944041 +605 45 training.batch_size 1.0 +605 45 training.label_smoothing 0.0065351089565153795 +605 46 model.embedding_dim 1.0 +605 46 optimizer.lr 0.05901914383240062 +605 46 training.batch_size 1.0 +605 46 
training.label_smoothing 0.05684394972497932 +605 47 model.embedding_dim 2.0 +605 47 optimizer.lr 0.001950146772026949 +605 47 training.batch_size 2.0 +605 47 training.label_smoothing 0.1634715034762147 +605 48 model.embedding_dim 0.0 +605 48 optimizer.lr 0.006494079034185442 +605 48 training.batch_size 1.0 +605 48 training.label_smoothing 0.0014076327776641352 +605 49 model.embedding_dim 0.0 +605 49 optimizer.lr 0.004868491451716097 +605 49 training.batch_size 2.0 +605 49 training.label_smoothing 0.0033792396465702037 +605 50 model.embedding_dim 2.0 +605 50 optimizer.lr 0.01872117427783352 +605 50 training.batch_size 2.0 +605 50 training.label_smoothing 0.5337047458280082 +605 51 model.embedding_dim 0.0 +605 51 optimizer.lr 0.019329026212163435 +605 51 training.batch_size 0.0 +605 51 training.label_smoothing 0.012977636591989137 +605 52 model.embedding_dim 1.0 +605 52 optimizer.lr 0.08698501585556473 +605 52 training.batch_size 0.0 +605 52 training.label_smoothing 0.0031886402121148634 +605 53 model.embedding_dim 1.0 +605 53 optimizer.lr 0.04335380918781771 +605 53 training.batch_size 1.0 +605 53 training.label_smoothing 0.008323713299472882 +605 54 model.embedding_dim 1.0 +605 54 optimizer.lr 0.019806009160872502 +605 54 training.batch_size 0.0 +605 54 training.label_smoothing 0.01942978444942019 +605 55 model.embedding_dim 2.0 +605 55 optimizer.lr 0.0676714783541063 +605 55 training.batch_size 0.0 +605 55 training.label_smoothing 0.24666323831667045 +605 56 model.embedding_dim 2.0 +605 56 optimizer.lr 0.002037339894310727 +605 56 training.batch_size 1.0 +605 56 training.label_smoothing 0.04621866776751078 +605 57 model.embedding_dim 2.0 +605 57 optimizer.lr 0.09041434525998232 +605 57 training.batch_size 1.0 +605 57 training.label_smoothing 0.15336875802859115 +605 58 model.embedding_dim 0.0 +605 58 optimizer.lr 0.008452119123530309 +605 58 training.batch_size 1.0 +605 58 training.label_smoothing 0.015334786147411475 +605 59 model.embedding_dim 1.0 +605 59 
optimizer.lr 0.01352753343443702 +605 59 training.batch_size 1.0 +605 59 training.label_smoothing 0.0075469530318637845 +605 60 model.embedding_dim 0.0 +605 60 optimizer.lr 0.005734534891616419 +605 60 training.batch_size 0.0 +605 60 training.label_smoothing 0.001304988031455823 +605 61 model.embedding_dim 0.0 +605 61 optimizer.lr 0.0015128381987408348 +605 61 training.batch_size 0.0 +605 61 training.label_smoothing 0.008890074152961306 +605 62 model.embedding_dim 2.0 +605 62 optimizer.lr 0.023057842471030005 +605 62 training.batch_size 0.0 +605 62 training.label_smoothing 0.05774194257777919 +605 63 model.embedding_dim 2.0 +605 63 optimizer.lr 0.026290189463796323 +605 63 training.batch_size 0.0 +605 63 training.label_smoothing 0.0313055022683786 +605 64 model.embedding_dim 2.0 +605 64 optimizer.lr 0.0029524856487023673 +605 64 training.batch_size 2.0 +605 64 training.label_smoothing 0.0010357657672583687 +605 65 model.embedding_dim 2.0 +605 65 optimizer.lr 0.0017498408613360423 +605 65 training.batch_size 2.0 +605 65 training.label_smoothing 0.005178670293248955 +605 66 model.embedding_dim 1.0 +605 66 optimizer.lr 0.02915194341907917 +605 66 training.batch_size 2.0 +605 66 training.label_smoothing 0.2034253747844789 +605 67 model.embedding_dim 1.0 +605 67 optimizer.lr 0.00430158493831984 +605 67 training.batch_size 2.0 +605 67 training.label_smoothing 0.00284564638743779 +605 68 model.embedding_dim 2.0 +605 68 optimizer.lr 0.008432122899880637 +605 68 training.batch_size 2.0 +605 68 training.label_smoothing 0.0012044350806863803 +605 69 model.embedding_dim 0.0 +605 69 optimizer.lr 0.004758918697282846 +605 69 training.batch_size 0.0 +605 69 training.label_smoothing 0.0031954166047333753 +605 70 model.embedding_dim 0.0 +605 70 optimizer.lr 0.0254432622229333 +605 70 training.batch_size 0.0 +605 70 training.label_smoothing 0.0042186968926001866 +605 71 model.embedding_dim 0.0 +605 71 optimizer.lr 0.003951865733139952 +605 71 training.batch_size 0.0 +605 71 
training.label_smoothing 0.0015961731079521498 +605 72 model.embedding_dim 2.0 +605 72 optimizer.lr 0.021436597273945694 +605 72 training.batch_size 0.0 +605 72 training.label_smoothing 0.13935253583175533 +605 73 model.embedding_dim 2.0 +605 73 optimizer.lr 0.0017131492557921212 +605 73 training.batch_size 2.0 +605 73 training.label_smoothing 0.0025520142190533866 +605 74 model.embedding_dim 2.0 +605 74 optimizer.lr 0.028047931413200184 +605 74 training.batch_size 1.0 +605 74 training.label_smoothing 0.6560922302634288 +605 75 model.embedding_dim 2.0 +605 75 optimizer.lr 0.05460133323046392 +605 75 training.batch_size 1.0 +605 75 training.label_smoothing 0.5677889684298532 +605 76 model.embedding_dim 1.0 +605 76 optimizer.lr 0.02784784229135639 +605 76 training.batch_size 2.0 +605 76 training.label_smoothing 0.02126339180881548 +605 77 model.embedding_dim 1.0 +605 77 optimizer.lr 0.001105525562797358 +605 77 training.batch_size 1.0 +605 77 training.label_smoothing 0.0075762700292436565 +605 78 model.embedding_dim 0.0 +605 78 optimizer.lr 0.006846667726734503 +605 78 training.batch_size 0.0 +605 78 training.label_smoothing 0.0012880226220783719 +605 79 model.embedding_dim 0.0 +605 79 optimizer.lr 0.02881073033116216 +605 79 training.batch_size 0.0 +605 79 training.label_smoothing 0.0466685114370072 +605 80 model.embedding_dim 2.0 +605 80 optimizer.lr 0.04457081751354639 +605 80 training.batch_size 1.0 +605 80 training.label_smoothing 0.16896561767194834 +605 81 model.embedding_dim 1.0 +605 81 optimizer.lr 0.04147846272127847 +605 81 training.batch_size 2.0 +605 81 training.label_smoothing 0.002717462018468363 +605 82 model.embedding_dim 1.0 +605 82 optimizer.lr 0.007494968627285206 +605 82 training.batch_size 2.0 +605 82 training.label_smoothing 0.030909976566904564 +605 83 model.embedding_dim 0.0 +605 83 optimizer.lr 0.0013953776453396922 +605 83 training.batch_size 2.0 +605 83 training.label_smoothing 0.00393212974084547 +605 84 model.embedding_dim 0.0 +605 84 
optimizer.lr 0.0011119970565239756 +605 84 training.batch_size 1.0 +605 84 training.label_smoothing 0.2295507804935863 +605 85 model.embedding_dim 0.0 +605 85 optimizer.lr 0.016649562459471363 +605 85 training.batch_size 2.0 +605 85 training.label_smoothing 0.006616746137438623 +605 86 model.embedding_dim 1.0 +605 86 optimizer.lr 0.005059358823654088 +605 86 training.batch_size 0.0 +605 86 training.label_smoothing 0.8108228343297663 +605 87 model.embedding_dim 0.0 +605 87 optimizer.lr 0.009580722864912088 +605 87 training.batch_size 0.0 +605 87 training.label_smoothing 0.6660563913547354 +605 88 model.embedding_dim 2.0 +605 88 optimizer.lr 0.0018422457751632385 +605 88 training.batch_size 2.0 +605 88 training.label_smoothing 0.03203158328460041 +605 89 model.embedding_dim 2.0 +605 89 optimizer.lr 0.003524066414503723 +605 89 training.batch_size 1.0 +605 89 training.label_smoothing 0.9233010756163191 +605 90 model.embedding_dim 2.0 +605 90 optimizer.lr 0.014358469805103394 +605 90 training.batch_size 2.0 +605 90 training.label_smoothing 0.12086093885715614 +605 91 model.embedding_dim 1.0 +605 91 optimizer.lr 0.015867116261527814 +605 91 training.batch_size 0.0 +605 91 training.label_smoothing 0.00440942419811035 +605 92 model.embedding_dim 1.0 +605 92 optimizer.lr 0.06744562152779619 +605 92 training.batch_size 2.0 +605 92 training.label_smoothing 0.14690944906235182 +605 93 model.embedding_dim 2.0 +605 93 optimizer.lr 0.005713385906539336 +605 93 training.batch_size 2.0 +605 93 training.label_smoothing 0.0020418840187927194 +605 94 model.embedding_dim 0.0 +605 94 optimizer.lr 0.03557917339609996 +605 94 training.batch_size 2.0 +605 94 training.label_smoothing 0.02722315199784575 +605 95 model.embedding_dim 1.0 +605 95 optimizer.lr 0.058402446132813354 +605 95 training.batch_size 2.0 +605 95 training.label_smoothing 0.07014195631674314 +605 96 model.embedding_dim 1.0 +605 96 optimizer.lr 0.0014567652839551283 +605 96 training.batch_size 0.0 +605 96 
training.label_smoothing 0.018915944577134622 +605 97 model.embedding_dim 1.0 +605 97 optimizer.lr 0.02412198508993998 +605 97 training.batch_size 2.0 +605 97 training.label_smoothing 0.48469905275885766 +605 98 model.embedding_dim 0.0 +605 98 optimizer.lr 0.02497268920259833 +605 98 training.batch_size 1.0 +605 98 training.label_smoothing 0.004015815374683298 +605 99 model.embedding_dim 2.0 +605 99 optimizer.lr 0.06431230181406412 +605 99 training.batch_size 0.0 +605 99 training.label_smoothing 0.025621187848741445 +605 100 model.embedding_dim 2.0 +605 100 optimizer.lr 0.04231749916164195 +605 100 training.batch_size 2.0 +605 100 training.label_smoothing 0.31548767524850646 +605 1 dataset """kinships""" +605 1 model """rotate""" +605 1 loss """crossentropy""" +605 1 regularizer """no""" +605 1 optimizer """adam""" +605 1 training_loop """lcwa""" +605 1 evaluator """rankbased""" +605 2 dataset """kinships""" +605 2 model """rotate""" +605 2 loss """crossentropy""" +605 2 regularizer """no""" +605 2 optimizer """adam""" +605 2 training_loop """lcwa""" +605 2 evaluator """rankbased""" +605 3 dataset """kinships""" +605 3 model """rotate""" +605 3 loss """crossentropy""" +605 3 regularizer """no""" +605 3 optimizer """adam""" +605 3 training_loop """lcwa""" +605 3 evaluator """rankbased""" +605 4 dataset """kinships""" +605 4 model """rotate""" +605 4 loss """crossentropy""" +605 4 regularizer """no""" +605 4 optimizer """adam""" +605 4 training_loop """lcwa""" +605 4 evaluator """rankbased""" +605 5 dataset """kinships""" +605 5 model """rotate""" +605 5 loss """crossentropy""" +605 5 regularizer """no""" +605 5 optimizer """adam""" +605 5 training_loop """lcwa""" +605 5 evaluator """rankbased""" +605 6 dataset """kinships""" +605 6 model """rotate""" +605 6 loss """crossentropy""" +605 6 regularizer """no""" +605 6 optimizer """adam""" +605 6 training_loop """lcwa""" +605 6 evaluator """rankbased""" +605 7 dataset """kinships""" +605 7 model """rotate""" +605 7 loss 
"""crossentropy""" +605 7 regularizer """no""" +605 7 optimizer """adam""" +605 7 training_loop """lcwa""" +605 7 evaluator """rankbased""" +605 8 dataset """kinships""" +605 8 model """rotate""" +605 8 loss """crossentropy""" +605 8 regularizer """no""" +605 8 optimizer """adam""" +605 8 training_loop """lcwa""" +605 8 evaluator """rankbased""" +605 9 dataset """kinships""" +605 9 model """rotate""" +605 9 loss """crossentropy""" +605 9 regularizer """no""" +605 9 optimizer """adam""" +605 9 training_loop """lcwa""" +605 9 evaluator """rankbased""" +605 10 dataset """kinships""" +605 10 model """rotate""" +605 10 loss """crossentropy""" +605 10 regularizer """no""" +605 10 optimizer """adam""" +605 10 training_loop """lcwa""" +605 10 evaluator """rankbased""" +605 11 dataset """kinships""" +605 11 model """rotate""" +605 11 loss """crossentropy""" +605 11 regularizer """no""" +605 11 optimizer """adam""" +605 11 training_loop """lcwa""" +605 11 evaluator """rankbased""" +605 12 dataset """kinships""" +605 12 model """rotate""" +605 12 loss """crossentropy""" +605 12 regularizer """no""" +605 12 optimizer """adam""" +605 12 training_loop """lcwa""" +605 12 evaluator """rankbased""" +605 13 dataset """kinships""" +605 13 model """rotate""" +605 13 loss """crossentropy""" +605 13 regularizer """no""" +605 13 optimizer """adam""" +605 13 training_loop """lcwa""" +605 13 evaluator """rankbased""" +605 14 dataset """kinships""" +605 14 model """rotate""" +605 14 loss """crossentropy""" +605 14 regularizer """no""" +605 14 optimizer """adam""" +605 14 training_loop """lcwa""" +605 14 evaluator """rankbased""" +605 15 dataset """kinships""" +605 15 model """rotate""" +605 15 loss """crossentropy""" +605 15 regularizer """no""" +605 15 optimizer """adam""" +605 15 training_loop """lcwa""" +605 15 evaluator """rankbased""" +605 16 dataset """kinships""" +605 16 model """rotate""" +605 16 loss """crossentropy""" +605 16 regularizer """no""" +605 16 optimizer """adam""" +605 
16 training_loop """lcwa""" +605 16 evaluator """rankbased""" +605 17 dataset """kinships""" +605 17 model """rotate""" +605 17 loss """crossentropy""" +605 17 regularizer """no""" +605 17 optimizer """adam""" +605 17 training_loop """lcwa""" +605 17 evaluator """rankbased""" +605 18 dataset """kinships""" +605 18 model """rotate""" +605 18 loss """crossentropy""" +605 18 regularizer """no""" +605 18 optimizer """adam""" +605 18 training_loop """lcwa""" +605 18 evaluator """rankbased""" +605 19 dataset """kinships""" +605 19 model """rotate""" +605 19 loss """crossentropy""" +605 19 regularizer """no""" +605 19 optimizer """adam""" +605 19 training_loop """lcwa""" +605 19 evaluator """rankbased""" +605 20 dataset """kinships""" +605 20 model """rotate""" +605 20 loss """crossentropy""" +605 20 regularizer """no""" +605 20 optimizer """adam""" +605 20 training_loop """lcwa""" +605 20 evaluator """rankbased""" +605 21 dataset """kinships""" +605 21 model """rotate""" +605 21 loss """crossentropy""" +605 21 regularizer """no""" +605 21 optimizer """adam""" +605 21 training_loop """lcwa""" +605 21 evaluator """rankbased""" +605 22 dataset """kinships""" +605 22 model """rotate""" +605 22 loss """crossentropy""" +605 22 regularizer """no""" +605 22 optimizer """adam""" +605 22 training_loop """lcwa""" +605 22 evaluator """rankbased""" +605 23 dataset """kinships""" +605 23 model """rotate""" +605 23 loss """crossentropy""" +605 23 regularizer """no""" +605 23 optimizer """adam""" +605 23 training_loop """lcwa""" +605 23 evaluator """rankbased""" +605 24 dataset """kinships""" +605 24 model """rotate""" +605 24 loss """crossentropy""" +605 24 regularizer """no""" +605 24 optimizer """adam""" +605 24 training_loop """lcwa""" +605 24 evaluator """rankbased""" +605 25 dataset """kinships""" +605 25 model """rotate""" +605 25 loss """crossentropy""" +605 25 regularizer """no""" +605 25 optimizer """adam""" +605 25 training_loop """lcwa""" +605 25 evaluator """rankbased""" 
+605 26 dataset """kinships""" +605 26 model """rotate""" +605 26 loss """crossentropy""" +605 26 regularizer """no""" +605 26 optimizer """adam""" +605 26 training_loop """lcwa""" +605 26 evaluator """rankbased""" +605 27 dataset """kinships""" +605 27 model """rotate""" +605 27 loss """crossentropy""" +605 27 regularizer """no""" +605 27 optimizer """adam""" +605 27 training_loop """lcwa""" +605 27 evaluator """rankbased""" +605 28 dataset """kinships""" +605 28 model """rotate""" +605 28 loss """crossentropy""" +605 28 regularizer """no""" +605 28 optimizer """adam""" +605 28 training_loop """lcwa""" +605 28 evaluator """rankbased""" +605 29 dataset """kinships""" +605 29 model """rotate""" +605 29 loss """crossentropy""" +605 29 regularizer """no""" +605 29 optimizer """adam""" +605 29 training_loop """lcwa""" +605 29 evaluator """rankbased""" +605 30 dataset """kinships""" +605 30 model """rotate""" +605 30 loss """crossentropy""" +605 30 regularizer """no""" +605 30 optimizer """adam""" +605 30 training_loop """lcwa""" +605 30 evaluator """rankbased""" +605 31 dataset """kinships""" +605 31 model """rotate""" +605 31 loss """crossentropy""" +605 31 regularizer """no""" +605 31 optimizer """adam""" +605 31 training_loop """lcwa""" +605 31 evaluator """rankbased""" +605 32 dataset """kinships""" +605 32 model """rotate""" +605 32 loss """crossentropy""" +605 32 regularizer """no""" +605 32 optimizer """adam""" +605 32 training_loop """lcwa""" +605 32 evaluator """rankbased""" +605 33 dataset """kinships""" +605 33 model """rotate""" +605 33 loss """crossentropy""" +605 33 regularizer """no""" +605 33 optimizer """adam""" +605 33 training_loop """lcwa""" +605 33 evaluator """rankbased""" +605 34 dataset """kinships""" +605 34 model """rotate""" +605 34 loss """crossentropy""" +605 34 regularizer """no""" +605 34 optimizer """adam""" +605 34 training_loop """lcwa""" +605 34 evaluator """rankbased""" +605 35 dataset """kinships""" +605 35 model """rotate""" +605 
35 loss """crossentropy""" +605 35 regularizer """no""" +605 35 optimizer """adam""" +605 35 training_loop """lcwa""" +605 35 evaluator """rankbased""" +605 36 dataset """kinships""" +605 36 model """rotate""" +605 36 loss """crossentropy""" +605 36 regularizer """no""" +605 36 optimizer """adam""" +605 36 training_loop """lcwa""" +605 36 evaluator """rankbased""" +605 37 dataset """kinships""" +605 37 model """rotate""" +605 37 loss """crossentropy""" +605 37 regularizer """no""" +605 37 optimizer """adam""" +605 37 training_loop """lcwa""" +605 37 evaluator """rankbased""" +605 38 dataset """kinships""" +605 38 model """rotate""" +605 38 loss """crossentropy""" +605 38 regularizer """no""" +605 38 optimizer """adam""" +605 38 training_loop """lcwa""" +605 38 evaluator """rankbased""" +605 39 dataset """kinships""" +605 39 model """rotate""" +605 39 loss """crossentropy""" +605 39 regularizer """no""" +605 39 optimizer """adam""" +605 39 training_loop """lcwa""" +605 39 evaluator """rankbased""" +605 40 dataset """kinships""" +605 40 model """rotate""" +605 40 loss """crossentropy""" +605 40 regularizer """no""" +605 40 optimizer """adam""" +605 40 training_loop """lcwa""" +605 40 evaluator """rankbased""" +605 41 dataset """kinships""" +605 41 model """rotate""" +605 41 loss """crossentropy""" +605 41 regularizer """no""" +605 41 optimizer """adam""" +605 41 training_loop """lcwa""" +605 41 evaluator """rankbased""" +605 42 dataset """kinships""" +605 42 model """rotate""" +605 42 loss """crossentropy""" +605 42 regularizer """no""" +605 42 optimizer """adam""" +605 42 training_loop """lcwa""" +605 42 evaluator """rankbased""" +605 43 dataset """kinships""" +605 43 model """rotate""" +605 43 loss """crossentropy""" +605 43 regularizer """no""" +605 43 optimizer """adam""" +605 43 training_loop """lcwa""" +605 43 evaluator """rankbased""" +605 44 dataset """kinships""" +605 44 model """rotate""" +605 44 loss """crossentropy""" +605 44 regularizer """no""" +605 44 
optimizer """adam""" +605 44 training_loop """lcwa""" +605 44 evaluator """rankbased""" +605 45 dataset """kinships""" +605 45 model """rotate""" +605 45 loss """crossentropy""" +605 45 regularizer """no""" +605 45 optimizer """adam""" +605 45 training_loop """lcwa""" +605 45 evaluator """rankbased""" +605 46 dataset """kinships""" +605 46 model """rotate""" +605 46 loss """crossentropy""" +605 46 regularizer """no""" +605 46 optimizer """adam""" +605 46 training_loop """lcwa""" +605 46 evaluator """rankbased""" +605 47 dataset """kinships""" +605 47 model """rotate""" +605 47 loss """crossentropy""" +605 47 regularizer """no""" +605 47 optimizer """adam""" +605 47 training_loop """lcwa""" +605 47 evaluator """rankbased""" +605 48 dataset """kinships""" +605 48 model """rotate""" +605 48 loss """crossentropy""" +605 48 regularizer """no""" +605 48 optimizer """adam""" +605 48 training_loop """lcwa""" +605 48 evaluator """rankbased""" +605 49 dataset """kinships""" +605 49 model """rotate""" +605 49 loss """crossentropy""" +605 49 regularizer """no""" +605 49 optimizer """adam""" +605 49 training_loop """lcwa""" +605 49 evaluator """rankbased""" +605 50 dataset """kinships""" +605 50 model """rotate""" +605 50 loss """crossentropy""" +605 50 regularizer """no""" +605 50 optimizer """adam""" +605 50 training_loop """lcwa""" +605 50 evaluator """rankbased""" +605 51 dataset """kinships""" +605 51 model """rotate""" +605 51 loss """crossentropy""" +605 51 regularizer """no""" +605 51 optimizer """adam""" +605 51 training_loop """lcwa""" +605 51 evaluator """rankbased""" +605 52 dataset """kinships""" +605 52 model """rotate""" +605 52 loss """crossentropy""" +605 52 regularizer """no""" +605 52 optimizer """adam""" +605 52 training_loop """lcwa""" +605 52 evaluator """rankbased""" +605 53 dataset """kinships""" +605 53 model """rotate""" +605 53 loss """crossentropy""" +605 53 regularizer """no""" +605 53 optimizer """adam""" +605 53 training_loop """lcwa""" +605 53 
evaluator """rankbased""" +605 54 dataset """kinships""" +605 54 model """rotate""" +605 54 loss """crossentropy""" +605 54 regularizer """no""" +605 54 optimizer """adam""" +605 54 training_loop """lcwa""" +605 54 evaluator """rankbased""" +605 55 dataset """kinships""" +605 55 model """rotate""" +605 55 loss """crossentropy""" +605 55 regularizer """no""" +605 55 optimizer """adam""" +605 55 training_loop """lcwa""" +605 55 evaluator """rankbased""" +605 56 dataset """kinships""" +605 56 model """rotate""" +605 56 loss """crossentropy""" +605 56 regularizer """no""" +605 56 optimizer """adam""" +605 56 training_loop """lcwa""" +605 56 evaluator """rankbased""" +605 57 dataset """kinships""" +605 57 model """rotate""" +605 57 loss """crossentropy""" +605 57 regularizer """no""" +605 57 optimizer """adam""" +605 57 training_loop """lcwa""" +605 57 evaluator """rankbased""" +605 58 dataset """kinships""" +605 58 model """rotate""" +605 58 loss """crossentropy""" +605 58 regularizer """no""" +605 58 optimizer """adam""" +605 58 training_loop """lcwa""" +605 58 evaluator """rankbased""" +605 59 dataset """kinships""" +605 59 model """rotate""" +605 59 loss """crossentropy""" +605 59 regularizer """no""" +605 59 optimizer """adam""" +605 59 training_loop """lcwa""" +605 59 evaluator """rankbased""" +605 60 dataset """kinships""" +605 60 model """rotate""" +605 60 loss """crossentropy""" +605 60 regularizer """no""" +605 60 optimizer """adam""" +605 60 training_loop """lcwa""" +605 60 evaluator """rankbased""" +605 61 dataset """kinships""" +605 61 model """rotate""" +605 61 loss """crossentropy""" +605 61 regularizer """no""" +605 61 optimizer """adam""" +605 61 training_loop """lcwa""" +605 61 evaluator """rankbased""" +605 62 dataset """kinships""" +605 62 model """rotate""" +605 62 loss """crossentropy""" +605 62 regularizer """no""" +605 62 optimizer """adam""" +605 62 training_loop """lcwa""" +605 62 evaluator """rankbased""" +605 63 dataset """kinships""" +605 63 
model """rotate""" +605 63 loss """crossentropy""" +605 63 regularizer """no""" +605 63 optimizer """adam""" +605 63 training_loop """lcwa""" +605 63 evaluator """rankbased""" +605 64 dataset """kinships""" +605 64 model """rotate""" +605 64 loss """crossentropy""" +605 64 regularizer """no""" +605 64 optimizer """adam""" +605 64 training_loop """lcwa""" +605 64 evaluator """rankbased""" +605 65 dataset """kinships""" +605 65 model """rotate""" +605 65 loss """crossentropy""" +605 65 regularizer """no""" +605 65 optimizer """adam""" +605 65 training_loop """lcwa""" +605 65 evaluator """rankbased""" +605 66 dataset """kinships""" +605 66 model """rotate""" +605 66 loss """crossentropy""" +605 66 regularizer """no""" +605 66 optimizer """adam""" +605 66 training_loop """lcwa""" +605 66 evaluator """rankbased""" +605 67 dataset """kinships""" +605 67 model """rotate""" +605 67 loss """crossentropy""" +605 67 regularizer """no""" +605 67 optimizer """adam""" +605 67 training_loop """lcwa""" +605 67 evaluator """rankbased""" +605 68 dataset """kinships""" +605 68 model """rotate""" +605 68 loss """crossentropy""" +605 68 regularizer """no""" +605 68 optimizer """adam""" +605 68 training_loop """lcwa""" +605 68 evaluator """rankbased""" +605 69 dataset """kinships""" +605 69 model """rotate""" +605 69 loss """crossentropy""" +605 69 regularizer """no""" +605 69 optimizer """adam""" +605 69 training_loop """lcwa""" +605 69 evaluator """rankbased""" +605 70 dataset """kinships""" +605 70 model """rotate""" +605 70 loss """crossentropy""" +605 70 regularizer """no""" +605 70 optimizer """adam""" +605 70 training_loop """lcwa""" +605 70 evaluator """rankbased""" +605 71 dataset """kinships""" +605 71 model """rotate""" +605 71 loss """crossentropy""" +605 71 regularizer """no""" +605 71 optimizer """adam""" +605 71 training_loop """lcwa""" +605 71 evaluator """rankbased""" +605 72 dataset """kinships""" +605 72 model """rotate""" +605 72 loss """crossentropy""" +605 72 
regularizer """no""" +605 72 optimizer """adam""" +605 72 training_loop """lcwa""" +605 72 evaluator """rankbased""" +605 73 dataset """kinships""" +605 73 model """rotate""" +605 73 loss """crossentropy""" +605 73 regularizer """no""" +605 73 optimizer """adam""" +605 73 training_loop """lcwa""" +605 73 evaluator """rankbased""" +605 74 dataset """kinships""" +605 74 model """rotate""" +605 74 loss """crossentropy""" +605 74 regularizer """no""" +605 74 optimizer """adam""" +605 74 training_loop """lcwa""" +605 74 evaluator """rankbased""" +605 75 dataset """kinships""" +605 75 model """rotate""" +605 75 loss """crossentropy""" +605 75 regularizer """no""" +605 75 optimizer """adam""" +605 75 training_loop """lcwa""" +605 75 evaluator """rankbased""" +605 76 dataset """kinships""" +605 76 model """rotate""" +605 76 loss """crossentropy""" +605 76 regularizer """no""" +605 76 optimizer """adam""" +605 76 training_loop """lcwa""" +605 76 evaluator """rankbased""" +605 77 dataset """kinships""" +605 77 model """rotate""" +605 77 loss """crossentropy""" +605 77 regularizer """no""" +605 77 optimizer """adam""" +605 77 training_loop """lcwa""" +605 77 evaluator """rankbased""" +605 78 dataset """kinships""" +605 78 model """rotate""" +605 78 loss """crossentropy""" +605 78 regularizer """no""" +605 78 optimizer """adam""" +605 78 training_loop """lcwa""" +605 78 evaluator """rankbased""" +605 79 dataset """kinships""" +605 79 model """rotate""" +605 79 loss """crossentropy""" +605 79 regularizer """no""" +605 79 optimizer """adam""" +605 79 training_loop """lcwa""" +605 79 evaluator """rankbased""" +605 80 dataset """kinships""" +605 80 model """rotate""" +605 80 loss """crossentropy""" +605 80 regularizer """no""" +605 80 optimizer """adam""" +605 80 training_loop """lcwa""" +605 80 evaluator """rankbased""" +605 81 dataset """kinships""" +605 81 model """rotate""" +605 81 loss """crossentropy""" +605 81 regularizer """no""" +605 81 optimizer """adam""" +605 81 
training_loop """lcwa""" +605 81 evaluator """rankbased""" +605 82 dataset """kinships""" +605 82 model """rotate""" +605 82 loss """crossentropy""" +605 82 regularizer """no""" +605 82 optimizer """adam""" +605 82 training_loop """lcwa""" +605 82 evaluator """rankbased""" +605 83 dataset """kinships""" +605 83 model """rotate""" +605 83 loss """crossentropy""" +605 83 regularizer """no""" +605 83 optimizer """adam""" +605 83 training_loop """lcwa""" +605 83 evaluator """rankbased""" +605 84 dataset """kinships""" +605 84 model """rotate""" +605 84 loss """crossentropy""" +605 84 regularizer """no""" +605 84 optimizer """adam""" +605 84 training_loop """lcwa""" +605 84 evaluator """rankbased""" +605 85 dataset """kinships""" +605 85 model """rotate""" +605 85 loss """crossentropy""" +605 85 regularizer """no""" +605 85 optimizer """adam""" +605 85 training_loop """lcwa""" +605 85 evaluator """rankbased""" +605 86 dataset """kinships""" +605 86 model """rotate""" +605 86 loss """crossentropy""" +605 86 regularizer """no""" +605 86 optimizer """adam""" +605 86 training_loop """lcwa""" +605 86 evaluator """rankbased""" +605 87 dataset """kinships""" +605 87 model """rotate""" +605 87 loss """crossentropy""" +605 87 regularizer """no""" +605 87 optimizer """adam""" +605 87 training_loop """lcwa""" +605 87 evaluator """rankbased""" +605 88 dataset """kinships""" +605 88 model """rotate""" +605 88 loss """crossentropy""" +605 88 regularizer """no""" +605 88 optimizer """adam""" +605 88 training_loop """lcwa""" +605 88 evaluator """rankbased""" +605 89 dataset """kinships""" +605 89 model """rotate""" +605 89 loss """crossentropy""" +605 89 regularizer """no""" +605 89 optimizer """adam""" +605 89 training_loop """lcwa""" +605 89 evaluator """rankbased""" +605 90 dataset """kinships""" +605 90 model """rotate""" +605 90 loss """crossentropy""" +605 90 regularizer """no""" +605 90 optimizer """adam""" +605 90 training_loop """lcwa""" +605 90 evaluator """rankbased""" +605 
91 dataset """kinships""" +605 91 model """rotate""" +605 91 loss """crossentropy""" +605 91 regularizer """no""" +605 91 optimizer """adam""" +605 91 training_loop """lcwa""" +605 91 evaluator """rankbased""" +605 92 dataset """kinships""" +605 92 model """rotate""" +605 92 loss """crossentropy""" +605 92 regularizer """no""" +605 92 optimizer """adam""" +605 92 training_loop """lcwa""" +605 92 evaluator """rankbased""" +605 93 dataset """kinships""" +605 93 model """rotate""" +605 93 loss """crossentropy""" +605 93 regularizer """no""" +605 93 optimizer """adam""" +605 93 training_loop """lcwa""" +605 93 evaluator """rankbased""" +605 94 dataset """kinships""" +605 94 model """rotate""" +605 94 loss """crossentropy""" +605 94 regularizer """no""" +605 94 optimizer """adam""" +605 94 training_loop """lcwa""" +605 94 evaluator """rankbased""" +605 95 dataset """kinships""" +605 95 model """rotate""" +605 95 loss """crossentropy""" +605 95 regularizer """no""" +605 95 optimizer """adam""" +605 95 training_loop """lcwa""" +605 95 evaluator """rankbased""" +605 96 dataset """kinships""" +605 96 model """rotate""" +605 96 loss """crossentropy""" +605 96 regularizer """no""" +605 96 optimizer """adam""" +605 96 training_loop """lcwa""" +605 96 evaluator """rankbased""" +605 97 dataset """kinships""" +605 97 model """rotate""" +605 97 loss """crossentropy""" +605 97 regularizer """no""" +605 97 optimizer """adam""" +605 97 training_loop """lcwa""" +605 97 evaluator """rankbased""" +605 98 dataset """kinships""" +605 98 model """rotate""" +605 98 loss """crossentropy""" +605 98 regularizer """no""" +605 98 optimizer """adam""" +605 98 training_loop """lcwa""" +605 98 evaluator """rankbased""" +605 99 dataset """kinships""" +605 99 model """rotate""" +605 99 loss """crossentropy""" +605 99 regularizer """no""" +605 99 optimizer """adam""" +605 99 training_loop """lcwa""" +605 99 evaluator """rankbased""" +605 100 dataset """kinships""" +605 100 model """rotate""" +605 100 
loss """crossentropy""" +605 100 regularizer """no""" +605 100 optimizer """adam""" +605 100 training_loop """lcwa""" +605 100 evaluator """rankbased""" +606 1 model.embedding_dim 1.0 +606 1 optimizer.lr 0.002041303735336082 +606 1 training.batch_size 0.0 +606 1 training.label_smoothing 0.001616264291822995 +606 2 model.embedding_dim 1.0 +606 2 optimizer.lr 0.0015349738278487455 +606 2 training.batch_size 0.0 +606 2 training.label_smoothing 0.0022817356567534685 +606 3 model.embedding_dim 1.0 +606 3 optimizer.lr 0.001203573833986178 +606 3 training.batch_size 1.0 +606 3 training.label_smoothing 0.08591235041647446 +606 4 model.embedding_dim 0.0 +606 4 optimizer.lr 0.02627029078124805 +606 4 training.batch_size 1.0 +606 4 training.label_smoothing 0.7553620255618417 +606 5 model.embedding_dim 1.0 +606 5 optimizer.lr 0.023502640774609337 +606 5 training.batch_size 2.0 +606 5 training.label_smoothing 0.26297895294346646 +606 6 model.embedding_dim 1.0 +606 6 optimizer.lr 0.006055545232911354 +606 6 training.batch_size 0.0 +606 6 training.label_smoothing 0.004115901984814172 +606 7 model.embedding_dim 0.0 +606 7 optimizer.lr 0.035874693921285644 +606 7 training.batch_size 1.0 +606 7 training.label_smoothing 0.323090250765744 +606 8 model.embedding_dim 2.0 +606 8 optimizer.lr 0.005337784296155599 +606 8 training.batch_size 2.0 +606 8 training.label_smoothing 0.00119030367530552 +606 9 model.embedding_dim 1.0 +606 9 optimizer.lr 0.0018555723962151085 +606 9 training.batch_size 2.0 +606 9 training.label_smoothing 0.015350601449941083 +606 10 model.embedding_dim 1.0 +606 10 optimizer.lr 0.013614912079914075 +606 10 training.batch_size 0.0 +606 10 training.label_smoothing 0.021868752699010324 +606 11 model.embedding_dim 0.0 +606 11 optimizer.lr 0.041075813821039045 +606 11 training.batch_size 2.0 +606 11 training.label_smoothing 0.001624222453878991 +606 12 model.embedding_dim 2.0 +606 12 optimizer.lr 0.0075448276795051445 +606 12 training.batch_size 2.0 +606 12 
training.label_smoothing 0.015768553181036864 +606 13 model.embedding_dim 1.0 +606 13 optimizer.lr 0.0017474994494165548 +606 13 training.batch_size 0.0 +606 13 training.label_smoothing 0.3732108042204362 +606 14 model.embedding_dim 0.0 +606 14 optimizer.lr 0.002150320670386811 +606 14 training.batch_size 0.0 +606 14 training.label_smoothing 0.05284272916870286 +606 15 model.embedding_dim 0.0 +606 15 optimizer.lr 0.04901589645992439 +606 15 training.batch_size 1.0 +606 15 training.label_smoothing 0.0032615935383934246 +606 16 model.embedding_dim 1.0 +606 16 optimizer.lr 0.001361353903086101 +606 16 training.batch_size 2.0 +606 16 training.label_smoothing 0.5132317502540822 +606 17 model.embedding_dim 2.0 +606 17 optimizer.lr 0.03324117504604773 +606 17 training.batch_size 1.0 +606 17 training.label_smoothing 0.5042066451990541 +606 18 model.embedding_dim 1.0 +606 18 optimizer.lr 0.016386991761049352 +606 18 training.batch_size 0.0 +606 18 training.label_smoothing 0.13991149132796885 +606 19 model.embedding_dim 2.0 +606 19 optimizer.lr 0.004916283089554737 +606 19 training.batch_size 2.0 +606 19 training.label_smoothing 0.013520907281733403 +606 20 model.embedding_dim 2.0 +606 20 optimizer.lr 0.0028355561326089355 +606 20 training.batch_size 0.0 +606 20 training.label_smoothing 0.008001111976363464 +606 21 model.embedding_dim 0.0 +606 21 optimizer.lr 0.0015872980854054583 +606 21 training.batch_size 2.0 +606 21 training.label_smoothing 0.0024834666967520507 +606 22 model.embedding_dim 0.0 +606 22 optimizer.lr 0.002122650114658847 +606 22 training.batch_size 1.0 +606 22 training.label_smoothing 0.04993460287026378 +606 23 model.embedding_dim 1.0 +606 23 optimizer.lr 0.04531425484607871 +606 23 training.batch_size 0.0 +606 23 training.label_smoothing 0.1674512642622134 +606 24 model.embedding_dim 0.0 +606 24 optimizer.lr 0.0013645943122553292 +606 24 training.batch_size 1.0 +606 24 training.label_smoothing 0.038490756175009404 +606 25 model.embedding_dim 1.0 +606 25 
optimizer.lr 0.0028323690563851625 +606 25 training.batch_size 2.0 +606 25 training.label_smoothing 0.0023857787458159835 +606 26 model.embedding_dim 0.0 +606 26 optimizer.lr 0.011682322189892765 +606 26 training.batch_size 1.0 +606 26 training.label_smoothing 0.009360102629005891 +606 27 model.embedding_dim 1.0 +606 27 optimizer.lr 0.00951194202952971 +606 27 training.batch_size 1.0 +606 27 training.label_smoothing 0.0834989362876819 +606 28 model.embedding_dim 2.0 +606 28 optimizer.lr 0.01509356191940869 +606 28 training.batch_size 1.0 +606 28 training.label_smoothing 0.05318941382994955 +606 29 model.embedding_dim 0.0 +606 29 optimizer.lr 0.007700080118684931 +606 29 training.batch_size 1.0 +606 29 training.label_smoothing 0.021643206545873848 +606 30 model.embedding_dim 1.0 +606 30 optimizer.lr 0.00233762974674996 +606 30 training.batch_size 1.0 +606 30 training.label_smoothing 0.0034584749003455833 +606 31 model.embedding_dim 2.0 +606 31 optimizer.lr 0.001024748441716152 +606 31 training.batch_size 0.0 +606 31 training.label_smoothing 0.7616118749004379 +606 32 model.embedding_dim 1.0 +606 32 optimizer.lr 0.09073114969753744 +606 32 training.batch_size 2.0 +606 32 training.label_smoothing 0.003185696097466437 +606 33 model.embedding_dim 0.0 +606 33 optimizer.lr 0.001279172758691793 +606 33 training.batch_size 2.0 +606 33 training.label_smoothing 0.005765875936258412 +606 34 model.embedding_dim 0.0 +606 34 optimizer.lr 0.07663962610084836 +606 34 training.batch_size 1.0 +606 34 training.label_smoothing 0.7619657315370602 +606 35 model.embedding_dim 1.0 +606 35 optimizer.lr 0.04845463049905645 +606 35 training.batch_size 0.0 +606 35 training.label_smoothing 0.1545413047461258 +606 36 model.embedding_dim 0.0 +606 36 optimizer.lr 0.02295537974253148 +606 36 training.batch_size 0.0 +606 36 training.label_smoothing 0.001066804312436935 +606 37 model.embedding_dim 1.0 +606 37 optimizer.lr 0.0036659058322423797 +606 37 training.batch_size 1.0 +606 37 
training.label_smoothing 0.004515854549147276 +606 38 model.embedding_dim 1.0 +606 38 optimizer.lr 0.0036413727911278264 +606 38 training.batch_size 1.0 +606 38 training.label_smoothing 0.09191633624597081 +606 39 model.embedding_dim 2.0 +606 39 optimizer.lr 0.09897577035287475 +606 39 training.batch_size 0.0 +606 39 training.label_smoothing 0.009727397383009454 +606 40 model.embedding_dim 2.0 +606 40 optimizer.lr 0.03405717022103271 +606 40 training.batch_size 0.0 +606 40 training.label_smoothing 0.001307552475462662 +606 41 model.embedding_dim 0.0 +606 41 optimizer.lr 0.03285810392596622 +606 41 training.batch_size 2.0 +606 41 training.label_smoothing 0.3870433201245144 +606 42 model.embedding_dim 0.0 +606 42 optimizer.lr 0.02341232121823405 +606 42 training.batch_size 2.0 +606 42 training.label_smoothing 0.009380222875329603 +606 43 model.embedding_dim 1.0 +606 43 optimizer.lr 0.001071692010557232 +606 43 training.batch_size 2.0 +606 43 training.label_smoothing 0.09891173982266492 +606 44 model.embedding_dim 1.0 +606 44 optimizer.lr 0.027960254218945668 +606 44 training.batch_size 2.0 +606 44 training.label_smoothing 0.013027516794508982 +606 45 model.embedding_dim 0.0 +606 45 optimizer.lr 0.0029203170883036395 +606 45 training.batch_size 0.0 +606 45 training.label_smoothing 0.8042503530892964 +606 46 model.embedding_dim 1.0 +606 46 optimizer.lr 0.08115520971055452 +606 46 training.batch_size 2.0 +606 46 training.label_smoothing 0.011621069428131525 +606 47 model.embedding_dim 0.0 +606 47 optimizer.lr 0.0028367240001907694 +606 47 training.batch_size 2.0 +606 47 training.label_smoothing 0.057294073551509714 +606 48 model.embedding_dim 2.0 +606 48 optimizer.lr 0.001971595507198851 +606 48 training.batch_size 1.0 +606 48 training.label_smoothing 0.0410254725168658 +606 49 model.embedding_dim 2.0 +606 49 optimizer.lr 0.009879878573318039 +606 49 training.batch_size 1.0 +606 49 training.label_smoothing 0.22761802641086276 +606 50 model.embedding_dim 1.0 +606 50 
optimizer.lr 0.018049006370229055 +606 50 training.batch_size 0.0 +606 50 training.label_smoothing 0.01034948187757283 +606 51 model.embedding_dim 0.0 +606 51 optimizer.lr 0.045412784950916114 +606 51 training.batch_size 2.0 +606 51 training.label_smoothing 0.007722024318494776 +606 52 model.embedding_dim 0.0 +606 52 optimizer.lr 0.038428740831079035 +606 52 training.batch_size 1.0 +606 52 training.label_smoothing 0.26488945346354015 +606 53 model.embedding_dim 0.0 +606 53 optimizer.lr 0.0047357540750256975 +606 53 training.batch_size 1.0 +606 53 training.label_smoothing 0.013554426931260785 +606 54 model.embedding_dim 0.0 +606 54 optimizer.lr 0.0604758934574973 +606 54 training.batch_size 0.0 +606 54 training.label_smoothing 0.12292646592111052 +606 55 model.embedding_dim 2.0 +606 55 optimizer.lr 0.017067980147640487 +606 55 training.batch_size 1.0 +606 55 training.label_smoothing 0.01833248027551751 +606 56 model.embedding_dim 1.0 +606 56 optimizer.lr 0.0071262328945158005 +606 56 training.batch_size 0.0 +606 56 training.label_smoothing 0.6020498359686206 +606 57 model.embedding_dim 1.0 +606 57 optimizer.lr 0.006225668424502251 +606 57 training.batch_size 0.0 +606 57 training.label_smoothing 0.46475046281314586 +606 58 model.embedding_dim 0.0 +606 58 optimizer.lr 0.0019029082830535247 +606 58 training.batch_size 2.0 +606 58 training.label_smoothing 0.0636339750703952 +606 59 model.embedding_dim 1.0 +606 59 optimizer.lr 0.012172292726809673 +606 59 training.batch_size 0.0 +606 59 training.label_smoothing 0.13794612716962948 +606 60 model.embedding_dim 1.0 +606 60 optimizer.lr 0.029730274056035556 +606 60 training.batch_size 0.0 +606 60 training.label_smoothing 0.5255378028096492 +606 61 model.embedding_dim 0.0 +606 61 optimizer.lr 0.0029823817449247574 +606 61 training.batch_size 2.0 +606 61 training.label_smoothing 0.3708329747029295 +606 62 model.embedding_dim 0.0 +606 62 optimizer.lr 0.018589404612142715 +606 62 training.batch_size 1.0 +606 62 
training.label_smoothing 0.047519356807656 +606 63 model.embedding_dim 2.0 +606 63 optimizer.lr 0.03859827728444865 +606 63 training.batch_size 1.0 +606 63 training.label_smoothing 0.018587560033094544 +606 64 model.embedding_dim 0.0 +606 64 optimizer.lr 0.04541029166796939 +606 64 training.batch_size 2.0 +606 64 training.label_smoothing 0.12632434291191183 +606 65 model.embedding_dim 0.0 +606 65 optimizer.lr 0.07789813393389493 +606 65 training.batch_size 2.0 +606 65 training.label_smoothing 0.07964891445850103 +606 66 model.embedding_dim 0.0 +606 66 optimizer.lr 0.058766030348039906 +606 66 training.batch_size 0.0 +606 66 training.label_smoothing 0.1600950148143846 +606 67 model.embedding_dim 0.0 +606 67 optimizer.lr 0.07820758948734118 +606 67 training.batch_size 0.0 +606 67 training.label_smoothing 0.343128469265123 +606 68 model.embedding_dim 1.0 +606 68 optimizer.lr 0.0028857361773994617 +606 68 training.batch_size 1.0 +606 68 training.label_smoothing 0.002477468083312248 +606 69 model.embedding_dim 0.0 +606 69 optimizer.lr 0.008069079195151508 +606 69 training.batch_size 1.0 +606 69 training.label_smoothing 0.09156600659051507 +606 70 model.embedding_dim 1.0 +606 70 optimizer.lr 0.04549120578131815 +606 70 training.batch_size 0.0 +606 70 training.label_smoothing 0.0831296725248704 +606 71 model.embedding_dim 2.0 +606 71 optimizer.lr 0.004550695275860616 +606 71 training.batch_size 0.0 +606 71 training.label_smoothing 0.0033591187488652757 +606 72 model.embedding_dim 1.0 +606 72 optimizer.lr 0.040210804487884305 +606 72 training.batch_size 0.0 +606 72 training.label_smoothing 0.21105907461157003 +606 73 model.embedding_dim 2.0 +606 73 optimizer.lr 0.012267778406912623 +606 73 training.batch_size 0.0 +606 73 training.label_smoothing 0.00223151445263713 +606 74 model.embedding_dim 1.0 +606 74 optimizer.lr 0.005140390627625735 +606 74 training.batch_size 0.0 +606 74 training.label_smoothing 0.24501634580014842 +606 75 model.embedding_dim 2.0 +606 75 optimizer.lr 
0.0029118147934957287 +606 75 training.batch_size 0.0 +606 75 training.label_smoothing 0.007281193509323504 +606 76 model.embedding_dim 2.0 +606 76 optimizer.lr 0.08703543169287808 +606 76 training.batch_size 1.0 +606 76 training.label_smoothing 0.04376304299939075 +606 77 model.embedding_dim 0.0 +606 77 optimizer.lr 0.08217949893079961 +606 77 training.batch_size 2.0 +606 77 training.label_smoothing 0.004099434400434954 +606 78 model.embedding_dim 0.0 +606 78 optimizer.lr 0.019350722150661376 +606 78 training.batch_size 2.0 +606 78 training.label_smoothing 0.12352827893187594 +606 79 model.embedding_dim 0.0 +606 79 optimizer.lr 0.01457886947011274 +606 79 training.batch_size 0.0 +606 79 training.label_smoothing 0.005226835539623886 +606 80 model.embedding_dim 2.0 +606 80 optimizer.lr 0.0014174374764988479 +606 80 training.batch_size 2.0 +606 80 training.label_smoothing 0.015559205693855221 +606 81 model.embedding_dim 1.0 +606 81 optimizer.lr 0.04035869467379936 +606 81 training.batch_size 1.0 +606 81 training.label_smoothing 0.0023878982319118945 +606 82 model.embedding_dim 0.0 +606 82 optimizer.lr 0.02730343563171189 +606 82 training.batch_size 0.0 +606 82 training.label_smoothing 0.5524365855008394 +606 83 model.embedding_dim 1.0 +606 83 optimizer.lr 0.01470438803313366 +606 83 training.batch_size 1.0 +606 83 training.label_smoothing 0.029017953556121467 +606 84 model.embedding_dim 1.0 +606 84 optimizer.lr 0.0016375765314215055 +606 84 training.batch_size 0.0 +606 84 training.label_smoothing 0.021307906394461815 +606 85 model.embedding_dim 1.0 +606 85 optimizer.lr 0.005052393914135193 +606 85 training.batch_size 0.0 +606 85 training.label_smoothing 0.005358914369179614 +606 86 model.embedding_dim 1.0 +606 86 optimizer.lr 0.013096967707887461 +606 86 training.batch_size 2.0 +606 86 training.label_smoothing 0.025148498124062765 +606 87 model.embedding_dim 1.0 +606 87 optimizer.lr 0.020807359775594923 +606 87 training.batch_size 2.0 +606 87 training.label_smoothing 
0.0021851779859483962 +606 88 model.embedding_dim 1.0 +606 88 optimizer.lr 0.007043950971045132 +606 88 training.batch_size 0.0 +606 88 training.label_smoothing 0.5690078239142699 +606 89 model.embedding_dim 2.0 +606 89 optimizer.lr 0.019852441546563005 +606 89 training.batch_size 0.0 +606 89 training.label_smoothing 0.3242647524696591 +606 90 model.embedding_dim 1.0 +606 90 optimizer.lr 0.007402248190957683 +606 90 training.batch_size 2.0 +606 90 training.label_smoothing 0.001445975599012607 +606 91 model.embedding_dim 1.0 +606 91 optimizer.lr 0.0884585172141508 +606 91 training.batch_size 2.0 +606 91 training.label_smoothing 0.0050231600574296275 +606 92 model.embedding_dim 0.0 +606 92 optimizer.lr 0.014063947192912454 +606 92 training.batch_size 1.0 +606 92 training.label_smoothing 0.08739339049643434 +606 93 model.embedding_dim 2.0 +606 93 optimizer.lr 0.007982445345143427 +606 93 training.batch_size 2.0 +606 93 training.label_smoothing 0.021151764462286574 +606 94 model.embedding_dim 1.0 +606 94 optimizer.lr 0.05018733875844875 +606 94 training.batch_size 0.0 +606 94 training.label_smoothing 0.05282326181389359 +606 95 model.embedding_dim 2.0 +606 95 optimizer.lr 0.016353640983081435 +606 95 training.batch_size 1.0 +606 95 training.label_smoothing 0.023134781095656996 +606 96 model.embedding_dim 2.0 +606 96 optimizer.lr 0.008804678586032436 +606 96 training.batch_size 0.0 +606 96 training.label_smoothing 0.0018655921607700575 +606 97 model.embedding_dim 1.0 +606 97 optimizer.lr 0.08107286386088795 +606 97 training.batch_size 2.0 +606 97 training.label_smoothing 0.003794998608891552 +606 98 model.embedding_dim 2.0 +606 98 optimizer.lr 0.0021747765074562825 +606 98 training.batch_size 2.0 +606 98 training.label_smoothing 0.1747194053822496 +606 99 model.embedding_dim 1.0 +606 99 optimizer.lr 0.0367025629363406 +606 99 training.batch_size 0.0 +606 99 training.label_smoothing 0.10648637954547426 +606 100 model.embedding_dim 2.0 +606 100 optimizer.lr 
0.0021024450485727488 +606 100 training.batch_size 2.0 +606 100 training.label_smoothing 0.0032769796568403293 +606 1 dataset """kinships""" +606 1 model """rotate""" +606 1 loss """crossentropy""" +606 1 regularizer """no""" +606 1 optimizer """adam""" +606 1 training_loop """lcwa""" +606 1 evaluator """rankbased""" +606 2 dataset """kinships""" +606 2 model """rotate""" +606 2 loss """crossentropy""" +606 2 regularizer """no""" +606 2 optimizer """adam""" +606 2 training_loop """lcwa""" +606 2 evaluator """rankbased""" +606 3 dataset """kinships""" +606 3 model """rotate""" +606 3 loss """crossentropy""" +606 3 regularizer """no""" +606 3 optimizer """adam""" +606 3 training_loop """lcwa""" +606 3 evaluator """rankbased""" +606 4 dataset """kinships""" +606 4 model """rotate""" +606 4 loss """crossentropy""" +606 4 regularizer """no""" +606 4 optimizer """adam""" +606 4 training_loop """lcwa""" +606 4 evaluator """rankbased""" +606 5 dataset """kinships""" +606 5 model """rotate""" +606 5 loss """crossentropy""" +606 5 regularizer """no""" +606 5 optimizer """adam""" +606 5 training_loop """lcwa""" +606 5 evaluator """rankbased""" +606 6 dataset """kinships""" +606 6 model """rotate""" +606 6 loss """crossentropy""" +606 6 regularizer """no""" +606 6 optimizer """adam""" +606 6 training_loop """lcwa""" +606 6 evaluator """rankbased""" +606 7 dataset """kinships""" +606 7 model """rotate""" +606 7 loss """crossentropy""" +606 7 regularizer """no""" +606 7 optimizer """adam""" +606 7 training_loop """lcwa""" +606 7 evaluator """rankbased""" +606 8 dataset """kinships""" +606 8 model """rotate""" +606 8 loss """crossentropy""" +606 8 regularizer """no""" +606 8 optimizer """adam""" +606 8 training_loop """lcwa""" +606 8 evaluator """rankbased""" +606 9 dataset """kinships""" +606 9 model """rotate""" +606 9 loss """crossentropy""" +606 9 regularizer """no""" +606 9 optimizer """adam""" +606 9 training_loop """lcwa""" +606 9 evaluator """rankbased""" +606 10 dataset 
"""kinships""" +606 10 model """rotate""" +606 10 loss """crossentropy""" +606 10 regularizer """no""" +606 10 optimizer """adam""" +606 10 training_loop """lcwa""" +606 10 evaluator """rankbased""" +606 11 dataset """kinships""" +606 11 model """rotate""" +606 11 loss """crossentropy""" +606 11 regularizer """no""" +606 11 optimizer """adam""" +606 11 training_loop """lcwa""" +606 11 evaluator """rankbased""" +606 12 dataset """kinships""" +606 12 model """rotate""" +606 12 loss """crossentropy""" +606 12 regularizer """no""" +606 12 optimizer """adam""" +606 12 training_loop """lcwa""" +606 12 evaluator """rankbased""" +606 13 dataset """kinships""" +606 13 model """rotate""" +606 13 loss """crossentropy""" +606 13 regularizer """no""" +606 13 optimizer """adam""" +606 13 training_loop """lcwa""" +606 13 evaluator """rankbased""" +606 14 dataset """kinships""" +606 14 model """rotate""" +606 14 loss """crossentropy""" +606 14 regularizer """no""" +606 14 optimizer """adam""" +606 14 training_loop """lcwa""" +606 14 evaluator """rankbased""" +606 15 dataset """kinships""" +606 15 model """rotate""" +606 15 loss """crossentropy""" +606 15 regularizer """no""" +606 15 optimizer """adam""" +606 15 training_loop """lcwa""" +606 15 evaluator """rankbased""" +606 16 dataset """kinships""" +606 16 model """rotate""" +606 16 loss """crossentropy""" +606 16 regularizer """no""" +606 16 optimizer """adam""" +606 16 training_loop """lcwa""" +606 16 evaluator """rankbased""" +606 17 dataset """kinships""" +606 17 model """rotate""" +606 17 loss """crossentropy""" +606 17 regularizer """no""" +606 17 optimizer """adam""" +606 17 training_loop """lcwa""" +606 17 evaluator """rankbased""" +606 18 dataset """kinships""" +606 18 model """rotate""" +606 18 loss """crossentropy""" +606 18 regularizer """no""" +606 18 optimizer """adam""" +606 18 training_loop """lcwa""" +606 18 evaluator """rankbased""" +606 19 dataset """kinships""" +606 19 model """rotate""" +606 19 loss 
"""crossentropy""" +606 19 regularizer """no""" +606 19 optimizer """adam""" +606 19 training_loop """lcwa""" +606 19 evaluator """rankbased""" +606 20 dataset """kinships""" +606 20 model """rotate""" +606 20 loss """crossentropy""" +606 20 regularizer """no""" +606 20 optimizer """adam""" +606 20 training_loop """lcwa""" +606 20 evaluator """rankbased""" +606 21 dataset """kinships""" +606 21 model """rotate""" +606 21 loss """crossentropy""" +606 21 regularizer """no""" +606 21 optimizer """adam""" +606 21 training_loop """lcwa""" +606 21 evaluator """rankbased""" +606 22 dataset """kinships""" +606 22 model """rotate""" +606 22 loss """crossentropy""" +606 22 regularizer """no""" +606 22 optimizer """adam""" +606 22 training_loop """lcwa""" +606 22 evaluator """rankbased""" +606 23 dataset """kinships""" +606 23 model """rotate""" +606 23 loss """crossentropy""" +606 23 regularizer """no""" +606 23 optimizer """adam""" +606 23 training_loop """lcwa""" +606 23 evaluator """rankbased""" +606 24 dataset """kinships""" +606 24 model """rotate""" +606 24 loss """crossentropy""" +606 24 regularizer """no""" +606 24 optimizer """adam""" +606 24 training_loop """lcwa""" +606 24 evaluator """rankbased""" +606 25 dataset """kinships""" +606 25 model """rotate""" +606 25 loss """crossentropy""" +606 25 regularizer """no""" +606 25 optimizer """adam""" +606 25 training_loop """lcwa""" +606 25 evaluator """rankbased""" +606 26 dataset """kinships""" +606 26 model """rotate""" +606 26 loss """crossentropy""" +606 26 regularizer """no""" +606 26 optimizer """adam""" +606 26 training_loop """lcwa""" +606 26 evaluator """rankbased""" +606 27 dataset """kinships""" +606 27 model """rotate""" +606 27 loss """crossentropy""" +606 27 regularizer """no""" +606 27 optimizer """adam""" +606 27 training_loop """lcwa""" +606 27 evaluator """rankbased""" +606 28 dataset """kinships""" +606 28 model """rotate""" +606 28 loss """crossentropy""" +606 28 regularizer """no""" +606 28 
optimizer """adam""" +606 28 training_loop """lcwa""" +606 28 evaluator """rankbased""" +606 29 dataset """kinships""" +606 29 model """rotate""" +606 29 loss """crossentropy""" +606 29 regularizer """no""" +606 29 optimizer """adam""" +606 29 training_loop """lcwa""" +606 29 evaluator """rankbased""" +606 30 dataset """kinships""" +606 30 model """rotate""" +606 30 loss """crossentropy""" +606 30 regularizer """no""" +606 30 optimizer """adam""" +606 30 training_loop """lcwa""" +606 30 evaluator """rankbased""" +606 31 dataset """kinships""" +606 31 model """rotate""" +606 31 loss """crossentropy""" +606 31 regularizer """no""" +606 31 optimizer """adam""" +606 31 training_loop """lcwa""" +606 31 evaluator """rankbased""" +606 32 dataset """kinships""" +606 32 model """rotate""" +606 32 loss """crossentropy""" +606 32 regularizer """no""" +606 32 optimizer """adam""" +606 32 training_loop """lcwa""" +606 32 evaluator """rankbased""" +606 33 dataset """kinships""" +606 33 model """rotate""" +606 33 loss """crossentropy""" +606 33 regularizer """no""" +606 33 optimizer """adam""" +606 33 training_loop """lcwa""" +606 33 evaluator """rankbased""" +606 34 dataset """kinships""" +606 34 model """rotate""" +606 34 loss """crossentropy""" +606 34 regularizer """no""" +606 34 optimizer """adam""" +606 34 training_loop """lcwa""" +606 34 evaluator """rankbased""" +606 35 dataset """kinships""" +606 35 model """rotate""" +606 35 loss """crossentropy""" +606 35 regularizer """no""" +606 35 optimizer """adam""" +606 35 training_loop """lcwa""" +606 35 evaluator """rankbased""" +606 36 dataset """kinships""" +606 36 model """rotate""" +606 36 loss """crossentropy""" +606 36 regularizer """no""" +606 36 optimizer """adam""" +606 36 training_loop """lcwa""" +606 36 evaluator """rankbased""" +606 37 dataset """kinships""" +606 37 model """rotate""" +606 37 loss """crossentropy""" +606 37 regularizer """no""" +606 37 optimizer """adam""" +606 37 training_loop """lcwa""" +606 37 
evaluator """rankbased""" +606 38 dataset """kinships""" +606 38 model """rotate""" +606 38 loss """crossentropy""" +606 38 regularizer """no""" +606 38 optimizer """adam""" +606 38 training_loop """lcwa""" +606 38 evaluator """rankbased""" +606 39 dataset """kinships""" +606 39 model """rotate""" +606 39 loss """crossentropy""" +606 39 regularizer """no""" +606 39 optimizer """adam""" +606 39 training_loop """lcwa""" +606 39 evaluator """rankbased""" +606 40 dataset """kinships""" +606 40 model """rotate""" +606 40 loss """crossentropy""" +606 40 regularizer """no""" +606 40 optimizer """adam""" +606 40 training_loop """lcwa""" +606 40 evaluator """rankbased""" +606 41 dataset """kinships""" +606 41 model """rotate""" +606 41 loss """crossentropy""" +606 41 regularizer """no""" +606 41 optimizer """adam""" +606 41 training_loop """lcwa""" +606 41 evaluator """rankbased""" +606 42 dataset """kinships""" +606 42 model """rotate""" +606 42 loss """crossentropy""" +606 42 regularizer """no""" +606 42 optimizer """adam""" +606 42 training_loop """lcwa""" +606 42 evaluator """rankbased""" +606 43 dataset """kinships""" +606 43 model """rotate""" +606 43 loss """crossentropy""" +606 43 regularizer """no""" +606 43 optimizer """adam""" +606 43 training_loop """lcwa""" +606 43 evaluator """rankbased""" +606 44 dataset """kinships""" +606 44 model """rotate""" +606 44 loss """crossentropy""" +606 44 regularizer """no""" +606 44 optimizer """adam""" +606 44 training_loop """lcwa""" +606 44 evaluator """rankbased""" +606 45 dataset """kinships""" +606 45 model """rotate""" +606 45 loss """crossentropy""" +606 45 regularizer """no""" +606 45 optimizer """adam""" +606 45 training_loop """lcwa""" +606 45 evaluator """rankbased""" +606 46 dataset """kinships""" +606 46 model """rotate""" +606 46 loss """crossentropy""" +606 46 regularizer """no""" +606 46 optimizer """adam""" +606 46 training_loop """lcwa""" +606 46 evaluator """rankbased""" +606 47 dataset """kinships""" +606 47 
model """rotate""" +606 47 loss """crossentropy""" +606 47 regularizer """no""" +606 47 optimizer """adam""" +606 47 training_loop """lcwa""" +606 47 evaluator """rankbased""" +606 48 dataset """kinships""" +606 48 model """rotate""" +606 48 loss """crossentropy""" +606 48 regularizer """no""" +606 48 optimizer """adam""" +606 48 training_loop """lcwa""" +606 48 evaluator """rankbased""" +606 49 dataset """kinships""" +606 49 model """rotate""" +606 49 loss """crossentropy""" +606 49 regularizer """no""" +606 49 optimizer """adam""" +606 49 training_loop """lcwa""" +606 49 evaluator """rankbased""" +606 50 dataset """kinships""" +606 50 model """rotate""" +606 50 loss """crossentropy""" +606 50 regularizer """no""" +606 50 optimizer """adam""" +606 50 training_loop """lcwa""" +606 50 evaluator """rankbased""" +606 51 dataset """kinships""" +606 51 model """rotate""" +606 51 loss """crossentropy""" +606 51 regularizer """no""" +606 51 optimizer """adam""" +606 51 training_loop """lcwa""" +606 51 evaluator """rankbased""" +606 52 dataset """kinships""" +606 52 model """rotate""" +606 52 loss """crossentropy""" +606 52 regularizer """no""" +606 52 optimizer """adam""" +606 52 training_loop """lcwa""" +606 52 evaluator """rankbased""" +606 53 dataset """kinships""" +606 53 model """rotate""" +606 53 loss """crossentropy""" +606 53 regularizer """no""" +606 53 optimizer """adam""" +606 53 training_loop """lcwa""" +606 53 evaluator """rankbased""" +606 54 dataset """kinships""" +606 54 model """rotate""" +606 54 loss """crossentropy""" +606 54 regularizer """no""" +606 54 optimizer """adam""" +606 54 training_loop """lcwa""" +606 54 evaluator """rankbased""" +606 55 dataset """kinships""" +606 55 model """rotate""" +606 55 loss """crossentropy""" +606 55 regularizer """no""" +606 55 optimizer """adam""" +606 55 training_loop """lcwa""" +606 55 evaluator """rankbased""" +606 56 dataset """kinships""" +606 56 model """rotate""" +606 56 loss """crossentropy""" +606 56 
regularizer """no""" +606 56 optimizer """adam""" +606 56 training_loop """lcwa""" +606 56 evaluator """rankbased""" +606 57 dataset """kinships""" +606 57 model """rotate""" +606 57 loss """crossentropy""" +606 57 regularizer """no""" +606 57 optimizer """adam""" +606 57 training_loop """lcwa""" +606 57 evaluator """rankbased""" +606 58 dataset """kinships""" +606 58 model """rotate""" +606 58 loss """crossentropy""" +606 58 regularizer """no""" +606 58 optimizer """adam""" +606 58 training_loop """lcwa""" +606 58 evaluator """rankbased""" +606 59 dataset """kinships""" +606 59 model """rotate""" +606 59 loss """crossentropy""" +606 59 regularizer """no""" +606 59 optimizer """adam""" +606 59 training_loop """lcwa""" +606 59 evaluator """rankbased""" +606 60 dataset """kinships""" +606 60 model """rotate""" +606 60 loss """crossentropy""" +606 60 regularizer """no""" +606 60 optimizer """adam""" +606 60 training_loop """lcwa""" +606 60 evaluator """rankbased""" +606 61 dataset """kinships""" +606 61 model """rotate""" +606 61 loss """crossentropy""" +606 61 regularizer """no""" +606 61 optimizer """adam""" +606 61 training_loop """lcwa""" +606 61 evaluator """rankbased""" +606 62 dataset """kinships""" +606 62 model """rotate""" +606 62 loss """crossentropy""" +606 62 regularizer """no""" +606 62 optimizer """adam""" +606 62 training_loop """lcwa""" +606 62 evaluator """rankbased""" +606 63 dataset """kinships""" +606 63 model """rotate""" +606 63 loss """crossentropy""" +606 63 regularizer """no""" +606 63 optimizer """adam""" +606 63 training_loop """lcwa""" +606 63 evaluator """rankbased""" +606 64 dataset """kinships""" +606 64 model """rotate""" +606 64 loss """crossentropy""" +606 64 regularizer """no""" +606 64 optimizer """adam""" +606 64 training_loop """lcwa""" +606 64 evaluator """rankbased""" +606 65 dataset """kinships""" +606 65 model """rotate""" +606 65 loss """crossentropy""" +606 65 regularizer """no""" +606 65 optimizer """adam""" +606 65 
training_loop """lcwa""" +606 65 evaluator """rankbased""" +606 66 dataset """kinships""" +606 66 model """rotate""" +606 66 loss """crossentropy""" +606 66 regularizer """no""" +606 66 optimizer """adam""" +606 66 training_loop """lcwa""" +606 66 evaluator """rankbased""" +606 67 dataset """kinships""" +606 67 model """rotate""" +606 67 loss """crossentropy""" +606 67 regularizer """no""" +606 67 optimizer """adam""" +606 67 training_loop """lcwa""" +606 67 evaluator """rankbased""" +606 68 dataset """kinships""" +606 68 model """rotate""" +606 68 loss """crossentropy""" +606 68 regularizer """no""" +606 68 optimizer """adam""" +606 68 training_loop """lcwa""" +606 68 evaluator """rankbased""" +606 69 dataset """kinships""" +606 69 model """rotate""" +606 69 loss """crossentropy""" +606 69 regularizer """no""" +606 69 optimizer """adam""" +606 69 training_loop """lcwa""" +606 69 evaluator """rankbased""" +606 70 dataset """kinships""" +606 70 model """rotate""" +606 70 loss """crossentropy""" +606 70 regularizer """no""" +606 70 optimizer """adam""" +606 70 training_loop """lcwa""" +606 70 evaluator """rankbased""" +606 71 dataset """kinships""" +606 71 model """rotate""" +606 71 loss """crossentropy""" +606 71 regularizer """no""" +606 71 optimizer """adam""" +606 71 training_loop """lcwa""" +606 71 evaluator """rankbased""" +606 72 dataset """kinships""" +606 72 model """rotate""" +606 72 loss """crossentropy""" +606 72 regularizer """no""" +606 72 optimizer """adam""" +606 72 training_loop """lcwa""" +606 72 evaluator """rankbased""" +606 73 dataset """kinships""" +606 73 model """rotate""" +606 73 loss """crossentropy""" +606 73 regularizer """no""" +606 73 optimizer """adam""" +606 73 training_loop """lcwa""" +606 73 evaluator """rankbased""" +606 74 dataset """kinships""" +606 74 model """rotate""" +606 74 loss """crossentropy""" +606 74 regularizer """no""" +606 74 optimizer """adam""" +606 74 training_loop """lcwa""" +606 74 evaluator """rankbased""" +606 
75 dataset """kinships""" +606 75 model """rotate""" +606 75 loss """crossentropy""" +606 75 regularizer """no""" +606 75 optimizer """adam""" +606 75 training_loop """lcwa""" +606 75 evaluator """rankbased""" +606 76 dataset """kinships""" +606 76 model """rotate""" +606 76 loss """crossentropy""" +606 76 regularizer """no""" +606 76 optimizer """adam""" +606 76 training_loop """lcwa""" +606 76 evaluator """rankbased""" +606 77 dataset """kinships""" +606 77 model """rotate""" +606 77 loss """crossentropy""" +606 77 regularizer """no""" +606 77 optimizer """adam""" +606 77 training_loop """lcwa""" +606 77 evaluator """rankbased""" +606 78 dataset """kinships""" +606 78 model """rotate""" +606 78 loss """crossentropy""" +606 78 regularizer """no""" +606 78 optimizer """adam""" +606 78 training_loop """lcwa""" +606 78 evaluator """rankbased""" +606 79 dataset """kinships""" +606 79 model """rotate""" +606 79 loss """crossentropy""" +606 79 regularizer """no""" +606 79 optimizer """adam""" +606 79 training_loop """lcwa""" +606 79 evaluator """rankbased""" +606 80 dataset """kinships""" +606 80 model """rotate""" +606 80 loss """crossentropy""" +606 80 regularizer """no""" +606 80 optimizer """adam""" +606 80 training_loop """lcwa""" +606 80 evaluator """rankbased""" +606 81 dataset """kinships""" +606 81 model """rotate""" +606 81 loss """crossentropy""" +606 81 regularizer """no""" +606 81 optimizer """adam""" +606 81 training_loop """lcwa""" +606 81 evaluator """rankbased""" +606 82 dataset """kinships""" +606 82 model """rotate""" +606 82 loss """crossentropy""" +606 82 regularizer """no""" +606 82 optimizer """adam""" +606 82 training_loop """lcwa""" +606 82 evaluator """rankbased""" +606 83 dataset """kinships""" +606 83 model """rotate""" +606 83 loss """crossentropy""" +606 83 regularizer """no""" +606 83 optimizer """adam""" +606 83 training_loop """lcwa""" +606 83 evaluator """rankbased""" +606 84 dataset """kinships""" +606 84 model """rotate""" +606 84 
loss """crossentropy""" +606 84 regularizer """no""" +606 84 optimizer """adam""" +606 84 training_loop """lcwa""" +606 84 evaluator """rankbased""" +606 85 dataset """kinships""" +606 85 model """rotate""" +606 85 loss """crossentropy""" +606 85 regularizer """no""" +606 85 optimizer """adam""" +606 85 training_loop """lcwa""" +606 85 evaluator """rankbased""" +606 86 dataset """kinships""" +606 86 model """rotate""" +606 86 loss """crossentropy""" +606 86 regularizer """no""" +606 86 optimizer """adam""" +606 86 training_loop """lcwa""" +606 86 evaluator """rankbased""" +606 87 dataset """kinships""" +606 87 model """rotate""" +606 87 loss """crossentropy""" +606 87 regularizer """no""" +606 87 optimizer """adam""" +606 87 training_loop """lcwa""" +606 87 evaluator """rankbased""" +606 88 dataset """kinships""" +606 88 model """rotate""" +606 88 loss """crossentropy""" +606 88 regularizer """no""" +606 88 optimizer """adam""" +606 88 training_loop """lcwa""" +606 88 evaluator """rankbased""" +606 89 dataset """kinships""" +606 89 model """rotate""" +606 89 loss """crossentropy""" +606 89 regularizer """no""" +606 89 optimizer """adam""" +606 89 training_loop """lcwa""" +606 89 evaluator """rankbased""" +606 90 dataset """kinships""" +606 90 model """rotate""" +606 90 loss """crossentropy""" +606 90 regularizer """no""" +606 90 optimizer """adam""" +606 90 training_loop """lcwa""" +606 90 evaluator """rankbased""" +606 91 dataset """kinships""" +606 91 model """rotate""" +606 91 loss """crossentropy""" +606 91 regularizer """no""" +606 91 optimizer """adam""" +606 91 training_loop """lcwa""" +606 91 evaluator """rankbased""" +606 92 dataset """kinships""" +606 92 model """rotate""" +606 92 loss """crossentropy""" +606 92 regularizer """no""" +606 92 optimizer """adam""" +606 92 training_loop """lcwa""" +606 92 evaluator """rankbased""" +606 93 dataset """kinships""" +606 93 model """rotate""" +606 93 loss """crossentropy""" +606 93 regularizer """no""" +606 93 
optimizer """adam""" +606 93 training_loop """lcwa""" +606 93 evaluator """rankbased""" +606 94 dataset """kinships""" +606 94 model """rotate""" +606 94 loss """crossentropy""" +606 94 regularizer """no""" +606 94 optimizer """adam""" +606 94 training_loop """lcwa""" +606 94 evaluator """rankbased""" +606 95 dataset """kinships""" +606 95 model """rotate""" +606 95 loss """crossentropy""" +606 95 regularizer """no""" +606 95 optimizer """adam""" +606 95 training_loop """lcwa""" +606 95 evaluator """rankbased""" +606 96 dataset """kinships""" +606 96 model """rotate""" +606 96 loss """crossentropy""" +606 96 regularizer """no""" +606 96 optimizer """adam""" +606 96 training_loop """lcwa""" +606 96 evaluator """rankbased""" +606 97 dataset """kinships""" +606 97 model """rotate""" +606 97 loss """crossentropy""" +606 97 regularizer """no""" +606 97 optimizer """adam""" +606 97 training_loop """lcwa""" +606 97 evaluator """rankbased""" +606 98 dataset """kinships""" +606 98 model """rotate""" +606 98 loss """crossentropy""" +606 98 regularizer """no""" +606 98 optimizer """adam""" +606 98 training_loop """lcwa""" +606 98 evaluator """rankbased""" +606 99 dataset """kinships""" +606 99 model """rotate""" +606 99 loss """crossentropy""" +606 99 regularizer """no""" +606 99 optimizer """adam""" +606 99 training_loop """lcwa""" +606 99 evaluator """rankbased""" +606 100 dataset """kinships""" +606 100 model """rotate""" +606 100 loss """crossentropy""" +606 100 regularizer """no""" +606 100 optimizer """adam""" +606 100 training_loop """lcwa""" +606 100 evaluator """rankbased""" +607 1 model.embedding_dim 0.0 +607 1 optimizer.lr 0.002925110132620455 +607 1 training.batch_size 1.0 +607 1 training.label_smoothing 0.0029117616181764854 +607 2 model.embedding_dim 2.0 +607 2 optimizer.lr 0.007048520710390786 +607 2 training.batch_size 2.0 +607 2 training.label_smoothing 0.08457027067056602 +607 3 model.embedding_dim 2.0 +607 3 optimizer.lr 0.005143479561114939 +607 3 
training.batch_size 0.0 +607 3 training.label_smoothing 0.08117891317576531 +607 4 model.embedding_dim 0.0 +607 4 optimizer.lr 0.007990716987366784 +607 4 training.batch_size 1.0 +607 4 training.label_smoothing 0.006450009782066109 +607 5 model.embedding_dim 0.0 +607 5 optimizer.lr 0.022781484405534395 +607 5 training.batch_size 0.0 +607 5 training.label_smoothing 0.22062353157889533 +607 6 model.embedding_dim 2.0 +607 6 optimizer.lr 0.09415974631616955 +607 6 training.batch_size 1.0 +607 6 training.label_smoothing 0.195923592987269 +607 7 model.embedding_dim 0.0 +607 7 optimizer.lr 0.027925494915751605 +607 7 training.batch_size 0.0 +607 7 training.label_smoothing 0.009245468778505402 +607 8 model.embedding_dim 0.0 +607 8 optimizer.lr 0.004189878497634737 +607 8 training.batch_size 0.0 +607 8 training.label_smoothing 0.0026641862564405006 +607 9 model.embedding_dim 2.0 +607 9 optimizer.lr 0.008349094210878582 +607 9 training.batch_size 2.0 +607 9 training.label_smoothing 0.001558639353800341 +607 10 model.embedding_dim 2.0 +607 10 optimizer.lr 0.004071358445478762 +607 10 training.batch_size 0.0 +607 10 training.label_smoothing 0.0174043419378365 +607 11 model.embedding_dim 1.0 +607 11 optimizer.lr 0.002231999695210528 +607 11 training.batch_size 0.0 +607 11 training.label_smoothing 0.009287851428950774 +607 12 model.embedding_dim 2.0 +607 12 optimizer.lr 0.08029113849199675 +607 12 training.batch_size 1.0 +607 12 training.label_smoothing 0.014803230190823703 +607 13 model.embedding_dim 1.0 +607 13 optimizer.lr 0.08222255944856294 +607 13 training.batch_size 0.0 +607 13 training.label_smoothing 0.37355284641371894 +607 14 model.embedding_dim 2.0 +607 14 optimizer.lr 0.013716374004027414 +607 14 training.batch_size 2.0 +607 14 training.label_smoothing 0.3408329701082627 +607 15 model.embedding_dim 1.0 +607 15 optimizer.lr 0.010050277245590235 +607 15 training.batch_size 1.0 +607 15 training.label_smoothing 0.006160605607813137 +607 16 model.embedding_dim 2.0 +607 
16 optimizer.lr 0.04179761021879698 +607 16 training.batch_size 2.0 +607 16 training.label_smoothing 0.07008524056133027 +607 17 model.embedding_dim 2.0 +607 17 optimizer.lr 0.012513015793567048 +607 17 training.batch_size 2.0 +607 17 training.label_smoothing 0.007542011371533203 +607 18 model.embedding_dim 0.0 +607 18 optimizer.lr 0.021363997442370244 +607 18 training.batch_size 1.0 +607 18 training.label_smoothing 0.04005691076602891 +607 19 model.embedding_dim 2.0 +607 19 optimizer.lr 0.00445283152132687 +607 19 training.batch_size 0.0 +607 19 training.label_smoothing 0.0519542417452392 +607 20 model.embedding_dim 0.0 +607 20 optimizer.lr 0.0023015585351160344 +607 20 training.batch_size 2.0 +607 20 training.label_smoothing 0.5564196893022868 +607 21 model.embedding_dim 2.0 +607 21 optimizer.lr 0.025931932153256516 +607 21 training.batch_size 2.0 +607 21 training.label_smoothing 0.0017356981612969072 +607 22 model.embedding_dim 1.0 +607 22 optimizer.lr 0.0017756349406241184 +607 22 training.batch_size 0.0 +607 22 training.label_smoothing 0.0258723677453298 +607 23 model.embedding_dim 2.0 +607 23 optimizer.lr 0.005142787852613342 +607 23 training.batch_size 1.0 +607 23 training.label_smoothing 0.014810435895416044 +607 24 model.embedding_dim 2.0 +607 24 optimizer.lr 0.008273853025839694 +607 24 training.batch_size 1.0 +607 24 training.label_smoothing 0.180488448153689 +607 25 model.embedding_dim 1.0 +607 25 optimizer.lr 0.036705338140260106 +607 25 training.batch_size 0.0 +607 25 training.label_smoothing 0.04864286952813198 +607 26 model.embedding_dim 0.0 +607 26 optimizer.lr 0.018209573310905617 +607 26 training.batch_size 2.0 +607 26 training.label_smoothing 0.0016656262162589505 +607 27 model.embedding_dim 1.0 +607 27 optimizer.lr 0.02704520447212526 +607 27 training.batch_size 1.0 +607 27 training.label_smoothing 0.03733513164200334 +607 28 model.embedding_dim 2.0 +607 28 optimizer.lr 0.08179556354204948 +607 28 training.batch_size 1.0 +607 28 
training.label_smoothing 0.12075054317625047 +607 29 model.embedding_dim 1.0 +607 29 optimizer.lr 0.005375847246215702 +607 29 training.batch_size 2.0 +607 29 training.label_smoothing 0.05897614565337488 +607 30 model.embedding_dim 2.0 +607 30 optimizer.lr 0.023996988996239227 +607 30 training.batch_size 2.0 +607 30 training.label_smoothing 0.32322536292712356 +607 31 model.embedding_dim 0.0 +607 31 optimizer.lr 0.0059368708737024215 +607 31 training.batch_size 0.0 +607 31 training.label_smoothing 0.006245550308517523 +607 32 model.embedding_dim 0.0 +607 32 optimizer.lr 0.006894702679232272 +607 32 training.batch_size 0.0 +607 32 training.label_smoothing 0.020045656538109184 +607 33 model.embedding_dim 0.0 +607 33 optimizer.lr 0.038965582656640185 +607 33 training.batch_size 1.0 +607 33 training.label_smoothing 0.003836157509351976 +607 34 model.embedding_dim 0.0 +607 34 optimizer.lr 0.0018081664959661865 +607 34 training.batch_size 0.0 +607 34 training.label_smoothing 0.019992459725129915 +607 35 model.embedding_dim 0.0 +607 35 optimizer.lr 0.0013068961853139017 +607 35 training.batch_size 1.0 +607 35 training.label_smoothing 0.020906076018135298 +607 36 model.embedding_dim 2.0 +607 36 optimizer.lr 0.0021706580457212087 +607 36 training.batch_size 0.0 +607 36 training.label_smoothing 0.0032938749949694736 +607 37 model.embedding_dim 0.0 +607 37 optimizer.lr 0.03281542586486766 +607 37 training.batch_size 2.0 +607 37 training.label_smoothing 0.1385224131704435 +607 38 model.embedding_dim 2.0 +607 38 optimizer.lr 0.002375395687532429 +607 38 training.batch_size 1.0 +607 38 training.label_smoothing 0.7458283442805523 +607 39 model.embedding_dim 1.0 +607 39 optimizer.lr 0.0011497210095418454 +607 39 training.batch_size 2.0 +607 39 training.label_smoothing 0.7261340125625155 +607 40 model.embedding_dim 2.0 +607 40 optimizer.lr 0.0030654286444135445 +607 40 training.batch_size 2.0 +607 40 training.label_smoothing 0.8593118006406082 +607 41 model.embedding_dim 0.0 +607 
41 optimizer.lr 0.015153795311454873 +607 41 training.batch_size 0.0 +607 41 training.label_smoothing 0.00740772099061046 +607 42 model.embedding_dim 0.0 +607 42 optimizer.lr 0.003298868798590252 +607 42 training.batch_size 1.0 +607 42 training.label_smoothing 0.013412056928506685 +607 43 model.embedding_dim 1.0 +607 43 optimizer.lr 0.08299986367140516 +607 43 training.batch_size 1.0 +607 43 training.label_smoothing 0.002944914626292052 +607 44 model.embedding_dim 2.0 +607 44 optimizer.lr 0.024699883064728017 +607 44 training.batch_size 2.0 +607 44 training.label_smoothing 0.14863850054312983 +607 45 model.embedding_dim 2.0 +607 45 optimizer.lr 0.01272019876842224 +607 45 training.batch_size 2.0 +607 45 training.label_smoothing 0.10431145659445439 +607 46 model.embedding_dim 0.0 +607 46 optimizer.lr 0.0013264923613372543 +607 46 training.batch_size 2.0 +607 46 training.label_smoothing 0.8204553928613946 +607 47 model.embedding_dim 1.0 +607 47 optimizer.lr 0.0013480840501841377 +607 47 training.batch_size 2.0 +607 47 training.label_smoothing 0.010749405229062535 +607 48 model.embedding_dim 0.0 +607 48 optimizer.lr 0.0021313415235486156 +607 48 training.batch_size 0.0 +607 48 training.label_smoothing 0.002574191088285555 +607 49 model.embedding_dim 1.0 +607 49 optimizer.lr 0.004371299469754398 +607 49 training.batch_size 1.0 +607 49 training.label_smoothing 0.0041254429457905405 +607 50 model.embedding_dim 1.0 +607 50 optimizer.lr 0.006448132634676469 +607 50 training.batch_size 0.0 +607 50 training.label_smoothing 0.004234596907097493 +607 51 model.embedding_dim 0.0 +607 51 optimizer.lr 0.013418638750337024 +607 51 training.batch_size 1.0 +607 51 training.label_smoothing 0.013873730125516391 +607 52 model.embedding_dim 2.0 +607 52 optimizer.lr 0.049623644579837756 +607 52 training.batch_size 0.0 +607 52 training.label_smoothing 0.002043920370578021 +607 53 model.embedding_dim 2.0 +607 53 optimizer.lr 0.008916047297193856 +607 53 training.batch_size 0.0 +607 53 
training.label_smoothing 0.0032612857547585894 +607 54 model.embedding_dim 1.0 +607 54 optimizer.lr 0.0017070743482802023 +607 54 training.batch_size 2.0 +607 54 training.label_smoothing 0.003745555700817421 +607 55 model.embedding_dim 1.0 +607 55 optimizer.lr 0.08191619227361055 +607 55 training.batch_size 0.0 +607 55 training.label_smoothing 0.006977010553735886 +607 56 model.embedding_dim 0.0 +607 56 optimizer.lr 0.008324891653267996 +607 56 training.batch_size 0.0 +607 56 training.label_smoothing 0.7624954393388108 +607 57 model.embedding_dim 1.0 +607 57 optimizer.lr 0.09765490131413623 +607 57 training.batch_size 0.0 +607 57 training.label_smoothing 0.8524249362210703 +607 58 model.embedding_dim 1.0 +607 58 optimizer.lr 0.06510098194667306 +607 58 training.batch_size 0.0 +607 58 training.label_smoothing 0.014692702375478742 +607 59 model.embedding_dim 2.0 +607 59 optimizer.lr 0.001954248499272841 +607 59 training.batch_size 2.0 +607 59 training.label_smoothing 0.035364040746726944 +607 60 model.embedding_dim 2.0 +607 60 optimizer.lr 0.025847446823121772 +607 60 training.batch_size 0.0 +607 60 training.label_smoothing 0.24152319293344904 +607 61 model.embedding_dim 0.0 +607 61 optimizer.lr 0.021986158422717002 +607 61 training.batch_size 0.0 +607 61 training.label_smoothing 0.02661315360994819 +607 62 model.embedding_dim 2.0 +607 62 optimizer.lr 0.004834411301432173 +607 62 training.batch_size 2.0 +607 62 training.label_smoothing 0.5596255334173309 +607 63 model.embedding_dim 1.0 +607 63 optimizer.lr 0.001170247058635639 +607 63 training.batch_size 0.0 +607 63 training.label_smoothing 0.11843854151464227 +607 64 model.embedding_dim 2.0 +607 64 optimizer.lr 0.0025492587470798884 +607 64 training.batch_size 0.0 +607 64 training.label_smoothing 0.46485086544499427 +607 65 model.embedding_dim 1.0 +607 65 optimizer.lr 0.015157040322568762 +607 65 training.batch_size 1.0 +607 65 training.label_smoothing 0.11184812929943161 +607 66 model.embedding_dim 0.0 +607 66 
optimizer.lr 0.03548444968971233 +607 66 training.batch_size 2.0 +607 66 training.label_smoothing 0.04099327548854191 +607 67 model.embedding_dim 2.0 +607 67 optimizer.lr 0.07043091836156508 +607 67 training.batch_size 2.0 +607 67 training.label_smoothing 0.03841543628364531 +607 68 model.embedding_dim 2.0 +607 68 optimizer.lr 0.010689501605096978 +607 68 training.batch_size 2.0 +607 68 training.label_smoothing 0.010826541126788611 +607 69 model.embedding_dim 2.0 +607 69 optimizer.lr 0.010313164638715622 +607 69 training.batch_size 1.0 +607 69 training.label_smoothing 0.004694637778170293 +607 70 model.embedding_dim 2.0 +607 70 optimizer.lr 0.001114958529587839 +607 70 training.batch_size 0.0 +607 70 training.label_smoothing 0.001385020936174357 +607 71 model.embedding_dim 2.0 +607 71 optimizer.lr 0.06140939743303213 +607 71 training.batch_size 2.0 +607 71 training.label_smoothing 0.4140552978851544 +607 72 model.embedding_dim 1.0 +607 72 optimizer.lr 0.003162096533689675 +607 72 training.batch_size 1.0 +607 72 training.label_smoothing 0.03178451586170207 +607 73 model.embedding_dim 0.0 +607 73 optimizer.lr 0.013695288907424549 +607 73 training.batch_size 0.0 +607 73 training.label_smoothing 0.25665896471880234 +607 74 model.embedding_dim 2.0 +607 74 optimizer.lr 0.09602774015910866 +607 74 training.batch_size 2.0 +607 74 training.label_smoothing 0.2766582380793567 +607 75 model.embedding_dim 0.0 +607 75 optimizer.lr 0.0071690360644977025 +607 75 training.batch_size 1.0 +607 75 training.label_smoothing 0.00215458526370066 +607 76 model.embedding_dim 0.0 +607 76 optimizer.lr 0.031626737396514226 +607 76 training.batch_size 0.0 +607 76 training.label_smoothing 0.062286310050275275 +607 77 model.embedding_dim 2.0 +607 77 optimizer.lr 0.0014097615919166449 +607 77 training.batch_size 0.0 +607 77 training.label_smoothing 0.010916651174117477 +607 78 model.embedding_dim 0.0 +607 78 optimizer.lr 0.026663936257796242 +607 78 training.batch_size 0.0 +607 78 
training.label_smoothing 0.0018993916070110686 +607 79 model.embedding_dim 0.0 +607 79 optimizer.lr 0.006189562015910114 +607 79 training.batch_size 2.0 +607 79 training.label_smoothing 0.12160959566645467 +607 80 model.embedding_dim 1.0 +607 80 optimizer.lr 0.07858220430765803 +607 80 training.batch_size 0.0 +607 80 training.label_smoothing 0.0070825434980651795 +607 81 model.embedding_dim 1.0 +607 81 optimizer.lr 0.014415365148692648 +607 81 training.batch_size 2.0 +607 81 training.label_smoothing 0.0020966157906118395 +607 82 model.embedding_dim 1.0 +607 82 optimizer.lr 0.026126440352554105 +607 82 training.batch_size 2.0 +607 82 training.label_smoothing 0.03961238263935205 +607 83 model.embedding_dim 1.0 +607 83 optimizer.lr 0.019402745508860758 +607 83 training.batch_size 0.0 +607 83 training.label_smoothing 0.05001821717585084 +607 84 model.embedding_dim 1.0 +607 84 optimizer.lr 0.0020312032890479593 +607 84 training.batch_size 1.0 +607 84 training.label_smoothing 0.011139149462568953 +607 85 model.embedding_dim 1.0 +607 85 optimizer.lr 0.014925419426361514 +607 85 training.batch_size 2.0 +607 85 training.label_smoothing 0.005706830892964561 +607 86 model.embedding_dim 1.0 +607 86 optimizer.lr 0.036432623027318284 +607 86 training.batch_size 0.0 +607 86 training.label_smoothing 0.025382103818053104 +607 87 model.embedding_dim 0.0 +607 87 optimizer.lr 0.00624068768728251 +607 87 training.batch_size 1.0 +607 87 training.label_smoothing 0.00731974837023611 +607 88 model.embedding_dim 2.0 +607 88 optimizer.lr 0.027833696367478962 +607 88 training.batch_size 1.0 +607 88 training.label_smoothing 0.12022877940540506 +607 89 model.embedding_dim 2.0 +607 89 optimizer.lr 0.012166133775380927 +607 89 training.batch_size 0.0 +607 89 training.label_smoothing 0.03493843404996891 +607 90 model.embedding_dim 0.0 +607 90 optimizer.lr 0.013925753599686931 +607 90 training.batch_size 1.0 +607 90 training.label_smoothing 0.012295676007479227 +607 91 model.embedding_dim 2.0 +607 
91 optimizer.lr 0.0011378790888272256 +607 91 training.batch_size 1.0 +607 91 training.label_smoothing 0.05135194308714969 +607 92 model.embedding_dim 2.0 +607 92 optimizer.lr 0.026456069676889668 +607 92 training.batch_size 0.0 +607 92 training.label_smoothing 0.05604347094476373 +607 93 model.embedding_dim 2.0 +607 93 optimizer.lr 0.01076860466351238 +607 93 training.batch_size 0.0 +607 93 training.label_smoothing 0.8538580807441328 +607 94 model.embedding_dim 1.0 +607 94 optimizer.lr 0.018045935921756562 +607 94 training.batch_size 0.0 +607 94 training.label_smoothing 0.0018554922661778081 +607 95 model.embedding_dim 1.0 +607 95 optimizer.lr 0.0022121469910301047 +607 95 training.batch_size 1.0 +607 95 training.label_smoothing 0.013061947321033999 +607 96 model.embedding_dim 1.0 +607 96 optimizer.lr 0.09818627888453293 +607 96 training.batch_size 2.0 +607 96 training.label_smoothing 0.005884561534124108 +607 97 model.embedding_dim 2.0 +607 97 optimizer.lr 0.002603198373402758 +607 97 training.batch_size 1.0 +607 97 training.label_smoothing 0.001412165616653055 +607 98 model.embedding_dim 1.0 +607 98 optimizer.lr 0.026211082655132806 +607 98 training.batch_size 1.0 +607 98 training.label_smoothing 0.1006375427837603 +607 99 model.embedding_dim 1.0 +607 99 optimizer.lr 0.028035556511229763 +607 99 training.batch_size 2.0 +607 99 training.label_smoothing 0.0038233014117787165 +607 100 model.embedding_dim 0.0 +607 100 optimizer.lr 0.0019266723409473352 +607 100 training.batch_size 0.0 +607 100 training.label_smoothing 0.029618086115742886 +607 1 dataset """kinships""" +607 1 model """rotate""" +607 1 loss """bceaftersigmoid""" +607 1 regularizer """no""" +607 1 optimizer """adam""" +607 1 training_loop """lcwa""" +607 1 evaluator """rankbased""" +607 2 dataset """kinships""" +607 2 model """rotate""" +607 2 loss """bceaftersigmoid""" +607 2 regularizer """no""" +607 2 optimizer """adam""" +607 2 training_loop """lcwa""" +607 2 evaluator """rankbased""" +607 3 
dataset """kinships""" +607 3 model """rotate""" +607 3 loss """bceaftersigmoid""" +607 3 regularizer """no""" +607 3 optimizer """adam""" +607 3 training_loop """lcwa""" +607 3 evaluator """rankbased""" +607 4 dataset """kinships""" +607 4 model """rotate""" +607 4 loss """bceaftersigmoid""" +607 4 regularizer """no""" +607 4 optimizer """adam""" +607 4 training_loop """lcwa""" +607 4 evaluator """rankbased""" +607 5 dataset """kinships""" +607 5 model """rotate""" +607 5 loss """bceaftersigmoid""" +607 5 regularizer """no""" +607 5 optimizer """adam""" +607 5 training_loop """lcwa""" +607 5 evaluator """rankbased""" +607 6 dataset """kinships""" +607 6 model """rotate""" +607 6 loss """bceaftersigmoid""" +607 6 regularizer """no""" +607 6 optimizer """adam""" +607 6 training_loop """lcwa""" +607 6 evaluator """rankbased""" +607 7 dataset """kinships""" +607 7 model """rotate""" +607 7 loss """bceaftersigmoid""" +607 7 regularizer """no""" +607 7 optimizer """adam""" +607 7 training_loop """lcwa""" +607 7 evaluator """rankbased""" +607 8 dataset """kinships""" +607 8 model """rotate""" +607 8 loss """bceaftersigmoid""" +607 8 regularizer """no""" +607 8 optimizer """adam""" +607 8 training_loop """lcwa""" +607 8 evaluator """rankbased""" +607 9 dataset """kinships""" +607 9 model """rotate""" +607 9 loss """bceaftersigmoid""" +607 9 regularizer """no""" +607 9 optimizer """adam""" +607 9 training_loop """lcwa""" +607 9 evaluator """rankbased""" +607 10 dataset """kinships""" +607 10 model """rotate""" +607 10 loss """bceaftersigmoid""" +607 10 regularizer """no""" +607 10 optimizer """adam""" +607 10 training_loop """lcwa""" +607 10 evaluator """rankbased""" +607 11 dataset """kinships""" +607 11 model """rotate""" +607 11 loss """bceaftersigmoid""" +607 11 regularizer """no""" +607 11 optimizer """adam""" +607 11 training_loop """lcwa""" +607 11 evaluator """rankbased""" +607 12 dataset """kinships""" +607 12 model """rotate""" +607 12 loss """bceaftersigmoid""" 
+607 12 regularizer """no""" +607 12 optimizer """adam""" +607 12 training_loop """lcwa""" +607 12 evaluator """rankbased""" +607 13 dataset """kinships""" +607 13 model """rotate""" +607 13 loss """bceaftersigmoid""" +607 13 regularizer """no""" +607 13 optimizer """adam""" +607 13 training_loop """lcwa""" +607 13 evaluator """rankbased""" +607 14 dataset """kinships""" +607 14 model """rotate""" +607 14 loss """bceaftersigmoid""" +607 14 regularizer """no""" +607 14 optimizer """adam""" +607 14 training_loop """lcwa""" +607 14 evaluator """rankbased""" +607 15 dataset """kinships""" +607 15 model """rotate""" +607 15 loss """bceaftersigmoid""" +607 15 regularizer """no""" +607 15 optimizer """adam""" +607 15 training_loop """lcwa""" +607 15 evaluator """rankbased""" +607 16 dataset """kinships""" +607 16 model """rotate""" +607 16 loss """bceaftersigmoid""" +607 16 regularizer """no""" +607 16 optimizer """adam""" +607 16 training_loop """lcwa""" +607 16 evaluator """rankbased""" +607 17 dataset """kinships""" +607 17 model """rotate""" +607 17 loss """bceaftersigmoid""" +607 17 regularizer """no""" +607 17 optimizer """adam""" +607 17 training_loop """lcwa""" +607 17 evaluator """rankbased""" +607 18 dataset """kinships""" +607 18 model """rotate""" +607 18 loss """bceaftersigmoid""" +607 18 regularizer """no""" +607 18 optimizer """adam""" +607 18 training_loop """lcwa""" +607 18 evaluator """rankbased""" +607 19 dataset """kinships""" +607 19 model """rotate""" +607 19 loss """bceaftersigmoid""" +607 19 regularizer """no""" +607 19 optimizer """adam""" +607 19 training_loop """lcwa""" +607 19 evaluator """rankbased""" +607 20 dataset """kinships""" +607 20 model """rotate""" +607 20 loss """bceaftersigmoid""" +607 20 regularizer """no""" +607 20 optimizer """adam""" +607 20 training_loop """lcwa""" +607 20 evaluator """rankbased""" +607 21 dataset """kinships""" +607 21 model """rotate""" +607 21 loss """bceaftersigmoid""" +607 21 regularizer """no""" +607 21 
optimizer """adam""" +607 21 training_loop """lcwa""" +607 21 evaluator """rankbased""" +607 22 dataset """kinships""" +607 22 model """rotate""" +607 22 loss """bceaftersigmoid""" +607 22 regularizer """no""" +607 22 optimizer """adam""" +607 22 training_loop """lcwa""" +607 22 evaluator """rankbased""" +607 23 dataset """kinships""" +607 23 model """rotate""" +607 23 loss """bceaftersigmoid""" +607 23 regularizer """no""" +607 23 optimizer """adam""" +607 23 training_loop """lcwa""" +607 23 evaluator """rankbased""" +607 24 dataset """kinships""" +607 24 model """rotate""" +607 24 loss """bceaftersigmoid""" +607 24 regularizer """no""" +607 24 optimizer """adam""" +607 24 training_loop """lcwa""" +607 24 evaluator """rankbased""" +607 25 dataset """kinships""" +607 25 model """rotate""" +607 25 loss """bceaftersigmoid""" +607 25 regularizer """no""" +607 25 optimizer """adam""" +607 25 training_loop """lcwa""" +607 25 evaluator """rankbased""" +607 26 dataset """kinships""" +607 26 model """rotate""" +607 26 loss """bceaftersigmoid""" +607 26 regularizer """no""" +607 26 optimizer """adam""" +607 26 training_loop """lcwa""" +607 26 evaluator """rankbased""" +607 27 dataset """kinships""" +607 27 model """rotate""" +607 27 loss """bceaftersigmoid""" +607 27 regularizer """no""" +607 27 optimizer """adam""" +607 27 training_loop """lcwa""" +607 27 evaluator """rankbased""" +607 28 dataset """kinships""" +607 28 model """rotate""" +607 28 loss """bceaftersigmoid""" +607 28 regularizer """no""" +607 28 optimizer """adam""" +607 28 training_loop """lcwa""" +607 28 evaluator """rankbased""" +607 29 dataset """kinships""" +607 29 model """rotate""" +607 29 loss """bceaftersigmoid""" +607 29 regularizer """no""" +607 29 optimizer """adam""" +607 29 training_loop """lcwa""" +607 29 evaluator """rankbased""" +607 30 dataset """kinships""" +607 30 model """rotate""" +607 30 loss """bceaftersigmoid""" +607 30 regularizer """no""" +607 30 optimizer """adam""" +607 30 
training_loop """lcwa""" +607 30 evaluator """rankbased""" +607 31 dataset """kinships""" +607 31 model """rotate""" +607 31 loss """bceaftersigmoid""" +607 31 regularizer """no""" +607 31 optimizer """adam""" +607 31 training_loop """lcwa""" +607 31 evaluator """rankbased""" +607 32 dataset """kinships""" +607 32 model """rotate""" +607 32 loss """bceaftersigmoid""" +607 32 regularizer """no""" +607 32 optimizer """adam""" +607 32 training_loop """lcwa""" +607 32 evaluator """rankbased""" +607 33 dataset """kinships""" +607 33 model """rotate""" +607 33 loss """bceaftersigmoid""" +607 33 regularizer """no""" +607 33 optimizer """adam""" +607 33 training_loop """lcwa""" +607 33 evaluator """rankbased""" +607 34 dataset """kinships""" +607 34 model """rotate""" +607 34 loss """bceaftersigmoid""" +607 34 regularizer """no""" +607 34 optimizer """adam""" +607 34 training_loop """lcwa""" +607 34 evaluator """rankbased""" +607 35 dataset """kinships""" +607 35 model """rotate""" +607 35 loss """bceaftersigmoid""" +607 35 regularizer """no""" +607 35 optimizer """adam""" +607 35 training_loop """lcwa""" +607 35 evaluator """rankbased""" +607 36 dataset """kinships""" +607 36 model """rotate""" +607 36 loss """bceaftersigmoid""" +607 36 regularizer """no""" +607 36 optimizer """adam""" +607 36 training_loop """lcwa""" +607 36 evaluator """rankbased""" +607 37 dataset """kinships""" +607 37 model """rotate""" +607 37 loss """bceaftersigmoid""" +607 37 regularizer """no""" +607 37 optimizer """adam""" +607 37 training_loop """lcwa""" +607 37 evaluator """rankbased""" +607 38 dataset """kinships""" +607 38 model """rotate""" +607 38 loss """bceaftersigmoid""" +607 38 regularizer """no""" +607 38 optimizer """adam""" +607 38 training_loop """lcwa""" +607 38 evaluator """rankbased""" +607 39 dataset """kinships""" +607 39 model """rotate""" +607 39 loss """bceaftersigmoid""" +607 39 regularizer """no""" +607 39 optimizer """adam""" +607 39 training_loop """lcwa""" +607 39 
evaluator """rankbased""" +607 40 dataset """kinships""" +607 40 model """rotate""" +607 40 loss """bceaftersigmoid""" +607 40 regularizer """no""" +607 40 optimizer """adam""" +607 40 training_loop """lcwa""" +607 40 evaluator """rankbased""" +607 41 dataset """kinships""" +607 41 model """rotate""" +607 41 loss """bceaftersigmoid""" +607 41 regularizer """no""" +607 41 optimizer """adam""" +607 41 training_loop """lcwa""" +607 41 evaluator """rankbased""" +607 42 dataset """kinships""" +607 42 model """rotate""" +607 42 loss """bceaftersigmoid""" +607 42 regularizer """no""" +607 42 optimizer """adam""" +607 42 training_loop """lcwa""" +607 42 evaluator """rankbased""" +607 43 dataset """kinships""" +607 43 model """rotate""" +607 43 loss """bceaftersigmoid""" +607 43 regularizer """no""" +607 43 optimizer """adam""" +607 43 training_loop """lcwa""" +607 43 evaluator """rankbased""" +607 44 dataset """kinships""" +607 44 model """rotate""" +607 44 loss """bceaftersigmoid""" +607 44 regularizer """no""" +607 44 optimizer """adam""" +607 44 training_loop """lcwa""" +607 44 evaluator """rankbased""" +607 45 dataset """kinships""" +607 45 model """rotate""" +607 45 loss """bceaftersigmoid""" +607 45 regularizer """no""" +607 45 optimizer """adam""" +607 45 training_loop """lcwa""" +607 45 evaluator """rankbased""" +607 46 dataset """kinships""" +607 46 model """rotate""" +607 46 loss """bceaftersigmoid""" +607 46 regularizer """no""" +607 46 optimizer """adam""" +607 46 training_loop """lcwa""" +607 46 evaluator """rankbased""" +607 47 dataset """kinships""" +607 47 model """rotate""" +607 47 loss """bceaftersigmoid""" +607 47 regularizer """no""" +607 47 optimizer """adam""" +607 47 training_loop """lcwa""" +607 47 evaluator """rankbased""" +607 48 dataset """kinships""" +607 48 model """rotate""" +607 48 loss """bceaftersigmoid""" +607 48 regularizer """no""" +607 48 optimizer """adam""" +607 48 training_loop """lcwa""" +607 48 evaluator """rankbased""" +607 49 
dataset """kinships""" +607 49 model """rotate""" +607 49 loss """bceaftersigmoid""" +607 49 regularizer """no""" +607 49 optimizer """adam""" +607 49 training_loop """lcwa""" +607 49 evaluator """rankbased""" +607 50 dataset """kinships""" +607 50 model """rotate""" +607 50 loss """bceaftersigmoid""" +607 50 regularizer """no""" +607 50 optimizer """adam""" +607 50 training_loop """lcwa""" +607 50 evaluator """rankbased""" +607 51 dataset """kinships""" +607 51 model """rotate""" +607 51 loss """bceaftersigmoid""" +607 51 regularizer """no""" +607 51 optimizer """adam""" +607 51 training_loop """lcwa""" +607 51 evaluator """rankbased""" +607 52 dataset """kinships""" +607 52 model """rotate""" +607 52 loss """bceaftersigmoid""" +607 52 regularizer """no""" +607 52 optimizer """adam""" +607 52 training_loop """lcwa""" +607 52 evaluator """rankbased""" +607 53 dataset """kinships""" +607 53 model """rotate""" +607 53 loss """bceaftersigmoid""" +607 53 regularizer """no""" +607 53 optimizer """adam""" +607 53 training_loop """lcwa""" +607 53 evaluator """rankbased""" +607 54 dataset """kinships""" +607 54 model """rotate""" +607 54 loss """bceaftersigmoid""" +607 54 regularizer """no""" +607 54 optimizer """adam""" +607 54 training_loop """lcwa""" +607 54 evaluator """rankbased""" +607 55 dataset """kinships""" +607 55 model """rotate""" +607 55 loss """bceaftersigmoid""" +607 55 regularizer """no""" +607 55 optimizer """adam""" +607 55 training_loop """lcwa""" +607 55 evaluator """rankbased""" +607 56 dataset """kinships""" +607 56 model """rotate""" +607 56 loss """bceaftersigmoid""" +607 56 regularizer """no""" +607 56 optimizer """adam""" +607 56 training_loop """lcwa""" +607 56 evaluator """rankbased""" +607 57 dataset """kinships""" +607 57 model """rotate""" +607 57 loss """bceaftersigmoid""" +607 57 regularizer """no""" +607 57 optimizer """adam""" +607 57 training_loop """lcwa""" +607 57 evaluator """rankbased""" +607 58 dataset """kinships""" +607 58 model 
"""rotate""" +607 58 loss """bceaftersigmoid""" +607 58 regularizer """no""" +607 58 optimizer """adam""" +607 58 training_loop """lcwa""" +607 58 evaluator """rankbased""" +607 59 dataset """kinships""" +607 59 model """rotate""" +607 59 loss """bceaftersigmoid""" +607 59 regularizer """no""" +607 59 optimizer """adam""" +607 59 training_loop """lcwa""" +607 59 evaluator """rankbased""" +607 60 dataset """kinships""" +607 60 model """rotate""" +607 60 loss """bceaftersigmoid""" +607 60 regularizer """no""" +607 60 optimizer """adam""" +607 60 training_loop """lcwa""" +607 60 evaluator """rankbased""" +607 61 dataset """kinships""" +607 61 model """rotate""" +607 61 loss """bceaftersigmoid""" +607 61 regularizer """no""" +607 61 optimizer """adam""" +607 61 training_loop """lcwa""" +607 61 evaluator """rankbased""" +607 62 dataset """kinships""" +607 62 model """rotate""" +607 62 loss """bceaftersigmoid""" +607 62 regularizer """no""" +607 62 optimizer """adam""" +607 62 training_loop """lcwa""" +607 62 evaluator """rankbased""" +607 63 dataset """kinships""" +607 63 model """rotate""" +607 63 loss """bceaftersigmoid""" +607 63 regularizer """no""" +607 63 optimizer """adam""" +607 63 training_loop """lcwa""" +607 63 evaluator """rankbased""" +607 64 dataset """kinships""" +607 64 model """rotate""" +607 64 loss """bceaftersigmoid""" +607 64 regularizer """no""" +607 64 optimizer """adam""" +607 64 training_loop """lcwa""" +607 64 evaluator """rankbased""" +607 65 dataset """kinships""" +607 65 model """rotate""" +607 65 loss """bceaftersigmoid""" +607 65 regularizer """no""" +607 65 optimizer """adam""" +607 65 training_loop """lcwa""" +607 65 evaluator """rankbased""" +607 66 dataset """kinships""" +607 66 model """rotate""" +607 66 loss """bceaftersigmoid""" +607 66 regularizer """no""" +607 66 optimizer """adam""" +607 66 training_loop """lcwa""" +607 66 evaluator """rankbased""" +607 67 dataset """kinships""" +607 67 model """rotate""" +607 67 loss 
"""bceaftersigmoid""" +607 67 regularizer """no""" +607 67 optimizer """adam""" +607 67 training_loop """lcwa""" +607 67 evaluator """rankbased""" +607 68 dataset """kinships""" +607 68 model """rotate""" +607 68 loss """bceaftersigmoid""" +607 68 regularizer """no""" +607 68 optimizer """adam""" +607 68 training_loop """lcwa""" +607 68 evaluator """rankbased""" +607 69 dataset """kinships""" +607 69 model """rotate""" +607 69 loss """bceaftersigmoid""" +607 69 regularizer """no""" +607 69 optimizer """adam""" +607 69 training_loop """lcwa""" +607 69 evaluator """rankbased""" +607 70 dataset """kinships""" +607 70 model """rotate""" +607 70 loss """bceaftersigmoid""" +607 70 regularizer """no""" +607 70 optimizer """adam""" +607 70 training_loop """lcwa""" +607 70 evaluator """rankbased""" +607 71 dataset """kinships""" +607 71 model """rotate""" +607 71 loss """bceaftersigmoid""" +607 71 regularizer """no""" +607 71 optimizer """adam""" +607 71 training_loop """lcwa""" +607 71 evaluator """rankbased""" +607 72 dataset """kinships""" +607 72 model """rotate""" +607 72 loss """bceaftersigmoid""" +607 72 regularizer """no""" +607 72 optimizer """adam""" +607 72 training_loop """lcwa""" +607 72 evaluator """rankbased""" +607 73 dataset """kinships""" +607 73 model """rotate""" +607 73 loss """bceaftersigmoid""" +607 73 regularizer """no""" +607 73 optimizer """adam""" +607 73 training_loop """lcwa""" +607 73 evaluator """rankbased""" +607 74 dataset """kinships""" +607 74 model """rotate""" +607 74 loss """bceaftersigmoid""" +607 74 regularizer """no""" +607 74 optimizer """adam""" +607 74 training_loop """lcwa""" +607 74 evaluator """rankbased""" +607 75 dataset """kinships""" +607 75 model """rotate""" +607 75 loss """bceaftersigmoid""" +607 75 regularizer """no""" +607 75 optimizer """adam""" +607 75 training_loop """lcwa""" +607 75 evaluator """rankbased""" +607 76 dataset """kinships""" +607 76 model """rotate""" +607 76 loss """bceaftersigmoid""" +607 76 
regularizer """no""" +607 76 optimizer """adam""" +607 76 training_loop """lcwa""" +607 76 evaluator """rankbased""" +607 77 dataset """kinships""" +607 77 model """rotate""" +607 77 loss """bceaftersigmoid""" +607 77 regularizer """no""" +607 77 optimizer """adam""" +607 77 training_loop """lcwa""" +607 77 evaluator """rankbased""" +607 78 dataset """kinships""" +607 78 model """rotate""" +607 78 loss """bceaftersigmoid""" +607 78 regularizer """no""" +607 78 optimizer """adam""" +607 78 training_loop """lcwa""" +607 78 evaluator """rankbased""" +607 79 dataset """kinships""" +607 79 model """rotate""" +607 79 loss """bceaftersigmoid""" +607 79 regularizer """no""" +607 79 optimizer """adam""" +607 79 training_loop """lcwa""" +607 79 evaluator """rankbased""" +607 80 dataset """kinships""" +607 80 model """rotate""" +607 80 loss """bceaftersigmoid""" +607 80 regularizer """no""" +607 80 optimizer """adam""" +607 80 training_loop """lcwa""" +607 80 evaluator """rankbased""" +607 81 dataset """kinships""" +607 81 model """rotate""" +607 81 loss """bceaftersigmoid""" +607 81 regularizer """no""" +607 81 optimizer """adam""" +607 81 training_loop """lcwa""" +607 81 evaluator """rankbased""" +607 82 dataset """kinships""" +607 82 model """rotate""" +607 82 loss """bceaftersigmoid""" +607 82 regularizer """no""" +607 82 optimizer """adam""" +607 82 training_loop """lcwa""" +607 82 evaluator """rankbased""" +607 83 dataset """kinships""" +607 83 model """rotate""" +607 83 loss """bceaftersigmoid""" +607 83 regularizer """no""" +607 83 optimizer """adam""" +607 83 training_loop """lcwa""" +607 83 evaluator """rankbased""" +607 84 dataset """kinships""" +607 84 model """rotate""" +607 84 loss """bceaftersigmoid""" +607 84 regularizer """no""" +607 84 optimizer """adam""" +607 84 training_loop """lcwa""" +607 84 evaluator """rankbased""" +607 85 dataset """kinships""" +607 85 model """rotate""" +607 85 loss """bceaftersigmoid""" +607 85 regularizer """no""" +607 85 
optimizer """adam""" +607 85 training_loop """lcwa""" +607 85 evaluator """rankbased""" +607 86 dataset """kinships""" +607 86 model """rotate""" +607 86 loss """bceaftersigmoid""" +607 86 regularizer """no""" +607 86 optimizer """adam""" +607 86 training_loop """lcwa""" +607 86 evaluator """rankbased""" +607 87 dataset """kinships""" +607 87 model """rotate""" +607 87 loss """bceaftersigmoid""" +607 87 regularizer """no""" +607 87 optimizer """adam""" +607 87 training_loop """lcwa""" +607 87 evaluator """rankbased""" +607 88 dataset """kinships""" +607 88 model """rotate""" +607 88 loss """bceaftersigmoid""" +607 88 regularizer """no""" +607 88 optimizer """adam""" +607 88 training_loop """lcwa""" +607 88 evaluator """rankbased""" +607 89 dataset """kinships""" +607 89 model """rotate""" +607 89 loss """bceaftersigmoid""" +607 89 regularizer """no""" +607 89 optimizer """adam""" +607 89 training_loop """lcwa""" +607 89 evaluator """rankbased""" +607 90 dataset """kinships""" +607 90 model """rotate""" +607 90 loss """bceaftersigmoid""" +607 90 regularizer """no""" +607 90 optimizer """adam""" +607 90 training_loop """lcwa""" +607 90 evaluator """rankbased""" +607 91 dataset """kinships""" +607 91 model """rotate""" +607 91 loss """bceaftersigmoid""" +607 91 regularizer """no""" +607 91 optimizer """adam""" +607 91 training_loop """lcwa""" +607 91 evaluator """rankbased""" +607 92 dataset """kinships""" +607 92 model """rotate""" +607 92 loss """bceaftersigmoid""" +607 92 regularizer """no""" +607 92 optimizer """adam""" +607 92 training_loop """lcwa""" +607 92 evaluator """rankbased""" +607 93 dataset """kinships""" +607 93 model """rotate""" +607 93 loss """bceaftersigmoid""" +607 93 regularizer """no""" +607 93 optimizer """adam""" +607 93 training_loop """lcwa""" +607 93 evaluator """rankbased""" +607 94 dataset """kinships""" +607 94 model """rotate""" +607 94 loss """bceaftersigmoid""" +607 94 regularizer """no""" +607 94 optimizer """adam""" +607 94 
training_loop """lcwa""" +607 94 evaluator """rankbased""" +607 95 dataset """kinships""" +607 95 model """rotate""" +607 95 loss """bceaftersigmoid""" +607 95 regularizer """no""" +607 95 optimizer """adam""" +607 95 training_loop """lcwa""" +607 95 evaluator """rankbased""" +607 96 dataset """kinships""" +607 96 model """rotate""" +607 96 loss """bceaftersigmoid""" +607 96 regularizer """no""" +607 96 optimizer """adam""" +607 96 training_loop """lcwa""" +607 96 evaluator """rankbased""" +607 97 dataset """kinships""" +607 97 model """rotate""" +607 97 loss """bceaftersigmoid""" +607 97 regularizer """no""" +607 97 optimizer """adam""" +607 97 training_loop """lcwa""" +607 97 evaluator """rankbased""" +607 98 dataset """kinships""" +607 98 model """rotate""" +607 98 loss """bceaftersigmoid""" +607 98 regularizer """no""" +607 98 optimizer """adam""" +607 98 training_loop """lcwa""" +607 98 evaluator """rankbased""" +607 99 dataset """kinships""" +607 99 model """rotate""" +607 99 loss """bceaftersigmoid""" +607 99 regularizer """no""" +607 99 optimizer """adam""" +607 99 training_loop """lcwa""" +607 99 evaluator """rankbased""" +607 100 dataset """kinships""" +607 100 model """rotate""" +607 100 loss """bceaftersigmoid""" +607 100 regularizer """no""" +607 100 optimizer """adam""" +607 100 training_loop """lcwa""" +607 100 evaluator """rankbased""" +608 1 model.embedding_dim 1.0 +608 1 optimizer.lr 0.006831083755927104 +608 1 training.batch_size 1.0 +608 1 training.label_smoothing 0.05605329358127202 +608 2 model.embedding_dim 2.0 +608 2 optimizer.lr 0.018307024629030933 +608 2 training.batch_size 2.0 +608 2 training.label_smoothing 0.001588339942687348 +608 3 model.embedding_dim 1.0 +608 3 optimizer.lr 0.001784871963344554 +608 3 training.batch_size 2.0 +608 3 training.label_smoothing 0.028558085829481997 +608 4 model.embedding_dim 1.0 +608 4 optimizer.lr 0.001272206667815889 +608 4 training.batch_size 1.0 +608 4 training.label_smoothing 0.04396455685615421 
+608 5 model.embedding_dim 0.0 +608 5 optimizer.lr 0.024663274297239537 +608 5 training.batch_size 2.0 +608 5 training.label_smoothing 0.0027196162518881408 +608 6 model.embedding_dim 2.0 +608 6 optimizer.lr 0.004946667140175323 +608 6 training.batch_size 1.0 +608 6 training.label_smoothing 0.01819329215706877 +608 7 model.embedding_dim 2.0 +608 7 optimizer.lr 0.0015853988629716907 +608 7 training.batch_size 2.0 +608 7 training.label_smoothing 0.001349355315850612 +608 8 model.embedding_dim 1.0 +608 8 optimizer.lr 0.001410678441283121 +608 8 training.batch_size 1.0 +608 8 training.label_smoothing 0.0045263858078327425 +608 9 model.embedding_dim 0.0 +608 9 optimizer.lr 0.00599414418408878 +608 9 training.batch_size 1.0 +608 9 training.label_smoothing 0.25850276884819884 +608 10 model.embedding_dim 1.0 +608 10 optimizer.lr 0.018806511256163172 +608 10 training.batch_size 2.0 +608 10 training.label_smoothing 0.4683964781238507 +608 11 model.embedding_dim 0.0 +608 11 optimizer.lr 0.008535654896829588 +608 11 training.batch_size 2.0 +608 11 training.label_smoothing 0.013162784127392418 +608 12 model.embedding_dim 1.0 +608 12 optimizer.lr 0.011244629647553645 +608 12 training.batch_size 0.0 +608 12 training.label_smoothing 0.04753840624618458 +608 13 model.embedding_dim 1.0 +608 13 optimizer.lr 0.0026073891093426264 +608 13 training.batch_size 2.0 +608 13 training.label_smoothing 0.030886470244002526 +608 14 model.embedding_dim 2.0 +608 14 optimizer.lr 0.0039816893848133635 +608 14 training.batch_size 2.0 +608 14 training.label_smoothing 0.9480217439783132 +608 15 model.embedding_dim 2.0 +608 15 optimizer.lr 0.09712938365813789 +608 15 training.batch_size 1.0 +608 15 training.label_smoothing 0.008298439543759755 +608 16 model.embedding_dim 2.0 +608 16 optimizer.lr 0.09233854139958711 +608 16 training.batch_size 2.0 +608 16 training.label_smoothing 0.5583939471413757 +608 17 model.embedding_dim 1.0 +608 17 optimizer.lr 0.006919858984862144 +608 17 training.batch_size 0.0 
+608 17 training.label_smoothing 0.403594039882357 +608 18 model.embedding_dim 1.0 +608 18 optimizer.lr 0.09131260151273875 +608 18 training.batch_size 1.0 +608 18 training.label_smoothing 0.26263092400048216 +608 19 model.embedding_dim 0.0 +608 19 optimizer.lr 0.001389314937848205 +608 19 training.batch_size 2.0 +608 19 training.label_smoothing 0.001070554256965711 +608 20 model.embedding_dim 2.0 +608 20 optimizer.lr 0.018368957928511898 +608 20 training.batch_size 1.0 +608 20 training.label_smoothing 0.02575816983877146 +608 21 model.embedding_dim 1.0 +608 21 optimizer.lr 0.021098483610702705 +608 21 training.batch_size 2.0 +608 21 training.label_smoothing 0.0018003857027980954 +608 22 model.embedding_dim 2.0 +608 22 optimizer.lr 0.0025936287577253848 +608 22 training.batch_size 2.0 +608 22 training.label_smoothing 0.021060752900122562 +608 23 model.embedding_dim 1.0 +608 23 optimizer.lr 0.002235710069689393 +608 23 training.batch_size 0.0 +608 23 training.label_smoothing 0.0980750862091894 +608 24 model.embedding_dim 2.0 +608 24 optimizer.lr 0.011053683071216831 +608 24 training.batch_size 1.0 +608 24 training.label_smoothing 0.0013722223069543293 +608 25 model.embedding_dim 2.0 +608 25 optimizer.lr 0.028337839327183707 +608 25 training.batch_size 1.0 +608 25 training.label_smoothing 0.032878152020419806 +608 26 model.embedding_dim 0.0 +608 26 optimizer.lr 0.0010244344986691024 +608 26 training.batch_size 2.0 +608 26 training.label_smoothing 0.6300747592699587 +608 27 model.embedding_dim 0.0 +608 27 optimizer.lr 0.013949712784076793 +608 27 training.batch_size 0.0 +608 27 training.label_smoothing 0.014864628992781109 +608 28 model.embedding_dim 1.0 +608 28 optimizer.lr 0.002556089166913323 +608 28 training.batch_size 2.0 +608 28 training.label_smoothing 0.06605365267369702 +608 29 model.embedding_dim 0.0 +608 29 optimizer.lr 0.08450905368713568 +608 29 training.batch_size 0.0 +608 29 training.label_smoothing 0.0038148992416211303 +608 30 model.embedding_dim 1.0 
+608 30 optimizer.lr 0.0011351112896298328 +608 30 training.batch_size 2.0 +608 30 training.label_smoothing 0.42706424157943057 +608 31 model.embedding_dim 2.0 +608 31 optimizer.lr 0.008216030613736503 +608 31 training.batch_size 1.0 +608 31 training.label_smoothing 0.17597839825741748 +608 32 model.embedding_dim 2.0 +608 32 optimizer.lr 0.008349721715490968 +608 32 training.batch_size 0.0 +608 32 training.label_smoothing 0.40439741418861513 +608 33 model.embedding_dim 2.0 +608 33 optimizer.lr 0.02015422035598731 +608 33 training.batch_size 2.0 +608 33 training.label_smoothing 0.7150905790322363 +608 34 model.embedding_dim 0.0 +608 34 optimizer.lr 0.027268504638858514 +608 34 training.batch_size 2.0 +608 34 training.label_smoothing 0.003146926464647619 +608 35 model.embedding_dim 2.0 +608 35 optimizer.lr 0.009442365863746619 +608 35 training.batch_size 1.0 +608 35 training.label_smoothing 0.004608920481193449 +608 36 model.embedding_dim 1.0 +608 36 optimizer.lr 0.002483129943892617 +608 36 training.batch_size 0.0 +608 36 training.label_smoothing 0.005698680051804621 +608 37 model.embedding_dim 2.0 +608 37 optimizer.lr 0.022606325532526346 +608 37 training.batch_size 2.0 +608 37 training.label_smoothing 0.004079200626570793 +608 38 model.embedding_dim 2.0 +608 38 optimizer.lr 0.06264270157056927 +608 38 training.batch_size 2.0 +608 38 training.label_smoothing 0.029190642796961593 +608 39 model.embedding_dim 2.0 +608 39 optimizer.lr 0.0011875204869807113 +608 39 training.batch_size 2.0 +608 39 training.label_smoothing 0.3184267696600773 +608 40 model.embedding_dim 1.0 +608 40 optimizer.lr 0.0075276855628785555 +608 40 training.batch_size 2.0 +608 40 training.label_smoothing 0.062179181535039554 +608 41 model.embedding_dim 2.0 +608 41 optimizer.lr 0.04439896550965334 +608 41 training.batch_size 2.0 +608 41 training.label_smoothing 0.7315189981275235 +608 42 model.embedding_dim 2.0 +608 42 optimizer.lr 0.003674651491170384 +608 42 training.batch_size 0.0 +608 42 
training.label_smoothing 0.26771701776568924 +608 43 model.embedding_dim 0.0 +608 43 optimizer.lr 0.06363485210325974 +608 43 training.batch_size 1.0 +608 43 training.label_smoothing 0.0733103432359437 +608 44 model.embedding_dim 0.0 +608 44 optimizer.lr 0.0016231058319069029 +608 44 training.batch_size 0.0 +608 44 training.label_smoothing 0.001007441785867483 +608 45 model.embedding_dim 2.0 +608 45 optimizer.lr 0.0437030009934886 +608 45 training.batch_size 2.0 +608 45 training.label_smoothing 0.09853771628557069 +608 46 model.embedding_dim 1.0 +608 46 optimizer.lr 0.007067787266464738 +608 46 training.batch_size 2.0 +608 46 training.label_smoothing 0.05630334485920905 +608 47 model.embedding_dim 2.0 +608 47 optimizer.lr 0.016959023739672733 +608 47 training.batch_size 1.0 +608 47 training.label_smoothing 0.13120954889462477 +608 48 model.embedding_dim 0.0 +608 48 optimizer.lr 0.001400428402436419 +608 48 training.batch_size 1.0 +608 48 training.label_smoothing 0.01779293313469303 +608 49 model.embedding_dim 2.0 +608 49 optimizer.lr 0.011278751236935256 +608 49 training.batch_size 0.0 +608 49 training.label_smoothing 0.001331250643066764 +608 50 model.embedding_dim 2.0 +608 50 optimizer.lr 0.01821648348189088 +608 50 training.batch_size 0.0 +608 50 training.label_smoothing 0.14316968248036194 +608 51 model.embedding_dim 2.0 +608 51 optimizer.lr 0.01387525436530583 +608 51 training.batch_size 2.0 +608 51 training.label_smoothing 0.03809618126619415 +608 52 model.embedding_dim 2.0 +608 52 optimizer.lr 0.0014839359005695675 +608 52 training.batch_size 1.0 +608 52 training.label_smoothing 0.25755650170503575 +608 53 model.embedding_dim 0.0 +608 53 optimizer.lr 0.003595784891712887 +608 53 training.batch_size 1.0 +608 53 training.label_smoothing 0.1217784339078679 +608 54 model.embedding_dim 1.0 +608 54 optimizer.lr 0.0018179521975908567 +608 54 training.batch_size 0.0 +608 54 training.label_smoothing 0.19185630527197306 +608 55 model.embedding_dim 2.0 +608 55 
optimizer.lr 0.001485410471091241 +608 55 training.batch_size 1.0 +608 55 training.label_smoothing 0.02618934325772164 +608 56 model.embedding_dim 0.0 +608 56 optimizer.lr 0.001465701805389483 +608 56 training.batch_size 1.0 +608 56 training.label_smoothing 0.05699351134838703 +608 57 model.embedding_dim 0.0 +608 57 optimizer.lr 0.0497380422443044 +608 57 training.batch_size 1.0 +608 57 training.label_smoothing 0.010636280650212012 +608 58 model.embedding_dim 0.0 +608 58 optimizer.lr 0.0011605573952139848 +608 58 training.batch_size 1.0 +608 58 training.label_smoothing 0.02407303451786308 +608 59 model.embedding_dim 2.0 +608 59 optimizer.lr 0.0019006431770610067 +608 59 training.batch_size 1.0 +608 59 training.label_smoothing 0.07931601579556109 +608 60 model.embedding_dim 1.0 +608 60 optimizer.lr 0.09724060481076058 +608 60 training.batch_size 0.0 +608 60 training.label_smoothing 0.024538422438592666 +608 61 model.embedding_dim 2.0 +608 61 optimizer.lr 0.002528495294441025 +608 61 training.batch_size 0.0 +608 61 training.label_smoothing 0.1256957123432925 +608 62 model.embedding_dim 0.0 +608 62 optimizer.lr 0.007734517006875937 +608 62 training.batch_size 2.0 +608 62 training.label_smoothing 0.05217752617342084 +608 63 model.embedding_dim 2.0 +608 63 optimizer.lr 0.04088766088347186 +608 63 training.batch_size 0.0 +608 63 training.label_smoothing 0.34541890127653563 +608 64 model.embedding_dim 0.0 +608 64 optimizer.lr 0.03486632203572596 +608 64 training.batch_size 1.0 +608 64 training.label_smoothing 0.0020247752454296357 +608 65 model.embedding_dim 0.0 +608 65 optimizer.lr 0.00728398276443473 +608 65 training.batch_size 2.0 +608 65 training.label_smoothing 0.20633968118167814 +608 66 model.embedding_dim 2.0 +608 66 optimizer.lr 0.0037038985200373823 +608 66 training.batch_size 1.0 +608 66 training.label_smoothing 0.04271464371183395 +608 67 model.embedding_dim 2.0 +608 67 optimizer.lr 0.0017082314939805604 +608 67 training.batch_size 2.0 +608 67 
training.label_smoothing 0.004878616238115609 +608 68 model.embedding_dim 0.0 +608 68 optimizer.lr 0.08795390283360527 +608 68 training.batch_size 2.0 +608 68 training.label_smoothing 0.1504755767533338 +608 69 model.embedding_dim 2.0 +608 69 optimizer.lr 0.0011240216040468072 +608 69 training.batch_size 0.0 +608 69 training.label_smoothing 0.0069037670404095775 +608 70 model.embedding_dim 1.0 +608 70 optimizer.lr 0.004222401893808561 +608 70 training.batch_size 0.0 +608 70 training.label_smoothing 0.01572295418014057 +608 71 model.embedding_dim 2.0 +608 71 optimizer.lr 0.01353743863903075 +608 71 training.batch_size 0.0 +608 71 training.label_smoothing 0.00162440169652445 +608 72 model.embedding_dim 0.0 +608 72 optimizer.lr 0.002344483222163055 +608 72 training.batch_size 2.0 +608 72 training.label_smoothing 0.06306418162655766 +608 73 model.embedding_dim 2.0 +608 73 optimizer.lr 0.01833122141833792 +608 73 training.batch_size 0.0 +608 73 training.label_smoothing 0.0030696005622666616 +608 74 model.embedding_dim 0.0 +608 74 optimizer.lr 0.002821977583719135 +608 74 training.batch_size 2.0 +608 74 training.label_smoothing 0.09498348545993615 +608 75 model.embedding_dim 0.0 +608 75 optimizer.lr 0.03031543852093211 +608 75 training.batch_size 2.0 +608 75 training.label_smoothing 0.006344714582537513 +608 76 model.embedding_dim 1.0 +608 76 optimizer.lr 0.03193552619835352 +608 76 training.batch_size 2.0 +608 76 training.label_smoothing 0.04551049670713996 +608 77 model.embedding_dim 1.0 +608 77 optimizer.lr 0.01683457743337418 +608 77 training.batch_size 1.0 +608 77 training.label_smoothing 0.003360766700201688 +608 78 model.embedding_dim 1.0 +608 78 optimizer.lr 0.0037561719504964155 +608 78 training.batch_size 1.0 +608 78 training.label_smoothing 0.6978172334046445 +608 79 model.embedding_dim 0.0 +608 79 optimizer.lr 0.0010922856376068511 +608 79 training.batch_size 0.0 +608 79 training.label_smoothing 0.07998085821069963 +608 80 model.embedding_dim 0.0 +608 80 
optimizer.lr 0.001368889099552808 +608 80 training.batch_size 1.0 +608 80 training.label_smoothing 0.08779063960017358 +608 81 model.embedding_dim 1.0 +608 81 optimizer.lr 0.01279108775399668 +608 81 training.batch_size 1.0 +608 81 training.label_smoothing 0.6832644737595662 +608 82 model.embedding_dim 0.0 +608 82 optimizer.lr 0.004021307235308578 +608 82 training.batch_size 2.0 +608 82 training.label_smoothing 0.3823378933008903 +608 83 model.embedding_dim 0.0 +608 83 optimizer.lr 0.0010236681266330972 +608 83 training.batch_size 1.0 +608 83 training.label_smoothing 0.0023337752748038964 +608 84 model.embedding_dim 0.0 +608 84 optimizer.lr 0.0016665825985268691 +608 84 training.batch_size 1.0 +608 84 training.label_smoothing 0.11658561126578043 +608 85 model.embedding_dim 2.0 +608 85 optimizer.lr 0.0016413987105834114 +608 85 training.batch_size 0.0 +608 85 training.label_smoothing 0.5564620059013856 +608 86 model.embedding_dim 0.0 +608 86 optimizer.lr 0.011413774476276449 +608 86 training.batch_size 1.0 +608 86 training.label_smoothing 0.00394722675476741 +608 87 model.embedding_dim 0.0 +608 87 optimizer.lr 0.015568002704355871 +608 87 training.batch_size 0.0 +608 87 training.label_smoothing 0.17991933984987243 +608 88 model.embedding_dim 0.0 +608 88 optimizer.lr 0.01076254683234203 +608 88 training.batch_size 0.0 +608 88 training.label_smoothing 0.08581013322853907 +608 89 model.embedding_dim 2.0 +608 89 optimizer.lr 0.07564353981965331 +608 89 training.batch_size 1.0 +608 89 training.label_smoothing 0.5668953388525695 +608 90 model.embedding_dim 0.0 +608 90 optimizer.lr 0.001956744810918554 +608 90 training.batch_size 2.0 +608 90 training.label_smoothing 0.011878323976910833 +608 91 model.embedding_dim 2.0 +608 91 optimizer.lr 0.003761624611025635 +608 91 training.batch_size 2.0 +608 91 training.label_smoothing 0.018085779540125902 +608 92 model.embedding_dim 0.0 +608 92 optimizer.lr 0.003909097974294002 +608 92 training.batch_size 1.0 +608 92 
training.label_smoothing 0.0010685965289287558 +608 93 model.embedding_dim 1.0 +608 93 optimizer.lr 0.028293285826385312 +608 93 training.batch_size 2.0 +608 93 training.label_smoothing 0.09814397454327271 +608 94 model.embedding_dim 0.0 +608 94 optimizer.lr 0.03929186148930224 +608 94 training.batch_size 1.0 +608 94 training.label_smoothing 0.750849649514305 +608 95 model.embedding_dim 2.0 +608 95 optimizer.lr 0.04223869017838003 +608 95 training.batch_size 1.0 +608 95 training.label_smoothing 0.058266416526167646 +608 96 model.embedding_dim 2.0 +608 96 optimizer.lr 0.006735659295843708 +608 96 training.batch_size 2.0 +608 96 training.label_smoothing 0.8460227557683836 +608 97 model.embedding_dim 2.0 +608 97 optimizer.lr 0.007669299603396664 +608 97 training.batch_size 0.0 +608 97 training.label_smoothing 0.001690887995033212 +608 98 model.embedding_dim 2.0 +608 98 optimizer.lr 0.026066695346689237 +608 98 training.batch_size 0.0 +608 98 training.label_smoothing 0.11902895040562644 +608 99 model.embedding_dim 1.0 +608 99 optimizer.lr 0.06424382254936066 +608 99 training.batch_size 1.0 +608 99 training.label_smoothing 0.13507520958832 +608 100 model.embedding_dim 1.0 +608 100 optimizer.lr 0.022847220538582354 +608 100 training.batch_size 2.0 +608 100 training.label_smoothing 0.1549870292087787 +608 1 dataset """kinships""" +608 1 model """rotate""" +608 1 loss """softplus""" +608 1 regularizer """no""" +608 1 optimizer """adam""" +608 1 training_loop """lcwa""" +608 1 evaluator """rankbased""" +608 2 dataset """kinships""" +608 2 model """rotate""" +608 2 loss """softplus""" +608 2 regularizer """no""" +608 2 optimizer """adam""" +608 2 training_loop """lcwa""" +608 2 evaluator """rankbased""" +608 3 dataset """kinships""" +608 3 model """rotate""" +608 3 loss """softplus""" +608 3 regularizer """no""" +608 3 optimizer """adam""" +608 3 training_loop """lcwa""" +608 3 evaluator """rankbased""" +608 4 dataset """kinships""" +608 4 model """rotate""" +608 4 loss 
"""softplus""" +608 4 regularizer """no""" +608 4 optimizer """adam""" +608 4 training_loop """lcwa""" +608 4 evaluator """rankbased""" +608 5 dataset """kinships""" +608 5 model """rotate""" +608 5 loss """softplus""" +608 5 regularizer """no""" +608 5 optimizer """adam""" +608 5 training_loop """lcwa""" +608 5 evaluator """rankbased""" +608 6 dataset """kinships""" +608 6 model """rotate""" +608 6 loss """softplus""" +608 6 regularizer """no""" +608 6 optimizer """adam""" +608 6 training_loop """lcwa""" +608 6 evaluator """rankbased""" +608 7 dataset """kinships""" +608 7 model """rotate""" +608 7 loss """softplus""" +608 7 regularizer """no""" +608 7 optimizer """adam""" +608 7 training_loop """lcwa""" +608 7 evaluator """rankbased""" +608 8 dataset """kinships""" +608 8 model """rotate""" +608 8 loss """softplus""" +608 8 regularizer """no""" +608 8 optimizer """adam""" +608 8 training_loop """lcwa""" +608 8 evaluator """rankbased""" +608 9 dataset """kinships""" +608 9 model """rotate""" +608 9 loss """softplus""" +608 9 regularizer """no""" +608 9 optimizer """adam""" +608 9 training_loop """lcwa""" +608 9 evaluator """rankbased""" +608 10 dataset """kinships""" +608 10 model """rotate""" +608 10 loss """softplus""" +608 10 regularizer """no""" +608 10 optimizer """adam""" +608 10 training_loop """lcwa""" +608 10 evaluator """rankbased""" +608 11 dataset """kinships""" +608 11 model """rotate""" +608 11 loss """softplus""" +608 11 regularizer """no""" +608 11 optimizer """adam""" +608 11 training_loop """lcwa""" +608 11 evaluator """rankbased""" +608 12 dataset """kinships""" +608 12 model """rotate""" +608 12 loss """softplus""" +608 12 regularizer """no""" +608 12 optimizer """adam""" +608 12 training_loop """lcwa""" +608 12 evaluator """rankbased""" +608 13 dataset """kinships""" +608 13 model """rotate""" +608 13 loss """softplus""" +608 13 regularizer """no""" +608 13 optimizer """adam""" +608 13 training_loop """lcwa""" +608 13 evaluator """rankbased""" 
+608 14 dataset """kinships""" +608 14 model """rotate""" +608 14 loss """softplus""" +608 14 regularizer """no""" +608 14 optimizer """adam""" +608 14 training_loop """lcwa""" +608 14 evaluator """rankbased""" +608 15 dataset """kinships""" +608 15 model """rotate""" +608 15 loss """softplus""" +608 15 regularizer """no""" +608 15 optimizer """adam""" +608 15 training_loop """lcwa""" +608 15 evaluator """rankbased""" +608 16 dataset """kinships""" +608 16 model """rotate""" +608 16 loss """softplus""" +608 16 regularizer """no""" +608 16 optimizer """adam""" +608 16 training_loop """lcwa""" +608 16 evaluator """rankbased""" +608 17 dataset """kinships""" +608 17 model """rotate""" +608 17 loss """softplus""" +608 17 regularizer """no""" +608 17 optimizer """adam""" +608 17 training_loop """lcwa""" +608 17 evaluator """rankbased""" +608 18 dataset """kinships""" +608 18 model """rotate""" +608 18 loss """softplus""" +608 18 regularizer """no""" +608 18 optimizer """adam""" +608 18 training_loop """lcwa""" +608 18 evaluator """rankbased""" +608 19 dataset """kinships""" +608 19 model """rotate""" +608 19 loss """softplus""" +608 19 regularizer """no""" +608 19 optimizer """adam""" +608 19 training_loop """lcwa""" +608 19 evaluator """rankbased""" +608 20 dataset """kinships""" +608 20 model """rotate""" +608 20 loss """softplus""" +608 20 regularizer """no""" +608 20 optimizer """adam""" +608 20 training_loop """lcwa""" +608 20 evaluator """rankbased""" +608 21 dataset """kinships""" +608 21 model """rotate""" +608 21 loss """softplus""" +608 21 regularizer """no""" +608 21 optimizer """adam""" +608 21 training_loop """lcwa""" +608 21 evaluator """rankbased""" +608 22 dataset """kinships""" +608 22 model """rotate""" +608 22 loss """softplus""" +608 22 regularizer """no""" +608 22 optimizer """adam""" +608 22 training_loop """lcwa""" +608 22 evaluator """rankbased""" +608 23 dataset """kinships""" +608 23 model """rotate""" +608 23 loss """softplus""" +608 23 
regularizer """no""" +608 23 optimizer """adam""" +608 23 training_loop """lcwa""" +608 23 evaluator """rankbased""" +608 24 dataset """kinships""" +608 24 model """rotate""" +608 24 loss """softplus""" +608 24 regularizer """no""" +608 24 optimizer """adam""" +608 24 training_loop """lcwa""" +608 24 evaluator """rankbased""" +608 25 dataset """kinships""" +608 25 model """rotate""" +608 25 loss """softplus""" +608 25 regularizer """no""" +608 25 optimizer """adam""" +608 25 training_loop """lcwa""" +608 25 evaluator """rankbased""" +608 26 dataset """kinships""" +608 26 model """rotate""" +608 26 loss """softplus""" +608 26 regularizer """no""" +608 26 optimizer """adam""" +608 26 training_loop """lcwa""" +608 26 evaluator """rankbased""" +608 27 dataset """kinships""" +608 27 model """rotate""" +608 27 loss """softplus""" +608 27 regularizer """no""" +608 27 optimizer """adam""" +608 27 training_loop """lcwa""" +608 27 evaluator """rankbased""" +608 28 dataset """kinships""" +608 28 model """rotate""" +608 28 loss """softplus""" +608 28 regularizer """no""" +608 28 optimizer """adam""" +608 28 training_loop """lcwa""" +608 28 evaluator """rankbased""" +608 29 dataset """kinships""" +608 29 model """rotate""" +608 29 loss """softplus""" +608 29 regularizer """no""" +608 29 optimizer """adam""" +608 29 training_loop """lcwa""" +608 29 evaluator """rankbased""" +608 30 dataset """kinships""" +608 30 model """rotate""" +608 30 loss """softplus""" +608 30 regularizer """no""" +608 30 optimizer """adam""" +608 30 training_loop """lcwa""" +608 30 evaluator """rankbased""" +608 31 dataset """kinships""" +608 31 model """rotate""" +608 31 loss """softplus""" +608 31 regularizer """no""" +608 31 optimizer """adam""" +608 31 training_loop """lcwa""" +608 31 evaluator """rankbased""" +608 32 dataset """kinships""" +608 32 model """rotate""" +608 32 loss """softplus""" +608 32 regularizer """no""" +608 32 optimizer """adam""" +608 32 training_loop """lcwa""" +608 32 evaluator 
"""rankbased""" +608 33 dataset """kinships""" +608 33 model """rotate""" +608 33 loss """softplus""" +608 33 regularizer """no""" +608 33 optimizer """adam""" +608 33 training_loop """lcwa""" +608 33 evaluator """rankbased""" +608 34 dataset """kinships""" +608 34 model """rotate""" +608 34 loss """softplus""" +608 34 regularizer """no""" +608 34 optimizer """adam""" +608 34 training_loop """lcwa""" +608 34 evaluator """rankbased""" +608 35 dataset """kinships""" +608 35 model """rotate""" +608 35 loss """softplus""" +608 35 regularizer """no""" +608 35 optimizer """adam""" +608 35 training_loop """lcwa""" +608 35 evaluator """rankbased""" +608 36 dataset """kinships""" +608 36 model """rotate""" +608 36 loss """softplus""" +608 36 regularizer """no""" +608 36 optimizer """adam""" +608 36 training_loop """lcwa""" +608 36 evaluator """rankbased""" +608 37 dataset """kinships""" +608 37 model """rotate""" +608 37 loss """softplus""" +608 37 regularizer """no""" +608 37 optimizer """adam""" +608 37 training_loop """lcwa""" +608 37 evaluator """rankbased""" +608 38 dataset """kinships""" +608 38 model """rotate""" +608 38 loss """softplus""" +608 38 regularizer """no""" +608 38 optimizer """adam""" +608 38 training_loop """lcwa""" +608 38 evaluator """rankbased""" +608 39 dataset """kinships""" +608 39 model """rotate""" +608 39 loss """softplus""" +608 39 regularizer """no""" +608 39 optimizer """adam""" +608 39 training_loop """lcwa""" +608 39 evaluator """rankbased""" +608 40 dataset """kinships""" +608 40 model """rotate""" +608 40 loss """softplus""" +608 40 regularizer """no""" +608 40 optimizer """adam""" +608 40 training_loop """lcwa""" +608 40 evaluator """rankbased""" +608 41 dataset """kinships""" +608 41 model """rotate""" +608 41 loss """softplus""" +608 41 regularizer """no""" +608 41 optimizer """adam""" +608 41 training_loop """lcwa""" +608 41 evaluator """rankbased""" +608 42 dataset """kinships""" +608 42 model """rotate""" +608 42 loss 
"""softplus""" +608 42 regularizer """no""" +608 42 optimizer """adam""" +608 42 training_loop """lcwa""" +608 42 evaluator """rankbased""" +608 43 dataset """kinships""" +608 43 model """rotate""" +608 43 loss """softplus""" +608 43 regularizer """no""" +608 43 optimizer """adam""" +608 43 training_loop """lcwa""" +608 43 evaluator """rankbased""" +608 44 dataset """kinships""" +608 44 model """rotate""" +608 44 loss """softplus""" +608 44 regularizer """no""" +608 44 optimizer """adam""" +608 44 training_loop """lcwa""" +608 44 evaluator """rankbased""" +608 45 dataset """kinships""" +608 45 model """rotate""" +608 45 loss """softplus""" +608 45 regularizer """no""" +608 45 optimizer """adam""" +608 45 training_loop """lcwa""" +608 45 evaluator """rankbased""" +608 46 dataset """kinships""" +608 46 model """rotate""" +608 46 loss """softplus""" +608 46 regularizer """no""" +608 46 optimizer """adam""" +608 46 training_loop """lcwa""" +608 46 evaluator """rankbased""" +608 47 dataset """kinships""" +608 47 model """rotate""" +608 47 loss """softplus""" +608 47 regularizer """no""" +608 47 optimizer """adam""" +608 47 training_loop """lcwa""" +608 47 evaluator """rankbased""" +608 48 dataset """kinships""" +608 48 model """rotate""" +608 48 loss """softplus""" +608 48 regularizer """no""" +608 48 optimizer """adam""" +608 48 training_loop """lcwa""" +608 48 evaluator """rankbased""" +608 49 dataset """kinships""" +608 49 model """rotate""" +608 49 loss """softplus""" +608 49 regularizer """no""" +608 49 optimizer """adam""" +608 49 training_loop """lcwa""" +608 49 evaluator """rankbased""" +608 50 dataset """kinships""" +608 50 model """rotate""" +608 50 loss """softplus""" +608 50 regularizer """no""" +608 50 optimizer """adam""" +608 50 training_loop """lcwa""" +608 50 evaluator """rankbased""" +608 51 dataset """kinships""" +608 51 model """rotate""" +608 51 loss """softplus""" +608 51 regularizer """no""" +608 51 optimizer """adam""" +608 51 training_loop 
"""lcwa""" +608 51 evaluator """rankbased""" +608 52 dataset """kinships""" +608 52 model """rotate""" +608 52 loss """softplus""" +608 52 regularizer """no""" +608 52 optimizer """adam""" +608 52 training_loop """lcwa""" +608 52 evaluator """rankbased""" +608 53 dataset """kinships""" +608 53 model """rotate""" +608 53 loss """softplus""" +608 53 regularizer """no""" +608 53 optimizer """adam""" +608 53 training_loop """lcwa""" +608 53 evaluator """rankbased""" +608 54 dataset """kinships""" +608 54 model """rotate""" +608 54 loss """softplus""" +608 54 regularizer """no""" +608 54 optimizer """adam""" +608 54 training_loop """lcwa""" +608 54 evaluator """rankbased""" +608 55 dataset """kinships""" +608 55 model """rotate""" +608 55 loss """softplus""" +608 55 regularizer """no""" +608 55 optimizer """adam""" +608 55 training_loop """lcwa""" +608 55 evaluator """rankbased""" +608 56 dataset """kinships""" +608 56 model """rotate""" +608 56 loss """softplus""" +608 56 regularizer """no""" +608 56 optimizer """adam""" +608 56 training_loop """lcwa""" +608 56 evaluator """rankbased""" +608 57 dataset """kinships""" +608 57 model """rotate""" +608 57 loss """softplus""" +608 57 regularizer """no""" +608 57 optimizer """adam""" +608 57 training_loop """lcwa""" +608 57 evaluator """rankbased""" +608 58 dataset """kinships""" +608 58 model """rotate""" +608 58 loss """softplus""" +608 58 regularizer """no""" +608 58 optimizer """adam""" +608 58 training_loop """lcwa""" +608 58 evaluator """rankbased""" +608 59 dataset """kinships""" +608 59 model """rotate""" +608 59 loss """softplus""" +608 59 regularizer """no""" +608 59 optimizer """adam""" +608 59 training_loop """lcwa""" +608 59 evaluator """rankbased""" +608 60 dataset """kinships""" +608 60 model """rotate""" +608 60 loss """softplus""" +608 60 regularizer """no""" +608 60 optimizer """adam""" +608 60 training_loop """lcwa""" +608 60 evaluator """rankbased""" +608 61 dataset """kinships""" +608 61 model 
"""rotate""" +608 61 loss """softplus""" +608 61 regularizer """no""" +608 61 optimizer """adam""" +608 61 training_loop """lcwa""" +608 61 evaluator """rankbased""" +608 62 dataset """kinships""" +608 62 model """rotate""" +608 62 loss """softplus""" +608 62 regularizer """no""" +608 62 optimizer """adam""" +608 62 training_loop """lcwa""" +608 62 evaluator """rankbased""" +608 63 dataset """kinships""" +608 63 model """rotate""" +608 63 loss """softplus""" +608 63 regularizer """no""" +608 63 optimizer """adam""" +608 63 training_loop """lcwa""" +608 63 evaluator """rankbased""" +608 64 dataset """kinships""" +608 64 model """rotate""" +608 64 loss """softplus""" +608 64 regularizer """no""" +608 64 optimizer """adam""" +608 64 training_loop """lcwa""" +608 64 evaluator """rankbased""" +608 65 dataset """kinships""" +608 65 model """rotate""" +608 65 loss """softplus""" +608 65 regularizer """no""" +608 65 optimizer """adam""" +608 65 training_loop """lcwa""" +608 65 evaluator """rankbased""" +608 66 dataset """kinships""" +608 66 model """rotate""" +608 66 loss """softplus""" +608 66 regularizer """no""" +608 66 optimizer """adam""" +608 66 training_loop """lcwa""" +608 66 evaluator """rankbased""" +608 67 dataset """kinships""" +608 67 model """rotate""" +608 67 loss """softplus""" +608 67 regularizer """no""" +608 67 optimizer """adam""" +608 67 training_loop """lcwa""" +608 67 evaluator """rankbased""" +608 68 dataset """kinships""" +608 68 model """rotate""" +608 68 loss """softplus""" +608 68 regularizer """no""" +608 68 optimizer """adam""" +608 68 training_loop """lcwa""" +608 68 evaluator """rankbased""" +608 69 dataset """kinships""" +608 69 model """rotate""" +608 69 loss """softplus""" +608 69 regularizer """no""" +608 69 optimizer """adam""" +608 69 training_loop """lcwa""" +608 69 evaluator """rankbased""" +608 70 dataset """kinships""" +608 70 model """rotate""" +608 70 loss """softplus""" +608 70 regularizer """no""" +608 70 optimizer """adam""" 
+608 70 training_loop """lcwa""" +608 70 evaluator """rankbased""" +608 71 dataset """kinships""" +608 71 model """rotate""" +608 71 loss """softplus""" +608 71 regularizer """no""" +608 71 optimizer """adam""" +608 71 training_loop """lcwa""" +608 71 evaluator """rankbased""" +608 72 dataset """kinships""" +608 72 model """rotate""" +608 72 loss """softplus""" +608 72 regularizer """no""" +608 72 optimizer """adam""" +608 72 training_loop """lcwa""" +608 72 evaluator """rankbased""" +608 73 dataset """kinships""" +608 73 model """rotate""" +608 73 loss """softplus""" +608 73 regularizer """no""" +608 73 optimizer """adam""" +608 73 training_loop """lcwa""" +608 73 evaluator """rankbased""" +608 74 dataset """kinships""" +608 74 model """rotate""" +608 74 loss """softplus""" +608 74 regularizer """no""" +608 74 optimizer """adam""" +608 74 training_loop """lcwa""" +608 74 evaluator """rankbased""" +608 75 dataset """kinships""" +608 75 model """rotate""" +608 75 loss """softplus""" +608 75 regularizer """no""" +608 75 optimizer """adam""" +608 75 training_loop """lcwa""" +608 75 evaluator """rankbased""" +608 76 dataset """kinships""" +608 76 model """rotate""" +608 76 loss """softplus""" +608 76 regularizer """no""" +608 76 optimizer """adam""" +608 76 training_loop """lcwa""" +608 76 evaluator """rankbased""" +608 77 dataset """kinships""" +608 77 model """rotate""" +608 77 loss """softplus""" +608 77 regularizer """no""" +608 77 optimizer """adam""" +608 77 training_loop """lcwa""" +608 77 evaluator """rankbased""" +608 78 dataset """kinships""" +608 78 model """rotate""" +608 78 loss """softplus""" +608 78 regularizer """no""" +608 78 optimizer """adam""" +608 78 training_loop """lcwa""" +608 78 evaluator """rankbased""" +608 79 dataset """kinships""" +608 79 model """rotate""" +608 79 loss """softplus""" +608 79 regularizer """no""" +608 79 optimizer """adam""" +608 79 training_loop """lcwa""" +608 79 evaluator """rankbased""" +608 80 dataset """kinships""" 
+608 80 model """rotate""" +608 80 loss """softplus""" +608 80 regularizer """no""" +608 80 optimizer """adam""" +608 80 training_loop """lcwa""" +608 80 evaluator """rankbased""" +608 81 dataset """kinships""" +608 81 model """rotate""" +608 81 loss """softplus""" +608 81 regularizer """no""" +608 81 optimizer """adam""" +608 81 training_loop """lcwa""" +608 81 evaluator """rankbased""" +608 82 dataset """kinships""" +608 82 model """rotate""" +608 82 loss """softplus""" +608 82 regularizer """no""" +608 82 optimizer """adam""" +608 82 training_loop """lcwa""" +608 82 evaluator """rankbased""" +608 83 dataset """kinships""" +608 83 model """rotate""" +608 83 loss """softplus""" +608 83 regularizer """no""" +608 83 optimizer """adam""" +608 83 training_loop """lcwa""" +608 83 evaluator """rankbased""" +608 84 dataset """kinships""" +608 84 model """rotate""" +608 84 loss """softplus""" +608 84 regularizer """no""" +608 84 optimizer """adam""" +608 84 training_loop """lcwa""" +608 84 evaluator """rankbased""" +608 85 dataset """kinships""" +608 85 model """rotate""" +608 85 loss """softplus""" +608 85 regularizer """no""" +608 85 optimizer """adam""" +608 85 training_loop """lcwa""" +608 85 evaluator """rankbased""" +608 86 dataset """kinships""" +608 86 model """rotate""" +608 86 loss """softplus""" +608 86 regularizer """no""" +608 86 optimizer """adam""" +608 86 training_loop """lcwa""" +608 86 evaluator """rankbased""" +608 87 dataset """kinships""" +608 87 model """rotate""" +608 87 loss """softplus""" +608 87 regularizer """no""" +608 87 optimizer """adam""" +608 87 training_loop """lcwa""" +608 87 evaluator """rankbased""" +608 88 dataset """kinships""" +608 88 model """rotate""" +608 88 loss """softplus""" +608 88 regularizer """no""" +608 88 optimizer """adam""" +608 88 training_loop """lcwa""" +608 88 evaluator """rankbased""" +608 89 dataset """kinships""" +608 89 model """rotate""" +608 89 loss """softplus""" +608 89 regularizer """no""" +608 89 
optimizer """adam""" +608 89 training_loop """lcwa""" +608 89 evaluator """rankbased""" +608 90 dataset """kinships""" +608 90 model """rotate""" +608 90 loss """softplus""" +608 90 regularizer """no""" +608 90 optimizer """adam""" +608 90 training_loop """lcwa""" +608 90 evaluator """rankbased""" +608 91 dataset """kinships""" +608 91 model """rotate""" +608 91 loss """softplus""" +608 91 regularizer """no""" +608 91 optimizer """adam""" +608 91 training_loop """lcwa""" +608 91 evaluator """rankbased""" +608 92 dataset """kinships""" +608 92 model """rotate""" +608 92 loss """softplus""" +608 92 regularizer """no""" +608 92 optimizer """adam""" +608 92 training_loop """lcwa""" +608 92 evaluator """rankbased""" +608 93 dataset """kinships""" +608 93 model """rotate""" +608 93 loss """softplus""" +608 93 regularizer """no""" +608 93 optimizer """adam""" +608 93 training_loop """lcwa""" +608 93 evaluator """rankbased""" +608 94 dataset """kinships""" +608 94 model """rotate""" +608 94 loss """softplus""" +608 94 regularizer """no""" +608 94 optimizer """adam""" +608 94 training_loop """lcwa""" +608 94 evaluator """rankbased""" +608 95 dataset """kinships""" +608 95 model """rotate""" +608 95 loss """softplus""" +608 95 regularizer """no""" +608 95 optimizer """adam""" +608 95 training_loop """lcwa""" +608 95 evaluator """rankbased""" +608 96 dataset """kinships""" +608 96 model """rotate""" +608 96 loss """softplus""" +608 96 regularizer """no""" +608 96 optimizer """adam""" +608 96 training_loop """lcwa""" +608 96 evaluator """rankbased""" +608 97 dataset """kinships""" +608 97 model """rotate""" +608 97 loss """softplus""" +608 97 regularizer """no""" +608 97 optimizer """adam""" +608 97 training_loop """lcwa""" +608 97 evaluator """rankbased""" +608 98 dataset """kinships""" +608 98 model """rotate""" +608 98 loss """softplus""" +608 98 regularizer """no""" +608 98 optimizer """adam""" +608 98 training_loop """lcwa""" +608 98 evaluator """rankbased""" +608 99 
dataset """kinships""" +608 99 model """rotate""" +608 99 loss """softplus""" +608 99 regularizer """no""" +608 99 optimizer """adam""" +608 99 training_loop """lcwa""" +608 99 evaluator """rankbased""" +608 100 dataset """kinships""" +608 100 model """rotate""" +608 100 loss """softplus""" +608 100 regularizer """no""" +608 100 optimizer """adam""" +608 100 training_loop """lcwa""" +608 100 evaluator """rankbased""" +609 1 model.embedding_dim 1.0 +609 1 optimizer.lr 0.006370292811514681 +609 1 training.batch_size 1.0 +609 1 training.label_smoothing 0.14587785086782648 +609 2 model.embedding_dim 0.0 +609 2 optimizer.lr 0.004677603741815524 +609 2 training.batch_size 0.0 +609 2 training.label_smoothing 0.22792878698023505 +609 3 model.embedding_dim 0.0 +609 3 optimizer.lr 0.008072552153505008 +609 3 training.batch_size 2.0 +609 3 training.label_smoothing 0.0020370009520940327 +609 4 model.embedding_dim 0.0 +609 4 optimizer.lr 0.0013923059873640448 +609 4 training.batch_size 1.0 +609 4 training.label_smoothing 0.001382371080171314 +609 5 model.embedding_dim 2.0 +609 5 optimizer.lr 0.0041691167159921185 +609 5 training.batch_size 0.0 +609 5 training.label_smoothing 0.02616523227314149 +609 6 model.embedding_dim 2.0 +609 6 optimizer.lr 0.009837106350418822 +609 6 training.batch_size 2.0 +609 6 training.label_smoothing 0.028677793816284738 +609 7 model.embedding_dim 0.0 +609 7 optimizer.lr 0.0012568999315499504 +609 7 training.batch_size 0.0 +609 7 training.label_smoothing 0.11108514568312394 +609 8 model.embedding_dim 2.0 +609 8 optimizer.lr 0.02887586009808234 +609 8 training.batch_size 1.0 +609 8 training.label_smoothing 0.10299054096837101 +609 9 model.embedding_dim 1.0 +609 9 optimizer.lr 0.0030606086736972944 +609 9 training.batch_size 1.0 +609 9 training.label_smoothing 0.5391167522395383 +609 10 model.embedding_dim 0.0 +609 10 optimizer.lr 0.00204098696377491 +609 10 training.batch_size 1.0 +609 10 training.label_smoothing 0.1635781732807874 +609 11 
model.embedding_dim 0.0 +609 11 optimizer.lr 0.003236499685213346 +609 11 training.batch_size 2.0 +609 11 training.label_smoothing 0.05873506663207119 +609 12 model.embedding_dim 1.0 +609 12 optimizer.lr 0.0026348748024714506 +609 12 training.batch_size 1.0 +609 12 training.label_smoothing 0.4020539028893049 +609 13 model.embedding_dim 1.0 +609 13 optimizer.lr 0.08266425039634716 +609 13 training.batch_size 2.0 +609 13 training.label_smoothing 0.05840172833446098 +609 14 model.embedding_dim 1.0 +609 14 optimizer.lr 0.005008623765123538 +609 14 training.batch_size 0.0 +609 14 training.label_smoothing 0.04942510358788481 +609 15 model.embedding_dim 2.0 +609 15 optimizer.lr 0.0056639808236969135 +609 15 training.batch_size 2.0 +609 15 training.label_smoothing 0.033564434585456646 +609 16 model.embedding_dim 2.0 +609 16 optimizer.lr 0.020927751926110107 +609 16 training.batch_size 1.0 +609 16 training.label_smoothing 0.6278054177960957 +609 17 model.embedding_dim 1.0 +609 17 optimizer.lr 0.0016077632487480407 +609 17 training.batch_size 2.0 +609 17 training.label_smoothing 0.5251747853139355 +609 18 model.embedding_dim 2.0 +609 18 optimizer.lr 0.00860083371778079 +609 18 training.batch_size 2.0 +609 18 training.label_smoothing 0.0010498113509586563 +609 19 model.embedding_dim 2.0 +609 19 optimizer.lr 0.0028958107644309434 +609 19 training.batch_size 2.0 +609 19 training.label_smoothing 0.22404506738451874 +609 20 model.embedding_dim 0.0 +609 20 optimizer.lr 0.05181721040882698 +609 20 training.batch_size 2.0 +609 20 training.label_smoothing 0.00283581390844083 +609 21 model.embedding_dim 0.0 +609 21 optimizer.lr 0.03565726458494212 +609 21 training.batch_size 0.0 +609 21 training.label_smoothing 0.08120762488164288 +609 22 model.embedding_dim 1.0 +609 22 optimizer.lr 0.0027527304657210534 +609 22 training.batch_size 1.0 +609 22 training.label_smoothing 0.0011225417648664726 +609 23 model.embedding_dim 0.0 +609 23 optimizer.lr 0.049173749753419996 +609 23 
training.batch_size 2.0 +609 23 training.label_smoothing 0.00957530347237008 +609 24 model.embedding_dim 1.0 +609 24 optimizer.lr 0.0010976875361164034 +609 24 training.batch_size 1.0 +609 24 training.label_smoothing 0.6815343365118491 +609 25 model.embedding_dim 1.0 +609 25 optimizer.lr 0.04383475414593938 +609 25 training.batch_size 0.0 +609 25 training.label_smoothing 0.0010030473111269011 +609 26 model.embedding_dim 1.0 +609 26 optimizer.lr 0.008105282090212608 +609 26 training.batch_size 0.0 +609 26 training.label_smoothing 0.012456417166068354 +609 27 model.embedding_dim 0.0 +609 27 optimizer.lr 0.0026590231005349605 +609 27 training.batch_size 2.0 +609 27 training.label_smoothing 0.9759956504365067 +609 28 model.embedding_dim 2.0 +609 28 optimizer.lr 0.0014100254748202646 +609 28 training.batch_size 2.0 +609 28 training.label_smoothing 0.005540247816119467 +609 29 model.embedding_dim 1.0 +609 29 optimizer.lr 0.01635487467136458 +609 29 training.batch_size 1.0 +609 29 training.label_smoothing 0.04913426650837165 +609 30 model.embedding_dim 1.0 +609 30 optimizer.lr 0.0035550891032023373 +609 30 training.batch_size 2.0 +609 30 training.label_smoothing 0.014891856901270565 +609 31 model.embedding_dim 2.0 +609 31 optimizer.lr 0.014444871273422066 +609 31 training.batch_size 0.0 +609 31 training.label_smoothing 0.27914255333189797 +609 32 model.embedding_dim 2.0 +609 32 optimizer.lr 0.004077093349041271 +609 32 training.batch_size 1.0 +609 32 training.label_smoothing 0.1507417980932209 +609 33 model.embedding_dim 0.0 +609 33 optimizer.lr 0.010890121371214949 +609 33 training.batch_size 2.0 +609 33 training.label_smoothing 0.12948174658049333 +609 34 model.embedding_dim 0.0 +609 34 optimizer.lr 0.001150867063998373 +609 34 training.batch_size 1.0 +609 34 training.label_smoothing 0.015693605316464472 +609 35 model.embedding_dim 1.0 +609 35 optimizer.lr 0.0018559402286631144 +609 35 training.batch_size 1.0 +609 35 training.label_smoothing 0.2753889879756781 +609 36 
model.embedding_dim 1.0 +609 36 optimizer.lr 0.001390520335393853 +609 36 training.batch_size 2.0 +609 36 training.label_smoothing 0.006287817231216091 +609 37 model.embedding_dim 0.0 +609 37 optimizer.lr 0.0021562943881686817 +609 37 training.batch_size 2.0 +609 37 training.label_smoothing 0.04294515983111344 +609 38 model.embedding_dim 1.0 +609 38 optimizer.lr 0.09399033652904651 +609 38 training.batch_size 1.0 +609 38 training.label_smoothing 0.011071102322606993 +609 39 model.embedding_dim 1.0 +609 39 optimizer.lr 0.002773448495483945 +609 39 training.batch_size 1.0 +609 39 training.label_smoothing 0.015590798773170352 +609 40 model.embedding_dim 0.0 +609 40 optimizer.lr 0.07793302041860119 +609 40 training.batch_size 0.0 +609 40 training.label_smoothing 0.0016418971268270812 +609 41 model.embedding_dim 0.0 +609 41 optimizer.lr 0.09132079115934431 +609 41 training.batch_size 0.0 +609 41 training.label_smoothing 0.10929304729185392 +609 42 model.embedding_dim 2.0 +609 42 optimizer.lr 0.040148432752262084 +609 42 training.batch_size 0.0 +609 42 training.label_smoothing 0.19339576983906953 +609 43 model.embedding_dim 0.0 +609 43 optimizer.lr 0.026165771096755077 +609 43 training.batch_size 0.0 +609 43 training.label_smoothing 0.4234315605890696 +609 44 model.embedding_dim 1.0 +609 44 optimizer.lr 0.002009750968335428 +609 44 training.batch_size 2.0 +609 44 training.label_smoothing 0.7311910523275917 +609 45 model.embedding_dim 0.0 +609 45 optimizer.lr 0.008484703844168977 +609 45 training.batch_size 2.0 +609 45 training.label_smoothing 0.020512952356055228 +609 46 model.embedding_dim 2.0 +609 46 optimizer.lr 0.02359820308973967 +609 46 training.batch_size 2.0 +609 46 training.label_smoothing 0.019834149818434586 +609 47 model.embedding_dim 1.0 +609 47 optimizer.lr 0.006189176599307434 +609 47 training.batch_size 0.0 +609 47 training.label_smoothing 0.00314337267482998 +609 48 model.embedding_dim 1.0 +609 48 optimizer.lr 0.0013466068970915514 +609 48 
training.batch_size 1.0 +609 48 training.label_smoothing 0.11854287066451813 +609 49 model.embedding_dim 1.0 +609 49 optimizer.lr 0.09734119157672005 +609 49 training.batch_size 2.0 +609 49 training.label_smoothing 0.025818923187962046 +609 50 model.embedding_dim 1.0 +609 50 optimizer.lr 0.0053691305839035945 +609 50 training.batch_size 0.0 +609 50 training.label_smoothing 0.004833846753844252 +609 51 model.embedding_dim 1.0 +609 51 optimizer.lr 0.0075437548960741955 +609 51 training.batch_size 2.0 +609 51 training.label_smoothing 0.4889158422864993 +609 52 model.embedding_dim 0.0 +609 52 optimizer.lr 0.016372881495139904 +609 52 training.batch_size 2.0 +609 52 training.label_smoothing 0.6317022698298873 +609 53 model.embedding_dim 1.0 +609 53 optimizer.lr 0.003205574188997802 +609 53 training.batch_size 1.0 +609 53 training.label_smoothing 0.31811144299421873 +609 54 model.embedding_dim 2.0 +609 54 optimizer.lr 0.007150558488504251 +609 54 training.batch_size 1.0 +609 54 training.label_smoothing 0.0387448902333279 +609 55 model.embedding_dim 2.0 +609 55 optimizer.lr 0.025991207783578288 +609 55 training.batch_size 2.0 +609 55 training.label_smoothing 0.9685756673621156 +609 56 model.embedding_dim 1.0 +609 56 optimizer.lr 0.04573073157294964 +609 56 training.batch_size 2.0 +609 56 training.label_smoothing 0.018422268004808288 +609 57 model.embedding_dim 2.0 +609 57 optimizer.lr 0.04992595932808291 +609 57 training.batch_size 1.0 +609 57 training.label_smoothing 0.011733323997498425 +609 58 model.embedding_dim 0.0 +609 58 optimizer.lr 0.029092097618873897 +609 58 training.batch_size 2.0 +609 58 training.label_smoothing 0.07348126067821144 +609 59 model.embedding_dim 1.0 +609 59 optimizer.lr 0.0025658568188111257 +609 59 training.batch_size 1.0 +609 59 training.label_smoothing 0.0033311093478708793 +609 60 model.embedding_dim 2.0 +609 60 optimizer.lr 0.0037518458930899485 +609 60 training.batch_size 0.0 +609 60 training.label_smoothing 0.0014022298619053737 +609 61 
model.embedding_dim 0.0 +609 61 optimizer.lr 0.0012148334990677468 +609 61 training.batch_size 0.0 +609 61 training.label_smoothing 0.03852312750986616 +609 62 model.embedding_dim 1.0 +609 62 optimizer.lr 0.025820601366964853 +609 62 training.batch_size 0.0 +609 62 training.label_smoothing 0.002170391794044884 +609 63 model.embedding_dim 0.0 +609 63 optimizer.lr 0.0017453706880636488 +609 63 training.batch_size 1.0 +609 63 training.label_smoothing 0.005117395605736547 +609 64 model.embedding_dim 2.0 +609 64 optimizer.lr 0.0031944763053335374 +609 64 training.batch_size 0.0 +609 64 training.label_smoothing 0.48893643079868726 +609 65 model.embedding_dim 0.0 +609 65 optimizer.lr 0.003071023469493284 +609 65 training.batch_size 2.0 +609 65 training.label_smoothing 0.002467090161652767 +609 66 model.embedding_dim 2.0 +609 66 optimizer.lr 0.001918606513480031 +609 66 training.batch_size 2.0 +609 66 training.label_smoothing 0.5077787700950629 +609 67 model.embedding_dim 2.0 +609 67 optimizer.lr 0.05693981676542106 +609 67 training.batch_size 1.0 +609 67 training.label_smoothing 0.6019065942349948 +609 68 model.embedding_dim 2.0 +609 68 optimizer.lr 0.0021114491274067913 +609 68 training.batch_size 2.0 +609 68 training.label_smoothing 0.20075253602198012 +609 69 model.embedding_dim 0.0 +609 69 optimizer.lr 0.004689480349449496 +609 69 training.batch_size 0.0 +609 69 training.label_smoothing 0.020210504255160716 +609 70 model.embedding_dim 0.0 +609 70 optimizer.lr 0.08243982185650169 +609 70 training.batch_size 0.0 +609 70 training.label_smoothing 0.16335372729706263 +609 71 model.embedding_dim 0.0 +609 71 optimizer.lr 0.002006251109669536 +609 71 training.batch_size 1.0 +609 71 training.label_smoothing 0.021183960859064297 +609 72 model.embedding_dim 1.0 +609 72 optimizer.lr 0.02220475638450548 +609 72 training.batch_size 0.0 +609 72 training.label_smoothing 0.16785105887399787 +609 73 model.embedding_dim 0.0 +609 73 optimizer.lr 0.01619064790563544 +609 73 
training.batch_size 1.0 +609 73 training.label_smoothing 0.0013361139576657831 +609 74 model.embedding_dim 0.0 +609 74 optimizer.lr 0.014591028277180411 +609 74 training.batch_size 0.0 +609 74 training.label_smoothing 0.0073208406740763975 +609 75 model.embedding_dim 0.0 +609 75 optimizer.lr 0.039329787572764445 +609 75 training.batch_size 1.0 +609 75 training.label_smoothing 0.022173432390114788 +609 76 model.embedding_dim 2.0 +609 76 optimizer.lr 0.0031568675912294047 +609 76 training.batch_size 0.0 +609 76 training.label_smoothing 0.0050153869618304835 +609 77 model.embedding_dim 0.0 +609 77 optimizer.lr 0.0012869974741812453 +609 77 training.batch_size 1.0 +609 77 training.label_smoothing 0.0018726089585293735 +609 78 model.embedding_dim 0.0 +609 78 optimizer.lr 0.051140728746119586 +609 78 training.batch_size 0.0 +609 78 training.label_smoothing 0.00943021153003784 +609 79 model.embedding_dim 2.0 +609 79 optimizer.lr 0.0031884477615375067 +609 79 training.batch_size 2.0 +609 79 training.label_smoothing 0.001309076938282732 +609 80 model.embedding_dim 1.0 +609 80 optimizer.lr 0.028962217447618666 +609 80 training.batch_size 2.0 +609 80 training.label_smoothing 0.33790148386154867 +609 81 model.embedding_dim 1.0 +609 81 optimizer.lr 0.0922554163783074 +609 81 training.batch_size 2.0 +609 81 training.label_smoothing 0.08812398351547793 +609 82 model.embedding_dim 0.0 +609 82 optimizer.lr 0.0013512317415784467 +609 82 training.batch_size 0.0 +609 82 training.label_smoothing 0.07730474799239485 +609 83 model.embedding_dim 0.0 +609 83 optimizer.lr 0.001657569662903347 +609 83 training.batch_size 0.0 +609 83 training.label_smoothing 0.008427998369006191 +609 84 model.embedding_dim 0.0 +609 84 optimizer.lr 0.006068638033886829 +609 84 training.batch_size 1.0 +609 84 training.label_smoothing 0.06736566833669043 +609 85 model.embedding_dim 2.0 +609 85 optimizer.lr 0.0033282308655899944 +609 85 training.batch_size 1.0 +609 85 training.label_smoothing 0.3942495014610803 
+609 86 model.embedding_dim 0.0 +609 86 optimizer.lr 0.0036380380876654904 +609 86 training.batch_size 0.0 +609 86 training.label_smoothing 0.05943507050736985 +609 87 model.embedding_dim 2.0 +609 87 optimizer.lr 0.009396712742500824 +609 87 training.batch_size 1.0 +609 87 training.label_smoothing 0.001169692275589115 +609 88 model.embedding_dim 1.0 +609 88 optimizer.lr 0.008628777349658378 +609 88 training.batch_size 0.0 +609 88 training.label_smoothing 0.038132421110955356 +609 89 model.embedding_dim 0.0 +609 89 optimizer.lr 0.020545984431102293 +609 89 training.batch_size 2.0 +609 89 training.label_smoothing 0.01583998233082918 +609 90 model.embedding_dim 1.0 +609 90 optimizer.lr 0.0010647985998799524 +609 90 training.batch_size 2.0 +609 90 training.label_smoothing 0.0013663970233684366 +609 91 model.embedding_dim 2.0 +609 91 optimizer.lr 0.0021554940269099443 +609 91 training.batch_size 2.0 +609 91 training.label_smoothing 0.012555547295715572 +609 92 model.embedding_dim 1.0 +609 92 optimizer.lr 0.005142807715013764 +609 92 training.batch_size 1.0 +609 92 training.label_smoothing 0.003751888663587053 +609 93 model.embedding_dim 2.0 +609 93 optimizer.lr 0.004402846118745733 +609 93 training.batch_size 0.0 +609 93 training.label_smoothing 0.059931198895136636 +609 94 model.embedding_dim 1.0 +609 94 optimizer.lr 0.0011582488625613166 +609 94 training.batch_size 2.0 +609 94 training.label_smoothing 0.00243581207359973 +609 95 model.embedding_dim 2.0 +609 95 optimizer.lr 0.08797314913201326 +609 95 training.batch_size 0.0 +609 95 training.label_smoothing 0.48733528274479004 +609 96 model.embedding_dim 2.0 +609 96 optimizer.lr 0.019483620312884063 +609 96 training.batch_size 0.0 +609 96 training.label_smoothing 0.001297814156257205 +609 97 model.embedding_dim 1.0 +609 97 optimizer.lr 0.04209374226965321 +609 97 training.batch_size 2.0 +609 97 training.label_smoothing 0.003262023125159726 +609 98 model.embedding_dim 1.0 +609 98 optimizer.lr 0.007116358064704253 +609 
98 training.batch_size 0.0 +609 98 training.label_smoothing 0.0015042321497091512 +609 99 model.embedding_dim 0.0 +609 99 optimizer.lr 0.029842279655925057 +609 99 training.batch_size 0.0 +609 99 training.label_smoothing 0.014638253968411931 +609 100 model.embedding_dim 0.0 +609 100 optimizer.lr 0.0480484868816659 +609 100 training.batch_size 1.0 +609 100 training.label_smoothing 0.3071457054782649 +609 1 dataset """kinships""" +609 1 model """rotate""" +609 1 loss """bceaftersigmoid""" +609 1 regularizer """no""" +609 1 optimizer """adam""" +609 1 training_loop """lcwa""" +609 1 evaluator """rankbased""" +609 2 dataset """kinships""" +609 2 model """rotate""" +609 2 loss """bceaftersigmoid""" +609 2 regularizer """no""" +609 2 optimizer """adam""" +609 2 training_loop """lcwa""" +609 2 evaluator """rankbased""" +609 3 dataset """kinships""" +609 3 model """rotate""" +609 3 loss """bceaftersigmoid""" +609 3 regularizer """no""" +609 3 optimizer """adam""" +609 3 training_loop """lcwa""" +609 3 evaluator """rankbased""" +609 4 dataset """kinships""" +609 4 model """rotate""" +609 4 loss """bceaftersigmoid""" +609 4 regularizer """no""" +609 4 optimizer """adam""" +609 4 training_loop """lcwa""" +609 4 evaluator """rankbased""" +609 5 dataset """kinships""" +609 5 model """rotate""" +609 5 loss """bceaftersigmoid""" +609 5 regularizer """no""" +609 5 optimizer """adam""" +609 5 training_loop """lcwa""" +609 5 evaluator """rankbased""" +609 6 dataset """kinships""" +609 6 model """rotate""" +609 6 loss """bceaftersigmoid""" +609 6 regularizer """no""" +609 6 optimizer """adam""" +609 6 training_loop """lcwa""" +609 6 evaluator """rankbased""" +609 7 dataset """kinships""" +609 7 model """rotate""" +609 7 loss """bceaftersigmoid""" +609 7 regularizer """no""" +609 7 optimizer """adam""" +609 7 training_loop """lcwa""" +609 7 evaluator """rankbased""" +609 8 dataset """kinships""" +609 8 model """rotate""" +609 8 loss """bceaftersigmoid""" +609 8 regularizer """no""" 
+609 8 optimizer """adam""" +609 8 training_loop """lcwa""" +609 8 evaluator """rankbased""" +609 9 dataset """kinships""" +609 9 model """rotate""" +609 9 loss """bceaftersigmoid""" +609 9 regularizer """no""" +609 9 optimizer """adam""" +609 9 training_loop """lcwa""" +609 9 evaluator """rankbased""" +609 10 dataset """kinships""" +609 10 model """rotate""" +609 10 loss """bceaftersigmoid""" +609 10 regularizer """no""" +609 10 optimizer """adam""" +609 10 training_loop """lcwa""" +609 10 evaluator """rankbased""" +609 11 dataset """kinships""" +609 11 model """rotate""" +609 11 loss """bceaftersigmoid""" +609 11 regularizer """no""" +609 11 optimizer """adam""" +609 11 training_loop """lcwa""" +609 11 evaluator """rankbased""" +609 12 dataset """kinships""" +609 12 model """rotate""" +609 12 loss """bceaftersigmoid""" +609 12 regularizer """no""" +609 12 optimizer """adam""" +609 12 training_loop """lcwa""" +609 12 evaluator """rankbased""" +609 13 dataset """kinships""" +609 13 model """rotate""" +609 13 loss """bceaftersigmoid""" +609 13 regularizer """no""" +609 13 optimizer """adam""" +609 13 training_loop """lcwa""" +609 13 evaluator """rankbased""" +609 14 dataset """kinships""" +609 14 model """rotate""" +609 14 loss """bceaftersigmoid""" +609 14 regularizer """no""" +609 14 optimizer """adam""" +609 14 training_loop """lcwa""" +609 14 evaluator """rankbased""" +609 15 dataset """kinships""" +609 15 model """rotate""" +609 15 loss """bceaftersigmoid""" +609 15 regularizer """no""" +609 15 optimizer """adam""" +609 15 training_loop """lcwa""" +609 15 evaluator """rankbased""" +609 16 dataset """kinships""" +609 16 model """rotate""" +609 16 loss """bceaftersigmoid""" +609 16 regularizer """no""" +609 16 optimizer """adam""" +609 16 training_loop """lcwa""" +609 16 evaluator """rankbased""" +609 17 dataset """kinships""" +609 17 model """rotate""" +609 17 loss """bceaftersigmoid""" +609 17 regularizer """no""" +609 17 optimizer """adam""" +609 17 
training_loop """lcwa""" +609 17 evaluator """rankbased""" +609 18 dataset """kinships""" +609 18 model """rotate""" +609 18 loss """bceaftersigmoid""" +609 18 regularizer """no""" +609 18 optimizer """adam""" +609 18 training_loop """lcwa""" +609 18 evaluator """rankbased""" +609 19 dataset """kinships""" +609 19 model """rotate""" +609 19 loss """bceaftersigmoid""" +609 19 regularizer """no""" +609 19 optimizer """adam""" +609 19 training_loop """lcwa""" +609 19 evaluator """rankbased""" +609 20 dataset """kinships""" +609 20 model """rotate""" +609 20 loss """bceaftersigmoid""" +609 20 regularizer """no""" +609 20 optimizer """adam""" +609 20 training_loop """lcwa""" +609 20 evaluator """rankbased""" +609 21 dataset """kinships""" +609 21 model """rotate""" +609 21 loss """bceaftersigmoid""" +609 21 regularizer """no""" +609 21 optimizer """adam""" +609 21 training_loop """lcwa""" +609 21 evaluator """rankbased""" +609 22 dataset """kinships""" +609 22 model """rotate""" +609 22 loss """bceaftersigmoid""" +609 22 regularizer """no""" +609 22 optimizer """adam""" +609 22 training_loop """lcwa""" +609 22 evaluator """rankbased""" +609 23 dataset """kinships""" +609 23 model """rotate""" +609 23 loss """bceaftersigmoid""" +609 23 regularizer """no""" +609 23 optimizer """adam""" +609 23 training_loop """lcwa""" +609 23 evaluator """rankbased""" +609 24 dataset """kinships""" +609 24 model """rotate""" +609 24 loss """bceaftersigmoid""" +609 24 regularizer """no""" +609 24 optimizer """adam""" +609 24 training_loop """lcwa""" +609 24 evaluator """rankbased""" +609 25 dataset """kinships""" +609 25 model """rotate""" +609 25 loss """bceaftersigmoid""" +609 25 regularizer """no""" +609 25 optimizer """adam""" +609 25 training_loop """lcwa""" +609 25 evaluator """rankbased""" +609 26 dataset """kinships""" +609 26 model """rotate""" +609 26 loss """bceaftersigmoid""" +609 26 regularizer """no""" +609 26 optimizer """adam""" +609 26 training_loop """lcwa""" +609 26 
evaluator """rankbased""" +609 27 dataset """kinships""" +609 27 model """rotate""" +609 27 loss """bceaftersigmoid""" +609 27 regularizer """no""" +609 27 optimizer """adam""" +609 27 training_loop """lcwa""" +609 27 evaluator """rankbased""" +609 28 dataset """kinships""" +609 28 model """rotate""" +609 28 loss """bceaftersigmoid""" +609 28 regularizer """no""" +609 28 optimizer """adam""" +609 28 training_loop """lcwa""" +609 28 evaluator """rankbased""" +609 29 dataset """kinships""" +609 29 model """rotate""" +609 29 loss """bceaftersigmoid""" +609 29 regularizer """no""" +609 29 optimizer """adam""" +609 29 training_loop """lcwa""" +609 29 evaluator """rankbased""" +609 30 dataset """kinships""" +609 30 model """rotate""" +609 30 loss """bceaftersigmoid""" +609 30 regularizer """no""" +609 30 optimizer """adam""" +609 30 training_loop """lcwa""" +609 30 evaluator """rankbased""" +609 31 dataset """kinships""" +609 31 model """rotate""" +609 31 loss """bceaftersigmoid""" +609 31 regularizer """no""" +609 31 optimizer """adam""" +609 31 training_loop """lcwa""" +609 31 evaluator """rankbased""" +609 32 dataset """kinships""" +609 32 model """rotate""" +609 32 loss """bceaftersigmoid""" +609 32 regularizer """no""" +609 32 optimizer """adam""" +609 32 training_loop """lcwa""" +609 32 evaluator """rankbased""" +609 33 dataset """kinships""" +609 33 model """rotate""" +609 33 loss """bceaftersigmoid""" +609 33 regularizer """no""" +609 33 optimizer """adam""" +609 33 training_loop """lcwa""" +609 33 evaluator """rankbased""" +609 34 dataset """kinships""" +609 34 model """rotate""" +609 34 loss """bceaftersigmoid""" +609 34 regularizer """no""" +609 34 optimizer """adam""" +609 34 training_loop """lcwa""" +609 34 evaluator """rankbased""" +609 35 dataset """kinships""" +609 35 model """rotate""" +609 35 loss """bceaftersigmoid""" +609 35 regularizer """no""" +609 35 optimizer """adam""" +609 35 training_loop """lcwa""" +609 35 evaluator """rankbased""" +609 36 
dataset """kinships""" +609 36 model """rotate""" +609 36 loss """bceaftersigmoid""" +609 36 regularizer """no""" +609 36 optimizer """adam""" +609 36 training_loop """lcwa""" +609 36 evaluator """rankbased""" +609 37 dataset """kinships""" +609 37 model """rotate""" +609 37 loss """bceaftersigmoid""" +609 37 regularizer """no""" +609 37 optimizer """adam""" +609 37 training_loop """lcwa""" +609 37 evaluator """rankbased""" +609 38 dataset """kinships""" +609 38 model """rotate""" +609 38 loss """bceaftersigmoid""" +609 38 regularizer """no""" +609 38 optimizer """adam""" +609 38 training_loop """lcwa""" +609 38 evaluator """rankbased""" +609 39 dataset """kinships""" +609 39 model """rotate""" +609 39 loss """bceaftersigmoid""" +609 39 regularizer """no""" +609 39 optimizer """adam""" +609 39 training_loop """lcwa""" +609 39 evaluator """rankbased""" +609 40 dataset """kinships""" +609 40 model """rotate""" +609 40 loss """bceaftersigmoid""" +609 40 regularizer """no""" +609 40 optimizer """adam""" +609 40 training_loop """lcwa""" +609 40 evaluator """rankbased""" +609 41 dataset """kinships""" +609 41 model """rotate""" +609 41 loss """bceaftersigmoid""" +609 41 regularizer """no""" +609 41 optimizer """adam""" +609 41 training_loop """lcwa""" +609 41 evaluator """rankbased""" +609 42 dataset """kinships""" +609 42 model """rotate""" +609 42 loss """bceaftersigmoid""" +609 42 regularizer """no""" +609 42 optimizer """adam""" +609 42 training_loop """lcwa""" +609 42 evaluator """rankbased""" +609 43 dataset """kinships""" +609 43 model """rotate""" +609 43 loss """bceaftersigmoid""" +609 43 regularizer """no""" +609 43 optimizer """adam""" +609 43 training_loop """lcwa""" +609 43 evaluator """rankbased""" +609 44 dataset """kinships""" +609 44 model """rotate""" +609 44 loss """bceaftersigmoid""" +609 44 regularizer """no""" +609 44 optimizer """adam""" +609 44 training_loop """lcwa""" +609 44 evaluator """rankbased""" +609 45 dataset """kinships""" +609 45 model 
"""rotate""" +609 45 loss """bceaftersigmoid""" +609 45 regularizer """no""" +609 45 optimizer """adam""" +609 45 training_loop """lcwa""" +609 45 evaluator """rankbased""" +609 46 dataset """kinships""" +609 46 model """rotate""" +609 46 loss """bceaftersigmoid""" +609 46 regularizer """no""" +609 46 optimizer """adam""" +609 46 training_loop """lcwa""" +609 46 evaluator """rankbased""" +609 47 dataset """kinships""" +609 47 model """rotate""" +609 47 loss """bceaftersigmoid""" +609 47 regularizer """no""" +609 47 optimizer """adam""" +609 47 training_loop """lcwa""" +609 47 evaluator """rankbased""" +609 48 dataset """kinships""" +609 48 model """rotate""" +609 48 loss """bceaftersigmoid""" +609 48 regularizer """no""" +609 48 optimizer """adam""" +609 48 training_loop """lcwa""" +609 48 evaluator """rankbased""" +609 49 dataset """kinships""" +609 49 model """rotate""" +609 49 loss """bceaftersigmoid""" +609 49 regularizer """no""" +609 49 optimizer """adam""" +609 49 training_loop """lcwa""" +609 49 evaluator """rankbased""" +609 50 dataset """kinships""" +609 50 model """rotate""" +609 50 loss """bceaftersigmoid""" +609 50 regularizer """no""" +609 50 optimizer """adam""" +609 50 training_loop """lcwa""" +609 50 evaluator """rankbased""" +609 51 dataset """kinships""" +609 51 model """rotate""" +609 51 loss """bceaftersigmoid""" +609 51 regularizer """no""" +609 51 optimizer """adam""" +609 51 training_loop """lcwa""" +609 51 evaluator """rankbased""" +609 52 dataset """kinships""" +609 52 model """rotate""" +609 52 loss """bceaftersigmoid""" +609 52 regularizer """no""" +609 52 optimizer """adam""" +609 52 training_loop """lcwa""" +609 52 evaluator """rankbased""" +609 53 dataset """kinships""" +609 53 model """rotate""" +609 53 loss """bceaftersigmoid""" +609 53 regularizer """no""" +609 53 optimizer """adam""" +609 53 training_loop """lcwa""" +609 53 evaluator """rankbased""" +609 54 dataset """kinships""" +609 54 model """rotate""" +609 54 loss 
"""bceaftersigmoid""" +609 54 regularizer """no""" +609 54 optimizer """adam""" +609 54 training_loop """lcwa""" +609 54 evaluator """rankbased""" +609 55 dataset """kinships""" +609 55 model """rotate""" +609 55 loss """bceaftersigmoid""" +609 55 regularizer """no""" +609 55 optimizer """adam""" +609 55 training_loop """lcwa""" +609 55 evaluator """rankbased""" +609 56 dataset """kinships""" +609 56 model """rotate""" +609 56 loss """bceaftersigmoid""" +609 56 regularizer """no""" +609 56 optimizer """adam""" +609 56 training_loop """lcwa""" +609 56 evaluator """rankbased""" +609 57 dataset """kinships""" +609 57 model """rotate""" +609 57 loss """bceaftersigmoid""" +609 57 regularizer """no""" +609 57 optimizer """adam""" +609 57 training_loop """lcwa""" +609 57 evaluator """rankbased""" +609 58 dataset """kinships""" +609 58 model """rotate""" +609 58 loss """bceaftersigmoid""" +609 58 regularizer """no""" +609 58 optimizer """adam""" +609 58 training_loop """lcwa""" +609 58 evaluator """rankbased""" +609 59 dataset """kinships""" +609 59 model """rotate""" +609 59 loss """bceaftersigmoid""" +609 59 regularizer """no""" +609 59 optimizer """adam""" +609 59 training_loop """lcwa""" +609 59 evaluator """rankbased""" +609 60 dataset """kinships""" +609 60 model """rotate""" +609 60 loss """bceaftersigmoid""" +609 60 regularizer """no""" +609 60 optimizer """adam""" +609 60 training_loop """lcwa""" +609 60 evaluator """rankbased""" +609 61 dataset """kinships""" +609 61 model """rotate""" +609 61 loss """bceaftersigmoid""" +609 61 regularizer """no""" +609 61 optimizer """adam""" +609 61 training_loop """lcwa""" +609 61 evaluator """rankbased""" +609 62 dataset """kinships""" +609 62 model """rotate""" +609 62 loss """bceaftersigmoid""" +609 62 regularizer """no""" +609 62 optimizer """adam""" +609 62 training_loop """lcwa""" +609 62 evaluator """rankbased""" +609 63 dataset """kinships""" +609 63 model """rotate""" +609 63 loss """bceaftersigmoid""" +609 63 
regularizer """no""" +609 63 optimizer """adam""" +609 63 training_loop """lcwa""" +609 63 evaluator """rankbased""" +609 64 dataset """kinships""" +609 64 model """rotate""" +609 64 loss """bceaftersigmoid""" +609 64 regularizer """no""" +609 64 optimizer """adam""" +609 64 training_loop """lcwa""" +609 64 evaluator """rankbased""" +609 65 dataset """kinships""" +609 65 model """rotate""" +609 65 loss """bceaftersigmoid""" +609 65 regularizer """no""" +609 65 optimizer """adam""" +609 65 training_loop """lcwa""" +609 65 evaluator """rankbased""" +609 66 dataset """kinships""" +609 66 model """rotate""" +609 66 loss """bceaftersigmoid""" +609 66 regularizer """no""" +609 66 optimizer """adam""" +609 66 training_loop """lcwa""" +609 66 evaluator """rankbased""" +609 67 dataset """kinships""" +609 67 model """rotate""" +609 67 loss """bceaftersigmoid""" +609 67 regularizer """no""" +609 67 optimizer """adam""" +609 67 training_loop """lcwa""" +609 67 evaluator """rankbased""" +609 68 dataset """kinships""" +609 68 model """rotate""" +609 68 loss """bceaftersigmoid""" +609 68 regularizer """no""" +609 68 optimizer """adam""" +609 68 training_loop """lcwa""" +609 68 evaluator """rankbased""" +609 69 dataset """kinships""" +609 69 model """rotate""" +609 69 loss """bceaftersigmoid""" +609 69 regularizer """no""" +609 69 optimizer """adam""" +609 69 training_loop """lcwa""" +609 69 evaluator """rankbased""" +609 70 dataset """kinships""" +609 70 model """rotate""" +609 70 loss """bceaftersigmoid""" +609 70 regularizer """no""" +609 70 optimizer """adam""" +609 70 training_loop """lcwa""" +609 70 evaluator """rankbased""" +609 71 dataset """kinships""" +609 71 model """rotate""" +609 71 loss """bceaftersigmoid""" +609 71 regularizer """no""" +609 71 optimizer """adam""" +609 71 training_loop """lcwa""" +609 71 evaluator """rankbased""" +609 72 dataset """kinships""" +609 72 model """rotate""" +609 72 loss """bceaftersigmoid""" +609 72 regularizer """no""" +609 72 
optimizer """adam""" +609 72 training_loop """lcwa""" +609 72 evaluator """rankbased""" +609 73 dataset """kinships""" +609 73 model """rotate""" +609 73 loss """bceaftersigmoid""" +609 73 regularizer """no""" +609 73 optimizer """adam""" +609 73 training_loop """lcwa""" +609 73 evaluator """rankbased""" +609 74 dataset """kinships""" +609 74 model """rotate""" +609 74 loss """bceaftersigmoid""" +609 74 regularizer """no""" +609 74 optimizer """adam""" +609 74 training_loop """lcwa""" +609 74 evaluator """rankbased""" +609 75 dataset """kinships""" +609 75 model """rotate""" +609 75 loss """bceaftersigmoid""" +609 75 regularizer """no""" +609 75 optimizer """adam""" +609 75 training_loop """lcwa""" +609 75 evaluator """rankbased""" +609 76 dataset """kinships""" +609 76 model """rotate""" +609 76 loss """bceaftersigmoid""" +609 76 regularizer """no""" +609 76 optimizer """adam""" +609 76 training_loop """lcwa""" +609 76 evaluator """rankbased""" +609 77 dataset """kinships""" +609 77 model """rotate""" +609 77 loss """bceaftersigmoid""" +609 77 regularizer """no""" +609 77 optimizer """adam""" +609 77 training_loop """lcwa""" +609 77 evaluator """rankbased""" +609 78 dataset """kinships""" +609 78 model """rotate""" +609 78 loss """bceaftersigmoid""" +609 78 regularizer """no""" +609 78 optimizer """adam""" +609 78 training_loop """lcwa""" +609 78 evaluator """rankbased""" +609 79 dataset """kinships""" +609 79 model """rotate""" +609 79 loss """bceaftersigmoid""" +609 79 regularizer """no""" +609 79 optimizer """adam""" +609 79 training_loop """lcwa""" +609 79 evaluator """rankbased""" +609 80 dataset """kinships""" +609 80 model """rotate""" +609 80 loss """bceaftersigmoid""" +609 80 regularizer """no""" +609 80 optimizer """adam""" +609 80 training_loop """lcwa""" +609 80 evaluator """rankbased""" +609 81 dataset """kinships""" +609 81 model """rotate""" +609 81 loss """bceaftersigmoid""" +609 81 regularizer """no""" +609 81 optimizer """adam""" +609 81 
training_loop """lcwa""" +609 81 evaluator """rankbased""" +609 82 dataset """kinships""" +609 82 model """rotate""" +609 82 loss """bceaftersigmoid""" +609 82 regularizer """no""" +609 82 optimizer """adam""" +609 82 training_loop """lcwa""" +609 82 evaluator """rankbased""" +609 83 dataset """kinships""" +609 83 model """rotate""" +609 83 loss """bceaftersigmoid""" +609 83 regularizer """no""" +609 83 optimizer """adam""" +609 83 training_loop """lcwa""" +609 83 evaluator """rankbased""" +609 84 dataset """kinships""" +609 84 model """rotate""" +609 84 loss """bceaftersigmoid""" +609 84 regularizer """no""" +609 84 optimizer """adam""" +609 84 training_loop """lcwa""" +609 84 evaluator """rankbased""" +609 85 dataset """kinships""" +609 85 model """rotate""" +609 85 loss """bceaftersigmoid""" +609 85 regularizer """no""" +609 85 optimizer """adam""" +609 85 training_loop """lcwa""" +609 85 evaluator """rankbased""" +609 86 dataset """kinships""" +609 86 model """rotate""" +609 86 loss """bceaftersigmoid""" +609 86 regularizer """no""" +609 86 optimizer """adam""" +609 86 training_loop """lcwa""" +609 86 evaluator """rankbased""" +609 87 dataset """kinships""" +609 87 model """rotate""" +609 87 loss """bceaftersigmoid""" +609 87 regularizer """no""" +609 87 optimizer """adam""" +609 87 training_loop """lcwa""" +609 87 evaluator """rankbased""" +609 88 dataset """kinships""" +609 88 model """rotate""" +609 88 loss """bceaftersigmoid""" +609 88 regularizer """no""" +609 88 optimizer """adam""" +609 88 training_loop """lcwa""" +609 88 evaluator """rankbased""" +609 89 dataset """kinships""" +609 89 model """rotate""" +609 89 loss """bceaftersigmoid""" +609 89 regularizer """no""" +609 89 optimizer """adam""" +609 89 training_loop """lcwa""" +609 89 evaluator """rankbased""" +609 90 dataset """kinships""" +609 90 model """rotate""" +609 90 loss """bceaftersigmoid""" +609 90 regularizer """no""" +609 90 optimizer """adam""" +609 90 training_loop """lcwa""" +609 90 
evaluator """rankbased""" +609 91 dataset """kinships""" +609 91 model """rotate""" +609 91 loss """bceaftersigmoid""" +609 91 regularizer """no""" +609 91 optimizer """adam""" +609 91 training_loop """lcwa""" +609 91 evaluator """rankbased""" +609 92 dataset """kinships""" +609 92 model """rotate""" +609 92 loss """bceaftersigmoid""" +609 92 regularizer """no""" +609 92 optimizer """adam""" +609 92 training_loop """lcwa""" +609 92 evaluator """rankbased""" +609 93 dataset """kinships""" +609 93 model """rotate""" +609 93 loss """bceaftersigmoid""" +609 93 regularizer """no""" +609 93 optimizer """adam""" +609 93 training_loop """lcwa""" +609 93 evaluator """rankbased""" +609 94 dataset """kinships""" +609 94 model """rotate""" +609 94 loss """bceaftersigmoid""" +609 94 regularizer """no""" +609 94 optimizer """adam""" +609 94 training_loop """lcwa""" +609 94 evaluator """rankbased""" +609 95 dataset """kinships""" +609 95 model """rotate""" +609 95 loss """bceaftersigmoid""" +609 95 regularizer """no""" +609 95 optimizer """adam""" +609 95 training_loop """lcwa""" +609 95 evaluator """rankbased""" +609 96 dataset """kinships""" +609 96 model """rotate""" +609 96 loss """bceaftersigmoid""" +609 96 regularizer """no""" +609 96 optimizer """adam""" +609 96 training_loop """lcwa""" +609 96 evaluator """rankbased""" +609 97 dataset """kinships""" +609 97 model """rotate""" +609 97 loss """bceaftersigmoid""" +609 97 regularizer """no""" +609 97 optimizer """adam""" +609 97 training_loop """lcwa""" +609 97 evaluator """rankbased""" +609 98 dataset """kinships""" +609 98 model """rotate""" +609 98 loss """bceaftersigmoid""" +609 98 regularizer """no""" +609 98 optimizer """adam""" +609 98 training_loop """lcwa""" +609 98 evaluator """rankbased""" +609 99 dataset """kinships""" +609 99 model """rotate""" +609 99 loss """bceaftersigmoid""" +609 99 regularizer """no""" +609 99 optimizer """adam""" +609 99 training_loop """lcwa""" +609 99 evaluator """rankbased""" +609 100 
dataset """kinships""" +609 100 model """rotate""" +609 100 loss """bceaftersigmoid""" +609 100 regularizer """no""" +609 100 optimizer """adam""" +609 100 training_loop """lcwa""" +609 100 evaluator """rankbased""" +610 1 model.embedding_dim 2.0 +610 1 optimizer.lr 0.04833987084794892 +610 1 training.batch_size 2.0 +610 1 training.label_smoothing 0.11539107495554389 +610 2 model.embedding_dim 0.0 +610 2 optimizer.lr 0.020271618615069154 +610 2 training.batch_size 1.0 +610 2 training.label_smoothing 0.009200023292529174 +610 3 model.embedding_dim 1.0 +610 3 optimizer.lr 0.006786245747007544 +610 3 training.batch_size 0.0 +610 3 training.label_smoothing 0.0027190339115052077 +610 4 model.embedding_dim 1.0 +610 4 optimizer.lr 0.02391922939288206 +610 4 training.batch_size 2.0 +610 4 training.label_smoothing 0.06101066673283187 +610 5 model.embedding_dim 0.0 +610 5 optimizer.lr 0.008785286614850656 +610 5 training.batch_size 0.0 +610 5 training.label_smoothing 0.19236218754295387 +610 6 model.embedding_dim 2.0 +610 6 optimizer.lr 0.0012700455620575713 +610 6 training.batch_size 2.0 +610 6 training.label_smoothing 0.0031417715048668625 +610 7 model.embedding_dim 0.0 +610 7 optimizer.lr 0.06284749704824406 +610 7 training.batch_size 1.0 +610 7 training.label_smoothing 0.14652193211008904 +610 8 model.embedding_dim 0.0 +610 8 optimizer.lr 0.003862820337159711 +610 8 training.batch_size 0.0 +610 8 training.label_smoothing 0.29173905324518945 +610 9 model.embedding_dim 2.0 +610 9 optimizer.lr 0.003220919144884736 +610 9 training.batch_size 1.0 +610 9 training.label_smoothing 0.22697198751674258 +610 10 model.embedding_dim 1.0 +610 10 optimizer.lr 0.03255920682174967 +610 10 training.batch_size 0.0 +610 10 training.label_smoothing 0.5792517663854303 +610 11 model.embedding_dim 0.0 +610 11 optimizer.lr 0.007105208804148887 +610 11 training.batch_size 0.0 +610 11 training.label_smoothing 0.1578133567759729 +610 12 model.embedding_dim 1.0 +610 12 optimizer.lr 
0.0015441303111680858 +610 12 training.batch_size 0.0 +610 12 training.label_smoothing 0.005438599847725358 +610 13 model.embedding_dim 1.0 +610 13 optimizer.lr 0.013036313753650037 +610 13 training.batch_size 0.0 +610 13 training.label_smoothing 0.022682529210525913 +610 14 model.embedding_dim 0.0 +610 14 optimizer.lr 0.0013109049816967418 +610 14 training.batch_size 2.0 +610 14 training.label_smoothing 0.011975775018216075 +610 15 model.embedding_dim 0.0 +610 15 optimizer.lr 0.0010656483420578458 +610 15 training.batch_size 0.0 +610 15 training.label_smoothing 0.15882203402918804 +610 16 model.embedding_dim 1.0 +610 16 optimizer.lr 0.011705152056052803 +610 16 training.batch_size 2.0 +610 16 training.label_smoothing 0.8658784440251936 +610 17 model.embedding_dim 1.0 +610 17 optimizer.lr 0.009508933091903315 +610 17 training.batch_size 1.0 +610 17 training.label_smoothing 0.002311408108170752 +610 18 model.embedding_dim 0.0 +610 18 optimizer.lr 0.007076659159863671 +610 18 training.batch_size 1.0 +610 18 training.label_smoothing 0.0048774015216863295 +610 19 model.embedding_dim 2.0 +610 19 optimizer.lr 0.09058967122757827 +610 19 training.batch_size 0.0 +610 19 training.label_smoothing 0.42406887381487535 +610 20 model.embedding_dim 1.0 +610 20 optimizer.lr 0.002407755364644885 +610 20 training.batch_size 0.0 +610 20 training.label_smoothing 0.0013455825325493346 +610 21 model.embedding_dim 0.0 +610 21 optimizer.lr 0.009287031454041248 +610 21 training.batch_size 2.0 +610 21 training.label_smoothing 0.0019170072251758292 +610 22 model.embedding_dim 2.0 +610 22 optimizer.lr 0.0012486729333106422 +610 22 training.batch_size 2.0 +610 22 training.label_smoothing 0.5387458350258166 +610 23 model.embedding_dim 0.0 +610 23 optimizer.lr 0.0012230613519938697 +610 23 training.batch_size 1.0 +610 23 training.label_smoothing 0.036996941920412925 +610 24 model.embedding_dim 1.0 +610 24 optimizer.lr 0.01264593695008429 +610 24 training.batch_size 0.0 +610 24 
training.label_smoothing 0.015733925336193846 +610 25 model.embedding_dim 0.0 +610 25 optimizer.lr 0.011367618889295678 +610 25 training.batch_size 1.0 +610 25 training.label_smoothing 0.006430763051593624 +610 26 model.embedding_dim 0.0 +610 26 optimizer.lr 0.002981986116794665 +610 26 training.batch_size 1.0 +610 26 training.label_smoothing 0.002219371301401444 +610 27 model.embedding_dim 0.0 +610 27 optimizer.lr 0.02999286521935521 +610 27 training.batch_size 1.0 +610 27 training.label_smoothing 0.004395663499429779 +610 28 model.embedding_dim 2.0 +610 28 optimizer.lr 0.005593756989061095 +610 28 training.batch_size 2.0 +610 28 training.label_smoothing 0.1848017345942436 +610 29 model.embedding_dim 0.0 +610 29 optimizer.lr 0.00484856767094708 +610 29 training.batch_size 2.0 +610 29 training.label_smoothing 0.0024628231766185425 +610 30 model.embedding_dim 2.0 +610 30 optimizer.lr 0.0016925746367689766 +610 30 training.batch_size 2.0 +610 30 training.label_smoothing 0.14726261057440518 +610 31 model.embedding_dim 1.0 +610 31 optimizer.lr 0.06444683709531876 +610 31 training.batch_size 1.0 +610 31 training.label_smoothing 0.001897912577200655 +610 32 model.embedding_dim 0.0 +610 32 optimizer.lr 0.001087223400208631 +610 32 training.batch_size 2.0 +610 32 training.label_smoothing 0.003762999213525508 +610 33 model.embedding_dim 1.0 +610 33 optimizer.lr 0.006220821175084754 +610 33 training.batch_size 0.0 +610 33 training.label_smoothing 0.06786444239027806 +610 34 model.embedding_dim 2.0 +610 34 optimizer.lr 0.0030713778559900407 +610 34 training.batch_size 1.0 +610 34 training.label_smoothing 0.2469188797308864 +610 35 model.embedding_dim 0.0 +610 35 optimizer.lr 0.0012968188399496406 +610 35 training.batch_size 1.0 +610 35 training.label_smoothing 0.07856185234201606 +610 36 model.embedding_dim 0.0 +610 36 optimizer.lr 0.007154746334291086 +610 36 training.batch_size 2.0 +610 36 training.label_smoothing 0.023132332004199188 +610 37 model.embedding_dim 0.0 +610 37 
optimizer.lr 0.0022112037862255744 +610 37 training.batch_size 2.0 +610 37 training.label_smoothing 0.0013402854161295619 +610 38 model.embedding_dim 2.0 +610 38 optimizer.lr 0.07320745561461321 +610 38 training.batch_size 1.0 +610 38 training.label_smoothing 0.25364657195223705 +610 39 model.embedding_dim 1.0 +610 39 optimizer.lr 0.028299785191699782 +610 39 training.batch_size 2.0 +610 39 training.label_smoothing 0.06370130113802913 +610 40 model.embedding_dim 1.0 +610 40 optimizer.lr 0.0023436912343602987 +610 40 training.batch_size 2.0 +610 40 training.label_smoothing 0.019307005519494085 +610 41 model.embedding_dim 0.0 +610 41 optimizer.lr 0.00175087699714243 +610 41 training.batch_size 1.0 +610 41 training.label_smoothing 0.1394626027038473 +610 42 model.embedding_dim 1.0 +610 42 optimizer.lr 0.012746395611877667 +610 42 training.batch_size 1.0 +610 42 training.label_smoothing 0.5928650430352389 +610 43 model.embedding_dim 2.0 +610 43 optimizer.lr 0.02436876091138057 +610 43 training.batch_size 2.0 +610 43 training.label_smoothing 0.014735136477079419 +610 44 model.embedding_dim 0.0 +610 44 optimizer.lr 0.013628655294017676 +610 44 training.batch_size 0.0 +610 44 training.label_smoothing 0.0010044312637586122 +610 45 model.embedding_dim 1.0 +610 45 optimizer.lr 0.05146738947908219 +610 45 training.batch_size 2.0 +610 45 training.label_smoothing 0.002750491814785031 +610 46 model.embedding_dim 0.0 +610 46 optimizer.lr 0.00429569946859211 +610 46 training.batch_size 0.0 +610 46 training.label_smoothing 0.21620818808122697 +610 47 model.embedding_dim 2.0 +610 47 optimizer.lr 0.07899231051965598 +610 47 training.batch_size 0.0 +610 47 training.label_smoothing 0.02959894364424219 +610 48 model.embedding_dim 0.0 +610 48 optimizer.lr 0.008803294380774929 +610 48 training.batch_size 0.0 +610 48 training.label_smoothing 0.002583383835609963 +610 49 model.embedding_dim 1.0 +610 49 optimizer.lr 0.009452421037573856 +610 49 training.batch_size 0.0 +610 49 
training.label_smoothing 0.0017815989725818338 +610 50 model.embedding_dim 2.0 +610 50 optimizer.lr 0.003061509671618249 +610 50 training.batch_size 2.0 +610 50 training.label_smoothing 0.35696870349235055 +610 51 model.embedding_dim 1.0 +610 51 optimizer.lr 0.005538660263791356 +610 51 training.batch_size 1.0 +610 51 training.label_smoothing 0.1871666643898754 +610 52 model.embedding_dim 0.0 +610 52 optimizer.lr 0.021692143337211874 +610 52 training.batch_size 2.0 +610 52 training.label_smoothing 0.03578133593248406 +610 53 model.embedding_dim 1.0 +610 53 optimizer.lr 0.014440664922670212 +610 53 training.batch_size 2.0 +610 53 training.label_smoothing 0.001244757746862052 +610 54 model.embedding_dim 0.0 +610 54 optimizer.lr 0.07116747700226753 +610 54 training.batch_size 1.0 +610 54 training.label_smoothing 0.018617564501379733 +610 55 model.embedding_dim 0.0 +610 55 optimizer.lr 0.0033585843011099815 +610 55 training.batch_size 2.0 +610 55 training.label_smoothing 0.006565020382689355 +610 56 model.embedding_dim 1.0 +610 56 optimizer.lr 0.001664993479932497 +610 56 training.batch_size 2.0 +610 56 training.label_smoothing 0.24004807549737134 +610 57 model.embedding_dim 2.0 +610 57 optimizer.lr 0.033732532593649826 +610 57 training.batch_size 1.0 +610 57 training.label_smoothing 0.3850600188752136 +610 58 model.embedding_dim 0.0 +610 58 optimizer.lr 0.003871892549237853 +610 58 training.batch_size 1.0 +610 58 training.label_smoothing 0.016565206771107944 +610 59 model.embedding_dim 2.0 +610 59 optimizer.lr 0.009037534487224361 +610 59 training.batch_size 2.0 +610 59 training.label_smoothing 0.08149222946325357 +610 60 model.embedding_dim 0.0 +610 60 optimizer.lr 0.01531525316719952 +610 60 training.batch_size 1.0 +610 60 training.label_smoothing 0.5569117549219013 +610 61 model.embedding_dim 1.0 +610 61 optimizer.lr 0.07791281687887809 +610 61 training.batch_size 2.0 +610 61 training.label_smoothing 0.1170192785279016 +610 62 model.embedding_dim 2.0 +610 62 
optimizer.lr 0.0027139938155336424 +610 62 training.batch_size 1.0 +610 62 training.label_smoothing 0.04265766821406531 +610 63 model.embedding_dim 0.0 +610 63 optimizer.lr 0.0470929463428668 +610 63 training.batch_size 2.0 +610 63 training.label_smoothing 0.005828569367754665 +610 64 model.embedding_dim 1.0 +610 64 optimizer.lr 0.06467856599842985 +610 64 training.batch_size 2.0 +610 64 training.label_smoothing 0.10937873460441372 +610 65 model.embedding_dim 1.0 +610 65 optimizer.lr 0.025343597670208405 +610 65 training.batch_size 2.0 +610 65 training.label_smoothing 0.0025588437339935147 +610 66 model.embedding_dim 1.0 +610 66 optimizer.lr 0.008545280169707648 +610 66 training.batch_size 0.0 +610 66 training.label_smoothing 0.06984390475925212 +610 67 model.embedding_dim 0.0 +610 67 optimizer.lr 0.0011632268994969182 +610 67 training.batch_size 0.0 +610 67 training.label_smoothing 0.0013801066685211565 +610 68 model.embedding_dim 2.0 +610 68 optimizer.lr 0.030333082539367857 +610 68 training.batch_size 2.0 +610 68 training.label_smoothing 0.0029786358064744826 +610 69 model.embedding_dim 2.0 +610 69 optimizer.lr 0.011026126354872481 +610 69 training.batch_size 1.0 +610 69 training.label_smoothing 0.06507431998596044 +610 70 model.embedding_dim 0.0 +610 70 optimizer.lr 0.0015137703106080434 +610 70 training.batch_size 1.0 +610 70 training.label_smoothing 0.0017307947534258416 +610 71 model.embedding_dim 1.0 +610 71 optimizer.lr 0.010701950912400034 +610 71 training.batch_size 0.0 +610 71 training.label_smoothing 0.004747850569210181 +610 72 model.embedding_dim 0.0 +610 72 optimizer.lr 0.007218174807305277 +610 72 training.batch_size 2.0 +610 72 training.label_smoothing 0.0400570355224572 +610 73 model.embedding_dim 2.0 +610 73 optimizer.lr 0.04991286056163713 +610 73 training.batch_size 0.0 +610 73 training.label_smoothing 0.3228249655658752 +610 74 model.embedding_dim 1.0 +610 74 optimizer.lr 0.0034904369395624872 +610 74 training.batch_size 0.0 +610 74 
training.label_smoothing 0.040715093980995154 +610 75 model.embedding_dim 2.0 +610 75 optimizer.lr 0.001528317126884102 +610 75 training.batch_size 0.0 +610 75 training.label_smoothing 0.15801474492496956 +610 76 model.embedding_dim 1.0 +610 76 optimizer.lr 0.005840517348338818 +610 76 training.batch_size 0.0 +610 76 training.label_smoothing 0.06556739184084014 +610 77 model.embedding_dim 2.0 +610 77 optimizer.lr 0.0022967984371452107 +610 77 training.batch_size 0.0 +610 77 training.label_smoothing 0.05596053448800031 +610 78 model.embedding_dim 0.0 +610 78 optimizer.lr 0.08186905572222583 +610 78 training.batch_size 0.0 +610 78 training.label_smoothing 0.0028885834511045787 +610 79 model.embedding_dim 2.0 +610 79 optimizer.lr 0.0011102638540574378 +610 79 training.batch_size 2.0 +610 79 training.label_smoothing 0.0014287211174434647 +610 80 model.embedding_dim 0.0 +610 80 optimizer.lr 0.027147743063687477 +610 80 training.batch_size 1.0 +610 80 training.label_smoothing 0.6034113371937138 +610 81 model.embedding_dim 2.0 +610 81 optimizer.lr 0.008411235996093079 +610 81 training.batch_size 1.0 +610 81 training.label_smoothing 0.7439814756344207 +610 82 model.embedding_dim 1.0 +610 82 optimizer.lr 0.04783457162619617 +610 82 training.batch_size 0.0 +610 82 training.label_smoothing 0.006372665732352648 +610 83 model.embedding_dim 0.0 +610 83 optimizer.lr 0.008310968891722417 +610 83 training.batch_size 1.0 +610 83 training.label_smoothing 0.0024819748954083756 +610 84 model.embedding_dim 1.0 +610 84 optimizer.lr 0.09311821996771047 +610 84 training.batch_size 2.0 +610 84 training.label_smoothing 0.06368154220046793 +610 85 model.embedding_dim 1.0 +610 85 optimizer.lr 0.0030507256876322214 +610 85 training.batch_size 0.0 +610 85 training.label_smoothing 0.07982336146450977 +610 86 model.embedding_dim 0.0 +610 86 optimizer.lr 0.01001344951310112 +610 86 training.batch_size 0.0 +610 86 training.label_smoothing 0.23468848631360315 +610 87 model.embedding_dim 0.0 +610 87 
optimizer.lr 0.04427633485114406 +610 87 training.batch_size 2.0 +610 87 training.label_smoothing 0.005613128516290002 +610 88 model.embedding_dim 0.0 +610 88 optimizer.lr 0.008766223709322793 +610 88 training.batch_size 1.0 +610 88 training.label_smoothing 0.00309689939735012 +610 89 model.embedding_dim 2.0 +610 89 optimizer.lr 0.019776549750586098 +610 89 training.batch_size 2.0 +610 89 training.label_smoothing 0.009042963635251625 +610 90 model.embedding_dim 0.0 +610 90 optimizer.lr 0.00601411835767159 +610 90 training.batch_size 0.0 +610 90 training.label_smoothing 0.004734601056208546 +610 91 model.embedding_dim 2.0 +610 91 optimizer.lr 0.08362529888787922 +610 91 training.batch_size 1.0 +610 91 training.label_smoothing 0.0018069963332031382 +610 92 model.embedding_dim 0.0 +610 92 optimizer.lr 0.006126823303851118 +610 92 training.batch_size 1.0 +610 92 training.label_smoothing 0.598594870696604 +610 93 model.embedding_dim 0.0 +610 93 optimizer.lr 0.029019582494951936 +610 93 training.batch_size 2.0 +610 93 training.label_smoothing 0.015227190969305011 +610 94 model.embedding_dim 2.0 +610 94 optimizer.lr 0.09686191462083484 +610 94 training.batch_size 2.0 +610 94 training.label_smoothing 0.0033092078980560074 +610 95 model.embedding_dim 2.0 +610 95 optimizer.lr 0.08301215145405216 +610 95 training.batch_size 2.0 +610 95 training.label_smoothing 0.035700045330437136 +610 96 model.embedding_dim 0.0 +610 96 optimizer.lr 0.0027012618647808627 +610 96 training.batch_size 2.0 +610 96 training.label_smoothing 0.08478247651332607 +610 97 model.embedding_dim 1.0 +610 97 optimizer.lr 0.022146346430604074 +610 97 training.batch_size 0.0 +610 97 training.label_smoothing 0.001039339488885514 +610 98 model.embedding_dim 2.0 +610 98 optimizer.lr 0.0013656049520572775 +610 98 training.batch_size 1.0 +610 98 training.label_smoothing 0.004218436781422068 +610 99 model.embedding_dim 1.0 +610 99 optimizer.lr 0.0024158413102985496 +610 99 training.batch_size 0.0 +610 99 
training.label_smoothing 0.2699527318988714 +610 100 model.embedding_dim 1.0 +610 100 optimizer.lr 0.00830216068068084 +610 100 training.batch_size 1.0 +610 100 training.label_smoothing 0.03295757421188453 +610 1 dataset """kinships""" +610 1 model """rotate""" +610 1 loss """softplus""" +610 1 regularizer """no""" +610 1 optimizer """adam""" +610 1 training_loop """lcwa""" +610 1 evaluator """rankbased""" +610 2 dataset """kinships""" +610 2 model """rotate""" +610 2 loss """softplus""" +610 2 regularizer """no""" +610 2 optimizer """adam""" +610 2 training_loop """lcwa""" +610 2 evaluator """rankbased""" +610 3 dataset """kinships""" +610 3 model """rotate""" +610 3 loss """softplus""" +610 3 regularizer """no""" +610 3 optimizer """adam""" +610 3 training_loop """lcwa""" +610 3 evaluator """rankbased""" +610 4 dataset """kinships""" +610 4 model """rotate""" +610 4 loss """softplus""" +610 4 regularizer """no""" +610 4 optimizer """adam""" +610 4 training_loop """lcwa""" +610 4 evaluator """rankbased""" +610 5 dataset """kinships""" +610 5 model """rotate""" +610 5 loss """softplus""" +610 5 regularizer """no""" +610 5 optimizer """adam""" +610 5 training_loop """lcwa""" +610 5 evaluator """rankbased""" +610 6 dataset """kinships""" +610 6 model """rotate""" +610 6 loss """softplus""" +610 6 regularizer """no""" +610 6 optimizer """adam""" +610 6 training_loop """lcwa""" +610 6 evaluator """rankbased""" +610 7 dataset """kinships""" +610 7 model """rotate""" +610 7 loss """softplus""" +610 7 regularizer """no""" +610 7 optimizer """adam""" +610 7 training_loop """lcwa""" +610 7 evaluator """rankbased""" +610 8 dataset """kinships""" +610 8 model """rotate""" +610 8 loss """softplus""" +610 8 regularizer """no""" +610 8 optimizer """adam""" +610 8 training_loop """lcwa""" +610 8 evaluator """rankbased""" +610 9 dataset """kinships""" +610 9 model """rotate""" +610 9 loss """softplus""" +610 9 regularizer """no""" +610 9 optimizer """adam""" +610 9 training_loop 
"""lcwa""" +610 9 evaluator """rankbased""" +610 10 dataset """kinships""" +610 10 model """rotate""" +610 10 loss """softplus""" +610 10 regularizer """no""" +610 10 optimizer """adam""" +610 10 training_loop """lcwa""" +610 10 evaluator """rankbased""" +610 11 dataset """kinships""" +610 11 model """rotate""" +610 11 loss """softplus""" +610 11 regularizer """no""" +610 11 optimizer """adam""" +610 11 training_loop """lcwa""" +610 11 evaluator """rankbased""" +610 12 dataset """kinships""" +610 12 model """rotate""" +610 12 loss """softplus""" +610 12 regularizer """no""" +610 12 optimizer """adam""" +610 12 training_loop """lcwa""" +610 12 evaluator """rankbased""" +610 13 dataset """kinships""" +610 13 model """rotate""" +610 13 loss """softplus""" +610 13 regularizer """no""" +610 13 optimizer """adam""" +610 13 training_loop """lcwa""" +610 13 evaluator """rankbased""" +610 14 dataset """kinships""" +610 14 model """rotate""" +610 14 loss """softplus""" +610 14 regularizer """no""" +610 14 optimizer """adam""" +610 14 training_loop """lcwa""" +610 14 evaluator """rankbased""" +610 15 dataset """kinships""" +610 15 model """rotate""" +610 15 loss """softplus""" +610 15 regularizer """no""" +610 15 optimizer """adam""" +610 15 training_loop """lcwa""" +610 15 evaluator """rankbased""" +610 16 dataset """kinships""" +610 16 model """rotate""" +610 16 loss """softplus""" +610 16 regularizer """no""" +610 16 optimizer """adam""" +610 16 training_loop """lcwa""" +610 16 evaluator """rankbased""" +610 17 dataset """kinships""" +610 17 model """rotate""" +610 17 loss """softplus""" +610 17 regularizer """no""" +610 17 optimizer """adam""" +610 17 training_loop """lcwa""" +610 17 evaluator """rankbased""" +610 18 dataset """kinships""" +610 18 model """rotate""" +610 18 loss """softplus""" +610 18 regularizer """no""" +610 18 optimizer """adam""" +610 18 training_loop """lcwa""" +610 18 evaluator """rankbased""" +610 19 dataset """kinships""" +610 19 model 
"""rotate""" +610 19 loss """softplus""" +610 19 regularizer """no""" +610 19 optimizer """adam""" +610 19 training_loop """lcwa""" +610 19 evaluator """rankbased""" +610 20 dataset """kinships""" +610 20 model """rotate""" +610 20 loss """softplus""" +610 20 regularizer """no""" +610 20 optimizer """adam""" +610 20 training_loop """lcwa""" +610 20 evaluator """rankbased""" +610 21 dataset """kinships""" +610 21 model """rotate""" +610 21 loss """softplus""" +610 21 regularizer """no""" +610 21 optimizer """adam""" +610 21 training_loop """lcwa""" +610 21 evaluator """rankbased""" +610 22 dataset """kinships""" +610 22 model """rotate""" +610 22 loss """softplus""" +610 22 regularizer """no""" +610 22 optimizer """adam""" +610 22 training_loop """lcwa""" +610 22 evaluator """rankbased""" +610 23 dataset """kinships""" +610 23 model """rotate""" +610 23 loss """softplus""" +610 23 regularizer """no""" +610 23 optimizer """adam""" +610 23 training_loop """lcwa""" +610 23 evaluator """rankbased""" +610 24 dataset """kinships""" +610 24 model """rotate""" +610 24 loss """softplus""" +610 24 regularizer """no""" +610 24 optimizer """adam""" +610 24 training_loop """lcwa""" +610 24 evaluator """rankbased""" +610 25 dataset """kinships""" +610 25 model """rotate""" +610 25 loss """softplus""" +610 25 regularizer """no""" +610 25 optimizer """adam""" +610 25 training_loop """lcwa""" +610 25 evaluator """rankbased""" +610 26 dataset """kinships""" +610 26 model """rotate""" +610 26 loss """softplus""" +610 26 regularizer """no""" +610 26 optimizer """adam""" +610 26 training_loop """lcwa""" +610 26 evaluator """rankbased""" +610 27 dataset """kinships""" +610 27 model """rotate""" +610 27 loss """softplus""" +610 27 regularizer """no""" +610 27 optimizer """adam""" +610 27 training_loop """lcwa""" +610 27 evaluator """rankbased""" +610 28 dataset """kinships""" +610 28 model """rotate""" +610 28 loss """softplus""" +610 28 regularizer """no""" +610 28 optimizer """adam""" 
+610 28 training_loop """lcwa""" +610 28 evaluator """rankbased""" +610 29 dataset """kinships""" +610 29 model """rotate""" +610 29 loss """softplus""" +610 29 regularizer """no""" +610 29 optimizer """adam""" +610 29 training_loop """lcwa""" +610 29 evaluator """rankbased""" +610 30 dataset """kinships""" +610 30 model """rotate""" +610 30 loss """softplus""" +610 30 regularizer """no""" +610 30 optimizer """adam""" +610 30 training_loop """lcwa""" +610 30 evaluator """rankbased""" +610 31 dataset """kinships""" +610 31 model """rotate""" +610 31 loss """softplus""" +610 31 regularizer """no""" +610 31 optimizer """adam""" +610 31 training_loop """lcwa""" +610 31 evaluator """rankbased""" +610 32 dataset """kinships""" +610 32 model """rotate""" +610 32 loss """softplus""" +610 32 regularizer """no""" +610 32 optimizer """adam""" +610 32 training_loop """lcwa""" +610 32 evaluator """rankbased""" +610 33 dataset """kinships""" +610 33 model """rotate""" +610 33 loss """softplus""" +610 33 regularizer """no""" +610 33 optimizer """adam""" +610 33 training_loop """lcwa""" +610 33 evaluator """rankbased""" +610 34 dataset """kinships""" +610 34 model """rotate""" +610 34 loss """softplus""" +610 34 regularizer """no""" +610 34 optimizer """adam""" +610 34 training_loop """lcwa""" +610 34 evaluator """rankbased""" +610 35 dataset """kinships""" +610 35 model """rotate""" +610 35 loss """softplus""" +610 35 regularizer """no""" +610 35 optimizer """adam""" +610 35 training_loop """lcwa""" +610 35 evaluator """rankbased""" +610 36 dataset """kinships""" +610 36 model """rotate""" +610 36 loss """softplus""" +610 36 regularizer """no""" +610 36 optimizer """adam""" +610 36 training_loop """lcwa""" +610 36 evaluator """rankbased""" +610 37 dataset """kinships""" +610 37 model """rotate""" +610 37 loss """softplus""" +610 37 regularizer """no""" +610 37 optimizer """adam""" +610 37 training_loop """lcwa""" +610 37 evaluator """rankbased""" +610 38 dataset """kinships""" 
+610 38 model """rotate""" +610 38 loss """softplus""" +610 38 regularizer """no""" +610 38 optimizer """adam""" +610 38 training_loop """lcwa""" +610 38 evaluator """rankbased""" +610 39 dataset """kinships""" +610 39 model """rotate""" +610 39 loss """softplus""" +610 39 regularizer """no""" +610 39 optimizer """adam""" +610 39 training_loop """lcwa""" +610 39 evaluator """rankbased""" +610 40 dataset """kinships""" +610 40 model """rotate""" +610 40 loss """softplus""" +610 40 regularizer """no""" +610 40 optimizer """adam""" +610 40 training_loop """lcwa""" +610 40 evaluator """rankbased""" +610 41 dataset """kinships""" +610 41 model """rotate""" +610 41 loss """softplus""" +610 41 regularizer """no""" +610 41 optimizer """adam""" +610 41 training_loop """lcwa""" +610 41 evaluator """rankbased""" +610 42 dataset """kinships""" +610 42 model """rotate""" +610 42 loss """softplus""" +610 42 regularizer """no""" +610 42 optimizer """adam""" +610 42 training_loop """lcwa""" +610 42 evaluator """rankbased""" +610 43 dataset """kinships""" +610 43 model """rotate""" +610 43 loss """softplus""" +610 43 regularizer """no""" +610 43 optimizer """adam""" +610 43 training_loop """lcwa""" +610 43 evaluator """rankbased""" +610 44 dataset """kinships""" +610 44 model """rotate""" +610 44 loss """softplus""" +610 44 regularizer """no""" +610 44 optimizer """adam""" +610 44 training_loop """lcwa""" +610 44 evaluator """rankbased""" +610 45 dataset """kinships""" +610 45 model """rotate""" +610 45 loss """softplus""" +610 45 regularizer """no""" +610 45 optimizer """adam""" +610 45 training_loop """lcwa""" +610 45 evaluator """rankbased""" +610 46 dataset """kinships""" +610 46 model """rotate""" +610 46 loss """softplus""" +610 46 regularizer """no""" +610 46 optimizer """adam""" +610 46 training_loop """lcwa""" +610 46 evaluator """rankbased""" +610 47 dataset """kinships""" +610 47 model """rotate""" +610 47 loss """softplus""" +610 47 regularizer """no""" +610 47 
optimizer """adam""" +610 47 training_loop """lcwa""" +610 47 evaluator """rankbased""" +610 48 dataset """kinships""" +610 48 model """rotate""" +610 48 loss """softplus""" +610 48 regularizer """no""" +610 48 optimizer """adam""" +610 48 training_loop """lcwa""" +610 48 evaluator """rankbased""" +610 49 dataset """kinships""" +610 49 model """rotate""" +610 49 loss """softplus""" +610 49 regularizer """no""" +610 49 optimizer """adam""" +610 49 training_loop """lcwa""" +610 49 evaluator """rankbased""" +610 50 dataset """kinships""" +610 50 model """rotate""" +610 50 loss """softplus""" +610 50 regularizer """no""" +610 50 optimizer """adam""" +610 50 training_loop """lcwa""" +610 50 evaluator """rankbased""" +610 51 dataset """kinships""" +610 51 model """rotate""" +610 51 loss """softplus""" +610 51 regularizer """no""" +610 51 optimizer """adam""" +610 51 training_loop """lcwa""" +610 51 evaluator """rankbased""" +610 52 dataset """kinships""" +610 52 model """rotate""" +610 52 loss """softplus""" +610 52 regularizer """no""" +610 52 optimizer """adam""" +610 52 training_loop """lcwa""" +610 52 evaluator """rankbased""" +610 53 dataset """kinships""" +610 53 model """rotate""" +610 53 loss """softplus""" +610 53 regularizer """no""" +610 53 optimizer """adam""" +610 53 training_loop """lcwa""" +610 53 evaluator """rankbased""" +610 54 dataset """kinships""" +610 54 model """rotate""" +610 54 loss """softplus""" +610 54 regularizer """no""" +610 54 optimizer """adam""" +610 54 training_loop """lcwa""" +610 54 evaluator """rankbased""" +610 55 dataset """kinships""" +610 55 model """rotate""" +610 55 loss """softplus""" +610 55 regularizer """no""" +610 55 optimizer """adam""" +610 55 training_loop """lcwa""" +610 55 evaluator """rankbased""" +610 56 dataset """kinships""" +610 56 model """rotate""" +610 56 loss """softplus""" +610 56 regularizer """no""" +610 56 optimizer """adam""" +610 56 training_loop """lcwa""" +610 56 evaluator """rankbased""" +610 57 
dataset """kinships""" +610 57 model """rotate""" +610 57 loss """softplus""" +610 57 regularizer """no""" +610 57 optimizer """adam""" +610 57 training_loop """lcwa""" +610 57 evaluator """rankbased""" +610 58 dataset """kinships""" +610 58 model """rotate""" +610 58 loss """softplus""" +610 58 regularizer """no""" +610 58 optimizer """adam""" +610 58 training_loop """lcwa""" +610 58 evaluator """rankbased""" +610 59 dataset """kinships""" +610 59 model """rotate""" +610 59 loss """softplus""" +610 59 regularizer """no""" +610 59 optimizer """adam""" +610 59 training_loop """lcwa""" +610 59 evaluator """rankbased""" +610 60 dataset """kinships""" +610 60 model """rotate""" +610 60 loss """softplus""" +610 60 regularizer """no""" +610 60 optimizer """adam""" +610 60 training_loop """lcwa""" +610 60 evaluator """rankbased""" +610 61 dataset """kinships""" +610 61 model """rotate""" +610 61 loss """softplus""" +610 61 regularizer """no""" +610 61 optimizer """adam""" +610 61 training_loop """lcwa""" +610 61 evaluator """rankbased""" +610 62 dataset """kinships""" +610 62 model """rotate""" +610 62 loss """softplus""" +610 62 regularizer """no""" +610 62 optimizer """adam""" +610 62 training_loop """lcwa""" +610 62 evaluator """rankbased""" +610 63 dataset """kinships""" +610 63 model """rotate""" +610 63 loss """softplus""" +610 63 regularizer """no""" +610 63 optimizer """adam""" +610 63 training_loop """lcwa""" +610 63 evaluator """rankbased""" +610 64 dataset """kinships""" +610 64 model """rotate""" +610 64 loss """softplus""" +610 64 regularizer """no""" +610 64 optimizer """adam""" +610 64 training_loop """lcwa""" +610 64 evaluator """rankbased""" +610 65 dataset """kinships""" +610 65 model """rotate""" +610 65 loss """softplus""" +610 65 regularizer """no""" +610 65 optimizer """adam""" +610 65 training_loop """lcwa""" +610 65 evaluator """rankbased""" +610 66 dataset """kinships""" +610 66 model """rotate""" +610 66 loss """softplus""" +610 66 regularizer 
"""no""" +610 66 optimizer """adam""" +610 66 training_loop """lcwa""" +610 66 evaluator """rankbased""" +610 67 dataset """kinships""" +610 67 model """rotate""" +610 67 loss """softplus""" +610 67 regularizer """no""" +610 67 optimizer """adam""" +610 67 training_loop """lcwa""" +610 67 evaluator """rankbased""" +610 68 dataset """kinships""" +610 68 model """rotate""" +610 68 loss """softplus""" +610 68 regularizer """no""" +610 68 optimizer """adam""" +610 68 training_loop """lcwa""" +610 68 evaluator """rankbased""" +610 69 dataset """kinships""" +610 69 model """rotate""" +610 69 loss """softplus""" +610 69 regularizer """no""" +610 69 optimizer """adam""" +610 69 training_loop """lcwa""" +610 69 evaluator """rankbased""" +610 70 dataset """kinships""" +610 70 model """rotate""" +610 70 loss """softplus""" +610 70 regularizer """no""" +610 70 optimizer """adam""" +610 70 training_loop """lcwa""" +610 70 evaluator """rankbased""" +610 71 dataset """kinships""" +610 71 model """rotate""" +610 71 loss """softplus""" +610 71 regularizer """no""" +610 71 optimizer """adam""" +610 71 training_loop """lcwa""" +610 71 evaluator """rankbased""" +610 72 dataset """kinships""" +610 72 model """rotate""" +610 72 loss """softplus""" +610 72 regularizer """no""" +610 72 optimizer """adam""" +610 72 training_loop """lcwa""" +610 72 evaluator """rankbased""" +610 73 dataset """kinships""" +610 73 model """rotate""" +610 73 loss """softplus""" +610 73 regularizer """no""" +610 73 optimizer """adam""" +610 73 training_loop """lcwa""" +610 73 evaluator """rankbased""" +610 74 dataset """kinships""" +610 74 model """rotate""" +610 74 loss """softplus""" +610 74 regularizer """no""" +610 74 optimizer """adam""" +610 74 training_loop """lcwa""" +610 74 evaluator """rankbased""" +610 75 dataset """kinships""" +610 75 model """rotate""" +610 75 loss """softplus""" +610 75 regularizer """no""" +610 75 optimizer """adam""" +610 75 training_loop """lcwa""" +610 75 evaluator 
"""rankbased""" +610 76 dataset """kinships""" +610 76 model """rotate""" +610 76 loss """softplus""" +610 76 regularizer """no""" +610 76 optimizer """adam""" +610 76 training_loop """lcwa""" +610 76 evaluator """rankbased""" +610 77 dataset """kinships""" +610 77 model """rotate""" +610 77 loss """softplus""" +610 77 regularizer """no""" +610 77 optimizer """adam""" +610 77 training_loop """lcwa""" +610 77 evaluator """rankbased""" +610 78 dataset """kinships""" +610 78 model """rotate""" +610 78 loss """softplus""" +610 78 regularizer """no""" +610 78 optimizer """adam""" +610 78 training_loop """lcwa""" +610 78 evaluator """rankbased""" +610 79 dataset """kinships""" +610 79 model """rotate""" +610 79 loss """softplus""" +610 79 regularizer """no""" +610 79 optimizer """adam""" +610 79 training_loop """lcwa""" +610 79 evaluator """rankbased""" +610 80 dataset """kinships""" +610 80 model """rotate""" +610 80 loss """softplus""" +610 80 regularizer """no""" +610 80 optimizer """adam""" +610 80 training_loop """lcwa""" +610 80 evaluator """rankbased""" +610 81 dataset """kinships""" +610 81 model """rotate""" +610 81 loss """softplus""" +610 81 regularizer """no""" +610 81 optimizer """adam""" +610 81 training_loop """lcwa""" +610 81 evaluator """rankbased""" +610 82 dataset """kinships""" +610 82 model """rotate""" +610 82 loss """softplus""" +610 82 regularizer """no""" +610 82 optimizer """adam""" +610 82 training_loop """lcwa""" +610 82 evaluator """rankbased""" +610 83 dataset """kinships""" +610 83 model """rotate""" +610 83 loss """softplus""" +610 83 regularizer """no""" +610 83 optimizer """adam""" +610 83 training_loop """lcwa""" +610 83 evaluator """rankbased""" +610 84 dataset """kinships""" +610 84 model """rotate""" +610 84 loss """softplus""" +610 84 regularizer """no""" +610 84 optimizer """adam""" +610 84 training_loop """lcwa""" +610 84 evaluator """rankbased""" +610 85 dataset """kinships""" +610 85 model """rotate""" +610 85 loss 
"""softplus""" +610 85 regularizer """no""" +610 85 optimizer """adam""" +610 85 training_loop """lcwa""" +610 85 evaluator """rankbased""" +610 86 dataset """kinships""" +610 86 model """rotate""" +610 86 loss """softplus""" +610 86 regularizer """no""" +610 86 optimizer """adam""" +610 86 training_loop """lcwa""" +610 86 evaluator """rankbased""" +610 87 dataset """kinships""" +610 87 model """rotate""" +610 87 loss """softplus""" +610 87 regularizer """no""" +610 87 optimizer """adam""" +610 87 training_loop """lcwa""" +610 87 evaluator """rankbased""" +610 88 dataset """kinships""" +610 88 model """rotate""" +610 88 loss """softplus""" +610 88 regularizer """no""" +610 88 optimizer """adam""" +610 88 training_loop """lcwa""" +610 88 evaluator """rankbased""" +610 89 dataset """kinships""" +610 89 model """rotate""" +610 89 loss """softplus""" +610 89 regularizer """no""" +610 89 optimizer """adam""" +610 89 training_loop """lcwa""" +610 89 evaluator """rankbased""" +610 90 dataset """kinships""" +610 90 model """rotate""" +610 90 loss """softplus""" +610 90 regularizer """no""" +610 90 optimizer """adam""" +610 90 training_loop """lcwa""" +610 90 evaluator """rankbased""" +610 91 dataset """kinships""" +610 91 model """rotate""" +610 91 loss """softplus""" +610 91 regularizer """no""" +610 91 optimizer """adam""" +610 91 training_loop """lcwa""" +610 91 evaluator """rankbased""" +610 92 dataset """kinships""" +610 92 model """rotate""" +610 92 loss """softplus""" +610 92 regularizer """no""" +610 92 optimizer """adam""" +610 92 training_loop """lcwa""" +610 92 evaluator """rankbased""" +610 93 dataset """kinships""" +610 93 model """rotate""" +610 93 loss """softplus""" +610 93 regularizer """no""" +610 93 optimizer """adam""" +610 93 training_loop """lcwa""" +610 93 evaluator """rankbased""" +610 94 dataset """kinships""" +610 94 model """rotate""" +610 94 loss """softplus""" +610 94 regularizer """no""" +610 94 optimizer """adam""" +610 94 training_loop 
"""lcwa""" +610 94 evaluator """rankbased""" +610 95 dataset """kinships""" +610 95 model """rotate""" +610 95 loss """softplus""" +610 95 regularizer """no""" +610 95 optimizer """adam""" +610 95 training_loop """lcwa""" +610 95 evaluator """rankbased""" +610 96 dataset """kinships""" +610 96 model """rotate""" +610 96 loss """softplus""" +610 96 regularizer """no""" +610 96 optimizer """adam""" +610 96 training_loop """lcwa""" +610 96 evaluator """rankbased""" +610 97 dataset """kinships""" +610 97 model """rotate""" +610 97 loss """softplus""" +610 97 regularizer """no""" +610 97 optimizer """adam""" +610 97 training_loop """lcwa""" +610 97 evaluator """rankbased""" +610 98 dataset """kinships""" +610 98 model """rotate""" +610 98 loss """softplus""" +610 98 regularizer """no""" +610 98 optimizer """adam""" +610 98 training_loop """lcwa""" +610 98 evaluator """rankbased""" +610 99 dataset """kinships""" +610 99 model """rotate""" +610 99 loss """softplus""" +610 99 regularizer """no""" +610 99 optimizer """adam""" +610 99 training_loop """lcwa""" +610 99 evaluator """rankbased""" +610 100 dataset """kinships""" +610 100 model """rotate""" +610 100 loss """softplus""" +610 100 regularizer """no""" +610 100 optimizer """adam""" +610 100 training_loop """lcwa""" +610 100 evaluator """rankbased""" +611 1 model.embedding_dim 1.0 +611 1 loss.margin 1.3013355754923885 +611 1 optimizer.lr 0.004163045466585209 +611 1 negative_sampler.num_negs_per_pos 86.0 +611 1 training.batch_size 1.0 +611 2 model.embedding_dim 0.0 +611 2 loss.margin 4.731917411212808 +611 2 optimizer.lr 0.02794249031899793 +611 2 negative_sampler.num_negs_per_pos 63.0 +611 2 training.batch_size 0.0 +611 3 model.embedding_dim 2.0 +611 3 loss.margin 3.9270763257484487 +611 3 optimizer.lr 0.005839690741541189 +611 3 negative_sampler.num_negs_per_pos 58.0 +611 3 training.batch_size 0.0 +611 4 model.embedding_dim 0.0 +611 4 loss.margin 3.3940698659838864 +611 4 optimizer.lr 0.001170825005784953 +611 4 
negative_sampler.num_negs_per_pos 57.0 +611 4 training.batch_size 0.0 +611 5 model.embedding_dim 2.0 +611 5 loss.margin 9.88976011023142 +611 5 optimizer.lr 0.016178509448265925 +611 5 negative_sampler.num_negs_per_pos 91.0 +611 5 training.batch_size 0.0 +611 6 model.embedding_dim 2.0 +611 6 loss.margin 5.335002353524482 +611 6 optimizer.lr 0.0038890744998046453 +611 6 negative_sampler.num_negs_per_pos 2.0 +611 6 training.batch_size 0.0 +611 7 model.embedding_dim 1.0 +611 7 loss.margin 1.4263977501170741 +611 7 optimizer.lr 0.0017857249089966469 +611 7 negative_sampler.num_negs_per_pos 33.0 +611 7 training.batch_size 2.0 +611 8 model.embedding_dim 0.0 +611 8 loss.margin 9.21743681737826 +611 8 optimizer.lr 0.015327886208494632 +611 8 negative_sampler.num_negs_per_pos 35.0 +611 8 training.batch_size 2.0 +611 9 model.embedding_dim 2.0 +611 9 loss.margin 9.594131364073371 +611 9 optimizer.lr 0.01845364029430442 +611 9 negative_sampler.num_negs_per_pos 61.0 +611 9 training.batch_size 0.0 +611 10 model.embedding_dim 1.0 +611 10 loss.margin 0.9401341128509622 +611 10 optimizer.lr 0.0018594854484779793 +611 10 negative_sampler.num_negs_per_pos 42.0 +611 10 training.batch_size 0.0 +611 11 model.embedding_dim 2.0 +611 11 loss.margin 5.782746036362189 +611 11 optimizer.lr 0.0016049507270461283 +611 11 negative_sampler.num_negs_per_pos 28.0 +611 11 training.batch_size 1.0 +611 12 model.embedding_dim 1.0 +611 12 loss.margin 2.342071072758491 +611 12 optimizer.lr 0.010509266530203877 +611 12 negative_sampler.num_negs_per_pos 25.0 +611 12 training.batch_size 0.0 +611 13 model.embedding_dim 1.0 +611 13 loss.margin 8.968783332706002 +611 13 optimizer.lr 0.007686406461951265 +611 13 negative_sampler.num_negs_per_pos 19.0 +611 13 training.batch_size 0.0 +611 14 model.embedding_dim 2.0 +611 14 loss.margin 2.692416879259238 +611 14 optimizer.lr 0.0011054471424922204 +611 14 negative_sampler.num_negs_per_pos 13.0 +611 14 training.batch_size 0.0 +611 15 model.embedding_dim 0.0 +611 15 
loss.margin 6.911218856615256 +611 15 optimizer.lr 0.004307517147516672 +611 15 negative_sampler.num_negs_per_pos 61.0 +611 15 training.batch_size 2.0 +611 16 model.embedding_dim 1.0 +611 16 loss.margin 0.9265115733560705 +611 16 optimizer.lr 0.021276562573965256 +611 16 negative_sampler.num_negs_per_pos 25.0 +611 16 training.batch_size 2.0 +611 17 model.embedding_dim 0.0 +611 17 loss.margin 7.725978605797131 +611 17 optimizer.lr 0.008258363476746541 +611 17 negative_sampler.num_negs_per_pos 82.0 +611 17 training.batch_size 2.0 +611 18 model.embedding_dim 2.0 +611 18 loss.margin 3.0828601124820585 +611 18 optimizer.lr 0.0350346479482856 +611 18 negative_sampler.num_negs_per_pos 39.0 +611 18 training.batch_size 2.0 +611 19 model.embedding_dim 2.0 +611 19 loss.margin 3.3413326244267654 +611 19 optimizer.lr 0.01257778510890773 +611 19 negative_sampler.num_negs_per_pos 26.0 +611 19 training.batch_size 1.0 +611 20 model.embedding_dim 1.0 +611 20 loss.margin 0.8355433983979415 +611 20 optimizer.lr 0.0010717942876836994 +611 20 negative_sampler.num_negs_per_pos 63.0 +611 20 training.batch_size 2.0 +611 21 model.embedding_dim 0.0 +611 21 loss.margin 2.045778902651256 +611 21 optimizer.lr 0.005876386245873238 +611 21 negative_sampler.num_negs_per_pos 92.0 +611 21 training.batch_size 0.0 +611 22 model.embedding_dim 0.0 +611 22 loss.margin 9.397403625319333 +611 22 optimizer.lr 0.030011051544254506 +611 22 negative_sampler.num_negs_per_pos 38.0 +611 22 training.batch_size 1.0 +611 23 model.embedding_dim 1.0 +611 23 loss.margin 4.69780967864814 +611 23 optimizer.lr 0.08704146611703682 +611 23 negative_sampler.num_negs_per_pos 77.0 +611 23 training.batch_size 1.0 +611 24 model.embedding_dim 1.0 +611 24 loss.margin 7.546269643056332 +611 24 optimizer.lr 0.08746458097148757 +611 24 negative_sampler.num_negs_per_pos 56.0 +611 24 training.batch_size 0.0 +611 25 model.embedding_dim 1.0 +611 25 loss.margin 1.8521192006531415 +611 25 optimizer.lr 0.010256267752027568 +611 25 
negative_sampler.num_negs_per_pos 65.0 +611 25 training.batch_size 2.0 +611 26 model.embedding_dim 1.0 +611 26 loss.margin 4.215299407651409 +611 26 optimizer.lr 0.0026904159587802174 +611 26 negative_sampler.num_negs_per_pos 98.0 +611 26 training.batch_size 2.0 +611 27 model.embedding_dim 1.0 +611 27 loss.margin 4.346299512974568 +611 27 optimizer.lr 0.09187514325232769 +611 27 negative_sampler.num_negs_per_pos 74.0 +611 27 training.batch_size 2.0 +611 28 model.embedding_dim 1.0 +611 28 loss.margin 4.885439653160006 +611 28 optimizer.lr 0.014509430873557678 +611 28 negative_sampler.num_negs_per_pos 21.0 +611 28 training.batch_size 1.0 +611 29 model.embedding_dim 0.0 +611 29 loss.margin 5.497698464569523 +611 29 optimizer.lr 0.015650429255112456 +611 29 negative_sampler.num_negs_per_pos 31.0 +611 29 training.batch_size 1.0 +611 30 model.embedding_dim 0.0 +611 30 loss.margin 0.9389520778112392 +611 30 optimizer.lr 0.04572876912064594 +611 30 negative_sampler.num_negs_per_pos 45.0 +611 30 training.batch_size 0.0 +611 31 model.embedding_dim 2.0 +611 31 loss.margin 9.368376038260104 +611 31 optimizer.lr 0.006060702888367581 +611 31 negative_sampler.num_negs_per_pos 78.0 +611 31 training.batch_size 1.0 +611 32 model.embedding_dim 2.0 +611 32 loss.margin 4.552045878259373 +611 32 optimizer.lr 0.0011272126840698232 +611 32 negative_sampler.num_negs_per_pos 97.0 +611 32 training.batch_size 2.0 +611 33 model.embedding_dim 0.0 +611 33 loss.margin 5.790575701190127 +611 33 optimizer.lr 0.06417433482095852 +611 33 negative_sampler.num_negs_per_pos 69.0 +611 33 training.batch_size 1.0 +611 34 model.embedding_dim 1.0 +611 34 loss.margin 9.296881326737147 +611 34 optimizer.lr 0.012415419353917097 +611 34 negative_sampler.num_negs_per_pos 90.0 +611 34 training.batch_size 0.0 +611 35 model.embedding_dim 1.0 +611 35 loss.margin 7.279113871249118 +611 35 optimizer.lr 0.002283746990105487 +611 35 negative_sampler.num_negs_per_pos 60.0 +611 35 training.batch_size 1.0 +611 36 
model.embedding_dim 1.0 +611 36 loss.margin 1.5321608357911818 +611 36 optimizer.lr 0.05023642833712415 +611 36 negative_sampler.num_negs_per_pos 70.0 +611 36 training.batch_size 2.0 +611 37 model.embedding_dim 1.0 +611 37 loss.margin 3.094388166172925 +611 37 optimizer.lr 0.002095487193965905 +611 37 negative_sampler.num_negs_per_pos 74.0 +611 37 training.batch_size 2.0 +611 38 model.embedding_dim 0.0 +611 38 loss.margin 9.174755253631773 +611 38 optimizer.lr 0.011381872400085506 +611 38 negative_sampler.num_negs_per_pos 57.0 +611 38 training.batch_size 1.0 +611 39 model.embedding_dim 0.0 +611 39 loss.margin 4.115464180086002 +611 39 optimizer.lr 0.05096254188359597 +611 39 negative_sampler.num_negs_per_pos 21.0 +611 39 training.batch_size 2.0 +611 40 model.embedding_dim 0.0 +611 40 loss.margin 7.623868265370175 +611 40 optimizer.lr 0.008969485717520445 +611 40 negative_sampler.num_negs_per_pos 47.0 +611 40 training.batch_size 2.0 +611 41 model.embedding_dim 1.0 +611 41 loss.margin 4.207856475158351 +611 41 optimizer.lr 0.011249493710621351 +611 41 negative_sampler.num_negs_per_pos 60.0 +611 41 training.batch_size 1.0 +611 42 model.embedding_dim 1.0 +611 42 loss.margin 5.775673178753207 +611 42 optimizer.lr 0.003621823327955198 +611 42 negative_sampler.num_negs_per_pos 12.0 +611 42 training.batch_size 0.0 +611 43 model.embedding_dim 1.0 +611 43 loss.margin 9.349765287809106 +611 43 optimizer.lr 0.015515818017449758 +611 43 negative_sampler.num_negs_per_pos 86.0 +611 43 training.batch_size 1.0 +611 44 model.embedding_dim 0.0 +611 44 loss.margin 1.5525188614581675 +611 44 optimizer.lr 0.01100005083542255 +611 44 negative_sampler.num_negs_per_pos 28.0 +611 44 training.batch_size 1.0 +611 45 model.embedding_dim 0.0 +611 45 loss.margin 8.054124405967874 +611 45 optimizer.lr 0.002054362179611696 +611 45 negative_sampler.num_negs_per_pos 13.0 +611 45 training.batch_size 0.0 +611 46 model.embedding_dim 2.0 +611 46 loss.margin 5.2017990562690954 +611 46 optimizer.lr 
0.00202534375861682 +611 46 negative_sampler.num_negs_per_pos 96.0 +611 46 training.batch_size 0.0 +611 47 model.embedding_dim 0.0 +611 47 loss.margin 1.4384763057098737 +611 47 optimizer.lr 0.053321732592818395 +611 47 negative_sampler.num_negs_per_pos 32.0 +611 47 training.batch_size 1.0 +611 48 model.embedding_dim 0.0 +611 48 loss.margin 2.6863442325239233 +611 48 optimizer.lr 0.09749478100920671 +611 48 negative_sampler.num_negs_per_pos 87.0 +611 48 training.batch_size 0.0 +611 49 model.embedding_dim 2.0 +611 49 loss.margin 3.2622292352334568 +611 49 optimizer.lr 0.07066927160113042 +611 49 negative_sampler.num_negs_per_pos 35.0 +611 49 training.batch_size 0.0 +611 50 model.embedding_dim 1.0 +611 50 loss.margin 4.538019299354416 +611 50 optimizer.lr 0.010844433682208294 +611 50 negative_sampler.num_negs_per_pos 90.0 +611 50 training.batch_size 1.0 +611 51 model.embedding_dim 0.0 +611 51 loss.margin 3.4430667150104073 +611 51 optimizer.lr 0.002849631494783797 +611 51 negative_sampler.num_negs_per_pos 50.0 +611 51 training.batch_size 2.0 +611 52 model.embedding_dim 1.0 +611 52 loss.margin 8.02269777763261 +611 52 optimizer.lr 0.003453124803603636 +611 52 negative_sampler.num_negs_per_pos 66.0 +611 52 training.batch_size 0.0 +611 53 model.embedding_dim 1.0 +611 53 loss.margin 1.5255735703984123 +611 53 optimizer.lr 0.015226691955993167 +611 53 negative_sampler.num_negs_per_pos 81.0 +611 53 training.batch_size 1.0 +611 54 model.embedding_dim 1.0 +611 54 loss.margin 3.1686480072606535 +611 54 optimizer.lr 0.015009696415989525 +611 54 negative_sampler.num_negs_per_pos 4.0 +611 54 training.batch_size 2.0 +611 55 model.embedding_dim 1.0 +611 55 loss.margin 8.976245830395479 +611 55 optimizer.lr 0.08501243845516976 +611 55 negative_sampler.num_negs_per_pos 60.0 +611 55 training.batch_size 0.0 +611 56 model.embedding_dim 0.0 +611 56 loss.margin 3.8856393767005417 +611 56 optimizer.lr 0.0013066175336530522 +611 56 negative_sampler.num_negs_per_pos 90.0 +611 56 
training.batch_size 2.0 +611 57 model.embedding_dim 2.0 +611 57 loss.margin 3.7975503831050226 +611 57 optimizer.lr 0.018468944999174994 +611 57 negative_sampler.num_negs_per_pos 5.0 +611 57 training.batch_size 2.0 +611 58 model.embedding_dim 0.0 +611 58 loss.margin 3.640651548405138 +611 58 optimizer.lr 0.0038947201281217996 +611 58 negative_sampler.num_negs_per_pos 83.0 +611 58 training.batch_size 1.0 +611 59 model.embedding_dim 0.0 +611 59 loss.margin 6.570027879201822 +611 59 optimizer.lr 0.0454568850519817 +611 59 negative_sampler.num_negs_per_pos 98.0 +611 59 training.batch_size 2.0 +611 60 model.embedding_dim 2.0 +611 60 loss.margin 8.520060188184987 +611 60 optimizer.lr 0.09261841897924324 +611 60 negative_sampler.num_negs_per_pos 72.0 +611 60 training.batch_size 0.0 +611 61 model.embedding_dim 0.0 +611 61 loss.margin 6.768364244372864 +611 61 optimizer.lr 0.0014344638736790696 +611 61 negative_sampler.num_negs_per_pos 12.0 +611 61 training.batch_size 2.0 +611 1 dataset """wn18rr""" +611 1 model """rotate""" +611 1 loss """marginranking""" +611 1 regularizer """no""" +611 1 optimizer """adam""" +611 1 training_loop """owa""" +611 1 negative_sampler """basic""" +611 1 evaluator """rankbased""" +611 2 dataset """wn18rr""" +611 2 model """rotate""" +611 2 loss """marginranking""" +611 2 regularizer """no""" +611 2 optimizer """adam""" +611 2 training_loop """owa""" +611 2 negative_sampler """basic""" +611 2 evaluator """rankbased""" +611 3 dataset """wn18rr""" +611 3 model """rotate""" +611 3 loss """marginranking""" +611 3 regularizer """no""" +611 3 optimizer """adam""" +611 3 training_loop """owa""" +611 3 negative_sampler """basic""" +611 3 evaluator """rankbased""" +611 4 dataset """wn18rr""" +611 4 model """rotate""" +611 4 loss """marginranking""" +611 4 regularizer """no""" +611 4 optimizer """adam""" +611 4 training_loop """owa""" +611 4 negative_sampler """basic""" +611 4 evaluator """rankbased""" +611 5 dataset """wn18rr""" +611 5 model """rotate""" 
+611 5 loss """marginranking""" +611 5 regularizer """no""" +611 5 optimizer """adam""" +611 5 training_loop """owa""" +611 5 negative_sampler """basic""" +611 5 evaluator """rankbased""" +611 6 dataset """wn18rr""" +611 6 model """rotate""" +611 6 loss """marginranking""" +611 6 regularizer """no""" +611 6 optimizer """adam""" +611 6 training_loop """owa""" +611 6 negative_sampler """basic""" +611 6 evaluator """rankbased""" +611 7 dataset """wn18rr""" +611 7 model """rotate""" +611 7 loss """marginranking""" +611 7 regularizer """no""" +611 7 optimizer """adam""" +611 7 training_loop """owa""" +611 7 negative_sampler """basic""" +611 7 evaluator """rankbased""" +611 8 dataset """wn18rr""" +611 8 model """rotate""" +611 8 loss """marginranking""" +611 8 regularizer """no""" +611 8 optimizer """adam""" +611 8 training_loop """owa""" +611 8 negative_sampler """basic""" +611 8 evaluator """rankbased""" +611 9 dataset """wn18rr""" +611 9 model """rotate""" +611 9 loss """marginranking""" +611 9 regularizer """no""" +611 9 optimizer """adam""" +611 9 training_loop """owa""" +611 9 negative_sampler """basic""" +611 9 evaluator """rankbased""" +611 10 dataset """wn18rr""" +611 10 model """rotate""" +611 10 loss """marginranking""" +611 10 regularizer """no""" +611 10 optimizer """adam""" +611 10 training_loop """owa""" +611 10 negative_sampler """basic""" +611 10 evaluator """rankbased""" +611 11 dataset """wn18rr""" +611 11 model """rotate""" +611 11 loss """marginranking""" +611 11 regularizer """no""" +611 11 optimizer """adam""" +611 11 training_loop """owa""" +611 11 negative_sampler """basic""" +611 11 evaluator """rankbased""" +611 12 dataset """wn18rr""" +611 12 model """rotate""" +611 12 loss """marginranking""" +611 12 regularizer """no""" +611 12 optimizer """adam""" +611 12 training_loop """owa""" +611 12 negative_sampler """basic""" +611 12 evaluator """rankbased""" +611 13 dataset """wn18rr""" +611 13 model """rotate""" +611 13 loss """marginranking""" +611 
13 regularizer """no""" +611 13 optimizer """adam""" +611 13 training_loop """owa""" +611 13 negative_sampler """basic""" +611 13 evaluator """rankbased""" +611 14 dataset """wn18rr""" +611 14 model """rotate""" +611 14 loss """marginranking""" +611 14 regularizer """no""" +611 14 optimizer """adam""" +611 14 training_loop """owa""" +611 14 negative_sampler """basic""" +611 14 evaluator """rankbased""" +611 15 dataset """wn18rr""" +611 15 model """rotate""" +611 15 loss """marginranking""" +611 15 regularizer """no""" +611 15 optimizer """adam""" +611 15 training_loop """owa""" +611 15 negative_sampler """basic""" +611 15 evaluator """rankbased""" +611 16 dataset """wn18rr""" +611 16 model """rotate""" +611 16 loss """marginranking""" +611 16 regularizer """no""" +611 16 optimizer """adam""" +611 16 training_loop """owa""" +611 16 negative_sampler """basic""" +611 16 evaluator """rankbased""" +611 17 dataset """wn18rr""" +611 17 model """rotate""" +611 17 loss """marginranking""" +611 17 regularizer """no""" +611 17 optimizer """adam""" +611 17 training_loop """owa""" +611 17 negative_sampler """basic""" +611 17 evaluator """rankbased""" +611 18 dataset """wn18rr""" +611 18 model """rotate""" +611 18 loss """marginranking""" +611 18 regularizer """no""" +611 18 optimizer """adam""" +611 18 training_loop """owa""" +611 18 negative_sampler """basic""" +611 18 evaluator """rankbased""" +611 19 dataset """wn18rr""" +611 19 model """rotate""" +611 19 loss """marginranking""" +611 19 regularizer """no""" +611 19 optimizer """adam""" +611 19 training_loop """owa""" +611 19 negative_sampler """basic""" +611 19 evaluator """rankbased""" +611 20 dataset """wn18rr""" +611 20 model """rotate""" +611 20 loss """marginranking""" +611 20 regularizer """no""" +611 20 optimizer """adam""" +611 20 training_loop """owa""" +611 20 negative_sampler """basic""" +611 20 evaluator """rankbased""" +611 21 dataset """wn18rr""" +611 21 model """rotate""" +611 21 loss """marginranking""" +611 
21 regularizer """no""" +611 21 optimizer """adam""" +611 21 training_loop """owa""" +611 21 negative_sampler """basic""" +611 21 evaluator """rankbased""" +611 22 dataset """wn18rr""" +611 22 model """rotate""" +611 22 loss """marginranking""" +611 22 regularizer """no""" +611 22 optimizer """adam""" +611 22 training_loop """owa""" +611 22 negative_sampler """basic""" +611 22 evaluator """rankbased""" +611 23 dataset """wn18rr""" +611 23 model """rotate""" +611 23 loss """marginranking""" +611 23 regularizer """no""" +611 23 optimizer """adam""" +611 23 training_loop """owa""" +611 23 negative_sampler """basic""" +611 23 evaluator """rankbased""" +611 24 dataset """wn18rr""" +611 24 model """rotate""" +611 24 loss """marginranking""" +611 24 regularizer """no""" +611 24 optimizer """adam""" +611 24 training_loop """owa""" +611 24 negative_sampler """basic""" +611 24 evaluator """rankbased""" +611 25 dataset """wn18rr""" +611 25 model """rotate""" +611 25 loss """marginranking""" +611 25 regularizer """no""" +611 25 optimizer """adam""" +611 25 training_loop """owa""" +611 25 negative_sampler """basic""" +611 25 evaluator """rankbased""" +611 26 dataset """wn18rr""" +611 26 model """rotate""" +611 26 loss """marginranking""" +611 26 regularizer """no""" +611 26 optimizer """adam""" +611 26 training_loop """owa""" +611 26 negative_sampler """basic""" +611 26 evaluator """rankbased""" +611 27 dataset """wn18rr""" +611 27 model """rotate""" +611 27 loss """marginranking""" +611 27 regularizer """no""" +611 27 optimizer """adam""" +611 27 training_loop """owa""" +611 27 negative_sampler """basic""" +611 27 evaluator """rankbased""" +611 28 dataset """wn18rr""" +611 28 model """rotate""" +611 28 loss """marginranking""" +611 28 regularizer """no""" +611 28 optimizer """adam""" +611 28 training_loop """owa""" +611 28 negative_sampler """basic""" +611 28 evaluator """rankbased""" +611 29 dataset """wn18rr""" +611 29 model """rotate""" +611 29 loss """marginranking""" +611 
29 regularizer """no""" +611 29 optimizer """adam""" +611 29 training_loop """owa""" +611 29 negative_sampler """basic""" +611 29 evaluator """rankbased""" +611 30 dataset """wn18rr""" +611 30 model """rotate""" +611 30 loss """marginranking""" +611 30 regularizer """no""" +611 30 optimizer """adam""" +611 30 training_loop """owa""" +611 30 negative_sampler """basic""" +611 30 evaluator """rankbased""" +611 31 dataset """wn18rr""" +611 31 model """rotate""" +611 31 loss """marginranking""" +611 31 regularizer """no""" +611 31 optimizer """adam""" +611 31 training_loop """owa""" +611 31 negative_sampler """basic""" +611 31 evaluator """rankbased""" +611 32 dataset """wn18rr""" +611 32 model """rotate""" +611 32 loss """marginranking""" +611 32 regularizer """no""" +611 32 optimizer """adam""" +611 32 training_loop """owa""" +611 32 negative_sampler """basic""" +611 32 evaluator """rankbased""" +611 33 dataset """wn18rr""" +611 33 model """rotate""" +611 33 loss """marginranking""" +611 33 regularizer """no""" +611 33 optimizer """adam""" +611 33 training_loop """owa""" +611 33 negative_sampler """basic""" +611 33 evaluator """rankbased""" +611 34 dataset """wn18rr""" +611 34 model """rotate""" +611 34 loss """marginranking""" +611 34 regularizer """no""" +611 34 optimizer """adam""" +611 34 training_loop """owa""" +611 34 negative_sampler """basic""" +611 34 evaluator """rankbased""" +611 35 dataset """wn18rr""" +611 35 model """rotate""" +611 35 loss """marginranking""" +611 35 regularizer """no""" +611 35 optimizer """adam""" +611 35 training_loop """owa""" +611 35 negative_sampler """basic""" +611 35 evaluator """rankbased""" +611 36 dataset """wn18rr""" +611 36 model """rotate""" +611 36 loss """marginranking""" +611 36 regularizer """no""" +611 36 optimizer """adam""" +611 36 training_loop """owa""" +611 36 negative_sampler """basic""" +611 36 evaluator """rankbased""" +611 37 dataset """wn18rr""" +611 37 model """rotate""" +611 37 loss """marginranking""" +611 
37 regularizer """no""" +611 37 optimizer """adam""" +611 37 training_loop """owa""" +611 37 negative_sampler """basic""" +611 37 evaluator """rankbased""" +611 38 dataset """wn18rr""" +611 38 model """rotate""" +611 38 loss """marginranking""" +611 38 regularizer """no""" +611 38 optimizer """adam""" +611 38 training_loop """owa""" +611 38 negative_sampler """basic""" +611 38 evaluator """rankbased""" +611 39 dataset """wn18rr""" +611 39 model """rotate""" +611 39 loss """marginranking""" +611 39 regularizer """no""" +611 39 optimizer """adam""" +611 39 training_loop """owa""" +611 39 negative_sampler """basic""" +611 39 evaluator """rankbased""" +611 40 dataset """wn18rr""" +611 40 model """rotate""" +611 40 loss """marginranking""" +611 40 regularizer """no""" +611 40 optimizer """adam""" +611 40 training_loop """owa""" +611 40 negative_sampler """basic""" +611 40 evaluator """rankbased""" +611 41 dataset """wn18rr""" +611 41 model """rotate""" +611 41 loss """marginranking""" +611 41 regularizer """no""" +611 41 optimizer """adam""" +611 41 training_loop """owa""" +611 41 negative_sampler """basic""" +611 41 evaluator """rankbased""" +611 42 dataset """wn18rr""" +611 42 model """rotate""" +611 42 loss """marginranking""" +611 42 regularizer """no""" +611 42 optimizer """adam""" +611 42 training_loop """owa""" +611 42 negative_sampler """basic""" +611 42 evaluator """rankbased""" +611 43 dataset """wn18rr""" +611 43 model """rotate""" +611 43 loss """marginranking""" +611 43 regularizer """no""" +611 43 optimizer """adam""" +611 43 training_loop """owa""" +611 43 negative_sampler """basic""" +611 43 evaluator """rankbased""" +611 44 dataset """wn18rr""" +611 44 model """rotate""" +611 44 loss """marginranking""" +611 44 regularizer """no""" +611 44 optimizer """adam""" +611 44 training_loop """owa""" +611 44 negative_sampler """basic""" +611 44 evaluator """rankbased""" +611 45 dataset """wn18rr""" +611 45 model """rotate""" +611 45 loss """marginranking""" +611 
45 regularizer """no""" +611 45 optimizer """adam""" +611 45 training_loop """owa""" +611 45 negative_sampler """basic""" +611 45 evaluator """rankbased""" +611 46 dataset """wn18rr""" +611 46 model """rotate""" +611 46 loss """marginranking""" +611 46 regularizer """no""" +611 46 optimizer """adam""" +611 46 training_loop """owa""" +611 46 negative_sampler """basic""" +611 46 evaluator """rankbased""" +611 47 dataset """wn18rr""" +611 47 model """rotate""" +611 47 loss """marginranking""" +611 47 regularizer """no""" +611 47 optimizer """adam""" +611 47 training_loop """owa""" +611 47 negative_sampler """basic""" +611 47 evaluator """rankbased""" +611 48 dataset """wn18rr""" +611 48 model """rotate""" +611 48 loss """marginranking""" +611 48 regularizer """no""" +611 48 optimizer """adam""" +611 48 training_loop """owa""" +611 48 negative_sampler """basic""" +611 48 evaluator """rankbased""" +611 49 dataset """wn18rr""" +611 49 model """rotate""" +611 49 loss """marginranking""" +611 49 regularizer """no""" +611 49 optimizer """adam""" +611 49 training_loop """owa""" +611 49 negative_sampler """basic""" +611 49 evaluator """rankbased""" +611 50 dataset """wn18rr""" +611 50 model """rotate""" +611 50 loss """marginranking""" +611 50 regularizer """no""" +611 50 optimizer """adam""" +611 50 training_loop """owa""" +611 50 negative_sampler """basic""" +611 50 evaluator """rankbased""" +611 51 dataset """wn18rr""" +611 51 model """rotate""" +611 51 loss """marginranking""" +611 51 regularizer """no""" +611 51 optimizer """adam""" +611 51 training_loop """owa""" +611 51 negative_sampler """basic""" +611 51 evaluator """rankbased""" +611 52 dataset """wn18rr""" +611 52 model """rotate""" +611 52 loss """marginranking""" +611 52 regularizer """no""" +611 52 optimizer """adam""" +611 52 training_loop """owa""" +611 52 negative_sampler """basic""" +611 52 evaluator """rankbased""" +611 53 dataset """wn18rr""" +611 53 model """rotate""" +611 53 loss """marginranking""" +611 
53 regularizer """no""" +611 53 optimizer """adam""" +611 53 training_loop """owa""" +611 53 negative_sampler """basic""" +611 53 evaluator """rankbased""" +611 54 dataset """wn18rr""" +611 54 model """rotate""" +611 54 loss """marginranking""" +611 54 regularizer """no""" +611 54 optimizer """adam""" +611 54 training_loop """owa""" +611 54 negative_sampler """basic""" +611 54 evaluator """rankbased""" +611 55 dataset """wn18rr""" +611 55 model """rotate""" +611 55 loss """marginranking""" +611 55 regularizer """no""" +611 55 optimizer """adam""" +611 55 training_loop """owa""" +611 55 negative_sampler """basic""" +611 55 evaluator """rankbased""" +611 56 dataset """wn18rr""" +611 56 model """rotate""" +611 56 loss """marginranking""" +611 56 regularizer """no""" +611 56 optimizer """adam""" +611 56 training_loop """owa""" +611 56 negative_sampler """basic""" +611 56 evaluator """rankbased""" +611 57 dataset """wn18rr""" +611 57 model """rotate""" +611 57 loss """marginranking""" +611 57 regularizer """no""" +611 57 optimizer """adam""" +611 57 training_loop """owa""" +611 57 negative_sampler """basic""" +611 57 evaluator """rankbased""" +611 58 dataset """wn18rr""" +611 58 model """rotate""" +611 58 loss """marginranking""" +611 58 regularizer """no""" +611 58 optimizer """adam""" +611 58 training_loop """owa""" +611 58 negative_sampler """basic""" +611 58 evaluator """rankbased""" +611 59 dataset """wn18rr""" +611 59 model """rotate""" +611 59 loss """marginranking""" +611 59 regularizer """no""" +611 59 optimizer """adam""" +611 59 training_loop """owa""" +611 59 negative_sampler """basic""" +611 59 evaluator """rankbased""" +611 60 dataset """wn18rr""" +611 60 model """rotate""" +611 60 loss """marginranking""" +611 60 regularizer """no""" +611 60 optimizer """adam""" +611 60 training_loop """owa""" +611 60 negative_sampler """basic""" +611 60 evaluator """rankbased""" +611 61 dataset """wn18rr""" +611 61 model """rotate""" +611 61 loss """marginranking""" +611 
61 regularizer """no""" +611 61 optimizer """adam""" +611 61 training_loop """owa""" +611 61 negative_sampler """basic""" +611 61 evaluator """rankbased""" +612 1 model.embedding_dim 0.0 +612 1 loss.margin 2.623267824800498 +612 1 optimizer.lr 0.029075749443577627 +612 1 negative_sampler.num_negs_per_pos 25.0 +612 1 training.batch_size 1.0 +612 2 model.embedding_dim 0.0 +612 2 loss.margin 2.959464939268468 +612 2 optimizer.lr 0.006653428447237192 +612 2 negative_sampler.num_negs_per_pos 25.0 +612 2 training.batch_size 0.0 +612 3 model.embedding_dim 2.0 +612 3 loss.margin 8.10041371461954 +612 3 optimizer.lr 0.08953579031336213 +612 3 negative_sampler.num_negs_per_pos 91.0 +612 3 training.batch_size 1.0 +612 4 model.embedding_dim 2.0 +612 4 loss.margin 6.464505597533037 +612 4 optimizer.lr 0.0012209989917784509 +612 4 negative_sampler.num_negs_per_pos 7.0 +612 4 training.batch_size 0.0 +612 5 model.embedding_dim 1.0 +612 5 loss.margin 8.28945244869129 +612 5 optimizer.lr 0.005940942742306746 +612 5 negative_sampler.num_negs_per_pos 78.0 +612 5 training.batch_size 1.0 +612 6 model.embedding_dim 1.0 +612 6 loss.margin 8.854251652567843 +612 6 optimizer.lr 0.002545883156812527 +612 6 negative_sampler.num_negs_per_pos 37.0 +612 6 training.batch_size 1.0 +612 7 model.embedding_dim 1.0 +612 7 loss.margin 2.3831328357026607 +612 7 optimizer.lr 0.010340087890849189 +612 7 negative_sampler.num_negs_per_pos 96.0 +612 7 training.batch_size 1.0 +612 8 model.embedding_dim 0.0 +612 8 loss.margin 3.8565107655768864 +612 8 optimizer.lr 0.01189302363138015 +612 8 negative_sampler.num_negs_per_pos 99.0 +612 8 training.batch_size 0.0 +612 9 model.embedding_dim 2.0 +612 9 loss.margin 4.453317201274623 +612 9 optimizer.lr 0.0026284269397284328 +612 9 negative_sampler.num_negs_per_pos 64.0 +612 9 training.batch_size 2.0 +612 10 model.embedding_dim 1.0 +612 10 loss.margin 9.903084720825323 +612 10 optimizer.lr 0.001464708091875793 +612 10 negative_sampler.num_negs_per_pos 54.0 +612 10 
training.batch_size 2.0 +612 11 model.embedding_dim 2.0 +612 11 loss.margin 3.6024222861218753 +612 11 optimizer.lr 0.02392533531731306 +612 11 negative_sampler.num_negs_per_pos 95.0 +612 11 training.batch_size 0.0 +612 12 model.embedding_dim 0.0 +612 12 loss.margin 6.4401507748758045 +612 12 optimizer.lr 0.003960794723985194 +612 12 negative_sampler.num_negs_per_pos 34.0 +612 12 training.batch_size 2.0 +612 13 model.embedding_dim 2.0 +612 13 loss.margin 8.870117451440164 +612 13 optimizer.lr 0.004990834011652936 +612 13 negative_sampler.num_negs_per_pos 25.0 +612 13 training.batch_size 2.0 +612 14 model.embedding_dim 0.0 +612 14 loss.margin 4.925337480969611 +612 14 optimizer.lr 0.005588823103667591 +612 14 negative_sampler.num_negs_per_pos 46.0 +612 14 training.batch_size 2.0 +612 15 model.embedding_dim 0.0 +612 15 loss.margin 5.202650669990239 +612 15 optimizer.lr 0.0023604334629407243 +612 15 negative_sampler.num_negs_per_pos 18.0 +612 15 training.batch_size 0.0 +612 16 model.embedding_dim 1.0 +612 16 loss.margin 5.4221073651697695 +612 16 optimizer.lr 0.0015236331628458244 +612 16 negative_sampler.num_negs_per_pos 94.0 +612 16 training.batch_size 0.0 +612 17 model.embedding_dim 2.0 +612 17 loss.margin 2.6906703511663137 +612 17 optimizer.lr 0.0018810049539720868 +612 17 negative_sampler.num_negs_per_pos 66.0 +612 17 training.batch_size 2.0 +612 18 model.embedding_dim 0.0 +612 18 loss.margin 9.942203062554574 +612 18 optimizer.lr 0.00911007084496007 +612 18 negative_sampler.num_negs_per_pos 74.0 +612 18 training.batch_size 1.0 +612 19 model.embedding_dim 1.0 +612 19 loss.margin 5.300624806724526 +612 19 optimizer.lr 0.04113617885301103 +612 19 negative_sampler.num_negs_per_pos 11.0 +612 19 training.batch_size 2.0 +612 20 model.embedding_dim 0.0 +612 20 loss.margin 6.8432031556049635 +612 20 optimizer.lr 0.024346171081963637 +612 20 negative_sampler.num_negs_per_pos 29.0 +612 20 training.batch_size 1.0 +612 21 model.embedding_dim 0.0 +612 21 loss.margin 
8.39458679944349 +612 21 optimizer.lr 0.016359707204166126 +612 21 negative_sampler.num_negs_per_pos 61.0 +612 21 training.batch_size 2.0 +612 22 model.embedding_dim 1.0 +612 22 loss.margin 3.5852711504532038 +612 22 optimizer.lr 0.019580986919610337 +612 22 negative_sampler.num_negs_per_pos 85.0 +612 22 training.batch_size 1.0 +612 23 model.embedding_dim 0.0 +612 23 loss.margin 7.03802074618591 +612 23 optimizer.lr 0.0011765211015259283 +612 23 negative_sampler.num_negs_per_pos 70.0 +612 23 training.batch_size 0.0 +612 24 model.embedding_dim 1.0 +612 24 loss.margin 6.381818967001582 +612 24 optimizer.lr 0.06828874244575192 +612 24 negative_sampler.num_negs_per_pos 4.0 +612 24 training.batch_size 2.0 +612 25 model.embedding_dim 1.0 +612 25 loss.margin 3.6381843673487015 +612 25 optimizer.lr 0.003926861288981141 +612 25 negative_sampler.num_negs_per_pos 95.0 +612 25 training.batch_size 0.0 +612 26 model.embedding_dim 1.0 +612 26 loss.margin 8.617903768461808 +612 26 optimizer.lr 0.015442845529286788 +612 26 negative_sampler.num_negs_per_pos 0.0 +612 26 training.batch_size 0.0 +612 27 model.embedding_dim 2.0 +612 27 loss.margin 9.358906802751404 +612 27 optimizer.lr 0.03406188193197618 +612 27 negative_sampler.num_negs_per_pos 95.0 +612 27 training.batch_size 1.0 +612 28 model.embedding_dim 0.0 +612 28 loss.margin 2.690518765038421 +612 28 optimizer.lr 0.024011718266266323 +612 28 negative_sampler.num_negs_per_pos 7.0 +612 28 training.batch_size 0.0 +612 29 model.embedding_dim 1.0 +612 29 loss.margin 4.152682789440969 +612 29 optimizer.lr 0.002181862134076526 +612 29 negative_sampler.num_negs_per_pos 9.0 +612 29 training.batch_size 1.0 +612 30 model.embedding_dim 0.0 +612 30 loss.margin 1.457076293559581 +612 30 optimizer.lr 0.03012789864156302 +612 30 negative_sampler.num_negs_per_pos 56.0 +612 30 training.batch_size 0.0 +612 31 model.embedding_dim 2.0 +612 31 loss.margin 8.010777869032873 +612 31 optimizer.lr 0.05804453249737636 +612 31 
negative_sampler.num_negs_per_pos 50.0 +612 31 training.batch_size 1.0 +612 32 model.embedding_dim 1.0 +612 32 loss.margin 6.098095148195197 +612 32 optimizer.lr 0.07675158316808728 +612 32 negative_sampler.num_negs_per_pos 24.0 +612 32 training.batch_size 1.0 +612 33 model.embedding_dim 1.0 +612 33 loss.margin 5.45871096656536 +612 33 optimizer.lr 0.012025715462627734 +612 33 negative_sampler.num_negs_per_pos 30.0 +612 33 training.batch_size 1.0 +612 34 model.embedding_dim 0.0 +612 34 loss.margin 8.172820719740878 +612 34 optimizer.lr 0.006020907255844396 +612 34 negative_sampler.num_negs_per_pos 98.0 +612 34 training.batch_size 1.0 +612 35 model.embedding_dim 1.0 +612 35 loss.margin 8.965928611499105 +612 35 optimizer.lr 0.014386513595895822 +612 35 negative_sampler.num_negs_per_pos 85.0 +612 35 training.batch_size 1.0 +612 36 model.embedding_dim 1.0 +612 36 loss.margin 5.363240279830535 +612 36 optimizer.lr 0.015061680168426567 +612 36 negative_sampler.num_negs_per_pos 69.0 +612 36 training.batch_size 2.0 +612 37 model.embedding_dim 0.0 +612 37 loss.margin 5.352167948075253 +612 37 optimizer.lr 0.01724786595073748 +612 37 negative_sampler.num_negs_per_pos 18.0 +612 37 training.batch_size 0.0 +612 38 model.embedding_dim 0.0 +612 38 loss.margin 9.658955080004276 +612 38 optimizer.lr 0.008836133860391187 +612 38 negative_sampler.num_negs_per_pos 30.0 +612 38 training.batch_size 1.0 +612 39 model.embedding_dim 0.0 +612 39 loss.margin 4.534130684232955 +612 39 optimizer.lr 0.0181214341882761 +612 39 negative_sampler.num_negs_per_pos 72.0 +612 39 training.batch_size 1.0 +612 40 model.embedding_dim 0.0 +612 40 loss.margin 7.896506271869041 +612 40 optimizer.lr 0.020525866532497973 +612 40 negative_sampler.num_negs_per_pos 50.0 +612 40 training.batch_size 1.0 +612 41 model.embedding_dim 0.0 +612 41 loss.margin 2.003506606682304 +612 41 optimizer.lr 0.0032890771343372606 +612 41 negative_sampler.num_negs_per_pos 88.0 +612 41 training.batch_size 2.0 +612 42 
model.embedding_dim 0.0 +612 42 loss.margin 5.835404882926238 +612 42 optimizer.lr 0.0050181699040112585 +612 42 negative_sampler.num_negs_per_pos 81.0 +612 42 training.batch_size 1.0 +612 43 model.embedding_dim 2.0 +612 43 loss.margin 9.417380314290531 +612 43 optimizer.lr 0.0027599962074647474 +612 43 negative_sampler.num_negs_per_pos 58.0 +612 43 training.batch_size 2.0 +612 44 model.embedding_dim 0.0 +612 44 loss.margin 9.126077700709411 +612 44 optimizer.lr 0.004229586238665886 +612 44 negative_sampler.num_negs_per_pos 72.0 +612 44 training.batch_size 2.0 +612 45 model.embedding_dim 2.0 +612 45 loss.margin 2.548222903919979 +612 45 optimizer.lr 0.02746603823577355 +612 45 negative_sampler.num_negs_per_pos 15.0 +612 45 training.batch_size 2.0 +612 46 model.embedding_dim 2.0 +612 46 loss.margin 4.732307375089837 +612 46 optimizer.lr 0.006999734363998628 +612 46 negative_sampler.num_negs_per_pos 34.0 +612 46 training.batch_size 1.0 +612 47 model.embedding_dim 0.0 +612 47 loss.margin 5.890875889327481 +612 47 optimizer.lr 0.040512810697679695 +612 47 negative_sampler.num_negs_per_pos 59.0 +612 47 training.batch_size 1.0 +612 48 model.embedding_dim 1.0 +612 48 loss.margin 1.4200836123772018 +612 48 optimizer.lr 0.019317258718209253 +612 48 negative_sampler.num_negs_per_pos 47.0 +612 48 training.batch_size 1.0 +612 49 model.embedding_dim 1.0 +612 49 loss.margin 9.656245305446204 +612 49 optimizer.lr 0.009956903072792431 +612 49 negative_sampler.num_negs_per_pos 35.0 +612 49 training.batch_size 0.0 +612 50 model.embedding_dim 0.0 +612 50 loss.margin 0.6271375963972181 +612 50 optimizer.lr 0.002020675068038641 +612 50 negative_sampler.num_negs_per_pos 69.0 +612 50 training.batch_size 1.0 +612 51 model.embedding_dim 2.0 +612 51 loss.margin 8.01307794247233 +612 51 optimizer.lr 0.012119920491974634 +612 51 negative_sampler.num_negs_per_pos 83.0 +612 51 training.batch_size 1.0 +612 52 model.embedding_dim 0.0 +612 52 loss.margin 6.211928454579716 +612 52 optimizer.lr 
0.02859865385039105 +612 52 negative_sampler.num_negs_per_pos 78.0 +612 52 training.batch_size 2.0 +612 53 model.embedding_dim 1.0 +612 53 loss.margin 9.558610691991184 +612 53 optimizer.lr 0.05546908698963896 +612 53 negative_sampler.num_negs_per_pos 42.0 +612 53 training.batch_size 0.0 +612 54 model.embedding_dim 1.0 +612 54 loss.margin 1.9035170632518454 +612 54 optimizer.lr 0.027877595762744303 +612 54 negative_sampler.num_negs_per_pos 22.0 +612 54 training.batch_size 0.0 +612 55 model.embedding_dim 0.0 +612 55 loss.margin 0.947188075778971 +612 55 optimizer.lr 0.020118913824654524 +612 55 negative_sampler.num_negs_per_pos 64.0 +612 55 training.batch_size 1.0 +612 56 model.embedding_dim 0.0 +612 56 loss.margin 2.845774141168576 +612 56 optimizer.lr 0.038169350133823415 +612 56 negative_sampler.num_negs_per_pos 2.0 +612 56 training.batch_size 1.0 +612 57 model.embedding_dim 0.0 +612 57 loss.margin 6.711973135190241 +612 57 optimizer.lr 0.005597815585742714 +612 57 negative_sampler.num_negs_per_pos 10.0 +612 57 training.batch_size 0.0 +612 58 model.embedding_dim 0.0 +612 58 loss.margin 1.6525959199887754 +612 58 optimizer.lr 0.07185113646683647 +612 58 negative_sampler.num_negs_per_pos 40.0 +612 58 training.batch_size 1.0 +612 59 model.embedding_dim 0.0 +612 59 loss.margin 6.754326552550621 +612 59 optimizer.lr 0.03817356470998939 +612 59 negative_sampler.num_negs_per_pos 92.0 +612 59 training.batch_size 2.0 +612 60 model.embedding_dim 0.0 +612 60 loss.margin 6.703551975650861 +612 60 optimizer.lr 0.0019580680224144105 +612 60 negative_sampler.num_negs_per_pos 31.0 +612 60 training.batch_size 0.0 +612 61 model.embedding_dim 2.0 +612 61 loss.margin 6.758028362859929 +612 61 optimizer.lr 0.006003557985679583 +612 61 negative_sampler.num_negs_per_pos 82.0 +612 61 training.batch_size 1.0 +612 62 model.embedding_dim 0.0 +612 62 loss.margin 1.4139738538094409 +612 62 optimizer.lr 0.0036189188402873467 +612 62 negative_sampler.num_negs_per_pos 66.0 +612 62 
training.batch_size 0.0 +612 63 model.embedding_dim 0.0 +612 63 loss.margin 0.6022769721408716 +612 63 optimizer.lr 0.011395614969957623 +612 63 negative_sampler.num_negs_per_pos 3.0 +612 63 training.batch_size 1.0 +612 64 model.embedding_dim 1.0 +612 64 loss.margin 1.7326734722051993 +612 64 optimizer.lr 0.03815133517609175 +612 64 negative_sampler.num_negs_per_pos 97.0 +612 64 training.batch_size 2.0 +612 65 model.embedding_dim 0.0 +612 65 loss.margin 1.3096760151254863 +612 65 optimizer.lr 0.022301408971547393 +612 65 negative_sampler.num_negs_per_pos 9.0 +612 65 training.batch_size 2.0 +612 66 model.embedding_dim 0.0 +612 66 loss.margin 2.913008591900925 +612 66 optimizer.lr 0.04095305805695401 +612 66 negative_sampler.num_negs_per_pos 19.0 +612 66 training.batch_size 0.0 +612 67 model.embedding_dim 1.0 +612 67 loss.margin 0.9468567213149599 +612 67 optimizer.lr 0.01473443753745705 +612 67 negative_sampler.num_negs_per_pos 53.0 +612 67 training.batch_size 0.0 +612 68 model.embedding_dim 1.0 +612 68 loss.margin 9.299429110784398 +612 68 optimizer.lr 0.011666577932484613 +612 68 negative_sampler.num_negs_per_pos 73.0 +612 68 training.batch_size 2.0 +612 69 model.embedding_dim 0.0 +612 69 loss.margin 3.6038022158115166 +612 69 optimizer.lr 0.055789601061753145 +612 69 negative_sampler.num_negs_per_pos 56.0 +612 69 training.batch_size 1.0 +612 70 model.embedding_dim 0.0 +612 70 loss.margin 8.034666768036958 +612 70 optimizer.lr 0.0765560982076116 +612 70 negative_sampler.num_negs_per_pos 27.0 +612 70 training.batch_size 0.0 +612 71 model.embedding_dim 0.0 +612 71 loss.margin 1.016400558858583 +612 71 optimizer.lr 0.0765330253396256 +612 71 negative_sampler.num_negs_per_pos 77.0 +612 71 training.batch_size 0.0 +612 72 model.embedding_dim 0.0 +612 72 loss.margin 2.5175940550119833 +612 72 optimizer.lr 0.011117011816055138 +612 72 negative_sampler.num_negs_per_pos 5.0 +612 72 training.batch_size 0.0 +612 73 model.embedding_dim 1.0 +612 73 loss.margin 7.581271074529623 
+612 73 optimizer.lr 0.04168746663092159 +612 73 negative_sampler.num_negs_per_pos 29.0 +612 73 training.batch_size 2.0 +612 74 model.embedding_dim 1.0 +612 74 loss.margin 4.19722862056597 +612 74 optimizer.lr 0.0021906451333485086 +612 74 negative_sampler.num_negs_per_pos 74.0 +612 74 training.batch_size 0.0 +612 75 model.embedding_dim 0.0 +612 75 loss.margin 7.591010440476838 +612 75 optimizer.lr 0.002305207964774346 +612 75 negative_sampler.num_negs_per_pos 81.0 +612 75 training.batch_size 1.0 +612 76 model.embedding_dim 1.0 +612 76 loss.margin 3.448985338335641 +612 76 optimizer.lr 0.015125545817240774 +612 76 negative_sampler.num_negs_per_pos 57.0 +612 76 training.batch_size 1.0 +612 77 model.embedding_dim 0.0 +612 77 loss.margin 1.5052924604112736 +612 77 optimizer.lr 0.002945380233310086 +612 77 negative_sampler.num_negs_per_pos 33.0 +612 77 training.batch_size 0.0 +612 78 model.embedding_dim 2.0 +612 78 loss.margin 7.620527625742322 +612 78 optimizer.lr 0.07936705788167833 +612 78 negative_sampler.num_negs_per_pos 22.0 +612 78 training.batch_size 2.0 +612 79 model.embedding_dim 2.0 +612 79 loss.margin 9.17563391740139 +612 79 optimizer.lr 0.007630003264102754 +612 79 negative_sampler.num_negs_per_pos 38.0 +612 79 training.batch_size 1.0 +612 80 model.embedding_dim 2.0 +612 80 loss.margin 4.64908111673605 +612 80 optimizer.lr 0.005701246133836168 +612 80 negative_sampler.num_negs_per_pos 34.0 +612 80 training.batch_size 2.0 +612 81 model.embedding_dim 0.0 +612 81 loss.margin 3.766473248506695 +612 81 optimizer.lr 0.001813842817616217 +612 81 negative_sampler.num_negs_per_pos 27.0 +612 81 training.batch_size 2.0 +612 82 model.embedding_dim 1.0 +612 82 loss.margin 3.6070993473048274 +612 82 optimizer.lr 0.0026622192499119192 +612 82 negative_sampler.num_negs_per_pos 24.0 +612 82 training.batch_size 2.0 +612 83 model.embedding_dim 2.0 +612 83 loss.margin 2.340038973299459 +612 83 optimizer.lr 0.002321621035242055 +612 83 negative_sampler.num_negs_per_pos 87.0 
+612 83 training.batch_size 0.0 +612 84 model.embedding_dim 0.0 +612 84 loss.margin 6.518697478343184 +612 84 optimizer.lr 0.0384502843502721 +612 84 negative_sampler.num_negs_per_pos 21.0 +612 84 training.batch_size 1.0 +612 85 model.embedding_dim 1.0 +612 85 loss.margin 6.832328494050155 +612 85 optimizer.lr 0.0039248599562659485 +612 85 negative_sampler.num_negs_per_pos 86.0 +612 85 training.batch_size 2.0 +612 86 model.embedding_dim 1.0 +612 86 loss.margin 9.119305191866799 +612 86 optimizer.lr 0.019376367450089898 +612 86 negative_sampler.num_negs_per_pos 31.0 +612 86 training.batch_size 1.0 +612 87 model.embedding_dim 0.0 +612 87 loss.margin 8.562729751719214 +612 87 optimizer.lr 0.014243709891223845 +612 87 negative_sampler.num_negs_per_pos 11.0 +612 87 training.batch_size 1.0 +612 88 model.embedding_dim 1.0 +612 88 loss.margin 5.758599960814707 +612 88 optimizer.lr 0.016353776059936074 +612 88 negative_sampler.num_negs_per_pos 49.0 +612 88 training.batch_size 2.0 +612 89 model.embedding_dim 0.0 +612 89 loss.margin 8.637204659292054 +612 89 optimizer.lr 0.005937730745356957 +612 89 negative_sampler.num_negs_per_pos 87.0 +612 89 training.batch_size 1.0 +612 90 model.embedding_dim 0.0 +612 90 loss.margin 1.686132000309526 +612 90 optimizer.lr 0.009032750832393119 +612 90 negative_sampler.num_negs_per_pos 99.0 +612 90 training.batch_size 1.0 +612 91 model.embedding_dim 0.0 +612 91 loss.margin 5.190193591128463 +612 91 optimizer.lr 0.06853922909320716 +612 91 negative_sampler.num_negs_per_pos 9.0 +612 91 training.batch_size 0.0 +612 92 model.embedding_dim 1.0 +612 92 loss.margin 2.8968280700579085 +612 92 optimizer.lr 0.06412667101832144 +612 92 negative_sampler.num_negs_per_pos 14.0 +612 92 training.batch_size 2.0 +612 93 model.embedding_dim 0.0 +612 93 loss.margin 3.2722480286401865 +612 93 optimizer.lr 0.029892115542141034 +612 93 negative_sampler.num_negs_per_pos 79.0 +612 93 training.batch_size 0.0 +612 94 model.embedding_dim 0.0 +612 94 loss.margin 
6.165434074164242 +612 94 optimizer.lr 0.08429217175447179 +612 94 negative_sampler.num_negs_per_pos 29.0 +612 94 training.batch_size 2.0 +612 95 model.embedding_dim 1.0 +612 95 loss.margin 5.2159195726147916 +612 95 optimizer.lr 0.09551022742993533 +612 95 negative_sampler.num_negs_per_pos 3.0 +612 95 training.batch_size 1.0 +612 96 model.embedding_dim 2.0 +612 96 loss.margin 1.5056270330051085 +612 96 optimizer.lr 0.002493856497760664 +612 96 negative_sampler.num_negs_per_pos 10.0 +612 96 training.batch_size 2.0 +612 97 model.embedding_dim 0.0 +612 97 loss.margin 0.7408139234328758 +612 97 optimizer.lr 0.016890041233662247 +612 97 negative_sampler.num_negs_per_pos 48.0 +612 97 training.batch_size 2.0 +612 98 model.embedding_dim 2.0 +612 98 loss.margin 6.317968154540007 +612 98 optimizer.lr 0.0022335312311711794 +612 98 negative_sampler.num_negs_per_pos 2.0 +612 98 training.batch_size 2.0 +612 99 model.embedding_dim 1.0 +612 99 loss.margin 7.962194513013959 +612 99 optimizer.lr 0.011112246241030289 +612 99 negative_sampler.num_negs_per_pos 92.0 +612 99 training.batch_size 0.0 +612 100 model.embedding_dim 0.0 +612 100 loss.margin 4.957992206493148 +612 100 optimizer.lr 0.016405918193421094 +612 100 negative_sampler.num_negs_per_pos 69.0 +612 100 training.batch_size 2.0 +612 1 dataset """wn18rr""" +612 1 model """rotate""" +612 1 loss """marginranking""" +612 1 regularizer """no""" +612 1 optimizer """adam""" +612 1 training_loop """owa""" +612 1 negative_sampler """basic""" +612 1 evaluator """rankbased""" +612 2 dataset """wn18rr""" +612 2 model """rotate""" +612 2 loss """marginranking""" +612 2 regularizer """no""" +612 2 optimizer """adam""" +612 2 training_loop """owa""" +612 2 negative_sampler """basic""" +612 2 evaluator """rankbased""" +612 3 dataset """wn18rr""" +612 3 model """rotate""" +612 3 loss """marginranking""" +612 3 regularizer """no""" +612 3 optimizer """adam""" +612 3 training_loop """owa""" +612 3 negative_sampler """basic""" +612 3 evaluator 
"""rankbased""" +612 4 dataset """wn18rr""" +612 4 model """rotate""" +612 4 loss """marginranking""" +612 4 regularizer """no""" +612 4 optimizer """adam""" +612 4 training_loop """owa""" +612 4 negative_sampler """basic""" +612 4 evaluator """rankbased""" +612 5 dataset """wn18rr""" +612 5 model """rotate""" +612 5 loss """marginranking""" +612 5 regularizer """no""" +612 5 optimizer """adam""" +612 5 training_loop """owa""" +612 5 negative_sampler """basic""" +612 5 evaluator """rankbased""" +612 6 dataset """wn18rr""" +612 6 model """rotate""" +612 6 loss """marginranking""" +612 6 regularizer """no""" +612 6 optimizer """adam""" +612 6 training_loop """owa""" +612 6 negative_sampler """basic""" +612 6 evaluator """rankbased""" +612 7 dataset """wn18rr""" +612 7 model """rotate""" +612 7 loss """marginranking""" +612 7 regularizer """no""" +612 7 optimizer """adam""" +612 7 training_loop """owa""" +612 7 negative_sampler """basic""" +612 7 evaluator """rankbased""" +612 8 dataset """wn18rr""" +612 8 model """rotate""" +612 8 loss """marginranking""" +612 8 regularizer """no""" +612 8 optimizer """adam""" +612 8 training_loop """owa""" +612 8 negative_sampler """basic""" +612 8 evaluator """rankbased""" +612 9 dataset """wn18rr""" +612 9 model """rotate""" +612 9 loss """marginranking""" +612 9 regularizer """no""" +612 9 optimizer """adam""" +612 9 training_loop """owa""" +612 9 negative_sampler """basic""" +612 9 evaluator """rankbased""" +612 10 dataset """wn18rr""" +612 10 model """rotate""" +612 10 loss """marginranking""" +612 10 regularizer """no""" +612 10 optimizer """adam""" +612 10 training_loop """owa""" +612 10 negative_sampler """basic""" +612 10 evaluator """rankbased""" +612 11 dataset """wn18rr""" +612 11 model """rotate""" +612 11 loss """marginranking""" +612 11 regularizer """no""" +612 11 optimizer """adam""" +612 11 training_loop """owa""" +612 11 negative_sampler """basic""" +612 11 evaluator """rankbased""" +612 12 dataset """wn18rr""" 
+612 12 model """rotate""" +612 12 loss """marginranking""" +612 12 regularizer """no""" +612 12 optimizer """adam""" +612 12 training_loop """owa""" +612 12 negative_sampler """basic""" +612 12 evaluator """rankbased""" +612 13 dataset """wn18rr""" +612 13 model """rotate""" +612 13 loss """marginranking""" +612 13 regularizer """no""" +612 13 optimizer """adam""" +612 13 training_loop """owa""" +612 13 negative_sampler """basic""" +612 13 evaluator """rankbased""" +612 14 dataset """wn18rr""" +612 14 model """rotate""" +612 14 loss """marginranking""" +612 14 regularizer """no""" +612 14 optimizer """adam""" +612 14 training_loop """owa""" +612 14 negative_sampler """basic""" +612 14 evaluator """rankbased""" +612 15 dataset """wn18rr""" +612 15 model """rotate""" +612 15 loss """marginranking""" +612 15 regularizer """no""" +612 15 optimizer """adam""" +612 15 training_loop """owa""" +612 15 negative_sampler """basic""" +612 15 evaluator """rankbased""" +612 16 dataset """wn18rr""" +612 16 model """rotate""" +612 16 loss """marginranking""" +612 16 regularizer """no""" +612 16 optimizer """adam""" +612 16 training_loop """owa""" +612 16 negative_sampler """basic""" +612 16 evaluator """rankbased""" +612 17 dataset """wn18rr""" +612 17 model """rotate""" +612 17 loss """marginranking""" +612 17 regularizer """no""" +612 17 optimizer """adam""" +612 17 training_loop """owa""" +612 17 negative_sampler """basic""" +612 17 evaluator """rankbased""" +612 18 dataset """wn18rr""" +612 18 model """rotate""" +612 18 loss """marginranking""" +612 18 regularizer """no""" +612 18 optimizer """adam""" +612 18 training_loop """owa""" +612 18 negative_sampler """basic""" +612 18 evaluator """rankbased""" +612 19 dataset """wn18rr""" +612 19 model """rotate""" +612 19 loss """marginranking""" +612 19 regularizer """no""" +612 19 optimizer """adam""" +612 19 training_loop """owa""" +612 19 negative_sampler """basic""" +612 19 evaluator """rankbased""" +612 20 dataset """wn18rr""" 
+612 20 model """rotate""" +612 20 loss """marginranking""" +612 20 regularizer """no""" +612 20 optimizer """adam""" +612 20 training_loop """owa""" +612 20 negative_sampler """basic""" +612 20 evaluator """rankbased""" +612 21 dataset """wn18rr""" +612 21 model """rotate""" +612 21 loss """marginranking""" +612 21 regularizer """no""" +612 21 optimizer """adam""" +612 21 training_loop """owa""" +612 21 negative_sampler """basic""" +612 21 evaluator """rankbased""" +612 22 dataset """wn18rr""" +612 22 model """rotate""" +612 22 loss """marginranking""" +612 22 regularizer """no""" +612 22 optimizer """adam""" +612 22 training_loop """owa""" +612 22 negative_sampler """basic""" +612 22 evaluator """rankbased""" +612 23 dataset """wn18rr""" +612 23 model """rotate""" +612 23 loss """marginranking""" +612 23 regularizer """no""" +612 23 optimizer """adam""" +612 23 training_loop """owa""" +612 23 negative_sampler """basic""" +612 23 evaluator """rankbased""" +612 24 dataset """wn18rr""" +612 24 model """rotate""" +612 24 loss """marginranking""" +612 24 regularizer """no""" +612 24 optimizer """adam""" +612 24 training_loop """owa""" +612 24 negative_sampler """basic""" +612 24 evaluator """rankbased""" +612 25 dataset """wn18rr""" +612 25 model """rotate""" +612 25 loss """marginranking""" +612 25 regularizer """no""" +612 25 optimizer """adam""" +612 25 training_loop """owa""" +612 25 negative_sampler """basic""" +612 25 evaluator """rankbased""" +612 26 dataset """wn18rr""" +612 26 model """rotate""" +612 26 loss """marginranking""" +612 26 regularizer """no""" +612 26 optimizer """adam""" +612 26 training_loop """owa""" +612 26 negative_sampler """basic""" +612 26 evaluator """rankbased""" +612 27 dataset """wn18rr""" +612 27 model """rotate""" +612 27 loss """marginranking""" +612 27 regularizer """no""" +612 27 optimizer """adam""" +612 27 training_loop """owa""" +612 27 negative_sampler """basic""" +612 27 evaluator """rankbased""" +612 28 dataset """wn18rr""" 
+612 28 model """rotate""" +612 28 loss """marginranking""" +612 28 regularizer """no""" +612 28 optimizer """adam""" +612 28 training_loop """owa""" +612 28 negative_sampler """basic""" +612 28 evaluator """rankbased""" +612 29 dataset """wn18rr""" +612 29 model """rotate""" +612 29 loss """marginranking""" +612 29 regularizer """no""" +612 29 optimizer """adam""" +612 29 training_loop """owa""" +612 29 negative_sampler """basic""" +612 29 evaluator """rankbased""" +612 30 dataset """wn18rr""" +612 30 model """rotate""" +612 30 loss """marginranking""" +612 30 regularizer """no""" +612 30 optimizer """adam""" +612 30 training_loop """owa""" +612 30 negative_sampler """basic""" +612 30 evaluator """rankbased""" +612 31 dataset """wn18rr""" +612 31 model """rotate""" +612 31 loss """marginranking""" +612 31 regularizer """no""" +612 31 optimizer """adam""" +612 31 training_loop """owa""" +612 31 negative_sampler """basic""" +612 31 evaluator """rankbased""" +612 32 dataset """wn18rr""" +612 32 model """rotate""" +612 32 loss """marginranking""" +612 32 regularizer """no""" +612 32 optimizer """adam""" +612 32 training_loop """owa""" +612 32 negative_sampler """basic""" +612 32 evaluator """rankbased""" +612 33 dataset """wn18rr""" +612 33 model """rotate""" +612 33 loss """marginranking""" +612 33 regularizer """no""" +612 33 optimizer """adam""" +612 33 training_loop """owa""" +612 33 negative_sampler """basic""" +612 33 evaluator """rankbased""" +612 34 dataset """wn18rr""" +612 34 model """rotate""" +612 34 loss """marginranking""" +612 34 regularizer """no""" +612 34 optimizer """adam""" +612 34 training_loop """owa""" +612 34 negative_sampler """basic""" +612 34 evaluator """rankbased""" +612 35 dataset """wn18rr""" +612 35 model """rotate""" +612 35 loss """marginranking""" +612 35 regularizer """no""" +612 35 optimizer """adam""" +612 35 training_loop """owa""" +612 35 negative_sampler """basic""" +612 35 evaluator """rankbased""" +612 36 dataset """wn18rr""" 
+612 36 model """rotate""" +612 36 loss """marginranking""" +612 36 regularizer """no""" +612 36 optimizer """adam""" +612 36 training_loop """owa""" +612 36 negative_sampler """basic""" +612 36 evaluator """rankbased""" +612 37 dataset """wn18rr""" +612 37 model """rotate""" +612 37 loss """marginranking""" +612 37 regularizer """no""" +612 37 optimizer """adam""" +612 37 training_loop """owa""" +612 37 negative_sampler """basic""" +612 37 evaluator """rankbased""" +612 38 dataset """wn18rr""" +612 38 model """rotate""" +612 38 loss """marginranking""" +612 38 regularizer """no""" +612 38 optimizer """adam""" +612 38 training_loop """owa""" +612 38 negative_sampler """basic""" +612 38 evaluator """rankbased""" +612 39 dataset """wn18rr""" +612 39 model """rotate""" +612 39 loss """marginranking""" +612 39 regularizer """no""" +612 39 optimizer """adam""" +612 39 training_loop """owa""" +612 39 negative_sampler """basic""" +612 39 evaluator """rankbased""" +612 40 dataset """wn18rr""" +612 40 model """rotate""" +612 40 loss """marginranking""" +612 40 regularizer """no""" +612 40 optimizer """adam""" +612 40 training_loop """owa""" +612 40 negative_sampler """basic""" +612 40 evaluator """rankbased""" +612 41 dataset """wn18rr""" +612 41 model """rotate""" +612 41 loss """marginranking""" +612 41 regularizer """no""" +612 41 optimizer """adam""" +612 41 training_loop """owa""" +612 41 negative_sampler """basic""" +612 41 evaluator """rankbased""" +612 42 dataset """wn18rr""" +612 42 model """rotate""" +612 42 loss """marginranking""" +612 42 regularizer """no""" +612 42 optimizer """adam""" +612 42 training_loop """owa""" +612 42 negative_sampler """basic""" +612 42 evaluator """rankbased""" +612 43 dataset """wn18rr""" +612 43 model """rotate""" +612 43 loss """marginranking""" +612 43 regularizer """no""" +612 43 optimizer """adam""" +612 43 training_loop """owa""" +612 43 negative_sampler """basic""" +612 43 evaluator """rankbased""" +612 44 dataset """wn18rr""" 
+612 44 model """rotate""" +612 44 loss """marginranking""" +612 44 regularizer """no""" +612 44 optimizer """adam""" +612 44 training_loop """owa""" +612 44 negative_sampler """basic""" +612 44 evaluator """rankbased""" +612 45 dataset """wn18rr""" +612 45 model """rotate""" +612 45 loss """marginranking""" +612 45 regularizer """no""" +612 45 optimizer """adam""" +612 45 training_loop """owa""" +612 45 negative_sampler """basic""" +612 45 evaluator """rankbased""" +612 46 dataset """wn18rr""" +612 46 model """rotate""" +612 46 loss """marginranking""" +612 46 regularizer """no""" +612 46 optimizer """adam""" +612 46 training_loop """owa""" +612 46 negative_sampler """basic""" +612 46 evaluator """rankbased""" +612 47 dataset """wn18rr""" +612 47 model """rotate""" +612 47 loss """marginranking""" +612 47 regularizer """no""" +612 47 optimizer """adam""" +612 47 training_loop """owa""" +612 47 negative_sampler """basic""" +612 47 evaluator """rankbased""" +612 48 dataset """wn18rr""" +612 48 model """rotate""" +612 48 loss """marginranking""" +612 48 regularizer """no""" +612 48 optimizer """adam""" +612 48 training_loop """owa""" +612 48 negative_sampler """basic""" +612 48 evaluator """rankbased""" +612 49 dataset """wn18rr""" +612 49 model """rotate""" +612 49 loss """marginranking""" +612 49 regularizer """no""" +612 49 optimizer """adam""" +612 49 training_loop """owa""" +612 49 negative_sampler """basic""" +612 49 evaluator """rankbased""" +612 50 dataset """wn18rr""" +612 50 model """rotate""" +612 50 loss """marginranking""" +612 50 regularizer """no""" +612 50 optimizer """adam""" +612 50 training_loop """owa""" +612 50 negative_sampler """basic""" +612 50 evaluator """rankbased""" +612 51 dataset """wn18rr""" +612 51 model """rotate""" +612 51 loss """marginranking""" +612 51 regularizer """no""" +612 51 optimizer """adam""" +612 51 training_loop """owa""" +612 51 negative_sampler """basic""" +612 51 evaluator """rankbased""" +612 52 dataset """wn18rr""" 
+612 52 model """rotate""" +612 52 loss """marginranking""" +612 52 regularizer """no""" +612 52 optimizer """adam""" +612 52 training_loop """owa""" +612 52 negative_sampler """basic""" +612 52 evaluator """rankbased""" +612 53 dataset """wn18rr""" +612 53 model """rotate""" +612 53 loss """marginranking""" +612 53 regularizer """no""" +612 53 optimizer """adam""" +612 53 training_loop """owa""" +612 53 negative_sampler """basic""" +612 53 evaluator """rankbased""" +612 54 dataset """wn18rr""" +612 54 model """rotate""" +612 54 loss """marginranking""" +612 54 regularizer """no""" +612 54 optimizer """adam""" +612 54 training_loop """owa""" +612 54 negative_sampler """basic""" +612 54 evaluator """rankbased""" +612 55 dataset """wn18rr""" +612 55 model """rotate""" +612 55 loss """marginranking""" +612 55 regularizer """no""" +612 55 optimizer """adam""" +612 55 training_loop """owa""" +612 55 negative_sampler """basic""" +612 55 evaluator """rankbased""" +612 56 dataset """wn18rr""" +612 56 model """rotate""" +612 56 loss """marginranking""" +612 56 regularizer """no""" +612 56 optimizer """adam""" +612 56 training_loop """owa""" +612 56 negative_sampler """basic""" +612 56 evaluator """rankbased""" +612 57 dataset """wn18rr""" +612 57 model """rotate""" +612 57 loss """marginranking""" +612 57 regularizer """no""" +612 57 optimizer """adam""" +612 57 training_loop """owa""" +612 57 negative_sampler """basic""" +612 57 evaluator """rankbased""" +612 58 dataset """wn18rr""" +612 58 model """rotate""" +612 58 loss """marginranking""" +612 58 regularizer """no""" +612 58 optimizer """adam""" +612 58 training_loop """owa""" +612 58 negative_sampler """basic""" +612 58 evaluator """rankbased""" +612 59 dataset """wn18rr""" +612 59 model """rotate""" +612 59 loss """marginranking""" +612 59 regularizer """no""" +612 59 optimizer """adam""" +612 59 training_loop """owa""" +612 59 negative_sampler """basic""" +612 59 evaluator """rankbased""" +612 60 dataset """wn18rr""" 
+612 60 model """rotate""" +612 60 loss """marginranking""" +612 60 regularizer """no""" +612 60 optimizer """adam""" +612 60 training_loop """owa""" +612 60 negative_sampler """basic""" +612 60 evaluator """rankbased""" +612 61 dataset """wn18rr""" +612 61 model """rotate""" +612 61 loss """marginranking""" +612 61 regularizer """no""" +612 61 optimizer """adam""" +612 61 training_loop """owa""" +612 61 negative_sampler """basic""" +612 61 evaluator """rankbased""" +612 62 dataset """wn18rr""" +612 62 model """rotate""" +612 62 loss """marginranking""" +612 62 regularizer """no""" +612 62 optimizer """adam""" +612 62 training_loop """owa""" +612 62 negative_sampler """basic""" +612 62 evaluator """rankbased""" +612 63 dataset """wn18rr""" +612 63 model """rotate""" +612 63 loss """marginranking""" +612 63 regularizer """no""" +612 63 optimizer """adam""" +612 63 training_loop """owa""" +612 63 negative_sampler """basic""" +612 63 evaluator """rankbased""" +612 64 dataset """wn18rr""" +612 64 model """rotate""" +612 64 loss """marginranking""" +612 64 regularizer """no""" +612 64 optimizer """adam""" +612 64 training_loop """owa""" +612 64 negative_sampler """basic""" +612 64 evaluator """rankbased""" +612 65 dataset """wn18rr""" +612 65 model """rotate""" +612 65 loss """marginranking""" +612 65 regularizer """no""" +612 65 optimizer """adam""" +612 65 training_loop """owa""" +612 65 negative_sampler """basic""" +612 65 evaluator """rankbased""" +612 66 dataset """wn18rr""" +612 66 model """rotate""" +612 66 loss """marginranking""" +612 66 regularizer """no""" +612 66 optimizer """adam""" +612 66 training_loop """owa""" +612 66 negative_sampler """basic""" +612 66 evaluator """rankbased""" +612 67 dataset """wn18rr""" +612 67 model """rotate""" +612 67 loss """marginranking""" +612 67 regularizer """no""" +612 67 optimizer """adam""" +612 67 training_loop """owa""" +612 67 negative_sampler """basic""" +612 67 evaluator """rankbased""" +612 68 dataset """wn18rr""" 
+612 68 model """rotate""" +612 68 loss """marginranking""" +612 68 regularizer """no""" +612 68 optimizer """adam""" +612 68 training_loop """owa""" +612 68 negative_sampler """basic""" +612 68 evaluator """rankbased""" +612 69 dataset """wn18rr""" +612 69 model """rotate""" +612 69 loss """marginranking""" +612 69 regularizer """no""" +612 69 optimizer """adam""" +612 69 training_loop """owa""" +612 69 negative_sampler """basic""" +612 69 evaluator """rankbased""" +612 70 dataset """wn18rr""" +612 70 model """rotate""" +612 70 loss """marginranking""" +612 70 regularizer """no""" +612 70 optimizer """adam""" +612 70 training_loop """owa""" +612 70 negative_sampler """basic""" +612 70 evaluator """rankbased""" +612 71 dataset """wn18rr""" +612 71 model """rotate""" +612 71 loss """marginranking""" +612 71 regularizer """no""" +612 71 optimizer """adam""" +612 71 training_loop """owa""" +612 71 negative_sampler """basic""" +612 71 evaluator """rankbased""" +612 72 dataset """wn18rr""" +612 72 model """rotate""" +612 72 loss """marginranking""" +612 72 regularizer """no""" +612 72 optimizer """adam""" +612 72 training_loop """owa""" +612 72 negative_sampler """basic""" +612 72 evaluator """rankbased""" +612 73 dataset """wn18rr""" +612 73 model """rotate""" +612 73 loss """marginranking""" +612 73 regularizer """no""" +612 73 optimizer """adam""" +612 73 training_loop """owa""" +612 73 negative_sampler """basic""" +612 73 evaluator """rankbased""" +612 74 dataset """wn18rr""" +612 74 model """rotate""" +612 74 loss """marginranking""" +612 74 regularizer """no""" +612 74 optimizer """adam""" +612 74 training_loop """owa""" +612 74 negative_sampler """basic""" +612 74 evaluator """rankbased""" +612 75 dataset """wn18rr""" +612 75 model """rotate""" +612 75 loss """marginranking""" +612 75 regularizer """no""" +612 75 optimizer """adam""" +612 75 training_loop """owa""" +612 75 negative_sampler """basic""" +612 75 evaluator """rankbased""" +612 76 dataset """wn18rr""" 
+612 76 model """rotate""" +612 76 loss """marginranking""" +612 76 regularizer """no""" +612 76 optimizer """adam""" +612 76 training_loop """owa""" +612 76 negative_sampler """basic""" +612 76 evaluator """rankbased""" +612 77 dataset """wn18rr""" +612 77 model """rotate""" +612 77 loss """marginranking""" +612 77 regularizer """no""" +612 77 optimizer """adam""" +612 77 training_loop """owa""" +612 77 negative_sampler """basic""" +612 77 evaluator """rankbased""" +612 78 dataset """wn18rr""" +612 78 model """rotate""" +612 78 loss """marginranking""" +612 78 regularizer """no""" +612 78 optimizer """adam""" +612 78 training_loop """owa""" +612 78 negative_sampler """basic""" +612 78 evaluator """rankbased""" +612 79 dataset """wn18rr""" +612 79 model """rotate""" +612 79 loss """marginranking""" +612 79 regularizer """no""" +612 79 optimizer """adam""" +612 79 training_loop """owa""" +612 79 negative_sampler """basic""" +612 79 evaluator """rankbased""" +612 80 dataset """wn18rr""" +612 80 model """rotate""" +612 80 loss """marginranking""" +612 80 regularizer """no""" +612 80 optimizer """adam""" +612 80 training_loop """owa""" +612 80 negative_sampler """basic""" +612 80 evaluator """rankbased""" +612 81 dataset """wn18rr""" +612 81 model """rotate""" +612 81 loss """marginranking""" +612 81 regularizer """no""" +612 81 optimizer """adam""" +612 81 training_loop """owa""" +612 81 negative_sampler """basic""" +612 81 evaluator """rankbased""" +612 82 dataset """wn18rr""" +612 82 model """rotate""" +612 82 loss """marginranking""" +612 82 regularizer """no""" +612 82 optimizer """adam""" +612 82 training_loop """owa""" +612 82 negative_sampler """basic""" +612 82 evaluator """rankbased""" +612 83 dataset """wn18rr""" +612 83 model """rotate""" +612 83 loss """marginranking""" +612 83 regularizer """no""" +612 83 optimizer """adam""" +612 83 training_loop """owa""" +612 83 negative_sampler """basic""" +612 83 evaluator """rankbased""" +612 84 dataset """wn18rr""" 
+612 84 model """rotate""" +612 84 loss """marginranking""" +612 84 regularizer """no""" +612 84 optimizer """adam""" +612 84 training_loop """owa""" +612 84 negative_sampler """basic""" +612 84 evaluator """rankbased""" +612 85 dataset """wn18rr""" +612 85 model """rotate""" +612 85 loss """marginranking""" +612 85 regularizer """no""" +612 85 optimizer """adam""" +612 85 training_loop """owa""" +612 85 negative_sampler """basic""" +612 85 evaluator """rankbased""" +612 86 dataset """wn18rr""" +612 86 model """rotate""" +612 86 loss """marginranking""" +612 86 regularizer """no""" +612 86 optimizer """adam""" +612 86 training_loop """owa""" +612 86 negative_sampler """basic""" +612 86 evaluator """rankbased""" +612 87 dataset """wn18rr""" +612 87 model """rotate""" +612 87 loss """marginranking""" +612 87 regularizer """no""" +612 87 optimizer """adam""" +612 87 training_loop """owa""" +612 87 negative_sampler """basic""" +612 87 evaluator """rankbased""" +612 88 dataset """wn18rr""" +612 88 model """rotate""" +612 88 loss """marginranking""" +612 88 regularizer """no""" +612 88 optimizer """adam""" +612 88 training_loop """owa""" +612 88 negative_sampler """basic""" +612 88 evaluator """rankbased""" +612 89 dataset """wn18rr""" +612 89 model """rotate""" +612 89 loss """marginranking""" +612 89 regularizer """no""" +612 89 optimizer """adam""" +612 89 training_loop """owa""" +612 89 negative_sampler """basic""" +612 89 evaluator """rankbased""" +612 90 dataset """wn18rr""" +612 90 model """rotate""" +612 90 loss """marginranking""" +612 90 regularizer """no""" +612 90 optimizer """adam""" +612 90 training_loop """owa""" +612 90 negative_sampler """basic""" +612 90 evaluator """rankbased""" +612 91 dataset """wn18rr""" +612 91 model """rotate""" +612 91 loss """marginranking""" +612 91 regularizer """no""" +612 91 optimizer """adam""" +612 91 training_loop """owa""" +612 91 negative_sampler """basic""" +612 91 evaluator """rankbased""" +612 92 dataset """wn18rr""" 
+612 92 model """rotate""" +612 92 loss """marginranking""" +612 92 regularizer """no""" +612 92 optimizer """adam""" +612 92 training_loop """owa""" +612 92 negative_sampler """basic""" +612 92 evaluator """rankbased""" +612 93 dataset """wn18rr""" +612 93 model """rotate""" +612 93 loss """marginranking""" +612 93 regularizer """no""" +612 93 optimizer """adam""" +612 93 training_loop """owa""" +612 93 negative_sampler """basic""" +612 93 evaluator """rankbased""" +612 94 dataset """wn18rr""" +612 94 model """rotate""" +612 94 loss """marginranking""" +612 94 regularizer """no""" +612 94 optimizer """adam""" +612 94 training_loop """owa""" +612 94 negative_sampler """basic""" +612 94 evaluator """rankbased""" +612 95 dataset """wn18rr""" +612 95 model """rotate""" +612 95 loss """marginranking""" +612 95 regularizer """no""" +612 95 optimizer """adam""" +612 95 training_loop """owa""" +612 95 negative_sampler """basic""" +612 95 evaluator """rankbased""" +612 96 dataset """wn18rr""" +612 96 model """rotate""" +612 96 loss """marginranking""" +612 96 regularizer """no""" +612 96 optimizer """adam""" +612 96 training_loop """owa""" +612 96 negative_sampler """basic""" +612 96 evaluator """rankbased""" +612 97 dataset """wn18rr""" +612 97 model """rotate""" +612 97 loss """marginranking""" +612 97 regularizer """no""" +612 97 optimizer """adam""" +612 97 training_loop """owa""" +612 97 negative_sampler """basic""" +612 97 evaluator """rankbased""" +612 98 dataset """wn18rr""" +612 98 model """rotate""" +612 98 loss """marginranking""" +612 98 regularizer """no""" +612 98 optimizer """adam""" +612 98 training_loop """owa""" +612 98 negative_sampler """basic""" +612 98 evaluator """rankbased""" +612 99 dataset """wn18rr""" +612 99 model """rotate""" +612 99 loss """marginranking""" +612 99 regularizer """no""" +612 99 optimizer """adam""" +612 99 training_loop """owa""" +612 99 negative_sampler """basic""" +612 99 evaluator """rankbased""" +612 100 dataset 
"""wn18rr""" +612 100 model """rotate""" +612 100 loss """marginranking""" +612 100 regularizer """no""" +612 100 optimizer """adam""" +612 100 training_loop """owa""" +612 100 negative_sampler """basic""" +612 100 evaluator """rankbased""" +613 1 model.embedding_dim 2.0 +613 1 loss.margin 4.553928799927828 +613 1 loss.adversarial_temperature 0.2697795873461045 +613 1 optimizer.lr 0.01031868319064725 +613 1 negative_sampler.num_negs_per_pos 38.0 +613 1 training.batch_size 2.0 +613 2 model.embedding_dim 1.0 +613 2 loss.margin 10.4034915677578 +613 2 loss.adversarial_temperature 0.2124011544496949 +613 2 optimizer.lr 0.007629054326138246 +613 2 negative_sampler.num_negs_per_pos 59.0 +613 2 training.batch_size 2.0 +613 3 model.embedding_dim 1.0 +613 3 loss.margin 8.217864345212696 +613 3 loss.adversarial_temperature 0.49870818684279977 +613 3 optimizer.lr 0.003282207234748682 +613 3 negative_sampler.num_negs_per_pos 44.0 +613 3 training.batch_size 1.0 +613 4 model.embedding_dim 1.0 +613 4 loss.margin 23.115451261284278 +613 4 loss.adversarial_temperature 0.65115817438642 +613 4 optimizer.lr 0.0020553091285717357 +613 4 negative_sampler.num_negs_per_pos 12.0 +613 4 training.batch_size 1.0 +613 5 model.embedding_dim 0.0 +613 5 loss.margin 20.68335039239239 +613 5 loss.adversarial_temperature 0.5764699581628121 +613 5 optimizer.lr 0.002779079646468145 +613 5 negative_sampler.num_negs_per_pos 34.0 +613 5 training.batch_size 2.0 +613 6 model.embedding_dim 0.0 +613 6 loss.margin 6.850916089651555 +613 6 loss.adversarial_temperature 0.9615661485274073 +613 6 optimizer.lr 0.030649524328933794 +613 6 negative_sampler.num_negs_per_pos 15.0 +613 6 training.batch_size 2.0 +613 7 model.embedding_dim 2.0 +613 7 loss.margin 7.326817002502337 +613 7 loss.adversarial_temperature 0.2819731630084974 +613 7 optimizer.lr 0.0013216320284491032 +613 7 negative_sampler.num_negs_per_pos 4.0 +613 7 training.batch_size 1.0 +613 8 model.embedding_dim 2.0 +613 8 loss.margin 10.796623851752994 
+613 8 loss.adversarial_temperature 0.5816459431770986 +613 8 optimizer.lr 0.09255830714763165 +613 8 negative_sampler.num_negs_per_pos 29.0 +613 8 training.batch_size 0.0 +613 9 model.embedding_dim 0.0 +613 9 loss.margin 24.43892563128668 +613 9 loss.adversarial_temperature 0.8587784952515736 +613 9 optimizer.lr 0.014347359073773441 +613 9 negative_sampler.num_negs_per_pos 71.0 +613 9 training.batch_size 2.0 +613 10 model.embedding_dim 2.0 +613 10 loss.margin 2.5889702849845326 +613 10 loss.adversarial_temperature 0.5462352669660087 +613 10 optimizer.lr 0.04605890032987289 +613 10 negative_sampler.num_negs_per_pos 54.0 +613 10 training.batch_size 0.0 +613 11 model.embedding_dim 0.0 +613 11 loss.margin 12.193619777123871 +613 11 loss.adversarial_temperature 0.5481510996076364 +613 11 optimizer.lr 0.006454811251882451 +613 11 negative_sampler.num_negs_per_pos 83.0 +613 11 training.batch_size 2.0 +613 12 model.embedding_dim 0.0 +613 12 loss.margin 7.696740923943779 +613 12 loss.adversarial_temperature 0.34652898626598755 +613 12 optimizer.lr 0.01469821880780144 +613 12 negative_sampler.num_negs_per_pos 16.0 +613 12 training.batch_size 2.0 +613 13 model.embedding_dim 0.0 +613 13 loss.margin 9.823140197356551 +613 13 loss.adversarial_temperature 0.23290705434208683 +613 13 optimizer.lr 0.0018118634156685 +613 13 negative_sampler.num_negs_per_pos 32.0 +613 13 training.batch_size 1.0 +613 14 model.embedding_dim 1.0 +613 14 loss.margin 11.467714218930436 +613 14 loss.adversarial_temperature 0.4162530615677578 +613 14 optimizer.lr 0.035996441786333086 +613 14 negative_sampler.num_negs_per_pos 53.0 +613 14 training.batch_size 2.0 +613 15 model.embedding_dim 1.0 +613 15 loss.margin 6.274618441464043 +613 15 loss.adversarial_temperature 0.9013799242847956 +613 15 optimizer.lr 0.00292249810616459 +613 15 negative_sampler.num_negs_per_pos 46.0 +613 15 training.batch_size 2.0 +613 16 model.embedding_dim 0.0 +613 16 loss.margin 6.59204227494535 +613 16 
loss.adversarial_temperature 0.120824847172747 +613 16 optimizer.lr 0.0012349432674280494 +613 16 negative_sampler.num_negs_per_pos 20.0 +613 16 training.batch_size 2.0 +613 17 model.embedding_dim 2.0 +613 17 loss.margin 7.5180236757437475 +613 17 loss.adversarial_temperature 0.5344005456486388 +613 17 optimizer.lr 0.004966874498376616 +613 17 negative_sampler.num_negs_per_pos 67.0 +613 17 training.batch_size 1.0 +613 18 model.embedding_dim 1.0 +613 18 loss.margin 20.93783262555667 +613 18 loss.adversarial_temperature 0.8104099028364052 +613 18 optimizer.lr 0.0018139290461218192 +613 18 negative_sampler.num_negs_per_pos 24.0 +613 18 training.batch_size 2.0 +613 19 model.embedding_dim 0.0 +613 19 loss.margin 23.738461186728706 +613 19 loss.adversarial_temperature 0.2732321768558294 +613 19 optimizer.lr 0.017096026517245396 +613 19 negative_sampler.num_negs_per_pos 3.0 +613 19 training.batch_size 0.0 +613 20 model.embedding_dim 2.0 +613 20 loss.margin 10.352165202559885 +613 20 loss.adversarial_temperature 0.34712966776815896 +613 20 optimizer.lr 0.07848710373678348 +613 20 negative_sampler.num_negs_per_pos 97.0 +613 20 training.batch_size 0.0 +613 21 model.embedding_dim 1.0 +613 21 loss.margin 22.398618274395165 +613 21 loss.adversarial_temperature 0.659539456966005 +613 21 optimizer.lr 0.01014318533229606 +613 21 negative_sampler.num_negs_per_pos 76.0 +613 21 training.batch_size 1.0 +613 22 model.embedding_dim 1.0 +613 22 loss.margin 4.864824823000021 +613 22 loss.adversarial_temperature 0.6181867824726963 +613 22 optimizer.lr 0.05401398127725979 +613 22 negative_sampler.num_negs_per_pos 30.0 +613 22 training.batch_size 1.0 +613 23 model.embedding_dim 2.0 +613 23 loss.margin 24.179492356113492 +613 23 loss.adversarial_temperature 0.7634983368749051 +613 23 optimizer.lr 0.002525400716934938 +613 23 negative_sampler.num_negs_per_pos 8.0 +613 23 training.batch_size 1.0 +613 24 model.embedding_dim 1.0 +613 24 loss.margin 8.799365064720812 +613 24 
loss.adversarial_temperature 0.9205461988998078 +613 24 optimizer.lr 0.0019761591232335916 +613 24 negative_sampler.num_negs_per_pos 64.0 +613 24 training.batch_size 1.0 +613 25 model.embedding_dim 2.0 +613 25 loss.margin 23.52736161109756 +613 25 loss.adversarial_temperature 0.4518556388034027 +613 25 optimizer.lr 0.011263258424077506 +613 25 negative_sampler.num_negs_per_pos 2.0 +613 25 training.batch_size 2.0 +613 26 model.embedding_dim 0.0 +613 26 loss.margin 26.234611709442326 +613 26 loss.adversarial_temperature 0.19208290975587758 +613 26 optimizer.lr 0.04910857548834258 +613 26 negative_sampler.num_negs_per_pos 47.0 +613 26 training.batch_size 2.0 +613 27 model.embedding_dim 2.0 +613 27 loss.margin 8.028124086712316 +613 27 loss.adversarial_temperature 0.8784471813430411 +613 27 optimizer.lr 0.01601853990984403 +613 27 negative_sampler.num_negs_per_pos 25.0 +613 27 training.batch_size 1.0 +613 28 model.embedding_dim 0.0 +613 28 loss.margin 5.726952630305874 +613 28 loss.adversarial_temperature 0.9264907461298314 +613 28 optimizer.lr 0.005063272755048562 +613 28 negative_sampler.num_negs_per_pos 47.0 +613 28 training.batch_size 1.0 +613 29 model.embedding_dim 1.0 +613 29 loss.margin 5.750097086563992 +613 29 loss.adversarial_temperature 0.991244150482603 +613 29 optimizer.lr 0.00783654468352839 +613 29 negative_sampler.num_negs_per_pos 67.0 +613 29 training.batch_size 1.0 +613 30 model.embedding_dim 0.0 +613 30 loss.margin 14.286989455453247 +613 30 loss.adversarial_temperature 0.5693739765647619 +613 30 optimizer.lr 0.0013090825956668137 +613 30 negative_sampler.num_negs_per_pos 65.0 +613 30 training.batch_size 0.0 +613 31 model.embedding_dim 1.0 +613 31 loss.margin 19.258361397549702 +613 31 loss.adversarial_temperature 0.7555496770045008 +613 31 optimizer.lr 0.001354871625095429 +613 31 negative_sampler.num_negs_per_pos 63.0 +613 31 training.batch_size 2.0 +613 32 model.embedding_dim 0.0 +613 32 loss.margin 23.332357910814782 +613 32 
loss.adversarial_temperature 0.4869771579920864 +613 32 optimizer.lr 0.0027531029869150895 +613 32 negative_sampler.num_negs_per_pos 57.0 +613 32 training.batch_size 1.0 +613 33 model.embedding_dim 0.0 +613 33 loss.margin 14.458852119125531 +613 33 loss.adversarial_temperature 0.9117663175788713 +613 33 optimizer.lr 0.022029343610965358 +613 33 negative_sampler.num_negs_per_pos 49.0 +613 33 training.batch_size 2.0 +613 34 model.embedding_dim 1.0 +613 34 loss.margin 28.19944147772417 +613 34 loss.adversarial_temperature 0.8513822028624168 +613 34 optimizer.lr 0.06825044447617408 +613 34 negative_sampler.num_negs_per_pos 80.0 +613 34 training.batch_size 1.0 +613 35 model.embedding_dim 1.0 +613 35 loss.margin 16.94800892870471 +613 35 loss.adversarial_temperature 0.8499732998695133 +613 35 optimizer.lr 0.08325658623960477 +613 35 negative_sampler.num_negs_per_pos 24.0 +613 35 training.batch_size 1.0 +613 36 model.embedding_dim 0.0 +613 36 loss.margin 14.712884378021322 +613 36 loss.adversarial_temperature 0.94554315759086 +613 36 optimizer.lr 0.0028879155979751898 +613 36 negative_sampler.num_negs_per_pos 54.0 +613 36 training.batch_size 0.0 +613 37 model.embedding_dim 2.0 +613 37 loss.margin 19.83974755288672 +613 37 loss.adversarial_temperature 0.31109754184909927 +613 37 optimizer.lr 0.005792026731383512 +613 37 negative_sampler.num_negs_per_pos 15.0 +613 37 training.batch_size 2.0 +613 38 model.embedding_dim 2.0 +613 38 loss.margin 4.743275736468479 +613 38 loss.adversarial_temperature 0.3918188417182706 +613 38 optimizer.lr 0.011187650698334956 +613 38 negative_sampler.num_negs_per_pos 29.0 +613 38 training.batch_size 2.0 +613 39 model.embedding_dim 1.0 +613 39 loss.margin 19.269837982414117 +613 39 loss.adversarial_temperature 0.13773616377094222 +613 39 optimizer.lr 0.0045825151068965226 +613 39 negative_sampler.num_negs_per_pos 1.0 +613 39 training.batch_size 2.0 +613 40 model.embedding_dim 0.0 +613 40 loss.margin 17.66374474050067 +613 40 
loss.adversarial_temperature 0.4066338304485014 +613 40 optimizer.lr 0.08395541034227899 +613 40 negative_sampler.num_negs_per_pos 23.0 +613 40 training.batch_size 2.0 +613 41 model.embedding_dim 2.0 +613 41 loss.margin 24.653380649698658 +613 41 loss.adversarial_temperature 0.5430178290957005 +613 41 optimizer.lr 0.001760832742696569 +613 41 negative_sampler.num_negs_per_pos 28.0 +613 41 training.batch_size 0.0 +613 42 model.embedding_dim 1.0 +613 42 loss.margin 14.718565703178609 +613 42 loss.adversarial_temperature 0.881041702873397 +613 42 optimizer.lr 0.05587356034358414 +613 42 negative_sampler.num_negs_per_pos 20.0 +613 42 training.batch_size 1.0 +613 43 model.embedding_dim 2.0 +613 43 loss.margin 24.015809469722086 +613 43 loss.adversarial_temperature 0.7829151251541683 +613 43 optimizer.lr 0.05983271892533555 +613 43 negative_sampler.num_negs_per_pos 61.0 +613 43 training.batch_size 2.0 +613 44 model.embedding_dim 0.0 +613 44 loss.margin 18.790279306930948 +613 44 loss.adversarial_temperature 0.8213517713907436 +613 44 optimizer.lr 0.049776560498805154 +613 44 negative_sampler.num_negs_per_pos 58.0 +613 44 training.batch_size 1.0 +613 45 model.embedding_dim 2.0 +613 45 loss.margin 15.47846892544401 +613 45 loss.adversarial_temperature 0.6383460342754809 +613 45 optimizer.lr 0.007780661322777138 +613 45 negative_sampler.num_negs_per_pos 28.0 +613 45 training.batch_size 1.0 +613 46 model.embedding_dim 1.0 +613 46 loss.margin 7.845674921711298 +613 46 loss.adversarial_temperature 0.10612841816174721 +613 46 optimizer.lr 0.0017960841378866473 +613 46 negative_sampler.num_negs_per_pos 22.0 +613 46 training.batch_size 0.0 +613 47 model.embedding_dim 1.0 +613 47 loss.margin 16.302522911524395 +613 47 loss.adversarial_temperature 0.859813537481662 +613 47 optimizer.lr 0.0011728752181910398 +613 47 negative_sampler.num_negs_per_pos 57.0 +613 47 training.batch_size 0.0 +613 48 model.embedding_dim 1.0 +613 48 loss.margin 23.50956425585649 +613 48 
loss.adversarial_temperature 0.323595215025584 +613 48 optimizer.lr 0.02064730772287934 +613 48 negative_sampler.num_negs_per_pos 35.0 +613 48 training.batch_size 0.0 +613 49 model.embedding_dim 1.0 +613 49 loss.margin 25.296329420915157 +613 49 loss.adversarial_temperature 0.5290614787420503 +613 49 optimizer.lr 0.012123411286874594 +613 49 negative_sampler.num_negs_per_pos 91.0 +613 49 training.batch_size 1.0 +613 50 model.embedding_dim 0.0 +613 50 loss.margin 6.164900882599066 +613 50 loss.adversarial_temperature 0.24633534477919974 +613 50 optimizer.lr 0.008062046440243869 +613 50 negative_sampler.num_negs_per_pos 8.0 +613 50 training.batch_size 1.0 +613 51 model.embedding_dim 0.0 +613 51 loss.margin 18.238775371483428 +613 51 loss.adversarial_temperature 0.9151020491888551 +613 51 optimizer.lr 0.039117463131752496 +613 51 negative_sampler.num_negs_per_pos 26.0 +613 51 training.batch_size 1.0 +613 1 dataset """wn18rr""" +613 1 model """rotate""" +613 1 loss """nssa""" +613 1 regularizer """no""" +613 1 optimizer """adam""" +613 1 training_loop """owa""" +613 1 negative_sampler """basic""" +613 1 evaluator """rankbased""" +613 2 dataset """wn18rr""" +613 2 model """rotate""" +613 2 loss """nssa""" +613 2 regularizer """no""" +613 2 optimizer """adam""" +613 2 training_loop """owa""" +613 2 negative_sampler """basic""" +613 2 evaluator """rankbased""" +613 3 dataset """wn18rr""" +613 3 model """rotate""" +613 3 loss """nssa""" +613 3 regularizer """no""" +613 3 optimizer """adam""" +613 3 training_loop """owa""" +613 3 negative_sampler """basic""" +613 3 evaluator """rankbased""" +613 4 dataset """wn18rr""" +613 4 model """rotate""" +613 4 loss """nssa""" +613 4 regularizer """no""" +613 4 optimizer """adam""" +613 4 training_loop """owa""" +613 4 negative_sampler """basic""" +613 4 evaluator """rankbased""" +613 5 dataset """wn18rr""" +613 5 model """rotate""" +613 5 loss """nssa""" +613 5 regularizer """no""" +613 5 optimizer """adam""" +613 5 training_loop 
"""owa""" +613 5 negative_sampler """basic""" +613 5 evaluator """rankbased""" +613 6 dataset """wn18rr""" +613 6 model """rotate""" +613 6 loss """nssa""" +613 6 regularizer """no""" +613 6 optimizer """adam""" +613 6 training_loop """owa""" +613 6 negative_sampler """basic""" +613 6 evaluator """rankbased""" +613 7 dataset """wn18rr""" +613 7 model """rotate""" +613 7 loss """nssa""" +613 7 regularizer """no""" +613 7 optimizer """adam""" +613 7 training_loop """owa""" +613 7 negative_sampler """basic""" +613 7 evaluator """rankbased""" +613 8 dataset """wn18rr""" +613 8 model """rotate""" +613 8 loss """nssa""" +613 8 regularizer """no""" +613 8 optimizer """adam""" +613 8 training_loop """owa""" +613 8 negative_sampler """basic""" +613 8 evaluator """rankbased""" +613 9 dataset """wn18rr""" +613 9 model """rotate""" +613 9 loss """nssa""" +613 9 regularizer """no""" +613 9 optimizer """adam""" +613 9 training_loop """owa""" +613 9 negative_sampler """basic""" +613 9 evaluator """rankbased""" +613 10 dataset """wn18rr""" +613 10 model """rotate""" +613 10 loss """nssa""" +613 10 regularizer """no""" +613 10 optimizer """adam""" +613 10 training_loop """owa""" +613 10 negative_sampler """basic""" +613 10 evaluator """rankbased""" +613 11 dataset """wn18rr""" +613 11 model """rotate""" +613 11 loss """nssa""" +613 11 regularizer """no""" +613 11 optimizer """adam""" +613 11 training_loop """owa""" +613 11 negative_sampler """basic""" +613 11 evaluator """rankbased""" +613 12 dataset """wn18rr""" +613 12 model """rotate""" +613 12 loss """nssa""" +613 12 regularizer """no""" +613 12 optimizer """adam""" +613 12 training_loop """owa""" +613 12 negative_sampler """basic""" +613 12 evaluator """rankbased""" +613 13 dataset """wn18rr""" +613 13 model """rotate""" +613 13 loss """nssa""" +613 13 regularizer """no""" +613 13 optimizer """adam""" +613 13 training_loop """owa""" +613 13 negative_sampler """basic""" +613 13 evaluator """rankbased""" +613 14 dataset 
"""wn18rr""" +613 14 model """rotate""" +613 14 loss """nssa""" +613 14 regularizer """no""" +613 14 optimizer """adam""" +613 14 training_loop """owa""" +613 14 negative_sampler """basic""" +613 14 evaluator """rankbased""" +613 15 dataset """wn18rr""" +613 15 model """rotate""" +613 15 loss """nssa""" +613 15 regularizer """no""" +613 15 optimizer """adam""" +613 15 training_loop """owa""" +613 15 negative_sampler """basic""" +613 15 evaluator """rankbased""" +613 16 dataset """wn18rr""" +613 16 model """rotate""" +613 16 loss """nssa""" +613 16 regularizer """no""" +613 16 optimizer """adam""" +613 16 training_loop """owa""" +613 16 negative_sampler """basic""" +613 16 evaluator """rankbased""" +613 17 dataset """wn18rr""" +613 17 model """rotate""" +613 17 loss """nssa""" +613 17 regularizer """no""" +613 17 optimizer """adam""" +613 17 training_loop """owa""" +613 17 negative_sampler """basic""" +613 17 evaluator """rankbased""" +613 18 dataset """wn18rr""" +613 18 model """rotate""" +613 18 loss """nssa""" +613 18 regularizer """no""" +613 18 optimizer """adam""" +613 18 training_loop """owa""" +613 18 negative_sampler """basic""" +613 18 evaluator """rankbased""" +613 19 dataset """wn18rr""" +613 19 model """rotate""" +613 19 loss """nssa""" +613 19 regularizer """no""" +613 19 optimizer """adam""" +613 19 training_loop """owa""" +613 19 negative_sampler """basic""" +613 19 evaluator """rankbased""" +613 20 dataset """wn18rr""" +613 20 model """rotate""" +613 20 loss """nssa""" +613 20 regularizer """no""" +613 20 optimizer """adam""" +613 20 training_loop """owa""" +613 20 negative_sampler """basic""" +613 20 evaluator """rankbased""" +613 21 dataset """wn18rr""" +613 21 model """rotate""" +613 21 loss """nssa""" +613 21 regularizer """no""" +613 21 optimizer """adam""" +613 21 training_loop """owa""" +613 21 negative_sampler """basic""" +613 21 evaluator """rankbased""" +613 22 dataset """wn18rr""" +613 22 model """rotate""" +613 22 loss """nssa""" +613 22 
regularizer """no""" +613 22 optimizer """adam""" +613 22 training_loop """owa""" +613 22 negative_sampler """basic""" +613 22 evaluator """rankbased""" +613 23 dataset """wn18rr""" +613 23 model """rotate""" +613 23 loss """nssa""" +613 23 regularizer """no""" +613 23 optimizer """adam""" +613 23 training_loop """owa""" +613 23 negative_sampler """basic""" +613 23 evaluator """rankbased""" +613 24 dataset """wn18rr""" +613 24 model """rotate""" +613 24 loss """nssa""" +613 24 regularizer """no""" +613 24 optimizer """adam""" +613 24 training_loop """owa""" +613 24 negative_sampler """basic""" +613 24 evaluator """rankbased""" +613 25 dataset """wn18rr""" +613 25 model """rotate""" +613 25 loss """nssa""" +613 25 regularizer """no""" +613 25 optimizer """adam""" +613 25 training_loop """owa""" +613 25 negative_sampler """basic""" +613 25 evaluator """rankbased""" +613 26 dataset """wn18rr""" +613 26 model """rotate""" +613 26 loss """nssa""" +613 26 regularizer """no""" +613 26 optimizer """adam""" +613 26 training_loop """owa""" +613 26 negative_sampler """basic""" +613 26 evaluator """rankbased""" +613 27 dataset """wn18rr""" +613 27 model """rotate""" +613 27 loss """nssa""" +613 27 regularizer """no""" +613 27 optimizer """adam""" +613 27 training_loop """owa""" +613 27 negative_sampler """basic""" +613 27 evaluator """rankbased""" +613 28 dataset """wn18rr""" +613 28 model """rotate""" +613 28 loss """nssa""" +613 28 regularizer """no""" +613 28 optimizer """adam""" +613 28 training_loop """owa""" +613 28 negative_sampler """basic""" +613 28 evaluator """rankbased""" +613 29 dataset """wn18rr""" +613 29 model """rotate""" +613 29 loss """nssa""" +613 29 regularizer """no""" +613 29 optimizer """adam""" +613 29 training_loop """owa""" +613 29 negative_sampler """basic""" +613 29 evaluator """rankbased""" +613 30 dataset """wn18rr""" +613 30 model """rotate""" +613 30 loss """nssa""" +613 30 regularizer """no""" +613 30 optimizer """adam""" +613 30 training_loop 
"""owa""" +613 30 negative_sampler """basic""" +613 30 evaluator """rankbased""" +613 31 dataset """wn18rr""" +613 31 model """rotate""" +613 31 loss """nssa""" +613 31 regularizer """no""" +613 31 optimizer """adam""" +613 31 training_loop """owa""" +613 31 negative_sampler """basic""" +613 31 evaluator """rankbased""" +613 32 dataset """wn18rr""" +613 32 model """rotate""" +613 32 loss """nssa""" +613 32 regularizer """no""" +613 32 optimizer """adam""" +613 32 training_loop """owa""" +613 32 negative_sampler """basic""" +613 32 evaluator """rankbased""" +613 33 dataset """wn18rr""" +613 33 model """rotate""" +613 33 loss """nssa""" +613 33 regularizer """no""" +613 33 optimizer """adam""" +613 33 training_loop """owa""" +613 33 negative_sampler """basic""" +613 33 evaluator """rankbased""" +613 34 dataset """wn18rr""" +613 34 model """rotate""" +613 34 loss """nssa""" +613 34 regularizer """no""" +613 34 optimizer """adam""" +613 34 training_loop """owa""" +613 34 negative_sampler """basic""" +613 34 evaluator """rankbased""" +613 35 dataset """wn18rr""" +613 35 model """rotate""" +613 35 loss """nssa""" +613 35 regularizer """no""" +613 35 optimizer """adam""" +613 35 training_loop """owa""" +613 35 negative_sampler """basic""" +613 35 evaluator """rankbased""" +613 36 dataset """wn18rr""" +613 36 model """rotate""" +613 36 loss """nssa""" +613 36 regularizer """no""" +613 36 optimizer """adam""" +613 36 training_loop """owa""" +613 36 negative_sampler """basic""" +613 36 evaluator """rankbased""" +613 37 dataset """wn18rr""" +613 37 model """rotate""" +613 37 loss """nssa""" +613 37 regularizer """no""" +613 37 optimizer """adam""" +613 37 training_loop """owa""" +613 37 negative_sampler """basic""" +613 37 evaluator """rankbased""" +613 38 dataset """wn18rr""" +613 38 model """rotate""" +613 38 loss """nssa""" +613 38 regularizer """no""" +613 38 optimizer """adam""" +613 38 training_loop """owa""" +613 38 negative_sampler """basic""" +613 38 evaluator 
"""rankbased""" +613 39 dataset """wn18rr""" +613 39 model """rotate""" +613 39 loss """nssa""" +613 39 regularizer """no""" +613 39 optimizer """adam""" +613 39 training_loop """owa""" +613 39 negative_sampler """basic""" +613 39 evaluator """rankbased""" +613 40 dataset """wn18rr""" +613 40 model """rotate""" +613 40 loss """nssa""" +613 40 regularizer """no""" +613 40 optimizer """adam""" +613 40 training_loop """owa""" +613 40 negative_sampler """basic""" +613 40 evaluator """rankbased""" +613 41 dataset """wn18rr""" +613 41 model """rotate""" +613 41 loss """nssa""" +613 41 regularizer """no""" +613 41 optimizer """adam""" +613 41 training_loop """owa""" +613 41 negative_sampler """basic""" +613 41 evaluator """rankbased""" +613 42 dataset """wn18rr""" +613 42 model """rotate""" +613 42 loss """nssa""" +613 42 regularizer """no""" +613 42 optimizer """adam""" +613 42 training_loop """owa""" +613 42 negative_sampler """basic""" +613 42 evaluator """rankbased""" +613 43 dataset """wn18rr""" +613 43 model """rotate""" +613 43 loss """nssa""" +613 43 regularizer """no""" +613 43 optimizer """adam""" +613 43 training_loop """owa""" +613 43 negative_sampler """basic""" +613 43 evaluator """rankbased""" +613 44 dataset """wn18rr""" +613 44 model """rotate""" +613 44 loss """nssa""" +613 44 regularizer """no""" +613 44 optimizer """adam""" +613 44 training_loop """owa""" +613 44 negative_sampler """basic""" +613 44 evaluator """rankbased""" +613 45 dataset """wn18rr""" +613 45 model """rotate""" +613 45 loss """nssa""" +613 45 regularizer """no""" +613 45 optimizer """adam""" +613 45 training_loop """owa""" +613 45 negative_sampler """basic""" +613 45 evaluator """rankbased""" +613 46 dataset """wn18rr""" +613 46 model """rotate""" +613 46 loss """nssa""" +613 46 regularizer """no""" +613 46 optimizer """adam""" +613 46 training_loop """owa""" +613 46 negative_sampler """basic""" +613 46 evaluator """rankbased""" +613 47 dataset """wn18rr""" +613 47 model """rotate""" 
+613 47 loss """nssa""" +613 47 regularizer """no""" +613 47 optimizer """adam""" +613 47 training_loop """owa""" +613 47 negative_sampler """basic""" +613 47 evaluator """rankbased""" +613 48 dataset """wn18rr""" +613 48 model """rotate""" +613 48 loss """nssa""" +613 48 regularizer """no""" +613 48 optimizer """adam""" +613 48 training_loop """owa""" +613 48 negative_sampler """basic""" +613 48 evaluator """rankbased""" +613 49 dataset """wn18rr""" +613 49 model """rotate""" +613 49 loss """nssa""" +613 49 regularizer """no""" +613 49 optimizer """adam""" +613 49 training_loop """owa""" +613 49 negative_sampler """basic""" +613 49 evaluator """rankbased""" +613 50 dataset """wn18rr""" +613 50 model """rotate""" +613 50 loss """nssa""" +613 50 regularizer """no""" +613 50 optimizer """adam""" +613 50 training_loop """owa""" +613 50 negative_sampler """basic""" +613 50 evaluator """rankbased""" +613 51 dataset """wn18rr""" +613 51 model """rotate""" +613 51 loss """nssa""" +613 51 regularizer """no""" +613 51 optimizer """adam""" +613 51 training_loop """owa""" +613 51 negative_sampler """basic""" +613 51 evaluator """rankbased""" +614 1 model.embedding_dim 0.0 +614 1 loss.margin 17.01944656843172 +614 1 loss.adversarial_temperature 0.503528485285558 +614 1 optimizer.lr 0.048689103472215826 +614 1 negative_sampler.num_negs_per_pos 88.0 +614 1 training.batch_size 0.0 +614 2 model.embedding_dim 2.0 +614 2 loss.margin 19.9759971308693 +614 2 loss.adversarial_temperature 0.2128303876614806 +614 2 optimizer.lr 0.06858010807453457 +614 2 negative_sampler.num_negs_per_pos 13.0 +614 2 training.batch_size 0.0 +614 3 model.embedding_dim 2.0 +614 3 loss.margin 6.9813651691742376 +614 3 loss.adversarial_temperature 0.7436545519037199 +614 3 optimizer.lr 0.0015822646505385543 +614 3 negative_sampler.num_negs_per_pos 34.0 +614 3 training.batch_size 2.0 +614 4 model.embedding_dim 2.0 +614 4 loss.margin 22.887007517109673 +614 4 loss.adversarial_temperature 0.4872804999250969 +614 
4 optimizer.lr 0.010704208934507136 +614 4 negative_sampler.num_negs_per_pos 88.0 +614 4 training.batch_size 0.0 +614 5 model.embedding_dim 0.0 +614 5 loss.margin 18.29418655253908 +614 5 loss.adversarial_temperature 0.9667255361490998 +614 5 optimizer.lr 0.0030529352218966574 +614 5 negative_sampler.num_negs_per_pos 25.0 +614 5 training.batch_size 1.0 +614 6 model.embedding_dim 2.0 +614 6 loss.margin 23.911339415869232 +614 6 loss.adversarial_temperature 0.4813034735671266 +614 6 optimizer.lr 0.00391036410474574 +614 6 negative_sampler.num_negs_per_pos 72.0 +614 6 training.batch_size 1.0 +614 7 model.embedding_dim 2.0 +614 7 loss.margin 25.907599397848962 +614 7 loss.adversarial_temperature 0.28441349695097967 +614 7 optimizer.lr 0.03321149285148948 +614 7 negative_sampler.num_negs_per_pos 28.0 +614 7 training.batch_size 1.0 +614 8 model.embedding_dim 0.0 +614 8 loss.margin 19.295892520891844 +614 8 loss.adversarial_temperature 0.7327178531522752 +614 8 optimizer.lr 0.021489891716335023 +614 8 negative_sampler.num_negs_per_pos 13.0 +614 8 training.batch_size 0.0 +614 9 model.embedding_dim 0.0 +614 9 loss.margin 29.052226608157994 +614 9 loss.adversarial_temperature 0.5917459341285455 +614 9 optimizer.lr 0.005002103407570576 +614 9 negative_sampler.num_negs_per_pos 43.0 +614 9 training.batch_size 1.0 +614 10 model.embedding_dim 2.0 +614 10 loss.margin 1.6169335812023218 +614 10 loss.adversarial_temperature 0.2519430856418881 +614 10 optimizer.lr 0.00312793853759273 +614 10 negative_sampler.num_negs_per_pos 44.0 +614 10 training.batch_size 1.0 +614 11 model.embedding_dim 1.0 +614 11 loss.margin 4.736629454081591 +614 11 loss.adversarial_temperature 0.536608768147323 +614 11 optimizer.lr 0.03230948778164455 +614 11 negative_sampler.num_negs_per_pos 54.0 +614 11 training.batch_size 1.0 +614 12 model.embedding_dim 0.0 +614 12 loss.margin 4.510504658987383 +614 12 loss.adversarial_temperature 0.38367942541328964 +614 12 optimizer.lr 0.09961518222860548 +614 12 
negative_sampler.num_negs_per_pos 87.0 +614 12 training.batch_size 1.0 +614 13 model.embedding_dim 1.0 +614 13 loss.margin 16.521794293976296 +614 13 loss.adversarial_temperature 0.25044865500363483 +614 13 optimizer.lr 0.010709608396970759 +614 13 negative_sampler.num_negs_per_pos 9.0 +614 13 training.batch_size 1.0 +614 14 model.embedding_dim 1.0 +614 14 loss.margin 27.98910536768283 +614 14 loss.adversarial_temperature 0.19635496605998357 +614 14 optimizer.lr 0.018942791254337493 +614 14 negative_sampler.num_negs_per_pos 5.0 +614 14 training.batch_size 2.0 +614 15 model.embedding_dim 2.0 +614 15 loss.margin 12.111796255837517 +614 15 loss.adversarial_temperature 0.5348926339459683 +614 15 optimizer.lr 0.017890039921952126 +614 15 negative_sampler.num_negs_per_pos 47.0 +614 15 training.batch_size 0.0 +614 16 model.embedding_dim 2.0 +614 16 loss.margin 28.506606259926183 +614 16 loss.adversarial_temperature 0.5086364313678956 +614 16 optimizer.lr 0.014930637146185784 +614 16 negative_sampler.num_negs_per_pos 9.0 +614 16 training.batch_size 2.0 +614 17 model.embedding_dim 1.0 +614 17 loss.margin 1.722550573426743 +614 17 loss.adversarial_temperature 0.418899366199919 +614 17 optimizer.lr 0.009659130700857053 +614 17 negative_sampler.num_negs_per_pos 19.0 +614 17 training.batch_size 1.0 +614 18 model.embedding_dim 1.0 +614 18 loss.margin 9.370960765741552 +614 18 loss.adversarial_temperature 0.3678473631011447 +614 18 optimizer.lr 0.02456284757635219 +614 18 negative_sampler.num_negs_per_pos 17.0 +614 18 training.batch_size 2.0 +614 19 model.embedding_dim 0.0 +614 19 loss.margin 19.218887211936003 +614 19 loss.adversarial_temperature 0.9714532379836436 +614 19 optimizer.lr 0.011976158490326471 +614 19 negative_sampler.num_negs_per_pos 50.0 +614 19 training.batch_size 1.0 +614 20 model.embedding_dim 2.0 +614 20 loss.margin 22.801811642695874 +614 20 loss.adversarial_temperature 0.8179638468259169 +614 20 optimizer.lr 0.027121469135413898 +614 20 
negative_sampler.num_negs_per_pos 89.0 +614 20 training.batch_size 0.0 +614 21 model.embedding_dim 1.0 +614 21 loss.margin 16.331598433670017 +614 21 loss.adversarial_temperature 0.6580170257518644 +614 21 optimizer.lr 0.013052722037958996 +614 21 negative_sampler.num_negs_per_pos 35.0 +614 21 training.batch_size 2.0 +614 22 model.embedding_dim 2.0 +614 22 loss.margin 11.779062762225479 +614 22 loss.adversarial_temperature 0.2802595129678245 +614 22 optimizer.lr 0.012313300265710718 +614 22 negative_sampler.num_negs_per_pos 74.0 +614 22 training.batch_size 2.0 +614 23 model.embedding_dim 2.0 +614 23 loss.margin 16.228255353448883 +614 23 loss.adversarial_temperature 0.4627448173760849 +614 23 optimizer.lr 0.006012892537053953 +614 23 negative_sampler.num_negs_per_pos 14.0 +614 23 training.batch_size 2.0 +614 24 model.embedding_dim 0.0 +614 24 loss.margin 7.29305136479864 +614 24 loss.adversarial_temperature 0.9023124476789408 +614 24 optimizer.lr 0.012502717521227813 +614 24 negative_sampler.num_negs_per_pos 97.0 +614 24 training.batch_size 0.0 +614 25 model.embedding_dim 0.0 +614 25 loss.margin 22.278142486314326 +614 25 loss.adversarial_temperature 0.2846049864130189 +614 25 optimizer.lr 0.00325437940273605 +614 25 negative_sampler.num_negs_per_pos 65.0 +614 25 training.batch_size 1.0 +614 26 model.embedding_dim 1.0 +614 26 loss.margin 18.78466296904259 +614 26 loss.adversarial_temperature 0.9935499176155157 +614 26 optimizer.lr 0.03778176630306864 +614 26 negative_sampler.num_negs_per_pos 64.0 +614 26 training.batch_size 2.0 +614 27 model.embedding_dim 1.0 +614 27 loss.margin 7.350946249444913 +614 27 loss.adversarial_temperature 0.399304729730887 +614 27 optimizer.lr 0.021660355451558828 +614 27 negative_sampler.num_negs_per_pos 18.0 +614 27 training.batch_size 1.0 +614 28 model.embedding_dim 0.0 +614 28 loss.margin 1.5132250596816927 +614 28 loss.adversarial_temperature 0.3169317675906032 +614 28 optimizer.lr 0.0035507763067598877 +614 28 
negative_sampler.num_negs_per_pos 5.0 +614 28 training.batch_size 0.0 +614 29 model.embedding_dim 2.0 +614 29 loss.margin 24.58955288958809 +614 29 loss.adversarial_temperature 0.37761540713429326 +614 29 optimizer.lr 0.02804045557632814 +614 29 negative_sampler.num_negs_per_pos 74.0 +614 29 training.batch_size 1.0 +614 30 model.embedding_dim 1.0 +614 30 loss.margin 8.919394918959954 +614 30 loss.adversarial_temperature 0.528636652860161 +614 30 optimizer.lr 0.005532903733701539 +614 30 negative_sampler.num_negs_per_pos 37.0 +614 30 training.batch_size 1.0 +614 31 model.embedding_dim 1.0 +614 31 loss.margin 6.700076863056358 +614 31 loss.adversarial_temperature 0.3428316144429818 +614 31 optimizer.lr 0.01622743477839646 +614 31 negative_sampler.num_negs_per_pos 92.0 +614 31 training.batch_size 0.0 +614 32 model.embedding_dim 2.0 +614 32 loss.margin 9.023857509883165 +614 32 loss.adversarial_temperature 0.8333396440486546 +614 32 optimizer.lr 0.0031893720815417354 +614 32 negative_sampler.num_negs_per_pos 82.0 +614 32 training.batch_size 0.0 +614 33 model.embedding_dim 1.0 +614 33 loss.margin 5.464650654361446 +614 33 loss.adversarial_temperature 0.11726399489333361 +614 33 optimizer.lr 0.08058889335197726 +614 33 negative_sampler.num_negs_per_pos 95.0 +614 33 training.batch_size 1.0 +614 34 model.embedding_dim 2.0 +614 34 loss.margin 14.430888943693194 +614 34 loss.adversarial_temperature 0.7795870298559134 +614 34 optimizer.lr 0.0077726966209630015 +614 34 negative_sampler.num_negs_per_pos 95.0 +614 34 training.batch_size 0.0 +614 35 model.embedding_dim 2.0 +614 35 loss.margin 12.89262987231462 +614 35 loss.adversarial_temperature 0.8200922970057029 +614 35 optimizer.lr 0.0014300259119951263 +614 35 negative_sampler.num_negs_per_pos 4.0 +614 35 training.batch_size 2.0 +614 36 model.embedding_dim 1.0 +614 36 loss.margin 2.732894102760934 +614 36 loss.adversarial_temperature 0.12402816054205579 +614 36 optimizer.lr 0.028343070868396422 +614 36 
negative_sampler.num_negs_per_pos 54.0 +614 36 training.batch_size 2.0 +614 37 model.embedding_dim 0.0 +614 37 loss.margin 19.50445500529293 +614 37 loss.adversarial_temperature 0.9124553279727997 +614 37 optimizer.lr 0.09131905136036951 +614 37 negative_sampler.num_negs_per_pos 73.0 +614 37 training.batch_size 2.0 +614 38 model.embedding_dim 1.0 +614 38 loss.margin 4.521491094850558 +614 38 loss.adversarial_temperature 0.7566164502302494 +614 38 optimizer.lr 0.023829513778897006 +614 38 negative_sampler.num_negs_per_pos 64.0 +614 38 training.batch_size 1.0 +614 39 model.embedding_dim 0.0 +614 39 loss.margin 9.153349302700406 +614 39 loss.adversarial_temperature 0.45620162868511077 +614 39 optimizer.lr 0.009525493958742902 +614 39 negative_sampler.num_negs_per_pos 22.0 +614 39 training.batch_size 1.0 +614 40 model.embedding_dim 1.0 +614 40 loss.margin 5.3081788451430585 +614 40 loss.adversarial_temperature 0.8228067618272988 +614 40 optimizer.lr 0.026487628769018185 +614 40 negative_sampler.num_negs_per_pos 15.0 +614 40 training.batch_size 2.0 +614 41 model.embedding_dim 0.0 +614 41 loss.margin 19.301902055626773 +614 41 loss.adversarial_temperature 0.1924421444914588 +614 41 optimizer.lr 0.002157765821185831 +614 41 negative_sampler.num_negs_per_pos 77.0 +614 41 training.batch_size 1.0 +614 42 model.embedding_dim 0.0 +614 42 loss.margin 4.216461670342404 +614 42 loss.adversarial_temperature 0.31457245551048557 +614 42 optimizer.lr 0.01766912153547335 +614 42 negative_sampler.num_negs_per_pos 66.0 +614 42 training.batch_size 1.0 +614 43 model.embedding_dim 0.0 +614 43 loss.margin 7.5124995832586485 +614 43 loss.adversarial_temperature 0.8101405498452545 +614 43 optimizer.lr 0.002115494902243953 +614 43 negative_sampler.num_negs_per_pos 26.0 +614 43 training.batch_size 2.0 +614 44 model.embedding_dim 0.0 +614 44 loss.margin 27.11550355866222 +614 44 loss.adversarial_temperature 0.6304989445188064 +614 44 optimizer.lr 0.0030605270958613535 +614 44 
negative_sampler.num_negs_per_pos 91.0 +614 44 training.batch_size 0.0 +614 45 model.embedding_dim 1.0 +614 45 loss.margin 1.747317781190734 +614 45 loss.adversarial_temperature 0.2616121119797174 +614 45 optimizer.lr 0.004393406442225358 +614 45 negative_sampler.num_negs_per_pos 17.0 +614 45 training.batch_size 1.0 +614 46 model.embedding_dim 1.0 +614 46 loss.margin 22.308075598944704 +614 46 loss.adversarial_temperature 0.18742844003664824 +614 46 optimizer.lr 0.088995272039205 +614 46 negative_sampler.num_negs_per_pos 11.0 +614 46 training.batch_size 0.0 +614 47 model.embedding_dim 0.0 +614 47 loss.margin 20.058970570688945 +614 47 loss.adversarial_temperature 0.24919077844698023 +614 47 optimizer.lr 0.036351582254778185 +614 47 negative_sampler.num_negs_per_pos 29.0 +614 47 training.batch_size 2.0 +614 48 model.embedding_dim 1.0 +614 48 loss.margin 23.44990847940366 +614 48 loss.adversarial_temperature 0.17433032416748132 +614 48 optimizer.lr 0.0031708496298021955 +614 48 negative_sampler.num_negs_per_pos 37.0 +614 48 training.batch_size 1.0 +614 49 model.embedding_dim 0.0 +614 49 loss.margin 17.08381159782252 +614 49 loss.adversarial_temperature 0.9651136793480967 +614 49 optimizer.lr 0.001586630950645394 +614 49 negative_sampler.num_negs_per_pos 64.0 +614 49 training.batch_size 0.0 +614 50 model.embedding_dim 0.0 +614 50 loss.margin 1.3014750700321867 +614 50 loss.adversarial_temperature 0.611657129650616 +614 50 optimizer.lr 0.046420112492729565 +614 50 negative_sampler.num_negs_per_pos 29.0 +614 50 training.batch_size 0.0 +614 51 model.embedding_dim 0.0 +614 51 loss.margin 15.113510102904188 +614 51 loss.adversarial_temperature 0.8653954672380155 +614 51 optimizer.lr 0.046792397171707 +614 51 negative_sampler.num_negs_per_pos 63.0 +614 51 training.batch_size 0.0 +614 52 model.embedding_dim 1.0 +614 52 loss.margin 9.169647881787432 +614 52 loss.adversarial_temperature 0.4019967465446846 +614 52 optimizer.lr 0.0023956888689853012 +614 52 
negative_sampler.num_negs_per_pos 64.0 +614 52 training.batch_size 1.0 +614 53 model.embedding_dim 2.0 +614 53 loss.margin 29.21182684929366 +614 53 loss.adversarial_temperature 0.3467650002345653 +614 53 optimizer.lr 0.03548788481297983 +614 53 negative_sampler.num_negs_per_pos 70.0 +614 53 training.batch_size 1.0 +614 54 model.embedding_dim 2.0 +614 54 loss.margin 24.979759903619456 +614 54 loss.adversarial_temperature 0.8718573571836145 +614 54 optimizer.lr 0.007238140332811776 +614 54 negative_sampler.num_negs_per_pos 66.0 +614 54 training.batch_size 2.0 +614 55 model.embedding_dim 2.0 +614 55 loss.margin 24.239956571415327 +614 55 loss.adversarial_temperature 0.4408881760182276 +614 55 optimizer.lr 0.01946878919176389 +614 55 negative_sampler.num_negs_per_pos 67.0 +614 55 training.batch_size 0.0 +614 56 model.embedding_dim 0.0 +614 56 loss.margin 10.381808832362934 +614 56 loss.adversarial_temperature 0.5488755000377074 +614 56 optimizer.lr 0.012974719310978171 +614 56 negative_sampler.num_negs_per_pos 93.0 +614 56 training.batch_size 2.0 +614 57 model.embedding_dim 1.0 +614 57 loss.margin 24.69277114984089 +614 57 loss.adversarial_temperature 0.42949212675725 +614 57 optimizer.lr 0.0010764627418363136 +614 57 negative_sampler.num_negs_per_pos 88.0 +614 57 training.batch_size 2.0 +614 58 model.embedding_dim 2.0 +614 58 loss.margin 22.178260247100575 +614 58 loss.adversarial_temperature 0.5299443950170313 +614 58 optimizer.lr 0.004743351550997273 +614 58 negative_sampler.num_negs_per_pos 18.0 +614 58 training.batch_size 0.0 +614 59 model.embedding_dim 1.0 +614 59 loss.margin 26.233218938455764 +614 59 loss.adversarial_temperature 0.10504052890601244 +614 59 optimizer.lr 0.05036422514222611 +614 59 negative_sampler.num_negs_per_pos 2.0 +614 59 training.batch_size 2.0 +614 60 model.embedding_dim 2.0 +614 60 loss.margin 29.31961482728922 +614 60 loss.adversarial_temperature 0.22972013876594025 +614 60 optimizer.lr 0.026523040663263213 +614 60 
negative_sampler.num_negs_per_pos 39.0 +614 60 training.batch_size 0.0 +614 61 model.embedding_dim 2.0 +614 61 loss.margin 3.0471810434333735 +614 61 loss.adversarial_temperature 0.9847917915576319 +614 61 optimizer.lr 0.04828424437946919 +614 61 negative_sampler.num_negs_per_pos 40.0 +614 61 training.batch_size 1.0 +614 62 model.embedding_dim 2.0 +614 62 loss.margin 24.737312873832817 +614 62 loss.adversarial_temperature 0.3202075735531784 +614 62 optimizer.lr 0.059607210736995264 +614 62 negative_sampler.num_negs_per_pos 49.0 +614 62 training.batch_size 0.0 +614 63 model.embedding_dim 1.0 +614 63 loss.margin 19.089894302197127 +614 63 loss.adversarial_temperature 0.9443389567013589 +614 63 optimizer.lr 0.0020455016123785957 +614 63 negative_sampler.num_negs_per_pos 96.0 +614 63 training.batch_size 2.0 +614 64 model.embedding_dim 2.0 +614 64 loss.margin 19.139506850489607 +614 64 loss.adversarial_temperature 0.9682760736346722 +614 64 optimizer.lr 0.013230148038758154 +614 64 negative_sampler.num_negs_per_pos 49.0 +614 64 training.batch_size 1.0 +614 65 model.embedding_dim 2.0 +614 65 loss.margin 2.799601938359568 +614 65 loss.adversarial_temperature 0.22244895972640727 +614 65 optimizer.lr 0.009251198926406244 +614 65 negative_sampler.num_negs_per_pos 58.0 +614 65 training.batch_size 2.0 +614 66 model.embedding_dim 1.0 +614 66 loss.margin 24.842388532904035 +614 66 loss.adversarial_temperature 0.19483791362587105 +614 66 optimizer.lr 0.09238835997487228 +614 66 negative_sampler.num_negs_per_pos 57.0 +614 66 training.batch_size 0.0 +614 67 model.embedding_dim 2.0 +614 67 loss.margin 29.041313440804103 +614 67 loss.adversarial_temperature 0.5064822095238936 +614 67 optimizer.lr 0.03695277848380907 +614 67 negative_sampler.num_negs_per_pos 58.0 +614 67 training.batch_size 2.0 +614 68 model.embedding_dim 2.0 +614 68 loss.margin 17.251917540015388 +614 68 loss.adversarial_temperature 0.414141331891433 +614 68 optimizer.lr 0.010444589575447237 +614 68 
negative_sampler.num_negs_per_pos 6.0 +614 68 training.batch_size 1.0 +614 69 model.embedding_dim 1.0 +614 69 loss.margin 23.211294901699866 +614 69 loss.adversarial_temperature 0.9687317776423028 +614 69 optimizer.lr 0.024139676259299633 +614 69 negative_sampler.num_negs_per_pos 65.0 +614 69 training.batch_size 1.0 +614 70 model.embedding_dim 0.0 +614 70 loss.margin 18.99766875843714 +614 70 loss.adversarial_temperature 0.26551779638416645 +614 70 optimizer.lr 0.07438196395119566 +614 70 negative_sampler.num_negs_per_pos 71.0 +614 70 training.batch_size 0.0 +614 71 model.embedding_dim 2.0 +614 71 loss.margin 11.172279595245456 +614 71 loss.adversarial_temperature 0.8502758993855267 +614 71 optimizer.lr 0.047199824265995075 +614 71 negative_sampler.num_negs_per_pos 76.0 +614 71 training.batch_size 2.0 +614 72 model.embedding_dim 1.0 +614 72 loss.margin 8.251123387543206 +614 72 loss.adversarial_temperature 0.1064707319016519 +614 72 optimizer.lr 0.002648531824999529 +614 72 negative_sampler.num_negs_per_pos 56.0 +614 72 training.batch_size 0.0 +614 73 model.embedding_dim 1.0 +614 73 loss.margin 7.187752056823642 +614 73 loss.adversarial_temperature 0.1361543056869771 +614 73 optimizer.lr 0.022960558789753508 +614 73 negative_sampler.num_negs_per_pos 46.0 +614 73 training.batch_size 0.0 +614 74 model.embedding_dim 2.0 +614 74 loss.margin 29.277765996363218 +614 74 loss.adversarial_temperature 0.22428526453706504 +614 74 optimizer.lr 0.0014147169336995375 +614 74 negative_sampler.num_negs_per_pos 22.0 +614 74 training.batch_size 0.0 +614 75 model.embedding_dim 1.0 +614 75 loss.margin 21.529156345687827 +614 75 loss.adversarial_temperature 0.6702579048019256 +614 75 optimizer.lr 0.06798092900647658 +614 75 negative_sampler.num_negs_per_pos 70.0 +614 75 training.batch_size 0.0 +614 76 model.embedding_dim 2.0 +614 76 loss.margin 14.983264525408366 +614 76 loss.adversarial_temperature 0.786112248007889 +614 76 optimizer.lr 0.0014800516364075506 +614 76 
negative_sampler.num_negs_per_pos 11.0 +614 76 training.batch_size 2.0 +614 77 model.embedding_dim 0.0 +614 77 loss.margin 1.119575358109159 +614 77 loss.adversarial_temperature 0.2955779425060187 +614 77 optimizer.lr 0.024917862040794148 +614 77 negative_sampler.num_negs_per_pos 12.0 +614 77 training.batch_size 0.0 +614 78 model.embedding_dim 1.0 +614 78 loss.margin 28.027684596918 +614 78 loss.adversarial_temperature 0.5995039024178727 +614 78 optimizer.lr 0.0016338453143833619 +614 78 negative_sampler.num_negs_per_pos 58.0 +614 78 training.batch_size 1.0 +614 1 dataset """wn18rr""" +614 1 model """rotate""" +614 1 loss """nssa""" +614 1 regularizer """no""" +614 1 optimizer """adam""" +614 1 training_loop """owa""" +614 1 negative_sampler """basic""" +614 1 evaluator """rankbased""" +614 2 dataset """wn18rr""" +614 2 model """rotate""" +614 2 loss """nssa""" +614 2 regularizer """no""" +614 2 optimizer """adam""" +614 2 training_loop """owa""" +614 2 negative_sampler """basic""" +614 2 evaluator """rankbased""" +614 3 dataset """wn18rr""" +614 3 model """rotate""" +614 3 loss """nssa""" +614 3 regularizer """no""" +614 3 optimizer """adam""" +614 3 training_loop """owa""" +614 3 negative_sampler """basic""" +614 3 evaluator """rankbased""" +614 4 dataset """wn18rr""" +614 4 model """rotate""" +614 4 loss """nssa""" +614 4 regularizer """no""" +614 4 optimizer """adam""" +614 4 training_loop """owa""" +614 4 negative_sampler """basic""" +614 4 evaluator """rankbased""" +614 5 dataset """wn18rr""" +614 5 model """rotate""" +614 5 loss """nssa""" +614 5 regularizer """no""" +614 5 optimizer """adam""" +614 5 training_loop """owa""" +614 5 negative_sampler """basic""" +614 5 evaluator """rankbased""" +614 6 dataset """wn18rr""" +614 6 model """rotate""" +614 6 loss """nssa""" +614 6 regularizer """no""" +614 6 optimizer """adam""" +614 6 training_loop """owa""" +614 6 negative_sampler """basic""" +614 6 evaluator """rankbased""" +614 7 dataset """wn18rr""" +614 7 
model """rotate""" +614 7 loss """nssa""" +614 7 regularizer """no""" +614 7 optimizer """adam""" +614 7 training_loop """owa""" +614 7 negative_sampler """basic""" +614 7 evaluator """rankbased""" +614 8 dataset """wn18rr""" +614 8 model """rotate""" +614 8 loss """nssa""" +614 8 regularizer """no""" +614 8 optimizer """adam""" +614 8 training_loop """owa""" +614 8 negative_sampler """basic""" +614 8 evaluator """rankbased""" +614 9 dataset """wn18rr""" +614 9 model """rotate""" +614 9 loss """nssa""" +614 9 regularizer """no""" +614 9 optimizer """adam""" +614 9 training_loop """owa""" +614 9 negative_sampler """basic""" +614 9 evaluator """rankbased""" +614 10 dataset """wn18rr""" +614 10 model """rotate""" +614 10 loss """nssa""" +614 10 regularizer """no""" +614 10 optimizer """adam""" +614 10 training_loop """owa""" +614 10 negative_sampler """basic""" +614 10 evaluator """rankbased""" +614 11 dataset """wn18rr""" +614 11 model """rotate""" +614 11 loss """nssa""" +614 11 regularizer """no""" +614 11 optimizer """adam""" +614 11 training_loop """owa""" +614 11 negative_sampler """basic""" +614 11 evaluator """rankbased""" +614 12 dataset """wn18rr""" +614 12 model """rotate""" +614 12 loss """nssa""" +614 12 regularizer """no""" +614 12 optimizer """adam""" +614 12 training_loop """owa""" +614 12 negative_sampler """basic""" +614 12 evaluator """rankbased""" +614 13 dataset """wn18rr""" +614 13 model """rotate""" +614 13 loss """nssa""" +614 13 regularizer """no""" +614 13 optimizer """adam""" +614 13 training_loop """owa""" +614 13 negative_sampler """basic""" +614 13 evaluator """rankbased""" +614 14 dataset """wn18rr""" +614 14 model """rotate""" +614 14 loss """nssa""" +614 14 regularizer """no""" +614 14 optimizer """adam""" +614 14 training_loop """owa""" +614 14 negative_sampler """basic""" +614 14 evaluator """rankbased""" +614 15 dataset """wn18rr""" +614 15 model """rotate""" +614 15 loss """nssa""" +614 15 regularizer """no""" +614 15 optimizer 
"""adam""" +614 15 training_loop """owa""" +614 15 negative_sampler """basic""" +614 15 evaluator """rankbased""" +614 16 dataset """wn18rr""" +614 16 model """rotate""" +614 16 loss """nssa""" +614 16 regularizer """no""" +614 16 optimizer """adam""" +614 16 training_loop """owa""" +614 16 negative_sampler """basic""" +614 16 evaluator """rankbased""" +614 17 dataset """wn18rr""" +614 17 model """rotate""" +614 17 loss """nssa""" +614 17 regularizer """no""" +614 17 optimizer """adam""" +614 17 training_loop """owa""" +614 17 negative_sampler """basic""" +614 17 evaluator """rankbased""" +614 18 dataset """wn18rr""" +614 18 model """rotate""" +614 18 loss """nssa""" +614 18 regularizer """no""" +614 18 optimizer """adam""" +614 18 training_loop """owa""" +614 18 negative_sampler """basic""" +614 18 evaluator """rankbased""" +614 19 dataset """wn18rr""" +614 19 model """rotate""" +614 19 loss """nssa""" +614 19 regularizer """no""" +614 19 optimizer """adam""" +614 19 training_loop """owa""" +614 19 negative_sampler """basic""" +614 19 evaluator """rankbased""" +614 20 dataset """wn18rr""" +614 20 model """rotate""" +614 20 loss """nssa""" +614 20 regularizer """no""" +614 20 optimizer """adam""" +614 20 training_loop """owa""" +614 20 negative_sampler """basic""" +614 20 evaluator """rankbased""" +614 21 dataset """wn18rr""" +614 21 model """rotate""" +614 21 loss """nssa""" +614 21 regularizer """no""" +614 21 optimizer """adam""" +614 21 training_loop """owa""" +614 21 negative_sampler """basic""" +614 21 evaluator """rankbased""" +614 22 dataset """wn18rr""" +614 22 model """rotate""" +614 22 loss """nssa""" +614 22 regularizer """no""" +614 22 optimizer """adam""" +614 22 training_loop """owa""" +614 22 negative_sampler """basic""" +614 22 evaluator """rankbased""" +614 23 dataset """wn18rr""" +614 23 model """rotate""" +614 23 loss """nssa""" +614 23 regularizer """no""" +614 23 optimizer """adam""" +614 23 training_loop """owa""" +614 23 negative_sampler 
"""basic""" +614 23 evaluator """rankbased""" +614 24 dataset """wn18rr""" +614 24 model """rotate""" +614 24 loss """nssa""" +614 24 regularizer """no""" +614 24 optimizer """adam""" +614 24 training_loop """owa""" +614 24 negative_sampler """basic""" +614 24 evaluator """rankbased""" +614 25 dataset """wn18rr""" +614 25 model """rotate""" +614 25 loss """nssa""" +614 25 regularizer """no""" +614 25 optimizer """adam""" +614 25 training_loop """owa""" +614 25 negative_sampler """basic""" +614 25 evaluator """rankbased""" +614 26 dataset """wn18rr""" +614 26 model """rotate""" +614 26 loss """nssa""" +614 26 regularizer """no""" +614 26 optimizer """adam""" +614 26 training_loop """owa""" +614 26 negative_sampler """basic""" +614 26 evaluator """rankbased""" +614 27 dataset """wn18rr""" +614 27 model """rotate""" +614 27 loss """nssa""" +614 27 regularizer """no""" +614 27 optimizer """adam""" +614 27 training_loop """owa""" +614 27 negative_sampler """basic""" +614 27 evaluator """rankbased""" +614 28 dataset """wn18rr""" +614 28 model """rotate""" +614 28 loss """nssa""" +614 28 regularizer """no""" +614 28 optimizer """adam""" +614 28 training_loop """owa""" +614 28 negative_sampler """basic""" +614 28 evaluator """rankbased""" +614 29 dataset """wn18rr""" +614 29 model """rotate""" +614 29 loss """nssa""" +614 29 regularizer """no""" +614 29 optimizer """adam""" +614 29 training_loop """owa""" +614 29 negative_sampler """basic""" +614 29 evaluator """rankbased""" +614 30 dataset """wn18rr""" +614 30 model """rotate""" +614 30 loss """nssa""" +614 30 regularizer """no""" +614 30 optimizer """adam""" +614 30 training_loop """owa""" +614 30 negative_sampler """basic""" +614 30 evaluator """rankbased""" +614 31 dataset """wn18rr""" +614 31 model """rotate""" +614 31 loss """nssa""" +614 31 regularizer """no""" +614 31 optimizer """adam""" +614 31 training_loop """owa""" +614 31 negative_sampler """basic""" +614 31 evaluator """rankbased""" +614 32 dataset 
"""wn18rr""" +614 32 model """rotate""" +614 32 loss """nssa""" +614 32 regularizer """no""" +614 32 optimizer """adam""" +614 32 training_loop """owa""" +614 32 negative_sampler """basic""" +614 32 evaluator """rankbased""" +614 33 dataset """wn18rr""" +614 33 model """rotate""" +614 33 loss """nssa""" +614 33 regularizer """no""" +614 33 optimizer """adam""" +614 33 training_loop """owa""" +614 33 negative_sampler """basic""" +614 33 evaluator """rankbased""" +614 34 dataset """wn18rr""" +614 34 model """rotate""" +614 34 loss """nssa""" +614 34 regularizer """no""" +614 34 optimizer """adam""" +614 34 training_loop """owa""" +614 34 negative_sampler """basic""" +614 34 evaluator """rankbased""" +614 35 dataset """wn18rr""" +614 35 model """rotate""" +614 35 loss """nssa""" +614 35 regularizer """no""" +614 35 optimizer """adam""" +614 35 training_loop """owa""" +614 35 negative_sampler """basic""" +614 35 evaluator """rankbased""" +614 36 dataset """wn18rr""" +614 36 model """rotate""" +614 36 loss """nssa""" +614 36 regularizer """no""" +614 36 optimizer """adam""" +614 36 training_loop """owa""" +614 36 negative_sampler """basic""" +614 36 evaluator """rankbased""" +614 37 dataset """wn18rr""" +614 37 model """rotate""" +614 37 loss """nssa""" +614 37 regularizer """no""" +614 37 optimizer """adam""" +614 37 training_loop """owa""" +614 37 negative_sampler """basic""" +614 37 evaluator """rankbased""" +614 38 dataset """wn18rr""" +614 38 model """rotate""" +614 38 loss """nssa""" +614 38 regularizer """no""" +614 38 optimizer """adam""" +614 38 training_loop """owa""" +614 38 negative_sampler """basic""" +614 38 evaluator """rankbased""" +614 39 dataset """wn18rr""" +614 39 model """rotate""" +614 39 loss """nssa""" +614 39 regularizer """no""" +614 39 optimizer """adam""" +614 39 training_loop """owa""" +614 39 negative_sampler """basic""" +614 39 evaluator """rankbased""" +614 40 dataset """wn18rr""" +614 40 model """rotate""" +614 40 loss """nssa""" +614 40 
regularizer """no""" +614 40 optimizer """adam""" +614 40 training_loop """owa""" +614 40 negative_sampler """basic""" +614 40 evaluator """rankbased""" +614 41 dataset """wn18rr""" +614 41 model """rotate""" +614 41 loss """nssa""" +614 41 regularizer """no""" +614 41 optimizer """adam""" +614 41 training_loop """owa""" +614 41 negative_sampler """basic""" +614 41 evaluator """rankbased""" +614 42 dataset """wn18rr""" +614 42 model """rotate""" +614 42 loss """nssa""" +614 42 regularizer """no""" +614 42 optimizer """adam""" +614 42 training_loop """owa""" +614 42 negative_sampler """basic""" +614 42 evaluator """rankbased""" +614 43 dataset """wn18rr""" +614 43 model """rotate""" +614 43 loss """nssa""" +614 43 regularizer """no""" +614 43 optimizer """adam""" +614 43 training_loop """owa""" +614 43 negative_sampler """basic""" +614 43 evaluator """rankbased""" +614 44 dataset """wn18rr""" +614 44 model """rotate""" +614 44 loss """nssa""" +614 44 regularizer """no""" +614 44 optimizer """adam""" +614 44 training_loop """owa""" +614 44 negative_sampler """basic""" +614 44 evaluator """rankbased""" +614 45 dataset """wn18rr""" +614 45 model """rotate""" +614 45 loss """nssa""" +614 45 regularizer """no""" +614 45 optimizer """adam""" +614 45 training_loop """owa""" +614 45 negative_sampler """basic""" +614 45 evaluator """rankbased""" +614 46 dataset """wn18rr""" +614 46 model """rotate""" +614 46 loss """nssa""" +614 46 regularizer """no""" +614 46 optimizer """adam""" +614 46 training_loop """owa""" +614 46 negative_sampler """basic""" +614 46 evaluator """rankbased""" +614 47 dataset """wn18rr""" +614 47 model """rotate""" +614 47 loss """nssa""" +614 47 regularizer """no""" +614 47 optimizer """adam""" +614 47 training_loop """owa""" +614 47 negative_sampler """basic""" +614 47 evaluator """rankbased""" +614 48 dataset """wn18rr""" +614 48 model """rotate""" +614 48 loss """nssa""" +614 48 regularizer """no""" +614 48 optimizer """adam""" +614 48 training_loop 
"""owa""" +614 48 negative_sampler """basic""" +614 48 evaluator """rankbased""" +614 49 dataset """wn18rr""" +614 49 model """rotate""" +614 49 loss """nssa""" +614 49 regularizer """no""" +614 49 optimizer """adam""" +614 49 training_loop """owa""" +614 49 negative_sampler """basic""" +614 49 evaluator """rankbased""" +614 50 dataset """wn18rr""" +614 50 model """rotate""" +614 50 loss """nssa""" +614 50 regularizer """no""" +614 50 optimizer """adam""" +614 50 training_loop """owa""" +614 50 negative_sampler """basic""" +614 50 evaluator """rankbased""" +614 51 dataset """wn18rr""" +614 51 model """rotate""" +614 51 loss """nssa""" +614 51 regularizer """no""" +614 51 optimizer """adam""" +614 51 training_loop """owa""" +614 51 negative_sampler """basic""" +614 51 evaluator """rankbased""" +614 52 dataset """wn18rr""" +614 52 model """rotate""" +614 52 loss """nssa""" +614 52 regularizer """no""" +614 52 optimizer """adam""" +614 52 training_loop """owa""" +614 52 negative_sampler """basic""" +614 52 evaluator """rankbased""" +614 53 dataset """wn18rr""" +614 53 model """rotate""" +614 53 loss """nssa""" +614 53 regularizer """no""" +614 53 optimizer """adam""" +614 53 training_loop """owa""" +614 53 negative_sampler """basic""" +614 53 evaluator """rankbased""" +614 54 dataset """wn18rr""" +614 54 model """rotate""" +614 54 loss """nssa""" +614 54 regularizer """no""" +614 54 optimizer """adam""" +614 54 training_loop """owa""" +614 54 negative_sampler """basic""" +614 54 evaluator """rankbased""" +614 55 dataset """wn18rr""" +614 55 model """rotate""" +614 55 loss """nssa""" +614 55 regularizer """no""" +614 55 optimizer """adam""" +614 55 training_loop """owa""" +614 55 negative_sampler """basic""" +614 55 evaluator """rankbased""" +614 56 dataset """wn18rr""" +614 56 model """rotate""" +614 56 loss """nssa""" +614 56 regularizer """no""" +614 56 optimizer """adam""" +614 56 training_loop """owa""" +614 56 negative_sampler """basic""" +614 56 evaluator 
"""rankbased""" +614 57 dataset """wn18rr""" +614 57 model """rotate""" +614 57 loss """nssa""" +614 57 regularizer """no""" +614 57 optimizer """adam""" +614 57 training_loop """owa""" +614 57 negative_sampler """basic""" +614 57 evaluator """rankbased""" +614 58 dataset """wn18rr""" +614 58 model """rotate""" +614 58 loss """nssa""" +614 58 regularizer """no""" +614 58 optimizer """adam""" +614 58 training_loop """owa""" +614 58 negative_sampler """basic""" +614 58 evaluator """rankbased""" +614 59 dataset """wn18rr""" +614 59 model """rotate""" +614 59 loss """nssa""" +614 59 regularizer """no""" +614 59 optimizer """adam""" +614 59 training_loop """owa""" +614 59 negative_sampler """basic""" +614 59 evaluator """rankbased""" +614 60 dataset """wn18rr""" +614 60 model """rotate""" +614 60 loss """nssa""" +614 60 regularizer """no""" +614 60 optimizer """adam""" +614 60 training_loop """owa""" +614 60 negative_sampler """basic""" +614 60 evaluator """rankbased""" +614 61 dataset """wn18rr""" +614 61 model """rotate""" +614 61 loss """nssa""" +614 61 regularizer """no""" +614 61 optimizer """adam""" +614 61 training_loop """owa""" +614 61 negative_sampler """basic""" +614 61 evaluator """rankbased""" +614 62 dataset """wn18rr""" +614 62 model """rotate""" +614 62 loss """nssa""" +614 62 regularizer """no""" +614 62 optimizer """adam""" +614 62 training_loop """owa""" +614 62 negative_sampler """basic""" +614 62 evaluator """rankbased""" +614 63 dataset """wn18rr""" +614 63 model """rotate""" +614 63 loss """nssa""" +614 63 regularizer """no""" +614 63 optimizer """adam""" +614 63 training_loop """owa""" +614 63 negative_sampler """basic""" +614 63 evaluator """rankbased""" +614 64 dataset """wn18rr""" +614 64 model """rotate""" +614 64 loss """nssa""" +614 64 regularizer """no""" +614 64 optimizer """adam""" +614 64 training_loop """owa""" +614 64 negative_sampler """basic""" +614 64 evaluator """rankbased""" +614 65 dataset """wn18rr""" +614 65 model """rotate""" 
+614 65 loss """nssa""" +614 65 regularizer """no""" +614 65 optimizer """adam""" +614 65 training_loop """owa""" +614 65 negative_sampler """basic""" +614 65 evaluator """rankbased""" +614 66 dataset """wn18rr""" +614 66 model """rotate""" +614 66 loss """nssa""" +614 66 regularizer """no""" +614 66 optimizer """adam""" +614 66 training_loop """owa""" +614 66 negative_sampler """basic""" +614 66 evaluator """rankbased""" +614 67 dataset """wn18rr""" +614 67 model """rotate""" +614 67 loss """nssa""" +614 67 regularizer """no""" +614 67 optimizer """adam""" +614 67 training_loop """owa""" +614 67 negative_sampler """basic""" +614 67 evaluator """rankbased""" +614 68 dataset """wn18rr""" +614 68 model """rotate""" +614 68 loss """nssa""" +614 68 regularizer """no""" +614 68 optimizer """adam""" +614 68 training_loop """owa""" +614 68 negative_sampler """basic""" +614 68 evaluator """rankbased""" +614 69 dataset """wn18rr""" +614 69 model """rotate""" +614 69 loss """nssa""" +614 69 regularizer """no""" +614 69 optimizer """adam""" +614 69 training_loop """owa""" +614 69 negative_sampler """basic""" +614 69 evaluator """rankbased""" +614 70 dataset """wn18rr""" +614 70 model """rotate""" +614 70 loss """nssa""" +614 70 regularizer """no""" +614 70 optimizer """adam""" +614 70 training_loop """owa""" +614 70 negative_sampler """basic""" +614 70 evaluator """rankbased""" +614 71 dataset """wn18rr""" +614 71 model """rotate""" +614 71 loss """nssa""" +614 71 regularizer """no""" +614 71 optimizer """adam""" +614 71 training_loop """owa""" +614 71 negative_sampler """basic""" +614 71 evaluator """rankbased""" +614 72 dataset """wn18rr""" +614 72 model """rotate""" +614 72 loss """nssa""" +614 72 regularizer """no""" +614 72 optimizer """adam""" +614 72 training_loop """owa""" +614 72 negative_sampler """basic""" +614 72 evaluator """rankbased""" +614 73 dataset """wn18rr""" +614 73 model """rotate""" +614 73 loss """nssa""" +614 73 regularizer """no""" +614 73 optimizer 
"""adam""" +614 73 training_loop """owa""" +614 73 negative_sampler """basic""" +614 73 evaluator """rankbased""" +614 74 dataset """wn18rr""" +614 74 model """rotate""" +614 74 loss """nssa""" +614 74 regularizer """no""" +614 74 optimizer """adam""" +614 74 training_loop """owa""" +614 74 negative_sampler """basic""" +614 74 evaluator """rankbased""" +614 75 dataset """wn18rr""" +614 75 model """rotate""" +614 75 loss """nssa""" +614 75 regularizer """no""" +614 75 optimizer """adam""" +614 75 training_loop """owa""" +614 75 negative_sampler """basic""" +614 75 evaluator """rankbased""" +614 76 dataset """wn18rr""" +614 76 model """rotate""" +614 76 loss """nssa""" +614 76 regularizer """no""" +614 76 optimizer """adam""" +614 76 training_loop """owa""" +614 76 negative_sampler """basic""" +614 76 evaluator """rankbased""" +614 77 dataset """wn18rr""" +614 77 model """rotate""" +614 77 loss """nssa""" +614 77 regularizer """no""" +614 77 optimizer """adam""" +614 77 training_loop """owa""" +614 77 negative_sampler """basic""" +614 77 evaluator """rankbased""" +614 78 dataset """wn18rr""" +614 78 model """rotate""" +614 78 loss """nssa""" +614 78 regularizer """no""" +614 78 optimizer """adam""" +614 78 training_loop """owa""" +614 78 negative_sampler """basic""" +614 78 evaluator """rankbased""" +615 1 model.embedding_dim 0.0 +615 1 optimizer.lr 0.08489212054560866 +615 1 negative_sampler.num_negs_per_pos 22.0 +615 1 training.batch_size 2.0 +615 2 model.embedding_dim 0.0 +615 2 optimizer.lr 0.011454404667632647 +615 2 negative_sampler.num_negs_per_pos 86.0 +615 2 training.batch_size 0.0 +615 3 model.embedding_dim 1.0 +615 3 optimizer.lr 0.016758216994535467 +615 3 negative_sampler.num_negs_per_pos 28.0 +615 3 training.batch_size 0.0 +615 4 model.embedding_dim 2.0 +615 4 optimizer.lr 0.032590814416254756 +615 4 negative_sampler.num_negs_per_pos 31.0 +615 4 training.batch_size 2.0 +615 5 model.embedding_dim 0.0 +615 5 optimizer.lr 0.010930153541062715 +615 5 
negative_sampler.num_negs_per_pos 27.0 +615 5 training.batch_size 0.0 +615 6 model.embedding_dim 2.0 +615 6 optimizer.lr 0.01313297273715629 +615 6 negative_sampler.num_negs_per_pos 56.0 +615 6 training.batch_size 1.0 +615 7 model.embedding_dim 2.0 +615 7 optimizer.lr 0.003635058847342075 +615 7 negative_sampler.num_negs_per_pos 26.0 +615 7 training.batch_size 0.0 +615 8 model.embedding_dim 2.0 +615 8 optimizer.lr 0.05512615036969157 +615 8 negative_sampler.num_negs_per_pos 8.0 +615 8 training.batch_size 0.0 +615 9 model.embedding_dim 0.0 +615 9 optimizer.lr 0.0023251420512845305 +615 9 negative_sampler.num_negs_per_pos 4.0 +615 9 training.batch_size 1.0 +615 10 model.embedding_dim 2.0 +615 10 optimizer.lr 0.02872964161247291 +615 10 negative_sampler.num_negs_per_pos 56.0 +615 10 training.batch_size 0.0 +615 11 model.embedding_dim 1.0 +615 11 optimizer.lr 0.0010572660431173626 +615 11 negative_sampler.num_negs_per_pos 12.0 +615 11 training.batch_size 0.0 +615 12 model.embedding_dim 2.0 +615 12 optimizer.lr 0.0025262942502959785 +615 12 negative_sampler.num_negs_per_pos 62.0 +615 12 training.batch_size 1.0 +615 13 model.embedding_dim 2.0 +615 13 optimizer.lr 0.004592973383152046 +615 13 negative_sampler.num_negs_per_pos 63.0 +615 13 training.batch_size 0.0 +615 14 model.embedding_dim 0.0 +615 14 optimizer.lr 0.06586390332041234 +615 14 negative_sampler.num_negs_per_pos 65.0 +615 14 training.batch_size 2.0 +615 15 model.embedding_dim 0.0 +615 15 optimizer.lr 0.09810739502319307 +615 15 negative_sampler.num_negs_per_pos 35.0 +615 15 training.batch_size 0.0 +615 16 model.embedding_dim 1.0 +615 16 optimizer.lr 0.0020402252902932314 +615 16 negative_sampler.num_negs_per_pos 22.0 +615 16 training.batch_size 0.0 +615 17 model.embedding_dim 0.0 +615 17 optimizer.lr 0.013801331978991704 +615 17 negative_sampler.num_negs_per_pos 16.0 +615 17 training.batch_size 1.0 +615 18 model.embedding_dim 2.0 +615 18 optimizer.lr 0.015091403510405116 +615 18 
negative_sampler.num_negs_per_pos 14.0 +615 18 training.batch_size 2.0 +615 19 model.embedding_dim 1.0 +615 19 optimizer.lr 0.02538298838694073 +615 19 negative_sampler.num_negs_per_pos 9.0 +615 19 training.batch_size 0.0 +615 20 model.embedding_dim 0.0 +615 20 optimizer.lr 0.05399854715829775 +615 20 negative_sampler.num_negs_per_pos 42.0 +615 20 training.batch_size 1.0 +615 21 model.embedding_dim 2.0 +615 21 optimizer.lr 0.01579690583271548 +615 21 negative_sampler.num_negs_per_pos 48.0 +615 21 training.batch_size 0.0 +615 22 model.embedding_dim 1.0 +615 22 optimizer.lr 0.0052515316968907025 +615 22 negative_sampler.num_negs_per_pos 77.0 +615 22 training.batch_size 1.0 +615 23 model.embedding_dim 2.0 +615 23 optimizer.lr 0.05204636512271123 +615 23 negative_sampler.num_negs_per_pos 60.0 +615 23 training.batch_size 1.0 +615 24 model.embedding_dim 0.0 +615 24 optimizer.lr 0.0022572829828062875 +615 24 negative_sampler.num_negs_per_pos 34.0 +615 24 training.batch_size 1.0 +615 25 model.embedding_dim 1.0 +615 25 optimizer.lr 0.015661898980929685 +615 25 negative_sampler.num_negs_per_pos 4.0 +615 25 training.batch_size 0.0 +615 26 model.embedding_dim 2.0 +615 26 optimizer.lr 0.01561889453517344 +615 26 negative_sampler.num_negs_per_pos 86.0 +615 26 training.batch_size 2.0 +615 27 model.embedding_dim 0.0 +615 27 optimizer.lr 0.0012740276092277588 +615 27 negative_sampler.num_negs_per_pos 49.0 +615 27 training.batch_size 1.0 +615 28 model.embedding_dim 2.0 +615 28 optimizer.lr 0.002259574771492718 +615 28 negative_sampler.num_negs_per_pos 53.0 +615 28 training.batch_size 0.0 +615 29 model.embedding_dim 0.0 +615 29 optimizer.lr 0.05053072726919983 +615 29 negative_sampler.num_negs_per_pos 11.0 +615 29 training.batch_size 2.0 +615 30 model.embedding_dim 0.0 +615 30 optimizer.lr 0.0159538448752641 +615 30 negative_sampler.num_negs_per_pos 55.0 +615 30 training.batch_size 2.0 +615 31 model.embedding_dim 0.0 +615 31 optimizer.lr 0.003480593104509174 +615 31 
negative_sampler.num_negs_per_pos 86.0 +615 31 training.batch_size 0.0 +615 32 model.embedding_dim 0.0 +615 32 optimizer.lr 0.017123653996664955 +615 32 negative_sampler.num_negs_per_pos 58.0 +615 32 training.batch_size 0.0 +615 33 model.embedding_dim 0.0 +615 33 optimizer.lr 0.008672203364695505 +615 33 negative_sampler.num_negs_per_pos 73.0 +615 33 training.batch_size 1.0 +615 34 model.embedding_dim 1.0 +615 34 optimizer.lr 0.0026152152925762763 +615 34 negative_sampler.num_negs_per_pos 97.0 +615 34 training.batch_size 2.0 +615 35 model.embedding_dim 1.0 +615 35 optimizer.lr 0.002823308420458721 +615 35 negative_sampler.num_negs_per_pos 66.0 +615 35 training.batch_size 1.0 +615 36 model.embedding_dim 0.0 +615 36 optimizer.lr 0.0015030733296737394 +615 36 negative_sampler.num_negs_per_pos 93.0 +615 36 training.batch_size 0.0 +615 37 model.embedding_dim 0.0 +615 37 optimizer.lr 0.003581854955193491 +615 37 negative_sampler.num_negs_per_pos 67.0 +615 37 training.batch_size 0.0 +615 38 model.embedding_dim 0.0 +615 38 optimizer.lr 0.011655244837570617 +615 38 negative_sampler.num_negs_per_pos 15.0 +615 38 training.batch_size 0.0 +615 39 model.embedding_dim 0.0 +615 39 optimizer.lr 0.002074103844640662 +615 39 negative_sampler.num_negs_per_pos 88.0 +615 39 training.batch_size 2.0 +615 40 model.embedding_dim 2.0 +615 40 optimizer.lr 0.018987028846944934 +615 40 negative_sampler.num_negs_per_pos 26.0 +615 40 training.batch_size 0.0 +615 41 model.embedding_dim 0.0 +615 41 optimizer.lr 0.020216993945581314 +615 41 negative_sampler.num_negs_per_pos 61.0 +615 41 training.batch_size 0.0 +615 42 model.embedding_dim 1.0 +615 42 optimizer.lr 0.007306202805570267 +615 42 negative_sampler.num_negs_per_pos 62.0 +615 42 training.batch_size 0.0 +615 43 model.embedding_dim 2.0 +615 43 optimizer.lr 0.017010741739744845 +615 43 negative_sampler.num_negs_per_pos 87.0 +615 43 training.batch_size 0.0 +615 44 model.embedding_dim 0.0 +615 44 optimizer.lr 0.002665259448100372 +615 44 
negative_sampler.num_negs_per_pos 69.0 +615 44 training.batch_size 2.0 +615 45 model.embedding_dim 0.0 +615 45 optimizer.lr 0.050857445269592384 +615 45 negative_sampler.num_negs_per_pos 70.0 +615 45 training.batch_size 1.0 +615 46 model.embedding_dim 1.0 +615 46 optimizer.lr 0.003327986212725925 +615 46 negative_sampler.num_negs_per_pos 97.0 +615 46 training.batch_size 1.0 +615 47 model.embedding_dim 2.0 +615 47 optimizer.lr 0.0019722421364498648 +615 47 negative_sampler.num_negs_per_pos 4.0 +615 47 training.batch_size 2.0 +615 48 model.embedding_dim 0.0 +615 48 optimizer.lr 0.0163959148674985 +615 48 negative_sampler.num_negs_per_pos 90.0 +615 48 training.batch_size 2.0 +615 49 model.embedding_dim 0.0 +615 49 optimizer.lr 0.017299477259557123 +615 49 negative_sampler.num_negs_per_pos 96.0 +615 49 training.batch_size 1.0 +615 50 model.embedding_dim 2.0 +615 50 optimizer.lr 0.0014311308409715418 +615 50 negative_sampler.num_negs_per_pos 72.0 +615 50 training.batch_size 1.0 +615 51 model.embedding_dim 1.0 +615 51 optimizer.lr 0.0017508117834560575 +615 51 negative_sampler.num_negs_per_pos 11.0 +615 51 training.batch_size 2.0 +615 52 model.embedding_dim 2.0 +615 52 optimizer.lr 0.010850427093899231 +615 52 negative_sampler.num_negs_per_pos 70.0 +615 52 training.batch_size 1.0 +615 53 model.embedding_dim 2.0 +615 53 optimizer.lr 0.018480760739904428 +615 53 negative_sampler.num_negs_per_pos 64.0 +615 53 training.batch_size 0.0 +615 54 model.embedding_dim 1.0 +615 54 optimizer.lr 0.018637432003865743 +615 54 negative_sampler.num_negs_per_pos 92.0 +615 54 training.batch_size 0.0 +615 55 model.embedding_dim 0.0 +615 55 optimizer.lr 0.0325091360779679 +615 55 negative_sampler.num_negs_per_pos 83.0 +615 55 training.batch_size 0.0 +615 56 model.embedding_dim 0.0 +615 56 optimizer.lr 0.04096679515695989 +615 56 negative_sampler.num_negs_per_pos 67.0 +615 56 training.batch_size 0.0 +615 57 model.embedding_dim 0.0 +615 57 optimizer.lr 0.023771668205232726 +615 57 
negative_sampler.num_negs_per_pos 70.0 +615 57 training.batch_size 2.0 +615 58 model.embedding_dim 1.0 +615 58 optimizer.lr 0.031192103186858267 +615 58 negative_sampler.num_negs_per_pos 90.0 +615 58 training.batch_size 1.0 +615 59 model.embedding_dim 0.0 +615 59 optimizer.lr 0.005750053842989873 +615 59 negative_sampler.num_negs_per_pos 24.0 +615 59 training.batch_size 0.0 +615 60 model.embedding_dim 2.0 +615 60 optimizer.lr 0.0024135552009247035 +615 60 negative_sampler.num_negs_per_pos 87.0 +615 60 training.batch_size 0.0 +615 61 model.embedding_dim 0.0 +615 61 optimizer.lr 0.0011450194545483882 +615 61 negative_sampler.num_negs_per_pos 3.0 +615 61 training.batch_size 0.0 +615 1 dataset """wn18rr""" +615 1 model """rotate""" +615 1 loss """bceaftersigmoid""" +615 1 regularizer """no""" +615 1 optimizer """adam""" +615 1 training_loop """owa""" +615 1 negative_sampler """basic""" +615 1 evaluator """rankbased""" +615 2 dataset """wn18rr""" +615 2 model """rotate""" +615 2 loss """bceaftersigmoid""" +615 2 regularizer """no""" +615 2 optimizer """adam""" +615 2 training_loop """owa""" +615 2 negative_sampler """basic""" +615 2 evaluator """rankbased""" +615 3 dataset """wn18rr""" +615 3 model """rotate""" +615 3 loss """bceaftersigmoid""" +615 3 regularizer """no""" +615 3 optimizer """adam""" +615 3 training_loop """owa""" +615 3 negative_sampler """basic""" +615 3 evaluator """rankbased""" +615 4 dataset """wn18rr""" +615 4 model """rotate""" +615 4 loss """bceaftersigmoid""" +615 4 regularizer """no""" +615 4 optimizer """adam""" +615 4 training_loop """owa""" +615 4 negative_sampler """basic""" +615 4 evaluator """rankbased""" +615 5 dataset """wn18rr""" +615 5 model """rotate""" +615 5 loss """bceaftersigmoid""" +615 5 regularizer """no""" +615 5 optimizer """adam""" +615 5 training_loop """owa""" +615 5 negative_sampler """basic""" +615 5 evaluator """rankbased""" +615 6 dataset """wn18rr""" +615 6 model """rotate""" +615 6 loss """bceaftersigmoid""" +615 6 
regularizer """no""" +615 6 optimizer """adam""" +615 6 training_loop """owa""" +615 6 negative_sampler """basic""" +615 6 evaluator """rankbased""" +615 7 dataset """wn18rr""" +615 7 model """rotate""" +615 7 loss """bceaftersigmoid""" +615 7 regularizer """no""" +615 7 optimizer """adam""" +615 7 training_loop """owa""" +615 7 negative_sampler """basic""" +615 7 evaluator """rankbased""" +615 8 dataset """wn18rr""" +615 8 model """rotate""" +615 8 loss """bceaftersigmoid""" +615 8 regularizer """no""" +615 8 optimizer """adam""" +615 8 training_loop """owa""" +615 8 negative_sampler """basic""" +615 8 evaluator """rankbased""" +615 9 dataset """wn18rr""" +615 9 model """rotate""" +615 9 loss """bceaftersigmoid""" +615 9 regularizer """no""" +615 9 optimizer """adam""" +615 9 training_loop """owa""" +615 9 negative_sampler """basic""" +615 9 evaluator """rankbased""" +615 10 dataset """wn18rr""" +615 10 model """rotate""" +615 10 loss """bceaftersigmoid""" +615 10 regularizer """no""" +615 10 optimizer """adam""" +615 10 training_loop """owa""" +615 10 negative_sampler """basic""" +615 10 evaluator """rankbased""" +615 11 dataset """wn18rr""" +615 11 model """rotate""" +615 11 loss """bceaftersigmoid""" +615 11 regularizer """no""" +615 11 optimizer """adam""" +615 11 training_loop """owa""" +615 11 negative_sampler """basic""" +615 11 evaluator """rankbased""" +615 12 dataset """wn18rr""" +615 12 model """rotate""" +615 12 loss """bceaftersigmoid""" +615 12 regularizer """no""" +615 12 optimizer """adam""" +615 12 training_loop """owa""" +615 12 negative_sampler """basic""" +615 12 evaluator """rankbased""" +615 13 dataset """wn18rr""" +615 13 model """rotate""" +615 13 loss """bceaftersigmoid""" +615 13 regularizer """no""" +615 13 optimizer """adam""" +615 13 training_loop """owa""" +615 13 negative_sampler """basic""" +615 13 evaluator """rankbased""" +615 14 dataset """wn18rr""" +615 14 model """rotate""" +615 14 loss """bceaftersigmoid""" +615 14 regularizer 
"""no""" +615 14 optimizer """adam""" +615 14 training_loop """owa""" +615 14 negative_sampler """basic""" +615 14 evaluator """rankbased""" +615 15 dataset """wn18rr""" +615 15 model """rotate""" +615 15 loss """bceaftersigmoid""" +615 15 regularizer """no""" +615 15 optimizer """adam""" +615 15 training_loop """owa""" +615 15 negative_sampler """basic""" +615 15 evaluator """rankbased""" +615 16 dataset """wn18rr""" +615 16 model """rotate""" +615 16 loss """bceaftersigmoid""" +615 16 regularizer """no""" +615 16 optimizer """adam""" +615 16 training_loop """owa""" +615 16 negative_sampler """basic""" +615 16 evaluator """rankbased""" +615 17 dataset """wn18rr""" +615 17 model """rotate""" +615 17 loss """bceaftersigmoid""" +615 17 regularizer """no""" +615 17 optimizer """adam""" +615 17 training_loop """owa""" +615 17 negative_sampler """basic""" +615 17 evaluator """rankbased""" +615 18 dataset """wn18rr""" +615 18 model """rotate""" +615 18 loss """bceaftersigmoid""" +615 18 regularizer """no""" +615 18 optimizer """adam""" +615 18 training_loop """owa""" +615 18 negative_sampler """basic""" +615 18 evaluator """rankbased""" +615 19 dataset """wn18rr""" +615 19 model """rotate""" +615 19 loss """bceaftersigmoid""" +615 19 regularizer """no""" +615 19 optimizer """adam""" +615 19 training_loop """owa""" +615 19 negative_sampler """basic""" +615 19 evaluator """rankbased""" +615 20 dataset """wn18rr""" +615 20 model """rotate""" +615 20 loss """bceaftersigmoid""" +615 20 regularizer """no""" +615 20 optimizer """adam""" +615 20 training_loop """owa""" +615 20 negative_sampler """basic""" +615 20 evaluator """rankbased""" +615 21 dataset """wn18rr""" +615 21 model """rotate""" +615 21 loss """bceaftersigmoid""" +615 21 regularizer """no""" +615 21 optimizer """adam""" +615 21 training_loop """owa""" +615 21 negative_sampler """basic""" +615 21 evaluator """rankbased""" +615 22 dataset """wn18rr""" +615 22 model """rotate""" +615 22 loss """bceaftersigmoid""" 
+615 22 regularizer """no""" +615 22 optimizer """adam""" +615 22 training_loop """owa""" +615 22 negative_sampler """basic""" +615 22 evaluator """rankbased""" +615 23 dataset """wn18rr""" +615 23 model """rotate""" +615 23 loss """bceaftersigmoid""" +615 23 regularizer """no""" +615 23 optimizer """adam""" +615 23 training_loop """owa""" +615 23 negative_sampler """basic""" +615 23 evaluator """rankbased""" +615 24 dataset """wn18rr""" +615 24 model """rotate""" +615 24 loss """bceaftersigmoid""" +615 24 regularizer """no""" +615 24 optimizer """adam""" +615 24 training_loop """owa""" +615 24 negative_sampler """basic""" +615 24 evaluator """rankbased""" +615 25 dataset """wn18rr""" +615 25 model """rotate""" +615 25 loss """bceaftersigmoid""" +615 25 regularizer """no""" +615 25 optimizer """adam""" +615 25 training_loop """owa""" +615 25 negative_sampler """basic""" +615 25 evaluator """rankbased""" +615 26 dataset """wn18rr""" +615 26 model """rotate""" +615 26 loss """bceaftersigmoid""" +615 26 regularizer """no""" +615 26 optimizer """adam""" +615 26 training_loop """owa""" +615 26 negative_sampler """basic""" +615 26 evaluator """rankbased""" +615 27 dataset """wn18rr""" +615 27 model """rotate""" +615 27 loss """bceaftersigmoid""" +615 27 regularizer """no""" +615 27 optimizer """adam""" +615 27 training_loop """owa""" +615 27 negative_sampler """basic""" +615 27 evaluator """rankbased""" +615 28 dataset """wn18rr""" +615 28 model """rotate""" +615 28 loss """bceaftersigmoid""" +615 28 regularizer """no""" +615 28 optimizer """adam""" +615 28 training_loop """owa""" +615 28 negative_sampler """basic""" +615 28 evaluator """rankbased""" +615 29 dataset """wn18rr""" +615 29 model """rotate""" +615 29 loss """bceaftersigmoid""" +615 29 regularizer """no""" +615 29 optimizer """adam""" +615 29 training_loop """owa""" +615 29 negative_sampler """basic""" +615 29 evaluator """rankbased""" +615 30 dataset """wn18rr""" +615 30 model """rotate""" +615 30 loss 
"""bceaftersigmoid""" +615 30 regularizer """no""" +615 30 optimizer """adam""" +615 30 training_loop """owa""" +615 30 negative_sampler """basic""" +615 30 evaluator """rankbased""" +615 31 dataset """wn18rr""" +615 31 model """rotate""" +615 31 loss """bceaftersigmoid""" +615 31 regularizer """no""" +615 31 optimizer """adam""" +615 31 training_loop """owa""" +615 31 negative_sampler """basic""" +615 31 evaluator """rankbased""" +615 32 dataset """wn18rr""" +615 32 model """rotate""" +615 32 loss """bceaftersigmoid""" +615 32 regularizer """no""" +615 32 optimizer """adam""" +615 32 training_loop """owa""" +615 32 negative_sampler """basic""" +615 32 evaluator """rankbased""" +615 33 dataset """wn18rr""" +615 33 model """rotate""" +615 33 loss """bceaftersigmoid""" +615 33 regularizer """no""" +615 33 optimizer """adam""" +615 33 training_loop """owa""" +615 33 negative_sampler """basic""" +615 33 evaluator """rankbased""" +615 34 dataset """wn18rr""" +615 34 model """rotate""" +615 34 loss """bceaftersigmoid""" +615 34 regularizer """no""" +615 34 optimizer """adam""" +615 34 training_loop """owa""" +615 34 negative_sampler """basic""" +615 34 evaluator """rankbased""" +615 35 dataset """wn18rr""" +615 35 model """rotate""" +615 35 loss """bceaftersigmoid""" +615 35 regularizer """no""" +615 35 optimizer """adam""" +615 35 training_loop """owa""" +615 35 negative_sampler """basic""" +615 35 evaluator """rankbased""" +615 36 dataset """wn18rr""" +615 36 model """rotate""" +615 36 loss """bceaftersigmoid""" +615 36 regularizer """no""" +615 36 optimizer """adam""" +615 36 training_loop """owa""" +615 36 negative_sampler """basic""" +615 36 evaluator """rankbased""" +615 37 dataset """wn18rr""" +615 37 model """rotate""" +615 37 loss """bceaftersigmoid""" +615 37 regularizer """no""" +615 37 optimizer """adam""" +615 37 training_loop """owa""" +615 37 negative_sampler """basic""" +615 37 evaluator """rankbased""" +615 38 dataset """wn18rr""" +615 38 model 
"""rotate""" +615 38 loss """bceaftersigmoid""" +615 38 regularizer """no""" +615 38 optimizer """adam""" +615 38 training_loop """owa""" +615 38 negative_sampler """basic""" +615 38 evaluator """rankbased""" +615 39 dataset """wn18rr""" +615 39 model """rotate""" +615 39 loss """bceaftersigmoid""" +615 39 regularizer """no""" +615 39 optimizer """adam""" +615 39 training_loop """owa""" +615 39 negative_sampler """basic""" +615 39 evaluator """rankbased""" +615 40 dataset """wn18rr""" +615 40 model """rotate""" +615 40 loss """bceaftersigmoid""" +615 40 regularizer """no""" +615 40 optimizer """adam""" +615 40 training_loop """owa""" +615 40 negative_sampler """basic""" +615 40 evaluator """rankbased""" +615 41 dataset """wn18rr""" +615 41 model """rotate""" +615 41 loss """bceaftersigmoid""" +615 41 regularizer """no""" +615 41 optimizer """adam""" +615 41 training_loop """owa""" +615 41 negative_sampler """basic""" +615 41 evaluator """rankbased""" +615 42 dataset """wn18rr""" +615 42 model """rotate""" +615 42 loss """bceaftersigmoid""" +615 42 regularizer """no""" +615 42 optimizer """adam""" +615 42 training_loop """owa""" +615 42 negative_sampler """basic""" +615 42 evaluator """rankbased""" +615 43 dataset """wn18rr""" +615 43 model """rotate""" +615 43 loss """bceaftersigmoid""" +615 43 regularizer """no""" +615 43 optimizer """adam""" +615 43 training_loop """owa""" +615 43 negative_sampler """basic""" +615 43 evaluator """rankbased""" +615 44 dataset """wn18rr""" +615 44 model """rotate""" +615 44 loss """bceaftersigmoid""" +615 44 regularizer """no""" +615 44 optimizer """adam""" +615 44 training_loop """owa""" +615 44 negative_sampler """basic""" +615 44 evaluator """rankbased""" +615 45 dataset """wn18rr""" +615 45 model """rotate""" +615 45 loss """bceaftersigmoid""" +615 45 regularizer """no""" +615 45 optimizer """adam""" +615 45 training_loop """owa""" +615 45 negative_sampler """basic""" +615 45 evaluator """rankbased""" +615 46 dataset 
"""wn18rr""" +615 46 model """rotate""" +615 46 loss """bceaftersigmoid""" +615 46 regularizer """no""" +615 46 optimizer """adam""" +615 46 training_loop """owa""" +615 46 negative_sampler """basic""" +615 46 evaluator """rankbased""" +615 47 dataset """wn18rr""" +615 47 model """rotate""" +615 47 loss """bceaftersigmoid""" +615 47 regularizer """no""" +615 47 optimizer """adam""" +615 47 training_loop """owa""" +615 47 negative_sampler """basic""" +615 47 evaluator """rankbased""" +615 48 dataset """wn18rr""" +615 48 model """rotate""" +615 48 loss """bceaftersigmoid""" +615 48 regularizer """no""" +615 48 optimizer """adam""" +615 48 training_loop """owa""" +615 48 negative_sampler """basic""" +615 48 evaluator """rankbased""" +615 49 dataset """wn18rr""" +615 49 model """rotate""" +615 49 loss """bceaftersigmoid""" +615 49 regularizer """no""" +615 49 optimizer """adam""" +615 49 training_loop """owa""" +615 49 negative_sampler """basic""" +615 49 evaluator """rankbased""" +615 50 dataset """wn18rr""" +615 50 model """rotate""" +615 50 loss """bceaftersigmoid""" +615 50 regularizer """no""" +615 50 optimizer """adam""" +615 50 training_loop """owa""" +615 50 negative_sampler """basic""" +615 50 evaluator """rankbased""" +615 51 dataset """wn18rr""" +615 51 model """rotate""" +615 51 loss """bceaftersigmoid""" +615 51 regularizer """no""" +615 51 optimizer """adam""" +615 51 training_loop """owa""" +615 51 negative_sampler """basic""" +615 51 evaluator """rankbased""" +615 52 dataset """wn18rr""" +615 52 model """rotate""" +615 52 loss """bceaftersigmoid""" +615 52 regularizer """no""" +615 52 optimizer """adam""" +615 52 training_loop """owa""" +615 52 negative_sampler """basic""" +615 52 evaluator """rankbased""" +615 53 dataset """wn18rr""" +615 53 model """rotate""" +615 53 loss """bceaftersigmoid""" +615 53 regularizer """no""" +615 53 optimizer """adam""" +615 53 training_loop """owa""" +615 53 negative_sampler """basic""" +615 53 evaluator """rankbased""" 
+615 54 dataset """wn18rr""" +615 54 model """rotate""" +615 54 loss """bceaftersigmoid""" +615 54 regularizer """no""" +615 54 optimizer """adam""" +615 54 training_loop """owa""" +615 54 negative_sampler """basic""" +615 54 evaluator """rankbased""" +615 55 dataset """wn18rr""" +615 55 model """rotate""" +615 55 loss """bceaftersigmoid""" +615 55 regularizer """no""" +615 55 optimizer """adam""" +615 55 training_loop """owa""" +615 55 negative_sampler """basic""" +615 55 evaluator """rankbased""" +615 56 dataset """wn18rr""" +615 56 model """rotate""" +615 56 loss """bceaftersigmoid""" +615 56 regularizer """no""" +615 56 optimizer """adam""" +615 56 training_loop """owa""" +615 56 negative_sampler """basic""" +615 56 evaluator """rankbased""" +615 57 dataset """wn18rr""" +615 57 model """rotate""" +615 57 loss """bceaftersigmoid""" +615 57 regularizer """no""" +615 57 optimizer """adam""" +615 57 training_loop """owa""" +615 57 negative_sampler """basic""" +615 57 evaluator """rankbased""" +615 58 dataset """wn18rr""" +615 58 model """rotate""" +615 58 loss """bceaftersigmoid""" +615 58 regularizer """no""" +615 58 optimizer """adam""" +615 58 training_loop """owa""" +615 58 negative_sampler """basic""" +615 58 evaluator """rankbased""" +615 59 dataset """wn18rr""" +615 59 model """rotate""" +615 59 loss """bceaftersigmoid""" +615 59 regularizer """no""" +615 59 optimizer """adam""" +615 59 training_loop """owa""" +615 59 negative_sampler """basic""" +615 59 evaluator """rankbased""" +615 60 dataset """wn18rr""" +615 60 model """rotate""" +615 60 loss """bceaftersigmoid""" +615 60 regularizer """no""" +615 60 optimizer """adam""" +615 60 training_loop """owa""" +615 60 negative_sampler """basic""" +615 60 evaluator """rankbased""" +615 61 dataset """wn18rr""" +615 61 model """rotate""" +615 61 loss """bceaftersigmoid""" +615 61 regularizer """no""" +615 61 optimizer """adam""" +615 61 training_loop """owa""" +615 61 negative_sampler """basic""" +615 61 evaluator 
"""rankbased""" +616 1 model.embedding_dim 1.0 +616 1 optimizer.lr 0.007325797475713789 +616 1 negative_sampler.num_negs_per_pos 36.0 +616 1 training.batch_size 0.0 +616 2 model.embedding_dim 2.0 +616 2 optimizer.lr 0.007181244871701284 +616 2 negative_sampler.num_negs_per_pos 98.0 +616 2 training.batch_size 0.0 +616 3 model.embedding_dim 1.0 +616 3 optimizer.lr 0.005944788886775904 +616 3 negative_sampler.num_negs_per_pos 15.0 +616 3 training.batch_size 2.0 +616 4 model.embedding_dim 2.0 +616 4 optimizer.lr 0.03251352854615801 +616 4 negative_sampler.num_negs_per_pos 43.0 +616 4 training.batch_size 1.0 +616 5 model.embedding_dim 2.0 +616 5 optimizer.lr 0.08082186804662608 +616 5 negative_sampler.num_negs_per_pos 19.0 +616 5 training.batch_size 0.0 +616 6 model.embedding_dim 0.0 +616 6 optimizer.lr 0.09084114811515641 +616 6 negative_sampler.num_negs_per_pos 45.0 +616 6 training.batch_size 2.0 +616 7 model.embedding_dim 1.0 +616 7 optimizer.lr 0.02413662480369036 +616 7 negative_sampler.num_negs_per_pos 97.0 +616 7 training.batch_size 0.0 +616 8 model.embedding_dim 0.0 +616 8 optimizer.lr 0.0076906920374711875 +616 8 negative_sampler.num_negs_per_pos 87.0 +616 8 training.batch_size 2.0 +616 9 model.embedding_dim 0.0 +616 9 optimizer.lr 0.09402671710505353 +616 9 negative_sampler.num_negs_per_pos 96.0 +616 9 training.batch_size 1.0 +616 10 model.embedding_dim 0.0 +616 10 optimizer.lr 0.0018608143606342856 +616 10 negative_sampler.num_negs_per_pos 84.0 +616 10 training.batch_size 2.0 +616 11 model.embedding_dim 0.0 +616 11 optimizer.lr 0.029313247100450097 +616 11 negative_sampler.num_negs_per_pos 48.0 +616 11 training.batch_size 0.0 +616 12 model.embedding_dim 1.0 +616 12 optimizer.lr 0.0015942384229306696 +616 12 negative_sampler.num_negs_per_pos 15.0 +616 12 training.batch_size 0.0 +616 13 model.embedding_dim 1.0 +616 13 optimizer.lr 0.011223085887868463 +616 13 negative_sampler.num_negs_per_pos 21.0 +616 13 training.batch_size 2.0 +616 14 model.embedding_dim 2.0 
+616 14 optimizer.lr 0.04134712316249451 +616 14 negative_sampler.num_negs_per_pos 12.0 +616 14 training.batch_size 2.0 +616 15 model.embedding_dim 1.0 +616 15 optimizer.lr 0.015115786874383549 +616 15 negative_sampler.num_negs_per_pos 78.0 +616 15 training.batch_size 2.0 +616 16 model.embedding_dim 1.0 +616 16 optimizer.lr 0.007162881199475093 +616 16 negative_sampler.num_negs_per_pos 80.0 +616 16 training.batch_size 1.0 +616 17 model.embedding_dim 0.0 +616 17 optimizer.lr 0.061076316535023906 +616 17 negative_sampler.num_negs_per_pos 50.0 +616 17 training.batch_size 0.0 +616 18 model.embedding_dim 0.0 +616 18 optimizer.lr 0.02846783742341164 +616 18 negative_sampler.num_negs_per_pos 32.0 +616 18 training.batch_size 2.0 +616 19 model.embedding_dim 1.0 +616 19 optimizer.lr 0.013114395202498448 +616 19 negative_sampler.num_negs_per_pos 49.0 +616 19 training.batch_size 0.0 +616 20 model.embedding_dim 1.0 +616 20 optimizer.lr 0.007976757602830653 +616 20 negative_sampler.num_negs_per_pos 37.0 +616 20 training.batch_size 1.0 +616 21 model.embedding_dim 0.0 +616 21 optimizer.lr 0.0061737180917552596 +616 21 negative_sampler.num_negs_per_pos 56.0 +616 21 training.batch_size 2.0 +616 22 model.embedding_dim 1.0 +616 22 optimizer.lr 0.008045140348137315 +616 22 negative_sampler.num_negs_per_pos 72.0 +616 22 training.batch_size 1.0 +616 23 model.embedding_dim 1.0 +616 23 optimizer.lr 0.003617103446150666 +616 23 negative_sampler.num_negs_per_pos 72.0 +616 23 training.batch_size 1.0 +616 24 model.embedding_dim 2.0 +616 24 optimizer.lr 0.007960776319872279 +616 24 negative_sampler.num_negs_per_pos 92.0 +616 24 training.batch_size 2.0 +616 25 model.embedding_dim 0.0 +616 25 optimizer.lr 0.005536252145037296 +616 25 negative_sampler.num_negs_per_pos 66.0 +616 25 training.batch_size 2.0 +616 26 model.embedding_dim 0.0 +616 26 optimizer.lr 0.009884311675199419 +616 26 negative_sampler.num_negs_per_pos 63.0 +616 26 training.batch_size 1.0 +616 27 model.embedding_dim 0.0 +616 27 
optimizer.lr 0.07391488040882017 +616 27 negative_sampler.num_negs_per_pos 64.0 +616 27 training.batch_size 1.0 +616 28 model.embedding_dim 0.0 +616 28 optimizer.lr 0.007266358616496347 +616 28 negative_sampler.num_negs_per_pos 84.0 +616 28 training.batch_size 2.0 +616 29 model.embedding_dim 0.0 +616 29 optimizer.lr 0.0014565040906609613 +616 29 negative_sampler.num_negs_per_pos 32.0 +616 29 training.batch_size 1.0 +616 30 model.embedding_dim 0.0 +616 30 optimizer.lr 0.015567441451714441 +616 30 negative_sampler.num_negs_per_pos 13.0 +616 30 training.batch_size 2.0 +616 31 model.embedding_dim 2.0 +616 31 optimizer.lr 0.0018099576691029144 +616 31 negative_sampler.num_negs_per_pos 6.0 +616 31 training.batch_size 0.0 +616 32 model.embedding_dim 2.0 +616 32 optimizer.lr 0.003918863926323236 +616 32 negative_sampler.num_negs_per_pos 30.0 +616 32 training.batch_size 0.0 +616 33 model.embedding_dim 2.0 +616 33 optimizer.lr 0.004884708405311352 +616 33 negative_sampler.num_negs_per_pos 60.0 +616 33 training.batch_size 1.0 +616 34 model.embedding_dim 2.0 +616 34 optimizer.lr 0.0028152066941138556 +616 34 negative_sampler.num_negs_per_pos 22.0 +616 34 training.batch_size 2.0 +616 35 model.embedding_dim 1.0 +616 35 optimizer.lr 0.0011401238074767515 +616 35 negative_sampler.num_negs_per_pos 37.0 +616 35 training.batch_size 0.0 +616 36 model.embedding_dim 1.0 +616 36 optimizer.lr 0.009274605293446987 +616 36 negative_sampler.num_negs_per_pos 56.0 +616 36 training.batch_size 2.0 +616 37 model.embedding_dim 2.0 +616 37 optimizer.lr 0.0025645803519246586 +616 37 negative_sampler.num_negs_per_pos 2.0 +616 37 training.batch_size 2.0 +616 38 model.embedding_dim 2.0 +616 38 optimizer.lr 0.0014747872828605735 +616 38 negative_sampler.num_negs_per_pos 99.0 +616 38 training.batch_size 0.0 +616 39 model.embedding_dim 0.0 +616 39 optimizer.lr 0.039995396063693185 +616 39 negative_sampler.num_negs_per_pos 78.0 +616 39 training.batch_size 0.0 +616 40 model.embedding_dim 2.0 +616 40 
optimizer.lr 0.00672380886688764 +616 40 negative_sampler.num_negs_per_pos 26.0 +616 40 training.batch_size 0.0 +616 41 model.embedding_dim 0.0 +616 41 optimizer.lr 0.008197801068147662 +616 41 negative_sampler.num_negs_per_pos 73.0 +616 41 training.batch_size 0.0 +616 42 model.embedding_dim 0.0 +616 42 optimizer.lr 0.005670348090843848 +616 42 negative_sampler.num_negs_per_pos 31.0 +616 42 training.batch_size 2.0 +616 43 model.embedding_dim 2.0 +616 43 optimizer.lr 0.002706514941410533 +616 43 negative_sampler.num_negs_per_pos 40.0 +616 43 training.batch_size 0.0 +616 44 model.embedding_dim 1.0 +616 44 optimizer.lr 0.001365648243078608 +616 44 negative_sampler.num_negs_per_pos 25.0 +616 44 training.batch_size 1.0 +616 45 model.embedding_dim 2.0 +616 45 optimizer.lr 0.00101803212549971 +616 45 negative_sampler.num_negs_per_pos 71.0 +616 45 training.batch_size 2.0 +616 46 model.embedding_dim 2.0 +616 46 optimizer.lr 0.08507341795614191 +616 46 negative_sampler.num_negs_per_pos 66.0 +616 46 training.batch_size 0.0 +616 47 model.embedding_dim 2.0 +616 47 optimizer.lr 0.020791993107147 +616 47 negative_sampler.num_negs_per_pos 94.0 +616 47 training.batch_size 1.0 +616 48 model.embedding_dim 0.0 +616 48 optimizer.lr 0.04584522933546831 +616 48 negative_sampler.num_negs_per_pos 34.0 +616 48 training.batch_size 1.0 +616 49 model.embedding_dim 0.0 +616 49 optimizer.lr 0.02733838088419565 +616 49 negative_sampler.num_negs_per_pos 16.0 +616 49 training.batch_size 2.0 +616 50 model.embedding_dim 2.0 +616 50 optimizer.lr 0.04835641066184299 +616 50 negative_sampler.num_negs_per_pos 23.0 +616 50 training.batch_size 0.0 +616 51 model.embedding_dim 2.0 +616 51 optimizer.lr 0.0022505499054223887 +616 51 negative_sampler.num_negs_per_pos 44.0 +616 51 training.batch_size 0.0 +616 52 model.embedding_dim 0.0 +616 52 optimizer.lr 0.0016557392047838104 +616 52 negative_sampler.num_negs_per_pos 75.0 +616 52 training.batch_size 0.0 +616 53 model.embedding_dim 0.0 +616 53 optimizer.lr 
0.007644266183481946 +616 53 negative_sampler.num_negs_per_pos 75.0 +616 53 training.batch_size 0.0 +616 54 model.embedding_dim 1.0 +616 54 optimizer.lr 0.0010633428768765235 +616 54 negative_sampler.num_negs_per_pos 40.0 +616 54 training.batch_size 1.0 +616 55 model.embedding_dim 2.0 +616 55 optimizer.lr 0.0070772247044531235 +616 55 negative_sampler.num_negs_per_pos 90.0 +616 55 training.batch_size 1.0 +616 56 model.embedding_dim 2.0 +616 56 optimizer.lr 0.0032578618878076927 +616 56 negative_sampler.num_negs_per_pos 73.0 +616 56 training.batch_size 1.0 +616 57 model.embedding_dim 0.0 +616 57 optimizer.lr 0.07211072902034336 +616 57 negative_sampler.num_negs_per_pos 33.0 +616 57 training.batch_size 1.0 +616 58 model.embedding_dim 1.0 +616 58 optimizer.lr 0.0034559747399034615 +616 58 negative_sampler.num_negs_per_pos 48.0 +616 58 training.batch_size 2.0 +616 59 model.embedding_dim 1.0 +616 59 optimizer.lr 0.014114234225431366 +616 59 negative_sampler.num_negs_per_pos 12.0 +616 59 training.batch_size 0.0 +616 60 model.embedding_dim 1.0 +616 60 optimizer.lr 0.011969179673499383 +616 60 negative_sampler.num_negs_per_pos 27.0 +616 60 training.batch_size 0.0 +616 61 model.embedding_dim 1.0 +616 61 optimizer.lr 0.0017985053835978047 +616 61 negative_sampler.num_negs_per_pos 84.0 +616 61 training.batch_size 0.0 +616 62 model.embedding_dim 0.0 +616 62 optimizer.lr 0.09602252879601562 +616 62 negative_sampler.num_negs_per_pos 5.0 +616 62 training.batch_size 0.0 +616 63 model.embedding_dim 2.0 +616 63 optimizer.lr 0.013064318674640682 +616 63 negative_sampler.num_negs_per_pos 39.0 +616 63 training.batch_size 2.0 +616 64 model.embedding_dim 1.0 +616 64 optimizer.lr 0.0682785734965443 +616 64 negative_sampler.num_negs_per_pos 86.0 +616 64 training.batch_size 0.0 +616 65 model.embedding_dim 2.0 +616 65 optimizer.lr 0.0018244980091769547 +616 65 negative_sampler.num_negs_per_pos 79.0 +616 65 training.batch_size 1.0 +616 66 model.embedding_dim 1.0 +616 66 optimizer.lr 
0.007403640987663081 +616 66 negative_sampler.num_negs_per_pos 93.0 +616 66 training.batch_size 0.0 +616 67 model.embedding_dim 0.0 +616 67 optimizer.lr 0.042878777461801576 +616 67 negative_sampler.num_negs_per_pos 32.0 +616 67 training.batch_size 1.0 +616 1 dataset """wn18rr""" +616 1 model """rotate""" +616 1 loss """softplus""" +616 1 regularizer """no""" +616 1 optimizer """adam""" +616 1 training_loop """owa""" +616 1 negative_sampler """basic""" +616 1 evaluator """rankbased""" +616 2 dataset """wn18rr""" +616 2 model """rotate""" +616 2 loss """softplus""" +616 2 regularizer """no""" +616 2 optimizer """adam""" +616 2 training_loop """owa""" +616 2 negative_sampler """basic""" +616 2 evaluator """rankbased""" +616 3 dataset """wn18rr""" +616 3 model """rotate""" +616 3 loss """softplus""" +616 3 regularizer """no""" +616 3 optimizer """adam""" +616 3 training_loop """owa""" +616 3 negative_sampler """basic""" +616 3 evaluator """rankbased""" +616 4 dataset """wn18rr""" +616 4 model """rotate""" +616 4 loss """softplus""" +616 4 regularizer """no""" +616 4 optimizer """adam""" +616 4 training_loop """owa""" +616 4 negative_sampler """basic""" +616 4 evaluator """rankbased""" +616 5 dataset """wn18rr""" +616 5 model """rotate""" +616 5 loss """softplus""" +616 5 regularizer """no""" +616 5 optimizer """adam""" +616 5 training_loop """owa""" +616 5 negative_sampler """basic""" +616 5 evaluator """rankbased""" +616 6 dataset """wn18rr""" +616 6 model """rotate""" +616 6 loss """softplus""" +616 6 regularizer """no""" +616 6 optimizer """adam""" +616 6 training_loop """owa""" +616 6 negative_sampler """basic""" +616 6 evaluator """rankbased""" +616 7 dataset """wn18rr""" +616 7 model """rotate""" +616 7 loss """softplus""" +616 7 regularizer """no""" +616 7 optimizer """adam""" +616 7 training_loop """owa""" +616 7 negative_sampler """basic""" +616 7 evaluator """rankbased""" +616 8 dataset """wn18rr""" +616 8 model """rotate""" +616 8 loss """softplus""" +616 8 
regularizer """no""" +616 8 optimizer """adam""" +616 8 training_loop """owa""" +616 8 negative_sampler """basic""" +616 8 evaluator """rankbased""" +616 9 dataset """wn18rr""" +616 9 model """rotate""" +616 9 loss """softplus""" +616 9 regularizer """no""" +616 9 optimizer """adam""" +616 9 training_loop """owa""" +616 9 negative_sampler """basic""" +616 9 evaluator """rankbased""" +616 10 dataset """wn18rr""" +616 10 model """rotate""" +616 10 loss """softplus""" +616 10 regularizer """no""" +616 10 optimizer """adam""" +616 10 training_loop """owa""" +616 10 negative_sampler """basic""" +616 10 evaluator """rankbased""" +616 11 dataset """wn18rr""" +616 11 model """rotate""" +616 11 loss """softplus""" +616 11 regularizer """no""" +616 11 optimizer """adam""" +616 11 training_loop """owa""" +616 11 negative_sampler """basic""" +616 11 evaluator """rankbased""" +616 12 dataset """wn18rr""" +616 12 model """rotate""" +616 12 loss """softplus""" +616 12 regularizer """no""" +616 12 optimizer """adam""" +616 12 training_loop """owa""" +616 12 negative_sampler """basic""" +616 12 evaluator """rankbased""" +616 13 dataset """wn18rr""" +616 13 model """rotate""" +616 13 loss """softplus""" +616 13 regularizer """no""" +616 13 optimizer """adam""" +616 13 training_loop """owa""" +616 13 negative_sampler """basic""" +616 13 evaluator """rankbased""" +616 14 dataset """wn18rr""" +616 14 model """rotate""" +616 14 loss """softplus""" +616 14 regularizer """no""" +616 14 optimizer """adam""" +616 14 training_loop """owa""" +616 14 negative_sampler """basic""" +616 14 evaluator """rankbased""" +616 15 dataset """wn18rr""" +616 15 model """rotate""" +616 15 loss """softplus""" +616 15 regularizer """no""" +616 15 optimizer """adam""" +616 15 training_loop """owa""" +616 15 negative_sampler """basic""" +616 15 evaluator """rankbased""" +616 16 dataset """wn18rr""" +616 16 model """rotate""" +616 16 loss """softplus""" +616 16 regularizer """no""" +616 16 optimizer """adam""" 
+616 16 training_loop """owa""" +616 16 negative_sampler """basic""" +616 16 evaluator """rankbased""" +616 17 dataset """wn18rr""" +616 17 model """rotate""" +616 17 loss """softplus""" +616 17 regularizer """no""" +616 17 optimizer """adam""" +616 17 training_loop """owa""" +616 17 negative_sampler """basic""" +616 17 evaluator """rankbased""" +616 18 dataset """wn18rr""" +616 18 model """rotate""" +616 18 loss """softplus""" +616 18 regularizer """no""" +616 18 optimizer """adam""" +616 18 training_loop """owa""" +616 18 negative_sampler """basic""" +616 18 evaluator """rankbased""" +616 19 dataset """wn18rr""" +616 19 model """rotate""" +616 19 loss """softplus""" +616 19 regularizer """no""" +616 19 optimizer """adam""" +616 19 training_loop """owa""" +616 19 negative_sampler """basic""" +616 19 evaluator """rankbased""" +616 20 dataset """wn18rr""" +616 20 model """rotate""" +616 20 loss """softplus""" +616 20 regularizer """no""" +616 20 optimizer """adam""" +616 20 training_loop """owa""" +616 20 negative_sampler """basic""" +616 20 evaluator """rankbased""" +616 21 dataset """wn18rr""" +616 21 model """rotate""" +616 21 loss """softplus""" +616 21 regularizer """no""" +616 21 optimizer """adam""" +616 21 training_loop """owa""" +616 21 negative_sampler """basic""" +616 21 evaluator """rankbased""" +616 22 dataset """wn18rr""" +616 22 model """rotate""" +616 22 loss """softplus""" +616 22 regularizer """no""" +616 22 optimizer """adam""" +616 22 training_loop """owa""" +616 22 negative_sampler """basic""" +616 22 evaluator """rankbased""" +616 23 dataset """wn18rr""" +616 23 model """rotate""" +616 23 loss """softplus""" +616 23 regularizer """no""" +616 23 optimizer """adam""" +616 23 training_loop """owa""" +616 23 negative_sampler """basic""" +616 23 evaluator """rankbased""" +616 24 dataset """wn18rr""" +616 24 model """rotate""" +616 24 loss """softplus""" +616 24 regularizer """no""" +616 24 optimizer """adam""" +616 24 training_loop """owa""" +616 24 
negative_sampler """basic""" +616 24 evaluator """rankbased""" +616 25 dataset """wn18rr""" +616 25 model """rotate""" +616 25 loss """softplus""" +616 25 regularizer """no""" +616 25 optimizer """adam""" +616 25 training_loop """owa""" +616 25 negative_sampler """basic""" +616 25 evaluator """rankbased""" +616 26 dataset """wn18rr""" +616 26 model """rotate""" +616 26 loss """softplus""" +616 26 regularizer """no""" +616 26 optimizer """adam""" +616 26 training_loop """owa""" +616 26 negative_sampler """basic""" +616 26 evaluator """rankbased""" +616 27 dataset """wn18rr""" +616 27 model """rotate""" +616 27 loss """softplus""" +616 27 regularizer """no""" +616 27 optimizer """adam""" +616 27 training_loop """owa""" +616 27 negative_sampler """basic""" +616 27 evaluator """rankbased""" +616 28 dataset """wn18rr""" +616 28 model """rotate""" +616 28 loss """softplus""" +616 28 regularizer """no""" +616 28 optimizer """adam""" +616 28 training_loop """owa""" +616 28 negative_sampler """basic""" +616 28 evaluator """rankbased""" +616 29 dataset """wn18rr""" +616 29 model """rotate""" +616 29 loss """softplus""" +616 29 regularizer """no""" +616 29 optimizer """adam""" +616 29 training_loop """owa""" +616 29 negative_sampler """basic""" +616 29 evaluator """rankbased""" +616 30 dataset """wn18rr""" +616 30 model """rotate""" +616 30 loss """softplus""" +616 30 regularizer """no""" +616 30 optimizer """adam""" +616 30 training_loop """owa""" +616 30 negative_sampler """basic""" +616 30 evaluator """rankbased""" +616 31 dataset """wn18rr""" +616 31 model """rotate""" +616 31 loss """softplus""" +616 31 regularizer """no""" +616 31 optimizer """adam""" +616 31 training_loop """owa""" +616 31 negative_sampler """basic""" +616 31 evaluator """rankbased""" +616 32 dataset """wn18rr""" +616 32 model """rotate""" +616 32 loss """softplus""" +616 32 regularizer """no""" +616 32 optimizer """adam""" +616 32 training_loop """owa""" +616 32 negative_sampler """basic""" +616 32 
evaluator """rankbased""" +616 33 dataset """wn18rr""" +616 33 model """rotate""" +616 33 loss """softplus""" +616 33 regularizer """no""" +616 33 optimizer """adam""" +616 33 training_loop """owa""" +616 33 negative_sampler """basic""" +616 33 evaluator """rankbased""" +616 34 dataset """wn18rr""" +616 34 model """rotate""" +616 34 loss """softplus""" +616 34 regularizer """no""" +616 34 optimizer """adam""" +616 34 training_loop """owa""" +616 34 negative_sampler """basic""" +616 34 evaluator """rankbased""" +616 35 dataset """wn18rr""" +616 35 model """rotate""" +616 35 loss """softplus""" +616 35 regularizer """no""" +616 35 optimizer """adam""" +616 35 training_loop """owa""" +616 35 negative_sampler """basic""" +616 35 evaluator """rankbased""" +616 36 dataset """wn18rr""" +616 36 model """rotate""" +616 36 loss """softplus""" +616 36 regularizer """no""" +616 36 optimizer """adam""" +616 36 training_loop """owa""" +616 36 negative_sampler """basic""" +616 36 evaluator """rankbased""" +616 37 dataset """wn18rr""" +616 37 model """rotate""" +616 37 loss """softplus""" +616 37 regularizer """no""" +616 37 optimizer """adam""" +616 37 training_loop """owa""" +616 37 negative_sampler """basic""" +616 37 evaluator """rankbased""" +616 38 dataset """wn18rr""" +616 38 model """rotate""" +616 38 loss """softplus""" +616 38 regularizer """no""" +616 38 optimizer """adam""" +616 38 training_loop """owa""" +616 38 negative_sampler """basic""" +616 38 evaluator """rankbased""" +616 39 dataset """wn18rr""" +616 39 model """rotate""" +616 39 loss """softplus""" +616 39 regularizer """no""" +616 39 optimizer """adam""" +616 39 training_loop """owa""" +616 39 negative_sampler """basic""" +616 39 evaluator """rankbased""" +616 40 dataset """wn18rr""" +616 40 model """rotate""" +616 40 loss """softplus""" +616 40 regularizer """no""" +616 40 optimizer """adam""" +616 40 training_loop """owa""" +616 40 negative_sampler """basic""" +616 40 evaluator """rankbased""" +616 41 
dataset """wn18rr""" +616 41 model """rotate""" +616 41 loss """softplus""" +616 41 regularizer """no""" +616 41 optimizer """adam""" +616 41 training_loop """owa""" +616 41 negative_sampler """basic""" +616 41 evaluator """rankbased""" +616 42 dataset """wn18rr""" +616 42 model """rotate""" +616 42 loss """softplus""" +616 42 regularizer """no""" +616 42 optimizer """adam""" +616 42 training_loop """owa""" +616 42 negative_sampler """basic""" +616 42 evaluator """rankbased""" +616 43 dataset """wn18rr""" +616 43 model """rotate""" +616 43 loss """softplus""" +616 43 regularizer """no""" +616 43 optimizer """adam""" +616 43 training_loop """owa""" +616 43 negative_sampler """basic""" +616 43 evaluator """rankbased""" +616 44 dataset """wn18rr""" +616 44 model """rotate""" +616 44 loss """softplus""" +616 44 regularizer """no""" +616 44 optimizer """adam""" +616 44 training_loop """owa""" +616 44 negative_sampler """basic""" +616 44 evaluator """rankbased""" +616 45 dataset """wn18rr""" +616 45 model """rotate""" +616 45 loss """softplus""" +616 45 regularizer """no""" +616 45 optimizer """adam""" +616 45 training_loop """owa""" +616 45 negative_sampler """basic""" +616 45 evaluator """rankbased""" +616 46 dataset """wn18rr""" +616 46 model """rotate""" +616 46 loss """softplus""" +616 46 regularizer """no""" +616 46 optimizer """adam""" +616 46 training_loop """owa""" +616 46 negative_sampler """basic""" +616 46 evaluator """rankbased""" +616 47 dataset """wn18rr""" +616 47 model """rotate""" +616 47 loss """softplus""" +616 47 regularizer """no""" +616 47 optimizer """adam""" +616 47 training_loop """owa""" +616 47 negative_sampler """basic""" +616 47 evaluator """rankbased""" +616 48 dataset """wn18rr""" +616 48 model """rotate""" +616 48 loss """softplus""" +616 48 regularizer """no""" +616 48 optimizer """adam""" +616 48 training_loop """owa""" +616 48 negative_sampler """basic""" +616 48 evaluator """rankbased""" +616 49 dataset """wn18rr""" +616 49 model 
"""rotate""" +616 49 loss """softplus""" +616 49 regularizer """no""" +616 49 optimizer """adam""" +616 49 training_loop """owa""" +616 49 negative_sampler """basic""" +616 49 evaluator """rankbased""" +616 50 dataset """wn18rr""" +616 50 model """rotate""" +616 50 loss """softplus""" +616 50 regularizer """no""" +616 50 optimizer """adam""" +616 50 training_loop """owa""" +616 50 negative_sampler """basic""" +616 50 evaluator """rankbased""" +616 51 dataset """wn18rr""" +616 51 model """rotate""" +616 51 loss """softplus""" +616 51 regularizer """no""" +616 51 optimizer """adam""" +616 51 training_loop """owa""" +616 51 negative_sampler """basic""" +616 51 evaluator """rankbased""" +616 52 dataset """wn18rr""" +616 52 model """rotate""" +616 52 loss """softplus""" +616 52 regularizer """no""" +616 52 optimizer """adam""" +616 52 training_loop """owa""" +616 52 negative_sampler """basic""" +616 52 evaluator """rankbased""" +616 53 dataset """wn18rr""" +616 53 model """rotate""" +616 53 loss """softplus""" +616 53 regularizer """no""" +616 53 optimizer """adam""" +616 53 training_loop """owa""" +616 53 negative_sampler """basic""" +616 53 evaluator """rankbased""" +616 54 dataset """wn18rr""" +616 54 model """rotate""" +616 54 loss """softplus""" +616 54 regularizer """no""" +616 54 optimizer """adam""" +616 54 training_loop """owa""" +616 54 negative_sampler """basic""" +616 54 evaluator """rankbased""" +616 55 dataset """wn18rr""" +616 55 model """rotate""" +616 55 loss """softplus""" +616 55 regularizer """no""" +616 55 optimizer """adam""" +616 55 training_loop """owa""" +616 55 negative_sampler """basic""" +616 55 evaluator """rankbased""" +616 56 dataset """wn18rr""" +616 56 model """rotate""" +616 56 loss """softplus""" +616 56 regularizer """no""" +616 56 optimizer """adam""" +616 56 training_loop """owa""" +616 56 negative_sampler """basic""" +616 56 evaluator """rankbased""" +616 57 dataset """wn18rr""" +616 57 model """rotate""" +616 57 loss 
"""softplus""" +616 57 regularizer """no""" +616 57 optimizer """adam""" +616 57 training_loop """owa""" +616 57 negative_sampler """basic""" +616 57 evaluator """rankbased""" +616 58 dataset """wn18rr""" +616 58 model """rotate""" +616 58 loss """softplus""" +616 58 regularizer """no""" +616 58 optimizer """adam""" +616 58 training_loop """owa""" +616 58 negative_sampler """basic""" +616 58 evaluator """rankbased""" +616 59 dataset """wn18rr""" +616 59 model """rotate""" +616 59 loss """softplus""" +616 59 regularizer """no""" +616 59 optimizer """adam""" +616 59 training_loop """owa""" +616 59 negative_sampler """basic""" +616 59 evaluator """rankbased""" +616 60 dataset """wn18rr""" +616 60 model """rotate""" +616 60 loss """softplus""" +616 60 regularizer """no""" +616 60 optimizer """adam""" +616 60 training_loop """owa""" +616 60 negative_sampler """basic""" +616 60 evaluator """rankbased""" +616 61 dataset """wn18rr""" +616 61 model """rotate""" +616 61 loss """softplus""" +616 61 regularizer """no""" +616 61 optimizer """adam""" +616 61 training_loop """owa""" +616 61 negative_sampler """basic""" +616 61 evaluator """rankbased""" +616 62 dataset """wn18rr""" +616 62 model """rotate""" +616 62 loss """softplus""" +616 62 regularizer """no""" +616 62 optimizer """adam""" +616 62 training_loop """owa""" +616 62 negative_sampler """basic""" +616 62 evaluator """rankbased""" +616 63 dataset """wn18rr""" +616 63 model """rotate""" +616 63 loss """softplus""" +616 63 regularizer """no""" +616 63 optimizer """adam""" +616 63 training_loop """owa""" +616 63 negative_sampler """basic""" +616 63 evaluator """rankbased""" +616 64 dataset """wn18rr""" +616 64 model """rotate""" +616 64 loss """softplus""" +616 64 regularizer """no""" +616 64 optimizer """adam""" +616 64 training_loop """owa""" +616 64 negative_sampler """basic""" +616 64 evaluator """rankbased""" +616 65 dataset """wn18rr""" +616 65 model """rotate""" +616 65 loss """softplus""" +616 65 regularizer 
"""no""" +616 65 optimizer """adam""" +616 65 training_loop """owa""" +616 65 negative_sampler """basic""" +616 65 evaluator """rankbased""" +616 66 dataset """wn18rr""" +616 66 model """rotate""" +616 66 loss """softplus""" +616 66 regularizer """no""" +616 66 optimizer """adam""" +616 66 training_loop """owa""" +616 66 negative_sampler """basic""" +616 66 evaluator """rankbased""" +616 67 dataset """wn18rr""" +616 67 model """rotate""" +616 67 loss """softplus""" +616 67 regularizer """no""" +616 67 optimizer """adam""" +616 67 training_loop """owa""" +616 67 negative_sampler """basic""" +616 67 evaluator """rankbased""" +617 1 model.embedding_dim 0.0 +617 1 optimizer.lr 0.006317074537030744 +617 1 negative_sampler.num_negs_per_pos 41.0 +617 1 training.batch_size 1.0 +617 2 model.embedding_dim 0.0 +617 2 optimizer.lr 0.05550949161923125 +617 2 negative_sampler.num_negs_per_pos 99.0 +617 2 training.batch_size 1.0 +617 3 model.embedding_dim 2.0 +617 3 optimizer.lr 0.07162763695154564 +617 3 negative_sampler.num_negs_per_pos 77.0 +617 3 training.batch_size 0.0 +617 4 model.embedding_dim 1.0 +617 4 optimizer.lr 0.001686171942929461 +617 4 negative_sampler.num_negs_per_pos 47.0 +617 4 training.batch_size 2.0 +617 5 model.embedding_dim 0.0 +617 5 optimizer.lr 0.07646403752154818 +617 5 negative_sampler.num_negs_per_pos 96.0 +617 5 training.batch_size 1.0 +617 6 model.embedding_dim 0.0 +617 6 optimizer.lr 0.0015989313128744033 +617 6 negative_sampler.num_negs_per_pos 74.0 +617 6 training.batch_size 2.0 +617 7 model.embedding_dim 0.0 +617 7 optimizer.lr 0.004741828579857765 +617 7 negative_sampler.num_negs_per_pos 26.0 +617 7 training.batch_size 1.0 +617 8 model.embedding_dim 1.0 +617 8 optimizer.lr 0.001395135305160142 +617 8 negative_sampler.num_negs_per_pos 14.0 +617 8 training.batch_size 1.0 +617 9 model.embedding_dim 2.0 +617 9 optimizer.lr 0.005477788701215011 +617 9 negative_sampler.num_negs_per_pos 69.0 +617 9 training.batch_size 2.0 +617 10 model.embedding_dim 
1.0 +617 10 optimizer.lr 0.037256599837397514 +617 10 negative_sampler.num_negs_per_pos 80.0 +617 10 training.batch_size 1.0 +617 11 model.embedding_dim 2.0 +617 11 optimizer.lr 0.01365455576139838 +617 11 negative_sampler.num_negs_per_pos 48.0 +617 11 training.batch_size 2.0 +617 12 model.embedding_dim 2.0 +617 12 optimizer.lr 0.001453560545320568 +617 12 negative_sampler.num_negs_per_pos 42.0 +617 12 training.batch_size 1.0 +617 13 model.embedding_dim 1.0 +617 13 optimizer.lr 0.004385657055694603 +617 13 negative_sampler.num_negs_per_pos 4.0 +617 13 training.batch_size 2.0 +617 14 model.embedding_dim 2.0 +617 14 optimizer.lr 0.002146085665293951 +617 14 negative_sampler.num_negs_per_pos 42.0 +617 14 training.batch_size 2.0 +617 15 model.embedding_dim 0.0 +617 15 optimizer.lr 0.001126287130663834 +617 15 negative_sampler.num_negs_per_pos 28.0 +617 15 training.batch_size 2.0 +617 16 model.embedding_dim 0.0 +617 16 optimizer.lr 0.008643789415761775 +617 16 negative_sampler.num_negs_per_pos 43.0 +617 16 training.batch_size 0.0 +617 17 model.embedding_dim 2.0 +617 17 optimizer.lr 0.001328646268570455 +617 17 negative_sampler.num_negs_per_pos 58.0 +617 17 training.batch_size 0.0 +617 18 model.embedding_dim 2.0 +617 18 optimizer.lr 0.002047190159509513 +617 18 negative_sampler.num_negs_per_pos 86.0 +617 18 training.batch_size 1.0 +617 19 model.embedding_dim 1.0 +617 19 optimizer.lr 0.0029322484674693702 +617 19 negative_sampler.num_negs_per_pos 15.0 +617 19 training.batch_size 1.0 +617 20 model.embedding_dim 1.0 +617 20 optimizer.lr 0.0024064997437666076 +617 20 negative_sampler.num_negs_per_pos 21.0 +617 20 training.batch_size 2.0 +617 21 model.embedding_dim 1.0 +617 21 optimizer.lr 0.006806139953051995 +617 21 negative_sampler.num_negs_per_pos 78.0 +617 21 training.batch_size 1.0 +617 22 model.embedding_dim 0.0 +617 22 optimizer.lr 0.03899602914849091 +617 22 negative_sampler.num_negs_per_pos 54.0 +617 22 training.batch_size 1.0 +617 23 model.embedding_dim 0.0 +617 23 
optimizer.lr 0.0368585632412948 +617 23 negative_sampler.num_negs_per_pos 96.0 +617 23 training.batch_size 2.0 +617 24 model.embedding_dim 1.0 +617 24 optimizer.lr 0.026057595360018736 +617 24 negative_sampler.num_negs_per_pos 68.0 +617 24 training.batch_size 2.0 +617 25 model.embedding_dim 0.0 +617 25 optimizer.lr 0.0026855460808434205 +617 25 negative_sampler.num_negs_per_pos 62.0 +617 25 training.batch_size 0.0 +617 26 model.embedding_dim 1.0 +617 26 optimizer.lr 0.003751900423833667 +617 26 negative_sampler.num_negs_per_pos 5.0 +617 26 training.batch_size 2.0 +617 27 model.embedding_dim 0.0 +617 27 optimizer.lr 0.005643380623214571 +617 27 negative_sampler.num_negs_per_pos 82.0 +617 27 training.batch_size 2.0 +617 28 model.embedding_dim 1.0 +617 28 optimizer.lr 0.008020876667220421 +617 28 negative_sampler.num_negs_per_pos 43.0 +617 28 training.batch_size 0.0 +617 29 model.embedding_dim 2.0 +617 29 optimizer.lr 0.0011039478172104045 +617 29 negative_sampler.num_negs_per_pos 3.0 +617 29 training.batch_size 1.0 +617 30 model.embedding_dim 2.0 +617 30 optimizer.lr 0.003010163184817937 +617 30 negative_sampler.num_negs_per_pos 16.0 +617 30 training.batch_size 0.0 +617 31 model.embedding_dim 1.0 +617 31 optimizer.lr 0.01175461872237486 +617 31 negative_sampler.num_negs_per_pos 26.0 +617 31 training.batch_size 0.0 +617 32 model.embedding_dim 2.0 +617 32 optimizer.lr 0.009398857999841963 +617 32 negative_sampler.num_negs_per_pos 61.0 +617 32 training.batch_size 0.0 +617 33 model.embedding_dim 1.0 +617 33 optimizer.lr 0.05987178078026274 +617 33 negative_sampler.num_negs_per_pos 23.0 +617 33 training.batch_size 2.0 +617 34 model.embedding_dim 1.0 +617 34 optimizer.lr 0.006241718613189444 +617 34 negative_sampler.num_negs_per_pos 45.0 +617 34 training.batch_size 0.0 +617 35 model.embedding_dim 1.0 +617 35 optimizer.lr 0.037775243948655585 +617 35 negative_sampler.num_negs_per_pos 51.0 +617 35 training.batch_size 0.0 +617 36 model.embedding_dim 2.0 +617 36 optimizer.lr 
0.036475570981432774 +617 36 negative_sampler.num_negs_per_pos 85.0 +617 36 training.batch_size 2.0 +617 37 model.embedding_dim 2.0 +617 37 optimizer.lr 0.015222897940187002 +617 37 negative_sampler.num_negs_per_pos 24.0 +617 37 training.batch_size 1.0 +617 38 model.embedding_dim 2.0 +617 38 optimizer.lr 0.011546322460599215 +617 38 negative_sampler.num_negs_per_pos 68.0 +617 38 training.batch_size 1.0 +617 39 model.embedding_dim 0.0 +617 39 optimizer.lr 0.02963977893952456 +617 39 negative_sampler.num_negs_per_pos 69.0 +617 39 training.batch_size 1.0 +617 40 model.embedding_dim 1.0 +617 40 optimizer.lr 0.0859759375451158 +617 40 negative_sampler.num_negs_per_pos 94.0 +617 40 training.batch_size 0.0 +617 41 model.embedding_dim 0.0 +617 41 optimizer.lr 0.005116700142814231 +617 41 negative_sampler.num_negs_per_pos 47.0 +617 41 training.batch_size 2.0 +617 42 model.embedding_dim 2.0 +617 42 optimizer.lr 0.005754471701830581 +617 42 negative_sampler.num_negs_per_pos 18.0 +617 42 training.batch_size 0.0 +617 43 model.embedding_dim 1.0 +617 43 optimizer.lr 0.0031283058943818603 +617 43 negative_sampler.num_negs_per_pos 56.0 +617 43 training.batch_size 0.0 +617 44 model.embedding_dim 1.0 +617 44 optimizer.lr 0.010337356405904533 +617 44 negative_sampler.num_negs_per_pos 82.0 +617 44 training.batch_size 1.0 +617 45 model.embedding_dim 2.0 +617 45 optimizer.lr 0.01446639120966224 +617 45 negative_sampler.num_negs_per_pos 51.0 +617 45 training.batch_size 2.0 +617 46 model.embedding_dim 0.0 +617 46 optimizer.lr 0.003247871138111946 +617 46 negative_sampler.num_negs_per_pos 12.0 +617 46 training.batch_size 1.0 +617 47 model.embedding_dim 2.0 +617 47 optimizer.lr 0.006368882219861505 +617 47 negative_sampler.num_negs_per_pos 19.0 +617 47 training.batch_size 1.0 +617 48 model.embedding_dim 1.0 +617 48 optimizer.lr 0.0961129378495936 +617 48 negative_sampler.num_negs_per_pos 9.0 +617 48 training.batch_size 0.0 +617 49 model.embedding_dim 1.0 +617 49 optimizer.lr 
0.05579271047847812 +617 49 negative_sampler.num_negs_per_pos 1.0 +617 49 training.batch_size 0.0 +617 50 model.embedding_dim 0.0 +617 50 optimizer.lr 0.013362466027617885 +617 50 negative_sampler.num_negs_per_pos 78.0 +617 50 training.batch_size 0.0 +617 51 model.embedding_dim 0.0 +617 51 optimizer.lr 0.0023541312191497858 +617 51 negative_sampler.num_negs_per_pos 56.0 +617 51 training.batch_size 0.0 +617 52 model.embedding_dim 2.0 +617 52 optimizer.lr 0.02872210615393768 +617 52 negative_sampler.num_negs_per_pos 25.0 +617 52 training.batch_size 1.0 +617 53 model.embedding_dim 0.0 +617 53 optimizer.lr 0.01100258010046087 +617 53 negative_sampler.num_negs_per_pos 25.0 +617 53 training.batch_size 1.0 +617 54 model.embedding_dim 2.0 +617 54 optimizer.lr 0.017901395237154862 +617 54 negative_sampler.num_negs_per_pos 69.0 +617 54 training.batch_size 0.0 +617 55 model.embedding_dim 0.0 +617 55 optimizer.lr 0.013840638876537524 +617 55 negative_sampler.num_negs_per_pos 8.0 +617 55 training.batch_size 2.0 +617 56 model.embedding_dim 0.0 +617 56 optimizer.lr 0.051546988075630795 +617 56 negative_sampler.num_negs_per_pos 50.0 +617 56 training.batch_size 1.0 +617 57 model.embedding_dim 2.0 +617 57 optimizer.lr 0.06797501606507106 +617 57 negative_sampler.num_negs_per_pos 2.0 +617 57 training.batch_size 1.0 +617 58 model.embedding_dim 2.0 +617 58 optimizer.lr 0.0373559877586761 +617 58 negative_sampler.num_negs_per_pos 34.0 +617 58 training.batch_size 1.0 +617 59 model.embedding_dim 2.0 +617 59 optimizer.lr 0.005167155160010865 +617 59 negative_sampler.num_negs_per_pos 20.0 +617 59 training.batch_size 2.0 +617 60 model.embedding_dim 1.0 +617 60 optimizer.lr 0.06930812855243586 +617 60 negative_sampler.num_negs_per_pos 76.0 +617 60 training.batch_size 0.0 +617 61 model.embedding_dim 2.0 +617 61 optimizer.lr 0.007661825210288226 +617 61 negative_sampler.num_negs_per_pos 69.0 +617 61 training.batch_size 2.0 +617 62 model.embedding_dim 2.0 +617 62 optimizer.lr 
0.002886363698091175 +617 62 negative_sampler.num_negs_per_pos 83.0 +617 62 training.batch_size 2.0 +617 63 model.embedding_dim 1.0 +617 63 optimizer.lr 0.0021698345216527143 +617 63 negative_sampler.num_negs_per_pos 27.0 +617 63 training.batch_size 1.0 +617 64 model.embedding_dim 2.0 +617 64 optimizer.lr 0.017311716286906708 +617 64 negative_sampler.num_negs_per_pos 75.0 +617 64 training.batch_size 0.0 +617 65 model.embedding_dim 0.0 +617 65 optimizer.lr 0.020290221193075064 +617 65 negative_sampler.num_negs_per_pos 83.0 +617 65 training.batch_size 1.0 +617 66 model.embedding_dim 2.0 +617 66 optimizer.lr 0.0027327584976677044 +617 66 negative_sampler.num_negs_per_pos 58.0 +617 66 training.batch_size 2.0 +617 67 model.embedding_dim 1.0 +617 67 optimizer.lr 0.03858618041932641 +617 67 negative_sampler.num_negs_per_pos 12.0 +617 67 training.batch_size 1.0 +617 68 model.embedding_dim 1.0 +617 68 optimizer.lr 0.0019360983103275022 +617 68 negative_sampler.num_negs_per_pos 65.0 +617 68 training.batch_size 2.0 +617 69 model.embedding_dim 0.0 +617 69 optimizer.lr 0.01963422021755301 +617 69 negative_sampler.num_negs_per_pos 35.0 +617 69 training.batch_size 1.0 +617 70 model.embedding_dim 0.0 +617 70 optimizer.lr 0.025514656261106595 +617 70 negative_sampler.num_negs_per_pos 66.0 +617 70 training.batch_size 2.0 +617 71 model.embedding_dim 1.0 +617 71 optimizer.lr 0.02121290783355703 +617 71 negative_sampler.num_negs_per_pos 35.0 +617 71 training.batch_size 2.0 +617 72 model.embedding_dim 2.0 +617 72 optimizer.lr 0.03815891880931283 +617 72 negative_sampler.num_negs_per_pos 0.0 +617 72 training.batch_size 2.0 +617 73 model.embedding_dim 1.0 +617 73 optimizer.lr 0.003215314821811986 +617 73 negative_sampler.num_negs_per_pos 74.0 +617 73 training.batch_size 1.0 +617 74 model.embedding_dim 0.0 +617 74 optimizer.lr 0.029957541591518647 +617 74 negative_sampler.num_negs_per_pos 28.0 +617 74 training.batch_size 0.0 +617 75 model.embedding_dim 1.0 +617 75 optimizer.lr 
0.09232599230083938 +617 75 negative_sampler.num_negs_per_pos 89.0 +617 75 training.batch_size 1.0 +617 76 model.embedding_dim 0.0 +617 76 optimizer.lr 0.0025760670543902835 +617 76 negative_sampler.num_negs_per_pos 81.0 +617 76 training.batch_size 1.0 +617 77 model.embedding_dim 2.0 +617 77 optimizer.lr 0.004305439076881341 +617 77 negative_sampler.num_negs_per_pos 85.0 +617 77 training.batch_size 0.0 +617 78 model.embedding_dim 2.0 +617 78 optimizer.lr 0.06734210467947169 +617 78 negative_sampler.num_negs_per_pos 38.0 +617 78 training.batch_size 2.0 +617 79 model.embedding_dim 2.0 +617 79 optimizer.lr 0.0014875988013405468 +617 79 negative_sampler.num_negs_per_pos 62.0 +617 79 training.batch_size 1.0 +617 80 model.embedding_dim 1.0 +617 80 optimizer.lr 0.00171235308174101 +617 80 negative_sampler.num_negs_per_pos 30.0 +617 80 training.batch_size 2.0 +617 81 model.embedding_dim 2.0 +617 81 optimizer.lr 0.027526970358557307 +617 81 negative_sampler.num_negs_per_pos 49.0 +617 81 training.batch_size 1.0 +617 82 model.embedding_dim 1.0 +617 82 optimizer.lr 0.008517825800795483 +617 82 negative_sampler.num_negs_per_pos 61.0 +617 82 training.batch_size 2.0 +617 83 model.embedding_dim 0.0 +617 83 optimizer.lr 0.003002891030760263 +617 83 negative_sampler.num_negs_per_pos 62.0 +617 83 training.batch_size 1.0 +617 84 model.embedding_dim 2.0 +617 84 optimizer.lr 0.0691175127328593 +617 84 negative_sampler.num_negs_per_pos 12.0 +617 84 training.batch_size 2.0 +617 85 model.embedding_dim 0.0 +617 85 optimizer.lr 0.0023231314508561285 +617 85 negative_sampler.num_negs_per_pos 17.0 +617 85 training.batch_size 2.0 +617 86 model.embedding_dim 1.0 +617 86 optimizer.lr 0.001886885619734414 +617 86 negative_sampler.num_negs_per_pos 66.0 +617 86 training.batch_size 0.0 +617 87 model.embedding_dim 0.0 +617 87 optimizer.lr 0.03825039283135005 +617 87 negative_sampler.num_negs_per_pos 43.0 +617 87 training.batch_size 0.0 +617 88 model.embedding_dim 0.0 +617 88 optimizer.lr 
0.004061756272706499 +617 88 negative_sampler.num_negs_per_pos 67.0 +617 88 training.batch_size 0.0 +617 89 model.embedding_dim 1.0 +617 89 optimizer.lr 0.08916122759207051 +617 89 negative_sampler.num_negs_per_pos 60.0 +617 89 training.batch_size 2.0 +617 90 model.embedding_dim 0.0 +617 90 optimizer.lr 0.006530656417767264 +617 90 negative_sampler.num_negs_per_pos 30.0 +617 90 training.batch_size 1.0 +617 91 model.embedding_dim 1.0 +617 91 optimizer.lr 0.002604157514700634 +617 91 negative_sampler.num_negs_per_pos 75.0 +617 91 training.batch_size 0.0 +617 92 model.embedding_dim 2.0 +617 92 optimizer.lr 0.01799024282074302 +617 92 negative_sampler.num_negs_per_pos 42.0 +617 92 training.batch_size 1.0 +617 93 model.embedding_dim 2.0 +617 93 optimizer.lr 0.0010024982860590314 +617 93 negative_sampler.num_negs_per_pos 7.0 +617 93 training.batch_size 0.0 +617 94 model.embedding_dim 2.0 +617 94 optimizer.lr 0.0077540397985334675 +617 94 negative_sampler.num_negs_per_pos 55.0 +617 94 training.batch_size 2.0 +617 95 model.embedding_dim 2.0 +617 95 optimizer.lr 0.036196887163036325 +617 95 negative_sampler.num_negs_per_pos 7.0 +617 95 training.batch_size 2.0 +617 96 model.embedding_dim 1.0 +617 96 optimizer.lr 0.048645934325862235 +617 96 negative_sampler.num_negs_per_pos 77.0 +617 96 training.batch_size 2.0 +617 97 model.embedding_dim 1.0 +617 97 optimizer.lr 0.00201250214595782 +617 97 negative_sampler.num_negs_per_pos 36.0 +617 97 training.batch_size 2.0 +617 98 model.embedding_dim 0.0 +617 98 optimizer.lr 0.06155632585198428 +617 98 negative_sampler.num_negs_per_pos 24.0 +617 98 training.batch_size 1.0 +617 99 model.embedding_dim 1.0 +617 99 optimizer.lr 0.014109606969538745 +617 99 negative_sampler.num_negs_per_pos 59.0 +617 99 training.batch_size 1.0 +617 100 model.embedding_dim 2.0 +617 100 optimizer.lr 0.09253879799099597 +617 100 negative_sampler.num_negs_per_pos 50.0 +617 100 training.batch_size 2.0 +617 1 dataset """wn18rr""" +617 1 model """rotate""" +617 1 
loss """bceaftersigmoid""" +617 1 regularizer """no""" +617 1 optimizer """adam""" +617 1 training_loop """owa""" +617 1 negative_sampler """basic""" +617 1 evaluator """rankbased""" +617 2 dataset """wn18rr""" +617 2 model """rotate""" +617 2 loss """bceaftersigmoid""" +617 2 regularizer """no""" +617 2 optimizer """adam""" +617 2 training_loop """owa""" +617 2 negative_sampler """basic""" +617 2 evaluator """rankbased""" +617 3 dataset """wn18rr""" +617 3 model """rotate""" +617 3 loss """bceaftersigmoid""" +617 3 regularizer """no""" +617 3 optimizer """adam""" +617 3 training_loop """owa""" +617 3 negative_sampler """basic""" +617 3 evaluator """rankbased""" +617 4 dataset """wn18rr""" +617 4 model """rotate""" +617 4 loss """bceaftersigmoid""" +617 4 regularizer """no""" +617 4 optimizer """adam""" +617 4 training_loop """owa""" +617 4 negative_sampler """basic""" +617 4 evaluator """rankbased""" +617 5 dataset """wn18rr""" +617 5 model """rotate""" +617 5 loss """bceaftersigmoid""" +617 5 regularizer """no""" +617 5 optimizer """adam""" +617 5 training_loop """owa""" +617 5 negative_sampler """basic""" +617 5 evaluator """rankbased""" +617 6 dataset """wn18rr""" +617 6 model """rotate""" +617 6 loss """bceaftersigmoid""" +617 6 regularizer """no""" +617 6 optimizer """adam""" +617 6 training_loop """owa""" +617 6 negative_sampler """basic""" +617 6 evaluator """rankbased""" +617 7 dataset """wn18rr""" +617 7 model """rotate""" +617 7 loss """bceaftersigmoid""" +617 7 regularizer """no""" +617 7 optimizer """adam""" +617 7 training_loop """owa""" +617 7 negative_sampler """basic""" +617 7 evaluator """rankbased""" +617 8 dataset """wn18rr""" +617 8 model """rotate""" +617 8 loss """bceaftersigmoid""" +617 8 regularizer """no""" +617 8 optimizer """adam""" +617 8 training_loop """owa""" +617 8 negative_sampler """basic""" +617 8 evaluator """rankbased""" +617 9 dataset """wn18rr""" +617 9 model """rotate""" +617 9 loss """bceaftersigmoid""" +617 9 regularizer 
"""no""" +617 9 optimizer """adam""" +617 9 training_loop """owa""" +617 9 negative_sampler """basic""" +617 9 evaluator """rankbased""" +617 10 dataset """wn18rr""" +617 10 model """rotate""" +617 10 loss """bceaftersigmoid""" +617 10 regularizer """no""" +617 10 optimizer """adam""" +617 10 training_loop """owa""" +617 10 negative_sampler """basic""" +617 10 evaluator """rankbased""" +617 11 dataset """wn18rr""" +617 11 model """rotate""" +617 11 loss """bceaftersigmoid""" +617 11 regularizer """no""" +617 11 optimizer """adam""" +617 11 training_loop """owa""" +617 11 negative_sampler """basic""" +617 11 evaluator """rankbased""" +617 12 dataset """wn18rr""" +617 12 model """rotate""" +617 12 loss """bceaftersigmoid""" +617 12 regularizer """no""" +617 12 optimizer """adam""" +617 12 training_loop """owa""" +617 12 negative_sampler """basic""" +617 12 evaluator """rankbased""" +617 13 dataset """wn18rr""" +617 13 model """rotate""" +617 13 loss """bceaftersigmoid""" +617 13 regularizer """no""" +617 13 optimizer """adam""" +617 13 training_loop """owa""" +617 13 negative_sampler """basic""" +617 13 evaluator """rankbased""" +617 14 dataset """wn18rr""" +617 14 model """rotate""" +617 14 loss """bceaftersigmoid""" +617 14 regularizer """no""" +617 14 optimizer """adam""" +617 14 training_loop """owa""" +617 14 negative_sampler """basic""" +617 14 evaluator """rankbased""" +617 15 dataset """wn18rr""" +617 15 model """rotate""" +617 15 loss """bceaftersigmoid""" +617 15 regularizer """no""" +617 15 optimizer """adam""" +617 15 training_loop """owa""" +617 15 negative_sampler """basic""" +617 15 evaluator """rankbased""" +617 16 dataset """wn18rr""" +617 16 model """rotate""" +617 16 loss """bceaftersigmoid""" +617 16 regularizer """no""" +617 16 optimizer """adam""" +617 16 training_loop """owa""" +617 16 negative_sampler """basic""" +617 16 evaluator """rankbased""" +617 17 dataset """wn18rr""" +617 17 model """rotate""" +617 17 loss """bceaftersigmoid""" +617 17 
regularizer """no""" +617 17 optimizer """adam""" +617 17 training_loop """owa""" +617 17 negative_sampler """basic""" +617 17 evaluator """rankbased""" +617 18 dataset """wn18rr""" +617 18 model """rotate""" +617 18 loss """bceaftersigmoid""" +617 18 regularizer """no""" +617 18 optimizer """adam""" +617 18 training_loop """owa""" +617 18 negative_sampler """basic""" +617 18 evaluator """rankbased""" +617 19 dataset """wn18rr""" +617 19 model """rotate""" +617 19 loss """bceaftersigmoid""" +617 19 regularizer """no""" +617 19 optimizer """adam""" +617 19 training_loop """owa""" +617 19 negative_sampler """basic""" +617 19 evaluator """rankbased""" +617 20 dataset """wn18rr""" +617 20 model """rotate""" +617 20 loss """bceaftersigmoid""" +617 20 regularizer """no""" +617 20 optimizer """adam""" +617 20 training_loop """owa""" +617 20 negative_sampler """basic""" +617 20 evaluator """rankbased""" +617 21 dataset """wn18rr""" +617 21 model """rotate""" +617 21 loss """bceaftersigmoid""" +617 21 regularizer """no""" +617 21 optimizer """adam""" +617 21 training_loop """owa""" +617 21 negative_sampler """basic""" +617 21 evaluator """rankbased""" +617 22 dataset """wn18rr""" +617 22 model """rotate""" +617 22 loss """bceaftersigmoid""" +617 22 regularizer """no""" +617 22 optimizer """adam""" +617 22 training_loop """owa""" +617 22 negative_sampler """basic""" +617 22 evaluator """rankbased""" +617 23 dataset """wn18rr""" +617 23 model """rotate""" +617 23 loss """bceaftersigmoid""" +617 23 regularizer """no""" +617 23 optimizer """adam""" +617 23 training_loop """owa""" +617 23 negative_sampler """basic""" +617 23 evaluator """rankbased""" +617 24 dataset """wn18rr""" +617 24 model """rotate""" +617 24 loss """bceaftersigmoid""" +617 24 regularizer """no""" +617 24 optimizer """adam""" +617 24 training_loop """owa""" +617 24 negative_sampler """basic""" +617 24 evaluator """rankbased""" +617 25 dataset """wn18rr""" +617 25 model """rotate""" +617 25 loss 
"""bceaftersigmoid""" +617 25 regularizer """no""" +617 25 optimizer """adam""" +617 25 training_loop """owa""" +617 25 negative_sampler """basic""" +617 25 evaluator """rankbased""" +617 26 dataset """wn18rr""" +617 26 model """rotate""" +617 26 loss """bceaftersigmoid""" +617 26 regularizer """no""" +617 26 optimizer """adam""" +617 26 training_loop """owa""" +617 26 negative_sampler """basic""" +617 26 evaluator """rankbased""" +617 27 dataset """wn18rr""" +617 27 model """rotate""" +617 27 loss """bceaftersigmoid""" +617 27 regularizer """no""" +617 27 optimizer """adam""" +617 27 training_loop """owa""" +617 27 negative_sampler """basic""" +617 27 evaluator """rankbased""" +617 28 dataset """wn18rr""" +617 28 model """rotate""" +617 28 loss """bceaftersigmoid""" +617 28 regularizer """no""" +617 28 optimizer """adam""" +617 28 training_loop """owa""" +617 28 negative_sampler """basic""" +617 28 evaluator """rankbased""" +617 29 dataset """wn18rr""" +617 29 model """rotate""" +617 29 loss """bceaftersigmoid""" +617 29 regularizer """no""" +617 29 optimizer """adam""" +617 29 training_loop """owa""" +617 29 negative_sampler """basic""" +617 29 evaluator """rankbased""" +617 30 dataset """wn18rr""" +617 30 model """rotate""" +617 30 loss """bceaftersigmoid""" +617 30 regularizer """no""" +617 30 optimizer """adam""" +617 30 training_loop """owa""" +617 30 negative_sampler """basic""" +617 30 evaluator """rankbased""" +617 31 dataset """wn18rr""" +617 31 model """rotate""" +617 31 loss """bceaftersigmoid""" +617 31 regularizer """no""" +617 31 optimizer """adam""" +617 31 training_loop """owa""" +617 31 negative_sampler """basic""" +617 31 evaluator """rankbased""" +617 32 dataset """wn18rr""" +617 32 model """rotate""" +617 32 loss """bceaftersigmoid""" +617 32 regularizer """no""" +617 32 optimizer """adam""" +617 32 training_loop """owa""" +617 32 negative_sampler """basic""" +617 32 evaluator """rankbased""" +617 33 dataset """wn18rr""" +617 33 model 
"""rotate""" +617 33 loss """bceaftersigmoid""" +617 33 regularizer """no""" +617 33 optimizer """adam""" +617 33 training_loop """owa""" +617 33 negative_sampler """basic""" +617 33 evaluator """rankbased""" +617 34 dataset """wn18rr""" +617 34 model """rotate""" +617 34 loss """bceaftersigmoid""" +617 34 regularizer """no""" +617 34 optimizer """adam""" +617 34 training_loop """owa""" +617 34 negative_sampler """basic""" +617 34 evaluator """rankbased""" +617 35 dataset """wn18rr""" +617 35 model """rotate""" +617 35 loss """bceaftersigmoid""" +617 35 regularizer """no""" +617 35 optimizer """adam""" +617 35 training_loop """owa""" +617 35 negative_sampler """basic""" +617 35 evaluator """rankbased""" +617 36 dataset """wn18rr""" +617 36 model """rotate""" +617 36 loss """bceaftersigmoid""" +617 36 regularizer """no""" +617 36 optimizer """adam""" +617 36 training_loop """owa""" +617 36 negative_sampler """basic""" +617 36 evaluator """rankbased""" +617 37 dataset """wn18rr""" +617 37 model """rotate""" +617 37 loss """bceaftersigmoid""" +617 37 regularizer """no""" +617 37 optimizer """adam""" +617 37 training_loop """owa""" +617 37 negative_sampler """basic""" +617 37 evaluator """rankbased""" +617 38 dataset """wn18rr""" +617 38 model """rotate""" +617 38 loss """bceaftersigmoid""" +617 38 regularizer """no""" +617 38 optimizer """adam""" +617 38 training_loop """owa""" +617 38 negative_sampler """basic""" +617 38 evaluator """rankbased""" +617 39 dataset """wn18rr""" +617 39 model """rotate""" +617 39 loss """bceaftersigmoid""" +617 39 regularizer """no""" +617 39 optimizer """adam""" +617 39 training_loop """owa""" +617 39 negative_sampler """basic""" +617 39 evaluator """rankbased""" +617 40 dataset """wn18rr""" +617 40 model """rotate""" +617 40 loss """bceaftersigmoid""" +617 40 regularizer """no""" +617 40 optimizer """adam""" +617 40 training_loop """owa""" +617 40 negative_sampler """basic""" +617 40 evaluator """rankbased""" +617 41 dataset 
"""wn18rr""" +617 41 model """rotate""" +617 41 loss """bceaftersigmoid""" +617 41 regularizer """no""" +617 41 optimizer """adam""" +617 41 training_loop """owa""" +617 41 negative_sampler """basic""" +617 41 evaluator """rankbased""" +617 42 dataset """wn18rr""" +617 42 model """rotate""" +617 42 loss """bceaftersigmoid""" +617 42 regularizer """no""" +617 42 optimizer """adam""" +617 42 training_loop """owa""" +617 42 negative_sampler """basic""" +617 42 evaluator """rankbased""" +617 43 dataset """wn18rr""" +617 43 model """rotate""" +617 43 loss """bceaftersigmoid""" +617 43 regularizer """no""" +617 43 optimizer """adam""" +617 43 training_loop """owa""" +617 43 negative_sampler """basic""" +617 43 evaluator """rankbased""" +617 44 dataset """wn18rr""" +617 44 model """rotate""" +617 44 loss """bceaftersigmoid""" +617 44 regularizer """no""" +617 44 optimizer """adam""" +617 44 training_loop """owa""" +617 44 negative_sampler """basic""" +617 44 evaluator """rankbased""" +617 45 dataset """wn18rr""" +617 45 model """rotate""" +617 45 loss """bceaftersigmoid""" +617 45 regularizer """no""" +617 45 optimizer """adam""" +617 45 training_loop """owa""" +617 45 negative_sampler """basic""" +617 45 evaluator """rankbased""" +617 46 dataset """wn18rr""" +617 46 model """rotate""" +617 46 loss """bceaftersigmoid""" +617 46 regularizer """no""" +617 46 optimizer """adam""" +617 46 training_loop """owa""" +617 46 negative_sampler """basic""" +617 46 evaluator """rankbased""" +617 47 dataset """wn18rr""" +617 47 model """rotate""" +617 47 loss """bceaftersigmoid""" +617 47 regularizer """no""" +617 47 optimizer """adam""" +617 47 training_loop """owa""" +617 47 negative_sampler """basic""" +617 47 evaluator """rankbased""" +617 48 dataset """wn18rr""" +617 48 model """rotate""" +617 48 loss """bceaftersigmoid""" +617 48 regularizer """no""" +617 48 optimizer """adam""" +617 48 training_loop """owa""" +617 48 negative_sampler """basic""" +617 48 evaluator """rankbased""" 
+617 49 dataset """wn18rr""" +617 49 model """rotate""" +617 49 loss """bceaftersigmoid""" +617 49 regularizer """no""" +617 49 optimizer """adam""" +617 49 training_loop """owa""" +617 49 negative_sampler """basic""" +617 49 evaluator """rankbased""" +617 50 dataset """wn18rr""" +617 50 model """rotate""" +617 50 loss """bceaftersigmoid""" +617 50 regularizer """no""" +617 50 optimizer """adam""" +617 50 training_loop """owa""" +617 50 negative_sampler """basic""" +617 50 evaluator """rankbased""" +617 51 dataset """wn18rr""" +617 51 model """rotate""" +617 51 loss """bceaftersigmoid""" +617 51 regularizer """no""" +617 51 optimizer """adam""" +617 51 training_loop """owa""" +617 51 negative_sampler """basic""" +617 51 evaluator """rankbased""" +617 52 dataset """wn18rr""" +617 52 model """rotate""" +617 52 loss """bceaftersigmoid""" +617 52 regularizer """no""" +617 52 optimizer """adam""" +617 52 training_loop """owa""" +617 52 negative_sampler """basic""" +617 52 evaluator """rankbased""" +617 53 dataset """wn18rr""" +617 53 model """rotate""" +617 53 loss """bceaftersigmoid""" +617 53 regularizer """no""" +617 53 optimizer """adam""" +617 53 training_loop """owa""" +617 53 negative_sampler """basic""" +617 53 evaluator """rankbased""" +617 54 dataset """wn18rr""" +617 54 model """rotate""" +617 54 loss """bceaftersigmoid""" +617 54 regularizer """no""" +617 54 optimizer """adam""" +617 54 training_loop """owa""" +617 54 negative_sampler """basic""" +617 54 evaluator """rankbased""" +617 55 dataset """wn18rr""" +617 55 model """rotate""" +617 55 loss """bceaftersigmoid""" +617 55 regularizer """no""" +617 55 optimizer """adam""" +617 55 training_loop """owa""" +617 55 negative_sampler """basic""" +617 55 evaluator """rankbased""" +617 56 dataset """wn18rr""" +617 56 model """rotate""" +617 56 loss """bceaftersigmoid""" +617 56 regularizer """no""" +617 56 optimizer """adam""" +617 56 training_loop """owa""" +617 56 negative_sampler """basic""" +617 56 evaluator 
"""rankbased""" +617 57 dataset """wn18rr""" +617 57 model """rotate""" +617 57 loss """bceaftersigmoid""" +617 57 regularizer """no""" +617 57 optimizer """adam""" +617 57 training_loop """owa""" +617 57 negative_sampler """basic""" +617 57 evaluator """rankbased""" +617 58 dataset """wn18rr""" +617 58 model """rotate""" +617 58 loss """bceaftersigmoid""" +617 58 regularizer """no""" +617 58 optimizer """adam""" +617 58 training_loop """owa""" +617 58 negative_sampler """basic""" +617 58 evaluator """rankbased""" +617 59 dataset """wn18rr""" +617 59 model """rotate""" +617 59 loss """bceaftersigmoid""" +617 59 regularizer """no""" +617 59 optimizer """adam""" +617 59 training_loop """owa""" +617 59 negative_sampler """basic""" +617 59 evaluator """rankbased""" +617 60 dataset """wn18rr""" +617 60 model """rotate""" +617 60 loss """bceaftersigmoid""" +617 60 regularizer """no""" +617 60 optimizer """adam""" +617 60 training_loop """owa""" +617 60 negative_sampler """basic""" +617 60 evaluator """rankbased""" +617 61 dataset """wn18rr""" +617 61 model """rotate""" +617 61 loss """bceaftersigmoid""" +617 61 regularizer """no""" +617 61 optimizer """adam""" +617 61 training_loop """owa""" +617 61 negative_sampler """basic""" +617 61 evaluator """rankbased""" +617 62 dataset """wn18rr""" +617 62 model """rotate""" +617 62 loss """bceaftersigmoid""" +617 62 regularizer """no""" +617 62 optimizer """adam""" +617 62 training_loop """owa""" +617 62 negative_sampler """basic""" +617 62 evaluator """rankbased""" +617 63 dataset """wn18rr""" +617 63 model """rotate""" +617 63 loss """bceaftersigmoid""" +617 63 regularizer """no""" +617 63 optimizer """adam""" +617 63 training_loop """owa""" +617 63 negative_sampler """basic""" +617 63 evaluator """rankbased""" +617 64 dataset """wn18rr""" +617 64 model """rotate""" +617 64 loss """bceaftersigmoid""" +617 64 regularizer """no""" +617 64 optimizer """adam""" +617 64 training_loop """owa""" +617 64 negative_sampler """basic""" 
+617 64 evaluator """rankbased""" +617 65 dataset """wn18rr""" +617 65 model """rotate""" +617 65 loss """bceaftersigmoid""" +617 65 regularizer """no""" +617 65 optimizer """adam""" +617 65 training_loop """owa""" +617 65 negative_sampler """basic""" +617 65 evaluator """rankbased""" +617 66 dataset """wn18rr""" +617 66 model """rotate""" +617 66 loss """bceaftersigmoid""" +617 66 regularizer """no""" +617 66 optimizer """adam""" +617 66 training_loop """owa""" +617 66 negative_sampler """basic""" +617 66 evaluator """rankbased""" +617 67 dataset """wn18rr""" +617 67 model """rotate""" +617 67 loss """bceaftersigmoid""" +617 67 regularizer """no""" +617 67 optimizer """adam""" +617 67 training_loop """owa""" +617 67 negative_sampler """basic""" +617 67 evaluator """rankbased""" +617 68 dataset """wn18rr""" +617 68 model """rotate""" +617 68 loss """bceaftersigmoid""" +617 68 regularizer """no""" +617 68 optimizer """adam""" +617 68 training_loop """owa""" +617 68 negative_sampler """basic""" +617 68 evaluator """rankbased""" +617 69 dataset """wn18rr""" +617 69 model """rotate""" +617 69 loss """bceaftersigmoid""" +617 69 regularizer """no""" +617 69 optimizer """adam""" +617 69 training_loop """owa""" +617 69 negative_sampler """basic""" +617 69 evaluator """rankbased""" +617 70 dataset """wn18rr""" +617 70 model """rotate""" +617 70 loss """bceaftersigmoid""" +617 70 regularizer """no""" +617 70 optimizer """adam""" +617 70 training_loop """owa""" +617 70 negative_sampler """basic""" +617 70 evaluator """rankbased""" +617 71 dataset """wn18rr""" +617 71 model """rotate""" +617 71 loss """bceaftersigmoid""" +617 71 regularizer """no""" +617 71 optimizer """adam""" +617 71 training_loop """owa""" +617 71 negative_sampler """basic""" +617 71 evaluator """rankbased""" +617 72 dataset """wn18rr""" +617 72 model """rotate""" +617 72 loss """bceaftersigmoid""" +617 72 regularizer """no""" +617 72 optimizer """adam""" +617 72 training_loop """owa""" +617 72 
negative_sampler """basic""" +617 72 evaluator """rankbased""" +617 73 dataset """wn18rr""" +617 73 model """rotate""" +617 73 loss """bceaftersigmoid""" +617 73 regularizer """no""" +617 73 optimizer """adam""" +617 73 training_loop """owa""" +617 73 negative_sampler """basic""" +617 73 evaluator """rankbased""" +617 74 dataset """wn18rr""" +617 74 model """rotate""" +617 74 loss """bceaftersigmoid""" +617 74 regularizer """no""" +617 74 optimizer """adam""" +617 74 training_loop """owa""" +617 74 negative_sampler """basic""" +617 74 evaluator """rankbased""" +617 75 dataset """wn18rr""" +617 75 model """rotate""" +617 75 loss """bceaftersigmoid""" +617 75 regularizer """no""" +617 75 optimizer """adam""" +617 75 training_loop """owa""" +617 75 negative_sampler """basic""" +617 75 evaluator """rankbased""" +617 76 dataset """wn18rr""" +617 76 model """rotate""" +617 76 loss """bceaftersigmoid""" +617 76 regularizer """no""" +617 76 optimizer """adam""" +617 76 training_loop """owa""" +617 76 negative_sampler """basic""" +617 76 evaluator """rankbased""" +617 77 dataset """wn18rr""" +617 77 model """rotate""" +617 77 loss """bceaftersigmoid""" +617 77 regularizer """no""" +617 77 optimizer """adam""" +617 77 training_loop """owa""" +617 77 negative_sampler """basic""" +617 77 evaluator """rankbased""" +617 78 dataset """wn18rr""" +617 78 model """rotate""" +617 78 loss """bceaftersigmoid""" +617 78 regularizer """no""" +617 78 optimizer """adam""" +617 78 training_loop """owa""" +617 78 negative_sampler """basic""" +617 78 evaluator """rankbased""" +617 79 dataset """wn18rr""" +617 79 model """rotate""" +617 79 loss """bceaftersigmoid""" +617 79 regularizer """no""" +617 79 optimizer """adam""" +617 79 training_loop """owa""" +617 79 negative_sampler """basic""" +617 79 evaluator """rankbased""" +617 80 dataset """wn18rr""" +617 80 model """rotate""" +617 80 loss """bceaftersigmoid""" +617 80 regularizer """no""" +617 80 optimizer """adam""" +617 80 training_loop 
"""owa""" +617 80 negative_sampler """basic""" +617 80 evaluator """rankbased""" +617 81 dataset """wn18rr""" +617 81 model """rotate""" +617 81 loss """bceaftersigmoid""" +617 81 regularizer """no""" +617 81 optimizer """adam""" +617 81 training_loop """owa""" +617 81 negative_sampler """basic""" +617 81 evaluator """rankbased""" +617 82 dataset """wn18rr""" +617 82 model """rotate""" +617 82 loss """bceaftersigmoid""" +617 82 regularizer """no""" +617 82 optimizer """adam""" +617 82 training_loop """owa""" +617 82 negative_sampler """basic""" +617 82 evaluator """rankbased""" +617 83 dataset """wn18rr""" +617 83 model """rotate""" +617 83 loss """bceaftersigmoid""" +617 83 regularizer """no""" +617 83 optimizer """adam""" +617 83 training_loop """owa""" +617 83 negative_sampler """basic""" +617 83 evaluator """rankbased""" +617 84 dataset """wn18rr""" +617 84 model """rotate""" +617 84 loss """bceaftersigmoid""" +617 84 regularizer """no""" +617 84 optimizer """adam""" +617 84 training_loop """owa""" +617 84 negative_sampler """basic""" +617 84 evaluator """rankbased""" +617 85 dataset """wn18rr""" +617 85 model """rotate""" +617 85 loss """bceaftersigmoid""" +617 85 regularizer """no""" +617 85 optimizer """adam""" +617 85 training_loop """owa""" +617 85 negative_sampler """basic""" +617 85 evaluator """rankbased""" +617 86 dataset """wn18rr""" +617 86 model """rotate""" +617 86 loss """bceaftersigmoid""" +617 86 regularizer """no""" +617 86 optimizer """adam""" +617 86 training_loop """owa""" +617 86 negative_sampler """basic""" +617 86 evaluator """rankbased""" +617 87 dataset """wn18rr""" +617 87 model """rotate""" +617 87 loss """bceaftersigmoid""" +617 87 regularizer """no""" +617 87 optimizer """adam""" +617 87 training_loop """owa""" +617 87 negative_sampler """basic""" +617 87 evaluator """rankbased""" +617 88 dataset """wn18rr""" +617 88 model """rotate""" +617 88 loss """bceaftersigmoid""" +617 88 regularizer """no""" +617 88 optimizer """adam""" +617 
88 training_loop """owa""" +617 88 negative_sampler """basic""" +617 88 evaluator """rankbased""" +617 89 dataset """wn18rr""" +617 89 model """rotate""" +617 89 loss """bceaftersigmoid""" +617 89 regularizer """no""" +617 89 optimizer """adam""" +617 89 training_loop """owa""" +617 89 negative_sampler """basic""" +617 89 evaluator """rankbased""" +617 90 dataset """wn18rr""" +617 90 model """rotate""" +617 90 loss """bceaftersigmoid""" +617 90 regularizer """no""" +617 90 optimizer """adam""" +617 90 training_loop """owa""" +617 90 negative_sampler """basic""" +617 90 evaluator """rankbased""" +617 91 dataset """wn18rr""" +617 91 model """rotate""" +617 91 loss """bceaftersigmoid""" +617 91 regularizer """no""" +617 91 optimizer """adam""" +617 91 training_loop """owa""" +617 91 negative_sampler """basic""" +617 91 evaluator """rankbased""" +617 92 dataset """wn18rr""" +617 92 model """rotate""" +617 92 loss """bceaftersigmoid""" +617 92 regularizer """no""" +617 92 optimizer """adam""" +617 92 training_loop """owa""" +617 92 negative_sampler """basic""" +617 92 evaluator """rankbased""" +617 93 dataset """wn18rr""" +617 93 model """rotate""" +617 93 loss """bceaftersigmoid""" +617 93 regularizer """no""" +617 93 optimizer """adam""" +617 93 training_loop """owa""" +617 93 negative_sampler """basic""" +617 93 evaluator """rankbased""" +617 94 dataset """wn18rr""" +617 94 model """rotate""" +617 94 loss """bceaftersigmoid""" +617 94 regularizer """no""" +617 94 optimizer """adam""" +617 94 training_loop """owa""" +617 94 negative_sampler """basic""" +617 94 evaluator """rankbased""" +617 95 dataset """wn18rr""" +617 95 model """rotate""" +617 95 loss """bceaftersigmoid""" +617 95 regularizer """no""" +617 95 optimizer """adam""" +617 95 training_loop """owa""" +617 95 negative_sampler """basic""" +617 95 evaluator """rankbased""" +617 96 dataset """wn18rr""" +617 96 model """rotate""" +617 96 loss """bceaftersigmoid""" +617 96 regularizer """no""" +617 96 optimizer 
"""adam""" +617 96 training_loop """owa""" +617 96 negative_sampler """basic""" +617 96 evaluator """rankbased""" +617 97 dataset """wn18rr""" +617 97 model """rotate""" +617 97 loss """bceaftersigmoid""" +617 97 regularizer """no""" +617 97 optimizer """adam""" +617 97 training_loop """owa""" +617 97 negative_sampler """basic""" +617 97 evaluator """rankbased""" +617 98 dataset """wn18rr""" +617 98 model """rotate""" +617 98 loss """bceaftersigmoid""" +617 98 regularizer """no""" +617 98 optimizer """adam""" +617 98 training_loop """owa""" +617 98 negative_sampler """basic""" +617 98 evaluator """rankbased""" +617 99 dataset """wn18rr""" +617 99 model """rotate""" +617 99 loss """bceaftersigmoid""" +617 99 regularizer """no""" +617 99 optimizer """adam""" +617 99 training_loop """owa""" +617 99 negative_sampler """basic""" +617 99 evaluator """rankbased""" +617 100 dataset """wn18rr""" +617 100 model """rotate""" +617 100 loss """bceaftersigmoid""" +617 100 regularizer """no""" +617 100 optimizer """adam""" +617 100 training_loop """owa""" +617 100 negative_sampler """basic""" +617 100 evaluator """rankbased""" +618 1 model.embedding_dim 1.0 +618 1 optimizer.lr 0.007467189399820741 +618 1 negative_sampler.num_negs_per_pos 86.0 +618 1 training.batch_size 0.0 +618 2 model.embedding_dim 1.0 +618 2 optimizer.lr 0.001805903148918661 +618 2 negative_sampler.num_negs_per_pos 9.0 +618 2 training.batch_size 0.0 +618 3 model.embedding_dim 1.0 +618 3 optimizer.lr 0.010414363728234904 +618 3 negative_sampler.num_negs_per_pos 42.0 +618 3 training.batch_size 1.0 +618 4 model.embedding_dim 1.0 +618 4 optimizer.lr 0.039723910443910784 +618 4 negative_sampler.num_negs_per_pos 13.0 +618 4 training.batch_size 2.0 +618 5 model.embedding_dim 0.0 +618 5 optimizer.lr 0.04672542396208189 +618 5 negative_sampler.num_negs_per_pos 28.0 +618 5 training.batch_size 1.0 +618 6 model.embedding_dim 1.0 +618 6 optimizer.lr 0.01825079418852248 +618 6 negative_sampler.num_negs_per_pos 89.0 +618 6 
training.batch_size 2.0 +618 7 model.embedding_dim 0.0 +618 7 optimizer.lr 0.009276258437853582 +618 7 negative_sampler.num_negs_per_pos 1.0 +618 7 training.batch_size 1.0 +618 8 model.embedding_dim 1.0 +618 8 optimizer.lr 0.015891994789679332 +618 8 negative_sampler.num_negs_per_pos 86.0 +618 8 training.batch_size 1.0 +618 9 model.embedding_dim 0.0 +618 9 optimizer.lr 0.07425541165578317 +618 9 negative_sampler.num_negs_per_pos 67.0 +618 9 training.batch_size 2.0 +618 10 model.embedding_dim 2.0 +618 10 optimizer.lr 0.014280186738636206 +618 10 negative_sampler.num_negs_per_pos 60.0 +618 10 training.batch_size 0.0 +618 11 model.embedding_dim 2.0 +618 11 optimizer.lr 0.06446513604575176 +618 11 negative_sampler.num_negs_per_pos 21.0 +618 11 training.batch_size 0.0 +618 12 model.embedding_dim 2.0 +618 12 optimizer.lr 0.0010005574751902697 +618 12 negative_sampler.num_negs_per_pos 43.0 +618 12 training.batch_size 0.0 +618 13 model.embedding_dim 1.0 +618 13 optimizer.lr 0.005637341340705709 +618 13 negative_sampler.num_negs_per_pos 61.0 +618 13 training.batch_size 0.0 +618 14 model.embedding_dim 2.0 +618 14 optimizer.lr 0.0901017160031295 +618 14 negative_sampler.num_negs_per_pos 56.0 +618 14 training.batch_size 2.0 +618 15 model.embedding_dim 2.0 +618 15 optimizer.lr 0.008908746821673433 +618 15 negative_sampler.num_negs_per_pos 64.0 +618 15 training.batch_size 1.0 +618 16 model.embedding_dim 2.0 +618 16 optimizer.lr 0.05945034540064636 +618 16 negative_sampler.num_negs_per_pos 26.0 +618 16 training.batch_size 1.0 +618 17 model.embedding_dim 1.0 +618 17 optimizer.lr 0.013332814015247307 +618 17 negative_sampler.num_negs_per_pos 55.0 +618 17 training.batch_size 2.0 +618 18 model.embedding_dim 1.0 +618 18 optimizer.lr 0.0019150122449686131 +618 18 negative_sampler.num_negs_per_pos 18.0 +618 18 training.batch_size 0.0 +618 19 model.embedding_dim 2.0 +618 19 optimizer.lr 0.0017567198515532501 +618 19 negative_sampler.num_negs_per_pos 42.0 +618 19 training.batch_size 0.0 
+618 20 model.embedding_dim 2.0 +618 20 optimizer.lr 0.0030384873892607927 +618 20 negative_sampler.num_negs_per_pos 81.0 +618 20 training.batch_size 0.0 +618 21 model.embedding_dim 1.0 +618 21 optimizer.lr 0.03557920418258907 +618 21 negative_sampler.num_negs_per_pos 92.0 +618 21 training.batch_size 0.0 +618 22 model.embedding_dim 0.0 +618 22 optimizer.lr 0.022586055645818778 +618 22 negative_sampler.num_negs_per_pos 30.0 +618 22 training.batch_size 1.0 +618 23 model.embedding_dim 2.0 +618 23 optimizer.lr 0.013987012076403921 +618 23 negative_sampler.num_negs_per_pos 89.0 +618 23 training.batch_size 1.0 +618 24 model.embedding_dim 2.0 +618 24 optimizer.lr 0.052650186264729344 +618 24 negative_sampler.num_negs_per_pos 50.0 +618 24 training.batch_size 1.0 +618 25 model.embedding_dim 1.0 +618 25 optimizer.lr 0.010354375955182743 +618 25 negative_sampler.num_negs_per_pos 21.0 +618 25 training.batch_size 0.0 +618 26 model.embedding_dim 2.0 +618 26 optimizer.lr 0.008518695636806224 +618 26 negative_sampler.num_negs_per_pos 7.0 +618 26 training.batch_size 1.0 +618 27 model.embedding_dim 1.0 +618 27 optimizer.lr 0.012568126186218795 +618 27 negative_sampler.num_negs_per_pos 46.0 +618 27 training.batch_size 1.0 +618 28 model.embedding_dim 2.0 +618 28 optimizer.lr 0.0305520266163346 +618 28 negative_sampler.num_negs_per_pos 79.0 +618 28 training.batch_size 0.0 +618 29 model.embedding_dim 0.0 +618 29 optimizer.lr 0.017170128219053955 +618 29 negative_sampler.num_negs_per_pos 49.0 +618 29 training.batch_size 0.0 +618 30 model.embedding_dim 2.0 +618 30 optimizer.lr 0.0010942502266134736 +618 30 negative_sampler.num_negs_per_pos 21.0 +618 30 training.batch_size 1.0 +618 31 model.embedding_dim 0.0 +618 31 optimizer.lr 0.004341829787977535 +618 31 negative_sampler.num_negs_per_pos 47.0 +618 31 training.batch_size 1.0 +618 32 model.embedding_dim 2.0 +618 32 optimizer.lr 0.015898475195347008 +618 32 negative_sampler.num_negs_per_pos 5.0 +618 32 training.batch_size 2.0 +618 33 
model.embedding_dim 1.0 +618 33 optimizer.lr 0.001338706590258634 +618 33 negative_sampler.num_negs_per_pos 96.0 +618 33 training.batch_size 1.0 +618 34 model.embedding_dim 2.0 +618 34 optimizer.lr 0.01767943347942398 +618 34 negative_sampler.num_negs_per_pos 13.0 +618 34 training.batch_size 1.0 +618 35 model.embedding_dim 1.0 +618 35 optimizer.lr 0.022270970960831724 +618 35 negative_sampler.num_negs_per_pos 33.0 +618 35 training.batch_size 1.0 +618 36 model.embedding_dim 0.0 +618 36 optimizer.lr 0.0016738399850647937 +618 36 negative_sampler.num_negs_per_pos 39.0 +618 36 training.batch_size 1.0 +618 37 model.embedding_dim 0.0 +618 37 optimizer.lr 0.07557439667215812 +618 37 negative_sampler.num_negs_per_pos 43.0 +618 37 training.batch_size 2.0 +618 38 model.embedding_dim 0.0 +618 38 optimizer.lr 0.002150519099558048 +618 38 negative_sampler.num_negs_per_pos 77.0 +618 38 training.batch_size 2.0 +618 39 model.embedding_dim 2.0 +618 39 optimizer.lr 0.0013949820138412223 +618 39 negative_sampler.num_negs_per_pos 95.0 +618 39 training.batch_size 1.0 +618 40 model.embedding_dim 1.0 +618 40 optimizer.lr 0.00792985657294377 +618 40 negative_sampler.num_negs_per_pos 62.0 +618 40 training.batch_size 1.0 +618 41 model.embedding_dim 1.0 +618 41 optimizer.lr 0.009288538258100632 +618 41 negative_sampler.num_negs_per_pos 18.0 +618 41 training.batch_size 1.0 +618 42 model.embedding_dim 0.0 +618 42 optimizer.lr 0.01989181288364523 +618 42 negative_sampler.num_negs_per_pos 5.0 +618 42 training.batch_size 1.0 +618 43 model.embedding_dim 1.0 +618 43 optimizer.lr 0.006158915031555233 +618 43 negative_sampler.num_negs_per_pos 69.0 +618 43 training.batch_size 2.0 +618 44 model.embedding_dim 1.0 +618 44 optimizer.lr 0.0067230323525623095 +618 44 negative_sampler.num_negs_per_pos 20.0 +618 44 training.batch_size 2.0 +618 45 model.embedding_dim 2.0 +618 45 optimizer.lr 0.027299492553971464 +618 45 negative_sampler.num_negs_per_pos 16.0 +618 45 training.batch_size 0.0 +618 46 
model.embedding_dim 0.0 +618 46 optimizer.lr 0.031522524719710523 +618 46 negative_sampler.num_negs_per_pos 56.0 +618 46 training.batch_size 1.0 +618 47 model.embedding_dim 1.0 +618 47 optimizer.lr 0.027436318481032795 +618 47 negative_sampler.num_negs_per_pos 48.0 +618 47 training.batch_size 2.0 +618 48 model.embedding_dim 2.0 +618 48 optimizer.lr 0.004926761572797001 +618 48 negative_sampler.num_negs_per_pos 71.0 +618 48 training.batch_size 1.0 +618 49 model.embedding_dim 2.0 +618 49 optimizer.lr 0.01742306605611191 +618 49 negative_sampler.num_negs_per_pos 79.0 +618 49 training.batch_size 0.0 +618 50 model.embedding_dim 1.0 +618 50 optimizer.lr 0.0013071389205534015 +618 50 negative_sampler.num_negs_per_pos 41.0 +618 50 training.batch_size 1.0 +618 51 model.embedding_dim 1.0 +618 51 optimizer.lr 0.0027474412217617936 +618 51 negative_sampler.num_negs_per_pos 23.0 +618 51 training.batch_size 1.0 +618 52 model.embedding_dim 1.0 +618 52 optimizer.lr 0.002244461667774423 +618 52 negative_sampler.num_negs_per_pos 70.0 +618 52 training.batch_size 2.0 +618 53 model.embedding_dim 0.0 +618 53 optimizer.lr 0.015619616054175806 +618 53 negative_sampler.num_negs_per_pos 45.0 +618 53 training.batch_size 0.0 +618 54 model.embedding_dim 2.0 +618 54 optimizer.lr 0.04417638884087484 +618 54 negative_sampler.num_negs_per_pos 63.0 +618 54 training.batch_size 1.0 +618 55 model.embedding_dim 2.0 +618 55 optimizer.lr 0.0015795381874900052 +618 55 negative_sampler.num_negs_per_pos 88.0 +618 55 training.batch_size 2.0 +618 56 model.embedding_dim 1.0 +618 56 optimizer.lr 0.05677177944662907 +618 56 negative_sampler.num_negs_per_pos 52.0 +618 56 training.batch_size 1.0 +618 57 model.embedding_dim 2.0 +618 57 optimizer.lr 0.004059499600051633 +618 57 negative_sampler.num_negs_per_pos 46.0 +618 57 training.batch_size 1.0 +618 58 model.embedding_dim 0.0 +618 58 optimizer.lr 0.02394315552124617 +618 58 negative_sampler.num_negs_per_pos 66.0 +618 58 training.batch_size 1.0 +618 59 
model.embedding_dim 2.0 +618 59 optimizer.lr 0.008061200009838867 +618 59 negative_sampler.num_negs_per_pos 73.0 +618 59 training.batch_size 2.0 +618 60 model.embedding_dim 2.0 +618 60 optimizer.lr 0.0025335780974298228 +618 60 negative_sampler.num_negs_per_pos 45.0 +618 60 training.batch_size 2.0 +618 61 model.embedding_dim 0.0 +618 61 optimizer.lr 0.0035265076950135826 +618 61 negative_sampler.num_negs_per_pos 15.0 +618 61 training.batch_size 1.0 +618 62 model.embedding_dim 1.0 +618 62 optimizer.lr 0.049134781597535294 +618 62 negative_sampler.num_negs_per_pos 29.0 +618 62 training.batch_size 0.0 +618 63 model.embedding_dim 1.0 +618 63 optimizer.lr 0.09498126480989989 +618 63 negative_sampler.num_negs_per_pos 85.0 +618 63 training.batch_size 0.0 +618 64 model.embedding_dim 1.0 +618 64 optimizer.lr 0.01885082726565867 +618 64 negative_sampler.num_negs_per_pos 41.0 +618 64 training.batch_size 2.0 +618 65 model.embedding_dim 1.0 +618 65 optimizer.lr 0.007105308370855132 +618 65 negative_sampler.num_negs_per_pos 43.0 +618 65 training.batch_size 2.0 +618 66 model.embedding_dim 2.0 +618 66 optimizer.lr 0.006222489557855999 +618 66 negative_sampler.num_negs_per_pos 73.0 +618 66 training.batch_size 0.0 +618 67 model.embedding_dim 0.0 +618 67 optimizer.lr 0.06010403521189273 +618 67 negative_sampler.num_negs_per_pos 52.0 +618 67 training.batch_size 2.0 +618 68 model.embedding_dim 2.0 +618 68 optimizer.lr 0.04238129653622758 +618 68 negative_sampler.num_negs_per_pos 41.0 +618 68 training.batch_size 2.0 +618 69 model.embedding_dim 1.0 +618 69 optimizer.lr 0.05308066042415582 +618 69 negative_sampler.num_negs_per_pos 88.0 +618 69 training.batch_size 0.0 +618 70 model.embedding_dim 1.0 +618 70 optimizer.lr 0.008287542343273177 +618 70 negative_sampler.num_negs_per_pos 49.0 +618 70 training.batch_size 1.0 +618 71 model.embedding_dim 0.0 +618 71 optimizer.lr 0.028987700339282434 +618 71 negative_sampler.num_negs_per_pos 7.0 +618 71 training.batch_size 2.0 +618 72 
model.embedding_dim 0.0 +618 72 optimizer.lr 0.04158487565494041 +618 72 negative_sampler.num_negs_per_pos 54.0 +618 72 training.batch_size 0.0 +618 73 model.embedding_dim 1.0 +618 73 optimizer.lr 0.023623778233423934 +618 73 negative_sampler.num_negs_per_pos 4.0 +618 73 training.batch_size 2.0 +618 74 model.embedding_dim 0.0 +618 74 optimizer.lr 0.055200973706114806 +618 74 negative_sampler.num_negs_per_pos 27.0 +618 74 training.batch_size 2.0 +618 75 model.embedding_dim 0.0 +618 75 optimizer.lr 0.05648077186180258 +618 75 negative_sampler.num_negs_per_pos 47.0 +618 75 training.batch_size 1.0 +618 76 model.embedding_dim 0.0 +618 76 optimizer.lr 0.03279275965592359 +618 76 negative_sampler.num_negs_per_pos 24.0 +618 76 training.batch_size 1.0 +618 77 model.embedding_dim 1.0 +618 77 optimizer.lr 0.002209237189729897 +618 77 negative_sampler.num_negs_per_pos 90.0 +618 77 training.batch_size 2.0 +618 78 model.embedding_dim 1.0 +618 78 optimizer.lr 0.0014466590334008677 +618 78 negative_sampler.num_negs_per_pos 90.0 +618 78 training.batch_size 1.0 +618 79 model.embedding_dim 1.0 +618 79 optimizer.lr 0.03460041180680395 +618 79 negative_sampler.num_negs_per_pos 11.0 +618 79 training.batch_size 0.0 +618 80 model.embedding_dim 1.0 +618 80 optimizer.lr 0.003505361500579456 +618 80 negative_sampler.num_negs_per_pos 7.0 +618 80 training.batch_size 2.0 +618 81 model.embedding_dim 2.0 +618 81 optimizer.lr 0.09994499403233002 +618 81 negative_sampler.num_negs_per_pos 6.0 +618 81 training.batch_size 0.0 +618 82 model.embedding_dim 1.0 +618 82 optimizer.lr 0.012342053132305216 +618 82 negative_sampler.num_negs_per_pos 78.0 +618 82 training.batch_size 2.0 +618 83 model.embedding_dim 0.0 +618 83 optimizer.lr 0.005369473251461393 +618 83 negative_sampler.num_negs_per_pos 43.0 +618 83 training.batch_size 1.0 +618 84 model.embedding_dim 2.0 +618 84 optimizer.lr 0.0034784280251521035 +618 84 negative_sampler.num_negs_per_pos 46.0 +618 84 training.batch_size 2.0 +618 85 
model.embedding_dim 0.0 +618 85 optimizer.lr 0.030788715354281807 +618 85 negative_sampler.num_negs_per_pos 68.0 +618 85 training.batch_size 1.0 +618 86 model.embedding_dim 1.0 +618 86 optimizer.lr 0.010672228583026092 +618 86 negative_sampler.num_negs_per_pos 50.0 +618 86 training.batch_size 0.0 +618 87 model.embedding_dim 2.0 +618 87 optimizer.lr 0.044703496130515004 +618 87 negative_sampler.num_negs_per_pos 10.0 +618 87 training.batch_size 0.0 +618 88 model.embedding_dim 2.0 +618 88 optimizer.lr 0.0680149511426039 +618 88 negative_sampler.num_negs_per_pos 13.0 +618 88 training.batch_size 0.0 +618 89 model.embedding_dim 1.0 +618 89 optimizer.lr 0.07471339780025839 +618 89 negative_sampler.num_negs_per_pos 74.0 +618 89 training.batch_size 0.0 +618 90 model.embedding_dim 2.0 +618 90 optimizer.lr 0.001818998188380175 +618 90 negative_sampler.num_negs_per_pos 5.0 +618 90 training.batch_size 0.0 +618 91 model.embedding_dim 1.0 +618 91 optimizer.lr 0.0026318876868029004 +618 91 negative_sampler.num_negs_per_pos 40.0 +618 91 training.batch_size 0.0 +618 92 model.embedding_dim 2.0 +618 92 optimizer.lr 0.023517424049609315 +618 92 negative_sampler.num_negs_per_pos 43.0 +618 92 training.batch_size 0.0 +618 93 model.embedding_dim 2.0 +618 93 optimizer.lr 0.001546233587133641 +618 93 negative_sampler.num_negs_per_pos 93.0 +618 93 training.batch_size 1.0 +618 94 model.embedding_dim 2.0 +618 94 optimizer.lr 0.04824575528389415 +618 94 negative_sampler.num_negs_per_pos 26.0 +618 94 training.batch_size 1.0 +618 95 model.embedding_dim 1.0 +618 95 optimizer.lr 0.001279567435637654 +618 95 negative_sampler.num_negs_per_pos 46.0 +618 95 training.batch_size 1.0 +618 96 model.embedding_dim 1.0 +618 96 optimizer.lr 0.010508844030448317 +618 96 negative_sampler.num_negs_per_pos 0.0 +618 96 training.batch_size 2.0 +618 97 model.embedding_dim 0.0 +618 97 optimizer.lr 0.003550049194455027 +618 97 negative_sampler.num_negs_per_pos 22.0 +618 97 training.batch_size 1.0 +618 98 
model.embedding_dim 1.0 +618 98 optimizer.lr 0.03134259457311082 +618 98 negative_sampler.num_negs_per_pos 82.0 +618 98 training.batch_size 2.0 +618 99 model.embedding_dim 0.0 +618 99 optimizer.lr 0.004097539886910091 +618 99 negative_sampler.num_negs_per_pos 73.0 +618 99 training.batch_size 1.0 +618 100 model.embedding_dim 0.0 +618 100 optimizer.lr 0.044552769021035415 +618 100 negative_sampler.num_negs_per_pos 51.0 +618 100 training.batch_size 0.0 +618 1 dataset """wn18rr""" +618 1 model """rotate""" +618 1 loss """softplus""" +618 1 regularizer """no""" +618 1 optimizer """adam""" +618 1 training_loop """owa""" +618 1 negative_sampler """basic""" +618 1 evaluator """rankbased""" +618 2 dataset """wn18rr""" +618 2 model """rotate""" +618 2 loss """softplus""" +618 2 regularizer """no""" +618 2 optimizer """adam""" +618 2 training_loop """owa""" +618 2 negative_sampler """basic""" +618 2 evaluator """rankbased""" +618 3 dataset """wn18rr""" +618 3 model """rotate""" +618 3 loss """softplus""" +618 3 regularizer """no""" +618 3 optimizer """adam""" +618 3 training_loop """owa""" +618 3 negative_sampler """basic""" +618 3 evaluator """rankbased""" +618 4 dataset """wn18rr""" +618 4 model """rotate""" +618 4 loss """softplus""" +618 4 regularizer """no""" +618 4 optimizer """adam""" +618 4 training_loop """owa""" +618 4 negative_sampler """basic""" +618 4 evaluator """rankbased""" +618 5 dataset """wn18rr""" +618 5 model """rotate""" +618 5 loss """softplus""" +618 5 regularizer """no""" +618 5 optimizer """adam""" +618 5 training_loop """owa""" +618 5 negative_sampler """basic""" +618 5 evaluator """rankbased""" +618 6 dataset """wn18rr""" +618 6 model """rotate""" +618 6 loss """softplus""" +618 6 regularizer """no""" +618 6 optimizer """adam""" +618 6 training_loop """owa""" +618 6 negative_sampler """basic""" +618 6 evaluator """rankbased""" +618 7 dataset """wn18rr""" +618 7 model """rotate""" +618 7 loss """softplus""" +618 7 regularizer """no""" +618 7 
optimizer """adam""" +618 7 training_loop """owa""" +618 7 negative_sampler """basic""" +618 7 evaluator """rankbased""" +618 8 dataset """wn18rr""" +618 8 model """rotate""" +618 8 loss """softplus""" +618 8 regularizer """no""" +618 8 optimizer """adam""" +618 8 training_loop """owa""" +618 8 negative_sampler """basic""" +618 8 evaluator """rankbased""" +618 9 dataset """wn18rr""" +618 9 model """rotate""" +618 9 loss """softplus""" +618 9 regularizer """no""" +618 9 optimizer """adam""" +618 9 training_loop """owa""" +618 9 negative_sampler """basic""" +618 9 evaluator """rankbased""" +618 10 dataset """wn18rr""" +618 10 model """rotate""" +618 10 loss """softplus""" +618 10 regularizer """no""" +618 10 optimizer """adam""" +618 10 training_loop """owa""" +618 10 negative_sampler """basic""" +618 10 evaluator """rankbased""" +618 11 dataset """wn18rr""" +618 11 model """rotate""" +618 11 loss """softplus""" +618 11 regularizer """no""" +618 11 optimizer """adam""" +618 11 training_loop """owa""" +618 11 negative_sampler """basic""" +618 11 evaluator """rankbased""" +618 12 dataset """wn18rr""" +618 12 model """rotate""" +618 12 loss """softplus""" +618 12 regularizer """no""" +618 12 optimizer """adam""" +618 12 training_loop """owa""" +618 12 negative_sampler """basic""" +618 12 evaluator """rankbased""" +618 13 dataset """wn18rr""" +618 13 model """rotate""" +618 13 loss """softplus""" +618 13 regularizer """no""" +618 13 optimizer """adam""" +618 13 training_loop """owa""" +618 13 negative_sampler """basic""" +618 13 evaluator """rankbased""" +618 14 dataset """wn18rr""" +618 14 model """rotate""" +618 14 loss """softplus""" +618 14 regularizer """no""" +618 14 optimizer """adam""" +618 14 training_loop """owa""" +618 14 negative_sampler """basic""" +618 14 evaluator """rankbased""" +618 15 dataset """wn18rr""" +618 15 model """rotate""" +618 15 loss """softplus""" +618 15 regularizer """no""" +618 15 optimizer """adam""" +618 15 training_loop """owa""" +618 
15 negative_sampler """basic""" +618 15 evaluator """rankbased""" +618 16 dataset """wn18rr""" +618 16 model """rotate""" +618 16 loss """softplus""" +618 16 regularizer """no""" +618 16 optimizer """adam""" +618 16 training_loop """owa""" +618 16 negative_sampler """basic""" +618 16 evaluator """rankbased""" +618 17 dataset """wn18rr""" +618 17 model """rotate""" +618 17 loss """softplus""" +618 17 regularizer """no""" +618 17 optimizer """adam""" +618 17 training_loop """owa""" +618 17 negative_sampler """basic""" +618 17 evaluator """rankbased""" +618 18 dataset """wn18rr""" +618 18 model """rotate""" +618 18 loss """softplus""" +618 18 regularizer """no""" +618 18 optimizer """adam""" +618 18 training_loop """owa""" +618 18 negative_sampler """basic""" +618 18 evaluator """rankbased""" +618 19 dataset """wn18rr""" +618 19 model """rotate""" +618 19 loss """softplus""" +618 19 regularizer """no""" +618 19 optimizer """adam""" +618 19 training_loop """owa""" +618 19 negative_sampler """basic""" +618 19 evaluator """rankbased""" +618 20 dataset """wn18rr""" +618 20 model """rotate""" +618 20 loss """softplus""" +618 20 regularizer """no""" +618 20 optimizer """adam""" +618 20 training_loop """owa""" +618 20 negative_sampler """basic""" +618 20 evaluator """rankbased""" +618 21 dataset """wn18rr""" +618 21 model """rotate""" +618 21 loss """softplus""" +618 21 regularizer """no""" +618 21 optimizer """adam""" +618 21 training_loop """owa""" +618 21 negative_sampler """basic""" +618 21 evaluator """rankbased""" +618 22 dataset """wn18rr""" +618 22 model """rotate""" +618 22 loss """softplus""" +618 22 regularizer """no""" +618 22 optimizer """adam""" +618 22 training_loop """owa""" +618 22 negative_sampler """basic""" +618 22 evaluator """rankbased""" +618 23 dataset """wn18rr""" +618 23 model """rotate""" +618 23 loss """softplus""" +618 23 regularizer """no""" +618 23 optimizer """adam""" +618 23 training_loop """owa""" +618 23 negative_sampler """basic""" +618 23 
evaluator """rankbased""" +618 24 dataset """wn18rr""" +618 24 model """rotate""" +618 24 loss """softplus""" +618 24 regularizer """no""" +618 24 optimizer """adam""" +618 24 training_loop """owa""" +618 24 negative_sampler """basic""" +618 24 evaluator """rankbased""" +618 25 dataset """wn18rr""" +618 25 model """rotate""" +618 25 loss """softplus""" +618 25 regularizer """no""" +618 25 optimizer """adam""" +618 25 training_loop """owa""" +618 25 negative_sampler """basic""" +618 25 evaluator """rankbased""" +618 26 dataset """wn18rr""" +618 26 model """rotate""" +618 26 loss """softplus""" +618 26 regularizer """no""" +618 26 optimizer """adam""" +618 26 training_loop """owa""" +618 26 negative_sampler """basic""" +618 26 evaluator """rankbased""" +618 27 dataset """wn18rr""" +618 27 model """rotate""" +618 27 loss """softplus""" +618 27 regularizer """no""" +618 27 optimizer """adam""" +618 27 training_loop """owa""" +618 27 negative_sampler """basic""" +618 27 evaluator """rankbased""" +618 28 dataset """wn18rr""" +618 28 model """rotate""" +618 28 loss """softplus""" +618 28 regularizer """no""" +618 28 optimizer """adam""" +618 28 training_loop """owa""" +618 28 negative_sampler """basic""" +618 28 evaluator """rankbased""" +618 29 dataset """wn18rr""" +618 29 model """rotate""" +618 29 loss """softplus""" +618 29 regularizer """no""" +618 29 optimizer """adam""" +618 29 training_loop """owa""" +618 29 negative_sampler """basic""" +618 29 evaluator """rankbased""" +618 30 dataset """wn18rr""" +618 30 model """rotate""" +618 30 loss """softplus""" +618 30 regularizer """no""" +618 30 optimizer """adam""" +618 30 training_loop """owa""" +618 30 negative_sampler """basic""" +618 30 evaluator """rankbased""" +618 31 dataset """wn18rr""" +618 31 model """rotate""" +618 31 loss """softplus""" +618 31 regularizer """no""" +618 31 optimizer """adam""" +618 31 training_loop """owa""" +618 31 negative_sampler """basic""" +618 31 evaluator """rankbased""" +618 32 
dataset """wn18rr""" +618 32 model """rotate""" +618 32 loss """softplus""" +618 32 regularizer """no""" +618 32 optimizer """adam""" +618 32 training_loop """owa""" +618 32 negative_sampler """basic""" +618 32 evaluator """rankbased""" +618 33 dataset """wn18rr""" +618 33 model """rotate""" +618 33 loss """softplus""" +618 33 regularizer """no""" +618 33 optimizer """adam""" +618 33 training_loop """owa""" +618 33 negative_sampler """basic""" +618 33 evaluator """rankbased""" +618 34 dataset """wn18rr""" +618 34 model """rotate""" +618 34 loss """softplus""" +618 34 regularizer """no""" +618 34 optimizer """adam""" +618 34 training_loop """owa""" +618 34 negative_sampler """basic""" +618 34 evaluator """rankbased""" +618 35 dataset """wn18rr""" +618 35 model """rotate""" +618 35 loss """softplus""" +618 35 regularizer """no""" +618 35 optimizer """adam""" +618 35 training_loop """owa""" +618 35 negative_sampler """basic""" +618 35 evaluator """rankbased""" +618 36 dataset """wn18rr""" +618 36 model """rotate""" +618 36 loss """softplus""" +618 36 regularizer """no""" +618 36 optimizer """adam""" +618 36 training_loop """owa""" +618 36 negative_sampler """basic""" +618 36 evaluator """rankbased""" +618 37 dataset """wn18rr""" +618 37 model """rotate""" +618 37 loss """softplus""" +618 37 regularizer """no""" +618 37 optimizer """adam""" +618 37 training_loop """owa""" +618 37 negative_sampler """basic""" +618 37 evaluator """rankbased""" +618 38 dataset """wn18rr""" +618 38 model """rotate""" +618 38 loss """softplus""" +618 38 regularizer """no""" +618 38 optimizer """adam""" +618 38 training_loop """owa""" +618 38 negative_sampler """basic""" +618 38 evaluator """rankbased""" +618 39 dataset """wn18rr""" +618 39 model """rotate""" +618 39 loss """softplus""" +618 39 regularizer """no""" +618 39 optimizer """adam""" +618 39 training_loop """owa""" +618 39 negative_sampler """basic""" +618 39 evaluator """rankbased""" +618 40 dataset """wn18rr""" +618 40 model 
"""rotate""" +618 40 loss """softplus""" +618 40 regularizer """no""" +618 40 optimizer """adam""" +618 40 training_loop """owa""" +618 40 negative_sampler """basic""" +618 40 evaluator """rankbased""" +618 41 dataset """wn18rr""" +618 41 model """rotate""" +618 41 loss """softplus""" +618 41 regularizer """no""" +618 41 optimizer """adam""" +618 41 training_loop """owa""" +618 41 negative_sampler """basic""" +618 41 evaluator """rankbased""" +618 42 dataset """wn18rr""" +618 42 model """rotate""" +618 42 loss """softplus""" +618 42 regularizer """no""" +618 42 optimizer """adam""" +618 42 training_loop """owa""" +618 42 negative_sampler """basic""" +618 42 evaluator """rankbased""" +618 43 dataset """wn18rr""" +618 43 model """rotate""" +618 43 loss """softplus""" +618 43 regularizer """no""" +618 43 optimizer """adam""" +618 43 training_loop """owa""" +618 43 negative_sampler """basic""" +618 43 evaluator """rankbased""" +618 44 dataset """wn18rr""" +618 44 model """rotate""" +618 44 loss """softplus""" +618 44 regularizer """no""" +618 44 optimizer """adam""" +618 44 training_loop """owa""" +618 44 negative_sampler """basic""" +618 44 evaluator """rankbased""" +618 45 dataset """wn18rr""" +618 45 model """rotate""" +618 45 loss """softplus""" +618 45 regularizer """no""" +618 45 optimizer """adam""" +618 45 training_loop """owa""" +618 45 negative_sampler """basic""" +618 45 evaluator """rankbased""" +618 46 dataset """wn18rr""" +618 46 model """rotate""" +618 46 loss """softplus""" +618 46 regularizer """no""" +618 46 optimizer """adam""" +618 46 training_loop """owa""" +618 46 negative_sampler """basic""" +618 46 evaluator """rankbased""" +618 47 dataset """wn18rr""" +618 47 model """rotate""" +618 47 loss """softplus""" +618 47 regularizer """no""" +618 47 optimizer """adam""" +618 47 training_loop """owa""" +618 47 negative_sampler """basic""" +618 47 evaluator """rankbased""" +618 48 dataset """wn18rr""" +618 48 model """rotate""" +618 48 loss 
"""softplus""" +618 48 regularizer """no""" +618 48 optimizer """adam""" +618 48 training_loop """owa""" +618 48 negative_sampler """basic""" +618 48 evaluator """rankbased""" +618 49 dataset """wn18rr""" +618 49 model """rotate""" +618 49 loss """softplus""" +618 49 regularizer """no""" +618 49 optimizer """adam""" +618 49 training_loop """owa""" +618 49 negative_sampler """basic""" +618 49 evaluator """rankbased""" +618 50 dataset """wn18rr""" +618 50 model """rotate""" +618 50 loss """softplus""" +618 50 regularizer """no""" +618 50 optimizer """adam""" +618 50 training_loop """owa""" +618 50 negative_sampler """basic""" +618 50 evaluator """rankbased""" +618 51 dataset """wn18rr""" +618 51 model """rotate""" +618 51 loss """softplus""" +618 51 regularizer """no""" +618 51 optimizer """adam""" +618 51 training_loop """owa""" +618 51 negative_sampler """basic""" +618 51 evaluator """rankbased""" +618 52 dataset """wn18rr""" +618 52 model """rotate""" +618 52 loss """softplus""" +618 52 regularizer """no""" +618 52 optimizer """adam""" +618 52 training_loop """owa""" +618 52 negative_sampler """basic""" +618 52 evaluator """rankbased""" +618 53 dataset """wn18rr""" +618 53 model """rotate""" +618 53 loss """softplus""" +618 53 regularizer """no""" +618 53 optimizer """adam""" +618 53 training_loop """owa""" +618 53 negative_sampler """basic""" +618 53 evaluator """rankbased""" +618 54 dataset """wn18rr""" +618 54 model """rotate""" +618 54 loss """softplus""" +618 54 regularizer """no""" +618 54 optimizer """adam""" +618 54 training_loop """owa""" +618 54 negative_sampler """basic""" +618 54 evaluator """rankbased""" +618 55 dataset """wn18rr""" +618 55 model """rotate""" +618 55 loss """softplus""" +618 55 regularizer """no""" +618 55 optimizer """adam""" +618 55 training_loop """owa""" +618 55 negative_sampler """basic""" +618 55 evaluator """rankbased""" +618 56 dataset """wn18rr""" +618 56 model """rotate""" +618 56 loss """softplus""" +618 56 regularizer 
"""no""" +618 56 optimizer """adam""" +618 56 training_loop """owa""" +618 56 negative_sampler """basic""" +618 56 evaluator """rankbased""" +618 57 dataset """wn18rr""" +618 57 model """rotate""" +618 57 loss """softplus""" +618 57 regularizer """no""" +618 57 optimizer """adam""" +618 57 training_loop """owa""" +618 57 negative_sampler """basic""" +618 57 evaluator """rankbased""" +618 58 dataset """wn18rr""" +618 58 model """rotate""" +618 58 loss """softplus""" +618 58 regularizer """no""" +618 58 optimizer """adam""" +618 58 training_loop """owa""" +618 58 negative_sampler """basic""" +618 58 evaluator """rankbased""" +618 59 dataset """wn18rr""" +618 59 model """rotate""" +618 59 loss """softplus""" +618 59 regularizer """no""" +618 59 optimizer """adam""" +618 59 training_loop """owa""" +618 59 negative_sampler """basic""" +618 59 evaluator """rankbased""" +618 60 dataset """wn18rr""" +618 60 model """rotate""" +618 60 loss """softplus""" +618 60 regularizer """no""" +618 60 optimizer """adam""" +618 60 training_loop """owa""" +618 60 negative_sampler """basic""" +618 60 evaluator """rankbased""" +618 61 dataset """wn18rr""" +618 61 model """rotate""" +618 61 loss """softplus""" +618 61 regularizer """no""" +618 61 optimizer """adam""" +618 61 training_loop """owa""" +618 61 negative_sampler """basic""" +618 61 evaluator """rankbased""" +618 62 dataset """wn18rr""" +618 62 model """rotate""" +618 62 loss """softplus""" +618 62 regularizer """no""" +618 62 optimizer """adam""" +618 62 training_loop """owa""" +618 62 negative_sampler """basic""" +618 62 evaluator """rankbased""" +618 63 dataset """wn18rr""" +618 63 model """rotate""" +618 63 loss """softplus""" +618 63 regularizer """no""" +618 63 optimizer """adam""" +618 63 training_loop """owa""" +618 63 negative_sampler """basic""" +618 63 evaluator """rankbased""" +618 64 dataset """wn18rr""" +618 64 model """rotate""" +618 64 loss """softplus""" +618 64 regularizer """no""" +618 64 optimizer """adam""" 
+618 64 training_loop """owa""" +618 64 negative_sampler """basic""" +618 64 evaluator """rankbased""" +618 65 dataset """wn18rr""" +618 65 model """rotate""" +618 65 loss """softplus""" +618 65 regularizer """no""" +618 65 optimizer """adam""" +618 65 training_loop """owa""" +618 65 negative_sampler """basic""" +618 65 evaluator """rankbased""" +618 66 dataset """wn18rr""" +618 66 model """rotate""" +618 66 loss """softplus""" +618 66 regularizer """no""" +618 66 optimizer """adam""" +618 66 training_loop """owa""" +618 66 negative_sampler """basic""" +618 66 evaluator """rankbased""" +618 67 dataset """wn18rr""" +618 67 model """rotate""" +618 67 loss """softplus""" +618 67 regularizer """no""" +618 67 optimizer """adam""" +618 67 training_loop """owa""" +618 67 negative_sampler """basic""" +618 67 evaluator """rankbased""" +618 68 dataset """wn18rr""" +618 68 model """rotate""" +618 68 loss """softplus""" +618 68 regularizer """no""" +618 68 optimizer """adam""" +618 68 training_loop """owa""" +618 68 negative_sampler """basic""" +618 68 evaluator """rankbased""" +618 69 dataset """wn18rr""" +618 69 model """rotate""" +618 69 loss """softplus""" +618 69 regularizer """no""" +618 69 optimizer """adam""" +618 69 training_loop """owa""" +618 69 negative_sampler """basic""" +618 69 evaluator """rankbased""" +618 70 dataset """wn18rr""" +618 70 model """rotate""" +618 70 loss """softplus""" +618 70 regularizer """no""" +618 70 optimizer """adam""" +618 70 training_loop """owa""" +618 70 negative_sampler """basic""" +618 70 evaluator """rankbased""" +618 71 dataset """wn18rr""" +618 71 model """rotate""" +618 71 loss """softplus""" +618 71 regularizer """no""" +618 71 optimizer """adam""" +618 71 training_loop """owa""" +618 71 negative_sampler """basic""" +618 71 evaluator """rankbased""" +618 72 dataset """wn18rr""" +618 72 model """rotate""" +618 72 loss """softplus""" +618 72 regularizer """no""" +618 72 optimizer """adam""" +618 72 training_loop """owa""" +618 72 
negative_sampler """basic""" +618 72 evaluator """rankbased""" +618 73 dataset """wn18rr""" +618 73 model """rotate""" +618 73 loss """softplus""" +618 73 regularizer """no""" +618 73 optimizer """adam""" +618 73 training_loop """owa""" +618 73 negative_sampler """basic""" +618 73 evaluator """rankbased""" +618 74 dataset """wn18rr""" +618 74 model """rotate""" +618 74 loss """softplus""" +618 74 regularizer """no""" +618 74 optimizer """adam""" +618 74 training_loop """owa""" +618 74 negative_sampler """basic""" +618 74 evaluator """rankbased""" +618 75 dataset """wn18rr""" +618 75 model """rotate""" +618 75 loss """softplus""" +618 75 regularizer """no""" +618 75 optimizer """adam""" +618 75 training_loop """owa""" +618 75 negative_sampler """basic""" +618 75 evaluator """rankbased""" +618 76 dataset """wn18rr""" +618 76 model """rotate""" +618 76 loss """softplus""" +618 76 regularizer """no""" +618 76 optimizer """adam""" +618 76 training_loop """owa""" +618 76 negative_sampler """basic""" +618 76 evaluator """rankbased""" +618 77 dataset """wn18rr""" +618 77 model """rotate""" +618 77 loss """softplus""" +618 77 regularizer """no""" +618 77 optimizer """adam""" +618 77 training_loop """owa""" +618 77 negative_sampler """basic""" +618 77 evaluator """rankbased""" +618 78 dataset """wn18rr""" +618 78 model """rotate""" +618 78 loss """softplus""" +618 78 regularizer """no""" +618 78 optimizer """adam""" +618 78 training_loop """owa""" +618 78 negative_sampler """basic""" +618 78 evaluator """rankbased""" +618 79 dataset """wn18rr""" +618 79 model """rotate""" +618 79 loss """softplus""" +618 79 regularizer """no""" +618 79 optimizer """adam""" +618 79 training_loop """owa""" +618 79 negative_sampler """basic""" +618 79 evaluator """rankbased""" +618 80 dataset """wn18rr""" +618 80 model """rotate""" +618 80 loss """softplus""" +618 80 regularizer """no""" +618 80 optimizer """adam""" +618 80 training_loop """owa""" +618 80 negative_sampler """basic""" +618 80 
evaluator """rankbased""" +618 81 dataset """wn18rr""" +618 81 model """rotate""" +618 81 loss """softplus""" +618 81 regularizer """no""" +618 81 optimizer """adam""" +618 81 training_loop """owa""" +618 81 negative_sampler """basic""" +618 81 evaluator """rankbased""" +618 82 dataset """wn18rr""" +618 82 model """rotate""" +618 82 loss """softplus""" +618 82 regularizer """no""" +618 82 optimizer """adam""" +618 82 training_loop """owa""" +618 82 negative_sampler """basic""" +618 82 evaluator """rankbased""" +618 83 dataset """wn18rr""" +618 83 model """rotate""" +618 83 loss """softplus""" +618 83 regularizer """no""" +618 83 optimizer """adam""" +618 83 training_loop """owa""" +618 83 negative_sampler """basic""" +618 83 evaluator """rankbased""" +618 84 dataset """wn18rr""" +618 84 model """rotate""" +618 84 loss """softplus""" +618 84 regularizer """no""" +618 84 optimizer """adam""" +618 84 training_loop """owa""" +618 84 negative_sampler """basic""" +618 84 evaluator """rankbased""" +618 85 dataset """wn18rr""" +618 85 model """rotate""" +618 85 loss """softplus""" +618 85 regularizer """no""" +618 85 optimizer """adam""" +618 85 training_loop """owa""" +618 85 negative_sampler """basic""" +618 85 evaluator """rankbased""" +618 86 dataset """wn18rr""" +618 86 model """rotate""" +618 86 loss """softplus""" +618 86 regularizer """no""" +618 86 optimizer """adam""" +618 86 training_loop """owa""" +618 86 negative_sampler """basic""" +618 86 evaluator """rankbased""" +618 87 dataset """wn18rr""" +618 87 model """rotate""" +618 87 loss """softplus""" +618 87 regularizer """no""" +618 87 optimizer """adam""" +618 87 training_loop """owa""" +618 87 negative_sampler """basic""" +618 87 evaluator """rankbased""" +618 88 dataset """wn18rr""" +618 88 model """rotate""" +618 88 loss """softplus""" +618 88 regularizer """no""" +618 88 optimizer """adam""" +618 88 training_loop """owa""" +618 88 negative_sampler """basic""" +618 88 evaluator """rankbased""" +618 89 
dataset """wn18rr""" +618 89 model """rotate""" +618 89 loss """softplus""" +618 89 regularizer """no""" +618 89 optimizer """adam""" +618 89 training_loop """owa""" +618 89 negative_sampler """basic""" +618 89 evaluator """rankbased""" +618 90 dataset """wn18rr""" +618 90 model """rotate""" +618 90 loss """softplus""" +618 90 regularizer """no""" +618 90 optimizer """adam""" +618 90 training_loop """owa""" +618 90 negative_sampler """basic""" +618 90 evaluator """rankbased""" +618 91 dataset """wn18rr""" +618 91 model """rotate""" +618 91 loss """softplus""" +618 91 regularizer """no""" +618 91 optimizer """adam""" +618 91 training_loop """owa""" +618 91 negative_sampler """basic""" +618 91 evaluator """rankbased""" +618 92 dataset """wn18rr""" +618 92 model """rotate""" +618 92 loss """softplus""" +618 92 regularizer """no""" +618 92 optimizer """adam""" +618 92 training_loop """owa""" +618 92 negative_sampler """basic""" +618 92 evaluator """rankbased""" +618 93 dataset """wn18rr""" +618 93 model """rotate""" +618 93 loss """softplus""" +618 93 regularizer """no""" +618 93 optimizer """adam""" +618 93 training_loop """owa""" +618 93 negative_sampler """basic""" +618 93 evaluator """rankbased""" +618 94 dataset """wn18rr""" +618 94 model """rotate""" +618 94 loss """softplus""" +618 94 regularizer """no""" +618 94 optimizer """adam""" +618 94 training_loop """owa""" +618 94 negative_sampler """basic""" +618 94 evaluator """rankbased""" +618 95 dataset """wn18rr""" +618 95 model """rotate""" +618 95 loss """softplus""" +618 95 regularizer """no""" +618 95 optimizer """adam""" +618 95 training_loop """owa""" +618 95 negative_sampler """basic""" +618 95 evaluator """rankbased""" +618 96 dataset """wn18rr""" +618 96 model """rotate""" +618 96 loss """softplus""" +618 96 regularizer """no""" +618 96 optimizer """adam""" +618 96 training_loop """owa""" +618 96 negative_sampler """basic""" +618 96 evaluator """rankbased""" +618 97 dataset """wn18rr""" +618 97 model 
"""rotate""" +618 97 loss """softplus""" +618 97 regularizer """no""" +618 97 optimizer """adam""" +618 97 training_loop """owa""" +618 97 negative_sampler """basic""" +618 97 evaluator """rankbased""" +618 98 dataset """wn18rr""" +618 98 model """rotate""" +618 98 loss """softplus""" +618 98 regularizer """no""" +618 98 optimizer """adam""" +618 98 training_loop """owa""" +618 98 negative_sampler """basic""" +618 98 evaluator """rankbased""" +618 99 dataset """wn18rr""" +618 99 model """rotate""" +618 99 loss """softplus""" +618 99 regularizer """no""" +618 99 optimizer """adam""" +618 99 training_loop """owa""" +618 99 negative_sampler """basic""" +618 99 evaluator """rankbased""" +618 100 dataset """wn18rr""" +618 100 model """rotate""" +618 100 loss """softplus""" +618 100 regularizer """no""" +618 100 optimizer """adam""" +618 100 training_loop """owa""" +618 100 negative_sampler """basic""" +618 100 evaluator """rankbased""" +619 1 model.embedding_dim 2.0 +619 1 optimizer.lr 0.0037503316588540945 +619 1 training.batch_size 0.0 +619 1 training.label_smoothing 0.04515571520081703 +619 2 model.embedding_dim 1.0 +619 2 optimizer.lr 0.002228661942271698 +619 2 training.batch_size 2.0 +619 2 training.label_smoothing 0.002874016599257961 +619 3 model.embedding_dim 1.0 +619 3 optimizer.lr 0.0588336689711427 +619 3 training.batch_size 0.0 +619 3 training.label_smoothing 0.6353376280161559 +619 1 dataset """wn18rr""" +619 1 model """rotate""" +619 1 loss """crossentropy""" +619 1 regularizer """no""" +619 1 optimizer """adam""" +619 1 training_loop """lcwa""" +619 1 evaluator """rankbased""" +619 2 dataset """wn18rr""" +619 2 model """rotate""" +619 2 loss """crossentropy""" +619 2 regularizer """no""" +619 2 optimizer """adam""" +619 2 training_loop """lcwa""" +619 2 evaluator """rankbased""" +619 3 dataset """wn18rr""" +619 3 model """rotate""" +619 3 loss """crossentropy""" +619 3 regularizer """no""" +619 3 optimizer """adam""" +619 3 training_loop """lcwa""" +619 
3 evaluator """rankbased""" +620 1 model.embedding_dim 2.0 +620 1 optimizer.lr 0.04431076004388567 +620 1 training.batch_size 2.0 +620 1 training.label_smoothing 0.7110322153102321 +620 2 model.embedding_dim 2.0 +620 2 optimizer.lr 0.003181366569391964 +620 2 training.batch_size 1.0 +620 2 training.label_smoothing 0.003830639376353417 +620 3 model.embedding_dim 0.0 +620 3 optimizer.lr 0.020541452417937294 +620 3 training.batch_size 1.0 +620 3 training.label_smoothing 0.8171202561066221 +620 4 model.embedding_dim 2.0 +620 4 optimizer.lr 0.007175256212037824 +620 4 training.batch_size 0.0 +620 4 training.label_smoothing 0.0026829301378640874 +620 1 dataset """wn18rr""" +620 1 model """rotate""" +620 1 loss """crossentropy""" +620 1 regularizer """no""" +620 1 optimizer """adam""" +620 1 training_loop """lcwa""" +620 1 evaluator """rankbased""" +620 2 dataset """wn18rr""" +620 2 model """rotate""" +620 2 loss """crossentropy""" +620 2 regularizer """no""" +620 2 optimizer """adam""" +620 2 training_loop """lcwa""" +620 2 evaluator """rankbased""" +620 3 dataset """wn18rr""" +620 3 model """rotate""" +620 3 loss """crossentropy""" +620 3 regularizer """no""" +620 3 optimizer """adam""" +620 3 training_loop """lcwa""" +620 3 evaluator """rankbased""" +620 4 dataset """wn18rr""" +620 4 model """rotate""" +620 4 loss """crossentropy""" +620 4 regularizer """no""" +620 4 optimizer """adam""" +620 4 training_loop """lcwa""" +620 4 evaluator """rankbased""" +621 1 model.embedding_dim 0.0 +621 1 optimizer.lr 0.0038582846336970645 +621 1 training.batch_size 2.0 +621 1 training.label_smoothing 0.0058044998846670595 +621 2 model.embedding_dim 2.0 +621 2 optimizer.lr 0.05237290503478834 +621 2 training.batch_size 1.0 +621 2 training.label_smoothing 0.29478202223252914 +621 1 dataset """wn18rr""" +621 1 model """rotate""" +621 1 loss """softplus""" +621 1 regularizer """no""" +621 1 optimizer """adam""" +621 1 training_loop """lcwa""" +621 1 evaluator """rankbased""" +621 2 
dataset """wn18rr""" +621 2 model """rotate""" +621 2 loss """softplus""" +621 2 regularizer """no""" +621 2 optimizer """adam""" +621 2 training_loop """lcwa""" +621 2 evaluator """rankbased""" +622 1 model.embedding_dim 2.0 +622 1 optimizer.lr 0.014918252827724873 +622 1 training.batch_size 0.0 +622 1 training.label_smoothing 0.003264060301869185 +622 2 model.embedding_dim 0.0 +622 2 optimizer.lr 0.0013172974162833943 +622 2 training.batch_size 0.0 +622 2 training.label_smoothing 0.06537928695732385 +622 3 model.embedding_dim 0.0 +622 3 optimizer.lr 0.008605059831759647 +622 3 training.batch_size 2.0 +622 3 training.label_smoothing 0.0024984350735995984 +622 1 dataset """wn18rr""" +622 1 model """rotate""" +622 1 loss """softplus""" +622 1 regularizer """no""" +622 1 optimizer """adam""" +622 1 training_loop """lcwa""" +622 1 evaluator """rankbased""" +622 2 dataset """wn18rr""" +622 2 model """rotate""" +622 2 loss """softplus""" +622 2 regularizer """no""" +622 2 optimizer """adam""" +622 2 training_loop """lcwa""" +622 2 evaluator """rankbased""" +622 3 dataset """wn18rr""" +622 3 model """rotate""" +622 3 loss """softplus""" +622 3 regularizer """no""" +622 3 optimizer """adam""" +622 3 training_loop """lcwa""" +622 3 evaluator """rankbased""" +623 1 model.embedding_dim 0.0 +623 1 optimizer.lr 0.08203012046078598 +623 1 training.batch_size 2.0 +623 1 training.label_smoothing 0.3794472193291517 +623 2 model.embedding_dim 1.0 +623 2 optimizer.lr 0.08370236937114096 +623 2 training.batch_size 2.0 +623 2 training.label_smoothing 0.0015464348661747948 +623 3 model.embedding_dim 1.0 +623 3 optimizer.lr 0.005483041030953888 +623 3 training.batch_size 0.0 +623 3 training.label_smoothing 0.036907760870648815 +623 1 dataset """wn18rr""" +623 1 model """rotate""" +623 1 loss """bceaftersigmoid""" +623 1 regularizer """no""" +623 1 optimizer """adam""" +623 1 training_loop """lcwa""" +623 1 evaluator """rankbased""" +623 2 dataset """wn18rr""" +623 2 model """rotate""" 
+623 2 loss """bceaftersigmoid""" +623 2 regularizer """no""" +623 2 optimizer """adam""" +623 2 training_loop """lcwa""" +623 2 evaluator """rankbased""" +623 3 dataset """wn18rr""" +623 3 model """rotate""" +623 3 loss """bceaftersigmoid""" +623 3 regularizer """no""" +623 3 optimizer """adam""" +623 3 training_loop """lcwa""" +623 3 evaluator """rankbased""" +624 1 model.embedding_dim 2.0 +624 1 optimizer.lr 0.006214965781049104 +624 1 training.batch_size 0.0 +624 1 training.label_smoothing 0.004860609555715287 +624 2 model.embedding_dim 2.0 +624 2 optimizer.lr 0.003475504156957737 +624 2 training.batch_size 0.0 +624 2 training.label_smoothing 0.7127762205758156 +624 1 dataset """wn18rr""" +624 1 model """rotate""" +624 1 loss """bceaftersigmoid""" +624 1 regularizer """no""" +624 1 optimizer """adam""" +624 1 training_loop """lcwa""" +624 1 evaluator """rankbased""" +624 2 dataset """wn18rr""" +624 2 model """rotate""" +624 2 loss """bceaftersigmoid""" +624 2 regularizer """no""" +624 2 optimizer """adam""" +624 2 training_loop """lcwa""" +624 2 evaluator """rankbased""" +625 1 model.embedding_dim 2.0 +625 1 optimizer.lr 0.01492608796148974 +625 1 negative_sampler.num_negs_per_pos 45.0 +625 1 training.batch_size 0.0 +625 2 model.embedding_dim 1.0 +625 2 optimizer.lr 0.02863028675842462 +625 2 negative_sampler.num_negs_per_pos 49.0 +625 2 training.batch_size 2.0 +625 3 model.embedding_dim 1.0 +625 3 optimizer.lr 0.0010764786608976522 +625 3 negative_sampler.num_negs_per_pos 40.0 +625 3 training.batch_size 2.0 +625 4 model.embedding_dim 2.0 +625 4 optimizer.lr 0.002953846233688189 +625 4 negative_sampler.num_negs_per_pos 28.0 +625 4 training.batch_size 0.0 +625 5 model.embedding_dim 1.0 +625 5 optimizer.lr 0.002468508231049954 +625 5 negative_sampler.num_negs_per_pos 38.0 +625 5 training.batch_size 3.0 +625 6 model.embedding_dim 0.0 +625 6 optimizer.lr 0.004353692784858383 +625 6 negative_sampler.num_negs_per_pos 9.0 +625 6 training.batch_size 2.0 +625 7 
model.embedding_dim 1.0 +625 7 optimizer.lr 0.009052215438986334 +625 7 negative_sampler.num_negs_per_pos 34.0 +625 7 training.batch_size 0.0 +625 8 model.embedding_dim 2.0 +625 8 optimizer.lr 0.07160091864489201 +625 8 negative_sampler.num_negs_per_pos 0.0 +625 8 training.batch_size 3.0 +625 9 model.embedding_dim 1.0 +625 9 optimizer.lr 0.03308680510117914 +625 9 negative_sampler.num_negs_per_pos 13.0 +625 9 training.batch_size 1.0 +625 10 model.embedding_dim 2.0 +625 10 optimizer.lr 0.002422585576041489 +625 10 negative_sampler.num_negs_per_pos 45.0 +625 10 training.batch_size 3.0 +625 11 model.embedding_dim 2.0 +625 11 optimizer.lr 0.02179567154447765 +625 11 negative_sampler.num_negs_per_pos 11.0 +625 11 training.batch_size 1.0 +625 12 model.embedding_dim 2.0 +625 12 optimizer.lr 0.04864322542527604 +625 12 negative_sampler.num_negs_per_pos 25.0 +625 12 training.batch_size 1.0 +625 13 model.embedding_dim 2.0 +625 13 optimizer.lr 0.0012068886612470635 +625 13 negative_sampler.num_negs_per_pos 45.0 +625 13 training.batch_size 3.0 +625 14 model.embedding_dim 2.0 +625 14 optimizer.lr 0.00732764882723163 +625 14 negative_sampler.num_negs_per_pos 11.0 +625 14 training.batch_size 1.0 +625 15 model.embedding_dim 2.0 +625 15 optimizer.lr 0.006388526296084662 +625 15 negative_sampler.num_negs_per_pos 26.0 +625 15 training.batch_size 3.0 +625 16 model.embedding_dim 2.0 +625 16 optimizer.lr 0.0699367598499288 +625 16 negative_sampler.num_negs_per_pos 1.0 +625 16 training.batch_size 3.0 +625 1 dataset """yago310""" +625 1 model """rotate""" +625 1 loss """bceaftersigmoid""" +625 1 regularizer """no""" +625 1 optimizer """adam""" +625 1 training_loop """owa""" +625 1 negative_sampler """basic""" +625 1 evaluator """rankbased""" +625 2 dataset """yago310""" +625 2 model """rotate""" +625 2 loss """bceaftersigmoid""" +625 2 regularizer """no""" +625 2 optimizer """adam""" +625 2 training_loop """owa""" +625 2 negative_sampler """basic""" +625 2 evaluator """rankbased""" +625 3 
dataset """yago310""" +625 3 model """rotate""" +625 3 loss """bceaftersigmoid""" +625 3 regularizer """no""" +625 3 optimizer """adam""" +625 3 training_loop """owa""" +625 3 negative_sampler """basic""" +625 3 evaluator """rankbased""" +625 4 dataset """yago310""" +625 4 model """rotate""" +625 4 loss """bceaftersigmoid""" +625 4 regularizer """no""" +625 4 optimizer """adam""" +625 4 training_loop """owa""" +625 4 negative_sampler """basic""" +625 4 evaluator """rankbased""" +625 5 dataset """yago310""" +625 5 model """rotate""" +625 5 loss """bceaftersigmoid""" +625 5 regularizer """no""" +625 5 optimizer """adam""" +625 5 training_loop """owa""" +625 5 negative_sampler """basic""" +625 5 evaluator """rankbased""" +625 6 dataset """yago310""" +625 6 model """rotate""" +625 6 loss """bceaftersigmoid""" +625 6 regularizer """no""" +625 6 optimizer """adam""" +625 6 training_loop """owa""" +625 6 negative_sampler """basic""" +625 6 evaluator """rankbased""" +625 7 dataset """yago310""" +625 7 model """rotate""" +625 7 loss """bceaftersigmoid""" +625 7 regularizer """no""" +625 7 optimizer """adam""" +625 7 training_loop """owa""" +625 7 negative_sampler """basic""" +625 7 evaluator """rankbased""" +625 8 dataset """yago310""" +625 8 model """rotate""" +625 8 loss """bceaftersigmoid""" +625 8 regularizer """no""" +625 8 optimizer """adam""" +625 8 training_loop """owa""" +625 8 negative_sampler """basic""" +625 8 evaluator """rankbased""" +625 9 dataset """yago310""" +625 9 model """rotate""" +625 9 loss """bceaftersigmoid""" +625 9 regularizer """no""" +625 9 optimizer """adam""" +625 9 training_loop """owa""" +625 9 negative_sampler """basic""" +625 9 evaluator """rankbased""" +625 10 dataset """yago310""" +625 10 model """rotate""" +625 10 loss """bceaftersigmoid""" +625 10 regularizer """no""" +625 10 optimizer """adam""" +625 10 training_loop """owa""" +625 10 negative_sampler """basic""" +625 10 evaluator """rankbased""" +625 11 dataset """yago310""" +625 11 
model """rotate""" +625 11 loss """bceaftersigmoid""" +625 11 regularizer """no""" +625 11 optimizer """adam""" +625 11 training_loop """owa""" +625 11 negative_sampler """basic""" +625 11 evaluator """rankbased""" +625 12 dataset """yago310""" +625 12 model """rotate""" +625 12 loss """bceaftersigmoid""" +625 12 regularizer """no""" +625 12 optimizer """adam""" +625 12 training_loop """owa""" +625 12 negative_sampler """basic""" +625 12 evaluator """rankbased""" +625 13 dataset """yago310""" +625 13 model """rotate""" +625 13 loss """bceaftersigmoid""" +625 13 regularizer """no""" +625 13 optimizer """adam""" +625 13 training_loop """owa""" +625 13 negative_sampler """basic""" +625 13 evaluator """rankbased""" +625 14 dataset """yago310""" +625 14 model """rotate""" +625 14 loss """bceaftersigmoid""" +625 14 regularizer """no""" +625 14 optimizer """adam""" +625 14 training_loop """owa""" +625 14 negative_sampler """basic""" +625 14 evaluator """rankbased""" +625 15 dataset """yago310""" +625 15 model """rotate""" +625 15 loss """bceaftersigmoid""" +625 15 regularizer """no""" +625 15 optimizer """adam""" +625 15 training_loop """owa""" +625 15 negative_sampler """basic""" +625 15 evaluator """rankbased""" +625 16 dataset """yago310""" +625 16 model """rotate""" +625 16 loss """bceaftersigmoid""" +625 16 regularizer """no""" +625 16 optimizer """adam""" +625 16 training_loop """owa""" +625 16 negative_sampler """basic""" +625 16 evaluator """rankbased""" +626 1 model.embedding_dim 1.0 +626 1 optimizer.lr 0.005914905939252485 +626 1 negative_sampler.num_negs_per_pos 41.0 +626 1 training.batch_size 3.0 +626 2 model.embedding_dim 2.0 +626 2 optimizer.lr 0.021996020051748055 +626 2 negative_sampler.num_negs_per_pos 34.0 +626 2 training.batch_size 0.0 +626 3 model.embedding_dim 1.0 +626 3 optimizer.lr 0.050793765236027924 +626 3 negative_sampler.num_negs_per_pos 13.0 +626 3 training.batch_size 3.0 +626 4 model.embedding_dim 1.0 +626 4 optimizer.lr 0.017367199138394226 
+626 4 negative_sampler.num_negs_per_pos 34.0 +626 4 training.batch_size 2.0 +626 5 model.embedding_dim 2.0 +626 5 optimizer.lr 0.018843466795977842 +626 5 negative_sampler.num_negs_per_pos 35.0 +626 5 training.batch_size 3.0 +626 6 model.embedding_dim 1.0 +626 6 optimizer.lr 0.07994548663816166 +626 6 negative_sampler.num_negs_per_pos 1.0 +626 6 training.batch_size 1.0 +626 7 model.embedding_dim 1.0 +626 7 optimizer.lr 0.0013563075989450617 +626 7 negative_sampler.num_negs_per_pos 26.0 +626 7 training.batch_size 1.0 +626 8 model.embedding_dim 2.0 +626 8 optimizer.lr 0.03723309517695056 +626 8 negative_sampler.num_negs_per_pos 30.0 +626 8 training.batch_size 1.0 +626 9 model.embedding_dim 1.0 +626 9 optimizer.lr 0.030164607464555356 +626 9 negative_sampler.num_negs_per_pos 43.0 +626 9 training.batch_size 0.0 +626 10 model.embedding_dim 0.0 +626 10 optimizer.lr 0.014018619797567945 +626 10 negative_sampler.num_negs_per_pos 22.0 +626 10 training.batch_size 2.0 +626 11 model.embedding_dim 1.0 +626 11 optimizer.lr 0.053851669618692255 +626 11 negative_sampler.num_negs_per_pos 16.0 +626 11 training.batch_size 2.0 +626 12 model.embedding_dim 0.0 +626 12 optimizer.lr 0.047484377940637645 +626 12 negative_sampler.num_negs_per_pos 28.0 +626 12 training.batch_size 1.0 +626 13 model.embedding_dim 2.0 +626 13 optimizer.lr 0.06140631297182312 +626 13 negative_sampler.num_negs_per_pos 48.0 +626 13 training.batch_size 1.0 +626 14 model.embedding_dim 1.0 +626 14 optimizer.lr 0.001773592626700073 +626 14 negative_sampler.num_negs_per_pos 17.0 +626 14 training.batch_size 1.0 +626 15 model.embedding_dim 0.0 +626 15 optimizer.lr 0.060120549095919885 +626 15 negative_sampler.num_negs_per_pos 20.0 +626 15 training.batch_size 0.0 +626 16 model.embedding_dim 2.0 +626 16 optimizer.lr 0.08847425845708838 +626 16 negative_sampler.num_negs_per_pos 11.0 +626 16 training.batch_size 2.0 +626 17 model.embedding_dim 2.0 +626 17 optimizer.lr 0.046288352541946624 +626 17 
negative_sampler.num_negs_per_pos 13.0 +626 17 training.batch_size 0.0 +626 18 model.embedding_dim 1.0 +626 18 optimizer.lr 0.002214477835675409 +626 18 negative_sampler.num_negs_per_pos 23.0 +626 18 training.batch_size 3.0 +626 19 model.embedding_dim 1.0 +626 19 optimizer.lr 0.03543466850137875 +626 19 negative_sampler.num_negs_per_pos 12.0 +626 19 training.batch_size 1.0 +626 20 model.embedding_dim 1.0 +626 20 optimizer.lr 0.03709928287338947 +626 20 negative_sampler.num_negs_per_pos 4.0 +626 20 training.batch_size 2.0 +626 21 model.embedding_dim 2.0 +626 21 optimizer.lr 0.00972305299796057 +626 21 negative_sampler.num_negs_per_pos 22.0 +626 21 training.batch_size 2.0 +626 22 model.embedding_dim 0.0 +626 22 optimizer.lr 0.033480428794323225 +626 22 negative_sampler.num_negs_per_pos 43.0 +626 22 training.batch_size 1.0 +626 23 model.embedding_dim 2.0 +626 23 optimizer.lr 0.05941603306827581 +626 23 negative_sampler.num_negs_per_pos 7.0 +626 23 training.batch_size 0.0 +626 24 model.embedding_dim 2.0 +626 24 optimizer.lr 0.0039155574582712075 +626 24 negative_sampler.num_negs_per_pos 31.0 +626 24 training.batch_size 3.0 +626 25 model.embedding_dim 2.0 +626 25 optimizer.lr 0.018334318684017498 +626 25 negative_sampler.num_negs_per_pos 19.0 +626 25 training.batch_size 1.0 +626 26 model.embedding_dim 0.0 +626 26 optimizer.lr 0.07162712044562644 +626 26 negative_sampler.num_negs_per_pos 32.0 +626 26 training.batch_size 3.0 +626 27 model.embedding_dim 1.0 +626 27 optimizer.lr 0.0024608761876857787 +626 27 negative_sampler.num_negs_per_pos 21.0 +626 27 training.batch_size 1.0 +626 28 model.embedding_dim 0.0 +626 28 optimizer.lr 0.010254941415329592 +626 28 negative_sampler.num_negs_per_pos 15.0 +626 28 training.batch_size 2.0 +626 29 model.embedding_dim 1.0 +626 29 optimizer.lr 0.025868416232871654 +626 29 negative_sampler.num_negs_per_pos 18.0 +626 29 training.batch_size 0.0 +626 30 model.embedding_dim 1.0 +626 30 optimizer.lr 0.004652385220780043 +626 30 
negative_sampler.num_negs_per_pos 18.0 +626 30 training.batch_size 2.0 +626 31 model.embedding_dim 2.0 +626 31 optimizer.lr 0.00799500407926031 +626 31 negative_sampler.num_negs_per_pos 37.0 +626 31 training.batch_size 1.0 +626 32 model.embedding_dim 2.0 +626 32 optimizer.lr 0.001631562498927153 +626 32 negative_sampler.num_negs_per_pos 5.0 +626 32 training.batch_size 3.0 +626 33 model.embedding_dim 2.0 +626 33 optimizer.lr 0.03784268732716352 +626 33 negative_sampler.num_negs_per_pos 1.0 +626 33 training.batch_size 1.0 +626 34 model.embedding_dim 0.0 +626 34 optimizer.lr 0.023536131563163594 +626 34 negative_sampler.num_negs_per_pos 24.0 +626 34 training.batch_size 1.0 +626 35 model.embedding_dim 2.0 +626 35 optimizer.lr 0.026581556047257027 +626 35 negative_sampler.num_negs_per_pos 12.0 +626 35 training.batch_size 1.0 +626 1 dataset """yago310""" +626 1 model """rotate""" +626 1 loss """bceaftersigmoid""" +626 1 regularizer """no""" +626 1 optimizer """adam""" +626 1 training_loop """owa""" +626 1 negative_sampler """basic""" +626 1 evaluator """rankbased""" +626 2 dataset """yago310""" +626 2 model """rotate""" +626 2 loss """bceaftersigmoid""" +626 2 regularizer """no""" +626 2 optimizer """adam""" +626 2 training_loop """owa""" +626 2 negative_sampler """basic""" +626 2 evaluator """rankbased""" +626 3 dataset """yago310""" +626 3 model """rotate""" +626 3 loss """bceaftersigmoid""" +626 3 regularizer """no""" +626 3 optimizer """adam""" +626 3 training_loop """owa""" +626 3 negative_sampler """basic""" +626 3 evaluator """rankbased""" +626 4 dataset """yago310""" +626 4 model """rotate""" +626 4 loss """bceaftersigmoid""" +626 4 regularizer """no""" +626 4 optimizer """adam""" +626 4 training_loop """owa""" +626 4 negative_sampler """basic""" +626 4 evaluator """rankbased""" +626 5 dataset """yago310""" +626 5 model """rotate""" +626 5 loss """bceaftersigmoid""" +626 5 regularizer """no""" +626 5 optimizer """adam""" +626 5 training_loop """owa""" +626 5 
negative_sampler """basic""" +626 5 evaluator """rankbased""" +626 6 dataset """yago310""" +626 6 model """rotate""" +626 6 loss """bceaftersigmoid""" +626 6 regularizer """no""" +626 6 optimizer """adam""" +626 6 training_loop """owa""" +626 6 negative_sampler """basic""" +626 6 evaluator """rankbased""" +626 7 dataset """yago310""" +626 7 model """rotate""" +626 7 loss """bceaftersigmoid""" +626 7 regularizer """no""" +626 7 optimizer """adam""" +626 7 training_loop """owa""" +626 7 negative_sampler """basic""" +626 7 evaluator """rankbased""" +626 8 dataset """yago310""" +626 8 model """rotate""" +626 8 loss """bceaftersigmoid""" +626 8 regularizer """no""" +626 8 optimizer """adam""" +626 8 training_loop """owa""" +626 8 negative_sampler """basic""" +626 8 evaluator """rankbased""" +626 9 dataset """yago310""" +626 9 model """rotate""" +626 9 loss """bceaftersigmoid""" +626 9 regularizer """no""" +626 9 optimizer """adam""" +626 9 training_loop """owa""" +626 9 negative_sampler """basic""" +626 9 evaluator """rankbased""" +626 10 dataset """yago310""" +626 10 model """rotate""" +626 10 loss """bceaftersigmoid""" +626 10 regularizer """no""" +626 10 optimizer """adam""" +626 10 training_loop """owa""" +626 10 negative_sampler """basic""" +626 10 evaluator """rankbased""" +626 11 dataset """yago310""" +626 11 model """rotate""" +626 11 loss """bceaftersigmoid""" +626 11 regularizer """no""" +626 11 optimizer """adam""" +626 11 training_loop """owa""" +626 11 negative_sampler """basic""" +626 11 evaluator """rankbased""" +626 12 dataset """yago310""" +626 12 model """rotate""" +626 12 loss """bceaftersigmoid""" +626 12 regularizer """no""" +626 12 optimizer """adam""" +626 12 training_loop """owa""" +626 12 negative_sampler """basic""" +626 12 evaluator """rankbased""" +626 13 dataset """yago310""" +626 13 model """rotate""" +626 13 loss """bceaftersigmoid""" +626 13 regularizer """no""" +626 13 optimizer """adam""" +626 13 training_loop """owa""" +626 13 
negative_sampler """basic""" +626 13 evaluator """rankbased""" +626 14 dataset """yago310""" +626 14 model """rotate""" +626 14 loss """bceaftersigmoid""" +626 14 regularizer """no""" +626 14 optimizer """adam""" +626 14 training_loop """owa""" +626 14 negative_sampler """basic""" +626 14 evaluator """rankbased""" +626 15 dataset """yago310""" +626 15 model """rotate""" +626 15 loss """bceaftersigmoid""" +626 15 regularizer """no""" +626 15 optimizer """adam""" +626 15 training_loop """owa""" +626 15 negative_sampler """basic""" +626 15 evaluator """rankbased""" +626 16 dataset """yago310""" +626 16 model """rotate""" +626 16 loss """bceaftersigmoid""" +626 16 regularizer """no""" +626 16 optimizer """adam""" +626 16 training_loop """owa""" +626 16 negative_sampler """basic""" +626 16 evaluator """rankbased""" +626 17 dataset """yago310""" +626 17 model """rotate""" +626 17 loss """bceaftersigmoid""" +626 17 regularizer """no""" +626 17 optimizer """adam""" +626 17 training_loop """owa""" +626 17 negative_sampler """basic""" +626 17 evaluator """rankbased""" +626 18 dataset """yago310""" +626 18 model """rotate""" +626 18 loss """bceaftersigmoid""" +626 18 regularizer """no""" +626 18 optimizer """adam""" +626 18 training_loop """owa""" +626 18 negative_sampler """basic""" +626 18 evaluator """rankbased""" +626 19 dataset """yago310""" +626 19 model """rotate""" +626 19 loss """bceaftersigmoid""" +626 19 regularizer """no""" +626 19 optimizer """adam""" +626 19 training_loop """owa""" +626 19 negative_sampler """basic""" +626 19 evaluator """rankbased""" +626 20 dataset """yago310""" +626 20 model """rotate""" +626 20 loss """bceaftersigmoid""" +626 20 regularizer """no""" +626 20 optimizer """adam""" +626 20 training_loop """owa""" +626 20 negative_sampler """basic""" +626 20 evaluator """rankbased""" +626 21 dataset """yago310""" +626 21 model """rotate""" +626 21 loss """bceaftersigmoid""" +626 21 regularizer """no""" +626 21 optimizer """adam""" +626 21 
training_loop """owa""" +626 21 negative_sampler """basic""" +626 21 evaluator """rankbased""" +626 22 dataset """yago310""" +626 22 model """rotate""" +626 22 loss """bceaftersigmoid""" +626 22 regularizer """no""" +626 22 optimizer """adam""" +626 22 training_loop """owa""" +626 22 negative_sampler """basic""" +626 22 evaluator """rankbased""" +626 23 dataset """yago310""" +626 23 model """rotate""" +626 23 loss """bceaftersigmoid""" +626 23 regularizer """no""" +626 23 optimizer """adam""" +626 23 training_loop """owa""" +626 23 negative_sampler """basic""" +626 23 evaluator """rankbased""" +626 24 dataset """yago310""" +626 24 model """rotate""" +626 24 loss """bceaftersigmoid""" +626 24 regularizer """no""" +626 24 optimizer """adam""" +626 24 training_loop """owa""" +626 24 negative_sampler """basic""" +626 24 evaluator """rankbased""" +626 25 dataset """yago310""" +626 25 model """rotate""" +626 25 loss """bceaftersigmoid""" +626 25 regularizer """no""" +626 25 optimizer """adam""" +626 25 training_loop """owa""" +626 25 negative_sampler """basic""" +626 25 evaluator """rankbased""" +626 26 dataset """yago310""" +626 26 model """rotate""" +626 26 loss """bceaftersigmoid""" +626 26 regularizer """no""" +626 26 optimizer """adam""" +626 26 training_loop """owa""" +626 26 negative_sampler """basic""" +626 26 evaluator """rankbased""" +626 27 dataset """yago310""" +626 27 model """rotate""" +626 27 loss """bceaftersigmoid""" +626 27 regularizer """no""" +626 27 optimizer """adam""" +626 27 training_loop """owa""" +626 27 negative_sampler """basic""" +626 27 evaluator """rankbased""" +626 28 dataset """yago310""" +626 28 model """rotate""" +626 28 loss """bceaftersigmoid""" +626 28 regularizer """no""" +626 28 optimizer """adam""" +626 28 training_loop """owa""" +626 28 negative_sampler """basic""" +626 28 evaluator """rankbased""" +626 29 dataset """yago310""" +626 29 model """rotate""" +626 29 loss """bceaftersigmoid""" +626 29 regularizer """no""" +626 29 
optimizer """adam""" +626 29 training_loop """owa""" +626 29 negative_sampler """basic""" +626 29 evaluator """rankbased""" +626 30 dataset """yago310""" +626 30 model """rotate""" +626 30 loss """bceaftersigmoid""" +626 30 regularizer """no""" +626 30 optimizer """adam""" +626 30 training_loop """owa""" +626 30 negative_sampler """basic""" +626 30 evaluator """rankbased""" +626 31 dataset """yago310""" +626 31 model """rotate""" +626 31 loss """bceaftersigmoid""" +626 31 regularizer """no""" +626 31 optimizer """adam""" +626 31 training_loop """owa""" +626 31 negative_sampler """basic""" +626 31 evaluator """rankbased""" +626 32 dataset """yago310""" +626 32 model """rotate""" +626 32 loss """bceaftersigmoid""" +626 32 regularizer """no""" +626 32 optimizer """adam""" +626 32 training_loop """owa""" +626 32 negative_sampler """basic""" +626 32 evaluator """rankbased""" +626 33 dataset """yago310""" +626 33 model """rotate""" +626 33 loss """bceaftersigmoid""" +626 33 regularizer """no""" +626 33 optimizer """adam""" +626 33 training_loop """owa""" +626 33 negative_sampler """basic""" +626 33 evaluator """rankbased""" +626 34 dataset """yago310""" +626 34 model """rotate""" +626 34 loss """bceaftersigmoid""" +626 34 regularizer """no""" +626 34 optimizer """adam""" +626 34 training_loop """owa""" +626 34 negative_sampler """basic""" +626 34 evaluator """rankbased""" +626 35 dataset """yago310""" +626 35 model """rotate""" +626 35 loss """bceaftersigmoid""" +626 35 regularizer """no""" +626 35 optimizer """adam""" +626 35 training_loop """owa""" +626 35 negative_sampler """basic""" +626 35 evaluator """rankbased""" +627 1 model.embedding_dim 1.0 +627 1 optimizer.lr 0.03571936054232889 +627 1 negative_sampler.num_negs_per_pos 2.0 +627 1 training.batch_size 3.0 +627 2 model.embedding_dim 1.0 +627 2 optimizer.lr 0.0014719238860245564 +627 2 negative_sampler.num_negs_per_pos 36.0 +627 2 training.batch_size 3.0 +627 3 model.embedding_dim 0.0 +627 3 optimizer.lr 
0.005839667693128585 +627 3 negative_sampler.num_negs_per_pos 48.0 +627 3 training.batch_size 2.0 +627 4 model.embedding_dim 1.0 +627 4 optimizer.lr 0.01732861674842128 +627 4 negative_sampler.num_negs_per_pos 3.0 +627 4 training.batch_size 0.0 +627 5 model.embedding_dim 0.0 +627 5 optimizer.lr 0.06992948285320798 +627 5 negative_sampler.num_negs_per_pos 47.0 +627 5 training.batch_size 1.0 +627 6 model.embedding_dim 1.0 +627 6 optimizer.lr 0.07925464305123682 +627 6 negative_sampler.num_negs_per_pos 25.0 +627 6 training.batch_size 3.0 +627 7 model.embedding_dim 2.0 +627 7 optimizer.lr 0.007938157775143556 +627 7 negative_sampler.num_negs_per_pos 26.0 +627 7 training.batch_size 0.0 +627 8 model.embedding_dim 2.0 +627 8 optimizer.lr 0.026142673699686335 +627 8 negative_sampler.num_negs_per_pos 31.0 +627 8 training.batch_size 0.0 +627 9 model.embedding_dim 0.0 +627 9 optimizer.lr 0.003940487802857208 +627 9 negative_sampler.num_negs_per_pos 32.0 +627 9 training.batch_size 1.0 +627 10 model.embedding_dim 1.0 +627 10 optimizer.lr 0.025053770863027645 +627 10 negative_sampler.num_negs_per_pos 34.0 +627 10 training.batch_size 0.0 +627 11 model.embedding_dim 0.0 +627 11 optimizer.lr 0.01491328372469178 +627 11 negative_sampler.num_negs_per_pos 11.0 +627 11 training.batch_size 0.0 +627 12 model.embedding_dim 2.0 +627 12 optimizer.lr 0.03948817754588178 +627 12 negative_sampler.num_negs_per_pos 2.0 +627 12 training.batch_size 0.0 +627 13 model.embedding_dim 0.0 +627 13 optimizer.lr 0.008367703485822817 +627 13 negative_sampler.num_negs_per_pos 35.0 +627 13 training.batch_size 3.0 +627 14 model.embedding_dim 2.0 +627 14 optimizer.lr 0.030494452610224914 +627 14 negative_sampler.num_negs_per_pos 33.0 +627 14 training.batch_size 2.0 +627 15 model.embedding_dim 2.0 +627 15 optimizer.lr 0.0023929885495983876 +627 15 negative_sampler.num_negs_per_pos 44.0 +627 15 training.batch_size 1.0 +627 16 model.embedding_dim 1.0 +627 16 optimizer.lr 0.0025757815166308766 +627 16 
negative_sampler.num_negs_per_pos 4.0 +627 16 training.batch_size 0.0 +627 17 model.embedding_dim 2.0 +627 17 optimizer.lr 0.001498382246969413 +627 17 negative_sampler.num_negs_per_pos 4.0 +627 17 training.batch_size 1.0 +627 1 dataset """yago310""" +627 1 model """rotate""" +627 1 loss """softplus""" +627 1 regularizer """no""" +627 1 optimizer """adam""" +627 1 training_loop """owa""" +627 1 negative_sampler """basic""" +627 1 evaluator """rankbased""" +627 2 dataset """yago310""" +627 2 model """rotate""" +627 2 loss """softplus""" +627 2 regularizer """no""" +627 2 optimizer """adam""" +627 2 training_loop """owa""" +627 2 negative_sampler """basic""" +627 2 evaluator """rankbased""" +627 3 dataset """yago310""" +627 3 model """rotate""" +627 3 loss """softplus""" +627 3 regularizer """no""" +627 3 optimizer """adam""" +627 3 training_loop """owa""" +627 3 negative_sampler """basic""" +627 3 evaluator """rankbased""" +627 4 dataset """yago310""" +627 4 model """rotate""" +627 4 loss """softplus""" +627 4 regularizer """no""" +627 4 optimizer """adam""" +627 4 training_loop """owa""" +627 4 negative_sampler """basic""" +627 4 evaluator """rankbased""" +627 5 dataset """yago310""" +627 5 model """rotate""" +627 5 loss """softplus""" +627 5 regularizer """no""" +627 5 optimizer """adam""" +627 5 training_loop """owa""" +627 5 negative_sampler """basic""" +627 5 evaluator """rankbased""" +627 6 dataset """yago310""" +627 6 model """rotate""" +627 6 loss """softplus""" +627 6 regularizer """no""" +627 6 optimizer """adam""" +627 6 training_loop """owa""" +627 6 negative_sampler """basic""" +627 6 evaluator """rankbased""" +627 7 dataset """yago310""" +627 7 model """rotate""" +627 7 loss """softplus""" +627 7 regularizer """no""" +627 7 optimizer """adam""" +627 7 training_loop """owa""" +627 7 negative_sampler """basic""" +627 7 evaluator """rankbased""" +627 8 dataset """yago310""" +627 8 model """rotate""" +627 8 loss """softplus""" +627 8 regularizer """no""" 
+627 8 optimizer """adam""" +627 8 training_loop """owa""" +627 8 negative_sampler """basic""" +627 8 evaluator """rankbased""" +627 9 dataset """yago310""" +627 9 model """rotate""" +627 9 loss """softplus""" +627 9 regularizer """no""" +627 9 optimizer """adam""" +627 9 training_loop """owa""" +627 9 negative_sampler """basic""" +627 9 evaluator """rankbased""" +627 10 dataset """yago310""" +627 10 model """rotate""" +627 10 loss """softplus""" +627 10 regularizer """no""" +627 10 optimizer """adam""" +627 10 training_loop """owa""" +627 10 negative_sampler """basic""" +627 10 evaluator """rankbased""" +627 11 dataset """yago310""" +627 11 model """rotate""" +627 11 loss """softplus""" +627 11 regularizer """no""" +627 11 optimizer """adam""" +627 11 training_loop """owa""" +627 11 negative_sampler """basic""" +627 11 evaluator """rankbased""" +627 12 dataset """yago310""" +627 12 model """rotate""" +627 12 loss """softplus""" +627 12 regularizer """no""" +627 12 optimizer """adam""" +627 12 training_loop """owa""" +627 12 negative_sampler """basic""" +627 12 evaluator """rankbased""" +627 13 dataset """yago310""" +627 13 model """rotate""" +627 13 loss """softplus""" +627 13 regularizer """no""" +627 13 optimizer """adam""" +627 13 training_loop """owa""" +627 13 negative_sampler """basic""" +627 13 evaluator """rankbased""" +627 14 dataset """yago310""" +627 14 model """rotate""" +627 14 loss """softplus""" +627 14 regularizer """no""" +627 14 optimizer """adam""" +627 14 training_loop """owa""" +627 14 negative_sampler """basic""" +627 14 evaluator """rankbased""" +627 15 dataset """yago310""" +627 15 model """rotate""" +627 15 loss """softplus""" +627 15 regularizer """no""" +627 15 optimizer """adam""" +627 15 training_loop """owa""" +627 15 negative_sampler """basic""" +627 15 evaluator """rankbased""" +627 16 dataset """yago310""" +627 16 model """rotate""" +627 16 loss """softplus""" +627 16 regularizer """no""" +627 16 optimizer """adam""" +627 16 
training_loop """owa""" +627 16 negative_sampler """basic""" +627 16 evaluator """rankbased""" +627 17 dataset """yago310""" +627 17 model """rotate""" +627 17 loss """softplus""" +627 17 regularizer """no""" +627 17 optimizer """adam""" +627 17 training_loop """owa""" +627 17 negative_sampler """basic""" +627 17 evaluator """rankbased""" +628 1 model.embedding_dim 2.0 +628 1 optimizer.lr 0.09121710253392883 +628 1 negative_sampler.num_negs_per_pos 12.0 +628 1 training.batch_size 2.0 +628 2 model.embedding_dim 1.0 +628 2 optimizer.lr 0.07650280721210292 +628 2 negative_sampler.num_negs_per_pos 45.0 +628 2 training.batch_size 0.0 +628 3 model.embedding_dim 0.0 +628 3 optimizer.lr 0.0034753724970673656 +628 3 negative_sampler.num_negs_per_pos 27.0 +628 3 training.batch_size 3.0 +628 4 model.embedding_dim 0.0 +628 4 optimizer.lr 0.004830677106880529 +628 4 negative_sampler.num_negs_per_pos 39.0 +628 4 training.batch_size 0.0 +628 5 model.embedding_dim 1.0 +628 5 optimizer.lr 0.0013851813284967815 +628 5 negative_sampler.num_negs_per_pos 6.0 +628 5 training.batch_size 2.0 +628 6 model.embedding_dim 0.0 +628 6 optimizer.lr 0.012443214094638373 +628 6 negative_sampler.num_negs_per_pos 18.0 +628 6 training.batch_size 1.0 +628 7 model.embedding_dim 1.0 +628 7 optimizer.lr 0.0011451154543215508 +628 7 negative_sampler.num_negs_per_pos 18.0 +628 7 training.batch_size 2.0 +628 8 model.embedding_dim 1.0 +628 8 optimizer.lr 0.0017445387199240595 +628 8 negative_sampler.num_negs_per_pos 41.0 +628 8 training.batch_size 2.0 +628 9 model.embedding_dim 1.0 +628 9 optimizer.lr 0.010735942002988523 +628 9 negative_sampler.num_negs_per_pos 25.0 +628 9 training.batch_size 0.0 +628 10 model.embedding_dim 1.0 +628 10 optimizer.lr 0.0014068779277458215 +628 10 negative_sampler.num_negs_per_pos 24.0 +628 10 training.batch_size 0.0 +628 11 model.embedding_dim 1.0 +628 11 optimizer.lr 0.09371073986170496 +628 11 negative_sampler.num_negs_per_pos 23.0 +628 11 training.batch_size 1.0 +628 12 
model.embedding_dim 1.0 +628 12 optimizer.lr 0.011288312748903528 +628 12 negative_sampler.num_negs_per_pos 33.0 +628 12 training.batch_size 2.0 +628 13 model.embedding_dim 0.0 +628 13 optimizer.lr 0.0014823807494268836 +628 13 negative_sampler.num_negs_per_pos 11.0 +628 13 training.batch_size 0.0 +628 14 model.embedding_dim 1.0 +628 14 optimizer.lr 0.08329000784188344 +628 14 negative_sampler.num_negs_per_pos 47.0 +628 14 training.batch_size 1.0 +628 15 model.embedding_dim 1.0 +628 15 optimizer.lr 0.09820049337053098 +628 15 negative_sampler.num_negs_per_pos 35.0 +628 15 training.batch_size 3.0 +628 16 model.embedding_dim 2.0 +628 16 optimizer.lr 0.05387244588192308 +628 16 negative_sampler.num_negs_per_pos 43.0 +628 16 training.batch_size 0.0 +628 17 model.embedding_dim 2.0 +628 17 optimizer.lr 0.061841726881600646 +628 17 negative_sampler.num_negs_per_pos 25.0 +628 17 training.batch_size 2.0 +628 18 model.embedding_dim 2.0 +628 18 optimizer.lr 0.004260722390983532 +628 18 negative_sampler.num_negs_per_pos 7.0 +628 18 training.batch_size 3.0 +628 19 model.embedding_dim 2.0 +628 19 optimizer.lr 0.003322129011302403 +628 19 negative_sampler.num_negs_per_pos 41.0 +628 19 training.batch_size 3.0 +628 20 model.embedding_dim 2.0 +628 20 optimizer.lr 0.009666484593213618 +628 20 negative_sampler.num_negs_per_pos 44.0 +628 20 training.batch_size 1.0 +628 21 model.embedding_dim 0.0 +628 21 optimizer.lr 0.001292463752246625 +628 21 negative_sampler.num_negs_per_pos 45.0 +628 21 training.batch_size 0.0 +628 22 model.embedding_dim 1.0 +628 22 optimizer.lr 0.052842000147935025 +628 22 negative_sampler.num_negs_per_pos 28.0 +628 22 training.batch_size 1.0 +628 23 model.embedding_dim 2.0 +628 23 optimizer.lr 0.0014495008768225068 +628 23 negative_sampler.num_negs_per_pos 12.0 +628 23 training.batch_size 2.0 +628 24 model.embedding_dim 2.0 +628 24 optimizer.lr 0.09725793586270895 +628 24 negative_sampler.num_negs_per_pos 23.0 +628 24 training.batch_size 2.0 +628 25 
model.embedding_dim 2.0 +628 25 optimizer.lr 0.006946565232667762 +628 25 negative_sampler.num_negs_per_pos 20.0 +628 25 training.batch_size 1.0 +628 26 model.embedding_dim 2.0 +628 26 optimizer.lr 0.0010145912268202807 +628 26 negative_sampler.num_negs_per_pos 2.0 +628 26 training.batch_size 1.0 +628 27 model.embedding_dim 0.0 +628 27 optimizer.lr 0.003272587926608022 +628 27 negative_sampler.num_negs_per_pos 2.0 +628 27 training.batch_size 1.0 +628 28 model.embedding_dim 0.0 +628 28 optimizer.lr 0.0020552621219294396 +628 28 negative_sampler.num_negs_per_pos 43.0 +628 28 training.batch_size 1.0 +628 29 model.embedding_dim 1.0 +628 29 optimizer.lr 0.011032959638930853 +628 29 negative_sampler.num_negs_per_pos 41.0 +628 29 training.batch_size 1.0 +628 30 model.embedding_dim 1.0 +628 30 optimizer.lr 0.030624114477999353 +628 30 negative_sampler.num_negs_per_pos 1.0 +628 30 training.batch_size 0.0 +628 31 model.embedding_dim 0.0 +628 31 optimizer.lr 0.011084662953644478 +628 31 negative_sampler.num_negs_per_pos 27.0 +628 31 training.batch_size 3.0 +628 32 model.embedding_dim 2.0 +628 32 optimizer.lr 0.01455494536350522 +628 32 negative_sampler.num_negs_per_pos 7.0 +628 32 training.batch_size 1.0 +628 33 model.embedding_dim 1.0 +628 33 optimizer.lr 0.0017229616016004848 +628 33 negative_sampler.num_negs_per_pos 10.0 +628 33 training.batch_size 0.0 +628 34 model.embedding_dim 1.0 +628 34 optimizer.lr 0.005715664243107998 +628 34 negative_sampler.num_negs_per_pos 26.0 +628 34 training.batch_size 0.0 +628 1 dataset """yago310""" +628 1 model """rotate""" +628 1 loss """softplus""" +628 1 regularizer """no""" +628 1 optimizer """adam""" +628 1 training_loop """owa""" +628 1 negative_sampler """basic""" +628 1 evaluator """rankbased""" +628 2 dataset """yago310""" +628 2 model """rotate""" +628 2 loss """softplus""" +628 2 regularizer """no""" +628 2 optimizer """adam""" +628 2 training_loop """owa""" +628 2 negative_sampler """basic""" +628 2 evaluator """rankbased""" 
+628 3 dataset """yago310""" +628 3 model """rotate""" +628 3 loss """softplus""" +628 3 regularizer """no""" +628 3 optimizer """adam""" +628 3 training_loop """owa""" +628 3 negative_sampler """basic""" +628 3 evaluator """rankbased""" +628 4 dataset """yago310""" +628 4 model """rotate""" +628 4 loss """softplus""" +628 4 regularizer """no""" +628 4 optimizer """adam""" +628 4 training_loop """owa""" +628 4 negative_sampler """basic""" +628 4 evaluator """rankbased""" +628 5 dataset """yago310""" +628 5 model """rotate""" +628 5 loss """softplus""" +628 5 regularizer """no""" +628 5 optimizer """adam""" +628 5 training_loop """owa""" +628 5 negative_sampler """basic""" +628 5 evaluator """rankbased""" +628 6 dataset """yago310""" +628 6 model """rotate""" +628 6 loss """softplus""" +628 6 regularizer """no""" +628 6 optimizer """adam""" +628 6 training_loop """owa""" +628 6 negative_sampler """basic""" +628 6 evaluator """rankbased""" +628 7 dataset """yago310""" +628 7 model """rotate""" +628 7 loss """softplus""" +628 7 regularizer """no""" +628 7 optimizer """adam""" +628 7 training_loop """owa""" +628 7 negative_sampler """basic""" +628 7 evaluator """rankbased""" +628 8 dataset """yago310""" +628 8 model """rotate""" +628 8 loss """softplus""" +628 8 regularizer """no""" +628 8 optimizer """adam""" +628 8 training_loop """owa""" +628 8 negative_sampler """basic""" +628 8 evaluator """rankbased""" +628 9 dataset """yago310""" +628 9 model """rotate""" +628 9 loss """softplus""" +628 9 regularizer """no""" +628 9 optimizer """adam""" +628 9 training_loop """owa""" +628 9 negative_sampler """basic""" +628 9 evaluator """rankbased""" +628 10 dataset """yago310""" +628 10 model """rotate""" +628 10 loss """softplus""" +628 10 regularizer """no""" +628 10 optimizer """adam""" +628 10 training_loop """owa""" +628 10 negative_sampler """basic""" +628 10 evaluator """rankbased""" +628 11 dataset """yago310""" +628 11 model """rotate""" +628 11 loss """softplus""" 
+628 11 regularizer """no""" +628 11 optimizer """adam""" +628 11 training_loop """owa""" +628 11 negative_sampler """basic""" +628 11 evaluator """rankbased""" +628 12 dataset """yago310""" +628 12 model """rotate""" +628 12 loss """softplus""" +628 12 regularizer """no""" +628 12 optimizer """adam""" +628 12 training_loop """owa""" +628 12 negative_sampler """basic""" +628 12 evaluator """rankbased""" +628 13 dataset """yago310""" +628 13 model """rotate""" +628 13 loss """softplus""" +628 13 regularizer """no""" +628 13 optimizer """adam""" +628 13 training_loop """owa""" +628 13 negative_sampler """basic""" +628 13 evaluator """rankbased""" +628 14 dataset """yago310""" +628 14 model """rotate""" +628 14 loss """softplus""" +628 14 regularizer """no""" +628 14 optimizer """adam""" +628 14 training_loop """owa""" +628 14 negative_sampler """basic""" +628 14 evaluator """rankbased""" +628 15 dataset """yago310""" +628 15 model """rotate""" +628 15 loss """softplus""" +628 15 regularizer """no""" +628 15 optimizer """adam""" +628 15 training_loop """owa""" +628 15 negative_sampler """basic""" +628 15 evaluator """rankbased""" +628 16 dataset """yago310""" +628 16 model """rotate""" +628 16 loss """softplus""" +628 16 regularizer """no""" +628 16 optimizer """adam""" +628 16 training_loop """owa""" +628 16 negative_sampler """basic""" +628 16 evaluator """rankbased""" +628 17 dataset """yago310""" +628 17 model """rotate""" +628 17 loss """softplus""" +628 17 regularizer """no""" +628 17 optimizer """adam""" +628 17 training_loop """owa""" +628 17 negative_sampler """basic""" +628 17 evaluator """rankbased""" +628 18 dataset """yago310""" +628 18 model """rotate""" +628 18 loss """softplus""" +628 18 regularizer """no""" +628 18 optimizer """adam""" +628 18 training_loop """owa""" +628 18 negative_sampler """basic""" +628 18 evaluator """rankbased""" +628 19 dataset """yago310""" +628 19 model """rotate""" +628 19 loss """softplus""" +628 19 regularizer """no""" 
+628 19 optimizer """adam""" +628 19 training_loop """owa""" +628 19 negative_sampler """basic""" +628 19 evaluator """rankbased""" +628 20 dataset """yago310""" +628 20 model """rotate""" +628 20 loss """softplus""" +628 20 regularizer """no""" +628 20 optimizer """adam""" +628 20 training_loop """owa""" +628 20 negative_sampler """basic""" +628 20 evaluator """rankbased""" +628 21 dataset """yago310""" +628 21 model """rotate""" +628 21 loss """softplus""" +628 21 regularizer """no""" +628 21 optimizer """adam""" +628 21 training_loop """owa""" +628 21 negative_sampler """basic""" +628 21 evaluator """rankbased""" +628 22 dataset """yago310""" +628 22 model """rotate""" +628 22 loss """softplus""" +628 22 regularizer """no""" +628 22 optimizer """adam""" +628 22 training_loop """owa""" +628 22 negative_sampler """basic""" +628 22 evaluator """rankbased""" +628 23 dataset """yago310""" +628 23 model """rotate""" +628 23 loss """softplus""" +628 23 regularizer """no""" +628 23 optimizer """adam""" +628 23 training_loop """owa""" +628 23 negative_sampler """basic""" +628 23 evaluator """rankbased""" +628 24 dataset """yago310""" +628 24 model """rotate""" +628 24 loss """softplus""" +628 24 regularizer """no""" +628 24 optimizer """adam""" +628 24 training_loop """owa""" +628 24 negative_sampler """basic""" +628 24 evaluator """rankbased""" +628 25 dataset """yago310""" +628 25 model """rotate""" +628 25 loss """softplus""" +628 25 regularizer """no""" +628 25 optimizer """adam""" +628 25 training_loop """owa""" +628 25 negative_sampler """basic""" +628 25 evaluator """rankbased""" +628 26 dataset """yago310""" +628 26 model """rotate""" +628 26 loss """softplus""" +628 26 regularizer """no""" +628 26 optimizer """adam""" +628 26 training_loop """owa""" +628 26 negative_sampler """basic""" +628 26 evaluator """rankbased""" +628 27 dataset """yago310""" +628 27 model """rotate""" +628 27 loss """softplus""" +628 27 regularizer """no""" +628 27 optimizer """adam""" 
+628 27 training_loop """owa""" +628 27 negative_sampler """basic""" +628 27 evaluator """rankbased""" +628 28 dataset """yago310""" +628 28 model """rotate""" +628 28 loss """softplus""" +628 28 regularizer """no""" +628 28 optimizer """adam""" +628 28 training_loop """owa""" +628 28 negative_sampler """basic""" +628 28 evaluator """rankbased""" +628 29 dataset """yago310""" +628 29 model """rotate""" +628 29 loss """softplus""" +628 29 regularizer """no""" +628 29 optimizer """adam""" +628 29 training_loop """owa""" +628 29 negative_sampler """basic""" +628 29 evaluator """rankbased""" +628 30 dataset """yago310""" +628 30 model """rotate""" +628 30 loss """softplus""" +628 30 regularizer """no""" +628 30 optimizer """adam""" +628 30 training_loop """owa""" +628 30 negative_sampler """basic""" +628 30 evaluator """rankbased""" +628 31 dataset """yago310""" +628 31 model """rotate""" +628 31 loss """softplus""" +628 31 regularizer """no""" +628 31 optimizer """adam""" +628 31 training_loop """owa""" +628 31 negative_sampler """basic""" +628 31 evaluator """rankbased""" +628 32 dataset """yago310""" +628 32 model """rotate""" +628 32 loss """softplus""" +628 32 regularizer """no""" +628 32 optimizer """adam""" +628 32 training_loop """owa""" +628 32 negative_sampler """basic""" +628 32 evaluator """rankbased""" +628 33 dataset """yago310""" +628 33 model """rotate""" +628 33 loss """softplus""" +628 33 regularizer """no""" +628 33 optimizer """adam""" +628 33 training_loop """owa""" +628 33 negative_sampler """basic""" +628 33 evaluator """rankbased""" +628 34 dataset """yago310""" +628 34 model """rotate""" +628 34 loss """softplus""" +628 34 regularizer """no""" +628 34 optimizer """adam""" +628 34 training_loop """owa""" +628 34 negative_sampler """basic""" +628 34 evaluator """rankbased""" +629 1 model.embedding_dim 2.0 +629 1 loss.margin 3.5056825226392583 +629 1 optimizer.lr 0.0068290003045728045 +629 1 negative_sampler.num_negs_per_pos 19.0 +629 1 
training.batch_size 2.0 +629 2 model.embedding_dim 2.0 +629 2 loss.margin 3.4448271316266617 +629 2 optimizer.lr 0.007274580126738739 +629 2 negative_sampler.num_negs_per_pos 48.0 +629 2 training.batch_size 2.0 +629 3 model.embedding_dim 0.0 +629 3 loss.margin 2.536780779808673 +629 3 optimizer.lr 0.01841711934301717 +629 3 negative_sampler.num_negs_per_pos 23.0 +629 3 training.batch_size 0.0 +629 4 model.embedding_dim 2.0 +629 4 loss.margin 6.901522722159489 +629 4 optimizer.lr 0.0011023881948428109 +629 4 negative_sampler.num_negs_per_pos 35.0 +629 4 training.batch_size 0.0 +629 5 model.embedding_dim 0.0 +629 5 loss.margin 8.055080759193022 +629 5 optimizer.lr 0.006549800364750737 +629 5 negative_sampler.num_negs_per_pos 31.0 +629 5 training.batch_size 2.0 +629 1 dataset """yago310""" +629 1 model """rotate""" +629 1 loss """marginranking""" +629 1 regularizer """no""" +629 1 optimizer """adam""" +629 1 training_loop """owa""" +629 1 negative_sampler """basic""" +629 1 evaluator """rankbased""" +629 2 dataset """yago310""" +629 2 model """rotate""" +629 2 loss """marginranking""" +629 2 regularizer """no""" +629 2 optimizer """adam""" +629 2 training_loop """owa""" +629 2 negative_sampler """basic""" +629 2 evaluator """rankbased""" +629 3 dataset """yago310""" +629 3 model """rotate""" +629 3 loss """marginranking""" +629 3 regularizer """no""" +629 3 optimizer """adam""" +629 3 training_loop """owa""" +629 3 negative_sampler """basic""" +629 3 evaluator """rankbased""" +629 4 dataset """yago310""" +629 4 model """rotate""" +629 4 loss """marginranking""" +629 4 regularizer """no""" +629 4 optimizer """adam""" +629 4 training_loop """owa""" +629 4 negative_sampler """basic""" +629 4 evaluator """rankbased""" +629 5 dataset """yago310""" +629 5 model """rotate""" +629 5 loss """marginranking""" +629 5 regularizer """no""" +629 5 optimizer """adam""" +629 5 training_loop """owa""" +629 5 negative_sampler """basic""" +629 5 evaluator """rankbased""" +630 1 
model.embedding_dim 2.0 +630 1 loss.margin 8.977660906196471 +630 1 optimizer.lr 0.05572238913503094 +630 1 negative_sampler.num_negs_per_pos 6.0 +630 1 training.batch_size 3.0 +630 2 model.embedding_dim 0.0 +630 2 loss.margin 5.122429638236034 +630 2 optimizer.lr 0.013196823982918776 +630 2 negative_sampler.num_negs_per_pos 41.0 +630 2 training.batch_size 0.0 +630 3 model.embedding_dim 1.0 +630 3 loss.margin 4.632001258945285 +630 3 optimizer.lr 0.0023345594638325235 +630 3 negative_sampler.num_negs_per_pos 18.0 +630 3 training.batch_size 0.0 +630 4 model.embedding_dim 1.0 +630 4 loss.margin 3.757960776762958 +630 4 optimizer.lr 0.004219041645965371 +630 4 negative_sampler.num_negs_per_pos 16.0 +630 4 training.batch_size 3.0 +630 5 model.embedding_dim 1.0 +630 5 loss.margin 6.729746823764382 +630 5 optimizer.lr 0.023336940091319886 +630 5 negative_sampler.num_negs_per_pos 43.0 +630 5 training.batch_size 2.0 +630 6 model.embedding_dim 2.0 +630 6 loss.margin 2.6448569894267506 +630 6 optimizer.lr 0.009326533937520698 +630 6 negative_sampler.num_negs_per_pos 22.0 +630 6 training.batch_size 0.0 +630 7 model.embedding_dim 0.0 +630 7 loss.margin 9.286927108361303 +630 7 optimizer.lr 0.0017477799926928464 +630 7 negative_sampler.num_negs_per_pos 36.0 +630 7 training.batch_size 1.0 +630 8 model.embedding_dim 2.0 +630 8 loss.margin 4.03214010480745 +630 8 optimizer.lr 0.011588687483810147 +630 8 negative_sampler.num_negs_per_pos 12.0 +630 8 training.batch_size 0.0 +630 9 model.embedding_dim 2.0 +630 9 loss.margin 2.6898317855450493 +630 9 optimizer.lr 0.009392558053971682 +630 9 negative_sampler.num_negs_per_pos 6.0 +630 9 training.batch_size 1.0 +630 10 model.embedding_dim 1.0 +630 10 loss.margin 3.550169652348932 +630 10 optimizer.lr 0.004132434846002347 +630 10 negative_sampler.num_negs_per_pos 19.0 +630 10 training.batch_size 2.0 +630 1 dataset """yago310""" +630 1 model """rotate""" +630 1 loss """marginranking""" +630 1 regularizer """no""" +630 1 optimizer 
"""adam""" +630 1 training_loop """owa""" +630 1 negative_sampler """basic""" +630 1 evaluator """rankbased""" +630 2 dataset """yago310""" +630 2 model """rotate""" +630 2 loss """marginranking""" +630 2 regularizer """no""" +630 2 optimizer """adam""" +630 2 training_loop """owa""" +630 2 negative_sampler """basic""" +630 2 evaluator """rankbased""" +630 3 dataset """yago310""" +630 3 model """rotate""" +630 3 loss """marginranking""" +630 3 regularizer """no""" +630 3 optimizer """adam""" +630 3 training_loop """owa""" +630 3 negative_sampler """basic""" +630 3 evaluator """rankbased""" +630 4 dataset """yago310""" +630 4 model """rotate""" +630 4 loss """marginranking""" +630 4 regularizer """no""" +630 4 optimizer """adam""" +630 4 training_loop """owa""" +630 4 negative_sampler """basic""" +630 4 evaluator """rankbased""" +630 5 dataset """yago310""" +630 5 model """rotate""" +630 5 loss """marginranking""" +630 5 regularizer """no""" +630 5 optimizer """adam""" +630 5 training_loop """owa""" +630 5 negative_sampler """basic""" +630 5 evaluator """rankbased""" +630 6 dataset """yago310""" +630 6 model """rotate""" +630 6 loss """marginranking""" +630 6 regularizer """no""" +630 6 optimizer """adam""" +630 6 training_loop """owa""" +630 6 negative_sampler """basic""" +630 6 evaluator """rankbased""" +630 7 dataset """yago310""" +630 7 model """rotate""" +630 7 loss """marginranking""" +630 7 regularizer """no""" +630 7 optimizer """adam""" +630 7 training_loop """owa""" +630 7 negative_sampler """basic""" +630 7 evaluator """rankbased""" +630 8 dataset """yago310""" +630 8 model """rotate""" +630 8 loss """marginranking""" +630 8 regularizer """no""" +630 8 optimizer """adam""" +630 8 training_loop """owa""" +630 8 negative_sampler """basic""" +630 8 evaluator """rankbased""" +630 9 dataset """yago310""" +630 9 model """rotate""" +630 9 loss """marginranking""" +630 9 regularizer """no""" +630 9 optimizer """adam""" +630 9 training_loop """owa""" +630 9 
negative_sampler """basic""" +630 9 evaluator """rankbased""" +630 10 dataset """yago310""" +630 10 model """rotate""" +630 10 loss """marginranking""" +630 10 regularizer """no""" +630 10 optimizer """adam""" +630 10 training_loop """owa""" +630 10 negative_sampler """basic""" +630 10 evaluator """rankbased""" +631 1 model.embedding_dim 2.0 +631 1 loss.margin 13.108536608692852 +631 1 loss.adversarial_temperature 0.3552726044446528 +631 1 optimizer.lr 0.007144347202932864 +631 1 negative_sampler.num_negs_per_pos 24.0 +631 1 training.batch_size 1.0 +631 2 model.embedding_dim 2.0 +631 2 loss.margin 9.584868801154682 +631 2 loss.adversarial_temperature 0.46460406821599287 +631 2 optimizer.lr 0.0020968871150700294 +631 2 negative_sampler.num_negs_per_pos 0.0 +631 2 training.batch_size 2.0 +631 3 model.embedding_dim 1.0 +631 3 loss.margin 19.63307590435376 +631 3 loss.adversarial_temperature 0.38255898694604074 +631 3 optimizer.lr 0.06037036740499302 +631 3 negative_sampler.num_negs_per_pos 33.0 +631 3 training.batch_size 3.0 +631 4 model.embedding_dim 2.0 +631 4 loss.margin 10.325327528487318 +631 4 loss.adversarial_temperature 0.45015016470959557 +631 4 optimizer.lr 0.019570249083599896 +631 4 negative_sampler.num_negs_per_pos 3.0 +631 4 training.batch_size 2.0 +631 5 model.embedding_dim 0.0 +631 5 loss.margin 17.00472086926327 +631 5 loss.adversarial_temperature 0.22911161008162245 +631 5 optimizer.lr 0.006157241023235872 +631 5 negative_sampler.num_negs_per_pos 25.0 +631 5 training.batch_size 0.0 +631 6 model.embedding_dim 2.0 +631 6 loss.margin 8.099409146439225 +631 6 loss.adversarial_temperature 0.10834875409538697 +631 6 optimizer.lr 0.028753638054457714 +631 6 negative_sampler.num_negs_per_pos 14.0 +631 6 training.batch_size 3.0 +631 7 model.embedding_dim 1.0 +631 7 loss.margin 8.822182254349256 +631 7 loss.adversarial_temperature 0.30421596146602653 +631 7 optimizer.lr 0.011117372256691843 +631 7 negative_sampler.num_negs_per_pos 41.0 +631 7 
training.batch_size 3.0 +631 8 model.embedding_dim 2.0 +631 8 loss.margin 26.969086959132266 +631 8 loss.adversarial_temperature 0.2824341604102265 +631 8 optimizer.lr 0.0024156905992942282 +631 8 negative_sampler.num_negs_per_pos 12.0 +631 8 training.batch_size 2.0 +631 9 model.embedding_dim 0.0 +631 9 loss.margin 23.201669062381594 +631 9 loss.adversarial_temperature 0.982953957881682 +631 9 optimizer.lr 0.005641351553297996 +631 9 negative_sampler.num_negs_per_pos 17.0 +631 9 training.batch_size 2.0 +631 10 model.embedding_dim 2.0 +631 10 loss.margin 10.266755876532098 +631 10 loss.adversarial_temperature 0.9988993046536766 +631 10 optimizer.lr 0.018814239417096083 +631 10 negative_sampler.num_negs_per_pos 0.0 +631 10 training.batch_size 1.0 +631 11 model.embedding_dim 0.0 +631 11 loss.margin 28.144419119253833 +631 11 loss.adversarial_temperature 0.7124625397354036 +631 11 optimizer.lr 0.004034012482731614 +631 11 negative_sampler.num_negs_per_pos 35.0 +631 11 training.batch_size 3.0 +631 1 dataset """yago310""" +631 1 model """rotate""" +631 1 loss """nssa""" +631 1 regularizer """no""" +631 1 optimizer """adam""" +631 1 training_loop """owa""" +631 1 negative_sampler """basic""" +631 1 evaluator """rankbased""" +631 2 dataset """yago310""" +631 2 model """rotate""" +631 2 loss """nssa""" +631 2 regularizer """no""" +631 2 optimizer """adam""" +631 2 training_loop """owa""" +631 2 negative_sampler """basic""" +631 2 evaluator """rankbased""" +631 3 dataset """yago310""" +631 3 model """rotate""" +631 3 loss """nssa""" +631 3 regularizer """no""" +631 3 optimizer """adam""" +631 3 training_loop """owa""" +631 3 negative_sampler """basic""" +631 3 evaluator """rankbased""" +631 4 dataset """yago310""" +631 4 model """rotate""" +631 4 loss """nssa""" +631 4 regularizer """no""" +631 4 optimizer """adam""" +631 4 training_loop """owa""" +631 4 negative_sampler """basic""" +631 4 evaluator """rankbased""" +631 5 dataset """yago310""" +631 5 model """rotate""" +631 
5 loss """nssa""" +631 5 regularizer """no""" +631 5 optimizer """adam""" +631 5 training_loop """owa""" +631 5 negative_sampler """basic""" +631 5 evaluator """rankbased""" +631 6 dataset """yago310""" +631 6 model """rotate""" +631 6 loss """nssa""" +631 6 regularizer """no""" +631 6 optimizer """adam""" +631 6 training_loop """owa""" +631 6 negative_sampler """basic""" +631 6 evaluator """rankbased""" +631 7 dataset """yago310""" +631 7 model """rotate""" +631 7 loss """nssa""" +631 7 regularizer """no""" +631 7 optimizer """adam""" +631 7 training_loop """owa""" +631 7 negative_sampler """basic""" +631 7 evaluator """rankbased""" +631 8 dataset """yago310""" +631 8 model """rotate""" +631 8 loss """nssa""" +631 8 regularizer """no""" +631 8 optimizer """adam""" +631 8 training_loop """owa""" +631 8 negative_sampler """basic""" +631 8 evaluator """rankbased""" +631 9 dataset """yago310""" +631 9 model """rotate""" +631 9 loss """nssa""" +631 9 regularizer """no""" +631 9 optimizer """adam""" +631 9 training_loop """owa""" +631 9 negative_sampler """basic""" +631 9 evaluator """rankbased""" +631 10 dataset """yago310""" +631 10 model """rotate""" +631 10 loss """nssa""" +631 10 regularizer """no""" +631 10 optimizer """adam""" +631 10 training_loop """owa""" +631 10 negative_sampler """basic""" +631 10 evaluator """rankbased""" +631 11 dataset """yago310""" +631 11 model """rotate""" +631 11 loss """nssa""" +631 11 regularizer """no""" +631 11 optimizer """adam""" +631 11 training_loop """owa""" +631 11 negative_sampler """basic""" +631 11 evaluator """rankbased""" +632 1 model.embedding_dim 2.0 +632 1 loss.margin 19.33031304584022 +632 1 loss.adversarial_temperature 0.9600942270278319 +632 1 optimizer.lr 0.0036825579199050436 +632 1 negative_sampler.num_negs_per_pos 42.0 +632 1 training.batch_size 0.0 +632 2 model.embedding_dim 0.0 +632 2 loss.margin 11.68769735502081 +632 2 loss.adversarial_temperature 0.7506439915941907 +632 2 optimizer.lr 0.05222429042653592 
+632 2 negative_sampler.num_negs_per_pos 22.0 +632 2 training.batch_size 2.0 +632 3 model.embedding_dim 0.0 +632 3 loss.margin 5.809456978975108 +632 3 loss.adversarial_temperature 0.4644425586751062 +632 3 optimizer.lr 0.010442150241988992 +632 3 negative_sampler.num_negs_per_pos 35.0 +632 3 training.batch_size 0.0 +632 4 model.embedding_dim 0.0 +632 4 loss.margin 17.783029499855967 +632 4 loss.adversarial_temperature 0.1725099262166383 +632 4 optimizer.lr 0.03378462797930175 +632 4 negative_sampler.num_negs_per_pos 21.0 +632 4 training.batch_size 1.0 +632 5 model.embedding_dim 0.0 +632 5 loss.margin 1.2194838386532494 +632 5 loss.adversarial_temperature 0.39172058611190375 +632 5 optimizer.lr 0.0038838766939494358 +632 5 negative_sampler.num_negs_per_pos 0.0 +632 5 training.batch_size 2.0 +632 6 model.embedding_dim 1.0 +632 6 loss.margin 25.88667380969616 +632 6 loss.adversarial_temperature 0.34484236554801795 +632 6 optimizer.lr 0.04582170120031017 +632 6 negative_sampler.num_negs_per_pos 16.0 +632 6 training.batch_size 1.0 +632 7 model.embedding_dim 1.0 +632 7 loss.margin 23.710992950424586 +632 7 loss.adversarial_temperature 0.4805671697765269 +632 7 optimizer.lr 0.001331795969193069 +632 7 negative_sampler.num_negs_per_pos 47.0 +632 7 training.batch_size 2.0 +632 8 model.embedding_dim 0.0 +632 8 loss.margin 21.02323484755273 +632 8 loss.adversarial_temperature 0.31628762583542885 +632 8 optimizer.lr 0.0054072082529153425 +632 8 negative_sampler.num_negs_per_pos 44.0 +632 8 training.batch_size 1.0 +632 9 model.embedding_dim 1.0 +632 9 loss.margin 6.91943147714466 +632 9 loss.adversarial_temperature 0.5239110729796671 +632 9 optimizer.lr 0.010146991970684874 +632 9 negative_sampler.num_negs_per_pos 20.0 +632 9 training.batch_size 0.0 +632 10 model.embedding_dim 1.0 +632 10 loss.margin 26.86807640406109 +632 10 loss.adversarial_temperature 0.7646426174453838 +632 10 optimizer.lr 0.022177062032560618 +632 10 negative_sampler.num_negs_per_pos 46.0 +632 10 
training.batch_size 1.0 +632 11 model.embedding_dim 2.0 +632 11 loss.margin 8.834116490585272 +632 11 loss.adversarial_temperature 0.3421211315638334 +632 11 optimizer.lr 0.010056017819020532 +632 11 negative_sampler.num_negs_per_pos 44.0 +632 11 training.batch_size 0.0 +632 12 model.embedding_dim 0.0 +632 12 loss.margin 2.091326460580418 +632 12 loss.adversarial_temperature 0.8854715064552071 +632 12 optimizer.lr 0.00619609526219091 +632 12 negative_sampler.num_negs_per_pos 46.0 +632 12 training.batch_size 2.0 +632 13 model.embedding_dim 1.0 +632 13 loss.margin 15.519658497517572 +632 13 loss.adversarial_temperature 0.5463612811408668 +632 13 optimizer.lr 0.0029008930077462592 +632 13 negative_sampler.num_negs_per_pos 49.0 +632 13 training.batch_size 1.0 +632 1 dataset """yago310""" +632 1 model """rotate""" +632 1 loss """nssa""" +632 1 regularizer """no""" +632 1 optimizer """adam""" +632 1 training_loop """owa""" +632 1 negative_sampler """basic""" +632 1 evaluator """rankbased""" +632 2 dataset """yago310""" +632 2 model """rotate""" +632 2 loss """nssa""" +632 2 regularizer """no""" +632 2 optimizer """adam""" +632 2 training_loop """owa""" +632 2 negative_sampler """basic""" +632 2 evaluator """rankbased""" +632 3 dataset """yago310""" +632 3 model """rotate""" +632 3 loss """nssa""" +632 3 regularizer """no""" +632 3 optimizer """adam""" +632 3 training_loop """owa""" +632 3 negative_sampler """basic""" +632 3 evaluator """rankbased""" +632 4 dataset """yago310""" +632 4 model """rotate""" +632 4 loss """nssa""" +632 4 regularizer """no""" +632 4 optimizer """adam""" +632 4 training_loop """owa""" +632 4 negative_sampler """basic""" +632 4 evaluator """rankbased""" +632 5 dataset """yago310""" +632 5 model """rotate""" +632 5 loss """nssa""" +632 5 regularizer """no""" +632 5 optimizer """adam""" +632 5 training_loop """owa""" +632 5 negative_sampler """basic""" +632 5 evaluator """rankbased""" +632 6 dataset """yago310""" +632 6 model """rotate""" +632 6 
loss """nssa""" +632 6 regularizer """no""" +632 6 optimizer """adam""" +632 6 training_loop """owa""" +632 6 negative_sampler """basic""" +632 6 evaluator """rankbased""" +632 7 dataset """yago310""" +632 7 model """rotate""" +632 7 loss """nssa""" +632 7 regularizer """no""" +632 7 optimizer """adam""" +632 7 training_loop """owa""" +632 7 negative_sampler """basic""" +632 7 evaluator """rankbased""" +632 8 dataset """yago310""" +632 8 model """rotate""" +632 8 loss """nssa""" +632 8 regularizer """no""" +632 8 optimizer """adam""" +632 8 training_loop """owa""" +632 8 negative_sampler """basic""" +632 8 evaluator """rankbased""" +632 9 dataset """yago310""" +632 9 model """rotate""" +632 9 loss """nssa""" +632 9 regularizer """no""" +632 9 optimizer """adam""" +632 9 training_loop """owa""" +632 9 negative_sampler """basic""" +632 9 evaluator """rankbased""" +632 10 dataset """yago310""" +632 10 model """rotate""" +632 10 loss """nssa""" +632 10 regularizer """no""" +632 10 optimizer """adam""" +632 10 training_loop """owa""" +632 10 negative_sampler """basic""" +632 10 evaluator """rankbased""" +632 11 dataset """yago310""" +632 11 model """rotate""" +632 11 loss """nssa""" +632 11 regularizer """no""" +632 11 optimizer """adam""" +632 11 training_loop """owa""" +632 11 negative_sampler """basic""" +632 11 evaluator """rankbased""" +632 12 dataset """yago310""" +632 12 model """rotate""" +632 12 loss """nssa""" +632 12 regularizer """no""" +632 12 optimizer """adam""" +632 12 training_loop """owa""" +632 12 negative_sampler """basic""" +632 12 evaluator """rankbased""" +632 13 dataset """yago310""" +632 13 model """rotate""" +632 13 loss """nssa""" +632 13 regularizer """no""" +632 13 optimizer """adam""" +632 13 training_loop """owa""" +632 13 negative_sampler """basic""" +632 13 evaluator """rankbased""" +633 1 model.embedding_dim 2.0 +633 1 optimizer.lr 0.02728874907557685 +633 1 negative_sampler.num_negs_per_pos 29.0 +633 1 training.batch_size 0.0 +633 2 
model.embedding_dim 0.0 +633 2 optimizer.lr 0.0011623048977672833 +633 2 negative_sampler.num_negs_per_pos 12.0 +633 2 training.batch_size 1.0 +633 3 model.embedding_dim 2.0 +633 3 optimizer.lr 0.016218856617695523 +633 3 negative_sampler.num_negs_per_pos 0.0 +633 3 training.batch_size 1.0 +633 4 model.embedding_dim 1.0 +633 4 optimizer.lr 0.010628868330044608 +633 4 negative_sampler.num_negs_per_pos 48.0 +633 4 training.batch_size 0.0 +633 5 model.embedding_dim 0.0 +633 5 optimizer.lr 0.002563890846511359 +633 5 negative_sampler.num_negs_per_pos 22.0 +633 5 training.batch_size 1.0 +633 6 model.embedding_dim 0.0 +633 6 optimizer.lr 0.02997270264053356 +633 6 negative_sampler.num_negs_per_pos 22.0 +633 6 training.batch_size 2.0 +633 7 model.embedding_dim 1.0 +633 7 optimizer.lr 0.00424488068961603 +633 7 negative_sampler.num_negs_per_pos 86.0 +633 7 training.batch_size 0.0 +633 8 model.embedding_dim 2.0 +633 8 optimizer.lr 0.001932989751654078 +633 8 negative_sampler.num_negs_per_pos 96.0 +633 8 training.batch_size 0.0 +633 9 model.embedding_dim 0.0 +633 9 optimizer.lr 0.019897283963347983 +633 9 negative_sampler.num_negs_per_pos 93.0 +633 9 training.batch_size 1.0 +633 10 model.embedding_dim 0.0 +633 10 optimizer.lr 0.02827401199983271 +633 10 negative_sampler.num_negs_per_pos 34.0 +633 10 training.batch_size 0.0 +633 11 model.embedding_dim 1.0 +633 11 optimizer.lr 0.009700738030662109 +633 11 negative_sampler.num_negs_per_pos 93.0 +633 11 training.batch_size 1.0 +633 12 model.embedding_dim 0.0 +633 12 optimizer.lr 0.035960523009004694 +633 12 negative_sampler.num_negs_per_pos 85.0 +633 12 training.batch_size 2.0 +633 13 model.embedding_dim 2.0 +633 13 optimizer.lr 0.033174698851757105 +633 13 negative_sampler.num_negs_per_pos 44.0 +633 13 training.batch_size 0.0 +633 14 model.embedding_dim 1.0 +633 14 optimizer.lr 0.0027200249494627043 +633 14 negative_sampler.num_negs_per_pos 77.0 +633 14 training.batch_size 1.0 +633 15 model.embedding_dim 0.0 +633 15 
optimizer.lr 0.01304232627645006 +633 15 negative_sampler.num_negs_per_pos 15.0 +633 15 training.batch_size 0.0 +633 16 model.embedding_dim 1.0 +633 16 optimizer.lr 0.009526878251916516 +633 16 negative_sampler.num_negs_per_pos 98.0 +633 16 training.batch_size 1.0 +633 17 model.embedding_dim 2.0 +633 17 optimizer.lr 0.005792927327665311 +633 17 negative_sampler.num_negs_per_pos 85.0 +633 17 training.batch_size 1.0 +633 18 model.embedding_dim 2.0 +633 18 optimizer.lr 0.022867636876074834 +633 18 negative_sampler.num_negs_per_pos 22.0 +633 18 training.batch_size 2.0 +633 19 model.embedding_dim 0.0 +633 19 optimizer.lr 0.016022590472478254 +633 19 negative_sampler.num_negs_per_pos 69.0 +633 19 training.batch_size 1.0 +633 1 dataset """fb15k237""" +633 1 model """simple""" +633 1 loss """bceaftersigmoid""" +633 1 regularizer """no""" +633 1 optimizer """adam""" +633 1 training_loop """owa""" +633 1 negative_sampler """basic""" +633 1 evaluator """rankbased""" +633 2 dataset """fb15k237""" +633 2 model """simple""" +633 2 loss """bceaftersigmoid""" +633 2 regularizer """no""" +633 2 optimizer """adam""" +633 2 training_loop """owa""" +633 2 negative_sampler """basic""" +633 2 evaluator """rankbased""" +633 3 dataset """fb15k237""" +633 3 model """simple""" +633 3 loss """bceaftersigmoid""" +633 3 regularizer """no""" +633 3 optimizer """adam""" +633 3 training_loop """owa""" +633 3 negative_sampler """basic""" +633 3 evaluator """rankbased""" +633 4 dataset """fb15k237""" +633 4 model """simple""" +633 4 loss """bceaftersigmoid""" +633 4 regularizer """no""" +633 4 optimizer """adam""" +633 4 training_loop """owa""" +633 4 negative_sampler """basic""" +633 4 evaluator """rankbased""" +633 5 dataset """fb15k237""" +633 5 model """simple""" +633 5 loss """bceaftersigmoid""" +633 5 regularizer """no""" +633 5 optimizer """adam""" +633 5 training_loop """owa""" +633 5 negative_sampler """basic""" +633 5 evaluator """rankbased""" +633 6 dataset """fb15k237""" +633 6 model 
"""simple""" +633 6 loss """bceaftersigmoid""" +633 6 regularizer """no""" +633 6 optimizer """adam""" +633 6 training_loop """owa""" +633 6 negative_sampler """basic""" +633 6 evaluator """rankbased""" +633 7 dataset """fb15k237""" +633 7 model """simple""" +633 7 loss """bceaftersigmoid""" +633 7 regularizer """no""" +633 7 optimizer """adam""" +633 7 training_loop """owa""" +633 7 negative_sampler """basic""" +633 7 evaluator """rankbased""" +633 8 dataset """fb15k237""" +633 8 model """simple""" +633 8 loss """bceaftersigmoid""" +633 8 regularizer """no""" +633 8 optimizer """adam""" +633 8 training_loop """owa""" +633 8 negative_sampler """basic""" +633 8 evaluator """rankbased""" +633 9 dataset """fb15k237""" +633 9 model """simple""" +633 9 loss """bceaftersigmoid""" +633 9 regularizer """no""" +633 9 optimizer """adam""" +633 9 training_loop """owa""" +633 9 negative_sampler """basic""" +633 9 evaluator """rankbased""" +633 10 dataset """fb15k237""" +633 10 model """simple""" +633 10 loss """bceaftersigmoid""" +633 10 regularizer """no""" +633 10 optimizer """adam""" +633 10 training_loop """owa""" +633 10 negative_sampler """basic""" +633 10 evaluator """rankbased""" +633 11 dataset """fb15k237""" +633 11 model """simple""" +633 11 loss """bceaftersigmoid""" +633 11 regularizer """no""" +633 11 optimizer """adam""" +633 11 training_loop """owa""" +633 11 negative_sampler """basic""" +633 11 evaluator """rankbased""" +633 12 dataset """fb15k237""" +633 12 model """simple""" +633 12 loss """bceaftersigmoid""" +633 12 regularizer """no""" +633 12 optimizer """adam""" +633 12 training_loop """owa""" +633 12 negative_sampler """basic""" +633 12 evaluator """rankbased""" +633 13 dataset """fb15k237""" +633 13 model """simple""" +633 13 loss """bceaftersigmoid""" +633 13 regularizer """no""" +633 13 optimizer """adam""" +633 13 training_loop """owa""" +633 13 negative_sampler """basic""" +633 13 evaluator """rankbased""" +633 14 dataset """fb15k237""" +633 14 
model """simple""" +633 14 loss """bceaftersigmoid""" +633 14 regularizer """no""" +633 14 optimizer """adam""" +633 14 training_loop """owa""" +633 14 negative_sampler """basic""" +633 14 evaluator """rankbased""" +633 15 dataset """fb15k237""" +633 15 model """simple""" +633 15 loss """bceaftersigmoid""" +633 15 regularizer """no""" +633 15 optimizer """adam""" +633 15 training_loop """owa""" +633 15 negative_sampler """basic""" +633 15 evaluator """rankbased""" +633 16 dataset """fb15k237""" +633 16 model """simple""" +633 16 loss """bceaftersigmoid""" +633 16 regularizer """no""" +633 16 optimizer """adam""" +633 16 training_loop """owa""" +633 16 negative_sampler """basic""" +633 16 evaluator """rankbased""" +633 17 dataset """fb15k237""" +633 17 model """simple""" +633 17 loss """bceaftersigmoid""" +633 17 regularizer """no""" +633 17 optimizer """adam""" +633 17 training_loop """owa""" +633 17 negative_sampler """basic""" +633 17 evaluator """rankbased""" +633 18 dataset """fb15k237""" +633 18 model """simple""" +633 18 loss """bceaftersigmoid""" +633 18 regularizer """no""" +633 18 optimizer """adam""" +633 18 training_loop """owa""" +633 18 negative_sampler """basic""" +633 18 evaluator """rankbased""" +633 19 dataset """fb15k237""" +633 19 model """simple""" +633 19 loss """bceaftersigmoid""" +633 19 regularizer """no""" +633 19 optimizer """adam""" +633 19 training_loop """owa""" +633 19 negative_sampler """basic""" +633 19 evaluator """rankbased""" +634 1 model.embedding_dim 0.0 +634 1 optimizer.lr 0.09602676185290505 +634 1 negative_sampler.num_negs_per_pos 45.0 +634 1 training.batch_size 1.0 +634 2 model.embedding_dim 0.0 +634 2 optimizer.lr 0.081805506112531 +634 2 negative_sampler.num_negs_per_pos 63.0 +634 2 training.batch_size 0.0 +634 3 model.embedding_dim 2.0 +634 3 optimizer.lr 0.008805804291629034 +634 3 negative_sampler.num_negs_per_pos 51.0 +634 3 training.batch_size 0.0 +634 4 model.embedding_dim 0.0 +634 4 optimizer.lr 0.002230149483792246 
+634 4 negative_sampler.num_negs_per_pos 93.0 +634 4 training.batch_size 0.0 +634 5 model.embedding_dim 2.0 +634 5 optimizer.lr 0.0038984447020876697 +634 5 negative_sampler.num_negs_per_pos 50.0 +634 5 training.batch_size 2.0 +634 6 model.embedding_dim 2.0 +634 6 optimizer.lr 0.003365580224637498 +634 6 negative_sampler.num_negs_per_pos 63.0 +634 6 training.batch_size 1.0 +634 7 model.embedding_dim 0.0 +634 7 optimizer.lr 0.008997242759220127 +634 7 negative_sampler.num_negs_per_pos 71.0 +634 7 training.batch_size 1.0 +634 8 model.embedding_dim 1.0 +634 8 optimizer.lr 0.02951579633727705 +634 8 negative_sampler.num_negs_per_pos 46.0 +634 8 training.batch_size 1.0 +634 9 model.embedding_dim 2.0 +634 9 optimizer.lr 0.041166475515258184 +634 9 negative_sampler.num_negs_per_pos 11.0 +634 9 training.batch_size 1.0 +634 10 model.embedding_dim 0.0 +634 10 optimizer.lr 0.0013741377509276778 +634 10 negative_sampler.num_negs_per_pos 70.0 +634 10 training.batch_size 0.0 +634 11 model.embedding_dim 2.0 +634 11 optimizer.lr 0.0030080608193635344 +634 11 negative_sampler.num_negs_per_pos 98.0 +634 11 training.batch_size 0.0 +634 12 model.embedding_dim 0.0 +634 12 optimizer.lr 0.046708034737619405 +634 12 negative_sampler.num_negs_per_pos 44.0 +634 12 training.batch_size 0.0 +634 13 model.embedding_dim 0.0 +634 13 optimizer.lr 0.04212202580083398 +634 13 negative_sampler.num_negs_per_pos 92.0 +634 13 training.batch_size 2.0 +634 14 model.embedding_dim 1.0 +634 14 optimizer.lr 0.0013818908128929671 +634 14 negative_sampler.num_negs_per_pos 33.0 +634 14 training.batch_size 1.0 +634 15 model.embedding_dim 2.0 +634 15 optimizer.lr 0.0014399886848974512 +634 15 negative_sampler.num_negs_per_pos 7.0 +634 15 training.batch_size 1.0 +634 16 model.embedding_dim 1.0 +634 16 optimizer.lr 0.043583309395287324 +634 16 negative_sampler.num_negs_per_pos 83.0 +634 16 training.batch_size 2.0 +634 17 model.embedding_dim 1.0 +634 17 optimizer.lr 0.013558072580716948 +634 17 
negative_sampler.num_negs_per_pos 96.0 +634 17 training.batch_size 2.0 +634 18 model.embedding_dim 0.0 +634 18 optimizer.lr 0.026758213566579023 +634 18 negative_sampler.num_negs_per_pos 33.0 +634 18 training.batch_size 2.0 +634 19 model.embedding_dim 2.0 +634 19 optimizer.lr 0.0013082526504010034 +634 19 negative_sampler.num_negs_per_pos 29.0 +634 19 training.batch_size 2.0 +634 1 dataset """fb15k237""" +634 1 model """simple""" +634 1 loss """softplus""" +634 1 regularizer """no""" +634 1 optimizer """adam""" +634 1 training_loop """owa""" +634 1 negative_sampler """basic""" +634 1 evaluator """rankbased""" +634 2 dataset """fb15k237""" +634 2 model """simple""" +634 2 loss """softplus""" +634 2 regularizer """no""" +634 2 optimizer """adam""" +634 2 training_loop """owa""" +634 2 negative_sampler """basic""" +634 2 evaluator """rankbased""" +634 3 dataset """fb15k237""" +634 3 model """simple""" +634 3 loss """softplus""" +634 3 regularizer """no""" +634 3 optimizer """adam""" +634 3 training_loop """owa""" +634 3 negative_sampler """basic""" +634 3 evaluator """rankbased""" +634 4 dataset """fb15k237""" +634 4 model """simple""" +634 4 loss """softplus""" +634 4 regularizer """no""" +634 4 optimizer """adam""" +634 4 training_loop """owa""" +634 4 negative_sampler """basic""" +634 4 evaluator """rankbased""" +634 5 dataset """fb15k237""" +634 5 model """simple""" +634 5 loss """softplus""" +634 5 regularizer """no""" +634 5 optimizer """adam""" +634 5 training_loop """owa""" +634 5 negative_sampler """basic""" +634 5 evaluator """rankbased""" +634 6 dataset """fb15k237""" +634 6 model """simple""" +634 6 loss """softplus""" +634 6 regularizer """no""" +634 6 optimizer """adam""" +634 6 training_loop """owa""" +634 6 negative_sampler """basic""" +634 6 evaluator """rankbased""" +634 7 dataset """fb15k237""" +634 7 model """simple""" +634 7 loss """softplus""" +634 7 regularizer """no""" +634 7 optimizer """adam""" +634 7 training_loop """owa""" +634 7 
negative_sampler """basic""" +634 7 evaluator """rankbased""" +634 8 dataset """fb15k237""" +634 8 model """simple""" +634 8 loss """softplus""" +634 8 regularizer """no""" +634 8 optimizer """adam""" +634 8 training_loop """owa""" +634 8 negative_sampler """basic""" +634 8 evaluator """rankbased""" +634 9 dataset """fb15k237""" +634 9 model """simple""" +634 9 loss """softplus""" +634 9 regularizer """no""" +634 9 optimizer """adam""" +634 9 training_loop """owa""" +634 9 negative_sampler """basic""" +634 9 evaluator """rankbased""" +634 10 dataset """fb15k237""" +634 10 model """simple""" +634 10 loss """softplus""" +634 10 regularizer """no""" +634 10 optimizer """adam""" +634 10 training_loop """owa""" +634 10 negative_sampler """basic""" +634 10 evaluator """rankbased""" +634 11 dataset """fb15k237""" +634 11 model """simple""" +634 11 loss """softplus""" +634 11 regularizer """no""" +634 11 optimizer """adam""" +634 11 training_loop """owa""" +634 11 negative_sampler """basic""" +634 11 evaluator """rankbased""" +634 12 dataset """fb15k237""" +634 12 model """simple""" +634 12 loss """softplus""" +634 12 regularizer """no""" +634 12 optimizer """adam""" +634 12 training_loop """owa""" +634 12 negative_sampler """basic""" +634 12 evaluator """rankbased""" +634 13 dataset """fb15k237""" +634 13 model """simple""" +634 13 loss """softplus""" +634 13 regularizer """no""" +634 13 optimizer """adam""" +634 13 training_loop """owa""" +634 13 negative_sampler """basic""" +634 13 evaluator """rankbased""" +634 14 dataset """fb15k237""" +634 14 model """simple""" +634 14 loss """softplus""" +634 14 regularizer """no""" +634 14 optimizer """adam""" +634 14 training_loop """owa""" +634 14 negative_sampler """basic""" +634 14 evaluator """rankbased""" +634 15 dataset """fb15k237""" +634 15 model """simple""" +634 15 loss """softplus""" +634 15 regularizer """no""" +634 15 optimizer """adam""" +634 15 training_loop """owa""" +634 15 negative_sampler """basic""" +634 15 
evaluator """rankbased""" +634 16 dataset """fb15k237""" +634 16 model """simple""" +634 16 loss """softplus""" +634 16 regularizer """no""" +634 16 optimizer """adam""" +634 16 training_loop """owa""" +634 16 negative_sampler """basic""" +634 16 evaluator """rankbased""" +634 17 dataset """fb15k237""" +634 17 model """simple""" +634 17 loss """softplus""" +634 17 regularizer """no""" +634 17 optimizer """adam""" +634 17 training_loop """owa""" +634 17 negative_sampler """basic""" +634 17 evaluator """rankbased""" +634 18 dataset """fb15k237""" +634 18 model """simple""" +634 18 loss """softplus""" +634 18 regularizer """no""" +634 18 optimizer """adam""" +634 18 training_loop """owa""" +634 18 negative_sampler """basic""" +634 18 evaluator """rankbased""" +634 19 dataset """fb15k237""" +634 19 model """simple""" +634 19 loss """softplus""" +634 19 regularizer """no""" +634 19 optimizer """adam""" +634 19 training_loop """owa""" +634 19 negative_sampler """basic""" +634 19 evaluator """rankbased""" +635 1 model.embedding_dim 2.0 +635 1 optimizer.lr 0.03796592174856771 +635 1 negative_sampler.num_negs_per_pos 73.0 +635 1 training.batch_size 2.0 +635 2 model.embedding_dim 1.0 +635 2 optimizer.lr 0.07798586395379348 +635 2 negative_sampler.num_negs_per_pos 88.0 +635 2 training.batch_size 2.0 +635 3 model.embedding_dim 0.0 +635 3 optimizer.lr 0.0017236552570505262 +635 3 negative_sampler.num_negs_per_pos 78.0 +635 3 training.batch_size 0.0 +635 4 model.embedding_dim 2.0 +635 4 optimizer.lr 0.020976056553541157 +635 4 negative_sampler.num_negs_per_pos 34.0 +635 4 training.batch_size 0.0 +635 5 model.embedding_dim 1.0 +635 5 optimizer.lr 0.09241657114779443 +635 5 negative_sampler.num_negs_per_pos 3.0 +635 5 training.batch_size 2.0 +635 6 model.embedding_dim 2.0 +635 6 optimizer.lr 0.005551938433210536 +635 6 negative_sampler.num_negs_per_pos 90.0 +635 6 training.batch_size 1.0 +635 7 model.embedding_dim 0.0 +635 7 optimizer.lr 0.001959808355928135 +635 7 
negative_sampler.num_negs_per_pos 27.0 +635 7 training.batch_size 0.0 +635 8 model.embedding_dim 0.0 +635 8 optimizer.lr 0.012090316261337865 +635 8 negative_sampler.num_negs_per_pos 68.0 +635 8 training.batch_size 0.0 +635 9 model.embedding_dim 1.0 +635 9 optimizer.lr 0.005223184235046195 +635 9 negative_sampler.num_negs_per_pos 2.0 +635 9 training.batch_size 0.0 +635 10 model.embedding_dim 2.0 +635 10 optimizer.lr 0.00621582287129534 +635 10 negative_sampler.num_negs_per_pos 63.0 +635 10 training.batch_size 0.0 +635 11 model.embedding_dim 1.0 +635 11 optimizer.lr 0.0021672192688196247 +635 11 negative_sampler.num_negs_per_pos 71.0 +635 11 training.batch_size 2.0 +635 12 model.embedding_dim 0.0 +635 12 optimizer.lr 0.028225458328568424 +635 12 negative_sampler.num_negs_per_pos 56.0 +635 12 training.batch_size 1.0 +635 13 model.embedding_dim 1.0 +635 13 optimizer.lr 0.007731578076907556 +635 13 negative_sampler.num_negs_per_pos 95.0 +635 13 training.batch_size 2.0 +635 14 model.embedding_dim 1.0 +635 14 optimizer.lr 0.06280233120852745 +635 14 negative_sampler.num_negs_per_pos 79.0 +635 14 training.batch_size 0.0 +635 15 model.embedding_dim 1.0 +635 15 optimizer.lr 0.005089322572255459 +635 15 negative_sampler.num_negs_per_pos 30.0 +635 15 training.batch_size 1.0 +635 16 model.embedding_dim 0.0 +635 16 optimizer.lr 0.0923306705589615 +635 16 negative_sampler.num_negs_per_pos 31.0 +635 16 training.batch_size 0.0 +635 17 model.embedding_dim 2.0 +635 17 optimizer.lr 0.008612141507991783 +635 17 negative_sampler.num_negs_per_pos 24.0 +635 17 training.batch_size 0.0 +635 18 model.embedding_dim 0.0 +635 18 optimizer.lr 0.036703987515323576 +635 18 negative_sampler.num_negs_per_pos 80.0 +635 18 training.batch_size 0.0 +635 19 model.embedding_dim 2.0 +635 19 optimizer.lr 0.0013101161623938911 +635 19 negative_sampler.num_negs_per_pos 39.0 +635 19 training.batch_size 1.0 +635 20 model.embedding_dim 2.0 +635 20 optimizer.lr 0.07711948988959523 +635 20 
negative_sampler.num_negs_per_pos 56.0 +635 20 training.batch_size 2.0 +635 21 model.embedding_dim 2.0 +635 21 optimizer.lr 0.003491510464672106 +635 21 negative_sampler.num_negs_per_pos 15.0 +635 21 training.batch_size 1.0 +635 22 model.embedding_dim 1.0 +635 22 optimizer.lr 0.0024008418174406255 +635 22 negative_sampler.num_negs_per_pos 4.0 +635 22 training.batch_size 0.0 +635 23 model.embedding_dim 0.0 +635 23 optimizer.lr 0.06260730046826576 +635 23 negative_sampler.num_negs_per_pos 41.0 +635 23 training.batch_size 0.0 +635 24 model.embedding_dim 1.0 +635 24 optimizer.lr 0.020352752919045126 +635 24 negative_sampler.num_negs_per_pos 8.0 +635 24 training.batch_size 0.0 +635 25 model.embedding_dim 0.0 +635 25 optimizer.lr 0.008504779824167457 +635 25 negative_sampler.num_negs_per_pos 86.0 +635 25 training.batch_size 1.0 +635 26 model.embedding_dim 1.0 +635 26 optimizer.lr 0.0011746948748791648 +635 26 negative_sampler.num_negs_per_pos 10.0 +635 26 training.batch_size 0.0 +635 27 model.embedding_dim 0.0 +635 27 optimizer.lr 0.0012041411967962756 +635 27 negative_sampler.num_negs_per_pos 74.0 +635 27 training.batch_size 2.0 +635 28 model.embedding_dim 1.0 +635 28 optimizer.lr 0.09346104108469816 +635 28 negative_sampler.num_negs_per_pos 45.0 +635 28 training.batch_size 1.0 +635 29 model.embedding_dim 2.0 +635 29 optimizer.lr 0.020016862802188843 +635 29 negative_sampler.num_negs_per_pos 13.0 +635 29 training.batch_size 2.0 +635 30 model.embedding_dim 0.0 +635 30 optimizer.lr 0.002107070787737209 +635 30 negative_sampler.num_negs_per_pos 84.0 +635 30 training.batch_size 2.0 +635 31 model.embedding_dim 0.0 +635 31 optimizer.lr 0.0035388585383158908 +635 31 negative_sampler.num_negs_per_pos 79.0 +635 31 training.batch_size 1.0 +635 32 model.embedding_dim 2.0 +635 32 optimizer.lr 0.020317165095749202 +635 32 negative_sampler.num_negs_per_pos 20.0 +635 32 training.batch_size 1.0 +635 33 model.embedding_dim 2.0 +635 33 optimizer.lr 0.08238464237751188 +635 33 
negative_sampler.num_negs_per_pos 51.0 +635 33 training.batch_size 2.0 +635 34 model.embedding_dim 1.0 +635 34 optimizer.lr 0.023555167213610284 +635 34 negative_sampler.num_negs_per_pos 79.0 +635 34 training.batch_size 1.0 +635 35 model.embedding_dim 0.0 +635 35 optimizer.lr 0.04750219817696295 +635 35 negative_sampler.num_negs_per_pos 76.0 +635 35 training.batch_size 2.0 +635 36 model.embedding_dim 0.0 +635 36 optimizer.lr 0.013107231945295165 +635 36 negative_sampler.num_negs_per_pos 61.0 +635 36 training.batch_size 0.0 +635 37 model.embedding_dim 0.0 +635 37 optimizer.lr 0.027981798245869334 +635 37 negative_sampler.num_negs_per_pos 34.0 +635 37 training.batch_size 1.0 +635 38 model.embedding_dim 1.0 +635 38 optimizer.lr 0.0020909683810637074 +635 38 negative_sampler.num_negs_per_pos 14.0 +635 38 training.batch_size 0.0 +635 39 model.embedding_dim 0.0 +635 39 optimizer.lr 0.06182693026564029 +635 39 negative_sampler.num_negs_per_pos 2.0 +635 39 training.batch_size 1.0 +635 40 model.embedding_dim 2.0 +635 40 optimizer.lr 0.01863499420782199 +635 40 negative_sampler.num_negs_per_pos 1.0 +635 40 training.batch_size 1.0 +635 41 model.embedding_dim 0.0 +635 41 optimizer.lr 0.0038366102902673504 +635 41 negative_sampler.num_negs_per_pos 52.0 +635 41 training.batch_size 2.0 +635 42 model.embedding_dim 1.0 +635 42 optimizer.lr 0.07643169442215209 +635 42 negative_sampler.num_negs_per_pos 24.0 +635 42 training.batch_size 0.0 +635 1 dataset """fb15k237""" +635 1 model """simple""" +635 1 loss """bceaftersigmoid""" +635 1 regularizer """no""" +635 1 optimizer """adam""" +635 1 training_loop """owa""" +635 1 negative_sampler """basic""" +635 1 evaluator """rankbased""" +635 2 dataset """fb15k237""" +635 2 model """simple""" +635 2 loss """bceaftersigmoid""" +635 2 regularizer """no""" +635 2 optimizer """adam""" +635 2 training_loop """owa""" +635 2 negative_sampler """basic""" +635 2 evaluator """rankbased""" +635 3 dataset """fb15k237""" +635 3 model """simple""" +635 3 
loss """bceaftersigmoid""" +635 3 regularizer """no""" +635 3 optimizer """adam""" +635 3 training_loop """owa""" +635 3 negative_sampler """basic""" +635 3 evaluator """rankbased""" +635 4 dataset """fb15k237""" +635 4 model """simple""" +635 4 loss """bceaftersigmoid""" +635 4 regularizer """no""" +635 4 optimizer """adam""" +635 4 training_loop """owa""" +635 4 negative_sampler """basic""" +635 4 evaluator """rankbased""" +635 5 dataset """fb15k237""" +635 5 model """simple""" +635 5 loss """bceaftersigmoid""" +635 5 regularizer """no""" +635 5 optimizer """adam""" +635 5 training_loop """owa""" +635 5 negative_sampler """basic""" +635 5 evaluator """rankbased""" +635 6 dataset """fb15k237""" +635 6 model """simple""" +635 6 loss """bceaftersigmoid""" +635 6 regularizer """no""" +635 6 optimizer """adam""" +635 6 training_loop """owa""" +635 6 negative_sampler """basic""" +635 6 evaluator """rankbased""" +635 7 dataset """fb15k237""" +635 7 model """simple""" +635 7 loss """bceaftersigmoid""" +635 7 regularizer """no""" +635 7 optimizer """adam""" +635 7 training_loop """owa""" +635 7 negative_sampler """basic""" +635 7 evaluator """rankbased""" +635 8 dataset """fb15k237""" +635 8 model """simple""" +635 8 loss """bceaftersigmoid""" +635 8 regularizer """no""" +635 8 optimizer """adam""" +635 8 training_loop """owa""" +635 8 negative_sampler """basic""" +635 8 evaluator """rankbased""" +635 9 dataset """fb15k237""" +635 9 model """simple""" +635 9 loss """bceaftersigmoid""" +635 9 regularizer """no""" +635 9 optimizer """adam""" +635 9 training_loop """owa""" +635 9 negative_sampler """basic""" +635 9 evaluator """rankbased""" +635 10 dataset """fb15k237""" +635 10 model """simple""" +635 10 loss """bceaftersigmoid""" +635 10 regularizer """no""" +635 10 optimizer """adam""" +635 10 training_loop """owa""" +635 10 negative_sampler """basic""" +635 10 evaluator """rankbased""" +635 11 dataset """fb15k237""" +635 11 model """simple""" +635 11 loss 
"""bceaftersigmoid""" +635 11 regularizer """no""" +635 11 optimizer """adam""" +635 11 training_loop """owa""" +635 11 negative_sampler """basic""" +635 11 evaluator """rankbased""" +635 12 dataset """fb15k237""" +635 12 model """simple""" +635 12 loss """bceaftersigmoid""" +635 12 regularizer """no""" +635 12 optimizer """adam""" +635 12 training_loop """owa""" +635 12 negative_sampler """basic""" +635 12 evaluator """rankbased""" +635 13 dataset """fb15k237""" +635 13 model """simple""" +635 13 loss """bceaftersigmoid""" +635 13 regularizer """no""" +635 13 optimizer """adam""" +635 13 training_loop """owa""" +635 13 negative_sampler """basic""" +635 13 evaluator """rankbased""" +635 14 dataset """fb15k237""" +635 14 model """simple""" +635 14 loss """bceaftersigmoid""" +635 14 regularizer """no""" +635 14 optimizer """adam""" +635 14 training_loop """owa""" +635 14 negative_sampler """basic""" +635 14 evaluator """rankbased""" +635 15 dataset """fb15k237""" +635 15 model """simple""" +635 15 loss """bceaftersigmoid""" +635 15 regularizer """no""" +635 15 optimizer """adam""" +635 15 training_loop """owa""" +635 15 negative_sampler """basic""" +635 15 evaluator """rankbased""" +635 16 dataset """fb15k237""" +635 16 model """simple""" +635 16 loss """bceaftersigmoid""" +635 16 regularizer """no""" +635 16 optimizer """adam""" +635 16 training_loop """owa""" +635 16 negative_sampler """basic""" +635 16 evaluator """rankbased""" +635 17 dataset """fb15k237""" +635 17 model """simple""" +635 17 loss """bceaftersigmoid""" +635 17 regularizer """no""" +635 17 optimizer """adam""" +635 17 training_loop """owa""" +635 17 negative_sampler """basic""" +635 17 evaluator """rankbased""" +635 18 dataset """fb15k237""" +635 18 model """simple""" +635 18 loss """bceaftersigmoid""" +635 18 regularizer """no""" +635 18 optimizer """adam""" +635 18 training_loop """owa""" +635 18 negative_sampler """basic""" +635 18 evaluator """rankbased""" +635 19 dataset """fb15k237""" +635 19 
model """simple""" +635 19 loss """bceaftersigmoid""" +635 19 regularizer """no""" +635 19 optimizer """adam""" +635 19 training_loop """owa""" +635 19 negative_sampler """basic""" +635 19 evaluator """rankbased""" +635 20 dataset """fb15k237""" +635 20 model """simple""" +635 20 loss """bceaftersigmoid""" +635 20 regularizer """no""" +635 20 optimizer """adam""" +635 20 training_loop """owa""" +635 20 negative_sampler """basic""" +635 20 evaluator """rankbased""" +635 21 dataset """fb15k237""" +635 21 model """simple""" +635 21 loss """bceaftersigmoid""" +635 21 regularizer """no""" +635 21 optimizer """adam""" +635 21 training_loop """owa""" +635 21 negative_sampler """basic""" +635 21 evaluator """rankbased""" +635 22 dataset """fb15k237""" +635 22 model """simple""" +635 22 loss """bceaftersigmoid""" +635 22 regularizer """no""" +635 22 optimizer """adam""" +635 22 training_loop """owa""" +635 22 negative_sampler """basic""" +635 22 evaluator """rankbased""" +635 23 dataset """fb15k237""" +635 23 model """simple""" +635 23 loss """bceaftersigmoid""" +635 23 regularizer """no""" +635 23 optimizer """adam""" +635 23 training_loop """owa""" +635 23 negative_sampler """basic""" +635 23 evaluator """rankbased""" +635 24 dataset """fb15k237""" +635 24 model """simple""" +635 24 loss """bceaftersigmoid""" +635 24 regularizer """no""" +635 24 optimizer """adam""" +635 24 training_loop """owa""" +635 24 negative_sampler """basic""" +635 24 evaluator """rankbased""" +635 25 dataset """fb15k237""" +635 25 model """simple""" +635 25 loss """bceaftersigmoid""" +635 25 regularizer """no""" +635 25 optimizer """adam""" +635 25 training_loop """owa""" +635 25 negative_sampler """basic""" +635 25 evaluator """rankbased""" +635 26 dataset """fb15k237""" +635 26 model """simple""" +635 26 loss """bceaftersigmoid""" +635 26 regularizer """no""" +635 26 optimizer """adam""" +635 26 training_loop """owa""" +635 26 negative_sampler """basic""" +635 26 evaluator """rankbased""" +635 
27 dataset """fb15k237""" +635 27 model """simple""" +635 27 loss """bceaftersigmoid""" +635 27 regularizer """no""" +635 27 optimizer """adam""" +635 27 training_loop """owa""" +635 27 negative_sampler """basic""" +635 27 evaluator """rankbased""" +635 28 dataset """fb15k237""" +635 28 model """simple""" +635 28 loss """bceaftersigmoid""" +635 28 regularizer """no""" +635 28 optimizer """adam""" +635 28 training_loop """owa""" +635 28 negative_sampler """basic""" +635 28 evaluator """rankbased""" +635 29 dataset """fb15k237""" +635 29 model """simple""" +635 29 loss """bceaftersigmoid""" +635 29 regularizer """no""" +635 29 optimizer """adam""" +635 29 training_loop """owa""" +635 29 negative_sampler """basic""" +635 29 evaluator """rankbased""" +635 30 dataset """fb15k237""" +635 30 model """simple""" +635 30 loss """bceaftersigmoid""" +635 30 regularizer """no""" +635 30 optimizer """adam""" +635 30 training_loop """owa""" +635 30 negative_sampler """basic""" +635 30 evaluator """rankbased""" +635 31 dataset """fb15k237""" +635 31 model """simple""" +635 31 loss """bceaftersigmoid""" +635 31 regularizer """no""" +635 31 optimizer """adam""" +635 31 training_loop """owa""" +635 31 negative_sampler """basic""" +635 31 evaluator """rankbased""" +635 32 dataset """fb15k237""" +635 32 model """simple""" +635 32 loss """bceaftersigmoid""" +635 32 regularizer """no""" +635 32 optimizer """adam""" +635 32 training_loop """owa""" +635 32 negative_sampler """basic""" +635 32 evaluator """rankbased""" +635 33 dataset """fb15k237""" +635 33 model """simple""" +635 33 loss """bceaftersigmoid""" +635 33 regularizer """no""" +635 33 optimizer """adam""" +635 33 training_loop """owa""" +635 33 negative_sampler """basic""" +635 33 evaluator """rankbased""" +635 34 dataset """fb15k237""" +635 34 model """simple""" +635 34 loss """bceaftersigmoid""" +635 34 regularizer """no""" +635 34 optimizer """adam""" +635 34 training_loop """owa""" +635 34 negative_sampler """basic""" +635 
34 evaluator """rankbased""" +635 35 dataset """fb15k237""" +635 35 model """simple""" +635 35 loss """bceaftersigmoid""" +635 35 regularizer """no""" +635 35 optimizer """adam""" +635 35 training_loop """owa""" +635 35 negative_sampler """basic""" +635 35 evaluator """rankbased""" +635 36 dataset """fb15k237""" +635 36 model """simple""" +635 36 loss """bceaftersigmoid""" +635 36 regularizer """no""" +635 36 optimizer """adam""" +635 36 training_loop """owa""" +635 36 negative_sampler """basic""" +635 36 evaluator """rankbased""" +635 37 dataset """fb15k237""" +635 37 model """simple""" +635 37 loss """bceaftersigmoid""" +635 37 regularizer """no""" +635 37 optimizer """adam""" +635 37 training_loop """owa""" +635 37 negative_sampler """basic""" +635 37 evaluator """rankbased""" +635 38 dataset """fb15k237""" +635 38 model """simple""" +635 38 loss """bceaftersigmoid""" +635 38 regularizer """no""" +635 38 optimizer """adam""" +635 38 training_loop """owa""" +635 38 negative_sampler """basic""" +635 38 evaluator """rankbased""" +635 39 dataset """fb15k237""" +635 39 model """simple""" +635 39 loss """bceaftersigmoid""" +635 39 regularizer """no""" +635 39 optimizer """adam""" +635 39 training_loop """owa""" +635 39 negative_sampler """basic""" +635 39 evaluator """rankbased""" +635 40 dataset """fb15k237""" +635 40 model """simple""" +635 40 loss """bceaftersigmoid""" +635 40 regularizer """no""" +635 40 optimizer """adam""" +635 40 training_loop """owa""" +635 40 negative_sampler """basic""" +635 40 evaluator """rankbased""" +635 41 dataset """fb15k237""" +635 41 model """simple""" +635 41 loss """bceaftersigmoid""" +635 41 regularizer """no""" +635 41 optimizer """adam""" +635 41 training_loop """owa""" +635 41 negative_sampler """basic""" +635 41 evaluator """rankbased""" +635 42 dataset """fb15k237""" +635 42 model """simple""" +635 42 loss """bceaftersigmoid""" +635 42 regularizer """no""" +635 42 optimizer """adam""" +635 42 training_loop """owa""" +635 42 
negative_sampler """basic""" +635 42 evaluator """rankbased""" +636 1 model.embedding_dim 0.0 +636 1 optimizer.lr 0.004541140267221686 +636 1 negative_sampler.num_negs_per_pos 66.0 +636 1 training.batch_size 0.0 +636 2 model.embedding_dim 1.0 +636 2 optimizer.lr 0.022023758956257997 +636 2 negative_sampler.num_negs_per_pos 45.0 +636 2 training.batch_size 0.0 +636 3 model.embedding_dim 2.0 +636 3 optimizer.lr 0.0010719592639580194 +636 3 negative_sampler.num_negs_per_pos 53.0 +636 3 training.batch_size 1.0 +636 4 model.embedding_dim 1.0 +636 4 optimizer.lr 0.006452117530563688 +636 4 negative_sampler.num_negs_per_pos 30.0 +636 4 training.batch_size 2.0 +636 5 model.embedding_dim 1.0 +636 5 optimizer.lr 0.0518254579859757 +636 5 negative_sampler.num_negs_per_pos 84.0 +636 5 training.batch_size 1.0 +636 6 model.embedding_dim 1.0 +636 6 optimizer.lr 0.047990368699206476 +636 6 negative_sampler.num_negs_per_pos 96.0 +636 6 training.batch_size 1.0 +636 7 model.embedding_dim 2.0 +636 7 optimizer.lr 0.0797184556857352 +636 7 negative_sampler.num_negs_per_pos 65.0 +636 7 training.batch_size 1.0 +636 8 model.embedding_dim 1.0 +636 8 optimizer.lr 0.07976135644525956 +636 8 negative_sampler.num_negs_per_pos 10.0 +636 8 training.batch_size 2.0 +636 9 model.embedding_dim 1.0 +636 9 optimizer.lr 0.021898599255138818 +636 9 negative_sampler.num_negs_per_pos 66.0 +636 9 training.batch_size 0.0 +636 10 model.embedding_dim 2.0 +636 10 optimizer.lr 0.05495136355273971 +636 10 negative_sampler.num_negs_per_pos 74.0 +636 10 training.batch_size 0.0 +636 11 model.embedding_dim 2.0 +636 11 optimizer.lr 0.07789585115564439 +636 11 negative_sampler.num_negs_per_pos 96.0 +636 11 training.batch_size 2.0 +636 12 model.embedding_dim 2.0 +636 12 optimizer.lr 0.004316044614739794 +636 12 negative_sampler.num_negs_per_pos 54.0 +636 12 training.batch_size 2.0 +636 13 model.embedding_dim 0.0 +636 13 optimizer.lr 0.015291195270544653 +636 13 negative_sampler.num_negs_per_pos 2.0 +636 13 
training.batch_size 0.0 +636 14 model.embedding_dim 2.0 +636 14 optimizer.lr 0.08281482809398452 +636 14 negative_sampler.num_negs_per_pos 83.0 +636 14 training.batch_size 0.0 +636 15 model.embedding_dim 1.0 +636 15 optimizer.lr 0.024215659493953996 +636 15 negative_sampler.num_negs_per_pos 15.0 +636 15 training.batch_size 1.0 +636 16 model.embedding_dim 1.0 +636 16 optimizer.lr 0.0040216533484473925 +636 16 negative_sampler.num_negs_per_pos 91.0 +636 16 training.batch_size 0.0 +636 17 model.embedding_dim 0.0 +636 17 optimizer.lr 0.0013330568534711065 +636 17 negative_sampler.num_negs_per_pos 34.0 +636 17 training.batch_size 2.0 +636 18 model.embedding_dim 2.0 +636 18 optimizer.lr 0.051120529420995146 +636 18 negative_sampler.num_negs_per_pos 86.0 +636 18 training.batch_size 1.0 +636 19 model.embedding_dim 0.0 +636 19 optimizer.lr 0.04315193765334952 +636 19 negative_sampler.num_negs_per_pos 92.0 +636 19 training.batch_size 2.0 +636 20 model.embedding_dim 1.0 +636 20 optimizer.lr 0.03105927157189327 +636 20 negative_sampler.num_negs_per_pos 8.0 +636 20 training.batch_size 1.0 +636 21 model.embedding_dim 1.0 +636 21 optimizer.lr 0.06309484653435635 +636 21 negative_sampler.num_negs_per_pos 26.0 +636 21 training.batch_size 0.0 +636 22 model.embedding_dim 0.0 +636 22 optimizer.lr 0.01870754335697648 +636 22 negative_sampler.num_negs_per_pos 11.0 +636 22 training.batch_size 2.0 +636 23 model.embedding_dim 2.0 +636 23 optimizer.lr 0.008919202435883735 +636 23 negative_sampler.num_negs_per_pos 71.0 +636 23 training.batch_size 0.0 +636 24 model.embedding_dim 1.0 +636 24 optimizer.lr 0.001580648084067547 +636 24 negative_sampler.num_negs_per_pos 35.0 +636 24 training.batch_size 0.0 +636 25 model.embedding_dim 1.0 +636 25 optimizer.lr 0.0034147670325111704 +636 25 negative_sampler.num_negs_per_pos 56.0 +636 25 training.batch_size 1.0 +636 26 model.embedding_dim 2.0 +636 26 optimizer.lr 0.029745651692462582 +636 26 negative_sampler.num_negs_per_pos 42.0 +636 26 
training.batch_size 2.0 +636 27 model.embedding_dim 2.0 +636 27 optimizer.lr 0.012708985447009638 +636 27 negative_sampler.num_negs_per_pos 33.0 +636 27 training.batch_size 0.0 +636 28 model.embedding_dim 1.0 +636 28 optimizer.lr 0.0014135261450471139 +636 28 negative_sampler.num_negs_per_pos 7.0 +636 28 training.batch_size 2.0 +636 29 model.embedding_dim 2.0 +636 29 optimizer.lr 0.012617974185148248 +636 29 negative_sampler.num_negs_per_pos 32.0 +636 29 training.batch_size 0.0 +636 30 model.embedding_dim 2.0 +636 30 optimizer.lr 0.00666252667869393 +636 30 negative_sampler.num_negs_per_pos 22.0 +636 30 training.batch_size 1.0 +636 31 model.embedding_dim 2.0 +636 31 optimizer.lr 0.0029055412612625973 +636 31 negative_sampler.num_negs_per_pos 93.0 +636 31 training.batch_size 1.0 +636 32 model.embedding_dim 1.0 +636 32 optimizer.lr 0.002652738991096767 +636 32 negative_sampler.num_negs_per_pos 42.0 +636 32 training.batch_size 0.0 +636 33 model.embedding_dim 2.0 +636 33 optimizer.lr 0.01301178021691886 +636 33 negative_sampler.num_negs_per_pos 55.0 +636 33 training.batch_size 0.0 +636 34 model.embedding_dim 1.0 +636 34 optimizer.lr 0.003895461833388921 +636 34 negative_sampler.num_negs_per_pos 83.0 +636 34 training.batch_size 0.0 +636 1 dataset """fb15k237""" +636 1 model """simple""" +636 1 loss """softplus""" +636 1 regularizer """no""" +636 1 optimizer """adam""" +636 1 training_loop """owa""" +636 1 negative_sampler """basic""" +636 1 evaluator """rankbased""" +636 2 dataset """fb15k237""" +636 2 model """simple""" +636 2 loss """softplus""" +636 2 regularizer """no""" +636 2 optimizer """adam""" +636 2 training_loop """owa""" +636 2 negative_sampler """basic""" +636 2 evaluator """rankbased""" +636 3 dataset """fb15k237""" +636 3 model """simple""" +636 3 loss """softplus""" +636 3 regularizer """no""" +636 3 optimizer """adam""" +636 3 training_loop """owa""" +636 3 negative_sampler """basic""" +636 3 evaluator """rankbased""" +636 4 dataset """fb15k237""" +636 
4 model """simple""" +636 4 loss """softplus""" +636 4 regularizer """no""" +636 4 optimizer """adam""" +636 4 training_loop """owa""" +636 4 negative_sampler """basic""" +636 4 evaluator """rankbased""" +636 5 dataset """fb15k237""" +636 5 model """simple""" +636 5 loss """softplus""" +636 5 regularizer """no""" +636 5 optimizer """adam""" +636 5 training_loop """owa""" +636 5 negative_sampler """basic""" +636 5 evaluator """rankbased""" +636 6 dataset """fb15k237""" +636 6 model """simple""" +636 6 loss """softplus""" +636 6 regularizer """no""" +636 6 optimizer """adam""" +636 6 training_loop """owa""" +636 6 negative_sampler """basic""" +636 6 evaluator """rankbased""" +636 7 dataset """fb15k237""" +636 7 model """simple""" +636 7 loss """softplus""" +636 7 regularizer """no""" +636 7 optimizer """adam""" +636 7 training_loop """owa""" +636 7 negative_sampler """basic""" +636 7 evaluator """rankbased""" +636 8 dataset """fb15k237""" +636 8 model """simple""" +636 8 loss """softplus""" +636 8 regularizer """no""" +636 8 optimizer """adam""" +636 8 training_loop """owa""" +636 8 negative_sampler """basic""" +636 8 evaluator """rankbased""" +636 9 dataset """fb15k237""" +636 9 model """simple""" +636 9 loss """softplus""" +636 9 regularizer """no""" +636 9 optimizer """adam""" +636 9 training_loop """owa""" +636 9 negative_sampler """basic""" +636 9 evaluator """rankbased""" +636 10 dataset """fb15k237""" +636 10 model """simple""" +636 10 loss """softplus""" +636 10 regularizer """no""" +636 10 optimizer """adam""" +636 10 training_loop """owa""" +636 10 negative_sampler """basic""" +636 10 evaluator """rankbased""" +636 11 dataset """fb15k237""" +636 11 model """simple""" +636 11 loss """softplus""" +636 11 regularizer """no""" +636 11 optimizer """adam""" +636 11 training_loop """owa""" +636 11 negative_sampler """basic""" +636 11 evaluator """rankbased""" +636 12 dataset """fb15k237""" +636 12 model """simple""" +636 12 loss """softplus""" +636 12 regularizer 
"""no""" +636 12 optimizer """adam""" +636 12 training_loop """owa""" +636 12 negative_sampler """basic""" +636 12 evaluator """rankbased""" +636 13 dataset """fb15k237""" +636 13 model """simple""" +636 13 loss """softplus""" +636 13 regularizer """no""" +636 13 optimizer """adam""" +636 13 training_loop """owa""" +636 13 negative_sampler """basic""" +636 13 evaluator """rankbased""" +636 14 dataset """fb15k237""" +636 14 model """simple""" +636 14 loss """softplus""" +636 14 regularizer """no""" +636 14 optimizer """adam""" +636 14 training_loop """owa""" +636 14 negative_sampler """basic""" +636 14 evaluator """rankbased""" +636 15 dataset """fb15k237""" +636 15 model """simple""" +636 15 loss """softplus""" +636 15 regularizer """no""" +636 15 optimizer """adam""" +636 15 training_loop """owa""" +636 15 negative_sampler """basic""" +636 15 evaluator """rankbased""" +636 16 dataset """fb15k237""" +636 16 model """simple""" +636 16 loss """softplus""" +636 16 regularizer """no""" +636 16 optimizer """adam""" +636 16 training_loop """owa""" +636 16 negative_sampler """basic""" +636 16 evaluator """rankbased""" +636 17 dataset """fb15k237""" +636 17 model """simple""" +636 17 loss """softplus""" +636 17 regularizer """no""" +636 17 optimizer """adam""" +636 17 training_loop """owa""" +636 17 negative_sampler """basic""" +636 17 evaluator """rankbased""" +636 18 dataset """fb15k237""" +636 18 model """simple""" +636 18 loss """softplus""" +636 18 regularizer """no""" +636 18 optimizer """adam""" +636 18 training_loop """owa""" +636 18 negative_sampler """basic""" +636 18 evaluator """rankbased""" +636 19 dataset """fb15k237""" +636 19 model """simple""" +636 19 loss """softplus""" +636 19 regularizer """no""" +636 19 optimizer """adam""" +636 19 training_loop """owa""" +636 19 negative_sampler """basic""" +636 19 evaluator """rankbased""" +636 20 dataset """fb15k237""" +636 20 model """simple""" +636 20 loss """softplus""" +636 20 regularizer """no""" +636 20 
optimizer """adam""" +636 20 training_loop """owa""" +636 20 negative_sampler """basic""" +636 20 evaluator """rankbased""" +636 21 dataset """fb15k237""" +636 21 model """simple""" +636 21 loss """softplus""" +636 21 regularizer """no""" +636 21 optimizer """adam""" +636 21 training_loop """owa""" +636 21 negative_sampler """basic""" +636 21 evaluator """rankbased""" +636 22 dataset """fb15k237""" +636 22 model """simple""" +636 22 loss """softplus""" +636 22 regularizer """no""" +636 22 optimizer """adam""" +636 22 training_loop """owa""" +636 22 negative_sampler """basic""" +636 22 evaluator """rankbased""" +636 23 dataset """fb15k237""" +636 23 model """simple""" +636 23 loss """softplus""" +636 23 regularizer """no""" +636 23 optimizer """adam""" +636 23 training_loop """owa""" +636 23 negative_sampler """basic""" +636 23 evaluator """rankbased""" +636 24 dataset """fb15k237""" +636 24 model """simple""" +636 24 loss """softplus""" +636 24 regularizer """no""" +636 24 optimizer """adam""" +636 24 training_loop """owa""" +636 24 negative_sampler """basic""" +636 24 evaluator """rankbased""" +636 25 dataset """fb15k237""" +636 25 model """simple""" +636 25 loss """softplus""" +636 25 regularizer """no""" +636 25 optimizer """adam""" +636 25 training_loop """owa""" +636 25 negative_sampler """basic""" +636 25 evaluator """rankbased""" +636 26 dataset """fb15k237""" +636 26 model """simple""" +636 26 loss """softplus""" +636 26 regularizer """no""" +636 26 optimizer """adam""" +636 26 training_loop """owa""" +636 26 negative_sampler """basic""" +636 26 evaluator """rankbased""" +636 27 dataset """fb15k237""" +636 27 model """simple""" +636 27 loss """softplus""" +636 27 regularizer """no""" +636 27 optimizer """adam""" +636 27 training_loop """owa""" +636 27 negative_sampler """basic""" +636 27 evaluator """rankbased""" +636 28 dataset """fb15k237""" +636 28 model """simple""" +636 28 loss """softplus""" +636 28 regularizer """no""" +636 28 optimizer """adam""" 
+636 28 training_loop """owa""" +636 28 negative_sampler """basic""" +636 28 evaluator """rankbased""" +636 29 dataset """fb15k237""" +636 29 model """simple""" +636 29 loss """softplus""" +636 29 regularizer """no""" +636 29 optimizer """adam""" +636 29 training_loop """owa""" +636 29 negative_sampler """basic""" +636 29 evaluator """rankbased""" +636 30 dataset """fb15k237""" +636 30 model """simple""" +636 30 loss """softplus""" +636 30 regularizer """no""" +636 30 optimizer """adam""" +636 30 training_loop """owa""" +636 30 negative_sampler """basic""" +636 30 evaluator """rankbased""" +636 31 dataset """fb15k237""" +636 31 model """simple""" +636 31 loss """softplus""" +636 31 regularizer """no""" +636 31 optimizer """adam""" +636 31 training_loop """owa""" +636 31 negative_sampler """basic""" +636 31 evaluator """rankbased""" +636 32 dataset """fb15k237""" +636 32 model """simple""" +636 32 loss """softplus""" +636 32 regularizer """no""" +636 32 optimizer """adam""" +636 32 training_loop """owa""" +636 32 negative_sampler """basic""" +636 32 evaluator """rankbased""" +636 33 dataset """fb15k237""" +636 33 model """simple""" +636 33 loss """softplus""" +636 33 regularizer """no""" +636 33 optimizer """adam""" +636 33 training_loop """owa""" +636 33 negative_sampler """basic""" +636 33 evaluator """rankbased""" +636 34 dataset """fb15k237""" +636 34 model """simple""" +636 34 loss """softplus""" +636 34 regularizer """no""" +636 34 optimizer """adam""" +636 34 training_loop """owa""" +636 34 negative_sampler """basic""" +636 34 evaluator """rankbased""" +637 1 model.embedding_dim 2.0 +637 1 loss.margin 24.448395191096992 +637 1 loss.adversarial_temperature 0.6977398475736107 +637 1 optimizer.lr 0.013318353002428564 +637 1 negative_sampler.num_negs_per_pos 23.0 +637 1 training.batch_size 1.0 +637 2 model.embedding_dim 0.0 +637 2 loss.margin 23.720743756721426 +637 2 loss.adversarial_temperature 0.637485112380253 +637 2 optimizer.lr 0.010620400993123879 +637 2 
negative_sampler.num_negs_per_pos 68.0 +637 2 training.batch_size 2.0 +637 3 model.embedding_dim 2.0 +637 3 loss.margin 2.309301933368915 +637 3 loss.adversarial_temperature 0.40426819966204713 +637 3 optimizer.lr 0.02622725390687946 +637 3 negative_sampler.num_negs_per_pos 52.0 +637 3 training.batch_size 0.0 +637 4 model.embedding_dim 0.0 +637 4 loss.margin 14.828550031183532 +637 4 loss.adversarial_temperature 0.9670109513152135 +637 4 optimizer.lr 0.004069183527856667 +637 4 negative_sampler.num_negs_per_pos 57.0 +637 4 training.batch_size 2.0 +637 5 model.embedding_dim 1.0 +637 5 loss.margin 27.59947747873537 +637 5 loss.adversarial_temperature 0.2964492998577922 +637 5 optimizer.lr 0.0014577283391958718 +637 5 negative_sampler.num_negs_per_pos 72.0 +637 5 training.batch_size 2.0 +637 6 model.embedding_dim 2.0 +637 6 loss.margin 3.6187690649094786 +637 6 loss.adversarial_temperature 0.5788416835705836 +637 6 optimizer.lr 0.004162669092849482 +637 6 negative_sampler.num_negs_per_pos 8.0 +637 6 training.batch_size 1.0 +637 7 model.embedding_dim 2.0 +637 7 loss.margin 22.393250707103185 +637 7 loss.adversarial_temperature 0.4623318529157665 +637 7 optimizer.lr 0.054878743461601404 +637 7 negative_sampler.num_negs_per_pos 0.0 +637 7 training.batch_size 1.0 +637 8 model.embedding_dim 0.0 +637 8 loss.margin 25.619935197895945 +637 8 loss.adversarial_temperature 0.6474101596932839 +637 8 optimizer.lr 0.03432627998558833 +637 8 negative_sampler.num_negs_per_pos 15.0 +637 8 training.batch_size 1.0 +637 9 model.embedding_dim 1.0 +637 9 loss.margin 20.272533762551834 +637 9 loss.adversarial_temperature 0.3740142408338813 +637 9 optimizer.lr 0.002531940897304087 +637 9 negative_sampler.num_negs_per_pos 97.0 +637 9 training.batch_size 0.0 +637 10 model.embedding_dim 2.0 +637 10 loss.margin 19.91124880142461 +637 10 loss.adversarial_temperature 0.7046703544482649 +637 10 optimizer.lr 0.017169481043791745 +637 10 negative_sampler.num_negs_per_pos 67.0 +637 10 
training.batch_size 1.0 +637 11 model.embedding_dim 1.0 +637 11 loss.margin 24.940225465595944 +637 11 loss.adversarial_temperature 0.5177931600189174 +637 11 optimizer.lr 0.05740489548796433 +637 11 negative_sampler.num_negs_per_pos 58.0 +637 11 training.batch_size 1.0 +637 12 model.embedding_dim 1.0 +637 12 loss.margin 26.306235299701605 +637 12 loss.adversarial_temperature 0.34440493176717385 +637 12 optimizer.lr 0.0375928798558983 +637 12 negative_sampler.num_negs_per_pos 67.0 +637 12 training.batch_size 2.0 +637 13 model.embedding_dim 1.0 +637 13 loss.margin 21.399104875242593 +637 13 loss.adversarial_temperature 0.14480551323157012 +637 13 optimizer.lr 0.03700707990418505 +637 13 negative_sampler.num_negs_per_pos 29.0 +637 13 training.batch_size 0.0 +637 14 model.embedding_dim 1.0 +637 14 loss.margin 16.640099713459755 +637 14 loss.adversarial_temperature 0.8037961013968556 +637 14 optimizer.lr 0.03550261541745737 +637 14 negative_sampler.num_negs_per_pos 91.0 +637 14 training.batch_size 1.0 +637 15 model.embedding_dim 2.0 +637 15 loss.margin 28.340001676339305 +637 15 loss.adversarial_temperature 0.9585643247408118 +637 15 optimizer.lr 0.0022187408150950434 +637 15 negative_sampler.num_negs_per_pos 58.0 +637 15 training.batch_size 1.0 +637 16 model.embedding_dim 2.0 +637 16 loss.margin 6.365468804996391 +637 16 loss.adversarial_temperature 0.4038515966209086 +637 16 optimizer.lr 0.013720459302918897 +637 16 negative_sampler.num_negs_per_pos 83.0 +637 16 training.batch_size 2.0 +637 17 model.embedding_dim 2.0 +637 17 loss.margin 10.399023352964374 +637 17 loss.adversarial_temperature 0.9012754890651516 +637 17 optimizer.lr 0.013262299881863424 +637 17 negative_sampler.num_negs_per_pos 92.0 +637 17 training.batch_size 2.0 +637 18 model.embedding_dim 2.0 +637 18 loss.margin 25.709019073935824 +637 18 loss.adversarial_temperature 0.37585959739143426 +637 18 optimizer.lr 0.0017198922106826154 +637 18 negative_sampler.num_negs_per_pos 1.0 +637 18 
training.batch_size 0.0 +637 19 model.embedding_dim 0.0 +637 19 loss.margin 5.9047324175125 +637 19 loss.adversarial_temperature 0.9395849633120154 +637 19 optimizer.lr 0.00597566908010613 +637 19 negative_sampler.num_negs_per_pos 13.0 +637 19 training.batch_size 2.0 +637 20 model.embedding_dim 1.0 +637 20 loss.margin 27.923705996656977 +637 20 loss.adversarial_temperature 0.7764924376243837 +637 20 optimizer.lr 0.08503355491018884 +637 20 negative_sampler.num_negs_per_pos 63.0 +637 20 training.batch_size 0.0 +637 21 model.embedding_dim 2.0 +637 21 loss.margin 6.367721746947779 +637 21 loss.adversarial_temperature 0.38531768265069777 +637 21 optimizer.lr 0.002939216491092792 +637 21 negative_sampler.num_negs_per_pos 49.0 +637 21 training.batch_size 0.0 +637 1 dataset """fb15k237""" +637 1 model """simple""" +637 1 loss """nssa""" +637 1 regularizer """no""" +637 1 optimizer """adam""" +637 1 training_loop """owa""" +637 1 negative_sampler """basic""" +637 1 evaluator """rankbased""" +637 2 dataset """fb15k237""" +637 2 model """simple""" +637 2 loss """nssa""" +637 2 regularizer """no""" +637 2 optimizer """adam""" +637 2 training_loop """owa""" +637 2 negative_sampler """basic""" +637 2 evaluator """rankbased""" +637 3 dataset """fb15k237""" +637 3 model """simple""" +637 3 loss """nssa""" +637 3 regularizer """no""" +637 3 optimizer """adam""" +637 3 training_loop """owa""" +637 3 negative_sampler """basic""" +637 3 evaluator """rankbased""" +637 4 dataset """fb15k237""" +637 4 model """simple""" +637 4 loss """nssa""" +637 4 regularizer """no""" +637 4 optimizer """adam""" +637 4 training_loop """owa""" +637 4 negative_sampler """basic""" +637 4 evaluator """rankbased""" +637 5 dataset """fb15k237""" +637 5 model """simple""" +637 5 loss """nssa""" +637 5 regularizer """no""" +637 5 optimizer """adam""" +637 5 training_loop """owa""" +637 5 negative_sampler """basic""" +637 5 evaluator """rankbased""" +637 6 dataset """fb15k237""" +637 6 model """simple""" +637 
6 loss """nssa""" +637 6 regularizer """no""" +637 6 optimizer """adam""" +637 6 training_loop """owa""" +637 6 negative_sampler """basic""" +637 6 evaluator """rankbased""" +637 7 dataset """fb15k237""" +637 7 model """simple""" +637 7 loss """nssa""" +637 7 regularizer """no""" +637 7 optimizer """adam""" +637 7 training_loop """owa""" +637 7 negative_sampler """basic""" +637 7 evaluator """rankbased""" +637 8 dataset """fb15k237""" +637 8 model """simple""" +637 8 loss """nssa""" +637 8 regularizer """no""" +637 8 optimizer """adam""" +637 8 training_loop """owa""" +637 8 negative_sampler """basic""" +637 8 evaluator """rankbased""" +637 9 dataset """fb15k237""" +637 9 model """simple""" +637 9 loss """nssa""" +637 9 regularizer """no""" +637 9 optimizer """adam""" +637 9 training_loop """owa""" +637 9 negative_sampler """basic""" +637 9 evaluator """rankbased""" +637 10 dataset """fb15k237""" +637 10 model """simple""" +637 10 loss """nssa""" +637 10 regularizer """no""" +637 10 optimizer """adam""" +637 10 training_loop """owa""" +637 10 negative_sampler """basic""" +637 10 evaluator """rankbased""" +637 11 dataset """fb15k237""" +637 11 model """simple""" +637 11 loss """nssa""" +637 11 regularizer """no""" +637 11 optimizer """adam""" +637 11 training_loop """owa""" +637 11 negative_sampler """basic""" +637 11 evaluator """rankbased""" +637 12 dataset """fb15k237""" +637 12 model """simple""" +637 12 loss """nssa""" +637 12 regularizer """no""" +637 12 optimizer """adam""" +637 12 training_loop """owa""" +637 12 negative_sampler """basic""" +637 12 evaluator """rankbased""" +637 13 dataset """fb15k237""" +637 13 model """simple""" +637 13 loss """nssa""" +637 13 regularizer """no""" +637 13 optimizer """adam""" +637 13 training_loop """owa""" +637 13 negative_sampler """basic""" +637 13 evaluator """rankbased""" +637 14 dataset """fb15k237""" +637 14 model """simple""" +637 14 loss """nssa""" +637 14 regularizer """no""" +637 14 optimizer """adam""" +637 14 
training_loop """owa""" +637 14 negative_sampler """basic""" +637 14 evaluator """rankbased""" +637 15 dataset """fb15k237""" +637 15 model """simple""" +637 15 loss """nssa""" +637 15 regularizer """no""" +637 15 optimizer """adam""" +637 15 training_loop """owa""" +637 15 negative_sampler """basic""" +637 15 evaluator """rankbased""" +637 16 dataset """fb15k237""" +637 16 model """simple""" +637 16 loss """nssa""" +637 16 regularizer """no""" +637 16 optimizer """adam""" +637 16 training_loop """owa""" +637 16 negative_sampler """basic""" +637 16 evaluator """rankbased""" +637 17 dataset """fb15k237""" +637 17 model """simple""" +637 17 loss """nssa""" +637 17 regularizer """no""" +637 17 optimizer """adam""" +637 17 training_loop """owa""" +637 17 negative_sampler """basic""" +637 17 evaluator """rankbased""" +637 18 dataset """fb15k237""" +637 18 model """simple""" +637 18 loss """nssa""" +637 18 regularizer """no""" +637 18 optimizer """adam""" +637 18 training_loop """owa""" +637 18 negative_sampler """basic""" +637 18 evaluator """rankbased""" +637 19 dataset """fb15k237""" +637 19 model """simple""" +637 19 loss """nssa""" +637 19 regularizer """no""" +637 19 optimizer """adam""" +637 19 training_loop """owa""" +637 19 negative_sampler """basic""" +637 19 evaluator """rankbased""" +637 20 dataset """fb15k237""" +637 20 model """simple""" +637 20 loss """nssa""" +637 20 regularizer """no""" +637 20 optimizer """adam""" +637 20 training_loop """owa""" +637 20 negative_sampler """basic""" +637 20 evaluator """rankbased""" +637 21 dataset """fb15k237""" +637 21 model """simple""" +637 21 loss """nssa""" +637 21 regularizer """no""" +637 21 optimizer """adam""" +637 21 training_loop """owa""" +637 21 negative_sampler """basic""" +637 21 evaluator """rankbased""" +638 1 model.embedding_dim 2.0 +638 1 loss.margin 13.261942125988528 +638 1 loss.adversarial_temperature 0.4604113488987353 +638 1 optimizer.lr 0.05121961084881492 +638 1 
negative_sampler.num_negs_per_pos 94.0 +638 1 training.batch_size 0.0 +638 2 model.embedding_dim 1.0 +638 2 loss.margin 28.85971552640046 +638 2 loss.adversarial_temperature 0.3436321102374975 +638 2 optimizer.lr 0.0021953543689103867 +638 2 negative_sampler.num_negs_per_pos 37.0 +638 2 training.batch_size 1.0 +638 3 model.embedding_dim 1.0 +638 3 loss.margin 18.13986026301468 +638 3 loss.adversarial_temperature 0.8848800840338017 +638 3 optimizer.lr 0.006815693706763374 +638 3 negative_sampler.num_negs_per_pos 34.0 +638 3 training.batch_size 0.0 +638 4 model.embedding_dim 2.0 +638 4 loss.margin 29.29716728189264 +638 4 loss.adversarial_temperature 0.6591515712332904 +638 4 optimizer.lr 0.031185471117404804 +638 4 negative_sampler.num_negs_per_pos 16.0 +638 4 training.batch_size 2.0 +638 5 model.embedding_dim 2.0 +638 5 loss.margin 6.694718202499643 +638 5 loss.adversarial_temperature 0.6281948743050871 +638 5 optimizer.lr 0.009974910696843274 +638 5 negative_sampler.num_negs_per_pos 65.0 +638 5 training.batch_size 1.0 +638 6 model.embedding_dim 1.0 +638 6 loss.margin 14.176569924966703 +638 6 loss.adversarial_temperature 0.12981918410163684 +638 6 optimizer.lr 0.0032000652766457833 +638 6 negative_sampler.num_negs_per_pos 56.0 +638 6 training.batch_size 2.0 +638 7 model.embedding_dim 2.0 +638 7 loss.margin 6.488971651416616 +638 7 loss.adversarial_temperature 0.45783130995220633 +638 7 optimizer.lr 0.004972636308601731 +638 7 negative_sampler.num_negs_per_pos 67.0 +638 7 training.batch_size 2.0 +638 8 model.embedding_dim 0.0 +638 8 loss.margin 17.202888718296123 +638 8 loss.adversarial_temperature 0.5961678682000969 +638 8 optimizer.lr 0.012846378186700974 +638 8 negative_sampler.num_negs_per_pos 15.0 +638 8 training.batch_size 0.0 +638 9 model.embedding_dim 1.0 +638 9 loss.margin 25.173786933098217 +638 9 loss.adversarial_temperature 0.7775151342841405 +638 9 optimizer.lr 0.0013540374247241866 +638 9 negative_sampler.num_negs_per_pos 21.0 +638 9 
training.batch_size 0.0 +638 10 model.embedding_dim 0.0 +638 10 loss.margin 4.295381361379203 +638 10 loss.adversarial_temperature 0.9160124906320029 +638 10 optimizer.lr 0.016098017595301276 +638 10 negative_sampler.num_negs_per_pos 14.0 +638 10 training.batch_size 2.0 +638 11 model.embedding_dim 1.0 +638 11 loss.margin 24.121045478474258 +638 11 loss.adversarial_temperature 0.6399166231112162 +638 11 optimizer.lr 0.06601907837493302 +638 11 negative_sampler.num_negs_per_pos 16.0 +638 11 training.batch_size 1.0 +638 12 model.embedding_dim 0.0 +638 12 loss.margin 24.720551715916038 +638 12 loss.adversarial_temperature 0.9050981682145472 +638 12 optimizer.lr 0.015576587021757625 +638 12 negative_sampler.num_negs_per_pos 51.0 +638 12 training.batch_size 0.0 +638 13 model.embedding_dim 1.0 +638 13 loss.margin 26.499564263379554 +638 13 loss.adversarial_temperature 0.36878567541110535 +638 13 optimizer.lr 0.0019186923381241982 +638 13 negative_sampler.num_negs_per_pos 82.0 +638 13 training.batch_size 1.0 +638 14 model.embedding_dim 0.0 +638 14 loss.margin 7.649517709246183 +638 14 loss.adversarial_temperature 0.5736113362875451 +638 14 optimizer.lr 0.05555871863860921 +638 14 negative_sampler.num_negs_per_pos 45.0 +638 14 training.batch_size 1.0 +638 15 model.embedding_dim 1.0 +638 15 loss.margin 26.211281922099708 +638 15 loss.adversarial_temperature 0.23744288130499513 +638 15 optimizer.lr 0.01837399374476203 +638 15 negative_sampler.num_negs_per_pos 49.0 +638 15 training.batch_size 0.0 +638 16 model.embedding_dim 2.0 +638 16 loss.margin 23.509086201951796 +638 16 loss.adversarial_temperature 0.8371612797780843 +638 16 optimizer.lr 0.054701069427880694 +638 16 negative_sampler.num_negs_per_pos 56.0 +638 16 training.batch_size 0.0 +638 17 model.embedding_dim 2.0 +638 17 loss.margin 22.597414864042307 +638 17 loss.adversarial_temperature 0.7606892321683524 +638 17 optimizer.lr 0.0034190739121626444 +638 17 negative_sampler.num_negs_per_pos 80.0 +638 17 
training.batch_size 1.0 +638 18 model.embedding_dim 2.0 +638 18 loss.margin 5.438357305543197 +638 18 loss.adversarial_temperature 0.7031302505645433 +638 18 optimizer.lr 0.018123894357783926 +638 18 negative_sampler.num_negs_per_pos 82.0 +638 18 training.batch_size 1.0 +638 19 model.embedding_dim 0.0 +638 19 loss.margin 3.721014359186758 +638 19 loss.adversarial_temperature 0.4600115508440104 +638 19 optimizer.lr 0.09404795501685077 +638 19 negative_sampler.num_negs_per_pos 71.0 +638 19 training.batch_size 0.0 +638 20 model.embedding_dim 2.0 +638 20 loss.margin 26.646758837855476 +638 20 loss.adversarial_temperature 0.837557071386406 +638 20 optimizer.lr 0.004889347144871187 +638 20 negative_sampler.num_negs_per_pos 11.0 +638 20 training.batch_size 2.0 +638 21 model.embedding_dim 0.0 +638 21 loss.margin 9.132512611203776 +638 21 loss.adversarial_temperature 0.7744310541184931 +638 21 optimizer.lr 0.004229073780106994 +638 21 negative_sampler.num_negs_per_pos 67.0 +638 21 training.batch_size 1.0 +638 22 model.embedding_dim 2.0 +638 22 loss.margin 9.58938145443323 +638 22 loss.adversarial_temperature 0.11986071096756813 +638 22 optimizer.lr 0.0014600672023827822 +638 22 negative_sampler.num_negs_per_pos 53.0 +638 22 training.batch_size 1.0 +638 23 model.embedding_dim 0.0 +638 23 loss.margin 29.081878151466 +638 23 loss.adversarial_temperature 0.7964573232390055 +638 23 optimizer.lr 0.0016803230332917212 +638 23 negative_sampler.num_negs_per_pos 96.0 +638 23 training.batch_size 2.0 +638 24 model.embedding_dim 0.0 +638 24 loss.margin 3.34203337357685 +638 24 loss.adversarial_temperature 0.8583313805945413 +638 24 optimizer.lr 0.001925474631471294 +638 24 negative_sampler.num_negs_per_pos 75.0 +638 24 training.batch_size 2.0 +638 25 model.embedding_dim 0.0 +638 25 loss.margin 14.529349254397548 +638 25 loss.adversarial_temperature 0.8511417505133111 +638 25 optimizer.lr 0.0054334222692726175 +638 25 negative_sampler.num_negs_per_pos 77.0 +638 25 training.batch_size 1.0 
+638 26 model.embedding_dim 1.0 +638 26 loss.margin 13.284834048295608 +638 26 loss.adversarial_temperature 0.3673439234051082 +638 26 optimizer.lr 0.008821049899434271 +638 26 negative_sampler.num_negs_per_pos 3.0 +638 26 training.batch_size 0.0 +638 27 model.embedding_dim 2.0 +638 27 loss.margin 11.751774865574207 +638 27 loss.adversarial_temperature 0.32205246270744925 +638 27 optimizer.lr 0.002330184941444304 +638 27 negative_sampler.num_negs_per_pos 44.0 +638 27 training.batch_size 0.0 +638 28 model.embedding_dim 0.0 +638 28 loss.margin 14.739949756086569 +638 28 loss.adversarial_temperature 0.2576548861514766 +638 28 optimizer.lr 0.008864727501025984 +638 28 negative_sampler.num_negs_per_pos 82.0 +638 28 training.batch_size 1.0 +638 29 model.embedding_dim 1.0 +638 29 loss.margin 23.92901312927841 +638 29 loss.adversarial_temperature 0.6934568098785628 +638 29 optimizer.lr 0.004544069834761978 +638 29 negative_sampler.num_negs_per_pos 59.0 +638 29 training.batch_size 1.0 +638 30 model.embedding_dim 2.0 +638 30 loss.margin 10.21688653848795 +638 30 loss.adversarial_temperature 0.9242724988817648 +638 30 optimizer.lr 0.0069634046291900216 +638 30 negative_sampler.num_negs_per_pos 39.0 +638 30 training.batch_size 2.0 +638 31 model.embedding_dim 1.0 +638 31 loss.margin 5.480348265432187 +638 31 loss.adversarial_temperature 0.7667119885322514 +638 31 optimizer.lr 0.013955812933750453 +638 31 negative_sampler.num_negs_per_pos 71.0 +638 31 training.batch_size 1.0 +638 32 model.embedding_dim 0.0 +638 32 loss.margin 16.335028152032056 +638 32 loss.adversarial_temperature 0.3852866022943185 +638 32 optimizer.lr 0.004328658855772834 +638 32 negative_sampler.num_negs_per_pos 45.0 +638 32 training.batch_size 2.0 +638 33 model.embedding_dim 1.0 +638 33 loss.margin 27.12033140178223 +638 33 loss.adversarial_temperature 0.9530652247476571 +638 33 optimizer.lr 0.014377804510994126 +638 33 negative_sampler.num_negs_per_pos 37.0 +638 33 training.batch_size 2.0 +638 34 
model.embedding_dim 2.0 +638 34 loss.margin 22.8620075517337 +638 34 loss.adversarial_temperature 0.8020533648497837 +638 34 optimizer.lr 0.0020958697052310842 +638 34 negative_sampler.num_negs_per_pos 48.0 +638 34 training.batch_size 1.0 +638 1 dataset """fb15k237""" +638 1 model """simple""" +638 1 loss """nssa""" +638 1 regularizer """no""" +638 1 optimizer """adam""" +638 1 training_loop """owa""" +638 1 negative_sampler """basic""" +638 1 evaluator """rankbased""" +638 2 dataset """fb15k237""" +638 2 model """simple""" +638 2 loss """nssa""" +638 2 regularizer """no""" +638 2 optimizer """adam""" +638 2 training_loop """owa""" +638 2 negative_sampler """basic""" +638 2 evaluator """rankbased""" +638 3 dataset """fb15k237""" +638 3 model """simple""" +638 3 loss """nssa""" +638 3 regularizer """no""" +638 3 optimizer """adam""" +638 3 training_loop """owa""" +638 3 negative_sampler """basic""" +638 3 evaluator """rankbased""" +638 4 dataset """fb15k237""" +638 4 model """simple""" +638 4 loss """nssa""" +638 4 regularizer """no""" +638 4 optimizer """adam""" +638 4 training_loop """owa""" +638 4 negative_sampler """basic""" +638 4 evaluator """rankbased""" +638 5 dataset """fb15k237""" +638 5 model """simple""" +638 5 loss """nssa""" +638 5 regularizer """no""" +638 5 optimizer """adam""" +638 5 training_loop """owa""" +638 5 negative_sampler """basic""" +638 5 evaluator """rankbased""" +638 6 dataset """fb15k237""" +638 6 model """simple""" +638 6 loss """nssa""" +638 6 regularizer """no""" +638 6 optimizer """adam""" +638 6 training_loop """owa""" +638 6 negative_sampler """basic""" +638 6 evaluator """rankbased""" +638 7 dataset """fb15k237""" +638 7 model """simple""" +638 7 loss """nssa""" +638 7 regularizer """no""" +638 7 optimizer """adam""" +638 7 training_loop """owa""" +638 7 negative_sampler """basic""" +638 7 evaluator """rankbased""" +638 8 dataset """fb15k237""" +638 8 model """simple""" +638 8 loss """nssa""" +638 8 regularizer """no""" +638 8 
optimizer """adam""" +638 8 training_loop """owa""" +638 8 negative_sampler """basic""" +638 8 evaluator """rankbased""" +638 9 dataset """fb15k237""" +638 9 model """simple""" +638 9 loss """nssa""" +638 9 regularizer """no""" +638 9 optimizer """adam""" +638 9 training_loop """owa""" +638 9 negative_sampler """basic""" +638 9 evaluator """rankbased""" +638 10 dataset """fb15k237""" +638 10 model """simple""" +638 10 loss """nssa""" +638 10 regularizer """no""" +638 10 optimizer """adam""" +638 10 training_loop """owa""" +638 10 negative_sampler """basic""" +638 10 evaluator """rankbased""" +638 11 dataset """fb15k237""" +638 11 model """simple""" +638 11 loss """nssa""" +638 11 regularizer """no""" +638 11 optimizer """adam""" +638 11 training_loop """owa""" +638 11 negative_sampler """basic""" +638 11 evaluator """rankbased""" +638 12 dataset """fb15k237""" +638 12 model """simple""" +638 12 loss """nssa""" +638 12 regularizer """no""" +638 12 optimizer """adam""" +638 12 training_loop """owa""" +638 12 negative_sampler """basic""" +638 12 evaluator """rankbased""" +638 13 dataset """fb15k237""" +638 13 model """simple""" +638 13 loss """nssa""" +638 13 regularizer """no""" +638 13 optimizer """adam""" +638 13 training_loop """owa""" +638 13 negative_sampler """basic""" +638 13 evaluator """rankbased""" +638 14 dataset """fb15k237""" +638 14 model """simple""" +638 14 loss """nssa""" +638 14 regularizer """no""" +638 14 optimizer """adam""" +638 14 training_loop """owa""" +638 14 negative_sampler """basic""" +638 14 evaluator """rankbased""" +638 15 dataset """fb15k237""" +638 15 model """simple""" +638 15 loss """nssa""" +638 15 regularizer """no""" +638 15 optimizer """adam""" +638 15 training_loop """owa""" +638 15 negative_sampler """basic""" +638 15 evaluator """rankbased""" +638 16 dataset """fb15k237""" +638 16 model """simple""" +638 16 loss """nssa""" +638 16 regularizer """no""" +638 16 optimizer """adam""" +638 16 training_loop """owa""" +638 16 
negative_sampler """basic""" +638 16 evaluator """rankbased""" +638 17 dataset """fb15k237""" +638 17 model """simple""" +638 17 loss """nssa""" +638 17 regularizer """no""" +638 17 optimizer """adam""" +638 17 training_loop """owa""" +638 17 negative_sampler """basic""" +638 17 evaluator """rankbased""" +638 18 dataset """fb15k237""" +638 18 model """simple""" +638 18 loss """nssa""" +638 18 regularizer """no""" +638 18 optimizer """adam""" +638 18 training_loop """owa""" +638 18 negative_sampler """basic""" +638 18 evaluator """rankbased""" +638 19 dataset """fb15k237""" +638 19 model """simple""" +638 19 loss """nssa""" +638 19 regularizer """no""" +638 19 optimizer """adam""" +638 19 training_loop """owa""" +638 19 negative_sampler """basic""" +638 19 evaluator """rankbased""" +638 20 dataset """fb15k237""" +638 20 model """simple""" +638 20 loss """nssa""" +638 20 regularizer """no""" +638 20 optimizer """adam""" +638 20 training_loop """owa""" +638 20 negative_sampler """basic""" +638 20 evaluator """rankbased""" +638 21 dataset """fb15k237""" +638 21 model """simple""" +638 21 loss """nssa""" +638 21 regularizer """no""" +638 21 optimizer """adam""" +638 21 training_loop """owa""" +638 21 negative_sampler """basic""" +638 21 evaluator """rankbased""" +638 22 dataset """fb15k237""" +638 22 model """simple""" +638 22 loss """nssa""" +638 22 regularizer """no""" +638 22 optimizer """adam""" +638 22 training_loop """owa""" +638 22 negative_sampler """basic""" +638 22 evaluator """rankbased""" +638 23 dataset """fb15k237""" +638 23 model """simple""" +638 23 loss """nssa""" +638 23 regularizer """no""" +638 23 optimizer """adam""" +638 23 training_loop """owa""" +638 23 negative_sampler """basic""" +638 23 evaluator """rankbased""" +638 24 dataset """fb15k237""" +638 24 model """simple""" +638 24 loss """nssa""" +638 24 regularizer """no""" +638 24 optimizer """adam""" +638 24 training_loop """owa""" +638 24 negative_sampler """basic""" +638 24 evaluator 
"""rankbased""" +638 25 dataset """fb15k237""" +638 25 model """simple""" +638 25 loss """nssa""" +638 25 regularizer """no""" +638 25 optimizer """adam""" +638 25 training_loop """owa""" +638 25 negative_sampler """basic""" +638 25 evaluator """rankbased""" +638 26 dataset """fb15k237""" +638 26 model """simple""" +638 26 loss """nssa""" +638 26 regularizer """no""" +638 26 optimizer """adam""" +638 26 training_loop """owa""" +638 26 negative_sampler """basic""" +638 26 evaluator """rankbased""" +638 27 dataset """fb15k237""" +638 27 model """simple""" +638 27 loss """nssa""" +638 27 regularizer """no""" +638 27 optimizer """adam""" +638 27 training_loop """owa""" +638 27 negative_sampler """basic""" +638 27 evaluator """rankbased""" +638 28 dataset """fb15k237""" +638 28 model """simple""" +638 28 loss """nssa""" +638 28 regularizer """no""" +638 28 optimizer """adam""" +638 28 training_loop """owa""" +638 28 negative_sampler """basic""" +638 28 evaluator """rankbased""" +638 29 dataset """fb15k237""" +638 29 model """simple""" +638 29 loss """nssa""" +638 29 regularizer """no""" +638 29 optimizer """adam""" +638 29 training_loop """owa""" +638 29 negative_sampler """basic""" +638 29 evaluator """rankbased""" +638 30 dataset """fb15k237""" +638 30 model """simple""" +638 30 loss """nssa""" +638 30 regularizer """no""" +638 30 optimizer """adam""" +638 30 training_loop """owa""" +638 30 negative_sampler """basic""" +638 30 evaluator """rankbased""" +638 31 dataset """fb15k237""" +638 31 model """simple""" +638 31 loss """nssa""" +638 31 regularizer """no""" +638 31 optimizer """adam""" +638 31 training_loop """owa""" +638 31 negative_sampler """basic""" +638 31 evaluator """rankbased""" +638 32 dataset """fb15k237""" +638 32 model """simple""" +638 32 loss """nssa""" +638 32 regularizer """no""" +638 32 optimizer """adam""" +638 32 training_loop """owa""" +638 32 negative_sampler """basic""" +638 32 evaluator """rankbased""" +638 33 dataset """fb15k237""" +638 33 
model """simple""" +638 33 loss """nssa""" +638 33 regularizer """no""" +638 33 optimizer """adam""" +638 33 training_loop """owa""" +638 33 negative_sampler """basic""" +638 33 evaluator """rankbased""" +638 34 dataset """fb15k237""" +638 34 model """simple""" +638 34 loss """nssa""" +638 34 regularizer """no""" +638 34 optimizer """adam""" +638 34 training_loop """owa""" +638 34 negative_sampler """basic""" +638 34 evaluator """rankbased""" +639 1 model.embedding_dim 0.0 +639 1 loss.margin 8.62654530864178 +639 1 optimizer.lr 0.0441537659826542 +639 1 negative_sampler.num_negs_per_pos 12.0 +639 1 training.batch_size 0.0 +639 2 model.embedding_dim 0.0 +639 2 loss.margin 9.427538272338465 +639 2 optimizer.lr 0.04965847262590059 +639 2 negative_sampler.num_negs_per_pos 97.0 +639 2 training.batch_size 0.0 +639 3 model.embedding_dim 1.0 +639 3 loss.margin 2.086870423589086 +639 3 optimizer.lr 0.06945066740399297 +639 3 negative_sampler.num_negs_per_pos 95.0 +639 3 training.batch_size 1.0 +639 4 model.embedding_dim 0.0 +639 4 loss.margin 3.4165468436231508 +639 4 optimizer.lr 0.0010137430390369795 +639 4 negative_sampler.num_negs_per_pos 32.0 +639 4 training.batch_size 2.0 +639 5 model.embedding_dim 0.0 +639 5 loss.margin 7.2594172952781655 +639 5 optimizer.lr 0.0028872385560674797 +639 5 negative_sampler.num_negs_per_pos 11.0 +639 5 training.batch_size 0.0 +639 6 model.embedding_dim 0.0 +639 6 loss.margin 8.522340933145239 +639 6 optimizer.lr 0.0025811061246001276 +639 6 negative_sampler.num_negs_per_pos 44.0 +639 6 training.batch_size 1.0 +639 7 model.embedding_dim 1.0 +639 7 loss.margin 2.2146118655259723 +639 7 optimizer.lr 0.03740806365691537 +639 7 negative_sampler.num_negs_per_pos 74.0 +639 7 training.batch_size 2.0 +639 8 model.embedding_dim 2.0 +639 8 loss.margin 2.140797606558654 +639 8 optimizer.lr 0.0017652142327066803 +639 8 negative_sampler.num_negs_per_pos 40.0 +639 8 training.batch_size 0.0 +639 9 model.embedding_dim 1.0 +639 9 loss.margin 
5.835194322595145 +639 9 optimizer.lr 0.007717094724813153 +639 9 negative_sampler.num_negs_per_pos 8.0 +639 9 training.batch_size 0.0 +639 10 model.embedding_dim 1.0 +639 10 loss.margin 3.4165736643472555 +639 10 optimizer.lr 0.002834809440899414 +639 10 negative_sampler.num_negs_per_pos 91.0 +639 10 training.batch_size 0.0 +639 1 dataset """fb15k237""" +639 1 model """simple""" +639 1 loss """marginranking""" +639 1 regularizer """no""" +639 1 optimizer """adam""" +639 1 training_loop """owa""" +639 1 negative_sampler """basic""" +639 1 evaluator """rankbased""" +639 2 dataset """fb15k237""" +639 2 model """simple""" +639 2 loss """marginranking""" +639 2 regularizer """no""" +639 2 optimizer """adam""" +639 2 training_loop """owa""" +639 2 negative_sampler """basic""" +639 2 evaluator """rankbased""" +639 3 dataset """fb15k237""" +639 3 model """simple""" +639 3 loss """marginranking""" +639 3 regularizer """no""" +639 3 optimizer """adam""" +639 3 training_loop """owa""" +639 3 negative_sampler """basic""" +639 3 evaluator """rankbased""" +639 4 dataset """fb15k237""" +639 4 model """simple""" +639 4 loss """marginranking""" +639 4 regularizer """no""" +639 4 optimizer """adam""" +639 4 training_loop """owa""" +639 4 negative_sampler """basic""" +639 4 evaluator """rankbased""" +639 5 dataset """fb15k237""" +639 5 model """simple""" +639 5 loss """marginranking""" +639 5 regularizer """no""" +639 5 optimizer """adam""" +639 5 training_loop """owa""" +639 5 negative_sampler """basic""" +639 5 evaluator """rankbased""" +639 6 dataset """fb15k237""" +639 6 model """simple""" +639 6 loss """marginranking""" +639 6 regularizer """no""" +639 6 optimizer """adam""" +639 6 training_loop """owa""" +639 6 negative_sampler """basic""" +639 6 evaluator """rankbased""" +639 7 dataset """fb15k237""" +639 7 model """simple""" +639 7 loss """marginranking""" +639 7 regularizer """no""" +639 7 optimizer """adam""" +639 7 training_loop """owa""" +639 7 negative_sampler 
"""basic""" +639 7 evaluator """rankbased""" +639 8 dataset """fb15k237""" +639 8 model """simple""" +639 8 loss """marginranking""" +639 8 regularizer """no""" +639 8 optimizer """adam""" +639 8 training_loop """owa""" +639 8 negative_sampler """basic""" +639 8 evaluator """rankbased""" +639 9 dataset """fb15k237""" +639 9 model """simple""" +639 9 loss """marginranking""" +639 9 regularizer """no""" +639 9 optimizer """adam""" +639 9 training_loop """owa""" +639 9 negative_sampler """basic""" +639 9 evaluator """rankbased""" +639 10 dataset """fb15k237""" +639 10 model """simple""" +639 10 loss """marginranking""" +639 10 regularizer """no""" +639 10 optimizer """adam""" +639 10 training_loop """owa""" +639 10 negative_sampler """basic""" +639 10 evaluator """rankbased""" +640 1 model.embedding_dim 0.0 +640 1 loss.margin 0.9074424176624796 +640 1 optimizer.lr 0.0016567994239429962 +640 1 negative_sampler.num_negs_per_pos 65.0 +640 1 training.batch_size 0.0 +640 2 model.embedding_dim 1.0 +640 2 loss.margin 9.058747142178325 +640 2 optimizer.lr 0.020520467398343855 +640 2 negative_sampler.num_negs_per_pos 21.0 +640 2 training.batch_size 0.0 +640 3 model.embedding_dim 1.0 +640 3 loss.margin 7.426217491672961 +640 3 optimizer.lr 0.004504660748158882 +640 3 negative_sampler.num_negs_per_pos 23.0 +640 3 training.batch_size 1.0 +640 4 model.embedding_dim 2.0 +640 4 loss.margin 8.373765353016006 +640 4 optimizer.lr 0.09207469026175384 +640 4 negative_sampler.num_negs_per_pos 27.0 +640 4 training.batch_size 0.0 +640 5 model.embedding_dim 0.0 +640 5 loss.margin 1.2789482065887028 +640 5 optimizer.lr 0.0541531270321221 +640 5 negative_sampler.num_negs_per_pos 42.0 +640 5 training.batch_size 0.0 +640 6 model.embedding_dim 2.0 +640 6 loss.margin 3.178150602769402 +640 6 optimizer.lr 0.06867519706127224 +640 6 negative_sampler.num_negs_per_pos 42.0 +640 6 training.batch_size 2.0 +640 7 model.embedding_dim 2.0 +640 7 loss.margin 4.942748396400148 +640 7 optimizer.lr 
0.001035102528554761 +640 7 negative_sampler.num_negs_per_pos 96.0 +640 7 training.batch_size 2.0 +640 8 model.embedding_dim 2.0 +640 8 loss.margin 4.031406970949321 +640 8 optimizer.lr 0.002235456971736186 +640 8 negative_sampler.num_negs_per_pos 77.0 +640 8 training.batch_size 1.0 +640 9 model.embedding_dim 1.0 +640 9 loss.margin 0.9010052275166216 +640 9 optimizer.lr 0.0032995337885140972 +640 9 negative_sampler.num_negs_per_pos 15.0 +640 9 training.batch_size 1.0 +640 10 model.embedding_dim 1.0 +640 10 loss.margin 4.9749175490399296 +640 10 optimizer.lr 0.030990390072410577 +640 10 negative_sampler.num_negs_per_pos 13.0 +640 10 training.batch_size 2.0 +640 11 model.embedding_dim 2.0 +640 11 loss.margin 8.519897673923674 +640 11 optimizer.lr 0.004614003442032248 +640 11 negative_sampler.num_negs_per_pos 2.0 +640 11 training.batch_size 1.0 +640 12 model.embedding_dim 1.0 +640 12 loss.margin 0.7160177486890356 +640 12 optimizer.lr 0.001358033644545604 +640 12 negative_sampler.num_negs_per_pos 27.0 +640 12 training.batch_size 1.0 +640 13 model.embedding_dim 1.0 +640 13 loss.margin 7.531869395546437 +640 13 optimizer.lr 0.015476124119460203 +640 13 negative_sampler.num_negs_per_pos 22.0 +640 13 training.batch_size 2.0 +640 14 model.embedding_dim 1.0 +640 14 loss.margin 3.0170418095164564 +640 14 optimizer.lr 0.027286830711525142 +640 14 negative_sampler.num_negs_per_pos 13.0 +640 14 training.batch_size 2.0 +640 15 model.embedding_dim 1.0 +640 15 loss.margin 2.7597095082728664 +640 15 optimizer.lr 0.05131293426915837 +640 15 negative_sampler.num_negs_per_pos 55.0 +640 15 training.batch_size 1.0 +640 16 model.embedding_dim 2.0 +640 16 loss.margin 1.494991430592158 +640 16 optimizer.lr 0.08500002726259098 +640 16 negative_sampler.num_negs_per_pos 31.0 +640 16 training.batch_size 0.0 +640 17 model.embedding_dim 1.0 +640 17 loss.margin 6.274495318049848 +640 17 optimizer.lr 0.0023561571431668055 +640 17 negative_sampler.num_negs_per_pos 87.0 +640 17 training.batch_size 
1.0 +640 18 model.embedding_dim 1.0 +640 18 loss.margin 3.914999600009856 +640 18 optimizer.lr 0.02083237551343265 +640 18 negative_sampler.num_negs_per_pos 60.0 +640 18 training.batch_size 2.0 +640 19 model.embedding_dim 2.0 +640 19 loss.margin 1.420515649303106 +640 19 optimizer.lr 0.02540254097889208 +640 19 negative_sampler.num_negs_per_pos 94.0 +640 19 training.batch_size 0.0 +640 20 model.embedding_dim 1.0 +640 20 loss.margin 1.0069313386598506 +640 20 optimizer.lr 0.0056238715690252765 +640 20 negative_sampler.num_negs_per_pos 44.0 +640 20 training.batch_size 1.0 +640 21 model.embedding_dim 0.0 +640 21 loss.margin 3.2941720699930253 +640 21 optimizer.lr 0.0011958958176237608 +640 21 negative_sampler.num_negs_per_pos 32.0 +640 21 training.batch_size 0.0 +640 22 model.embedding_dim 2.0 +640 22 loss.margin 2.1474227258035516 +640 22 optimizer.lr 0.012897547292033514 +640 22 negative_sampler.num_negs_per_pos 4.0 +640 22 training.batch_size 1.0 +640 23 model.embedding_dim 1.0 +640 23 loss.margin 0.6426448407101877 +640 23 optimizer.lr 0.0011876707194931157 +640 23 negative_sampler.num_negs_per_pos 5.0 +640 23 training.batch_size 1.0 +640 24 model.embedding_dim 0.0 +640 24 loss.margin 2.571204466804475 +640 24 optimizer.lr 0.018074246224946404 +640 24 negative_sampler.num_negs_per_pos 78.0 +640 24 training.batch_size 0.0 +640 25 model.embedding_dim 0.0 +640 25 loss.margin 7.429195088002168 +640 25 optimizer.lr 0.0018766996894061855 +640 25 negative_sampler.num_negs_per_pos 28.0 +640 25 training.batch_size 1.0 +640 26 model.embedding_dim 1.0 +640 26 loss.margin 1.0037538631832597 +640 26 optimizer.lr 0.011441447348090238 +640 26 negative_sampler.num_negs_per_pos 13.0 +640 26 training.batch_size 2.0 +640 27 model.embedding_dim 1.0 +640 27 loss.margin 3.7069723222346727 +640 27 optimizer.lr 0.001096441968288611 +640 27 negative_sampler.num_negs_per_pos 27.0 +640 27 training.batch_size 0.0 +640 1 dataset """fb15k237""" +640 1 model """simple""" +640 1 loss 
"""marginranking""" +640 1 regularizer """no""" +640 1 optimizer """adam""" +640 1 training_loop """owa""" +640 1 negative_sampler """basic""" +640 1 evaluator """rankbased""" +640 2 dataset """fb15k237""" +640 2 model """simple""" +640 2 loss """marginranking""" +640 2 regularizer """no""" +640 2 optimizer """adam""" +640 2 training_loop """owa""" +640 2 negative_sampler """basic""" +640 2 evaluator """rankbased""" +640 3 dataset """fb15k237""" +640 3 model """simple""" +640 3 loss """marginranking""" +640 3 regularizer """no""" +640 3 optimizer """adam""" +640 3 training_loop """owa""" +640 3 negative_sampler """basic""" +640 3 evaluator """rankbased""" +640 4 dataset """fb15k237""" +640 4 model """simple""" +640 4 loss """marginranking""" +640 4 regularizer """no""" +640 4 optimizer """adam""" +640 4 training_loop """owa""" +640 4 negative_sampler """basic""" +640 4 evaluator """rankbased""" +640 5 dataset """fb15k237""" +640 5 model """simple""" +640 5 loss """marginranking""" +640 5 regularizer """no""" +640 5 optimizer """adam""" +640 5 training_loop """owa""" +640 5 negative_sampler """basic""" +640 5 evaluator """rankbased""" +640 6 dataset """fb15k237""" +640 6 model """simple""" +640 6 loss """marginranking""" +640 6 regularizer """no""" +640 6 optimizer """adam""" +640 6 training_loop """owa""" +640 6 negative_sampler """basic""" +640 6 evaluator """rankbased""" +640 7 dataset """fb15k237""" +640 7 model """simple""" +640 7 loss """marginranking""" +640 7 regularizer """no""" +640 7 optimizer """adam""" +640 7 training_loop """owa""" +640 7 negative_sampler """basic""" +640 7 evaluator """rankbased""" +640 8 dataset """fb15k237""" +640 8 model """simple""" +640 8 loss """marginranking""" +640 8 regularizer """no""" +640 8 optimizer """adam""" +640 8 training_loop """owa""" +640 8 negative_sampler """basic""" +640 8 evaluator """rankbased""" +640 9 dataset """fb15k237""" +640 9 model """simple""" +640 9 loss """marginranking""" +640 9 regularizer """no""" 
+640 9 optimizer """adam""" +640 9 training_loop """owa""" +640 9 negative_sampler """basic""" +640 9 evaluator """rankbased""" +640 10 dataset """fb15k237""" +640 10 model """simple""" +640 10 loss """marginranking""" +640 10 regularizer """no""" +640 10 optimizer """adam""" +640 10 training_loop """owa""" +640 10 negative_sampler """basic""" +640 10 evaluator """rankbased""" +640 11 dataset """fb15k237""" +640 11 model """simple""" +640 11 loss """marginranking""" +640 11 regularizer """no""" +640 11 optimizer """adam""" +640 11 training_loop """owa""" +640 11 negative_sampler """basic""" +640 11 evaluator """rankbased""" +640 12 dataset """fb15k237""" +640 12 model """simple""" +640 12 loss """marginranking""" +640 12 regularizer """no""" +640 12 optimizer """adam""" +640 12 training_loop """owa""" +640 12 negative_sampler """basic""" +640 12 evaluator """rankbased""" +640 13 dataset """fb15k237""" +640 13 model """simple""" +640 13 loss """marginranking""" +640 13 regularizer """no""" +640 13 optimizer """adam""" +640 13 training_loop """owa""" +640 13 negative_sampler """basic""" +640 13 evaluator """rankbased""" +640 14 dataset """fb15k237""" +640 14 model """simple""" +640 14 loss """marginranking""" +640 14 regularizer """no""" +640 14 optimizer """adam""" +640 14 training_loop """owa""" +640 14 negative_sampler """basic""" +640 14 evaluator """rankbased""" +640 15 dataset """fb15k237""" +640 15 model """simple""" +640 15 loss """marginranking""" +640 15 regularizer """no""" +640 15 optimizer """adam""" +640 15 training_loop """owa""" +640 15 negative_sampler """basic""" +640 15 evaluator """rankbased""" +640 16 dataset """fb15k237""" +640 16 model """simple""" +640 16 loss """marginranking""" +640 16 regularizer """no""" +640 16 optimizer """adam""" +640 16 training_loop """owa""" +640 16 negative_sampler """basic""" +640 16 evaluator """rankbased""" +640 17 dataset """fb15k237""" +640 17 model """simple""" +640 17 loss """marginranking""" +640 17 
regularizer """no""" +640 17 optimizer """adam""" +640 17 training_loop """owa""" +640 17 negative_sampler """basic""" +640 17 evaluator """rankbased""" +640 18 dataset """fb15k237""" +640 18 model """simple""" +640 18 loss """marginranking""" +640 18 regularizer """no""" +640 18 optimizer """adam""" +640 18 training_loop """owa""" +640 18 negative_sampler """basic""" +640 18 evaluator """rankbased""" +640 19 dataset """fb15k237""" +640 19 model """simple""" +640 19 loss """marginranking""" +640 19 regularizer """no""" +640 19 optimizer """adam""" +640 19 training_loop """owa""" +640 19 negative_sampler """basic""" +640 19 evaluator """rankbased""" +640 20 dataset """fb15k237""" +640 20 model """simple""" +640 20 loss """marginranking""" +640 20 regularizer """no""" +640 20 optimizer """adam""" +640 20 training_loop """owa""" +640 20 negative_sampler """basic""" +640 20 evaluator """rankbased""" +640 21 dataset """fb15k237""" +640 21 model """simple""" +640 21 loss """marginranking""" +640 21 regularizer """no""" +640 21 optimizer """adam""" +640 21 training_loop """owa""" +640 21 negative_sampler """basic""" +640 21 evaluator """rankbased""" +640 22 dataset """fb15k237""" +640 22 model """simple""" +640 22 loss """marginranking""" +640 22 regularizer """no""" +640 22 optimizer """adam""" +640 22 training_loop """owa""" +640 22 negative_sampler """basic""" +640 22 evaluator """rankbased""" +640 23 dataset """fb15k237""" +640 23 model """simple""" +640 23 loss """marginranking""" +640 23 regularizer """no""" +640 23 optimizer """adam""" +640 23 training_loop """owa""" +640 23 negative_sampler """basic""" +640 23 evaluator """rankbased""" +640 24 dataset """fb15k237""" +640 24 model """simple""" +640 24 loss """marginranking""" +640 24 regularizer """no""" +640 24 optimizer """adam""" +640 24 training_loop """owa""" +640 24 negative_sampler """basic""" +640 24 evaluator """rankbased""" +640 25 dataset """fb15k237""" +640 25 model """simple""" +640 25 loss 
"""marginranking""" +640 25 regularizer """no""" +640 25 optimizer """adam""" +640 25 training_loop """owa""" +640 25 negative_sampler """basic""" +640 25 evaluator """rankbased""" +640 26 dataset """fb15k237""" +640 26 model """simple""" +640 26 loss """marginranking""" +640 26 regularizer """no""" +640 26 optimizer """adam""" +640 26 training_loop """owa""" +640 26 negative_sampler """basic""" +640 26 evaluator """rankbased""" +640 27 dataset """fb15k237""" +640 27 model """simple""" +640 27 loss """marginranking""" +640 27 regularizer """no""" +640 27 optimizer """adam""" +640 27 training_loop """owa""" +640 27 negative_sampler """basic""" +640 27 evaluator """rankbased""" +641 1 model.embedding_dim 1.0 +641 1 optimizer.lr 0.0013429325451964388 +641 1 training.batch_size 2.0 +641 1 training.label_smoothing 0.001727879836180631 +641 2 model.embedding_dim 2.0 +641 2 optimizer.lr 0.026308029729298448 +641 2 training.batch_size 2.0 +641 2 training.label_smoothing 0.0073050026452626616 +641 3 model.embedding_dim 1.0 +641 3 optimizer.lr 0.05873305311267345 +641 3 training.batch_size 2.0 +641 3 training.label_smoothing 0.0028478409357342274 +641 4 model.embedding_dim 2.0 +641 4 optimizer.lr 0.009068639626698759 +641 4 training.batch_size 2.0 +641 4 training.label_smoothing 0.033390494802630205 +641 5 model.embedding_dim 1.0 +641 5 optimizer.lr 0.012989240602506686 +641 5 training.batch_size 1.0 +641 5 training.label_smoothing 0.007001512072967551 +641 6 model.embedding_dim 0.0 +641 6 optimizer.lr 0.002565337967105445 +641 6 training.batch_size 1.0 +641 6 training.label_smoothing 0.012380334552355165 +641 7 model.embedding_dim 0.0 +641 7 optimizer.lr 0.0011432576534684935 +641 7 training.batch_size 1.0 +641 7 training.label_smoothing 0.04380983522302492 +641 8 model.embedding_dim 0.0 +641 8 optimizer.lr 0.01651378229338377 +641 8 training.batch_size 1.0 +641 8 training.label_smoothing 0.40274968639045045 +641 9 model.embedding_dim 1.0 +641 9 optimizer.lr 
0.03269750266488095 +641 9 training.batch_size 1.0 +641 9 training.label_smoothing 0.1813311165927944 +641 1 dataset """fb15k237""" +641 1 model """simple""" +641 1 loss """bceaftersigmoid""" +641 1 regularizer """no""" +641 1 optimizer """adam""" +641 1 training_loop """lcwa""" +641 1 evaluator """rankbased""" +641 2 dataset """fb15k237""" +641 2 model """simple""" +641 2 loss """bceaftersigmoid""" +641 2 regularizer """no""" +641 2 optimizer """adam""" +641 2 training_loop """lcwa""" +641 2 evaluator """rankbased""" +641 3 dataset """fb15k237""" +641 3 model """simple""" +641 3 loss """bceaftersigmoid""" +641 3 regularizer """no""" +641 3 optimizer """adam""" +641 3 training_loop """lcwa""" +641 3 evaluator """rankbased""" +641 4 dataset """fb15k237""" +641 4 model """simple""" +641 4 loss """bceaftersigmoid""" +641 4 regularizer """no""" +641 4 optimizer """adam""" +641 4 training_loop """lcwa""" +641 4 evaluator """rankbased""" +641 5 dataset """fb15k237""" +641 5 model """simple""" +641 5 loss """bceaftersigmoid""" +641 5 regularizer """no""" +641 5 optimizer """adam""" +641 5 training_loop """lcwa""" +641 5 evaluator """rankbased""" +641 6 dataset """fb15k237""" +641 6 model """simple""" +641 6 loss """bceaftersigmoid""" +641 6 regularizer """no""" +641 6 optimizer """adam""" +641 6 training_loop """lcwa""" +641 6 evaluator """rankbased""" +641 7 dataset """fb15k237""" +641 7 model """simple""" +641 7 loss """bceaftersigmoid""" +641 7 regularizer """no""" +641 7 optimizer """adam""" +641 7 training_loop """lcwa""" +641 7 evaluator """rankbased""" +641 8 dataset """fb15k237""" +641 8 model """simple""" +641 8 loss """bceaftersigmoid""" +641 8 regularizer """no""" +641 8 optimizer """adam""" +641 8 training_loop """lcwa""" +641 8 evaluator """rankbased""" +641 9 dataset """fb15k237""" +641 9 model """simple""" +641 9 loss """bceaftersigmoid""" +641 9 regularizer """no""" +641 9 optimizer """adam""" +641 9 training_loop """lcwa""" +641 9 evaluator 
"""rankbased""" +642 1 model.embedding_dim 2.0 +642 1 optimizer.lr 0.0027784281030638377 +642 1 training.batch_size 2.0 +642 1 training.label_smoothing 0.027881086058820125 +642 2 model.embedding_dim 0.0 +642 2 optimizer.lr 0.0034656559515907795 +642 2 training.batch_size 2.0 +642 2 training.label_smoothing 0.8458485517837546 +642 3 model.embedding_dim 2.0 +642 3 optimizer.lr 0.0026838041368196313 +642 3 training.batch_size 2.0 +642 3 training.label_smoothing 0.00933595139589088 +642 4 model.embedding_dim 2.0 +642 4 optimizer.lr 0.018291542559442268 +642 4 training.batch_size 0.0 +642 4 training.label_smoothing 0.0026398354202668978 +642 5 model.embedding_dim 0.0 +642 5 optimizer.lr 0.011270318226924748 +642 5 training.batch_size 0.0 +642 5 training.label_smoothing 0.0017747977040924053 +642 6 model.embedding_dim 1.0 +642 6 optimizer.lr 0.003117726071292608 +642 6 training.batch_size 2.0 +642 6 training.label_smoothing 0.09415303159186147 +642 7 model.embedding_dim 0.0 +642 7 optimizer.lr 0.001479009614665233 +642 7 training.batch_size 1.0 +642 7 training.label_smoothing 0.6679866255537059 +642 8 model.embedding_dim 1.0 +642 8 optimizer.lr 0.009051404143927614 +642 8 training.batch_size 0.0 +642 8 training.label_smoothing 0.003566199635931064 +642 9 model.embedding_dim 1.0 +642 9 optimizer.lr 0.0070261079749462715 +642 9 training.batch_size 1.0 +642 9 training.label_smoothing 0.004917335441148495 +642 10 model.embedding_dim 2.0 +642 10 optimizer.lr 0.04665168688648675 +642 10 training.batch_size 1.0 +642 10 training.label_smoothing 0.018925620589132507 +642 11 model.embedding_dim 1.0 +642 11 optimizer.lr 0.002877141045917739 +642 11 training.batch_size 2.0 +642 11 training.label_smoothing 0.001123341460051062 +642 12 model.embedding_dim 2.0 +642 12 optimizer.lr 0.006941876027048007 +642 12 training.batch_size 1.0 +642 12 training.label_smoothing 0.0032346009642695854 +642 1 dataset """fb15k237""" +642 1 model """simple""" +642 1 loss """softplus""" +642 1 
regularizer """no""" +642 1 optimizer """adam""" +642 1 training_loop """lcwa""" +642 1 evaluator """rankbased""" +642 2 dataset """fb15k237""" +642 2 model """simple""" +642 2 loss """softplus""" +642 2 regularizer """no""" +642 2 optimizer """adam""" +642 2 training_loop """lcwa""" +642 2 evaluator """rankbased""" +642 3 dataset """fb15k237""" +642 3 model """simple""" +642 3 loss """softplus""" +642 3 regularizer """no""" +642 3 optimizer """adam""" +642 3 training_loop """lcwa""" +642 3 evaluator """rankbased""" +642 4 dataset """fb15k237""" +642 4 model """simple""" +642 4 loss """softplus""" +642 4 regularizer """no""" +642 4 optimizer """adam""" +642 4 training_loop """lcwa""" +642 4 evaluator """rankbased""" +642 5 dataset """fb15k237""" +642 5 model """simple""" +642 5 loss """softplus""" +642 5 regularizer """no""" +642 5 optimizer """adam""" +642 5 training_loop """lcwa""" +642 5 evaluator """rankbased""" +642 6 dataset """fb15k237""" +642 6 model """simple""" +642 6 loss """softplus""" +642 6 regularizer """no""" +642 6 optimizer """adam""" +642 6 training_loop """lcwa""" +642 6 evaluator """rankbased""" +642 7 dataset """fb15k237""" +642 7 model """simple""" +642 7 loss """softplus""" +642 7 regularizer """no""" +642 7 optimizer """adam""" +642 7 training_loop """lcwa""" +642 7 evaluator """rankbased""" +642 8 dataset """fb15k237""" +642 8 model """simple""" +642 8 loss """softplus""" +642 8 regularizer """no""" +642 8 optimizer """adam""" +642 8 training_loop """lcwa""" +642 8 evaluator """rankbased""" +642 9 dataset """fb15k237""" +642 9 model """simple""" +642 9 loss """softplus""" +642 9 regularizer """no""" +642 9 optimizer """adam""" +642 9 training_loop """lcwa""" +642 9 evaluator """rankbased""" +642 10 dataset """fb15k237""" +642 10 model """simple""" +642 10 loss """softplus""" +642 10 regularizer """no""" +642 10 optimizer """adam""" +642 10 training_loop """lcwa""" +642 10 evaluator """rankbased""" +642 11 dataset """fb15k237""" +642 11 
model """simple""" +642 11 loss """softplus""" +642 11 regularizer """no""" +642 11 optimizer """adam""" +642 11 training_loop """lcwa""" +642 11 evaluator """rankbased""" +642 12 dataset """fb15k237""" +642 12 model """simple""" +642 12 loss """softplus""" +642 12 regularizer """no""" +642 12 optimizer """adam""" +642 12 training_loop """lcwa""" +642 12 evaluator """rankbased""" +643 1 model.embedding_dim 2.0 +643 1 optimizer.lr 0.021246321479985238 +643 1 training.batch_size 1.0 +643 1 training.label_smoothing 0.007412650877703187 +643 2 model.embedding_dim 1.0 +643 2 optimizer.lr 0.005896284282613275 +643 2 training.batch_size 0.0 +643 2 training.label_smoothing 0.0010538752696071518 +643 3 model.embedding_dim 2.0 +643 3 optimizer.lr 0.0017998408288453043 +643 3 training.batch_size 2.0 +643 3 training.label_smoothing 0.007831617474485462 +643 4 model.embedding_dim 2.0 +643 4 optimizer.lr 0.08871960220358159 +643 4 training.batch_size 2.0 +643 4 training.label_smoothing 0.0012002304023911961 +643 5 model.embedding_dim 0.0 +643 5 optimizer.lr 0.020537249509827663 +643 5 training.batch_size 0.0 +643 5 training.label_smoothing 0.32697120497697013 +643 6 model.embedding_dim 2.0 +643 6 optimizer.lr 0.0034232329757678927 +643 6 training.batch_size 0.0 +643 6 training.label_smoothing 0.08537262159763204 +643 7 model.embedding_dim 2.0 +643 7 optimizer.lr 0.011154135939502112 +643 7 training.batch_size 1.0 +643 7 training.label_smoothing 0.0024191015759017437 +643 8 model.embedding_dim 2.0 +643 8 optimizer.lr 0.021025033036921374 +643 8 training.batch_size 0.0 +643 8 training.label_smoothing 0.8085804915917802 +643 9 model.embedding_dim 1.0 +643 9 optimizer.lr 0.0011221752628419004 +643 9 training.batch_size 2.0 +643 9 training.label_smoothing 0.0012452761031170707 +643 1 dataset """fb15k237""" +643 1 model """simple""" +643 1 loss """bceaftersigmoid""" +643 1 regularizer """no""" +643 1 optimizer """adam""" +643 1 training_loop """lcwa""" +643 1 evaluator """rankbased""" 
+643 2 dataset """fb15k237""" +643 2 model """simple""" +643 2 loss """bceaftersigmoid""" +643 2 regularizer """no""" +643 2 optimizer """adam""" +643 2 training_loop """lcwa""" +643 2 evaluator """rankbased""" +643 3 dataset """fb15k237""" +643 3 model """simple""" +643 3 loss """bceaftersigmoid""" +643 3 regularizer """no""" +643 3 optimizer """adam""" +643 3 training_loop """lcwa""" +643 3 evaluator """rankbased""" +643 4 dataset """fb15k237""" +643 4 model """simple""" +643 4 loss """bceaftersigmoid""" +643 4 regularizer """no""" +643 4 optimizer """adam""" +643 4 training_loop """lcwa""" +643 4 evaluator """rankbased""" +643 5 dataset """fb15k237""" +643 5 model """simple""" +643 5 loss """bceaftersigmoid""" +643 5 regularizer """no""" +643 5 optimizer """adam""" +643 5 training_loop """lcwa""" +643 5 evaluator """rankbased""" +643 6 dataset """fb15k237""" +643 6 model """simple""" +643 6 loss """bceaftersigmoid""" +643 6 regularizer """no""" +643 6 optimizer """adam""" +643 6 training_loop """lcwa""" +643 6 evaluator """rankbased""" +643 7 dataset """fb15k237""" +643 7 model """simple""" +643 7 loss """bceaftersigmoid""" +643 7 regularizer """no""" +643 7 optimizer """adam""" +643 7 training_loop """lcwa""" +643 7 evaluator """rankbased""" +643 8 dataset """fb15k237""" +643 8 model """simple""" +643 8 loss """bceaftersigmoid""" +643 8 regularizer """no""" +643 8 optimizer """adam""" +643 8 training_loop """lcwa""" +643 8 evaluator """rankbased""" +643 9 dataset """fb15k237""" +643 9 model """simple""" +643 9 loss """bceaftersigmoid""" +643 9 regularizer """no""" +643 9 optimizer """adam""" +643 9 training_loop """lcwa""" +643 9 evaluator """rankbased""" +644 1 model.embedding_dim 2.0 +644 1 optimizer.lr 0.06857321568943761 +644 1 training.batch_size 2.0 +644 1 training.label_smoothing 0.21029901543917703 +644 2 model.embedding_dim 0.0 +644 2 optimizer.lr 0.014457190064293392 +644 2 training.batch_size 0.0 +644 2 training.label_smoothing 0.1790975543761483 
+644 3 model.embedding_dim 0.0 +644 3 optimizer.lr 0.006379836194275247 +644 3 training.batch_size 1.0 +644 3 training.label_smoothing 0.05840561260436432 +644 4 model.embedding_dim 2.0 +644 4 optimizer.lr 0.02130913494419852 +644 4 training.batch_size 1.0 +644 4 training.label_smoothing 0.10735199100271363 +644 5 model.embedding_dim 0.0 +644 5 optimizer.lr 0.06125541266111846 +644 5 training.batch_size 0.0 +644 5 training.label_smoothing 0.005450817102475084 +644 6 model.embedding_dim 1.0 +644 6 optimizer.lr 0.014305630368584472 +644 6 training.batch_size 2.0 +644 6 training.label_smoothing 0.07344459760817935 +644 7 model.embedding_dim 2.0 +644 7 optimizer.lr 0.008211707092261732 +644 7 training.batch_size 1.0 +644 7 training.label_smoothing 0.15578410007826077 +644 8 model.embedding_dim 2.0 +644 8 optimizer.lr 0.03247908877037044 +644 8 training.batch_size 1.0 +644 8 training.label_smoothing 0.0569482439480821 +644 9 model.embedding_dim 0.0 +644 9 optimizer.lr 0.0020305333595843095 +644 9 training.batch_size 2.0 +644 9 training.label_smoothing 0.3861969091010202 +644 10 model.embedding_dim 1.0 +644 10 optimizer.lr 0.04189239709616541 +644 10 training.batch_size 2.0 +644 10 training.label_smoothing 0.08487273816602586 +644 11 model.embedding_dim 1.0 +644 11 optimizer.lr 0.010383899128965349 +644 11 training.batch_size 2.0 +644 11 training.label_smoothing 0.07524656334766595 +644 12 model.embedding_dim 0.0 +644 12 optimizer.lr 0.07872143136367467 +644 12 training.batch_size 2.0 +644 12 training.label_smoothing 0.0064531644057546945 +644 13 model.embedding_dim 0.0 +644 13 optimizer.lr 0.005146038484808138 +644 13 training.batch_size 1.0 +644 13 training.label_smoothing 0.25852742812884 +644 14 model.embedding_dim 0.0 +644 14 optimizer.lr 0.0030313079445637246 +644 14 training.batch_size 2.0 +644 14 training.label_smoothing 0.0016010017535543525 +644 15 model.embedding_dim 1.0 +644 15 optimizer.lr 0.06688120903573647 +644 15 training.batch_size 2.0 +644 15 
training.label_smoothing 0.09981340688558371 +644 16 model.embedding_dim 2.0 +644 16 optimizer.lr 0.016861009026184714 +644 16 training.batch_size 1.0 +644 16 training.label_smoothing 0.004814004051556708 +644 1 dataset """fb15k237""" +644 1 model """simple""" +644 1 loss """softplus""" +644 1 regularizer """no""" +644 1 optimizer """adam""" +644 1 training_loop """lcwa""" +644 1 evaluator """rankbased""" +644 2 dataset """fb15k237""" +644 2 model """simple""" +644 2 loss """softplus""" +644 2 regularizer """no""" +644 2 optimizer """adam""" +644 2 training_loop """lcwa""" +644 2 evaluator """rankbased""" +644 3 dataset """fb15k237""" +644 3 model """simple""" +644 3 loss """softplus""" +644 3 regularizer """no""" +644 3 optimizer """adam""" +644 3 training_loop """lcwa""" +644 3 evaluator """rankbased""" +644 4 dataset """fb15k237""" +644 4 model """simple""" +644 4 loss """softplus""" +644 4 regularizer """no""" +644 4 optimizer """adam""" +644 4 training_loop """lcwa""" +644 4 evaluator """rankbased""" +644 5 dataset """fb15k237""" +644 5 model """simple""" +644 5 loss """softplus""" +644 5 regularizer """no""" +644 5 optimizer """adam""" +644 5 training_loop """lcwa""" +644 5 evaluator """rankbased""" +644 6 dataset """fb15k237""" +644 6 model """simple""" +644 6 loss """softplus""" +644 6 regularizer """no""" +644 6 optimizer """adam""" +644 6 training_loop """lcwa""" +644 6 evaluator """rankbased""" +644 7 dataset """fb15k237""" +644 7 model """simple""" +644 7 loss """softplus""" +644 7 regularizer """no""" +644 7 optimizer """adam""" +644 7 training_loop """lcwa""" +644 7 evaluator """rankbased""" +644 8 dataset """fb15k237""" +644 8 model """simple""" +644 8 loss """softplus""" +644 8 regularizer """no""" +644 8 optimizer """adam""" +644 8 training_loop """lcwa""" +644 8 evaluator """rankbased""" +644 9 dataset """fb15k237""" +644 9 model """simple""" +644 9 loss """softplus""" +644 9 regularizer """no""" +644 9 optimizer """adam""" +644 9 training_loop 
"""lcwa""" +644 9 evaluator """rankbased""" +644 10 dataset """fb15k237""" +644 10 model """simple""" +644 10 loss """softplus""" +644 10 regularizer """no""" +644 10 optimizer """adam""" +644 10 training_loop """lcwa""" +644 10 evaluator """rankbased""" +644 11 dataset """fb15k237""" +644 11 model """simple""" +644 11 loss """softplus""" +644 11 regularizer """no""" +644 11 optimizer """adam""" +644 11 training_loop """lcwa""" +644 11 evaluator """rankbased""" +644 12 dataset """fb15k237""" +644 12 model """simple""" +644 12 loss """softplus""" +644 12 regularizer """no""" +644 12 optimizer """adam""" +644 12 training_loop """lcwa""" +644 12 evaluator """rankbased""" +644 13 dataset """fb15k237""" +644 13 model """simple""" +644 13 loss """softplus""" +644 13 regularizer """no""" +644 13 optimizer """adam""" +644 13 training_loop """lcwa""" +644 13 evaluator """rankbased""" +644 14 dataset """fb15k237""" +644 14 model """simple""" +644 14 loss """softplus""" +644 14 regularizer """no""" +644 14 optimizer """adam""" +644 14 training_loop """lcwa""" +644 14 evaluator """rankbased""" +644 15 dataset """fb15k237""" +644 15 model """simple""" +644 15 loss """softplus""" +644 15 regularizer """no""" +644 15 optimizer """adam""" +644 15 training_loop """lcwa""" +644 15 evaluator """rankbased""" +644 16 dataset """fb15k237""" +644 16 model """simple""" +644 16 loss """softplus""" +644 16 regularizer """no""" +644 16 optimizer """adam""" +644 16 training_loop """lcwa""" +644 16 evaluator """rankbased""" +645 1 model.embedding_dim 2.0 +645 1 optimizer.lr 0.031330133233681286 +645 1 training.batch_size 2.0 +645 1 training.label_smoothing 0.03441662795727597 +645 2 model.embedding_dim 1.0 +645 2 optimizer.lr 0.045806057351938566 +645 2 training.batch_size 0.0 +645 2 training.label_smoothing 0.0065217997436954125 +645 3 model.embedding_dim 0.0 +645 3 optimizer.lr 0.026320135772154432 +645 3 training.batch_size 0.0 +645 3 training.label_smoothing 0.14873558924400482 +645 4 
model.embedding_dim 2.0 +645 4 optimizer.lr 0.029851771787123994 +645 4 training.batch_size 2.0 +645 4 training.label_smoothing 0.07591750030265446 +645 5 model.embedding_dim 0.0 +645 5 optimizer.lr 0.04684673209490879 +645 5 training.batch_size 0.0 +645 5 training.label_smoothing 0.007304884264687723 +645 6 model.embedding_dim 0.0 +645 6 optimizer.lr 0.0071756233891528305 +645 6 training.batch_size 2.0 +645 6 training.label_smoothing 0.0018657871466217196 +645 1 dataset """fb15k237""" +645 1 model """simple""" +645 1 loss """crossentropy""" +645 1 regularizer """no""" +645 1 optimizer """adam""" +645 1 training_loop """lcwa""" +645 1 evaluator """rankbased""" +645 2 dataset """fb15k237""" +645 2 model """simple""" +645 2 loss """crossentropy""" +645 2 regularizer """no""" +645 2 optimizer """adam""" +645 2 training_loop """lcwa""" +645 2 evaluator """rankbased""" +645 3 dataset """fb15k237""" +645 3 model """simple""" +645 3 loss """crossentropy""" +645 3 regularizer """no""" +645 3 optimizer """adam""" +645 3 training_loop """lcwa""" +645 3 evaluator """rankbased""" +645 4 dataset """fb15k237""" +645 4 model """simple""" +645 4 loss """crossentropy""" +645 4 regularizer """no""" +645 4 optimizer """adam""" +645 4 training_loop """lcwa""" +645 4 evaluator """rankbased""" +645 5 dataset """fb15k237""" +645 5 model """simple""" +645 5 loss """crossentropy""" +645 5 regularizer """no""" +645 5 optimizer """adam""" +645 5 training_loop """lcwa""" +645 5 evaluator """rankbased""" +645 6 dataset """fb15k237""" +645 6 model """simple""" +645 6 loss """crossentropy""" +645 6 regularizer """no""" +645 6 optimizer """adam""" +645 6 training_loop """lcwa""" +645 6 evaluator """rankbased""" +646 1 model.embedding_dim 0.0 +646 1 optimizer.lr 0.03438518819477475 +646 1 training.batch_size 1.0 +646 1 training.label_smoothing 0.012859578117208512 +646 2 model.embedding_dim 0.0 +646 2 optimizer.lr 0.010078587359661885 +646 2 training.batch_size 2.0 +646 2 training.label_smoothing 
0.022702492416931206 +646 3 model.embedding_dim 2.0 +646 3 optimizer.lr 0.00227990476836153 +646 3 training.batch_size 0.0 +646 3 training.label_smoothing 0.010409916173198063 +646 4 model.embedding_dim 0.0 +646 4 optimizer.lr 0.009362965908905884 +646 4 training.batch_size 2.0 +646 4 training.label_smoothing 0.04913963116381507 +646 5 model.embedding_dim 2.0 +646 5 optimizer.lr 0.002106883275710471 +646 5 training.batch_size 1.0 +646 5 training.label_smoothing 0.005819720037166547 +646 6 model.embedding_dim 1.0 +646 6 optimizer.lr 0.0028585461526689686 +646 6 training.batch_size 0.0 +646 6 training.label_smoothing 0.6195566497104105 +646 7 model.embedding_dim 1.0 +646 7 optimizer.lr 0.01573820477898104 +646 7 training.batch_size 1.0 +646 7 training.label_smoothing 0.15632409892044125 +646 8 model.embedding_dim 1.0 +646 8 optimizer.lr 0.07880001328244488 +646 8 training.batch_size 2.0 +646 8 training.label_smoothing 0.07675570139615016 +646 9 model.embedding_dim 1.0 +646 9 optimizer.lr 0.002680582825579882 +646 9 training.batch_size 1.0 +646 9 training.label_smoothing 0.22660913349688433 +646 10 model.embedding_dim 1.0 +646 10 optimizer.lr 0.024397817586047074 +646 10 training.batch_size 1.0 +646 10 training.label_smoothing 0.05476881311486755 +646 11 model.embedding_dim 2.0 +646 11 optimizer.lr 0.005543313893780478 +646 11 training.batch_size 1.0 +646 11 training.label_smoothing 0.06163991437890689 +646 1 dataset """fb15k237""" +646 1 model """simple""" +646 1 loss """crossentropy""" +646 1 regularizer """no""" +646 1 optimizer """adam""" +646 1 training_loop """lcwa""" +646 1 evaluator """rankbased""" +646 2 dataset """fb15k237""" +646 2 model """simple""" +646 2 loss """crossentropy""" +646 2 regularizer """no""" +646 2 optimizer """adam""" +646 2 training_loop """lcwa""" +646 2 evaluator """rankbased""" +646 3 dataset """fb15k237""" +646 3 model """simple""" +646 3 loss """crossentropy""" +646 3 regularizer """no""" +646 3 optimizer """adam""" +646 3 
training_loop """lcwa""" +646 3 evaluator """rankbased""" +646 4 dataset """fb15k237""" +646 4 model """simple""" +646 4 loss """crossentropy""" +646 4 regularizer """no""" +646 4 optimizer """adam""" +646 4 training_loop """lcwa""" +646 4 evaluator """rankbased""" +646 5 dataset """fb15k237""" +646 5 model """simple""" +646 5 loss """crossentropy""" +646 5 regularizer """no""" +646 5 optimizer """adam""" +646 5 training_loop """lcwa""" +646 5 evaluator """rankbased""" +646 6 dataset """fb15k237""" +646 6 model """simple""" +646 6 loss """crossentropy""" +646 6 regularizer """no""" +646 6 optimizer """adam""" +646 6 training_loop """lcwa""" +646 6 evaluator """rankbased""" +646 7 dataset """fb15k237""" +646 7 model """simple""" +646 7 loss """crossentropy""" +646 7 regularizer """no""" +646 7 optimizer """adam""" +646 7 training_loop """lcwa""" +646 7 evaluator """rankbased""" +646 8 dataset """fb15k237""" +646 8 model """simple""" +646 8 loss """crossentropy""" +646 8 regularizer """no""" +646 8 optimizer """adam""" +646 8 training_loop """lcwa""" +646 8 evaluator """rankbased""" +646 9 dataset """fb15k237""" +646 9 model """simple""" +646 9 loss """crossentropy""" +646 9 regularizer """no""" +646 9 optimizer """adam""" +646 9 training_loop """lcwa""" +646 9 evaluator """rankbased""" +646 10 dataset """fb15k237""" +646 10 model """simple""" +646 10 loss """crossentropy""" +646 10 regularizer """no""" +646 10 optimizer """adam""" +646 10 training_loop """lcwa""" +646 10 evaluator """rankbased""" +646 11 dataset """fb15k237""" +646 11 model """simple""" +646 11 loss """crossentropy""" +646 11 regularizer """no""" +646 11 optimizer """adam""" +646 11 training_loop """lcwa""" +646 11 evaluator """rankbased""" +647 1 model.embedding_dim 2.0 +647 1 training.batch_size 0.0 +647 1 training.label_smoothing 0.4383033465610856 +647 2 model.embedding_dim 0.0 +647 2 training.batch_size 0.0 +647 2 training.label_smoothing 0.018854808424086135 +647 3 model.embedding_dim 2.0 +647 
3 training.batch_size 0.0 +647 3 training.label_smoothing 0.05703689136471243 +647 4 model.embedding_dim 2.0 +647 4 training.batch_size 2.0 +647 4 training.label_smoothing 0.03933502878539389 +647 5 model.embedding_dim 2.0 +647 5 training.batch_size 1.0 +647 5 training.label_smoothing 0.029429928457628766 +647 6 model.embedding_dim 0.0 +647 6 training.batch_size 1.0 +647 6 training.label_smoothing 0.00671364660077158 +647 7 model.embedding_dim 1.0 +647 7 training.batch_size 1.0 +647 7 training.label_smoothing 0.07947922685992391 +647 8 model.embedding_dim 1.0 +647 8 training.batch_size 2.0 +647 8 training.label_smoothing 0.1994056892532126 +647 9 model.embedding_dim 1.0 +647 9 training.batch_size 0.0 +647 9 training.label_smoothing 0.26180357658503695 +647 10 model.embedding_dim 0.0 +647 10 training.batch_size 1.0 +647 10 training.label_smoothing 0.0035332309153841158 +647 11 model.embedding_dim 2.0 +647 11 training.batch_size 2.0 +647 11 training.label_smoothing 0.09262749254128476 +647 12 model.embedding_dim 2.0 +647 12 training.batch_size 0.0 +647 12 training.label_smoothing 0.11613598214185834 +647 13 model.embedding_dim 2.0 +647 13 training.batch_size 0.0 +647 13 training.label_smoothing 0.015339480729262613 +647 14 model.embedding_dim 0.0 +647 14 training.batch_size 1.0 +647 14 training.label_smoothing 0.001906272141505565 +647 15 model.embedding_dim 0.0 +647 15 training.batch_size 2.0 +647 15 training.label_smoothing 0.602489692676098 +647 16 model.embedding_dim 0.0 +647 16 training.batch_size 1.0 +647 16 training.label_smoothing 0.003126693665225609 +647 17 model.embedding_dim 2.0 +647 17 training.batch_size 2.0 +647 17 training.label_smoothing 0.004546869733589984 +647 18 model.embedding_dim 0.0 +647 18 training.batch_size 0.0 +647 18 training.label_smoothing 0.002939262063781292 +647 19 model.embedding_dim 2.0 +647 19 training.batch_size 2.0 +647 19 training.label_smoothing 0.24101321253330668 +647 20 model.embedding_dim 0.0 +647 20 training.batch_size 
1.0 +647 20 training.label_smoothing 0.0031811098801842012 +647 21 model.embedding_dim 2.0 +647 21 training.batch_size 2.0 +647 21 training.label_smoothing 0.004857117059638168 +647 22 model.embedding_dim 0.0 +647 22 training.batch_size 2.0 +647 22 training.label_smoothing 0.005266924979957693 +647 23 model.embedding_dim 1.0 +647 23 training.batch_size 1.0 +647 23 training.label_smoothing 0.004250319785274224 +647 24 model.embedding_dim 0.0 +647 24 training.batch_size 1.0 +647 24 training.label_smoothing 0.006785918361576345 +647 25 model.embedding_dim 0.0 +647 25 training.batch_size 1.0 +647 25 training.label_smoothing 0.31429397234585876 +647 26 model.embedding_dim 1.0 +647 26 training.batch_size 0.0 +647 26 training.label_smoothing 0.01697831828092316 +647 27 model.embedding_dim 0.0 +647 27 training.batch_size 0.0 +647 27 training.label_smoothing 0.18594317923458983 +647 28 model.embedding_dim 1.0 +647 28 training.batch_size 0.0 +647 28 training.label_smoothing 0.0013988868940026783 +647 29 model.embedding_dim 2.0 +647 29 training.batch_size 2.0 +647 29 training.label_smoothing 0.011162027590310302 +647 30 model.embedding_dim 0.0 +647 30 training.batch_size 2.0 +647 30 training.label_smoothing 0.0010621016841911076 +647 31 model.embedding_dim 2.0 +647 31 training.batch_size 2.0 +647 31 training.label_smoothing 0.0022865921363831683 +647 32 model.embedding_dim 1.0 +647 32 training.batch_size 2.0 +647 32 training.label_smoothing 0.012256335163454275 +647 33 model.embedding_dim 0.0 +647 33 training.batch_size 2.0 +647 33 training.label_smoothing 0.009536956911078646 +647 34 model.embedding_dim 2.0 +647 34 training.batch_size 0.0 +647 34 training.label_smoothing 0.014874770620343918 +647 35 model.embedding_dim 1.0 +647 35 training.batch_size 0.0 +647 35 training.label_smoothing 0.3610861811586056 +647 36 model.embedding_dim 1.0 +647 36 training.batch_size 1.0 +647 36 training.label_smoothing 0.011539014257852408 +647 37 model.embedding_dim 1.0 +647 37 
training.batch_size 2.0 +647 37 training.label_smoothing 0.010226602303859769 +647 38 model.embedding_dim 1.0 +647 38 training.batch_size 2.0 +647 38 training.label_smoothing 0.553527263868715 +647 39 model.embedding_dim 2.0 +647 39 training.batch_size 2.0 +647 39 training.label_smoothing 0.0187446953418074 +647 40 model.embedding_dim 1.0 +647 40 training.batch_size 1.0 +647 40 training.label_smoothing 0.6381631613488198 +647 41 model.embedding_dim 1.0 +647 41 training.batch_size 0.0 +647 41 training.label_smoothing 0.3144205116521789 +647 42 model.embedding_dim 1.0 +647 42 training.batch_size 2.0 +647 42 training.label_smoothing 0.01362071665736922 +647 43 model.embedding_dim 2.0 +647 43 training.batch_size 0.0 +647 43 training.label_smoothing 0.022459787854549208 +647 44 model.embedding_dim 1.0 +647 44 training.batch_size 1.0 +647 44 training.label_smoothing 0.4290510482812108 +647 45 model.embedding_dim 1.0 +647 45 training.batch_size 1.0 +647 45 training.label_smoothing 0.4312045881659232 +647 46 model.embedding_dim 2.0 +647 46 training.batch_size 2.0 +647 46 training.label_smoothing 0.09352263014968944 +647 47 model.embedding_dim 1.0 +647 47 training.batch_size 1.0 +647 47 training.label_smoothing 0.7364779675354559 +647 48 model.embedding_dim 0.0 +647 48 training.batch_size 1.0 +647 48 training.label_smoothing 0.14906976466386218 +647 49 model.embedding_dim 1.0 +647 49 training.batch_size 1.0 +647 49 training.label_smoothing 0.16797239004982495 +647 50 model.embedding_dim 2.0 +647 50 training.batch_size 2.0 +647 50 training.label_smoothing 0.04150547637107905 +647 51 model.embedding_dim 2.0 +647 51 training.batch_size 1.0 +647 51 training.label_smoothing 0.5992653386626461 +647 52 model.embedding_dim 2.0 +647 52 training.batch_size 0.0 +647 52 training.label_smoothing 0.5868483238924993 +647 53 model.embedding_dim 2.0 +647 53 training.batch_size 2.0 +647 53 training.label_smoothing 0.16734748086412937 +647 54 model.embedding_dim 2.0 +647 54 
training.batch_size 2.0 +647 54 training.label_smoothing 0.10024546244783479 +647 55 model.embedding_dim 2.0 +647 55 training.batch_size 2.0 +647 55 training.label_smoothing 0.12352274119764048 +647 56 model.embedding_dim 0.0 +647 56 training.batch_size 1.0 +647 56 training.label_smoothing 0.0323914991727008 +647 57 model.embedding_dim 2.0 +647 57 training.batch_size 1.0 +647 57 training.label_smoothing 0.7006313355045103 +647 58 model.embedding_dim 2.0 +647 58 training.batch_size 1.0 +647 58 training.label_smoothing 0.05251829442144814 +647 59 model.embedding_dim 2.0 +647 59 training.batch_size 1.0 +647 59 training.label_smoothing 0.023895744291401674 +647 60 model.embedding_dim 2.0 +647 60 training.batch_size 1.0 +647 60 training.label_smoothing 0.44315574629354004 +647 61 model.embedding_dim 0.0 +647 61 training.batch_size 1.0 +647 61 training.label_smoothing 0.011204040121313917 +647 62 model.embedding_dim 2.0 +647 62 training.batch_size 2.0 +647 62 training.label_smoothing 0.036297270478056524 +647 63 model.embedding_dim 0.0 +647 63 training.batch_size 1.0 +647 63 training.label_smoothing 0.2644329732056339 +647 64 model.embedding_dim 0.0 +647 64 training.batch_size 2.0 +647 64 training.label_smoothing 0.0024936488074878147 +647 65 model.embedding_dim 0.0 +647 65 training.batch_size 1.0 +647 65 training.label_smoothing 0.8114161162192731 +647 66 model.embedding_dim 0.0 +647 66 training.batch_size 0.0 +647 66 training.label_smoothing 0.574625091860875 +647 67 model.embedding_dim 1.0 +647 67 training.batch_size 2.0 +647 67 training.label_smoothing 0.02773238079315944 +647 68 model.embedding_dim 0.0 +647 68 training.batch_size 1.0 +647 68 training.label_smoothing 0.07535619496307132 +647 69 model.embedding_dim 1.0 +647 69 training.batch_size 2.0 +647 69 training.label_smoothing 0.001880787598106521 +647 70 model.embedding_dim 2.0 +647 70 training.batch_size 1.0 +647 70 training.label_smoothing 0.02853475395706392 +647 71 model.embedding_dim 2.0 +647 71 
training.batch_size 0.0 +647 71 training.label_smoothing 0.010216034335770938 +647 72 model.embedding_dim 0.0 +647 72 training.batch_size 2.0 +647 72 training.label_smoothing 0.2088161753021959 +647 73 model.embedding_dim 1.0 +647 73 training.batch_size 1.0 +647 73 training.label_smoothing 0.038124669995749275 +647 74 model.embedding_dim 2.0 +647 74 training.batch_size 2.0 +647 74 training.label_smoothing 0.2515259138074168 +647 75 model.embedding_dim 0.0 +647 75 training.batch_size 2.0 +647 75 training.label_smoothing 0.10670200255402532 +647 76 model.embedding_dim 0.0 +647 76 training.batch_size 2.0 +647 76 training.label_smoothing 0.007568698587828305 +647 77 model.embedding_dim 2.0 +647 77 training.batch_size 0.0 +647 77 training.label_smoothing 0.9260794461971866 +647 78 model.embedding_dim 2.0 +647 78 training.batch_size 1.0 +647 78 training.label_smoothing 0.016840637864348862 +647 79 model.embedding_dim 2.0 +647 79 training.batch_size 1.0 +647 79 training.label_smoothing 0.00438385528491396 +647 80 model.embedding_dim 1.0 +647 80 training.batch_size 2.0 +647 80 training.label_smoothing 0.55055218442289 +647 81 model.embedding_dim 0.0 +647 81 training.batch_size 2.0 +647 81 training.label_smoothing 0.0011528427976304424 +647 82 model.embedding_dim 1.0 +647 82 training.batch_size 2.0 +647 82 training.label_smoothing 0.011563490045964537 +647 83 model.embedding_dim 1.0 +647 83 training.batch_size 1.0 +647 83 training.label_smoothing 0.004435083845551371 +647 84 model.embedding_dim 1.0 +647 84 training.batch_size 2.0 +647 84 training.label_smoothing 0.0014121758853479212 +647 85 model.embedding_dim 0.0 +647 85 training.batch_size 0.0 +647 85 training.label_smoothing 0.0011126095964720194 +647 86 model.embedding_dim 1.0 +647 86 training.batch_size 1.0 +647 86 training.label_smoothing 0.466555846403396 +647 87 model.embedding_dim 2.0 +647 87 training.batch_size 1.0 +647 87 training.label_smoothing 0.049845367665714325 +647 88 model.embedding_dim 0.0 +647 88 
training.batch_size 1.0 +647 88 training.label_smoothing 0.21603620990364503 +647 89 model.embedding_dim 1.0 +647 89 training.batch_size 0.0 +647 89 training.label_smoothing 0.02744820134830055 +647 90 model.embedding_dim 0.0 +647 90 training.batch_size 2.0 +647 90 training.label_smoothing 0.4008942486566257 +647 91 model.embedding_dim 0.0 +647 91 training.batch_size 0.0 +647 91 training.label_smoothing 0.9361409126799272 +647 92 model.embedding_dim 1.0 +647 92 training.batch_size 1.0 +647 92 training.label_smoothing 0.003685651578467652 +647 93 model.embedding_dim 0.0 +647 93 training.batch_size 0.0 +647 93 training.label_smoothing 0.26605319801974925 +647 94 model.embedding_dim 1.0 +647 94 training.batch_size 0.0 +647 94 training.label_smoothing 0.0036110677233609635 +647 95 model.embedding_dim 1.0 +647 95 training.batch_size 2.0 +647 95 training.label_smoothing 0.003205960866907348 +647 96 model.embedding_dim 2.0 +647 96 training.batch_size 1.0 +647 96 training.label_smoothing 0.004443627638315994 +647 97 model.embedding_dim 1.0 +647 97 training.batch_size 1.0 +647 97 training.label_smoothing 0.061137380640890644 +647 98 model.embedding_dim 0.0 +647 98 training.batch_size 1.0 +647 98 training.label_smoothing 0.5228565189158546 +647 99 model.embedding_dim 1.0 +647 99 training.batch_size 1.0 +647 99 training.label_smoothing 0.0031576240993579324 +647 100 model.embedding_dim 0.0 +647 100 training.batch_size 0.0 +647 100 training.label_smoothing 0.11250865885099211 +647 1 dataset """kinships""" +647 1 model """simple""" +647 1 loss """bceaftersigmoid""" +647 1 regularizer """no""" +647 1 optimizer """adadelta""" +647 1 training_loop """lcwa""" +647 1 evaluator """rankbased""" +647 2 dataset """kinships""" +647 2 model """simple""" +647 2 loss """bceaftersigmoid""" +647 2 regularizer """no""" +647 2 optimizer """adadelta""" +647 2 training_loop """lcwa""" +647 2 evaluator """rankbased""" +647 3 dataset """kinships""" +647 3 model """simple""" +647 3 loss 
"""bceaftersigmoid""" +647 3 regularizer """no""" +647 3 optimizer """adadelta""" +647 3 training_loop """lcwa""" +647 3 evaluator """rankbased""" +647 4 dataset """kinships""" +647 4 model """simple""" +647 4 loss """bceaftersigmoid""" +647 4 regularizer """no""" +647 4 optimizer """adadelta""" +647 4 training_loop """lcwa""" +647 4 evaluator """rankbased""" +647 5 dataset """kinships""" +647 5 model """simple""" +647 5 loss """bceaftersigmoid""" +647 5 regularizer """no""" +647 5 optimizer """adadelta""" +647 5 training_loop """lcwa""" +647 5 evaluator """rankbased""" +647 6 dataset """kinships""" +647 6 model """simple""" +647 6 loss """bceaftersigmoid""" +647 6 regularizer """no""" +647 6 optimizer """adadelta""" +647 6 training_loop """lcwa""" +647 6 evaluator """rankbased""" +647 7 dataset """kinships""" +647 7 model """simple""" +647 7 loss """bceaftersigmoid""" +647 7 regularizer """no""" +647 7 optimizer """adadelta""" +647 7 training_loop """lcwa""" +647 7 evaluator """rankbased""" +647 8 dataset """kinships""" +647 8 model """simple""" +647 8 loss """bceaftersigmoid""" +647 8 regularizer """no""" +647 8 optimizer """adadelta""" +647 8 training_loop """lcwa""" +647 8 evaluator """rankbased""" +647 9 dataset """kinships""" +647 9 model """simple""" +647 9 loss """bceaftersigmoid""" +647 9 regularizer """no""" +647 9 optimizer """adadelta""" +647 9 training_loop """lcwa""" +647 9 evaluator """rankbased""" +647 10 dataset """kinships""" +647 10 model """simple""" +647 10 loss """bceaftersigmoid""" +647 10 regularizer """no""" +647 10 optimizer """adadelta""" +647 10 training_loop """lcwa""" +647 10 evaluator """rankbased""" +647 11 dataset """kinships""" +647 11 model """simple""" +647 11 loss """bceaftersigmoid""" +647 11 regularizer """no""" +647 11 optimizer """adadelta""" +647 11 training_loop """lcwa""" +647 11 evaluator """rankbased""" +647 12 dataset """kinships""" +647 12 model """simple""" +647 12 loss """bceaftersigmoid""" +647 12 regularizer 
"""no""" +647 12 optimizer """adadelta""" +647 12 training_loop """lcwa""" +647 12 evaluator """rankbased""" +647 13 dataset """kinships""" +647 13 model """simple""" +647 13 loss """bceaftersigmoid""" +647 13 regularizer """no""" +647 13 optimizer """adadelta""" +647 13 training_loop """lcwa""" +647 13 evaluator """rankbased""" +647 14 dataset """kinships""" +647 14 model """simple""" +647 14 loss """bceaftersigmoid""" +647 14 regularizer """no""" +647 14 optimizer """adadelta""" +647 14 training_loop """lcwa""" +647 14 evaluator """rankbased""" +647 15 dataset """kinships""" +647 15 model """simple""" +647 15 loss """bceaftersigmoid""" +647 15 regularizer """no""" +647 15 optimizer """adadelta""" +647 15 training_loop """lcwa""" +647 15 evaluator """rankbased""" +647 16 dataset """kinships""" +647 16 model """simple""" +647 16 loss """bceaftersigmoid""" +647 16 regularizer """no""" +647 16 optimizer """adadelta""" +647 16 training_loop """lcwa""" +647 16 evaluator """rankbased""" +647 17 dataset """kinships""" +647 17 model """simple""" +647 17 loss """bceaftersigmoid""" +647 17 regularizer """no""" +647 17 optimizer """adadelta""" +647 17 training_loop """lcwa""" +647 17 evaluator """rankbased""" +647 18 dataset """kinships""" +647 18 model """simple""" +647 18 loss """bceaftersigmoid""" +647 18 regularizer """no""" +647 18 optimizer """adadelta""" +647 18 training_loop """lcwa""" +647 18 evaluator """rankbased""" +647 19 dataset """kinships""" +647 19 model """simple""" +647 19 loss """bceaftersigmoid""" +647 19 regularizer """no""" +647 19 optimizer """adadelta""" +647 19 training_loop """lcwa""" +647 19 evaluator """rankbased""" +647 20 dataset """kinships""" +647 20 model """simple""" +647 20 loss """bceaftersigmoid""" +647 20 regularizer """no""" +647 20 optimizer """adadelta""" +647 20 training_loop """lcwa""" +647 20 evaluator """rankbased""" +647 21 dataset """kinships""" +647 21 model """simple""" +647 21 loss """bceaftersigmoid""" +647 21 regularizer 
"""no""" +647 21 optimizer """adadelta""" +647 21 training_loop """lcwa""" +647 21 evaluator """rankbased""" +647 22 dataset """kinships""" +647 22 model """simple""" +647 22 loss """bceaftersigmoid""" +647 22 regularizer """no""" +647 22 optimizer """adadelta""" +647 22 training_loop """lcwa""" +647 22 evaluator """rankbased""" +647 23 dataset """kinships""" +647 23 model """simple""" +647 23 loss """bceaftersigmoid""" +647 23 regularizer """no""" +647 23 optimizer """adadelta""" +647 23 training_loop """lcwa""" +647 23 evaluator """rankbased""" +647 24 dataset """kinships""" +647 24 model """simple""" +647 24 loss """bceaftersigmoid""" +647 24 regularizer """no""" +647 24 optimizer """adadelta""" +647 24 training_loop """lcwa""" +647 24 evaluator """rankbased""" +647 25 dataset """kinships""" +647 25 model """simple""" +647 25 loss """bceaftersigmoid""" +647 25 regularizer """no""" +647 25 optimizer """adadelta""" +647 25 training_loop """lcwa""" +647 25 evaluator """rankbased""" +647 26 dataset """kinships""" +647 26 model """simple""" +647 26 loss """bceaftersigmoid""" +647 26 regularizer """no""" +647 26 optimizer """adadelta""" +647 26 training_loop """lcwa""" +647 26 evaluator """rankbased""" +647 27 dataset """kinships""" +647 27 model """simple""" +647 27 loss """bceaftersigmoid""" +647 27 regularizer """no""" +647 27 optimizer """adadelta""" +647 27 training_loop """lcwa""" +647 27 evaluator """rankbased""" +647 28 dataset """kinships""" +647 28 model """simple""" +647 28 loss """bceaftersigmoid""" +647 28 regularizer """no""" +647 28 optimizer """adadelta""" +647 28 training_loop """lcwa""" +647 28 evaluator """rankbased""" +647 29 dataset """kinships""" +647 29 model """simple""" +647 29 loss """bceaftersigmoid""" +647 29 regularizer """no""" +647 29 optimizer """adadelta""" +647 29 training_loop """lcwa""" +647 29 evaluator """rankbased""" +647 30 dataset """kinships""" +647 30 model """simple""" +647 30 loss """bceaftersigmoid""" +647 30 regularizer 
"""no""" +647 30 optimizer """adadelta""" +647 30 training_loop """lcwa""" +647 30 evaluator """rankbased""" +647 31 dataset """kinships""" +647 31 model """simple""" +647 31 loss """bceaftersigmoid""" +647 31 regularizer """no""" +647 31 optimizer """adadelta""" +647 31 training_loop """lcwa""" +647 31 evaluator """rankbased""" +647 32 dataset """kinships""" +647 32 model """simple""" +647 32 loss """bceaftersigmoid""" +647 32 regularizer """no""" +647 32 optimizer """adadelta""" +647 32 training_loop """lcwa""" +647 32 evaluator """rankbased""" +647 33 dataset """kinships""" +647 33 model """simple""" +647 33 loss """bceaftersigmoid""" +647 33 regularizer """no""" +647 33 optimizer """adadelta""" +647 33 training_loop """lcwa""" +647 33 evaluator """rankbased""" +647 34 dataset """kinships""" +647 34 model """simple""" +647 34 loss """bceaftersigmoid""" +647 34 regularizer """no""" +647 34 optimizer """adadelta""" +647 34 training_loop """lcwa""" +647 34 evaluator """rankbased""" +647 35 dataset """kinships""" +647 35 model """simple""" +647 35 loss """bceaftersigmoid""" +647 35 regularizer """no""" +647 35 optimizer """adadelta""" +647 35 training_loop """lcwa""" +647 35 evaluator """rankbased""" +647 36 dataset """kinships""" +647 36 model """simple""" +647 36 loss """bceaftersigmoid""" +647 36 regularizer """no""" +647 36 optimizer """adadelta""" +647 36 training_loop """lcwa""" +647 36 evaluator """rankbased""" +647 37 dataset """kinships""" +647 37 model """simple""" +647 37 loss """bceaftersigmoid""" +647 37 regularizer """no""" +647 37 optimizer """adadelta""" +647 37 training_loop """lcwa""" +647 37 evaluator """rankbased""" +647 38 dataset """kinships""" +647 38 model """simple""" +647 38 loss """bceaftersigmoid""" +647 38 regularizer """no""" +647 38 optimizer """adadelta""" +647 38 training_loop """lcwa""" +647 38 evaluator """rankbased""" +647 39 dataset """kinships""" +647 39 model """simple""" +647 39 loss """bceaftersigmoid""" +647 39 regularizer 
"""no""" +647 39 optimizer """adadelta""" +647 39 training_loop """lcwa""" +647 39 evaluator """rankbased""" +647 40 dataset """kinships""" +647 40 model """simple""" +647 40 loss """bceaftersigmoid""" +647 40 regularizer """no""" +647 40 optimizer """adadelta""" +647 40 training_loop """lcwa""" +647 40 evaluator """rankbased""" +647 41 dataset """kinships""" +647 41 model """simple""" +647 41 loss """bceaftersigmoid""" +647 41 regularizer """no""" +647 41 optimizer """adadelta""" +647 41 training_loop """lcwa""" +647 41 evaluator """rankbased""" +647 42 dataset """kinships""" +647 42 model """simple""" +647 42 loss """bceaftersigmoid""" +647 42 regularizer """no""" +647 42 optimizer """adadelta""" +647 42 training_loop """lcwa""" +647 42 evaluator """rankbased""" +647 43 dataset """kinships""" +647 43 model """simple""" +647 43 loss """bceaftersigmoid""" +647 43 regularizer """no""" +647 43 optimizer """adadelta""" +647 43 training_loop """lcwa""" +647 43 evaluator """rankbased""" +647 44 dataset """kinships""" +647 44 model """simple""" +647 44 loss """bceaftersigmoid""" +647 44 regularizer """no""" +647 44 optimizer """adadelta""" +647 44 training_loop """lcwa""" +647 44 evaluator """rankbased""" +647 45 dataset """kinships""" +647 45 model """simple""" +647 45 loss """bceaftersigmoid""" +647 45 regularizer """no""" +647 45 optimizer """adadelta""" +647 45 training_loop """lcwa""" +647 45 evaluator """rankbased""" +647 46 dataset """kinships""" +647 46 model """simple""" +647 46 loss """bceaftersigmoid""" +647 46 regularizer """no""" +647 46 optimizer """adadelta""" +647 46 training_loop """lcwa""" +647 46 evaluator """rankbased""" +647 47 dataset """kinships""" +647 47 model """simple""" +647 47 loss """bceaftersigmoid""" +647 47 regularizer """no""" +647 47 optimizer """adadelta""" +647 47 training_loop """lcwa""" +647 47 evaluator """rankbased""" +647 48 dataset """kinships""" +647 48 model """simple""" +647 48 loss """bceaftersigmoid""" +647 48 regularizer 
"""no""" +647 48 optimizer """adadelta""" +647 48 training_loop """lcwa""" +647 48 evaluator """rankbased""" +647 49 dataset """kinships""" +647 49 model """simple""" +647 49 loss """bceaftersigmoid""" +647 49 regularizer """no""" +647 49 optimizer """adadelta""" +647 49 training_loop """lcwa""" +647 49 evaluator """rankbased""" +647 50 dataset """kinships""" +647 50 model """simple""" +647 50 loss """bceaftersigmoid""" +647 50 regularizer """no""" +647 50 optimizer """adadelta""" +647 50 training_loop """lcwa""" +647 50 evaluator """rankbased""" +647 51 dataset """kinships""" +647 51 model """simple""" +647 51 loss """bceaftersigmoid""" +647 51 regularizer """no""" +647 51 optimizer """adadelta""" +647 51 training_loop """lcwa""" +647 51 evaluator """rankbased""" +647 52 dataset """kinships""" +647 52 model """simple""" +647 52 loss """bceaftersigmoid""" +647 52 regularizer """no""" +647 52 optimizer """adadelta""" +647 52 training_loop """lcwa""" +647 52 evaluator """rankbased""" +647 53 dataset """kinships""" +647 53 model """simple""" +647 53 loss """bceaftersigmoid""" +647 53 regularizer """no""" +647 53 optimizer """adadelta""" +647 53 training_loop """lcwa""" +647 53 evaluator """rankbased""" +647 54 dataset """kinships""" +647 54 model """simple""" +647 54 loss """bceaftersigmoid""" +647 54 regularizer """no""" +647 54 optimizer """adadelta""" +647 54 training_loop """lcwa""" +647 54 evaluator """rankbased""" +647 55 dataset """kinships""" +647 55 model """simple""" +647 55 loss """bceaftersigmoid""" +647 55 regularizer """no""" +647 55 optimizer """adadelta""" +647 55 training_loop """lcwa""" +647 55 evaluator """rankbased""" +647 56 dataset """kinships""" +647 56 model """simple""" +647 56 loss """bceaftersigmoid""" +647 56 regularizer """no""" +647 56 optimizer """adadelta""" +647 56 training_loop """lcwa""" +647 56 evaluator """rankbased""" +647 57 dataset """kinships""" +647 57 model """simple""" +647 57 loss """bceaftersigmoid""" +647 57 regularizer 
"""no""" +647 57 optimizer """adadelta""" +647 57 training_loop """lcwa""" +647 57 evaluator """rankbased""" +647 58 dataset """kinships""" +647 58 model """simple""" +647 58 loss """bceaftersigmoid""" +647 58 regularizer """no""" +647 58 optimizer """adadelta""" +647 58 training_loop """lcwa""" +647 58 evaluator """rankbased""" +647 59 dataset """kinships""" +647 59 model """simple""" +647 59 loss """bceaftersigmoid""" +647 59 regularizer """no""" +647 59 optimizer """adadelta""" +647 59 training_loop """lcwa""" +647 59 evaluator """rankbased""" +647 60 dataset """kinships""" +647 60 model """simple""" +647 60 loss """bceaftersigmoid""" +647 60 regularizer """no""" +647 60 optimizer """adadelta""" +647 60 training_loop """lcwa""" +647 60 evaluator """rankbased""" +647 61 dataset """kinships""" +647 61 model """simple""" +647 61 loss """bceaftersigmoid""" +647 61 regularizer """no""" +647 61 optimizer """adadelta""" +647 61 training_loop """lcwa""" +647 61 evaluator """rankbased""" +647 62 dataset """kinships""" +647 62 model """simple""" +647 62 loss """bceaftersigmoid""" +647 62 regularizer """no""" +647 62 optimizer """adadelta""" +647 62 training_loop """lcwa""" +647 62 evaluator """rankbased""" +647 63 dataset """kinships""" +647 63 model """simple""" +647 63 loss """bceaftersigmoid""" +647 63 regularizer """no""" +647 63 optimizer """adadelta""" +647 63 training_loop """lcwa""" +647 63 evaluator """rankbased""" +647 64 dataset """kinships""" +647 64 model """simple""" +647 64 loss """bceaftersigmoid""" +647 64 regularizer """no""" +647 64 optimizer """adadelta""" +647 64 training_loop """lcwa""" +647 64 evaluator """rankbased""" +647 65 dataset """kinships""" +647 65 model """simple""" +647 65 loss """bceaftersigmoid""" +647 65 regularizer """no""" +647 65 optimizer """adadelta""" +647 65 training_loop """lcwa""" +647 65 evaluator """rankbased""" +647 66 dataset """kinships""" +647 66 model """simple""" +647 66 loss """bceaftersigmoid""" +647 66 regularizer 
"""no""" +647 66 optimizer """adadelta""" +647 66 training_loop """lcwa""" +647 66 evaluator """rankbased""" +647 67 dataset """kinships""" +647 67 model """simple""" +647 67 loss """bceaftersigmoid""" +647 67 regularizer """no""" +647 67 optimizer """adadelta""" +647 67 training_loop """lcwa""" +647 67 evaluator """rankbased""" +647 68 dataset """kinships""" +647 68 model """simple""" +647 68 loss """bceaftersigmoid""" +647 68 regularizer """no""" +647 68 optimizer """adadelta""" +647 68 training_loop """lcwa""" +647 68 evaluator """rankbased""" +647 69 dataset """kinships""" +647 69 model """simple""" +647 69 loss """bceaftersigmoid""" +647 69 regularizer """no""" +647 69 optimizer """adadelta""" +647 69 training_loop """lcwa""" +647 69 evaluator """rankbased""" +647 70 dataset """kinships""" +647 70 model """simple""" +647 70 loss """bceaftersigmoid""" +647 70 regularizer """no""" +647 70 optimizer """adadelta""" +647 70 training_loop """lcwa""" +647 70 evaluator """rankbased""" +647 71 dataset """kinships""" +647 71 model """simple""" +647 71 loss """bceaftersigmoid""" +647 71 regularizer """no""" +647 71 optimizer """adadelta""" +647 71 training_loop """lcwa""" +647 71 evaluator """rankbased""" +647 72 dataset """kinships""" +647 72 model """simple""" +647 72 loss """bceaftersigmoid""" +647 72 regularizer """no""" +647 72 optimizer """adadelta""" +647 72 training_loop """lcwa""" +647 72 evaluator """rankbased""" +647 73 dataset """kinships""" +647 73 model """simple""" +647 73 loss """bceaftersigmoid""" +647 73 regularizer """no""" +647 73 optimizer """adadelta""" +647 73 training_loop """lcwa""" +647 73 evaluator """rankbased""" +647 74 dataset """kinships""" +647 74 model """simple""" +647 74 loss """bceaftersigmoid""" +647 74 regularizer """no""" +647 74 optimizer """adadelta""" +647 74 training_loop """lcwa""" +647 74 evaluator """rankbased""" +647 75 dataset """kinships""" +647 75 model """simple""" +647 75 loss """bceaftersigmoid""" +647 75 regularizer 
"""no""" +647 75 optimizer """adadelta""" +647 75 training_loop """lcwa""" +647 75 evaluator """rankbased""" +647 76 dataset """kinships""" +647 76 model """simple""" +647 76 loss """bceaftersigmoid""" +647 76 regularizer """no""" +647 76 optimizer """adadelta""" +647 76 training_loop """lcwa""" +647 76 evaluator """rankbased""" +647 77 dataset """kinships""" +647 77 model """simple""" +647 77 loss """bceaftersigmoid""" +647 77 regularizer """no""" +647 77 optimizer """adadelta""" +647 77 training_loop """lcwa""" +647 77 evaluator """rankbased""" +647 78 dataset """kinships""" +647 78 model """simple""" +647 78 loss """bceaftersigmoid""" +647 78 regularizer """no""" +647 78 optimizer """adadelta""" +647 78 training_loop """lcwa""" +647 78 evaluator """rankbased""" +647 79 dataset """kinships""" +647 79 model """simple""" +647 79 loss """bceaftersigmoid""" +647 79 regularizer """no""" +647 79 optimizer """adadelta""" +647 79 training_loop """lcwa""" +647 79 evaluator """rankbased""" +647 80 dataset """kinships""" +647 80 model """simple""" +647 80 loss """bceaftersigmoid""" +647 80 regularizer """no""" +647 80 optimizer """adadelta""" +647 80 training_loop """lcwa""" +647 80 evaluator """rankbased""" +647 81 dataset """kinships""" +647 81 model """simple""" +647 81 loss """bceaftersigmoid""" +647 81 regularizer """no""" +647 81 optimizer """adadelta""" +647 81 training_loop """lcwa""" +647 81 evaluator """rankbased""" +647 82 dataset """kinships""" +647 82 model """simple""" +647 82 loss """bceaftersigmoid""" +647 82 regularizer """no""" +647 82 optimizer """adadelta""" +647 82 training_loop """lcwa""" +647 82 evaluator """rankbased""" +647 83 dataset """kinships""" +647 83 model """simple""" +647 83 loss """bceaftersigmoid""" +647 83 regularizer """no""" +647 83 optimizer """adadelta""" +647 83 training_loop """lcwa""" +647 83 evaluator """rankbased""" +647 84 dataset """kinships""" +647 84 model """simple""" +647 84 loss """bceaftersigmoid""" +647 84 regularizer 
"""no""" +647 84 optimizer """adadelta""" +647 84 training_loop """lcwa""" +647 84 evaluator """rankbased""" +647 85 dataset """kinships""" +647 85 model """simple""" +647 85 loss """bceaftersigmoid""" +647 85 regularizer """no""" +647 85 optimizer """adadelta""" +647 85 training_loop """lcwa""" +647 85 evaluator """rankbased""" +647 86 dataset """kinships""" +647 86 model """simple""" +647 86 loss """bceaftersigmoid""" +647 86 regularizer """no""" +647 86 optimizer """adadelta""" +647 86 training_loop """lcwa""" +647 86 evaluator """rankbased""" +647 87 dataset """kinships""" +647 87 model """simple""" +647 87 loss """bceaftersigmoid""" +647 87 regularizer """no""" +647 87 optimizer """adadelta""" +647 87 training_loop """lcwa""" +647 87 evaluator """rankbased""" +647 88 dataset """kinships""" +647 88 model """simple""" +647 88 loss """bceaftersigmoid""" +647 88 regularizer """no""" +647 88 optimizer """adadelta""" +647 88 training_loop """lcwa""" +647 88 evaluator """rankbased""" +647 89 dataset """kinships""" +647 89 model """simple""" +647 89 loss """bceaftersigmoid""" +647 89 regularizer """no""" +647 89 optimizer """adadelta""" +647 89 training_loop """lcwa""" +647 89 evaluator """rankbased""" +647 90 dataset """kinships""" +647 90 model """simple""" +647 90 loss """bceaftersigmoid""" +647 90 regularizer """no""" +647 90 optimizer """adadelta""" +647 90 training_loop """lcwa""" +647 90 evaluator """rankbased""" +647 91 dataset """kinships""" +647 91 model """simple""" +647 91 loss """bceaftersigmoid""" +647 91 regularizer """no""" +647 91 optimizer """adadelta""" +647 91 training_loop """lcwa""" +647 91 evaluator """rankbased""" +647 92 dataset """kinships""" +647 92 model """simple""" +647 92 loss """bceaftersigmoid""" +647 92 regularizer """no""" +647 92 optimizer """adadelta""" +647 92 training_loop """lcwa""" +647 92 evaluator """rankbased""" +647 93 dataset """kinships""" +647 93 model """simple""" +647 93 loss """bceaftersigmoid""" +647 93 regularizer 
"""no""" +647 93 optimizer """adadelta""" +647 93 training_loop """lcwa""" +647 93 evaluator """rankbased""" +647 94 dataset """kinships""" +647 94 model """simple""" +647 94 loss """bceaftersigmoid""" +647 94 regularizer """no""" +647 94 optimizer """adadelta""" +647 94 training_loop """lcwa""" +647 94 evaluator """rankbased""" +647 95 dataset """kinships""" +647 95 model """simple""" +647 95 loss """bceaftersigmoid""" +647 95 regularizer """no""" +647 95 optimizer """adadelta""" +647 95 training_loop """lcwa""" +647 95 evaluator """rankbased""" +647 96 dataset """kinships""" +647 96 model """simple""" +647 96 loss """bceaftersigmoid""" +647 96 regularizer """no""" +647 96 optimizer """adadelta""" +647 96 training_loop """lcwa""" +647 96 evaluator """rankbased""" +647 97 dataset """kinships""" +647 97 model """simple""" +647 97 loss """bceaftersigmoid""" +647 97 regularizer """no""" +647 97 optimizer """adadelta""" +647 97 training_loop """lcwa""" +647 97 evaluator """rankbased""" +647 98 dataset """kinships""" +647 98 model """simple""" +647 98 loss """bceaftersigmoid""" +647 98 regularizer """no""" +647 98 optimizer """adadelta""" +647 98 training_loop """lcwa""" +647 98 evaluator """rankbased""" +647 99 dataset """kinships""" +647 99 model """simple""" +647 99 loss """bceaftersigmoid""" +647 99 regularizer """no""" +647 99 optimizer """adadelta""" +647 99 training_loop """lcwa""" +647 99 evaluator """rankbased""" +647 100 dataset """kinships""" +647 100 model """simple""" +647 100 loss """bceaftersigmoid""" +647 100 regularizer """no""" +647 100 optimizer """adadelta""" +647 100 training_loop """lcwa""" +647 100 evaluator """rankbased""" +648 1 model.embedding_dim 0.0 +648 1 training.batch_size 1.0 +648 1 training.label_smoothing 0.03987225734814041 +648 2 model.embedding_dim 0.0 +648 2 training.batch_size 0.0 +648 2 training.label_smoothing 0.016796503190721575 +648 3 model.embedding_dim 1.0 +648 3 training.batch_size 2.0 +648 3 training.label_smoothing 
0.04469849223931036 +648 4 model.embedding_dim 0.0 +648 4 training.batch_size 1.0 +648 4 training.label_smoothing 0.5392151300369106 +648 5 model.embedding_dim 2.0 +648 5 training.batch_size 1.0 +648 5 training.label_smoothing 0.3296432448955352 +648 6 model.embedding_dim 1.0 +648 6 training.batch_size 1.0 +648 6 training.label_smoothing 0.0031535140690852363 +648 7 model.embedding_dim 0.0 +648 7 training.batch_size 2.0 +648 7 training.label_smoothing 0.02281827963975063 +648 8 model.embedding_dim 0.0 +648 8 training.batch_size 2.0 +648 8 training.label_smoothing 0.002937612247692925 +648 9 model.embedding_dim 2.0 +648 9 training.batch_size 1.0 +648 9 training.label_smoothing 0.0013823447823096261 +648 10 model.embedding_dim 1.0 +648 10 training.batch_size 1.0 +648 10 training.label_smoothing 0.021967798993419236 +648 11 model.embedding_dim 2.0 +648 11 training.batch_size 0.0 +648 11 training.label_smoothing 0.6042826974327745 +648 12 model.embedding_dim 2.0 +648 12 training.batch_size 2.0 +648 12 training.label_smoothing 0.0010175967893403257 +648 13 model.embedding_dim 1.0 +648 13 training.batch_size 1.0 +648 13 training.label_smoothing 0.012320262112707517 +648 14 model.embedding_dim 2.0 +648 14 training.batch_size 1.0 +648 14 training.label_smoothing 0.009782126582989837 +648 15 model.embedding_dim 0.0 +648 15 training.batch_size 2.0 +648 15 training.label_smoothing 0.0921329346783621 +648 16 model.embedding_dim 2.0 +648 16 training.batch_size 1.0 +648 16 training.label_smoothing 0.00637256778019146 +648 17 model.embedding_dim 1.0 +648 17 training.batch_size 0.0 +648 17 training.label_smoothing 0.036311326053808636 +648 18 model.embedding_dim 2.0 +648 18 training.batch_size 1.0 +648 18 training.label_smoothing 0.004797525761553371 +648 19 model.embedding_dim 2.0 +648 19 training.batch_size 2.0 +648 19 training.label_smoothing 0.06624772886387433 +648 20 model.embedding_dim 2.0 +648 20 training.batch_size 1.0 +648 20 training.label_smoothing 0.8437535673295207 
+648 21 model.embedding_dim 1.0 +648 21 training.batch_size 2.0 +648 21 training.label_smoothing 0.02143405133954446 +648 22 model.embedding_dim 0.0 +648 22 training.batch_size 1.0 +648 22 training.label_smoothing 0.0021043800886083423 +648 23 model.embedding_dim 0.0 +648 23 training.batch_size 1.0 +648 23 training.label_smoothing 0.0012000379042779127 +648 24 model.embedding_dim 2.0 +648 24 training.batch_size 2.0 +648 24 training.label_smoothing 0.0016700165423442525 +648 25 model.embedding_dim 1.0 +648 25 training.batch_size 2.0 +648 25 training.label_smoothing 0.02362769694704151 +648 26 model.embedding_dim 1.0 +648 26 training.batch_size 1.0 +648 26 training.label_smoothing 0.008077038507113153 +648 27 model.embedding_dim 0.0 +648 27 training.batch_size 1.0 +648 27 training.label_smoothing 0.003315910346774342 +648 28 model.embedding_dim 1.0 +648 28 training.batch_size 1.0 +648 28 training.label_smoothing 0.00536102956415559 +648 29 model.embedding_dim 2.0 +648 29 training.batch_size 1.0 +648 29 training.label_smoothing 0.004688173088647032 +648 30 model.embedding_dim 1.0 +648 30 training.batch_size 1.0 +648 30 training.label_smoothing 0.004358113825567097 +648 31 model.embedding_dim 1.0 +648 31 training.batch_size 2.0 +648 31 training.label_smoothing 0.6561893090922093 +648 32 model.embedding_dim 2.0 +648 32 training.batch_size 0.0 +648 32 training.label_smoothing 0.01505313945507374 +648 33 model.embedding_dim 2.0 +648 33 training.batch_size 0.0 +648 33 training.label_smoothing 0.0203119621011793 +648 34 model.embedding_dim 2.0 +648 34 training.batch_size 2.0 +648 34 training.label_smoothing 0.004633053511506905 +648 35 model.embedding_dim 0.0 +648 35 training.batch_size 0.0 +648 35 training.label_smoothing 0.004581875200700582 +648 36 model.embedding_dim 1.0 +648 36 training.batch_size 2.0 +648 36 training.label_smoothing 0.01437109600587551 +648 37 model.embedding_dim 0.0 +648 37 training.batch_size 1.0 +648 37 training.label_smoothing 0.06904230178871032 
+648 38 model.embedding_dim 2.0 +648 38 training.batch_size 0.0 +648 38 training.label_smoothing 0.23684219828406206 +648 39 model.embedding_dim 1.0 +648 39 training.batch_size 1.0 +648 39 training.label_smoothing 0.3246552385817267 +648 40 model.embedding_dim 1.0 +648 40 training.batch_size 2.0 +648 40 training.label_smoothing 0.07978774489286539 +648 41 model.embedding_dim 1.0 +648 41 training.batch_size 1.0 +648 41 training.label_smoothing 0.044709195046468954 +648 42 model.embedding_dim 0.0 +648 42 training.batch_size 2.0 +648 42 training.label_smoothing 0.03262920441183987 +648 43 model.embedding_dim 1.0 +648 43 training.batch_size 0.0 +648 43 training.label_smoothing 0.013028836558690407 +648 44 model.embedding_dim 0.0 +648 44 training.batch_size 0.0 +648 44 training.label_smoothing 0.9046564341344475 +648 45 model.embedding_dim 0.0 +648 45 training.batch_size 1.0 +648 45 training.label_smoothing 0.30550746136650464 +648 46 model.embedding_dim 1.0 +648 46 training.batch_size 0.0 +648 46 training.label_smoothing 0.0016743419084714124 +648 47 model.embedding_dim 1.0 +648 47 training.batch_size 1.0 +648 47 training.label_smoothing 0.22488879864464767 +648 48 model.embedding_dim 1.0 +648 48 training.batch_size 1.0 +648 48 training.label_smoothing 0.0011714463402490679 +648 49 model.embedding_dim 0.0 +648 49 training.batch_size 0.0 +648 49 training.label_smoothing 0.05519194646169171 +648 50 model.embedding_dim 2.0 +648 50 training.batch_size 0.0 +648 50 training.label_smoothing 0.0023945497297944724 +648 51 model.embedding_dim 2.0 +648 51 training.batch_size 0.0 +648 51 training.label_smoothing 0.24620707012182652 +648 52 model.embedding_dim 1.0 +648 52 training.batch_size 0.0 +648 52 training.label_smoothing 0.07603018226807029 +648 53 model.embedding_dim 2.0 +648 53 training.batch_size 0.0 +648 53 training.label_smoothing 0.0016148351988295125 +648 54 model.embedding_dim 1.0 +648 54 training.batch_size 0.0 +648 54 training.label_smoothing 0.013843984799048793 
+648 55 model.embedding_dim 2.0 +648 55 training.batch_size 0.0 +648 55 training.label_smoothing 0.3064591702660773 +648 56 model.embedding_dim 1.0 +648 56 training.batch_size 0.0 +648 56 training.label_smoothing 0.2517680702879622 +648 57 model.embedding_dim 0.0 +648 57 training.batch_size 1.0 +648 57 training.label_smoothing 0.14965248068455148 +648 58 model.embedding_dim 0.0 +648 58 training.batch_size 0.0 +648 58 training.label_smoothing 0.012733056451324309 +648 59 model.embedding_dim 1.0 +648 59 training.batch_size 1.0 +648 59 training.label_smoothing 0.21969017923169867 +648 60 model.embedding_dim 2.0 +648 60 training.batch_size 2.0 +648 60 training.label_smoothing 0.019998960837580616 +648 61 model.embedding_dim 0.0 +648 61 training.batch_size 1.0 +648 61 training.label_smoothing 0.0035094341465862636 +648 62 model.embedding_dim 2.0 +648 62 training.batch_size 1.0 +648 62 training.label_smoothing 0.0025413225865751434 +648 63 model.embedding_dim 2.0 +648 63 training.batch_size 2.0 +648 63 training.label_smoothing 0.5045362685721647 +648 64 model.embedding_dim 1.0 +648 64 training.batch_size 2.0 +648 64 training.label_smoothing 0.006130755461456786 +648 65 model.embedding_dim 2.0 +648 65 training.batch_size 0.0 +648 65 training.label_smoothing 0.8977501833051151 +648 66 model.embedding_dim 1.0 +648 66 training.batch_size 1.0 +648 66 training.label_smoothing 0.799222695291775 +648 67 model.embedding_dim 0.0 +648 67 training.batch_size 0.0 +648 67 training.label_smoothing 0.017078506542113427 +648 68 model.embedding_dim 1.0 +648 68 training.batch_size 2.0 +648 68 training.label_smoothing 0.16935704827759004 +648 69 model.embedding_dim 1.0 +648 69 training.batch_size 1.0 +648 69 training.label_smoothing 0.9337924067920488 +648 70 model.embedding_dim 2.0 +648 70 training.batch_size 2.0 +648 70 training.label_smoothing 0.022007182843889275 +648 71 model.embedding_dim 1.0 +648 71 training.batch_size 0.0 +648 71 training.label_smoothing 0.3014248669364087 +648 72 
model.embedding_dim 2.0 +648 72 training.batch_size 0.0 +648 72 training.label_smoothing 0.07604234490940183 +648 73 model.embedding_dim 2.0 +648 73 training.batch_size 1.0 +648 73 training.label_smoothing 0.009271881120681402 +648 74 model.embedding_dim 2.0 +648 74 training.batch_size 2.0 +648 74 training.label_smoothing 0.02338192022599414 +648 75 model.embedding_dim 1.0 +648 75 training.batch_size 2.0 +648 75 training.label_smoothing 0.003448014728303772 +648 76 model.embedding_dim 0.0 +648 76 training.batch_size 2.0 +648 76 training.label_smoothing 0.003491609432367428 +648 77 model.embedding_dim 1.0 +648 77 training.batch_size 2.0 +648 77 training.label_smoothing 0.6660152520010937 +648 78 model.embedding_dim 1.0 +648 78 training.batch_size 2.0 +648 78 training.label_smoothing 0.5566553456717421 +648 79 model.embedding_dim 2.0 +648 79 training.batch_size 1.0 +648 79 training.label_smoothing 0.037520869672100596 +648 80 model.embedding_dim 0.0 +648 80 training.batch_size 1.0 +648 80 training.label_smoothing 0.675645804965987 +648 81 model.embedding_dim 1.0 +648 81 training.batch_size 2.0 +648 81 training.label_smoothing 0.8905310007365339 +648 82 model.embedding_dim 1.0 +648 82 training.batch_size 2.0 +648 82 training.label_smoothing 0.3427789908131618 +648 83 model.embedding_dim 1.0 +648 83 training.batch_size 0.0 +648 83 training.label_smoothing 0.015362921945386914 +648 84 model.embedding_dim 1.0 +648 84 training.batch_size 2.0 +648 84 training.label_smoothing 0.9420847739260977 +648 85 model.embedding_dim 2.0 +648 85 training.batch_size 2.0 +648 85 training.label_smoothing 0.04393898735535624 +648 86 model.embedding_dim 2.0 +648 86 training.batch_size 0.0 +648 86 training.label_smoothing 0.00619344523767649 +648 87 model.embedding_dim 0.0 +648 87 training.batch_size 1.0 +648 87 training.label_smoothing 0.02760558451972338 +648 88 model.embedding_dim 2.0 +648 88 training.batch_size 2.0 +648 88 training.label_smoothing 0.010840306688961115 +648 89 
model.embedding_dim 0.0 +648 89 training.batch_size 0.0 +648 89 training.label_smoothing 0.04686738314515007 +648 90 model.embedding_dim 2.0 +648 90 training.batch_size 2.0 +648 90 training.label_smoothing 0.18256090356892127 +648 91 model.embedding_dim 1.0 +648 91 training.batch_size 0.0 +648 91 training.label_smoothing 0.6640164865045448 +648 92 model.embedding_dim 2.0 +648 92 training.batch_size 1.0 +648 92 training.label_smoothing 0.18804325468717767 +648 93 model.embedding_dim 1.0 +648 93 training.batch_size 1.0 +648 93 training.label_smoothing 0.05665777866964988 +648 94 model.embedding_dim 0.0 +648 94 training.batch_size 2.0 +648 94 training.label_smoothing 0.0021124313052774063 +648 95 model.embedding_dim 2.0 +648 95 training.batch_size 0.0 +648 95 training.label_smoothing 0.01922946790127755 +648 96 model.embedding_dim 1.0 +648 96 training.batch_size 1.0 +648 96 training.label_smoothing 0.10109587328604731 +648 97 model.embedding_dim 2.0 +648 97 training.batch_size 1.0 +648 97 training.label_smoothing 0.023929369025505486 +648 98 model.embedding_dim 2.0 +648 98 training.batch_size 1.0 +648 98 training.label_smoothing 0.12779020366445348 +648 99 model.embedding_dim 2.0 +648 99 training.batch_size 1.0 +648 99 training.label_smoothing 0.05243555264924189 +648 100 model.embedding_dim 0.0 +648 100 training.batch_size 2.0 +648 100 training.label_smoothing 0.019417834416201114 +648 1 dataset """kinships""" +648 1 model """simple""" +648 1 loss """softplus""" +648 1 regularizer """no""" +648 1 optimizer """adadelta""" +648 1 training_loop """lcwa""" +648 1 evaluator """rankbased""" +648 2 dataset """kinships""" +648 2 model """simple""" +648 2 loss """softplus""" +648 2 regularizer """no""" +648 2 optimizer """adadelta""" +648 2 training_loop """lcwa""" +648 2 evaluator """rankbased""" +648 3 dataset """kinships""" +648 3 model """simple""" +648 3 loss """softplus""" +648 3 regularizer """no""" +648 3 optimizer """adadelta""" +648 3 training_loop """lcwa""" +648 3 
evaluator """rankbased""" +648 4 dataset """kinships""" +648 4 model """simple""" +648 4 loss """softplus""" +648 4 regularizer """no""" +648 4 optimizer """adadelta""" +648 4 training_loop """lcwa""" +648 4 evaluator """rankbased""" +648 5 dataset """kinships""" +648 5 model """simple""" +648 5 loss """softplus""" +648 5 regularizer """no""" +648 5 optimizer """adadelta""" +648 5 training_loop """lcwa""" +648 5 evaluator """rankbased""" +648 6 dataset """kinships""" +648 6 model """simple""" +648 6 loss """softplus""" +648 6 regularizer """no""" +648 6 optimizer """adadelta""" +648 6 training_loop """lcwa""" +648 6 evaluator """rankbased""" +648 7 dataset """kinships""" +648 7 model """simple""" +648 7 loss """softplus""" +648 7 regularizer """no""" +648 7 optimizer """adadelta""" +648 7 training_loop """lcwa""" +648 7 evaluator """rankbased""" +648 8 dataset """kinships""" +648 8 model """simple""" +648 8 loss """softplus""" +648 8 regularizer """no""" +648 8 optimizer """adadelta""" +648 8 training_loop """lcwa""" +648 8 evaluator """rankbased""" +648 9 dataset """kinships""" +648 9 model """simple""" +648 9 loss """softplus""" +648 9 regularizer """no""" +648 9 optimizer """adadelta""" +648 9 training_loop """lcwa""" +648 9 evaluator """rankbased""" +648 10 dataset """kinships""" +648 10 model """simple""" +648 10 loss """softplus""" +648 10 regularizer """no""" +648 10 optimizer """adadelta""" +648 10 training_loop """lcwa""" +648 10 evaluator """rankbased""" +648 11 dataset """kinships""" +648 11 model """simple""" +648 11 loss """softplus""" +648 11 regularizer """no""" +648 11 optimizer """adadelta""" +648 11 training_loop """lcwa""" +648 11 evaluator """rankbased""" +648 12 dataset """kinships""" +648 12 model """simple""" +648 12 loss """softplus""" +648 12 regularizer """no""" +648 12 optimizer """adadelta""" +648 12 training_loop """lcwa""" +648 12 evaluator """rankbased""" +648 13 dataset """kinships""" +648 13 model """simple""" +648 13 loss 
"""softplus""" +648 13 regularizer """no""" +648 13 optimizer """adadelta""" +648 13 training_loop """lcwa""" +648 13 evaluator """rankbased""" +648 14 dataset """kinships""" +648 14 model """simple""" +648 14 loss """softplus""" +648 14 regularizer """no""" +648 14 optimizer """adadelta""" +648 14 training_loop """lcwa""" +648 14 evaluator """rankbased""" +648 15 dataset """kinships""" +648 15 model """simple""" +648 15 loss """softplus""" +648 15 regularizer """no""" +648 15 optimizer """adadelta""" +648 15 training_loop """lcwa""" +648 15 evaluator """rankbased""" +648 16 dataset """kinships""" +648 16 model """simple""" +648 16 loss """softplus""" +648 16 regularizer """no""" +648 16 optimizer """adadelta""" +648 16 training_loop """lcwa""" +648 16 evaluator """rankbased""" +648 17 dataset """kinships""" +648 17 model """simple""" +648 17 loss """softplus""" +648 17 regularizer """no""" +648 17 optimizer """adadelta""" +648 17 training_loop """lcwa""" +648 17 evaluator """rankbased""" +648 18 dataset """kinships""" +648 18 model """simple""" +648 18 loss """softplus""" +648 18 regularizer """no""" +648 18 optimizer """adadelta""" +648 18 training_loop """lcwa""" +648 18 evaluator """rankbased""" +648 19 dataset """kinships""" +648 19 model """simple""" +648 19 loss """softplus""" +648 19 regularizer """no""" +648 19 optimizer """adadelta""" +648 19 training_loop """lcwa""" +648 19 evaluator """rankbased""" +648 20 dataset """kinships""" +648 20 model """simple""" +648 20 loss """softplus""" +648 20 regularizer """no""" +648 20 optimizer """adadelta""" +648 20 training_loop """lcwa""" +648 20 evaluator """rankbased""" +648 21 dataset """kinships""" +648 21 model """simple""" +648 21 loss """softplus""" +648 21 regularizer """no""" +648 21 optimizer """adadelta""" +648 21 training_loop """lcwa""" +648 21 evaluator """rankbased""" +648 22 dataset """kinships""" +648 22 model """simple""" +648 22 loss """softplus""" +648 22 regularizer """no""" +648 22 optimizer 
"""adadelta""" +648 22 training_loop """lcwa""" +648 22 evaluator """rankbased""" +648 23 dataset """kinships""" +648 23 model """simple""" +648 23 loss """softplus""" +648 23 regularizer """no""" +648 23 optimizer """adadelta""" +648 23 training_loop """lcwa""" +648 23 evaluator """rankbased""" +648 24 dataset """kinships""" +648 24 model """simple""" +648 24 loss """softplus""" +648 24 regularizer """no""" +648 24 optimizer """adadelta""" +648 24 training_loop """lcwa""" +648 24 evaluator """rankbased""" +648 25 dataset """kinships""" +648 25 model """simple""" +648 25 loss """softplus""" +648 25 regularizer """no""" +648 25 optimizer """adadelta""" +648 25 training_loop """lcwa""" +648 25 evaluator """rankbased""" +648 26 dataset """kinships""" +648 26 model """simple""" +648 26 loss """softplus""" +648 26 regularizer """no""" +648 26 optimizer """adadelta""" +648 26 training_loop """lcwa""" +648 26 evaluator """rankbased""" +648 27 dataset """kinships""" +648 27 model """simple""" +648 27 loss """softplus""" +648 27 regularizer """no""" +648 27 optimizer """adadelta""" +648 27 training_loop """lcwa""" +648 27 evaluator """rankbased""" +648 28 dataset """kinships""" +648 28 model """simple""" +648 28 loss """softplus""" +648 28 regularizer """no""" +648 28 optimizer """adadelta""" +648 28 training_loop """lcwa""" +648 28 evaluator """rankbased""" +648 29 dataset """kinships""" +648 29 model """simple""" +648 29 loss """softplus""" +648 29 regularizer """no""" +648 29 optimizer """adadelta""" +648 29 training_loop """lcwa""" +648 29 evaluator """rankbased""" +648 30 dataset """kinships""" +648 30 model """simple""" +648 30 loss """softplus""" +648 30 regularizer """no""" +648 30 optimizer """adadelta""" +648 30 training_loop """lcwa""" +648 30 evaluator """rankbased""" +648 31 dataset """kinships""" +648 31 model """simple""" +648 31 loss """softplus""" +648 31 regularizer """no""" +648 31 optimizer """adadelta""" +648 31 training_loop """lcwa""" +648 31 
evaluator """rankbased""" +648 32 dataset """kinships""" +648 32 model """simple""" +648 32 loss """softplus""" +648 32 regularizer """no""" +648 32 optimizer """adadelta""" +648 32 training_loop """lcwa""" +648 32 evaluator """rankbased""" +648 33 dataset """kinships""" +648 33 model """simple""" +648 33 loss """softplus""" +648 33 regularizer """no""" +648 33 optimizer """adadelta""" +648 33 training_loop """lcwa""" +648 33 evaluator """rankbased""" +648 34 dataset """kinships""" +648 34 model """simple""" +648 34 loss """softplus""" +648 34 regularizer """no""" +648 34 optimizer """adadelta""" +648 34 training_loop """lcwa""" +648 34 evaluator """rankbased""" +648 35 dataset """kinships""" +648 35 model """simple""" +648 35 loss """softplus""" +648 35 regularizer """no""" +648 35 optimizer """adadelta""" +648 35 training_loop """lcwa""" +648 35 evaluator """rankbased""" +648 36 dataset """kinships""" +648 36 model """simple""" +648 36 loss """softplus""" +648 36 regularizer """no""" +648 36 optimizer """adadelta""" +648 36 training_loop """lcwa""" +648 36 evaluator """rankbased""" +648 37 dataset """kinships""" +648 37 model """simple""" +648 37 loss """softplus""" +648 37 regularizer """no""" +648 37 optimizer """adadelta""" +648 37 training_loop """lcwa""" +648 37 evaluator """rankbased""" +648 38 dataset """kinships""" +648 38 model """simple""" +648 38 loss """softplus""" +648 38 regularizer """no""" +648 38 optimizer """adadelta""" +648 38 training_loop """lcwa""" +648 38 evaluator """rankbased""" +648 39 dataset """kinships""" +648 39 model """simple""" +648 39 loss """softplus""" +648 39 regularizer """no""" +648 39 optimizer """adadelta""" +648 39 training_loop """lcwa""" +648 39 evaluator """rankbased""" +648 40 dataset """kinships""" +648 40 model """simple""" +648 40 loss """softplus""" +648 40 regularizer """no""" +648 40 optimizer """adadelta""" +648 40 training_loop """lcwa""" +648 40 evaluator """rankbased""" +648 41 dataset """kinships""" +648 41 
model """simple""" +648 41 loss """softplus""" +648 41 regularizer """no""" +648 41 optimizer """adadelta""" +648 41 training_loop """lcwa""" +648 41 evaluator """rankbased""" +648 42 dataset """kinships""" +648 42 model """simple""" +648 42 loss """softplus""" +648 42 regularizer """no""" +648 42 optimizer """adadelta""" +648 42 training_loop """lcwa""" +648 42 evaluator """rankbased""" +648 43 dataset """kinships""" +648 43 model """simple""" +648 43 loss """softplus""" +648 43 regularizer """no""" +648 43 optimizer """adadelta""" +648 43 training_loop """lcwa""" +648 43 evaluator """rankbased""" +648 44 dataset """kinships""" +648 44 model """simple""" +648 44 loss """softplus""" +648 44 regularizer """no""" +648 44 optimizer """adadelta""" +648 44 training_loop """lcwa""" +648 44 evaluator """rankbased""" +648 45 dataset """kinships""" +648 45 model """simple""" +648 45 loss """softplus""" +648 45 regularizer """no""" +648 45 optimizer """adadelta""" +648 45 training_loop """lcwa""" +648 45 evaluator """rankbased""" +648 46 dataset """kinships""" +648 46 model """simple""" +648 46 loss """softplus""" +648 46 regularizer """no""" +648 46 optimizer """adadelta""" +648 46 training_loop """lcwa""" +648 46 evaluator """rankbased""" +648 47 dataset """kinships""" +648 47 model """simple""" +648 47 loss """softplus""" +648 47 regularizer """no""" +648 47 optimizer """adadelta""" +648 47 training_loop """lcwa""" +648 47 evaluator """rankbased""" +648 48 dataset """kinships""" +648 48 model """simple""" +648 48 loss """softplus""" +648 48 regularizer """no""" +648 48 optimizer """adadelta""" +648 48 training_loop """lcwa""" +648 48 evaluator """rankbased""" +648 49 dataset """kinships""" +648 49 model """simple""" +648 49 loss """softplus""" +648 49 regularizer """no""" +648 49 optimizer """adadelta""" +648 49 training_loop """lcwa""" +648 49 evaluator """rankbased""" +648 50 dataset """kinships""" +648 50 model """simple""" +648 50 loss """softplus""" +648 50 
regularizer """no""" +648 50 optimizer """adadelta""" +648 50 training_loop """lcwa""" +648 50 evaluator """rankbased""" +648 51 dataset """kinships""" +648 51 model """simple""" +648 51 loss """softplus""" +648 51 regularizer """no""" +648 51 optimizer """adadelta""" +648 51 training_loop """lcwa""" +648 51 evaluator """rankbased""" +648 52 dataset """kinships""" +648 52 model """simple""" +648 52 loss """softplus""" +648 52 regularizer """no""" +648 52 optimizer """adadelta""" +648 52 training_loop """lcwa""" +648 52 evaluator """rankbased""" +648 53 dataset """kinships""" +648 53 model """simple""" +648 53 loss """softplus""" +648 53 regularizer """no""" +648 53 optimizer """adadelta""" +648 53 training_loop """lcwa""" +648 53 evaluator """rankbased""" +648 54 dataset """kinships""" +648 54 model """simple""" +648 54 loss """softplus""" +648 54 regularizer """no""" +648 54 optimizer """adadelta""" +648 54 training_loop """lcwa""" +648 54 evaluator """rankbased""" +648 55 dataset """kinships""" +648 55 model """simple""" +648 55 loss """softplus""" +648 55 regularizer """no""" +648 55 optimizer """adadelta""" +648 55 training_loop """lcwa""" +648 55 evaluator """rankbased""" +648 56 dataset """kinships""" +648 56 model """simple""" +648 56 loss """softplus""" +648 56 regularizer """no""" +648 56 optimizer """adadelta""" +648 56 training_loop """lcwa""" +648 56 evaluator """rankbased""" +648 57 dataset """kinships""" +648 57 model """simple""" +648 57 loss """softplus""" +648 57 regularizer """no""" +648 57 optimizer """adadelta""" +648 57 training_loop """lcwa""" +648 57 evaluator """rankbased""" +648 58 dataset """kinships""" +648 58 model """simple""" +648 58 loss """softplus""" +648 58 regularizer """no""" +648 58 optimizer """adadelta""" +648 58 training_loop """lcwa""" +648 58 evaluator """rankbased""" +648 59 dataset """kinships""" +648 59 model """simple""" +648 59 loss """softplus""" +648 59 regularizer """no""" +648 59 optimizer """adadelta""" +648 59 
training_loop """lcwa""" +648 59 evaluator """rankbased""" +648 60 dataset """kinships""" +648 60 model """simple""" +648 60 loss """softplus""" +648 60 regularizer """no""" +648 60 optimizer """adadelta""" +648 60 training_loop """lcwa""" +648 60 evaluator """rankbased""" +648 61 dataset """kinships""" +648 61 model """simple""" +648 61 loss """softplus""" +648 61 regularizer """no""" +648 61 optimizer """adadelta""" +648 61 training_loop """lcwa""" +648 61 evaluator """rankbased""" +648 62 dataset """kinships""" +648 62 model """simple""" +648 62 loss """softplus""" +648 62 regularizer """no""" +648 62 optimizer """adadelta""" +648 62 training_loop """lcwa""" +648 62 evaluator """rankbased""" +648 63 dataset """kinships""" +648 63 model """simple""" +648 63 loss """softplus""" +648 63 regularizer """no""" +648 63 optimizer """adadelta""" +648 63 training_loop """lcwa""" +648 63 evaluator """rankbased""" +648 64 dataset """kinships""" +648 64 model """simple""" +648 64 loss """softplus""" +648 64 regularizer """no""" +648 64 optimizer """adadelta""" +648 64 training_loop """lcwa""" +648 64 evaluator """rankbased""" +648 65 dataset """kinships""" +648 65 model """simple""" +648 65 loss """softplus""" +648 65 regularizer """no""" +648 65 optimizer """adadelta""" +648 65 training_loop """lcwa""" +648 65 evaluator """rankbased""" +648 66 dataset """kinships""" +648 66 model """simple""" +648 66 loss """softplus""" +648 66 regularizer """no""" +648 66 optimizer """adadelta""" +648 66 training_loop """lcwa""" +648 66 evaluator """rankbased""" +648 67 dataset """kinships""" +648 67 model """simple""" +648 67 loss """softplus""" +648 67 regularizer """no""" +648 67 optimizer """adadelta""" +648 67 training_loop """lcwa""" +648 67 evaluator """rankbased""" +648 68 dataset """kinships""" +648 68 model """simple""" +648 68 loss """softplus""" +648 68 regularizer """no""" +648 68 optimizer """adadelta""" +648 68 training_loop """lcwa""" +648 68 evaluator """rankbased""" +648 
69 dataset """kinships""" +648 69 model """simple""" +648 69 loss """softplus""" +648 69 regularizer """no""" +648 69 optimizer """adadelta""" +648 69 training_loop """lcwa""" +648 69 evaluator """rankbased""" +648 70 dataset """kinships""" +648 70 model """simple""" +648 70 loss """softplus""" +648 70 regularizer """no""" +648 70 optimizer """adadelta""" +648 70 training_loop """lcwa""" +648 70 evaluator """rankbased""" +648 71 dataset """kinships""" +648 71 model """simple""" +648 71 loss """softplus""" +648 71 regularizer """no""" +648 71 optimizer """adadelta""" +648 71 training_loop """lcwa""" +648 71 evaluator """rankbased""" +648 72 dataset """kinships""" +648 72 model """simple""" +648 72 loss """softplus""" +648 72 regularizer """no""" +648 72 optimizer """adadelta""" +648 72 training_loop """lcwa""" +648 72 evaluator """rankbased""" +648 73 dataset """kinships""" +648 73 model """simple""" +648 73 loss """softplus""" +648 73 regularizer """no""" +648 73 optimizer """adadelta""" +648 73 training_loop """lcwa""" +648 73 evaluator """rankbased""" +648 74 dataset """kinships""" +648 74 model """simple""" +648 74 loss """softplus""" +648 74 regularizer """no""" +648 74 optimizer """adadelta""" +648 74 training_loop """lcwa""" +648 74 evaluator """rankbased""" +648 75 dataset """kinships""" +648 75 model """simple""" +648 75 loss """softplus""" +648 75 regularizer """no""" +648 75 optimizer """adadelta""" +648 75 training_loop """lcwa""" +648 75 evaluator """rankbased""" +648 76 dataset """kinships""" +648 76 model """simple""" +648 76 loss """softplus""" +648 76 regularizer """no""" +648 76 optimizer """adadelta""" +648 76 training_loop """lcwa""" +648 76 evaluator """rankbased""" +648 77 dataset """kinships""" +648 77 model """simple""" +648 77 loss """softplus""" +648 77 regularizer """no""" +648 77 optimizer """adadelta""" +648 77 training_loop """lcwa""" +648 77 evaluator """rankbased""" +648 78 dataset """kinships""" +648 78 model """simple""" +648 78 
loss """softplus""" +648 78 regularizer """no""" +648 78 optimizer """adadelta""" +648 78 training_loop """lcwa""" +648 78 evaluator """rankbased""" +648 79 dataset """kinships""" +648 79 model """simple""" +648 79 loss """softplus""" +648 79 regularizer """no""" +648 79 optimizer """adadelta""" +648 79 training_loop """lcwa""" +648 79 evaluator """rankbased""" +648 80 dataset """kinships""" +648 80 model """simple""" +648 80 loss """softplus""" +648 80 regularizer """no""" +648 80 optimizer """adadelta""" +648 80 training_loop """lcwa""" +648 80 evaluator """rankbased""" +648 81 dataset """kinships""" +648 81 model """simple""" +648 81 loss """softplus""" +648 81 regularizer """no""" +648 81 optimizer """adadelta""" +648 81 training_loop """lcwa""" +648 81 evaluator """rankbased""" +648 82 dataset """kinships""" +648 82 model """simple""" +648 82 loss """softplus""" +648 82 regularizer """no""" +648 82 optimizer """adadelta""" +648 82 training_loop """lcwa""" +648 82 evaluator """rankbased""" +648 83 dataset """kinships""" +648 83 model """simple""" +648 83 loss """softplus""" +648 83 regularizer """no""" +648 83 optimizer """adadelta""" +648 83 training_loop """lcwa""" +648 83 evaluator """rankbased""" +648 84 dataset """kinships""" +648 84 model """simple""" +648 84 loss """softplus""" +648 84 regularizer """no""" +648 84 optimizer """adadelta""" +648 84 training_loop """lcwa""" +648 84 evaluator """rankbased""" +648 85 dataset """kinships""" +648 85 model """simple""" +648 85 loss """softplus""" +648 85 regularizer """no""" +648 85 optimizer """adadelta""" +648 85 training_loop """lcwa""" +648 85 evaluator """rankbased""" +648 86 dataset """kinships""" +648 86 model """simple""" +648 86 loss """softplus""" +648 86 regularizer """no""" +648 86 optimizer """adadelta""" +648 86 training_loop """lcwa""" +648 86 evaluator """rankbased""" +648 87 dataset """kinships""" +648 87 model """simple""" +648 87 loss """softplus""" +648 87 regularizer """no""" +648 87 
optimizer """adadelta""" +648 87 training_loop """lcwa""" +648 87 evaluator """rankbased""" +648 88 dataset """kinships""" +648 88 model """simple""" +648 88 loss """softplus""" +648 88 regularizer """no""" +648 88 optimizer """adadelta""" +648 88 training_loop """lcwa""" +648 88 evaluator """rankbased""" +648 89 dataset """kinships""" +648 89 model """simple""" +648 89 loss """softplus""" +648 89 regularizer """no""" +648 89 optimizer """adadelta""" +648 89 training_loop """lcwa""" +648 89 evaluator """rankbased""" +648 90 dataset """kinships""" +648 90 model """simple""" +648 90 loss """softplus""" +648 90 regularizer """no""" +648 90 optimizer """adadelta""" +648 90 training_loop """lcwa""" +648 90 evaluator """rankbased""" +648 91 dataset """kinships""" +648 91 model """simple""" +648 91 loss """softplus""" +648 91 regularizer """no""" +648 91 optimizer """adadelta""" +648 91 training_loop """lcwa""" +648 91 evaluator """rankbased""" +648 92 dataset """kinships""" +648 92 model """simple""" +648 92 loss """softplus""" +648 92 regularizer """no""" +648 92 optimizer """adadelta""" +648 92 training_loop """lcwa""" +648 92 evaluator """rankbased""" +648 93 dataset """kinships""" +648 93 model """simple""" +648 93 loss """softplus""" +648 93 regularizer """no""" +648 93 optimizer """adadelta""" +648 93 training_loop """lcwa""" +648 93 evaluator """rankbased""" +648 94 dataset """kinships""" +648 94 model """simple""" +648 94 loss """softplus""" +648 94 regularizer """no""" +648 94 optimizer """adadelta""" +648 94 training_loop """lcwa""" +648 94 evaluator """rankbased""" +648 95 dataset """kinships""" +648 95 model """simple""" +648 95 loss """softplus""" +648 95 regularizer """no""" +648 95 optimizer """adadelta""" +648 95 training_loop """lcwa""" +648 95 evaluator """rankbased""" +648 96 dataset """kinships""" +648 96 model """simple""" +648 96 loss """softplus""" +648 96 regularizer """no""" +648 96 optimizer """adadelta""" +648 96 training_loop """lcwa""" +648 
96 evaluator """rankbased""" +648 97 dataset """kinships""" +648 97 model """simple""" +648 97 loss """softplus""" +648 97 regularizer """no""" +648 97 optimizer """adadelta""" +648 97 training_loop """lcwa""" +648 97 evaluator """rankbased""" +648 98 dataset """kinships""" +648 98 model """simple""" +648 98 loss """softplus""" +648 98 regularizer """no""" +648 98 optimizer """adadelta""" +648 98 training_loop """lcwa""" +648 98 evaluator """rankbased""" +648 99 dataset """kinships""" +648 99 model """simple""" +648 99 loss """softplus""" +648 99 regularizer """no""" +648 99 optimizer """adadelta""" +648 99 training_loop """lcwa""" +648 99 evaluator """rankbased""" +648 100 dataset """kinships""" +648 100 model """simple""" +648 100 loss """softplus""" +648 100 regularizer """no""" +648 100 optimizer """adadelta""" +648 100 training_loop """lcwa""" +648 100 evaluator """rankbased""" +649 1 model.embedding_dim 2.0 +649 1 training.batch_size 2.0 +649 1 training.label_smoothing 0.014968750778100802 +649 2 model.embedding_dim 2.0 +649 2 training.batch_size 0.0 +649 2 training.label_smoothing 0.01792949126849347 +649 3 model.embedding_dim 2.0 +649 3 training.batch_size 2.0 +649 3 training.label_smoothing 0.16672068394393985 +649 4 model.embedding_dim 0.0 +649 4 training.batch_size 2.0 +649 4 training.label_smoothing 0.017016390752098782 +649 5 model.embedding_dim 1.0 +649 5 training.batch_size 0.0 +649 5 training.label_smoothing 0.28648127823785763 +649 6 model.embedding_dim 2.0 +649 6 training.batch_size 2.0 +649 6 training.label_smoothing 0.0013024346237319867 +649 7 model.embedding_dim 1.0 +649 7 training.batch_size 1.0 +649 7 training.label_smoothing 0.7240591793222364 +649 8 model.embedding_dim 2.0 +649 8 training.batch_size 1.0 +649 8 training.label_smoothing 0.005340222375001869 +649 9 model.embedding_dim 0.0 +649 9 training.batch_size 0.0 +649 9 training.label_smoothing 0.012200368781566637 +649 10 model.embedding_dim 0.0 +649 10 training.batch_size 1.0 +649 10 
training.label_smoothing 0.001115335449495781 +649 11 model.embedding_dim 1.0 +649 11 training.batch_size 2.0 +649 11 training.label_smoothing 0.1576954302547754 +649 12 model.embedding_dim 0.0 +649 12 training.batch_size 2.0 +649 12 training.label_smoothing 0.0013693476324954082 +649 13 model.embedding_dim 2.0 +649 13 training.batch_size 1.0 +649 13 training.label_smoothing 0.05198227344628344 +649 14 model.embedding_dim 0.0 +649 14 training.batch_size 0.0 +649 14 training.label_smoothing 0.36076991862648006 +649 15 model.embedding_dim 2.0 +649 15 training.batch_size 2.0 +649 15 training.label_smoothing 0.03969831401870076 +649 16 model.embedding_dim 1.0 +649 16 training.batch_size 1.0 +649 16 training.label_smoothing 0.02027404382384988 +649 17 model.embedding_dim 1.0 +649 17 training.batch_size 0.0 +649 17 training.label_smoothing 0.0105040922796057 +649 18 model.embedding_dim 0.0 +649 18 training.batch_size 0.0 +649 18 training.label_smoothing 0.345780321458426 +649 19 model.embedding_dim 0.0 +649 19 training.batch_size 2.0 +649 19 training.label_smoothing 0.05645247227831987 +649 20 model.embedding_dim 0.0 +649 20 training.batch_size 0.0 +649 20 training.label_smoothing 0.266607823315542 +649 21 model.embedding_dim 0.0 +649 21 training.batch_size 2.0 +649 21 training.label_smoothing 0.009111084812279959 +649 22 model.embedding_dim 1.0 +649 22 training.batch_size 1.0 +649 22 training.label_smoothing 0.03316586509591023 +649 23 model.embedding_dim 1.0 +649 23 training.batch_size 0.0 +649 23 training.label_smoothing 0.06293466751586316 +649 24 model.embedding_dim 1.0 +649 24 training.batch_size 1.0 +649 24 training.label_smoothing 0.032246240325569754 +649 25 model.embedding_dim 1.0 +649 25 training.batch_size 1.0 +649 25 training.label_smoothing 0.11951009522051338 +649 26 model.embedding_dim 0.0 +649 26 training.batch_size 2.0 +649 26 training.label_smoothing 0.0027603002026633993 +649 27 model.embedding_dim 1.0 +649 27 training.batch_size 1.0 +649 27 
training.label_smoothing 0.04575363403944334 +649 28 model.embedding_dim 2.0 +649 28 training.batch_size 1.0 +649 28 training.label_smoothing 0.014568074274464285 +649 29 model.embedding_dim 2.0 +649 29 training.batch_size 2.0 +649 29 training.label_smoothing 0.4928036931153326 +649 30 model.embedding_dim 2.0 +649 30 training.batch_size 1.0 +649 30 training.label_smoothing 0.051284935045387794 +649 31 model.embedding_dim 1.0 +649 31 training.batch_size 0.0 +649 31 training.label_smoothing 0.10592276463816072 +649 32 model.embedding_dim 2.0 +649 32 training.batch_size 0.0 +649 32 training.label_smoothing 0.24726335239711056 +649 33 model.embedding_dim 2.0 +649 33 training.batch_size 1.0 +649 33 training.label_smoothing 0.0010814212346076971 +649 34 model.embedding_dim 1.0 +649 34 training.batch_size 2.0 +649 34 training.label_smoothing 0.02219631003453906 +649 35 model.embedding_dim 1.0 +649 35 training.batch_size 1.0 +649 35 training.label_smoothing 0.44955966363893063 +649 36 model.embedding_dim 0.0 +649 36 training.batch_size 1.0 +649 36 training.label_smoothing 0.002164770280141481 +649 37 model.embedding_dim 2.0 +649 37 training.batch_size 2.0 +649 37 training.label_smoothing 0.1434107997603829 +649 38 model.embedding_dim 0.0 +649 38 training.batch_size 1.0 +649 38 training.label_smoothing 0.81819951935935 +649 39 model.embedding_dim 0.0 +649 39 training.batch_size 2.0 +649 39 training.label_smoothing 0.016265481300717963 +649 40 model.embedding_dim 0.0 +649 40 training.batch_size 2.0 +649 40 training.label_smoothing 0.07492676953291691 +649 41 model.embedding_dim 1.0 +649 41 training.batch_size 0.0 +649 41 training.label_smoothing 0.934005425408291 +649 42 model.embedding_dim 1.0 +649 42 training.batch_size 2.0 +649 42 training.label_smoothing 0.003231451680742605 +649 43 model.embedding_dim 1.0 +649 43 training.batch_size 1.0 +649 43 training.label_smoothing 0.7885660258416807 +649 44 model.embedding_dim 2.0 +649 44 training.batch_size 2.0 +649 44 
training.label_smoothing 0.035524634383905594 +649 45 model.embedding_dim 1.0 +649 45 training.batch_size 1.0 +649 45 training.label_smoothing 0.025831514307939635 +649 46 model.embedding_dim 1.0 +649 46 training.batch_size 2.0 +649 46 training.label_smoothing 0.012172661682081679 +649 47 model.embedding_dim 2.0 +649 47 training.batch_size 1.0 +649 47 training.label_smoothing 0.0019589696196372756 +649 48 model.embedding_dim 2.0 +649 48 training.batch_size 0.0 +649 48 training.label_smoothing 0.014799081298868802 +649 49 model.embedding_dim 1.0 +649 49 training.batch_size 0.0 +649 49 training.label_smoothing 0.23868367632169474 +649 50 model.embedding_dim 0.0 +649 50 training.batch_size 2.0 +649 50 training.label_smoothing 0.010496707600095656 +649 51 model.embedding_dim 2.0 +649 51 training.batch_size 2.0 +649 51 training.label_smoothing 0.1370332175107572 +649 52 model.embedding_dim 2.0 +649 52 training.batch_size 2.0 +649 52 training.label_smoothing 0.13108463375346865 +649 53 model.embedding_dim 0.0 +649 53 training.batch_size 0.0 +649 53 training.label_smoothing 0.1640710651356242 +649 54 model.embedding_dim 1.0 +649 54 training.batch_size 1.0 +649 54 training.label_smoothing 0.06503406873603801 +649 55 model.embedding_dim 0.0 +649 55 training.batch_size 2.0 +649 55 training.label_smoothing 0.5112751345425135 +649 56 model.embedding_dim 1.0 +649 56 training.batch_size 1.0 +649 56 training.label_smoothing 0.4705269883992206 +649 57 model.embedding_dim 2.0 +649 57 training.batch_size 2.0 +649 57 training.label_smoothing 0.06672069072932112 +649 58 model.embedding_dim 0.0 +649 58 training.batch_size 0.0 +649 58 training.label_smoothing 0.08190541400207646 +649 59 model.embedding_dim 2.0 +649 59 training.batch_size 1.0 +649 59 training.label_smoothing 0.022536718730132962 +649 60 model.embedding_dim 2.0 +649 60 training.batch_size 2.0 +649 60 training.label_smoothing 0.6414329757922812 +649 61 model.embedding_dim 1.0 +649 61 training.batch_size 1.0 +649 61 
training.label_smoothing 0.01784915192955449 +649 62 model.embedding_dim 0.0 +649 62 training.batch_size 2.0 +649 62 training.label_smoothing 0.004974112251435858 +649 63 model.embedding_dim 1.0 +649 63 training.batch_size 2.0 +649 63 training.label_smoothing 0.17004893569106072 +649 64 model.embedding_dim 0.0 +649 64 training.batch_size 0.0 +649 64 training.label_smoothing 0.25711746339425634 +649 65 model.embedding_dim 2.0 +649 65 training.batch_size 1.0 +649 65 training.label_smoothing 0.317903511675748 +649 66 model.embedding_dim 0.0 +649 66 training.batch_size 0.0 +649 66 training.label_smoothing 0.01970075824823054 +649 67 model.embedding_dim 1.0 +649 67 training.batch_size 0.0 +649 67 training.label_smoothing 0.0023477465739596944 +649 68 model.embedding_dim 2.0 +649 68 training.batch_size 1.0 +649 68 training.label_smoothing 0.4600679074616901 +649 69 model.embedding_dim 0.0 +649 69 training.batch_size 0.0 +649 69 training.label_smoothing 0.13631549353296105 +649 70 model.embedding_dim 0.0 +649 70 training.batch_size 2.0 +649 70 training.label_smoothing 0.00530330425620101 +649 71 model.embedding_dim 2.0 +649 71 training.batch_size 1.0 +649 71 training.label_smoothing 0.02246101022499351 +649 72 model.embedding_dim 1.0 +649 72 training.batch_size 0.0 +649 72 training.label_smoothing 0.09148173342735479 +649 73 model.embedding_dim 0.0 +649 73 training.batch_size 0.0 +649 73 training.label_smoothing 0.0448304543337466 +649 74 model.embedding_dim 2.0 +649 74 training.batch_size 2.0 +649 74 training.label_smoothing 0.006539068900519374 +649 75 model.embedding_dim 1.0 +649 75 training.batch_size 1.0 +649 75 training.label_smoothing 0.2537933833513392 +649 76 model.embedding_dim 0.0 +649 76 training.batch_size 2.0 +649 76 training.label_smoothing 0.0026727490094605433 +649 77 model.embedding_dim 1.0 +649 77 training.batch_size 1.0 +649 77 training.label_smoothing 0.010446608901406015 +649 78 model.embedding_dim 0.0 +649 78 training.batch_size 0.0 +649 78 
training.label_smoothing 0.005602221630777533 +649 79 model.embedding_dim 1.0 +649 79 training.batch_size 1.0 +649 79 training.label_smoothing 0.3132040409714934 +649 80 model.embedding_dim 2.0 +649 80 training.batch_size 2.0 +649 80 training.label_smoothing 0.9746467230907923 +649 81 model.embedding_dim 0.0 +649 81 training.batch_size 0.0 +649 81 training.label_smoothing 0.004975027349469904 +649 82 model.embedding_dim 0.0 +649 82 training.batch_size 2.0 +649 82 training.label_smoothing 0.005265897380341292 +649 83 model.embedding_dim 0.0 +649 83 training.batch_size 0.0 +649 83 training.label_smoothing 0.5018003383364708 +649 84 model.embedding_dim 0.0 +649 84 training.batch_size 0.0 +649 84 training.label_smoothing 0.8680266144028839 +649 85 model.embedding_dim 2.0 +649 85 training.batch_size 0.0 +649 85 training.label_smoothing 0.06468814723601021 +649 86 model.embedding_dim 0.0 +649 86 training.batch_size 1.0 +649 86 training.label_smoothing 0.032040032331690387 +649 87 model.embedding_dim 0.0 +649 87 training.batch_size 2.0 +649 87 training.label_smoothing 0.14335948318058672 +649 88 model.embedding_dim 0.0 +649 88 training.batch_size 0.0 +649 88 training.label_smoothing 0.00926340887841887 +649 89 model.embedding_dim 1.0 +649 89 training.batch_size 0.0 +649 89 training.label_smoothing 0.011596620475611965 +649 90 model.embedding_dim 0.0 +649 90 training.batch_size 1.0 +649 90 training.label_smoothing 0.004595701562542948 +649 91 model.embedding_dim 1.0 +649 91 training.batch_size 1.0 +649 91 training.label_smoothing 0.014175168509178776 +649 92 model.embedding_dim 2.0 +649 92 training.batch_size 0.0 +649 92 training.label_smoothing 0.5419563352267636 +649 93 model.embedding_dim 2.0 +649 93 training.batch_size 1.0 +649 93 training.label_smoothing 0.0026410925285380127 +649 94 model.embedding_dim 0.0 +649 94 training.batch_size 1.0 +649 94 training.label_smoothing 0.11333093972652418 +649 95 model.embedding_dim 0.0 +649 95 training.batch_size 1.0 +649 95 
training.label_smoothing 0.0010843366525641997 +649 96 model.embedding_dim 1.0 +649 96 training.batch_size 0.0 +649 96 training.label_smoothing 0.001157469495424548 +649 97 model.embedding_dim 2.0 +649 97 training.batch_size 2.0 +649 97 training.label_smoothing 0.0014569154653477797 +649 98 model.embedding_dim 0.0 +649 98 training.batch_size 0.0 +649 98 training.label_smoothing 0.02727017552456973 +649 99 model.embedding_dim 2.0 +649 99 training.batch_size 2.0 +649 99 training.label_smoothing 0.08198832867018567 +649 100 model.embedding_dim 1.0 +649 100 training.batch_size 1.0 +649 100 training.label_smoothing 0.0045468762993364385 +649 1 dataset """kinships""" +649 1 model """simple""" +649 1 loss """bceaftersigmoid""" +649 1 regularizer """no""" +649 1 optimizer """adadelta""" +649 1 training_loop """lcwa""" +649 1 evaluator """rankbased""" +649 2 dataset """kinships""" +649 2 model """simple""" +649 2 loss """bceaftersigmoid""" +649 2 regularizer """no""" +649 2 optimizer """adadelta""" +649 2 training_loop """lcwa""" +649 2 evaluator """rankbased""" +649 3 dataset """kinships""" +649 3 model """simple""" +649 3 loss """bceaftersigmoid""" +649 3 regularizer """no""" +649 3 optimizer """adadelta""" +649 3 training_loop """lcwa""" +649 3 evaluator """rankbased""" +649 4 dataset """kinships""" +649 4 model """simple""" +649 4 loss """bceaftersigmoid""" +649 4 regularizer """no""" +649 4 optimizer """adadelta""" +649 4 training_loop """lcwa""" +649 4 evaluator """rankbased""" +649 5 dataset """kinships""" +649 5 model """simple""" +649 5 loss """bceaftersigmoid""" +649 5 regularizer """no""" +649 5 optimizer """adadelta""" +649 5 training_loop """lcwa""" +649 5 evaluator """rankbased""" +649 6 dataset """kinships""" +649 6 model """simple""" +649 6 loss """bceaftersigmoid""" +649 6 regularizer """no""" +649 6 optimizer """adadelta""" +649 6 training_loop """lcwa""" +649 6 evaluator """rankbased""" +649 7 dataset """kinships""" +649 7 model """simple""" +649 7 loss 
"""bceaftersigmoid""" +649 7 regularizer """no""" +649 7 optimizer """adadelta""" +649 7 training_loop """lcwa""" +649 7 evaluator """rankbased""" +649 8 dataset """kinships""" +649 8 model """simple""" +649 8 loss """bceaftersigmoid""" +649 8 regularizer """no""" +649 8 optimizer """adadelta""" +649 8 training_loop """lcwa""" +649 8 evaluator """rankbased""" +649 9 dataset """kinships""" +649 9 model """simple""" +649 9 loss """bceaftersigmoid""" +649 9 regularizer """no""" +649 9 optimizer """adadelta""" +649 9 training_loop """lcwa""" +649 9 evaluator """rankbased""" +649 10 dataset """kinships""" +649 10 model """simple""" +649 10 loss """bceaftersigmoid""" +649 10 regularizer """no""" +649 10 optimizer """adadelta""" +649 10 training_loop """lcwa""" +649 10 evaluator """rankbased""" +649 11 dataset """kinships""" +649 11 model """simple""" +649 11 loss """bceaftersigmoid""" +649 11 regularizer """no""" +649 11 optimizer """adadelta""" +649 11 training_loop """lcwa""" +649 11 evaluator """rankbased""" +649 12 dataset """kinships""" +649 12 model """simple""" +649 12 loss """bceaftersigmoid""" +649 12 regularizer """no""" +649 12 optimizer """adadelta""" +649 12 training_loop """lcwa""" +649 12 evaluator """rankbased""" +649 13 dataset """kinships""" +649 13 model """simple""" +649 13 loss """bceaftersigmoid""" +649 13 regularizer """no""" +649 13 optimizer """adadelta""" +649 13 training_loop """lcwa""" +649 13 evaluator """rankbased""" +649 14 dataset """kinships""" +649 14 model """simple""" +649 14 loss """bceaftersigmoid""" +649 14 regularizer """no""" +649 14 optimizer """adadelta""" +649 14 training_loop """lcwa""" +649 14 evaluator """rankbased""" +649 15 dataset """kinships""" +649 15 model """simple""" +649 15 loss """bceaftersigmoid""" +649 15 regularizer """no""" +649 15 optimizer """adadelta""" +649 15 training_loop """lcwa""" +649 15 evaluator """rankbased""" +649 16 dataset """kinships""" +649 16 model """simple""" +649 16 loss 
"""bceaftersigmoid""" +649 16 regularizer """no""" +649 16 optimizer """adadelta""" +649 16 training_loop """lcwa""" +649 16 evaluator """rankbased""" +649 17 dataset """kinships""" +649 17 model """simple""" +649 17 loss """bceaftersigmoid""" +649 17 regularizer """no""" +649 17 optimizer """adadelta""" +649 17 training_loop """lcwa""" +649 17 evaluator """rankbased""" +649 18 dataset """kinships""" +649 18 model """simple""" +649 18 loss """bceaftersigmoid""" +649 18 regularizer """no""" +649 18 optimizer """adadelta""" +649 18 training_loop """lcwa""" +649 18 evaluator """rankbased""" +649 19 dataset """kinships""" +649 19 model """simple""" +649 19 loss """bceaftersigmoid""" +649 19 regularizer """no""" +649 19 optimizer """adadelta""" +649 19 training_loop """lcwa""" +649 19 evaluator """rankbased""" +649 20 dataset """kinships""" +649 20 model """simple""" +649 20 loss """bceaftersigmoid""" +649 20 regularizer """no""" +649 20 optimizer """adadelta""" +649 20 training_loop """lcwa""" +649 20 evaluator """rankbased""" +649 21 dataset """kinships""" +649 21 model """simple""" +649 21 loss """bceaftersigmoid""" +649 21 regularizer """no""" +649 21 optimizer """adadelta""" +649 21 training_loop """lcwa""" +649 21 evaluator """rankbased""" +649 22 dataset """kinships""" +649 22 model """simple""" +649 22 loss """bceaftersigmoid""" +649 22 regularizer """no""" +649 22 optimizer """adadelta""" +649 22 training_loop """lcwa""" +649 22 evaluator """rankbased""" +649 23 dataset """kinships""" +649 23 model """simple""" +649 23 loss """bceaftersigmoid""" +649 23 regularizer """no""" +649 23 optimizer """adadelta""" +649 23 training_loop """lcwa""" +649 23 evaluator """rankbased""" +649 24 dataset """kinships""" +649 24 model """simple""" +649 24 loss """bceaftersigmoid""" +649 24 regularizer """no""" +649 24 optimizer """adadelta""" +649 24 training_loop """lcwa""" +649 24 evaluator """rankbased""" +649 25 dataset """kinships""" +649 25 model """simple""" +649 25 loss 
"""bceaftersigmoid""" +649 25 regularizer """no""" +649 25 optimizer """adadelta""" +649 25 training_loop """lcwa""" +649 25 evaluator """rankbased""" +649 26 dataset """kinships""" +649 26 model """simple""" +649 26 loss """bceaftersigmoid""" +649 26 regularizer """no""" +649 26 optimizer """adadelta""" +649 26 training_loop """lcwa""" +649 26 evaluator """rankbased""" +649 27 dataset """kinships""" +649 27 model """simple""" +649 27 loss """bceaftersigmoid""" +649 27 regularizer """no""" +649 27 optimizer """adadelta""" +649 27 training_loop """lcwa""" +649 27 evaluator """rankbased""" +649 28 dataset """kinships""" +649 28 model """simple""" +649 28 loss """bceaftersigmoid""" +649 28 regularizer """no""" +649 28 optimizer """adadelta""" +649 28 training_loop """lcwa""" +649 28 evaluator """rankbased""" +649 29 dataset """kinships""" +649 29 model """simple""" +649 29 loss """bceaftersigmoid""" +649 29 regularizer """no""" +649 29 optimizer """adadelta""" +649 29 training_loop """lcwa""" +649 29 evaluator """rankbased""" +649 30 dataset """kinships""" +649 30 model """simple""" +649 30 loss """bceaftersigmoid""" +649 30 regularizer """no""" +649 30 optimizer """adadelta""" +649 30 training_loop """lcwa""" +649 30 evaluator """rankbased""" +649 31 dataset """kinships""" +649 31 model """simple""" +649 31 loss """bceaftersigmoid""" +649 31 regularizer """no""" +649 31 optimizer """adadelta""" +649 31 training_loop """lcwa""" +649 31 evaluator """rankbased""" +649 32 dataset """kinships""" +649 32 model """simple""" +649 32 loss """bceaftersigmoid""" +649 32 regularizer """no""" +649 32 optimizer """adadelta""" +649 32 training_loop """lcwa""" +649 32 evaluator """rankbased""" +649 33 dataset """kinships""" +649 33 model """simple""" +649 33 loss """bceaftersigmoid""" +649 33 regularizer """no""" +649 33 optimizer """adadelta""" +649 33 training_loop """lcwa""" +649 33 evaluator """rankbased""" +649 34 dataset """kinships""" +649 34 model """simple""" +649 34 loss 
"""bceaftersigmoid""" +649 34 regularizer """no""" +649 34 optimizer """adadelta""" +649 34 training_loop """lcwa""" +649 34 evaluator """rankbased""" +649 35 dataset """kinships""" +649 35 model """simple""" +649 35 loss """bceaftersigmoid""" +649 35 regularizer """no""" +649 35 optimizer """adadelta""" +649 35 training_loop """lcwa""" +649 35 evaluator """rankbased""" +649 36 dataset """kinships""" +649 36 model """simple""" +649 36 loss """bceaftersigmoid""" +649 36 regularizer """no""" +649 36 optimizer """adadelta""" +649 36 training_loop """lcwa""" +649 36 evaluator """rankbased""" +649 37 dataset """kinships""" +649 37 model """simple""" +649 37 loss """bceaftersigmoid""" +649 37 regularizer """no""" +649 37 optimizer """adadelta""" +649 37 training_loop """lcwa""" +649 37 evaluator """rankbased""" +649 38 dataset """kinships""" +649 38 model """simple""" +649 38 loss """bceaftersigmoid""" +649 38 regularizer """no""" +649 38 optimizer """adadelta""" +649 38 training_loop """lcwa""" +649 38 evaluator """rankbased""" +649 39 dataset """kinships""" +649 39 model """simple""" +649 39 loss """bceaftersigmoid""" +649 39 regularizer """no""" +649 39 optimizer """adadelta""" +649 39 training_loop """lcwa""" +649 39 evaluator """rankbased""" +649 40 dataset """kinships""" +649 40 model """simple""" +649 40 loss """bceaftersigmoid""" +649 40 regularizer """no""" +649 40 optimizer """adadelta""" +649 40 training_loop """lcwa""" +649 40 evaluator """rankbased""" +649 41 dataset """kinships""" +649 41 model """simple""" +649 41 loss """bceaftersigmoid""" +649 41 regularizer """no""" +649 41 optimizer """adadelta""" +649 41 training_loop """lcwa""" +649 41 evaluator """rankbased""" +649 42 dataset """kinships""" +649 42 model """simple""" +649 42 loss """bceaftersigmoid""" +649 42 regularizer """no""" +649 42 optimizer """adadelta""" +649 42 training_loop """lcwa""" +649 42 evaluator """rankbased""" +649 43 dataset """kinships""" +649 43 model """simple""" +649 43 loss 
"""bceaftersigmoid""" +649 43 regularizer """no""" +649 43 optimizer """adadelta""" +649 43 training_loop """lcwa""" +649 43 evaluator """rankbased""" +649 44 dataset """kinships""" +649 44 model """simple""" +649 44 loss """bceaftersigmoid""" +649 44 regularizer """no""" +649 44 optimizer """adadelta""" +649 44 training_loop """lcwa""" +649 44 evaluator """rankbased""" +649 45 dataset """kinships""" +649 45 model """simple""" +649 45 loss """bceaftersigmoid""" +649 45 regularizer """no""" +649 45 optimizer """adadelta""" +649 45 training_loop """lcwa""" +649 45 evaluator """rankbased""" +649 46 dataset """kinships""" +649 46 model """simple""" +649 46 loss """bceaftersigmoid""" +649 46 regularizer """no""" +649 46 optimizer """adadelta""" +649 46 training_loop """lcwa""" +649 46 evaluator """rankbased""" +649 47 dataset """kinships""" +649 47 model """simple""" +649 47 loss """bceaftersigmoid""" +649 47 regularizer """no""" +649 47 optimizer """adadelta""" +649 47 training_loop """lcwa""" +649 47 evaluator """rankbased""" +649 48 dataset """kinships""" +649 48 model """simple""" +649 48 loss """bceaftersigmoid""" +649 48 regularizer """no""" +649 48 optimizer """adadelta""" +649 48 training_loop """lcwa""" +649 48 evaluator """rankbased""" +649 49 dataset """kinships""" +649 49 model """simple""" +649 49 loss """bceaftersigmoid""" +649 49 regularizer """no""" +649 49 optimizer """adadelta""" +649 49 training_loop """lcwa""" +649 49 evaluator """rankbased""" +649 50 dataset """kinships""" +649 50 model """simple""" +649 50 loss """bceaftersigmoid""" +649 50 regularizer """no""" +649 50 optimizer """adadelta""" +649 50 training_loop """lcwa""" +649 50 evaluator """rankbased""" +649 51 dataset """kinships""" +649 51 model """simple""" +649 51 loss """bceaftersigmoid""" +649 51 regularizer """no""" +649 51 optimizer """adadelta""" +649 51 training_loop """lcwa""" +649 51 evaluator """rankbased""" +649 52 dataset """kinships""" +649 52 model """simple""" +649 52 loss 
"""bceaftersigmoid""" +649 52 regularizer """no""" +649 52 optimizer """adadelta""" +649 52 training_loop """lcwa""" +649 52 evaluator """rankbased""" +649 53 dataset """kinships""" +649 53 model """simple""" +649 53 loss """bceaftersigmoid""" +649 53 regularizer """no""" +649 53 optimizer """adadelta""" +649 53 training_loop """lcwa""" +649 53 evaluator """rankbased""" +649 54 dataset """kinships""" +649 54 model """simple""" +649 54 loss """bceaftersigmoid""" +649 54 regularizer """no""" +649 54 optimizer """adadelta""" +649 54 training_loop """lcwa""" +649 54 evaluator """rankbased""" +649 55 dataset """kinships""" +649 55 model """simple""" +649 55 loss """bceaftersigmoid""" +649 55 regularizer """no""" +649 55 optimizer """adadelta""" +649 55 training_loop """lcwa""" +649 55 evaluator """rankbased""" +649 56 dataset """kinships""" +649 56 model """simple""" +649 56 loss """bceaftersigmoid""" +649 56 regularizer """no""" +649 56 optimizer """adadelta""" +649 56 training_loop """lcwa""" +649 56 evaluator """rankbased""" +649 57 dataset """kinships""" +649 57 model """simple""" +649 57 loss """bceaftersigmoid""" +649 57 regularizer """no""" +649 57 optimizer """adadelta""" +649 57 training_loop """lcwa""" +649 57 evaluator """rankbased""" +649 58 dataset """kinships""" +649 58 model """simple""" +649 58 loss """bceaftersigmoid""" +649 58 regularizer """no""" +649 58 optimizer """adadelta""" +649 58 training_loop """lcwa""" +649 58 evaluator """rankbased""" +649 59 dataset """kinships""" +649 59 model """simple""" +649 59 loss """bceaftersigmoid""" +649 59 regularizer """no""" +649 59 optimizer """adadelta""" +649 59 training_loop """lcwa""" +649 59 evaluator """rankbased""" +649 60 dataset """kinships""" +649 60 model """simple""" +649 60 loss """bceaftersigmoid""" +649 60 regularizer """no""" +649 60 optimizer """adadelta""" +649 60 training_loop """lcwa""" +649 60 evaluator """rankbased""" +649 61 dataset """kinships""" +649 61 model """simple""" +649 61 loss 
"""bceaftersigmoid""" +649 61 regularizer """no""" +649 61 optimizer """adadelta""" +649 61 training_loop """lcwa""" +649 61 evaluator """rankbased""" +649 62 dataset """kinships""" +649 62 model """simple""" +649 62 loss """bceaftersigmoid""" +649 62 regularizer """no""" +649 62 optimizer """adadelta""" +649 62 training_loop """lcwa""" +649 62 evaluator """rankbased""" +649 63 dataset """kinships""" +649 63 model """simple""" +649 63 loss """bceaftersigmoid""" +649 63 regularizer """no""" +649 63 optimizer """adadelta""" +649 63 training_loop """lcwa""" +649 63 evaluator """rankbased""" +649 64 dataset """kinships""" +649 64 model """simple""" +649 64 loss """bceaftersigmoid""" +649 64 regularizer """no""" +649 64 optimizer """adadelta""" +649 64 training_loop """lcwa""" +649 64 evaluator """rankbased""" +649 65 dataset """kinships""" +649 65 model """simple""" +649 65 loss """bceaftersigmoid""" +649 65 regularizer """no""" +649 65 optimizer """adadelta""" +649 65 training_loop """lcwa""" +649 65 evaluator """rankbased""" +649 66 dataset """kinships""" +649 66 model """simple""" +649 66 loss """bceaftersigmoid""" +649 66 regularizer """no""" +649 66 optimizer """adadelta""" +649 66 training_loop """lcwa""" +649 66 evaluator """rankbased""" +649 67 dataset """kinships""" +649 67 model """simple""" +649 67 loss """bceaftersigmoid""" +649 67 regularizer """no""" +649 67 optimizer """adadelta""" +649 67 training_loop """lcwa""" +649 67 evaluator """rankbased""" +649 68 dataset """kinships""" +649 68 model """simple""" +649 68 loss """bceaftersigmoid""" +649 68 regularizer """no""" +649 68 optimizer """adadelta""" +649 68 training_loop """lcwa""" +649 68 evaluator """rankbased""" +649 69 dataset """kinships""" +649 69 model """simple""" +649 69 loss """bceaftersigmoid""" +649 69 regularizer """no""" +649 69 optimizer """adadelta""" +649 69 training_loop """lcwa""" +649 69 evaluator """rankbased""" +649 70 dataset """kinships""" +649 70 model """simple""" +649 70 loss 
"""bceaftersigmoid""" +649 70 regularizer """no""" +649 70 optimizer """adadelta""" +649 70 training_loop """lcwa""" +649 70 evaluator """rankbased""" +649 71 dataset """kinships""" +649 71 model """simple""" +649 71 loss """bceaftersigmoid""" +649 71 regularizer """no""" +649 71 optimizer """adadelta""" +649 71 training_loop """lcwa""" +649 71 evaluator """rankbased""" +649 72 dataset """kinships""" +649 72 model """simple""" +649 72 loss """bceaftersigmoid""" +649 72 regularizer """no""" +649 72 optimizer """adadelta""" +649 72 training_loop """lcwa""" +649 72 evaluator """rankbased""" +649 73 dataset """kinships""" +649 73 model """simple""" +649 73 loss """bceaftersigmoid""" +649 73 regularizer """no""" +649 73 optimizer """adadelta""" +649 73 training_loop """lcwa""" +649 73 evaluator """rankbased""" +649 74 dataset """kinships""" +649 74 model """simple""" +649 74 loss """bceaftersigmoid""" +649 74 regularizer """no""" +649 74 optimizer """adadelta""" +649 74 training_loop """lcwa""" +649 74 evaluator """rankbased""" +649 75 dataset """kinships""" +649 75 model """simple""" +649 75 loss """bceaftersigmoid""" +649 75 regularizer """no""" +649 75 optimizer """adadelta""" +649 75 training_loop """lcwa""" +649 75 evaluator """rankbased""" +649 76 dataset """kinships""" +649 76 model """simple""" +649 76 loss """bceaftersigmoid""" +649 76 regularizer """no""" +649 76 optimizer """adadelta""" +649 76 training_loop """lcwa""" +649 76 evaluator """rankbased""" +649 77 dataset """kinships""" +649 77 model """simple""" +649 77 loss """bceaftersigmoid""" +649 77 regularizer """no""" +649 77 optimizer """adadelta""" +649 77 training_loop """lcwa""" +649 77 evaluator """rankbased""" +649 78 dataset """kinships""" +649 78 model """simple""" +649 78 loss """bceaftersigmoid""" +649 78 regularizer """no""" +649 78 optimizer """adadelta""" +649 78 training_loop """lcwa""" +649 78 evaluator """rankbased""" +649 79 dataset """kinships""" +649 79 model """simple""" +649 79 loss 
"""bceaftersigmoid""" +649 79 regularizer """no""" +649 79 optimizer """adadelta""" +649 79 training_loop """lcwa""" +649 79 evaluator """rankbased""" +649 80 dataset """kinships""" +649 80 model """simple""" +649 80 loss """bceaftersigmoid""" +649 80 regularizer """no""" +649 80 optimizer """adadelta""" +649 80 training_loop """lcwa""" +649 80 evaluator """rankbased""" +649 81 dataset """kinships""" +649 81 model """simple""" +649 81 loss """bceaftersigmoid""" +649 81 regularizer """no""" +649 81 optimizer """adadelta""" +649 81 training_loop """lcwa""" +649 81 evaluator """rankbased""" +649 82 dataset """kinships""" +649 82 model """simple""" +649 82 loss """bceaftersigmoid""" +649 82 regularizer """no""" +649 82 optimizer """adadelta""" +649 82 training_loop """lcwa""" +649 82 evaluator """rankbased""" +649 83 dataset """kinships""" +649 83 model """simple""" +649 83 loss """bceaftersigmoid""" +649 83 regularizer """no""" +649 83 optimizer """adadelta""" +649 83 training_loop """lcwa""" +649 83 evaluator """rankbased""" +649 84 dataset """kinships""" +649 84 model """simple""" +649 84 loss """bceaftersigmoid""" +649 84 regularizer """no""" +649 84 optimizer """adadelta""" +649 84 training_loop """lcwa""" +649 84 evaluator """rankbased""" +649 85 dataset """kinships""" +649 85 model """simple""" +649 85 loss """bceaftersigmoid""" +649 85 regularizer """no""" +649 85 optimizer """adadelta""" +649 85 training_loop """lcwa""" +649 85 evaluator """rankbased""" +649 86 dataset """kinships""" +649 86 model """simple""" +649 86 loss """bceaftersigmoid""" +649 86 regularizer """no""" +649 86 optimizer """adadelta""" +649 86 training_loop """lcwa""" +649 86 evaluator """rankbased""" +649 87 dataset """kinships""" +649 87 model """simple""" +649 87 loss """bceaftersigmoid""" +649 87 regularizer """no""" +649 87 optimizer """adadelta""" +649 87 training_loop """lcwa""" +649 87 evaluator """rankbased""" +649 88 dataset """kinships""" +649 88 model """simple""" +649 88 loss 
"""bceaftersigmoid""" +649 88 regularizer """no""" +649 88 optimizer """adadelta""" +649 88 training_loop """lcwa""" +649 88 evaluator """rankbased""" +649 89 dataset """kinships""" +649 89 model """simple""" +649 89 loss """bceaftersigmoid""" +649 89 regularizer """no""" +649 89 optimizer """adadelta""" +649 89 training_loop """lcwa""" +649 89 evaluator """rankbased""" +649 90 dataset """kinships""" +649 90 model """simple""" +649 90 loss """bceaftersigmoid""" +649 90 regularizer """no""" +649 90 optimizer """adadelta""" +649 90 training_loop """lcwa""" +649 90 evaluator """rankbased""" +649 91 dataset """kinships""" +649 91 model """simple""" +649 91 loss """bceaftersigmoid""" +649 91 regularizer """no""" +649 91 optimizer """adadelta""" +649 91 training_loop """lcwa""" +649 91 evaluator """rankbased""" +649 92 dataset """kinships""" +649 92 model """simple""" +649 92 loss """bceaftersigmoid""" +649 92 regularizer """no""" +649 92 optimizer """adadelta""" +649 92 training_loop """lcwa""" +649 92 evaluator """rankbased""" +649 93 dataset """kinships""" +649 93 model """simple""" +649 93 loss """bceaftersigmoid""" +649 93 regularizer """no""" +649 93 optimizer """adadelta""" +649 93 training_loop """lcwa""" +649 93 evaluator """rankbased""" +649 94 dataset """kinships""" +649 94 model """simple""" +649 94 loss """bceaftersigmoid""" +649 94 regularizer """no""" +649 94 optimizer """adadelta""" +649 94 training_loop """lcwa""" +649 94 evaluator """rankbased""" +649 95 dataset """kinships""" +649 95 model """simple""" +649 95 loss """bceaftersigmoid""" +649 95 regularizer """no""" +649 95 optimizer """adadelta""" +649 95 training_loop """lcwa""" +649 95 evaluator """rankbased""" +649 96 dataset """kinships""" +649 96 model """simple""" +649 96 loss """bceaftersigmoid""" +649 96 regularizer """no""" +649 96 optimizer """adadelta""" +649 96 training_loop """lcwa""" +649 96 evaluator """rankbased""" +649 97 dataset """kinships""" +649 97 model """simple""" +649 97 loss 
"""bceaftersigmoid""" +649 97 regularizer """no""" +649 97 optimizer """adadelta""" +649 97 training_loop """lcwa""" +649 97 evaluator """rankbased""" +649 98 dataset """kinships""" +649 98 model """simple""" +649 98 loss """bceaftersigmoid""" +649 98 regularizer """no""" +649 98 optimizer """adadelta""" +649 98 training_loop """lcwa""" +649 98 evaluator """rankbased""" +649 99 dataset """kinships""" +649 99 model """simple""" +649 99 loss """bceaftersigmoid""" +649 99 regularizer """no""" +649 99 optimizer """adadelta""" +649 99 training_loop """lcwa""" +649 99 evaluator """rankbased""" +649 100 dataset """kinships""" +649 100 model """simple""" +649 100 loss """bceaftersigmoid""" +649 100 regularizer """no""" +649 100 optimizer """adadelta""" +649 100 training_loop """lcwa""" +649 100 evaluator """rankbased""" +650 1 model.embedding_dim 2.0 +650 1 training.batch_size 2.0 +650 1 training.label_smoothing 0.0042952147300576015 +650 2 model.embedding_dim 1.0 +650 2 training.batch_size 1.0 +650 2 training.label_smoothing 0.06878804209066179 +650 3 model.embedding_dim 2.0 +650 3 training.batch_size 1.0 +650 3 training.label_smoothing 0.25293086484106153 +650 4 model.embedding_dim 0.0 +650 4 training.batch_size 2.0 +650 4 training.label_smoothing 0.545655612176972 +650 5 model.embedding_dim 0.0 +650 5 training.batch_size 1.0 +650 5 training.label_smoothing 0.5322687822204564 +650 6 model.embedding_dim 2.0 +650 6 training.batch_size 2.0 +650 6 training.label_smoothing 0.1675490628918011 +650 7 model.embedding_dim 2.0 +650 7 training.batch_size 1.0 +650 7 training.label_smoothing 0.5929258311176372 +650 8 model.embedding_dim 0.0 +650 8 training.batch_size 1.0 +650 8 training.label_smoothing 0.003166929398396868 +650 9 model.embedding_dim 2.0 +650 9 training.batch_size 0.0 +650 9 training.label_smoothing 0.04395916342054691 +650 10 model.embedding_dim 2.0 +650 10 training.batch_size 1.0 +650 10 training.label_smoothing 0.8808752277213437 +650 11 model.embedding_dim 1.0 
+650 11 training.batch_size 2.0 +650 11 training.label_smoothing 0.012525562096948379 +650 12 model.embedding_dim 0.0 +650 12 training.batch_size 2.0 +650 12 training.label_smoothing 0.00740770036882308 +650 13 model.embedding_dim 1.0 +650 13 training.batch_size 1.0 +650 13 training.label_smoothing 0.00295590273704276 +650 14 model.embedding_dim 2.0 +650 14 training.batch_size 1.0 +650 14 training.label_smoothing 0.0028164516968473825 +650 15 model.embedding_dim 0.0 +650 15 training.batch_size 1.0 +650 15 training.label_smoothing 0.0010762835950740039 +650 16 model.embedding_dim 0.0 +650 16 training.batch_size 0.0 +650 16 training.label_smoothing 0.01011309437819808 +650 17 model.embedding_dim 1.0 +650 17 training.batch_size 0.0 +650 17 training.label_smoothing 0.008160588905550534 +650 18 model.embedding_dim 0.0 +650 18 training.batch_size 0.0 +650 18 training.label_smoothing 0.022476899320639485 +650 19 model.embedding_dim 0.0 +650 19 training.batch_size 2.0 +650 19 training.label_smoothing 0.005568426323986563 +650 20 model.embedding_dim 1.0 +650 20 training.batch_size 1.0 +650 20 training.label_smoothing 0.0012011385520429294 +650 21 model.embedding_dim 1.0 +650 21 training.batch_size 1.0 +650 21 training.label_smoothing 0.006013876549723 +650 22 model.embedding_dim 2.0 +650 22 training.batch_size 0.0 +650 22 training.label_smoothing 0.0357585558060121 +650 23 model.embedding_dim 0.0 +650 23 training.batch_size 2.0 +650 23 training.label_smoothing 0.062445586035751566 +650 24 model.embedding_dim 1.0 +650 24 training.batch_size 0.0 +650 24 training.label_smoothing 0.08060535831514241 +650 25 model.embedding_dim 1.0 +650 25 training.batch_size 0.0 +650 25 training.label_smoothing 0.40098947844794114 +650 26 model.embedding_dim 0.0 +650 26 training.batch_size 0.0 +650 26 training.label_smoothing 0.06568823382339914 +650 27 model.embedding_dim 2.0 +650 27 training.batch_size 0.0 +650 27 training.label_smoothing 0.5727550948558393 +650 28 model.embedding_dim 1.0 
+650 28 training.batch_size 2.0 +650 28 training.label_smoothing 0.008386913124341987 +650 29 model.embedding_dim 0.0 +650 29 training.batch_size 2.0 +650 29 training.label_smoothing 0.022537325479724417 +650 30 model.embedding_dim 1.0 +650 30 training.batch_size 0.0 +650 30 training.label_smoothing 0.010613962116232487 +650 31 model.embedding_dim 1.0 +650 31 training.batch_size 1.0 +650 31 training.label_smoothing 0.045143122084650426 +650 32 model.embedding_dim 2.0 +650 32 training.batch_size 2.0 +650 32 training.label_smoothing 0.0025111649703875687 +650 33 model.embedding_dim 0.0 +650 33 training.batch_size 2.0 +650 33 training.label_smoothing 0.0010872279007101647 +650 34 model.embedding_dim 2.0 +650 34 training.batch_size 2.0 +650 34 training.label_smoothing 0.2627016264837498 +650 35 model.embedding_dim 2.0 +650 35 training.batch_size 1.0 +650 35 training.label_smoothing 0.0016598926127283464 +650 36 model.embedding_dim 2.0 +650 36 training.batch_size 0.0 +650 36 training.label_smoothing 0.008322372666402098 +650 37 model.embedding_dim 1.0 +650 37 training.batch_size 2.0 +650 37 training.label_smoothing 0.004190820876517964 +650 38 model.embedding_dim 0.0 +650 38 training.batch_size 0.0 +650 38 training.label_smoothing 0.0038017094136728546 +650 39 model.embedding_dim 1.0 +650 39 training.batch_size 1.0 +650 39 training.label_smoothing 0.006933108913597882 +650 40 model.embedding_dim 0.0 +650 40 training.batch_size 1.0 +650 40 training.label_smoothing 0.052679711638634254 +650 41 model.embedding_dim 0.0 +650 41 training.batch_size 2.0 +650 41 training.label_smoothing 0.30129336492198544 +650 42 model.embedding_dim 0.0 +650 42 training.batch_size 2.0 +650 42 training.label_smoothing 0.005401322709496576 +650 43 model.embedding_dim 1.0 +650 43 training.batch_size 2.0 +650 43 training.label_smoothing 0.4656388747142822 +650 44 model.embedding_dim 2.0 +650 44 training.batch_size 2.0 +650 44 training.label_smoothing 0.0014004610576906981 +650 45 
model.embedding_dim 0.0 +650 45 training.batch_size 1.0 +650 45 training.label_smoothing 0.010083680174798416 +650 46 model.embedding_dim 0.0 +650 46 training.batch_size 2.0 +650 46 training.label_smoothing 0.005988102296205664 +650 47 model.embedding_dim 1.0 +650 47 training.batch_size 2.0 +650 47 training.label_smoothing 0.1814113790402879 +650 48 model.embedding_dim 1.0 +650 48 training.batch_size 2.0 +650 48 training.label_smoothing 0.0054403019639458145 +650 49 model.embedding_dim 2.0 +650 49 training.batch_size 2.0 +650 49 training.label_smoothing 0.06314829139090002 +650 50 model.embedding_dim 0.0 +650 50 training.batch_size 2.0 +650 50 training.label_smoothing 0.0075219315297160305 +650 51 model.embedding_dim 0.0 +650 51 training.batch_size 1.0 +650 51 training.label_smoothing 0.005634171927128587 +650 52 model.embedding_dim 1.0 +650 52 training.batch_size 2.0 +650 52 training.label_smoothing 0.4112051252849358 +650 53 model.embedding_dim 0.0 +650 53 training.batch_size 2.0 +650 53 training.label_smoothing 0.6237850395891067 +650 54 model.embedding_dim 1.0 +650 54 training.batch_size 0.0 +650 54 training.label_smoothing 0.5484978485027823 +650 55 model.embedding_dim 0.0 +650 55 training.batch_size 2.0 +650 55 training.label_smoothing 0.0674092730726045 +650 56 model.embedding_dim 2.0 +650 56 training.batch_size 2.0 +650 56 training.label_smoothing 0.06195944317533266 +650 57 model.embedding_dim 0.0 +650 57 training.batch_size 0.0 +650 57 training.label_smoothing 0.022118462280031732 +650 58 model.embedding_dim 2.0 +650 58 training.batch_size 2.0 +650 58 training.label_smoothing 0.0010185614556759925 +650 59 model.embedding_dim 2.0 +650 59 training.batch_size 0.0 +650 59 training.label_smoothing 0.06424595653952817 +650 60 model.embedding_dim 1.0 +650 60 training.batch_size 2.0 +650 60 training.label_smoothing 0.013476276469221959 +650 61 model.embedding_dim 0.0 +650 61 training.batch_size 0.0 +650 61 training.label_smoothing 0.055637061822698314 +650 62 
model.embedding_dim 2.0 +650 62 training.batch_size 2.0 +650 62 training.label_smoothing 0.0020154269138584523 +650 63 model.embedding_dim 1.0 +650 63 training.batch_size 0.0 +650 63 training.label_smoothing 0.22493249120895295 +650 64 model.embedding_dim 2.0 +650 64 training.batch_size 0.0 +650 64 training.label_smoothing 0.00233090984623093 +650 65 model.embedding_dim 2.0 +650 65 training.batch_size 0.0 +650 65 training.label_smoothing 0.026492439915477325 +650 66 model.embedding_dim 1.0 +650 66 training.batch_size 2.0 +650 66 training.label_smoothing 0.9731090993941521 +650 67 model.embedding_dim 0.0 +650 67 training.batch_size 0.0 +650 67 training.label_smoothing 0.01962291500664248 +650 68 model.embedding_dim 0.0 +650 68 training.batch_size 0.0 +650 68 training.label_smoothing 0.009767833163933742 +650 69 model.embedding_dim 0.0 +650 69 training.batch_size 1.0 +650 69 training.label_smoothing 0.013971934287594667 +650 70 model.embedding_dim 2.0 +650 70 training.batch_size 2.0 +650 70 training.label_smoothing 0.014636087969500227 +650 71 model.embedding_dim 2.0 +650 71 training.batch_size 0.0 +650 71 training.label_smoothing 0.0015403165072727506 +650 72 model.embedding_dim 0.0 +650 72 training.batch_size 1.0 +650 72 training.label_smoothing 0.09900384529861067 +650 73 model.embedding_dim 0.0 +650 73 training.batch_size 1.0 +650 73 training.label_smoothing 0.0037222474015769507 +650 74 model.embedding_dim 0.0 +650 74 training.batch_size 2.0 +650 74 training.label_smoothing 0.0013769439751607471 +650 75 model.embedding_dim 0.0 +650 75 training.batch_size 2.0 +650 75 training.label_smoothing 0.09728729918384 +650 76 model.embedding_dim 2.0 +650 76 training.batch_size 2.0 +650 76 training.label_smoothing 0.33935545266361783 +650 77 model.embedding_dim 1.0 +650 77 training.batch_size 1.0 +650 77 training.label_smoothing 0.0022836154736099673 +650 78 model.embedding_dim 0.0 +650 78 training.batch_size 2.0 +650 78 training.label_smoothing 0.01817171025492894 +650 79 
model.embedding_dim 1.0 +650 79 training.batch_size 0.0 +650 79 training.label_smoothing 0.48702046900819174 +650 80 model.embedding_dim 2.0 +650 80 training.batch_size 0.0 +650 80 training.label_smoothing 0.2976494824398806 +650 81 model.embedding_dim 1.0 +650 81 training.batch_size 2.0 +650 81 training.label_smoothing 0.012857658860857398 +650 82 model.embedding_dim 2.0 +650 82 training.batch_size 1.0 +650 82 training.label_smoothing 0.0037826688082169243 +650 83 model.embedding_dim 0.0 +650 83 training.batch_size 1.0 +650 83 training.label_smoothing 0.0013027424068357402 +650 84 model.embedding_dim 2.0 +650 84 training.batch_size 0.0 +650 84 training.label_smoothing 0.687147065504858 +650 85 model.embedding_dim 0.0 +650 85 training.batch_size 1.0 +650 85 training.label_smoothing 0.017650927014384463 +650 86 model.embedding_dim 2.0 +650 86 training.batch_size 2.0 +650 86 training.label_smoothing 0.6782210080117365 +650 87 model.embedding_dim 1.0 +650 87 training.batch_size 0.0 +650 87 training.label_smoothing 0.16620374981397915 +650 88 model.embedding_dim 0.0 +650 88 training.batch_size 1.0 +650 88 training.label_smoothing 0.44772622397230527 +650 89 model.embedding_dim 0.0 +650 89 training.batch_size 1.0 +650 89 training.label_smoothing 0.0967436666513246 +650 90 model.embedding_dim 0.0 +650 90 training.batch_size 2.0 +650 90 training.label_smoothing 0.03433552104201484 +650 91 model.embedding_dim 0.0 +650 91 training.batch_size 1.0 +650 91 training.label_smoothing 0.9442476694741876 +650 92 model.embedding_dim 1.0 +650 92 training.batch_size 0.0 +650 92 training.label_smoothing 0.4632952468639462 +650 93 model.embedding_dim 0.0 +650 93 training.batch_size 0.0 +650 93 training.label_smoothing 0.7548235630419674 +650 94 model.embedding_dim 0.0 +650 94 training.batch_size 0.0 +650 94 training.label_smoothing 0.20536773479249051 +650 95 model.embedding_dim 2.0 +650 95 training.batch_size 2.0 +650 95 training.label_smoothing 0.5207881092673047 +650 96 
model.embedding_dim 2.0 +650 96 training.batch_size 1.0 +650 96 training.label_smoothing 0.00220341067282568 +650 97 model.embedding_dim 2.0 +650 97 training.batch_size 2.0 +650 97 training.label_smoothing 0.0049384483718398235 +650 98 model.embedding_dim 2.0 +650 98 training.batch_size 0.0 +650 98 training.label_smoothing 0.004280792198010602 +650 99 model.embedding_dim 2.0 +650 99 training.batch_size 2.0 +650 99 training.label_smoothing 0.6603545663776661 +650 100 model.embedding_dim 0.0 +650 100 training.batch_size 1.0 +650 100 training.label_smoothing 0.12779497062571335 +650 1 dataset """kinships""" +650 1 model """simple""" +650 1 loss """softplus""" +650 1 regularizer """no""" +650 1 optimizer """adadelta""" +650 1 training_loop """lcwa""" +650 1 evaluator """rankbased""" +650 2 dataset """kinships""" +650 2 model """simple""" +650 2 loss """softplus""" +650 2 regularizer """no""" +650 2 optimizer """adadelta""" +650 2 training_loop """lcwa""" +650 2 evaluator """rankbased""" +650 3 dataset """kinships""" +650 3 model """simple""" +650 3 loss """softplus""" +650 3 regularizer """no""" +650 3 optimizer """adadelta""" +650 3 training_loop """lcwa""" +650 3 evaluator """rankbased""" +650 4 dataset """kinships""" +650 4 model """simple""" +650 4 loss """softplus""" +650 4 regularizer """no""" +650 4 optimizer """adadelta""" +650 4 training_loop """lcwa""" +650 4 evaluator """rankbased""" +650 5 dataset """kinships""" +650 5 model """simple""" +650 5 loss """softplus""" +650 5 regularizer """no""" +650 5 optimizer """adadelta""" +650 5 training_loop """lcwa""" +650 5 evaluator """rankbased""" +650 6 dataset """kinships""" +650 6 model """simple""" +650 6 loss """softplus""" +650 6 regularizer """no""" +650 6 optimizer """adadelta""" +650 6 training_loop """lcwa""" +650 6 evaluator """rankbased""" +650 7 dataset """kinships""" +650 7 model """simple""" +650 7 loss """softplus""" +650 7 regularizer """no""" +650 7 optimizer """adadelta""" +650 7 training_loop 
"""lcwa""" +650 7 evaluator """rankbased""" +650 8 dataset """kinships""" +650 8 model """simple""" +650 8 loss """softplus""" +650 8 regularizer """no""" +650 8 optimizer """adadelta""" +650 8 training_loop """lcwa""" +650 8 evaluator """rankbased""" +650 9 dataset """kinships""" +650 9 model """simple""" +650 9 loss """softplus""" +650 9 regularizer """no""" +650 9 optimizer """adadelta""" +650 9 training_loop """lcwa""" +650 9 evaluator """rankbased""" +650 10 dataset """kinships""" +650 10 model """simple""" +650 10 loss """softplus""" +650 10 regularizer """no""" +650 10 optimizer """adadelta""" +650 10 training_loop """lcwa""" +650 10 evaluator """rankbased""" +650 11 dataset """kinships""" +650 11 model """simple""" +650 11 loss """softplus""" +650 11 regularizer """no""" +650 11 optimizer """adadelta""" +650 11 training_loop """lcwa""" +650 11 evaluator """rankbased""" +650 12 dataset """kinships""" +650 12 model """simple""" +650 12 loss """softplus""" +650 12 regularizer """no""" +650 12 optimizer """adadelta""" +650 12 training_loop """lcwa""" +650 12 evaluator """rankbased""" +650 13 dataset """kinships""" +650 13 model """simple""" +650 13 loss """softplus""" +650 13 regularizer """no""" +650 13 optimizer """adadelta""" +650 13 training_loop """lcwa""" +650 13 evaluator """rankbased""" +650 14 dataset """kinships""" +650 14 model """simple""" +650 14 loss """softplus""" +650 14 regularizer """no""" +650 14 optimizer """adadelta""" +650 14 training_loop """lcwa""" +650 14 evaluator """rankbased""" +650 15 dataset """kinships""" +650 15 model """simple""" +650 15 loss """softplus""" +650 15 regularizer """no""" +650 15 optimizer """adadelta""" +650 15 training_loop """lcwa""" +650 15 evaluator """rankbased""" +650 16 dataset """kinships""" +650 16 model """simple""" +650 16 loss """softplus""" +650 16 regularizer """no""" +650 16 optimizer """adadelta""" +650 16 training_loop """lcwa""" +650 16 evaluator """rankbased""" +650 17 dataset """kinships""" 
+650 17 model """simple""" +650 17 loss """softplus""" +650 17 regularizer """no""" +650 17 optimizer """adadelta""" +650 17 training_loop """lcwa""" +650 17 evaluator """rankbased""" +650 18 dataset """kinships""" +650 18 model """simple""" +650 18 loss """softplus""" +650 18 regularizer """no""" +650 18 optimizer """adadelta""" +650 18 training_loop """lcwa""" +650 18 evaluator """rankbased""" +650 19 dataset """kinships""" +650 19 model """simple""" +650 19 loss """softplus""" +650 19 regularizer """no""" +650 19 optimizer """adadelta""" +650 19 training_loop """lcwa""" +650 19 evaluator """rankbased""" +650 20 dataset """kinships""" +650 20 model """simple""" +650 20 loss """softplus""" +650 20 regularizer """no""" +650 20 optimizer """adadelta""" +650 20 training_loop """lcwa""" +650 20 evaluator """rankbased""" +650 21 dataset """kinships""" +650 21 model """simple""" +650 21 loss """softplus""" +650 21 regularizer """no""" +650 21 optimizer """adadelta""" +650 21 training_loop """lcwa""" +650 21 evaluator """rankbased""" +650 22 dataset """kinships""" +650 22 model """simple""" +650 22 loss """softplus""" +650 22 regularizer """no""" +650 22 optimizer """adadelta""" +650 22 training_loop """lcwa""" +650 22 evaluator """rankbased""" +650 23 dataset """kinships""" +650 23 model """simple""" +650 23 loss """softplus""" +650 23 regularizer """no""" +650 23 optimizer """adadelta""" +650 23 training_loop """lcwa""" +650 23 evaluator """rankbased""" +650 24 dataset """kinships""" +650 24 model """simple""" +650 24 loss """softplus""" +650 24 regularizer """no""" +650 24 optimizer """adadelta""" +650 24 training_loop """lcwa""" +650 24 evaluator """rankbased""" +650 25 dataset """kinships""" +650 25 model """simple""" +650 25 loss """softplus""" +650 25 regularizer """no""" +650 25 optimizer """adadelta""" +650 25 training_loop """lcwa""" +650 25 evaluator """rankbased""" +650 26 dataset """kinships""" +650 26 model """simple""" +650 26 loss """softplus""" +650 26 
regularizer """no""" +650 26 optimizer """adadelta""" +650 26 training_loop """lcwa""" +650 26 evaluator """rankbased""" +650 27 dataset """kinships""" +650 27 model """simple""" +650 27 loss """softplus""" +650 27 regularizer """no""" +650 27 optimizer """adadelta""" +650 27 training_loop """lcwa""" +650 27 evaluator """rankbased""" +650 28 dataset """kinships""" +650 28 model """simple""" +650 28 loss """softplus""" +650 28 regularizer """no""" +650 28 optimizer """adadelta""" +650 28 training_loop """lcwa""" +650 28 evaluator """rankbased""" +650 29 dataset """kinships""" +650 29 model """simple""" +650 29 loss """softplus""" +650 29 regularizer """no""" +650 29 optimizer """adadelta""" +650 29 training_loop """lcwa""" +650 29 evaluator """rankbased""" +650 30 dataset """kinships""" +650 30 model """simple""" +650 30 loss """softplus""" +650 30 regularizer """no""" +650 30 optimizer """adadelta""" +650 30 training_loop """lcwa""" +650 30 evaluator """rankbased""" +650 31 dataset """kinships""" +650 31 model """simple""" +650 31 loss """softplus""" +650 31 regularizer """no""" +650 31 optimizer """adadelta""" +650 31 training_loop """lcwa""" +650 31 evaluator """rankbased""" +650 32 dataset """kinships""" +650 32 model """simple""" +650 32 loss """softplus""" +650 32 regularizer """no""" +650 32 optimizer """adadelta""" +650 32 training_loop """lcwa""" +650 32 evaluator """rankbased""" +650 33 dataset """kinships""" +650 33 model """simple""" +650 33 loss """softplus""" +650 33 regularizer """no""" +650 33 optimizer """adadelta""" +650 33 training_loop """lcwa""" +650 33 evaluator """rankbased""" +650 34 dataset """kinships""" +650 34 model """simple""" +650 34 loss """softplus""" +650 34 regularizer """no""" +650 34 optimizer """adadelta""" +650 34 training_loop """lcwa""" +650 34 evaluator """rankbased""" +650 35 dataset """kinships""" +650 35 model """simple""" +650 35 loss """softplus""" +650 35 regularizer """no""" +650 35 optimizer """adadelta""" +650 35 
training_loop """lcwa""" +650 35 evaluator """rankbased""" +650 36 dataset """kinships""" +650 36 model """simple""" +650 36 loss """softplus""" +650 36 regularizer """no""" +650 36 optimizer """adadelta""" +650 36 training_loop """lcwa""" +650 36 evaluator """rankbased""" +650 37 dataset """kinships""" +650 37 model """simple""" +650 37 loss """softplus""" +650 37 regularizer """no""" +650 37 optimizer """adadelta""" +650 37 training_loop """lcwa""" +650 37 evaluator """rankbased""" +650 38 dataset """kinships""" +650 38 model """simple""" +650 38 loss """softplus""" +650 38 regularizer """no""" +650 38 optimizer """adadelta""" +650 38 training_loop """lcwa""" +650 38 evaluator """rankbased""" +650 39 dataset """kinships""" +650 39 model """simple""" +650 39 loss """softplus""" +650 39 regularizer """no""" +650 39 optimizer """adadelta""" +650 39 training_loop """lcwa""" +650 39 evaluator """rankbased""" +650 40 dataset """kinships""" +650 40 model """simple""" +650 40 loss """softplus""" +650 40 regularizer """no""" +650 40 optimizer """adadelta""" +650 40 training_loop """lcwa""" +650 40 evaluator """rankbased""" +650 41 dataset """kinships""" +650 41 model """simple""" +650 41 loss """softplus""" +650 41 regularizer """no""" +650 41 optimizer """adadelta""" +650 41 training_loop """lcwa""" +650 41 evaluator """rankbased""" +650 42 dataset """kinships""" +650 42 model """simple""" +650 42 loss """softplus""" +650 42 regularizer """no""" +650 42 optimizer """adadelta""" +650 42 training_loop """lcwa""" +650 42 evaluator """rankbased""" +650 43 dataset """kinships""" +650 43 model """simple""" +650 43 loss """softplus""" +650 43 regularizer """no""" +650 43 optimizer """adadelta""" +650 43 training_loop """lcwa""" +650 43 evaluator """rankbased""" +650 44 dataset """kinships""" +650 44 model """simple""" +650 44 loss """softplus""" +650 44 regularizer """no""" +650 44 optimizer """adadelta""" +650 44 training_loop """lcwa""" +650 44 evaluator """rankbased""" +650 
45 dataset """kinships""" +650 45 model """simple""" +650 45 loss """softplus""" +650 45 regularizer """no""" +650 45 optimizer """adadelta""" +650 45 training_loop """lcwa""" +650 45 evaluator """rankbased""" +650 46 dataset """kinships""" +650 46 model """simple""" +650 46 loss """softplus""" +650 46 regularizer """no""" +650 46 optimizer """adadelta""" +650 46 training_loop """lcwa""" +650 46 evaluator """rankbased""" +650 47 dataset """kinships""" +650 47 model """simple""" +650 47 loss """softplus""" +650 47 regularizer """no""" +650 47 optimizer """adadelta""" +650 47 training_loop """lcwa""" +650 47 evaluator """rankbased""" +650 48 dataset """kinships""" +650 48 model """simple""" +650 48 loss """softplus""" +650 48 regularizer """no""" +650 48 optimizer """adadelta""" +650 48 training_loop """lcwa""" +650 48 evaluator """rankbased""" +650 49 dataset """kinships""" +650 49 model """simple""" +650 49 loss """softplus""" +650 49 regularizer """no""" +650 49 optimizer """adadelta""" +650 49 training_loop """lcwa""" +650 49 evaluator """rankbased""" +650 50 dataset """kinships""" +650 50 model """simple""" +650 50 loss """softplus""" +650 50 regularizer """no""" +650 50 optimizer """adadelta""" +650 50 training_loop """lcwa""" +650 50 evaluator """rankbased""" +650 51 dataset """kinships""" +650 51 model """simple""" +650 51 loss """softplus""" +650 51 regularizer """no""" +650 51 optimizer """adadelta""" +650 51 training_loop """lcwa""" +650 51 evaluator """rankbased""" +650 52 dataset """kinships""" +650 52 model """simple""" +650 52 loss """softplus""" +650 52 regularizer """no""" +650 52 optimizer """adadelta""" +650 52 training_loop """lcwa""" +650 52 evaluator """rankbased""" +650 53 dataset """kinships""" +650 53 model """simple""" +650 53 loss """softplus""" +650 53 regularizer """no""" +650 53 optimizer """adadelta""" +650 53 training_loop """lcwa""" +650 53 evaluator """rankbased""" +650 54 dataset """kinships""" +650 54 model """simple""" +650 54 
loss """softplus""" +650 54 regularizer """no""" +650 54 optimizer """adadelta""" +650 54 training_loop """lcwa""" +650 54 evaluator """rankbased""" +650 55 dataset """kinships""" +650 55 model """simple""" +650 55 loss """softplus""" +650 55 regularizer """no""" +650 55 optimizer """adadelta""" +650 55 training_loop """lcwa""" +650 55 evaluator """rankbased""" +650 56 dataset """kinships""" +650 56 model """simple""" +650 56 loss """softplus""" +650 56 regularizer """no""" +650 56 optimizer """adadelta""" +650 56 training_loop """lcwa""" +650 56 evaluator """rankbased""" +650 57 dataset """kinships""" +650 57 model """simple""" +650 57 loss """softplus""" +650 57 regularizer """no""" +650 57 optimizer """adadelta""" +650 57 training_loop """lcwa""" +650 57 evaluator """rankbased""" +650 58 dataset """kinships""" +650 58 model """simple""" +650 58 loss """softplus""" +650 58 regularizer """no""" +650 58 optimizer """adadelta""" +650 58 training_loop """lcwa""" +650 58 evaluator """rankbased""" +650 59 dataset """kinships""" +650 59 model """simple""" +650 59 loss """softplus""" +650 59 regularizer """no""" +650 59 optimizer """adadelta""" +650 59 training_loop """lcwa""" +650 59 evaluator """rankbased""" +650 60 dataset """kinships""" +650 60 model """simple""" +650 60 loss """softplus""" +650 60 regularizer """no""" +650 60 optimizer """adadelta""" +650 60 training_loop """lcwa""" +650 60 evaluator """rankbased""" +650 61 dataset """kinships""" +650 61 model """simple""" +650 61 loss """softplus""" +650 61 regularizer """no""" +650 61 optimizer """adadelta""" +650 61 training_loop """lcwa""" +650 61 evaluator """rankbased""" +650 62 dataset """kinships""" +650 62 model """simple""" +650 62 loss """softplus""" +650 62 regularizer """no""" +650 62 optimizer """adadelta""" +650 62 training_loop """lcwa""" +650 62 evaluator """rankbased""" +650 63 dataset """kinships""" +650 63 model """simple""" +650 63 loss """softplus""" +650 63 regularizer """no""" +650 63 
optimizer """adadelta""" +650 63 training_loop """lcwa""" +650 63 evaluator """rankbased""" +650 64 dataset """kinships""" +650 64 model """simple""" +650 64 loss """softplus""" +650 64 regularizer """no""" +650 64 optimizer """adadelta""" +650 64 training_loop """lcwa""" +650 64 evaluator """rankbased""" +650 65 dataset """kinships""" +650 65 model """simple""" +650 65 loss """softplus""" +650 65 regularizer """no""" +650 65 optimizer """adadelta""" +650 65 training_loop """lcwa""" +650 65 evaluator """rankbased""" +650 66 dataset """kinships""" +650 66 model """simple""" +650 66 loss """softplus""" +650 66 regularizer """no""" +650 66 optimizer """adadelta""" +650 66 training_loop """lcwa""" +650 66 evaluator """rankbased""" +650 67 dataset """kinships""" +650 67 model """simple""" +650 67 loss """softplus""" +650 67 regularizer """no""" +650 67 optimizer """adadelta""" +650 67 training_loop """lcwa""" +650 67 evaluator """rankbased""" +650 68 dataset """kinships""" +650 68 model """simple""" +650 68 loss """softplus""" +650 68 regularizer """no""" +650 68 optimizer """adadelta""" +650 68 training_loop """lcwa""" +650 68 evaluator """rankbased""" +650 69 dataset """kinships""" +650 69 model """simple""" +650 69 loss """softplus""" +650 69 regularizer """no""" +650 69 optimizer """adadelta""" +650 69 training_loop """lcwa""" +650 69 evaluator """rankbased""" +650 70 dataset """kinships""" +650 70 model """simple""" +650 70 loss """softplus""" +650 70 regularizer """no""" +650 70 optimizer """adadelta""" +650 70 training_loop """lcwa""" +650 70 evaluator """rankbased""" +650 71 dataset """kinships""" +650 71 model """simple""" +650 71 loss """softplus""" +650 71 regularizer """no""" +650 71 optimizer """adadelta""" +650 71 training_loop """lcwa""" +650 71 evaluator """rankbased""" +650 72 dataset """kinships""" +650 72 model """simple""" +650 72 loss """softplus""" +650 72 regularizer """no""" +650 72 optimizer """adadelta""" +650 72 training_loop """lcwa""" +650 
72 evaluator """rankbased""" +650 73 dataset """kinships""" +650 73 model """simple""" +650 73 loss """softplus""" +650 73 regularizer """no""" +650 73 optimizer """adadelta""" +650 73 training_loop """lcwa""" +650 73 evaluator """rankbased""" +650 74 dataset """kinships""" +650 74 model """simple""" +650 74 loss """softplus""" +650 74 regularizer """no""" +650 74 optimizer """adadelta""" +650 74 training_loop """lcwa""" +650 74 evaluator """rankbased""" +650 75 dataset """kinships""" +650 75 model """simple""" +650 75 loss """softplus""" +650 75 regularizer """no""" +650 75 optimizer """adadelta""" +650 75 training_loop """lcwa""" +650 75 evaluator """rankbased""" +650 76 dataset """kinships""" +650 76 model """simple""" +650 76 loss """softplus""" +650 76 regularizer """no""" +650 76 optimizer """adadelta""" +650 76 training_loop """lcwa""" +650 76 evaluator """rankbased""" +650 77 dataset """kinships""" +650 77 model """simple""" +650 77 loss """softplus""" +650 77 regularizer """no""" +650 77 optimizer """adadelta""" +650 77 training_loop """lcwa""" +650 77 evaluator """rankbased""" +650 78 dataset """kinships""" +650 78 model """simple""" +650 78 loss """softplus""" +650 78 regularizer """no""" +650 78 optimizer """adadelta""" +650 78 training_loop """lcwa""" +650 78 evaluator """rankbased""" +650 79 dataset """kinships""" +650 79 model """simple""" +650 79 loss """softplus""" +650 79 regularizer """no""" +650 79 optimizer """adadelta""" +650 79 training_loop """lcwa""" +650 79 evaluator """rankbased""" +650 80 dataset """kinships""" +650 80 model """simple""" +650 80 loss """softplus""" +650 80 regularizer """no""" +650 80 optimizer """adadelta""" +650 80 training_loop """lcwa""" +650 80 evaluator """rankbased""" +650 81 dataset """kinships""" +650 81 model """simple""" +650 81 loss """softplus""" +650 81 regularizer """no""" +650 81 optimizer """adadelta""" +650 81 training_loop """lcwa""" +650 81 evaluator """rankbased""" +650 82 dataset """kinships""" +650 
82 model """simple""" +650 82 loss """softplus""" +650 82 regularizer """no""" +650 82 optimizer """adadelta""" +650 82 training_loop """lcwa""" +650 82 evaluator """rankbased""" +650 83 dataset """kinships""" +650 83 model """simple""" +650 83 loss """softplus""" +650 83 regularizer """no""" +650 83 optimizer """adadelta""" +650 83 training_loop """lcwa""" +650 83 evaluator """rankbased""" +650 84 dataset """kinships""" +650 84 model """simple""" +650 84 loss """softplus""" +650 84 regularizer """no""" +650 84 optimizer """adadelta""" +650 84 training_loop """lcwa""" +650 84 evaluator """rankbased""" +650 85 dataset """kinships""" +650 85 model """simple""" +650 85 loss """softplus""" +650 85 regularizer """no""" +650 85 optimizer """adadelta""" +650 85 training_loop """lcwa""" +650 85 evaluator """rankbased""" +650 86 dataset """kinships""" +650 86 model """simple""" +650 86 loss """softplus""" +650 86 regularizer """no""" +650 86 optimizer """adadelta""" +650 86 training_loop """lcwa""" +650 86 evaluator """rankbased""" +650 87 dataset """kinships""" +650 87 model """simple""" +650 87 loss """softplus""" +650 87 regularizer """no""" +650 87 optimizer """adadelta""" +650 87 training_loop """lcwa""" +650 87 evaluator """rankbased""" +650 88 dataset """kinships""" +650 88 model """simple""" +650 88 loss """softplus""" +650 88 regularizer """no""" +650 88 optimizer """adadelta""" +650 88 training_loop """lcwa""" +650 88 evaluator """rankbased""" +650 89 dataset """kinships""" +650 89 model """simple""" +650 89 loss """softplus""" +650 89 regularizer """no""" +650 89 optimizer """adadelta""" +650 89 training_loop """lcwa""" +650 89 evaluator """rankbased""" +650 90 dataset """kinships""" +650 90 model """simple""" +650 90 loss """softplus""" +650 90 regularizer """no""" +650 90 optimizer """adadelta""" +650 90 training_loop """lcwa""" +650 90 evaluator """rankbased""" +650 91 dataset """kinships""" +650 91 model """simple""" +650 91 loss """softplus""" +650 91 
regularizer """no""" +650 91 optimizer """adadelta""" +650 91 training_loop """lcwa""" +650 91 evaluator """rankbased""" +650 92 dataset """kinships""" +650 92 model """simple""" +650 92 loss """softplus""" +650 92 regularizer """no""" +650 92 optimizer """adadelta""" +650 92 training_loop """lcwa""" +650 92 evaluator """rankbased""" +650 93 dataset """kinships""" +650 93 model """simple""" +650 93 loss """softplus""" +650 93 regularizer """no""" +650 93 optimizer """adadelta""" +650 93 training_loop """lcwa""" +650 93 evaluator """rankbased""" +650 94 dataset """kinships""" +650 94 model """simple""" +650 94 loss """softplus""" +650 94 regularizer """no""" +650 94 optimizer """adadelta""" +650 94 training_loop """lcwa""" +650 94 evaluator """rankbased""" +650 95 dataset """kinships""" +650 95 model """simple""" +650 95 loss """softplus""" +650 95 regularizer """no""" +650 95 optimizer """adadelta""" +650 95 training_loop """lcwa""" +650 95 evaluator """rankbased""" +650 96 dataset """kinships""" +650 96 model """simple""" +650 96 loss """softplus""" +650 96 regularizer """no""" +650 96 optimizer """adadelta""" +650 96 training_loop """lcwa""" +650 96 evaluator """rankbased""" +650 97 dataset """kinships""" +650 97 model """simple""" +650 97 loss """softplus""" +650 97 regularizer """no""" +650 97 optimizer """adadelta""" +650 97 training_loop """lcwa""" +650 97 evaluator """rankbased""" +650 98 dataset """kinships""" +650 98 model """simple""" +650 98 loss """softplus""" +650 98 regularizer """no""" +650 98 optimizer """adadelta""" +650 98 training_loop """lcwa""" +650 98 evaluator """rankbased""" +650 99 dataset """kinships""" +650 99 model """simple""" +650 99 loss """softplus""" +650 99 regularizer """no""" +650 99 optimizer """adadelta""" +650 99 training_loop """lcwa""" +650 99 evaluator """rankbased""" +650 100 dataset """kinships""" +650 100 model """simple""" +650 100 loss """softplus""" +650 100 regularizer """no""" +650 100 optimizer """adadelta""" +650 
100 training_loop """lcwa""" +650 100 evaluator """rankbased""" +651 1 model.embedding_dim 1.0 +651 1 training.batch_size 1.0 +651 1 training.label_smoothing 0.0026442240112124894 +651 2 model.embedding_dim 1.0 +651 2 training.batch_size 0.0 +651 2 training.label_smoothing 0.07236314723336919 +651 3 model.embedding_dim 0.0 +651 3 training.batch_size 2.0 +651 3 training.label_smoothing 0.07020436426532692 +651 4 model.embedding_dim 0.0 +651 4 training.batch_size 1.0 +651 4 training.label_smoothing 0.0027330790063189417 +651 5 model.embedding_dim 1.0 +651 5 training.batch_size 1.0 +651 5 training.label_smoothing 0.4245975877452464 +651 6 model.embedding_dim 2.0 +651 6 training.batch_size 1.0 +651 6 training.label_smoothing 0.6201842725813336 +651 7 model.embedding_dim 1.0 +651 7 training.batch_size 2.0 +651 7 training.label_smoothing 0.0014393039294126532 +651 8 model.embedding_dim 0.0 +651 8 training.batch_size 0.0 +651 8 training.label_smoothing 0.00249278314935481 +651 9 model.embedding_dim 1.0 +651 9 training.batch_size 0.0 +651 9 training.label_smoothing 0.027319812451609613 +651 10 model.embedding_dim 1.0 +651 10 training.batch_size 0.0 +651 10 training.label_smoothing 0.4319315754397193 +651 11 model.embedding_dim 1.0 +651 11 training.batch_size 2.0 +651 11 training.label_smoothing 0.019511743058705903 +651 12 model.embedding_dim 2.0 +651 12 training.batch_size 0.0 +651 12 training.label_smoothing 0.0023193006964509926 +651 13 model.embedding_dim 2.0 +651 13 training.batch_size 0.0 +651 13 training.label_smoothing 0.5518926538490361 +651 14 model.embedding_dim 2.0 +651 14 training.batch_size 1.0 +651 14 training.label_smoothing 0.0025843468974161253 +651 15 model.embedding_dim 1.0 +651 15 training.batch_size 1.0 +651 15 training.label_smoothing 0.13648856248967373 +651 16 model.embedding_dim 2.0 +651 16 training.batch_size 1.0 +651 16 training.label_smoothing 0.19360014419843172 +651 17 model.embedding_dim 1.0 +651 17 training.batch_size 1.0 +651 17 
training.label_smoothing 0.003648658783592415 +651 18 model.embedding_dim 1.0 +651 18 training.batch_size 2.0 +651 18 training.label_smoothing 0.001205916744759039 +651 19 model.embedding_dim 2.0 +651 19 training.batch_size 1.0 +651 19 training.label_smoothing 0.3899994509855788 +651 20 model.embedding_dim 0.0 +651 20 training.batch_size 1.0 +651 20 training.label_smoothing 0.0844349228500072 +651 21 model.embedding_dim 0.0 +651 21 training.batch_size 1.0 +651 21 training.label_smoothing 0.1865059045960572 +651 22 model.embedding_dim 2.0 +651 22 training.batch_size 0.0 +651 22 training.label_smoothing 0.02819616288958739 +651 23 model.embedding_dim 0.0 +651 23 training.batch_size 0.0 +651 23 training.label_smoothing 0.0851483482282015 +651 24 model.embedding_dim 1.0 +651 24 training.batch_size 2.0 +651 24 training.label_smoothing 0.028356100530368092 +651 25 model.embedding_dim 0.0 +651 25 training.batch_size 2.0 +651 25 training.label_smoothing 0.0419053418259338 +651 26 model.embedding_dim 2.0 +651 26 training.batch_size 2.0 +651 26 training.label_smoothing 0.01625081117154906 +651 27 model.embedding_dim 0.0 +651 27 training.batch_size 0.0 +651 27 training.label_smoothing 0.0014573879952230675 +651 28 model.embedding_dim 0.0 +651 28 training.batch_size 2.0 +651 28 training.label_smoothing 0.303097610015239 +651 29 model.embedding_dim 2.0 +651 29 training.batch_size 0.0 +651 29 training.label_smoothing 0.002064595115470165 +651 30 model.embedding_dim 1.0 +651 30 training.batch_size 2.0 +651 30 training.label_smoothing 0.03961880883668322 +651 31 model.embedding_dim 1.0 +651 31 training.batch_size 1.0 +651 31 training.label_smoothing 0.10455817106405267 +651 32 model.embedding_dim 1.0 +651 32 training.batch_size 0.0 +651 32 training.label_smoothing 0.006015407894464612 +651 33 model.embedding_dim 1.0 +651 33 training.batch_size 1.0 +651 33 training.label_smoothing 0.009159721419842875 +651 34 model.embedding_dim 0.0 +651 34 training.batch_size 1.0 +651 34 
training.label_smoothing 0.02111141084473717 +651 35 model.embedding_dim 0.0 +651 35 training.batch_size 1.0 +651 35 training.label_smoothing 0.03264609247164795 +651 36 model.embedding_dim 0.0 +651 36 training.batch_size 0.0 +651 36 training.label_smoothing 0.007122159191319286 +651 37 model.embedding_dim 0.0 +651 37 training.batch_size 2.0 +651 37 training.label_smoothing 0.20256943206624956 +651 38 model.embedding_dim 2.0 +651 38 training.batch_size 0.0 +651 38 training.label_smoothing 0.017860878035452655 +651 39 model.embedding_dim 0.0 +651 39 training.batch_size 2.0 +651 39 training.label_smoothing 0.05082966619793868 +651 40 model.embedding_dim 2.0 +651 40 training.batch_size 0.0 +651 40 training.label_smoothing 0.0016163381462906275 +651 41 model.embedding_dim 0.0 +651 41 training.batch_size 0.0 +651 41 training.label_smoothing 0.49459507915458856 +651 42 model.embedding_dim 2.0 +651 42 training.batch_size 2.0 +651 42 training.label_smoothing 0.13161768119520761 +651 43 model.embedding_dim 1.0 +651 43 training.batch_size 2.0 +651 43 training.label_smoothing 0.010135052524295713 +651 44 model.embedding_dim 2.0 +651 44 training.batch_size 0.0 +651 44 training.label_smoothing 0.011265842863295362 +651 45 model.embedding_dim 1.0 +651 45 training.batch_size 2.0 +651 45 training.label_smoothing 0.08980380780223624 +651 46 model.embedding_dim 0.0 +651 46 training.batch_size 0.0 +651 46 training.label_smoothing 0.016377734196313085 +651 47 model.embedding_dim 2.0 +651 47 training.batch_size 0.0 +651 47 training.label_smoothing 0.008583219023362964 +651 48 model.embedding_dim 0.0 +651 48 training.batch_size 1.0 +651 48 training.label_smoothing 0.030905663395950918 +651 49 model.embedding_dim 0.0 +651 49 training.batch_size 2.0 +651 49 training.label_smoothing 0.001157081526872343 +651 50 model.embedding_dim 1.0 +651 50 training.batch_size 1.0 +651 50 training.label_smoothing 0.001221043048983374 +651 51 model.embedding_dim 0.0 +651 51 training.batch_size 0.0 +651 51 
training.label_smoothing 0.0015956501326529191 +651 52 model.embedding_dim 1.0 +651 52 training.batch_size 0.0 +651 52 training.label_smoothing 0.27362890019211117 +651 53 model.embedding_dim 0.0 +651 53 training.batch_size 1.0 +651 53 training.label_smoothing 0.011657716899027757 +651 54 model.embedding_dim 1.0 +651 54 training.batch_size 2.0 +651 54 training.label_smoothing 0.007547904099328554 +651 55 model.embedding_dim 1.0 +651 55 training.batch_size 1.0 +651 55 training.label_smoothing 0.05127142142149075 +651 56 model.embedding_dim 1.0 +651 56 training.batch_size 0.0 +651 56 training.label_smoothing 0.035240053592160314 +651 57 model.embedding_dim 0.0 +651 57 training.batch_size 1.0 +651 57 training.label_smoothing 0.3091487858637211 +651 58 model.embedding_dim 0.0 +651 58 training.batch_size 1.0 +651 58 training.label_smoothing 0.161052312513395 +651 59 model.embedding_dim 0.0 +651 59 training.batch_size 0.0 +651 59 training.label_smoothing 0.009664942039075855 +651 60 model.embedding_dim 0.0 +651 60 training.batch_size 0.0 +651 60 training.label_smoothing 0.03251028624324971 +651 61 model.embedding_dim 2.0 +651 61 training.batch_size 0.0 +651 61 training.label_smoothing 0.05904903487512742 +651 62 model.embedding_dim 0.0 +651 62 training.batch_size 0.0 +651 62 training.label_smoothing 0.00530279949007805 +651 63 model.embedding_dim 2.0 +651 63 training.batch_size 0.0 +651 63 training.label_smoothing 0.009201192993564363 +651 64 model.embedding_dim 1.0 +651 64 training.batch_size 1.0 +651 64 training.label_smoothing 0.001459053680908947 +651 65 model.embedding_dim 1.0 +651 65 training.batch_size 0.0 +651 65 training.label_smoothing 0.5705439935700903 +651 66 model.embedding_dim 1.0 +651 66 training.batch_size 2.0 +651 66 training.label_smoothing 0.0037052246791189655 +651 67 model.embedding_dim 0.0 +651 67 training.batch_size 1.0 +651 67 training.label_smoothing 0.011226196476803584 +651 68 model.embedding_dim 0.0 +651 68 training.batch_size 0.0 +651 68 
training.label_smoothing 0.5291644847791056 +651 69 model.embedding_dim 2.0 +651 69 training.batch_size 2.0 +651 69 training.label_smoothing 0.32394910307135066 +651 70 model.embedding_dim 0.0 +651 70 training.batch_size 2.0 +651 70 training.label_smoothing 0.015326420186653538 +651 71 model.embedding_dim 0.0 +651 71 training.batch_size 2.0 +651 71 training.label_smoothing 0.13109184789651496 +651 72 model.embedding_dim 0.0 +651 72 training.batch_size 0.0 +651 72 training.label_smoothing 0.023538290199538816 +651 73 model.embedding_dim 0.0 +651 73 training.batch_size 2.0 +651 73 training.label_smoothing 0.005245121616832479 +651 74 model.embedding_dim 2.0 +651 74 training.batch_size 0.0 +651 74 training.label_smoothing 0.8678345580109066 +651 75 model.embedding_dim 2.0 +651 75 training.batch_size 1.0 +651 75 training.label_smoothing 0.23436577000163286 +651 76 model.embedding_dim 2.0 +651 76 training.batch_size 0.0 +651 76 training.label_smoothing 0.03540373652364454 +651 77 model.embedding_dim 1.0 +651 77 training.batch_size 1.0 +651 77 training.label_smoothing 0.022877713720984433 +651 78 model.embedding_dim 2.0 +651 78 training.batch_size 1.0 +651 78 training.label_smoothing 0.008345090371460953 +651 79 model.embedding_dim 1.0 +651 79 training.batch_size 1.0 +651 79 training.label_smoothing 0.014174536444191825 +651 80 model.embedding_dim 2.0 +651 80 training.batch_size 1.0 +651 80 training.label_smoothing 0.0035982767315726233 +651 81 model.embedding_dim 1.0 +651 81 training.batch_size 2.0 +651 81 training.label_smoothing 0.0011074407960547734 +651 82 model.embedding_dim 0.0 +651 82 training.batch_size 0.0 +651 82 training.label_smoothing 0.28368076221556293 +651 83 model.embedding_dim 2.0 +651 83 training.batch_size 0.0 +651 83 training.label_smoothing 0.00807398287987158 +651 84 model.embedding_dim 0.0 +651 84 training.batch_size 1.0 +651 84 training.label_smoothing 0.0792720599460647 +651 85 model.embedding_dim 0.0 +651 85 training.batch_size 0.0 +651 85 
training.label_smoothing 0.08145352438731052 +651 86 model.embedding_dim 0.0 +651 86 training.batch_size 0.0 +651 86 training.label_smoothing 0.0036996367855845693 +651 87 model.embedding_dim 0.0 +651 87 training.batch_size 0.0 +651 87 training.label_smoothing 0.04060437318912978 +651 88 model.embedding_dim 2.0 +651 88 training.batch_size 0.0 +651 88 training.label_smoothing 0.4736557928049779 +651 89 model.embedding_dim 2.0 +651 89 training.batch_size 2.0 +651 89 training.label_smoothing 0.03300283238973669 +651 90 model.embedding_dim 0.0 +651 90 training.batch_size 1.0 +651 90 training.label_smoothing 0.2270336145380663 +651 91 model.embedding_dim 0.0 +651 91 training.batch_size 0.0 +651 91 training.label_smoothing 0.001153317621339315 +651 92 model.embedding_dim 1.0 +651 92 training.batch_size 2.0 +651 92 training.label_smoothing 0.00901672895873571 +651 93 model.embedding_dim 0.0 +651 93 training.batch_size 1.0 +651 93 training.label_smoothing 0.9641401235405775 +651 94 model.embedding_dim 0.0 +651 94 training.batch_size 2.0 +651 94 training.label_smoothing 0.009320121310530503 +651 95 model.embedding_dim 2.0 +651 95 training.batch_size 0.0 +651 95 training.label_smoothing 0.0026094192028830935 +651 96 model.embedding_dim 1.0 +651 96 training.batch_size 2.0 +651 96 training.label_smoothing 0.16199656492711387 +651 97 model.embedding_dim 1.0 +651 97 training.batch_size 1.0 +651 97 training.label_smoothing 0.14372472172141998 +651 98 model.embedding_dim 1.0 +651 98 training.batch_size 1.0 +651 98 training.label_smoothing 0.13909380186966433 +651 99 model.embedding_dim 2.0 +651 99 training.batch_size 2.0 +651 99 training.label_smoothing 0.0015154702435972286 +651 100 model.embedding_dim 2.0 +651 100 training.batch_size 0.0 +651 100 training.label_smoothing 0.026747131021689814 +651 1 dataset """kinships""" +651 1 model """simple""" +651 1 loss """crossentropy""" +651 1 regularizer """no""" +651 1 optimizer """adadelta""" +651 1 training_loop """lcwa""" +651 1 
evaluator """rankbased""" +651 2 dataset """kinships""" +651 2 model """simple""" +651 2 loss """crossentropy""" +651 2 regularizer """no""" +651 2 optimizer """adadelta""" +651 2 training_loop """lcwa""" +651 2 evaluator """rankbased""" +651 3 dataset """kinships""" +651 3 model """simple""" +651 3 loss """crossentropy""" +651 3 regularizer """no""" +651 3 optimizer """adadelta""" +651 3 training_loop """lcwa""" +651 3 evaluator """rankbased""" +651 4 dataset """kinships""" +651 4 model """simple""" +651 4 loss """crossentropy""" +651 4 regularizer """no""" +651 4 optimizer """adadelta""" +651 4 training_loop """lcwa""" +651 4 evaluator """rankbased""" +651 5 dataset """kinships""" +651 5 model """simple""" +651 5 loss """crossentropy""" +651 5 regularizer """no""" +651 5 optimizer """adadelta""" +651 5 training_loop """lcwa""" +651 5 evaluator """rankbased""" +651 6 dataset """kinships""" +651 6 model """simple""" +651 6 loss """crossentropy""" +651 6 regularizer """no""" +651 6 optimizer """adadelta""" +651 6 training_loop """lcwa""" +651 6 evaluator """rankbased""" +651 7 dataset """kinships""" +651 7 model """simple""" +651 7 loss """crossentropy""" +651 7 regularizer """no""" +651 7 optimizer """adadelta""" +651 7 training_loop """lcwa""" +651 7 evaluator """rankbased""" +651 8 dataset """kinships""" +651 8 model """simple""" +651 8 loss """crossentropy""" +651 8 regularizer """no""" +651 8 optimizer """adadelta""" +651 8 training_loop """lcwa""" +651 8 evaluator """rankbased""" +651 9 dataset """kinships""" +651 9 model """simple""" +651 9 loss """crossentropy""" +651 9 regularizer """no""" +651 9 optimizer """adadelta""" +651 9 training_loop """lcwa""" +651 9 evaluator """rankbased""" +651 10 dataset """kinships""" +651 10 model """simple""" +651 10 loss """crossentropy""" +651 10 regularizer """no""" +651 10 optimizer """adadelta""" +651 10 training_loop """lcwa""" +651 10 evaluator """rankbased""" +651 11 dataset """kinships""" +651 11 model """simple""" 
+651 11 loss """crossentropy""" +651 11 regularizer """no""" +651 11 optimizer """adadelta""" +651 11 training_loop """lcwa""" +651 11 evaluator """rankbased""" +651 12 dataset """kinships""" +651 12 model """simple""" +651 12 loss """crossentropy""" +651 12 regularizer """no""" +651 12 optimizer """adadelta""" +651 12 training_loop """lcwa""" +651 12 evaluator """rankbased""" +651 13 dataset """kinships""" +651 13 model """simple""" +651 13 loss """crossentropy""" +651 13 regularizer """no""" +651 13 optimizer """adadelta""" +651 13 training_loop """lcwa""" +651 13 evaluator """rankbased""" +651 14 dataset """kinships""" +651 14 model """simple""" +651 14 loss """crossentropy""" +651 14 regularizer """no""" +651 14 optimizer """adadelta""" +651 14 training_loop """lcwa""" +651 14 evaluator """rankbased""" +651 15 dataset """kinships""" +651 15 model """simple""" +651 15 loss """crossentropy""" +651 15 regularizer """no""" +651 15 optimizer """adadelta""" +651 15 training_loop """lcwa""" +651 15 evaluator """rankbased""" +651 16 dataset """kinships""" +651 16 model """simple""" +651 16 loss """crossentropy""" +651 16 regularizer """no""" +651 16 optimizer """adadelta""" +651 16 training_loop """lcwa""" +651 16 evaluator """rankbased""" +651 17 dataset """kinships""" +651 17 model """simple""" +651 17 loss """crossentropy""" +651 17 regularizer """no""" +651 17 optimizer """adadelta""" +651 17 training_loop """lcwa""" +651 17 evaluator """rankbased""" +651 18 dataset """kinships""" +651 18 model """simple""" +651 18 loss """crossentropy""" +651 18 regularizer """no""" +651 18 optimizer """adadelta""" +651 18 training_loop """lcwa""" +651 18 evaluator """rankbased""" +651 19 dataset """kinships""" +651 19 model """simple""" +651 19 loss """crossentropy""" +651 19 regularizer """no""" +651 19 optimizer """adadelta""" +651 19 training_loop """lcwa""" +651 19 evaluator """rankbased""" +651 20 dataset """kinships""" +651 20 model """simple""" +651 20 loss 
"""crossentropy""" +651 20 regularizer """no""" +651 20 optimizer """adadelta""" +651 20 training_loop """lcwa""" +651 20 evaluator """rankbased""" +651 21 dataset """kinships""" +651 21 model """simple""" +651 21 loss """crossentropy""" +651 21 regularizer """no""" +651 21 optimizer """adadelta""" +651 21 training_loop """lcwa""" +651 21 evaluator """rankbased""" +651 22 dataset """kinships""" +651 22 model """simple""" +651 22 loss """crossentropy""" +651 22 regularizer """no""" +651 22 optimizer """adadelta""" +651 22 training_loop """lcwa""" +651 22 evaluator """rankbased""" +651 23 dataset """kinships""" +651 23 model """simple""" +651 23 loss """crossentropy""" +651 23 regularizer """no""" +651 23 optimizer """adadelta""" +651 23 training_loop """lcwa""" +651 23 evaluator """rankbased""" +651 24 dataset """kinships""" +651 24 model """simple""" +651 24 loss """crossentropy""" +651 24 regularizer """no""" +651 24 optimizer """adadelta""" +651 24 training_loop """lcwa""" +651 24 evaluator """rankbased""" +651 25 dataset """kinships""" +651 25 model """simple""" +651 25 loss """crossentropy""" +651 25 regularizer """no""" +651 25 optimizer """adadelta""" +651 25 training_loop """lcwa""" +651 25 evaluator """rankbased""" +651 26 dataset """kinships""" +651 26 model """simple""" +651 26 loss """crossentropy""" +651 26 regularizer """no""" +651 26 optimizer """adadelta""" +651 26 training_loop """lcwa""" +651 26 evaluator """rankbased""" +651 27 dataset """kinships""" +651 27 model """simple""" +651 27 loss """crossentropy""" +651 27 regularizer """no""" +651 27 optimizer """adadelta""" +651 27 training_loop """lcwa""" +651 27 evaluator """rankbased""" +651 28 dataset """kinships""" +651 28 model """simple""" +651 28 loss """crossentropy""" +651 28 regularizer """no""" +651 28 optimizer """adadelta""" +651 28 training_loop """lcwa""" +651 28 evaluator """rankbased""" +651 29 dataset """kinships""" +651 29 model """simple""" +651 29 loss """crossentropy""" +651 29 
regularizer """no""" +651 29 optimizer """adadelta""" +651 29 training_loop """lcwa""" +651 29 evaluator """rankbased""" +651 30 dataset """kinships""" +651 30 model """simple""" +651 30 loss """crossentropy""" +651 30 regularizer """no""" +651 30 optimizer """adadelta""" +651 30 training_loop """lcwa""" +651 30 evaluator """rankbased""" +651 31 dataset """kinships""" +651 31 model """simple""" +651 31 loss """crossentropy""" +651 31 regularizer """no""" +651 31 optimizer """adadelta""" +651 31 training_loop """lcwa""" +651 31 evaluator """rankbased""" +651 32 dataset """kinships""" +651 32 model """simple""" +651 32 loss """crossentropy""" +651 32 regularizer """no""" +651 32 optimizer """adadelta""" +651 32 training_loop """lcwa""" +651 32 evaluator """rankbased""" +651 33 dataset """kinships""" +651 33 model """simple""" +651 33 loss """crossentropy""" +651 33 regularizer """no""" +651 33 optimizer """adadelta""" +651 33 training_loop """lcwa""" +651 33 evaluator """rankbased""" +651 34 dataset """kinships""" +651 34 model """simple""" +651 34 loss """crossentropy""" +651 34 regularizer """no""" +651 34 optimizer """adadelta""" +651 34 training_loop """lcwa""" +651 34 evaluator """rankbased""" +651 35 dataset """kinships""" +651 35 model """simple""" +651 35 loss """crossentropy""" +651 35 regularizer """no""" +651 35 optimizer """adadelta""" +651 35 training_loop """lcwa""" +651 35 evaluator """rankbased""" +651 36 dataset """kinships""" +651 36 model """simple""" +651 36 loss """crossentropy""" +651 36 regularizer """no""" +651 36 optimizer """adadelta""" +651 36 training_loop """lcwa""" +651 36 evaluator """rankbased""" +651 37 dataset """kinships""" +651 37 model """simple""" +651 37 loss """crossentropy""" +651 37 regularizer """no""" +651 37 optimizer """adadelta""" +651 37 training_loop """lcwa""" +651 37 evaluator """rankbased""" +651 38 dataset """kinships""" +651 38 model """simple""" +651 38 loss """crossentropy""" +651 38 regularizer """no""" +651 38 
optimizer """adadelta""" +651 38 training_loop """lcwa""" +651 38 evaluator """rankbased""" +651 39 dataset """kinships""" +651 39 model """simple""" +651 39 loss """crossentropy""" +651 39 regularizer """no""" +651 39 optimizer """adadelta""" +651 39 training_loop """lcwa""" +651 39 evaluator """rankbased""" +651 40 dataset """kinships""" +651 40 model """simple""" +651 40 loss """crossentropy""" +651 40 regularizer """no""" +651 40 optimizer """adadelta""" +651 40 training_loop """lcwa""" +651 40 evaluator """rankbased""" +651 41 dataset """kinships""" +651 41 model """simple""" +651 41 loss """crossentropy""" +651 41 regularizer """no""" +651 41 optimizer """adadelta""" +651 41 training_loop """lcwa""" +651 41 evaluator """rankbased""" +651 42 dataset """kinships""" +651 42 model """simple""" +651 42 loss """crossentropy""" +651 42 regularizer """no""" +651 42 optimizer """adadelta""" +651 42 training_loop """lcwa""" +651 42 evaluator """rankbased""" +651 43 dataset """kinships""" +651 43 model """simple""" +651 43 loss """crossentropy""" +651 43 regularizer """no""" +651 43 optimizer """adadelta""" +651 43 training_loop """lcwa""" +651 43 evaluator """rankbased""" +651 44 dataset """kinships""" +651 44 model """simple""" +651 44 loss """crossentropy""" +651 44 regularizer """no""" +651 44 optimizer """adadelta""" +651 44 training_loop """lcwa""" +651 44 evaluator """rankbased""" +651 45 dataset """kinships""" +651 45 model """simple""" +651 45 loss """crossentropy""" +651 45 regularizer """no""" +651 45 optimizer """adadelta""" +651 45 training_loop """lcwa""" +651 45 evaluator """rankbased""" +651 46 dataset """kinships""" +651 46 model """simple""" +651 46 loss """crossentropy""" +651 46 regularizer """no""" +651 46 optimizer """adadelta""" +651 46 training_loop """lcwa""" +651 46 evaluator """rankbased""" +651 47 dataset """kinships""" +651 47 model """simple""" +651 47 loss """crossentropy""" +651 47 regularizer """no""" +651 47 optimizer """adadelta""" 
+651 47 training_loop """lcwa""" +651 47 evaluator """rankbased""" +651 48 dataset """kinships""" +651 48 model """simple""" +651 48 loss """crossentropy""" +651 48 regularizer """no""" +651 48 optimizer """adadelta""" +651 48 training_loop """lcwa""" +651 48 evaluator """rankbased""" +651 49 dataset """kinships""" +651 49 model """simple""" +651 49 loss """crossentropy""" +651 49 regularizer """no""" +651 49 optimizer """adadelta""" +651 49 training_loop """lcwa""" +651 49 evaluator """rankbased""" +651 50 dataset """kinships""" +651 50 model """simple""" +651 50 loss """crossentropy""" +651 50 regularizer """no""" +651 50 optimizer """adadelta""" +651 50 training_loop """lcwa""" +651 50 evaluator """rankbased""" +651 51 dataset """kinships""" +651 51 model """simple""" +651 51 loss """crossentropy""" +651 51 regularizer """no""" +651 51 optimizer """adadelta""" +651 51 training_loop """lcwa""" +651 51 evaluator """rankbased""" +651 52 dataset """kinships""" +651 52 model """simple""" +651 52 loss """crossentropy""" +651 52 regularizer """no""" +651 52 optimizer """adadelta""" +651 52 training_loop """lcwa""" +651 52 evaluator """rankbased""" +651 53 dataset """kinships""" +651 53 model """simple""" +651 53 loss """crossentropy""" +651 53 regularizer """no""" +651 53 optimizer """adadelta""" +651 53 training_loop """lcwa""" +651 53 evaluator """rankbased""" +651 54 dataset """kinships""" +651 54 model """simple""" +651 54 loss """crossentropy""" +651 54 regularizer """no""" +651 54 optimizer """adadelta""" +651 54 training_loop """lcwa""" +651 54 evaluator """rankbased""" +651 55 dataset """kinships""" +651 55 model """simple""" +651 55 loss """crossentropy""" +651 55 regularizer """no""" +651 55 optimizer """adadelta""" +651 55 training_loop """lcwa""" +651 55 evaluator """rankbased""" +651 56 dataset """kinships""" +651 56 model """simple""" +651 56 loss """crossentropy""" +651 56 regularizer """no""" +651 56 optimizer """adadelta""" +651 56 training_loop 
"""lcwa""" +651 56 evaluator """rankbased""" +651 57 dataset """kinships""" +651 57 model """simple""" +651 57 loss """crossentropy""" +651 57 regularizer """no""" +651 57 optimizer """adadelta""" +651 57 training_loop """lcwa""" +651 57 evaluator """rankbased""" +651 58 dataset """kinships""" +651 58 model """simple""" +651 58 loss """crossentropy""" +651 58 regularizer """no""" +651 58 optimizer """adadelta""" +651 58 training_loop """lcwa""" +651 58 evaluator """rankbased""" +651 59 dataset """kinships""" +651 59 model """simple""" +651 59 loss """crossentropy""" +651 59 regularizer """no""" +651 59 optimizer """adadelta""" +651 59 training_loop """lcwa""" +651 59 evaluator """rankbased""" +651 60 dataset """kinships""" +651 60 model """simple""" +651 60 loss """crossentropy""" +651 60 regularizer """no""" +651 60 optimizer """adadelta""" +651 60 training_loop """lcwa""" +651 60 evaluator """rankbased""" +651 61 dataset """kinships""" +651 61 model """simple""" +651 61 loss """crossentropy""" +651 61 regularizer """no""" +651 61 optimizer """adadelta""" +651 61 training_loop """lcwa""" +651 61 evaluator """rankbased""" +651 62 dataset """kinships""" +651 62 model """simple""" +651 62 loss """crossentropy""" +651 62 regularizer """no""" +651 62 optimizer """adadelta""" +651 62 training_loop """lcwa""" +651 62 evaluator """rankbased""" +651 63 dataset """kinships""" +651 63 model """simple""" +651 63 loss """crossentropy""" +651 63 regularizer """no""" +651 63 optimizer """adadelta""" +651 63 training_loop """lcwa""" +651 63 evaluator """rankbased""" +651 64 dataset """kinships""" +651 64 model """simple""" +651 64 loss """crossentropy""" +651 64 regularizer """no""" +651 64 optimizer """adadelta""" +651 64 training_loop """lcwa""" +651 64 evaluator """rankbased""" +651 65 dataset """kinships""" +651 65 model """simple""" +651 65 loss """crossentropy""" +651 65 regularizer """no""" +651 65 optimizer """adadelta""" +651 65 training_loop """lcwa""" +651 65 evaluator 
"""rankbased""" +651 66 dataset """kinships""" +651 66 model """simple""" +651 66 loss """crossentropy""" +651 66 regularizer """no""" +651 66 optimizer """adadelta""" +651 66 training_loop """lcwa""" +651 66 evaluator """rankbased""" +651 67 dataset """kinships""" +651 67 model """simple""" +651 67 loss """crossentropy""" +651 67 regularizer """no""" +651 67 optimizer """adadelta""" +651 67 training_loop """lcwa""" +651 67 evaluator """rankbased""" +651 68 dataset """kinships""" +651 68 model """simple""" +651 68 loss """crossentropy""" +651 68 regularizer """no""" +651 68 optimizer """adadelta""" +651 68 training_loop """lcwa""" +651 68 evaluator """rankbased""" +651 69 dataset """kinships""" +651 69 model """simple""" +651 69 loss """crossentropy""" +651 69 regularizer """no""" +651 69 optimizer """adadelta""" +651 69 training_loop """lcwa""" +651 69 evaluator """rankbased""" +651 70 dataset """kinships""" +651 70 model """simple""" +651 70 loss """crossentropy""" +651 70 regularizer """no""" +651 70 optimizer """adadelta""" +651 70 training_loop """lcwa""" +651 70 evaluator """rankbased""" +651 71 dataset """kinships""" +651 71 model """simple""" +651 71 loss """crossentropy""" +651 71 regularizer """no""" +651 71 optimizer """adadelta""" +651 71 training_loop """lcwa""" +651 71 evaluator """rankbased""" +651 72 dataset """kinships""" +651 72 model """simple""" +651 72 loss """crossentropy""" +651 72 regularizer """no""" +651 72 optimizer """adadelta""" +651 72 training_loop """lcwa""" +651 72 evaluator """rankbased""" +651 73 dataset """kinships""" +651 73 model """simple""" +651 73 loss """crossentropy""" +651 73 regularizer """no""" +651 73 optimizer """adadelta""" +651 73 training_loop """lcwa""" +651 73 evaluator """rankbased""" +651 74 dataset """kinships""" +651 74 model """simple""" +651 74 loss """crossentropy""" +651 74 regularizer """no""" +651 74 optimizer """adadelta""" +651 74 training_loop """lcwa""" +651 74 evaluator """rankbased""" +651 75 
dataset """kinships""" +651 75 model """simple""" +651 75 loss """crossentropy""" +651 75 regularizer """no""" +651 75 optimizer """adadelta""" +651 75 training_loop """lcwa""" +651 75 evaluator """rankbased""" +651 76 dataset """kinships""" +651 76 model """simple""" +651 76 loss """crossentropy""" +651 76 regularizer """no""" +651 76 optimizer """adadelta""" +651 76 training_loop """lcwa""" +651 76 evaluator """rankbased""" +651 77 dataset """kinships""" +651 77 model """simple""" +651 77 loss """crossentropy""" +651 77 regularizer """no""" +651 77 optimizer """adadelta""" +651 77 training_loop """lcwa""" +651 77 evaluator """rankbased""" +651 78 dataset """kinships""" +651 78 model """simple""" +651 78 loss """crossentropy""" +651 78 regularizer """no""" +651 78 optimizer """adadelta""" +651 78 training_loop """lcwa""" +651 78 evaluator """rankbased""" +651 79 dataset """kinships""" +651 79 model """simple""" +651 79 loss """crossentropy""" +651 79 regularizer """no""" +651 79 optimizer """adadelta""" +651 79 training_loop """lcwa""" +651 79 evaluator """rankbased""" +651 80 dataset """kinships""" +651 80 model """simple""" +651 80 loss """crossentropy""" +651 80 regularizer """no""" +651 80 optimizer """adadelta""" +651 80 training_loop """lcwa""" +651 80 evaluator """rankbased""" +651 81 dataset """kinships""" +651 81 model """simple""" +651 81 loss """crossentropy""" +651 81 regularizer """no""" +651 81 optimizer """adadelta""" +651 81 training_loop """lcwa""" +651 81 evaluator """rankbased""" +651 82 dataset """kinships""" +651 82 model """simple""" +651 82 loss """crossentropy""" +651 82 regularizer """no""" +651 82 optimizer """adadelta""" +651 82 training_loop """lcwa""" +651 82 evaluator """rankbased""" +651 83 dataset """kinships""" +651 83 model """simple""" +651 83 loss """crossentropy""" +651 83 regularizer """no""" +651 83 optimizer """adadelta""" +651 83 training_loop """lcwa""" +651 83 evaluator """rankbased""" +651 84 dataset """kinships""" +651 
84 model """simple""" +651 84 loss """crossentropy""" +651 84 regularizer """no""" +651 84 optimizer """adadelta""" +651 84 training_loop """lcwa""" +651 84 evaluator """rankbased""" +651 85 dataset """kinships""" +651 85 model """simple""" +651 85 loss """crossentropy""" +651 85 regularizer """no""" +651 85 optimizer """adadelta""" +651 85 training_loop """lcwa""" +651 85 evaluator """rankbased""" +651 86 dataset """kinships""" +651 86 model """simple""" +651 86 loss """crossentropy""" +651 86 regularizer """no""" +651 86 optimizer """adadelta""" +651 86 training_loop """lcwa""" +651 86 evaluator """rankbased""" +651 87 dataset """kinships""" +651 87 model """simple""" +651 87 loss """crossentropy""" +651 87 regularizer """no""" +651 87 optimizer """adadelta""" +651 87 training_loop """lcwa""" +651 87 evaluator """rankbased""" +651 88 dataset """kinships""" +651 88 model """simple""" +651 88 loss """crossentropy""" +651 88 regularizer """no""" +651 88 optimizer """adadelta""" +651 88 training_loop """lcwa""" +651 88 evaluator """rankbased""" +651 89 dataset """kinships""" +651 89 model """simple""" +651 89 loss """crossentropy""" +651 89 regularizer """no""" +651 89 optimizer """adadelta""" +651 89 training_loop """lcwa""" +651 89 evaluator """rankbased""" +651 90 dataset """kinships""" +651 90 model """simple""" +651 90 loss """crossentropy""" +651 90 regularizer """no""" +651 90 optimizer """adadelta""" +651 90 training_loop """lcwa""" +651 90 evaluator """rankbased""" +651 91 dataset """kinships""" +651 91 model """simple""" +651 91 loss """crossentropy""" +651 91 regularizer """no""" +651 91 optimizer """adadelta""" +651 91 training_loop """lcwa""" +651 91 evaluator """rankbased""" +651 92 dataset """kinships""" +651 92 model """simple""" +651 92 loss """crossentropy""" +651 92 regularizer """no""" +651 92 optimizer """adadelta""" +651 92 training_loop """lcwa""" +651 92 evaluator """rankbased""" +651 93 dataset """kinships""" +651 93 model """simple""" +651 
93 loss """crossentropy""" +651 93 regularizer """no""" +651 93 optimizer """adadelta""" +651 93 training_loop """lcwa""" +651 93 evaluator """rankbased""" +651 94 dataset """kinships""" +651 94 model """simple""" +651 94 loss """crossentropy""" +651 94 regularizer """no""" +651 94 optimizer """adadelta""" +651 94 training_loop """lcwa""" +651 94 evaluator """rankbased""" +651 95 dataset """kinships""" +651 95 model """simple""" +651 95 loss """crossentropy""" +651 95 regularizer """no""" +651 95 optimizer """adadelta""" +651 95 training_loop """lcwa""" +651 95 evaluator """rankbased""" +651 96 dataset """kinships""" +651 96 model """simple""" +651 96 loss """crossentropy""" +651 96 regularizer """no""" +651 96 optimizer """adadelta""" +651 96 training_loop """lcwa""" +651 96 evaluator """rankbased""" +651 97 dataset """kinships""" +651 97 model """simple""" +651 97 loss """crossentropy""" +651 97 regularizer """no""" +651 97 optimizer """adadelta""" +651 97 training_loop """lcwa""" +651 97 evaluator """rankbased""" +651 98 dataset """kinships""" +651 98 model """simple""" +651 98 loss """crossentropy""" +651 98 regularizer """no""" +651 98 optimizer """adadelta""" +651 98 training_loop """lcwa""" +651 98 evaluator """rankbased""" +651 99 dataset """kinships""" +651 99 model """simple""" +651 99 loss """crossentropy""" +651 99 regularizer """no""" +651 99 optimizer """adadelta""" +651 99 training_loop """lcwa""" +651 99 evaluator """rankbased""" +651 100 dataset """kinships""" +651 100 model """simple""" +651 100 loss """crossentropy""" +651 100 regularizer """no""" +651 100 optimizer """adadelta""" +651 100 training_loop """lcwa""" +651 100 evaluator """rankbased""" +652 1 model.embedding_dim 0.0 +652 1 training.batch_size 1.0 +652 1 training.label_smoothing 0.002501120523561068 +652 2 model.embedding_dim 1.0 +652 2 training.batch_size 1.0 +652 2 training.label_smoothing 0.01748479334788211 +652 3 model.embedding_dim 1.0 +652 3 training.batch_size 2.0 +652 3 
training.label_smoothing 0.003067491623898693 +652 4 model.embedding_dim 0.0 +652 4 training.batch_size 1.0 +652 4 training.label_smoothing 0.008872055877781771 +652 5 model.embedding_dim 0.0 +652 5 training.batch_size 0.0 +652 5 training.label_smoothing 0.3734504060645687 +652 6 model.embedding_dim 1.0 +652 6 training.batch_size 1.0 +652 6 training.label_smoothing 0.0010819549643553414 +652 7 model.embedding_dim 2.0 +652 7 training.batch_size 2.0 +652 7 training.label_smoothing 0.0011782770314148813 +652 8 model.embedding_dim 0.0 +652 8 training.batch_size 2.0 +652 8 training.label_smoothing 0.0320896136945899 +652 9 model.embedding_dim 2.0 +652 9 training.batch_size 0.0 +652 9 training.label_smoothing 0.16317519411369216 +652 10 model.embedding_dim 1.0 +652 10 training.batch_size 2.0 +652 10 training.label_smoothing 0.0163860991437647 +652 11 model.embedding_dim 2.0 +652 11 training.batch_size 0.0 +652 11 training.label_smoothing 0.011203559462788604 +652 12 model.embedding_dim 0.0 +652 12 training.batch_size 2.0 +652 12 training.label_smoothing 0.003677324347250034 +652 13 model.embedding_dim 2.0 +652 13 training.batch_size 0.0 +652 13 training.label_smoothing 0.0030495042191441475 +652 14 model.embedding_dim 0.0 +652 14 training.batch_size 0.0 +652 14 training.label_smoothing 0.021720722132458885 +652 15 model.embedding_dim 1.0 +652 15 training.batch_size 1.0 +652 15 training.label_smoothing 0.1225538290230626 +652 16 model.embedding_dim 2.0 +652 16 training.batch_size 1.0 +652 16 training.label_smoothing 0.18231500325362102 +652 17 model.embedding_dim 0.0 +652 17 training.batch_size 0.0 +652 17 training.label_smoothing 0.003240559578522886 +652 18 model.embedding_dim 2.0 +652 18 training.batch_size 1.0 +652 18 training.label_smoothing 0.06341571503530131 +652 19 model.embedding_dim 1.0 +652 19 training.batch_size 2.0 +652 19 training.label_smoothing 0.03370101612122706 +652 20 model.embedding_dim 1.0 +652 20 training.batch_size 2.0 +652 20 
training.label_smoothing 0.002483799217163039 +652 21 model.embedding_dim 2.0 +652 21 training.batch_size 0.0 +652 21 training.label_smoothing 0.0018634070043728127 +652 22 model.embedding_dim 0.0 +652 22 training.batch_size 0.0 +652 22 training.label_smoothing 0.18522145609796595 +652 23 model.embedding_dim 0.0 +652 23 training.batch_size 0.0 +652 23 training.label_smoothing 0.05504793491234438 +652 24 model.embedding_dim 1.0 +652 24 training.batch_size 0.0 +652 24 training.label_smoothing 0.0010458225836975975 +652 25 model.embedding_dim 0.0 +652 25 training.batch_size 1.0 +652 25 training.label_smoothing 0.36180875267447704 +652 26 model.embedding_dim 0.0 +652 26 training.batch_size 0.0 +652 26 training.label_smoothing 0.22975959559542553 +652 27 model.embedding_dim 0.0 +652 27 training.batch_size 2.0 +652 27 training.label_smoothing 0.009123800373107673 +652 28 model.embedding_dim 2.0 +652 28 training.batch_size 2.0 +652 28 training.label_smoothing 0.2941953872088852 +652 29 model.embedding_dim 1.0 +652 29 training.batch_size 2.0 +652 29 training.label_smoothing 0.11911124305786053 +652 30 model.embedding_dim 2.0 +652 30 training.batch_size 0.0 +652 30 training.label_smoothing 0.009862460471892064 +652 31 model.embedding_dim 2.0 +652 31 training.batch_size 0.0 +652 31 training.label_smoothing 0.018218998284733022 +652 32 model.embedding_dim 2.0 +652 32 training.batch_size 1.0 +652 32 training.label_smoothing 0.003951066803742657 +652 33 model.embedding_dim 0.0 +652 33 training.batch_size 2.0 +652 33 training.label_smoothing 0.04155377956090471 +652 34 model.embedding_dim 1.0 +652 34 training.batch_size 1.0 +652 34 training.label_smoothing 0.0037144862430714956 +652 35 model.embedding_dim 0.0 +652 35 training.batch_size 0.0 +652 35 training.label_smoothing 0.2724187181950197 +652 36 model.embedding_dim 2.0 +652 36 training.batch_size 1.0 +652 36 training.label_smoothing 0.008146982995234232 +652 37 model.embedding_dim 1.0 +652 37 training.batch_size 0.0 +652 37 
training.label_smoothing 0.0016162550483382672 +652 38 model.embedding_dim 2.0 +652 38 training.batch_size 0.0 +652 38 training.label_smoothing 0.010049214427606264 +652 39 model.embedding_dim 2.0 +652 39 training.batch_size 2.0 +652 39 training.label_smoothing 0.011130423233947697 +652 40 model.embedding_dim 2.0 +652 40 training.batch_size 1.0 +652 40 training.label_smoothing 0.1790780004621164 +652 41 model.embedding_dim 0.0 +652 41 training.batch_size 0.0 +652 41 training.label_smoothing 0.18622508578942046 +652 42 model.embedding_dim 2.0 +652 42 training.batch_size 2.0 +652 42 training.label_smoothing 0.0026567047501773297 +652 43 model.embedding_dim 1.0 +652 43 training.batch_size 1.0 +652 43 training.label_smoothing 0.0315526828684477 +652 44 model.embedding_dim 2.0 +652 44 training.batch_size 2.0 +652 44 training.label_smoothing 0.006814655417141715 +652 45 model.embedding_dim 2.0 +652 45 training.batch_size 1.0 +652 45 training.label_smoothing 0.007977036887348007 +652 46 model.embedding_dim 1.0 +652 46 training.batch_size 0.0 +652 46 training.label_smoothing 0.224785664025156 +652 47 model.embedding_dim 2.0 +652 47 training.batch_size 0.0 +652 47 training.label_smoothing 0.6869981061167927 +652 48 model.embedding_dim 1.0 +652 48 training.batch_size 1.0 +652 48 training.label_smoothing 0.2129229239380463 +652 49 model.embedding_dim 1.0 +652 49 training.batch_size 1.0 +652 49 training.label_smoothing 0.4227047033805561 +652 50 model.embedding_dim 2.0 +652 50 training.batch_size 2.0 +652 50 training.label_smoothing 0.3773697871735022 +652 51 model.embedding_dim 1.0 +652 51 training.batch_size 2.0 +652 51 training.label_smoothing 0.1253818409595113 +652 52 model.embedding_dim 2.0 +652 52 training.batch_size 1.0 +652 52 training.label_smoothing 0.0010294779134745577 +652 53 model.embedding_dim 1.0 +652 53 training.batch_size 1.0 +652 53 training.label_smoothing 0.014339908191850761 +652 54 model.embedding_dim 0.0 +652 54 training.batch_size 0.0 +652 54 
training.label_smoothing 0.015716686009824 +652 55 model.embedding_dim 0.0 +652 55 training.batch_size 1.0 +652 55 training.label_smoothing 0.04653136889729851 +652 56 model.embedding_dim 2.0 +652 56 training.batch_size 2.0 +652 56 training.label_smoothing 0.9189196658490052 +652 57 model.embedding_dim 0.0 +652 57 training.batch_size 0.0 +652 57 training.label_smoothing 0.004220382747462734 +652 58 model.embedding_dim 0.0 +652 58 training.batch_size 1.0 +652 58 training.label_smoothing 0.5316366092810119 +652 59 model.embedding_dim 0.0 +652 59 training.batch_size 0.0 +652 59 training.label_smoothing 0.04538902772717238 +652 60 model.embedding_dim 1.0 +652 60 training.batch_size 0.0 +652 60 training.label_smoothing 0.1302110745772907 +652 61 model.embedding_dim 1.0 +652 61 training.batch_size 1.0 +652 61 training.label_smoothing 0.006922070218176599 +652 62 model.embedding_dim 2.0 +652 62 training.batch_size 2.0 +652 62 training.label_smoothing 0.019823607293278856 +652 63 model.embedding_dim 2.0 +652 63 training.batch_size 1.0 +652 63 training.label_smoothing 0.007796134519027776 +652 64 model.embedding_dim 2.0 +652 64 training.batch_size 1.0 +652 64 training.label_smoothing 0.27631717584912113 +652 65 model.embedding_dim 0.0 +652 65 training.batch_size 1.0 +652 65 training.label_smoothing 0.04916331632942465 +652 66 model.embedding_dim 0.0 +652 66 training.batch_size 0.0 +652 66 training.label_smoothing 0.005005435972971858 +652 67 model.embedding_dim 0.0 +652 67 training.batch_size 0.0 +652 67 training.label_smoothing 0.05990499224577584 +652 68 model.embedding_dim 2.0 +652 68 training.batch_size 2.0 +652 68 training.label_smoothing 0.025245917459701412 +652 69 model.embedding_dim 0.0 +652 69 training.batch_size 0.0 +652 69 training.label_smoothing 0.10076293169538106 +652 70 model.embedding_dim 1.0 +652 70 training.batch_size 2.0 +652 70 training.label_smoothing 0.23834772022740142 +652 71 model.embedding_dim 0.0 +652 71 training.batch_size 1.0 +652 71 
training.label_smoothing 0.01123259066569745 +652 72 model.embedding_dim 1.0 +652 72 training.batch_size 0.0 +652 72 training.label_smoothing 0.001096558149691078 +652 73 model.embedding_dim 2.0 +652 73 training.batch_size 1.0 +652 73 training.label_smoothing 0.002932637301074666 +652 74 model.embedding_dim 0.0 +652 74 training.batch_size 0.0 +652 74 training.label_smoothing 0.021712300243193705 +652 75 model.embedding_dim 2.0 +652 75 training.batch_size 2.0 +652 75 training.label_smoothing 0.0010556046697769224 +652 76 model.embedding_dim 2.0 +652 76 training.batch_size 2.0 +652 76 training.label_smoothing 0.16973323338347743 +652 77 model.embedding_dim 0.0 +652 77 training.batch_size 0.0 +652 77 training.label_smoothing 0.0017967888886300286 +652 78 model.embedding_dim 2.0 +652 78 training.batch_size 2.0 +652 78 training.label_smoothing 0.0012586190201784955 +652 79 model.embedding_dim 2.0 +652 79 training.batch_size 0.0 +652 79 training.label_smoothing 0.04958397689610847 +652 80 model.embedding_dim 0.0 +652 80 training.batch_size 0.0 +652 80 training.label_smoothing 0.002232851676547517 +652 81 model.embedding_dim 2.0 +652 81 training.batch_size 1.0 +652 81 training.label_smoothing 0.015164936329752543 +652 82 model.embedding_dim 1.0 +652 82 training.batch_size 0.0 +652 82 training.label_smoothing 0.13052986576600864 +652 83 model.embedding_dim 0.0 +652 83 training.batch_size 2.0 +652 83 training.label_smoothing 0.22141756534714244 +652 84 model.embedding_dim 0.0 +652 84 training.batch_size 0.0 +652 84 training.label_smoothing 0.04044705689295676 +652 85 model.embedding_dim 1.0 +652 85 training.batch_size 1.0 +652 85 training.label_smoothing 0.042327958221357924 +652 86 model.embedding_dim 2.0 +652 86 training.batch_size 1.0 +652 86 training.label_smoothing 0.0011804782338908323 +652 87 model.embedding_dim 2.0 +652 87 training.batch_size 1.0 +652 87 training.label_smoothing 0.006278539937641289 +652 88 model.embedding_dim 0.0 +652 88 training.batch_size 2.0 
+652 88 training.label_smoothing 0.0014076687540685042 +652 89 model.embedding_dim 2.0 +652 89 training.batch_size 2.0 +652 89 training.label_smoothing 0.006288350553529643 +652 90 model.embedding_dim 2.0 +652 90 training.batch_size 2.0 +652 90 training.label_smoothing 0.07686454089208737 +652 91 model.embedding_dim 2.0 +652 91 training.batch_size 0.0 +652 91 training.label_smoothing 0.020900441656948453 +652 92 model.embedding_dim 2.0 +652 92 training.batch_size 2.0 +652 92 training.label_smoothing 0.011294944625941654 +652 93 model.embedding_dim 0.0 +652 93 training.batch_size 0.0 +652 93 training.label_smoothing 0.002186570633869869 +652 94 model.embedding_dim 0.0 +652 94 training.batch_size 0.0 +652 94 training.label_smoothing 0.1144268059059208 +652 95 model.embedding_dim 2.0 +652 95 training.batch_size 1.0 +652 95 training.label_smoothing 0.0673686050489084 +652 96 model.embedding_dim 1.0 +652 96 training.batch_size 0.0 +652 96 training.label_smoothing 0.027431404517626948 +652 97 model.embedding_dim 1.0 +652 97 training.batch_size 2.0 +652 97 training.label_smoothing 0.28042206521189744 +652 98 model.embedding_dim 0.0 +652 98 training.batch_size 2.0 +652 98 training.label_smoothing 0.15722892662044566 +652 99 model.embedding_dim 2.0 +652 99 training.batch_size 2.0 +652 99 training.label_smoothing 0.2240325756892123 +652 100 model.embedding_dim 2.0 +652 100 training.batch_size 1.0 +652 100 training.label_smoothing 0.05273562821029903 +652 1 dataset """kinships""" +652 1 model """simple""" +652 1 loss """crossentropy""" +652 1 regularizer """no""" +652 1 optimizer """adadelta""" +652 1 training_loop """lcwa""" +652 1 evaluator """rankbased""" +652 2 dataset """kinships""" +652 2 model """simple""" +652 2 loss """crossentropy""" +652 2 regularizer """no""" +652 2 optimizer """adadelta""" +652 2 training_loop """lcwa""" +652 2 evaluator """rankbased""" +652 3 dataset """kinships""" +652 3 model """simple""" +652 3 loss """crossentropy""" +652 3 regularizer 
"""no""" +652 3 optimizer """adadelta""" +652 3 training_loop """lcwa""" +652 3 evaluator """rankbased""" +652 4 dataset """kinships""" +652 4 model """simple""" +652 4 loss """crossentropy""" +652 4 regularizer """no""" +652 4 optimizer """adadelta""" +652 4 training_loop """lcwa""" +652 4 evaluator """rankbased""" +652 5 dataset """kinships""" +652 5 model """simple""" +652 5 loss """crossentropy""" +652 5 regularizer """no""" +652 5 optimizer """adadelta""" +652 5 training_loop """lcwa""" +652 5 evaluator """rankbased""" +652 6 dataset """kinships""" +652 6 model """simple""" +652 6 loss """crossentropy""" +652 6 regularizer """no""" +652 6 optimizer """adadelta""" +652 6 training_loop """lcwa""" +652 6 evaluator """rankbased""" +652 7 dataset """kinships""" +652 7 model """simple""" +652 7 loss """crossentropy""" +652 7 regularizer """no""" +652 7 optimizer """adadelta""" +652 7 training_loop """lcwa""" +652 7 evaluator """rankbased""" +652 8 dataset """kinships""" +652 8 model """simple""" +652 8 loss """crossentropy""" +652 8 regularizer """no""" +652 8 optimizer """adadelta""" +652 8 training_loop """lcwa""" +652 8 evaluator """rankbased""" +652 9 dataset """kinships""" +652 9 model """simple""" +652 9 loss """crossentropy""" +652 9 regularizer """no""" +652 9 optimizer """adadelta""" +652 9 training_loop """lcwa""" +652 9 evaluator """rankbased""" +652 10 dataset """kinships""" +652 10 model """simple""" +652 10 loss """crossentropy""" +652 10 regularizer """no""" +652 10 optimizer """adadelta""" +652 10 training_loop """lcwa""" +652 10 evaluator """rankbased""" +652 11 dataset """kinships""" +652 11 model """simple""" +652 11 loss """crossentropy""" +652 11 regularizer """no""" +652 11 optimizer """adadelta""" +652 11 training_loop """lcwa""" +652 11 evaluator """rankbased""" +652 12 dataset """kinships""" +652 12 model """simple""" +652 12 loss """crossentropy""" +652 12 regularizer """no""" +652 12 optimizer """adadelta""" +652 12 training_loop 
"""lcwa""" +652 12 evaluator """rankbased""" +652 13 dataset """kinships""" +652 13 model """simple""" +652 13 loss """crossentropy""" +652 13 regularizer """no""" +652 13 optimizer """adadelta""" +652 13 training_loop """lcwa""" +652 13 evaluator """rankbased""" +652 14 dataset """kinships""" +652 14 model """simple""" +652 14 loss """crossentropy""" +652 14 regularizer """no""" +652 14 optimizer """adadelta""" +652 14 training_loop """lcwa""" +652 14 evaluator """rankbased""" +652 15 dataset """kinships""" +652 15 model """simple""" +652 15 loss """crossentropy""" +652 15 regularizer """no""" +652 15 optimizer """adadelta""" +652 15 training_loop """lcwa""" +652 15 evaluator """rankbased""" +652 16 dataset """kinships""" +652 16 model """simple""" +652 16 loss """crossentropy""" +652 16 regularizer """no""" +652 16 optimizer """adadelta""" +652 16 training_loop """lcwa""" +652 16 evaluator """rankbased""" +652 17 dataset """kinships""" +652 17 model """simple""" +652 17 loss """crossentropy""" +652 17 regularizer """no""" +652 17 optimizer """adadelta""" +652 17 training_loop """lcwa""" +652 17 evaluator """rankbased""" +652 18 dataset """kinships""" +652 18 model """simple""" +652 18 loss """crossentropy""" +652 18 regularizer """no""" +652 18 optimizer """adadelta""" +652 18 training_loop """lcwa""" +652 18 evaluator """rankbased""" +652 19 dataset """kinships""" +652 19 model """simple""" +652 19 loss """crossentropy""" +652 19 regularizer """no""" +652 19 optimizer """adadelta""" +652 19 training_loop """lcwa""" +652 19 evaluator """rankbased""" +652 20 dataset """kinships""" +652 20 model """simple""" +652 20 loss """crossentropy""" +652 20 regularizer """no""" +652 20 optimizer """adadelta""" +652 20 training_loop """lcwa""" +652 20 evaluator """rankbased""" +652 21 dataset """kinships""" +652 21 model """simple""" +652 21 loss """crossentropy""" +652 21 regularizer """no""" +652 21 optimizer """adadelta""" +652 21 training_loop """lcwa""" +652 21 evaluator 
"""rankbased""" +652 22 dataset """kinships""" +652 22 model """simple""" +652 22 loss """crossentropy""" +652 22 regularizer """no""" +652 22 optimizer """adadelta""" +652 22 training_loop """lcwa""" +652 22 evaluator """rankbased""" +652 23 dataset """kinships""" +652 23 model """simple""" +652 23 loss """crossentropy""" +652 23 regularizer """no""" +652 23 optimizer """adadelta""" +652 23 training_loop """lcwa""" +652 23 evaluator """rankbased""" +652 24 dataset """kinships""" +652 24 model """simple""" +652 24 loss """crossentropy""" +652 24 regularizer """no""" +652 24 optimizer """adadelta""" +652 24 training_loop """lcwa""" +652 24 evaluator """rankbased""" +652 25 dataset """kinships""" +652 25 model """simple""" +652 25 loss """crossentropy""" +652 25 regularizer """no""" +652 25 optimizer """adadelta""" +652 25 training_loop """lcwa""" +652 25 evaluator """rankbased""" +652 26 dataset """kinships""" +652 26 model """simple""" +652 26 loss """crossentropy""" +652 26 regularizer """no""" +652 26 optimizer """adadelta""" +652 26 training_loop """lcwa""" +652 26 evaluator """rankbased""" +652 27 dataset """kinships""" +652 27 model """simple""" +652 27 loss """crossentropy""" +652 27 regularizer """no""" +652 27 optimizer """adadelta""" +652 27 training_loop """lcwa""" +652 27 evaluator """rankbased""" +652 28 dataset """kinships""" +652 28 model """simple""" +652 28 loss """crossentropy""" +652 28 regularizer """no""" +652 28 optimizer """adadelta""" +652 28 training_loop """lcwa""" +652 28 evaluator """rankbased""" +652 29 dataset """kinships""" +652 29 model """simple""" +652 29 loss """crossentropy""" +652 29 regularizer """no""" +652 29 optimizer """adadelta""" +652 29 training_loop """lcwa""" +652 29 evaluator """rankbased""" +652 30 dataset """kinships""" +652 30 model """simple""" +652 30 loss """crossentropy""" +652 30 regularizer """no""" +652 30 optimizer """adadelta""" +652 30 training_loop """lcwa""" +652 30 evaluator """rankbased""" +652 31 
dataset """kinships""" +652 31 model """simple""" +652 31 loss """crossentropy""" +652 31 regularizer """no""" +652 31 optimizer """adadelta""" +652 31 training_loop """lcwa""" +652 31 evaluator """rankbased""" +652 32 dataset """kinships""" +652 32 model """simple""" +652 32 loss """crossentropy""" +652 32 regularizer """no""" +652 32 optimizer """adadelta""" +652 32 training_loop """lcwa""" +652 32 evaluator """rankbased""" +652 33 dataset """kinships""" +652 33 model """simple""" +652 33 loss """crossentropy""" +652 33 regularizer """no""" +652 33 optimizer """adadelta""" +652 33 training_loop """lcwa""" +652 33 evaluator """rankbased""" +652 34 dataset """kinships""" +652 34 model """simple""" +652 34 loss """crossentropy""" +652 34 regularizer """no""" +652 34 optimizer """adadelta""" +652 34 training_loop """lcwa""" +652 34 evaluator """rankbased""" +652 35 dataset """kinships""" +652 35 model """simple""" +652 35 loss """crossentropy""" +652 35 regularizer """no""" +652 35 optimizer """adadelta""" +652 35 training_loop """lcwa""" +652 35 evaluator """rankbased""" +652 36 dataset """kinships""" +652 36 model """simple""" +652 36 loss """crossentropy""" +652 36 regularizer """no""" +652 36 optimizer """adadelta""" +652 36 training_loop """lcwa""" +652 36 evaluator """rankbased""" +652 37 dataset """kinships""" +652 37 model """simple""" +652 37 loss """crossentropy""" +652 37 regularizer """no""" +652 37 optimizer """adadelta""" +652 37 training_loop """lcwa""" +652 37 evaluator """rankbased""" +652 38 dataset """kinships""" +652 38 model """simple""" +652 38 loss """crossentropy""" +652 38 regularizer """no""" +652 38 optimizer """adadelta""" +652 38 training_loop """lcwa""" +652 38 evaluator """rankbased""" +652 39 dataset """kinships""" +652 39 model """simple""" +652 39 loss """crossentropy""" +652 39 regularizer """no""" +652 39 optimizer """adadelta""" +652 39 training_loop """lcwa""" +652 39 evaluator """rankbased""" +652 40 dataset """kinships""" +652 
40 model """simple""" +652 40 loss """crossentropy""" +652 40 regularizer """no""" +652 40 optimizer """adadelta""" +652 40 training_loop """lcwa""" +652 40 evaluator """rankbased""" +652 41 dataset """kinships""" +652 41 model """simple""" +652 41 loss """crossentropy""" +652 41 regularizer """no""" +652 41 optimizer """adadelta""" +652 41 training_loop """lcwa""" +652 41 evaluator """rankbased""" +652 42 dataset """kinships""" +652 42 model """simple""" +652 42 loss """crossentropy""" +652 42 regularizer """no""" +652 42 optimizer """adadelta""" +652 42 training_loop """lcwa""" +652 42 evaluator """rankbased""" +652 43 dataset """kinships""" +652 43 model """simple""" +652 43 loss """crossentropy""" +652 43 regularizer """no""" +652 43 optimizer """adadelta""" +652 43 training_loop """lcwa""" +652 43 evaluator """rankbased""" +652 44 dataset """kinships""" +652 44 model """simple""" +652 44 loss """crossentropy""" +652 44 regularizer """no""" +652 44 optimizer """adadelta""" +652 44 training_loop """lcwa""" +652 44 evaluator """rankbased""" +652 45 dataset """kinships""" +652 45 model """simple""" +652 45 loss """crossentropy""" +652 45 regularizer """no""" +652 45 optimizer """adadelta""" +652 45 training_loop """lcwa""" +652 45 evaluator """rankbased""" +652 46 dataset """kinships""" +652 46 model """simple""" +652 46 loss """crossentropy""" +652 46 regularizer """no""" +652 46 optimizer """adadelta""" +652 46 training_loop """lcwa""" +652 46 evaluator """rankbased""" +652 47 dataset """kinships""" +652 47 model """simple""" +652 47 loss """crossentropy""" +652 47 regularizer """no""" +652 47 optimizer """adadelta""" +652 47 training_loop """lcwa""" +652 47 evaluator """rankbased""" +652 48 dataset """kinships""" +652 48 model """simple""" +652 48 loss """crossentropy""" +652 48 regularizer """no""" +652 48 optimizer """adadelta""" +652 48 training_loop """lcwa""" +652 48 evaluator """rankbased""" +652 49 dataset """kinships""" +652 49 model """simple""" +652 
49 loss """crossentropy""" +652 49 regularizer """no""" +652 49 optimizer """adadelta""" +652 49 training_loop """lcwa""" +652 49 evaluator """rankbased""" +652 50 dataset """kinships""" +652 50 model """simple""" +652 50 loss """crossentropy""" +652 50 regularizer """no""" +652 50 optimizer """adadelta""" +652 50 training_loop """lcwa""" +652 50 evaluator """rankbased""" +652 51 dataset """kinships""" +652 51 model """simple""" +652 51 loss """crossentropy""" +652 51 regularizer """no""" +652 51 optimizer """adadelta""" +652 51 training_loop """lcwa""" +652 51 evaluator """rankbased""" +652 52 dataset """kinships""" +652 52 model """simple""" +652 52 loss """crossentropy""" +652 52 regularizer """no""" +652 52 optimizer """adadelta""" +652 52 training_loop """lcwa""" +652 52 evaluator """rankbased""" +652 53 dataset """kinships""" +652 53 model """simple""" +652 53 loss """crossentropy""" +652 53 regularizer """no""" +652 53 optimizer """adadelta""" +652 53 training_loop """lcwa""" +652 53 evaluator """rankbased""" +652 54 dataset """kinships""" +652 54 model """simple""" +652 54 loss """crossentropy""" +652 54 regularizer """no""" +652 54 optimizer """adadelta""" +652 54 training_loop """lcwa""" +652 54 evaluator """rankbased""" +652 55 dataset """kinships""" +652 55 model """simple""" +652 55 loss """crossentropy""" +652 55 regularizer """no""" +652 55 optimizer """adadelta""" +652 55 training_loop """lcwa""" +652 55 evaluator """rankbased""" +652 56 dataset """kinships""" +652 56 model """simple""" +652 56 loss """crossentropy""" +652 56 regularizer """no""" +652 56 optimizer """adadelta""" +652 56 training_loop """lcwa""" +652 56 evaluator """rankbased""" +652 57 dataset """kinships""" +652 57 model """simple""" +652 57 loss """crossentropy""" +652 57 regularizer """no""" +652 57 optimizer """adadelta""" +652 57 training_loop """lcwa""" +652 57 evaluator """rankbased""" +652 58 dataset """kinships""" +652 58 model """simple""" +652 58 loss """crossentropy""" 
+652 58 regularizer """no""" +652 58 optimizer """adadelta""" +652 58 training_loop """lcwa""" +652 58 evaluator """rankbased""" +652 59 dataset """kinships""" +652 59 model """simple""" +652 59 loss """crossentropy""" +652 59 regularizer """no""" +652 59 optimizer """adadelta""" +652 59 training_loop """lcwa""" +652 59 evaluator """rankbased""" +652 60 dataset """kinships""" +652 60 model """simple""" +652 60 loss """crossentropy""" +652 60 regularizer """no""" +652 60 optimizer """adadelta""" +652 60 training_loop """lcwa""" +652 60 evaluator """rankbased""" +652 61 dataset """kinships""" +652 61 model """simple""" +652 61 loss """crossentropy""" +652 61 regularizer """no""" +652 61 optimizer """adadelta""" +652 61 training_loop """lcwa""" +652 61 evaluator """rankbased""" +652 62 dataset """kinships""" +652 62 model """simple""" +652 62 loss """crossentropy""" +652 62 regularizer """no""" +652 62 optimizer """adadelta""" +652 62 training_loop """lcwa""" +652 62 evaluator """rankbased""" +652 63 dataset """kinships""" +652 63 model """simple""" +652 63 loss """crossentropy""" +652 63 regularizer """no""" +652 63 optimizer """adadelta""" +652 63 training_loop """lcwa""" +652 63 evaluator """rankbased""" +652 64 dataset """kinships""" +652 64 model """simple""" +652 64 loss """crossentropy""" +652 64 regularizer """no""" +652 64 optimizer """adadelta""" +652 64 training_loop """lcwa""" +652 64 evaluator """rankbased""" +652 65 dataset """kinships""" +652 65 model """simple""" +652 65 loss """crossentropy""" +652 65 regularizer """no""" +652 65 optimizer """adadelta""" +652 65 training_loop """lcwa""" +652 65 evaluator """rankbased""" +652 66 dataset """kinships""" +652 66 model """simple""" +652 66 loss """crossentropy""" +652 66 regularizer """no""" +652 66 optimizer """adadelta""" +652 66 training_loop """lcwa""" +652 66 evaluator """rankbased""" +652 67 dataset """kinships""" +652 67 model """simple""" +652 67 loss """crossentropy""" +652 67 regularizer """no""" 
+652 67 optimizer """adadelta""" +652 67 training_loop """lcwa""" +652 67 evaluator """rankbased""" +652 68 dataset """kinships""" +652 68 model """simple""" +652 68 loss """crossentropy""" +652 68 regularizer """no""" +652 68 optimizer """adadelta""" +652 68 training_loop """lcwa""" +652 68 evaluator """rankbased""" +652 69 dataset """kinships""" +652 69 model """simple""" +652 69 loss """crossentropy""" +652 69 regularizer """no""" +652 69 optimizer """adadelta""" +652 69 training_loop """lcwa""" +652 69 evaluator """rankbased""" +652 70 dataset """kinships""" +652 70 model """simple""" +652 70 loss """crossentropy""" +652 70 regularizer """no""" +652 70 optimizer """adadelta""" +652 70 training_loop """lcwa""" +652 70 evaluator """rankbased""" +652 71 dataset """kinships""" +652 71 model """simple""" +652 71 loss """crossentropy""" +652 71 regularizer """no""" +652 71 optimizer """adadelta""" +652 71 training_loop """lcwa""" +652 71 evaluator """rankbased""" +652 72 dataset """kinships""" +652 72 model """simple""" +652 72 loss """crossentropy""" +652 72 regularizer """no""" +652 72 optimizer """adadelta""" +652 72 training_loop """lcwa""" +652 72 evaluator """rankbased""" +652 73 dataset """kinships""" +652 73 model """simple""" +652 73 loss """crossentropy""" +652 73 regularizer """no""" +652 73 optimizer """adadelta""" +652 73 training_loop """lcwa""" +652 73 evaluator """rankbased""" +652 74 dataset """kinships""" +652 74 model """simple""" +652 74 loss """crossentropy""" +652 74 regularizer """no""" +652 74 optimizer """adadelta""" +652 74 training_loop """lcwa""" +652 74 evaluator """rankbased""" +652 75 dataset """kinships""" +652 75 model """simple""" +652 75 loss """crossentropy""" +652 75 regularizer """no""" +652 75 optimizer """adadelta""" +652 75 training_loop """lcwa""" +652 75 evaluator """rankbased""" +652 76 dataset """kinships""" +652 76 model """simple""" +652 76 loss """crossentropy""" +652 76 regularizer """no""" +652 76 optimizer 
"""adadelta""" +652 76 training_loop """lcwa""" +652 76 evaluator """rankbased""" +652 77 dataset """kinships""" +652 77 model """simple""" +652 77 loss """crossentropy""" +652 77 regularizer """no""" +652 77 optimizer """adadelta""" +652 77 training_loop """lcwa""" +652 77 evaluator """rankbased""" +652 78 dataset """kinships""" +652 78 model """simple""" +652 78 loss """crossentropy""" +652 78 regularizer """no""" +652 78 optimizer """adadelta""" +652 78 training_loop """lcwa""" +652 78 evaluator """rankbased""" +652 79 dataset """kinships""" +652 79 model """simple""" +652 79 loss """crossentropy""" +652 79 regularizer """no""" +652 79 optimizer """adadelta""" +652 79 training_loop """lcwa""" +652 79 evaluator """rankbased""" +652 80 dataset """kinships""" +652 80 model """simple""" +652 80 loss """crossentropy""" +652 80 regularizer """no""" +652 80 optimizer """adadelta""" +652 80 training_loop """lcwa""" +652 80 evaluator """rankbased""" +652 81 dataset """kinships""" +652 81 model """simple""" +652 81 loss """crossentropy""" +652 81 regularizer """no""" +652 81 optimizer """adadelta""" +652 81 training_loop """lcwa""" +652 81 evaluator """rankbased""" +652 82 dataset """kinships""" +652 82 model """simple""" +652 82 loss """crossentropy""" +652 82 regularizer """no""" +652 82 optimizer """adadelta""" +652 82 training_loop """lcwa""" +652 82 evaluator """rankbased""" +652 83 dataset """kinships""" +652 83 model """simple""" +652 83 loss """crossentropy""" +652 83 regularizer """no""" +652 83 optimizer """adadelta""" +652 83 training_loop """lcwa""" +652 83 evaluator """rankbased""" +652 84 dataset """kinships""" +652 84 model """simple""" +652 84 loss """crossentropy""" +652 84 regularizer """no""" +652 84 optimizer """adadelta""" +652 84 training_loop """lcwa""" +652 84 evaluator """rankbased""" +652 85 dataset """kinships""" +652 85 model """simple""" +652 85 loss """crossentropy""" +652 85 regularizer """no""" +652 85 optimizer """adadelta""" +652 85 
training_loop """lcwa""" +652 85 evaluator """rankbased""" +652 86 dataset """kinships""" +652 86 model """simple""" +652 86 loss """crossentropy""" +652 86 regularizer """no""" +652 86 optimizer """adadelta""" +652 86 training_loop """lcwa""" +652 86 evaluator """rankbased""" +652 87 dataset """kinships""" +652 87 model """simple""" +652 87 loss """crossentropy""" +652 87 regularizer """no""" +652 87 optimizer """adadelta""" +652 87 training_loop """lcwa""" +652 87 evaluator """rankbased""" +652 88 dataset """kinships""" +652 88 model """simple""" +652 88 loss """crossentropy""" +652 88 regularizer """no""" +652 88 optimizer """adadelta""" +652 88 training_loop """lcwa""" +652 88 evaluator """rankbased""" +652 89 dataset """kinships""" +652 89 model """simple""" +652 89 loss """crossentropy""" +652 89 regularizer """no""" +652 89 optimizer """adadelta""" +652 89 training_loop """lcwa""" +652 89 evaluator """rankbased""" +652 90 dataset """kinships""" +652 90 model """simple""" +652 90 loss """crossentropy""" +652 90 regularizer """no""" +652 90 optimizer """adadelta""" +652 90 training_loop """lcwa""" +652 90 evaluator """rankbased""" +652 91 dataset """kinships""" +652 91 model """simple""" +652 91 loss """crossentropy""" +652 91 regularizer """no""" +652 91 optimizer """adadelta""" +652 91 training_loop """lcwa""" +652 91 evaluator """rankbased""" +652 92 dataset """kinships""" +652 92 model """simple""" +652 92 loss """crossentropy""" +652 92 regularizer """no""" +652 92 optimizer """adadelta""" +652 92 training_loop """lcwa""" +652 92 evaluator """rankbased""" +652 93 dataset """kinships""" +652 93 model """simple""" +652 93 loss """crossentropy""" +652 93 regularizer """no""" +652 93 optimizer """adadelta""" +652 93 training_loop """lcwa""" +652 93 evaluator """rankbased""" +652 94 dataset """kinships""" +652 94 model """simple""" +652 94 loss """crossentropy""" +652 94 regularizer """no""" +652 94 optimizer """adadelta""" +652 94 training_loop """lcwa""" 
+652 94 evaluator """rankbased""" +652 95 dataset """kinships""" +652 95 model """simple""" +652 95 loss """crossentropy""" +652 95 regularizer """no""" +652 95 optimizer """adadelta""" +652 95 training_loop """lcwa""" +652 95 evaluator """rankbased""" +652 96 dataset """kinships""" +652 96 model """simple""" +652 96 loss """crossentropy""" +652 96 regularizer """no""" +652 96 optimizer """adadelta""" +652 96 training_loop """lcwa""" +652 96 evaluator """rankbased""" +652 97 dataset """kinships""" +652 97 model """simple""" +652 97 loss """crossentropy""" +652 97 regularizer """no""" +652 97 optimizer """adadelta""" +652 97 training_loop """lcwa""" +652 97 evaluator """rankbased""" +652 98 dataset """kinships""" +652 98 model """simple""" +652 98 loss """crossentropy""" +652 98 regularizer """no""" +652 98 optimizer """adadelta""" +652 98 training_loop """lcwa""" +652 98 evaluator """rankbased""" +652 99 dataset """kinships""" +652 99 model """simple""" +652 99 loss """crossentropy""" +652 99 regularizer """no""" +652 99 optimizer """adadelta""" +652 99 training_loop """lcwa""" +652 99 evaluator """rankbased""" +652 100 dataset """kinships""" +652 100 model """simple""" +652 100 loss """crossentropy""" +652 100 regularizer """no""" +652 100 optimizer """adadelta""" +652 100 training_loop """lcwa""" +652 100 evaluator """rankbased""" +653 1 model.embedding_dim 1.0 +653 1 negative_sampler.num_negs_per_pos 60.0 +653 1 training.batch_size 2.0 +653 2 model.embedding_dim 1.0 +653 2 negative_sampler.num_negs_per_pos 64.0 +653 2 training.batch_size 1.0 +653 3 model.embedding_dim 2.0 +653 3 negative_sampler.num_negs_per_pos 65.0 +653 3 training.batch_size 0.0 +653 4 model.embedding_dim 1.0 +653 4 negative_sampler.num_negs_per_pos 61.0 +653 4 training.batch_size 2.0 +653 5 model.embedding_dim 0.0 +653 5 negative_sampler.num_negs_per_pos 77.0 +653 5 training.batch_size 0.0 +653 6 model.embedding_dim 2.0 +653 6 negative_sampler.num_negs_per_pos 52.0 +653 6 training.batch_size 
2.0 +653 7 model.embedding_dim 0.0 +653 7 negative_sampler.num_negs_per_pos 0.0 +653 7 training.batch_size 0.0 +653 8 model.embedding_dim 0.0 +653 8 negative_sampler.num_negs_per_pos 33.0 +653 8 training.batch_size 2.0 +653 9 model.embedding_dim 0.0 +653 9 negative_sampler.num_negs_per_pos 15.0 +653 9 training.batch_size 2.0 +653 10 model.embedding_dim 1.0 +653 10 negative_sampler.num_negs_per_pos 77.0 +653 10 training.batch_size 0.0 +653 11 model.embedding_dim 1.0 +653 11 negative_sampler.num_negs_per_pos 25.0 +653 11 training.batch_size 2.0 +653 12 model.embedding_dim 2.0 +653 12 negative_sampler.num_negs_per_pos 43.0 +653 12 training.batch_size 0.0 +653 13 model.embedding_dim 1.0 +653 13 negative_sampler.num_negs_per_pos 68.0 +653 13 training.batch_size 0.0 +653 14 model.embedding_dim 2.0 +653 14 negative_sampler.num_negs_per_pos 21.0 +653 14 training.batch_size 1.0 +653 15 model.embedding_dim 1.0 +653 15 negative_sampler.num_negs_per_pos 67.0 +653 15 training.batch_size 0.0 +653 16 model.embedding_dim 2.0 +653 16 negative_sampler.num_negs_per_pos 27.0 +653 16 training.batch_size 2.0 +653 17 model.embedding_dim 1.0 +653 17 negative_sampler.num_negs_per_pos 5.0 +653 17 training.batch_size 2.0 +653 18 model.embedding_dim 2.0 +653 18 negative_sampler.num_negs_per_pos 7.0 +653 18 training.batch_size 1.0 +653 19 model.embedding_dim 0.0 +653 19 negative_sampler.num_negs_per_pos 4.0 +653 19 training.batch_size 2.0 +653 20 model.embedding_dim 2.0 +653 20 negative_sampler.num_negs_per_pos 98.0 +653 20 training.batch_size 1.0 +653 21 model.embedding_dim 1.0 +653 21 negative_sampler.num_negs_per_pos 80.0 +653 21 training.batch_size 0.0 +653 22 model.embedding_dim 1.0 +653 22 negative_sampler.num_negs_per_pos 9.0 +653 22 training.batch_size 0.0 +653 23 model.embedding_dim 2.0 +653 23 negative_sampler.num_negs_per_pos 29.0 +653 23 training.batch_size 0.0 +653 24 model.embedding_dim 2.0 +653 24 negative_sampler.num_negs_per_pos 39.0 +653 24 training.batch_size 2.0 +653 25 
model.embedding_dim 2.0 +653 25 negative_sampler.num_negs_per_pos 31.0 +653 25 training.batch_size 2.0 +653 26 model.embedding_dim 0.0 +653 26 negative_sampler.num_negs_per_pos 56.0 +653 26 training.batch_size 1.0 +653 27 model.embedding_dim 1.0 +653 27 negative_sampler.num_negs_per_pos 41.0 +653 27 training.batch_size 2.0 +653 28 model.embedding_dim 1.0 +653 28 negative_sampler.num_negs_per_pos 42.0 +653 28 training.batch_size 1.0 +653 29 model.embedding_dim 1.0 +653 29 negative_sampler.num_negs_per_pos 81.0 +653 29 training.batch_size 0.0 +653 30 model.embedding_dim 0.0 +653 30 negative_sampler.num_negs_per_pos 29.0 +653 30 training.batch_size 2.0 +653 31 model.embedding_dim 2.0 +653 31 negative_sampler.num_negs_per_pos 28.0 +653 31 training.batch_size 0.0 +653 32 model.embedding_dim 1.0 +653 32 negative_sampler.num_negs_per_pos 64.0 +653 32 training.batch_size 1.0 +653 33 model.embedding_dim 0.0 +653 33 negative_sampler.num_negs_per_pos 15.0 +653 33 training.batch_size 2.0 +653 34 model.embedding_dim 2.0 +653 34 negative_sampler.num_negs_per_pos 5.0 +653 34 training.batch_size 1.0 +653 35 model.embedding_dim 0.0 +653 35 negative_sampler.num_negs_per_pos 18.0 +653 35 training.batch_size 0.0 +653 36 model.embedding_dim 1.0 +653 36 negative_sampler.num_negs_per_pos 19.0 +653 36 training.batch_size 0.0 +653 37 model.embedding_dim 2.0 +653 37 negative_sampler.num_negs_per_pos 10.0 +653 37 training.batch_size 1.0 +653 38 model.embedding_dim 2.0 +653 38 negative_sampler.num_negs_per_pos 24.0 +653 38 training.batch_size 0.0 +653 39 model.embedding_dim 2.0 +653 39 negative_sampler.num_negs_per_pos 5.0 +653 39 training.batch_size 0.0 +653 40 model.embedding_dim 0.0 +653 40 negative_sampler.num_negs_per_pos 62.0 +653 40 training.batch_size 0.0 +653 41 model.embedding_dim 0.0 +653 41 negative_sampler.num_negs_per_pos 14.0 +653 41 training.batch_size 1.0 +653 42 model.embedding_dim 2.0 +653 42 negative_sampler.num_negs_per_pos 74.0 +653 42 training.batch_size 2.0 +653 43 
model.embedding_dim 2.0 +653 43 negative_sampler.num_negs_per_pos 98.0 +653 43 training.batch_size 2.0 +653 44 model.embedding_dim 2.0 +653 44 negative_sampler.num_negs_per_pos 38.0 +653 44 training.batch_size 0.0 +653 45 model.embedding_dim 2.0 +653 45 negative_sampler.num_negs_per_pos 96.0 +653 45 training.batch_size 1.0 +653 46 model.embedding_dim 1.0 +653 46 negative_sampler.num_negs_per_pos 83.0 +653 46 training.batch_size 0.0 +653 47 model.embedding_dim 0.0 +653 47 negative_sampler.num_negs_per_pos 2.0 +653 47 training.batch_size 0.0 +653 48 model.embedding_dim 0.0 +653 48 negative_sampler.num_negs_per_pos 52.0 +653 48 training.batch_size 2.0 +653 49 model.embedding_dim 2.0 +653 49 negative_sampler.num_negs_per_pos 42.0 +653 49 training.batch_size 0.0 +653 50 model.embedding_dim 2.0 +653 50 negative_sampler.num_negs_per_pos 37.0 +653 50 training.batch_size 0.0 +653 51 model.embedding_dim 1.0 +653 51 negative_sampler.num_negs_per_pos 5.0 +653 51 training.batch_size 2.0 +653 52 model.embedding_dim 0.0 +653 52 negative_sampler.num_negs_per_pos 43.0 +653 52 training.batch_size 2.0 +653 53 model.embedding_dim 2.0 +653 53 negative_sampler.num_negs_per_pos 0.0 +653 53 training.batch_size 0.0 +653 54 model.embedding_dim 1.0 +653 54 negative_sampler.num_negs_per_pos 59.0 +653 54 training.batch_size 2.0 +653 55 model.embedding_dim 2.0 +653 55 negative_sampler.num_negs_per_pos 37.0 +653 55 training.batch_size 1.0 +653 56 model.embedding_dim 1.0 +653 56 negative_sampler.num_negs_per_pos 79.0 +653 56 training.batch_size 1.0 +653 57 model.embedding_dim 1.0 +653 57 negative_sampler.num_negs_per_pos 25.0 +653 57 training.batch_size 1.0 +653 58 model.embedding_dim 1.0 +653 58 negative_sampler.num_negs_per_pos 9.0 +653 58 training.batch_size 0.0 +653 59 model.embedding_dim 1.0 +653 59 negative_sampler.num_negs_per_pos 49.0 +653 59 training.batch_size 0.0 +653 60 model.embedding_dim 0.0 +653 60 negative_sampler.num_negs_per_pos 70.0 +653 60 training.batch_size 0.0 +653 61 
model.embedding_dim 0.0 +653 61 negative_sampler.num_negs_per_pos 35.0 +653 61 training.batch_size 2.0 +653 62 model.embedding_dim 0.0 +653 62 negative_sampler.num_negs_per_pos 31.0 +653 62 training.batch_size 1.0 +653 63 model.embedding_dim 2.0 +653 63 negative_sampler.num_negs_per_pos 66.0 +653 63 training.batch_size 2.0 +653 64 model.embedding_dim 0.0 +653 64 negative_sampler.num_negs_per_pos 2.0 +653 64 training.batch_size 2.0 +653 65 model.embedding_dim 1.0 +653 65 negative_sampler.num_negs_per_pos 23.0 +653 65 training.batch_size 1.0 +653 66 model.embedding_dim 1.0 +653 66 negative_sampler.num_negs_per_pos 89.0 +653 66 training.batch_size 0.0 +653 67 model.embedding_dim 0.0 +653 67 negative_sampler.num_negs_per_pos 63.0 +653 67 training.batch_size 2.0 +653 68 model.embedding_dim 0.0 +653 68 negative_sampler.num_negs_per_pos 66.0 +653 68 training.batch_size 1.0 +653 69 model.embedding_dim 2.0 +653 69 negative_sampler.num_negs_per_pos 50.0 +653 69 training.batch_size 0.0 +653 70 model.embedding_dim 1.0 +653 70 negative_sampler.num_negs_per_pos 49.0 +653 70 training.batch_size 1.0 +653 71 model.embedding_dim 2.0 +653 71 negative_sampler.num_negs_per_pos 14.0 +653 71 training.batch_size 1.0 +653 72 model.embedding_dim 1.0 +653 72 negative_sampler.num_negs_per_pos 62.0 +653 72 training.batch_size 2.0 +653 73 model.embedding_dim 0.0 +653 73 negative_sampler.num_negs_per_pos 33.0 +653 73 training.batch_size 1.0 +653 74 model.embedding_dim 0.0 +653 74 negative_sampler.num_negs_per_pos 25.0 +653 74 training.batch_size 1.0 +653 75 model.embedding_dim 1.0 +653 75 negative_sampler.num_negs_per_pos 13.0 +653 75 training.batch_size 0.0 +653 76 model.embedding_dim 0.0 +653 76 negative_sampler.num_negs_per_pos 74.0 +653 76 training.batch_size 2.0 +653 77 model.embedding_dim 0.0 +653 77 negative_sampler.num_negs_per_pos 87.0 +653 77 training.batch_size 2.0 +653 78 model.embedding_dim 1.0 +653 78 negative_sampler.num_negs_per_pos 41.0 +653 78 training.batch_size 0.0 +653 79 
model.embedding_dim 0.0 +653 79 negative_sampler.num_negs_per_pos 93.0 +653 79 training.batch_size 1.0 +653 80 model.embedding_dim 1.0 +653 80 negative_sampler.num_negs_per_pos 66.0 +653 80 training.batch_size 1.0 +653 81 model.embedding_dim 1.0 +653 81 negative_sampler.num_negs_per_pos 66.0 +653 81 training.batch_size 0.0 +653 82 model.embedding_dim 1.0 +653 82 negative_sampler.num_negs_per_pos 67.0 +653 82 training.batch_size 0.0 +653 83 model.embedding_dim 1.0 +653 83 negative_sampler.num_negs_per_pos 70.0 +653 83 training.batch_size 2.0 +653 84 model.embedding_dim 2.0 +653 84 negative_sampler.num_negs_per_pos 42.0 +653 84 training.batch_size 0.0 +653 85 model.embedding_dim 0.0 +653 85 negative_sampler.num_negs_per_pos 89.0 +653 85 training.batch_size 1.0 +653 86 model.embedding_dim 2.0 +653 86 negative_sampler.num_negs_per_pos 33.0 +653 86 training.batch_size 0.0 +653 87 model.embedding_dim 1.0 +653 87 negative_sampler.num_negs_per_pos 90.0 +653 87 training.batch_size 1.0 +653 88 model.embedding_dim 1.0 +653 88 negative_sampler.num_negs_per_pos 62.0 +653 88 training.batch_size 0.0 +653 89 model.embedding_dim 1.0 +653 89 negative_sampler.num_negs_per_pos 26.0 +653 89 training.batch_size 0.0 +653 90 model.embedding_dim 1.0 +653 90 negative_sampler.num_negs_per_pos 96.0 +653 90 training.batch_size 2.0 +653 91 model.embedding_dim 0.0 +653 91 negative_sampler.num_negs_per_pos 87.0 +653 91 training.batch_size 2.0 +653 92 model.embedding_dim 2.0 +653 92 negative_sampler.num_negs_per_pos 36.0 +653 92 training.batch_size 2.0 +653 93 model.embedding_dim 0.0 +653 93 negative_sampler.num_negs_per_pos 8.0 +653 93 training.batch_size 2.0 +653 94 model.embedding_dim 1.0 +653 94 negative_sampler.num_negs_per_pos 59.0 +653 94 training.batch_size 1.0 +653 95 model.embedding_dim 0.0 +653 95 negative_sampler.num_negs_per_pos 40.0 +653 95 training.batch_size 0.0 +653 96 model.embedding_dim 0.0 +653 96 negative_sampler.num_negs_per_pos 44.0 +653 96 training.batch_size 2.0 +653 97 
model.embedding_dim 0.0 +653 97 negative_sampler.num_negs_per_pos 89.0 +653 97 training.batch_size 2.0 +653 98 model.embedding_dim 2.0 +653 98 negative_sampler.num_negs_per_pos 26.0 +653 98 training.batch_size 2.0 +653 99 model.embedding_dim 2.0 +653 99 negative_sampler.num_negs_per_pos 5.0 +653 99 training.batch_size 0.0 +653 100 model.embedding_dim 0.0 +653 100 negative_sampler.num_negs_per_pos 67.0 +653 100 training.batch_size 2.0 +653 1 dataset """kinships""" +653 1 model """simple""" +653 1 loss """bceaftersigmoid""" +653 1 regularizer """no""" +653 1 optimizer """adadelta""" +653 1 training_loop """owa""" +653 1 negative_sampler """basic""" +653 1 evaluator """rankbased""" +653 2 dataset """kinships""" +653 2 model """simple""" +653 2 loss """bceaftersigmoid""" +653 2 regularizer """no""" +653 2 optimizer """adadelta""" +653 2 training_loop """owa""" +653 2 negative_sampler """basic""" +653 2 evaluator """rankbased""" +653 3 dataset """kinships""" +653 3 model """simple""" +653 3 loss """bceaftersigmoid""" +653 3 regularizer """no""" +653 3 optimizer """adadelta""" +653 3 training_loop """owa""" +653 3 negative_sampler """basic""" +653 3 evaluator """rankbased""" +653 4 dataset """kinships""" +653 4 model """simple""" +653 4 loss """bceaftersigmoid""" +653 4 regularizer """no""" +653 4 optimizer """adadelta""" +653 4 training_loop """owa""" +653 4 negative_sampler """basic""" +653 4 evaluator """rankbased""" +653 5 dataset """kinships""" +653 5 model """simple""" +653 5 loss """bceaftersigmoid""" +653 5 regularizer """no""" +653 5 optimizer """adadelta""" +653 5 training_loop """owa""" +653 5 negative_sampler """basic""" +653 5 evaluator """rankbased""" +653 6 dataset """kinships""" +653 6 model """simple""" +653 6 loss """bceaftersigmoid""" +653 6 regularizer """no""" +653 6 optimizer """adadelta""" +653 6 training_loop """owa""" +653 6 negative_sampler """basic""" +653 6 evaluator """rankbased""" +653 7 dataset """kinships""" +653 7 model """simple""" +653 
7 loss """bceaftersigmoid""" +653 7 regularizer """no""" +653 7 optimizer """adadelta""" +653 7 training_loop """owa""" +653 7 negative_sampler """basic""" +653 7 evaluator """rankbased""" +653 8 dataset """kinships""" +653 8 model """simple""" +653 8 loss """bceaftersigmoid""" +653 8 regularizer """no""" +653 8 optimizer """adadelta""" +653 8 training_loop """owa""" +653 8 negative_sampler """basic""" +653 8 evaluator """rankbased""" +653 9 dataset """kinships""" +653 9 model """simple""" +653 9 loss """bceaftersigmoid""" +653 9 regularizer """no""" +653 9 optimizer """adadelta""" +653 9 training_loop """owa""" +653 9 negative_sampler """basic""" +653 9 evaluator """rankbased""" +653 10 dataset """kinships""" +653 10 model """simple""" +653 10 loss """bceaftersigmoid""" +653 10 regularizer """no""" +653 10 optimizer """adadelta""" +653 10 training_loop """owa""" +653 10 negative_sampler """basic""" +653 10 evaluator """rankbased""" +653 11 dataset """kinships""" +653 11 model """simple""" +653 11 loss """bceaftersigmoid""" +653 11 regularizer """no""" +653 11 optimizer """adadelta""" +653 11 training_loop """owa""" +653 11 negative_sampler """basic""" +653 11 evaluator """rankbased""" +653 12 dataset """kinships""" +653 12 model """simple""" +653 12 loss """bceaftersigmoid""" +653 12 regularizer """no""" +653 12 optimizer """adadelta""" +653 12 training_loop """owa""" +653 12 negative_sampler """basic""" +653 12 evaluator """rankbased""" +653 13 dataset """kinships""" +653 13 model """simple""" +653 13 loss """bceaftersigmoid""" +653 13 regularizer """no""" +653 13 optimizer """adadelta""" +653 13 training_loop """owa""" +653 13 negative_sampler """basic""" +653 13 evaluator """rankbased""" +653 14 dataset """kinships""" +653 14 model """simple""" +653 14 loss """bceaftersigmoid""" +653 14 regularizer """no""" +653 14 optimizer """adadelta""" +653 14 training_loop """owa""" +653 14 negative_sampler """basic""" +653 14 evaluator """rankbased""" +653 15 dataset 
"""kinships""" +653 15 model """simple""" +653 15 loss """bceaftersigmoid""" +653 15 regularizer """no""" +653 15 optimizer """adadelta""" +653 15 training_loop """owa""" +653 15 negative_sampler """basic""" +653 15 evaluator """rankbased""" +653 16 dataset """kinships""" +653 16 model """simple""" +653 16 loss """bceaftersigmoid""" +653 16 regularizer """no""" +653 16 optimizer """adadelta""" +653 16 training_loop """owa""" +653 16 negative_sampler """basic""" +653 16 evaluator """rankbased""" +653 17 dataset """kinships""" +653 17 model """simple""" +653 17 loss """bceaftersigmoid""" +653 17 regularizer """no""" +653 17 optimizer """adadelta""" +653 17 training_loop """owa""" +653 17 negative_sampler """basic""" +653 17 evaluator """rankbased""" +653 18 dataset """kinships""" +653 18 model """simple""" +653 18 loss """bceaftersigmoid""" +653 18 regularizer """no""" +653 18 optimizer """adadelta""" +653 18 training_loop """owa""" +653 18 negative_sampler """basic""" +653 18 evaluator """rankbased""" +653 19 dataset """kinships""" +653 19 model """simple""" +653 19 loss """bceaftersigmoid""" +653 19 regularizer """no""" +653 19 optimizer """adadelta""" +653 19 training_loop """owa""" +653 19 negative_sampler """basic""" +653 19 evaluator """rankbased""" +653 20 dataset """kinships""" +653 20 model """simple""" +653 20 loss """bceaftersigmoid""" +653 20 regularizer """no""" +653 20 optimizer """adadelta""" +653 20 training_loop """owa""" +653 20 negative_sampler """basic""" +653 20 evaluator """rankbased""" +653 21 dataset """kinships""" +653 21 model """simple""" +653 21 loss """bceaftersigmoid""" +653 21 regularizer """no""" +653 21 optimizer """adadelta""" +653 21 training_loop """owa""" +653 21 negative_sampler """basic""" +653 21 evaluator """rankbased""" +653 22 dataset """kinships""" +653 22 model """simple""" +653 22 loss """bceaftersigmoid""" +653 22 regularizer """no""" +653 22 optimizer """adadelta""" +653 22 training_loop """owa""" +653 22 
negative_sampler """basic""" +653 22 evaluator """rankbased""" +653 23 dataset """kinships""" +653 23 model """simple""" +653 23 loss """bceaftersigmoid""" +653 23 regularizer """no""" +653 23 optimizer """adadelta""" +653 23 training_loop """owa""" +653 23 negative_sampler """basic""" +653 23 evaluator """rankbased""" +653 24 dataset """kinships""" +653 24 model """simple""" +653 24 loss """bceaftersigmoid""" +653 24 regularizer """no""" +653 24 optimizer """adadelta""" +653 24 training_loop """owa""" +653 24 negative_sampler """basic""" +653 24 evaluator """rankbased""" +653 25 dataset """kinships""" +653 25 model """simple""" +653 25 loss """bceaftersigmoid""" +653 25 regularizer """no""" +653 25 optimizer """adadelta""" +653 25 training_loop """owa""" +653 25 negative_sampler """basic""" +653 25 evaluator """rankbased""" +653 26 dataset """kinships""" +653 26 model """simple""" +653 26 loss """bceaftersigmoid""" +653 26 regularizer """no""" +653 26 optimizer """adadelta""" +653 26 training_loop """owa""" +653 26 negative_sampler """basic""" +653 26 evaluator """rankbased""" +653 27 dataset """kinships""" +653 27 model """simple""" +653 27 loss """bceaftersigmoid""" +653 27 regularizer """no""" +653 27 optimizer """adadelta""" +653 27 training_loop """owa""" +653 27 negative_sampler """basic""" +653 27 evaluator """rankbased""" +653 28 dataset """kinships""" +653 28 model """simple""" +653 28 loss """bceaftersigmoid""" +653 28 regularizer """no""" +653 28 optimizer """adadelta""" +653 28 training_loop """owa""" +653 28 negative_sampler """basic""" +653 28 evaluator """rankbased""" +653 29 dataset """kinships""" +653 29 model """simple""" +653 29 loss """bceaftersigmoid""" +653 29 regularizer """no""" +653 29 optimizer """adadelta""" +653 29 training_loop """owa""" +653 29 negative_sampler """basic""" +653 29 evaluator """rankbased""" +653 30 dataset """kinships""" +653 30 model """simple""" +653 30 loss """bceaftersigmoid""" +653 30 regularizer """no""" +653 30 
optimizer """adadelta""" +653 30 training_loop """owa""" +653 30 negative_sampler """basic""" +653 30 evaluator """rankbased""" +653 31 dataset """kinships""" +653 31 model """simple""" +653 31 loss """bceaftersigmoid""" +653 31 regularizer """no""" +653 31 optimizer """adadelta""" +653 31 training_loop """owa""" +653 31 negative_sampler """basic""" +653 31 evaluator """rankbased""" +653 32 dataset """kinships""" +653 32 model """simple""" +653 32 loss """bceaftersigmoid""" +653 32 regularizer """no""" +653 32 optimizer """adadelta""" +653 32 training_loop """owa""" +653 32 negative_sampler """basic""" +653 32 evaluator """rankbased""" +653 33 dataset """kinships""" +653 33 model """simple""" +653 33 loss """bceaftersigmoid""" +653 33 regularizer """no""" +653 33 optimizer """adadelta""" +653 33 training_loop """owa""" +653 33 negative_sampler """basic""" +653 33 evaluator """rankbased""" +653 34 dataset """kinships""" +653 34 model """simple""" +653 34 loss """bceaftersigmoid""" +653 34 regularizer """no""" +653 34 optimizer """adadelta""" +653 34 training_loop """owa""" +653 34 negative_sampler """basic""" +653 34 evaluator """rankbased""" +653 35 dataset """kinships""" +653 35 model """simple""" +653 35 loss """bceaftersigmoid""" +653 35 regularizer """no""" +653 35 optimizer """adadelta""" +653 35 training_loop """owa""" +653 35 negative_sampler """basic""" +653 35 evaluator """rankbased""" +653 36 dataset """kinships""" +653 36 model """simple""" +653 36 loss """bceaftersigmoid""" +653 36 regularizer """no""" +653 36 optimizer """adadelta""" +653 36 training_loop """owa""" +653 36 negative_sampler """basic""" +653 36 evaluator """rankbased""" +653 37 dataset """kinships""" +653 37 model """simple""" +653 37 loss """bceaftersigmoid""" +653 37 regularizer """no""" +653 37 optimizer """adadelta""" +653 37 training_loop """owa""" +653 37 negative_sampler """basic""" +653 37 evaluator """rankbased""" +653 38 dataset """kinships""" +653 38 model """simple""" +653 38 
loss """bceaftersigmoid""" +653 38 regularizer """no""" +653 38 optimizer """adadelta""" +653 38 training_loop """owa""" +653 38 negative_sampler """basic""" +653 38 evaluator """rankbased""" +653 39 dataset """kinships""" +653 39 model """simple""" +653 39 loss """bceaftersigmoid""" +653 39 regularizer """no""" +653 39 optimizer """adadelta""" +653 39 training_loop """owa""" +653 39 negative_sampler """basic""" +653 39 evaluator """rankbased""" +653 40 dataset """kinships""" +653 40 model """simple""" +653 40 loss """bceaftersigmoid""" +653 40 regularizer """no""" +653 40 optimizer """adadelta""" +653 40 training_loop """owa""" +653 40 negative_sampler """basic""" +653 40 evaluator """rankbased""" +653 41 dataset """kinships""" +653 41 model """simple""" +653 41 loss """bceaftersigmoid""" +653 41 regularizer """no""" +653 41 optimizer """adadelta""" +653 41 training_loop """owa""" +653 41 negative_sampler """basic""" +653 41 evaluator """rankbased""" +653 42 dataset """kinships""" +653 42 model """simple""" +653 42 loss """bceaftersigmoid""" +653 42 regularizer """no""" +653 42 optimizer """adadelta""" +653 42 training_loop """owa""" +653 42 negative_sampler """basic""" +653 42 evaluator """rankbased""" +653 43 dataset """kinships""" +653 43 model """simple""" +653 43 loss """bceaftersigmoid""" +653 43 regularizer """no""" +653 43 optimizer """adadelta""" +653 43 training_loop """owa""" +653 43 negative_sampler """basic""" +653 43 evaluator """rankbased""" +653 44 dataset """kinships""" +653 44 model """simple""" +653 44 loss """bceaftersigmoid""" +653 44 regularizer """no""" +653 44 optimizer """adadelta""" +653 44 training_loop """owa""" +653 44 negative_sampler """basic""" +653 44 evaluator """rankbased""" +653 45 dataset """kinships""" +653 45 model """simple""" +653 45 loss """bceaftersigmoid""" +653 45 regularizer """no""" +653 45 optimizer """adadelta""" +653 45 training_loop """owa""" +653 45 negative_sampler """basic""" +653 45 evaluator """rankbased""" 
+653 46 dataset """kinships""" +653 46 model """simple""" +653 46 loss """bceaftersigmoid""" +653 46 regularizer """no""" +653 46 optimizer """adadelta""" +653 46 training_loop """owa""" +653 46 negative_sampler """basic""" +653 46 evaluator """rankbased""" +653 47 dataset """kinships""" +653 47 model """simple""" +653 47 loss """bceaftersigmoid""" +653 47 regularizer """no""" +653 47 optimizer """adadelta""" +653 47 training_loop """owa""" +653 47 negative_sampler """basic""" +653 47 evaluator """rankbased""" +653 48 dataset """kinships""" +653 48 model """simple""" +653 48 loss """bceaftersigmoid""" +653 48 regularizer """no""" +653 48 optimizer """adadelta""" +653 48 training_loop """owa""" +653 48 negative_sampler """basic""" +653 48 evaluator """rankbased""" +653 49 dataset """kinships""" +653 49 model """simple""" +653 49 loss """bceaftersigmoid""" +653 49 regularizer """no""" +653 49 optimizer """adadelta""" +653 49 training_loop """owa""" +653 49 negative_sampler """basic""" +653 49 evaluator """rankbased""" +653 50 dataset """kinships""" +653 50 model """simple""" +653 50 loss """bceaftersigmoid""" +653 50 regularizer """no""" +653 50 optimizer """adadelta""" +653 50 training_loop """owa""" +653 50 negative_sampler """basic""" +653 50 evaluator """rankbased""" +653 51 dataset """kinships""" +653 51 model """simple""" +653 51 loss """bceaftersigmoid""" +653 51 regularizer """no""" +653 51 optimizer """adadelta""" +653 51 training_loop """owa""" +653 51 negative_sampler """basic""" +653 51 evaluator """rankbased""" +653 52 dataset """kinships""" +653 52 model """simple""" +653 52 loss """bceaftersigmoid""" +653 52 regularizer """no""" +653 52 optimizer """adadelta""" +653 52 training_loop """owa""" +653 52 negative_sampler """basic""" +653 52 evaluator """rankbased""" +653 53 dataset """kinships""" +653 53 model """simple""" +653 53 loss """bceaftersigmoid""" +653 53 regularizer """no""" +653 53 optimizer """adadelta""" +653 53 training_loop """owa""" +653 
53 negative_sampler """basic""" +653 53 evaluator """rankbased""" +653 54 dataset """kinships""" +653 54 model """simple""" +653 54 loss """bceaftersigmoid""" +653 54 regularizer """no""" +653 54 optimizer """adadelta""" +653 54 training_loop """owa""" +653 54 negative_sampler """basic""" +653 54 evaluator """rankbased""" +653 55 dataset """kinships""" +653 55 model """simple""" +653 55 loss """bceaftersigmoid""" +653 55 regularizer """no""" +653 55 optimizer """adadelta""" +653 55 training_loop """owa""" +653 55 negative_sampler """basic""" +653 55 evaluator """rankbased""" +653 56 dataset """kinships""" +653 56 model """simple""" +653 56 loss """bceaftersigmoid""" +653 56 regularizer """no""" +653 56 optimizer """adadelta""" +653 56 training_loop """owa""" +653 56 negative_sampler """basic""" +653 56 evaluator """rankbased""" +653 57 dataset """kinships""" +653 57 model """simple""" +653 57 loss """bceaftersigmoid""" +653 57 regularizer """no""" +653 57 optimizer """adadelta""" +653 57 training_loop """owa""" +653 57 negative_sampler """basic""" +653 57 evaluator """rankbased""" +653 58 dataset """kinships""" +653 58 model """simple""" +653 58 loss """bceaftersigmoid""" +653 58 regularizer """no""" +653 58 optimizer """adadelta""" +653 58 training_loop """owa""" +653 58 negative_sampler """basic""" +653 58 evaluator """rankbased""" +653 59 dataset """kinships""" +653 59 model """simple""" +653 59 loss """bceaftersigmoid""" +653 59 regularizer """no""" +653 59 optimizer """adadelta""" +653 59 training_loop """owa""" +653 59 negative_sampler """basic""" +653 59 evaluator """rankbased""" +653 60 dataset """kinships""" +653 60 model """simple""" +653 60 loss """bceaftersigmoid""" +653 60 regularizer """no""" +653 60 optimizer """adadelta""" +653 60 training_loop """owa""" +653 60 negative_sampler """basic""" +653 60 evaluator """rankbased""" +653 61 dataset """kinships""" +653 61 model """simple""" +653 61 loss """bceaftersigmoid""" +653 61 regularizer """no""" +653 
61 optimizer """adadelta""" +653 61 training_loop """owa""" +653 61 negative_sampler """basic""" +653 61 evaluator """rankbased""" +653 62 dataset """kinships""" +653 62 model """simple""" +653 62 loss """bceaftersigmoid""" +653 62 regularizer """no""" +653 62 optimizer """adadelta""" +653 62 training_loop """owa""" +653 62 negative_sampler """basic""" +653 62 evaluator """rankbased""" +653 63 dataset """kinships""" +653 63 model """simple""" +653 63 loss """bceaftersigmoid""" +653 63 regularizer """no""" +653 63 optimizer """adadelta""" +653 63 training_loop """owa""" +653 63 negative_sampler """basic""" +653 63 evaluator """rankbased""" +653 64 dataset """kinships""" +653 64 model """simple""" +653 64 loss """bceaftersigmoid""" +653 64 regularizer """no""" +653 64 optimizer """adadelta""" +653 64 training_loop """owa""" +653 64 negative_sampler """basic""" +653 64 evaluator """rankbased""" +653 65 dataset """kinships""" +653 65 model """simple""" +653 65 loss """bceaftersigmoid""" +653 65 regularizer """no""" +653 65 optimizer """adadelta""" +653 65 training_loop """owa""" +653 65 negative_sampler """basic""" +653 65 evaluator """rankbased""" +653 66 dataset """kinships""" +653 66 model """simple""" +653 66 loss """bceaftersigmoid""" +653 66 regularizer """no""" +653 66 optimizer """adadelta""" +653 66 training_loop """owa""" +653 66 negative_sampler """basic""" +653 66 evaluator """rankbased""" +653 67 dataset """kinships""" +653 67 model """simple""" +653 67 loss """bceaftersigmoid""" +653 67 regularizer """no""" +653 67 optimizer """adadelta""" +653 67 training_loop """owa""" +653 67 negative_sampler """basic""" +653 67 evaluator """rankbased""" +653 68 dataset """kinships""" +653 68 model """simple""" +653 68 loss """bceaftersigmoid""" +653 68 regularizer """no""" +653 68 optimizer """adadelta""" +653 68 training_loop """owa""" +653 68 negative_sampler """basic""" +653 68 evaluator """rankbased""" +653 69 dataset """kinships""" +653 69 model """simple""" +653 
69 loss """bceaftersigmoid""" +653 69 regularizer """no""" +653 69 optimizer """adadelta""" +653 69 training_loop """owa""" +653 69 negative_sampler """basic""" +653 69 evaluator """rankbased""" +653 70 dataset """kinships""" +653 70 model """simple""" +653 70 loss """bceaftersigmoid""" +653 70 regularizer """no""" +653 70 optimizer """adadelta""" +653 70 training_loop """owa""" +653 70 negative_sampler """basic""" +653 70 evaluator """rankbased""" +653 71 dataset """kinships""" +653 71 model """simple""" +653 71 loss """bceaftersigmoid""" +653 71 regularizer """no""" +653 71 optimizer """adadelta""" +653 71 training_loop """owa""" +653 71 negative_sampler """basic""" +653 71 evaluator """rankbased""" +653 72 dataset """kinships""" +653 72 model """simple""" +653 72 loss """bceaftersigmoid""" +653 72 regularizer """no""" +653 72 optimizer """adadelta""" +653 72 training_loop """owa""" +653 72 negative_sampler """basic""" +653 72 evaluator """rankbased""" +653 73 dataset """kinships""" +653 73 model """simple""" +653 73 loss """bceaftersigmoid""" +653 73 regularizer """no""" +653 73 optimizer """adadelta""" +653 73 training_loop """owa""" +653 73 negative_sampler """basic""" +653 73 evaluator """rankbased""" +653 74 dataset """kinships""" +653 74 model """simple""" +653 74 loss """bceaftersigmoid""" +653 74 regularizer """no""" +653 74 optimizer """adadelta""" +653 74 training_loop """owa""" +653 74 negative_sampler """basic""" +653 74 evaluator """rankbased""" +653 75 dataset """kinships""" +653 75 model """simple""" +653 75 loss """bceaftersigmoid""" +653 75 regularizer """no""" +653 75 optimizer """adadelta""" +653 75 training_loop """owa""" +653 75 negative_sampler """basic""" +653 75 evaluator """rankbased""" +653 76 dataset """kinships""" +653 76 model """simple""" +653 76 loss """bceaftersigmoid""" +653 76 regularizer """no""" +653 76 optimizer """adadelta""" +653 76 training_loop """owa""" +653 76 negative_sampler """basic""" +653 76 evaluator 
"""rankbased""" +653 77 dataset """kinships""" +653 77 model """simple""" +653 77 loss """bceaftersigmoid""" +653 77 regularizer """no""" +653 77 optimizer """adadelta""" +653 77 training_loop """owa""" +653 77 negative_sampler """basic""" +653 77 evaluator """rankbased""" +653 78 dataset """kinships""" +653 78 model """simple""" +653 78 loss """bceaftersigmoid""" +653 78 regularizer """no""" +653 78 optimizer """adadelta""" +653 78 training_loop """owa""" +653 78 negative_sampler """basic""" +653 78 evaluator """rankbased""" +653 79 dataset """kinships""" +653 79 model """simple""" +653 79 loss """bceaftersigmoid""" +653 79 regularizer """no""" +653 79 optimizer """adadelta""" +653 79 training_loop """owa""" +653 79 negative_sampler """basic""" +653 79 evaluator """rankbased""" +653 80 dataset """kinships""" +653 80 model """simple""" +653 80 loss """bceaftersigmoid""" +653 80 regularizer """no""" +653 80 optimizer """adadelta""" +653 80 training_loop """owa""" +653 80 negative_sampler """basic""" +653 80 evaluator """rankbased""" +653 81 dataset """kinships""" +653 81 model """simple""" +653 81 loss """bceaftersigmoid""" +653 81 regularizer """no""" +653 81 optimizer """adadelta""" +653 81 training_loop """owa""" +653 81 negative_sampler """basic""" +653 81 evaluator """rankbased""" +653 82 dataset """kinships""" +653 82 model """simple""" +653 82 loss """bceaftersigmoid""" +653 82 regularizer """no""" +653 82 optimizer """adadelta""" +653 82 training_loop """owa""" +653 82 negative_sampler """basic""" +653 82 evaluator """rankbased""" +653 83 dataset """kinships""" +653 83 model """simple""" +653 83 loss """bceaftersigmoid""" +653 83 regularizer """no""" +653 83 optimizer """adadelta""" +653 83 training_loop """owa""" +653 83 negative_sampler """basic""" +653 83 evaluator """rankbased""" +653 84 dataset """kinships""" +653 84 model """simple""" +653 84 loss """bceaftersigmoid""" +653 84 regularizer """no""" +653 84 optimizer """adadelta""" +653 84 training_loop 
"""owa""" +653 84 negative_sampler """basic""" +653 84 evaluator """rankbased""" +653 85 dataset """kinships""" +653 85 model """simple""" +653 85 loss """bceaftersigmoid""" +653 85 regularizer """no""" +653 85 optimizer """adadelta""" +653 85 training_loop """owa""" +653 85 negative_sampler """basic""" +653 85 evaluator """rankbased""" +653 86 dataset """kinships""" +653 86 model """simple""" +653 86 loss """bceaftersigmoid""" +653 86 regularizer """no""" +653 86 optimizer """adadelta""" +653 86 training_loop """owa""" +653 86 negative_sampler """basic""" +653 86 evaluator """rankbased""" +653 87 dataset """kinships""" +653 87 model """simple""" +653 87 loss """bceaftersigmoid""" +653 87 regularizer """no""" +653 87 optimizer """adadelta""" +653 87 training_loop """owa""" +653 87 negative_sampler """basic""" +653 87 evaluator """rankbased""" +653 88 dataset """kinships""" +653 88 model """simple""" +653 88 loss """bceaftersigmoid""" +653 88 regularizer """no""" +653 88 optimizer """adadelta""" +653 88 training_loop """owa""" +653 88 negative_sampler """basic""" +653 88 evaluator """rankbased""" +653 89 dataset """kinships""" +653 89 model """simple""" +653 89 loss """bceaftersigmoid""" +653 89 regularizer """no""" +653 89 optimizer """adadelta""" +653 89 training_loop """owa""" +653 89 negative_sampler """basic""" +653 89 evaluator """rankbased""" +653 90 dataset """kinships""" +653 90 model """simple""" +653 90 loss """bceaftersigmoid""" +653 90 regularizer """no""" +653 90 optimizer """adadelta""" +653 90 training_loop """owa""" +653 90 negative_sampler """basic""" +653 90 evaluator """rankbased""" +653 91 dataset """kinships""" +653 91 model """simple""" +653 91 loss """bceaftersigmoid""" +653 91 regularizer """no""" +653 91 optimizer """adadelta""" +653 91 training_loop """owa""" +653 91 negative_sampler """basic""" +653 91 evaluator """rankbased""" +653 92 dataset """kinships""" +653 92 model """simple""" +653 92 loss """bceaftersigmoid""" +653 92 regularizer 
"""no""" +653 92 optimizer """adadelta""" +653 92 training_loop """owa""" +653 92 negative_sampler """basic""" +653 92 evaluator """rankbased""" +653 93 dataset """kinships""" +653 93 model """simple""" +653 93 loss """bceaftersigmoid""" +653 93 regularizer """no""" +653 93 optimizer """adadelta""" +653 93 training_loop """owa""" +653 93 negative_sampler """basic""" +653 93 evaluator """rankbased""" +653 94 dataset """kinships""" +653 94 model """simple""" +653 94 loss """bceaftersigmoid""" +653 94 regularizer """no""" +653 94 optimizer """adadelta""" +653 94 training_loop """owa""" +653 94 negative_sampler """basic""" +653 94 evaluator """rankbased""" +653 95 dataset """kinships""" +653 95 model """simple""" +653 95 loss """bceaftersigmoid""" +653 95 regularizer """no""" +653 95 optimizer """adadelta""" +653 95 training_loop """owa""" +653 95 negative_sampler """basic""" +653 95 evaluator """rankbased""" +653 96 dataset """kinships""" +653 96 model """simple""" +653 96 loss """bceaftersigmoid""" +653 96 regularizer """no""" +653 96 optimizer """adadelta""" +653 96 training_loop """owa""" +653 96 negative_sampler """basic""" +653 96 evaluator """rankbased""" +653 97 dataset """kinships""" +653 97 model """simple""" +653 97 loss """bceaftersigmoid""" +653 97 regularizer """no""" +653 97 optimizer """adadelta""" +653 97 training_loop """owa""" +653 97 negative_sampler """basic""" +653 97 evaluator """rankbased""" +653 98 dataset """kinships""" +653 98 model """simple""" +653 98 loss """bceaftersigmoid""" +653 98 regularizer """no""" +653 98 optimizer """adadelta""" +653 98 training_loop """owa""" +653 98 negative_sampler """basic""" +653 98 evaluator """rankbased""" +653 99 dataset """kinships""" +653 99 model """simple""" +653 99 loss """bceaftersigmoid""" +653 99 regularizer """no""" +653 99 optimizer """adadelta""" +653 99 training_loop """owa""" +653 99 negative_sampler """basic""" +653 99 evaluator """rankbased""" +653 100 dataset """kinships""" +653 100 model 
"""simple""" +653 100 loss """bceaftersigmoid""" +653 100 regularizer """no""" +653 100 optimizer """adadelta""" +653 100 training_loop """owa""" +653 100 negative_sampler """basic""" +653 100 evaluator """rankbased""" +654 1 model.embedding_dim 0.0 +654 1 negative_sampler.num_negs_per_pos 80.0 +654 1 training.batch_size 2.0 +654 2 model.embedding_dim 2.0 +654 2 negative_sampler.num_negs_per_pos 39.0 +654 2 training.batch_size 0.0 +654 3 model.embedding_dim 0.0 +654 3 negative_sampler.num_negs_per_pos 88.0 +654 3 training.batch_size 1.0 +654 4 model.embedding_dim 1.0 +654 4 negative_sampler.num_negs_per_pos 88.0 +654 4 training.batch_size 1.0 +654 5 model.embedding_dim 0.0 +654 5 negative_sampler.num_negs_per_pos 4.0 +654 5 training.batch_size 2.0 +654 6 model.embedding_dim 1.0 +654 6 negative_sampler.num_negs_per_pos 31.0 +654 6 training.batch_size 0.0 +654 7 model.embedding_dim 1.0 +654 7 negative_sampler.num_negs_per_pos 64.0 +654 7 training.batch_size 0.0 +654 8 model.embedding_dim 1.0 +654 8 negative_sampler.num_negs_per_pos 54.0 +654 8 training.batch_size 2.0 +654 9 model.embedding_dim 0.0 +654 9 negative_sampler.num_negs_per_pos 68.0 +654 9 training.batch_size 1.0 +654 10 model.embedding_dim 2.0 +654 10 negative_sampler.num_negs_per_pos 65.0 +654 10 training.batch_size 0.0 +654 11 model.embedding_dim 2.0 +654 11 negative_sampler.num_negs_per_pos 75.0 +654 11 training.batch_size 0.0 +654 12 model.embedding_dim 2.0 +654 12 negative_sampler.num_negs_per_pos 70.0 +654 12 training.batch_size 0.0 +654 13 model.embedding_dim 2.0 +654 13 negative_sampler.num_negs_per_pos 20.0 +654 13 training.batch_size 2.0 +654 14 model.embedding_dim 2.0 +654 14 negative_sampler.num_negs_per_pos 89.0 +654 14 training.batch_size 0.0 +654 15 model.embedding_dim 1.0 +654 15 negative_sampler.num_negs_per_pos 11.0 +654 15 training.batch_size 0.0 +654 16 model.embedding_dim 2.0 +654 16 negative_sampler.num_negs_per_pos 4.0 +654 16 training.batch_size 2.0 +654 17 model.embedding_dim 2.0 
+654 17 negative_sampler.num_negs_per_pos 74.0 +654 17 training.batch_size 1.0 +654 18 model.embedding_dim 1.0 +654 18 negative_sampler.num_negs_per_pos 1.0 +654 18 training.batch_size 1.0 +654 19 model.embedding_dim 0.0 +654 19 negative_sampler.num_negs_per_pos 76.0 +654 19 training.batch_size 2.0 +654 20 model.embedding_dim 1.0 +654 20 negative_sampler.num_negs_per_pos 82.0 +654 20 training.batch_size 0.0 +654 21 model.embedding_dim 0.0 +654 21 negative_sampler.num_negs_per_pos 71.0 +654 21 training.batch_size 1.0 +654 22 model.embedding_dim 1.0 +654 22 negative_sampler.num_negs_per_pos 71.0 +654 22 training.batch_size 0.0 +654 23 model.embedding_dim 2.0 +654 23 negative_sampler.num_negs_per_pos 78.0 +654 23 training.batch_size 1.0 +654 24 model.embedding_dim 2.0 +654 24 negative_sampler.num_negs_per_pos 88.0 +654 24 training.batch_size 2.0 +654 25 model.embedding_dim 1.0 +654 25 negative_sampler.num_negs_per_pos 10.0 +654 25 training.batch_size 2.0 +654 26 model.embedding_dim 1.0 +654 26 negative_sampler.num_negs_per_pos 97.0 +654 26 training.batch_size 1.0 +654 27 model.embedding_dim 0.0 +654 27 negative_sampler.num_negs_per_pos 79.0 +654 27 training.batch_size 0.0 +654 28 model.embedding_dim 0.0 +654 28 negative_sampler.num_negs_per_pos 87.0 +654 28 training.batch_size 0.0 +654 29 model.embedding_dim 2.0 +654 29 negative_sampler.num_negs_per_pos 12.0 +654 29 training.batch_size 0.0 +654 30 model.embedding_dim 1.0 +654 30 negative_sampler.num_negs_per_pos 45.0 +654 30 training.batch_size 1.0 +654 31 model.embedding_dim 1.0 +654 31 negative_sampler.num_negs_per_pos 27.0 +654 31 training.batch_size 2.0 +654 32 model.embedding_dim 1.0 +654 32 negative_sampler.num_negs_per_pos 87.0 +654 32 training.batch_size 0.0 +654 33 model.embedding_dim 0.0 +654 33 negative_sampler.num_negs_per_pos 97.0 +654 33 training.batch_size 0.0 +654 34 model.embedding_dim 1.0 +654 34 negative_sampler.num_negs_per_pos 92.0 +654 34 training.batch_size 0.0 +654 35 model.embedding_dim 1.0 
+654 35 negative_sampler.num_negs_per_pos 38.0 +654 35 training.batch_size 1.0 +654 36 model.embedding_dim 0.0 +654 36 negative_sampler.num_negs_per_pos 95.0 +654 36 training.batch_size 2.0 +654 37 model.embedding_dim 2.0 +654 37 negative_sampler.num_negs_per_pos 27.0 +654 37 training.batch_size 1.0 +654 38 model.embedding_dim 1.0 +654 38 negative_sampler.num_negs_per_pos 67.0 +654 38 training.batch_size 0.0 +654 39 model.embedding_dim 2.0 +654 39 negative_sampler.num_negs_per_pos 37.0 +654 39 training.batch_size 1.0 +654 40 model.embedding_dim 0.0 +654 40 negative_sampler.num_negs_per_pos 85.0 +654 40 training.batch_size 2.0 +654 41 model.embedding_dim 0.0 +654 41 negative_sampler.num_negs_per_pos 17.0 +654 41 training.batch_size 2.0 +654 42 model.embedding_dim 1.0 +654 42 negative_sampler.num_negs_per_pos 20.0 +654 42 training.batch_size 0.0 +654 43 model.embedding_dim 1.0 +654 43 negative_sampler.num_negs_per_pos 46.0 +654 43 training.batch_size 2.0 +654 44 model.embedding_dim 1.0 +654 44 negative_sampler.num_negs_per_pos 49.0 +654 44 training.batch_size 0.0 +654 45 model.embedding_dim 1.0 +654 45 negative_sampler.num_negs_per_pos 48.0 +654 45 training.batch_size 0.0 +654 46 model.embedding_dim 0.0 +654 46 negative_sampler.num_negs_per_pos 42.0 +654 46 training.batch_size 2.0 +654 47 model.embedding_dim 0.0 +654 47 negative_sampler.num_negs_per_pos 59.0 +654 47 training.batch_size 0.0 +654 48 model.embedding_dim 2.0 +654 48 negative_sampler.num_negs_per_pos 75.0 +654 48 training.batch_size 0.0 +654 49 model.embedding_dim 2.0 +654 49 negative_sampler.num_negs_per_pos 72.0 +654 49 training.batch_size 1.0 +654 50 model.embedding_dim 0.0 +654 50 negative_sampler.num_negs_per_pos 28.0 +654 50 training.batch_size 0.0 +654 51 model.embedding_dim 0.0 +654 51 negative_sampler.num_negs_per_pos 4.0 +654 51 training.batch_size 0.0 +654 52 model.embedding_dim 2.0 +654 52 negative_sampler.num_negs_per_pos 82.0 +654 52 training.batch_size 2.0 +654 53 model.embedding_dim 0.0 
+654 53 negative_sampler.num_negs_per_pos 20.0 +654 53 training.batch_size 2.0 +654 54 model.embedding_dim 0.0 +654 54 negative_sampler.num_negs_per_pos 88.0 +654 54 training.batch_size 2.0 +654 55 model.embedding_dim 0.0 +654 55 negative_sampler.num_negs_per_pos 12.0 +654 55 training.batch_size 1.0 +654 56 model.embedding_dim 1.0 +654 56 negative_sampler.num_negs_per_pos 92.0 +654 56 training.batch_size 0.0 +654 57 model.embedding_dim 0.0 +654 57 negative_sampler.num_negs_per_pos 71.0 +654 57 training.batch_size 1.0 +654 58 model.embedding_dim 1.0 +654 58 negative_sampler.num_negs_per_pos 88.0 +654 58 training.batch_size 0.0 +654 59 model.embedding_dim 2.0 +654 59 negative_sampler.num_negs_per_pos 75.0 +654 59 training.batch_size 0.0 +654 60 model.embedding_dim 1.0 +654 60 negative_sampler.num_negs_per_pos 34.0 +654 60 training.batch_size 2.0 +654 61 model.embedding_dim 2.0 +654 61 negative_sampler.num_negs_per_pos 37.0 +654 61 training.batch_size 2.0 +654 62 model.embedding_dim 1.0 +654 62 negative_sampler.num_negs_per_pos 7.0 +654 62 training.batch_size 2.0 +654 63 model.embedding_dim 0.0 +654 63 negative_sampler.num_negs_per_pos 28.0 +654 63 training.batch_size 0.0 +654 64 model.embedding_dim 0.0 +654 64 negative_sampler.num_negs_per_pos 99.0 +654 64 training.batch_size 1.0 +654 65 model.embedding_dim 0.0 +654 65 negative_sampler.num_negs_per_pos 65.0 +654 65 training.batch_size 0.0 +654 66 model.embedding_dim 2.0 +654 66 negative_sampler.num_negs_per_pos 17.0 +654 66 training.batch_size 2.0 +654 67 model.embedding_dim 1.0 +654 67 negative_sampler.num_negs_per_pos 18.0 +654 67 training.batch_size 1.0 +654 68 model.embedding_dim 2.0 +654 68 negative_sampler.num_negs_per_pos 98.0 +654 68 training.batch_size 0.0 +654 69 model.embedding_dim 1.0 +654 69 negative_sampler.num_negs_per_pos 39.0 +654 69 training.batch_size 2.0 +654 70 model.embedding_dim 1.0 +654 70 negative_sampler.num_negs_per_pos 87.0 +654 70 training.batch_size 2.0 +654 71 model.embedding_dim 2.0 
+654 71 negative_sampler.num_negs_per_pos 57.0 +654 71 training.batch_size 1.0 +654 72 model.embedding_dim 2.0 +654 72 negative_sampler.num_negs_per_pos 84.0 +654 72 training.batch_size 1.0 +654 73 model.embedding_dim 0.0 +654 73 negative_sampler.num_negs_per_pos 59.0 +654 73 training.batch_size 0.0 +654 74 model.embedding_dim 1.0 +654 74 negative_sampler.num_negs_per_pos 98.0 +654 74 training.batch_size 1.0 +654 75 model.embedding_dim 1.0 +654 75 negative_sampler.num_negs_per_pos 39.0 +654 75 training.batch_size 0.0 +654 76 model.embedding_dim 1.0 +654 76 negative_sampler.num_negs_per_pos 58.0 +654 76 training.batch_size 0.0 +654 77 model.embedding_dim 1.0 +654 77 negative_sampler.num_negs_per_pos 6.0 +654 77 training.batch_size 2.0 +654 78 model.embedding_dim 1.0 +654 78 negative_sampler.num_negs_per_pos 40.0 +654 78 training.batch_size 0.0 +654 79 model.embedding_dim 2.0 +654 79 negative_sampler.num_negs_per_pos 85.0 +654 79 training.batch_size 1.0 +654 80 model.embedding_dim 1.0 +654 80 negative_sampler.num_negs_per_pos 68.0 +654 80 training.batch_size 2.0 +654 81 model.embedding_dim 0.0 +654 81 negative_sampler.num_negs_per_pos 95.0 +654 81 training.batch_size 1.0 +654 82 model.embedding_dim 2.0 +654 82 negative_sampler.num_negs_per_pos 86.0 +654 82 training.batch_size 1.0 +654 83 model.embedding_dim 0.0 +654 83 negative_sampler.num_negs_per_pos 57.0 +654 83 training.batch_size 2.0 +654 84 model.embedding_dim 1.0 +654 84 negative_sampler.num_negs_per_pos 10.0 +654 84 training.batch_size 2.0 +654 85 model.embedding_dim 1.0 +654 85 negative_sampler.num_negs_per_pos 66.0 +654 85 training.batch_size 1.0 +654 86 model.embedding_dim 2.0 +654 86 negative_sampler.num_negs_per_pos 90.0 +654 86 training.batch_size 1.0 +654 87 model.embedding_dim 2.0 +654 87 negative_sampler.num_negs_per_pos 60.0 +654 87 training.batch_size 0.0 +654 88 model.embedding_dim 0.0 +654 88 negative_sampler.num_negs_per_pos 74.0 +654 88 training.batch_size 1.0 +654 89 model.embedding_dim 2.0 
+654 89 negative_sampler.num_negs_per_pos 45.0 +654 89 training.batch_size 0.0 +654 90 model.embedding_dim 0.0 +654 90 negative_sampler.num_negs_per_pos 94.0 +654 90 training.batch_size 2.0 +654 91 model.embedding_dim 2.0 +654 91 negative_sampler.num_negs_per_pos 56.0 +654 91 training.batch_size 2.0 +654 92 model.embedding_dim 0.0 +654 92 negative_sampler.num_negs_per_pos 15.0 +654 92 training.batch_size 1.0 +654 93 model.embedding_dim 1.0 +654 93 negative_sampler.num_negs_per_pos 5.0 +654 93 training.batch_size 0.0 +654 94 model.embedding_dim 2.0 +654 94 negative_sampler.num_negs_per_pos 80.0 +654 94 training.batch_size 0.0 +654 95 model.embedding_dim 0.0 +654 95 negative_sampler.num_negs_per_pos 63.0 +654 95 training.batch_size 2.0 +654 96 model.embedding_dim 0.0 +654 96 negative_sampler.num_negs_per_pos 55.0 +654 96 training.batch_size 2.0 +654 97 model.embedding_dim 1.0 +654 97 negative_sampler.num_negs_per_pos 36.0 +654 97 training.batch_size 0.0 +654 98 model.embedding_dim 1.0 +654 98 negative_sampler.num_negs_per_pos 58.0 +654 98 training.batch_size 1.0 +654 99 model.embedding_dim 2.0 +654 99 negative_sampler.num_negs_per_pos 71.0 +654 99 training.batch_size 1.0 +654 100 model.embedding_dim 0.0 +654 100 negative_sampler.num_negs_per_pos 4.0 +654 100 training.batch_size 1.0 +654 1 dataset """kinships""" +654 1 model """simple""" +654 1 loss """softplus""" +654 1 regularizer """no""" +654 1 optimizer """adadelta""" +654 1 training_loop """owa""" +654 1 negative_sampler """basic""" +654 1 evaluator """rankbased""" +654 2 dataset """kinships""" +654 2 model """simple""" +654 2 loss """softplus""" +654 2 regularizer """no""" +654 2 optimizer """adadelta""" +654 2 training_loop """owa""" +654 2 negative_sampler """basic""" +654 2 evaluator """rankbased""" +654 3 dataset """kinships""" +654 3 model """simple""" +654 3 loss """softplus""" +654 3 regularizer """no""" +654 3 optimizer """adadelta""" +654 3 training_loop """owa""" +654 3 negative_sampler """basic""" 
+654 3 evaluator """rankbased""" +654 4 dataset """kinships""" +654 4 model """simple""" +654 4 loss """softplus""" +654 4 regularizer """no""" +654 4 optimizer """adadelta""" +654 4 training_loop """owa""" +654 4 negative_sampler """basic""" +654 4 evaluator """rankbased""" +654 5 dataset """kinships""" +654 5 model """simple""" +654 5 loss """softplus""" +654 5 regularizer """no""" +654 5 optimizer """adadelta""" +654 5 training_loop """owa""" +654 5 negative_sampler """basic""" +654 5 evaluator """rankbased""" +654 6 dataset """kinships""" +654 6 model """simple""" +654 6 loss """softplus""" +654 6 regularizer """no""" +654 6 optimizer """adadelta""" +654 6 training_loop """owa""" +654 6 negative_sampler """basic""" +654 6 evaluator """rankbased""" +654 7 dataset """kinships""" +654 7 model """simple""" +654 7 loss """softplus""" +654 7 regularizer """no""" +654 7 optimizer """adadelta""" +654 7 training_loop """owa""" +654 7 negative_sampler """basic""" +654 7 evaluator """rankbased""" +654 8 dataset """kinships""" +654 8 model """simple""" +654 8 loss """softplus""" +654 8 regularizer """no""" +654 8 optimizer """adadelta""" +654 8 training_loop """owa""" +654 8 negative_sampler """basic""" +654 8 evaluator """rankbased""" +654 9 dataset """kinships""" +654 9 model """simple""" +654 9 loss """softplus""" +654 9 regularizer """no""" +654 9 optimizer """adadelta""" +654 9 training_loop """owa""" +654 9 negative_sampler """basic""" +654 9 evaluator """rankbased""" +654 10 dataset """kinships""" +654 10 model """simple""" +654 10 loss """softplus""" +654 10 regularizer """no""" +654 10 optimizer """adadelta""" +654 10 training_loop """owa""" +654 10 negative_sampler """basic""" +654 10 evaluator """rankbased""" +654 11 dataset """kinships""" +654 11 model """simple""" +654 11 loss """softplus""" +654 11 regularizer """no""" +654 11 optimizer """adadelta""" +654 11 training_loop """owa""" +654 11 negative_sampler """basic""" +654 11 evaluator """rankbased""" +654 
12 dataset """kinships""" +654 12 model """simple""" +654 12 loss """softplus""" +654 12 regularizer """no""" +654 12 optimizer """adadelta""" +654 12 training_loop """owa""" +654 12 negative_sampler """basic""" +654 12 evaluator """rankbased""" +654 13 dataset """kinships""" +654 13 model """simple""" +654 13 loss """softplus""" +654 13 regularizer """no""" +654 13 optimizer """adadelta""" +654 13 training_loop """owa""" +654 13 negative_sampler """basic""" +654 13 evaluator """rankbased""" +654 14 dataset """kinships""" +654 14 model """simple""" +654 14 loss """softplus""" +654 14 regularizer """no""" +654 14 optimizer """adadelta""" +654 14 training_loop """owa""" +654 14 negative_sampler """basic""" +654 14 evaluator """rankbased""" +654 15 dataset """kinships""" +654 15 model """simple""" +654 15 loss """softplus""" +654 15 regularizer """no""" +654 15 optimizer """adadelta""" +654 15 training_loop """owa""" +654 15 negative_sampler """basic""" +654 15 evaluator """rankbased""" +654 16 dataset """kinships""" +654 16 model """simple""" +654 16 loss """softplus""" +654 16 regularizer """no""" +654 16 optimizer """adadelta""" +654 16 training_loop """owa""" +654 16 negative_sampler """basic""" +654 16 evaluator """rankbased""" +654 17 dataset """kinships""" +654 17 model """simple""" +654 17 loss """softplus""" +654 17 regularizer """no""" +654 17 optimizer """adadelta""" +654 17 training_loop """owa""" +654 17 negative_sampler """basic""" +654 17 evaluator """rankbased""" +654 18 dataset """kinships""" +654 18 model """simple""" +654 18 loss """softplus""" +654 18 regularizer """no""" +654 18 optimizer """adadelta""" +654 18 training_loop """owa""" +654 18 negative_sampler """basic""" +654 18 evaluator """rankbased""" +654 19 dataset """kinships""" +654 19 model """simple""" +654 19 loss """softplus""" +654 19 regularizer """no""" +654 19 optimizer """adadelta""" +654 19 training_loop """owa""" +654 19 negative_sampler """basic""" +654 19 evaluator 
"""rankbased""" +654 20 dataset """kinships""" +654 20 model """simple""" +654 20 loss """softplus""" +654 20 regularizer """no""" +654 20 optimizer """adadelta""" +654 20 training_loop """owa""" +654 20 negative_sampler """basic""" +654 20 evaluator """rankbased""" +654 21 dataset """kinships""" +654 21 model """simple""" +654 21 loss """softplus""" +654 21 regularizer """no""" +654 21 optimizer """adadelta""" +654 21 training_loop """owa""" +654 21 negative_sampler """basic""" +654 21 evaluator """rankbased""" +654 22 dataset """kinships""" +654 22 model """simple""" +654 22 loss """softplus""" +654 22 regularizer """no""" +654 22 optimizer """adadelta""" +654 22 training_loop """owa""" +654 22 negative_sampler """basic""" +654 22 evaluator """rankbased""" +654 23 dataset """kinships""" +654 23 model """simple""" +654 23 loss """softplus""" +654 23 regularizer """no""" +654 23 optimizer """adadelta""" +654 23 training_loop """owa""" +654 23 negative_sampler """basic""" +654 23 evaluator """rankbased""" +654 24 dataset """kinships""" +654 24 model """simple""" +654 24 loss """softplus""" +654 24 regularizer """no""" +654 24 optimizer """adadelta""" +654 24 training_loop """owa""" +654 24 negative_sampler """basic""" +654 24 evaluator """rankbased""" +654 25 dataset """kinships""" +654 25 model """simple""" +654 25 loss """softplus""" +654 25 regularizer """no""" +654 25 optimizer """adadelta""" +654 25 training_loop """owa""" +654 25 negative_sampler """basic""" +654 25 evaluator """rankbased""" +654 26 dataset """kinships""" +654 26 model """simple""" +654 26 loss """softplus""" +654 26 regularizer """no""" +654 26 optimizer """adadelta""" +654 26 training_loop """owa""" +654 26 negative_sampler """basic""" +654 26 evaluator """rankbased""" +654 27 dataset """kinships""" +654 27 model """simple""" +654 27 loss """softplus""" +654 27 regularizer """no""" +654 27 optimizer """adadelta""" +654 27 training_loop """owa""" +654 27 negative_sampler """basic""" +654 27 
evaluator """rankbased""" +654 28 dataset """kinships""" +654 28 model """simple""" +654 28 loss """softplus""" +654 28 regularizer """no""" +654 28 optimizer """adadelta""" +654 28 training_loop """owa""" +654 28 negative_sampler """basic""" +654 28 evaluator """rankbased""" +654 29 dataset """kinships""" +654 29 model """simple""" +654 29 loss """softplus""" +654 29 regularizer """no""" +654 29 optimizer """adadelta""" +654 29 training_loop """owa""" +654 29 negative_sampler """basic""" +654 29 evaluator """rankbased""" +654 30 dataset """kinships""" +654 30 model """simple""" +654 30 loss """softplus""" +654 30 regularizer """no""" +654 30 optimizer """adadelta""" +654 30 training_loop """owa""" +654 30 negative_sampler """basic""" +654 30 evaluator """rankbased""" +654 31 dataset """kinships""" +654 31 model """simple""" +654 31 loss """softplus""" +654 31 regularizer """no""" +654 31 optimizer """adadelta""" +654 31 training_loop """owa""" +654 31 negative_sampler """basic""" +654 31 evaluator """rankbased""" +654 32 dataset """kinships""" +654 32 model """simple""" +654 32 loss """softplus""" +654 32 regularizer """no""" +654 32 optimizer """adadelta""" +654 32 training_loop """owa""" +654 32 negative_sampler """basic""" +654 32 evaluator """rankbased""" +654 33 dataset """kinships""" +654 33 model """simple""" +654 33 loss """softplus""" +654 33 regularizer """no""" +654 33 optimizer """adadelta""" +654 33 training_loop """owa""" +654 33 negative_sampler """basic""" +654 33 evaluator """rankbased""" +654 34 dataset """kinships""" +654 34 model """simple""" +654 34 loss """softplus""" +654 34 regularizer """no""" +654 34 optimizer """adadelta""" +654 34 training_loop """owa""" +654 34 negative_sampler """basic""" +654 34 evaluator """rankbased""" +654 35 dataset """kinships""" +654 35 model """simple""" +654 35 loss """softplus""" +654 35 regularizer """no""" +654 35 optimizer """adadelta""" +654 35 training_loop """owa""" +654 35 negative_sampler """basic""" 
+654 35 evaluator """rankbased""" +654 36 dataset """kinships""" +654 36 model """simple""" +654 36 loss """softplus""" +654 36 regularizer """no""" +654 36 optimizer """adadelta""" +654 36 training_loop """owa""" +654 36 negative_sampler """basic""" +654 36 evaluator """rankbased""" +654 37 dataset """kinships""" +654 37 model """simple""" +654 37 loss """softplus""" +654 37 regularizer """no""" +654 37 optimizer """adadelta""" +654 37 training_loop """owa""" +654 37 negative_sampler """basic""" +654 37 evaluator """rankbased""" +654 38 dataset """kinships""" +654 38 model """simple""" +654 38 loss """softplus""" +654 38 regularizer """no""" +654 38 optimizer """adadelta""" +654 38 training_loop """owa""" +654 38 negative_sampler """basic""" +654 38 evaluator """rankbased""" +654 39 dataset """kinships""" +654 39 model """simple""" +654 39 loss """softplus""" +654 39 regularizer """no""" +654 39 optimizer """adadelta""" +654 39 training_loop """owa""" +654 39 negative_sampler """basic""" +654 39 evaluator """rankbased""" +654 40 dataset """kinships""" +654 40 model """simple""" +654 40 loss """softplus""" +654 40 regularizer """no""" +654 40 optimizer """adadelta""" +654 40 training_loop """owa""" +654 40 negative_sampler """basic""" +654 40 evaluator """rankbased""" +654 41 dataset """kinships""" +654 41 model """simple""" +654 41 loss """softplus""" +654 41 regularizer """no""" +654 41 optimizer """adadelta""" +654 41 training_loop """owa""" +654 41 negative_sampler """basic""" +654 41 evaluator """rankbased""" +654 42 dataset """kinships""" +654 42 model """simple""" +654 42 loss """softplus""" +654 42 regularizer """no""" +654 42 optimizer """adadelta""" +654 42 training_loop """owa""" +654 42 negative_sampler """basic""" +654 42 evaluator """rankbased""" +654 43 dataset """kinships""" +654 43 model """simple""" +654 43 loss """softplus""" +654 43 regularizer """no""" +654 43 optimizer """adadelta""" +654 43 training_loop """owa""" +654 43 negative_sampler 
"""basic""" +654 43 evaluator """rankbased""" +654 44 dataset """kinships""" +654 44 model """simple""" +654 44 loss """softplus""" +654 44 regularizer """no""" +654 44 optimizer """adadelta""" +654 44 training_loop """owa""" +654 44 negative_sampler """basic""" +654 44 evaluator """rankbased""" +654 45 dataset """kinships""" +654 45 model """simple""" +654 45 loss """softplus""" +654 45 regularizer """no""" +654 45 optimizer """adadelta""" +654 45 training_loop """owa""" +654 45 negative_sampler """basic""" +654 45 evaluator """rankbased""" +654 46 dataset """kinships""" +654 46 model """simple""" +654 46 loss """softplus""" +654 46 regularizer """no""" +654 46 optimizer """adadelta""" +654 46 training_loop """owa""" +654 46 negative_sampler """basic""" +654 46 evaluator """rankbased""" +654 47 dataset """kinships""" +654 47 model """simple""" +654 47 loss """softplus""" +654 47 regularizer """no""" +654 47 optimizer """adadelta""" +654 47 training_loop """owa""" +654 47 negative_sampler """basic""" +654 47 evaluator """rankbased""" +654 48 dataset """kinships""" +654 48 model """simple""" +654 48 loss """softplus""" +654 48 regularizer """no""" +654 48 optimizer """adadelta""" +654 48 training_loop """owa""" +654 48 negative_sampler """basic""" +654 48 evaluator """rankbased""" +654 49 dataset """kinships""" +654 49 model """simple""" +654 49 loss """softplus""" +654 49 regularizer """no""" +654 49 optimizer """adadelta""" +654 49 training_loop """owa""" +654 49 negative_sampler """basic""" +654 49 evaluator """rankbased""" +654 50 dataset """kinships""" +654 50 model """simple""" +654 50 loss """softplus""" +654 50 regularizer """no""" +654 50 optimizer """adadelta""" +654 50 training_loop """owa""" +654 50 negative_sampler """basic""" +654 50 evaluator """rankbased""" +654 51 dataset """kinships""" +654 51 model """simple""" +654 51 loss """softplus""" +654 51 regularizer """no""" +654 51 optimizer """adadelta""" +654 51 training_loop """owa""" +654 51 
negative_sampler """basic""" +654 51 evaluator """rankbased""" +654 52 dataset """kinships""" +654 52 model """simple""" +654 52 loss """softplus""" +654 52 regularizer """no""" +654 52 optimizer """adadelta""" +654 52 training_loop """owa""" +654 52 negative_sampler """basic""" +654 52 evaluator """rankbased""" +654 53 dataset """kinships""" +654 53 model """simple""" +654 53 loss """softplus""" +654 53 regularizer """no""" +654 53 optimizer """adadelta""" +654 53 training_loop """owa""" +654 53 negative_sampler """basic""" +654 53 evaluator """rankbased""" +654 54 dataset """kinships""" +654 54 model """simple""" +654 54 loss """softplus""" +654 54 regularizer """no""" +654 54 optimizer """adadelta""" +654 54 training_loop """owa""" +654 54 negative_sampler """basic""" +654 54 evaluator """rankbased""" +654 55 dataset """kinships""" +654 55 model """simple""" +654 55 loss """softplus""" +654 55 regularizer """no""" +654 55 optimizer """adadelta""" +654 55 training_loop """owa""" +654 55 negative_sampler """basic""" +654 55 evaluator """rankbased""" +654 56 dataset """kinships""" +654 56 model """simple""" +654 56 loss """softplus""" +654 56 regularizer """no""" +654 56 optimizer """adadelta""" +654 56 training_loop """owa""" +654 56 negative_sampler """basic""" +654 56 evaluator """rankbased""" +654 57 dataset """kinships""" +654 57 model """simple""" +654 57 loss """softplus""" +654 57 regularizer """no""" +654 57 optimizer """adadelta""" +654 57 training_loop """owa""" +654 57 negative_sampler """basic""" +654 57 evaluator """rankbased""" +654 58 dataset """kinships""" +654 58 model """simple""" +654 58 loss """softplus""" +654 58 regularizer """no""" +654 58 optimizer """adadelta""" +654 58 training_loop """owa""" +654 58 negative_sampler """basic""" +654 58 evaluator """rankbased""" +654 59 dataset """kinships""" +654 59 model """simple""" +654 59 loss """softplus""" +654 59 regularizer """no""" +654 59 optimizer """adadelta""" +654 59 training_loop """owa""" 
+654 59 negative_sampler """basic""" +654 59 evaluator """rankbased""" +654 60 dataset """kinships""" +654 60 model """simple""" +654 60 loss """softplus""" +654 60 regularizer """no""" +654 60 optimizer """adadelta""" +654 60 training_loop """owa""" +654 60 negative_sampler """basic""" +654 60 evaluator """rankbased""" +654 61 dataset """kinships""" +654 61 model """simple""" +654 61 loss """softplus""" +654 61 regularizer """no""" +654 61 optimizer """adadelta""" +654 61 training_loop """owa""" +654 61 negative_sampler """basic""" +654 61 evaluator """rankbased""" +654 62 dataset """kinships""" +654 62 model """simple""" +654 62 loss """softplus""" +654 62 regularizer """no""" +654 62 optimizer """adadelta""" +654 62 training_loop """owa""" +654 62 negative_sampler """basic""" +654 62 evaluator """rankbased""" +654 63 dataset """kinships""" +654 63 model """simple""" +654 63 loss """softplus""" +654 63 regularizer """no""" +654 63 optimizer """adadelta""" +654 63 training_loop """owa""" +654 63 negative_sampler """basic""" +654 63 evaluator """rankbased""" +654 64 dataset """kinships""" +654 64 model """simple""" +654 64 loss """softplus""" +654 64 regularizer """no""" +654 64 optimizer """adadelta""" +654 64 training_loop """owa""" +654 64 negative_sampler """basic""" +654 64 evaluator """rankbased""" +654 65 dataset """kinships""" +654 65 model """simple""" +654 65 loss """softplus""" +654 65 regularizer """no""" +654 65 optimizer """adadelta""" +654 65 training_loop """owa""" +654 65 negative_sampler """basic""" +654 65 evaluator """rankbased""" +654 66 dataset """kinships""" +654 66 model """simple""" +654 66 loss """softplus""" +654 66 regularizer """no""" +654 66 optimizer """adadelta""" +654 66 training_loop """owa""" +654 66 negative_sampler """basic""" +654 66 evaluator """rankbased""" +654 67 dataset """kinships""" +654 67 model """simple""" +654 67 loss """softplus""" +654 67 regularizer """no""" +654 67 optimizer """adadelta""" +654 67 training_loop 
"""owa""" +654 67 negative_sampler """basic""" +654 67 evaluator """rankbased""" +654 68 dataset """kinships""" +654 68 model """simple""" +654 68 loss """softplus""" +654 68 regularizer """no""" +654 68 optimizer """adadelta""" +654 68 training_loop """owa""" +654 68 negative_sampler """basic""" +654 68 evaluator """rankbased""" +654 69 dataset """kinships""" +654 69 model """simple""" +654 69 loss """softplus""" +654 69 regularizer """no""" +654 69 optimizer """adadelta""" +654 69 training_loop """owa""" +654 69 negative_sampler """basic""" +654 69 evaluator """rankbased""" +654 70 dataset """kinships""" +654 70 model """simple""" +654 70 loss """softplus""" +654 70 regularizer """no""" +654 70 optimizer """adadelta""" +654 70 training_loop """owa""" +654 70 negative_sampler """basic""" +654 70 evaluator """rankbased""" +654 71 dataset """kinships""" +654 71 model """simple""" +654 71 loss """softplus""" +654 71 regularizer """no""" +654 71 optimizer """adadelta""" +654 71 training_loop """owa""" +654 71 negative_sampler """basic""" +654 71 evaluator """rankbased""" +654 72 dataset """kinships""" +654 72 model """simple""" +654 72 loss """softplus""" +654 72 regularizer """no""" +654 72 optimizer """adadelta""" +654 72 training_loop """owa""" +654 72 negative_sampler """basic""" +654 72 evaluator """rankbased""" +654 73 dataset """kinships""" +654 73 model """simple""" +654 73 loss """softplus""" +654 73 regularizer """no""" +654 73 optimizer """adadelta""" +654 73 training_loop """owa""" +654 73 negative_sampler """basic""" +654 73 evaluator """rankbased""" +654 74 dataset """kinships""" +654 74 model """simple""" +654 74 loss """softplus""" +654 74 regularizer """no""" +654 74 optimizer """adadelta""" +654 74 training_loop """owa""" +654 74 negative_sampler """basic""" +654 74 evaluator """rankbased""" +654 75 dataset """kinships""" +654 75 model """simple""" +654 75 loss """softplus""" +654 75 regularizer """no""" +654 75 optimizer """adadelta""" +654 75 
training_loop """owa""" +654 75 negative_sampler """basic""" +654 75 evaluator """rankbased""" +654 76 dataset """kinships""" +654 76 model """simple""" +654 76 loss """softplus""" +654 76 regularizer """no""" +654 76 optimizer """adadelta""" +654 76 training_loop """owa""" +654 76 negative_sampler """basic""" +654 76 evaluator """rankbased""" +654 77 dataset """kinships""" +654 77 model """simple""" +654 77 loss """softplus""" +654 77 regularizer """no""" +654 77 optimizer """adadelta""" +654 77 training_loop """owa""" +654 77 negative_sampler """basic""" +654 77 evaluator """rankbased""" +654 78 dataset """kinships""" +654 78 model """simple""" +654 78 loss """softplus""" +654 78 regularizer """no""" +654 78 optimizer """adadelta""" +654 78 training_loop """owa""" +654 78 negative_sampler """basic""" +654 78 evaluator """rankbased""" +654 79 dataset """kinships""" +654 79 model """simple""" +654 79 loss """softplus""" +654 79 regularizer """no""" +654 79 optimizer """adadelta""" +654 79 training_loop """owa""" +654 79 negative_sampler """basic""" +654 79 evaluator """rankbased""" +654 80 dataset """kinships""" +654 80 model """simple""" +654 80 loss """softplus""" +654 80 regularizer """no""" +654 80 optimizer """adadelta""" +654 80 training_loop """owa""" +654 80 negative_sampler """basic""" +654 80 evaluator """rankbased""" +654 81 dataset """kinships""" +654 81 model """simple""" +654 81 loss """softplus""" +654 81 regularizer """no""" +654 81 optimizer """adadelta""" +654 81 training_loop """owa""" +654 81 negative_sampler """basic""" +654 81 evaluator """rankbased""" +654 82 dataset """kinships""" +654 82 model """simple""" +654 82 loss """softplus""" +654 82 regularizer """no""" +654 82 optimizer """adadelta""" +654 82 training_loop """owa""" +654 82 negative_sampler """basic""" +654 82 evaluator """rankbased""" +654 83 dataset """kinships""" +654 83 model """simple""" +654 83 loss """softplus""" +654 83 regularizer """no""" +654 83 optimizer """adadelta""" 
+654 83 training_loop """owa""" +654 83 negative_sampler """basic""" +654 83 evaluator """rankbased""" +654 84 dataset """kinships""" +654 84 model """simple""" +654 84 loss """softplus""" +654 84 regularizer """no""" +654 84 optimizer """adadelta""" +654 84 training_loop """owa""" +654 84 negative_sampler """basic""" +654 84 evaluator """rankbased""" +654 85 dataset """kinships""" +654 85 model """simple""" +654 85 loss """softplus""" +654 85 regularizer """no""" +654 85 optimizer """adadelta""" +654 85 training_loop """owa""" +654 85 negative_sampler """basic""" +654 85 evaluator """rankbased""" +654 86 dataset """kinships""" +654 86 model """simple""" +654 86 loss """softplus""" +654 86 regularizer """no""" +654 86 optimizer """adadelta""" +654 86 training_loop """owa""" +654 86 negative_sampler """basic""" +654 86 evaluator """rankbased""" +654 87 dataset """kinships""" +654 87 model """simple""" +654 87 loss """softplus""" +654 87 regularizer """no""" +654 87 optimizer """adadelta""" +654 87 training_loop """owa""" +654 87 negative_sampler """basic""" +654 87 evaluator """rankbased""" +654 88 dataset """kinships""" +654 88 model """simple""" +654 88 loss """softplus""" +654 88 regularizer """no""" +654 88 optimizer """adadelta""" +654 88 training_loop """owa""" +654 88 negative_sampler """basic""" +654 88 evaluator """rankbased""" +654 89 dataset """kinships""" +654 89 model """simple""" +654 89 loss """softplus""" +654 89 regularizer """no""" +654 89 optimizer """adadelta""" +654 89 training_loop """owa""" +654 89 negative_sampler """basic""" +654 89 evaluator """rankbased""" +654 90 dataset """kinships""" +654 90 model """simple""" +654 90 loss """softplus""" +654 90 regularizer """no""" +654 90 optimizer """adadelta""" +654 90 training_loop """owa""" +654 90 negative_sampler """basic""" +654 90 evaluator """rankbased""" +654 91 dataset """kinships""" +654 91 model """simple""" +654 91 loss """softplus""" +654 91 regularizer """no""" +654 91 optimizer 
"""adadelta""" +654 91 training_loop """owa""" +654 91 negative_sampler """basic""" +654 91 evaluator """rankbased""" +654 92 dataset """kinships""" +654 92 model """simple""" +654 92 loss """softplus""" +654 92 regularizer """no""" +654 92 optimizer """adadelta""" +654 92 training_loop """owa""" +654 92 negative_sampler """basic""" +654 92 evaluator """rankbased""" +654 93 dataset """kinships""" +654 93 model """simple""" +654 93 loss """softplus""" +654 93 regularizer """no""" +654 93 optimizer """adadelta""" +654 93 training_loop """owa""" +654 93 negative_sampler """basic""" +654 93 evaluator """rankbased""" +654 94 dataset """kinships""" +654 94 model """simple""" +654 94 loss """softplus""" +654 94 regularizer """no""" +654 94 optimizer """adadelta""" +654 94 training_loop """owa""" +654 94 negative_sampler """basic""" +654 94 evaluator """rankbased""" +654 95 dataset """kinships""" +654 95 model """simple""" +654 95 loss """softplus""" +654 95 regularizer """no""" +654 95 optimizer """adadelta""" +654 95 training_loop """owa""" +654 95 negative_sampler """basic""" +654 95 evaluator """rankbased""" +654 96 dataset """kinships""" +654 96 model """simple""" +654 96 loss """softplus""" +654 96 regularizer """no""" +654 96 optimizer """adadelta""" +654 96 training_loop """owa""" +654 96 negative_sampler """basic""" +654 96 evaluator """rankbased""" +654 97 dataset """kinships""" +654 97 model """simple""" +654 97 loss """softplus""" +654 97 regularizer """no""" +654 97 optimizer """adadelta""" +654 97 training_loop """owa""" +654 97 negative_sampler """basic""" +654 97 evaluator """rankbased""" +654 98 dataset """kinships""" +654 98 model """simple""" +654 98 loss """softplus""" +654 98 regularizer """no""" +654 98 optimizer """adadelta""" +654 98 training_loop """owa""" +654 98 negative_sampler """basic""" +654 98 evaluator """rankbased""" +654 99 dataset """kinships""" +654 99 model """simple""" +654 99 loss """softplus""" +654 99 regularizer """no""" +654 99 
optimizer """adadelta""" +654 99 training_loop """owa""" +654 99 negative_sampler """basic""" +654 99 evaluator """rankbased""" +654 100 dataset """kinships""" +654 100 model """simple""" +654 100 loss """softplus""" +654 100 regularizer """no""" +654 100 optimizer """adadelta""" +654 100 training_loop """owa""" +654 100 negative_sampler """basic""" +654 100 evaluator """rankbased""" +655 1 model.embedding_dim 0.0 +655 1 negative_sampler.num_negs_per_pos 53.0 +655 1 training.batch_size 1.0 +655 2 model.embedding_dim 2.0 +655 2 negative_sampler.num_negs_per_pos 39.0 +655 2 training.batch_size 0.0 +655 3 model.embedding_dim 1.0 +655 3 negative_sampler.num_negs_per_pos 99.0 +655 3 training.batch_size 0.0 +655 4 model.embedding_dim 1.0 +655 4 negative_sampler.num_negs_per_pos 98.0 +655 4 training.batch_size 1.0 +655 5 model.embedding_dim 0.0 +655 5 negative_sampler.num_negs_per_pos 15.0 +655 5 training.batch_size 2.0 +655 6 model.embedding_dim 1.0 +655 6 negative_sampler.num_negs_per_pos 70.0 +655 6 training.batch_size 0.0 +655 7 model.embedding_dim 1.0 +655 7 negative_sampler.num_negs_per_pos 90.0 +655 7 training.batch_size 2.0 +655 8 model.embedding_dim 1.0 +655 8 negative_sampler.num_negs_per_pos 76.0 +655 8 training.batch_size 1.0 +655 9 model.embedding_dim 0.0 +655 9 negative_sampler.num_negs_per_pos 58.0 +655 9 training.batch_size 1.0 +655 10 model.embedding_dim 1.0 +655 10 negative_sampler.num_negs_per_pos 46.0 +655 10 training.batch_size 1.0 +655 11 model.embedding_dim 0.0 +655 11 negative_sampler.num_negs_per_pos 87.0 +655 11 training.batch_size 0.0 +655 12 model.embedding_dim 1.0 +655 12 negative_sampler.num_negs_per_pos 87.0 +655 12 training.batch_size 2.0 +655 13 model.embedding_dim 0.0 +655 13 negative_sampler.num_negs_per_pos 62.0 +655 13 training.batch_size 0.0 +655 14 model.embedding_dim 2.0 +655 14 negative_sampler.num_negs_per_pos 72.0 +655 14 training.batch_size 2.0 +655 15 model.embedding_dim 1.0 +655 15 negative_sampler.num_negs_per_pos 65.0 +655 
15 training.batch_size 1.0 +655 16 model.embedding_dim 2.0 +655 16 negative_sampler.num_negs_per_pos 3.0 +655 16 training.batch_size 2.0 +655 17 model.embedding_dim 2.0 +655 17 negative_sampler.num_negs_per_pos 13.0 +655 17 training.batch_size 1.0 +655 18 model.embedding_dim 1.0 +655 18 negative_sampler.num_negs_per_pos 36.0 +655 18 training.batch_size 1.0 +655 19 model.embedding_dim 0.0 +655 19 negative_sampler.num_negs_per_pos 13.0 +655 19 training.batch_size 1.0 +655 20 model.embedding_dim 1.0 +655 20 negative_sampler.num_negs_per_pos 71.0 +655 20 training.batch_size 0.0 +655 21 model.embedding_dim 0.0 +655 21 negative_sampler.num_negs_per_pos 90.0 +655 21 training.batch_size 1.0 +655 22 model.embedding_dim 1.0 +655 22 negative_sampler.num_negs_per_pos 82.0 +655 22 training.batch_size 1.0 +655 23 model.embedding_dim 1.0 +655 23 negative_sampler.num_negs_per_pos 33.0 +655 23 training.batch_size 0.0 +655 24 model.embedding_dim 2.0 +655 24 negative_sampler.num_negs_per_pos 58.0 +655 24 training.batch_size 2.0 +655 25 model.embedding_dim 0.0 +655 25 negative_sampler.num_negs_per_pos 22.0 +655 25 training.batch_size 0.0 +655 26 model.embedding_dim 1.0 +655 26 negative_sampler.num_negs_per_pos 63.0 +655 26 training.batch_size 2.0 +655 27 model.embedding_dim 1.0 +655 27 negative_sampler.num_negs_per_pos 63.0 +655 27 training.batch_size 0.0 +655 28 model.embedding_dim 0.0 +655 28 negative_sampler.num_negs_per_pos 5.0 +655 28 training.batch_size 1.0 +655 29 model.embedding_dim 1.0 +655 29 negative_sampler.num_negs_per_pos 3.0 +655 29 training.batch_size 2.0 +655 30 model.embedding_dim 1.0 +655 30 negative_sampler.num_negs_per_pos 85.0 +655 30 training.batch_size 2.0 +655 31 model.embedding_dim 0.0 +655 31 negative_sampler.num_negs_per_pos 52.0 +655 31 training.batch_size 1.0 +655 32 model.embedding_dim 0.0 +655 32 negative_sampler.num_negs_per_pos 85.0 +655 32 training.batch_size 0.0 +655 33 model.embedding_dim 1.0 +655 33 negative_sampler.num_negs_per_pos 67.0 +655 33 
training.batch_size 1.0 +655 34 model.embedding_dim 0.0 +655 34 negative_sampler.num_negs_per_pos 39.0 +655 34 training.batch_size 1.0 +655 35 model.embedding_dim 0.0 +655 35 negative_sampler.num_negs_per_pos 89.0 +655 35 training.batch_size 2.0 +655 36 model.embedding_dim 2.0 +655 36 negative_sampler.num_negs_per_pos 28.0 +655 36 training.batch_size 0.0 +655 37 model.embedding_dim 0.0 +655 37 negative_sampler.num_negs_per_pos 39.0 +655 37 training.batch_size 1.0 +655 38 model.embedding_dim 0.0 +655 38 negative_sampler.num_negs_per_pos 3.0 +655 38 training.batch_size 1.0 +655 39 model.embedding_dim 1.0 +655 39 negative_sampler.num_negs_per_pos 65.0 +655 39 training.batch_size 0.0 +655 40 model.embedding_dim 2.0 +655 40 negative_sampler.num_negs_per_pos 97.0 +655 40 training.batch_size 0.0 +655 41 model.embedding_dim 0.0 +655 41 negative_sampler.num_negs_per_pos 68.0 +655 41 training.batch_size 2.0 +655 42 model.embedding_dim 2.0 +655 42 negative_sampler.num_negs_per_pos 25.0 +655 42 training.batch_size 1.0 +655 43 model.embedding_dim 0.0 +655 43 negative_sampler.num_negs_per_pos 29.0 +655 43 training.batch_size 2.0 +655 44 model.embedding_dim 2.0 +655 44 negative_sampler.num_negs_per_pos 5.0 +655 44 training.batch_size 2.0 +655 45 model.embedding_dim 0.0 +655 45 negative_sampler.num_negs_per_pos 36.0 +655 45 training.batch_size 2.0 +655 46 model.embedding_dim 0.0 +655 46 negative_sampler.num_negs_per_pos 83.0 +655 46 training.batch_size 1.0 +655 47 model.embedding_dim 1.0 +655 47 negative_sampler.num_negs_per_pos 79.0 +655 47 training.batch_size 0.0 +655 48 model.embedding_dim 0.0 +655 48 negative_sampler.num_negs_per_pos 53.0 +655 48 training.batch_size 2.0 +655 49 model.embedding_dim 2.0 +655 49 negative_sampler.num_negs_per_pos 52.0 +655 49 training.batch_size 1.0 +655 50 model.embedding_dim 1.0 +655 50 negative_sampler.num_negs_per_pos 22.0 +655 50 training.batch_size 0.0 +655 51 model.embedding_dim 0.0 +655 51 negative_sampler.num_negs_per_pos 36.0 +655 51 
training.batch_size 2.0 +655 52 model.embedding_dim 1.0 +655 52 negative_sampler.num_negs_per_pos 38.0 +655 52 training.batch_size 0.0 +655 53 model.embedding_dim 0.0 +655 53 negative_sampler.num_negs_per_pos 10.0 +655 53 training.batch_size 1.0 +655 54 model.embedding_dim 0.0 +655 54 negative_sampler.num_negs_per_pos 75.0 +655 54 training.batch_size 2.0 +655 55 model.embedding_dim 0.0 +655 55 negative_sampler.num_negs_per_pos 66.0 +655 55 training.batch_size 0.0 +655 56 model.embedding_dim 2.0 +655 56 negative_sampler.num_negs_per_pos 13.0 +655 56 training.batch_size 2.0 +655 57 model.embedding_dim 1.0 +655 57 negative_sampler.num_negs_per_pos 84.0 +655 57 training.batch_size 0.0 +655 58 model.embedding_dim 2.0 +655 58 negative_sampler.num_negs_per_pos 39.0 +655 58 training.batch_size 1.0 +655 59 model.embedding_dim 2.0 +655 59 negative_sampler.num_negs_per_pos 60.0 +655 59 training.batch_size 0.0 +655 60 model.embedding_dim 0.0 +655 60 negative_sampler.num_negs_per_pos 75.0 +655 60 training.batch_size 0.0 +655 61 model.embedding_dim 1.0 +655 61 negative_sampler.num_negs_per_pos 7.0 +655 61 training.batch_size 2.0 +655 62 model.embedding_dim 2.0 +655 62 negative_sampler.num_negs_per_pos 74.0 +655 62 training.batch_size 1.0 +655 63 model.embedding_dim 0.0 +655 63 negative_sampler.num_negs_per_pos 76.0 +655 63 training.batch_size 2.0 +655 64 model.embedding_dim 2.0 +655 64 negative_sampler.num_negs_per_pos 76.0 +655 64 training.batch_size 1.0 +655 65 model.embedding_dim 0.0 +655 65 negative_sampler.num_negs_per_pos 13.0 +655 65 training.batch_size 0.0 +655 66 model.embedding_dim 1.0 +655 66 negative_sampler.num_negs_per_pos 16.0 +655 66 training.batch_size 2.0 +655 67 model.embedding_dim 2.0 +655 67 negative_sampler.num_negs_per_pos 35.0 +655 67 training.batch_size 2.0 +655 68 model.embedding_dim 0.0 +655 68 negative_sampler.num_negs_per_pos 39.0 +655 68 training.batch_size 0.0 +655 69 model.embedding_dim 0.0 +655 69 negative_sampler.num_negs_per_pos 28.0 +655 69 
training.batch_size 0.0 +655 70 model.embedding_dim 1.0 +655 70 negative_sampler.num_negs_per_pos 17.0 +655 70 training.batch_size 1.0 +655 71 model.embedding_dim 2.0 +655 71 negative_sampler.num_negs_per_pos 25.0 +655 71 training.batch_size 2.0 +655 72 model.embedding_dim 1.0 +655 72 negative_sampler.num_negs_per_pos 58.0 +655 72 training.batch_size 0.0 +655 73 model.embedding_dim 1.0 +655 73 negative_sampler.num_negs_per_pos 92.0 +655 73 training.batch_size 0.0 +655 74 model.embedding_dim 0.0 +655 74 negative_sampler.num_negs_per_pos 95.0 +655 74 training.batch_size 1.0 +655 75 model.embedding_dim 0.0 +655 75 negative_sampler.num_negs_per_pos 55.0 +655 75 training.batch_size 0.0 +655 76 model.embedding_dim 1.0 +655 76 negative_sampler.num_negs_per_pos 85.0 +655 76 training.batch_size 2.0 +655 77 model.embedding_dim 0.0 +655 77 negative_sampler.num_negs_per_pos 59.0 +655 77 training.batch_size 1.0 +655 78 model.embedding_dim 2.0 +655 78 negative_sampler.num_negs_per_pos 20.0 +655 78 training.batch_size 1.0 +655 79 model.embedding_dim 2.0 +655 79 negative_sampler.num_negs_per_pos 96.0 +655 79 training.batch_size 1.0 +655 80 model.embedding_dim 0.0 +655 80 negative_sampler.num_negs_per_pos 13.0 +655 80 training.batch_size 0.0 +655 81 model.embedding_dim 2.0 +655 81 negative_sampler.num_negs_per_pos 14.0 +655 81 training.batch_size 0.0 +655 82 model.embedding_dim 1.0 +655 82 negative_sampler.num_negs_per_pos 16.0 +655 82 training.batch_size 0.0 +655 83 model.embedding_dim 2.0 +655 83 negative_sampler.num_negs_per_pos 57.0 +655 83 training.batch_size 2.0 +655 84 model.embedding_dim 2.0 +655 84 negative_sampler.num_negs_per_pos 3.0 +655 84 training.batch_size 0.0 +655 85 model.embedding_dim 1.0 +655 85 negative_sampler.num_negs_per_pos 76.0 +655 85 training.batch_size 2.0 +655 86 model.embedding_dim 2.0 +655 86 negative_sampler.num_negs_per_pos 82.0 +655 86 training.batch_size 1.0 +655 87 model.embedding_dim 2.0 +655 87 negative_sampler.num_negs_per_pos 31.0 +655 87 
training.batch_size 1.0 +655 88 model.embedding_dim 0.0 +655 88 negative_sampler.num_negs_per_pos 46.0 +655 88 training.batch_size 1.0 +655 89 model.embedding_dim 2.0 +655 89 negative_sampler.num_negs_per_pos 58.0 +655 89 training.batch_size 1.0 +655 90 model.embedding_dim 2.0 +655 90 negative_sampler.num_negs_per_pos 50.0 +655 90 training.batch_size 1.0 +655 91 model.embedding_dim 1.0 +655 91 negative_sampler.num_negs_per_pos 70.0 +655 91 training.batch_size 1.0 +655 92 model.embedding_dim 0.0 +655 92 negative_sampler.num_negs_per_pos 3.0 +655 92 training.batch_size 1.0 +655 93 model.embedding_dim 1.0 +655 93 negative_sampler.num_negs_per_pos 15.0 +655 93 training.batch_size 2.0 +655 94 model.embedding_dim 2.0 +655 94 negative_sampler.num_negs_per_pos 41.0 +655 94 training.batch_size 2.0 +655 95 model.embedding_dim 2.0 +655 95 negative_sampler.num_negs_per_pos 71.0 +655 95 training.batch_size 0.0 +655 96 model.embedding_dim 1.0 +655 96 negative_sampler.num_negs_per_pos 3.0 +655 96 training.batch_size 2.0 +655 97 model.embedding_dim 0.0 +655 97 negative_sampler.num_negs_per_pos 93.0 +655 97 training.batch_size 0.0 +655 98 model.embedding_dim 2.0 +655 98 negative_sampler.num_negs_per_pos 22.0 +655 98 training.batch_size 0.0 +655 99 model.embedding_dim 2.0 +655 99 negative_sampler.num_negs_per_pos 45.0 +655 99 training.batch_size 2.0 +655 100 model.embedding_dim 1.0 +655 100 negative_sampler.num_negs_per_pos 61.0 +655 100 training.batch_size 1.0 +655 1 dataset """kinships""" +655 1 model """simple""" +655 1 loss """bceaftersigmoid""" +655 1 regularizer """no""" +655 1 optimizer """adadelta""" +655 1 training_loop """owa""" +655 1 negative_sampler """basic""" +655 1 evaluator """rankbased""" +655 2 dataset """kinships""" +655 2 model """simple""" +655 2 loss """bceaftersigmoid""" +655 2 regularizer """no""" +655 2 optimizer """adadelta""" +655 2 training_loop """owa""" +655 2 negative_sampler """basic""" +655 2 evaluator """rankbased""" +655 3 dataset """kinships""" 
+655 3 model """simple""" +655 3 loss """bceaftersigmoid""" +655 3 regularizer """no""" +655 3 optimizer """adadelta""" +655 3 training_loop """owa""" +655 3 negative_sampler """basic""" +655 3 evaluator """rankbased""" +655 4 dataset """kinships""" +655 4 model """simple""" +655 4 loss """bceaftersigmoid""" +655 4 regularizer """no""" +655 4 optimizer """adadelta""" +655 4 training_loop """owa""" +655 4 negative_sampler """basic""" +655 4 evaluator """rankbased""" +655 5 dataset """kinships""" +655 5 model """simple""" +655 5 loss """bceaftersigmoid""" +655 5 regularizer """no""" +655 5 optimizer """adadelta""" +655 5 training_loop """owa""" +655 5 negative_sampler """basic""" +655 5 evaluator """rankbased""" +655 6 dataset """kinships""" +655 6 model """simple""" +655 6 loss """bceaftersigmoid""" +655 6 regularizer """no""" +655 6 optimizer """adadelta""" +655 6 training_loop """owa""" +655 6 negative_sampler """basic""" +655 6 evaluator """rankbased""" +655 7 dataset """kinships""" +655 7 model """simple""" +655 7 loss """bceaftersigmoid""" +655 7 regularizer """no""" +655 7 optimizer """adadelta""" +655 7 training_loop """owa""" +655 7 negative_sampler """basic""" +655 7 evaluator """rankbased""" +655 8 dataset """kinships""" +655 8 model """simple""" +655 8 loss """bceaftersigmoid""" +655 8 regularizer """no""" +655 8 optimizer """adadelta""" +655 8 training_loop """owa""" +655 8 negative_sampler """basic""" +655 8 evaluator """rankbased""" +655 9 dataset """kinships""" +655 9 model """simple""" +655 9 loss """bceaftersigmoid""" +655 9 regularizer """no""" +655 9 optimizer """adadelta""" +655 9 training_loop """owa""" +655 9 negative_sampler """basic""" +655 9 evaluator """rankbased""" +655 10 dataset """kinships""" +655 10 model """simple""" +655 10 loss """bceaftersigmoid""" +655 10 regularizer """no""" +655 10 optimizer """adadelta""" +655 10 training_loop """owa""" +655 10 negative_sampler """basic""" +655 10 evaluator """rankbased""" +655 11 dataset 
"""kinships""" +655 11 model """simple""" +655 11 loss """bceaftersigmoid""" +655 11 regularizer """no""" +655 11 optimizer """adadelta""" +655 11 training_loop """owa""" +655 11 negative_sampler """basic""" +655 11 evaluator """rankbased""" +655 12 dataset """kinships""" +655 12 model """simple""" +655 12 loss """bceaftersigmoid""" +655 12 regularizer """no""" +655 12 optimizer """adadelta""" +655 12 training_loop """owa""" +655 12 negative_sampler """basic""" +655 12 evaluator """rankbased""" +655 13 dataset """kinships""" +655 13 model """simple""" +655 13 loss """bceaftersigmoid""" +655 13 regularizer """no""" +655 13 optimizer """adadelta""" +655 13 training_loop """owa""" +655 13 negative_sampler """basic""" +655 13 evaluator """rankbased""" +655 14 dataset """kinships""" +655 14 model """simple""" +655 14 loss """bceaftersigmoid""" +655 14 regularizer """no""" +655 14 optimizer """adadelta""" +655 14 training_loop """owa""" +655 14 negative_sampler """basic""" +655 14 evaluator """rankbased""" +655 15 dataset """kinships""" +655 15 model """simple""" +655 15 loss """bceaftersigmoid""" +655 15 regularizer """no""" +655 15 optimizer """adadelta""" +655 15 training_loop """owa""" +655 15 negative_sampler """basic""" +655 15 evaluator """rankbased""" +655 16 dataset """kinships""" +655 16 model """simple""" +655 16 loss """bceaftersigmoid""" +655 16 regularizer """no""" +655 16 optimizer """adadelta""" +655 16 training_loop """owa""" +655 16 negative_sampler """basic""" +655 16 evaluator """rankbased""" +655 17 dataset """kinships""" +655 17 model """simple""" +655 17 loss """bceaftersigmoid""" +655 17 regularizer """no""" +655 17 optimizer """adadelta""" +655 17 training_loop """owa""" +655 17 negative_sampler """basic""" +655 17 evaluator """rankbased""" +655 18 dataset """kinships""" +655 18 model """simple""" +655 18 loss """bceaftersigmoid""" +655 18 regularizer """no""" +655 18 optimizer """adadelta""" +655 18 training_loop """owa""" +655 18 
negative_sampler """basic""" +655 18 evaluator """rankbased""" +655 19 dataset """kinships""" +655 19 model """simple""" +655 19 loss """bceaftersigmoid""" +655 19 regularizer """no""" +655 19 optimizer """adadelta""" +655 19 training_loop """owa""" +655 19 negative_sampler """basic""" +655 19 evaluator """rankbased""" +655 20 dataset """kinships""" +655 20 model """simple""" +655 20 loss """bceaftersigmoid""" +655 20 regularizer """no""" +655 20 optimizer """adadelta""" +655 20 training_loop """owa""" +655 20 negative_sampler """basic""" +655 20 evaluator """rankbased""" +655 21 dataset """kinships""" +655 21 model """simple""" +655 21 loss """bceaftersigmoid""" +655 21 regularizer """no""" +655 21 optimizer """adadelta""" +655 21 training_loop """owa""" +655 21 negative_sampler """basic""" +655 21 evaluator """rankbased""" +655 22 dataset """kinships""" +655 22 model """simple""" +655 22 loss """bceaftersigmoid""" +655 22 regularizer """no""" +655 22 optimizer """adadelta""" +655 22 training_loop """owa""" +655 22 negative_sampler """basic""" +655 22 evaluator """rankbased""" +655 23 dataset """kinships""" +655 23 model """simple""" +655 23 loss """bceaftersigmoid""" +655 23 regularizer """no""" +655 23 optimizer """adadelta""" +655 23 training_loop """owa""" +655 23 negative_sampler """basic""" +655 23 evaluator """rankbased""" +655 24 dataset """kinships""" +655 24 model """simple""" +655 24 loss """bceaftersigmoid""" +655 24 regularizer """no""" +655 24 optimizer """adadelta""" +655 24 training_loop """owa""" +655 24 negative_sampler """basic""" +655 24 evaluator """rankbased""" +655 25 dataset """kinships""" +655 25 model """simple""" +655 25 loss """bceaftersigmoid""" +655 25 regularizer """no""" +655 25 optimizer """adadelta""" +655 25 training_loop """owa""" +655 25 negative_sampler """basic""" +655 25 evaluator """rankbased""" +655 26 dataset """kinships""" +655 26 model """simple""" +655 26 loss """bceaftersigmoid""" +655 26 regularizer """no""" +655 26 
optimizer """adadelta""" +655 26 training_loop """owa""" +655 26 negative_sampler """basic""" +655 26 evaluator """rankbased""" +655 27 dataset """kinships""" +655 27 model """simple""" +655 27 loss """bceaftersigmoid""" +655 27 regularizer """no""" +655 27 optimizer """adadelta""" +655 27 training_loop """owa""" +655 27 negative_sampler """basic""" +655 27 evaluator """rankbased""" +655 28 dataset """kinships""" +655 28 model """simple""" +655 28 loss """bceaftersigmoid""" +655 28 regularizer """no""" +655 28 optimizer """adadelta""" +655 28 training_loop """owa""" +655 28 negative_sampler """basic""" +655 28 evaluator """rankbased""" +655 29 dataset """kinships""" +655 29 model """simple""" +655 29 loss """bceaftersigmoid""" +655 29 regularizer """no""" +655 29 optimizer """adadelta""" +655 29 training_loop """owa""" +655 29 negative_sampler """basic""" +655 29 evaluator """rankbased""" +655 30 dataset """kinships""" +655 30 model """simple""" +655 30 loss """bceaftersigmoid""" +655 30 regularizer """no""" +655 30 optimizer """adadelta""" +655 30 training_loop """owa""" +655 30 negative_sampler """basic""" +655 30 evaluator """rankbased""" +655 31 dataset """kinships""" +655 31 model """simple""" +655 31 loss """bceaftersigmoid""" +655 31 regularizer """no""" +655 31 optimizer """adadelta""" +655 31 training_loop """owa""" +655 31 negative_sampler """basic""" +655 31 evaluator """rankbased""" +655 32 dataset """kinships""" +655 32 model """simple""" +655 32 loss """bceaftersigmoid""" +655 32 regularizer """no""" +655 32 optimizer """adadelta""" +655 32 training_loop """owa""" +655 32 negative_sampler """basic""" +655 32 evaluator """rankbased""" +655 33 dataset """kinships""" +655 33 model """simple""" +655 33 loss """bceaftersigmoid""" +655 33 regularizer """no""" +655 33 optimizer """adadelta""" +655 33 training_loop """owa""" +655 33 negative_sampler """basic""" +655 33 evaluator """rankbased""" +655 34 dataset """kinships""" +655 34 model """simple""" +655 34 
loss """bceaftersigmoid""" +655 34 regularizer """no""" +655 34 optimizer """adadelta""" +655 34 training_loop """owa""" +655 34 negative_sampler """basic""" +655 34 evaluator """rankbased""" +655 35 dataset """kinships""" +655 35 model """simple""" +655 35 loss """bceaftersigmoid""" +655 35 regularizer """no""" +655 35 optimizer """adadelta""" +655 35 training_loop """owa""" +655 35 negative_sampler """basic""" +655 35 evaluator """rankbased""" +655 36 dataset """kinships""" +655 36 model """simple""" +655 36 loss """bceaftersigmoid""" +655 36 regularizer """no""" +655 36 optimizer """adadelta""" +655 36 training_loop """owa""" +655 36 negative_sampler """basic""" +655 36 evaluator """rankbased""" +655 37 dataset """kinships""" +655 37 model """simple""" +655 37 loss """bceaftersigmoid""" +655 37 regularizer """no""" +655 37 optimizer """adadelta""" +655 37 training_loop """owa""" +655 37 negative_sampler """basic""" +655 37 evaluator """rankbased""" +655 38 dataset """kinships""" +655 38 model """simple""" +655 38 loss """bceaftersigmoid""" +655 38 regularizer """no""" +655 38 optimizer """adadelta""" +655 38 training_loop """owa""" +655 38 negative_sampler """basic""" +655 38 evaluator """rankbased""" +655 39 dataset """kinships""" +655 39 model """simple""" +655 39 loss """bceaftersigmoid""" +655 39 regularizer """no""" +655 39 optimizer """adadelta""" +655 39 training_loop """owa""" +655 39 negative_sampler """basic""" +655 39 evaluator """rankbased""" +655 40 dataset """kinships""" +655 40 model """simple""" +655 40 loss """bceaftersigmoid""" +655 40 regularizer """no""" +655 40 optimizer """adadelta""" +655 40 training_loop """owa""" +655 40 negative_sampler """basic""" +655 40 evaluator """rankbased""" +655 41 dataset """kinships""" +655 41 model """simple""" +655 41 loss """bceaftersigmoid""" +655 41 regularizer """no""" +655 41 optimizer """adadelta""" +655 41 training_loop """owa""" +655 41 negative_sampler """basic""" +655 41 evaluator """rankbased""" 
+655 42 dataset """kinships""" +655 42 model """simple""" +655 42 loss """bceaftersigmoid""" +655 42 regularizer """no""" +655 42 optimizer """adadelta""" +655 42 training_loop """owa""" +655 42 negative_sampler """basic""" +655 42 evaluator """rankbased""" +655 43 dataset """kinships""" +655 43 model """simple""" +655 43 loss """bceaftersigmoid""" +655 43 regularizer """no""" +655 43 optimizer """adadelta""" +655 43 training_loop """owa""" +655 43 negative_sampler """basic""" +655 43 evaluator """rankbased""" +655 44 dataset """kinships""" +655 44 model """simple""" +655 44 loss """bceaftersigmoid""" +655 44 regularizer """no""" +655 44 optimizer """adadelta""" +655 44 training_loop """owa""" +655 44 negative_sampler """basic""" +655 44 evaluator """rankbased""" +655 45 dataset """kinships""" +655 45 model """simple""" +655 45 loss """bceaftersigmoid""" +655 45 regularizer """no""" +655 45 optimizer """adadelta""" +655 45 training_loop """owa""" +655 45 negative_sampler """basic""" +655 45 evaluator """rankbased""" +655 46 dataset """kinships""" +655 46 model """simple""" +655 46 loss """bceaftersigmoid""" +655 46 regularizer """no""" +655 46 optimizer """adadelta""" +655 46 training_loop """owa""" +655 46 negative_sampler """basic""" +655 46 evaluator """rankbased""" +655 47 dataset """kinships""" +655 47 model """simple""" +655 47 loss """bceaftersigmoid""" +655 47 regularizer """no""" +655 47 optimizer """adadelta""" +655 47 training_loop """owa""" +655 47 negative_sampler """basic""" +655 47 evaluator """rankbased""" +655 48 dataset """kinships""" +655 48 model """simple""" +655 48 loss """bceaftersigmoid""" +655 48 regularizer """no""" +655 48 optimizer """adadelta""" +655 48 training_loop """owa""" +655 48 negative_sampler """basic""" +655 48 evaluator """rankbased""" +655 49 dataset """kinships""" +655 49 model """simple""" +655 49 loss """bceaftersigmoid""" +655 49 regularizer """no""" +655 49 optimizer """adadelta""" +655 49 training_loop """owa""" +655 
49 negative_sampler """basic""" +655 49 evaluator """rankbased""" +655 50 dataset """kinships""" +655 50 model """simple""" +655 50 loss """bceaftersigmoid""" +655 50 regularizer """no""" +655 50 optimizer """adadelta""" +655 50 training_loop """owa""" +655 50 negative_sampler """basic""" +655 50 evaluator """rankbased""" +655 51 dataset """kinships""" +655 51 model """simple""" +655 51 loss """bceaftersigmoid""" +655 51 regularizer """no""" +655 51 optimizer """adadelta""" +655 51 training_loop """owa""" +655 51 negative_sampler """basic""" +655 51 evaluator """rankbased""" +655 52 dataset """kinships""" +655 52 model """simple""" +655 52 loss """bceaftersigmoid""" +655 52 regularizer """no""" +655 52 optimizer """adadelta""" +655 52 training_loop """owa""" +655 52 negative_sampler """basic""" +655 52 evaluator """rankbased""" +655 53 dataset """kinships""" +655 53 model """simple""" +655 53 loss """bceaftersigmoid""" +655 53 regularizer """no""" +655 53 optimizer """adadelta""" +655 53 training_loop """owa""" +655 53 negative_sampler """basic""" +655 53 evaluator """rankbased""" +655 54 dataset """kinships""" +655 54 model """simple""" +655 54 loss """bceaftersigmoid""" +655 54 regularizer """no""" +655 54 optimizer """adadelta""" +655 54 training_loop """owa""" +655 54 negative_sampler """basic""" +655 54 evaluator """rankbased""" +655 55 dataset """kinships""" +655 55 model """simple""" +655 55 loss """bceaftersigmoid""" +655 55 regularizer """no""" +655 55 optimizer """adadelta""" +655 55 training_loop """owa""" +655 55 negative_sampler """basic""" +655 55 evaluator """rankbased""" +655 56 dataset """kinships""" +655 56 model """simple""" +655 56 loss """bceaftersigmoid""" +655 56 regularizer """no""" +655 56 optimizer """adadelta""" +655 56 training_loop """owa""" +655 56 negative_sampler """basic""" +655 56 evaluator """rankbased""" +655 57 dataset """kinships""" +655 57 model """simple""" +655 57 loss """bceaftersigmoid""" +655 57 regularizer """no""" +655 
57 optimizer """adadelta""" +655 57 training_loop """owa""" +655 57 negative_sampler """basic""" +655 57 evaluator """rankbased""" +655 58 dataset """kinships""" +655 58 model """simple""" +655 58 loss """bceaftersigmoid""" +655 58 regularizer """no""" +655 58 optimizer """adadelta""" +655 58 training_loop """owa""" +655 58 negative_sampler """basic""" +655 58 evaluator """rankbased""" +655 59 dataset """kinships""" +655 59 model """simple""" +655 59 loss """bceaftersigmoid""" +655 59 regularizer """no""" +655 59 optimizer """adadelta""" +655 59 training_loop """owa""" +655 59 negative_sampler """basic""" +655 59 evaluator """rankbased""" +655 60 dataset """kinships""" +655 60 model """simple""" +655 60 loss """bceaftersigmoid""" +655 60 regularizer """no""" +655 60 optimizer """adadelta""" +655 60 training_loop """owa""" +655 60 negative_sampler """basic""" +655 60 evaluator """rankbased""" +655 61 dataset """kinships""" +655 61 model """simple""" +655 61 loss """bceaftersigmoid""" +655 61 regularizer """no""" +655 61 optimizer """adadelta""" +655 61 training_loop """owa""" +655 61 negative_sampler """basic""" +655 61 evaluator """rankbased""" +655 62 dataset """kinships""" +655 62 model """simple""" +655 62 loss """bceaftersigmoid""" +655 62 regularizer """no""" +655 62 optimizer """adadelta""" +655 62 training_loop """owa""" +655 62 negative_sampler """basic""" +655 62 evaluator """rankbased""" +655 63 dataset """kinships""" +655 63 model """simple""" +655 63 loss """bceaftersigmoid""" +655 63 regularizer """no""" +655 63 optimizer """adadelta""" +655 63 training_loop """owa""" +655 63 negative_sampler """basic""" +655 63 evaluator """rankbased""" +655 64 dataset """kinships""" +655 64 model """simple""" +655 64 loss """bceaftersigmoid""" +655 64 regularizer """no""" +655 64 optimizer """adadelta""" +655 64 training_loop """owa""" +655 64 negative_sampler """basic""" +655 64 evaluator """rankbased""" +655 65 dataset """kinships""" +655 65 model """simple""" +655 
65 loss """bceaftersigmoid""" +655 65 regularizer """no""" +655 65 optimizer """adadelta""" +655 65 training_loop """owa""" +655 65 negative_sampler """basic""" +655 65 evaluator """rankbased""" +655 66 dataset """kinships""" +655 66 model """simple""" +655 66 loss """bceaftersigmoid""" +655 66 regularizer """no""" +655 66 optimizer """adadelta""" +655 66 training_loop """owa""" +655 66 negative_sampler """basic""" +655 66 evaluator """rankbased""" +655 67 dataset """kinships""" +655 67 model """simple""" +655 67 loss """bceaftersigmoid""" +655 67 regularizer """no""" +655 67 optimizer """adadelta""" +655 67 training_loop """owa""" +655 67 negative_sampler """basic""" +655 67 evaluator """rankbased""" +655 68 dataset """kinships""" +655 68 model """simple""" +655 68 loss """bceaftersigmoid""" +655 68 regularizer """no""" +655 68 optimizer """adadelta""" +655 68 training_loop """owa""" +655 68 negative_sampler """basic""" +655 68 evaluator """rankbased""" +655 69 dataset """kinships""" +655 69 model """simple""" +655 69 loss """bceaftersigmoid""" +655 69 regularizer """no""" +655 69 optimizer """adadelta""" +655 69 training_loop """owa""" +655 69 negative_sampler """basic""" +655 69 evaluator """rankbased""" +655 70 dataset """kinships""" +655 70 model """simple""" +655 70 loss """bceaftersigmoid""" +655 70 regularizer """no""" +655 70 optimizer """adadelta""" +655 70 training_loop """owa""" +655 70 negative_sampler """basic""" +655 70 evaluator """rankbased""" +655 71 dataset """kinships""" +655 71 model """simple""" +655 71 loss """bceaftersigmoid""" +655 71 regularizer """no""" +655 71 optimizer """adadelta""" +655 71 training_loop """owa""" +655 71 negative_sampler """basic""" +655 71 evaluator """rankbased""" +655 72 dataset """kinships""" +655 72 model """simple""" +655 72 loss """bceaftersigmoid""" +655 72 regularizer """no""" +655 72 optimizer """adadelta""" +655 72 training_loop """owa""" +655 72 negative_sampler """basic""" +655 72 evaluator 
"""rankbased""" +655 73 dataset """kinships""" +655 73 model """simple""" +655 73 loss """bceaftersigmoid""" +655 73 regularizer """no""" +655 73 optimizer """adadelta""" +655 73 training_loop """owa""" +655 73 negative_sampler """basic""" +655 73 evaluator """rankbased""" +655 74 dataset """kinships""" +655 74 model """simple""" +655 74 loss """bceaftersigmoid""" +655 74 regularizer """no""" +655 74 optimizer """adadelta""" +655 74 training_loop """owa""" +655 74 negative_sampler """basic""" +655 74 evaluator """rankbased""" +655 75 dataset """kinships""" +655 75 model """simple""" +655 75 loss """bceaftersigmoid""" +655 75 regularizer """no""" +655 75 optimizer """adadelta""" +655 75 training_loop """owa""" +655 75 negative_sampler """basic""" +655 75 evaluator """rankbased""" +655 76 dataset """kinships""" +655 76 model """simple""" +655 76 loss """bceaftersigmoid""" +655 76 regularizer """no""" +655 76 optimizer """adadelta""" +655 76 training_loop """owa""" +655 76 negative_sampler """basic""" +655 76 evaluator """rankbased""" +655 77 dataset """kinships""" +655 77 model """simple""" +655 77 loss """bceaftersigmoid""" +655 77 regularizer """no""" +655 77 optimizer """adadelta""" +655 77 training_loop """owa""" +655 77 negative_sampler """basic""" +655 77 evaluator """rankbased""" +655 78 dataset """kinships""" +655 78 model """simple""" +655 78 loss """bceaftersigmoid""" +655 78 regularizer """no""" +655 78 optimizer """adadelta""" +655 78 training_loop """owa""" +655 78 negative_sampler """basic""" +655 78 evaluator """rankbased""" +655 79 dataset """kinships""" +655 79 model """simple""" +655 79 loss """bceaftersigmoid""" +655 79 regularizer """no""" +655 79 optimizer """adadelta""" +655 79 training_loop """owa""" +655 79 negative_sampler """basic""" +655 79 evaluator """rankbased""" +655 80 dataset """kinships""" +655 80 model """simple""" +655 80 loss """bceaftersigmoid""" +655 80 regularizer """no""" +655 80 optimizer """adadelta""" +655 80 training_loop 
"""owa""" +655 80 negative_sampler """basic""" +655 80 evaluator """rankbased""" +655 81 dataset """kinships""" +655 81 model """simple""" +655 81 loss """bceaftersigmoid""" +655 81 regularizer """no""" +655 81 optimizer """adadelta""" +655 81 training_loop """owa""" +655 81 negative_sampler """basic""" +655 81 evaluator """rankbased""" +655 82 dataset """kinships""" +655 82 model """simple""" +655 82 loss """bceaftersigmoid""" +655 82 regularizer """no""" +655 82 optimizer """adadelta""" +655 82 training_loop """owa""" +655 82 negative_sampler """basic""" +655 82 evaluator """rankbased""" +655 83 dataset """kinships""" +655 83 model """simple""" +655 83 loss """bceaftersigmoid""" +655 83 regularizer """no""" +655 83 optimizer """adadelta""" +655 83 training_loop """owa""" +655 83 negative_sampler """basic""" +655 83 evaluator """rankbased""" +655 84 dataset """kinships""" +655 84 model """simple""" +655 84 loss """bceaftersigmoid""" +655 84 regularizer """no""" +655 84 optimizer """adadelta""" +655 84 training_loop """owa""" +655 84 negative_sampler """basic""" +655 84 evaluator """rankbased""" +655 85 dataset """kinships""" +655 85 model """simple""" +655 85 loss """bceaftersigmoid""" +655 85 regularizer """no""" +655 85 optimizer """adadelta""" +655 85 training_loop """owa""" +655 85 negative_sampler """basic""" +655 85 evaluator """rankbased""" +655 86 dataset """kinships""" +655 86 model """simple""" +655 86 loss """bceaftersigmoid""" +655 86 regularizer """no""" +655 86 optimizer """adadelta""" +655 86 training_loop """owa""" +655 86 negative_sampler """basic""" +655 86 evaluator """rankbased""" +655 87 dataset """kinships""" +655 87 model """simple""" +655 87 loss """bceaftersigmoid""" +655 87 regularizer """no""" +655 87 optimizer """adadelta""" +655 87 training_loop """owa""" +655 87 negative_sampler """basic""" +655 87 evaluator """rankbased""" +655 88 dataset """kinships""" +655 88 model """simple""" +655 88 loss """bceaftersigmoid""" +655 88 regularizer 
"""no""" +655 88 optimizer """adadelta""" +655 88 training_loop """owa""" +655 88 negative_sampler """basic""" +655 88 evaluator """rankbased""" +655 89 dataset """kinships""" +655 89 model """simple""" +655 89 loss """bceaftersigmoid""" +655 89 regularizer """no""" +655 89 optimizer """adadelta""" +655 89 training_loop """owa""" +655 89 negative_sampler """basic""" +655 89 evaluator """rankbased""" +655 90 dataset """kinships""" +655 90 model """simple""" +655 90 loss """bceaftersigmoid""" +655 90 regularizer """no""" +655 90 optimizer """adadelta""" +655 90 training_loop """owa""" +655 90 negative_sampler """basic""" +655 90 evaluator """rankbased""" +655 91 dataset """kinships""" +655 91 model """simple""" +655 91 loss """bceaftersigmoid""" +655 91 regularizer """no""" +655 91 optimizer """adadelta""" +655 91 training_loop """owa""" +655 91 negative_sampler """basic""" +655 91 evaluator """rankbased""" +655 92 dataset """kinships""" +655 92 model """simple""" +655 92 loss """bceaftersigmoid""" +655 92 regularizer """no""" +655 92 optimizer """adadelta""" +655 92 training_loop """owa""" +655 92 negative_sampler """basic""" +655 92 evaluator """rankbased""" +655 93 dataset """kinships""" +655 93 model """simple""" +655 93 loss """bceaftersigmoid""" +655 93 regularizer """no""" +655 93 optimizer """adadelta""" +655 93 training_loop """owa""" +655 93 negative_sampler """basic""" +655 93 evaluator """rankbased""" +655 94 dataset """kinships""" +655 94 model """simple""" +655 94 loss """bceaftersigmoid""" +655 94 regularizer """no""" +655 94 optimizer """adadelta""" +655 94 training_loop """owa""" +655 94 negative_sampler """basic""" +655 94 evaluator """rankbased""" +655 95 dataset """kinships""" +655 95 model """simple""" +655 95 loss """bceaftersigmoid""" +655 95 regularizer """no""" +655 95 optimizer """adadelta""" +655 95 training_loop """owa""" +655 95 negative_sampler """basic""" +655 95 evaluator """rankbased""" +655 96 dataset """kinships""" +655 96 model 
"""simple""" +655 96 loss """bceaftersigmoid""" +655 96 regularizer """no""" +655 96 optimizer """adadelta""" +655 96 training_loop """owa""" +655 96 negative_sampler """basic""" +655 96 evaluator """rankbased""" +655 97 dataset """kinships""" +655 97 model """simple""" +655 97 loss """bceaftersigmoid""" +655 97 regularizer """no""" +655 97 optimizer """adadelta""" +655 97 training_loop """owa""" +655 97 negative_sampler """basic""" +655 97 evaluator """rankbased""" +655 98 dataset """kinships""" +655 98 model """simple""" +655 98 loss """bceaftersigmoid""" +655 98 regularizer """no""" +655 98 optimizer """adadelta""" +655 98 training_loop """owa""" +655 98 negative_sampler """basic""" +655 98 evaluator """rankbased""" +655 99 dataset """kinships""" +655 99 model """simple""" +655 99 loss """bceaftersigmoid""" +655 99 regularizer """no""" +655 99 optimizer """adadelta""" +655 99 training_loop """owa""" +655 99 negative_sampler """basic""" +655 99 evaluator """rankbased""" +655 100 dataset """kinships""" +655 100 model """simple""" +655 100 loss """bceaftersigmoid""" +655 100 regularizer """no""" +655 100 optimizer """adadelta""" +655 100 training_loop """owa""" +655 100 negative_sampler """basic""" +655 100 evaluator """rankbased""" +656 1 model.embedding_dim 2.0 +656 1 negative_sampler.num_negs_per_pos 80.0 +656 1 training.batch_size 2.0 +656 2 model.embedding_dim 0.0 +656 2 negative_sampler.num_negs_per_pos 30.0 +656 2 training.batch_size 1.0 +656 3 model.embedding_dim 2.0 +656 3 negative_sampler.num_negs_per_pos 72.0 +656 3 training.batch_size 2.0 +656 4 model.embedding_dim 2.0 +656 4 negative_sampler.num_negs_per_pos 77.0 +656 4 training.batch_size 0.0 +656 5 model.embedding_dim 2.0 +656 5 negative_sampler.num_negs_per_pos 12.0 +656 5 training.batch_size 1.0 +656 6 model.embedding_dim 1.0 +656 6 negative_sampler.num_negs_per_pos 92.0 +656 6 training.batch_size 0.0 +656 7 model.embedding_dim 0.0 +656 7 negative_sampler.num_negs_per_pos 58.0 +656 7 
training.batch_size 2.0 +656 8 model.embedding_dim 2.0 +656 8 negative_sampler.num_negs_per_pos 0.0 +656 8 training.batch_size 0.0 +656 9 model.embedding_dim 0.0 +656 9 negative_sampler.num_negs_per_pos 68.0 +656 9 training.batch_size 0.0 +656 10 model.embedding_dim 2.0 +656 10 negative_sampler.num_negs_per_pos 30.0 +656 10 training.batch_size 0.0 +656 11 model.embedding_dim 2.0 +656 11 negative_sampler.num_negs_per_pos 27.0 +656 11 training.batch_size 0.0 +656 12 model.embedding_dim 2.0 +656 12 negative_sampler.num_negs_per_pos 9.0 +656 12 training.batch_size 1.0 +656 13 model.embedding_dim 0.0 +656 13 negative_sampler.num_negs_per_pos 46.0 +656 13 training.batch_size 1.0 +656 14 model.embedding_dim 0.0 +656 14 negative_sampler.num_negs_per_pos 72.0 +656 14 training.batch_size 0.0 +656 15 model.embedding_dim 0.0 +656 15 negative_sampler.num_negs_per_pos 4.0 +656 15 training.batch_size 2.0 +656 16 model.embedding_dim 1.0 +656 16 negative_sampler.num_negs_per_pos 63.0 +656 16 training.batch_size 0.0 +656 17 model.embedding_dim 2.0 +656 17 negative_sampler.num_negs_per_pos 19.0 +656 17 training.batch_size 1.0 +656 18 model.embedding_dim 1.0 +656 18 negative_sampler.num_negs_per_pos 75.0 +656 18 training.batch_size 1.0 +656 19 model.embedding_dim 0.0 +656 19 negative_sampler.num_negs_per_pos 87.0 +656 19 training.batch_size 0.0 +656 20 model.embedding_dim 2.0 +656 20 negative_sampler.num_negs_per_pos 7.0 +656 20 training.batch_size 1.0 +656 21 model.embedding_dim 1.0 +656 21 negative_sampler.num_negs_per_pos 36.0 +656 21 training.batch_size 0.0 +656 22 model.embedding_dim 2.0 +656 22 negative_sampler.num_negs_per_pos 54.0 +656 22 training.batch_size 1.0 +656 23 model.embedding_dim 2.0 +656 23 negative_sampler.num_negs_per_pos 81.0 +656 23 training.batch_size 0.0 +656 24 model.embedding_dim 1.0 +656 24 negative_sampler.num_negs_per_pos 24.0 +656 24 training.batch_size 0.0 +656 25 model.embedding_dim 2.0 +656 25 negative_sampler.num_negs_per_pos 32.0 +656 25 
training.batch_size 2.0 +656 26 model.embedding_dim 0.0 +656 26 negative_sampler.num_negs_per_pos 51.0 +656 26 training.batch_size 2.0 +656 27 model.embedding_dim 1.0 +656 27 negative_sampler.num_negs_per_pos 24.0 +656 27 training.batch_size 2.0 +656 28 model.embedding_dim 2.0 +656 28 negative_sampler.num_negs_per_pos 18.0 +656 28 training.batch_size 2.0 +656 29 model.embedding_dim 2.0 +656 29 negative_sampler.num_negs_per_pos 69.0 +656 29 training.batch_size 0.0 +656 30 model.embedding_dim 0.0 +656 30 negative_sampler.num_negs_per_pos 3.0 +656 30 training.batch_size 0.0 +656 31 model.embedding_dim 1.0 +656 31 negative_sampler.num_negs_per_pos 52.0 +656 31 training.batch_size 0.0 +656 32 model.embedding_dim 2.0 +656 32 negative_sampler.num_negs_per_pos 16.0 +656 32 training.batch_size 0.0 +656 33 model.embedding_dim 0.0 +656 33 negative_sampler.num_negs_per_pos 27.0 +656 33 training.batch_size 0.0 +656 34 model.embedding_dim 1.0 +656 34 negative_sampler.num_negs_per_pos 21.0 +656 34 training.batch_size 0.0 +656 35 model.embedding_dim 2.0 +656 35 negative_sampler.num_negs_per_pos 73.0 +656 35 training.batch_size 2.0 +656 36 model.embedding_dim 0.0 +656 36 negative_sampler.num_negs_per_pos 0.0 +656 36 training.batch_size 1.0 +656 37 model.embedding_dim 2.0 +656 37 negative_sampler.num_negs_per_pos 19.0 +656 37 training.batch_size 1.0 +656 38 model.embedding_dim 2.0 +656 38 negative_sampler.num_negs_per_pos 77.0 +656 38 training.batch_size 1.0 +656 39 model.embedding_dim 1.0 +656 39 negative_sampler.num_negs_per_pos 57.0 +656 39 training.batch_size 2.0 +656 40 model.embedding_dim 1.0 +656 40 negative_sampler.num_negs_per_pos 20.0 +656 40 training.batch_size 1.0 +656 41 model.embedding_dim 0.0 +656 41 negative_sampler.num_negs_per_pos 67.0 +656 41 training.batch_size 0.0 +656 42 model.embedding_dim 1.0 +656 42 negative_sampler.num_negs_per_pos 2.0 +656 42 training.batch_size 2.0 +656 43 model.embedding_dim 0.0 +656 43 negative_sampler.num_negs_per_pos 86.0 +656 43 
training.batch_size 2.0 +656 44 model.embedding_dim 1.0 +656 44 negative_sampler.num_negs_per_pos 49.0 +656 44 training.batch_size 1.0 +656 45 model.embedding_dim 1.0 +656 45 negative_sampler.num_negs_per_pos 70.0 +656 45 training.batch_size 0.0 +656 46 model.embedding_dim 1.0 +656 46 negative_sampler.num_negs_per_pos 15.0 +656 46 training.batch_size 2.0 +656 47 model.embedding_dim 0.0 +656 47 negative_sampler.num_negs_per_pos 42.0 +656 47 training.batch_size 0.0 +656 48 model.embedding_dim 0.0 +656 48 negative_sampler.num_negs_per_pos 21.0 +656 48 training.batch_size 2.0 +656 49 model.embedding_dim 2.0 +656 49 negative_sampler.num_negs_per_pos 86.0 +656 49 training.batch_size 0.0 +656 50 model.embedding_dim 2.0 +656 50 negative_sampler.num_negs_per_pos 68.0 +656 50 training.batch_size 2.0 +656 51 model.embedding_dim 0.0 +656 51 negative_sampler.num_negs_per_pos 31.0 +656 51 training.batch_size 2.0 +656 52 model.embedding_dim 2.0 +656 52 negative_sampler.num_negs_per_pos 85.0 +656 52 training.batch_size 0.0 +656 53 model.embedding_dim 0.0 +656 53 negative_sampler.num_negs_per_pos 93.0 +656 53 training.batch_size 2.0 +656 54 model.embedding_dim 0.0 +656 54 negative_sampler.num_negs_per_pos 14.0 +656 54 training.batch_size 2.0 +656 55 model.embedding_dim 2.0 +656 55 negative_sampler.num_negs_per_pos 80.0 +656 55 training.batch_size 0.0 +656 56 model.embedding_dim 1.0 +656 56 negative_sampler.num_negs_per_pos 31.0 +656 56 training.batch_size 2.0 +656 57 model.embedding_dim 2.0 +656 57 negative_sampler.num_negs_per_pos 41.0 +656 57 training.batch_size 0.0 +656 58 model.embedding_dim 1.0 +656 58 negative_sampler.num_negs_per_pos 15.0 +656 58 training.batch_size 2.0 +656 59 model.embedding_dim 2.0 +656 59 negative_sampler.num_negs_per_pos 22.0 +656 59 training.batch_size 2.0 +656 60 model.embedding_dim 2.0 +656 60 negative_sampler.num_negs_per_pos 50.0 +656 60 training.batch_size 0.0 +656 61 model.embedding_dim 0.0 +656 61 negative_sampler.num_negs_per_pos 39.0 +656 61 
training.batch_size 2.0 +656 62 model.embedding_dim 0.0 +656 62 negative_sampler.num_negs_per_pos 74.0 +656 62 training.batch_size 1.0 +656 63 model.embedding_dim 2.0 +656 63 negative_sampler.num_negs_per_pos 9.0 +656 63 training.batch_size 1.0 +656 64 model.embedding_dim 0.0 +656 64 negative_sampler.num_negs_per_pos 89.0 +656 64 training.batch_size 1.0 +656 65 model.embedding_dim 1.0 +656 65 negative_sampler.num_negs_per_pos 13.0 +656 65 training.batch_size 1.0 +656 66 model.embedding_dim 2.0 +656 66 negative_sampler.num_negs_per_pos 50.0 +656 66 training.batch_size 0.0 +656 67 model.embedding_dim 1.0 +656 67 negative_sampler.num_negs_per_pos 56.0 +656 67 training.batch_size 1.0 +656 68 model.embedding_dim 2.0 +656 68 negative_sampler.num_negs_per_pos 34.0 +656 68 training.batch_size 0.0 +656 69 model.embedding_dim 0.0 +656 69 negative_sampler.num_negs_per_pos 78.0 +656 69 training.batch_size 2.0 +656 70 model.embedding_dim 1.0 +656 70 negative_sampler.num_negs_per_pos 14.0 +656 70 training.batch_size 0.0 +656 71 model.embedding_dim 0.0 +656 71 negative_sampler.num_negs_per_pos 36.0 +656 71 training.batch_size 1.0 +656 72 model.embedding_dim 2.0 +656 72 negative_sampler.num_negs_per_pos 94.0 +656 72 training.batch_size 1.0 +656 73 model.embedding_dim 2.0 +656 73 negative_sampler.num_negs_per_pos 6.0 +656 73 training.batch_size 0.0 +656 74 model.embedding_dim 1.0 +656 74 negative_sampler.num_negs_per_pos 2.0 +656 74 training.batch_size 2.0 +656 75 model.embedding_dim 0.0 +656 75 negative_sampler.num_negs_per_pos 15.0 +656 75 training.batch_size 1.0 +656 76 model.embedding_dim 1.0 +656 76 negative_sampler.num_negs_per_pos 63.0 +656 76 training.batch_size 2.0 +656 77 model.embedding_dim 2.0 +656 77 negative_sampler.num_negs_per_pos 34.0 +656 77 training.batch_size 1.0 +656 78 model.embedding_dim 2.0 +656 78 negative_sampler.num_negs_per_pos 40.0 +656 78 training.batch_size 1.0 +656 79 model.embedding_dim 1.0 +656 79 negative_sampler.num_negs_per_pos 87.0 +656 79 
training.batch_size 2.0 +656 80 model.embedding_dim 0.0 +656 80 negative_sampler.num_negs_per_pos 59.0 +656 80 training.batch_size 1.0 +656 81 model.embedding_dim 1.0 +656 81 negative_sampler.num_negs_per_pos 11.0 +656 81 training.batch_size 0.0 +656 82 model.embedding_dim 0.0 +656 82 negative_sampler.num_negs_per_pos 44.0 +656 82 training.batch_size 0.0 +656 83 model.embedding_dim 1.0 +656 83 negative_sampler.num_negs_per_pos 91.0 +656 83 training.batch_size 0.0 +656 84 model.embedding_dim 2.0 +656 84 negative_sampler.num_negs_per_pos 66.0 +656 84 training.batch_size 2.0 +656 85 model.embedding_dim 1.0 +656 85 negative_sampler.num_negs_per_pos 85.0 +656 85 training.batch_size 2.0 +656 86 model.embedding_dim 0.0 +656 86 negative_sampler.num_negs_per_pos 21.0 +656 86 training.batch_size 0.0 +656 87 model.embedding_dim 2.0 +656 87 negative_sampler.num_negs_per_pos 29.0 +656 87 training.batch_size 1.0 +656 88 model.embedding_dim 2.0 +656 88 negative_sampler.num_negs_per_pos 14.0 +656 88 training.batch_size 2.0 +656 89 model.embedding_dim 2.0 +656 89 negative_sampler.num_negs_per_pos 3.0 +656 89 training.batch_size 2.0 +656 90 model.embedding_dim 1.0 +656 90 negative_sampler.num_negs_per_pos 18.0 +656 90 training.batch_size 1.0 +656 91 model.embedding_dim 1.0 +656 91 negative_sampler.num_negs_per_pos 53.0 +656 91 training.batch_size 1.0 +656 92 model.embedding_dim 1.0 +656 92 negative_sampler.num_negs_per_pos 24.0 +656 92 training.batch_size 2.0 +656 93 model.embedding_dim 2.0 +656 93 negative_sampler.num_negs_per_pos 59.0 +656 93 training.batch_size 2.0 +656 94 model.embedding_dim 0.0 +656 94 negative_sampler.num_negs_per_pos 1.0 +656 94 training.batch_size 2.0 +656 95 model.embedding_dim 2.0 +656 95 negative_sampler.num_negs_per_pos 86.0 +656 95 training.batch_size 0.0 +656 96 model.embedding_dim 2.0 +656 96 negative_sampler.num_negs_per_pos 69.0 +656 96 training.batch_size 2.0 +656 97 model.embedding_dim 1.0 +656 97 negative_sampler.num_negs_per_pos 38.0 +656 97 
training.batch_size 2.0 +656 98 model.embedding_dim 2.0 +656 98 negative_sampler.num_negs_per_pos 74.0 +656 98 training.batch_size 2.0 +656 99 model.embedding_dim 2.0 +656 99 negative_sampler.num_negs_per_pos 80.0 +656 99 training.batch_size 2.0 +656 100 model.embedding_dim 1.0 +656 100 negative_sampler.num_negs_per_pos 84.0 +656 100 training.batch_size 0.0 +656 1 dataset """kinships""" +656 1 model """simple""" +656 1 loss """softplus""" +656 1 regularizer """no""" +656 1 optimizer """adadelta""" +656 1 training_loop """owa""" +656 1 negative_sampler """basic""" +656 1 evaluator """rankbased""" +656 2 dataset """kinships""" +656 2 model """simple""" +656 2 loss """softplus""" +656 2 regularizer """no""" +656 2 optimizer """adadelta""" +656 2 training_loop """owa""" +656 2 negative_sampler """basic""" +656 2 evaluator """rankbased""" +656 3 dataset """kinships""" +656 3 model """simple""" +656 3 loss """softplus""" +656 3 regularizer """no""" +656 3 optimizer """adadelta""" +656 3 training_loop """owa""" +656 3 negative_sampler """basic""" +656 3 evaluator """rankbased""" +656 4 dataset """kinships""" +656 4 model """simple""" +656 4 loss """softplus""" +656 4 regularizer """no""" +656 4 optimizer """adadelta""" +656 4 training_loop """owa""" +656 4 negative_sampler """basic""" +656 4 evaluator """rankbased""" +656 5 dataset """kinships""" +656 5 model """simple""" +656 5 loss """softplus""" +656 5 regularizer """no""" +656 5 optimizer """adadelta""" +656 5 training_loop """owa""" +656 5 negative_sampler """basic""" +656 5 evaluator """rankbased""" +656 6 dataset """kinships""" +656 6 model """simple""" +656 6 loss """softplus""" +656 6 regularizer """no""" +656 6 optimizer """adadelta""" +656 6 training_loop """owa""" +656 6 negative_sampler """basic""" +656 6 evaluator """rankbased""" +656 7 dataset """kinships""" +656 7 model """simple""" +656 7 loss """softplus""" +656 7 regularizer """no""" +656 7 optimizer """adadelta""" +656 7 training_loop """owa""" +656 7 
negative_sampler """basic""" +656 7 evaluator """rankbased""" +656 8 dataset """kinships""" +656 8 model """simple""" +656 8 loss """softplus""" +656 8 regularizer """no""" +656 8 optimizer """adadelta""" +656 8 training_loop """owa""" +656 8 negative_sampler """basic""" +656 8 evaluator """rankbased""" +656 9 dataset """kinships""" +656 9 model """simple""" +656 9 loss """softplus""" +656 9 regularizer """no""" +656 9 optimizer """adadelta""" +656 9 training_loop """owa""" +656 9 negative_sampler """basic""" +656 9 evaluator """rankbased""" +656 10 dataset """kinships""" +656 10 model """simple""" +656 10 loss """softplus""" +656 10 regularizer """no""" +656 10 optimizer """adadelta""" +656 10 training_loop """owa""" +656 10 negative_sampler """basic""" +656 10 evaluator """rankbased""" +656 11 dataset """kinships""" +656 11 model """simple""" +656 11 loss """softplus""" +656 11 regularizer """no""" +656 11 optimizer """adadelta""" +656 11 training_loop """owa""" +656 11 negative_sampler """basic""" +656 11 evaluator """rankbased""" +656 12 dataset """kinships""" +656 12 model """simple""" +656 12 loss """softplus""" +656 12 regularizer """no""" +656 12 optimizer """adadelta""" +656 12 training_loop """owa""" +656 12 negative_sampler """basic""" +656 12 evaluator """rankbased""" +656 13 dataset """kinships""" +656 13 model """simple""" +656 13 loss """softplus""" +656 13 regularizer """no""" +656 13 optimizer """adadelta""" +656 13 training_loop """owa""" +656 13 negative_sampler """basic""" +656 13 evaluator """rankbased""" +656 14 dataset """kinships""" +656 14 model """simple""" +656 14 loss """softplus""" +656 14 regularizer """no""" +656 14 optimizer """adadelta""" +656 14 training_loop """owa""" +656 14 negative_sampler """basic""" +656 14 evaluator """rankbased""" +656 15 dataset """kinships""" +656 15 model """simple""" +656 15 loss """softplus""" +656 15 regularizer """no""" +656 15 optimizer """adadelta""" +656 15 training_loop """owa""" +656 15 
negative_sampler """basic""" +656 15 evaluator """rankbased""" +656 16 dataset """kinships""" +656 16 model """simple""" +656 16 loss """softplus""" +656 16 regularizer """no""" +656 16 optimizer """adadelta""" +656 16 training_loop """owa""" +656 16 negative_sampler """basic""" +656 16 evaluator """rankbased""" +656 17 dataset """kinships""" +656 17 model """simple""" +656 17 loss """softplus""" +656 17 regularizer """no""" +656 17 optimizer """adadelta""" +656 17 training_loop """owa""" +656 17 negative_sampler """basic""" +656 17 evaluator """rankbased""" +656 18 dataset """kinships""" +656 18 model """simple""" +656 18 loss """softplus""" +656 18 regularizer """no""" +656 18 optimizer """adadelta""" +656 18 training_loop """owa""" +656 18 negative_sampler """basic""" +656 18 evaluator """rankbased""" +656 19 dataset """kinships""" +656 19 model """simple""" +656 19 loss """softplus""" +656 19 regularizer """no""" +656 19 optimizer """adadelta""" +656 19 training_loop """owa""" +656 19 negative_sampler """basic""" +656 19 evaluator """rankbased""" +656 20 dataset """kinships""" +656 20 model """simple""" +656 20 loss """softplus""" +656 20 regularizer """no""" +656 20 optimizer """adadelta""" +656 20 training_loop """owa""" +656 20 negative_sampler """basic""" +656 20 evaluator """rankbased""" +656 21 dataset """kinships""" +656 21 model """simple""" +656 21 loss """softplus""" +656 21 regularizer """no""" +656 21 optimizer """adadelta""" +656 21 training_loop """owa""" +656 21 negative_sampler """basic""" +656 21 evaluator """rankbased""" +656 22 dataset """kinships""" +656 22 model """simple""" +656 22 loss """softplus""" +656 22 regularizer """no""" +656 22 optimizer """adadelta""" +656 22 training_loop """owa""" +656 22 negative_sampler """basic""" +656 22 evaluator """rankbased""" +656 23 dataset """kinships""" +656 23 model """simple""" +656 23 loss """softplus""" +656 23 regularizer """no""" +656 23 optimizer """adadelta""" +656 23 training_loop """owa""" 
+656 23 negative_sampler """basic""" +656 23 evaluator """rankbased""" +656 24 dataset """kinships""" +656 24 model """simple""" +656 24 loss """softplus""" +656 24 regularizer """no""" +656 24 optimizer """adadelta""" +656 24 training_loop """owa""" +656 24 negative_sampler """basic""" +656 24 evaluator """rankbased""" +656 25 dataset """kinships""" +656 25 model """simple""" +656 25 loss """softplus""" +656 25 regularizer """no""" +656 25 optimizer """adadelta""" +656 25 training_loop """owa""" +656 25 negative_sampler """basic""" +656 25 evaluator """rankbased""" +656 26 dataset """kinships""" +656 26 model """simple""" +656 26 loss """softplus""" +656 26 regularizer """no""" +656 26 optimizer """adadelta""" +656 26 training_loop """owa""" +656 26 negative_sampler """basic""" +656 26 evaluator """rankbased""" +656 27 dataset """kinships""" +656 27 model """simple""" +656 27 loss """softplus""" +656 27 regularizer """no""" +656 27 optimizer """adadelta""" +656 27 training_loop """owa""" +656 27 negative_sampler """basic""" +656 27 evaluator """rankbased""" +656 28 dataset """kinships""" +656 28 model """simple""" +656 28 loss """softplus""" +656 28 regularizer """no""" +656 28 optimizer """adadelta""" +656 28 training_loop """owa""" +656 28 negative_sampler """basic""" +656 28 evaluator """rankbased""" +656 29 dataset """kinships""" +656 29 model """simple""" +656 29 loss """softplus""" +656 29 regularizer """no""" +656 29 optimizer """adadelta""" +656 29 training_loop """owa""" +656 29 negative_sampler """basic""" +656 29 evaluator """rankbased""" +656 30 dataset """kinships""" +656 30 model """simple""" +656 30 loss """softplus""" +656 30 regularizer """no""" +656 30 optimizer """adadelta""" +656 30 training_loop """owa""" +656 30 negative_sampler """basic""" +656 30 evaluator """rankbased""" +656 31 dataset """kinships""" +656 31 model """simple""" +656 31 loss """softplus""" +656 31 regularizer """no""" +656 31 optimizer """adadelta""" +656 31 training_loop 
"""owa""" +656 31 negative_sampler """basic""" +656 31 evaluator """rankbased""" +656 32 dataset """kinships""" +656 32 model """simple""" +656 32 loss """softplus""" +656 32 regularizer """no""" +656 32 optimizer """adadelta""" +656 32 training_loop """owa""" +656 32 negative_sampler """basic""" +656 32 evaluator """rankbased""" +656 33 dataset """kinships""" +656 33 model """simple""" +656 33 loss """softplus""" +656 33 regularizer """no""" +656 33 optimizer """adadelta""" +656 33 training_loop """owa""" +656 33 negative_sampler """basic""" +656 33 evaluator """rankbased""" +656 34 dataset """kinships""" +656 34 model """simple""" +656 34 loss """softplus""" +656 34 regularizer """no""" +656 34 optimizer """adadelta""" +656 34 training_loop """owa""" +656 34 negative_sampler """basic""" +656 34 evaluator """rankbased""" +656 35 dataset """kinships""" +656 35 model """simple""" +656 35 loss """softplus""" +656 35 regularizer """no""" +656 35 optimizer """adadelta""" +656 35 training_loop """owa""" +656 35 negative_sampler """basic""" +656 35 evaluator """rankbased""" +656 36 dataset """kinships""" +656 36 model """simple""" +656 36 loss """softplus""" +656 36 regularizer """no""" +656 36 optimizer """adadelta""" +656 36 training_loop """owa""" +656 36 negative_sampler """basic""" +656 36 evaluator """rankbased""" +656 37 dataset """kinships""" +656 37 model """simple""" +656 37 loss """softplus""" +656 37 regularizer """no""" +656 37 optimizer """adadelta""" +656 37 training_loop """owa""" +656 37 negative_sampler """basic""" +656 37 evaluator """rankbased""" +656 38 dataset """kinships""" +656 38 model """simple""" +656 38 loss """softplus""" +656 38 regularizer """no""" +656 38 optimizer """adadelta""" +656 38 training_loop """owa""" +656 38 negative_sampler """basic""" +656 38 evaluator """rankbased""" +656 39 dataset """kinships""" +656 39 model """simple""" +656 39 loss """softplus""" +656 39 regularizer """no""" +656 39 optimizer """adadelta""" +656 39 
training_loop """owa""" +656 39 negative_sampler """basic""" +656 39 evaluator """rankbased""" +656 40 dataset """kinships""" +656 40 model """simple""" +656 40 loss """softplus""" +656 40 regularizer """no""" +656 40 optimizer """adadelta""" +656 40 training_loop """owa""" +656 40 negative_sampler """basic""" +656 40 evaluator """rankbased""" +656 41 dataset """kinships""" +656 41 model """simple""" +656 41 loss """softplus""" +656 41 regularizer """no""" +656 41 optimizer """adadelta""" +656 41 training_loop """owa""" +656 41 negative_sampler """basic""" +656 41 evaluator """rankbased""" +656 42 dataset """kinships""" +656 42 model """simple""" +656 42 loss """softplus""" +656 42 regularizer """no""" +656 42 optimizer """adadelta""" +656 42 training_loop """owa""" +656 42 negative_sampler """basic""" +656 42 evaluator """rankbased""" +656 43 dataset """kinships""" +656 43 model """simple""" +656 43 loss """softplus""" +656 43 regularizer """no""" +656 43 optimizer """adadelta""" +656 43 training_loop """owa""" +656 43 negative_sampler """basic""" +656 43 evaluator """rankbased""" +656 44 dataset """kinships""" +656 44 model """simple""" +656 44 loss """softplus""" +656 44 regularizer """no""" +656 44 optimizer """adadelta""" +656 44 training_loop """owa""" +656 44 negative_sampler """basic""" +656 44 evaluator """rankbased""" +656 45 dataset """kinships""" +656 45 model """simple""" +656 45 loss """softplus""" +656 45 regularizer """no""" +656 45 optimizer """adadelta""" +656 45 training_loop """owa""" +656 45 negative_sampler """basic""" +656 45 evaluator """rankbased""" +656 46 dataset """kinships""" +656 46 model """simple""" +656 46 loss """softplus""" +656 46 regularizer """no""" +656 46 optimizer """adadelta""" +656 46 training_loop """owa""" +656 46 negative_sampler """basic""" +656 46 evaluator """rankbased""" +656 47 dataset """kinships""" +656 47 model """simple""" +656 47 loss """softplus""" +656 47 regularizer """no""" +656 47 optimizer """adadelta""" 
+656 47 training_loop """owa""" +656 47 negative_sampler """basic""" +656 47 evaluator """rankbased""" +656 48 dataset """kinships""" +656 48 model """simple""" +656 48 loss """softplus""" +656 48 regularizer """no""" +656 48 optimizer """adadelta""" +656 48 training_loop """owa""" +656 48 negative_sampler """basic""" +656 48 evaluator """rankbased""" +656 49 dataset """kinships""" +656 49 model """simple""" +656 49 loss """softplus""" +656 49 regularizer """no""" +656 49 optimizer """adadelta""" +656 49 training_loop """owa""" +656 49 negative_sampler """basic""" +656 49 evaluator """rankbased""" +656 50 dataset """kinships""" +656 50 model """simple""" +656 50 loss """softplus""" +656 50 regularizer """no""" +656 50 optimizer """adadelta""" +656 50 training_loop """owa""" +656 50 negative_sampler """basic""" +656 50 evaluator """rankbased""" +656 51 dataset """kinships""" +656 51 model """simple""" +656 51 loss """softplus""" +656 51 regularizer """no""" +656 51 optimizer """adadelta""" +656 51 training_loop """owa""" +656 51 negative_sampler """basic""" +656 51 evaluator """rankbased""" +656 52 dataset """kinships""" +656 52 model """simple""" +656 52 loss """softplus""" +656 52 regularizer """no""" +656 52 optimizer """adadelta""" +656 52 training_loop """owa""" +656 52 negative_sampler """basic""" +656 52 evaluator """rankbased""" +656 53 dataset """kinships""" +656 53 model """simple""" +656 53 loss """softplus""" +656 53 regularizer """no""" +656 53 optimizer """adadelta""" +656 53 training_loop """owa""" +656 53 negative_sampler """basic""" +656 53 evaluator """rankbased""" +656 54 dataset """kinships""" +656 54 model """simple""" +656 54 loss """softplus""" +656 54 regularizer """no""" +656 54 optimizer """adadelta""" +656 54 training_loop """owa""" +656 54 negative_sampler """basic""" +656 54 evaluator """rankbased""" +656 55 dataset """kinships""" +656 55 model """simple""" +656 55 loss """softplus""" +656 55 regularizer """no""" +656 55 optimizer 
"""adadelta""" +656 55 training_loop """owa""" +656 55 negative_sampler """basic""" +656 55 evaluator """rankbased""" +656 56 dataset """kinships""" +656 56 model """simple""" +656 56 loss """softplus""" +656 56 regularizer """no""" +656 56 optimizer """adadelta""" +656 56 training_loop """owa""" +656 56 negative_sampler """basic""" +656 56 evaluator """rankbased""" +656 57 dataset """kinships""" +656 57 model """simple""" +656 57 loss """softplus""" +656 57 regularizer """no""" +656 57 optimizer """adadelta""" +656 57 training_loop """owa""" +656 57 negative_sampler """basic""" +656 57 evaluator """rankbased""" +656 58 dataset """kinships""" +656 58 model """simple""" +656 58 loss """softplus""" +656 58 regularizer """no""" +656 58 optimizer """adadelta""" +656 58 training_loop """owa""" +656 58 negative_sampler """basic""" +656 58 evaluator """rankbased""" +656 59 dataset """kinships""" +656 59 model """simple""" +656 59 loss """softplus""" +656 59 regularizer """no""" +656 59 optimizer """adadelta""" +656 59 training_loop """owa""" +656 59 negative_sampler """basic""" +656 59 evaluator """rankbased""" +656 60 dataset """kinships""" +656 60 model """simple""" +656 60 loss """softplus""" +656 60 regularizer """no""" +656 60 optimizer """adadelta""" +656 60 training_loop """owa""" +656 60 negative_sampler """basic""" +656 60 evaluator """rankbased""" +656 61 dataset """kinships""" +656 61 model """simple""" +656 61 loss """softplus""" +656 61 regularizer """no""" +656 61 optimizer """adadelta""" +656 61 training_loop """owa""" +656 61 negative_sampler """basic""" +656 61 evaluator """rankbased""" +656 62 dataset """kinships""" +656 62 model """simple""" +656 62 loss """softplus""" +656 62 regularizer """no""" +656 62 optimizer """adadelta""" +656 62 training_loop """owa""" +656 62 negative_sampler """basic""" +656 62 evaluator """rankbased""" +656 63 dataset """kinships""" +656 63 model """simple""" +656 63 loss """softplus""" +656 63 regularizer """no""" +656 63 
optimizer """adadelta""" +656 63 training_loop """owa""" +656 63 negative_sampler """basic""" +656 63 evaluator """rankbased""" +656 64 dataset """kinships""" +656 64 model """simple""" +656 64 loss """softplus""" +656 64 regularizer """no""" +656 64 optimizer """adadelta""" +656 64 training_loop """owa""" +656 64 negative_sampler """basic""" +656 64 evaluator """rankbased""" +656 65 dataset """kinships""" +656 65 model """simple""" +656 65 loss """softplus""" +656 65 regularizer """no""" +656 65 optimizer """adadelta""" +656 65 training_loop """owa""" +656 65 negative_sampler """basic""" +656 65 evaluator """rankbased""" +656 66 dataset """kinships""" +656 66 model """simple""" +656 66 loss """softplus""" +656 66 regularizer """no""" +656 66 optimizer """adadelta""" +656 66 training_loop """owa""" +656 66 negative_sampler """basic""" +656 66 evaluator """rankbased""" +656 67 dataset """kinships""" +656 67 model """simple""" +656 67 loss """softplus""" +656 67 regularizer """no""" +656 67 optimizer """adadelta""" +656 67 training_loop """owa""" +656 67 negative_sampler """basic""" +656 67 evaluator """rankbased""" +656 68 dataset """kinships""" +656 68 model """simple""" +656 68 loss """softplus""" +656 68 regularizer """no""" +656 68 optimizer """adadelta""" +656 68 training_loop """owa""" +656 68 negative_sampler """basic""" +656 68 evaluator """rankbased""" +656 69 dataset """kinships""" +656 69 model """simple""" +656 69 loss """softplus""" +656 69 regularizer """no""" +656 69 optimizer """adadelta""" +656 69 training_loop """owa""" +656 69 negative_sampler """basic""" +656 69 evaluator """rankbased""" +656 70 dataset """kinships""" +656 70 model """simple""" +656 70 loss """softplus""" +656 70 regularizer """no""" +656 70 optimizer """adadelta""" +656 70 training_loop """owa""" +656 70 negative_sampler """basic""" +656 70 evaluator """rankbased""" +656 71 dataset """kinships""" +656 71 model """simple""" +656 71 loss """softplus""" +656 71 regularizer """no""" 
+656 71 optimizer """adadelta""" +656 71 training_loop """owa""" +656 71 negative_sampler """basic""" +656 71 evaluator """rankbased""" +656 72 dataset """kinships""" +656 72 model """simple""" +656 72 loss """softplus""" +656 72 regularizer """no""" +656 72 optimizer """adadelta""" +656 72 training_loop """owa""" +656 72 negative_sampler """basic""" +656 72 evaluator """rankbased""" +656 73 dataset """kinships""" +656 73 model """simple""" +656 73 loss """softplus""" +656 73 regularizer """no""" +656 73 optimizer """adadelta""" +656 73 training_loop """owa""" +656 73 negative_sampler """basic""" +656 73 evaluator """rankbased""" +656 74 dataset """kinships""" +656 74 model """simple""" +656 74 loss """softplus""" +656 74 regularizer """no""" +656 74 optimizer """adadelta""" +656 74 training_loop """owa""" +656 74 negative_sampler """basic""" +656 74 evaluator """rankbased""" +656 75 dataset """kinships""" +656 75 model """simple""" +656 75 loss """softplus""" +656 75 regularizer """no""" +656 75 optimizer """adadelta""" +656 75 training_loop """owa""" +656 75 negative_sampler """basic""" +656 75 evaluator """rankbased""" +656 76 dataset """kinships""" +656 76 model """simple""" +656 76 loss """softplus""" +656 76 regularizer """no""" +656 76 optimizer """adadelta""" +656 76 training_loop """owa""" +656 76 negative_sampler """basic""" +656 76 evaluator """rankbased""" +656 77 dataset """kinships""" +656 77 model """simple""" +656 77 loss """softplus""" +656 77 regularizer """no""" +656 77 optimizer """adadelta""" +656 77 training_loop """owa""" +656 77 negative_sampler """basic""" +656 77 evaluator """rankbased""" +656 78 dataset """kinships""" +656 78 model """simple""" +656 78 loss """softplus""" +656 78 regularizer """no""" +656 78 optimizer """adadelta""" +656 78 training_loop """owa""" +656 78 negative_sampler """basic""" +656 78 evaluator """rankbased""" +656 79 dataset """kinships""" +656 79 model """simple""" +656 79 loss """softplus""" +656 79 regularizer 
"""no""" +656 79 optimizer """adadelta""" +656 79 training_loop """owa""" +656 79 negative_sampler """basic""" +656 79 evaluator """rankbased""" +656 80 dataset """kinships""" +656 80 model """simple""" +656 80 loss """softplus""" +656 80 regularizer """no""" +656 80 optimizer """adadelta""" +656 80 training_loop """owa""" +656 80 negative_sampler """basic""" +656 80 evaluator """rankbased""" +656 81 dataset """kinships""" +656 81 model """simple""" +656 81 loss """softplus""" +656 81 regularizer """no""" +656 81 optimizer """adadelta""" +656 81 training_loop """owa""" +656 81 negative_sampler """basic""" +656 81 evaluator """rankbased""" +656 82 dataset """kinships""" +656 82 model """simple""" +656 82 loss """softplus""" +656 82 regularizer """no""" +656 82 optimizer """adadelta""" +656 82 training_loop """owa""" +656 82 negative_sampler """basic""" +656 82 evaluator """rankbased""" +656 83 dataset """kinships""" +656 83 model """simple""" +656 83 loss """softplus""" +656 83 regularizer """no""" +656 83 optimizer """adadelta""" +656 83 training_loop """owa""" +656 83 negative_sampler """basic""" +656 83 evaluator """rankbased""" +656 84 dataset """kinships""" +656 84 model """simple""" +656 84 loss """softplus""" +656 84 regularizer """no""" +656 84 optimizer """adadelta""" +656 84 training_loop """owa""" +656 84 negative_sampler """basic""" +656 84 evaluator """rankbased""" +656 85 dataset """kinships""" +656 85 model """simple""" +656 85 loss """softplus""" +656 85 regularizer """no""" +656 85 optimizer """adadelta""" +656 85 training_loop """owa""" +656 85 negative_sampler """basic""" +656 85 evaluator """rankbased""" +656 86 dataset """kinships""" +656 86 model """simple""" +656 86 loss """softplus""" +656 86 regularizer """no""" +656 86 optimizer """adadelta""" +656 86 training_loop """owa""" +656 86 negative_sampler """basic""" +656 86 evaluator """rankbased""" +656 87 dataset """kinships""" +656 87 model """simple""" +656 87 loss """softplus""" +656 87 
regularizer """no""" +656 87 optimizer """adadelta""" +656 87 training_loop """owa""" +656 87 negative_sampler """basic""" +656 87 evaluator """rankbased""" +656 88 dataset """kinships""" +656 88 model """simple""" +656 88 loss """softplus""" +656 88 regularizer """no""" +656 88 optimizer """adadelta""" +656 88 training_loop """owa""" +656 88 negative_sampler """basic""" +656 88 evaluator """rankbased""" +656 89 dataset """kinships""" +656 89 model """simple""" +656 89 loss """softplus""" +656 89 regularizer """no""" +656 89 optimizer """adadelta""" +656 89 training_loop """owa""" +656 89 negative_sampler """basic""" +656 89 evaluator """rankbased""" +656 90 dataset """kinships""" +656 90 model """simple""" +656 90 loss """softplus""" +656 90 regularizer """no""" +656 90 optimizer """adadelta""" +656 90 training_loop """owa""" +656 90 negative_sampler """basic""" +656 90 evaluator """rankbased""" +656 91 dataset """kinships""" +656 91 model """simple""" +656 91 loss """softplus""" +656 91 regularizer """no""" +656 91 optimizer """adadelta""" +656 91 training_loop """owa""" +656 91 negative_sampler """basic""" +656 91 evaluator """rankbased""" +656 92 dataset """kinships""" +656 92 model """simple""" +656 92 loss """softplus""" +656 92 regularizer """no""" +656 92 optimizer """adadelta""" +656 92 training_loop """owa""" +656 92 negative_sampler """basic""" +656 92 evaluator """rankbased""" +656 93 dataset """kinships""" +656 93 model """simple""" +656 93 loss """softplus""" +656 93 regularizer """no""" +656 93 optimizer """adadelta""" +656 93 training_loop """owa""" +656 93 negative_sampler """basic""" +656 93 evaluator """rankbased""" +656 94 dataset """kinships""" +656 94 model """simple""" +656 94 loss """softplus""" +656 94 regularizer """no""" +656 94 optimizer """adadelta""" +656 94 training_loop """owa""" +656 94 negative_sampler """basic""" +656 94 evaluator """rankbased""" +656 95 dataset """kinships""" +656 95 model """simple""" +656 95 loss """softplus""" 
+656 95 regularizer """no""" +656 95 optimizer """adadelta""" +656 95 training_loop """owa""" +656 95 negative_sampler """basic""" +656 95 evaluator """rankbased""" +656 96 dataset """kinships""" +656 96 model """simple""" +656 96 loss """softplus""" +656 96 regularizer """no""" +656 96 optimizer """adadelta""" +656 96 training_loop """owa""" +656 96 negative_sampler """basic""" +656 96 evaluator """rankbased""" +656 97 dataset """kinships""" +656 97 model """simple""" +656 97 loss """softplus""" +656 97 regularizer """no""" +656 97 optimizer """adadelta""" +656 97 training_loop """owa""" +656 97 negative_sampler """basic""" +656 97 evaluator """rankbased""" +656 98 dataset """kinships""" +656 98 model """simple""" +656 98 loss """softplus""" +656 98 regularizer """no""" +656 98 optimizer """adadelta""" +656 98 training_loop """owa""" +656 98 negative_sampler """basic""" +656 98 evaluator """rankbased""" +656 99 dataset """kinships""" +656 99 model """simple""" +656 99 loss """softplus""" +656 99 regularizer """no""" +656 99 optimizer """adadelta""" +656 99 training_loop """owa""" +656 99 negative_sampler """basic""" +656 99 evaluator """rankbased""" +656 100 dataset """kinships""" +656 100 model """simple""" +656 100 loss """softplus""" +656 100 regularizer """no""" +656 100 optimizer """adadelta""" +656 100 training_loop """owa""" +656 100 negative_sampler """basic""" +656 100 evaluator """rankbased""" +657 1 model.embedding_dim 1.0 +657 1 loss.margin 0.7308862286620832 +657 1 negative_sampler.num_negs_per_pos 16.0 +657 1 training.batch_size 0.0 +657 2 model.embedding_dim 0.0 +657 2 loss.margin 8.835541307871907 +657 2 negative_sampler.num_negs_per_pos 55.0 +657 2 training.batch_size 1.0 +657 3 model.embedding_dim 1.0 +657 3 loss.margin 2.377970021303996 +657 3 negative_sampler.num_negs_per_pos 64.0 +657 3 training.batch_size 0.0 +657 4 model.embedding_dim 1.0 +657 4 loss.margin 0.8714229343942757 +657 4 negative_sampler.num_negs_per_pos 56.0 +657 4 
training.batch_size 2.0 +657 5 model.embedding_dim 0.0 +657 5 loss.margin 4.835986725815169 +657 5 negative_sampler.num_negs_per_pos 96.0 +657 5 training.batch_size 2.0 +657 6 model.embedding_dim 2.0 +657 6 loss.margin 2.3425778674230147 +657 6 negative_sampler.num_negs_per_pos 14.0 +657 6 training.batch_size 2.0 +657 7 model.embedding_dim 1.0 +657 7 loss.margin 9.383576306499673 +657 7 negative_sampler.num_negs_per_pos 63.0 +657 7 training.batch_size 1.0 +657 8 model.embedding_dim 1.0 +657 8 loss.margin 1.2552778754163656 +657 8 negative_sampler.num_negs_per_pos 69.0 +657 8 training.batch_size 1.0 +657 9 model.embedding_dim 0.0 +657 9 loss.margin 9.870466075572182 +657 9 negative_sampler.num_negs_per_pos 22.0 +657 9 training.batch_size 1.0 +657 10 model.embedding_dim 2.0 +657 10 loss.margin 2.550601701218556 +657 10 negative_sampler.num_negs_per_pos 16.0 +657 10 training.batch_size 0.0 +657 11 model.embedding_dim 2.0 +657 11 loss.margin 0.8176018569339664 +657 11 negative_sampler.num_negs_per_pos 44.0 +657 11 training.batch_size 2.0 +657 12 model.embedding_dim 2.0 +657 12 loss.margin 4.187274795880196 +657 12 negative_sampler.num_negs_per_pos 30.0 +657 12 training.batch_size 1.0 +657 13 model.embedding_dim 1.0 +657 13 loss.margin 5.868820876401053 +657 13 negative_sampler.num_negs_per_pos 58.0 +657 13 training.batch_size 2.0 +657 14 model.embedding_dim 2.0 +657 14 loss.margin 1.4223372541404107 +657 14 negative_sampler.num_negs_per_pos 24.0 +657 14 training.batch_size 0.0 +657 15 model.embedding_dim 2.0 +657 15 loss.margin 7.334051602493813 +657 15 negative_sampler.num_negs_per_pos 59.0 +657 15 training.batch_size 2.0 +657 16 model.embedding_dim 0.0 +657 16 loss.margin 3.2565184344246094 +657 16 negative_sampler.num_negs_per_pos 81.0 +657 16 training.batch_size 1.0 +657 17 model.embedding_dim 2.0 +657 17 loss.margin 4.915306401633512 +657 17 negative_sampler.num_negs_per_pos 95.0 +657 17 training.batch_size 1.0 +657 18 model.embedding_dim 1.0 +657 18 loss.margin 
2.6444940384647446 +657 18 negative_sampler.num_negs_per_pos 83.0 +657 18 training.batch_size 1.0 +657 19 model.embedding_dim 0.0 +657 19 loss.margin 4.740143590871747 +657 19 negative_sampler.num_negs_per_pos 77.0 +657 19 training.batch_size 2.0 +657 20 model.embedding_dim 2.0 +657 20 loss.margin 3.107381622267387 +657 20 negative_sampler.num_negs_per_pos 27.0 +657 20 training.batch_size 0.0 +657 21 model.embedding_dim 0.0 +657 21 loss.margin 1.5388889195294304 +657 21 negative_sampler.num_negs_per_pos 56.0 +657 21 training.batch_size 1.0 +657 22 model.embedding_dim 2.0 +657 22 loss.margin 8.94427495359135 +657 22 negative_sampler.num_negs_per_pos 44.0 +657 22 training.batch_size 1.0 +657 23 model.embedding_dim 1.0 +657 23 loss.margin 4.745435903798134 +657 23 negative_sampler.num_negs_per_pos 76.0 +657 23 training.batch_size 1.0 +657 24 model.embedding_dim 1.0 +657 24 loss.margin 8.613303515166292 +657 24 negative_sampler.num_negs_per_pos 79.0 +657 24 training.batch_size 2.0 +657 25 model.embedding_dim 1.0 +657 25 loss.margin 1.9838943888107423 +657 25 negative_sampler.num_negs_per_pos 36.0 +657 25 training.batch_size 2.0 +657 26 model.embedding_dim 1.0 +657 26 loss.margin 8.97030878916693 +657 26 negative_sampler.num_negs_per_pos 6.0 +657 26 training.batch_size 1.0 +657 27 model.embedding_dim 2.0 +657 27 loss.margin 4.6106181899090215 +657 27 negative_sampler.num_negs_per_pos 39.0 +657 27 training.batch_size 0.0 +657 28 model.embedding_dim 2.0 +657 28 loss.margin 6.461835866708343 +657 28 negative_sampler.num_negs_per_pos 71.0 +657 28 training.batch_size 1.0 +657 29 model.embedding_dim 2.0 +657 29 loss.margin 0.8867738433548868 +657 29 negative_sampler.num_negs_per_pos 4.0 +657 29 training.batch_size 1.0 +657 30 model.embedding_dim 1.0 +657 30 loss.margin 2.8036176278934106 +657 30 negative_sampler.num_negs_per_pos 69.0 +657 30 training.batch_size 1.0 +657 31 model.embedding_dim 1.0 +657 31 loss.margin 6.111572065780848 +657 31 negative_sampler.num_negs_per_pos 
9.0 +657 31 training.batch_size 2.0 +657 32 model.embedding_dim 2.0 +657 32 loss.margin 3.065818114491612 +657 32 negative_sampler.num_negs_per_pos 90.0 +657 32 training.batch_size 0.0 +657 33 model.embedding_dim 1.0 +657 33 loss.margin 7.53002031708074 +657 33 negative_sampler.num_negs_per_pos 62.0 +657 33 training.batch_size 0.0 +657 34 model.embedding_dim 1.0 +657 34 loss.margin 4.757516324278334 +657 34 negative_sampler.num_negs_per_pos 60.0 +657 34 training.batch_size 0.0 +657 35 model.embedding_dim 0.0 +657 35 loss.margin 7.456212411071078 +657 35 negative_sampler.num_negs_per_pos 27.0 +657 35 training.batch_size 2.0 +657 36 model.embedding_dim 2.0 +657 36 loss.margin 5.454949348724389 +657 36 negative_sampler.num_negs_per_pos 76.0 +657 36 training.batch_size 0.0 +657 37 model.embedding_dim 1.0 +657 37 loss.margin 0.755218020740877 +657 37 negative_sampler.num_negs_per_pos 98.0 +657 37 training.batch_size 1.0 +657 38 model.embedding_dim 1.0 +657 38 loss.margin 3.6258160306408795 +657 38 negative_sampler.num_negs_per_pos 40.0 +657 38 training.batch_size 1.0 +657 39 model.embedding_dim 1.0 +657 39 loss.margin 7.9233291145683316 +657 39 negative_sampler.num_negs_per_pos 96.0 +657 39 training.batch_size 0.0 +657 40 model.embedding_dim 1.0 +657 40 loss.margin 1.552233773493198 +657 40 negative_sampler.num_negs_per_pos 44.0 +657 40 training.batch_size 2.0 +657 41 model.embedding_dim 1.0 +657 41 loss.margin 7.940118764756812 +657 41 negative_sampler.num_negs_per_pos 85.0 +657 41 training.batch_size 1.0 +657 42 model.embedding_dim 1.0 +657 42 loss.margin 6.366188616348157 +657 42 negative_sampler.num_negs_per_pos 64.0 +657 42 training.batch_size 0.0 +657 43 model.embedding_dim 2.0 +657 43 loss.margin 1.4715160167462769 +657 43 negative_sampler.num_negs_per_pos 60.0 +657 43 training.batch_size 0.0 +657 44 model.embedding_dim 0.0 +657 44 loss.margin 7.496060051732978 +657 44 negative_sampler.num_negs_per_pos 83.0 +657 44 training.batch_size 2.0 +657 45 
model.embedding_dim 2.0 +657 45 loss.margin 0.5119868889791166 +657 45 negative_sampler.num_negs_per_pos 68.0 +657 45 training.batch_size 2.0 +657 46 model.embedding_dim 2.0 +657 46 loss.margin 9.069523352808577 +657 46 negative_sampler.num_negs_per_pos 31.0 +657 46 training.batch_size 1.0 +657 47 model.embedding_dim 0.0 +657 47 loss.margin 4.237214603120861 +657 47 negative_sampler.num_negs_per_pos 48.0 +657 47 training.batch_size 2.0 +657 48 model.embedding_dim 1.0 +657 48 loss.margin 5.859238470785637 +657 48 negative_sampler.num_negs_per_pos 28.0 +657 48 training.batch_size 1.0 +657 49 model.embedding_dim 2.0 +657 49 loss.margin 5.280011020534133 +657 49 negative_sampler.num_negs_per_pos 70.0 +657 49 training.batch_size 0.0 +657 50 model.embedding_dim 2.0 +657 50 loss.margin 0.716876727350819 +657 50 negative_sampler.num_negs_per_pos 60.0 +657 50 training.batch_size 1.0 +657 51 model.embedding_dim 1.0 +657 51 loss.margin 0.8134976544096206 +657 51 negative_sampler.num_negs_per_pos 19.0 +657 51 training.batch_size 2.0 +657 52 model.embedding_dim 0.0 +657 52 loss.margin 4.560379424536505 +657 52 negative_sampler.num_negs_per_pos 21.0 +657 52 training.batch_size 1.0 +657 53 model.embedding_dim 0.0 +657 53 loss.margin 1.1601067832196599 +657 53 negative_sampler.num_negs_per_pos 3.0 +657 53 training.batch_size 2.0 +657 54 model.embedding_dim 2.0 +657 54 loss.margin 9.714809990367424 +657 54 negative_sampler.num_negs_per_pos 63.0 +657 54 training.batch_size 2.0 +657 55 model.embedding_dim 0.0 +657 55 loss.margin 6.771883820895699 +657 55 negative_sampler.num_negs_per_pos 55.0 +657 55 training.batch_size 0.0 +657 56 model.embedding_dim 2.0 +657 56 loss.margin 7.052846142466522 +657 56 negative_sampler.num_negs_per_pos 18.0 +657 56 training.batch_size 0.0 +657 57 model.embedding_dim 2.0 +657 57 loss.margin 1.7699237577065305 +657 57 negative_sampler.num_negs_per_pos 32.0 +657 57 training.batch_size 0.0 +657 58 model.embedding_dim 0.0 +657 58 loss.margin 
4.651653930749665 +657 58 negative_sampler.num_negs_per_pos 64.0 +657 58 training.batch_size 0.0 +657 59 model.embedding_dim 0.0 +657 59 loss.margin 5.358388456283472 +657 59 negative_sampler.num_negs_per_pos 97.0 +657 59 training.batch_size 1.0 +657 60 model.embedding_dim 0.0 +657 60 loss.margin 4.3276320790933305 +657 60 negative_sampler.num_negs_per_pos 23.0 +657 60 training.batch_size 1.0 +657 61 model.embedding_dim 2.0 +657 61 loss.margin 0.8529722224565062 +657 61 negative_sampler.num_negs_per_pos 15.0 +657 61 training.batch_size 0.0 +657 62 model.embedding_dim 0.0 +657 62 loss.margin 5.462954633197529 +657 62 negative_sampler.num_negs_per_pos 44.0 +657 62 training.batch_size 1.0 +657 63 model.embedding_dim 0.0 +657 63 loss.margin 1.0229694255697064 +657 63 negative_sampler.num_negs_per_pos 28.0 +657 63 training.batch_size 0.0 +657 64 model.embedding_dim 0.0 +657 64 loss.margin 5.4263419012379 +657 64 negative_sampler.num_negs_per_pos 35.0 +657 64 training.batch_size 0.0 +657 65 model.embedding_dim 0.0 +657 65 loss.margin 9.459565223531708 +657 65 negative_sampler.num_negs_per_pos 27.0 +657 65 training.batch_size 1.0 +657 66 model.embedding_dim 0.0 +657 66 loss.margin 4.322457246970775 +657 66 negative_sampler.num_negs_per_pos 99.0 +657 66 training.batch_size 1.0 +657 67 model.embedding_dim 0.0 +657 67 loss.margin 1.7868023432297573 +657 67 negative_sampler.num_negs_per_pos 73.0 +657 67 training.batch_size 2.0 +657 68 model.embedding_dim 0.0 +657 68 loss.margin 6.291962858245948 +657 68 negative_sampler.num_negs_per_pos 49.0 +657 68 training.batch_size 0.0 +657 69 model.embedding_dim 0.0 +657 69 loss.margin 1.5499846411754539 +657 69 negative_sampler.num_negs_per_pos 23.0 +657 69 training.batch_size 1.0 +657 70 model.embedding_dim 1.0 +657 70 loss.margin 6.856360997765537 +657 70 negative_sampler.num_negs_per_pos 63.0 +657 70 training.batch_size 0.0 +657 71 model.embedding_dim 0.0 +657 71 loss.margin 7.6285531316952735 +657 71 
negative_sampler.num_negs_per_pos 3.0 +657 71 training.batch_size 0.0 +657 72 model.embedding_dim 1.0 +657 72 loss.margin 6.514449666791576 +657 72 negative_sampler.num_negs_per_pos 23.0 +657 72 training.batch_size 2.0 +657 73 model.embedding_dim 1.0 +657 73 loss.margin 5.795327696634162 +657 73 negative_sampler.num_negs_per_pos 46.0 +657 73 training.batch_size 1.0 +657 74 model.embedding_dim 1.0 +657 74 loss.margin 3.315261554804425 +657 74 negative_sampler.num_negs_per_pos 88.0 +657 74 training.batch_size 0.0 +657 75 model.embedding_dim 0.0 +657 75 loss.margin 8.066614844653675 +657 75 negative_sampler.num_negs_per_pos 57.0 +657 75 training.batch_size 0.0 +657 76 model.embedding_dim 1.0 +657 76 loss.margin 3.959480912983009 +657 76 negative_sampler.num_negs_per_pos 21.0 +657 76 training.batch_size 2.0 +657 77 model.embedding_dim 2.0 +657 77 loss.margin 2.393677442179773 +657 77 negative_sampler.num_negs_per_pos 88.0 +657 77 training.batch_size 1.0 +657 78 model.embedding_dim 2.0 +657 78 loss.margin 4.9986706714813955 +657 78 negative_sampler.num_negs_per_pos 39.0 +657 78 training.batch_size 2.0 +657 79 model.embedding_dim 1.0 +657 79 loss.margin 7.967973699194641 +657 79 negative_sampler.num_negs_per_pos 89.0 +657 79 training.batch_size 2.0 +657 80 model.embedding_dim 2.0 +657 80 loss.margin 6.616536262851894 +657 80 negative_sampler.num_negs_per_pos 48.0 +657 80 training.batch_size 0.0 +657 81 model.embedding_dim 0.0 +657 81 loss.margin 5.466638418697866 +657 81 negative_sampler.num_negs_per_pos 6.0 +657 81 training.batch_size 1.0 +657 82 model.embedding_dim 0.0 +657 82 loss.margin 2.4272309710860247 +657 82 negative_sampler.num_negs_per_pos 77.0 +657 82 training.batch_size 2.0 +657 83 model.embedding_dim 2.0 +657 83 loss.margin 8.75674701723829 +657 83 negative_sampler.num_negs_per_pos 92.0 +657 83 training.batch_size 1.0 +657 84 model.embedding_dim 2.0 +657 84 loss.margin 9.037162556879867 +657 84 negative_sampler.num_negs_per_pos 31.0 +657 84 
training.batch_size 0.0 +657 85 model.embedding_dim 0.0 +657 85 loss.margin 1.4962231300660067 +657 85 negative_sampler.num_negs_per_pos 75.0 +657 85 training.batch_size 2.0 +657 86 model.embedding_dim 2.0 +657 86 loss.margin 6.776705221641966 +657 86 negative_sampler.num_negs_per_pos 14.0 +657 86 training.batch_size 1.0 +657 87 model.embedding_dim 2.0 +657 87 loss.margin 4.85417040897307 +657 87 negative_sampler.num_negs_per_pos 29.0 +657 87 training.batch_size 2.0 +657 88 model.embedding_dim 2.0 +657 88 loss.margin 1.0922535447204595 +657 88 negative_sampler.num_negs_per_pos 90.0 +657 88 training.batch_size 1.0 +657 89 model.embedding_dim 0.0 +657 89 loss.margin 2.8330161040025446 +657 89 negative_sampler.num_negs_per_pos 54.0 +657 89 training.batch_size 0.0 +657 90 model.embedding_dim 0.0 +657 90 loss.margin 6.165707905583588 +657 90 negative_sampler.num_negs_per_pos 84.0 +657 90 training.batch_size 1.0 +657 91 model.embedding_dim 2.0 +657 91 loss.margin 4.234581520665916 +657 91 negative_sampler.num_negs_per_pos 59.0 +657 91 training.batch_size 1.0 +657 92 model.embedding_dim 0.0 +657 92 loss.margin 8.533791639206525 +657 92 negative_sampler.num_negs_per_pos 90.0 +657 92 training.batch_size 2.0 +657 93 model.embedding_dim 1.0 +657 93 loss.margin 7.776639570981037 +657 93 negative_sampler.num_negs_per_pos 33.0 +657 93 training.batch_size 0.0 +657 94 model.embedding_dim 1.0 +657 94 loss.margin 8.664507705856293 +657 94 negative_sampler.num_negs_per_pos 34.0 +657 94 training.batch_size 0.0 +657 95 model.embedding_dim 2.0 +657 95 loss.margin 9.529652734131364 +657 95 negative_sampler.num_negs_per_pos 73.0 +657 95 training.batch_size 1.0 +657 96 model.embedding_dim 0.0 +657 96 loss.margin 0.5265226057674339 +657 96 negative_sampler.num_negs_per_pos 77.0 +657 96 training.batch_size 2.0 +657 97 model.embedding_dim 1.0 +657 97 loss.margin 2.2049338013029702 +657 97 negative_sampler.num_negs_per_pos 23.0 +657 97 training.batch_size 0.0 +657 98 model.embedding_dim 2.0 
+657 98 loss.margin 0.8720203960589075 +657 98 negative_sampler.num_negs_per_pos 32.0 +657 98 training.batch_size 1.0 +657 99 model.embedding_dim 2.0 +657 99 loss.margin 4.0393166670231935 +657 99 negative_sampler.num_negs_per_pos 75.0 +657 99 training.batch_size 0.0 +657 100 model.embedding_dim 2.0 +657 100 loss.margin 0.9742264358772874 +657 100 negative_sampler.num_negs_per_pos 32.0 +657 100 training.batch_size 2.0 +657 1 dataset """kinships""" +657 1 model """simple""" +657 1 loss """marginranking""" +657 1 regularizer """no""" +657 1 optimizer """adadelta""" +657 1 training_loop """owa""" +657 1 negative_sampler """basic""" +657 1 evaluator """rankbased""" +657 2 dataset """kinships""" +657 2 model """simple""" +657 2 loss """marginranking""" +657 2 regularizer """no""" +657 2 optimizer """adadelta""" +657 2 training_loop """owa""" +657 2 negative_sampler """basic""" +657 2 evaluator """rankbased""" +657 3 dataset """kinships""" +657 3 model """simple""" +657 3 loss """marginranking""" +657 3 regularizer """no""" +657 3 optimizer """adadelta""" +657 3 training_loop """owa""" +657 3 negative_sampler """basic""" +657 3 evaluator """rankbased""" +657 4 dataset """kinships""" +657 4 model """simple""" +657 4 loss """marginranking""" +657 4 regularizer """no""" +657 4 optimizer """adadelta""" +657 4 training_loop """owa""" +657 4 negative_sampler """basic""" +657 4 evaluator """rankbased""" +657 5 dataset """kinships""" +657 5 model """simple""" +657 5 loss """marginranking""" +657 5 regularizer """no""" +657 5 optimizer """adadelta""" +657 5 training_loop """owa""" +657 5 negative_sampler """basic""" +657 5 evaluator """rankbased""" +657 6 dataset """kinships""" +657 6 model """simple""" +657 6 loss """marginranking""" +657 6 regularizer """no""" +657 6 optimizer """adadelta""" +657 6 training_loop """owa""" +657 6 negative_sampler """basic""" +657 6 evaluator """rankbased""" +657 7 dataset """kinships""" +657 7 model """simple""" +657 7 loss """marginranking""" 
+657 7 regularizer """no""" +657 7 optimizer """adadelta""" +657 7 training_loop """owa""" +657 7 negative_sampler """basic""" +657 7 evaluator """rankbased""" +657 8 dataset """kinships""" +657 8 model """simple""" +657 8 loss """marginranking""" +657 8 regularizer """no""" +657 8 optimizer """adadelta""" +657 8 training_loop """owa""" +657 8 negative_sampler """basic""" +657 8 evaluator """rankbased""" +657 9 dataset """kinships""" +657 9 model """simple""" +657 9 loss """marginranking""" +657 9 regularizer """no""" +657 9 optimizer """adadelta""" +657 9 training_loop """owa""" +657 9 negative_sampler """basic""" +657 9 evaluator """rankbased""" +657 10 dataset """kinships""" +657 10 model """simple""" +657 10 loss """marginranking""" +657 10 regularizer """no""" +657 10 optimizer """adadelta""" +657 10 training_loop """owa""" +657 10 negative_sampler """basic""" +657 10 evaluator """rankbased""" +657 11 dataset """kinships""" +657 11 model """simple""" +657 11 loss """marginranking""" +657 11 regularizer """no""" +657 11 optimizer """adadelta""" +657 11 training_loop """owa""" +657 11 negative_sampler """basic""" +657 11 evaluator """rankbased""" +657 12 dataset """kinships""" +657 12 model """simple""" +657 12 loss """marginranking""" +657 12 regularizer """no""" +657 12 optimizer """adadelta""" +657 12 training_loop """owa""" +657 12 negative_sampler """basic""" +657 12 evaluator """rankbased""" +657 13 dataset """kinships""" +657 13 model """simple""" +657 13 loss """marginranking""" +657 13 regularizer """no""" +657 13 optimizer """adadelta""" +657 13 training_loop """owa""" +657 13 negative_sampler """basic""" +657 13 evaluator """rankbased""" +657 14 dataset """kinships""" +657 14 model """simple""" +657 14 loss """marginranking""" +657 14 regularizer """no""" +657 14 optimizer """adadelta""" +657 14 training_loop """owa""" +657 14 negative_sampler """basic""" +657 14 evaluator """rankbased""" +657 15 dataset """kinships""" +657 15 model """simple""" +657 
15 loss """marginranking""" +657 15 regularizer """no""" +657 15 optimizer """adadelta""" +657 15 training_loop """owa""" +657 15 negative_sampler """basic""" +657 15 evaluator """rankbased""" +657 16 dataset """kinships""" +657 16 model """simple""" +657 16 loss """marginranking""" +657 16 regularizer """no""" +657 16 optimizer """adadelta""" +657 16 training_loop """owa""" +657 16 negative_sampler """basic""" +657 16 evaluator """rankbased""" +657 17 dataset """kinships""" +657 17 model """simple""" +657 17 loss """marginranking""" +657 17 regularizer """no""" +657 17 optimizer """adadelta""" +657 17 training_loop """owa""" +657 17 negative_sampler """basic""" +657 17 evaluator """rankbased""" +657 18 dataset """kinships""" +657 18 model """simple""" +657 18 loss """marginranking""" +657 18 regularizer """no""" +657 18 optimizer """adadelta""" +657 18 training_loop """owa""" +657 18 negative_sampler """basic""" +657 18 evaluator """rankbased""" +657 19 dataset """kinships""" +657 19 model """simple""" +657 19 loss """marginranking""" +657 19 regularizer """no""" +657 19 optimizer """adadelta""" +657 19 training_loop """owa""" +657 19 negative_sampler """basic""" +657 19 evaluator """rankbased""" +657 20 dataset """kinships""" +657 20 model """simple""" +657 20 loss """marginranking""" +657 20 regularizer """no""" +657 20 optimizer """adadelta""" +657 20 training_loop """owa""" +657 20 negative_sampler """basic""" +657 20 evaluator """rankbased""" +657 21 dataset """kinships""" +657 21 model """simple""" +657 21 loss """marginranking""" +657 21 regularizer """no""" +657 21 optimizer """adadelta""" +657 21 training_loop """owa""" +657 21 negative_sampler """basic""" +657 21 evaluator """rankbased""" +657 22 dataset """kinships""" +657 22 model """simple""" +657 22 loss """marginranking""" +657 22 regularizer """no""" +657 22 optimizer """adadelta""" +657 22 training_loop """owa""" +657 22 negative_sampler """basic""" +657 22 evaluator """rankbased""" +657 23 
dataset """kinships""" +657 23 model """simple""" +657 23 loss """marginranking""" +657 23 regularizer """no""" +657 23 optimizer """adadelta""" +657 23 training_loop """owa""" +657 23 negative_sampler """basic""" +657 23 evaluator """rankbased""" +657 24 dataset """kinships""" +657 24 model """simple""" +657 24 loss """marginranking""" +657 24 regularizer """no""" +657 24 optimizer """adadelta""" +657 24 training_loop """owa""" +657 24 negative_sampler """basic""" +657 24 evaluator """rankbased""" +657 25 dataset """kinships""" +657 25 model """simple""" +657 25 loss """marginranking""" +657 25 regularizer """no""" +657 25 optimizer """adadelta""" +657 25 training_loop """owa""" +657 25 negative_sampler """basic""" +657 25 evaluator """rankbased""" +657 26 dataset """kinships""" +657 26 model """simple""" +657 26 loss """marginranking""" +657 26 regularizer """no""" +657 26 optimizer """adadelta""" +657 26 training_loop """owa""" +657 26 negative_sampler """basic""" +657 26 evaluator """rankbased""" +657 27 dataset """kinships""" +657 27 model """simple""" +657 27 loss """marginranking""" +657 27 regularizer """no""" +657 27 optimizer """adadelta""" +657 27 training_loop """owa""" +657 27 negative_sampler """basic""" +657 27 evaluator """rankbased""" +657 28 dataset """kinships""" +657 28 model """simple""" +657 28 loss """marginranking""" +657 28 regularizer """no""" +657 28 optimizer """adadelta""" +657 28 training_loop """owa""" +657 28 negative_sampler """basic""" +657 28 evaluator """rankbased""" +657 29 dataset """kinships""" +657 29 model """simple""" +657 29 loss """marginranking""" +657 29 regularizer """no""" +657 29 optimizer """adadelta""" +657 29 training_loop """owa""" +657 29 negative_sampler """basic""" +657 29 evaluator """rankbased""" +657 30 dataset """kinships""" +657 30 model """simple""" +657 30 loss """marginranking""" +657 30 regularizer """no""" +657 30 optimizer """adadelta""" +657 30 training_loop """owa""" +657 30 negative_sampler 
"""basic""" +657 30 evaluator """rankbased""" +657 31 dataset """kinships""" +657 31 model """simple""" +657 31 loss """marginranking""" +657 31 regularizer """no""" +657 31 optimizer """adadelta""" +657 31 training_loop """owa""" +657 31 negative_sampler """basic""" +657 31 evaluator """rankbased""" +657 32 dataset """kinships""" +657 32 model """simple""" +657 32 loss """marginranking""" +657 32 regularizer """no""" +657 32 optimizer """adadelta""" +657 32 training_loop """owa""" +657 32 negative_sampler """basic""" +657 32 evaluator """rankbased""" +657 33 dataset """kinships""" +657 33 model """simple""" +657 33 loss """marginranking""" +657 33 regularizer """no""" +657 33 optimizer """adadelta""" +657 33 training_loop """owa""" +657 33 negative_sampler """basic""" +657 33 evaluator """rankbased""" +657 34 dataset """kinships""" +657 34 model """simple""" +657 34 loss """marginranking""" +657 34 regularizer """no""" +657 34 optimizer """adadelta""" +657 34 training_loop """owa""" +657 34 negative_sampler """basic""" +657 34 evaluator """rankbased""" +657 35 dataset """kinships""" +657 35 model """simple""" +657 35 loss """marginranking""" +657 35 regularizer """no""" +657 35 optimizer """adadelta""" +657 35 training_loop """owa""" +657 35 negative_sampler """basic""" +657 35 evaluator """rankbased""" +657 36 dataset """kinships""" +657 36 model """simple""" +657 36 loss """marginranking""" +657 36 regularizer """no""" +657 36 optimizer """adadelta""" +657 36 training_loop """owa""" +657 36 negative_sampler """basic""" +657 36 evaluator """rankbased""" +657 37 dataset """kinships""" +657 37 model """simple""" +657 37 loss """marginranking""" +657 37 regularizer """no""" +657 37 optimizer """adadelta""" +657 37 training_loop """owa""" +657 37 negative_sampler """basic""" +657 37 evaluator """rankbased""" +657 38 dataset """kinships""" +657 38 model """simple""" +657 38 loss """marginranking""" +657 38 regularizer """no""" +657 38 optimizer """adadelta""" +657 38 
training_loop """owa""" +657 38 negative_sampler """basic""" +657 38 evaluator """rankbased""" +657 39 dataset """kinships""" +657 39 model """simple""" +657 39 loss """marginranking""" +657 39 regularizer """no""" +657 39 optimizer """adadelta""" +657 39 training_loop """owa""" +657 39 negative_sampler """basic""" +657 39 evaluator """rankbased""" +657 40 dataset """kinships""" +657 40 model """simple""" +657 40 loss """marginranking""" +657 40 regularizer """no""" +657 40 optimizer """adadelta""" +657 40 training_loop """owa""" +657 40 negative_sampler """basic""" +657 40 evaluator """rankbased""" +657 41 dataset """kinships""" +657 41 model """simple""" +657 41 loss """marginranking""" +657 41 regularizer """no""" +657 41 optimizer """adadelta""" +657 41 training_loop """owa""" +657 41 negative_sampler """basic""" +657 41 evaluator """rankbased""" +657 42 dataset """kinships""" +657 42 model """simple""" +657 42 loss """marginranking""" +657 42 regularizer """no""" +657 42 optimizer """adadelta""" +657 42 training_loop """owa""" +657 42 negative_sampler """basic""" +657 42 evaluator """rankbased""" +657 43 dataset """kinships""" +657 43 model """simple""" +657 43 loss """marginranking""" +657 43 regularizer """no""" +657 43 optimizer """adadelta""" +657 43 training_loop """owa""" +657 43 negative_sampler """basic""" +657 43 evaluator """rankbased""" +657 44 dataset """kinships""" +657 44 model """simple""" +657 44 loss """marginranking""" +657 44 regularizer """no""" +657 44 optimizer """adadelta""" +657 44 training_loop """owa""" +657 44 negative_sampler """basic""" +657 44 evaluator """rankbased""" +657 45 dataset """kinships""" +657 45 model """simple""" +657 45 loss """marginranking""" +657 45 regularizer """no""" +657 45 optimizer """adadelta""" +657 45 training_loop """owa""" +657 45 negative_sampler """basic""" +657 45 evaluator """rankbased""" +657 46 dataset """kinships""" +657 46 model """simple""" +657 46 loss """marginranking""" +657 46 regularizer 
"""no""" +657 46 optimizer """adadelta""" +657 46 training_loop """owa""" +657 46 negative_sampler """basic""" +657 46 evaluator """rankbased""" +657 47 dataset """kinships""" +657 47 model """simple""" +657 47 loss """marginranking""" +657 47 regularizer """no""" +657 47 optimizer """adadelta""" +657 47 training_loop """owa""" +657 47 negative_sampler """basic""" +657 47 evaluator """rankbased""" +657 48 dataset """kinships""" +657 48 model """simple""" +657 48 loss """marginranking""" +657 48 regularizer """no""" +657 48 optimizer """adadelta""" +657 48 training_loop """owa""" +657 48 negative_sampler """basic""" +657 48 evaluator """rankbased""" +657 49 dataset """kinships""" +657 49 model """simple""" +657 49 loss """marginranking""" +657 49 regularizer """no""" +657 49 optimizer """adadelta""" +657 49 training_loop """owa""" +657 49 negative_sampler """basic""" +657 49 evaluator """rankbased""" +657 50 dataset """kinships""" +657 50 model """simple""" +657 50 loss """marginranking""" +657 50 regularizer """no""" +657 50 optimizer """adadelta""" +657 50 training_loop """owa""" +657 50 negative_sampler """basic""" +657 50 evaluator """rankbased""" +657 51 dataset """kinships""" +657 51 model """simple""" +657 51 loss """marginranking""" +657 51 regularizer """no""" +657 51 optimizer """adadelta""" +657 51 training_loop """owa""" +657 51 negative_sampler """basic""" +657 51 evaluator """rankbased""" +657 52 dataset """kinships""" +657 52 model """simple""" +657 52 loss """marginranking""" +657 52 regularizer """no""" +657 52 optimizer """adadelta""" +657 52 training_loop """owa""" +657 52 negative_sampler """basic""" +657 52 evaluator """rankbased""" +657 53 dataset """kinships""" +657 53 model """simple""" +657 53 loss """marginranking""" +657 53 regularizer """no""" +657 53 optimizer """adadelta""" +657 53 training_loop """owa""" +657 53 negative_sampler """basic""" +657 53 evaluator """rankbased""" +657 54 dataset """kinships""" +657 54 model """simple""" +657 
54 loss """marginranking""" +657 54 regularizer """no""" +657 54 optimizer """adadelta""" +657 54 training_loop """owa""" +657 54 negative_sampler """basic""" +657 54 evaluator """rankbased""" +657 55 dataset """kinships""" +657 55 model """simple""" +657 55 loss """marginranking""" +657 55 regularizer """no""" +657 55 optimizer """adadelta""" +657 55 training_loop """owa""" +657 55 negative_sampler """basic""" +657 55 evaluator """rankbased""" +657 56 dataset """kinships""" +657 56 model """simple""" +657 56 loss """marginranking""" +657 56 regularizer """no""" +657 56 optimizer """adadelta""" +657 56 training_loop """owa""" +657 56 negative_sampler """basic""" +657 56 evaluator """rankbased""" +657 57 dataset """kinships""" +657 57 model """simple""" +657 57 loss """marginranking""" +657 57 regularizer """no""" +657 57 optimizer """adadelta""" +657 57 training_loop """owa""" +657 57 negative_sampler """basic""" +657 57 evaluator """rankbased""" +657 58 dataset """kinships""" +657 58 model """simple""" +657 58 loss """marginranking""" +657 58 regularizer """no""" +657 58 optimizer """adadelta""" +657 58 training_loop """owa""" +657 58 negative_sampler """basic""" +657 58 evaluator """rankbased""" +657 59 dataset """kinships""" +657 59 model """simple""" +657 59 loss """marginranking""" +657 59 regularizer """no""" +657 59 optimizer """adadelta""" +657 59 training_loop """owa""" +657 59 negative_sampler """basic""" +657 59 evaluator """rankbased""" +657 60 dataset """kinships""" +657 60 model """simple""" +657 60 loss """marginranking""" +657 60 regularizer """no""" +657 60 optimizer """adadelta""" +657 60 training_loop """owa""" +657 60 negative_sampler """basic""" +657 60 evaluator """rankbased""" +657 61 dataset """kinships""" +657 61 model """simple""" +657 61 loss """marginranking""" +657 61 regularizer """no""" +657 61 optimizer """adadelta""" +657 61 training_loop """owa""" +657 61 negative_sampler """basic""" +657 61 evaluator """rankbased""" +657 62 
dataset """kinships""" +657 62 model """simple""" +657 62 loss """marginranking""" +657 62 regularizer """no""" +657 62 optimizer """adadelta""" +657 62 training_loop """owa""" +657 62 negative_sampler """basic""" +657 62 evaluator """rankbased""" +657 63 dataset """kinships""" +657 63 model """simple""" +657 63 loss """marginranking""" +657 63 regularizer """no""" +657 63 optimizer """adadelta""" +657 63 training_loop """owa""" +657 63 negative_sampler """basic""" +657 63 evaluator """rankbased""" +657 64 dataset """kinships""" +657 64 model """simple""" +657 64 loss """marginranking""" +657 64 regularizer """no""" +657 64 optimizer """adadelta""" +657 64 training_loop """owa""" +657 64 negative_sampler """basic""" +657 64 evaluator """rankbased""" +657 65 dataset """kinships""" +657 65 model """simple""" +657 65 loss """marginranking""" +657 65 regularizer """no""" +657 65 optimizer """adadelta""" +657 65 training_loop """owa""" +657 65 negative_sampler """basic""" +657 65 evaluator """rankbased""" +657 66 dataset """kinships""" +657 66 model """simple""" +657 66 loss """marginranking""" +657 66 regularizer """no""" +657 66 optimizer """adadelta""" +657 66 training_loop """owa""" +657 66 negative_sampler """basic""" +657 66 evaluator """rankbased""" +657 67 dataset """kinships""" +657 67 model """simple""" +657 67 loss """marginranking""" +657 67 regularizer """no""" +657 67 optimizer """adadelta""" +657 67 training_loop """owa""" +657 67 negative_sampler """basic""" +657 67 evaluator """rankbased""" +657 68 dataset """kinships""" +657 68 model """simple""" +657 68 loss """marginranking""" +657 68 regularizer """no""" +657 68 optimizer """adadelta""" +657 68 training_loop """owa""" +657 68 negative_sampler """basic""" +657 68 evaluator """rankbased""" +657 69 dataset """kinships""" +657 69 model """simple""" +657 69 loss """marginranking""" +657 69 regularizer """no""" +657 69 optimizer """adadelta""" +657 69 training_loop """owa""" +657 69 negative_sampler 
"""basic""" +657 69 evaluator """rankbased""" +657 70 dataset """kinships""" +657 70 model """simple""" +657 70 loss """marginranking""" +657 70 regularizer """no""" +657 70 optimizer """adadelta""" +657 70 training_loop """owa""" +657 70 negative_sampler """basic""" +657 70 evaluator """rankbased""" +657 71 dataset """kinships""" +657 71 model """simple""" +657 71 loss """marginranking""" +657 71 regularizer """no""" +657 71 optimizer """adadelta""" +657 71 training_loop """owa""" +657 71 negative_sampler """basic""" +657 71 evaluator """rankbased""" +657 72 dataset """kinships""" +657 72 model """simple""" +657 72 loss """marginranking""" +657 72 regularizer """no""" +657 72 optimizer """adadelta""" +657 72 training_loop """owa""" +657 72 negative_sampler """basic""" +657 72 evaluator """rankbased""" +657 73 dataset """kinships""" +657 73 model """simple""" +657 73 loss """marginranking""" +657 73 regularizer """no""" +657 73 optimizer """adadelta""" +657 73 training_loop """owa""" +657 73 negative_sampler """basic""" +657 73 evaluator """rankbased""" +657 74 dataset """kinships""" +657 74 model """simple""" +657 74 loss """marginranking""" +657 74 regularizer """no""" +657 74 optimizer """adadelta""" +657 74 training_loop """owa""" +657 74 negative_sampler """basic""" +657 74 evaluator """rankbased""" +657 75 dataset """kinships""" +657 75 model """simple""" +657 75 loss """marginranking""" +657 75 regularizer """no""" +657 75 optimizer """adadelta""" +657 75 training_loop """owa""" +657 75 negative_sampler """basic""" +657 75 evaluator """rankbased""" +657 76 dataset """kinships""" +657 76 model """simple""" +657 76 loss """marginranking""" +657 76 regularizer """no""" +657 76 optimizer """adadelta""" +657 76 training_loop """owa""" +657 76 negative_sampler """basic""" +657 76 evaluator """rankbased""" +657 77 dataset """kinships""" +657 77 model """simple""" +657 77 loss """marginranking""" +657 77 regularizer """no""" +657 77 optimizer """adadelta""" +657 77 
training_loop """owa""" +657 77 negative_sampler """basic""" +657 77 evaluator """rankbased""" +657 78 dataset """kinships""" +657 78 model """simple""" +657 78 loss """marginranking""" +657 78 regularizer """no""" +657 78 optimizer """adadelta""" +657 78 training_loop """owa""" +657 78 negative_sampler """basic""" +657 78 evaluator """rankbased""" +657 79 dataset """kinships""" +657 79 model """simple""" +657 79 loss """marginranking""" +657 79 regularizer """no""" +657 79 optimizer """adadelta""" +657 79 training_loop """owa""" +657 79 negative_sampler """basic""" +657 79 evaluator """rankbased""" +657 80 dataset """kinships""" +657 80 model """simple""" +657 80 loss """marginranking""" +657 80 regularizer """no""" +657 80 optimizer """adadelta""" +657 80 training_loop """owa""" +657 80 negative_sampler """basic""" +657 80 evaluator """rankbased""" +657 81 dataset """kinships""" +657 81 model """simple""" +657 81 loss """marginranking""" +657 81 regularizer """no""" +657 81 optimizer """adadelta""" +657 81 training_loop """owa""" +657 81 negative_sampler """basic""" +657 81 evaluator """rankbased""" +657 82 dataset """kinships""" +657 82 model """simple""" +657 82 loss """marginranking""" +657 82 regularizer """no""" +657 82 optimizer """adadelta""" +657 82 training_loop """owa""" +657 82 negative_sampler """basic""" +657 82 evaluator """rankbased""" +657 83 dataset """kinships""" +657 83 model """simple""" +657 83 loss """marginranking""" +657 83 regularizer """no""" +657 83 optimizer """adadelta""" +657 83 training_loop """owa""" +657 83 negative_sampler """basic""" +657 83 evaluator """rankbased""" +657 84 dataset """kinships""" +657 84 model """simple""" +657 84 loss """marginranking""" +657 84 regularizer """no""" +657 84 optimizer """adadelta""" +657 84 training_loop """owa""" +657 84 negative_sampler """basic""" +657 84 evaluator """rankbased""" +657 85 dataset """kinships""" +657 85 model """simple""" +657 85 loss """marginranking""" +657 85 regularizer 
"""no""" +657 85 optimizer """adadelta""" +657 85 training_loop """owa""" +657 85 negative_sampler """basic""" +657 85 evaluator """rankbased""" +657 86 dataset """kinships""" +657 86 model """simple""" +657 86 loss """marginranking""" +657 86 regularizer """no""" +657 86 optimizer """adadelta""" +657 86 training_loop """owa""" +657 86 negative_sampler """basic""" +657 86 evaluator """rankbased""" +657 87 dataset """kinships""" +657 87 model """simple""" +657 87 loss """marginranking""" +657 87 regularizer """no""" +657 87 optimizer """adadelta""" +657 87 training_loop """owa""" +657 87 negative_sampler """basic""" +657 87 evaluator """rankbased""" +657 88 dataset """kinships""" +657 88 model """simple""" +657 88 loss """marginranking""" +657 88 regularizer """no""" +657 88 optimizer """adadelta""" +657 88 training_loop """owa""" +657 88 negative_sampler """basic""" +657 88 evaluator """rankbased""" +657 89 dataset """kinships""" +657 89 model """simple""" +657 89 loss """marginranking""" +657 89 regularizer """no""" +657 89 optimizer """adadelta""" +657 89 training_loop """owa""" +657 89 negative_sampler """basic""" +657 89 evaluator """rankbased""" +657 90 dataset """kinships""" +657 90 model """simple""" +657 90 loss """marginranking""" +657 90 regularizer """no""" +657 90 optimizer """adadelta""" +657 90 training_loop """owa""" +657 90 negative_sampler """basic""" +657 90 evaluator """rankbased""" +657 91 dataset """kinships""" +657 91 model """simple""" +657 91 loss """marginranking""" +657 91 regularizer """no""" +657 91 optimizer """adadelta""" +657 91 training_loop """owa""" +657 91 negative_sampler """basic""" +657 91 evaluator """rankbased""" +657 92 dataset """kinships""" +657 92 model """simple""" +657 92 loss """marginranking""" +657 92 regularizer """no""" +657 92 optimizer """adadelta""" +657 92 training_loop """owa""" +657 92 negative_sampler """basic""" +657 92 evaluator """rankbased""" +657 93 dataset """kinships""" +657 93 model """simple""" +657 
93 loss """marginranking""" +657 93 regularizer """no""" +657 93 optimizer """adadelta""" +657 93 training_loop """owa""" +657 93 negative_sampler """basic""" +657 93 evaluator """rankbased""" +657 94 dataset """kinships""" +657 94 model """simple""" +657 94 loss """marginranking""" +657 94 regularizer """no""" +657 94 optimizer """adadelta""" +657 94 training_loop """owa""" +657 94 negative_sampler """basic""" +657 94 evaluator """rankbased""" +657 95 dataset """kinships""" +657 95 model """simple""" +657 95 loss """marginranking""" +657 95 regularizer """no""" +657 95 optimizer """adadelta""" +657 95 training_loop """owa""" +657 95 negative_sampler """basic""" +657 95 evaluator """rankbased""" +657 96 dataset """kinships""" +657 96 model """simple""" +657 96 loss """marginranking""" +657 96 regularizer """no""" +657 96 optimizer """adadelta""" +657 96 training_loop """owa""" +657 96 negative_sampler """basic""" +657 96 evaluator """rankbased""" +657 97 dataset """kinships""" +657 97 model """simple""" +657 97 loss """marginranking""" +657 97 regularizer """no""" +657 97 optimizer """adadelta""" +657 97 training_loop """owa""" +657 97 negative_sampler """basic""" +657 97 evaluator """rankbased""" +657 98 dataset """kinships""" +657 98 model """simple""" +657 98 loss """marginranking""" +657 98 regularizer """no""" +657 98 optimizer """adadelta""" +657 98 training_loop """owa""" +657 98 negative_sampler """basic""" +657 98 evaluator """rankbased""" +657 99 dataset """kinships""" +657 99 model """simple""" +657 99 loss """marginranking""" +657 99 regularizer """no""" +657 99 optimizer """adadelta""" +657 99 training_loop """owa""" +657 99 negative_sampler """basic""" +657 99 evaluator """rankbased""" +657 100 dataset """kinships""" +657 100 model """simple""" +657 100 loss """marginranking""" +657 100 regularizer """no""" +657 100 optimizer """adadelta""" +657 100 training_loop """owa""" +657 100 negative_sampler """basic""" +657 100 evaluator """rankbased""" +658 1 
model.embedding_dim 1.0 +658 1 loss.margin 5.845811921322757 +658 1 negative_sampler.num_negs_per_pos 68.0 +658 1 training.batch_size 0.0 +658 2 model.embedding_dim 2.0 +658 2 loss.margin 4.639331030817198 +658 2 negative_sampler.num_negs_per_pos 70.0 +658 2 training.batch_size 0.0 +658 3 model.embedding_dim 0.0 +658 3 loss.margin 4.81293565924344 +658 3 negative_sampler.num_negs_per_pos 80.0 +658 3 training.batch_size 0.0 +658 4 model.embedding_dim 2.0 +658 4 loss.margin 1.0339689138468149 +658 4 negative_sampler.num_negs_per_pos 71.0 +658 4 training.batch_size 1.0 +658 5 model.embedding_dim 0.0 +658 5 loss.margin 9.442480295844428 +658 5 negative_sampler.num_negs_per_pos 25.0 +658 5 training.batch_size 1.0 +658 6 model.embedding_dim 0.0 +658 6 loss.margin 3.5879601468330464 +658 6 negative_sampler.num_negs_per_pos 35.0 +658 6 training.batch_size 0.0 +658 7 model.embedding_dim 2.0 +658 7 loss.margin 8.434961774857372 +658 7 negative_sampler.num_negs_per_pos 81.0 +658 7 training.batch_size 1.0 +658 8 model.embedding_dim 1.0 +658 8 loss.margin 1.3019504172285288 +658 8 negative_sampler.num_negs_per_pos 65.0 +658 8 training.batch_size 1.0 +658 9 model.embedding_dim 1.0 +658 9 loss.margin 5.2868646403300446 +658 9 negative_sampler.num_negs_per_pos 94.0 +658 9 training.batch_size 1.0 +658 10 model.embedding_dim 1.0 +658 10 loss.margin 8.473593853522228 +658 10 negative_sampler.num_negs_per_pos 7.0 +658 10 training.batch_size 2.0 +658 11 model.embedding_dim 0.0 +658 11 loss.margin 9.66548717387441 +658 11 negative_sampler.num_negs_per_pos 18.0 +658 11 training.batch_size 0.0 +658 12 model.embedding_dim 2.0 +658 12 loss.margin 5.461562398395437 +658 12 negative_sampler.num_negs_per_pos 44.0 +658 12 training.batch_size 1.0 +658 13 model.embedding_dim 0.0 +658 13 loss.margin 8.851781617785749 +658 13 negative_sampler.num_negs_per_pos 26.0 +658 13 training.batch_size 0.0 +658 14 model.embedding_dim 2.0 +658 14 loss.margin 1.8737926635213609 +658 14 
negative_sampler.num_negs_per_pos 14.0 +658 14 training.batch_size 1.0 +658 15 model.embedding_dim 1.0 +658 15 loss.margin 1.970521294794762 +658 15 negative_sampler.num_negs_per_pos 66.0 +658 15 training.batch_size 2.0 +658 16 model.embedding_dim 2.0 +658 16 loss.margin 5.809079842090934 +658 16 negative_sampler.num_negs_per_pos 46.0 +658 16 training.batch_size 0.0 +658 17 model.embedding_dim 2.0 +658 17 loss.margin 7.957328290442407 +658 17 negative_sampler.num_negs_per_pos 33.0 +658 17 training.batch_size 2.0 +658 18 model.embedding_dim 1.0 +658 18 loss.margin 1.5172965367613702 +658 18 negative_sampler.num_negs_per_pos 0.0 +658 18 training.batch_size 1.0 +658 19 model.embedding_dim 1.0 +658 19 loss.margin 8.683516122297636 +658 19 negative_sampler.num_negs_per_pos 26.0 +658 19 training.batch_size 2.0 +658 20 model.embedding_dim 1.0 +658 20 loss.margin 7.630222843637612 +658 20 negative_sampler.num_negs_per_pos 0.0 +658 20 training.batch_size 0.0 +658 21 model.embedding_dim 0.0 +658 21 loss.margin 6.922069699915174 +658 21 negative_sampler.num_negs_per_pos 35.0 +658 21 training.batch_size 1.0 +658 22 model.embedding_dim 1.0 +658 22 loss.margin 6.513781835667434 +658 22 negative_sampler.num_negs_per_pos 49.0 +658 22 training.batch_size 0.0 +658 23 model.embedding_dim 0.0 +658 23 loss.margin 6.703364223991505 +658 23 negative_sampler.num_negs_per_pos 0.0 +658 23 training.batch_size 1.0 +658 24 model.embedding_dim 0.0 +658 24 loss.margin 8.195696330050106 +658 24 negative_sampler.num_negs_per_pos 82.0 +658 24 training.batch_size 0.0 +658 25 model.embedding_dim 0.0 +658 25 loss.margin 3.7845228474090162 +658 25 negative_sampler.num_negs_per_pos 44.0 +658 25 training.batch_size 0.0 +658 26 model.embedding_dim 0.0 +658 26 loss.margin 7.427044674413066 +658 26 negative_sampler.num_negs_per_pos 63.0 +658 26 training.batch_size 1.0 +658 27 model.embedding_dim 0.0 +658 27 loss.margin 1.9780451647799 +658 27 negative_sampler.num_negs_per_pos 72.0 +658 27 
training.batch_size 2.0 +658 28 model.embedding_dim 0.0 +658 28 loss.margin 4.82728421330155 +658 28 negative_sampler.num_negs_per_pos 40.0 +658 28 training.batch_size 2.0 +658 29 model.embedding_dim 1.0 +658 29 loss.margin 2.9447870369781812 +658 29 negative_sampler.num_negs_per_pos 84.0 +658 29 training.batch_size 1.0 +658 30 model.embedding_dim 1.0 +658 30 loss.margin 6.359168901604013 +658 30 negative_sampler.num_negs_per_pos 58.0 +658 30 training.batch_size 0.0 +658 31 model.embedding_dim 0.0 +658 31 loss.margin 4.955725553069859 +658 31 negative_sampler.num_negs_per_pos 5.0 +658 31 training.batch_size 0.0 +658 32 model.embedding_dim 0.0 +658 32 loss.margin 5.119177414377324 +658 32 negative_sampler.num_negs_per_pos 89.0 +658 32 training.batch_size 2.0 +658 33 model.embedding_dim 0.0 +658 33 loss.margin 7.264606504161744 +658 33 negative_sampler.num_negs_per_pos 36.0 +658 33 training.batch_size 1.0 +658 34 model.embedding_dim 2.0 +658 34 loss.margin 1.8509520618968718 +658 34 negative_sampler.num_negs_per_pos 8.0 +658 34 training.batch_size 2.0 +658 35 model.embedding_dim 1.0 +658 35 loss.margin 1.4195606798402114 +658 35 negative_sampler.num_negs_per_pos 99.0 +658 35 training.batch_size 1.0 +658 36 model.embedding_dim 1.0 +658 36 loss.margin 0.979133378236716 +658 36 negative_sampler.num_negs_per_pos 24.0 +658 36 training.batch_size 0.0 +658 37 model.embedding_dim 1.0 +658 37 loss.margin 1.6349477440584186 +658 37 negative_sampler.num_negs_per_pos 76.0 +658 37 training.batch_size 2.0 +658 38 model.embedding_dim 0.0 +658 38 loss.margin 3.5103760660803283 +658 38 negative_sampler.num_negs_per_pos 82.0 +658 38 training.batch_size 1.0 +658 39 model.embedding_dim 1.0 +658 39 loss.margin 2.2794010346284117 +658 39 negative_sampler.num_negs_per_pos 68.0 +658 39 training.batch_size 1.0 +658 40 model.embedding_dim 2.0 +658 40 loss.margin 1.9872950571470387 +658 40 negative_sampler.num_negs_per_pos 50.0 +658 40 training.batch_size 0.0 +658 41 model.embedding_dim 2.0 
+658 41 loss.margin 5.000167892013877 +658 41 negative_sampler.num_negs_per_pos 38.0 +658 41 training.batch_size 0.0 +658 42 model.embedding_dim 2.0 +658 42 loss.margin 7.934822828490742 +658 42 negative_sampler.num_negs_per_pos 98.0 +658 42 training.batch_size 2.0 +658 43 model.embedding_dim 1.0 +658 43 loss.margin 4.2596040516705465 +658 43 negative_sampler.num_negs_per_pos 72.0 +658 43 training.batch_size 0.0 +658 44 model.embedding_dim 2.0 +658 44 loss.margin 6.880741063439542 +658 44 negative_sampler.num_negs_per_pos 84.0 +658 44 training.batch_size 2.0 +658 45 model.embedding_dim 0.0 +658 45 loss.margin 6.302023190408521 +658 45 negative_sampler.num_negs_per_pos 85.0 +658 45 training.batch_size 0.0 +658 46 model.embedding_dim 0.0 +658 46 loss.margin 7.4628213638394705 +658 46 negative_sampler.num_negs_per_pos 90.0 +658 46 training.batch_size 2.0 +658 47 model.embedding_dim 0.0 +658 47 loss.margin 0.8835488581263917 +658 47 negative_sampler.num_negs_per_pos 60.0 +658 47 training.batch_size 0.0 +658 48 model.embedding_dim 0.0 +658 48 loss.margin 4.67901110726375 +658 48 negative_sampler.num_negs_per_pos 83.0 +658 48 training.batch_size 1.0 +658 49 model.embedding_dim 2.0 +658 49 loss.margin 5.621047179286 +658 49 negative_sampler.num_negs_per_pos 1.0 +658 49 training.batch_size 1.0 +658 50 model.embedding_dim 0.0 +658 50 loss.margin 2.0502460669587457 +658 50 negative_sampler.num_negs_per_pos 77.0 +658 50 training.batch_size 2.0 +658 51 model.embedding_dim 0.0 +658 51 loss.margin 6.152916146940969 +658 51 negative_sampler.num_negs_per_pos 93.0 +658 51 training.batch_size 2.0 +658 52 model.embedding_dim 1.0 +658 52 loss.margin 7.677756364776517 +658 52 negative_sampler.num_negs_per_pos 73.0 +658 52 training.batch_size 2.0 +658 53 model.embedding_dim 2.0 +658 53 loss.margin 1.6554588521329128 +658 53 negative_sampler.num_negs_per_pos 20.0 +658 53 training.batch_size 1.0 +658 54 model.embedding_dim 0.0 +658 54 loss.margin 7.39180391675947 +658 54 
negative_sampler.num_negs_per_pos 63.0 +658 54 training.batch_size 2.0 +658 55 model.embedding_dim 2.0 +658 55 loss.margin 2.984138552607858 +658 55 negative_sampler.num_negs_per_pos 67.0 +658 55 training.batch_size 0.0 +658 56 model.embedding_dim 0.0 +658 56 loss.margin 8.963759631712826 +658 56 negative_sampler.num_negs_per_pos 73.0 +658 56 training.batch_size 1.0 +658 57 model.embedding_dim 0.0 +658 57 loss.margin 2.559338875549458 +658 57 negative_sampler.num_negs_per_pos 91.0 +658 57 training.batch_size 0.0 +658 58 model.embedding_dim 0.0 +658 58 loss.margin 8.718783581133613 +658 58 negative_sampler.num_negs_per_pos 79.0 +658 58 training.batch_size 0.0 +658 59 model.embedding_dim 2.0 +658 59 loss.margin 2.476651653952511 +658 59 negative_sampler.num_negs_per_pos 43.0 +658 59 training.batch_size 1.0 +658 60 model.embedding_dim 1.0 +658 60 loss.margin 9.846033687174007 +658 60 negative_sampler.num_negs_per_pos 10.0 +658 60 training.batch_size 0.0 +658 61 model.embedding_dim 1.0 +658 61 loss.margin 8.416367304825675 +658 61 negative_sampler.num_negs_per_pos 99.0 +658 61 training.batch_size 0.0 +658 62 model.embedding_dim 0.0 +658 62 loss.margin 7.199293266835558 +658 62 negative_sampler.num_negs_per_pos 34.0 +658 62 training.batch_size 0.0 +658 63 model.embedding_dim 1.0 +658 63 loss.margin 8.089131056928586 +658 63 negative_sampler.num_negs_per_pos 19.0 +658 63 training.batch_size 0.0 +658 64 model.embedding_dim 0.0 +658 64 loss.margin 2.726695079106868 +658 64 negative_sampler.num_negs_per_pos 16.0 +658 64 training.batch_size 0.0 +658 65 model.embedding_dim 0.0 +658 65 loss.margin 1.4408007607621158 +658 65 negative_sampler.num_negs_per_pos 24.0 +658 65 training.batch_size 1.0 +658 66 model.embedding_dim 1.0 +658 66 loss.margin 6.515975188373921 +658 66 negative_sampler.num_negs_per_pos 53.0 +658 66 training.batch_size 2.0 +658 67 model.embedding_dim 2.0 +658 67 loss.margin 7.9483072892051805 +658 67 negative_sampler.num_negs_per_pos 68.0 +658 67 
training.batch_size 1.0 +658 68 model.embedding_dim 2.0 +658 68 loss.margin 9.889196454720285 +658 68 negative_sampler.num_negs_per_pos 57.0 +658 68 training.batch_size 2.0 +658 69 model.embedding_dim 2.0 +658 69 loss.margin 3.243087811356116 +658 69 negative_sampler.num_negs_per_pos 61.0 +658 69 training.batch_size 2.0 +658 70 model.embedding_dim 1.0 +658 70 loss.margin 7.0944169758467615 +658 70 negative_sampler.num_negs_per_pos 67.0 +658 70 training.batch_size 1.0 +658 71 model.embedding_dim 0.0 +658 71 loss.margin 7.4426607565797385 +658 71 negative_sampler.num_negs_per_pos 70.0 +658 71 training.batch_size 2.0 +658 72 model.embedding_dim 2.0 +658 72 loss.margin 6.4231340857328725 +658 72 negative_sampler.num_negs_per_pos 32.0 +658 72 training.batch_size 0.0 +658 73 model.embedding_dim 1.0 +658 73 loss.margin 8.166105041154383 +658 73 negative_sampler.num_negs_per_pos 83.0 +658 73 training.batch_size 0.0 +658 74 model.embedding_dim 0.0 +658 74 loss.margin 7.099132675565502 +658 74 negative_sampler.num_negs_per_pos 65.0 +658 74 training.batch_size 0.0 +658 75 model.embedding_dim 1.0 +658 75 loss.margin 4.0310311563985675 +658 75 negative_sampler.num_negs_per_pos 66.0 +658 75 training.batch_size 1.0 +658 76 model.embedding_dim 1.0 +658 76 loss.margin 3.393151254227788 +658 76 negative_sampler.num_negs_per_pos 28.0 +658 76 training.batch_size 1.0 +658 77 model.embedding_dim 0.0 +658 77 loss.margin 4.629011115429417 +658 77 negative_sampler.num_negs_per_pos 5.0 +658 77 training.batch_size 1.0 +658 78 model.embedding_dim 1.0 +658 78 loss.margin 4.27074125872978 +658 78 negative_sampler.num_negs_per_pos 14.0 +658 78 training.batch_size 2.0 +658 79 model.embedding_dim 0.0 +658 79 loss.margin 6.8163616897295665 +658 79 negative_sampler.num_negs_per_pos 13.0 +658 79 training.batch_size 1.0 +658 80 model.embedding_dim 0.0 +658 80 loss.margin 3.766648275251041 +658 80 negative_sampler.num_negs_per_pos 8.0 +658 80 training.batch_size 2.0 +658 81 model.embedding_dim 2.0 +658 
81 loss.margin 1.8940732698926883 +658 81 negative_sampler.num_negs_per_pos 0.0 +658 81 training.batch_size 1.0 +658 82 model.embedding_dim 1.0 +658 82 loss.margin 2.8250678092710166 +658 82 negative_sampler.num_negs_per_pos 71.0 +658 82 training.batch_size 1.0 +658 83 model.embedding_dim 1.0 +658 83 loss.margin 4.897777442154723 +658 83 negative_sampler.num_negs_per_pos 65.0 +658 83 training.batch_size 1.0 +658 84 model.embedding_dim 0.0 +658 84 loss.margin 4.388726131929948 +658 84 negative_sampler.num_negs_per_pos 56.0 +658 84 training.batch_size 2.0 +658 85 model.embedding_dim 1.0 +658 85 loss.margin 1.0685840410445908 +658 85 negative_sampler.num_negs_per_pos 19.0 +658 85 training.batch_size 1.0 +658 86 model.embedding_dim 0.0 +658 86 loss.margin 6.824327643071905 +658 86 negative_sampler.num_negs_per_pos 88.0 +658 86 training.batch_size 1.0 +658 87 model.embedding_dim 1.0 +658 87 loss.margin 0.6887988184173897 +658 87 negative_sampler.num_negs_per_pos 77.0 +658 87 training.batch_size 1.0 +658 88 model.embedding_dim 1.0 +658 88 loss.margin 8.661172390143543 +658 88 negative_sampler.num_negs_per_pos 31.0 +658 88 training.batch_size 2.0 +658 89 model.embedding_dim 2.0 +658 89 loss.margin 5.299198113164337 +658 89 negative_sampler.num_negs_per_pos 74.0 +658 89 training.batch_size 0.0 +658 90 model.embedding_dim 1.0 +658 90 loss.margin 9.142826233438479 +658 90 negative_sampler.num_negs_per_pos 44.0 +658 90 training.batch_size 0.0 +658 91 model.embedding_dim 2.0 +658 91 loss.margin 1.825481694009563 +658 91 negative_sampler.num_negs_per_pos 19.0 +658 91 training.batch_size 1.0 +658 92 model.embedding_dim 0.0 +658 92 loss.margin 9.719507959989858 +658 92 negative_sampler.num_negs_per_pos 74.0 +658 92 training.batch_size 2.0 +658 93 model.embedding_dim 1.0 +658 93 loss.margin 4.724931435347582 +658 93 negative_sampler.num_negs_per_pos 43.0 +658 93 training.batch_size 2.0 +658 94 model.embedding_dim 0.0 +658 94 loss.margin 8.716654978052231 +658 94 
negative_sampler.num_negs_per_pos 13.0 +658 94 training.batch_size 1.0 +658 95 model.embedding_dim 2.0 +658 95 loss.margin 1.0518520390799182 +658 95 negative_sampler.num_negs_per_pos 46.0 +658 95 training.batch_size 2.0 +658 96 model.embedding_dim 2.0 +658 96 loss.margin 0.9692560614159094 +658 96 negative_sampler.num_negs_per_pos 14.0 +658 96 training.batch_size 0.0 +658 97 model.embedding_dim 0.0 +658 97 loss.margin 8.702818853051813 +658 97 negative_sampler.num_negs_per_pos 66.0 +658 97 training.batch_size 1.0 +658 98 model.embedding_dim 0.0 +658 98 loss.margin 3.2105091886268733 +658 98 negative_sampler.num_negs_per_pos 29.0 +658 98 training.batch_size 0.0 +658 99 model.embedding_dim 0.0 +658 99 loss.margin 4.6747440529358135 +658 99 negative_sampler.num_negs_per_pos 40.0 +658 99 training.batch_size 1.0 +658 100 model.embedding_dim 2.0 +658 100 loss.margin 4.915414666378671 +658 100 negative_sampler.num_negs_per_pos 84.0 +658 100 training.batch_size 2.0 +658 1 dataset """kinships""" +658 1 model """simple""" +658 1 loss """marginranking""" +658 1 regularizer """no""" +658 1 optimizer """adadelta""" +658 1 training_loop """owa""" +658 1 negative_sampler """basic""" +658 1 evaluator """rankbased""" +658 2 dataset """kinships""" +658 2 model """simple""" +658 2 loss """marginranking""" +658 2 regularizer """no""" +658 2 optimizer """adadelta""" +658 2 training_loop """owa""" +658 2 negative_sampler """basic""" +658 2 evaluator """rankbased""" +658 3 dataset """kinships""" +658 3 model """simple""" +658 3 loss """marginranking""" +658 3 regularizer """no""" +658 3 optimizer """adadelta""" +658 3 training_loop """owa""" +658 3 negative_sampler """basic""" +658 3 evaluator """rankbased""" +658 4 dataset """kinships""" +658 4 model """simple""" +658 4 loss """marginranking""" +658 4 regularizer """no""" +658 4 optimizer """adadelta""" +658 4 training_loop """owa""" +658 4 negative_sampler """basic""" +658 4 evaluator """rankbased""" +658 5 dataset """kinships""" +658 
5 model """simple""" +658 5 loss """marginranking""" +658 5 regularizer """no""" +658 5 optimizer """adadelta""" +658 5 training_loop """owa""" +658 5 negative_sampler """basic""" +658 5 evaluator """rankbased""" +658 6 dataset """kinships""" +658 6 model """simple""" +658 6 loss """marginranking""" +658 6 regularizer """no""" +658 6 optimizer """adadelta""" +658 6 training_loop """owa""" +658 6 negative_sampler """basic""" +658 6 evaluator """rankbased""" +658 7 dataset """kinships""" +658 7 model """simple""" +658 7 loss """marginranking""" +658 7 regularizer """no""" +658 7 optimizer """adadelta""" +658 7 training_loop """owa""" +658 7 negative_sampler """basic""" +658 7 evaluator """rankbased""" +658 8 dataset """kinships""" +658 8 model """simple""" +658 8 loss """marginranking""" +658 8 regularizer """no""" +658 8 optimizer """adadelta""" +658 8 training_loop """owa""" +658 8 negative_sampler """basic""" +658 8 evaluator """rankbased""" +658 9 dataset """kinships""" +658 9 model """simple""" +658 9 loss """marginranking""" +658 9 regularizer """no""" +658 9 optimizer """adadelta""" +658 9 training_loop """owa""" +658 9 negative_sampler """basic""" +658 9 evaluator """rankbased""" +658 10 dataset """kinships""" +658 10 model """simple""" +658 10 loss """marginranking""" +658 10 regularizer """no""" +658 10 optimizer """adadelta""" +658 10 training_loop """owa""" +658 10 negative_sampler """basic""" +658 10 evaluator """rankbased""" +658 11 dataset """kinships""" +658 11 model """simple""" +658 11 loss """marginranking""" +658 11 regularizer """no""" +658 11 optimizer """adadelta""" +658 11 training_loop """owa""" +658 11 negative_sampler """basic""" +658 11 evaluator """rankbased""" +658 12 dataset """kinships""" +658 12 model """simple""" +658 12 loss """marginranking""" +658 12 regularizer """no""" +658 12 optimizer """adadelta""" +658 12 training_loop """owa""" +658 12 negative_sampler """basic""" +658 12 evaluator """rankbased""" +658 13 dataset 
"""kinships""" +658 13 model """simple""" +658 13 loss """marginranking""" +658 13 regularizer """no""" +658 13 optimizer """adadelta""" +658 13 training_loop """owa""" +658 13 negative_sampler """basic""" +658 13 evaluator """rankbased""" +658 14 dataset """kinships""" +658 14 model """simple""" +658 14 loss """marginranking""" +658 14 regularizer """no""" +658 14 optimizer """adadelta""" +658 14 training_loop """owa""" +658 14 negative_sampler """basic""" +658 14 evaluator """rankbased""" +658 15 dataset """kinships""" +658 15 model """simple""" +658 15 loss """marginranking""" +658 15 regularizer """no""" +658 15 optimizer """adadelta""" +658 15 training_loop """owa""" +658 15 negative_sampler """basic""" +658 15 evaluator """rankbased""" +658 16 dataset """kinships""" +658 16 model """simple""" +658 16 loss """marginranking""" +658 16 regularizer """no""" +658 16 optimizer """adadelta""" +658 16 training_loop """owa""" +658 16 negative_sampler """basic""" +658 16 evaluator """rankbased""" +658 17 dataset """kinships""" +658 17 model """simple""" +658 17 loss """marginranking""" +658 17 regularizer """no""" +658 17 optimizer """adadelta""" +658 17 training_loop """owa""" +658 17 negative_sampler """basic""" +658 17 evaluator """rankbased""" +658 18 dataset """kinships""" +658 18 model """simple""" +658 18 loss """marginranking""" +658 18 regularizer """no""" +658 18 optimizer """adadelta""" +658 18 training_loop """owa""" +658 18 negative_sampler """basic""" +658 18 evaluator """rankbased""" +658 19 dataset """kinships""" +658 19 model """simple""" +658 19 loss """marginranking""" +658 19 regularizer """no""" +658 19 optimizer """adadelta""" +658 19 training_loop """owa""" +658 19 negative_sampler """basic""" +658 19 evaluator """rankbased""" +658 20 dataset """kinships""" +658 20 model """simple""" +658 20 loss """marginranking""" +658 20 regularizer """no""" +658 20 optimizer """adadelta""" +658 20 training_loop """owa""" +658 20 negative_sampler """basic""" 
+658 20 evaluator """rankbased""" +658 21 dataset """kinships""" +658 21 model """simple""" +658 21 loss """marginranking""" +658 21 regularizer """no""" +658 21 optimizer """adadelta""" +658 21 training_loop """owa""" +658 21 negative_sampler """basic""" +658 21 evaluator """rankbased""" +658 22 dataset """kinships""" +658 22 model """simple""" +658 22 loss """marginranking""" +658 22 regularizer """no""" +658 22 optimizer """adadelta""" +658 22 training_loop """owa""" +658 22 negative_sampler """basic""" +658 22 evaluator """rankbased""" +658 23 dataset """kinships""" +658 23 model """simple""" +658 23 loss """marginranking""" +658 23 regularizer """no""" +658 23 optimizer """adadelta""" +658 23 training_loop """owa""" +658 23 negative_sampler """basic""" +658 23 evaluator """rankbased""" +658 24 dataset """kinships""" +658 24 model """simple""" +658 24 loss """marginranking""" +658 24 regularizer """no""" +658 24 optimizer """adadelta""" +658 24 training_loop """owa""" +658 24 negative_sampler """basic""" +658 24 evaluator """rankbased""" +658 25 dataset """kinships""" +658 25 model """simple""" +658 25 loss """marginranking""" +658 25 regularizer """no""" +658 25 optimizer """adadelta""" +658 25 training_loop """owa""" +658 25 negative_sampler """basic""" +658 25 evaluator """rankbased""" +658 26 dataset """kinships""" +658 26 model """simple""" +658 26 loss """marginranking""" +658 26 regularizer """no""" +658 26 optimizer """adadelta""" +658 26 training_loop """owa""" +658 26 negative_sampler """basic""" +658 26 evaluator """rankbased""" +658 27 dataset """kinships""" +658 27 model """simple""" +658 27 loss """marginranking""" +658 27 regularizer """no""" +658 27 optimizer """adadelta""" +658 27 training_loop """owa""" +658 27 negative_sampler """basic""" +658 27 evaluator """rankbased""" +658 28 dataset """kinships""" +658 28 model """simple""" +658 28 loss """marginranking""" +658 28 regularizer """no""" +658 28 optimizer """adadelta""" +658 28 
training_loop """owa""" +658 28 negative_sampler """basic""" +658 28 evaluator """rankbased""" +658 29 dataset """kinships""" +658 29 model """simple""" +658 29 loss """marginranking""" +658 29 regularizer """no""" +658 29 optimizer """adadelta""" +658 29 training_loop """owa""" +658 29 negative_sampler """basic""" +658 29 evaluator """rankbased""" +658 30 dataset """kinships""" +658 30 model """simple""" +658 30 loss """marginranking""" +658 30 regularizer """no""" +658 30 optimizer """adadelta""" +658 30 training_loop """owa""" +658 30 negative_sampler """basic""" +658 30 evaluator """rankbased""" +658 31 dataset """kinships""" +658 31 model """simple""" +658 31 loss """marginranking""" +658 31 regularizer """no""" +658 31 optimizer """adadelta""" +658 31 training_loop """owa""" +658 31 negative_sampler """basic""" +658 31 evaluator """rankbased""" +658 32 dataset """kinships""" +658 32 model """simple""" +658 32 loss """marginranking""" +658 32 regularizer """no""" +658 32 optimizer """adadelta""" +658 32 training_loop """owa""" +658 32 negative_sampler """basic""" +658 32 evaluator """rankbased""" +658 33 dataset """kinships""" +658 33 model """simple""" +658 33 loss """marginranking""" +658 33 regularizer """no""" +658 33 optimizer """adadelta""" +658 33 training_loop """owa""" +658 33 negative_sampler """basic""" +658 33 evaluator """rankbased""" +658 34 dataset """kinships""" +658 34 model """simple""" +658 34 loss """marginranking""" +658 34 regularizer """no""" +658 34 optimizer """adadelta""" +658 34 training_loop """owa""" +658 34 negative_sampler """basic""" +658 34 evaluator """rankbased""" +658 35 dataset """kinships""" +658 35 model """simple""" +658 35 loss """marginranking""" +658 35 regularizer """no""" +658 35 optimizer """adadelta""" +658 35 training_loop """owa""" +658 35 negative_sampler """basic""" +658 35 evaluator """rankbased""" +658 36 dataset """kinships""" +658 36 model """simple""" +658 36 loss """marginranking""" +658 36 regularizer 
"""no""" +658 36 optimizer """adadelta""" +658 36 training_loop """owa""" +658 36 negative_sampler """basic""" +658 36 evaluator """rankbased""" +658 37 dataset """kinships""" +658 37 model """simple""" +658 37 loss """marginranking""" +658 37 regularizer """no""" +658 37 optimizer """adadelta""" +658 37 training_loop """owa""" +658 37 negative_sampler """basic""" +658 37 evaluator """rankbased""" +658 38 dataset """kinships""" +658 38 model """simple""" +658 38 loss """marginranking""" +658 38 regularizer """no""" +658 38 optimizer """adadelta""" +658 38 training_loop """owa""" +658 38 negative_sampler """basic""" +658 38 evaluator """rankbased""" +658 39 dataset """kinships""" +658 39 model """simple""" +658 39 loss """marginranking""" +658 39 regularizer """no""" +658 39 optimizer """adadelta""" +658 39 training_loop """owa""" +658 39 negative_sampler """basic""" +658 39 evaluator """rankbased""" +658 40 dataset """kinships""" +658 40 model """simple""" +658 40 loss """marginranking""" +658 40 regularizer """no""" +658 40 optimizer """adadelta""" +658 40 training_loop """owa""" +658 40 negative_sampler """basic""" +658 40 evaluator """rankbased""" +658 41 dataset """kinships""" +658 41 model """simple""" +658 41 loss """marginranking""" +658 41 regularizer """no""" +658 41 optimizer """adadelta""" +658 41 training_loop """owa""" +658 41 negative_sampler """basic""" +658 41 evaluator """rankbased""" +658 42 dataset """kinships""" +658 42 model """simple""" +658 42 loss """marginranking""" +658 42 regularizer """no""" +658 42 optimizer """adadelta""" +658 42 training_loop """owa""" +658 42 negative_sampler """basic""" +658 42 evaluator """rankbased""" +658 43 dataset """kinships""" +658 43 model """simple""" +658 43 loss """marginranking""" +658 43 regularizer """no""" +658 43 optimizer """adadelta""" +658 43 training_loop """owa""" +658 43 negative_sampler """basic""" +658 43 evaluator """rankbased""" +658 44 dataset """kinships""" +658 44 model """simple""" +658 
44 loss """marginranking""" +658 44 regularizer """no""" +658 44 optimizer """adadelta""" +658 44 training_loop """owa""" +658 44 negative_sampler """basic""" +658 44 evaluator """rankbased""" +658 45 dataset """kinships""" +658 45 model """simple""" +658 45 loss """marginranking""" +658 45 regularizer """no""" +658 45 optimizer """adadelta""" +658 45 training_loop """owa""" +658 45 negative_sampler """basic""" +658 45 evaluator """rankbased""" +658 46 dataset """kinships""" +658 46 model """simple""" +658 46 loss """marginranking""" +658 46 regularizer """no""" +658 46 optimizer """adadelta""" +658 46 training_loop """owa""" +658 46 negative_sampler """basic""" +658 46 evaluator """rankbased""" +658 47 dataset """kinships""" +658 47 model """simple""" +658 47 loss """marginranking""" +658 47 regularizer """no""" +658 47 optimizer """adadelta""" +658 47 training_loop """owa""" +658 47 negative_sampler """basic""" +658 47 evaluator """rankbased""" +658 48 dataset """kinships""" +658 48 model """simple""" +658 48 loss """marginranking""" +658 48 regularizer """no""" +658 48 optimizer """adadelta""" +658 48 training_loop """owa""" +658 48 negative_sampler """basic""" +658 48 evaluator """rankbased""" +658 49 dataset """kinships""" +658 49 model """simple""" +658 49 loss """marginranking""" +658 49 regularizer """no""" +658 49 optimizer """adadelta""" +658 49 training_loop """owa""" +658 49 negative_sampler """basic""" +658 49 evaluator """rankbased""" +658 50 dataset """kinships""" +658 50 model """simple""" +658 50 loss """marginranking""" +658 50 regularizer """no""" +658 50 optimizer """adadelta""" +658 50 training_loop """owa""" +658 50 negative_sampler """basic""" +658 50 evaluator """rankbased""" +658 51 dataset """kinships""" +658 51 model """simple""" +658 51 loss """marginranking""" +658 51 regularizer """no""" +658 51 optimizer """adadelta""" +658 51 training_loop """owa""" +658 51 negative_sampler """basic""" +658 51 evaluator """rankbased""" +658 52 
dataset """kinships""" +658 52 model """simple""" +658 52 loss """marginranking""" +658 52 regularizer """no""" +658 52 optimizer """adadelta""" +658 52 training_loop """owa""" +658 52 negative_sampler """basic""" +658 52 evaluator """rankbased""" +658 53 dataset """kinships""" +658 53 model """simple""" +658 53 loss """marginranking""" +658 53 regularizer """no""" +658 53 optimizer """adadelta""" +658 53 training_loop """owa""" +658 53 negative_sampler """basic""" +658 53 evaluator """rankbased""" +658 54 dataset """kinships""" +658 54 model """simple""" +658 54 loss """marginranking""" +658 54 regularizer """no""" +658 54 optimizer """adadelta""" +658 54 training_loop """owa""" +658 54 negative_sampler """basic""" +658 54 evaluator """rankbased""" +658 55 dataset """kinships""" +658 55 model """simple""" +658 55 loss """marginranking""" +658 55 regularizer """no""" +658 55 optimizer """adadelta""" +658 55 training_loop """owa""" +658 55 negative_sampler """basic""" +658 55 evaluator """rankbased""" +658 56 dataset """kinships""" +658 56 model """simple""" +658 56 loss """marginranking""" +658 56 regularizer """no""" +658 56 optimizer """adadelta""" +658 56 training_loop """owa""" +658 56 negative_sampler """basic""" +658 56 evaluator """rankbased""" +658 57 dataset """kinships""" +658 57 model """simple""" +658 57 loss """marginranking""" +658 57 regularizer """no""" +658 57 optimizer """adadelta""" +658 57 training_loop """owa""" +658 57 negative_sampler """basic""" +658 57 evaluator """rankbased""" +658 58 dataset """kinships""" +658 58 model """simple""" +658 58 loss """marginranking""" +658 58 regularizer """no""" +658 58 optimizer """adadelta""" +658 58 training_loop """owa""" +658 58 negative_sampler """basic""" +658 58 evaluator """rankbased""" +658 59 dataset """kinships""" +658 59 model """simple""" +658 59 loss """marginranking""" +658 59 regularizer """no""" +658 59 optimizer """adadelta""" +658 59 training_loop """owa""" +658 59 negative_sampler 
"""basic""" +658 59 evaluator """rankbased""" +658 60 dataset """kinships""" +658 60 model """simple""" +658 60 loss """marginranking""" +658 60 regularizer """no""" +658 60 optimizer """adadelta""" +658 60 training_loop """owa""" +658 60 negative_sampler """basic""" +658 60 evaluator """rankbased""" +658 61 dataset """kinships""" +658 61 model """simple""" +658 61 loss """marginranking""" +658 61 regularizer """no""" +658 61 optimizer """adadelta""" +658 61 training_loop """owa""" +658 61 negative_sampler """basic""" +658 61 evaluator """rankbased""" +658 62 dataset """kinships""" +658 62 model """simple""" +658 62 loss """marginranking""" +658 62 regularizer """no""" +658 62 optimizer """adadelta""" +658 62 training_loop """owa""" +658 62 negative_sampler """basic""" +658 62 evaluator """rankbased""" +658 63 dataset """kinships""" +658 63 model """simple""" +658 63 loss """marginranking""" +658 63 regularizer """no""" +658 63 optimizer """adadelta""" +658 63 training_loop """owa""" +658 63 negative_sampler """basic""" +658 63 evaluator """rankbased""" +658 64 dataset """kinships""" +658 64 model """simple""" +658 64 loss """marginranking""" +658 64 regularizer """no""" +658 64 optimizer """adadelta""" +658 64 training_loop """owa""" +658 64 negative_sampler """basic""" +658 64 evaluator """rankbased""" +658 65 dataset """kinships""" +658 65 model """simple""" +658 65 loss """marginranking""" +658 65 regularizer """no""" +658 65 optimizer """adadelta""" +658 65 training_loop """owa""" +658 65 negative_sampler """basic""" +658 65 evaluator """rankbased""" +658 66 dataset """kinships""" +658 66 model """simple""" +658 66 loss """marginranking""" +658 66 regularizer """no""" +658 66 optimizer """adadelta""" +658 66 training_loop """owa""" +658 66 negative_sampler """basic""" +658 66 evaluator """rankbased""" +658 67 dataset """kinships""" +658 67 model """simple""" +658 67 loss """marginranking""" +658 67 regularizer """no""" +658 67 optimizer """adadelta""" +658 67 
training_loop """owa""" +658 67 negative_sampler """basic""" +658 67 evaluator """rankbased""" +658 68 dataset """kinships""" +658 68 model """simple""" +658 68 loss """marginranking""" +658 68 regularizer """no""" +658 68 optimizer """adadelta""" +658 68 training_loop """owa""" +658 68 negative_sampler """basic""" +658 68 evaluator """rankbased""" +658 69 dataset """kinships""" +658 69 model """simple""" +658 69 loss """marginranking""" +658 69 regularizer """no""" +658 69 optimizer """adadelta""" +658 69 training_loop """owa""" +658 69 negative_sampler """basic""" +658 69 evaluator """rankbased""" +658 70 dataset """kinships""" +658 70 model """simple""" +658 70 loss """marginranking""" +658 70 regularizer """no""" +658 70 optimizer """adadelta""" +658 70 training_loop """owa""" +658 70 negative_sampler """basic""" +658 70 evaluator """rankbased""" +658 71 dataset """kinships""" +658 71 model """simple""" +658 71 loss """marginranking""" +658 71 regularizer """no""" +658 71 optimizer """adadelta""" +658 71 training_loop """owa""" +658 71 negative_sampler """basic""" +658 71 evaluator """rankbased""" +658 72 dataset """kinships""" +658 72 model """simple""" +658 72 loss """marginranking""" +658 72 regularizer """no""" +658 72 optimizer """adadelta""" +658 72 training_loop """owa""" +658 72 negative_sampler """basic""" +658 72 evaluator """rankbased""" +658 73 dataset """kinships""" +658 73 model """simple""" +658 73 loss """marginranking""" +658 73 regularizer """no""" +658 73 optimizer """adadelta""" +658 73 training_loop """owa""" +658 73 negative_sampler """basic""" +658 73 evaluator """rankbased""" +658 74 dataset """kinships""" +658 74 model """simple""" +658 74 loss """marginranking""" +658 74 regularizer """no""" +658 74 optimizer """adadelta""" +658 74 training_loop """owa""" +658 74 negative_sampler """basic""" +658 74 evaluator """rankbased""" +658 75 dataset """kinships""" +658 75 model """simple""" +658 75 loss """marginranking""" +658 75 regularizer 
"""no""" +658 75 optimizer """adadelta""" +658 75 training_loop """owa""" +658 75 negative_sampler """basic""" +658 75 evaluator """rankbased""" +658 76 dataset """kinships""" +658 76 model """simple""" +658 76 loss """marginranking""" +658 76 regularizer """no""" +658 76 optimizer """adadelta""" +658 76 training_loop """owa""" +658 76 negative_sampler """basic""" +658 76 evaluator """rankbased""" +658 77 dataset """kinships""" +658 77 model """simple""" +658 77 loss """marginranking""" +658 77 regularizer """no""" +658 77 optimizer """adadelta""" +658 77 training_loop """owa""" +658 77 negative_sampler """basic""" +658 77 evaluator """rankbased""" +658 78 dataset """kinships""" +658 78 model """simple""" +658 78 loss """marginranking""" +658 78 regularizer """no""" +658 78 optimizer """adadelta""" +658 78 training_loop """owa""" +658 78 negative_sampler """basic""" +658 78 evaluator """rankbased""" +658 79 dataset """kinships""" +658 79 model """simple""" +658 79 loss """marginranking""" +658 79 regularizer """no""" +658 79 optimizer """adadelta""" +658 79 training_loop """owa""" +658 79 negative_sampler """basic""" +658 79 evaluator """rankbased""" +658 80 dataset """kinships""" +658 80 model """simple""" +658 80 loss """marginranking""" +658 80 regularizer """no""" +658 80 optimizer """adadelta""" +658 80 training_loop """owa""" +658 80 negative_sampler """basic""" +658 80 evaluator """rankbased""" +658 81 dataset """kinships""" +658 81 model """simple""" +658 81 loss """marginranking""" +658 81 regularizer """no""" +658 81 optimizer """adadelta""" +658 81 training_loop """owa""" +658 81 negative_sampler """basic""" +658 81 evaluator """rankbased""" +658 82 dataset """kinships""" +658 82 model """simple""" +658 82 loss """marginranking""" +658 82 regularizer """no""" +658 82 optimizer """adadelta""" +658 82 training_loop """owa""" +658 82 negative_sampler """basic""" +658 82 evaluator """rankbased""" +658 83 dataset """kinships""" +658 83 model """simple""" +658 
83 loss """marginranking""" +658 83 regularizer """no""" +658 83 optimizer """adadelta""" +658 83 training_loop """owa""" +658 83 negative_sampler """basic""" +658 83 evaluator """rankbased""" +658 84 dataset """kinships""" +658 84 model """simple""" +658 84 loss """marginranking""" +658 84 regularizer """no""" +658 84 optimizer """adadelta""" +658 84 training_loop """owa""" +658 84 negative_sampler """basic""" +658 84 evaluator """rankbased""" +658 85 dataset """kinships""" +658 85 model """simple""" +658 85 loss """marginranking""" +658 85 regularizer """no""" +658 85 optimizer """adadelta""" +658 85 training_loop """owa""" +658 85 negative_sampler """basic""" +658 85 evaluator """rankbased""" +658 86 dataset """kinships""" +658 86 model """simple""" +658 86 loss """marginranking""" +658 86 regularizer """no""" +658 86 optimizer """adadelta""" +658 86 training_loop """owa""" +658 86 negative_sampler """basic""" +658 86 evaluator """rankbased""" +658 87 dataset """kinships""" +658 87 model """simple""" +658 87 loss """marginranking""" +658 87 regularizer """no""" +658 87 optimizer """adadelta""" +658 87 training_loop """owa""" +658 87 negative_sampler """basic""" +658 87 evaluator """rankbased""" +658 88 dataset """kinships""" +658 88 model """simple""" +658 88 loss """marginranking""" +658 88 regularizer """no""" +658 88 optimizer """adadelta""" +658 88 training_loop """owa""" +658 88 negative_sampler """basic""" +658 88 evaluator """rankbased""" +658 89 dataset """kinships""" +658 89 model """simple""" +658 89 loss """marginranking""" +658 89 regularizer """no""" +658 89 optimizer """adadelta""" +658 89 training_loop """owa""" +658 89 negative_sampler """basic""" +658 89 evaluator """rankbased""" +658 90 dataset """kinships""" +658 90 model """simple""" +658 90 loss """marginranking""" +658 90 regularizer """no""" +658 90 optimizer """adadelta""" +658 90 training_loop """owa""" +658 90 negative_sampler """basic""" +658 90 evaluator """rankbased""" +658 91 
dataset """kinships""" +658 91 model """simple""" +658 91 loss """marginranking""" +658 91 regularizer """no""" +658 91 optimizer """adadelta""" +658 91 training_loop """owa""" +658 91 negative_sampler """basic""" +658 91 evaluator """rankbased""" +658 92 dataset """kinships""" +658 92 model """simple""" +658 92 loss """marginranking""" +658 92 regularizer """no""" +658 92 optimizer """adadelta""" +658 92 training_loop """owa""" +658 92 negative_sampler """basic""" +658 92 evaluator """rankbased""" +658 93 dataset """kinships""" +658 93 model """simple""" +658 93 loss """marginranking""" +658 93 regularizer """no""" +658 93 optimizer """adadelta""" +658 93 training_loop """owa""" +658 93 negative_sampler """basic""" +658 93 evaluator """rankbased""" +658 94 dataset """kinships""" +658 94 model """simple""" +658 94 loss """marginranking""" +658 94 regularizer """no""" +658 94 optimizer """adadelta""" +658 94 training_loop """owa""" +658 94 negative_sampler """basic""" +658 94 evaluator """rankbased""" +658 95 dataset """kinships""" +658 95 model """simple""" +658 95 loss """marginranking""" +658 95 regularizer """no""" +658 95 optimizer """adadelta""" +658 95 training_loop """owa""" +658 95 negative_sampler """basic""" +658 95 evaluator """rankbased""" +658 96 dataset """kinships""" +658 96 model """simple""" +658 96 loss """marginranking""" +658 96 regularizer """no""" +658 96 optimizer """adadelta""" +658 96 training_loop """owa""" +658 96 negative_sampler """basic""" +658 96 evaluator """rankbased""" +658 97 dataset """kinships""" +658 97 model """simple""" +658 97 loss """marginranking""" +658 97 regularizer """no""" +658 97 optimizer """adadelta""" +658 97 training_loop """owa""" +658 97 negative_sampler """basic""" +658 97 evaluator """rankbased""" +658 98 dataset """kinships""" +658 98 model """simple""" +658 98 loss """marginranking""" +658 98 regularizer """no""" +658 98 optimizer """adadelta""" +658 98 training_loop """owa""" +658 98 negative_sampler 
"""basic""" +658 98 evaluator """rankbased""" +658 99 dataset """kinships""" +658 99 model """simple""" +658 99 loss """marginranking""" +658 99 regularizer """no""" +658 99 optimizer """adadelta""" +658 99 training_loop """owa""" +658 99 negative_sampler """basic""" +658 99 evaluator """rankbased""" +658 100 dataset """kinships""" +658 100 model """simple""" +658 100 loss """marginranking""" +658 100 regularizer """no""" +658 100 optimizer """adadelta""" +658 100 training_loop """owa""" +658 100 negative_sampler """basic""" +658 100 evaluator """rankbased""" +659 1 model.embedding_dim 0.0 +659 1 loss.margin 11.90404632711545 +659 1 loss.adversarial_temperature 0.7701895902979209 +659 1 negative_sampler.num_negs_per_pos 45.0 +659 1 training.batch_size 1.0 +659 2 model.embedding_dim 2.0 +659 2 loss.margin 1.5723797841402722 +659 2 loss.adversarial_temperature 0.7767537676275454 +659 2 negative_sampler.num_negs_per_pos 29.0 +659 2 training.batch_size 2.0 +659 3 model.embedding_dim 0.0 +659 3 loss.margin 6.33323071678538 +659 3 loss.adversarial_temperature 0.11597794124784952 +659 3 negative_sampler.num_negs_per_pos 51.0 +659 3 training.batch_size 1.0 +659 4 model.embedding_dim 1.0 +659 4 loss.margin 28.238339309866163 +659 4 loss.adversarial_temperature 0.652630573000761 +659 4 negative_sampler.num_negs_per_pos 41.0 +659 4 training.batch_size 1.0 +659 5 model.embedding_dim 2.0 +659 5 loss.margin 16.66453111678348 +659 5 loss.adversarial_temperature 0.7031718047950849 +659 5 negative_sampler.num_negs_per_pos 0.0 +659 5 training.batch_size 0.0 +659 6 model.embedding_dim 0.0 +659 6 loss.margin 17.534696987488637 +659 6 loss.adversarial_temperature 0.15978790810443955 +659 6 negative_sampler.num_negs_per_pos 5.0 +659 6 training.batch_size 1.0 +659 7 model.embedding_dim 0.0 +659 7 loss.margin 28.056497616691406 +659 7 loss.adversarial_temperature 0.9235776097016988 +659 7 negative_sampler.num_negs_per_pos 78.0 +659 7 training.batch_size 1.0 +659 8 model.embedding_dim 1.0 
+659 8 loss.margin 15.934082127136833 +659 8 loss.adversarial_temperature 0.8158277680224986 +659 8 negative_sampler.num_negs_per_pos 3.0 +659 8 training.batch_size 1.0 +659 9 model.embedding_dim 0.0 +659 9 loss.margin 9.597485246145608 +659 9 loss.adversarial_temperature 0.18012664606185666 +659 9 negative_sampler.num_negs_per_pos 96.0 +659 9 training.batch_size 0.0 +659 10 model.embedding_dim 1.0 +659 10 loss.margin 7.381572126842151 +659 10 loss.adversarial_temperature 0.2460873734674358 +659 10 negative_sampler.num_negs_per_pos 41.0 +659 10 training.batch_size 0.0 +659 11 model.embedding_dim 0.0 +659 11 loss.margin 16.540943338499524 +659 11 loss.adversarial_temperature 0.25548032870931325 +659 11 negative_sampler.num_negs_per_pos 62.0 +659 11 training.batch_size 2.0 +659 12 model.embedding_dim 0.0 +659 12 loss.margin 16.84309237079567 +659 12 loss.adversarial_temperature 0.17594794217785778 +659 12 negative_sampler.num_negs_per_pos 3.0 +659 12 training.batch_size 1.0 +659 13 model.embedding_dim 0.0 +659 13 loss.margin 10.286660999876245 +659 13 loss.adversarial_temperature 0.7463420060169077 +659 13 negative_sampler.num_negs_per_pos 54.0 +659 13 training.batch_size 0.0 +659 14 model.embedding_dim 1.0 +659 14 loss.margin 16.77125152493295 +659 14 loss.adversarial_temperature 0.15436195777173983 +659 14 negative_sampler.num_negs_per_pos 55.0 +659 14 training.batch_size 0.0 +659 15 model.embedding_dim 0.0 +659 15 loss.margin 2.4926347413689474 +659 15 loss.adversarial_temperature 0.6337979847141167 +659 15 negative_sampler.num_negs_per_pos 54.0 +659 15 training.batch_size 2.0 +659 16 model.embedding_dim 2.0 +659 16 loss.margin 12.443894105304363 +659 16 loss.adversarial_temperature 0.7221011862455545 +659 16 negative_sampler.num_negs_per_pos 20.0 +659 16 training.batch_size 1.0 +659 17 model.embedding_dim 0.0 +659 17 loss.margin 1.2336929519642992 +659 17 loss.adversarial_temperature 0.21828129460760012 +659 17 negative_sampler.num_negs_per_pos 87.0 +659 17 
training.batch_size 2.0 +659 18 model.embedding_dim 2.0 +659 18 loss.margin 7.885247419835643 +659 18 loss.adversarial_temperature 0.14368310628418896 +659 18 negative_sampler.num_negs_per_pos 32.0 +659 18 training.batch_size 2.0 +659 19 model.embedding_dim 0.0 +659 19 loss.margin 22.236908317646044 +659 19 loss.adversarial_temperature 0.6456031893413519 +659 19 negative_sampler.num_negs_per_pos 94.0 +659 19 training.batch_size 1.0 +659 20 model.embedding_dim 2.0 +659 20 loss.margin 9.430226112131713 +659 20 loss.adversarial_temperature 0.5248767083136495 +659 20 negative_sampler.num_negs_per_pos 79.0 +659 20 training.batch_size 0.0 +659 21 model.embedding_dim 2.0 +659 21 loss.margin 6.0993097828924645 +659 21 loss.adversarial_temperature 0.9363522525712559 +659 21 negative_sampler.num_negs_per_pos 12.0 +659 21 training.batch_size 1.0 +659 22 model.embedding_dim 0.0 +659 22 loss.margin 28.90531084802518 +659 22 loss.adversarial_temperature 0.4529832168998903 +659 22 negative_sampler.num_negs_per_pos 21.0 +659 22 training.batch_size 1.0 +659 23 model.embedding_dim 1.0 +659 23 loss.margin 28.19931796416118 +659 23 loss.adversarial_temperature 0.9215621523253189 +659 23 negative_sampler.num_negs_per_pos 97.0 +659 23 training.batch_size 0.0 +659 24 model.embedding_dim 2.0 +659 24 loss.margin 22.704796454133568 +659 24 loss.adversarial_temperature 0.4855607130350992 +659 24 negative_sampler.num_negs_per_pos 57.0 +659 24 training.batch_size 0.0 +659 25 model.embedding_dim 2.0 +659 25 loss.margin 29.075707753940392 +659 25 loss.adversarial_temperature 0.5117484151335308 +659 25 negative_sampler.num_negs_per_pos 32.0 +659 25 training.batch_size 1.0 +659 26 model.embedding_dim 0.0 +659 26 loss.margin 8.091095000493898 +659 26 loss.adversarial_temperature 0.6174294934559036 +659 26 negative_sampler.num_negs_per_pos 92.0 +659 26 training.batch_size 1.0 +659 27 model.embedding_dim 0.0 +659 27 loss.margin 11.85652956563078 +659 27 loss.adversarial_temperature 0.7402819825439536 
+659 27 negative_sampler.num_negs_per_pos 71.0 +659 27 training.batch_size 1.0 +659 28 model.embedding_dim 2.0 +659 28 loss.margin 1.1058407346887462 +659 28 loss.adversarial_temperature 0.8329879988948029 +659 28 negative_sampler.num_negs_per_pos 99.0 +659 28 training.batch_size 1.0 +659 29 model.embedding_dim 2.0 +659 29 loss.margin 4.833075966230276 +659 29 loss.adversarial_temperature 0.13733495188393857 +659 29 negative_sampler.num_negs_per_pos 66.0 +659 29 training.batch_size 1.0 +659 30 model.embedding_dim 1.0 +659 30 loss.margin 21.090633615319046 +659 30 loss.adversarial_temperature 0.13380046630757966 +659 30 negative_sampler.num_negs_per_pos 42.0 +659 30 training.batch_size 1.0 +659 31 model.embedding_dim 2.0 +659 31 loss.margin 13.735426526976916 +659 31 loss.adversarial_temperature 0.8772282004044157 +659 31 negative_sampler.num_negs_per_pos 93.0 +659 31 training.batch_size 1.0 +659 32 model.embedding_dim 1.0 +659 32 loss.margin 22.0629697293188 +659 32 loss.adversarial_temperature 0.6588412980861326 +659 32 negative_sampler.num_negs_per_pos 31.0 +659 32 training.batch_size 2.0 +659 33 model.embedding_dim 1.0 +659 33 loss.margin 22.362444407969317 +659 33 loss.adversarial_temperature 0.7344362230973093 +659 33 negative_sampler.num_negs_per_pos 27.0 +659 33 training.batch_size 2.0 +659 34 model.embedding_dim 2.0 +659 34 loss.margin 11.058670266659245 +659 34 loss.adversarial_temperature 0.6308290359517595 +659 34 negative_sampler.num_negs_per_pos 68.0 +659 34 training.batch_size 1.0 +659 35 model.embedding_dim 1.0 +659 35 loss.margin 10.197125688635756 +659 35 loss.adversarial_temperature 0.9431615349148981 +659 35 negative_sampler.num_negs_per_pos 81.0 +659 35 training.batch_size 0.0 +659 36 model.embedding_dim 1.0 +659 36 loss.margin 19.844820947977325 +659 36 loss.adversarial_temperature 0.8592825731918988 +659 36 negative_sampler.num_negs_per_pos 95.0 +659 36 training.batch_size 0.0 +659 37 model.embedding_dim 1.0 +659 37 loss.margin 
28.77465521715849 +659 37 loss.adversarial_temperature 0.6641714669491625 +659 37 negative_sampler.num_negs_per_pos 6.0 +659 37 training.batch_size 0.0 +659 38 model.embedding_dim 0.0 +659 38 loss.margin 10.0156662846963 +659 38 loss.adversarial_temperature 0.35052587507790756 +659 38 negative_sampler.num_negs_per_pos 41.0 +659 38 training.batch_size 0.0 +659 39 model.embedding_dim 0.0 +659 39 loss.margin 21.07178772322983 +659 39 loss.adversarial_temperature 0.31901371297867015 +659 39 negative_sampler.num_negs_per_pos 18.0 +659 39 training.batch_size 1.0 +659 40 model.embedding_dim 2.0 +659 40 loss.margin 28.64864674099976 +659 40 loss.adversarial_temperature 0.21714026230504413 +659 40 negative_sampler.num_negs_per_pos 66.0 +659 40 training.batch_size 2.0 +659 41 model.embedding_dim 0.0 +659 41 loss.margin 12.772937589635035 +659 41 loss.adversarial_temperature 0.3646239734071114 +659 41 negative_sampler.num_negs_per_pos 32.0 +659 41 training.batch_size 1.0 +659 42 model.embedding_dim 0.0 +659 42 loss.margin 3.2624491865365077 +659 42 loss.adversarial_temperature 0.46848070613947834 +659 42 negative_sampler.num_negs_per_pos 76.0 +659 42 training.batch_size 1.0 +659 43 model.embedding_dim 0.0 +659 43 loss.margin 3.3430169828780634 +659 43 loss.adversarial_temperature 0.8645757241660015 +659 43 negative_sampler.num_negs_per_pos 88.0 +659 43 training.batch_size 1.0 +659 44 model.embedding_dim 0.0 +659 44 loss.margin 15.874270836406602 +659 44 loss.adversarial_temperature 0.5719989187458902 +659 44 negative_sampler.num_negs_per_pos 23.0 +659 44 training.batch_size 0.0 +659 45 model.embedding_dim 0.0 +659 45 loss.margin 18.162258859383545 +659 45 loss.adversarial_temperature 0.9085068283844732 +659 45 negative_sampler.num_negs_per_pos 90.0 +659 45 training.batch_size 0.0 +659 46 model.embedding_dim 0.0 +659 46 loss.margin 18.642257404008664 +659 46 loss.adversarial_temperature 0.9210674251088982 +659 46 negative_sampler.num_negs_per_pos 82.0 +659 46 
training.batch_size 1.0 +659 47 model.embedding_dim 1.0 +659 47 loss.margin 17.078288607889412 +659 47 loss.adversarial_temperature 0.6949219289939503 +659 47 negative_sampler.num_negs_per_pos 99.0 +659 47 training.batch_size 0.0 +659 48 model.embedding_dim 1.0 +659 48 loss.margin 23.433650968740313 +659 48 loss.adversarial_temperature 0.5060747555611442 +659 48 negative_sampler.num_negs_per_pos 17.0 +659 48 training.batch_size 0.0 +659 49 model.embedding_dim 1.0 +659 49 loss.margin 16.420578265256236 +659 49 loss.adversarial_temperature 0.89893972026417 +659 49 negative_sampler.num_negs_per_pos 91.0 +659 49 training.batch_size 0.0 +659 50 model.embedding_dim 0.0 +659 50 loss.margin 27.648855773010276 +659 50 loss.adversarial_temperature 0.812276830680273 +659 50 negative_sampler.num_negs_per_pos 90.0 +659 50 training.batch_size 0.0 +659 51 model.embedding_dim 2.0 +659 51 loss.margin 14.27558632311663 +659 51 loss.adversarial_temperature 0.258963006514845 +659 51 negative_sampler.num_negs_per_pos 85.0 +659 51 training.batch_size 1.0 +659 52 model.embedding_dim 0.0 +659 52 loss.margin 5.628687067279144 +659 52 loss.adversarial_temperature 0.7111547209655298 +659 52 negative_sampler.num_negs_per_pos 95.0 +659 52 training.batch_size 2.0 +659 53 model.embedding_dim 2.0 +659 53 loss.margin 11.351187213732342 +659 53 loss.adversarial_temperature 0.9303834546766552 +659 53 negative_sampler.num_negs_per_pos 21.0 +659 53 training.batch_size 1.0 +659 54 model.embedding_dim 2.0 +659 54 loss.margin 17.97439501458511 +659 54 loss.adversarial_temperature 0.30958744402772176 +659 54 negative_sampler.num_negs_per_pos 51.0 +659 54 training.batch_size 2.0 +659 55 model.embedding_dim 0.0 +659 55 loss.margin 14.8150218807556 +659 55 loss.adversarial_temperature 0.7160889203385383 +659 55 negative_sampler.num_negs_per_pos 58.0 +659 55 training.batch_size 2.0 +659 56 model.embedding_dim 1.0 +659 56 loss.margin 7.885976907432265 +659 56 loss.adversarial_temperature 0.619311517492599 +659 
56 negative_sampler.num_negs_per_pos 51.0 +659 56 training.batch_size 2.0 +659 57 model.embedding_dim 0.0 +659 57 loss.margin 23.36350957644702 +659 57 loss.adversarial_temperature 0.23183502462947045 +659 57 negative_sampler.num_negs_per_pos 68.0 +659 57 training.batch_size 1.0 +659 58 model.embedding_dim 0.0 +659 58 loss.margin 11.711216334559753 +659 58 loss.adversarial_temperature 0.573967317685139 +659 58 negative_sampler.num_negs_per_pos 96.0 +659 58 training.batch_size 2.0 +659 59 model.embedding_dim 2.0 +659 59 loss.margin 11.393401743970932 +659 59 loss.adversarial_temperature 0.8905649326015448 +659 59 negative_sampler.num_negs_per_pos 93.0 +659 59 training.batch_size 1.0 +659 60 model.embedding_dim 2.0 +659 60 loss.margin 10.327843865154298 +659 60 loss.adversarial_temperature 0.2232732349232261 +659 60 negative_sampler.num_negs_per_pos 86.0 +659 60 training.batch_size 2.0 +659 61 model.embedding_dim 1.0 +659 61 loss.margin 20.04974705882851 +659 61 loss.adversarial_temperature 0.38830488802955454 +659 61 negative_sampler.num_negs_per_pos 80.0 +659 61 training.batch_size 0.0 +659 62 model.embedding_dim 0.0 +659 62 loss.margin 25.643451700402167 +659 62 loss.adversarial_temperature 0.7344095240738208 +659 62 negative_sampler.num_negs_per_pos 19.0 +659 62 training.batch_size 1.0 +659 63 model.embedding_dim 1.0 +659 63 loss.margin 11.139642130727218 +659 63 loss.adversarial_temperature 0.9759247407912564 +659 63 negative_sampler.num_negs_per_pos 63.0 +659 63 training.batch_size 2.0 +659 64 model.embedding_dim 1.0 +659 64 loss.margin 19.818426187471513 +659 64 loss.adversarial_temperature 0.11783140455859015 +659 64 negative_sampler.num_negs_per_pos 28.0 +659 64 training.batch_size 1.0 +659 65 model.embedding_dim 2.0 +659 65 loss.margin 14.693774654932636 +659 65 loss.adversarial_temperature 0.7308362672560388 +659 65 negative_sampler.num_negs_per_pos 29.0 +659 65 training.batch_size 2.0 +659 66 model.embedding_dim 1.0 +659 66 loss.margin 1.6711590898302642 
+659 66 loss.adversarial_temperature 0.5700737009355179 +659 66 negative_sampler.num_negs_per_pos 54.0 +659 66 training.batch_size 2.0 +659 67 model.embedding_dim 1.0 +659 67 loss.margin 1.6886814151308847 +659 67 loss.adversarial_temperature 0.9297924644182457 +659 67 negative_sampler.num_negs_per_pos 12.0 +659 67 training.batch_size 0.0 +659 68 model.embedding_dim 2.0 +659 68 loss.margin 22.667307527423052 +659 68 loss.adversarial_temperature 0.47556539493658767 +659 68 negative_sampler.num_negs_per_pos 2.0 +659 68 training.batch_size 1.0 +659 69 model.embedding_dim 0.0 +659 69 loss.margin 15.710733105344568 +659 69 loss.adversarial_temperature 0.8584626591281074 +659 69 negative_sampler.num_negs_per_pos 2.0 +659 69 training.batch_size 0.0 +659 70 model.embedding_dim 0.0 +659 70 loss.margin 8.801158295837926 +659 70 loss.adversarial_temperature 0.904931437800176 +659 70 negative_sampler.num_negs_per_pos 71.0 +659 70 training.batch_size 1.0 +659 71 model.embedding_dim 1.0 +659 71 loss.margin 1.0262688406266458 +659 71 loss.adversarial_temperature 0.5480832876411301 +659 71 negative_sampler.num_negs_per_pos 21.0 +659 71 training.batch_size 0.0 +659 72 model.embedding_dim 0.0 +659 72 loss.margin 22.65251473994373 +659 72 loss.adversarial_temperature 0.949462862815756 +659 72 negative_sampler.num_negs_per_pos 54.0 +659 72 training.batch_size 0.0 +659 73 model.embedding_dim 2.0 +659 73 loss.margin 4.901150394826852 +659 73 loss.adversarial_temperature 0.4017033046766103 +659 73 negative_sampler.num_negs_per_pos 41.0 +659 73 training.batch_size 0.0 +659 74 model.embedding_dim 2.0 +659 74 loss.margin 25.435206468811113 +659 74 loss.adversarial_temperature 0.8682278993848547 +659 74 negative_sampler.num_negs_per_pos 77.0 +659 74 training.batch_size 0.0 +659 75 model.embedding_dim 0.0 +659 75 loss.margin 10.10506576922931 +659 75 loss.adversarial_temperature 0.8882201741908868 +659 75 negative_sampler.num_negs_per_pos 51.0 +659 75 training.batch_size 1.0 +659 76 
model.embedding_dim 1.0 +659 76 loss.margin 9.681141110249381 +659 76 loss.adversarial_temperature 0.19874459483771612 +659 76 negative_sampler.num_negs_per_pos 96.0 +659 76 training.batch_size 2.0 +659 77 model.embedding_dim 0.0 +659 77 loss.margin 9.57492451697076 +659 77 loss.adversarial_temperature 0.5720534155984867 +659 77 negative_sampler.num_negs_per_pos 65.0 +659 77 training.batch_size 1.0 +659 78 model.embedding_dim 0.0 +659 78 loss.margin 11.500147950454496 +659 78 loss.adversarial_temperature 0.5003166576902879 +659 78 negative_sampler.num_negs_per_pos 54.0 +659 78 training.batch_size 2.0 +659 79 model.embedding_dim 1.0 +659 79 loss.margin 29.059868768554097 +659 79 loss.adversarial_temperature 0.19730241107283214 +659 79 negative_sampler.num_negs_per_pos 85.0 +659 79 training.batch_size 0.0 +659 80 model.embedding_dim 1.0 +659 80 loss.margin 14.421549754310826 +659 80 loss.adversarial_temperature 0.5680394571610538 +659 80 negative_sampler.num_negs_per_pos 76.0 +659 80 training.batch_size 2.0 +659 81 model.embedding_dim 0.0 +659 81 loss.margin 17.750690863627195 +659 81 loss.adversarial_temperature 0.704398116273417 +659 81 negative_sampler.num_negs_per_pos 54.0 +659 81 training.batch_size 0.0 +659 82 model.embedding_dim 0.0 +659 82 loss.margin 14.616592412642019 +659 82 loss.adversarial_temperature 0.4377009224813213 +659 82 negative_sampler.num_negs_per_pos 39.0 +659 82 training.batch_size 2.0 +659 83 model.embedding_dim 2.0 +659 83 loss.margin 17.828633727232287 +659 83 loss.adversarial_temperature 0.9382548743079319 +659 83 negative_sampler.num_negs_per_pos 21.0 +659 83 training.batch_size 0.0 +659 84 model.embedding_dim 2.0 +659 84 loss.margin 15.69969689537273 +659 84 loss.adversarial_temperature 0.13112096121976022 +659 84 negative_sampler.num_negs_per_pos 26.0 +659 84 training.batch_size 1.0 +659 85 model.embedding_dim 2.0 +659 85 loss.margin 1.4213177472887928 +659 85 loss.adversarial_temperature 0.8762605273655116 +659 85 
negative_sampler.num_negs_per_pos 89.0 +659 85 training.batch_size 2.0 +659 86 model.embedding_dim 2.0 +659 86 loss.margin 16.183241801995703 +659 86 loss.adversarial_temperature 0.17567925835465256 +659 86 negative_sampler.num_negs_per_pos 21.0 +659 86 training.batch_size 1.0 +659 87 model.embedding_dim 0.0 +659 87 loss.margin 6.844360859996792 +659 87 loss.adversarial_temperature 0.8214439798912286 +659 87 negative_sampler.num_negs_per_pos 55.0 +659 87 training.batch_size 2.0 +659 88 model.embedding_dim 1.0 +659 88 loss.margin 2.2622378776675944 +659 88 loss.adversarial_temperature 0.48025973370994224 +659 88 negative_sampler.num_negs_per_pos 53.0 +659 88 training.batch_size 2.0 +659 89 model.embedding_dim 2.0 +659 89 loss.margin 11.92968176044721 +659 89 loss.adversarial_temperature 0.23452780201761494 +659 89 negative_sampler.num_negs_per_pos 75.0 +659 89 training.batch_size 2.0 +659 90 model.embedding_dim 1.0 +659 90 loss.margin 4.098208663761417 +659 90 loss.adversarial_temperature 0.9299046361843094 +659 90 negative_sampler.num_negs_per_pos 12.0 +659 90 training.batch_size 1.0 +659 91 model.embedding_dim 0.0 +659 91 loss.margin 3.6722723281259997 +659 91 loss.adversarial_temperature 0.6330533136013582 +659 91 negative_sampler.num_negs_per_pos 30.0 +659 91 training.batch_size 2.0 +659 92 model.embedding_dim 2.0 +659 92 loss.margin 4.364917623744953 +659 92 loss.adversarial_temperature 0.36835545414273574 +659 92 negative_sampler.num_negs_per_pos 31.0 +659 92 training.batch_size 1.0 +659 93 model.embedding_dim 1.0 +659 93 loss.margin 3.4681939888913274 +659 93 loss.adversarial_temperature 0.3230062538137239 +659 93 negative_sampler.num_negs_per_pos 7.0 +659 93 training.batch_size 2.0 +659 94 model.embedding_dim 1.0 +659 94 loss.margin 7.305052050270935 +659 94 loss.adversarial_temperature 0.6404268758084927 +659 94 negative_sampler.num_negs_per_pos 31.0 +659 94 training.batch_size 0.0 +659 95 model.embedding_dim 0.0 +659 95 loss.margin 1.0484440267850916 +659 
95 loss.adversarial_temperature 0.7976733501383856 +659 95 negative_sampler.num_negs_per_pos 21.0 +659 95 training.batch_size 2.0 +659 96 model.embedding_dim 1.0 +659 96 loss.margin 2.3529058223103596 +659 96 loss.adversarial_temperature 0.32404678605722625 +659 96 negative_sampler.num_negs_per_pos 99.0 +659 96 training.batch_size 0.0 +659 97 model.embedding_dim 0.0 +659 97 loss.margin 23.34874273179913 +659 97 loss.adversarial_temperature 0.5452466809934724 +659 97 negative_sampler.num_negs_per_pos 53.0 +659 97 training.batch_size 2.0 +659 98 model.embedding_dim 1.0 +659 98 loss.margin 18.50322354467716 +659 98 loss.adversarial_temperature 0.5824551657903673 +659 98 negative_sampler.num_negs_per_pos 55.0 +659 98 training.batch_size 2.0 +659 99 model.embedding_dim 0.0 +659 99 loss.margin 8.206877707091692 +659 99 loss.adversarial_temperature 0.4532780227142752 +659 99 negative_sampler.num_negs_per_pos 60.0 +659 99 training.batch_size 1.0 +659 100 model.embedding_dim 1.0 +659 100 loss.margin 25.545671132603598 +659 100 loss.adversarial_temperature 0.18140261855663992 +659 100 negative_sampler.num_negs_per_pos 70.0 +659 100 training.batch_size 2.0 +659 1 dataset """kinships""" +659 1 model """simple""" +659 1 loss """nssa""" +659 1 regularizer """no""" +659 1 optimizer """adadelta""" +659 1 training_loop """owa""" +659 1 negative_sampler """basic""" +659 1 evaluator """rankbased""" +659 2 dataset """kinships""" +659 2 model """simple""" +659 2 loss """nssa""" +659 2 regularizer """no""" +659 2 optimizer """adadelta""" +659 2 training_loop """owa""" +659 2 negative_sampler """basic""" +659 2 evaluator """rankbased""" +659 3 dataset """kinships""" +659 3 model """simple""" +659 3 loss """nssa""" +659 3 regularizer """no""" +659 3 optimizer """adadelta""" +659 3 training_loop """owa""" +659 3 negative_sampler """basic""" +659 3 evaluator """rankbased""" +659 4 dataset """kinships""" +659 4 model """simple""" +659 4 loss """nssa""" +659 4 regularizer """no""" +659 4 
optimizer """adadelta""" +659 4 training_loop """owa""" +659 4 negative_sampler """basic""" +659 4 evaluator """rankbased""" +659 5 dataset """kinships""" +659 5 model """simple""" +659 5 loss """nssa""" +659 5 regularizer """no""" +659 5 optimizer """adadelta""" +659 5 training_loop """owa""" +659 5 negative_sampler """basic""" +659 5 evaluator """rankbased""" +659 6 dataset """kinships""" +659 6 model """simple""" +659 6 loss """nssa""" +659 6 regularizer """no""" +659 6 optimizer """adadelta""" +659 6 training_loop """owa""" +659 6 negative_sampler """basic""" +659 6 evaluator """rankbased""" +659 7 dataset """kinships""" +659 7 model """simple""" +659 7 loss """nssa""" +659 7 regularizer """no""" +659 7 optimizer """adadelta""" +659 7 training_loop """owa""" +659 7 negative_sampler """basic""" +659 7 evaluator """rankbased""" +659 8 dataset """kinships""" +659 8 model """simple""" +659 8 loss """nssa""" +659 8 regularizer """no""" +659 8 optimizer """adadelta""" +659 8 training_loop """owa""" +659 8 negative_sampler """basic""" +659 8 evaluator """rankbased""" +659 9 dataset """kinships""" +659 9 model """simple""" +659 9 loss """nssa""" +659 9 regularizer """no""" +659 9 optimizer """adadelta""" +659 9 training_loop """owa""" +659 9 negative_sampler """basic""" +659 9 evaluator """rankbased""" +659 10 dataset """kinships""" +659 10 model """simple""" +659 10 loss """nssa""" +659 10 regularizer """no""" +659 10 optimizer """adadelta""" +659 10 training_loop """owa""" +659 10 negative_sampler """basic""" +659 10 evaluator """rankbased""" +659 11 dataset """kinships""" +659 11 model """simple""" +659 11 loss """nssa""" +659 11 regularizer """no""" +659 11 optimizer """adadelta""" +659 11 training_loop """owa""" +659 11 negative_sampler """basic""" +659 11 evaluator """rankbased""" +659 12 dataset """kinships""" +659 12 model """simple""" +659 12 loss """nssa""" +659 12 regularizer """no""" +659 12 optimizer """adadelta""" +659 12 training_loop """owa""" +659 12 
negative_sampler """basic""" +659 12 evaluator """rankbased""" +659 13 dataset """kinships""" +659 13 model """simple""" +659 13 loss """nssa""" +659 13 regularizer """no""" +659 13 optimizer """adadelta""" +659 13 training_loop """owa""" +659 13 negative_sampler """basic""" +659 13 evaluator """rankbased""" +659 14 dataset """kinships""" +659 14 model """simple""" +659 14 loss """nssa""" +659 14 regularizer """no""" +659 14 optimizer """adadelta""" +659 14 training_loop """owa""" +659 14 negative_sampler """basic""" +659 14 evaluator """rankbased""" +659 15 dataset """kinships""" +659 15 model """simple""" +659 15 loss """nssa""" +659 15 regularizer """no""" +659 15 optimizer """adadelta""" +659 15 training_loop """owa""" +659 15 negative_sampler """basic""" +659 15 evaluator """rankbased""" +659 16 dataset """kinships""" +659 16 model """simple""" +659 16 loss """nssa""" +659 16 regularizer """no""" +659 16 optimizer """adadelta""" +659 16 training_loop """owa""" +659 16 negative_sampler """basic""" +659 16 evaluator """rankbased""" +659 17 dataset """kinships""" +659 17 model """simple""" +659 17 loss """nssa""" +659 17 regularizer """no""" +659 17 optimizer """adadelta""" +659 17 training_loop """owa""" +659 17 negative_sampler """basic""" +659 17 evaluator """rankbased""" +659 18 dataset """kinships""" +659 18 model """simple""" +659 18 loss """nssa""" +659 18 regularizer """no""" +659 18 optimizer """adadelta""" +659 18 training_loop """owa""" +659 18 negative_sampler """basic""" +659 18 evaluator """rankbased""" +659 19 dataset """kinships""" +659 19 model """simple""" +659 19 loss """nssa""" +659 19 regularizer """no""" +659 19 optimizer """adadelta""" +659 19 training_loop """owa""" +659 19 negative_sampler """basic""" +659 19 evaluator """rankbased""" +659 20 dataset """kinships""" +659 20 model """simple""" +659 20 loss """nssa""" +659 20 regularizer """no""" +659 20 optimizer """adadelta""" +659 20 training_loop """owa""" +659 20 negative_sampler 
"""basic""" +659 20 evaluator """rankbased""" +659 21 dataset """kinships""" +659 21 model """simple""" +659 21 loss """nssa""" +659 21 regularizer """no""" +659 21 optimizer """adadelta""" +659 21 training_loop """owa""" +659 21 negative_sampler """basic""" +659 21 evaluator """rankbased""" +659 22 dataset """kinships""" +659 22 model """simple""" +659 22 loss """nssa""" +659 22 regularizer """no""" +659 22 optimizer """adadelta""" +659 22 training_loop """owa""" +659 22 negative_sampler """basic""" +659 22 evaluator """rankbased""" +659 23 dataset """kinships""" +659 23 model """simple""" +659 23 loss """nssa""" +659 23 regularizer """no""" +659 23 optimizer """adadelta""" +659 23 training_loop """owa""" +659 23 negative_sampler """basic""" +659 23 evaluator """rankbased""" +659 24 dataset """kinships""" +659 24 model """simple""" +659 24 loss """nssa""" +659 24 regularizer """no""" +659 24 optimizer """adadelta""" +659 24 training_loop """owa""" +659 24 negative_sampler """basic""" +659 24 evaluator """rankbased""" +659 25 dataset """kinships""" +659 25 model """simple""" +659 25 loss """nssa""" +659 25 regularizer """no""" +659 25 optimizer """adadelta""" +659 25 training_loop """owa""" +659 25 negative_sampler """basic""" +659 25 evaluator """rankbased""" +659 26 dataset """kinships""" +659 26 model """simple""" +659 26 loss """nssa""" +659 26 regularizer """no""" +659 26 optimizer """adadelta""" +659 26 training_loop """owa""" +659 26 negative_sampler """basic""" +659 26 evaluator """rankbased""" +659 27 dataset """kinships""" +659 27 model """simple""" +659 27 loss """nssa""" +659 27 regularizer """no""" +659 27 optimizer """adadelta""" +659 27 training_loop """owa""" +659 27 negative_sampler """basic""" +659 27 evaluator """rankbased""" +659 28 dataset """kinships""" +659 28 model """simple""" +659 28 loss """nssa""" +659 28 regularizer """no""" +659 28 optimizer """adadelta""" +659 28 training_loop """owa""" +659 28 negative_sampler """basic""" +659 28 
evaluator """rankbased""" +659 29 dataset """kinships""" +659 29 model """simple""" +659 29 loss """nssa""" +659 29 regularizer """no""" +659 29 optimizer """adadelta""" +659 29 training_loop """owa""" +659 29 negative_sampler """basic""" +659 29 evaluator """rankbased""" +659 30 dataset """kinships""" +659 30 model """simple""" +659 30 loss """nssa""" +659 30 regularizer """no""" +659 30 optimizer """adadelta""" +659 30 training_loop """owa""" +659 30 negative_sampler """basic""" +659 30 evaluator """rankbased""" +659 31 dataset """kinships""" +659 31 model """simple""" +659 31 loss """nssa""" +659 31 regularizer """no""" +659 31 optimizer """adadelta""" +659 31 training_loop """owa""" +659 31 negative_sampler """basic""" +659 31 evaluator """rankbased""" +659 32 dataset """kinships""" +659 32 model """simple""" +659 32 loss """nssa""" +659 32 regularizer """no""" +659 32 optimizer """adadelta""" +659 32 training_loop """owa""" +659 32 negative_sampler """basic""" +659 32 evaluator """rankbased""" +659 33 dataset """kinships""" +659 33 model """simple""" +659 33 loss """nssa""" +659 33 regularizer """no""" +659 33 optimizer """adadelta""" +659 33 training_loop """owa""" +659 33 negative_sampler """basic""" +659 33 evaluator """rankbased""" +659 34 dataset """kinships""" +659 34 model """simple""" +659 34 loss """nssa""" +659 34 regularizer """no""" +659 34 optimizer """adadelta""" +659 34 training_loop """owa""" +659 34 negative_sampler """basic""" +659 34 evaluator """rankbased""" +659 35 dataset """kinships""" +659 35 model """simple""" +659 35 loss """nssa""" +659 35 regularizer """no""" +659 35 optimizer """adadelta""" +659 35 training_loop """owa""" +659 35 negative_sampler """basic""" +659 35 evaluator """rankbased""" +659 36 dataset """kinships""" +659 36 model """simple""" +659 36 loss """nssa""" +659 36 regularizer """no""" +659 36 optimizer """adadelta""" +659 36 training_loop """owa""" +659 36 negative_sampler """basic""" +659 36 evaluator 
"""rankbased""" +659 37 dataset """kinships""" +659 37 model """simple""" +659 37 loss """nssa""" +659 37 regularizer """no""" +659 37 optimizer """adadelta""" +659 37 training_loop """owa""" +659 37 negative_sampler """basic""" +659 37 evaluator """rankbased""" +659 38 dataset """kinships""" +659 38 model """simple""" +659 38 loss """nssa""" +659 38 regularizer """no""" +659 38 optimizer """adadelta""" +659 38 training_loop """owa""" +659 38 negative_sampler """basic""" +659 38 evaluator """rankbased""" +659 39 dataset """kinships""" +659 39 model """simple""" +659 39 loss """nssa""" +659 39 regularizer """no""" +659 39 optimizer """adadelta""" +659 39 training_loop """owa""" +659 39 negative_sampler """basic""" +659 39 evaluator """rankbased""" +659 40 dataset """kinships""" +659 40 model """simple""" +659 40 loss """nssa""" +659 40 regularizer """no""" +659 40 optimizer """adadelta""" +659 40 training_loop """owa""" +659 40 negative_sampler """basic""" +659 40 evaluator """rankbased""" +659 41 dataset """kinships""" +659 41 model """simple""" +659 41 loss """nssa""" +659 41 regularizer """no""" +659 41 optimizer """adadelta""" +659 41 training_loop """owa""" +659 41 negative_sampler """basic""" +659 41 evaluator """rankbased""" +659 42 dataset """kinships""" +659 42 model """simple""" +659 42 loss """nssa""" +659 42 regularizer """no""" +659 42 optimizer """adadelta""" +659 42 training_loop """owa""" +659 42 negative_sampler """basic""" +659 42 evaluator """rankbased""" +659 43 dataset """kinships""" +659 43 model """simple""" +659 43 loss """nssa""" +659 43 regularizer """no""" +659 43 optimizer """adadelta""" +659 43 training_loop """owa""" +659 43 negative_sampler """basic""" +659 43 evaluator """rankbased""" +659 44 dataset """kinships""" +659 44 model """simple""" +659 44 loss """nssa""" +659 44 regularizer """no""" +659 44 optimizer """adadelta""" +659 44 training_loop """owa""" +659 44 negative_sampler """basic""" +659 44 evaluator """rankbased""" +659 45 
dataset """kinships""" +659 45 model """simple""" +659 45 loss """nssa""" +659 45 regularizer """no""" +659 45 optimizer """adadelta""" +659 45 training_loop """owa""" +659 45 negative_sampler """basic""" +659 45 evaluator """rankbased""" +659 46 dataset """kinships""" +659 46 model """simple""" +659 46 loss """nssa""" +659 46 regularizer """no""" +659 46 optimizer """adadelta""" +659 46 training_loop """owa""" +659 46 negative_sampler """basic""" +659 46 evaluator """rankbased""" +659 47 dataset """kinships""" +659 47 model """simple""" +659 47 loss """nssa""" +659 47 regularizer """no""" +659 47 optimizer """adadelta""" +659 47 training_loop """owa""" +659 47 negative_sampler """basic""" +659 47 evaluator """rankbased""" +659 48 dataset """kinships""" +659 48 model """simple""" +659 48 loss """nssa""" +659 48 regularizer """no""" +659 48 optimizer """adadelta""" +659 48 training_loop """owa""" +659 48 negative_sampler """basic""" +659 48 evaluator """rankbased""" +659 49 dataset """kinships""" +659 49 model """simple""" +659 49 loss """nssa""" +659 49 regularizer """no""" +659 49 optimizer """adadelta""" +659 49 training_loop """owa""" +659 49 negative_sampler """basic""" +659 49 evaluator """rankbased""" +659 50 dataset """kinships""" +659 50 model """simple""" +659 50 loss """nssa""" +659 50 regularizer """no""" +659 50 optimizer """adadelta""" +659 50 training_loop """owa""" +659 50 negative_sampler """basic""" +659 50 evaluator """rankbased""" +659 51 dataset """kinships""" +659 51 model """simple""" +659 51 loss """nssa""" +659 51 regularizer """no""" +659 51 optimizer """adadelta""" +659 51 training_loop """owa""" +659 51 negative_sampler """basic""" +659 51 evaluator """rankbased""" +659 52 dataset """kinships""" +659 52 model """simple""" +659 52 loss """nssa""" +659 52 regularizer """no""" +659 52 optimizer """adadelta""" +659 52 training_loop """owa""" +659 52 negative_sampler """basic""" +659 52 evaluator """rankbased""" +659 53 dataset """kinships""" 
+659 53 model """simple""" +659 53 loss """nssa""" +659 53 regularizer """no""" +659 53 optimizer """adadelta""" +659 53 training_loop """owa""" +659 53 negative_sampler """basic""" +659 53 evaluator """rankbased""" +659 54 dataset """kinships""" +659 54 model """simple""" +659 54 loss """nssa""" +659 54 regularizer """no""" +659 54 optimizer """adadelta""" +659 54 training_loop """owa""" +659 54 negative_sampler """basic""" +659 54 evaluator """rankbased""" +659 55 dataset """kinships""" +659 55 model """simple""" +659 55 loss """nssa""" +659 55 regularizer """no""" +659 55 optimizer """adadelta""" +659 55 training_loop """owa""" +659 55 negative_sampler """basic""" +659 55 evaluator """rankbased""" +659 56 dataset """kinships""" +659 56 model """simple""" +659 56 loss """nssa""" +659 56 regularizer """no""" +659 56 optimizer """adadelta""" +659 56 training_loop """owa""" +659 56 negative_sampler """basic""" +659 56 evaluator """rankbased""" +659 57 dataset """kinships""" +659 57 model """simple""" +659 57 loss """nssa""" +659 57 regularizer """no""" +659 57 optimizer """adadelta""" +659 57 training_loop """owa""" +659 57 negative_sampler """basic""" +659 57 evaluator """rankbased""" +659 58 dataset """kinships""" +659 58 model """simple""" +659 58 loss """nssa""" +659 58 regularizer """no""" +659 58 optimizer """adadelta""" +659 58 training_loop """owa""" +659 58 negative_sampler """basic""" +659 58 evaluator """rankbased""" +659 59 dataset """kinships""" +659 59 model """simple""" +659 59 loss """nssa""" +659 59 regularizer """no""" +659 59 optimizer """adadelta""" +659 59 training_loop """owa""" +659 59 negative_sampler """basic""" +659 59 evaluator """rankbased""" +659 60 dataset """kinships""" +659 60 model """simple""" +659 60 loss """nssa""" +659 60 regularizer """no""" +659 60 optimizer """adadelta""" +659 60 training_loop """owa""" +659 60 negative_sampler """basic""" +659 60 evaluator """rankbased""" +659 61 dataset """kinships""" +659 61 model 
"""simple""" +659 61 loss """nssa""" +659 61 regularizer """no""" +659 61 optimizer """adadelta""" +659 61 training_loop """owa""" +659 61 negative_sampler """basic""" +659 61 evaluator """rankbased""" +659 62 dataset """kinships""" +659 62 model """simple""" +659 62 loss """nssa""" +659 62 regularizer """no""" +659 62 optimizer """adadelta""" +659 62 training_loop """owa""" +659 62 negative_sampler """basic""" +659 62 evaluator """rankbased""" +659 63 dataset """kinships""" +659 63 model """simple""" +659 63 loss """nssa""" +659 63 regularizer """no""" +659 63 optimizer """adadelta""" +659 63 training_loop """owa""" +659 63 negative_sampler """basic""" +659 63 evaluator """rankbased""" +659 64 dataset """kinships""" +659 64 model """simple""" +659 64 loss """nssa""" +659 64 regularizer """no""" +659 64 optimizer """adadelta""" +659 64 training_loop """owa""" +659 64 negative_sampler """basic""" +659 64 evaluator """rankbased""" +659 65 dataset """kinships""" +659 65 model """simple""" +659 65 loss """nssa""" +659 65 regularizer """no""" +659 65 optimizer """adadelta""" +659 65 training_loop """owa""" +659 65 negative_sampler """basic""" +659 65 evaluator """rankbased""" +659 66 dataset """kinships""" +659 66 model """simple""" +659 66 loss """nssa""" +659 66 regularizer """no""" +659 66 optimizer """adadelta""" +659 66 training_loop """owa""" +659 66 negative_sampler """basic""" +659 66 evaluator """rankbased""" +659 67 dataset """kinships""" +659 67 model """simple""" +659 67 loss """nssa""" +659 67 regularizer """no""" +659 67 optimizer """adadelta""" +659 67 training_loop """owa""" +659 67 negative_sampler """basic""" +659 67 evaluator """rankbased""" +659 68 dataset """kinships""" +659 68 model """simple""" +659 68 loss """nssa""" +659 68 regularizer """no""" +659 68 optimizer """adadelta""" +659 68 training_loop """owa""" +659 68 negative_sampler """basic""" +659 68 evaluator """rankbased""" +659 69 dataset """kinships""" +659 69 model """simple""" +659 69 
loss """nssa""" +659 69 regularizer """no""" +659 69 optimizer """adadelta""" +659 69 training_loop """owa""" +659 69 negative_sampler """basic""" +659 69 evaluator """rankbased""" +659 70 dataset """kinships""" +659 70 model """simple""" +659 70 loss """nssa""" +659 70 regularizer """no""" +659 70 optimizer """adadelta""" +659 70 training_loop """owa""" +659 70 negative_sampler """basic""" +659 70 evaluator """rankbased""" +659 71 dataset """kinships""" +659 71 model """simple""" +659 71 loss """nssa""" +659 71 regularizer """no""" +659 71 optimizer """adadelta""" +659 71 training_loop """owa""" +659 71 negative_sampler """basic""" +659 71 evaluator """rankbased""" +659 72 dataset """kinships""" +659 72 model """simple""" +659 72 loss """nssa""" +659 72 regularizer """no""" +659 72 optimizer """adadelta""" +659 72 training_loop """owa""" +659 72 negative_sampler """basic""" +659 72 evaluator """rankbased""" +659 73 dataset """kinships""" +659 73 model """simple""" +659 73 loss """nssa""" +659 73 regularizer """no""" +659 73 optimizer """adadelta""" +659 73 training_loop """owa""" +659 73 negative_sampler """basic""" +659 73 evaluator """rankbased""" +659 74 dataset """kinships""" +659 74 model """simple""" +659 74 loss """nssa""" +659 74 regularizer """no""" +659 74 optimizer """adadelta""" +659 74 training_loop """owa""" +659 74 negative_sampler """basic""" +659 74 evaluator """rankbased""" +659 75 dataset """kinships""" +659 75 model """simple""" +659 75 loss """nssa""" +659 75 regularizer """no""" +659 75 optimizer """adadelta""" +659 75 training_loop """owa""" +659 75 negative_sampler """basic""" +659 75 evaluator """rankbased""" +659 76 dataset """kinships""" +659 76 model """simple""" +659 76 loss """nssa""" +659 76 regularizer """no""" +659 76 optimizer """adadelta""" +659 76 training_loop """owa""" +659 76 negative_sampler """basic""" +659 76 evaluator """rankbased""" +659 77 dataset """kinships""" +659 77 model """simple""" +659 77 loss """nssa""" +659 77 
regularizer """no""" +659 77 optimizer """adadelta""" +659 77 training_loop """owa""" +659 77 negative_sampler """basic""" +659 77 evaluator """rankbased""" +659 78 dataset """kinships""" +659 78 model """simple""" +659 78 loss """nssa""" +659 78 regularizer """no""" +659 78 optimizer """adadelta""" +659 78 training_loop """owa""" +659 78 negative_sampler """basic""" +659 78 evaluator """rankbased""" +659 79 dataset """kinships""" +659 79 model """simple""" +659 79 loss """nssa""" +659 79 regularizer """no""" +659 79 optimizer """adadelta""" +659 79 training_loop """owa""" +659 79 negative_sampler """basic""" +659 79 evaluator """rankbased""" +659 80 dataset """kinships""" +659 80 model """simple""" +659 80 loss """nssa""" +659 80 regularizer """no""" +659 80 optimizer """adadelta""" +659 80 training_loop """owa""" +659 80 negative_sampler """basic""" +659 80 evaluator """rankbased""" +659 81 dataset """kinships""" +659 81 model """simple""" +659 81 loss """nssa""" +659 81 regularizer """no""" +659 81 optimizer """adadelta""" +659 81 training_loop """owa""" +659 81 negative_sampler """basic""" +659 81 evaluator """rankbased""" +659 82 dataset """kinships""" +659 82 model """simple""" +659 82 loss """nssa""" +659 82 regularizer """no""" +659 82 optimizer """adadelta""" +659 82 training_loop """owa""" +659 82 negative_sampler """basic""" +659 82 evaluator """rankbased""" +659 83 dataset """kinships""" +659 83 model """simple""" +659 83 loss """nssa""" +659 83 regularizer """no""" +659 83 optimizer """adadelta""" +659 83 training_loop """owa""" +659 83 negative_sampler """basic""" +659 83 evaluator """rankbased""" +659 84 dataset """kinships""" +659 84 model """simple""" +659 84 loss """nssa""" +659 84 regularizer """no""" +659 84 optimizer """adadelta""" +659 84 training_loop """owa""" +659 84 negative_sampler """basic""" +659 84 evaluator """rankbased""" +659 85 dataset """kinships""" +659 85 model """simple""" +659 85 loss """nssa""" +659 85 regularizer """no""" 
+659 85 optimizer """adadelta""" +659 85 training_loop """owa""" +659 85 negative_sampler """basic""" +659 85 evaluator """rankbased""" +659 86 dataset """kinships""" +659 86 model """simple""" +659 86 loss """nssa""" +659 86 regularizer """no""" +659 86 optimizer """adadelta""" +659 86 training_loop """owa""" +659 86 negative_sampler """basic""" +659 86 evaluator """rankbased""" +659 87 dataset """kinships""" +659 87 model """simple""" +659 87 loss """nssa""" +659 87 regularizer """no""" +659 87 optimizer """adadelta""" +659 87 training_loop """owa""" +659 87 negative_sampler """basic""" +659 87 evaluator """rankbased""" +659 88 dataset """kinships""" +659 88 model """simple""" +659 88 loss """nssa""" +659 88 regularizer """no""" +659 88 optimizer """adadelta""" +659 88 training_loop """owa""" +659 88 negative_sampler """basic""" +659 88 evaluator """rankbased""" +659 89 dataset """kinships""" +659 89 model """simple""" +659 89 loss """nssa""" +659 89 regularizer """no""" +659 89 optimizer """adadelta""" +659 89 training_loop """owa""" +659 89 negative_sampler """basic""" +659 89 evaluator """rankbased""" +659 90 dataset """kinships""" +659 90 model """simple""" +659 90 loss """nssa""" +659 90 regularizer """no""" +659 90 optimizer """adadelta""" +659 90 training_loop """owa""" +659 90 negative_sampler """basic""" +659 90 evaluator """rankbased""" +659 91 dataset """kinships""" +659 91 model """simple""" +659 91 loss """nssa""" +659 91 regularizer """no""" +659 91 optimizer """adadelta""" +659 91 training_loop """owa""" +659 91 negative_sampler """basic""" +659 91 evaluator """rankbased""" +659 92 dataset """kinships""" +659 92 model """simple""" +659 92 loss """nssa""" +659 92 regularizer """no""" +659 92 optimizer """adadelta""" +659 92 training_loop """owa""" +659 92 negative_sampler """basic""" +659 92 evaluator """rankbased""" +659 93 dataset """kinships""" +659 93 model """simple""" +659 93 loss """nssa""" +659 93 regularizer """no""" +659 93 optimizer 
"""adadelta""" +659 93 training_loop """owa""" +659 93 negative_sampler """basic""" +659 93 evaluator """rankbased""" +659 94 dataset """kinships""" +659 94 model """simple""" +659 94 loss """nssa""" +659 94 regularizer """no""" +659 94 optimizer """adadelta""" +659 94 training_loop """owa""" +659 94 negative_sampler """basic""" +659 94 evaluator """rankbased""" +659 95 dataset """kinships""" +659 95 model """simple""" +659 95 loss """nssa""" +659 95 regularizer """no""" +659 95 optimizer """adadelta""" +659 95 training_loop """owa""" +659 95 negative_sampler """basic""" +659 95 evaluator """rankbased""" +659 96 dataset """kinships""" +659 96 model """simple""" +659 96 loss """nssa""" +659 96 regularizer """no""" +659 96 optimizer """adadelta""" +659 96 training_loop """owa""" +659 96 negative_sampler """basic""" +659 96 evaluator """rankbased""" +659 97 dataset """kinships""" +659 97 model """simple""" +659 97 loss """nssa""" +659 97 regularizer """no""" +659 97 optimizer """adadelta""" +659 97 training_loop """owa""" +659 97 negative_sampler """basic""" +659 97 evaluator """rankbased""" +659 98 dataset """kinships""" +659 98 model """simple""" +659 98 loss """nssa""" +659 98 regularizer """no""" +659 98 optimizer """adadelta""" +659 98 training_loop """owa""" +659 98 negative_sampler """basic""" +659 98 evaluator """rankbased""" +659 99 dataset """kinships""" +659 99 model """simple""" +659 99 loss """nssa""" +659 99 regularizer """no""" +659 99 optimizer """adadelta""" +659 99 training_loop """owa""" +659 99 negative_sampler """basic""" +659 99 evaluator """rankbased""" +659 100 dataset """kinships""" +659 100 model """simple""" +659 100 loss """nssa""" +659 100 regularizer """no""" +659 100 optimizer """adadelta""" +659 100 training_loop """owa""" +659 100 negative_sampler """basic""" +659 100 evaluator """rankbased""" +660 1 model.embedding_dim 0.0 +660 1 loss.margin 13.044416476980814 +660 1 loss.adversarial_temperature 0.6233025102180645 +660 1 
negative_sampler.num_negs_per_pos 52.0 +660 1 training.batch_size 1.0 +660 2 model.embedding_dim 1.0 +660 2 loss.margin 16.81941249332089 +660 2 loss.adversarial_temperature 0.3309961078756234 +660 2 negative_sampler.num_negs_per_pos 36.0 +660 2 training.batch_size 0.0 +660 3 model.embedding_dim 2.0 +660 3 loss.margin 11.75818971910969 +660 3 loss.adversarial_temperature 0.9547455767277525 +660 3 negative_sampler.num_negs_per_pos 84.0 +660 3 training.batch_size 2.0 +660 4 model.embedding_dim 0.0 +660 4 loss.margin 26.1953671962174 +660 4 loss.adversarial_temperature 0.6636452405093349 +660 4 negative_sampler.num_negs_per_pos 94.0 +660 4 training.batch_size 2.0 +660 5 model.embedding_dim 0.0 +660 5 loss.margin 3.9311199021555154 +660 5 loss.adversarial_temperature 0.3473986271419748 +660 5 negative_sampler.num_negs_per_pos 16.0 +660 5 training.batch_size 2.0 +660 6 model.embedding_dim 2.0 +660 6 loss.margin 18.188734974034638 +660 6 loss.adversarial_temperature 0.8784000691934947 +660 6 negative_sampler.num_negs_per_pos 20.0 +660 6 training.batch_size 2.0 +660 7 model.embedding_dim 2.0 +660 7 loss.margin 3.1826906636280174 +660 7 loss.adversarial_temperature 0.4833752951653343 +660 7 negative_sampler.num_negs_per_pos 14.0 +660 7 training.batch_size 1.0 +660 8 model.embedding_dim 2.0 +660 8 loss.margin 29.55752154483611 +660 8 loss.adversarial_temperature 0.36266914883267676 +660 8 negative_sampler.num_negs_per_pos 78.0 +660 8 training.batch_size 2.0 +660 9 model.embedding_dim 1.0 +660 9 loss.margin 26.836694625786038 +660 9 loss.adversarial_temperature 0.7093286094842921 +660 9 negative_sampler.num_negs_per_pos 19.0 +660 9 training.batch_size 0.0 +660 10 model.embedding_dim 2.0 +660 10 loss.margin 25.96095888110194 +660 10 loss.adversarial_temperature 0.23217369207674002 +660 10 negative_sampler.num_negs_per_pos 21.0 +660 10 training.batch_size 1.0 +660 11 model.embedding_dim 2.0 +660 11 loss.margin 13.103806688337341 +660 11 loss.adversarial_temperature 
0.41563183269652537 +660 11 negative_sampler.num_negs_per_pos 43.0 +660 11 training.batch_size 1.0 +660 12 model.embedding_dim 1.0 +660 12 loss.margin 1.5356211053470634 +660 12 loss.adversarial_temperature 0.4804411283583201 +660 12 negative_sampler.num_negs_per_pos 79.0 +660 12 training.batch_size 2.0 +660 13 model.embedding_dim 0.0 +660 13 loss.margin 2.7290188824358372 +660 13 loss.adversarial_temperature 0.22955661219759796 +660 13 negative_sampler.num_negs_per_pos 60.0 +660 13 training.batch_size 2.0 +660 14 model.embedding_dim 2.0 +660 14 loss.margin 14.47053773506341 +660 14 loss.adversarial_temperature 0.7344087723411735 +660 14 negative_sampler.num_negs_per_pos 40.0 +660 14 training.batch_size 2.0 +660 15 model.embedding_dim 0.0 +660 15 loss.margin 16.121289421278092 +660 15 loss.adversarial_temperature 0.8972004500262624 +660 15 negative_sampler.num_negs_per_pos 90.0 +660 15 training.batch_size 2.0 +660 16 model.embedding_dim 2.0 +660 16 loss.margin 23.01716560612896 +660 16 loss.adversarial_temperature 0.9607284542910964 +660 16 negative_sampler.num_negs_per_pos 0.0 +660 16 training.batch_size 2.0 +660 17 model.embedding_dim 2.0 +660 17 loss.margin 12.09822062002965 +660 17 loss.adversarial_temperature 0.43575421729469066 +660 17 negative_sampler.num_negs_per_pos 87.0 +660 17 training.batch_size 1.0 +660 18 model.embedding_dim 0.0 +660 18 loss.margin 7.707867522533897 +660 18 loss.adversarial_temperature 0.36509042719295093 +660 18 negative_sampler.num_negs_per_pos 10.0 +660 18 training.batch_size 1.0 +660 19 model.embedding_dim 0.0 +660 19 loss.margin 21.31800498944763 +660 19 loss.adversarial_temperature 0.27301793839244065 +660 19 negative_sampler.num_negs_per_pos 9.0 +660 19 training.batch_size 2.0 +660 20 model.embedding_dim 1.0 +660 20 loss.margin 21.187701848712752 +660 20 loss.adversarial_temperature 0.36627870827923936 +660 20 negative_sampler.num_negs_per_pos 85.0 +660 20 training.batch_size 0.0 +660 21 model.embedding_dim 2.0 +660 21 
loss.margin 11.309250022786767 +660 21 loss.adversarial_temperature 0.7076161399308487 +660 21 negative_sampler.num_negs_per_pos 21.0 +660 21 training.batch_size 2.0 +660 22 model.embedding_dim 1.0 +660 22 loss.margin 14.870731594039112 +660 22 loss.adversarial_temperature 0.6409625938305188 +660 22 negative_sampler.num_negs_per_pos 74.0 +660 22 training.batch_size 1.0 +660 23 model.embedding_dim 1.0 +660 23 loss.margin 13.074894366305537 +660 23 loss.adversarial_temperature 0.7097997474447707 +660 23 negative_sampler.num_negs_per_pos 52.0 +660 23 training.batch_size 1.0 +660 24 model.embedding_dim 0.0 +660 24 loss.margin 15.286634117944441 +660 24 loss.adversarial_temperature 0.8092480701079787 +660 24 negative_sampler.num_negs_per_pos 45.0 +660 24 training.batch_size 0.0 +660 25 model.embedding_dim 0.0 +660 25 loss.margin 24.843634413972943 +660 25 loss.adversarial_temperature 0.13775132337756285 +660 25 negative_sampler.num_negs_per_pos 45.0 +660 25 training.batch_size 0.0 +660 26 model.embedding_dim 0.0 +660 26 loss.margin 14.521876993168291 +660 26 loss.adversarial_temperature 0.6456326530817277 +660 26 negative_sampler.num_negs_per_pos 34.0 +660 26 training.batch_size 2.0 +660 27 model.embedding_dim 1.0 +660 27 loss.margin 15.468872231412487 +660 27 loss.adversarial_temperature 0.8712959297898721 +660 27 negative_sampler.num_negs_per_pos 70.0 +660 27 training.batch_size 0.0 +660 28 model.embedding_dim 2.0 +660 28 loss.margin 8.566417920480319 +660 28 loss.adversarial_temperature 0.8935038413279801 +660 28 negative_sampler.num_negs_per_pos 44.0 +660 28 training.batch_size 0.0 +660 29 model.embedding_dim 1.0 +660 29 loss.margin 10.914390462618908 +660 29 loss.adversarial_temperature 0.18969172632922982 +660 29 negative_sampler.num_negs_per_pos 64.0 +660 29 training.batch_size 2.0 +660 30 model.embedding_dim 2.0 +660 30 loss.margin 6.5463309004288455 +660 30 loss.adversarial_temperature 0.6813096843998945 +660 30 negative_sampler.num_negs_per_pos 43.0 +660 30 
training.batch_size 2.0 +660 31 model.embedding_dim 0.0 +660 31 loss.margin 7.210527855480402 +660 31 loss.adversarial_temperature 0.17979042981528082 +660 31 negative_sampler.num_negs_per_pos 66.0 +660 31 training.batch_size 2.0 +660 32 model.embedding_dim 2.0 +660 32 loss.margin 14.753436525198431 +660 32 loss.adversarial_temperature 0.9062728711910388 +660 32 negative_sampler.num_negs_per_pos 22.0 +660 32 training.batch_size 2.0 +660 33 model.embedding_dim 1.0 +660 33 loss.margin 29.429433800530088 +660 33 loss.adversarial_temperature 0.19662841834320471 +660 33 negative_sampler.num_negs_per_pos 9.0 +660 33 training.batch_size 1.0 +660 34 model.embedding_dim 0.0 +660 34 loss.margin 25.555056464357126 +660 34 loss.adversarial_temperature 0.4230881221400776 +660 34 negative_sampler.num_negs_per_pos 57.0 +660 34 training.batch_size 2.0 +660 35 model.embedding_dim 0.0 +660 35 loss.margin 16.301237352419335 +660 35 loss.adversarial_temperature 0.7305072411015658 +660 35 negative_sampler.num_negs_per_pos 50.0 +660 35 training.batch_size 0.0 +660 36 model.embedding_dim 2.0 +660 36 loss.margin 7.612077696078514 +660 36 loss.adversarial_temperature 0.4516375771198744 +660 36 negative_sampler.num_negs_per_pos 11.0 +660 36 training.batch_size 1.0 +660 37 model.embedding_dim 1.0 +660 37 loss.margin 13.992028541714348 +660 37 loss.adversarial_temperature 0.5149219772233923 +660 37 negative_sampler.num_negs_per_pos 80.0 +660 37 training.batch_size 2.0 +660 38 model.embedding_dim 1.0 +660 38 loss.margin 24.454679028722477 +660 38 loss.adversarial_temperature 0.4073953946408163 +660 38 negative_sampler.num_negs_per_pos 48.0 +660 38 training.batch_size 2.0 +660 39 model.embedding_dim 1.0 +660 39 loss.margin 24.457061189590835 +660 39 loss.adversarial_temperature 0.15119655417678884 +660 39 negative_sampler.num_negs_per_pos 96.0 +660 39 training.batch_size 1.0 +660 40 model.embedding_dim 2.0 +660 40 loss.margin 13.043694262209133 +660 40 loss.adversarial_temperature 
0.1175248852701234 +660 40 negative_sampler.num_negs_per_pos 61.0 +660 40 training.batch_size 2.0 +660 41 model.embedding_dim 1.0 +660 41 loss.margin 19.877780405750926 +660 41 loss.adversarial_temperature 0.4721216653960456 +660 41 negative_sampler.num_negs_per_pos 74.0 +660 41 training.batch_size 1.0 +660 42 model.embedding_dim 1.0 +660 42 loss.margin 6.3186320807921295 +660 42 loss.adversarial_temperature 0.9638692899838828 +660 42 negative_sampler.num_negs_per_pos 87.0 +660 42 training.batch_size 1.0 +660 43 model.embedding_dim 1.0 +660 43 loss.margin 5.387214737402414 +660 43 loss.adversarial_temperature 0.586863941311342 +660 43 negative_sampler.num_negs_per_pos 3.0 +660 43 training.batch_size 2.0 +660 44 model.embedding_dim 0.0 +660 44 loss.margin 3.3283297438922403 +660 44 loss.adversarial_temperature 0.1846833438393776 +660 44 negative_sampler.num_negs_per_pos 24.0 +660 44 training.batch_size 1.0 +660 45 model.embedding_dim 1.0 +660 45 loss.margin 5.596207864341783 +660 45 loss.adversarial_temperature 0.24680735741781148 +660 45 negative_sampler.num_negs_per_pos 46.0 +660 45 training.batch_size 2.0 +660 46 model.embedding_dim 0.0 +660 46 loss.margin 4.973273504339142 +660 46 loss.adversarial_temperature 0.6052863465278093 +660 46 negative_sampler.num_negs_per_pos 60.0 +660 46 training.batch_size 1.0 +660 47 model.embedding_dim 1.0 +660 47 loss.margin 15.391823654227363 +660 47 loss.adversarial_temperature 0.8369707085850623 +660 47 negative_sampler.num_negs_per_pos 21.0 +660 47 training.batch_size 0.0 +660 48 model.embedding_dim 1.0 +660 48 loss.margin 18.229009300908842 +660 48 loss.adversarial_temperature 0.3614095991626032 +660 48 negative_sampler.num_negs_per_pos 39.0 +660 48 training.batch_size 0.0 +660 49 model.embedding_dim 2.0 +660 49 loss.margin 28.95589721844293 +660 49 loss.adversarial_temperature 0.1248103732847223 +660 49 negative_sampler.num_negs_per_pos 75.0 +660 49 training.batch_size 1.0 +660 50 model.embedding_dim 0.0 +660 50 loss.margin 
15.77821552577394 +660 50 loss.adversarial_temperature 0.8638336254029089 +660 50 negative_sampler.num_negs_per_pos 29.0 +660 50 training.batch_size 1.0 +660 51 model.embedding_dim 0.0 +660 51 loss.margin 29.516788637951354 +660 51 loss.adversarial_temperature 0.3039011358812296 +660 51 negative_sampler.num_negs_per_pos 11.0 +660 51 training.batch_size 2.0 +660 52 model.embedding_dim 1.0 +660 52 loss.margin 14.058732906733711 +660 52 loss.adversarial_temperature 0.5181407974306176 +660 52 negative_sampler.num_negs_per_pos 80.0 +660 52 training.batch_size 1.0 +660 53 model.embedding_dim 0.0 +660 53 loss.margin 10.195159842654835 +660 53 loss.adversarial_temperature 0.9820953849214398 +660 53 negative_sampler.num_negs_per_pos 78.0 +660 53 training.batch_size 2.0 +660 54 model.embedding_dim 2.0 +660 54 loss.margin 2.851066876849064 +660 54 loss.adversarial_temperature 0.7330613423019404 +660 54 negative_sampler.num_negs_per_pos 52.0 +660 54 training.batch_size 0.0 +660 55 model.embedding_dim 1.0 +660 55 loss.margin 4.910837203703149 +660 55 loss.adversarial_temperature 0.960500440895377 +660 55 negative_sampler.num_negs_per_pos 64.0 +660 55 training.batch_size 0.0 +660 56 model.embedding_dim 2.0 +660 56 loss.margin 20.059902088078793 +660 56 loss.adversarial_temperature 0.4936136575645331 +660 56 negative_sampler.num_negs_per_pos 90.0 +660 56 training.batch_size 0.0 +660 57 model.embedding_dim 0.0 +660 57 loss.margin 5.048924934722315 +660 57 loss.adversarial_temperature 0.16130884593714262 +660 57 negative_sampler.num_negs_per_pos 45.0 +660 57 training.batch_size 1.0 +660 58 model.embedding_dim 1.0 +660 58 loss.margin 10.07513127776844 +660 58 loss.adversarial_temperature 0.7723687821974335 +660 58 negative_sampler.num_negs_per_pos 99.0 +660 58 training.batch_size 2.0 +660 59 model.embedding_dim 2.0 +660 59 loss.margin 21.177694458270267 +660 59 loss.adversarial_temperature 0.327059475813232 +660 59 negative_sampler.num_negs_per_pos 58.0 +660 59 training.batch_size 
0.0 +660 60 model.embedding_dim 0.0 +660 60 loss.margin 29.865231148253027 +660 60 loss.adversarial_temperature 0.6712385223924741 +660 60 negative_sampler.num_negs_per_pos 93.0 +660 60 training.batch_size 2.0 +660 61 model.embedding_dim 2.0 +660 61 loss.margin 1.7996515250963618 +660 61 loss.adversarial_temperature 0.618840297835837 +660 61 negative_sampler.num_negs_per_pos 65.0 +660 61 training.batch_size 0.0 +660 62 model.embedding_dim 1.0 +660 62 loss.margin 29.040951105069265 +660 62 loss.adversarial_temperature 0.8619704195799189 +660 62 negative_sampler.num_negs_per_pos 87.0 +660 62 training.batch_size 0.0 +660 63 model.embedding_dim 0.0 +660 63 loss.margin 8.486459378894443 +660 63 loss.adversarial_temperature 0.6061475742510158 +660 63 negative_sampler.num_negs_per_pos 63.0 +660 63 training.batch_size 2.0 +660 64 model.embedding_dim 0.0 +660 64 loss.margin 4.310185220887474 +660 64 loss.adversarial_temperature 0.1835881750306669 +660 64 negative_sampler.num_negs_per_pos 86.0 +660 64 training.batch_size 2.0 +660 65 model.embedding_dim 2.0 +660 65 loss.margin 3.337783022158153 +660 65 loss.adversarial_temperature 0.9952841562192047 +660 65 negative_sampler.num_negs_per_pos 30.0 +660 65 training.batch_size 2.0 +660 66 model.embedding_dim 2.0 +660 66 loss.margin 26.224193582654078 +660 66 loss.adversarial_temperature 0.49506474305852943 +660 66 negative_sampler.num_negs_per_pos 86.0 +660 66 training.batch_size 1.0 +660 67 model.embedding_dim 0.0 +660 67 loss.margin 2.1018955163701496 +660 67 loss.adversarial_temperature 0.6316441959827582 +660 67 negative_sampler.num_negs_per_pos 55.0 +660 67 training.batch_size 1.0 +660 68 model.embedding_dim 0.0 +660 68 loss.margin 16.22959047289133 +660 68 loss.adversarial_temperature 0.6749618766614471 +660 68 negative_sampler.num_negs_per_pos 33.0 +660 68 training.batch_size 2.0 +660 69 model.embedding_dim 1.0 +660 69 loss.margin 16.849744145925026 +660 69 loss.adversarial_temperature 0.5446692196625926 +660 69 
negative_sampler.num_negs_per_pos 69.0 +660 69 training.batch_size 0.0 +660 70 model.embedding_dim 1.0 +660 70 loss.margin 5.77708130358781 +660 70 loss.adversarial_temperature 0.6054334718022572 +660 70 negative_sampler.num_negs_per_pos 11.0 +660 70 training.batch_size 2.0 +660 71 model.embedding_dim 0.0 +660 71 loss.margin 3.5789969638504093 +660 71 loss.adversarial_temperature 0.6193928051681182 +660 71 negative_sampler.num_negs_per_pos 35.0 +660 71 training.batch_size 2.0 +660 72 model.embedding_dim 1.0 +660 72 loss.margin 20.815083613434084 +660 72 loss.adversarial_temperature 0.63808725684046 +660 72 negative_sampler.num_negs_per_pos 89.0 +660 72 training.batch_size 0.0 +660 73 model.embedding_dim 2.0 +660 73 loss.margin 2.255214498005025 +660 73 loss.adversarial_temperature 0.456417009504016 +660 73 negative_sampler.num_negs_per_pos 89.0 +660 73 training.batch_size 2.0 +660 74 model.embedding_dim 2.0 +660 74 loss.margin 24.647075058530824 +660 74 loss.adversarial_temperature 0.9475943682011926 +660 74 negative_sampler.num_negs_per_pos 80.0 +660 74 training.batch_size 0.0 +660 75 model.embedding_dim 0.0 +660 75 loss.margin 27.222659287420026 +660 75 loss.adversarial_temperature 0.7293932530275045 +660 75 negative_sampler.num_negs_per_pos 2.0 +660 75 training.batch_size 0.0 +660 76 model.embedding_dim 2.0 +660 76 loss.margin 25.993123750341606 +660 76 loss.adversarial_temperature 0.2871264502535731 +660 76 negative_sampler.num_negs_per_pos 32.0 +660 76 training.batch_size 0.0 +660 77 model.embedding_dim 0.0 +660 77 loss.margin 9.316913008450873 +660 77 loss.adversarial_temperature 0.5814190959810351 +660 77 negative_sampler.num_negs_per_pos 0.0 +660 77 training.batch_size 1.0 +660 78 model.embedding_dim 1.0 +660 78 loss.margin 1.428111791965164 +660 78 loss.adversarial_temperature 0.5602978243272457 +660 78 negative_sampler.num_negs_per_pos 8.0 +660 78 training.batch_size 1.0 +660 79 model.embedding_dim 1.0 +660 79 loss.margin 18.85846446340578 +660 79 
loss.adversarial_temperature 0.9913471465258755 +660 79 negative_sampler.num_negs_per_pos 83.0 +660 79 training.batch_size 2.0 +660 80 model.embedding_dim 2.0 +660 80 loss.margin 21.926740487219423 +660 80 loss.adversarial_temperature 0.7324809739798002 +660 80 negative_sampler.num_negs_per_pos 68.0 +660 80 training.batch_size 0.0 +660 81 model.embedding_dim 2.0 +660 81 loss.margin 20.184476156882848 +660 81 loss.adversarial_temperature 0.7493288525229015 +660 81 negative_sampler.num_negs_per_pos 93.0 +660 81 training.batch_size 2.0 +660 82 model.embedding_dim 1.0 +660 82 loss.margin 27.854646117409885 +660 82 loss.adversarial_temperature 0.7324398005291007 +660 82 negative_sampler.num_negs_per_pos 76.0 +660 82 training.batch_size 2.0 +660 83 model.embedding_dim 0.0 +660 83 loss.margin 11.90266469393221 +660 83 loss.adversarial_temperature 0.3897431719221972 +660 83 negative_sampler.num_negs_per_pos 10.0 +660 83 training.batch_size 2.0 +660 84 model.embedding_dim 2.0 +660 84 loss.margin 28.827341947775732 +660 84 loss.adversarial_temperature 0.4432053232046749 +660 84 negative_sampler.num_negs_per_pos 80.0 +660 84 training.batch_size 0.0 +660 85 model.embedding_dim 1.0 +660 85 loss.margin 19.70513270163136 +660 85 loss.adversarial_temperature 0.7735860159779004 +660 85 negative_sampler.num_negs_per_pos 80.0 +660 85 training.batch_size 0.0 +660 86 model.embedding_dim 0.0 +660 86 loss.margin 2.503671474793527 +660 86 loss.adversarial_temperature 0.3526188164653812 +660 86 negative_sampler.num_negs_per_pos 67.0 +660 86 training.batch_size 0.0 +660 87 model.embedding_dim 0.0 +660 87 loss.margin 28.640386594518226 +660 87 loss.adversarial_temperature 0.9807146160174426 +660 87 negative_sampler.num_negs_per_pos 11.0 +660 87 training.batch_size 1.0 +660 88 model.embedding_dim 1.0 +660 88 loss.margin 24.520112835843605 +660 88 loss.adversarial_temperature 0.30206890225076327 +660 88 negative_sampler.num_negs_per_pos 93.0 +660 88 training.batch_size 1.0 +660 89 
model.embedding_dim 2.0 +660 89 loss.margin 21.427710958938622 +660 89 loss.adversarial_temperature 0.27196291501884456 +660 89 negative_sampler.num_negs_per_pos 31.0 +660 89 training.batch_size 2.0 +660 90 model.embedding_dim 0.0 +660 90 loss.margin 24.8784032919283 +660 90 loss.adversarial_temperature 0.8969080271143983 +660 90 negative_sampler.num_negs_per_pos 96.0 +660 90 training.batch_size 0.0 +660 91 model.embedding_dim 1.0 +660 91 loss.margin 22.434153404600323 +660 91 loss.adversarial_temperature 0.7860286464630634 +660 91 negative_sampler.num_negs_per_pos 77.0 +660 91 training.batch_size 2.0 +660 92 model.embedding_dim 1.0 +660 92 loss.margin 28.33558489078327 +660 92 loss.adversarial_temperature 0.4505587996748479 +660 92 negative_sampler.num_negs_per_pos 3.0 +660 92 training.batch_size 1.0 +660 93 model.embedding_dim 1.0 +660 93 loss.margin 19.196412325596683 +660 93 loss.adversarial_temperature 0.6706512475775466 +660 93 negative_sampler.num_negs_per_pos 74.0 +660 93 training.batch_size 0.0 +660 94 model.embedding_dim 1.0 +660 94 loss.margin 16.444780920917584 +660 94 loss.adversarial_temperature 0.7691993172516833 +660 94 negative_sampler.num_negs_per_pos 15.0 +660 94 training.batch_size 2.0 +660 95 model.embedding_dim 2.0 +660 95 loss.margin 24.587501803455552 +660 95 loss.adversarial_temperature 0.22322009806997262 +660 95 negative_sampler.num_negs_per_pos 37.0 +660 95 training.batch_size 0.0 +660 96 model.embedding_dim 1.0 +660 96 loss.margin 4.151870889181142 +660 96 loss.adversarial_temperature 0.47494340656247924 +660 96 negative_sampler.num_negs_per_pos 19.0 +660 96 training.batch_size 1.0 +660 97 model.embedding_dim 2.0 +660 97 loss.margin 22.118583264459815 +660 97 loss.adversarial_temperature 0.7583013193826609 +660 97 negative_sampler.num_negs_per_pos 19.0 +660 97 training.batch_size 2.0 +660 98 model.embedding_dim 0.0 +660 98 loss.margin 16.43521248516405 +660 98 loss.adversarial_temperature 0.5893628854753178 +660 98 
negative_sampler.num_negs_per_pos 2.0 +660 98 training.batch_size 1.0 +660 99 model.embedding_dim 0.0 +660 99 loss.margin 13.431000763286333 +660 99 loss.adversarial_temperature 0.8125833638066428 +660 99 negative_sampler.num_negs_per_pos 58.0 +660 99 training.batch_size 0.0 +660 100 model.embedding_dim 0.0 +660 100 loss.margin 29.400575102018475 +660 100 loss.adversarial_temperature 0.28683817996308225 +660 100 negative_sampler.num_negs_per_pos 10.0 +660 100 training.batch_size 2.0 +660 1 dataset """kinships""" +660 1 model """simple""" +660 1 loss """nssa""" +660 1 regularizer """no""" +660 1 optimizer """adadelta""" +660 1 training_loop """owa""" +660 1 negative_sampler """basic""" +660 1 evaluator """rankbased""" +660 2 dataset """kinships""" +660 2 model """simple""" +660 2 loss """nssa""" +660 2 regularizer """no""" +660 2 optimizer """adadelta""" +660 2 training_loop """owa""" +660 2 negative_sampler """basic""" +660 2 evaluator """rankbased""" +660 3 dataset """kinships""" +660 3 model """simple""" +660 3 loss """nssa""" +660 3 regularizer """no""" +660 3 optimizer """adadelta""" +660 3 training_loop """owa""" +660 3 negative_sampler """basic""" +660 3 evaluator """rankbased""" +660 4 dataset """kinships""" +660 4 model """simple""" +660 4 loss """nssa""" +660 4 regularizer """no""" +660 4 optimizer """adadelta""" +660 4 training_loop """owa""" +660 4 negative_sampler """basic""" +660 4 evaluator """rankbased""" +660 5 dataset """kinships""" +660 5 model """simple""" +660 5 loss """nssa""" +660 5 regularizer """no""" +660 5 optimizer """adadelta""" +660 5 training_loop """owa""" +660 5 negative_sampler """basic""" +660 5 evaluator """rankbased""" +660 6 dataset """kinships""" +660 6 model """simple""" +660 6 loss """nssa""" +660 6 regularizer """no""" +660 6 optimizer """adadelta""" +660 6 training_loop """owa""" +660 6 negative_sampler """basic""" +660 6 evaluator """rankbased""" +660 7 dataset """kinships""" +660 7 model """simple""" +660 7 loss 
"""nssa""" +660 7 regularizer """no""" +660 7 optimizer """adadelta""" +660 7 training_loop """owa""" +660 7 negative_sampler """basic""" +660 7 evaluator """rankbased""" +660 8 dataset """kinships""" +660 8 model """simple""" +660 8 loss """nssa""" +660 8 regularizer """no""" +660 8 optimizer """adadelta""" +660 8 training_loop """owa""" +660 8 negative_sampler """basic""" +660 8 evaluator """rankbased""" +660 9 dataset """kinships""" +660 9 model """simple""" +660 9 loss """nssa""" +660 9 regularizer """no""" +660 9 optimizer """adadelta""" +660 9 training_loop """owa""" +660 9 negative_sampler """basic""" +660 9 evaluator """rankbased""" +660 10 dataset """kinships""" +660 10 model """simple""" +660 10 loss """nssa""" +660 10 regularizer """no""" +660 10 optimizer """adadelta""" +660 10 training_loop """owa""" +660 10 negative_sampler """basic""" +660 10 evaluator """rankbased""" +660 11 dataset """kinships""" +660 11 model """simple""" +660 11 loss """nssa""" +660 11 regularizer """no""" +660 11 optimizer """adadelta""" +660 11 training_loop """owa""" +660 11 negative_sampler """basic""" +660 11 evaluator """rankbased""" +660 12 dataset """kinships""" +660 12 model """simple""" +660 12 loss """nssa""" +660 12 regularizer """no""" +660 12 optimizer """adadelta""" +660 12 training_loop """owa""" +660 12 negative_sampler """basic""" +660 12 evaluator """rankbased""" +660 13 dataset """kinships""" +660 13 model """simple""" +660 13 loss """nssa""" +660 13 regularizer """no""" +660 13 optimizer """adadelta""" +660 13 training_loop """owa""" +660 13 negative_sampler """basic""" +660 13 evaluator """rankbased""" +660 14 dataset """kinships""" +660 14 model """simple""" +660 14 loss """nssa""" +660 14 regularizer """no""" +660 14 optimizer """adadelta""" +660 14 training_loop """owa""" +660 14 negative_sampler """basic""" +660 14 evaluator """rankbased""" +660 15 dataset """kinships""" +660 15 model """simple""" +660 15 loss """nssa""" +660 15 regularizer """no""" +660 
15 optimizer """adadelta""" +660 15 training_loop """owa""" +660 15 negative_sampler """basic""" +660 15 evaluator """rankbased""" +660 16 dataset """kinships""" +660 16 model """simple""" +660 16 loss """nssa""" +660 16 regularizer """no""" +660 16 optimizer """adadelta""" +660 16 training_loop """owa""" +660 16 negative_sampler """basic""" +660 16 evaluator """rankbased""" +660 17 dataset """kinships""" +660 17 model """simple""" +660 17 loss """nssa""" +660 17 regularizer """no""" +660 17 optimizer """adadelta""" +660 17 training_loop """owa""" +660 17 negative_sampler """basic""" +660 17 evaluator """rankbased""" +660 18 dataset """kinships""" +660 18 model """simple""" +660 18 loss """nssa""" +660 18 regularizer """no""" +660 18 optimizer """adadelta""" +660 18 training_loop """owa""" +660 18 negative_sampler """basic""" +660 18 evaluator """rankbased""" +660 19 dataset """kinships""" +660 19 model """simple""" +660 19 loss """nssa""" +660 19 regularizer """no""" +660 19 optimizer """adadelta""" +660 19 training_loop """owa""" +660 19 negative_sampler """basic""" +660 19 evaluator """rankbased""" +660 20 dataset """kinships""" +660 20 model """simple""" +660 20 loss """nssa""" +660 20 regularizer """no""" +660 20 optimizer """adadelta""" +660 20 training_loop """owa""" +660 20 negative_sampler """basic""" +660 20 evaluator """rankbased""" +660 21 dataset """kinships""" +660 21 model """simple""" +660 21 loss """nssa""" +660 21 regularizer """no""" +660 21 optimizer """adadelta""" +660 21 training_loop """owa""" +660 21 negative_sampler """basic""" +660 21 evaluator """rankbased""" +660 22 dataset """kinships""" +660 22 model """simple""" +660 22 loss """nssa""" +660 22 regularizer """no""" +660 22 optimizer """adadelta""" +660 22 training_loop """owa""" +660 22 negative_sampler """basic""" +660 22 evaluator """rankbased""" +660 23 dataset """kinships""" +660 23 model """simple""" +660 23 loss """nssa""" +660 23 regularizer """no""" +660 23 optimizer 
"""adadelta""" +660 23 training_loop """owa""" +660 23 negative_sampler """basic""" +660 23 evaluator """rankbased""" +660 24 dataset """kinships""" +660 24 model """simple""" +660 24 loss """nssa""" +660 24 regularizer """no""" +660 24 optimizer """adadelta""" +660 24 training_loop """owa""" +660 24 negative_sampler """basic""" +660 24 evaluator """rankbased""" +660 25 dataset """kinships""" +660 25 model """simple""" +660 25 loss """nssa""" +660 25 regularizer """no""" +660 25 optimizer """adadelta""" +660 25 training_loop """owa""" +660 25 negative_sampler """basic""" +660 25 evaluator """rankbased""" +660 26 dataset """kinships""" +660 26 model """simple""" +660 26 loss """nssa""" +660 26 regularizer """no""" +660 26 optimizer """adadelta""" +660 26 training_loop """owa""" +660 26 negative_sampler """basic""" +660 26 evaluator """rankbased""" +660 27 dataset """kinships""" +660 27 model """simple""" +660 27 loss """nssa""" +660 27 regularizer """no""" +660 27 optimizer """adadelta""" +660 27 training_loop """owa""" +660 27 negative_sampler """basic""" +660 27 evaluator """rankbased""" +660 28 dataset """kinships""" +660 28 model """simple""" +660 28 loss """nssa""" +660 28 regularizer """no""" +660 28 optimizer """adadelta""" +660 28 training_loop """owa""" +660 28 negative_sampler """basic""" +660 28 evaluator """rankbased""" +660 29 dataset """kinships""" +660 29 model """simple""" +660 29 loss """nssa""" +660 29 regularizer """no""" +660 29 optimizer """adadelta""" +660 29 training_loop """owa""" +660 29 negative_sampler """basic""" +660 29 evaluator """rankbased""" +660 30 dataset """kinships""" +660 30 model """simple""" +660 30 loss """nssa""" +660 30 regularizer """no""" +660 30 optimizer """adadelta""" +660 30 training_loop """owa""" +660 30 negative_sampler """basic""" +660 30 evaluator """rankbased""" +660 31 dataset """kinships""" +660 31 model """simple""" +660 31 loss """nssa""" +660 31 regularizer """no""" +660 31 optimizer """adadelta""" +660 31 
training_loop """owa""" +660 31 negative_sampler """basic""" +660 31 evaluator """rankbased""" +660 32 dataset """kinships""" +660 32 model """simple""" +660 32 loss """nssa""" +660 32 regularizer """no""" +660 32 optimizer """adadelta""" +660 32 training_loop """owa""" +660 32 negative_sampler """basic""" +660 32 evaluator """rankbased""" +660 33 dataset """kinships""" +660 33 model """simple""" +660 33 loss """nssa""" +660 33 regularizer """no""" +660 33 optimizer """adadelta""" +660 33 training_loop """owa""" +660 33 negative_sampler """basic""" +660 33 evaluator """rankbased""" +660 34 dataset """kinships""" +660 34 model """simple""" +660 34 loss """nssa""" +660 34 regularizer """no""" +660 34 optimizer """adadelta""" +660 34 training_loop """owa""" +660 34 negative_sampler """basic""" +660 34 evaluator """rankbased""" +660 35 dataset """kinships""" +660 35 model """simple""" +660 35 loss """nssa""" +660 35 regularizer """no""" +660 35 optimizer """adadelta""" +660 35 training_loop """owa""" +660 35 negative_sampler """basic""" +660 35 evaluator """rankbased""" +660 36 dataset """kinships""" +660 36 model """simple""" +660 36 loss """nssa""" +660 36 regularizer """no""" +660 36 optimizer """adadelta""" +660 36 training_loop """owa""" +660 36 negative_sampler """basic""" +660 36 evaluator """rankbased""" +660 37 dataset """kinships""" +660 37 model """simple""" +660 37 loss """nssa""" +660 37 regularizer """no""" +660 37 optimizer """adadelta""" +660 37 training_loop """owa""" +660 37 negative_sampler """basic""" +660 37 evaluator """rankbased""" +660 38 dataset """kinships""" +660 38 model """simple""" +660 38 loss """nssa""" +660 38 regularizer """no""" +660 38 optimizer """adadelta""" +660 38 training_loop """owa""" +660 38 negative_sampler """basic""" +660 38 evaluator """rankbased""" +660 39 dataset """kinships""" +660 39 model """simple""" +660 39 loss """nssa""" +660 39 regularizer """no""" +660 39 optimizer """adadelta""" +660 39 training_loop """owa""" 
+660 39 negative_sampler """basic""" +660 39 evaluator """rankbased""" +660 40 dataset """kinships""" +660 40 model """simple""" +660 40 loss """nssa""" +660 40 regularizer """no""" +660 40 optimizer """adadelta""" +660 40 training_loop """owa""" +660 40 negative_sampler """basic""" +660 40 evaluator """rankbased""" +660 41 dataset """kinships""" +660 41 model """simple""" +660 41 loss """nssa""" +660 41 regularizer """no""" +660 41 optimizer """adadelta""" +660 41 training_loop """owa""" +660 41 negative_sampler """basic""" +660 41 evaluator """rankbased""" +660 42 dataset """kinships""" +660 42 model """simple""" +660 42 loss """nssa""" +660 42 regularizer """no""" +660 42 optimizer """adadelta""" +660 42 training_loop """owa""" +660 42 negative_sampler """basic""" +660 42 evaluator """rankbased""" +660 43 dataset """kinships""" +660 43 model """simple""" +660 43 loss """nssa""" +660 43 regularizer """no""" +660 43 optimizer """adadelta""" +660 43 training_loop """owa""" +660 43 negative_sampler """basic""" +660 43 evaluator """rankbased""" +660 44 dataset """kinships""" +660 44 model """simple""" +660 44 loss """nssa""" +660 44 regularizer """no""" +660 44 optimizer """adadelta""" +660 44 training_loop """owa""" +660 44 negative_sampler """basic""" +660 44 evaluator """rankbased""" +660 45 dataset """kinships""" +660 45 model """simple""" +660 45 loss """nssa""" +660 45 regularizer """no""" +660 45 optimizer """adadelta""" +660 45 training_loop """owa""" +660 45 negative_sampler """basic""" +660 45 evaluator """rankbased""" +660 46 dataset """kinships""" +660 46 model """simple""" +660 46 loss """nssa""" +660 46 regularizer """no""" +660 46 optimizer """adadelta""" +660 46 training_loop """owa""" +660 46 negative_sampler """basic""" +660 46 evaluator """rankbased""" +660 47 dataset """kinships""" +660 47 model """simple""" +660 47 loss """nssa""" +660 47 regularizer """no""" +660 47 optimizer """adadelta""" +660 47 training_loop """owa""" +660 47 
negative_sampler """basic""" +660 47 evaluator """rankbased""" +660 48 dataset """kinships""" +660 48 model """simple""" +660 48 loss """nssa""" +660 48 regularizer """no""" +660 48 optimizer """adadelta""" +660 48 training_loop """owa""" +660 48 negative_sampler """basic""" +660 48 evaluator """rankbased""" +660 49 dataset """kinships""" +660 49 model """simple""" +660 49 loss """nssa""" +660 49 regularizer """no""" +660 49 optimizer """adadelta""" +660 49 training_loop """owa""" +660 49 negative_sampler """basic""" +660 49 evaluator """rankbased""" +660 50 dataset """kinships""" +660 50 model """simple""" +660 50 loss """nssa""" +660 50 regularizer """no""" +660 50 optimizer """adadelta""" +660 50 training_loop """owa""" +660 50 negative_sampler """basic""" +660 50 evaluator """rankbased""" +660 51 dataset """kinships""" +660 51 model """simple""" +660 51 loss """nssa""" +660 51 regularizer """no""" +660 51 optimizer """adadelta""" +660 51 training_loop """owa""" +660 51 negative_sampler """basic""" +660 51 evaluator """rankbased""" +660 52 dataset """kinships""" +660 52 model """simple""" +660 52 loss """nssa""" +660 52 regularizer """no""" +660 52 optimizer """adadelta""" +660 52 training_loop """owa""" +660 52 negative_sampler """basic""" +660 52 evaluator """rankbased""" +660 53 dataset """kinships""" +660 53 model """simple""" +660 53 loss """nssa""" +660 53 regularizer """no""" +660 53 optimizer """adadelta""" +660 53 training_loop """owa""" +660 53 negative_sampler """basic""" +660 53 evaluator """rankbased""" +660 54 dataset """kinships""" +660 54 model """simple""" +660 54 loss """nssa""" +660 54 regularizer """no""" +660 54 optimizer """adadelta""" +660 54 training_loop """owa""" +660 54 negative_sampler """basic""" +660 54 evaluator """rankbased""" +660 55 dataset """kinships""" +660 55 model """simple""" +660 55 loss """nssa""" +660 55 regularizer """no""" +660 55 optimizer """adadelta""" +660 55 training_loop """owa""" +660 55 negative_sampler 
"""basic""" +660 55 evaluator """rankbased""" +660 56 dataset """kinships""" +660 56 model """simple""" +660 56 loss """nssa""" +660 56 regularizer """no""" +660 56 optimizer """adadelta""" +660 56 training_loop """owa""" +660 56 negative_sampler """basic""" +660 56 evaluator """rankbased""" +660 57 dataset """kinships""" +660 57 model """simple""" +660 57 loss """nssa""" +660 57 regularizer """no""" +660 57 optimizer """adadelta""" +660 57 training_loop """owa""" +660 57 negative_sampler """basic""" +660 57 evaluator """rankbased""" +660 58 dataset """kinships""" +660 58 model """simple""" +660 58 loss """nssa""" +660 58 regularizer """no""" +660 58 optimizer """adadelta""" +660 58 training_loop """owa""" +660 58 negative_sampler """basic""" +660 58 evaluator """rankbased""" +660 59 dataset """kinships""" +660 59 model """simple""" +660 59 loss """nssa""" +660 59 regularizer """no""" +660 59 optimizer """adadelta""" +660 59 training_loop """owa""" +660 59 negative_sampler """basic""" +660 59 evaluator """rankbased""" +660 60 dataset """kinships""" +660 60 model """simple""" +660 60 loss """nssa""" +660 60 regularizer """no""" +660 60 optimizer """adadelta""" +660 60 training_loop """owa""" +660 60 negative_sampler """basic""" +660 60 evaluator """rankbased""" +660 61 dataset """kinships""" +660 61 model """simple""" +660 61 loss """nssa""" +660 61 regularizer """no""" +660 61 optimizer """adadelta""" +660 61 training_loop """owa""" +660 61 negative_sampler """basic""" +660 61 evaluator """rankbased""" +660 62 dataset """kinships""" +660 62 model """simple""" +660 62 loss """nssa""" +660 62 regularizer """no""" +660 62 optimizer """adadelta""" +660 62 training_loop """owa""" +660 62 negative_sampler """basic""" +660 62 evaluator """rankbased""" +660 63 dataset """kinships""" +660 63 model """simple""" +660 63 loss """nssa""" +660 63 regularizer """no""" +660 63 optimizer """adadelta""" +660 63 training_loop """owa""" +660 63 negative_sampler """basic""" +660 63 
evaluator """rankbased""" +660 64 dataset """kinships""" +660 64 model """simple""" +660 64 loss """nssa""" +660 64 regularizer """no""" +660 64 optimizer """adadelta""" +660 64 training_loop """owa""" +660 64 negative_sampler """basic""" +660 64 evaluator """rankbased""" +660 65 dataset """kinships""" +660 65 model """simple""" +660 65 loss """nssa""" +660 65 regularizer """no""" +660 65 optimizer """adadelta""" +660 65 training_loop """owa""" +660 65 negative_sampler """basic""" +660 65 evaluator """rankbased""" +660 66 dataset """kinships""" +660 66 model """simple""" +660 66 loss """nssa""" +660 66 regularizer """no""" +660 66 optimizer """adadelta""" +660 66 training_loop """owa""" +660 66 negative_sampler """basic""" +660 66 evaluator """rankbased""" +660 67 dataset """kinships""" +660 67 model """simple""" +660 67 loss """nssa""" +660 67 regularizer """no""" +660 67 optimizer """adadelta""" +660 67 training_loop """owa""" +660 67 negative_sampler """basic""" +660 67 evaluator """rankbased""" +660 68 dataset """kinships""" +660 68 model """simple""" +660 68 loss """nssa""" +660 68 regularizer """no""" +660 68 optimizer """adadelta""" +660 68 training_loop """owa""" +660 68 negative_sampler """basic""" +660 68 evaluator """rankbased""" +660 69 dataset """kinships""" +660 69 model """simple""" +660 69 loss """nssa""" +660 69 regularizer """no""" +660 69 optimizer """adadelta""" +660 69 training_loop """owa""" +660 69 negative_sampler """basic""" +660 69 evaluator """rankbased""" +660 70 dataset """kinships""" +660 70 model """simple""" +660 70 loss """nssa""" +660 70 regularizer """no""" +660 70 optimizer """adadelta""" +660 70 training_loop """owa""" +660 70 negative_sampler """basic""" +660 70 evaluator """rankbased""" +660 71 dataset """kinships""" +660 71 model """simple""" +660 71 loss """nssa""" +660 71 regularizer """no""" +660 71 optimizer """adadelta""" +660 71 training_loop """owa""" +660 71 negative_sampler """basic""" +660 71 evaluator 
"""rankbased""" +660 72 dataset """kinships""" +660 72 model """simple""" +660 72 loss """nssa""" +660 72 regularizer """no""" +660 72 optimizer """adadelta""" +660 72 training_loop """owa""" +660 72 negative_sampler """basic""" +660 72 evaluator """rankbased""" +660 73 dataset """kinships""" +660 73 model """simple""" +660 73 loss """nssa""" +660 73 regularizer """no""" +660 73 optimizer """adadelta""" +660 73 training_loop """owa""" +660 73 negative_sampler """basic""" +660 73 evaluator """rankbased""" +660 74 dataset """kinships""" +660 74 model """simple""" +660 74 loss """nssa""" +660 74 regularizer """no""" +660 74 optimizer """adadelta""" +660 74 training_loop """owa""" +660 74 negative_sampler """basic""" +660 74 evaluator """rankbased""" +660 75 dataset """kinships""" +660 75 model """simple""" +660 75 loss """nssa""" +660 75 regularizer """no""" +660 75 optimizer """adadelta""" +660 75 training_loop """owa""" +660 75 negative_sampler """basic""" +660 75 evaluator """rankbased""" +660 76 dataset """kinships""" +660 76 model """simple""" +660 76 loss """nssa""" +660 76 regularizer """no""" +660 76 optimizer """adadelta""" +660 76 training_loop """owa""" +660 76 negative_sampler """basic""" +660 76 evaluator """rankbased""" +660 77 dataset """kinships""" +660 77 model """simple""" +660 77 loss """nssa""" +660 77 regularizer """no""" +660 77 optimizer """adadelta""" +660 77 training_loop """owa""" +660 77 negative_sampler """basic""" +660 77 evaluator """rankbased""" +660 78 dataset """kinships""" +660 78 model """simple""" +660 78 loss """nssa""" +660 78 regularizer """no""" +660 78 optimizer """adadelta""" +660 78 training_loop """owa""" +660 78 negative_sampler """basic""" +660 78 evaluator """rankbased""" +660 79 dataset """kinships""" +660 79 model """simple""" +660 79 loss """nssa""" +660 79 regularizer """no""" +660 79 optimizer """adadelta""" +660 79 training_loop """owa""" +660 79 negative_sampler """basic""" +660 79 evaluator """rankbased""" +660 80 
dataset """kinships""" +660 80 model """simple""" +660 80 loss """nssa""" +660 80 regularizer """no""" +660 80 optimizer """adadelta""" +660 80 training_loop """owa""" +660 80 negative_sampler """basic""" +660 80 evaluator """rankbased""" +660 81 dataset """kinships""" +660 81 model """simple""" +660 81 loss """nssa""" +660 81 regularizer """no""" +660 81 optimizer """adadelta""" +660 81 training_loop """owa""" +660 81 negative_sampler """basic""" +660 81 evaluator """rankbased""" +660 82 dataset """kinships""" +660 82 model """simple""" +660 82 loss """nssa""" +660 82 regularizer """no""" +660 82 optimizer """adadelta""" +660 82 training_loop """owa""" +660 82 negative_sampler """basic""" +660 82 evaluator """rankbased""" +660 83 dataset """kinships""" +660 83 model """simple""" +660 83 loss """nssa""" +660 83 regularizer """no""" +660 83 optimizer """adadelta""" +660 83 training_loop """owa""" +660 83 negative_sampler """basic""" +660 83 evaluator """rankbased""" +660 84 dataset """kinships""" +660 84 model """simple""" +660 84 loss """nssa""" +660 84 regularizer """no""" +660 84 optimizer """adadelta""" +660 84 training_loop """owa""" +660 84 negative_sampler """basic""" +660 84 evaluator """rankbased""" +660 85 dataset """kinships""" +660 85 model """simple""" +660 85 loss """nssa""" +660 85 regularizer """no""" +660 85 optimizer """adadelta""" +660 85 training_loop """owa""" +660 85 negative_sampler """basic""" +660 85 evaluator """rankbased""" +660 86 dataset """kinships""" +660 86 model """simple""" +660 86 loss """nssa""" +660 86 regularizer """no""" +660 86 optimizer """adadelta""" +660 86 training_loop """owa""" +660 86 negative_sampler """basic""" +660 86 evaluator """rankbased""" +660 87 dataset """kinships""" +660 87 model """simple""" +660 87 loss """nssa""" +660 87 regularizer """no""" +660 87 optimizer """adadelta""" +660 87 training_loop """owa""" +660 87 negative_sampler """basic""" +660 87 evaluator """rankbased""" +660 88 dataset """kinships""" 
+660 88 model """simple""" +660 88 loss """nssa""" +660 88 regularizer """no""" +660 88 optimizer """adadelta""" +660 88 training_loop """owa""" +660 88 negative_sampler """basic""" +660 88 evaluator """rankbased""" +660 89 dataset """kinships""" +660 89 model """simple""" +660 89 loss """nssa""" +660 89 regularizer """no""" +660 89 optimizer """adadelta""" +660 89 training_loop """owa""" +660 89 negative_sampler """basic""" +660 89 evaluator """rankbased""" +660 90 dataset """kinships""" +660 90 model """simple""" +660 90 loss """nssa""" +660 90 regularizer """no""" +660 90 optimizer """adadelta""" +660 90 training_loop """owa""" +660 90 negative_sampler """basic""" +660 90 evaluator """rankbased""" +660 91 dataset """kinships""" +660 91 model """simple""" +660 91 loss """nssa""" +660 91 regularizer """no""" +660 91 optimizer """adadelta""" +660 91 training_loop """owa""" +660 91 negative_sampler """basic""" +660 91 evaluator """rankbased""" +660 92 dataset """kinships""" +660 92 model """simple""" +660 92 loss """nssa""" +660 92 regularizer """no""" +660 92 optimizer """adadelta""" +660 92 training_loop """owa""" +660 92 negative_sampler """basic""" +660 92 evaluator """rankbased""" +660 93 dataset """kinships""" +660 93 model """simple""" +660 93 loss """nssa""" +660 93 regularizer """no""" +660 93 optimizer """adadelta""" +660 93 training_loop """owa""" +660 93 negative_sampler """basic""" +660 93 evaluator """rankbased""" +660 94 dataset """kinships""" +660 94 model """simple""" +660 94 loss """nssa""" +660 94 regularizer """no""" +660 94 optimizer """adadelta""" +660 94 training_loop """owa""" +660 94 negative_sampler """basic""" +660 94 evaluator """rankbased""" +660 95 dataset """kinships""" +660 95 model """simple""" +660 95 loss """nssa""" +660 95 regularizer """no""" +660 95 optimizer """adadelta""" +660 95 training_loop """owa""" +660 95 negative_sampler """basic""" +660 95 evaluator """rankbased""" +660 96 dataset """kinships""" +660 96 model 
"""simple""" +660 96 loss """nssa""" +660 96 regularizer """no""" +660 96 optimizer """adadelta""" +660 96 training_loop """owa""" +660 96 negative_sampler """basic""" +660 96 evaluator """rankbased""" +660 97 dataset """kinships""" +660 97 model """simple""" +660 97 loss """nssa""" +660 97 regularizer """no""" +660 97 optimizer """adadelta""" +660 97 training_loop """owa""" +660 97 negative_sampler """basic""" +660 97 evaluator """rankbased""" +660 98 dataset """kinships""" +660 98 model """simple""" +660 98 loss """nssa""" +660 98 regularizer """no""" +660 98 optimizer """adadelta""" +660 98 training_loop """owa""" +660 98 negative_sampler """basic""" +660 98 evaluator """rankbased""" +660 99 dataset """kinships""" +660 99 model """simple""" +660 99 loss """nssa""" +660 99 regularizer """no""" +660 99 optimizer """adadelta""" +660 99 training_loop """owa""" +660 99 negative_sampler """basic""" +660 99 evaluator """rankbased""" +660 100 dataset """kinships""" +660 100 model """simple""" +660 100 loss """nssa""" +660 100 regularizer """no""" +660 100 optimizer """adadelta""" +660 100 training_loop """owa""" +660 100 negative_sampler """basic""" +660 100 evaluator """rankbased""" +661 1 model.embedding_dim 0.0 +661 1 optimizer.lr 0.009923832486746224 +661 1 training.batch_size 2.0 +661 1 training.label_smoothing 0.001026841590856908 +661 2 model.embedding_dim 2.0 +661 2 optimizer.lr 0.017396890853312353 +661 2 training.batch_size 2.0 +661 2 training.label_smoothing 0.013195273409605114 +661 3 model.embedding_dim 0.0 +661 3 optimizer.lr 0.027442264067111304 +661 3 training.batch_size 1.0 +661 3 training.label_smoothing 0.00931426159603244 +661 4 model.embedding_dim 2.0 +661 4 optimizer.lr 0.01526518195531963 +661 4 training.batch_size 2.0 +661 4 training.label_smoothing 0.5963272889434531 +661 5 model.embedding_dim 2.0 +661 5 optimizer.lr 0.016458139343613416 +661 5 training.batch_size 1.0 +661 5 training.label_smoothing 0.7344476260633853 +661 6 model.embedding_dim 
1.0 +661 6 optimizer.lr 0.052258561713419305 +661 6 training.batch_size 2.0 +661 6 training.label_smoothing 0.044094636027390686 +661 7 model.embedding_dim 2.0 +661 7 optimizer.lr 0.00159893079855865 +661 7 training.batch_size 0.0 +661 7 training.label_smoothing 0.11138352497772719 +661 8 model.embedding_dim 1.0 +661 8 optimizer.lr 0.030171980929440696 +661 8 training.batch_size 0.0 +661 8 training.label_smoothing 0.07871125519709192 +661 9 model.embedding_dim 2.0 +661 9 optimizer.lr 0.0030987200335327505 +661 9 training.batch_size 0.0 +661 9 training.label_smoothing 0.09509296360515765 +661 10 model.embedding_dim 1.0 +661 10 optimizer.lr 0.0678121715870992 +661 10 training.batch_size 1.0 +661 10 training.label_smoothing 0.2516967948606761 +661 11 model.embedding_dim 1.0 +661 11 optimizer.lr 0.08228929573003615 +661 11 training.batch_size 0.0 +661 11 training.label_smoothing 0.002884938595863145 +661 12 model.embedding_dim 1.0 +661 12 optimizer.lr 0.018332312507918842 +661 12 training.batch_size 2.0 +661 12 training.label_smoothing 0.01457621886273265 +661 13 model.embedding_dim 2.0 +661 13 optimizer.lr 0.0011042741083442504 +661 13 training.batch_size 1.0 +661 13 training.label_smoothing 0.007169598335956367 +661 14 model.embedding_dim 0.0 +661 14 optimizer.lr 0.001501567400099283 +661 14 training.batch_size 1.0 +661 14 training.label_smoothing 0.21540411684007554 +661 15 model.embedding_dim 1.0 +661 15 optimizer.lr 0.0020649652869782972 +661 15 training.batch_size 1.0 +661 15 training.label_smoothing 0.0063209621068253665 +661 16 model.embedding_dim 0.0 +661 16 optimizer.lr 0.008812784790104876 +661 16 training.batch_size 1.0 +661 16 training.label_smoothing 0.015875954130302784 +661 17 model.embedding_dim 0.0 +661 17 optimizer.lr 0.013303921682144391 +661 17 training.batch_size 1.0 +661 17 training.label_smoothing 0.005318814348232925 +661 18 model.embedding_dim 2.0 +661 18 optimizer.lr 0.00198375441315213 +661 18 training.batch_size 1.0 +661 18 
training.label_smoothing 0.017919229691451984 +661 19 model.embedding_dim 1.0 +661 19 optimizer.lr 0.07003777306664907 +661 19 training.batch_size 1.0 +661 19 training.label_smoothing 0.005167125455653293 +661 20 model.embedding_dim 1.0 +661 20 optimizer.lr 0.003705497606783984 +661 20 training.batch_size 0.0 +661 20 training.label_smoothing 0.0027324695960918035 +661 21 model.embedding_dim 0.0 +661 21 optimizer.lr 0.008901530896789469 +661 21 training.batch_size 2.0 +661 21 training.label_smoothing 0.012279017466025317 +661 22 model.embedding_dim 2.0 +661 22 optimizer.lr 0.0028215948434193215 +661 22 training.batch_size 1.0 +661 22 training.label_smoothing 0.06399014652427575 +661 23 model.embedding_dim 2.0 +661 23 optimizer.lr 0.0019879131331875764 +661 23 training.batch_size 0.0 +661 23 training.label_smoothing 0.3841319157189366 +661 24 model.embedding_dim 0.0 +661 24 optimizer.lr 0.07018532565751232 +661 24 training.batch_size 2.0 +661 24 training.label_smoothing 0.4073355708779716 +661 25 model.embedding_dim 2.0 +661 25 optimizer.lr 0.057093631720893136 +661 25 training.batch_size 2.0 +661 25 training.label_smoothing 0.0010691618132397224 +661 26 model.embedding_dim 2.0 +661 26 optimizer.lr 0.035328996380765296 +661 26 training.batch_size 0.0 +661 26 training.label_smoothing 0.0021187682485635444 +661 27 model.embedding_dim 0.0 +661 27 optimizer.lr 0.01997203580346823 +661 27 training.batch_size 1.0 +661 27 training.label_smoothing 0.0021257270550790555 +661 28 model.embedding_dim 1.0 +661 28 optimizer.lr 0.0016714932475148157 +661 28 training.batch_size 0.0 +661 28 training.label_smoothing 0.002573192403577767 +661 29 model.embedding_dim 1.0 +661 29 optimizer.lr 0.0019302650616168195 +661 29 training.batch_size 1.0 +661 29 training.label_smoothing 0.0030256827758856023 +661 30 model.embedding_dim 0.0 +661 30 optimizer.lr 0.01850305929999617 +661 30 training.batch_size 0.0 +661 30 training.label_smoothing 0.0010183712760001714 +661 31 model.embedding_dim 0.0 
+661 31 optimizer.lr 0.006034773640109354 +661 31 training.batch_size 0.0 +661 31 training.label_smoothing 0.8689548622101794 +661 32 model.embedding_dim 2.0 +661 32 optimizer.lr 0.0020809735522324714 +661 32 training.batch_size 0.0 +661 32 training.label_smoothing 0.001114031890320423 +661 33 model.embedding_dim 0.0 +661 33 optimizer.lr 0.0010517900087165962 +661 33 training.batch_size 0.0 +661 33 training.label_smoothing 0.6638935812520946 +661 34 model.embedding_dim 0.0 +661 34 optimizer.lr 0.04484718642394149 +661 34 training.batch_size 0.0 +661 34 training.label_smoothing 0.2239074251449131 +661 35 model.embedding_dim 1.0 +661 35 optimizer.lr 0.028681020453954903 +661 35 training.batch_size 1.0 +661 35 training.label_smoothing 0.0044519929395267785 +661 36 model.embedding_dim 0.0 +661 36 optimizer.lr 0.011085884076269176 +661 36 training.batch_size 2.0 +661 36 training.label_smoothing 0.009885881959350305 +661 37 model.embedding_dim 0.0 +661 37 optimizer.lr 0.029913769002576026 +661 37 training.batch_size 2.0 +661 37 training.label_smoothing 0.0026384432893991868 +661 38 model.embedding_dim 0.0 +661 38 optimizer.lr 0.003658801993414283 +661 38 training.batch_size 2.0 +661 38 training.label_smoothing 0.05198942041370025 +661 39 model.embedding_dim 2.0 +661 39 optimizer.lr 0.001643876013523026 +661 39 training.batch_size 0.0 +661 39 training.label_smoothing 0.0014837155632617392 +661 40 model.embedding_dim 0.0 +661 40 optimizer.lr 0.004119600435721333 +661 40 training.batch_size 1.0 +661 40 training.label_smoothing 0.019989083791679425 +661 41 model.embedding_dim 1.0 +661 41 optimizer.lr 0.05281288326254431 +661 41 training.batch_size 0.0 +661 41 training.label_smoothing 0.0030278691980373685 +661 42 model.embedding_dim 0.0 +661 42 optimizer.lr 0.0061084092052433505 +661 42 training.batch_size 2.0 +661 42 training.label_smoothing 0.0019224261673363345 +661 43 model.embedding_dim 0.0 +661 43 optimizer.lr 0.04795924654697888 +661 43 training.batch_size 1.0 +661 43 
training.label_smoothing 0.021108093024094398 +661 44 model.embedding_dim 2.0 +661 44 optimizer.lr 0.00967453424954654 +661 44 training.batch_size 0.0 +661 44 training.label_smoothing 0.0032646111076809602 +661 45 model.embedding_dim 1.0 +661 45 optimizer.lr 0.0034760198700460876 +661 45 training.batch_size 0.0 +661 45 training.label_smoothing 0.21650153950292533 +661 46 model.embedding_dim 2.0 +661 46 optimizer.lr 0.0343156387948997 +661 46 training.batch_size 2.0 +661 46 training.label_smoothing 0.0026794512422377917 +661 47 model.embedding_dim 2.0 +661 47 optimizer.lr 0.06255053403328362 +661 47 training.batch_size 2.0 +661 47 training.label_smoothing 0.13121477901806503 +661 48 model.embedding_dim 1.0 +661 48 optimizer.lr 0.008202036422321332 +661 48 training.batch_size 0.0 +661 48 training.label_smoothing 0.002911736535865917 +661 49 model.embedding_dim 1.0 +661 49 optimizer.lr 0.004038432349976487 +661 49 training.batch_size 1.0 +661 49 training.label_smoothing 0.3358842384031617 +661 50 model.embedding_dim 0.0 +661 50 optimizer.lr 0.0029564480184229767 +661 50 training.batch_size 0.0 +661 50 training.label_smoothing 0.0028888120049991232 +661 51 model.embedding_dim 1.0 +661 51 optimizer.lr 0.01715489766065308 +661 51 training.batch_size 1.0 +661 51 training.label_smoothing 0.11720316250833267 +661 52 model.embedding_dim 0.0 +661 52 optimizer.lr 0.09031087162046243 +661 52 training.batch_size 1.0 +661 52 training.label_smoothing 0.6842126550149763 +661 53 model.embedding_dim 1.0 +661 53 optimizer.lr 0.00208068241025839 +661 53 training.batch_size 0.0 +661 53 training.label_smoothing 0.2668335320192208 +661 54 model.embedding_dim 2.0 +661 54 optimizer.lr 0.00674339663434038 +661 54 training.batch_size 1.0 +661 54 training.label_smoothing 0.001491324143333799 +661 55 model.embedding_dim 0.0 +661 55 optimizer.lr 0.01778398503992973 +661 55 training.batch_size 2.0 +661 55 training.label_smoothing 0.001174819688001718 +661 56 model.embedding_dim 0.0 +661 56 
optimizer.lr 0.004931424488300972 +661 56 training.batch_size 1.0 +661 56 training.label_smoothing 0.010478201507240189 +661 57 model.embedding_dim 0.0 +661 57 optimizer.lr 0.04512039784422102 +661 57 training.batch_size 2.0 +661 57 training.label_smoothing 0.023384473034090195 +661 58 model.embedding_dim 2.0 +661 58 optimizer.lr 0.014388588381539887 +661 58 training.batch_size 2.0 +661 58 training.label_smoothing 0.0016209066414253166 +661 59 model.embedding_dim 2.0 +661 59 optimizer.lr 0.054676566783677695 +661 59 training.batch_size 0.0 +661 59 training.label_smoothing 0.013891909924792788 +661 60 model.embedding_dim 0.0 +661 60 optimizer.lr 0.04389667211531556 +661 60 training.batch_size 0.0 +661 60 training.label_smoothing 0.005950185263091616 +661 61 model.embedding_dim 0.0 +661 61 optimizer.lr 0.0030320198043216817 +661 61 training.batch_size 0.0 +661 61 training.label_smoothing 0.06811572145112908 +661 62 model.embedding_dim 1.0 +661 62 optimizer.lr 0.001106900562390368 +661 62 training.batch_size 2.0 +661 62 training.label_smoothing 0.0017439071937096718 +661 63 model.embedding_dim 0.0 +661 63 optimizer.lr 0.01909698487501541 +661 63 training.batch_size 2.0 +661 63 training.label_smoothing 0.2181777440591485 +661 64 model.embedding_dim 1.0 +661 64 optimizer.lr 0.007316570591665013 +661 64 training.batch_size 1.0 +661 64 training.label_smoothing 0.3146696960988161 +661 65 model.embedding_dim 2.0 +661 65 optimizer.lr 0.015086921900822869 +661 65 training.batch_size 1.0 +661 65 training.label_smoothing 0.00747142952238715 +661 66 model.embedding_dim 0.0 +661 66 optimizer.lr 0.03089693585687574 +661 66 training.batch_size 1.0 +661 66 training.label_smoothing 0.0015255616371122485 +661 67 model.embedding_dim 0.0 +661 67 optimizer.lr 0.07684406176168516 +661 67 training.batch_size 1.0 +661 67 training.label_smoothing 0.042150009497247114 +661 68 model.embedding_dim 2.0 +661 68 optimizer.lr 0.00867015916442409 +661 68 training.batch_size 0.0 +661 68 
training.label_smoothing 0.0014913632674032717 +661 69 model.embedding_dim 1.0 +661 69 optimizer.lr 0.005251716100272713 +661 69 training.batch_size 0.0 +661 69 training.label_smoothing 0.0021793304427079004 +661 70 model.embedding_dim 2.0 +661 70 optimizer.lr 0.07460667541674797 +661 70 training.batch_size 1.0 +661 70 training.label_smoothing 0.006242127658418982 +661 71 model.embedding_dim 2.0 +661 71 optimizer.lr 0.08898432592726006 +661 71 training.batch_size 1.0 +661 71 training.label_smoothing 0.0015672329244026733 +661 72 model.embedding_dim 1.0 +661 72 optimizer.lr 0.08969763278489254 +661 72 training.batch_size 2.0 +661 72 training.label_smoothing 0.011338580034015512 +661 73 model.embedding_dim 0.0 +661 73 optimizer.lr 0.0012867870040993302 +661 73 training.batch_size 1.0 +661 73 training.label_smoothing 0.2187751819573699 +661 74 model.embedding_dim 2.0 +661 74 optimizer.lr 0.07156689537259352 +661 74 training.batch_size 2.0 +661 74 training.label_smoothing 0.007228281744711504 +661 75 model.embedding_dim 2.0 +661 75 optimizer.lr 0.004838951820462428 +661 75 training.batch_size 1.0 +661 75 training.label_smoothing 0.3885892358305372 +661 76 model.embedding_dim 2.0 +661 76 optimizer.lr 0.0014169433303282458 +661 76 training.batch_size 0.0 +661 76 training.label_smoothing 0.00119613059925206 +661 77 model.embedding_dim 2.0 +661 77 optimizer.lr 0.021783328473706245 +661 77 training.batch_size 1.0 +661 77 training.label_smoothing 0.43074548337239216 +661 78 model.embedding_dim 0.0 +661 78 optimizer.lr 0.027208737353452726 +661 78 training.batch_size 1.0 +661 78 training.label_smoothing 0.007853540197454562 +661 79 model.embedding_dim 2.0 +661 79 optimizer.lr 0.003861080275111834 +661 79 training.batch_size 2.0 +661 79 training.label_smoothing 0.024872412475831142 +661 80 model.embedding_dim 1.0 +661 80 optimizer.lr 0.04102879354706142 +661 80 training.batch_size 1.0 +661 80 training.label_smoothing 0.058879605115481404 +661 81 model.embedding_dim 1.0 +661 81 
optimizer.lr 0.0012569277683076852 +661 81 training.batch_size 2.0 +661 81 training.label_smoothing 0.0013626342338841336 +661 82 model.embedding_dim 0.0 +661 82 optimizer.lr 0.00644420024644895 +661 82 training.batch_size 1.0 +661 82 training.label_smoothing 0.016742995120376324 +661 83 model.embedding_dim 2.0 +661 83 optimizer.lr 0.002747037079041271 +661 83 training.batch_size 2.0 +661 83 training.label_smoothing 0.021349807890720936 +661 84 model.embedding_dim 1.0 +661 84 optimizer.lr 0.005166772529744142 +661 84 training.batch_size 2.0 +661 84 training.label_smoothing 0.003965106986464106 +661 85 model.embedding_dim 1.0 +661 85 optimizer.lr 0.05281406211939823 +661 85 training.batch_size 0.0 +661 85 training.label_smoothing 0.08933527599435688 +661 86 model.embedding_dim 0.0 +661 86 optimizer.lr 0.08822897950292799 +661 86 training.batch_size 2.0 +661 86 training.label_smoothing 0.8108605004419309 +661 87 model.embedding_dim 1.0 +661 87 optimizer.lr 0.004824106067054565 +661 87 training.batch_size 0.0 +661 87 training.label_smoothing 0.003366404431765019 +661 88 model.embedding_dim 2.0 +661 88 optimizer.lr 0.04722498389588031 +661 88 training.batch_size 1.0 +661 88 training.label_smoothing 0.05640421495440668 +661 89 model.embedding_dim 0.0 +661 89 optimizer.lr 0.0358765886768398 +661 89 training.batch_size 2.0 +661 89 training.label_smoothing 0.13271151286962046 +661 90 model.embedding_dim 2.0 +661 90 optimizer.lr 0.00824375808780176 +661 90 training.batch_size 0.0 +661 90 training.label_smoothing 0.21025364244467232 +661 91 model.embedding_dim 0.0 +661 91 optimizer.lr 0.0011401997077022524 +661 91 training.batch_size 1.0 +661 91 training.label_smoothing 0.737663799486786 +661 92 model.embedding_dim 1.0 +661 92 optimizer.lr 0.07665930170834205 +661 92 training.batch_size 2.0 +661 92 training.label_smoothing 0.014173801734078268 +661 93 model.embedding_dim 1.0 +661 93 optimizer.lr 0.09176776073490801 +661 93 training.batch_size 2.0 +661 93 
training.label_smoothing 0.003425523977576105 +661 94 model.embedding_dim 2.0 +661 94 optimizer.lr 0.004413768101188598 +661 94 training.batch_size 1.0 +661 94 training.label_smoothing 0.9418141139968543 +661 95 model.embedding_dim 2.0 +661 95 optimizer.lr 0.06346914714626466 +661 95 training.batch_size 0.0 +661 95 training.label_smoothing 0.7402334473988894 +661 96 model.embedding_dim 1.0 +661 96 optimizer.lr 0.0027627386070496187 +661 96 training.batch_size 2.0 +661 96 training.label_smoothing 0.05340862547876985 +661 97 model.embedding_dim 0.0 +661 97 optimizer.lr 0.08054790200806855 +661 97 training.batch_size 1.0 +661 97 training.label_smoothing 0.42441044746100254 +661 98 model.embedding_dim 1.0 +661 98 optimizer.lr 0.0011829547819072448 +661 98 training.batch_size 0.0 +661 98 training.label_smoothing 0.06066873827318412 +661 99 model.embedding_dim 1.0 +661 99 optimizer.lr 0.05032232698564598 +661 99 training.batch_size 0.0 +661 99 training.label_smoothing 0.0386801413204992 +661 100 model.embedding_dim 2.0 +661 100 optimizer.lr 0.04958871255363692 +661 100 training.batch_size 0.0 +661 100 training.label_smoothing 0.08395515205193056 +661 1 dataset """kinships""" +661 1 model """simple""" +661 1 loss """bceaftersigmoid""" +661 1 regularizer """no""" +661 1 optimizer """adam""" +661 1 training_loop """lcwa""" +661 1 evaluator """rankbased""" +661 2 dataset """kinships""" +661 2 model """simple""" +661 2 loss """bceaftersigmoid""" +661 2 regularizer """no""" +661 2 optimizer """adam""" +661 2 training_loop """lcwa""" +661 2 evaluator """rankbased""" +661 3 dataset """kinships""" +661 3 model """simple""" +661 3 loss """bceaftersigmoid""" +661 3 regularizer """no""" +661 3 optimizer """adam""" +661 3 training_loop """lcwa""" +661 3 evaluator """rankbased""" +661 4 dataset """kinships""" +661 4 model """simple""" +661 4 loss """bceaftersigmoid""" +661 4 regularizer """no""" +661 4 optimizer """adam""" +661 4 training_loop """lcwa""" +661 4 evaluator 
"""rankbased""" +661 5 dataset """kinships""" +661 5 model """simple""" +661 5 loss """bceaftersigmoid""" +661 5 regularizer """no""" +661 5 optimizer """adam""" +661 5 training_loop """lcwa""" +661 5 evaluator """rankbased""" +661 6 dataset """kinships""" +661 6 model """simple""" +661 6 loss """bceaftersigmoid""" +661 6 regularizer """no""" +661 6 optimizer """adam""" +661 6 training_loop """lcwa""" +661 6 evaluator """rankbased""" +661 7 dataset """kinships""" +661 7 model """simple""" +661 7 loss """bceaftersigmoid""" +661 7 regularizer """no""" +661 7 optimizer """adam""" +661 7 training_loop """lcwa""" +661 7 evaluator """rankbased""" +661 8 dataset """kinships""" +661 8 model """simple""" +661 8 loss """bceaftersigmoid""" +661 8 regularizer """no""" +661 8 optimizer """adam""" +661 8 training_loop """lcwa""" +661 8 evaluator """rankbased""" +661 9 dataset """kinships""" +661 9 model """simple""" +661 9 loss """bceaftersigmoid""" +661 9 regularizer """no""" +661 9 optimizer """adam""" +661 9 training_loop """lcwa""" +661 9 evaluator """rankbased""" +661 10 dataset """kinships""" +661 10 model """simple""" +661 10 loss """bceaftersigmoid""" +661 10 regularizer """no""" +661 10 optimizer """adam""" +661 10 training_loop """lcwa""" +661 10 evaluator """rankbased""" +661 11 dataset """kinships""" +661 11 model """simple""" +661 11 loss """bceaftersigmoid""" +661 11 regularizer """no""" +661 11 optimizer """adam""" +661 11 training_loop """lcwa""" +661 11 evaluator """rankbased""" +661 12 dataset """kinships""" +661 12 model """simple""" +661 12 loss """bceaftersigmoid""" +661 12 regularizer """no""" +661 12 optimizer """adam""" +661 12 training_loop """lcwa""" +661 12 evaluator """rankbased""" +661 13 dataset """kinships""" +661 13 model """simple""" +661 13 loss """bceaftersigmoid""" +661 13 regularizer """no""" +661 13 optimizer """adam""" +661 13 training_loop """lcwa""" +661 13 evaluator """rankbased""" +661 14 dataset """kinships""" +661 14 model 
"""simple""" +661 14 loss """bceaftersigmoid""" +661 14 regularizer """no""" +661 14 optimizer """adam""" +661 14 training_loop """lcwa""" +661 14 evaluator """rankbased""" +661 15 dataset """kinships""" +661 15 model """simple""" +661 15 loss """bceaftersigmoid""" +661 15 regularizer """no""" +661 15 optimizer """adam""" +661 15 training_loop """lcwa""" +661 15 evaluator """rankbased""" +661 16 dataset """kinships""" +661 16 model """simple""" +661 16 loss """bceaftersigmoid""" +661 16 regularizer """no""" +661 16 optimizer """adam""" +661 16 training_loop """lcwa""" +661 16 evaluator """rankbased""" +661 17 dataset """kinships""" +661 17 model """simple""" +661 17 loss """bceaftersigmoid""" +661 17 regularizer """no""" +661 17 optimizer """adam""" +661 17 training_loop """lcwa""" +661 17 evaluator """rankbased""" +661 18 dataset """kinships""" +661 18 model """simple""" +661 18 loss """bceaftersigmoid""" +661 18 regularizer """no""" +661 18 optimizer """adam""" +661 18 training_loop """lcwa""" +661 18 evaluator """rankbased""" +661 19 dataset """kinships""" +661 19 model """simple""" +661 19 loss """bceaftersigmoid""" +661 19 regularizer """no""" +661 19 optimizer """adam""" +661 19 training_loop """lcwa""" +661 19 evaluator """rankbased""" +661 20 dataset """kinships""" +661 20 model """simple""" +661 20 loss """bceaftersigmoid""" +661 20 regularizer """no""" +661 20 optimizer """adam""" +661 20 training_loop """lcwa""" +661 20 evaluator """rankbased""" +661 21 dataset """kinships""" +661 21 model """simple""" +661 21 loss """bceaftersigmoid""" +661 21 regularizer """no""" +661 21 optimizer """adam""" +661 21 training_loop """lcwa""" +661 21 evaluator """rankbased""" +661 22 dataset """kinships""" +661 22 model """simple""" +661 22 loss """bceaftersigmoid""" +661 22 regularizer """no""" +661 22 optimizer """adam""" +661 22 training_loop """lcwa""" +661 22 evaluator """rankbased""" +661 23 dataset """kinships""" +661 23 model """simple""" +661 23 loss 
"""bceaftersigmoid""" +661 23 regularizer """no""" +661 23 optimizer """adam""" +661 23 training_loop """lcwa""" +661 23 evaluator """rankbased""" +661 24 dataset """kinships""" +661 24 model """simple""" +661 24 loss """bceaftersigmoid""" +661 24 regularizer """no""" +661 24 optimizer """adam""" +661 24 training_loop """lcwa""" +661 24 evaluator """rankbased""" +661 25 dataset """kinships""" +661 25 model """simple""" +661 25 loss """bceaftersigmoid""" +661 25 regularizer """no""" +661 25 optimizer """adam""" +661 25 training_loop """lcwa""" +661 25 evaluator """rankbased""" +661 26 dataset """kinships""" +661 26 model """simple""" +661 26 loss """bceaftersigmoid""" +661 26 regularizer """no""" +661 26 optimizer """adam""" +661 26 training_loop """lcwa""" +661 26 evaluator """rankbased""" +661 27 dataset """kinships""" +661 27 model """simple""" +661 27 loss """bceaftersigmoid""" +661 27 regularizer """no""" +661 27 optimizer """adam""" +661 27 training_loop """lcwa""" +661 27 evaluator """rankbased""" +661 28 dataset """kinships""" +661 28 model """simple""" +661 28 loss """bceaftersigmoid""" +661 28 regularizer """no""" +661 28 optimizer """adam""" +661 28 training_loop """lcwa""" +661 28 evaluator """rankbased""" +661 29 dataset """kinships""" +661 29 model """simple""" +661 29 loss """bceaftersigmoid""" +661 29 regularizer """no""" +661 29 optimizer """adam""" +661 29 training_loop """lcwa""" +661 29 evaluator """rankbased""" +661 30 dataset """kinships""" +661 30 model """simple""" +661 30 loss """bceaftersigmoid""" +661 30 regularizer """no""" +661 30 optimizer """adam""" +661 30 training_loop """lcwa""" +661 30 evaluator """rankbased""" +661 31 dataset """kinships""" +661 31 model """simple""" +661 31 loss """bceaftersigmoid""" +661 31 regularizer """no""" +661 31 optimizer """adam""" +661 31 training_loop """lcwa""" +661 31 evaluator """rankbased""" +661 32 dataset """kinships""" +661 32 model """simple""" +661 32 loss """bceaftersigmoid""" +661 32 
regularizer """no""" +661 32 optimizer """adam""" +661 32 training_loop """lcwa""" +661 32 evaluator """rankbased""" +661 33 dataset """kinships""" +661 33 model """simple""" +661 33 loss """bceaftersigmoid""" +661 33 regularizer """no""" +661 33 optimizer """adam""" +661 33 training_loop """lcwa""" +661 33 evaluator """rankbased""" +661 34 dataset """kinships""" +661 34 model """simple""" +661 34 loss """bceaftersigmoid""" +661 34 regularizer """no""" +661 34 optimizer """adam""" +661 34 training_loop """lcwa""" +661 34 evaluator """rankbased""" +661 35 dataset """kinships""" +661 35 model """simple""" +661 35 loss """bceaftersigmoid""" +661 35 regularizer """no""" +661 35 optimizer """adam""" +661 35 training_loop """lcwa""" +661 35 evaluator """rankbased""" +661 36 dataset """kinships""" +661 36 model """simple""" +661 36 loss """bceaftersigmoid""" +661 36 regularizer """no""" +661 36 optimizer """adam""" +661 36 training_loop """lcwa""" +661 36 evaluator """rankbased""" +661 37 dataset """kinships""" +661 37 model """simple""" +661 37 loss """bceaftersigmoid""" +661 37 regularizer """no""" +661 37 optimizer """adam""" +661 37 training_loop """lcwa""" +661 37 evaluator """rankbased""" +661 38 dataset """kinships""" +661 38 model """simple""" +661 38 loss """bceaftersigmoid""" +661 38 regularizer """no""" +661 38 optimizer """adam""" +661 38 training_loop """lcwa""" +661 38 evaluator """rankbased""" +661 39 dataset """kinships""" +661 39 model """simple""" +661 39 loss """bceaftersigmoid""" +661 39 regularizer """no""" +661 39 optimizer """adam""" +661 39 training_loop """lcwa""" +661 39 evaluator """rankbased""" +661 40 dataset """kinships""" +661 40 model """simple""" +661 40 loss """bceaftersigmoid""" +661 40 regularizer """no""" +661 40 optimizer """adam""" +661 40 training_loop """lcwa""" +661 40 evaluator """rankbased""" +661 41 dataset """kinships""" +661 41 model """simple""" +661 41 loss """bceaftersigmoid""" +661 41 regularizer """no""" +661 41 
optimizer """adam""" +661 41 training_loop """lcwa""" +661 41 evaluator """rankbased""" +661 42 dataset """kinships""" +661 42 model """simple""" +661 42 loss """bceaftersigmoid""" +661 42 regularizer """no""" +661 42 optimizer """adam""" +661 42 training_loop """lcwa""" +661 42 evaluator """rankbased""" +661 43 dataset """kinships""" +661 43 model """simple""" +661 43 loss """bceaftersigmoid""" +661 43 regularizer """no""" +661 43 optimizer """adam""" +661 43 training_loop """lcwa""" +661 43 evaluator """rankbased""" +661 44 dataset """kinships""" +661 44 model """simple""" +661 44 loss """bceaftersigmoid""" +661 44 regularizer """no""" +661 44 optimizer """adam""" +661 44 training_loop """lcwa""" +661 44 evaluator """rankbased""" +661 45 dataset """kinships""" +661 45 model """simple""" +661 45 loss """bceaftersigmoid""" +661 45 regularizer """no""" +661 45 optimizer """adam""" +661 45 training_loop """lcwa""" +661 45 evaluator """rankbased""" +661 46 dataset """kinships""" +661 46 model """simple""" +661 46 loss """bceaftersigmoid""" +661 46 regularizer """no""" +661 46 optimizer """adam""" +661 46 training_loop """lcwa""" +661 46 evaluator """rankbased""" +661 47 dataset """kinships""" +661 47 model """simple""" +661 47 loss """bceaftersigmoid""" +661 47 regularizer """no""" +661 47 optimizer """adam""" +661 47 training_loop """lcwa""" +661 47 evaluator """rankbased""" +661 48 dataset """kinships""" +661 48 model """simple""" +661 48 loss """bceaftersigmoid""" +661 48 regularizer """no""" +661 48 optimizer """adam""" +661 48 training_loop """lcwa""" +661 48 evaluator """rankbased""" +661 49 dataset """kinships""" +661 49 model """simple""" +661 49 loss """bceaftersigmoid""" +661 49 regularizer """no""" +661 49 optimizer """adam""" +661 49 training_loop """lcwa""" +661 49 evaluator """rankbased""" +661 50 dataset """kinships""" +661 50 model """simple""" +661 50 loss """bceaftersigmoid""" +661 50 regularizer """no""" +661 50 optimizer """adam""" +661 50 
training_loop """lcwa""" +661 50 evaluator """rankbased""" +661 51 dataset """kinships""" +661 51 model """simple""" +661 51 loss """bceaftersigmoid""" +661 51 regularizer """no""" +661 51 optimizer """adam""" +661 51 training_loop """lcwa""" +661 51 evaluator """rankbased""" +661 52 dataset """kinships""" +661 52 model """simple""" +661 52 loss """bceaftersigmoid""" +661 52 regularizer """no""" +661 52 optimizer """adam""" +661 52 training_loop """lcwa""" +661 52 evaluator """rankbased""" +661 53 dataset """kinships""" +661 53 model """simple""" +661 53 loss """bceaftersigmoid""" +661 53 regularizer """no""" +661 53 optimizer """adam""" +661 53 training_loop """lcwa""" +661 53 evaluator """rankbased""" +661 54 dataset """kinships""" +661 54 model """simple""" +661 54 loss """bceaftersigmoid""" +661 54 regularizer """no""" +661 54 optimizer """adam""" +661 54 training_loop """lcwa""" +661 54 evaluator """rankbased""" +661 55 dataset """kinships""" +661 55 model """simple""" +661 55 loss """bceaftersigmoid""" +661 55 regularizer """no""" +661 55 optimizer """adam""" +661 55 training_loop """lcwa""" +661 55 evaluator """rankbased""" +661 56 dataset """kinships""" +661 56 model """simple""" +661 56 loss """bceaftersigmoid""" +661 56 regularizer """no""" +661 56 optimizer """adam""" +661 56 training_loop """lcwa""" +661 56 evaluator """rankbased""" +661 57 dataset """kinships""" +661 57 model """simple""" +661 57 loss """bceaftersigmoid""" +661 57 regularizer """no""" +661 57 optimizer """adam""" +661 57 training_loop """lcwa""" +661 57 evaluator """rankbased""" +661 58 dataset """kinships""" +661 58 model """simple""" +661 58 loss """bceaftersigmoid""" +661 58 regularizer """no""" +661 58 optimizer """adam""" +661 58 training_loop """lcwa""" +661 58 evaluator """rankbased""" +661 59 dataset """kinships""" +661 59 model """simple""" +661 59 loss """bceaftersigmoid""" +661 59 regularizer """no""" +661 59 optimizer """adam""" +661 59 training_loop """lcwa""" +661 59 
evaluator """rankbased""" +661 60 dataset """kinships""" +661 60 model """simple""" +661 60 loss """bceaftersigmoid""" +661 60 regularizer """no""" +661 60 optimizer """adam""" +661 60 training_loop """lcwa""" +661 60 evaluator """rankbased""" +661 61 dataset """kinships""" +661 61 model """simple""" +661 61 loss """bceaftersigmoid""" +661 61 regularizer """no""" +661 61 optimizer """adam""" +661 61 training_loop """lcwa""" +661 61 evaluator """rankbased""" +661 62 dataset """kinships""" +661 62 model """simple""" +661 62 loss """bceaftersigmoid""" +661 62 regularizer """no""" +661 62 optimizer """adam""" +661 62 training_loop """lcwa""" +661 62 evaluator """rankbased""" +661 63 dataset """kinships""" +661 63 model """simple""" +661 63 loss """bceaftersigmoid""" +661 63 regularizer """no""" +661 63 optimizer """adam""" +661 63 training_loop """lcwa""" +661 63 evaluator """rankbased""" +661 64 dataset """kinships""" +661 64 model """simple""" +661 64 loss """bceaftersigmoid""" +661 64 regularizer """no""" +661 64 optimizer """adam""" +661 64 training_loop """lcwa""" +661 64 evaluator """rankbased""" +661 65 dataset """kinships""" +661 65 model """simple""" +661 65 loss """bceaftersigmoid""" +661 65 regularizer """no""" +661 65 optimizer """adam""" +661 65 training_loop """lcwa""" +661 65 evaluator """rankbased""" +661 66 dataset """kinships""" +661 66 model """simple""" +661 66 loss """bceaftersigmoid""" +661 66 regularizer """no""" +661 66 optimizer """adam""" +661 66 training_loop """lcwa""" +661 66 evaluator """rankbased""" +661 67 dataset """kinships""" +661 67 model """simple""" +661 67 loss """bceaftersigmoid""" +661 67 regularizer """no""" +661 67 optimizer """adam""" +661 67 training_loop """lcwa""" +661 67 evaluator """rankbased""" +661 68 dataset """kinships""" +661 68 model """simple""" +661 68 loss """bceaftersigmoid""" +661 68 regularizer """no""" +661 68 optimizer """adam""" +661 68 training_loop """lcwa""" +661 68 evaluator """rankbased""" +661 69 
dataset """kinships""" +661 69 model """simple""" +661 69 loss """bceaftersigmoid""" +661 69 regularizer """no""" +661 69 optimizer """adam""" +661 69 training_loop """lcwa""" +661 69 evaluator """rankbased""" +661 70 dataset """kinships""" +661 70 model """simple""" +661 70 loss """bceaftersigmoid""" +661 70 regularizer """no""" +661 70 optimizer """adam""" +661 70 training_loop """lcwa""" +661 70 evaluator """rankbased""" +661 71 dataset """kinships""" +661 71 model """simple""" +661 71 loss """bceaftersigmoid""" +661 71 regularizer """no""" +661 71 optimizer """adam""" +661 71 training_loop """lcwa""" +661 71 evaluator """rankbased""" +661 72 dataset """kinships""" +661 72 model """simple""" +661 72 loss """bceaftersigmoid""" +661 72 regularizer """no""" +661 72 optimizer """adam""" +661 72 training_loop """lcwa""" +661 72 evaluator """rankbased""" +661 73 dataset """kinships""" +661 73 model """simple""" +661 73 loss """bceaftersigmoid""" +661 73 regularizer """no""" +661 73 optimizer """adam""" +661 73 training_loop """lcwa""" +661 73 evaluator """rankbased""" +661 74 dataset """kinships""" +661 74 model """simple""" +661 74 loss """bceaftersigmoid""" +661 74 regularizer """no""" +661 74 optimizer """adam""" +661 74 training_loop """lcwa""" +661 74 evaluator """rankbased""" +661 75 dataset """kinships""" +661 75 model """simple""" +661 75 loss """bceaftersigmoid""" +661 75 regularizer """no""" +661 75 optimizer """adam""" +661 75 training_loop """lcwa""" +661 75 evaluator """rankbased""" +661 76 dataset """kinships""" +661 76 model """simple""" +661 76 loss """bceaftersigmoid""" +661 76 regularizer """no""" +661 76 optimizer """adam""" +661 76 training_loop """lcwa""" +661 76 evaluator """rankbased""" +661 77 dataset """kinships""" +661 77 model """simple""" +661 77 loss """bceaftersigmoid""" +661 77 regularizer """no""" +661 77 optimizer """adam""" +661 77 training_loop """lcwa""" +661 77 evaluator """rankbased""" +661 78 dataset """kinships""" +661 78 model 
"""simple""" +661 78 loss """bceaftersigmoid""" +661 78 regularizer """no""" +661 78 optimizer """adam""" +661 78 training_loop """lcwa""" +661 78 evaluator """rankbased""" +661 79 dataset """kinships""" +661 79 model """simple""" +661 79 loss """bceaftersigmoid""" +661 79 regularizer """no""" +661 79 optimizer """adam""" +661 79 training_loop """lcwa""" +661 79 evaluator """rankbased""" +661 80 dataset """kinships""" +661 80 model """simple""" +661 80 loss """bceaftersigmoid""" +661 80 regularizer """no""" +661 80 optimizer """adam""" +661 80 training_loop """lcwa""" +661 80 evaluator """rankbased""" +661 81 dataset """kinships""" +661 81 model """simple""" +661 81 loss """bceaftersigmoid""" +661 81 regularizer """no""" +661 81 optimizer """adam""" +661 81 training_loop """lcwa""" +661 81 evaluator """rankbased""" +661 82 dataset """kinships""" +661 82 model """simple""" +661 82 loss """bceaftersigmoid""" +661 82 regularizer """no""" +661 82 optimizer """adam""" +661 82 training_loop """lcwa""" +661 82 evaluator """rankbased""" +661 83 dataset """kinships""" +661 83 model """simple""" +661 83 loss """bceaftersigmoid""" +661 83 regularizer """no""" +661 83 optimizer """adam""" +661 83 training_loop """lcwa""" +661 83 evaluator """rankbased""" +661 84 dataset """kinships""" +661 84 model """simple""" +661 84 loss """bceaftersigmoid""" +661 84 regularizer """no""" +661 84 optimizer """adam""" +661 84 training_loop """lcwa""" +661 84 evaluator """rankbased""" +661 85 dataset """kinships""" +661 85 model """simple""" +661 85 loss """bceaftersigmoid""" +661 85 regularizer """no""" +661 85 optimizer """adam""" +661 85 training_loop """lcwa""" +661 85 evaluator """rankbased""" +661 86 dataset """kinships""" +661 86 model """simple""" +661 86 loss """bceaftersigmoid""" +661 86 regularizer """no""" +661 86 optimizer """adam""" +661 86 training_loop """lcwa""" +661 86 evaluator """rankbased""" +661 87 dataset """kinships""" +661 87 model """simple""" +661 87 loss 
"""bceaftersigmoid""" +661 87 regularizer """no""" +661 87 optimizer """adam""" +661 87 training_loop """lcwa""" +661 87 evaluator """rankbased""" +661 88 dataset """kinships""" +661 88 model """simple""" +661 88 loss """bceaftersigmoid""" +661 88 regularizer """no""" +661 88 optimizer """adam""" +661 88 training_loop """lcwa""" +661 88 evaluator """rankbased""" +661 89 dataset """kinships""" +661 89 model """simple""" +661 89 loss """bceaftersigmoid""" +661 89 regularizer """no""" +661 89 optimizer """adam""" +661 89 training_loop """lcwa""" +661 89 evaluator """rankbased""" +661 90 dataset """kinships""" +661 90 model """simple""" +661 90 loss """bceaftersigmoid""" +661 90 regularizer """no""" +661 90 optimizer """adam""" +661 90 training_loop """lcwa""" +661 90 evaluator """rankbased""" +661 91 dataset """kinships""" +661 91 model """simple""" +661 91 loss """bceaftersigmoid""" +661 91 regularizer """no""" +661 91 optimizer """adam""" +661 91 training_loop """lcwa""" +661 91 evaluator """rankbased""" +661 92 dataset """kinships""" +661 92 model """simple""" +661 92 loss """bceaftersigmoid""" +661 92 regularizer """no""" +661 92 optimizer """adam""" +661 92 training_loop """lcwa""" +661 92 evaluator """rankbased""" +661 93 dataset """kinships""" +661 93 model """simple""" +661 93 loss """bceaftersigmoid""" +661 93 regularizer """no""" +661 93 optimizer """adam""" +661 93 training_loop """lcwa""" +661 93 evaluator """rankbased""" +661 94 dataset """kinships""" +661 94 model """simple""" +661 94 loss """bceaftersigmoid""" +661 94 regularizer """no""" +661 94 optimizer """adam""" +661 94 training_loop """lcwa""" +661 94 evaluator """rankbased""" +661 95 dataset """kinships""" +661 95 model """simple""" +661 95 loss """bceaftersigmoid""" +661 95 regularizer """no""" +661 95 optimizer """adam""" +661 95 training_loop """lcwa""" +661 95 evaluator """rankbased""" +661 96 dataset """kinships""" +661 96 model """simple""" +661 96 loss """bceaftersigmoid""" +661 96 
regularizer """no""" +661 96 optimizer """adam""" +661 96 training_loop """lcwa""" +661 96 evaluator """rankbased""" +661 97 dataset """kinships""" +661 97 model """simple""" +661 97 loss """bceaftersigmoid""" +661 97 regularizer """no""" +661 97 optimizer """adam""" +661 97 training_loop """lcwa""" +661 97 evaluator """rankbased""" +661 98 dataset """kinships""" +661 98 model """simple""" +661 98 loss """bceaftersigmoid""" +661 98 regularizer """no""" +661 98 optimizer """adam""" +661 98 training_loop """lcwa""" +661 98 evaluator """rankbased""" +661 99 dataset """kinships""" +661 99 model """simple""" +661 99 loss """bceaftersigmoid""" +661 99 regularizer """no""" +661 99 optimizer """adam""" +661 99 training_loop """lcwa""" +661 99 evaluator """rankbased""" +661 100 dataset """kinships""" +661 100 model """simple""" +661 100 loss """bceaftersigmoid""" +661 100 regularizer """no""" +661 100 optimizer """adam""" +661 100 training_loop """lcwa""" +661 100 evaluator """rankbased""" +662 1 model.embedding_dim 2.0 +662 1 optimizer.lr 0.010760199850754342 +662 1 training.batch_size 0.0 +662 1 training.label_smoothing 0.20395647835022485 +662 2 model.embedding_dim 2.0 +662 2 optimizer.lr 0.009329194475979925 +662 2 training.batch_size 0.0 +662 2 training.label_smoothing 0.23052995801876133 +662 3 model.embedding_dim 0.0 +662 3 optimizer.lr 0.044964167232048456 +662 3 training.batch_size 1.0 +662 3 training.label_smoothing 0.5192242130290711 +662 4 model.embedding_dim 1.0 +662 4 optimizer.lr 0.006435507374601824 +662 4 training.batch_size 1.0 +662 4 training.label_smoothing 0.011200290306528821 +662 5 model.embedding_dim 0.0 +662 5 optimizer.lr 0.02194185162167057 +662 5 training.batch_size 2.0 +662 5 training.label_smoothing 0.0012237478109681224 +662 6 model.embedding_dim 0.0 +662 6 optimizer.lr 0.021063913575358105 +662 6 training.batch_size 0.0 +662 6 training.label_smoothing 0.15289856104072602 +662 7 model.embedding_dim 2.0 +662 7 optimizer.lr 0.0025382167314774748 
+662 7 training.batch_size 0.0 +662 7 training.label_smoothing 0.0014170290617843584 +662 8 model.embedding_dim 1.0 +662 8 optimizer.lr 0.007082303460264558 +662 8 training.batch_size 2.0 +662 8 training.label_smoothing 0.14117213037232043 +662 9 model.embedding_dim 2.0 +662 9 optimizer.lr 0.006606180476489198 +662 9 training.batch_size 0.0 +662 9 training.label_smoothing 0.08034714489913732 +662 10 model.embedding_dim 2.0 +662 10 optimizer.lr 0.024765595149379645 +662 10 training.batch_size 2.0 +662 10 training.label_smoothing 0.5395373791830319 +662 11 model.embedding_dim 0.0 +662 11 optimizer.lr 0.09101879730392869 +662 11 training.batch_size 1.0 +662 11 training.label_smoothing 0.009219619277205519 +662 12 model.embedding_dim 2.0 +662 12 optimizer.lr 0.003808844838125828 +662 12 training.batch_size 0.0 +662 12 training.label_smoothing 0.0017161791536357922 +662 13 model.embedding_dim 1.0 +662 13 optimizer.lr 0.013509999542597568 +662 13 training.batch_size 2.0 +662 13 training.label_smoothing 0.03444698959222732 +662 14 model.embedding_dim 0.0 +662 14 optimizer.lr 0.058492193771618885 +662 14 training.batch_size 0.0 +662 14 training.label_smoothing 0.0019194118102468618 +662 15 model.embedding_dim 0.0 +662 15 optimizer.lr 0.02409870677358168 +662 15 training.batch_size 2.0 +662 15 training.label_smoothing 0.0010122351700704412 +662 16 model.embedding_dim 2.0 +662 16 optimizer.lr 0.0010385831630374193 +662 16 training.batch_size 0.0 +662 16 training.label_smoothing 0.029273639134905438 +662 17 model.embedding_dim 2.0 +662 17 optimizer.lr 0.0030369808956114456 +662 17 training.batch_size 1.0 +662 17 training.label_smoothing 0.14803147904376443 +662 18 model.embedding_dim 1.0 +662 18 optimizer.lr 0.010616672509161427 +662 18 training.batch_size 1.0 +662 18 training.label_smoothing 0.11222591725719808 +662 19 model.embedding_dim 0.0 +662 19 optimizer.lr 0.049222465361178046 +662 19 training.batch_size 2.0 +662 19 training.label_smoothing 0.7839337595492771 +662 20 
model.embedding_dim 1.0 +662 20 optimizer.lr 0.005357733414190799 +662 20 training.batch_size 1.0 +662 20 training.label_smoothing 0.22890842669608547 +662 21 model.embedding_dim 0.0 +662 21 optimizer.lr 0.01086884056524509 +662 21 training.batch_size 1.0 +662 21 training.label_smoothing 0.011783790307378451 +662 22 model.embedding_dim 1.0 +662 22 optimizer.lr 0.058270273242635603 +662 22 training.batch_size 2.0 +662 22 training.label_smoothing 0.009413639648274214 +662 23 model.embedding_dim 1.0 +662 23 optimizer.lr 0.004840067087880152 +662 23 training.batch_size 1.0 +662 23 training.label_smoothing 0.03220669066006941 +662 24 model.embedding_dim 0.0 +662 24 optimizer.lr 0.03325688594550552 +662 24 training.batch_size 1.0 +662 24 training.label_smoothing 0.031036924198696535 +662 25 model.embedding_dim 1.0 +662 25 optimizer.lr 0.010555371215857543 +662 25 training.batch_size 0.0 +662 25 training.label_smoothing 0.0016624073165072938 +662 26 model.embedding_dim 0.0 +662 26 optimizer.lr 0.037817680263872284 +662 26 training.batch_size 0.0 +662 26 training.label_smoothing 0.015501334430805546 +662 27 model.embedding_dim 1.0 +662 27 optimizer.lr 0.009044139874818792 +662 27 training.batch_size 0.0 +662 27 training.label_smoothing 0.4004138417638886 +662 28 model.embedding_dim 2.0 +662 28 optimizer.lr 0.00516879192223193 +662 28 training.batch_size 2.0 +662 28 training.label_smoothing 0.0037056838955055638 +662 29 model.embedding_dim 0.0 +662 29 optimizer.lr 0.0016452080469989102 +662 29 training.batch_size 2.0 +662 29 training.label_smoothing 0.014518257639658208 +662 30 model.embedding_dim 1.0 +662 30 optimizer.lr 0.0027993639559090624 +662 30 training.batch_size 0.0 +662 30 training.label_smoothing 0.009303929376313132 +662 31 model.embedding_dim 2.0 +662 31 optimizer.lr 0.02218497226480342 +662 31 training.batch_size 2.0 +662 31 training.label_smoothing 0.003661143582372154 +662 32 model.embedding_dim 0.0 +662 32 optimizer.lr 0.02036200071405344 +662 32 
training.batch_size 2.0 +662 32 training.label_smoothing 0.0034878361741574743 +662 33 model.embedding_dim 2.0 +662 33 optimizer.lr 0.05714273444079822 +662 33 training.batch_size 0.0 +662 33 training.label_smoothing 0.020527426289158456 +662 34 model.embedding_dim 2.0 +662 34 optimizer.lr 0.019473640532279596 +662 34 training.batch_size 0.0 +662 34 training.label_smoothing 0.8848218928843223 +662 35 model.embedding_dim 1.0 +662 35 optimizer.lr 0.04817287197356125 +662 35 training.batch_size 1.0 +662 35 training.label_smoothing 0.5953873365423467 +662 36 model.embedding_dim 0.0 +662 36 optimizer.lr 0.00146841425681373 +662 36 training.batch_size 1.0 +662 36 training.label_smoothing 0.31128327287946894 +662 37 model.embedding_dim 2.0 +662 37 optimizer.lr 0.01661408415244946 +662 37 training.batch_size 2.0 +662 37 training.label_smoothing 0.0011736182461157642 +662 38 model.embedding_dim 0.0 +662 38 optimizer.lr 0.0030755978229688847 +662 38 training.batch_size 0.0 +662 38 training.label_smoothing 0.02410026937072315 +662 39 model.embedding_dim 1.0 +662 39 optimizer.lr 0.024090830022028137 +662 39 training.batch_size 1.0 +662 39 training.label_smoothing 0.0064340739119124616 +662 40 model.embedding_dim 1.0 +662 40 optimizer.lr 0.004864937768834861 +662 40 training.batch_size 0.0 +662 40 training.label_smoothing 0.014194061346420178 +662 41 model.embedding_dim 1.0 +662 41 optimizer.lr 0.06067818865924593 +662 41 training.batch_size 2.0 +662 41 training.label_smoothing 0.8978101678859856 +662 42 model.embedding_dim 1.0 +662 42 optimizer.lr 0.00266225233452444 +662 42 training.batch_size 2.0 +662 42 training.label_smoothing 0.4464181051314655 +662 43 model.embedding_dim 0.0 +662 43 optimizer.lr 0.00126846235725052 +662 43 training.batch_size 2.0 +662 43 training.label_smoothing 0.023654509682269363 +662 44 model.embedding_dim 1.0 +662 44 optimizer.lr 0.02029677824809982 +662 44 training.batch_size 0.0 +662 44 training.label_smoothing 0.04591854009824173 +662 45 
model.embedding_dim 1.0 +662 45 optimizer.lr 0.001009035546938495 +662 45 training.batch_size 2.0 +662 45 training.label_smoothing 0.005071974150758511 +662 46 model.embedding_dim 1.0 +662 46 optimizer.lr 0.003299175577482165 +662 46 training.batch_size 2.0 +662 46 training.label_smoothing 0.0027935753425318744 +662 47 model.embedding_dim 0.0 +662 47 optimizer.lr 0.05779150751332216 +662 47 training.batch_size 0.0 +662 47 training.label_smoothing 0.006945410962482861 +662 48 model.embedding_dim 1.0 +662 48 optimizer.lr 0.019469846522715047 +662 48 training.batch_size 1.0 +662 48 training.label_smoothing 0.6146413105548703 +662 49 model.embedding_dim 0.0 +662 49 optimizer.lr 0.0019053410221928119 +662 49 training.batch_size 0.0 +662 49 training.label_smoothing 0.2777745295835002 +662 50 model.embedding_dim 1.0 +662 50 optimizer.lr 0.009479259006459004 +662 50 training.batch_size 0.0 +662 50 training.label_smoothing 0.23195928971528185 +662 51 model.embedding_dim 2.0 +662 51 optimizer.lr 0.016021459272453947 +662 51 training.batch_size 0.0 +662 51 training.label_smoothing 0.002595269076290442 +662 52 model.embedding_dim 2.0 +662 52 optimizer.lr 0.015367148556359234 +662 52 training.batch_size 2.0 +662 52 training.label_smoothing 0.0304205204311872 +662 53 model.embedding_dim 0.0 +662 53 optimizer.lr 0.04305730868583441 +662 53 training.batch_size 0.0 +662 53 training.label_smoothing 0.002749091734254293 +662 54 model.embedding_dim 1.0 +662 54 optimizer.lr 0.0013789672283542353 +662 54 training.batch_size 0.0 +662 54 training.label_smoothing 0.15696773296492944 +662 55 model.embedding_dim 1.0 +662 55 optimizer.lr 0.004755335397876227 +662 55 training.batch_size 2.0 +662 55 training.label_smoothing 0.1601400052691106 +662 56 model.embedding_dim 1.0 +662 56 optimizer.lr 0.011008248003034522 +662 56 training.batch_size 1.0 +662 56 training.label_smoothing 0.014812716580449118 +662 57 model.embedding_dim 1.0 +662 57 optimizer.lr 0.06193752767735919 +662 57 
training.batch_size 1.0 +662 57 training.label_smoothing 0.2852415907591197 +662 58 model.embedding_dim 1.0 +662 58 optimizer.lr 0.03654067091684141 +662 58 training.batch_size 2.0 +662 58 training.label_smoothing 0.03506960755202514 +662 59 model.embedding_dim 1.0 +662 59 optimizer.lr 0.002073755550689454 +662 59 training.batch_size 0.0 +662 59 training.label_smoothing 0.0022199118552379783 +662 60 model.embedding_dim 0.0 +662 60 optimizer.lr 0.028520081213638204 +662 60 training.batch_size 1.0 +662 60 training.label_smoothing 0.22985355055316145 +662 61 model.embedding_dim 0.0 +662 61 optimizer.lr 0.024987797986212087 +662 61 training.batch_size 1.0 +662 61 training.label_smoothing 0.0064702194963442245 +662 62 model.embedding_dim 1.0 +662 62 optimizer.lr 0.009111015123251838 +662 62 training.batch_size 0.0 +662 62 training.label_smoothing 0.01480109588345247 +662 63 model.embedding_dim 0.0 +662 63 optimizer.lr 0.0074868936573694546 +662 63 training.batch_size 2.0 +662 63 training.label_smoothing 0.2199852728108743 +662 64 model.embedding_dim 0.0 +662 64 optimizer.lr 0.054889908845633904 +662 64 training.batch_size 1.0 +662 64 training.label_smoothing 0.022051485176969633 +662 65 model.embedding_dim 1.0 +662 65 optimizer.lr 0.037684064116403175 +662 65 training.batch_size 0.0 +662 65 training.label_smoothing 0.39066067084804446 +662 66 model.embedding_dim 0.0 +662 66 optimizer.lr 0.001406732018150876 +662 66 training.batch_size 1.0 +662 66 training.label_smoothing 0.042197744112042974 +662 67 model.embedding_dim 0.0 +662 67 optimizer.lr 0.019392666591116565 +662 67 training.batch_size 2.0 +662 67 training.label_smoothing 0.02824720970236531 +662 68 model.embedding_dim 2.0 +662 68 optimizer.lr 0.012755876639244688 +662 68 training.batch_size 2.0 +662 68 training.label_smoothing 0.05490093757565299 +662 69 model.embedding_dim 0.0 +662 69 optimizer.lr 0.0019633774067374585 +662 69 training.batch_size 1.0 +662 69 training.label_smoothing 0.4241430083914226 +662 70 
model.embedding_dim 0.0 +662 70 optimizer.lr 0.016737192750455945 +662 70 training.batch_size 0.0 +662 70 training.label_smoothing 0.0032799443598612293 +662 71 model.embedding_dim 2.0 +662 71 optimizer.lr 0.010993080441029444 +662 71 training.batch_size 2.0 +662 71 training.label_smoothing 0.19576980761774246 +662 72 model.embedding_dim 0.0 +662 72 optimizer.lr 0.00562046822523603 +662 72 training.batch_size 0.0 +662 72 training.label_smoothing 0.016337165089532327 +662 73 model.embedding_dim 1.0 +662 73 optimizer.lr 0.001929634738383227 +662 73 training.batch_size 0.0 +662 73 training.label_smoothing 0.005142850755077112 +662 74 model.embedding_dim 1.0 +662 74 optimizer.lr 0.001605073984675644 +662 74 training.batch_size 1.0 +662 74 training.label_smoothing 0.0022804638484277893 +662 75 model.embedding_dim 2.0 +662 75 optimizer.lr 0.007916534934019386 +662 75 training.batch_size 0.0 +662 75 training.label_smoothing 0.014054827187024084 +662 76 model.embedding_dim 1.0 +662 76 optimizer.lr 0.009981979812581684 +662 76 training.batch_size 0.0 +662 76 training.label_smoothing 0.19016956283365427 +662 77 model.embedding_dim 0.0 +662 77 optimizer.lr 0.0010535997914947 +662 77 training.batch_size 1.0 +662 77 training.label_smoothing 0.007431389809293919 +662 78 model.embedding_dim 0.0 +662 78 optimizer.lr 0.05449083192477216 +662 78 training.batch_size 0.0 +662 78 training.label_smoothing 0.04959619098281043 +662 79 model.embedding_dim 2.0 +662 79 optimizer.lr 0.004677691789653174 +662 79 training.batch_size 2.0 +662 79 training.label_smoothing 0.010758548394486759 +662 80 model.embedding_dim 1.0 +662 80 optimizer.lr 0.03171396536830538 +662 80 training.batch_size 2.0 +662 80 training.label_smoothing 0.2735379247910339 +662 81 model.embedding_dim 1.0 +662 81 optimizer.lr 0.03269703072857651 +662 81 training.batch_size 0.0 +662 81 training.label_smoothing 0.0076567493613971666 +662 82 model.embedding_dim 2.0 +662 82 optimizer.lr 0.08308682945345015 +662 82 
training.batch_size 1.0 +662 82 training.label_smoothing 0.4856596734987208 +662 83 model.embedding_dim 1.0 +662 83 optimizer.lr 0.04065720370222945 +662 83 training.batch_size 2.0 +662 83 training.label_smoothing 0.06494718420743449 +662 84 model.embedding_dim 2.0 +662 84 optimizer.lr 0.0748716504926183 +662 84 training.batch_size 0.0 +662 84 training.label_smoothing 0.02035778780805512 +662 85 model.embedding_dim 0.0 +662 85 optimizer.lr 0.012778265362494821 +662 85 training.batch_size 0.0 +662 85 training.label_smoothing 0.2314758533579176 +662 86 model.embedding_dim 0.0 +662 86 optimizer.lr 0.004760191176817038 +662 86 training.batch_size 0.0 +662 86 training.label_smoothing 0.045816353155663965 +662 87 model.embedding_dim 1.0 +662 87 optimizer.lr 0.0049866951334403215 +662 87 training.batch_size 1.0 +662 87 training.label_smoothing 0.10465152887581246 +662 88 model.embedding_dim 0.0 +662 88 optimizer.lr 0.02884460792615045 +662 88 training.batch_size 0.0 +662 88 training.label_smoothing 0.015173236428634268 +662 89 model.embedding_dim 1.0 +662 89 optimizer.lr 0.018451570863605646 +662 89 training.batch_size 1.0 +662 89 training.label_smoothing 0.02593582014807915 +662 90 model.embedding_dim 2.0 +662 90 optimizer.lr 0.02983875024003683 +662 90 training.batch_size 1.0 +662 90 training.label_smoothing 0.026067456185749585 +662 91 model.embedding_dim 0.0 +662 91 optimizer.lr 0.01629441633789723 +662 91 training.batch_size 2.0 +662 91 training.label_smoothing 0.3426895469076887 +662 92 model.embedding_dim 2.0 +662 92 optimizer.lr 0.02659478831385783 +662 92 training.batch_size 0.0 +662 92 training.label_smoothing 0.0011021122787875383 +662 93 model.embedding_dim 0.0 +662 93 optimizer.lr 0.0243982722918075 +662 93 training.batch_size 0.0 +662 93 training.label_smoothing 0.02968521567048651 +662 94 model.embedding_dim 0.0 +662 94 optimizer.lr 0.02301533668875662 +662 94 training.batch_size 2.0 +662 94 training.label_smoothing 0.13624443124314445 +662 95 
model.embedding_dim 2.0 +662 95 optimizer.lr 0.009949499746642705 +662 95 training.batch_size 0.0 +662 95 training.label_smoothing 0.0010610169724156624 +662 96 model.embedding_dim 2.0 +662 96 optimizer.lr 0.001355074488524283 +662 96 training.batch_size 1.0 +662 96 training.label_smoothing 0.9852830879174134 +662 97 model.embedding_dim 2.0 +662 97 optimizer.lr 0.01789798222819446 +662 97 training.batch_size 0.0 +662 97 training.label_smoothing 0.009771179993503 +662 98 model.embedding_dim 1.0 +662 98 optimizer.lr 0.0010699866582897215 +662 98 training.batch_size 1.0 +662 98 training.label_smoothing 0.017114775409192394 +662 99 model.embedding_dim 2.0 +662 99 optimizer.lr 0.004177967990147244 +662 99 training.batch_size 0.0 +662 99 training.label_smoothing 0.0023352094435303197 +662 100 model.embedding_dim 0.0 +662 100 optimizer.lr 0.004654199817799693 +662 100 training.batch_size 2.0 +662 100 training.label_smoothing 0.9158003356130303 +662 1 dataset """kinships""" +662 1 model """simple""" +662 1 loss """softplus""" +662 1 regularizer """no""" +662 1 optimizer """adam""" +662 1 training_loop """lcwa""" +662 1 evaluator """rankbased""" +662 2 dataset """kinships""" +662 2 model """simple""" +662 2 loss """softplus""" +662 2 regularizer """no""" +662 2 optimizer """adam""" +662 2 training_loop """lcwa""" +662 2 evaluator """rankbased""" +662 3 dataset """kinships""" +662 3 model """simple""" +662 3 loss """softplus""" +662 3 regularizer """no""" +662 3 optimizer """adam""" +662 3 training_loop """lcwa""" +662 3 evaluator """rankbased""" +662 4 dataset """kinships""" +662 4 model """simple""" +662 4 loss """softplus""" +662 4 regularizer """no""" +662 4 optimizer """adam""" +662 4 training_loop """lcwa""" +662 4 evaluator """rankbased""" +662 5 dataset """kinships""" +662 5 model """simple""" +662 5 loss """softplus""" +662 5 regularizer """no""" +662 5 optimizer """adam""" +662 5 training_loop """lcwa""" +662 5 evaluator """rankbased""" +662 6 dataset 
"""kinships""" +662 6 model """simple""" +662 6 loss """softplus""" +662 6 regularizer """no""" +662 6 optimizer """adam""" +662 6 training_loop """lcwa""" +662 6 evaluator """rankbased""" +662 7 dataset """kinships""" +662 7 model """simple""" +662 7 loss """softplus""" +662 7 regularizer """no""" +662 7 optimizer """adam""" +662 7 training_loop """lcwa""" +662 7 evaluator """rankbased""" +662 8 dataset """kinships""" +662 8 model """simple""" +662 8 loss """softplus""" +662 8 regularizer """no""" +662 8 optimizer """adam""" +662 8 training_loop """lcwa""" +662 8 evaluator """rankbased""" +662 9 dataset """kinships""" +662 9 model """simple""" +662 9 loss """softplus""" +662 9 regularizer """no""" +662 9 optimizer """adam""" +662 9 training_loop """lcwa""" +662 9 evaluator """rankbased""" +662 10 dataset """kinships""" +662 10 model """simple""" +662 10 loss """softplus""" +662 10 regularizer """no""" +662 10 optimizer """adam""" +662 10 training_loop """lcwa""" +662 10 evaluator """rankbased""" +662 11 dataset """kinships""" +662 11 model """simple""" +662 11 loss """softplus""" +662 11 regularizer """no""" +662 11 optimizer """adam""" +662 11 training_loop """lcwa""" +662 11 evaluator """rankbased""" +662 12 dataset """kinships""" +662 12 model """simple""" +662 12 loss """softplus""" +662 12 regularizer """no""" +662 12 optimizer """adam""" +662 12 training_loop """lcwa""" +662 12 evaluator """rankbased""" +662 13 dataset """kinships""" +662 13 model """simple""" +662 13 loss """softplus""" +662 13 regularizer """no""" +662 13 optimizer """adam""" +662 13 training_loop """lcwa""" +662 13 evaluator """rankbased""" +662 14 dataset """kinships""" +662 14 model """simple""" +662 14 loss """softplus""" +662 14 regularizer """no""" +662 14 optimizer """adam""" +662 14 training_loop """lcwa""" +662 14 evaluator """rankbased""" +662 15 dataset """kinships""" +662 15 model """simple""" +662 15 loss """softplus""" +662 15 regularizer """no""" +662 15 optimizer """adam""" 
+662 15 training_loop """lcwa""" +662 15 evaluator """rankbased""" +662 16 dataset """kinships""" +662 16 model """simple""" +662 16 loss """softplus""" +662 16 regularizer """no""" +662 16 optimizer """adam""" +662 16 training_loop """lcwa""" +662 16 evaluator """rankbased""" +662 17 dataset """kinships""" +662 17 model """simple""" +662 17 loss """softplus""" +662 17 regularizer """no""" +662 17 optimizer """adam""" +662 17 training_loop """lcwa""" +662 17 evaluator """rankbased""" +662 18 dataset """kinships""" +662 18 model """simple""" +662 18 loss """softplus""" +662 18 regularizer """no""" +662 18 optimizer """adam""" +662 18 training_loop """lcwa""" +662 18 evaluator """rankbased""" +662 19 dataset """kinships""" +662 19 model """simple""" +662 19 loss """softplus""" +662 19 regularizer """no""" +662 19 optimizer """adam""" +662 19 training_loop """lcwa""" +662 19 evaluator """rankbased""" +662 20 dataset """kinships""" +662 20 model """simple""" +662 20 loss """softplus""" +662 20 regularizer """no""" +662 20 optimizer """adam""" +662 20 training_loop """lcwa""" +662 20 evaluator """rankbased""" +662 21 dataset """kinships""" +662 21 model """simple""" +662 21 loss """softplus""" +662 21 regularizer """no""" +662 21 optimizer """adam""" +662 21 training_loop """lcwa""" +662 21 evaluator """rankbased""" +662 22 dataset """kinships""" +662 22 model """simple""" +662 22 loss """softplus""" +662 22 regularizer """no""" +662 22 optimizer """adam""" +662 22 training_loop """lcwa""" +662 22 evaluator """rankbased""" +662 23 dataset """kinships""" +662 23 model """simple""" +662 23 loss """softplus""" +662 23 regularizer """no""" +662 23 optimizer """adam""" +662 23 training_loop """lcwa""" +662 23 evaluator """rankbased""" +662 24 dataset """kinships""" +662 24 model """simple""" +662 24 loss """softplus""" +662 24 regularizer """no""" +662 24 optimizer """adam""" +662 24 training_loop """lcwa""" +662 24 evaluator """rankbased""" +662 25 dataset """kinships""" 
+662 25 model """simple""" +662 25 loss """softplus""" +662 25 regularizer """no""" +662 25 optimizer """adam""" +662 25 training_loop """lcwa""" +662 25 evaluator """rankbased""" +662 26 dataset """kinships""" +662 26 model """simple""" +662 26 loss """softplus""" +662 26 regularizer """no""" +662 26 optimizer """adam""" +662 26 training_loop """lcwa""" +662 26 evaluator """rankbased""" +662 27 dataset """kinships""" +662 27 model """simple""" +662 27 loss """softplus""" +662 27 regularizer """no""" +662 27 optimizer """adam""" +662 27 training_loop """lcwa""" +662 27 evaluator """rankbased""" +662 28 dataset """kinships""" +662 28 model """simple""" +662 28 loss """softplus""" +662 28 regularizer """no""" +662 28 optimizer """adam""" +662 28 training_loop """lcwa""" +662 28 evaluator """rankbased""" +662 29 dataset """kinships""" +662 29 model """simple""" +662 29 loss """softplus""" +662 29 regularizer """no""" +662 29 optimizer """adam""" +662 29 training_loop """lcwa""" +662 29 evaluator """rankbased""" +662 30 dataset """kinships""" +662 30 model """simple""" +662 30 loss """softplus""" +662 30 regularizer """no""" +662 30 optimizer """adam""" +662 30 training_loop """lcwa""" +662 30 evaluator """rankbased""" +662 31 dataset """kinships""" +662 31 model """simple""" +662 31 loss """softplus""" +662 31 regularizer """no""" +662 31 optimizer """adam""" +662 31 training_loop """lcwa""" +662 31 evaluator """rankbased""" +662 32 dataset """kinships""" +662 32 model """simple""" +662 32 loss """softplus""" +662 32 regularizer """no""" +662 32 optimizer """adam""" +662 32 training_loop """lcwa""" +662 32 evaluator """rankbased""" +662 33 dataset """kinships""" +662 33 model """simple""" +662 33 loss """softplus""" +662 33 regularizer """no""" +662 33 optimizer """adam""" +662 33 training_loop """lcwa""" +662 33 evaluator """rankbased""" +662 34 dataset """kinships""" +662 34 model """simple""" +662 34 loss """softplus""" +662 34 regularizer """no""" +662 34 
optimizer """adam""" +662 34 training_loop """lcwa""" +662 34 evaluator """rankbased""" +662 35 dataset """kinships""" +662 35 model """simple""" +662 35 loss """softplus""" +662 35 regularizer """no""" +662 35 optimizer """adam""" +662 35 training_loop """lcwa""" +662 35 evaluator """rankbased""" +662 36 dataset """kinships""" +662 36 model """simple""" +662 36 loss """softplus""" +662 36 regularizer """no""" +662 36 optimizer """adam""" +662 36 training_loop """lcwa""" +662 36 evaluator """rankbased""" +662 37 dataset """kinships""" +662 37 model """simple""" +662 37 loss """softplus""" +662 37 regularizer """no""" +662 37 optimizer """adam""" +662 37 training_loop """lcwa""" +662 37 evaluator """rankbased""" +662 38 dataset """kinships""" +662 38 model """simple""" +662 38 loss """softplus""" +662 38 regularizer """no""" +662 38 optimizer """adam""" +662 38 training_loop """lcwa""" +662 38 evaluator """rankbased""" +662 39 dataset """kinships""" +662 39 model """simple""" +662 39 loss """softplus""" +662 39 regularizer """no""" +662 39 optimizer """adam""" +662 39 training_loop """lcwa""" +662 39 evaluator """rankbased""" +662 40 dataset """kinships""" +662 40 model """simple""" +662 40 loss """softplus""" +662 40 regularizer """no""" +662 40 optimizer """adam""" +662 40 training_loop """lcwa""" +662 40 evaluator """rankbased""" +662 41 dataset """kinships""" +662 41 model """simple""" +662 41 loss """softplus""" +662 41 regularizer """no""" +662 41 optimizer """adam""" +662 41 training_loop """lcwa""" +662 41 evaluator """rankbased""" +662 42 dataset """kinships""" +662 42 model """simple""" +662 42 loss """softplus""" +662 42 regularizer """no""" +662 42 optimizer """adam""" +662 42 training_loop """lcwa""" +662 42 evaluator """rankbased""" +662 43 dataset """kinships""" +662 43 model """simple""" +662 43 loss """softplus""" +662 43 regularizer """no""" +662 43 optimizer """adam""" +662 43 training_loop """lcwa""" +662 43 evaluator """rankbased""" +662 44 
dataset """kinships""" +662 44 model """simple""" +662 44 loss """softplus""" +662 44 regularizer """no""" +662 44 optimizer """adam""" +662 44 training_loop """lcwa""" +662 44 evaluator """rankbased""" +662 45 dataset """kinships""" +662 45 model """simple""" +662 45 loss """softplus""" +662 45 regularizer """no""" +662 45 optimizer """adam""" +662 45 training_loop """lcwa""" +662 45 evaluator """rankbased""" +662 46 dataset """kinships""" +662 46 model """simple""" +662 46 loss """softplus""" +662 46 regularizer """no""" +662 46 optimizer """adam""" +662 46 training_loop """lcwa""" +662 46 evaluator """rankbased""" +662 47 dataset """kinships""" +662 47 model """simple""" +662 47 loss """softplus""" +662 47 regularizer """no""" +662 47 optimizer """adam""" +662 47 training_loop """lcwa""" +662 47 evaluator """rankbased""" +662 48 dataset """kinships""" +662 48 model """simple""" +662 48 loss """softplus""" +662 48 regularizer """no""" +662 48 optimizer """adam""" +662 48 training_loop """lcwa""" +662 48 evaluator """rankbased""" +662 49 dataset """kinships""" +662 49 model """simple""" +662 49 loss """softplus""" +662 49 regularizer """no""" +662 49 optimizer """adam""" +662 49 training_loop """lcwa""" +662 49 evaluator """rankbased""" +662 50 dataset """kinships""" +662 50 model """simple""" +662 50 loss """softplus""" +662 50 regularizer """no""" +662 50 optimizer """adam""" +662 50 training_loop """lcwa""" +662 50 evaluator """rankbased""" +662 51 dataset """kinships""" +662 51 model """simple""" +662 51 loss """softplus""" +662 51 regularizer """no""" +662 51 optimizer """adam""" +662 51 training_loop """lcwa""" +662 51 evaluator """rankbased""" +662 52 dataset """kinships""" +662 52 model """simple""" +662 52 loss """softplus""" +662 52 regularizer """no""" +662 52 optimizer """adam""" +662 52 training_loop """lcwa""" +662 52 evaluator """rankbased""" +662 53 dataset """kinships""" +662 53 model """simple""" +662 53 loss """softplus""" +662 53 regularizer 
"""no""" +662 53 optimizer """adam""" +662 53 training_loop """lcwa""" +662 53 evaluator """rankbased""" +662 54 dataset """kinships""" +662 54 model """simple""" +662 54 loss """softplus""" +662 54 regularizer """no""" +662 54 optimizer """adam""" +662 54 training_loop """lcwa""" +662 54 evaluator """rankbased""" +662 55 dataset """kinships""" +662 55 model """simple""" +662 55 loss """softplus""" +662 55 regularizer """no""" +662 55 optimizer """adam""" +662 55 training_loop """lcwa""" +662 55 evaluator """rankbased""" +662 56 dataset """kinships""" +662 56 model """simple""" +662 56 loss """softplus""" +662 56 regularizer """no""" +662 56 optimizer """adam""" +662 56 training_loop """lcwa""" +662 56 evaluator """rankbased""" +662 57 dataset """kinships""" +662 57 model """simple""" +662 57 loss """softplus""" +662 57 regularizer """no""" +662 57 optimizer """adam""" +662 57 training_loop """lcwa""" +662 57 evaluator """rankbased""" +662 58 dataset """kinships""" +662 58 model """simple""" +662 58 loss """softplus""" +662 58 regularizer """no""" +662 58 optimizer """adam""" +662 58 training_loop """lcwa""" +662 58 evaluator """rankbased""" +662 59 dataset """kinships""" +662 59 model """simple""" +662 59 loss """softplus""" +662 59 regularizer """no""" +662 59 optimizer """adam""" +662 59 training_loop """lcwa""" +662 59 evaluator """rankbased""" +662 60 dataset """kinships""" +662 60 model """simple""" +662 60 loss """softplus""" +662 60 regularizer """no""" +662 60 optimizer """adam""" +662 60 training_loop """lcwa""" +662 60 evaluator """rankbased""" +662 61 dataset """kinships""" +662 61 model """simple""" +662 61 loss """softplus""" +662 61 regularizer """no""" +662 61 optimizer """adam""" +662 61 training_loop """lcwa""" +662 61 evaluator """rankbased""" +662 62 dataset """kinships""" +662 62 model """simple""" +662 62 loss """softplus""" +662 62 regularizer """no""" +662 62 optimizer """adam""" +662 62 training_loop """lcwa""" +662 62 evaluator 
"""rankbased""" +662 63 dataset """kinships""" +662 63 model """simple""" +662 63 loss """softplus""" +662 63 regularizer """no""" +662 63 optimizer """adam""" +662 63 training_loop """lcwa""" +662 63 evaluator """rankbased""" +662 64 dataset """kinships""" +662 64 model """simple""" +662 64 loss """softplus""" +662 64 regularizer """no""" +662 64 optimizer """adam""" +662 64 training_loop """lcwa""" +662 64 evaluator """rankbased""" +662 65 dataset """kinships""" +662 65 model """simple""" +662 65 loss """softplus""" +662 65 regularizer """no""" +662 65 optimizer """adam""" +662 65 training_loop """lcwa""" +662 65 evaluator """rankbased""" +662 66 dataset """kinships""" +662 66 model """simple""" +662 66 loss """softplus""" +662 66 regularizer """no""" +662 66 optimizer """adam""" +662 66 training_loop """lcwa""" +662 66 evaluator """rankbased""" +662 67 dataset """kinships""" +662 67 model """simple""" +662 67 loss """softplus""" +662 67 regularizer """no""" +662 67 optimizer """adam""" +662 67 training_loop """lcwa""" +662 67 evaluator """rankbased""" +662 68 dataset """kinships""" +662 68 model """simple""" +662 68 loss """softplus""" +662 68 regularizer """no""" +662 68 optimizer """adam""" +662 68 training_loop """lcwa""" +662 68 evaluator """rankbased""" +662 69 dataset """kinships""" +662 69 model """simple""" +662 69 loss """softplus""" +662 69 regularizer """no""" +662 69 optimizer """adam""" +662 69 training_loop """lcwa""" +662 69 evaluator """rankbased""" +662 70 dataset """kinships""" +662 70 model """simple""" +662 70 loss """softplus""" +662 70 regularizer """no""" +662 70 optimizer """adam""" +662 70 training_loop """lcwa""" +662 70 evaluator """rankbased""" +662 71 dataset """kinships""" +662 71 model """simple""" +662 71 loss """softplus""" +662 71 regularizer """no""" +662 71 optimizer """adam""" +662 71 training_loop """lcwa""" +662 71 evaluator """rankbased""" +662 72 dataset """kinships""" +662 72 model """simple""" +662 72 loss 
"""softplus""" +662 72 regularizer """no""" +662 72 optimizer """adam""" +662 72 training_loop """lcwa""" +662 72 evaluator """rankbased""" +662 73 dataset """kinships""" +662 73 model """simple""" +662 73 loss """softplus""" +662 73 regularizer """no""" +662 73 optimizer """adam""" +662 73 training_loop """lcwa""" +662 73 evaluator """rankbased""" +662 74 dataset """kinships""" +662 74 model """simple""" +662 74 loss """softplus""" +662 74 regularizer """no""" +662 74 optimizer """adam""" +662 74 training_loop """lcwa""" +662 74 evaluator """rankbased""" +662 75 dataset """kinships""" +662 75 model """simple""" +662 75 loss """softplus""" +662 75 regularizer """no""" +662 75 optimizer """adam""" +662 75 training_loop """lcwa""" +662 75 evaluator """rankbased""" +662 76 dataset """kinships""" +662 76 model """simple""" +662 76 loss """softplus""" +662 76 regularizer """no""" +662 76 optimizer """adam""" +662 76 training_loop """lcwa""" +662 76 evaluator """rankbased""" +662 77 dataset """kinships""" +662 77 model """simple""" +662 77 loss """softplus""" +662 77 regularizer """no""" +662 77 optimizer """adam""" +662 77 training_loop """lcwa""" +662 77 evaluator """rankbased""" +662 78 dataset """kinships""" +662 78 model """simple""" +662 78 loss """softplus""" +662 78 regularizer """no""" +662 78 optimizer """adam""" +662 78 training_loop """lcwa""" +662 78 evaluator """rankbased""" +662 79 dataset """kinships""" +662 79 model """simple""" +662 79 loss """softplus""" +662 79 regularizer """no""" +662 79 optimizer """adam""" +662 79 training_loop """lcwa""" +662 79 evaluator """rankbased""" +662 80 dataset """kinships""" +662 80 model """simple""" +662 80 loss """softplus""" +662 80 regularizer """no""" +662 80 optimizer """adam""" +662 80 training_loop """lcwa""" +662 80 evaluator """rankbased""" +662 81 dataset """kinships""" +662 81 model """simple""" +662 81 loss """softplus""" +662 81 regularizer """no""" +662 81 optimizer """adam""" +662 81 training_loop 
"""lcwa""" +662 81 evaluator """rankbased""" +662 82 dataset """kinships""" +662 82 model """simple""" +662 82 loss """softplus""" +662 82 regularizer """no""" +662 82 optimizer """adam""" +662 82 training_loop """lcwa""" +662 82 evaluator """rankbased""" +662 83 dataset """kinships""" +662 83 model """simple""" +662 83 loss """softplus""" +662 83 regularizer """no""" +662 83 optimizer """adam""" +662 83 training_loop """lcwa""" +662 83 evaluator """rankbased""" +662 84 dataset """kinships""" +662 84 model """simple""" +662 84 loss """softplus""" +662 84 regularizer """no""" +662 84 optimizer """adam""" +662 84 training_loop """lcwa""" +662 84 evaluator """rankbased""" +662 85 dataset """kinships""" +662 85 model """simple""" +662 85 loss """softplus""" +662 85 regularizer """no""" +662 85 optimizer """adam""" +662 85 training_loop """lcwa""" +662 85 evaluator """rankbased""" +662 86 dataset """kinships""" +662 86 model """simple""" +662 86 loss """softplus""" +662 86 regularizer """no""" +662 86 optimizer """adam""" +662 86 training_loop """lcwa""" +662 86 evaluator """rankbased""" +662 87 dataset """kinships""" +662 87 model """simple""" +662 87 loss """softplus""" +662 87 regularizer """no""" +662 87 optimizer """adam""" +662 87 training_loop """lcwa""" +662 87 evaluator """rankbased""" +662 88 dataset """kinships""" +662 88 model """simple""" +662 88 loss """softplus""" +662 88 regularizer """no""" +662 88 optimizer """adam""" +662 88 training_loop """lcwa""" +662 88 evaluator """rankbased""" +662 89 dataset """kinships""" +662 89 model """simple""" +662 89 loss """softplus""" +662 89 regularizer """no""" +662 89 optimizer """adam""" +662 89 training_loop """lcwa""" +662 89 evaluator """rankbased""" +662 90 dataset """kinships""" +662 90 model """simple""" +662 90 loss """softplus""" +662 90 regularizer """no""" +662 90 optimizer """adam""" +662 90 training_loop """lcwa""" +662 90 evaluator """rankbased""" +662 91 dataset """kinships""" +662 91 model 
"""simple""" +662 91 loss """softplus""" +662 91 regularizer """no""" +662 91 optimizer """adam""" +662 91 training_loop """lcwa""" +662 91 evaluator """rankbased""" +662 92 dataset """kinships""" +662 92 model """simple""" +662 92 loss """softplus""" +662 92 regularizer """no""" +662 92 optimizer """adam""" +662 92 training_loop """lcwa""" +662 92 evaluator """rankbased""" +662 93 dataset """kinships""" +662 93 model """simple""" +662 93 loss """softplus""" +662 93 regularizer """no""" +662 93 optimizer """adam""" +662 93 training_loop """lcwa""" +662 93 evaluator """rankbased""" +662 94 dataset """kinships""" +662 94 model """simple""" +662 94 loss """softplus""" +662 94 regularizer """no""" +662 94 optimizer """adam""" +662 94 training_loop """lcwa""" +662 94 evaluator """rankbased""" +662 95 dataset """kinships""" +662 95 model """simple""" +662 95 loss """softplus""" +662 95 regularizer """no""" +662 95 optimizer """adam""" +662 95 training_loop """lcwa""" +662 95 evaluator """rankbased""" +662 96 dataset """kinships""" +662 96 model """simple""" +662 96 loss """softplus""" +662 96 regularizer """no""" +662 96 optimizer """adam""" +662 96 training_loop """lcwa""" +662 96 evaluator """rankbased""" +662 97 dataset """kinships""" +662 97 model """simple""" +662 97 loss """softplus""" +662 97 regularizer """no""" +662 97 optimizer """adam""" +662 97 training_loop """lcwa""" +662 97 evaluator """rankbased""" +662 98 dataset """kinships""" +662 98 model """simple""" +662 98 loss """softplus""" +662 98 regularizer """no""" +662 98 optimizer """adam""" +662 98 training_loop """lcwa""" +662 98 evaluator """rankbased""" +662 99 dataset """kinships""" +662 99 model """simple""" +662 99 loss """softplus""" +662 99 regularizer """no""" +662 99 optimizer """adam""" +662 99 training_loop """lcwa""" +662 99 evaluator """rankbased""" +662 100 dataset """kinships""" +662 100 model """simple""" +662 100 loss """softplus""" +662 100 regularizer """no""" +662 100 optimizer 
"""adam""" +662 100 training_loop """lcwa""" +662 100 evaluator """rankbased""" +663 1 model.embedding_dim 1.0 +663 1 optimizer.lr 0.08796230178895198 +663 1 training.batch_size 0.0 +663 1 training.label_smoothing 0.20928240406285087 +663 2 model.embedding_dim 2.0 +663 2 optimizer.lr 0.0746336221091805 +663 2 training.batch_size 0.0 +663 2 training.label_smoothing 0.001827428319360499 +663 3 model.embedding_dim 2.0 +663 3 optimizer.lr 0.01810661758830956 +663 3 training.batch_size 0.0 +663 3 training.label_smoothing 0.022036269950797792 +663 4 model.embedding_dim 1.0 +663 4 optimizer.lr 0.0032831088078851423 +663 4 training.batch_size 0.0 +663 4 training.label_smoothing 0.00529408571934761 +663 5 model.embedding_dim 1.0 +663 5 optimizer.lr 0.004809861716359619 +663 5 training.batch_size 2.0 +663 5 training.label_smoothing 0.0025301820560824544 +663 6 model.embedding_dim 1.0 +663 6 optimizer.lr 0.0010217304647821544 +663 6 training.batch_size 2.0 +663 6 training.label_smoothing 0.29400914057092176 +663 7 model.embedding_dim 0.0 +663 7 optimizer.lr 0.008519348310603369 +663 7 training.batch_size 2.0 +663 7 training.label_smoothing 0.1710540629359526 +663 8 model.embedding_dim 1.0 +663 8 optimizer.lr 0.03920065620757608 +663 8 training.batch_size 1.0 +663 8 training.label_smoothing 0.004350677833212194 +663 9 model.embedding_dim 0.0 +663 9 optimizer.lr 0.006992617064571533 +663 9 training.batch_size 2.0 +663 9 training.label_smoothing 0.0030296196665471876 +663 10 model.embedding_dim 1.0 +663 10 optimizer.lr 0.012374165413429786 +663 10 training.batch_size 2.0 +663 10 training.label_smoothing 0.015372462287545439 +663 11 model.embedding_dim 0.0 +663 11 optimizer.lr 0.0014623713369814777 +663 11 training.batch_size 1.0 +663 11 training.label_smoothing 0.008392655755973165 +663 12 model.embedding_dim 1.0 +663 12 optimizer.lr 0.0025248087236967635 +663 12 training.batch_size 2.0 +663 12 training.label_smoothing 0.5153439913526854 +663 13 model.embedding_dim 0.0 +663 13 
optimizer.lr 0.047382626212432055 +663 13 training.batch_size 1.0 +663 13 training.label_smoothing 0.5064467552469438 +663 14 model.embedding_dim 1.0 +663 14 optimizer.lr 0.007427945940067592 +663 14 training.batch_size 2.0 +663 14 training.label_smoothing 0.2378645823870994 +663 15 model.embedding_dim 0.0 +663 15 optimizer.lr 0.0018967427763759103 +663 15 training.batch_size 0.0 +663 15 training.label_smoothing 0.0012968861850530245 +663 16 model.embedding_dim 0.0 +663 16 optimizer.lr 0.02253036374236103 +663 16 training.batch_size 1.0 +663 16 training.label_smoothing 0.009211468199507735 +663 17 model.embedding_dim 0.0 +663 17 optimizer.lr 0.0035006595351121305 +663 17 training.batch_size 0.0 +663 17 training.label_smoothing 0.6112035245776174 +663 18 model.embedding_dim 1.0 +663 18 optimizer.lr 0.055788847817189964 +663 18 training.batch_size 0.0 +663 18 training.label_smoothing 0.15548773144137337 +663 19 model.embedding_dim 0.0 +663 19 optimizer.lr 0.04004433722031041 +663 19 training.batch_size 2.0 +663 19 training.label_smoothing 0.04987943745150258 +663 20 model.embedding_dim 0.0 +663 20 optimizer.lr 0.0011774582489053728 +663 20 training.batch_size 1.0 +663 20 training.label_smoothing 0.008208764433525332 +663 21 model.embedding_dim 1.0 +663 21 optimizer.lr 0.001177395638174492 +663 21 training.batch_size 1.0 +663 21 training.label_smoothing 0.013809012086237511 +663 22 model.embedding_dim 1.0 +663 22 optimizer.lr 0.0016892403339271137 +663 22 training.batch_size 2.0 +663 22 training.label_smoothing 0.04212642823177712 +663 23 model.embedding_dim 1.0 +663 23 optimizer.lr 0.015015809292307026 +663 23 training.batch_size 2.0 +663 23 training.label_smoothing 0.005698203010318595 +663 24 model.embedding_dim 0.0 +663 24 optimizer.lr 0.009527591353862663 +663 24 training.batch_size 0.0 +663 24 training.label_smoothing 0.06415209039449005 +663 25 model.embedding_dim 2.0 +663 25 optimizer.lr 0.0030825443560593704 +663 25 training.batch_size 1.0 +663 25 
training.label_smoothing 0.001553628070489279 +663 26 model.embedding_dim 2.0 +663 26 optimizer.lr 0.013250488696225669 +663 26 training.batch_size 0.0 +663 26 training.label_smoothing 0.8272824007212061 +663 27 model.embedding_dim 2.0 +663 27 optimizer.lr 0.004555238645018922 +663 27 training.batch_size 1.0 +663 27 training.label_smoothing 0.14587879030865097 +663 28 model.embedding_dim 0.0 +663 28 optimizer.lr 0.0015128190552934977 +663 28 training.batch_size 1.0 +663 28 training.label_smoothing 0.001147201828277677 +663 29 model.embedding_dim 0.0 +663 29 optimizer.lr 0.011662041656649703 +663 29 training.batch_size 2.0 +663 29 training.label_smoothing 0.08784441193795287 +663 30 model.embedding_dim 2.0 +663 30 optimizer.lr 0.0017917303351121321 +663 30 training.batch_size 1.0 +663 30 training.label_smoothing 0.10279695525830652 +663 31 model.embedding_dim 2.0 +663 31 optimizer.lr 0.005875954375769631 +663 31 training.batch_size 1.0 +663 31 training.label_smoothing 0.12718493801775768 +663 32 model.embedding_dim 1.0 +663 32 optimizer.lr 0.0040576661992883815 +663 32 training.batch_size 1.0 +663 32 training.label_smoothing 0.011518109569320322 +663 33 model.embedding_dim 0.0 +663 33 optimizer.lr 0.022514592917909297 +663 33 training.batch_size 0.0 +663 33 training.label_smoothing 0.07592021586534296 +663 34 model.embedding_dim 1.0 +663 34 optimizer.lr 0.015699721636716367 +663 34 training.batch_size 0.0 +663 34 training.label_smoothing 0.14545781308994196 +663 35 model.embedding_dim 0.0 +663 35 optimizer.lr 0.00296732270049522 +663 35 training.batch_size 1.0 +663 35 training.label_smoothing 0.019900629165521124 +663 36 model.embedding_dim 1.0 +663 36 optimizer.lr 0.012599340572649899 +663 36 training.batch_size 0.0 +663 36 training.label_smoothing 0.0033426198114765815 +663 37 model.embedding_dim 0.0 +663 37 optimizer.lr 0.002410694418801027 +663 37 training.batch_size 0.0 +663 37 training.label_smoothing 0.004906548859558044 +663 38 model.embedding_dim 2.0 +663 
38 optimizer.lr 0.004805251426449582 +663 38 training.batch_size 1.0 +663 38 training.label_smoothing 0.021121505987771662 +663 39 model.embedding_dim 0.0 +663 39 optimizer.lr 0.027441155450302495 +663 39 training.batch_size 1.0 +663 39 training.label_smoothing 0.012754058305641981 +663 40 model.embedding_dim 1.0 +663 40 optimizer.lr 0.08640413800954479 +663 40 training.batch_size 0.0 +663 40 training.label_smoothing 0.10727978266035058 +663 41 model.embedding_dim 1.0 +663 41 optimizer.lr 0.07450144030332681 +663 41 training.batch_size 1.0 +663 41 training.label_smoothing 0.02136443542087776 +663 42 model.embedding_dim 0.0 +663 42 optimizer.lr 0.08722904818547603 +663 42 training.batch_size 2.0 +663 42 training.label_smoothing 0.0018550913955483247 +663 43 model.embedding_dim 1.0 +663 43 optimizer.lr 0.0012917405956648195 +663 43 training.batch_size 0.0 +663 43 training.label_smoothing 0.23205442691552353 +663 44 model.embedding_dim 0.0 +663 44 optimizer.lr 0.010335165632363306 +663 44 training.batch_size 1.0 +663 44 training.label_smoothing 0.0011843210865021003 +663 45 model.embedding_dim 2.0 +663 45 optimizer.lr 0.060902490870340095 +663 45 training.batch_size 1.0 +663 45 training.label_smoothing 0.0028403741143092514 +663 46 model.embedding_dim 0.0 +663 46 optimizer.lr 0.0154627323182568 +663 46 training.batch_size 1.0 +663 46 training.label_smoothing 0.6356087610578653 +663 47 model.embedding_dim 0.0 +663 47 optimizer.lr 0.0021045996367566877 +663 47 training.batch_size 1.0 +663 47 training.label_smoothing 0.1099431098744985 +663 48 model.embedding_dim 2.0 +663 48 optimizer.lr 0.023362176513128517 +663 48 training.batch_size 0.0 +663 48 training.label_smoothing 0.02698724609409682 +663 49 model.embedding_dim 2.0 +663 49 optimizer.lr 0.02304571370759727 +663 49 training.batch_size 1.0 +663 49 training.label_smoothing 0.0010758810337470396 +663 50 model.embedding_dim 2.0 +663 50 optimizer.lr 0.07602590875437146 +663 50 training.batch_size 2.0 +663 50 
training.label_smoothing 0.06922187403814095 +663 51 model.embedding_dim 2.0 +663 51 optimizer.lr 0.07206649661670562 +663 51 training.batch_size 0.0 +663 51 training.label_smoothing 0.9495319432865119 +663 52 model.embedding_dim 1.0 +663 52 optimizer.lr 0.00954672713207121 +663 52 training.batch_size 2.0 +663 52 training.label_smoothing 0.00617861037782122 +663 53 model.embedding_dim 1.0 +663 53 optimizer.lr 0.05318833822101734 +663 53 training.batch_size 1.0 +663 53 training.label_smoothing 0.0054073250554819005 +663 54 model.embedding_dim 2.0 +663 54 optimizer.lr 0.024583773792065833 +663 54 training.batch_size 2.0 +663 54 training.label_smoothing 0.002218182233859255 +663 55 model.embedding_dim 1.0 +663 55 optimizer.lr 0.002961313043803449 +663 55 training.batch_size 2.0 +663 55 training.label_smoothing 0.0911517407899481 +663 56 model.embedding_dim 2.0 +663 56 optimizer.lr 0.013885563932594575 +663 56 training.batch_size 1.0 +663 56 training.label_smoothing 0.0036912191318638496 +663 57 model.embedding_dim 0.0 +663 57 optimizer.lr 0.006473546554544278 +663 57 training.batch_size 0.0 +663 57 training.label_smoothing 0.5184627663871804 +663 58 model.embedding_dim 0.0 +663 58 optimizer.lr 0.027003645063953884 +663 58 training.batch_size 2.0 +663 58 training.label_smoothing 0.5714893674941685 +663 59 model.embedding_dim 2.0 +663 59 optimizer.lr 0.01131238095212467 +663 59 training.batch_size 2.0 +663 59 training.label_smoothing 0.46107349444364054 +663 60 model.embedding_dim 0.0 +663 60 optimizer.lr 0.07854611207781523 +663 60 training.batch_size 1.0 +663 60 training.label_smoothing 0.011683736001669444 +663 61 model.embedding_dim 2.0 +663 61 optimizer.lr 0.07895561170522564 +663 61 training.batch_size 2.0 +663 61 training.label_smoothing 0.6251772197633313 +663 62 model.embedding_dim 2.0 +663 62 optimizer.lr 0.09981781127460197 +663 62 training.batch_size 2.0 +663 62 training.label_smoothing 0.09341367172880344 +663 63 model.embedding_dim 1.0 +663 63 optimizer.lr 
0.002055368053396692 +663 63 training.batch_size 1.0 +663 63 training.label_smoothing 0.004185487826468934 +663 64 model.embedding_dim 2.0 +663 64 optimizer.lr 0.011455474584727125 +663 64 training.batch_size 1.0 +663 64 training.label_smoothing 0.006891639508055141 +663 65 model.embedding_dim 2.0 +663 65 optimizer.lr 0.007003041500726939 +663 65 training.batch_size 2.0 +663 65 training.label_smoothing 0.0013755960381465148 +663 66 model.embedding_dim 0.0 +663 66 optimizer.lr 0.008767206114608121 +663 66 training.batch_size 1.0 +663 66 training.label_smoothing 0.001137521961012128 +663 67 model.embedding_dim 1.0 +663 67 optimizer.lr 0.014531055929106116 +663 67 training.batch_size 1.0 +663 67 training.label_smoothing 0.06465318251321181 +663 68 model.embedding_dim 0.0 +663 68 optimizer.lr 0.016150366108135646 +663 68 training.batch_size 1.0 +663 68 training.label_smoothing 0.999490433469838 +663 69 model.embedding_dim 2.0 +663 69 optimizer.lr 0.009550195603429344 +663 69 training.batch_size 2.0 +663 69 training.label_smoothing 0.15176989025433682 +663 70 model.embedding_dim 2.0 +663 70 optimizer.lr 0.013676228532659707 +663 70 training.batch_size 2.0 +663 70 training.label_smoothing 0.011070815249958468 +663 71 model.embedding_dim 2.0 +663 71 optimizer.lr 0.0030189646549155526 +663 71 training.batch_size 0.0 +663 71 training.label_smoothing 0.017678709146310135 +663 72 model.embedding_dim 0.0 +663 72 optimizer.lr 0.0023705009628740012 +663 72 training.batch_size 0.0 +663 72 training.label_smoothing 0.18872365036261612 +663 73 model.embedding_dim 0.0 +663 73 optimizer.lr 0.0012277636010355423 +663 73 training.batch_size 2.0 +663 73 training.label_smoothing 0.3005634641996511 +663 74 model.embedding_dim 0.0 +663 74 optimizer.lr 0.0025178610692940663 +663 74 training.batch_size 1.0 +663 74 training.label_smoothing 0.03751533078983793 +663 75 model.embedding_dim 0.0 +663 75 optimizer.lr 0.029364244242930177 +663 75 training.batch_size 1.0 +663 75 
training.label_smoothing 0.04786547744953183 +663 76 model.embedding_dim 1.0 +663 76 optimizer.lr 0.005450681925834473 +663 76 training.batch_size 2.0 +663 76 training.label_smoothing 0.02756719727801873 +663 77 model.embedding_dim 2.0 +663 77 optimizer.lr 0.03575995173095187 +663 77 training.batch_size 0.0 +663 77 training.label_smoothing 0.023939418337258607 +663 78 model.embedding_dim 1.0 +663 78 optimizer.lr 0.0010934751727075267 +663 78 training.batch_size 0.0 +663 78 training.label_smoothing 0.0024496182373013754 +663 79 model.embedding_dim 0.0 +663 79 optimizer.lr 0.05253507925049744 +663 79 training.batch_size 2.0 +663 79 training.label_smoothing 0.042453171159420135 +663 80 model.embedding_dim 0.0 +663 80 optimizer.lr 0.059223637148864486 +663 80 training.batch_size 2.0 +663 80 training.label_smoothing 0.7562527036725056 +663 81 model.embedding_dim 2.0 +663 81 optimizer.lr 0.009621923754067993 +663 81 training.batch_size 1.0 +663 81 training.label_smoothing 0.0022637032464186634 +663 82 model.embedding_dim 2.0 +663 82 optimizer.lr 0.02746511310214215 +663 82 training.batch_size 0.0 +663 82 training.label_smoothing 0.00734812755545043 +663 83 model.embedding_dim 0.0 +663 83 optimizer.lr 0.06928433999025407 +663 83 training.batch_size 0.0 +663 83 training.label_smoothing 0.051964537320931356 +663 84 model.embedding_dim 0.0 +663 84 optimizer.lr 0.0014550805189979049 +663 84 training.batch_size 0.0 +663 84 training.label_smoothing 0.007072616532578498 +663 85 model.embedding_dim 2.0 +663 85 optimizer.lr 0.0016748206929756133 +663 85 training.batch_size 2.0 +663 85 training.label_smoothing 0.04888931696099561 +663 86 model.embedding_dim 2.0 +663 86 optimizer.lr 0.01045383467381409 +663 86 training.batch_size 1.0 +663 86 training.label_smoothing 0.22123541264213903 +663 87 model.embedding_dim 1.0 +663 87 optimizer.lr 0.0026475159781464133 +663 87 training.batch_size 1.0 +663 87 training.label_smoothing 0.9784241403083056 +663 88 model.embedding_dim 0.0 +663 88 
optimizer.lr 0.034967407425015536 +663 88 training.batch_size 0.0 +663 88 training.label_smoothing 0.8190825118684165 +663 89 model.embedding_dim 0.0 +663 89 optimizer.lr 0.01716597636703874 +663 89 training.batch_size 0.0 +663 89 training.label_smoothing 0.0052187385120128485 +663 90 model.embedding_dim 2.0 +663 90 optimizer.lr 0.010974752907949583 +663 90 training.batch_size 2.0 +663 90 training.label_smoothing 0.06375214934278717 +663 91 model.embedding_dim 1.0 +663 91 optimizer.lr 0.012826794184286597 +663 91 training.batch_size 1.0 +663 91 training.label_smoothing 0.7454704050201684 +663 92 model.embedding_dim 1.0 +663 92 optimizer.lr 0.07948537179955015 +663 92 training.batch_size 1.0 +663 92 training.label_smoothing 0.16665216182832412 +663 93 model.embedding_dim 2.0 +663 93 optimizer.lr 0.010539212819423164 +663 93 training.batch_size 0.0 +663 93 training.label_smoothing 0.01213486077723788 +663 94 model.embedding_dim 1.0 +663 94 optimizer.lr 0.0027162270683623473 +663 94 training.batch_size 1.0 +663 94 training.label_smoothing 0.024345421926868914 +663 95 model.embedding_dim 1.0 +663 95 optimizer.lr 0.08701574949898361 +663 95 training.batch_size 1.0 +663 95 training.label_smoothing 0.3946692445071014 +663 96 model.embedding_dim 0.0 +663 96 optimizer.lr 0.063128005933588 +663 96 training.batch_size 1.0 +663 96 training.label_smoothing 0.12124321870280831 +663 97 model.embedding_dim 1.0 +663 97 optimizer.lr 0.003483617628745727 +663 97 training.batch_size 1.0 +663 97 training.label_smoothing 0.0879575505374027 +663 98 model.embedding_dim 2.0 +663 98 optimizer.lr 0.0015720920748678018 +663 98 training.batch_size 2.0 +663 98 training.label_smoothing 0.5055273604948932 +663 99 model.embedding_dim 2.0 +663 99 optimizer.lr 0.018930954916423267 +663 99 training.batch_size 2.0 +663 99 training.label_smoothing 0.3541484301836337 +663 100 model.embedding_dim 0.0 +663 100 optimizer.lr 0.0013986830727923134 +663 100 training.batch_size 1.0 +663 100 
training.label_smoothing 0.058115033579728186 +663 1 dataset """kinships""" +663 1 model """simple""" +663 1 loss """bceaftersigmoid""" +663 1 regularizer """no""" +663 1 optimizer """adam""" +663 1 training_loop """lcwa""" +663 1 evaluator """rankbased""" +663 2 dataset """kinships""" +663 2 model """simple""" +663 2 loss """bceaftersigmoid""" +663 2 regularizer """no""" +663 2 optimizer """adam""" +663 2 training_loop """lcwa""" +663 2 evaluator """rankbased""" +663 3 dataset """kinships""" +663 3 model """simple""" +663 3 loss """bceaftersigmoid""" +663 3 regularizer """no""" +663 3 optimizer """adam""" +663 3 training_loop """lcwa""" +663 3 evaluator """rankbased""" +663 4 dataset """kinships""" +663 4 model """simple""" +663 4 loss """bceaftersigmoid""" +663 4 regularizer """no""" +663 4 optimizer """adam""" +663 4 training_loop """lcwa""" +663 4 evaluator """rankbased""" +663 5 dataset """kinships""" +663 5 model """simple""" +663 5 loss """bceaftersigmoid""" +663 5 regularizer """no""" +663 5 optimizer """adam""" +663 5 training_loop """lcwa""" +663 5 evaluator """rankbased""" +663 6 dataset """kinships""" +663 6 model """simple""" +663 6 loss """bceaftersigmoid""" +663 6 regularizer """no""" +663 6 optimizer """adam""" +663 6 training_loop """lcwa""" +663 6 evaluator """rankbased""" +663 7 dataset """kinships""" +663 7 model """simple""" +663 7 loss """bceaftersigmoid""" +663 7 regularizer """no""" +663 7 optimizer """adam""" +663 7 training_loop """lcwa""" +663 7 evaluator """rankbased""" +663 8 dataset """kinships""" +663 8 model """simple""" +663 8 loss """bceaftersigmoid""" +663 8 regularizer """no""" +663 8 optimizer """adam""" +663 8 training_loop """lcwa""" +663 8 evaluator """rankbased""" +663 9 dataset """kinships""" +663 9 model """simple""" +663 9 loss """bceaftersigmoid""" +663 9 regularizer """no""" +663 9 optimizer """adam""" +663 9 training_loop """lcwa""" +663 9 evaluator """rankbased""" +663 10 dataset """kinships""" +663 10 model 
"""simple""" +663 10 loss """bceaftersigmoid""" +663 10 regularizer """no""" +663 10 optimizer """adam""" +663 10 training_loop """lcwa""" +663 10 evaluator """rankbased""" +663 11 dataset """kinships""" +663 11 model """simple""" +663 11 loss """bceaftersigmoid""" +663 11 regularizer """no""" +663 11 optimizer """adam""" +663 11 training_loop """lcwa""" +663 11 evaluator """rankbased""" +663 12 dataset """kinships""" +663 12 model """simple""" +663 12 loss """bceaftersigmoid""" +663 12 regularizer """no""" +663 12 optimizer """adam""" +663 12 training_loop """lcwa""" +663 12 evaluator """rankbased""" +663 13 dataset """kinships""" +663 13 model """simple""" +663 13 loss """bceaftersigmoid""" +663 13 regularizer """no""" +663 13 optimizer """adam""" +663 13 training_loop """lcwa""" +663 13 evaluator """rankbased""" +663 14 dataset """kinships""" +663 14 model """simple""" +663 14 loss """bceaftersigmoid""" +663 14 regularizer """no""" +663 14 optimizer """adam""" +663 14 training_loop """lcwa""" +663 14 evaluator """rankbased""" +663 15 dataset """kinships""" +663 15 model """simple""" +663 15 loss """bceaftersigmoid""" +663 15 regularizer """no""" +663 15 optimizer """adam""" +663 15 training_loop """lcwa""" +663 15 evaluator """rankbased""" +663 16 dataset """kinships""" +663 16 model """simple""" +663 16 loss """bceaftersigmoid""" +663 16 regularizer """no""" +663 16 optimizer """adam""" +663 16 training_loop """lcwa""" +663 16 evaluator """rankbased""" +663 17 dataset """kinships""" +663 17 model """simple""" +663 17 loss """bceaftersigmoid""" +663 17 regularizer """no""" +663 17 optimizer """adam""" +663 17 training_loop """lcwa""" +663 17 evaluator """rankbased""" +663 18 dataset """kinships""" +663 18 model """simple""" +663 18 loss """bceaftersigmoid""" +663 18 regularizer """no""" +663 18 optimizer """adam""" +663 18 training_loop """lcwa""" +663 18 evaluator """rankbased""" +663 19 dataset """kinships""" +663 19 model """simple""" +663 19 loss 
"""bceaftersigmoid""" +663 19 regularizer """no""" +663 19 optimizer """adam""" +663 19 training_loop """lcwa""" +663 19 evaluator """rankbased""" +663 20 dataset """kinships""" +663 20 model """simple""" +663 20 loss """bceaftersigmoid""" +663 20 regularizer """no""" +663 20 optimizer """adam""" +663 20 training_loop """lcwa""" +663 20 evaluator """rankbased""" +663 21 dataset """kinships""" +663 21 model """simple""" +663 21 loss """bceaftersigmoid""" +663 21 regularizer """no""" +663 21 optimizer """adam""" +663 21 training_loop """lcwa""" +663 21 evaluator """rankbased""" +663 22 dataset """kinships""" +663 22 model """simple""" +663 22 loss """bceaftersigmoid""" +663 22 regularizer """no""" +663 22 optimizer """adam""" +663 22 training_loop """lcwa""" +663 22 evaluator """rankbased""" +663 23 dataset """kinships""" +663 23 model """simple""" +663 23 loss """bceaftersigmoid""" +663 23 regularizer """no""" +663 23 optimizer """adam""" +663 23 training_loop """lcwa""" +663 23 evaluator """rankbased""" +663 24 dataset """kinships""" +663 24 model """simple""" +663 24 loss """bceaftersigmoid""" +663 24 regularizer """no""" +663 24 optimizer """adam""" +663 24 training_loop """lcwa""" +663 24 evaluator """rankbased""" +663 25 dataset """kinships""" +663 25 model """simple""" +663 25 loss """bceaftersigmoid""" +663 25 regularizer """no""" +663 25 optimizer """adam""" +663 25 training_loop """lcwa""" +663 25 evaluator """rankbased""" +663 26 dataset """kinships""" +663 26 model """simple""" +663 26 loss """bceaftersigmoid""" +663 26 regularizer """no""" +663 26 optimizer """adam""" +663 26 training_loop """lcwa""" +663 26 evaluator """rankbased""" +663 27 dataset """kinships""" +663 27 model """simple""" +663 27 loss """bceaftersigmoid""" +663 27 regularizer """no""" +663 27 optimizer """adam""" +663 27 training_loop """lcwa""" +663 27 evaluator """rankbased""" +663 28 dataset """kinships""" +663 28 model """simple""" +663 28 loss """bceaftersigmoid""" +663 28 
regularizer """no""" +663 28 optimizer """adam""" +663 28 training_loop """lcwa""" +663 28 evaluator """rankbased""" +663 29 dataset """kinships""" +663 29 model """simple""" +663 29 loss """bceaftersigmoid""" +663 29 regularizer """no""" +663 29 optimizer """adam""" +663 29 training_loop """lcwa""" +663 29 evaluator """rankbased""" +663 30 dataset """kinships""" +663 30 model """simple""" +663 30 loss """bceaftersigmoid""" +663 30 regularizer """no""" +663 30 optimizer """adam""" +663 30 training_loop """lcwa""" +663 30 evaluator """rankbased""" +663 31 dataset """kinships""" +663 31 model """simple""" +663 31 loss """bceaftersigmoid""" +663 31 regularizer """no""" +663 31 optimizer """adam""" +663 31 training_loop """lcwa""" +663 31 evaluator """rankbased""" +663 32 dataset """kinships""" +663 32 model """simple""" +663 32 loss """bceaftersigmoid""" +663 32 regularizer """no""" +663 32 optimizer """adam""" +663 32 training_loop """lcwa""" +663 32 evaluator """rankbased""" +663 33 dataset """kinships""" +663 33 model """simple""" +663 33 loss """bceaftersigmoid""" +663 33 regularizer """no""" +663 33 optimizer """adam""" +663 33 training_loop """lcwa""" +663 33 evaluator """rankbased""" +663 34 dataset """kinships""" +663 34 model """simple""" +663 34 loss """bceaftersigmoid""" +663 34 regularizer """no""" +663 34 optimizer """adam""" +663 34 training_loop """lcwa""" +663 34 evaluator """rankbased""" +663 35 dataset """kinships""" +663 35 model """simple""" +663 35 loss """bceaftersigmoid""" +663 35 regularizer """no""" +663 35 optimizer """adam""" +663 35 training_loop """lcwa""" +663 35 evaluator """rankbased""" +663 36 dataset """kinships""" +663 36 model """simple""" +663 36 loss """bceaftersigmoid""" +663 36 regularizer """no""" +663 36 optimizer """adam""" +663 36 training_loop """lcwa""" +663 36 evaluator """rankbased""" +663 37 dataset """kinships""" +663 37 model """simple""" +663 37 loss """bceaftersigmoid""" +663 37 regularizer """no""" +663 37 
optimizer """adam""" +663 37 training_loop """lcwa""" +663 37 evaluator """rankbased""" +663 38 dataset """kinships""" +663 38 model """simple""" +663 38 loss """bceaftersigmoid""" +663 38 regularizer """no""" +663 38 optimizer """adam""" +663 38 training_loop """lcwa""" +663 38 evaluator """rankbased""" +663 39 dataset """kinships""" +663 39 model """simple""" +663 39 loss """bceaftersigmoid""" +663 39 regularizer """no""" +663 39 optimizer """adam""" +663 39 training_loop """lcwa""" +663 39 evaluator """rankbased""" +663 40 dataset """kinships""" +663 40 model """simple""" +663 40 loss """bceaftersigmoid""" +663 40 regularizer """no""" +663 40 optimizer """adam""" +663 40 training_loop """lcwa""" +663 40 evaluator """rankbased""" +663 41 dataset """kinships""" +663 41 model """simple""" +663 41 loss """bceaftersigmoid""" +663 41 regularizer """no""" +663 41 optimizer """adam""" +663 41 training_loop """lcwa""" +663 41 evaluator """rankbased""" +663 42 dataset """kinships""" +663 42 model """simple""" +663 42 loss """bceaftersigmoid""" +663 42 regularizer """no""" +663 42 optimizer """adam""" +663 42 training_loop """lcwa""" +663 42 evaluator """rankbased""" +663 43 dataset """kinships""" +663 43 model """simple""" +663 43 loss """bceaftersigmoid""" +663 43 regularizer """no""" +663 43 optimizer """adam""" +663 43 training_loop """lcwa""" +663 43 evaluator """rankbased""" +663 44 dataset """kinships""" +663 44 model """simple""" +663 44 loss """bceaftersigmoid""" +663 44 regularizer """no""" +663 44 optimizer """adam""" +663 44 training_loop """lcwa""" +663 44 evaluator """rankbased""" +663 45 dataset """kinships""" +663 45 model """simple""" +663 45 loss """bceaftersigmoid""" +663 45 regularizer """no""" +663 45 optimizer """adam""" +663 45 training_loop """lcwa""" +663 45 evaluator """rankbased""" +663 46 dataset """kinships""" +663 46 model """simple""" +663 46 loss """bceaftersigmoid""" +663 46 regularizer """no""" +663 46 optimizer """adam""" +663 46 
training_loop """lcwa""" +663 46 evaluator """rankbased""" +663 47 dataset """kinships""" +663 47 model """simple""" +663 47 loss """bceaftersigmoid""" +663 47 regularizer """no""" +663 47 optimizer """adam""" +663 47 training_loop """lcwa""" +663 47 evaluator """rankbased""" +663 48 dataset """kinships""" +663 48 model """simple""" +663 48 loss """bceaftersigmoid""" +663 48 regularizer """no""" +663 48 optimizer """adam""" +663 48 training_loop """lcwa""" +663 48 evaluator """rankbased""" +663 49 dataset """kinships""" +663 49 model """simple""" +663 49 loss """bceaftersigmoid""" +663 49 regularizer """no""" +663 49 optimizer """adam""" +663 49 training_loop """lcwa""" +663 49 evaluator """rankbased""" +663 50 dataset """kinships""" +663 50 model """simple""" +663 50 loss """bceaftersigmoid""" +663 50 regularizer """no""" +663 50 optimizer """adam""" +663 50 training_loop """lcwa""" +663 50 evaluator """rankbased""" +663 51 dataset """kinships""" +663 51 model """simple""" +663 51 loss """bceaftersigmoid""" +663 51 regularizer """no""" +663 51 optimizer """adam""" +663 51 training_loop """lcwa""" +663 51 evaluator """rankbased""" +663 52 dataset """kinships""" +663 52 model """simple""" +663 52 loss """bceaftersigmoid""" +663 52 regularizer """no""" +663 52 optimizer """adam""" +663 52 training_loop """lcwa""" +663 52 evaluator """rankbased""" +663 53 dataset """kinships""" +663 53 model """simple""" +663 53 loss """bceaftersigmoid""" +663 53 regularizer """no""" +663 53 optimizer """adam""" +663 53 training_loop """lcwa""" +663 53 evaluator """rankbased""" +663 54 dataset """kinships""" +663 54 model """simple""" +663 54 loss """bceaftersigmoid""" +663 54 regularizer """no""" +663 54 optimizer """adam""" +663 54 training_loop """lcwa""" +663 54 evaluator """rankbased""" +663 55 dataset """kinships""" +663 55 model """simple""" +663 55 loss """bceaftersigmoid""" +663 55 regularizer """no""" +663 55 optimizer """adam""" +663 55 training_loop """lcwa""" +663 55 
evaluator """rankbased""" +663 56 dataset """kinships""" +663 56 model """simple""" +663 56 loss """bceaftersigmoid""" +663 56 regularizer """no""" +663 56 optimizer """adam""" +663 56 training_loop """lcwa""" +663 56 evaluator """rankbased""" +663 57 dataset """kinships""" +663 57 model """simple""" +663 57 loss """bceaftersigmoid""" +663 57 regularizer """no""" +663 57 optimizer """adam""" +663 57 training_loop """lcwa""" +663 57 evaluator """rankbased""" +663 58 dataset """kinships""" +663 58 model """simple""" +663 58 loss """bceaftersigmoid""" +663 58 regularizer """no""" +663 58 optimizer """adam""" +663 58 training_loop """lcwa""" +663 58 evaluator """rankbased""" +663 59 dataset """kinships""" +663 59 model """simple""" +663 59 loss """bceaftersigmoid""" +663 59 regularizer """no""" +663 59 optimizer """adam""" +663 59 training_loop """lcwa""" +663 59 evaluator """rankbased""" +663 60 dataset """kinships""" +663 60 model """simple""" +663 60 loss """bceaftersigmoid""" +663 60 regularizer """no""" +663 60 optimizer """adam""" +663 60 training_loop """lcwa""" +663 60 evaluator """rankbased""" +663 61 dataset """kinships""" +663 61 model """simple""" +663 61 loss """bceaftersigmoid""" +663 61 regularizer """no""" +663 61 optimizer """adam""" +663 61 training_loop """lcwa""" +663 61 evaluator """rankbased""" +663 62 dataset """kinships""" +663 62 model """simple""" +663 62 loss """bceaftersigmoid""" +663 62 regularizer """no""" +663 62 optimizer """adam""" +663 62 training_loop """lcwa""" +663 62 evaluator """rankbased""" +663 63 dataset """kinships""" +663 63 model """simple""" +663 63 loss """bceaftersigmoid""" +663 63 regularizer """no""" +663 63 optimizer """adam""" +663 63 training_loop """lcwa""" +663 63 evaluator """rankbased""" +663 64 dataset """kinships""" +663 64 model """simple""" +663 64 loss """bceaftersigmoid""" +663 64 regularizer """no""" +663 64 optimizer """adam""" +663 64 training_loop """lcwa""" +663 64 evaluator """rankbased""" +663 65 
dataset """kinships""" +663 65 model """simple""" +663 65 loss """bceaftersigmoid""" +663 65 regularizer """no""" +663 65 optimizer """adam""" +663 65 training_loop """lcwa""" +663 65 evaluator """rankbased""" +663 66 dataset """kinships""" +663 66 model """simple""" +663 66 loss """bceaftersigmoid""" +663 66 regularizer """no""" +663 66 optimizer """adam""" +663 66 training_loop """lcwa""" +663 66 evaluator """rankbased""" +663 67 dataset """kinships""" +663 67 model """simple""" +663 67 loss """bceaftersigmoid""" +663 67 regularizer """no""" +663 67 optimizer """adam""" +663 67 training_loop """lcwa""" +663 67 evaluator """rankbased""" +663 68 dataset """kinships""" +663 68 model """simple""" +663 68 loss """bceaftersigmoid""" +663 68 regularizer """no""" +663 68 optimizer """adam""" +663 68 training_loop """lcwa""" +663 68 evaluator """rankbased""" +663 69 dataset """kinships""" +663 69 model """simple""" +663 69 loss """bceaftersigmoid""" +663 69 regularizer """no""" +663 69 optimizer """adam""" +663 69 training_loop """lcwa""" +663 69 evaluator """rankbased""" +663 70 dataset """kinships""" +663 70 model """simple""" +663 70 loss """bceaftersigmoid""" +663 70 regularizer """no""" +663 70 optimizer """adam""" +663 70 training_loop """lcwa""" +663 70 evaluator """rankbased""" +663 71 dataset """kinships""" +663 71 model """simple""" +663 71 loss """bceaftersigmoid""" +663 71 regularizer """no""" +663 71 optimizer """adam""" +663 71 training_loop """lcwa""" +663 71 evaluator """rankbased""" +663 72 dataset """kinships""" +663 72 model """simple""" +663 72 loss """bceaftersigmoid""" +663 72 regularizer """no""" +663 72 optimizer """adam""" +663 72 training_loop """lcwa""" +663 72 evaluator """rankbased""" +663 73 dataset """kinships""" +663 73 model """simple""" +663 73 loss """bceaftersigmoid""" +663 73 regularizer """no""" +663 73 optimizer """adam""" +663 73 training_loop """lcwa""" +663 73 evaluator """rankbased""" +663 74 dataset """kinships""" +663 74 model 
"""simple""" +663 74 loss """bceaftersigmoid""" +663 74 regularizer """no""" +663 74 optimizer """adam""" +663 74 training_loop """lcwa""" +663 74 evaluator """rankbased""" +663 75 dataset """kinships""" +663 75 model """simple""" +663 75 loss """bceaftersigmoid""" +663 75 regularizer """no""" +663 75 optimizer """adam""" +663 75 training_loop """lcwa""" +663 75 evaluator """rankbased""" +663 76 dataset """kinships""" +663 76 model """simple""" +663 76 loss """bceaftersigmoid""" +663 76 regularizer """no""" +663 76 optimizer """adam""" +663 76 training_loop """lcwa""" +663 76 evaluator """rankbased""" +663 77 dataset """kinships""" +663 77 model """simple""" +663 77 loss """bceaftersigmoid""" +663 77 regularizer """no""" +663 77 optimizer """adam""" +663 77 training_loop """lcwa""" +663 77 evaluator """rankbased""" +663 78 dataset """kinships""" +663 78 model """simple""" +663 78 loss """bceaftersigmoid""" +663 78 regularizer """no""" +663 78 optimizer """adam""" +663 78 training_loop """lcwa""" +663 78 evaluator """rankbased""" +663 79 dataset """kinships""" +663 79 model """simple""" +663 79 loss """bceaftersigmoid""" +663 79 regularizer """no""" +663 79 optimizer """adam""" +663 79 training_loop """lcwa""" +663 79 evaluator """rankbased""" +663 80 dataset """kinships""" +663 80 model """simple""" +663 80 loss """bceaftersigmoid""" +663 80 regularizer """no""" +663 80 optimizer """adam""" +663 80 training_loop """lcwa""" +663 80 evaluator """rankbased""" +663 81 dataset """kinships""" +663 81 model """simple""" +663 81 loss """bceaftersigmoid""" +663 81 regularizer """no""" +663 81 optimizer """adam""" +663 81 training_loop """lcwa""" +663 81 evaluator """rankbased""" +663 82 dataset """kinships""" +663 82 model """simple""" +663 82 loss """bceaftersigmoid""" +663 82 regularizer """no""" +663 82 optimizer """adam""" +663 82 training_loop """lcwa""" +663 82 evaluator """rankbased""" +663 83 dataset """kinships""" +663 83 model """simple""" +663 83 loss 
"""bceaftersigmoid""" +663 83 regularizer """no""" +663 83 optimizer """adam""" +663 83 training_loop """lcwa""" +663 83 evaluator """rankbased""" +663 84 dataset """kinships""" +663 84 model """simple""" +663 84 loss """bceaftersigmoid""" +663 84 regularizer """no""" +663 84 optimizer """adam""" +663 84 training_loop """lcwa""" +663 84 evaluator """rankbased""" +663 85 dataset """kinships""" +663 85 model """simple""" +663 85 loss """bceaftersigmoid""" +663 85 regularizer """no""" +663 85 optimizer """adam""" +663 85 training_loop """lcwa""" +663 85 evaluator """rankbased""" +663 86 dataset """kinships""" +663 86 model """simple""" +663 86 loss """bceaftersigmoid""" +663 86 regularizer """no""" +663 86 optimizer """adam""" +663 86 training_loop """lcwa""" +663 86 evaluator """rankbased""" +663 87 dataset """kinships""" +663 87 model """simple""" +663 87 loss """bceaftersigmoid""" +663 87 regularizer """no""" +663 87 optimizer """adam""" +663 87 training_loop """lcwa""" +663 87 evaluator """rankbased""" +663 88 dataset """kinships""" +663 88 model """simple""" +663 88 loss """bceaftersigmoid""" +663 88 regularizer """no""" +663 88 optimizer """adam""" +663 88 training_loop """lcwa""" +663 88 evaluator """rankbased""" +663 89 dataset """kinships""" +663 89 model """simple""" +663 89 loss """bceaftersigmoid""" +663 89 regularizer """no""" +663 89 optimizer """adam""" +663 89 training_loop """lcwa""" +663 89 evaluator """rankbased""" +663 90 dataset """kinships""" +663 90 model """simple""" +663 90 loss """bceaftersigmoid""" +663 90 regularizer """no""" +663 90 optimizer """adam""" +663 90 training_loop """lcwa""" +663 90 evaluator """rankbased""" +663 91 dataset """kinships""" +663 91 model """simple""" +663 91 loss """bceaftersigmoid""" +663 91 regularizer """no""" +663 91 optimizer """adam""" +663 91 training_loop """lcwa""" +663 91 evaluator """rankbased""" +663 92 dataset """kinships""" +663 92 model """simple""" +663 92 loss """bceaftersigmoid""" +663 92 
regularizer """no""" +663 92 optimizer """adam""" +663 92 training_loop """lcwa""" +663 92 evaluator """rankbased""" +663 93 dataset """kinships""" +663 93 model """simple""" +663 93 loss """bceaftersigmoid""" +663 93 regularizer """no""" +663 93 optimizer """adam""" +663 93 training_loop """lcwa""" +663 93 evaluator """rankbased""" +663 94 dataset """kinships""" +663 94 model """simple""" +663 94 loss """bceaftersigmoid""" +663 94 regularizer """no""" +663 94 optimizer """adam""" +663 94 training_loop """lcwa""" +663 94 evaluator """rankbased""" +663 95 dataset """kinships""" +663 95 model """simple""" +663 95 loss """bceaftersigmoid""" +663 95 regularizer """no""" +663 95 optimizer """adam""" +663 95 training_loop """lcwa""" +663 95 evaluator """rankbased""" +663 96 dataset """kinships""" +663 96 model """simple""" +663 96 loss """bceaftersigmoid""" +663 96 regularizer """no""" +663 96 optimizer """adam""" +663 96 training_loop """lcwa""" +663 96 evaluator """rankbased""" +663 97 dataset """kinships""" +663 97 model """simple""" +663 97 loss """bceaftersigmoid""" +663 97 regularizer """no""" +663 97 optimizer """adam""" +663 97 training_loop """lcwa""" +663 97 evaluator """rankbased""" +663 98 dataset """kinships""" +663 98 model """simple""" +663 98 loss """bceaftersigmoid""" +663 98 regularizer """no""" +663 98 optimizer """adam""" +663 98 training_loop """lcwa""" +663 98 evaluator """rankbased""" +663 99 dataset """kinships""" +663 99 model """simple""" +663 99 loss """bceaftersigmoid""" +663 99 regularizer """no""" +663 99 optimizer """adam""" +663 99 training_loop """lcwa""" +663 99 evaluator """rankbased""" +663 100 dataset """kinships""" +663 100 model """simple""" +663 100 loss """bceaftersigmoid""" +663 100 regularizer """no""" +663 100 optimizer """adam""" +663 100 training_loop """lcwa""" +663 100 evaluator """rankbased""" +664 1 model.embedding_dim 1.0 +664 1 optimizer.lr 0.015467709590382836 +664 1 training.batch_size 2.0 +664 1 
training.label_smoothing 0.02845826313298054 +664 2 model.embedding_dim 2.0 +664 2 optimizer.lr 0.006301008383124974 +664 2 training.batch_size 2.0 +664 2 training.label_smoothing 0.025704014429257432 +664 3 model.embedding_dim 0.0 +664 3 optimizer.lr 0.004894636828440907 +664 3 training.batch_size 0.0 +664 3 training.label_smoothing 0.9034487909813826 +664 4 model.embedding_dim 1.0 +664 4 optimizer.lr 0.027295341829117543 +664 4 training.batch_size 2.0 +664 4 training.label_smoothing 0.4952018358761905 +664 5 model.embedding_dim 1.0 +664 5 optimizer.lr 0.003051362621448708 +664 5 training.batch_size 1.0 +664 5 training.label_smoothing 0.009014058637554796 +664 6 model.embedding_dim 0.0 +664 6 optimizer.lr 0.0032264029263661385 +664 6 training.batch_size 2.0 +664 6 training.label_smoothing 0.8252511233019372 +664 7 model.embedding_dim 1.0 +664 7 optimizer.lr 0.03877427634547375 +664 7 training.batch_size 2.0 +664 7 training.label_smoothing 0.4655039188117298 +664 8 model.embedding_dim 1.0 +664 8 optimizer.lr 0.05063515485206386 +664 8 training.batch_size 0.0 +664 8 training.label_smoothing 0.18103196488055484 +664 9 model.embedding_dim 2.0 +664 9 optimizer.lr 0.00906885592350021 +664 9 training.batch_size 0.0 +664 9 training.label_smoothing 0.004845443582035034 +664 10 model.embedding_dim 0.0 +664 10 optimizer.lr 0.004172412940221136 +664 10 training.batch_size 2.0 +664 10 training.label_smoothing 0.001565246490312824 +664 11 model.embedding_dim 2.0 +664 11 optimizer.lr 0.0766422653809228 +664 11 training.batch_size 1.0 +664 11 training.label_smoothing 0.0029756189416007195 +664 12 model.embedding_dim 0.0 +664 12 optimizer.lr 0.0017416746252123941 +664 12 training.batch_size 2.0 +664 12 training.label_smoothing 0.0017568313835929878 +664 13 model.embedding_dim 0.0 +664 13 optimizer.lr 0.009017630008800393 +664 13 training.batch_size 0.0 +664 13 training.label_smoothing 0.23145979327444688 +664 14 model.embedding_dim 2.0 +664 14 optimizer.lr 0.014084885574110926 
+664 14 training.batch_size 0.0 +664 14 training.label_smoothing 0.28196566683365 +664 15 model.embedding_dim 2.0 +664 15 optimizer.lr 0.03305627832265529 +664 15 training.batch_size 1.0 +664 15 training.label_smoothing 0.0025336740358927693 +664 16 model.embedding_dim 2.0 +664 16 optimizer.lr 0.05992498380872092 +664 16 training.batch_size 0.0 +664 16 training.label_smoothing 0.01343175323545455 +664 17 model.embedding_dim 0.0 +664 17 optimizer.lr 0.00362114264287682 +664 17 training.batch_size 0.0 +664 17 training.label_smoothing 0.008619488081406857 +664 18 model.embedding_dim 0.0 +664 18 optimizer.lr 0.09085520622631076 +664 18 training.batch_size 0.0 +664 18 training.label_smoothing 0.5584608732760559 +664 19 model.embedding_dim 1.0 +664 19 optimizer.lr 0.015024038298678434 +664 19 training.batch_size 2.0 +664 19 training.label_smoothing 0.6472375622323164 +664 20 model.embedding_dim 1.0 +664 20 optimizer.lr 0.0012537438454916846 +664 20 training.batch_size 2.0 +664 20 training.label_smoothing 0.2666402219184016 +664 21 model.embedding_dim 1.0 +664 21 optimizer.lr 0.0017857754517001327 +664 21 training.batch_size 1.0 +664 21 training.label_smoothing 0.11025302031323209 +664 22 model.embedding_dim 0.0 +664 22 optimizer.lr 0.019383110388936316 +664 22 training.batch_size 2.0 +664 22 training.label_smoothing 0.001043014043526791 +664 23 model.embedding_dim 2.0 +664 23 optimizer.lr 0.0015860183673687528 +664 23 training.batch_size 1.0 +664 23 training.label_smoothing 0.001265998350767278 +664 24 model.embedding_dim 2.0 +664 24 optimizer.lr 0.0020080545363100714 +664 24 training.batch_size 1.0 +664 24 training.label_smoothing 0.002751610971811554 +664 25 model.embedding_dim 2.0 +664 25 optimizer.lr 0.09996447864282763 +664 25 training.batch_size 0.0 +664 25 training.label_smoothing 0.0020124477580076437 +664 26 model.embedding_dim 0.0 +664 26 optimizer.lr 0.0016843782723847457 +664 26 training.batch_size 1.0 +664 26 training.label_smoothing 0.0054902335607890755 
+664 27 model.embedding_dim 2.0 +664 27 optimizer.lr 0.010499495386150644 +664 27 training.batch_size 0.0 +664 27 training.label_smoothing 0.010880220010818554 +664 28 model.embedding_dim 2.0 +664 28 optimizer.lr 0.002470597803826597 +664 28 training.batch_size 1.0 +664 28 training.label_smoothing 0.0010288025438480559 +664 29 model.embedding_dim 0.0 +664 29 optimizer.lr 0.001220701074477988 +664 29 training.batch_size 1.0 +664 29 training.label_smoothing 0.0014128214672857419 +664 30 model.embedding_dim 2.0 +664 30 optimizer.lr 0.012948933086794638 +664 30 training.batch_size 0.0 +664 30 training.label_smoothing 0.0012923576464470453 +664 31 model.embedding_dim 0.0 +664 31 optimizer.lr 0.0010246885414332894 +664 31 training.batch_size 1.0 +664 31 training.label_smoothing 0.02229921133686882 +664 32 model.embedding_dim 1.0 +664 32 optimizer.lr 0.014840231613604933 +664 32 training.batch_size 1.0 +664 32 training.label_smoothing 0.004372696337734269 +664 33 model.embedding_dim 1.0 +664 33 optimizer.lr 0.05758535517210143 +664 33 training.batch_size 2.0 +664 33 training.label_smoothing 0.001012377050703587 +664 34 model.embedding_dim 2.0 +664 34 optimizer.lr 0.0609672727080121 +664 34 training.batch_size 0.0 +664 34 training.label_smoothing 0.14611948756003723 +664 35 model.embedding_dim 1.0 +664 35 optimizer.lr 0.03266957883820284 +664 35 training.batch_size 1.0 +664 35 training.label_smoothing 0.02359710144093967 +664 36 model.embedding_dim 1.0 +664 36 optimizer.lr 0.03771652932404309 +664 36 training.batch_size 1.0 +664 36 training.label_smoothing 0.022727481604874533 +664 37 model.embedding_dim 2.0 +664 37 optimizer.lr 0.08395161209271589 +664 37 training.batch_size 1.0 +664 37 training.label_smoothing 0.001707380504343577 +664 38 model.embedding_dim 1.0 +664 38 optimizer.lr 0.03048997600893785 +664 38 training.batch_size 0.0 +664 38 training.label_smoothing 0.594796377733102 +664 39 model.embedding_dim 2.0 +664 39 optimizer.lr 0.012222197986947141 +664 39 
training.batch_size 0.0 +664 39 training.label_smoothing 0.0012703632964140424 +664 40 model.embedding_dim 1.0 +664 40 optimizer.lr 0.005870650684987359 +664 40 training.batch_size 2.0 +664 40 training.label_smoothing 0.15549345676951265 +664 41 model.embedding_dim 1.0 +664 41 optimizer.lr 0.046497122379560156 +664 41 training.batch_size 0.0 +664 41 training.label_smoothing 0.002916794801330985 +664 42 model.embedding_dim 2.0 +664 42 optimizer.lr 0.019373649426776548 +664 42 training.batch_size 0.0 +664 42 training.label_smoothing 0.8955787429991507 +664 43 model.embedding_dim 1.0 +664 43 optimizer.lr 0.0024735840513603674 +664 43 training.batch_size 0.0 +664 43 training.label_smoothing 0.06484354425074874 +664 44 model.embedding_dim 1.0 +664 44 optimizer.lr 0.0017549235431523155 +664 44 training.batch_size 0.0 +664 44 training.label_smoothing 0.35151206713640915 +664 45 model.embedding_dim 2.0 +664 45 optimizer.lr 0.04810352464168616 +664 45 training.batch_size 0.0 +664 45 training.label_smoothing 0.7140333884014604 +664 46 model.embedding_dim 0.0 +664 46 optimizer.lr 0.09383152957426888 +664 46 training.batch_size 0.0 +664 46 training.label_smoothing 0.6012751147828378 +664 47 model.embedding_dim 1.0 +664 47 optimizer.lr 0.07735403190112206 +664 47 training.batch_size 1.0 +664 47 training.label_smoothing 0.5020890545817551 +664 48 model.embedding_dim 1.0 +664 48 optimizer.lr 0.01337576183910873 +664 48 training.batch_size 0.0 +664 48 training.label_smoothing 0.0026046257576530078 +664 49 model.embedding_dim 0.0 +664 49 optimizer.lr 0.014565140670514001 +664 49 training.batch_size 2.0 +664 49 training.label_smoothing 0.8527759444318204 +664 50 model.embedding_dim 0.0 +664 50 optimizer.lr 0.0037398698731258653 +664 50 training.batch_size 2.0 +664 50 training.label_smoothing 0.9634006027025184 +664 51 model.embedding_dim 1.0 +664 51 optimizer.lr 0.03460890923762654 +664 51 training.batch_size 2.0 +664 51 training.label_smoothing 0.07122687692160853 +664 52 
model.embedding_dim 2.0 +664 52 optimizer.lr 0.016352515474500638 +664 52 training.batch_size 2.0 +664 52 training.label_smoothing 0.021879557549710316 +664 53 model.embedding_dim 1.0 +664 53 optimizer.lr 0.009244606233898065 +664 53 training.batch_size 2.0 +664 53 training.label_smoothing 0.0017168447459247427 +664 54 model.embedding_dim 1.0 +664 54 optimizer.lr 0.019063152624770607 +664 54 training.batch_size 2.0 +664 54 training.label_smoothing 0.004422172049976876 +664 55 model.embedding_dim 0.0 +664 55 optimizer.lr 0.002928058918766234 +664 55 training.batch_size 2.0 +664 55 training.label_smoothing 0.03997306198170243 +664 56 model.embedding_dim 2.0 +664 56 optimizer.lr 0.028491011794417445 +664 56 training.batch_size 0.0 +664 56 training.label_smoothing 0.9679355410520403 +664 57 model.embedding_dim 0.0 +664 57 optimizer.lr 0.010949615632421942 +664 57 training.batch_size 1.0 +664 57 training.label_smoothing 0.003157141449382127 +664 58 model.embedding_dim 2.0 +664 58 optimizer.lr 0.003534943838324434 +664 58 training.batch_size 2.0 +664 58 training.label_smoothing 0.45339860949526645 +664 59 model.embedding_dim 0.0 +664 59 optimizer.lr 0.001761477984027067 +664 59 training.batch_size 2.0 +664 59 training.label_smoothing 0.9943057203578256 +664 60 model.embedding_dim 2.0 +664 60 optimizer.lr 0.006836860750225865 +664 60 training.batch_size 0.0 +664 60 training.label_smoothing 0.14479414819804542 +664 61 model.embedding_dim 1.0 +664 61 optimizer.lr 0.009875751982240576 +664 61 training.batch_size 0.0 +664 61 training.label_smoothing 0.14379168139171136 +664 62 model.embedding_dim 0.0 +664 62 optimizer.lr 0.029334141933842782 +664 62 training.batch_size 0.0 +664 62 training.label_smoothing 0.1749426546813429 +664 63 model.embedding_dim 1.0 +664 63 optimizer.lr 0.08997167301879312 +664 63 training.batch_size 2.0 +664 63 training.label_smoothing 0.2192871222471241 +664 64 model.embedding_dim 0.0 +664 64 optimizer.lr 0.0015600210164734975 +664 64 
training.batch_size 1.0 +664 64 training.label_smoothing 0.0019374843201200686 +664 65 model.embedding_dim 1.0 +664 65 optimizer.lr 0.009194345220181048 +664 65 training.batch_size 1.0 +664 65 training.label_smoothing 0.005292351319680535 +664 66 model.embedding_dim 1.0 +664 66 optimizer.lr 0.009754498907458279 +664 66 training.batch_size 2.0 +664 66 training.label_smoothing 0.14310549449127677 +664 67 model.embedding_dim 2.0 +664 67 optimizer.lr 0.005085272885819569 +664 67 training.batch_size 0.0 +664 67 training.label_smoothing 0.023777975462780605 +664 68 model.embedding_dim 0.0 +664 68 optimizer.lr 0.09986882044261773 +664 68 training.batch_size 0.0 +664 68 training.label_smoothing 0.04381028081426554 +664 69 model.embedding_dim 1.0 +664 69 optimizer.lr 0.013208956075841148 +664 69 training.batch_size 2.0 +664 69 training.label_smoothing 0.011977947321896993 +664 70 model.embedding_dim 0.0 +664 70 optimizer.lr 0.0015108722617255075 +664 70 training.batch_size 0.0 +664 70 training.label_smoothing 0.10084059510269416 +664 71 model.embedding_dim 0.0 +664 71 optimizer.lr 0.0018007093849092357 +664 71 training.batch_size 0.0 +664 71 training.label_smoothing 0.0331921489545696 +664 72 model.embedding_dim 0.0 +664 72 optimizer.lr 0.0011228976773265944 +664 72 training.batch_size 0.0 +664 72 training.label_smoothing 0.01648276939312432 +664 73 model.embedding_dim 1.0 +664 73 optimizer.lr 0.04119324408457091 +664 73 training.batch_size 0.0 +664 73 training.label_smoothing 0.002472206746079105 +664 74 model.embedding_dim 2.0 +664 74 optimizer.lr 0.08809558278579999 +664 74 training.batch_size 1.0 +664 74 training.label_smoothing 0.015154748649651224 +664 75 model.embedding_dim 1.0 +664 75 optimizer.lr 0.0011506842216377877 +664 75 training.batch_size 2.0 +664 75 training.label_smoothing 0.01165872463086141 +664 76 model.embedding_dim 2.0 +664 76 optimizer.lr 0.0010608264140320196 +664 76 training.batch_size 1.0 +664 76 training.label_smoothing 0.030267238938280695 +664 
77 model.embedding_dim 2.0 +664 77 optimizer.lr 0.0013806221955401893 +664 77 training.batch_size 1.0 +664 77 training.label_smoothing 0.9297503809339711 +664 78 model.embedding_dim 0.0 +664 78 optimizer.lr 0.00126089710986101 +664 78 training.batch_size 1.0 +664 78 training.label_smoothing 0.004591949381439427 +664 79 model.embedding_dim 0.0 +664 79 optimizer.lr 0.011527815111838525 +664 79 training.batch_size 0.0 +664 79 training.label_smoothing 0.3958447640745878 +664 80 model.embedding_dim 0.0 +664 80 optimizer.lr 0.04077713312382578 +664 80 training.batch_size 1.0 +664 80 training.label_smoothing 0.0024362302133555543 +664 81 model.embedding_dim 2.0 +664 81 optimizer.lr 0.0028773392500518 +664 81 training.batch_size 1.0 +664 81 training.label_smoothing 0.0010058201864228203 +664 82 model.embedding_dim 2.0 +664 82 optimizer.lr 0.03484448960053638 +664 82 training.batch_size 2.0 +664 82 training.label_smoothing 0.0023355415714044685 +664 83 model.embedding_dim 0.0 +664 83 optimizer.lr 0.08092460491714053 +664 83 training.batch_size 0.0 +664 83 training.label_smoothing 0.014538638752616493 +664 84 model.embedding_dim 0.0 +664 84 optimizer.lr 0.09180436448354286 +664 84 training.batch_size 0.0 +664 84 training.label_smoothing 0.01756332376930708 +664 85 model.embedding_dim 0.0 +664 85 optimizer.lr 0.002550573471529766 +664 85 training.batch_size 2.0 +664 85 training.label_smoothing 0.44378504648417527 +664 86 model.embedding_dim 0.0 +664 86 optimizer.lr 0.0048021299660551 +664 86 training.batch_size 0.0 +664 86 training.label_smoothing 0.5454915654641315 +664 87 model.embedding_dim 0.0 +664 87 optimizer.lr 0.003684625764048172 +664 87 training.batch_size 1.0 +664 87 training.label_smoothing 0.4386609527393293 +664 88 model.embedding_dim 2.0 +664 88 optimizer.lr 0.07768390997726397 +664 88 training.batch_size 0.0 +664 88 training.label_smoothing 0.003053638706916101 +664 89 model.embedding_dim 1.0 +664 89 optimizer.lr 0.01934155853656411 +664 89 training.batch_size 
1.0 +664 89 training.label_smoothing 0.46065762379779907 +664 90 model.embedding_dim 2.0 +664 90 optimizer.lr 0.03600597566143717 +664 90 training.batch_size 1.0 +664 90 training.label_smoothing 0.020931291961725475 +664 91 model.embedding_dim 2.0 +664 91 optimizer.lr 0.013311585240119682 +664 91 training.batch_size 1.0 +664 91 training.label_smoothing 0.9851061509796205 +664 92 model.embedding_dim 1.0 +664 92 optimizer.lr 0.04069663539519758 +664 92 training.batch_size 2.0 +664 92 training.label_smoothing 0.20581676135752414 +664 93 model.embedding_dim 0.0 +664 93 optimizer.lr 0.05332464220867806 +664 93 training.batch_size 2.0 +664 93 training.label_smoothing 0.006265085897153603 +664 94 model.embedding_dim 2.0 +664 94 optimizer.lr 0.018213935252841207 +664 94 training.batch_size 2.0 +664 94 training.label_smoothing 0.18487241094271029 +664 95 model.embedding_dim 2.0 +664 95 optimizer.lr 0.019117881874198453 +664 95 training.batch_size 0.0 +664 95 training.label_smoothing 0.007066210003751025 +664 96 model.embedding_dim 0.0 +664 96 optimizer.lr 0.08223861629456396 +664 96 training.batch_size 2.0 +664 96 training.label_smoothing 0.1844175242317655 +664 97 model.embedding_dim 1.0 +664 97 optimizer.lr 0.06269703193837564 +664 97 training.batch_size 1.0 +664 97 training.label_smoothing 0.4493364458924122 +664 98 model.embedding_dim 2.0 +664 98 optimizer.lr 0.08243452004038254 +664 98 training.batch_size 0.0 +664 98 training.label_smoothing 0.26530622957269884 +664 99 model.embedding_dim 0.0 +664 99 optimizer.lr 0.06951008173065713 +664 99 training.batch_size 2.0 +664 99 training.label_smoothing 0.060987371612839944 +664 100 model.embedding_dim 0.0 +664 100 optimizer.lr 0.0015358445661804159 +664 100 training.batch_size 0.0 +664 100 training.label_smoothing 0.0039868234897300955 +664 1 dataset """kinships""" +664 1 model """simple""" +664 1 loss """softplus""" +664 1 regularizer """no""" +664 1 optimizer """adam""" +664 1 training_loop """lcwa""" +664 1 evaluator 
"""rankbased""" +664 2 dataset """kinships""" +664 2 model """simple""" +664 2 loss """softplus""" +664 2 regularizer """no""" +664 2 optimizer """adam""" +664 2 training_loop """lcwa""" +664 2 evaluator """rankbased""" +664 3 dataset """kinships""" +664 3 model """simple""" +664 3 loss """softplus""" +664 3 regularizer """no""" +664 3 optimizer """adam""" +664 3 training_loop """lcwa""" +664 3 evaluator """rankbased""" +664 4 dataset """kinships""" +664 4 model """simple""" +664 4 loss """softplus""" +664 4 regularizer """no""" +664 4 optimizer """adam""" +664 4 training_loop """lcwa""" +664 4 evaluator """rankbased""" +664 5 dataset """kinships""" +664 5 model """simple""" +664 5 loss """softplus""" +664 5 regularizer """no""" +664 5 optimizer """adam""" +664 5 training_loop """lcwa""" +664 5 evaluator """rankbased""" +664 6 dataset """kinships""" +664 6 model """simple""" +664 6 loss """softplus""" +664 6 regularizer """no""" +664 6 optimizer """adam""" +664 6 training_loop """lcwa""" +664 6 evaluator """rankbased""" +664 7 dataset """kinships""" +664 7 model """simple""" +664 7 loss """softplus""" +664 7 regularizer """no""" +664 7 optimizer """adam""" +664 7 training_loop """lcwa""" +664 7 evaluator """rankbased""" +664 8 dataset """kinships""" +664 8 model """simple""" +664 8 loss """softplus""" +664 8 regularizer """no""" +664 8 optimizer """adam""" +664 8 training_loop """lcwa""" +664 8 evaluator """rankbased""" +664 9 dataset """kinships""" +664 9 model """simple""" +664 9 loss """softplus""" +664 9 regularizer """no""" +664 9 optimizer """adam""" +664 9 training_loop """lcwa""" +664 9 evaluator """rankbased""" +664 10 dataset """kinships""" +664 10 model """simple""" +664 10 loss """softplus""" +664 10 regularizer """no""" +664 10 optimizer """adam""" +664 10 training_loop """lcwa""" +664 10 evaluator """rankbased""" +664 11 dataset """kinships""" +664 11 model """simple""" +664 11 loss """softplus""" +664 11 regularizer """no""" +664 11 optimizer 
"""adam""" +664 11 training_loop """lcwa""" +664 11 evaluator """rankbased""" +664 12 dataset """kinships""" +664 12 model """simple""" +664 12 loss """softplus""" +664 12 regularizer """no""" +664 12 optimizer """adam""" +664 12 training_loop """lcwa""" +664 12 evaluator """rankbased""" +664 13 dataset """kinships""" +664 13 model """simple""" +664 13 loss """softplus""" +664 13 regularizer """no""" +664 13 optimizer """adam""" +664 13 training_loop """lcwa""" +664 13 evaluator """rankbased""" +664 14 dataset """kinships""" +664 14 model """simple""" +664 14 loss """softplus""" +664 14 regularizer """no""" +664 14 optimizer """adam""" +664 14 training_loop """lcwa""" +664 14 evaluator """rankbased""" +664 15 dataset """kinships""" +664 15 model """simple""" +664 15 loss """softplus""" +664 15 regularizer """no""" +664 15 optimizer """adam""" +664 15 training_loop """lcwa""" +664 15 evaluator """rankbased""" +664 16 dataset """kinships""" +664 16 model """simple""" +664 16 loss """softplus""" +664 16 regularizer """no""" +664 16 optimizer """adam""" +664 16 training_loop """lcwa""" +664 16 evaluator """rankbased""" +664 17 dataset """kinships""" +664 17 model """simple""" +664 17 loss """softplus""" +664 17 regularizer """no""" +664 17 optimizer """adam""" +664 17 training_loop """lcwa""" +664 17 evaluator """rankbased""" +664 18 dataset """kinships""" +664 18 model """simple""" +664 18 loss """softplus""" +664 18 regularizer """no""" +664 18 optimizer """adam""" +664 18 training_loop """lcwa""" +664 18 evaluator """rankbased""" +664 19 dataset """kinships""" +664 19 model """simple""" +664 19 loss """softplus""" +664 19 regularizer """no""" +664 19 optimizer """adam""" +664 19 training_loop """lcwa""" +664 19 evaluator """rankbased""" +664 20 dataset """kinships""" +664 20 model """simple""" +664 20 loss """softplus""" +664 20 regularizer """no""" +664 20 optimizer """adam""" +664 20 training_loop """lcwa""" +664 20 evaluator """rankbased""" +664 21 dataset 
"""kinships""" +664 21 model """simple""" +664 21 loss """softplus""" +664 21 regularizer """no""" +664 21 optimizer """adam""" +664 21 training_loop """lcwa""" +664 21 evaluator """rankbased""" +664 22 dataset """kinships""" +664 22 model """simple""" +664 22 loss """softplus""" +664 22 regularizer """no""" +664 22 optimizer """adam""" +664 22 training_loop """lcwa""" +664 22 evaluator """rankbased""" +664 23 dataset """kinships""" +664 23 model """simple""" +664 23 loss """softplus""" +664 23 regularizer """no""" +664 23 optimizer """adam""" +664 23 training_loop """lcwa""" +664 23 evaluator """rankbased""" +664 24 dataset """kinships""" +664 24 model """simple""" +664 24 loss """softplus""" +664 24 regularizer """no""" +664 24 optimizer """adam""" +664 24 training_loop """lcwa""" +664 24 evaluator """rankbased""" +664 25 dataset """kinships""" +664 25 model """simple""" +664 25 loss """softplus""" +664 25 regularizer """no""" +664 25 optimizer """adam""" +664 25 training_loop """lcwa""" +664 25 evaluator """rankbased""" +664 26 dataset """kinships""" +664 26 model """simple""" +664 26 loss """softplus""" +664 26 regularizer """no""" +664 26 optimizer """adam""" +664 26 training_loop """lcwa""" +664 26 evaluator """rankbased""" +664 27 dataset """kinships""" +664 27 model """simple""" +664 27 loss """softplus""" +664 27 regularizer """no""" +664 27 optimizer """adam""" +664 27 training_loop """lcwa""" +664 27 evaluator """rankbased""" +664 28 dataset """kinships""" +664 28 model """simple""" +664 28 loss """softplus""" +664 28 regularizer """no""" +664 28 optimizer """adam""" +664 28 training_loop """lcwa""" +664 28 evaluator """rankbased""" +664 29 dataset """kinships""" +664 29 model """simple""" +664 29 loss """softplus""" +664 29 regularizer """no""" +664 29 optimizer """adam""" +664 29 training_loop """lcwa""" +664 29 evaluator """rankbased""" +664 30 dataset """kinships""" +664 30 model """simple""" +664 30 loss """softplus""" +664 30 regularizer """no""" 
+664 30 optimizer """adam""" +664 30 training_loop """lcwa""" +664 30 evaluator """rankbased""" +664 31 dataset """kinships""" +664 31 model """simple""" +664 31 loss """softplus""" +664 31 regularizer """no""" +664 31 optimizer """adam""" +664 31 training_loop """lcwa""" +664 31 evaluator """rankbased""" +664 32 dataset """kinships""" +664 32 model """simple""" +664 32 loss """softplus""" +664 32 regularizer """no""" +664 32 optimizer """adam""" +664 32 training_loop """lcwa""" +664 32 evaluator """rankbased""" +664 33 dataset """kinships""" +664 33 model """simple""" +664 33 loss """softplus""" +664 33 regularizer """no""" +664 33 optimizer """adam""" +664 33 training_loop """lcwa""" +664 33 evaluator """rankbased""" +664 34 dataset """kinships""" +664 34 model """simple""" +664 34 loss """softplus""" +664 34 regularizer """no""" +664 34 optimizer """adam""" +664 34 training_loop """lcwa""" +664 34 evaluator """rankbased""" +664 35 dataset """kinships""" +664 35 model """simple""" +664 35 loss """softplus""" +664 35 regularizer """no""" +664 35 optimizer """adam""" +664 35 training_loop """lcwa""" +664 35 evaluator """rankbased""" +664 36 dataset """kinships""" +664 36 model """simple""" +664 36 loss """softplus""" +664 36 regularizer """no""" +664 36 optimizer """adam""" +664 36 training_loop """lcwa""" +664 36 evaluator """rankbased""" +664 37 dataset """kinships""" +664 37 model """simple""" +664 37 loss """softplus""" +664 37 regularizer """no""" +664 37 optimizer """adam""" +664 37 training_loop """lcwa""" +664 37 evaluator """rankbased""" +664 38 dataset """kinships""" +664 38 model """simple""" +664 38 loss """softplus""" +664 38 regularizer """no""" +664 38 optimizer """adam""" +664 38 training_loop """lcwa""" +664 38 evaluator """rankbased""" +664 39 dataset """kinships""" +664 39 model """simple""" +664 39 loss """softplus""" +664 39 regularizer """no""" +664 39 optimizer """adam""" +664 39 training_loop """lcwa""" +664 39 evaluator """rankbased""" +664 
40 dataset """kinships""" +664 40 model """simple""" +664 40 loss """softplus""" +664 40 regularizer """no""" +664 40 optimizer """adam""" +664 40 training_loop """lcwa""" +664 40 evaluator """rankbased""" +664 41 dataset """kinships""" +664 41 model """simple""" +664 41 loss """softplus""" +664 41 regularizer """no""" +664 41 optimizer """adam""" +664 41 training_loop """lcwa""" +664 41 evaluator """rankbased""" +664 42 dataset """kinships""" +664 42 model """simple""" +664 42 loss """softplus""" +664 42 regularizer """no""" +664 42 optimizer """adam""" +664 42 training_loop """lcwa""" +664 42 evaluator """rankbased""" +664 43 dataset """kinships""" +664 43 model """simple""" +664 43 loss """softplus""" +664 43 regularizer """no""" +664 43 optimizer """adam""" +664 43 training_loop """lcwa""" +664 43 evaluator """rankbased""" +664 44 dataset """kinships""" +664 44 model """simple""" +664 44 loss """softplus""" +664 44 regularizer """no""" +664 44 optimizer """adam""" +664 44 training_loop """lcwa""" +664 44 evaluator """rankbased""" +664 45 dataset """kinships""" +664 45 model """simple""" +664 45 loss """softplus""" +664 45 regularizer """no""" +664 45 optimizer """adam""" +664 45 training_loop """lcwa""" +664 45 evaluator """rankbased""" +664 46 dataset """kinships""" +664 46 model """simple""" +664 46 loss """softplus""" +664 46 regularizer """no""" +664 46 optimizer """adam""" +664 46 training_loop """lcwa""" +664 46 evaluator """rankbased""" +664 47 dataset """kinships""" +664 47 model """simple""" +664 47 loss """softplus""" +664 47 regularizer """no""" +664 47 optimizer """adam""" +664 47 training_loop """lcwa""" +664 47 evaluator """rankbased""" +664 48 dataset """kinships""" +664 48 model """simple""" +664 48 loss """softplus""" +664 48 regularizer """no""" +664 48 optimizer """adam""" +664 48 training_loop """lcwa""" +664 48 evaluator """rankbased""" +664 49 dataset """kinships""" +664 49 model """simple""" +664 49 loss """softplus""" +664 49 regularizer 
"""no""" +664 49 optimizer """adam""" +664 49 training_loop """lcwa""" +664 49 evaluator """rankbased""" +664 50 dataset """kinships""" +664 50 model """simple""" +664 50 loss """softplus""" +664 50 regularizer """no""" +664 50 optimizer """adam""" +664 50 training_loop """lcwa""" +664 50 evaluator """rankbased""" +664 51 dataset """kinships""" +664 51 model """simple""" +664 51 loss """softplus""" +664 51 regularizer """no""" +664 51 optimizer """adam""" +664 51 training_loop """lcwa""" +664 51 evaluator """rankbased""" +664 52 dataset """kinships""" +664 52 model """simple""" +664 52 loss """softplus""" +664 52 regularizer """no""" +664 52 optimizer """adam""" +664 52 training_loop """lcwa""" +664 52 evaluator """rankbased""" +664 53 dataset """kinships""" +664 53 model """simple""" +664 53 loss """softplus""" +664 53 regularizer """no""" +664 53 optimizer """adam""" +664 53 training_loop """lcwa""" +664 53 evaluator """rankbased""" +664 54 dataset """kinships""" +664 54 model """simple""" +664 54 loss """softplus""" +664 54 regularizer """no""" +664 54 optimizer """adam""" +664 54 training_loop """lcwa""" +664 54 evaluator """rankbased""" +664 55 dataset """kinships""" +664 55 model """simple""" +664 55 loss """softplus""" +664 55 regularizer """no""" +664 55 optimizer """adam""" +664 55 training_loop """lcwa""" +664 55 evaluator """rankbased""" +664 56 dataset """kinships""" +664 56 model """simple""" +664 56 loss """softplus""" +664 56 regularizer """no""" +664 56 optimizer """adam""" +664 56 training_loop """lcwa""" +664 56 evaluator """rankbased""" +664 57 dataset """kinships""" +664 57 model """simple""" +664 57 loss """softplus""" +664 57 regularizer """no""" +664 57 optimizer """adam""" +664 57 training_loop """lcwa""" +664 57 evaluator """rankbased""" +664 58 dataset """kinships""" +664 58 model """simple""" +664 58 loss """softplus""" +664 58 regularizer """no""" +664 58 optimizer """adam""" +664 58 training_loop """lcwa""" +664 58 evaluator 
"""rankbased""" +664 59 dataset """kinships""" +664 59 model """simple""" +664 59 loss """softplus""" +664 59 regularizer """no""" +664 59 optimizer """adam""" +664 59 training_loop """lcwa""" +664 59 evaluator """rankbased""" +664 60 dataset """kinships""" +664 60 model """simple""" +664 60 loss """softplus""" +664 60 regularizer """no""" +664 60 optimizer """adam""" +664 60 training_loop """lcwa""" +664 60 evaluator """rankbased""" +664 61 dataset """kinships""" +664 61 model """simple""" +664 61 loss """softplus""" +664 61 regularizer """no""" +664 61 optimizer """adam""" +664 61 training_loop """lcwa""" +664 61 evaluator """rankbased""" +664 62 dataset """kinships""" +664 62 model """simple""" +664 62 loss """softplus""" +664 62 regularizer """no""" +664 62 optimizer """adam""" +664 62 training_loop """lcwa""" +664 62 evaluator """rankbased""" +664 63 dataset """kinships""" +664 63 model """simple""" +664 63 loss """softplus""" +664 63 regularizer """no""" +664 63 optimizer """adam""" +664 63 training_loop """lcwa""" +664 63 evaluator """rankbased""" +664 64 dataset """kinships""" +664 64 model """simple""" +664 64 loss """softplus""" +664 64 regularizer """no""" +664 64 optimizer """adam""" +664 64 training_loop """lcwa""" +664 64 evaluator """rankbased""" +664 65 dataset """kinships""" +664 65 model """simple""" +664 65 loss """softplus""" +664 65 regularizer """no""" +664 65 optimizer """adam""" +664 65 training_loop """lcwa""" +664 65 evaluator """rankbased""" +664 66 dataset """kinships""" +664 66 model """simple""" +664 66 loss """softplus""" +664 66 regularizer """no""" +664 66 optimizer """adam""" +664 66 training_loop """lcwa""" +664 66 evaluator """rankbased""" +664 67 dataset """kinships""" +664 67 model """simple""" +664 67 loss """softplus""" +664 67 regularizer """no""" +664 67 optimizer """adam""" +664 67 training_loop """lcwa""" +664 67 evaluator """rankbased""" +664 68 dataset """kinships""" +664 68 model """simple""" +664 68 loss 
"""softplus""" +664 68 regularizer """no""" +664 68 optimizer """adam""" +664 68 training_loop """lcwa""" +664 68 evaluator """rankbased""" +664 69 dataset """kinships""" +664 69 model """simple""" +664 69 loss """softplus""" +664 69 regularizer """no""" +664 69 optimizer """adam""" +664 69 training_loop """lcwa""" +664 69 evaluator """rankbased""" +664 70 dataset """kinships""" +664 70 model """simple""" +664 70 loss """softplus""" +664 70 regularizer """no""" +664 70 optimizer """adam""" +664 70 training_loop """lcwa""" +664 70 evaluator """rankbased""" +664 71 dataset """kinships""" +664 71 model """simple""" +664 71 loss """softplus""" +664 71 regularizer """no""" +664 71 optimizer """adam""" +664 71 training_loop """lcwa""" +664 71 evaluator """rankbased""" +664 72 dataset """kinships""" +664 72 model """simple""" +664 72 loss """softplus""" +664 72 regularizer """no""" +664 72 optimizer """adam""" +664 72 training_loop """lcwa""" +664 72 evaluator """rankbased""" +664 73 dataset """kinships""" +664 73 model """simple""" +664 73 loss """softplus""" +664 73 regularizer """no""" +664 73 optimizer """adam""" +664 73 training_loop """lcwa""" +664 73 evaluator """rankbased""" +664 74 dataset """kinships""" +664 74 model """simple""" +664 74 loss """softplus""" +664 74 regularizer """no""" +664 74 optimizer """adam""" +664 74 training_loop """lcwa""" +664 74 evaluator """rankbased""" +664 75 dataset """kinships""" +664 75 model """simple""" +664 75 loss """softplus""" +664 75 regularizer """no""" +664 75 optimizer """adam""" +664 75 training_loop """lcwa""" +664 75 evaluator """rankbased""" +664 76 dataset """kinships""" +664 76 model """simple""" +664 76 loss """softplus""" +664 76 regularizer """no""" +664 76 optimizer """adam""" +664 76 training_loop """lcwa""" +664 76 evaluator """rankbased""" +664 77 dataset """kinships""" +664 77 model """simple""" +664 77 loss """softplus""" +664 77 regularizer """no""" +664 77 optimizer """adam""" +664 77 training_loop 
"""lcwa""" +664 77 evaluator """rankbased""" +664 78 dataset """kinships""" +664 78 model """simple""" +664 78 loss """softplus""" +664 78 regularizer """no""" +664 78 optimizer """adam""" +664 78 training_loop """lcwa""" +664 78 evaluator """rankbased""" +664 79 dataset """kinships""" +664 79 model """simple""" +664 79 loss """softplus""" +664 79 regularizer """no""" +664 79 optimizer """adam""" +664 79 training_loop """lcwa""" +664 79 evaluator """rankbased""" +664 80 dataset """kinships""" +664 80 model """simple""" +664 80 loss """softplus""" +664 80 regularizer """no""" +664 80 optimizer """adam""" +664 80 training_loop """lcwa""" +664 80 evaluator """rankbased""" +664 81 dataset """kinships""" +664 81 model """simple""" +664 81 loss """softplus""" +664 81 regularizer """no""" +664 81 optimizer """adam""" +664 81 training_loop """lcwa""" +664 81 evaluator """rankbased""" +664 82 dataset """kinships""" +664 82 model """simple""" +664 82 loss """softplus""" +664 82 regularizer """no""" +664 82 optimizer """adam""" +664 82 training_loop """lcwa""" +664 82 evaluator """rankbased""" +664 83 dataset """kinships""" +664 83 model """simple""" +664 83 loss """softplus""" +664 83 regularizer """no""" +664 83 optimizer """adam""" +664 83 training_loop """lcwa""" +664 83 evaluator """rankbased""" +664 84 dataset """kinships""" +664 84 model """simple""" +664 84 loss """softplus""" +664 84 regularizer """no""" +664 84 optimizer """adam""" +664 84 training_loop """lcwa""" +664 84 evaluator """rankbased""" +664 85 dataset """kinships""" +664 85 model """simple""" +664 85 loss """softplus""" +664 85 regularizer """no""" +664 85 optimizer """adam""" +664 85 training_loop """lcwa""" +664 85 evaluator """rankbased""" +664 86 dataset """kinships""" +664 86 model """simple""" +664 86 loss """softplus""" +664 86 regularizer """no""" +664 86 optimizer """adam""" +664 86 training_loop """lcwa""" +664 86 evaluator """rankbased""" +664 87 dataset """kinships""" +664 87 model 
"""simple""" +664 87 loss """softplus""" +664 87 regularizer """no""" +664 87 optimizer """adam""" +664 87 training_loop """lcwa""" +664 87 evaluator """rankbased""" +664 88 dataset """kinships""" +664 88 model """simple""" +664 88 loss """softplus""" +664 88 regularizer """no""" +664 88 optimizer """adam""" +664 88 training_loop """lcwa""" +664 88 evaluator """rankbased""" +664 89 dataset """kinships""" +664 89 model """simple""" +664 89 loss """softplus""" +664 89 regularizer """no""" +664 89 optimizer """adam""" +664 89 training_loop """lcwa""" +664 89 evaluator """rankbased""" +664 90 dataset """kinships""" +664 90 model """simple""" +664 90 loss """softplus""" +664 90 regularizer """no""" +664 90 optimizer """adam""" +664 90 training_loop """lcwa""" +664 90 evaluator """rankbased""" +664 91 dataset """kinships""" +664 91 model """simple""" +664 91 loss """softplus""" +664 91 regularizer """no""" +664 91 optimizer """adam""" +664 91 training_loop """lcwa""" +664 91 evaluator """rankbased""" +664 92 dataset """kinships""" +664 92 model """simple""" +664 92 loss """softplus""" +664 92 regularizer """no""" +664 92 optimizer """adam""" +664 92 training_loop """lcwa""" +664 92 evaluator """rankbased""" +664 93 dataset """kinships""" +664 93 model """simple""" +664 93 loss """softplus""" +664 93 regularizer """no""" +664 93 optimizer """adam""" +664 93 training_loop """lcwa""" +664 93 evaluator """rankbased""" +664 94 dataset """kinships""" +664 94 model """simple""" +664 94 loss """softplus""" +664 94 regularizer """no""" +664 94 optimizer """adam""" +664 94 training_loop """lcwa""" +664 94 evaluator """rankbased""" +664 95 dataset """kinships""" +664 95 model """simple""" +664 95 loss """softplus""" +664 95 regularizer """no""" +664 95 optimizer """adam""" +664 95 training_loop """lcwa""" +664 95 evaluator """rankbased""" +664 96 dataset """kinships""" +664 96 model """simple""" +664 96 loss """softplus""" +664 96 regularizer """no""" +664 96 optimizer """adam""" 
+664 96 training_loop """lcwa""" +664 96 evaluator """rankbased""" +664 97 dataset """kinships""" +664 97 model """simple""" +664 97 loss """softplus""" +664 97 regularizer """no""" +664 97 optimizer """adam""" +664 97 training_loop """lcwa""" +664 97 evaluator """rankbased""" +664 98 dataset """kinships""" +664 98 model """simple""" +664 98 loss """softplus""" +664 98 regularizer """no""" +664 98 optimizer """adam""" +664 98 training_loop """lcwa""" +664 98 evaluator """rankbased""" +664 99 dataset """kinships""" +664 99 model """simple""" +664 99 loss """softplus""" +664 99 regularizer """no""" +664 99 optimizer """adam""" +664 99 training_loop """lcwa""" +664 99 evaluator """rankbased""" +664 100 dataset """kinships""" +664 100 model """simple""" +664 100 loss """softplus""" +664 100 regularizer """no""" +664 100 optimizer """adam""" +664 100 training_loop """lcwa""" +664 100 evaluator """rankbased""" +665 1 model.embedding_dim 2.0 +665 1 optimizer.lr 0.07671544053194738 +665 1 training.batch_size 1.0 +665 1 training.label_smoothing 0.010821472064935543 +665 2 model.embedding_dim 0.0 +665 2 optimizer.lr 0.06913993111570219 +665 2 training.batch_size 0.0 +665 2 training.label_smoothing 0.015663853704289837 +665 3 model.embedding_dim 0.0 +665 3 optimizer.lr 0.0016606200178079257 +665 3 training.batch_size 2.0 +665 3 training.label_smoothing 0.2956303813494435 +665 4 model.embedding_dim 0.0 +665 4 optimizer.lr 0.04558310627386559 +665 4 training.batch_size 0.0 +665 4 training.label_smoothing 0.11976669037811191 +665 5 model.embedding_dim 1.0 +665 5 optimizer.lr 0.0022046663069561043 +665 5 training.batch_size 2.0 +665 5 training.label_smoothing 0.01842115103417079 +665 6 model.embedding_dim 2.0 +665 6 optimizer.lr 0.018777523462360134 +665 6 training.batch_size 0.0 +665 6 training.label_smoothing 0.4040519780987585 +665 7 model.embedding_dim 0.0 +665 7 optimizer.lr 0.0016288733362293274 +665 7 training.batch_size 1.0 +665 7 training.label_smoothing 
0.5984472550438236 +665 8 model.embedding_dim 2.0 +665 8 optimizer.lr 0.00243633572488156 +665 8 training.batch_size 0.0 +665 8 training.label_smoothing 0.12104804518024886 +665 9 model.embedding_dim 2.0 +665 9 optimizer.lr 0.0027045460219767884 +665 9 training.batch_size 2.0 +665 9 training.label_smoothing 0.3987740165716214 +665 10 model.embedding_dim 2.0 +665 10 optimizer.lr 0.005882759634721619 +665 10 training.batch_size 1.0 +665 10 training.label_smoothing 0.4249269926561269 +665 11 model.embedding_dim 0.0 +665 11 optimizer.lr 0.008446433027797063 +665 11 training.batch_size 1.0 +665 11 training.label_smoothing 0.0012541059054340063 +665 12 model.embedding_dim 1.0 +665 12 optimizer.lr 0.016599754454259433 +665 12 training.batch_size 2.0 +665 12 training.label_smoothing 0.20520970684759246 +665 13 model.embedding_dim 1.0 +665 13 optimizer.lr 0.03815550509345833 +665 13 training.batch_size 1.0 +665 13 training.label_smoothing 0.28243435094948666 +665 14 model.embedding_dim 1.0 +665 14 optimizer.lr 0.0018798912047098095 +665 14 training.batch_size 2.0 +665 14 training.label_smoothing 0.001457070128879076 +665 15 model.embedding_dim 0.0 +665 15 optimizer.lr 0.004023683641617266 +665 15 training.batch_size 2.0 +665 15 training.label_smoothing 0.5978323424808272 +665 16 model.embedding_dim 2.0 +665 16 optimizer.lr 0.0755441900735481 +665 16 training.batch_size 2.0 +665 16 training.label_smoothing 0.029249869825446945 +665 17 model.embedding_dim 0.0 +665 17 optimizer.lr 0.04067481541878522 +665 17 training.batch_size 0.0 +665 17 training.label_smoothing 0.13065561786366967 +665 18 model.embedding_dim 1.0 +665 18 optimizer.lr 0.0030556722898744076 +665 18 training.batch_size 0.0 +665 18 training.label_smoothing 0.028285728890195094 +665 19 model.embedding_dim 0.0 +665 19 optimizer.lr 0.032425567962103365 +665 19 training.batch_size 0.0 +665 19 training.label_smoothing 0.15330480119853243 +665 20 model.embedding_dim 0.0 +665 20 optimizer.lr 0.0010008476312891148 +665 
20 training.batch_size 2.0 +665 20 training.label_smoothing 0.020062493765476364 +665 21 model.embedding_dim 0.0 +665 21 optimizer.lr 0.0011905742554516906 +665 21 training.batch_size 0.0 +665 21 training.label_smoothing 0.013144659021452872 +665 22 model.embedding_dim 0.0 +665 22 optimizer.lr 0.0024662478905366084 +665 22 training.batch_size 0.0 +665 22 training.label_smoothing 0.0015802795379303105 +665 23 model.embedding_dim 1.0 +665 23 optimizer.lr 0.0017056041403171122 +665 23 training.batch_size 1.0 +665 23 training.label_smoothing 0.009486205554120795 +665 24 model.embedding_dim 2.0 +665 24 optimizer.lr 0.08209511977710611 +665 24 training.batch_size 0.0 +665 24 training.label_smoothing 0.002928710836577268 +665 25 model.embedding_dim 2.0 +665 25 optimizer.lr 0.0072425369869094135 +665 25 training.batch_size 2.0 +665 25 training.label_smoothing 0.003858195631339062 +665 26 model.embedding_dim 2.0 +665 26 optimizer.lr 0.004754729027735724 +665 26 training.batch_size 0.0 +665 26 training.label_smoothing 0.16775426303793492 +665 27 model.embedding_dim 2.0 +665 27 optimizer.lr 0.04304907598508776 +665 27 training.batch_size 0.0 +665 27 training.label_smoothing 0.011222934944144551 +665 28 model.embedding_dim 0.0 +665 28 optimizer.lr 0.011522232700513373 +665 28 training.batch_size 0.0 +665 28 training.label_smoothing 0.06588550555612141 +665 29 model.embedding_dim 2.0 +665 29 optimizer.lr 0.045422310245480595 +665 29 training.batch_size 0.0 +665 29 training.label_smoothing 0.7731584128352506 +665 30 model.embedding_dim 1.0 +665 30 optimizer.lr 0.03254854428005289 +665 30 training.batch_size 1.0 +665 30 training.label_smoothing 0.015996517525689348 +665 31 model.embedding_dim 1.0 +665 31 optimizer.lr 0.08199209742316572 +665 31 training.batch_size 0.0 +665 31 training.label_smoothing 0.3408699380829457 +665 32 model.embedding_dim 0.0 +665 32 optimizer.lr 0.0029578616282910134 +665 32 training.batch_size 0.0 +665 32 training.label_smoothing 0.052761465691744676 
+665 33 model.embedding_dim 2.0 +665 33 optimizer.lr 0.0032902512581352264 +665 33 training.batch_size 0.0 +665 33 training.label_smoothing 0.07291839004542731 +665 34 model.embedding_dim 0.0 +665 34 optimizer.lr 0.039555354555284736 +665 34 training.batch_size 1.0 +665 34 training.label_smoothing 0.06997927613125465 +665 35 model.embedding_dim 0.0 +665 35 optimizer.lr 0.09810166375074628 +665 35 training.batch_size 2.0 +665 35 training.label_smoothing 0.048207355324555515 +665 36 model.embedding_dim 1.0 +665 36 optimizer.lr 0.0040767013878891065 +665 36 training.batch_size 1.0 +665 36 training.label_smoothing 0.3651491182500753 +665 37 model.embedding_dim 0.0 +665 37 optimizer.lr 0.01385329619750632 +665 37 training.batch_size 1.0 +665 37 training.label_smoothing 0.07438624650895773 +665 38 model.embedding_dim 1.0 +665 38 optimizer.lr 0.0022391736524377836 +665 38 training.batch_size 1.0 +665 38 training.label_smoothing 0.0015248683300110918 +665 39 model.embedding_dim 2.0 +665 39 optimizer.lr 0.004061820626613721 +665 39 training.batch_size 1.0 +665 39 training.label_smoothing 0.04764304180132932 +665 40 model.embedding_dim 2.0 +665 40 optimizer.lr 0.012075837932876058 +665 40 training.batch_size 0.0 +665 40 training.label_smoothing 0.6584720742528509 +665 41 model.embedding_dim 1.0 +665 41 optimizer.lr 0.0034026648925318216 +665 41 training.batch_size 1.0 +665 41 training.label_smoothing 0.0073172962954388306 +665 42 model.embedding_dim 2.0 +665 42 optimizer.lr 0.004913647981569282 +665 42 training.batch_size 1.0 +665 42 training.label_smoothing 0.5814672648268739 +665 43 model.embedding_dim 2.0 +665 43 optimizer.lr 0.0639838060944714 +665 43 training.batch_size 0.0 +665 43 training.label_smoothing 0.15504274580143468 +665 44 model.embedding_dim 2.0 +665 44 optimizer.lr 0.011083367171445816 +665 44 training.batch_size 1.0 +665 44 training.label_smoothing 0.02720264180224442 +665 45 model.embedding_dim 1.0 +665 45 optimizer.lr 0.02771614428674209 +665 45 
training.batch_size 1.0 +665 45 training.label_smoothing 0.0023025885516108485 +665 46 model.embedding_dim 0.0 +665 46 optimizer.lr 0.0013019902476871258 +665 46 training.batch_size 2.0 +665 46 training.label_smoothing 0.283914229318987 +665 47 model.embedding_dim 1.0 +665 47 optimizer.lr 0.08508069726213896 +665 47 training.batch_size 0.0 +665 47 training.label_smoothing 0.18624948407570863 +665 48 model.embedding_dim 2.0 +665 48 optimizer.lr 0.012266961553982785 +665 48 training.batch_size 0.0 +665 48 training.label_smoothing 0.005224068293993902 +665 49 model.embedding_dim 1.0 +665 49 optimizer.lr 0.08901677206105515 +665 49 training.batch_size 1.0 +665 49 training.label_smoothing 0.4067438964335179 +665 50 model.embedding_dim 0.0 +665 50 optimizer.lr 0.0012011623002054076 +665 50 training.batch_size 2.0 +665 50 training.label_smoothing 0.013378236213304898 +665 51 model.embedding_dim 2.0 +665 51 optimizer.lr 0.06968852793985106 +665 51 training.batch_size 1.0 +665 51 training.label_smoothing 0.060923763172911635 +665 52 model.embedding_dim 1.0 +665 52 optimizer.lr 0.0013772548695776647 +665 52 training.batch_size 2.0 +665 52 training.label_smoothing 0.41700742269025287 +665 53 model.embedding_dim 2.0 +665 53 optimizer.lr 0.0012558613728513876 +665 53 training.batch_size 1.0 +665 53 training.label_smoothing 0.0015205254769924369 +665 54 model.embedding_dim 0.0 +665 54 optimizer.lr 0.007774513699960807 +665 54 training.batch_size 0.0 +665 54 training.label_smoothing 0.0043557229092907445 +665 55 model.embedding_dim 0.0 +665 55 optimizer.lr 0.007472040755573566 +665 55 training.batch_size 1.0 +665 55 training.label_smoothing 0.7993923965211868 +665 56 model.embedding_dim 0.0 +665 56 optimizer.lr 0.0035842892309410203 +665 56 training.batch_size 2.0 +665 56 training.label_smoothing 0.2200031357033905 +665 57 model.embedding_dim 0.0 +665 57 optimizer.lr 0.0012190924651044137 +665 57 training.batch_size 2.0 +665 57 training.label_smoothing 0.06855101419005674 +665 58 
model.embedding_dim 1.0 +665 58 optimizer.lr 0.09771239168557384 +665 58 training.batch_size 2.0 +665 58 training.label_smoothing 0.45103849157509174 +665 59 model.embedding_dim 0.0 +665 59 optimizer.lr 0.014729896909769743 +665 59 training.batch_size 1.0 +665 59 training.label_smoothing 0.02304637279575116 +665 60 model.embedding_dim 2.0 +665 60 optimizer.lr 0.002871639835267266 +665 60 training.batch_size 1.0 +665 60 training.label_smoothing 0.02038192722740563 +665 61 model.embedding_dim 1.0 +665 61 optimizer.lr 0.015797504125697172 +665 61 training.batch_size 2.0 +665 61 training.label_smoothing 0.005399770201809689 +665 62 model.embedding_dim 0.0 +665 62 optimizer.lr 0.02767998485722872 +665 62 training.batch_size 0.0 +665 62 training.label_smoothing 0.010716405284946762 +665 63 model.embedding_dim 2.0 +665 63 optimizer.lr 0.03789453623360727 +665 63 training.batch_size 0.0 +665 63 training.label_smoothing 0.7487382839336827 +665 64 model.embedding_dim 2.0 +665 64 optimizer.lr 0.01261706496046246 +665 64 training.batch_size 0.0 +665 64 training.label_smoothing 0.001616157829520993 +665 65 model.embedding_dim 0.0 +665 65 optimizer.lr 0.023188019397005915 +665 65 training.batch_size 0.0 +665 65 training.label_smoothing 0.0012792470297036107 +665 66 model.embedding_dim 2.0 +665 66 optimizer.lr 0.034488541071544455 +665 66 training.batch_size 0.0 +665 66 training.label_smoothing 0.020225790577751722 +665 67 model.embedding_dim 1.0 +665 67 optimizer.lr 0.043341024381778614 +665 67 training.batch_size 1.0 +665 67 training.label_smoothing 0.0437610720583272 +665 68 model.embedding_dim 1.0 +665 68 optimizer.lr 0.0016162440263035212 +665 68 training.batch_size 0.0 +665 68 training.label_smoothing 0.007922831569268835 +665 69 model.embedding_dim 2.0 +665 69 optimizer.lr 0.009846546909575137 +665 69 training.batch_size 1.0 +665 69 training.label_smoothing 0.42219354730386716 +665 70 model.embedding_dim 1.0 +665 70 optimizer.lr 0.005603290484195548 +665 70 
training.batch_size 0.0 +665 70 training.label_smoothing 0.004649996928804205 +665 71 model.embedding_dim 1.0 +665 71 optimizer.lr 0.007899362670056705 +665 71 training.batch_size 2.0 +665 71 training.label_smoothing 0.060800310775280884 +665 72 model.embedding_dim 0.0 +665 72 optimizer.lr 0.02442183033339861 +665 72 training.batch_size 2.0 +665 72 training.label_smoothing 0.2734270712073229 +665 73 model.embedding_dim 0.0 +665 73 optimizer.lr 0.0010541936599144386 +665 73 training.batch_size 1.0 +665 73 training.label_smoothing 0.016408999902004298 +665 74 model.embedding_dim 1.0 +665 74 optimizer.lr 0.04800002195422829 +665 74 training.batch_size 1.0 +665 74 training.label_smoothing 0.5267758134904533 +665 75 model.embedding_dim 0.0 +665 75 optimizer.lr 0.0011767771900321813 +665 75 training.batch_size 0.0 +665 75 training.label_smoothing 0.12480849453341324 +665 76 model.embedding_dim 2.0 +665 76 optimizer.lr 0.01631642795565392 +665 76 training.batch_size 1.0 +665 76 training.label_smoothing 0.020026470131730468 +665 77 model.embedding_dim 0.0 +665 77 optimizer.lr 0.0036785351580415388 +665 77 training.batch_size 1.0 +665 77 training.label_smoothing 0.4023964461626787 +665 78 model.embedding_dim 1.0 +665 78 optimizer.lr 0.0018920977383809995 +665 78 training.batch_size 2.0 +665 78 training.label_smoothing 0.19649040399066497 +665 79 model.embedding_dim 2.0 +665 79 optimizer.lr 0.003645139297142633 +665 79 training.batch_size 2.0 +665 79 training.label_smoothing 0.3748551550278034 +665 80 model.embedding_dim 1.0 +665 80 optimizer.lr 0.03054068421695184 +665 80 training.batch_size 1.0 +665 80 training.label_smoothing 0.01625941362555799 +665 81 model.embedding_dim 2.0 +665 81 optimizer.lr 0.009011111849795011 +665 81 training.batch_size 0.0 +665 81 training.label_smoothing 0.1573864923223045 +665 82 model.embedding_dim 2.0 +665 82 optimizer.lr 0.003933097370641333 +665 82 training.batch_size 2.0 +665 82 training.label_smoothing 0.003932040361762763 +665 83 
model.embedding_dim 1.0 +665 83 optimizer.lr 0.06660937781736198 +665 83 training.batch_size 1.0 +665 83 training.label_smoothing 0.006134809391135446 +665 84 model.embedding_dim 1.0 +665 84 optimizer.lr 0.006871068215594137 +665 84 training.batch_size 0.0 +665 84 training.label_smoothing 0.028917212392279592 +665 85 model.embedding_dim 2.0 +665 85 optimizer.lr 0.009551208527621658 +665 85 training.batch_size 2.0 +665 85 training.label_smoothing 0.003633836895884466 +665 86 model.embedding_dim 1.0 +665 86 optimizer.lr 0.0061381350470148255 +665 86 training.batch_size 0.0 +665 86 training.label_smoothing 0.00233178508261617 +665 87 model.embedding_dim 0.0 +665 87 optimizer.lr 0.06684381367286754 +665 87 training.batch_size 2.0 +665 87 training.label_smoothing 0.016979093609273042 +665 88 model.embedding_dim 2.0 +665 88 optimizer.lr 0.010665671981503278 +665 88 training.batch_size 1.0 +665 88 training.label_smoothing 0.7822937921834179 +665 89 model.embedding_dim 1.0 +665 89 optimizer.lr 0.010644117540130886 +665 89 training.batch_size 2.0 +665 89 training.label_smoothing 0.022559116724004624 +665 90 model.embedding_dim 0.0 +665 90 optimizer.lr 0.0025391854369656735 +665 90 training.batch_size 0.0 +665 90 training.label_smoothing 0.650280923011763 +665 91 model.embedding_dim 1.0 +665 91 optimizer.lr 0.048795386714678646 +665 91 training.batch_size 0.0 +665 91 training.label_smoothing 0.1268266499669469 +665 92 model.embedding_dim 0.0 +665 92 optimizer.lr 0.08455487177701944 +665 92 training.batch_size 2.0 +665 92 training.label_smoothing 0.38564286108140716 +665 93 model.embedding_dim 1.0 +665 93 optimizer.lr 0.0011505766952607137 +665 93 training.batch_size 2.0 +665 93 training.label_smoothing 0.3294515221310618 +665 94 model.embedding_dim 0.0 +665 94 optimizer.lr 0.0021749189523136915 +665 94 training.batch_size 2.0 +665 94 training.label_smoothing 0.16079823595943674 +665 95 model.embedding_dim 1.0 +665 95 optimizer.lr 0.024179670558682424 +665 95 
training.batch_size 1.0 +665 95 training.label_smoothing 0.016248450688800247 +665 96 model.embedding_dim 1.0 +665 96 optimizer.lr 0.006848331477420207 +665 96 training.batch_size 2.0 +665 96 training.label_smoothing 0.07119717827352297 +665 97 model.embedding_dim 1.0 +665 97 optimizer.lr 0.004900025831458062 +665 97 training.batch_size 0.0 +665 97 training.label_smoothing 0.0028721885291780845 +665 98 model.embedding_dim 0.0 +665 98 optimizer.lr 0.05227303178168044 +665 98 training.batch_size 1.0 +665 98 training.label_smoothing 0.13980739811441276 +665 99 model.embedding_dim 1.0 +665 99 optimizer.lr 0.0022671615103980825 +665 99 training.batch_size 0.0 +665 99 training.label_smoothing 0.004016036769669367 +665 100 model.embedding_dim 2.0 +665 100 optimizer.lr 0.016886238325730427 +665 100 training.batch_size 1.0 +665 100 training.label_smoothing 0.022007142634779625 +665 1 dataset """kinships""" +665 1 model """simple""" +665 1 loss """crossentropy""" +665 1 regularizer """no""" +665 1 optimizer """adam""" +665 1 training_loop """lcwa""" +665 1 evaluator """rankbased""" +665 2 dataset """kinships""" +665 2 model """simple""" +665 2 loss """crossentropy""" +665 2 regularizer """no""" +665 2 optimizer """adam""" +665 2 training_loop """lcwa""" +665 2 evaluator """rankbased""" +665 3 dataset """kinships""" +665 3 model """simple""" +665 3 loss """crossentropy""" +665 3 regularizer """no""" +665 3 optimizer """adam""" +665 3 training_loop """lcwa""" +665 3 evaluator """rankbased""" +665 4 dataset """kinships""" +665 4 model """simple""" +665 4 loss """crossentropy""" +665 4 regularizer """no""" +665 4 optimizer """adam""" +665 4 training_loop """lcwa""" +665 4 evaluator """rankbased""" +665 5 dataset """kinships""" +665 5 model """simple""" +665 5 loss """crossentropy""" +665 5 regularizer """no""" +665 5 optimizer """adam""" +665 5 training_loop """lcwa""" +665 5 evaluator """rankbased""" +665 6 dataset """kinships""" +665 6 model """simple""" +665 6 loss 
"""crossentropy""" +665 6 regularizer """no""" +665 6 optimizer """adam""" +665 6 training_loop """lcwa""" +665 6 evaluator """rankbased""" +665 7 dataset """kinships""" +665 7 model """simple""" +665 7 loss """crossentropy""" +665 7 regularizer """no""" +665 7 optimizer """adam""" +665 7 training_loop """lcwa""" +665 7 evaluator """rankbased""" +665 8 dataset """kinships""" +665 8 model """simple""" +665 8 loss """crossentropy""" +665 8 regularizer """no""" +665 8 optimizer """adam""" +665 8 training_loop """lcwa""" +665 8 evaluator """rankbased""" +665 9 dataset """kinships""" +665 9 model """simple""" +665 9 loss """crossentropy""" +665 9 regularizer """no""" +665 9 optimizer """adam""" +665 9 training_loop """lcwa""" +665 9 evaluator """rankbased""" +665 10 dataset """kinships""" +665 10 model """simple""" +665 10 loss """crossentropy""" +665 10 regularizer """no""" +665 10 optimizer """adam""" +665 10 training_loop """lcwa""" +665 10 evaluator """rankbased""" +665 11 dataset """kinships""" +665 11 model """simple""" +665 11 loss """crossentropy""" +665 11 regularizer """no""" +665 11 optimizer """adam""" +665 11 training_loop """lcwa""" +665 11 evaluator """rankbased""" +665 12 dataset """kinships""" +665 12 model """simple""" +665 12 loss """crossentropy""" +665 12 regularizer """no""" +665 12 optimizer """adam""" +665 12 training_loop """lcwa""" +665 12 evaluator """rankbased""" +665 13 dataset """kinships""" +665 13 model """simple""" +665 13 loss """crossentropy""" +665 13 regularizer """no""" +665 13 optimizer """adam""" +665 13 training_loop """lcwa""" +665 13 evaluator """rankbased""" +665 14 dataset """kinships""" +665 14 model """simple""" +665 14 loss """crossentropy""" +665 14 regularizer """no""" +665 14 optimizer """adam""" +665 14 training_loop """lcwa""" +665 14 evaluator """rankbased""" +665 15 dataset """kinships""" +665 15 model """simple""" +665 15 loss """crossentropy""" +665 15 regularizer """no""" +665 15 optimizer """adam""" +665 15 
training_loop """lcwa""" +665 15 evaluator """rankbased""" +665 16 dataset """kinships""" +665 16 model """simple""" +665 16 loss """crossentropy""" +665 16 regularizer """no""" +665 16 optimizer """adam""" +665 16 training_loop """lcwa""" +665 16 evaluator """rankbased""" +665 17 dataset """kinships""" +665 17 model """simple""" +665 17 loss """crossentropy""" +665 17 regularizer """no""" +665 17 optimizer """adam""" +665 17 training_loop """lcwa""" +665 17 evaluator """rankbased""" +665 18 dataset """kinships""" +665 18 model """simple""" +665 18 loss """crossentropy""" +665 18 regularizer """no""" +665 18 optimizer """adam""" +665 18 training_loop """lcwa""" +665 18 evaluator """rankbased""" +665 19 dataset """kinships""" +665 19 model """simple""" +665 19 loss """crossentropy""" +665 19 regularizer """no""" +665 19 optimizer """adam""" +665 19 training_loop """lcwa""" +665 19 evaluator """rankbased""" +665 20 dataset """kinships""" +665 20 model """simple""" +665 20 loss """crossentropy""" +665 20 regularizer """no""" +665 20 optimizer """adam""" +665 20 training_loop """lcwa""" +665 20 evaluator """rankbased""" +665 21 dataset """kinships""" +665 21 model """simple""" +665 21 loss """crossentropy""" +665 21 regularizer """no""" +665 21 optimizer """adam""" +665 21 training_loop """lcwa""" +665 21 evaluator """rankbased""" +665 22 dataset """kinships""" +665 22 model """simple""" +665 22 loss """crossentropy""" +665 22 regularizer """no""" +665 22 optimizer """adam""" +665 22 training_loop """lcwa""" +665 22 evaluator """rankbased""" +665 23 dataset """kinships""" +665 23 model """simple""" +665 23 loss """crossentropy""" +665 23 regularizer """no""" +665 23 optimizer """adam""" +665 23 training_loop """lcwa""" +665 23 evaluator """rankbased""" +665 24 dataset """kinships""" +665 24 model """simple""" +665 24 loss """crossentropy""" +665 24 regularizer """no""" +665 24 optimizer """adam""" +665 24 training_loop """lcwa""" +665 24 evaluator """rankbased""" +665 
25 dataset """kinships""" +665 25 model """simple""" +665 25 loss """crossentropy""" +665 25 regularizer """no""" +665 25 optimizer """adam""" +665 25 training_loop """lcwa""" +665 25 evaluator """rankbased""" +665 26 dataset """kinships""" +665 26 model """simple""" +665 26 loss """crossentropy""" +665 26 regularizer """no""" +665 26 optimizer """adam""" +665 26 training_loop """lcwa""" +665 26 evaluator """rankbased""" +665 27 dataset """kinships""" +665 27 model """simple""" +665 27 loss """crossentropy""" +665 27 regularizer """no""" +665 27 optimizer """adam""" +665 27 training_loop """lcwa""" +665 27 evaluator """rankbased""" +665 28 dataset """kinships""" +665 28 model """simple""" +665 28 loss """crossentropy""" +665 28 regularizer """no""" +665 28 optimizer """adam""" +665 28 training_loop """lcwa""" +665 28 evaluator """rankbased""" +665 29 dataset """kinships""" +665 29 model """simple""" +665 29 loss """crossentropy""" +665 29 regularizer """no""" +665 29 optimizer """adam""" +665 29 training_loop """lcwa""" +665 29 evaluator """rankbased""" +665 30 dataset """kinships""" +665 30 model """simple""" +665 30 loss """crossentropy""" +665 30 regularizer """no""" +665 30 optimizer """adam""" +665 30 training_loop """lcwa""" +665 30 evaluator """rankbased""" +665 31 dataset """kinships""" +665 31 model """simple""" +665 31 loss """crossentropy""" +665 31 regularizer """no""" +665 31 optimizer """adam""" +665 31 training_loop """lcwa""" +665 31 evaluator """rankbased""" +665 32 dataset """kinships""" +665 32 model """simple""" +665 32 loss """crossentropy""" +665 32 regularizer """no""" +665 32 optimizer """adam""" +665 32 training_loop """lcwa""" +665 32 evaluator """rankbased""" +665 33 dataset """kinships""" +665 33 model """simple""" +665 33 loss """crossentropy""" +665 33 regularizer """no""" +665 33 optimizer """adam""" +665 33 training_loop """lcwa""" +665 33 evaluator """rankbased""" +665 34 dataset """kinships""" +665 34 model """simple""" +665 34 
loss """crossentropy""" +665 34 regularizer """no""" +665 34 optimizer """adam""" +665 34 training_loop """lcwa""" +665 34 evaluator """rankbased""" +665 35 dataset """kinships""" +665 35 model """simple""" +665 35 loss """crossentropy""" +665 35 regularizer """no""" +665 35 optimizer """adam""" +665 35 training_loop """lcwa""" +665 35 evaluator """rankbased""" +665 36 dataset """kinships""" +665 36 model """simple""" +665 36 loss """crossentropy""" +665 36 regularizer """no""" +665 36 optimizer """adam""" +665 36 training_loop """lcwa""" +665 36 evaluator """rankbased""" +665 37 dataset """kinships""" +665 37 model """simple""" +665 37 loss """crossentropy""" +665 37 regularizer """no""" +665 37 optimizer """adam""" +665 37 training_loop """lcwa""" +665 37 evaluator """rankbased""" +665 38 dataset """kinships""" +665 38 model """simple""" +665 38 loss """crossentropy""" +665 38 regularizer """no""" +665 38 optimizer """adam""" +665 38 training_loop """lcwa""" +665 38 evaluator """rankbased""" +665 39 dataset """kinships""" +665 39 model """simple""" +665 39 loss """crossentropy""" +665 39 regularizer """no""" +665 39 optimizer """adam""" +665 39 training_loop """lcwa""" +665 39 evaluator """rankbased""" +665 40 dataset """kinships""" +665 40 model """simple""" +665 40 loss """crossentropy""" +665 40 regularizer """no""" +665 40 optimizer """adam""" +665 40 training_loop """lcwa""" +665 40 evaluator """rankbased""" +665 41 dataset """kinships""" +665 41 model """simple""" +665 41 loss """crossentropy""" +665 41 regularizer """no""" +665 41 optimizer """adam""" +665 41 training_loop """lcwa""" +665 41 evaluator """rankbased""" +665 42 dataset """kinships""" +665 42 model """simple""" +665 42 loss """crossentropy""" +665 42 regularizer """no""" +665 42 optimizer """adam""" +665 42 training_loop """lcwa""" +665 42 evaluator """rankbased""" +665 43 dataset """kinships""" +665 43 model """simple""" +665 43 loss """crossentropy""" +665 43 regularizer """no""" +665 43 
optimizer """adam""" +665 43 training_loop """lcwa""" +665 43 evaluator """rankbased""" +665 44 dataset """kinships""" +665 44 model """simple""" +665 44 loss """crossentropy""" +665 44 regularizer """no""" +665 44 optimizer """adam""" +665 44 training_loop """lcwa""" +665 44 evaluator """rankbased""" +665 45 dataset """kinships""" +665 45 model """simple""" +665 45 loss """crossentropy""" +665 45 regularizer """no""" +665 45 optimizer """adam""" +665 45 training_loop """lcwa""" +665 45 evaluator """rankbased""" +665 46 dataset """kinships""" +665 46 model """simple""" +665 46 loss """crossentropy""" +665 46 regularizer """no""" +665 46 optimizer """adam""" +665 46 training_loop """lcwa""" +665 46 evaluator """rankbased""" +665 47 dataset """kinships""" +665 47 model """simple""" +665 47 loss """crossentropy""" +665 47 regularizer """no""" +665 47 optimizer """adam""" +665 47 training_loop """lcwa""" +665 47 evaluator """rankbased""" +665 48 dataset """kinships""" +665 48 model """simple""" +665 48 loss """crossentropy""" +665 48 regularizer """no""" +665 48 optimizer """adam""" +665 48 training_loop """lcwa""" +665 48 evaluator """rankbased""" +665 49 dataset """kinships""" +665 49 model """simple""" +665 49 loss """crossentropy""" +665 49 regularizer """no""" +665 49 optimizer """adam""" +665 49 training_loop """lcwa""" +665 49 evaluator """rankbased""" +665 50 dataset """kinships""" +665 50 model """simple""" +665 50 loss """crossentropy""" +665 50 regularizer """no""" +665 50 optimizer """adam""" +665 50 training_loop """lcwa""" +665 50 evaluator """rankbased""" +665 51 dataset """kinships""" +665 51 model """simple""" +665 51 loss """crossentropy""" +665 51 regularizer """no""" +665 51 optimizer """adam""" +665 51 training_loop """lcwa""" +665 51 evaluator """rankbased""" +665 52 dataset """kinships""" +665 52 model """simple""" +665 52 loss """crossentropy""" +665 52 regularizer """no""" +665 52 optimizer """adam""" +665 52 training_loop """lcwa""" +665 52 
evaluator """rankbased""" +665 53 dataset """kinships""" +665 53 model """simple""" +665 53 loss """crossentropy""" +665 53 regularizer """no""" +665 53 optimizer """adam""" +665 53 training_loop """lcwa""" +665 53 evaluator """rankbased""" +665 54 dataset """kinships""" +665 54 model """simple""" +665 54 loss """crossentropy""" +665 54 regularizer """no""" +665 54 optimizer """adam""" +665 54 training_loop """lcwa""" +665 54 evaluator """rankbased""" +665 55 dataset """kinships""" +665 55 model """simple""" +665 55 loss """crossentropy""" +665 55 regularizer """no""" +665 55 optimizer """adam""" +665 55 training_loop """lcwa""" +665 55 evaluator """rankbased""" +665 56 dataset """kinships""" +665 56 model """simple""" +665 56 loss """crossentropy""" +665 56 regularizer """no""" +665 56 optimizer """adam""" +665 56 training_loop """lcwa""" +665 56 evaluator """rankbased""" +665 57 dataset """kinships""" +665 57 model """simple""" +665 57 loss """crossentropy""" +665 57 regularizer """no""" +665 57 optimizer """adam""" +665 57 training_loop """lcwa""" +665 57 evaluator """rankbased""" +665 58 dataset """kinships""" +665 58 model """simple""" +665 58 loss """crossentropy""" +665 58 regularizer """no""" +665 58 optimizer """adam""" +665 58 training_loop """lcwa""" +665 58 evaluator """rankbased""" +665 59 dataset """kinships""" +665 59 model """simple""" +665 59 loss """crossentropy""" +665 59 regularizer """no""" +665 59 optimizer """adam""" +665 59 training_loop """lcwa""" +665 59 evaluator """rankbased""" +665 60 dataset """kinships""" +665 60 model """simple""" +665 60 loss """crossentropy""" +665 60 regularizer """no""" +665 60 optimizer """adam""" +665 60 training_loop """lcwa""" +665 60 evaluator """rankbased""" +665 61 dataset """kinships""" +665 61 model """simple""" +665 61 loss """crossentropy""" +665 61 regularizer """no""" +665 61 optimizer """adam""" +665 61 training_loop """lcwa""" +665 61 evaluator """rankbased""" +665 62 dataset """kinships""" +665 62 
model """simple""" +665 62 loss """crossentropy""" +665 62 regularizer """no""" +665 62 optimizer """adam""" +665 62 training_loop """lcwa""" +665 62 evaluator """rankbased""" +665 63 dataset """kinships""" +665 63 model """simple""" +665 63 loss """crossentropy""" +665 63 regularizer """no""" +665 63 optimizer """adam""" +665 63 training_loop """lcwa""" +665 63 evaluator """rankbased""" +665 64 dataset """kinships""" +665 64 model """simple""" +665 64 loss """crossentropy""" +665 64 regularizer """no""" +665 64 optimizer """adam""" +665 64 training_loop """lcwa""" +665 64 evaluator """rankbased""" +665 65 dataset """kinships""" +665 65 model """simple""" +665 65 loss """crossentropy""" +665 65 regularizer """no""" +665 65 optimizer """adam""" +665 65 training_loop """lcwa""" +665 65 evaluator """rankbased""" +665 66 dataset """kinships""" +665 66 model """simple""" +665 66 loss """crossentropy""" +665 66 regularizer """no""" +665 66 optimizer """adam""" +665 66 training_loop """lcwa""" +665 66 evaluator """rankbased""" +665 67 dataset """kinships""" +665 67 model """simple""" +665 67 loss """crossentropy""" +665 67 regularizer """no""" +665 67 optimizer """adam""" +665 67 training_loop """lcwa""" +665 67 evaluator """rankbased""" +665 68 dataset """kinships""" +665 68 model """simple""" +665 68 loss """crossentropy""" +665 68 regularizer """no""" +665 68 optimizer """adam""" +665 68 training_loop """lcwa""" +665 68 evaluator """rankbased""" +665 69 dataset """kinships""" +665 69 model """simple""" +665 69 loss """crossentropy""" +665 69 regularizer """no""" +665 69 optimizer """adam""" +665 69 training_loop """lcwa""" +665 69 evaluator """rankbased""" +665 70 dataset """kinships""" +665 70 model """simple""" +665 70 loss """crossentropy""" +665 70 regularizer """no""" +665 70 optimizer """adam""" +665 70 training_loop """lcwa""" +665 70 evaluator """rankbased""" +665 71 dataset """kinships""" +665 71 model """simple""" +665 71 loss """crossentropy""" +665 71 
regularizer """no""" +665 71 optimizer """adam""" +665 71 training_loop """lcwa""" +665 71 evaluator """rankbased""" +665 72 dataset """kinships""" +665 72 model """simple""" +665 72 loss """crossentropy""" +665 72 regularizer """no""" +665 72 optimizer """adam""" +665 72 training_loop """lcwa""" +665 72 evaluator """rankbased""" +665 73 dataset """kinships""" +665 73 model """simple""" +665 73 loss """crossentropy""" +665 73 regularizer """no""" +665 73 optimizer """adam""" +665 73 training_loop """lcwa""" +665 73 evaluator """rankbased""" +665 74 dataset """kinships""" +665 74 model """simple""" +665 74 loss """crossentropy""" +665 74 regularizer """no""" +665 74 optimizer """adam""" +665 74 training_loop """lcwa""" +665 74 evaluator """rankbased""" +665 75 dataset """kinships""" +665 75 model """simple""" +665 75 loss """crossentropy""" +665 75 regularizer """no""" +665 75 optimizer """adam""" +665 75 training_loop """lcwa""" +665 75 evaluator """rankbased""" +665 76 dataset """kinships""" +665 76 model """simple""" +665 76 loss """crossentropy""" +665 76 regularizer """no""" +665 76 optimizer """adam""" +665 76 training_loop """lcwa""" +665 76 evaluator """rankbased""" +665 77 dataset """kinships""" +665 77 model """simple""" +665 77 loss """crossentropy""" +665 77 regularizer """no""" +665 77 optimizer """adam""" +665 77 training_loop """lcwa""" +665 77 evaluator """rankbased""" +665 78 dataset """kinships""" +665 78 model """simple""" +665 78 loss """crossentropy""" +665 78 regularizer """no""" +665 78 optimizer """adam""" +665 78 training_loop """lcwa""" +665 78 evaluator """rankbased""" +665 79 dataset """kinships""" +665 79 model """simple""" +665 79 loss """crossentropy""" +665 79 regularizer """no""" +665 79 optimizer """adam""" +665 79 training_loop """lcwa""" +665 79 evaluator """rankbased""" +665 80 dataset """kinships""" +665 80 model """simple""" +665 80 loss """crossentropy""" +665 80 regularizer """no""" +665 80 optimizer """adam""" +665 80 
training_loop """lcwa""" +665 80 evaluator """rankbased""" +665 81 dataset """kinships""" +665 81 model """simple""" +665 81 loss """crossentropy""" +665 81 regularizer """no""" +665 81 optimizer """adam""" +665 81 training_loop """lcwa""" +665 81 evaluator """rankbased""" +665 82 dataset """kinships""" +665 82 model """simple""" +665 82 loss """crossentropy""" +665 82 regularizer """no""" +665 82 optimizer """adam""" +665 82 training_loop """lcwa""" +665 82 evaluator """rankbased""" +665 83 dataset """kinships""" +665 83 model """simple""" +665 83 loss """crossentropy""" +665 83 regularizer """no""" +665 83 optimizer """adam""" +665 83 training_loop """lcwa""" +665 83 evaluator """rankbased""" +665 84 dataset """kinships""" +665 84 model """simple""" +665 84 loss """crossentropy""" +665 84 regularizer """no""" +665 84 optimizer """adam""" +665 84 training_loop """lcwa""" +665 84 evaluator """rankbased""" +665 85 dataset """kinships""" +665 85 model """simple""" +665 85 loss """crossentropy""" +665 85 regularizer """no""" +665 85 optimizer """adam""" +665 85 training_loop """lcwa""" +665 85 evaluator """rankbased""" +665 86 dataset """kinships""" +665 86 model """simple""" +665 86 loss """crossentropy""" +665 86 regularizer """no""" +665 86 optimizer """adam""" +665 86 training_loop """lcwa""" +665 86 evaluator """rankbased""" +665 87 dataset """kinships""" +665 87 model """simple""" +665 87 loss """crossentropy""" +665 87 regularizer """no""" +665 87 optimizer """adam""" +665 87 training_loop """lcwa""" +665 87 evaluator """rankbased""" +665 88 dataset """kinships""" +665 88 model """simple""" +665 88 loss """crossentropy""" +665 88 regularizer """no""" +665 88 optimizer """adam""" +665 88 training_loop """lcwa""" +665 88 evaluator """rankbased""" +665 89 dataset """kinships""" +665 89 model """simple""" +665 89 loss """crossentropy""" +665 89 regularizer """no""" +665 89 optimizer """adam""" +665 89 training_loop """lcwa""" +665 89 evaluator """rankbased""" +665 
90 dataset """kinships""" +665 90 model """simple""" +665 90 loss """crossentropy""" +665 90 regularizer """no""" +665 90 optimizer """adam""" +665 90 training_loop """lcwa""" +665 90 evaluator """rankbased""" +665 91 dataset """kinships""" +665 91 model """simple""" +665 91 loss """crossentropy""" +665 91 regularizer """no""" +665 91 optimizer """adam""" +665 91 training_loop """lcwa""" +665 91 evaluator """rankbased""" +665 92 dataset """kinships""" +665 92 model """simple""" +665 92 loss """crossentropy""" +665 92 regularizer """no""" +665 92 optimizer """adam""" +665 92 training_loop """lcwa""" +665 92 evaluator """rankbased""" +665 93 dataset """kinships""" +665 93 model """simple""" +665 93 loss """crossentropy""" +665 93 regularizer """no""" +665 93 optimizer """adam""" +665 93 training_loop """lcwa""" +665 93 evaluator """rankbased""" +665 94 dataset """kinships""" +665 94 model """simple""" +665 94 loss """crossentropy""" +665 94 regularizer """no""" +665 94 optimizer """adam""" +665 94 training_loop """lcwa""" +665 94 evaluator """rankbased""" +665 95 dataset """kinships""" +665 95 model """simple""" +665 95 loss """crossentropy""" +665 95 regularizer """no""" +665 95 optimizer """adam""" +665 95 training_loop """lcwa""" +665 95 evaluator """rankbased""" +665 96 dataset """kinships""" +665 96 model """simple""" +665 96 loss """crossentropy""" +665 96 regularizer """no""" +665 96 optimizer """adam""" +665 96 training_loop """lcwa""" +665 96 evaluator """rankbased""" +665 97 dataset """kinships""" +665 97 model """simple""" +665 97 loss """crossentropy""" +665 97 regularizer """no""" +665 97 optimizer """adam""" +665 97 training_loop """lcwa""" +665 97 evaluator """rankbased""" +665 98 dataset """kinships""" +665 98 model """simple""" +665 98 loss """crossentropy""" +665 98 regularizer """no""" +665 98 optimizer """adam""" +665 98 training_loop """lcwa""" +665 98 evaluator """rankbased""" +665 99 dataset """kinships""" +665 99 model """simple""" +665 99 
loss """crossentropy""" +665 99 regularizer """no""" +665 99 optimizer """adam""" +665 99 training_loop """lcwa""" +665 99 evaluator """rankbased""" +665 100 dataset """kinships""" +665 100 model """simple""" +665 100 loss """crossentropy""" +665 100 regularizer """no""" +665 100 optimizer """adam""" +665 100 training_loop """lcwa""" +665 100 evaluator """rankbased""" +666 1 model.embedding_dim 1.0 +666 1 optimizer.lr 0.00320970574536053 +666 1 training.batch_size 1.0 +666 1 training.label_smoothing 0.053664351474817365 +666 2 model.embedding_dim 0.0 +666 2 optimizer.lr 0.0010433244118361665 +666 2 training.batch_size 1.0 +666 2 training.label_smoothing 0.039606827719526186 +666 3 model.embedding_dim 2.0 +666 3 optimizer.lr 0.0015903015464957496 +666 3 training.batch_size 0.0 +666 3 training.label_smoothing 0.01377683903752728 +666 4 model.embedding_dim 0.0 +666 4 optimizer.lr 0.008784023833133151 +666 4 training.batch_size 1.0 +666 4 training.label_smoothing 0.018846057724274657 +666 5 model.embedding_dim 0.0 +666 5 optimizer.lr 0.04377770131165797 +666 5 training.batch_size 0.0 +666 5 training.label_smoothing 0.14745282029312615 +666 6 model.embedding_dim 0.0 +666 6 optimizer.lr 0.001669905237012327 +666 6 training.batch_size 0.0 +666 6 training.label_smoothing 0.5744226494860447 +666 7 model.embedding_dim 0.0 +666 7 optimizer.lr 0.012141348676865383 +666 7 training.batch_size 0.0 +666 7 training.label_smoothing 0.08291492938994095 +666 8 model.embedding_dim 1.0 +666 8 optimizer.lr 0.0014238641045856864 +666 8 training.batch_size 0.0 +666 8 training.label_smoothing 0.3631560189881217 +666 9 model.embedding_dim 1.0 +666 9 optimizer.lr 0.00708757444755367 +666 9 training.batch_size 1.0 +666 9 training.label_smoothing 0.007191334241740783 +666 10 model.embedding_dim 1.0 +666 10 optimizer.lr 0.005177077701344219 +666 10 training.batch_size 2.0 +666 10 training.label_smoothing 0.055938201530407856 +666 11 model.embedding_dim 2.0 +666 11 optimizer.lr 
0.009048140966272007 +666 11 training.batch_size 2.0 +666 11 training.label_smoothing 0.5756092883796953 +666 12 model.embedding_dim 2.0 +666 12 optimizer.lr 0.038264072291632985 +666 12 training.batch_size 2.0 +666 12 training.label_smoothing 0.8755455189005231 +666 13 model.embedding_dim 2.0 +666 13 optimizer.lr 0.012528677087072712 +666 13 training.batch_size 2.0 +666 13 training.label_smoothing 0.0012967821166746513 +666 14 model.embedding_dim 0.0 +666 14 optimizer.lr 0.0031408507232436335 +666 14 training.batch_size 0.0 +666 14 training.label_smoothing 0.033620753676390135 +666 15 model.embedding_dim 2.0 +666 15 optimizer.lr 0.0059521205708689875 +666 15 training.batch_size 1.0 +666 15 training.label_smoothing 0.07727091770360207 +666 16 model.embedding_dim 2.0 +666 16 optimizer.lr 0.025603687674673723 +666 16 training.batch_size 2.0 +666 16 training.label_smoothing 0.7581735480860881 +666 17 model.embedding_dim 0.0 +666 17 optimizer.lr 0.02147816456388951 +666 17 training.batch_size 1.0 +666 17 training.label_smoothing 0.04687396853818471 +666 18 model.embedding_dim 2.0 +666 18 optimizer.lr 0.0032899848628277877 +666 18 training.batch_size 2.0 +666 18 training.label_smoothing 0.13844845997906455 +666 19 model.embedding_dim 1.0 +666 19 optimizer.lr 0.00248488157985368 +666 19 training.batch_size 0.0 +666 19 training.label_smoothing 0.047255788479810455 +666 20 model.embedding_dim 0.0 +666 20 optimizer.lr 0.02376328285339657 +666 20 training.batch_size 0.0 +666 20 training.label_smoothing 0.05265737247214776 +666 21 model.embedding_dim 1.0 +666 21 optimizer.lr 0.08463354327006983 +666 21 training.batch_size 0.0 +666 21 training.label_smoothing 0.0010385523574880638 +666 22 model.embedding_dim 2.0 +666 22 optimizer.lr 0.0014410912082097904 +666 22 training.batch_size 2.0 +666 22 training.label_smoothing 0.007150623471776417 +666 23 model.embedding_dim 1.0 +666 23 optimizer.lr 0.005013334811844344 +666 23 training.batch_size 2.0 +666 23 training.label_smoothing 
0.007070076795642618 +666 24 model.embedding_dim 2.0 +666 24 optimizer.lr 0.002624034794244987 +666 24 training.batch_size 1.0 +666 24 training.label_smoothing 0.02802762680120181 +666 25 model.embedding_dim 2.0 +666 25 optimizer.lr 0.0029877630786294654 +666 25 training.batch_size 1.0 +666 25 training.label_smoothing 0.020385656361811163 +666 26 model.embedding_dim 0.0 +666 26 optimizer.lr 0.011389170101907528 +666 26 training.batch_size 2.0 +666 26 training.label_smoothing 0.0011438792963745967 +666 27 model.embedding_dim 1.0 +666 27 optimizer.lr 0.031246679251275947 +666 27 training.batch_size 1.0 +666 27 training.label_smoothing 0.9118567530971318 +666 28 model.embedding_dim 0.0 +666 28 optimizer.lr 0.001970480367924747 +666 28 training.batch_size 1.0 +666 28 training.label_smoothing 0.06203332612251137 +666 29 model.embedding_dim 1.0 +666 29 optimizer.lr 0.0731428906998785 +666 29 training.batch_size 2.0 +666 29 training.label_smoothing 0.15441902314902495 +666 30 model.embedding_dim 1.0 +666 30 optimizer.lr 0.023270235863681998 +666 30 training.batch_size 0.0 +666 30 training.label_smoothing 0.18883653053885469 +666 31 model.embedding_dim 1.0 +666 31 optimizer.lr 0.003084018689526813 +666 31 training.batch_size 1.0 +666 31 training.label_smoothing 0.0033089148170926358 +666 32 model.embedding_dim 0.0 +666 32 optimizer.lr 0.05446492048088836 +666 32 training.batch_size 2.0 +666 32 training.label_smoothing 0.5280244273598848 +666 33 model.embedding_dim 1.0 +666 33 optimizer.lr 0.08867262584817413 +666 33 training.batch_size 0.0 +666 33 training.label_smoothing 0.011430076724783383 +666 34 model.embedding_dim 2.0 +666 34 optimizer.lr 0.005197875493763142 +666 34 training.batch_size 2.0 +666 34 training.label_smoothing 0.005322047750306566 +666 35 model.embedding_dim 1.0 +666 35 optimizer.lr 0.0010989419228805693 +666 35 training.batch_size 1.0 +666 35 training.label_smoothing 0.002288731803005825 +666 36 model.embedding_dim 1.0 +666 36 optimizer.lr 
0.0692845965521756 +666 36 training.batch_size 2.0 +666 36 training.label_smoothing 0.0058888616436044965 +666 37 model.embedding_dim 1.0 +666 37 optimizer.lr 0.02316696975104614 +666 37 training.batch_size 0.0 +666 37 training.label_smoothing 0.3682713598933342 +666 38 model.embedding_dim 0.0 +666 38 optimizer.lr 0.005249687610846756 +666 38 training.batch_size 1.0 +666 38 training.label_smoothing 0.3763463523510292 +666 39 model.embedding_dim 0.0 +666 39 optimizer.lr 0.05132239530886577 +666 39 training.batch_size 2.0 +666 39 training.label_smoothing 0.5302119761773816 +666 40 model.embedding_dim 1.0 +666 40 optimizer.lr 0.0031228086368868205 +666 40 training.batch_size 2.0 +666 40 training.label_smoothing 0.9653977462145151 +666 41 model.embedding_dim 0.0 +666 41 optimizer.lr 0.00856927223838609 +666 41 training.batch_size 2.0 +666 41 training.label_smoothing 0.12206151726814943 +666 42 model.embedding_dim 1.0 +666 42 optimizer.lr 0.0030528420511913446 +666 42 training.batch_size 2.0 +666 42 training.label_smoothing 0.0015055226388837886 +666 43 model.embedding_dim 0.0 +666 43 optimizer.lr 0.006406837676633243 +666 43 training.batch_size 0.0 +666 43 training.label_smoothing 0.46371742027543467 +666 44 model.embedding_dim 0.0 +666 44 optimizer.lr 0.03432588125690843 +666 44 training.batch_size 0.0 +666 44 training.label_smoothing 0.0013537200685395745 +666 45 model.embedding_dim 0.0 +666 45 optimizer.lr 0.003201954621275823 +666 45 training.batch_size 2.0 +666 45 training.label_smoothing 0.0073906025905820716 +666 46 model.embedding_dim 1.0 +666 46 optimizer.lr 0.018838150476145312 +666 46 training.batch_size 2.0 +666 46 training.label_smoothing 0.33162751698804777 +666 47 model.embedding_dim 0.0 +666 47 optimizer.lr 0.015876445280693888 +666 47 training.batch_size 1.0 +666 47 training.label_smoothing 0.03366615990571732 +666 48 model.embedding_dim 0.0 +666 48 optimizer.lr 0.017892556642466508 +666 48 training.batch_size 0.0 +666 48 training.label_smoothing 
0.4999780396018559 +666 49 model.embedding_dim 0.0 +666 49 optimizer.lr 0.030079953434558566 +666 49 training.batch_size 0.0 +666 49 training.label_smoothing 0.009357957017105267 +666 50 model.embedding_dim 0.0 +666 50 optimizer.lr 0.023749762731686407 +666 50 training.batch_size 0.0 +666 50 training.label_smoothing 0.0021541563449050246 +666 51 model.embedding_dim 0.0 +666 51 optimizer.lr 0.025706128153839682 +666 51 training.batch_size 2.0 +666 51 training.label_smoothing 0.0023389939945457315 +666 52 model.embedding_dim 0.0 +666 52 optimizer.lr 0.0014986544919178147 +666 52 training.batch_size 1.0 +666 52 training.label_smoothing 0.12076407607489106 +666 53 model.embedding_dim 1.0 +666 53 optimizer.lr 0.009399914629917001 +666 53 training.batch_size 1.0 +666 53 training.label_smoothing 0.25662936746131026 +666 54 model.embedding_dim 1.0 +666 54 optimizer.lr 0.03902251161927622 +666 54 training.batch_size 0.0 +666 54 training.label_smoothing 0.035825748706942384 +666 55 model.embedding_dim 0.0 +666 55 optimizer.lr 0.08798409943883806 +666 55 training.batch_size 1.0 +666 55 training.label_smoothing 0.037505747216445014 +666 56 model.embedding_dim 1.0 +666 56 optimizer.lr 0.05839640604826854 +666 56 training.batch_size 2.0 +666 56 training.label_smoothing 0.619372643544587 +666 57 model.embedding_dim 2.0 +666 57 optimizer.lr 0.054415512460988594 +666 57 training.batch_size 1.0 +666 57 training.label_smoothing 0.016714531161083367 +666 58 model.embedding_dim 0.0 +666 58 optimizer.lr 0.013529540504202998 +666 58 training.batch_size 2.0 +666 58 training.label_smoothing 0.06266656492358734 +666 59 model.embedding_dim 1.0 +666 59 optimizer.lr 0.032483964869865746 +666 59 training.batch_size 0.0 +666 59 training.label_smoothing 0.002674506232660781 +666 60 model.embedding_dim 1.0 +666 60 optimizer.lr 0.008576442943337129 +666 60 training.batch_size 1.0 +666 60 training.label_smoothing 0.9615326861397359 +666 61 model.embedding_dim 1.0 +666 61 optimizer.lr 
0.003873336407667484 +666 61 training.batch_size 1.0 +666 61 training.label_smoothing 0.0019816961882128566 +666 62 model.embedding_dim 1.0 +666 62 optimizer.lr 0.0884575686129408 +666 62 training.batch_size 0.0 +666 62 training.label_smoothing 0.004340312712153255 +666 63 model.embedding_dim 1.0 +666 63 optimizer.lr 0.009961058016443751 +666 63 training.batch_size 0.0 +666 63 training.label_smoothing 0.008574596273705446 +666 64 model.embedding_dim 0.0 +666 64 optimizer.lr 0.0013852718661916416 +666 64 training.batch_size 0.0 +666 64 training.label_smoothing 0.00453275428381819 +666 65 model.embedding_dim 0.0 +666 65 optimizer.lr 0.002923850276775316 +666 65 training.batch_size 1.0 +666 65 training.label_smoothing 0.045978754358002416 +666 66 model.embedding_dim 0.0 +666 66 optimizer.lr 0.010308684380679247 +666 66 training.batch_size 0.0 +666 66 training.label_smoothing 0.027081540826253806 +666 67 model.embedding_dim 1.0 +666 67 optimizer.lr 0.08068474900481187 +666 67 training.batch_size 2.0 +666 67 training.label_smoothing 0.05964708730148376 +666 68 model.embedding_dim 0.0 +666 68 optimizer.lr 0.001049537892002367 +666 68 training.batch_size 2.0 +666 68 training.label_smoothing 0.08171778362205878 +666 69 model.embedding_dim 2.0 +666 69 optimizer.lr 0.018232045693855715 +666 69 training.batch_size 1.0 +666 69 training.label_smoothing 0.08905178926515808 +666 70 model.embedding_dim 2.0 +666 70 optimizer.lr 0.01677480684157418 +666 70 training.batch_size 0.0 +666 70 training.label_smoothing 0.02688058826189086 +666 71 model.embedding_dim 1.0 +666 71 optimizer.lr 0.0011876392707626253 +666 71 training.batch_size 0.0 +666 71 training.label_smoothing 0.05766613892670931 +666 72 model.embedding_dim 2.0 +666 72 optimizer.lr 0.010936265308217086 +666 72 training.batch_size 0.0 +666 72 training.label_smoothing 0.003024539337413326 +666 73 model.embedding_dim 1.0 +666 73 optimizer.lr 0.06492703724838012 +666 73 training.batch_size 2.0 +666 73 training.label_smoothing 
0.11226959137843137 +666 74 model.embedding_dim 2.0 +666 74 optimizer.lr 0.03168811973618367 +666 74 training.batch_size 2.0 +666 74 training.label_smoothing 0.010758846662400352 +666 75 model.embedding_dim 0.0 +666 75 optimizer.lr 0.0013017261013898217 +666 75 training.batch_size 1.0 +666 75 training.label_smoothing 0.001782244322703592 +666 76 model.embedding_dim 1.0 +666 76 optimizer.lr 0.021853085036957504 +666 76 training.batch_size 2.0 +666 76 training.label_smoothing 0.07496983380369537 +666 77 model.embedding_dim 0.0 +666 77 optimizer.lr 0.0031564678530662743 +666 77 training.batch_size 0.0 +666 77 training.label_smoothing 0.0036157889914900684 +666 78 model.embedding_dim 2.0 +666 78 optimizer.lr 0.00363627968902819 +666 78 training.batch_size 1.0 +666 78 training.label_smoothing 0.0018200697604282936 +666 79 model.embedding_dim 2.0 +666 79 optimizer.lr 0.002640308741779454 +666 79 training.batch_size 0.0 +666 79 training.label_smoothing 0.003196029768263126 +666 80 model.embedding_dim 0.0 +666 80 optimizer.lr 0.0036045898792518715 +666 80 training.batch_size 0.0 +666 80 training.label_smoothing 0.019386827592294213 +666 81 model.embedding_dim 2.0 +666 81 optimizer.lr 0.005892144615520206 +666 81 training.batch_size 2.0 +666 81 training.label_smoothing 0.6986276443065536 +666 82 model.embedding_dim 2.0 +666 82 optimizer.lr 0.09662317507829755 +666 82 training.batch_size 2.0 +666 82 training.label_smoothing 0.014577245184304755 +666 83 model.embedding_dim 0.0 +666 83 optimizer.lr 0.015403896321320451 +666 83 training.batch_size 0.0 +666 83 training.label_smoothing 0.22215606362154297 +666 84 model.embedding_dim 2.0 +666 84 optimizer.lr 0.010836448167798236 +666 84 training.batch_size 0.0 +666 84 training.label_smoothing 0.0015410298567697155 +666 85 model.embedding_dim 1.0 +666 85 optimizer.lr 0.005079532182857777 +666 85 training.batch_size 0.0 +666 85 training.label_smoothing 0.05155642624954282 +666 86 model.embedding_dim 0.0 +666 86 optimizer.lr 
0.03209030744606315 +666 86 training.batch_size 0.0 +666 86 training.label_smoothing 0.5526492155481951 +666 87 model.embedding_dim 2.0 +666 87 optimizer.lr 0.004290901662688027 +666 87 training.batch_size 0.0 +666 87 training.label_smoothing 0.01638332674976647 +666 88 model.embedding_dim 2.0 +666 88 optimizer.lr 0.04033107444940946 +666 88 training.batch_size 1.0 +666 88 training.label_smoothing 0.0016767647797511379 +666 89 model.embedding_dim 1.0 +666 89 optimizer.lr 0.0031226282109354793 +666 89 training.batch_size 0.0 +666 89 training.label_smoothing 0.016737660030936515 +666 90 model.embedding_dim 0.0 +666 90 optimizer.lr 0.09774248203790567 +666 90 training.batch_size 2.0 +666 90 training.label_smoothing 0.0038781024414028522 +666 91 model.embedding_dim 2.0 +666 91 optimizer.lr 0.004574648079383167 +666 91 training.batch_size 0.0 +666 91 training.label_smoothing 0.002986589403650529 +666 92 model.embedding_dim 0.0 +666 92 optimizer.lr 0.0019421537815765867 +666 92 training.batch_size 2.0 +666 92 training.label_smoothing 0.018490598732580486 +666 93 model.embedding_dim 1.0 +666 93 optimizer.lr 0.058147036702552744 +666 93 training.batch_size 1.0 +666 93 training.label_smoothing 0.020221760353522106 +666 94 model.embedding_dim 2.0 +666 94 optimizer.lr 0.09329548111100393 +666 94 training.batch_size 2.0 +666 94 training.label_smoothing 0.11693587036282152 +666 95 model.embedding_dim 1.0 +666 95 optimizer.lr 0.0015701115699573531 +666 95 training.batch_size 0.0 +666 95 training.label_smoothing 0.013353846479689341 +666 96 model.embedding_dim 1.0 +666 96 optimizer.lr 0.009251237287066527 +666 96 training.batch_size 0.0 +666 96 training.label_smoothing 0.017896415713776915 +666 97 model.embedding_dim 0.0 +666 97 optimizer.lr 0.003073780150978372 +666 97 training.batch_size 1.0 +666 97 training.label_smoothing 0.0026034525329276 +666 98 model.embedding_dim 0.0 +666 98 optimizer.lr 0.004562206162242199 +666 98 training.batch_size 0.0 +666 98 
training.label_smoothing 0.0021228223678626014 +666 99 model.embedding_dim 1.0 +666 99 optimizer.lr 0.012946385981366363 +666 99 training.batch_size 1.0 +666 99 training.label_smoothing 0.2136587487696797 +666 100 model.embedding_dim 1.0 +666 100 optimizer.lr 0.007871821711501031 +666 100 training.batch_size 0.0 +666 100 training.label_smoothing 0.006223717085935044 +666 1 dataset """kinships""" +666 1 model """simple""" +666 1 loss """crossentropy""" +666 1 regularizer """no""" +666 1 optimizer """adam""" +666 1 training_loop """lcwa""" +666 1 evaluator """rankbased""" +666 2 dataset """kinships""" +666 2 model """simple""" +666 2 loss """crossentropy""" +666 2 regularizer """no""" +666 2 optimizer """adam""" +666 2 training_loop """lcwa""" +666 2 evaluator """rankbased""" +666 3 dataset """kinships""" +666 3 model """simple""" +666 3 loss """crossentropy""" +666 3 regularizer """no""" +666 3 optimizer """adam""" +666 3 training_loop """lcwa""" +666 3 evaluator """rankbased""" +666 4 dataset """kinships""" +666 4 model """simple""" +666 4 loss """crossentropy""" +666 4 regularizer """no""" +666 4 optimizer """adam""" +666 4 training_loop """lcwa""" +666 4 evaluator """rankbased""" +666 5 dataset """kinships""" +666 5 model """simple""" +666 5 loss """crossentropy""" +666 5 regularizer """no""" +666 5 optimizer """adam""" +666 5 training_loop """lcwa""" +666 5 evaluator """rankbased""" +666 6 dataset """kinships""" +666 6 model """simple""" +666 6 loss """crossentropy""" +666 6 regularizer """no""" +666 6 optimizer """adam""" +666 6 training_loop """lcwa""" +666 6 evaluator """rankbased""" +666 7 dataset """kinships""" +666 7 model """simple""" +666 7 loss """crossentropy""" +666 7 regularizer """no""" +666 7 optimizer """adam""" +666 7 training_loop """lcwa""" +666 7 evaluator """rankbased""" +666 8 dataset """kinships""" +666 8 model """simple""" +666 8 loss """crossentropy""" +666 8 regularizer """no""" +666 8 optimizer """adam""" +666 8 training_loop """lcwa""" 
+666 8 evaluator """rankbased""" +666 9 dataset """kinships""" +666 9 model """simple""" +666 9 loss """crossentropy""" +666 9 regularizer """no""" +666 9 optimizer """adam""" +666 9 training_loop """lcwa""" +666 9 evaluator """rankbased""" +666 10 dataset """kinships""" +666 10 model """simple""" +666 10 loss """crossentropy""" +666 10 regularizer """no""" +666 10 optimizer """adam""" +666 10 training_loop """lcwa""" +666 10 evaluator """rankbased""" +666 11 dataset """kinships""" +666 11 model """simple""" +666 11 loss """crossentropy""" +666 11 regularizer """no""" +666 11 optimizer """adam""" +666 11 training_loop """lcwa""" +666 11 evaluator """rankbased""" +666 12 dataset """kinships""" +666 12 model """simple""" +666 12 loss """crossentropy""" +666 12 regularizer """no""" +666 12 optimizer """adam""" +666 12 training_loop """lcwa""" +666 12 evaluator """rankbased""" +666 13 dataset """kinships""" +666 13 model """simple""" +666 13 loss """crossentropy""" +666 13 regularizer """no""" +666 13 optimizer """adam""" +666 13 training_loop """lcwa""" +666 13 evaluator """rankbased""" +666 14 dataset """kinships""" +666 14 model """simple""" +666 14 loss """crossentropy""" +666 14 regularizer """no""" +666 14 optimizer """adam""" +666 14 training_loop """lcwa""" +666 14 evaluator """rankbased""" +666 15 dataset """kinships""" +666 15 model """simple""" +666 15 loss """crossentropy""" +666 15 regularizer """no""" +666 15 optimizer """adam""" +666 15 training_loop """lcwa""" +666 15 evaluator """rankbased""" +666 16 dataset """kinships""" +666 16 model """simple""" +666 16 loss """crossentropy""" +666 16 regularizer """no""" +666 16 optimizer """adam""" +666 16 training_loop """lcwa""" +666 16 evaluator """rankbased""" +666 17 dataset """kinships""" +666 17 model """simple""" +666 17 loss """crossentropy""" +666 17 regularizer """no""" +666 17 optimizer """adam""" +666 17 training_loop """lcwa""" +666 17 evaluator """rankbased""" +666 18 dataset """kinships""" +666 18 
model """simple""" +666 18 loss """crossentropy""" +666 18 regularizer """no""" +666 18 optimizer """adam""" +666 18 training_loop """lcwa""" +666 18 evaluator """rankbased""" +666 19 dataset """kinships""" +666 19 model """simple""" +666 19 loss """crossentropy""" +666 19 regularizer """no""" +666 19 optimizer """adam""" +666 19 training_loop """lcwa""" +666 19 evaluator """rankbased""" +666 20 dataset """kinships""" +666 20 model """simple""" +666 20 loss """crossentropy""" +666 20 regularizer """no""" +666 20 optimizer """adam""" +666 20 training_loop """lcwa""" +666 20 evaluator """rankbased""" +666 21 dataset """kinships""" +666 21 model """simple""" +666 21 loss """crossentropy""" +666 21 regularizer """no""" +666 21 optimizer """adam""" +666 21 training_loop """lcwa""" +666 21 evaluator """rankbased""" +666 22 dataset """kinships""" +666 22 model """simple""" +666 22 loss """crossentropy""" +666 22 regularizer """no""" +666 22 optimizer """adam""" +666 22 training_loop """lcwa""" +666 22 evaluator """rankbased""" +666 23 dataset """kinships""" +666 23 model """simple""" +666 23 loss """crossentropy""" +666 23 regularizer """no""" +666 23 optimizer """adam""" +666 23 training_loop """lcwa""" +666 23 evaluator """rankbased""" +666 24 dataset """kinships""" +666 24 model """simple""" +666 24 loss """crossentropy""" +666 24 regularizer """no""" +666 24 optimizer """adam""" +666 24 training_loop """lcwa""" +666 24 evaluator """rankbased""" +666 25 dataset """kinships""" +666 25 model """simple""" +666 25 loss """crossentropy""" +666 25 regularizer """no""" +666 25 optimizer """adam""" +666 25 training_loop """lcwa""" +666 25 evaluator """rankbased""" +666 26 dataset """kinships""" +666 26 model """simple""" +666 26 loss """crossentropy""" +666 26 regularizer """no""" +666 26 optimizer """adam""" +666 26 training_loop """lcwa""" +666 26 evaluator """rankbased""" +666 27 dataset """kinships""" +666 27 model """simple""" +666 27 loss """crossentropy""" +666 27 
regularizer """no""" +666 27 optimizer """adam""" +666 27 training_loop """lcwa""" +666 27 evaluator """rankbased""" +666 28 dataset """kinships""" +666 28 model """simple""" +666 28 loss """crossentropy""" +666 28 regularizer """no""" +666 28 optimizer """adam""" +666 28 training_loop """lcwa""" +666 28 evaluator """rankbased""" +666 29 dataset """kinships""" +666 29 model """simple""" +666 29 loss """crossentropy""" +666 29 regularizer """no""" +666 29 optimizer """adam""" +666 29 training_loop """lcwa""" +666 29 evaluator """rankbased""" +666 30 dataset """kinships""" +666 30 model """simple""" +666 30 loss """crossentropy""" +666 30 regularizer """no""" +666 30 optimizer """adam""" +666 30 training_loop """lcwa""" +666 30 evaluator """rankbased""" +666 31 dataset """kinships""" +666 31 model """simple""" +666 31 loss """crossentropy""" +666 31 regularizer """no""" +666 31 optimizer """adam""" +666 31 training_loop """lcwa""" +666 31 evaluator """rankbased""" +666 32 dataset """kinships""" +666 32 model """simple""" +666 32 loss """crossentropy""" +666 32 regularizer """no""" +666 32 optimizer """adam""" +666 32 training_loop """lcwa""" +666 32 evaluator """rankbased""" +666 33 dataset """kinships""" +666 33 model """simple""" +666 33 loss """crossentropy""" +666 33 regularizer """no""" +666 33 optimizer """adam""" +666 33 training_loop """lcwa""" +666 33 evaluator """rankbased""" +666 34 dataset """kinships""" +666 34 model """simple""" +666 34 loss """crossentropy""" +666 34 regularizer """no""" +666 34 optimizer """adam""" +666 34 training_loop """lcwa""" +666 34 evaluator """rankbased""" +666 35 dataset """kinships""" +666 35 model """simple""" +666 35 loss """crossentropy""" +666 35 regularizer """no""" +666 35 optimizer """adam""" +666 35 training_loop """lcwa""" +666 35 evaluator """rankbased""" +666 36 dataset """kinships""" +666 36 model """simple""" +666 36 loss """crossentropy""" +666 36 regularizer """no""" +666 36 optimizer """adam""" +666 36 
training_loop """lcwa""" +666 36 evaluator """rankbased""" +666 37 dataset """kinships""" +666 37 model """simple""" +666 37 loss """crossentropy""" +666 37 regularizer """no""" +666 37 optimizer """adam""" +666 37 training_loop """lcwa""" +666 37 evaluator """rankbased""" +666 38 dataset """kinships""" +666 38 model """simple""" +666 38 loss """crossentropy""" +666 38 regularizer """no""" +666 38 optimizer """adam""" +666 38 training_loop """lcwa""" +666 38 evaluator """rankbased""" +666 39 dataset """kinships""" +666 39 model """simple""" +666 39 loss """crossentropy""" +666 39 regularizer """no""" +666 39 optimizer """adam""" +666 39 training_loop """lcwa""" +666 39 evaluator """rankbased""" +666 40 dataset """kinships""" +666 40 model """simple""" +666 40 loss """crossentropy""" +666 40 regularizer """no""" +666 40 optimizer """adam""" +666 40 training_loop """lcwa""" +666 40 evaluator """rankbased""" +666 41 dataset """kinships""" +666 41 model """simple""" +666 41 loss """crossentropy""" +666 41 regularizer """no""" +666 41 optimizer """adam""" +666 41 training_loop """lcwa""" +666 41 evaluator """rankbased""" +666 42 dataset """kinships""" +666 42 model """simple""" +666 42 loss """crossentropy""" +666 42 regularizer """no""" +666 42 optimizer """adam""" +666 42 training_loop """lcwa""" +666 42 evaluator """rankbased""" +666 43 dataset """kinships""" +666 43 model """simple""" +666 43 loss """crossentropy""" +666 43 regularizer """no""" +666 43 optimizer """adam""" +666 43 training_loop """lcwa""" +666 43 evaluator """rankbased""" +666 44 dataset """kinships""" +666 44 model """simple""" +666 44 loss """crossentropy""" +666 44 regularizer """no""" +666 44 optimizer """adam""" +666 44 training_loop """lcwa""" +666 44 evaluator """rankbased""" +666 45 dataset """kinships""" +666 45 model """simple""" +666 45 loss """crossentropy""" +666 45 regularizer """no""" +666 45 optimizer """adam""" +666 45 training_loop """lcwa""" +666 45 evaluator """rankbased""" +666 
46 dataset """kinships""" +666 46 model """simple""" +666 46 loss """crossentropy""" +666 46 regularizer """no""" +666 46 optimizer """adam""" +666 46 training_loop """lcwa""" +666 46 evaluator """rankbased""" +666 47 dataset """kinships""" +666 47 model """simple""" +666 47 loss """crossentropy""" +666 47 regularizer """no""" +666 47 optimizer """adam""" +666 47 training_loop """lcwa""" +666 47 evaluator """rankbased""" +666 48 dataset """kinships""" +666 48 model """simple""" +666 48 loss """crossentropy""" +666 48 regularizer """no""" +666 48 optimizer """adam""" +666 48 training_loop """lcwa""" +666 48 evaluator """rankbased""" +666 49 dataset """kinships""" +666 49 model """simple""" +666 49 loss """crossentropy""" +666 49 regularizer """no""" +666 49 optimizer """adam""" +666 49 training_loop """lcwa""" +666 49 evaluator """rankbased""" +666 50 dataset """kinships""" +666 50 model """simple""" +666 50 loss """crossentropy""" +666 50 regularizer """no""" +666 50 optimizer """adam""" +666 50 training_loop """lcwa""" +666 50 evaluator """rankbased""" +666 51 dataset """kinships""" +666 51 model """simple""" +666 51 loss """crossentropy""" +666 51 regularizer """no""" +666 51 optimizer """adam""" +666 51 training_loop """lcwa""" +666 51 evaluator """rankbased""" +666 52 dataset """kinships""" +666 52 model """simple""" +666 52 loss """crossentropy""" +666 52 regularizer """no""" +666 52 optimizer """adam""" +666 52 training_loop """lcwa""" +666 52 evaluator """rankbased""" +666 53 dataset """kinships""" +666 53 model """simple""" +666 53 loss """crossentropy""" +666 53 regularizer """no""" +666 53 optimizer """adam""" +666 53 training_loop """lcwa""" +666 53 evaluator """rankbased""" +666 54 dataset """kinships""" +666 54 model """simple""" +666 54 loss """crossentropy""" +666 54 regularizer """no""" +666 54 optimizer """adam""" +666 54 training_loop """lcwa""" +666 54 evaluator """rankbased""" +666 55 dataset """kinships""" +666 55 model """simple""" +666 55 
loss """crossentropy""" +666 55 regularizer """no""" +666 55 optimizer """adam""" +666 55 training_loop """lcwa""" +666 55 evaluator """rankbased""" +666 56 dataset """kinships""" +666 56 model """simple""" +666 56 loss """crossentropy""" +666 56 regularizer """no""" +666 56 optimizer """adam""" +666 56 training_loop """lcwa""" +666 56 evaluator """rankbased""" +666 57 dataset """kinships""" +666 57 model """simple""" +666 57 loss """crossentropy""" +666 57 regularizer """no""" +666 57 optimizer """adam""" +666 57 training_loop """lcwa""" +666 57 evaluator """rankbased""" +666 58 dataset """kinships""" +666 58 model """simple""" +666 58 loss """crossentropy""" +666 58 regularizer """no""" +666 58 optimizer """adam""" +666 58 training_loop """lcwa""" +666 58 evaluator """rankbased""" +666 59 dataset """kinships""" +666 59 model """simple""" +666 59 loss """crossentropy""" +666 59 regularizer """no""" +666 59 optimizer """adam""" +666 59 training_loop """lcwa""" +666 59 evaluator """rankbased""" +666 60 dataset """kinships""" +666 60 model """simple""" +666 60 loss """crossentropy""" +666 60 regularizer """no""" +666 60 optimizer """adam""" +666 60 training_loop """lcwa""" +666 60 evaluator """rankbased""" +666 61 dataset """kinships""" +666 61 model """simple""" +666 61 loss """crossentropy""" +666 61 regularizer """no""" +666 61 optimizer """adam""" +666 61 training_loop """lcwa""" +666 61 evaluator """rankbased""" +666 62 dataset """kinships""" +666 62 model """simple""" +666 62 loss """crossentropy""" +666 62 regularizer """no""" +666 62 optimizer """adam""" +666 62 training_loop """lcwa""" +666 62 evaluator """rankbased""" +666 63 dataset """kinships""" +666 63 model """simple""" +666 63 loss """crossentropy""" +666 63 regularizer """no""" +666 63 optimizer """adam""" +666 63 training_loop """lcwa""" +666 63 evaluator """rankbased""" +666 64 dataset """kinships""" +666 64 model """simple""" +666 64 loss """crossentropy""" +666 64 regularizer """no""" +666 64 
optimizer """adam""" +666 64 training_loop """lcwa""" +666 64 evaluator """rankbased""" +666 65 dataset """kinships""" +666 65 model """simple""" +666 65 loss """crossentropy""" +666 65 regularizer """no""" +666 65 optimizer """adam""" +666 65 training_loop """lcwa""" +666 65 evaluator """rankbased""" +666 66 dataset """kinships""" +666 66 model """simple""" +666 66 loss """crossentropy""" +666 66 regularizer """no""" +666 66 optimizer """adam""" +666 66 training_loop """lcwa""" +666 66 evaluator """rankbased""" +666 67 dataset """kinships""" +666 67 model """simple""" +666 67 loss """crossentropy""" +666 67 regularizer """no""" +666 67 optimizer """adam""" +666 67 training_loop """lcwa""" +666 67 evaluator """rankbased""" +666 68 dataset """kinships""" +666 68 model """simple""" +666 68 loss """crossentropy""" +666 68 regularizer """no""" +666 68 optimizer """adam""" +666 68 training_loop """lcwa""" +666 68 evaluator """rankbased""" +666 69 dataset """kinships""" +666 69 model """simple""" +666 69 loss """crossentropy""" +666 69 regularizer """no""" +666 69 optimizer """adam""" +666 69 training_loop """lcwa""" +666 69 evaluator """rankbased""" +666 70 dataset """kinships""" +666 70 model """simple""" +666 70 loss """crossentropy""" +666 70 regularizer """no""" +666 70 optimizer """adam""" +666 70 training_loop """lcwa""" +666 70 evaluator """rankbased""" +666 71 dataset """kinships""" +666 71 model """simple""" +666 71 loss """crossentropy""" +666 71 regularizer """no""" +666 71 optimizer """adam""" +666 71 training_loop """lcwa""" +666 71 evaluator """rankbased""" +666 72 dataset """kinships""" +666 72 model """simple""" +666 72 loss """crossentropy""" +666 72 regularizer """no""" +666 72 optimizer """adam""" +666 72 training_loop """lcwa""" +666 72 evaluator """rankbased""" +666 73 dataset """kinships""" +666 73 model """simple""" +666 73 loss """crossentropy""" +666 73 regularizer """no""" +666 73 optimizer """adam""" +666 73 training_loop """lcwa""" +666 73 
evaluator """rankbased""" +666 74 dataset """kinships""" +666 74 model """simple""" +666 74 loss """crossentropy""" +666 74 regularizer """no""" +666 74 optimizer """adam""" +666 74 training_loop """lcwa""" +666 74 evaluator """rankbased""" +666 75 dataset """kinships""" +666 75 model """simple""" +666 75 loss """crossentropy""" +666 75 regularizer """no""" +666 75 optimizer """adam""" +666 75 training_loop """lcwa""" +666 75 evaluator """rankbased""" +666 76 dataset """kinships""" +666 76 model """simple""" +666 76 loss """crossentropy""" +666 76 regularizer """no""" +666 76 optimizer """adam""" +666 76 training_loop """lcwa""" +666 76 evaluator """rankbased""" +666 77 dataset """kinships""" +666 77 model """simple""" +666 77 loss """crossentropy""" +666 77 regularizer """no""" +666 77 optimizer """adam""" +666 77 training_loop """lcwa""" +666 77 evaluator """rankbased""" +666 78 dataset """kinships""" +666 78 model """simple""" +666 78 loss """crossentropy""" +666 78 regularizer """no""" +666 78 optimizer """adam""" +666 78 training_loop """lcwa""" +666 78 evaluator """rankbased""" +666 79 dataset """kinships""" +666 79 model """simple""" +666 79 loss """crossentropy""" +666 79 regularizer """no""" +666 79 optimizer """adam""" +666 79 training_loop """lcwa""" +666 79 evaluator """rankbased""" +666 80 dataset """kinships""" +666 80 model """simple""" +666 80 loss """crossentropy""" +666 80 regularizer """no""" +666 80 optimizer """adam""" +666 80 training_loop """lcwa""" +666 80 evaluator """rankbased""" +666 81 dataset """kinships""" +666 81 model """simple""" +666 81 loss """crossentropy""" +666 81 regularizer """no""" +666 81 optimizer """adam""" +666 81 training_loop """lcwa""" +666 81 evaluator """rankbased""" +666 82 dataset """kinships""" +666 82 model """simple""" +666 82 loss """crossentropy""" +666 82 regularizer """no""" +666 82 optimizer """adam""" +666 82 training_loop """lcwa""" +666 82 evaluator """rankbased""" +666 83 dataset """kinships""" +666 83 
model """simple""" +666 83 loss """crossentropy""" +666 83 regularizer """no""" +666 83 optimizer """adam""" +666 83 training_loop """lcwa""" +666 83 evaluator """rankbased""" +666 84 dataset """kinships""" +666 84 model """simple""" +666 84 loss """crossentropy""" +666 84 regularizer """no""" +666 84 optimizer """adam""" +666 84 training_loop """lcwa""" +666 84 evaluator """rankbased""" +666 85 dataset """kinships""" +666 85 model """simple""" +666 85 loss """crossentropy""" +666 85 regularizer """no""" +666 85 optimizer """adam""" +666 85 training_loop """lcwa""" +666 85 evaluator """rankbased""" +666 86 dataset """kinships""" +666 86 model """simple""" +666 86 loss """crossentropy""" +666 86 regularizer """no""" +666 86 optimizer """adam""" +666 86 training_loop """lcwa""" +666 86 evaluator """rankbased""" +666 87 dataset """kinships""" +666 87 model """simple""" +666 87 loss """crossentropy""" +666 87 regularizer """no""" +666 87 optimizer """adam""" +666 87 training_loop """lcwa""" +666 87 evaluator """rankbased""" +666 88 dataset """kinships""" +666 88 model """simple""" +666 88 loss """crossentropy""" +666 88 regularizer """no""" +666 88 optimizer """adam""" +666 88 training_loop """lcwa""" +666 88 evaluator """rankbased""" +666 89 dataset """kinships""" +666 89 model """simple""" +666 89 loss """crossentropy""" +666 89 regularizer """no""" +666 89 optimizer """adam""" +666 89 training_loop """lcwa""" +666 89 evaluator """rankbased""" +666 90 dataset """kinships""" +666 90 model """simple""" +666 90 loss """crossentropy""" +666 90 regularizer """no""" +666 90 optimizer """adam""" +666 90 training_loop """lcwa""" +666 90 evaluator """rankbased""" +666 91 dataset """kinships""" +666 91 model """simple""" +666 91 loss """crossentropy""" +666 91 regularizer """no""" +666 91 optimizer """adam""" +666 91 training_loop """lcwa""" +666 91 evaluator """rankbased""" +666 92 dataset """kinships""" +666 92 model """simple""" +666 92 loss """crossentropy""" +666 92 
regularizer """no""" +666 92 optimizer """adam""" +666 92 training_loop """lcwa""" +666 92 evaluator """rankbased""" +666 93 dataset """kinships""" +666 93 model """simple""" +666 93 loss """crossentropy""" +666 93 regularizer """no""" +666 93 optimizer """adam""" +666 93 training_loop """lcwa""" +666 93 evaluator """rankbased""" +666 94 dataset """kinships""" +666 94 model """simple""" +666 94 loss """crossentropy""" +666 94 regularizer """no""" +666 94 optimizer """adam""" +666 94 training_loop """lcwa""" +666 94 evaluator """rankbased""" +666 95 dataset """kinships""" +666 95 model """simple""" +666 95 loss """crossentropy""" +666 95 regularizer """no""" +666 95 optimizer """adam""" +666 95 training_loop """lcwa""" +666 95 evaluator """rankbased""" +666 96 dataset """kinships""" +666 96 model """simple""" +666 96 loss """crossentropy""" +666 96 regularizer """no""" +666 96 optimizer """adam""" +666 96 training_loop """lcwa""" +666 96 evaluator """rankbased""" +666 97 dataset """kinships""" +666 97 model """simple""" +666 97 loss """crossentropy""" +666 97 regularizer """no""" +666 97 optimizer """adam""" +666 97 training_loop """lcwa""" +666 97 evaluator """rankbased""" +666 98 dataset """kinships""" +666 98 model """simple""" +666 98 loss """crossentropy""" +666 98 regularizer """no""" +666 98 optimizer """adam""" +666 98 training_loop """lcwa""" +666 98 evaluator """rankbased""" +666 99 dataset """kinships""" +666 99 model """simple""" +666 99 loss """crossentropy""" +666 99 regularizer """no""" +666 99 optimizer """adam""" +666 99 training_loop """lcwa""" +666 99 evaluator """rankbased""" +666 100 dataset """kinships""" +666 100 model """simple""" +666 100 loss """crossentropy""" +666 100 regularizer """no""" +666 100 optimizer """adam""" +666 100 training_loop """lcwa""" +666 100 evaluator """rankbased""" +667 1 model.embedding_dim 0.0 +667 1 loss.margin 2.979334176425704 +667 1 optimizer.lr 0.003491544194548705 +667 1 negative_sampler.num_negs_per_pos 90.0 
+667 1 training.batch_size 1.0 +667 2 model.embedding_dim 2.0 +667 2 loss.margin 7.750144126403723 +667 2 optimizer.lr 0.014681115832452761 +667 2 negative_sampler.num_negs_per_pos 83.0 +667 2 training.batch_size 1.0 +667 3 model.embedding_dim 0.0 +667 3 loss.margin 9.609294912209448 +667 3 optimizer.lr 0.003320544149234926 +667 3 negative_sampler.num_negs_per_pos 39.0 +667 3 training.batch_size 0.0 +667 4 model.embedding_dim 0.0 +667 4 loss.margin 2.232491794080119 +667 4 optimizer.lr 0.015064202865421547 +667 4 negative_sampler.num_negs_per_pos 55.0 +667 4 training.batch_size 0.0 +667 5 model.embedding_dim 0.0 +667 5 loss.margin 8.988640829313 +667 5 optimizer.lr 0.0010834766221172602 +667 5 negative_sampler.num_negs_per_pos 45.0 +667 5 training.batch_size 1.0 +667 6 model.embedding_dim 1.0 +667 6 loss.margin 5.6851682334995015 +667 6 optimizer.lr 0.001476509408963532 +667 6 negative_sampler.num_negs_per_pos 12.0 +667 6 training.batch_size 2.0 +667 7 model.embedding_dim 0.0 +667 7 loss.margin 7.871491828261709 +667 7 optimizer.lr 0.05433872799582802 +667 7 negative_sampler.num_negs_per_pos 35.0 +667 7 training.batch_size 1.0 +667 8 model.embedding_dim 1.0 +667 8 loss.margin 6.801696699869476 +667 8 optimizer.lr 0.04007464138762371 +667 8 negative_sampler.num_negs_per_pos 28.0 +667 8 training.batch_size 1.0 +667 9 model.embedding_dim 0.0 +667 9 loss.margin 1.2385982884264461 +667 9 optimizer.lr 0.020280693004972195 +667 9 negative_sampler.num_negs_per_pos 8.0 +667 9 training.batch_size 2.0 +667 10 model.embedding_dim 1.0 +667 10 loss.margin 5.131859981370788 +667 10 optimizer.lr 0.007006299414579548 +667 10 negative_sampler.num_negs_per_pos 26.0 +667 10 training.batch_size 0.0 +667 11 model.embedding_dim 0.0 +667 11 loss.margin 7.0208781330089005 +667 11 optimizer.lr 0.021793326109125197 +667 11 negative_sampler.num_negs_per_pos 22.0 +667 11 training.batch_size 0.0 +667 12 model.embedding_dim 0.0 +667 12 loss.margin 2.049850506843151 +667 12 optimizer.lr 
0.05136597521043845 +667 12 negative_sampler.num_negs_per_pos 57.0 +667 12 training.batch_size 1.0 +667 13 model.embedding_dim 0.0 +667 13 loss.margin 1.5582173308043186 +667 13 optimizer.lr 0.0533670203591489 +667 13 negative_sampler.num_negs_per_pos 13.0 +667 13 training.batch_size 1.0 +667 14 model.embedding_dim 1.0 +667 14 loss.margin 8.614570807080915 +667 14 optimizer.lr 0.02964935444997558 +667 14 negative_sampler.num_negs_per_pos 50.0 +667 14 training.batch_size 0.0 +667 15 model.embedding_dim 1.0 +667 15 loss.margin 9.77450635378845 +667 15 optimizer.lr 0.01895667450071372 +667 15 negative_sampler.num_negs_per_pos 30.0 +667 15 training.batch_size 0.0 +667 16 model.embedding_dim 2.0 +667 16 loss.margin 5.124059923659573 +667 16 optimizer.lr 0.020037262372935415 +667 16 negative_sampler.num_negs_per_pos 78.0 +667 16 training.batch_size 0.0 +667 17 model.embedding_dim 2.0 +667 17 loss.margin 2.00850039165579 +667 17 optimizer.lr 0.019699504346725277 +667 17 negative_sampler.num_negs_per_pos 38.0 +667 17 training.batch_size 1.0 +667 18 model.embedding_dim 0.0 +667 18 loss.margin 4.709485919635556 +667 18 optimizer.lr 0.0012438609149123974 +667 18 negative_sampler.num_negs_per_pos 87.0 +667 18 training.batch_size 2.0 +667 19 model.embedding_dim 2.0 +667 19 loss.margin 3.7524026807188795 +667 19 optimizer.lr 0.0017536717772761023 +667 19 negative_sampler.num_negs_per_pos 97.0 +667 19 training.batch_size 2.0 +667 20 model.embedding_dim 1.0 +667 20 loss.margin 7.9026567150887885 +667 20 optimizer.lr 0.07080727030093605 +667 20 negative_sampler.num_negs_per_pos 47.0 +667 20 training.batch_size 2.0 +667 21 model.embedding_dim 1.0 +667 21 loss.margin 3.0520938120784913 +667 21 optimizer.lr 0.0010898183534775103 +667 21 negative_sampler.num_negs_per_pos 69.0 +667 21 training.batch_size 1.0 +667 22 model.embedding_dim 2.0 +667 22 loss.margin 2.3661278313780336 +667 22 optimizer.lr 0.033732953937889346 +667 22 negative_sampler.num_negs_per_pos 82.0 +667 22 
training.batch_size 2.0 +667 23 model.embedding_dim 1.0 +667 23 loss.margin 8.332574924859927 +667 23 optimizer.lr 0.0010294537753925577 +667 23 negative_sampler.num_negs_per_pos 49.0 +667 23 training.batch_size 1.0 +667 24 model.embedding_dim 1.0 +667 24 loss.margin 2.093231706170945 +667 24 optimizer.lr 0.012223762743922326 +667 24 negative_sampler.num_negs_per_pos 43.0 +667 24 training.batch_size 1.0 +667 25 model.embedding_dim 2.0 +667 25 loss.margin 4.566816450803112 +667 25 optimizer.lr 0.01784089563007475 +667 25 negative_sampler.num_negs_per_pos 89.0 +667 25 training.batch_size 2.0 +667 26 model.embedding_dim 0.0 +667 26 loss.margin 8.649580673229394 +667 26 optimizer.lr 0.02229532180387908 +667 26 negative_sampler.num_negs_per_pos 7.0 +667 26 training.batch_size 0.0 +667 27 model.embedding_dim 2.0 +667 27 loss.margin 6.657789590768104 +667 27 optimizer.lr 0.0020598151140253694 +667 27 negative_sampler.num_negs_per_pos 50.0 +667 27 training.batch_size 0.0 +667 28 model.embedding_dim 0.0 +667 28 loss.margin 2.2097607957674352 +667 28 optimizer.lr 0.02324151876530222 +667 28 negative_sampler.num_negs_per_pos 76.0 +667 28 training.batch_size 1.0 +667 29 model.embedding_dim 0.0 +667 29 loss.margin 8.68394457945906 +667 29 optimizer.lr 0.006170737298624597 +667 29 negative_sampler.num_negs_per_pos 92.0 +667 29 training.batch_size 2.0 +667 30 model.embedding_dim 2.0 +667 30 loss.margin 2.2141809417903473 +667 30 optimizer.lr 0.039074249395158495 +667 30 negative_sampler.num_negs_per_pos 71.0 +667 30 training.batch_size 2.0 +667 31 model.embedding_dim 2.0 +667 31 loss.margin 1.7218213674888032 +667 31 optimizer.lr 0.022335880947151228 +667 31 negative_sampler.num_negs_per_pos 52.0 +667 31 training.batch_size 0.0 +667 32 model.embedding_dim 1.0 +667 32 loss.margin 4.903005945275675 +667 32 optimizer.lr 0.023248767316555858 +667 32 negative_sampler.num_negs_per_pos 71.0 +667 32 training.batch_size 0.0 +667 33 model.embedding_dim 0.0 +667 33 loss.margin 
3.348378233713358 +667 33 optimizer.lr 0.004352443692326154 +667 33 negative_sampler.num_negs_per_pos 81.0 +667 33 training.batch_size 2.0 +667 34 model.embedding_dim 1.0 +667 34 loss.margin 4.297255314148641 +667 34 optimizer.lr 0.07263472312264171 +667 34 negative_sampler.num_negs_per_pos 5.0 +667 34 training.batch_size 0.0 +667 35 model.embedding_dim 0.0 +667 35 loss.margin 9.294428723100138 +667 35 optimizer.lr 0.09354593394799234 +667 35 negative_sampler.num_negs_per_pos 69.0 +667 35 training.batch_size 1.0 +667 36 model.embedding_dim 1.0 +667 36 loss.margin 2.3026767056638695 +667 36 optimizer.lr 0.0010221610677851314 +667 36 negative_sampler.num_negs_per_pos 38.0 +667 36 training.batch_size 2.0 +667 37 model.embedding_dim 1.0 +667 37 loss.margin 6.795671208153111 +667 37 optimizer.lr 0.011011365479002003 +667 37 negative_sampler.num_negs_per_pos 99.0 +667 37 training.batch_size 0.0 +667 38 model.embedding_dim 1.0 +667 38 loss.margin 4.304695472026058 +667 38 optimizer.lr 0.05182270963180878 +667 38 negative_sampler.num_negs_per_pos 7.0 +667 38 training.batch_size 0.0 +667 39 model.embedding_dim 2.0 +667 39 loss.margin 8.759692927923556 +667 39 optimizer.lr 0.004162493937709132 +667 39 negative_sampler.num_negs_per_pos 61.0 +667 39 training.batch_size 0.0 +667 40 model.embedding_dim 0.0 +667 40 loss.margin 7.47670446992916 +667 40 optimizer.lr 0.024447097471758678 +667 40 negative_sampler.num_negs_per_pos 93.0 +667 40 training.batch_size 2.0 +667 41 model.embedding_dim 0.0 +667 41 loss.margin 2.7027786310395356 +667 41 optimizer.lr 0.09476902342580429 +667 41 negative_sampler.num_negs_per_pos 23.0 +667 41 training.batch_size 1.0 +667 42 model.embedding_dim 2.0 +667 42 loss.margin 3.541691129509077 +667 42 optimizer.lr 0.0022608344310670364 +667 42 negative_sampler.num_negs_per_pos 79.0 +667 42 training.batch_size 2.0 +667 43 model.embedding_dim 1.0 +667 43 loss.margin 5.181932695821207 +667 43 optimizer.lr 0.05214767872564329 +667 43 
negative_sampler.num_negs_per_pos 6.0 +667 43 training.batch_size 1.0 +667 44 model.embedding_dim 2.0 +667 44 loss.margin 4.972140549261576 +667 44 optimizer.lr 0.0034402064916513536 +667 44 negative_sampler.num_negs_per_pos 71.0 +667 44 training.batch_size 2.0 +667 45 model.embedding_dim 2.0 +667 45 loss.margin 3.1725005480491766 +667 45 optimizer.lr 0.002900332914426083 +667 45 negative_sampler.num_negs_per_pos 79.0 +667 45 training.batch_size 1.0 +667 46 model.embedding_dim 2.0 +667 46 loss.margin 9.42043010234201 +667 46 optimizer.lr 0.0029474024147835357 +667 46 negative_sampler.num_negs_per_pos 8.0 +667 46 training.batch_size 0.0 +667 47 model.embedding_dim 2.0 +667 47 loss.margin 7.343094510409484 +667 47 optimizer.lr 0.04398934658705054 +667 47 negative_sampler.num_negs_per_pos 68.0 +667 47 training.batch_size 1.0 +667 48 model.embedding_dim 2.0 +667 48 loss.margin 3.5860619887015384 +667 48 optimizer.lr 0.008970127915529093 +667 48 negative_sampler.num_negs_per_pos 52.0 +667 48 training.batch_size 1.0 +667 49 model.embedding_dim 1.0 +667 49 loss.margin 9.073232082403184 +667 49 optimizer.lr 0.0053012023118916615 +667 49 negative_sampler.num_negs_per_pos 60.0 +667 49 training.batch_size 0.0 +667 50 model.embedding_dim 1.0 +667 50 loss.margin 3.717119264029581 +667 50 optimizer.lr 0.0013705861217584185 +667 50 negative_sampler.num_negs_per_pos 68.0 +667 50 training.batch_size 0.0 +667 51 model.embedding_dim 2.0 +667 51 loss.margin 7.542605466686879 +667 51 optimizer.lr 0.0014194748612023711 +667 51 negative_sampler.num_negs_per_pos 78.0 +667 51 training.batch_size 0.0 +667 52 model.embedding_dim 2.0 +667 52 loss.margin 9.75548520705933 +667 52 optimizer.lr 0.0766358404568666 +667 52 negative_sampler.num_negs_per_pos 80.0 +667 52 training.batch_size 0.0 +667 53 model.embedding_dim 0.0 +667 53 loss.margin 2.5422463817067116 +667 53 optimizer.lr 0.02498211600229209 +667 53 negative_sampler.num_negs_per_pos 85.0 +667 53 training.batch_size 1.0 +667 54 
model.embedding_dim 2.0 +667 54 loss.margin 4.185839186153862 +667 54 optimizer.lr 0.01415881890615893 +667 54 negative_sampler.num_negs_per_pos 85.0 +667 54 training.batch_size 0.0 +667 55 model.embedding_dim 1.0 +667 55 loss.margin 8.544125124233494 +667 55 optimizer.lr 0.04896345378822568 +667 55 negative_sampler.num_negs_per_pos 79.0 +667 55 training.batch_size 0.0 +667 56 model.embedding_dim 2.0 +667 56 loss.margin 9.498979965880585 +667 56 optimizer.lr 0.0028816837235975143 +667 56 negative_sampler.num_negs_per_pos 41.0 +667 56 training.batch_size 2.0 +667 57 model.embedding_dim 1.0 +667 57 loss.margin 8.454411551002858 +667 57 optimizer.lr 0.017932803566160605 +667 57 negative_sampler.num_negs_per_pos 70.0 +667 57 training.batch_size 0.0 +667 58 model.embedding_dim 1.0 +667 58 loss.margin 5.475222084627463 +667 58 optimizer.lr 0.009375072165707064 +667 58 negative_sampler.num_negs_per_pos 1.0 +667 58 training.batch_size 1.0 +667 59 model.embedding_dim 2.0 +667 59 loss.margin 6.481402793258362 +667 59 optimizer.lr 0.0017324633217652926 +667 59 negative_sampler.num_negs_per_pos 64.0 +667 59 training.batch_size 1.0 +667 60 model.embedding_dim 0.0 +667 60 loss.margin 7.408831753500546 +667 60 optimizer.lr 0.005772922690731802 +667 60 negative_sampler.num_negs_per_pos 1.0 +667 60 training.batch_size 1.0 +667 61 model.embedding_dim 2.0 +667 61 loss.margin 5.540406155131912 +667 61 optimizer.lr 0.02737295153039117 +667 61 negative_sampler.num_negs_per_pos 33.0 +667 61 training.batch_size 1.0 +667 62 model.embedding_dim 2.0 +667 62 loss.margin 3.7140962182886805 +667 62 optimizer.lr 0.010362579136632192 +667 62 negative_sampler.num_negs_per_pos 62.0 +667 62 training.batch_size 0.0 +667 63 model.embedding_dim 1.0 +667 63 loss.margin 7.476731371891258 +667 63 optimizer.lr 0.002368225583306485 +667 63 negative_sampler.num_negs_per_pos 98.0 +667 63 training.batch_size 1.0 +667 64 model.embedding_dim 1.0 +667 64 loss.margin 0.570623497528465 +667 64 optimizer.lr 
0.0035375875621538513 +667 64 negative_sampler.num_negs_per_pos 79.0 +667 64 training.batch_size 2.0 +667 65 model.embedding_dim 1.0 +667 65 loss.margin 9.072700542861242 +667 65 optimizer.lr 0.0013775033436388042 +667 65 negative_sampler.num_negs_per_pos 96.0 +667 65 training.batch_size 1.0 +667 66 model.embedding_dim 1.0 +667 66 loss.margin 5.529384311224731 +667 66 optimizer.lr 0.00502135490764615 +667 66 negative_sampler.num_negs_per_pos 52.0 +667 66 training.batch_size 1.0 +667 67 model.embedding_dim 0.0 +667 67 loss.margin 5.7484608562884505 +667 67 optimizer.lr 0.0030106268683523845 +667 67 negative_sampler.num_negs_per_pos 79.0 +667 67 training.batch_size 1.0 +667 68 model.embedding_dim 2.0 +667 68 loss.margin 1.7004945996187613 +667 68 optimizer.lr 0.06190427184848519 +667 68 negative_sampler.num_negs_per_pos 86.0 +667 68 training.batch_size 0.0 +667 69 model.embedding_dim 0.0 +667 69 loss.margin 6.427530520612555 +667 69 optimizer.lr 0.0132687440288315 +667 69 negative_sampler.num_negs_per_pos 53.0 +667 69 training.batch_size 2.0 +667 70 model.embedding_dim 2.0 +667 70 loss.margin 6.723560719403405 +667 70 optimizer.lr 0.01514991208204796 +667 70 negative_sampler.num_negs_per_pos 78.0 +667 70 training.batch_size 2.0 +667 71 model.embedding_dim 1.0 +667 71 loss.margin 6.681740560740756 +667 71 optimizer.lr 0.006833232620702733 +667 71 negative_sampler.num_negs_per_pos 21.0 +667 71 training.batch_size 0.0 +667 72 model.embedding_dim 2.0 +667 72 loss.margin 2.417326131799891 +667 72 optimizer.lr 0.07398601769158447 +667 72 negative_sampler.num_negs_per_pos 40.0 +667 72 training.batch_size 0.0 +667 73 model.embedding_dim 2.0 +667 73 loss.margin 7.825232302379031 +667 73 optimizer.lr 0.04163585868303528 +667 73 negative_sampler.num_negs_per_pos 2.0 +667 73 training.batch_size 0.0 +667 74 model.embedding_dim 2.0 +667 74 loss.margin 2.542923961989741 +667 74 optimizer.lr 0.008968569110918381 +667 74 negative_sampler.num_negs_per_pos 70.0 +667 74 
training.batch_size 2.0 +667 75 model.embedding_dim 1.0 +667 75 loss.margin 4.094886730939635 +667 75 optimizer.lr 0.004357347245738158 +667 75 negative_sampler.num_negs_per_pos 49.0 +667 75 training.batch_size 1.0 +667 76 model.embedding_dim 0.0 +667 76 loss.margin 5.558951464940945 +667 76 optimizer.lr 0.010074315371137661 +667 76 negative_sampler.num_negs_per_pos 76.0 +667 76 training.batch_size 2.0 +667 77 model.embedding_dim 0.0 +667 77 loss.margin 6.619517155785756 +667 77 optimizer.lr 0.020182532246683843 +667 77 negative_sampler.num_negs_per_pos 46.0 +667 77 training.batch_size 1.0 +667 78 model.embedding_dim 2.0 +667 78 loss.margin 7.999499620794534 +667 78 optimizer.lr 0.0016234288916357844 +667 78 negative_sampler.num_negs_per_pos 50.0 +667 78 training.batch_size 0.0 +667 79 model.embedding_dim 0.0 +667 79 loss.margin 4.61669584299681 +667 79 optimizer.lr 0.0017299761526918605 +667 79 negative_sampler.num_negs_per_pos 29.0 +667 79 training.batch_size 0.0 +667 80 model.embedding_dim 1.0 +667 80 loss.margin 2.8430329140037305 +667 80 optimizer.lr 0.019605906128942127 +667 80 negative_sampler.num_negs_per_pos 27.0 +667 80 training.batch_size 1.0 +667 81 model.embedding_dim 0.0 +667 81 loss.margin 4.2995469686203345 +667 81 optimizer.lr 0.07641071419998575 +667 81 negative_sampler.num_negs_per_pos 5.0 +667 81 training.batch_size 1.0 +667 82 model.embedding_dim 0.0 +667 82 loss.margin 6.257014436264052 +667 82 optimizer.lr 0.002459600952257137 +667 82 negative_sampler.num_negs_per_pos 62.0 +667 82 training.batch_size 1.0 +667 83 model.embedding_dim 1.0 +667 83 loss.margin 2.0099621588993797 +667 83 optimizer.lr 0.005461993748069968 +667 83 negative_sampler.num_negs_per_pos 1.0 +667 83 training.batch_size 1.0 +667 84 model.embedding_dim 0.0 +667 84 loss.margin 4.893839977833302 +667 84 optimizer.lr 0.0051227454659775705 +667 84 negative_sampler.num_negs_per_pos 61.0 +667 84 training.batch_size 2.0 +667 85 model.embedding_dim 2.0 +667 85 loss.margin 
8.749525241017395 +667 85 optimizer.lr 0.053422417756659554 +667 85 negative_sampler.num_negs_per_pos 0.0 +667 85 training.batch_size 0.0 +667 86 model.embedding_dim 0.0 +667 86 loss.margin 9.60814560340811 +667 86 optimizer.lr 0.09658689836786798 +667 86 negative_sampler.num_negs_per_pos 41.0 +667 86 training.batch_size 0.0 +667 87 model.embedding_dim 1.0 +667 87 loss.margin 3.7895213435279302 +667 87 optimizer.lr 0.020925653241628206 +667 87 negative_sampler.num_negs_per_pos 95.0 +667 87 training.batch_size 2.0 +667 88 model.embedding_dim 0.0 +667 88 loss.margin 3.275210068648726 +667 88 optimizer.lr 0.001321143951383902 +667 88 negative_sampler.num_negs_per_pos 48.0 +667 88 training.batch_size 0.0 +667 89 model.embedding_dim 2.0 +667 89 loss.margin 6.581470220897955 +667 89 optimizer.lr 0.004504974546267837 +667 89 negative_sampler.num_negs_per_pos 63.0 +667 89 training.batch_size 0.0 +667 90 model.embedding_dim 0.0 +667 90 loss.margin 4.391767902832838 +667 90 optimizer.lr 0.0024110895233180876 +667 90 negative_sampler.num_negs_per_pos 70.0 +667 90 training.batch_size 2.0 +667 91 model.embedding_dim 1.0 +667 91 loss.margin 9.776610383588878 +667 91 optimizer.lr 0.019431045151959366 +667 91 negative_sampler.num_negs_per_pos 27.0 +667 91 training.batch_size 2.0 +667 92 model.embedding_dim 2.0 +667 92 loss.margin 0.8871857952943474 +667 92 optimizer.lr 0.04794617305140818 +667 92 negative_sampler.num_negs_per_pos 73.0 +667 92 training.batch_size 0.0 +667 93 model.embedding_dim 0.0 +667 93 loss.margin 9.58272966303907 +667 93 optimizer.lr 0.05557395817397699 +667 93 negative_sampler.num_negs_per_pos 54.0 +667 93 training.batch_size 0.0 +667 94 model.embedding_dim 1.0 +667 94 loss.margin 7.64543132222378 +667 94 optimizer.lr 0.0016853574988613014 +667 94 negative_sampler.num_negs_per_pos 48.0 +667 94 training.batch_size 0.0 +667 95 model.embedding_dim 2.0 +667 95 loss.margin 1.7232584435097063 +667 95 optimizer.lr 0.07335032904048955 +667 95 
negative_sampler.num_negs_per_pos 87.0 +667 95 training.batch_size 2.0 +667 96 model.embedding_dim 2.0 +667 96 loss.margin 8.660521437340007 +667 96 optimizer.lr 0.05074994869745188 +667 96 negative_sampler.num_negs_per_pos 66.0 +667 96 training.batch_size 0.0 +667 97 model.embedding_dim 0.0 +667 97 loss.margin 2.2702551083774427 +667 97 optimizer.lr 0.004768145900269125 +667 97 negative_sampler.num_negs_per_pos 13.0 +667 97 training.batch_size 2.0 +667 98 model.embedding_dim 0.0 +667 98 loss.margin 2.5424457117983534 +667 98 optimizer.lr 0.07070441132745808 +667 98 negative_sampler.num_negs_per_pos 88.0 +667 98 training.batch_size 1.0 +667 99 model.embedding_dim 0.0 +667 99 loss.margin 2.8604662724479537 +667 99 optimizer.lr 0.00245004990643958 +667 99 negative_sampler.num_negs_per_pos 87.0 +667 99 training.batch_size 1.0 +667 100 model.embedding_dim 1.0 +667 100 loss.margin 5.872176788252439 +667 100 optimizer.lr 0.022016629544530018 +667 100 negative_sampler.num_negs_per_pos 35.0 +667 100 training.batch_size 1.0 +667 1 dataset """kinships""" +667 1 model """simple""" +667 1 loss """marginranking""" +667 1 regularizer """no""" +667 1 optimizer """adam""" +667 1 training_loop """owa""" +667 1 negative_sampler """basic""" +667 1 evaluator """rankbased""" +667 2 dataset """kinships""" +667 2 model """simple""" +667 2 loss """marginranking""" +667 2 regularizer """no""" +667 2 optimizer """adam""" +667 2 training_loop """owa""" +667 2 negative_sampler """basic""" +667 2 evaluator """rankbased""" +667 3 dataset """kinships""" +667 3 model """simple""" +667 3 loss """marginranking""" +667 3 regularizer """no""" +667 3 optimizer """adam""" +667 3 training_loop """owa""" +667 3 negative_sampler """basic""" +667 3 evaluator """rankbased""" +667 4 dataset """kinships""" +667 4 model """simple""" +667 4 loss """marginranking""" +667 4 regularizer """no""" +667 4 optimizer """adam""" +667 4 training_loop """owa""" +667 4 negative_sampler """basic""" +667 4 evaluator 
"""rankbased""" +667 5 dataset """kinships""" +667 5 model """simple""" +667 5 loss """marginranking""" +667 5 regularizer """no""" +667 5 optimizer """adam""" +667 5 training_loop """owa""" +667 5 negative_sampler """basic""" +667 5 evaluator """rankbased""" +667 6 dataset """kinships""" +667 6 model """simple""" +667 6 loss """marginranking""" +667 6 regularizer """no""" +667 6 optimizer """adam""" +667 6 training_loop """owa""" +667 6 negative_sampler """basic""" +667 6 evaluator """rankbased""" +667 7 dataset """kinships""" +667 7 model """simple""" +667 7 loss """marginranking""" +667 7 regularizer """no""" +667 7 optimizer """adam""" +667 7 training_loop """owa""" +667 7 negative_sampler """basic""" +667 7 evaluator """rankbased""" +667 8 dataset """kinships""" +667 8 model """simple""" +667 8 loss """marginranking""" +667 8 regularizer """no""" +667 8 optimizer """adam""" +667 8 training_loop """owa""" +667 8 negative_sampler """basic""" +667 8 evaluator """rankbased""" +667 9 dataset """kinships""" +667 9 model """simple""" +667 9 loss """marginranking""" +667 9 regularizer """no""" +667 9 optimizer """adam""" +667 9 training_loop """owa""" +667 9 negative_sampler """basic""" +667 9 evaluator """rankbased""" +667 10 dataset """kinships""" +667 10 model """simple""" +667 10 loss """marginranking""" +667 10 regularizer """no""" +667 10 optimizer """adam""" +667 10 training_loop """owa""" +667 10 negative_sampler """basic""" +667 10 evaluator """rankbased""" +667 11 dataset """kinships""" +667 11 model """simple""" +667 11 loss """marginranking""" +667 11 regularizer """no""" +667 11 optimizer """adam""" +667 11 training_loop """owa""" +667 11 negative_sampler """basic""" +667 11 evaluator """rankbased""" +667 12 dataset """kinships""" +667 12 model """simple""" +667 12 loss """marginranking""" +667 12 regularizer """no""" +667 12 optimizer """adam""" +667 12 training_loop """owa""" +667 12 negative_sampler """basic""" +667 12 evaluator """rankbased""" +667 13 
dataset """kinships""" +667 13 model """simple""" +667 13 loss """marginranking""" +667 13 regularizer """no""" +667 13 optimizer """adam""" +667 13 training_loop """owa""" +667 13 negative_sampler """basic""" +667 13 evaluator """rankbased""" +667 14 dataset """kinships""" +667 14 model """simple""" +667 14 loss """marginranking""" +667 14 regularizer """no""" +667 14 optimizer """adam""" +667 14 training_loop """owa""" +667 14 negative_sampler """basic""" +667 14 evaluator """rankbased""" +667 15 dataset """kinships""" +667 15 model """simple""" +667 15 loss """marginranking""" +667 15 regularizer """no""" +667 15 optimizer """adam""" +667 15 training_loop """owa""" +667 15 negative_sampler """basic""" +667 15 evaluator """rankbased""" +667 16 dataset """kinships""" +667 16 model """simple""" +667 16 loss """marginranking""" +667 16 regularizer """no""" +667 16 optimizer """adam""" +667 16 training_loop """owa""" +667 16 negative_sampler """basic""" +667 16 evaluator """rankbased""" +667 17 dataset """kinships""" +667 17 model """simple""" +667 17 loss """marginranking""" +667 17 regularizer """no""" +667 17 optimizer """adam""" +667 17 training_loop """owa""" +667 17 negative_sampler """basic""" +667 17 evaluator """rankbased""" +667 18 dataset """kinships""" +667 18 model """simple""" +667 18 loss """marginranking""" +667 18 regularizer """no""" +667 18 optimizer """adam""" +667 18 training_loop """owa""" +667 18 negative_sampler """basic""" +667 18 evaluator """rankbased""" +667 19 dataset """kinships""" +667 19 model """simple""" +667 19 loss """marginranking""" +667 19 regularizer """no""" +667 19 optimizer """adam""" +667 19 training_loop """owa""" +667 19 negative_sampler """basic""" +667 19 evaluator """rankbased""" +667 20 dataset """kinships""" +667 20 model """simple""" +667 20 loss """marginranking""" +667 20 regularizer """no""" +667 20 optimizer """adam""" +667 20 training_loop """owa""" +667 20 negative_sampler """basic""" +667 20 evaluator 
"""rankbased""" +667 21 dataset """kinships""" +667 21 model """simple""" +667 21 loss """marginranking""" +667 21 regularizer """no""" +667 21 optimizer """adam""" +667 21 training_loop """owa""" +667 21 negative_sampler """basic""" +667 21 evaluator """rankbased""" +667 22 dataset """kinships""" +667 22 model """simple""" +667 22 loss """marginranking""" +667 22 regularizer """no""" +667 22 optimizer """adam""" +667 22 training_loop """owa""" +667 22 negative_sampler """basic""" +667 22 evaluator """rankbased""" +667 23 dataset """kinships""" +667 23 model """simple""" +667 23 loss """marginranking""" +667 23 regularizer """no""" +667 23 optimizer """adam""" +667 23 training_loop """owa""" +667 23 negative_sampler """basic""" +667 23 evaluator """rankbased""" +667 24 dataset """kinships""" +667 24 model """simple""" +667 24 loss """marginranking""" +667 24 regularizer """no""" +667 24 optimizer """adam""" +667 24 training_loop """owa""" +667 24 negative_sampler """basic""" +667 24 evaluator """rankbased""" +667 25 dataset """kinships""" +667 25 model """simple""" +667 25 loss """marginranking""" +667 25 regularizer """no""" +667 25 optimizer """adam""" +667 25 training_loop """owa""" +667 25 negative_sampler """basic""" +667 25 evaluator """rankbased""" +667 26 dataset """kinships""" +667 26 model """simple""" +667 26 loss """marginranking""" +667 26 regularizer """no""" +667 26 optimizer """adam""" +667 26 training_loop """owa""" +667 26 negative_sampler """basic""" +667 26 evaluator """rankbased""" +667 27 dataset """kinships""" +667 27 model """simple""" +667 27 loss """marginranking""" +667 27 regularizer """no""" +667 27 optimizer """adam""" +667 27 training_loop """owa""" +667 27 negative_sampler """basic""" +667 27 evaluator """rankbased""" +667 28 dataset """kinships""" +667 28 model """simple""" +667 28 loss """marginranking""" +667 28 regularizer """no""" +667 28 optimizer """adam""" +667 28 training_loop """owa""" +667 28 negative_sampler """basic""" 
+667 28 evaluator """rankbased""" +667 29 dataset """kinships""" +667 29 model """simple""" +667 29 loss """marginranking""" +667 29 regularizer """no""" +667 29 optimizer """adam""" +667 29 training_loop """owa""" +667 29 negative_sampler """basic""" +667 29 evaluator """rankbased""" +667 30 dataset """kinships""" +667 30 model """simple""" +667 30 loss """marginranking""" +667 30 regularizer """no""" +667 30 optimizer """adam""" +667 30 training_loop """owa""" +667 30 negative_sampler """basic""" +667 30 evaluator """rankbased""" +667 31 dataset """kinships""" +667 31 model """simple""" +667 31 loss """marginranking""" +667 31 regularizer """no""" +667 31 optimizer """adam""" +667 31 training_loop """owa""" +667 31 negative_sampler """basic""" +667 31 evaluator """rankbased""" +667 32 dataset """kinships""" +667 32 model """simple""" +667 32 loss """marginranking""" +667 32 regularizer """no""" +667 32 optimizer """adam""" +667 32 training_loop """owa""" +667 32 negative_sampler """basic""" +667 32 evaluator """rankbased""" +667 33 dataset """kinships""" +667 33 model """simple""" +667 33 loss """marginranking""" +667 33 regularizer """no""" +667 33 optimizer """adam""" +667 33 training_loop """owa""" +667 33 negative_sampler """basic""" +667 33 evaluator """rankbased""" +667 34 dataset """kinships""" +667 34 model """simple""" +667 34 loss """marginranking""" +667 34 regularizer """no""" +667 34 optimizer """adam""" +667 34 training_loop """owa""" +667 34 negative_sampler """basic""" +667 34 evaluator """rankbased""" +667 35 dataset """kinships""" +667 35 model """simple""" +667 35 loss """marginranking""" +667 35 regularizer """no""" +667 35 optimizer """adam""" +667 35 training_loop """owa""" +667 35 negative_sampler """basic""" +667 35 evaluator """rankbased""" +667 36 dataset """kinships""" +667 36 model """simple""" +667 36 loss """marginranking""" +667 36 regularizer """no""" +667 36 optimizer """adam""" +667 36 training_loop """owa""" +667 36 
negative_sampler """basic""" +667 36 evaluator """rankbased""" +667 37 dataset """kinships""" +667 37 model """simple""" +667 37 loss """marginranking""" +667 37 regularizer """no""" +667 37 optimizer """adam""" +667 37 training_loop """owa""" +667 37 negative_sampler """basic""" +667 37 evaluator """rankbased""" +667 38 dataset """kinships""" +667 38 model """simple""" +667 38 loss """marginranking""" +667 38 regularizer """no""" +667 38 optimizer """adam""" +667 38 training_loop """owa""" +667 38 negative_sampler """basic""" +667 38 evaluator """rankbased""" +667 39 dataset """kinships""" +667 39 model """simple""" +667 39 loss """marginranking""" +667 39 regularizer """no""" +667 39 optimizer """adam""" +667 39 training_loop """owa""" +667 39 negative_sampler """basic""" +667 39 evaluator """rankbased""" +667 40 dataset """kinships""" +667 40 model """simple""" +667 40 loss """marginranking""" +667 40 regularizer """no""" +667 40 optimizer """adam""" +667 40 training_loop """owa""" +667 40 negative_sampler """basic""" +667 40 evaluator """rankbased""" +667 41 dataset """kinships""" +667 41 model """simple""" +667 41 loss """marginranking""" +667 41 regularizer """no""" +667 41 optimizer """adam""" +667 41 training_loop """owa""" +667 41 negative_sampler """basic""" +667 41 evaluator """rankbased""" +667 42 dataset """kinships""" +667 42 model """simple""" +667 42 loss """marginranking""" +667 42 regularizer """no""" +667 42 optimizer """adam""" +667 42 training_loop """owa""" +667 42 negative_sampler """basic""" +667 42 evaluator """rankbased""" +667 43 dataset """kinships""" +667 43 model """simple""" +667 43 loss """marginranking""" +667 43 regularizer """no""" +667 43 optimizer """adam""" +667 43 training_loop """owa""" +667 43 negative_sampler """basic""" +667 43 evaluator """rankbased""" +667 44 dataset """kinships""" +667 44 model """simple""" +667 44 loss """marginranking""" +667 44 regularizer """no""" +667 44 optimizer """adam""" +667 44 training_loop 
"""owa""" +667 44 negative_sampler """basic""" +667 44 evaluator """rankbased""" +667 45 dataset """kinships""" +667 45 model """simple""" +667 45 loss """marginranking""" +667 45 regularizer """no""" +667 45 optimizer """adam""" +667 45 training_loop """owa""" +667 45 negative_sampler """basic""" +667 45 evaluator """rankbased""" +667 46 dataset """kinships""" +667 46 model """simple""" +667 46 loss """marginranking""" +667 46 regularizer """no""" +667 46 optimizer """adam""" +667 46 training_loop """owa""" +667 46 negative_sampler """basic""" +667 46 evaluator """rankbased""" +667 47 dataset """kinships""" +667 47 model """simple""" +667 47 loss """marginranking""" +667 47 regularizer """no""" +667 47 optimizer """adam""" +667 47 training_loop """owa""" +667 47 negative_sampler """basic""" +667 47 evaluator """rankbased""" +667 48 dataset """kinships""" +667 48 model """simple""" +667 48 loss """marginranking""" +667 48 regularizer """no""" +667 48 optimizer """adam""" +667 48 training_loop """owa""" +667 48 negative_sampler """basic""" +667 48 evaluator """rankbased""" +667 49 dataset """kinships""" +667 49 model """simple""" +667 49 loss """marginranking""" +667 49 regularizer """no""" +667 49 optimizer """adam""" +667 49 training_loop """owa""" +667 49 negative_sampler """basic""" +667 49 evaluator """rankbased""" +667 50 dataset """kinships""" +667 50 model """simple""" +667 50 loss """marginranking""" +667 50 regularizer """no""" +667 50 optimizer """adam""" +667 50 training_loop """owa""" +667 50 negative_sampler """basic""" +667 50 evaluator """rankbased""" +667 51 dataset """kinships""" +667 51 model """simple""" +667 51 loss """marginranking""" +667 51 regularizer """no""" +667 51 optimizer """adam""" +667 51 training_loop """owa""" +667 51 negative_sampler """basic""" +667 51 evaluator """rankbased""" +667 52 dataset """kinships""" +667 52 model """simple""" +667 52 loss """marginranking""" +667 52 regularizer """no""" +667 52 optimizer """adam""" +667 
52 training_loop """owa""" +667 52 negative_sampler """basic""" +667 52 evaluator """rankbased""" +667 53 dataset """kinships""" +667 53 model """simple""" +667 53 loss """marginranking""" +667 53 regularizer """no""" +667 53 optimizer """adam""" +667 53 training_loop """owa""" +667 53 negative_sampler """basic""" +667 53 evaluator """rankbased""" +667 54 dataset """kinships""" +667 54 model """simple""" +667 54 loss """marginranking""" +667 54 regularizer """no""" +667 54 optimizer """adam""" +667 54 training_loop """owa""" +667 54 negative_sampler """basic""" +667 54 evaluator """rankbased""" +667 55 dataset """kinships""" +667 55 model """simple""" +667 55 loss """marginranking""" +667 55 regularizer """no""" +667 55 optimizer """adam""" +667 55 training_loop """owa""" +667 55 negative_sampler """basic""" +667 55 evaluator """rankbased""" +667 56 dataset """kinships""" +667 56 model """simple""" +667 56 loss """marginranking""" +667 56 regularizer """no""" +667 56 optimizer """adam""" +667 56 training_loop """owa""" +667 56 negative_sampler """basic""" +667 56 evaluator """rankbased""" +667 57 dataset """kinships""" +667 57 model """simple""" +667 57 loss """marginranking""" +667 57 regularizer """no""" +667 57 optimizer """adam""" +667 57 training_loop """owa""" +667 57 negative_sampler """basic""" +667 57 evaluator """rankbased""" +667 58 dataset """kinships""" +667 58 model """simple""" +667 58 loss """marginranking""" +667 58 regularizer """no""" +667 58 optimizer """adam""" +667 58 training_loop """owa""" +667 58 negative_sampler """basic""" +667 58 evaluator """rankbased""" +667 59 dataset """kinships""" +667 59 model """simple""" +667 59 loss """marginranking""" +667 59 regularizer """no""" +667 59 optimizer """adam""" +667 59 training_loop """owa""" +667 59 negative_sampler """basic""" +667 59 evaluator """rankbased""" +667 60 dataset """kinships""" +667 60 model """simple""" +667 60 loss """marginranking""" +667 60 regularizer """no""" +667 60 optimizer 
"""adam""" +667 60 training_loop """owa""" +667 60 negative_sampler """basic""" +667 60 evaluator """rankbased""" +667 61 dataset """kinships""" +667 61 model """simple""" +667 61 loss """marginranking""" +667 61 regularizer """no""" +667 61 optimizer """adam""" +667 61 training_loop """owa""" +667 61 negative_sampler """basic""" +667 61 evaluator """rankbased""" +667 62 dataset """kinships""" +667 62 model """simple""" +667 62 loss """marginranking""" +667 62 regularizer """no""" +667 62 optimizer """adam""" +667 62 training_loop """owa""" +667 62 negative_sampler """basic""" +667 62 evaluator """rankbased""" +667 63 dataset """kinships""" +667 63 model """simple""" +667 63 loss """marginranking""" +667 63 regularizer """no""" +667 63 optimizer """adam""" +667 63 training_loop """owa""" +667 63 negative_sampler """basic""" +667 63 evaluator """rankbased""" +667 64 dataset """kinships""" +667 64 model """simple""" +667 64 loss """marginranking""" +667 64 regularizer """no""" +667 64 optimizer """adam""" +667 64 training_loop """owa""" +667 64 negative_sampler """basic""" +667 64 evaluator """rankbased""" +667 65 dataset """kinships""" +667 65 model """simple""" +667 65 loss """marginranking""" +667 65 regularizer """no""" +667 65 optimizer """adam""" +667 65 training_loop """owa""" +667 65 negative_sampler """basic""" +667 65 evaluator """rankbased""" +667 66 dataset """kinships""" +667 66 model """simple""" +667 66 loss """marginranking""" +667 66 regularizer """no""" +667 66 optimizer """adam""" +667 66 training_loop """owa""" +667 66 negative_sampler """basic""" +667 66 evaluator """rankbased""" +667 67 dataset """kinships""" +667 67 model """simple""" +667 67 loss """marginranking""" +667 67 regularizer """no""" +667 67 optimizer """adam""" +667 67 training_loop """owa""" +667 67 negative_sampler """basic""" +667 67 evaluator """rankbased""" +667 68 dataset """kinships""" +667 68 model """simple""" +667 68 loss """marginranking""" +667 68 regularizer """no""" 
+667 68 optimizer """adam""" +667 68 training_loop """owa""" +667 68 negative_sampler """basic""" +667 68 evaluator """rankbased""" +667 69 dataset """kinships""" +667 69 model """simple""" +667 69 loss """marginranking""" +667 69 regularizer """no""" +667 69 optimizer """adam""" +667 69 training_loop """owa""" +667 69 negative_sampler """basic""" +667 69 evaluator """rankbased""" +667 70 dataset """kinships""" +667 70 model """simple""" +667 70 loss """marginranking""" +667 70 regularizer """no""" +667 70 optimizer """adam""" +667 70 training_loop """owa""" +667 70 negative_sampler """basic""" +667 70 evaluator """rankbased""" +667 71 dataset """kinships""" +667 71 model """simple""" +667 71 loss """marginranking""" +667 71 regularizer """no""" +667 71 optimizer """adam""" +667 71 training_loop """owa""" +667 71 negative_sampler """basic""" +667 71 evaluator """rankbased""" +667 72 dataset """kinships""" +667 72 model """simple""" +667 72 loss """marginranking""" +667 72 regularizer """no""" +667 72 optimizer """adam""" +667 72 training_loop """owa""" +667 72 negative_sampler """basic""" +667 72 evaluator """rankbased""" +667 73 dataset """kinships""" +667 73 model """simple""" +667 73 loss """marginranking""" +667 73 regularizer """no""" +667 73 optimizer """adam""" +667 73 training_loop """owa""" +667 73 negative_sampler """basic""" +667 73 evaluator """rankbased""" +667 74 dataset """kinships""" +667 74 model """simple""" +667 74 loss """marginranking""" +667 74 regularizer """no""" +667 74 optimizer """adam""" +667 74 training_loop """owa""" +667 74 negative_sampler """basic""" +667 74 evaluator """rankbased""" +667 75 dataset """kinships""" +667 75 model """simple""" +667 75 loss """marginranking""" +667 75 regularizer """no""" +667 75 optimizer """adam""" +667 75 training_loop """owa""" +667 75 negative_sampler """basic""" +667 75 evaluator """rankbased""" +667 76 dataset """kinships""" +667 76 model """simple""" +667 76 loss """marginranking""" +667 76 
regularizer """no""" +667 76 optimizer """adam""" +667 76 training_loop """owa""" +667 76 negative_sampler """basic""" +667 76 evaluator """rankbased""" +667 77 dataset """kinships""" +667 77 model """simple""" +667 77 loss """marginranking""" +667 77 regularizer """no""" +667 77 optimizer """adam""" +667 77 training_loop """owa""" +667 77 negative_sampler """basic""" +667 77 evaluator """rankbased""" +667 78 dataset """kinships""" +667 78 model """simple""" +667 78 loss """marginranking""" +667 78 regularizer """no""" +667 78 optimizer """adam""" +667 78 training_loop """owa""" +667 78 negative_sampler """basic""" +667 78 evaluator """rankbased""" +667 79 dataset """kinships""" +667 79 model """simple""" +667 79 loss """marginranking""" +667 79 regularizer """no""" +667 79 optimizer """adam""" +667 79 training_loop """owa""" +667 79 negative_sampler """basic""" +667 79 evaluator """rankbased""" +667 80 dataset """kinships""" +667 80 model """simple""" +667 80 loss """marginranking""" +667 80 regularizer """no""" +667 80 optimizer """adam""" +667 80 training_loop """owa""" +667 80 negative_sampler """basic""" +667 80 evaluator """rankbased""" +667 81 dataset """kinships""" +667 81 model """simple""" +667 81 loss """marginranking""" +667 81 regularizer """no""" +667 81 optimizer """adam""" +667 81 training_loop """owa""" +667 81 negative_sampler """basic""" +667 81 evaluator """rankbased""" +667 82 dataset """kinships""" +667 82 model """simple""" +667 82 loss """marginranking""" +667 82 regularizer """no""" +667 82 optimizer """adam""" +667 82 training_loop """owa""" +667 82 negative_sampler """basic""" +667 82 evaluator """rankbased""" +667 83 dataset """kinships""" +667 83 model """simple""" +667 83 loss """marginranking""" +667 83 regularizer """no""" +667 83 optimizer """adam""" +667 83 training_loop """owa""" +667 83 negative_sampler """basic""" +667 83 evaluator """rankbased""" +667 84 dataset """kinships""" +667 84 model """simple""" +667 84 loss 
"""marginranking""" +667 84 regularizer """no""" +667 84 optimizer """adam""" +667 84 training_loop """owa""" +667 84 negative_sampler """basic""" +667 84 evaluator """rankbased""" +667 85 dataset """kinships""" +667 85 model """simple""" +667 85 loss """marginranking""" +667 85 regularizer """no""" +667 85 optimizer """adam""" +667 85 training_loop """owa""" +667 85 negative_sampler """basic""" +667 85 evaluator """rankbased""" +667 86 dataset """kinships""" +667 86 model """simple""" +667 86 loss """marginranking""" +667 86 regularizer """no""" +667 86 optimizer """adam""" +667 86 training_loop """owa""" +667 86 negative_sampler """basic""" +667 86 evaluator """rankbased""" +667 87 dataset """kinships""" +667 87 model """simple""" +667 87 loss """marginranking""" +667 87 regularizer """no""" +667 87 optimizer """adam""" +667 87 training_loop """owa""" +667 87 negative_sampler """basic""" +667 87 evaluator """rankbased""" +667 88 dataset """kinships""" +667 88 model """simple""" +667 88 loss """marginranking""" +667 88 regularizer """no""" +667 88 optimizer """adam""" +667 88 training_loop """owa""" +667 88 negative_sampler """basic""" +667 88 evaluator """rankbased""" +667 89 dataset """kinships""" +667 89 model """simple""" +667 89 loss """marginranking""" +667 89 regularizer """no""" +667 89 optimizer """adam""" +667 89 training_loop """owa""" +667 89 negative_sampler """basic""" +667 89 evaluator """rankbased""" +667 90 dataset """kinships""" +667 90 model """simple""" +667 90 loss """marginranking""" +667 90 regularizer """no""" +667 90 optimizer """adam""" +667 90 training_loop """owa""" +667 90 negative_sampler """basic""" +667 90 evaluator """rankbased""" +667 91 dataset """kinships""" +667 91 model """simple""" +667 91 loss """marginranking""" +667 91 regularizer """no""" +667 91 optimizer """adam""" +667 91 training_loop """owa""" +667 91 negative_sampler """basic""" +667 91 evaluator """rankbased""" +667 92 dataset """kinships""" +667 92 model 
"""simple""" +667 92 loss """marginranking""" +667 92 regularizer """no""" +667 92 optimizer """adam""" +667 92 training_loop """owa""" +667 92 negative_sampler """basic""" +667 92 evaluator """rankbased""" +667 93 dataset """kinships""" +667 93 model """simple""" +667 93 loss """marginranking""" +667 93 regularizer """no""" +667 93 optimizer """adam""" +667 93 training_loop """owa""" +667 93 negative_sampler """basic""" +667 93 evaluator """rankbased""" +667 94 dataset """kinships""" +667 94 model """simple""" +667 94 loss """marginranking""" +667 94 regularizer """no""" +667 94 optimizer """adam""" +667 94 training_loop """owa""" +667 94 negative_sampler """basic""" +667 94 evaluator """rankbased""" +667 95 dataset """kinships""" +667 95 model """simple""" +667 95 loss """marginranking""" +667 95 regularizer """no""" +667 95 optimizer """adam""" +667 95 training_loop """owa""" +667 95 negative_sampler """basic""" +667 95 evaluator """rankbased""" +667 96 dataset """kinships""" +667 96 model """simple""" +667 96 loss """marginranking""" +667 96 regularizer """no""" +667 96 optimizer """adam""" +667 96 training_loop """owa""" +667 96 negative_sampler """basic""" +667 96 evaluator """rankbased""" +667 97 dataset """kinships""" +667 97 model """simple""" +667 97 loss """marginranking""" +667 97 regularizer """no""" +667 97 optimizer """adam""" +667 97 training_loop """owa""" +667 97 negative_sampler """basic""" +667 97 evaluator """rankbased""" +667 98 dataset """kinships""" +667 98 model """simple""" +667 98 loss """marginranking""" +667 98 regularizer """no""" +667 98 optimizer """adam""" +667 98 training_loop """owa""" +667 98 negative_sampler """basic""" +667 98 evaluator """rankbased""" +667 99 dataset """kinships""" +667 99 model """simple""" +667 99 loss """marginranking""" +667 99 regularizer """no""" +667 99 optimizer """adam""" +667 99 training_loop """owa""" +667 99 negative_sampler """basic""" +667 99 evaluator """rankbased""" +667 100 dataset 
"""kinships""" +667 100 model """simple""" +667 100 loss """marginranking""" +667 100 regularizer """no""" +667 100 optimizer """adam""" +667 100 training_loop """owa""" +667 100 negative_sampler """basic""" +667 100 evaluator """rankbased""" +668 1 model.embedding_dim 0.0 +668 1 loss.margin 8.523177613622405 +668 1 optimizer.lr 0.030996430107226774 +668 1 negative_sampler.num_negs_per_pos 80.0 +668 1 training.batch_size 0.0 +668 2 model.embedding_dim 1.0 +668 2 loss.margin 6.217963461631253 +668 2 optimizer.lr 0.002938718617179034 +668 2 negative_sampler.num_negs_per_pos 52.0 +668 2 training.batch_size 2.0 +668 3 model.embedding_dim 0.0 +668 3 loss.margin 8.620962362821524 +668 3 optimizer.lr 0.08610025373189871 +668 3 negative_sampler.num_negs_per_pos 0.0 +668 3 training.batch_size 0.0 +668 4 model.embedding_dim 2.0 +668 4 loss.margin 9.315238945377807 +668 4 optimizer.lr 0.0013602544123302342 +668 4 negative_sampler.num_negs_per_pos 9.0 +668 4 training.batch_size 0.0 +668 5 model.embedding_dim 1.0 +668 5 loss.margin 8.576302585128024 +668 5 optimizer.lr 0.0012165217188312724 +668 5 negative_sampler.num_negs_per_pos 87.0 +668 5 training.batch_size 0.0 +668 6 model.embedding_dim 1.0 +668 6 loss.margin 1.5540818331305926 +668 6 optimizer.lr 0.021628057092429924 +668 6 negative_sampler.num_negs_per_pos 89.0 +668 6 training.batch_size 1.0 +668 7 model.embedding_dim 2.0 +668 7 loss.margin 7.879797624502961 +668 7 optimizer.lr 0.004001754555207061 +668 7 negative_sampler.num_negs_per_pos 75.0 +668 7 training.batch_size 2.0 +668 8 model.embedding_dim 0.0 +668 8 loss.margin 7.267951702809554 +668 8 optimizer.lr 0.0022025860884790673 +668 8 negative_sampler.num_negs_per_pos 94.0 +668 8 training.batch_size 0.0 +668 9 model.embedding_dim 0.0 +668 9 loss.margin 9.240225366793144 +668 9 optimizer.lr 0.0021864871404033955 +668 9 negative_sampler.num_negs_per_pos 27.0 +668 9 training.batch_size 1.0 +668 10 model.embedding_dim 1.0 +668 10 loss.margin 8.864775843805722 +668 10 
optimizer.lr 0.011971275106807164 +668 10 negative_sampler.num_negs_per_pos 27.0 +668 10 training.batch_size 2.0 +668 11 model.embedding_dim 2.0 +668 11 loss.margin 7.017804006792452 +668 11 optimizer.lr 0.043696061678655966 +668 11 negative_sampler.num_negs_per_pos 69.0 +668 11 training.batch_size 0.0 +668 12 model.embedding_dim 2.0 +668 12 loss.margin 6.82206144072121 +668 12 optimizer.lr 0.0019929598008599303 +668 12 negative_sampler.num_negs_per_pos 28.0 +668 12 training.batch_size 2.0 +668 13 model.embedding_dim 0.0 +668 13 loss.margin 4.135475161012414 +668 13 optimizer.lr 0.02931695833551097 +668 13 negative_sampler.num_negs_per_pos 89.0 +668 13 training.batch_size 2.0 +668 14 model.embedding_dim 1.0 +668 14 loss.margin 5.064232148955713 +668 14 optimizer.lr 0.002308543457065092 +668 14 negative_sampler.num_negs_per_pos 83.0 +668 14 training.batch_size 1.0 +668 15 model.embedding_dim 2.0 +668 15 loss.margin 7.097636470218944 +668 15 optimizer.lr 0.007261316801729302 +668 15 negative_sampler.num_negs_per_pos 65.0 +668 15 training.batch_size 0.0 +668 16 model.embedding_dim 1.0 +668 16 loss.margin 7.264517603454441 +668 16 optimizer.lr 0.08532533153919636 +668 16 negative_sampler.num_negs_per_pos 65.0 +668 16 training.batch_size 2.0 +668 17 model.embedding_dim 0.0 +668 17 loss.margin 8.55823676431266 +668 17 optimizer.lr 0.016409334674508797 +668 17 negative_sampler.num_negs_per_pos 73.0 +668 17 training.batch_size 0.0 +668 18 model.embedding_dim 2.0 +668 18 loss.margin 2.573587469963914 +668 18 optimizer.lr 0.021454856802204467 +668 18 negative_sampler.num_negs_per_pos 91.0 +668 18 training.batch_size 0.0 +668 19 model.embedding_dim 2.0 +668 19 loss.margin 9.744900941743133 +668 19 optimizer.lr 0.09505974892515723 +668 19 negative_sampler.num_negs_per_pos 82.0 +668 19 training.batch_size 2.0 +668 20 model.embedding_dim 1.0 +668 20 loss.margin 7.115977896606257 +668 20 optimizer.lr 0.08070294538611254 +668 20 negative_sampler.num_negs_per_pos 88.0 +668 20 
training.batch_size 0.0 +668 21 model.embedding_dim 2.0 +668 21 loss.margin 6.553648798546285 +668 21 optimizer.lr 0.004893832221356165 +668 21 negative_sampler.num_negs_per_pos 1.0 +668 21 training.batch_size 2.0 +668 22 model.embedding_dim 1.0 +668 22 loss.margin 7.613693567383605 +668 22 optimizer.lr 0.029985214694611077 +668 22 negative_sampler.num_negs_per_pos 91.0 +668 22 training.batch_size 1.0 +668 23 model.embedding_dim 0.0 +668 23 loss.margin 7.590268694825291 +668 23 optimizer.lr 0.05297963379492463 +668 23 negative_sampler.num_negs_per_pos 11.0 +668 23 training.batch_size 1.0 +668 24 model.embedding_dim 1.0 +668 24 loss.margin 3.437143625943894 +668 24 optimizer.lr 0.007138120621764694 +668 24 negative_sampler.num_negs_per_pos 25.0 +668 24 training.batch_size 1.0 +668 25 model.embedding_dim 0.0 +668 25 loss.margin 4.728664367805063 +668 25 optimizer.lr 0.022067356489529368 +668 25 negative_sampler.num_negs_per_pos 55.0 +668 25 training.batch_size 0.0 +668 26 model.embedding_dim 2.0 +668 26 loss.margin 3.188819134153999 +668 26 optimizer.lr 0.057470961798254734 +668 26 negative_sampler.num_negs_per_pos 28.0 +668 26 training.batch_size 2.0 +668 27 model.embedding_dim 0.0 +668 27 loss.margin 5.014655439890807 +668 27 optimizer.lr 0.027529389899811442 +668 27 negative_sampler.num_negs_per_pos 40.0 +668 27 training.batch_size 2.0 +668 28 model.embedding_dim 2.0 +668 28 loss.margin 0.7102458218789807 +668 28 optimizer.lr 0.024474735371834932 +668 28 negative_sampler.num_negs_per_pos 65.0 +668 28 training.batch_size 1.0 +668 29 model.embedding_dim 2.0 +668 29 loss.margin 0.5856585422888851 +668 29 optimizer.lr 0.001360485329696595 +668 29 negative_sampler.num_negs_per_pos 3.0 +668 29 training.batch_size 2.0 +668 30 model.embedding_dim 1.0 +668 30 loss.margin 6.163634140490511 +668 30 optimizer.lr 0.015472318767938517 +668 30 negative_sampler.num_negs_per_pos 95.0 +668 30 training.batch_size 0.0 +668 31 model.embedding_dim 1.0 +668 31 loss.margin 
0.8524100645483159 +668 31 optimizer.lr 0.05116921940978464 +668 31 negative_sampler.num_negs_per_pos 53.0 +668 31 training.batch_size 2.0 +668 32 model.embedding_dim 1.0 +668 32 loss.margin 6.4188724970001845 +668 32 optimizer.lr 0.016468846733142712 +668 32 negative_sampler.num_negs_per_pos 60.0 +668 32 training.batch_size 0.0 +668 33 model.embedding_dim 1.0 +668 33 loss.margin 3.5794173257874027 +668 33 optimizer.lr 0.002919892312491585 +668 33 negative_sampler.num_negs_per_pos 45.0 +668 33 training.batch_size 2.0 +668 34 model.embedding_dim 1.0 +668 34 loss.margin 8.487085938184721 +668 34 optimizer.lr 0.001078816053996905 +668 34 negative_sampler.num_negs_per_pos 45.0 +668 34 training.batch_size 1.0 +668 35 model.embedding_dim 0.0 +668 35 loss.margin 2.7577159676287 +668 35 optimizer.lr 0.0027977966676869296 +668 35 negative_sampler.num_negs_per_pos 82.0 +668 35 training.batch_size 0.0 +668 36 model.embedding_dim 2.0 +668 36 loss.margin 7.607047985872565 +668 36 optimizer.lr 0.017369545485192893 +668 36 negative_sampler.num_negs_per_pos 79.0 +668 36 training.batch_size 1.0 +668 37 model.embedding_dim 2.0 +668 37 loss.margin 2.5938273396792533 +668 37 optimizer.lr 0.043014705393450044 +668 37 negative_sampler.num_negs_per_pos 26.0 +668 37 training.batch_size 2.0 +668 38 model.embedding_dim 0.0 +668 38 loss.margin 7.364583801136204 +668 38 optimizer.lr 0.04742904667572886 +668 38 negative_sampler.num_negs_per_pos 45.0 +668 38 training.batch_size 1.0 +668 39 model.embedding_dim 2.0 +668 39 loss.margin 6.174502345932917 +668 39 optimizer.lr 0.07578498735237266 +668 39 negative_sampler.num_negs_per_pos 35.0 +668 39 training.batch_size 0.0 +668 40 model.embedding_dim 2.0 +668 40 loss.margin 3.541664798253062 +668 40 optimizer.lr 0.016642990943800635 +668 40 negative_sampler.num_negs_per_pos 30.0 +668 40 training.batch_size 0.0 +668 41 model.embedding_dim 1.0 +668 41 loss.margin 3.61745547368096 +668 41 optimizer.lr 0.029404107576581493 +668 41 
negative_sampler.num_negs_per_pos 68.0 +668 41 training.batch_size 0.0 +668 42 model.embedding_dim 2.0 +668 42 loss.margin 6.439265958733678 +668 42 optimizer.lr 0.0023114668657240657 +668 42 negative_sampler.num_negs_per_pos 70.0 +668 42 training.batch_size 1.0 +668 43 model.embedding_dim 1.0 +668 43 loss.margin 3.178963276002888 +668 43 optimizer.lr 0.0077784904126277435 +668 43 negative_sampler.num_negs_per_pos 83.0 +668 43 training.batch_size 0.0 +668 44 model.embedding_dim 1.0 +668 44 loss.margin 1.7457718407464071 +668 44 optimizer.lr 0.0031631347245284066 +668 44 negative_sampler.num_negs_per_pos 55.0 +668 44 training.batch_size 2.0 +668 45 model.embedding_dim 1.0 +668 45 loss.margin 6.253985794473871 +668 45 optimizer.lr 0.001729415121493828 +668 45 negative_sampler.num_negs_per_pos 52.0 +668 45 training.batch_size 0.0 +668 46 model.embedding_dim 0.0 +668 46 loss.margin 3.856878595760724 +668 46 optimizer.lr 0.05164036809337737 +668 46 negative_sampler.num_negs_per_pos 1.0 +668 46 training.batch_size 1.0 +668 47 model.embedding_dim 0.0 +668 47 loss.margin 4.965854032589205 +668 47 optimizer.lr 0.07477340274501584 +668 47 negative_sampler.num_negs_per_pos 0.0 +668 47 training.batch_size 1.0 +668 48 model.embedding_dim 2.0 +668 48 loss.margin 5.553464581985265 +668 48 optimizer.lr 0.054876255510416645 +668 48 negative_sampler.num_negs_per_pos 99.0 +668 48 training.batch_size 1.0 +668 49 model.embedding_dim 2.0 +668 49 loss.margin 7.49259863775808 +668 49 optimizer.lr 0.0470796091310891 +668 49 negative_sampler.num_negs_per_pos 56.0 +668 49 training.batch_size 1.0 +668 50 model.embedding_dim 1.0 +668 50 loss.margin 7.18899949735827 +668 50 optimizer.lr 0.021739350574034012 +668 50 negative_sampler.num_negs_per_pos 69.0 +668 50 training.batch_size 0.0 +668 51 model.embedding_dim 2.0 +668 51 loss.margin 5.868859769671454 +668 51 optimizer.lr 0.0026692970652725505 +668 51 negative_sampler.num_negs_per_pos 46.0 +668 51 training.batch_size 0.0 +668 52 
model.embedding_dim 0.0 +668 52 loss.margin 9.291362162091136 +668 52 optimizer.lr 0.02354673774696356 +668 52 negative_sampler.num_negs_per_pos 12.0 +668 52 training.batch_size 1.0 +668 53 model.embedding_dim 0.0 +668 53 loss.margin 7.817541848540605 +668 53 optimizer.lr 0.04954629874814922 +668 53 negative_sampler.num_negs_per_pos 96.0 +668 53 training.batch_size 2.0 +668 54 model.embedding_dim 1.0 +668 54 loss.margin 8.336015858728715 +668 54 optimizer.lr 0.037463929048678776 +668 54 negative_sampler.num_negs_per_pos 84.0 +668 54 training.batch_size 1.0 +668 55 model.embedding_dim 2.0 +668 55 loss.margin 3.1741364586972045 +668 55 optimizer.lr 0.006262917968626803 +668 55 negative_sampler.num_negs_per_pos 30.0 +668 55 training.batch_size 0.0 +668 56 model.embedding_dim 1.0 +668 56 loss.margin 4.7236790730170215 +668 56 optimizer.lr 0.030949967781909848 +668 56 negative_sampler.num_negs_per_pos 80.0 +668 56 training.batch_size 0.0 +668 57 model.embedding_dim 0.0 +668 57 loss.margin 4.971408510805392 +668 57 optimizer.lr 0.0070333466246832845 +668 57 negative_sampler.num_negs_per_pos 34.0 +668 57 training.batch_size 2.0 +668 58 model.embedding_dim 0.0 +668 58 loss.margin 1.3456851667835563 +668 58 optimizer.lr 0.0018411436486616777 +668 58 negative_sampler.num_negs_per_pos 92.0 +668 58 training.batch_size 1.0 +668 59 model.embedding_dim 1.0 +668 59 loss.margin 2.838074805672662 +668 59 optimizer.lr 0.043568684608596996 +668 59 negative_sampler.num_negs_per_pos 33.0 +668 59 training.batch_size 0.0 +668 60 model.embedding_dim 0.0 +668 60 loss.margin 5.974081790970966 +668 60 optimizer.lr 0.011475061696757343 +668 60 negative_sampler.num_negs_per_pos 5.0 +668 60 training.batch_size 1.0 +668 61 model.embedding_dim 0.0 +668 61 loss.margin 5.495970257961405 +668 61 optimizer.lr 0.0063372601415880815 +668 61 negative_sampler.num_negs_per_pos 13.0 +668 61 training.batch_size 2.0 +668 62 model.embedding_dim 1.0 +668 62 loss.margin 3.561964232077221 +668 62 optimizer.lr 
0.0012349119052750973 +668 62 negative_sampler.num_negs_per_pos 31.0 +668 62 training.batch_size 0.0 +668 63 model.embedding_dim 2.0 +668 63 loss.margin 9.389056600021476 +668 63 optimizer.lr 0.003179462022396153 +668 63 negative_sampler.num_negs_per_pos 89.0 +668 63 training.batch_size 2.0 +668 64 model.embedding_dim 0.0 +668 64 loss.margin 4.27434513354207 +668 64 optimizer.lr 0.003779196561203399 +668 64 negative_sampler.num_negs_per_pos 93.0 +668 64 training.batch_size 1.0 +668 65 model.embedding_dim 2.0 +668 65 loss.margin 3.825564563198485 +668 65 optimizer.lr 0.0014966173223547206 +668 65 negative_sampler.num_negs_per_pos 10.0 +668 65 training.batch_size 0.0 +668 66 model.embedding_dim 1.0 +668 66 loss.margin 2.8796611064554654 +668 66 optimizer.lr 0.010280996319626172 +668 66 negative_sampler.num_negs_per_pos 75.0 +668 66 training.batch_size 2.0 +668 67 model.embedding_dim 1.0 +668 67 loss.margin 7.850145691573503 +668 67 optimizer.lr 0.08498559863025797 +668 67 negative_sampler.num_negs_per_pos 71.0 +668 67 training.batch_size 1.0 +668 68 model.embedding_dim 1.0 +668 68 loss.margin 7.419823680608185 +668 68 optimizer.lr 0.003970664182846311 +668 68 negative_sampler.num_negs_per_pos 24.0 +668 68 training.batch_size 2.0 +668 69 model.embedding_dim 0.0 +668 69 loss.margin 9.27092384895371 +668 69 optimizer.lr 0.08607899206211254 +668 69 negative_sampler.num_negs_per_pos 90.0 +668 69 training.batch_size 0.0 +668 70 model.embedding_dim 0.0 +668 70 loss.margin 6.919647970270817 +668 70 optimizer.lr 0.015129137750932336 +668 70 negative_sampler.num_negs_per_pos 39.0 +668 70 training.batch_size 0.0 +668 71 model.embedding_dim 2.0 +668 71 loss.margin 4.899893762467896 +668 71 optimizer.lr 0.006126153135675707 +668 71 negative_sampler.num_negs_per_pos 83.0 +668 71 training.batch_size 2.0 +668 72 model.embedding_dim 1.0 +668 72 loss.margin 3.502809127938134 +668 72 optimizer.lr 0.002299535053111275 +668 72 negative_sampler.num_negs_per_pos 41.0 +668 72 
training.batch_size 1.0 +668 73 model.embedding_dim 1.0 +668 73 loss.margin 4.948615995357513 +668 73 optimizer.lr 0.00305211396134025 +668 73 negative_sampler.num_negs_per_pos 2.0 +668 73 training.batch_size 0.0 +668 74 model.embedding_dim 0.0 +668 74 loss.margin 8.955860444855011 +668 74 optimizer.lr 0.010589605445899744 +668 74 negative_sampler.num_negs_per_pos 35.0 +668 74 training.batch_size 2.0 +668 75 model.embedding_dim 1.0 +668 75 loss.margin 8.585520692596562 +668 75 optimizer.lr 0.0015690008975297047 +668 75 negative_sampler.num_negs_per_pos 27.0 +668 75 training.batch_size 0.0 +668 76 model.embedding_dim 2.0 +668 76 loss.margin 5.725397769795418 +668 76 optimizer.lr 0.0685529479292587 +668 76 negative_sampler.num_negs_per_pos 27.0 +668 76 training.batch_size 2.0 +668 77 model.embedding_dim 0.0 +668 77 loss.margin 5.491533673826355 +668 77 optimizer.lr 0.001568904262468009 +668 77 negative_sampler.num_negs_per_pos 95.0 +668 77 training.batch_size 1.0 +668 78 model.embedding_dim 1.0 +668 78 loss.margin 7.614577782602913 +668 78 optimizer.lr 0.02160384480881326 +668 78 negative_sampler.num_negs_per_pos 46.0 +668 78 training.batch_size 2.0 +668 79 model.embedding_dim 0.0 +668 79 loss.margin 6.918433557265983 +668 79 optimizer.lr 0.006609621042161859 +668 79 negative_sampler.num_negs_per_pos 96.0 +668 79 training.batch_size 0.0 +668 80 model.embedding_dim 2.0 +668 80 loss.margin 0.8004711992037523 +668 80 optimizer.lr 0.0052535589931648995 +668 80 negative_sampler.num_negs_per_pos 29.0 +668 80 training.batch_size 1.0 +668 81 model.embedding_dim 2.0 +668 81 loss.margin 4.97909677935554 +668 81 optimizer.lr 0.007966502991867828 +668 81 negative_sampler.num_negs_per_pos 81.0 +668 81 training.batch_size 0.0 +668 82 model.embedding_dim 2.0 +668 82 loss.margin 4.3583717021773225 +668 82 optimizer.lr 0.0015109424888402774 +668 82 negative_sampler.num_negs_per_pos 53.0 +668 82 training.batch_size 0.0 +668 83 model.embedding_dim 0.0 +668 83 loss.margin 
2.0563830058546824 +668 83 optimizer.lr 0.04595230817583622 +668 83 negative_sampler.num_negs_per_pos 24.0 +668 83 training.batch_size 1.0 +668 84 model.embedding_dim 1.0 +668 84 loss.margin 1.6827350385593365 +668 84 optimizer.lr 0.016082406740613668 +668 84 negative_sampler.num_negs_per_pos 86.0 +668 84 training.batch_size 1.0 +668 85 model.embedding_dim 1.0 +668 85 loss.margin 6.646103427011896 +668 85 optimizer.lr 0.06926317273421403 +668 85 negative_sampler.num_negs_per_pos 34.0 +668 85 training.batch_size 2.0 +668 86 model.embedding_dim 2.0 +668 86 loss.margin 1.173783813936016 +668 86 optimizer.lr 0.01749264115450962 +668 86 negative_sampler.num_negs_per_pos 55.0 +668 86 training.batch_size 2.0 +668 87 model.embedding_dim 0.0 +668 87 loss.margin 3.993210179170504 +668 87 optimizer.lr 0.00700616556745814 +668 87 negative_sampler.num_negs_per_pos 51.0 +668 87 training.batch_size 0.0 +668 88 model.embedding_dim 1.0 +668 88 loss.margin 4.30718985648029 +668 88 optimizer.lr 0.0094085474922118 +668 88 negative_sampler.num_negs_per_pos 13.0 +668 88 training.batch_size 0.0 +668 89 model.embedding_dim 2.0 +668 89 loss.margin 6.555028270584621 +668 89 optimizer.lr 0.020610395255651195 +668 89 negative_sampler.num_negs_per_pos 65.0 +668 89 training.batch_size 2.0 +668 90 model.embedding_dim 1.0 +668 90 loss.margin 0.9693885324894651 +668 90 optimizer.lr 0.00292496958585942 +668 90 negative_sampler.num_negs_per_pos 56.0 +668 90 training.batch_size 0.0 +668 91 model.embedding_dim 0.0 +668 91 loss.margin 7.479954465270723 +668 91 optimizer.lr 0.0019163525537471621 +668 91 negative_sampler.num_negs_per_pos 50.0 +668 91 training.batch_size 0.0 +668 92 model.embedding_dim 1.0 +668 92 loss.margin 3.1600731385446896 +668 92 optimizer.lr 0.018222693551327242 +668 92 negative_sampler.num_negs_per_pos 41.0 +668 92 training.batch_size 0.0 +668 93 model.embedding_dim 2.0 +668 93 loss.margin 6.178927876774706 +668 93 optimizer.lr 0.0956249781100453 +668 93 
negative_sampler.num_negs_per_pos 80.0 +668 93 training.batch_size 1.0 +668 94 model.embedding_dim 2.0 +668 94 loss.margin 3.4460993528063253 +668 94 optimizer.lr 0.0027340052753166933 +668 94 negative_sampler.num_negs_per_pos 48.0 +668 94 training.batch_size 2.0 +668 95 model.embedding_dim 2.0 +668 95 loss.margin 1.306116023850441 +668 95 optimizer.lr 0.0023308598293983033 +668 95 negative_sampler.num_negs_per_pos 87.0 +668 95 training.batch_size 1.0 +668 96 model.embedding_dim 2.0 +668 96 loss.margin 0.5793277141853198 +668 96 optimizer.lr 0.027286135166277536 +668 96 negative_sampler.num_negs_per_pos 57.0 +668 96 training.batch_size 1.0 +668 97 model.embedding_dim 2.0 +668 97 loss.margin 2.398173805837491 +668 97 optimizer.lr 0.04886565479869576 +668 97 negative_sampler.num_negs_per_pos 18.0 +668 97 training.batch_size 0.0 +668 98 model.embedding_dim 0.0 +668 98 loss.margin 2.709874327858343 +668 98 optimizer.lr 0.03310248312949139 +668 98 negative_sampler.num_negs_per_pos 7.0 +668 98 training.batch_size 2.0 +668 99 model.embedding_dim 0.0 +668 99 loss.margin 9.143870764512904 +668 99 optimizer.lr 0.006698145395158506 +668 99 negative_sampler.num_negs_per_pos 44.0 +668 99 training.batch_size 0.0 +668 100 model.embedding_dim 0.0 +668 100 loss.margin 2.272688349099231 +668 100 optimizer.lr 0.0013902276672913708 +668 100 negative_sampler.num_negs_per_pos 97.0 +668 100 training.batch_size 1.0 +668 1 dataset """kinships""" +668 1 model """simple""" +668 1 loss """marginranking""" +668 1 regularizer """no""" +668 1 optimizer """adam""" +668 1 training_loop """owa""" +668 1 negative_sampler """basic""" +668 1 evaluator """rankbased""" +668 2 dataset """kinships""" +668 2 model """simple""" +668 2 loss """marginranking""" +668 2 regularizer """no""" +668 2 optimizer """adam""" +668 2 training_loop """owa""" +668 2 negative_sampler """basic""" +668 2 evaluator """rankbased""" +668 3 dataset """kinships""" +668 3 model """simple""" +668 3 loss """marginranking""" +668 3 
regularizer """no""" +668 3 optimizer """adam""" +668 3 training_loop """owa""" +668 3 negative_sampler """basic""" +668 3 evaluator """rankbased""" +668 4 dataset """kinships""" +668 4 model """simple""" +668 4 loss """marginranking""" +668 4 regularizer """no""" +668 4 optimizer """adam""" +668 4 training_loop """owa""" +668 4 negative_sampler """basic""" +668 4 evaluator """rankbased""" +668 5 dataset """kinships""" +668 5 model """simple""" +668 5 loss """marginranking""" +668 5 regularizer """no""" +668 5 optimizer """adam""" +668 5 training_loop """owa""" +668 5 negative_sampler """basic""" +668 5 evaluator """rankbased""" +668 6 dataset """kinships""" +668 6 model """simple""" +668 6 loss """marginranking""" +668 6 regularizer """no""" +668 6 optimizer """adam""" +668 6 training_loop """owa""" +668 6 negative_sampler """basic""" +668 6 evaluator """rankbased""" +668 7 dataset """kinships""" +668 7 model """simple""" +668 7 loss """marginranking""" +668 7 regularizer """no""" +668 7 optimizer """adam""" +668 7 training_loop """owa""" +668 7 negative_sampler """basic""" +668 7 evaluator """rankbased""" +668 8 dataset """kinships""" +668 8 model """simple""" +668 8 loss """marginranking""" +668 8 regularizer """no""" +668 8 optimizer """adam""" +668 8 training_loop """owa""" +668 8 negative_sampler """basic""" +668 8 evaluator """rankbased""" +668 9 dataset """kinships""" +668 9 model """simple""" +668 9 loss """marginranking""" +668 9 regularizer """no""" +668 9 optimizer """adam""" +668 9 training_loop """owa""" +668 9 negative_sampler """basic""" +668 9 evaluator """rankbased""" +668 10 dataset """kinships""" +668 10 model """simple""" +668 10 loss """marginranking""" +668 10 regularizer """no""" +668 10 optimizer """adam""" +668 10 training_loop """owa""" +668 10 negative_sampler """basic""" +668 10 evaluator """rankbased""" +668 11 dataset """kinships""" +668 11 model """simple""" +668 11 loss """marginranking""" +668 11 regularizer """no""" +668 11 
optimizer """adam""" +668 11 training_loop """owa""" +668 11 negative_sampler """basic""" +668 11 evaluator """rankbased""" +668 12 dataset """kinships""" +668 12 model """simple""" +668 12 loss """marginranking""" +668 12 regularizer """no""" +668 12 optimizer """adam""" +668 12 training_loop """owa""" +668 12 negative_sampler """basic""" +668 12 evaluator """rankbased""" +668 13 dataset """kinships""" +668 13 model """simple""" +668 13 loss """marginranking""" +668 13 regularizer """no""" +668 13 optimizer """adam""" +668 13 training_loop """owa""" +668 13 negative_sampler """basic""" +668 13 evaluator """rankbased""" +668 14 dataset """kinships""" +668 14 model """simple""" +668 14 loss """marginranking""" +668 14 regularizer """no""" +668 14 optimizer """adam""" +668 14 training_loop """owa""" +668 14 negative_sampler """basic""" +668 14 evaluator """rankbased""" +668 15 dataset """kinships""" +668 15 model """simple""" +668 15 loss """marginranking""" +668 15 regularizer """no""" +668 15 optimizer """adam""" +668 15 training_loop """owa""" +668 15 negative_sampler """basic""" +668 15 evaluator """rankbased""" +668 16 dataset """kinships""" +668 16 model """simple""" +668 16 loss """marginranking""" +668 16 regularizer """no""" +668 16 optimizer """adam""" +668 16 training_loop """owa""" +668 16 negative_sampler """basic""" +668 16 evaluator """rankbased""" +668 17 dataset """kinships""" +668 17 model """simple""" +668 17 loss """marginranking""" +668 17 regularizer """no""" +668 17 optimizer """adam""" +668 17 training_loop """owa""" +668 17 negative_sampler """basic""" +668 17 evaluator """rankbased""" +668 18 dataset """kinships""" +668 18 model """simple""" +668 18 loss """marginranking""" +668 18 regularizer """no""" +668 18 optimizer """adam""" +668 18 training_loop """owa""" +668 18 negative_sampler """basic""" +668 18 evaluator """rankbased""" +668 19 dataset """kinships""" +668 19 model """simple""" +668 19 loss """marginranking""" +668 19 regularizer 
"""no""" +668 19 optimizer """adam""" +668 19 training_loop """owa""" +668 19 negative_sampler """basic""" +668 19 evaluator """rankbased""" +668 20 dataset """kinships""" +668 20 model """simple""" +668 20 loss """marginranking""" +668 20 regularizer """no""" +668 20 optimizer """adam""" +668 20 training_loop """owa""" +668 20 negative_sampler """basic""" +668 20 evaluator """rankbased""" +668 21 dataset """kinships""" +668 21 model """simple""" +668 21 loss """marginranking""" +668 21 regularizer """no""" +668 21 optimizer """adam""" +668 21 training_loop """owa""" +668 21 negative_sampler """basic""" +668 21 evaluator """rankbased""" +668 22 dataset """kinships""" +668 22 model """simple""" +668 22 loss """marginranking""" +668 22 regularizer """no""" +668 22 optimizer """adam""" +668 22 training_loop """owa""" +668 22 negative_sampler """basic""" +668 22 evaluator """rankbased""" +668 23 dataset """kinships""" +668 23 model """simple""" +668 23 loss """marginranking""" +668 23 regularizer """no""" +668 23 optimizer """adam""" +668 23 training_loop """owa""" +668 23 negative_sampler """basic""" +668 23 evaluator """rankbased""" +668 24 dataset """kinships""" +668 24 model """simple""" +668 24 loss """marginranking""" +668 24 regularizer """no""" +668 24 optimizer """adam""" +668 24 training_loop """owa""" +668 24 negative_sampler """basic""" +668 24 evaluator """rankbased""" +668 25 dataset """kinships""" +668 25 model """simple""" +668 25 loss """marginranking""" +668 25 regularizer """no""" +668 25 optimizer """adam""" +668 25 training_loop """owa""" +668 25 negative_sampler """basic""" +668 25 evaluator """rankbased""" +668 26 dataset """kinships""" +668 26 model """simple""" +668 26 loss """marginranking""" +668 26 regularizer """no""" +668 26 optimizer """adam""" +668 26 training_loop """owa""" +668 26 negative_sampler """basic""" +668 26 evaluator """rankbased""" +668 27 dataset """kinships""" +668 27 model """simple""" +668 27 loss """marginranking""" 
+668 27 regularizer """no""" +668 27 optimizer """adam""" +668 27 training_loop """owa""" +668 27 negative_sampler """basic""" +668 27 evaluator """rankbased""" +668 28 dataset """kinships""" +668 28 model """simple""" +668 28 loss """marginranking""" +668 28 regularizer """no""" +668 28 optimizer """adam""" +668 28 training_loop """owa""" +668 28 negative_sampler """basic""" +668 28 evaluator """rankbased""" +668 29 dataset """kinships""" +668 29 model """simple""" +668 29 loss """marginranking""" +668 29 regularizer """no""" +668 29 optimizer """adam""" +668 29 training_loop """owa""" +668 29 negative_sampler """basic""" +668 29 evaluator """rankbased""" +668 30 dataset """kinships""" +668 30 model """simple""" +668 30 loss """marginranking""" +668 30 regularizer """no""" +668 30 optimizer """adam""" +668 30 training_loop """owa""" +668 30 negative_sampler """basic""" +668 30 evaluator """rankbased""" +668 31 dataset """kinships""" +668 31 model """simple""" +668 31 loss """marginranking""" +668 31 regularizer """no""" +668 31 optimizer """adam""" +668 31 training_loop """owa""" +668 31 negative_sampler """basic""" +668 31 evaluator """rankbased""" +668 32 dataset """kinships""" +668 32 model """simple""" +668 32 loss """marginranking""" +668 32 regularizer """no""" +668 32 optimizer """adam""" +668 32 training_loop """owa""" +668 32 negative_sampler """basic""" +668 32 evaluator """rankbased""" +668 33 dataset """kinships""" +668 33 model """simple""" +668 33 loss """marginranking""" +668 33 regularizer """no""" +668 33 optimizer """adam""" +668 33 training_loop """owa""" +668 33 negative_sampler """basic""" +668 33 evaluator """rankbased""" +668 34 dataset """kinships""" +668 34 model """simple""" +668 34 loss """marginranking""" +668 34 regularizer """no""" +668 34 optimizer """adam""" +668 34 training_loop """owa""" +668 34 negative_sampler """basic""" +668 34 evaluator """rankbased""" +668 35 dataset """kinships""" +668 35 model """simple""" +668 35 loss 
"""marginranking""" +668 35 regularizer """no""" +668 35 optimizer """adam""" +668 35 training_loop """owa""" +668 35 negative_sampler """basic""" +668 35 evaluator """rankbased""" +668 36 dataset """kinships""" +668 36 model """simple""" +668 36 loss """marginranking""" +668 36 regularizer """no""" +668 36 optimizer """adam""" +668 36 training_loop """owa""" +668 36 negative_sampler """basic""" +668 36 evaluator """rankbased""" +668 37 dataset """kinships""" +668 37 model """simple""" +668 37 loss """marginranking""" +668 37 regularizer """no""" +668 37 optimizer """adam""" +668 37 training_loop """owa""" +668 37 negative_sampler """basic""" +668 37 evaluator """rankbased""" +668 38 dataset """kinships""" +668 38 model """simple""" +668 38 loss """marginranking""" +668 38 regularizer """no""" +668 38 optimizer """adam""" +668 38 training_loop """owa""" +668 38 negative_sampler """basic""" +668 38 evaluator """rankbased""" +668 39 dataset """kinships""" +668 39 model """simple""" +668 39 loss """marginranking""" +668 39 regularizer """no""" +668 39 optimizer """adam""" +668 39 training_loop """owa""" +668 39 negative_sampler """basic""" +668 39 evaluator """rankbased""" +668 40 dataset """kinships""" +668 40 model """simple""" +668 40 loss """marginranking""" +668 40 regularizer """no""" +668 40 optimizer """adam""" +668 40 training_loop """owa""" +668 40 negative_sampler """basic""" +668 40 evaluator """rankbased""" +668 41 dataset """kinships""" +668 41 model """simple""" +668 41 loss """marginranking""" +668 41 regularizer """no""" +668 41 optimizer """adam""" +668 41 training_loop """owa""" +668 41 negative_sampler """basic""" +668 41 evaluator """rankbased""" +668 42 dataset """kinships""" +668 42 model """simple""" +668 42 loss """marginranking""" +668 42 regularizer """no""" +668 42 optimizer """adam""" +668 42 training_loop """owa""" +668 42 negative_sampler """basic""" +668 42 evaluator """rankbased""" +668 43 dataset """kinships""" +668 43 model 
"""simple""" +668 43 loss """marginranking""" +668 43 regularizer """no""" +668 43 optimizer """adam""" +668 43 training_loop """owa""" +668 43 negative_sampler """basic""" +668 43 evaluator """rankbased""" +668 44 dataset """kinships""" +668 44 model """simple""" +668 44 loss """marginranking""" +668 44 regularizer """no""" +668 44 optimizer """adam""" +668 44 training_loop """owa""" +668 44 negative_sampler """basic""" +668 44 evaluator """rankbased""" +668 45 dataset """kinships""" +668 45 model """simple""" +668 45 loss """marginranking""" +668 45 regularizer """no""" +668 45 optimizer """adam""" +668 45 training_loop """owa""" +668 45 negative_sampler """basic""" +668 45 evaluator """rankbased""" +668 46 dataset """kinships""" +668 46 model """simple""" +668 46 loss """marginranking""" +668 46 regularizer """no""" +668 46 optimizer """adam""" +668 46 training_loop """owa""" +668 46 negative_sampler """basic""" +668 46 evaluator """rankbased""" +668 47 dataset """kinships""" +668 47 model """simple""" +668 47 loss """marginranking""" +668 47 regularizer """no""" +668 47 optimizer """adam""" +668 47 training_loop """owa""" +668 47 negative_sampler """basic""" +668 47 evaluator """rankbased""" +668 48 dataset """kinships""" +668 48 model """simple""" +668 48 loss """marginranking""" +668 48 regularizer """no""" +668 48 optimizer """adam""" +668 48 training_loop """owa""" +668 48 negative_sampler """basic""" +668 48 evaluator """rankbased""" +668 49 dataset """kinships""" +668 49 model """simple""" +668 49 loss """marginranking""" +668 49 regularizer """no""" +668 49 optimizer """adam""" +668 49 training_loop """owa""" +668 49 negative_sampler """basic""" +668 49 evaluator """rankbased""" +668 50 dataset """kinships""" +668 50 model """simple""" +668 50 loss """marginranking""" +668 50 regularizer """no""" +668 50 optimizer """adam""" +668 50 training_loop """owa""" +668 50 negative_sampler """basic""" +668 50 evaluator """rankbased""" +668 51 dataset 
"""kinships""" +668 51 model """simple""" +668 51 loss """marginranking""" +668 51 regularizer """no""" +668 51 optimizer """adam""" +668 51 training_loop """owa""" +668 51 negative_sampler """basic""" +668 51 evaluator """rankbased""" +668 52 dataset """kinships""" +668 52 model """simple""" +668 52 loss """marginranking""" +668 52 regularizer """no""" +668 52 optimizer """adam""" +668 52 training_loop """owa""" +668 52 negative_sampler """basic""" +668 52 evaluator """rankbased""" +668 53 dataset """kinships""" +668 53 model """simple""" +668 53 loss """marginranking""" +668 53 regularizer """no""" +668 53 optimizer """adam""" +668 53 training_loop """owa""" +668 53 negative_sampler """basic""" +668 53 evaluator """rankbased""" +668 54 dataset """kinships""" +668 54 model """simple""" +668 54 loss """marginranking""" +668 54 regularizer """no""" +668 54 optimizer """adam""" +668 54 training_loop """owa""" +668 54 negative_sampler """basic""" +668 54 evaluator """rankbased""" +668 55 dataset """kinships""" +668 55 model """simple""" +668 55 loss """marginranking""" +668 55 regularizer """no""" +668 55 optimizer """adam""" +668 55 training_loop """owa""" +668 55 negative_sampler """basic""" +668 55 evaluator """rankbased""" +668 56 dataset """kinships""" +668 56 model """simple""" +668 56 loss """marginranking""" +668 56 regularizer """no""" +668 56 optimizer """adam""" +668 56 training_loop """owa""" +668 56 negative_sampler """basic""" +668 56 evaluator """rankbased""" +668 57 dataset """kinships""" +668 57 model """simple""" +668 57 loss """marginranking""" +668 57 regularizer """no""" +668 57 optimizer """adam""" +668 57 training_loop """owa""" +668 57 negative_sampler """basic""" +668 57 evaluator """rankbased""" +668 58 dataset """kinships""" +668 58 model """simple""" +668 58 loss """marginranking""" +668 58 regularizer """no""" +668 58 optimizer """adam""" +668 58 training_loop """owa""" +668 58 negative_sampler """basic""" +668 58 evaluator """rankbased""" 
+668 59 dataset """kinships""" +668 59 model """simple""" +668 59 loss """marginranking""" +668 59 regularizer """no""" +668 59 optimizer """adam""" +668 59 training_loop """owa""" +668 59 negative_sampler """basic""" +668 59 evaluator """rankbased""" +668 60 dataset """kinships""" +668 60 model """simple""" +668 60 loss """marginranking""" +668 60 regularizer """no""" +668 60 optimizer """adam""" +668 60 training_loop """owa""" +668 60 negative_sampler """basic""" +668 60 evaluator """rankbased""" +668 61 dataset """kinships""" +668 61 model """simple""" +668 61 loss """marginranking""" +668 61 regularizer """no""" +668 61 optimizer """adam""" +668 61 training_loop """owa""" +668 61 negative_sampler """basic""" +668 61 evaluator """rankbased""" +668 62 dataset """kinships""" +668 62 model """simple""" +668 62 loss """marginranking""" +668 62 regularizer """no""" +668 62 optimizer """adam""" +668 62 training_loop """owa""" +668 62 negative_sampler """basic""" +668 62 evaluator """rankbased""" +668 63 dataset """kinships""" +668 63 model """simple""" +668 63 loss """marginranking""" +668 63 regularizer """no""" +668 63 optimizer """adam""" +668 63 training_loop """owa""" +668 63 negative_sampler """basic""" +668 63 evaluator """rankbased""" +668 64 dataset """kinships""" +668 64 model """simple""" +668 64 loss """marginranking""" +668 64 regularizer """no""" +668 64 optimizer """adam""" +668 64 training_loop """owa""" +668 64 negative_sampler """basic""" +668 64 evaluator """rankbased""" +668 65 dataset """kinships""" +668 65 model """simple""" +668 65 loss """marginranking""" +668 65 regularizer """no""" +668 65 optimizer """adam""" +668 65 training_loop """owa""" +668 65 negative_sampler """basic""" +668 65 evaluator """rankbased""" +668 66 dataset """kinships""" +668 66 model """simple""" +668 66 loss """marginranking""" +668 66 regularizer """no""" +668 66 optimizer """adam""" +668 66 training_loop """owa""" +668 66 negative_sampler """basic""" +668 66 evaluator 
"""rankbased""" +668 67 dataset """kinships""" +668 67 model """simple""" +668 67 loss """marginranking""" +668 67 regularizer """no""" +668 67 optimizer """adam""" +668 67 training_loop """owa""" +668 67 negative_sampler """basic""" +668 67 evaluator """rankbased""" +668 68 dataset """kinships""" +668 68 model """simple""" +668 68 loss """marginranking""" +668 68 regularizer """no""" +668 68 optimizer """adam""" +668 68 training_loop """owa""" +668 68 negative_sampler """basic""" +668 68 evaluator """rankbased""" +668 69 dataset """kinships""" +668 69 model """simple""" +668 69 loss """marginranking""" +668 69 regularizer """no""" +668 69 optimizer """adam""" +668 69 training_loop """owa""" +668 69 negative_sampler """basic""" +668 69 evaluator """rankbased""" +668 70 dataset """kinships""" +668 70 model """simple""" +668 70 loss """marginranking""" +668 70 regularizer """no""" +668 70 optimizer """adam""" +668 70 training_loop """owa""" +668 70 negative_sampler """basic""" +668 70 evaluator """rankbased""" +668 71 dataset """kinships""" +668 71 model """simple""" +668 71 loss """marginranking""" +668 71 regularizer """no""" +668 71 optimizer """adam""" +668 71 training_loop """owa""" +668 71 negative_sampler """basic""" +668 71 evaluator """rankbased""" +668 72 dataset """kinships""" +668 72 model """simple""" +668 72 loss """marginranking""" +668 72 regularizer """no""" +668 72 optimizer """adam""" +668 72 training_loop """owa""" +668 72 negative_sampler """basic""" +668 72 evaluator """rankbased""" +668 73 dataset """kinships""" +668 73 model """simple""" +668 73 loss """marginranking""" +668 73 regularizer """no""" +668 73 optimizer """adam""" +668 73 training_loop """owa""" +668 73 negative_sampler """basic""" +668 73 evaluator """rankbased""" +668 74 dataset """kinships""" +668 74 model """simple""" +668 74 loss """marginranking""" +668 74 regularizer """no""" +668 74 optimizer """adam""" +668 74 training_loop """owa""" +668 74 negative_sampler """basic""" 
+668 74 evaluator """rankbased""" +668 75 dataset """kinships""" +668 75 model """simple""" +668 75 loss """marginranking""" +668 75 regularizer """no""" +668 75 optimizer """adam""" +668 75 training_loop """owa""" +668 75 negative_sampler """basic""" +668 75 evaluator """rankbased""" +668 76 dataset """kinships""" +668 76 model """simple""" +668 76 loss """marginranking""" +668 76 regularizer """no""" +668 76 optimizer """adam""" +668 76 training_loop """owa""" +668 76 negative_sampler """basic""" +668 76 evaluator """rankbased""" +668 77 dataset """kinships""" +668 77 model """simple""" +668 77 loss """marginranking""" +668 77 regularizer """no""" +668 77 optimizer """adam""" +668 77 training_loop """owa""" +668 77 negative_sampler """basic""" +668 77 evaluator """rankbased""" +668 78 dataset """kinships""" +668 78 model """simple""" +668 78 loss """marginranking""" +668 78 regularizer """no""" +668 78 optimizer """adam""" +668 78 training_loop """owa""" +668 78 negative_sampler """basic""" +668 78 evaluator """rankbased""" +668 79 dataset """kinships""" +668 79 model """simple""" +668 79 loss """marginranking""" +668 79 regularizer """no""" +668 79 optimizer """adam""" +668 79 training_loop """owa""" +668 79 negative_sampler """basic""" +668 79 evaluator """rankbased""" +668 80 dataset """kinships""" +668 80 model """simple""" +668 80 loss """marginranking""" +668 80 regularizer """no""" +668 80 optimizer """adam""" +668 80 training_loop """owa""" +668 80 negative_sampler """basic""" +668 80 evaluator """rankbased""" +668 81 dataset """kinships""" +668 81 model """simple""" +668 81 loss """marginranking""" +668 81 regularizer """no""" +668 81 optimizer """adam""" +668 81 training_loop """owa""" +668 81 negative_sampler """basic""" +668 81 evaluator """rankbased""" +668 82 dataset """kinships""" +668 82 model """simple""" +668 82 loss """marginranking""" +668 82 regularizer """no""" +668 82 optimizer """adam""" +668 82 training_loop """owa""" +668 82 
negative_sampler """basic""" +668 82 evaluator """rankbased""" +668 83 dataset """kinships""" +668 83 model """simple""" +668 83 loss """marginranking""" +668 83 regularizer """no""" +668 83 optimizer """adam""" +668 83 training_loop """owa""" +668 83 negative_sampler """basic""" +668 83 evaluator """rankbased""" +668 84 dataset """kinships""" +668 84 model """simple""" +668 84 loss """marginranking""" +668 84 regularizer """no""" +668 84 optimizer """adam""" +668 84 training_loop """owa""" +668 84 negative_sampler """basic""" +668 84 evaluator """rankbased""" +668 85 dataset """kinships""" +668 85 model """simple""" +668 85 loss """marginranking""" +668 85 regularizer """no""" +668 85 optimizer """adam""" +668 85 training_loop """owa""" +668 85 negative_sampler """basic""" +668 85 evaluator """rankbased""" +668 86 dataset """kinships""" +668 86 model """simple""" +668 86 loss """marginranking""" +668 86 regularizer """no""" +668 86 optimizer """adam""" +668 86 training_loop """owa""" +668 86 negative_sampler """basic""" +668 86 evaluator """rankbased""" +668 87 dataset """kinships""" +668 87 model """simple""" +668 87 loss """marginranking""" +668 87 regularizer """no""" +668 87 optimizer """adam""" +668 87 training_loop """owa""" +668 87 negative_sampler """basic""" +668 87 evaluator """rankbased""" +668 88 dataset """kinships""" +668 88 model """simple""" +668 88 loss """marginranking""" +668 88 regularizer """no""" +668 88 optimizer """adam""" +668 88 training_loop """owa""" +668 88 negative_sampler """basic""" +668 88 evaluator """rankbased""" +668 89 dataset """kinships""" +668 89 model """simple""" +668 89 loss """marginranking""" +668 89 regularizer """no""" +668 89 optimizer """adam""" +668 89 training_loop """owa""" +668 89 negative_sampler """basic""" +668 89 evaluator """rankbased""" +668 90 dataset """kinships""" +668 90 model """simple""" +668 90 loss """marginranking""" +668 90 regularizer """no""" +668 90 optimizer """adam""" +668 90 training_loop 
"""owa""" +668 90 negative_sampler """basic""" +668 90 evaluator """rankbased""" +668 91 dataset """kinships""" +668 91 model """simple""" +668 91 loss """marginranking""" +668 91 regularizer """no""" +668 91 optimizer """adam""" +668 91 training_loop """owa""" +668 91 negative_sampler """basic""" +668 91 evaluator """rankbased""" +668 92 dataset """kinships""" +668 92 model """simple""" +668 92 loss """marginranking""" +668 92 regularizer """no""" +668 92 optimizer """adam""" +668 92 training_loop """owa""" +668 92 negative_sampler """basic""" +668 92 evaluator """rankbased""" +668 93 dataset """kinships""" +668 93 model """simple""" +668 93 loss """marginranking""" +668 93 regularizer """no""" +668 93 optimizer """adam""" +668 93 training_loop """owa""" +668 93 negative_sampler """basic""" +668 93 evaluator """rankbased""" +668 94 dataset """kinships""" +668 94 model """simple""" +668 94 loss """marginranking""" +668 94 regularizer """no""" +668 94 optimizer """adam""" +668 94 training_loop """owa""" +668 94 negative_sampler """basic""" +668 94 evaluator """rankbased""" +668 95 dataset """kinships""" +668 95 model """simple""" +668 95 loss """marginranking""" +668 95 regularizer """no""" +668 95 optimizer """adam""" +668 95 training_loop """owa""" +668 95 negative_sampler """basic""" +668 95 evaluator """rankbased""" +668 96 dataset """kinships""" +668 96 model """simple""" +668 96 loss """marginranking""" +668 96 regularizer """no""" +668 96 optimizer """adam""" +668 96 training_loop """owa""" +668 96 negative_sampler """basic""" +668 96 evaluator """rankbased""" +668 97 dataset """kinships""" +668 97 model """simple""" +668 97 loss """marginranking""" +668 97 regularizer """no""" +668 97 optimizer """adam""" +668 97 training_loop """owa""" +668 97 negative_sampler """basic""" +668 97 evaluator """rankbased""" +668 98 dataset """kinships""" +668 98 model """simple""" +668 98 loss """marginranking""" +668 98 regularizer """no""" +668 98 optimizer """adam""" +668 
98 training_loop """owa""" +668 98 negative_sampler """basic""" +668 98 evaluator """rankbased""" +668 99 dataset """kinships""" +668 99 model """simple""" +668 99 loss """marginranking""" +668 99 regularizer """no""" +668 99 optimizer """adam""" +668 99 training_loop """owa""" +668 99 negative_sampler """basic""" +668 99 evaluator """rankbased""" +668 100 dataset """kinships""" +668 100 model """simple""" +668 100 loss """marginranking""" +668 100 regularizer """no""" +668 100 optimizer """adam""" +668 100 training_loop """owa""" +668 100 negative_sampler """basic""" +668 100 evaluator """rankbased""" +669 1 model.embedding_dim 1.0 +669 1 loss.margin 29.814503903893254 +669 1 loss.adversarial_temperature 0.27814307630209245 +669 1 optimizer.lr 0.0073368322174577425 +669 1 negative_sampler.num_negs_per_pos 94.0 +669 1 training.batch_size 1.0 +669 2 model.embedding_dim 2.0 +669 2 loss.margin 27.057286483128536 +669 2 loss.adversarial_temperature 0.20559852542766013 +669 2 optimizer.lr 0.012394322848100891 +669 2 negative_sampler.num_negs_per_pos 80.0 +669 2 training.batch_size 0.0 +669 3 model.embedding_dim 0.0 +669 3 loss.margin 19.855512355685573 +669 3 loss.adversarial_temperature 0.3912866205599459 +669 3 optimizer.lr 0.006671907341450311 +669 3 negative_sampler.num_negs_per_pos 35.0 +669 3 training.batch_size 2.0 +669 4 model.embedding_dim 2.0 +669 4 loss.margin 6.475711208090435 +669 4 loss.adversarial_temperature 0.8003479262593729 +669 4 optimizer.lr 0.012994630999831431 +669 4 negative_sampler.num_negs_per_pos 97.0 +669 4 training.batch_size 2.0 +669 5 model.embedding_dim 2.0 +669 5 loss.margin 3.837766159303051 +669 5 loss.adversarial_temperature 0.8562673292068155 +669 5 optimizer.lr 0.002155599241303294 +669 5 negative_sampler.num_negs_per_pos 30.0 +669 5 training.batch_size 1.0 +669 6 model.embedding_dim 1.0 +669 6 loss.margin 25.988898326972432 +669 6 loss.adversarial_temperature 0.9404200944454945 +669 6 optimizer.lr 0.009220824808470807 +669 6 
negative_sampler.num_negs_per_pos 81.0 +669 6 training.batch_size 2.0 +669 7 model.embedding_dim 0.0 +669 7 loss.margin 17.98431196601163 +669 7 loss.adversarial_temperature 0.9570521210327121 +669 7 optimizer.lr 0.04667625245827833 +669 7 negative_sampler.num_negs_per_pos 53.0 +669 7 training.batch_size 0.0 +669 8 model.embedding_dim 0.0 +669 8 loss.margin 23.146050274693422 +669 8 loss.adversarial_temperature 0.9336855786055853 +669 8 optimizer.lr 0.0037592989719721255 +669 8 negative_sampler.num_negs_per_pos 79.0 +669 8 training.batch_size 1.0 +669 9 model.embedding_dim 0.0 +669 9 loss.margin 7.203700527433049 +669 9 loss.adversarial_temperature 0.22034114413146907 +669 9 optimizer.lr 0.008386969680252404 +669 9 negative_sampler.num_negs_per_pos 95.0 +669 9 training.batch_size 2.0 +669 10 model.embedding_dim 2.0 +669 10 loss.margin 18.185967154707217 +669 10 loss.adversarial_temperature 0.3602093668035875 +669 10 optimizer.lr 0.0010704858372313356 +669 10 negative_sampler.num_negs_per_pos 45.0 +669 10 training.batch_size 0.0 +669 11 model.embedding_dim 1.0 +669 11 loss.margin 28.190068190912832 +669 11 loss.adversarial_temperature 0.9784260027169854 +669 11 optimizer.lr 0.002156295148721704 +669 11 negative_sampler.num_negs_per_pos 58.0 +669 11 training.batch_size 1.0 +669 12 model.embedding_dim 2.0 +669 12 loss.margin 15.924716705848148 +669 12 loss.adversarial_temperature 0.4650067653034746 +669 12 optimizer.lr 0.0010012695523348796 +669 12 negative_sampler.num_negs_per_pos 2.0 +669 12 training.batch_size 2.0 +669 13 model.embedding_dim 1.0 +669 13 loss.margin 18.075033674543945 +669 13 loss.adversarial_temperature 0.8159368588696668 +669 13 optimizer.lr 0.00943032151279711 +669 13 negative_sampler.num_negs_per_pos 1.0 +669 13 training.batch_size 0.0 +669 14 model.embedding_dim 0.0 +669 14 loss.margin 3.9371308728093775 +669 14 loss.adversarial_temperature 0.5065084942421763 +669 14 optimizer.lr 0.039796827263186285 +669 14 negative_sampler.num_negs_per_pos 
97.0 +669 14 training.batch_size 0.0 +669 15 model.embedding_dim 0.0 +669 15 loss.margin 29.257042179937656 +669 15 loss.adversarial_temperature 0.25545192781763815 +669 15 optimizer.lr 0.0038440976810176497 +669 15 negative_sampler.num_negs_per_pos 91.0 +669 15 training.batch_size 1.0 +669 16 model.embedding_dim 0.0 +669 16 loss.margin 20.14803075690136 +669 16 loss.adversarial_temperature 0.6312171350521906 +669 16 optimizer.lr 0.011258808094986448 +669 16 negative_sampler.num_negs_per_pos 74.0 +669 16 training.batch_size 2.0 +669 17 model.embedding_dim 0.0 +669 17 loss.margin 20.828488989025093 +669 17 loss.adversarial_temperature 0.23652400871697488 +669 17 optimizer.lr 0.010252897983762669 +669 17 negative_sampler.num_negs_per_pos 12.0 +669 17 training.batch_size 0.0 +669 18 model.embedding_dim 1.0 +669 18 loss.margin 13.104622250205907 +669 18 loss.adversarial_temperature 0.2679367019242336 +669 18 optimizer.lr 0.058150408192079034 +669 18 negative_sampler.num_negs_per_pos 7.0 +669 18 training.batch_size 2.0 +669 19 model.embedding_dim 0.0 +669 19 loss.margin 9.87002090045596 +669 19 loss.adversarial_temperature 0.4014476025184802 +669 19 optimizer.lr 0.04061273171860671 +669 19 negative_sampler.num_negs_per_pos 53.0 +669 19 training.batch_size 2.0 +669 20 model.embedding_dim 0.0 +669 20 loss.margin 22.764558130819943 +669 20 loss.adversarial_temperature 0.23786587523255595 +669 20 optimizer.lr 0.049934627207228965 +669 20 negative_sampler.num_negs_per_pos 18.0 +669 20 training.batch_size 1.0 +669 21 model.embedding_dim 0.0 +669 21 loss.margin 16.667036310700652 +669 21 loss.adversarial_temperature 0.1555193225561616 +669 21 optimizer.lr 0.0032510005168234456 +669 21 negative_sampler.num_negs_per_pos 44.0 +669 21 training.batch_size 1.0 +669 22 model.embedding_dim 1.0 +669 22 loss.margin 16.545577126329025 +669 22 loss.adversarial_temperature 0.9741114431928269 +669 22 optimizer.lr 0.0016945799162204635 +669 22 negative_sampler.num_negs_per_pos 30.0 +669 22 
training.batch_size 1.0 +669 23 model.embedding_dim 1.0 +669 23 loss.margin 27.275677813262764 +669 23 loss.adversarial_temperature 0.5607943814483741 +669 23 optimizer.lr 0.00792989094886496 +669 23 negative_sampler.num_negs_per_pos 58.0 +669 23 training.batch_size 0.0 +669 24 model.embedding_dim 2.0 +669 24 loss.margin 26.86309317700505 +669 24 loss.adversarial_temperature 0.8874161292884475 +669 24 optimizer.lr 0.039891227811261185 +669 24 negative_sampler.num_negs_per_pos 47.0 +669 24 training.batch_size 0.0 +669 25 model.embedding_dim 0.0 +669 25 loss.margin 9.131425706788184 +669 25 loss.adversarial_temperature 0.5977376029787367 +669 25 optimizer.lr 0.07545610822905219 +669 25 negative_sampler.num_negs_per_pos 92.0 +669 25 training.batch_size 1.0 +669 26 model.embedding_dim 2.0 +669 26 loss.margin 13.415480826389917 +669 26 loss.adversarial_temperature 0.7432758380945268 +669 26 optimizer.lr 0.0043619991531348825 +669 26 negative_sampler.num_negs_per_pos 12.0 +669 26 training.batch_size 1.0 +669 27 model.embedding_dim 2.0 +669 27 loss.margin 1.7107266701853074 +669 27 loss.adversarial_temperature 0.3065745526349948 +669 27 optimizer.lr 0.058571366750097276 +669 27 negative_sampler.num_negs_per_pos 56.0 +669 27 training.batch_size 0.0 +669 28 model.embedding_dim 2.0 +669 28 loss.margin 3.5040639112315755 +669 28 loss.adversarial_temperature 0.47658602604568734 +669 28 optimizer.lr 0.0010305569522573558 +669 28 negative_sampler.num_negs_per_pos 35.0 +669 28 training.batch_size 1.0 +669 29 model.embedding_dim 0.0 +669 29 loss.margin 19.86399651739927 +669 29 loss.adversarial_temperature 0.9793594820678478 +669 29 optimizer.lr 0.0012076172919937604 +669 29 negative_sampler.num_negs_per_pos 27.0 +669 29 training.batch_size 1.0 +669 30 model.embedding_dim 2.0 +669 30 loss.margin 18.098150868714548 +669 30 loss.adversarial_temperature 0.1873935251552326 +669 30 optimizer.lr 0.0150853813313522 +669 30 negative_sampler.num_negs_per_pos 57.0 +669 30 
training.batch_size 1.0 +669 31 model.embedding_dim 0.0 +669 31 loss.margin 25.477638414344757 +669 31 loss.adversarial_temperature 0.1476223724167181 +669 31 optimizer.lr 0.018926024578081534 +669 31 negative_sampler.num_negs_per_pos 66.0 +669 31 training.batch_size 2.0 +669 32 model.embedding_dim 2.0 +669 32 loss.margin 10.984469214365685 +669 32 loss.adversarial_temperature 0.1736641676031812 +669 32 optimizer.lr 0.0052743071054685816 +669 32 negative_sampler.num_negs_per_pos 75.0 +669 32 training.batch_size 2.0 +669 33 model.embedding_dim 2.0 +669 33 loss.margin 8.389980259772837 +669 33 loss.adversarial_temperature 0.5760048878417088 +669 33 optimizer.lr 0.0191434759790722 +669 33 negative_sampler.num_negs_per_pos 86.0 +669 33 training.batch_size 2.0 +669 34 model.embedding_dim 2.0 +669 34 loss.margin 1.2102920101051264 +669 34 loss.adversarial_temperature 0.3211030895649388 +669 34 optimizer.lr 0.04808944958200299 +669 34 negative_sampler.num_negs_per_pos 74.0 +669 34 training.batch_size 1.0 +669 35 model.embedding_dim 2.0 +669 35 loss.margin 24.58731285088757 +669 35 loss.adversarial_temperature 0.27337169971360054 +669 35 optimizer.lr 0.02713258267284766 +669 35 negative_sampler.num_negs_per_pos 91.0 +669 35 training.batch_size 2.0 +669 36 model.embedding_dim 1.0 +669 36 loss.margin 5.074084115424947 +669 36 loss.adversarial_temperature 0.6993019423076666 +669 36 optimizer.lr 0.007996141261576745 +669 36 negative_sampler.num_negs_per_pos 23.0 +669 36 training.batch_size 2.0 +669 37 model.embedding_dim 1.0 +669 37 loss.margin 19.520025128222514 +669 37 loss.adversarial_temperature 0.23235053946830508 +669 37 optimizer.lr 0.0010397569295330245 +669 37 negative_sampler.num_negs_per_pos 11.0 +669 37 training.batch_size 1.0 +669 38 model.embedding_dim 0.0 +669 38 loss.margin 9.337309901506547 +669 38 loss.adversarial_temperature 0.8361350007540075 +669 38 optimizer.lr 0.045231292035533215 +669 38 negative_sampler.num_negs_per_pos 80.0 +669 38 training.batch_size 
2.0 +669 39 model.embedding_dim 0.0 +669 39 loss.margin 17.394147412676862 +669 39 loss.adversarial_temperature 0.7401234158597786 +669 39 optimizer.lr 0.08520020517784904 +669 39 negative_sampler.num_negs_per_pos 31.0 +669 39 training.batch_size 0.0 +669 40 model.embedding_dim 2.0 +669 40 loss.margin 4.184808084236708 +669 40 loss.adversarial_temperature 0.7131509561186392 +669 40 optimizer.lr 0.006673373785209631 +669 40 negative_sampler.num_negs_per_pos 82.0 +669 40 training.batch_size 1.0 +669 41 model.embedding_dim 0.0 +669 41 loss.margin 13.853589082657546 +669 41 loss.adversarial_temperature 0.7726230501150823 +669 41 optimizer.lr 0.018763280305187226 +669 41 negative_sampler.num_negs_per_pos 40.0 +669 41 training.batch_size 1.0 +669 42 model.embedding_dim 0.0 +669 42 loss.margin 19.521906522528937 +669 42 loss.adversarial_temperature 0.9464279509108225 +669 42 optimizer.lr 0.0016365450025016565 +669 42 negative_sampler.num_negs_per_pos 62.0 +669 42 training.batch_size 2.0 +669 43 model.embedding_dim 2.0 +669 43 loss.margin 26.27902930492843 +669 43 loss.adversarial_temperature 0.13426927126648616 +669 43 optimizer.lr 0.0017806860904775346 +669 43 negative_sampler.num_negs_per_pos 69.0 +669 43 training.batch_size 0.0 +669 44 model.embedding_dim 2.0 +669 44 loss.margin 26.292395745186948 +669 44 loss.adversarial_temperature 0.933799334515231 +669 44 optimizer.lr 0.009224043418254684 +669 44 negative_sampler.num_negs_per_pos 83.0 +669 44 training.batch_size 0.0 +669 45 model.embedding_dim 2.0 +669 45 loss.margin 18.965381851880654 +669 45 loss.adversarial_temperature 0.27537312294744914 +669 45 optimizer.lr 0.008584724161059614 +669 45 negative_sampler.num_negs_per_pos 84.0 +669 45 training.batch_size 1.0 +669 46 model.embedding_dim 1.0 +669 46 loss.margin 23.47106967490293 +669 46 loss.adversarial_temperature 0.1597890105403363 +669 46 optimizer.lr 0.002535461454582488 +669 46 negative_sampler.num_negs_per_pos 25.0 +669 46 training.batch_size 1.0 +669 47 
model.embedding_dim 2.0 +669 47 loss.margin 8.69679275958319 +669 47 loss.adversarial_temperature 0.8664519496611925 +669 47 optimizer.lr 0.030262215037811967 +669 47 negative_sampler.num_negs_per_pos 6.0 +669 47 training.batch_size 1.0 +669 48 model.embedding_dim 2.0 +669 48 loss.margin 7.02229946085344 +669 48 loss.adversarial_temperature 0.21846282108506393 +669 48 optimizer.lr 0.01858360696043276 +669 48 negative_sampler.num_negs_per_pos 13.0 +669 48 training.batch_size 1.0 +669 49 model.embedding_dim 2.0 +669 49 loss.margin 14.813223814901283 +669 49 loss.adversarial_temperature 0.741050143289674 +669 49 optimizer.lr 0.0027757208336128473 +669 49 negative_sampler.num_negs_per_pos 91.0 +669 49 training.batch_size 1.0 +669 50 model.embedding_dim 0.0 +669 50 loss.margin 22.864192704742567 +669 50 loss.adversarial_temperature 0.49002812097825454 +669 50 optimizer.lr 0.003577463063397742 +669 50 negative_sampler.num_negs_per_pos 56.0 +669 50 training.batch_size 2.0 +669 51 model.embedding_dim 2.0 +669 51 loss.margin 27.79842329380391 +669 51 loss.adversarial_temperature 0.6904524955174955 +669 51 optimizer.lr 0.003022526003460192 +669 51 negative_sampler.num_negs_per_pos 86.0 +669 51 training.batch_size 1.0 +669 52 model.embedding_dim 0.0 +669 52 loss.margin 17.43441882868583 +669 52 loss.adversarial_temperature 0.1216110899956962 +669 52 optimizer.lr 0.002021420949979007 +669 52 negative_sampler.num_negs_per_pos 51.0 +669 52 training.batch_size 2.0 +669 53 model.embedding_dim 2.0 +669 53 loss.margin 14.093719915962323 +669 53 loss.adversarial_temperature 0.6663013261060522 +669 53 optimizer.lr 0.05880470767885054 +669 53 negative_sampler.num_negs_per_pos 1.0 +669 53 training.batch_size 2.0 +669 54 model.embedding_dim 2.0 +669 54 loss.margin 26.79977094267357 +669 54 loss.adversarial_temperature 0.18968152107541653 +669 54 optimizer.lr 0.056417229363595466 +669 54 negative_sampler.num_negs_per_pos 8.0 +669 54 training.batch_size 1.0 +669 55 model.embedding_dim 0.0 
+669 55 loss.margin 1.1018538212293452 +669 55 loss.adversarial_temperature 0.7650542371317313 +669 55 optimizer.lr 0.008782968768041933 +669 55 negative_sampler.num_negs_per_pos 11.0 +669 55 training.batch_size 1.0 +669 56 model.embedding_dim 2.0 +669 56 loss.margin 24.240445564970937 +669 56 loss.adversarial_temperature 0.3603368108788162 +669 56 optimizer.lr 0.024359106509149415 +669 56 negative_sampler.num_negs_per_pos 0.0 +669 56 training.batch_size 0.0 +669 57 model.embedding_dim 1.0 +669 57 loss.margin 5.657117579004323 +669 57 loss.adversarial_temperature 0.9108762086472932 +669 57 optimizer.lr 0.0035034665102252997 +669 57 negative_sampler.num_negs_per_pos 97.0 +669 57 training.batch_size 0.0 +669 58 model.embedding_dim 1.0 +669 58 loss.margin 14.407487727438403 +669 58 loss.adversarial_temperature 0.4727181219593922 +669 58 optimizer.lr 0.00231082456054285 +669 58 negative_sampler.num_negs_per_pos 83.0 +669 58 training.batch_size 0.0 +669 59 model.embedding_dim 1.0 +669 59 loss.margin 28.6494637057457 +669 59 loss.adversarial_temperature 0.594827395549443 +669 59 optimizer.lr 0.0054790412277722625 +669 59 negative_sampler.num_negs_per_pos 93.0 +669 59 training.batch_size 2.0 +669 60 model.embedding_dim 2.0 +669 60 loss.margin 7.093677358088367 +669 60 loss.adversarial_temperature 0.774165270349397 +669 60 optimizer.lr 0.059260423818931866 +669 60 negative_sampler.num_negs_per_pos 16.0 +669 60 training.batch_size 0.0 +669 61 model.embedding_dim 0.0 +669 61 loss.margin 16.81388564207963 +669 61 loss.adversarial_temperature 0.9541559839596792 +669 61 optimizer.lr 0.09546003967846518 +669 61 negative_sampler.num_negs_per_pos 74.0 +669 61 training.batch_size 1.0 +669 62 model.embedding_dim 0.0 +669 62 loss.margin 21.095429781998277 +669 62 loss.adversarial_temperature 0.41571464694114335 +669 62 optimizer.lr 0.019808362058656843 +669 62 negative_sampler.num_negs_per_pos 85.0 +669 62 training.batch_size 1.0 +669 63 model.embedding_dim 0.0 +669 63 loss.margin 
5.293566151598837 +669 63 loss.adversarial_temperature 0.9556326881970694 +669 63 optimizer.lr 0.0028750410085236723 +669 63 negative_sampler.num_negs_per_pos 77.0 +669 63 training.batch_size 2.0 +669 64 model.embedding_dim 1.0 +669 64 loss.margin 22.414865280712792 +669 64 loss.adversarial_temperature 0.10827547068741111 +669 64 optimizer.lr 0.010383973092507662 +669 64 negative_sampler.num_negs_per_pos 48.0 +669 64 training.batch_size 0.0 +669 65 model.embedding_dim 0.0 +669 65 loss.margin 22.73706077495857 +669 65 loss.adversarial_temperature 0.23014623814074933 +669 65 optimizer.lr 0.0024942845495458175 +669 65 negative_sampler.num_negs_per_pos 92.0 +669 65 training.batch_size 2.0 +669 66 model.embedding_dim 0.0 +669 66 loss.margin 25.857200047993683 +669 66 loss.adversarial_temperature 0.8955808894280168 +669 66 optimizer.lr 0.005076140417138996 +669 66 negative_sampler.num_negs_per_pos 83.0 +669 66 training.batch_size 0.0 +669 67 model.embedding_dim 0.0 +669 67 loss.margin 3.878310165719564 +669 67 loss.adversarial_temperature 0.16141243789894597 +669 67 optimizer.lr 0.012389150178034835 +669 67 negative_sampler.num_negs_per_pos 42.0 +669 67 training.batch_size 2.0 +669 68 model.embedding_dim 0.0 +669 68 loss.margin 7.8659559892304065 +669 68 loss.adversarial_temperature 0.24126076299375426 +669 68 optimizer.lr 0.012950987771042649 +669 68 negative_sampler.num_negs_per_pos 24.0 +669 68 training.batch_size 0.0 +669 69 model.embedding_dim 0.0 +669 69 loss.margin 20.093364964433697 +669 69 loss.adversarial_temperature 0.6124188531125982 +669 69 optimizer.lr 0.013811658881408718 +669 69 negative_sampler.num_negs_per_pos 85.0 +669 69 training.batch_size 1.0 +669 70 model.embedding_dim 0.0 +669 70 loss.margin 19.81500809426613 +669 70 loss.adversarial_temperature 0.8334216922621741 +669 70 optimizer.lr 0.0027089999382387676 +669 70 negative_sampler.num_negs_per_pos 39.0 +669 70 training.batch_size 0.0 +669 71 model.embedding_dim 1.0 +669 71 loss.margin 
7.345063948248389 +669 71 loss.adversarial_temperature 0.6680814545928564 +669 71 optimizer.lr 0.0016466823417685527 +669 71 negative_sampler.num_negs_per_pos 82.0 +669 71 training.batch_size 2.0 +669 72 model.embedding_dim 2.0 +669 72 loss.margin 27.75004536696001 +669 72 loss.adversarial_temperature 0.24558285683103714 +669 72 optimizer.lr 0.0012979991381088412 +669 72 negative_sampler.num_negs_per_pos 0.0 +669 72 training.batch_size 1.0 +669 73 model.embedding_dim 0.0 +669 73 loss.margin 28.007497745101908 +669 73 loss.adversarial_temperature 0.6593086086442198 +669 73 optimizer.lr 0.0012727305380919843 +669 73 negative_sampler.num_negs_per_pos 56.0 +669 73 training.batch_size 1.0 +669 74 model.embedding_dim 2.0 +669 74 loss.margin 3.750840882680543 +669 74 loss.adversarial_temperature 0.4009554943623259 +669 74 optimizer.lr 0.005262041378258035 +669 74 negative_sampler.num_negs_per_pos 73.0 +669 74 training.batch_size 1.0 +669 75 model.embedding_dim 0.0 +669 75 loss.margin 17.62799343338222 +669 75 loss.adversarial_temperature 0.9405287399393327 +669 75 optimizer.lr 0.004747343513654939 +669 75 negative_sampler.num_negs_per_pos 84.0 +669 75 training.batch_size 2.0 +669 76 model.embedding_dim 1.0 +669 76 loss.margin 28.688141894143193 +669 76 loss.adversarial_temperature 0.636849765838805 +669 76 optimizer.lr 0.07610111152159756 +669 76 negative_sampler.num_negs_per_pos 69.0 +669 76 training.batch_size 0.0 +669 77 model.embedding_dim 1.0 +669 77 loss.margin 25.882255140528585 +669 77 loss.adversarial_temperature 0.5375444488485102 +669 77 optimizer.lr 0.09315696792004671 +669 77 negative_sampler.num_negs_per_pos 31.0 +669 77 training.batch_size 0.0 +669 78 model.embedding_dim 1.0 +669 78 loss.margin 12.686083608386594 +669 78 loss.adversarial_temperature 0.9667882945668568 +669 78 optimizer.lr 0.0039386474334495375 +669 78 negative_sampler.num_negs_per_pos 38.0 +669 78 training.batch_size 0.0 +669 79 model.embedding_dim 1.0 +669 79 loss.margin 6.9171719842159325 
+669 79 loss.adversarial_temperature 0.5965746684639021 +669 79 optimizer.lr 0.012719966615079894 +669 79 negative_sampler.num_negs_per_pos 12.0 +669 79 training.batch_size 2.0 +669 80 model.embedding_dim 2.0 +669 80 loss.margin 14.87190662511053 +669 80 loss.adversarial_temperature 0.32884733223448775 +669 80 optimizer.lr 0.0019350674017205739 +669 80 negative_sampler.num_negs_per_pos 69.0 +669 80 training.batch_size 2.0 +669 81 model.embedding_dim 0.0 +669 81 loss.margin 4.3873250805184245 +669 81 loss.adversarial_temperature 0.7314484028388851 +669 81 optimizer.lr 0.02928465426380534 +669 81 negative_sampler.num_negs_per_pos 23.0 +669 81 training.batch_size 2.0 +669 82 model.embedding_dim 1.0 +669 82 loss.margin 10.904603636679074 +669 82 loss.adversarial_temperature 0.5662964534484589 +669 82 optimizer.lr 0.036907660469049454 +669 82 negative_sampler.num_negs_per_pos 70.0 +669 82 training.batch_size 2.0 +669 83 model.embedding_dim 0.0 +669 83 loss.margin 24.86044374058691 +669 83 loss.adversarial_temperature 0.27729047593540346 +669 83 optimizer.lr 0.03831349652046743 +669 83 negative_sampler.num_negs_per_pos 10.0 +669 83 training.batch_size 0.0 +669 84 model.embedding_dim 0.0 +669 84 loss.margin 13.04764985382515 +669 84 loss.adversarial_temperature 0.7890594224711047 +669 84 optimizer.lr 0.035996425896813455 +669 84 negative_sampler.num_negs_per_pos 17.0 +669 84 training.batch_size 0.0 +669 85 model.embedding_dim 2.0 +669 85 loss.margin 26.93528568560705 +669 85 loss.adversarial_temperature 0.23934539404228922 +669 85 optimizer.lr 0.006643632218233934 +669 85 negative_sampler.num_negs_per_pos 15.0 +669 85 training.batch_size 2.0 +669 86 model.embedding_dim 0.0 +669 86 loss.margin 7.696486756588644 +669 86 loss.adversarial_temperature 0.8552959313283097 +669 86 optimizer.lr 0.005575503281340298 +669 86 negative_sampler.num_negs_per_pos 4.0 +669 86 training.batch_size 2.0 +669 87 model.embedding_dim 2.0 +669 87 loss.margin 20.59039586149083 +669 87 
loss.adversarial_temperature 0.1867518529877421 +669 87 optimizer.lr 0.031351287281572514 +669 87 negative_sampler.num_negs_per_pos 9.0 +669 87 training.batch_size 1.0 +669 88 model.embedding_dim 0.0 +669 88 loss.margin 3.1284802159350544 +669 88 loss.adversarial_temperature 0.8712071490564437 +669 88 optimizer.lr 0.01617367151969224 +669 88 negative_sampler.num_negs_per_pos 52.0 +669 88 training.batch_size 1.0 +669 89 model.embedding_dim 1.0 +669 89 loss.margin 20.011480699426553 +669 89 loss.adversarial_temperature 0.9290666588956643 +669 89 optimizer.lr 0.08005875178346405 +669 89 negative_sampler.num_negs_per_pos 59.0 +669 89 training.batch_size 1.0 +669 90 model.embedding_dim 1.0 +669 90 loss.margin 7.397935614896698 +669 90 loss.adversarial_temperature 0.5464158290975897 +669 90 optimizer.lr 0.006139251061452139 +669 90 negative_sampler.num_negs_per_pos 86.0 +669 90 training.batch_size 2.0 +669 91 model.embedding_dim 1.0 +669 91 loss.margin 20.048504588275375 +669 91 loss.adversarial_temperature 0.936848162865946 +669 91 optimizer.lr 0.08230794085423851 +669 91 negative_sampler.num_negs_per_pos 88.0 +669 91 training.batch_size 2.0 +669 92 model.embedding_dim 0.0 +669 92 loss.margin 10.330336574900157 +669 92 loss.adversarial_temperature 0.8301652806229343 +669 92 optimizer.lr 0.0883364463589164 +669 92 negative_sampler.num_negs_per_pos 17.0 +669 92 training.batch_size 0.0 +669 93 model.embedding_dim 2.0 +669 93 loss.margin 7.2306494528384455 +669 93 loss.adversarial_temperature 0.9420095910312052 +669 93 optimizer.lr 0.039139356788965844 +669 93 negative_sampler.num_negs_per_pos 98.0 +669 93 training.batch_size 2.0 +669 94 model.embedding_dim 1.0 +669 94 loss.margin 5.2843576569533734 +669 94 loss.adversarial_temperature 0.5841220929521924 +669 94 optimizer.lr 0.01990678773485967 +669 94 negative_sampler.num_negs_per_pos 37.0 +669 94 training.batch_size 0.0 +669 95 model.embedding_dim 0.0 +669 95 loss.margin 21.74854317224293 +669 95 
loss.adversarial_temperature 0.5212347155748812 +669 95 optimizer.lr 0.02582493342322752 +669 95 negative_sampler.num_negs_per_pos 5.0 +669 95 training.batch_size 2.0 +669 96 model.embedding_dim 0.0 +669 96 loss.margin 21.151424903502523 +669 96 loss.adversarial_temperature 0.23819566347231597 +669 96 optimizer.lr 0.023943103184491124 +669 96 negative_sampler.num_negs_per_pos 72.0 +669 96 training.batch_size 0.0 +669 97 model.embedding_dim 2.0 +669 97 loss.margin 18.58327894045642 +669 97 loss.adversarial_temperature 0.4882676424286432 +669 97 optimizer.lr 0.011390251562356455 +669 97 negative_sampler.num_negs_per_pos 98.0 +669 97 training.batch_size 1.0 +669 98 model.embedding_dim 1.0 +669 98 loss.margin 26.265025955601573 +669 98 loss.adversarial_temperature 0.5114424334972869 +669 98 optimizer.lr 0.0036498195787932593 +669 98 negative_sampler.num_negs_per_pos 37.0 +669 98 training.batch_size 0.0 +669 99 model.embedding_dim 0.0 +669 99 loss.margin 9.702522118030068 +669 99 loss.adversarial_temperature 0.8459367037862877 +669 99 optimizer.lr 0.007616431384671164 +669 99 negative_sampler.num_negs_per_pos 46.0 +669 99 training.batch_size 0.0 +669 100 model.embedding_dim 2.0 +669 100 loss.margin 13.593803730320225 +669 100 loss.adversarial_temperature 0.32544027776294243 +669 100 optimizer.lr 0.0440452478592959 +669 100 negative_sampler.num_negs_per_pos 25.0 +669 100 training.batch_size 0.0 +669 1 dataset """kinships""" +669 1 model """simple""" +669 1 loss """nssa""" +669 1 regularizer """no""" +669 1 optimizer """adam""" +669 1 training_loop """owa""" +669 1 negative_sampler """basic""" +669 1 evaluator """rankbased""" +669 2 dataset """kinships""" +669 2 model """simple""" +669 2 loss """nssa""" +669 2 regularizer """no""" +669 2 optimizer """adam""" +669 2 training_loop """owa""" +669 2 negative_sampler """basic""" +669 2 evaluator """rankbased""" +669 3 dataset """kinships""" +669 3 model """simple""" +669 3 loss """nssa""" +669 3 regularizer """no""" +669 3 
optimizer """adam""" +669 3 training_loop """owa""" +669 3 negative_sampler """basic""" +669 3 evaluator """rankbased""" +669 4 dataset """kinships""" +669 4 model """simple""" +669 4 loss """nssa""" +669 4 regularizer """no""" +669 4 optimizer """adam""" +669 4 training_loop """owa""" +669 4 negative_sampler """basic""" +669 4 evaluator """rankbased""" +669 5 dataset """kinships""" +669 5 model """simple""" +669 5 loss """nssa""" +669 5 regularizer """no""" +669 5 optimizer """adam""" +669 5 training_loop """owa""" +669 5 negative_sampler """basic""" +669 5 evaluator """rankbased""" +669 6 dataset """kinships""" +669 6 model """simple""" +669 6 loss """nssa""" +669 6 regularizer """no""" +669 6 optimizer """adam""" +669 6 training_loop """owa""" +669 6 negative_sampler """basic""" +669 6 evaluator """rankbased""" +669 7 dataset """kinships""" +669 7 model """simple""" +669 7 loss """nssa""" +669 7 regularizer """no""" +669 7 optimizer """adam""" +669 7 training_loop """owa""" +669 7 negative_sampler """basic""" +669 7 evaluator """rankbased""" +669 8 dataset """kinships""" +669 8 model """simple""" +669 8 loss """nssa""" +669 8 regularizer """no""" +669 8 optimizer """adam""" +669 8 training_loop """owa""" +669 8 negative_sampler """basic""" +669 8 evaluator """rankbased""" +669 9 dataset """kinships""" +669 9 model """simple""" +669 9 loss """nssa""" +669 9 regularizer """no""" +669 9 optimizer """adam""" +669 9 training_loop """owa""" +669 9 negative_sampler """basic""" +669 9 evaluator """rankbased""" +669 10 dataset """kinships""" +669 10 model """simple""" +669 10 loss """nssa""" +669 10 regularizer """no""" +669 10 optimizer """adam""" +669 10 training_loop """owa""" +669 10 negative_sampler """basic""" +669 10 evaluator """rankbased""" +669 11 dataset """kinships""" +669 11 model """simple""" +669 11 loss """nssa""" +669 11 regularizer """no""" +669 11 optimizer """adam""" +669 11 training_loop """owa""" +669 11 negative_sampler """basic""" +669 11 
evaluator """rankbased""" +669 12 dataset """kinships""" +669 12 model """simple""" +669 12 loss """nssa""" +669 12 regularizer """no""" +669 12 optimizer """adam""" +669 12 training_loop """owa""" +669 12 negative_sampler """basic""" +669 12 evaluator """rankbased""" +669 13 dataset """kinships""" +669 13 model """simple""" +669 13 loss """nssa""" +669 13 regularizer """no""" +669 13 optimizer """adam""" +669 13 training_loop """owa""" +669 13 negative_sampler """basic""" +669 13 evaluator """rankbased""" +669 14 dataset """kinships""" +669 14 model """simple""" +669 14 loss """nssa""" +669 14 regularizer """no""" +669 14 optimizer """adam""" +669 14 training_loop """owa""" +669 14 negative_sampler """basic""" +669 14 evaluator """rankbased""" +669 15 dataset """kinships""" +669 15 model """simple""" +669 15 loss """nssa""" +669 15 regularizer """no""" +669 15 optimizer """adam""" +669 15 training_loop """owa""" +669 15 negative_sampler """basic""" +669 15 evaluator """rankbased""" +669 16 dataset """kinships""" +669 16 model """simple""" +669 16 loss """nssa""" +669 16 regularizer """no""" +669 16 optimizer """adam""" +669 16 training_loop """owa""" +669 16 negative_sampler """basic""" +669 16 evaluator """rankbased""" +669 17 dataset """kinships""" +669 17 model """simple""" +669 17 loss """nssa""" +669 17 regularizer """no""" +669 17 optimizer """adam""" +669 17 training_loop """owa""" +669 17 negative_sampler """basic""" +669 17 evaluator """rankbased""" +669 18 dataset """kinships""" +669 18 model """simple""" +669 18 loss """nssa""" +669 18 regularizer """no""" +669 18 optimizer """adam""" +669 18 training_loop """owa""" +669 18 negative_sampler """basic""" +669 18 evaluator """rankbased""" +669 19 dataset """kinships""" +669 19 model """simple""" +669 19 loss """nssa""" +669 19 regularizer """no""" +669 19 optimizer """adam""" +669 19 training_loop """owa""" +669 19 negative_sampler """basic""" +669 19 evaluator """rankbased""" +669 20 dataset 
"""kinships""" +669 20 model """simple""" +669 20 loss """nssa""" +669 20 regularizer """no""" +669 20 optimizer """adam""" +669 20 training_loop """owa""" +669 20 negative_sampler """basic""" +669 20 evaluator """rankbased""" +669 21 dataset """kinships""" +669 21 model """simple""" +669 21 loss """nssa""" +669 21 regularizer """no""" +669 21 optimizer """adam""" +669 21 training_loop """owa""" +669 21 negative_sampler """basic""" +669 21 evaluator """rankbased""" +669 22 dataset """kinships""" +669 22 model """simple""" +669 22 loss """nssa""" +669 22 regularizer """no""" +669 22 optimizer """adam""" +669 22 training_loop """owa""" +669 22 negative_sampler """basic""" +669 22 evaluator """rankbased""" +669 23 dataset """kinships""" +669 23 model """simple""" +669 23 loss """nssa""" +669 23 regularizer """no""" +669 23 optimizer """adam""" +669 23 training_loop """owa""" +669 23 negative_sampler """basic""" +669 23 evaluator """rankbased""" +669 24 dataset """kinships""" +669 24 model """simple""" +669 24 loss """nssa""" +669 24 regularizer """no""" +669 24 optimizer """adam""" +669 24 training_loop """owa""" +669 24 negative_sampler """basic""" +669 24 evaluator """rankbased""" +669 25 dataset """kinships""" +669 25 model """simple""" +669 25 loss """nssa""" +669 25 regularizer """no""" +669 25 optimizer """adam""" +669 25 training_loop """owa""" +669 25 negative_sampler """basic""" +669 25 evaluator """rankbased""" +669 26 dataset """kinships""" +669 26 model """simple""" +669 26 loss """nssa""" +669 26 regularizer """no""" +669 26 optimizer """adam""" +669 26 training_loop """owa""" +669 26 negative_sampler """basic""" +669 26 evaluator """rankbased""" +669 27 dataset """kinships""" +669 27 model """simple""" +669 27 loss """nssa""" +669 27 regularizer """no""" +669 27 optimizer """adam""" +669 27 training_loop """owa""" +669 27 negative_sampler """basic""" +669 27 evaluator """rankbased""" +669 28 dataset """kinships""" +669 28 model """simple""" +669 28 loss 
"""nssa""" +669 28 regularizer """no""" +669 28 optimizer """adam""" +669 28 training_loop """owa""" +669 28 negative_sampler """basic""" +669 28 evaluator """rankbased""" +669 29 dataset """kinships""" +669 29 model """simple""" +669 29 loss """nssa""" +669 29 regularizer """no""" +669 29 optimizer """adam""" +669 29 training_loop """owa""" +669 29 negative_sampler """basic""" +669 29 evaluator """rankbased""" +669 30 dataset """kinships""" +669 30 model """simple""" +669 30 loss """nssa""" +669 30 regularizer """no""" +669 30 optimizer """adam""" +669 30 training_loop """owa""" +669 30 negative_sampler """basic""" +669 30 evaluator """rankbased""" +669 31 dataset """kinships""" +669 31 model """simple""" +669 31 loss """nssa""" +669 31 regularizer """no""" +669 31 optimizer """adam""" +669 31 training_loop """owa""" +669 31 negative_sampler """basic""" +669 31 evaluator """rankbased""" +669 32 dataset """kinships""" +669 32 model """simple""" +669 32 loss """nssa""" +669 32 regularizer """no""" +669 32 optimizer """adam""" +669 32 training_loop """owa""" +669 32 negative_sampler """basic""" +669 32 evaluator """rankbased""" +669 33 dataset """kinships""" +669 33 model """simple""" +669 33 loss """nssa""" +669 33 regularizer """no""" +669 33 optimizer """adam""" +669 33 training_loop """owa""" +669 33 negative_sampler """basic""" +669 33 evaluator """rankbased""" +669 34 dataset """kinships""" +669 34 model """simple""" +669 34 loss """nssa""" +669 34 regularizer """no""" +669 34 optimizer """adam""" +669 34 training_loop """owa""" +669 34 negative_sampler """basic""" +669 34 evaluator """rankbased""" +669 35 dataset """kinships""" +669 35 model """simple""" +669 35 loss """nssa""" +669 35 regularizer """no""" +669 35 optimizer """adam""" +669 35 training_loop """owa""" +669 35 negative_sampler """basic""" +669 35 evaluator """rankbased""" +669 36 dataset """kinships""" +669 36 model """simple""" +669 36 loss """nssa""" +669 36 regularizer """no""" +669 36 
optimizer """adam""" +669 36 training_loop """owa""" +669 36 negative_sampler """basic""" +669 36 evaluator """rankbased""" +669 37 dataset """kinships""" +669 37 model """simple""" +669 37 loss """nssa""" +669 37 regularizer """no""" +669 37 optimizer """adam""" +669 37 training_loop """owa""" +669 37 negative_sampler """basic""" +669 37 evaluator """rankbased""" +669 38 dataset """kinships""" +669 38 model """simple""" +669 38 loss """nssa""" +669 38 regularizer """no""" +669 38 optimizer """adam""" +669 38 training_loop """owa""" +669 38 negative_sampler """basic""" +669 38 evaluator """rankbased""" +669 39 dataset """kinships""" +669 39 model """simple""" +669 39 loss """nssa""" +669 39 regularizer """no""" +669 39 optimizer """adam""" +669 39 training_loop """owa""" +669 39 negative_sampler """basic""" +669 39 evaluator """rankbased""" +669 40 dataset """kinships""" +669 40 model """simple""" +669 40 loss """nssa""" +669 40 regularizer """no""" +669 40 optimizer """adam""" +669 40 training_loop """owa""" +669 40 negative_sampler """basic""" +669 40 evaluator """rankbased""" +669 41 dataset """kinships""" +669 41 model """simple""" +669 41 loss """nssa""" +669 41 regularizer """no""" +669 41 optimizer """adam""" +669 41 training_loop """owa""" +669 41 negative_sampler """basic""" +669 41 evaluator """rankbased""" +669 42 dataset """kinships""" +669 42 model """simple""" +669 42 loss """nssa""" +669 42 regularizer """no""" +669 42 optimizer """adam""" +669 42 training_loop """owa""" +669 42 negative_sampler """basic""" +669 42 evaluator """rankbased""" +669 43 dataset """kinships""" +669 43 model """simple""" +669 43 loss """nssa""" +669 43 regularizer """no""" +669 43 optimizer """adam""" +669 43 training_loop """owa""" +669 43 negative_sampler """basic""" +669 43 evaluator """rankbased""" +669 44 dataset """kinships""" +669 44 model """simple""" +669 44 loss """nssa""" +669 44 regularizer """no""" +669 44 optimizer """adam""" +669 44 training_loop """owa""" 
+669 44 negative_sampler """basic""" +669 44 evaluator """rankbased""" +669 45 dataset """kinships""" +669 45 model """simple""" +669 45 loss """nssa""" +669 45 regularizer """no""" +669 45 optimizer """adam""" +669 45 training_loop """owa""" +669 45 negative_sampler """basic""" +669 45 evaluator """rankbased""" +669 46 dataset """kinships""" +669 46 model """simple""" +669 46 loss """nssa""" +669 46 regularizer """no""" +669 46 optimizer """adam""" +669 46 training_loop """owa""" +669 46 negative_sampler """basic""" +669 46 evaluator """rankbased""" +669 47 dataset """kinships""" +669 47 model """simple""" +669 47 loss """nssa""" +669 47 regularizer """no""" +669 47 optimizer """adam""" +669 47 training_loop """owa""" +669 47 negative_sampler """basic""" +669 47 evaluator """rankbased""" +669 48 dataset """kinships""" +669 48 model """simple""" +669 48 loss """nssa""" +669 48 regularizer """no""" +669 48 optimizer """adam""" +669 48 training_loop """owa""" +669 48 negative_sampler """basic""" +669 48 evaluator """rankbased""" +669 49 dataset """kinships""" +669 49 model """simple""" +669 49 loss """nssa""" +669 49 regularizer """no""" +669 49 optimizer """adam""" +669 49 training_loop """owa""" +669 49 negative_sampler """basic""" +669 49 evaluator """rankbased""" +669 50 dataset """kinships""" +669 50 model """simple""" +669 50 loss """nssa""" +669 50 regularizer """no""" +669 50 optimizer """adam""" +669 50 training_loop """owa""" +669 50 negative_sampler """basic""" +669 50 evaluator """rankbased""" +669 51 dataset """kinships""" +669 51 model """simple""" +669 51 loss """nssa""" +669 51 regularizer """no""" +669 51 optimizer """adam""" +669 51 training_loop """owa""" +669 51 negative_sampler """basic""" +669 51 evaluator """rankbased""" +669 52 dataset """kinships""" +669 52 model """simple""" +669 52 loss """nssa""" +669 52 regularizer """no""" +669 52 optimizer """adam""" +669 52 training_loop """owa""" +669 52 negative_sampler """basic""" +669 52 evaluator 
"""rankbased""" +669 53 dataset """kinships""" +669 53 model """simple""" +669 53 loss """nssa""" +669 53 regularizer """no""" +669 53 optimizer """adam""" +669 53 training_loop """owa""" +669 53 negative_sampler """basic""" +669 53 evaluator """rankbased""" +669 54 dataset """kinships""" +669 54 model """simple""" +669 54 loss """nssa""" +669 54 regularizer """no""" +669 54 optimizer """adam""" +669 54 training_loop """owa""" +669 54 negative_sampler """basic""" +669 54 evaluator """rankbased""" +669 55 dataset """kinships""" +669 55 model """simple""" +669 55 loss """nssa""" +669 55 regularizer """no""" +669 55 optimizer """adam""" +669 55 training_loop """owa""" +669 55 negative_sampler """basic""" +669 55 evaluator """rankbased""" +669 56 dataset """kinships""" +669 56 model """simple""" +669 56 loss """nssa""" +669 56 regularizer """no""" +669 56 optimizer """adam""" +669 56 training_loop """owa""" +669 56 negative_sampler """basic""" +669 56 evaluator """rankbased""" +669 57 dataset """kinships""" +669 57 model """simple""" +669 57 loss """nssa""" +669 57 regularizer """no""" +669 57 optimizer """adam""" +669 57 training_loop """owa""" +669 57 negative_sampler """basic""" +669 57 evaluator """rankbased""" +669 58 dataset """kinships""" +669 58 model """simple""" +669 58 loss """nssa""" +669 58 regularizer """no""" +669 58 optimizer """adam""" +669 58 training_loop """owa""" +669 58 negative_sampler """basic""" +669 58 evaluator """rankbased""" +669 59 dataset """kinships""" +669 59 model """simple""" +669 59 loss """nssa""" +669 59 regularizer """no""" +669 59 optimizer """adam""" +669 59 training_loop """owa""" +669 59 negative_sampler """basic""" +669 59 evaluator """rankbased""" +669 60 dataset """kinships""" +669 60 model """simple""" +669 60 loss """nssa""" +669 60 regularizer """no""" +669 60 optimizer """adam""" +669 60 training_loop """owa""" +669 60 negative_sampler """basic""" +669 60 evaluator """rankbased""" +669 61 dataset """kinships""" +669 61 
model """simple""" +669 61 loss """nssa""" +669 61 regularizer """no""" +669 61 optimizer """adam""" +669 61 training_loop """owa""" +669 61 negative_sampler """basic""" +669 61 evaluator """rankbased""" +669 62 dataset """kinships""" +669 62 model """simple""" +669 62 loss """nssa""" +669 62 regularizer """no""" +669 62 optimizer """adam""" +669 62 training_loop """owa""" +669 62 negative_sampler """basic""" +669 62 evaluator """rankbased""" +669 63 dataset """kinships""" +669 63 model """simple""" +669 63 loss """nssa""" +669 63 regularizer """no""" +669 63 optimizer """adam""" +669 63 training_loop """owa""" +669 63 negative_sampler """basic""" +669 63 evaluator """rankbased""" +669 64 dataset """kinships""" +669 64 model """simple""" +669 64 loss """nssa""" +669 64 regularizer """no""" +669 64 optimizer """adam""" +669 64 training_loop """owa""" +669 64 negative_sampler """basic""" +669 64 evaluator """rankbased""" +669 65 dataset """kinships""" +669 65 model """simple""" +669 65 loss """nssa""" +669 65 regularizer """no""" +669 65 optimizer """adam""" +669 65 training_loop """owa""" +669 65 negative_sampler """basic""" +669 65 evaluator """rankbased""" +669 66 dataset """kinships""" +669 66 model """simple""" +669 66 loss """nssa""" +669 66 regularizer """no""" +669 66 optimizer """adam""" +669 66 training_loop """owa""" +669 66 negative_sampler """basic""" +669 66 evaluator """rankbased""" +669 67 dataset """kinships""" +669 67 model """simple""" +669 67 loss """nssa""" +669 67 regularizer """no""" +669 67 optimizer """adam""" +669 67 training_loop """owa""" +669 67 negative_sampler """basic""" +669 67 evaluator """rankbased""" +669 68 dataset """kinships""" +669 68 model """simple""" +669 68 loss """nssa""" +669 68 regularizer """no""" +669 68 optimizer """adam""" +669 68 training_loop """owa""" +669 68 negative_sampler """basic""" +669 68 evaluator """rankbased""" +669 69 dataset """kinships""" +669 69 model """simple""" +669 69 loss """nssa""" +669 69 
regularizer """no""" +669 69 optimizer """adam""" +669 69 training_loop """owa""" +669 69 negative_sampler """basic""" +669 69 evaluator """rankbased""" +669 70 dataset """kinships""" +669 70 model """simple""" +669 70 loss """nssa""" +669 70 regularizer """no""" +669 70 optimizer """adam""" +669 70 training_loop """owa""" +669 70 negative_sampler """basic""" +669 70 evaluator """rankbased""" +669 71 dataset """kinships""" +669 71 model """simple""" +669 71 loss """nssa""" +669 71 regularizer """no""" +669 71 optimizer """adam""" +669 71 training_loop """owa""" +669 71 negative_sampler """basic""" +669 71 evaluator """rankbased""" +669 72 dataset """kinships""" +669 72 model """simple""" +669 72 loss """nssa""" +669 72 regularizer """no""" +669 72 optimizer """adam""" +669 72 training_loop """owa""" +669 72 negative_sampler """basic""" +669 72 evaluator """rankbased""" +669 73 dataset """kinships""" +669 73 model """simple""" +669 73 loss """nssa""" +669 73 regularizer """no""" +669 73 optimizer """adam""" +669 73 training_loop """owa""" +669 73 negative_sampler """basic""" +669 73 evaluator """rankbased""" +669 74 dataset """kinships""" +669 74 model """simple""" +669 74 loss """nssa""" +669 74 regularizer """no""" +669 74 optimizer """adam""" +669 74 training_loop """owa""" +669 74 negative_sampler """basic""" +669 74 evaluator """rankbased""" +669 75 dataset """kinships""" +669 75 model """simple""" +669 75 loss """nssa""" +669 75 regularizer """no""" +669 75 optimizer """adam""" +669 75 training_loop """owa""" +669 75 negative_sampler """basic""" +669 75 evaluator """rankbased""" +669 76 dataset """kinships""" +669 76 model """simple""" +669 76 loss """nssa""" +669 76 regularizer """no""" +669 76 optimizer """adam""" +669 76 training_loop """owa""" +669 76 negative_sampler """basic""" +669 76 evaluator """rankbased""" +669 77 dataset """kinships""" +669 77 model """simple""" +669 77 loss """nssa""" +669 77 regularizer """no""" +669 77 optimizer """adam""" +669 
77 training_loop """owa""" +669 77 negative_sampler """basic""" +669 77 evaluator """rankbased""" +669 78 dataset """kinships""" +669 78 model """simple""" +669 78 loss """nssa""" +669 78 regularizer """no""" +669 78 optimizer """adam""" +669 78 training_loop """owa""" +669 78 negative_sampler """basic""" +669 78 evaluator """rankbased""" +669 79 dataset """kinships""" +669 79 model """simple""" +669 79 loss """nssa""" +669 79 regularizer """no""" +669 79 optimizer """adam""" +669 79 training_loop """owa""" +669 79 negative_sampler """basic""" +669 79 evaluator """rankbased""" +669 80 dataset """kinships""" +669 80 model """simple""" +669 80 loss """nssa""" +669 80 regularizer """no""" +669 80 optimizer """adam""" +669 80 training_loop """owa""" +669 80 negative_sampler """basic""" +669 80 evaluator """rankbased""" +669 81 dataset """kinships""" +669 81 model """simple""" +669 81 loss """nssa""" +669 81 regularizer """no""" +669 81 optimizer """adam""" +669 81 training_loop """owa""" +669 81 negative_sampler """basic""" +669 81 evaluator """rankbased""" +669 82 dataset """kinships""" +669 82 model """simple""" +669 82 loss """nssa""" +669 82 regularizer """no""" +669 82 optimizer """adam""" +669 82 training_loop """owa""" +669 82 negative_sampler """basic""" +669 82 evaluator """rankbased""" +669 83 dataset """kinships""" +669 83 model """simple""" +669 83 loss """nssa""" +669 83 regularizer """no""" +669 83 optimizer """adam""" +669 83 training_loop """owa""" +669 83 negative_sampler """basic""" +669 83 evaluator """rankbased""" +669 84 dataset """kinships""" +669 84 model """simple""" +669 84 loss """nssa""" +669 84 regularizer """no""" +669 84 optimizer """adam""" +669 84 training_loop """owa""" +669 84 negative_sampler """basic""" +669 84 evaluator """rankbased""" +669 85 dataset """kinships""" +669 85 model """simple""" +669 85 loss """nssa""" +669 85 regularizer """no""" +669 85 optimizer """adam""" +669 85 training_loop """owa""" +669 85 negative_sampler 
"""basic""" +669 85 evaluator """rankbased""" +669 86 dataset """kinships""" +669 86 model """simple""" +669 86 loss """nssa""" +669 86 regularizer """no""" +669 86 optimizer """adam""" +669 86 training_loop """owa""" +669 86 negative_sampler """basic""" +669 86 evaluator """rankbased""" +669 87 dataset """kinships""" +669 87 model """simple""" +669 87 loss """nssa""" +669 87 regularizer """no""" +669 87 optimizer """adam""" +669 87 training_loop """owa""" +669 87 negative_sampler """basic""" +669 87 evaluator """rankbased""" +669 88 dataset """kinships""" +669 88 model """simple""" +669 88 loss """nssa""" +669 88 regularizer """no""" +669 88 optimizer """adam""" +669 88 training_loop """owa""" +669 88 negative_sampler """basic""" +669 88 evaluator """rankbased""" +669 89 dataset """kinships""" +669 89 model """simple""" +669 89 loss """nssa""" +669 89 regularizer """no""" +669 89 optimizer """adam""" +669 89 training_loop """owa""" +669 89 negative_sampler """basic""" +669 89 evaluator """rankbased""" +669 90 dataset """kinships""" +669 90 model """simple""" +669 90 loss """nssa""" +669 90 regularizer """no""" +669 90 optimizer """adam""" +669 90 training_loop """owa""" +669 90 negative_sampler """basic""" +669 90 evaluator """rankbased""" +669 91 dataset """kinships""" +669 91 model """simple""" +669 91 loss """nssa""" +669 91 regularizer """no""" +669 91 optimizer """adam""" +669 91 training_loop """owa""" +669 91 negative_sampler """basic""" +669 91 evaluator """rankbased""" +669 92 dataset """kinships""" +669 92 model """simple""" +669 92 loss """nssa""" +669 92 regularizer """no""" +669 92 optimizer """adam""" +669 92 training_loop """owa""" +669 92 negative_sampler """basic""" +669 92 evaluator """rankbased""" +669 93 dataset """kinships""" +669 93 model """simple""" +669 93 loss """nssa""" +669 93 regularizer """no""" +669 93 optimizer """adam""" +669 93 training_loop """owa""" +669 93 negative_sampler """basic""" +669 93 evaluator """rankbased""" +669 94 
dataset """kinships""" +669 94 model """simple""" +669 94 loss """nssa""" +669 94 regularizer """no""" +669 94 optimizer """adam""" +669 94 training_loop """owa""" +669 94 negative_sampler """basic""" +669 94 evaluator """rankbased""" +669 95 dataset """kinships""" +669 95 model """simple""" +669 95 loss """nssa""" +669 95 regularizer """no""" +669 95 optimizer """adam""" +669 95 training_loop """owa""" +669 95 negative_sampler """basic""" +669 95 evaluator """rankbased""" +669 96 dataset """kinships""" +669 96 model """simple""" +669 96 loss """nssa""" +669 96 regularizer """no""" +669 96 optimizer """adam""" +669 96 training_loop """owa""" +669 96 negative_sampler """basic""" +669 96 evaluator """rankbased""" +669 97 dataset """kinships""" +669 97 model """simple""" +669 97 loss """nssa""" +669 97 regularizer """no""" +669 97 optimizer """adam""" +669 97 training_loop """owa""" +669 97 negative_sampler """basic""" +669 97 evaluator """rankbased""" +669 98 dataset """kinships""" +669 98 model """simple""" +669 98 loss """nssa""" +669 98 regularizer """no""" +669 98 optimizer """adam""" +669 98 training_loop """owa""" +669 98 negative_sampler """basic""" +669 98 evaluator """rankbased""" +669 99 dataset """kinships""" +669 99 model """simple""" +669 99 loss """nssa""" +669 99 regularizer """no""" +669 99 optimizer """adam""" +669 99 training_loop """owa""" +669 99 negative_sampler """basic""" +669 99 evaluator """rankbased""" +669 100 dataset """kinships""" +669 100 model """simple""" +669 100 loss """nssa""" +669 100 regularizer """no""" +669 100 optimizer """adam""" +669 100 training_loop """owa""" +669 100 negative_sampler """basic""" +669 100 evaluator """rankbased""" +670 1 model.embedding_dim 0.0 +670 1 loss.margin 1.0050235308185278 +670 1 loss.adversarial_temperature 0.634774066503389 +670 1 optimizer.lr 0.01224807487172678 +670 1 negative_sampler.num_negs_per_pos 68.0 +670 1 training.batch_size 2.0 +670 2 model.embedding_dim 1.0 +670 2 loss.margin 
11.583998081287852 +670 2 loss.adversarial_temperature 0.45175633676696025 +670 2 optimizer.lr 0.013357533546657776 +670 2 negative_sampler.num_negs_per_pos 92.0 +670 2 training.batch_size 2.0 +670 3 model.embedding_dim 2.0 +670 3 loss.margin 15.8693153473717 +670 3 loss.adversarial_temperature 0.379763647371015 +670 3 optimizer.lr 0.019252169779910313 +670 3 negative_sampler.num_negs_per_pos 14.0 +670 3 training.batch_size 0.0 +670 4 model.embedding_dim 0.0 +670 4 loss.margin 21.340024411433035 +670 4 loss.adversarial_temperature 0.10686112689914258 +670 4 optimizer.lr 0.0031630883942714025 +670 4 negative_sampler.num_negs_per_pos 26.0 +670 4 training.batch_size 1.0 +670 5 model.embedding_dim 1.0 +670 5 loss.margin 12.198983116402582 +670 5 loss.adversarial_temperature 0.7052747252989349 +670 5 optimizer.lr 0.052304192574664114 +670 5 negative_sampler.num_negs_per_pos 60.0 +670 5 training.batch_size 1.0 +670 6 model.embedding_dim 2.0 +670 6 loss.margin 29.516434230765363 +670 6 loss.adversarial_temperature 0.8109924341310633 +670 6 optimizer.lr 0.001802367672358824 +670 6 negative_sampler.num_negs_per_pos 36.0 +670 6 training.batch_size 1.0 +670 7 model.embedding_dim 0.0 +670 7 loss.margin 21.820099870718774 +670 7 loss.adversarial_temperature 0.6376581179526162 +670 7 optimizer.lr 0.08328364891018489 +670 7 negative_sampler.num_negs_per_pos 3.0 +670 7 training.batch_size 1.0 +670 8 model.embedding_dim 1.0 +670 8 loss.margin 24.377623200769413 +670 8 loss.adversarial_temperature 0.7803896708293159 +670 8 optimizer.lr 0.08529974375568677 +670 8 negative_sampler.num_negs_per_pos 32.0 +670 8 training.batch_size 1.0 +670 9 model.embedding_dim 2.0 +670 9 loss.margin 17.872418489623637 +670 9 loss.adversarial_temperature 0.9000355232256774 +670 9 optimizer.lr 0.06990203704477053 +670 9 negative_sampler.num_negs_per_pos 25.0 +670 9 training.batch_size 2.0 +670 10 model.embedding_dim 2.0 +670 10 loss.margin 23.089298153754143 +670 10 loss.adversarial_temperature 
0.8315277716091528 +670 10 optimizer.lr 0.05392539645505965 +670 10 negative_sampler.num_negs_per_pos 72.0 +670 10 training.batch_size 2.0 +670 11 model.embedding_dim 0.0 +670 11 loss.margin 26.6307808933978 +670 11 loss.adversarial_temperature 0.30777843738161503 +670 11 optimizer.lr 0.00425665717485658 +670 11 negative_sampler.num_negs_per_pos 10.0 +670 11 training.batch_size 1.0 +670 12 model.embedding_dim 1.0 +670 12 loss.margin 25.51429096196259 +670 12 loss.adversarial_temperature 0.7203155898450955 +670 12 optimizer.lr 0.00339668161361428 +670 12 negative_sampler.num_negs_per_pos 26.0 +670 12 training.batch_size 1.0 +670 13 model.embedding_dim 0.0 +670 13 loss.margin 7.365940731804215 +670 13 loss.adversarial_temperature 0.9609714129426541 +670 13 optimizer.lr 0.002335506049769431 +670 13 negative_sampler.num_negs_per_pos 12.0 +670 13 training.batch_size 1.0 +670 14 model.embedding_dim 1.0 +670 14 loss.margin 8.904622384384268 +670 14 loss.adversarial_temperature 0.9468397528250179 +670 14 optimizer.lr 0.0871498243364064 +670 14 negative_sampler.num_negs_per_pos 35.0 +670 14 training.batch_size 2.0 +670 15 model.embedding_dim 0.0 +670 15 loss.margin 3.74063418861183 +670 15 loss.adversarial_temperature 0.20703227238501798 +670 15 optimizer.lr 0.03582831715757218 +670 15 negative_sampler.num_negs_per_pos 31.0 +670 15 training.batch_size 2.0 +670 16 model.embedding_dim 0.0 +670 16 loss.margin 1.1944405933920446 +670 16 loss.adversarial_temperature 0.5327784041406333 +670 16 optimizer.lr 0.05450108565306086 +670 16 negative_sampler.num_negs_per_pos 42.0 +670 16 training.batch_size 0.0 +670 17 model.embedding_dim 0.0 +670 17 loss.margin 5.4673734912285905 +670 17 loss.adversarial_temperature 0.18012534451832102 +670 17 optimizer.lr 0.0026942526923487034 +670 17 negative_sampler.num_negs_per_pos 55.0 +670 17 training.batch_size 0.0 +670 18 model.embedding_dim 0.0 +670 18 loss.margin 26.951890573119645 +670 18 loss.adversarial_temperature 0.42564347917544254 +670 
18 optimizer.lr 0.0010426740496900571 +670 18 negative_sampler.num_negs_per_pos 73.0 +670 18 training.batch_size 1.0 +670 19 model.embedding_dim 2.0 +670 19 loss.margin 2.3437468131103847 +670 19 loss.adversarial_temperature 0.1403311344822197 +670 19 optimizer.lr 0.015059399614274414 +670 19 negative_sampler.num_negs_per_pos 45.0 +670 19 training.batch_size 1.0 +670 20 model.embedding_dim 0.0 +670 20 loss.margin 23.23492311007305 +670 20 loss.adversarial_temperature 0.12237168251225829 +670 20 optimizer.lr 0.09890448522453853 +670 20 negative_sampler.num_negs_per_pos 2.0 +670 20 training.batch_size 1.0 +670 21 model.embedding_dim 0.0 +670 21 loss.margin 13.6046464536003 +670 21 loss.adversarial_temperature 0.7188027017756281 +670 21 optimizer.lr 0.021843831459425884 +670 21 negative_sampler.num_negs_per_pos 72.0 +670 21 training.batch_size 0.0 +670 22 model.embedding_dim 2.0 +670 22 loss.margin 6.063586039882796 +670 22 loss.adversarial_temperature 0.7831017058080706 +670 22 optimizer.lr 0.004066159751850347 +670 22 negative_sampler.num_negs_per_pos 57.0 +670 22 training.batch_size 2.0 +670 23 model.embedding_dim 0.0 +670 23 loss.margin 15.14897722466863 +670 23 loss.adversarial_temperature 0.3652678473942872 +670 23 optimizer.lr 0.01051066918228187 +670 23 negative_sampler.num_negs_per_pos 94.0 +670 23 training.batch_size 2.0 +670 24 model.embedding_dim 1.0 +670 24 loss.margin 18.070738873065007 +670 24 loss.adversarial_temperature 0.3593013582639222 +670 24 optimizer.lr 0.0011142567724176235 +670 24 negative_sampler.num_negs_per_pos 96.0 +670 24 training.batch_size 2.0 +670 25 model.embedding_dim 1.0 +670 25 loss.margin 23.461834525513364 +670 25 loss.adversarial_temperature 0.589756783858213 +670 25 optimizer.lr 0.0022244582471425043 +670 25 negative_sampler.num_negs_per_pos 17.0 +670 25 training.batch_size 2.0 +670 26 model.embedding_dim 2.0 +670 26 loss.margin 21.996163447074277 +670 26 loss.adversarial_temperature 0.34078833495886074 +670 26 optimizer.lr 
0.04217941703527655 +670 26 negative_sampler.num_negs_per_pos 69.0 +670 26 training.batch_size 1.0 +670 27 model.embedding_dim 1.0 +670 27 loss.margin 20.00427838707024 +670 27 loss.adversarial_temperature 0.936064458385485 +670 27 optimizer.lr 0.001262808170933797 +670 27 negative_sampler.num_negs_per_pos 24.0 +670 27 training.batch_size 0.0 +670 28 model.embedding_dim 0.0 +670 28 loss.margin 22.231841801517206 +670 28 loss.adversarial_temperature 0.2024834578412142 +670 28 optimizer.lr 0.04970573608158937 +670 28 negative_sampler.num_negs_per_pos 9.0 +670 28 training.batch_size 0.0 +670 29 model.embedding_dim 2.0 +670 29 loss.margin 13.986627966173405 +670 29 loss.adversarial_temperature 0.5552465405761916 +670 29 optimizer.lr 0.015328530103973448 +670 29 negative_sampler.num_negs_per_pos 2.0 +670 29 training.batch_size 2.0 +670 30 model.embedding_dim 2.0 +670 30 loss.margin 22.15098669752642 +670 30 loss.adversarial_temperature 0.797239859218535 +670 30 optimizer.lr 0.018411004426748027 +670 30 negative_sampler.num_negs_per_pos 96.0 +670 30 training.batch_size 0.0 +670 31 model.embedding_dim 1.0 +670 31 loss.margin 3.2510325715692296 +670 31 loss.adversarial_temperature 0.1486089770820052 +670 31 optimizer.lr 0.03505672127160539 +670 31 negative_sampler.num_negs_per_pos 91.0 +670 31 training.batch_size 2.0 +670 32 model.embedding_dim 0.0 +670 32 loss.margin 23.501510202731115 +670 32 loss.adversarial_temperature 0.5800490443483887 +670 32 optimizer.lr 0.003928253166585502 +670 32 negative_sampler.num_negs_per_pos 81.0 +670 32 training.batch_size 2.0 +670 33 model.embedding_dim 1.0 +670 33 loss.margin 27.81705681898111 +670 33 loss.adversarial_temperature 0.4032835085072223 +670 33 optimizer.lr 0.03488997768890812 +670 33 negative_sampler.num_negs_per_pos 93.0 +670 33 training.batch_size 0.0 +670 34 model.embedding_dim 2.0 +670 34 loss.margin 20.410938598636346 +670 34 loss.adversarial_temperature 0.18680415115002091 +670 34 optimizer.lr 0.0026224928489585386 
+670 34 negative_sampler.num_negs_per_pos 5.0 +670 34 training.batch_size 2.0 +670 35 model.embedding_dim 2.0 +670 35 loss.margin 6.988444233541464 +670 35 loss.adversarial_temperature 0.2968318498700262 +670 35 optimizer.lr 0.04479276177051512 +670 35 negative_sampler.num_negs_per_pos 98.0 +670 35 training.batch_size 2.0 +670 36 model.embedding_dim 0.0 +670 36 loss.margin 2.7169727185274954 +670 36 loss.adversarial_temperature 0.4216339642495065 +670 36 optimizer.lr 0.013493796711652687 +670 36 negative_sampler.num_negs_per_pos 7.0 +670 36 training.batch_size 1.0 +670 37 model.embedding_dim 2.0 +670 37 loss.margin 22.755704463388934 +670 37 loss.adversarial_temperature 0.25391781523405155 +670 37 optimizer.lr 0.060726282786531006 +670 37 negative_sampler.num_negs_per_pos 90.0 +670 37 training.batch_size 2.0 +670 38 model.embedding_dim 0.0 +670 38 loss.margin 13.037659871721115 +670 38 loss.adversarial_temperature 0.7113634810612144 +670 38 optimizer.lr 0.0012373793393572882 +670 38 negative_sampler.num_negs_per_pos 11.0 +670 38 training.batch_size 2.0 +670 39 model.embedding_dim 0.0 +670 39 loss.margin 28.168467244992467 +670 39 loss.adversarial_temperature 0.15176032969856143 +670 39 optimizer.lr 0.002875297032761409 +670 39 negative_sampler.num_negs_per_pos 10.0 +670 39 training.batch_size 0.0 +670 40 model.embedding_dim 2.0 +670 40 loss.margin 15.068592557470515 +670 40 loss.adversarial_temperature 0.8689903591505079 +670 40 optimizer.lr 0.0013896178129107152 +670 40 negative_sampler.num_negs_per_pos 5.0 +670 40 training.batch_size 2.0 +670 41 model.embedding_dim 0.0 +670 41 loss.margin 19.682389382636853 +670 41 loss.adversarial_temperature 0.837689417588625 +670 41 optimizer.lr 0.0184975729048808 +670 41 negative_sampler.num_negs_per_pos 90.0 +670 41 training.batch_size 0.0 +670 42 model.embedding_dim 2.0 +670 42 loss.margin 20.778588837387364 +670 42 loss.adversarial_temperature 0.5738264764628636 +670 42 optimizer.lr 0.012301356273904556 +670 42 
negative_sampler.num_negs_per_pos 73.0 +670 42 training.batch_size 2.0 +670 43 model.embedding_dim 2.0 +670 43 loss.margin 27.581830619609487 +670 43 loss.adversarial_temperature 0.9536198565416302 +670 43 optimizer.lr 0.00543895512851762 +670 43 negative_sampler.num_negs_per_pos 19.0 +670 43 training.batch_size 2.0 +670 44 model.embedding_dim 2.0 +670 44 loss.margin 19.55621746685564 +670 44 loss.adversarial_temperature 0.3986573045431776 +670 44 optimizer.lr 0.01863254087532979 +670 44 negative_sampler.num_negs_per_pos 40.0 +670 44 training.batch_size 1.0 +670 45 model.embedding_dim 2.0 +670 45 loss.margin 24.884737180643114 +670 45 loss.adversarial_temperature 0.6999764093378673 +670 45 optimizer.lr 0.01145409861062341 +670 45 negative_sampler.num_negs_per_pos 14.0 +670 45 training.batch_size 2.0 +670 46 model.embedding_dim 2.0 +670 46 loss.margin 12.701373292009054 +670 46 loss.adversarial_temperature 0.5829623750108617 +670 46 optimizer.lr 0.003561005085585445 +670 46 negative_sampler.num_negs_per_pos 13.0 +670 46 training.batch_size 1.0 +670 47 model.embedding_dim 1.0 +670 47 loss.margin 5.138825307949561 +670 47 loss.adversarial_temperature 0.713634884942958 +670 47 optimizer.lr 0.006992261662063383 +670 47 negative_sampler.num_negs_per_pos 28.0 +670 47 training.batch_size 0.0 +670 48 model.embedding_dim 0.0 +670 48 loss.margin 9.951889455600702 +670 48 loss.adversarial_temperature 0.23361211722657477 +670 48 optimizer.lr 0.024816881161441515 +670 48 negative_sampler.num_negs_per_pos 51.0 +670 48 training.batch_size 2.0 +670 49 model.embedding_dim 0.0 +670 49 loss.margin 26.043007154942362 +670 49 loss.adversarial_temperature 0.29702864120470174 +670 49 optimizer.lr 0.004809779262445151 +670 49 negative_sampler.num_negs_per_pos 39.0 +670 49 training.batch_size 1.0 +670 50 model.embedding_dim 0.0 +670 50 loss.margin 4.291737370839352 +670 50 loss.adversarial_temperature 0.2959234354110369 +670 50 optimizer.lr 0.005128441430986088 +670 50 
negative_sampler.num_negs_per_pos 14.0 +670 50 training.batch_size 1.0 +670 51 model.embedding_dim 0.0 +670 51 loss.margin 4.02104408879424 +670 51 loss.adversarial_temperature 0.9443364995013449 +670 51 optimizer.lr 0.001565248671293312 +670 51 negative_sampler.num_negs_per_pos 6.0 +670 51 training.batch_size 0.0 +670 52 model.embedding_dim 0.0 +670 52 loss.margin 5.917603793479153 +670 52 loss.adversarial_temperature 0.31661692718449963 +670 52 optimizer.lr 0.006802612694720214 +670 52 negative_sampler.num_negs_per_pos 11.0 +670 52 training.batch_size 1.0 +670 53 model.embedding_dim 2.0 +670 53 loss.margin 3.771757870656069 +670 53 loss.adversarial_temperature 0.8285339176302533 +670 53 optimizer.lr 0.016835141838540436 +670 53 negative_sampler.num_negs_per_pos 17.0 +670 53 training.batch_size 2.0 +670 54 model.embedding_dim 2.0 +670 54 loss.margin 19.562520169003328 +670 54 loss.adversarial_temperature 0.8807142586714952 +670 54 optimizer.lr 0.07332163642545236 +670 54 negative_sampler.num_negs_per_pos 28.0 +670 54 training.batch_size 2.0 +670 55 model.embedding_dim 1.0 +670 55 loss.margin 16.869766362537092 +670 55 loss.adversarial_temperature 0.8641188871326733 +670 55 optimizer.lr 0.0010717711701709177 +670 55 negative_sampler.num_negs_per_pos 48.0 +670 55 training.batch_size 1.0 +670 56 model.embedding_dim 0.0 +670 56 loss.margin 29.997873298962375 +670 56 loss.adversarial_temperature 0.48145068019640247 +670 56 optimizer.lr 0.012092158775915808 +670 56 negative_sampler.num_negs_per_pos 81.0 +670 56 training.batch_size 2.0 +670 57 model.embedding_dim 0.0 +670 57 loss.margin 25.78400517067987 +670 57 loss.adversarial_temperature 0.967359552693394 +670 57 optimizer.lr 0.0025719910577114676 +670 57 negative_sampler.num_negs_per_pos 14.0 +670 57 training.batch_size 2.0 +670 58 model.embedding_dim 0.0 +670 58 loss.margin 6.116456762319804 +670 58 loss.adversarial_temperature 0.3002852778025381 +670 58 optimizer.lr 0.0018799656491887012 +670 58 
negative_sampler.num_negs_per_pos 46.0 +670 58 training.batch_size 1.0 +670 59 model.embedding_dim 1.0 +670 59 loss.margin 16.957291994211076 +670 59 loss.adversarial_temperature 0.8891405064306368 +670 59 optimizer.lr 0.0013782139010034044 +670 59 negative_sampler.num_negs_per_pos 8.0 +670 59 training.batch_size 1.0 +670 60 model.embedding_dim 0.0 +670 60 loss.margin 11.065671181080752 +670 60 loss.adversarial_temperature 0.8244933501449654 +670 60 optimizer.lr 0.004155407775937179 +670 60 negative_sampler.num_negs_per_pos 88.0 +670 60 training.batch_size 0.0 +670 61 model.embedding_dim 1.0 +670 61 loss.margin 14.396139758218098 +670 61 loss.adversarial_temperature 0.2762656914963288 +670 61 optimizer.lr 0.08396916791299955 +670 61 negative_sampler.num_negs_per_pos 11.0 +670 61 training.batch_size 1.0 +670 62 model.embedding_dim 2.0 +670 62 loss.margin 26.789788277034866 +670 62 loss.adversarial_temperature 0.8122214347021187 +670 62 optimizer.lr 0.002517768461271731 +670 62 negative_sampler.num_negs_per_pos 15.0 +670 62 training.batch_size 2.0 +670 63 model.embedding_dim 1.0 +670 63 loss.margin 21.323061210489577 +670 63 loss.adversarial_temperature 0.9417782898350832 +670 63 optimizer.lr 0.05457561360917915 +670 63 negative_sampler.num_negs_per_pos 56.0 +670 63 training.batch_size 1.0 +670 64 model.embedding_dim 0.0 +670 64 loss.margin 6.912196673186611 +670 64 loss.adversarial_temperature 0.4646867023080654 +670 64 optimizer.lr 0.07556349777801807 +670 64 negative_sampler.num_negs_per_pos 16.0 +670 64 training.batch_size 0.0 +670 65 model.embedding_dim 1.0 +670 65 loss.margin 14.778219938114537 +670 65 loss.adversarial_temperature 0.6192527239300252 +670 65 optimizer.lr 0.026789257218775744 +670 65 negative_sampler.num_negs_per_pos 90.0 +670 65 training.batch_size 0.0 +670 66 model.embedding_dim 2.0 +670 66 loss.margin 15.340970815382919 +670 66 loss.adversarial_temperature 0.20904559642824833 +670 66 optimizer.lr 0.002732260012983209 +670 66 
negative_sampler.num_negs_per_pos 53.0 +670 66 training.batch_size 1.0 +670 67 model.embedding_dim 1.0 +670 67 loss.margin 21.337253863187108 +670 67 loss.adversarial_temperature 0.24820694921017308 +670 67 optimizer.lr 0.0051521324645584085 +670 67 negative_sampler.num_negs_per_pos 49.0 +670 67 training.batch_size 2.0 +670 68 model.embedding_dim 2.0 +670 68 loss.margin 24.654140957706996 +670 68 loss.adversarial_temperature 0.25633711219269456 +670 68 optimizer.lr 0.027932455093570743 +670 68 negative_sampler.num_negs_per_pos 50.0 +670 68 training.batch_size 2.0 +670 69 model.embedding_dim 2.0 +670 69 loss.margin 20.95605854261699 +670 69 loss.adversarial_temperature 0.25266206086955145 +670 69 optimizer.lr 0.05071897259206216 +670 69 negative_sampler.num_negs_per_pos 5.0 +670 69 training.batch_size 0.0 +670 70 model.embedding_dim 0.0 +670 70 loss.margin 22.73264978333779 +670 70 loss.adversarial_temperature 0.24061540909949541 +670 70 optimizer.lr 0.01634532047702127 +670 70 negative_sampler.num_negs_per_pos 7.0 +670 70 training.batch_size 1.0 +670 71 model.embedding_dim 2.0 +670 71 loss.margin 27.701282621768886 +670 71 loss.adversarial_temperature 0.7787997908677285 +670 71 optimizer.lr 0.00386674934448802 +670 71 negative_sampler.num_negs_per_pos 83.0 +670 71 training.batch_size 2.0 +670 72 model.embedding_dim 2.0 +670 72 loss.margin 2.9652588065052212 +670 72 loss.adversarial_temperature 0.6328301554583603 +670 72 optimizer.lr 0.013600936646093364 +670 72 negative_sampler.num_negs_per_pos 0.0 +670 72 training.batch_size 1.0 +670 73 model.embedding_dim 1.0 +670 73 loss.margin 9.533962412381468 +670 73 loss.adversarial_temperature 0.6220606276123645 +670 73 optimizer.lr 0.06310432141446191 +670 73 negative_sampler.num_negs_per_pos 97.0 +670 73 training.batch_size 1.0 +670 74 model.embedding_dim 2.0 +670 74 loss.margin 16.188824444991774 +670 74 loss.adversarial_temperature 0.5577853560117384 +670 74 optimizer.lr 0.08234213355299037 +670 74 
negative_sampler.num_negs_per_pos 67.0 +670 74 training.batch_size 0.0 +670 75 model.embedding_dim 2.0 +670 75 loss.margin 11.280608578843694 +670 75 loss.adversarial_temperature 0.13306235153519475 +670 75 optimizer.lr 0.0063235181985252025 +670 75 negative_sampler.num_negs_per_pos 92.0 +670 75 training.batch_size 1.0 +670 76 model.embedding_dim 1.0 +670 76 loss.margin 23.178440404754078 +670 76 loss.adversarial_temperature 0.997828973836898 +670 76 optimizer.lr 0.007919453470760233 +670 76 negative_sampler.num_negs_per_pos 93.0 +670 76 training.batch_size 2.0 +670 77 model.embedding_dim 2.0 +670 77 loss.margin 4.800089183641954 +670 77 loss.adversarial_temperature 0.9524155983880397 +670 77 optimizer.lr 0.045604682537440516 +670 77 negative_sampler.num_negs_per_pos 95.0 +670 77 training.batch_size 1.0 +670 78 model.embedding_dim 0.0 +670 78 loss.margin 21.22766009582711 +670 78 loss.adversarial_temperature 0.467620883954624 +670 78 optimizer.lr 0.0014522129549053523 +670 78 negative_sampler.num_negs_per_pos 32.0 +670 78 training.batch_size 1.0 +670 79 model.embedding_dim 1.0 +670 79 loss.margin 14.949865193193068 +670 79 loss.adversarial_temperature 0.490120280848382 +670 79 optimizer.lr 0.001312881189571664 +670 79 negative_sampler.num_negs_per_pos 44.0 +670 79 training.batch_size 0.0 +670 80 model.embedding_dim 1.0 +670 80 loss.margin 20.22390324405075 +670 80 loss.adversarial_temperature 0.12772331370321338 +670 80 optimizer.lr 0.0046513270371770525 +670 80 negative_sampler.num_negs_per_pos 71.0 +670 80 training.batch_size 0.0 +670 81 model.embedding_dim 1.0 +670 81 loss.margin 6.4758049005925855 +670 81 loss.adversarial_temperature 0.6207261596648119 +670 81 optimizer.lr 0.08698972868498929 +670 81 negative_sampler.num_negs_per_pos 17.0 +670 81 training.batch_size 0.0 +670 82 model.embedding_dim 2.0 +670 82 loss.margin 19.231936362836457 +670 82 loss.adversarial_temperature 0.31433796301199196 +670 82 optimizer.lr 0.008658800715553323 +670 82 
negative_sampler.num_negs_per_pos 33.0 +670 82 training.batch_size 2.0 +670 83 model.embedding_dim 0.0 +670 83 loss.margin 14.009191949304906 +670 83 loss.adversarial_temperature 0.34942228541976916 +670 83 optimizer.lr 0.008579527298660976 +670 83 negative_sampler.num_negs_per_pos 78.0 +670 83 training.batch_size 1.0 +670 84 model.embedding_dim 2.0 +670 84 loss.margin 16.832499060970598 +670 84 loss.adversarial_temperature 0.34711869515645366 +670 84 optimizer.lr 0.0022983154871604274 +670 84 negative_sampler.num_negs_per_pos 4.0 +670 84 training.batch_size 2.0 +670 85 model.embedding_dim 2.0 +670 85 loss.margin 1.8644811434581223 +670 85 loss.adversarial_temperature 0.5952385995713029 +670 85 optimizer.lr 0.0012913997760795784 +670 85 negative_sampler.num_negs_per_pos 86.0 +670 85 training.batch_size 0.0 +670 86 model.embedding_dim 0.0 +670 86 loss.margin 12.801681724721773 +670 86 loss.adversarial_temperature 0.27557292944632394 +670 86 optimizer.lr 0.0016082740056393159 +670 86 negative_sampler.num_negs_per_pos 99.0 +670 86 training.batch_size 2.0 +670 87 model.embedding_dim 1.0 +670 87 loss.margin 24.33291082387315 +670 87 loss.adversarial_temperature 0.8242896465812906 +670 87 optimizer.lr 0.005163320735851405 +670 87 negative_sampler.num_negs_per_pos 88.0 +670 87 training.batch_size 0.0 +670 88 model.embedding_dim 1.0 +670 88 loss.margin 3.648247710247949 +670 88 loss.adversarial_temperature 0.5335127733672452 +670 88 optimizer.lr 0.022568430478868178 +670 88 negative_sampler.num_negs_per_pos 31.0 +670 88 training.batch_size 2.0 +670 89 model.embedding_dim 2.0 +670 89 loss.margin 20.052189649101447 +670 89 loss.adversarial_temperature 0.9736137540966728 +670 89 optimizer.lr 0.010652216760871901 +670 89 negative_sampler.num_negs_per_pos 7.0 +670 89 training.batch_size 1.0 +670 90 model.embedding_dim 1.0 +670 90 loss.margin 20.056724735020616 +670 90 loss.adversarial_temperature 0.45745082058418984 +670 90 optimizer.lr 0.003726974819952767 +670 90 
negative_sampler.num_negs_per_pos 78.0 +670 90 training.batch_size 1.0 +670 91 model.embedding_dim 1.0 +670 91 loss.margin 25.93916734041942 +670 91 loss.adversarial_temperature 0.3014808327462446 +670 91 optimizer.lr 0.006533129960171883 +670 91 negative_sampler.num_negs_per_pos 49.0 +670 91 training.batch_size 2.0 +670 92 model.embedding_dim 0.0 +670 92 loss.margin 19.617582049096413 +670 92 loss.adversarial_temperature 0.5088528713465154 +670 92 optimizer.lr 0.03101726344557726 +670 92 negative_sampler.num_negs_per_pos 37.0 +670 92 training.batch_size 1.0 +670 93 model.embedding_dim 0.0 +670 93 loss.margin 15.978043681718841 +670 93 loss.adversarial_temperature 0.7254410637238605 +670 93 optimizer.lr 0.003934685365494575 +670 93 negative_sampler.num_negs_per_pos 9.0 +670 93 training.batch_size 1.0 +670 94 model.embedding_dim 0.0 +670 94 loss.margin 24.40249133255406 +670 94 loss.adversarial_temperature 0.9933380417001869 +670 94 optimizer.lr 0.06266497404287866 +670 94 negative_sampler.num_negs_per_pos 54.0 +670 94 training.batch_size 2.0 +670 95 model.embedding_dim 0.0 +670 95 loss.margin 2.390214404273499 +670 95 loss.adversarial_temperature 0.37686146235018636 +670 95 optimizer.lr 0.00154443874984393 +670 95 negative_sampler.num_negs_per_pos 63.0 +670 95 training.batch_size 0.0 +670 96 model.embedding_dim 0.0 +670 96 loss.margin 6.664413796274935 +670 96 loss.adversarial_temperature 0.9301960922219858 +670 96 optimizer.lr 0.00548559266208788 +670 96 negative_sampler.num_negs_per_pos 76.0 +670 96 training.batch_size 0.0 +670 97 model.embedding_dim 0.0 +670 97 loss.margin 9.464088306629312 +670 97 loss.adversarial_temperature 0.15595869278348298 +670 97 optimizer.lr 0.0020032420200315018 +670 97 negative_sampler.num_negs_per_pos 48.0 +670 97 training.batch_size 1.0 +670 98 model.embedding_dim 2.0 +670 98 loss.margin 16.414141042729213 +670 98 loss.adversarial_temperature 0.4707758559462092 +670 98 optimizer.lr 0.026136399688643744 +670 98 
negative_sampler.num_negs_per_pos 96.0 +670 98 training.batch_size 0.0 +670 99 model.embedding_dim 0.0 +670 99 loss.margin 26.554283658934835 +670 99 loss.adversarial_temperature 0.967696341430935 +670 99 optimizer.lr 0.0022093617900687057 +670 99 negative_sampler.num_negs_per_pos 32.0 +670 99 training.batch_size 2.0 +670 100 model.embedding_dim 0.0 +670 100 loss.margin 23.481190685971377 +670 100 loss.adversarial_temperature 0.8581388746883625 +670 100 optimizer.lr 0.0854398357086925 +670 100 negative_sampler.num_negs_per_pos 52.0 +670 100 training.batch_size 2.0 +670 1 dataset """kinships""" +670 1 model """simple""" +670 1 loss """nssa""" +670 1 regularizer """no""" +670 1 optimizer """adam""" +670 1 training_loop """owa""" +670 1 negative_sampler """basic""" +670 1 evaluator """rankbased""" +670 2 dataset """kinships""" +670 2 model """simple""" +670 2 loss """nssa""" +670 2 regularizer """no""" +670 2 optimizer """adam""" +670 2 training_loop """owa""" +670 2 negative_sampler """basic""" +670 2 evaluator """rankbased""" +670 3 dataset """kinships""" +670 3 model """simple""" +670 3 loss """nssa""" +670 3 regularizer """no""" +670 3 optimizer """adam""" +670 3 training_loop """owa""" +670 3 negative_sampler """basic""" +670 3 evaluator """rankbased""" +670 4 dataset """kinships""" +670 4 model """simple""" +670 4 loss """nssa""" +670 4 regularizer """no""" +670 4 optimizer """adam""" +670 4 training_loop """owa""" +670 4 negative_sampler """basic""" +670 4 evaluator """rankbased""" +670 5 dataset """kinships""" +670 5 model """simple""" +670 5 loss """nssa""" +670 5 regularizer """no""" +670 5 optimizer """adam""" +670 5 training_loop """owa""" +670 5 negative_sampler """basic""" +670 5 evaluator """rankbased""" +670 6 dataset """kinships""" +670 6 model """simple""" +670 6 loss """nssa""" +670 6 regularizer """no""" +670 6 optimizer """adam""" +670 6 training_loop """owa""" +670 6 negative_sampler """basic""" +670 6 evaluator """rankbased""" +670 7 dataset 
"""kinships""" +670 7 model """simple""" +670 7 loss """nssa""" +670 7 regularizer """no""" +670 7 optimizer """adam""" +670 7 training_loop """owa""" +670 7 negative_sampler """basic""" +670 7 evaluator """rankbased""" +670 8 dataset """kinships""" +670 8 model """simple""" +670 8 loss """nssa""" +670 8 regularizer """no""" +670 8 optimizer """adam""" +670 8 training_loop """owa""" +670 8 negative_sampler """basic""" +670 8 evaluator """rankbased""" +670 9 dataset """kinships""" +670 9 model """simple""" +670 9 loss """nssa""" +670 9 regularizer """no""" +670 9 optimizer """adam""" +670 9 training_loop """owa""" +670 9 negative_sampler """basic""" +670 9 evaluator """rankbased""" +670 10 dataset """kinships""" +670 10 model """simple""" +670 10 loss """nssa""" +670 10 regularizer """no""" +670 10 optimizer """adam""" +670 10 training_loop """owa""" +670 10 negative_sampler """basic""" +670 10 evaluator """rankbased""" +670 11 dataset """kinships""" +670 11 model """simple""" +670 11 loss """nssa""" +670 11 regularizer """no""" +670 11 optimizer """adam""" +670 11 training_loop """owa""" +670 11 negative_sampler """basic""" +670 11 evaluator """rankbased""" +670 12 dataset """kinships""" +670 12 model """simple""" +670 12 loss """nssa""" +670 12 regularizer """no""" +670 12 optimizer """adam""" +670 12 training_loop """owa""" +670 12 negative_sampler """basic""" +670 12 evaluator """rankbased""" +670 13 dataset """kinships""" +670 13 model """simple""" +670 13 loss """nssa""" +670 13 regularizer """no""" +670 13 optimizer """adam""" +670 13 training_loop """owa""" +670 13 negative_sampler """basic""" +670 13 evaluator """rankbased""" +670 14 dataset """kinships""" +670 14 model """simple""" +670 14 loss """nssa""" +670 14 regularizer """no""" +670 14 optimizer """adam""" +670 14 training_loop """owa""" +670 14 negative_sampler """basic""" +670 14 evaluator """rankbased""" +670 15 dataset """kinships""" +670 15 model """simple""" +670 15 loss """nssa""" +670 15 
regularizer """no""" +670 15 optimizer """adam""" +670 15 training_loop """owa""" +670 15 negative_sampler """basic""" +670 15 evaluator """rankbased""" +670 16 dataset """kinships""" +670 16 model """simple""" +670 16 loss """nssa""" +670 16 regularizer """no""" +670 16 optimizer """adam""" +670 16 training_loop """owa""" +670 16 negative_sampler """basic""" +670 16 evaluator """rankbased""" +670 17 dataset """kinships""" +670 17 model """simple""" +670 17 loss """nssa""" +670 17 regularizer """no""" +670 17 optimizer """adam""" +670 17 training_loop """owa""" +670 17 negative_sampler """basic""" +670 17 evaluator """rankbased""" +670 18 dataset """kinships""" +670 18 model """simple""" +670 18 loss """nssa""" +670 18 regularizer """no""" +670 18 optimizer """adam""" +670 18 training_loop """owa""" +670 18 negative_sampler """basic""" +670 18 evaluator """rankbased""" +670 19 dataset """kinships""" +670 19 model """simple""" +670 19 loss """nssa""" +670 19 regularizer """no""" +670 19 optimizer """adam""" +670 19 training_loop """owa""" +670 19 negative_sampler """basic""" +670 19 evaluator """rankbased""" +670 20 dataset """kinships""" +670 20 model """simple""" +670 20 loss """nssa""" +670 20 regularizer """no""" +670 20 optimizer """adam""" +670 20 training_loop """owa""" +670 20 negative_sampler """basic""" +670 20 evaluator """rankbased""" +670 21 dataset """kinships""" +670 21 model """simple""" +670 21 loss """nssa""" +670 21 regularizer """no""" +670 21 optimizer """adam""" +670 21 training_loop """owa""" +670 21 negative_sampler """basic""" +670 21 evaluator """rankbased""" +670 22 dataset """kinships""" +670 22 model """simple""" +670 22 loss """nssa""" +670 22 regularizer """no""" +670 22 optimizer """adam""" +670 22 training_loop """owa""" +670 22 negative_sampler """basic""" +670 22 evaluator """rankbased""" +670 23 dataset """kinships""" +670 23 model """simple""" +670 23 loss """nssa""" +670 23 regularizer """no""" +670 23 optimizer """adam""" +670 
23 training_loop """owa""" +670 23 negative_sampler """basic""" +670 23 evaluator """rankbased""" +670 24 dataset """kinships""" +670 24 model """simple""" +670 24 loss """nssa""" +670 24 regularizer """no""" +670 24 optimizer """adam""" +670 24 training_loop """owa""" +670 24 negative_sampler """basic""" +670 24 evaluator """rankbased""" +670 25 dataset """kinships""" +670 25 model """simple""" +670 25 loss """nssa""" +670 25 regularizer """no""" +670 25 optimizer """adam""" +670 25 training_loop """owa""" +670 25 negative_sampler """basic""" +670 25 evaluator """rankbased""" +670 26 dataset """kinships""" +670 26 model """simple""" +670 26 loss """nssa""" +670 26 regularizer """no""" +670 26 optimizer """adam""" +670 26 training_loop """owa""" +670 26 negative_sampler """basic""" +670 26 evaluator """rankbased""" +670 27 dataset """kinships""" +670 27 model """simple""" +670 27 loss """nssa""" +670 27 regularizer """no""" +670 27 optimizer """adam""" +670 27 training_loop """owa""" +670 27 negative_sampler """basic""" +670 27 evaluator """rankbased""" +670 28 dataset """kinships""" +670 28 model """simple""" +670 28 loss """nssa""" +670 28 regularizer """no""" +670 28 optimizer """adam""" +670 28 training_loop """owa""" +670 28 negative_sampler """basic""" +670 28 evaluator """rankbased""" +670 29 dataset """kinships""" +670 29 model """simple""" +670 29 loss """nssa""" +670 29 regularizer """no""" +670 29 optimizer """adam""" +670 29 training_loop """owa""" +670 29 negative_sampler """basic""" +670 29 evaluator """rankbased""" +670 30 dataset """kinships""" +670 30 model """simple""" +670 30 loss """nssa""" +670 30 regularizer """no""" +670 30 optimizer """adam""" +670 30 training_loop """owa""" +670 30 negative_sampler """basic""" +670 30 evaluator """rankbased""" +670 31 dataset """kinships""" +670 31 model """simple""" +670 31 loss """nssa""" +670 31 regularizer """no""" +670 31 optimizer """adam""" +670 31 training_loop """owa""" +670 31 negative_sampler 
"""basic""" +670 31 evaluator """rankbased""" +670 32 dataset """kinships""" +670 32 model """simple""" +670 32 loss """nssa""" +670 32 regularizer """no""" +670 32 optimizer """adam""" +670 32 training_loop """owa""" +670 32 negative_sampler """basic""" +670 32 evaluator """rankbased""" +670 33 dataset """kinships""" +670 33 model """simple""" +670 33 loss """nssa""" +670 33 regularizer """no""" +670 33 optimizer """adam""" +670 33 training_loop """owa""" +670 33 negative_sampler """basic""" +670 33 evaluator """rankbased""" +670 34 dataset """kinships""" +670 34 model """simple""" +670 34 loss """nssa""" +670 34 regularizer """no""" +670 34 optimizer """adam""" +670 34 training_loop """owa""" +670 34 negative_sampler """basic""" +670 34 evaluator """rankbased""" +670 35 dataset """kinships""" +670 35 model """simple""" +670 35 loss """nssa""" +670 35 regularizer """no""" +670 35 optimizer """adam""" +670 35 training_loop """owa""" +670 35 negative_sampler """basic""" +670 35 evaluator """rankbased""" +670 36 dataset """kinships""" +670 36 model """simple""" +670 36 loss """nssa""" +670 36 regularizer """no""" +670 36 optimizer """adam""" +670 36 training_loop """owa""" +670 36 negative_sampler """basic""" +670 36 evaluator """rankbased""" +670 37 dataset """kinships""" +670 37 model """simple""" +670 37 loss """nssa""" +670 37 regularizer """no""" +670 37 optimizer """adam""" +670 37 training_loop """owa""" +670 37 negative_sampler """basic""" +670 37 evaluator """rankbased""" +670 38 dataset """kinships""" +670 38 model """simple""" +670 38 loss """nssa""" +670 38 regularizer """no""" +670 38 optimizer """adam""" +670 38 training_loop """owa""" +670 38 negative_sampler """basic""" +670 38 evaluator """rankbased""" +670 39 dataset """kinships""" +670 39 model """simple""" +670 39 loss """nssa""" +670 39 regularizer """no""" +670 39 optimizer """adam""" +670 39 training_loop """owa""" +670 39 negative_sampler """basic""" +670 39 evaluator """rankbased""" +670 40 
dataset """kinships""" +670 40 model """simple""" +670 40 loss """nssa""" +670 40 regularizer """no""" +670 40 optimizer """adam""" +670 40 training_loop """owa""" +670 40 negative_sampler """basic""" +670 40 evaluator """rankbased""" +670 41 dataset """kinships""" +670 41 model """simple""" +670 41 loss """nssa""" +670 41 regularizer """no""" +670 41 optimizer """adam""" +670 41 training_loop """owa""" +670 41 negative_sampler """basic""" +670 41 evaluator """rankbased""" +670 42 dataset """kinships""" +670 42 model """simple""" +670 42 loss """nssa""" +670 42 regularizer """no""" +670 42 optimizer """adam""" +670 42 training_loop """owa""" +670 42 negative_sampler """basic""" +670 42 evaluator """rankbased""" +670 43 dataset """kinships""" +670 43 model """simple""" +670 43 loss """nssa""" +670 43 regularizer """no""" +670 43 optimizer """adam""" +670 43 training_loop """owa""" +670 43 negative_sampler """basic""" +670 43 evaluator """rankbased""" +670 44 dataset """kinships""" +670 44 model """simple""" +670 44 loss """nssa""" +670 44 regularizer """no""" +670 44 optimizer """adam""" +670 44 training_loop """owa""" +670 44 negative_sampler """basic""" +670 44 evaluator """rankbased""" +670 45 dataset """kinships""" +670 45 model """simple""" +670 45 loss """nssa""" +670 45 regularizer """no""" +670 45 optimizer """adam""" +670 45 training_loop """owa""" +670 45 negative_sampler """basic""" +670 45 evaluator """rankbased""" +670 46 dataset """kinships""" +670 46 model """simple""" +670 46 loss """nssa""" +670 46 regularizer """no""" +670 46 optimizer """adam""" +670 46 training_loop """owa""" +670 46 negative_sampler """basic""" +670 46 evaluator """rankbased""" +670 47 dataset """kinships""" +670 47 model """simple""" +670 47 loss """nssa""" +670 47 regularizer """no""" +670 47 optimizer """adam""" +670 47 training_loop """owa""" +670 47 negative_sampler """basic""" +670 47 evaluator """rankbased""" +670 48 dataset """kinships""" +670 48 model """simple""" +670 
48 loss """nssa""" +670 48 regularizer """no""" +670 48 optimizer """adam""" +670 48 training_loop """owa""" +670 48 negative_sampler """basic""" +670 48 evaluator """rankbased""" +670 49 dataset """kinships""" +670 49 model """simple""" +670 49 loss """nssa""" +670 49 regularizer """no""" +670 49 optimizer """adam""" +670 49 training_loop """owa""" +670 49 negative_sampler """basic""" +670 49 evaluator """rankbased""" +670 50 dataset """kinships""" +670 50 model """simple""" +670 50 loss """nssa""" +670 50 regularizer """no""" +670 50 optimizer """adam""" +670 50 training_loop """owa""" +670 50 negative_sampler """basic""" +670 50 evaluator """rankbased""" +670 51 dataset """kinships""" +670 51 model """simple""" +670 51 loss """nssa""" +670 51 regularizer """no""" +670 51 optimizer """adam""" +670 51 training_loop """owa""" +670 51 negative_sampler """basic""" +670 51 evaluator """rankbased""" +670 52 dataset """kinships""" +670 52 model """simple""" +670 52 loss """nssa""" +670 52 regularizer """no""" +670 52 optimizer """adam""" +670 52 training_loop """owa""" +670 52 negative_sampler """basic""" +670 52 evaluator """rankbased""" +670 53 dataset """kinships""" +670 53 model """simple""" +670 53 loss """nssa""" +670 53 regularizer """no""" +670 53 optimizer """adam""" +670 53 training_loop """owa""" +670 53 negative_sampler """basic""" +670 53 evaluator """rankbased""" +670 54 dataset """kinships""" +670 54 model """simple""" +670 54 loss """nssa""" +670 54 regularizer """no""" +670 54 optimizer """adam""" +670 54 training_loop """owa""" +670 54 negative_sampler """basic""" +670 54 evaluator """rankbased""" +670 55 dataset """kinships""" +670 55 model """simple""" +670 55 loss """nssa""" +670 55 regularizer """no""" +670 55 optimizer """adam""" +670 55 training_loop """owa""" +670 55 negative_sampler """basic""" +670 55 evaluator """rankbased""" +670 56 dataset """kinships""" +670 56 model """simple""" +670 56 loss """nssa""" +670 56 regularizer """no""" +670 56 
optimizer """adam""" +670 56 training_loop """owa""" +670 56 negative_sampler """basic""" +670 56 evaluator """rankbased""" +670 57 dataset """kinships""" +670 57 model """simple""" +670 57 loss """nssa""" +670 57 regularizer """no""" +670 57 optimizer """adam""" +670 57 training_loop """owa""" +670 57 negative_sampler """basic""" +670 57 evaluator """rankbased""" +670 58 dataset """kinships""" +670 58 model """simple""" +670 58 loss """nssa""" +670 58 regularizer """no""" +670 58 optimizer """adam""" +670 58 training_loop """owa""" +670 58 negative_sampler """basic""" +670 58 evaluator """rankbased""" +670 59 dataset """kinships""" +670 59 model """simple""" +670 59 loss """nssa""" +670 59 regularizer """no""" +670 59 optimizer """adam""" +670 59 training_loop """owa""" +670 59 negative_sampler """basic""" +670 59 evaluator """rankbased""" +670 60 dataset """kinships""" +670 60 model """simple""" +670 60 loss """nssa""" +670 60 regularizer """no""" +670 60 optimizer """adam""" +670 60 training_loop """owa""" +670 60 negative_sampler """basic""" +670 60 evaluator """rankbased""" +670 61 dataset """kinships""" +670 61 model """simple""" +670 61 loss """nssa""" +670 61 regularizer """no""" +670 61 optimizer """adam""" +670 61 training_loop """owa""" +670 61 negative_sampler """basic""" +670 61 evaluator """rankbased""" +670 62 dataset """kinships""" +670 62 model """simple""" +670 62 loss """nssa""" +670 62 regularizer """no""" +670 62 optimizer """adam""" +670 62 training_loop """owa""" +670 62 negative_sampler """basic""" +670 62 evaluator """rankbased""" +670 63 dataset """kinships""" +670 63 model """simple""" +670 63 loss """nssa""" +670 63 regularizer """no""" +670 63 optimizer """adam""" +670 63 training_loop """owa""" +670 63 negative_sampler """basic""" +670 63 evaluator """rankbased""" +670 64 dataset """kinships""" +670 64 model """simple""" +670 64 loss """nssa""" +670 64 regularizer """no""" +670 64 optimizer """adam""" +670 64 training_loop """owa""" 
+670 64 negative_sampler """basic""" +670 64 evaluator """rankbased""" +670 65 dataset """kinships""" +670 65 model """simple""" +670 65 loss """nssa""" +670 65 regularizer """no""" +670 65 optimizer """adam""" +670 65 training_loop """owa""" +670 65 negative_sampler """basic""" +670 65 evaluator """rankbased""" +670 66 dataset """kinships""" +670 66 model """simple""" +670 66 loss """nssa""" +670 66 regularizer """no""" +670 66 optimizer """adam""" +670 66 training_loop """owa""" +670 66 negative_sampler """basic""" +670 66 evaluator """rankbased""" +670 67 dataset """kinships""" +670 67 model """simple""" +670 67 loss """nssa""" +670 67 regularizer """no""" +670 67 optimizer """adam""" +670 67 training_loop """owa""" +670 67 negative_sampler """basic""" +670 67 evaluator """rankbased""" +670 68 dataset """kinships""" +670 68 model """simple""" +670 68 loss """nssa""" +670 68 regularizer """no""" +670 68 optimizer """adam""" +670 68 training_loop """owa""" +670 68 negative_sampler """basic""" +670 68 evaluator """rankbased""" +670 69 dataset """kinships""" +670 69 model """simple""" +670 69 loss """nssa""" +670 69 regularizer """no""" +670 69 optimizer """adam""" +670 69 training_loop """owa""" +670 69 negative_sampler """basic""" +670 69 evaluator """rankbased""" +670 70 dataset """kinships""" +670 70 model """simple""" +670 70 loss """nssa""" +670 70 regularizer """no""" +670 70 optimizer """adam""" +670 70 training_loop """owa""" +670 70 negative_sampler """basic""" +670 70 evaluator """rankbased""" +670 71 dataset """kinships""" +670 71 model """simple""" +670 71 loss """nssa""" +670 71 regularizer """no""" +670 71 optimizer """adam""" +670 71 training_loop """owa""" +670 71 negative_sampler """basic""" +670 71 evaluator """rankbased""" +670 72 dataset """kinships""" +670 72 model """simple""" +670 72 loss """nssa""" +670 72 regularizer """no""" +670 72 optimizer """adam""" +670 72 training_loop """owa""" +670 72 negative_sampler """basic""" +670 72 evaluator 
"""rankbased""" +670 73 dataset """kinships""" +670 73 model """simple""" +670 73 loss """nssa""" +670 73 regularizer """no""" +670 73 optimizer """adam""" +670 73 training_loop """owa""" +670 73 negative_sampler """basic""" +670 73 evaluator """rankbased""" +670 74 dataset """kinships""" +670 74 model """simple""" +670 74 loss """nssa""" +670 74 regularizer """no""" +670 74 optimizer """adam""" +670 74 training_loop """owa""" +670 74 negative_sampler """basic""" +670 74 evaluator """rankbased""" +670 75 dataset """kinships""" +670 75 model """simple""" +670 75 loss """nssa""" +670 75 regularizer """no""" +670 75 optimizer """adam""" +670 75 training_loop """owa""" +670 75 negative_sampler """basic""" +670 75 evaluator """rankbased""" +670 76 dataset """kinships""" +670 76 model """simple""" +670 76 loss """nssa""" +670 76 regularizer """no""" +670 76 optimizer """adam""" +670 76 training_loop """owa""" +670 76 negative_sampler """basic""" +670 76 evaluator """rankbased""" +670 77 dataset """kinships""" +670 77 model """simple""" +670 77 loss """nssa""" +670 77 regularizer """no""" +670 77 optimizer """adam""" +670 77 training_loop """owa""" +670 77 negative_sampler """basic""" +670 77 evaluator """rankbased""" +670 78 dataset """kinships""" +670 78 model """simple""" +670 78 loss """nssa""" +670 78 regularizer """no""" +670 78 optimizer """adam""" +670 78 training_loop """owa""" +670 78 negative_sampler """basic""" +670 78 evaluator """rankbased""" +670 79 dataset """kinships""" +670 79 model """simple""" +670 79 loss """nssa""" +670 79 regularizer """no""" +670 79 optimizer """adam""" +670 79 training_loop """owa""" +670 79 negative_sampler """basic""" +670 79 evaluator """rankbased""" +670 80 dataset """kinships""" +670 80 model """simple""" +670 80 loss """nssa""" +670 80 regularizer """no""" +670 80 optimizer """adam""" +670 80 training_loop """owa""" +670 80 negative_sampler """basic""" +670 80 evaluator """rankbased""" +670 81 dataset """kinships""" +670 81 
model """simple""" +670 81 loss """nssa""" +670 81 regularizer """no""" +670 81 optimizer """adam""" +670 81 training_loop """owa""" +670 81 negative_sampler """basic""" +670 81 evaluator """rankbased""" +670 82 dataset """kinships""" +670 82 model """simple""" +670 82 loss """nssa""" +670 82 regularizer """no""" +670 82 optimizer """adam""" +670 82 training_loop """owa""" +670 82 negative_sampler """basic""" +670 82 evaluator """rankbased""" +670 83 dataset """kinships""" +670 83 model """simple""" +670 83 loss """nssa""" +670 83 regularizer """no""" +670 83 optimizer """adam""" +670 83 training_loop """owa""" +670 83 negative_sampler """basic""" +670 83 evaluator """rankbased""" +670 84 dataset """kinships""" +670 84 model """simple""" +670 84 loss """nssa""" +670 84 regularizer """no""" +670 84 optimizer """adam""" +670 84 training_loop """owa""" +670 84 negative_sampler """basic""" +670 84 evaluator """rankbased""" +670 85 dataset """kinships""" +670 85 model """simple""" +670 85 loss """nssa""" +670 85 regularizer """no""" +670 85 optimizer """adam""" +670 85 training_loop """owa""" +670 85 negative_sampler """basic""" +670 85 evaluator """rankbased""" +670 86 dataset """kinships""" +670 86 model """simple""" +670 86 loss """nssa""" +670 86 regularizer """no""" +670 86 optimizer """adam""" +670 86 training_loop """owa""" +670 86 negative_sampler """basic""" +670 86 evaluator """rankbased""" +670 87 dataset """kinships""" +670 87 model """simple""" +670 87 loss """nssa""" +670 87 regularizer """no""" +670 87 optimizer """adam""" +670 87 training_loop """owa""" +670 87 negative_sampler """basic""" +670 87 evaluator """rankbased""" +670 88 dataset """kinships""" +670 88 model """simple""" +670 88 loss """nssa""" +670 88 regularizer """no""" +670 88 optimizer """adam""" +670 88 training_loop """owa""" +670 88 negative_sampler """basic""" +670 88 evaluator """rankbased""" +670 89 dataset """kinships""" +670 89 model """simple""" +670 89 loss """nssa""" +670 89 
regularizer """no""" +670 89 optimizer """adam""" +670 89 training_loop """owa""" +670 89 negative_sampler """basic""" +670 89 evaluator """rankbased""" +670 90 dataset """kinships""" +670 90 model """simple""" +670 90 loss """nssa""" +670 90 regularizer """no""" +670 90 optimizer """adam""" +670 90 training_loop """owa""" +670 90 negative_sampler """basic""" +670 90 evaluator """rankbased""" +670 91 dataset """kinships""" +670 91 model """simple""" +670 91 loss """nssa""" +670 91 regularizer """no""" +670 91 optimizer """adam""" +670 91 training_loop """owa""" +670 91 negative_sampler """basic""" +670 91 evaluator """rankbased""" +670 92 dataset """kinships""" +670 92 model """simple""" +670 92 loss """nssa""" +670 92 regularizer """no""" +670 92 optimizer """adam""" +670 92 training_loop """owa""" +670 92 negative_sampler """basic""" +670 92 evaluator """rankbased""" +670 93 dataset """kinships""" +670 93 model """simple""" +670 93 loss """nssa""" +670 93 regularizer """no""" +670 93 optimizer """adam""" +670 93 training_loop """owa""" +670 93 negative_sampler """basic""" +670 93 evaluator """rankbased""" +670 94 dataset """kinships""" +670 94 model """simple""" +670 94 loss """nssa""" +670 94 regularizer """no""" +670 94 optimizer """adam""" +670 94 training_loop """owa""" +670 94 negative_sampler """basic""" +670 94 evaluator """rankbased""" +670 95 dataset """kinships""" +670 95 model """simple""" +670 95 loss """nssa""" +670 95 regularizer """no""" +670 95 optimizer """adam""" +670 95 training_loop """owa""" +670 95 negative_sampler """basic""" +670 95 evaluator """rankbased""" +670 96 dataset """kinships""" +670 96 model """simple""" +670 96 loss """nssa""" +670 96 regularizer """no""" +670 96 optimizer """adam""" +670 96 training_loop """owa""" +670 96 negative_sampler """basic""" +670 96 evaluator """rankbased""" +670 97 dataset """kinships""" +670 97 model """simple""" +670 97 loss """nssa""" +670 97 regularizer """no""" +670 97 optimizer """adam""" +670 
97 training_loop """owa""" +670 97 negative_sampler """basic""" +670 97 evaluator """rankbased""" +670 98 dataset """kinships""" +670 98 model """simple""" +670 98 loss """nssa""" +670 98 regularizer """no""" +670 98 optimizer """adam""" +670 98 training_loop """owa""" +670 98 negative_sampler """basic""" +670 98 evaluator """rankbased""" +670 99 dataset """kinships""" +670 99 model """simple""" +670 99 loss """nssa""" +670 99 regularizer """no""" +670 99 optimizer """adam""" +670 99 training_loop """owa""" +670 99 negative_sampler """basic""" +670 99 evaluator """rankbased""" +670 100 dataset """kinships""" +670 100 model """simple""" +670 100 loss """nssa""" +670 100 regularizer """no""" +670 100 optimizer """adam""" +670 100 training_loop """owa""" +670 100 negative_sampler """basic""" +670 100 evaluator """rankbased""" +671 1 model.embedding_dim 1.0 +671 1 optimizer.lr 0.01278034471360488 +671 1 negative_sampler.num_negs_per_pos 17.0 +671 1 training.batch_size 0.0 +671 2 model.embedding_dim 2.0 +671 2 optimizer.lr 0.030586273839841423 +671 2 negative_sampler.num_negs_per_pos 65.0 +671 2 training.batch_size 2.0 +671 3 model.embedding_dim 2.0 +671 3 optimizer.lr 0.031575552668811256 +671 3 negative_sampler.num_negs_per_pos 75.0 +671 3 training.batch_size 0.0 +671 4 model.embedding_dim 0.0 +671 4 optimizer.lr 0.035269371518892825 +671 4 negative_sampler.num_negs_per_pos 77.0 +671 4 training.batch_size 0.0 +671 5 model.embedding_dim 1.0 +671 5 optimizer.lr 0.0013115912405549444 +671 5 negative_sampler.num_negs_per_pos 46.0 +671 5 training.batch_size 0.0 +671 6 model.embedding_dim 2.0 +671 6 optimizer.lr 0.03148436482244268 +671 6 negative_sampler.num_negs_per_pos 24.0 +671 6 training.batch_size 0.0 +671 7 model.embedding_dim 0.0 +671 7 optimizer.lr 0.07435090434488642 +671 7 negative_sampler.num_negs_per_pos 30.0 +671 7 training.batch_size 0.0 +671 8 model.embedding_dim 2.0 +671 8 optimizer.lr 0.0011584689552095824 +671 8 negative_sampler.num_negs_per_pos 44.0 +671 
8 training.batch_size 2.0 +671 9 model.embedding_dim 1.0 +671 9 optimizer.lr 0.008018979043944266 +671 9 negative_sampler.num_negs_per_pos 29.0 +671 9 training.batch_size 1.0 +671 10 model.embedding_dim 0.0 +671 10 optimizer.lr 0.00343218522298132 +671 10 negative_sampler.num_negs_per_pos 28.0 +671 10 training.batch_size 0.0 +671 11 model.embedding_dim 1.0 +671 11 optimizer.lr 0.0028883819476366638 +671 11 negative_sampler.num_negs_per_pos 95.0 +671 11 training.batch_size 0.0 +671 12 model.embedding_dim 0.0 +671 12 optimizer.lr 0.027372791265315242 +671 12 negative_sampler.num_negs_per_pos 77.0 +671 12 training.batch_size 0.0 +671 13 model.embedding_dim 0.0 +671 13 optimizer.lr 0.07865432343257679 +671 13 negative_sampler.num_negs_per_pos 57.0 +671 13 training.batch_size 2.0 +671 14 model.embedding_dim 2.0 +671 14 optimizer.lr 0.06394639078555013 +671 14 negative_sampler.num_negs_per_pos 51.0 +671 14 training.batch_size 1.0 +671 15 model.embedding_dim 0.0 +671 15 optimizer.lr 0.0030530176565890608 +671 15 negative_sampler.num_negs_per_pos 87.0 +671 15 training.batch_size 0.0 +671 16 model.embedding_dim 0.0 +671 16 optimizer.lr 0.02813150719780943 +671 16 negative_sampler.num_negs_per_pos 25.0 +671 16 training.batch_size 1.0 +671 17 model.embedding_dim 2.0 +671 17 optimizer.lr 0.0036346405972702174 +671 17 negative_sampler.num_negs_per_pos 34.0 +671 17 training.batch_size 0.0 +671 18 model.embedding_dim 0.0 +671 18 optimizer.lr 0.005354125168757805 +671 18 negative_sampler.num_negs_per_pos 24.0 +671 18 training.batch_size 0.0 +671 19 model.embedding_dim 2.0 +671 19 optimizer.lr 0.0010726720690793154 +671 19 negative_sampler.num_negs_per_pos 38.0 +671 19 training.batch_size 0.0 +671 20 model.embedding_dim 2.0 +671 20 optimizer.lr 0.08594112825033826 +671 20 negative_sampler.num_negs_per_pos 47.0 +671 20 training.batch_size 1.0 +671 21 model.embedding_dim 0.0 +671 21 optimizer.lr 0.004171272884434536 +671 21 negative_sampler.num_negs_per_pos 8.0 +671 21 
training.batch_size 0.0 +671 22 model.embedding_dim 1.0 +671 22 optimizer.lr 0.003182400323396902 +671 22 negative_sampler.num_negs_per_pos 9.0 +671 22 training.batch_size 1.0 +671 23 model.embedding_dim 1.0 +671 23 optimizer.lr 0.07463486953883186 +671 23 negative_sampler.num_negs_per_pos 53.0 +671 23 training.batch_size 1.0 +671 24 model.embedding_dim 0.0 +671 24 optimizer.lr 0.0024755608884075016 +671 24 negative_sampler.num_negs_per_pos 57.0 +671 24 training.batch_size 2.0 +671 25 model.embedding_dim 2.0 +671 25 optimizer.lr 0.02550635180177852 +671 25 negative_sampler.num_negs_per_pos 45.0 +671 25 training.batch_size 1.0 +671 26 model.embedding_dim 0.0 +671 26 optimizer.lr 0.0022268261549978456 +671 26 negative_sampler.num_negs_per_pos 98.0 +671 26 training.batch_size 2.0 +671 27 model.embedding_dim 1.0 +671 27 optimizer.lr 0.07748480702428244 +671 27 negative_sampler.num_negs_per_pos 54.0 +671 27 training.batch_size 0.0 +671 28 model.embedding_dim 1.0 +671 28 optimizer.lr 0.022067942696371887 +671 28 negative_sampler.num_negs_per_pos 94.0 +671 28 training.batch_size 2.0 +671 29 model.embedding_dim 0.0 +671 29 optimizer.lr 0.013010763678699964 +671 29 negative_sampler.num_negs_per_pos 98.0 +671 29 training.batch_size 0.0 +671 30 model.embedding_dim 2.0 +671 30 optimizer.lr 0.005683475511095081 +671 30 negative_sampler.num_negs_per_pos 90.0 +671 30 training.batch_size 2.0 +671 31 model.embedding_dim 2.0 +671 31 optimizer.lr 0.023828256281223432 +671 31 negative_sampler.num_negs_per_pos 97.0 +671 31 training.batch_size 2.0 +671 32 model.embedding_dim 2.0 +671 32 optimizer.lr 0.03947169590072191 +671 32 negative_sampler.num_negs_per_pos 46.0 +671 32 training.batch_size 1.0 +671 33 model.embedding_dim 1.0 +671 33 optimizer.lr 0.002096389482866089 +671 33 negative_sampler.num_negs_per_pos 95.0 +671 33 training.batch_size 1.0 +671 34 model.embedding_dim 0.0 +671 34 optimizer.lr 0.0013132502027393036 +671 34 negative_sampler.num_negs_per_pos 35.0 +671 34 
training.batch_size 0.0 +671 35 model.embedding_dim 0.0 +671 35 optimizer.lr 0.022351649720038915 +671 35 negative_sampler.num_negs_per_pos 80.0 +671 35 training.batch_size 0.0 +671 36 model.embedding_dim 2.0 +671 36 optimizer.lr 0.003496184768131203 +671 36 negative_sampler.num_negs_per_pos 1.0 +671 36 training.batch_size 0.0 +671 37 model.embedding_dim 2.0 +671 37 optimizer.lr 0.001097528042047023 +671 37 negative_sampler.num_negs_per_pos 77.0 +671 37 training.batch_size 1.0 +671 38 model.embedding_dim 2.0 +671 38 optimizer.lr 0.00602951769643683 +671 38 negative_sampler.num_negs_per_pos 39.0 +671 38 training.batch_size 0.0 +671 39 model.embedding_dim 0.0 +671 39 optimizer.lr 0.007681067384290283 +671 39 negative_sampler.num_negs_per_pos 53.0 +671 39 training.batch_size 0.0 +671 40 model.embedding_dim 1.0 +671 40 optimizer.lr 0.0031009609488141287 +671 40 negative_sampler.num_negs_per_pos 23.0 +671 40 training.batch_size 0.0 +671 41 model.embedding_dim 0.0 +671 41 optimizer.lr 0.016802504910412033 +671 41 negative_sampler.num_negs_per_pos 32.0 +671 41 training.batch_size 0.0 +671 42 model.embedding_dim 2.0 +671 42 optimizer.lr 0.004494730801544192 +671 42 negative_sampler.num_negs_per_pos 25.0 +671 42 training.batch_size 2.0 +671 43 model.embedding_dim 1.0 +671 43 optimizer.lr 0.007968156252547773 +671 43 negative_sampler.num_negs_per_pos 96.0 +671 43 training.batch_size 1.0 +671 44 model.embedding_dim 1.0 +671 44 optimizer.lr 0.037100660809133316 +671 44 negative_sampler.num_negs_per_pos 13.0 +671 44 training.batch_size 0.0 +671 45 model.embedding_dim 0.0 +671 45 optimizer.lr 0.008203625343522019 +671 45 negative_sampler.num_negs_per_pos 25.0 +671 45 training.batch_size 2.0 +671 46 model.embedding_dim 1.0 +671 46 optimizer.lr 0.011727530011399968 +671 46 negative_sampler.num_negs_per_pos 74.0 +671 46 training.batch_size 1.0 +671 47 model.embedding_dim 1.0 +671 47 optimizer.lr 0.0012679528549955797 +671 47 negative_sampler.num_negs_per_pos 72.0 +671 47 
training.batch_size 2.0 +671 48 model.embedding_dim 0.0 +671 48 optimizer.lr 0.0017633671034335062 +671 48 negative_sampler.num_negs_per_pos 41.0 +671 48 training.batch_size 1.0 +671 49 model.embedding_dim 2.0 +671 49 optimizer.lr 0.001542817675020255 +671 49 negative_sampler.num_negs_per_pos 82.0 +671 49 training.batch_size 0.0 +671 50 model.embedding_dim 1.0 +671 50 optimizer.lr 0.04114969462092525 +671 50 negative_sampler.num_negs_per_pos 17.0 +671 50 training.batch_size 0.0 +671 51 model.embedding_dim 2.0 +671 51 optimizer.lr 0.0017655231853655883 +671 51 negative_sampler.num_negs_per_pos 78.0 +671 51 training.batch_size 2.0 +671 52 model.embedding_dim 0.0 +671 52 optimizer.lr 0.002975528792465106 +671 52 negative_sampler.num_negs_per_pos 50.0 +671 52 training.batch_size 0.0 +671 53 model.embedding_dim 1.0 +671 53 optimizer.lr 0.015269502828975708 +671 53 negative_sampler.num_negs_per_pos 81.0 +671 53 training.batch_size 0.0 +671 54 model.embedding_dim 0.0 +671 54 optimizer.lr 0.025428090223509843 +671 54 negative_sampler.num_negs_per_pos 66.0 +671 54 training.batch_size 2.0 +671 55 model.embedding_dim 2.0 +671 55 optimizer.lr 0.004637317319825465 +671 55 negative_sampler.num_negs_per_pos 30.0 +671 55 training.batch_size 2.0 +671 56 model.embedding_dim 1.0 +671 56 optimizer.lr 0.09874660646887058 +671 56 negative_sampler.num_negs_per_pos 16.0 +671 56 training.batch_size 1.0 +671 57 model.embedding_dim 1.0 +671 57 optimizer.lr 0.0026355059999146105 +671 57 negative_sampler.num_negs_per_pos 30.0 +671 57 training.batch_size 0.0 +671 58 model.embedding_dim 0.0 +671 58 optimizer.lr 0.0010438850208287199 +671 58 negative_sampler.num_negs_per_pos 85.0 +671 58 training.batch_size 1.0 +671 59 model.embedding_dim 2.0 +671 59 optimizer.lr 0.008881637354638417 +671 59 negative_sampler.num_negs_per_pos 46.0 +671 59 training.batch_size 1.0 +671 60 model.embedding_dim 1.0 +671 60 optimizer.lr 0.001191719246343131 +671 60 negative_sampler.num_negs_per_pos 83.0 +671 60 
training.batch_size 0.0 +671 61 model.embedding_dim 0.0 +671 61 optimizer.lr 0.0024737714960750205 +671 61 negative_sampler.num_negs_per_pos 80.0 +671 61 training.batch_size 2.0 +671 62 model.embedding_dim 2.0 +671 62 optimizer.lr 0.01408432060156885 +671 62 negative_sampler.num_negs_per_pos 90.0 +671 62 training.batch_size 2.0 +671 63 model.embedding_dim 0.0 +671 63 optimizer.lr 0.0016562129149490265 +671 63 negative_sampler.num_negs_per_pos 4.0 +671 63 training.batch_size 0.0 +671 64 model.embedding_dim 1.0 +671 64 optimizer.lr 0.012991981972043506 +671 64 negative_sampler.num_negs_per_pos 33.0 +671 64 training.batch_size 0.0 +671 65 model.embedding_dim 2.0 +671 65 optimizer.lr 0.0010675167190903221 +671 65 negative_sampler.num_negs_per_pos 67.0 +671 65 training.batch_size 1.0 +671 66 model.embedding_dim 2.0 +671 66 optimizer.lr 0.08117452843119248 +671 66 negative_sampler.num_negs_per_pos 59.0 +671 66 training.batch_size 1.0 +671 67 model.embedding_dim 1.0 +671 67 optimizer.lr 0.06926908303420942 +671 67 negative_sampler.num_negs_per_pos 3.0 +671 67 training.batch_size 0.0 +671 68 model.embedding_dim 1.0 +671 68 optimizer.lr 0.018838923475357216 +671 68 negative_sampler.num_negs_per_pos 21.0 +671 68 training.batch_size 1.0 +671 69 model.embedding_dim 0.0 +671 69 optimizer.lr 0.06814641658535277 +671 69 negative_sampler.num_negs_per_pos 25.0 +671 69 training.batch_size 1.0 +671 70 model.embedding_dim 0.0 +671 70 optimizer.lr 0.025444687783216416 +671 70 negative_sampler.num_negs_per_pos 16.0 +671 70 training.batch_size 1.0 +671 71 model.embedding_dim 1.0 +671 71 optimizer.lr 0.010073966557652914 +671 71 negative_sampler.num_negs_per_pos 61.0 +671 71 training.batch_size 0.0 +671 72 model.embedding_dim 2.0 +671 72 optimizer.lr 0.004915809987920557 +671 72 negative_sampler.num_negs_per_pos 8.0 +671 72 training.batch_size 0.0 +671 73 model.embedding_dim 0.0 +671 73 optimizer.lr 0.0722900407083762 +671 73 negative_sampler.num_negs_per_pos 3.0 +671 73 
training.batch_size 1.0 +671 74 model.embedding_dim 1.0 +671 74 optimizer.lr 0.002303466298554021 +671 74 negative_sampler.num_negs_per_pos 88.0 +671 74 training.batch_size 1.0 +671 75 model.embedding_dim 0.0 +671 75 optimizer.lr 0.0023561237303152214 +671 75 negative_sampler.num_negs_per_pos 36.0 +671 75 training.batch_size 1.0 +671 76 model.embedding_dim 1.0 +671 76 optimizer.lr 0.03389269897528015 +671 76 negative_sampler.num_negs_per_pos 0.0 +671 76 training.batch_size 2.0 +671 77 model.embedding_dim 1.0 +671 77 optimizer.lr 0.0024647414041167994 +671 77 negative_sampler.num_negs_per_pos 89.0 +671 77 training.batch_size 1.0 +671 78 model.embedding_dim 2.0 +671 78 optimizer.lr 0.01231842956659185 +671 78 negative_sampler.num_negs_per_pos 5.0 +671 78 training.batch_size 1.0 +671 79 model.embedding_dim 0.0 +671 79 optimizer.lr 0.0019526255823443738 +671 79 negative_sampler.num_negs_per_pos 71.0 +671 79 training.batch_size 2.0 +671 80 model.embedding_dim 0.0 +671 80 optimizer.lr 0.016015310079726765 +671 80 negative_sampler.num_negs_per_pos 61.0 +671 80 training.batch_size 2.0 +671 81 model.embedding_dim 1.0 +671 81 optimizer.lr 0.019995011581937807 +671 81 negative_sampler.num_negs_per_pos 36.0 +671 81 training.batch_size 1.0 +671 82 model.embedding_dim 0.0 +671 82 optimizer.lr 0.07284796475252703 +671 82 negative_sampler.num_negs_per_pos 72.0 +671 82 training.batch_size 2.0 +671 83 model.embedding_dim 2.0 +671 83 optimizer.lr 0.049537401475366734 +671 83 negative_sampler.num_negs_per_pos 84.0 +671 83 training.batch_size 2.0 +671 84 model.embedding_dim 2.0 +671 84 optimizer.lr 0.010255592353729748 +671 84 negative_sampler.num_negs_per_pos 58.0 +671 84 training.batch_size 0.0 +671 85 model.embedding_dim 2.0 +671 85 optimizer.lr 0.0013759248601186486 +671 85 negative_sampler.num_negs_per_pos 76.0 +671 85 training.batch_size 2.0 +671 86 model.embedding_dim 0.0 +671 86 optimizer.lr 0.05298323151422828 +671 86 negative_sampler.num_negs_per_pos 10.0 +671 86 
training.batch_size 1.0 +671 87 model.embedding_dim 1.0 +671 87 optimizer.lr 0.04823588317851734 +671 87 negative_sampler.num_negs_per_pos 95.0 +671 87 training.batch_size 2.0 +671 88 model.embedding_dim 0.0 +671 88 optimizer.lr 0.08197235209930925 +671 88 negative_sampler.num_negs_per_pos 59.0 +671 88 training.batch_size 1.0 +671 89 model.embedding_dim 0.0 +671 89 optimizer.lr 0.0014982281334833647 +671 89 negative_sampler.num_negs_per_pos 47.0 +671 89 training.batch_size 2.0 +671 90 model.embedding_dim 2.0 +671 90 optimizer.lr 0.06355044482005168 +671 90 negative_sampler.num_negs_per_pos 83.0 +671 90 training.batch_size 1.0 +671 91 model.embedding_dim 1.0 +671 91 optimizer.lr 0.018276077183862063 +671 91 negative_sampler.num_negs_per_pos 54.0 +671 91 training.batch_size 0.0 +671 92 model.embedding_dim 2.0 +671 92 optimizer.lr 0.001233696842265327 +671 92 negative_sampler.num_negs_per_pos 59.0 +671 92 training.batch_size 2.0 +671 93 model.embedding_dim 0.0 +671 93 optimizer.lr 0.03047899341849414 +671 93 negative_sampler.num_negs_per_pos 80.0 +671 93 training.batch_size 0.0 +671 94 model.embedding_dim 2.0 +671 94 optimizer.lr 0.042527476063056326 +671 94 negative_sampler.num_negs_per_pos 49.0 +671 94 training.batch_size 1.0 +671 95 model.embedding_dim 2.0 +671 95 optimizer.lr 0.039150465706076026 +671 95 negative_sampler.num_negs_per_pos 37.0 +671 95 training.batch_size 1.0 +671 96 model.embedding_dim 1.0 +671 96 optimizer.lr 0.033638521972455857 +671 96 negative_sampler.num_negs_per_pos 62.0 +671 96 training.batch_size 1.0 +671 97 model.embedding_dim 2.0 +671 97 optimizer.lr 0.0028514735340576173 +671 97 negative_sampler.num_negs_per_pos 67.0 +671 97 training.batch_size 2.0 +671 98 model.embedding_dim 0.0 +671 98 optimizer.lr 0.04263481958748054 +671 98 negative_sampler.num_negs_per_pos 97.0 +671 98 training.batch_size 0.0 +671 99 model.embedding_dim 0.0 +671 99 optimizer.lr 0.030108756360988292 +671 99 negative_sampler.num_negs_per_pos 80.0 +671 99 
training.batch_size 0.0 +671 100 model.embedding_dim 1.0 +671 100 optimizer.lr 0.07226731862733207 +671 100 negative_sampler.num_negs_per_pos 37.0 +671 100 training.batch_size 1.0 +671 1 dataset """kinships""" +671 1 model """simple""" +671 1 loss """bceaftersigmoid""" +671 1 regularizer """no""" +671 1 optimizer """adam""" +671 1 training_loop """owa""" +671 1 negative_sampler """basic""" +671 1 evaluator """rankbased""" +671 2 dataset """kinships""" +671 2 model """simple""" +671 2 loss """bceaftersigmoid""" +671 2 regularizer """no""" +671 2 optimizer """adam""" +671 2 training_loop """owa""" +671 2 negative_sampler """basic""" +671 2 evaluator """rankbased""" +671 3 dataset """kinships""" +671 3 model """simple""" +671 3 loss """bceaftersigmoid""" +671 3 regularizer """no""" +671 3 optimizer """adam""" +671 3 training_loop """owa""" +671 3 negative_sampler """basic""" +671 3 evaluator """rankbased""" +671 4 dataset """kinships""" +671 4 model """simple""" +671 4 loss """bceaftersigmoid""" +671 4 regularizer """no""" +671 4 optimizer """adam""" +671 4 training_loop """owa""" +671 4 negative_sampler """basic""" +671 4 evaluator """rankbased""" +671 5 dataset """kinships""" +671 5 model """simple""" +671 5 loss """bceaftersigmoid""" +671 5 regularizer """no""" +671 5 optimizer """adam""" +671 5 training_loop """owa""" +671 5 negative_sampler """basic""" +671 5 evaluator """rankbased""" +671 6 dataset """kinships""" +671 6 model """simple""" +671 6 loss """bceaftersigmoid""" +671 6 regularizer """no""" +671 6 optimizer """adam""" +671 6 training_loop """owa""" +671 6 negative_sampler """basic""" +671 6 evaluator """rankbased""" +671 7 dataset """kinships""" +671 7 model """simple""" +671 7 loss """bceaftersigmoid""" +671 7 regularizer """no""" +671 7 optimizer """adam""" +671 7 training_loop """owa""" +671 7 negative_sampler """basic""" +671 7 evaluator """rankbased""" +671 8 dataset """kinships""" +671 8 model """simple""" +671 8 loss """bceaftersigmoid""" +671 8 
regularizer """no""" +671 8 optimizer """adam""" +671 8 training_loop """owa""" +671 8 negative_sampler """basic""" +671 8 evaluator """rankbased""" +671 9 dataset """kinships""" +671 9 model """simple""" +671 9 loss """bceaftersigmoid""" +671 9 regularizer """no""" +671 9 optimizer """adam""" +671 9 training_loop """owa""" +671 9 negative_sampler """basic""" +671 9 evaluator """rankbased""" +671 10 dataset """kinships""" +671 10 model """simple""" +671 10 loss """bceaftersigmoid""" +671 10 regularizer """no""" +671 10 optimizer """adam""" +671 10 training_loop """owa""" +671 10 negative_sampler """basic""" +671 10 evaluator """rankbased""" +671 11 dataset """kinships""" +671 11 model """simple""" +671 11 loss """bceaftersigmoid""" +671 11 regularizer """no""" +671 11 optimizer """adam""" +671 11 training_loop """owa""" +671 11 negative_sampler """basic""" +671 11 evaluator """rankbased""" +671 12 dataset """kinships""" +671 12 model """simple""" +671 12 loss """bceaftersigmoid""" +671 12 regularizer """no""" +671 12 optimizer """adam""" +671 12 training_loop """owa""" +671 12 negative_sampler """basic""" +671 12 evaluator """rankbased""" +671 13 dataset """kinships""" +671 13 model """simple""" +671 13 loss """bceaftersigmoid""" +671 13 regularizer """no""" +671 13 optimizer """adam""" +671 13 training_loop """owa""" +671 13 negative_sampler """basic""" +671 13 evaluator """rankbased""" +671 14 dataset """kinships""" +671 14 model """simple""" +671 14 loss """bceaftersigmoid""" +671 14 regularizer """no""" +671 14 optimizer """adam""" +671 14 training_loop """owa""" +671 14 negative_sampler """basic""" +671 14 evaluator """rankbased""" +671 15 dataset """kinships""" +671 15 model """simple""" +671 15 loss """bceaftersigmoid""" +671 15 regularizer """no""" +671 15 optimizer """adam""" +671 15 training_loop """owa""" +671 15 negative_sampler """basic""" +671 15 evaluator """rankbased""" +671 16 dataset """kinships""" +671 16 model """simple""" +671 16 loss 
"""bceaftersigmoid""" +671 16 regularizer """no""" +671 16 optimizer """adam""" +671 16 training_loop """owa""" +671 16 negative_sampler """basic""" +671 16 evaluator """rankbased""" +671 17 dataset """kinships""" +671 17 model """simple""" +671 17 loss """bceaftersigmoid""" +671 17 regularizer """no""" +671 17 optimizer """adam""" +671 17 training_loop """owa""" +671 17 negative_sampler """basic""" +671 17 evaluator """rankbased""" +671 18 dataset """kinships""" +671 18 model """simple""" +671 18 loss """bceaftersigmoid""" +671 18 regularizer """no""" +671 18 optimizer """adam""" +671 18 training_loop """owa""" +671 18 negative_sampler """basic""" +671 18 evaluator """rankbased""" +671 19 dataset """kinships""" +671 19 model """simple""" +671 19 loss """bceaftersigmoid""" +671 19 regularizer """no""" +671 19 optimizer """adam""" +671 19 training_loop """owa""" +671 19 negative_sampler """basic""" +671 19 evaluator """rankbased""" +671 20 dataset """kinships""" +671 20 model """simple""" +671 20 loss """bceaftersigmoid""" +671 20 regularizer """no""" +671 20 optimizer """adam""" +671 20 training_loop """owa""" +671 20 negative_sampler """basic""" +671 20 evaluator """rankbased""" +671 21 dataset """kinships""" +671 21 model """simple""" +671 21 loss """bceaftersigmoid""" +671 21 regularizer """no""" +671 21 optimizer """adam""" +671 21 training_loop """owa""" +671 21 negative_sampler """basic""" +671 21 evaluator """rankbased""" +671 22 dataset """kinships""" +671 22 model """simple""" +671 22 loss """bceaftersigmoid""" +671 22 regularizer """no""" +671 22 optimizer """adam""" +671 22 training_loop """owa""" +671 22 negative_sampler """basic""" +671 22 evaluator """rankbased""" +671 23 dataset """kinships""" +671 23 model """simple""" +671 23 loss """bceaftersigmoid""" +671 23 regularizer """no""" +671 23 optimizer """adam""" +671 23 training_loop """owa""" +671 23 negative_sampler """basic""" +671 23 evaluator """rankbased""" +671 24 dataset """kinships""" +671 24 
model """simple""" +671 24 loss """bceaftersigmoid""" +671 24 regularizer """no""" +671 24 optimizer """adam""" +671 24 training_loop """owa""" +671 24 negative_sampler """basic""" +671 24 evaluator """rankbased""" +671 25 dataset """kinships""" +671 25 model """simple""" +671 25 loss """bceaftersigmoid""" +671 25 regularizer """no""" +671 25 optimizer """adam""" +671 25 training_loop """owa""" +671 25 negative_sampler """basic""" +671 25 evaluator """rankbased""" +671 26 dataset """kinships""" +671 26 model """simple""" +671 26 loss """bceaftersigmoid""" +671 26 regularizer """no""" +671 26 optimizer """adam""" +671 26 training_loop """owa""" +671 26 negative_sampler """basic""" +671 26 evaluator """rankbased""" +671 27 dataset """kinships""" +671 27 model """simple""" +671 27 loss """bceaftersigmoid""" +671 27 regularizer """no""" +671 27 optimizer """adam""" +671 27 training_loop """owa""" +671 27 negative_sampler """basic""" +671 27 evaluator """rankbased""" +671 28 dataset """kinships""" +671 28 model """simple""" +671 28 loss """bceaftersigmoid""" +671 28 regularizer """no""" +671 28 optimizer """adam""" +671 28 training_loop """owa""" +671 28 negative_sampler """basic""" +671 28 evaluator """rankbased""" +671 29 dataset """kinships""" +671 29 model """simple""" +671 29 loss """bceaftersigmoid""" +671 29 regularizer """no""" +671 29 optimizer """adam""" +671 29 training_loop """owa""" +671 29 negative_sampler """basic""" +671 29 evaluator """rankbased""" +671 30 dataset """kinships""" +671 30 model """simple""" +671 30 loss """bceaftersigmoid""" +671 30 regularizer """no""" +671 30 optimizer """adam""" +671 30 training_loop """owa""" +671 30 negative_sampler """basic""" +671 30 evaluator """rankbased""" +671 31 dataset """kinships""" +671 31 model """simple""" +671 31 loss """bceaftersigmoid""" +671 31 regularizer """no""" +671 31 optimizer """adam""" +671 31 training_loop """owa""" +671 31 negative_sampler """basic""" +671 31 evaluator """rankbased""" +671 
32 dataset """kinships""" +671 32 model """simple""" +671 32 loss """bceaftersigmoid""" +671 32 regularizer """no""" +671 32 optimizer """adam""" +671 32 training_loop """owa""" +671 32 negative_sampler """basic""" +671 32 evaluator """rankbased""" +671 33 dataset """kinships""" +671 33 model """simple""" +671 33 loss """bceaftersigmoid""" +671 33 regularizer """no""" +671 33 optimizer """adam""" +671 33 training_loop """owa""" +671 33 negative_sampler """basic""" +671 33 evaluator """rankbased""" +671 34 dataset """kinships""" +671 34 model """simple""" +671 34 loss """bceaftersigmoid""" +671 34 regularizer """no""" +671 34 optimizer """adam""" +671 34 training_loop """owa""" +671 34 negative_sampler """basic""" +671 34 evaluator """rankbased""" +671 35 dataset """kinships""" +671 35 model """simple""" +671 35 loss """bceaftersigmoid""" +671 35 regularizer """no""" +671 35 optimizer """adam""" +671 35 training_loop """owa""" +671 35 negative_sampler """basic""" +671 35 evaluator """rankbased""" +671 36 dataset """kinships""" +671 36 model """simple""" +671 36 loss """bceaftersigmoid""" +671 36 regularizer """no""" +671 36 optimizer """adam""" +671 36 training_loop """owa""" +671 36 negative_sampler """basic""" +671 36 evaluator """rankbased""" +671 37 dataset """kinships""" +671 37 model """simple""" +671 37 loss """bceaftersigmoid""" +671 37 regularizer """no""" +671 37 optimizer """adam""" +671 37 training_loop """owa""" +671 37 negative_sampler """basic""" +671 37 evaluator """rankbased""" +671 38 dataset """kinships""" +671 38 model """simple""" +671 38 loss """bceaftersigmoid""" +671 38 regularizer """no""" +671 38 optimizer """adam""" +671 38 training_loop """owa""" +671 38 negative_sampler """basic""" +671 38 evaluator """rankbased""" +671 39 dataset """kinships""" +671 39 model """simple""" +671 39 loss """bceaftersigmoid""" +671 39 regularizer """no""" +671 39 optimizer """adam""" +671 39 training_loop """owa""" +671 39 negative_sampler """basic""" +671 
39 evaluator """rankbased""" +671 40 dataset """kinships""" +671 40 model """simple""" +671 40 loss """bceaftersigmoid""" +671 40 regularizer """no""" +671 40 optimizer """adam""" +671 40 training_loop """owa""" +671 40 negative_sampler """basic""" +671 40 evaluator """rankbased""" +671 41 dataset """kinships""" +671 41 model """simple""" +671 41 loss """bceaftersigmoid""" +671 41 regularizer """no""" +671 41 optimizer """adam""" +671 41 training_loop """owa""" +671 41 negative_sampler """basic""" +671 41 evaluator """rankbased""" +671 42 dataset """kinships""" +671 42 model """simple""" +671 42 loss """bceaftersigmoid""" +671 42 regularizer """no""" +671 42 optimizer """adam""" +671 42 training_loop """owa""" +671 42 negative_sampler """basic""" +671 42 evaluator """rankbased""" +671 43 dataset """kinships""" +671 43 model """simple""" +671 43 loss """bceaftersigmoid""" +671 43 regularizer """no""" +671 43 optimizer """adam""" +671 43 training_loop """owa""" +671 43 negative_sampler """basic""" +671 43 evaluator """rankbased""" +671 44 dataset """kinships""" +671 44 model """simple""" +671 44 loss """bceaftersigmoid""" +671 44 regularizer """no""" +671 44 optimizer """adam""" +671 44 training_loop """owa""" +671 44 negative_sampler """basic""" +671 44 evaluator """rankbased""" +671 45 dataset """kinships""" +671 45 model """simple""" +671 45 loss """bceaftersigmoid""" +671 45 regularizer """no""" +671 45 optimizer """adam""" +671 45 training_loop """owa""" +671 45 negative_sampler """basic""" +671 45 evaluator """rankbased""" +671 46 dataset """kinships""" +671 46 model """simple""" +671 46 loss """bceaftersigmoid""" +671 46 regularizer """no""" +671 46 optimizer """adam""" +671 46 training_loop """owa""" +671 46 negative_sampler """basic""" +671 46 evaluator """rankbased""" +671 47 dataset """kinships""" +671 47 model """simple""" +671 47 loss """bceaftersigmoid""" +671 47 regularizer """no""" +671 47 optimizer """adam""" +671 47 training_loop """owa""" +671 47 
negative_sampler """basic""" +671 47 evaluator """rankbased""" +671 48 dataset """kinships""" +671 48 model """simple""" +671 48 loss """bceaftersigmoid""" +671 48 regularizer """no""" +671 48 optimizer """adam""" +671 48 training_loop """owa""" +671 48 negative_sampler """basic""" +671 48 evaluator """rankbased""" +671 49 dataset """kinships""" +671 49 model """simple""" +671 49 loss """bceaftersigmoid""" +671 49 regularizer """no""" +671 49 optimizer """adam""" +671 49 training_loop """owa""" +671 49 negative_sampler """basic""" +671 49 evaluator """rankbased""" +671 50 dataset """kinships""" +671 50 model """simple""" +671 50 loss """bceaftersigmoid""" +671 50 regularizer """no""" +671 50 optimizer """adam""" +671 50 training_loop """owa""" +671 50 negative_sampler """basic""" +671 50 evaluator """rankbased""" +671 51 dataset """kinships""" +671 51 model """simple""" +671 51 loss """bceaftersigmoid""" +671 51 regularizer """no""" +671 51 optimizer """adam""" +671 51 training_loop """owa""" +671 51 negative_sampler """basic""" +671 51 evaluator """rankbased""" +671 52 dataset """kinships""" +671 52 model """simple""" +671 52 loss """bceaftersigmoid""" +671 52 regularizer """no""" +671 52 optimizer """adam""" +671 52 training_loop """owa""" +671 52 negative_sampler """basic""" +671 52 evaluator """rankbased""" +671 53 dataset """kinships""" +671 53 model """simple""" +671 53 loss """bceaftersigmoid""" +671 53 regularizer """no""" +671 53 optimizer """adam""" +671 53 training_loop """owa""" +671 53 negative_sampler """basic""" +671 53 evaluator """rankbased""" +671 54 dataset """kinships""" +671 54 model """simple""" +671 54 loss """bceaftersigmoid""" +671 54 regularizer """no""" +671 54 optimizer """adam""" +671 54 training_loop """owa""" +671 54 negative_sampler """basic""" +671 54 evaluator """rankbased""" +671 55 dataset """kinships""" +671 55 model """simple""" +671 55 loss """bceaftersigmoid""" +671 55 regularizer """no""" +671 55 optimizer """adam""" +671 55 
training_loop """owa""" +671 55 negative_sampler """basic""" +671 55 evaluator """rankbased""" +671 56 dataset """kinships""" +671 56 model """simple""" +671 56 loss """bceaftersigmoid""" +671 56 regularizer """no""" +671 56 optimizer """adam""" +671 56 training_loop """owa""" +671 56 negative_sampler """basic""" +671 56 evaluator """rankbased""" +671 57 dataset """kinships""" +671 57 model """simple""" +671 57 loss """bceaftersigmoid""" +671 57 regularizer """no""" +671 57 optimizer """adam""" +671 57 training_loop """owa""" +671 57 negative_sampler """basic""" +671 57 evaluator """rankbased""" +671 58 dataset """kinships""" +671 58 model """simple""" +671 58 loss """bceaftersigmoid""" +671 58 regularizer """no""" +671 58 optimizer """adam""" +671 58 training_loop """owa""" +671 58 negative_sampler """basic""" +671 58 evaluator """rankbased""" +671 59 dataset """kinships""" +671 59 model """simple""" +671 59 loss """bceaftersigmoid""" +671 59 regularizer """no""" +671 59 optimizer """adam""" +671 59 training_loop """owa""" +671 59 negative_sampler """basic""" +671 59 evaluator """rankbased""" +671 60 dataset """kinships""" +671 60 model """simple""" +671 60 loss """bceaftersigmoid""" +671 60 regularizer """no""" +671 60 optimizer """adam""" +671 60 training_loop """owa""" +671 60 negative_sampler """basic""" +671 60 evaluator """rankbased""" +671 61 dataset """kinships""" +671 61 model """simple""" +671 61 loss """bceaftersigmoid""" +671 61 regularizer """no""" +671 61 optimizer """adam""" +671 61 training_loop """owa""" +671 61 negative_sampler """basic""" +671 61 evaluator """rankbased""" +671 62 dataset """kinships""" +671 62 model """simple""" +671 62 loss """bceaftersigmoid""" +671 62 regularizer """no""" +671 62 optimizer """adam""" +671 62 training_loop """owa""" +671 62 negative_sampler """basic""" +671 62 evaluator """rankbased""" +671 63 dataset """kinships""" +671 63 model """simple""" +671 63 loss """bceaftersigmoid""" +671 63 regularizer """no""" +671 
63 optimizer """adam""" +671 63 training_loop """owa""" +671 63 negative_sampler """basic""" +671 63 evaluator """rankbased""" +671 64 dataset """kinships""" +671 64 model """simple""" +671 64 loss """bceaftersigmoid""" +671 64 regularizer """no""" +671 64 optimizer """adam""" +671 64 training_loop """owa""" +671 64 negative_sampler """basic""" +671 64 evaluator """rankbased""" +671 65 dataset """kinships""" +671 65 model """simple""" +671 65 loss """bceaftersigmoid""" +671 65 regularizer """no""" +671 65 optimizer """adam""" +671 65 training_loop """owa""" +671 65 negative_sampler """basic""" +671 65 evaluator """rankbased""" +671 66 dataset """kinships""" +671 66 model """simple""" +671 66 loss """bceaftersigmoid""" +671 66 regularizer """no""" +671 66 optimizer """adam""" +671 66 training_loop """owa""" +671 66 negative_sampler """basic""" +671 66 evaluator """rankbased""" +671 67 dataset """kinships""" +671 67 model """simple""" +671 67 loss """bceaftersigmoid""" +671 67 regularizer """no""" +671 67 optimizer """adam""" +671 67 training_loop """owa""" +671 67 negative_sampler """basic""" +671 67 evaluator """rankbased""" +671 68 dataset """kinships""" +671 68 model """simple""" +671 68 loss """bceaftersigmoid""" +671 68 regularizer """no""" +671 68 optimizer """adam""" +671 68 training_loop """owa""" +671 68 negative_sampler """basic""" +671 68 evaluator """rankbased""" +671 69 dataset """kinships""" +671 69 model """simple""" +671 69 loss """bceaftersigmoid""" +671 69 regularizer """no""" +671 69 optimizer """adam""" +671 69 training_loop """owa""" +671 69 negative_sampler """basic""" +671 69 evaluator """rankbased""" +671 70 dataset """kinships""" +671 70 model """simple""" +671 70 loss """bceaftersigmoid""" +671 70 regularizer """no""" +671 70 optimizer """adam""" +671 70 training_loop """owa""" +671 70 negative_sampler """basic""" +671 70 evaluator """rankbased""" +671 71 dataset """kinships""" +671 71 model """simple""" +671 71 loss """bceaftersigmoid""" 
+671 71 regularizer """no""" +671 71 optimizer """adam""" +671 71 training_loop """owa""" +671 71 negative_sampler """basic""" +671 71 evaluator """rankbased""" +671 72 dataset """kinships""" +671 72 model """simple""" +671 72 loss """bceaftersigmoid""" +671 72 regularizer """no""" +671 72 optimizer """adam""" +671 72 training_loop """owa""" +671 72 negative_sampler """basic""" +671 72 evaluator """rankbased""" +671 73 dataset """kinships""" +671 73 model """simple""" +671 73 loss """bceaftersigmoid""" +671 73 regularizer """no""" +671 73 optimizer """adam""" +671 73 training_loop """owa""" +671 73 negative_sampler """basic""" +671 73 evaluator """rankbased""" +671 74 dataset """kinships""" +671 74 model """simple""" +671 74 loss """bceaftersigmoid""" +671 74 regularizer """no""" +671 74 optimizer """adam""" +671 74 training_loop """owa""" +671 74 negative_sampler """basic""" +671 74 evaluator """rankbased""" +671 75 dataset """kinships""" +671 75 model """simple""" +671 75 loss """bceaftersigmoid""" +671 75 regularizer """no""" +671 75 optimizer """adam""" +671 75 training_loop """owa""" +671 75 negative_sampler """basic""" +671 75 evaluator """rankbased""" +671 76 dataset """kinships""" +671 76 model """simple""" +671 76 loss """bceaftersigmoid""" +671 76 regularizer """no""" +671 76 optimizer """adam""" +671 76 training_loop """owa""" +671 76 negative_sampler """basic""" +671 76 evaluator """rankbased""" +671 77 dataset """kinships""" +671 77 model """simple""" +671 77 loss """bceaftersigmoid""" +671 77 regularizer """no""" +671 77 optimizer """adam""" +671 77 training_loop """owa""" +671 77 negative_sampler """basic""" +671 77 evaluator """rankbased""" +671 78 dataset """kinships""" +671 78 model """simple""" +671 78 loss """bceaftersigmoid""" +671 78 regularizer """no""" +671 78 optimizer """adam""" +671 78 training_loop """owa""" +671 78 negative_sampler """basic""" +671 78 evaluator """rankbased""" +671 79 dataset """kinships""" +671 79 model """simple""" 
+671 79 loss """bceaftersigmoid""" +671 79 regularizer """no""" +671 79 optimizer """adam""" +671 79 training_loop """owa""" +671 79 negative_sampler """basic""" +671 79 evaluator """rankbased""" +671 80 dataset """kinships""" +671 80 model """simple""" +671 80 loss """bceaftersigmoid""" +671 80 regularizer """no""" +671 80 optimizer """adam""" +671 80 training_loop """owa""" +671 80 negative_sampler """basic""" +671 80 evaluator """rankbased""" +671 81 dataset """kinships""" +671 81 model """simple""" +671 81 loss """bceaftersigmoid""" +671 81 regularizer """no""" +671 81 optimizer """adam""" +671 81 training_loop """owa""" +671 81 negative_sampler """basic""" +671 81 evaluator """rankbased""" +671 82 dataset """kinships""" +671 82 model """simple""" +671 82 loss """bceaftersigmoid""" +671 82 regularizer """no""" +671 82 optimizer """adam""" +671 82 training_loop """owa""" +671 82 negative_sampler """basic""" +671 82 evaluator """rankbased""" +671 83 dataset """kinships""" +671 83 model """simple""" +671 83 loss """bceaftersigmoid""" +671 83 regularizer """no""" +671 83 optimizer """adam""" +671 83 training_loop """owa""" +671 83 negative_sampler """basic""" +671 83 evaluator """rankbased""" +671 84 dataset """kinships""" +671 84 model """simple""" +671 84 loss """bceaftersigmoid""" +671 84 regularizer """no""" +671 84 optimizer """adam""" +671 84 training_loop """owa""" +671 84 negative_sampler """basic""" +671 84 evaluator """rankbased""" +671 85 dataset """kinships""" +671 85 model """simple""" +671 85 loss """bceaftersigmoid""" +671 85 regularizer """no""" +671 85 optimizer """adam""" +671 85 training_loop """owa""" +671 85 negative_sampler """basic""" +671 85 evaluator """rankbased""" +671 86 dataset """kinships""" +671 86 model """simple""" +671 86 loss """bceaftersigmoid""" +671 86 regularizer """no""" +671 86 optimizer """adam""" +671 86 training_loop """owa""" +671 86 negative_sampler """basic""" +671 86 evaluator """rankbased""" +671 87 dataset 
"""kinships""" +671 87 model """simple""" +671 87 loss """bceaftersigmoid""" +671 87 regularizer """no""" +671 87 optimizer """adam""" +671 87 training_loop """owa""" +671 87 negative_sampler """basic""" +671 87 evaluator """rankbased""" +671 88 dataset """kinships""" +671 88 model """simple""" +671 88 loss """bceaftersigmoid""" +671 88 regularizer """no""" +671 88 optimizer """adam""" +671 88 training_loop """owa""" +671 88 negative_sampler """basic""" +671 88 evaluator """rankbased""" +671 89 dataset """kinships""" +671 89 model """simple""" +671 89 loss """bceaftersigmoid""" +671 89 regularizer """no""" +671 89 optimizer """adam""" +671 89 training_loop """owa""" +671 89 negative_sampler """basic""" +671 89 evaluator """rankbased""" +671 90 dataset """kinships""" +671 90 model """simple""" +671 90 loss """bceaftersigmoid""" +671 90 regularizer """no""" +671 90 optimizer """adam""" +671 90 training_loop """owa""" +671 90 negative_sampler """basic""" +671 90 evaluator """rankbased""" +671 91 dataset """kinships""" +671 91 model """simple""" +671 91 loss """bceaftersigmoid""" +671 91 regularizer """no""" +671 91 optimizer """adam""" +671 91 training_loop """owa""" +671 91 negative_sampler """basic""" +671 91 evaluator """rankbased""" +671 92 dataset """kinships""" +671 92 model """simple""" +671 92 loss """bceaftersigmoid""" +671 92 regularizer """no""" +671 92 optimizer """adam""" +671 92 training_loop """owa""" +671 92 negative_sampler """basic""" +671 92 evaluator """rankbased""" +671 93 dataset """kinships""" +671 93 model """simple""" +671 93 loss """bceaftersigmoid""" +671 93 regularizer """no""" +671 93 optimizer """adam""" +671 93 training_loop """owa""" +671 93 negative_sampler """basic""" +671 93 evaluator """rankbased""" +671 94 dataset """kinships""" +671 94 model """simple""" +671 94 loss """bceaftersigmoid""" +671 94 regularizer """no""" +671 94 optimizer """adam""" +671 94 training_loop """owa""" +671 94 negative_sampler """basic""" +671 94 evaluator 
"""rankbased""" +671 95 dataset """kinships""" +671 95 model """simple""" +671 95 loss """bceaftersigmoid""" +671 95 regularizer """no""" +671 95 optimizer """adam""" +671 95 training_loop """owa""" +671 95 negative_sampler """basic""" +671 95 evaluator """rankbased""" +671 96 dataset """kinships""" +671 96 model """simple""" +671 96 loss """bceaftersigmoid""" +671 96 regularizer """no""" +671 96 optimizer """adam""" +671 96 training_loop """owa""" +671 96 negative_sampler """basic""" +671 96 evaluator """rankbased""" +671 97 dataset """kinships""" +671 97 model """simple""" +671 97 loss """bceaftersigmoid""" +671 97 regularizer """no""" +671 97 optimizer """adam""" +671 97 training_loop """owa""" +671 97 negative_sampler """basic""" +671 97 evaluator """rankbased""" +671 98 dataset """kinships""" +671 98 model """simple""" +671 98 loss """bceaftersigmoid""" +671 98 regularizer """no""" +671 98 optimizer """adam""" +671 98 training_loop """owa""" +671 98 negative_sampler """basic""" +671 98 evaluator """rankbased""" +671 99 dataset """kinships""" +671 99 model """simple""" +671 99 loss """bceaftersigmoid""" +671 99 regularizer """no""" +671 99 optimizer """adam""" +671 99 training_loop """owa""" +671 99 negative_sampler """basic""" +671 99 evaluator """rankbased""" +671 100 dataset """kinships""" +671 100 model """simple""" +671 100 loss """bceaftersigmoid""" +671 100 regularizer """no""" +671 100 optimizer """adam""" +671 100 training_loop """owa""" +671 100 negative_sampler """basic""" +671 100 evaluator """rankbased""" +672 1 model.embedding_dim 1.0 +672 1 optimizer.lr 0.0024340304557968324 +672 1 negative_sampler.num_negs_per_pos 74.0 +672 1 training.batch_size 2.0 +672 2 model.embedding_dim 2.0 +672 2 optimizer.lr 0.004431532459876477 +672 2 negative_sampler.num_negs_per_pos 24.0 +672 2 training.batch_size 2.0 +672 3 model.embedding_dim 1.0 +672 3 optimizer.lr 0.04760686778456365 +672 3 negative_sampler.num_negs_per_pos 36.0 +672 3 training.batch_size 1.0 +672 
4 model.embedding_dim 2.0 +672 4 optimizer.lr 0.0795184826037227 +672 4 negative_sampler.num_negs_per_pos 79.0 +672 4 training.batch_size 0.0 +672 5 model.embedding_dim 1.0 +672 5 optimizer.lr 0.0010301674128737326 +672 5 negative_sampler.num_negs_per_pos 38.0 +672 5 training.batch_size 2.0 +672 6 model.embedding_dim 2.0 +672 6 optimizer.lr 0.005885677364569162 +672 6 negative_sampler.num_negs_per_pos 64.0 +672 6 training.batch_size 0.0 +672 7 model.embedding_dim 1.0 +672 7 optimizer.lr 0.00782710535125337 +672 7 negative_sampler.num_negs_per_pos 70.0 +672 7 training.batch_size 2.0 +672 8 model.embedding_dim 0.0 +672 8 optimizer.lr 0.005973775223016885 +672 8 negative_sampler.num_negs_per_pos 62.0 +672 8 training.batch_size 1.0 +672 9 model.embedding_dim 0.0 +672 9 optimizer.lr 0.007548754297965566 +672 9 negative_sampler.num_negs_per_pos 27.0 +672 9 training.batch_size 0.0 +672 10 model.embedding_dim 0.0 +672 10 optimizer.lr 0.014967025056703023 +672 10 negative_sampler.num_negs_per_pos 41.0 +672 10 training.batch_size 1.0 +672 11 model.embedding_dim 0.0 +672 11 optimizer.lr 0.0026330827722868414 +672 11 negative_sampler.num_negs_per_pos 93.0 +672 11 training.batch_size 2.0 +672 12 model.embedding_dim 1.0 +672 12 optimizer.lr 0.0010641577830336024 +672 12 negative_sampler.num_negs_per_pos 43.0 +672 12 training.batch_size 1.0 +672 13 model.embedding_dim 2.0 +672 13 optimizer.lr 0.05962279237395176 +672 13 negative_sampler.num_negs_per_pos 44.0 +672 13 training.batch_size 1.0 +672 14 model.embedding_dim 1.0 +672 14 optimizer.lr 0.04430984860266861 +672 14 negative_sampler.num_negs_per_pos 60.0 +672 14 training.batch_size 0.0 +672 15 model.embedding_dim 2.0 +672 15 optimizer.lr 0.004660857519143169 +672 15 negative_sampler.num_negs_per_pos 33.0 +672 15 training.batch_size 2.0 +672 16 model.embedding_dim 1.0 +672 16 optimizer.lr 0.014955414052000906 +672 16 negative_sampler.num_negs_per_pos 13.0 +672 16 training.batch_size 0.0 +672 17 model.embedding_dim 2.0 +672 17 
optimizer.lr 0.02348872381171086 +672 17 negative_sampler.num_negs_per_pos 67.0 +672 17 training.batch_size 1.0 +672 18 model.embedding_dim 0.0 +672 18 optimizer.lr 0.021578092130976872 +672 18 negative_sampler.num_negs_per_pos 22.0 +672 18 training.batch_size 1.0 +672 19 model.embedding_dim 0.0 +672 19 optimizer.lr 0.09944318082803585 +672 19 negative_sampler.num_negs_per_pos 3.0 +672 19 training.batch_size 0.0 +672 20 model.embedding_dim 2.0 +672 20 optimizer.lr 0.0010981922329308532 +672 20 negative_sampler.num_negs_per_pos 59.0 +672 20 training.batch_size 1.0 +672 21 model.embedding_dim 2.0 +672 21 optimizer.lr 0.0018315644929094567 +672 21 negative_sampler.num_negs_per_pos 77.0 +672 21 training.batch_size 2.0 +672 22 model.embedding_dim 1.0 +672 22 optimizer.lr 0.0010127290363815835 +672 22 negative_sampler.num_negs_per_pos 77.0 +672 22 training.batch_size 1.0 +672 23 model.embedding_dim 0.0 +672 23 optimizer.lr 0.022698340654596607 +672 23 negative_sampler.num_negs_per_pos 94.0 +672 23 training.batch_size 0.0 +672 24 model.embedding_dim 2.0 +672 24 optimizer.lr 0.0017189230545394745 +672 24 negative_sampler.num_negs_per_pos 26.0 +672 24 training.batch_size 1.0 +672 25 model.embedding_dim 2.0 +672 25 optimizer.lr 0.04460097381441724 +672 25 negative_sampler.num_negs_per_pos 47.0 +672 25 training.batch_size 1.0 +672 26 model.embedding_dim 0.0 +672 26 optimizer.lr 0.03375542867641807 +672 26 negative_sampler.num_negs_per_pos 9.0 +672 26 training.batch_size 0.0 +672 27 model.embedding_dim 2.0 +672 27 optimizer.lr 0.017430956677491713 +672 27 negative_sampler.num_negs_per_pos 75.0 +672 27 training.batch_size 0.0 +672 28 model.embedding_dim 0.0 +672 28 optimizer.lr 0.007493944013005045 +672 28 negative_sampler.num_negs_per_pos 63.0 +672 28 training.batch_size 2.0 +672 29 model.embedding_dim 1.0 +672 29 optimizer.lr 0.0012882317045981764 +672 29 negative_sampler.num_negs_per_pos 47.0 +672 29 training.batch_size 1.0 +672 30 model.embedding_dim 0.0 +672 30 
optimizer.lr 0.02336934026609376 +672 30 negative_sampler.num_negs_per_pos 23.0 +672 30 training.batch_size 2.0 +672 31 model.embedding_dim 0.0 +672 31 optimizer.lr 0.04315495071063702 +672 31 negative_sampler.num_negs_per_pos 4.0 +672 31 training.batch_size 1.0 +672 32 model.embedding_dim 1.0 +672 32 optimizer.lr 0.009694599749683556 +672 32 negative_sampler.num_negs_per_pos 39.0 +672 32 training.batch_size 2.0 +672 33 model.embedding_dim 0.0 +672 33 optimizer.lr 0.04154966628176875 +672 33 negative_sampler.num_negs_per_pos 32.0 +672 33 training.batch_size 1.0 +672 34 model.embedding_dim 2.0 +672 34 optimizer.lr 0.001665880613790508 +672 34 negative_sampler.num_negs_per_pos 29.0 +672 34 training.batch_size 0.0 +672 35 model.embedding_dim 2.0 +672 35 optimizer.lr 0.01548903904355482 +672 35 negative_sampler.num_negs_per_pos 43.0 +672 35 training.batch_size 1.0 +672 36 model.embedding_dim 1.0 +672 36 optimizer.lr 0.0015180847400117728 +672 36 negative_sampler.num_negs_per_pos 82.0 +672 36 training.batch_size 2.0 +672 37 model.embedding_dim 1.0 +672 37 optimizer.lr 0.014428650360750686 +672 37 negative_sampler.num_negs_per_pos 15.0 +672 37 training.batch_size 0.0 +672 38 model.embedding_dim 2.0 +672 38 optimizer.lr 0.002365146379993325 +672 38 negative_sampler.num_negs_per_pos 21.0 +672 38 training.batch_size 0.0 +672 39 model.embedding_dim 1.0 +672 39 optimizer.lr 0.018403233196510316 +672 39 negative_sampler.num_negs_per_pos 89.0 +672 39 training.batch_size 0.0 +672 40 model.embedding_dim 1.0 +672 40 optimizer.lr 0.023860069021662655 +672 40 negative_sampler.num_negs_per_pos 59.0 +672 40 training.batch_size 1.0 +672 41 model.embedding_dim 0.0 +672 41 optimizer.lr 0.008302099447157575 +672 41 negative_sampler.num_negs_per_pos 46.0 +672 41 training.batch_size 2.0 +672 42 model.embedding_dim 0.0 +672 42 optimizer.lr 0.0011739840532091995 +672 42 negative_sampler.num_negs_per_pos 15.0 +672 42 training.batch_size 0.0 +672 43 model.embedding_dim 1.0 +672 43 optimizer.lr 
0.0013241581409617865 +672 43 negative_sampler.num_negs_per_pos 33.0 +672 43 training.batch_size 2.0 +672 44 model.embedding_dim 1.0 +672 44 optimizer.lr 0.05564452233169282 +672 44 negative_sampler.num_negs_per_pos 83.0 +672 44 training.batch_size 0.0 +672 45 model.embedding_dim 1.0 +672 45 optimizer.lr 0.08887745851331347 +672 45 negative_sampler.num_negs_per_pos 76.0 +672 45 training.batch_size 0.0 +672 46 model.embedding_dim 0.0 +672 46 optimizer.lr 0.07058115972545519 +672 46 negative_sampler.num_negs_per_pos 68.0 +672 46 training.batch_size 1.0 +672 47 model.embedding_dim 2.0 +672 47 optimizer.lr 0.01400594940787426 +672 47 negative_sampler.num_negs_per_pos 96.0 +672 47 training.batch_size 2.0 +672 48 model.embedding_dim 1.0 +672 48 optimizer.lr 0.03624107011110422 +672 48 negative_sampler.num_negs_per_pos 70.0 +672 48 training.batch_size 0.0 +672 49 model.embedding_dim 0.0 +672 49 optimizer.lr 0.0016800158619707 +672 49 negative_sampler.num_negs_per_pos 96.0 +672 49 training.batch_size 2.0 +672 50 model.embedding_dim 0.0 +672 50 optimizer.lr 0.015923036532204806 +672 50 negative_sampler.num_negs_per_pos 0.0 +672 50 training.batch_size 2.0 +672 51 model.embedding_dim 2.0 +672 51 optimizer.lr 0.002302220267022791 +672 51 negative_sampler.num_negs_per_pos 19.0 +672 51 training.batch_size 0.0 +672 52 model.embedding_dim 1.0 +672 52 optimizer.lr 0.04650875074343501 +672 52 negative_sampler.num_negs_per_pos 20.0 +672 52 training.batch_size 0.0 +672 53 model.embedding_dim 1.0 +672 53 optimizer.lr 0.003965946291040464 +672 53 negative_sampler.num_negs_per_pos 5.0 +672 53 training.batch_size 0.0 +672 54 model.embedding_dim 2.0 +672 54 optimizer.lr 0.00814197672726852 +672 54 negative_sampler.num_negs_per_pos 54.0 +672 54 training.batch_size 2.0 +672 55 model.embedding_dim 2.0 +672 55 optimizer.lr 0.08093137560184195 +672 55 negative_sampler.num_negs_per_pos 67.0 +672 55 training.batch_size 0.0 +672 56 model.embedding_dim 1.0 +672 56 optimizer.lr 0.04677129234228507 
+672 56 negative_sampler.num_negs_per_pos 41.0 +672 56 training.batch_size 1.0 +672 57 model.embedding_dim 2.0 +672 57 optimizer.lr 0.004076879261840909 +672 57 negative_sampler.num_negs_per_pos 23.0 +672 57 training.batch_size 0.0 +672 58 model.embedding_dim 2.0 +672 58 optimizer.lr 0.021408227984966163 +672 58 negative_sampler.num_negs_per_pos 4.0 +672 58 training.batch_size 1.0 +672 59 model.embedding_dim 2.0 +672 59 optimizer.lr 0.003885828074271966 +672 59 negative_sampler.num_negs_per_pos 36.0 +672 59 training.batch_size 2.0 +672 60 model.embedding_dim 1.0 +672 60 optimizer.lr 0.03442471740894696 +672 60 negative_sampler.num_negs_per_pos 64.0 +672 60 training.batch_size 1.0 +672 61 model.embedding_dim 0.0 +672 61 optimizer.lr 0.04296699221462842 +672 61 negative_sampler.num_negs_per_pos 16.0 +672 61 training.batch_size 1.0 +672 62 model.embedding_dim 0.0 +672 62 optimizer.lr 0.011691759954884182 +672 62 negative_sampler.num_negs_per_pos 32.0 +672 62 training.batch_size 2.0 +672 63 model.embedding_dim 2.0 +672 63 optimizer.lr 0.031542993647614204 +672 63 negative_sampler.num_negs_per_pos 21.0 +672 63 training.batch_size 2.0 +672 64 model.embedding_dim 1.0 +672 64 optimizer.lr 0.019657444492303108 +672 64 negative_sampler.num_negs_per_pos 94.0 +672 64 training.batch_size 2.0 +672 65 model.embedding_dim 2.0 +672 65 optimizer.lr 0.004750114061116922 +672 65 negative_sampler.num_negs_per_pos 80.0 +672 65 training.batch_size 0.0 +672 66 model.embedding_dim 1.0 +672 66 optimizer.lr 0.05114055557493518 +672 66 negative_sampler.num_negs_per_pos 63.0 +672 66 training.batch_size 0.0 +672 67 model.embedding_dim 0.0 +672 67 optimizer.lr 0.0032922021976000294 +672 67 negative_sampler.num_negs_per_pos 81.0 +672 67 training.batch_size 1.0 +672 68 model.embedding_dim 1.0 +672 68 optimizer.lr 0.002183184735863121 +672 68 negative_sampler.num_negs_per_pos 85.0 +672 68 training.batch_size 2.0 +672 69 model.embedding_dim 1.0 +672 69 optimizer.lr 0.0017045362152972717 +672 69 
negative_sampler.num_negs_per_pos 18.0 +672 69 training.batch_size 1.0 +672 70 model.embedding_dim 1.0 +672 70 optimizer.lr 0.05787028222961666 +672 70 negative_sampler.num_negs_per_pos 86.0 +672 70 training.batch_size 2.0 +672 71 model.embedding_dim 0.0 +672 71 optimizer.lr 0.002606064735096244 +672 71 negative_sampler.num_negs_per_pos 51.0 +672 71 training.batch_size 2.0 +672 72 model.embedding_dim 2.0 +672 72 optimizer.lr 0.006348914273368517 +672 72 negative_sampler.num_negs_per_pos 10.0 +672 72 training.batch_size 1.0 +672 73 model.embedding_dim 1.0 +672 73 optimizer.lr 0.0622790438940288 +672 73 negative_sampler.num_negs_per_pos 19.0 +672 73 training.batch_size 1.0 +672 74 model.embedding_dim 1.0 +672 74 optimizer.lr 0.07613745215821253 +672 74 negative_sampler.num_negs_per_pos 87.0 +672 74 training.batch_size 0.0 +672 75 model.embedding_dim 0.0 +672 75 optimizer.lr 0.001456326420102526 +672 75 negative_sampler.num_negs_per_pos 98.0 +672 75 training.batch_size 0.0 +672 76 model.embedding_dim 0.0 +672 76 optimizer.lr 0.006836382732447582 +672 76 negative_sampler.num_negs_per_pos 88.0 +672 76 training.batch_size 0.0 +672 77 model.embedding_dim 2.0 +672 77 optimizer.lr 0.009912593126516912 +672 77 negative_sampler.num_negs_per_pos 56.0 +672 77 training.batch_size 2.0 +672 78 model.embedding_dim 2.0 +672 78 optimizer.lr 0.009465933827263805 +672 78 negative_sampler.num_negs_per_pos 18.0 +672 78 training.batch_size 1.0 +672 79 model.embedding_dim 0.0 +672 79 optimizer.lr 0.037905689331372 +672 79 negative_sampler.num_negs_per_pos 18.0 +672 79 training.batch_size 0.0 +672 80 model.embedding_dim 0.0 +672 80 optimizer.lr 0.0380067588325102 +672 80 negative_sampler.num_negs_per_pos 83.0 +672 80 training.batch_size 2.0 +672 81 model.embedding_dim 2.0 +672 81 optimizer.lr 0.0014398094722489225 +672 81 negative_sampler.num_negs_per_pos 53.0 +672 81 training.batch_size 0.0 +672 82 model.embedding_dim 1.0 +672 82 optimizer.lr 0.007705896796338173 +672 82 
negative_sampler.num_negs_per_pos 33.0 +672 82 training.batch_size 2.0 +672 83 model.embedding_dim 0.0 +672 83 optimizer.lr 0.004248691081407384 +672 83 negative_sampler.num_negs_per_pos 99.0 +672 83 training.batch_size 1.0 +672 84 model.embedding_dim 0.0 +672 84 optimizer.lr 0.014767068460740456 +672 84 negative_sampler.num_negs_per_pos 62.0 +672 84 training.batch_size 1.0 +672 85 model.embedding_dim 2.0 +672 85 optimizer.lr 0.00543908964541781 +672 85 negative_sampler.num_negs_per_pos 15.0 +672 85 training.batch_size 2.0 +672 86 model.embedding_dim 0.0 +672 86 optimizer.lr 0.007334890392594617 +672 86 negative_sampler.num_negs_per_pos 24.0 +672 86 training.batch_size 1.0 +672 87 model.embedding_dim 1.0 +672 87 optimizer.lr 0.01082678393006801 +672 87 negative_sampler.num_negs_per_pos 62.0 +672 87 training.batch_size 2.0 +672 88 model.embedding_dim 0.0 +672 88 optimizer.lr 0.01583971252883993 +672 88 negative_sampler.num_negs_per_pos 70.0 +672 88 training.batch_size 1.0 +672 89 model.embedding_dim 2.0 +672 89 optimizer.lr 0.0016639301498155895 +672 89 negative_sampler.num_negs_per_pos 7.0 +672 89 training.batch_size 0.0 +672 90 model.embedding_dim 2.0 +672 90 optimizer.lr 0.06092788897607775 +672 90 negative_sampler.num_negs_per_pos 35.0 +672 90 training.batch_size 0.0 +672 91 model.embedding_dim 2.0 +672 91 optimizer.lr 0.0027740370942848733 +672 91 negative_sampler.num_negs_per_pos 86.0 +672 91 training.batch_size 0.0 +672 92 model.embedding_dim 2.0 +672 92 optimizer.lr 0.006464033890803899 +672 92 negative_sampler.num_negs_per_pos 2.0 +672 92 training.batch_size 0.0 +672 93 model.embedding_dim 2.0 +672 93 optimizer.lr 0.0012189443739902904 +672 93 negative_sampler.num_negs_per_pos 85.0 +672 93 training.batch_size 2.0 +672 94 model.embedding_dim 1.0 +672 94 optimizer.lr 0.0761610006929221 +672 94 negative_sampler.num_negs_per_pos 88.0 +672 94 training.batch_size 0.0 +672 95 model.embedding_dim 2.0 +672 95 optimizer.lr 0.0169241607365587 +672 95 
negative_sampler.num_negs_per_pos 85.0 +672 95 training.batch_size 0.0 +672 96 model.embedding_dim 0.0 +672 96 optimizer.lr 0.03829562734064787 +672 96 negative_sampler.num_negs_per_pos 13.0 +672 96 training.batch_size 1.0 +672 97 model.embedding_dim 0.0 +672 97 optimizer.lr 0.00853455742622022 +672 97 negative_sampler.num_negs_per_pos 75.0 +672 97 training.batch_size 1.0 +672 98 model.embedding_dim 0.0 +672 98 optimizer.lr 0.0010422092651097964 +672 98 negative_sampler.num_negs_per_pos 42.0 +672 98 training.batch_size 2.0 +672 99 model.embedding_dim 0.0 +672 99 optimizer.lr 0.002273410873213304 +672 99 negative_sampler.num_negs_per_pos 54.0 +672 99 training.batch_size 1.0 +672 100 model.embedding_dim 1.0 +672 100 optimizer.lr 0.06702935093169425 +672 100 negative_sampler.num_negs_per_pos 90.0 +672 100 training.batch_size 0.0 +672 1 dataset """kinships""" +672 1 model """simple""" +672 1 loss """softplus""" +672 1 regularizer """no""" +672 1 optimizer """adam""" +672 1 training_loop """owa""" +672 1 negative_sampler """basic""" +672 1 evaluator """rankbased""" +672 2 dataset """kinships""" +672 2 model """simple""" +672 2 loss """softplus""" +672 2 regularizer """no""" +672 2 optimizer """adam""" +672 2 training_loop """owa""" +672 2 negative_sampler """basic""" +672 2 evaluator """rankbased""" +672 3 dataset """kinships""" +672 3 model """simple""" +672 3 loss """softplus""" +672 3 regularizer """no""" +672 3 optimizer """adam""" +672 3 training_loop """owa""" +672 3 negative_sampler """basic""" +672 3 evaluator """rankbased""" +672 4 dataset """kinships""" +672 4 model """simple""" +672 4 loss """softplus""" +672 4 regularizer """no""" +672 4 optimizer """adam""" +672 4 training_loop """owa""" +672 4 negative_sampler """basic""" +672 4 evaluator """rankbased""" +672 5 dataset """kinships""" +672 5 model """simple""" +672 5 loss """softplus""" +672 5 regularizer """no""" +672 5 optimizer """adam""" +672 5 training_loop """owa""" +672 5 negative_sampler """basic""" 
+672 5 evaluator """rankbased""" +672 6 dataset """kinships""" +672 6 model """simple""" +672 6 loss """softplus""" +672 6 regularizer """no""" +672 6 optimizer """adam""" +672 6 training_loop """owa""" +672 6 negative_sampler """basic""" +672 6 evaluator """rankbased""" +672 7 dataset """kinships""" +672 7 model """simple""" +672 7 loss """softplus""" +672 7 regularizer """no""" +672 7 optimizer """adam""" +672 7 training_loop """owa""" +672 7 negative_sampler """basic""" +672 7 evaluator """rankbased""" +672 8 dataset """kinships""" +672 8 model """simple""" +672 8 loss """softplus""" +672 8 regularizer """no""" +672 8 optimizer """adam""" +672 8 training_loop """owa""" +672 8 negative_sampler """basic""" +672 8 evaluator """rankbased""" +672 9 dataset """kinships""" +672 9 model """simple""" +672 9 loss """softplus""" +672 9 regularizer """no""" +672 9 optimizer """adam""" +672 9 training_loop """owa""" +672 9 negative_sampler """basic""" +672 9 evaluator """rankbased""" +672 10 dataset """kinships""" +672 10 model """simple""" +672 10 loss """softplus""" +672 10 regularizer """no""" +672 10 optimizer """adam""" +672 10 training_loop """owa""" +672 10 negative_sampler """basic""" +672 10 evaluator """rankbased""" +672 11 dataset """kinships""" +672 11 model """simple""" +672 11 loss """softplus""" +672 11 regularizer """no""" +672 11 optimizer """adam""" +672 11 training_loop """owa""" +672 11 negative_sampler """basic""" +672 11 evaluator """rankbased""" +672 12 dataset """kinships""" +672 12 model """simple""" +672 12 loss """softplus""" +672 12 regularizer """no""" +672 12 optimizer """adam""" +672 12 training_loop """owa""" +672 12 negative_sampler """basic""" +672 12 evaluator """rankbased""" +672 13 dataset """kinships""" +672 13 model """simple""" +672 13 loss """softplus""" +672 13 regularizer """no""" +672 13 optimizer """adam""" +672 13 training_loop """owa""" +672 13 negative_sampler """basic""" +672 13 evaluator """rankbased""" +672 14 dataset 
"""kinships""" +672 14 model """simple""" +672 14 loss """softplus""" +672 14 regularizer """no""" +672 14 optimizer """adam""" +672 14 training_loop """owa""" +672 14 negative_sampler """basic""" +672 14 evaluator """rankbased""" +672 15 dataset """kinships""" +672 15 model """simple""" +672 15 loss """softplus""" +672 15 regularizer """no""" +672 15 optimizer """adam""" +672 15 training_loop """owa""" +672 15 negative_sampler """basic""" +672 15 evaluator """rankbased""" +672 16 dataset """kinships""" +672 16 model """simple""" +672 16 loss """softplus""" +672 16 regularizer """no""" +672 16 optimizer """adam""" +672 16 training_loop """owa""" +672 16 negative_sampler """basic""" +672 16 evaluator """rankbased""" +672 17 dataset """kinships""" +672 17 model """simple""" +672 17 loss """softplus""" +672 17 regularizer """no""" +672 17 optimizer """adam""" +672 17 training_loop """owa""" +672 17 negative_sampler """basic""" +672 17 evaluator """rankbased""" +672 18 dataset """kinships""" +672 18 model """simple""" +672 18 loss """softplus""" +672 18 regularizer """no""" +672 18 optimizer """adam""" +672 18 training_loop """owa""" +672 18 negative_sampler """basic""" +672 18 evaluator """rankbased""" +672 19 dataset """kinships""" +672 19 model """simple""" +672 19 loss """softplus""" +672 19 regularizer """no""" +672 19 optimizer """adam""" +672 19 training_loop """owa""" +672 19 negative_sampler """basic""" +672 19 evaluator """rankbased""" +672 20 dataset """kinships""" +672 20 model """simple""" +672 20 loss """softplus""" +672 20 regularizer """no""" +672 20 optimizer """adam""" +672 20 training_loop """owa""" +672 20 negative_sampler """basic""" +672 20 evaluator """rankbased""" +672 21 dataset """kinships""" +672 21 model """simple""" +672 21 loss """softplus""" +672 21 regularizer """no""" +672 21 optimizer """adam""" +672 21 training_loop """owa""" +672 21 negative_sampler """basic""" +672 21 evaluator """rankbased""" +672 22 dataset """kinships""" +672 22 
model """simple""" +672 22 loss """softplus""" +672 22 regularizer """no""" +672 22 optimizer """adam""" +672 22 training_loop """owa""" +672 22 negative_sampler """basic""" +672 22 evaluator """rankbased""" +672 23 dataset """kinships""" +672 23 model """simple""" +672 23 loss """softplus""" +672 23 regularizer """no""" +672 23 optimizer """adam""" +672 23 training_loop """owa""" +672 23 negative_sampler """basic""" +672 23 evaluator """rankbased""" +672 24 dataset """kinships""" +672 24 model """simple""" +672 24 loss """softplus""" +672 24 regularizer """no""" +672 24 optimizer """adam""" +672 24 training_loop """owa""" +672 24 negative_sampler """basic""" +672 24 evaluator """rankbased""" +672 25 dataset """kinships""" +672 25 model """simple""" +672 25 loss """softplus""" +672 25 regularizer """no""" +672 25 optimizer """adam""" +672 25 training_loop """owa""" +672 25 negative_sampler """basic""" +672 25 evaluator """rankbased""" +672 26 dataset """kinships""" +672 26 model """simple""" +672 26 loss """softplus""" +672 26 regularizer """no""" +672 26 optimizer """adam""" +672 26 training_loop """owa""" +672 26 negative_sampler """basic""" +672 26 evaluator """rankbased""" +672 27 dataset """kinships""" +672 27 model """simple""" +672 27 loss """softplus""" +672 27 regularizer """no""" +672 27 optimizer """adam""" +672 27 training_loop """owa""" +672 27 negative_sampler """basic""" +672 27 evaluator """rankbased""" +672 28 dataset """kinships""" +672 28 model """simple""" +672 28 loss """softplus""" +672 28 regularizer """no""" +672 28 optimizer """adam""" +672 28 training_loop """owa""" +672 28 negative_sampler """basic""" +672 28 evaluator """rankbased""" +672 29 dataset """kinships""" +672 29 model """simple""" +672 29 loss """softplus""" +672 29 regularizer """no""" +672 29 optimizer """adam""" +672 29 training_loop """owa""" +672 29 negative_sampler """basic""" +672 29 evaluator """rankbased""" +672 30 dataset """kinships""" +672 30 model """simple""" +672 
30 loss """softplus""" +672 30 regularizer """no""" +672 30 optimizer """adam""" +672 30 training_loop """owa""" +672 30 negative_sampler """basic""" +672 30 evaluator """rankbased""" +672 31 dataset """kinships""" +672 31 model """simple""" +672 31 loss """softplus""" +672 31 regularizer """no""" +672 31 optimizer """adam""" +672 31 training_loop """owa""" +672 31 negative_sampler """basic""" +672 31 evaluator """rankbased""" +672 32 dataset """kinships""" +672 32 model """simple""" +672 32 loss """softplus""" +672 32 regularizer """no""" +672 32 optimizer """adam""" +672 32 training_loop """owa""" +672 32 negative_sampler """basic""" +672 32 evaluator """rankbased""" +672 33 dataset """kinships""" +672 33 model """simple""" +672 33 loss """softplus""" +672 33 regularizer """no""" +672 33 optimizer """adam""" +672 33 training_loop """owa""" +672 33 negative_sampler """basic""" +672 33 evaluator """rankbased""" +672 34 dataset """kinships""" +672 34 model """simple""" +672 34 loss """softplus""" +672 34 regularizer """no""" +672 34 optimizer """adam""" +672 34 training_loop """owa""" +672 34 negative_sampler """basic""" +672 34 evaluator """rankbased""" +672 35 dataset """kinships""" +672 35 model """simple""" +672 35 loss """softplus""" +672 35 regularizer """no""" +672 35 optimizer """adam""" +672 35 training_loop """owa""" +672 35 negative_sampler """basic""" +672 35 evaluator """rankbased""" +672 36 dataset """kinships""" +672 36 model """simple""" +672 36 loss """softplus""" +672 36 regularizer """no""" +672 36 optimizer """adam""" +672 36 training_loop """owa""" +672 36 negative_sampler """basic""" +672 36 evaluator """rankbased""" +672 37 dataset """kinships""" +672 37 model """simple""" +672 37 loss """softplus""" +672 37 regularizer """no""" +672 37 optimizer """adam""" +672 37 training_loop """owa""" +672 37 negative_sampler """basic""" +672 37 evaluator """rankbased""" +672 38 dataset """kinships""" +672 38 model """simple""" +672 38 loss """softplus""" 
+672 38 regularizer """no""" +672 38 optimizer """adam""" +672 38 training_loop """owa""" +672 38 negative_sampler """basic""" +672 38 evaluator """rankbased""" +672 39 dataset """kinships""" +672 39 model """simple""" +672 39 loss """softplus""" +672 39 regularizer """no""" +672 39 optimizer """adam""" +672 39 training_loop """owa""" +672 39 negative_sampler """basic""" +672 39 evaluator """rankbased""" +672 40 dataset """kinships""" +672 40 model """simple""" +672 40 loss """softplus""" +672 40 regularizer """no""" +672 40 optimizer """adam""" +672 40 training_loop """owa""" +672 40 negative_sampler """basic""" +672 40 evaluator """rankbased""" +672 41 dataset """kinships""" +672 41 model """simple""" +672 41 loss """softplus""" +672 41 regularizer """no""" +672 41 optimizer """adam""" +672 41 training_loop """owa""" +672 41 negative_sampler """basic""" +672 41 evaluator """rankbased""" +672 42 dataset """kinships""" +672 42 model """simple""" +672 42 loss """softplus""" +672 42 regularizer """no""" +672 42 optimizer """adam""" +672 42 training_loop """owa""" +672 42 negative_sampler """basic""" +672 42 evaluator """rankbased""" +672 43 dataset """kinships""" +672 43 model """simple""" +672 43 loss """softplus""" +672 43 regularizer """no""" +672 43 optimizer """adam""" +672 43 training_loop """owa""" +672 43 negative_sampler """basic""" +672 43 evaluator """rankbased""" +672 44 dataset """kinships""" +672 44 model """simple""" +672 44 loss """softplus""" +672 44 regularizer """no""" +672 44 optimizer """adam""" +672 44 training_loop """owa""" +672 44 negative_sampler """basic""" +672 44 evaluator """rankbased""" +672 45 dataset """kinships""" +672 45 model """simple""" +672 45 loss """softplus""" +672 45 regularizer """no""" +672 45 optimizer """adam""" +672 45 training_loop """owa""" +672 45 negative_sampler """basic""" +672 45 evaluator """rankbased""" +672 46 dataset """kinships""" +672 46 model """simple""" +672 46 loss """softplus""" +672 46 regularizer 
"""no""" +672 46 optimizer """adam""" +672 46 training_loop """owa""" +672 46 negative_sampler """basic""" +672 46 evaluator """rankbased""" +672 47 dataset """kinships""" +672 47 model """simple""" +672 47 loss """softplus""" +672 47 regularizer """no""" +672 47 optimizer """adam""" +672 47 training_loop """owa""" +672 47 negative_sampler """basic""" +672 47 evaluator """rankbased""" +672 48 dataset """kinships""" +672 48 model """simple""" +672 48 loss """softplus""" +672 48 regularizer """no""" +672 48 optimizer """adam""" +672 48 training_loop """owa""" +672 48 negative_sampler """basic""" +672 48 evaluator """rankbased""" +672 49 dataset """kinships""" +672 49 model """simple""" +672 49 loss """softplus""" +672 49 regularizer """no""" +672 49 optimizer """adam""" +672 49 training_loop """owa""" +672 49 negative_sampler """basic""" +672 49 evaluator """rankbased""" +672 50 dataset """kinships""" +672 50 model """simple""" +672 50 loss """softplus""" +672 50 regularizer """no""" +672 50 optimizer """adam""" +672 50 training_loop """owa""" +672 50 negative_sampler """basic""" +672 50 evaluator """rankbased""" +672 51 dataset """kinships""" +672 51 model """simple""" +672 51 loss """softplus""" +672 51 regularizer """no""" +672 51 optimizer """adam""" +672 51 training_loop """owa""" +672 51 negative_sampler """basic""" +672 51 evaluator """rankbased""" +672 52 dataset """kinships""" +672 52 model """simple""" +672 52 loss """softplus""" +672 52 regularizer """no""" +672 52 optimizer """adam""" +672 52 training_loop """owa""" +672 52 negative_sampler """basic""" +672 52 evaluator """rankbased""" +672 53 dataset """kinships""" +672 53 model """simple""" +672 53 loss """softplus""" +672 53 regularizer """no""" +672 53 optimizer """adam""" +672 53 training_loop """owa""" +672 53 negative_sampler """basic""" +672 53 evaluator """rankbased""" +672 54 dataset """kinships""" +672 54 model """simple""" +672 54 loss """softplus""" +672 54 regularizer """no""" +672 54 
optimizer """adam""" +672 54 training_loop """owa""" +672 54 negative_sampler """basic""" +672 54 evaluator """rankbased""" +672 55 dataset """kinships""" +672 55 model """simple""" +672 55 loss """softplus""" +672 55 regularizer """no""" +672 55 optimizer """adam""" +672 55 training_loop """owa""" +672 55 negative_sampler """basic""" +672 55 evaluator """rankbased""" +672 56 dataset """kinships""" +672 56 model """simple""" +672 56 loss """softplus""" +672 56 regularizer """no""" +672 56 optimizer """adam""" +672 56 training_loop """owa""" +672 56 negative_sampler """basic""" +672 56 evaluator """rankbased""" +672 57 dataset """kinships""" +672 57 model """simple""" +672 57 loss """softplus""" +672 57 regularizer """no""" +672 57 optimizer """adam""" +672 57 training_loop """owa""" +672 57 negative_sampler """basic""" +672 57 evaluator """rankbased""" +672 58 dataset """kinships""" +672 58 model """simple""" +672 58 loss """softplus""" +672 58 regularizer """no""" +672 58 optimizer """adam""" +672 58 training_loop """owa""" +672 58 negative_sampler """basic""" +672 58 evaluator """rankbased""" +672 59 dataset """kinships""" +672 59 model """simple""" +672 59 loss """softplus""" +672 59 regularizer """no""" +672 59 optimizer """adam""" +672 59 training_loop """owa""" +672 59 negative_sampler """basic""" +672 59 evaluator """rankbased""" +672 60 dataset """kinships""" +672 60 model """simple""" +672 60 loss """softplus""" +672 60 regularizer """no""" +672 60 optimizer """adam""" +672 60 training_loop """owa""" +672 60 negative_sampler """basic""" +672 60 evaluator """rankbased""" +672 61 dataset """kinships""" +672 61 model """simple""" +672 61 loss """softplus""" +672 61 regularizer """no""" +672 61 optimizer """adam""" +672 61 training_loop """owa""" +672 61 negative_sampler """basic""" +672 61 evaluator """rankbased""" +672 62 dataset """kinships""" +672 62 model """simple""" +672 62 loss """softplus""" +672 62 regularizer """no""" +672 62 optimizer """adam""" 
+672 62 training_loop """owa""" +672 62 negative_sampler """basic""" +672 62 evaluator """rankbased""" +672 63 dataset """kinships""" +672 63 model """simple""" +672 63 loss """softplus""" +672 63 regularizer """no""" +672 63 optimizer """adam""" +672 63 training_loop """owa""" +672 63 negative_sampler """basic""" +672 63 evaluator """rankbased""" +672 64 dataset """kinships""" +672 64 model """simple""" +672 64 loss """softplus""" +672 64 regularizer """no""" +672 64 optimizer """adam""" +672 64 training_loop """owa""" +672 64 negative_sampler """basic""" +672 64 evaluator """rankbased""" +672 65 dataset """kinships""" +672 65 model """simple""" +672 65 loss """softplus""" +672 65 regularizer """no""" +672 65 optimizer """adam""" +672 65 training_loop """owa""" +672 65 negative_sampler """basic""" +672 65 evaluator """rankbased""" +672 66 dataset """kinships""" +672 66 model """simple""" +672 66 loss """softplus""" +672 66 regularizer """no""" +672 66 optimizer """adam""" +672 66 training_loop """owa""" +672 66 negative_sampler """basic""" +672 66 evaluator """rankbased""" +672 67 dataset """kinships""" +672 67 model """simple""" +672 67 loss """softplus""" +672 67 regularizer """no""" +672 67 optimizer """adam""" +672 67 training_loop """owa""" +672 67 negative_sampler """basic""" +672 67 evaluator """rankbased""" +672 68 dataset """kinships""" +672 68 model """simple""" +672 68 loss """softplus""" +672 68 regularizer """no""" +672 68 optimizer """adam""" +672 68 training_loop """owa""" +672 68 negative_sampler """basic""" +672 68 evaluator """rankbased""" +672 69 dataset """kinships""" +672 69 model """simple""" +672 69 loss """softplus""" +672 69 regularizer """no""" +672 69 optimizer """adam""" +672 69 training_loop """owa""" +672 69 negative_sampler """basic""" +672 69 evaluator """rankbased""" +672 70 dataset """kinships""" +672 70 model """simple""" +672 70 loss """softplus""" +672 70 regularizer """no""" +672 70 optimizer """adam""" +672 70 training_loop 
"""owa""" +672 70 negative_sampler """basic""" +672 70 evaluator """rankbased""" +672 71 dataset """kinships""" +672 71 model """simple""" +672 71 loss """softplus""" +672 71 regularizer """no""" +672 71 optimizer """adam""" +672 71 training_loop """owa""" +672 71 negative_sampler """basic""" +672 71 evaluator """rankbased""" +672 72 dataset """kinships""" +672 72 model """simple""" +672 72 loss """softplus""" +672 72 regularizer """no""" +672 72 optimizer """adam""" +672 72 training_loop """owa""" +672 72 negative_sampler """basic""" +672 72 evaluator """rankbased""" +672 73 dataset """kinships""" +672 73 model """simple""" +672 73 loss """softplus""" +672 73 regularizer """no""" +672 73 optimizer """adam""" +672 73 training_loop """owa""" +672 73 negative_sampler """basic""" +672 73 evaluator """rankbased""" +672 74 dataset """kinships""" +672 74 model """simple""" +672 74 loss """softplus""" +672 74 regularizer """no""" +672 74 optimizer """adam""" +672 74 training_loop """owa""" +672 74 negative_sampler """basic""" +672 74 evaluator """rankbased""" +672 75 dataset """kinships""" +672 75 model """simple""" +672 75 loss """softplus""" +672 75 regularizer """no""" +672 75 optimizer """adam""" +672 75 training_loop """owa""" +672 75 negative_sampler """basic""" +672 75 evaluator """rankbased""" +672 76 dataset """kinships""" +672 76 model """simple""" +672 76 loss """softplus""" +672 76 regularizer """no""" +672 76 optimizer """adam""" +672 76 training_loop """owa""" +672 76 negative_sampler """basic""" +672 76 evaluator """rankbased""" +672 77 dataset """kinships""" +672 77 model """simple""" +672 77 loss """softplus""" +672 77 regularizer """no""" +672 77 optimizer """adam""" +672 77 training_loop """owa""" +672 77 negative_sampler """basic""" +672 77 evaluator """rankbased""" +672 78 dataset """kinships""" +672 78 model """simple""" +672 78 loss """softplus""" +672 78 regularizer """no""" +672 78 optimizer """adam""" +672 78 training_loop """owa""" +672 78 
negative_sampler """basic""" +672 78 evaluator """rankbased""" +672 79 dataset """kinships""" +672 79 model """simple""" +672 79 loss """softplus""" +672 79 regularizer """no""" +672 79 optimizer """adam""" +672 79 training_loop """owa""" +672 79 negative_sampler """basic""" +672 79 evaluator """rankbased""" +672 80 dataset """kinships""" +672 80 model """simple""" +672 80 loss """softplus""" +672 80 regularizer """no""" +672 80 optimizer """adam""" +672 80 training_loop """owa""" +672 80 negative_sampler """basic""" +672 80 evaluator """rankbased""" +672 81 dataset """kinships""" +672 81 model """simple""" +672 81 loss """softplus""" +672 81 regularizer """no""" +672 81 optimizer """adam""" +672 81 training_loop """owa""" +672 81 negative_sampler """basic""" +672 81 evaluator """rankbased""" +672 82 dataset """kinships""" +672 82 model """simple""" +672 82 loss """softplus""" +672 82 regularizer """no""" +672 82 optimizer """adam""" +672 82 training_loop """owa""" +672 82 negative_sampler """basic""" +672 82 evaluator """rankbased""" +672 83 dataset """kinships""" +672 83 model """simple""" +672 83 loss """softplus""" +672 83 regularizer """no""" +672 83 optimizer """adam""" +672 83 training_loop """owa""" +672 83 negative_sampler """basic""" +672 83 evaluator """rankbased""" +672 84 dataset """kinships""" +672 84 model """simple""" +672 84 loss """softplus""" +672 84 regularizer """no""" +672 84 optimizer """adam""" +672 84 training_loop """owa""" +672 84 negative_sampler """basic""" +672 84 evaluator """rankbased""" +672 85 dataset """kinships""" +672 85 model """simple""" +672 85 loss """softplus""" +672 85 regularizer """no""" +672 85 optimizer """adam""" +672 85 training_loop """owa""" +672 85 negative_sampler """basic""" +672 85 evaluator """rankbased""" +672 86 dataset """kinships""" +672 86 model """simple""" +672 86 loss """softplus""" +672 86 regularizer """no""" +672 86 optimizer """adam""" +672 86 training_loop """owa""" +672 86 negative_sampler 
"""basic""" +672 86 evaluator """rankbased""" +672 87 dataset """kinships""" +672 87 model """simple""" +672 87 loss """softplus""" +672 87 regularizer """no""" +672 87 optimizer """adam""" +672 87 training_loop """owa""" +672 87 negative_sampler """basic""" +672 87 evaluator """rankbased""" +672 88 dataset """kinships""" +672 88 model """simple""" +672 88 loss """softplus""" +672 88 regularizer """no""" +672 88 optimizer """adam""" +672 88 training_loop """owa""" +672 88 negative_sampler """basic""" +672 88 evaluator """rankbased""" +672 89 dataset """kinships""" +672 89 model """simple""" +672 89 loss """softplus""" +672 89 regularizer """no""" +672 89 optimizer """adam""" +672 89 training_loop """owa""" +672 89 negative_sampler """basic""" +672 89 evaluator """rankbased""" +672 90 dataset """kinships""" +672 90 model """simple""" +672 90 loss """softplus""" +672 90 regularizer """no""" +672 90 optimizer """adam""" +672 90 training_loop """owa""" +672 90 negative_sampler """basic""" +672 90 evaluator """rankbased""" +672 91 dataset """kinships""" +672 91 model """simple""" +672 91 loss """softplus""" +672 91 regularizer """no""" +672 91 optimizer """adam""" +672 91 training_loop """owa""" +672 91 negative_sampler """basic""" +672 91 evaluator """rankbased""" +672 92 dataset """kinships""" +672 92 model """simple""" +672 92 loss """softplus""" +672 92 regularizer """no""" +672 92 optimizer """adam""" +672 92 training_loop """owa""" +672 92 negative_sampler """basic""" +672 92 evaluator """rankbased""" +672 93 dataset """kinships""" +672 93 model """simple""" +672 93 loss """softplus""" +672 93 regularizer """no""" +672 93 optimizer """adam""" +672 93 training_loop """owa""" +672 93 negative_sampler """basic""" +672 93 evaluator """rankbased""" +672 94 dataset """kinships""" +672 94 model """simple""" +672 94 loss """softplus""" +672 94 regularizer """no""" +672 94 optimizer """adam""" +672 94 training_loop """owa""" +672 94 negative_sampler """basic""" +672 94 
evaluator """rankbased""" +672 95 dataset """kinships""" +672 95 model """simple""" +672 95 loss """softplus""" +672 95 regularizer """no""" +672 95 optimizer """adam""" +672 95 training_loop """owa""" +672 95 negative_sampler """basic""" +672 95 evaluator """rankbased""" +672 96 dataset """kinships""" +672 96 model """simple""" +672 96 loss """softplus""" +672 96 regularizer """no""" +672 96 optimizer """adam""" +672 96 training_loop """owa""" +672 96 negative_sampler """basic""" +672 96 evaluator """rankbased""" +672 97 dataset """kinships""" +672 97 model """simple""" +672 97 loss """softplus""" +672 97 regularizer """no""" +672 97 optimizer """adam""" +672 97 training_loop """owa""" +672 97 negative_sampler """basic""" +672 97 evaluator """rankbased""" +672 98 dataset """kinships""" +672 98 model """simple""" +672 98 loss """softplus""" +672 98 regularizer """no""" +672 98 optimizer """adam""" +672 98 training_loop """owa""" +672 98 negative_sampler """basic""" +672 98 evaluator """rankbased""" +672 99 dataset """kinships""" +672 99 model """simple""" +672 99 loss """softplus""" +672 99 regularizer """no""" +672 99 optimizer """adam""" +672 99 training_loop """owa""" +672 99 negative_sampler """basic""" +672 99 evaluator """rankbased""" +672 100 dataset """kinships""" +672 100 model """simple""" +672 100 loss """softplus""" +672 100 regularizer """no""" +672 100 optimizer """adam""" +672 100 training_loop """owa""" +672 100 negative_sampler """basic""" +672 100 evaluator """rankbased""" +673 1 model.embedding_dim 1.0 +673 1 optimizer.lr 0.04719877579121214 +673 1 negative_sampler.num_negs_per_pos 5.0 +673 1 training.batch_size 1.0 +673 2 model.embedding_dim 2.0 +673 2 optimizer.lr 0.0027485915794545003 +673 2 negative_sampler.num_negs_per_pos 33.0 +673 2 training.batch_size 0.0 +673 3 model.embedding_dim 2.0 +673 3 optimizer.lr 0.019385459941871696 +673 3 negative_sampler.num_negs_per_pos 76.0 +673 3 training.batch_size 0.0 +673 4 model.embedding_dim 1.0 +673 4 
optimizer.lr 0.022347969899678726 +673 4 negative_sampler.num_negs_per_pos 36.0 +673 4 training.batch_size 0.0 +673 5 model.embedding_dim 1.0 +673 5 optimizer.lr 0.01594415146557416 +673 5 negative_sampler.num_negs_per_pos 80.0 +673 5 training.batch_size 1.0 +673 6 model.embedding_dim 2.0 +673 6 optimizer.lr 0.041323444423911046 +673 6 negative_sampler.num_negs_per_pos 56.0 +673 6 training.batch_size 0.0 +673 7 model.embedding_dim 1.0 +673 7 optimizer.lr 0.017572972701172185 +673 7 negative_sampler.num_negs_per_pos 71.0 +673 7 training.batch_size 2.0 +673 8 model.embedding_dim 2.0 +673 8 optimizer.lr 0.0031856848634403607 +673 8 negative_sampler.num_negs_per_pos 64.0 +673 8 training.batch_size 0.0 +673 9 model.embedding_dim 2.0 +673 9 optimizer.lr 0.0015621330948421294 +673 9 negative_sampler.num_negs_per_pos 14.0 +673 9 training.batch_size 2.0 +673 10 model.embedding_dim 2.0 +673 10 optimizer.lr 0.03386534093373858 +673 10 negative_sampler.num_negs_per_pos 37.0 +673 10 training.batch_size 0.0 +673 11 model.embedding_dim 0.0 +673 11 optimizer.lr 0.022916897573250668 +673 11 negative_sampler.num_negs_per_pos 8.0 +673 11 training.batch_size 1.0 +673 12 model.embedding_dim 1.0 +673 12 optimizer.lr 0.0018514552087320621 +673 12 negative_sampler.num_negs_per_pos 11.0 +673 12 training.batch_size 2.0 +673 13 model.embedding_dim 2.0 +673 13 optimizer.lr 0.0015960291902217018 +673 13 negative_sampler.num_negs_per_pos 81.0 +673 13 training.batch_size 0.0 +673 14 model.embedding_dim 1.0 +673 14 optimizer.lr 0.05265635520408956 +673 14 negative_sampler.num_negs_per_pos 32.0 +673 14 training.batch_size 0.0 +673 15 model.embedding_dim 2.0 +673 15 optimizer.lr 0.012319297875483227 +673 15 negative_sampler.num_negs_per_pos 80.0 +673 15 training.batch_size 1.0 +673 16 model.embedding_dim 0.0 +673 16 optimizer.lr 0.004201036777825754 +673 16 negative_sampler.num_negs_per_pos 71.0 +673 16 training.batch_size 1.0 +673 17 model.embedding_dim 0.0 +673 17 optimizer.lr 0.03410969812212525 
+673 17 negative_sampler.num_negs_per_pos 53.0 +673 17 training.batch_size 2.0 +673 18 model.embedding_dim 1.0 +673 18 optimizer.lr 0.003296530272462188 +673 18 negative_sampler.num_negs_per_pos 12.0 +673 18 training.batch_size 0.0 +673 19 model.embedding_dim 0.0 +673 19 optimizer.lr 0.0022850943249811606 +673 19 negative_sampler.num_negs_per_pos 53.0 +673 19 training.batch_size 2.0 +673 20 model.embedding_dim 1.0 +673 20 optimizer.lr 0.00386339104026046 +673 20 negative_sampler.num_negs_per_pos 81.0 +673 20 training.batch_size 0.0 +673 21 model.embedding_dim 0.0 +673 21 optimizer.lr 0.006652680922701935 +673 21 negative_sampler.num_negs_per_pos 24.0 +673 21 training.batch_size 2.0 +673 22 model.embedding_dim 0.0 +673 22 optimizer.lr 0.08672941907851274 +673 22 negative_sampler.num_negs_per_pos 68.0 +673 22 training.batch_size 1.0 +673 23 model.embedding_dim 0.0 +673 23 optimizer.lr 0.002732402395414853 +673 23 negative_sampler.num_negs_per_pos 53.0 +673 23 training.batch_size 1.0 +673 24 model.embedding_dim 1.0 +673 24 optimizer.lr 0.05524076416600288 +673 24 negative_sampler.num_negs_per_pos 28.0 +673 24 training.batch_size 2.0 +673 25 model.embedding_dim 0.0 +673 25 optimizer.lr 0.0022842896932599582 +673 25 negative_sampler.num_negs_per_pos 14.0 +673 25 training.batch_size 1.0 +673 26 model.embedding_dim 2.0 +673 26 optimizer.lr 0.009175549476324579 +673 26 negative_sampler.num_negs_per_pos 42.0 +673 26 training.batch_size 0.0 +673 27 model.embedding_dim 1.0 +673 27 optimizer.lr 0.0024469806235280908 +673 27 negative_sampler.num_negs_per_pos 69.0 +673 27 training.batch_size 1.0 +673 28 model.embedding_dim 1.0 +673 28 optimizer.lr 0.0075162758389593475 +673 28 negative_sampler.num_negs_per_pos 32.0 +673 28 training.batch_size 2.0 +673 29 model.embedding_dim 0.0 +673 29 optimizer.lr 0.01038086707900089 +673 29 negative_sampler.num_negs_per_pos 12.0 +673 29 training.batch_size 2.0 +673 30 model.embedding_dim 1.0 +673 30 optimizer.lr 0.00897249810132406 +673 30 
negative_sampler.num_negs_per_pos 60.0 +673 30 training.batch_size 2.0 +673 31 model.embedding_dim 2.0 +673 31 optimizer.lr 0.00313698169954805 +673 31 negative_sampler.num_negs_per_pos 1.0 +673 31 training.batch_size 1.0 +673 32 model.embedding_dim 2.0 +673 32 optimizer.lr 0.020921495014426084 +673 32 negative_sampler.num_negs_per_pos 16.0 +673 32 training.batch_size 2.0 +673 33 model.embedding_dim 1.0 +673 33 optimizer.lr 0.026835837744171892 +673 33 negative_sampler.num_negs_per_pos 93.0 +673 33 training.batch_size 0.0 +673 34 model.embedding_dim 0.0 +673 34 optimizer.lr 0.007614113948470509 +673 34 negative_sampler.num_negs_per_pos 84.0 +673 34 training.batch_size 2.0 +673 35 model.embedding_dim 0.0 +673 35 optimizer.lr 0.08967925916382916 +673 35 negative_sampler.num_negs_per_pos 98.0 +673 35 training.batch_size 0.0 +673 36 model.embedding_dim 2.0 +673 36 optimizer.lr 0.0685480517951849 +673 36 negative_sampler.num_negs_per_pos 27.0 +673 36 training.batch_size 0.0 +673 37 model.embedding_dim 0.0 +673 37 optimizer.lr 0.047165642705620715 +673 37 negative_sampler.num_negs_per_pos 20.0 +673 37 training.batch_size 2.0 +673 38 model.embedding_dim 1.0 +673 38 optimizer.lr 0.001976135188695115 +673 38 negative_sampler.num_negs_per_pos 15.0 +673 38 training.batch_size 1.0 +673 39 model.embedding_dim 0.0 +673 39 optimizer.lr 0.0547507009929072 +673 39 negative_sampler.num_negs_per_pos 61.0 +673 39 training.batch_size 2.0 +673 40 model.embedding_dim 1.0 +673 40 optimizer.lr 0.011405648029706348 +673 40 negative_sampler.num_negs_per_pos 74.0 +673 40 training.batch_size 1.0 +673 41 model.embedding_dim 0.0 +673 41 optimizer.lr 0.004444631571147813 +673 41 negative_sampler.num_negs_per_pos 5.0 +673 41 training.batch_size 2.0 +673 42 model.embedding_dim 2.0 +673 42 optimizer.lr 0.007108961205044918 +673 42 negative_sampler.num_negs_per_pos 7.0 +673 42 training.batch_size 2.0 +673 43 model.embedding_dim 1.0 +673 43 optimizer.lr 0.017405708928340428 +673 43 
negative_sampler.num_negs_per_pos 35.0 +673 43 training.batch_size 2.0 +673 44 model.embedding_dim 2.0 +673 44 optimizer.lr 0.006377355737135729 +673 44 negative_sampler.num_negs_per_pos 90.0 +673 44 training.batch_size 1.0 +673 45 model.embedding_dim 1.0 +673 45 optimizer.lr 0.0012374009409696733 +673 45 negative_sampler.num_negs_per_pos 3.0 +673 45 training.batch_size 1.0 +673 46 model.embedding_dim 2.0 +673 46 optimizer.lr 0.004432021832327996 +673 46 negative_sampler.num_negs_per_pos 33.0 +673 46 training.batch_size 0.0 +673 47 model.embedding_dim 0.0 +673 47 optimizer.lr 0.017050434286825598 +673 47 negative_sampler.num_negs_per_pos 41.0 +673 47 training.batch_size 0.0 +673 48 model.embedding_dim 0.0 +673 48 optimizer.lr 0.06049601860210146 +673 48 negative_sampler.num_negs_per_pos 33.0 +673 48 training.batch_size 1.0 +673 49 model.embedding_dim 2.0 +673 49 optimizer.lr 0.002552772521025945 +673 49 negative_sampler.num_negs_per_pos 83.0 +673 49 training.batch_size 1.0 +673 50 model.embedding_dim 2.0 +673 50 optimizer.lr 0.002416857235003275 +673 50 negative_sampler.num_negs_per_pos 70.0 +673 50 training.batch_size 1.0 +673 51 model.embedding_dim 2.0 +673 51 optimizer.lr 0.04354618805579896 +673 51 negative_sampler.num_negs_per_pos 3.0 +673 51 training.batch_size 2.0 +673 52 model.embedding_dim 1.0 +673 52 optimizer.lr 0.00304489511807609 +673 52 negative_sampler.num_negs_per_pos 19.0 +673 52 training.batch_size 1.0 +673 53 model.embedding_dim 2.0 +673 53 optimizer.lr 0.0023969158747901213 +673 53 negative_sampler.num_negs_per_pos 7.0 +673 53 training.batch_size 1.0 +673 54 model.embedding_dim 0.0 +673 54 optimizer.lr 0.0013296171509855688 +673 54 negative_sampler.num_negs_per_pos 72.0 +673 54 training.batch_size 0.0 +673 55 model.embedding_dim 1.0 +673 55 optimizer.lr 0.04399709774882031 +673 55 negative_sampler.num_negs_per_pos 76.0 +673 55 training.batch_size 2.0 +673 56 model.embedding_dim 1.0 +673 56 optimizer.lr 0.003237166903596845 +673 56 
negative_sampler.num_negs_per_pos 69.0 +673 56 training.batch_size 1.0 +673 57 model.embedding_dim 0.0 +673 57 optimizer.lr 0.058718903330123765 +673 57 negative_sampler.num_negs_per_pos 87.0 +673 57 training.batch_size 2.0 +673 58 model.embedding_dim 1.0 +673 58 optimizer.lr 0.027624212427914067 +673 58 negative_sampler.num_negs_per_pos 32.0 +673 58 training.batch_size 2.0 +673 59 model.embedding_dim 0.0 +673 59 optimizer.lr 0.07048295364456607 +673 59 negative_sampler.num_negs_per_pos 28.0 +673 59 training.batch_size 0.0 +673 60 model.embedding_dim 2.0 +673 60 optimizer.lr 0.0026926527738785635 +673 60 negative_sampler.num_negs_per_pos 41.0 +673 60 training.batch_size 0.0 +673 61 model.embedding_dim 2.0 +673 61 optimizer.lr 0.015366958005404044 +673 61 negative_sampler.num_negs_per_pos 73.0 +673 61 training.batch_size 0.0 +673 62 model.embedding_dim 2.0 +673 62 optimizer.lr 0.01162547913786311 +673 62 negative_sampler.num_negs_per_pos 65.0 +673 62 training.batch_size 1.0 +673 63 model.embedding_dim 0.0 +673 63 optimizer.lr 0.03528027077998858 +673 63 negative_sampler.num_negs_per_pos 33.0 +673 63 training.batch_size 2.0 +673 64 model.embedding_dim 1.0 +673 64 optimizer.lr 0.0021221926437145927 +673 64 negative_sampler.num_negs_per_pos 65.0 +673 64 training.batch_size 2.0 +673 65 model.embedding_dim 0.0 +673 65 optimizer.lr 0.0037108744503416098 +673 65 negative_sampler.num_negs_per_pos 68.0 +673 65 training.batch_size 0.0 +673 66 model.embedding_dim 2.0 +673 66 optimizer.lr 0.028206680894506574 +673 66 negative_sampler.num_negs_per_pos 12.0 +673 66 training.batch_size 2.0 +673 67 model.embedding_dim 1.0 +673 67 optimizer.lr 0.05965571759194226 +673 67 negative_sampler.num_negs_per_pos 78.0 +673 67 training.batch_size 0.0 +673 68 model.embedding_dim 2.0 +673 68 optimizer.lr 0.0015426201048726832 +673 68 negative_sampler.num_negs_per_pos 44.0 +673 68 training.batch_size 1.0 +673 69 model.embedding_dim 2.0 +673 69 optimizer.lr 0.014870115515162279 +673 69 
negative_sampler.num_negs_per_pos 29.0 +673 69 training.batch_size 2.0 +673 70 model.embedding_dim 1.0 +673 70 optimizer.lr 0.001268324175913899 +673 70 negative_sampler.num_negs_per_pos 66.0 +673 70 training.batch_size 1.0 +673 71 model.embedding_dim 0.0 +673 71 optimizer.lr 0.0407317083735588 +673 71 negative_sampler.num_negs_per_pos 8.0 +673 71 training.batch_size 1.0 +673 72 model.embedding_dim 2.0 +673 72 optimizer.lr 0.0013551884129892877 +673 72 negative_sampler.num_negs_per_pos 24.0 +673 72 training.batch_size 2.0 +673 73 model.embedding_dim 1.0 +673 73 optimizer.lr 0.027593508863358845 +673 73 negative_sampler.num_negs_per_pos 31.0 +673 73 training.batch_size 1.0 +673 74 model.embedding_dim 0.0 +673 74 optimizer.lr 0.003966007264198642 +673 74 negative_sampler.num_negs_per_pos 89.0 +673 74 training.batch_size 0.0 +673 75 model.embedding_dim 2.0 +673 75 optimizer.lr 0.002681587126870392 +673 75 negative_sampler.num_negs_per_pos 78.0 +673 75 training.batch_size 2.0 +673 76 model.embedding_dim 2.0 +673 76 optimizer.lr 0.0773218504897268 +673 76 negative_sampler.num_negs_per_pos 0.0 +673 76 training.batch_size 2.0 +673 77 model.embedding_dim 0.0 +673 77 optimizer.lr 0.003111015519455095 +673 77 negative_sampler.num_negs_per_pos 90.0 +673 77 training.batch_size 0.0 +673 78 model.embedding_dim 2.0 +673 78 optimizer.lr 0.004791876883759081 +673 78 negative_sampler.num_negs_per_pos 0.0 +673 78 training.batch_size 2.0 +673 79 model.embedding_dim 0.0 +673 79 optimizer.lr 0.0012139458232897637 +673 79 negative_sampler.num_negs_per_pos 57.0 +673 79 training.batch_size 1.0 +673 80 model.embedding_dim 2.0 +673 80 optimizer.lr 0.07839252458037023 +673 80 negative_sampler.num_negs_per_pos 57.0 +673 80 training.batch_size 0.0 +673 81 model.embedding_dim 0.0 +673 81 optimizer.lr 0.00692375859810614 +673 81 negative_sampler.num_negs_per_pos 30.0 +673 81 training.batch_size 0.0 +673 82 model.embedding_dim 1.0 +673 82 optimizer.lr 0.004786992439778982 +673 82 
negative_sampler.num_negs_per_pos 86.0 +673 82 training.batch_size 1.0 +673 83 model.embedding_dim 2.0 +673 83 optimizer.lr 0.046238895149182904 +673 83 negative_sampler.num_negs_per_pos 74.0 +673 83 training.batch_size 1.0 +673 84 model.embedding_dim 0.0 +673 84 optimizer.lr 0.005436222696846735 +673 84 negative_sampler.num_negs_per_pos 88.0 +673 84 training.batch_size 2.0 +673 85 model.embedding_dim 0.0 +673 85 optimizer.lr 0.012774155941866691 +673 85 negative_sampler.num_negs_per_pos 70.0 +673 85 training.batch_size 2.0 +673 86 model.embedding_dim 2.0 +673 86 optimizer.lr 0.0013347640852238187 +673 86 negative_sampler.num_negs_per_pos 85.0 +673 86 training.batch_size 0.0 +673 87 model.embedding_dim 0.0 +673 87 optimizer.lr 0.03461100649569418 +673 87 negative_sampler.num_negs_per_pos 57.0 +673 87 training.batch_size 1.0 +673 88 model.embedding_dim 2.0 +673 88 optimizer.lr 0.08342300427920067 +673 88 negative_sampler.num_negs_per_pos 25.0 +673 88 training.batch_size 1.0 +673 89 model.embedding_dim 2.0 +673 89 optimizer.lr 0.002142037078993238 +673 89 negative_sampler.num_negs_per_pos 19.0 +673 89 training.batch_size 0.0 +673 90 model.embedding_dim 1.0 +673 90 optimizer.lr 0.01624825117201213 +673 90 negative_sampler.num_negs_per_pos 18.0 +673 90 training.batch_size 0.0 +673 91 model.embedding_dim 0.0 +673 91 optimizer.lr 0.016238523950710786 +673 91 negative_sampler.num_negs_per_pos 13.0 +673 91 training.batch_size 0.0 +673 92 model.embedding_dim 0.0 +673 92 optimizer.lr 0.018083529933708983 +673 92 negative_sampler.num_negs_per_pos 32.0 +673 92 training.batch_size 2.0 +673 93 model.embedding_dim 1.0 +673 93 optimizer.lr 0.09071945496478054 +673 93 negative_sampler.num_negs_per_pos 96.0 +673 93 training.batch_size 2.0 +673 94 model.embedding_dim 1.0 +673 94 optimizer.lr 0.006190112050292636 +673 94 negative_sampler.num_negs_per_pos 0.0 +673 94 training.batch_size 2.0 +673 95 model.embedding_dim 1.0 +673 95 optimizer.lr 0.0013006971677948867 +673 95 
negative_sampler.num_negs_per_pos 79.0 +673 95 training.batch_size 1.0 +673 96 model.embedding_dim 2.0 +673 96 optimizer.lr 0.01050513327146445 +673 96 negative_sampler.num_negs_per_pos 41.0 +673 96 training.batch_size 2.0 +673 97 model.embedding_dim 1.0 +673 97 optimizer.lr 0.002500192772233806 +673 97 negative_sampler.num_negs_per_pos 65.0 +673 97 training.batch_size 2.0 +673 98 model.embedding_dim 0.0 +673 98 optimizer.lr 0.012951067035632058 +673 98 negative_sampler.num_negs_per_pos 63.0 +673 98 training.batch_size 0.0 +673 99 model.embedding_dim 2.0 +673 99 optimizer.lr 0.002209247657206097 +673 99 negative_sampler.num_negs_per_pos 58.0 +673 99 training.batch_size 1.0 +673 100 model.embedding_dim 1.0 +673 100 optimizer.lr 0.0010658615701902912 +673 100 negative_sampler.num_negs_per_pos 63.0 +673 100 training.batch_size 0.0 +673 1 dataset """kinships""" +673 1 model """simple""" +673 1 loss """bceaftersigmoid""" +673 1 regularizer """no""" +673 1 optimizer """adam""" +673 1 training_loop """owa""" +673 1 negative_sampler """basic""" +673 1 evaluator """rankbased""" +673 2 dataset """kinships""" +673 2 model """simple""" +673 2 loss """bceaftersigmoid""" +673 2 regularizer """no""" +673 2 optimizer """adam""" +673 2 training_loop """owa""" +673 2 negative_sampler """basic""" +673 2 evaluator """rankbased""" +673 3 dataset """kinships""" +673 3 model """simple""" +673 3 loss """bceaftersigmoid""" +673 3 regularizer """no""" +673 3 optimizer """adam""" +673 3 training_loop """owa""" +673 3 negative_sampler """basic""" +673 3 evaluator """rankbased""" +673 4 dataset """kinships""" +673 4 model """simple""" +673 4 loss """bceaftersigmoid""" +673 4 regularizer """no""" +673 4 optimizer """adam""" +673 4 training_loop """owa""" +673 4 negative_sampler """basic""" +673 4 evaluator """rankbased""" +673 5 dataset """kinships""" +673 5 model """simple""" +673 5 loss """bceaftersigmoid""" +673 5 regularizer """no""" +673 5 optimizer """adam""" +673 5 training_loop 
"""owa""" +673 5 negative_sampler """basic""" +673 5 evaluator """rankbased""" +673 6 dataset """kinships""" +673 6 model """simple""" +673 6 loss """bceaftersigmoid""" +673 6 regularizer """no""" +673 6 optimizer """adam""" +673 6 training_loop """owa""" +673 6 negative_sampler """basic""" +673 6 evaluator """rankbased""" +673 7 dataset """kinships""" +673 7 model """simple""" +673 7 loss """bceaftersigmoid""" +673 7 regularizer """no""" +673 7 optimizer """adam""" +673 7 training_loop """owa""" +673 7 negative_sampler """basic""" +673 7 evaluator """rankbased""" +673 8 dataset """kinships""" +673 8 model """simple""" +673 8 loss """bceaftersigmoid""" +673 8 regularizer """no""" +673 8 optimizer """adam""" +673 8 training_loop """owa""" +673 8 negative_sampler """basic""" +673 8 evaluator """rankbased""" +673 9 dataset """kinships""" +673 9 model """simple""" +673 9 loss """bceaftersigmoid""" +673 9 regularizer """no""" +673 9 optimizer """adam""" +673 9 training_loop """owa""" +673 9 negative_sampler """basic""" +673 9 evaluator """rankbased""" +673 10 dataset """kinships""" +673 10 model """simple""" +673 10 loss """bceaftersigmoid""" +673 10 regularizer """no""" +673 10 optimizer """adam""" +673 10 training_loop """owa""" +673 10 negative_sampler """basic""" +673 10 evaluator """rankbased""" +673 11 dataset """kinships""" +673 11 model """simple""" +673 11 loss """bceaftersigmoid""" +673 11 regularizer """no""" +673 11 optimizer """adam""" +673 11 training_loop """owa""" +673 11 negative_sampler """basic""" +673 11 evaluator """rankbased""" +673 12 dataset """kinships""" +673 12 model """simple""" +673 12 loss """bceaftersigmoid""" +673 12 regularizer """no""" +673 12 optimizer """adam""" +673 12 training_loop """owa""" +673 12 negative_sampler """basic""" +673 12 evaluator """rankbased""" +673 13 dataset """kinships""" +673 13 model """simple""" +673 13 loss """bceaftersigmoid""" +673 13 regularizer """no""" +673 13 optimizer """adam""" +673 13 training_loop 
"""owa""" +673 13 negative_sampler """basic""" +673 13 evaluator """rankbased""" +673 14 dataset """kinships""" +673 14 model """simple""" +673 14 loss """bceaftersigmoid""" +673 14 regularizer """no""" +673 14 optimizer """adam""" +673 14 training_loop """owa""" +673 14 negative_sampler """basic""" +673 14 evaluator """rankbased""" +673 15 dataset """kinships""" +673 15 model """simple""" +673 15 loss """bceaftersigmoid""" +673 15 regularizer """no""" +673 15 optimizer """adam""" +673 15 training_loop """owa""" +673 15 negative_sampler """basic""" +673 15 evaluator """rankbased""" +673 16 dataset """kinships""" +673 16 model """simple""" +673 16 loss """bceaftersigmoid""" +673 16 regularizer """no""" +673 16 optimizer """adam""" +673 16 training_loop """owa""" +673 16 negative_sampler """basic""" +673 16 evaluator """rankbased""" +673 17 dataset """kinships""" +673 17 model """simple""" +673 17 loss """bceaftersigmoid""" +673 17 regularizer """no""" +673 17 optimizer """adam""" +673 17 training_loop """owa""" +673 17 negative_sampler """basic""" +673 17 evaluator """rankbased""" +673 18 dataset """kinships""" +673 18 model """simple""" +673 18 loss """bceaftersigmoid""" +673 18 regularizer """no""" +673 18 optimizer """adam""" +673 18 training_loop """owa""" +673 18 negative_sampler """basic""" +673 18 evaluator """rankbased""" +673 19 dataset """kinships""" +673 19 model """simple""" +673 19 loss """bceaftersigmoid""" +673 19 regularizer """no""" +673 19 optimizer """adam""" +673 19 training_loop """owa""" +673 19 negative_sampler """basic""" +673 19 evaluator """rankbased""" +673 20 dataset """kinships""" +673 20 model """simple""" +673 20 loss """bceaftersigmoid""" +673 20 regularizer """no""" +673 20 optimizer """adam""" +673 20 training_loop """owa""" +673 20 negative_sampler """basic""" +673 20 evaluator """rankbased""" +673 21 dataset """kinships""" +673 21 model """simple""" +673 21 loss """bceaftersigmoid""" +673 21 regularizer """no""" +673 21 optimizer 
"""adam""" +673 21 training_loop """owa""" +673 21 negative_sampler """basic""" +673 21 evaluator """rankbased""" +673 22 dataset """kinships""" +673 22 model """simple""" +673 22 loss """bceaftersigmoid""" +673 22 regularizer """no""" +673 22 optimizer """adam""" +673 22 training_loop """owa""" +673 22 negative_sampler """basic""" +673 22 evaluator """rankbased""" +673 23 dataset """kinships""" +673 23 model """simple""" +673 23 loss """bceaftersigmoid""" +673 23 regularizer """no""" +673 23 optimizer """adam""" +673 23 training_loop """owa""" +673 23 negative_sampler """basic""" +673 23 evaluator """rankbased""" +673 24 dataset """kinships""" +673 24 model """simple""" +673 24 loss """bceaftersigmoid""" +673 24 regularizer """no""" +673 24 optimizer """adam""" +673 24 training_loop """owa""" +673 24 negative_sampler """basic""" +673 24 evaluator """rankbased""" +673 25 dataset """kinships""" +673 25 model """simple""" +673 25 loss """bceaftersigmoid""" +673 25 regularizer """no""" +673 25 optimizer """adam""" +673 25 training_loop """owa""" +673 25 negative_sampler """basic""" +673 25 evaluator """rankbased""" +673 26 dataset """kinships""" +673 26 model """simple""" +673 26 loss """bceaftersigmoid""" +673 26 regularizer """no""" +673 26 optimizer """adam""" +673 26 training_loop """owa""" +673 26 negative_sampler """basic""" +673 26 evaluator """rankbased""" +673 27 dataset """kinships""" +673 27 model """simple""" +673 27 loss """bceaftersigmoid""" +673 27 regularizer """no""" +673 27 optimizer """adam""" +673 27 training_loop """owa""" +673 27 negative_sampler """basic""" +673 27 evaluator """rankbased""" +673 28 dataset """kinships""" +673 28 model """simple""" +673 28 loss """bceaftersigmoid""" +673 28 regularizer """no""" +673 28 optimizer """adam""" +673 28 training_loop """owa""" +673 28 negative_sampler """basic""" +673 28 evaluator """rankbased""" +673 29 dataset """kinships""" +673 29 model """simple""" +673 29 loss """bceaftersigmoid""" +673 29 
regularizer """no""" +673 29 optimizer """adam""" +673 29 training_loop """owa""" +673 29 negative_sampler """basic""" +673 29 evaluator """rankbased""" +673 30 dataset """kinships""" +673 30 model """simple""" +673 30 loss """bceaftersigmoid""" +673 30 regularizer """no""" +673 30 optimizer """adam""" +673 30 training_loop """owa""" +673 30 negative_sampler """basic""" +673 30 evaluator """rankbased""" +673 31 dataset """kinships""" +673 31 model """simple""" +673 31 loss """bceaftersigmoid""" +673 31 regularizer """no""" +673 31 optimizer """adam""" +673 31 training_loop """owa""" +673 31 negative_sampler """basic""" +673 31 evaluator """rankbased""" +673 32 dataset """kinships""" +673 32 model """simple""" +673 32 loss """bceaftersigmoid""" +673 32 regularizer """no""" +673 32 optimizer """adam""" +673 32 training_loop """owa""" +673 32 negative_sampler """basic""" +673 32 evaluator """rankbased""" +673 33 dataset """kinships""" +673 33 model """simple""" +673 33 loss """bceaftersigmoid""" +673 33 regularizer """no""" +673 33 optimizer """adam""" +673 33 training_loop """owa""" +673 33 negative_sampler """basic""" +673 33 evaluator """rankbased""" +673 34 dataset """kinships""" +673 34 model """simple""" +673 34 loss """bceaftersigmoid""" +673 34 regularizer """no""" +673 34 optimizer """adam""" +673 34 training_loop """owa""" +673 34 negative_sampler """basic""" +673 34 evaluator """rankbased""" +673 35 dataset """kinships""" +673 35 model """simple""" +673 35 loss """bceaftersigmoid""" +673 35 regularizer """no""" +673 35 optimizer """adam""" +673 35 training_loop """owa""" +673 35 negative_sampler """basic""" +673 35 evaluator """rankbased""" +673 36 dataset """kinships""" +673 36 model """simple""" +673 36 loss """bceaftersigmoid""" +673 36 regularizer """no""" +673 36 optimizer """adam""" +673 36 training_loop """owa""" +673 36 negative_sampler """basic""" +673 36 evaluator """rankbased""" +673 37 dataset """kinships""" +673 37 model """simple""" +673 37 
loss """bceaftersigmoid""" +673 37 regularizer """no""" +673 37 optimizer """adam""" +673 37 training_loop """owa""" +673 37 negative_sampler """basic""" +673 37 evaluator """rankbased""" +673 38 dataset """kinships""" +673 38 model """simple""" +673 38 loss """bceaftersigmoid""" +673 38 regularizer """no""" +673 38 optimizer """adam""" +673 38 training_loop """owa""" +673 38 negative_sampler """basic""" +673 38 evaluator """rankbased""" +673 39 dataset """kinships""" +673 39 model """simple""" +673 39 loss """bceaftersigmoid""" +673 39 regularizer """no""" +673 39 optimizer """adam""" +673 39 training_loop """owa""" +673 39 negative_sampler """basic""" +673 39 evaluator """rankbased""" +673 40 dataset """kinships""" +673 40 model """simple""" +673 40 loss """bceaftersigmoid""" +673 40 regularizer """no""" +673 40 optimizer """adam""" +673 40 training_loop """owa""" +673 40 negative_sampler """basic""" +673 40 evaluator """rankbased""" +673 41 dataset """kinships""" +673 41 model """simple""" +673 41 loss """bceaftersigmoid""" +673 41 regularizer """no""" +673 41 optimizer """adam""" +673 41 training_loop """owa""" +673 41 negative_sampler """basic""" +673 41 evaluator """rankbased""" +673 42 dataset """kinships""" +673 42 model """simple""" +673 42 loss """bceaftersigmoid""" +673 42 regularizer """no""" +673 42 optimizer """adam""" +673 42 training_loop """owa""" +673 42 negative_sampler """basic""" +673 42 evaluator """rankbased""" +673 43 dataset """kinships""" +673 43 model """simple""" +673 43 loss """bceaftersigmoid""" +673 43 regularizer """no""" +673 43 optimizer """adam""" +673 43 training_loop """owa""" +673 43 negative_sampler """basic""" +673 43 evaluator """rankbased""" +673 44 dataset """kinships""" +673 44 model """simple""" +673 44 loss """bceaftersigmoid""" +673 44 regularizer """no""" +673 44 optimizer """adam""" +673 44 training_loop """owa""" +673 44 negative_sampler """basic""" +673 44 evaluator """rankbased""" +673 45 dataset """kinships""" 
+673 45 model """simple""" +673 45 loss """bceaftersigmoid""" +673 45 regularizer """no""" +673 45 optimizer """adam""" +673 45 training_loop """owa""" +673 45 negative_sampler """basic""" +673 45 evaluator """rankbased""" +673 46 dataset """kinships""" +673 46 model """simple""" +673 46 loss """bceaftersigmoid""" +673 46 regularizer """no""" +673 46 optimizer """adam""" +673 46 training_loop """owa""" +673 46 negative_sampler """basic""" +673 46 evaluator """rankbased""" +673 47 dataset """kinships""" +673 47 model """simple""" +673 47 loss """bceaftersigmoid""" +673 47 regularizer """no""" +673 47 optimizer """adam""" +673 47 training_loop """owa""" +673 47 negative_sampler """basic""" +673 47 evaluator """rankbased""" +673 48 dataset """kinships""" +673 48 model """simple""" +673 48 loss """bceaftersigmoid""" +673 48 regularizer """no""" +673 48 optimizer """adam""" +673 48 training_loop """owa""" +673 48 negative_sampler """basic""" +673 48 evaluator """rankbased""" +673 49 dataset """kinships""" +673 49 model """simple""" +673 49 loss """bceaftersigmoid""" +673 49 regularizer """no""" +673 49 optimizer """adam""" +673 49 training_loop """owa""" +673 49 negative_sampler """basic""" +673 49 evaluator """rankbased""" +673 50 dataset """kinships""" +673 50 model """simple""" +673 50 loss """bceaftersigmoid""" +673 50 regularizer """no""" +673 50 optimizer """adam""" +673 50 training_loop """owa""" +673 50 negative_sampler """basic""" +673 50 evaluator """rankbased""" +673 51 dataset """kinships""" +673 51 model """simple""" +673 51 loss """bceaftersigmoid""" +673 51 regularizer """no""" +673 51 optimizer """adam""" +673 51 training_loop """owa""" +673 51 negative_sampler """basic""" +673 51 evaluator """rankbased""" +673 52 dataset """kinships""" +673 52 model """simple""" +673 52 loss """bceaftersigmoid""" +673 52 regularizer """no""" +673 52 optimizer """adam""" +673 52 training_loop """owa""" +673 52 negative_sampler """basic""" +673 52 evaluator 
"""rankbased""" +673 53 dataset """kinships""" +673 53 model """simple""" +673 53 loss """bceaftersigmoid""" +673 53 regularizer """no""" +673 53 optimizer """adam""" +673 53 training_loop """owa""" +673 53 negative_sampler """basic""" +673 53 evaluator """rankbased""" +673 54 dataset """kinships""" +673 54 model """simple""" +673 54 loss """bceaftersigmoid""" +673 54 regularizer """no""" +673 54 optimizer """adam""" +673 54 training_loop """owa""" +673 54 negative_sampler """basic""" +673 54 evaluator """rankbased""" +673 55 dataset """kinships""" +673 55 model """simple""" +673 55 loss """bceaftersigmoid""" +673 55 regularizer """no""" +673 55 optimizer """adam""" +673 55 training_loop """owa""" +673 55 negative_sampler """basic""" +673 55 evaluator """rankbased""" +673 56 dataset """kinships""" +673 56 model """simple""" +673 56 loss """bceaftersigmoid""" +673 56 regularizer """no""" +673 56 optimizer """adam""" +673 56 training_loop """owa""" +673 56 negative_sampler """basic""" +673 56 evaluator """rankbased""" +673 57 dataset """kinships""" +673 57 model """simple""" +673 57 loss """bceaftersigmoid""" +673 57 regularizer """no""" +673 57 optimizer """adam""" +673 57 training_loop """owa""" +673 57 negative_sampler """basic""" +673 57 evaluator """rankbased""" +673 58 dataset """kinships""" +673 58 model """simple""" +673 58 loss """bceaftersigmoid""" +673 58 regularizer """no""" +673 58 optimizer """adam""" +673 58 training_loop """owa""" +673 58 negative_sampler """basic""" +673 58 evaluator """rankbased""" +673 59 dataset """kinships""" +673 59 model """simple""" +673 59 loss """bceaftersigmoid""" +673 59 regularizer """no""" +673 59 optimizer """adam""" +673 59 training_loop """owa""" +673 59 negative_sampler """basic""" +673 59 evaluator """rankbased""" +673 60 dataset """kinships""" +673 60 model """simple""" +673 60 loss """bceaftersigmoid""" +673 60 regularizer """no""" +673 60 optimizer """adam""" +673 60 training_loop """owa""" +673 60 
negative_sampler """basic""" +673 60 evaluator """rankbased""" +673 61 dataset """kinships""" +673 61 model """simple""" +673 61 loss """bceaftersigmoid""" +673 61 regularizer """no""" +673 61 optimizer """adam""" +673 61 training_loop """owa""" +673 61 negative_sampler """basic""" +673 61 evaluator """rankbased""" +673 62 dataset """kinships""" +673 62 model """simple""" +673 62 loss """bceaftersigmoid""" +673 62 regularizer """no""" +673 62 optimizer """adam""" +673 62 training_loop """owa""" +673 62 negative_sampler """basic""" +673 62 evaluator """rankbased""" +673 63 dataset """kinships""" +673 63 model """simple""" +673 63 loss """bceaftersigmoid""" +673 63 regularizer """no""" +673 63 optimizer """adam""" +673 63 training_loop """owa""" +673 63 negative_sampler """basic""" +673 63 evaluator """rankbased""" +673 64 dataset """kinships""" +673 64 model """simple""" +673 64 loss """bceaftersigmoid""" +673 64 regularizer """no""" +673 64 optimizer """adam""" +673 64 training_loop """owa""" +673 64 negative_sampler """basic""" +673 64 evaluator """rankbased""" +673 65 dataset """kinships""" +673 65 model """simple""" +673 65 loss """bceaftersigmoid""" +673 65 regularizer """no""" +673 65 optimizer """adam""" +673 65 training_loop """owa""" +673 65 negative_sampler """basic""" +673 65 evaluator """rankbased""" +673 66 dataset """kinships""" +673 66 model """simple""" +673 66 loss """bceaftersigmoid""" +673 66 regularizer """no""" +673 66 optimizer """adam""" +673 66 training_loop """owa""" +673 66 negative_sampler """basic""" +673 66 evaluator """rankbased""" +673 67 dataset """kinships""" +673 67 model """simple""" +673 67 loss """bceaftersigmoid""" +673 67 regularizer """no""" +673 67 optimizer """adam""" +673 67 training_loop """owa""" +673 67 negative_sampler """basic""" +673 67 evaluator """rankbased""" +673 68 dataset """kinships""" +673 68 model """simple""" +673 68 loss """bceaftersigmoid""" +673 68 regularizer """no""" +673 68 optimizer """adam""" +673 68 
training_loop """owa""" +673 68 negative_sampler """basic""" +673 68 evaluator """rankbased""" +673 69 dataset """kinships""" +673 69 model """simple""" +673 69 loss """bceaftersigmoid""" +673 69 regularizer """no""" +673 69 optimizer """adam""" +673 69 training_loop """owa""" +673 69 negative_sampler """basic""" +673 69 evaluator """rankbased""" +673 70 dataset """kinships""" +673 70 model """simple""" +673 70 loss """bceaftersigmoid""" +673 70 regularizer """no""" +673 70 optimizer """adam""" +673 70 training_loop """owa""" +673 70 negative_sampler """basic""" +673 70 evaluator """rankbased""" +673 71 dataset """kinships""" +673 71 model """simple""" +673 71 loss """bceaftersigmoid""" +673 71 regularizer """no""" +673 71 optimizer """adam""" +673 71 training_loop """owa""" +673 71 negative_sampler """basic""" +673 71 evaluator """rankbased""" +673 72 dataset """kinships""" +673 72 model """simple""" +673 72 loss """bceaftersigmoid""" +673 72 regularizer """no""" +673 72 optimizer """adam""" +673 72 training_loop """owa""" +673 72 negative_sampler """basic""" +673 72 evaluator """rankbased""" +673 73 dataset """kinships""" +673 73 model """simple""" +673 73 loss """bceaftersigmoid""" +673 73 regularizer """no""" +673 73 optimizer """adam""" +673 73 training_loop """owa""" +673 73 negative_sampler """basic""" +673 73 evaluator """rankbased""" +673 74 dataset """kinships""" +673 74 model """simple""" +673 74 loss """bceaftersigmoid""" +673 74 regularizer """no""" +673 74 optimizer """adam""" +673 74 training_loop """owa""" +673 74 negative_sampler """basic""" +673 74 evaluator """rankbased""" +673 75 dataset """kinships""" +673 75 model """simple""" +673 75 loss """bceaftersigmoid""" +673 75 regularizer """no""" +673 75 optimizer """adam""" +673 75 training_loop """owa""" +673 75 negative_sampler """basic""" +673 75 evaluator """rankbased""" +673 76 dataset """kinships""" +673 76 model """simple""" +673 76 loss """bceaftersigmoid""" +673 76 regularizer """no""" +673 
76 optimizer """adam""" +673 76 training_loop """owa""" +673 76 negative_sampler """basic""" +673 76 evaluator """rankbased""" +673 77 dataset """kinships""" +673 77 model """simple""" +673 77 loss """bceaftersigmoid""" +673 77 regularizer """no""" +673 77 optimizer """adam""" +673 77 training_loop """owa""" +673 77 negative_sampler """basic""" +673 77 evaluator """rankbased""" +673 78 dataset """kinships""" +673 78 model """simple""" +673 78 loss """bceaftersigmoid""" +673 78 regularizer """no""" +673 78 optimizer """adam""" +673 78 training_loop """owa""" +673 78 negative_sampler """basic""" +673 78 evaluator """rankbased""" +673 79 dataset """kinships""" +673 79 model """simple""" +673 79 loss """bceaftersigmoid""" +673 79 regularizer """no""" +673 79 optimizer """adam""" +673 79 training_loop """owa""" +673 79 negative_sampler """basic""" +673 79 evaluator """rankbased""" +673 80 dataset """kinships""" +673 80 model """simple""" +673 80 loss """bceaftersigmoid""" +673 80 regularizer """no""" +673 80 optimizer """adam""" +673 80 training_loop """owa""" +673 80 negative_sampler """basic""" +673 80 evaluator """rankbased""" +673 81 dataset """kinships""" +673 81 model """simple""" +673 81 loss """bceaftersigmoid""" +673 81 regularizer """no""" +673 81 optimizer """adam""" +673 81 training_loop """owa""" +673 81 negative_sampler """basic""" +673 81 evaluator """rankbased""" +673 82 dataset """kinships""" +673 82 model """simple""" +673 82 loss """bceaftersigmoid""" +673 82 regularizer """no""" +673 82 optimizer """adam""" +673 82 training_loop """owa""" +673 82 negative_sampler """basic""" +673 82 evaluator """rankbased""" +673 83 dataset """kinships""" +673 83 model """simple""" +673 83 loss """bceaftersigmoid""" +673 83 regularizer """no""" +673 83 optimizer """adam""" +673 83 training_loop """owa""" +673 83 negative_sampler """basic""" +673 83 evaluator """rankbased""" +673 84 dataset """kinships""" +673 84 model """simple""" +673 84 loss """bceaftersigmoid""" 
+673 84 regularizer """no""" +673 84 optimizer """adam""" +673 84 training_loop """owa""" +673 84 negative_sampler """basic""" +673 84 evaluator """rankbased""" +673 85 dataset """kinships""" +673 85 model """simple""" +673 85 loss """bceaftersigmoid""" +673 85 regularizer """no""" +673 85 optimizer """adam""" +673 85 training_loop """owa""" +673 85 negative_sampler """basic""" +673 85 evaluator """rankbased""" +673 86 dataset """kinships""" +673 86 model """simple""" +673 86 loss """bceaftersigmoid""" +673 86 regularizer """no""" +673 86 optimizer """adam""" +673 86 training_loop """owa""" +673 86 negative_sampler """basic""" +673 86 evaluator """rankbased""" +673 87 dataset """kinships""" +673 87 model """simple""" +673 87 loss """bceaftersigmoid""" +673 87 regularizer """no""" +673 87 optimizer """adam""" +673 87 training_loop """owa""" +673 87 negative_sampler """basic""" +673 87 evaluator """rankbased""" +673 88 dataset """kinships""" +673 88 model """simple""" +673 88 loss """bceaftersigmoid""" +673 88 regularizer """no""" +673 88 optimizer """adam""" +673 88 training_loop """owa""" +673 88 negative_sampler """basic""" +673 88 evaluator """rankbased""" +673 89 dataset """kinships""" +673 89 model """simple""" +673 89 loss """bceaftersigmoid""" +673 89 regularizer """no""" +673 89 optimizer """adam""" +673 89 training_loop """owa""" +673 89 negative_sampler """basic""" +673 89 evaluator """rankbased""" +673 90 dataset """kinships""" +673 90 model """simple""" +673 90 loss """bceaftersigmoid""" +673 90 regularizer """no""" +673 90 optimizer """adam""" +673 90 training_loop """owa""" +673 90 negative_sampler """basic""" +673 90 evaluator """rankbased""" +673 91 dataset """kinships""" +673 91 model """simple""" +673 91 loss """bceaftersigmoid""" +673 91 regularizer """no""" +673 91 optimizer """adam""" +673 91 training_loop """owa""" +673 91 negative_sampler """basic""" +673 91 evaluator """rankbased""" +673 92 dataset """kinships""" +673 92 model """simple""" 
+673 92 loss """bceaftersigmoid""" +673 92 regularizer """no""" +673 92 optimizer """adam""" +673 92 training_loop """owa""" +673 92 negative_sampler """basic""" +673 92 evaluator """rankbased""" +673 93 dataset """kinships""" +673 93 model """simple""" +673 93 loss """bceaftersigmoid""" +673 93 regularizer """no""" +673 93 optimizer """adam""" +673 93 training_loop """owa""" +673 93 negative_sampler """basic""" +673 93 evaluator """rankbased""" +673 94 dataset """kinships""" +673 94 model """simple""" +673 94 loss """bceaftersigmoid""" +673 94 regularizer """no""" +673 94 optimizer """adam""" +673 94 training_loop """owa""" +673 94 negative_sampler """basic""" +673 94 evaluator """rankbased""" +673 95 dataset """kinships""" +673 95 model """simple""" +673 95 loss """bceaftersigmoid""" +673 95 regularizer """no""" +673 95 optimizer """adam""" +673 95 training_loop """owa""" +673 95 negative_sampler """basic""" +673 95 evaluator """rankbased""" +673 96 dataset """kinships""" +673 96 model """simple""" +673 96 loss """bceaftersigmoid""" +673 96 regularizer """no""" +673 96 optimizer """adam""" +673 96 training_loop """owa""" +673 96 negative_sampler """basic""" +673 96 evaluator """rankbased""" +673 97 dataset """kinships""" +673 97 model """simple""" +673 97 loss """bceaftersigmoid""" +673 97 regularizer """no""" +673 97 optimizer """adam""" +673 97 training_loop """owa""" +673 97 negative_sampler """basic""" +673 97 evaluator """rankbased""" +673 98 dataset """kinships""" +673 98 model """simple""" +673 98 loss """bceaftersigmoid""" +673 98 regularizer """no""" +673 98 optimizer """adam""" +673 98 training_loop """owa""" +673 98 negative_sampler """basic""" +673 98 evaluator """rankbased""" +673 99 dataset """kinships""" +673 99 model """simple""" +673 99 loss """bceaftersigmoid""" +673 99 regularizer """no""" +673 99 optimizer """adam""" +673 99 training_loop """owa""" +673 99 negative_sampler """basic""" +673 99 evaluator """rankbased""" +673 100 dataset 
"""kinships""" +673 100 model """simple""" +673 100 loss """bceaftersigmoid""" +673 100 regularizer """no""" +673 100 optimizer """adam""" +673 100 training_loop """owa""" +673 100 negative_sampler """basic""" +673 100 evaluator """rankbased""" +674 1 model.embedding_dim 0.0 +674 1 optimizer.lr 0.022069850606652144 +674 1 negative_sampler.num_negs_per_pos 21.0 +674 1 training.batch_size 0.0 +674 2 model.embedding_dim 2.0 +674 2 optimizer.lr 0.0013083079080400812 +674 2 negative_sampler.num_negs_per_pos 1.0 +674 2 training.batch_size 0.0 +674 3 model.embedding_dim 1.0 +674 3 optimizer.lr 0.05547356283286478 +674 3 negative_sampler.num_negs_per_pos 80.0 +674 3 training.batch_size 2.0 +674 4 model.embedding_dim 1.0 +674 4 optimizer.lr 0.047402041156897015 +674 4 negative_sampler.num_negs_per_pos 24.0 +674 4 training.batch_size 2.0 +674 5 model.embedding_dim 2.0 +674 5 optimizer.lr 0.059912863830708075 +674 5 negative_sampler.num_negs_per_pos 84.0 +674 5 training.batch_size 1.0 +674 6 model.embedding_dim 2.0 +674 6 optimizer.lr 0.011007711522226939 +674 6 negative_sampler.num_negs_per_pos 22.0 +674 6 training.batch_size 0.0 +674 7 model.embedding_dim 1.0 +674 7 optimizer.lr 0.00314415753487535 +674 7 negative_sampler.num_negs_per_pos 35.0 +674 7 training.batch_size 2.0 +674 8 model.embedding_dim 0.0 +674 8 optimizer.lr 0.0015150875970419352 +674 8 negative_sampler.num_negs_per_pos 65.0 +674 8 training.batch_size 0.0 +674 9 model.embedding_dim 0.0 +674 9 optimizer.lr 0.0016033947474011511 +674 9 negative_sampler.num_negs_per_pos 35.0 +674 9 training.batch_size 0.0 +674 10 model.embedding_dim 1.0 +674 10 optimizer.lr 0.0011438938051403877 +674 10 negative_sampler.num_negs_per_pos 7.0 +674 10 training.batch_size 0.0 +674 11 model.embedding_dim 1.0 +674 11 optimizer.lr 0.0022783184514770824 +674 11 negative_sampler.num_negs_per_pos 20.0 +674 11 training.batch_size 2.0 +674 12 model.embedding_dim 1.0 +674 12 optimizer.lr 0.003322998263484839 +674 12 
negative_sampler.num_negs_per_pos 46.0 +674 12 training.batch_size 0.0 +674 13 model.embedding_dim 2.0 +674 13 optimizer.lr 0.0011177800559455395 +674 13 negative_sampler.num_negs_per_pos 33.0 +674 13 training.batch_size 1.0 +674 14 model.embedding_dim 1.0 +674 14 optimizer.lr 0.004230769650851137 +674 14 negative_sampler.num_negs_per_pos 1.0 +674 14 training.batch_size 2.0 +674 15 model.embedding_dim 2.0 +674 15 optimizer.lr 0.0603945541877647 +674 15 negative_sampler.num_negs_per_pos 6.0 +674 15 training.batch_size 2.0 +674 16 model.embedding_dim 0.0 +674 16 optimizer.lr 0.011613456830539222 +674 16 negative_sampler.num_negs_per_pos 3.0 +674 16 training.batch_size 0.0 +674 17 model.embedding_dim 0.0 +674 17 optimizer.lr 0.05232551917713807 +674 17 negative_sampler.num_negs_per_pos 2.0 +674 17 training.batch_size 2.0 +674 18 model.embedding_dim 2.0 +674 18 optimizer.lr 0.0025014820215281536 +674 18 negative_sampler.num_negs_per_pos 60.0 +674 18 training.batch_size 0.0 +674 19 model.embedding_dim 1.0 +674 19 optimizer.lr 0.0029872003275217404 +674 19 negative_sampler.num_negs_per_pos 67.0 +674 19 training.batch_size 0.0 +674 20 model.embedding_dim 2.0 +674 20 optimizer.lr 0.049760606878949745 +674 20 negative_sampler.num_negs_per_pos 12.0 +674 20 training.batch_size 2.0 +674 21 model.embedding_dim 2.0 +674 21 optimizer.lr 0.002499671952480206 +674 21 negative_sampler.num_negs_per_pos 94.0 +674 21 training.batch_size 1.0 +674 22 model.embedding_dim 2.0 +674 22 optimizer.lr 0.0017800851814680314 +674 22 negative_sampler.num_negs_per_pos 41.0 +674 22 training.batch_size 1.0 +674 23 model.embedding_dim 1.0 +674 23 optimizer.lr 0.05006768290521772 +674 23 negative_sampler.num_negs_per_pos 72.0 +674 23 training.batch_size 0.0 +674 24 model.embedding_dim 1.0 +674 24 optimizer.lr 0.002700888445089147 +674 24 negative_sampler.num_negs_per_pos 40.0 +674 24 training.batch_size 1.0 +674 25 model.embedding_dim 2.0 +674 25 optimizer.lr 0.029170200248894773 +674 25 
negative_sampler.num_negs_per_pos 41.0 +674 25 training.batch_size 2.0 +674 26 model.embedding_dim 1.0 +674 26 optimizer.lr 0.0050607944198113675 +674 26 negative_sampler.num_negs_per_pos 17.0 +674 26 training.batch_size 0.0 +674 27 model.embedding_dim 0.0 +674 27 optimizer.lr 0.022404926439236215 +674 27 negative_sampler.num_negs_per_pos 90.0 +674 27 training.batch_size 0.0 +674 28 model.embedding_dim 2.0 +674 28 optimizer.lr 0.022621159620870843 +674 28 negative_sampler.num_negs_per_pos 31.0 +674 28 training.batch_size 2.0 +674 29 model.embedding_dim 1.0 +674 29 optimizer.lr 0.055504281830227174 +674 29 negative_sampler.num_negs_per_pos 57.0 +674 29 training.batch_size 0.0 +674 30 model.embedding_dim 0.0 +674 30 optimizer.lr 0.0012940299822726306 +674 30 negative_sampler.num_negs_per_pos 78.0 +674 30 training.batch_size 0.0 +674 31 model.embedding_dim 2.0 +674 31 optimizer.lr 0.056241687376016355 +674 31 negative_sampler.num_negs_per_pos 58.0 +674 31 training.batch_size 2.0 +674 32 model.embedding_dim 0.0 +674 32 optimizer.lr 0.0371019052783183 +674 32 negative_sampler.num_negs_per_pos 88.0 +674 32 training.batch_size 2.0 +674 33 model.embedding_dim 2.0 +674 33 optimizer.lr 0.013511286408817302 +674 33 negative_sampler.num_negs_per_pos 83.0 +674 33 training.batch_size 0.0 +674 34 model.embedding_dim 1.0 +674 34 optimizer.lr 0.001357939906551615 +674 34 negative_sampler.num_negs_per_pos 21.0 +674 34 training.batch_size 2.0 +674 35 model.embedding_dim 0.0 +674 35 optimizer.lr 0.010530573528355873 +674 35 negative_sampler.num_negs_per_pos 67.0 +674 35 training.batch_size 1.0 +674 36 model.embedding_dim 2.0 +674 36 optimizer.lr 0.0010033580775848298 +674 36 negative_sampler.num_negs_per_pos 73.0 +674 36 training.batch_size 0.0 +674 37 model.embedding_dim 0.0 +674 37 optimizer.lr 0.0038579333336654716 +674 37 negative_sampler.num_negs_per_pos 15.0 +674 37 training.batch_size 0.0 +674 38 model.embedding_dim 0.0 +674 38 optimizer.lr 0.0075176396410515 +674 38 
negative_sampler.num_negs_per_pos 51.0 +674 38 training.batch_size 1.0 +674 39 model.embedding_dim 0.0 +674 39 optimizer.lr 0.0437256845297037 +674 39 negative_sampler.num_negs_per_pos 39.0 +674 39 training.batch_size 1.0 +674 40 model.embedding_dim 2.0 +674 40 optimizer.lr 0.002747262168525246 +674 40 negative_sampler.num_negs_per_pos 77.0 +674 40 training.batch_size 2.0 +674 41 model.embedding_dim 2.0 +674 41 optimizer.lr 0.001160177322541008 +674 41 negative_sampler.num_negs_per_pos 26.0 +674 41 training.batch_size 0.0 +674 42 model.embedding_dim 2.0 +674 42 optimizer.lr 0.00283544092378472 +674 42 negative_sampler.num_negs_per_pos 2.0 +674 42 training.batch_size 0.0 +674 43 model.embedding_dim 1.0 +674 43 optimizer.lr 0.003814090265708058 +674 43 negative_sampler.num_negs_per_pos 89.0 +674 43 training.batch_size 1.0 +674 44 model.embedding_dim 2.0 +674 44 optimizer.lr 0.008032798562870546 +674 44 negative_sampler.num_negs_per_pos 37.0 +674 44 training.batch_size 0.0 +674 45 model.embedding_dim 2.0 +674 45 optimizer.lr 0.0012429755987338634 +674 45 negative_sampler.num_negs_per_pos 88.0 +674 45 training.batch_size 2.0 +674 46 model.embedding_dim 2.0 +674 46 optimizer.lr 0.03010841181268931 +674 46 negative_sampler.num_negs_per_pos 42.0 +674 46 training.batch_size 1.0 +674 47 model.embedding_dim 2.0 +674 47 optimizer.lr 0.0071525122664439994 +674 47 negative_sampler.num_negs_per_pos 1.0 +674 47 training.batch_size 2.0 +674 48 model.embedding_dim 2.0 +674 48 optimizer.lr 0.05858692135554815 +674 48 negative_sampler.num_negs_per_pos 44.0 +674 48 training.batch_size 0.0 +674 49 model.embedding_dim 2.0 +674 49 optimizer.lr 0.013135167850928529 +674 49 negative_sampler.num_negs_per_pos 33.0 +674 49 training.batch_size 0.0 +674 50 model.embedding_dim 1.0 +674 50 optimizer.lr 0.00881161626251133 +674 50 negative_sampler.num_negs_per_pos 51.0 +674 50 training.batch_size 1.0 +674 51 model.embedding_dim 0.0 +674 51 optimizer.lr 0.03328267392731589 +674 51 
negative_sampler.num_negs_per_pos 31.0 +674 51 training.batch_size 2.0 +674 52 model.embedding_dim 0.0 +674 52 optimizer.lr 0.004698374009855884 +674 52 negative_sampler.num_negs_per_pos 6.0 +674 52 training.batch_size 2.0 +674 53 model.embedding_dim 2.0 +674 53 optimizer.lr 0.0029565511984391956 +674 53 negative_sampler.num_negs_per_pos 21.0 +674 53 training.batch_size 1.0 +674 54 model.embedding_dim 1.0 +674 54 optimizer.lr 0.012380992625975274 +674 54 negative_sampler.num_negs_per_pos 53.0 +674 54 training.batch_size 0.0 +674 55 model.embedding_dim 0.0 +674 55 optimizer.lr 0.022212205313887973 +674 55 negative_sampler.num_negs_per_pos 8.0 +674 55 training.batch_size 1.0 +674 56 model.embedding_dim 1.0 +674 56 optimizer.lr 0.034167978331959795 +674 56 negative_sampler.num_negs_per_pos 8.0 +674 56 training.batch_size 0.0 +674 57 model.embedding_dim 2.0 +674 57 optimizer.lr 0.01335367312105663 +674 57 negative_sampler.num_negs_per_pos 45.0 +674 57 training.batch_size 1.0 +674 58 model.embedding_dim 0.0 +674 58 optimizer.lr 0.002946531298974578 +674 58 negative_sampler.num_negs_per_pos 92.0 +674 58 training.batch_size 1.0 +674 59 model.embedding_dim 0.0 +674 59 optimizer.lr 0.003876383428216546 +674 59 negative_sampler.num_negs_per_pos 44.0 +674 59 training.batch_size 2.0 +674 60 model.embedding_dim 2.0 +674 60 optimizer.lr 0.052537489886891206 +674 60 negative_sampler.num_negs_per_pos 66.0 +674 60 training.batch_size 1.0 +674 61 model.embedding_dim 1.0 +674 61 optimizer.lr 0.07105986296839774 +674 61 negative_sampler.num_negs_per_pos 10.0 +674 61 training.batch_size 2.0 +674 62 model.embedding_dim 2.0 +674 62 optimizer.lr 0.0019426746212022855 +674 62 negative_sampler.num_negs_per_pos 3.0 +674 62 training.batch_size 0.0 +674 63 model.embedding_dim 0.0 +674 63 optimizer.lr 0.0026597759025982276 +674 63 negative_sampler.num_negs_per_pos 48.0 +674 63 training.batch_size 0.0 +674 64 model.embedding_dim 1.0 +674 64 optimizer.lr 0.00945165211160237 +674 64 
negative_sampler.num_negs_per_pos 89.0 +674 64 training.batch_size 2.0 +674 65 model.embedding_dim 1.0 +674 65 optimizer.lr 0.010854363411428296 +674 65 negative_sampler.num_negs_per_pos 24.0 +674 65 training.batch_size 1.0 +674 66 model.embedding_dim 1.0 +674 66 optimizer.lr 0.006141347172835622 +674 66 negative_sampler.num_negs_per_pos 10.0 +674 66 training.batch_size 1.0 +674 67 model.embedding_dim 1.0 +674 67 optimizer.lr 0.03852305582589687 +674 67 negative_sampler.num_negs_per_pos 18.0 +674 67 training.batch_size 2.0 +674 68 model.embedding_dim 1.0 +674 68 optimizer.lr 0.011663696710701261 +674 68 negative_sampler.num_negs_per_pos 83.0 +674 68 training.batch_size 2.0 +674 69 model.embedding_dim 0.0 +674 69 optimizer.lr 0.008256455260383166 +674 69 negative_sampler.num_negs_per_pos 20.0 +674 69 training.batch_size 0.0 +674 70 model.embedding_dim 0.0 +674 70 optimizer.lr 0.0011732185150904342 +674 70 negative_sampler.num_negs_per_pos 64.0 +674 70 training.batch_size 0.0 +674 71 model.embedding_dim 1.0 +674 71 optimizer.lr 0.04360771333714352 +674 71 negative_sampler.num_negs_per_pos 2.0 +674 71 training.batch_size 2.0 +674 72 model.embedding_dim 1.0 +674 72 optimizer.lr 0.0032893899907675494 +674 72 negative_sampler.num_negs_per_pos 75.0 +674 72 training.batch_size 2.0 +674 73 model.embedding_dim 0.0 +674 73 optimizer.lr 0.012418307525611936 +674 73 negative_sampler.num_negs_per_pos 62.0 +674 73 training.batch_size 0.0 +674 74 model.embedding_dim 2.0 +674 74 optimizer.lr 0.016004878987973578 +674 74 negative_sampler.num_negs_per_pos 25.0 +674 74 training.batch_size 0.0 +674 75 model.embedding_dim 0.0 +674 75 optimizer.lr 0.020482617184151234 +674 75 negative_sampler.num_negs_per_pos 39.0 +674 75 training.batch_size 1.0 +674 76 model.embedding_dim 0.0 +674 76 optimizer.lr 0.0023486218020204504 +674 76 negative_sampler.num_negs_per_pos 39.0 +674 76 training.batch_size 1.0 +674 77 model.embedding_dim 2.0 +674 77 optimizer.lr 0.0026656500437652977 +674 77 
negative_sampler.num_negs_per_pos 15.0 +674 77 training.batch_size 0.0 +674 78 model.embedding_dim 2.0 +674 78 optimizer.lr 0.03332442286887602 +674 78 negative_sampler.num_negs_per_pos 41.0 +674 78 training.batch_size 1.0 +674 79 model.embedding_dim 1.0 +674 79 optimizer.lr 0.00558752907988447 +674 79 negative_sampler.num_negs_per_pos 98.0 +674 79 training.batch_size 0.0 +674 80 model.embedding_dim 0.0 +674 80 optimizer.lr 0.012793208347537647 +674 80 negative_sampler.num_negs_per_pos 67.0 +674 80 training.batch_size 1.0 +674 81 model.embedding_dim 0.0 +674 81 optimizer.lr 0.013975569573283225 +674 81 negative_sampler.num_negs_per_pos 84.0 +674 81 training.batch_size 1.0 +674 82 model.embedding_dim 2.0 +674 82 optimizer.lr 0.018410875091182888 +674 82 negative_sampler.num_negs_per_pos 35.0 +674 82 training.batch_size 2.0 +674 83 model.embedding_dim 0.0 +674 83 optimizer.lr 0.00871458110490633 +674 83 negative_sampler.num_negs_per_pos 57.0 +674 83 training.batch_size 2.0 +674 84 model.embedding_dim 2.0 +674 84 optimizer.lr 0.028274775726774993 +674 84 negative_sampler.num_negs_per_pos 19.0 +674 84 training.batch_size 2.0 +674 85 model.embedding_dim 0.0 +674 85 optimizer.lr 0.0010694998446619303 +674 85 negative_sampler.num_negs_per_pos 62.0 +674 85 training.batch_size 0.0 +674 86 model.embedding_dim 2.0 +674 86 optimizer.lr 0.0030704587743670855 +674 86 negative_sampler.num_negs_per_pos 8.0 +674 86 training.batch_size 2.0 +674 87 model.embedding_dim 1.0 +674 87 optimizer.lr 0.0018118176430564532 +674 87 negative_sampler.num_negs_per_pos 95.0 +674 87 training.batch_size 2.0 +674 88 model.embedding_dim 1.0 +674 88 optimizer.lr 0.008450278926753554 +674 88 negative_sampler.num_negs_per_pos 87.0 +674 88 training.batch_size 2.0 +674 89 model.embedding_dim 2.0 +674 89 optimizer.lr 0.08090479536764258 +674 89 negative_sampler.num_negs_per_pos 6.0 +674 89 training.batch_size 1.0 +674 90 model.embedding_dim 0.0 +674 90 optimizer.lr 0.06725488272267402 +674 90 
negative_sampler.num_negs_per_pos 55.0 +674 90 training.batch_size 0.0 +674 91 model.embedding_dim 1.0 +674 91 optimizer.lr 0.0020881688109043108 +674 91 negative_sampler.num_negs_per_pos 44.0 +674 91 training.batch_size 1.0 +674 92 model.embedding_dim 0.0 +674 92 optimizer.lr 0.09324635900217781 +674 92 negative_sampler.num_negs_per_pos 15.0 +674 92 training.batch_size 0.0 +674 93 model.embedding_dim 0.0 +674 93 optimizer.lr 0.005987619764957237 +674 93 negative_sampler.num_negs_per_pos 78.0 +674 93 training.batch_size 1.0 +674 94 model.embedding_dim 1.0 +674 94 optimizer.lr 0.007343473002158302 +674 94 negative_sampler.num_negs_per_pos 54.0 +674 94 training.batch_size 2.0 +674 95 model.embedding_dim 2.0 +674 95 optimizer.lr 0.0041074172092702135 +674 95 negative_sampler.num_negs_per_pos 89.0 +674 95 training.batch_size 0.0 +674 96 model.embedding_dim 1.0 +674 96 optimizer.lr 0.0032956069768777735 +674 96 negative_sampler.num_negs_per_pos 72.0 +674 96 training.batch_size 2.0 +674 97 model.embedding_dim 0.0 +674 97 optimizer.lr 0.001232311397161814 +674 97 negative_sampler.num_negs_per_pos 55.0 +674 97 training.batch_size 0.0 +674 98 model.embedding_dim 0.0 +674 98 optimizer.lr 0.0022791589610469906 +674 98 negative_sampler.num_negs_per_pos 47.0 +674 98 training.batch_size 2.0 +674 99 model.embedding_dim 2.0 +674 99 optimizer.lr 0.07238109343023794 +674 99 negative_sampler.num_negs_per_pos 64.0 +674 99 training.batch_size 0.0 +674 100 model.embedding_dim 1.0 +674 100 optimizer.lr 0.006837484018293011 +674 100 negative_sampler.num_negs_per_pos 99.0 +674 100 training.batch_size 0.0 +674 1 dataset """kinships""" +674 1 model """simple""" +674 1 loss """softplus""" +674 1 regularizer """no""" +674 1 optimizer """adam""" +674 1 training_loop """owa""" +674 1 negative_sampler """basic""" +674 1 evaluator """rankbased""" +674 2 dataset """kinships""" +674 2 model """simple""" +674 2 loss """softplus""" +674 2 regularizer """no""" +674 2 optimizer """adam""" +674 2 
training_loop """owa""" +674 2 negative_sampler """basic""" +674 2 evaluator """rankbased""" +674 3 dataset """kinships""" +674 3 model """simple""" +674 3 loss """softplus""" +674 3 regularizer """no""" +674 3 optimizer """adam""" +674 3 training_loop """owa""" +674 3 negative_sampler """basic""" +674 3 evaluator """rankbased""" +674 4 dataset """kinships""" +674 4 model """simple""" +674 4 loss """softplus""" +674 4 regularizer """no""" +674 4 optimizer """adam""" +674 4 training_loop """owa""" +674 4 negative_sampler """basic""" +674 4 evaluator """rankbased""" +674 5 dataset """kinships""" +674 5 model """simple""" +674 5 loss """softplus""" +674 5 regularizer """no""" +674 5 optimizer """adam""" +674 5 training_loop """owa""" +674 5 negative_sampler """basic""" +674 5 evaluator """rankbased""" +674 6 dataset """kinships""" +674 6 model """simple""" +674 6 loss """softplus""" +674 6 regularizer """no""" +674 6 optimizer """adam""" +674 6 training_loop """owa""" +674 6 negative_sampler """basic""" +674 6 evaluator """rankbased""" +674 7 dataset """kinships""" +674 7 model """simple""" +674 7 loss """softplus""" +674 7 regularizer """no""" +674 7 optimizer """adam""" +674 7 training_loop """owa""" +674 7 negative_sampler """basic""" +674 7 evaluator """rankbased""" +674 8 dataset """kinships""" +674 8 model """simple""" +674 8 loss """softplus""" +674 8 regularizer """no""" +674 8 optimizer """adam""" +674 8 training_loop """owa""" +674 8 negative_sampler """basic""" +674 8 evaluator """rankbased""" +674 9 dataset """kinships""" +674 9 model """simple""" +674 9 loss """softplus""" +674 9 regularizer """no""" +674 9 optimizer """adam""" +674 9 training_loop """owa""" +674 9 negative_sampler """basic""" +674 9 evaluator """rankbased""" +674 10 dataset """kinships""" +674 10 model """simple""" +674 10 loss """softplus""" +674 10 regularizer """no""" +674 10 optimizer """adam""" +674 10 training_loop """owa""" +674 10 negative_sampler """basic""" +674 10 evaluator 
"""rankbased""" +674 11 dataset """kinships""" +674 11 model """simple""" +674 11 loss """softplus""" +674 11 regularizer """no""" +674 11 optimizer """adam""" +674 11 training_loop """owa""" +674 11 negative_sampler """basic""" +674 11 evaluator """rankbased""" +674 12 dataset """kinships""" +674 12 model """simple""" +674 12 loss """softplus""" +674 12 regularizer """no""" +674 12 optimizer """adam""" +674 12 training_loop """owa""" +674 12 negative_sampler """basic""" +674 12 evaluator """rankbased""" +674 13 dataset """kinships""" +674 13 model """simple""" +674 13 loss """softplus""" +674 13 regularizer """no""" +674 13 optimizer """adam""" +674 13 training_loop """owa""" +674 13 negative_sampler """basic""" +674 13 evaluator """rankbased""" +674 14 dataset """kinships""" +674 14 model """simple""" +674 14 loss """softplus""" +674 14 regularizer """no""" +674 14 optimizer """adam""" +674 14 training_loop """owa""" +674 14 negative_sampler """basic""" +674 14 evaluator """rankbased""" +674 15 dataset """kinships""" +674 15 model """simple""" +674 15 loss """softplus""" +674 15 regularizer """no""" +674 15 optimizer """adam""" +674 15 training_loop """owa""" +674 15 negative_sampler """basic""" +674 15 evaluator """rankbased""" +674 16 dataset """kinships""" +674 16 model """simple""" +674 16 loss """softplus""" +674 16 regularizer """no""" +674 16 optimizer """adam""" +674 16 training_loop """owa""" +674 16 negative_sampler """basic""" +674 16 evaluator """rankbased""" +674 17 dataset """kinships""" +674 17 model """simple""" +674 17 loss """softplus""" +674 17 regularizer """no""" +674 17 optimizer """adam""" +674 17 training_loop """owa""" +674 17 negative_sampler """basic""" +674 17 evaluator """rankbased""" +674 18 dataset """kinships""" +674 18 model """simple""" +674 18 loss """softplus""" +674 18 regularizer """no""" +674 18 optimizer """adam""" +674 18 training_loop """owa""" +674 18 negative_sampler """basic""" +674 18 evaluator """rankbased""" +674 19 
dataset """kinships""" +674 19 model """simple""" +674 19 loss """softplus""" +674 19 regularizer """no""" +674 19 optimizer """adam""" +674 19 training_loop """owa""" +674 19 negative_sampler """basic""" +674 19 evaluator """rankbased""" +674 20 dataset """kinships""" +674 20 model """simple""" +674 20 loss """softplus""" +674 20 regularizer """no""" +674 20 optimizer """adam""" +674 20 training_loop """owa""" +674 20 negative_sampler """basic""" +674 20 evaluator """rankbased""" +674 21 dataset """kinships""" +674 21 model """simple""" +674 21 loss """softplus""" +674 21 regularizer """no""" +674 21 optimizer """adam""" +674 21 training_loop """owa""" +674 21 negative_sampler """basic""" +674 21 evaluator """rankbased""" +674 22 dataset """kinships""" +674 22 model """simple""" +674 22 loss """softplus""" +674 22 regularizer """no""" +674 22 optimizer """adam""" +674 22 training_loop """owa""" +674 22 negative_sampler """basic""" +674 22 evaluator """rankbased""" +674 23 dataset """kinships""" +674 23 model """simple""" +674 23 loss """softplus""" +674 23 regularizer """no""" +674 23 optimizer """adam""" +674 23 training_loop """owa""" +674 23 negative_sampler """basic""" +674 23 evaluator """rankbased""" +674 24 dataset """kinships""" +674 24 model """simple""" +674 24 loss """softplus""" +674 24 regularizer """no""" +674 24 optimizer """adam""" +674 24 training_loop """owa""" +674 24 negative_sampler """basic""" +674 24 evaluator """rankbased""" +674 25 dataset """kinships""" +674 25 model """simple""" +674 25 loss """softplus""" +674 25 regularizer """no""" +674 25 optimizer """adam""" +674 25 training_loop """owa""" +674 25 negative_sampler """basic""" +674 25 evaluator """rankbased""" +674 26 dataset """kinships""" +674 26 model """simple""" +674 26 loss """softplus""" +674 26 regularizer """no""" +674 26 optimizer """adam""" +674 26 training_loop """owa""" +674 26 negative_sampler """basic""" +674 26 evaluator """rankbased""" +674 27 dataset """kinships""" 
+674 27 model """simple""" +674 27 loss """softplus""" +674 27 regularizer """no""" +674 27 optimizer """adam""" +674 27 training_loop """owa""" +674 27 negative_sampler """basic""" +674 27 evaluator """rankbased""" +674 28 dataset """kinships""" +674 28 model """simple""" +674 28 loss """softplus""" +674 28 regularizer """no""" +674 28 optimizer """adam""" +674 28 training_loop """owa""" +674 28 negative_sampler """basic""" +674 28 evaluator """rankbased""" +674 29 dataset """kinships""" +674 29 model """simple""" +674 29 loss """softplus""" +674 29 regularizer """no""" +674 29 optimizer """adam""" +674 29 training_loop """owa""" +674 29 negative_sampler """basic""" +674 29 evaluator """rankbased""" +674 30 dataset """kinships""" +674 30 model """simple""" +674 30 loss """softplus""" +674 30 regularizer """no""" +674 30 optimizer """adam""" +674 30 training_loop """owa""" +674 30 negative_sampler """basic""" +674 30 evaluator """rankbased""" +674 31 dataset """kinships""" +674 31 model """simple""" +674 31 loss """softplus""" +674 31 regularizer """no""" +674 31 optimizer """adam""" +674 31 training_loop """owa""" +674 31 negative_sampler """basic""" +674 31 evaluator """rankbased""" +674 32 dataset """kinships""" +674 32 model """simple""" +674 32 loss """softplus""" +674 32 regularizer """no""" +674 32 optimizer """adam""" +674 32 training_loop """owa""" +674 32 negative_sampler """basic""" +674 32 evaluator """rankbased""" +674 33 dataset """kinships""" +674 33 model """simple""" +674 33 loss """softplus""" +674 33 regularizer """no""" +674 33 optimizer """adam""" +674 33 training_loop """owa""" +674 33 negative_sampler """basic""" +674 33 evaluator """rankbased""" +674 34 dataset """kinships""" +674 34 model """simple""" +674 34 loss """softplus""" +674 34 regularizer """no""" +674 34 optimizer """adam""" +674 34 training_loop """owa""" +674 34 negative_sampler """basic""" +674 34 evaluator """rankbased""" +674 35 dataset """kinships""" +674 35 model 
"""simple""" +674 35 loss """softplus""" +674 35 regularizer """no""" +674 35 optimizer """adam""" +674 35 training_loop """owa""" +674 35 negative_sampler """basic""" +674 35 evaluator """rankbased""" +674 36 dataset """kinships""" +674 36 model """simple""" +674 36 loss """softplus""" +674 36 regularizer """no""" +674 36 optimizer """adam""" +674 36 training_loop """owa""" +674 36 negative_sampler """basic""" +674 36 evaluator """rankbased""" +674 37 dataset """kinships""" +674 37 model """simple""" +674 37 loss """softplus""" +674 37 regularizer """no""" +674 37 optimizer """adam""" +674 37 training_loop """owa""" +674 37 negative_sampler """basic""" +674 37 evaluator """rankbased""" +674 38 dataset """kinships""" +674 38 model """simple""" +674 38 loss """softplus""" +674 38 regularizer """no""" +674 38 optimizer """adam""" +674 38 training_loop """owa""" +674 38 negative_sampler """basic""" +674 38 evaluator """rankbased""" +674 39 dataset """kinships""" +674 39 model """simple""" +674 39 loss """softplus""" +674 39 regularizer """no""" +674 39 optimizer """adam""" +674 39 training_loop """owa""" +674 39 negative_sampler """basic""" +674 39 evaluator """rankbased""" +674 40 dataset """kinships""" +674 40 model """simple""" +674 40 loss """softplus""" +674 40 regularizer """no""" +674 40 optimizer """adam""" +674 40 training_loop """owa""" +674 40 negative_sampler """basic""" +674 40 evaluator """rankbased""" +674 41 dataset """kinships""" +674 41 model """simple""" +674 41 loss """softplus""" +674 41 regularizer """no""" +674 41 optimizer """adam""" +674 41 training_loop """owa""" +674 41 negative_sampler """basic""" +674 41 evaluator """rankbased""" +674 42 dataset """kinships""" +674 42 model """simple""" +674 42 loss """softplus""" +674 42 regularizer """no""" +674 42 optimizer """adam""" +674 42 training_loop """owa""" +674 42 negative_sampler """basic""" +674 42 evaluator """rankbased""" +674 43 dataset """kinships""" +674 43 model """simple""" +674 43 
loss """softplus""" +674 43 regularizer """no""" +674 43 optimizer """adam""" +674 43 training_loop """owa""" +674 43 negative_sampler """basic""" +674 43 evaluator """rankbased""" +674 44 dataset """kinships""" +674 44 model """simple""" +674 44 loss """softplus""" +674 44 regularizer """no""" +674 44 optimizer """adam""" +674 44 training_loop """owa""" +674 44 negative_sampler """basic""" +674 44 evaluator """rankbased""" +674 45 dataset """kinships""" +674 45 model """simple""" +674 45 loss """softplus""" +674 45 regularizer """no""" +674 45 optimizer """adam""" +674 45 training_loop """owa""" +674 45 negative_sampler """basic""" +674 45 evaluator """rankbased""" +674 46 dataset """kinships""" +674 46 model """simple""" +674 46 loss """softplus""" +674 46 regularizer """no""" +674 46 optimizer """adam""" +674 46 training_loop """owa""" +674 46 negative_sampler """basic""" +674 46 evaluator """rankbased""" +674 47 dataset """kinships""" +674 47 model """simple""" +674 47 loss """softplus""" +674 47 regularizer """no""" +674 47 optimizer """adam""" +674 47 training_loop """owa""" +674 47 negative_sampler """basic""" +674 47 evaluator """rankbased""" +674 48 dataset """kinships""" +674 48 model """simple""" +674 48 loss """softplus""" +674 48 regularizer """no""" +674 48 optimizer """adam""" +674 48 training_loop """owa""" +674 48 negative_sampler """basic""" +674 48 evaluator """rankbased""" +674 49 dataset """kinships""" +674 49 model """simple""" +674 49 loss """softplus""" +674 49 regularizer """no""" +674 49 optimizer """adam""" +674 49 training_loop """owa""" +674 49 negative_sampler """basic""" +674 49 evaluator """rankbased""" +674 50 dataset """kinships""" +674 50 model """simple""" +674 50 loss """softplus""" +674 50 regularizer """no""" +674 50 optimizer """adam""" +674 50 training_loop """owa""" +674 50 negative_sampler """basic""" +674 50 evaluator """rankbased""" +674 51 dataset """kinships""" +674 51 model """simple""" +674 51 loss """softplus""" 
+674 51 regularizer """no""" +674 51 optimizer """adam""" +674 51 training_loop """owa""" +674 51 negative_sampler """basic""" +674 51 evaluator """rankbased""" +674 52 dataset """kinships""" +674 52 model """simple""" +674 52 loss """softplus""" +674 52 regularizer """no""" +674 52 optimizer """adam""" +674 52 training_loop """owa""" +674 52 negative_sampler """basic""" +674 52 evaluator """rankbased""" +674 53 dataset """kinships""" +674 53 model """simple""" +674 53 loss """softplus""" +674 53 regularizer """no""" +674 53 optimizer """adam""" +674 53 training_loop """owa""" +674 53 negative_sampler """basic""" +674 53 evaluator """rankbased""" +674 54 dataset """kinships""" +674 54 model """simple""" +674 54 loss """softplus""" +674 54 regularizer """no""" +674 54 optimizer """adam""" +674 54 training_loop """owa""" +674 54 negative_sampler """basic""" +674 54 evaluator """rankbased""" +674 55 dataset """kinships""" +674 55 model """simple""" +674 55 loss """softplus""" +674 55 regularizer """no""" +674 55 optimizer """adam""" +674 55 training_loop """owa""" +674 55 negative_sampler """basic""" +674 55 evaluator """rankbased""" +674 56 dataset """kinships""" +674 56 model """simple""" +674 56 loss """softplus""" +674 56 regularizer """no""" +674 56 optimizer """adam""" +674 56 training_loop """owa""" +674 56 negative_sampler """basic""" +674 56 evaluator """rankbased""" +674 57 dataset """kinships""" +674 57 model """simple""" +674 57 loss """softplus""" +674 57 regularizer """no""" +674 57 optimizer """adam""" +674 57 training_loop """owa""" +674 57 negative_sampler """basic""" +674 57 evaluator """rankbased""" +674 58 dataset """kinships""" +674 58 model """simple""" +674 58 loss """softplus""" +674 58 regularizer """no""" +674 58 optimizer """adam""" +674 58 training_loop """owa""" +674 58 negative_sampler """basic""" +674 58 evaluator """rankbased""" +674 59 dataset """kinships""" +674 59 model """simple""" +674 59 loss """softplus""" +674 59 regularizer 
"""no""" +674 59 optimizer """adam""" +674 59 training_loop """owa""" +674 59 negative_sampler """basic""" +674 59 evaluator """rankbased""" +674 60 dataset """kinships""" +674 60 model """simple""" +674 60 loss """softplus""" +674 60 regularizer """no""" +674 60 optimizer """adam""" +674 60 training_loop """owa""" +674 60 negative_sampler """basic""" +674 60 evaluator """rankbased""" +674 61 dataset """kinships""" +674 61 model """simple""" +674 61 loss """softplus""" +674 61 regularizer """no""" +674 61 optimizer """adam""" +674 61 training_loop """owa""" +674 61 negative_sampler """basic""" +674 61 evaluator """rankbased""" +674 62 dataset """kinships""" +674 62 model """simple""" +674 62 loss """softplus""" +674 62 regularizer """no""" +674 62 optimizer """adam""" +674 62 training_loop """owa""" +674 62 negative_sampler """basic""" +674 62 evaluator """rankbased""" +674 63 dataset """kinships""" +674 63 model """simple""" +674 63 loss """softplus""" +674 63 regularizer """no""" +674 63 optimizer """adam""" +674 63 training_loop """owa""" +674 63 negative_sampler """basic""" +674 63 evaluator """rankbased""" +674 64 dataset """kinships""" +674 64 model """simple""" +674 64 loss """softplus""" +674 64 regularizer """no""" +674 64 optimizer """adam""" +674 64 training_loop """owa""" +674 64 negative_sampler """basic""" +674 64 evaluator """rankbased""" +674 65 dataset """kinships""" +674 65 model """simple""" +674 65 loss """softplus""" +674 65 regularizer """no""" +674 65 optimizer """adam""" +674 65 training_loop """owa""" +674 65 negative_sampler """basic""" +674 65 evaluator """rankbased""" +674 66 dataset """kinships""" +674 66 model """simple""" +674 66 loss """softplus""" +674 66 regularizer """no""" +674 66 optimizer """adam""" +674 66 training_loop """owa""" +674 66 negative_sampler """basic""" +674 66 evaluator """rankbased""" +674 67 dataset """kinships""" +674 67 model """simple""" +674 67 loss """softplus""" +674 67 regularizer """no""" +674 67 
optimizer """adam""" +674 67 training_loop """owa""" +674 67 negative_sampler """basic""" +674 67 evaluator """rankbased""" +674 68 dataset """kinships""" +674 68 model """simple""" +674 68 loss """softplus""" +674 68 regularizer """no""" +674 68 optimizer """adam""" +674 68 training_loop """owa""" +674 68 negative_sampler """basic""" +674 68 evaluator """rankbased""" +674 69 dataset """kinships""" +674 69 model """simple""" +674 69 loss """softplus""" +674 69 regularizer """no""" +674 69 optimizer """adam""" +674 69 training_loop """owa""" +674 69 negative_sampler """basic""" +674 69 evaluator """rankbased""" +674 70 dataset """kinships""" +674 70 model """simple""" +674 70 loss """softplus""" +674 70 regularizer """no""" +674 70 optimizer """adam""" +674 70 training_loop """owa""" +674 70 negative_sampler """basic""" +674 70 evaluator """rankbased""" +674 71 dataset """kinships""" +674 71 model """simple""" +674 71 loss """softplus""" +674 71 regularizer """no""" +674 71 optimizer """adam""" +674 71 training_loop """owa""" +674 71 negative_sampler """basic""" +674 71 evaluator """rankbased""" +674 72 dataset """kinships""" +674 72 model """simple""" +674 72 loss """softplus""" +674 72 regularizer """no""" +674 72 optimizer """adam""" +674 72 training_loop """owa""" +674 72 negative_sampler """basic""" +674 72 evaluator """rankbased""" +674 73 dataset """kinships""" +674 73 model """simple""" +674 73 loss """softplus""" +674 73 regularizer """no""" +674 73 optimizer """adam""" +674 73 training_loop """owa""" +674 73 negative_sampler """basic""" +674 73 evaluator """rankbased""" +674 74 dataset """kinships""" +674 74 model """simple""" +674 74 loss """softplus""" +674 74 regularizer """no""" +674 74 optimizer """adam""" +674 74 training_loop """owa""" +674 74 negative_sampler """basic""" +674 74 evaluator """rankbased""" +674 75 dataset """kinships""" +674 75 model """simple""" +674 75 loss """softplus""" +674 75 regularizer """no""" +674 75 optimizer """adam""" 
+674 75 training_loop """owa""" +674 75 negative_sampler """basic""" +674 75 evaluator """rankbased""" +674 76 dataset """kinships""" +674 76 model """simple""" +674 76 loss """softplus""" +674 76 regularizer """no""" +674 76 optimizer """adam""" +674 76 training_loop """owa""" +674 76 negative_sampler """basic""" +674 76 evaluator """rankbased""" +674 77 dataset """kinships""" +674 77 model """simple""" +674 77 loss """softplus""" +674 77 regularizer """no""" +674 77 optimizer """adam""" +674 77 training_loop """owa""" +674 77 negative_sampler """basic""" +674 77 evaluator """rankbased""" +674 78 dataset """kinships""" +674 78 model """simple""" +674 78 loss """softplus""" +674 78 regularizer """no""" +674 78 optimizer """adam""" +674 78 training_loop """owa""" +674 78 negative_sampler """basic""" +674 78 evaluator """rankbased""" +674 79 dataset """kinships""" +674 79 model """simple""" +674 79 loss """softplus""" +674 79 regularizer """no""" +674 79 optimizer """adam""" +674 79 training_loop """owa""" +674 79 negative_sampler """basic""" +674 79 evaluator """rankbased""" +674 80 dataset """kinships""" +674 80 model """simple""" +674 80 loss """softplus""" +674 80 regularizer """no""" +674 80 optimizer """adam""" +674 80 training_loop """owa""" +674 80 negative_sampler """basic""" +674 80 evaluator """rankbased""" +674 81 dataset """kinships""" +674 81 model """simple""" +674 81 loss """softplus""" +674 81 regularizer """no""" +674 81 optimizer """adam""" +674 81 training_loop """owa""" +674 81 negative_sampler """basic""" +674 81 evaluator """rankbased""" +674 82 dataset """kinships""" +674 82 model """simple""" +674 82 loss """softplus""" +674 82 regularizer """no""" +674 82 optimizer """adam""" +674 82 training_loop """owa""" +674 82 negative_sampler """basic""" +674 82 evaluator """rankbased""" +674 83 dataset """kinships""" +674 83 model """simple""" +674 83 loss """softplus""" +674 83 regularizer """no""" +674 83 optimizer """adam""" +674 83 training_loop 
"""owa""" +674 83 negative_sampler """basic""" +674 83 evaluator """rankbased""" +674 84 dataset """kinships""" +674 84 model """simple""" +674 84 loss """softplus""" +674 84 regularizer """no""" +674 84 optimizer """adam""" +674 84 training_loop """owa""" +674 84 negative_sampler """basic""" +674 84 evaluator """rankbased""" +674 85 dataset """kinships""" +674 85 model """simple""" +674 85 loss """softplus""" +674 85 regularizer """no""" +674 85 optimizer """adam""" +674 85 training_loop """owa""" +674 85 negative_sampler """basic""" +674 85 evaluator """rankbased""" +674 86 dataset """kinships""" +674 86 model """simple""" +674 86 loss """softplus""" +674 86 regularizer """no""" +674 86 optimizer """adam""" +674 86 training_loop """owa""" +674 86 negative_sampler """basic""" +674 86 evaluator """rankbased""" +674 87 dataset """kinships""" +674 87 model """simple""" +674 87 loss """softplus""" +674 87 regularizer """no""" +674 87 optimizer """adam""" +674 87 training_loop """owa""" +674 87 negative_sampler """basic""" +674 87 evaluator """rankbased""" +674 88 dataset """kinships""" +674 88 model """simple""" +674 88 loss """softplus""" +674 88 regularizer """no""" +674 88 optimizer """adam""" +674 88 training_loop """owa""" +674 88 negative_sampler """basic""" +674 88 evaluator """rankbased""" +674 89 dataset """kinships""" +674 89 model """simple""" +674 89 loss """softplus""" +674 89 regularizer """no""" +674 89 optimizer """adam""" +674 89 training_loop """owa""" +674 89 negative_sampler """basic""" +674 89 evaluator """rankbased""" +674 90 dataset """kinships""" +674 90 model """simple""" +674 90 loss """softplus""" +674 90 regularizer """no""" +674 90 optimizer """adam""" +674 90 training_loop """owa""" +674 90 negative_sampler """basic""" +674 90 evaluator """rankbased""" +674 91 dataset """kinships""" +674 91 model """simple""" +674 91 loss """softplus""" +674 91 regularizer """no""" +674 91 optimizer """adam""" +674 91 training_loop """owa""" +674 91 
negative_sampler """basic""" +674 91 evaluator """rankbased""" +674 92 dataset """kinships""" +674 92 model """simple""" +674 92 loss """softplus""" +674 92 regularizer """no""" +674 92 optimizer """adam""" +674 92 training_loop """owa""" +674 92 negative_sampler """basic""" +674 92 evaluator """rankbased""" +674 93 dataset """kinships""" +674 93 model """simple""" +674 93 loss """softplus""" +674 93 regularizer """no""" +674 93 optimizer """adam""" +674 93 training_loop """owa""" +674 93 negative_sampler """basic""" +674 93 evaluator """rankbased""" +674 94 dataset """kinships""" +674 94 model """simple""" +674 94 loss """softplus""" +674 94 regularizer """no""" +674 94 optimizer """adam""" +674 94 training_loop """owa""" +674 94 negative_sampler """basic""" +674 94 evaluator """rankbased""" +674 95 dataset """kinships""" +674 95 model """simple""" +674 95 loss """softplus""" +674 95 regularizer """no""" +674 95 optimizer """adam""" +674 95 training_loop """owa""" +674 95 negative_sampler """basic""" +674 95 evaluator """rankbased""" +674 96 dataset """kinships""" +674 96 model """simple""" +674 96 loss """softplus""" +674 96 regularizer """no""" +674 96 optimizer """adam""" +674 96 training_loop """owa""" +674 96 negative_sampler """basic""" +674 96 evaluator """rankbased""" +674 97 dataset """kinships""" +674 97 model """simple""" +674 97 loss """softplus""" +674 97 regularizer """no""" +674 97 optimizer """adam""" +674 97 training_loop """owa""" +674 97 negative_sampler """basic""" +674 97 evaluator """rankbased""" +674 98 dataset """kinships""" +674 98 model """simple""" +674 98 loss """softplus""" +674 98 regularizer """no""" +674 98 optimizer """adam""" +674 98 training_loop """owa""" +674 98 negative_sampler """basic""" +674 98 evaluator """rankbased""" +674 99 dataset """kinships""" +674 99 model """simple""" +674 99 loss """softplus""" +674 99 regularizer """no""" +674 99 optimizer """adam""" +674 99 training_loop """owa""" +674 99 negative_sampler 
"""basic""" +674 99 evaluator """rankbased""" +674 100 dataset """kinships""" +674 100 model """simple""" +674 100 loss """softplus""" +674 100 regularizer """no""" +674 100 optimizer """adam""" +674 100 training_loop """owa""" +674 100 negative_sampler """basic""" +674 100 evaluator """rankbased""" +675 1 model.embedding_dim 1.0 +675 1 loss.margin 5.94822308436786 +675 1 optimizer.lr 0.03397624623610738 +675 1 negative_sampler.num_negs_per_pos 25.0 +675 1 training.batch_size 0.0 +675 2 model.embedding_dim 2.0 +675 2 loss.margin 7.627115322034732 +675 2 optimizer.lr 0.001607234658285485 +675 2 negative_sampler.num_negs_per_pos 30.0 +675 2 training.batch_size 0.0 +675 3 model.embedding_dim 2.0 +675 3 loss.margin 9.134966011910327 +675 3 optimizer.lr 0.04114547468715182 +675 3 negative_sampler.num_negs_per_pos 48.0 +675 3 training.batch_size 1.0 +675 4 model.embedding_dim 1.0 +675 4 loss.margin 0.8397086893760498 +675 4 optimizer.lr 0.012896021783498364 +675 4 negative_sampler.num_negs_per_pos 3.0 +675 4 training.batch_size 0.0 +675 5 model.embedding_dim 1.0 +675 5 loss.margin 2.0662342745458897 +675 5 optimizer.lr 0.055543176806886106 +675 5 negative_sampler.num_negs_per_pos 76.0 +675 5 training.batch_size 0.0 +675 6 model.embedding_dim 2.0 +675 6 loss.margin 1.5243437146447736 +675 6 optimizer.lr 0.009924096721550113 +675 6 negative_sampler.num_negs_per_pos 22.0 +675 6 training.batch_size 2.0 +675 7 model.embedding_dim 2.0 +675 7 loss.margin 9.480600956784656 +675 7 optimizer.lr 0.0021071222755464913 +675 7 negative_sampler.num_negs_per_pos 13.0 +675 7 training.batch_size 0.0 +675 8 model.embedding_dim 1.0 +675 8 loss.margin 7.803090416563405 +675 8 optimizer.lr 0.004416500828485005 +675 8 negative_sampler.num_negs_per_pos 39.0 +675 8 training.batch_size 2.0 +675 9 model.embedding_dim 1.0 +675 9 loss.margin 6.533017331711173 +675 9 optimizer.lr 0.025147735668620746 +675 9 negative_sampler.num_negs_per_pos 14.0 +675 9 training.batch_size 2.0 +675 10 
model.embedding_dim 0.0 +675 10 loss.margin 1.5968262078478934 +675 10 optimizer.lr 0.003587626557118278 +675 10 negative_sampler.num_negs_per_pos 69.0 +675 10 training.batch_size 1.0 +675 11 model.embedding_dim 0.0 +675 11 loss.margin 6.667410452178401 +675 11 optimizer.lr 0.016773758621204053 +675 11 negative_sampler.num_negs_per_pos 11.0 +675 11 training.batch_size 1.0 +675 12 model.embedding_dim 1.0 +675 12 loss.margin 2.2095027319800193 +675 12 optimizer.lr 0.0013826828999260606 +675 12 negative_sampler.num_negs_per_pos 52.0 +675 12 training.batch_size 2.0 +675 13 model.embedding_dim 0.0 +675 13 loss.margin 5.45501871377475 +675 13 optimizer.lr 0.0015070633198763198 +675 13 negative_sampler.num_negs_per_pos 73.0 +675 13 training.batch_size 0.0 +675 14 model.embedding_dim 0.0 +675 14 loss.margin 6.920202891690092 +675 14 optimizer.lr 0.008056417075392466 +675 14 negative_sampler.num_negs_per_pos 36.0 +675 14 training.batch_size 2.0 +675 15 model.embedding_dim 1.0 +675 15 loss.margin 5.399808841460965 +675 15 optimizer.lr 0.002901241910021338 +675 15 negative_sampler.num_negs_per_pos 80.0 +675 15 training.batch_size 1.0 +675 16 model.embedding_dim 1.0 +675 16 loss.margin 5.432443250033428 +675 16 optimizer.lr 0.08590689780310935 +675 16 negative_sampler.num_negs_per_pos 41.0 +675 16 training.batch_size 0.0 +675 17 model.embedding_dim 0.0 +675 17 loss.margin 8.419289232177801 +675 17 optimizer.lr 0.04559606238383377 +675 17 negative_sampler.num_negs_per_pos 4.0 +675 17 training.batch_size 1.0 +675 18 model.embedding_dim 2.0 +675 18 loss.margin 6.595308455316566 +675 18 optimizer.lr 0.0974202696957644 +675 18 negative_sampler.num_negs_per_pos 77.0 +675 18 training.batch_size 1.0 +675 19 model.embedding_dim 1.0 +675 19 loss.margin 9.552625255421558 +675 19 optimizer.lr 0.027509058758749167 +675 19 negative_sampler.num_negs_per_pos 24.0 +675 19 training.batch_size 0.0 +675 20 model.embedding_dim 2.0 +675 20 loss.margin 9.490385138619565 +675 20 optimizer.lr 
0.0012466439061657212 +675 20 negative_sampler.num_negs_per_pos 34.0 +675 20 training.batch_size 2.0 +675 21 model.embedding_dim 1.0 +675 21 loss.margin 0.8469828798339281 +675 21 optimizer.lr 0.0015828341723301527 +675 21 negative_sampler.num_negs_per_pos 63.0 +675 21 training.batch_size 1.0 +675 22 model.embedding_dim 2.0 +675 22 loss.margin 1.7980769805943477 +675 22 optimizer.lr 0.005777318159673548 +675 22 negative_sampler.num_negs_per_pos 91.0 +675 22 training.batch_size 0.0 +675 23 model.embedding_dim 2.0 +675 23 loss.margin 3.0889500889165715 +675 23 optimizer.lr 0.005515959100860206 +675 23 negative_sampler.num_negs_per_pos 3.0 +675 23 training.batch_size 1.0 +675 24 model.embedding_dim 0.0 +675 24 loss.margin 9.022607879211147 +675 24 optimizer.lr 0.001127115497574888 +675 24 negative_sampler.num_negs_per_pos 7.0 +675 24 training.batch_size 1.0 +675 25 model.embedding_dim 2.0 +675 25 loss.margin 3.5838491526631535 +675 25 optimizer.lr 0.004102500484223929 +675 25 negative_sampler.num_negs_per_pos 28.0 +675 25 training.batch_size 2.0 +675 26 model.embedding_dim 1.0 +675 26 loss.margin 1.016922826518907 +675 26 optimizer.lr 0.01934569593499845 +675 26 negative_sampler.num_negs_per_pos 72.0 +675 26 training.batch_size 2.0 +675 27 model.embedding_dim 0.0 +675 27 loss.margin 1.2068094723176086 +675 27 optimizer.lr 0.039294057483115576 +675 27 negative_sampler.num_negs_per_pos 98.0 +675 27 training.batch_size 2.0 +675 28 model.embedding_dim 1.0 +675 28 loss.margin 1.416219002806323 +675 28 optimizer.lr 0.0061451275346425865 +675 28 negative_sampler.num_negs_per_pos 59.0 +675 28 training.batch_size 2.0 +675 29 model.embedding_dim 1.0 +675 29 loss.margin 4.6006310860502975 +675 29 optimizer.lr 0.0028175196424884965 +675 29 negative_sampler.num_negs_per_pos 37.0 +675 29 training.batch_size 0.0 +675 30 model.embedding_dim 2.0 +675 30 loss.margin 1.4069764659684156 +675 30 optimizer.lr 0.0059304501761851675 +675 30 negative_sampler.num_negs_per_pos 63.0 +675 30 
training.batch_size 0.0 +675 31 model.embedding_dim 2.0 +675 31 loss.margin 3.576735449899595 +675 31 optimizer.lr 0.001789999676822359 +675 31 negative_sampler.num_negs_per_pos 87.0 +675 31 training.batch_size 2.0 +675 32 model.embedding_dim 2.0 +675 32 loss.margin 5.249025351438695 +675 32 optimizer.lr 0.0638868316745166 +675 32 negative_sampler.num_negs_per_pos 97.0 +675 32 training.batch_size 2.0 +675 33 model.embedding_dim 2.0 +675 33 loss.margin 8.864614759866987 +675 33 optimizer.lr 0.004318971461291971 +675 33 negative_sampler.num_negs_per_pos 21.0 +675 33 training.batch_size 2.0 +675 1 dataset """wn18rr""" +675 1 model """simple""" +675 1 loss """marginranking""" +675 1 regularizer """no""" +675 1 optimizer """adam""" +675 1 training_loop """owa""" +675 1 negative_sampler """basic""" +675 1 evaluator """rankbased""" +675 2 dataset """wn18rr""" +675 2 model """simple""" +675 2 loss """marginranking""" +675 2 regularizer """no""" +675 2 optimizer """adam""" +675 2 training_loop """owa""" +675 2 negative_sampler """basic""" +675 2 evaluator """rankbased""" +675 3 dataset """wn18rr""" +675 3 model """simple""" +675 3 loss """marginranking""" +675 3 regularizer """no""" +675 3 optimizer """adam""" +675 3 training_loop """owa""" +675 3 negative_sampler """basic""" +675 3 evaluator """rankbased""" +675 4 dataset """wn18rr""" +675 4 model """simple""" +675 4 loss """marginranking""" +675 4 regularizer """no""" +675 4 optimizer """adam""" +675 4 training_loop """owa""" +675 4 negative_sampler """basic""" +675 4 evaluator """rankbased""" +675 5 dataset """wn18rr""" +675 5 model """simple""" +675 5 loss """marginranking""" +675 5 regularizer """no""" +675 5 optimizer """adam""" +675 5 training_loop """owa""" +675 5 negative_sampler """basic""" +675 5 evaluator """rankbased""" +675 6 dataset """wn18rr""" +675 6 model """simple""" +675 6 loss """marginranking""" +675 6 regularizer """no""" +675 6 optimizer """adam""" +675 6 training_loop """owa""" +675 6 
negative_sampler """basic""" +675 6 evaluator """rankbased""" +675 7 dataset """wn18rr""" +675 7 model """simple""" +675 7 loss """marginranking""" +675 7 regularizer """no""" +675 7 optimizer """adam""" +675 7 training_loop """owa""" +675 7 negative_sampler """basic""" +675 7 evaluator """rankbased""" +675 8 dataset """wn18rr""" +675 8 model """simple""" +675 8 loss """marginranking""" +675 8 regularizer """no""" +675 8 optimizer """adam""" +675 8 training_loop """owa""" +675 8 negative_sampler """basic""" +675 8 evaluator """rankbased""" +675 9 dataset """wn18rr""" +675 9 model """simple""" +675 9 loss """marginranking""" +675 9 regularizer """no""" +675 9 optimizer """adam""" +675 9 training_loop """owa""" +675 9 negative_sampler """basic""" +675 9 evaluator """rankbased""" +675 10 dataset """wn18rr""" +675 10 model """simple""" +675 10 loss """marginranking""" +675 10 regularizer """no""" +675 10 optimizer """adam""" +675 10 training_loop """owa""" +675 10 negative_sampler """basic""" +675 10 evaluator """rankbased""" +675 11 dataset """wn18rr""" +675 11 model """simple""" +675 11 loss """marginranking""" +675 11 regularizer """no""" +675 11 optimizer """adam""" +675 11 training_loop """owa""" +675 11 negative_sampler """basic""" +675 11 evaluator """rankbased""" +675 12 dataset """wn18rr""" +675 12 model """simple""" +675 12 loss """marginranking""" +675 12 regularizer """no""" +675 12 optimizer """adam""" +675 12 training_loop """owa""" +675 12 negative_sampler """basic""" +675 12 evaluator """rankbased""" +675 13 dataset """wn18rr""" +675 13 model """simple""" +675 13 loss """marginranking""" +675 13 regularizer """no""" +675 13 optimizer """adam""" +675 13 training_loop """owa""" +675 13 negative_sampler """basic""" +675 13 evaluator """rankbased""" +675 14 dataset """wn18rr""" +675 14 model """simple""" +675 14 loss """marginranking""" +675 14 regularizer """no""" +675 14 optimizer """adam""" +675 14 training_loop """owa""" +675 14 negative_sampler 
"""basic""" +675 14 evaluator """rankbased""" +675 15 dataset """wn18rr""" +675 15 model """simple""" +675 15 loss """marginranking""" +675 15 regularizer """no""" +675 15 optimizer """adam""" +675 15 training_loop """owa""" +675 15 negative_sampler """basic""" +675 15 evaluator """rankbased""" +675 16 dataset """wn18rr""" +675 16 model """simple""" +675 16 loss """marginranking""" +675 16 regularizer """no""" +675 16 optimizer """adam""" +675 16 training_loop """owa""" +675 16 negative_sampler """basic""" +675 16 evaluator """rankbased""" +675 17 dataset """wn18rr""" +675 17 model """simple""" +675 17 loss """marginranking""" +675 17 regularizer """no""" +675 17 optimizer """adam""" +675 17 training_loop """owa""" +675 17 negative_sampler """basic""" +675 17 evaluator """rankbased""" +675 18 dataset """wn18rr""" +675 18 model """simple""" +675 18 loss """marginranking""" +675 18 regularizer """no""" +675 18 optimizer """adam""" +675 18 training_loop """owa""" +675 18 negative_sampler """basic""" +675 18 evaluator """rankbased""" +675 19 dataset """wn18rr""" +675 19 model """simple""" +675 19 loss """marginranking""" +675 19 regularizer """no""" +675 19 optimizer """adam""" +675 19 training_loop """owa""" +675 19 negative_sampler """basic""" +675 19 evaluator """rankbased""" +675 20 dataset """wn18rr""" +675 20 model """simple""" +675 20 loss """marginranking""" +675 20 regularizer """no""" +675 20 optimizer """adam""" +675 20 training_loop """owa""" +675 20 negative_sampler """basic""" +675 20 evaluator """rankbased""" +675 21 dataset """wn18rr""" +675 21 model """simple""" +675 21 loss """marginranking""" +675 21 regularizer """no""" +675 21 optimizer """adam""" +675 21 training_loop """owa""" +675 21 negative_sampler """basic""" +675 21 evaluator """rankbased""" +675 22 dataset """wn18rr""" +675 22 model """simple""" +675 22 loss """marginranking""" +675 22 regularizer """no""" +675 22 optimizer """adam""" +675 22 training_loop """owa""" +675 22 negative_sampler 
"""basic""" +675 22 evaluator """rankbased""" +675 23 dataset """wn18rr""" +675 23 model """simple""" +675 23 loss """marginranking""" +675 23 regularizer """no""" +675 23 optimizer """adam""" +675 23 training_loop """owa""" +675 23 negative_sampler """basic""" +675 23 evaluator """rankbased""" +675 24 dataset """wn18rr""" +675 24 model """simple""" +675 24 loss """marginranking""" +675 24 regularizer """no""" +675 24 optimizer """adam""" +675 24 training_loop """owa""" +675 24 negative_sampler """basic""" +675 24 evaluator """rankbased""" +675 25 dataset """wn18rr""" +675 25 model """simple""" +675 25 loss """marginranking""" +675 25 regularizer """no""" +675 25 optimizer """adam""" +675 25 training_loop """owa""" +675 25 negative_sampler """basic""" +675 25 evaluator """rankbased""" +675 26 dataset """wn18rr""" +675 26 model """simple""" +675 26 loss """marginranking""" +675 26 regularizer """no""" +675 26 optimizer """adam""" +675 26 training_loop """owa""" +675 26 negative_sampler """basic""" +675 26 evaluator """rankbased""" +675 27 dataset """wn18rr""" +675 27 model """simple""" +675 27 loss """marginranking""" +675 27 regularizer """no""" +675 27 optimizer """adam""" +675 27 training_loop """owa""" +675 27 negative_sampler """basic""" +675 27 evaluator """rankbased""" +675 28 dataset """wn18rr""" +675 28 model """simple""" +675 28 loss """marginranking""" +675 28 regularizer """no""" +675 28 optimizer """adam""" +675 28 training_loop """owa""" +675 28 negative_sampler """basic""" +675 28 evaluator """rankbased""" +675 29 dataset """wn18rr""" +675 29 model """simple""" +675 29 loss """marginranking""" +675 29 regularizer """no""" +675 29 optimizer """adam""" +675 29 training_loop """owa""" +675 29 negative_sampler """basic""" +675 29 evaluator """rankbased""" +675 30 dataset """wn18rr""" +675 30 model """simple""" +675 30 loss """marginranking""" +675 30 regularizer """no""" +675 30 optimizer """adam""" +675 30 training_loop """owa""" +675 30 negative_sampler 
"""basic""" +675 30 evaluator """rankbased""" +675 31 dataset """wn18rr""" +675 31 model """simple""" +675 31 loss """marginranking""" +675 31 regularizer """no""" +675 31 optimizer """adam""" +675 31 training_loop """owa""" +675 31 negative_sampler """basic""" +675 31 evaluator """rankbased""" +675 32 dataset """wn18rr""" +675 32 model """simple""" +675 32 loss """marginranking""" +675 32 regularizer """no""" +675 32 optimizer """adam""" +675 32 training_loop """owa""" +675 32 negative_sampler """basic""" +675 32 evaluator """rankbased""" +675 33 dataset """wn18rr""" +675 33 model """simple""" +675 33 loss """marginranking""" +675 33 regularizer """no""" +675 33 optimizer """adam""" +675 33 training_loop """owa""" +675 33 negative_sampler """basic""" +675 33 evaluator """rankbased""" +676 1 model.embedding_dim 1.0 +676 1 loss.margin 5.091432905898514 +676 1 optimizer.lr 0.013395920152931047 +676 1 negative_sampler.num_negs_per_pos 78.0 +676 1 training.batch_size 0.0 +676 2 model.embedding_dim 0.0 +676 2 loss.margin 5.405840859861765 +676 2 optimizer.lr 0.014806800178446303 +676 2 negative_sampler.num_negs_per_pos 85.0 +676 2 training.batch_size 2.0 +676 3 model.embedding_dim 0.0 +676 3 loss.margin 7.017614216546342 +676 3 optimizer.lr 0.0019151849753512335 +676 3 negative_sampler.num_negs_per_pos 38.0 +676 3 training.batch_size 0.0 +676 4 model.embedding_dim 0.0 +676 4 loss.margin 6.1758565307019255 +676 4 optimizer.lr 0.04940000666723687 +676 4 negative_sampler.num_negs_per_pos 52.0 +676 4 training.batch_size 2.0 +676 5 model.embedding_dim 1.0 +676 5 loss.margin 5.048996138173498 +676 5 optimizer.lr 0.06929957691733063 +676 5 negative_sampler.num_negs_per_pos 88.0 +676 5 training.batch_size 2.0 +676 6 model.embedding_dim 0.0 +676 6 loss.margin 1.4399739094823312 +676 6 optimizer.lr 0.011865142447406444 +676 6 negative_sampler.num_negs_per_pos 80.0 +676 6 training.batch_size 2.0 +676 7 model.embedding_dim 2.0 +676 7 loss.margin 2.4683872718392488 +676 7 
optimizer.lr 0.0025204247405814367 +676 7 negative_sampler.num_negs_per_pos 37.0 +676 7 training.batch_size 2.0 +676 8 model.embedding_dim 1.0 +676 8 loss.margin 5.005018647836392 +676 8 optimizer.lr 0.06281128385284544 +676 8 negative_sampler.num_negs_per_pos 54.0 +676 8 training.batch_size 2.0 +676 9 model.embedding_dim 0.0 +676 9 loss.margin 9.049263685003108 +676 9 optimizer.lr 0.02156714490414122 +676 9 negative_sampler.num_negs_per_pos 93.0 +676 9 training.batch_size 1.0 +676 10 model.embedding_dim 2.0 +676 10 loss.margin 7.550920905855289 +676 10 optimizer.lr 0.021436680091123428 +676 10 negative_sampler.num_negs_per_pos 64.0 +676 10 training.batch_size 1.0 +676 11 model.embedding_dim 1.0 +676 11 loss.margin 3.305708730344074 +676 11 optimizer.lr 0.02111779140423732 +676 11 negative_sampler.num_negs_per_pos 66.0 +676 11 training.batch_size 0.0 +676 12 model.embedding_dim 1.0 +676 12 loss.margin 3.1921167939476516 +676 12 optimizer.lr 0.008432436361418832 +676 12 negative_sampler.num_negs_per_pos 85.0 +676 12 training.batch_size 2.0 +676 13 model.embedding_dim 0.0 +676 13 loss.margin 7.30246090811111 +676 13 optimizer.lr 0.021828145329456795 +676 13 negative_sampler.num_negs_per_pos 71.0 +676 13 training.batch_size 2.0 +676 14 model.embedding_dim 2.0 +676 14 loss.margin 4.622900935704418 +676 14 optimizer.lr 0.011897816351657608 +676 14 negative_sampler.num_negs_per_pos 74.0 +676 14 training.batch_size 1.0 +676 15 model.embedding_dim 1.0 +676 15 loss.margin 8.761035985216967 +676 15 optimizer.lr 0.01291964494369666 +676 15 negative_sampler.num_negs_per_pos 55.0 +676 15 training.batch_size 2.0 +676 16 model.embedding_dim 2.0 +676 16 loss.margin 1.2857817578779944 +676 16 optimizer.lr 0.00801075484200296 +676 16 negative_sampler.num_negs_per_pos 31.0 +676 16 training.batch_size 1.0 +676 17 model.embedding_dim 1.0 +676 17 loss.margin 8.155371774586559 +676 17 optimizer.lr 0.007581163781014153 +676 17 negative_sampler.num_negs_per_pos 25.0 +676 17 
training.batch_size 2.0 +676 18 model.embedding_dim 0.0 +676 18 loss.margin 8.719367882505644 +676 18 optimizer.lr 0.012533646564041547 +676 18 negative_sampler.num_negs_per_pos 47.0 +676 18 training.batch_size 1.0 +676 19 model.embedding_dim 1.0 +676 19 loss.margin 4.017015510210526 +676 19 optimizer.lr 0.009395616845636164 +676 19 negative_sampler.num_negs_per_pos 96.0 +676 19 training.batch_size 1.0 +676 20 model.embedding_dim 0.0 +676 20 loss.margin 9.85346070901088 +676 20 optimizer.lr 0.0011714741407003275 +676 20 negative_sampler.num_negs_per_pos 28.0 +676 20 training.batch_size 1.0 +676 21 model.embedding_dim 0.0 +676 21 loss.margin 0.7743375328310652 +676 21 optimizer.lr 0.024196943572615284 +676 21 negative_sampler.num_negs_per_pos 93.0 +676 21 training.batch_size 1.0 +676 22 model.embedding_dim 2.0 +676 22 loss.margin 8.15465842459223 +676 22 optimizer.lr 0.007984029484569222 +676 22 negative_sampler.num_negs_per_pos 20.0 +676 22 training.batch_size 1.0 +676 23 model.embedding_dim 2.0 +676 23 loss.margin 4.641317402428533 +676 23 optimizer.lr 0.00853677194308144 +676 23 negative_sampler.num_negs_per_pos 3.0 +676 23 training.batch_size 0.0 +676 24 model.embedding_dim 1.0 +676 24 loss.margin 9.434628380152397 +676 24 optimizer.lr 0.06770561120509662 +676 24 negative_sampler.num_negs_per_pos 92.0 +676 24 training.batch_size 1.0 +676 25 model.embedding_dim 0.0 +676 25 loss.margin 5.782457569603249 +676 25 optimizer.lr 0.0019853598349242694 +676 25 negative_sampler.num_negs_per_pos 49.0 +676 25 training.batch_size 0.0 +676 26 model.embedding_dim 2.0 +676 26 loss.margin 6.021494848819523 +676 26 optimizer.lr 0.06341677671728504 +676 26 negative_sampler.num_negs_per_pos 31.0 +676 26 training.batch_size 2.0 +676 27 model.embedding_dim 2.0 +676 27 loss.margin 8.374299720577419 +676 27 optimizer.lr 0.044172918420318556 +676 27 negative_sampler.num_negs_per_pos 98.0 +676 27 training.batch_size 1.0 +676 28 model.embedding_dim 1.0 +676 28 loss.margin 
1.6177305204996477 +676 28 optimizer.lr 0.015537742973569652 +676 28 negative_sampler.num_negs_per_pos 32.0 +676 28 training.batch_size 2.0 +676 29 model.embedding_dim 0.0 +676 29 loss.margin 4.162401449345342 +676 29 optimizer.lr 0.004865397435205693 +676 29 negative_sampler.num_negs_per_pos 21.0 +676 29 training.batch_size 2.0 +676 30 model.embedding_dim 2.0 +676 30 loss.margin 1.7194716411548 +676 30 optimizer.lr 0.006413472228480485 +676 30 negative_sampler.num_negs_per_pos 93.0 +676 30 training.batch_size 0.0 +676 31 model.embedding_dim 2.0 +676 31 loss.margin 4.9866739779873654 +676 31 optimizer.lr 0.001203529554968113 +676 31 negative_sampler.num_negs_per_pos 79.0 +676 31 training.batch_size 2.0 +676 32 model.embedding_dim 1.0 +676 32 loss.margin 5.312826293622404 +676 32 optimizer.lr 0.0030541968784294216 +676 32 negative_sampler.num_negs_per_pos 11.0 +676 32 training.batch_size 1.0 +676 33 model.embedding_dim 0.0 +676 33 loss.margin 9.7509539933488 +676 33 optimizer.lr 0.015225434803245948 +676 33 negative_sampler.num_negs_per_pos 20.0 +676 33 training.batch_size 2.0 +676 34 model.embedding_dim 1.0 +676 34 loss.margin 0.8249278847671964 +676 34 optimizer.lr 0.0043545010316513345 +676 34 negative_sampler.num_negs_per_pos 14.0 +676 34 training.batch_size 0.0 +676 35 model.embedding_dim 0.0 +676 35 loss.margin 7.4988807416411 +676 35 optimizer.lr 0.0015132302069850276 +676 35 negative_sampler.num_negs_per_pos 16.0 +676 35 training.batch_size 0.0 +676 36 model.embedding_dim 1.0 +676 36 loss.margin 7.927571574166624 +676 36 optimizer.lr 0.00769838189624148 +676 36 negative_sampler.num_negs_per_pos 32.0 +676 36 training.batch_size 1.0 +676 37 model.embedding_dim 0.0 +676 37 loss.margin 3.1630860375884025 +676 37 optimizer.lr 0.07889881919316745 +676 37 negative_sampler.num_negs_per_pos 91.0 +676 37 training.batch_size 1.0 +676 38 model.embedding_dim 2.0 +676 38 loss.margin 4.5414859141136965 +676 38 optimizer.lr 0.044785028536836954 +676 38 
negative_sampler.num_negs_per_pos 8.0 +676 38 training.batch_size 2.0 +676 39 model.embedding_dim 2.0 +676 39 loss.margin 8.876218106920007 +676 39 optimizer.lr 0.0020386294255223167 +676 39 negative_sampler.num_negs_per_pos 65.0 +676 39 training.batch_size 1.0 +676 40 model.embedding_dim 1.0 +676 40 loss.margin 2.7471797936010898 +676 40 optimizer.lr 0.002058131736164085 +676 40 negative_sampler.num_negs_per_pos 84.0 +676 40 training.batch_size 1.0 +676 41 model.embedding_dim 2.0 +676 41 loss.margin 4.7573375026415725 +676 41 optimizer.lr 0.002034124618925081 +676 41 negative_sampler.num_negs_per_pos 87.0 +676 41 training.batch_size 2.0 +676 42 model.embedding_dim 2.0 +676 42 loss.margin 7.765015850748469 +676 42 optimizer.lr 0.005627100622857913 +676 42 negative_sampler.num_negs_per_pos 94.0 +676 42 training.batch_size 0.0 +676 43 model.embedding_dim 2.0 +676 43 loss.margin 7.862768141569302 +676 43 optimizer.lr 0.0014477840754883184 +676 43 negative_sampler.num_negs_per_pos 51.0 +676 43 training.batch_size 1.0 +676 44 model.embedding_dim 1.0 +676 44 loss.margin 1.9309198000320507 +676 44 optimizer.lr 0.052996709842992706 +676 44 negative_sampler.num_negs_per_pos 30.0 +676 44 training.batch_size 2.0 +676 45 model.embedding_dim 1.0 +676 45 loss.margin 0.7444095959431423 +676 45 optimizer.lr 0.002867891848576349 +676 45 negative_sampler.num_negs_per_pos 81.0 +676 45 training.batch_size 2.0 +676 46 model.embedding_dim 2.0 +676 46 loss.margin 1.5174518276953126 +676 46 optimizer.lr 0.012475262292882655 +676 46 negative_sampler.num_negs_per_pos 39.0 +676 46 training.batch_size 2.0 +676 47 model.embedding_dim 1.0 +676 47 loss.margin 4.784704437355457 +676 47 optimizer.lr 0.011961129724752405 +676 47 negative_sampler.num_negs_per_pos 14.0 +676 47 training.batch_size 1.0 +676 48 model.embedding_dim 1.0 +676 48 loss.margin 7.579513014385299 +676 48 optimizer.lr 0.028247628636225035 +676 48 negative_sampler.num_negs_per_pos 99.0 +676 48 training.batch_size 0.0 +676 49 
model.embedding_dim 1.0 +676 49 loss.margin 4.380138028841882 +676 49 optimizer.lr 0.020797129850602607 +676 49 negative_sampler.num_negs_per_pos 80.0 +676 49 training.batch_size 0.0 +676 50 model.embedding_dim 1.0 +676 50 loss.margin 3.0649188541459376 +676 50 optimizer.lr 0.017064875843127186 +676 50 negative_sampler.num_negs_per_pos 86.0 +676 50 training.batch_size 2.0 +676 51 model.embedding_dim 1.0 +676 51 loss.margin 3.5751844865581663 +676 51 optimizer.lr 0.01094362450773689 +676 51 negative_sampler.num_negs_per_pos 23.0 +676 51 training.batch_size 1.0 +676 52 model.embedding_dim 1.0 +676 52 loss.margin 8.044335359366103 +676 52 optimizer.lr 0.00952064243424021 +676 52 negative_sampler.num_negs_per_pos 9.0 +676 52 training.batch_size 2.0 +676 53 model.embedding_dim 0.0 +676 53 loss.margin 9.158801016948596 +676 53 optimizer.lr 0.015729847440815972 +676 53 negative_sampler.num_negs_per_pos 40.0 +676 53 training.batch_size 1.0 +676 54 model.embedding_dim 0.0 +676 54 loss.margin 4.268608048897015 +676 54 optimizer.lr 0.0013126102900949438 +676 54 negative_sampler.num_negs_per_pos 90.0 +676 54 training.batch_size 2.0 +676 55 model.embedding_dim 0.0 +676 55 loss.margin 4.938009925526334 +676 55 optimizer.lr 0.004517865994320347 +676 55 negative_sampler.num_negs_per_pos 82.0 +676 55 training.batch_size 1.0 +676 56 model.embedding_dim 1.0 +676 56 loss.margin 2.8836017259505504 +676 56 optimizer.lr 0.011870195243622903 +676 56 negative_sampler.num_negs_per_pos 67.0 +676 56 training.batch_size 0.0 +676 57 model.embedding_dim 0.0 +676 57 loss.margin 2.2441481804319254 +676 57 optimizer.lr 0.01706606152978482 +676 57 negative_sampler.num_negs_per_pos 66.0 +676 57 training.batch_size 0.0 +676 58 model.embedding_dim 1.0 +676 58 loss.margin 5.860074078541137 +676 58 optimizer.lr 0.020065357983830932 +676 58 negative_sampler.num_negs_per_pos 32.0 +676 58 training.batch_size 2.0 +676 59 model.embedding_dim 1.0 +676 59 loss.margin 9.791411770599971 +676 59 optimizer.lr 
0.0038609928045641568 +676 59 negative_sampler.num_negs_per_pos 12.0 +676 59 training.batch_size 2.0 +676 60 model.embedding_dim 0.0 +676 60 loss.margin 8.100009199834789 +676 60 optimizer.lr 0.0019224038086663956 +676 60 negative_sampler.num_negs_per_pos 86.0 +676 60 training.batch_size 2.0 +676 61 model.embedding_dim 2.0 +676 61 loss.margin 7.93846902170756 +676 61 optimizer.lr 0.03159691749957347 +676 61 negative_sampler.num_negs_per_pos 15.0 +676 61 training.batch_size 2.0 +676 62 model.embedding_dim 1.0 +676 62 loss.margin 7.487327099797145 +676 62 optimizer.lr 0.065589202838229 +676 62 negative_sampler.num_negs_per_pos 21.0 +676 62 training.batch_size 2.0 +676 63 model.embedding_dim 2.0 +676 63 loss.margin 4.29613435882584 +676 63 optimizer.lr 0.014925492532029674 +676 63 negative_sampler.num_negs_per_pos 73.0 +676 63 training.batch_size 2.0 +676 64 model.embedding_dim 1.0 +676 64 loss.margin 8.695547470298706 +676 64 optimizer.lr 0.003764140846809324 +676 64 negative_sampler.num_negs_per_pos 87.0 +676 64 training.batch_size 1.0 +676 1 dataset """wn18rr""" +676 1 model """simple""" +676 1 loss """marginranking""" +676 1 regularizer """no""" +676 1 optimizer """adam""" +676 1 training_loop """owa""" +676 1 negative_sampler """basic""" +676 1 evaluator """rankbased""" +676 2 dataset """wn18rr""" +676 2 model """simple""" +676 2 loss """marginranking""" +676 2 regularizer """no""" +676 2 optimizer """adam""" +676 2 training_loop """owa""" +676 2 negative_sampler """basic""" +676 2 evaluator """rankbased""" +676 3 dataset """wn18rr""" +676 3 model """simple""" +676 3 loss """marginranking""" +676 3 regularizer """no""" +676 3 optimizer """adam""" +676 3 training_loop """owa""" +676 3 negative_sampler """basic""" +676 3 evaluator """rankbased""" +676 4 dataset """wn18rr""" +676 4 model """simple""" +676 4 loss """marginranking""" +676 4 regularizer """no""" +676 4 optimizer """adam""" +676 4 training_loop """owa""" +676 4 negative_sampler """basic""" +676 4 
evaluator """rankbased""" +676 5 dataset """wn18rr""" +676 5 model """simple""" +676 5 loss """marginranking""" +676 5 regularizer """no""" +676 5 optimizer """adam""" +676 5 training_loop """owa""" +676 5 negative_sampler """basic""" +676 5 evaluator """rankbased""" +676 6 dataset """wn18rr""" +676 6 model """simple""" +676 6 loss """marginranking""" +676 6 regularizer """no""" +676 6 optimizer """adam""" +676 6 training_loop """owa""" +676 6 negative_sampler """basic""" +676 6 evaluator """rankbased""" +676 7 dataset """wn18rr""" +676 7 model """simple""" +676 7 loss """marginranking""" +676 7 regularizer """no""" +676 7 optimizer """adam""" +676 7 training_loop """owa""" +676 7 negative_sampler """basic""" +676 7 evaluator """rankbased""" +676 8 dataset """wn18rr""" +676 8 model """simple""" +676 8 loss """marginranking""" +676 8 regularizer """no""" +676 8 optimizer """adam""" +676 8 training_loop """owa""" +676 8 negative_sampler """basic""" +676 8 evaluator """rankbased""" +676 9 dataset """wn18rr""" +676 9 model """simple""" +676 9 loss """marginranking""" +676 9 regularizer """no""" +676 9 optimizer """adam""" +676 9 training_loop """owa""" +676 9 negative_sampler """basic""" +676 9 evaluator """rankbased""" +676 10 dataset """wn18rr""" +676 10 model """simple""" +676 10 loss """marginranking""" +676 10 regularizer """no""" +676 10 optimizer """adam""" +676 10 training_loop """owa""" +676 10 negative_sampler """basic""" +676 10 evaluator """rankbased""" +676 11 dataset """wn18rr""" +676 11 model """simple""" +676 11 loss """marginranking""" +676 11 regularizer """no""" +676 11 optimizer """adam""" +676 11 training_loop """owa""" +676 11 negative_sampler """basic""" +676 11 evaluator """rankbased""" +676 12 dataset """wn18rr""" +676 12 model """simple""" +676 12 loss """marginranking""" +676 12 regularizer """no""" +676 12 optimizer """adam""" +676 12 training_loop """owa""" +676 12 negative_sampler """basic""" +676 12 evaluator """rankbased""" +676 13 
dataset """wn18rr""" +676 13 model """simple""" +676 13 loss """marginranking""" +676 13 regularizer """no""" +676 13 optimizer """adam""" +676 13 training_loop """owa""" +676 13 negative_sampler """basic""" +676 13 evaluator """rankbased""" +676 14 dataset """wn18rr""" +676 14 model """simple""" +676 14 loss """marginranking""" +676 14 regularizer """no""" +676 14 optimizer """adam""" +676 14 training_loop """owa""" +676 14 negative_sampler """basic""" +676 14 evaluator """rankbased""" +676 15 dataset """wn18rr""" +676 15 model """simple""" +676 15 loss """marginranking""" +676 15 regularizer """no""" +676 15 optimizer """adam""" +676 15 training_loop """owa""" +676 15 negative_sampler """basic""" +676 15 evaluator """rankbased""" +676 16 dataset """wn18rr""" +676 16 model """simple""" +676 16 loss """marginranking""" +676 16 regularizer """no""" +676 16 optimizer """adam""" +676 16 training_loop """owa""" +676 16 negative_sampler """basic""" +676 16 evaluator """rankbased""" +676 17 dataset """wn18rr""" +676 17 model """simple""" +676 17 loss """marginranking""" +676 17 regularizer """no""" +676 17 optimizer """adam""" +676 17 training_loop """owa""" +676 17 negative_sampler """basic""" +676 17 evaluator """rankbased""" +676 18 dataset """wn18rr""" +676 18 model """simple""" +676 18 loss """marginranking""" +676 18 regularizer """no""" +676 18 optimizer """adam""" +676 18 training_loop """owa""" +676 18 negative_sampler """basic""" +676 18 evaluator """rankbased""" +676 19 dataset """wn18rr""" +676 19 model """simple""" +676 19 loss """marginranking""" +676 19 regularizer """no""" +676 19 optimizer """adam""" +676 19 training_loop """owa""" +676 19 negative_sampler """basic""" +676 19 evaluator """rankbased""" +676 20 dataset """wn18rr""" +676 20 model """simple""" +676 20 loss """marginranking""" +676 20 regularizer """no""" +676 20 optimizer """adam""" +676 20 training_loop """owa""" +676 20 negative_sampler """basic""" +676 20 evaluator """rankbased""" +676 21 
dataset """wn18rr""" +676 21 model """simple""" +676 21 loss """marginranking""" +676 21 regularizer """no""" +676 21 optimizer """adam""" +676 21 training_loop """owa""" +676 21 negative_sampler """basic""" +676 21 evaluator """rankbased""" +676 22 dataset """wn18rr""" +676 22 model """simple""" +676 22 loss """marginranking""" +676 22 regularizer """no""" +676 22 optimizer """adam""" +676 22 training_loop """owa""" +676 22 negative_sampler """basic""" +676 22 evaluator """rankbased""" +676 23 dataset """wn18rr""" +676 23 model """simple""" +676 23 loss """marginranking""" +676 23 regularizer """no""" +676 23 optimizer """adam""" +676 23 training_loop """owa""" +676 23 negative_sampler """basic""" +676 23 evaluator """rankbased""" +676 24 dataset """wn18rr""" +676 24 model """simple""" +676 24 loss """marginranking""" +676 24 regularizer """no""" +676 24 optimizer """adam""" +676 24 training_loop """owa""" +676 24 negative_sampler """basic""" +676 24 evaluator """rankbased""" +676 25 dataset """wn18rr""" +676 25 model """simple""" +676 25 loss """marginranking""" +676 25 regularizer """no""" +676 25 optimizer """adam""" +676 25 training_loop """owa""" +676 25 negative_sampler """basic""" +676 25 evaluator """rankbased""" +676 26 dataset """wn18rr""" +676 26 model """simple""" +676 26 loss """marginranking""" +676 26 regularizer """no""" +676 26 optimizer """adam""" +676 26 training_loop """owa""" +676 26 negative_sampler """basic""" +676 26 evaluator """rankbased""" +676 27 dataset """wn18rr""" +676 27 model """simple""" +676 27 loss """marginranking""" +676 27 regularizer """no""" +676 27 optimizer """adam""" +676 27 training_loop """owa""" +676 27 negative_sampler """basic""" +676 27 evaluator """rankbased""" +676 28 dataset """wn18rr""" +676 28 model """simple""" +676 28 loss """marginranking""" +676 28 regularizer """no""" +676 28 optimizer """adam""" +676 28 training_loop """owa""" +676 28 negative_sampler """basic""" +676 28 evaluator """rankbased""" +676 29 
dataset """wn18rr""" +676 29 model """simple""" +676 29 loss """marginranking""" +676 29 regularizer """no""" +676 29 optimizer """adam""" +676 29 training_loop """owa""" +676 29 negative_sampler """basic""" +676 29 evaluator """rankbased""" +676 30 dataset """wn18rr""" +676 30 model """simple""" +676 30 loss """marginranking""" +676 30 regularizer """no""" +676 30 optimizer """adam""" +676 30 training_loop """owa""" +676 30 negative_sampler """basic""" +676 30 evaluator """rankbased""" +676 31 dataset """wn18rr""" +676 31 model """simple""" +676 31 loss """marginranking""" +676 31 regularizer """no""" +676 31 optimizer """adam""" +676 31 training_loop """owa""" +676 31 negative_sampler """basic""" +676 31 evaluator """rankbased""" +676 32 dataset """wn18rr""" +676 32 model """simple""" +676 32 loss """marginranking""" +676 32 regularizer """no""" +676 32 optimizer """adam""" +676 32 training_loop """owa""" +676 32 negative_sampler """basic""" +676 32 evaluator """rankbased""" +676 33 dataset """wn18rr""" +676 33 model """simple""" +676 33 loss """marginranking""" +676 33 regularizer """no""" +676 33 optimizer """adam""" +676 33 training_loop """owa""" +676 33 negative_sampler """basic""" +676 33 evaluator """rankbased""" +676 34 dataset """wn18rr""" +676 34 model """simple""" +676 34 loss """marginranking""" +676 34 regularizer """no""" +676 34 optimizer """adam""" +676 34 training_loop """owa""" +676 34 negative_sampler """basic""" +676 34 evaluator """rankbased""" +676 35 dataset """wn18rr""" +676 35 model """simple""" +676 35 loss """marginranking""" +676 35 regularizer """no""" +676 35 optimizer """adam""" +676 35 training_loop """owa""" +676 35 negative_sampler """basic""" +676 35 evaluator """rankbased""" +676 36 dataset """wn18rr""" +676 36 model """simple""" +676 36 loss """marginranking""" +676 36 regularizer """no""" +676 36 optimizer """adam""" +676 36 training_loop """owa""" +676 36 negative_sampler """basic""" +676 36 evaluator """rankbased""" +676 37 
dataset """wn18rr""" +676 37 model """simple""" +676 37 loss """marginranking""" +676 37 regularizer """no""" +676 37 optimizer """adam""" +676 37 training_loop """owa""" +676 37 negative_sampler """basic""" +676 37 evaluator """rankbased""" +676 38 dataset """wn18rr""" +676 38 model """simple""" +676 38 loss """marginranking""" +676 38 regularizer """no""" +676 38 optimizer """adam""" +676 38 training_loop """owa""" +676 38 negative_sampler """basic""" +676 38 evaluator """rankbased""" +676 39 dataset """wn18rr""" +676 39 model """simple""" +676 39 loss """marginranking""" +676 39 regularizer """no""" +676 39 optimizer """adam""" +676 39 training_loop """owa""" +676 39 negative_sampler """basic""" +676 39 evaluator """rankbased""" +676 40 dataset """wn18rr""" +676 40 model """simple""" +676 40 loss """marginranking""" +676 40 regularizer """no""" +676 40 optimizer """adam""" +676 40 training_loop """owa""" +676 40 negative_sampler """basic""" +676 40 evaluator """rankbased""" +676 41 dataset """wn18rr""" +676 41 model """simple""" +676 41 loss """marginranking""" +676 41 regularizer """no""" +676 41 optimizer """adam""" +676 41 training_loop """owa""" +676 41 negative_sampler """basic""" +676 41 evaluator """rankbased""" +676 42 dataset """wn18rr""" +676 42 model """simple""" +676 42 loss """marginranking""" +676 42 regularizer """no""" +676 42 optimizer """adam""" +676 42 training_loop """owa""" +676 42 negative_sampler """basic""" +676 42 evaluator """rankbased""" +676 43 dataset """wn18rr""" +676 43 model """simple""" +676 43 loss """marginranking""" +676 43 regularizer """no""" +676 43 optimizer """adam""" +676 43 training_loop """owa""" +676 43 negative_sampler """basic""" +676 43 evaluator """rankbased""" +676 44 dataset """wn18rr""" +676 44 model """simple""" +676 44 loss """marginranking""" +676 44 regularizer """no""" +676 44 optimizer """adam""" +676 44 training_loop """owa""" +676 44 negative_sampler """basic""" +676 44 evaluator """rankbased""" +676 45 
dataset """wn18rr""" +676 45 model """simple""" +676 45 loss """marginranking""" +676 45 regularizer """no""" +676 45 optimizer """adam""" +676 45 training_loop """owa""" +676 45 negative_sampler """basic""" +676 45 evaluator """rankbased""" +676 46 dataset """wn18rr""" +676 46 model """simple""" +676 46 loss """marginranking""" +676 46 regularizer """no""" +676 46 optimizer """adam""" +676 46 training_loop """owa""" +676 46 negative_sampler """basic""" +676 46 evaluator """rankbased""" +676 47 dataset """wn18rr""" +676 47 model """simple""" +676 47 loss """marginranking""" +676 47 regularizer """no""" +676 47 optimizer """adam""" +676 47 training_loop """owa""" +676 47 negative_sampler """basic""" +676 47 evaluator """rankbased""" +676 48 dataset """wn18rr""" +676 48 model """simple""" +676 48 loss """marginranking""" +676 48 regularizer """no""" +676 48 optimizer """adam""" +676 48 training_loop """owa""" +676 48 negative_sampler """basic""" +676 48 evaluator """rankbased""" +676 49 dataset """wn18rr""" +676 49 model """simple""" +676 49 loss """marginranking""" +676 49 regularizer """no""" +676 49 optimizer """adam""" +676 49 training_loop """owa""" +676 49 negative_sampler """basic""" +676 49 evaluator """rankbased""" +676 50 dataset """wn18rr""" +676 50 model """simple""" +676 50 loss """marginranking""" +676 50 regularizer """no""" +676 50 optimizer """adam""" +676 50 training_loop """owa""" +676 50 negative_sampler """basic""" +676 50 evaluator """rankbased""" +676 51 dataset """wn18rr""" +676 51 model """simple""" +676 51 loss """marginranking""" +676 51 regularizer """no""" +676 51 optimizer """adam""" +676 51 training_loop """owa""" +676 51 negative_sampler """basic""" +676 51 evaluator """rankbased""" +676 52 dataset """wn18rr""" +676 52 model """simple""" +676 52 loss """marginranking""" +676 52 regularizer """no""" +676 52 optimizer """adam""" +676 52 training_loop """owa""" +676 52 negative_sampler """basic""" +676 52 evaluator """rankbased""" +676 53 
dataset """wn18rr""" +676 53 model """simple""" +676 53 loss """marginranking""" +676 53 regularizer """no""" +676 53 optimizer """adam""" +676 53 training_loop """owa""" +676 53 negative_sampler """basic""" +676 53 evaluator """rankbased""" +676 54 dataset """wn18rr""" +676 54 model """simple""" +676 54 loss """marginranking""" +676 54 regularizer """no""" +676 54 optimizer """adam""" +676 54 training_loop """owa""" +676 54 negative_sampler """basic""" +676 54 evaluator """rankbased""" +676 55 dataset """wn18rr""" +676 55 model """simple""" +676 55 loss """marginranking""" +676 55 regularizer """no""" +676 55 optimizer """adam""" +676 55 training_loop """owa""" +676 55 negative_sampler """basic""" +676 55 evaluator """rankbased""" +676 56 dataset """wn18rr""" +676 56 model """simple""" +676 56 loss """marginranking""" +676 56 regularizer """no""" +676 56 optimizer """adam""" +676 56 training_loop """owa""" +676 56 negative_sampler """basic""" +676 56 evaluator """rankbased""" +676 57 dataset """wn18rr""" +676 57 model """simple""" +676 57 loss """marginranking""" +676 57 regularizer """no""" +676 57 optimizer """adam""" +676 57 training_loop """owa""" +676 57 negative_sampler """basic""" +676 57 evaluator """rankbased""" +676 58 dataset """wn18rr""" +676 58 model """simple""" +676 58 loss """marginranking""" +676 58 regularizer """no""" +676 58 optimizer """adam""" +676 58 training_loop """owa""" +676 58 negative_sampler """basic""" +676 58 evaluator """rankbased""" +676 59 dataset """wn18rr""" +676 59 model """simple""" +676 59 loss """marginranking""" +676 59 regularizer """no""" +676 59 optimizer """adam""" +676 59 training_loop """owa""" +676 59 negative_sampler """basic""" +676 59 evaluator """rankbased""" +676 60 dataset """wn18rr""" +676 60 model """simple""" +676 60 loss """marginranking""" +676 60 regularizer """no""" +676 60 optimizer """adam""" +676 60 training_loop """owa""" +676 60 negative_sampler """basic""" +676 60 evaluator """rankbased""" +676 61 
dataset """wn18rr""" +676 61 model """simple""" +676 61 loss """marginranking""" +676 61 regularizer """no""" +676 61 optimizer """adam""" +676 61 training_loop """owa""" +676 61 negative_sampler """basic""" +676 61 evaluator """rankbased""" +676 62 dataset """wn18rr""" +676 62 model """simple""" +676 62 loss """marginranking""" +676 62 regularizer """no""" +676 62 optimizer """adam""" +676 62 training_loop """owa""" +676 62 negative_sampler """basic""" +676 62 evaluator """rankbased""" +676 63 dataset """wn18rr""" +676 63 model """simple""" +676 63 loss """marginranking""" +676 63 regularizer """no""" +676 63 optimizer """adam""" +676 63 training_loop """owa""" +676 63 negative_sampler """basic""" +676 63 evaluator """rankbased""" +676 64 dataset """wn18rr""" +676 64 model """simple""" +676 64 loss """marginranking""" +676 64 regularizer """no""" +676 64 optimizer """adam""" +676 64 training_loop """owa""" +676 64 negative_sampler """basic""" +676 64 evaluator """rankbased""" +677 1 model.embedding_dim 0.0 +677 1 loss.margin 26.451232732628306 +677 1 loss.adversarial_temperature 0.47382728238616384 +677 1 optimizer.lr 0.019190564791242145 +677 1 negative_sampler.num_negs_per_pos 79.0 +677 1 training.batch_size 2.0 +677 2 model.embedding_dim 1.0 +677 2 loss.margin 13.544556748117474 +677 2 loss.adversarial_temperature 0.5217458950555892 +677 2 optimizer.lr 0.00646906183651937 +677 2 negative_sampler.num_negs_per_pos 46.0 +677 2 training.batch_size 1.0 +677 3 model.embedding_dim 2.0 +677 3 loss.margin 4.6088435678790525 +677 3 loss.adversarial_temperature 0.8170666946439191 +677 3 optimizer.lr 0.0076087980988669215 +677 3 negative_sampler.num_negs_per_pos 46.0 +677 3 training.batch_size 2.0 +677 4 model.embedding_dim 0.0 +677 4 loss.margin 28.22914904963126 +677 4 loss.adversarial_temperature 0.9718038704388371 +677 4 optimizer.lr 0.0010561142692660586 +677 4 negative_sampler.num_negs_per_pos 48.0 +677 4 training.batch_size 0.0 +677 5 model.embedding_dim 1.0 +677 5 
loss.margin 15.447773430986224 +677 5 loss.adversarial_temperature 0.3898743070943796 +677 5 optimizer.lr 0.0014634156046119218 +677 5 negative_sampler.num_negs_per_pos 16.0 +677 5 training.batch_size 1.0 +677 6 model.embedding_dim 2.0 +677 6 loss.margin 4.47904404110604 +677 6 loss.adversarial_temperature 0.13455218819836753 +677 6 optimizer.lr 0.005307587111931118 +677 6 negative_sampler.num_negs_per_pos 72.0 +677 6 training.batch_size 1.0 +677 7 model.embedding_dim 0.0 +677 7 loss.margin 20.947183563324263 +677 7 loss.adversarial_temperature 0.5957404160536116 +677 7 optimizer.lr 0.03576484575609725 +677 7 negative_sampler.num_negs_per_pos 73.0 +677 7 training.batch_size 1.0 +677 8 model.embedding_dim 0.0 +677 8 loss.margin 18.214154483834804 +677 8 loss.adversarial_temperature 0.9006058732451039 +677 8 optimizer.lr 0.006697435891435242 +677 8 negative_sampler.num_negs_per_pos 56.0 +677 8 training.batch_size 0.0 +677 9 model.embedding_dim 2.0 +677 9 loss.margin 6.365412498250646 +677 9 loss.adversarial_temperature 0.1449701722968768 +677 9 optimizer.lr 0.027477146668679956 +677 9 negative_sampler.num_negs_per_pos 43.0 +677 9 training.batch_size 2.0 +677 10 model.embedding_dim 2.0 +677 10 loss.margin 15.602836269217368 +677 10 loss.adversarial_temperature 0.39249515946773056 +677 10 optimizer.lr 0.01017608492406493 +677 10 negative_sampler.num_negs_per_pos 4.0 +677 10 training.batch_size 0.0 +677 11 model.embedding_dim 1.0 +677 11 loss.margin 18.52277651881861 +677 11 loss.adversarial_temperature 0.7598655057854403 +677 11 optimizer.lr 0.028032895948430156 +677 11 negative_sampler.num_negs_per_pos 47.0 +677 11 training.batch_size 2.0 +677 12 model.embedding_dim 0.0 +677 12 loss.margin 27.28321031697304 +677 12 loss.adversarial_temperature 0.4648699979677493 +677 12 optimizer.lr 0.0013000466226184377 +677 12 negative_sampler.num_negs_per_pos 18.0 +677 12 training.batch_size 0.0 +677 13 model.embedding_dim 0.0 +677 13 loss.margin 22.65508071948949 +677 13 
loss.adversarial_temperature 0.36694330145845283 +677 13 optimizer.lr 0.0627129909111762 +677 13 negative_sampler.num_negs_per_pos 26.0 +677 13 training.batch_size 1.0 +677 14 model.embedding_dim 2.0 +677 14 loss.margin 19.15417591673527 +677 14 loss.adversarial_temperature 0.36205187092764396 +677 14 optimizer.lr 0.0314198420001322 +677 14 negative_sampler.num_negs_per_pos 84.0 +677 14 training.batch_size 1.0 +677 15 model.embedding_dim 0.0 +677 15 loss.margin 22.221588143233323 +677 15 loss.adversarial_temperature 0.9792989657191131 +677 15 optimizer.lr 0.08882178772890775 +677 15 negative_sampler.num_negs_per_pos 51.0 +677 15 training.batch_size 1.0 +677 16 model.embedding_dim 0.0 +677 16 loss.margin 19.32906234964423 +677 16 loss.adversarial_temperature 0.4127122467197611 +677 16 optimizer.lr 0.0024727003733353726 +677 16 negative_sampler.num_negs_per_pos 62.0 +677 16 training.batch_size 1.0 +677 17 model.embedding_dim 0.0 +677 17 loss.margin 20.729476568426136 +677 17 loss.adversarial_temperature 0.9630112789021441 +677 17 optimizer.lr 0.0034560340023518307 +677 17 negative_sampler.num_negs_per_pos 5.0 +677 17 training.batch_size 0.0 +677 18 model.embedding_dim 2.0 +677 18 loss.margin 6.363684603575702 +677 18 loss.adversarial_temperature 0.5932428949652385 +677 18 optimizer.lr 0.028856314715922732 +677 18 negative_sampler.num_negs_per_pos 33.0 +677 18 training.batch_size 1.0 +677 19 model.embedding_dim 1.0 +677 19 loss.margin 21.214907731496677 +677 19 loss.adversarial_temperature 0.9998619328507176 +677 19 optimizer.lr 0.012610302353928819 +677 19 negative_sampler.num_negs_per_pos 97.0 +677 19 training.batch_size 1.0 +677 20 model.embedding_dim 1.0 +677 20 loss.margin 14.770985465088723 +677 20 loss.adversarial_temperature 0.7794486901903354 +677 20 optimizer.lr 0.004182966503882546 +677 20 negative_sampler.num_negs_per_pos 38.0 +677 20 training.batch_size 0.0 +677 21 model.embedding_dim 2.0 +677 21 loss.margin 6.523769161544232 +677 21 
loss.adversarial_temperature 0.11468183566791253 +677 21 optimizer.lr 0.001121355493708445 +677 21 negative_sampler.num_negs_per_pos 87.0 +677 21 training.batch_size 2.0 +677 22 model.embedding_dim 1.0 +677 22 loss.margin 2.0534669654943145 +677 22 loss.adversarial_temperature 0.572808548521171 +677 22 optimizer.lr 0.028509912613885493 +677 22 negative_sampler.num_negs_per_pos 1.0 +677 22 training.batch_size 1.0 +677 23 model.embedding_dim 2.0 +677 23 loss.margin 22.912488211588737 +677 23 loss.adversarial_temperature 0.9146098088579147 +677 23 optimizer.lr 0.008585737067690702 +677 23 negative_sampler.num_negs_per_pos 84.0 +677 23 training.batch_size 1.0 +677 24 model.embedding_dim 0.0 +677 24 loss.margin 6.31401322974399 +677 24 loss.adversarial_temperature 0.6032390399137383 +677 24 optimizer.lr 0.00984797573851553 +677 24 negative_sampler.num_negs_per_pos 61.0 +677 24 training.batch_size 1.0 +677 25 model.embedding_dim 2.0 +677 25 loss.margin 14.005844009316885 +677 25 loss.adversarial_temperature 0.8051092588982505 +677 25 optimizer.lr 0.009271459868851934 +677 25 negative_sampler.num_negs_per_pos 78.0 +677 25 training.batch_size 0.0 +677 26 model.embedding_dim 1.0 +677 26 loss.margin 21.70866500400862 +677 26 loss.adversarial_temperature 0.5212897070267476 +677 26 optimizer.lr 0.001625364349466888 +677 26 negative_sampler.num_negs_per_pos 83.0 +677 26 training.batch_size 2.0 +677 27 model.embedding_dim 0.0 +677 27 loss.margin 15.81828986364088 +677 27 loss.adversarial_temperature 0.13844588246733716 +677 27 optimizer.lr 0.003702666449454506 +677 27 negative_sampler.num_negs_per_pos 63.0 +677 27 training.batch_size 2.0 +677 28 model.embedding_dim 2.0 +677 28 loss.margin 8.553214884916367 +677 28 loss.adversarial_temperature 0.34217611269834336 +677 28 optimizer.lr 0.03941164804414046 +677 28 negative_sampler.num_negs_per_pos 7.0 +677 28 training.batch_size 1.0 +677 29 model.embedding_dim 0.0 +677 29 loss.margin 1.8440245813175473 +677 29 
loss.adversarial_temperature 0.23436182206183503 +677 29 optimizer.lr 0.010804520608238024 +677 29 negative_sampler.num_negs_per_pos 37.0 +677 29 training.batch_size 2.0 +677 30 model.embedding_dim 1.0 +677 30 loss.margin 27.927593813540078 +677 30 loss.adversarial_temperature 0.6265194816521473 +677 30 optimizer.lr 0.01245283779369592 +677 30 negative_sampler.num_negs_per_pos 96.0 +677 30 training.batch_size 2.0 +677 31 model.embedding_dim 0.0 +677 31 loss.margin 22.664940040021733 +677 31 loss.adversarial_temperature 0.9770136721333178 +677 31 optimizer.lr 0.016198182209248076 +677 31 negative_sampler.num_negs_per_pos 4.0 +677 31 training.batch_size 1.0 +677 32 model.embedding_dim 2.0 +677 32 loss.margin 14.48082690129206 +677 32 loss.adversarial_temperature 0.3794451995154836 +677 32 optimizer.lr 0.0011055757999197853 +677 32 negative_sampler.num_negs_per_pos 1.0 +677 32 training.batch_size 2.0 +677 33 model.embedding_dim 0.0 +677 33 loss.margin 6.470414168548775 +677 33 loss.adversarial_temperature 0.9067928384355884 +677 33 optimizer.lr 0.04222301952110516 +677 33 negative_sampler.num_negs_per_pos 60.0 +677 33 training.batch_size 1.0 +677 34 model.embedding_dim 2.0 +677 34 loss.margin 19.930013698060108 +677 34 loss.adversarial_temperature 0.7285148677745327 +677 34 optimizer.lr 0.0012801957875761146 +677 34 negative_sampler.num_negs_per_pos 16.0 +677 34 training.batch_size 2.0 +677 35 model.embedding_dim 1.0 +677 35 loss.margin 12.807833674977818 +677 35 loss.adversarial_temperature 0.26213345175951364 +677 35 optimizer.lr 0.005723212790772634 +677 35 negative_sampler.num_negs_per_pos 16.0 +677 35 training.batch_size 1.0 +677 36 model.embedding_dim 0.0 +677 36 loss.margin 23.321271325851193 +677 36 loss.adversarial_temperature 0.3009975564972706 +677 36 optimizer.lr 0.017860245751127657 +677 36 negative_sampler.num_negs_per_pos 49.0 +677 36 training.batch_size 2.0 +677 37 model.embedding_dim 2.0 +677 37 loss.margin 8.47890912466062 +677 37 
loss.adversarial_temperature 0.2524514435796479 +677 37 optimizer.lr 0.0028636409451492486 +677 37 negative_sampler.num_negs_per_pos 77.0 +677 37 training.batch_size 1.0 +677 38 model.embedding_dim 0.0 +677 38 loss.margin 27.41009768970424 +677 38 loss.adversarial_temperature 0.6028749045126239 +677 38 optimizer.lr 0.03967987770186509 +677 38 negative_sampler.num_negs_per_pos 89.0 +677 38 training.batch_size 1.0 +677 39 model.embedding_dim 0.0 +677 39 loss.margin 20.17580618808019 +677 39 loss.adversarial_temperature 0.9999712220709701 +677 39 optimizer.lr 0.003486423426416572 +677 39 negative_sampler.num_negs_per_pos 22.0 +677 39 training.batch_size 0.0 +677 40 model.embedding_dim 2.0 +677 40 loss.margin 22.914869411146952 +677 40 loss.adversarial_temperature 0.5127155858223963 +677 40 optimizer.lr 0.009596573372078506 +677 40 negative_sampler.num_negs_per_pos 39.0 +677 40 training.batch_size 1.0 +677 41 model.embedding_dim 2.0 +677 41 loss.margin 26.39967447104412 +677 41 loss.adversarial_temperature 0.7113761664060206 +677 41 optimizer.lr 0.0013816404202992058 +677 41 negative_sampler.num_negs_per_pos 62.0 +677 41 training.batch_size 0.0 +677 42 model.embedding_dim 2.0 +677 42 loss.margin 12.350717032390842 +677 42 loss.adversarial_temperature 0.5384981246745392 +677 42 optimizer.lr 0.0019954542327016204 +677 42 negative_sampler.num_negs_per_pos 77.0 +677 42 training.batch_size 2.0 +677 43 model.embedding_dim 0.0 +677 43 loss.margin 28.59890177633369 +677 43 loss.adversarial_temperature 0.7820131254246359 +677 43 optimizer.lr 0.007319611267604937 +677 43 negative_sampler.num_negs_per_pos 56.0 +677 43 training.batch_size 1.0 +677 44 model.embedding_dim 0.0 +677 44 loss.margin 3.40628197998306 +677 44 loss.adversarial_temperature 0.8075332512723664 +677 44 optimizer.lr 0.0052387140661085355 +677 44 negative_sampler.num_negs_per_pos 43.0 +677 44 training.batch_size 0.0 +677 45 model.embedding_dim 0.0 +677 45 loss.margin 20.0576912399261 +677 45 
loss.adversarial_temperature 0.1926229990387656 +677 45 optimizer.lr 0.09716236733827552 +677 45 negative_sampler.num_negs_per_pos 23.0 +677 45 training.batch_size 1.0 +677 46 model.embedding_dim 2.0 +677 46 loss.margin 20.38867977543427 +677 46 loss.adversarial_temperature 0.878031664196093 +677 46 optimizer.lr 0.0016891154846553622 +677 46 negative_sampler.num_negs_per_pos 17.0 +677 46 training.batch_size 0.0 +677 47 model.embedding_dim 1.0 +677 47 loss.margin 18.909938414527616 +677 47 loss.adversarial_temperature 0.1226348090067353 +677 47 optimizer.lr 0.0012628238753345324 +677 47 negative_sampler.num_negs_per_pos 88.0 +677 47 training.batch_size 0.0 +677 48 model.embedding_dim 2.0 +677 48 loss.margin 27.587587119211516 +677 48 loss.adversarial_temperature 0.22407309264250855 +677 48 optimizer.lr 0.04998779922587728 +677 48 negative_sampler.num_negs_per_pos 22.0 +677 48 training.batch_size 2.0 +677 49 model.embedding_dim 1.0 +677 49 loss.margin 19.450434298385442 +677 49 loss.adversarial_temperature 0.5107710196245989 +677 49 optimizer.lr 0.07638779482950893 +677 49 negative_sampler.num_negs_per_pos 2.0 +677 49 training.batch_size 2.0 +677 50 model.embedding_dim 0.0 +677 50 loss.margin 4.294601665201057 +677 50 loss.adversarial_temperature 0.58130615325755 +677 50 optimizer.lr 0.0035433945790922087 +677 50 negative_sampler.num_negs_per_pos 39.0 +677 50 training.batch_size 1.0 +677 51 model.embedding_dim 2.0 +677 51 loss.margin 19.94688156952011 +677 51 loss.adversarial_temperature 0.4593348663541551 +677 51 optimizer.lr 0.03276433367540852 +677 51 negative_sampler.num_negs_per_pos 6.0 +677 51 training.batch_size 0.0 +677 52 model.embedding_dim 0.0 +677 52 loss.margin 27.156798187862115 +677 52 loss.adversarial_temperature 0.534425286530827 +677 52 optimizer.lr 0.06533979065888189 +677 52 negative_sampler.num_negs_per_pos 54.0 +677 52 training.batch_size 1.0 +677 53 model.embedding_dim 2.0 +677 53 loss.margin 22.717639764497328 +677 53 
loss.adversarial_temperature 0.49515582332902247 +677 53 optimizer.lr 0.010027479441644477 +677 53 negative_sampler.num_negs_per_pos 89.0 +677 53 training.batch_size 2.0 +677 54 model.embedding_dim 2.0 +677 54 loss.margin 14.647294927609002 +677 54 loss.adversarial_temperature 0.9301359808635561 +677 54 optimizer.lr 0.030580442561270117 +677 54 negative_sampler.num_negs_per_pos 2.0 +677 54 training.batch_size 0.0 +677 1 dataset """wn18rr""" +677 1 model """simple""" +677 1 loss """nssa""" +677 1 regularizer """no""" +677 1 optimizer """adam""" +677 1 training_loop """owa""" +677 1 negative_sampler """basic""" +677 1 evaluator """rankbased""" +677 2 dataset """wn18rr""" +677 2 model """simple""" +677 2 loss """nssa""" +677 2 regularizer """no""" +677 2 optimizer """adam""" +677 2 training_loop """owa""" +677 2 negative_sampler """basic""" +677 2 evaluator """rankbased""" +677 3 dataset """wn18rr""" +677 3 model """simple""" +677 3 loss """nssa""" +677 3 regularizer """no""" +677 3 optimizer """adam""" +677 3 training_loop """owa""" +677 3 negative_sampler """basic""" +677 3 evaluator """rankbased""" +677 4 dataset """wn18rr""" +677 4 model """simple""" +677 4 loss """nssa""" +677 4 regularizer """no""" +677 4 optimizer """adam""" +677 4 training_loop """owa""" +677 4 negative_sampler """basic""" +677 4 evaluator """rankbased""" +677 5 dataset """wn18rr""" +677 5 model """simple""" +677 5 loss """nssa""" +677 5 regularizer """no""" +677 5 optimizer """adam""" +677 5 training_loop """owa""" +677 5 negative_sampler """basic""" +677 5 evaluator """rankbased""" +677 6 dataset """wn18rr""" +677 6 model """simple""" +677 6 loss """nssa""" +677 6 regularizer """no""" +677 6 optimizer """adam""" +677 6 training_loop """owa""" +677 6 negative_sampler """basic""" +677 6 evaluator """rankbased""" +677 7 dataset """wn18rr""" +677 7 model """simple""" +677 7 loss """nssa""" +677 7 regularizer """no""" +677 7 optimizer """adam""" +677 7 training_loop """owa""" +677 7 
negative_sampler """basic""" +677 7 evaluator """rankbased""" +677 8 dataset """wn18rr""" +677 8 model """simple""" +677 8 loss """nssa""" +677 8 regularizer """no""" +677 8 optimizer """adam""" +677 8 training_loop """owa""" +677 8 negative_sampler """basic""" +677 8 evaluator """rankbased""" +677 9 dataset """wn18rr""" +677 9 model """simple""" +677 9 loss """nssa""" +677 9 regularizer """no""" +677 9 optimizer """adam""" +677 9 training_loop """owa""" +677 9 negative_sampler """basic""" +677 9 evaluator """rankbased""" +677 10 dataset """wn18rr""" +677 10 model """simple""" +677 10 loss """nssa""" +677 10 regularizer """no""" +677 10 optimizer """adam""" +677 10 training_loop """owa""" +677 10 negative_sampler """basic""" +677 10 evaluator """rankbased""" +677 11 dataset """wn18rr""" +677 11 model """simple""" +677 11 loss """nssa""" +677 11 regularizer """no""" +677 11 optimizer """adam""" +677 11 training_loop """owa""" +677 11 negative_sampler """basic""" +677 11 evaluator """rankbased""" +677 12 dataset """wn18rr""" +677 12 model """simple""" +677 12 loss """nssa""" +677 12 regularizer """no""" +677 12 optimizer """adam""" +677 12 training_loop """owa""" +677 12 negative_sampler """basic""" +677 12 evaluator """rankbased""" +677 13 dataset """wn18rr""" +677 13 model """simple""" +677 13 loss """nssa""" +677 13 regularizer """no""" +677 13 optimizer """adam""" +677 13 training_loop """owa""" +677 13 negative_sampler """basic""" +677 13 evaluator """rankbased""" +677 14 dataset """wn18rr""" +677 14 model """simple""" +677 14 loss """nssa""" +677 14 regularizer """no""" +677 14 optimizer """adam""" +677 14 training_loop """owa""" +677 14 negative_sampler """basic""" +677 14 evaluator """rankbased""" +677 15 dataset """wn18rr""" +677 15 model """simple""" +677 15 loss """nssa""" +677 15 regularizer """no""" +677 15 optimizer """adam""" +677 15 training_loop """owa""" +677 15 negative_sampler """basic""" +677 15 evaluator """rankbased""" +677 16 dataset 
"""wn18rr""" +677 16 model """simple""" +677 16 loss """nssa""" +677 16 regularizer """no""" +677 16 optimizer """adam""" +677 16 training_loop """owa""" +677 16 negative_sampler """basic""" +677 16 evaluator """rankbased""" +677 17 dataset """wn18rr""" +677 17 model """simple""" +677 17 loss """nssa""" +677 17 regularizer """no""" +677 17 optimizer """adam""" +677 17 training_loop """owa""" +677 17 negative_sampler """basic""" +677 17 evaluator """rankbased""" +677 18 dataset """wn18rr""" +677 18 model """simple""" +677 18 loss """nssa""" +677 18 regularizer """no""" +677 18 optimizer """adam""" +677 18 training_loop """owa""" +677 18 negative_sampler """basic""" +677 18 evaluator """rankbased""" +677 19 dataset """wn18rr""" +677 19 model """simple""" +677 19 loss """nssa""" +677 19 regularizer """no""" +677 19 optimizer """adam""" +677 19 training_loop """owa""" +677 19 negative_sampler """basic""" +677 19 evaluator """rankbased""" +677 20 dataset """wn18rr""" +677 20 model """simple""" +677 20 loss """nssa""" +677 20 regularizer """no""" +677 20 optimizer """adam""" +677 20 training_loop """owa""" +677 20 negative_sampler """basic""" +677 20 evaluator """rankbased""" +677 21 dataset """wn18rr""" +677 21 model """simple""" +677 21 loss """nssa""" +677 21 regularizer """no""" +677 21 optimizer """adam""" +677 21 training_loop """owa""" +677 21 negative_sampler """basic""" +677 21 evaluator """rankbased""" +677 22 dataset """wn18rr""" +677 22 model """simple""" +677 22 loss """nssa""" +677 22 regularizer """no""" +677 22 optimizer """adam""" +677 22 training_loop """owa""" +677 22 negative_sampler """basic""" +677 22 evaluator """rankbased""" +677 23 dataset """wn18rr""" +677 23 model """simple""" +677 23 loss """nssa""" +677 23 regularizer """no""" +677 23 optimizer """adam""" +677 23 training_loop """owa""" +677 23 negative_sampler """basic""" +677 23 evaluator """rankbased""" +677 24 dataset """wn18rr""" +677 24 model """simple""" +677 24 loss """nssa""" +677 24 
regularizer """no""" +677 24 optimizer """adam""" +677 24 training_loop """owa""" +677 24 negative_sampler """basic""" +677 24 evaluator """rankbased""" +677 25 dataset """wn18rr""" +677 25 model """simple""" +677 25 loss """nssa""" +677 25 regularizer """no""" +677 25 optimizer """adam""" +677 25 training_loop """owa""" +677 25 negative_sampler """basic""" +677 25 evaluator """rankbased""" +677 26 dataset """wn18rr""" +677 26 model """simple""" +677 26 loss """nssa""" +677 26 regularizer """no""" +677 26 optimizer """adam""" +677 26 training_loop """owa""" +677 26 negative_sampler """basic""" +677 26 evaluator """rankbased""" +677 27 dataset """wn18rr""" +677 27 model """simple""" +677 27 loss """nssa""" +677 27 regularizer """no""" +677 27 optimizer """adam""" +677 27 training_loop """owa""" +677 27 negative_sampler """basic""" +677 27 evaluator """rankbased""" +677 28 dataset """wn18rr""" +677 28 model """simple""" +677 28 loss """nssa""" +677 28 regularizer """no""" +677 28 optimizer """adam""" +677 28 training_loop """owa""" +677 28 negative_sampler """basic""" +677 28 evaluator """rankbased""" +677 29 dataset """wn18rr""" +677 29 model """simple""" +677 29 loss """nssa""" +677 29 regularizer """no""" +677 29 optimizer """adam""" +677 29 training_loop """owa""" +677 29 negative_sampler """basic""" +677 29 evaluator """rankbased""" +677 30 dataset """wn18rr""" +677 30 model """simple""" +677 30 loss """nssa""" +677 30 regularizer """no""" +677 30 optimizer """adam""" +677 30 training_loop """owa""" +677 30 negative_sampler """basic""" +677 30 evaluator """rankbased""" +677 31 dataset """wn18rr""" +677 31 model """simple""" +677 31 loss """nssa""" +677 31 regularizer """no""" +677 31 optimizer """adam""" +677 31 training_loop """owa""" +677 31 negative_sampler """basic""" +677 31 evaluator """rankbased""" +677 32 dataset """wn18rr""" +677 32 model """simple""" +677 32 loss """nssa""" +677 32 regularizer """no""" +677 32 optimizer """adam""" +677 32 training_loop 
"""owa""" +677 32 negative_sampler """basic""" +677 32 evaluator """rankbased""" +677 33 dataset """wn18rr""" +677 33 model """simple""" +677 33 loss """nssa""" +677 33 regularizer """no""" +677 33 optimizer """adam""" +677 33 training_loop """owa""" +677 33 negative_sampler """basic""" +677 33 evaluator """rankbased""" +677 34 dataset """wn18rr""" +677 34 model """simple""" +677 34 loss """nssa""" +677 34 regularizer """no""" +677 34 optimizer """adam""" +677 34 training_loop """owa""" +677 34 negative_sampler """basic""" +677 34 evaluator """rankbased""" +677 35 dataset """wn18rr""" +677 35 model """simple""" +677 35 loss """nssa""" +677 35 regularizer """no""" +677 35 optimizer """adam""" +677 35 training_loop """owa""" +677 35 negative_sampler """basic""" +677 35 evaluator """rankbased""" +677 36 dataset """wn18rr""" +677 36 model """simple""" +677 36 loss """nssa""" +677 36 regularizer """no""" +677 36 optimizer """adam""" +677 36 training_loop """owa""" +677 36 negative_sampler """basic""" +677 36 evaluator """rankbased""" +677 37 dataset """wn18rr""" +677 37 model """simple""" +677 37 loss """nssa""" +677 37 regularizer """no""" +677 37 optimizer """adam""" +677 37 training_loop """owa""" +677 37 negative_sampler """basic""" +677 37 evaluator """rankbased""" +677 38 dataset """wn18rr""" +677 38 model """simple""" +677 38 loss """nssa""" +677 38 regularizer """no""" +677 38 optimizer """adam""" +677 38 training_loop """owa""" +677 38 negative_sampler """basic""" +677 38 evaluator """rankbased""" +677 39 dataset """wn18rr""" +677 39 model """simple""" +677 39 loss """nssa""" +677 39 regularizer """no""" +677 39 optimizer """adam""" +677 39 training_loop """owa""" +677 39 negative_sampler """basic""" +677 39 evaluator """rankbased""" +677 40 dataset """wn18rr""" +677 40 model """simple""" +677 40 loss """nssa""" +677 40 regularizer """no""" +677 40 optimizer """adam""" +677 40 training_loop """owa""" +677 40 negative_sampler """basic""" +677 40 evaluator 
"""rankbased""" +677 41 dataset """wn18rr""" +677 41 model """simple""" +677 41 loss """nssa""" +677 41 regularizer """no""" +677 41 optimizer """adam""" +677 41 training_loop """owa""" +677 41 negative_sampler """basic""" +677 41 evaluator """rankbased""" +677 42 dataset """wn18rr""" +677 42 model """simple""" +677 42 loss """nssa""" +677 42 regularizer """no""" +677 42 optimizer """adam""" +677 42 training_loop """owa""" +677 42 negative_sampler """basic""" +677 42 evaluator """rankbased""" +677 43 dataset """wn18rr""" +677 43 model """simple""" +677 43 loss """nssa""" +677 43 regularizer """no""" +677 43 optimizer """adam""" +677 43 training_loop """owa""" +677 43 negative_sampler """basic""" +677 43 evaluator """rankbased""" +677 44 dataset """wn18rr""" +677 44 model """simple""" +677 44 loss """nssa""" +677 44 regularizer """no""" +677 44 optimizer """adam""" +677 44 training_loop """owa""" +677 44 negative_sampler """basic""" +677 44 evaluator """rankbased""" +677 45 dataset """wn18rr""" +677 45 model """simple""" +677 45 loss """nssa""" +677 45 regularizer """no""" +677 45 optimizer """adam""" +677 45 training_loop """owa""" +677 45 negative_sampler """basic""" +677 45 evaluator """rankbased""" +677 46 dataset """wn18rr""" +677 46 model """simple""" +677 46 loss """nssa""" +677 46 regularizer """no""" +677 46 optimizer """adam""" +677 46 training_loop """owa""" +677 46 negative_sampler """basic""" +677 46 evaluator """rankbased""" +677 47 dataset """wn18rr""" +677 47 model """simple""" +677 47 loss """nssa""" +677 47 regularizer """no""" +677 47 optimizer """adam""" +677 47 training_loop """owa""" +677 47 negative_sampler """basic""" +677 47 evaluator """rankbased""" +677 48 dataset """wn18rr""" +677 48 model """simple""" +677 48 loss """nssa""" +677 48 regularizer """no""" +677 48 optimizer """adam""" +677 48 training_loop """owa""" +677 48 negative_sampler """basic""" +677 48 evaluator """rankbased""" +677 49 dataset """wn18rr""" +677 49 model """simple""" 
+677 49 loss """nssa""" +677 49 regularizer """no""" +677 49 optimizer """adam""" +677 49 training_loop """owa""" +677 49 negative_sampler """basic""" +677 49 evaluator """rankbased""" +677 50 dataset """wn18rr""" +677 50 model """simple""" +677 50 loss """nssa""" +677 50 regularizer """no""" +677 50 optimizer """adam""" +677 50 training_loop """owa""" +677 50 negative_sampler """basic""" +677 50 evaluator """rankbased""" +677 51 dataset """wn18rr""" +677 51 model """simple""" +677 51 loss """nssa""" +677 51 regularizer """no""" +677 51 optimizer """adam""" +677 51 training_loop """owa""" +677 51 negative_sampler """basic""" +677 51 evaluator """rankbased""" +677 52 dataset """wn18rr""" +677 52 model """simple""" +677 52 loss """nssa""" +677 52 regularizer """no""" +677 52 optimizer """adam""" +677 52 training_loop """owa""" +677 52 negative_sampler """basic""" +677 52 evaluator """rankbased""" +677 53 dataset """wn18rr""" +677 53 model """simple""" +677 53 loss """nssa""" +677 53 regularizer """no""" +677 53 optimizer """adam""" +677 53 training_loop """owa""" +677 53 negative_sampler """basic""" +677 53 evaluator """rankbased""" +677 54 dataset """wn18rr""" +677 54 model """simple""" +677 54 loss """nssa""" +677 54 regularizer """no""" +677 54 optimizer """adam""" +677 54 training_loop """owa""" +677 54 negative_sampler """basic""" +677 54 evaluator """rankbased""" +678 1 model.embedding_dim 2.0 +678 1 loss.margin 1.828376443384725 +678 1 loss.adversarial_temperature 0.11998529495229512 +678 1 optimizer.lr 0.027558298329126114 +678 1 negative_sampler.num_negs_per_pos 26.0 +678 1 training.batch_size 2.0 +678 2 model.embedding_dim 2.0 +678 2 loss.margin 12.910177747513778 +678 2 loss.adversarial_temperature 0.4384031410030519 +678 2 optimizer.lr 0.02729089671071183 +678 2 negative_sampler.num_negs_per_pos 17.0 +678 2 training.batch_size 2.0 +678 3 model.embedding_dim 2.0 +678 3 loss.margin 5.650576620433295 +678 3 loss.adversarial_temperature 0.42299766700136215 
+678 3 optimizer.lr 0.005365278526044946 +678 3 negative_sampler.num_negs_per_pos 10.0 +678 3 training.batch_size 2.0 +678 4 model.embedding_dim 2.0 +678 4 loss.margin 20.874401456215573 +678 4 loss.adversarial_temperature 0.8883662809920959 +678 4 optimizer.lr 0.025624987719367665 +678 4 negative_sampler.num_negs_per_pos 97.0 +678 4 training.batch_size 2.0 +678 5 model.embedding_dim 0.0 +678 5 loss.margin 3.3944441351941776 +678 5 loss.adversarial_temperature 0.24744380452640372 +678 5 optimizer.lr 0.002831880199647184 +678 5 negative_sampler.num_negs_per_pos 59.0 +678 5 training.batch_size 2.0 +678 6 model.embedding_dim 1.0 +678 6 loss.margin 23.542406842623066 +678 6 loss.adversarial_temperature 0.3829221840605314 +678 6 optimizer.lr 0.02144101969338199 +678 6 negative_sampler.num_negs_per_pos 20.0 +678 6 training.batch_size 1.0 +678 7 model.embedding_dim 2.0 +678 7 loss.margin 5.965172532170381 +678 7 loss.adversarial_temperature 0.8354573913720529 +678 7 optimizer.lr 0.002927034153046218 +678 7 negative_sampler.num_negs_per_pos 83.0 +678 7 training.batch_size 0.0 +678 8 model.embedding_dim 1.0 +678 8 loss.margin 22.82704769660629 +678 8 loss.adversarial_temperature 0.8806738869821275 +678 8 optimizer.lr 0.0686488731995767 +678 8 negative_sampler.num_negs_per_pos 90.0 +678 8 training.batch_size 1.0 +678 9 model.embedding_dim 0.0 +678 9 loss.margin 27.225605447913626 +678 9 loss.adversarial_temperature 0.9908455982072993 +678 9 optimizer.lr 0.03145653687352881 +678 9 negative_sampler.num_negs_per_pos 49.0 +678 9 training.batch_size 1.0 +678 10 model.embedding_dim 1.0 +678 10 loss.margin 11.723649999189723 +678 10 loss.adversarial_temperature 0.6945068826518894 +678 10 optimizer.lr 0.020599104918295565 +678 10 negative_sampler.num_negs_per_pos 86.0 +678 10 training.batch_size 2.0 +678 11 model.embedding_dim 2.0 +678 11 loss.margin 20.643750477584668 +678 11 loss.adversarial_temperature 0.6769281107070987 +678 11 optimizer.lr 0.00888146925891269 +678 11 
negative_sampler.num_negs_per_pos 46.0 +678 11 training.batch_size 0.0 +678 12 model.embedding_dim 2.0 +678 12 loss.margin 21.957177577854804 +678 12 loss.adversarial_temperature 0.22090176030608027 +678 12 optimizer.lr 0.0010641793241734727 +678 12 negative_sampler.num_negs_per_pos 55.0 +678 12 training.batch_size 0.0 +678 13 model.embedding_dim 2.0 +678 13 loss.margin 22.29020977680615 +678 13 loss.adversarial_temperature 0.9945303153166849 +678 13 optimizer.lr 0.002448705858911152 +678 13 negative_sampler.num_negs_per_pos 71.0 +678 13 training.batch_size 1.0 +678 14 model.embedding_dim 1.0 +678 14 loss.margin 23.85921160725377 +678 14 loss.adversarial_temperature 0.2170250955580944 +678 14 optimizer.lr 0.0019486521921607044 +678 14 negative_sampler.num_negs_per_pos 57.0 +678 14 training.batch_size 2.0 +678 15 model.embedding_dim 1.0 +678 15 loss.margin 19.016814835460494 +678 15 loss.adversarial_temperature 0.6755915949178278 +678 15 optimizer.lr 0.025535800761173257 +678 15 negative_sampler.num_negs_per_pos 33.0 +678 15 training.batch_size 1.0 +678 16 model.embedding_dim 2.0 +678 16 loss.margin 7.779758388509722 +678 16 loss.adversarial_temperature 0.9922631346595063 +678 16 optimizer.lr 0.005469619786136166 +678 16 negative_sampler.num_negs_per_pos 45.0 +678 16 training.batch_size 2.0 +678 17 model.embedding_dim 2.0 +678 17 loss.margin 20.54249325374807 +678 17 loss.adversarial_temperature 0.9683827295113576 +678 17 optimizer.lr 0.03080367651536052 +678 17 negative_sampler.num_negs_per_pos 31.0 +678 17 training.batch_size 0.0 +678 18 model.embedding_dim 2.0 +678 18 loss.margin 21.924833788145587 +678 18 loss.adversarial_temperature 0.7331197219523187 +678 18 optimizer.lr 0.0032627897255674805 +678 18 negative_sampler.num_negs_per_pos 33.0 +678 18 training.batch_size 1.0 +678 19 model.embedding_dim 0.0 +678 19 loss.margin 22.89830715635747 +678 19 loss.adversarial_temperature 0.34914919900362723 +678 19 optimizer.lr 0.004613705615397706 +678 19 
negative_sampler.num_negs_per_pos 34.0 +678 19 training.batch_size 1.0 +678 20 model.embedding_dim 1.0 +678 20 loss.margin 5.766920706721129 +678 20 loss.adversarial_temperature 0.2524698412529159 +678 20 optimizer.lr 0.00999364845347439 +678 20 negative_sampler.num_negs_per_pos 83.0 +678 20 training.batch_size 2.0 +678 21 model.embedding_dim 0.0 +678 21 loss.margin 17.65051580505446 +678 21 loss.adversarial_temperature 0.25958844049194474 +678 21 optimizer.lr 0.002089556300036434 +678 21 negative_sampler.num_negs_per_pos 92.0 +678 21 training.batch_size 1.0 +678 22 model.embedding_dim 0.0 +678 22 loss.margin 9.774676029709756 +678 22 loss.adversarial_temperature 0.554621709145405 +678 22 optimizer.lr 0.0029992454256119326 +678 22 negative_sampler.num_negs_per_pos 51.0 +678 22 training.batch_size 2.0 +678 23 model.embedding_dim 1.0 +678 23 loss.margin 14.658732166087818 +678 23 loss.adversarial_temperature 0.29870010300802646 +678 23 optimizer.lr 0.0010867832816580944 +678 23 negative_sampler.num_negs_per_pos 31.0 +678 23 training.batch_size 0.0 +678 24 model.embedding_dim 1.0 +678 24 loss.margin 12.483391548620713 +678 24 loss.adversarial_temperature 0.3858113927905776 +678 24 optimizer.lr 0.03943230350457744 +678 24 negative_sampler.num_negs_per_pos 57.0 +678 24 training.batch_size 0.0 +678 25 model.embedding_dim 2.0 +678 25 loss.margin 18.168617647566535 +678 25 loss.adversarial_temperature 0.6233060690841625 +678 25 optimizer.lr 0.0542410996470708 +678 25 negative_sampler.num_negs_per_pos 35.0 +678 25 training.batch_size 2.0 +678 26 model.embedding_dim 0.0 +678 26 loss.margin 24.73225096695148 +678 26 loss.adversarial_temperature 0.15331853664255007 +678 26 optimizer.lr 0.0010928394345075783 +678 26 negative_sampler.num_negs_per_pos 19.0 +678 26 training.batch_size 2.0 +678 27 model.embedding_dim 1.0 +678 27 loss.margin 24.639022340985548 +678 27 loss.adversarial_temperature 0.8522042025950256 +678 27 optimizer.lr 0.0032483971847410316 +678 27 
negative_sampler.num_negs_per_pos 22.0 +678 27 training.batch_size 2.0 +678 28 model.embedding_dim 0.0 +678 28 loss.margin 19.636591077799594 +678 28 loss.adversarial_temperature 0.18594249046769304 +678 28 optimizer.lr 0.032428938219646715 +678 28 negative_sampler.num_negs_per_pos 3.0 +678 28 training.batch_size 1.0 +678 29 model.embedding_dim 0.0 +678 29 loss.margin 17.176573668382314 +678 29 loss.adversarial_temperature 0.6662326823756225 +678 29 optimizer.lr 0.0041297372130500295 +678 29 negative_sampler.num_negs_per_pos 90.0 +678 29 training.batch_size 1.0 +678 30 model.embedding_dim 0.0 +678 30 loss.margin 25.89981379065996 +678 30 loss.adversarial_temperature 0.3386163471120106 +678 30 optimizer.lr 0.004835096224456404 +678 30 negative_sampler.num_negs_per_pos 58.0 +678 30 training.batch_size 2.0 +678 31 model.embedding_dim 1.0 +678 31 loss.margin 18.38319406120945 +678 31 loss.adversarial_temperature 0.5434628035331003 +678 31 optimizer.lr 0.042166933776095435 +678 31 negative_sampler.num_negs_per_pos 78.0 +678 31 training.batch_size 1.0 +678 32 model.embedding_dim 1.0 +678 32 loss.margin 6.593149002661317 +678 32 loss.adversarial_temperature 0.6506283336512648 +678 32 optimizer.lr 0.01848204381245066 +678 32 negative_sampler.num_negs_per_pos 91.0 +678 32 training.batch_size 2.0 +678 33 model.embedding_dim 2.0 +678 33 loss.margin 5.29035705411061 +678 33 loss.adversarial_temperature 0.7937828373467872 +678 33 optimizer.lr 0.012737181741998518 +678 33 negative_sampler.num_negs_per_pos 81.0 +678 33 training.batch_size 1.0 +678 34 model.embedding_dim 1.0 +678 34 loss.margin 20.4530725201049 +678 34 loss.adversarial_temperature 0.894038233155678 +678 34 optimizer.lr 0.004510549463548447 +678 34 negative_sampler.num_negs_per_pos 48.0 +678 34 training.batch_size 0.0 +678 35 model.embedding_dim 1.0 +678 35 loss.margin 7.532910943024113 +678 35 loss.adversarial_temperature 0.8232646495170789 +678 35 optimizer.lr 0.0019520829832844506 +678 35 
negative_sampler.num_negs_per_pos 8.0 +678 35 training.batch_size 2.0 +678 36 model.embedding_dim 2.0 +678 36 loss.margin 2.5139597479985216 +678 36 loss.adversarial_temperature 0.7299575501127981 +678 36 optimizer.lr 0.05518043929993419 +678 36 negative_sampler.num_negs_per_pos 46.0 +678 36 training.batch_size 2.0 +678 37 model.embedding_dim 2.0 +678 37 loss.margin 18.4586290658212 +678 37 loss.adversarial_temperature 0.20774024681622547 +678 37 optimizer.lr 0.008405460503091943 +678 37 negative_sampler.num_negs_per_pos 48.0 +678 37 training.batch_size 2.0 +678 38 model.embedding_dim 2.0 +678 38 loss.margin 11.967899715087514 +678 38 loss.adversarial_temperature 0.15306872870595123 +678 38 optimizer.lr 0.00778820170338046 +678 38 negative_sampler.num_negs_per_pos 52.0 +678 38 training.batch_size 2.0 +678 39 model.embedding_dim 2.0 +678 39 loss.margin 7.451562036773468 +678 39 loss.adversarial_temperature 0.2739172308333669 +678 39 optimizer.lr 0.0545148275869578 +678 39 negative_sampler.num_negs_per_pos 8.0 +678 39 training.batch_size 0.0 +678 40 model.embedding_dim 1.0 +678 40 loss.margin 7.1035001256309 +678 40 loss.adversarial_temperature 0.6686444945747176 +678 40 optimizer.lr 0.0049197089260585025 +678 40 negative_sampler.num_negs_per_pos 61.0 +678 40 training.batch_size 1.0 +678 41 model.embedding_dim 1.0 +678 41 loss.margin 27.813592037920362 +678 41 loss.adversarial_temperature 0.23744045482481813 +678 41 optimizer.lr 0.0036763992746309984 +678 41 negative_sampler.num_negs_per_pos 65.0 +678 41 training.batch_size 2.0 +678 42 model.embedding_dim 2.0 +678 42 loss.margin 28.766585921571185 +678 42 loss.adversarial_temperature 0.795795125117433 +678 42 optimizer.lr 0.002202927880552584 +678 42 negative_sampler.num_negs_per_pos 40.0 +678 42 training.batch_size 2.0 +678 43 model.embedding_dim 1.0 +678 43 loss.margin 8.170969042715333 +678 43 loss.adversarial_temperature 0.6033715380939665 +678 43 optimizer.lr 0.002716663763742599 +678 43 
negative_sampler.num_negs_per_pos 21.0 +678 43 training.batch_size 0.0 +678 44 model.embedding_dim 2.0 +678 44 loss.margin 25.734820863020964 +678 44 loss.adversarial_temperature 0.7575698680730597 +678 44 optimizer.lr 0.00119435405198097 +678 44 negative_sampler.num_negs_per_pos 82.0 +678 44 training.batch_size 0.0 +678 45 model.embedding_dim 1.0 +678 45 loss.margin 20.90238742775886 +678 45 loss.adversarial_temperature 0.8163999790518978 +678 45 optimizer.lr 0.001588567078946603 +678 45 negative_sampler.num_negs_per_pos 61.0 +678 45 training.batch_size 1.0 +678 46 model.embedding_dim 0.0 +678 46 loss.margin 26.239911878374784 +678 46 loss.adversarial_temperature 0.6404143924485889 +678 46 optimizer.lr 0.08402563707128108 +678 46 negative_sampler.num_negs_per_pos 87.0 +678 46 training.batch_size 0.0 +678 47 model.embedding_dim 2.0 +678 47 loss.margin 21.066091208001694 +678 47 loss.adversarial_temperature 0.7889803885734964 +678 47 optimizer.lr 0.036268609495301043 +678 47 negative_sampler.num_negs_per_pos 90.0 +678 47 training.batch_size 0.0 +678 48 model.embedding_dim 1.0 +678 48 loss.margin 14.38663460609719 +678 48 loss.adversarial_temperature 0.9356401541269042 +678 48 optimizer.lr 0.005430896881759383 +678 48 negative_sampler.num_negs_per_pos 63.0 +678 48 training.batch_size 2.0 +678 49 model.embedding_dim 0.0 +678 49 loss.margin 28.02068542236168 +678 49 loss.adversarial_temperature 0.7742284434256692 +678 49 optimizer.lr 0.006873227432191817 +678 49 negative_sampler.num_negs_per_pos 20.0 +678 49 training.batch_size 0.0 +678 50 model.embedding_dim 2.0 +678 50 loss.margin 11.95988914457164 +678 50 loss.adversarial_temperature 0.741150414151558 +678 50 optimizer.lr 0.019259539122269763 +678 50 negative_sampler.num_negs_per_pos 21.0 +678 50 training.batch_size 0.0 +678 51 model.embedding_dim 0.0 +678 51 loss.margin 16.016984990027126 +678 51 loss.adversarial_temperature 0.45468123592138926 +678 51 optimizer.lr 0.001882429589261117 +678 51 
negative_sampler.num_negs_per_pos 59.0 +678 51 training.batch_size 1.0 +678 52 model.embedding_dim 0.0 +678 52 loss.margin 11.004068751623134 +678 52 loss.adversarial_temperature 0.26588236817471683 +678 52 optimizer.lr 0.0016016850194736686 +678 52 negative_sampler.num_negs_per_pos 75.0 +678 52 training.batch_size 2.0 +678 53 model.embedding_dim 0.0 +678 53 loss.margin 17.166077881851336 +678 53 loss.adversarial_temperature 0.8571551014470232 +678 53 optimizer.lr 0.0023251930962005407 +678 53 negative_sampler.num_negs_per_pos 7.0 +678 53 training.batch_size 0.0 +678 54 model.embedding_dim 0.0 +678 54 loss.margin 13.384370212907886 +678 54 loss.adversarial_temperature 0.7951294195327862 +678 54 optimizer.lr 0.001975024086046264 +678 54 negative_sampler.num_negs_per_pos 57.0 +678 54 training.batch_size 0.0 +678 55 model.embedding_dim 0.0 +678 55 loss.margin 23.403055558280883 +678 55 loss.adversarial_temperature 0.2128024706346949 +678 55 optimizer.lr 0.00557602116394803 +678 55 negative_sampler.num_negs_per_pos 31.0 +678 55 training.batch_size 0.0 +678 56 model.embedding_dim 2.0 +678 56 loss.margin 23.84694086617988 +678 56 loss.adversarial_temperature 0.37282974635754884 +678 56 optimizer.lr 0.03585568133314778 +678 56 negative_sampler.num_negs_per_pos 92.0 +678 56 training.batch_size 2.0 +678 57 model.embedding_dim 0.0 +678 57 loss.margin 29.202355675560604 +678 57 loss.adversarial_temperature 0.5161883108044051 +678 57 optimizer.lr 0.011421771978827957 +678 57 negative_sampler.num_negs_per_pos 82.0 +678 57 training.batch_size 2.0 +678 58 model.embedding_dim 1.0 +678 58 loss.margin 17.87636579131866 +678 58 loss.adversarial_temperature 0.16956023618562113 +678 58 optimizer.lr 0.005665069096607115 +678 58 negative_sampler.num_negs_per_pos 67.0 +678 58 training.batch_size 0.0 +678 59 model.embedding_dim 0.0 +678 59 loss.margin 10.71862135433322 +678 59 loss.adversarial_temperature 0.41472273488358774 +678 59 optimizer.lr 0.004472707801572224 +678 59 
negative_sampler.num_negs_per_pos 57.0 +678 59 training.batch_size 0.0 +678 60 model.embedding_dim 1.0 +678 60 loss.margin 24.724939636830868 +678 60 loss.adversarial_temperature 0.7640730546272094 +678 60 optimizer.lr 0.06230088111618707 +678 60 negative_sampler.num_negs_per_pos 4.0 +678 60 training.batch_size 0.0 +678 61 model.embedding_dim 1.0 +678 61 loss.margin 20.067666355052435 +678 61 loss.adversarial_temperature 0.5182315140959457 +678 61 optimizer.lr 0.002248173556304656 +678 61 negative_sampler.num_negs_per_pos 33.0 +678 61 training.batch_size 2.0 +678 62 model.embedding_dim 0.0 +678 62 loss.margin 7.479194295735539 +678 62 loss.adversarial_temperature 0.752154264264722 +678 62 optimizer.lr 0.01696339508538292 +678 62 negative_sampler.num_negs_per_pos 5.0 +678 62 training.batch_size 1.0 +678 63 model.embedding_dim 1.0 +678 63 loss.margin 8.640016302571421 +678 63 loss.adversarial_temperature 0.13243745633822318 +678 63 optimizer.lr 0.005963298855064059 +678 63 negative_sampler.num_negs_per_pos 34.0 +678 63 training.batch_size 1.0 +678 64 model.embedding_dim 1.0 +678 64 loss.margin 27.045890805452313 +678 64 loss.adversarial_temperature 0.3787801112383786 +678 64 optimizer.lr 0.09559905504715104 +678 64 negative_sampler.num_negs_per_pos 20.0 +678 64 training.batch_size 1.0 +678 65 model.embedding_dim 1.0 +678 65 loss.margin 25.20325496916661 +678 65 loss.adversarial_temperature 0.33216517780771504 +678 65 optimizer.lr 0.0870648733072564 +678 65 negative_sampler.num_negs_per_pos 37.0 +678 65 training.batch_size 0.0 +678 66 model.embedding_dim 1.0 +678 66 loss.margin 17.742031034077126 +678 66 loss.adversarial_temperature 0.36674381727455124 +678 66 optimizer.lr 0.002945642207942433 +678 66 negative_sampler.num_negs_per_pos 46.0 +678 66 training.batch_size 0.0 +678 67 model.embedding_dim 2.0 +678 67 loss.margin 22.805367202341777 +678 67 loss.adversarial_temperature 0.7740925946356959 +678 67 optimizer.lr 0.0020285621394292705 +678 67 
negative_sampler.num_negs_per_pos 51.0 +678 67 training.batch_size 2.0 +678 68 model.embedding_dim 2.0 +678 68 loss.margin 24.570294710957416 +678 68 loss.adversarial_temperature 0.27060347475402646 +678 68 optimizer.lr 0.021483955856581846 +678 68 negative_sampler.num_negs_per_pos 92.0 +678 68 training.batch_size 0.0 +678 69 model.embedding_dim 1.0 +678 69 loss.margin 27.503865848651152 +678 69 loss.adversarial_temperature 0.35384674733248495 +678 69 optimizer.lr 0.05193364385060768 +678 69 negative_sampler.num_negs_per_pos 26.0 +678 69 training.batch_size 1.0 +678 70 model.embedding_dim 2.0 +678 70 loss.margin 25.158943396505883 +678 70 loss.adversarial_temperature 0.7886191436703653 +678 70 optimizer.lr 0.016542575429136053 +678 70 negative_sampler.num_negs_per_pos 13.0 +678 70 training.batch_size 2.0 +678 71 model.embedding_dim 0.0 +678 71 loss.margin 25.304865019530155 +678 71 loss.adversarial_temperature 0.7661646349856368 +678 71 optimizer.lr 0.008970394020346478 +678 71 negative_sampler.num_negs_per_pos 16.0 +678 71 training.batch_size 1.0 +678 72 model.embedding_dim 0.0 +678 72 loss.margin 23.681924516476673 +678 72 loss.adversarial_temperature 0.9328339125955547 +678 72 optimizer.lr 0.04216887588166385 +678 72 negative_sampler.num_negs_per_pos 79.0 +678 72 training.batch_size 1.0 +678 73 model.embedding_dim 0.0 +678 73 loss.margin 25.368452690020863 +678 73 loss.adversarial_temperature 0.29809738200035085 +678 73 optimizer.lr 0.008549812798707047 +678 73 negative_sampler.num_negs_per_pos 42.0 +678 73 training.batch_size 0.0 +678 74 model.embedding_dim 0.0 +678 74 loss.margin 26.019884143355313 +678 74 loss.adversarial_temperature 0.6543455991193435 +678 74 optimizer.lr 0.0520593473220757 +678 74 negative_sampler.num_negs_per_pos 73.0 +678 74 training.batch_size 2.0 +678 75 model.embedding_dim 1.0 +678 75 loss.margin 19.003823094731228 +678 75 loss.adversarial_temperature 0.8813093498250961 +678 75 optimizer.lr 0.0020146463435396535 +678 75 
negative_sampler.num_negs_per_pos 27.0 +678 75 training.batch_size 2.0 +678 76 model.embedding_dim 1.0 +678 76 loss.margin 11.113038404953043 +678 76 loss.adversarial_temperature 0.2750631288092217 +678 76 optimizer.lr 0.04091687628429471 +678 76 negative_sampler.num_negs_per_pos 14.0 +678 76 training.batch_size 0.0 +678 77 model.embedding_dim 0.0 +678 77 loss.margin 20.799807844561354 +678 77 loss.adversarial_temperature 0.3157255521166211 +678 77 optimizer.lr 0.01372109635491076 +678 77 negative_sampler.num_negs_per_pos 23.0 +678 77 training.batch_size 0.0 +678 78 model.embedding_dim 2.0 +678 78 loss.margin 9.670830266938047 +678 78 loss.adversarial_temperature 0.4244144592134067 +678 78 optimizer.lr 0.06052801135189233 +678 78 negative_sampler.num_negs_per_pos 17.0 +678 78 training.batch_size 2.0 +678 79 model.embedding_dim 1.0 +678 79 loss.margin 8.136547317175879 +678 79 loss.adversarial_temperature 0.8957230025587177 +678 79 optimizer.lr 0.0036613459546518097 +678 79 negative_sampler.num_negs_per_pos 9.0 +678 79 training.batch_size 0.0 +678 80 model.embedding_dim 1.0 +678 80 loss.margin 1.8704413391276793 +678 80 loss.adversarial_temperature 0.3335328395863303 +678 80 optimizer.lr 0.06559026543641346 +678 80 negative_sampler.num_negs_per_pos 14.0 +678 80 training.batch_size 2.0 +678 81 model.embedding_dim 2.0 +678 81 loss.margin 5.128073564702117 +678 81 loss.adversarial_temperature 0.9475411899421995 +678 81 optimizer.lr 0.01087851289526129 +678 81 negative_sampler.num_negs_per_pos 17.0 +678 81 training.batch_size 1.0 +678 82 model.embedding_dim 2.0 +678 82 loss.margin 24.034005303055217 +678 82 loss.adversarial_temperature 0.1794637763899747 +678 82 optimizer.lr 0.0309308459058009 +678 82 negative_sampler.num_negs_per_pos 97.0 +678 82 training.batch_size 1.0 +678 83 model.embedding_dim 2.0 +678 83 loss.margin 3.2396387212869096 +678 83 loss.adversarial_temperature 0.8123584350511017 +678 83 optimizer.lr 0.05040953876980291 +678 83 
negative_sampler.num_negs_per_pos 40.0 +678 83 training.batch_size 2.0 +678 84 model.embedding_dim 0.0 +678 84 loss.margin 12.798045594575248 +678 84 loss.adversarial_temperature 0.7434048256572572 +678 84 optimizer.lr 0.07647627590110312 +678 84 negative_sampler.num_negs_per_pos 16.0 +678 84 training.batch_size 1.0 +678 85 model.embedding_dim 2.0 +678 85 loss.margin 7.442256717540632 +678 85 loss.adversarial_temperature 0.34370283101637633 +678 85 optimizer.lr 0.005358337367669979 +678 85 negative_sampler.num_negs_per_pos 58.0 +678 85 training.batch_size 2.0 +678 1 dataset """wn18rr""" +678 1 model """simple""" +678 1 loss """nssa""" +678 1 regularizer """no""" +678 1 optimizer """adam""" +678 1 training_loop """owa""" +678 1 negative_sampler """basic""" +678 1 evaluator """rankbased""" +678 2 dataset """wn18rr""" +678 2 model """simple""" +678 2 loss """nssa""" +678 2 regularizer """no""" +678 2 optimizer """adam""" +678 2 training_loop """owa""" +678 2 negative_sampler """basic""" +678 2 evaluator """rankbased""" +678 3 dataset """wn18rr""" +678 3 model """simple""" +678 3 loss """nssa""" +678 3 regularizer """no""" +678 3 optimizer """adam""" +678 3 training_loop """owa""" +678 3 negative_sampler """basic""" +678 3 evaluator """rankbased""" +678 4 dataset """wn18rr""" +678 4 model """simple""" +678 4 loss """nssa""" +678 4 regularizer """no""" +678 4 optimizer """adam""" +678 4 training_loop """owa""" +678 4 negative_sampler """basic""" +678 4 evaluator """rankbased""" +678 5 dataset """wn18rr""" +678 5 model """simple""" +678 5 loss """nssa""" +678 5 regularizer """no""" +678 5 optimizer """adam""" +678 5 training_loop """owa""" +678 5 negative_sampler """basic""" +678 5 evaluator """rankbased""" +678 6 dataset """wn18rr""" +678 6 model """simple""" +678 6 loss """nssa""" +678 6 regularizer """no""" +678 6 optimizer """adam""" +678 6 training_loop """owa""" +678 6 negative_sampler """basic""" +678 6 evaluator """rankbased""" +678 7 dataset """wn18rr""" +678 7 
model """simple""" +678 7 loss """nssa""" +678 7 regularizer """no""" +678 7 optimizer """adam""" +678 7 training_loop """owa""" +678 7 negative_sampler """basic""" +678 7 evaluator """rankbased""" +678 8 dataset """wn18rr""" +678 8 model """simple""" +678 8 loss """nssa""" +678 8 regularizer """no""" +678 8 optimizer """adam""" +678 8 training_loop """owa""" +678 8 negative_sampler """basic""" +678 8 evaluator """rankbased""" +678 9 dataset """wn18rr""" +678 9 model """simple""" +678 9 loss """nssa""" +678 9 regularizer """no""" +678 9 optimizer """adam""" +678 9 training_loop """owa""" +678 9 negative_sampler """basic""" +678 9 evaluator """rankbased""" +678 10 dataset """wn18rr""" +678 10 model """simple""" +678 10 loss """nssa""" +678 10 regularizer """no""" +678 10 optimizer """adam""" +678 10 training_loop """owa""" +678 10 negative_sampler """basic""" +678 10 evaluator """rankbased""" +678 11 dataset """wn18rr""" +678 11 model """simple""" +678 11 loss """nssa""" +678 11 regularizer """no""" +678 11 optimizer """adam""" +678 11 training_loop """owa""" +678 11 negative_sampler """basic""" +678 11 evaluator """rankbased""" +678 12 dataset """wn18rr""" +678 12 model """simple""" +678 12 loss """nssa""" +678 12 regularizer """no""" +678 12 optimizer """adam""" +678 12 training_loop """owa""" +678 12 negative_sampler """basic""" +678 12 evaluator """rankbased""" +678 13 dataset """wn18rr""" +678 13 model """simple""" +678 13 loss """nssa""" +678 13 regularizer """no""" +678 13 optimizer """adam""" +678 13 training_loop """owa""" +678 13 negative_sampler """basic""" +678 13 evaluator """rankbased""" +678 14 dataset """wn18rr""" +678 14 model """simple""" +678 14 loss """nssa""" +678 14 regularizer """no""" +678 14 optimizer """adam""" +678 14 training_loop """owa""" +678 14 negative_sampler """basic""" +678 14 evaluator """rankbased""" +678 15 dataset """wn18rr""" +678 15 model """simple""" +678 15 loss """nssa""" +678 15 regularizer """no""" +678 15 optimizer 
"""adam""" +678 15 training_loop """owa""" +678 15 negative_sampler """basic""" +678 15 evaluator """rankbased""" +678 16 dataset """wn18rr""" +678 16 model """simple""" +678 16 loss """nssa""" +678 16 regularizer """no""" +678 16 optimizer """adam""" +678 16 training_loop """owa""" +678 16 negative_sampler """basic""" +678 16 evaluator """rankbased""" +678 17 dataset """wn18rr""" +678 17 model """simple""" +678 17 loss """nssa""" +678 17 regularizer """no""" +678 17 optimizer """adam""" +678 17 training_loop """owa""" +678 17 negative_sampler """basic""" +678 17 evaluator """rankbased""" +678 18 dataset """wn18rr""" +678 18 model """simple""" +678 18 loss """nssa""" +678 18 regularizer """no""" +678 18 optimizer """adam""" +678 18 training_loop """owa""" +678 18 negative_sampler """basic""" +678 18 evaluator """rankbased""" +678 19 dataset """wn18rr""" +678 19 model """simple""" +678 19 loss """nssa""" +678 19 regularizer """no""" +678 19 optimizer """adam""" +678 19 training_loop """owa""" +678 19 negative_sampler """basic""" +678 19 evaluator """rankbased""" +678 20 dataset """wn18rr""" +678 20 model """simple""" +678 20 loss """nssa""" +678 20 regularizer """no""" +678 20 optimizer """adam""" +678 20 training_loop """owa""" +678 20 negative_sampler """basic""" +678 20 evaluator """rankbased""" +678 21 dataset """wn18rr""" +678 21 model """simple""" +678 21 loss """nssa""" +678 21 regularizer """no""" +678 21 optimizer """adam""" +678 21 training_loop """owa""" +678 21 negative_sampler """basic""" +678 21 evaluator """rankbased""" +678 22 dataset """wn18rr""" +678 22 model """simple""" +678 22 loss """nssa""" +678 22 regularizer """no""" +678 22 optimizer """adam""" +678 22 training_loop """owa""" +678 22 negative_sampler """basic""" +678 22 evaluator """rankbased""" +678 23 dataset """wn18rr""" +678 23 model """simple""" +678 23 loss """nssa""" +678 23 regularizer """no""" +678 23 optimizer """adam""" +678 23 training_loop """owa""" +678 23 negative_sampler 
"""basic""" +678 23 evaluator """rankbased""" +678 24 dataset """wn18rr""" +678 24 model """simple""" +678 24 loss """nssa""" +678 24 regularizer """no""" +678 24 optimizer """adam""" +678 24 training_loop """owa""" +678 24 negative_sampler """basic""" +678 24 evaluator """rankbased""" +678 25 dataset """wn18rr""" +678 25 model """simple""" +678 25 loss """nssa""" +678 25 regularizer """no""" +678 25 optimizer """adam""" +678 25 training_loop """owa""" +678 25 negative_sampler """basic""" +678 25 evaluator """rankbased""" +678 26 dataset """wn18rr""" +678 26 model """simple""" +678 26 loss """nssa""" +678 26 regularizer """no""" +678 26 optimizer """adam""" +678 26 training_loop """owa""" +678 26 negative_sampler """basic""" +678 26 evaluator """rankbased""" +678 27 dataset """wn18rr""" +678 27 model """simple""" +678 27 loss """nssa""" +678 27 regularizer """no""" +678 27 optimizer """adam""" +678 27 training_loop """owa""" +678 27 negative_sampler """basic""" +678 27 evaluator """rankbased""" +678 28 dataset """wn18rr""" +678 28 model """simple""" +678 28 loss """nssa""" +678 28 regularizer """no""" +678 28 optimizer """adam""" +678 28 training_loop """owa""" +678 28 negative_sampler """basic""" +678 28 evaluator """rankbased""" +678 29 dataset """wn18rr""" +678 29 model """simple""" +678 29 loss """nssa""" +678 29 regularizer """no""" +678 29 optimizer """adam""" +678 29 training_loop """owa""" +678 29 negative_sampler """basic""" +678 29 evaluator """rankbased""" +678 30 dataset """wn18rr""" +678 30 model """simple""" +678 30 loss """nssa""" +678 30 regularizer """no""" +678 30 optimizer """adam""" +678 30 training_loop """owa""" +678 30 negative_sampler """basic""" +678 30 evaluator """rankbased""" +678 31 dataset """wn18rr""" +678 31 model """simple""" +678 31 loss """nssa""" +678 31 regularizer """no""" +678 31 optimizer """adam""" +678 31 training_loop """owa""" +678 31 negative_sampler """basic""" +678 31 evaluator """rankbased""" +678 32 dataset 
"""wn18rr""" +678 32 model """simple""" +678 32 loss """nssa""" +678 32 regularizer """no""" +678 32 optimizer """adam""" +678 32 training_loop """owa""" +678 32 negative_sampler """basic""" +678 32 evaluator """rankbased""" +678 33 dataset """wn18rr""" +678 33 model """simple""" +678 33 loss """nssa""" +678 33 regularizer """no""" +678 33 optimizer """adam""" +678 33 training_loop """owa""" +678 33 negative_sampler """basic""" +678 33 evaluator """rankbased""" +678 34 dataset """wn18rr""" +678 34 model """simple""" +678 34 loss """nssa""" +678 34 regularizer """no""" +678 34 optimizer """adam""" +678 34 training_loop """owa""" +678 34 negative_sampler """basic""" +678 34 evaluator """rankbased""" +678 35 dataset """wn18rr""" +678 35 model """simple""" +678 35 loss """nssa""" +678 35 regularizer """no""" +678 35 optimizer """adam""" +678 35 training_loop """owa""" +678 35 negative_sampler """basic""" +678 35 evaluator """rankbased""" +678 36 dataset """wn18rr""" +678 36 model """simple""" +678 36 loss """nssa""" +678 36 regularizer """no""" +678 36 optimizer """adam""" +678 36 training_loop """owa""" +678 36 negative_sampler """basic""" +678 36 evaluator """rankbased""" +678 37 dataset """wn18rr""" +678 37 model """simple""" +678 37 loss """nssa""" +678 37 regularizer """no""" +678 37 optimizer """adam""" +678 37 training_loop """owa""" +678 37 negative_sampler """basic""" +678 37 evaluator """rankbased""" +678 38 dataset """wn18rr""" +678 38 model """simple""" +678 38 loss """nssa""" +678 38 regularizer """no""" +678 38 optimizer """adam""" +678 38 training_loop """owa""" +678 38 negative_sampler """basic""" +678 38 evaluator """rankbased""" +678 39 dataset """wn18rr""" +678 39 model """simple""" +678 39 loss """nssa""" +678 39 regularizer """no""" +678 39 optimizer """adam""" +678 39 training_loop """owa""" +678 39 negative_sampler """basic""" +678 39 evaluator """rankbased""" +678 40 dataset """wn18rr""" +678 40 model """simple""" +678 40 loss """nssa""" +678 40 
regularizer """no""" +678 40 optimizer """adam""" +678 40 training_loop """owa""" +678 40 negative_sampler """basic""" +678 40 evaluator """rankbased""" +678 41 dataset """wn18rr""" +678 41 model """simple""" +678 41 loss """nssa""" +678 41 regularizer """no""" +678 41 optimizer """adam""" +678 41 training_loop """owa""" +678 41 negative_sampler """basic""" +678 41 evaluator """rankbased""" +678 42 dataset """wn18rr""" +678 42 model """simple""" +678 42 loss """nssa""" +678 42 regularizer """no""" +678 42 optimizer """adam""" +678 42 training_loop """owa""" +678 42 negative_sampler """basic""" +678 42 evaluator """rankbased""" +678 43 dataset """wn18rr""" +678 43 model """simple""" +678 43 loss """nssa""" +678 43 regularizer """no""" +678 43 optimizer """adam""" +678 43 training_loop """owa""" +678 43 negative_sampler """basic""" +678 43 evaluator """rankbased""" +678 44 dataset """wn18rr""" +678 44 model """simple""" +678 44 loss """nssa""" +678 44 regularizer """no""" +678 44 optimizer """adam""" +678 44 training_loop """owa""" +678 44 negative_sampler """basic""" +678 44 evaluator """rankbased""" +678 45 dataset """wn18rr""" +678 45 model """simple""" +678 45 loss """nssa""" +678 45 regularizer """no""" +678 45 optimizer """adam""" +678 45 training_loop """owa""" +678 45 negative_sampler """basic""" +678 45 evaluator """rankbased""" +678 46 dataset """wn18rr""" +678 46 model """simple""" +678 46 loss """nssa""" +678 46 regularizer """no""" +678 46 optimizer """adam""" +678 46 training_loop """owa""" +678 46 negative_sampler """basic""" +678 46 evaluator """rankbased""" +678 47 dataset """wn18rr""" +678 47 model """simple""" +678 47 loss """nssa""" +678 47 regularizer """no""" +678 47 optimizer """adam""" +678 47 training_loop """owa""" +678 47 negative_sampler """basic""" +678 47 evaluator """rankbased""" +678 48 dataset """wn18rr""" +678 48 model """simple""" +678 48 loss """nssa""" +678 48 regularizer """no""" +678 48 optimizer """adam""" +678 48 training_loop 
"""owa""" +678 48 negative_sampler """basic""" +678 48 evaluator """rankbased""" +678 49 dataset """wn18rr""" +678 49 model """simple""" +678 49 loss """nssa""" +678 49 regularizer """no""" +678 49 optimizer """adam""" +678 49 training_loop """owa""" +678 49 negative_sampler """basic""" +678 49 evaluator """rankbased""" +678 50 dataset """wn18rr""" +678 50 model """simple""" +678 50 loss """nssa""" +678 50 regularizer """no""" +678 50 optimizer """adam""" +678 50 training_loop """owa""" +678 50 negative_sampler """basic""" +678 50 evaluator """rankbased""" +678 51 dataset """wn18rr""" +678 51 model """simple""" +678 51 loss """nssa""" +678 51 regularizer """no""" +678 51 optimizer """adam""" +678 51 training_loop """owa""" +678 51 negative_sampler """basic""" +678 51 evaluator """rankbased""" +678 52 dataset """wn18rr""" +678 52 model """simple""" +678 52 loss """nssa""" +678 52 regularizer """no""" +678 52 optimizer """adam""" +678 52 training_loop """owa""" +678 52 negative_sampler """basic""" +678 52 evaluator """rankbased""" +678 53 dataset """wn18rr""" +678 53 model """simple""" +678 53 loss """nssa""" +678 53 regularizer """no""" +678 53 optimizer """adam""" +678 53 training_loop """owa""" +678 53 negative_sampler """basic""" +678 53 evaluator """rankbased""" +678 54 dataset """wn18rr""" +678 54 model """simple""" +678 54 loss """nssa""" +678 54 regularizer """no""" +678 54 optimizer """adam""" +678 54 training_loop """owa""" +678 54 negative_sampler """basic""" +678 54 evaluator """rankbased""" +678 55 dataset """wn18rr""" +678 55 model """simple""" +678 55 loss """nssa""" +678 55 regularizer """no""" +678 55 optimizer """adam""" +678 55 training_loop """owa""" +678 55 negative_sampler """basic""" +678 55 evaluator """rankbased""" +678 56 dataset """wn18rr""" +678 56 model """simple""" +678 56 loss """nssa""" +678 56 regularizer """no""" +678 56 optimizer """adam""" +678 56 training_loop """owa""" +678 56 negative_sampler """basic""" +678 56 evaluator 
"""rankbased""" +678 57 dataset """wn18rr""" +678 57 model """simple""" +678 57 loss """nssa""" +678 57 regularizer """no""" +678 57 optimizer """adam""" +678 57 training_loop """owa""" +678 57 negative_sampler """basic""" +678 57 evaluator """rankbased""" +678 58 dataset """wn18rr""" +678 58 model """simple""" +678 58 loss """nssa""" +678 58 regularizer """no""" +678 58 optimizer """adam""" +678 58 training_loop """owa""" +678 58 negative_sampler """basic""" +678 58 evaluator """rankbased""" +678 59 dataset """wn18rr""" +678 59 model """simple""" +678 59 loss """nssa""" +678 59 regularizer """no""" +678 59 optimizer """adam""" +678 59 training_loop """owa""" +678 59 negative_sampler """basic""" +678 59 evaluator """rankbased""" +678 60 dataset """wn18rr""" +678 60 model """simple""" +678 60 loss """nssa""" +678 60 regularizer """no""" +678 60 optimizer """adam""" +678 60 training_loop """owa""" +678 60 negative_sampler """basic""" +678 60 evaluator """rankbased""" +678 61 dataset """wn18rr""" +678 61 model """simple""" +678 61 loss """nssa""" +678 61 regularizer """no""" +678 61 optimizer """adam""" +678 61 training_loop """owa""" +678 61 negative_sampler """basic""" +678 61 evaluator """rankbased""" +678 62 dataset """wn18rr""" +678 62 model """simple""" +678 62 loss """nssa""" +678 62 regularizer """no""" +678 62 optimizer """adam""" +678 62 training_loop """owa""" +678 62 negative_sampler """basic""" +678 62 evaluator """rankbased""" +678 63 dataset """wn18rr""" +678 63 model """simple""" +678 63 loss """nssa""" +678 63 regularizer """no""" +678 63 optimizer """adam""" +678 63 training_loop """owa""" +678 63 negative_sampler """basic""" +678 63 evaluator """rankbased""" +678 64 dataset """wn18rr""" +678 64 model """simple""" +678 64 loss """nssa""" +678 64 regularizer """no""" +678 64 optimizer """adam""" +678 64 training_loop """owa""" +678 64 negative_sampler """basic""" +678 64 evaluator """rankbased""" +678 65 dataset """wn18rr""" +678 65 model """simple""" 
+678 65 loss """nssa""" +678 65 regularizer """no""" +678 65 optimizer """adam""" +678 65 training_loop """owa""" +678 65 negative_sampler """basic""" +678 65 evaluator """rankbased""" +678 66 dataset """wn18rr""" +678 66 model """simple""" +678 66 loss """nssa""" +678 66 regularizer """no""" +678 66 optimizer """adam""" +678 66 training_loop """owa""" +678 66 negative_sampler """basic""" +678 66 evaluator """rankbased""" +678 67 dataset """wn18rr""" +678 67 model """simple""" +678 67 loss """nssa""" +678 67 regularizer """no""" +678 67 optimizer """adam""" +678 67 training_loop """owa""" +678 67 negative_sampler """basic""" +678 67 evaluator """rankbased""" +678 68 dataset """wn18rr""" +678 68 model """simple""" +678 68 loss """nssa""" +678 68 regularizer """no""" +678 68 optimizer """adam""" +678 68 training_loop """owa""" +678 68 negative_sampler """basic""" +678 68 evaluator """rankbased""" +678 69 dataset """wn18rr""" +678 69 model """simple""" +678 69 loss """nssa""" +678 69 regularizer """no""" +678 69 optimizer """adam""" +678 69 training_loop """owa""" +678 69 negative_sampler """basic""" +678 69 evaluator """rankbased""" +678 70 dataset """wn18rr""" +678 70 model """simple""" +678 70 loss """nssa""" +678 70 regularizer """no""" +678 70 optimizer """adam""" +678 70 training_loop """owa""" +678 70 negative_sampler """basic""" +678 70 evaluator """rankbased""" +678 71 dataset """wn18rr""" +678 71 model """simple""" +678 71 loss """nssa""" +678 71 regularizer """no""" +678 71 optimizer """adam""" +678 71 training_loop """owa""" +678 71 negative_sampler """basic""" +678 71 evaluator """rankbased""" +678 72 dataset """wn18rr""" +678 72 model """simple""" +678 72 loss """nssa""" +678 72 regularizer """no""" +678 72 optimizer """adam""" +678 72 training_loop """owa""" +678 72 negative_sampler """basic""" +678 72 evaluator """rankbased""" +678 73 dataset """wn18rr""" +678 73 model """simple""" +678 73 loss """nssa""" +678 73 regularizer """no""" +678 73 optimizer 
"""adam""" +678 73 training_loop """owa""" +678 73 negative_sampler """basic""" +678 73 evaluator """rankbased""" +678 74 dataset """wn18rr""" +678 74 model """simple""" +678 74 loss """nssa""" +678 74 regularizer """no""" +678 74 optimizer """adam""" +678 74 training_loop """owa""" +678 74 negative_sampler """basic""" +678 74 evaluator """rankbased""" +678 75 dataset """wn18rr""" +678 75 model """simple""" +678 75 loss """nssa""" +678 75 regularizer """no""" +678 75 optimizer """adam""" +678 75 training_loop """owa""" +678 75 negative_sampler """basic""" +678 75 evaluator """rankbased""" +678 76 dataset """wn18rr""" +678 76 model """simple""" +678 76 loss """nssa""" +678 76 regularizer """no""" +678 76 optimizer """adam""" +678 76 training_loop """owa""" +678 76 negative_sampler """basic""" +678 76 evaluator """rankbased""" +678 77 dataset """wn18rr""" +678 77 model """simple""" +678 77 loss """nssa""" +678 77 regularizer """no""" +678 77 optimizer """adam""" +678 77 training_loop """owa""" +678 77 negative_sampler """basic""" +678 77 evaluator """rankbased""" +678 78 dataset """wn18rr""" +678 78 model """simple""" +678 78 loss """nssa""" +678 78 regularizer """no""" +678 78 optimizer """adam""" +678 78 training_loop """owa""" +678 78 negative_sampler """basic""" +678 78 evaluator """rankbased""" +678 79 dataset """wn18rr""" +678 79 model """simple""" +678 79 loss """nssa""" +678 79 regularizer """no""" +678 79 optimizer """adam""" +678 79 training_loop """owa""" +678 79 negative_sampler """basic""" +678 79 evaluator """rankbased""" +678 80 dataset """wn18rr""" +678 80 model """simple""" +678 80 loss """nssa""" +678 80 regularizer """no""" +678 80 optimizer """adam""" +678 80 training_loop """owa""" +678 80 negative_sampler """basic""" +678 80 evaluator """rankbased""" +678 81 dataset """wn18rr""" +678 81 model """simple""" +678 81 loss """nssa""" +678 81 regularizer """no""" +678 81 optimizer """adam""" +678 81 training_loop """owa""" +678 81 negative_sampler 
"""basic""" +678 81 evaluator """rankbased""" +678 82 dataset """wn18rr""" +678 82 model """simple""" +678 82 loss """nssa""" +678 82 regularizer """no""" +678 82 optimizer """adam""" +678 82 training_loop """owa""" +678 82 negative_sampler """basic""" +678 82 evaluator """rankbased""" +678 83 dataset """wn18rr""" +678 83 model """simple""" +678 83 loss """nssa""" +678 83 regularizer """no""" +678 83 optimizer """adam""" +678 83 training_loop """owa""" +678 83 negative_sampler """basic""" +678 83 evaluator """rankbased""" +678 84 dataset """wn18rr""" +678 84 model """simple""" +678 84 loss """nssa""" +678 84 regularizer """no""" +678 84 optimizer """adam""" +678 84 training_loop """owa""" +678 84 negative_sampler """basic""" +678 84 evaluator """rankbased""" +678 85 dataset """wn18rr""" +678 85 model """simple""" +678 85 loss """nssa""" +678 85 regularizer """no""" +678 85 optimizer """adam""" +678 85 training_loop """owa""" +678 85 negative_sampler """basic""" +678 85 evaluator """rankbased""" +679 1 model.embedding_dim 2.0 +679 1 optimizer.lr 0.017833693684197707 +679 1 negative_sampler.num_negs_per_pos 89.0 +679 1 training.batch_size 1.0 +679 2 model.embedding_dim 1.0 +679 2 optimizer.lr 0.009277898512126334 +679 2 negative_sampler.num_negs_per_pos 99.0 +679 2 training.batch_size 0.0 +679 3 model.embedding_dim 0.0 +679 3 optimizer.lr 0.04114109483598688 +679 3 negative_sampler.num_negs_per_pos 25.0 +679 3 training.batch_size 2.0 +679 4 model.embedding_dim 1.0 +679 4 optimizer.lr 0.03737666495302397 +679 4 negative_sampler.num_negs_per_pos 45.0 +679 4 training.batch_size 1.0 +679 5 model.embedding_dim 1.0 +679 5 optimizer.lr 0.007748435949777249 +679 5 negative_sampler.num_negs_per_pos 67.0 +679 5 training.batch_size 1.0 +679 6 model.embedding_dim 0.0 +679 6 optimizer.lr 0.08680080093393017 +679 6 negative_sampler.num_negs_per_pos 38.0 +679 6 training.batch_size 2.0 +679 7 model.embedding_dim 2.0 +679 7 optimizer.lr 0.0036516936036972516 +679 7 
negative_sampler.num_negs_per_pos 19.0 +679 7 training.batch_size 2.0 +679 8 model.embedding_dim 0.0 +679 8 optimizer.lr 0.05576072710615142 +679 8 negative_sampler.num_negs_per_pos 95.0 +679 8 training.batch_size 2.0 +679 9 model.embedding_dim 0.0 +679 9 optimizer.lr 0.0037586552507623615 +679 9 negative_sampler.num_negs_per_pos 98.0 +679 9 training.batch_size 0.0 +679 10 model.embedding_dim 0.0 +679 10 optimizer.lr 0.0038755658444805338 +679 10 negative_sampler.num_negs_per_pos 62.0 +679 10 training.batch_size 0.0 +679 11 model.embedding_dim 1.0 +679 11 optimizer.lr 0.02559091707755982 +679 11 negative_sampler.num_negs_per_pos 52.0 +679 11 training.batch_size 2.0 +679 12 model.embedding_dim 0.0 +679 12 optimizer.lr 0.0027882354624867373 +679 12 negative_sampler.num_negs_per_pos 64.0 +679 12 training.batch_size 0.0 +679 13 model.embedding_dim 2.0 +679 13 optimizer.lr 0.012503696685032416 +679 13 negative_sampler.num_negs_per_pos 47.0 +679 13 training.batch_size 1.0 +679 14 model.embedding_dim 2.0 +679 14 optimizer.lr 0.016885769788531176 +679 14 negative_sampler.num_negs_per_pos 54.0 +679 14 training.batch_size 2.0 +679 15 model.embedding_dim 2.0 +679 15 optimizer.lr 0.002351231553552278 +679 15 negative_sampler.num_negs_per_pos 26.0 +679 15 training.batch_size 1.0 +679 16 model.embedding_dim 2.0 +679 16 optimizer.lr 0.020792360013657696 +679 16 negative_sampler.num_negs_per_pos 29.0 +679 16 training.batch_size 2.0 +679 17 model.embedding_dim 1.0 +679 17 optimizer.lr 0.003473790901861907 +679 17 negative_sampler.num_negs_per_pos 98.0 +679 17 training.batch_size 2.0 +679 18 model.embedding_dim 2.0 +679 18 optimizer.lr 0.012810632040569975 +679 18 negative_sampler.num_negs_per_pos 54.0 +679 18 training.batch_size 1.0 +679 19 model.embedding_dim 2.0 +679 19 optimizer.lr 0.0019413884384061832 +679 19 negative_sampler.num_negs_per_pos 22.0 +679 19 training.batch_size 2.0 +679 20 model.embedding_dim 1.0 +679 20 optimizer.lr 0.06360436506284206 +679 20 
negative_sampler.num_negs_per_pos 89.0 +679 20 training.batch_size 2.0 +679 21 model.embedding_dim 1.0 +679 21 optimizer.lr 0.0012243254799103893 +679 21 negative_sampler.num_negs_per_pos 10.0 +679 21 training.batch_size 0.0 +679 22 model.embedding_dim 0.0 +679 22 optimizer.lr 0.02402713924974417 +679 22 negative_sampler.num_negs_per_pos 86.0 +679 22 training.batch_size 2.0 +679 23 model.embedding_dim 0.0 +679 23 optimizer.lr 0.008482370295611599 +679 23 negative_sampler.num_negs_per_pos 46.0 +679 23 training.batch_size 0.0 +679 24 model.embedding_dim 1.0 +679 24 optimizer.lr 0.05897293141859742 +679 24 negative_sampler.num_negs_per_pos 71.0 +679 24 training.batch_size 0.0 +679 25 model.embedding_dim 1.0 +679 25 optimizer.lr 0.025321779510009433 +679 25 negative_sampler.num_negs_per_pos 64.0 +679 25 training.batch_size 0.0 +679 26 model.embedding_dim 1.0 +679 26 optimizer.lr 0.0033336504624532945 +679 26 negative_sampler.num_negs_per_pos 41.0 +679 26 training.batch_size 2.0 +679 27 model.embedding_dim 1.0 +679 27 optimizer.lr 0.044672004716288266 +679 27 negative_sampler.num_negs_per_pos 27.0 +679 27 training.batch_size 2.0 +679 28 model.embedding_dim 0.0 +679 28 optimizer.lr 0.0016314558559094707 +679 28 negative_sampler.num_negs_per_pos 63.0 +679 28 training.batch_size 1.0 +679 29 model.embedding_dim 0.0 +679 29 optimizer.lr 0.09734553464827919 +679 29 negative_sampler.num_negs_per_pos 70.0 +679 29 training.batch_size 0.0 +679 30 model.embedding_dim 2.0 +679 30 optimizer.lr 0.09883429758611895 +679 30 negative_sampler.num_negs_per_pos 90.0 +679 30 training.batch_size 2.0 +679 31 model.embedding_dim 2.0 +679 31 optimizer.lr 0.03959754790646554 +679 31 negative_sampler.num_negs_per_pos 22.0 +679 31 training.batch_size 0.0 +679 32 model.embedding_dim 0.0 +679 32 optimizer.lr 0.02488774215236203 +679 32 negative_sampler.num_negs_per_pos 76.0 +679 32 training.batch_size 2.0 +679 33 model.embedding_dim 2.0 +679 33 optimizer.lr 0.0021513496844402778 +679 33 
negative_sampler.num_negs_per_pos 3.0 +679 33 training.batch_size 0.0 +679 34 model.embedding_dim 2.0 +679 34 optimizer.lr 0.0018683438822051067 +679 34 negative_sampler.num_negs_per_pos 60.0 +679 34 training.batch_size 1.0 +679 35 model.embedding_dim 1.0 +679 35 optimizer.lr 0.0035002820427141343 +679 35 negative_sampler.num_negs_per_pos 92.0 +679 35 training.batch_size 0.0 +679 36 model.embedding_dim 0.0 +679 36 optimizer.lr 0.03394654568602238 +679 36 negative_sampler.num_negs_per_pos 16.0 +679 36 training.batch_size 1.0 +679 37 model.embedding_dim 1.0 +679 37 optimizer.lr 0.013893799288996008 +679 37 negative_sampler.num_negs_per_pos 40.0 +679 37 training.batch_size 0.0 +679 38 model.embedding_dim 2.0 +679 38 optimizer.lr 0.07615202617693244 +679 38 negative_sampler.num_negs_per_pos 42.0 +679 38 training.batch_size 0.0 +679 39 model.embedding_dim 1.0 +679 39 optimizer.lr 0.03012780565151536 +679 39 negative_sampler.num_negs_per_pos 96.0 +679 39 training.batch_size 2.0 +679 40 model.embedding_dim 1.0 +679 40 optimizer.lr 0.07865873616490027 +679 40 negative_sampler.num_negs_per_pos 58.0 +679 40 training.batch_size 1.0 +679 41 model.embedding_dim 2.0 +679 41 optimizer.lr 0.0306119558762587 +679 41 negative_sampler.num_negs_per_pos 79.0 +679 41 training.batch_size 0.0 +679 42 model.embedding_dim 2.0 +679 42 optimizer.lr 0.005424937066084286 +679 42 negative_sampler.num_negs_per_pos 67.0 +679 42 training.batch_size 2.0 +679 43 model.embedding_dim 1.0 +679 43 optimizer.lr 0.011430020100588183 +679 43 negative_sampler.num_negs_per_pos 91.0 +679 43 training.batch_size 1.0 +679 44 model.embedding_dim 0.0 +679 44 optimizer.lr 0.0034142537067739483 +679 44 negative_sampler.num_negs_per_pos 87.0 +679 44 training.batch_size 0.0 +679 45 model.embedding_dim 1.0 +679 45 optimizer.lr 0.0012551022935229099 +679 45 negative_sampler.num_negs_per_pos 55.0 +679 45 training.batch_size 1.0 +679 46 model.embedding_dim 0.0 +679 46 optimizer.lr 0.029701482560619483 +679 46 
negative_sampler.num_negs_per_pos 93.0 +679 46 training.batch_size 1.0 +679 47 model.embedding_dim 0.0 +679 47 optimizer.lr 0.005075424183863909 +679 47 negative_sampler.num_negs_per_pos 36.0 +679 47 training.batch_size 1.0 +679 48 model.embedding_dim 1.0 +679 48 optimizer.lr 0.09446863402497536 +679 48 negative_sampler.num_negs_per_pos 0.0 +679 48 training.batch_size 0.0 +679 49 model.embedding_dim 0.0 +679 49 optimizer.lr 0.05561479950800485 +679 49 negative_sampler.num_negs_per_pos 40.0 +679 49 training.batch_size 1.0 +679 1 dataset """wn18rr""" +679 1 model """simple""" +679 1 loss """bceaftersigmoid""" +679 1 regularizer """no""" +679 1 optimizer """adam""" +679 1 training_loop """owa""" +679 1 negative_sampler """basic""" +679 1 evaluator """rankbased""" +679 2 dataset """wn18rr""" +679 2 model """simple""" +679 2 loss """bceaftersigmoid""" +679 2 regularizer """no""" +679 2 optimizer """adam""" +679 2 training_loop """owa""" +679 2 negative_sampler """basic""" +679 2 evaluator """rankbased""" +679 3 dataset """wn18rr""" +679 3 model """simple""" +679 3 loss """bceaftersigmoid""" +679 3 regularizer """no""" +679 3 optimizer """adam""" +679 3 training_loop """owa""" +679 3 negative_sampler """basic""" +679 3 evaluator """rankbased""" +679 4 dataset """wn18rr""" +679 4 model """simple""" +679 4 loss """bceaftersigmoid""" +679 4 regularizer """no""" +679 4 optimizer """adam""" +679 4 training_loop """owa""" +679 4 negative_sampler """basic""" +679 4 evaluator """rankbased""" +679 5 dataset """wn18rr""" +679 5 model """simple""" +679 5 loss """bceaftersigmoid""" +679 5 regularizer """no""" +679 5 optimizer """adam""" +679 5 training_loop """owa""" +679 5 negative_sampler """basic""" +679 5 evaluator """rankbased""" +679 6 dataset """wn18rr""" +679 6 model """simple""" +679 6 loss """bceaftersigmoid""" +679 6 regularizer """no""" +679 6 optimizer """adam""" +679 6 training_loop """owa""" +679 6 negative_sampler """basic""" +679 6 evaluator """rankbased""" +679 7 
dataset """wn18rr""" +679 7 model """simple""" +679 7 loss """bceaftersigmoid""" +679 7 regularizer """no""" +679 7 optimizer """adam""" +679 7 training_loop """owa""" +679 7 negative_sampler """basic""" +679 7 evaluator """rankbased""" +679 8 dataset """wn18rr""" +679 8 model """simple""" +679 8 loss """bceaftersigmoid""" +679 8 regularizer """no""" +679 8 optimizer """adam""" +679 8 training_loop """owa""" +679 8 negative_sampler """basic""" +679 8 evaluator """rankbased""" +679 9 dataset """wn18rr""" +679 9 model """simple""" +679 9 loss """bceaftersigmoid""" +679 9 regularizer """no""" +679 9 optimizer """adam""" +679 9 training_loop """owa""" +679 9 negative_sampler """basic""" +679 9 evaluator """rankbased""" +679 10 dataset """wn18rr""" +679 10 model """simple""" +679 10 loss """bceaftersigmoid""" +679 10 regularizer """no""" +679 10 optimizer """adam""" +679 10 training_loop """owa""" +679 10 negative_sampler """basic""" +679 10 evaluator """rankbased""" +679 11 dataset """wn18rr""" +679 11 model """simple""" +679 11 loss """bceaftersigmoid""" +679 11 regularizer """no""" +679 11 optimizer """adam""" +679 11 training_loop """owa""" +679 11 negative_sampler """basic""" +679 11 evaluator """rankbased""" +679 12 dataset """wn18rr""" +679 12 model """simple""" +679 12 loss """bceaftersigmoid""" +679 12 regularizer """no""" +679 12 optimizer """adam""" +679 12 training_loop """owa""" +679 12 negative_sampler """basic""" +679 12 evaluator """rankbased""" +679 13 dataset """wn18rr""" +679 13 model """simple""" +679 13 loss """bceaftersigmoid""" +679 13 regularizer """no""" +679 13 optimizer """adam""" +679 13 training_loop """owa""" +679 13 negative_sampler """basic""" +679 13 evaluator """rankbased""" +679 14 dataset """wn18rr""" +679 14 model """simple""" +679 14 loss """bceaftersigmoid""" +679 14 regularizer """no""" +679 14 optimizer """adam""" +679 14 training_loop """owa""" +679 14 negative_sampler """basic""" +679 14 evaluator """rankbased""" +679 15 
dataset """wn18rr""" +679 15 model """simple""" +679 15 loss """bceaftersigmoid""" +679 15 regularizer """no""" +679 15 optimizer """adam""" +679 15 training_loop """owa""" +679 15 negative_sampler """basic""" +679 15 evaluator """rankbased""" +679 16 dataset """wn18rr""" +679 16 model """simple""" +679 16 loss """bceaftersigmoid""" +679 16 regularizer """no""" +679 16 optimizer """adam""" +679 16 training_loop """owa""" +679 16 negative_sampler """basic""" +679 16 evaluator """rankbased""" +679 17 dataset """wn18rr""" +679 17 model """simple""" +679 17 loss """bceaftersigmoid""" +679 17 regularizer """no""" +679 17 optimizer """adam""" +679 17 training_loop """owa""" +679 17 negative_sampler """basic""" +679 17 evaluator """rankbased""" +679 18 dataset """wn18rr""" +679 18 model """simple""" +679 18 loss """bceaftersigmoid""" +679 18 regularizer """no""" +679 18 optimizer """adam""" +679 18 training_loop """owa""" +679 18 negative_sampler """basic""" +679 18 evaluator """rankbased""" +679 19 dataset """wn18rr""" +679 19 model """simple""" +679 19 loss """bceaftersigmoid""" +679 19 regularizer """no""" +679 19 optimizer """adam""" +679 19 training_loop """owa""" +679 19 negative_sampler """basic""" +679 19 evaluator """rankbased""" +679 20 dataset """wn18rr""" +679 20 model """simple""" +679 20 loss """bceaftersigmoid""" +679 20 regularizer """no""" +679 20 optimizer """adam""" +679 20 training_loop """owa""" +679 20 negative_sampler """basic""" +679 20 evaluator """rankbased""" +679 21 dataset """wn18rr""" +679 21 model """simple""" +679 21 loss """bceaftersigmoid""" +679 21 regularizer """no""" +679 21 optimizer """adam""" +679 21 training_loop """owa""" +679 21 negative_sampler """basic""" +679 21 evaluator """rankbased""" +679 22 dataset """wn18rr""" +679 22 model """simple""" +679 22 loss """bceaftersigmoid""" +679 22 regularizer """no""" +679 22 optimizer """adam""" +679 22 training_loop """owa""" +679 22 negative_sampler """basic""" +679 22 evaluator 
"""rankbased""" +679 23 dataset """wn18rr""" +679 23 model """simple""" +679 23 loss """bceaftersigmoid""" +679 23 regularizer """no""" +679 23 optimizer """adam""" +679 23 training_loop """owa""" +679 23 negative_sampler """basic""" +679 23 evaluator """rankbased""" +679 24 dataset """wn18rr""" +679 24 model """simple""" +679 24 loss """bceaftersigmoid""" +679 24 regularizer """no""" +679 24 optimizer """adam""" +679 24 training_loop """owa""" +679 24 negative_sampler """basic""" +679 24 evaluator """rankbased""" +679 25 dataset """wn18rr""" +679 25 model """simple""" +679 25 loss """bceaftersigmoid""" +679 25 regularizer """no""" +679 25 optimizer """adam""" +679 25 training_loop """owa""" +679 25 negative_sampler """basic""" +679 25 evaluator """rankbased""" +679 26 dataset """wn18rr""" +679 26 model """simple""" +679 26 loss """bceaftersigmoid""" +679 26 regularizer """no""" +679 26 optimizer """adam""" +679 26 training_loop """owa""" +679 26 negative_sampler """basic""" +679 26 evaluator """rankbased""" +679 27 dataset """wn18rr""" +679 27 model """simple""" +679 27 loss """bceaftersigmoid""" +679 27 regularizer """no""" +679 27 optimizer """adam""" +679 27 training_loop """owa""" +679 27 negative_sampler """basic""" +679 27 evaluator """rankbased""" +679 28 dataset """wn18rr""" +679 28 model """simple""" +679 28 loss """bceaftersigmoid""" +679 28 regularizer """no""" +679 28 optimizer """adam""" +679 28 training_loop """owa""" +679 28 negative_sampler """basic""" +679 28 evaluator """rankbased""" +679 29 dataset """wn18rr""" +679 29 model """simple""" +679 29 loss """bceaftersigmoid""" +679 29 regularizer """no""" +679 29 optimizer """adam""" +679 29 training_loop """owa""" +679 29 negative_sampler """basic""" +679 29 evaluator """rankbased""" +679 30 dataset """wn18rr""" +679 30 model """simple""" +679 30 loss """bceaftersigmoid""" +679 30 regularizer """no""" +679 30 optimizer """adam""" +679 30 training_loop """owa""" +679 30 negative_sampler """basic""" 
+679 30 evaluator """rankbased""" +679 31 dataset """wn18rr""" +679 31 model """simple""" +679 31 loss """bceaftersigmoid""" +679 31 regularizer """no""" +679 31 optimizer """adam""" +679 31 training_loop """owa""" +679 31 negative_sampler """basic""" +679 31 evaluator """rankbased""" +679 32 dataset """wn18rr""" +679 32 model """simple""" +679 32 loss """bceaftersigmoid""" +679 32 regularizer """no""" +679 32 optimizer """adam""" +679 32 training_loop """owa""" +679 32 negative_sampler """basic""" +679 32 evaluator """rankbased""" +679 33 dataset """wn18rr""" +679 33 model """simple""" +679 33 loss """bceaftersigmoid""" +679 33 regularizer """no""" +679 33 optimizer """adam""" +679 33 training_loop """owa""" +679 33 negative_sampler """basic""" +679 33 evaluator """rankbased""" +679 34 dataset """wn18rr""" +679 34 model """simple""" +679 34 loss """bceaftersigmoid""" +679 34 regularizer """no""" +679 34 optimizer """adam""" +679 34 training_loop """owa""" +679 34 negative_sampler """basic""" +679 34 evaluator """rankbased""" +679 35 dataset """wn18rr""" +679 35 model """simple""" +679 35 loss """bceaftersigmoid""" +679 35 regularizer """no""" +679 35 optimizer """adam""" +679 35 training_loop """owa""" +679 35 negative_sampler """basic""" +679 35 evaluator """rankbased""" +679 36 dataset """wn18rr""" +679 36 model """simple""" +679 36 loss """bceaftersigmoid""" +679 36 regularizer """no""" +679 36 optimizer """adam""" +679 36 training_loop """owa""" +679 36 negative_sampler """basic""" +679 36 evaluator """rankbased""" +679 37 dataset """wn18rr""" +679 37 model """simple""" +679 37 loss """bceaftersigmoid""" +679 37 regularizer """no""" +679 37 optimizer """adam""" +679 37 training_loop """owa""" +679 37 negative_sampler """basic""" +679 37 evaluator """rankbased""" +679 38 dataset """wn18rr""" +679 38 model """simple""" +679 38 loss """bceaftersigmoid""" +679 38 regularizer """no""" +679 38 optimizer """adam""" +679 38 training_loop """owa""" +679 38 
negative_sampler """basic""" +679 38 evaluator """rankbased""" +679 39 dataset """wn18rr""" +679 39 model """simple""" +679 39 loss """bceaftersigmoid""" +679 39 regularizer """no""" +679 39 optimizer """adam""" +679 39 training_loop """owa""" +679 39 negative_sampler """basic""" +679 39 evaluator """rankbased""" +679 40 dataset """wn18rr""" +679 40 model """simple""" +679 40 loss """bceaftersigmoid""" +679 40 regularizer """no""" +679 40 optimizer """adam""" +679 40 training_loop """owa""" +679 40 negative_sampler """basic""" +679 40 evaluator """rankbased""" +679 41 dataset """wn18rr""" +679 41 model """simple""" +679 41 loss """bceaftersigmoid""" +679 41 regularizer """no""" +679 41 optimizer """adam""" +679 41 training_loop """owa""" +679 41 negative_sampler """basic""" +679 41 evaluator """rankbased""" +679 42 dataset """wn18rr""" +679 42 model """simple""" +679 42 loss """bceaftersigmoid""" +679 42 regularizer """no""" +679 42 optimizer """adam""" +679 42 training_loop """owa""" +679 42 negative_sampler """basic""" +679 42 evaluator """rankbased""" +679 43 dataset """wn18rr""" +679 43 model """simple""" +679 43 loss """bceaftersigmoid""" +679 43 regularizer """no""" +679 43 optimizer """adam""" +679 43 training_loop """owa""" +679 43 negative_sampler """basic""" +679 43 evaluator """rankbased""" +679 44 dataset """wn18rr""" +679 44 model """simple""" +679 44 loss """bceaftersigmoid""" +679 44 regularizer """no""" +679 44 optimizer """adam""" +679 44 training_loop """owa""" +679 44 negative_sampler """basic""" +679 44 evaluator """rankbased""" +679 45 dataset """wn18rr""" +679 45 model """simple""" +679 45 loss """bceaftersigmoid""" +679 45 regularizer """no""" +679 45 optimizer """adam""" +679 45 training_loop """owa""" +679 45 negative_sampler """basic""" +679 45 evaluator """rankbased""" +679 46 dataset """wn18rr""" +679 46 model """simple""" +679 46 loss """bceaftersigmoid""" +679 46 regularizer """no""" +679 46 optimizer """adam""" +679 46 training_loop 
"""owa""" +679 46 negative_sampler """basic""" +679 46 evaluator """rankbased""" +679 47 dataset """wn18rr""" +679 47 model """simple""" +679 47 loss """bceaftersigmoid""" +679 47 regularizer """no""" +679 47 optimizer """adam""" +679 47 training_loop """owa""" +679 47 negative_sampler """basic""" +679 47 evaluator """rankbased""" +679 48 dataset """wn18rr""" +679 48 model """simple""" +679 48 loss """bceaftersigmoid""" +679 48 regularizer """no""" +679 48 optimizer """adam""" +679 48 training_loop """owa""" +679 48 negative_sampler """basic""" +679 48 evaluator """rankbased""" +679 49 dataset """wn18rr""" +679 49 model """simple""" +679 49 loss """bceaftersigmoid""" +679 49 regularizer """no""" +679 49 optimizer """adam""" +679 49 training_loop """owa""" +679 49 negative_sampler """basic""" +679 49 evaluator """rankbased""" +680 1 model.embedding_dim 1.0 +680 1 optimizer.lr 0.00156392878455931 +680 1 negative_sampler.num_negs_per_pos 5.0 +680 1 training.batch_size 1.0 +680 2 model.embedding_dim 0.0 +680 2 optimizer.lr 0.010655527710489913 +680 2 negative_sampler.num_negs_per_pos 91.0 +680 2 training.batch_size 1.0 +680 3 model.embedding_dim 0.0 +680 3 optimizer.lr 0.001120406575697261 +680 3 negative_sampler.num_negs_per_pos 47.0 +680 3 training.batch_size 1.0 +680 4 model.embedding_dim 1.0 +680 4 optimizer.lr 0.009688202789421153 +680 4 negative_sampler.num_negs_per_pos 98.0 +680 4 training.batch_size 1.0 +680 5 model.embedding_dim 1.0 +680 5 optimizer.lr 0.004259091868345459 +680 5 negative_sampler.num_negs_per_pos 37.0 +680 5 training.batch_size 2.0 +680 6 model.embedding_dim 0.0 +680 6 optimizer.lr 0.002263936466659375 +680 6 negative_sampler.num_negs_per_pos 61.0 +680 6 training.batch_size 1.0 +680 7 model.embedding_dim 0.0 +680 7 optimizer.lr 0.035503774824732104 +680 7 negative_sampler.num_negs_per_pos 69.0 +680 7 training.batch_size 2.0 +680 8 model.embedding_dim 1.0 +680 8 optimizer.lr 0.0018313300822581809 +680 8 negative_sampler.num_negs_per_pos 5.0 
+680 8 training.batch_size 1.0 +680 9 model.embedding_dim 2.0 +680 9 optimizer.lr 0.0028965984476620596 +680 9 negative_sampler.num_negs_per_pos 87.0 +680 9 training.batch_size 0.0 +680 10 model.embedding_dim 1.0 +680 10 optimizer.lr 0.06526771508187927 +680 10 negative_sampler.num_negs_per_pos 51.0 +680 10 training.batch_size 1.0 +680 11 model.embedding_dim 0.0 +680 11 optimizer.lr 0.01144886819724914 +680 11 negative_sampler.num_negs_per_pos 80.0 +680 11 training.batch_size 1.0 +680 12 model.embedding_dim 1.0 +680 12 optimizer.lr 0.009506385594191688 +680 12 negative_sampler.num_negs_per_pos 19.0 +680 12 training.batch_size 1.0 +680 13 model.embedding_dim 1.0 +680 13 optimizer.lr 0.06830868510242946 +680 13 negative_sampler.num_negs_per_pos 86.0 +680 13 training.batch_size 2.0 +680 14 model.embedding_dim 1.0 +680 14 optimizer.lr 0.09093712256790788 +680 14 negative_sampler.num_negs_per_pos 52.0 +680 14 training.batch_size 0.0 +680 15 model.embedding_dim 1.0 +680 15 optimizer.lr 0.09476349466238038 +680 15 negative_sampler.num_negs_per_pos 59.0 +680 15 training.batch_size 1.0 +680 16 model.embedding_dim 0.0 +680 16 optimizer.lr 0.07875886779745384 +680 16 negative_sampler.num_negs_per_pos 83.0 +680 16 training.batch_size 2.0 +680 17 model.embedding_dim 2.0 +680 17 optimizer.lr 0.009244480817460593 +680 17 negative_sampler.num_negs_per_pos 21.0 +680 17 training.batch_size 1.0 +680 18 model.embedding_dim 2.0 +680 18 optimizer.lr 0.023240821476490554 +680 18 negative_sampler.num_negs_per_pos 6.0 +680 18 training.batch_size 1.0 +680 19 model.embedding_dim 2.0 +680 19 optimizer.lr 0.031004589505730717 +680 19 negative_sampler.num_negs_per_pos 20.0 +680 19 training.batch_size 2.0 +680 20 model.embedding_dim 0.0 +680 20 optimizer.lr 0.002075615115726909 +680 20 negative_sampler.num_negs_per_pos 1.0 +680 20 training.batch_size 2.0 +680 21 model.embedding_dim 1.0 +680 21 optimizer.lr 0.0154935161733208 +680 21 negative_sampler.num_negs_per_pos 41.0 +680 21 
training.batch_size 2.0 +680 22 model.embedding_dim 0.0 +680 22 optimizer.lr 0.009513552307088047 +680 22 negative_sampler.num_negs_per_pos 19.0 +680 22 training.batch_size 0.0 +680 23 model.embedding_dim 2.0 +680 23 optimizer.lr 0.004957539633327467 +680 23 negative_sampler.num_negs_per_pos 61.0 +680 23 training.batch_size 0.0 +680 24 model.embedding_dim 2.0 +680 24 optimizer.lr 0.017433584488584522 +680 24 negative_sampler.num_negs_per_pos 99.0 +680 24 training.batch_size 1.0 +680 25 model.embedding_dim 2.0 +680 25 optimizer.lr 0.021675757847516602 +680 25 negative_sampler.num_negs_per_pos 46.0 +680 25 training.batch_size 2.0 +680 26 model.embedding_dim 2.0 +680 26 optimizer.lr 0.07902568703677013 +680 26 negative_sampler.num_negs_per_pos 66.0 +680 26 training.batch_size 0.0 +680 27 model.embedding_dim 2.0 +680 27 optimizer.lr 0.0034100155874795955 +680 27 negative_sampler.num_negs_per_pos 93.0 +680 27 training.batch_size 0.0 +680 28 model.embedding_dim 2.0 +680 28 optimizer.lr 0.028021780465490295 +680 28 negative_sampler.num_negs_per_pos 81.0 +680 28 training.batch_size 2.0 +680 29 model.embedding_dim 0.0 +680 29 optimizer.lr 0.01911813400580198 +680 29 negative_sampler.num_negs_per_pos 41.0 +680 29 training.batch_size 0.0 +680 30 model.embedding_dim 0.0 +680 30 optimizer.lr 0.0034304520685972434 +680 30 negative_sampler.num_negs_per_pos 77.0 +680 30 training.batch_size 2.0 +680 31 model.embedding_dim 1.0 +680 31 optimizer.lr 0.022675316570889583 +680 31 negative_sampler.num_negs_per_pos 94.0 +680 31 training.batch_size 0.0 +680 32 model.embedding_dim 2.0 +680 32 optimizer.lr 0.014784455232242439 +680 32 negative_sampler.num_negs_per_pos 2.0 +680 32 training.batch_size 1.0 +680 33 model.embedding_dim 0.0 +680 33 optimizer.lr 0.0013391695183995619 +680 33 negative_sampler.num_negs_per_pos 2.0 +680 33 training.batch_size 1.0 +680 34 model.embedding_dim 2.0 +680 34 optimizer.lr 0.004709599056939013 +680 34 negative_sampler.num_negs_per_pos 24.0 +680 34 
training.batch_size 1.0 +680 35 model.embedding_dim 2.0 +680 35 optimizer.lr 0.025434145757429975 +680 35 negative_sampler.num_negs_per_pos 17.0 +680 35 training.batch_size 0.0 +680 36 model.embedding_dim 0.0 +680 36 optimizer.lr 0.0037262034145359242 +680 36 negative_sampler.num_negs_per_pos 95.0 +680 36 training.batch_size 2.0 +680 37 model.embedding_dim 0.0 +680 37 optimizer.lr 0.09447637359522731 +680 37 negative_sampler.num_negs_per_pos 55.0 +680 37 training.batch_size 1.0 +680 38 model.embedding_dim 0.0 +680 38 optimizer.lr 0.01886535509474985 +680 38 negative_sampler.num_negs_per_pos 73.0 +680 38 training.batch_size 1.0 +680 39 model.embedding_dim 1.0 +680 39 optimizer.lr 0.07920614213292097 +680 39 negative_sampler.num_negs_per_pos 91.0 +680 39 training.batch_size 2.0 +680 40 model.embedding_dim 0.0 +680 40 optimizer.lr 0.07464239747112554 +680 40 negative_sampler.num_negs_per_pos 91.0 +680 40 training.batch_size 0.0 +680 41 model.embedding_dim 0.0 +680 41 optimizer.lr 0.07121516733767388 +680 41 negative_sampler.num_negs_per_pos 69.0 +680 41 training.batch_size 2.0 +680 42 model.embedding_dim 2.0 +680 42 optimizer.lr 0.0875508717000703 +680 42 negative_sampler.num_negs_per_pos 13.0 +680 42 training.batch_size 0.0 +680 43 model.embedding_dim 2.0 +680 43 optimizer.lr 0.030393531869629852 +680 43 negative_sampler.num_negs_per_pos 74.0 +680 43 training.batch_size 1.0 +680 44 model.embedding_dim 0.0 +680 44 optimizer.lr 0.0019546902687988827 +680 44 negative_sampler.num_negs_per_pos 76.0 +680 44 training.batch_size 0.0 +680 45 model.embedding_dim 1.0 +680 45 optimizer.lr 0.0037667759822586863 +680 45 negative_sampler.num_negs_per_pos 51.0 +680 45 training.batch_size 2.0 +680 46 model.embedding_dim 1.0 +680 46 optimizer.lr 0.00531784192689479 +680 46 negative_sampler.num_negs_per_pos 97.0 +680 46 training.batch_size 1.0 +680 47 model.embedding_dim 2.0 +680 47 optimizer.lr 0.003648878922615852 +680 47 negative_sampler.num_negs_per_pos 38.0 +680 47 
training.batch_size 2.0 +680 48 model.embedding_dim 1.0 +680 48 optimizer.lr 0.007937694477998506 +680 48 negative_sampler.num_negs_per_pos 11.0 +680 48 training.batch_size 0.0 +680 49 model.embedding_dim 1.0 +680 49 optimizer.lr 0.003016430551526024 +680 49 negative_sampler.num_negs_per_pos 37.0 +680 49 training.batch_size 1.0 +680 50 model.embedding_dim 0.0 +680 50 optimizer.lr 0.002401090588476856 +680 50 negative_sampler.num_negs_per_pos 44.0 +680 50 training.batch_size 0.0 +680 51 model.embedding_dim 0.0 +680 51 optimizer.lr 0.006057860896917042 +680 51 negative_sampler.num_negs_per_pos 44.0 +680 51 training.batch_size 1.0 +680 52 model.embedding_dim 2.0 +680 52 optimizer.lr 0.00943820884936652 +680 52 negative_sampler.num_negs_per_pos 67.0 +680 52 training.batch_size 0.0 +680 53 model.embedding_dim 0.0 +680 53 optimizer.lr 0.0035126300032670417 +680 53 negative_sampler.num_negs_per_pos 58.0 +680 53 training.batch_size 1.0 +680 1 dataset """wn18rr""" +680 1 model """simple""" +680 1 loss """softplus""" +680 1 regularizer """no""" +680 1 optimizer """adam""" +680 1 training_loop """owa""" +680 1 negative_sampler """basic""" +680 1 evaluator """rankbased""" +680 2 dataset """wn18rr""" +680 2 model """simple""" +680 2 loss """softplus""" +680 2 regularizer """no""" +680 2 optimizer """adam""" +680 2 training_loop """owa""" +680 2 negative_sampler """basic""" +680 2 evaluator """rankbased""" +680 3 dataset """wn18rr""" +680 3 model """simple""" +680 3 loss """softplus""" +680 3 regularizer """no""" +680 3 optimizer """adam""" +680 3 training_loop """owa""" +680 3 negative_sampler """basic""" +680 3 evaluator """rankbased""" +680 4 dataset """wn18rr""" +680 4 model """simple""" +680 4 loss """softplus""" +680 4 regularizer """no""" +680 4 optimizer """adam""" +680 4 training_loop """owa""" +680 4 negative_sampler """basic""" +680 4 evaluator """rankbased""" +680 5 dataset """wn18rr""" +680 5 model """simple""" +680 5 loss """softplus""" +680 5 regularizer """no""" 
+680 5 optimizer """adam""" +680 5 training_loop """owa""" +680 5 negative_sampler """basic""" +680 5 evaluator """rankbased""" +680 6 dataset """wn18rr""" +680 6 model """simple""" +680 6 loss """softplus""" +680 6 regularizer """no""" +680 6 optimizer """adam""" +680 6 training_loop """owa""" +680 6 negative_sampler """basic""" +680 6 evaluator """rankbased""" +680 7 dataset """wn18rr""" +680 7 model """simple""" +680 7 loss """softplus""" +680 7 regularizer """no""" +680 7 optimizer """adam""" +680 7 training_loop """owa""" +680 7 negative_sampler """basic""" +680 7 evaluator """rankbased""" +680 8 dataset """wn18rr""" +680 8 model """simple""" +680 8 loss """softplus""" +680 8 regularizer """no""" +680 8 optimizer """adam""" +680 8 training_loop """owa""" +680 8 negative_sampler """basic""" +680 8 evaluator """rankbased""" +680 9 dataset """wn18rr""" +680 9 model """simple""" +680 9 loss """softplus""" +680 9 regularizer """no""" +680 9 optimizer """adam""" +680 9 training_loop """owa""" +680 9 negative_sampler """basic""" +680 9 evaluator """rankbased""" +680 10 dataset """wn18rr""" +680 10 model """simple""" +680 10 loss """softplus""" +680 10 regularizer """no""" +680 10 optimizer """adam""" +680 10 training_loop """owa""" +680 10 negative_sampler """basic""" +680 10 evaluator """rankbased""" +680 11 dataset """wn18rr""" +680 11 model """simple""" +680 11 loss """softplus""" +680 11 regularizer """no""" +680 11 optimizer """adam""" +680 11 training_loop """owa""" +680 11 negative_sampler """basic""" +680 11 evaluator """rankbased""" +680 12 dataset """wn18rr""" +680 12 model """simple""" +680 12 loss """softplus""" +680 12 regularizer """no""" +680 12 optimizer """adam""" +680 12 training_loop """owa""" +680 12 negative_sampler """basic""" +680 12 evaluator """rankbased""" +680 13 dataset """wn18rr""" +680 13 model """simple""" +680 13 loss """softplus""" +680 13 regularizer """no""" +680 13 optimizer """adam""" +680 13 training_loop """owa""" +680 13 
negative_sampler """basic""" +680 13 evaluator """rankbased""" +680 14 dataset """wn18rr""" +680 14 model """simple""" +680 14 loss """softplus""" +680 14 regularizer """no""" +680 14 optimizer """adam""" +680 14 training_loop """owa""" +680 14 negative_sampler """basic""" +680 14 evaluator """rankbased""" +680 15 dataset """wn18rr""" +680 15 model """simple""" +680 15 loss """softplus""" +680 15 regularizer """no""" +680 15 optimizer """adam""" +680 15 training_loop """owa""" +680 15 negative_sampler """basic""" +680 15 evaluator """rankbased""" +680 16 dataset """wn18rr""" +680 16 model """simple""" +680 16 loss """softplus""" +680 16 regularizer """no""" +680 16 optimizer """adam""" +680 16 training_loop """owa""" +680 16 negative_sampler """basic""" +680 16 evaluator """rankbased""" +680 17 dataset """wn18rr""" +680 17 model """simple""" +680 17 loss """softplus""" +680 17 regularizer """no""" +680 17 optimizer """adam""" +680 17 training_loop """owa""" +680 17 negative_sampler """basic""" +680 17 evaluator """rankbased""" +680 18 dataset """wn18rr""" +680 18 model """simple""" +680 18 loss """softplus""" +680 18 regularizer """no""" +680 18 optimizer """adam""" +680 18 training_loop """owa""" +680 18 negative_sampler """basic""" +680 18 evaluator """rankbased""" +680 19 dataset """wn18rr""" +680 19 model """simple""" +680 19 loss """softplus""" +680 19 regularizer """no""" +680 19 optimizer """adam""" +680 19 training_loop """owa""" +680 19 negative_sampler """basic""" +680 19 evaluator """rankbased""" +680 20 dataset """wn18rr""" +680 20 model """simple""" +680 20 loss """softplus""" +680 20 regularizer """no""" +680 20 optimizer """adam""" +680 20 training_loop """owa""" +680 20 negative_sampler """basic""" +680 20 evaluator """rankbased""" +680 21 dataset """wn18rr""" +680 21 model """simple""" +680 21 loss """softplus""" +680 21 regularizer """no""" +680 21 optimizer """adam""" +680 21 training_loop """owa""" +680 21 negative_sampler """basic""" +680 21 
evaluator """rankbased""" +680 22 dataset """wn18rr""" +680 22 model """simple""" +680 22 loss """softplus""" +680 22 regularizer """no""" +680 22 optimizer """adam""" +680 22 training_loop """owa""" +680 22 negative_sampler """basic""" +680 22 evaluator """rankbased""" +680 23 dataset """wn18rr""" +680 23 model """simple""" +680 23 loss """softplus""" +680 23 regularizer """no""" +680 23 optimizer """adam""" +680 23 training_loop """owa""" +680 23 negative_sampler """basic""" +680 23 evaluator """rankbased""" +680 24 dataset """wn18rr""" +680 24 model """simple""" +680 24 loss """softplus""" +680 24 regularizer """no""" +680 24 optimizer """adam""" +680 24 training_loop """owa""" +680 24 negative_sampler """basic""" +680 24 evaluator """rankbased""" +680 25 dataset """wn18rr""" +680 25 model """simple""" +680 25 loss """softplus""" +680 25 regularizer """no""" +680 25 optimizer """adam""" +680 25 training_loop """owa""" +680 25 negative_sampler """basic""" +680 25 evaluator """rankbased""" +680 26 dataset """wn18rr""" +680 26 model """simple""" +680 26 loss """softplus""" +680 26 regularizer """no""" +680 26 optimizer """adam""" +680 26 training_loop """owa""" +680 26 negative_sampler """basic""" +680 26 evaluator """rankbased""" +680 27 dataset """wn18rr""" +680 27 model """simple""" +680 27 loss """softplus""" +680 27 regularizer """no""" +680 27 optimizer """adam""" +680 27 training_loop """owa""" +680 27 negative_sampler """basic""" +680 27 evaluator """rankbased""" +680 28 dataset """wn18rr""" +680 28 model """simple""" +680 28 loss """softplus""" +680 28 regularizer """no""" +680 28 optimizer """adam""" +680 28 training_loop """owa""" +680 28 negative_sampler """basic""" +680 28 evaluator """rankbased""" +680 29 dataset """wn18rr""" +680 29 model """simple""" +680 29 loss """softplus""" +680 29 regularizer """no""" +680 29 optimizer """adam""" +680 29 training_loop """owa""" +680 29 negative_sampler """basic""" +680 29 evaluator """rankbased""" +680 30 
dataset """wn18rr""" +680 30 model """simple""" +680 30 loss """softplus""" +680 30 regularizer """no""" +680 30 optimizer """adam""" +680 30 training_loop """owa""" +680 30 negative_sampler """basic""" +680 30 evaluator """rankbased""" +680 31 dataset """wn18rr""" +680 31 model """simple""" +680 31 loss """softplus""" +680 31 regularizer """no""" +680 31 optimizer """adam""" +680 31 training_loop """owa""" +680 31 negative_sampler """basic""" +680 31 evaluator """rankbased""" +680 32 dataset """wn18rr""" +680 32 model """simple""" +680 32 loss """softplus""" +680 32 regularizer """no""" +680 32 optimizer """adam""" +680 32 training_loop """owa""" +680 32 negative_sampler """basic""" +680 32 evaluator """rankbased""" +680 33 dataset """wn18rr""" +680 33 model """simple""" +680 33 loss """softplus""" +680 33 regularizer """no""" +680 33 optimizer """adam""" +680 33 training_loop """owa""" +680 33 negative_sampler """basic""" +680 33 evaluator """rankbased""" +680 34 dataset """wn18rr""" +680 34 model """simple""" +680 34 loss """softplus""" +680 34 regularizer """no""" +680 34 optimizer """adam""" +680 34 training_loop """owa""" +680 34 negative_sampler """basic""" +680 34 evaluator """rankbased""" +680 35 dataset """wn18rr""" +680 35 model """simple""" +680 35 loss """softplus""" +680 35 regularizer """no""" +680 35 optimizer """adam""" +680 35 training_loop """owa""" +680 35 negative_sampler """basic""" +680 35 evaluator """rankbased""" +680 36 dataset """wn18rr""" +680 36 model """simple""" +680 36 loss """softplus""" +680 36 regularizer """no""" +680 36 optimizer """adam""" +680 36 training_loop """owa""" +680 36 negative_sampler """basic""" +680 36 evaluator """rankbased""" +680 37 dataset """wn18rr""" +680 37 model """simple""" +680 37 loss """softplus""" +680 37 regularizer """no""" +680 37 optimizer """adam""" +680 37 training_loop """owa""" +680 37 negative_sampler """basic""" +680 37 evaluator """rankbased""" +680 38 dataset """wn18rr""" +680 38 model 
"""simple""" +680 38 loss """softplus""" +680 38 regularizer """no""" +680 38 optimizer """adam""" +680 38 training_loop """owa""" +680 38 negative_sampler """basic""" +680 38 evaluator """rankbased""" +680 39 dataset """wn18rr""" +680 39 model """simple""" +680 39 loss """softplus""" +680 39 regularizer """no""" +680 39 optimizer """adam""" +680 39 training_loop """owa""" +680 39 negative_sampler """basic""" +680 39 evaluator """rankbased""" +680 40 dataset """wn18rr""" +680 40 model """simple""" +680 40 loss """softplus""" +680 40 regularizer """no""" +680 40 optimizer """adam""" +680 40 training_loop """owa""" +680 40 negative_sampler """basic""" +680 40 evaluator """rankbased""" +680 41 dataset """wn18rr""" +680 41 model """simple""" +680 41 loss """softplus""" +680 41 regularizer """no""" +680 41 optimizer """adam""" +680 41 training_loop """owa""" +680 41 negative_sampler """basic""" +680 41 evaluator """rankbased""" +680 42 dataset """wn18rr""" +680 42 model """simple""" +680 42 loss """softplus""" +680 42 regularizer """no""" +680 42 optimizer """adam""" +680 42 training_loop """owa""" +680 42 negative_sampler """basic""" +680 42 evaluator """rankbased""" +680 43 dataset """wn18rr""" +680 43 model """simple""" +680 43 loss """softplus""" +680 43 regularizer """no""" +680 43 optimizer """adam""" +680 43 training_loop """owa""" +680 43 negative_sampler """basic""" +680 43 evaluator """rankbased""" +680 44 dataset """wn18rr""" +680 44 model """simple""" +680 44 loss """softplus""" +680 44 regularizer """no""" +680 44 optimizer """adam""" +680 44 training_loop """owa""" +680 44 negative_sampler """basic""" +680 44 evaluator """rankbased""" +680 45 dataset """wn18rr""" +680 45 model """simple""" +680 45 loss """softplus""" +680 45 regularizer """no""" +680 45 optimizer """adam""" +680 45 training_loop """owa""" +680 45 negative_sampler """basic""" +680 45 evaluator """rankbased""" +680 46 dataset """wn18rr""" +680 46 model """simple""" +680 46 loss 
"""softplus""" +680 46 regularizer """no""" +680 46 optimizer """adam""" +680 46 training_loop """owa""" +680 46 negative_sampler """basic""" +680 46 evaluator """rankbased""" +680 47 dataset """wn18rr""" +680 47 model """simple""" +680 47 loss """softplus""" +680 47 regularizer """no""" +680 47 optimizer """adam""" +680 47 training_loop """owa""" +680 47 negative_sampler """basic""" +680 47 evaluator """rankbased""" +680 48 dataset """wn18rr""" +680 48 model """simple""" +680 48 loss """softplus""" +680 48 regularizer """no""" +680 48 optimizer """adam""" +680 48 training_loop """owa""" +680 48 negative_sampler """basic""" +680 48 evaluator """rankbased""" +680 49 dataset """wn18rr""" +680 49 model """simple""" +680 49 loss """softplus""" +680 49 regularizer """no""" +680 49 optimizer """adam""" +680 49 training_loop """owa""" +680 49 negative_sampler """basic""" +680 49 evaluator """rankbased""" +680 50 dataset """wn18rr""" +680 50 model """simple""" +680 50 loss """softplus""" +680 50 regularizer """no""" +680 50 optimizer """adam""" +680 50 training_loop """owa""" +680 50 negative_sampler """basic""" +680 50 evaluator """rankbased""" +680 51 dataset """wn18rr""" +680 51 model """simple""" +680 51 loss """softplus""" +680 51 regularizer """no""" +680 51 optimizer """adam""" +680 51 training_loop """owa""" +680 51 negative_sampler """basic""" +680 51 evaluator """rankbased""" +680 52 dataset """wn18rr""" +680 52 model """simple""" +680 52 loss """softplus""" +680 52 regularizer """no""" +680 52 optimizer """adam""" +680 52 training_loop """owa""" +680 52 negative_sampler """basic""" +680 52 evaluator """rankbased""" +680 53 dataset """wn18rr""" +680 53 model """simple""" +680 53 loss """softplus""" +680 53 regularizer """no""" +680 53 optimizer """adam""" +680 53 training_loop """owa""" +680 53 negative_sampler """basic""" +680 53 evaluator """rankbased""" +681 1 model.embedding_dim 2.0 +681 1 optimizer.lr 0.019252258381812427 +681 1 
negative_sampler.num_negs_per_pos 20.0 +681 1 training.batch_size 2.0 +681 2 model.embedding_dim 1.0 +681 2 optimizer.lr 0.006638340811991213 +681 2 negative_sampler.num_negs_per_pos 59.0 +681 2 training.batch_size 2.0 +681 3 model.embedding_dim 2.0 +681 3 optimizer.lr 0.01287127649840119 +681 3 negative_sampler.num_negs_per_pos 28.0 +681 3 training.batch_size 0.0 +681 4 model.embedding_dim 2.0 +681 4 optimizer.lr 0.003752789233478965 +681 4 negative_sampler.num_negs_per_pos 97.0 +681 4 training.batch_size 1.0 +681 5 model.embedding_dim 1.0 +681 5 optimizer.lr 0.0013198909568654118 +681 5 negative_sampler.num_negs_per_pos 14.0 +681 5 training.batch_size 0.0 +681 6 model.embedding_dim 0.0 +681 6 optimizer.lr 0.0015695242540400749 +681 6 negative_sampler.num_negs_per_pos 98.0 +681 6 training.batch_size 0.0 +681 7 model.embedding_dim 0.0 +681 7 optimizer.lr 0.015645940158798752 +681 7 negative_sampler.num_negs_per_pos 45.0 +681 7 training.batch_size 0.0 +681 8 model.embedding_dim 0.0 +681 8 optimizer.lr 0.014908919592300637 +681 8 negative_sampler.num_negs_per_pos 34.0 +681 8 training.batch_size 2.0 +681 9 model.embedding_dim 1.0 +681 9 optimizer.lr 0.001064093024327227 +681 9 negative_sampler.num_negs_per_pos 21.0 +681 9 training.batch_size 2.0 +681 10 model.embedding_dim 2.0 +681 10 optimizer.lr 0.03643926221103245 +681 10 negative_sampler.num_negs_per_pos 20.0 +681 10 training.batch_size 0.0 +681 11 model.embedding_dim 1.0 +681 11 optimizer.lr 0.09304705411458397 +681 11 negative_sampler.num_negs_per_pos 1.0 +681 11 training.batch_size 0.0 +681 12 model.embedding_dim 0.0 +681 12 optimizer.lr 0.01475146625591763 +681 12 negative_sampler.num_negs_per_pos 83.0 +681 12 training.batch_size 2.0 +681 13 model.embedding_dim 1.0 +681 13 optimizer.lr 0.002198577653993586 +681 13 negative_sampler.num_negs_per_pos 98.0 +681 13 training.batch_size 2.0 +681 14 model.embedding_dim 2.0 +681 14 optimizer.lr 0.02672694532032031 +681 14 negative_sampler.num_negs_per_pos 14.0 +681 14 
training.batch_size 1.0 +681 15 model.embedding_dim 0.0 +681 15 optimizer.lr 0.02595976947052842 +681 15 negative_sampler.num_negs_per_pos 76.0 +681 15 training.batch_size 2.0 +681 16 model.embedding_dim 1.0 +681 16 optimizer.lr 0.045915497811516354 +681 16 negative_sampler.num_negs_per_pos 72.0 +681 16 training.batch_size 1.0 +681 17 model.embedding_dim 2.0 +681 17 optimizer.lr 0.023378287195328567 +681 17 negative_sampler.num_negs_per_pos 22.0 +681 17 training.batch_size 1.0 +681 18 model.embedding_dim 0.0 +681 18 optimizer.lr 0.0010601581580490494 +681 18 negative_sampler.num_negs_per_pos 52.0 +681 18 training.batch_size 1.0 +681 19 model.embedding_dim 1.0 +681 19 optimizer.lr 0.07929563592492359 +681 19 negative_sampler.num_negs_per_pos 79.0 +681 19 training.batch_size 2.0 +681 20 model.embedding_dim 0.0 +681 20 optimizer.lr 0.0017485895842804756 +681 20 negative_sampler.num_negs_per_pos 62.0 +681 20 training.batch_size 1.0 +681 21 model.embedding_dim 0.0 +681 21 optimizer.lr 0.09090061494564482 +681 21 negative_sampler.num_negs_per_pos 11.0 +681 21 training.batch_size 1.0 +681 22 model.embedding_dim 2.0 +681 22 optimizer.lr 0.019336807486808386 +681 22 negative_sampler.num_negs_per_pos 42.0 +681 22 training.batch_size 1.0 +681 23 model.embedding_dim 0.0 +681 23 optimizer.lr 0.0018557674289786713 +681 23 negative_sampler.num_negs_per_pos 43.0 +681 23 training.batch_size 1.0 +681 24 model.embedding_dim 2.0 +681 24 optimizer.lr 0.055638979480301325 +681 24 negative_sampler.num_negs_per_pos 45.0 +681 24 training.batch_size 0.0 +681 25 model.embedding_dim 2.0 +681 25 optimizer.lr 0.0028741652221010855 +681 25 negative_sampler.num_negs_per_pos 49.0 +681 25 training.batch_size 0.0 +681 26 model.embedding_dim 1.0 +681 26 optimizer.lr 0.002063523949059511 +681 26 negative_sampler.num_negs_per_pos 80.0 +681 26 training.batch_size 2.0 +681 27 model.embedding_dim 1.0 +681 27 optimizer.lr 0.001256772464478213 +681 27 negative_sampler.num_negs_per_pos 4.0 +681 27 
training.batch_size 2.0 +681 28 model.embedding_dim 1.0 +681 28 optimizer.lr 0.003132302664369113 +681 28 negative_sampler.num_negs_per_pos 38.0 +681 28 training.batch_size 0.0 +681 29 model.embedding_dim 0.0 +681 29 optimizer.lr 0.004107979641108874 +681 29 negative_sampler.num_negs_per_pos 4.0 +681 29 training.batch_size 0.0 +681 30 model.embedding_dim 0.0 +681 30 optimizer.lr 0.021528186831435606 +681 30 negative_sampler.num_negs_per_pos 8.0 +681 30 training.batch_size 0.0 +681 31 model.embedding_dim 1.0 +681 31 optimizer.lr 0.03227649784116 +681 31 negative_sampler.num_negs_per_pos 10.0 +681 31 training.batch_size 0.0 +681 32 model.embedding_dim 1.0 +681 32 optimizer.lr 0.07112438113163357 +681 32 negative_sampler.num_negs_per_pos 31.0 +681 32 training.batch_size 1.0 +681 33 model.embedding_dim 0.0 +681 33 optimizer.lr 0.07782382357913506 +681 33 negative_sampler.num_negs_per_pos 34.0 +681 33 training.batch_size 1.0 +681 34 model.embedding_dim 0.0 +681 34 optimizer.lr 0.012660569675109142 +681 34 negative_sampler.num_negs_per_pos 33.0 +681 34 training.batch_size 1.0 +681 35 model.embedding_dim 0.0 +681 35 optimizer.lr 0.004404989170928243 +681 35 negative_sampler.num_negs_per_pos 53.0 +681 35 training.batch_size 1.0 +681 36 model.embedding_dim 2.0 +681 36 optimizer.lr 0.026246150199122718 +681 36 negative_sampler.num_negs_per_pos 81.0 +681 36 training.batch_size 1.0 +681 37 model.embedding_dim 1.0 +681 37 optimizer.lr 0.00798700840382074 +681 37 negative_sampler.num_negs_per_pos 71.0 +681 37 training.batch_size 0.0 +681 38 model.embedding_dim 0.0 +681 38 optimizer.lr 0.001998083857288785 +681 38 negative_sampler.num_negs_per_pos 85.0 +681 38 training.batch_size 2.0 +681 39 model.embedding_dim 2.0 +681 39 optimizer.lr 0.004460864137793265 +681 39 negative_sampler.num_negs_per_pos 27.0 +681 39 training.batch_size 0.0 +681 40 model.embedding_dim 0.0 +681 40 optimizer.lr 0.007917809040970883 +681 40 negative_sampler.num_negs_per_pos 85.0 +681 40 training.batch_size 
0.0 +681 41 model.embedding_dim 1.0 +681 41 optimizer.lr 0.03658616111123509 +681 41 negative_sampler.num_negs_per_pos 47.0 +681 41 training.batch_size 1.0 +681 42 model.embedding_dim 1.0 +681 42 optimizer.lr 0.0045352472852540245 +681 42 negative_sampler.num_negs_per_pos 66.0 +681 42 training.batch_size 0.0 +681 43 model.embedding_dim 1.0 +681 43 optimizer.lr 0.01958229156301115 +681 43 negative_sampler.num_negs_per_pos 77.0 +681 43 training.batch_size 0.0 +681 44 model.embedding_dim 1.0 +681 44 optimizer.lr 0.022308440033330792 +681 44 negative_sampler.num_negs_per_pos 13.0 +681 44 training.batch_size 2.0 +681 45 model.embedding_dim 2.0 +681 45 optimizer.lr 0.0012583285504490344 +681 45 negative_sampler.num_negs_per_pos 16.0 +681 45 training.batch_size 1.0 +681 46 model.embedding_dim 2.0 +681 46 optimizer.lr 0.0029861670081385407 +681 46 negative_sampler.num_negs_per_pos 72.0 +681 46 training.batch_size 1.0 +681 47 model.embedding_dim 2.0 +681 47 optimizer.lr 0.06482514962009281 +681 47 negative_sampler.num_negs_per_pos 47.0 +681 47 training.batch_size 1.0 +681 48 model.embedding_dim 0.0 +681 48 optimizer.lr 0.02956780979003114 +681 48 negative_sampler.num_negs_per_pos 85.0 +681 48 training.batch_size 2.0 +681 49 model.embedding_dim 2.0 +681 49 optimizer.lr 0.031844009730498044 +681 49 negative_sampler.num_negs_per_pos 48.0 +681 49 training.batch_size 2.0 +681 50 model.embedding_dim 1.0 +681 50 optimizer.lr 0.002759794415258126 +681 50 negative_sampler.num_negs_per_pos 19.0 +681 50 training.batch_size 2.0 +681 51 model.embedding_dim 1.0 +681 51 optimizer.lr 0.004763891404249437 +681 51 negative_sampler.num_negs_per_pos 83.0 +681 51 training.batch_size 2.0 +681 52 model.embedding_dim 2.0 +681 52 optimizer.lr 0.005446503492200789 +681 52 negative_sampler.num_negs_per_pos 51.0 +681 52 training.batch_size 0.0 +681 53 model.embedding_dim 2.0 +681 53 optimizer.lr 0.019423237757242803 +681 53 negative_sampler.num_negs_per_pos 16.0 +681 53 training.batch_size 2.0 +681 54 
model.embedding_dim 1.0 +681 54 optimizer.lr 0.0011048831306804537 +681 54 negative_sampler.num_negs_per_pos 3.0 +681 54 training.batch_size 2.0 +681 55 model.embedding_dim 2.0 +681 55 optimizer.lr 0.0046329282104138475 +681 55 negative_sampler.num_negs_per_pos 37.0 +681 55 training.batch_size 0.0 +681 56 model.embedding_dim 2.0 +681 56 optimizer.lr 0.02890291478126892 +681 56 negative_sampler.num_negs_per_pos 13.0 +681 56 training.batch_size 0.0 +681 57 model.embedding_dim 1.0 +681 57 optimizer.lr 0.0010031251583484124 +681 57 negative_sampler.num_negs_per_pos 89.0 +681 57 training.batch_size 2.0 +681 58 model.embedding_dim 0.0 +681 58 optimizer.lr 0.05369500144733066 +681 58 negative_sampler.num_negs_per_pos 68.0 +681 58 training.batch_size 2.0 +681 59 model.embedding_dim 1.0 +681 59 optimizer.lr 0.005796557839623626 +681 59 negative_sampler.num_negs_per_pos 20.0 +681 59 training.batch_size 1.0 +681 60 model.embedding_dim 2.0 +681 60 optimizer.lr 0.02607459264613116 +681 60 negative_sampler.num_negs_per_pos 27.0 +681 60 training.batch_size 2.0 +681 61 model.embedding_dim 0.0 +681 61 optimizer.lr 0.052056522108587425 +681 61 negative_sampler.num_negs_per_pos 92.0 +681 61 training.batch_size 2.0 +681 62 model.embedding_dim 2.0 +681 62 optimizer.lr 0.001165492931534121 +681 62 negative_sampler.num_negs_per_pos 62.0 +681 62 training.batch_size 0.0 +681 63 model.embedding_dim 2.0 +681 63 optimizer.lr 0.0018966420327710207 +681 63 negative_sampler.num_negs_per_pos 95.0 +681 63 training.batch_size 0.0 +681 64 model.embedding_dim 1.0 +681 64 optimizer.lr 0.08535320623047056 +681 64 negative_sampler.num_negs_per_pos 64.0 +681 64 training.batch_size 1.0 +681 65 model.embedding_dim 0.0 +681 65 optimizer.lr 0.0016179703555224052 +681 65 negative_sampler.num_negs_per_pos 72.0 +681 65 training.batch_size 0.0 +681 66 model.embedding_dim 2.0 +681 66 optimizer.lr 0.015751847165461096 +681 66 negative_sampler.num_negs_per_pos 14.0 +681 66 training.batch_size 2.0 +681 67 
model.embedding_dim 0.0 +681 67 optimizer.lr 0.010449250281045696 +681 67 negative_sampler.num_negs_per_pos 53.0 +681 67 training.batch_size 1.0 +681 68 model.embedding_dim 0.0 +681 68 optimizer.lr 0.01058323111068448 +681 68 negative_sampler.num_negs_per_pos 76.0 +681 68 training.batch_size 0.0 +681 69 model.embedding_dim 0.0 +681 69 optimizer.lr 0.02873514007119746 +681 69 negative_sampler.num_negs_per_pos 80.0 +681 69 training.batch_size 0.0 +681 70 model.embedding_dim 1.0 +681 70 optimizer.lr 0.024447091834955543 +681 70 negative_sampler.num_negs_per_pos 55.0 +681 70 training.batch_size 2.0 +681 71 model.embedding_dim 2.0 +681 71 optimizer.lr 0.0011605240419126624 +681 71 negative_sampler.num_negs_per_pos 32.0 +681 71 training.batch_size 0.0 +681 72 model.embedding_dim 0.0 +681 72 optimizer.lr 0.0016717410680720584 +681 72 negative_sampler.num_negs_per_pos 8.0 +681 72 training.batch_size 1.0 +681 73 model.embedding_dim 2.0 +681 73 optimizer.lr 0.017694299849176292 +681 73 negative_sampler.num_negs_per_pos 80.0 +681 73 training.batch_size 2.0 +681 74 model.embedding_dim 2.0 +681 74 optimizer.lr 0.08926322359130771 +681 74 negative_sampler.num_negs_per_pos 38.0 +681 74 training.batch_size 1.0 +681 75 model.embedding_dim 1.0 +681 75 optimizer.lr 0.0020557492310164867 +681 75 negative_sampler.num_negs_per_pos 8.0 +681 75 training.batch_size 1.0 +681 76 model.embedding_dim 2.0 +681 76 optimizer.lr 0.018575329491193088 +681 76 negative_sampler.num_negs_per_pos 95.0 +681 76 training.batch_size 0.0 +681 77 model.embedding_dim 2.0 +681 77 optimizer.lr 0.004936690283203362 +681 77 negative_sampler.num_negs_per_pos 85.0 +681 77 training.batch_size 2.0 +681 78 model.embedding_dim 1.0 +681 78 optimizer.lr 0.06420756427210772 +681 78 negative_sampler.num_negs_per_pos 42.0 +681 78 training.batch_size 2.0 +681 79 model.embedding_dim 2.0 +681 79 optimizer.lr 0.0018529157490251294 +681 79 negative_sampler.num_negs_per_pos 52.0 +681 79 training.batch_size 2.0 +681 80 
model.embedding_dim 1.0 +681 80 optimizer.lr 0.002480370145178266 +681 80 negative_sampler.num_negs_per_pos 47.0 +681 80 training.batch_size 1.0 +681 1 dataset """wn18rr""" +681 1 model """simple""" +681 1 loss """bceaftersigmoid""" +681 1 regularizer """no""" +681 1 optimizer """adam""" +681 1 training_loop """owa""" +681 1 negative_sampler """basic""" +681 1 evaluator """rankbased""" +681 2 dataset """wn18rr""" +681 2 model """simple""" +681 2 loss """bceaftersigmoid""" +681 2 regularizer """no""" +681 2 optimizer """adam""" +681 2 training_loop """owa""" +681 2 negative_sampler """basic""" +681 2 evaluator """rankbased""" +681 3 dataset """wn18rr""" +681 3 model """simple""" +681 3 loss """bceaftersigmoid""" +681 3 regularizer """no""" +681 3 optimizer """adam""" +681 3 training_loop """owa""" +681 3 negative_sampler """basic""" +681 3 evaluator """rankbased""" +681 4 dataset """wn18rr""" +681 4 model """simple""" +681 4 loss """bceaftersigmoid""" +681 4 regularizer """no""" +681 4 optimizer """adam""" +681 4 training_loop """owa""" +681 4 negative_sampler """basic""" +681 4 evaluator """rankbased""" +681 5 dataset """wn18rr""" +681 5 model """simple""" +681 5 loss """bceaftersigmoid""" +681 5 regularizer """no""" +681 5 optimizer """adam""" +681 5 training_loop """owa""" +681 5 negative_sampler """basic""" +681 5 evaluator """rankbased""" +681 6 dataset """wn18rr""" +681 6 model """simple""" +681 6 loss """bceaftersigmoid""" +681 6 regularizer """no""" +681 6 optimizer """adam""" +681 6 training_loop """owa""" +681 6 negative_sampler """basic""" +681 6 evaluator """rankbased""" +681 7 dataset """wn18rr""" +681 7 model """simple""" +681 7 loss """bceaftersigmoid""" +681 7 regularizer """no""" +681 7 optimizer """adam""" +681 7 training_loop """owa""" +681 7 negative_sampler """basic""" +681 7 evaluator """rankbased""" +681 8 dataset """wn18rr""" +681 8 model """simple""" +681 8 loss """bceaftersigmoid""" +681 8 regularizer """no""" +681 8 optimizer """adam""" 
+681 8 training_loop """owa""" +681 8 negative_sampler """basic""" +681 8 evaluator """rankbased""" +681 9 dataset """wn18rr""" +681 9 model """simple""" +681 9 loss """bceaftersigmoid""" +681 9 regularizer """no""" +681 9 optimizer """adam""" +681 9 training_loop """owa""" +681 9 negative_sampler """basic""" +681 9 evaluator """rankbased""" +681 10 dataset """wn18rr""" +681 10 model """simple""" +681 10 loss """bceaftersigmoid""" +681 10 regularizer """no""" +681 10 optimizer """adam""" +681 10 training_loop """owa""" +681 10 negative_sampler """basic""" +681 10 evaluator """rankbased""" +681 11 dataset """wn18rr""" +681 11 model """simple""" +681 11 loss """bceaftersigmoid""" +681 11 regularizer """no""" +681 11 optimizer """adam""" +681 11 training_loop """owa""" +681 11 negative_sampler """basic""" +681 11 evaluator """rankbased""" +681 12 dataset """wn18rr""" +681 12 model """simple""" +681 12 loss """bceaftersigmoid""" +681 12 regularizer """no""" +681 12 optimizer """adam""" +681 12 training_loop """owa""" +681 12 negative_sampler """basic""" +681 12 evaluator """rankbased""" +681 13 dataset """wn18rr""" +681 13 model """simple""" +681 13 loss """bceaftersigmoid""" +681 13 regularizer """no""" +681 13 optimizer """adam""" +681 13 training_loop """owa""" +681 13 negative_sampler """basic""" +681 13 evaluator """rankbased""" +681 14 dataset """wn18rr""" +681 14 model """simple""" +681 14 loss """bceaftersigmoid""" +681 14 regularizer """no""" +681 14 optimizer """adam""" +681 14 training_loop """owa""" +681 14 negative_sampler """basic""" +681 14 evaluator """rankbased""" +681 15 dataset """wn18rr""" +681 15 model """simple""" +681 15 loss """bceaftersigmoid""" +681 15 regularizer """no""" +681 15 optimizer """adam""" +681 15 training_loop """owa""" +681 15 negative_sampler """basic""" +681 15 evaluator """rankbased""" +681 16 dataset """wn18rr""" +681 16 model """simple""" +681 16 loss """bceaftersigmoid""" +681 16 regularizer """no""" +681 16 optimizer 
"""adam""" +681 16 training_loop """owa""" +681 16 negative_sampler """basic""" +681 16 evaluator """rankbased""" +681 17 dataset """wn18rr""" +681 17 model """simple""" +681 17 loss """bceaftersigmoid""" +681 17 regularizer """no""" +681 17 optimizer """adam""" +681 17 training_loop """owa""" +681 17 negative_sampler """basic""" +681 17 evaluator """rankbased""" +681 18 dataset """wn18rr""" +681 18 model """simple""" +681 18 loss """bceaftersigmoid""" +681 18 regularizer """no""" +681 18 optimizer """adam""" +681 18 training_loop """owa""" +681 18 negative_sampler """basic""" +681 18 evaluator """rankbased""" +681 19 dataset """wn18rr""" +681 19 model """simple""" +681 19 loss """bceaftersigmoid""" +681 19 regularizer """no""" +681 19 optimizer """adam""" +681 19 training_loop """owa""" +681 19 negative_sampler """basic""" +681 19 evaluator """rankbased""" +681 20 dataset """wn18rr""" +681 20 model """simple""" +681 20 loss """bceaftersigmoid""" +681 20 regularizer """no""" +681 20 optimizer """adam""" +681 20 training_loop """owa""" +681 20 negative_sampler """basic""" +681 20 evaluator """rankbased""" +681 21 dataset """wn18rr""" +681 21 model """simple""" +681 21 loss """bceaftersigmoid""" +681 21 regularizer """no""" +681 21 optimizer """adam""" +681 21 training_loop """owa""" +681 21 negative_sampler """basic""" +681 21 evaluator """rankbased""" +681 22 dataset """wn18rr""" +681 22 model """simple""" +681 22 loss """bceaftersigmoid""" +681 22 regularizer """no""" +681 22 optimizer """adam""" +681 22 training_loop """owa""" +681 22 negative_sampler """basic""" +681 22 evaluator """rankbased""" +681 23 dataset """wn18rr""" +681 23 model """simple""" +681 23 loss """bceaftersigmoid""" +681 23 regularizer """no""" +681 23 optimizer """adam""" +681 23 training_loop """owa""" +681 23 negative_sampler """basic""" +681 23 evaluator """rankbased""" +681 24 dataset """wn18rr""" +681 24 model """simple""" +681 24 loss """bceaftersigmoid""" +681 24 regularizer """no""" 
+681 24 optimizer """adam""" +681 24 training_loop """owa""" +681 24 negative_sampler """basic""" +681 24 evaluator """rankbased""" +681 25 dataset """wn18rr""" +681 25 model """simple""" +681 25 loss """bceaftersigmoid""" +681 25 regularizer """no""" +681 25 optimizer """adam""" +681 25 training_loop """owa""" +681 25 negative_sampler """basic""" +681 25 evaluator """rankbased""" +681 26 dataset """wn18rr""" +681 26 model """simple""" +681 26 loss """bceaftersigmoid""" +681 26 regularizer """no""" +681 26 optimizer """adam""" +681 26 training_loop """owa""" +681 26 negative_sampler """basic""" +681 26 evaluator """rankbased""" +681 27 dataset """wn18rr""" +681 27 model """simple""" +681 27 loss """bceaftersigmoid""" +681 27 regularizer """no""" +681 27 optimizer """adam""" +681 27 training_loop """owa""" +681 27 negative_sampler """basic""" +681 27 evaluator """rankbased""" +681 28 dataset """wn18rr""" +681 28 model """simple""" +681 28 loss """bceaftersigmoid""" +681 28 regularizer """no""" +681 28 optimizer """adam""" +681 28 training_loop """owa""" +681 28 negative_sampler """basic""" +681 28 evaluator """rankbased""" +681 29 dataset """wn18rr""" +681 29 model """simple""" +681 29 loss """bceaftersigmoid""" +681 29 regularizer """no""" +681 29 optimizer """adam""" +681 29 training_loop """owa""" +681 29 negative_sampler """basic""" +681 29 evaluator """rankbased""" +681 30 dataset """wn18rr""" +681 30 model """simple""" +681 30 loss """bceaftersigmoid""" +681 30 regularizer """no""" +681 30 optimizer """adam""" +681 30 training_loop """owa""" +681 30 negative_sampler """basic""" +681 30 evaluator """rankbased""" +681 31 dataset """wn18rr""" +681 31 model """simple""" +681 31 loss """bceaftersigmoid""" +681 31 regularizer """no""" +681 31 optimizer """adam""" +681 31 training_loop """owa""" +681 31 negative_sampler """basic""" +681 31 evaluator """rankbased""" +681 32 dataset """wn18rr""" +681 32 model """simple""" +681 32 loss """bceaftersigmoid""" +681 32 
regularizer """no""" +681 32 optimizer """adam""" +681 32 training_loop """owa""" +681 32 negative_sampler """basic""" +681 32 evaluator """rankbased""" +681 33 dataset """wn18rr""" +681 33 model """simple""" +681 33 loss """bceaftersigmoid""" +681 33 regularizer """no""" +681 33 optimizer """adam""" +681 33 training_loop """owa""" +681 33 negative_sampler """basic""" +681 33 evaluator """rankbased""" +681 34 dataset """wn18rr""" +681 34 model """simple""" +681 34 loss """bceaftersigmoid""" +681 34 regularizer """no""" +681 34 optimizer """adam""" +681 34 training_loop """owa""" +681 34 negative_sampler """basic""" +681 34 evaluator """rankbased""" +681 35 dataset """wn18rr""" +681 35 model """simple""" +681 35 loss """bceaftersigmoid""" +681 35 regularizer """no""" +681 35 optimizer """adam""" +681 35 training_loop """owa""" +681 35 negative_sampler """basic""" +681 35 evaluator """rankbased""" +681 36 dataset """wn18rr""" +681 36 model """simple""" +681 36 loss """bceaftersigmoid""" +681 36 regularizer """no""" +681 36 optimizer """adam""" +681 36 training_loop """owa""" +681 36 negative_sampler """basic""" +681 36 evaluator """rankbased""" +681 37 dataset """wn18rr""" +681 37 model """simple""" +681 37 loss """bceaftersigmoid""" +681 37 regularizer """no""" +681 37 optimizer """adam""" +681 37 training_loop """owa""" +681 37 negative_sampler """basic""" +681 37 evaluator """rankbased""" +681 38 dataset """wn18rr""" +681 38 model """simple""" +681 38 loss """bceaftersigmoid""" +681 38 regularizer """no""" +681 38 optimizer """adam""" +681 38 training_loop """owa""" +681 38 negative_sampler """basic""" +681 38 evaluator """rankbased""" +681 39 dataset """wn18rr""" +681 39 model """simple""" +681 39 loss """bceaftersigmoid""" +681 39 regularizer """no""" +681 39 optimizer """adam""" +681 39 training_loop """owa""" +681 39 negative_sampler """basic""" +681 39 evaluator """rankbased""" +681 40 dataset """wn18rr""" +681 40 model """simple""" +681 40 loss 
"""bceaftersigmoid""" +681 40 regularizer """no""" +681 40 optimizer """adam""" +681 40 training_loop """owa""" +681 40 negative_sampler """basic""" +681 40 evaluator """rankbased""" +681 41 dataset """wn18rr""" +681 41 model """simple""" +681 41 loss """bceaftersigmoid""" +681 41 regularizer """no""" +681 41 optimizer """adam""" +681 41 training_loop """owa""" +681 41 negative_sampler """basic""" +681 41 evaluator """rankbased""" +681 42 dataset """wn18rr""" +681 42 model """simple""" +681 42 loss """bceaftersigmoid""" +681 42 regularizer """no""" +681 42 optimizer """adam""" +681 42 training_loop """owa""" +681 42 negative_sampler """basic""" +681 42 evaluator """rankbased""" +681 43 dataset """wn18rr""" +681 43 model """simple""" +681 43 loss """bceaftersigmoid""" +681 43 regularizer """no""" +681 43 optimizer """adam""" +681 43 training_loop """owa""" +681 43 negative_sampler """basic""" +681 43 evaluator """rankbased""" +681 44 dataset """wn18rr""" +681 44 model """simple""" +681 44 loss """bceaftersigmoid""" +681 44 regularizer """no""" +681 44 optimizer """adam""" +681 44 training_loop """owa""" +681 44 negative_sampler """basic""" +681 44 evaluator """rankbased""" +681 45 dataset """wn18rr""" +681 45 model """simple""" +681 45 loss """bceaftersigmoid""" +681 45 regularizer """no""" +681 45 optimizer """adam""" +681 45 training_loop """owa""" +681 45 negative_sampler """basic""" +681 45 evaluator """rankbased""" +681 46 dataset """wn18rr""" +681 46 model """simple""" +681 46 loss """bceaftersigmoid""" +681 46 regularizer """no""" +681 46 optimizer """adam""" +681 46 training_loop """owa""" +681 46 negative_sampler """basic""" +681 46 evaluator """rankbased""" +681 47 dataset """wn18rr""" +681 47 model """simple""" +681 47 loss """bceaftersigmoid""" +681 47 regularizer """no""" +681 47 optimizer """adam""" +681 47 training_loop """owa""" +681 47 negative_sampler """basic""" +681 47 evaluator """rankbased""" +681 48 dataset """wn18rr""" +681 48 model 
"""simple""" +681 48 loss """bceaftersigmoid""" +681 48 regularizer """no""" +681 48 optimizer """adam""" +681 48 training_loop """owa""" +681 48 negative_sampler """basic""" +681 48 evaluator """rankbased""" +681 49 dataset """wn18rr""" +681 49 model """simple""" +681 49 loss """bceaftersigmoid""" +681 49 regularizer """no""" +681 49 optimizer """adam""" +681 49 training_loop """owa""" +681 49 negative_sampler """basic""" +681 49 evaluator """rankbased""" +681 50 dataset """wn18rr""" +681 50 model """simple""" +681 50 loss """bceaftersigmoid""" +681 50 regularizer """no""" +681 50 optimizer """adam""" +681 50 training_loop """owa""" +681 50 negative_sampler """basic""" +681 50 evaluator """rankbased""" +681 51 dataset """wn18rr""" +681 51 model """simple""" +681 51 loss """bceaftersigmoid""" +681 51 regularizer """no""" +681 51 optimizer """adam""" +681 51 training_loop """owa""" +681 51 negative_sampler """basic""" +681 51 evaluator """rankbased""" +681 52 dataset """wn18rr""" +681 52 model """simple""" +681 52 loss """bceaftersigmoid""" +681 52 regularizer """no""" +681 52 optimizer """adam""" +681 52 training_loop """owa""" +681 52 negative_sampler """basic""" +681 52 evaluator """rankbased""" +681 53 dataset """wn18rr""" +681 53 model """simple""" +681 53 loss """bceaftersigmoid""" +681 53 regularizer """no""" +681 53 optimizer """adam""" +681 53 training_loop """owa""" +681 53 negative_sampler """basic""" +681 53 evaluator """rankbased""" +681 54 dataset """wn18rr""" +681 54 model """simple""" +681 54 loss """bceaftersigmoid""" +681 54 regularizer """no""" +681 54 optimizer """adam""" +681 54 training_loop """owa""" +681 54 negative_sampler """basic""" +681 54 evaluator """rankbased""" +681 55 dataset """wn18rr""" +681 55 model """simple""" +681 55 loss """bceaftersigmoid""" +681 55 regularizer """no""" +681 55 optimizer """adam""" +681 55 training_loop """owa""" +681 55 negative_sampler """basic""" +681 55 evaluator """rankbased""" +681 56 dataset 
"""wn18rr""" +681 56 model """simple""" +681 56 loss """bceaftersigmoid""" +681 56 regularizer """no""" +681 56 optimizer """adam""" +681 56 training_loop """owa""" +681 56 negative_sampler """basic""" +681 56 evaluator """rankbased""" +681 57 dataset """wn18rr""" +681 57 model """simple""" +681 57 loss """bceaftersigmoid""" +681 57 regularizer """no""" +681 57 optimizer """adam""" +681 57 training_loop """owa""" +681 57 negative_sampler """basic""" +681 57 evaluator """rankbased""" +681 58 dataset """wn18rr""" +681 58 model """simple""" +681 58 loss """bceaftersigmoid""" +681 58 regularizer """no""" +681 58 optimizer """adam""" +681 58 training_loop """owa""" +681 58 negative_sampler """basic""" +681 58 evaluator """rankbased""" +681 59 dataset """wn18rr""" +681 59 model """simple""" +681 59 loss """bceaftersigmoid""" +681 59 regularizer """no""" +681 59 optimizer """adam""" +681 59 training_loop """owa""" +681 59 negative_sampler """basic""" +681 59 evaluator """rankbased""" +681 60 dataset """wn18rr""" +681 60 model """simple""" +681 60 loss """bceaftersigmoid""" +681 60 regularizer """no""" +681 60 optimizer """adam""" +681 60 training_loop """owa""" +681 60 negative_sampler """basic""" +681 60 evaluator """rankbased""" +681 61 dataset """wn18rr""" +681 61 model """simple""" +681 61 loss """bceaftersigmoid""" +681 61 regularizer """no""" +681 61 optimizer """adam""" +681 61 training_loop """owa""" +681 61 negative_sampler """basic""" +681 61 evaluator """rankbased""" +681 62 dataset """wn18rr""" +681 62 model """simple""" +681 62 loss """bceaftersigmoid""" +681 62 regularizer """no""" +681 62 optimizer """adam""" +681 62 training_loop """owa""" +681 62 negative_sampler """basic""" +681 62 evaluator """rankbased""" +681 63 dataset """wn18rr""" +681 63 model """simple""" +681 63 loss """bceaftersigmoid""" +681 63 regularizer """no""" +681 63 optimizer """adam""" +681 63 training_loop """owa""" +681 63 negative_sampler """basic""" +681 63 evaluator """rankbased""" 
+681 64 dataset """wn18rr""" +681 64 model """simple""" +681 64 loss """bceaftersigmoid""" +681 64 regularizer """no""" +681 64 optimizer """adam""" +681 64 training_loop """owa""" +681 64 negative_sampler """basic""" +681 64 evaluator """rankbased""" +681 65 dataset """wn18rr""" +681 65 model """simple""" +681 65 loss """bceaftersigmoid""" +681 65 regularizer """no""" +681 65 optimizer """adam""" +681 65 training_loop """owa""" +681 65 negative_sampler """basic""" +681 65 evaluator """rankbased""" +681 66 dataset """wn18rr""" +681 66 model """simple""" +681 66 loss """bceaftersigmoid""" +681 66 regularizer """no""" +681 66 optimizer """adam""" +681 66 training_loop """owa""" +681 66 negative_sampler """basic""" +681 66 evaluator """rankbased""" +681 67 dataset """wn18rr""" +681 67 model """simple""" +681 67 loss """bceaftersigmoid""" +681 67 regularizer """no""" +681 67 optimizer """adam""" +681 67 training_loop """owa""" +681 67 negative_sampler """basic""" +681 67 evaluator """rankbased""" +681 68 dataset """wn18rr""" +681 68 model """simple""" +681 68 loss """bceaftersigmoid""" +681 68 regularizer """no""" +681 68 optimizer """adam""" +681 68 training_loop """owa""" +681 68 negative_sampler """basic""" +681 68 evaluator """rankbased""" +681 69 dataset """wn18rr""" +681 69 model """simple""" +681 69 loss """bceaftersigmoid""" +681 69 regularizer """no""" +681 69 optimizer """adam""" +681 69 training_loop """owa""" +681 69 negative_sampler """basic""" +681 69 evaluator """rankbased""" +681 70 dataset """wn18rr""" +681 70 model """simple""" +681 70 loss """bceaftersigmoid""" +681 70 regularizer """no""" +681 70 optimizer """adam""" +681 70 training_loop """owa""" +681 70 negative_sampler """basic""" +681 70 evaluator """rankbased""" +681 71 dataset """wn18rr""" +681 71 model """simple""" +681 71 loss """bceaftersigmoid""" +681 71 regularizer """no""" +681 71 optimizer """adam""" +681 71 training_loop """owa""" +681 71 negative_sampler """basic""" +681 71 evaluator 
"""rankbased""" +681 72 dataset """wn18rr""" +681 72 model """simple""" +681 72 loss """bceaftersigmoid""" +681 72 regularizer """no""" +681 72 optimizer """adam""" +681 72 training_loop """owa""" +681 72 negative_sampler """basic""" +681 72 evaluator """rankbased""" +681 73 dataset """wn18rr""" +681 73 model """simple""" +681 73 loss """bceaftersigmoid""" +681 73 regularizer """no""" +681 73 optimizer """adam""" +681 73 training_loop """owa""" +681 73 negative_sampler """basic""" +681 73 evaluator """rankbased""" +681 74 dataset """wn18rr""" +681 74 model """simple""" +681 74 loss """bceaftersigmoid""" +681 74 regularizer """no""" +681 74 optimizer """adam""" +681 74 training_loop """owa""" +681 74 negative_sampler """basic""" +681 74 evaluator """rankbased""" +681 75 dataset """wn18rr""" +681 75 model """simple""" +681 75 loss """bceaftersigmoid""" +681 75 regularizer """no""" +681 75 optimizer """adam""" +681 75 training_loop """owa""" +681 75 negative_sampler """basic""" +681 75 evaluator """rankbased""" +681 76 dataset """wn18rr""" +681 76 model """simple""" +681 76 loss """bceaftersigmoid""" +681 76 regularizer """no""" +681 76 optimizer """adam""" +681 76 training_loop """owa""" +681 76 negative_sampler """basic""" +681 76 evaluator """rankbased""" +681 77 dataset """wn18rr""" +681 77 model """simple""" +681 77 loss """bceaftersigmoid""" +681 77 regularizer """no""" +681 77 optimizer """adam""" +681 77 training_loop """owa""" +681 77 negative_sampler """basic""" +681 77 evaluator """rankbased""" +681 78 dataset """wn18rr""" +681 78 model """simple""" +681 78 loss """bceaftersigmoid""" +681 78 regularizer """no""" +681 78 optimizer """adam""" +681 78 training_loop """owa""" +681 78 negative_sampler """basic""" +681 78 evaluator """rankbased""" +681 79 dataset """wn18rr""" +681 79 model """simple""" +681 79 loss """bceaftersigmoid""" +681 79 regularizer """no""" +681 79 optimizer """adam""" +681 79 training_loop """owa""" +681 79 negative_sampler """basic""" 
+681 79 evaluator """rankbased""" +681 80 dataset """wn18rr""" +681 80 model """simple""" +681 80 loss """bceaftersigmoid""" +681 80 regularizer """no""" +681 80 optimizer """adam""" +681 80 training_loop """owa""" +681 80 negative_sampler """basic""" +681 80 evaluator """rankbased""" +682 1 model.embedding_dim 2.0 +682 1 optimizer.lr 0.0012102697192390122 +682 1 negative_sampler.num_negs_per_pos 45.0 +682 1 training.batch_size 0.0 +682 2 model.embedding_dim 2.0 +682 2 optimizer.lr 0.0029032948232189113 +682 2 negative_sampler.num_negs_per_pos 84.0 +682 2 training.batch_size 1.0 +682 3 model.embedding_dim 1.0 +682 3 optimizer.lr 0.0023147684845607947 +682 3 negative_sampler.num_negs_per_pos 77.0 +682 3 training.batch_size 1.0 +682 4 model.embedding_dim 1.0 +682 4 optimizer.lr 0.004419913032899294 +682 4 negative_sampler.num_negs_per_pos 97.0 +682 4 training.batch_size 2.0 +682 5 model.embedding_dim 0.0 +682 5 optimizer.lr 0.001609536182820733 +682 5 negative_sampler.num_negs_per_pos 5.0 +682 5 training.batch_size 0.0 +682 6 model.embedding_dim 0.0 +682 6 optimizer.lr 0.011612436769486539 +682 6 negative_sampler.num_negs_per_pos 41.0 +682 6 training.batch_size 1.0 +682 7 model.embedding_dim 1.0 +682 7 optimizer.lr 0.026396585072319446 +682 7 negative_sampler.num_negs_per_pos 76.0 +682 7 training.batch_size 1.0 +682 8 model.embedding_dim 1.0 +682 8 optimizer.lr 0.001338073940085894 +682 8 negative_sampler.num_negs_per_pos 57.0 +682 8 training.batch_size 2.0 +682 9 model.embedding_dim 0.0 +682 9 optimizer.lr 0.026614321805629424 +682 9 negative_sampler.num_negs_per_pos 48.0 +682 9 training.batch_size 0.0 +682 10 model.embedding_dim 1.0 +682 10 optimizer.lr 0.09316558879099973 +682 10 negative_sampler.num_negs_per_pos 25.0 +682 10 training.batch_size 2.0 +682 11 model.embedding_dim 2.0 +682 11 optimizer.lr 0.00897724978415383 +682 11 negative_sampler.num_negs_per_pos 58.0 +682 11 training.batch_size 2.0 +682 12 model.embedding_dim 2.0 +682 12 optimizer.lr 
0.011570182996159226 +682 12 negative_sampler.num_negs_per_pos 73.0 +682 12 training.batch_size 2.0 +682 13 model.embedding_dim 2.0 +682 13 optimizer.lr 0.0032131664264541436 +682 13 negative_sampler.num_negs_per_pos 96.0 +682 13 training.batch_size 2.0 +682 14 model.embedding_dim 0.0 +682 14 optimizer.lr 0.003418589459106703 +682 14 negative_sampler.num_negs_per_pos 8.0 +682 14 training.batch_size 0.0 +682 15 model.embedding_dim 1.0 +682 15 optimizer.lr 0.0021680121632124735 +682 15 negative_sampler.num_negs_per_pos 40.0 +682 15 training.batch_size 2.0 +682 16 model.embedding_dim 1.0 +682 16 optimizer.lr 0.028908912168120162 +682 16 negative_sampler.num_negs_per_pos 43.0 +682 16 training.batch_size 2.0 +682 17 model.embedding_dim 2.0 +682 17 optimizer.lr 0.0183180497493765 +682 17 negative_sampler.num_negs_per_pos 66.0 +682 17 training.batch_size 2.0 +682 18 model.embedding_dim 1.0 +682 18 optimizer.lr 0.033944086129451045 +682 18 negative_sampler.num_negs_per_pos 1.0 +682 18 training.batch_size 0.0 +682 19 model.embedding_dim 2.0 +682 19 optimizer.lr 0.005601179535708573 +682 19 negative_sampler.num_negs_per_pos 67.0 +682 19 training.batch_size 2.0 +682 20 model.embedding_dim 2.0 +682 20 optimizer.lr 0.002284803868901959 +682 20 negative_sampler.num_negs_per_pos 86.0 +682 20 training.batch_size 1.0 +682 21 model.embedding_dim 2.0 +682 21 optimizer.lr 0.0075464813298277665 +682 21 negative_sampler.num_negs_per_pos 20.0 +682 21 training.batch_size 1.0 +682 22 model.embedding_dim 2.0 +682 22 optimizer.lr 0.008407460701570203 +682 22 negative_sampler.num_negs_per_pos 83.0 +682 22 training.batch_size 0.0 +682 23 model.embedding_dim 2.0 +682 23 optimizer.lr 0.023269775174507068 +682 23 negative_sampler.num_negs_per_pos 5.0 +682 23 training.batch_size 2.0 +682 24 model.embedding_dim 0.0 +682 24 optimizer.lr 0.06991931820509084 +682 24 negative_sampler.num_negs_per_pos 37.0 +682 24 training.batch_size 0.0 +682 25 model.embedding_dim 2.0 +682 25 optimizer.lr 
0.001836404741766362 +682 25 negative_sampler.num_negs_per_pos 55.0 +682 25 training.batch_size 1.0 +682 26 model.embedding_dim 1.0 +682 26 optimizer.lr 0.0015993379366651811 +682 26 negative_sampler.num_negs_per_pos 86.0 +682 26 training.batch_size 0.0 +682 27 model.embedding_dim 1.0 +682 27 optimizer.lr 0.001800646977967615 +682 27 negative_sampler.num_negs_per_pos 74.0 +682 27 training.batch_size 0.0 +682 28 model.embedding_dim 1.0 +682 28 optimizer.lr 0.024321724303416576 +682 28 negative_sampler.num_negs_per_pos 73.0 +682 28 training.batch_size 1.0 +682 29 model.embedding_dim 0.0 +682 29 optimizer.lr 0.005092339239171904 +682 29 negative_sampler.num_negs_per_pos 38.0 +682 29 training.batch_size 0.0 +682 30 model.embedding_dim 1.0 +682 30 optimizer.lr 0.01239467511106184 +682 30 negative_sampler.num_negs_per_pos 29.0 +682 30 training.batch_size 0.0 +682 31 model.embedding_dim 1.0 +682 31 optimizer.lr 0.007397574792631591 +682 31 negative_sampler.num_negs_per_pos 17.0 +682 31 training.batch_size 2.0 +682 32 model.embedding_dim 1.0 +682 32 optimizer.lr 0.002142165582506842 +682 32 negative_sampler.num_negs_per_pos 18.0 +682 32 training.batch_size 1.0 +682 33 model.embedding_dim 1.0 +682 33 optimizer.lr 0.07878019971377663 +682 33 negative_sampler.num_negs_per_pos 99.0 +682 33 training.batch_size 1.0 +682 34 model.embedding_dim 0.0 +682 34 optimizer.lr 0.044540448314951 +682 34 negative_sampler.num_negs_per_pos 74.0 +682 34 training.batch_size 2.0 +682 35 model.embedding_dim 1.0 +682 35 optimizer.lr 0.0015921526980894863 +682 35 negative_sampler.num_negs_per_pos 21.0 +682 35 training.batch_size 0.0 +682 36 model.embedding_dim 2.0 +682 36 optimizer.lr 0.0012524758786456335 +682 36 negative_sampler.num_negs_per_pos 80.0 +682 36 training.batch_size 2.0 +682 37 model.embedding_dim 1.0 +682 37 optimizer.lr 0.011737441799761947 +682 37 negative_sampler.num_negs_per_pos 98.0 +682 37 training.batch_size 2.0 +682 38 model.embedding_dim 1.0 +682 38 optimizer.lr 
0.016717207615204798 +682 38 negative_sampler.num_negs_per_pos 96.0 +682 38 training.batch_size 2.0 +682 39 model.embedding_dim 0.0 +682 39 optimizer.lr 0.0110504296353907 +682 39 negative_sampler.num_negs_per_pos 98.0 +682 39 training.batch_size 2.0 +682 40 model.embedding_dim 1.0 +682 40 optimizer.lr 0.011961864083567244 +682 40 negative_sampler.num_negs_per_pos 62.0 +682 40 training.batch_size 0.0 +682 41 model.embedding_dim 1.0 +682 41 optimizer.lr 0.031643069613957416 +682 41 negative_sampler.num_negs_per_pos 48.0 +682 41 training.batch_size 0.0 +682 42 model.embedding_dim 0.0 +682 42 optimizer.lr 0.0015249533313235605 +682 42 negative_sampler.num_negs_per_pos 17.0 +682 42 training.batch_size 2.0 +682 43 model.embedding_dim 0.0 +682 43 optimizer.lr 0.0016249972919124397 +682 43 negative_sampler.num_negs_per_pos 35.0 +682 43 training.batch_size 2.0 +682 44 model.embedding_dim 0.0 +682 44 optimizer.lr 0.008230406416111094 +682 44 negative_sampler.num_negs_per_pos 37.0 +682 44 training.batch_size 0.0 +682 45 model.embedding_dim 0.0 +682 45 optimizer.lr 0.01094565216735499 +682 45 negative_sampler.num_negs_per_pos 22.0 +682 45 training.batch_size 2.0 +682 46 model.embedding_dim 2.0 +682 46 optimizer.lr 0.016711413642370932 +682 46 negative_sampler.num_negs_per_pos 28.0 +682 46 training.batch_size 2.0 +682 47 model.embedding_dim 1.0 +682 47 optimizer.lr 0.01340394405471554 +682 47 negative_sampler.num_negs_per_pos 45.0 +682 47 training.batch_size 2.0 +682 48 model.embedding_dim 0.0 +682 48 optimizer.lr 0.0028565732390404453 +682 48 negative_sampler.num_negs_per_pos 36.0 +682 48 training.batch_size 0.0 +682 49 model.embedding_dim 2.0 +682 49 optimizer.lr 0.0020073747709138776 +682 49 negative_sampler.num_negs_per_pos 12.0 +682 49 training.batch_size 0.0 +682 50 model.embedding_dim 2.0 +682 50 optimizer.lr 0.01676664221667969 +682 50 negative_sampler.num_negs_per_pos 48.0 +682 50 training.batch_size 0.0 +682 51 model.embedding_dim 1.0 +682 51 optimizer.lr 
0.006168556470272484 +682 51 negative_sampler.num_negs_per_pos 3.0 +682 51 training.batch_size 1.0 +682 52 model.embedding_dim 1.0 +682 52 optimizer.lr 0.01809404458048396 +682 52 negative_sampler.num_negs_per_pos 76.0 +682 52 training.batch_size 0.0 +682 53 model.embedding_dim 0.0 +682 53 optimizer.lr 0.028373645945092306 +682 53 negative_sampler.num_negs_per_pos 64.0 +682 53 training.batch_size 2.0 +682 54 model.embedding_dim 0.0 +682 54 optimizer.lr 0.009946661792611541 +682 54 negative_sampler.num_negs_per_pos 74.0 +682 54 training.batch_size 0.0 +682 55 model.embedding_dim 1.0 +682 55 optimizer.lr 0.026376796780508544 +682 55 negative_sampler.num_negs_per_pos 89.0 +682 55 training.batch_size 2.0 +682 56 model.embedding_dim 1.0 +682 56 optimizer.lr 0.09540882898325563 +682 56 negative_sampler.num_negs_per_pos 60.0 +682 56 training.batch_size 1.0 +682 57 model.embedding_dim 0.0 +682 57 optimizer.lr 0.009988129394528452 +682 57 negative_sampler.num_negs_per_pos 39.0 +682 57 training.batch_size 1.0 +682 58 model.embedding_dim 0.0 +682 58 optimizer.lr 0.022942601175832036 +682 58 negative_sampler.num_negs_per_pos 95.0 +682 58 training.batch_size 1.0 +682 59 model.embedding_dim 1.0 +682 59 optimizer.lr 0.0010413458224679665 +682 59 negative_sampler.num_negs_per_pos 67.0 +682 59 training.batch_size 2.0 +682 60 model.embedding_dim 1.0 +682 60 optimizer.lr 0.09222565163687399 +682 60 negative_sampler.num_negs_per_pos 73.0 +682 60 training.batch_size 1.0 +682 61 model.embedding_dim 1.0 +682 61 optimizer.lr 0.023445641118835726 +682 61 negative_sampler.num_negs_per_pos 29.0 +682 61 training.batch_size 1.0 +682 62 model.embedding_dim 1.0 +682 62 optimizer.lr 0.08837957878687706 +682 62 negative_sampler.num_negs_per_pos 86.0 +682 62 training.batch_size 1.0 +682 63 model.embedding_dim 1.0 +682 63 optimizer.lr 0.09341609388543456 +682 63 negative_sampler.num_negs_per_pos 21.0 +682 63 training.batch_size 1.0 +682 64 model.embedding_dim 2.0 +682 64 optimizer.lr 
0.03899259088060915 +682 64 negative_sampler.num_negs_per_pos 7.0 +682 64 training.batch_size 2.0 +682 65 model.embedding_dim 2.0 +682 65 optimizer.lr 0.008307148151544992 +682 65 negative_sampler.num_negs_per_pos 99.0 +682 65 training.batch_size 2.0 +682 66 model.embedding_dim 0.0 +682 66 optimizer.lr 0.0010240147401940813 +682 66 negative_sampler.num_negs_per_pos 51.0 +682 66 training.batch_size 0.0 +682 67 model.embedding_dim 2.0 +682 67 optimizer.lr 0.0582890544824565 +682 67 negative_sampler.num_negs_per_pos 98.0 +682 67 training.batch_size 2.0 +682 68 model.embedding_dim 0.0 +682 68 optimizer.lr 0.08307528738409592 +682 68 negative_sampler.num_negs_per_pos 20.0 +682 68 training.batch_size 2.0 +682 69 model.embedding_dim 1.0 +682 69 optimizer.lr 0.006468104014548084 +682 69 negative_sampler.num_negs_per_pos 71.0 +682 69 training.batch_size 1.0 +682 70 model.embedding_dim 1.0 +682 70 optimizer.lr 0.003261359949001839 +682 70 negative_sampler.num_negs_per_pos 80.0 +682 70 training.batch_size 2.0 +682 71 model.embedding_dim 2.0 +682 71 optimizer.lr 0.026937926676699493 +682 71 negative_sampler.num_negs_per_pos 81.0 +682 71 training.batch_size 2.0 +682 72 model.embedding_dim 0.0 +682 72 optimizer.lr 0.011762697571418072 +682 72 negative_sampler.num_negs_per_pos 4.0 +682 72 training.batch_size 2.0 +682 73 model.embedding_dim 2.0 +682 73 optimizer.lr 0.04021150214335145 +682 73 negative_sampler.num_negs_per_pos 24.0 +682 73 training.batch_size 2.0 +682 74 model.embedding_dim 2.0 +682 74 optimizer.lr 0.0024359092239098785 +682 74 negative_sampler.num_negs_per_pos 92.0 +682 74 training.batch_size 0.0 +682 75 model.embedding_dim 0.0 +682 75 optimizer.lr 0.031566294000427784 +682 75 negative_sampler.num_negs_per_pos 19.0 +682 75 training.batch_size 0.0 +682 76 model.embedding_dim 0.0 +682 76 optimizer.lr 0.07812274270820022 +682 76 negative_sampler.num_negs_per_pos 76.0 +682 76 training.batch_size 2.0 +682 77 model.embedding_dim 1.0 +682 77 optimizer.lr 
0.0015357881303265932 +682 77 negative_sampler.num_negs_per_pos 36.0 +682 77 training.batch_size 1.0 +682 78 model.embedding_dim 1.0 +682 78 optimizer.lr 0.018673888080056125 +682 78 negative_sampler.num_negs_per_pos 35.0 +682 78 training.batch_size 0.0 +682 79 model.embedding_dim 1.0 +682 79 optimizer.lr 0.001694003738391384 +682 79 negative_sampler.num_negs_per_pos 88.0 +682 79 training.batch_size 2.0 +682 80 model.embedding_dim 1.0 +682 80 optimizer.lr 0.06648971084211815 +682 80 negative_sampler.num_negs_per_pos 49.0 +682 80 training.batch_size 2.0 +682 81 model.embedding_dim 2.0 +682 81 optimizer.lr 0.011729254089134118 +682 81 negative_sampler.num_negs_per_pos 2.0 +682 81 training.batch_size 2.0 +682 1 dataset """wn18rr""" +682 1 model """simple""" +682 1 loss """softplus""" +682 1 regularizer """no""" +682 1 optimizer """adam""" +682 1 training_loop """owa""" +682 1 negative_sampler """basic""" +682 1 evaluator """rankbased""" +682 2 dataset """wn18rr""" +682 2 model """simple""" +682 2 loss """softplus""" +682 2 regularizer """no""" +682 2 optimizer """adam""" +682 2 training_loop """owa""" +682 2 negative_sampler """basic""" +682 2 evaluator """rankbased""" +682 3 dataset """wn18rr""" +682 3 model """simple""" +682 3 loss """softplus""" +682 3 regularizer """no""" +682 3 optimizer """adam""" +682 3 training_loop """owa""" +682 3 negative_sampler """basic""" +682 3 evaluator """rankbased""" +682 4 dataset """wn18rr""" +682 4 model """simple""" +682 4 loss """softplus""" +682 4 regularizer """no""" +682 4 optimizer """adam""" +682 4 training_loop """owa""" +682 4 negative_sampler """basic""" +682 4 evaluator """rankbased""" +682 5 dataset """wn18rr""" +682 5 model """simple""" +682 5 loss """softplus""" +682 5 regularizer """no""" +682 5 optimizer """adam""" +682 5 training_loop """owa""" +682 5 negative_sampler """basic""" +682 5 evaluator """rankbased""" +682 6 dataset """wn18rr""" +682 6 model """simple""" +682 6 loss """softplus""" +682 6 regularizer 
"""no""" +682 6 optimizer """adam""" +682 6 training_loop """owa""" +682 6 negative_sampler """basic""" +682 6 evaluator """rankbased""" +682 7 dataset """wn18rr""" +682 7 model """simple""" +682 7 loss """softplus""" +682 7 regularizer """no""" +682 7 optimizer """adam""" +682 7 training_loop """owa""" +682 7 negative_sampler """basic""" +682 7 evaluator """rankbased""" +682 8 dataset """wn18rr""" +682 8 model """simple""" +682 8 loss """softplus""" +682 8 regularizer """no""" +682 8 optimizer """adam""" +682 8 training_loop """owa""" +682 8 negative_sampler """basic""" +682 8 evaluator """rankbased""" +682 9 dataset """wn18rr""" +682 9 model """simple""" +682 9 loss """softplus""" +682 9 regularizer """no""" +682 9 optimizer """adam""" +682 9 training_loop """owa""" +682 9 negative_sampler """basic""" +682 9 evaluator """rankbased""" +682 10 dataset """wn18rr""" +682 10 model """simple""" +682 10 loss """softplus""" +682 10 regularizer """no""" +682 10 optimizer """adam""" +682 10 training_loop """owa""" +682 10 negative_sampler """basic""" +682 10 evaluator """rankbased""" +682 11 dataset """wn18rr""" +682 11 model """simple""" +682 11 loss """softplus""" +682 11 regularizer """no""" +682 11 optimizer """adam""" +682 11 training_loop """owa""" +682 11 negative_sampler """basic""" +682 11 evaluator """rankbased""" +682 12 dataset """wn18rr""" +682 12 model """simple""" +682 12 loss """softplus""" +682 12 regularizer """no""" +682 12 optimizer """adam""" +682 12 training_loop """owa""" +682 12 negative_sampler """basic""" +682 12 evaluator """rankbased""" +682 13 dataset """wn18rr""" +682 13 model """simple""" +682 13 loss """softplus""" +682 13 regularizer """no""" +682 13 optimizer """adam""" +682 13 training_loop """owa""" +682 13 negative_sampler """basic""" +682 13 evaluator """rankbased""" +682 14 dataset """wn18rr""" +682 14 model """simple""" +682 14 loss """softplus""" +682 14 regularizer """no""" +682 14 optimizer """adam""" +682 14 training_loop 
"""owa""" +682 14 negative_sampler """basic""" +682 14 evaluator """rankbased""" +682 15 dataset """wn18rr""" +682 15 model """simple""" +682 15 loss """softplus""" +682 15 regularizer """no""" +682 15 optimizer """adam""" +682 15 training_loop """owa""" +682 15 negative_sampler """basic""" +682 15 evaluator """rankbased""" +682 16 dataset """wn18rr""" +682 16 model """simple""" +682 16 loss """softplus""" +682 16 regularizer """no""" +682 16 optimizer """adam""" +682 16 training_loop """owa""" +682 16 negative_sampler """basic""" +682 16 evaluator """rankbased""" +682 17 dataset """wn18rr""" +682 17 model """simple""" +682 17 loss """softplus""" +682 17 regularizer """no""" +682 17 optimizer """adam""" +682 17 training_loop """owa""" +682 17 negative_sampler """basic""" +682 17 evaluator """rankbased""" +682 18 dataset """wn18rr""" +682 18 model """simple""" +682 18 loss """softplus""" +682 18 regularizer """no""" +682 18 optimizer """adam""" +682 18 training_loop """owa""" +682 18 negative_sampler """basic""" +682 18 evaluator """rankbased""" +682 19 dataset """wn18rr""" +682 19 model """simple""" +682 19 loss """softplus""" +682 19 regularizer """no""" +682 19 optimizer """adam""" +682 19 training_loop """owa""" +682 19 negative_sampler """basic""" +682 19 evaluator """rankbased""" +682 20 dataset """wn18rr""" +682 20 model """simple""" +682 20 loss """softplus""" +682 20 regularizer """no""" +682 20 optimizer """adam""" +682 20 training_loop """owa""" +682 20 negative_sampler """basic""" +682 20 evaluator """rankbased""" +682 21 dataset """wn18rr""" +682 21 model """simple""" +682 21 loss """softplus""" +682 21 regularizer """no""" +682 21 optimizer """adam""" +682 21 training_loop """owa""" +682 21 negative_sampler """basic""" +682 21 evaluator """rankbased""" +682 22 dataset """wn18rr""" +682 22 model """simple""" +682 22 loss """softplus""" +682 22 regularizer """no""" +682 22 optimizer """adam""" +682 22 training_loop """owa""" +682 22 negative_sampler 
"""basic""" +682 22 evaluator """rankbased""" +682 23 dataset """wn18rr""" +682 23 model """simple""" +682 23 loss """softplus""" +682 23 regularizer """no""" +682 23 optimizer """adam""" +682 23 training_loop """owa""" +682 23 negative_sampler """basic""" +682 23 evaluator """rankbased""" +682 24 dataset """wn18rr""" +682 24 model """simple""" +682 24 loss """softplus""" +682 24 regularizer """no""" +682 24 optimizer """adam""" +682 24 training_loop """owa""" +682 24 negative_sampler """basic""" +682 24 evaluator """rankbased""" +682 25 dataset """wn18rr""" +682 25 model """simple""" +682 25 loss """softplus""" +682 25 regularizer """no""" +682 25 optimizer """adam""" +682 25 training_loop """owa""" +682 25 negative_sampler """basic""" +682 25 evaluator """rankbased""" +682 26 dataset """wn18rr""" +682 26 model """simple""" +682 26 loss """softplus""" +682 26 regularizer """no""" +682 26 optimizer """adam""" +682 26 training_loop """owa""" +682 26 negative_sampler """basic""" +682 26 evaluator """rankbased""" +682 27 dataset """wn18rr""" +682 27 model """simple""" +682 27 loss """softplus""" +682 27 regularizer """no""" +682 27 optimizer """adam""" +682 27 training_loop """owa""" +682 27 negative_sampler """basic""" +682 27 evaluator """rankbased""" +682 28 dataset """wn18rr""" +682 28 model """simple""" +682 28 loss """softplus""" +682 28 regularizer """no""" +682 28 optimizer """adam""" +682 28 training_loop """owa""" +682 28 negative_sampler """basic""" +682 28 evaluator """rankbased""" +682 29 dataset """wn18rr""" +682 29 model """simple""" +682 29 loss """softplus""" +682 29 regularizer """no""" +682 29 optimizer """adam""" +682 29 training_loop """owa""" +682 29 negative_sampler """basic""" +682 29 evaluator """rankbased""" +682 30 dataset """wn18rr""" +682 30 model """simple""" +682 30 loss """softplus""" +682 30 regularizer """no""" +682 30 optimizer """adam""" +682 30 training_loop """owa""" +682 30 negative_sampler """basic""" +682 30 evaluator 
"""rankbased""" +682 31 dataset """wn18rr""" +682 31 model """simple""" +682 31 loss """softplus""" +682 31 regularizer """no""" +682 31 optimizer """adam""" +682 31 training_loop """owa""" +682 31 negative_sampler """basic""" +682 31 evaluator """rankbased""" +682 32 dataset """wn18rr""" +682 32 model """simple""" +682 32 loss """softplus""" +682 32 regularizer """no""" +682 32 optimizer """adam""" +682 32 training_loop """owa""" +682 32 negative_sampler """basic""" +682 32 evaluator """rankbased""" +682 33 dataset """wn18rr""" +682 33 model """simple""" +682 33 loss """softplus""" +682 33 regularizer """no""" +682 33 optimizer """adam""" +682 33 training_loop """owa""" +682 33 negative_sampler """basic""" +682 33 evaluator """rankbased""" +682 34 dataset """wn18rr""" +682 34 model """simple""" +682 34 loss """softplus""" +682 34 regularizer """no""" +682 34 optimizer """adam""" +682 34 training_loop """owa""" +682 34 negative_sampler """basic""" +682 34 evaluator """rankbased""" +682 35 dataset """wn18rr""" +682 35 model """simple""" +682 35 loss """softplus""" +682 35 regularizer """no""" +682 35 optimizer """adam""" +682 35 training_loop """owa""" +682 35 negative_sampler """basic""" +682 35 evaluator """rankbased""" +682 36 dataset """wn18rr""" +682 36 model """simple""" +682 36 loss """softplus""" +682 36 regularizer """no""" +682 36 optimizer """adam""" +682 36 training_loop """owa""" +682 36 negative_sampler """basic""" +682 36 evaluator """rankbased""" +682 37 dataset """wn18rr""" +682 37 model """simple""" +682 37 loss """softplus""" +682 37 regularizer """no""" +682 37 optimizer """adam""" +682 37 training_loop """owa""" +682 37 negative_sampler """basic""" +682 37 evaluator """rankbased""" +682 38 dataset """wn18rr""" +682 38 model """simple""" +682 38 loss """softplus""" +682 38 regularizer """no""" +682 38 optimizer """adam""" +682 38 training_loop """owa""" +682 38 negative_sampler """basic""" +682 38 evaluator """rankbased""" +682 39 dataset 
"""wn18rr""" +682 39 model """simple""" +682 39 loss """softplus""" +682 39 regularizer """no""" +682 39 optimizer """adam""" +682 39 training_loop """owa""" +682 39 negative_sampler """basic""" +682 39 evaluator """rankbased""" +682 40 dataset """wn18rr""" +682 40 model """simple""" +682 40 loss """softplus""" +682 40 regularizer """no""" +682 40 optimizer """adam""" +682 40 training_loop """owa""" +682 40 negative_sampler """basic""" +682 40 evaluator """rankbased""" +682 41 dataset """wn18rr""" +682 41 model """simple""" +682 41 loss """softplus""" +682 41 regularizer """no""" +682 41 optimizer """adam""" +682 41 training_loop """owa""" +682 41 negative_sampler """basic""" +682 41 evaluator """rankbased""" +682 42 dataset """wn18rr""" +682 42 model """simple""" +682 42 loss """softplus""" +682 42 regularizer """no""" +682 42 optimizer """adam""" +682 42 training_loop """owa""" +682 42 negative_sampler """basic""" +682 42 evaluator """rankbased""" +682 43 dataset """wn18rr""" +682 43 model """simple""" +682 43 loss """softplus""" +682 43 regularizer """no""" +682 43 optimizer """adam""" +682 43 training_loop """owa""" +682 43 negative_sampler """basic""" +682 43 evaluator """rankbased""" +682 44 dataset """wn18rr""" +682 44 model """simple""" +682 44 loss """softplus""" +682 44 regularizer """no""" +682 44 optimizer """adam""" +682 44 training_loop """owa""" +682 44 negative_sampler """basic""" +682 44 evaluator """rankbased""" +682 45 dataset """wn18rr""" +682 45 model """simple""" +682 45 loss """softplus""" +682 45 regularizer """no""" +682 45 optimizer """adam""" +682 45 training_loop """owa""" +682 45 negative_sampler """basic""" +682 45 evaluator """rankbased""" +682 46 dataset """wn18rr""" +682 46 model """simple""" +682 46 loss """softplus""" +682 46 regularizer """no""" +682 46 optimizer """adam""" +682 46 training_loop """owa""" +682 46 negative_sampler """basic""" +682 46 evaluator """rankbased""" +682 47 dataset """wn18rr""" +682 47 model """simple""" 
+682 47 loss """softplus""" +682 47 regularizer """no""" +682 47 optimizer """adam""" +682 47 training_loop """owa""" +682 47 negative_sampler """basic""" +682 47 evaluator """rankbased""" +682 48 dataset """wn18rr""" +682 48 model """simple""" +682 48 loss """softplus""" +682 48 regularizer """no""" +682 48 optimizer """adam""" +682 48 training_loop """owa""" +682 48 negative_sampler """basic""" +682 48 evaluator """rankbased""" +682 49 dataset """wn18rr""" +682 49 model """simple""" +682 49 loss """softplus""" +682 49 regularizer """no""" +682 49 optimizer """adam""" +682 49 training_loop """owa""" +682 49 negative_sampler """basic""" +682 49 evaluator """rankbased""" +682 50 dataset """wn18rr""" +682 50 model """simple""" +682 50 loss """softplus""" +682 50 regularizer """no""" +682 50 optimizer """adam""" +682 50 training_loop """owa""" +682 50 negative_sampler """basic""" +682 50 evaluator """rankbased""" +682 51 dataset """wn18rr""" +682 51 model """simple""" +682 51 loss """softplus""" +682 51 regularizer """no""" +682 51 optimizer """adam""" +682 51 training_loop """owa""" +682 51 negative_sampler """basic""" +682 51 evaluator """rankbased""" +682 52 dataset """wn18rr""" +682 52 model """simple""" +682 52 loss """softplus""" +682 52 regularizer """no""" +682 52 optimizer """adam""" +682 52 training_loop """owa""" +682 52 negative_sampler """basic""" +682 52 evaluator """rankbased""" +682 53 dataset """wn18rr""" +682 53 model """simple""" +682 53 loss """softplus""" +682 53 regularizer """no""" +682 53 optimizer """adam""" +682 53 training_loop """owa""" +682 53 negative_sampler """basic""" +682 53 evaluator """rankbased""" +682 54 dataset """wn18rr""" +682 54 model """simple""" +682 54 loss """softplus""" +682 54 regularizer """no""" +682 54 optimizer """adam""" +682 54 training_loop """owa""" +682 54 negative_sampler """basic""" +682 54 evaluator """rankbased""" +682 55 dataset """wn18rr""" +682 55 model """simple""" +682 55 loss """softplus""" +682 55 
regularizer """no""" +682 55 optimizer """adam""" +682 55 training_loop """owa""" +682 55 negative_sampler """basic""" +682 55 evaluator """rankbased""" +682 56 dataset """wn18rr""" +682 56 model """simple""" +682 56 loss """softplus""" +682 56 regularizer """no""" +682 56 optimizer """adam""" +682 56 training_loop """owa""" +682 56 negative_sampler """basic""" +682 56 evaluator """rankbased""" +682 57 dataset """wn18rr""" +682 57 model """simple""" +682 57 loss """softplus""" +682 57 regularizer """no""" +682 57 optimizer """adam""" +682 57 training_loop """owa""" +682 57 negative_sampler """basic""" +682 57 evaluator """rankbased""" +682 58 dataset """wn18rr""" +682 58 model """simple""" +682 58 loss """softplus""" +682 58 regularizer """no""" +682 58 optimizer """adam""" +682 58 training_loop """owa""" +682 58 negative_sampler """basic""" +682 58 evaluator """rankbased""" +682 59 dataset """wn18rr""" +682 59 model """simple""" +682 59 loss """softplus""" +682 59 regularizer """no""" +682 59 optimizer """adam""" +682 59 training_loop """owa""" +682 59 negative_sampler """basic""" +682 59 evaluator """rankbased""" +682 60 dataset """wn18rr""" +682 60 model """simple""" +682 60 loss """softplus""" +682 60 regularizer """no""" +682 60 optimizer """adam""" +682 60 training_loop """owa""" +682 60 negative_sampler """basic""" +682 60 evaluator """rankbased""" +682 61 dataset """wn18rr""" +682 61 model """simple""" +682 61 loss """softplus""" +682 61 regularizer """no""" +682 61 optimizer """adam""" +682 61 training_loop """owa""" +682 61 negative_sampler """basic""" +682 61 evaluator """rankbased""" +682 62 dataset """wn18rr""" +682 62 model """simple""" +682 62 loss """softplus""" +682 62 regularizer """no""" +682 62 optimizer """adam""" +682 62 training_loop """owa""" +682 62 negative_sampler """basic""" +682 62 evaluator """rankbased""" +682 63 dataset """wn18rr""" +682 63 model """simple""" +682 63 loss """softplus""" +682 63 regularizer """no""" +682 63 optimizer 
"""adam""" +682 63 training_loop """owa""" +682 63 negative_sampler """basic""" +682 63 evaluator """rankbased""" +682 64 dataset """wn18rr""" +682 64 model """simple""" +682 64 loss """softplus""" +682 64 regularizer """no""" +682 64 optimizer """adam""" +682 64 training_loop """owa""" +682 64 negative_sampler """basic""" +682 64 evaluator """rankbased""" +682 65 dataset """wn18rr""" +682 65 model """simple""" +682 65 loss """softplus""" +682 65 regularizer """no""" +682 65 optimizer """adam""" +682 65 training_loop """owa""" +682 65 negative_sampler """basic""" +682 65 evaluator """rankbased""" +682 66 dataset """wn18rr""" +682 66 model """simple""" +682 66 loss """softplus""" +682 66 regularizer """no""" +682 66 optimizer """adam""" +682 66 training_loop """owa""" +682 66 negative_sampler """basic""" +682 66 evaluator """rankbased""" +682 67 dataset """wn18rr""" +682 67 model """simple""" +682 67 loss """softplus""" +682 67 regularizer """no""" +682 67 optimizer """adam""" +682 67 training_loop """owa""" +682 67 negative_sampler """basic""" +682 67 evaluator """rankbased""" +682 68 dataset """wn18rr""" +682 68 model """simple""" +682 68 loss """softplus""" +682 68 regularizer """no""" +682 68 optimizer """adam""" +682 68 training_loop """owa""" +682 68 negative_sampler """basic""" +682 68 evaluator """rankbased""" +682 69 dataset """wn18rr""" +682 69 model """simple""" +682 69 loss """softplus""" +682 69 regularizer """no""" +682 69 optimizer """adam""" +682 69 training_loop """owa""" +682 69 negative_sampler """basic""" +682 69 evaluator """rankbased""" +682 70 dataset """wn18rr""" +682 70 model """simple""" +682 70 loss """softplus""" +682 70 regularizer """no""" +682 70 optimizer """adam""" +682 70 training_loop """owa""" +682 70 negative_sampler """basic""" +682 70 evaluator """rankbased""" +682 71 dataset """wn18rr""" +682 71 model """simple""" +682 71 loss """softplus""" +682 71 regularizer """no""" +682 71 optimizer """adam""" +682 71 training_loop 
"""owa""" +682 71 negative_sampler """basic""" +682 71 evaluator """rankbased""" +682 72 dataset """wn18rr""" +682 72 model """simple""" +682 72 loss """softplus""" +682 72 regularizer """no""" +682 72 optimizer """adam""" +682 72 training_loop """owa""" +682 72 negative_sampler """basic""" +682 72 evaluator """rankbased""" +682 73 dataset """wn18rr""" +682 73 model """simple""" +682 73 loss """softplus""" +682 73 regularizer """no""" +682 73 optimizer """adam""" +682 73 training_loop """owa""" +682 73 negative_sampler """basic""" +682 73 evaluator """rankbased""" +682 74 dataset """wn18rr""" +682 74 model """simple""" +682 74 loss """softplus""" +682 74 regularizer """no""" +682 74 optimizer """adam""" +682 74 training_loop """owa""" +682 74 negative_sampler """basic""" +682 74 evaluator """rankbased""" +682 75 dataset """wn18rr""" +682 75 model """simple""" +682 75 loss """softplus""" +682 75 regularizer """no""" +682 75 optimizer """adam""" +682 75 training_loop """owa""" +682 75 negative_sampler """basic""" +682 75 evaluator """rankbased""" +682 76 dataset """wn18rr""" +682 76 model """simple""" +682 76 loss """softplus""" +682 76 regularizer """no""" +682 76 optimizer """adam""" +682 76 training_loop """owa""" +682 76 negative_sampler """basic""" +682 76 evaluator """rankbased""" +682 77 dataset """wn18rr""" +682 77 model """simple""" +682 77 loss """softplus""" +682 77 regularizer """no""" +682 77 optimizer """adam""" +682 77 training_loop """owa""" +682 77 negative_sampler """basic""" +682 77 evaluator """rankbased""" +682 78 dataset """wn18rr""" +682 78 model """simple""" +682 78 loss """softplus""" +682 78 regularizer """no""" +682 78 optimizer """adam""" +682 78 training_loop """owa""" +682 78 negative_sampler """basic""" +682 78 evaluator """rankbased""" +682 79 dataset """wn18rr""" +682 79 model """simple""" +682 79 loss """softplus""" +682 79 regularizer """no""" +682 79 optimizer """adam""" +682 79 training_loop """owa""" +682 79 negative_sampler 
"""basic""" +682 79 evaluator """rankbased""" +682 80 dataset """wn18rr""" +682 80 model """simple""" +682 80 loss """softplus""" +682 80 regularizer """no""" +682 80 optimizer """adam""" +682 80 training_loop """owa""" +682 80 negative_sampler """basic""" +682 80 evaluator """rankbased""" +682 81 dataset """wn18rr""" +682 81 model """simple""" +682 81 loss """softplus""" +682 81 regularizer """no""" +682 81 optimizer """adam""" +682 81 training_loop """owa""" +682 81 negative_sampler """basic""" +682 81 evaluator """rankbased""" +683 1 model.embedding_dim 2.0 +683 1 optimizer.lr 0.056549294853499105 +683 1 training.batch_size 2.0 +683 1 training.label_smoothing 0.004397133450972112 +683 2 model.embedding_dim 2.0 +683 2 optimizer.lr 0.0045332698649192945 +683 2 training.batch_size 2.0 +683 2 training.label_smoothing 0.004342304112997613 +683 1 dataset """wn18rr""" +683 1 model """simple""" +683 1 loss """crossentropy""" +683 1 regularizer """no""" +683 1 optimizer """adam""" +683 1 training_loop """lcwa""" +683 1 evaluator """rankbased""" +683 2 dataset """wn18rr""" +683 2 model """simple""" +683 2 loss """crossentropy""" +683 2 regularizer """no""" +683 2 optimizer """adam""" +683 2 training_loop """lcwa""" +683 2 evaluator """rankbased""" +684 1 model.embedding_dim 0.0 +684 1 optimizer.lr 0.004487356417468697 +684 1 training.batch_size 0.0 +684 1 training.label_smoothing 0.01585810114874472 +684 2 model.embedding_dim 1.0 +684 2 optimizer.lr 0.023183979678678916 +684 2 training.batch_size 2.0 +684 2 training.label_smoothing 0.6009334435505677 +684 3 model.embedding_dim 2.0 +684 3 optimizer.lr 0.013747479636366975 +684 3 training.batch_size 2.0 +684 3 training.label_smoothing 0.0022043073159063982 +684 4 model.embedding_dim 0.0 +684 4 optimizer.lr 0.04471844472757741 +684 4 training.batch_size 1.0 +684 4 training.label_smoothing 0.024298638754053335 +684 5 model.embedding_dim 1.0 +684 5 optimizer.lr 0.017062844999166833 +684 5 training.batch_size 0.0 +684 5 
training.label_smoothing 0.042319597270304904 +684 6 model.embedding_dim 2.0 +684 6 optimizer.lr 0.08476137477961362 +684 6 training.batch_size 2.0 +684 6 training.label_smoothing 0.22760895440690534 +684 7 model.embedding_dim 1.0 +684 7 optimizer.lr 0.045058774455650465 +684 7 training.batch_size 2.0 +684 7 training.label_smoothing 0.12664596569830014 +684 8 model.embedding_dim 1.0 +684 8 optimizer.lr 0.004589367284701653 +684 8 training.batch_size 0.0 +684 8 training.label_smoothing 0.3234047762057613 +684 1 dataset """wn18rr""" +684 1 model """simple""" +684 1 loss """crossentropy""" +684 1 regularizer """no""" +684 1 optimizer """adam""" +684 1 training_loop """lcwa""" +684 1 evaluator """rankbased""" +684 2 dataset """wn18rr""" +684 2 model """simple""" +684 2 loss """crossentropy""" +684 2 regularizer """no""" +684 2 optimizer """adam""" +684 2 training_loop """lcwa""" +684 2 evaluator """rankbased""" +684 3 dataset """wn18rr""" +684 3 model """simple""" +684 3 loss """crossentropy""" +684 3 regularizer """no""" +684 3 optimizer """adam""" +684 3 training_loop """lcwa""" +684 3 evaluator """rankbased""" +684 4 dataset """wn18rr""" +684 4 model """simple""" +684 4 loss """crossentropy""" +684 4 regularizer """no""" +684 4 optimizer """adam""" +684 4 training_loop """lcwa""" +684 4 evaluator """rankbased""" +684 5 dataset """wn18rr""" +684 5 model """simple""" +684 5 loss """crossentropy""" +684 5 regularizer """no""" +684 5 optimizer """adam""" +684 5 training_loop """lcwa""" +684 5 evaluator """rankbased""" +684 6 dataset """wn18rr""" +684 6 model """simple""" +684 6 loss """crossentropy""" +684 6 regularizer """no""" +684 6 optimizer """adam""" +684 6 training_loop """lcwa""" +684 6 evaluator """rankbased""" +684 7 dataset """wn18rr""" +684 7 model """simple""" +684 7 loss """crossentropy""" +684 7 regularizer """no""" +684 7 optimizer """adam""" +684 7 training_loop """lcwa""" +684 7 evaluator """rankbased""" +684 8 dataset """wn18rr""" +684 8 model 
"""simple""" +684 8 loss """crossentropy""" +684 8 regularizer """no""" +684 8 optimizer """adam""" +684 8 training_loop """lcwa""" +684 8 evaluator """rankbased""" +685 1 model.embedding_dim 1.0 +685 1 optimizer.lr 0.02097238493065829 +685 1 training.batch_size 2.0 +685 1 training.label_smoothing 0.040908635438496724 +685 2 model.embedding_dim 2.0 +685 2 optimizer.lr 0.0041639826460022605 +685 2 training.batch_size 0.0 +685 2 training.label_smoothing 0.5930764906833095 +685 1 dataset """wn18rr""" +685 1 model """simple""" +685 1 loss """bceaftersigmoid""" +685 1 regularizer """no""" +685 1 optimizer """adam""" +685 1 training_loop """lcwa""" +685 1 evaluator """rankbased""" +685 2 dataset """wn18rr""" +685 2 model """simple""" +685 2 loss """bceaftersigmoid""" +685 2 regularizer """no""" +685 2 optimizer """adam""" +685 2 training_loop """lcwa""" +685 2 evaluator """rankbased""" +686 1 model.embedding_dim 2.0 +686 1 optimizer.lr 0.006027394387776031 +686 1 training.batch_size 1.0 +686 1 training.label_smoothing 0.2713895002598561 +686 2 model.embedding_dim 2.0 +686 2 optimizer.lr 0.003526188910905342 +686 2 training.batch_size 0.0 +686 2 training.label_smoothing 0.0023956873916191296 +686 3 model.embedding_dim 0.0 +686 3 optimizer.lr 0.050148726528083935 +686 3 training.batch_size 1.0 +686 3 training.label_smoothing 0.15240917350457847 +686 4 model.embedding_dim 1.0 +686 4 optimizer.lr 0.08177473435674222 +686 4 training.batch_size 2.0 +686 4 training.label_smoothing 0.010739064405366329 +686 5 model.embedding_dim 0.0 +686 5 optimizer.lr 0.09405560892285728 +686 5 training.batch_size 0.0 +686 5 training.label_smoothing 0.00825335090168696 +686 6 model.embedding_dim 2.0 +686 6 optimizer.lr 0.0012102545857423238 +686 6 training.batch_size 1.0 +686 6 training.label_smoothing 0.20005625690198808 +686 1 dataset """wn18rr""" +686 1 model """simple""" +686 1 loss """bceaftersigmoid""" +686 1 regularizer """no""" +686 1 optimizer """adam""" +686 1 training_loop """lcwa""" 
+686 1 evaluator """rankbased""" +686 2 dataset """wn18rr""" +686 2 model """simple""" +686 2 loss """bceaftersigmoid""" +686 2 regularizer """no""" +686 2 optimizer """adam""" +686 2 training_loop """lcwa""" +686 2 evaluator """rankbased""" +686 3 dataset """wn18rr""" +686 3 model """simple""" +686 3 loss """bceaftersigmoid""" +686 3 regularizer """no""" +686 3 optimizer """adam""" +686 3 training_loop """lcwa""" +686 3 evaluator """rankbased""" +686 4 dataset """wn18rr""" +686 4 model """simple""" +686 4 loss """bceaftersigmoid""" +686 4 regularizer """no""" +686 4 optimizer """adam""" +686 4 training_loop """lcwa""" +686 4 evaluator """rankbased""" +686 5 dataset """wn18rr""" +686 5 model """simple""" +686 5 loss """bceaftersigmoid""" +686 5 regularizer """no""" +686 5 optimizer """adam""" +686 5 training_loop """lcwa""" +686 5 evaluator """rankbased""" +686 6 dataset """wn18rr""" +686 6 model """simple""" +686 6 loss """bceaftersigmoid""" +686 6 regularizer """no""" +686 6 optimizer """adam""" +686 6 training_loop """lcwa""" +686 6 evaluator """rankbased""" +687 1 model.embedding_dim 2.0 +687 1 optimizer.lr 0.005587512814782146 +687 1 training.batch_size 2.0 +687 1 training.label_smoothing 0.17024865326115646 +687 2 model.embedding_dim 0.0 +687 2 optimizer.lr 0.09910031924010458 +687 2 training.batch_size 1.0 +687 2 training.label_smoothing 0.8165838693853675 +687 3 model.embedding_dim 0.0 +687 3 optimizer.lr 0.025234010943957 +687 3 training.batch_size 2.0 +687 3 training.label_smoothing 0.0022147798878433727 +687 4 model.embedding_dim 1.0 +687 4 optimizer.lr 0.04422824126933295 +687 4 training.batch_size 2.0 +687 4 training.label_smoothing 0.002849165010466829 +687 5 model.embedding_dim 0.0 +687 5 optimizer.lr 0.002871948172832901 +687 5 training.batch_size 2.0 +687 5 training.label_smoothing 0.11301986357558433 +687 6 model.embedding_dim 1.0 +687 6 optimizer.lr 0.05799540386869722 +687 6 training.batch_size 2.0 +687 6 training.label_smoothing 
0.13652503647589656 +687 7 model.embedding_dim 0.0 +687 7 optimizer.lr 0.03194623423475052 +687 7 training.batch_size 1.0 +687 7 training.label_smoothing 0.0015677399091741414 +687 8 model.embedding_dim 1.0 +687 8 optimizer.lr 0.03272647651101646 +687 8 training.batch_size 0.0 +687 8 training.label_smoothing 0.018717551071115668 +687 1 dataset """wn18rr""" +687 1 model """simple""" +687 1 loss """softplus""" +687 1 regularizer """no""" +687 1 optimizer """adam""" +687 1 training_loop """lcwa""" +687 1 evaluator """rankbased""" +687 2 dataset """wn18rr""" +687 2 model """simple""" +687 2 loss """softplus""" +687 2 regularizer """no""" +687 2 optimizer """adam""" +687 2 training_loop """lcwa""" +687 2 evaluator """rankbased""" +687 3 dataset """wn18rr""" +687 3 model """simple""" +687 3 loss """softplus""" +687 3 regularizer """no""" +687 3 optimizer """adam""" +687 3 training_loop """lcwa""" +687 3 evaluator """rankbased""" +687 4 dataset """wn18rr""" +687 4 model """simple""" +687 4 loss """softplus""" +687 4 regularizer """no""" +687 4 optimizer """adam""" +687 4 training_loop """lcwa""" +687 4 evaluator """rankbased""" +687 5 dataset """wn18rr""" +687 5 model """simple""" +687 5 loss """softplus""" +687 5 regularizer """no""" +687 5 optimizer """adam""" +687 5 training_loop """lcwa""" +687 5 evaluator """rankbased""" +687 6 dataset """wn18rr""" +687 6 model """simple""" +687 6 loss """softplus""" +687 6 regularizer """no""" +687 6 optimizer """adam""" +687 6 training_loop """lcwa""" +687 6 evaluator """rankbased""" +687 7 dataset """wn18rr""" +687 7 model """simple""" +687 7 loss """softplus""" +687 7 regularizer """no""" +687 7 optimizer """adam""" +687 7 training_loop """lcwa""" +687 7 evaluator """rankbased""" +687 8 dataset """wn18rr""" +687 8 model """simple""" +687 8 loss """softplus""" +687 8 regularizer """no""" +687 8 optimizer """adam""" +687 8 training_loop """lcwa""" +687 8 evaluator """rankbased""" +688 1 model.embedding_dim 2.0 +688 1 optimizer.lr 
0.004069640269972345 +688 1 training.batch_size 0.0 +688 1 training.label_smoothing 0.5710592328499572 +688 2 model.embedding_dim 2.0 +688 2 optimizer.lr 0.010820650433791493 +688 2 training.batch_size 1.0 +688 2 training.label_smoothing 0.00555100604997465 +688 3 model.embedding_dim 1.0 +688 3 optimizer.lr 0.04934417466713368 +688 3 training.batch_size 0.0 +688 3 training.label_smoothing 0.08282624482791694 +688 4 model.embedding_dim 2.0 +688 4 optimizer.lr 0.0023798357026540836 +688 4 training.batch_size 0.0 +688 4 training.label_smoothing 0.00791845069930567 +688 5 model.embedding_dim 1.0 +688 5 optimizer.lr 0.00991657945128441 +688 5 training.batch_size 0.0 +688 5 training.label_smoothing 0.016834292134971778 +688 6 model.embedding_dim 2.0 +688 6 optimizer.lr 0.030963701571274264 +688 6 training.batch_size 2.0 +688 6 training.label_smoothing 0.007471063248987446 +688 7 model.embedding_dim 1.0 +688 7 optimizer.lr 0.0121957614190584 +688 7 training.batch_size 0.0 +688 7 training.label_smoothing 0.18791948428273356 +688 8 model.embedding_dim 2.0 +688 8 optimizer.lr 0.0037031814235944345 +688 8 training.batch_size 2.0 +688 8 training.label_smoothing 0.8145558096754577 +688 9 model.embedding_dim 2.0 +688 9 optimizer.lr 0.01780406445261381 +688 9 training.batch_size 0.0 +688 9 training.label_smoothing 0.2882844920885415 +688 1 dataset """wn18rr""" +688 1 model """simple""" +688 1 loss """softplus""" +688 1 regularizer """no""" +688 1 optimizer """adam""" +688 1 training_loop """lcwa""" +688 1 evaluator """rankbased""" +688 2 dataset """wn18rr""" +688 2 model """simple""" +688 2 loss """softplus""" +688 2 regularizer """no""" +688 2 optimizer """adam""" +688 2 training_loop """lcwa""" +688 2 evaluator """rankbased""" +688 3 dataset """wn18rr""" +688 3 model """simple""" +688 3 loss """softplus""" +688 3 regularizer """no""" +688 3 optimizer """adam""" +688 3 training_loop """lcwa""" +688 3 evaluator """rankbased""" +688 4 dataset """wn18rr""" +688 4 model """simple""" 
+688 4 loss """softplus""" +688 4 regularizer """no""" +688 4 optimizer """adam""" +688 4 training_loop """lcwa""" +688 4 evaluator """rankbased""" +688 5 dataset """wn18rr""" +688 5 model """simple""" +688 5 loss """softplus""" +688 5 regularizer """no""" +688 5 optimizer """adam""" +688 5 training_loop """lcwa""" +688 5 evaluator """rankbased""" +688 6 dataset """wn18rr""" +688 6 model """simple""" +688 6 loss """softplus""" +688 6 regularizer """no""" +688 6 optimizer """adam""" +688 6 training_loop """lcwa""" +688 6 evaluator """rankbased""" +688 7 dataset """wn18rr""" +688 7 model """simple""" +688 7 loss """softplus""" +688 7 regularizer """no""" +688 7 optimizer """adam""" +688 7 training_loop """lcwa""" +688 7 evaluator """rankbased""" +688 8 dataset """wn18rr""" +688 8 model """simple""" +688 8 loss """softplus""" +688 8 regularizer """no""" +688 8 optimizer """adam""" +688 8 training_loop """lcwa""" +688 8 evaluator """rankbased""" +688 9 dataset """wn18rr""" +688 9 model """simple""" +688 9 loss """softplus""" +688 9 regularizer """no""" +688 9 optimizer """adam""" +688 9 training_loop """lcwa""" +688 9 evaluator """rankbased""" +689 1 model.embedding_dim 0.0 +689 1 model.scoring_fct_norm 2.0 +689 1 training.batch_size 1.0 +689 1 training.label_smoothing 0.004613557632988748 +689 2 model.embedding_dim 1.0 +689 2 model.scoring_fct_norm 1.0 +689 2 training.batch_size 1.0 +689 2 training.label_smoothing 0.006854897548325292 +689 3 model.embedding_dim 2.0 +689 3 model.scoring_fct_norm 2.0 +689 3 training.batch_size 1.0 +689 3 training.label_smoothing 0.0950404464608067 +689 4 model.embedding_dim 0.0 +689 4 model.scoring_fct_norm 2.0 +689 4 training.batch_size 1.0 +689 4 training.label_smoothing 0.013236301199022648 +689 5 model.embedding_dim 1.0 +689 5 model.scoring_fct_norm 1.0 +689 5 training.batch_size 1.0 +689 5 training.label_smoothing 0.28406249572284015 +689 6 model.embedding_dim 0.0 +689 6 model.scoring_fct_norm 2.0 +689 6 training.batch_size 0.0 
+689 6 training.label_smoothing 0.18426303977966213 +689 7 model.embedding_dim 2.0 +689 7 model.scoring_fct_norm 1.0 +689 7 training.batch_size 1.0 +689 7 training.label_smoothing 0.08816822116954606 +689 8 model.embedding_dim 1.0 +689 8 model.scoring_fct_norm 1.0 +689 8 training.batch_size 2.0 +689 8 training.label_smoothing 0.002512439387758842 +689 9 model.embedding_dim 2.0 +689 9 model.scoring_fct_norm 1.0 +689 9 training.batch_size 0.0 +689 9 training.label_smoothing 0.511546952156523 +689 10 model.embedding_dim 1.0 +689 10 model.scoring_fct_norm 2.0 +689 10 training.batch_size 1.0 +689 10 training.label_smoothing 0.017555104577952695 +689 11 model.embedding_dim 2.0 +689 11 model.scoring_fct_norm 2.0 +689 11 training.batch_size 2.0 +689 11 training.label_smoothing 0.05835263272476267 +689 12 model.embedding_dim 1.0 +689 12 model.scoring_fct_norm 2.0 +689 12 training.batch_size 1.0 +689 12 training.label_smoothing 0.0013431154228996651 +689 13 model.embedding_dim 0.0 +689 13 model.scoring_fct_norm 2.0 +689 13 training.batch_size 2.0 +689 13 training.label_smoothing 0.007635901700683797 +689 14 model.embedding_dim 2.0 +689 14 model.scoring_fct_norm 2.0 +689 14 training.batch_size 2.0 +689 14 training.label_smoothing 0.007114659168624397 +689 15 model.embedding_dim 0.0 +689 15 model.scoring_fct_norm 2.0 +689 15 training.batch_size 0.0 +689 15 training.label_smoothing 0.05938218124053912 +689 16 model.embedding_dim 1.0 +689 16 model.scoring_fct_norm 1.0 +689 16 training.batch_size 1.0 +689 16 training.label_smoothing 0.08613109343475997 +689 17 model.embedding_dim 1.0 +689 17 model.scoring_fct_norm 2.0 +689 17 training.batch_size 1.0 +689 17 training.label_smoothing 0.0034500617684950145 +689 18 model.embedding_dim 0.0 +689 18 model.scoring_fct_norm 1.0 +689 18 training.batch_size 2.0 +689 18 training.label_smoothing 0.07828383247697557 +689 19 model.embedding_dim 1.0 +689 19 model.scoring_fct_norm 2.0 +689 19 training.batch_size 1.0 +689 19 
training.label_smoothing 0.003533595176534182 +689 20 model.embedding_dim 0.0 +689 20 model.scoring_fct_norm 2.0 +689 20 training.batch_size 2.0 +689 20 training.label_smoothing 0.21366916768979025 +689 21 model.embedding_dim 1.0 +689 21 model.scoring_fct_norm 1.0 +689 21 training.batch_size 2.0 +689 21 training.label_smoothing 0.5595299219008775 +689 22 model.embedding_dim 0.0 +689 22 model.scoring_fct_norm 1.0 +689 22 training.batch_size 1.0 +689 22 training.label_smoothing 0.0014062984097212097 +689 23 model.embedding_dim 1.0 +689 23 model.scoring_fct_norm 1.0 +689 23 training.batch_size 2.0 +689 23 training.label_smoothing 0.3238746980192847 +689 24 model.embedding_dim 2.0 +689 24 model.scoring_fct_norm 1.0 +689 24 training.batch_size 1.0 +689 24 training.label_smoothing 0.644743016643217 +689 25 model.embedding_dim 2.0 +689 25 model.scoring_fct_norm 1.0 +689 25 training.batch_size 0.0 +689 25 training.label_smoothing 0.0337387695409439 +689 26 model.embedding_dim 2.0 +689 26 model.scoring_fct_norm 2.0 +689 26 training.batch_size 0.0 +689 26 training.label_smoothing 0.443597014308999 +689 27 model.embedding_dim 1.0 +689 27 model.scoring_fct_norm 1.0 +689 27 training.batch_size 0.0 +689 27 training.label_smoothing 0.0062144307707940405 +689 28 model.embedding_dim 1.0 +689 28 model.scoring_fct_norm 1.0 +689 28 training.batch_size 0.0 +689 28 training.label_smoothing 0.09199293917601466 +689 29 model.embedding_dim 0.0 +689 29 model.scoring_fct_norm 2.0 +689 29 training.batch_size 1.0 +689 29 training.label_smoothing 0.010351537871786113 +689 30 model.embedding_dim 1.0 +689 30 model.scoring_fct_norm 2.0 +689 30 training.batch_size 1.0 +689 30 training.label_smoothing 0.004449787270192 +689 31 model.embedding_dim 2.0 +689 31 model.scoring_fct_norm 2.0 +689 31 training.batch_size 1.0 +689 31 training.label_smoothing 0.05558520360574504 +689 32 model.embedding_dim 1.0 +689 32 model.scoring_fct_norm 2.0 +689 32 training.batch_size 1.0 +689 32 training.label_smoothing 
0.17533591800221265 +689 33 model.embedding_dim 1.0 +689 33 model.scoring_fct_norm 1.0 +689 33 training.batch_size 1.0 +689 33 training.label_smoothing 0.0019710490740589607 +689 34 model.embedding_dim 1.0 +689 34 model.scoring_fct_norm 1.0 +689 34 training.batch_size 2.0 +689 34 training.label_smoothing 0.3508120105971718 +689 35 model.embedding_dim 0.0 +689 35 model.scoring_fct_norm 1.0 +689 35 training.batch_size 1.0 +689 35 training.label_smoothing 0.01105704698469224 +689 36 model.embedding_dim 1.0 +689 36 model.scoring_fct_norm 2.0 +689 36 training.batch_size 1.0 +689 36 training.label_smoothing 0.006427961450407001 +689 37 model.embedding_dim 2.0 +689 37 model.scoring_fct_norm 1.0 +689 37 training.batch_size 1.0 +689 37 training.label_smoothing 0.5388423327359976 +689 38 model.embedding_dim 0.0 +689 38 model.scoring_fct_norm 2.0 +689 38 training.batch_size 2.0 +689 38 training.label_smoothing 0.21704071356581114 +689 39 model.embedding_dim 0.0 +689 39 model.scoring_fct_norm 2.0 +689 39 training.batch_size 1.0 +689 39 training.label_smoothing 0.44346340340264917 +689 40 model.embedding_dim 2.0 +689 40 model.scoring_fct_norm 2.0 +689 40 training.batch_size 0.0 +689 40 training.label_smoothing 0.019600188774862158 +689 41 model.embedding_dim 1.0 +689 41 model.scoring_fct_norm 2.0 +689 41 training.batch_size 0.0 +689 41 training.label_smoothing 0.0014362450082526501 +689 42 model.embedding_dim 1.0 +689 42 model.scoring_fct_norm 1.0 +689 42 training.batch_size 0.0 +689 42 training.label_smoothing 0.9736054730568174 +689 43 model.embedding_dim 0.0 +689 43 model.scoring_fct_norm 2.0 +689 43 training.batch_size 0.0 +689 43 training.label_smoothing 0.15313641416918325 +689 44 model.embedding_dim 1.0 +689 44 model.scoring_fct_norm 2.0 +689 44 training.batch_size 0.0 +689 44 training.label_smoothing 0.01610910894375129 +689 45 model.embedding_dim 2.0 +689 45 model.scoring_fct_norm 1.0 +689 45 training.batch_size 0.0 +689 45 training.label_smoothing 0.37184779758965325 
+689 46 model.embedding_dim 0.0 +689 46 model.scoring_fct_norm 1.0 +689 46 training.batch_size 2.0 +689 46 training.label_smoothing 0.17047249037130044 +689 47 model.embedding_dim 0.0 +689 47 model.scoring_fct_norm 1.0 +689 47 training.batch_size 1.0 +689 47 training.label_smoothing 0.060246530939270716 +689 48 model.embedding_dim 2.0 +689 48 model.scoring_fct_norm 1.0 +689 48 training.batch_size 0.0 +689 48 training.label_smoothing 0.8386180132846204 +689 49 model.embedding_dim 0.0 +689 49 model.scoring_fct_norm 1.0 +689 49 training.batch_size 0.0 +689 49 training.label_smoothing 0.0034550664418996806 +689 50 model.embedding_dim 2.0 +689 50 model.scoring_fct_norm 1.0 +689 50 training.batch_size 2.0 +689 50 training.label_smoothing 0.10690242333445049 +689 51 model.embedding_dim 2.0 +689 51 model.scoring_fct_norm 1.0 +689 51 training.batch_size 0.0 +689 51 training.label_smoothing 0.0012746334629909141 +689 52 model.embedding_dim 1.0 +689 52 model.scoring_fct_norm 1.0 +689 52 training.batch_size 2.0 +689 52 training.label_smoothing 0.0020833378383158154 +689 53 model.embedding_dim 1.0 +689 53 model.scoring_fct_norm 2.0 +689 53 training.batch_size 1.0 +689 53 training.label_smoothing 0.0010996091494023712 +689 54 model.embedding_dim 2.0 +689 54 model.scoring_fct_norm 2.0 +689 54 training.batch_size 1.0 +689 54 training.label_smoothing 0.0025549840121429314 +689 55 model.embedding_dim 1.0 +689 55 model.scoring_fct_norm 1.0 +689 55 training.batch_size 0.0 +689 55 training.label_smoothing 0.001414580193990124 +689 56 model.embedding_dim 0.0 +689 56 model.scoring_fct_norm 1.0 +689 56 training.batch_size 2.0 +689 56 training.label_smoothing 0.0011572007765886184 +689 57 model.embedding_dim 2.0 +689 57 model.scoring_fct_norm 1.0 +689 57 training.batch_size 2.0 +689 57 training.label_smoothing 0.025553546450458528 +689 58 model.embedding_dim 0.0 +689 58 model.scoring_fct_norm 1.0 +689 58 training.batch_size 0.0 +689 58 training.label_smoothing 0.015722428521216306 +689 59 
model.embedding_dim 1.0 +689 59 model.scoring_fct_norm 1.0 +689 59 training.batch_size 0.0 +689 59 training.label_smoothing 0.0018090206037559738 +689 60 model.embedding_dim 0.0 +689 60 model.scoring_fct_norm 2.0 +689 60 training.batch_size 0.0 +689 60 training.label_smoothing 0.0011411675321489592 +689 61 model.embedding_dim 1.0 +689 61 model.scoring_fct_norm 2.0 +689 61 training.batch_size 1.0 +689 61 training.label_smoothing 0.007192349095383624 +689 62 model.embedding_dim 2.0 +689 62 model.scoring_fct_norm 2.0 +689 62 training.batch_size 2.0 +689 62 training.label_smoothing 0.004834461604779568 +689 63 model.embedding_dim 2.0 +689 63 model.scoring_fct_norm 1.0 +689 63 training.batch_size 1.0 +689 63 training.label_smoothing 0.3418221023614068 +689 64 model.embedding_dim 1.0 +689 64 model.scoring_fct_norm 2.0 +689 64 training.batch_size 1.0 +689 64 training.label_smoothing 0.27940186829825037 +689 65 model.embedding_dim 2.0 +689 65 model.scoring_fct_norm 2.0 +689 65 training.batch_size 2.0 +689 65 training.label_smoothing 0.43590279719877545 +689 66 model.embedding_dim 0.0 +689 66 model.scoring_fct_norm 2.0 +689 66 training.batch_size 1.0 +689 66 training.label_smoothing 0.86599253581468 +689 67 model.embedding_dim 0.0 +689 67 model.scoring_fct_norm 2.0 +689 67 training.batch_size 1.0 +689 67 training.label_smoothing 0.0978667619125339 +689 68 model.embedding_dim 0.0 +689 68 model.scoring_fct_norm 1.0 +689 68 training.batch_size 2.0 +689 68 training.label_smoothing 0.029458268315841914 +689 69 model.embedding_dim 1.0 +689 69 model.scoring_fct_norm 1.0 +689 69 training.batch_size 2.0 +689 69 training.label_smoothing 0.03282952750002017 +689 70 model.embedding_dim 2.0 +689 70 model.scoring_fct_norm 2.0 +689 70 training.batch_size 0.0 +689 70 training.label_smoothing 0.0011580116632519464 +689 71 model.embedding_dim 1.0 +689 71 model.scoring_fct_norm 1.0 +689 71 training.batch_size 1.0 +689 71 training.label_smoothing 0.1894714525387774 +689 72 model.embedding_dim 
2.0 +689 72 model.scoring_fct_norm 2.0 +689 72 training.batch_size 0.0 +689 72 training.label_smoothing 0.03490287991342923 +689 73 model.embedding_dim 1.0 +689 73 model.scoring_fct_norm 1.0 +689 73 training.batch_size 0.0 +689 73 training.label_smoothing 0.018334249153797125 +689 74 model.embedding_dim 2.0 +689 74 model.scoring_fct_norm 1.0 +689 74 training.batch_size 1.0 +689 74 training.label_smoothing 0.004405921615620057 +689 75 model.embedding_dim 1.0 +689 75 model.scoring_fct_norm 1.0 +689 75 training.batch_size 0.0 +689 75 training.label_smoothing 0.02757244464970205 +689 76 model.embedding_dim 0.0 +689 76 model.scoring_fct_norm 2.0 +689 76 training.batch_size 0.0 +689 76 training.label_smoothing 0.0019238565839179778 +689 77 model.embedding_dim 0.0 +689 77 model.scoring_fct_norm 1.0 +689 77 training.batch_size 0.0 +689 77 training.label_smoothing 0.0014418267136525748 +689 78 model.embedding_dim 2.0 +689 78 model.scoring_fct_norm 2.0 +689 78 training.batch_size 0.0 +689 78 training.label_smoothing 0.6492276530310835 +689 79 model.embedding_dim 0.0 +689 79 model.scoring_fct_norm 1.0 +689 79 training.batch_size 1.0 +689 79 training.label_smoothing 0.008123726874604106 +689 80 model.embedding_dim 2.0 +689 80 model.scoring_fct_norm 1.0 +689 80 training.batch_size 1.0 +689 80 training.label_smoothing 0.0013241203521734614 +689 81 model.embedding_dim 1.0 +689 81 model.scoring_fct_norm 2.0 +689 81 training.batch_size 0.0 +689 81 training.label_smoothing 0.016383690814448217 +689 82 model.embedding_dim 0.0 +689 82 model.scoring_fct_norm 2.0 +689 82 training.batch_size 1.0 +689 82 training.label_smoothing 0.06688103512149209 +689 83 model.embedding_dim 0.0 +689 83 model.scoring_fct_norm 1.0 +689 83 training.batch_size 2.0 +689 83 training.label_smoothing 0.0023595604121190447 +689 84 model.embedding_dim 2.0 +689 84 model.scoring_fct_norm 2.0 +689 84 training.batch_size 2.0 +689 84 training.label_smoothing 0.002537225773794872 +689 85 model.embedding_dim 0.0 +689 85 
model.scoring_fct_norm 1.0 +689 85 training.batch_size 0.0 +689 85 training.label_smoothing 0.015461758194592789 +689 86 model.embedding_dim 1.0 +689 86 model.scoring_fct_norm 2.0 +689 86 training.batch_size 1.0 +689 86 training.label_smoothing 0.08460899739829761 +689 87 model.embedding_dim 2.0 +689 87 model.scoring_fct_norm 1.0 +689 87 training.batch_size 2.0 +689 87 training.label_smoothing 0.6381223472897687 +689 88 model.embedding_dim 2.0 +689 88 model.scoring_fct_norm 2.0 +689 88 training.batch_size 0.0 +689 88 training.label_smoothing 0.02331655346020737 +689 89 model.embedding_dim 2.0 +689 89 model.scoring_fct_norm 1.0 +689 89 training.batch_size 0.0 +689 89 training.label_smoothing 0.004164796780127191 +689 90 model.embedding_dim 2.0 +689 90 model.scoring_fct_norm 2.0 +689 90 training.batch_size 0.0 +689 90 training.label_smoothing 0.012519986190966451 +689 91 model.embedding_dim 2.0 +689 91 model.scoring_fct_norm 2.0 +689 91 training.batch_size 1.0 +689 91 training.label_smoothing 0.6873303094851477 +689 92 model.embedding_dim 2.0 +689 92 model.scoring_fct_norm 1.0 +689 92 training.batch_size 1.0 +689 92 training.label_smoothing 0.0844730934037796 +689 93 model.embedding_dim 2.0 +689 93 model.scoring_fct_norm 1.0 +689 93 training.batch_size 2.0 +689 93 training.label_smoothing 0.001969270624155687 +689 94 model.embedding_dim 0.0 +689 94 model.scoring_fct_norm 2.0 +689 94 training.batch_size 2.0 +689 94 training.label_smoothing 0.003008321494137856 +689 95 model.embedding_dim 2.0 +689 95 model.scoring_fct_norm 2.0 +689 95 training.batch_size 2.0 +689 95 training.label_smoothing 0.3430711489690806 +689 96 model.embedding_dim 1.0 +689 96 model.scoring_fct_norm 1.0 +689 96 training.batch_size 1.0 +689 96 training.label_smoothing 0.0898160788659943 +689 97 model.embedding_dim 0.0 +689 97 model.scoring_fct_norm 1.0 +689 97 training.batch_size 2.0 +689 97 training.label_smoothing 0.1881325083712004 +689 98 model.embedding_dim 0.0 +689 98 model.scoring_fct_norm 
2.0 +689 98 training.batch_size 1.0 +689 98 training.label_smoothing 0.010002833470006457 +689 99 model.embedding_dim 0.0 +689 99 model.scoring_fct_norm 1.0 +689 99 training.batch_size 0.0 +689 99 training.label_smoothing 0.0039381352623435485 +689 100 model.embedding_dim 2.0 +689 100 model.scoring_fct_norm 2.0 +689 100 training.batch_size 1.0 +689 100 training.label_smoothing 0.0019657036973966873 +689 1 dataset """kinships""" +689 1 model """structuredembedding""" +689 1 loss """bceaftersigmoid""" +689 1 regularizer """no""" +689 1 optimizer """adadelta""" +689 1 training_loop """lcwa""" +689 1 evaluator """rankbased""" +689 2 dataset """kinships""" +689 2 model """structuredembedding""" +689 2 loss """bceaftersigmoid""" +689 2 regularizer """no""" +689 2 optimizer """adadelta""" +689 2 training_loop """lcwa""" +689 2 evaluator """rankbased""" +689 3 dataset """kinships""" +689 3 model """structuredembedding""" +689 3 loss """bceaftersigmoid""" +689 3 regularizer """no""" +689 3 optimizer """adadelta""" +689 3 training_loop """lcwa""" +689 3 evaluator """rankbased""" +689 4 dataset """kinships""" +689 4 model """structuredembedding""" +689 4 loss """bceaftersigmoid""" +689 4 regularizer """no""" +689 4 optimizer """adadelta""" +689 4 training_loop """lcwa""" +689 4 evaluator """rankbased""" +689 5 dataset """kinships""" +689 5 model """structuredembedding""" +689 5 loss """bceaftersigmoid""" +689 5 regularizer """no""" +689 5 optimizer """adadelta""" +689 5 training_loop """lcwa""" +689 5 evaluator """rankbased""" +689 6 dataset """kinships""" +689 6 model """structuredembedding""" +689 6 loss """bceaftersigmoid""" +689 6 regularizer """no""" +689 6 optimizer """adadelta""" +689 6 training_loop """lcwa""" +689 6 evaluator """rankbased""" +689 7 dataset """kinships""" +689 7 model """structuredembedding""" +689 7 loss """bceaftersigmoid""" +689 7 regularizer """no""" +689 7 optimizer """adadelta""" +689 7 training_loop """lcwa""" +689 7 evaluator """rankbased""" 
+689 8 dataset """kinships""" +689 8 model """structuredembedding""" +689 8 loss """bceaftersigmoid""" +689 8 regularizer """no""" +689 8 optimizer """adadelta""" +689 8 training_loop """lcwa""" +689 8 evaluator """rankbased""" +689 9 dataset """kinships""" +689 9 model """structuredembedding""" +689 9 loss """bceaftersigmoid""" +689 9 regularizer """no""" +689 9 optimizer """adadelta""" +689 9 training_loop """lcwa""" +689 9 evaluator """rankbased""" +689 10 dataset """kinships""" +689 10 model """structuredembedding""" +689 10 loss """bceaftersigmoid""" +689 10 regularizer """no""" +689 10 optimizer """adadelta""" +689 10 training_loop """lcwa""" +689 10 evaluator """rankbased""" +689 11 dataset """kinships""" +689 11 model """structuredembedding""" +689 11 loss """bceaftersigmoid""" +689 11 regularizer """no""" +689 11 optimizer """adadelta""" +689 11 training_loop """lcwa""" +689 11 evaluator """rankbased""" +689 12 dataset """kinships""" +689 12 model """structuredembedding""" +689 12 loss """bceaftersigmoid""" +689 12 regularizer """no""" +689 12 optimizer """adadelta""" +689 12 training_loop """lcwa""" +689 12 evaluator """rankbased""" +689 13 dataset """kinships""" +689 13 model """structuredembedding""" +689 13 loss """bceaftersigmoid""" +689 13 regularizer """no""" +689 13 optimizer """adadelta""" +689 13 training_loop """lcwa""" +689 13 evaluator """rankbased""" +689 14 dataset """kinships""" +689 14 model """structuredembedding""" +689 14 loss """bceaftersigmoid""" +689 14 regularizer """no""" +689 14 optimizer """adadelta""" +689 14 training_loop """lcwa""" +689 14 evaluator """rankbased""" +689 15 dataset """kinships""" +689 15 model """structuredembedding""" +689 15 loss """bceaftersigmoid""" +689 15 regularizer """no""" +689 15 optimizer """adadelta""" +689 15 training_loop """lcwa""" +689 15 evaluator """rankbased""" +689 16 dataset """kinships""" +689 16 model """structuredembedding""" +689 16 loss """bceaftersigmoid""" +689 16 regularizer 
"""no""" +689 16 optimizer """adadelta""" +689 16 training_loop """lcwa""" +689 16 evaluator """rankbased""" +689 17 dataset """kinships""" +689 17 model """structuredembedding""" +689 17 loss """bceaftersigmoid""" +689 17 regularizer """no""" +689 17 optimizer """adadelta""" +689 17 training_loop """lcwa""" +689 17 evaluator """rankbased""" +689 18 dataset """kinships""" +689 18 model """structuredembedding""" +689 18 loss """bceaftersigmoid""" +689 18 regularizer """no""" +689 18 optimizer """adadelta""" +689 18 training_loop """lcwa""" +689 18 evaluator """rankbased""" +689 19 dataset """kinships""" +689 19 model """structuredembedding""" +689 19 loss """bceaftersigmoid""" +689 19 regularizer """no""" +689 19 optimizer """adadelta""" +689 19 training_loop """lcwa""" +689 19 evaluator """rankbased""" +689 20 dataset """kinships""" +689 20 model """structuredembedding""" +689 20 loss """bceaftersigmoid""" +689 20 regularizer """no""" +689 20 optimizer """adadelta""" +689 20 training_loop """lcwa""" +689 20 evaluator """rankbased""" +689 21 dataset """kinships""" +689 21 model """structuredembedding""" +689 21 loss """bceaftersigmoid""" +689 21 regularizer """no""" +689 21 optimizer """adadelta""" +689 21 training_loop """lcwa""" +689 21 evaluator """rankbased""" +689 22 dataset """kinships""" +689 22 model """structuredembedding""" +689 22 loss """bceaftersigmoid""" +689 22 regularizer """no""" +689 22 optimizer """adadelta""" +689 22 training_loop """lcwa""" +689 22 evaluator """rankbased""" +689 23 dataset """kinships""" +689 23 model """structuredembedding""" +689 23 loss """bceaftersigmoid""" +689 23 regularizer """no""" +689 23 optimizer """adadelta""" +689 23 training_loop """lcwa""" +689 23 evaluator """rankbased""" +689 24 dataset """kinships""" +689 24 model """structuredembedding""" +689 24 loss """bceaftersigmoid""" +689 24 regularizer """no""" +689 24 optimizer """adadelta""" +689 24 training_loop """lcwa""" +689 24 evaluator """rankbased""" +689 25 
dataset """kinships""" +689 25 model """structuredembedding""" +689 25 loss """bceaftersigmoid""" +689 25 regularizer """no""" +689 25 optimizer """adadelta""" +689 25 training_loop """lcwa""" +689 25 evaluator """rankbased""" +689 26 dataset """kinships""" +689 26 model """structuredembedding""" +689 26 loss """bceaftersigmoid""" +689 26 regularizer """no""" +689 26 optimizer """adadelta""" +689 26 training_loop """lcwa""" +689 26 evaluator """rankbased""" +689 27 dataset """kinships""" +689 27 model """structuredembedding""" +689 27 loss """bceaftersigmoid""" +689 27 regularizer """no""" +689 27 optimizer """adadelta""" +689 27 training_loop """lcwa""" +689 27 evaluator """rankbased""" +689 28 dataset """kinships""" +689 28 model """structuredembedding""" +689 28 loss """bceaftersigmoid""" +689 28 regularizer """no""" +689 28 optimizer """adadelta""" +689 28 training_loop """lcwa""" +689 28 evaluator """rankbased""" +689 29 dataset """kinships""" +689 29 model """structuredembedding""" +689 29 loss """bceaftersigmoid""" +689 29 regularizer """no""" +689 29 optimizer """adadelta""" +689 29 training_loop """lcwa""" +689 29 evaluator """rankbased""" +689 30 dataset """kinships""" +689 30 model """structuredembedding""" +689 30 loss """bceaftersigmoid""" +689 30 regularizer """no""" +689 30 optimizer """adadelta""" +689 30 training_loop """lcwa""" +689 30 evaluator """rankbased""" +689 31 dataset """kinships""" +689 31 model """structuredembedding""" +689 31 loss """bceaftersigmoid""" +689 31 regularizer """no""" +689 31 optimizer """adadelta""" +689 31 training_loop """lcwa""" +689 31 evaluator """rankbased""" +689 32 dataset """kinships""" +689 32 model """structuredembedding""" +689 32 loss """bceaftersigmoid""" +689 32 regularizer """no""" +689 32 optimizer """adadelta""" +689 32 training_loop """lcwa""" +689 32 evaluator """rankbased""" +689 33 dataset """kinships""" +689 33 model """structuredembedding""" +689 33 loss """bceaftersigmoid""" +689 33 regularizer 
"""no""" +689 33 optimizer """adadelta""" +689 33 training_loop """lcwa""" +689 33 evaluator """rankbased""" +689 34 dataset """kinships""" +689 34 model """structuredembedding""" +689 34 loss """bceaftersigmoid""" +689 34 regularizer """no""" +689 34 optimizer """adadelta""" +689 34 training_loop """lcwa""" +689 34 evaluator """rankbased""" +689 35 dataset """kinships""" +689 35 model """structuredembedding""" +689 35 loss """bceaftersigmoid""" +689 35 regularizer """no""" +689 35 optimizer """adadelta""" +689 35 training_loop """lcwa""" +689 35 evaluator """rankbased""" +689 36 dataset """kinships""" +689 36 model """structuredembedding""" +689 36 loss """bceaftersigmoid""" +689 36 regularizer """no""" +689 36 optimizer """adadelta""" +689 36 training_loop """lcwa""" +689 36 evaluator """rankbased""" +689 37 dataset """kinships""" +689 37 model """structuredembedding""" +689 37 loss """bceaftersigmoid""" +689 37 regularizer """no""" +689 37 optimizer """adadelta""" +689 37 training_loop """lcwa""" +689 37 evaluator """rankbased""" +689 38 dataset """kinships""" +689 38 model """structuredembedding""" +689 38 loss """bceaftersigmoid""" +689 38 regularizer """no""" +689 38 optimizer """adadelta""" +689 38 training_loop """lcwa""" +689 38 evaluator """rankbased""" +689 39 dataset """kinships""" +689 39 model """structuredembedding""" +689 39 loss """bceaftersigmoid""" +689 39 regularizer """no""" +689 39 optimizer """adadelta""" +689 39 training_loop """lcwa""" +689 39 evaluator """rankbased""" +689 40 dataset """kinships""" +689 40 model """structuredembedding""" +689 40 loss """bceaftersigmoid""" +689 40 regularizer """no""" +689 40 optimizer """adadelta""" +689 40 training_loop """lcwa""" +689 40 evaluator """rankbased""" +689 41 dataset """kinships""" +689 41 model """structuredembedding""" +689 41 loss """bceaftersigmoid""" +689 41 regularizer """no""" +689 41 optimizer """adadelta""" +689 41 training_loop """lcwa""" +689 41 evaluator """rankbased""" +689 42 
dataset """kinships""" +689 42 model """structuredembedding""" +689 42 loss """bceaftersigmoid""" +689 42 regularizer """no""" +689 42 optimizer """adadelta""" +689 42 training_loop """lcwa""" +689 42 evaluator """rankbased""" +689 43 dataset """kinships""" +689 43 model """structuredembedding""" +689 43 loss """bceaftersigmoid""" +689 43 regularizer """no""" +689 43 optimizer """adadelta""" +689 43 training_loop """lcwa""" +689 43 evaluator """rankbased""" +689 44 dataset """kinships""" +689 44 model """structuredembedding""" +689 44 loss """bceaftersigmoid""" +689 44 regularizer """no""" +689 44 optimizer """adadelta""" +689 44 training_loop """lcwa""" +689 44 evaluator """rankbased""" +689 45 dataset """kinships""" +689 45 model """structuredembedding""" +689 45 loss """bceaftersigmoid""" +689 45 regularizer """no""" +689 45 optimizer """adadelta""" +689 45 training_loop """lcwa""" +689 45 evaluator """rankbased""" +689 46 dataset """kinships""" +689 46 model """structuredembedding""" +689 46 loss """bceaftersigmoid""" +689 46 regularizer """no""" +689 46 optimizer """adadelta""" +689 46 training_loop """lcwa""" +689 46 evaluator """rankbased""" +689 47 dataset """kinships""" +689 47 model """structuredembedding""" +689 47 loss """bceaftersigmoid""" +689 47 regularizer """no""" +689 47 optimizer """adadelta""" +689 47 training_loop """lcwa""" +689 47 evaluator """rankbased""" +689 48 dataset """kinships""" +689 48 model """structuredembedding""" +689 48 loss """bceaftersigmoid""" +689 48 regularizer """no""" +689 48 optimizer """adadelta""" +689 48 training_loop """lcwa""" +689 48 evaluator """rankbased""" +689 49 dataset """kinships""" +689 49 model """structuredembedding""" +689 49 loss """bceaftersigmoid""" +689 49 regularizer """no""" +689 49 optimizer """adadelta""" +689 49 training_loop """lcwa""" +689 49 evaluator """rankbased""" +689 50 dataset """kinships""" +689 50 model """structuredembedding""" +689 50 loss """bceaftersigmoid""" +689 50 regularizer 
"""no""" +689 50 optimizer """adadelta""" +689 50 training_loop """lcwa""" +689 50 evaluator """rankbased""" +689 51 dataset """kinships""" +689 51 model """structuredembedding""" +689 51 loss """bceaftersigmoid""" +689 51 regularizer """no""" +689 51 optimizer """adadelta""" +689 51 training_loop """lcwa""" +689 51 evaluator """rankbased""" +689 52 dataset """kinships""" +689 52 model """structuredembedding""" +689 52 loss """bceaftersigmoid""" +689 52 regularizer """no""" +689 52 optimizer """adadelta""" +689 52 training_loop """lcwa""" +689 52 evaluator """rankbased""" +689 53 dataset """kinships""" +689 53 model """structuredembedding""" +689 53 loss """bceaftersigmoid""" +689 53 regularizer """no""" +689 53 optimizer """adadelta""" +689 53 training_loop """lcwa""" +689 53 evaluator """rankbased""" +689 54 dataset """kinships""" +689 54 model """structuredembedding""" +689 54 loss """bceaftersigmoid""" +689 54 regularizer """no""" +689 54 optimizer """adadelta""" +689 54 training_loop """lcwa""" +689 54 evaluator """rankbased""" +689 55 dataset """kinships""" +689 55 model """structuredembedding""" +689 55 loss """bceaftersigmoid""" +689 55 regularizer """no""" +689 55 optimizer """adadelta""" +689 55 training_loop """lcwa""" +689 55 evaluator """rankbased""" +689 56 dataset """kinships""" +689 56 model """structuredembedding""" +689 56 loss """bceaftersigmoid""" +689 56 regularizer """no""" +689 56 optimizer """adadelta""" +689 56 training_loop """lcwa""" +689 56 evaluator """rankbased""" +689 57 dataset """kinships""" +689 57 model """structuredembedding""" +689 57 loss """bceaftersigmoid""" +689 57 regularizer """no""" +689 57 optimizer """adadelta""" +689 57 training_loop """lcwa""" +689 57 evaluator """rankbased""" +689 58 dataset """kinships""" +689 58 model """structuredembedding""" +689 58 loss """bceaftersigmoid""" +689 58 regularizer """no""" +689 58 optimizer """adadelta""" +689 58 training_loop """lcwa""" +689 58 evaluator """rankbased""" +689 59 
dataset """kinships""" +689 59 model """structuredembedding""" +689 59 loss """bceaftersigmoid""" +689 59 regularizer """no""" +689 59 optimizer """adadelta""" +689 59 training_loop """lcwa""" +689 59 evaluator """rankbased""" +689 60 dataset """kinships""" +689 60 model """structuredembedding""" +689 60 loss """bceaftersigmoid""" +689 60 regularizer """no""" +689 60 optimizer """adadelta""" +689 60 training_loop """lcwa""" +689 60 evaluator """rankbased""" +689 61 dataset """kinships""" +689 61 model """structuredembedding""" +689 61 loss """bceaftersigmoid""" +689 61 regularizer """no""" +689 61 optimizer """adadelta""" +689 61 training_loop """lcwa""" +689 61 evaluator """rankbased""" +689 62 dataset """kinships""" +689 62 model """structuredembedding""" +689 62 loss """bceaftersigmoid""" +689 62 regularizer """no""" +689 62 optimizer """adadelta""" +689 62 training_loop """lcwa""" +689 62 evaluator """rankbased""" +689 63 dataset """kinships""" +689 63 model """structuredembedding""" +689 63 loss """bceaftersigmoid""" +689 63 regularizer """no""" +689 63 optimizer """adadelta""" +689 63 training_loop """lcwa""" +689 63 evaluator """rankbased""" +689 64 dataset """kinships""" +689 64 model """structuredembedding""" +689 64 loss """bceaftersigmoid""" +689 64 regularizer """no""" +689 64 optimizer """adadelta""" +689 64 training_loop """lcwa""" +689 64 evaluator """rankbased""" +689 65 dataset """kinships""" +689 65 model """structuredembedding""" +689 65 loss """bceaftersigmoid""" +689 65 regularizer """no""" +689 65 optimizer """adadelta""" +689 65 training_loop """lcwa""" +689 65 evaluator """rankbased""" +689 66 dataset """kinships""" +689 66 model """structuredembedding""" +689 66 loss """bceaftersigmoid""" +689 66 regularizer """no""" +689 66 optimizer """adadelta""" +689 66 training_loop """lcwa""" +689 66 evaluator """rankbased""" +689 67 dataset """kinships""" +689 67 model """structuredembedding""" +689 67 loss """bceaftersigmoid""" +689 67 regularizer 
"""no""" +689 67 optimizer """adadelta""" +689 67 training_loop """lcwa""" +689 67 evaluator """rankbased""" +689 68 dataset """kinships""" +689 68 model """structuredembedding""" +689 68 loss """bceaftersigmoid""" +689 68 regularizer """no""" +689 68 optimizer """adadelta""" +689 68 training_loop """lcwa""" +689 68 evaluator """rankbased""" +689 69 dataset """kinships""" +689 69 model """structuredembedding""" +689 69 loss """bceaftersigmoid""" +689 69 regularizer """no""" +689 69 optimizer """adadelta""" +689 69 training_loop """lcwa""" +689 69 evaluator """rankbased""" +689 70 dataset """kinships""" +689 70 model """structuredembedding""" +689 70 loss """bceaftersigmoid""" +689 70 regularizer """no""" +689 70 optimizer """adadelta""" +689 70 training_loop """lcwa""" +689 70 evaluator """rankbased""" +689 71 dataset """kinships""" +689 71 model """structuredembedding""" +689 71 loss """bceaftersigmoid""" +689 71 regularizer """no""" +689 71 optimizer """adadelta""" +689 71 training_loop """lcwa""" +689 71 evaluator """rankbased""" +689 72 dataset """kinships""" +689 72 model """structuredembedding""" +689 72 loss """bceaftersigmoid""" +689 72 regularizer """no""" +689 72 optimizer """adadelta""" +689 72 training_loop """lcwa""" +689 72 evaluator """rankbased""" +689 73 dataset """kinships""" +689 73 model """structuredembedding""" +689 73 loss """bceaftersigmoid""" +689 73 regularizer """no""" +689 73 optimizer """adadelta""" +689 73 training_loop """lcwa""" +689 73 evaluator """rankbased""" +689 74 dataset """kinships""" +689 74 model """structuredembedding""" +689 74 loss """bceaftersigmoid""" +689 74 regularizer """no""" +689 74 optimizer """adadelta""" +689 74 training_loop """lcwa""" +689 74 evaluator """rankbased""" +689 75 dataset """kinships""" +689 75 model """structuredembedding""" +689 75 loss """bceaftersigmoid""" +689 75 regularizer """no""" +689 75 optimizer """adadelta""" +689 75 training_loop """lcwa""" +689 75 evaluator """rankbased""" +689 76 
dataset """kinships""" +689 76 model """structuredembedding""" +689 76 loss """bceaftersigmoid""" +689 76 regularizer """no""" +689 76 optimizer """adadelta""" +689 76 training_loop """lcwa""" +689 76 evaluator """rankbased""" +689 77 dataset """kinships""" +689 77 model """structuredembedding""" +689 77 loss """bceaftersigmoid""" +689 77 regularizer """no""" +689 77 optimizer """adadelta""" +689 77 training_loop """lcwa""" +689 77 evaluator """rankbased""" +689 78 dataset """kinships""" +689 78 model """structuredembedding""" +689 78 loss """bceaftersigmoid""" +689 78 regularizer """no""" +689 78 optimizer """adadelta""" +689 78 training_loop """lcwa""" +689 78 evaluator """rankbased""" +689 79 dataset """kinships""" +689 79 model """structuredembedding""" +689 79 loss """bceaftersigmoid""" +689 79 regularizer """no""" +689 79 optimizer """adadelta""" +689 79 training_loop """lcwa""" +689 79 evaluator """rankbased""" +689 80 dataset """kinships""" +689 80 model """structuredembedding""" +689 80 loss """bceaftersigmoid""" +689 80 regularizer """no""" +689 80 optimizer """adadelta""" +689 80 training_loop """lcwa""" +689 80 evaluator """rankbased""" +689 81 dataset """kinships""" +689 81 model """structuredembedding""" +689 81 loss """bceaftersigmoid""" +689 81 regularizer """no""" +689 81 optimizer """adadelta""" +689 81 training_loop """lcwa""" +689 81 evaluator """rankbased""" +689 82 dataset """kinships""" +689 82 model """structuredembedding""" +689 82 loss """bceaftersigmoid""" +689 82 regularizer """no""" +689 82 optimizer """adadelta""" +689 82 training_loop """lcwa""" +689 82 evaluator """rankbased""" +689 83 dataset """kinships""" +689 83 model """structuredembedding""" +689 83 loss """bceaftersigmoid""" +689 83 regularizer """no""" +689 83 optimizer """adadelta""" +689 83 training_loop """lcwa""" +689 83 evaluator """rankbased""" +689 84 dataset """kinships""" +689 84 model """structuredembedding""" +689 84 loss """bceaftersigmoid""" +689 84 regularizer 
"""no""" +689 84 optimizer """adadelta""" +689 84 training_loop """lcwa""" +689 84 evaluator """rankbased""" +689 85 dataset """kinships""" +689 85 model """structuredembedding""" +689 85 loss """bceaftersigmoid""" +689 85 regularizer """no""" +689 85 optimizer """adadelta""" +689 85 training_loop """lcwa""" +689 85 evaluator """rankbased""" +689 86 dataset """kinships""" +689 86 model """structuredembedding""" +689 86 loss """bceaftersigmoid""" +689 86 regularizer """no""" +689 86 optimizer """adadelta""" +689 86 training_loop """lcwa""" +689 86 evaluator """rankbased""" +689 87 dataset """kinships""" +689 87 model """structuredembedding""" +689 87 loss """bceaftersigmoid""" +689 87 regularizer """no""" +689 87 optimizer """adadelta""" +689 87 training_loop """lcwa""" +689 87 evaluator """rankbased""" +689 88 dataset """kinships""" +689 88 model """structuredembedding""" +689 88 loss """bceaftersigmoid""" +689 88 regularizer """no""" +689 88 optimizer """adadelta""" +689 88 training_loop """lcwa""" +689 88 evaluator """rankbased""" +689 89 dataset """kinships""" +689 89 model """structuredembedding""" +689 89 loss """bceaftersigmoid""" +689 89 regularizer """no""" +689 89 optimizer """adadelta""" +689 89 training_loop """lcwa""" +689 89 evaluator """rankbased""" +689 90 dataset """kinships""" +689 90 model """structuredembedding""" +689 90 loss """bceaftersigmoid""" +689 90 regularizer """no""" +689 90 optimizer """adadelta""" +689 90 training_loop """lcwa""" +689 90 evaluator """rankbased""" +689 91 dataset """kinships""" +689 91 model """structuredembedding""" +689 91 loss """bceaftersigmoid""" +689 91 regularizer """no""" +689 91 optimizer """adadelta""" +689 91 training_loop """lcwa""" +689 91 evaluator """rankbased""" +689 92 dataset """kinships""" +689 92 model """structuredembedding""" +689 92 loss """bceaftersigmoid""" +689 92 regularizer """no""" +689 92 optimizer """adadelta""" +689 92 training_loop """lcwa""" +689 92 evaluator """rankbased""" +689 93 
dataset """kinships""" +689 93 model """structuredembedding""" +689 93 loss """bceaftersigmoid""" +689 93 regularizer """no""" +689 93 optimizer """adadelta""" +689 93 training_loop """lcwa""" +689 93 evaluator """rankbased""" +689 94 dataset """kinships""" +689 94 model """structuredembedding""" +689 94 loss """bceaftersigmoid""" +689 94 regularizer """no""" +689 94 optimizer """adadelta""" +689 94 training_loop """lcwa""" +689 94 evaluator """rankbased""" +689 95 dataset """kinships""" +689 95 model """structuredembedding""" +689 95 loss """bceaftersigmoid""" +689 95 regularizer """no""" +689 95 optimizer """adadelta""" +689 95 training_loop """lcwa""" +689 95 evaluator """rankbased""" +689 96 dataset """kinships""" +689 96 model """structuredembedding""" +689 96 loss """bceaftersigmoid""" +689 96 regularizer """no""" +689 96 optimizer """adadelta""" +689 96 training_loop """lcwa""" +689 96 evaluator """rankbased""" +689 97 dataset """kinships""" +689 97 model """structuredembedding""" +689 97 loss """bceaftersigmoid""" +689 97 regularizer """no""" +689 97 optimizer """adadelta""" +689 97 training_loop """lcwa""" +689 97 evaluator """rankbased""" +689 98 dataset """kinships""" +689 98 model """structuredembedding""" +689 98 loss """bceaftersigmoid""" +689 98 regularizer """no""" +689 98 optimizer """adadelta""" +689 98 training_loop """lcwa""" +689 98 evaluator """rankbased""" +689 99 dataset """kinships""" +689 99 model """structuredembedding""" +689 99 loss """bceaftersigmoid""" +689 99 regularizer """no""" +689 99 optimizer """adadelta""" +689 99 training_loop """lcwa""" +689 99 evaluator """rankbased""" +689 100 dataset """kinships""" +689 100 model """structuredembedding""" +689 100 loss """bceaftersigmoid""" +689 100 regularizer """no""" +689 100 optimizer """adadelta""" +689 100 training_loop """lcwa""" +689 100 evaluator """rankbased""" +690 1 model.embedding_dim 1.0 +690 1 model.scoring_fct_norm 1.0 +690 1 training.batch_size 1.0 +690 1 
training.label_smoothing 0.0347151851963088 +690 2 model.embedding_dim 0.0 +690 2 model.scoring_fct_norm 1.0 +690 2 training.batch_size 2.0 +690 2 training.label_smoothing 0.007530468843462516 +690 3 model.embedding_dim 1.0 +690 3 model.scoring_fct_norm 1.0 +690 3 training.batch_size 0.0 +690 3 training.label_smoothing 0.007505833775978299 +690 4 model.embedding_dim 2.0 +690 4 model.scoring_fct_norm 1.0 +690 4 training.batch_size 1.0 +690 4 training.label_smoothing 0.13061379042079108 +690 5 model.embedding_dim 1.0 +690 5 model.scoring_fct_norm 1.0 +690 5 training.batch_size 0.0 +690 5 training.label_smoothing 0.002888954124777526 +690 6 model.embedding_dim 1.0 +690 6 model.scoring_fct_norm 1.0 +690 6 training.batch_size 0.0 +690 6 training.label_smoothing 0.05933798403289312 +690 7 model.embedding_dim 1.0 +690 7 model.scoring_fct_norm 1.0 +690 7 training.batch_size 1.0 +690 7 training.label_smoothing 0.02579446609131757 +690 8 model.embedding_dim 2.0 +690 8 model.scoring_fct_norm 2.0 +690 8 training.batch_size 2.0 +690 8 training.label_smoothing 0.0084408214714488 +690 9 model.embedding_dim 0.0 +690 9 model.scoring_fct_norm 1.0 +690 9 training.batch_size 1.0 +690 9 training.label_smoothing 0.012071567880679255 +690 10 model.embedding_dim 1.0 +690 10 model.scoring_fct_norm 1.0 +690 10 training.batch_size 1.0 +690 10 training.label_smoothing 0.18359063380836382 +690 11 model.embedding_dim 0.0 +690 11 model.scoring_fct_norm 2.0 +690 11 training.batch_size 2.0 +690 11 training.label_smoothing 0.07821905157513934 +690 12 model.embedding_dim 1.0 +690 12 model.scoring_fct_norm 1.0 +690 12 training.batch_size 0.0 +690 12 training.label_smoothing 0.008041343089837881 +690 13 model.embedding_dim 2.0 +690 13 model.scoring_fct_norm 1.0 +690 13 training.batch_size 2.0 +690 13 training.label_smoothing 0.5730290992683782 +690 14 model.embedding_dim 2.0 +690 14 model.scoring_fct_norm 2.0 +690 14 training.batch_size 1.0 +690 14 training.label_smoothing 0.015812383339169254 +690 15 
model.embedding_dim 2.0 +690 15 model.scoring_fct_norm 1.0 +690 15 training.batch_size 1.0 +690 15 training.label_smoothing 0.0028613339711618005 +690 16 model.embedding_dim 0.0 +690 16 model.scoring_fct_norm 1.0 +690 16 training.batch_size 0.0 +690 16 training.label_smoothing 0.051822946926521035 +690 17 model.embedding_dim 2.0 +690 17 model.scoring_fct_norm 1.0 +690 17 training.batch_size 0.0 +690 17 training.label_smoothing 0.12123755584534601 +690 18 model.embedding_dim 1.0 +690 18 model.scoring_fct_norm 2.0 +690 18 training.batch_size 0.0 +690 18 training.label_smoothing 0.005344288121361115 +690 19 model.embedding_dim 0.0 +690 19 model.scoring_fct_norm 2.0 +690 19 training.batch_size 2.0 +690 19 training.label_smoothing 0.016340314911775483 +690 20 model.embedding_dim 2.0 +690 20 model.scoring_fct_norm 1.0 +690 20 training.batch_size 2.0 +690 20 training.label_smoothing 0.8544780153642503 +690 21 model.embedding_dim 2.0 +690 21 model.scoring_fct_norm 2.0 +690 21 training.batch_size 2.0 +690 21 training.label_smoothing 0.006345217014517899 +690 22 model.embedding_dim 2.0 +690 22 model.scoring_fct_norm 1.0 +690 22 training.batch_size 0.0 +690 22 training.label_smoothing 0.04703858708453896 +690 23 model.embedding_dim 0.0 +690 23 model.scoring_fct_norm 1.0 +690 23 training.batch_size 2.0 +690 23 training.label_smoothing 0.019256221565216183 +690 24 model.embedding_dim 2.0 +690 24 model.scoring_fct_norm 1.0 +690 24 training.batch_size 2.0 +690 24 training.label_smoothing 0.011728931621044228 +690 25 model.embedding_dim 2.0 +690 25 model.scoring_fct_norm 1.0 +690 25 training.batch_size 0.0 +690 25 training.label_smoothing 0.03105587012646517 +690 26 model.embedding_dim 1.0 +690 26 model.scoring_fct_norm 1.0 +690 26 training.batch_size 2.0 +690 26 training.label_smoothing 0.0014986728572919021 +690 27 model.embedding_dim 0.0 +690 27 model.scoring_fct_norm 1.0 +690 27 training.batch_size 1.0 +690 27 training.label_smoothing 0.3957929412331204 +690 28 
model.embedding_dim 0.0 +690 28 model.scoring_fct_norm 2.0 +690 28 training.batch_size 1.0 +690 28 training.label_smoothing 0.028053597286386282 +690 29 model.embedding_dim 1.0 +690 29 model.scoring_fct_norm 1.0 +690 29 training.batch_size 1.0 +690 29 training.label_smoothing 0.3082817376925708 +690 30 model.embedding_dim 0.0 +690 30 model.scoring_fct_norm 2.0 +690 30 training.batch_size 0.0 +690 30 training.label_smoothing 0.11457158515176166 +690 31 model.embedding_dim 0.0 +690 31 model.scoring_fct_norm 2.0 +690 31 training.batch_size 0.0 +690 31 training.label_smoothing 0.34086547779560783 +690 32 model.embedding_dim 1.0 +690 32 model.scoring_fct_norm 2.0 +690 32 training.batch_size 2.0 +690 32 training.label_smoothing 0.6895371694361287 +690 33 model.embedding_dim 0.0 +690 33 model.scoring_fct_norm 2.0 +690 33 training.batch_size 2.0 +690 33 training.label_smoothing 0.0230883654842674 +690 34 model.embedding_dim 0.0 +690 34 model.scoring_fct_norm 2.0 +690 34 training.batch_size 0.0 +690 34 training.label_smoothing 0.03962111439727035 +690 35 model.embedding_dim 2.0 +690 35 model.scoring_fct_norm 2.0 +690 35 training.batch_size 0.0 +690 35 training.label_smoothing 0.013038439713225448 +690 36 model.embedding_dim 1.0 +690 36 model.scoring_fct_norm 1.0 +690 36 training.batch_size 1.0 +690 36 training.label_smoothing 0.02215379812081622 +690 37 model.embedding_dim 2.0 +690 37 model.scoring_fct_norm 2.0 +690 37 training.batch_size 0.0 +690 37 training.label_smoothing 0.9170046131285964 +690 38 model.embedding_dim 2.0 +690 38 model.scoring_fct_norm 2.0 +690 38 training.batch_size 2.0 +690 38 training.label_smoothing 0.003368162556915711 +690 39 model.embedding_dim 1.0 +690 39 model.scoring_fct_norm 2.0 +690 39 training.batch_size 2.0 +690 39 training.label_smoothing 0.5208876064616489 +690 40 model.embedding_dim 2.0 +690 40 model.scoring_fct_norm 1.0 +690 40 training.batch_size 1.0 +690 40 training.label_smoothing 0.02299778775551322 +690 41 model.embedding_dim 1.0 
+690 41 model.scoring_fct_norm 1.0 +690 41 training.batch_size 1.0 +690 41 training.label_smoothing 0.03018411380498794 +690 42 model.embedding_dim 0.0 +690 42 model.scoring_fct_norm 1.0 +690 42 training.batch_size 0.0 +690 42 training.label_smoothing 0.004569702314573695 +690 43 model.embedding_dim 2.0 +690 43 model.scoring_fct_norm 1.0 +690 43 training.batch_size 1.0 +690 43 training.label_smoothing 0.6010997699204402 +690 44 model.embedding_dim 2.0 +690 44 model.scoring_fct_norm 1.0 +690 44 training.batch_size 1.0 +690 44 training.label_smoothing 0.001342503281346926 +690 45 model.embedding_dim 0.0 +690 45 model.scoring_fct_norm 2.0 +690 45 training.batch_size 2.0 +690 45 training.label_smoothing 0.004006767435897044 +690 46 model.embedding_dim 1.0 +690 46 model.scoring_fct_norm 1.0 +690 46 training.batch_size 2.0 +690 46 training.label_smoothing 0.11218831415619498 +690 47 model.embedding_dim 2.0 +690 47 model.scoring_fct_norm 2.0 +690 47 training.batch_size 2.0 +690 47 training.label_smoothing 0.5169345586666328 +690 48 model.embedding_dim 0.0 +690 48 model.scoring_fct_norm 1.0 +690 48 training.batch_size 0.0 +690 48 training.label_smoothing 0.012391329860686064 +690 49 model.embedding_dim 0.0 +690 49 model.scoring_fct_norm 2.0 +690 49 training.batch_size 2.0 +690 49 training.label_smoothing 0.0010103493551094837 +690 50 model.embedding_dim 2.0 +690 50 model.scoring_fct_norm 2.0 +690 50 training.batch_size 0.0 +690 50 training.label_smoothing 0.07235328755529492 +690 51 model.embedding_dim 0.0 +690 51 model.scoring_fct_norm 1.0 +690 51 training.batch_size 0.0 +690 51 training.label_smoothing 0.0015378072202153416 +690 52 model.embedding_dim 0.0 +690 52 model.scoring_fct_norm 2.0 +690 52 training.batch_size 0.0 +690 52 training.label_smoothing 0.008733510497028711 +690 53 model.embedding_dim 0.0 +690 53 model.scoring_fct_norm 1.0 +690 53 training.batch_size 2.0 +690 53 training.label_smoothing 0.09193771953552836 +690 54 model.embedding_dim 0.0 +690 54 
model.scoring_fct_norm 1.0 +690 54 training.batch_size 2.0 +690 54 training.label_smoothing 0.023861889958868534 +690 55 model.embedding_dim 0.0 +690 55 model.scoring_fct_norm 1.0 +690 55 training.batch_size 2.0 +690 55 training.label_smoothing 0.02167299373319159 +690 56 model.embedding_dim 1.0 +690 56 model.scoring_fct_norm 1.0 +690 56 training.batch_size 2.0 +690 56 training.label_smoothing 0.0013819330745218452 +690 57 model.embedding_dim 1.0 +690 57 model.scoring_fct_norm 1.0 +690 57 training.batch_size 1.0 +690 57 training.label_smoothing 0.947331355630003 +690 58 model.embedding_dim 0.0 +690 58 model.scoring_fct_norm 1.0 +690 58 training.batch_size 0.0 +690 58 training.label_smoothing 0.09810768159275636 +690 59 model.embedding_dim 2.0 +690 59 model.scoring_fct_norm 1.0 +690 59 training.batch_size 2.0 +690 59 training.label_smoothing 0.002568063445013147 +690 60 model.embedding_dim 0.0 +690 60 model.scoring_fct_norm 2.0 +690 60 training.batch_size 0.0 +690 60 training.label_smoothing 0.002793381750641283 +690 61 model.embedding_dim 0.0 +690 61 model.scoring_fct_norm 2.0 +690 61 training.batch_size 0.0 +690 61 training.label_smoothing 0.041036466864964975 +690 62 model.embedding_dim 0.0 +690 62 model.scoring_fct_norm 2.0 +690 62 training.batch_size 2.0 +690 62 training.label_smoothing 0.0010657092540706885 +690 63 model.embedding_dim 2.0 +690 63 model.scoring_fct_norm 1.0 +690 63 training.batch_size 0.0 +690 63 training.label_smoothing 0.04758834704703965 +690 64 model.embedding_dim 2.0 +690 64 model.scoring_fct_norm 2.0 +690 64 training.batch_size 2.0 +690 64 training.label_smoothing 0.5936762980399842 +690 65 model.embedding_dim 1.0 +690 65 model.scoring_fct_norm 1.0 +690 65 training.batch_size 2.0 +690 65 training.label_smoothing 0.002864952868493033 +690 66 model.embedding_dim 2.0 +690 66 model.scoring_fct_norm 1.0 +690 66 training.batch_size 0.0 +690 66 training.label_smoothing 0.743087276165948 +690 67 model.embedding_dim 2.0 +690 67 
model.scoring_fct_norm 1.0 +690 67 training.batch_size 1.0 +690 67 training.label_smoothing 0.7849680468743075 +690 68 model.embedding_dim 1.0 +690 68 model.scoring_fct_norm 2.0 +690 68 training.batch_size 1.0 +690 68 training.label_smoothing 0.04832115356503955 +690 69 model.embedding_dim 2.0 +690 69 model.scoring_fct_norm 2.0 +690 69 training.batch_size 0.0 +690 69 training.label_smoothing 0.02738703112707926 +690 70 model.embedding_dim 0.0 +690 70 model.scoring_fct_norm 2.0 +690 70 training.batch_size 2.0 +690 70 training.label_smoothing 0.00445410778344817 +690 71 model.embedding_dim 2.0 +690 71 model.scoring_fct_norm 1.0 +690 71 training.batch_size 0.0 +690 71 training.label_smoothing 0.22413878365273798 +690 72 model.embedding_dim 2.0 +690 72 model.scoring_fct_norm 2.0 +690 72 training.batch_size 1.0 +690 72 training.label_smoothing 0.002718293676572863 +690 73 model.embedding_dim 2.0 +690 73 model.scoring_fct_norm 2.0 +690 73 training.batch_size 0.0 +690 73 training.label_smoothing 0.0384851854288241 +690 74 model.embedding_dim 1.0 +690 74 model.scoring_fct_norm 1.0 +690 74 training.batch_size 2.0 +690 74 training.label_smoothing 0.4241527324905147 +690 75 model.embedding_dim 2.0 +690 75 model.scoring_fct_norm 1.0 +690 75 training.batch_size 0.0 +690 75 training.label_smoothing 0.19979726776172577 +690 76 model.embedding_dim 1.0 +690 76 model.scoring_fct_norm 1.0 +690 76 training.batch_size 2.0 +690 76 training.label_smoothing 0.0027951044056770797 +690 77 model.embedding_dim 2.0 +690 77 model.scoring_fct_norm 1.0 +690 77 training.batch_size 0.0 +690 77 training.label_smoothing 0.7745651162190409 +690 78 model.embedding_dim 2.0 +690 78 model.scoring_fct_norm 2.0 +690 78 training.batch_size 2.0 +690 78 training.label_smoothing 0.0015241300678205242 +690 79 model.embedding_dim 2.0 +690 79 model.scoring_fct_norm 2.0 +690 79 training.batch_size 2.0 +690 79 training.label_smoothing 0.21325006065513905 +690 80 model.embedding_dim 1.0 +690 80 model.scoring_fct_norm 
2.0 +690 80 training.batch_size 1.0 +690 80 training.label_smoothing 0.10996434557021477 +690 81 model.embedding_dim 0.0 +690 81 model.scoring_fct_norm 2.0 +690 81 training.batch_size 0.0 +690 81 training.label_smoothing 0.02050564691259981 +690 82 model.embedding_dim 0.0 +690 82 model.scoring_fct_norm 2.0 +690 82 training.batch_size 1.0 +690 82 training.label_smoothing 0.061097921895473764 +690 83 model.embedding_dim 2.0 +690 83 model.scoring_fct_norm 2.0 +690 83 training.batch_size 0.0 +690 83 training.label_smoothing 0.010307658062665606 +690 84 model.embedding_dim 0.0 +690 84 model.scoring_fct_norm 1.0 +690 84 training.batch_size 2.0 +690 84 training.label_smoothing 0.13950163835947632 +690 85 model.embedding_dim 2.0 +690 85 model.scoring_fct_norm 1.0 +690 85 training.batch_size 2.0 +690 85 training.label_smoothing 0.00198705141220875 +690 86 model.embedding_dim 1.0 +690 86 model.scoring_fct_norm 1.0 +690 86 training.batch_size 2.0 +690 86 training.label_smoothing 0.045296047730845085 +690 87 model.embedding_dim 2.0 +690 87 model.scoring_fct_norm 1.0 +690 87 training.batch_size 2.0 +690 87 training.label_smoothing 0.011621923403981346 +690 88 model.embedding_dim 1.0 +690 88 model.scoring_fct_norm 1.0 +690 88 training.batch_size 2.0 +690 88 training.label_smoothing 0.14834155737186322 +690 89 model.embedding_dim 1.0 +690 89 model.scoring_fct_norm 2.0 +690 89 training.batch_size 1.0 +690 89 training.label_smoothing 0.001910720583680405 +690 90 model.embedding_dim 2.0 +690 90 model.scoring_fct_norm 1.0 +690 90 training.batch_size 1.0 +690 90 training.label_smoothing 0.0030089195612340624 +690 91 model.embedding_dim 0.0 +690 91 model.scoring_fct_norm 1.0 +690 91 training.batch_size 0.0 +690 91 training.label_smoothing 0.0215164631591625 +690 92 model.embedding_dim 0.0 +690 92 model.scoring_fct_norm 2.0 +690 92 training.batch_size 1.0 +690 92 training.label_smoothing 0.014815961595864126 +690 93 model.embedding_dim 1.0 +690 93 model.scoring_fct_norm 1.0 +690 93 
training.batch_size 2.0 +690 93 training.label_smoothing 0.01100278896250802 +690 94 model.embedding_dim 2.0 +690 94 model.scoring_fct_norm 1.0 +690 94 training.batch_size 1.0 +690 94 training.label_smoothing 0.006579458110329571 +690 95 model.embedding_dim 2.0 +690 95 model.scoring_fct_norm 2.0 +690 95 training.batch_size 2.0 +690 95 training.label_smoothing 0.00835274289567174 +690 96 model.embedding_dim 1.0 +690 96 model.scoring_fct_norm 1.0 +690 96 training.batch_size 1.0 +690 96 training.label_smoothing 0.04914074053636597 +690 97 model.embedding_dim 0.0 +690 97 model.scoring_fct_norm 2.0 +690 97 training.batch_size 0.0 +690 97 training.label_smoothing 0.10251328495370962 +690 98 model.embedding_dim 0.0 +690 98 model.scoring_fct_norm 1.0 +690 98 training.batch_size 0.0 +690 98 training.label_smoothing 0.31155902963678467 +690 99 model.embedding_dim 2.0 +690 99 model.scoring_fct_norm 1.0 +690 99 training.batch_size 1.0 +690 99 training.label_smoothing 0.35645702211896635 +690 100 model.embedding_dim 0.0 +690 100 model.scoring_fct_norm 1.0 +690 100 training.batch_size 1.0 +690 100 training.label_smoothing 0.0038704389349958255 +690 1 dataset """kinships""" +690 1 model """structuredembedding""" +690 1 loss """softplus""" +690 1 regularizer """no""" +690 1 optimizer """adadelta""" +690 1 training_loop """lcwa""" +690 1 evaluator """rankbased""" +690 2 dataset """kinships""" +690 2 model """structuredembedding""" +690 2 loss """softplus""" +690 2 regularizer """no""" +690 2 optimizer """adadelta""" +690 2 training_loop """lcwa""" +690 2 evaluator """rankbased""" +690 3 dataset """kinships""" +690 3 model """structuredembedding""" +690 3 loss """softplus""" +690 3 regularizer """no""" +690 3 optimizer """adadelta""" +690 3 training_loop """lcwa""" +690 3 evaluator """rankbased""" +690 4 dataset """kinships""" +690 4 model """structuredembedding""" +690 4 loss """softplus""" +690 4 regularizer """no""" +690 4 optimizer """adadelta""" +690 4 training_loop """lcwa""" 
+690 4 evaluator """rankbased""" +690 5 dataset """kinships""" +690 5 model """structuredembedding""" +690 5 loss """softplus""" +690 5 regularizer """no""" +690 5 optimizer """adadelta""" +690 5 training_loop """lcwa""" +690 5 evaluator """rankbased""" +690 6 dataset """kinships""" +690 6 model """structuredembedding""" +690 6 loss """softplus""" +690 6 regularizer """no""" +690 6 optimizer """adadelta""" +690 6 training_loop """lcwa""" +690 6 evaluator """rankbased""" +690 7 dataset """kinships""" +690 7 model """structuredembedding""" +690 7 loss """softplus""" +690 7 regularizer """no""" +690 7 optimizer """adadelta""" +690 7 training_loop """lcwa""" +690 7 evaluator """rankbased""" +690 8 dataset """kinships""" +690 8 model """structuredembedding""" +690 8 loss """softplus""" +690 8 regularizer """no""" +690 8 optimizer """adadelta""" +690 8 training_loop """lcwa""" +690 8 evaluator """rankbased""" +690 9 dataset """kinships""" +690 9 model """structuredembedding""" +690 9 loss """softplus""" +690 9 regularizer """no""" +690 9 optimizer """adadelta""" +690 9 training_loop """lcwa""" +690 9 evaluator """rankbased""" +690 10 dataset """kinships""" +690 10 model """structuredembedding""" +690 10 loss """softplus""" +690 10 regularizer """no""" +690 10 optimizer """adadelta""" +690 10 training_loop """lcwa""" +690 10 evaluator """rankbased""" +690 11 dataset """kinships""" +690 11 model """structuredembedding""" +690 11 loss """softplus""" +690 11 regularizer """no""" +690 11 optimizer """adadelta""" +690 11 training_loop """lcwa""" +690 11 evaluator """rankbased""" +690 12 dataset """kinships""" +690 12 model """structuredembedding""" +690 12 loss """softplus""" +690 12 regularizer """no""" +690 12 optimizer """adadelta""" +690 12 training_loop """lcwa""" +690 12 evaluator """rankbased""" +690 13 dataset """kinships""" +690 13 model """structuredembedding""" +690 13 loss """softplus""" +690 13 regularizer """no""" +690 13 optimizer """adadelta""" +690 13 
training_loop """lcwa""" +690 13 evaluator """rankbased""" +690 14 dataset """kinships""" +690 14 model """structuredembedding""" +690 14 loss """softplus""" +690 14 regularizer """no""" +690 14 optimizer """adadelta""" +690 14 training_loop """lcwa""" +690 14 evaluator """rankbased""" +690 15 dataset """kinships""" +690 15 model """structuredembedding""" +690 15 loss """softplus""" +690 15 regularizer """no""" +690 15 optimizer """adadelta""" +690 15 training_loop """lcwa""" +690 15 evaluator """rankbased""" +690 16 dataset """kinships""" +690 16 model """structuredembedding""" +690 16 loss """softplus""" +690 16 regularizer """no""" +690 16 optimizer """adadelta""" +690 16 training_loop """lcwa""" +690 16 evaluator """rankbased""" +690 17 dataset """kinships""" +690 17 model """structuredembedding""" +690 17 loss """softplus""" +690 17 regularizer """no""" +690 17 optimizer """adadelta""" +690 17 training_loop """lcwa""" +690 17 evaluator """rankbased""" +690 18 dataset """kinships""" +690 18 model """structuredembedding""" +690 18 loss """softplus""" +690 18 regularizer """no""" +690 18 optimizer """adadelta""" +690 18 training_loop """lcwa""" +690 18 evaluator """rankbased""" +690 19 dataset """kinships""" +690 19 model """structuredembedding""" +690 19 loss """softplus""" +690 19 regularizer """no""" +690 19 optimizer """adadelta""" +690 19 training_loop """lcwa""" +690 19 evaluator """rankbased""" +690 20 dataset """kinships""" +690 20 model """structuredembedding""" +690 20 loss """softplus""" +690 20 regularizer """no""" +690 20 optimizer """adadelta""" +690 20 training_loop """lcwa""" +690 20 evaluator """rankbased""" +690 21 dataset """kinships""" +690 21 model """structuredembedding""" +690 21 loss """softplus""" +690 21 regularizer """no""" +690 21 optimizer """adadelta""" +690 21 training_loop """lcwa""" +690 21 evaluator """rankbased""" +690 22 dataset """kinships""" +690 22 model """structuredembedding""" +690 22 loss """softplus""" +690 22 
regularizer """no""" +690 22 optimizer """adadelta""" +690 22 training_loop """lcwa""" +690 22 evaluator """rankbased""" +690 23 dataset """kinships""" +690 23 model """structuredembedding""" +690 23 loss """softplus""" +690 23 regularizer """no""" +690 23 optimizer """adadelta""" +690 23 training_loop """lcwa""" +690 23 evaluator """rankbased""" +690 24 dataset """kinships""" +690 24 model """structuredembedding""" +690 24 loss """softplus""" +690 24 regularizer """no""" +690 24 optimizer """adadelta""" +690 24 training_loop """lcwa""" +690 24 evaluator """rankbased""" +690 25 dataset """kinships""" +690 25 model """structuredembedding""" +690 25 loss """softplus""" +690 25 regularizer """no""" +690 25 optimizer """adadelta""" +690 25 training_loop """lcwa""" +690 25 evaluator """rankbased""" +690 26 dataset """kinships""" +690 26 model """structuredembedding""" +690 26 loss """softplus""" +690 26 regularizer """no""" +690 26 optimizer """adadelta""" +690 26 training_loop """lcwa""" +690 26 evaluator """rankbased""" +690 27 dataset """kinships""" +690 27 model """structuredembedding""" +690 27 loss """softplus""" +690 27 regularizer """no""" +690 27 optimizer """adadelta""" +690 27 training_loop """lcwa""" +690 27 evaluator """rankbased""" +690 28 dataset """kinships""" +690 28 model """structuredembedding""" +690 28 loss """softplus""" +690 28 regularizer """no""" +690 28 optimizer """adadelta""" +690 28 training_loop """lcwa""" +690 28 evaluator """rankbased""" +690 29 dataset """kinships""" +690 29 model """structuredembedding""" +690 29 loss """softplus""" +690 29 regularizer """no""" +690 29 optimizer """adadelta""" +690 29 training_loop """lcwa""" +690 29 evaluator """rankbased""" +690 30 dataset """kinships""" +690 30 model """structuredembedding""" +690 30 loss """softplus""" +690 30 regularizer """no""" +690 30 optimizer """adadelta""" +690 30 training_loop """lcwa""" +690 30 evaluator """rankbased""" +690 31 dataset """kinships""" +690 31 model 
"""structuredembedding""" +690 31 loss """softplus""" +690 31 regularizer """no""" +690 31 optimizer """adadelta""" +690 31 training_loop """lcwa""" +690 31 evaluator """rankbased""" +690 32 dataset """kinships""" +690 32 model """structuredembedding""" +690 32 loss """softplus""" +690 32 regularizer """no""" +690 32 optimizer """adadelta""" +690 32 training_loop """lcwa""" +690 32 evaluator """rankbased""" +690 33 dataset """kinships""" +690 33 model """structuredembedding""" +690 33 loss """softplus""" +690 33 regularizer """no""" +690 33 optimizer """adadelta""" +690 33 training_loop """lcwa""" +690 33 evaluator """rankbased""" +690 34 dataset """kinships""" +690 34 model """structuredembedding""" +690 34 loss """softplus""" +690 34 regularizer """no""" +690 34 optimizer """adadelta""" +690 34 training_loop """lcwa""" +690 34 evaluator """rankbased""" +690 35 dataset """kinships""" +690 35 model """structuredembedding""" +690 35 loss """softplus""" +690 35 regularizer """no""" +690 35 optimizer """adadelta""" +690 35 training_loop """lcwa""" +690 35 evaluator """rankbased""" +690 36 dataset """kinships""" +690 36 model """structuredembedding""" +690 36 loss """softplus""" +690 36 regularizer """no""" +690 36 optimizer """adadelta""" +690 36 training_loop """lcwa""" +690 36 evaluator """rankbased""" +690 37 dataset """kinships""" +690 37 model """structuredembedding""" +690 37 loss """softplus""" +690 37 regularizer """no""" +690 37 optimizer """adadelta""" +690 37 training_loop """lcwa""" +690 37 evaluator """rankbased""" +690 38 dataset """kinships""" +690 38 model """structuredembedding""" +690 38 loss """softplus""" +690 38 regularizer """no""" +690 38 optimizer """adadelta""" +690 38 training_loop """lcwa""" +690 38 evaluator """rankbased""" +690 39 dataset """kinships""" +690 39 model """structuredembedding""" +690 39 loss """softplus""" +690 39 regularizer """no""" +690 39 optimizer """adadelta""" +690 39 training_loop """lcwa""" +690 39 evaluator 
"""rankbased""" +690 40 dataset """kinships""" +690 40 model """structuredembedding""" +690 40 loss """softplus""" +690 40 regularizer """no""" +690 40 optimizer """adadelta""" +690 40 training_loop """lcwa""" +690 40 evaluator """rankbased""" +690 41 dataset """kinships""" +690 41 model """structuredembedding""" +690 41 loss """softplus""" +690 41 regularizer """no""" +690 41 optimizer """adadelta""" +690 41 training_loop """lcwa""" +690 41 evaluator """rankbased""" +690 42 dataset """kinships""" +690 42 model """structuredembedding""" +690 42 loss """softplus""" +690 42 regularizer """no""" +690 42 optimizer """adadelta""" +690 42 training_loop """lcwa""" +690 42 evaluator """rankbased""" +690 43 dataset """kinships""" +690 43 model """structuredembedding""" +690 43 loss """softplus""" +690 43 regularizer """no""" +690 43 optimizer """adadelta""" +690 43 training_loop """lcwa""" +690 43 evaluator """rankbased""" +690 44 dataset """kinships""" +690 44 model """structuredembedding""" +690 44 loss """softplus""" +690 44 regularizer """no""" +690 44 optimizer """adadelta""" +690 44 training_loop """lcwa""" +690 44 evaluator """rankbased""" +690 45 dataset """kinships""" +690 45 model """structuredembedding""" +690 45 loss """softplus""" +690 45 regularizer """no""" +690 45 optimizer """adadelta""" +690 45 training_loop """lcwa""" +690 45 evaluator """rankbased""" +690 46 dataset """kinships""" +690 46 model """structuredembedding""" +690 46 loss """softplus""" +690 46 regularizer """no""" +690 46 optimizer """adadelta""" +690 46 training_loop """lcwa""" +690 46 evaluator """rankbased""" +690 47 dataset """kinships""" +690 47 model """structuredembedding""" +690 47 loss """softplus""" +690 47 regularizer """no""" +690 47 optimizer """adadelta""" +690 47 training_loop """lcwa""" +690 47 evaluator """rankbased""" +690 48 dataset """kinships""" +690 48 model """structuredembedding""" +690 48 loss """softplus""" +690 48 regularizer """no""" +690 48 optimizer 
"""adadelta""" +690 48 training_loop """lcwa""" +690 48 evaluator """rankbased""" +690 49 dataset """kinships""" +690 49 model """structuredembedding""" +690 49 loss """softplus""" +690 49 regularizer """no""" +690 49 optimizer """adadelta""" +690 49 training_loop """lcwa""" +690 49 evaluator """rankbased""" +690 50 dataset """kinships""" +690 50 model """structuredembedding""" +690 50 loss """softplus""" +690 50 regularizer """no""" +690 50 optimizer """adadelta""" +690 50 training_loop """lcwa""" +690 50 evaluator """rankbased""" +690 51 dataset """kinships""" +690 51 model """structuredembedding""" +690 51 loss """softplus""" +690 51 regularizer """no""" +690 51 optimizer """adadelta""" +690 51 training_loop """lcwa""" +690 51 evaluator """rankbased""" +690 52 dataset """kinships""" +690 52 model """structuredembedding""" +690 52 loss """softplus""" +690 52 regularizer """no""" +690 52 optimizer """adadelta""" +690 52 training_loop """lcwa""" +690 52 evaluator """rankbased""" +690 53 dataset """kinships""" +690 53 model """structuredembedding""" +690 53 loss """softplus""" +690 53 regularizer """no""" +690 53 optimizer """adadelta""" +690 53 training_loop """lcwa""" +690 53 evaluator """rankbased""" +690 54 dataset """kinships""" +690 54 model """structuredembedding""" +690 54 loss """softplus""" +690 54 regularizer """no""" +690 54 optimizer """adadelta""" +690 54 training_loop """lcwa""" +690 54 evaluator """rankbased""" +690 55 dataset """kinships""" +690 55 model """structuredembedding""" +690 55 loss """softplus""" +690 55 regularizer """no""" +690 55 optimizer """adadelta""" +690 55 training_loop """lcwa""" +690 55 evaluator """rankbased""" +690 56 dataset """kinships""" +690 56 model """structuredembedding""" +690 56 loss """softplus""" +690 56 regularizer """no""" +690 56 optimizer """adadelta""" +690 56 training_loop """lcwa""" +690 56 evaluator """rankbased""" +690 57 dataset """kinships""" +690 57 model """structuredembedding""" +690 57 loss 
"""softplus""" +690 57 regularizer """no""" +690 57 optimizer """adadelta""" +690 57 training_loop """lcwa""" +690 57 evaluator """rankbased""" +690 58 dataset """kinships""" +690 58 model """structuredembedding""" +690 58 loss """softplus""" +690 58 regularizer """no""" +690 58 optimizer """adadelta""" +690 58 training_loop """lcwa""" +690 58 evaluator """rankbased""" +690 59 dataset """kinships""" +690 59 model """structuredembedding""" +690 59 loss """softplus""" +690 59 regularizer """no""" +690 59 optimizer """adadelta""" +690 59 training_loop """lcwa""" +690 59 evaluator """rankbased""" +690 60 dataset """kinships""" +690 60 model """structuredembedding""" +690 60 loss """softplus""" +690 60 regularizer """no""" +690 60 optimizer """adadelta""" +690 60 training_loop """lcwa""" +690 60 evaluator """rankbased""" +690 61 dataset """kinships""" +690 61 model """structuredembedding""" +690 61 loss """softplus""" +690 61 regularizer """no""" +690 61 optimizer """adadelta""" +690 61 training_loop """lcwa""" +690 61 evaluator """rankbased""" +690 62 dataset """kinships""" +690 62 model """structuredembedding""" +690 62 loss """softplus""" +690 62 regularizer """no""" +690 62 optimizer """adadelta""" +690 62 training_loop """lcwa""" +690 62 evaluator """rankbased""" +690 63 dataset """kinships""" +690 63 model """structuredembedding""" +690 63 loss """softplus""" +690 63 regularizer """no""" +690 63 optimizer """adadelta""" +690 63 training_loop """lcwa""" +690 63 evaluator """rankbased""" +690 64 dataset """kinships""" +690 64 model """structuredembedding""" +690 64 loss """softplus""" +690 64 regularizer """no""" +690 64 optimizer """adadelta""" +690 64 training_loop """lcwa""" +690 64 evaluator """rankbased""" +690 65 dataset """kinships""" +690 65 model """structuredembedding""" +690 65 loss """softplus""" +690 65 regularizer """no""" +690 65 optimizer """adadelta""" +690 65 training_loop """lcwa""" +690 65 evaluator """rankbased""" +690 66 dataset """kinships""" 
+690 66 model """structuredembedding""" +690 66 loss """softplus""" +690 66 regularizer """no""" +690 66 optimizer """adadelta""" +690 66 training_loop """lcwa""" +690 66 evaluator """rankbased""" +690 67 dataset """kinships""" +690 67 model """structuredembedding""" +690 67 loss """softplus""" +690 67 regularizer """no""" +690 67 optimizer """adadelta""" +690 67 training_loop """lcwa""" +690 67 evaluator """rankbased""" +690 68 dataset """kinships""" +690 68 model """structuredembedding""" +690 68 loss """softplus""" +690 68 regularizer """no""" +690 68 optimizer """adadelta""" +690 68 training_loop """lcwa""" +690 68 evaluator """rankbased""" +690 69 dataset """kinships""" +690 69 model """structuredembedding""" +690 69 loss """softplus""" +690 69 regularizer """no""" +690 69 optimizer """adadelta""" +690 69 training_loop """lcwa""" +690 69 evaluator """rankbased""" +690 70 dataset """kinships""" +690 70 model """structuredembedding""" +690 70 loss """softplus""" +690 70 regularizer """no""" +690 70 optimizer """adadelta""" +690 70 training_loop """lcwa""" +690 70 evaluator """rankbased""" +690 71 dataset """kinships""" +690 71 model """structuredembedding""" +690 71 loss """softplus""" +690 71 regularizer """no""" +690 71 optimizer """adadelta""" +690 71 training_loop """lcwa""" +690 71 evaluator """rankbased""" +690 72 dataset """kinships""" +690 72 model """structuredembedding""" +690 72 loss """softplus""" +690 72 regularizer """no""" +690 72 optimizer """adadelta""" +690 72 training_loop """lcwa""" +690 72 evaluator """rankbased""" +690 73 dataset """kinships""" +690 73 model """structuredembedding""" +690 73 loss """softplus""" +690 73 regularizer """no""" +690 73 optimizer """adadelta""" +690 73 training_loop """lcwa""" +690 73 evaluator """rankbased""" +690 74 dataset """kinships""" +690 74 model """structuredembedding""" +690 74 loss """softplus""" +690 74 regularizer """no""" +690 74 optimizer """adadelta""" +690 74 training_loop """lcwa""" +690 74 
evaluator """rankbased""" +690 75 dataset """kinships""" +690 75 model """structuredembedding""" +690 75 loss """softplus""" +690 75 regularizer """no""" +690 75 optimizer """adadelta""" +690 75 training_loop """lcwa""" +690 75 evaluator """rankbased""" +690 76 dataset """kinships""" +690 76 model """structuredembedding""" +690 76 loss """softplus""" +690 76 regularizer """no""" +690 76 optimizer """adadelta""" +690 76 training_loop """lcwa""" +690 76 evaluator """rankbased""" +690 77 dataset """kinships""" +690 77 model """structuredembedding""" +690 77 loss """softplus""" +690 77 regularizer """no""" +690 77 optimizer """adadelta""" +690 77 training_loop """lcwa""" +690 77 evaluator """rankbased""" +690 78 dataset """kinships""" +690 78 model """structuredembedding""" +690 78 loss """softplus""" +690 78 regularizer """no""" +690 78 optimizer """adadelta""" +690 78 training_loop """lcwa""" +690 78 evaluator """rankbased""" +690 79 dataset """kinships""" +690 79 model """structuredembedding""" +690 79 loss """softplus""" +690 79 regularizer """no""" +690 79 optimizer """adadelta""" +690 79 training_loop """lcwa""" +690 79 evaluator """rankbased""" +690 80 dataset """kinships""" +690 80 model """structuredembedding""" +690 80 loss """softplus""" +690 80 regularizer """no""" +690 80 optimizer """adadelta""" +690 80 training_loop """lcwa""" +690 80 evaluator """rankbased""" +690 81 dataset """kinships""" +690 81 model """structuredembedding""" +690 81 loss """softplus""" +690 81 regularizer """no""" +690 81 optimizer """adadelta""" +690 81 training_loop """lcwa""" +690 81 evaluator """rankbased""" +690 82 dataset """kinships""" +690 82 model """structuredembedding""" +690 82 loss """softplus""" +690 82 regularizer """no""" +690 82 optimizer """adadelta""" +690 82 training_loop """lcwa""" +690 82 evaluator """rankbased""" +690 83 dataset """kinships""" +690 83 model """structuredembedding""" +690 83 loss """softplus""" +690 83 regularizer """no""" +690 83 optimizer 
"""adadelta""" +690 83 training_loop """lcwa""" +690 83 evaluator """rankbased""" +690 84 dataset """kinships""" +690 84 model """structuredembedding""" +690 84 loss """softplus""" +690 84 regularizer """no""" +690 84 optimizer """adadelta""" +690 84 training_loop """lcwa""" +690 84 evaluator """rankbased""" +690 85 dataset """kinships""" +690 85 model """structuredembedding""" +690 85 loss """softplus""" +690 85 regularizer """no""" +690 85 optimizer """adadelta""" +690 85 training_loop """lcwa""" +690 85 evaluator """rankbased""" +690 86 dataset """kinships""" +690 86 model """structuredembedding""" +690 86 loss """softplus""" +690 86 regularizer """no""" +690 86 optimizer """adadelta""" +690 86 training_loop """lcwa""" +690 86 evaluator """rankbased""" +690 87 dataset """kinships""" +690 87 model """structuredembedding""" +690 87 loss """softplus""" +690 87 regularizer """no""" +690 87 optimizer """adadelta""" +690 87 training_loop """lcwa""" +690 87 evaluator """rankbased""" +690 88 dataset """kinships""" +690 88 model """structuredembedding""" +690 88 loss """softplus""" +690 88 regularizer """no""" +690 88 optimizer """adadelta""" +690 88 training_loop """lcwa""" +690 88 evaluator """rankbased""" +690 89 dataset """kinships""" +690 89 model """structuredembedding""" +690 89 loss """softplus""" +690 89 regularizer """no""" +690 89 optimizer """adadelta""" +690 89 training_loop """lcwa""" +690 89 evaluator """rankbased""" +690 90 dataset """kinships""" +690 90 model """structuredembedding""" +690 90 loss """softplus""" +690 90 regularizer """no""" +690 90 optimizer """adadelta""" +690 90 training_loop """lcwa""" +690 90 evaluator """rankbased""" +690 91 dataset """kinships""" +690 91 model """structuredembedding""" +690 91 loss """softplus""" +690 91 regularizer """no""" +690 91 optimizer """adadelta""" +690 91 training_loop """lcwa""" +690 91 evaluator """rankbased""" +690 92 dataset """kinships""" +690 92 model """structuredembedding""" +690 92 loss 
"""softplus""" +690 92 regularizer """no""" +690 92 optimizer """adadelta""" +690 92 training_loop """lcwa""" +690 92 evaluator """rankbased""" +690 93 dataset """kinships""" +690 93 model """structuredembedding""" +690 93 loss """softplus""" +690 93 regularizer """no""" +690 93 optimizer """adadelta""" +690 93 training_loop """lcwa""" +690 93 evaluator """rankbased""" +690 94 dataset """kinships""" +690 94 model """structuredembedding""" +690 94 loss """softplus""" +690 94 regularizer """no""" +690 94 optimizer """adadelta""" +690 94 training_loop """lcwa""" +690 94 evaluator """rankbased""" +690 95 dataset """kinships""" +690 95 model """structuredembedding""" +690 95 loss """softplus""" +690 95 regularizer """no""" +690 95 optimizer """adadelta""" +690 95 training_loop """lcwa""" +690 95 evaluator """rankbased""" +690 96 dataset """kinships""" +690 96 model """structuredembedding""" +690 96 loss """softplus""" +690 96 regularizer """no""" +690 96 optimizer """adadelta""" +690 96 training_loop """lcwa""" +690 96 evaluator """rankbased""" +690 97 dataset """kinships""" +690 97 model """structuredembedding""" +690 97 loss """softplus""" +690 97 regularizer """no""" +690 97 optimizer """adadelta""" +690 97 training_loop """lcwa""" +690 97 evaluator """rankbased""" +690 98 dataset """kinships""" +690 98 model """structuredembedding""" +690 98 loss """softplus""" +690 98 regularizer """no""" +690 98 optimizer """adadelta""" +690 98 training_loop """lcwa""" +690 98 evaluator """rankbased""" +690 99 dataset """kinships""" +690 99 model """structuredembedding""" +690 99 loss """softplus""" +690 99 regularizer """no""" +690 99 optimizer """adadelta""" +690 99 training_loop """lcwa""" +690 99 evaluator """rankbased""" +690 100 dataset """kinships""" +690 100 model """structuredembedding""" +690 100 loss """softplus""" +690 100 regularizer """no""" +690 100 optimizer """adadelta""" +690 100 training_loop """lcwa""" +690 100 evaluator """rankbased""" +691 1 
model.embedding_dim 0.0 +691 1 model.scoring_fct_norm 1.0 +691 1 training.batch_size 1.0 +691 1 training.label_smoothing 0.7376539395964096 +691 2 model.embedding_dim 2.0 +691 2 model.scoring_fct_norm 2.0 +691 2 training.batch_size 0.0 +691 2 training.label_smoothing 0.00361774232723343 +691 3 model.embedding_dim 1.0 +691 3 model.scoring_fct_norm 1.0 +691 3 training.batch_size 1.0 +691 3 training.label_smoothing 0.0730866279258306 +691 4 model.embedding_dim 2.0 +691 4 model.scoring_fct_norm 1.0 +691 4 training.batch_size 0.0 +691 4 training.label_smoothing 0.062393800548691924 +691 5 model.embedding_dim 1.0 +691 5 model.scoring_fct_norm 1.0 +691 5 training.batch_size 1.0 +691 5 training.label_smoothing 0.018862807560435076 +691 6 model.embedding_dim 1.0 +691 6 model.scoring_fct_norm 1.0 +691 6 training.batch_size 1.0 +691 6 training.label_smoothing 0.12050072982348568 +691 7 model.embedding_dim 1.0 +691 7 model.scoring_fct_norm 2.0 +691 7 training.batch_size 1.0 +691 7 training.label_smoothing 0.3092765734500257 +691 8 model.embedding_dim 0.0 +691 8 model.scoring_fct_norm 1.0 +691 8 training.batch_size 1.0 +691 8 training.label_smoothing 0.1620711489431756 +691 9 model.embedding_dim 2.0 +691 9 model.scoring_fct_norm 1.0 +691 9 training.batch_size 1.0 +691 9 training.label_smoothing 0.034083974729718615 +691 10 model.embedding_dim 2.0 +691 10 model.scoring_fct_norm 2.0 +691 10 training.batch_size 2.0 +691 10 training.label_smoothing 0.0646461084108107 +691 11 model.embedding_dim 2.0 +691 11 model.scoring_fct_norm 1.0 +691 11 training.batch_size 1.0 +691 11 training.label_smoothing 0.9581520874984361 +691 12 model.embedding_dim 0.0 +691 12 model.scoring_fct_norm 2.0 +691 12 training.batch_size 0.0 +691 12 training.label_smoothing 0.6463152052948866 +691 13 model.embedding_dim 2.0 +691 13 model.scoring_fct_norm 1.0 +691 13 training.batch_size 0.0 +691 13 training.label_smoothing 0.850479931537787 +691 14 model.embedding_dim 2.0 +691 14 model.scoring_fct_norm 2.0 +691 
14 training.batch_size 1.0 +691 14 training.label_smoothing 0.4655461096999587 +691 15 model.embedding_dim 1.0 +691 15 model.scoring_fct_norm 2.0 +691 15 training.batch_size 1.0 +691 15 training.label_smoothing 0.0021017585720659675 +691 16 model.embedding_dim 0.0 +691 16 model.scoring_fct_norm 1.0 +691 16 training.batch_size 0.0 +691 16 training.label_smoothing 0.057231821352294376 +691 17 model.embedding_dim 2.0 +691 17 model.scoring_fct_norm 1.0 +691 17 training.batch_size 0.0 +691 17 training.label_smoothing 0.07034852040747472 +691 18 model.embedding_dim 1.0 +691 18 model.scoring_fct_norm 1.0 +691 18 training.batch_size 1.0 +691 18 training.label_smoothing 0.0012732375598532373 +691 19 model.embedding_dim 2.0 +691 19 model.scoring_fct_norm 2.0 +691 19 training.batch_size 0.0 +691 19 training.label_smoothing 0.007528526284930458 +691 20 model.embedding_dim 0.0 +691 20 model.scoring_fct_norm 1.0 +691 20 training.batch_size 2.0 +691 20 training.label_smoothing 0.010867700446021615 +691 21 model.embedding_dim 2.0 +691 21 model.scoring_fct_norm 2.0 +691 21 training.batch_size 0.0 +691 21 training.label_smoothing 0.0047151805912603074 +691 22 model.embedding_dim 1.0 +691 22 model.scoring_fct_norm 2.0 +691 22 training.batch_size 2.0 +691 22 training.label_smoothing 0.7534472172953631 +691 23 model.embedding_dim 1.0 +691 23 model.scoring_fct_norm 2.0 +691 23 training.batch_size 1.0 +691 23 training.label_smoothing 0.010034594214055505 +691 24 model.embedding_dim 1.0 +691 24 model.scoring_fct_norm 1.0 +691 24 training.batch_size 1.0 +691 24 training.label_smoothing 0.11479936686786098 +691 25 model.embedding_dim 2.0 +691 25 model.scoring_fct_norm 2.0 +691 25 training.batch_size 2.0 +691 25 training.label_smoothing 0.30091027936092424 +691 26 model.embedding_dim 2.0 +691 26 model.scoring_fct_norm 1.0 +691 26 training.batch_size 1.0 +691 26 training.label_smoothing 0.16483443761810918 +691 27 model.embedding_dim 1.0 +691 27 model.scoring_fct_norm 2.0 +691 27 
training.batch_size 2.0 +691 27 training.label_smoothing 0.0010533839347217043 +691 28 model.embedding_dim 1.0 +691 28 model.scoring_fct_norm 1.0 +691 28 training.batch_size 0.0 +691 28 training.label_smoothing 0.002860051053057415 +691 29 model.embedding_dim 1.0 +691 29 model.scoring_fct_norm 2.0 +691 29 training.batch_size 0.0 +691 29 training.label_smoothing 0.0018764650852086387 +691 30 model.embedding_dim 2.0 +691 30 model.scoring_fct_norm 1.0 +691 30 training.batch_size 1.0 +691 30 training.label_smoothing 0.235777510304234 +691 31 model.embedding_dim 2.0 +691 31 model.scoring_fct_norm 1.0 +691 31 training.batch_size 0.0 +691 31 training.label_smoothing 0.734831951735089 +691 32 model.embedding_dim 1.0 +691 32 model.scoring_fct_norm 2.0 +691 32 training.batch_size 2.0 +691 32 training.label_smoothing 0.053486785165680915 +691 33 model.embedding_dim 0.0 +691 33 model.scoring_fct_norm 1.0 +691 33 training.batch_size 1.0 +691 33 training.label_smoothing 0.056540684430343234 +691 34 model.embedding_dim 1.0 +691 34 model.scoring_fct_norm 2.0 +691 34 training.batch_size 1.0 +691 34 training.label_smoothing 0.18526758920326758 +691 35 model.embedding_dim 2.0 +691 35 model.scoring_fct_norm 2.0 +691 35 training.batch_size 2.0 +691 35 training.label_smoothing 0.005802839960003005 +691 36 model.embedding_dim 0.0 +691 36 model.scoring_fct_norm 2.0 +691 36 training.batch_size 0.0 +691 36 training.label_smoothing 0.023079718251822885 +691 37 model.embedding_dim 1.0 +691 37 model.scoring_fct_norm 2.0 +691 37 training.batch_size 0.0 +691 37 training.label_smoothing 0.05876127417998767 +691 38 model.embedding_dim 1.0 +691 38 model.scoring_fct_norm 1.0 +691 38 training.batch_size 1.0 +691 38 training.label_smoothing 0.0011470037420732843 +691 39 model.embedding_dim 2.0 +691 39 model.scoring_fct_norm 1.0 +691 39 training.batch_size 0.0 +691 39 training.label_smoothing 0.012498267385553806 +691 40 model.embedding_dim 1.0 +691 40 model.scoring_fct_norm 1.0 +691 40 
training.batch_size 0.0 +691 40 training.label_smoothing 0.15454007173764578 +691 41 model.embedding_dim 2.0 +691 41 model.scoring_fct_norm 2.0 +691 41 training.batch_size 2.0 +691 41 training.label_smoothing 0.4371690977821822 +691 42 model.embedding_dim 2.0 +691 42 model.scoring_fct_norm 1.0 +691 42 training.batch_size 1.0 +691 42 training.label_smoothing 0.005942966775548713 +691 43 model.embedding_dim 0.0 +691 43 model.scoring_fct_norm 2.0 +691 43 training.batch_size 2.0 +691 43 training.label_smoothing 0.027953572066447455 +691 44 model.embedding_dim 1.0 +691 44 model.scoring_fct_norm 1.0 +691 44 training.batch_size 1.0 +691 44 training.label_smoothing 0.001911835034218325 +691 45 model.embedding_dim 1.0 +691 45 model.scoring_fct_norm 1.0 +691 45 training.batch_size 2.0 +691 45 training.label_smoothing 0.013963025009572642 +691 46 model.embedding_dim 0.0 +691 46 model.scoring_fct_norm 1.0 +691 46 training.batch_size 1.0 +691 46 training.label_smoothing 0.0021989776715878786 +691 47 model.embedding_dim 1.0 +691 47 model.scoring_fct_norm 2.0 +691 47 training.batch_size 0.0 +691 47 training.label_smoothing 0.001796214179079128 +691 48 model.embedding_dim 2.0 +691 48 model.scoring_fct_norm 2.0 +691 48 training.batch_size 0.0 +691 48 training.label_smoothing 0.014179084745648225 +691 49 model.embedding_dim 1.0 +691 49 model.scoring_fct_norm 2.0 +691 49 training.batch_size 1.0 +691 49 training.label_smoothing 0.4294986091638282 +691 50 model.embedding_dim 1.0 +691 50 model.scoring_fct_norm 2.0 +691 50 training.batch_size 0.0 +691 50 training.label_smoothing 0.006453859395546233 +691 51 model.embedding_dim 2.0 +691 51 model.scoring_fct_norm 1.0 +691 51 training.batch_size 2.0 +691 51 training.label_smoothing 0.771915460179851 +691 52 model.embedding_dim 2.0 +691 52 model.scoring_fct_norm 2.0 +691 52 training.batch_size 0.0 +691 52 training.label_smoothing 0.2775774481439725 +691 53 model.embedding_dim 1.0 +691 53 model.scoring_fct_norm 1.0 +691 53 training.batch_size 
2.0 +691 53 training.label_smoothing 0.0035753890906164787 +691 54 model.embedding_dim 2.0 +691 54 model.scoring_fct_norm 1.0 +691 54 training.batch_size 0.0 +691 54 training.label_smoothing 0.0077177802366195794 +691 55 model.embedding_dim 0.0 +691 55 model.scoring_fct_norm 1.0 +691 55 training.batch_size 1.0 +691 55 training.label_smoothing 0.5605871405000409 +691 56 model.embedding_dim 2.0 +691 56 model.scoring_fct_norm 2.0 +691 56 training.batch_size 0.0 +691 56 training.label_smoothing 0.0029477828230038113 +691 57 model.embedding_dim 0.0 +691 57 model.scoring_fct_norm 2.0 +691 57 training.batch_size 2.0 +691 57 training.label_smoothing 0.19253512588296268 +691 58 model.embedding_dim 2.0 +691 58 model.scoring_fct_norm 1.0 +691 58 training.batch_size 1.0 +691 58 training.label_smoothing 0.1432839645771604 +691 59 model.embedding_dim 1.0 +691 59 model.scoring_fct_norm 1.0 +691 59 training.batch_size 1.0 +691 59 training.label_smoothing 0.35841110564574064 +691 60 model.embedding_dim 0.0 +691 60 model.scoring_fct_norm 2.0 +691 60 training.batch_size 0.0 +691 60 training.label_smoothing 0.0011836682434163991 +691 61 model.embedding_dim 0.0 +691 61 model.scoring_fct_norm 1.0 +691 61 training.batch_size 2.0 +691 61 training.label_smoothing 0.008230652463922223 +691 62 model.embedding_dim 2.0 +691 62 model.scoring_fct_norm 1.0 +691 62 training.batch_size 1.0 +691 62 training.label_smoothing 0.11931501849871437 +691 63 model.embedding_dim 0.0 +691 63 model.scoring_fct_norm 1.0 +691 63 training.batch_size 2.0 +691 63 training.label_smoothing 0.9400876570308455 +691 64 model.embedding_dim 0.0 +691 64 model.scoring_fct_norm 2.0 +691 64 training.batch_size 0.0 +691 64 training.label_smoothing 0.5627147295731465 +691 65 model.embedding_dim 2.0 +691 65 model.scoring_fct_norm 2.0 +691 65 training.batch_size 2.0 +691 65 training.label_smoothing 0.030895013462407177 +691 66 model.embedding_dim 2.0 +691 66 model.scoring_fct_norm 2.0 +691 66 training.batch_size 0.0 +691 66 
training.label_smoothing 0.03332074806679593 +691 67 model.embedding_dim 2.0 +691 67 model.scoring_fct_norm 1.0 +691 67 training.batch_size 2.0 +691 67 training.label_smoothing 0.0024346706361102386 +691 68 model.embedding_dim 0.0 +691 68 model.scoring_fct_norm 1.0 +691 68 training.batch_size 2.0 +691 68 training.label_smoothing 0.011462140385876687 +691 69 model.embedding_dim 1.0 +691 69 model.scoring_fct_norm 2.0 +691 69 training.batch_size 2.0 +691 69 training.label_smoothing 0.4265305105163147 +691 70 model.embedding_dim 1.0 +691 70 model.scoring_fct_norm 2.0 +691 70 training.batch_size 0.0 +691 70 training.label_smoothing 0.02143403682107392 +691 71 model.embedding_dim 1.0 +691 71 model.scoring_fct_norm 1.0 +691 71 training.batch_size 2.0 +691 71 training.label_smoothing 0.00512769318448028 +691 72 model.embedding_dim 0.0 +691 72 model.scoring_fct_norm 1.0 +691 72 training.batch_size 0.0 +691 72 training.label_smoothing 0.05077119042606159 +691 73 model.embedding_dim 0.0 +691 73 model.scoring_fct_norm 1.0 +691 73 training.batch_size 2.0 +691 73 training.label_smoothing 0.009265983948031149 +691 74 model.embedding_dim 0.0 +691 74 model.scoring_fct_norm 1.0 +691 74 training.batch_size 0.0 +691 74 training.label_smoothing 0.059916497783850095 +691 75 model.embedding_dim 1.0 +691 75 model.scoring_fct_norm 1.0 +691 75 training.batch_size 0.0 +691 75 training.label_smoothing 0.020422060802154047 +691 76 model.embedding_dim 2.0 +691 76 model.scoring_fct_norm 2.0 +691 76 training.batch_size 0.0 +691 76 training.label_smoothing 0.001042484657623797 +691 77 model.embedding_dim 1.0 +691 77 model.scoring_fct_norm 2.0 +691 77 training.batch_size 1.0 +691 77 training.label_smoothing 0.01301924712536468 +691 78 model.embedding_dim 2.0 +691 78 model.scoring_fct_norm 2.0 +691 78 training.batch_size 1.0 +691 78 training.label_smoothing 0.4799507097749755 +691 79 model.embedding_dim 0.0 +691 79 model.scoring_fct_norm 1.0 +691 79 training.batch_size 0.0 +691 79 
training.label_smoothing 0.025124937628317252 +691 80 model.embedding_dim 2.0 +691 80 model.scoring_fct_norm 2.0 +691 80 training.batch_size 2.0 +691 80 training.label_smoothing 0.004812708513378558 +691 81 model.embedding_dim 1.0 +691 81 model.scoring_fct_norm 2.0 +691 81 training.batch_size 0.0 +691 81 training.label_smoothing 0.359591721224414 +691 82 model.embedding_dim 2.0 +691 82 model.scoring_fct_norm 1.0 +691 82 training.batch_size 0.0 +691 82 training.label_smoothing 0.0016754586958919052 +691 83 model.embedding_dim 1.0 +691 83 model.scoring_fct_norm 2.0 +691 83 training.batch_size 2.0 +691 83 training.label_smoothing 0.12930645840211477 +691 84 model.embedding_dim 0.0 +691 84 model.scoring_fct_norm 2.0 +691 84 training.batch_size 2.0 +691 84 training.label_smoothing 0.07985714605536552 +691 85 model.embedding_dim 2.0 +691 85 model.scoring_fct_norm 1.0 +691 85 training.batch_size 1.0 +691 85 training.label_smoothing 0.057431100804536124 +691 86 model.embedding_dim 1.0 +691 86 model.scoring_fct_norm 1.0 +691 86 training.batch_size 2.0 +691 86 training.label_smoothing 0.9806867791098036 +691 87 model.embedding_dim 1.0 +691 87 model.scoring_fct_norm 1.0 +691 87 training.batch_size 0.0 +691 87 training.label_smoothing 0.006417704312770091 +691 88 model.embedding_dim 0.0 +691 88 model.scoring_fct_norm 1.0 +691 88 training.batch_size 0.0 +691 88 training.label_smoothing 0.3807827108100237 +691 89 model.embedding_dim 1.0 +691 89 model.scoring_fct_norm 2.0 +691 89 training.batch_size 1.0 +691 89 training.label_smoothing 0.014659076261803138 +691 90 model.embedding_dim 1.0 +691 90 model.scoring_fct_norm 2.0 +691 90 training.batch_size 2.0 +691 90 training.label_smoothing 0.3209926961401401 +691 91 model.embedding_dim 2.0 +691 91 model.scoring_fct_norm 2.0 +691 91 training.batch_size 2.0 +691 91 training.label_smoothing 0.023602017417022798 +691 92 model.embedding_dim 1.0 +691 92 model.scoring_fct_norm 2.0 +691 92 training.batch_size 2.0 +691 92 
training.label_smoothing 0.04808181275339127 +691 93 model.embedding_dim 2.0 +691 93 model.scoring_fct_norm 1.0 +691 93 training.batch_size 2.0 +691 93 training.label_smoothing 0.006120968323323269 +691 94 model.embedding_dim 1.0 +691 94 model.scoring_fct_norm 1.0 +691 94 training.batch_size 1.0 +691 94 training.label_smoothing 0.0038220175648201542 +691 95 model.embedding_dim 0.0 +691 95 model.scoring_fct_norm 1.0 +691 95 training.batch_size 0.0 +691 95 training.label_smoothing 0.028383485708215597 +691 96 model.embedding_dim 1.0 +691 96 model.scoring_fct_norm 2.0 +691 96 training.batch_size 0.0 +691 96 training.label_smoothing 0.021116547256636323 +691 97 model.embedding_dim 1.0 +691 97 model.scoring_fct_norm 2.0 +691 97 training.batch_size 0.0 +691 97 training.label_smoothing 0.02275903603414891 +691 98 model.embedding_dim 0.0 +691 98 model.scoring_fct_norm 1.0 +691 98 training.batch_size 2.0 +691 98 training.label_smoothing 0.008155208277203153 +691 99 model.embedding_dim 0.0 +691 99 model.scoring_fct_norm 2.0 +691 99 training.batch_size 2.0 +691 99 training.label_smoothing 0.329407607087098 +691 100 model.embedding_dim 0.0 +691 100 model.scoring_fct_norm 1.0 +691 100 training.batch_size 0.0 +691 100 training.label_smoothing 0.0030033654328117496 +691 1 dataset """kinships""" +691 1 model """structuredembedding""" +691 1 loss """bceaftersigmoid""" +691 1 regularizer """no""" +691 1 optimizer """adadelta""" +691 1 training_loop """lcwa""" +691 1 evaluator """rankbased""" +691 2 dataset """kinships""" +691 2 model """structuredembedding""" +691 2 loss """bceaftersigmoid""" +691 2 regularizer """no""" +691 2 optimizer """adadelta""" +691 2 training_loop """lcwa""" +691 2 evaluator """rankbased""" +691 3 dataset """kinships""" +691 3 model """structuredembedding""" +691 3 loss """bceaftersigmoid""" +691 3 regularizer """no""" +691 3 optimizer """adadelta""" +691 3 training_loop """lcwa""" +691 3 evaluator """rankbased""" +691 4 dataset """kinships""" +691 4 model 
"""structuredembedding""" +691 4 loss """bceaftersigmoid""" +691 4 regularizer """no""" +691 4 optimizer """adadelta""" +691 4 training_loop """lcwa""" +691 4 evaluator """rankbased""" +691 5 dataset """kinships""" +691 5 model """structuredembedding""" +691 5 loss """bceaftersigmoid""" +691 5 regularizer """no""" +691 5 optimizer """adadelta""" +691 5 training_loop """lcwa""" +691 5 evaluator """rankbased""" +691 6 dataset """kinships""" +691 6 model """structuredembedding""" +691 6 loss """bceaftersigmoid""" +691 6 regularizer """no""" +691 6 optimizer """adadelta""" +691 6 training_loop """lcwa""" +691 6 evaluator """rankbased""" +691 7 dataset """kinships""" +691 7 model """structuredembedding""" +691 7 loss """bceaftersigmoid""" +691 7 regularizer """no""" +691 7 optimizer """adadelta""" +691 7 training_loop """lcwa""" +691 7 evaluator """rankbased""" +691 8 dataset """kinships""" +691 8 model """structuredembedding""" +691 8 loss """bceaftersigmoid""" +691 8 regularizer """no""" +691 8 optimizer """adadelta""" +691 8 training_loop """lcwa""" +691 8 evaluator """rankbased""" +691 9 dataset """kinships""" +691 9 model """structuredembedding""" +691 9 loss """bceaftersigmoid""" +691 9 regularizer """no""" +691 9 optimizer """adadelta""" +691 9 training_loop """lcwa""" +691 9 evaluator """rankbased""" +691 10 dataset """kinships""" +691 10 model """structuredembedding""" +691 10 loss """bceaftersigmoid""" +691 10 regularizer """no""" +691 10 optimizer """adadelta""" +691 10 training_loop """lcwa""" +691 10 evaluator """rankbased""" +691 11 dataset """kinships""" +691 11 model """structuredembedding""" +691 11 loss """bceaftersigmoid""" +691 11 regularizer """no""" +691 11 optimizer """adadelta""" +691 11 training_loop """lcwa""" +691 11 evaluator """rankbased""" +691 12 dataset """kinships""" +691 12 model """structuredembedding""" +691 12 loss """bceaftersigmoid""" +691 12 regularizer """no""" +691 12 optimizer """adadelta""" +691 12 training_loop """lcwa""" 
+691 12 evaluator """rankbased""" +691 13 dataset """kinships""" +691 13 model """structuredembedding""" +691 13 loss """bceaftersigmoid""" +691 13 regularizer """no""" +691 13 optimizer """adadelta""" +691 13 training_loop """lcwa""" +691 13 evaluator """rankbased""" +691 14 dataset """kinships""" +691 14 model """structuredembedding""" +691 14 loss """bceaftersigmoid""" +691 14 regularizer """no""" +691 14 optimizer """adadelta""" +691 14 training_loop """lcwa""" +691 14 evaluator """rankbased""" +691 15 dataset """kinships""" +691 15 model """structuredembedding""" +691 15 loss """bceaftersigmoid""" +691 15 regularizer """no""" +691 15 optimizer """adadelta""" +691 15 training_loop """lcwa""" +691 15 evaluator """rankbased""" +691 16 dataset """kinships""" +691 16 model """structuredembedding""" +691 16 loss """bceaftersigmoid""" +691 16 regularizer """no""" +691 16 optimizer """adadelta""" +691 16 training_loop """lcwa""" +691 16 evaluator """rankbased""" +691 17 dataset """kinships""" +691 17 model """structuredembedding""" +691 17 loss """bceaftersigmoid""" +691 17 regularizer """no""" +691 17 optimizer """adadelta""" +691 17 training_loop """lcwa""" +691 17 evaluator """rankbased""" +691 18 dataset """kinships""" +691 18 model """structuredembedding""" +691 18 loss """bceaftersigmoid""" +691 18 regularizer """no""" +691 18 optimizer """adadelta""" +691 18 training_loop """lcwa""" +691 18 evaluator """rankbased""" +691 19 dataset """kinships""" +691 19 model """structuredembedding""" +691 19 loss """bceaftersigmoid""" +691 19 regularizer """no""" +691 19 optimizer """adadelta""" +691 19 training_loop """lcwa""" +691 19 evaluator """rankbased""" +691 20 dataset """kinships""" +691 20 model """structuredembedding""" +691 20 loss """bceaftersigmoid""" +691 20 regularizer """no""" +691 20 optimizer """adadelta""" +691 20 training_loop """lcwa""" +691 20 evaluator """rankbased""" +691 21 dataset """kinships""" +691 21 model """structuredembedding""" +691 21 loss 
"""bceaftersigmoid""" +691 21 regularizer """no""" +691 21 optimizer """adadelta""" +691 21 training_loop """lcwa""" +691 21 evaluator """rankbased""" +691 22 dataset """kinships""" +691 22 model """structuredembedding""" +691 22 loss """bceaftersigmoid""" +691 22 regularizer """no""" +691 22 optimizer """adadelta""" +691 22 training_loop """lcwa""" +691 22 evaluator """rankbased""" +691 23 dataset """kinships""" +691 23 model """structuredembedding""" +691 23 loss """bceaftersigmoid""" +691 23 regularizer """no""" +691 23 optimizer """adadelta""" +691 23 training_loop """lcwa""" +691 23 evaluator """rankbased""" +691 24 dataset """kinships""" +691 24 model """structuredembedding""" +691 24 loss """bceaftersigmoid""" +691 24 regularizer """no""" +691 24 optimizer """adadelta""" +691 24 training_loop """lcwa""" +691 24 evaluator """rankbased""" +691 25 dataset """kinships""" +691 25 model """structuredembedding""" +691 25 loss """bceaftersigmoid""" +691 25 regularizer """no""" +691 25 optimizer """adadelta""" +691 25 training_loop """lcwa""" +691 25 evaluator """rankbased""" +691 26 dataset """kinships""" +691 26 model """structuredembedding""" +691 26 loss """bceaftersigmoid""" +691 26 regularizer """no""" +691 26 optimizer """adadelta""" +691 26 training_loop """lcwa""" +691 26 evaluator """rankbased""" +691 27 dataset """kinships""" +691 27 model """structuredembedding""" +691 27 loss """bceaftersigmoid""" +691 27 regularizer """no""" +691 27 optimizer """adadelta""" +691 27 training_loop """lcwa""" +691 27 evaluator """rankbased""" +691 28 dataset """kinships""" +691 28 model """structuredembedding""" +691 28 loss """bceaftersigmoid""" +691 28 regularizer """no""" +691 28 optimizer """adadelta""" +691 28 training_loop """lcwa""" +691 28 evaluator """rankbased""" +691 29 dataset """kinships""" +691 29 model """structuredembedding""" +691 29 loss """bceaftersigmoid""" +691 29 regularizer """no""" +691 29 optimizer """adadelta""" +691 29 training_loop """lcwa""" 
+691 29 evaluator """rankbased""" +691 30 dataset """kinships""" +691 30 model """structuredembedding""" +691 30 loss """bceaftersigmoid""" +691 30 regularizer """no""" +691 30 optimizer """adadelta""" +691 30 training_loop """lcwa""" +691 30 evaluator """rankbased""" +691 31 dataset """kinships""" +691 31 model """structuredembedding""" +691 31 loss """bceaftersigmoid""" +691 31 regularizer """no""" +691 31 optimizer """adadelta""" +691 31 training_loop """lcwa""" +691 31 evaluator """rankbased""" +691 32 dataset """kinships""" +691 32 model """structuredembedding""" +691 32 loss """bceaftersigmoid""" +691 32 regularizer """no""" +691 32 optimizer """adadelta""" +691 32 training_loop """lcwa""" +691 32 evaluator """rankbased""" +691 33 dataset """kinships""" +691 33 model """structuredembedding""" +691 33 loss """bceaftersigmoid""" +691 33 regularizer """no""" +691 33 optimizer """adadelta""" +691 33 training_loop """lcwa""" +691 33 evaluator """rankbased""" +691 34 dataset """kinships""" +691 34 model """structuredembedding""" +691 34 loss """bceaftersigmoid""" +691 34 regularizer """no""" +691 34 optimizer """adadelta""" +691 34 training_loop """lcwa""" +691 34 evaluator """rankbased""" +691 35 dataset """kinships""" +691 35 model """structuredembedding""" +691 35 loss """bceaftersigmoid""" +691 35 regularizer """no""" +691 35 optimizer """adadelta""" +691 35 training_loop """lcwa""" +691 35 evaluator """rankbased""" +691 36 dataset """kinships""" +691 36 model """structuredembedding""" +691 36 loss """bceaftersigmoid""" +691 36 regularizer """no""" +691 36 optimizer """adadelta""" +691 36 training_loop """lcwa""" +691 36 evaluator """rankbased""" +691 37 dataset """kinships""" +691 37 model """structuredembedding""" +691 37 loss """bceaftersigmoid""" +691 37 regularizer """no""" +691 37 optimizer """adadelta""" +691 37 training_loop """lcwa""" +691 37 evaluator """rankbased""" +691 38 dataset """kinships""" +691 38 model """structuredembedding""" +691 38 loss 
"""bceaftersigmoid""" +691 38 regularizer """no""" +691 38 optimizer """adadelta""" +691 38 training_loop """lcwa""" +691 38 evaluator """rankbased""" +691 39 dataset """kinships""" +691 39 model """structuredembedding""" +691 39 loss """bceaftersigmoid""" +691 39 regularizer """no""" +691 39 optimizer """adadelta""" +691 39 training_loop """lcwa""" +691 39 evaluator """rankbased""" +691 40 dataset """kinships""" +691 40 model """structuredembedding""" +691 40 loss """bceaftersigmoid""" +691 40 regularizer """no""" +691 40 optimizer """adadelta""" +691 40 training_loop """lcwa""" +691 40 evaluator """rankbased""" +691 41 dataset """kinships""" +691 41 model """structuredembedding""" +691 41 loss """bceaftersigmoid""" +691 41 regularizer """no""" +691 41 optimizer """adadelta""" +691 41 training_loop """lcwa""" +691 41 evaluator """rankbased""" +691 42 dataset """kinships""" +691 42 model """structuredembedding""" +691 42 loss """bceaftersigmoid""" +691 42 regularizer """no""" +691 42 optimizer """adadelta""" +691 42 training_loop """lcwa""" +691 42 evaluator """rankbased""" +691 43 dataset """kinships""" +691 43 model """structuredembedding""" +691 43 loss """bceaftersigmoid""" +691 43 regularizer """no""" +691 43 optimizer """adadelta""" +691 43 training_loop """lcwa""" +691 43 evaluator """rankbased""" +691 44 dataset """kinships""" +691 44 model """structuredembedding""" +691 44 loss """bceaftersigmoid""" +691 44 regularizer """no""" +691 44 optimizer """adadelta""" +691 44 training_loop """lcwa""" +691 44 evaluator """rankbased""" +691 45 dataset """kinships""" +691 45 model """structuredembedding""" +691 45 loss """bceaftersigmoid""" +691 45 regularizer """no""" +691 45 optimizer """adadelta""" +691 45 training_loop """lcwa""" +691 45 evaluator """rankbased""" +691 46 dataset """kinships""" +691 46 model """structuredembedding""" +691 46 loss """bceaftersigmoid""" +691 46 regularizer """no""" +691 46 optimizer """adadelta""" +691 46 training_loop """lcwa""" 
+691 46 evaluator """rankbased""" +691 47 dataset """kinships""" +691 47 model """structuredembedding""" +691 47 loss """bceaftersigmoid""" +691 47 regularizer """no""" +691 47 optimizer """adadelta""" +691 47 training_loop """lcwa""" +691 47 evaluator """rankbased""" +691 48 dataset """kinships""" +691 48 model """structuredembedding""" +691 48 loss """bceaftersigmoid""" +691 48 regularizer """no""" +691 48 optimizer """adadelta""" +691 48 training_loop """lcwa""" +691 48 evaluator """rankbased""" +691 49 dataset """kinships""" +691 49 model """structuredembedding""" +691 49 loss """bceaftersigmoid""" +691 49 regularizer """no""" +691 49 optimizer """adadelta""" +691 49 training_loop """lcwa""" +691 49 evaluator """rankbased""" +691 50 dataset """kinships""" +691 50 model """structuredembedding""" +691 50 loss """bceaftersigmoid""" +691 50 regularizer """no""" +691 50 optimizer """adadelta""" +691 50 training_loop """lcwa""" +691 50 evaluator """rankbased""" +691 51 dataset """kinships""" +691 51 model """structuredembedding""" +691 51 loss """bceaftersigmoid""" +691 51 regularizer """no""" +691 51 optimizer """adadelta""" +691 51 training_loop """lcwa""" +691 51 evaluator """rankbased""" +691 52 dataset """kinships""" +691 52 model """structuredembedding""" +691 52 loss """bceaftersigmoid""" +691 52 regularizer """no""" +691 52 optimizer """adadelta""" +691 52 training_loop """lcwa""" +691 52 evaluator """rankbased""" +691 53 dataset """kinships""" +691 53 model """structuredembedding""" +691 53 loss """bceaftersigmoid""" +691 53 regularizer """no""" +691 53 optimizer """adadelta""" +691 53 training_loop """lcwa""" +691 53 evaluator """rankbased""" +691 54 dataset """kinships""" +691 54 model """structuredembedding""" +691 54 loss """bceaftersigmoid""" +691 54 regularizer """no""" +691 54 optimizer """adadelta""" +691 54 training_loop """lcwa""" +691 54 evaluator """rankbased""" +691 55 dataset """kinships""" +691 55 model """structuredembedding""" +691 55 loss 
"""bceaftersigmoid""" +691 55 regularizer """no""" +691 55 optimizer """adadelta""" +691 55 training_loop """lcwa""" +691 55 evaluator """rankbased""" +691 56 dataset """kinships""" +691 56 model """structuredembedding""" +691 56 loss """bceaftersigmoid""" +691 56 regularizer """no""" +691 56 optimizer """adadelta""" +691 56 training_loop """lcwa""" +691 56 evaluator """rankbased""" +691 57 dataset """kinships""" +691 57 model """structuredembedding""" +691 57 loss """bceaftersigmoid""" +691 57 regularizer """no""" +691 57 optimizer """adadelta""" +691 57 training_loop """lcwa""" +691 57 evaluator """rankbased""" +691 58 dataset """kinships""" +691 58 model """structuredembedding""" +691 58 loss """bceaftersigmoid""" +691 58 regularizer """no""" +691 58 optimizer """adadelta""" +691 58 training_loop """lcwa""" +691 58 evaluator """rankbased""" +691 59 dataset """kinships""" +691 59 model """structuredembedding""" +691 59 loss """bceaftersigmoid""" +691 59 regularizer """no""" +691 59 optimizer """adadelta""" +691 59 training_loop """lcwa""" +691 59 evaluator """rankbased""" +691 60 dataset """kinships""" +691 60 model """structuredembedding""" +691 60 loss """bceaftersigmoid""" +691 60 regularizer """no""" +691 60 optimizer """adadelta""" +691 60 training_loop """lcwa""" +691 60 evaluator """rankbased""" +691 61 dataset """kinships""" +691 61 model """structuredembedding""" +691 61 loss """bceaftersigmoid""" +691 61 regularizer """no""" +691 61 optimizer """adadelta""" +691 61 training_loop """lcwa""" +691 61 evaluator """rankbased""" +691 62 dataset """kinships""" +691 62 model """structuredembedding""" +691 62 loss """bceaftersigmoid""" +691 62 regularizer """no""" +691 62 optimizer """adadelta""" +691 62 training_loop """lcwa""" +691 62 evaluator """rankbased""" +691 63 dataset """kinships""" +691 63 model """structuredembedding""" +691 63 loss """bceaftersigmoid""" +691 63 regularizer """no""" +691 63 optimizer """adadelta""" +691 63 training_loop """lcwa""" 
+691 63 evaluator """rankbased""" +691 64 dataset """kinships""" +691 64 model """structuredembedding""" +691 64 loss """bceaftersigmoid""" +691 64 regularizer """no""" +691 64 optimizer """adadelta""" +691 64 training_loop """lcwa""" +691 64 evaluator """rankbased""" +691 65 dataset """kinships""" +691 65 model """structuredembedding""" +691 65 loss """bceaftersigmoid""" +691 65 regularizer """no""" +691 65 optimizer """adadelta""" +691 65 training_loop """lcwa""" +691 65 evaluator """rankbased""" +691 66 dataset """kinships""" +691 66 model """structuredembedding""" +691 66 loss """bceaftersigmoid""" +691 66 regularizer """no""" +691 66 optimizer """adadelta""" +691 66 training_loop """lcwa""" +691 66 evaluator """rankbased""" +691 67 dataset """kinships""" +691 67 model """structuredembedding""" +691 67 loss """bceaftersigmoid""" +691 67 regularizer """no""" +691 67 optimizer """adadelta""" +691 67 training_loop """lcwa""" +691 67 evaluator """rankbased""" +691 68 dataset """kinships""" +691 68 model """structuredembedding""" +691 68 loss """bceaftersigmoid""" +691 68 regularizer """no""" +691 68 optimizer """adadelta""" +691 68 training_loop """lcwa""" +691 68 evaluator """rankbased""" +691 69 dataset """kinships""" +691 69 model """structuredembedding""" +691 69 loss """bceaftersigmoid""" +691 69 regularizer """no""" +691 69 optimizer """adadelta""" +691 69 training_loop """lcwa""" +691 69 evaluator """rankbased""" +691 70 dataset """kinships""" +691 70 model """structuredembedding""" +691 70 loss """bceaftersigmoid""" +691 70 regularizer """no""" +691 70 optimizer """adadelta""" +691 70 training_loop """lcwa""" +691 70 evaluator """rankbased""" +691 71 dataset """kinships""" +691 71 model """structuredembedding""" +691 71 loss """bceaftersigmoid""" +691 71 regularizer """no""" +691 71 optimizer """adadelta""" +691 71 training_loop """lcwa""" +691 71 evaluator """rankbased""" +691 72 dataset """kinships""" +691 72 model """structuredembedding""" +691 72 loss 
"""bceaftersigmoid""" +691 72 regularizer """no""" +691 72 optimizer """adadelta""" +691 72 training_loop """lcwa""" +691 72 evaluator """rankbased""" +691 73 dataset """kinships""" +691 73 model """structuredembedding""" +691 73 loss """bceaftersigmoid""" +691 73 regularizer """no""" +691 73 optimizer """adadelta""" +691 73 training_loop """lcwa""" +691 73 evaluator """rankbased""" +691 74 dataset """kinships""" +691 74 model """structuredembedding""" +691 74 loss """bceaftersigmoid""" +691 74 regularizer """no""" +691 74 optimizer """adadelta""" +691 74 training_loop """lcwa""" +691 74 evaluator """rankbased""" +691 75 dataset """kinships""" +691 75 model """structuredembedding""" +691 75 loss """bceaftersigmoid""" +691 75 regularizer """no""" +691 75 optimizer """adadelta""" +691 75 training_loop """lcwa""" +691 75 evaluator """rankbased""" +691 76 dataset """kinships""" +691 76 model """structuredembedding""" +691 76 loss """bceaftersigmoid""" +691 76 regularizer """no""" +691 76 optimizer """adadelta""" +691 76 training_loop """lcwa""" +691 76 evaluator """rankbased""" +691 77 dataset """kinships""" +691 77 model """structuredembedding""" +691 77 loss """bceaftersigmoid""" +691 77 regularizer """no""" +691 77 optimizer """adadelta""" +691 77 training_loop """lcwa""" +691 77 evaluator """rankbased""" +691 78 dataset """kinships""" +691 78 model """structuredembedding""" +691 78 loss """bceaftersigmoid""" +691 78 regularizer """no""" +691 78 optimizer """adadelta""" +691 78 training_loop """lcwa""" +691 78 evaluator """rankbased""" +691 79 dataset """kinships""" +691 79 model """structuredembedding""" +691 79 loss """bceaftersigmoid""" +691 79 regularizer """no""" +691 79 optimizer """adadelta""" +691 79 training_loop """lcwa""" +691 79 evaluator """rankbased""" +691 80 dataset """kinships""" +691 80 model """structuredembedding""" +691 80 loss """bceaftersigmoid""" +691 80 regularizer """no""" +691 80 optimizer """adadelta""" +691 80 training_loop """lcwa""" 
+691 80 evaluator """rankbased""" +691 81 dataset """kinships""" +691 81 model """structuredembedding""" +691 81 loss """bceaftersigmoid""" +691 81 regularizer """no""" +691 81 optimizer """adadelta""" +691 81 training_loop """lcwa""" +691 81 evaluator """rankbased""" +691 82 dataset """kinships""" +691 82 model """structuredembedding""" +691 82 loss """bceaftersigmoid""" +691 82 regularizer """no""" +691 82 optimizer """adadelta""" +691 82 training_loop """lcwa""" +691 82 evaluator """rankbased""" +691 83 dataset """kinships""" +691 83 model """structuredembedding""" +691 83 loss """bceaftersigmoid""" +691 83 regularizer """no""" +691 83 optimizer """adadelta""" +691 83 training_loop """lcwa""" +691 83 evaluator """rankbased""" +691 84 dataset """kinships""" +691 84 model """structuredembedding""" +691 84 loss """bceaftersigmoid""" +691 84 regularizer """no""" +691 84 optimizer """adadelta""" +691 84 training_loop """lcwa""" +691 84 evaluator """rankbased""" +691 85 dataset """kinships""" +691 85 model """structuredembedding""" +691 85 loss """bceaftersigmoid""" +691 85 regularizer """no""" +691 85 optimizer """adadelta""" +691 85 training_loop """lcwa""" +691 85 evaluator """rankbased""" +691 86 dataset """kinships""" +691 86 model """structuredembedding""" +691 86 loss """bceaftersigmoid""" +691 86 regularizer """no""" +691 86 optimizer """adadelta""" +691 86 training_loop """lcwa""" +691 86 evaluator """rankbased""" +691 87 dataset """kinships""" +691 87 model """structuredembedding""" +691 87 loss """bceaftersigmoid""" +691 87 regularizer """no""" +691 87 optimizer """adadelta""" +691 87 training_loop """lcwa""" +691 87 evaluator """rankbased""" +691 88 dataset """kinships""" +691 88 model """structuredembedding""" +691 88 loss """bceaftersigmoid""" +691 88 regularizer """no""" +691 88 optimizer """adadelta""" +691 88 training_loop """lcwa""" +691 88 evaluator """rankbased""" +691 89 dataset """kinships""" +691 89 model """structuredembedding""" +691 89 loss 
"""bceaftersigmoid""" +691 89 regularizer """no""" +691 89 optimizer """adadelta""" +691 89 training_loop """lcwa""" +691 89 evaluator """rankbased""" +691 90 dataset """kinships""" +691 90 model """structuredembedding""" +691 90 loss """bceaftersigmoid""" +691 90 regularizer """no""" +691 90 optimizer """adadelta""" +691 90 training_loop """lcwa""" +691 90 evaluator """rankbased""" +691 91 dataset """kinships""" +691 91 model """structuredembedding""" +691 91 loss """bceaftersigmoid""" +691 91 regularizer """no""" +691 91 optimizer """adadelta""" +691 91 training_loop """lcwa""" +691 91 evaluator """rankbased""" +691 92 dataset """kinships""" +691 92 model """structuredembedding""" +691 92 loss """bceaftersigmoid""" +691 92 regularizer """no""" +691 92 optimizer """adadelta""" +691 92 training_loop """lcwa""" +691 92 evaluator """rankbased""" +691 93 dataset """kinships""" +691 93 model """structuredembedding""" +691 93 loss """bceaftersigmoid""" +691 93 regularizer """no""" +691 93 optimizer """adadelta""" +691 93 training_loop """lcwa""" +691 93 evaluator """rankbased""" +691 94 dataset """kinships""" +691 94 model """structuredembedding""" +691 94 loss """bceaftersigmoid""" +691 94 regularizer """no""" +691 94 optimizer """adadelta""" +691 94 training_loop """lcwa""" +691 94 evaluator """rankbased""" +691 95 dataset """kinships""" +691 95 model """structuredembedding""" +691 95 loss """bceaftersigmoid""" +691 95 regularizer """no""" +691 95 optimizer """adadelta""" +691 95 training_loop """lcwa""" +691 95 evaluator """rankbased""" +691 96 dataset """kinships""" +691 96 model """structuredembedding""" +691 96 loss """bceaftersigmoid""" +691 96 regularizer """no""" +691 96 optimizer """adadelta""" +691 96 training_loop """lcwa""" +691 96 evaluator """rankbased""" +691 97 dataset """kinships""" +691 97 model """structuredembedding""" +691 97 loss """bceaftersigmoid""" +691 97 regularizer """no""" +691 97 optimizer """adadelta""" +691 97 training_loop """lcwa""" 
+691 97 evaluator """rankbased""" +691 98 dataset """kinships""" +691 98 model """structuredembedding""" +691 98 loss """bceaftersigmoid""" +691 98 regularizer """no""" +691 98 optimizer """adadelta""" +691 98 training_loop """lcwa""" +691 98 evaluator """rankbased""" +691 99 dataset """kinships""" +691 99 model """structuredembedding""" +691 99 loss """bceaftersigmoid""" +691 99 regularizer """no""" +691 99 optimizer """adadelta""" +691 99 training_loop """lcwa""" +691 99 evaluator """rankbased""" +691 100 dataset """kinships""" +691 100 model """structuredembedding""" +691 100 loss """bceaftersigmoid""" +691 100 regularizer """no""" +691 100 optimizer """adadelta""" +691 100 training_loop """lcwa""" +691 100 evaluator """rankbased""" +692 1 model.embedding_dim 2.0 +692 1 model.scoring_fct_norm 2.0 +692 1 training.batch_size 0.0 +692 1 training.label_smoothing 0.0015565711013845766 +692 2 model.embedding_dim 1.0 +692 2 model.scoring_fct_norm 2.0 +692 2 training.batch_size 1.0 +692 2 training.label_smoothing 0.002601656060454174 +692 3 model.embedding_dim 2.0 +692 3 model.scoring_fct_norm 2.0 +692 3 training.batch_size 1.0 +692 3 training.label_smoothing 0.002128060751899752 +692 4 model.embedding_dim 2.0 +692 4 model.scoring_fct_norm 1.0 +692 4 training.batch_size 1.0 +692 4 training.label_smoothing 0.0017532929443805293 +692 5 model.embedding_dim 2.0 +692 5 model.scoring_fct_norm 2.0 +692 5 training.batch_size 1.0 +692 5 training.label_smoothing 0.001057612992255758 +692 6 model.embedding_dim 0.0 +692 6 model.scoring_fct_norm 2.0 +692 6 training.batch_size 2.0 +692 6 training.label_smoothing 0.8953475414718647 +692 7 model.embedding_dim 0.0 +692 7 model.scoring_fct_norm 1.0 +692 7 training.batch_size 2.0 +692 7 training.label_smoothing 0.016647800893865895 +692 8 model.embedding_dim 0.0 +692 8 model.scoring_fct_norm 2.0 +692 8 training.batch_size 2.0 +692 8 training.label_smoothing 0.0023800924144058468 +692 9 model.embedding_dim 1.0 +692 9 model.scoring_fct_norm 
2.0 +692 9 training.batch_size 0.0 +692 9 training.label_smoothing 0.006375246828637223 +692 10 model.embedding_dim 2.0 +692 10 model.scoring_fct_norm 2.0 +692 10 training.batch_size 2.0 +692 10 training.label_smoothing 0.14626956680839198 +692 11 model.embedding_dim 1.0 +692 11 model.scoring_fct_norm 2.0 +692 11 training.batch_size 2.0 +692 11 training.label_smoothing 0.8648534205417967 +692 12 model.embedding_dim 0.0 +692 12 model.scoring_fct_norm 1.0 +692 12 training.batch_size 2.0 +692 12 training.label_smoothing 0.0036162826229189995 +692 13 model.embedding_dim 0.0 +692 13 model.scoring_fct_norm 2.0 +692 13 training.batch_size 2.0 +692 13 training.label_smoothing 0.3420321617613639 +692 14 model.embedding_dim 0.0 +692 14 model.scoring_fct_norm 1.0 +692 14 training.batch_size 0.0 +692 14 training.label_smoothing 0.008783790895661437 +692 15 model.embedding_dim 2.0 +692 15 model.scoring_fct_norm 2.0 +692 15 training.batch_size 2.0 +692 15 training.label_smoothing 0.11088416110809449 +692 16 model.embedding_dim 2.0 +692 16 model.scoring_fct_norm 2.0 +692 16 training.batch_size 0.0 +692 16 training.label_smoothing 0.004503255326642494 +692 17 model.embedding_dim 1.0 +692 17 model.scoring_fct_norm 1.0 +692 17 training.batch_size 2.0 +692 17 training.label_smoothing 0.8482882625420911 +692 18 model.embedding_dim 2.0 +692 18 model.scoring_fct_norm 1.0 +692 18 training.batch_size 2.0 +692 18 training.label_smoothing 0.8481414972216048 +692 19 model.embedding_dim 1.0 +692 19 model.scoring_fct_norm 1.0 +692 19 training.batch_size 2.0 +692 19 training.label_smoothing 0.12230677626901361 +692 20 model.embedding_dim 2.0 +692 20 model.scoring_fct_norm 2.0 +692 20 training.batch_size 2.0 +692 20 training.label_smoothing 0.02676554227646362 +692 21 model.embedding_dim 2.0 +692 21 model.scoring_fct_norm 2.0 +692 21 training.batch_size 2.0 +692 21 training.label_smoothing 0.06464526518638415 +692 22 model.embedding_dim 0.0 +692 22 model.scoring_fct_norm 2.0 +692 22 
training.batch_size 2.0 +692 22 training.label_smoothing 0.5210794012450937 +692 23 model.embedding_dim 2.0 +692 23 model.scoring_fct_norm 2.0 +692 23 training.batch_size 1.0 +692 23 training.label_smoothing 0.0018233804755381617 +692 24 model.embedding_dim 1.0 +692 24 model.scoring_fct_norm 2.0 +692 24 training.batch_size 0.0 +692 24 training.label_smoothing 0.026565308033571253 +692 25 model.embedding_dim 2.0 +692 25 model.scoring_fct_norm 2.0 +692 25 training.batch_size 2.0 +692 25 training.label_smoothing 0.18126619342961753 +692 26 model.embedding_dim 1.0 +692 26 model.scoring_fct_norm 1.0 +692 26 training.batch_size 2.0 +692 26 training.label_smoothing 0.012166185809929501 +692 27 model.embedding_dim 1.0 +692 27 model.scoring_fct_norm 2.0 +692 27 training.batch_size 2.0 +692 27 training.label_smoothing 0.0014743551768285025 +692 28 model.embedding_dim 2.0 +692 28 model.scoring_fct_norm 1.0 +692 28 training.batch_size 2.0 +692 28 training.label_smoothing 0.7945384942359532 +692 29 model.embedding_dim 0.0 +692 29 model.scoring_fct_norm 1.0 +692 29 training.batch_size 2.0 +692 29 training.label_smoothing 0.30227356673611916 +692 30 model.embedding_dim 1.0 +692 30 model.scoring_fct_norm 2.0 +692 30 training.batch_size 2.0 +692 30 training.label_smoothing 0.004100263125482887 +692 31 model.embedding_dim 2.0 +692 31 model.scoring_fct_norm 1.0 +692 31 training.batch_size 1.0 +692 31 training.label_smoothing 0.4727929818994542 +692 32 model.embedding_dim 2.0 +692 32 model.scoring_fct_norm 1.0 +692 32 training.batch_size 2.0 +692 32 training.label_smoothing 0.1630457598294581 +692 33 model.embedding_dim 1.0 +692 33 model.scoring_fct_norm 2.0 +692 33 training.batch_size 0.0 +692 33 training.label_smoothing 0.007514720262895641 +692 34 model.embedding_dim 2.0 +692 34 model.scoring_fct_norm 1.0 +692 34 training.batch_size 0.0 +692 34 training.label_smoothing 0.03991689062484466 +692 35 model.embedding_dim 0.0 +692 35 model.scoring_fct_norm 1.0 +692 35 training.batch_size 
1.0 +692 35 training.label_smoothing 0.2222467365018164 +692 36 model.embedding_dim 0.0 +692 36 model.scoring_fct_norm 1.0 +692 36 training.batch_size 0.0 +692 36 training.label_smoothing 0.004116964953190114 +692 37 model.embedding_dim 0.0 +692 37 model.scoring_fct_norm 2.0 +692 37 training.batch_size 0.0 +692 37 training.label_smoothing 0.00776095348259608 +692 38 model.embedding_dim 1.0 +692 38 model.scoring_fct_norm 2.0 +692 38 training.batch_size 1.0 +692 38 training.label_smoothing 0.004300352331970539 +692 39 model.embedding_dim 1.0 +692 39 model.scoring_fct_norm 2.0 +692 39 training.batch_size 0.0 +692 39 training.label_smoothing 0.00628251784971761 +692 40 model.embedding_dim 0.0 +692 40 model.scoring_fct_norm 1.0 +692 40 training.batch_size 0.0 +692 40 training.label_smoothing 0.001909890810881697 +692 41 model.embedding_dim 1.0 +692 41 model.scoring_fct_norm 2.0 +692 41 training.batch_size 0.0 +692 41 training.label_smoothing 0.37983860512250317 +692 42 model.embedding_dim 1.0 +692 42 model.scoring_fct_norm 1.0 +692 42 training.batch_size 2.0 +692 42 training.label_smoothing 0.025996666344760708 +692 43 model.embedding_dim 1.0 +692 43 model.scoring_fct_norm 1.0 +692 43 training.batch_size 2.0 +692 43 training.label_smoothing 0.0029603362790688163 +692 44 model.embedding_dim 2.0 +692 44 model.scoring_fct_norm 1.0 +692 44 training.batch_size 2.0 +692 44 training.label_smoothing 0.5107639512764146 +692 45 model.embedding_dim 2.0 +692 45 model.scoring_fct_norm 1.0 +692 45 training.batch_size 1.0 +692 45 training.label_smoothing 0.017427340661740284 +692 46 model.embedding_dim 1.0 +692 46 model.scoring_fct_norm 2.0 +692 46 training.batch_size 2.0 +692 46 training.label_smoothing 0.004759896247515439 +692 47 model.embedding_dim 2.0 +692 47 model.scoring_fct_norm 1.0 +692 47 training.batch_size 2.0 +692 47 training.label_smoothing 0.018358164450448286 +692 48 model.embedding_dim 2.0 +692 48 model.scoring_fct_norm 1.0 +692 48 training.batch_size 2.0 +692 48 
training.label_smoothing 0.006404999170250452 +692 49 model.embedding_dim 1.0 +692 49 model.scoring_fct_norm 2.0 +692 49 training.batch_size 1.0 +692 49 training.label_smoothing 0.1030502537376682 +692 50 model.embedding_dim 0.0 +692 50 model.scoring_fct_norm 2.0 +692 50 training.batch_size 1.0 +692 50 training.label_smoothing 0.039225301012768596 +692 51 model.embedding_dim 0.0 +692 51 model.scoring_fct_norm 2.0 +692 51 training.batch_size 1.0 +692 51 training.label_smoothing 0.0012211897479632167 +692 52 model.embedding_dim 0.0 +692 52 model.scoring_fct_norm 2.0 +692 52 training.batch_size 0.0 +692 52 training.label_smoothing 0.02875841828911375 +692 53 model.embedding_dim 0.0 +692 53 model.scoring_fct_norm 1.0 +692 53 training.batch_size 2.0 +692 53 training.label_smoothing 0.0966480464577468 +692 54 model.embedding_dim 0.0 +692 54 model.scoring_fct_norm 2.0 +692 54 training.batch_size 1.0 +692 54 training.label_smoothing 0.006767091726424441 +692 55 model.embedding_dim 1.0 +692 55 model.scoring_fct_norm 2.0 +692 55 training.batch_size 1.0 +692 55 training.label_smoothing 0.057861184395133554 +692 56 model.embedding_dim 0.0 +692 56 model.scoring_fct_norm 1.0 +692 56 training.batch_size 1.0 +692 56 training.label_smoothing 0.8859195855954453 +692 57 model.embedding_dim 2.0 +692 57 model.scoring_fct_norm 1.0 +692 57 training.batch_size 2.0 +692 57 training.label_smoothing 0.023665554544839675 +692 58 model.embedding_dim 2.0 +692 58 model.scoring_fct_norm 2.0 +692 58 training.batch_size 0.0 +692 58 training.label_smoothing 0.016064505756550326 +692 59 model.embedding_dim 0.0 +692 59 model.scoring_fct_norm 2.0 +692 59 training.batch_size 2.0 +692 59 training.label_smoothing 0.003185892316496425 +692 60 model.embedding_dim 2.0 +692 60 model.scoring_fct_norm 2.0 +692 60 training.batch_size 1.0 +692 60 training.label_smoothing 0.5478431726766527 +692 61 model.embedding_dim 1.0 +692 61 model.scoring_fct_norm 1.0 +692 61 training.batch_size 1.0 +692 61 
training.label_smoothing 0.024228435969381856 +692 62 model.embedding_dim 2.0 +692 62 model.scoring_fct_norm 2.0 +692 62 training.batch_size 0.0 +692 62 training.label_smoothing 0.01873735496775138 +692 63 model.embedding_dim 2.0 +692 63 model.scoring_fct_norm 2.0 +692 63 training.batch_size 1.0 +692 63 training.label_smoothing 0.31288545290343833 +692 64 model.embedding_dim 2.0 +692 64 model.scoring_fct_norm 1.0 +692 64 training.batch_size 1.0 +692 64 training.label_smoothing 0.6736057870713272 +692 65 model.embedding_dim 1.0 +692 65 model.scoring_fct_norm 1.0 +692 65 training.batch_size 2.0 +692 65 training.label_smoothing 0.032117033110770236 +692 66 model.embedding_dim 0.0 +692 66 model.scoring_fct_norm 2.0 +692 66 training.batch_size 1.0 +692 66 training.label_smoothing 0.04826687774852966 +692 67 model.embedding_dim 1.0 +692 67 model.scoring_fct_norm 2.0 +692 67 training.batch_size 2.0 +692 67 training.label_smoothing 0.0021800769631790365 +692 68 model.embedding_dim 1.0 +692 68 model.scoring_fct_norm 2.0 +692 68 training.batch_size 0.0 +692 68 training.label_smoothing 0.002999261527906618 +692 69 model.embedding_dim 0.0 +692 69 model.scoring_fct_norm 1.0 +692 69 training.batch_size 2.0 +692 69 training.label_smoothing 0.23335442302076678 +692 70 model.embedding_dim 1.0 +692 70 model.scoring_fct_norm 2.0 +692 70 training.batch_size 2.0 +692 70 training.label_smoothing 0.6970576254177631 +692 71 model.embedding_dim 1.0 +692 71 model.scoring_fct_norm 1.0 +692 71 training.batch_size 2.0 +692 71 training.label_smoothing 0.008194945838841158 +692 72 model.embedding_dim 1.0 +692 72 model.scoring_fct_norm 1.0 +692 72 training.batch_size 0.0 +692 72 training.label_smoothing 0.003195729451427545 +692 73 model.embedding_dim 2.0 +692 73 model.scoring_fct_norm 1.0 +692 73 training.batch_size 1.0 +692 73 training.label_smoothing 0.10895260028698214 +692 74 model.embedding_dim 0.0 +692 74 model.scoring_fct_norm 1.0 +692 74 training.batch_size 2.0 +692 74 
training.label_smoothing 0.0010604120511631123 +692 75 model.embedding_dim 0.0 +692 75 model.scoring_fct_norm 2.0 +692 75 training.batch_size 0.0 +692 75 training.label_smoothing 0.8690070455289589 +692 76 model.embedding_dim 0.0 +692 76 model.scoring_fct_norm 1.0 +692 76 training.batch_size 0.0 +692 76 training.label_smoothing 0.00444997744066266 +692 77 model.embedding_dim 2.0 +692 77 model.scoring_fct_norm 1.0 +692 77 training.batch_size 0.0 +692 77 training.label_smoothing 0.0073883049128886545 +692 78 model.embedding_dim 0.0 +692 78 model.scoring_fct_norm 2.0 +692 78 training.batch_size 2.0 +692 78 training.label_smoothing 0.002171197749926988 +692 79 model.embedding_dim 1.0 +692 79 model.scoring_fct_norm 2.0 +692 79 training.batch_size 1.0 +692 79 training.label_smoothing 0.028051192786431842 +692 80 model.embedding_dim 0.0 +692 80 model.scoring_fct_norm 1.0 +692 80 training.batch_size 1.0 +692 80 training.label_smoothing 0.01946755641941571 +692 81 model.embedding_dim 1.0 +692 81 model.scoring_fct_norm 2.0 +692 81 training.batch_size 2.0 +692 81 training.label_smoothing 0.007457219299132412 +692 82 model.embedding_dim 2.0 +692 82 model.scoring_fct_norm 1.0 +692 82 training.batch_size 2.0 +692 82 training.label_smoothing 0.2191161159464331 +692 83 model.embedding_dim 1.0 +692 83 model.scoring_fct_norm 2.0 +692 83 training.batch_size 1.0 +692 83 training.label_smoothing 0.0022628627774314886 +692 84 model.embedding_dim 1.0 +692 84 model.scoring_fct_norm 2.0 +692 84 training.batch_size 1.0 +692 84 training.label_smoothing 0.11172112004934369 +692 85 model.embedding_dim 2.0 +692 85 model.scoring_fct_norm 2.0 +692 85 training.batch_size 1.0 +692 85 training.label_smoothing 0.04853424898409963 +692 86 model.embedding_dim 2.0 +692 86 model.scoring_fct_norm 1.0 +692 86 training.batch_size 2.0 +692 86 training.label_smoothing 0.0286712582217885 +692 87 model.embedding_dim 0.0 +692 87 model.scoring_fct_norm 2.0 +692 87 training.batch_size 2.0 +692 87 
training.label_smoothing 0.29605175944478856 +692 88 model.embedding_dim 2.0 +692 88 model.scoring_fct_norm 2.0 +692 88 training.batch_size 1.0 +692 88 training.label_smoothing 0.6838047167383292 +692 89 model.embedding_dim 1.0 +692 89 model.scoring_fct_norm 2.0 +692 89 training.batch_size 2.0 +692 89 training.label_smoothing 0.10624852301168494 +692 90 model.embedding_dim 0.0 +692 90 model.scoring_fct_norm 2.0 +692 90 training.batch_size 1.0 +692 90 training.label_smoothing 0.013898426502618936 +692 91 model.embedding_dim 2.0 +692 91 model.scoring_fct_norm 1.0 +692 91 training.batch_size 0.0 +692 91 training.label_smoothing 0.3055700196254333 +692 92 model.embedding_dim 1.0 +692 92 model.scoring_fct_norm 2.0 +692 92 training.batch_size 0.0 +692 92 training.label_smoothing 0.03458282263062883 +692 93 model.embedding_dim 0.0 +692 93 model.scoring_fct_norm 2.0 +692 93 training.batch_size 2.0 +692 93 training.label_smoothing 0.028545487527823432 +692 94 model.embedding_dim 1.0 +692 94 model.scoring_fct_norm 2.0 +692 94 training.batch_size 0.0 +692 94 training.label_smoothing 0.07266388138306235 +692 95 model.embedding_dim 1.0 +692 95 model.scoring_fct_norm 2.0 +692 95 training.batch_size 0.0 +692 95 training.label_smoothing 0.020665469817702654 +692 96 model.embedding_dim 0.0 +692 96 model.scoring_fct_norm 2.0 +692 96 training.batch_size 1.0 +692 96 training.label_smoothing 0.01887347260459456 +692 97 model.embedding_dim 2.0 +692 97 model.scoring_fct_norm 2.0 +692 97 training.batch_size 1.0 +692 97 training.label_smoothing 0.01865781136956332 +692 98 model.embedding_dim 0.0 +692 98 model.scoring_fct_norm 2.0 +692 98 training.batch_size 1.0 +692 98 training.label_smoothing 0.07164495369851939 +692 99 model.embedding_dim 2.0 +692 99 model.scoring_fct_norm 2.0 +692 99 training.batch_size 2.0 +692 99 training.label_smoothing 0.1901024436112395 +692 100 model.embedding_dim 2.0 +692 100 model.scoring_fct_norm 1.0 +692 100 training.batch_size 2.0 +692 100 
training.label_smoothing 0.011404439262910768 +692 1 dataset """kinships""" +692 1 model """structuredembedding""" +692 1 loss """softplus""" +692 1 regularizer """no""" +692 1 optimizer """adadelta""" +692 1 training_loop """lcwa""" +692 1 evaluator """rankbased""" +692 2 dataset """kinships""" +692 2 model """structuredembedding""" +692 2 loss """softplus""" +692 2 regularizer """no""" +692 2 optimizer """adadelta""" +692 2 training_loop """lcwa""" +692 2 evaluator """rankbased""" +692 3 dataset """kinships""" +692 3 model """structuredembedding""" +692 3 loss """softplus""" +692 3 regularizer """no""" +692 3 optimizer """adadelta""" +692 3 training_loop """lcwa""" +692 3 evaluator """rankbased""" +692 4 dataset """kinships""" +692 4 model """structuredembedding""" +692 4 loss """softplus""" +692 4 regularizer """no""" +692 4 optimizer """adadelta""" +692 4 training_loop """lcwa""" +692 4 evaluator """rankbased""" +692 5 dataset """kinships""" +692 5 model """structuredembedding""" +692 5 loss """softplus""" +692 5 regularizer """no""" +692 5 optimizer """adadelta""" +692 5 training_loop """lcwa""" +692 5 evaluator """rankbased""" +692 6 dataset """kinships""" +692 6 model """structuredembedding""" +692 6 loss """softplus""" +692 6 regularizer """no""" +692 6 optimizer """adadelta""" +692 6 training_loop """lcwa""" +692 6 evaluator """rankbased""" +692 7 dataset """kinships""" +692 7 model """structuredembedding""" +692 7 loss """softplus""" +692 7 regularizer """no""" +692 7 optimizer """adadelta""" +692 7 training_loop """lcwa""" +692 7 evaluator """rankbased""" +692 8 dataset """kinships""" +692 8 model """structuredembedding""" +692 8 loss """softplus""" +692 8 regularizer """no""" +692 8 optimizer """adadelta""" +692 8 training_loop """lcwa""" +692 8 evaluator """rankbased""" +692 9 dataset """kinships""" +692 9 model """structuredembedding""" +692 9 loss """softplus""" +692 9 regularizer """no""" +692 9 optimizer """adadelta""" +692 9 training_loop 
"""lcwa""" +692 9 evaluator """rankbased""" +692 10 dataset """kinships""" +692 10 model """structuredembedding""" +692 10 loss """softplus""" +692 10 regularizer """no""" +692 10 optimizer """adadelta""" +692 10 training_loop """lcwa""" +692 10 evaluator """rankbased""" +692 11 dataset """kinships""" +692 11 model """structuredembedding""" +692 11 loss """softplus""" +692 11 regularizer """no""" +692 11 optimizer """adadelta""" +692 11 training_loop """lcwa""" +692 11 evaluator """rankbased""" +692 12 dataset """kinships""" +692 12 model """structuredembedding""" +692 12 loss """softplus""" +692 12 regularizer """no""" +692 12 optimizer """adadelta""" +692 12 training_loop """lcwa""" +692 12 evaluator """rankbased""" +692 13 dataset """kinships""" +692 13 model """structuredembedding""" +692 13 loss """softplus""" +692 13 regularizer """no""" +692 13 optimizer """adadelta""" +692 13 training_loop """lcwa""" +692 13 evaluator """rankbased""" +692 14 dataset """kinships""" +692 14 model """structuredembedding""" +692 14 loss """softplus""" +692 14 regularizer """no""" +692 14 optimizer """adadelta""" +692 14 training_loop """lcwa""" +692 14 evaluator """rankbased""" +692 15 dataset """kinships""" +692 15 model """structuredembedding""" +692 15 loss """softplus""" +692 15 regularizer """no""" +692 15 optimizer """adadelta""" +692 15 training_loop """lcwa""" +692 15 evaluator """rankbased""" +692 16 dataset """kinships""" +692 16 model """structuredembedding""" +692 16 loss """softplus""" +692 16 regularizer """no""" +692 16 optimizer """adadelta""" +692 16 training_loop """lcwa""" +692 16 evaluator """rankbased""" +692 17 dataset """kinships""" +692 17 model """structuredembedding""" +692 17 loss """softplus""" +692 17 regularizer """no""" +692 17 optimizer """adadelta""" +692 17 training_loop """lcwa""" +692 17 evaluator """rankbased""" +692 18 dataset """kinships""" +692 18 model """structuredembedding""" +692 18 loss """softplus""" +692 18 regularizer """no""" 
+692 18 optimizer """adadelta""" +692 18 training_loop """lcwa""" +692 18 evaluator """rankbased""" +692 19 dataset """kinships""" +692 19 model """structuredembedding""" +692 19 loss """softplus""" +692 19 regularizer """no""" +692 19 optimizer """adadelta""" +692 19 training_loop """lcwa""" +692 19 evaluator """rankbased""" +692 20 dataset """kinships""" +692 20 model """structuredembedding""" +692 20 loss """softplus""" +692 20 regularizer """no""" +692 20 optimizer """adadelta""" +692 20 training_loop """lcwa""" +692 20 evaluator """rankbased""" +692 21 dataset """kinships""" +692 21 model """structuredembedding""" +692 21 loss """softplus""" +692 21 regularizer """no""" +692 21 optimizer """adadelta""" +692 21 training_loop """lcwa""" +692 21 evaluator """rankbased""" +692 22 dataset """kinships""" +692 22 model """structuredembedding""" +692 22 loss """softplus""" +692 22 regularizer """no""" +692 22 optimizer """adadelta""" +692 22 training_loop """lcwa""" +692 22 evaluator """rankbased""" +692 23 dataset """kinships""" +692 23 model """structuredembedding""" +692 23 loss """softplus""" +692 23 regularizer """no""" +692 23 optimizer """adadelta""" +692 23 training_loop """lcwa""" +692 23 evaluator """rankbased""" +692 24 dataset """kinships""" +692 24 model """structuredembedding""" +692 24 loss """softplus""" +692 24 regularizer """no""" +692 24 optimizer """adadelta""" +692 24 training_loop """lcwa""" +692 24 evaluator """rankbased""" +692 25 dataset """kinships""" +692 25 model """structuredembedding""" +692 25 loss """softplus""" +692 25 regularizer """no""" +692 25 optimizer """adadelta""" +692 25 training_loop """lcwa""" +692 25 evaluator """rankbased""" +692 26 dataset """kinships""" +692 26 model """structuredembedding""" +692 26 loss """softplus""" +692 26 regularizer """no""" +692 26 optimizer """adadelta""" +692 26 training_loop """lcwa""" +692 26 evaluator """rankbased""" +692 27 dataset """kinships""" +692 27 model """structuredembedding""" +692 
27 loss """softplus""" +692 27 regularizer """no""" +692 27 optimizer """adadelta""" +692 27 training_loop """lcwa""" +692 27 evaluator """rankbased""" +692 28 dataset """kinships""" +692 28 model """structuredembedding""" +692 28 loss """softplus""" +692 28 regularizer """no""" +692 28 optimizer """adadelta""" +692 28 training_loop """lcwa""" +692 28 evaluator """rankbased""" +692 29 dataset """kinships""" +692 29 model """structuredembedding""" +692 29 loss """softplus""" +692 29 regularizer """no""" +692 29 optimizer """adadelta""" +692 29 training_loop """lcwa""" +692 29 evaluator """rankbased""" +692 30 dataset """kinships""" +692 30 model """structuredembedding""" +692 30 loss """softplus""" +692 30 regularizer """no""" +692 30 optimizer """adadelta""" +692 30 training_loop """lcwa""" +692 30 evaluator """rankbased""" +692 31 dataset """kinships""" +692 31 model """structuredembedding""" +692 31 loss """softplus""" +692 31 regularizer """no""" +692 31 optimizer """adadelta""" +692 31 training_loop """lcwa""" +692 31 evaluator """rankbased""" +692 32 dataset """kinships""" +692 32 model """structuredembedding""" +692 32 loss """softplus""" +692 32 regularizer """no""" +692 32 optimizer """adadelta""" +692 32 training_loop """lcwa""" +692 32 evaluator """rankbased""" +692 33 dataset """kinships""" +692 33 model """structuredembedding""" +692 33 loss """softplus""" +692 33 regularizer """no""" +692 33 optimizer """adadelta""" +692 33 training_loop """lcwa""" +692 33 evaluator """rankbased""" +692 34 dataset """kinships""" +692 34 model """structuredembedding""" +692 34 loss """softplus""" +692 34 regularizer """no""" +692 34 optimizer """adadelta""" +692 34 training_loop """lcwa""" +692 34 evaluator """rankbased""" +692 35 dataset """kinships""" +692 35 model """structuredembedding""" +692 35 loss """softplus""" +692 35 regularizer """no""" +692 35 optimizer """adadelta""" +692 35 training_loop """lcwa""" +692 35 evaluator """rankbased""" +692 36 dataset 
"""kinships""" +692 36 model """structuredembedding""" +692 36 loss """softplus""" +692 36 regularizer """no""" +692 36 optimizer """adadelta""" +692 36 training_loop """lcwa""" +692 36 evaluator """rankbased""" +692 37 dataset """kinships""" +692 37 model """structuredembedding""" +692 37 loss """softplus""" +692 37 regularizer """no""" +692 37 optimizer """adadelta""" +692 37 training_loop """lcwa""" +692 37 evaluator """rankbased""" +692 38 dataset """kinships""" +692 38 model """structuredembedding""" +692 38 loss """softplus""" +692 38 regularizer """no""" +692 38 optimizer """adadelta""" +692 38 training_loop """lcwa""" +692 38 evaluator """rankbased""" +692 39 dataset """kinships""" +692 39 model """structuredembedding""" +692 39 loss """softplus""" +692 39 regularizer """no""" +692 39 optimizer """adadelta""" +692 39 training_loop """lcwa""" +692 39 evaluator """rankbased""" +692 40 dataset """kinships""" +692 40 model """structuredembedding""" +692 40 loss """softplus""" +692 40 regularizer """no""" +692 40 optimizer """adadelta""" +692 40 training_loop """lcwa""" +692 40 evaluator """rankbased""" +692 41 dataset """kinships""" +692 41 model """structuredembedding""" +692 41 loss """softplus""" +692 41 regularizer """no""" +692 41 optimizer """adadelta""" +692 41 training_loop """lcwa""" +692 41 evaluator """rankbased""" +692 42 dataset """kinships""" +692 42 model """structuredembedding""" +692 42 loss """softplus""" +692 42 regularizer """no""" +692 42 optimizer """adadelta""" +692 42 training_loop """lcwa""" +692 42 evaluator """rankbased""" +692 43 dataset """kinships""" +692 43 model """structuredembedding""" +692 43 loss """softplus""" +692 43 regularizer """no""" +692 43 optimizer """adadelta""" +692 43 training_loop """lcwa""" +692 43 evaluator """rankbased""" +692 44 dataset """kinships""" +692 44 model """structuredembedding""" +692 44 loss """softplus""" +692 44 regularizer """no""" +692 44 optimizer """adadelta""" +692 44 training_loop 
"""lcwa""" +692 44 evaluator """rankbased""" +692 45 dataset """kinships""" +692 45 model """structuredembedding""" +692 45 loss """softplus""" +692 45 regularizer """no""" +692 45 optimizer """adadelta""" +692 45 training_loop """lcwa""" +692 45 evaluator """rankbased""" +692 46 dataset """kinships""" +692 46 model """structuredembedding""" +692 46 loss """softplus""" +692 46 regularizer """no""" +692 46 optimizer """adadelta""" +692 46 training_loop """lcwa""" +692 46 evaluator """rankbased""" +692 47 dataset """kinships""" +692 47 model """structuredembedding""" +692 47 loss """softplus""" +692 47 regularizer """no""" +692 47 optimizer """adadelta""" +692 47 training_loop """lcwa""" +692 47 evaluator """rankbased""" +692 48 dataset """kinships""" +692 48 model """structuredembedding""" +692 48 loss """softplus""" +692 48 regularizer """no""" +692 48 optimizer """adadelta""" +692 48 training_loop """lcwa""" +692 48 evaluator """rankbased""" +692 49 dataset """kinships""" +692 49 model """structuredembedding""" +692 49 loss """softplus""" +692 49 regularizer """no""" +692 49 optimizer """adadelta""" +692 49 training_loop """lcwa""" +692 49 evaluator """rankbased""" +692 50 dataset """kinships""" +692 50 model """structuredembedding""" +692 50 loss """softplus""" +692 50 regularizer """no""" +692 50 optimizer """adadelta""" +692 50 training_loop """lcwa""" +692 50 evaluator """rankbased""" +692 51 dataset """kinships""" +692 51 model """structuredembedding""" +692 51 loss """softplus""" +692 51 regularizer """no""" +692 51 optimizer """adadelta""" +692 51 training_loop """lcwa""" +692 51 evaluator """rankbased""" +692 52 dataset """kinships""" +692 52 model """structuredembedding""" +692 52 loss """softplus""" +692 52 regularizer """no""" +692 52 optimizer """adadelta""" +692 52 training_loop """lcwa""" +692 52 evaluator """rankbased""" +692 53 dataset """kinships""" +692 53 model """structuredembedding""" +692 53 loss """softplus""" +692 53 regularizer """no""" 
+692 53 optimizer """adadelta""" +692 53 training_loop """lcwa""" +692 53 evaluator """rankbased""" +692 54 dataset """kinships""" +692 54 model """structuredembedding""" +692 54 loss """softplus""" +692 54 regularizer """no""" +692 54 optimizer """adadelta""" +692 54 training_loop """lcwa""" +692 54 evaluator """rankbased""" +692 55 dataset """kinships""" +692 55 model """structuredembedding""" +692 55 loss """softplus""" +692 55 regularizer """no""" +692 55 optimizer """adadelta""" +692 55 training_loop """lcwa""" +692 55 evaluator """rankbased""" +692 56 dataset """kinships""" +692 56 model """structuredembedding""" +692 56 loss """softplus""" +692 56 regularizer """no""" +692 56 optimizer """adadelta""" +692 56 training_loop """lcwa""" +692 56 evaluator """rankbased""" +692 57 dataset """kinships""" +692 57 model """structuredembedding""" +692 57 loss """softplus""" +692 57 regularizer """no""" +692 57 optimizer """adadelta""" +692 57 training_loop """lcwa""" +692 57 evaluator """rankbased""" +692 58 dataset """kinships""" +692 58 model """structuredembedding""" +692 58 loss """softplus""" +692 58 regularizer """no""" +692 58 optimizer """adadelta""" +692 58 training_loop """lcwa""" +692 58 evaluator """rankbased""" +692 59 dataset """kinships""" +692 59 model """structuredembedding""" +692 59 loss """softplus""" +692 59 regularizer """no""" +692 59 optimizer """adadelta""" +692 59 training_loop """lcwa""" +692 59 evaluator """rankbased""" +692 60 dataset """kinships""" +692 60 model """structuredembedding""" +692 60 loss """softplus""" +692 60 regularizer """no""" +692 60 optimizer """adadelta""" +692 60 training_loop """lcwa""" +692 60 evaluator """rankbased""" +692 61 dataset """kinships""" +692 61 model """structuredembedding""" +692 61 loss """softplus""" +692 61 regularizer """no""" +692 61 optimizer """adadelta""" +692 61 training_loop """lcwa""" +692 61 evaluator """rankbased""" +692 62 dataset """kinships""" +692 62 model """structuredembedding""" +692 
62 loss """softplus""" +692 62 regularizer """no""" +692 62 optimizer """adadelta""" +692 62 training_loop """lcwa""" +692 62 evaluator """rankbased""" +692 63 dataset """kinships""" +692 63 model """structuredembedding""" +692 63 loss """softplus""" +692 63 regularizer """no""" +692 63 optimizer """adadelta""" +692 63 training_loop """lcwa""" +692 63 evaluator """rankbased""" +692 64 dataset """kinships""" +692 64 model """structuredembedding""" +692 64 loss """softplus""" +692 64 regularizer """no""" +692 64 optimizer """adadelta""" +692 64 training_loop """lcwa""" +692 64 evaluator """rankbased""" +692 65 dataset """kinships""" +692 65 model """structuredembedding""" +692 65 loss """softplus""" +692 65 regularizer """no""" +692 65 optimizer """adadelta""" +692 65 training_loop """lcwa""" +692 65 evaluator """rankbased""" +692 66 dataset """kinships""" +692 66 model """structuredembedding""" +692 66 loss """softplus""" +692 66 regularizer """no""" +692 66 optimizer """adadelta""" +692 66 training_loop """lcwa""" +692 66 evaluator """rankbased""" +692 67 dataset """kinships""" +692 67 model """structuredembedding""" +692 67 loss """softplus""" +692 67 regularizer """no""" +692 67 optimizer """adadelta""" +692 67 training_loop """lcwa""" +692 67 evaluator """rankbased""" +692 68 dataset """kinships""" +692 68 model """structuredembedding""" +692 68 loss """softplus""" +692 68 regularizer """no""" +692 68 optimizer """adadelta""" +692 68 training_loop """lcwa""" +692 68 evaluator """rankbased""" +692 69 dataset """kinships""" +692 69 model """structuredembedding""" +692 69 loss """softplus""" +692 69 regularizer """no""" +692 69 optimizer """adadelta""" +692 69 training_loop """lcwa""" +692 69 evaluator """rankbased""" +692 70 dataset """kinships""" +692 70 model """structuredembedding""" +692 70 loss """softplus""" +692 70 regularizer """no""" +692 70 optimizer """adadelta""" +692 70 training_loop """lcwa""" +692 70 evaluator """rankbased""" +692 71 dataset 
"""kinships""" +692 71 model """structuredembedding""" +692 71 loss """softplus""" +692 71 regularizer """no""" +692 71 optimizer """adadelta""" +692 71 training_loop """lcwa""" +692 71 evaluator """rankbased""" +692 72 dataset """kinships""" +692 72 model """structuredembedding""" +692 72 loss """softplus""" +692 72 regularizer """no""" +692 72 optimizer """adadelta""" +692 72 training_loop """lcwa""" +692 72 evaluator """rankbased""" +692 73 dataset """kinships""" +692 73 model """structuredembedding""" +692 73 loss """softplus""" +692 73 regularizer """no""" +692 73 optimizer """adadelta""" +692 73 training_loop """lcwa""" +692 73 evaluator """rankbased""" +692 74 dataset """kinships""" +692 74 model """structuredembedding""" +692 74 loss """softplus""" +692 74 regularizer """no""" +692 74 optimizer """adadelta""" +692 74 training_loop """lcwa""" +692 74 evaluator """rankbased""" +692 75 dataset """kinships""" +692 75 model """structuredembedding""" +692 75 loss """softplus""" +692 75 regularizer """no""" +692 75 optimizer """adadelta""" +692 75 training_loop """lcwa""" +692 75 evaluator """rankbased""" +692 76 dataset """kinships""" +692 76 model """structuredembedding""" +692 76 loss """softplus""" +692 76 regularizer """no""" +692 76 optimizer """adadelta""" +692 76 training_loop """lcwa""" +692 76 evaluator """rankbased""" +692 77 dataset """kinships""" +692 77 model """structuredembedding""" +692 77 loss """softplus""" +692 77 regularizer """no""" +692 77 optimizer """adadelta""" +692 77 training_loop """lcwa""" +692 77 evaluator """rankbased""" +692 78 dataset """kinships""" +692 78 model """structuredembedding""" +692 78 loss """softplus""" +692 78 regularizer """no""" +692 78 optimizer """adadelta""" +692 78 training_loop """lcwa""" +692 78 evaluator """rankbased""" +692 79 dataset """kinships""" +692 79 model """structuredembedding""" +692 79 loss """softplus""" +692 79 regularizer """no""" +692 79 optimizer """adadelta""" +692 79 training_loop 
"""lcwa""" +692 79 evaluator """rankbased""" +692 80 dataset """kinships""" +692 80 model """structuredembedding""" +692 80 loss """softplus""" +692 80 regularizer """no""" +692 80 optimizer """adadelta""" +692 80 training_loop """lcwa""" +692 80 evaluator """rankbased""" +692 81 dataset """kinships""" +692 81 model """structuredembedding""" +692 81 loss """softplus""" +692 81 regularizer """no""" +692 81 optimizer """adadelta""" +692 81 training_loop """lcwa""" +692 81 evaluator """rankbased""" +692 82 dataset """kinships""" +692 82 model """structuredembedding""" +692 82 loss """softplus""" +692 82 regularizer """no""" +692 82 optimizer """adadelta""" +692 82 training_loop """lcwa""" +692 82 evaluator """rankbased""" +692 83 dataset """kinships""" +692 83 model """structuredembedding""" +692 83 loss """softplus""" +692 83 regularizer """no""" +692 83 optimizer """adadelta""" +692 83 training_loop """lcwa""" +692 83 evaluator """rankbased""" +692 84 dataset """kinships""" +692 84 model """structuredembedding""" +692 84 loss """softplus""" +692 84 regularizer """no""" +692 84 optimizer """adadelta""" +692 84 training_loop """lcwa""" +692 84 evaluator """rankbased""" +692 85 dataset """kinships""" +692 85 model """structuredembedding""" +692 85 loss """softplus""" +692 85 regularizer """no""" +692 85 optimizer """adadelta""" +692 85 training_loop """lcwa""" +692 85 evaluator """rankbased""" +692 86 dataset """kinships""" +692 86 model """structuredembedding""" +692 86 loss """softplus""" +692 86 regularizer """no""" +692 86 optimizer """adadelta""" +692 86 training_loop """lcwa""" +692 86 evaluator """rankbased""" +692 87 dataset """kinships""" +692 87 model """structuredembedding""" +692 87 loss """softplus""" +692 87 regularizer """no""" +692 87 optimizer """adadelta""" +692 87 training_loop """lcwa""" +692 87 evaluator """rankbased""" +692 88 dataset """kinships""" +692 88 model """structuredembedding""" +692 88 loss """softplus""" +692 88 regularizer """no""" 
+692 88 optimizer """adadelta""" +692 88 training_loop """lcwa""" +692 88 evaluator """rankbased""" +692 89 dataset """kinships""" +692 89 model """structuredembedding""" +692 89 loss """softplus""" +692 89 regularizer """no""" +692 89 optimizer """adadelta""" +692 89 training_loop """lcwa""" +692 89 evaluator """rankbased""" +692 90 dataset """kinships""" +692 90 model """structuredembedding""" +692 90 loss """softplus""" +692 90 regularizer """no""" +692 90 optimizer """adadelta""" +692 90 training_loop """lcwa""" +692 90 evaluator """rankbased""" +692 91 dataset """kinships""" +692 91 model """structuredembedding""" +692 91 loss """softplus""" +692 91 regularizer """no""" +692 91 optimizer """adadelta""" +692 91 training_loop """lcwa""" +692 91 evaluator """rankbased""" +692 92 dataset """kinships""" +692 92 model """structuredembedding""" +692 92 loss """softplus""" +692 92 regularizer """no""" +692 92 optimizer """adadelta""" +692 92 training_loop """lcwa""" +692 92 evaluator """rankbased""" +692 93 dataset """kinships""" +692 93 model """structuredembedding""" +692 93 loss """softplus""" +692 93 regularizer """no""" +692 93 optimizer """adadelta""" +692 93 training_loop """lcwa""" +692 93 evaluator """rankbased""" +692 94 dataset """kinships""" +692 94 model """structuredembedding""" +692 94 loss """softplus""" +692 94 regularizer """no""" +692 94 optimizer """adadelta""" +692 94 training_loop """lcwa""" +692 94 evaluator """rankbased""" +692 95 dataset """kinships""" +692 95 model """structuredembedding""" +692 95 loss """softplus""" +692 95 regularizer """no""" +692 95 optimizer """adadelta""" +692 95 training_loop """lcwa""" +692 95 evaluator """rankbased""" +692 96 dataset """kinships""" +692 96 model """structuredembedding""" +692 96 loss """softplus""" +692 96 regularizer """no""" +692 96 optimizer """adadelta""" +692 96 training_loop """lcwa""" +692 96 evaluator """rankbased""" +692 97 dataset """kinships""" +692 97 model """structuredembedding""" +692 
97 loss """softplus""" +692 97 regularizer """no""" +692 97 optimizer """adadelta""" +692 97 training_loop """lcwa""" +692 97 evaluator """rankbased""" +692 98 dataset """kinships""" +692 98 model """structuredembedding""" +692 98 loss """softplus""" +692 98 regularizer """no""" +692 98 optimizer """adadelta""" +692 98 training_loop """lcwa""" +692 98 evaluator """rankbased""" +692 99 dataset """kinships""" +692 99 model """structuredembedding""" +692 99 loss """softplus""" +692 99 regularizer """no""" +692 99 optimizer """adadelta""" +692 99 training_loop """lcwa""" +692 99 evaluator """rankbased""" +692 100 dataset """kinships""" +692 100 model """structuredembedding""" +692 100 loss """softplus""" +692 100 regularizer """no""" +692 100 optimizer """adadelta""" +692 100 training_loop """lcwa""" +692 100 evaluator """rankbased""" +693 1 model.embedding_dim 2.0 +693 1 model.scoring_fct_norm 1.0 +693 1 training.batch_size 2.0 +693 1 training.label_smoothing 0.0056881104942883435 +693 2 model.embedding_dim 1.0 +693 2 model.scoring_fct_norm 2.0 +693 2 training.batch_size 1.0 +693 2 training.label_smoothing 0.9226376049529037 +693 3 model.embedding_dim 1.0 +693 3 model.scoring_fct_norm 1.0 +693 3 training.batch_size 1.0 +693 3 training.label_smoothing 0.5063682721034855 +693 4 model.embedding_dim 0.0 +693 4 model.scoring_fct_norm 1.0 +693 4 training.batch_size 1.0 +693 4 training.label_smoothing 0.47884727754671347 +693 5 model.embedding_dim 2.0 +693 5 model.scoring_fct_norm 2.0 +693 5 training.batch_size 1.0 +693 5 training.label_smoothing 0.0022029097671061168 +693 6 model.embedding_dim 1.0 +693 6 model.scoring_fct_norm 2.0 +693 6 training.batch_size 2.0 +693 6 training.label_smoothing 0.0033114071772839637 +693 7 model.embedding_dim 0.0 +693 7 model.scoring_fct_norm 1.0 +693 7 training.batch_size 2.0 +693 7 training.label_smoothing 0.008511885666391406 +693 8 model.embedding_dim 2.0 +693 8 model.scoring_fct_norm 1.0 +693 8 training.batch_size 2.0 +693 8 
training.label_smoothing 0.0026966505413973077 +693 9 model.embedding_dim 1.0 +693 9 model.scoring_fct_norm 2.0 +693 9 training.batch_size 2.0 +693 9 training.label_smoothing 0.0032743416512308727 +693 10 model.embedding_dim 2.0 +693 10 model.scoring_fct_norm 2.0 +693 10 training.batch_size 0.0 +693 10 training.label_smoothing 0.01872419371967 +693 11 model.embedding_dim 2.0 +693 11 model.scoring_fct_norm 2.0 +693 11 training.batch_size 0.0 +693 11 training.label_smoothing 0.29124374068618486 +693 12 model.embedding_dim 2.0 +693 12 model.scoring_fct_norm 1.0 +693 12 training.batch_size 1.0 +693 12 training.label_smoothing 0.003748839901422175 +693 13 model.embedding_dim 0.0 +693 13 model.scoring_fct_norm 2.0 +693 13 training.batch_size 2.0 +693 13 training.label_smoothing 0.2334536566267839 +693 14 model.embedding_dim 0.0 +693 14 model.scoring_fct_norm 1.0 +693 14 training.batch_size 0.0 +693 14 training.label_smoothing 0.004037444978192953 +693 15 model.embedding_dim 0.0 +693 15 model.scoring_fct_norm 2.0 +693 15 training.batch_size 1.0 +693 15 training.label_smoothing 0.06041236198807144 +693 16 model.embedding_dim 2.0 +693 16 model.scoring_fct_norm 1.0 +693 16 training.batch_size 0.0 +693 16 training.label_smoothing 0.020953578017050844 +693 17 model.embedding_dim 1.0 +693 17 model.scoring_fct_norm 2.0 +693 17 training.batch_size 2.0 +693 17 training.label_smoothing 0.3448092977545151 +693 18 model.embedding_dim 1.0 +693 18 model.scoring_fct_norm 2.0 +693 18 training.batch_size 1.0 +693 18 training.label_smoothing 0.010106563239008517 +693 19 model.embedding_dim 1.0 +693 19 model.scoring_fct_norm 2.0 +693 19 training.batch_size 1.0 +693 19 training.label_smoothing 0.9930771925771583 +693 20 model.embedding_dim 1.0 +693 20 model.scoring_fct_norm 2.0 +693 20 training.batch_size 0.0 +693 20 training.label_smoothing 0.13287661472766224 +693 21 model.embedding_dim 0.0 +693 21 model.scoring_fct_norm 2.0 +693 21 training.batch_size 0.0 +693 21 training.label_smoothing 
0.37483958731478195 +693 22 model.embedding_dim 1.0 +693 22 model.scoring_fct_norm 2.0 +693 22 training.batch_size 1.0 +693 22 training.label_smoothing 0.00706483304948514 +693 23 model.embedding_dim 2.0 +693 23 model.scoring_fct_norm 2.0 +693 23 training.batch_size 1.0 +693 23 training.label_smoothing 0.00212024125896839 +693 24 model.embedding_dim 0.0 +693 24 model.scoring_fct_norm 1.0 +693 24 training.batch_size 2.0 +693 24 training.label_smoothing 0.6408714797508611 +693 25 model.embedding_dim 1.0 +693 25 model.scoring_fct_norm 1.0 +693 25 training.batch_size 2.0 +693 25 training.label_smoothing 0.0015060495935812605 +693 26 model.embedding_dim 2.0 +693 26 model.scoring_fct_norm 2.0 +693 26 training.batch_size 0.0 +693 26 training.label_smoothing 0.004640856737297724 +693 27 model.embedding_dim 2.0 +693 27 model.scoring_fct_norm 2.0 +693 27 training.batch_size 1.0 +693 27 training.label_smoothing 0.3243493288987587 +693 28 model.embedding_dim 2.0 +693 28 model.scoring_fct_norm 1.0 +693 28 training.batch_size 2.0 +693 28 training.label_smoothing 0.004354570921552069 +693 29 model.embedding_dim 2.0 +693 29 model.scoring_fct_norm 2.0 +693 29 training.batch_size 2.0 +693 29 training.label_smoothing 0.20717172133213915 +693 30 model.embedding_dim 0.0 +693 30 model.scoring_fct_norm 2.0 +693 30 training.batch_size 1.0 +693 30 training.label_smoothing 0.01833287276355034 +693 31 model.embedding_dim 2.0 +693 31 model.scoring_fct_norm 1.0 +693 31 training.batch_size 1.0 +693 31 training.label_smoothing 0.1588800567304295 +693 32 model.embedding_dim 2.0 +693 32 model.scoring_fct_norm 2.0 +693 32 training.batch_size 0.0 +693 32 training.label_smoothing 0.08952706275667284 +693 33 model.embedding_dim 0.0 +693 33 model.scoring_fct_norm 1.0 +693 33 training.batch_size 1.0 +693 33 training.label_smoothing 0.014682576548969392 +693 34 model.embedding_dim 0.0 +693 34 model.scoring_fct_norm 1.0 +693 34 training.batch_size 0.0 +693 34 training.label_smoothing 0.005312281834964506 
+693 35 model.embedding_dim 0.0 +693 35 model.scoring_fct_norm 2.0 +693 35 training.batch_size 1.0 +693 35 training.label_smoothing 0.017030622424605773 +693 36 model.embedding_dim 1.0 +693 36 model.scoring_fct_norm 1.0 +693 36 training.batch_size 1.0 +693 36 training.label_smoothing 0.10121763777654481 +693 37 model.embedding_dim 1.0 +693 37 model.scoring_fct_norm 1.0 +693 37 training.batch_size 1.0 +693 37 training.label_smoothing 0.004316687437284846 +693 38 model.embedding_dim 1.0 +693 38 model.scoring_fct_norm 2.0 +693 38 training.batch_size 1.0 +693 38 training.label_smoothing 0.001351737807705395 +693 39 model.embedding_dim 2.0 +693 39 model.scoring_fct_norm 1.0 +693 39 training.batch_size 1.0 +693 39 training.label_smoothing 0.12481761021270851 +693 40 model.embedding_dim 0.0 +693 40 model.scoring_fct_norm 2.0 +693 40 training.batch_size 0.0 +693 40 training.label_smoothing 0.10927807052615474 +693 41 model.embedding_dim 0.0 +693 41 model.scoring_fct_norm 1.0 +693 41 training.batch_size 2.0 +693 41 training.label_smoothing 0.02035788924606954 +693 42 model.embedding_dim 1.0 +693 42 model.scoring_fct_norm 1.0 +693 42 training.batch_size 1.0 +693 42 training.label_smoothing 0.0012527745633824127 +693 43 model.embedding_dim 2.0 +693 43 model.scoring_fct_norm 1.0 +693 43 training.batch_size 0.0 +693 43 training.label_smoothing 0.03238465910504034 +693 44 model.embedding_dim 1.0 +693 44 model.scoring_fct_norm 1.0 +693 44 training.batch_size 0.0 +693 44 training.label_smoothing 0.009491583731588623 +693 45 model.embedding_dim 1.0 +693 45 model.scoring_fct_norm 1.0 +693 45 training.batch_size 1.0 +693 45 training.label_smoothing 0.14291682428645197 +693 46 model.embedding_dim 1.0 +693 46 model.scoring_fct_norm 1.0 +693 46 training.batch_size 2.0 +693 46 training.label_smoothing 0.04323610414784456 +693 47 model.embedding_dim 1.0 +693 47 model.scoring_fct_norm 1.0 +693 47 training.batch_size 2.0 +693 47 training.label_smoothing 0.006988657698664355 +693 48 
model.embedding_dim 0.0 +693 48 model.scoring_fct_norm 2.0 +693 48 training.batch_size 1.0 +693 48 training.label_smoothing 0.0027208921309768543 +693 49 model.embedding_dim 2.0 +693 49 model.scoring_fct_norm 2.0 +693 49 training.batch_size 1.0 +693 49 training.label_smoothing 0.08370011571741583 +693 50 model.embedding_dim 2.0 +693 50 model.scoring_fct_norm 2.0 +693 50 training.batch_size 1.0 +693 50 training.label_smoothing 0.005795695211366757 +693 51 model.embedding_dim 2.0 +693 51 model.scoring_fct_norm 1.0 +693 51 training.batch_size 0.0 +693 51 training.label_smoothing 0.0021622825870969372 +693 52 model.embedding_dim 1.0 +693 52 model.scoring_fct_norm 1.0 +693 52 training.batch_size 1.0 +693 52 training.label_smoothing 0.1509311921802671 +693 53 model.embedding_dim 0.0 +693 53 model.scoring_fct_norm 1.0 +693 53 training.batch_size 0.0 +693 53 training.label_smoothing 0.34936985870081444 +693 54 model.embedding_dim 2.0 +693 54 model.scoring_fct_norm 2.0 +693 54 training.batch_size 1.0 +693 54 training.label_smoothing 0.2872889788402757 +693 55 model.embedding_dim 0.0 +693 55 model.scoring_fct_norm 1.0 +693 55 training.batch_size 1.0 +693 55 training.label_smoothing 0.005219598859891763 +693 56 model.embedding_dim 2.0 +693 56 model.scoring_fct_norm 2.0 +693 56 training.batch_size 1.0 +693 56 training.label_smoothing 0.002678459192950039 +693 57 model.embedding_dim 1.0 +693 57 model.scoring_fct_norm 2.0 +693 57 training.batch_size 2.0 +693 57 training.label_smoothing 0.0193317983568827 +693 58 model.embedding_dim 0.0 +693 58 model.scoring_fct_norm 1.0 +693 58 training.batch_size 0.0 +693 58 training.label_smoothing 0.00313136789815872 +693 59 model.embedding_dim 2.0 +693 59 model.scoring_fct_norm 1.0 +693 59 training.batch_size 2.0 +693 59 training.label_smoothing 0.1296536734535869 +693 60 model.embedding_dim 0.0 +693 60 model.scoring_fct_norm 2.0 +693 60 training.batch_size 2.0 +693 60 training.label_smoothing 0.0050901344498111255 +693 61 
model.embedding_dim 2.0 +693 61 model.scoring_fct_norm 2.0 +693 61 training.batch_size 1.0 +693 61 training.label_smoothing 0.010603136395832281 +693 62 model.embedding_dim 1.0 +693 62 model.scoring_fct_norm 1.0 +693 62 training.batch_size 1.0 +693 62 training.label_smoothing 0.0019882752217794354 +693 63 model.embedding_dim 0.0 +693 63 model.scoring_fct_norm 2.0 +693 63 training.batch_size 1.0 +693 63 training.label_smoothing 0.00928666757421964 +693 64 model.embedding_dim 0.0 +693 64 model.scoring_fct_norm 2.0 +693 64 training.batch_size 0.0 +693 64 training.label_smoothing 0.0024923050160019907 +693 65 model.embedding_dim 2.0 +693 65 model.scoring_fct_norm 2.0 +693 65 training.batch_size 2.0 +693 65 training.label_smoothing 0.07332055954315604 +693 66 model.embedding_dim 0.0 +693 66 model.scoring_fct_norm 2.0 +693 66 training.batch_size 1.0 +693 66 training.label_smoothing 0.12415529872405734 +693 67 model.embedding_dim 1.0 +693 67 model.scoring_fct_norm 1.0 +693 67 training.batch_size 2.0 +693 67 training.label_smoothing 0.1853448691587631 +693 68 model.embedding_dim 1.0 +693 68 model.scoring_fct_norm 2.0 +693 68 training.batch_size 2.0 +693 68 training.label_smoothing 0.0015013927540405848 +693 69 model.embedding_dim 0.0 +693 69 model.scoring_fct_norm 2.0 +693 69 training.batch_size 0.0 +693 69 training.label_smoothing 0.17736416024511342 +693 70 model.embedding_dim 0.0 +693 70 model.scoring_fct_norm 2.0 +693 70 training.batch_size 1.0 +693 70 training.label_smoothing 0.0781772528346516 +693 71 model.embedding_dim 1.0 +693 71 model.scoring_fct_norm 2.0 +693 71 training.batch_size 1.0 +693 71 training.label_smoothing 0.05590670313575074 +693 72 model.embedding_dim 2.0 +693 72 model.scoring_fct_norm 2.0 +693 72 training.batch_size 2.0 +693 72 training.label_smoothing 0.01002586519405401 +693 73 model.embedding_dim 2.0 +693 73 model.scoring_fct_norm 1.0 +693 73 training.batch_size 1.0 +693 73 training.label_smoothing 0.7019712616953212 +693 74 model.embedding_dim 
0.0 +693 74 model.scoring_fct_norm 2.0 +693 74 training.batch_size 0.0 +693 74 training.label_smoothing 0.6175664345637929 +693 75 model.embedding_dim 0.0 +693 75 model.scoring_fct_norm 1.0 +693 75 training.batch_size 1.0 +693 75 training.label_smoothing 0.001976458934220133 +693 76 model.embedding_dim 0.0 +693 76 model.scoring_fct_norm 1.0 +693 76 training.batch_size 1.0 +693 76 training.label_smoothing 0.12216999097947528 +693 77 model.embedding_dim 2.0 +693 77 model.scoring_fct_norm 2.0 +693 77 training.batch_size 0.0 +693 77 training.label_smoothing 0.001949144472737188 +693 78 model.embedding_dim 2.0 +693 78 model.scoring_fct_norm 1.0 +693 78 training.batch_size 0.0 +693 78 training.label_smoothing 0.03977252345489521 +693 79 model.embedding_dim 2.0 +693 79 model.scoring_fct_norm 2.0 +693 79 training.batch_size 1.0 +693 79 training.label_smoothing 0.6946408143081982 +693 80 model.embedding_dim 0.0 +693 80 model.scoring_fct_norm 1.0 +693 80 training.batch_size 1.0 +693 80 training.label_smoothing 0.011781609315200312 +693 81 model.embedding_dim 0.0 +693 81 model.scoring_fct_norm 2.0 +693 81 training.batch_size 2.0 +693 81 training.label_smoothing 0.001228626677154058 +693 82 model.embedding_dim 1.0 +693 82 model.scoring_fct_norm 2.0 +693 82 training.batch_size 2.0 +693 82 training.label_smoothing 0.16185741858553718 +693 83 model.embedding_dim 0.0 +693 83 model.scoring_fct_norm 2.0 +693 83 training.batch_size 1.0 +693 83 training.label_smoothing 0.03879949839400567 +693 84 model.embedding_dim 1.0 +693 84 model.scoring_fct_norm 2.0 +693 84 training.batch_size 2.0 +693 84 training.label_smoothing 0.002811754150357283 +693 85 model.embedding_dim 1.0 +693 85 model.scoring_fct_norm 1.0 +693 85 training.batch_size 0.0 +693 85 training.label_smoothing 0.0030703280820919875 +693 86 model.embedding_dim 1.0 +693 86 model.scoring_fct_norm 1.0 +693 86 training.batch_size 0.0 +693 86 training.label_smoothing 0.004028996143310033 +693 87 model.embedding_dim 0.0 +693 87 
model.scoring_fct_norm 2.0 +693 87 training.batch_size 1.0 +693 87 training.label_smoothing 0.5230801178259539 +693 88 model.embedding_dim 2.0 +693 88 model.scoring_fct_norm 1.0 +693 88 training.batch_size 2.0 +693 88 training.label_smoothing 0.0023525361565299562 +693 89 model.embedding_dim 1.0 +693 89 model.scoring_fct_norm 1.0 +693 89 training.batch_size 2.0 +693 89 training.label_smoothing 0.001986882732004836 +693 90 model.embedding_dim 2.0 +693 90 model.scoring_fct_norm 1.0 +693 90 training.batch_size 1.0 +693 90 training.label_smoothing 0.005605299349344227 +693 91 model.embedding_dim 0.0 +693 91 model.scoring_fct_norm 1.0 +693 91 training.batch_size 2.0 +693 91 training.label_smoothing 0.6467669038667909 +693 92 model.embedding_dim 1.0 +693 92 model.scoring_fct_norm 1.0 +693 92 training.batch_size 0.0 +693 92 training.label_smoothing 0.6589805282549513 +693 93 model.embedding_dim 0.0 +693 93 model.scoring_fct_norm 2.0 +693 93 training.batch_size 2.0 +693 93 training.label_smoothing 0.050766917675677314 +693 94 model.embedding_dim 0.0 +693 94 model.scoring_fct_norm 2.0 +693 94 training.batch_size 2.0 +693 94 training.label_smoothing 0.05783654301896635 +693 95 model.embedding_dim 2.0 +693 95 model.scoring_fct_norm 2.0 +693 95 training.batch_size 1.0 +693 95 training.label_smoothing 0.02416740193803609 +693 96 model.embedding_dim 0.0 +693 96 model.scoring_fct_norm 1.0 +693 96 training.batch_size 1.0 +693 96 training.label_smoothing 0.040957854432586066 +693 97 model.embedding_dim 1.0 +693 97 model.scoring_fct_norm 2.0 +693 97 training.batch_size 0.0 +693 97 training.label_smoothing 0.7844012387206923 +693 98 model.embedding_dim 0.0 +693 98 model.scoring_fct_norm 1.0 +693 98 training.batch_size 0.0 +693 98 training.label_smoothing 0.0015467964485837936 +693 99 model.embedding_dim 0.0 +693 99 model.scoring_fct_norm 1.0 +693 99 training.batch_size 2.0 +693 99 training.label_smoothing 0.06401389075819254 +693 100 model.embedding_dim 1.0 +693 100 
model.scoring_fct_norm 2.0 +693 100 training.batch_size 1.0 +693 100 training.label_smoothing 0.0025008113313041646 +693 1 dataset """kinships""" +693 1 model """structuredembedding""" +693 1 loss """crossentropy""" +693 1 regularizer """no""" +693 1 optimizer """adadelta""" +693 1 training_loop """lcwa""" +693 1 evaluator """rankbased""" +693 2 dataset """kinships""" +693 2 model """structuredembedding""" +693 2 loss """crossentropy""" +693 2 regularizer """no""" +693 2 optimizer """adadelta""" +693 2 training_loop """lcwa""" +693 2 evaluator """rankbased""" +693 3 dataset """kinships""" +693 3 model """structuredembedding""" +693 3 loss """crossentropy""" +693 3 regularizer """no""" +693 3 optimizer """adadelta""" +693 3 training_loop """lcwa""" +693 3 evaluator """rankbased""" +693 4 dataset """kinships""" +693 4 model """structuredembedding""" +693 4 loss """crossentropy""" +693 4 regularizer """no""" +693 4 optimizer """adadelta""" +693 4 training_loop """lcwa""" +693 4 evaluator """rankbased""" +693 5 dataset """kinships""" +693 5 model """structuredembedding""" +693 5 loss """crossentropy""" +693 5 regularizer """no""" +693 5 optimizer """adadelta""" +693 5 training_loop """lcwa""" +693 5 evaluator """rankbased""" +693 6 dataset """kinships""" +693 6 model """structuredembedding""" +693 6 loss """crossentropy""" +693 6 regularizer """no""" +693 6 optimizer """adadelta""" +693 6 training_loop """lcwa""" +693 6 evaluator """rankbased""" +693 7 dataset """kinships""" +693 7 model """structuredembedding""" +693 7 loss """crossentropy""" +693 7 regularizer """no""" +693 7 optimizer """adadelta""" +693 7 training_loop """lcwa""" +693 7 evaluator """rankbased""" +693 8 dataset """kinships""" +693 8 model """structuredembedding""" +693 8 loss """crossentropy""" +693 8 regularizer """no""" +693 8 optimizer """adadelta""" +693 8 training_loop """lcwa""" +693 8 evaluator """rankbased""" +693 9 dataset """kinships""" +693 9 model """structuredembedding""" +693 9 loss 
"""crossentropy""" +693 9 regularizer """no""" +693 9 optimizer """adadelta""" +693 9 training_loop """lcwa""" +693 9 evaluator """rankbased""" +693 10 dataset """kinships""" +693 10 model """structuredembedding""" +693 10 loss """crossentropy""" +693 10 regularizer """no""" +693 10 optimizer """adadelta""" +693 10 training_loop """lcwa""" +693 10 evaluator """rankbased""" +693 11 dataset """kinships""" +693 11 model """structuredembedding""" +693 11 loss """crossentropy""" +693 11 regularizer """no""" +693 11 optimizer """adadelta""" +693 11 training_loop """lcwa""" +693 11 evaluator """rankbased""" +693 12 dataset """kinships""" +693 12 model """structuredembedding""" +693 12 loss """crossentropy""" +693 12 regularizer """no""" +693 12 optimizer """adadelta""" +693 12 training_loop """lcwa""" +693 12 evaluator """rankbased""" +693 13 dataset """kinships""" +693 13 model """structuredembedding""" +693 13 loss """crossentropy""" +693 13 regularizer """no""" +693 13 optimizer """adadelta""" +693 13 training_loop """lcwa""" +693 13 evaluator """rankbased""" +693 14 dataset """kinships""" +693 14 model """structuredembedding""" +693 14 loss """crossentropy""" +693 14 regularizer """no""" +693 14 optimizer """adadelta""" +693 14 training_loop """lcwa""" +693 14 evaluator """rankbased""" +693 15 dataset """kinships""" +693 15 model """structuredembedding""" +693 15 loss """crossentropy""" +693 15 regularizer """no""" +693 15 optimizer """adadelta""" +693 15 training_loop """lcwa""" +693 15 evaluator """rankbased""" +693 16 dataset """kinships""" +693 16 model """structuredembedding""" +693 16 loss """crossentropy""" +693 16 regularizer """no""" +693 16 optimizer """adadelta""" +693 16 training_loop """lcwa""" +693 16 evaluator """rankbased""" +693 17 dataset """kinships""" +693 17 model """structuredembedding""" +693 17 loss """crossentropy""" +693 17 regularizer """no""" +693 17 optimizer """adadelta""" +693 17 training_loop """lcwa""" +693 17 evaluator """rankbased""" 
+693 18 dataset """kinships""" +693 18 model """structuredembedding""" +693 18 loss """crossentropy""" +693 18 regularizer """no""" +693 18 optimizer """adadelta""" +693 18 training_loop """lcwa""" +693 18 evaluator """rankbased""" +693 19 dataset """kinships""" +693 19 model """structuredembedding""" +693 19 loss """crossentropy""" +693 19 regularizer """no""" +693 19 optimizer """adadelta""" +693 19 training_loop """lcwa""" +693 19 evaluator """rankbased""" +693 20 dataset """kinships""" +693 20 model """structuredembedding""" +693 20 loss """crossentropy""" +693 20 regularizer """no""" +693 20 optimizer """adadelta""" +693 20 training_loop """lcwa""" +693 20 evaluator """rankbased""" +693 21 dataset """kinships""" +693 21 model """structuredembedding""" +693 21 loss """crossentropy""" +693 21 regularizer """no""" +693 21 optimizer """adadelta""" +693 21 training_loop """lcwa""" +693 21 evaluator """rankbased""" +693 22 dataset """kinships""" +693 22 model """structuredembedding""" +693 22 loss """crossentropy""" +693 22 regularizer """no""" +693 22 optimizer """adadelta""" +693 22 training_loop """lcwa""" +693 22 evaluator """rankbased""" +693 23 dataset """kinships""" +693 23 model """structuredembedding""" +693 23 loss """crossentropy""" +693 23 regularizer """no""" +693 23 optimizer """adadelta""" +693 23 training_loop """lcwa""" +693 23 evaluator """rankbased""" +693 24 dataset """kinships""" +693 24 model """structuredembedding""" +693 24 loss """crossentropy""" +693 24 regularizer """no""" +693 24 optimizer """adadelta""" +693 24 training_loop """lcwa""" +693 24 evaluator """rankbased""" +693 25 dataset """kinships""" +693 25 model """structuredembedding""" +693 25 loss """crossentropy""" +693 25 regularizer """no""" +693 25 optimizer """adadelta""" +693 25 training_loop """lcwa""" +693 25 evaluator """rankbased""" +693 26 dataset """kinships""" +693 26 model """structuredembedding""" +693 26 loss """crossentropy""" +693 26 regularizer """no""" +693 26 
optimizer """adadelta""" +693 26 training_loop """lcwa""" +693 26 evaluator """rankbased""" +693 27 dataset """kinships""" +693 27 model """structuredembedding""" +693 27 loss """crossentropy""" +693 27 regularizer """no""" +693 27 optimizer """adadelta""" +693 27 training_loop """lcwa""" +693 27 evaluator """rankbased""" +693 28 dataset """kinships""" +693 28 model """structuredembedding""" +693 28 loss """crossentropy""" +693 28 regularizer """no""" +693 28 optimizer """adadelta""" +693 28 training_loop """lcwa""" +693 28 evaluator """rankbased""" +693 29 dataset """kinships""" +693 29 model """structuredembedding""" +693 29 loss """crossentropy""" +693 29 regularizer """no""" +693 29 optimizer """adadelta""" +693 29 training_loop """lcwa""" +693 29 evaluator """rankbased""" +693 30 dataset """kinships""" +693 30 model """structuredembedding""" +693 30 loss """crossentropy""" +693 30 regularizer """no""" +693 30 optimizer """adadelta""" +693 30 training_loop """lcwa""" +693 30 evaluator """rankbased""" +693 31 dataset """kinships""" +693 31 model """structuredembedding""" +693 31 loss """crossentropy""" +693 31 regularizer """no""" +693 31 optimizer """adadelta""" +693 31 training_loop """lcwa""" +693 31 evaluator """rankbased""" +693 32 dataset """kinships""" +693 32 model """structuredembedding""" +693 32 loss """crossentropy""" +693 32 regularizer """no""" +693 32 optimizer """adadelta""" +693 32 training_loop """lcwa""" +693 32 evaluator """rankbased""" +693 33 dataset """kinships""" +693 33 model """structuredembedding""" +693 33 loss """crossentropy""" +693 33 regularizer """no""" +693 33 optimizer """adadelta""" +693 33 training_loop """lcwa""" +693 33 evaluator """rankbased""" +693 34 dataset """kinships""" +693 34 model """structuredembedding""" +693 34 loss """crossentropy""" +693 34 regularizer """no""" +693 34 optimizer """adadelta""" +693 34 training_loop """lcwa""" +693 34 evaluator """rankbased""" +693 35 dataset """kinships""" +693 35 model 
"""structuredembedding""" +693 35 loss """crossentropy""" +693 35 regularizer """no""" +693 35 optimizer """adadelta""" +693 35 training_loop """lcwa""" +693 35 evaluator """rankbased""" +693 36 dataset """kinships""" +693 36 model """structuredembedding""" +693 36 loss """crossentropy""" +693 36 regularizer """no""" +693 36 optimizer """adadelta""" +693 36 training_loop """lcwa""" +693 36 evaluator """rankbased""" +693 37 dataset """kinships""" +693 37 model """structuredembedding""" +693 37 loss """crossentropy""" +693 37 regularizer """no""" +693 37 optimizer """adadelta""" +693 37 training_loop """lcwa""" +693 37 evaluator """rankbased""" +693 38 dataset """kinships""" +693 38 model """structuredembedding""" +693 38 loss """crossentropy""" +693 38 regularizer """no""" +693 38 optimizer """adadelta""" +693 38 training_loop """lcwa""" +693 38 evaluator """rankbased""" +693 39 dataset """kinships""" +693 39 model """structuredembedding""" +693 39 loss """crossentropy""" +693 39 regularizer """no""" +693 39 optimizer """adadelta""" +693 39 training_loop """lcwa""" +693 39 evaluator """rankbased""" +693 40 dataset """kinships""" +693 40 model """structuredembedding""" +693 40 loss """crossentropy""" +693 40 regularizer """no""" +693 40 optimizer """adadelta""" +693 40 training_loop """lcwa""" +693 40 evaluator """rankbased""" +693 41 dataset """kinships""" +693 41 model """structuredembedding""" +693 41 loss """crossentropy""" +693 41 regularizer """no""" +693 41 optimizer """adadelta""" +693 41 training_loop """lcwa""" +693 41 evaluator """rankbased""" +693 42 dataset """kinships""" +693 42 model """structuredembedding""" +693 42 loss """crossentropy""" +693 42 regularizer """no""" +693 42 optimizer """adadelta""" +693 42 training_loop """lcwa""" +693 42 evaluator """rankbased""" +693 43 dataset """kinships""" +693 43 model """structuredembedding""" +693 43 loss """crossentropy""" +693 43 regularizer """no""" +693 43 optimizer """adadelta""" +693 43 training_loop 
"""lcwa""" +693 43 evaluator """rankbased""" +693 44 dataset """kinships""" +693 44 model """structuredembedding""" +693 44 loss """crossentropy""" +693 44 regularizer """no""" +693 44 optimizer """adadelta""" +693 44 training_loop """lcwa""" +693 44 evaluator """rankbased""" +693 45 dataset """kinships""" +693 45 model """structuredembedding""" +693 45 loss """crossentropy""" +693 45 regularizer """no""" +693 45 optimizer """adadelta""" +693 45 training_loop """lcwa""" +693 45 evaluator """rankbased""" +693 46 dataset """kinships""" +693 46 model """structuredembedding""" +693 46 loss """crossentropy""" +693 46 regularizer """no""" +693 46 optimizer """adadelta""" +693 46 training_loop """lcwa""" +693 46 evaluator """rankbased""" +693 47 dataset """kinships""" +693 47 model """structuredembedding""" +693 47 loss """crossentropy""" +693 47 regularizer """no""" +693 47 optimizer """adadelta""" +693 47 training_loop """lcwa""" +693 47 evaluator """rankbased""" +693 48 dataset """kinships""" +693 48 model """structuredembedding""" +693 48 loss """crossentropy""" +693 48 regularizer """no""" +693 48 optimizer """adadelta""" +693 48 training_loop """lcwa""" +693 48 evaluator """rankbased""" +693 49 dataset """kinships""" +693 49 model """structuredembedding""" +693 49 loss """crossentropy""" +693 49 regularizer """no""" +693 49 optimizer """adadelta""" +693 49 training_loop """lcwa""" +693 49 evaluator """rankbased""" +693 50 dataset """kinships""" +693 50 model """structuredembedding""" +693 50 loss """crossentropy""" +693 50 regularizer """no""" +693 50 optimizer """adadelta""" +693 50 training_loop """lcwa""" +693 50 evaluator """rankbased""" +693 51 dataset """kinships""" +693 51 model """structuredembedding""" +693 51 loss """crossentropy""" +693 51 regularizer """no""" +693 51 optimizer """adadelta""" +693 51 training_loop """lcwa""" +693 51 evaluator """rankbased""" +693 52 dataset """kinships""" +693 52 model """structuredembedding""" +693 52 loss 
"""crossentropy""" +693 52 regularizer """no""" +693 52 optimizer """adadelta""" +693 52 training_loop """lcwa""" +693 52 evaluator """rankbased""" +693 53 dataset """kinships""" +693 53 model """structuredembedding""" +693 53 loss """crossentropy""" +693 53 regularizer """no""" +693 53 optimizer """adadelta""" +693 53 training_loop """lcwa""" +693 53 evaluator """rankbased""" +693 54 dataset """kinships""" +693 54 model """structuredembedding""" +693 54 loss """crossentropy""" +693 54 regularizer """no""" +693 54 optimizer """adadelta""" +693 54 training_loop """lcwa""" +693 54 evaluator """rankbased""" +693 55 dataset """kinships""" +693 55 model """structuredembedding""" +693 55 loss """crossentropy""" +693 55 regularizer """no""" +693 55 optimizer """adadelta""" +693 55 training_loop """lcwa""" +693 55 evaluator """rankbased""" +693 56 dataset """kinships""" +693 56 model """structuredembedding""" +693 56 loss """crossentropy""" +693 56 regularizer """no""" +693 56 optimizer """adadelta""" +693 56 training_loop """lcwa""" +693 56 evaluator """rankbased""" +693 57 dataset """kinships""" +693 57 model """structuredembedding""" +693 57 loss """crossentropy""" +693 57 regularizer """no""" +693 57 optimizer """adadelta""" +693 57 training_loop """lcwa""" +693 57 evaluator """rankbased""" +693 58 dataset """kinships""" +693 58 model """structuredembedding""" +693 58 loss """crossentropy""" +693 58 regularizer """no""" +693 58 optimizer """adadelta""" +693 58 training_loop """lcwa""" +693 58 evaluator """rankbased""" +693 59 dataset """kinships""" +693 59 model """structuredembedding""" +693 59 loss """crossentropy""" +693 59 regularizer """no""" +693 59 optimizer """adadelta""" +693 59 training_loop """lcwa""" +693 59 evaluator """rankbased""" +693 60 dataset """kinships""" +693 60 model """structuredembedding""" +693 60 loss """crossentropy""" +693 60 regularizer """no""" +693 60 optimizer """adadelta""" +693 60 training_loop """lcwa""" +693 60 evaluator 
"""rankbased""" +693 61 dataset """kinships""" +693 61 model """structuredembedding""" +693 61 loss """crossentropy""" +693 61 regularizer """no""" +693 61 optimizer """adadelta""" +693 61 training_loop """lcwa""" +693 61 evaluator """rankbased""" +693 62 dataset """kinships""" +693 62 model """structuredembedding""" +693 62 loss """crossentropy""" +693 62 regularizer """no""" +693 62 optimizer """adadelta""" +693 62 training_loop """lcwa""" +693 62 evaluator """rankbased""" +693 63 dataset """kinships""" +693 63 model """structuredembedding""" +693 63 loss """crossentropy""" +693 63 regularizer """no""" +693 63 optimizer """adadelta""" +693 63 training_loop """lcwa""" +693 63 evaluator """rankbased""" +693 64 dataset """kinships""" +693 64 model """structuredembedding""" +693 64 loss """crossentropy""" +693 64 regularizer """no""" +693 64 optimizer """adadelta""" +693 64 training_loop """lcwa""" +693 64 evaluator """rankbased""" +693 65 dataset """kinships""" +693 65 model """structuredembedding""" +693 65 loss """crossentropy""" +693 65 regularizer """no""" +693 65 optimizer """adadelta""" +693 65 training_loop """lcwa""" +693 65 evaluator """rankbased""" +693 66 dataset """kinships""" +693 66 model """structuredembedding""" +693 66 loss """crossentropy""" +693 66 regularizer """no""" +693 66 optimizer """adadelta""" +693 66 training_loop """lcwa""" +693 66 evaluator """rankbased""" +693 67 dataset """kinships""" +693 67 model """structuredembedding""" +693 67 loss """crossentropy""" +693 67 regularizer """no""" +693 67 optimizer """adadelta""" +693 67 training_loop """lcwa""" +693 67 evaluator """rankbased""" +693 68 dataset """kinships""" +693 68 model """structuredembedding""" +693 68 loss """crossentropy""" +693 68 regularizer """no""" +693 68 optimizer """adadelta""" +693 68 training_loop """lcwa""" +693 68 evaluator """rankbased""" +693 69 dataset """kinships""" +693 69 model """structuredembedding""" +693 69 loss """crossentropy""" +693 69 regularizer 
"""no""" +693 69 optimizer """adadelta""" +693 69 training_loop """lcwa""" +693 69 evaluator """rankbased""" +693 70 dataset """kinships""" +693 70 model """structuredembedding""" +693 70 loss """crossentropy""" +693 70 regularizer """no""" +693 70 optimizer """adadelta""" +693 70 training_loop """lcwa""" +693 70 evaluator """rankbased""" +693 71 dataset """kinships""" +693 71 model """structuredembedding""" +693 71 loss """crossentropy""" +693 71 regularizer """no""" +693 71 optimizer """adadelta""" +693 71 training_loop """lcwa""" +693 71 evaluator """rankbased""" +693 72 dataset """kinships""" +693 72 model """structuredembedding""" +693 72 loss """crossentropy""" +693 72 regularizer """no""" +693 72 optimizer """adadelta""" +693 72 training_loop """lcwa""" +693 72 evaluator """rankbased""" +693 73 dataset """kinships""" +693 73 model """structuredembedding""" +693 73 loss """crossentropy""" +693 73 regularizer """no""" +693 73 optimizer """adadelta""" +693 73 training_loop """lcwa""" +693 73 evaluator """rankbased""" +693 74 dataset """kinships""" +693 74 model """structuredembedding""" +693 74 loss """crossentropy""" +693 74 regularizer """no""" +693 74 optimizer """adadelta""" +693 74 training_loop """lcwa""" +693 74 evaluator """rankbased""" +693 75 dataset """kinships""" +693 75 model """structuredembedding""" +693 75 loss """crossentropy""" +693 75 regularizer """no""" +693 75 optimizer """adadelta""" +693 75 training_loop """lcwa""" +693 75 evaluator """rankbased""" +693 76 dataset """kinships""" +693 76 model """structuredembedding""" +693 76 loss """crossentropy""" +693 76 regularizer """no""" +693 76 optimizer """adadelta""" +693 76 training_loop """lcwa""" +693 76 evaluator """rankbased""" +693 77 dataset """kinships""" +693 77 model """structuredembedding""" +693 77 loss """crossentropy""" +693 77 regularizer """no""" +693 77 optimizer """adadelta""" +693 77 training_loop """lcwa""" +693 77 evaluator """rankbased""" +693 78 dataset """kinships""" 
+693 78 model """structuredembedding""" +693 78 loss """crossentropy""" +693 78 regularizer """no""" +693 78 optimizer """adadelta""" +693 78 training_loop """lcwa""" +693 78 evaluator """rankbased""" +693 79 dataset """kinships""" +693 79 model """structuredembedding""" +693 79 loss """crossentropy""" +693 79 regularizer """no""" +693 79 optimizer """adadelta""" +693 79 training_loop """lcwa""" +693 79 evaluator """rankbased""" +693 80 dataset """kinships""" +693 80 model """structuredembedding""" +693 80 loss """crossentropy""" +693 80 regularizer """no""" +693 80 optimizer """adadelta""" +693 80 training_loop """lcwa""" +693 80 evaluator """rankbased""" +693 81 dataset """kinships""" +693 81 model """structuredembedding""" +693 81 loss """crossentropy""" +693 81 regularizer """no""" +693 81 optimizer """adadelta""" +693 81 training_loop """lcwa""" +693 81 evaluator """rankbased""" +693 82 dataset """kinships""" +693 82 model """structuredembedding""" +693 82 loss """crossentropy""" +693 82 regularizer """no""" +693 82 optimizer """adadelta""" +693 82 training_loop """lcwa""" +693 82 evaluator """rankbased""" +693 83 dataset """kinships""" +693 83 model """structuredembedding""" +693 83 loss """crossentropy""" +693 83 regularizer """no""" +693 83 optimizer """adadelta""" +693 83 training_loop """lcwa""" +693 83 evaluator """rankbased""" +693 84 dataset """kinships""" +693 84 model """structuredembedding""" +693 84 loss """crossentropy""" +693 84 regularizer """no""" +693 84 optimizer """adadelta""" +693 84 training_loop """lcwa""" +693 84 evaluator """rankbased""" +693 85 dataset """kinships""" +693 85 model """structuredembedding""" +693 85 loss """crossentropy""" +693 85 regularizer """no""" +693 85 optimizer """adadelta""" +693 85 training_loop """lcwa""" +693 85 evaluator """rankbased""" +693 86 dataset """kinships""" +693 86 model """structuredembedding""" +693 86 loss """crossentropy""" +693 86 regularizer """no""" +693 86 optimizer """adadelta""" +693 86 
training_loop """lcwa""" +693 86 evaluator """rankbased""" +693 87 dataset """kinships""" +693 87 model """structuredembedding""" +693 87 loss """crossentropy""" +693 87 regularizer """no""" +693 87 optimizer """adadelta""" +693 87 training_loop """lcwa""" +693 87 evaluator """rankbased""" +693 88 dataset """kinships""" +693 88 model """structuredembedding""" +693 88 loss """crossentropy""" +693 88 regularizer """no""" +693 88 optimizer """adadelta""" +693 88 training_loop """lcwa""" +693 88 evaluator """rankbased""" +693 89 dataset """kinships""" +693 89 model """structuredembedding""" +693 89 loss """crossentropy""" +693 89 regularizer """no""" +693 89 optimizer """adadelta""" +693 89 training_loop """lcwa""" +693 89 evaluator """rankbased""" +693 90 dataset """kinships""" +693 90 model """structuredembedding""" +693 90 loss """crossentropy""" +693 90 regularizer """no""" +693 90 optimizer """adadelta""" +693 90 training_loop """lcwa""" +693 90 evaluator """rankbased""" +693 91 dataset """kinships""" +693 91 model """structuredembedding""" +693 91 loss """crossentropy""" +693 91 regularizer """no""" +693 91 optimizer """adadelta""" +693 91 training_loop """lcwa""" +693 91 evaluator """rankbased""" +693 92 dataset """kinships""" +693 92 model """structuredembedding""" +693 92 loss """crossentropy""" +693 92 regularizer """no""" +693 92 optimizer """adadelta""" +693 92 training_loop """lcwa""" +693 92 evaluator """rankbased""" +693 93 dataset """kinships""" +693 93 model """structuredembedding""" +693 93 loss """crossentropy""" +693 93 regularizer """no""" +693 93 optimizer """adadelta""" +693 93 training_loop """lcwa""" +693 93 evaluator """rankbased""" +693 94 dataset """kinships""" +693 94 model """structuredembedding""" +693 94 loss """crossentropy""" +693 94 regularizer """no""" +693 94 optimizer """adadelta""" +693 94 training_loop """lcwa""" +693 94 evaluator """rankbased""" +693 95 dataset """kinships""" +693 95 model """structuredembedding""" +693 95 loss 
"""crossentropy""" +693 95 regularizer """no""" +693 95 optimizer """adadelta""" +693 95 training_loop """lcwa""" +693 95 evaluator """rankbased""" +693 96 dataset """kinships""" +693 96 model """structuredembedding""" +693 96 loss """crossentropy""" +693 96 regularizer """no""" +693 96 optimizer """adadelta""" +693 96 training_loop """lcwa""" +693 96 evaluator """rankbased""" +693 97 dataset """kinships""" +693 97 model """structuredembedding""" +693 97 loss """crossentropy""" +693 97 regularizer """no""" +693 97 optimizer """adadelta""" +693 97 training_loop """lcwa""" +693 97 evaluator """rankbased""" +693 98 dataset """kinships""" +693 98 model """structuredembedding""" +693 98 loss """crossentropy""" +693 98 regularizer """no""" +693 98 optimizer """adadelta""" +693 98 training_loop """lcwa""" +693 98 evaluator """rankbased""" +693 99 dataset """kinships""" +693 99 model """structuredembedding""" +693 99 loss """crossentropy""" +693 99 regularizer """no""" +693 99 optimizer """adadelta""" +693 99 training_loop """lcwa""" +693 99 evaluator """rankbased""" +693 100 dataset """kinships""" +693 100 model """structuredembedding""" +693 100 loss """crossentropy""" +693 100 regularizer """no""" +693 100 optimizer """adadelta""" +693 100 training_loop """lcwa""" +693 100 evaluator """rankbased""" +694 1 model.embedding_dim 0.0 +694 1 model.scoring_fct_norm 2.0 +694 1 training.batch_size 0.0 +694 1 training.label_smoothing 0.006452559595911265 +694 2 model.embedding_dim 2.0 +694 2 model.scoring_fct_norm 1.0 +694 2 training.batch_size 0.0 +694 2 training.label_smoothing 0.5278972162739779 +694 3 model.embedding_dim 0.0 +694 3 model.scoring_fct_norm 2.0 +694 3 training.batch_size 1.0 +694 3 training.label_smoothing 0.022955653002939197 +694 4 model.embedding_dim 1.0 +694 4 model.scoring_fct_norm 2.0 +694 4 training.batch_size 0.0 +694 4 training.label_smoothing 0.0170917050903018 +694 5 model.embedding_dim 1.0 +694 5 model.scoring_fct_norm 1.0 +694 5 training.batch_size 
2.0 +694 5 training.label_smoothing 0.02273722198227041 +694 6 model.embedding_dim 1.0 +694 6 model.scoring_fct_norm 2.0 +694 6 training.batch_size 0.0 +694 6 training.label_smoothing 0.0029050779446642425 +694 7 model.embedding_dim 0.0 +694 7 model.scoring_fct_norm 2.0 +694 7 training.batch_size 2.0 +694 7 training.label_smoothing 0.06436919381602685 +694 8 model.embedding_dim 1.0 +694 8 model.scoring_fct_norm 1.0 +694 8 training.batch_size 1.0 +694 8 training.label_smoothing 0.2595442094680219 +694 9 model.embedding_dim 0.0 +694 9 model.scoring_fct_norm 2.0 +694 9 training.batch_size 1.0 +694 9 training.label_smoothing 0.07463311114384392 +694 10 model.embedding_dim 1.0 +694 10 model.scoring_fct_norm 2.0 +694 10 training.batch_size 1.0 +694 10 training.label_smoothing 0.072959308263528 +694 11 model.embedding_dim 2.0 +694 11 model.scoring_fct_norm 1.0 +694 11 training.batch_size 1.0 +694 11 training.label_smoothing 0.0010109864103960214 +694 12 model.embedding_dim 0.0 +694 12 model.scoring_fct_norm 1.0 +694 12 training.batch_size 2.0 +694 12 training.label_smoothing 0.003613139239908423 +694 13 model.embedding_dim 1.0 +694 13 model.scoring_fct_norm 2.0 +694 13 training.batch_size 0.0 +694 13 training.label_smoothing 0.42845259576902456 +694 14 model.embedding_dim 0.0 +694 14 model.scoring_fct_norm 2.0 +694 14 training.batch_size 0.0 +694 14 training.label_smoothing 0.041963725651327874 +694 15 model.embedding_dim 0.0 +694 15 model.scoring_fct_norm 1.0 +694 15 training.batch_size 1.0 +694 15 training.label_smoothing 0.2804450672734993 +694 16 model.embedding_dim 2.0 +694 16 model.scoring_fct_norm 2.0 +694 16 training.batch_size 1.0 +694 16 training.label_smoothing 0.06645623060227011 +694 17 model.embedding_dim 0.0 +694 17 model.scoring_fct_norm 1.0 +694 17 training.batch_size 2.0 +694 17 training.label_smoothing 0.012732770359322016 +694 18 model.embedding_dim 2.0 +694 18 model.scoring_fct_norm 1.0 +694 18 training.batch_size 1.0 +694 18 training.label_smoothing 
0.060960319525196024 +694 19 model.embedding_dim 0.0 +694 19 model.scoring_fct_norm 2.0 +694 19 training.batch_size 0.0 +694 19 training.label_smoothing 0.003629980688600446 +694 20 model.embedding_dim 2.0 +694 20 model.scoring_fct_norm 2.0 +694 20 training.batch_size 2.0 +694 20 training.label_smoothing 0.0035922227131641154 +694 21 model.embedding_dim 1.0 +694 21 model.scoring_fct_norm 2.0 +694 21 training.batch_size 2.0 +694 21 training.label_smoothing 0.030915988626927977 +694 22 model.embedding_dim 1.0 +694 22 model.scoring_fct_norm 1.0 +694 22 training.batch_size 1.0 +694 22 training.label_smoothing 0.0043856435852504105 +694 23 model.embedding_dim 2.0 +694 23 model.scoring_fct_norm 2.0 +694 23 training.batch_size 0.0 +694 23 training.label_smoothing 0.0027011742243136607 +694 24 model.embedding_dim 2.0 +694 24 model.scoring_fct_norm 2.0 +694 24 training.batch_size 0.0 +694 24 training.label_smoothing 0.037042354905118015 +694 25 model.embedding_dim 1.0 +694 25 model.scoring_fct_norm 1.0 +694 25 training.batch_size 2.0 +694 25 training.label_smoothing 0.0021783581377411614 +694 26 model.embedding_dim 2.0 +694 26 model.scoring_fct_norm 1.0 +694 26 training.batch_size 0.0 +694 26 training.label_smoothing 0.03972049604285127 +694 27 model.embedding_dim 1.0 +694 27 model.scoring_fct_norm 1.0 +694 27 training.batch_size 0.0 +694 27 training.label_smoothing 0.19648501926495004 +694 28 model.embedding_dim 0.0 +694 28 model.scoring_fct_norm 2.0 +694 28 training.batch_size 0.0 +694 28 training.label_smoothing 0.006828466088631106 +694 29 model.embedding_dim 1.0 +694 29 model.scoring_fct_norm 1.0 +694 29 training.batch_size 2.0 +694 29 training.label_smoothing 0.0037421641166964052 +694 30 model.embedding_dim 1.0 +694 30 model.scoring_fct_norm 2.0 +694 30 training.batch_size 2.0 +694 30 training.label_smoothing 0.1166412150533128 +694 31 model.embedding_dim 2.0 +694 31 model.scoring_fct_norm 1.0 +694 31 training.batch_size 1.0 +694 31 training.label_smoothing 
0.5135368873225737 +694 32 model.embedding_dim 0.0 +694 32 model.scoring_fct_norm 2.0 +694 32 training.batch_size 0.0 +694 32 training.label_smoothing 0.002438715581579059 +694 33 model.embedding_dim 2.0 +694 33 model.scoring_fct_norm 1.0 +694 33 training.batch_size 0.0 +694 33 training.label_smoothing 0.3912435733316605 +694 34 model.embedding_dim 2.0 +694 34 model.scoring_fct_norm 1.0 +694 34 training.batch_size 1.0 +694 34 training.label_smoothing 0.8629983722037403 +694 35 model.embedding_dim 2.0 +694 35 model.scoring_fct_norm 1.0 +694 35 training.batch_size 0.0 +694 35 training.label_smoothing 0.49935942557956464 +694 36 model.embedding_dim 1.0 +694 36 model.scoring_fct_norm 1.0 +694 36 training.batch_size 2.0 +694 36 training.label_smoothing 0.008346037418721728 +694 37 model.embedding_dim 0.0 +694 37 model.scoring_fct_norm 2.0 +694 37 training.batch_size 2.0 +694 37 training.label_smoothing 0.0011470369803562271 +694 38 model.embedding_dim 0.0 +694 38 model.scoring_fct_norm 1.0 +694 38 training.batch_size 0.0 +694 38 training.label_smoothing 0.5973887038314154 +694 39 model.embedding_dim 1.0 +694 39 model.scoring_fct_norm 1.0 +694 39 training.batch_size 2.0 +694 39 training.label_smoothing 0.010456403031105663 +694 40 model.embedding_dim 1.0 +694 40 model.scoring_fct_norm 1.0 +694 40 training.batch_size 2.0 +694 40 training.label_smoothing 0.5455559532583143 +694 41 model.embedding_dim 1.0 +694 41 model.scoring_fct_norm 1.0 +694 41 training.batch_size 1.0 +694 41 training.label_smoothing 0.1569526444268931 +694 42 model.embedding_dim 2.0 +694 42 model.scoring_fct_norm 2.0 +694 42 training.batch_size 1.0 +694 42 training.label_smoothing 0.04083219459383569 +694 43 model.embedding_dim 1.0 +694 43 model.scoring_fct_norm 1.0 +694 43 training.batch_size 1.0 +694 43 training.label_smoothing 0.5500892570788938 +694 44 model.embedding_dim 1.0 +694 44 model.scoring_fct_norm 2.0 +694 44 training.batch_size 1.0 +694 44 training.label_smoothing 0.0020912584169682953 
+694 45 model.embedding_dim 0.0 +694 45 model.scoring_fct_norm 1.0 +694 45 training.batch_size 2.0 +694 45 training.label_smoothing 0.015504352519588326 +694 46 model.embedding_dim 1.0 +694 46 model.scoring_fct_norm 2.0 +694 46 training.batch_size 2.0 +694 46 training.label_smoothing 0.004120215565140118 +694 47 model.embedding_dim 1.0 +694 47 model.scoring_fct_norm 2.0 +694 47 training.batch_size 1.0 +694 47 training.label_smoothing 0.08179710537066033 +694 48 model.embedding_dim 0.0 +694 48 model.scoring_fct_norm 2.0 +694 48 training.batch_size 1.0 +694 48 training.label_smoothing 0.4582771173400922 +694 49 model.embedding_dim 2.0 +694 49 model.scoring_fct_norm 1.0 +694 49 training.batch_size 2.0 +694 49 training.label_smoothing 0.46925839458647645 +694 50 model.embedding_dim 1.0 +694 50 model.scoring_fct_norm 1.0 +694 50 training.batch_size 2.0 +694 50 training.label_smoothing 0.0021333531498911717 +694 51 model.embedding_dim 1.0 +694 51 model.scoring_fct_norm 1.0 +694 51 training.batch_size 1.0 +694 51 training.label_smoothing 0.014948071184563826 +694 52 model.embedding_dim 2.0 +694 52 model.scoring_fct_norm 1.0 +694 52 training.batch_size 1.0 +694 52 training.label_smoothing 0.04048249052403157 +694 53 model.embedding_dim 0.0 +694 53 model.scoring_fct_norm 2.0 +694 53 training.batch_size 2.0 +694 53 training.label_smoothing 0.17589401714561426 +694 54 model.embedding_dim 2.0 +694 54 model.scoring_fct_norm 2.0 +694 54 training.batch_size 0.0 +694 54 training.label_smoothing 0.0014432457540092005 +694 55 model.embedding_dim 1.0 +694 55 model.scoring_fct_norm 1.0 +694 55 training.batch_size 1.0 +694 55 training.label_smoothing 0.0011937966185562884 +694 56 model.embedding_dim 2.0 +694 56 model.scoring_fct_norm 1.0 +694 56 training.batch_size 1.0 +694 56 training.label_smoothing 0.006967548211571828 +694 57 model.embedding_dim 2.0 +694 57 model.scoring_fct_norm 1.0 +694 57 training.batch_size 1.0 +694 57 training.label_smoothing 0.061429623627894746 +694 58 
model.embedding_dim 0.0 +694 58 model.scoring_fct_norm 2.0 +694 58 training.batch_size 0.0 +694 58 training.label_smoothing 0.0072044025409345085 +694 59 model.embedding_dim 1.0 +694 59 model.scoring_fct_norm 1.0 +694 59 training.batch_size 2.0 +694 59 training.label_smoothing 0.7074897935762127 +694 60 model.embedding_dim 1.0 +694 60 model.scoring_fct_norm 2.0 +694 60 training.batch_size 2.0 +694 60 training.label_smoothing 0.02801472469819134 +694 61 model.embedding_dim 2.0 +694 61 model.scoring_fct_norm 1.0 +694 61 training.batch_size 0.0 +694 61 training.label_smoothing 0.012271106760446177 +694 62 model.embedding_dim 1.0 +694 62 model.scoring_fct_norm 1.0 +694 62 training.batch_size 1.0 +694 62 training.label_smoothing 0.037303062207486415 +694 63 model.embedding_dim 0.0 +694 63 model.scoring_fct_norm 1.0 +694 63 training.batch_size 2.0 +694 63 training.label_smoothing 0.027310976365765436 +694 64 model.embedding_dim 2.0 +694 64 model.scoring_fct_norm 1.0 +694 64 training.batch_size 1.0 +694 64 training.label_smoothing 0.0012238395722095547 +694 65 model.embedding_dim 0.0 +694 65 model.scoring_fct_norm 2.0 +694 65 training.batch_size 2.0 +694 65 training.label_smoothing 0.22283699526429768 +694 66 model.embedding_dim 1.0 +694 66 model.scoring_fct_norm 1.0 +694 66 training.batch_size 0.0 +694 66 training.label_smoothing 0.7380066892704932 +694 67 model.embedding_dim 1.0 +694 67 model.scoring_fct_norm 1.0 +694 67 training.batch_size 1.0 +694 67 training.label_smoothing 0.07075773590416704 +694 68 model.embedding_dim 2.0 +694 68 model.scoring_fct_norm 1.0 +694 68 training.batch_size 1.0 +694 68 training.label_smoothing 0.003653448398291748 +694 69 model.embedding_dim 2.0 +694 69 model.scoring_fct_norm 1.0 +694 69 training.batch_size 0.0 +694 69 training.label_smoothing 0.02075379146431547 +694 70 model.embedding_dim 0.0 +694 70 model.scoring_fct_norm 1.0 +694 70 training.batch_size 1.0 +694 70 training.label_smoothing 0.857498339015796 +694 71 model.embedding_dim 
2.0 +694 71 model.scoring_fct_norm 2.0 +694 71 training.batch_size 2.0 +694 71 training.label_smoothing 0.0022993987441288907 +694 72 model.embedding_dim 2.0 +694 72 model.scoring_fct_norm 1.0 +694 72 training.batch_size 0.0 +694 72 training.label_smoothing 0.06947939574766616 +694 73 model.embedding_dim 1.0 +694 73 model.scoring_fct_norm 1.0 +694 73 training.batch_size 1.0 +694 73 training.label_smoothing 0.007939769700558479 +694 74 model.embedding_dim 2.0 +694 74 model.scoring_fct_norm 2.0 +694 74 training.batch_size 1.0 +694 74 training.label_smoothing 0.39798828644908407 +694 75 model.embedding_dim 2.0 +694 75 model.scoring_fct_norm 1.0 +694 75 training.batch_size 2.0 +694 75 training.label_smoothing 0.004208374364685055 +694 76 model.embedding_dim 0.0 +694 76 model.scoring_fct_norm 2.0 +694 76 training.batch_size 1.0 +694 76 training.label_smoothing 0.04151730913437953 +694 77 model.embedding_dim 0.0 +694 77 model.scoring_fct_norm 1.0 +694 77 training.batch_size 1.0 +694 77 training.label_smoothing 0.002947030641998516 +694 78 model.embedding_dim 1.0 +694 78 model.scoring_fct_norm 1.0 +694 78 training.batch_size 1.0 +694 78 training.label_smoothing 0.11168778809868014 +694 79 model.embedding_dim 2.0 +694 79 model.scoring_fct_norm 1.0 +694 79 training.batch_size 2.0 +694 79 training.label_smoothing 0.022726734261014122 +694 80 model.embedding_dim 1.0 +694 80 model.scoring_fct_norm 2.0 +694 80 training.batch_size 0.0 +694 80 training.label_smoothing 0.0010489061535174852 +694 81 model.embedding_dim 2.0 +694 81 model.scoring_fct_norm 1.0 +694 81 training.batch_size 2.0 +694 81 training.label_smoothing 0.008063105682595283 +694 82 model.embedding_dim 0.0 +694 82 model.scoring_fct_norm 1.0 +694 82 training.batch_size 0.0 +694 82 training.label_smoothing 0.08398812642070516 +694 83 model.embedding_dim 2.0 +694 83 model.scoring_fct_norm 2.0 +694 83 training.batch_size 1.0 +694 83 training.label_smoothing 0.5500044997096611 +694 84 model.embedding_dim 0.0 +694 84 
model.scoring_fct_norm 1.0 +694 84 training.batch_size 1.0 +694 84 training.label_smoothing 0.00977436527366546 +694 85 model.embedding_dim 2.0 +694 85 model.scoring_fct_norm 1.0 +694 85 training.batch_size 2.0 +694 85 training.label_smoothing 0.36985424908872494 +694 86 model.embedding_dim 2.0 +694 86 model.scoring_fct_norm 2.0 +694 86 training.batch_size 1.0 +694 86 training.label_smoothing 0.2551031132161538 +694 87 model.embedding_dim 0.0 +694 87 model.scoring_fct_norm 1.0 +694 87 training.batch_size 2.0 +694 87 training.label_smoothing 0.010912945120834577 +694 88 model.embedding_dim 1.0 +694 88 model.scoring_fct_norm 2.0 +694 88 training.batch_size 0.0 +694 88 training.label_smoothing 0.06745626396335101 +694 89 model.embedding_dim 2.0 +694 89 model.scoring_fct_norm 2.0 +694 89 training.batch_size 1.0 +694 89 training.label_smoothing 0.23373853093914432 +694 90 model.embedding_dim 0.0 +694 90 model.scoring_fct_norm 1.0 +694 90 training.batch_size 2.0 +694 90 training.label_smoothing 0.028605263070347487 +694 91 model.embedding_dim 1.0 +694 91 model.scoring_fct_norm 1.0 +694 91 training.batch_size 1.0 +694 91 training.label_smoothing 0.1486541168715529 +694 92 model.embedding_dim 0.0 +694 92 model.scoring_fct_norm 1.0 +694 92 training.batch_size 2.0 +694 92 training.label_smoothing 0.005568451641632981 +694 93 model.embedding_dim 1.0 +694 93 model.scoring_fct_norm 1.0 +694 93 training.batch_size 0.0 +694 93 training.label_smoothing 0.17166680381633997 +694 94 model.embedding_dim 0.0 +694 94 model.scoring_fct_norm 2.0 +694 94 training.batch_size 1.0 +694 94 training.label_smoothing 0.002121054686266659 +694 95 model.embedding_dim 1.0 +694 95 model.scoring_fct_norm 1.0 +694 95 training.batch_size 0.0 +694 95 training.label_smoothing 0.3625626966588705 +694 96 model.embedding_dim 1.0 +694 96 model.scoring_fct_norm 2.0 +694 96 training.batch_size 1.0 +694 96 training.label_smoothing 0.028559409840895427 +694 97 model.embedding_dim 1.0 +694 97 
model.scoring_fct_norm 1.0 +694 97 training.batch_size 0.0 +694 97 training.label_smoothing 0.14499567240119654 +694 98 model.embedding_dim 1.0 +694 98 model.scoring_fct_norm 1.0 +694 98 training.batch_size 0.0 +694 98 training.label_smoothing 0.6181631469030362 +694 99 model.embedding_dim 1.0 +694 99 model.scoring_fct_norm 1.0 +694 99 training.batch_size 0.0 +694 99 training.label_smoothing 0.001467376689074773 +694 100 model.embedding_dim 0.0 +694 100 model.scoring_fct_norm 1.0 +694 100 training.batch_size 2.0 +694 100 training.label_smoothing 0.0818532870816845 +694 1 dataset """kinships""" +694 1 model """structuredembedding""" +694 1 loss """crossentropy""" +694 1 regularizer """no""" +694 1 optimizer """adadelta""" +694 1 training_loop """lcwa""" +694 1 evaluator """rankbased""" +694 2 dataset """kinships""" +694 2 model """structuredembedding""" +694 2 loss """crossentropy""" +694 2 regularizer """no""" +694 2 optimizer """adadelta""" +694 2 training_loop """lcwa""" +694 2 evaluator """rankbased""" +694 3 dataset """kinships""" +694 3 model """structuredembedding""" +694 3 loss """crossentropy""" +694 3 regularizer """no""" +694 3 optimizer """adadelta""" +694 3 training_loop """lcwa""" +694 3 evaluator """rankbased""" +694 4 dataset """kinships""" +694 4 model """structuredembedding""" +694 4 loss """crossentropy""" +694 4 regularizer """no""" +694 4 optimizer """adadelta""" +694 4 training_loop """lcwa""" +694 4 evaluator """rankbased""" +694 5 dataset """kinships""" +694 5 model """structuredembedding""" +694 5 loss """crossentropy""" +694 5 regularizer """no""" +694 5 optimizer """adadelta""" +694 5 training_loop """lcwa""" +694 5 evaluator """rankbased""" +694 6 dataset """kinships""" +694 6 model """structuredembedding""" +694 6 loss """crossentropy""" +694 6 regularizer """no""" +694 6 optimizer """adadelta""" +694 6 training_loop """lcwa""" +694 6 evaluator """rankbased""" +694 7 dataset """kinships""" +694 7 model """structuredembedding""" +694 7 
loss """crossentropy""" +694 7 regularizer """no""" +694 7 optimizer """adadelta""" +694 7 training_loop """lcwa""" +694 7 evaluator """rankbased""" +694 8 dataset """kinships""" +694 8 model """structuredembedding""" +694 8 loss """crossentropy""" +694 8 regularizer """no""" +694 8 optimizer """adadelta""" +694 8 training_loop """lcwa""" +694 8 evaluator """rankbased""" +694 9 dataset """kinships""" +694 9 model """structuredembedding""" +694 9 loss """crossentropy""" +694 9 regularizer """no""" +694 9 optimizer """adadelta""" +694 9 training_loop """lcwa""" +694 9 evaluator """rankbased""" +694 10 dataset """kinships""" +694 10 model """structuredembedding""" +694 10 loss """crossentropy""" +694 10 regularizer """no""" +694 10 optimizer """adadelta""" +694 10 training_loop """lcwa""" +694 10 evaluator """rankbased""" +694 11 dataset """kinships""" +694 11 model """structuredembedding""" +694 11 loss """crossentropy""" +694 11 regularizer """no""" +694 11 optimizer """adadelta""" +694 11 training_loop """lcwa""" +694 11 evaluator """rankbased""" +694 12 dataset """kinships""" +694 12 model """structuredembedding""" +694 12 loss """crossentropy""" +694 12 regularizer """no""" +694 12 optimizer """adadelta""" +694 12 training_loop """lcwa""" +694 12 evaluator """rankbased""" +694 13 dataset """kinships""" +694 13 model """structuredembedding""" +694 13 loss """crossentropy""" +694 13 regularizer """no""" +694 13 optimizer """adadelta""" +694 13 training_loop """lcwa""" +694 13 evaluator """rankbased""" +694 14 dataset """kinships""" +694 14 model """structuredembedding""" +694 14 loss """crossentropy""" +694 14 regularizer """no""" +694 14 optimizer """adadelta""" +694 14 training_loop """lcwa""" +694 14 evaluator """rankbased""" +694 15 dataset """kinships""" +694 15 model """structuredembedding""" +694 15 loss """crossentropy""" +694 15 regularizer """no""" +694 15 optimizer """adadelta""" +694 15 training_loop """lcwa""" +694 15 evaluator """rankbased""" +694 16 
dataset """kinships""" +694 16 model """structuredembedding""" +694 16 loss """crossentropy""" +694 16 regularizer """no""" +694 16 optimizer """adadelta""" +694 16 training_loop """lcwa""" +694 16 evaluator """rankbased""" +694 17 dataset """kinships""" +694 17 model """structuredembedding""" +694 17 loss """crossentropy""" +694 17 regularizer """no""" +694 17 optimizer """adadelta""" +694 17 training_loop """lcwa""" +694 17 evaluator """rankbased""" +694 18 dataset """kinships""" +694 18 model """structuredembedding""" +694 18 loss """crossentropy""" +694 18 regularizer """no""" +694 18 optimizer """adadelta""" +694 18 training_loop """lcwa""" +694 18 evaluator """rankbased""" +694 19 dataset """kinships""" +694 19 model """structuredembedding""" +694 19 loss """crossentropy""" +694 19 regularizer """no""" +694 19 optimizer """adadelta""" +694 19 training_loop """lcwa""" +694 19 evaluator """rankbased""" +694 20 dataset """kinships""" +694 20 model """structuredembedding""" +694 20 loss """crossentropy""" +694 20 regularizer """no""" +694 20 optimizer """adadelta""" +694 20 training_loop """lcwa""" +694 20 evaluator """rankbased""" +694 21 dataset """kinships""" +694 21 model """structuredembedding""" +694 21 loss """crossentropy""" +694 21 regularizer """no""" +694 21 optimizer """adadelta""" +694 21 training_loop """lcwa""" +694 21 evaluator """rankbased""" +694 22 dataset """kinships""" +694 22 model """structuredembedding""" +694 22 loss """crossentropy""" +694 22 regularizer """no""" +694 22 optimizer """adadelta""" +694 22 training_loop """lcwa""" +694 22 evaluator """rankbased""" +694 23 dataset """kinships""" +694 23 model """structuredembedding""" +694 23 loss """crossentropy""" +694 23 regularizer """no""" +694 23 optimizer """adadelta""" +694 23 training_loop """lcwa""" +694 23 evaluator """rankbased""" +694 24 dataset """kinships""" +694 24 model """structuredembedding""" +694 24 loss """crossentropy""" +694 24 regularizer """no""" +694 24 optimizer 
"""adadelta""" +694 24 training_loop """lcwa""" +694 24 evaluator """rankbased""" +694 25 dataset """kinships""" +694 25 model """structuredembedding""" +694 25 loss """crossentropy""" +694 25 regularizer """no""" +694 25 optimizer """adadelta""" +694 25 training_loop """lcwa""" +694 25 evaluator """rankbased""" +694 26 dataset """kinships""" +694 26 model """structuredembedding""" +694 26 loss """crossentropy""" +694 26 regularizer """no""" +694 26 optimizer """adadelta""" +694 26 training_loop """lcwa""" +694 26 evaluator """rankbased""" +694 27 dataset """kinships""" +694 27 model """structuredembedding""" +694 27 loss """crossentropy""" +694 27 regularizer """no""" +694 27 optimizer """adadelta""" +694 27 training_loop """lcwa""" +694 27 evaluator """rankbased""" +694 28 dataset """kinships""" +694 28 model """structuredembedding""" +694 28 loss """crossentropy""" +694 28 regularizer """no""" +694 28 optimizer """adadelta""" +694 28 training_loop """lcwa""" +694 28 evaluator """rankbased""" +694 29 dataset """kinships""" +694 29 model """structuredembedding""" +694 29 loss """crossentropy""" +694 29 regularizer """no""" +694 29 optimizer """adadelta""" +694 29 training_loop """lcwa""" +694 29 evaluator """rankbased""" +694 30 dataset """kinships""" +694 30 model """structuredembedding""" +694 30 loss """crossentropy""" +694 30 regularizer """no""" +694 30 optimizer """adadelta""" +694 30 training_loop """lcwa""" +694 30 evaluator """rankbased""" +694 31 dataset """kinships""" +694 31 model """structuredembedding""" +694 31 loss """crossentropy""" +694 31 regularizer """no""" +694 31 optimizer """adadelta""" +694 31 training_loop """lcwa""" +694 31 evaluator """rankbased""" +694 32 dataset """kinships""" +694 32 model """structuredembedding""" +694 32 loss """crossentropy""" +694 32 regularizer """no""" +694 32 optimizer """adadelta""" +694 32 training_loop """lcwa""" +694 32 evaluator """rankbased""" +694 33 dataset """kinships""" +694 33 model 
"""structuredembedding""" +694 33 loss """crossentropy""" +694 33 regularizer """no""" +694 33 optimizer """adadelta""" +694 33 training_loop """lcwa""" +694 33 evaluator """rankbased""" +694 34 dataset """kinships""" +694 34 model """structuredembedding""" +694 34 loss """crossentropy""" +694 34 regularizer """no""" +694 34 optimizer """adadelta""" +694 34 training_loop """lcwa""" +694 34 evaluator """rankbased""" +694 35 dataset """kinships""" +694 35 model """structuredembedding""" +694 35 loss """crossentropy""" +694 35 regularizer """no""" +694 35 optimizer """adadelta""" +694 35 training_loop """lcwa""" +694 35 evaluator """rankbased""" +694 36 dataset """kinships""" +694 36 model """structuredembedding""" +694 36 loss """crossentropy""" +694 36 regularizer """no""" +694 36 optimizer """adadelta""" +694 36 training_loop """lcwa""" +694 36 evaluator """rankbased""" +694 37 dataset """kinships""" +694 37 model """structuredembedding""" +694 37 loss """crossentropy""" +694 37 regularizer """no""" +694 37 optimizer """adadelta""" +694 37 training_loop """lcwa""" +694 37 evaluator """rankbased""" +694 38 dataset """kinships""" +694 38 model """structuredembedding""" +694 38 loss """crossentropy""" +694 38 regularizer """no""" +694 38 optimizer """adadelta""" +694 38 training_loop """lcwa""" +694 38 evaluator """rankbased""" +694 39 dataset """kinships""" +694 39 model """structuredembedding""" +694 39 loss """crossentropy""" +694 39 regularizer """no""" +694 39 optimizer """adadelta""" +694 39 training_loop """lcwa""" +694 39 evaluator """rankbased""" +694 40 dataset """kinships""" +694 40 model """structuredembedding""" +694 40 loss """crossentropy""" +694 40 regularizer """no""" +694 40 optimizer """adadelta""" +694 40 training_loop """lcwa""" +694 40 evaluator """rankbased""" +694 41 dataset """kinships""" +694 41 model """structuredembedding""" +694 41 loss """crossentropy""" +694 41 regularizer """no""" +694 41 optimizer """adadelta""" +694 41 training_loop 
"""lcwa""" +694 41 evaluator """rankbased""" +694 42 dataset """kinships""" +694 42 model """structuredembedding""" +694 42 loss """crossentropy""" +694 42 regularizer """no""" +694 42 optimizer """adadelta""" +694 42 training_loop """lcwa""" +694 42 evaluator """rankbased""" +694 43 dataset """kinships""" +694 43 model """structuredembedding""" +694 43 loss """crossentropy""" +694 43 regularizer """no""" +694 43 optimizer """adadelta""" +694 43 training_loop """lcwa""" +694 43 evaluator """rankbased""" +694 44 dataset """kinships""" +694 44 model """structuredembedding""" +694 44 loss """crossentropy""" +694 44 regularizer """no""" +694 44 optimizer """adadelta""" +694 44 training_loop """lcwa""" +694 44 evaluator """rankbased""" +694 45 dataset """kinships""" +694 45 model """structuredembedding""" +694 45 loss """crossentropy""" +694 45 regularizer """no""" +694 45 optimizer """adadelta""" +694 45 training_loop """lcwa""" +694 45 evaluator """rankbased""" +694 46 dataset """kinships""" +694 46 model """structuredembedding""" +694 46 loss """crossentropy""" +694 46 regularizer """no""" +694 46 optimizer """adadelta""" +694 46 training_loop """lcwa""" +694 46 evaluator """rankbased""" +694 47 dataset """kinships""" +694 47 model """structuredembedding""" +694 47 loss """crossentropy""" +694 47 regularizer """no""" +694 47 optimizer """adadelta""" +694 47 training_loop """lcwa""" +694 47 evaluator """rankbased""" +694 48 dataset """kinships""" +694 48 model """structuredembedding""" +694 48 loss """crossentropy""" +694 48 regularizer """no""" +694 48 optimizer """adadelta""" +694 48 training_loop """lcwa""" +694 48 evaluator """rankbased""" +694 49 dataset """kinships""" +694 49 model """structuredembedding""" +694 49 loss """crossentropy""" +694 49 regularizer """no""" +694 49 optimizer """adadelta""" +694 49 training_loop """lcwa""" +694 49 evaluator """rankbased""" +694 50 dataset """kinships""" +694 50 model """structuredembedding""" +694 50 loss 
"""crossentropy""" +694 50 regularizer """no""" +694 50 optimizer """adadelta""" +694 50 training_loop """lcwa""" +694 50 evaluator """rankbased""" +694 51 dataset """kinships""" +694 51 model """structuredembedding""" +694 51 loss """crossentropy""" +694 51 regularizer """no""" +694 51 optimizer """adadelta""" +694 51 training_loop """lcwa""" +694 51 evaluator """rankbased""" +694 52 dataset """kinships""" +694 52 model """structuredembedding""" +694 52 loss """crossentropy""" +694 52 regularizer """no""" +694 52 optimizer """adadelta""" +694 52 training_loop """lcwa""" +694 52 evaluator """rankbased""" +694 53 dataset """kinships""" +694 53 model """structuredembedding""" +694 53 loss """crossentropy""" +694 53 regularizer """no""" +694 53 optimizer """adadelta""" +694 53 training_loop """lcwa""" +694 53 evaluator """rankbased""" +694 54 dataset """kinships""" +694 54 model """structuredembedding""" +694 54 loss """crossentropy""" +694 54 regularizer """no""" +694 54 optimizer """adadelta""" +694 54 training_loop """lcwa""" +694 54 evaluator """rankbased""" +694 55 dataset """kinships""" +694 55 model """structuredembedding""" +694 55 loss """crossentropy""" +694 55 regularizer """no""" +694 55 optimizer """adadelta""" +694 55 training_loop """lcwa""" +694 55 evaluator """rankbased""" +694 56 dataset """kinships""" +694 56 model """structuredembedding""" +694 56 loss """crossentropy""" +694 56 regularizer """no""" +694 56 optimizer """adadelta""" +694 56 training_loop """lcwa""" +694 56 evaluator """rankbased""" +694 57 dataset """kinships""" +694 57 model """structuredembedding""" +694 57 loss """crossentropy""" +694 57 regularizer """no""" +694 57 optimizer """adadelta""" +694 57 training_loop """lcwa""" +694 57 evaluator """rankbased""" +694 58 dataset """kinships""" +694 58 model """structuredembedding""" +694 58 loss """crossentropy""" +694 58 regularizer """no""" +694 58 optimizer """adadelta""" +694 58 training_loop """lcwa""" +694 58 evaluator 
"""rankbased""" +694 59 dataset """kinships""" +694 59 model """structuredembedding""" +694 59 loss """crossentropy""" +694 59 regularizer """no""" +694 59 optimizer """adadelta""" +694 59 training_loop """lcwa""" +694 59 evaluator """rankbased""" +694 60 dataset """kinships""" +694 60 model """structuredembedding""" +694 60 loss """crossentropy""" +694 60 regularizer """no""" +694 60 optimizer """adadelta""" +694 60 training_loop """lcwa""" +694 60 evaluator """rankbased""" +694 61 dataset """kinships""" +694 61 model """structuredembedding""" +694 61 loss """crossentropy""" +694 61 regularizer """no""" +694 61 optimizer """adadelta""" +694 61 training_loop """lcwa""" +694 61 evaluator """rankbased""" +694 62 dataset """kinships""" +694 62 model """structuredembedding""" +694 62 loss """crossentropy""" +694 62 regularizer """no""" +694 62 optimizer """adadelta""" +694 62 training_loop """lcwa""" +694 62 evaluator """rankbased""" +694 63 dataset """kinships""" +694 63 model """structuredembedding""" +694 63 loss """crossentropy""" +694 63 regularizer """no""" +694 63 optimizer """adadelta""" +694 63 training_loop """lcwa""" +694 63 evaluator """rankbased""" +694 64 dataset """kinships""" +694 64 model """structuredembedding""" +694 64 loss """crossentropy""" +694 64 regularizer """no""" +694 64 optimizer """adadelta""" +694 64 training_loop """lcwa""" +694 64 evaluator """rankbased""" +694 65 dataset """kinships""" +694 65 model """structuredembedding""" +694 65 loss """crossentropy""" +694 65 regularizer """no""" +694 65 optimizer """adadelta""" +694 65 training_loop """lcwa""" +694 65 evaluator """rankbased""" +694 66 dataset """kinships""" +694 66 model """structuredembedding""" +694 66 loss """crossentropy""" +694 66 regularizer """no""" +694 66 optimizer """adadelta""" +694 66 training_loop """lcwa""" +694 66 evaluator """rankbased""" +694 67 dataset """kinships""" +694 67 model """structuredembedding""" +694 67 loss """crossentropy""" +694 67 regularizer 
"""no""" +694 67 optimizer """adadelta""" +694 67 training_loop """lcwa""" +694 67 evaluator """rankbased""" +694 68 dataset """kinships""" +694 68 model """structuredembedding""" +694 68 loss """crossentropy""" +694 68 regularizer """no""" +694 68 optimizer """adadelta""" +694 68 training_loop """lcwa""" +694 68 evaluator """rankbased""" +694 69 dataset """kinships""" +694 69 model """structuredembedding""" +694 69 loss """crossentropy""" +694 69 regularizer """no""" +694 69 optimizer """adadelta""" +694 69 training_loop """lcwa""" +694 69 evaluator """rankbased""" +694 70 dataset """kinships""" +694 70 model """structuredembedding""" +694 70 loss """crossentropy""" +694 70 regularizer """no""" +694 70 optimizer """adadelta""" +694 70 training_loop """lcwa""" +694 70 evaluator """rankbased""" +694 71 dataset """kinships""" +694 71 model """structuredembedding""" +694 71 loss """crossentropy""" +694 71 regularizer """no""" +694 71 optimizer """adadelta""" +694 71 training_loop """lcwa""" +694 71 evaluator """rankbased""" +694 72 dataset """kinships""" +694 72 model """structuredembedding""" +694 72 loss """crossentropy""" +694 72 regularizer """no""" +694 72 optimizer """adadelta""" +694 72 training_loop """lcwa""" +694 72 evaluator """rankbased""" +694 73 dataset """kinships""" +694 73 model """structuredembedding""" +694 73 loss """crossentropy""" +694 73 regularizer """no""" +694 73 optimizer """adadelta""" +694 73 training_loop """lcwa""" +694 73 evaluator """rankbased""" +694 74 dataset """kinships""" +694 74 model """structuredembedding""" +694 74 loss """crossentropy""" +694 74 regularizer """no""" +694 74 optimizer """adadelta""" +694 74 training_loop """lcwa""" +694 74 evaluator """rankbased""" +694 75 dataset """kinships""" +694 75 model """structuredembedding""" +694 75 loss """crossentropy""" +694 75 regularizer """no""" +694 75 optimizer """adadelta""" +694 75 training_loop """lcwa""" +694 75 evaluator """rankbased""" +694 76 dataset """kinships""" 
+694 76 model """structuredembedding""" +694 76 loss """crossentropy""" +694 76 regularizer """no""" +694 76 optimizer """adadelta""" +694 76 training_loop """lcwa""" +694 76 evaluator """rankbased""" +694 77 dataset """kinships""" +694 77 model """structuredembedding""" +694 77 loss """crossentropy""" +694 77 regularizer """no""" +694 77 optimizer """adadelta""" +694 77 training_loop """lcwa""" +694 77 evaluator """rankbased""" +694 78 dataset """kinships""" +694 78 model """structuredembedding""" +694 78 loss """crossentropy""" +694 78 regularizer """no""" +694 78 optimizer """adadelta""" +694 78 training_loop """lcwa""" +694 78 evaluator """rankbased""" +694 79 dataset """kinships""" +694 79 model """structuredembedding""" +694 79 loss """crossentropy""" +694 79 regularizer """no""" +694 79 optimizer """adadelta""" +694 79 training_loop """lcwa""" +694 79 evaluator """rankbased""" +694 80 dataset """kinships""" +694 80 model """structuredembedding""" +694 80 loss """crossentropy""" +694 80 regularizer """no""" +694 80 optimizer """adadelta""" +694 80 training_loop """lcwa""" +694 80 evaluator """rankbased""" +694 81 dataset """kinships""" +694 81 model """structuredembedding""" +694 81 loss """crossentropy""" +694 81 regularizer """no""" +694 81 optimizer """adadelta""" +694 81 training_loop """lcwa""" +694 81 evaluator """rankbased""" +694 82 dataset """kinships""" +694 82 model """structuredembedding""" +694 82 loss """crossentropy""" +694 82 regularizer """no""" +694 82 optimizer """adadelta""" +694 82 training_loop """lcwa""" +694 82 evaluator """rankbased""" +694 83 dataset """kinships""" +694 83 model """structuredembedding""" +694 83 loss """crossentropy""" +694 83 regularizer """no""" +694 83 optimizer """adadelta""" +694 83 training_loop """lcwa""" +694 83 evaluator """rankbased""" +694 84 dataset """kinships""" +694 84 model """structuredembedding""" +694 84 loss """crossentropy""" +694 84 regularizer """no""" +694 84 optimizer """adadelta""" +694 84 
training_loop """lcwa""" +694 84 evaluator """rankbased""" +694 85 dataset """kinships""" +694 85 model """structuredembedding""" +694 85 loss """crossentropy""" +694 85 regularizer """no""" +694 85 optimizer """adadelta""" +694 85 training_loop """lcwa""" +694 85 evaluator """rankbased""" +694 86 dataset """kinships""" +694 86 model """structuredembedding""" +694 86 loss """crossentropy""" +694 86 regularizer """no""" +694 86 optimizer """adadelta""" +694 86 training_loop """lcwa""" +694 86 evaluator """rankbased""" +694 87 dataset """kinships""" +694 87 model """structuredembedding""" +694 87 loss """crossentropy""" +694 87 regularizer """no""" +694 87 optimizer """adadelta""" +694 87 training_loop """lcwa""" +694 87 evaluator """rankbased""" +694 88 dataset """kinships""" +694 88 model """structuredembedding""" +694 88 loss """crossentropy""" +694 88 regularizer """no""" +694 88 optimizer """adadelta""" +694 88 training_loop """lcwa""" +694 88 evaluator """rankbased""" +694 89 dataset """kinships""" +694 89 model """structuredembedding""" +694 89 loss """crossentropy""" +694 89 regularizer """no""" +694 89 optimizer """adadelta""" +694 89 training_loop """lcwa""" +694 89 evaluator """rankbased""" +694 90 dataset """kinships""" +694 90 model """structuredembedding""" +694 90 loss """crossentropy""" +694 90 regularizer """no""" +694 90 optimizer """adadelta""" +694 90 training_loop """lcwa""" +694 90 evaluator """rankbased""" +694 91 dataset """kinships""" +694 91 model """structuredembedding""" +694 91 loss """crossentropy""" +694 91 regularizer """no""" +694 91 optimizer """adadelta""" +694 91 training_loop """lcwa""" +694 91 evaluator """rankbased""" +694 92 dataset """kinships""" +694 92 model """structuredembedding""" +694 92 loss """crossentropy""" +694 92 regularizer """no""" +694 92 optimizer """adadelta""" +694 92 training_loop """lcwa""" +694 92 evaluator """rankbased""" +694 93 dataset """kinships""" +694 93 model """structuredembedding""" +694 93 loss 
"""crossentropy""" +694 93 regularizer """no""" +694 93 optimizer """adadelta""" +694 93 training_loop """lcwa""" +694 93 evaluator """rankbased""" +694 94 dataset """kinships""" +694 94 model """structuredembedding""" +694 94 loss """crossentropy""" +694 94 regularizer """no""" +694 94 optimizer """adadelta""" +694 94 training_loop """lcwa""" +694 94 evaluator """rankbased""" +694 95 dataset """kinships""" +694 95 model """structuredembedding""" +694 95 loss """crossentropy""" +694 95 regularizer """no""" +694 95 optimizer """adadelta""" +694 95 training_loop """lcwa""" +694 95 evaluator """rankbased""" +694 96 dataset """kinships""" +694 96 model """structuredembedding""" +694 96 loss """crossentropy""" +694 96 regularizer """no""" +694 96 optimizer """adadelta""" +694 96 training_loop """lcwa""" +694 96 evaluator """rankbased""" +694 97 dataset """kinships""" +694 97 model """structuredembedding""" +694 97 loss """crossentropy""" +694 97 regularizer """no""" +694 97 optimizer """adadelta""" +694 97 training_loop """lcwa""" +694 97 evaluator """rankbased""" +694 98 dataset """kinships""" +694 98 model """structuredembedding""" +694 98 loss """crossentropy""" +694 98 regularizer """no""" +694 98 optimizer """adadelta""" +694 98 training_loop """lcwa""" +694 98 evaluator """rankbased""" +694 99 dataset """kinships""" +694 99 model """structuredembedding""" +694 99 loss """crossentropy""" +694 99 regularizer """no""" +694 99 optimizer """adadelta""" +694 99 training_loop """lcwa""" +694 99 evaluator """rankbased""" +694 100 dataset """kinships""" +694 100 model """structuredembedding""" +694 100 loss """crossentropy""" +694 100 regularizer """no""" +694 100 optimizer """adadelta""" +694 100 training_loop """lcwa""" +694 100 evaluator """rankbased""" +695 1 model.embedding_dim 2.0 +695 1 model.scoring_fct_norm 2.0 +695 1 negative_sampler.num_negs_per_pos 5.0 +695 1 training.batch_size 1.0 +695 2 model.embedding_dim 0.0 +695 2 model.scoring_fct_norm 2.0 +695 2 
negative_sampler.num_negs_per_pos 65.0 +695 2 training.batch_size 1.0 +695 3 model.embedding_dim 2.0 +695 3 model.scoring_fct_norm 1.0 +695 3 negative_sampler.num_negs_per_pos 7.0 +695 3 training.batch_size 0.0 +695 4 model.embedding_dim 0.0 +695 4 model.scoring_fct_norm 2.0 +695 4 negative_sampler.num_negs_per_pos 46.0 +695 4 training.batch_size 2.0 +695 5 model.embedding_dim 1.0 +695 5 model.scoring_fct_norm 2.0 +695 5 negative_sampler.num_negs_per_pos 36.0 +695 5 training.batch_size 2.0 +695 6 model.embedding_dim 2.0 +695 6 model.scoring_fct_norm 1.0 +695 6 negative_sampler.num_negs_per_pos 72.0 +695 6 training.batch_size 1.0 +695 7 model.embedding_dim 2.0 +695 7 model.scoring_fct_norm 1.0 +695 7 negative_sampler.num_negs_per_pos 81.0 +695 7 training.batch_size 1.0 +695 8 model.embedding_dim 0.0 +695 8 model.scoring_fct_norm 2.0 +695 8 negative_sampler.num_negs_per_pos 10.0 +695 8 training.batch_size 2.0 +695 9 model.embedding_dim 0.0 +695 9 model.scoring_fct_norm 2.0 +695 9 negative_sampler.num_negs_per_pos 52.0 +695 9 training.batch_size 2.0 +695 10 model.embedding_dim 1.0 +695 10 model.scoring_fct_norm 1.0 +695 10 negative_sampler.num_negs_per_pos 39.0 +695 10 training.batch_size 2.0 +695 11 model.embedding_dim 0.0 +695 11 model.scoring_fct_norm 2.0 +695 11 negative_sampler.num_negs_per_pos 48.0 +695 11 training.batch_size 2.0 +695 12 model.embedding_dim 0.0 +695 12 model.scoring_fct_norm 2.0 +695 12 negative_sampler.num_negs_per_pos 66.0 +695 12 training.batch_size 1.0 +695 13 model.embedding_dim 2.0 +695 13 model.scoring_fct_norm 1.0 +695 13 negative_sampler.num_negs_per_pos 6.0 +695 13 training.batch_size 0.0 +695 14 model.embedding_dim 2.0 +695 14 model.scoring_fct_norm 2.0 +695 14 negative_sampler.num_negs_per_pos 44.0 +695 14 training.batch_size 0.0 +695 15 model.embedding_dim 0.0 +695 15 model.scoring_fct_norm 2.0 +695 15 negative_sampler.num_negs_per_pos 93.0 +695 15 training.batch_size 2.0 +695 16 model.embedding_dim 2.0 +695 16 
model.scoring_fct_norm 2.0 +695 16 negative_sampler.num_negs_per_pos 70.0 +695 16 training.batch_size 0.0 +695 17 model.embedding_dim 2.0 +695 17 model.scoring_fct_norm 1.0 +695 17 negative_sampler.num_negs_per_pos 2.0 +695 17 training.batch_size 1.0 +695 18 model.embedding_dim 2.0 +695 18 model.scoring_fct_norm 1.0 +695 18 negative_sampler.num_negs_per_pos 74.0 +695 18 training.batch_size 2.0 +695 19 model.embedding_dim 1.0 +695 19 model.scoring_fct_norm 2.0 +695 19 negative_sampler.num_negs_per_pos 20.0 +695 19 training.batch_size 1.0 +695 20 model.embedding_dim 0.0 +695 20 model.scoring_fct_norm 2.0 +695 20 negative_sampler.num_negs_per_pos 3.0 +695 20 training.batch_size 1.0 +695 21 model.embedding_dim 1.0 +695 21 model.scoring_fct_norm 1.0 +695 21 negative_sampler.num_negs_per_pos 74.0 +695 21 training.batch_size 2.0 +695 22 model.embedding_dim 2.0 +695 22 model.scoring_fct_norm 1.0 +695 22 negative_sampler.num_negs_per_pos 92.0 +695 22 training.batch_size 1.0 +695 23 model.embedding_dim 1.0 +695 23 model.scoring_fct_norm 2.0 +695 23 negative_sampler.num_negs_per_pos 62.0 +695 23 training.batch_size 1.0 +695 24 model.embedding_dim 1.0 +695 24 model.scoring_fct_norm 2.0 +695 24 negative_sampler.num_negs_per_pos 97.0 +695 24 training.batch_size 2.0 +695 25 model.embedding_dim 0.0 +695 25 model.scoring_fct_norm 2.0 +695 25 negative_sampler.num_negs_per_pos 97.0 +695 25 training.batch_size 0.0 +695 26 model.embedding_dim 1.0 +695 26 model.scoring_fct_norm 1.0 +695 26 negative_sampler.num_negs_per_pos 25.0 +695 26 training.batch_size 2.0 +695 27 model.embedding_dim 0.0 +695 27 model.scoring_fct_norm 2.0 +695 27 negative_sampler.num_negs_per_pos 15.0 +695 27 training.batch_size 2.0 +695 28 model.embedding_dim 0.0 +695 28 model.scoring_fct_norm 1.0 +695 28 negative_sampler.num_negs_per_pos 71.0 +695 28 training.batch_size 1.0 +695 29 model.embedding_dim 2.0 +695 29 model.scoring_fct_norm 1.0 +695 29 negative_sampler.num_negs_per_pos 52.0 +695 29 training.batch_size 
2.0 +695 30 model.embedding_dim 2.0 +695 30 model.scoring_fct_norm 2.0 +695 30 negative_sampler.num_negs_per_pos 54.0 +695 30 training.batch_size 0.0 +695 31 model.embedding_dim 2.0 +695 31 model.scoring_fct_norm 2.0 +695 31 negative_sampler.num_negs_per_pos 14.0 +695 31 training.batch_size 2.0 +695 32 model.embedding_dim 1.0 +695 32 model.scoring_fct_norm 1.0 +695 32 negative_sampler.num_negs_per_pos 5.0 +695 32 training.batch_size 1.0 +695 33 model.embedding_dim 0.0 +695 33 model.scoring_fct_norm 1.0 +695 33 negative_sampler.num_negs_per_pos 25.0 +695 33 training.batch_size 0.0 +695 34 model.embedding_dim 0.0 +695 34 model.scoring_fct_norm 2.0 +695 34 negative_sampler.num_negs_per_pos 64.0 +695 34 training.batch_size 2.0 +695 35 model.embedding_dim 2.0 +695 35 model.scoring_fct_norm 1.0 +695 35 negative_sampler.num_negs_per_pos 70.0 +695 35 training.batch_size 0.0 +695 36 model.embedding_dim 1.0 +695 36 model.scoring_fct_norm 2.0 +695 36 negative_sampler.num_negs_per_pos 36.0 +695 36 training.batch_size 0.0 +695 37 model.embedding_dim 2.0 +695 37 model.scoring_fct_norm 1.0 +695 37 negative_sampler.num_negs_per_pos 0.0 +695 37 training.batch_size 0.0 +695 38 model.embedding_dim 1.0 +695 38 model.scoring_fct_norm 2.0 +695 38 negative_sampler.num_negs_per_pos 65.0 +695 38 training.batch_size 0.0 +695 39 model.embedding_dim 2.0 +695 39 model.scoring_fct_norm 2.0 +695 39 negative_sampler.num_negs_per_pos 53.0 +695 39 training.batch_size 1.0 +695 40 model.embedding_dim 2.0 +695 40 model.scoring_fct_norm 2.0 +695 40 negative_sampler.num_negs_per_pos 18.0 +695 40 training.batch_size 1.0 +695 41 model.embedding_dim 0.0 +695 41 model.scoring_fct_norm 2.0 +695 41 negative_sampler.num_negs_per_pos 42.0 +695 41 training.batch_size 0.0 +695 42 model.embedding_dim 2.0 +695 42 model.scoring_fct_norm 1.0 +695 42 negative_sampler.num_negs_per_pos 52.0 +695 42 training.batch_size 0.0 +695 43 model.embedding_dim 2.0 +695 43 model.scoring_fct_norm 1.0 +695 43 
negative_sampler.num_negs_per_pos 3.0 +695 43 training.batch_size 2.0 +695 44 model.embedding_dim 1.0 +695 44 model.scoring_fct_norm 1.0 +695 44 negative_sampler.num_negs_per_pos 5.0 +695 44 training.batch_size 2.0 +695 45 model.embedding_dim 2.0 +695 45 model.scoring_fct_norm 1.0 +695 45 negative_sampler.num_negs_per_pos 68.0 +695 45 training.batch_size 0.0 +695 46 model.embedding_dim 0.0 +695 46 model.scoring_fct_norm 2.0 +695 46 negative_sampler.num_negs_per_pos 12.0 +695 46 training.batch_size 1.0 +695 47 model.embedding_dim 1.0 +695 47 model.scoring_fct_norm 2.0 +695 47 negative_sampler.num_negs_per_pos 27.0 +695 47 training.batch_size 1.0 +695 1 dataset """kinships""" +695 1 model """structuredembedding""" +695 1 loss """bceaftersigmoid""" +695 1 regularizer """no""" +695 1 optimizer """adadelta""" +695 1 training_loop """owa""" +695 1 negative_sampler """basic""" +695 1 evaluator """rankbased""" +695 2 dataset """kinships""" +695 2 model """structuredembedding""" +695 2 loss """bceaftersigmoid""" +695 2 regularizer """no""" +695 2 optimizer """adadelta""" +695 2 training_loop """owa""" +695 2 negative_sampler """basic""" +695 2 evaluator """rankbased""" +695 3 dataset """kinships""" +695 3 model """structuredembedding""" +695 3 loss """bceaftersigmoid""" +695 3 regularizer """no""" +695 3 optimizer """adadelta""" +695 3 training_loop """owa""" +695 3 negative_sampler """basic""" +695 3 evaluator """rankbased""" +695 4 dataset """kinships""" +695 4 model """structuredembedding""" +695 4 loss """bceaftersigmoid""" +695 4 regularizer """no""" +695 4 optimizer """adadelta""" +695 4 training_loop """owa""" +695 4 negative_sampler """basic""" +695 4 evaluator """rankbased""" +695 5 dataset """kinships""" +695 5 model """structuredembedding""" +695 5 loss """bceaftersigmoid""" +695 5 regularizer """no""" +695 5 optimizer """adadelta""" +695 5 training_loop """owa""" +695 5 negative_sampler """basic""" +695 5 evaluator """rankbased""" +695 6 dataset """kinships""" 
+695 6 model """structuredembedding""" +695 6 loss """bceaftersigmoid""" +695 6 regularizer """no""" +695 6 optimizer """adadelta""" +695 6 training_loop """owa""" +695 6 negative_sampler """basic""" +695 6 evaluator """rankbased""" +695 7 dataset """kinships""" +695 7 model """structuredembedding""" +695 7 loss """bceaftersigmoid""" +695 7 regularizer """no""" +695 7 optimizer """adadelta""" +695 7 training_loop """owa""" +695 7 negative_sampler """basic""" +695 7 evaluator """rankbased""" +695 8 dataset """kinships""" +695 8 model """structuredembedding""" +695 8 loss """bceaftersigmoid""" +695 8 regularizer """no""" +695 8 optimizer """adadelta""" +695 8 training_loop """owa""" +695 8 negative_sampler """basic""" +695 8 evaluator """rankbased""" +695 9 dataset """kinships""" +695 9 model """structuredembedding""" +695 9 loss """bceaftersigmoid""" +695 9 regularizer """no""" +695 9 optimizer """adadelta""" +695 9 training_loop """owa""" +695 9 negative_sampler """basic""" +695 9 evaluator """rankbased""" +695 10 dataset """kinships""" +695 10 model """structuredembedding""" +695 10 loss """bceaftersigmoid""" +695 10 regularizer """no""" +695 10 optimizer """adadelta""" +695 10 training_loop """owa""" +695 10 negative_sampler """basic""" +695 10 evaluator """rankbased""" +695 11 dataset """kinships""" +695 11 model """structuredembedding""" +695 11 loss """bceaftersigmoid""" +695 11 regularizer """no""" +695 11 optimizer """adadelta""" +695 11 training_loop """owa""" +695 11 negative_sampler """basic""" +695 11 evaluator """rankbased""" +695 12 dataset """kinships""" +695 12 model """structuredembedding""" +695 12 loss """bceaftersigmoid""" +695 12 regularizer """no""" +695 12 optimizer """adadelta""" +695 12 training_loop """owa""" +695 12 negative_sampler """basic""" +695 12 evaluator """rankbased""" +695 13 dataset """kinships""" +695 13 model """structuredembedding""" +695 13 loss """bceaftersigmoid""" +695 13 regularizer """no""" +695 13 optimizer 
"""adadelta""" +695 13 training_loop """owa""" +695 13 negative_sampler """basic""" +695 13 evaluator """rankbased""" +695 14 dataset """kinships""" +695 14 model """structuredembedding""" +695 14 loss """bceaftersigmoid""" +695 14 regularizer """no""" +695 14 optimizer """adadelta""" +695 14 training_loop """owa""" +695 14 negative_sampler """basic""" +695 14 evaluator """rankbased""" +695 15 dataset """kinships""" +695 15 model """structuredembedding""" +695 15 loss """bceaftersigmoid""" +695 15 regularizer """no""" +695 15 optimizer """adadelta""" +695 15 training_loop """owa""" +695 15 negative_sampler """basic""" +695 15 evaluator """rankbased""" +695 16 dataset """kinships""" +695 16 model """structuredembedding""" +695 16 loss """bceaftersigmoid""" +695 16 regularizer """no""" +695 16 optimizer """adadelta""" +695 16 training_loop """owa""" +695 16 negative_sampler """basic""" +695 16 evaluator """rankbased""" +695 17 dataset """kinships""" +695 17 model """structuredembedding""" +695 17 loss """bceaftersigmoid""" +695 17 regularizer """no""" +695 17 optimizer """adadelta""" +695 17 training_loop """owa""" +695 17 negative_sampler """basic""" +695 17 evaluator """rankbased""" +695 18 dataset """kinships""" +695 18 model """structuredembedding""" +695 18 loss """bceaftersigmoid""" +695 18 regularizer """no""" +695 18 optimizer """adadelta""" +695 18 training_loop """owa""" +695 18 negative_sampler """basic""" +695 18 evaluator """rankbased""" +695 19 dataset """kinships""" +695 19 model """structuredembedding""" +695 19 loss """bceaftersigmoid""" +695 19 regularizer """no""" +695 19 optimizer """adadelta""" +695 19 training_loop """owa""" +695 19 negative_sampler """basic""" +695 19 evaluator """rankbased""" +695 20 dataset """kinships""" +695 20 model """structuredembedding""" +695 20 loss """bceaftersigmoid""" +695 20 regularizer """no""" +695 20 optimizer """adadelta""" +695 20 training_loop """owa""" +695 20 negative_sampler """basic""" +695 20 evaluator 
"""rankbased""" +695 21 dataset """kinships""" +695 21 model """structuredembedding""" +695 21 loss """bceaftersigmoid""" +695 21 regularizer """no""" +695 21 optimizer """adadelta""" +695 21 training_loop """owa""" +695 21 negative_sampler """basic""" +695 21 evaluator """rankbased""" +695 22 dataset """kinships""" +695 22 model """structuredembedding""" +695 22 loss """bceaftersigmoid""" +695 22 regularizer """no""" +695 22 optimizer """adadelta""" +695 22 training_loop """owa""" +695 22 negative_sampler """basic""" +695 22 evaluator """rankbased""" +695 23 dataset """kinships""" +695 23 model """structuredembedding""" +695 23 loss """bceaftersigmoid""" +695 23 regularizer """no""" +695 23 optimizer """adadelta""" +695 23 training_loop """owa""" +695 23 negative_sampler """basic""" +695 23 evaluator """rankbased""" +695 24 dataset """kinships""" +695 24 model """structuredembedding""" +695 24 loss """bceaftersigmoid""" +695 24 regularizer """no""" +695 24 optimizer """adadelta""" +695 24 training_loop """owa""" +695 24 negative_sampler """basic""" +695 24 evaluator """rankbased""" +695 25 dataset """kinships""" +695 25 model """structuredembedding""" +695 25 loss """bceaftersigmoid""" +695 25 regularizer """no""" +695 25 optimizer """adadelta""" +695 25 training_loop """owa""" +695 25 negative_sampler """basic""" +695 25 evaluator """rankbased""" +695 26 dataset """kinships""" +695 26 model """structuredembedding""" +695 26 loss """bceaftersigmoid""" +695 26 regularizer """no""" +695 26 optimizer """adadelta""" +695 26 training_loop """owa""" +695 26 negative_sampler """basic""" +695 26 evaluator """rankbased""" +695 27 dataset """kinships""" +695 27 model """structuredembedding""" +695 27 loss """bceaftersigmoid""" +695 27 regularizer """no""" +695 27 optimizer """adadelta""" +695 27 training_loop """owa""" +695 27 negative_sampler """basic""" +695 27 evaluator """rankbased""" +695 28 dataset """kinships""" +695 28 model """structuredembedding""" +695 28 loss 
"""bceaftersigmoid""" +695 28 regularizer """no""" +695 28 optimizer """adadelta""" +695 28 training_loop """owa""" +695 28 negative_sampler """basic""" +695 28 evaluator """rankbased""" +695 29 dataset """kinships""" +695 29 model """structuredembedding""" +695 29 loss """bceaftersigmoid""" +695 29 regularizer """no""" +695 29 optimizer """adadelta""" +695 29 training_loop """owa""" +695 29 negative_sampler """basic""" +695 29 evaluator """rankbased""" +695 30 dataset """kinships""" +695 30 model """structuredembedding""" +695 30 loss """bceaftersigmoid""" +695 30 regularizer """no""" +695 30 optimizer """adadelta""" +695 30 training_loop """owa""" +695 30 negative_sampler """basic""" +695 30 evaluator """rankbased""" +695 31 dataset """kinships""" +695 31 model """structuredembedding""" +695 31 loss """bceaftersigmoid""" +695 31 regularizer """no""" +695 31 optimizer """adadelta""" +695 31 training_loop """owa""" +695 31 negative_sampler """basic""" +695 31 evaluator """rankbased""" +695 32 dataset """kinships""" +695 32 model """structuredembedding""" +695 32 loss """bceaftersigmoid""" +695 32 regularizer """no""" +695 32 optimizer """adadelta""" +695 32 training_loop """owa""" +695 32 negative_sampler """basic""" +695 32 evaluator """rankbased""" +695 33 dataset """kinships""" +695 33 model """structuredembedding""" +695 33 loss """bceaftersigmoid""" +695 33 regularizer """no""" +695 33 optimizer """adadelta""" +695 33 training_loop """owa""" +695 33 negative_sampler """basic""" +695 33 evaluator """rankbased""" +695 34 dataset """kinships""" +695 34 model """structuredembedding""" +695 34 loss """bceaftersigmoid""" +695 34 regularizer """no""" +695 34 optimizer """adadelta""" +695 34 training_loop """owa""" +695 34 negative_sampler """basic""" +695 34 evaluator """rankbased""" +695 35 dataset """kinships""" +695 35 model """structuredembedding""" +695 35 loss """bceaftersigmoid""" +695 35 regularizer """no""" +695 35 optimizer """adadelta""" +695 35 
training_loop """owa""" +695 35 negative_sampler """basic""" +695 35 evaluator """rankbased""" +695 36 dataset """kinships""" +695 36 model """structuredembedding""" +695 36 loss """bceaftersigmoid""" +695 36 regularizer """no""" +695 36 optimizer """adadelta""" +695 36 training_loop """owa""" +695 36 negative_sampler """basic""" +695 36 evaluator """rankbased""" +695 37 dataset """kinships""" +695 37 model """structuredembedding""" +695 37 loss """bceaftersigmoid""" +695 37 regularizer """no""" +695 37 optimizer """adadelta""" +695 37 training_loop """owa""" +695 37 negative_sampler """basic""" +695 37 evaluator """rankbased""" +695 38 dataset """kinships""" +695 38 model """structuredembedding""" +695 38 loss """bceaftersigmoid""" +695 38 regularizer """no""" +695 38 optimizer """adadelta""" +695 38 training_loop """owa""" +695 38 negative_sampler """basic""" +695 38 evaluator """rankbased""" +695 39 dataset """kinships""" +695 39 model """structuredembedding""" +695 39 loss """bceaftersigmoid""" +695 39 regularizer """no""" +695 39 optimizer """adadelta""" +695 39 training_loop """owa""" +695 39 negative_sampler """basic""" +695 39 evaluator """rankbased""" +695 40 dataset """kinships""" +695 40 model """structuredembedding""" +695 40 loss """bceaftersigmoid""" +695 40 regularizer """no""" +695 40 optimizer """adadelta""" +695 40 training_loop """owa""" +695 40 negative_sampler """basic""" +695 40 evaluator """rankbased""" +695 41 dataset """kinships""" +695 41 model """structuredembedding""" +695 41 loss """bceaftersigmoid""" +695 41 regularizer """no""" +695 41 optimizer """adadelta""" +695 41 training_loop """owa""" +695 41 negative_sampler """basic""" +695 41 evaluator """rankbased""" +695 42 dataset """kinships""" +695 42 model """structuredembedding""" +695 42 loss """bceaftersigmoid""" +695 42 regularizer """no""" +695 42 optimizer """adadelta""" +695 42 training_loop """owa""" +695 42 negative_sampler """basic""" +695 42 evaluator """rankbased""" +695 43 
dataset """kinships""" +695 43 model """structuredembedding""" +695 43 loss """bceaftersigmoid""" +695 43 regularizer """no""" +695 43 optimizer """adadelta""" +695 43 training_loop """owa""" +695 43 negative_sampler """basic""" +695 43 evaluator """rankbased""" +695 44 dataset """kinships""" +695 44 model """structuredembedding""" +695 44 loss """bceaftersigmoid""" +695 44 regularizer """no""" +695 44 optimizer """adadelta""" +695 44 training_loop """owa""" +695 44 negative_sampler """basic""" +695 44 evaluator """rankbased""" +695 45 dataset """kinships""" +695 45 model """structuredembedding""" +695 45 loss """bceaftersigmoid""" +695 45 regularizer """no""" +695 45 optimizer """adadelta""" +695 45 training_loop """owa""" +695 45 negative_sampler """basic""" +695 45 evaluator """rankbased""" +695 46 dataset """kinships""" +695 46 model """structuredembedding""" +695 46 loss """bceaftersigmoid""" +695 46 regularizer """no""" +695 46 optimizer """adadelta""" +695 46 training_loop """owa""" +695 46 negative_sampler """basic""" +695 46 evaluator """rankbased""" +695 47 dataset """kinships""" +695 47 model """structuredembedding""" +695 47 loss """bceaftersigmoid""" +695 47 regularizer """no""" +695 47 optimizer """adadelta""" +695 47 training_loop """owa""" +695 47 negative_sampler """basic""" +695 47 evaluator """rankbased""" +696 1 model.embedding_dim 0.0 +696 1 model.scoring_fct_norm 1.0 +696 1 negative_sampler.num_negs_per_pos 58.0 +696 1 training.batch_size 2.0 +696 2 model.embedding_dim 1.0 +696 2 model.scoring_fct_norm 1.0 +696 2 negative_sampler.num_negs_per_pos 58.0 +696 2 training.batch_size 2.0 +696 3 model.embedding_dim 1.0 +696 3 model.scoring_fct_norm 1.0 +696 3 negative_sampler.num_negs_per_pos 15.0 +696 3 training.batch_size 0.0 +696 4 model.embedding_dim 2.0 +696 4 model.scoring_fct_norm 1.0 +696 4 negative_sampler.num_negs_per_pos 26.0 +696 4 training.batch_size 0.0 +696 5 model.embedding_dim 2.0 +696 5 model.scoring_fct_norm 1.0 +696 5 
negative_sampler.num_negs_per_pos 47.0 +696 5 training.batch_size 2.0 +696 6 model.embedding_dim 2.0 +696 6 model.scoring_fct_norm 1.0 +696 6 negative_sampler.num_negs_per_pos 92.0 +696 6 training.batch_size 0.0 +696 7 model.embedding_dim 2.0 +696 7 model.scoring_fct_norm 1.0 +696 7 negative_sampler.num_negs_per_pos 66.0 +696 7 training.batch_size 0.0 +696 8 model.embedding_dim 1.0 +696 8 model.scoring_fct_norm 1.0 +696 8 negative_sampler.num_negs_per_pos 11.0 +696 8 training.batch_size 2.0 +696 9 model.embedding_dim 2.0 +696 9 model.scoring_fct_norm 1.0 +696 9 negative_sampler.num_negs_per_pos 95.0 +696 9 training.batch_size 0.0 +696 10 model.embedding_dim 0.0 +696 10 model.scoring_fct_norm 2.0 +696 10 negative_sampler.num_negs_per_pos 59.0 +696 10 training.batch_size 0.0 +696 11 model.embedding_dim 2.0 +696 11 model.scoring_fct_norm 2.0 +696 11 negative_sampler.num_negs_per_pos 52.0 +696 11 training.batch_size 1.0 +696 12 model.embedding_dim 0.0 +696 12 model.scoring_fct_norm 2.0 +696 12 negative_sampler.num_negs_per_pos 82.0 +696 12 training.batch_size 1.0 +696 13 model.embedding_dim 2.0 +696 13 model.scoring_fct_norm 1.0 +696 13 negative_sampler.num_negs_per_pos 12.0 +696 13 training.batch_size 0.0 +696 14 model.embedding_dim 0.0 +696 14 model.scoring_fct_norm 2.0 +696 14 negative_sampler.num_negs_per_pos 81.0 +696 14 training.batch_size 0.0 +696 15 model.embedding_dim 1.0 +696 15 model.scoring_fct_norm 2.0 +696 15 negative_sampler.num_negs_per_pos 89.0 +696 15 training.batch_size 1.0 +696 16 model.embedding_dim 2.0 +696 16 model.scoring_fct_norm 1.0 +696 16 negative_sampler.num_negs_per_pos 12.0 +696 16 training.batch_size 0.0 +696 17 model.embedding_dim 1.0 +696 17 model.scoring_fct_norm 1.0 +696 17 negative_sampler.num_negs_per_pos 24.0 +696 17 training.batch_size 1.0 +696 18 model.embedding_dim 1.0 +696 18 model.scoring_fct_norm 1.0 +696 18 negative_sampler.num_negs_per_pos 29.0 +696 18 training.batch_size 2.0 +696 19 model.embedding_dim 1.0 +696 19 
model.scoring_fct_norm 2.0 +696 19 negative_sampler.num_negs_per_pos 33.0 +696 19 training.batch_size 1.0 +696 20 model.embedding_dim 1.0 +696 20 model.scoring_fct_norm 2.0 +696 20 negative_sampler.num_negs_per_pos 98.0 +696 20 training.batch_size 1.0 +696 21 model.embedding_dim 1.0 +696 21 model.scoring_fct_norm 1.0 +696 21 negative_sampler.num_negs_per_pos 95.0 +696 21 training.batch_size 0.0 +696 22 model.embedding_dim 1.0 +696 22 model.scoring_fct_norm 1.0 +696 22 negative_sampler.num_negs_per_pos 73.0 +696 22 training.batch_size 2.0 +696 23 model.embedding_dim 0.0 +696 23 model.scoring_fct_norm 1.0 +696 23 negative_sampler.num_negs_per_pos 40.0 +696 23 training.batch_size 1.0 +696 24 model.embedding_dim 0.0 +696 24 model.scoring_fct_norm 1.0 +696 24 negative_sampler.num_negs_per_pos 4.0 +696 24 training.batch_size 0.0 +696 25 model.embedding_dim 1.0 +696 25 model.scoring_fct_norm 2.0 +696 25 negative_sampler.num_negs_per_pos 90.0 +696 25 training.batch_size 0.0 +696 26 model.embedding_dim 2.0 +696 26 model.scoring_fct_norm 2.0 +696 26 negative_sampler.num_negs_per_pos 98.0 +696 26 training.batch_size 0.0 +696 27 model.embedding_dim 2.0 +696 27 model.scoring_fct_norm 1.0 +696 27 negative_sampler.num_negs_per_pos 17.0 +696 27 training.batch_size 0.0 +696 28 model.embedding_dim 1.0 +696 28 model.scoring_fct_norm 1.0 +696 28 negative_sampler.num_negs_per_pos 16.0 +696 28 training.batch_size 1.0 +696 29 model.embedding_dim 0.0 +696 29 model.scoring_fct_norm 2.0 +696 29 negative_sampler.num_negs_per_pos 54.0 +696 29 training.batch_size 0.0 +696 30 model.embedding_dim 2.0 +696 30 model.scoring_fct_norm 2.0 +696 30 negative_sampler.num_negs_per_pos 20.0 +696 30 training.batch_size 0.0 +696 31 model.embedding_dim 1.0 +696 31 model.scoring_fct_norm 2.0 +696 31 negative_sampler.num_negs_per_pos 24.0 +696 31 training.batch_size 2.0 +696 32 model.embedding_dim 0.0 +696 32 model.scoring_fct_norm 2.0 +696 32 negative_sampler.num_negs_per_pos 65.0 +696 32 training.batch_size 
2.0 +696 33 model.embedding_dim 2.0 +696 33 model.scoring_fct_norm 2.0 +696 33 negative_sampler.num_negs_per_pos 19.0 +696 33 training.batch_size 2.0 +696 34 model.embedding_dim 1.0 +696 34 model.scoring_fct_norm 1.0 +696 34 negative_sampler.num_negs_per_pos 24.0 +696 34 training.batch_size 2.0 +696 35 model.embedding_dim 2.0 +696 35 model.scoring_fct_norm 2.0 +696 35 negative_sampler.num_negs_per_pos 0.0 +696 35 training.batch_size 2.0 +696 36 model.embedding_dim 2.0 +696 36 model.scoring_fct_norm 1.0 +696 36 negative_sampler.num_negs_per_pos 15.0 +696 36 training.batch_size 1.0 +696 37 model.embedding_dim 2.0 +696 37 model.scoring_fct_norm 2.0 +696 37 negative_sampler.num_negs_per_pos 26.0 +696 37 training.batch_size 0.0 +696 38 model.embedding_dim 1.0 +696 38 model.scoring_fct_norm 1.0 +696 38 negative_sampler.num_negs_per_pos 97.0 +696 38 training.batch_size 1.0 +696 39 model.embedding_dim 0.0 +696 39 model.scoring_fct_norm 2.0 +696 39 negative_sampler.num_negs_per_pos 65.0 +696 39 training.batch_size 2.0 +696 40 model.embedding_dim 2.0 +696 40 model.scoring_fct_norm 1.0 +696 40 negative_sampler.num_negs_per_pos 47.0 +696 40 training.batch_size 2.0 +696 41 model.embedding_dim 1.0 +696 41 model.scoring_fct_norm 1.0 +696 41 negative_sampler.num_negs_per_pos 37.0 +696 41 training.batch_size 2.0 +696 42 model.embedding_dim 1.0 +696 42 model.scoring_fct_norm 1.0 +696 42 negative_sampler.num_negs_per_pos 85.0 +696 42 training.batch_size 0.0 +696 43 model.embedding_dim 0.0 +696 43 model.scoring_fct_norm 1.0 +696 43 negative_sampler.num_negs_per_pos 18.0 +696 43 training.batch_size 1.0 +696 44 model.embedding_dim 2.0 +696 44 model.scoring_fct_norm 1.0 +696 44 negative_sampler.num_negs_per_pos 53.0 +696 44 training.batch_size 2.0 +696 45 model.embedding_dim 0.0 +696 45 model.scoring_fct_norm 2.0 +696 45 negative_sampler.num_negs_per_pos 65.0 +696 45 training.batch_size 1.0 +696 46 model.embedding_dim 0.0 +696 46 model.scoring_fct_norm 1.0 +696 46 
negative_sampler.num_negs_per_pos 40.0 +696 46 training.batch_size 1.0 +696 47 model.embedding_dim 1.0 +696 47 model.scoring_fct_norm 1.0 +696 47 negative_sampler.num_negs_per_pos 47.0 +696 47 training.batch_size 2.0 +696 48 model.embedding_dim 1.0 +696 48 model.scoring_fct_norm 2.0 +696 48 negative_sampler.num_negs_per_pos 30.0 +696 48 training.batch_size 2.0 +696 49 model.embedding_dim 1.0 +696 49 model.scoring_fct_norm 2.0 +696 49 negative_sampler.num_negs_per_pos 90.0 +696 49 training.batch_size 2.0 +696 50 model.embedding_dim 1.0 +696 50 model.scoring_fct_norm 1.0 +696 50 negative_sampler.num_negs_per_pos 59.0 +696 50 training.batch_size 2.0 +696 51 model.embedding_dim 2.0 +696 51 model.scoring_fct_norm 1.0 +696 51 negative_sampler.num_negs_per_pos 75.0 +696 51 training.batch_size 0.0 +696 52 model.embedding_dim 1.0 +696 52 model.scoring_fct_norm 2.0 +696 52 negative_sampler.num_negs_per_pos 66.0 +696 52 training.batch_size 1.0 +696 53 model.embedding_dim 1.0 +696 53 model.scoring_fct_norm 1.0 +696 53 negative_sampler.num_negs_per_pos 99.0 +696 53 training.batch_size 2.0 +696 54 model.embedding_dim 0.0 +696 54 model.scoring_fct_norm 2.0 +696 54 negative_sampler.num_negs_per_pos 91.0 +696 54 training.batch_size 1.0 +696 55 model.embedding_dim 2.0 +696 55 model.scoring_fct_norm 2.0 +696 55 negative_sampler.num_negs_per_pos 39.0 +696 55 training.batch_size 2.0 +696 1 dataset """kinships""" +696 1 model """structuredembedding""" +696 1 loss """softplus""" +696 1 regularizer """no""" +696 1 optimizer """adadelta""" +696 1 training_loop """owa""" +696 1 negative_sampler """basic""" +696 1 evaluator """rankbased""" +696 2 dataset """kinships""" +696 2 model """structuredembedding""" +696 2 loss """softplus""" +696 2 regularizer """no""" +696 2 optimizer """adadelta""" +696 2 training_loop """owa""" +696 2 negative_sampler """basic""" +696 2 evaluator """rankbased""" +696 3 dataset """kinships""" +696 3 model """structuredembedding""" +696 3 loss """softplus""" +696 3 
regularizer """no""" +696 3 optimizer """adadelta""" +696 3 training_loop """owa""" +696 3 negative_sampler """basic""" +696 3 evaluator """rankbased""" +696 4 dataset """kinships""" +696 4 model """structuredembedding""" +696 4 loss """softplus""" +696 4 regularizer """no""" +696 4 optimizer """adadelta""" +696 4 training_loop """owa""" +696 4 negative_sampler """basic""" +696 4 evaluator """rankbased""" +696 5 dataset """kinships""" +696 5 model """structuredembedding""" +696 5 loss """softplus""" +696 5 regularizer """no""" +696 5 optimizer """adadelta""" +696 5 training_loop """owa""" +696 5 negative_sampler """basic""" +696 5 evaluator """rankbased""" +696 6 dataset """kinships""" +696 6 model """structuredembedding""" +696 6 loss """softplus""" +696 6 regularizer """no""" +696 6 optimizer """adadelta""" +696 6 training_loop """owa""" +696 6 negative_sampler """basic""" +696 6 evaluator """rankbased""" +696 7 dataset """kinships""" +696 7 model """structuredembedding""" +696 7 loss """softplus""" +696 7 regularizer """no""" +696 7 optimizer """adadelta""" +696 7 training_loop """owa""" +696 7 negative_sampler """basic""" +696 7 evaluator """rankbased""" +696 8 dataset """kinships""" +696 8 model """structuredembedding""" +696 8 loss """softplus""" +696 8 regularizer """no""" +696 8 optimizer """adadelta""" +696 8 training_loop """owa""" +696 8 negative_sampler """basic""" +696 8 evaluator """rankbased""" +696 9 dataset """kinships""" +696 9 model """structuredembedding""" +696 9 loss """softplus""" +696 9 regularizer """no""" +696 9 optimizer """adadelta""" +696 9 training_loop """owa""" +696 9 negative_sampler """basic""" +696 9 evaluator """rankbased""" +696 10 dataset """kinships""" +696 10 model """structuredembedding""" +696 10 loss """softplus""" +696 10 regularizer """no""" +696 10 optimizer """adadelta""" +696 10 training_loop """owa""" +696 10 negative_sampler """basic""" +696 10 evaluator """rankbased""" +696 11 dataset """kinships""" +696 11 model 
"""structuredembedding""" +696 11 loss """softplus""" +696 11 regularizer """no""" +696 11 optimizer """adadelta""" +696 11 training_loop """owa""" +696 11 negative_sampler """basic""" +696 11 evaluator """rankbased""" +696 12 dataset """kinships""" +696 12 model """structuredembedding""" +696 12 loss """softplus""" +696 12 regularizer """no""" +696 12 optimizer """adadelta""" +696 12 training_loop """owa""" +696 12 negative_sampler """basic""" +696 12 evaluator """rankbased""" +696 13 dataset """kinships""" +696 13 model """structuredembedding""" +696 13 loss """softplus""" +696 13 regularizer """no""" +696 13 optimizer """adadelta""" +696 13 training_loop """owa""" +696 13 negative_sampler """basic""" +696 13 evaluator """rankbased""" +696 14 dataset """kinships""" +696 14 model """structuredembedding""" +696 14 loss """softplus""" +696 14 regularizer """no""" +696 14 optimizer """adadelta""" +696 14 training_loop """owa""" +696 14 negative_sampler """basic""" +696 14 evaluator """rankbased""" +696 15 dataset """kinships""" +696 15 model """structuredembedding""" +696 15 loss """softplus""" +696 15 regularizer """no""" +696 15 optimizer """adadelta""" +696 15 training_loop """owa""" +696 15 negative_sampler """basic""" +696 15 evaluator """rankbased""" +696 16 dataset """kinships""" +696 16 model """structuredembedding""" +696 16 loss """softplus""" +696 16 regularizer """no""" +696 16 optimizer """adadelta""" +696 16 training_loop """owa""" +696 16 negative_sampler """basic""" +696 16 evaluator """rankbased""" +696 17 dataset """kinships""" +696 17 model """structuredembedding""" +696 17 loss """softplus""" +696 17 regularizer """no""" +696 17 optimizer """adadelta""" +696 17 training_loop """owa""" +696 17 negative_sampler """basic""" +696 17 evaluator """rankbased""" +696 18 dataset """kinships""" +696 18 model """structuredembedding""" +696 18 loss """softplus""" +696 18 regularizer """no""" +696 18 optimizer """adadelta""" +696 18 training_loop """owa""" 
+696 18 negative_sampler """basic""" +696 18 evaluator """rankbased""" +696 19 dataset """kinships""" +696 19 model """structuredembedding""" +696 19 loss """softplus""" +696 19 regularizer """no""" +696 19 optimizer """adadelta""" +696 19 training_loop """owa""" +696 19 negative_sampler """basic""" +696 19 evaluator """rankbased""" +696 20 dataset """kinships""" +696 20 model """structuredembedding""" +696 20 loss """softplus""" +696 20 regularizer """no""" +696 20 optimizer """adadelta""" +696 20 training_loop """owa""" +696 20 negative_sampler """basic""" +696 20 evaluator """rankbased""" +696 21 dataset """kinships""" +696 21 model """structuredembedding""" +696 21 loss """softplus""" +696 21 regularizer """no""" +696 21 optimizer """adadelta""" +696 21 training_loop """owa""" +696 21 negative_sampler """basic""" +696 21 evaluator """rankbased""" +696 22 dataset """kinships""" +696 22 model """structuredembedding""" +696 22 loss """softplus""" +696 22 regularizer """no""" +696 22 optimizer """adadelta""" +696 22 training_loop """owa""" +696 22 negative_sampler """basic""" +696 22 evaluator """rankbased""" +696 23 dataset """kinships""" +696 23 model """structuredembedding""" +696 23 loss """softplus""" +696 23 regularizer """no""" +696 23 optimizer """adadelta""" +696 23 training_loop """owa""" +696 23 negative_sampler """basic""" +696 23 evaluator """rankbased""" +696 24 dataset """kinships""" +696 24 model """structuredembedding""" +696 24 loss """softplus""" +696 24 regularizer """no""" +696 24 optimizer """adadelta""" +696 24 training_loop """owa""" +696 24 negative_sampler """basic""" +696 24 evaluator """rankbased""" +696 25 dataset """kinships""" +696 25 model """structuredembedding""" +696 25 loss """softplus""" +696 25 regularizer """no""" +696 25 optimizer """adadelta""" +696 25 training_loop """owa""" +696 25 negative_sampler """basic""" +696 25 evaluator """rankbased""" +696 26 dataset """kinships""" +696 26 model """structuredembedding""" +696 26 
loss """softplus""" +696 26 regularizer """no""" +696 26 optimizer """adadelta""" +696 26 training_loop """owa""" +696 26 negative_sampler """basic""" +696 26 evaluator """rankbased""" +696 27 dataset """kinships""" +696 27 model """structuredembedding""" +696 27 loss """softplus""" +696 27 regularizer """no""" +696 27 optimizer """adadelta""" +696 27 training_loop """owa""" +696 27 negative_sampler """basic""" +696 27 evaluator """rankbased""" +696 28 dataset """kinships""" +696 28 model """structuredembedding""" +696 28 loss """softplus""" +696 28 regularizer """no""" +696 28 optimizer """adadelta""" +696 28 training_loop """owa""" +696 28 negative_sampler """basic""" +696 28 evaluator """rankbased""" +696 29 dataset """kinships""" +696 29 model """structuredembedding""" +696 29 loss """softplus""" +696 29 regularizer """no""" +696 29 optimizer """adadelta""" +696 29 training_loop """owa""" +696 29 negative_sampler """basic""" +696 29 evaluator """rankbased""" +696 30 dataset """kinships""" +696 30 model """structuredembedding""" +696 30 loss """softplus""" +696 30 regularizer """no""" +696 30 optimizer """adadelta""" +696 30 training_loop """owa""" +696 30 negative_sampler """basic""" +696 30 evaluator """rankbased""" +696 31 dataset """kinships""" +696 31 model """structuredembedding""" +696 31 loss """softplus""" +696 31 regularizer """no""" +696 31 optimizer """adadelta""" +696 31 training_loop """owa""" +696 31 negative_sampler """basic""" +696 31 evaluator """rankbased""" +696 32 dataset """kinships""" +696 32 model """structuredembedding""" +696 32 loss """softplus""" +696 32 regularizer """no""" +696 32 optimizer """adadelta""" +696 32 training_loop """owa""" +696 32 negative_sampler """basic""" +696 32 evaluator """rankbased""" +696 33 dataset """kinships""" +696 33 model """structuredembedding""" +696 33 loss """softplus""" +696 33 regularizer """no""" +696 33 optimizer """adadelta""" +696 33 training_loop """owa""" +696 33 negative_sampler """basic""" 
+696 33 evaluator """rankbased""" +696 34 dataset """kinships""" +696 34 model """structuredembedding""" +696 34 loss """softplus""" +696 34 regularizer """no""" +696 34 optimizer """adadelta""" +696 34 training_loop """owa""" +696 34 negative_sampler """basic""" +696 34 evaluator """rankbased""" +696 35 dataset """kinships""" +696 35 model """structuredembedding""" +696 35 loss """softplus""" +696 35 regularizer """no""" +696 35 optimizer """adadelta""" +696 35 training_loop """owa""" +696 35 negative_sampler """basic""" +696 35 evaluator """rankbased""" +696 36 dataset """kinships""" +696 36 model """structuredembedding""" +696 36 loss """softplus""" +696 36 regularizer """no""" +696 36 optimizer """adadelta""" +696 36 training_loop """owa""" +696 36 negative_sampler """basic""" +696 36 evaluator """rankbased""" +696 37 dataset """kinships""" +696 37 model """structuredembedding""" +696 37 loss """softplus""" +696 37 regularizer """no""" +696 37 optimizer """adadelta""" +696 37 training_loop """owa""" +696 37 negative_sampler """basic""" +696 37 evaluator """rankbased""" +696 38 dataset """kinships""" +696 38 model """structuredembedding""" +696 38 loss """softplus""" +696 38 regularizer """no""" +696 38 optimizer """adadelta""" +696 38 training_loop """owa""" +696 38 negative_sampler """basic""" +696 38 evaluator """rankbased""" +696 39 dataset """kinships""" +696 39 model """structuredembedding""" +696 39 loss """softplus""" +696 39 regularizer """no""" +696 39 optimizer """adadelta""" +696 39 training_loop """owa""" +696 39 negative_sampler """basic""" +696 39 evaluator """rankbased""" +696 40 dataset """kinships""" +696 40 model """structuredembedding""" +696 40 loss """softplus""" +696 40 regularizer """no""" +696 40 optimizer """adadelta""" +696 40 training_loop """owa""" +696 40 negative_sampler """basic""" +696 40 evaluator """rankbased""" +696 41 dataset """kinships""" +696 41 model """structuredembedding""" +696 41 loss """softplus""" +696 41 
regularizer """no""" +696 41 optimizer """adadelta""" +696 41 training_loop """owa""" +696 41 negative_sampler """basic""" +696 41 evaluator """rankbased""" +696 42 dataset """kinships""" +696 42 model """structuredembedding""" +696 42 loss """softplus""" +696 42 regularizer """no""" +696 42 optimizer """adadelta""" +696 42 training_loop """owa""" +696 42 negative_sampler """basic""" +696 42 evaluator """rankbased""" +696 43 dataset """kinships""" +696 43 model """structuredembedding""" +696 43 loss """softplus""" +696 43 regularizer """no""" +696 43 optimizer """adadelta""" +696 43 training_loop """owa""" +696 43 negative_sampler """basic""" +696 43 evaluator """rankbased""" +696 44 dataset """kinships""" +696 44 model """structuredembedding""" +696 44 loss """softplus""" +696 44 regularizer """no""" +696 44 optimizer """adadelta""" +696 44 training_loop """owa""" +696 44 negative_sampler """basic""" +696 44 evaluator """rankbased""" +696 45 dataset """kinships""" +696 45 model """structuredembedding""" +696 45 loss """softplus""" +696 45 regularizer """no""" +696 45 optimizer """adadelta""" +696 45 training_loop """owa""" +696 45 negative_sampler """basic""" +696 45 evaluator """rankbased""" +696 46 dataset """kinships""" +696 46 model """structuredembedding""" +696 46 loss """softplus""" +696 46 regularizer """no""" +696 46 optimizer """adadelta""" +696 46 training_loop """owa""" +696 46 negative_sampler """basic""" +696 46 evaluator """rankbased""" +696 47 dataset """kinships""" +696 47 model """structuredembedding""" +696 47 loss """softplus""" +696 47 regularizer """no""" +696 47 optimizer """adadelta""" +696 47 training_loop """owa""" +696 47 negative_sampler """basic""" +696 47 evaluator """rankbased""" +696 48 dataset """kinships""" +696 48 model """structuredembedding""" +696 48 loss """softplus""" +696 48 regularizer """no""" +696 48 optimizer """adadelta""" +696 48 training_loop """owa""" +696 48 negative_sampler """basic""" +696 48 evaluator 
"""rankbased""" +696 49 dataset """kinships""" +696 49 model """structuredembedding""" +696 49 loss """softplus""" +696 49 regularizer """no""" +696 49 optimizer """adadelta""" +696 49 training_loop """owa""" +696 49 negative_sampler """basic""" +696 49 evaluator """rankbased""" +696 50 dataset """kinships""" +696 50 model """structuredembedding""" +696 50 loss """softplus""" +696 50 regularizer """no""" +696 50 optimizer """adadelta""" +696 50 training_loop """owa""" +696 50 negative_sampler """basic""" +696 50 evaluator """rankbased""" +696 51 dataset """kinships""" +696 51 model """structuredembedding""" +696 51 loss """softplus""" +696 51 regularizer """no""" +696 51 optimizer """adadelta""" +696 51 training_loop """owa""" +696 51 negative_sampler """basic""" +696 51 evaluator """rankbased""" +696 52 dataset """kinships""" +696 52 model """structuredembedding""" +696 52 loss """softplus""" +696 52 regularizer """no""" +696 52 optimizer """adadelta""" +696 52 training_loop """owa""" +696 52 negative_sampler """basic""" +696 52 evaluator """rankbased""" +696 53 dataset """kinships""" +696 53 model """structuredembedding""" +696 53 loss """softplus""" +696 53 regularizer """no""" +696 53 optimizer """adadelta""" +696 53 training_loop """owa""" +696 53 negative_sampler """basic""" +696 53 evaluator """rankbased""" +696 54 dataset """kinships""" +696 54 model """structuredembedding""" +696 54 loss """softplus""" +696 54 regularizer """no""" +696 54 optimizer """adadelta""" +696 54 training_loop """owa""" +696 54 negative_sampler """basic""" +696 54 evaluator """rankbased""" +696 55 dataset """kinships""" +696 55 model """structuredembedding""" +696 55 loss """softplus""" +696 55 regularizer """no""" +696 55 optimizer """adadelta""" +696 55 training_loop """owa""" +696 55 negative_sampler """basic""" +696 55 evaluator """rankbased""" +697 1 model.embedding_dim 1.0 +697 1 model.scoring_fct_norm 2.0 +697 1 loss.margin 2.6626100435297917 +697 1 
loss.adversarial_temperature 0.8432320982271891 +697 1 negative_sampler.num_negs_per_pos 38.0 +697 1 training.batch_size 1.0 +697 2 model.embedding_dim 2.0 +697 2 model.scoring_fct_norm 1.0 +697 2 loss.margin 14.505070520746186 +697 2 loss.adversarial_temperature 0.4889604539696766 +697 2 negative_sampler.num_negs_per_pos 20.0 +697 2 training.batch_size 2.0 +697 3 model.embedding_dim 2.0 +697 3 model.scoring_fct_norm 2.0 +697 3 loss.margin 9.865121057217895 +697 3 loss.adversarial_temperature 0.8898035154128098 +697 3 negative_sampler.num_negs_per_pos 50.0 +697 3 training.batch_size 1.0 +697 4 model.embedding_dim 1.0 +697 4 model.scoring_fct_norm 1.0 +697 4 loss.margin 26.44923615971855 +697 4 loss.adversarial_temperature 0.9179441130575826 +697 4 negative_sampler.num_negs_per_pos 64.0 +697 4 training.batch_size 2.0 +697 5 model.embedding_dim 1.0 +697 5 model.scoring_fct_norm 2.0 +697 5 loss.margin 11.279969227820098 +697 5 loss.adversarial_temperature 0.9521322267163149 +697 5 negative_sampler.num_negs_per_pos 38.0 +697 5 training.batch_size 2.0 +697 6 model.embedding_dim 2.0 +697 6 model.scoring_fct_norm 1.0 +697 6 loss.margin 7.33452809135868 +697 6 loss.adversarial_temperature 0.6034878234449268 +697 6 negative_sampler.num_negs_per_pos 70.0 +697 6 training.batch_size 2.0 +697 7 model.embedding_dim 2.0 +697 7 model.scoring_fct_norm 2.0 +697 7 loss.margin 1.1071916932553023 +697 7 loss.adversarial_temperature 0.6843901302668896 +697 7 negative_sampler.num_negs_per_pos 77.0 +697 7 training.batch_size 2.0 +697 8 model.embedding_dim 0.0 +697 8 model.scoring_fct_norm 1.0 +697 8 loss.margin 6.794412766932871 +697 8 loss.adversarial_temperature 0.4345146204286542 +697 8 negative_sampler.num_negs_per_pos 99.0 +697 8 training.batch_size 2.0 +697 9 model.embedding_dim 2.0 +697 9 model.scoring_fct_norm 2.0 +697 9 loss.margin 5.5033676093999295 +697 9 loss.adversarial_temperature 0.5745838000094619 +697 9 negative_sampler.num_negs_per_pos 31.0 +697 9 training.batch_size 0.0 
+697 10 model.embedding_dim 1.0 +697 10 model.scoring_fct_norm 2.0 +697 10 loss.margin 15.807885083491819 +697 10 loss.adversarial_temperature 0.20238337635579562 +697 10 negative_sampler.num_negs_per_pos 86.0 +697 10 training.batch_size 0.0 +697 11 model.embedding_dim 0.0 +697 11 model.scoring_fct_norm 1.0 +697 11 loss.margin 14.797305458263292 +697 11 loss.adversarial_temperature 0.6823911763350412 +697 11 negative_sampler.num_negs_per_pos 93.0 +697 11 training.batch_size 0.0 +697 12 model.embedding_dim 0.0 +697 12 model.scoring_fct_norm 1.0 +697 12 loss.margin 28.711100784107778 +697 12 loss.adversarial_temperature 0.9796187059102014 +697 12 negative_sampler.num_negs_per_pos 98.0 +697 12 training.batch_size 0.0 +697 13 model.embedding_dim 1.0 +697 13 model.scoring_fct_norm 1.0 +697 13 loss.margin 11.385004836947465 +697 13 loss.adversarial_temperature 0.677182152212946 +697 13 negative_sampler.num_negs_per_pos 23.0 +697 13 training.batch_size 1.0 +697 14 model.embedding_dim 1.0 +697 14 model.scoring_fct_norm 2.0 +697 14 loss.margin 26.861183244807417 +697 14 loss.adversarial_temperature 0.1914503719952369 +697 14 negative_sampler.num_negs_per_pos 83.0 +697 14 training.batch_size 0.0 +697 15 model.embedding_dim 0.0 +697 15 model.scoring_fct_norm 2.0 +697 15 loss.margin 28.357669222745827 +697 15 loss.adversarial_temperature 0.5149495503540221 +697 15 negative_sampler.num_negs_per_pos 86.0 +697 15 training.batch_size 2.0 +697 16 model.embedding_dim 1.0 +697 16 model.scoring_fct_norm 1.0 +697 16 loss.margin 9.191687051153858 +697 16 loss.adversarial_temperature 0.8692989429189687 +697 16 negative_sampler.num_negs_per_pos 48.0 +697 16 training.batch_size 1.0 +697 17 model.embedding_dim 2.0 +697 17 model.scoring_fct_norm 1.0 +697 17 loss.margin 2.010743402057746 +697 17 loss.adversarial_temperature 0.9368595335887717 +697 17 negative_sampler.num_negs_per_pos 41.0 +697 17 training.batch_size 0.0 +697 18 model.embedding_dim 2.0 +697 18 model.scoring_fct_norm 1.0 +697 
18 loss.margin 12.101658175322687 +697 18 loss.adversarial_temperature 0.16409644360869832 +697 18 negative_sampler.num_negs_per_pos 31.0 +697 18 training.batch_size 2.0 +697 19 model.embedding_dim 1.0 +697 19 model.scoring_fct_norm 2.0 +697 19 loss.margin 14.79147993511932 +697 19 loss.adversarial_temperature 0.6325571625876036 +697 19 negative_sampler.num_negs_per_pos 70.0 +697 19 training.batch_size 1.0 +697 20 model.embedding_dim 2.0 +697 20 model.scoring_fct_norm 2.0 +697 20 loss.margin 4.575616068277406 +697 20 loss.adversarial_temperature 0.8440646089092293 +697 20 negative_sampler.num_negs_per_pos 36.0 +697 20 training.batch_size 2.0 +697 21 model.embedding_dim 0.0 +697 21 model.scoring_fct_norm 2.0 +697 21 loss.margin 26.645531567612494 +697 21 loss.adversarial_temperature 0.9719714726531443 +697 21 negative_sampler.num_negs_per_pos 38.0 +697 21 training.batch_size 2.0 +697 22 model.embedding_dim 0.0 +697 22 model.scoring_fct_norm 1.0 +697 22 loss.margin 22.630371875531015 +697 22 loss.adversarial_temperature 0.8725706431360922 +697 22 negative_sampler.num_negs_per_pos 59.0 +697 22 training.batch_size 0.0 +697 23 model.embedding_dim 0.0 +697 23 model.scoring_fct_norm 1.0 +697 23 loss.margin 18.115204251315657 +697 23 loss.adversarial_temperature 0.5642577122447487 +697 23 negative_sampler.num_negs_per_pos 36.0 +697 23 training.batch_size 0.0 +697 24 model.embedding_dim 1.0 +697 24 model.scoring_fct_norm 1.0 +697 24 loss.margin 28.227561171469286 +697 24 loss.adversarial_temperature 0.5283598496388308 +697 24 negative_sampler.num_negs_per_pos 77.0 +697 24 training.batch_size 2.0 +697 25 model.embedding_dim 0.0 +697 25 model.scoring_fct_norm 1.0 +697 25 loss.margin 14.729447166361629 +697 25 loss.adversarial_temperature 0.5539592190800894 +697 25 negative_sampler.num_negs_per_pos 45.0 +697 25 training.batch_size 1.0 +697 26 model.embedding_dim 0.0 +697 26 model.scoring_fct_norm 1.0 +697 26 loss.margin 16.822322602838348 +697 26 loss.adversarial_temperature 
0.6719332591745357 +697 26 negative_sampler.num_negs_per_pos 56.0 +697 26 training.batch_size 1.0 +697 27 model.embedding_dim 2.0 +697 27 model.scoring_fct_norm 2.0 +697 27 loss.margin 6.529674272472039 +697 27 loss.adversarial_temperature 0.12278225895274422 +697 27 negative_sampler.num_negs_per_pos 74.0 +697 27 training.batch_size 1.0 +697 28 model.embedding_dim 0.0 +697 28 model.scoring_fct_norm 1.0 +697 28 loss.margin 1.3314172375782576 +697 28 loss.adversarial_temperature 0.6666209142779362 +697 28 negative_sampler.num_negs_per_pos 35.0 +697 28 training.batch_size 0.0 +697 29 model.embedding_dim 1.0 +697 29 model.scoring_fct_norm 1.0 +697 29 loss.margin 5.391058572495181 +697 29 loss.adversarial_temperature 0.5637258005369954 +697 29 negative_sampler.num_negs_per_pos 66.0 +697 29 training.batch_size 2.0 +697 30 model.embedding_dim 1.0 +697 30 model.scoring_fct_norm 2.0 +697 30 loss.margin 16.43222188786353 +697 30 loss.adversarial_temperature 0.6202181137186709 +697 30 negative_sampler.num_negs_per_pos 90.0 +697 30 training.batch_size 1.0 +697 31 model.embedding_dim 2.0 +697 31 model.scoring_fct_norm 2.0 +697 31 loss.margin 4.427525573874254 +697 31 loss.adversarial_temperature 0.6078260237089996 +697 31 negative_sampler.num_negs_per_pos 93.0 +697 31 training.batch_size 2.0 +697 32 model.embedding_dim 0.0 +697 32 model.scoring_fct_norm 1.0 +697 32 loss.margin 24.17144554258551 +697 32 loss.adversarial_temperature 0.2673984588315129 +697 32 negative_sampler.num_negs_per_pos 46.0 +697 32 training.batch_size 1.0 +697 33 model.embedding_dim 1.0 +697 33 model.scoring_fct_norm 2.0 +697 33 loss.margin 12.337529031158818 +697 33 loss.adversarial_temperature 0.6856769504517392 +697 33 negative_sampler.num_negs_per_pos 21.0 +697 33 training.batch_size 2.0 +697 34 model.embedding_dim 1.0 +697 34 model.scoring_fct_norm 1.0 +697 34 loss.margin 17.245633729733562 +697 34 loss.adversarial_temperature 0.5738030843648361 +697 34 negative_sampler.num_negs_per_pos 42.0 +697 34 
training.batch_size 0.0 +697 35 model.embedding_dim 2.0 +697 35 model.scoring_fct_norm 1.0 +697 35 loss.margin 8.203232385590695 +697 35 loss.adversarial_temperature 0.7319761327637075 +697 35 negative_sampler.num_negs_per_pos 85.0 +697 35 training.batch_size 2.0 +697 36 model.embedding_dim 0.0 +697 36 model.scoring_fct_norm 2.0 +697 36 loss.margin 12.024558927973965 +697 36 loss.adversarial_temperature 0.11017617390815221 +697 36 negative_sampler.num_negs_per_pos 10.0 +697 36 training.batch_size 1.0 +697 37 model.embedding_dim 2.0 +697 37 model.scoring_fct_norm 1.0 +697 37 loss.margin 29.871482455236826 +697 37 loss.adversarial_temperature 0.7312432027277874 +697 37 negative_sampler.num_negs_per_pos 17.0 +697 37 training.batch_size 1.0 +697 38 model.embedding_dim 0.0 +697 38 model.scoring_fct_norm 2.0 +697 38 loss.margin 3.6947753762561506 +697 38 loss.adversarial_temperature 0.9762757840647122 +697 38 negative_sampler.num_negs_per_pos 31.0 +697 38 training.batch_size 0.0 +697 39 model.embedding_dim 0.0 +697 39 model.scoring_fct_norm 1.0 +697 39 loss.margin 10.970125678162205 +697 39 loss.adversarial_temperature 0.3496125610267609 +697 39 negative_sampler.num_negs_per_pos 5.0 +697 39 training.batch_size 1.0 +697 40 model.embedding_dim 2.0 +697 40 model.scoring_fct_norm 2.0 +697 40 loss.margin 21.42162774639302 +697 40 loss.adversarial_temperature 0.6529615020065919 +697 40 negative_sampler.num_negs_per_pos 1.0 +697 40 training.batch_size 0.0 +697 41 model.embedding_dim 0.0 +697 41 model.scoring_fct_norm 1.0 +697 41 loss.margin 22.767507045600436 +697 41 loss.adversarial_temperature 0.44516587240253125 +697 41 negative_sampler.num_negs_per_pos 90.0 +697 41 training.batch_size 0.0 +697 42 model.embedding_dim 2.0 +697 42 model.scoring_fct_norm 1.0 +697 42 loss.margin 29.806597378454327 +697 42 loss.adversarial_temperature 0.4339953829322154 +697 42 negative_sampler.num_negs_per_pos 68.0 +697 42 training.batch_size 0.0 +697 43 model.embedding_dim 1.0 +697 43 
model.scoring_fct_norm 2.0 +697 43 loss.margin 16.73640288018764 +697 43 loss.adversarial_temperature 0.6047968054142493 +697 43 negative_sampler.num_negs_per_pos 21.0 +697 43 training.batch_size 1.0 +697 44 model.embedding_dim 1.0 +697 44 model.scoring_fct_norm 1.0 +697 44 loss.margin 17.240323400207785 +697 44 loss.adversarial_temperature 0.15037406316570634 +697 44 negative_sampler.num_negs_per_pos 68.0 +697 44 training.batch_size 1.0 +697 45 model.embedding_dim 1.0 +697 45 model.scoring_fct_norm 1.0 +697 45 loss.margin 18.8005213993081 +697 45 loss.adversarial_temperature 0.1126925378584406 +697 45 negative_sampler.num_negs_per_pos 0.0 +697 45 training.batch_size 0.0 +697 46 model.embedding_dim 0.0 +697 46 model.scoring_fct_norm 2.0 +697 46 loss.margin 7.822323498868159 +697 46 loss.adversarial_temperature 0.8257749757130602 +697 46 negative_sampler.num_negs_per_pos 75.0 +697 46 training.batch_size 1.0 +697 47 model.embedding_dim 2.0 +697 47 model.scoring_fct_norm 1.0 +697 47 loss.margin 12.386454274860682 +697 47 loss.adversarial_temperature 0.4283249650455514 +697 47 negative_sampler.num_negs_per_pos 86.0 +697 47 training.batch_size 1.0 +697 48 model.embedding_dim 0.0 +697 48 model.scoring_fct_norm 2.0 +697 48 loss.margin 26.252712139931454 +697 48 loss.adversarial_temperature 0.8911722585751974 +697 48 negative_sampler.num_negs_per_pos 82.0 +697 48 training.batch_size 1.0 +697 49 model.embedding_dim 0.0 +697 49 model.scoring_fct_norm 1.0 +697 49 loss.margin 27.814015258743943 +697 49 loss.adversarial_temperature 0.8980388970840043 +697 49 negative_sampler.num_negs_per_pos 22.0 +697 49 training.batch_size 0.0 +697 50 model.embedding_dim 1.0 +697 50 model.scoring_fct_norm 2.0 +697 50 loss.margin 9.037448867531925 +697 50 loss.adversarial_temperature 0.5173466196363841 +697 50 negative_sampler.num_negs_per_pos 24.0 +697 50 training.batch_size 1.0 +697 51 model.embedding_dim 2.0 +697 51 model.scoring_fct_norm 1.0 +697 51 loss.margin 7.470256014947393 +697 51 
loss.adversarial_temperature 0.5454920296407233 +697 51 negative_sampler.num_negs_per_pos 30.0 +697 51 training.batch_size 2.0 +697 52 model.embedding_dim 0.0 +697 52 model.scoring_fct_norm 1.0 +697 52 loss.margin 17.30060103777715 +697 52 loss.adversarial_temperature 0.36703258367881153 +697 52 negative_sampler.num_negs_per_pos 7.0 +697 52 training.batch_size 2.0 +697 53 model.embedding_dim 2.0 +697 53 model.scoring_fct_norm 1.0 +697 53 loss.margin 8.525562452309032 +697 53 loss.adversarial_temperature 0.4384273785589603 +697 53 negative_sampler.num_negs_per_pos 85.0 +697 53 training.batch_size 1.0 +697 54 model.embedding_dim 2.0 +697 54 model.scoring_fct_norm 2.0 +697 54 loss.margin 5.222056762224355 +697 54 loss.adversarial_temperature 0.24893056065357133 +697 54 negative_sampler.num_negs_per_pos 96.0 +697 54 training.batch_size 1.0 +697 1 dataset """kinships""" +697 1 model """structuredembedding""" +697 1 loss """nssa""" +697 1 regularizer """no""" +697 1 optimizer """adadelta""" +697 1 training_loop """owa""" +697 1 negative_sampler """basic""" +697 1 evaluator """rankbased""" +697 2 dataset """kinships""" +697 2 model """structuredembedding""" +697 2 loss """nssa""" +697 2 regularizer """no""" +697 2 optimizer """adadelta""" +697 2 training_loop """owa""" +697 2 negative_sampler """basic""" +697 2 evaluator """rankbased""" +697 3 dataset """kinships""" +697 3 model """structuredembedding""" +697 3 loss """nssa""" +697 3 regularizer """no""" +697 3 optimizer """adadelta""" +697 3 training_loop """owa""" +697 3 negative_sampler """basic""" +697 3 evaluator """rankbased""" +697 4 dataset """kinships""" +697 4 model """structuredembedding""" +697 4 loss """nssa""" +697 4 regularizer """no""" +697 4 optimizer """adadelta""" +697 4 training_loop """owa""" +697 4 negative_sampler """basic""" +697 4 evaluator """rankbased""" +697 5 dataset """kinships""" +697 5 model """structuredembedding""" +697 5 loss """nssa""" +697 5 regularizer """no""" +697 5 optimizer 
"""adadelta""" +697 5 training_loop """owa""" +697 5 negative_sampler """basic""" +697 5 evaluator """rankbased""" +697 6 dataset """kinships""" +697 6 model """structuredembedding""" +697 6 loss """nssa""" +697 6 regularizer """no""" +697 6 optimizer """adadelta""" +697 6 training_loop """owa""" +697 6 negative_sampler """basic""" +697 6 evaluator """rankbased""" +697 7 dataset """kinships""" +697 7 model """structuredembedding""" +697 7 loss """nssa""" +697 7 regularizer """no""" +697 7 optimizer """adadelta""" +697 7 training_loop """owa""" +697 7 negative_sampler """basic""" +697 7 evaluator """rankbased""" +697 8 dataset """kinships""" +697 8 model """structuredembedding""" +697 8 loss """nssa""" +697 8 regularizer """no""" +697 8 optimizer """adadelta""" +697 8 training_loop """owa""" +697 8 negative_sampler """basic""" +697 8 evaluator """rankbased""" +697 9 dataset """kinships""" +697 9 model """structuredembedding""" +697 9 loss """nssa""" +697 9 regularizer """no""" +697 9 optimizer """adadelta""" +697 9 training_loop """owa""" +697 9 negative_sampler """basic""" +697 9 evaluator """rankbased""" +697 10 dataset """kinships""" +697 10 model """structuredembedding""" +697 10 loss """nssa""" +697 10 regularizer """no""" +697 10 optimizer """adadelta""" +697 10 training_loop """owa""" +697 10 negative_sampler """basic""" +697 10 evaluator """rankbased""" +697 11 dataset """kinships""" +697 11 model """structuredembedding""" +697 11 loss """nssa""" +697 11 regularizer """no""" +697 11 optimizer """adadelta""" +697 11 training_loop """owa""" +697 11 negative_sampler """basic""" +697 11 evaluator """rankbased""" +697 12 dataset """kinships""" +697 12 model """structuredembedding""" +697 12 loss """nssa""" +697 12 regularizer """no""" +697 12 optimizer """adadelta""" +697 12 training_loop """owa""" +697 12 negative_sampler """basic""" +697 12 evaluator """rankbased""" +697 13 dataset """kinships""" +697 13 model """structuredembedding""" +697 13 loss """nssa""" 
+697 13 regularizer """no""" +697 13 optimizer """adadelta""" +697 13 training_loop """owa""" +697 13 negative_sampler """basic""" +697 13 evaluator """rankbased""" +697 14 dataset """kinships""" +697 14 model """structuredembedding""" +697 14 loss """nssa""" +697 14 regularizer """no""" +697 14 optimizer """adadelta""" +697 14 training_loop """owa""" +697 14 negative_sampler """basic""" +697 14 evaluator """rankbased""" +697 15 dataset """kinships""" +697 15 model """structuredembedding""" +697 15 loss """nssa""" +697 15 regularizer """no""" +697 15 optimizer """adadelta""" +697 15 training_loop """owa""" +697 15 negative_sampler """basic""" +697 15 evaluator """rankbased""" +697 16 dataset """kinships""" +697 16 model """structuredembedding""" +697 16 loss """nssa""" +697 16 regularizer """no""" +697 16 optimizer """adadelta""" +697 16 training_loop """owa""" +697 16 negative_sampler """basic""" +697 16 evaluator """rankbased""" +697 17 dataset """kinships""" +697 17 model """structuredembedding""" +697 17 loss """nssa""" +697 17 regularizer """no""" +697 17 optimizer """adadelta""" +697 17 training_loop """owa""" +697 17 negative_sampler """basic""" +697 17 evaluator """rankbased""" +697 18 dataset """kinships""" +697 18 model """structuredembedding""" +697 18 loss """nssa""" +697 18 regularizer """no""" +697 18 optimizer """adadelta""" +697 18 training_loop """owa""" +697 18 negative_sampler """basic""" +697 18 evaluator """rankbased""" +697 19 dataset """kinships""" +697 19 model """structuredembedding""" +697 19 loss """nssa""" +697 19 regularizer """no""" +697 19 optimizer """adadelta""" +697 19 training_loop """owa""" +697 19 negative_sampler """basic""" +697 19 evaluator """rankbased""" +697 20 dataset """kinships""" +697 20 model """structuredembedding""" +697 20 loss """nssa""" +697 20 regularizer """no""" +697 20 optimizer """adadelta""" +697 20 training_loop """owa""" +697 20 negative_sampler """basic""" +697 20 evaluator """rankbased""" +697 21 
dataset """kinships""" +697 21 model """structuredembedding""" +697 21 loss """nssa""" +697 21 regularizer """no""" +697 21 optimizer """adadelta""" +697 21 training_loop """owa""" +697 21 negative_sampler """basic""" +697 21 evaluator """rankbased""" +697 22 dataset """kinships""" +697 22 model """structuredembedding""" +697 22 loss """nssa""" +697 22 regularizer """no""" +697 22 optimizer """adadelta""" +697 22 training_loop """owa""" +697 22 negative_sampler """basic""" +697 22 evaluator """rankbased""" +697 23 dataset """kinships""" +697 23 model """structuredembedding""" +697 23 loss """nssa""" +697 23 regularizer """no""" +697 23 optimizer """adadelta""" +697 23 training_loop """owa""" +697 23 negative_sampler """basic""" +697 23 evaluator """rankbased""" +697 24 dataset """kinships""" +697 24 model """structuredembedding""" +697 24 loss """nssa""" +697 24 regularizer """no""" +697 24 optimizer """adadelta""" +697 24 training_loop """owa""" +697 24 negative_sampler """basic""" +697 24 evaluator """rankbased""" +697 25 dataset """kinships""" +697 25 model """structuredembedding""" +697 25 loss """nssa""" +697 25 regularizer """no""" +697 25 optimizer """adadelta""" +697 25 training_loop """owa""" +697 25 negative_sampler """basic""" +697 25 evaluator """rankbased""" +697 26 dataset """kinships""" +697 26 model """structuredembedding""" +697 26 loss """nssa""" +697 26 regularizer """no""" +697 26 optimizer """adadelta""" +697 26 training_loop """owa""" +697 26 negative_sampler """basic""" +697 26 evaluator """rankbased""" +697 27 dataset """kinships""" +697 27 model """structuredembedding""" +697 27 loss """nssa""" +697 27 regularizer """no""" +697 27 optimizer """adadelta""" +697 27 training_loop """owa""" +697 27 negative_sampler """basic""" +697 27 evaluator """rankbased""" +697 28 dataset """kinships""" +697 28 model """structuredembedding""" +697 28 loss """nssa""" +697 28 regularizer """no""" +697 28 optimizer """adadelta""" +697 28 training_loop 
"""owa""" +697 28 negative_sampler """basic""" +697 28 evaluator """rankbased""" +697 29 dataset """kinships""" +697 29 model """structuredembedding""" +697 29 loss """nssa""" +697 29 regularizer """no""" +697 29 optimizer """adadelta""" +697 29 training_loop """owa""" +697 29 negative_sampler """basic""" +697 29 evaluator """rankbased""" +697 30 dataset """kinships""" +697 30 model """structuredembedding""" +697 30 loss """nssa""" +697 30 regularizer """no""" +697 30 optimizer """adadelta""" +697 30 training_loop """owa""" +697 30 negative_sampler """basic""" +697 30 evaluator """rankbased""" +697 31 dataset """kinships""" +697 31 model """structuredembedding""" +697 31 loss """nssa""" +697 31 regularizer """no""" +697 31 optimizer """adadelta""" +697 31 training_loop """owa""" +697 31 negative_sampler """basic""" +697 31 evaluator """rankbased""" +697 32 dataset """kinships""" +697 32 model """structuredembedding""" +697 32 loss """nssa""" +697 32 regularizer """no""" +697 32 optimizer """adadelta""" +697 32 training_loop """owa""" +697 32 negative_sampler """basic""" +697 32 evaluator """rankbased""" +697 33 dataset """kinships""" +697 33 model """structuredembedding""" +697 33 loss """nssa""" +697 33 regularizer """no""" +697 33 optimizer """adadelta""" +697 33 training_loop """owa""" +697 33 negative_sampler """basic""" +697 33 evaluator """rankbased""" +697 34 dataset """kinships""" +697 34 model """structuredembedding""" +697 34 loss """nssa""" +697 34 regularizer """no""" +697 34 optimizer """adadelta""" +697 34 training_loop """owa""" +697 34 negative_sampler """basic""" +697 34 evaluator """rankbased""" +697 35 dataset """kinships""" +697 35 model """structuredembedding""" +697 35 loss """nssa""" +697 35 regularizer """no""" +697 35 optimizer """adadelta""" +697 35 training_loop """owa""" +697 35 negative_sampler """basic""" +697 35 evaluator """rankbased""" +697 36 dataset """kinships""" +697 36 model """structuredembedding""" +697 36 loss """nssa""" 
+697 36 regularizer """no""" +697 36 optimizer """adadelta""" +697 36 training_loop """owa""" +697 36 negative_sampler """basic""" +697 36 evaluator """rankbased""" +697 37 dataset """kinships""" +697 37 model """structuredembedding""" +697 37 loss """nssa""" +697 37 regularizer """no""" +697 37 optimizer """adadelta""" +697 37 training_loop """owa""" +697 37 negative_sampler """basic""" +697 37 evaluator """rankbased""" +697 38 dataset """kinships""" +697 38 model """structuredembedding""" +697 38 loss """nssa""" +697 38 regularizer """no""" +697 38 optimizer """adadelta""" +697 38 training_loop """owa""" +697 38 negative_sampler """basic""" +697 38 evaluator """rankbased""" +697 39 dataset """kinships""" +697 39 model """structuredembedding""" +697 39 loss """nssa""" +697 39 regularizer """no""" +697 39 optimizer """adadelta""" +697 39 training_loop """owa""" +697 39 negative_sampler """basic""" +697 39 evaluator """rankbased""" +697 40 dataset """kinships""" +697 40 model """structuredembedding""" +697 40 loss """nssa""" +697 40 regularizer """no""" +697 40 optimizer """adadelta""" +697 40 training_loop """owa""" +697 40 negative_sampler """basic""" +697 40 evaluator """rankbased""" +697 41 dataset """kinships""" +697 41 model """structuredembedding""" +697 41 loss """nssa""" +697 41 regularizer """no""" +697 41 optimizer """adadelta""" +697 41 training_loop """owa""" +697 41 negative_sampler """basic""" +697 41 evaluator """rankbased""" +697 42 dataset """kinships""" +697 42 model """structuredembedding""" +697 42 loss """nssa""" +697 42 regularizer """no""" +697 42 optimizer """adadelta""" +697 42 training_loop """owa""" +697 42 negative_sampler """basic""" +697 42 evaluator """rankbased""" +697 43 dataset """kinships""" +697 43 model """structuredembedding""" +697 43 loss """nssa""" +697 43 regularizer """no""" +697 43 optimizer """adadelta""" +697 43 training_loop """owa""" +697 43 negative_sampler """basic""" +697 43 evaluator """rankbased""" +697 44 
dataset """kinships""" +697 44 model """structuredembedding""" +697 44 loss """nssa""" +697 44 regularizer """no""" +697 44 optimizer """adadelta""" +697 44 training_loop """owa""" +697 44 negative_sampler """basic""" +697 44 evaluator """rankbased""" +697 45 dataset """kinships""" +697 45 model """structuredembedding""" +697 45 loss """nssa""" +697 45 regularizer """no""" +697 45 optimizer """adadelta""" +697 45 training_loop """owa""" +697 45 negative_sampler """basic""" +697 45 evaluator """rankbased""" +697 46 dataset """kinships""" +697 46 model """structuredembedding""" +697 46 loss """nssa""" +697 46 regularizer """no""" +697 46 optimizer """adadelta""" +697 46 training_loop """owa""" +697 46 negative_sampler """basic""" +697 46 evaluator """rankbased""" +697 47 dataset """kinships""" +697 47 model """structuredembedding""" +697 47 loss """nssa""" +697 47 regularizer """no""" +697 47 optimizer """adadelta""" +697 47 training_loop """owa""" +697 47 negative_sampler """basic""" +697 47 evaluator """rankbased""" +697 48 dataset """kinships""" +697 48 model """structuredembedding""" +697 48 loss """nssa""" +697 48 regularizer """no""" +697 48 optimizer """adadelta""" +697 48 training_loop """owa""" +697 48 negative_sampler """basic""" +697 48 evaluator """rankbased""" +697 49 dataset """kinships""" +697 49 model """structuredembedding""" +697 49 loss """nssa""" +697 49 regularizer """no""" +697 49 optimizer """adadelta""" +697 49 training_loop """owa""" +697 49 negative_sampler """basic""" +697 49 evaluator """rankbased""" +697 50 dataset """kinships""" +697 50 model """structuredembedding""" +697 50 loss """nssa""" +697 50 regularizer """no""" +697 50 optimizer """adadelta""" +697 50 training_loop """owa""" +697 50 negative_sampler """basic""" +697 50 evaluator """rankbased""" +697 51 dataset """kinships""" +697 51 model """structuredembedding""" +697 51 loss """nssa""" +697 51 regularizer """no""" +697 51 optimizer """adadelta""" +697 51 training_loop 
"""owa""" +697 51 negative_sampler """basic""" +697 51 evaluator """rankbased""" +697 52 dataset """kinships""" +697 52 model """structuredembedding""" +697 52 loss """nssa""" +697 52 regularizer """no""" +697 52 optimizer """adadelta""" +697 52 training_loop """owa""" +697 52 negative_sampler """basic""" +697 52 evaluator """rankbased""" +697 53 dataset """kinships""" +697 53 model """structuredembedding""" +697 53 loss """nssa""" +697 53 regularizer """no""" +697 53 optimizer """adadelta""" +697 53 training_loop """owa""" +697 53 negative_sampler """basic""" +697 53 evaluator """rankbased""" +697 54 dataset """kinships""" +697 54 model """structuredembedding""" +697 54 loss """nssa""" +697 54 regularizer """no""" +697 54 optimizer """adadelta""" +697 54 training_loop """owa""" +697 54 negative_sampler """basic""" +697 54 evaluator """rankbased""" +698 1 model.embedding_dim 2.0 +698 1 model.scoring_fct_norm 2.0 +698 1 loss.margin 7.796205187008863 +698 1 loss.adversarial_temperature 0.43087100183145666 +698 1 negative_sampler.num_negs_per_pos 52.0 +698 1 training.batch_size 2.0 +698 2 model.embedding_dim 2.0 +698 2 model.scoring_fct_norm 1.0 +698 2 loss.margin 17.827399293415084 +698 2 loss.adversarial_temperature 0.8146117432521066 +698 2 negative_sampler.num_negs_per_pos 4.0 +698 2 training.batch_size 2.0 +698 3 model.embedding_dim 1.0 +698 3 model.scoring_fct_norm 1.0 +698 3 loss.margin 25.239941843681045 +698 3 loss.adversarial_temperature 0.6385686270953038 +698 3 negative_sampler.num_negs_per_pos 79.0 +698 3 training.batch_size 2.0 +698 4 model.embedding_dim 2.0 +698 4 model.scoring_fct_norm 1.0 +698 4 loss.margin 22.21427512041648 +698 4 loss.adversarial_temperature 0.22521882737303067 +698 4 negative_sampler.num_negs_per_pos 26.0 +698 4 training.batch_size 0.0 +698 5 model.embedding_dim 1.0 +698 5 model.scoring_fct_norm 2.0 +698 5 loss.margin 17.66853661620799 +698 5 loss.adversarial_temperature 0.26067962415643914 +698 5 negative_sampler.num_negs_per_pos 
65.0 +698 5 training.batch_size 0.0 +698 6 model.embedding_dim 2.0 +698 6 model.scoring_fct_norm 2.0 +698 6 loss.margin 17.290647025478307 +698 6 loss.adversarial_temperature 0.3574999625449379 +698 6 negative_sampler.num_negs_per_pos 9.0 +698 6 training.batch_size 1.0 +698 7 model.embedding_dim 0.0 +698 7 model.scoring_fct_norm 1.0 +698 7 loss.margin 18.77373664049305 +698 7 loss.adversarial_temperature 0.6878573584928792 +698 7 negative_sampler.num_negs_per_pos 90.0 +698 7 training.batch_size 0.0 +698 8 model.embedding_dim 0.0 +698 8 model.scoring_fct_norm 2.0 +698 8 loss.margin 14.463519334840907 +698 8 loss.adversarial_temperature 0.7994252930983938 +698 8 negative_sampler.num_negs_per_pos 70.0 +698 8 training.batch_size 0.0 +698 9 model.embedding_dim 1.0 +698 9 model.scoring_fct_norm 2.0 +698 9 loss.margin 5.876623157627652 +698 9 loss.adversarial_temperature 0.377431851262374 +698 9 negative_sampler.num_negs_per_pos 18.0 +698 9 training.batch_size 1.0 +698 10 model.embedding_dim 2.0 +698 10 model.scoring_fct_norm 1.0 +698 10 loss.margin 24.791377755025206 +698 10 loss.adversarial_temperature 0.9386093618611872 +698 10 negative_sampler.num_negs_per_pos 38.0 +698 10 training.batch_size 0.0 +698 11 model.embedding_dim 0.0 +698 11 model.scoring_fct_norm 2.0 +698 11 loss.margin 21.13843461399248 +698 11 loss.adversarial_temperature 0.3516189968089761 +698 11 negative_sampler.num_negs_per_pos 95.0 +698 11 training.batch_size 0.0 +698 12 model.embedding_dim 1.0 +698 12 model.scoring_fct_norm 2.0 +698 12 loss.margin 19.4444741871786 +698 12 loss.adversarial_temperature 0.7234057019819932 +698 12 negative_sampler.num_negs_per_pos 77.0 +698 12 training.batch_size 2.0 +698 13 model.embedding_dim 2.0 +698 13 model.scoring_fct_norm 2.0 +698 13 loss.margin 19.216812455284977 +698 13 loss.adversarial_temperature 0.5179434369271815 +698 13 negative_sampler.num_negs_per_pos 3.0 +698 13 training.batch_size 0.0 +698 14 model.embedding_dim 0.0 +698 14 model.scoring_fct_norm 1.0 
+698 14 loss.margin 20.16538951459711 +698 14 loss.adversarial_temperature 0.5016845876965821 +698 14 negative_sampler.num_negs_per_pos 69.0 +698 14 training.batch_size 2.0 +698 15 model.embedding_dim 2.0 +698 15 model.scoring_fct_norm 1.0 +698 15 loss.margin 25.17563253394232 +698 15 loss.adversarial_temperature 0.7567538553145087 +698 15 negative_sampler.num_negs_per_pos 40.0 +698 15 training.batch_size 1.0 +698 16 model.embedding_dim 1.0 +698 16 model.scoring_fct_norm 2.0 +698 16 loss.margin 8.899774438981112 +698 16 loss.adversarial_temperature 0.6164032452137336 +698 16 negative_sampler.num_negs_per_pos 28.0 +698 16 training.batch_size 2.0 +698 17 model.embedding_dim 0.0 +698 17 model.scoring_fct_norm 1.0 +698 17 loss.margin 19.115437318728745 +698 17 loss.adversarial_temperature 0.28651620931484106 +698 17 negative_sampler.num_negs_per_pos 41.0 +698 17 training.batch_size 1.0 +698 18 model.embedding_dim 1.0 +698 18 model.scoring_fct_norm 2.0 +698 18 loss.margin 14.38750348839979 +698 18 loss.adversarial_temperature 0.2559697727722488 +698 18 negative_sampler.num_negs_per_pos 20.0 +698 18 training.batch_size 2.0 +698 19 model.embedding_dim 0.0 +698 19 model.scoring_fct_norm 2.0 +698 19 loss.margin 9.575993711008062 +698 19 loss.adversarial_temperature 0.11590128902946652 +698 19 negative_sampler.num_negs_per_pos 64.0 +698 19 training.batch_size 0.0 +698 20 model.embedding_dim 2.0 +698 20 model.scoring_fct_norm 1.0 +698 20 loss.margin 15.134235539106461 +698 20 loss.adversarial_temperature 0.6967015465557037 +698 20 negative_sampler.num_negs_per_pos 36.0 +698 20 training.batch_size 2.0 +698 21 model.embedding_dim 1.0 +698 21 model.scoring_fct_norm 1.0 +698 21 loss.margin 9.551401039589683 +698 21 loss.adversarial_temperature 0.26120562291808175 +698 21 negative_sampler.num_negs_per_pos 72.0 +698 21 training.batch_size 2.0 +698 22 model.embedding_dim 1.0 +698 22 model.scoring_fct_norm 2.0 +698 22 loss.margin 19.213239044008816 +698 22 
loss.adversarial_temperature 0.1727603770268395 +698 22 negative_sampler.num_negs_per_pos 20.0 +698 22 training.batch_size 0.0 +698 23 model.embedding_dim 0.0 +698 23 model.scoring_fct_norm 2.0 +698 23 loss.margin 1.4945367311243003 +698 23 loss.adversarial_temperature 0.4501816626312817 +698 23 negative_sampler.num_negs_per_pos 44.0 +698 23 training.batch_size 1.0 +698 24 model.embedding_dim 2.0 +698 24 model.scoring_fct_norm 2.0 +698 24 loss.margin 19.95004874345234 +698 24 loss.adversarial_temperature 0.39014640739256623 +698 24 negative_sampler.num_negs_per_pos 0.0 +698 24 training.batch_size 1.0 +698 25 model.embedding_dim 2.0 +698 25 model.scoring_fct_norm 2.0 +698 25 loss.margin 22.147178485876417 +698 25 loss.adversarial_temperature 0.5824690425918796 +698 25 negative_sampler.num_negs_per_pos 65.0 +698 25 training.batch_size 2.0 +698 26 model.embedding_dim 2.0 +698 26 model.scoring_fct_norm 1.0 +698 26 loss.margin 2.3594709331662247 +698 26 loss.adversarial_temperature 0.8403696984216755 +698 26 negative_sampler.num_negs_per_pos 97.0 +698 26 training.batch_size 0.0 +698 27 model.embedding_dim 1.0 +698 27 model.scoring_fct_norm 1.0 +698 27 loss.margin 8.03467514972748 +698 27 loss.adversarial_temperature 0.9976106549286416 +698 27 negative_sampler.num_negs_per_pos 14.0 +698 27 training.batch_size 2.0 +698 28 model.embedding_dim 0.0 +698 28 model.scoring_fct_norm 1.0 +698 28 loss.margin 25.341524755901016 +698 28 loss.adversarial_temperature 0.33856296744575476 +698 28 negative_sampler.num_negs_per_pos 16.0 +698 28 training.batch_size 1.0 +698 29 model.embedding_dim 2.0 +698 29 model.scoring_fct_norm 2.0 +698 29 loss.margin 15.838175600108048 +698 29 loss.adversarial_temperature 0.13002614409995503 +698 29 negative_sampler.num_negs_per_pos 89.0 +698 29 training.batch_size 0.0 +698 30 model.embedding_dim 2.0 +698 30 model.scoring_fct_norm 1.0 +698 30 loss.margin 4.614112946375746 +698 30 loss.adversarial_temperature 0.3338169381583856 +698 30 
negative_sampler.num_negs_per_pos 9.0 +698 30 training.batch_size 1.0 +698 31 model.embedding_dim 2.0 +698 31 model.scoring_fct_norm 1.0 +698 31 loss.margin 15.004288579139395 +698 31 loss.adversarial_temperature 0.3390511093381055 +698 31 negative_sampler.num_negs_per_pos 66.0 +698 31 training.batch_size 2.0 +698 32 model.embedding_dim 0.0 +698 32 model.scoring_fct_norm 1.0 +698 32 loss.margin 15.223822911521937 +698 32 loss.adversarial_temperature 0.5108016919847118 +698 32 negative_sampler.num_negs_per_pos 20.0 +698 32 training.batch_size 0.0 +698 33 model.embedding_dim 2.0 +698 33 model.scoring_fct_norm 2.0 +698 33 loss.margin 27.473427654337915 +698 33 loss.adversarial_temperature 0.4031191615621418 +698 33 negative_sampler.num_negs_per_pos 90.0 +698 33 training.batch_size 1.0 +698 34 model.embedding_dim 0.0 +698 34 model.scoring_fct_norm 2.0 +698 34 loss.margin 10.132462990825122 +698 34 loss.adversarial_temperature 0.30682348034082835 +698 34 negative_sampler.num_negs_per_pos 48.0 +698 34 training.batch_size 0.0 +698 35 model.embedding_dim 0.0 +698 35 model.scoring_fct_norm 2.0 +698 35 loss.margin 10.30721659172187 +698 35 loss.adversarial_temperature 0.8862154616939687 +698 35 negative_sampler.num_negs_per_pos 94.0 +698 35 training.batch_size 2.0 +698 36 model.embedding_dim 0.0 +698 36 model.scoring_fct_norm 2.0 +698 36 loss.margin 28.159275949688663 +698 36 loss.adversarial_temperature 0.2165463442330401 +698 36 negative_sampler.num_negs_per_pos 36.0 +698 36 training.batch_size 2.0 +698 37 model.embedding_dim 0.0 +698 37 model.scoring_fct_norm 2.0 +698 37 loss.margin 8.843005458709605 +698 37 loss.adversarial_temperature 0.8811685975169382 +698 37 negative_sampler.num_negs_per_pos 29.0 +698 37 training.batch_size 2.0 +698 38 model.embedding_dim 2.0 +698 38 model.scoring_fct_norm 2.0 +698 38 loss.margin 9.694237091639579 +698 38 loss.adversarial_temperature 0.11080152837162267 +698 38 negative_sampler.num_negs_per_pos 60.0 +698 38 training.batch_size 1.0 
+698 39 model.embedding_dim 0.0 +698 39 model.scoring_fct_norm 2.0 +698 39 loss.margin 15.901019593749702 +698 39 loss.adversarial_temperature 0.5345294798730087 +698 39 negative_sampler.num_negs_per_pos 91.0 +698 39 training.batch_size 1.0 +698 40 model.embedding_dim 2.0 +698 40 model.scoring_fct_norm 2.0 +698 40 loss.margin 8.65172715966816 +698 40 loss.adversarial_temperature 0.9151150625925466 +698 40 negative_sampler.num_negs_per_pos 51.0 +698 40 training.batch_size 1.0 +698 41 model.embedding_dim 0.0 +698 41 model.scoring_fct_norm 2.0 +698 41 loss.margin 27.71378915116649 +698 41 loss.adversarial_temperature 0.5815430841973067 +698 41 negative_sampler.num_negs_per_pos 29.0 +698 41 training.batch_size 2.0 +698 42 model.embedding_dim 2.0 +698 42 model.scoring_fct_norm 2.0 +698 42 loss.margin 1.5921197589654392 +698 42 loss.adversarial_temperature 0.38584282886031995 +698 42 negative_sampler.num_negs_per_pos 44.0 +698 42 training.batch_size 2.0 +698 43 model.embedding_dim 1.0 +698 43 model.scoring_fct_norm 1.0 +698 43 loss.margin 3.6537448310453606 +698 43 loss.adversarial_temperature 0.4069203308999336 +698 43 negative_sampler.num_negs_per_pos 93.0 +698 43 training.batch_size 2.0 +698 44 model.embedding_dim 2.0 +698 44 model.scoring_fct_norm 1.0 +698 44 loss.margin 2.661990870823103 +698 44 loss.adversarial_temperature 0.25233425619813366 +698 44 negative_sampler.num_negs_per_pos 44.0 +698 44 training.batch_size 1.0 +698 45 model.embedding_dim 2.0 +698 45 model.scoring_fct_norm 2.0 +698 45 loss.margin 23.835600159081018 +698 45 loss.adversarial_temperature 0.4492733545162211 +698 45 negative_sampler.num_negs_per_pos 95.0 +698 45 training.batch_size 2.0 +698 46 model.embedding_dim 2.0 +698 46 model.scoring_fct_norm 1.0 +698 46 loss.margin 15.485055677094698 +698 46 loss.adversarial_temperature 0.37975974303042637 +698 46 negative_sampler.num_negs_per_pos 68.0 +698 46 training.batch_size 2.0 +698 47 model.embedding_dim 1.0 +698 47 model.scoring_fct_norm 2.0 +698 
47 loss.margin 19.070181613573727 +698 47 loss.adversarial_temperature 0.5345196862685899 +698 47 negative_sampler.num_negs_per_pos 1.0 +698 47 training.batch_size 1.0 +698 48 model.embedding_dim 1.0 +698 48 model.scoring_fct_norm 2.0 +698 48 loss.margin 27.818007149821874 +698 48 loss.adversarial_temperature 0.3325168094297712 +698 48 negative_sampler.num_negs_per_pos 11.0 +698 48 training.batch_size 2.0 +698 49 model.embedding_dim 1.0 +698 49 model.scoring_fct_norm 2.0 +698 49 loss.margin 2.26007119298783 +698 49 loss.adversarial_temperature 0.19007471001498008 +698 49 negative_sampler.num_negs_per_pos 30.0 +698 49 training.batch_size 0.0 +698 50 model.embedding_dim 2.0 +698 50 model.scoring_fct_norm 2.0 +698 50 loss.margin 4.215044790347825 +698 50 loss.adversarial_temperature 0.2345904218388389 +698 50 negative_sampler.num_negs_per_pos 99.0 +698 50 training.batch_size 1.0 +698 51 model.embedding_dim 0.0 +698 51 model.scoring_fct_norm 1.0 +698 51 loss.margin 19.436541767104128 +698 51 loss.adversarial_temperature 0.12047220246857743 +698 51 negative_sampler.num_negs_per_pos 44.0 +698 51 training.batch_size 0.0 +698 52 model.embedding_dim 2.0 +698 52 model.scoring_fct_norm 1.0 +698 52 loss.margin 27.158986090889876 +698 52 loss.adversarial_temperature 0.460959550811192 +698 52 negative_sampler.num_negs_per_pos 82.0 +698 52 training.batch_size 2.0 +698 53 model.embedding_dim 2.0 +698 53 model.scoring_fct_norm 1.0 +698 53 loss.margin 19.232074034935682 +698 53 loss.adversarial_temperature 0.1697118564079749 +698 53 negative_sampler.num_negs_per_pos 97.0 +698 53 training.batch_size 0.0 +698 54 model.embedding_dim 1.0 +698 54 model.scoring_fct_norm 1.0 +698 54 loss.margin 29.393896116225704 +698 54 loss.adversarial_temperature 0.1934720456725457 +698 54 negative_sampler.num_negs_per_pos 83.0 +698 54 training.batch_size 1.0 +698 55 model.embedding_dim 1.0 +698 55 model.scoring_fct_norm 2.0 +698 55 loss.margin 25.991012674141448 +698 55 loss.adversarial_temperature 
0.1106364608318609 +698 55 negative_sampler.num_negs_per_pos 4.0 +698 55 training.batch_size 1.0 +698 56 model.embedding_dim 2.0 +698 56 model.scoring_fct_norm 1.0 +698 56 loss.margin 23.310373007541283 +698 56 loss.adversarial_temperature 0.816293772569537 +698 56 negative_sampler.num_negs_per_pos 81.0 +698 56 training.batch_size 1.0 +698 57 model.embedding_dim 2.0 +698 57 model.scoring_fct_norm 2.0 +698 57 loss.margin 6.343161527986551 +698 57 loss.adversarial_temperature 0.20954857112870848 +698 57 negative_sampler.num_negs_per_pos 59.0 +698 57 training.batch_size 0.0 +698 58 model.embedding_dim 1.0 +698 58 model.scoring_fct_norm 2.0 +698 58 loss.margin 1.8646977119714756 +698 58 loss.adversarial_temperature 0.17346292347828118 +698 58 negative_sampler.num_negs_per_pos 99.0 +698 58 training.batch_size 1.0 +698 59 model.embedding_dim 1.0 +698 59 model.scoring_fct_norm 1.0 +698 59 loss.margin 16.397327272174323 +698 59 loss.adversarial_temperature 0.8454360988082826 +698 59 negative_sampler.num_negs_per_pos 24.0 +698 59 training.batch_size 0.0 +698 60 model.embedding_dim 2.0 +698 60 model.scoring_fct_norm 2.0 +698 60 loss.margin 13.798388621787977 +698 60 loss.adversarial_temperature 0.852153546751679 +698 60 negative_sampler.num_negs_per_pos 42.0 +698 60 training.batch_size 2.0 +698 61 model.embedding_dim 1.0 +698 61 model.scoring_fct_norm 2.0 +698 61 loss.margin 14.695794081136208 +698 61 loss.adversarial_temperature 0.29668662833615017 +698 61 negative_sampler.num_negs_per_pos 82.0 +698 61 training.batch_size 0.0 +698 62 model.embedding_dim 1.0 +698 62 model.scoring_fct_norm 2.0 +698 62 loss.margin 17.7654953383408 +698 62 loss.adversarial_temperature 0.16908897187689925 +698 62 negative_sampler.num_negs_per_pos 24.0 +698 62 training.batch_size 1.0 +698 63 model.embedding_dim 0.0 +698 63 model.scoring_fct_norm 1.0 +698 63 loss.margin 7.546517633728138 +698 63 loss.adversarial_temperature 0.3636896673624219 +698 63 negative_sampler.num_negs_per_pos 27.0 +698 63 
training.batch_size 2.0 +698 64 model.embedding_dim 2.0 +698 64 model.scoring_fct_norm 1.0 +698 64 loss.margin 9.3627960496222 +698 64 loss.adversarial_temperature 0.46490578767793456 +698 64 negative_sampler.num_negs_per_pos 51.0 +698 64 training.batch_size 2.0 +698 65 model.embedding_dim 0.0 +698 65 model.scoring_fct_norm 2.0 +698 65 loss.margin 25.49982523905855 +698 65 loss.adversarial_temperature 0.7837370007292105 +698 65 negative_sampler.num_negs_per_pos 61.0 +698 65 training.batch_size 0.0 +698 66 model.embedding_dim 0.0 +698 66 model.scoring_fct_norm 1.0 +698 66 loss.margin 9.430821113251168 +698 66 loss.adversarial_temperature 0.726661376585202 +698 66 negative_sampler.num_negs_per_pos 43.0 +698 66 training.batch_size 2.0 +698 67 model.embedding_dim 0.0 +698 67 model.scoring_fct_norm 1.0 +698 67 loss.margin 17.121784174786633 +698 67 loss.adversarial_temperature 0.5966503956924292 +698 67 negative_sampler.num_negs_per_pos 85.0 +698 67 training.batch_size 2.0 +698 68 model.embedding_dim 1.0 +698 68 model.scoring_fct_norm 2.0 +698 68 loss.margin 21.632430832022106 +698 68 loss.adversarial_temperature 0.25640224277017365 +698 68 negative_sampler.num_negs_per_pos 69.0 +698 68 training.batch_size 2.0 +698 69 model.embedding_dim 0.0 +698 69 model.scoring_fct_norm 2.0 +698 69 loss.margin 2.405623472329646 +698 69 loss.adversarial_temperature 0.37523186142574005 +698 69 negative_sampler.num_negs_per_pos 73.0 +698 69 training.batch_size 0.0 +698 70 model.embedding_dim 0.0 +698 70 model.scoring_fct_norm 2.0 +698 70 loss.margin 27.477718486589286 +698 70 loss.adversarial_temperature 0.5146644026357747 +698 70 negative_sampler.num_negs_per_pos 32.0 +698 70 training.batch_size 1.0 +698 71 model.embedding_dim 1.0 +698 71 model.scoring_fct_norm 1.0 +698 71 loss.margin 18.677218405833198 +698 71 loss.adversarial_temperature 0.867432508167888 +698 71 negative_sampler.num_negs_per_pos 40.0 +698 71 training.batch_size 1.0 +698 72 model.embedding_dim 1.0 +698 72 
model.scoring_fct_norm 1.0 +698 72 loss.margin 28.39002549253684 +698 72 loss.adversarial_temperature 0.1753443975509098 +698 72 negative_sampler.num_negs_per_pos 15.0 +698 72 training.batch_size 1.0 +698 73 model.embedding_dim 2.0 +698 73 model.scoring_fct_norm 2.0 +698 73 loss.margin 18.774386909641677 +698 73 loss.adversarial_temperature 0.9852038371267634 +698 73 negative_sampler.num_negs_per_pos 20.0 +698 73 training.batch_size 2.0 +698 74 model.embedding_dim 0.0 +698 74 model.scoring_fct_norm 1.0 +698 74 loss.margin 6.632016231766097 +698 74 loss.adversarial_temperature 0.8795332204816076 +698 74 negative_sampler.num_negs_per_pos 20.0 +698 74 training.batch_size 0.0 +698 75 model.embedding_dim 1.0 +698 75 model.scoring_fct_norm 1.0 +698 75 loss.margin 22.14412136056197 +698 75 loss.adversarial_temperature 0.2686106514996218 +698 75 negative_sampler.num_negs_per_pos 71.0 +698 75 training.batch_size 0.0 +698 76 model.embedding_dim 2.0 +698 76 model.scoring_fct_norm 1.0 +698 76 loss.margin 15.293235779957552 +698 76 loss.adversarial_temperature 0.6287932059863093 +698 76 negative_sampler.num_negs_per_pos 90.0 +698 76 training.batch_size 2.0 +698 77 model.embedding_dim 1.0 +698 77 model.scoring_fct_norm 1.0 +698 77 loss.margin 23.489088209058632 +698 77 loss.adversarial_temperature 0.4864164462670074 +698 77 negative_sampler.num_negs_per_pos 82.0 +698 77 training.batch_size 0.0 +698 78 model.embedding_dim 1.0 +698 78 model.scoring_fct_norm 2.0 +698 78 loss.margin 5.65526164804842 +698 78 loss.adversarial_temperature 0.23502061684809197 +698 78 negative_sampler.num_negs_per_pos 6.0 +698 78 training.batch_size 2.0 +698 79 model.embedding_dim 0.0 +698 79 model.scoring_fct_norm 2.0 +698 79 loss.margin 10.370711460388877 +698 79 loss.adversarial_temperature 0.275059466351572 +698 79 negative_sampler.num_negs_per_pos 92.0 +698 79 training.batch_size 2.0 +698 80 model.embedding_dim 0.0 +698 80 model.scoring_fct_norm 2.0 +698 80 loss.margin 4.299974184038218 +698 80 
loss.adversarial_temperature 0.3621002595340045 +698 80 negative_sampler.num_negs_per_pos 80.0 +698 80 training.batch_size 2.0 +698 81 model.embedding_dim 0.0 +698 81 model.scoring_fct_norm 2.0 +698 81 loss.margin 22.369921243772882 +698 81 loss.adversarial_temperature 0.6612083448948642 +698 81 negative_sampler.num_negs_per_pos 55.0 +698 81 training.batch_size 2.0 +698 82 model.embedding_dim 0.0 +698 82 model.scoring_fct_norm 1.0 +698 82 loss.margin 22.198486742161176 +698 82 loss.adversarial_temperature 0.9516589649782689 +698 82 negative_sampler.num_negs_per_pos 79.0 +698 82 training.batch_size 1.0 +698 83 model.embedding_dim 0.0 +698 83 model.scoring_fct_norm 2.0 +698 83 loss.margin 27.286558663248826 +698 83 loss.adversarial_temperature 0.4214572186842416 +698 83 negative_sampler.num_negs_per_pos 22.0 +698 83 training.batch_size 1.0 +698 84 model.embedding_dim 0.0 +698 84 model.scoring_fct_norm 2.0 +698 84 loss.margin 24.0185074854224 +698 84 loss.adversarial_temperature 0.30836688175982707 +698 84 negative_sampler.num_negs_per_pos 26.0 +698 84 training.batch_size 1.0 +698 85 model.embedding_dim 2.0 +698 85 model.scoring_fct_norm 1.0 +698 85 loss.margin 17.89217932578568 +698 85 loss.adversarial_temperature 0.4126319958574789 +698 85 negative_sampler.num_negs_per_pos 59.0 +698 85 training.batch_size 0.0 +698 86 model.embedding_dim 1.0 +698 86 model.scoring_fct_norm 2.0 +698 86 loss.margin 25.034304889946217 +698 86 loss.adversarial_temperature 0.6808280658922606 +698 86 negative_sampler.num_negs_per_pos 23.0 +698 86 training.batch_size 2.0 +698 87 model.embedding_dim 2.0 +698 87 model.scoring_fct_norm 2.0 +698 87 loss.margin 27.275275668530508 +698 87 loss.adversarial_temperature 0.5220683004639214 +698 87 negative_sampler.num_negs_per_pos 54.0 +698 87 training.batch_size 1.0 +698 88 model.embedding_dim 0.0 +698 88 model.scoring_fct_norm 2.0 +698 88 loss.margin 22.080619293177232 +698 88 loss.adversarial_temperature 0.3352767447488043 +698 88 
negative_sampler.num_negs_per_pos 60.0 +698 88 training.batch_size 2.0 +698 89 model.embedding_dim 0.0 +698 89 model.scoring_fct_norm 2.0 +698 89 loss.margin 6.359875893054886 +698 89 loss.adversarial_temperature 0.6409456450711033 +698 89 negative_sampler.num_negs_per_pos 95.0 +698 89 training.batch_size 0.0 +698 90 model.embedding_dim 2.0 +698 90 model.scoring_fct_norm 1.0 +698 90 loss.margin 21.804301034306928 +698 90 loss.adversarial_temperature 0.1854356318910011 +698 90 negative_sampler.num_negs_per_pos 20.0 +698 90 training.batch_size 2.0 +698 91 model.embedding_dim 1.0 +698 91 model.scoring_fct_norm 2.0 +698 91 loss.margin 15.711822570057294 +698 91 loss.adversarial_temperature 0.11093298908464007 +698 91 negative_sampler.num_negs_per_pos 21.0 +698 91 training.batch_size 1.0 +698 92 model.embedding_dim 0.0 +698 92 model.scoring_fct_norm 2.0 +698 92 loss.margin 23.788538469415162 +698 92 loss.adversarial_temperature 0.16468232422584736 +698 92 negative_sampler.num_negs_per_pos 67.0 +698 92 training.batch_size 0.0 +698 93 model.embedding_dim 1.0 +698 93 model.scoring_fct_norm 2.0 +698 93 loss.margin 8.019768625963714 +698 93 loss.adversarial_temperature 0.7518136834979455 +698 93 negative_sampler.num_negs_per_pos 98.0 +698 93 training.batch_size 1.0 +698 94 model.embedding_dim 1.0 +698 94 model.scoring_fct_norm 1.0 +698 94 loss.margin 23.458235681184163 +698 94 loss.adversarial_temperature 0.9939773894618507 +698 94 negative_sampler.num_negs_per_pos 79.0 +698 94 training.batch_size 1.0 +698 95 model.embedding_dim 0.0 +698 95 model.scoring_fct_norm 1.0 +698 95 loss.margin 6.546207083723017 +698 95 loss.adversarial_temperature 0.2505726323661832 +698 95 negative_sampler.num_negs_per_pos 95.0 +698 95 training.batch_size 2.0 +698 96 model.embedding_dim 0.0 +698 96 model.scoring_fct_norm 1.0 +698 96 loss.margin 26.27863392015211 +698 96 loss.adversarial_temperature 0.21396235310696787 +698 96 negative_sampler.num_negs_per_pos 59.0 +698 96 training.batch_size 1.0 
+698 97 model.embedding_dim 0.0 +698 97 model.scoring_fct_norm 2.0 +698 97 loss.margin 14.939928048805145 +698 97 loss.adversarial_temperature 0.705834381470058 +698 97 negative_sampler.num_negs_per_pos 88.0 +698 97 training.batch_size 0.0 +698 98 model.embedding_dim 2.0 +698 98 model.scoring_fct_norm 1.0 +698 98 loss.margin 9.73210233509276 +698 98 loss.adversarial_temperature 0.22295006168320847 +698 98 negative_sampler.num_negs_per_pos 62.0 +698 98 training.batch_size 0.0 +698 99 model.embedding_dim 2.0 +698 99 model.scoring_fct_norm 2.0 +698 99 loss.margin 1.2171927217044536 +698 99 loss.adversarial_temperature 0.2948656580581607 +698 99 negative_sampler.num_negs_per_pos 16.0 +698 99 training.batch_size 1.0 +698 100 model.embedding_dim 1.0 +698 100 model.scoring_fct_norm 1.0 +698 100 loss.margin 27.375910935670923 +698 100 loss.adversarial_temperature 0.18025398209206495 +698 100 negative_sampler.num_negs_per_pos 77.0 +698 100 training.batch_size 1.0 +698 1 dataset """kinships""" +698 1 model """structuredembedding""" +698 1 loss """nssa""" +698 1 regularizer """no""" +698 1 optimizer """adadelta""" +698 1 training_loop """owa""" +698 1 negative_sampler """basic""" +698 1 evaluator """rankbased""" +698 2 dataset """kinships""" +698 2 model """structuredembedding""" +698 2 loss """nssa""" +698 2 regularizer """no""" +698 2 optimizer """adadelta""" +698 2 training_loop """owa""" +698 2 negative_sampler """basic""" +698 2 evaluator """rankbased""" +698 3 dataset """kinships""" +698 3 model """structuredembedding""" +698 3 loss """nssa""" +698 3 regularizer """no""" +698 3 optimizer """adadelta""" +698 3 training_loop """owa""" +698 3 negative_sampler """basic""" +698 3 evaluator """rankbased""" +698 4 dataset """kinships""" +698 4 model """structuredembedding""" +698 4 loss """nssa""" +698 4 regularizer """no""" +698 4 optimizer """adadelta""" +698 4 training_loop """owa""" +698 4 negative_sampler """basic""" +698 4 evaluator """rankbased""" +698 5 dataset 
"""kinships""" +698 5 model """structuredembedding""" +698 5 loss """nssa""" +698 5 regularizer """no""" +698 5 optimizer """adadelta""" +698 5 training_loop """owa""" +698 5 negative_sampler """basic""" +698 5 evaluator """rankbased""" +698 6 dataset """kinships""" +698 6 model """structuredembedding""" +698 6 loss """nssa""" +698 6 regularizer """no""" +698 6 optimizer """adadelta""" +698 6 training_loop """owa""" +698 6 negative_sampler """basic""" +698 6 evaluator """rankbased""" +698 7 dataset """kinships""" +698 7 model """structuredembedding""" +698 7 loss """nssa""" +698 7 regularizer """no""" +698 7 optimizer """adadelta""" +698 7 training_loop """owa""" +698 7 negative_sampler """basic""" +698 7 evaluator """rankbased""" +698 8 dataset """kinships""" +698 8 model """structuredembedding""" +698 8 loss """nssa""" +698 8 regularizer """no""" +698 8 optimizer """adadelta""" +698 8 training_loop """owa""" +698 8 negative_sampler """basic""" +698 8 evaluator """rankbased""" +698 9 dataset """kinships""" +698 9 model """structuredembedding""" +698 9 loss """nssa""" +698 9 regularizer """no""" +698 9 optimizer """adadelta""" +698 9 training_loop """owa""" +698 9 negative_sampler """basic""" +698 9 evaluator """rankbased""" +698 10 dataset """kinships""" +698 10 model """structuredembedding""" +698 10 loss """nssa""" +698 10 regularizer """no""" +698 10 optimizer """adadelta""" +698 10 training_loop """owa""" +698 10 negative_sampler """basic""" +698 10 evaluator """rankbased""" +698 11 dataset """kinships""" +698 11 model """structuredembedding""" +698 11 loss """nssa""" +698 11 regularizer """no""" +698 11 optimizer """adadelta""" +698 11 training_loop """owa""" +698 11 negative_sampler """basic""" +698 11 evaluator """rankbased""" +698 12 dataset """kinships""" +698 12 model """structuredembedding""" +698 12 loss """nssa""" +698 12 regularizer """no""" +698 12 optimizer """adadelta""" +698 12 training_loop """owa""" +698 12 negative_sampler """basic""" +698 12 
evaluator """rankbased""" +698 13 dataset """kinships""" +698 13 model """structuredembedding""" +698 13 loss """nssa""" +698 13 regularizer """no""" +698 13 optimizer """adadelta""" +698 13 training_loop """owa""" +698 13 negative_sampler """basic""" +698 13 evaluator """rankbased""" +698 14 dataset """kinships""" +698 14 model """structuredembedding""" +698 14 loss """nssa""" +698 14 regularizer """no""" +698 14 optimizer """adadelta""" +698 14 training_loop """owa""" +698 14 negative_sampler """basic""" +698 14 evaluator """rankbased""" +698 15 dataset """kinships""" +698 15 model """structuredembedding""" +698 15 loss """nssa""" +698 15 regularizer """no""" +698 15 optimizer """adadelta""" +698 15 training_loop """owa""" +698 15 negative_sampler """basic""" +698 15 evaluator """rankbased""" +698 16 dataset """kinships""" +698 16 model """structuredembedding""" +698 16 loss """nssa""" +698 16 regularizer """no""" +698 16 optimizer """adadelta""" +698 16 training_loop """owa""" +698 16 negative_sampler """basic""" +698 16 evaluator """rankbased""" +698 17 dataset """kinships""" +698 17 model """structuredembedding""" +698 17 loss """nssa""" +698 17 regularizer """no""" +698 17 optimizer """adadelta""" +698 17 training_loop """owa""" +698 17 negative_sampler """basic""" +698 17 evaluator """rankbased""" +698 18 dataset """kinships""" +698 18 model """structuredembedding""" +698 18 loss """nssa""" +698 18 regularizer """no""" +698 18 optimizer """adadelta""" +698 18 training_loop """owa""" +698 18 negative_sampler """basic""" +698 18 evaluator """rankbased""" +698 19 dataset """kinships""" +698 19 model """structuredembedding""" +698 19 loss """nssa""" +698 19 regularizer """no""" +698 19 optimizer """adadelta""" +698 19 training_loop """owa""" +698 19 negative_sampler """basic""" +698 19 evaluator """rankbased""" +698 20 dataset """kinships""" +698 20 model """structuredembedding""" +698 20 loss """nssa""" +698 20 regularizer """no""" +698 20 optimizer 
"""adadelta""" +698 20 training_loop """owa""" +698 20 negative_sampler """basic""" +698 20 evaluator """rankbased""" +698 21 dataset """kinships""" +698 21 model """structuredembedding""" +698 21 loss """nssa""" +698 21 regularizer """no""" +698 21 optimizer """adadelta""" +698 21 training_loop """owa""" +698 21 negative_sampler """basic""" +698 21 evaluator """rankbased""" +698 22 dataset """kinships""" +698 22 model """structuredembedding""" +698 22 loss """nssa""" +698 22 regularizer """no""" +698 22 optimizer """adadelta""" +698 22 training_loop """owa""" +698 22 negative_sampler """basic""" +698 22 evaluator """rankbased""" +698 23 dataset """kinships""" +698 23 model """structuredembedding""" +698 23 loss """nssa""" +698 23 regularizer """no""" +698 23 optimizer """adadelta""" +698 23 training_loop """owa""" +698 23 negative_sampler """basic""" +698 23 evaluator """rankbased""" +698 24 dataset """kinships""" +698 24 model """structuredembedding""" +698 24 loss """nssa""" +698 24 regularizer """no""" +698 24 optimizer """adadelta""" +698 24 training_loop """owa""" +698 24 negative_sampler """basic""" +698 24 evaluator """rankbased""" +698 25 dataset """kinships""" +698 25 model """structuredembedding""" +698 25 loss """nssa""" +698 25 regularizer """no""" +698 25 optimizer """adadelta""" +698 25 training_loop """owa""" +698 25 negative_sampler """basic""" +698 25 evaluator """rankbased""" +698 26 dataset """kinships""" +698 26 model """structuredembedding""" +698 26 loss """nssa""" +698 26 regularizer """no""" +698 26 optimizer """adadelta""" +698 26 training_loop """owa""" +698 26 negative_sampler """basic""" +698 26 evaluator """rankbased""" +698 27 dataset """kinships""" +698 27 model """structuredembedding""" +698 27 loss """nssa""" +698 27 regularizer """no""" +698 27 optimizer """adadelta""" +698 27 training_loop """owa""" +698 27 negative_sampler """basic""" +698 27 evaluator """rankbased""" +698 28 dataset """kinships""" +698 28 model 
"""structuredembedding""" +698 28 loss """nssa""" +698 28 regularizer """no""" +698 28 optimizer """adadelta""" +698 28 training_loop """owa""" +698 28 negative_sampler """basic""" +698 28 evaluator """rankbased""" +698 29 dataset """kinships""" +698 29 model """structuredembedding""" +698 29 loss """nssa""" +698 29 regularizer """no""" +698 29 optimizer """adadelta""" +698 29 training_loop """owa""" +698 29 negative_sampler """basic""" +698 29 evaluator """rankbased""" +698 30 dataset """kinships""" +698 30 model """structuredembedding""" +698 30 loss """nssa""" +698 30 regularizer """no""" +698 30 optimizer """adadelta""" +698 30 training_loop """owa""" +698 30 negative_sampler """basic""" +698 30 evaluator """rankbased""" +698 31 dataset """kinships""" +698 31 model """structuredembedding""" +698 31 loss """nssa""" +698 31 regularizer """no""" +698 31 optimizer """adadelta""" +698 31 training_loop """owa""" +698 31 negative_sampler """basic""" +698 31 evaluator """rankbased""" +698 32 dataset """kinships""" +698 32 model """structuredembedding""" +698 32 loss """nssa""" +698 32 regularizer """no""" +698 32 optimizer """adadelta""" +698 32 training_loop """owa""" +698 32 negative_sampler """basic""" +698 32 evaluator """rankbased""" +698 33 dataset """kinships""" +698 33 model """structuredembedding""" +698 33 loss """nssa""" +698 33 regularizer """no""" +698 33 optimizer """adadelta""" +698 33 training_loop """owa""" +698 33 negative_sampler """basic""" +698 33 evaluator """rankbased""" +698 34 dataset """kinships""" +698 34 model """structuredembedding""" +698 34 loss """nssa""" +698 34 regularizer """no""" +698 34 optimizer """adadelta""" +698 34 training_loop """owa""" +698 34 negative_sampler """basic""" +698 34 evaluator """rankbased""" +698 35 dataset """kinships""" +698 35 model """structuredembedding""" +698 35 loss """nssa""" +698 35 regularizer """no""" +698 35 optimizer """adadelta""" +698 35 training_loop """owa""" +698 35 negative_sampler 
"""basic""" +698 35 evaluator """rankbased""" +698 36 dataset """kinships""" +698 36 model """structuredembedding""" +698 36 loss """nssa""" +698 36 regularizer """no""" +698 36 optimizer """adadelta""" +698 36 training_loop """owa""" +698 36 negative_sampler """basic""" +698 36 evaluator """rankbased""" +698 37 dataset """kinships""" +698 37 model """structuredembedding""" +698 37 loss """nssa""" +698 37 regularizer """no""" +698 37 optimizer """adadelta""" +698 37 training_loop """owa""" +698 37 negative_sampler """basic""" +698 37 evaluator """rankbased""" +698 38 dataset """kinships""" +698 38 model """structuredembedding""" +698 38 loss """nssa""" +698 38 regularizer """no""" +698 38 optimizer """adadelta""" +698 38 training_loop """owa""" +698 38 negative_sampler """basic""" +698 38 evaluator """rankbased""" +698 39 dataset """kinships""" +698 39 model """structuredembedding""" +698 39 loss """nssa""" +698 39 regularizer """no""" +698 39 optimizer """adadelta""" +698 39 training_loop """owa""" +698 39 negative_sampler """basic""" +698 39 evaluator """rankbased""" +698 40 dataset """kinships""" +698 40 model """structuredembedding""" +698 40 loss """nssa""" +698 40 regularizer """no""" +698 40 optimizer """adadelta""" +698 40 training_loop """owa""" +698 40 negative_sampler """basic""" +698 40 evaluator """rankbased""" +698 41 dataset """kinships""" +698 41 model """structuredembedding""" +698 41 loss """nssa""" +698 41 regularizer """no""" +698 41 optimizer """adadelta""" +698 41 training_loop """owa""" +698 41 negative_sampler """basic""" +698 41 evaluator """rankbased""" +698 42 dataset """kinships""" +698 42 model """structuredembedding""" +698 42 loss """nssa""" +698 42 regularizer """no""" +698 42 optimizer """adadelta""" +698 42 training_loop """owa""" +698 42 negative_sampler """basic""" +698 42 evaluator """rankbased""" +698 43 dataset """kinships""" +698 43 model """structuredembedding""" +698 43 loss """nssa""" +698 43 regularizer """no""" +698 43 
optimizer """adadelta""" +698 43 training_loop """owa""" +698 43 negative_sampler """basic""" +698 43 evaluator """rankbased""" +698 44 dataset """kinships""" +698 44 model """structuredembedding""" +698 44 loss """nssa""" +698 44 regularizer """no""" +698 44 optimizer """adadelta""" +698 44 training_loop """owa""" +698 44 negative_sampler """basic""" +698 44 evaluator """rankbased""" +698 45 dataset """kinships""" +698 45 model """structuredembedding""" +698 45 loss """nssa""" +698 45 regularizer """no""" +698 45 optimizer """adadelta""" +698 45 training_loop """owa""" +698 45 negative_sampler """basic""" +698 45 evaluator """rankbased""" +698 46 dataset """kinships""" +698 46 model """structuredembedding""" +698 46 loss """nssa""" +698 46 regularizer """no""" +698 46 optimizer """adadelta""" +698 46 training_loop """owa""" +698 46 negative_sampler """basic""" +698 46 evaluator """rankbased""" +698 47 dataset """kinships""" +698 47 model """structuredembedding""" +698 47 loss """nssa""" +698 47 regularizer """no""" +698 47 optimizer """adadelta""" +698 47 training_loop """owa""" +698 47 negative_sampler """basic""" +698 47 evaluator """rankbased""" +698 48 dataset """kinships""" +698 48 model """structuredembedding""" +698 48 loss """nssa""" +698 48 regularizer """no""" +698 48 optimizer """adadelta""" +698 48 training_loop """owa""" +698 48 negative_sampler """basic""" +698 48 evaluator """rankbased""" +698 49 dataset """kinships""" +698 49 model """structuredembedding""" +698 49 loss """nssa""" +698 49 regularizer """no""" +698 49 optimizer """adadelta""" +698 49 training_loop """owa""" +698 49 negative_sampler """basic""" +698 49 evaluator """rankbased""" +698 50 dataset """kinships""" +698 50 model """structuredembedding""" +698 50 loss """nssa""" +698 50 regularizer """no""" +698 50 optimizer """adadelta""" +698 50 training_loop """owa""" +698 50 negative_sampler """basic""" +698 50 evaluator """rankbased""" +698 51 dataset """kinships""" +698 51 model 
"""structuredembedding""" +698 51 loss """nssa""" +698 51 regularizer """no""" +698 51 optimizer """adadelta""" +698 51 training_loop """owa""" +698 51 negative_sampler """basic""" +698 51 evaluator """rankbased""" +698 52 dataset """kinships""" +698 52 model """structuredembedding""" +698 52 loss """nssa""" +698 52 regularizer """no""" +698 52 optimizer """adadelta""" +698 52 training_loop """owa""" +698 52 negative_sampler """basic""" +698 52 evaluator """rankbased""" +698 53 dataset """kinships""" +698 53 model """structuredembedding""" +698 53 loss """nssa""" +698 53 regularizer """no""" +698 53 optimizer """adadelta""" +698 53 training_loop """owa""" +698 53 negative_sampler """basic""" +698 53 evaluator """rankbased""" +698 54 dataset """kinships""" +698 54 model """structuredembedding""" +698 54 loss """nssa""" +698 54 regularizer """no""" +698 54 optimizer """adadelta""" +698 54 training_loop """owa""" +698 54 negative_sampler """basic""" +698 54 evaluator """rankbased""" +698 55 dataset """kinships""" +698 55 model """structuredembedding""" +698 55 loss """nssa""" +698 55 regularizer """no""" +698 55 optimizer """adadelta""" +698 55 training_loop """owa""" +698 55 negative_sampler """basic""" +698 55 evaluator """rankbased""" +698 56 dataset """kinships""" +698 56 model """structuredembedding""" +698 56 loss """nssa""" +698 56 regularizer """no""" +698 56 optimizer """adadelta""" +698 56 training_loop """owa""" +698 56 negative_sampler """basic""" +698 56 evaluator """rankbased""" +698 57 dataset """kinships""" +698 57 model """structuredembedding""" +698 57 loss """nssa""" +698 57 regularizer """no""" +698 57 optimizer """adadelta""" +698 57 training_loop """owa""" +698 57 negative_sampler """basic""" +698 57 evaluator """rankbased""" +698 58 dataset """kinships""" +698 58 model """structuredembedding""" +698 58 loss """nssa""" +698 58 regularizer """no""" +698 58 optimizer """adadelta""" +698 58 training_loop """owa""" +698 58 negative_sampler 
"""basic""" +698 58 evaluator """rankbased""" +698 59 dataset """kinships""" +698 59 model """structuredembedding""" +698 59 loss """nssa""" +698 59 regularizer """no""" +698 59 optimizer """adadelta""" +698 59 training_loop """owa""" +698 59 negative_sampler """basic""" +698 59 evaluator """rankbased""" +698 60 dataset """kinships""" +698 60 model """structuredembedding""" +698 60 loss """nssa""" +698 60 regularizer """no""" +698 60 optimizer """adadelta""" +698 60 training_loop """owa""" +698 60 negative_sampler """basic""" +698 60 evaluator """rankbased""" +698 61 dataset """kinships""" +698 61 model """structuredembedding""" +698 61 loss """nssa""" +698 61 regularizer """no""" +698 61 optimizer """adadelta""" +698 61 training_loop """owa""" +698 61 negative_sampler """basic""" +698 61 evaluator """rankbased""" +698 62 dataset """kinships""" +698 62 model """structuredembedding""" +698 62 loss """nssa""" +698 62 regularizer """no""" +698 62 optimizer """adadelta""" +698 62 training_loop """owa""" +698 62 negative_sampler """basic""" +698 62 evaluator """rankbased""" +698 63 dataset """kinships""" +698 63 model """structuredembedding""" +698 63 loss """nssa""" +698 63 regularizer """no""" +698 63 optimizer """adadelta""" +698 63 training_loop """owa""" +698 63 negative_sampler """basic""" +698 63 evaluator """rankbased""" +698 64 dataset """kinships""" +698 64 model """structuredembedding""" +698 64 loss """nssa""" +698 64 regularizer """no""" +698 64 optimizer """adadelta""" +698 64 training_loop """owa""" +698 64 negative_sampler """basic""" +698 64 evaluator """rankbased""" +698 65 dataset """kinships""" +698 65 model """structuredembedding""" +698 65 loss """nssa""" +698 65 regularizer """no""" +698 65 optimizer """adadelta""" +698 65 training_loop """owa""" +698 65 negative_sampler """basic""" +698 65 evaluator """rankbased""" +698 66 dataset """kinships""" +698 66 model """structuredembedding""" +698 66 loss """nssa""" +698 66 regularizer """no""" +698 66 
optimizer """adadelta""" +698 66 training_loop """owa""" +698 66 negative_sampler """basic""" +698 66 evaluator """rankbased""" +698 67 dataset """kinships""" +698 67 model """structuredembedding""" +698 67 loss """nssa""" +698 67 regularizer """no""" +698 67 optimizer """adadelta""" +698 67 training_loop """owa""" +698 67 negative_sampler """basic""" +698 67 evaluator """rankbased""" +698 68 dataset """kinships""" +698 68 model """structuredembedding""" +698 68 loss """nssa""" +698 68 regularizer """no""" +698 68 optimizer """adadelta""" +698 68 training_loop """owa""" +698 68 negative_sampler """basic""" +698 68 evaluator """rankbased""" +698 69 dataset """kinships""" +698 69 model """structuredembedding""" +698 69 loss """nssa""" +698 69 regularizer """no""" +698 69 optimizer """adadelta""" +698 69 training_loop """owa""" +698 69 negative_sampler """basic""" +698 69 evaluator """rankbased""" +698 70 dataset """kinships""" +698 70 model """structuredembedding""" +698 70 loss """nssa""" +698 70 regularizer """no""" +698 70 optimizer """adadelta""" +698 70 training_loop """owa""" +698 70 negative_sampler """basic""" +698 70 evaluator """rankbased""" +698 71 dataset """kinships""" +698 71 model """structuredembedding""" +698 71 loss """nssa""" +698 71 regularizer """no""" +698 71 optimizer """adadelta""" +698 71 training_loop """owa""" +698 71 negative_sampler """basic""" +698 71 evaluator """rankbased""" +698 72 dataset """kinships""" +698 72 model """structuredembedding""" +698 72 loss """nssa""" +698 72 regularizer """no""" +698 72 optimizer """adadelta""" +698 72 training_loop """owa""" +698 72 negative_sampler """basic""" +698 72 evaluator """rankbased""" +698 73 dataset """kinships""" +698 73 model """structuredembedding""" +698 73 loss """nssa""" +698 73 regularizer """no""" +698 73 optimizer """adadelta""" +698 73 training_loop """owa""" +698 73 negative_sampler """basic""" +698 73 evaluator """rankbased""" +698 74 dataset """kinships""" +698 74 model 
"""structuredembedding""" +698 74 loss """nssa""" +698 74 regularizer """no""" +698 74 optimizer """adadelta""" +698 74 training_loop """owa""" +698 74 negative_sampler """basic""" +698 74 evaluator """rankbased""" +698 75 dataset """kinships""" +698 75 model """structuredembedding""" +698 75 loss """nssa""" +698 75 regularizer """no""" +698 75 optimizer """adadelta""" +698 75 training_loop """owa""" +698 75 negative_sampler """basic""" +698 75 evaluator """rankbased""" +698 76 dataset """kinships""" +698 76 model """structuredembedding""" +698 76 loss """nssa""" +698 76 regularizer """no""" +698 76 optimizer """adadelta""" +698 76 training_loop """owa""" +698 76 negative_sampler """basic""" +698 76 evaluator """rankbased""" +698 77 dataset """kinships""" +698 77 model """structuredembedding""" +698 77 loss """nssa""" +698 77 regularizer """no""" +698 77 optimizer """adadelta""" +698 77 training_loop """owa""" +698 77 negative_sampler """basic""" +698 77 evaluator """rankbased""" +698 78 dataset """kinships""" +698 78 model """structuredembedding""" +698 78 loss """nssa""" +698 78 regularizer """no""" +698 78 optimizer """adadelta""" +698 78 training_loop """owa""" +698 78 negative_sampler """basic""" +698 78 evaluator """rankbased""" +698 79 dataset """kinships""" +698 79 model """structuredembedding""" +698 79 loss """nssa""" +698 79 regularizer """no""" +698 79 optimizer """adadelta""" +698 79 training_loop """owa""" +698 79 negative_sampler """basic""" +698 79 evaluator """rankbased""" +698 80 dataset """kinships""" +698 80 model """structuredembedding""" +698 80 loss """nssa""" +698 80 regularizer """no""" +698 80 optimizer """adadelta""" +698 80 training_loop """owa""" +698 80 negative_sampler """basic""" +698 80 evaluator """rankbased""" +698 81 dataset """kinships""" +698 81 model """structuredembedding""" +698 81 loss """nssa""" +698 81 regularizer """no""" +698 81 optimizer """adadelta""" +698 81 training_loop """owa""" +698 81 negative_sampler 
"""basic""" +698 81 evaluator """rankbased""" +698 82 dataset """kinships""" +698 82 model """structuredembedding""" +698 82 loss """nssa""" +698 82 regularizer """no""" +698 82 optimizer """adadelta""" +698 82 training_loop """owa""" +698 82 negative_sampler """basic""" +698 82 evaluator """rankbased""" +698 83 dataset """kinships""" +698 83 model """structuredembedding""" +698 83 loss """nssa""" +698 83 regularizer """no""" +698 83 optimizer """adadelta""" +698 83 training_loop """owa""" +698 83 negative_sampler """basic""" +698 83 evaluator """rankbased""" +698 84 dataset """kinships""" +698 84 model """structuredembedding""" +698 84 loss """nssa""" +698 84 regularizer """no""" +698 84 optimizer """adadelta""" +698 84 training_loop """owa""" +698 84 negative_sampler """basic""" +698 84 evaluator """rankbased""" +698 85 dataset """kinships""" +698 85 model """structuredembedding""" +698 85 loss """nssa""" +698 85 regularizer """no""" +698 85 optimizer """adadelta""" +698 85 training_loop """owa""" +698 85 negative_sampler """basic""" +698 85 evaluator """rankbased""" +698 86 dataset """kinships""" +698 86 model """structuredembedding""" +698 86 loss """nssa""" +698 86 regularizer """no""" +698 86 optimizer """adadelta""" +698 86 training_loop """owa""" +698 86 negative_sampler """basic""" +698 86 evaluator """rankbased""" +698 87 dataset """kinships""" +698 87 model """structuredembedding""" +698 87 loss """nssa""" +698 87 regularizer """no""" +698 87 optimizer """adadelta""" +698 87 training_loop """owa""" +698 87 negative_sampler """basic""" +698 87 evaluator """rankbased""" +698 88 dataset """kinships""" +698 88 model """structuredembedding""" +698 88 loss """nssa""" +698 88 regularizer """no""" +698 88 optimizer """adadelta""" +698 88 training_loop """owa""" +698 88 negative_sampler """basic""" +698 88 evaluator """rankbased""" +698 89 dataset """kinships""" +698 89 model """structuredembedding""" +698 89 loss """nssa""" +698 89 regularizer """no""" +698 89 
optimizer """adadelta""" +698 89 training_loop """owa""" +698 89 negative_sampler """basic""" +698 89 evaluator """rankbased""" +698 90 dataset """kinships""" +698 90 model """structuredembedding""" +698 90 loss """nssa""" +698 90 regularizer """no""" +698 90 optimizer """adadelta""" +698 90 training_loop """owa""" +698 90 negative_sampler """basic""" +698 90 evaluator """rankbased""" +698 91 dataset """kinships""" +698 91 model """structuredembedding""" +698 91 loss """nssa""" +698 91 regularizer """no""" +698 91 optimizer """adadelta""" +698 91 training_loop """owa""" +698 91 negative_sampler """basic""" +698 91 evaluator """rankbased""" +698 92 dataset """kinships""" +698 92 model """structuredembedding""" +698 92 loss """nssa""" +698 92 regularizer """no""" +698 92 optimizer """adadelta""" +698 92 training_loop """owa""" +698 92 negative_sampler """basic""" +698 92 evaluator """rankbased""" +698 93 dataset """kinships""" +698 93 model """structuredembedding""" +698 93 loss """nssa""" +698 93 regularizer """no""" +698 93 optimizer """adadelta""" +698 93 training_loop """owa""" +698 93 negative_sampler """basic""" +698 93 evaluator """rankbased""" +698 94 dataset """kinships""" +698 94 model """structuredembedding""" +698 94 loss """nssa""" +698 94 regularizer """no""" +698 94 optimizer """adadelta""" +698 94 training_loop """owa""" +698 94 negative_sampler """basic""" +698 94 evaluator """rankbased""" +698 95 dataset """kinships""" +698 95 model """structuredembedding""" +698 95 loss """nssa""" +698 95 regularizer """no""" +698 95 optimizer """adadelta""" +698 95 training_loop """owa""" +698 95 negative_sampler """basic""" +698 95 evaluator """rankbased""" +698 96 dataset """kinships""" +698 96 model """structuredembedding""" +698 96 loss """nssa""" +698 96 regularizer """no""" +698 96 optimizer """adadelta""" +698 96 training_loop """owa""" +698 96 negative_sampler """basic""" +698 96 evaluator """rankbased""" +698 97 dataset """kinships""" +698 97 model 
"""structuredembedding""" +698 97 loss """nssa""" +698 97 regularizer """no""" +698 97 optimizer """adadelta""" +698 97 training_loop """owa""" +698 97 negative_sampler """basic""" +698 97 evaluator """rankbased""" +698 98 dataset """kinships""" +698 98 model """structuredembedding""" +698 98 loss """nssa""" +698 98 regularizer """no""" +698 98 optimizer """adadelta""" +698 98 training_loop """owa""" +698 98 negative_sampler """basic""" +698 98 evaluator """rankbased""" +698 99 dataset """kinships""" +698 99 model """structuredembedding""" +698 99 loss """nssa""" +698 99 regularizer """no""" +698 99 optimizer """adadelta""" +698 99 training_loop """owa""" +698 99 negative_sampler """basic""" +698 99 evaluator """rankbased""" +698 100 dataset """kinships""" +698 100 model """structuredembedding""" +698 100 loss """nssa""" +698 100 regularizer """no""" +698 100 optimizer """adadelta""" +698 100 training_loop """owa""" +698 100 negative_sampler """basic""" +698 100 evaluator """rankbased""" +699 1 model.embedding_dim 2.0 +699 1 model.scoring_fct_norm 1.0 +699 1 optimizer.lr 0.0010195119573821485 +699 1 training.batch_size 1.0 +699 1 training.label_smoothing 0.0029156157045652606 +699 2 model.embedding_dim 2.0 +699 2 model.scoring_fct_norm 1.0 +699 2 optimizer.lr 0.09071645360614276 +699 2 training.batch_size 0.0 +699 2 training.label_smoothing 0.003201545819495641 +699 3 model.embedding_dim 1.0 +699 3 model.scoring_fct_norm 2.0 +699 3 optimizer.lr 0.021380171965932296 +699 3 training.batch_size 1.0 +699 3 training.label_smoothing 0.01265926164130021 +699 4 model.embedding_dim 0.0 +699 4 model.scoring_fct_norm 2.0 +699 4 optimizer.lr 0.030391931714506184 +699 4 training.batch_size 1.0 +699 4 training.label_smoothing 0.011647180252140378 +699 5 model.embedding_dim 0.0 +699 5 model.scoring_fct_norm 2.0 +699 5 optimizer.lr 0.013628332249048783 +699 5 training.batch_size 1.0 +699 5 training.label_smoothing 0.6676176034618813 +699 6 model.embedding_dim 1.0 +699 6 
model.scoring_fct_norm 1.0 +699 6 optimizer.lr 0.0021936782973585246 +699 6 training.batch_size 0.0 +699 6 training.label_smoothing 0.8592299673239121 +699 7 model.embedding_dim 0.0 +699 7 model.scoring_fct_norm 1.0 +699 7 optimizer.lr 0.003140539562984896 +699 7 training.batch_size 1.0 +699 7 training.label_smoothing 0.03659724396941948 +699 8 model.embedding_dim 0.0 +699 8 model.scoring_fct_norm 1.0 +699 8 optimizer.lr 0.09363237714630598 +699 8 training.batch_size 0.0 +699 8 training.label_smoothing 0.4435110525668946 +699 9 model.embedding_dim 2.0 +699 9 model.scoring_fct_norm 1.0 +699 9 optimizer.lr 0.005007088625347087 +699 9 training.batch_size 0.0 +699 9 training.label_smoothing 0.06295956462326778 +699 10 model.embedding_dim 0.0 +699 10 model.scoring_fct_norm 2.0 +699 10 optimizer.lr 0.007716076607320558 +699 10 training.batch_size 2.0 +699 10 training.label_smoothing 0.08862961567893203 +699 11 model.embedding_dim 1.0 +699 11 model.scoring_fct_norm 1.0 +699 11 optimizer.lr 0.0016293037972192103 +699 11 training.batch_size 1.0 +699 11 training.label_smoothing 0.03615540753435197 +699 12 model.embedding_dim 2.0 +699 12 model.scoring_fct_norm 2.0 +699 12 optimizer.lr 0.021606597699301046 +699 12 training.batch_size 0.0 +699 12 training.label_smoothing 0.002875084959800656 +699 13 model.embedding_dim 1.0 +699 13 model.scoring_fct_norm 2.0 +699 13 optimizer.lr 0.0369136920178319 +699 13 training.batch_size 0.0 +699 13 training.label_smoothing 0.06935897653282652 +699 14 model.embedding_dim 0.0 +699 14 model.scoring_fct_norm 2.0 +699 14 optimizer.lr 0.0011376506263764397 +699 14 training.batch_size 1.0 +699 14 training.label_smoothing 0.0022047550380319 +699 15 model.embedding_dim 0.0 +699 15 model.scoring_fct_norm 1.0 +699 15 optimizer.lr 0.010421129364865339 +699 15 training.batch_size 1.0 +699 15 training.label_smoothing 0.19514141806926216 +699 16 model.embedding_dim 0.0 +699 16 model.scoring_fct_norm 2.0 +699 16 optimizer.lr 0.01230486392558482 +699 16 
training.batch_size 0.0 +699 16 training.label_smoothing 0.00181573005461649 +699 17 model.embedding_dim 1.0 +699 17 model.scoring_fct_norm 2.0 +699 17 optimizer.lr 0.00960797105793965 +699 17 training.batch_size 1.0 +699 17 training.label_smoothing 0.04239316881892529 +699 18 model.embedding_dim 2.0 +699 18 model.scoring_fct_norm 1.0 +699 18 optimizer.lr 0.03363278731199713 +699 18 training.batch_size 1.0 +699 18 training.label_smoothing 0.005541424583700596 +699 19 model.embedding_dim 0.0 +699 19 model.scoring_fct_norm 2.0 +699 19 optimizer.lr 0.010722748740492031 +699 19 training.batch_size 0.0 +699 19 training.label_smoothing 0.15470285594552022 +699 20 model.embedding_dim 0.0 +699 20 model.scoring_fct_norm 1.0 +699 20 optimizer.lr 0.06504986046305541 +699 20 training.batch_size 2.0 +699 20 training.label_smoothing 0.1813918094998922 +699 21 model.embedding_dim 1.0 +699 21 model.scoring_fct_norm 1.0 +699 21 optimizer.lr 0.09001285408144237 +699 21 training.batch_size 1.0 +699 21 training.label_smoothing 0.001421616646302537 +699 22 model.embedding_dim 0.0 +699 22 model.scoring_fct_norm 2.0 +699 22 optimizer.lr 0.0037591796564096576 +699 22 training.batch_size 2.0 +699 22 training.label_smoothing 0.21112858335819798 +699 23 model.embedding_dim 1.0 +699 23 model.scoring_fct_norm 1.0 +699 23 optimizer.lr 0.0535862211502404 +699 23 training.batch_size 0.0 +699 23 training.label_smoothing 0.052448184560036144 +699 24 model.embedding_dim 2.0 +699 24 model.scoring_fct_norm 1.0 +699 24 optimizer.lr 0.017424657210847953 +699 24 training.batch_size 2.0 +699 24 training.label_smoothing 0.04870769254155459 +699 25 model.embedding_dim 1.0 +699 25 model.scoring_fct_norm 1.0 +699 25 optimizer.lr 0.03646967420550967 +699 25 training.batch_size 2.0 +699 25 training.label_smoothing 0.002416774130471044 +699 26 model.embedding_dim 1.0 +699 26 model.scoring_fct_norm 2.0 +699 26 optimizer.lr 0.007025577742369262 +699 26 training.batch_size 0.0 +699 26 training.label_smoothing 
0.025318056657234043 +699 27 model.embedding_dim 2.0 +699 27 model.scoring_fct_norm 1.0 +699 27 optimizer.lr 0.003021985346034147 +699 27 training.batch_size 2.0 +699 27 training.label_smoothing 0.11734834258147002 +699 28 model.embedding_dim 0.0 +699 28 model.scoring_fct_norm 1.0 +699 28 optimizer.lr 0.002145990274550576 +699 28 training.batch_size 2.0 +699 28 training.label_smoothing 0.02953812091735579 +699 29 model.embedding_dim 0.0 +699 29 model.scoring_fct_norm 1.0 +699 29 optimizer.lr 0.002317210414113198 +699 29 training.batch_size 0.0 +699 29 training.label_smoothing 0.060662773188975956 +699 30 model.embedding_dim 2.0 +699 30 model.scoring_fct_norm 2.0 +699 30 optimizer.lr 0.0020085062264779065 +699 30 training.batch_size 2.0 +699 30 training.label_smoothing 0.004457373354717516 +699 31 model.embedding_dim 2.0 +699 31 model.scoring_fct_norm 2.0 +699 31 optimizer.lr 0.00181882895959857 +699 31 training.batch_size 0.0 +699 31 training.label_smoothing 0.001407192694247122 +699 32 model.embedding_dim 2.0 +699 32 model.scoring_fct_norm 2.0 +699 32 optimizer.lr 0.0038961259622112008 +699 32 training.batch_size 2.0 +699 32 training.label_smoothing 0.15790832549026373 +699 33 model.embedding_dim 0.0 +699 33 model.scoring_fct_norm 1.0 +699 33 optimizer.lr 0.08744119881491856 +699 33 training.batch_size 1.0 +699 33 training.label_smoothing 0.0030342609650919382 +699 34 model.embedding_dim 2.0 +699 34 model.scoring_fct_norm 2.0 +699 34 optimizer.lr 0.001683889463826283 +699 34 training.batch_size 2.0 +699 34 training.label_smoothing 0.006985606966425029 +699 35 model.embedding_dim 2.0 +699 35 model.scoring_fct_norm 1.0 +699 35 optimizer.lr 0.0035251502457361956 +699 35 training.batch_size 0.0 +699 35 training.label_smoothing 0.0010850308633854559 +699 36 model.embedding_dim 0.0 +699 36 model.scoring_fct_norm 2.0 +699 36 optimizer.lr 0.003910340859094421 +699 36 training.batch_size 0.0 +699 36 training.label_smoothing 0.017826433233939676 +699 37 model.embedding_dim 
1.0 +699 37 model.scoring_fct_norm 2.0 +699 37 optimizer.lr 0.003996305599159292 +699 37 training.batch_size 0.0 +699 37 training.label_smoothing 0.004694304121089317 +699 38 model.embedding_dim 2.0 +699 38 model.scoring_fct_norm 2.0 +699 38 optimizer.lr 0.00899591574392421 +699 38 training.batch_size 0.0 +699 38 training.label_smoothing 0.004111843261361844 +699 39 model.embedding_dim 2.0 +699 39 model.scoring_fct_norm 1.0 +699 39 optimizer.lr 0.025856413758960792 +699 39 training.batch_size 2.0 +699 39 training.label_smoothing 0.0025912006865889707 +699 40 model.embedding_dim 2.0 +699 40 model.scoring_fct_norm 2.0 +699 40 optimizer.lr 0.03821832717768062 +699 40 training.batch_size 0.0 +699 40 training.label_smoothing 0.014828721256251058 +699 41 model.embedding_dim 0.0 +699 41 model.scoring_fct_norm 1.0 +699 41 optimizer.lr 0.06103664384930436 +699 41 training.batch_size 0.0 +699 41 training.label_smoothing 0.04048831898133506 +699 42 model.embedding_dim 1.0 +699 42 model.scoring_fct_norm 1.0 +699 42 optimizer.lr 0.046767850029351375 +699 42 training.batch_size 0.0 +699 42 training.label_smoothing 0.0021922892144012923 +699 43 model.embedding_dim 2.0 +699 43 model.scoring_fct_norm 1.0 +699 43 optimizer.lr 0.0033821064278415506 +699 43 training.batch_size 1.0 +699 43 training.label_smoothing 0.013408628931735734 +699 44 model.embedding_dim 2.0 +699 44 model.scoring_fct_norm 1.0 +699 44 optimizer.lr 0.0032111905787005914 +699 44 training.batch_size 0.0 +699 44 training.label_smoothing 0.001800108244838245 +699 45 model.embedding_dim 1.0 +699 45 model.scoring_fct_norm 1.0 +699 45 optimizer.lr 0.0020984658953954816 +699 45 training.batch_size 1.0 +699 45 training.label_smoothing 0.20813190916660418 +699 46 model.embedding_dim 0.0 +699 46 model.scoring_fct_norm 1.0 +699 46 optimizer.lr 0.007005160892113788 +699 46 training.batch_size 0.0 +699 46 training.label_smoothing 0.19210851429607714 +699 47 model.embedding_dim 2.0 +699 47 model.scoring_fct_norm 1.0 +699 47 
optimizer.lr 0.035471446370856786 +699 47 training.batch_size 1.0 +699 47 training.label_smoothing 0.05032155248272667 +699 48 model.embedding_dim 0.0 +699 48 model.scoring_fct_norm 1.0 +699 48 optimizer.lr 0.07611290392528208 +699 48 training.batch_size 1.0 +699 48 training.label_smoothing 0.0014996956640688005 +699 49 model.embedding_dim 1.0 +699 49 model.scoring_fct_norm 1.0 +699 49 optimizer.lr 0.013135500024890607 +699 49 training.batch_size 0.0 +699 49 training.label_smoothing 0.0010960888994666909 +699 50 model.embedding_dim 1.0 +699 50 model.scoring_fct_norm 2.0 +699 50 optimizer.lr 0.07393064287830171 +699 50 training.batch_size 0.0 +699 50 training.label_smoothing 0.6427820571694026 +699 51 model.embedding_dim 2.0 +699 51 model.scoring_fct_norm 2.0 +699 51 optimizer.lr 0.051580576907643096 +699 51 training.batch_size 0.0 +699 51 training.label_smoothing 0.0010681411668644737 +699 52 model.embedding_dim 1.0 +699 52 model.scoring_fct_norm 1.0 +699 52 optimizer.lr 0.0010214327826242044 +699 52 training.batch_size 1.0 +699 52 training.label_smoothing 0.14241865573003834 +699 53 model.embedding_dim 1.0 +699 53 model.scoring_fct_norm 2.0 +699 53 optimizer.lr 0.010965131971763102 +699 53 training.batch_size 1.0 +699 53 training.label_smoothing 0.3796312677502245 +699 54 model.embedding_dim 0.0 +699 54 model.scoring_fct_norm 1.0 +699 54 optimizer.lr 0.00203411288567309 +699 54 training.batch_size 2.0 +699 54 training.label_smoothing 0.016169791347356216 +699 55 model.embedding_dim 0.0 +699 55 model.scoring_fct_norm 1.0 +699 55 optimizer.lr 0.0016685165914291547 +699 55 training.batch_size 2.0 +699 55 training.label_smoothing 0.7523926162607445 +699 56 model.embedding_dim 1.0 +699 56 model.scoring_fct_norm 1.0 +699 56 optimizer.lr 0.001286820893496743 +699 56 training.batch_size 1.0 +699 56 training.label_smoothing 0.0010544048260355333 +699 57 model.embedding_dim 1.0 +699 57 model.scoring_fct_norm 2.0 +699 57 optimizer.lr 0.04066810907417863 +699 57 
training.batch_size 2.0 +699 57 training.label_smoothing 0.017262783327292124 +699 58 model.embedding_dim 1.0 +699 58 model.scoring_fct_norm 2.0 +699 58 optimizer.lr 0.0032782153258671697 +699 58 training.batch_size 1.0 +699 58 training.label_smoothing 0.0033129654175701475 +699 59 model.embedding_dim 0.0 +699 59 model.scoring_fct_norm 1.0 +699 59 optimizer.lr 0.08047065343554574 +699 59 training.batch_size 0.0 +699 59 training.label_smoothing 0.00414421934949148 +699 60 model.embedding_dim 0.0 +699 60 model.scoring_fct_norm 1.0 +699 60 optimizer.lr 0.002865424050836614 +699 60 training.batch_size 1.0 +699 60 training.label_smoothing 0.18274502776703752 +699 61 model.embedding_dim 2.0 +699 61 model.scoring_fct_norm 1.0 +699 61 optimizer.lr 0.002796797462821582 +699 61 training.batch_size 1.0 +699 61 training.label_smoothing 0.02675606729379769 +699 62 model.embedding_dim 1.0 +699 62 model.scoring_fct_norm 2.0 +699 62 optimizer.lr 0.0011990684255591892 +699 62 training.batch_size 0.0 +699 62 training.label_smoothing 0.2403312737715101 +699 63 model.embedding_dim 0.0 +699 63 model.scoring_fct_norm 2.0 +699 63 optimizer.lr 0.0049704058058141366 +699 63 training.batch_size 0.0 +699 63 training.label_smoothing 0.5791975499252143 +699 64 model.embedding_dim 1.0 +699 64 model.scoring_fct_norm 1.0 +699 64 optimizer.lr 0.007069885415701081 +699 64 training.batch_size 2.0 +699 64 training.label_smoothing 0.26764648661438317 +699 65 model.embedding_dim 2.0 +699 65 model.scoring_fct_norm 2.0 +699 65 optimizer.lr 0.042641986177175524 +699 65 training.batch_size 0.0 +699 65 training.label_smoothing 0.632241857966821 +699 66 model.embedding_dim 0.0 +699 66 model.scoring_fct_norm 2.0 +699 66 optimizer.lr 0.060926544680535946 +699 66 training.batch_size 0.0 +699 66 training.label_smoothing 0.0059645018421820295 +699 67 model.embedding_dim 2.0 +699 67 model.scoring_fct_norm 2.0 +699 67 optimizer.lr 0.0011153146578578824 +699 67 training.batch_size 2.0 +699 67 
training.label_smoothing 0.0018233844745406003 +699 68 model.embedding_dim 2.0 +699 68 model.scoring_fct_norm 2.0 +699 68 optimizer.lr 0.01875601123916963 +699 68 training.batch_size 1.0 +699 68 training.label_smoothing 0.005417217386149913 +699 69 model.embedding_dim 1.0 +699 69 model.scoring_fct_norm 1.0 +699 69 optimizer.lr 0.03161925504652099 +699 69 training.batch_size 1.0 +699 69 training.label_smoothing 0.001088253071196431 +699 70 model.embedding_dim 1.0 +699 70 model.scoring_fct_norm 1.0 +699 70 optimizer.lr 0.0023085226580095365 +699 70 training.batch_size 2.0 +699 70 training.label_smoothing 0.0025711773189211217 +699 71 model.embedding_dim 0.0 +699 71 model.scoring_fct_norm 1.0 +699 71 optimizer.lr 0.06772005866542732 +699 71 training.batch_size 2.0 +699 71 training.label_smoothing 0.021949098369301822 +699 72 model.embedding_dim 2.0 +699 72 model.scoring_fct_norm 2.0 +699 72 optimizer.lr 0.007238401589529798 +699 72 training.batch_size 2.0 +699 72 training.label_smoothing 0.14649866605741182 +699 73 model.embedding_dim 2.0 +699 73 model.scoring_fct_norm 1.0 +699 73 optimizer.lr 0.010999132755603644 +699 73 training.batch_size 2.0 +699 73 training.label_smoothing 0.0189814297347147 +699 74 model.embedding_dim 1.0 +699 74 model.scoring_fct_norm 2.0 +699 74 optimizer.lr 0.01947053091771885 +699 74 training.batch_size 2.0 +699 74 training.label_smoothing 0.0056198156134543995 +699 75 model.embedding_dim 2.0 +699 75 model.scoring_fct_norm 2.0 +699 75 optimizer.lr 0.0020871002670752666 +699 75 training.batch_size 1.0 +699 75 training.label_smoothing 0.03911103841443648 +699 76 model.embedding_dim 2.0 +699 76 model.scoring_fct_norm 1.0 +699 76 optimizer.lr 0.0017936284306375974 +699 76 training.batch_size 2.0 +699 76 training.label_smoothing 0.022705673762458713 +699 77 model.embedding_dim 2.0 +699 77 model.scoring_fct_norm 1.0 +699 77 optimizer.lr 0.07977481443216958 +699 77 training.batch_size 2.0 +699 77 training.label_smoothing 0.0016546670852784307 +699 
78 model.embedding_dim 1.0 +699 78 model.scoring_fct_norm 1.0 +699 78 optimizer.lr 0.016101868211013684 +699 78 training.batch_size 2.0 +699 78 training.label_smoothing 0.03757614665872898 +699 79 model.embedding_dim 0.0 +699 79 model.scoring_fct_norm 2.0 +699 79 optimizer.lr 0.004904398575440011 +699 79 training.batch_size 2.0 +699 79 training.label_smoothing 0.058421096436109425 +699 80 model.embedding_dim 0.0 +699 80 model.scoring_fct_norm 2.0 +699 80 optimizer.lr 0.02201305240907499 +699 80 training.batch_size 0.0 +699 80 training.label_smoothing 0.0014170491240355877 +699 81 model.embedding_dim 0.0 +699 81 model.scoring_fct_norm 2.0 +699 81 optimizer.lr 0.0029164415583249645 +699 81 training.batch_size 0.0 +699 81 training.label_smoothing 0.30313632470032553 +699 82 model.embedding_dim 2.0 +699 82 model.scoring_fct_norm 2.0 +699 82 optimizer.lr 0.01685251283835334 +699 82 training.batch_size 2.0 +699 82 training.label_smoothing 0.6621890323512181 +699 83 model.embedding_dim 2.0 +699 83 model.scoring_fct_norm 1.0 +699 83 optimizer.lr 0.00901959807522416 +699 83 training.batch_size 2.0 +699 83 training.label_smoothing 0.005252515180560302 +699 84 model.embedding_dim 1.0 +699 84 model.scoring_fct_norm 2.0 +699 84 optimizer.lr 0.0026565433291734783 +699 84 training.batch_size 1.0 +699 84 training.label_smoothing 0.002511145260900359 +699 85 model.embedding_dim 0.0 +699 85 model.scoring_fct_norm 2.0 +699 85 optimizer.lr 0.009473460929246912 +699 85 training.batch_size 0.0 +699 85 training.label_smoothing 0.6506966785125143 +699 86 model.embedding_dim 0.0 +699 86 model.scoring_fct_norm 2.0 +699 86 optimizer.lr 0.03012867604735191 +699 86 training.batch_size 2.0 +699 86 training.label_smoothing 0.02555141151002932 +699 87 model.embedding_dim 0.0 +699 87 model.scoring_fct_norm 1.0 +699 87 optimizer.lr 0.08361227608055025 +699 87 training.batch_size 2.0 +699 87 training.label_smoothing 0.008197450590812335 +699 88 model.embedding_dim 2.0 +699 88 model.scoring_fct_norm 
1.0 +699 88 optimizer.lr 0.001217019767749556 +699 88 training.batch_size 1.0 +699 88 training.label_smoothing 0.0028076709978248036 +699 89 model.embedding_dim 2.0 +699 89 model.scoring_fct_norm 2.0 +699 89 optimizer.lr 0.03974275246328736 +699 89 training.batch_size 2.0 +699 89 training.label_smoothing 0.0011121666259572454 +699 90 model.embedding_dim 1.0 +699 90 model.scoring_fct_norm 2.0 +699 90 optimizer.lr 0.016748656396693287 +699 90 training.batch_size 1.0 +699 90 training.label_smoothing 0.016218882164329745 +699 91 model.embedding_dim 2.0 +699 91 model.scoring_fct_norm 2.0 +699 91 optimizer.lr 0.07791589981333377 +699 91 training.batch_size 2.0 +699 91 training.label_smoothing 0.0025702336705550528 +699 92 model.embedding_dim 0.0 +699 92 model.scoring_fct_norm 2.0 +699 92 optimizer.lr 0.005071993102213763 +699 92 training.batch_size 1.0 +699 92 training.label_smoothing 0.0018400328406851564 +699 93 model.embedding_dim 0.0 +699 93 model.scoring_fct_norm 2.0 +699 93 optimizer.lr 0.01496454726667368 +699 93 training.batch_size 0.0 +699 93 training.label_smoothing 0.7854891332737789 +699 94 model.embedding_dim 2.0 +699 94 model.scoring_fct_norm 2.0 +699 94 optimizer.lr 0.00111704956737574 +699 94 training.batch_size 2.0 +699 94 training.label_smoothing 0.9531148944010344 +699 95 model.embedding_dim 1.0 +699 95 model.scoring_fct_norm 1.0 +699 95 optimizer.lr 0.0642480636900819 +699 95 training.batch_size 0.0 +699 95 training.label_smoothing 0.2193255841497399 +699 96 model.embedding_dim 0.0 +699 96 model.scoring_fct_norm 2.0 +699 96 optimizer.lr 0.010856142039455929 +699 96 training.batch_size 1.0 +699 96 training.label_smoothing 0.8679226103694213 +699 97 model.embedding_dim 1.0 +699 97 model.scoring_fct_norm 1.0 +699 97 optimizer.lr 0.0019386422883170072 +699 97 training.batch_size 0.0 +699 97 training.label_smoothing 0.371040670443391 +699 98 model.embedding_dim 1.0 +699 98 model.scoring_fct_norm 2.0 +699 98 optimizer.lr 0.051146075886192556 +699 98 
training.batch_size 0.0 +699 98 training.label_smoothing 0.002960452818894668 +699 99 model.embedding_dim 0.0 +699 99 model.scoring_fct_norm 2.0 +699 99 optimizer.lr 0.0017085303592047898 +699 99 training.batch_size 0.0 +699 99 training.label_smoothing 0.08019317120951948 +699 100 model.embedding_dim 1.0 +699 100 model.scoring_fct_norm 1.0 +699 100 optimizer.lr 0.021924229552685116 +699 100 training.batch_size 0.0 +699 100 training.label_smoothing 0.284150304217614 +699 1 dataset """kinships""" +699 1 model """structuredembedding""" +699 1 loss """bceaftersigmoid""" +699 1 regularizer """no""" +699 1 optimizer """adam""" +699 1 training_loop """lcwa""" +699 1 evaluator """rankbased""" +699 2 dataset """kinships""" +699 2 model """structuredembedding""" +699 2 loss """bceaftersigmoid""" +699 2 regularizer """no""" +699 2 optimizer """adam""" +699 2 training_loop """lcwa""" +699 2 evaluator """rankbased""" +699 3 dataset """kinships""" +699 3 model """structuredembedding""" +699 3 loss """bceaftersigmoid""" +699 3 regularizer """no""" +699 3 optimizer """adam""" +699 3 training_loop """lcwa""" +699 3 evaluator """rankbased""" +699 4 dataset """kinships""" +699 4 model """structuredembedding""" +699 4 loss """bceaftersigmoid""" +699 4 regularizer """no""" +699 4 optimizer """adam""" +699 4 training_loop """lcwa""" +699 4 evaluator """rankbased""" +699 5 dataset """kinships""" +699 5 model """structuredembedding""" +699 5 loss """bceaftersigmoid""" +699 5 regularizer """no""" +699 5 optimizer """adam""" +699 5 training_loop """lcwa""" +699 5 evaluator """rankbased""" +699 6 dataset """kinships""" +699 6 model """structuredembedding""" +699 6 loss """bceaftersigmoid""" +699 6 regularizer """no""" +699 6 optimizer """adam""" +699 6 training_loop """lcwa""" +699 6 evaluator """rankbased""" +699 7 dataset """kinships""" +699 7 model """structuredembedding""" +699 7 loss """bceaftersigmoid""" +699 7 regularizer """no""" +699 7 optimizer """adam""" +699 7 training_loop 
"""lcwa""" +699 7 evaluator """rankbased""" +699 8 dataset """kinships""" +699 8 model """structuredembedding""" +699 8 loss """bceaftersigmoid""" +699 8 regularizer """no""" +699 8 optimizer """adam""" +699 8 training_loop """lcwa""" +699 8 evaluator """rankbased""" +699 9 dataset """kinships""" +699 9 model """structuredembedding""" +699 9 loss """bceaftersigmoid""" +699 9 regularizer """no""" +699 9 optimizer """adam""" +699 9 training_loop """lcwa""" +699 9 evaluator """rankbased""" +699 10 dataset """kinships""" +699 10 model """structuredembedding""" +699 10 loss """bceaftersigmoid""" +699 10 regularizer """no""" +699 10 optimizer """adam""" +699 10 training_loop """lcwa""" +699 10 evaluator """rankbased""" +699 11 dataset """kinships""" +699 11 model """structuredembedding""" +699 11 loss """bceaftersigmoid""" +699 11 regularizer """no""" +699 11 optimizer """adam""" +699 11 training_loop """lcwa""" +699 11 evaluator """rankbased""" +699 12 dataset """kinships""" +699 12 model """structuredembedding""" +699 12 loss """bceaftersigmoid""" +699 12 regularizer """no""" +699 12 optimizer """adam""" +699 12 training_loop """lcwa""" +699 12 evaluator """rankbased""" +699 13 dataset """kinships""" +699 13 model """structuredembedding""" +699 13 loss """bceaftersigmoid""" +699 13 regularizer """no""" +699 13 optimizer """adam""" +699 13 training_loop """lcwa""" +699 13 evaluator """rankbased""" +699 14 dataset """kinships""" +699 14 model """structuredembedding""" +699 14 loss """bceaftersigmoid""" +699 14 regularizer """no""" +699 14 optimizer """adam""" +699 14 training_loop """lcwa""" +699 14 evaluator """rankbased""" +699 15 dataset """kinships""" +699 15 model """structuredembedding""" +699 15 loss """bceaftersigmoid""" +699 15 regularizer """no""" +699 15 optimizer """adam""" +699 15 training_loop """lcwa""" +699 15 evaluator """rankbased""" +699 16 dataset """kinships""" +699 16 model """structuredembedding""" +699 16 loss """bceaftersigmoid""" +699 16 
regularizer """no""" +699 16 optimizer """adam""" +699 16 training_loop """lcwa""" +699 16 evaluator """rankbased""" +699 17 dataset """kinships""" +699 17 model """structuredembedding""" +699 17 loss """bceaftersigmoid""" +699 17 regularizer """no""" +699 17 optimizer """adam""" +699 17 training_loop """lcwa""" +699 17 evaluator """rankbased""" +699 18 dataset """kinships""" +699 18 model """structuredembedding""" +699 18 loss """bceaftersigmoid""" +699 18 regularizer """no""" +699 18 optimizer """adam""" +699 18 training_loop """lcwa""" +699 18 evaluator """rankbased""" +699 19 dataset """kinships""" +699 19 model """structuredembedding""" +699 19 loss """bceaftersigmoid""" +699 19 regularizer """no""" +699 19 optimizer """adam""" +699 19 training_loop """lcwa""" +699 19 evaluator """rankbased""" +699 20 dataset """kinships""" +699 20 model """structuredembedding""" +699 20 loss """bceaftersigmoid""" +699 20 regularizer """no""" +699 20 optimizer """adam""" +699 20 training_loop """lcwa""" +699 20 evaluator """rankbased""" +699 21 dataset """kinships""" +699 21 model """structuredembedding""" +699 21 loss """bceaftersigmoid""" +699 21 regularizer """no""" +699 21 optimizer """adam""" +699 21 training_loop """lcwa""" +699 21 evaluator """rankbased""" +699 22 dataset """kinships""" +699 22 model """structuredembedding""" +699 22 loss """bceaftersigmoid""" +699 22 regularizer """no""" +699 22 optimizer """adam""" +699 22 training_loop """lcwa""" +699 22 evaluator """rankbased""" +699 23 dataset """kinships""" +699 23 model """structuredembedding""" +699 23 loss """bceaftersigmoid""" +699 23 regularizer """no""" +699 23 optimizer """adam""" +699 23 training_loop """lcwa""" +699 23 evaluator """rankbased""" +699 24 dataset """kinships""" +699 24 model """structuredembedding""" +699 24 loss """bceaftersigmoid""" +699 24 regularizer """no""" +699 24 optimizer """adam""" +699 24 training_loop """lcwa""" +699 24 evaluator """rankbased""" +699 25 dataset """kinships""" 
+699 25 model """structuredembedding""" +699 25 loss """bceaftersigmoid""" +699 25 regularizer """no""" +699 25 optimizer """adam""" +699 25 training_loop """lcwa""" +699 25 evaluator """rankbased""" +699 26 dataset """kinships""" +699 26 model """structuredembedding""" +699 26 loss """bceaftersigmoid""" +699 26 regularizer """no""" +699 26 optimizer """adam""" +699 26 training_loop """lcwa""" +699 26 evaluator """rankbased""" +699 27 dataset """kinships""" +699 27 model """structuredembedding""" +699 27 loss """bceaftersigmoid""" +699 27 regularizer """no""" +699 27 optimizer """adam""" +699 27 training_loop """lcwa""" +699 27 evaluator """rankbased""" +699 28 dataset """kinships""" +699 28 model """structuredembedding""" +699 28 loss """bceaftersigmoid""" +699 28 regularizer """no""" +699 28 optimizer """adam""" +699 28 training_loop """lcwa""" +699 28 evaluator """rankbased""" +699 29 dataset """kinships""" +699 29 model """structuredembedding""" +699 29 loss """bceaftersigmoid""" +699 29 regularizer """no""" +699 29 optimizer """adam""" +699 29 training_loop """lcwa""" +699 29 evaluator """rankbased""" +699 30 dataset """kinships""" +699 30 model """structuredembedding""" +699 30 loss """bceaftersigmoid""" +699 30 regularizer """no""" +699 30 optimizer """adam""" +699 30 training_loop """lcwa""" +699 30 evaluator """rankbased""" +699 31 dataset """kinships""" +699 31 model """structuredembedding""" +699 31 loss """bceaftersigmoid""" +699 31 regularizer """no""" +699 31 optimizer """adam""" +699 31 training_loop """lcwa""" +699 31 evaluator """rankbased""" +699 32 dataset """kinships""" +699 32 model """structuredembedding""" +699 32 loss """bceaftersigmoid""" +699 32 regularizer """no""" +699 32 optimizer """adam""" +699 32 training_loop """lcwa""" +699 32 evaluator """rankbased""" +699 33 dataset """kinships""" +699 33 model """structuredembedding""" +699 33 loss """bceaftersigmoid""" +699 33 regularizer """no""" +699 33 optimizer """adam""" +699 33 
training_loop """lcwa""" +699 33 evaluator """rankbased""" +699 34 dataset """kinships""" +699 34 model """structuredembedding""" +699 34 loss """bceaftersigmoid""" +699 34 regularizer """no""" +699 34 optimizer """adam""" +699 34 training_loop """lcwa""" +699 34 evaluator """rankbased""" +699 35 dataset """kinships""" +699 35 model """structuredembedding""" +699 35 loss """bceaftersigmoid""" +699 35 regularizer """no""" +699 35 optimizer """adam""" +699 35 training_loop """lcwa""" +699 35 evaluator """rankbased""" +699 36 dataset """kinships""" +699 36 model """structuredembedding""" +699 36 loss """bceaftersigmoid""" +699 36 regularizer """no""" +699 36 optimizer """adam""" +699 36 training_loop """lcwa""" +699 36 evaluator """rankbased""" +699 37 dataset """kinships""" +699 37 model """structuredembedding""" +699 37 loss """bceaftersigmoid""" +699 37 regularizer """no""" +699 37 optimizer """adam""" +699 37 training_loop """lcwa""" +699 37 evaluator """rankbased""" +699 38 dataset """kinships""" +699 38 model """structuredembedding""" +699 38 loss """bceaftersigmoid""" +699 38 regularizer """no""" +699 38 optimizer """adam""" +699 38 training_loop """lcwa""" +699 38 evaluator """rankbased""" +699 39 dataset """kinships""" +699 39 model """structuredembedding""" +699 39 loss """bceaftersigmoid""" +699 39 regularizer """no""" +699 39 optimizer """adam""" +699 39 training_loop """lcwa""" +699 39 evaluator """rankbased""" +699 40 dataset """kinships""" +699 40 model """structuredembedding""" +699 40 loss """bceaftersigmoid""" +699 40 regularizer """no""" +699 40 optimizer """adam""" +699 40 training_loop """lcwa""" +699 40 evaluator """rankbased""" +699 41 dataset """kinships""" +699 41 model """structuredembedding""" +699 41 loss """bceaftersigmoid""" +699 41 regularizer """no""" +699 41 optimizer """adam""" +699 41 training_loop """lcwa""" +699 41 evaluator """rankbased""" +699 42 dataset """kinships""" +699 42 model """structuredembedding""" +699 42 loss 
"""bceaftersigmoid""" +699 42 regularizer """no""" +699 42 optimizer """adam""" +699 42 training_loop """lcwa""" +699 42 evaluator """rankbased""" +699 43 dataset """kinships""" +699 43 model """structuredembedding""" +699 43 loss """bceaftersigmoid""" +699 43 regularizer """no""" +699 43 optimizer """adam""" +699 43 training_loop """lcwa""" +699 43 evaluator """rankbased""" +699 44 dataset """kinships""" +699 44 model """structuredembedding""" +699 44 loss """bceaftersigmoid""" +699 44 regularizer """no""" +699 44 optimizer """adam""" +699 44 training_loop """lcwa""" +699 44 evaluator """rankbased""" +699 45 dataset """kinships""" +699 45 model """structuredembedding""" +699 45 loss """bceaftersigmoid""" +699 45 regularizer """no""" +699 45 optimizer """adam""" +699 45 training_loop """lcwa""" +699 45 evaluator """rankbased""" +699 46 dataset """kinships""" +699 46 model """structuredembedding""" +699 46 loss """bceaftersigmoid""" +699 46 regularizer """no""" +699 46 optimizer """adam""" +699 46 training_loop """lcwa""" +699 46 evaluator """rankbased""" +699 47 dataset """kinships""" +699 47 model """structuredembedding""" +699 47 loss """bceaftersigmoid""" +699 47 regularizer """no""" +699 47 optimizer """adam""" +699 47 training_loop """lcwa""" +699 47 evaluator """rankbased""" +699 48 dataset """kinships""" +699 48 model """structuredembedding""" +699 48 loss """bceaftersigmoid""" +699 48 regularizer """no""" +699 48 optimizer """adam""" +699 48 training_loop """lcwa""" +699 48 evaluator """rankbased""" +699 49 dataset """kinships""" +699 49 model """structuredembedding""" +699 49 loss """bceaftersigmoid""" +699 49 regularizer """no""" +699 49 optimizer """adam""" +699 49 training_loop """lcwa""" +699 49 evaluator """rankbased""" +699 50 dataset """kinships""" +699 50 model """structuredembedding""" +699 50 loss """bceaftersigmoid""" +699 50 regularizer """no""" +699 50 optimizer """adam""" +699 50 training_loop """lcwa""" +699 50 evaluator """rankbased""" +699 
51 dataset """kinships""" +699 51 model """structuredembedding""" +699 51 loss """bceaftersigmoid""" +699 51 regularizer """no""" +699 51 optimizer """adam""" +699 51 training_loop """lcwa""" +699 51 evaluator """rankbased""" +699 52 dataset """kinships""" +699 52 model """structuredembedding""" +699 52 loss """bceaftersigmoid""" +699 52 regularizer """no""" +699 52 optimizer """adam""" +699 52 training_loop """lcwa""" +699 52 evaluator """rankbased""" +699 53 dataset """kinships""" +699 53 model """structuredembedding""" +699 53 loss """bceaftersigmoid""" +699 53 regularizer """no""" +699 53 optimizer """adam""" +699 53 training_loop """lcwa""" +699 53 evaluator """rankbased""" +699 54 dataset """kinships""" +699 54 model """structuredembedding""" +699 54 loss """bceaftersigmoid""" +699 54 regularizer """no""" +699 54 optimizer """adam""" +699 54 training_loop """lcwa""" +699 54 evaluator """rankbased""" +699 55 dataset """kinships""" +699 55 model """structuredembedding""" +699 55 loss """bceaftersigmoid""" +699 55 regularizer """no""" +699 55 optimizer """adam""" +699 55 training_loop """lcwa""" +699 55 evaluator """rankbased""" +699 56 dataset """kinships""" +699 56 model """structuredembedding""" +699 56 loss """bceaftersigmoid""" +699 56 regularizer """no""" +699 56 optimizer """adam""" +699 56 training_loop """lcwa""" +699 56 evaluator """rankbased""" +699 57 dataset """kinships""" +699 57 model """structuredembedding""" +699 57 loss """bceaftersigmoid""" +699 57 regularizer """no""" +699 57 optimizer """adam""" +699 57 training_loop """lcwa""" +699 57 evaluator """rankbased""" +699 58 dataset """kinships""" +699 58 model """structuredembedding""" +699 58 loss """bceaftersigmoid""" +699 58 regularizer """no""" +699 58 optimizer """adam""" +699 58 training_loop """lcwa""" +699 58 evaluator """rankbased""" +699 59 dataset """kinships""" +699 59 model """structuredembedding""" +699 59 loss """bceaftersigmoid""" +699 59 regularizer """no""" +699 59 optimizer 
"""adam""" +699 59 training_loop """lcwa""" +699 59 evaluator """rankbased""" +699 60 dataset """kinships""" +699 60 model """structuredembedding""" +699 60 loss """bceaftersigmoid""" +699 60 regularizer """no""" +699 60 optimizer """adam""" +699 60 training_loop """lcwa""" +699 60 evaluator """rankbased""" +699 61 dataset """kinships""" +699 61 model """structuredembedding""" +699 61 loss """bceaftersigmoid""" +699 61 regularizer """no""" +699 61 optimizer """adam""" +699 61 training_loop """lcwa""" +699 61 evaluator """rankbased""" +699 62 dataset """kinships""" +699 62 model """structuredembedding""" +699 62 loss """bceaftersigmoid""" +699 62 regularizer """no""" +699 62 optimizer """adam""" +699 62 training_loop """lcwa""" +699 62 evaluator """rankbased""" +699 63 dataset """kinships""" +699 63 model """structuredembedding""" +699 63 loss """bceaftersigmoid""" +699 63 regularizer """no""" +699 63 optimizer """adam""" +699 63 training_loop """lcwa""" +699 63 evaluator """rankbased""" +699 64 dataset """kinships""" +699 64 model """structuredembedding""" +699 64 loss """bceaftersigmoid""" +699 64 regularizer """no""" +699 64 optimizer """adam""" +699 64 training_loop """lcwa""" +699 64 evaluator """rankbased""" +699 65 dataset """kinships""" +699 65 model """structuredembedding""" +699 65 loss """bceaftersigmoid""" +699 65 regularizer """no""" +699 65 optimizer """adam""" +699 65 training_loop """lcwa""" +699 65 evaluator """rankbased""" +699 66 dataset """kinships""" +699 66 model """structuredembedding""" +699 66 loss """bceaftersigmoid""" +699 66 regularizer """no""" +699 66 optimizer """adam""" +699 66 training_loop """lcwa""" +699 66 evaluator """rankbased""" +699 67 dataset """kinships""" +699 67 model """structuredembedding""" +699 67 loss """bceaftersigmoid""" +699 67 regularizer """no""" +699 67 optimizer """adam""" +699 67 training_loop """lcwa""" +699 67 evaluator """rankbased""" +699 68 dataset """kinships""" +699 68 model """structuredembedding""" 
+699 68 loss """bceaftersigmoid""" +699 68 regularizer """no""" +699 68 optimizer """adam""" +699 68 training_loop """lcwa""" +699 68 evaluator """rankbased""" +699 69 dataset """kinships""" +699 69 model """structuredembedding""" +699 69 loss """bceaftersigmoid""" +699 69 regularizer """no""" +699 69 optimizer """adam""" +699 69 training_loop """lcwa""" +699 69 evaluator """rankbased""" +699 70 dataset """kinships""" +699 70 model """structuredembedding""" +699 70 loss """bceaftersigmoid""" +699 70 regularizer """no""" +699 70 optimizer """adam""" +699 70 training_loop """lcwa""" +699 70 evaluator """rankbased""" +699 71 dataset """kinships""" +699 71 model """structuredembedding""" +699 71 loss """bceaftersigmoid""" +699 71 regularizer """no""" +699 71 optimizer """adam""" +699 71 training_loop """lcwa""" +699 71 evaluator """rankbased""" +699 72 dataset """kinships""" +699 72 model """structuredembedding""" +699 72 loss """bceaftersigmoid""" +699 72 regularizer """no""" +699 72 optimizer """adam""" +699 72 training_loop """lcwa""" +699 72 evaluator """rankbased""" +699 73 dataset """kinships""" +699 73 model """structuredembedding""" +699 73 loss """bceaftersigmoid""" +699 73 regularizer """no""" +699 73 optimizer """adam""" +699 73 training_loop """lcwa""" +699 73 evaluator """rankbased""" +699 74 dataset """kinships""" +699 74 model """structuredembedding""" +699 74 loss """bceaftersigmoid""" +699 74 regularizer """no""" +699 74 optimizer """adam""" +699 74 training_loop """lcwa""" +699 74 evaluator """rankbased""" +699 75 dataset """kinships""" +699 75 model """structuredembedding""" +699 75 loss """bceaftersigmoid""" +699 75 regularizer """no""" +699 75 optimizer """adam""" +699 75 training_loop """lcwa""" +699 75 evaluator """rankbased""" +699 76 dataset """kinships""" +699 76 model """structuredembedding""" +699 76 loss """bceaftersigmoid""" +699 76 regularizer """no""" +699 76 optimizer """adam""" +699 76 training_loop """lcwa""" +699 76 evaluator 
"""rankbased""" +699 77 dataset """kinships""" +699 77 model """structuredembedding""" +699 77 loss """bceaftersigmoid""" +699 77 regularizer """no""" +699 77 optimizer """adam""" +699 77 training_loop """lcwa""" +699 77 evaluator """rankbased""" +699 78 dataset """kinships""" +699 78 model """structuredembedding""" +699 78 loss """bceaftersigmoid""" +699 78 regularizer """no""" +699 78 optimizer """adam""" +699 78 training_loop """lcwa""" +699 78 evaluator """rankbased""" +699 79 dataset """kinships""" +699 79 model """structuredembedding""" +699 79 loss """bceaftersigmoid""" +699 79 regularizer """no""" +699 79 optimizer """adam""" +699 79 training_loop """lcwa""" +699 79 evaluator """rankbased""" +699 80 dataset """kinships""" +699 80 model """structuredembedding""" +699 80 loss """bceaftersigmoid""" +699 80 regularizer """no""" +699 80 optimizer """adam""" +699 80 training_loop """lcwa""" +699 80 evaluator """rankbased""" +699 81 dataset """kinships""" +699 81 model """structuredembedding""" +699 81 loss """bceaftersigmoid""" +699 81 regularizer """no""" +699 81 optimizer """adam""" +699 81 training_loop """lcwa""" +699 81 evaluator """rankbased""" +699 82 dataset """kinships""" +699 82 model """structuredembedding""" +699 82 loss """bceaftersigmoid""" +699 82 regularizer """no""" +699 82 optimizer """adam""" +699 82 training_loop """lcwa""" +699 82 evaluator """rankbased""" +699 83 dataset """kinships""" +699 83 model """structuredembedding""" +699 83 loss """bceaftersigmoid""" +699 83 regularizer """no""" +699 83 optimizer """adam""" +699 83 training_loop """lcwa""" +699 83 evaluator """rankbased""" +699 84 dataset """kinships""" +699 84 model """structuredembedding""" +699 84 loss """bceaftersigmoid""" +699 84 regularizer """no""" +699 84 optimizer """adam""" +699 84 training_loop """lcwa""" +699 84 evaluator """rankbased""" +699 85 dataset """kinships""" +699 85 model """structuredembedding""" +699 85 loss """bceaftersigmoid""" +699 85 regularizer """no""" 
+699 85 optimizer """adam""" +699 85 training_loop """lcwa""" +699 85 evaluator """rankbased""" +699 86 dataset """kinships""" +699 86 model """structuredembedding""" +699 86 loss """bceaftersigmoid""" +699 86 regularizer """no""" +699 86 optimizer """adam""" +699 86 training_loop """lcwa""" +699 86 evaluator """rankbased""" +699 87 dataset """kinships""" +699 87 model """structuredembedding""" +699 87 loss """bceaftersigmoid""" +699 87 regularizer """no""" +699 87 optimizer """adam""" +699 87 training_loop """lcwa""" +699 87 evaluator """rankbased""" +699 88 dataset """kinships""" +699 88 model """structuredembedding""" +699 88 loss """bceaftersigmoid""" +699 88 regularizer """no""" +699 88 optimizer """adam""" +699 88 training_loop """lcwa""" +699 88 evaluator """rankbased""" +699 89 dataset """kinships""" +699 89 model """structuredembedding""" +699 89 loss """bceaftersigmoid""" +699 89 regularizer """no""" +699 89 optimizer """adam""" +699 89 training_loop """lcwa""" +699 89 evaluator """rankbased""" +699 90 dataset """kinships""" +699 90 model """structuredembedding""" +699 90 loss """bceaftersigmoid""" +699 90 regularizer """no""" +699 90 optimizer """adam""" +699 90 training_loop """lcwa""" +699 90 evaluator """rankbased""" +699 91 dataset """kinships""" +699 91 model """structuredembedding""" +699 91 loss """bceaftersigmoid""" +699 91 regularizer """no""" +699 91 optimizer """adam""" +699 91 training_loop """lcwa""" +699 91 evaluator """rankbased""" +699 92 dataset """kinships""" +699 92 model """structuredembedding""" +699 92 loss """bceaftersigmoid""" +699 92 regularizer """no""" +699 92 optimizer """adam""" +699 92 training_loop """lcwa""" +699 92 evaluator """rankbased""" +699 93 dataset """kinships""" +699 93 model """structuredembedding""" +699 93 loss """bceaftersigmoid""" +699 93 regularizer """no""" +699 93 optimizer """adam""" +699 93 training_loop """lcwa""" +699 93 evaluator """rankbased""" +699 94 dataset """kinships""" +699 94 model 
"""structuredembedding""" +699 94 loss """bceaftersigmoid""" +699 94 regularizer """no""" +699 94 optimizer """adam""" +699 94 training_loop """lcwa""" +699 94 evaluator """rankbased""" +699 95 dataset """kinships""" +699 95 model """structuredembedding""" +699 95 loss """bceaftersigmoid""" +699 95 regularizer """no""" +699 95 optimizer """adam""" +699 95 training_loop """lcwa""" +699 95 evaluator """rankbased""" +699 96 dataset """kinships""" +699 96 model """structuredembedding""" +699 96 loss """bceaftersigmoid""" +699 96 regularizer """no""" +699 96 optimizer """adam""" +699 96 training_loop """lcwa""" +699 96 evaluator """rankbased""" +699 97 dataset """kinships""" +699 97 model """structuredembedding""" +699 97 loss """bceaftersigmoid""" +699 97 regularizer """no""" +699 97 optimizer """adam""" +699 97 training_loop """lcwa""" +699 97 evaluator """rankbased""" +699 98 dataset """kinships""" +699 98 model """structuredembedding""" +699 98 loss """bceaftersigmoid""" +699 98 regularizer """no""" +699 98 optimizer """adam""" +699 98 training_loop """lcwa""" +699 98 evaluator """rankbased""" +699 99 dataset """kinships""" +699 99 model """structuredembedding""" +699 99 loss """bceaftersigmoid""" +699 99 regularizer """no""" +699 99 optimizer """adam""" +699 99 training_loop """lcwa""" +699 99 evaluator """rankbased""" +699 100 dataset """kinships""" +699 100 model """structuredembedding""" +699 100 loss """bceaftersigmoid""" +699 100 regularizer """no""" +699 100 optimizer """adam""" +699 100 training_loop """lcwa""" +699 100 evaluator """rankbased""" +700 1 model.embedding_dim 1.0 +700 1 model.scoring_fct_norm 2.0 +700 1 optimizer.lr 0.0872113823949561 +700 1 training.batch_size 0.0 +700 1 training.label_smoothing 0.2718826138508245 +700 2 model.embedding_dim 2.0 +700 2 model.scoring_fct_norm 1.0 +700 2 optimizer.lr 0.003891640771834603 +700 2 training.batch_size 0.0 +700 2 training.label_smoothing 0.30878102486850967 +700 3 model.embedding_dim 1.0 +700 3 
model.scoring_fct_norm 1.0 +700 3 optimizer.lr 0.0015623613322804102 +700 3 training.batch_size 2.0 +700 3 training.label_smoothing 0.03774794576315184 +700 4 model.embedding_dim 2.0 +700 4 model.scoring_fct_norm 1.0 +700 4 optimizer.lr 0.0056819056188731586 +700 4 training.batch_size 0.0 +700 4 training.label_smoothing 0.0077596190989111654 +700 5 model.embedding_dim 0.0 +700 5 model.scoring_fct_norm 1.0 +700 5 optimizer.lr 0.012152297436244242 +700 5 training.batch_size 1.0 +700 5 training.label_smoothing 0.002885929808367911 +700 6 model.embedding_dim 1.0 +700 6 model.scoring_fct_norm 2.0 +700 6 optimizer.lr 0.0022714173252676713 +700 6 training.batch_size 2.0 +700 6 training.label_smoothing 0.0028474871858318903 +700 7 model.embedding_dim 2.0 +700 7 model.scoring_fct_norm 1.0 +700 7 optimizer.lr 0.0035024579352945853 +700 7 training.batch_size 2.0 +700 7 training.label_smoothing 0.005714865500441789 +700 8 model.embedding_dim 1.0 +700 8 model.scoring_fct_norm 1.0 +700 8 optimizer.lr 0.004201392990925675 +700 8 training.batch_size 2.0 +700 8 training.label_smoothing 0.377438791025719 +700 9 model.embedding_dim 1.0 +700 9 model.scoring_fct_norm 1.0 +700 9 optimizer.lr 0.0052986074276511076 +700 9 training.batch_size 0.0 +700 9 training.label_smoothing 0.16230078399319645 +700 10 model.embedding_dim 1.0 +700 10 model.scoring_fct_norm 2.0 +700 10 optimizer.lr 0.056927961168579407 +700 10 training.batch_size 1.0 +700 10 training.label_smoothing 0.12704457023760235 +700 11 model.embedding_dim 1.0 +700 11 model.scoring_fct_norm 2.0 +700 11 optimizer.lr 0.08769142450646294 +700 11 training.batch_size 0.0 +700 11 training.label_smoothing 0.03464654843826861 +700 12 model.embedding_dim 1.0 +700 12 model.scoring_fct_norm 1.0 +700 12 optimizer.lr 0.09612357064089862 +700 12 training.batch_size 2.0 +700 12 training.label_smoothing 0.19759323012128474 +700 13 model.embedding_dim 0.0 +700 13 model.scoring_fct_norm 2.0 +700 13 optimizer.lr 0.005574653460322259 +700 13 
training.batch_size 0.0 +700 13 training.label_smoothing 0.004103184598798643 +700 14 model.embedding_dim 2.0 +700 14 model.scoring_fct_norm 1.0 +700 14 optimizer.lr 0.0023265038648309653 +700 14 training.batch_size 0.0 +700 14 training.label_smoothing 0.1677260575513881 +700 15 model.embedding_dim 2.0 +700 15 model.scoring_fct_norm 2.0 +700 15 optimizer.lr 0.0032996296658548566 +700 15 training.batch_size 2.0 +700 15 training.label_smoothing 0.0013022637807815118 +700 16 model.embedding_dim 2.0 +700 16 model.scoring_fct_norm 2.0 +700 16 optimizer.lr 0.0020869871696475653 +700 16 training.batch_size 2.0 +700 16 training.label_smoothing 0.6793685305537372 +700 17 model.embedding_dim 0.0 +700 17 model.scoring_fct_norm 1.0 +700 17 optimizer.lr 0.03517105331317932 +700 17 training.batch_size 1.0 +700 17 training.label_smoothing 0.018183082810820414 +700 18 model.embedding_dim 1.0 +700 18 model.scoring_fct_norm 2.0 +700 18 optimizer.lr 0.010229750845064037 +700 18 training.batch_size 1.0 +700 18 training.label_smoothing 0.018565750497764218 +700 19 model.embedding_dim 2.0 +700 19 model.scoring_fct_norm 1.0 +700 19 optimizer.lr 0.014645572965932033 +700 19 training.batch_size 2.0 +700 19 training.label_smoothing 0.6659504106024534 +700 20 model.embedding_dim 1.0 +700 20 model.scoring_fct_norm 2.0 +700 20 optimizer.lr 0.020405877443708403 +700 20 training.batch_size 1.0 +700 20 training.label_smoothing 0.028349958615063276 +700 21 model.embedding_dim 0.0 +700 21 model.scoring_fct_norm 1.0 +700 21 optimizer.lr 0.002375691872247507 +700 21 training.batch_size 0.0 +700 21 training.label_smoothing 0.07919629866207542 +700 22 model.embedding_dim 1.0 +700 22 model.scoring_fct_norm 2.0 +700 22 optimizer.lr 0.004122709619860187 +700 22 training.batch_size 1.0 +700 22 training.label_smoothing 0.004684667630368188 +700 23 model.embedding_dim 2.0 +700 23 model.scoring_fct_norm 1.0 +700 23 optimizer.lr 0.030901978552844503 +700 23 training.batch_size 0.0 +700 23 
training.label_smoothing 0.016302211672285297 +700 24 model.embedding_dim 0.0 +700 24 model.scoring_fct_norm 1.0 +700 24 optimizer.lr 0.0011885763174163447 +700 24 training.batch_size 1.0 +700 24 training.label_smoothing 0.07058619114110039 +700 25 model.embedding_dim 2.0 +700 25 model.scoring_fct_norm 2.0 +700 25 optimizer.lr 0.0012727135860141647 +700 25 training.batch_size 2.0 +700 25 training.label_smoothing 0.00425480623027264 +700 26 model.embedding_dim 0.0 +700 26 model.scoring_fct_norm 1.0 +700 26 optimizer.lr 0.09343947304201415 +700 26 training.batch_size 2.0 +700 26 training.label_smoothing 0.00829027802668805 +700 27 model.embedding_dim 1.0 +700 27 model.scoring_fct_norm 2.0 +700 27 optimizer.lr 0.0838153906796127 +700 27 training.batch_size 2.0 +700 27 training.label_smoothing 0.9758227921274307 +700 28 model.embedding_dim 0.0 +700 28 model.scoring_fct_norm 1.0 +700 28 optimizer.lr 0.05731403564542473 +700 28 training.batch_size 2.0 +700 28 training.label_smoothing 0.0011847540812603714 +700 29 model.embedding_dim 1.0 +700 29 model.scoring_fct_norm 1.0 +700 29 optimizer.lr 0.0017785247960302995 +700 29 training.batch_size 2.0 +700 29 training.label_smoothing 0.002031410082116988 +700 30 model.embedding_dim 2.0 +700 30 model.scoring_fct_norm 1.0 +700 30 optimizer.lr 0.030095341635067174 +700 30 training.batch_size 0.0 +700 30 training.label_smoothing 0.017056047997012755 +700 31 model.embedding_dim 1.0 +700 31 model.scoring_fct_norm 1.0 +700 31 optimizer.lr 0.008822638187642465 +700 31 training.batch_size 2.0 +700 31 training.label_smoothing 0.10867755433539551 +700 32 model.embedding_dim 2.0 +700 32 model.scoring_fct_norm 2.0 +700 32 optimizer.lr 0.015501482392241112 +700 32 training.batch_size 1.0 +700 32 training.label_smoothing 0.004036511519707872 +700 33 model.embedding_dim 1.0 +700 33 model.scoring_fct_norm 2.0 +700 33 optimizer.lr 0.05412547762500439 +700 33 training.batch_size 0.0 +700 33 training.label_smoothing 0.004422689096914849 +700 34 
model.embedding_dim 1.0 +700 34 model.scoring_fct_norm 1.0 +700 34 optimizer.lr 0.05627507969106608 +700 34 training.batch_size 0.0 +700 34 training.label_smoothing 0.004788318117006858 +700 35 model.embedding_dim 2.0 +700 35 model.scoring_fct_norm 2.0 +700 35 optimizer.lr 0.0011302933277622376 +700 35 training.batch_size 0.0 +700 35 training.label_smoothing 0.08079052938454219 +700 36 model.embedding_dim 1.0 +700 36 model.scoring_fct_norm 1.0 +700 36 optimizer.lr 0.00200244566922717 +700 36 training.batch_size 0.0 +700 36 training.label_smoothing 0.0038463931247494946 +700 37 model.embedding_dim 2.0 +700 37 model.scoring_fct_norm 2.0 +700 37 optimizer.lr 0.001569836803057468 +700 37 training.batch_size 2.0 +700 37 training.label_smoothing 0.518099189300742 +700 38 model.embedding_dim 0.0 +700 38 model.scoring_fct_norm 1.0 +700 38 optimizer.lr 0.002798880113978632 +700 38 training.batch_size 1.0 +700 38 training.label_smoothing 0.0015385041111423035 +700 39 model.embedding_dim 1.0 +700 39 model.scoring_fct_norm 1.0 +700 39 optimizer.lr 0.0029948372782210068 +700 39 training.batch_size 0.0 +700 39 training.label_smoothing 0.003959074008903283 +700 40 model.embedding_dim 2.0 +700 40 model.scoring_fct_norm 2.0 +700 40 optimizer.lr 0.015960776382065316 +700 40 training.batch_size 0.0 +700 40 training.label_smoothing 0.12394652253114669 +700 41 model.embedding_dim 0.0 +700 41 model.scoring_fct_norm 2.0 +700 41 optimizer.lr 0.02471882695331625 +700 41 training.batch_size 1.0 +700 41 training.label_smoothing 0.004195139745001208 +700 42 model.embedding_dim 1.0 +700 42 model.scoring_fct_norm 2.0 +700 42 optimizer.lr 0.04380426201740298 +700 42 training.batch_size 0.0 +700 42 training.label_smoothing 0.2796799201854637 +700 43 model.embedding_dim 1.0 +700 43 model.scoring_fct_norm 1.0 +700 43 optimizer.lr 0.007426172787687606 +700 43 training.batch_size 1.0 +700 43 training.label_smoothing 0.1978208706542741 +700 44 model.embedding_dim 1.0 +700 44 model.scoring_fct_norm 1.0 
+700 44 optimizer.lr 0.017271517522465387 +700 44 training.batch_size 2.0 +700 44 training.label_smoothing 0.03575287170054489 +700 45 model.embedding_dim 0.0 +700 45 model.scoring_fct_norm 2.0 +700 45 optimizer.lr 0.018860299360816894 +700 45 training.batch_size 2.0 +700 45 training.label_smoothing 0.3207492266120819 +700 46 model.embedding_dim 2.0 +700 46 model.scoring_fct_norm 1.0 +700 46 optimizer.lr 0.06784226948632133 +700 46 training.batch_size 1.0 +700 46 training.label_smoothing 0.015483184134180912 +700 47 model.embedding_dim 1.0 +700 47 model.scoring_fct_norm 1.0 +700 47 optimizer.lr 0.014588021146242977 +700 47 training.batch_size 0.0 +700 47 training.label_smoothing 0.0016926926899294892 +700 48 model.embedding_dim 1.0 +700 48 model.scoring_fct_norm 2.0 +700 48 optimizer.lr 0.001041627331569872 +700 48 training.batch_size 2.0 +700 48 training.label_smoothing 0.004596039717766475 +700 49 model.embedding_dim 0.0 +700 49 model.scoring_fct_norm 2.0 +700 49 optimizer.lr 0.05834294911078864 +700 49 training.batch_size 0.0 +700 49 training.label_smoothing 0.08604378541107856 +700 50 model.embedding_dim 0.0 +700 50 model.scoring_fct_norm 1.0 +700 50 optimizer.lr 0.006958721688878052 +700 50 training.batch_size 0.0 +700 50 training.label_smoothing 0.002550379463962695 +700 51 model.embedding_dim 2.0 +700 51 model.scoring_fct_norm 2.0 +700 51 optimizer.lr 0.021626053643234857 +700 51 training.batch_size 1.0 +700 51 training.label_smoothing 0.01231227470352253 +700 52 model.embedding_dim 2.0 +700 52 model.scoring_fct_norm 2.0 +700 52 optimizer.lr 0.002094922521898601 +700 52 training.batch_size 2.0 +700 52 training.label_smoothing 0.043858139357175925 +700 53 model.embedding_dim 1.0 +700 53 model.scoring_fct_norm 1.0 +700 53 optimizer.lr 0.001709603427959069 +700 53 training.batch_size 2.0 +700 53 training.label_smoothing 0.007844440758139877 +700 54 model.embedding_dim 2.0 +700 54 model.scoring_fct_norm 2.0 +700 54 optimizer.lr 0.00171563186924669 +700 54 
training.batch_size 0.0 +700 54 training.label_smoothing 0.0011584416562895473 +700 55 model.embedding_dim 1.0 +700 55 model.scoring_fct_norm 2.0 +700 55 optimizer.lr 0.019631427035318456 +700 55 training.batch_size 1.0 +700 55 training.label_smoothing 0.006923202689311695 +700 56 model.embedding_dim 0.0 +700 56 model.scoring_fct_norm 1.0 +700 56 optimizer.lr 0.006907085204336981 +700 56 training.batch_size 1.0 +700 56 training.label_smoothing 0.004454543033885847 +700 57 model.embedding_dim 0.0 +700 57 model.scoring_fct_norm 1.0 +700 57 optimizer.lr 0.02227136799622551 +700 57 training.batch_size 2.0 +700 57 training.label_smoothing 0.0031860906217134493 +700 58 model.embedding_dim 1.0 +700 58 model.scoring_fct_norm 2.0 +700 58 optimizer.lr 0.0030293844312706303 +700 58 training.batch_size 0.0 +700 58 training.label_smoothing 0.3595221643829327 +700 59 model.embedding_dim 1.0 +700 59 model.scoring_fct_norm 1.0 +700 59 optimizer.lr 0.0020411790236546743 +700 59 training.batch_size 2.0 +700 59 training.label_smoothing 0.8188299011422301 +700 60 model.embedding_dim 2.0 +700 60 model.scoring_fct_norm 1.0 +700 60 optimizer.lr 0.053748758436179024 +700 60 training.batch_size 2.0 +700 60 training.label_smoothing 0.31050165928891876 +700 61 model.embedding_dim 2.0 +700 61 model.scoring_fct_norm 1.0 +700 61 optimizer.lr 0.004085029729513178 +700 61 training.batch_size 1.0 +700 61 training.label_smoothing 0.01444219155718182 +700 62 model.embedding_dim 0.0 +700 62 model.scoring_fct_norm 1.0 +700 62 optimizer.lr 0.006560040836241122 +700 62 training.batch_size 2.0 +700 62 training.label_smoothing 0.0346728173391081 +700 63 model.embedding_dim 1.0 +700 63 model.scoring_fct_norm 2.0 +700 63 optimizer.lr 0.032524683945232474 +700 63 training.batch_size 0.0 +700 63 training.label_smoothing 0.48004291065793697 +700 64 model.embedding_dim 0.0 +700 64 model.scoring_fct_norm 2.0 +700 64 optimizer.lr 0.004744881054589265 +700 64 training.batch_size 0.0 +700 64 
training.label_smoothing 0.6880698495632108 +700 65 model.embedding_dim 0.0 +700 65 model.scoring_fct_norm 1.0 +700 65 optimizer.lr 0.02127717216259546 +700 65 training.batch_size 1.0 +700 65 training.label_smoothing 0.23675257843278893 +700 66 model.embedding_dim 2.0 +700 66 model.scoring_fct_norm 1.0 +700 66 optimizer.lr 0.08374559768829916 +700 66 training.batch_size 1.0 +700 66 training.label_smoothing 0.009385462167790303 +700 67 model.embedding_dim 2.0 +700 67 model.scoring_fct_norm 1.0 +700 67 optimizer.lr 0.03258813271868475 +700 67 training.batch_size 0.0 +700 67 training.label_smoothing 0.005539826957052918 +700 68 model.embedding_dim 0.0 +700 68 model.scoring_fct_norm 1.0 +700 68 optimizer.lr 0.0016940560074522072 +700 68 training.batch_size 0.0 +700 68 training.label_smoothing 0.23995827070723688 +700 69 model.embedding_dim 1.0 +700 69 model.scoring_fct_norm 2.0 +700 69 optimizer.lr 0.0034666140491643213 +700 69 training.batch_size 2.0 +700 69 training.label_smoothing 0.27854442180806116 +700 70 model.embedding_dim 2.0 +700 70 model.scoring_fct_norm 2.0 +700 70 optimizer.lr 0.004971252133847501 +700 70 training.batch_size 2.0 +700 70 training.label_smoothing 0.1469118585080028 +700 71 model.embedding_dim 2.0 +700 71 model.scoring_fct_norm 1.0 +700 71 optimizer.lr 0.001783890446060712 +700 71 training.batch_size 1.0 +700 71 training.label_smoothing 0.34417843198290926 +700 72 model.embedding_dim 0.0 +700 72 model.scoring_fct_norm 2.0 +700 72 optimizer.lr 0.022136352782828675 +700 72 training.batch_size 2.0 +700 72 training.label_smoothing 0.270918027299876 +700 73 model.embedding_dim 1.0 +700 73 model.scoring_fct_norm 2.0 +700 73 optimizer.lr 0.0012975418947804344 +700 73 training.batch_size 2.0 +700 73 training.label_smoothing 0.0044880037946630775 +700 74 model.embedding_dim 1.0 +700 74 model.scoring_fct_norm 1.0 +700 74 optimizer.lr 0.002134369564099417 +700 74 training.batch_size 2.0 +700 74 training.label_smoothing 0.07277534890661895 +700 75 
model.embedding_dim 2.0 +700 75 model.scoring_fct_norm 1.0 +700 75 optimizer.lr 0.002006243233307645 +700 75 training.batch_size 2.0 +700 75 training.label_smoothing 0.04322856972390622 +700 76 model.embedding_dim 0.0 +700 76 model.scoring_fct_norm 1.0 +700 76 optimizer.lr 0.01766467477989823 +700 76 training.batch_size 0.0 +700 76 training.label_smoothing 0.18971879707278982 +700 77 model.embedding_dim 2.0 +700 77 model.scoring_fct_norm 2.0 +700 77 optimizer.lr 0.0018408790742201122 +700 77 training.batch_size 0.0 +700 77 training.label_smoothing 0.06356855054147834 +700 78 model.embedding_dim 2.0 +700 78 model.scoring_fct_norm 1.0 +700 78 optimizer.lr 0.012866566210072152 +700 78 training.batch_size 0.0 +700 78 training.label_smoothing 0.33472574147889106 +700 79 model.embedding_dim 1.0 +700 79 model.scoring_fct_norm 1.0 +700 79 optimizer.lr 0.004628979427384168 +700 79 training.batch_size 2.0 +700 79 training.label_smoothing 0.4847766097925684 +700 80 model.embedding_dim 1.0 +700 80 model.scoring_fct_norm 1.0 +700 80 optimizer.lr 0.0025849710177372285 +700 80 training.batch_size 0.0 +700 80 training.label_smoothing 0.02285918550625258 +700 81 model.embedding_dim 0.0 +700 81 model.scoring_fct_norm 1.0 +700 81 optimizer.lr 0.007853841004247002 +700 81 training.batch_size 0.0 +700 81 training.label_smoothing 0.0027118482500452542 +700 82 model.embedding_dim 0.0 +700 82 model.scoring_fct_norm 1.0 +700 82 optimizer.lr 0.003247412410730071 +700 82 training.batch_size 1.0 +700 82 training.label_smoothing 0.002086548885859638 +700 83 model.embedding_dim 0.0 +700 83 model.scoring_fct_norm 2.0 +700 83 optimizer.lr 0.01726609279100836 +700 83 training.batch_size 1.0 +700 83 training.label_smoothing 0.0012647332189432482 +700 84 model.embedding_dim 1.0 +700 84 model.scoring_fct_norm 2.0 +700 84 optimizer.lr 0.004995633707803045 +700 84 training.batch_size 0.0 +700 84 training.label_smoothing 0.09917677598718215 +700 85 model.embedding_dim 2.0 +700 85 model.scoring_fct_norm 
2.0 +700 85 optimizer.lr 0.0031522405467628283 +700 85 training.batch_size 1.0 +700 85 training.label_smoothing 0.3431401692724592 +700 86 model.embedding_dim 0.0 +700 86 model.scoring_fct_norm 1.0 +700 86 optimizer.lr 0.001344041047655322 +700 86 training.batch_size 1.0 +700 86 training.label_smoothing 0.007685202591091617 +700 87 model.embedding_dim 0.0 +700 87 model.scoring_fct_norm 2.0 +700 87 optimizer.lr 0.002346800732475491 +700 87 training.batch_size 1.0 +700 87 training.label_smoothing 0.19258161690087283 +700 88 model.embedding_dim 2.0 +700 88 model.scoring_fct_norm 2.0 +700 88 optimizer.lr 0.07736291107108277 +700 88 training.batch_size 2.0 +700 88 training.label_smoothing 0.03714674399768079 +700 89 model.embedding_dim 0.0 +700 89 model.scoring_fct_norm 2.0 +700 89 optimizer.lr 0.017473141570669908 +700 89 training.batch_size 0.0 +700 89 training.label_smoothing 0.01455888598667594 +700 90 model.embedding_dim 2.0 +700 90 model.scoring_fct_norm 1.0 +700 90 optimizer.lr 0.005842655058187443 +700 90 training.batch_size 2.0 +700 90 training.label_smoothing 0.2640203719858564 +700 91 model.embedding_dim 1.0 +700 91 model.scoring_fct_norm 2.0 +700 91 optimizer.lr 0.0033642707721540954 +700 91 training.batch_size 0.0 +700 91 training.label_smoothing 0.007140922080271608 +700 92 model.embedding_dim 1.0 +700 92 model.scoring_fct_norm 2.0 +700 92 optimizer.lr 0.053324831497312895 +700 92 training.batch_size 1.0 +700 92 training.label_smoothing 0.912424421316992 +700 93 model.embedding_dim 0.0 +700 93 model.scoring_fct_norm 2.0 +700 93 optimizer.lr 0.07865248751218408 +700 93 training.batch_size 2.0 +700 93 training.label_smoothing 0.2946774339784688 +700 94 model.embedding_dim 0.0 +700 94 model.scoring_fct_norm 1.0 +700 94 optimizer.lr 0.0045854447011011885 +700 94 training.batch_size 2.0 +700 94 training.label_smoothing 0.008849009763107674 +700 95 model.embedding_dim 1.0 +700 95 model.scoring_fct_norm 2.0 +700 95 optimizer.lr 0.0459766638362605 +700 95 
training.batch_size 0.0 +700 95 training.label_smoothing 0.0699099529909363 +700 96 model.embedding_dim 2.0 +700 96 model.scoring_fct_norm 2.0 +700 96 optimizer.lr 0.02875368346599975 +700 96 training.batch_size 0.0 +700 96 training.label_smoothing 0.06751628173294219 +700 97 model.embedding_dim 1.0 +700 97 model.scoring_fct_norm 1.0 +700 97 optimizer.lr 0.00560264002470732 +700 97 training.batch_size 1.0 +700 97 training.label_smoothing 0.006802833597991388 +700 98 model.embedding_dim 1.0 +700 98 model.scoring_fct_norm 1.0 +700 98 optimizer.lr 0.0030502265424145096 +700 98 training.batch_size 0.0 +700 98 training.label_smoothing 0.007019128256026654 +700 99 model.embedding_dim 0.0 +700 99 model.scoring_fct_norm 2.0 +700 99 optimizer.lr 0.0021851068790813425 +700 99 training.batch_size 0.0 +700 99 training.label_smoothing 0.7488227325772602 +700 100 model.embedding_dim 1.0 +700 100 model.scoring_fct_norm 2.0 +700 100 optimizer.lr 0.05386931331773036 +700 100 training.batch_size 0.0 +700 100 training.label_smoothing 0.08188447882529079 +700 1 dataset """kinships""" +700 1 model """structuredembedding""" +700 1 loss """softplus""" +700 1 regularizer """no""" +700 1 optimizer """adam""" +700 1 training_loop """lcwa""" +700 1 evaluator """rankbased""" +700 2 dataset """kinships""" +700 2 model """structuredembedding""" +700 2 loss """softplus""" +700 2 regularizer """no""" +700 2 optimizer """adam""" +700 2 training_loop """lcwa""" +700 2 evaluator """rankbased""" +700 3 dataset """kinships""" +700 3 model """structuredembedding""" +700 3 loss """softplus""" +700 3 regularizer """no""" +700 3 optimizer """adam""" +700 3 training_loop """lcwa""" +700 3 evaluator """rankbased""" +700 4 dataset """kinships""" +700 4 model """structuredembedding""" +700 4 loss """softplus""" +700 4 regularizer """no""" +700 4 optimizer """adam""" +700 4 training_loop """lcwa""" +700 4 evaluator """rankbased""" +700 5 dataset """kinships""" +700 5 model """structuredembedding""" +700 5 loss 
"""softplus""" +700 5 regularizer """no""" +700 5 optimizer """adam""" +700 5 training_loop """lcwa""" +700 5 evaluator """rankbased""" +700 6 dataset """kinships""" +700 6 model """structuredembedding""" +700 6 loss """softplus""" +700 6 regularizer """no""" +700 6 optimizer """adam""" +700 6 training_loop """lcwa""" +700 6 evaluator """rankbased""" +700 7 dataset """kinships""" +700 7 model """structuredembedding""" +700 7 loss """softplus""" +700 7 regularizer """no""" +700 7 optimizer """adam""" +700 7 training_loop """lcwa""" +700 7 evaluator """rankbased""" +700 8 dataset """kinships""" +700 8 model """structuredembedding""" +700 8 loss """softplus""" +700 8 regularizer """no""" +700 8 optimizer """adam""" +700 8 training_loop """lcwa""" +700 8 evaluator """rankbased""" +700 9 dataset """kinships""" +700 9 model """structuredembedding""" +700 9 loss """softplus""" +700 9 regularizer """no""" +700 9 optimizer """adam""" +700 9 training_loop """lcwa""" +700 9 evaluator """rankbased""" +700 10 dataset """kinships""" +700 10 model """structuredembedding""" +700 10 loss """softplus""" +700 10 regularizer """no""" +700 10 optimizer """adam""" +700 10 training_loop """lcwa""" +700 10 evaluator """rankbased""" +700 11 dataset """kinships""" +700 11 model """structuredembedding""" +700 11 loss """softplus""" +700 11 regularizer """no""" +700 11 optimizer """adam""" +700 11 training_loop """lcwa""" +700 11 evaluator """rankbased""" +700 12 dataset """kinships""" +700 12 model """structuredembedding""" +700 12 loss """softplus""" +700 12 regularizer """no""" +700 12 optimizer """adam""" +700 12 training_loop """lcwa""" +700 12 evaluator """rankbased""" +700 13 dataset """kinships""" +700 13 model """structuredembedding""" +700 13 loss """softplus""" +700 13 regularizer """no""" +700 13 optimizer """adam""" +700 13 training_loop """lcwa""" +700 13 evaluator """rankbased""" +700 14 dataset """kinships""" +700 14 model """structuredembedding""" +700 14 loss """softplus""" 
+700 14 regularizer """no""" +700 14 optimizer """adam""" +700 14 training_loop """lcwa""" +700 14 evaluator """rankbased""" +700 15 dataset """kinships""" +700 15 model """structuredembedding""" +700 15 loss """softplus""" +700 15 regularizer """no""" +700 15 optimizer """adam""" +700 15 training_loop """lcwa""" +700 15 evaluator """rankbased""" +700 16 dataset """kinships""" +700 16 model """structuredembedding""" +700 16 loss """softplus""" +700 16 regularizer """no""" +700 16 optimizer """adam""" +700 16 training_loop """lcwa""" +700 16 evaluator """rankbased""" +700 17 dataset """kinships""" +700 17 model """structuredembedding""" +700 17 loss """softplus""" +700 17 regularizer """no""" +700 17 optimizer """adam""" +700 17 training_loop """lcwa""" +700 17 evaluator """rankbased""" +700 18 dataset """kinships""" +700 18 model """structuredembedding""" +700 18 loss """softplus""" +700 18 regularizer """no""" +700 18 optimizer """adam""" +700 18 training_loop """lcwa""" +700 18 evaluator """rankbased""" +700 19 dataset """kinships""" +700 19 model """structuredembedding""" +700 19 loss """softplus""" +700 19 regularizer """no""" +700 19 optimizer """adam""" +700 19 training_loop """lcwa""" +700 19 evaluator """rankbased""" +700 20 dataset """kinships""" +700 20 model """structuredembedding""" +700 20 loss """softplus""" +700 20 regularizer """no""" +700 20 optimizer """adam""" +700 20 training_loop """lcwa""" +700 20 evaluator """rankbased""" +700 21 dataset """kinships""" +700 21 model """structuredembedding""" +700 21 loss """softplus""" +700 21 regularizer """no""" +700 21 optimizer """adam""" +700 21 training_loop """lcwa""" +700 21 evaluator """rankbased""" +700 22 dataset """kinships""" +700 22 model """structuredembedding""" +700 22 loss """softplus""" +700 22 regularizer """no""" +700 22 optimizer """adam""" +700 22 training_loop """lcwa""" +700 22 evaluator """rankbased""" +700 23 dataset """kinships""" +700 23 model """structuredembedding""" +700 23 
loss """softplus""" +700 23 regularizer """no""" +700 23 optimizer """adam""" +700 23 training_loop """lcwa""" +700 23 evaluator """rankbased""" +700 24 dataset """kinships""" +700 24 model """structuredembedding""" +700 24 loss """softplus""" +700 24 regularizer """no""" +700 24 optimizer """adam""" +700 24 training_loop """lcwa""" +700 24 evaluator """rankbased""" +700 25 dataset """kinships""" +700 25 model """structuredembedding""" +700 25 loss """softplus""" +700 25 regularizer """no""" +700 25 optimizer """adam""" +700 25 training_loop """lcwa""" +700 25 evaluator """rankbased""" +700 26 dataset """kinships""" +700 26 model """structuredembedding""" +700 26 loss """softplus""" +700 26 regularizer """no""" +700 26 optimizer """adam""" +700 26 training_loop """lcwa""" +700 26 evaluator """rankbased""" +700 27 dataset """kinships""" +700 27 model """structuredembedding""" +700 27 loss """softplus""" +700 27 regularizer """no""" +700 27 optimizer """adam""" +700 27 training_loop """lcwa""" +700 27 evaluator """rankbased""" +700 28 dataset """kinships""" +700 28 model """structuredembedding""" +700 28 loss """softplus""" +700 28 regularizer """no""" +700 28 optimizer """adam""" +700 28 training_loop """lcwa""" +700 28 evaluator """rankbased""" +700 29 dataset """kinships""" +700 29 model """structuredembedding""" +700 29 loss """softplus""" +700 29 regularizer """no""" +700 29 optimizer """adam""" +700 29 training_loop """lcwa""" +700 29 evaluator """rankbased""" +700 30 dataset """kinships""" +700 30 model """structuredembedding""" +700 30 loss """softplus""" +700 30 regularizer """no""" +700 30 optimizer """adam""" +700 30 training_loop """lcwa""" +700 30 evaluator """rankbased""" +700 31 dataset """kinships""" +700 31 model """structuredembedding""" +700 31 loss """softplus""" +700 31 regularizer """no""" +700 31 optimizer """adam""" +700 31 training_loop """lcwa""" +700 31 evaluator """rankbased""" +700 32 dataset """kinships""" +700 32 model 
"""structuredembedding""" +700 32 loss """softplus""" +700 32 regularizer """no""" +700 32 optimizer """adam""" +700 32 training_loop """lcwa""" +700 32 evaluator """rankbased""" +700 33 dataset """kinships""" +700 33 model """structuredembedding""" +700 33 loss """softplus""" +700 33 regularizer """no""" +700 33 optimizer """adam""" +700 33 training_loop """lcwa""" +700 33 evaluator """rankbased""" +700 34 dataset """kinships""" +700 34 model """structuredembedding""" +700 34 loss """softplus""" +700 34 regularizer """no""" +700 34 optimizer """adam""" +700 34 training_loop """lcwa""" +700 34 evaluator """rankbased""" +700 35 dataset """kinships""" +700 35 model """structuredembedding""" +700 35 loss """softplus""" +700 35 regularizer """no""" +700 35 optimizer """adam""" +700 35 training_loop """lcwa""" +700 35 evaluator """rankbased""" +700 36 dataset """kinships""" +700 36 model """structuredembedding""" +700 36 loss """softplus""" +700 36 regularizer """no""" +700 36 optimizer """adam""" +700 36 training_loop """lcwa""" +700 36 evaluator """rankbased""" +700 37 dataset """kinships""" +700 37 model """structuredembedding""" +700 37 loss """softplus""" +700 37 regularizer """no""" +700 37 optimizer """adam""" +700 37 training_loop """lcwa""" +700 37 evaluator """rankbased""" +700 38 dataset """kinships""" +700 38 model """structuredembedding""" +700 38 loss """softplus""" +700 38 regularizer """no""" +700 38 optimizer """adam""" +700 38 training_loop """lcwa""" +700 38 evaluator """rankbased""" +700 39 dataset """kinships""" +700 39 model """structuredembedding""" +700 39 loss """softplus""" +700 39 regularizer """no""" +700 39 optimizer """adam""" +700 39 training_loop """lcwa""" +700 39 evaluator """rankbased""" +700 40 dataset """kinships""" +700 40 model """structuredembedding""" +700 40 loss """softplus""" +700 40 regularizer """no""" +700 40 optimizer """adam""" +700 40 training_loop """lcwa""" +700 40 evaluator """rankbased""" +700 41 dataset 
"""kinships""" +700 41 model """structuredembedding""" +700 41 loss """softplus""" +700 41 regularizer """no""" +700 41 optimizer """adam""" +700 41 training_loop """lcwa""" +700 41 evaluator """rankbased""" +700 42 dataset """kinships""" +700 42 model """structuredembedding""" +700 42 loss """softplus""" +700 42 regularizer """no""" +700 42 optimizer """adam""" +700 42 training_loop """lcwa""" +700 42 evaluator """rankbased""" +700 43 dataset """kinships""" +700 43 model """structuredembedding""" +700 43 loss """softplus""" +700 43 regularizer """no""" +700 43 optimizer """adam""" +700 43 training_loop """lcwa""" +700 43 evaluator """rankbased""" +700 44 dataset """kinships""" +700 44 model """structuredembedding""" +700 44 loss """softplus""" +700 44 regularizer """no""" +700 44 optimizer """adam""" +700 44 training_loop """lcwa""" +700 44 evaluator """rankbased""" +700 45 dataset """kinships""" +700 45 model """structuredembedding""" +700 45 loss """softplus""" +700 45 regularizer """no""" +700 45 optimizer """adam""" +700 45 training_loop """lcwa""" +700 45 evaluator """rankbased""" +700 46 dataset """kinships""" +700 46 model """structuredembedding""" +700 46 loss """softplus""" +700 46 regularizer """no""" +700 46 optimizer """adam""" +700 46 training_loop """lcwa""" +700 46 evaluator """rankbased""" +700 47 dataset """kinships""" +700 47 model """structuredembedding""" +700 47 loss """softplus""" +700 47 regularizer """no""" +700 47 optimizer """adam""" +700 47 training_loop """lcwa""" +700 47 evaluator """rankbased""" +700 48 dataset """kinships""" +700 48 model """structuredembedding""" +700 48 loss """softplus""" +700 48 regularizer """no""" +700 48 optimizer """adam""" +700 48 training_loop """lcwa""" +700 48 evaluator """rankbased""" +700 49 dataset """kinships""" +700 49 model """structuredembedding""" +700 49 loss """softplus""" +700 49 regularizer """no""" +700 49 optimizer """adam""" +700 49 training_loop """lcwa""" +700 49 evaluator """rankbased""" 
+700 50 dataset """kinships""" +700 50 model """structuredembedding""" +700 50 loss """softplus""" +700 50 regularizer """no""" +700 50 optimizer """adam""" +700 50 training_loop """lcwa""" +700 50 evaluator """rankbased""" +700 51 dataset """kinships""" +700 51 model """structuredembedding""" +700 51 loss """softplus""" +700 51 regularizer """no""" +700 51 optimizer """adam""" +700 51 training_loop """lcwa""" +700 51 evaluator """rankbased""" +700 52 dataset """kinships""" +700 52 model """structuredembedding""" +700 52 loss """softplus""" +700 52 regularizer """no""" +700 52 optimizer """adam""" +700 52 training_loop """lcwa""" +700 52 evaluator """rankbased""" +700 53 dataset """kinships""" +700 53 model """structuredembedding""" +700 53 loss """softplus""" +700 53 regularizer """no""" +700 53 optimizer """adam""" +700 53 training_loop """lcwa""" +700 53 evaluator """rankbased""" +700 54 dataset """kinships""" +700 54 model """structuredembedding""" +700 54 loss """softplus""" +700 54 regularizer """no""" +700 54 optimizer """adam""" +700 54 training_loop """lcwa""" +700 54 evaluator """rankbased""" +700 55 dataset """kinships""" +700 55 model """structuredembedding""" +700 55 loss """softplus""" +700 55 regularizer """no""" +700 55 optimizer """adam""" +700 55 training_loop """lcwa""" +700 55 evaluator """rankbased""" +700 56 dataset """kinships""" +700 56 model """structuredembedding""" +700 56 loss """softplus""" +700 56 regularizer """no""" +700 56 optimizer """adam""" +700 56 training_loop """lcwa""" +700 56 evaluator """rankbased""" +700 57 dataset """kinships""" +700 57 model """structuredembedding""" +700 57 loss """softplus""" +700 57 regularizer """no""" +700 57 optimizer """adam""" +700 57 training_loop """lcwa""" +700 57 evaluator """rankbased""" +700 58 dataset """kinships""" +700 58 model """structuredembedding""" +700 58 loss """softplus""" +700 58 regularizer """no""" +700 58 optimizer """adam""" +700 58 training_loop """lcwa""" +700 58 evaluator 
"""rankbased""" +700 59 dataset """kinships""" +700 59 model """structuredembedding""" +700 59 loss """softplus""" +700 59 regularizer """no""" +700 59 optimizer """adam""" +700 59 training_loop """lcwa""" +700 59 evaluator """rankbased""" +700 60 dataset """kinships""" +700 60 model """structuredembedding""" +700 60 loss """softplus""" +700 60 regularizer """no""" +700 60 optimizer """adam""" +700 60 training_loop """lcwa""" +700 60 evaluator """rankbased""" +700 61 dataset """kinships""" +700 61 model """structuredembedding""" +700 61 loss """softplus""" +700 61 regularizer """no""" +700 61 optimizer """adam""" +700 61 training_loop """lcwa""" +700 61 evaluator """rankbased""" +700 62 dataset """kinships""" +700 62 model """structuredembedding""" +700 62 loss """softplus""" +700 62 regularizer """no""" +700 62 optimizer """adam""" +700 62 training_loop """lcwa""" +700 62 evaluator """rankbased""" +700 63 dataset """kinships""" +700 63 model """structuredembedding""" +700 63 loss """softplus""" +700 63 regularizer """no""" +700 63 optimizer """adam""" +700 63 training_loop """lcwa""" +700 63 evaluator """rankbased""" +700 64 dataset """kinships""" +700 64 model """structuredembedding""" +700 64 loss """softplus""" +700 64 regularizer """no""" +700 64 optimizer """adam""" +700 64 training_loop """lcwa""" +700 64 evaluator """rankbased""" +700 65 dataset """kinships""" +700 65 model """structuredembedding""" +700 65 loss """softplus""" +700 65 regularizer """no""" +700 65 optimizer """adam""" +700 65 training_loop """lcwa""" +700 65 evaluator """rankbased""" +700 66 dataset """kinships""" +700 66 model """structuredembedding""" +700 66 loss """softplus""" +700 66 regularizer """no""" +700 66 optimizer """adam""" +700 66 training_loop """lcwa""" +700 66 evaluator """rankbased""" +700 67 dataset """kinships""" +700 67 model """structuredembedding""" +700 67 loss """softplus""" +700 67 regularizer """no""" +700 67 optimizer """adam""" +700 67 training_loop """lcwa""" 
+700 67 evaluator """rankbased""" +700 68 dataset """kinships""" +700 68 model """structuredembedding""" +700 68 loss """softplus""" +700 68 regularizer """no""" +700 68 optimizer """adam""" +700 68 training_loop """lcwa""" +700 68 evaluator """rankbased""" +700 69 dataset """kinships""" +700 69 model """structuredembedding""" +700 69 loss """softplus""" +700 69 regularizer """no""" +700 69 optimizer """adam""" +700 69 training_loop """lcwa""" +700 69 evaluator """rankbased""" +700 70 dataset """kinships""" +700 70 model """structuredembedding""" +700 70 loss """softplus""" +700 70 regularizer """no""" +700 70 optimizer """adam""" +700 70 training_loop """lcwa""" +700 70 evaluator """rankbased""" +700 71 dataset """kinships""" +700 71 model """structuredembedding""" +700 71 loss """softplus""" +700 71 regularizer """no""" +700 71 optimizer """adam""" +700 71 training_loop """lcwa""" +700 71 evaluator """rankbased""" +700 72 dataset """kinships""" +700 72 model """structuredembedding""" +700 72 loss """softplus""" +700 72 regularizer """no""" +700 72 optimizer """adam""" +700 72 training_loop """lcwa""" +700 72 evaluator """rankbased""" +700 73 dataset """kinships""" +700 73 model """structuredembedding""" +700 73 loss """softplus""" +700 73 regularizer """no""" +700 73 optimizer """adam""" +700 73 training_loop """lcwa""" +700 73 evaluator """rankbased""" +700 74 dataset """kinships""" +700 74 model """structuredembedding""" +700 74 loss """softplus""" +700 74 regularizer """no""" +700 74 optimizer """adam""" +700 74 training_loop """lcwa""" +700 74 evaluator """rankbased""" +700 75 dataset """kinships""" +700 75 model """structuredembedding""" +700 75 loss """softplus""" +700 75 regularizer """no""" +700 75 optimizer """adam""" +700 75 training_loop """lcwa""" +700 75 evaluator """rankbased""" +700 76 dataset """kinships""" +700 76 model """structuredembedding""" +700 76 loss """softplus""" +700 76 regularizer """no""" +700 76 optimizer """adam""" +700 76 
training_loop """lcwa""" +700 76 evaluator """rankbased""" +700 77 dataset """kinships""" +700 77 model """structuredembedding""" +700 77 loss """softplus""" +700 77 regularizer """no""" +700 77 optimizer """adam""" +700 77 training_loop """lcwa""" +700 77 evaluator """rankbased""" +700 78 dataset """kinships""" +700 78 model """structuredembedding""" +700 78 loss """softplus""" +700 78 regularizer """no""" +700 78 optimizer """adam""" +700 78 training_loop """lcwa""" +700 78 evaluator """rankbased""" +700 79 dataset """kinships""" +700 79 model """structuredembedding""" +700 79 loss """softplus""" +700 79 regularizer """no""" +700 79 optimizer """adam""" +700 79 training_loop """lcwa""" +700 79 evaluator """rankbased""" +700 80 dataset """kinships""" +700 80 model """structuredembedding""" +700 80 loss """softplus""" +700 80 regularizer """no""" +700 80 optimizer """adam""" +700 80 training_loop """lcwa""" +700 80 evaluator """rankbased""" +700 81 dataset """kinships""" +700 81 model """structuredembedding""" +700 81 loss """softplus""" +700 81 regularizer """no""" +700 81 optimizer """adam""" +700 81 training_loop """lcwa""" +700 81 evaluator """rankbased""" +700 82 dataset """kinships""" +700 82 model """structuredembedding""" +700 82 loss """softplus""" +700 82 regularizer """no""" +700 82 optimizer """adam""" +700 82 training_loop """lcwa""" +700 82 evaluator """rankbased""" +700 83 dataset """kinships""" +700 83 model """structuredembedding""" +700 83 loss """softplus""" +700 83 regularizer """no""" +700 83 optimizer """adam""" +700 83 training_loop """lcwa""" +700 83 evaluator """rankbased""" +700 84 dataset """kinships""" +700 84 model """structuredembedding""" +700 84 loss """softplus""" +700 84 regularizer """no""" +700 84 optimizer """adam""" +700 84 training_loop """lcwa""" +700 84 evaluator """rankbased""" +700 85 dataset """kinships""" +700 85 model """structuredembedding""" +700 85 loss """softplus""" +700 85 regularizer """no""" +700 85 optimizer 
"""adam""" +700 85 training_loop """lcwa""" +700 85 evaluator """rankbased""" +700 86 dataset """kinships""" +700 86 model """structuredembedding""" +700 86 loss """softplus""" +700 86 regularizer """no""" +700 86 optimizer """adam""" +700 86 training_loop """lcwa""" +700 86 evaluator """rankbased""" +700 87 dataset """kinships""" +700 87 model """structuredembedding""" +700 87 loss """softplus""" +700 87 regularizer """no""" +700 87 optimizer """adam""" +700 87 training_loop """lcwa""" +700 87 evaluator """rankbased""" +700 88 dataset """kinships""" +700 88 model """structuredembedding""" +700 88 loss """softplus""" +700 88 regularizer """no""" +700 88 optimizer """adam""" +700 88 training_loop """lcwa""" +700 88 evaluator """rankbased""" +700 89 dataset """kinships""" +700 89 model """structuredembedding""" +700 89 loss """softplus""" +700 89 regularizer """no""" +700 89 optimizer """adam""" +700 89 training_loop """lcwa""" +700 89 evaluator """rankbased""" +700 90 dataset """kinships""" +700 90 model """structuredembedding""" +700 90 loss """softplus""" +700 90 regularizer """no""" +700 90 optimizer """adam""" +700 90 training_loop """lcwa""" +700 90 evaluator """rankbased""" +700 91 dataset """kinships""" +700 91 model """structuredembedding""" +700 91 loss """softplus""" +700 91 regularizer """no""" +700 91 optimizer """adam""" +700 91 training_loop """lcwa""" +700 91 evaluator """rankbased""" +700 92 dataset """kinships""" +700 92 model """structuredembedding""" +700 92 loss """softplus""" +700 92 regularizer """no""" +700 92 optimizer """adam""" +700 92 training_loop """lcwa""" +700 92 evaluator """rankbased""" +700 93 dataset """kinships""" +700 93 model """structuredembedding""" +700 93 loss """softplus""" +700 93 regularizer """no""" +700 93 optimizer """adam""" +700 93 training_loop """lcwa""" +700 93 evaluator """rankbased""" +700 94 dataset """kinships""" +700 94 model """structuredembedding""" +700 94 loss """softplus""" +700 94 regularizer """no""" 
+700 94 optimizer """adam""" +700 94 training_loop """lcwa""" +700 94 evaluator """rankbased""" +700 95 dataset """kinships""" +700 95 model """structuredembedding""" +700 95 loss """softplus""" +700 95 regularizer """no""" +700 95 optimizer """adam""" +700 95 training_loop """lcwa""" +700 95 evaluator """rankbased""" +700 96 dataset """kinships""" +700 96 model """structuredembedding""" +700 96 loss """softplus""" +700 96 regularizer """no""" +700 96 optimizer """adam""" +700 96 training_loop """lcwa""" +700 96 evaluator """rankbased""" +700 97 dataset """kinships""" +700 97 model """structuredembedding""" +700 97 loss """softplus""" +700 97 regularizer """no""" +700 97 optimizer """adam""" +700 97 training_loop """lcwa""" +700 97 evaluator """rankbased""" +700 98 dataset """kinships""" +700 98 model """structuredembedding""" +700 98 loss """softplus""" +700 98 regularizer """no""" +700 98 optimizer """adam""" +700 98 training_loop """lcwa""" +700 98 evaluator """rankbased""" +700 99 dataset """kinships""" +700 99 model """structuredembedding""" +700 99 loss """softplus""" +700 99 regularizer """no""" +700 99 optimizer """adam""" +700 99 training_loop """lcwa""" +700 99 evaluator """rankbased""" +700 100 dataset """kinships""" +700 100 model """structuredembedding""" +700 100 loss """softplus""" +700 100 regularizer """no""" +700 100 optimizer """adam""" +700 100 training_loop """lcwa""" +700 100 evaluator """rankbased""" +701 1 model.embedding_dim 0.0 +701 1 model.scoring_fct_norm 1.0 +701 1 optimizer.lr 0.0034018769152159424 +701 1 training.batch_size 2.0 +701 1 training.label_smoothing 0.00972488128312153 +701 2 model.embedding_dim 0.0 +701 2 model.scoring_fct_norm 1.0 +701 2 optimizer.lr 0.02041592509161617 +701 2 training.batch_size 1.0 +701 2 training.label_smoothing 0.63741722450862 +701 3 model.embedding_dim 1.0 +701 3 model.scoring_fct_norm 1.0 +701 3 optimizer.lr 0.024165909679477668 +701 3 training.batch_size 0.0 +701 3 training.label_smoothing 
0.0051831180351665255 +701 4 model.embedding_dim 1.0 +701 4 model.scoring_fct_norm 1.0 +701 4 optimizer.lr 0.0014009786216695294 +701 4 training.batch_size 1.0 +701 4 training.label_smoothing 0.5833836869234461 +701 5 model.embedding_dim 2.0 +701 5 model.scoring_fct_norm 1.0 +701 5 optimizer.lr 0.0013979134591479373 +701 5 training.batch_size 2.0 +701 5 training.label_smoothing 0.4659891956115265 +701 6 model.embedding_dim 0.0 +701 6 model.scoring_fct_norm 1.0 +701 6 optimizer.lr 0.004817360785660685 +701 6 training.batch_size 1.0 +701 6 training.label_smoothing 0.0014029351756053847 +701 7 model.embedding_dim 2.0 +701 7 model.scoring_fct_norm 2.0 +701 7 optimizer.lr 0.009693360538344171 +701 7 training.batch_size 1.0 +701 7 training.label_smoothing 0.0011339486385867502 +701 8 model.embedding_dim 2.0 +701 8 model.scoring_fct_norm 2.0 +701 8 optimizer.lr 0.013776880956701149 +701 8 training.batch_size 2.0 +701 8 training.label_smoothing 0.002155144661386109 +701 9 model.embedding_dim 0.0 +701 9 model.scoring_fct_norm 2.0 +701 9 optimizer.lr 0.09889297383177427 +701 9 training.batch_size 0.0 +701 9 training.label_smoothing 0.001597526830249915 +701 10 model.embedding_dim 2.0 +701 10 model.scoring_fct_norm 2.0 +701 10 optimizer.lr 0.018427462648795245 +701 10 training.batch_size 1.0 +701 10 training.label_smoothing 0.0253785757462752 +701 11 model.embedding_dim 0.0 +701 11 model.scoring_fct_norm 1.0 +701 11 optimizer.lr 0.0012336232225722585 +701 11 training.batch_size 2.0 +701 11 training.label_smoothing 0.003403926588595769 +701 12 model.embedding_dim 0.0 +701 12 model.scoring_fct_norm 2.0 +701 12 optimizer.lr 0.016743982589418286 +701 12 training.batch_size 0.0 +701 12 training.label_smoothing 0.01724361934613377 +701 13 model.embedding_dim 2.0 +701 13 model.scoring_fct_norm 1.0 +701 13 optimizer.lr 0.03032228148445331 +701 13 training.batch_size 2.0 +701 13 training.label_smoothing 0.0016468907867877767 +701 14 model.embedding_dim 2.0 +701 14 
model.scoring_fct_norm 2.0 +701 14 optimizer.lr 0.01238259892148337 +701 14 training.batch_size 1.0 +701 14 training.label_smoothing 0.0030519759866085693 +701 15 model.embedding_dim 1.0 +701 15 model.scoring_fct_norm 1.0 +701 15 optimizer.lr 0.007673016088147261 +701 15 training.batch_size 1.0 +701 15 training.label_smoothing 0.036287089976293756 +701 16 model.embedding_dim 2.0 +701 16 model.scoring_fct_norm 2.0 +701 16 optimizer.lr 0.004451926737535718 +701 16 training.batch_size 0.0 +701 16 training.label_smoothing 0.2829318158506147 +701 17 model.embedding_dim 0.0 +701 17 model.scoring_fct_norm 2.0 +701 17 optimizer.lr 0.015222166341873938 +701 17 training.batch_size 2.0 +701 17 training.label_smoothing 0.15612977125241678 +701 18 model.embedding_dim 0.0 +701 18 model.scoring_fct_norm 1.0 +701 18 optimizer.lr 0.030020034907309198 +701 18 training.batch_size 2.0 +701 18 training.label_smoothing 0.023262455765387303 +701 19 model.embedding_dim 0.0 +701 19 model.scoring_fct_norm 1.0 +701 19 optimizer.lr 0.0471970224605222 +701 19 training.batch_size 2.0 +701 19 training.label_smoothing 0.044468598739291085 +701 20 model.embedding_dim 2.0 +701 20 model.scoring_fct_norm 1.0 +701 20 optimizer.lr 0.031883251289875014 +701 20 training.batch_size 0.0 +701 20 training.label_smoothing 0.6528148758708904 +701 21 model.embedding_dim 0.0 +701 21 model.scoring_fct_norm 1.0 +701 21 optimizer.lr 0.04950709702616105 +701 21 training.batch_size 2.0 +701 21 training.label_smoothing 0.2563177439294987 +701 22 model.embedding_dim 1.0 +701 22 model.scoring_fct_norm 1.0 +701 22 optimizer.lr 0.0014767323239464785 +701 22 training.batch_size 1.0 +701 22 training.label_smoothing 0.5078635233263855 +701 23 model.embedding_dim 1.0 +701 23 model.scoring_fct_norm 1.0 +701 23 optimizer.lr 0.06508932906358696 +701 23 training.batch_size 1.0 +701 23 training.label_smoothing 0.008142277295809436 +701 24 model.embedding_dim 1.0 +701 24 model.scoring_fct_norm 2.0 +701 24 optimizer.lr 
0.06469992255343412 +701 24 training.batch_size 2.0 +701 24 training.label_smoothing 0.0381537593014308 +701 25 model.embedding_dim 1.0 +701 25 model.scoring_fct_norm 2.0 +701 25 optimizer.lr 0.009681933011711738 +701 25 training.batch_size 1.0 +701 25 training.label_smoothing 0.0200243318405972 +701 26 model.embedding_dim 0.0 +701 26 model.scoring_fct_norm 2.0 +701 26 optimizer.lr 0.05985551435866734 +701 26 training.batch_size 2.0 +701 26 training.label_smoothing 0.3170593448747227 +701 27 model.embedding_dim 2.0 +701 27 model.scoring_fct_norm 2.0 +701 27 optimizer.lr 0.017249625913553508 +701 27 training.batch_size 0.0 +701 27 training.label_smoothing 0.02936762631392809 +701 28 model.embedding_dim 1.0 +701 28 model.scoring_fct_norm 1.0 +701 28 optimizer.lr 0.05270248687793783 +701 28 training.batch_size 0.0 +701 28 training.label_smoothing 0.003180110503577852 +701 29 model.embedding_dim 1.0 +701 29 model.scoring_fct_norm 1.0 +701 29 optimizer.lr 0.011287113379834143 +701 29 training.batch_size 2.0 +701 29 training.label_smoothing 0.028763978502925634 +701 30 model.embedding_dim 0.0 +701 30 model.scoring_fct_norm 2.0 +701 30 optimizer.lr 0.004658326878018547 +701 30 training.batch_size 2.0 +701 30 training.label_smoothing 0.11550173149986763 +701 31 model.embedding_dim 2.0 +701 31 model.scoring_fct_norm 2.0 +701 31 optimizer.lr 0.08162240315779373 +701 31 training.batch_size 1.0 +701 31 training.label_smoothing 0.06799388952327083 +701 32 model.embedding_dim 0.0 +701 32 model.scoring_fct_norm 1.0 +701 32 optimizer.lr 0.0017909181639604022 +701 32 training.batch_size 0.0 +701 32 training.label_smoothing 0.24557123878756748 +701 33 model.embedding_dim 1.0 +701 33 model.scoring_fct_norm 1.0 +701 33 optimizer.lr 0.001004879683387579 +701 33 training.batch_size 2.0 +701 33 training.label_smoothing 0.0015272463551326582 +701 34 model.embedding_dim 1.0 +701 34 model.scoring_fct_norm 2.0 +701 34 optimizer.lr 0.0073636970969502644 +701 34 training.batch_size 2.0 +701 34 
training.label_smoothing 0.40571657238026015 +701 35 model.embedding_dim 1.0 +701 35 model.scoring_fct_norm 1.0 +701 35 optimizer.lr 0.01979357164633662 +701 35 training.batch_size 1.0 +701 35 training.label_smoothing 0.0028032405952113856 +701 36 model.embedding_dim 1.0 +701 36 model.scoring_fct_norm 1.0 +701 36 optimizer.lr 0.04262238803711508 +701 36 training.batch_size 0.0 +701 36 training.label_smoothing 0.05263742376752776 +701 37 model.embedding_dim 2.0 +701 37 model.scoring_fct_norm 1.0 +701 37 optimizer.lr 0.017465683316221624 +701 37 training.batch_size 0.0 +701 37 training.label_smoothing 0.3787026307946641 +701 38 model.embedding_dim 0.0 +701 38 model.scoring_fct_norm 1.0 +701 38 optimizer.lr 0.0025503308256779413 +701 38 training.batch_size 1.0 +701 38 training.label_smoothing 0.4963474184058693 +701 39 model.embedding_dim 2.0 +701 39 model.scoring_fct_norm 2.0 +701 39 optimizer.lr 0.012003724558258076 +701 39 training.batch_size 2.0 +701 39 training.label_smoothing 0.13716740206322434 +701 40 model.embedding_dim 0.0 +701 40 model.scoring_fct_norm 1.0 +701 40 optimizer.lr 0.0034696882557830855 +701 40 training.batch_size 0.0 +701 40 training.label_smoothing 0.06274688943547838 +701 41 model.embedding_dim 1.0 +701 41 model.scoring_fct_norm 1.0 +701 41 optimizer.lr 0.002003140266864528 +701 41 training.batch_size 2.0 +701 41 training.label_smoothing 0.052489656667242285 +701 42 model.embedding_dim 0.0 +701 42 model.scoring_fct_norm 2.0 +701 42 optimizer.lr 0.0550105390889457 +701 42 training.batch_size 0.0 +701 42 training.label_smoothing 0.27918886299561607 +701 43 model.embedding_dim 2.0 +701 43 model.scoring_fct_norm 2.0 +701 43 optimizer.lr 0.002865313567925011 +701 43 training.batch_size 1.0 +701 43 training.label_smoothing 0.54581152273154 +701 44 model.embedding_dim 0.0 +701 44 model.scoring_fct_norm 1.0 +701 44 optimizer.lr 0.05690821811646472 +701 44 training.batch_size 2.0 +701 44 training.label_smoothing 0.17497797542597157 +701 45 
model.embedding_dim 2.0 +701 45 model.scoring_fct_norm 1.0 +701 45 optimizer.lr 0.02566330398903633 +701 45 training.batch_size 0.0 +701 45 training.label_smoothing 0.013186489976638346 +701 46 model.embedding_dim 2.0 +701 46 model.scoring_fct_norm 1.0 +701 46 optimizer.lr 0.003718567830441241 +701 46 training.batch_size 2.0 +701 46 training.label_smoothing 0.03296920863550097 +701 47 model.embedding_dim 0.0 +701 47 model.scoring_fct_norm 2.0 +701 47 optimizer.lr 0.001818744527549138 +701 47 training.batch_size 0.0 +701 47 training.label_smoothing 0.011157771348849 +701 48 model.embedding_dim 0.0 +701 48 model.scoring_fct_norm 2.0 +701 48 optimizer.lr 0.0013371145532436854 +701 48 training.batch_size 1.0 +701 48 training.label_smoothing 0.012071263577456833 +701 49 model.embedding_dim 2.0 +701 49 model.scoring_fct_norm 1.0 +701 49 optimizer.lr 0.0899713099525047 +701 49 training.batch_size 0.0 +701 49 training.label_smoothing 0.02580886591660335 +701 50 model.embedding_dim 0.0 +701 50 model.scoring_fct_norm 1.0 +701 50 optimizer.lr 0.0016851023889775617 +701 50 training.batch_size 0.0 +701 50 training.label_smoothing 0.005657404468905691 +701 51 model.embedding_dim 2.0 +701 51 model.scoring_fct_norm 1.0 +701 51 optimizer.lr 0.010124932884306097 +701 51 training.batch_size 0.0 +701 51 training.label_smoothing 0.19863472321142245 +701 52 model.embedding_dim 2.0 +701 52 model.scoring_fct_norm 1.0 +701 52 optimizer.lr 0.0017251317077457595 +701 52 training.batch_size 2.0 +701 52 training.label_smoothing 0.10439581556976613 +701 53 model.embedding_dim 1.0 +701 53 model.scoring_fct_norm 2.0 +701 53 optimizer.lr 0.014373257922475511 +701 53 training.batch_size 1.0 +701 53 training.label_smoothing 0.001404332543729076 +701 54 model.embedding_dim 1.0 +701 54 model.scoring_fct_norm 1.0 +701 54 optimizer.lr 0.009712936587804722 +701 54 training.batch_size 0.0 +701 54 training.label_smoothing 0.6260410044023951 +701 55 model.embedding_dim 1.0 +701 55 model.scoring_fct_norm 1.0 
+701 55 optimizer.lr 0.0012240510297547526 +701 55 training.batch_size 0.0 +701 55 training.label_smoothing 0.0017569020026694984 +701 56 model.embedding_dim 1.0 +701 56 model.scoring_fct_norm 2.0 +701 56 optimizer.lr 0.01765149754801628 +701 56 training.batch_size 1.0 +701 56 training.label_smoothing 0.5475099306204411 +701 57 model.embedding_dim 0.0 +701 57 model.scoring_fct_norm 2.0 +701 57 optimizer.lr 0.022254838427228824 +701 57 training.batch_size 1.0 +701 57 training.label_smoothing 0.013597543057064489 +701 58 model.embedding_dim 2.0 +701 58 model.scoring_fct_norm 1.0 +701 58 optimizer.lr 0.03758377499107166 +701 58 training.batch_size 1.0 +701 58 training.label_smoothing 0.24687552573311444 +701 59 model.embedding_dim 2.0 +701 59 model.scoring_fct_norm 1.0 +701 59 optimizer.lr 0.010695849788190765 +701 59 training.batch_size 1.0 +701 59 training.label_smoothing 0.14665402049852735 +701 60 model.embedding_dim 1.0 +701 60 model.scoring_fct_norm 1.0 +701 60 optimizer.lr 0.08450319613975349 +701 60 training.batch_size 2.0 +701 60 training.label_smoothing 0.31697575282920015 +701 61 model.embedding_dim 0.0 +701 61 model.scoring_fct_norm 1.0 +701 61 optimizer.lr 0.06471660498178684 +701 61 training.batch_size 0.0 +701 61 training.label_smoothing 0.01061578885990759 +701 62 model.embedding_dim 0.0 +701 62 model.scoring_fct_norm 1.0 +701 62 optimizer.lr 0.005933672206107137 +701 62 training.batch_size 1.0 +701 62 training.label_smoothing 0.01334773746739671 +701 63 model.embedding_dim 2.0 +701 63 model.scoring_fct_norm 1.0 +701 63 optimizer.lr 0.008311893474414236 +701 63 training.batch_size 1.0 +701 63 training.label_smoothing 0.008054996967172916 +701 64 model.embedding_dim 2.0 +701 64 model.scoring_fct_norm 1.0 +701 64 optimizer.lr 0.001316546876766238 +701 64 training.batch_size 2.0 +701 64 training.label_smoothing 0.002968395878681188 +701 65 model.embedding_dim 1.0 +701 65 model.scoring_fct_norm 2.0 +701 65 optimizer.lr 0.0011362658226431767 +701 65 
training.batch_size 1.0 +701 65 training.label_smoothing 0.03867356615006299 +701 66 model.embedding_dim 2.0 +701 66 model.scoring_fct_norm 2.0 +701 66 optimizer.lr 0.01273144608686672 +701 66 training.batch_size 2.0 +701 66 training.label_smoothing 0.007279176782704565 +701 67 model.embedding_dim 1.0 +701 67 model.scoring_fct_norm 2.0 +701 67 optimizer.lr 0.0011145778146224418 +701 67 training.batch_size 1.0 +701 67 training.label_smoothing 0.5452277231653496 +701 68 model.embedding_dim 1.0 +701 68 model.scoring_fct_norm 2.0 +701 68 optimizer.lr 0.002265604790572085 +701 68 training.batch_size 2.0 +701 68 training.label_smoothing 0.030973315620871373 +701 69 model.embedding_dim 2.0 +701 69 model.scoring_fct_norm 1.0 +701 69 optimizer.lr 0.06490154720644706 +701 69 training.batch_size 2.0 +701 69 training.label_smoothing 0.005847046749187072 +701 70 model.embedding_dim 2.0 +701 70 model.scoring_fct_norm 1.0 +701 70 optimizer.lr 0.00638789502488364 +701 70 training.batch_size 0.0 +701 70 training.label_smoothing 0.07587683814215399 +701 71 model.embedding_dim 0.0 +701 71 model.scoring_fct_norm 2.0 +701 71 optimizer.lr 0.014249019219310186 +701 71 training.batch_size 2.0 +701 71 training.label_smoothing 0.0049205291756370334 +701 72 model.embedding_dim 0.0 +701 72 model.scoring_fct_norm 1.0 +701 72 optimizer.lr 0.09322632210451025 +701 72 training.batch_size 1.0 +701 72 training.label_smoothing 0.7992986371833289 +701 73 model.embedding_dim 1.0 +701 73 model.scoring_fct_norm 1.0 +701 73 optimizer.lr 0.03208490483249806 +701 73 training.batch_size 0.0 +701 73 training.label_smoothing 0.9990690354620289 +701 74 model.embedding_dim 1.0 +701 74 model.scoring_fct_norm 1.0 +701 74 optimizer.lr 0.020103960462177273 +701 74 training.batch_size 0.0 +701 74 training.label_smoothing 0.006632276673105753 +701 75 model.embedding_dim 0.0 +701 75 model.scoring_fct_norm 1.0 +701 75 optimizer.lr 0.0039031779961829272 +701 75 training.batch_size 0.0 +701 75 training.label_smoothing 
0.04617569131928873 +701 76 model.embedding_dim 2.0 +701 76 model.scoring_fct_norm 1.0 +701 76 optimizer.lr 0.053520974935000964 +701 76 training.batch_size 1.0 +701 76 training.label_smoothing 0.0027471950724640166 +701 77 model.embedding_dim 0.0 +701 77 model.scoring_fct_norm 1.0 +701 77 optimizer.lr 0.001211476336475972 +701 77 training.batch_size 2.0 +701 77 training.label_smoothing 0.02829631255894541 +701 78 model.embedding_dim 2.0 +701 78 model.scoring_fct_norm 2.0 +701 78 optimizer.lr 0.0027951620055539786 +701 78 training.batch_size 1.0 +701 78 training.label_smoothing 0.1304601859087995 +701 79 model.embedding_dim 1.0 +701 79 model.scoring_fct_norm 1.0 +701 79 optimizer.lr 0.01436374163222146 +701 79 training.batch_size 0.0 +701 79 training.label_smoothing 0.0056657455531240655 +701 80 model.embedding_dim 1.0 +701 80 model.scoring_fct_norm 1.0 +701 80 optimizer.lr 0.004209289523808896 +701 80 training.batch_size 1.0 +701 80 training.label_smoothing 0.01222324895472678 +701 81 model.embedding_dim 1.0 +701 81 model.scoring_fct_norm 1.0 +701 81 optimizer.lr 0.0010629874359643454 +701 81 training.batch_size 2.0 +701 81 training.label_smoothing 0.006344356534799821 +701 82 model.embedding_dim 0.0 +701 82 model.scoring_fct_norm 2.0 +701 82 optimizer.lr 0.05056446161624211 +701 82 training.batch_size 1.0 +701 82 training.label_smoothing 0.008318051555706713 +701 83 model.embedding_dim 1.0 +701 83 model.scoring_fct_norm 1.0 +701 83 optimizer.lr 0.0034514336167222348 +701 83 training.batch_size 2.0 +701 83 training.label_smoothing 0.0019165766821913851 +701 84 model.embedding_dim 2.0 +701 84 model.scoring_fct_norm 1.0 +701 84 optimizer.lr 0.0015111726784361231 +701 84 training.batch_size 2.0 +701 84 training.label_smoothing 0.16817556574537051 +701 85 model.embedding_dim 1.0 +701 85 model.scoring_fct_norm 1.0 +701 85 optimizer.lr 0.01058575787351268 +701 85 training.batch_size 1.0 +701 85 training.label_smoothing 0.050979737230470806 +701 86 model.embedding_dim 
0.0 +701 86 model.scoring_fct_norm 1.0 +701 86 optimizer.lr 0.07105448222169809 +701 86 training.batch_size 2.0 +701 86 training.label_smoothing 0.15727343585218234 +701 87 model.embedding_dim 2.0 +701 87 model.scoring_fct_norm 2.0 +701 87 optimizer.lr 0.06394047127956222 +701 87 training.batch_size 2.0 +701 87 training.label_smoothing 0.07975818792378966 +701 88 model.embedding_dim 1.0 +701 88 model.scoring_fct_norm 1.0 +701 88 optimizer.lr 0.0010234478521016226 +701 88 training.batch_size 0.0 +701 88 training.label_smoothing 0.09584538133768555 +701 89 model.embedding_dim 1.0 +701 89 model.scoring_fct_norm 1.0 +701 89 optimizer.lr 0.02985347444018292 +701 89 training.batch_size 0.0 +701 89 training.label_smoothing 0.5073408743691806 +701 90 model.embedding_dim 2.0 +701 90 model.scoring_fct_norm 1.0 +701 90 optimizer.lr 0.005512071272064347 +701 90 training.batch_size 0.0 +701 90 training.label_smoothing 0.5906368028052946 +701 91 model.embedding_dim 1.0 +701 91 model.scoring_fct_norm 2.0 +701 91 optimizer.lr 0.029655218817280712 +701 91 training.batch_size 0.0 +701 91 training.label_smoothing 0.024535707137162476 +701 92 model.embedding_dim 1.0 +701 92 model.scoring_fct_norm 1.0 +701 92 optimizer.lr 0.011011731478477912 +701 92 training.batch_size 1.0 +701 92 training.label_smoothing 0.004620880910097907 +701 93 model.embedding_dim 2.0 +701 93 model.scoring_fct_norm 1.0 +701 93 optimizer.lr 0.02939048156842082 +701 93 training.batch_size 1.0 +701 93 training.label_smoothing 0.024693002751473167 +701 94 model.embedding_dim 1.0 +701 94 model.scoring_fct_norm 1.0 +701 94 optimizer.lr 0.004579765964505451 +701 94 training.batch_size 1.0 +701 94 training.label_smoothing 0.0025657165141948885 +701 95 model.embedding_dim 1.0 +701 95 model.scoring_fct_norm 1.0 +701 95 optimizer.lr 0.0014113142411042146 +701 95 training.batch_size 2.0 +701 95 training.label_smoothing 0.008484496722672607 +701 96 model.embedding_dim 1.0 +701 96 model.scoring_fct_norm 1.0 +701 96 
optimizer.lr 0.0038147274347565922 +701 96 training.batch_size 1.0 +701 96 training.label_smoothing 0.24033957384666066 +701 97 model.embedding_dim 0.0 +701 97 model.scoring_fct_norm 2.0 +701 97 optimizer.lr 0.02680458078147498 +701 97 training.batch_size 2.0 +701 97 training.label_smoothing 0.003507878981355771 +701 98 model.embedding_dim 1.0 +701 98 model.scoring_fct_norm 2.0 +701 98 optimizer.lr 0.06223517741724966 +701 98 training.batch_size 0.0 +701 98 training.label_smoothing 0.11174862941199688 +701 99 model.embedding_dim 1.0 +701 99 model.scoring_fct_norm 2.0 +701 99 optimizer.lr 0.0038307928966999243 +701 99 training.batch_size 0.0 +701 99 training.label_smoothing 0.04857363649787378 +701 100 model.embedding_dim 1.0 +701 100 model.scoring_fct_norm 2.0 +701 100 optimizer.lr 0.06388100086791686 +701 100 training.batch_size 0.0 +701 100 training.label_smoothing 0.006337863845264539 +701 1 dataset """kinships""" +701 1 model """structuredembedding""" +701 1 loss """bceaftersigmoid""" +701 1 regularizer """no""" +701 1 optimizer """adam""" +701 1 training_loop """lcwa""" +701 1 evaluator """rankbased""" +701 2 dataset """kinships""" +701 2 model """structuredembedding""" +701 2 loss """bceaftersigmoid""" +701 2 regularizer """no""" +701 2 optimizer """adam""" +701 2 training_loop """lcwa""" +701 2 evaluator """rankbased""" +701 3 dataset """kinships""" +701 3 model """structuredembedding""" +701 3 loss """bceaftersigmoid""" +701 3 regularizer """no""" +701 3 optimizer """adam""" +701 3 training_loop """lcwa""" +701 3 evaluator """rankbased""" +701 4 dataset """kinships""" +701 4 model """structuredembedding""" +701 4 loss """bceaftersigmoid""" +701 4 regularizer """no""" +701 4 optimizer """adam""" +701 4 training_loop """lcwa""" +701 4 evaluator """rankbased""" +701 5 dataset """kinships""" +701 5 model """structuredembedding""" +701 5 loss """bceaftersigmoid""" +701 5 regularizer """no""" +701 5 optimizer """adam""" +701 5 training_loop """lcwa""" +701 5 
evaluator """rankbased""" +701 6 dataset """kinships""" +701 6 model """structuredembedding""" +701 6 loss """bceaftersigmoid""" +701 6 regularizer """no""" +701 6 optimizer """adam""" +701 6 training_loop """lcwa""" +701 6 evaluator """rankbased""" +701 7 dataset """kinships""" +701 7 model """structuredembedding""" +701 7 loss """bceaftersigmoid""" +701 7 regularizer """no""" +701 7 optimizer """adam""" +701 7 training_loop """lcwa""" +701 7 evaluator """rankbased""" +701 8 dataset """kinships""" +701 8 model """structuredembedding""" +701 8 loss """bceaftersigmoid""" +701 8 regularizer """no""" +701 8 optimizer """adam""" +701 8 training_loop """lcwa""" +701 8 evaluator """rankbased""" +701 9 dataset """kinships""" +701 9 model """structuredembedding""" +701 9 loss """bceaftersigmoid""" +701 9 regularizer """no""" +701 9 optimizer """adam""" +701 9 training_loop """lcwa""" +701 9 evaluator """rankbased""" +701 10 dataset """kinships""" +701 10 model """structuredembedding""" +701 10 loss """bceaftersigmoid""" +701 10 regularizer """no""" +701 10 optimizer """adam""" +701 10 training_loop """lcwa""" +701 10 evaluator """rankbased""" +701 11 dataset """kinships""" +701 11 model """structuredembedding""" +701 11 loss """bceaftersigmoid""" +701 11 regularizer """no""" +701 11 optimizer """adam""" +701 11 training_loop """lcwa""" +701 11 evaluator """rankbased""" +701 12 dataset """kinships""" +701 12 model """structuredembedding""" +701 12 loss """bceaftersigmoid""" +701 12 regularizer """no""" +701 12 optimizer """adam""" +701 12 training_loop """lcwa""" +701 12 evaluator """rankbased""" +701 13 dataset """kinships""" +701 13 model """structuredembedding""" +701 13 loss """bceaftersigmoid""" +701 13 regularizer """no""" +701 13 optimizer """adam""" +701 13 training_loop """lcwa""" +701 13 evaluator """rankbased""" +701 14 dataset """kinships""" +701 14 model """structuredembedding""" +701 14 loss """bceaftersigmoid""" +701 14 regularizer """no""" +701 14 optimizer 
"""adam""" +701 14 training_loop """lcwa""" +701 14 evaluator """rankbased""" +701 15 dataset """kinships""" +701 15 model """structuredembedding""" +701 15 loss """bceaftersigmoid""" +701 15 regularizer """no""" +701 15 optimizer """adam""" +701 15 training_loop """lcwa""" +701 15 evaluator """rankbased""" +701 16 dataset """kinships""" +701 16 model """structuredembedding""" +701 16 loss """bceaftersigmoid""" +701 16 regularizer """no""" +701 16 optimizer """adam""" +701 16 training_loop """lcwa""" +701 16 evaluator """rankbased""" +701 17 dataset """kinships""" +701 17 model """structuredembedding""" +701 17 loss """bceaftersigmoid""" +701 17 regularizer """no""" +701 17 optimizer """adam""" +701 17 training_loop """lcwa""" +701 17 evaluator """rankbased""" +701 18 dataset """kinships""" +701 18 model """structuredembedding""" +701 18 loss """bceaftersigmoid""" +701 18 regularizer """no""" +701 18 optimizer """adam""" +701 18 training_loop """lcwa""" +701 18 evaluator """rankbased""" +701 19 dataset """kinships""" +701 19 model """structuredembedding""" +701 19 loss """bceaftersigmoid""" +701 19 regularizer """no""" +701 19 optimizer """adam""" +701 19 training_loop """lcwa""" +701 19 evaluator """rankbased""" +701 20 dataset """kinships""" +701 20 model """structuredembedding""" +701 20 loss """bceaftersigmoid""" +701 20 regularizer """no""" +701 20 optimizer """adam""" +701 20 training_loop """lcwa""" +701 20 evaluator """rankbased""" +701 21 dataset """kinships""" +701 21 model """structuredembedding""" +701 21 loss """bceaftersigmoid""" +701 21 regularizer """no""" +701 21 optimizer """adam""" +701 21 training_loop """lcwa""" +701 21 evaluator """rankbased""" +701 22 dataset """kinships""" +701 22 model """structuredembedding""" +701 22 loss """bceaftersigmoid""" +701 22 regularizer """no""" +701 22 optimizer """adam""" +701 22 training_loop """lcwa""" +701 22 evaluator """rankbased""" +701 23 dataset """kinships""" +701 23 model """structuredembedding""" 
+701 23 loss """bceaftersigmoid""" +701 23 regularizer """no""" +701 23 optimizer """adam""" +701 23 training_loop """lcwa""" +701 23 evaluator """rankbased""" +701 24 dataset """kinships""" +701 24 model """structuredembedding""" +701 24 loss """bceaftersigmoid""" +701 24 regularizer """no""" +701 24 optimizer """adam""" +701 24 training_loop """lcwa""" +701 24 evaluator """rankbased""" +701 25 dataset """kinships""" +701 25 model """structuredembedding""" +701 25 loss """bceaftersigmoid""" +701 25 regularizer """no""" +701 25 optimizer """adam""" +701 25 training_loop """lcwa""" +701 25 evaluator """rankbased""" +701 26 dataset """kinships""" +701 26 model """structuredembedding""" +701 26 loss """bceaftersigmoid""" +701 26 regularizer """no""" +701 26 optimizer """adam""" +701 26 training_loop """lcwa""" +701 26 evaluator """rankbased""" +701 27 dataset """kinships""" +701 27 model """structuredembedding""" +701 27 loss """bceaftersigmoid""" +701 27 regularizer """no""" +701 27 optimizer """adam""" +701 27 training_loop """lcwa""" +701 27 evaluator """rankbased""" +701 28 dataset """kinships""" +701 28 model """structuredembedding""" +701 28 loss """bceaftersigmoid""" +701 28 regularizer """no""" +701 28 optimizer """adam""" +701 28 training_loop """lcwa""" +701 28 evaluator """rankbased""" +701 29 dataset """kinships""" +701 29 model """structuredembedding""" +701 29 loss """bceaftersigmoid""" +701 29 regularizer """no""" +701 29 optimizer """adam""" +701 29 training_loop """lcwa""" +701 29 evaluator """rankbased""" +701 30 dataset """kinships""" +701 30 model """structuredembedding""" +701 30 loss """bceaftersigmoid""" +701 30 regularizer """no""" +701 30 optimizer """adam""" +701 30 training_loop """lcwa""" +701 30 evaluator """rankbased""" +701 31 dataset """kinships""" +701 31 model """structuredembedding""" +701 31 loss """bceaftersigmoid""" +701 31 regularizer """no""" +701 31 optimizer """adam""" +701 31 training_loop """lcwa""" +701 31 evaluator 
"""rankbased""" +701 32 dataset """kinships""" +701 32 model """structuredembedding""" +701 32 loss """bceaftersigmoid""" +701 32 regularizer """no""" +701 32 optimizer """adam""" +701 32 training_loop """lcwa""" +701 32 evaluator """rankbased""" +701 33 dataset """kinships""" +701 33 model """structuredembedding""" +701 33 loss """bceaftersigmoid""" +701 33 regularizer """no""" +701 33 optimizer """adam""" +701 33 training_loop """lcwa""" +701 33 evaluator """rankbased""" +701 34 dataset """kinships""" +701 34 model """structuredembedding""" +701 34 loss """bceaftersigmoid""" +701 34 regularizer """no""" +701 34 optimizer """adam""" +701 34 training_loop """lcwa""" +701 34 evaluator """rankbased""" +701 35 dataset """kinships""" +701 35 model """structuredembedding""" +701 35 loss """bceaftersigmoid""" +701 35 regularizer """no""" +701 35 optimizer """adam""" +701 35 training_loop """lcwa""" +701 35 evaluator """rankbased""" +701 36 dataset """kinships""" +701 36 model """structuredembedding""" +701 36 loss """bceaftersigmoid""" +701 36 regularizer """no""" +701 36 optimizer """adam""" +701 36 training_loop """lcwa""" +701 36 evaluator """rankbased""" +701 37 dataset """kinships""" +701 37 model """structuredembedding""" +701 37 loss """bceaftersigmoid""" +701 37 regularizer """no""" +701 37 optimizer """adam""" +701 37 training_loop """lcwa""" +701 37 evaluator """rankbased""" +701 38 dataset """kinships""" +701 38 model """structuredembedding""" +701 38 loss """bceaftersigmoid""" +701 38 regularizer """no""" +701 38 optimizer """adam""" +701 38 training_loop """lcwa""" +701 38 evaluator """rankbased""" +701 39 dataset """kinships""" +701 39 model """structuredembedding""" +701 39 loss """bceaftersigmoid""" +701 39 regularizer """no""" +701 39 optimizer """adam""" +701 39 training_loop """lcwa""" +701 39 evaluator """rankbased""" +701 40 dataset """kinships""" +701 40 model """structuredembedding""" +701 40 loss """bceaftersigmoid""" +701 40 regularizer """no""" 
+701 40 optimizer """adam""" +701 40 training_loop """lcwa""" +701 40 evaluator """rankbased""" +701 41 dataset """kinships""" +701 41 model """structuredembedding""" +701 41 loss """bceaftersigmoid""" +701 41 regularizer """no""" +701 41 optimizer """adam""" +701 41 training_loop """lcwa""" +701 41 evaluator """rankbased""" +701 42 dataset """kinships""" +701 42 model """structuredembedding""" +701 42 loss """bceaftersigmoid""" +701 42 regularizer """no""" +701 42 optimizer """adam""" +701 42 training_loop """lcwa""" +701 42 evaluator """rankbased""" +701 43 dataset """kinships""" +701 43 model """structuredembedding""" +701 43 loss """bceaftersigmoid""" +701 43 regularizer """no""" +701 43 optimizer """adam""" +701 43 training_loop """lcwa""" +701 43 evaluator """rankbased""" +701 44 dataset """kinships""" +701 44 model """structuredembedding""" +701 44 loss """bceaftersigmoid""" +701 44 regularizer """no""" +701 44 optimizer """adam""" +701 44 training_loop """lcwa""" +701 44 evaluator """rankbased""" +701 45 dataset """kinships""" +701 45 model """structuredembedding""" +701 45 loss """bceaftersigmoid""" +701 45 regularizer """no""" +701 45 optimizer """adam""" +701 45 training_loop """lcwa""" +701 45 evaluator """rankbased""" +701 46 dataset """kinships""" +701 46 model """structuredembedding""" +701 46 loss """bceaftersigmoid""" +701 46 regularizer """no""" +701 46 optimizer """adam""" +701 46 training_loop """lcwa""" +701 46 evaluator """rankbased""" +701 47 dataset """kinships""" +701 47 model """structuredembedding""" +701 47 loss """bceaftersigmoid""" +701 47 regularizer """no""" +701 47 optimizer """adam""" +701 47 training_loop """lcwa""" +701 47 evaluator """rankbased""" +701 48 dataset """kinships""" +701 48 model """structuredembedding""" +701 48 loss """bceaftersigmoid""" +701 48 regularizer """no""" +701 48 optimizer """adam""" +701 48 training_loop """lcwa""" +701 48 evaluator """rankbased""" +701 49 dataset """kinships""" +701 49 model 
"""structuredembedding""" +701 49 loss """bceaftersigmoid""" +701 49 regularizer """no""" +701 49 optimizer """adam""" +701 49 training_loop """lcwa""" +701 49 evaluator """rankbased""" +701 50 dataset """kinships""" +701 50 model """structuredembedding""" +701 50 loss """bceaftersigmoid""" +701 50 regularizer """no""" +701 50 optimizer """adam""" +701 50 training_loop """lcwa""" +701 50 evaluator """rankbased""" +701 51 dataset """kinships""" +701 51 model """structuredembedding""" +701 51 loss """bceaftersigmoid""" +701 51 regularizer """no""" +701 51 optimizer """adam""" +701 51 training_loop """lcwa""" +701 51 evaluator """rankbased""" +701 52 dataset """kinships""" +701 52 model """structuredembedding""" +701 52 loss """bceaftersigmoid""" +701 52 regularizer """no""" +701 52 optimizer """adam""" +701 52 training_loop """lcwa""" +701 52 evaluator """rankbased""" +701 53 dataset """kinships""" +701 53 model """structuredembedding""" +701 53 loss """bceaftersigmoid""" +701 53 regularizer """no""" +701 53 optimizer """adam""" +701 53 training_loop """lcwa""" +701 53 evaluator """rankbased""" +701 54 dataset """kinships""" +701 54 model """structuredembedding""" +701 54 loss """bceaftersigmoid""" +701 54 regularizer """no""" +701 54 optimizer """adam""" +701 54 training_loop """lcwa""" +701 54 evaluator """rankbased""" +701 55 dataset """kinships""" +701 55 model """structuredembedding""" +701 55 loss """bceaftersigmoid""" +701 55 regularizer """no""" +701 55 optimizer """adam""" +701 55 training_loop """lcwa""" +701 55 evaluator """rankbased""" +701 56 dataset """kinships""" +701 56 model """structuredembedding""" +701 56 loss """bceaftersigmoid""" +701 56 regularizer """no""" +701 56 optimizer """adam""" +701 56 training_loop """lcwa""" +701 56 evaluator """rankbased""" +701 57 dataset """kinships""" +701 57 model """structuredembedding""" +701 57 loss """bceaftersigmoid""" +701 57 regularizer """no""" +701 57 optimizer """adam""" +701 57 training_loop """lcwa""" 
+701 57 evaluator """rankbased""" +701 58 dataset """kinships""" +701 58 model """structuredembedding""" +701 58 loss """bceaftersigmoid""" +701 58 regularizer """no""" +701 58 optimizer """adam""" +701 58 training_loop """lcwa""" +701 58 evaluator """rankbased""" +701 59 dataset """kinships""" +701 59 model """structuredembedding""" +701 59 loss """bceaftersigmoid""" +701 59 regularizer """no""" +701 59 optimizer """adam""" +701 59 training_loop """lcwa""" +701 59 evaluator """rankbased""" +701 60 dataset """kinships""" +701 60 model """structuredembedding""" +701 60 loss """bceaftersigmoid""" +701 60 regularizer """no""" +701 60 optimizer """adam""" +701 60 training_loop """lcwa""" +701 60 evaluator """rankbased""" +701 61 dataset """kinships""" +701 61 model """structuredembedding""" +701 61 loss """bceaftersigmoid""" +701 61 regularizer """no""" +701 61 optimizer """adam""" +701 61 training_loop """lcwa""" +701 61 evaluator """rankbased""" +701 62 dataset """kinships""" +701 62 model """structuredembedding""" +701 62 loss """bceaftersigmoid""" +701 62 regularizer """no""" +701 62 optimizer """adam""" +701 62 training_loop """lcwa""" +701 62 evaluator """rankbased""" +701 63 dataset """kinships""" +701 63 model """structuredembedding""" +701 63 loss """bceaftersigmoid""" +701 63 regularizer """no""" +701 63 optimizer """adam""" +701 63 training_loop """lcwa""" +701 63 evaluator """rankbased""" +701 64 dataset """kinships""" +701 64 model """structuredembedding""" +701 64 loss """bceaftersigmoid""" +701 64 regularizer """no""" +701 64 optimizer """adam""" +701 64 training_loop """lcwa""" +701 64 evaluator """rankbased""" +701 65 dataset """kinships""" +701 65 model """structuredembedding""" +701 65 loss """bceaftersigmoid""" +701 65 regularizer """no""" +701 65 optimizer """adam""" +701 65 training_loop """lcwa""" +701 65 evaluator """rankbased""" +701 66 dataset """kinships""" +701 66 model """structuredembedding""" +701 66 loss """bceaftersigmoid""" +701 66 
regularizer """no""" +701 66 optimizer """adam""" +701 66 training_loop """lcwa""" +701 66 evaluator """rankbased""" +701 67 dataset """kinships""" +701 67 model """structuredembedding""" +701 67 loss """bceaftersigmoid""" +701 67 regularizer """no""" +701 67 optimizer """adam""" +701 67 training_loop """lcwa""" +701 67 evaluator """rankbased""" +701 68 dataset """kinships""" +701 68 model """structuredembedding""" +701 68 loss """bceaftersigmoid""" +701 68 regularizer """no""" +701 68 optimizer """adam""" +701 68 training_loop """lcwa""" +701 68 evaluator """rankbased""" +701 69 dataset """kinships""" +701 69 model """structuredembedding""" +701 69 loss """bceaftersigmoid""" +701 69 regularizer """no""" +701 69 optimizer """adam""" +701 69 training_loop """lcwa""" +701 69 evaluator """rankbased""" +701 70 dataset """kinships""" +701 70 model """structuredembedding""" +701 70 loss """bceaftersigmoid""" +701 70 regularizer """no""" +701 70 optimizer """adam""" +701 70 training_loop """lcwa""" +701 70 evaluator """rankbased""" +701 71 dataset """kinships""" +701 71 model """structuredembedding""" +701 71 loss """bceaftersigmoid""" +701 71 regularizer """no""" +701 71 optimizer """adam""" +701 71 training_loop """lcwa""" +701 71 evaluator """rankbased""" +701 72 dataset """kinships""" +701 72 model """structuredembedding""" +701 72 loss """bceaftersigmoid""" +701 72 regularizer """no""" +701 72 optimizer """adam""" +701 72 training_loop """lcwa""" +701 72 evaluator """rankbased""" +701 73 dataset """kinships""" +701 73 model """structuredembedding""" +701 73 loss """bceaftersigmoid""" +701 73 regularizer """no""" +701 73 optimizer """adam""" +701 73 training_loop """lcwa""" +701 73 evaluator """rankbased""" +701 74 dataset """kinships""" +701 74 model """structuredembedding""" +701 74 loss """bceaftersigmoid""" +701 74 regularizer """no""" +701 74 optimizer """adam""" +701 74 training_loop """lcwa""" +701 74 evaluator """rankbased""" +701 75 dataset """kinships""" 
+701 75 model """structuredembedding""" +701 75 loss """bceaftersigmoid""" +701 75 regularizer """no""" +701 75 optimizer """adam""" +701 75 training_loop """lcwa""" +701 75 evaluator """rankbased""" +701 76 dataset """kinships""" +701 76 model """structuredembedding""" +701 76 loss """bceaftersigmoid""" +701 76 regularizer """no""" +701 76 optimizer """adam""" +701 76 training_loop """lcwa""" +701 76 evaluator """rankbased""" +701 77 dataset """kinships""" +701 77 model """structuredembedding""" +701 77 loss """bceaftersigmoid""" +701 77 regularizer """no""" +701 77 optimizer """adam""" +701 77 training_loop """lcwa""" +701 77 evaluator """rankbased""" +701 78 dataset """kinships""" +701 78 model """structuredembedding""" +701 78 loss """bceaftersigmoid""" +701 78 regularizer """no""" +701 78 optimizer """adam""" +701 78 training_loop """lcwa""" +701 78 evaluator """rankbased""" +701 79 dataset """kinships""" +701 79 model """structuredembedding""" +701 79 loss """bceaftersigmoid""" +701 79 regularizer """no""" +701 79 optimizer """adam""" +701 79 training_loop """lcwa""" +701 79 evaluator """rankbased""" +701 80 dataset """kinships""" +701 80 model """structuredembedding""" +701 80 loss """bceaftersigmoid""" +701 80 regularizer """no""" +701 80 optimizer """adam""" +701 80 training_loop """lcwa""" +701 80 evaluator """rankbased""" +701 81 dataset """kinships""" +701 81 model """structuredembedding""" +701 81 loss """bceaftersigmoid""" +701 81 regularizer """no""" +701 81 optimizer """adam""" +701 81 training_loop """lcwa""" +701 81 evaluator """rankbased""" +701 82 dataset """kinships""" +701 82 model """structuredembedding""" +701 82 loss """bceaftersigmoid""" +701 82 regularizer """no""" +701 82 optimizer """adam""" +701 82 training_loop """lcwa""" +701 82 evaluator """rankbased""" +701 83 dataset """kinships""" +701 83 model """structuredembedding""" +701 83 loss """bceaftersigmoid""" +701 83 regularizer """no""" +701 83 optimizer """adam""" +701 83 
training_loop """lcwa""" +701 83 evaluator """rankbased""" +701 84 dataset """kinships""" +701 84 model """structuredembedding""" +701 84 loss """bceaftersigmoid""" +701 84 regularizer """no""" +701 84 optimizer """adam""" +701 84 training_loop """lcwa""" +701 84 evaluator """rankbased""" +701 85 dataset """kinships""" +701 85 model """structuredembedding""" +701 85 loss """bceaftersigmoid""" +701 85 regularizer """no""" +701 85 optimizer """adam""" +701 85 training_loop """lcwa""" +701 85 evaluator """rankbased""" +701 86 dataset """kinships""" +701 86 model """structuredembedding""" +701 86 loss """bceaftersigmoid""" +701 86 regularizer """no""" +701 86 optimizer """adam""" +701 86 training_loop """lcwa""" +701 86 evaluator """rankbased""" +701 87 dataset """kinships""" +701 87 model """structuredembedding""" +701 87 loss """bceaftersigmoid""" +701 87 regularizer """no""" +701 87 optimizer """adam""" +701 87 training_loop """lcwa""" +701 87 evaluator """rankbased""" +701 88 dataset """kinships""" +701 88 model """structuredembedding""" +701 88 loss """bceaftersigmoid""" +701 88 regularizer """no""" +701 88 optimizer """adam""" +701 88 training_loop """lcwa""" +701 88 evaluator """rankbased""" +701 89 dataset """kinships""" +701 89 model """structuredembedding""" +701 89 loss """bceaftersigmoid""" +701 89 regularizer """no""" +701 89 optimizer """adam""" +701 89 training_loop """lcwa""" +701 89 evaluator """rankbased""" +701 90 dataset """kinships""" +701 90 model """structuredembedding""" +701 90 loss """bceaftersigmoid""" +701 90 regularizer """no""" +701 90 optimizer """adam""" +701 90 training_loop """lcwa""" +701 90 evaluator """rankbased""" +701 91 dataset """kinships""" +701 91 model """structuredembedding""" +701 91 loss """bceaftersigmoid""" +701 91 regularizer """no""" +701 91 optimizer """adam""" +701 91 training_loop """lcwa""" +701 91 evaluator """rankbased""" +701 92 dataset """kinships""" +701 92 model """structuredembedding""" +701 92 loss 
"""bceaftersigmoid""" +701 92 regularizer """no""" +701 92 optimizer """adam""" +701 92 training_loop """lcwa""" +701 92 evaluator """rankbased""" +701 93 dataset """kinships""" +701 93 model """structuredembedding""" +701 93 loss """bceaftersigmoid""" +701 93 regularizer """no""" +701 93 optimizer """adam""" +701 93 training_loop """lcwa""" +701 93 evaluator """rankbased""" +701 94 dataset """kinships""" +701 94 model """structuredembedding""" +701 94 loss """bceaftersigmoid""" +701 94 regularizer """no""" +701 94 optimizer """adam""" +701 94 training_loop """lcwa""" +701 94 evaluator """rankbased""" +701 95 dataset """kinships""" +701 95 model """structuredembedding""" +701 95 loss """bceaftersigmoid""" +701 95 regularizer """no""" +701 95 optimizer """adam""" +701 95 training_loop """lcwa""" +701 95 evaluator """rankbased""" +701 96 dataset """kinships""" +701 96 model """structuredembedding""" +701 96 loss """bceaftersigmoid""" +701 96 regularizer """no""" +701 96 optimizer """adam""" +701 96 training_loop """lcwa""" +701 96 evaluator """rankbased""" +701 97 dataset """kinships""" +701 97 model """structuredembedding""" +701 97 loss """bceaftersigmoid""" +701 97 regularizer """no""" +701 97 optimizer """adam""" +701 97 training_loop """lcwa""" +701 97 evaluator """rankbased""" +701 98 dataset """kinships""" +701 98 model """structuredembedding""" +701 98 loss """bceaftersigmoid""" +701 98 regularizer """no""" +701 98 optimizer """adam""" +701 98 training_loop """lcwa""" +701 98 evaluator """rankbased""" +701 99 dataset """kinships""" +701 99 model """structuredembedding""" +701 99 loss """bceaftersigmoid""" +701 99 regularizer """no""" +701 99 optimizer """adam""" +701 99 training_loop """lcwa""" +701 99 evaluator """rankbased""" +701 100 dataset """kinships""" +701 100 model """structuredembedding""" +701 100 loss """bceaftersigmoid""" +701 100 regularizer """no""" +701 100 optimizer """adam""" +701 100 training_loop """lcwa""" +701 100 evaluator 
"""rankbased""" +702 1 model.embedding_dim 1.0 +702 1 model.scoring_fct_norm 2.0 +702 1 optimizer.lr 0.04230132644111075 +702 1 training.batch_size 0.0 +702 1 training.label_smoothing 0.03134635562061414 +702 2 model.embedding_dim 0.0 +702 2 model.scoring_fct_norm 2.0 +702 2 optimizer.lr 0.0013017189610417452 +702 2 training.batch_size 1.0 +702 2 training.label_smoothing 0.06809306784327382 +702 3 model.embedding_dim 0.0 +702 3 model.scoring_fct_norm 2.0 +702 3 optimizer.lr 0.04851700660004915 +702 3 training.batch_size 0.0 +702 3 training.label_smoothing 0.004362422231056608 +702 4 model.embedding_dim 1.0 +702 4 model.scoring_fct_norm 1.0 +702 4 optimizer.lr 0.0036497697341642595 +702 4 training.batch_size 0.0 +702 4 training.label_smoothing 0.8547652412576007 +702 5 model.embedding_dim 2.0 +702 5 model.scoring_fct_norm 1.0 +702 5 optimizer.lr 0.003167719319235841 +702 5 training.batch_size 0.0 +702 5 training.label_smoothing 0.0036203941306879096 +702 6 model.embedding_dim 2.0 +702 6 model.scoring_fct_norm 2.0 +702 6 optimizer.lr 0.05970095766435257 +702 6 training.batch_size 1.0 +702 6 training.label_smoothing 0.060593084966120606 +702 7 model.embedding_dim 1.0 +702 7 model.scoring_fct_norm 2.0 +702 7 optimizer.lr 0.0032343813230362208 +702 7 training.batch_size 0.0 +702 7 training.label_smoothing 0.006401473376289501 +702 8 model.embedding_dim 1.0 +702 8 model.scoring_fct_norm 2.0 +702 8 optimizer.lr 0.010185892793194228 +702 8 training.batch_size 1.0 +702 8 training.label_smoothing 0.25026055323146196 +702 9 model.embedding_dim 0.0 +702 9 model.scoring_fct_norm 1.0 +702 9 optimizer.lr 0.05599547583937872 +702 9 training.batch_size 2.0 +702 9 training.label_smoothing 0.8621396323942354 +702 10 model.embedding_dim 0.0 +702 10 model.scoring_fct_norm 1.0 +702 10 optimizer.lr 0.03168281758008363 +702 10 training.batch_size 0.0 +702 10 training.label_smoothing 0.0012255157528760963 +702 11 model.embedding_dim 1.0 +702 11 model.scoring_fct_norm 1.0 +702 11 
optimizer.lr 0.0024718237910539695 +702 11 training.batch_size 0.0 +702 11 training.label_smoothing 0.026082929565729496 +702 12 model.embedding_dim 2.0 +702 12 model.scoring_fct_norm 1.0 +702 12 optimizer.lr 0.07214560194748063 +702 12 training.batch_size 1.0 +702 12 training.label_smoothing 0.6112984006772467 +702 13 model.embedding_dim 0.0 +702 13 model.scoring_fct_norm 1.0 +702 13 optimizer.lr 0.007442099585806709 +702 13 training.batch_size 0.0 +702 13 training.label_smoothing 0.1754846999699293 +702 14 model.embedding_dim 0.0 +702 14 model.scoring_fct_norm 2.0 +702 14 optimizer.lr 0.02090049444237481 +702 14 training.batch_size 2.0 +702 14 training.label_smoothing 0.03849487472060146 +702 15 model.embedding_dim 1.0 +702 15 model.scoring_fct_norm 1.0 +702 15 optimizer.lr 0.006456192174248306 +702 15 training.batch_size 0.0 +702 15 training.label_smoothing 0.015920813007046328 +702 16 model.embedding_dim 0.0 +702 16 model.scoring_fct_norm 2.0 +702 16 optimizer.lr 0.06526085737105577 +702 16 training.batch_size 2.0 +702 16 training.label_smoothing 0.01820327345227213 +702 17 model.embedding_dim 0.0 +702 17 model.scoring_fct_norm 1.0 +702 17 optimizer.lr 0.0013961047295078413 +702 17 training.batch_size 2.0 +702 17 training.label_smoothing 0.0014296383729399847 +702 18 model.embedding_dim 2.0 +702 18 model.scoring_fct_norm 1.0 +702 18 optimizer.lr 0.03495038070436008 +702 18 training.batch_size 2.0 +702 18 training.label_smoothing 0.011825727859441719 +702 19 model.embedding_dim 2.0 +702 19 model.scoring_fct_norm 2.0 +702 19 optimizer.lr 0.030355752020784107 +702 19 training.batch_size 1.0 +702 19 training.label_smoothing 0.020734708650047905 +702 20 model.embedding_dim 2.0 +702 20 model.scoring_fct_norm 1.0 +702 20 optimizer.lr 0.0763936843517534 +702 20 training.batch_size 2.0 +702 20 training.label_smoothing 0.011482657281368974 +702 21 model.embedding_dim 0.0 +702 21 model.scoring_fct_norm 2.0 +702 21 optimizer.lr 0.004598604762799058 +702 21 
training.batch_size 1.0 +702 21 training.label_smoothing 0.6647953682189924 +702 22 model.embedding_dim 1.0 +702 22 model.scoring_fct_norm 1.0 +702 22 optimizer.lr 0.035957537098962454 +702 22 training.batch_size 0.0 +702 22 training.label_smoothing 0.07768142142610653 +702 23 model.embedding_dim 2.0 +702 23 model.scoring_fct_norm 1.0 +702 23 optimizer.lr 0.06684032522853704 +702 23 training.batch_size 0.0 +702 23 training.label_smoothing 0.3968877607912471 +702 24 model.embedding_dim 1.0 +702 24 model.scoring_fct_norm 1.0 +702 24 optimizer.lr 0.037010821360664185 +702 24 training.batch_size 0.0 +702 24 training.label_smoothing 0.009271705956225867 +702 25 model.embedding_dim 2.0 +702 25 model.scoring_fct_norm 2.0 +702 25 optimizer.lr 0.056022191022015784 +702 25 training.batch_size 0.0 +702 25 training.label_smoothing 0.9359902039292494 +702 26 model.embedding_dim 2.0 +702 26 model.scoring_fct_norm 1.0 +702 26 optimizer.lr 0.01762848617119783 +702 26 training.batch_size 1.0 +702 26 training.label_smoothing 0.019898095888731787 +702 27 model.embedding_dim 2.0 +702 27 model.scoring_fct_norm 1.0 +702 27 optimizer.lr 0.0063217367761569596 +702 27 training.batch_size 1.0 +702 27 training.label_smoothing 0.0031594250621510044 +702 28 model.embedding_dim 1.0 +702 28 model.scoring_fct_norm 2.0 +702 28 optimizer.lr 0.026972214555359423 +702 28 training.batch_size 0.0 +702 28 training.label_smoothing 0.0541015716082496 +702 29 model.embedding_dim 2.0 +702 29 model.scoring_fct_norm 2.0 +702 29 optimizer.lr 0.00922569964769782 +702 29 training.batch_size 2.0 +702 29 training.label_smoothing 0.49815083559122547 +702 30 model.embedding_dim 0.0 +702 30 model.scoring_fct_norm 1.0 +702 30 optimizer.lr 0.03929846488609892 +702 30 training.batch_size 1.0 +702 30 training.label_smoothing 0.1494757596186209 +702 31 model.embedding_dim 2.0 +702 31 model.scoring_fct_norm 1.0 +702 31 optimizer.lr 0.003610086414619025 +702 31 training.batch_size 0.0 +702 31 training.label_smoothing 
0.0013242053009056942 +702 32 model.embedding_dim 1.0 +702 32 model.scoring_fct_norm 1.0 +702 32 optimizer.lr 0.0010483051550309566 +702 32 training.batch_size 2.0 +702 32 training.label_smoothing 0.14566032280241248 +702 33 model.embedding_dim 1.0 +702 33 model.scoring_fct_norm 1.0 +702 33 optimizer.lr 0.002189715853264732 +702 33 training.batch_size 1.0 +702 33 training.label_smoothing 0.06503453829943458 +702 34 model.embedding_dim 1.0 +702 34 model.scoring_fct_norm 1.0 +702 34 optimizer.lr 0.005621332321870461 +702 34 training.batch_size 1.0 +702 34 training.label_smoothing 0.022694301744173487 +702 35 model.embedding_dim 2.0 +702 35 model.scoring_fct_norm 1.0 +702 35 optimizer.lr 0.0011112958146717099 +702 35 training.batch_size 2.0 +702 35 training.label_smoothing 0.0011868080941476826 +702 36 model.embedding_dim 0.0 +702 36 model.scoring_fct_norm 1.0 +702 36 optimizer.lr 0.03556820439046161 +702 36 training.batch_size 1.0 +702 36 training.label_smoothing 0.0010662874143480153 +702 37 model.embedding_dim 0.0 +702 37 model.scoring_fct_norm 2.0 +702 37 optimizer.lr 0.03442375802461401 +702 37 training.batch_size 0.0 +702 37 training.label_smoothing 0.05416111134493829 +702 38 model.embedding_dim 2.0 +702 38 model.scoring_fct_norm 1.0 +702 38 optimizer.lr 0.00858070689436676 +702 38 training.batch_size 0.0 +702 38 training.label_smoothing 0.004658108650943313 +702 39 model.embedding_dim 2.0 +702 39 model.scoring_fct_norm 1.0 +702 39 optimizer.lr 0.027880982267273754 +702 39 training.batch_size 2.0 +702 39 training.label_smoothing 0.04531071386396924 +702 40 model.embedding_dim 2.0 +702 40 model.scoring_fct_norm 1.0 +702 40 optimizer.lr 0.02210812400339199 +702 40 training.batch_size 2.0 +702 40 training.label_smoothing 0.023882002437572408 +702 41 model.embedding_dim 2.0 +702 41 model.scoring_fct_norm 1.0 +702 41 optimizer.lr 0.0028388995568177783 +702 41 training.batch_size 2.0 +702 41 training.label_smoothing 0.04611592297535855 +702 42 model.embedding_dim 0.0 
+702 42 model.scoring_fct_norm 1.0 +702 42 optimizer.lr 0.0025252448440951283 +702 42 training.batch_size 0.0 +702 42 training.label_smoothing 0.09793333373084812 +702 43 model.embedding_dim 0.0 +702 43 model.scoring_fct_norm 1.0 +702 43 optimizer.lr 0.09614478857116 +702 43 training.batch_size 1.0 +702 43 training.label_smoothing 0.003522445226080211 +702 44 model.embedding_dim 1.0 +702 44 model.scoring_fct_norm 1.0 +702 44 optimizer.lr 0.006254449027480186 +702 44 training.batch_size 2.0 +702 44 training.label_smoothing 0.07539759000537409 +702 45 model.embedding_dim 1.0 +702 45 model.scoring_fct_norm 1.0 +702 45 optimizer.lr 0.008940465840571796 +702 45 training.batch_size 2.0 +702 45 training.label_smoothing 0.0021827447676126275 +702 46 model.embedding_dim 1.0 +702 46 model.scoring_fct_norm 1.0 +702 46 optimizer.lr 0.05462612247472793 +702 46 training.batch_size 1.0 +702 46 training.label_smoothing 0.009425061427222695 +702 47 model.embedding_dim 1.0 +702 47 model.scoring_fct_norm 2.0 +702 47 optimizer.lr 0.019190638405902294 +702 47 training.batch_size 0.0 +702 47 training.label_smoothing 0.679987974418684 +702 48 model.embedding_dim 0.0 +702 48 model.scoring_fct_norm 1.0 +702 48 optimizer.lr 0.0055262460060895744 +702 48 training.batch_size 2.0 +702 48 training.label_smoothing 0.02725344015325173 +702 49 model.embedding_dim 2.0 +702 49 model.scoring_fct_norm 1.0 +702 49 optimizer.lr 0.0866483216387701 +702 49 training.batch_size 0.0 +702 49 training.label_smoothing 0.0675228123815354 +702 50 model.embedding_dim 0.0 +702 50 model.scoring_fct_norm 2.0 +702 50 optimizer.lr 0.0026224979807137274 +702 50 training.batch_size 1.0 +702 50 training.label_smoothing 0.2925124251642311 +702 51 model.embedding_dim 1.0 +702 51 model.scoring_fct_norm 1.0 +702 51 optimizer.lr 0.009665020370198245 +702 51 training.batch_size 2.0 +702 51 training.label_smoothing 0.18377520501118674 +702 52 model.embedding_dim 2.0 +702 52 model.scoring_fct_norm 1.0 +702 52 optimizer.lr 
0.0059141531979943675 +702 52 training.batch_size 0.0 +702 52 training.label_smoothing 0.30908404161159325 +702 53 model.embedding_dim 0.0 +702 53 model.scoring_fct_norm 2.0 +702 53 optimizer.lr 0.010343940321410066 +702 53 training.batch_size 1.0 +702 53 training.label_smoothing 0.008430982970631447 +702 54 model.embedding_dim 1.0 +702 54 model.scoring_fct_norm 1.0 +702 54 optimizer.lr 0.0070869232710364894 +702 54 training.batch_size 1.0 +702 54 training.label_smoothing 0.31312239861646585 +702 55 model.embedding_dim 2.0 +702 55 model.scoring_fct_norm 1.0 +702 55 optimizer.lr 0.06640798418851587 +702 55 training.batch_size 0.0 +702 55 training.label_smoothing 0.856774720179756 +702 56 model.embedding_dim 1.0 +702 56 model.scoring_fct_norm 2.0 +702 56 optimizer.lr 0.0023355431200660095 +702 56 training.batch_size 2.0 +702 56 training.label_smoothing 0.001485689070450439 +702 57 model.embedding_dim 1.0 +702 57 model.scoring_fct_norm 1.0 +702 57 optimizer.lr 0.0015369454299057367 +702 57 training.batch_size 1.0 +702 57 training.label_smoothing 0.023905725213633884 +702 58 model.embedding_dim 1.0 +702 58 model.scoring_fct_norm 1.0 +702 58 optimizer.lr 0.012416076049419564 +702 58 training.batch_size 0.0 +702 58 training.label_smoothing 0.002122268146176659 +702 59 model.embedding_dim 0.0 +702 59 model.scoring_fct_norm 2.0 +702 59 optimizer.lr 0.0010586617544352656 +702 59 training.batch_size 1.0 +702 59 training.label_smoothing 0.014148301901844303 +702 60 model.embedding_dim 2.0 +702 60 model.scoring_fct_norm 1.0 +702 60 optimizer.lr 0.016096878612992298 +702 60 training.batch_size 1.0 +702 60 training.label_smoothing 0.060081426036187456 +702 61 model.embedding_dim 1.0 +702 61 model.scoring_fct_norm 1.0 +702 61 optimizer.lr 0.0016916612068010265 +702 61 training.batch_size 1.0 +702 61 training.label_smoothing 0.08843317329198591 +702 62 model.embedding_dim 1.0 +702 62 model.scoring_fct_norm 2.0 +702 62 optimizer.lr 0.052141715498851444 +702 62 training.batch_size 
2.0 +702 62 training.label_smoothing 0.004805256920008643 +702 63 model.embedding_dim 0.0 +702 63 model.scoring_fct_norm 1.0 +702 63 optimizer.lr 0.005325523368143336 +702 63 training.batch_size 2.0 +702 63 training.label_smoothing 0.04078790950214599 +702 64 model.embedding_dim 1.0 +702 64 model.scoring_fct_norm 1.0 +702 64 optimizer.lr 0.0021901197153749484 +702 64 training.batch_size 1.0 +702 64 training.label_smoothing 0.0034288995177047315 +702 65 model.embedding_dim 2.0 +702 65 model.scoring_fct_norm 2.0 +702 65 optimizer.lr 0.03469224167847384 +702 65 training.batch_size 1.0 +702 65 training.label_smoothing 0.0018440057913935068 +702 66 model.embedding_dim 0.0 +702 66 model.scoring_fct_norm 1.0 +702 66 optimizer.lr 0.021749547829318108 +702 66 training.batch_size 0.0 +702 66 training.label_smoothing 0.6101505163349606 +702 67 model.embedding_dim 2.0 +702 67 model.scoring_fct_norm 2.0 +702 67 optimizer.lr 0.026254066979726376 +702 67 training.batch_size 1.0 +702 67 training.label_smoothing 0.14826668382416688 +702 68 model.embedding_dim 0.0 +702 68 model.scoring_fct_norm 1.0 +702 68 optimizer.lr 0.00353542097880469 +702 68 training.batch_size 0.0 +702 68 training.label_smoothing 0.18674286147958258 +702 69 model.embedding_dim 2.0 +702 69 model.scoring_fct_norm 2.0 +702 69 optimizer.lr 0.0016505726404134812 +702 69 training.batch_size 0.0 +702 69 training.label_smoothing 0.0033573531823034523 +702 70 model.embedding_dim 0.0 +702 70 model.scoring_fct_norm 2.0 +702 70 optimizer.lr 0.04445487104034557 +702 70 training.batch_size 1.0 +702 70 training.label_smoothing 0.014982499121077974 +702 71 model.embedding_dim 0.0 +702 71 model.scoring_fct_norm 2.0 +702 71 optimizer.lr 0.027789612404394856 +702 71 training.batch_size 2.0 +702 71 training.label_smoothing 0.0061973768619153265 +702 72 model.embedding_dim 2.0 +702 72 model.scoring_fct_norm 2.0 +702 72 optimizer.lr 0.01664770157793216 +702 72 training.batch_size 2.0 +702 72 training.label_smoothing 
0.004265584592652623 +702 73 model.embedding_dim 2.0 +702 73 model.scoring_fct_norm 2.0 +702 73 optimizer.lr 0.0430227827275656 +702 73 training.batch_size 2.0 +702 73 training.label_smoothing 0.004508225319685628 +702 74 model.embedding_dim 1.0 +702 74 model.scoring_fct_norm 2.0 +702 74 optimizer.lr 0.002965995726226646 +702 74 training.batch_size 2.0 +702 74 training.label_smoothing 0.15410226179701506 +702 75 model.embedding_dim 1.0 +702 75 model.scoring_fct_norm 1.0 +702 75 optimizer.lr 0.008674545661005693 +702 75 training.batch_size 1.0 +702 75 training.label_smoothing 0.6664627460832722 +702 76 model.embedding_dim 0.0 +702 76 model.scoring_fct_norm 2.0 +702 76 optimizer.lr 0.00196928203279883 +702 76 training.batch_size 0.0 +702 76 training.label_smoothing 0.03437621911102946 +702 77 model.embedding_dim 2.0 +702 77 model.scoring_fct_norm 1.0 +702 77 optimizer.lr 0.009343738893209639 +702 77 training.batch_size 0.0 +702 77 training.label_smoothing 0.0016636013329225142 +702 78 model.embedding_dim 2.0 +702 78 model.scoring_fct_norm 2.0 +702 78 optimizer.lr 0.0395533394874423 +702 78 training.batch_size 1.0 +702 78 training.label_smoothing 0.02607650573551681 +702 79 model.embedding_dim 1.0 +702 79 model.scoring_fct_norm 2.0 +702 79 optimizer.lr 0.0017899037829312147 +702 79 training.batch_size 2.0 +702 79 training.label_smoothing 0.005256495499921916 +702 80 model.embedding_dim 2.0 +702 80 model.scoring_fct_norm 2.0 +702 80 optimizer.lr 0.019426388624253804 +702 80 training.batch_size 2.0 +702 80 training.label_smoothing 0.17016330552539896 +702 81 model.embedding_dim 1.0 +702 81 model.scoring_fct_norm 1.0 +702 81 optimizer.lr 0.001263524560937449 +702 81 training.batch_size 1.0 +702 81 training.label_smoothing 0.00607829402429882 +702 82 model.embedding_dim 2.0 +702 82 model.scoring_fct_norm 2.0 +702 82 optimizer.lr 0.002093875633169087 +702 82 training.batch_size 1.0 +702 82 training.label_smoothing 0.9452563360485422 +702 83 model.embedding_dim 0.0 +702 83 
model.scoring_fct_norm 1.0 +702 83 optimizer.lr 0.028987873688438002 +702 83 training.batch_size 2.0 +702 83 training.label_smoothing 0.09750108611350038 +702 84 model.embedding_dim 0.0 +702 84 model.scoring_fct_norm 2.0 +702 84 optimizer.lr 0.0017473374808070642 +702 84 training.batch_size 0.0 +702 84 training.label_smoothing 0.49003584815194734 +702 85 model.embedding_dim 0.0 +702 85 model.scoring_fct_norm 2.0 +702 85 optimizer.lr 0.0034356017960027758 +702 85 training.batch_size 1.0 +702 85 training.label_smoothing 0.2234263837174537 +702 86 model.embedding_dim 2.0 +702 86 model.scoring_fct_norm 1.0 +702 86 optimizer.lr 0.05025928116544508 +702 86 training.batch_size 0.0 +702 86 training.label_smoothing 0.014116222723921362 +702 87 model.embedding_dim 1.0 +702 87 model.scoring_fct_norm 1.0 +702 87 optimizer.lr 0.0015285480787370906 +702 87 training.batch_size 0.0 +702 87 training.label_smoothing 0.7672303149323741 +702 88 model.embedding_dim 1.0 +702 88 model.scoring_fct_norm 2.0 +702 88 optimizer.lr 0.001255446071576578 +702 88 training.batch_size 0.0 +702 88 training.label_smoothing 0.03186941891855022 +702 89 model.embedding_dim 2.0 +702 89 model.scoring_fct_norm 2.0 +702 89 optimizer.lr 0.00159620145967314 +702 89 training.batch_size 2.0 +702 89 training.label_smoothing 0.007249555805229218 +702 90 model.embedding_dim 0.0 +702 90 model.scoring_fct_norm 1.0 +702 90 optimizer.lr 0.08402278576785106 +702 90 training.batch_size 2.0 +702 90 training.label_smoothing 0.01125961068800316 +702 91 model.embedding_dim 2.0 +702 91 model.scoring_fct_norm 2.0 +702 91 optimizer.lr 0.010748110047519042 +702 91 training.batch_size 1.0 +702 91 training.label_smoothing 0.023783248585068657 +702 92 model.embedding_dim 2.0 +702 92 model.scoring_fct_norm 2.0 +702 92 optimizer.lr 0.026184057291860127 +702 92 training.batch_size 0.0 +702 92 training.label_smoothing 0.004997825067548418 +702 93 model.embedding_dim 0.0 +702 93 model.scoring_fct_norm 1.0 +702 93 optimizer.lr 
0.09691638193001625 +702 93 training.batch_size 2.0 +702 93 training.label_smoothing 0.022554476301496516 +702 94 model.embedding_dim 0.0 +702 94 model.scoring_fct_norm 1.0 +702 94 optimizer.lr 0.03241915680255611 +702 94 training.batch_size 0.0 +702 94 training.label_smoothing 0.8228205740287279 +702 95 model.embedding_dim 1.0 +702 95 model.scoring_fct_norm 2.0 +702 95 optimizer.lr 0.001463756258518966 +702 95 training.batch_size 1.0 +702 95 training.label_smoothing 0.0040548819830374516 +702 96 model.embedding_dim 1.0 +702 96 model.scoring_fct_norm 1.0 +702 96 optimizer.lr 0.0013927087604329415 +702 96 training.batch_size 0.0 +702 96 training.label_smoothing 0.030501916382401833 +702 97 model.embedding_dim 2.0 +702 97 model.scoring_fct_norm 1.0 +702 97 optimizer.lr 0.004877188358031469 +702 97 training.batch_size 0.0 +702 97 training.label_smoothing 0.3288613990686976 +702 98 model.embedding_dim 1.0 +702 98 model.scoring_fct_norm 2.0 +702 98 optimizer.lr 0.012540767751950479 +702 98 training.batch_size 0.0 +702 98 training.label_smoothing 0.039161073957120006 +702 99 model.embedding_dim 2.0 +702 99 model.scoring_fct_norm 1.0 +702 99 optimizer.lr 0.07637476509402472 +702 99 training.batch_size 0.0 +702 99 training.label_smoothing 0.010395399975155456 +702 100 model.embedding_dim 1.0 +702 100 model.scoring_fct_norm 1.0 +702 100 optimizer.lr 0.0043931504880808095 +702 100 training.batch_size 0.0 +702 100 training.label_smoothing 0.5482678105856262 +702 1 dataset """kinships""" +702 1 model """structuredembedding""" +702 1 loss """softplus""" +702 1 regularizer """no""" +702 1 optimizer """adam""" +702 1 training_loop """lcwa""" +702 1 evaluator """rankbased""" +702 2 dataset """kinships""" +702 2 model """structuredembedding""" +702 2 loss """softplus""" +702 2 regularizer """no""" +702 2 optimizer """adam""" +702 2 training_loop """lcwa""" +702 2 evaluator """rankbased""" +702 3 dataset """kinships""" +702 3 model """structuredembedding""" +702 3 loss 
"""softplus""" +702 3 regularizer """no""" +702 3 optimizer """adam""" +702 3 training_loop """lcwa""" +702 3 evaluator """rankbased""" +702 4 dataset """kinships""" +702 4 model """structuredembedding""" +702 4 loss """softplus""" +702 4 regularizer """no""" +702 4 optimizer """adam""" +702 4 training_loop """lcwa""" +702 4 evaluator """rankbased""" +702 5 dataset """kinships""" +702 5 model """structuredembedding""" +702 5 loss """softplus""" +702 5 regularizer """no""" +702 5 optimizer """adam""" +702 5 training_loop """lcwa""" +702 5 evaluator """rankbased""" +702 6 dataset """kinships""" +702 6 model """structuredembedding""" +702 6 loss """softplus""" +702 6 regularizer """no""" +702 6 optimizer """adam""" +702 6 training_loop """lcwa""" +702 6 evaluator """rankbased""" +702 7 dataset """kinships""" +702 7 model """structuredembedding""" +702 7 loss """softplus""" +702 7 regularizer """no""" +702 7 optimizer """adam""" +702 7 training_loop """lcwa""" +702 7 evaluator """rankbased""" +702 8 dataset """kinships""" +702 8 model """structuredembedding""" +702 8 loss """softplus""" +702 8 regularizer """no""" +702 8 optimizer """adam""" +702 8 training_loop """lcwa""" +702 8 evaluator """rankbased""" +702 9 dataset """kinships""" +702 9 model """structuredembedding""" +702 9 loss """softplus""" +702 9 regularizer """no""" +702 9 optimizer """adam""" +702 9 training_loop """lcwa""" +702 9 evaluator """rankbased""" +702 10 dataset """kinships""" +702 10 model """structuredembedding""" +702 10 loss """softplus""" +702 10 regularizer """no""" +702 10 optimizer """adam""" +702 10 training_loop """lcwa""" +702 10 evaluator """rankbased""" +702 11 dataset """kinships""" +702 11 model """structuredembedding""" +702 11 loss """softplus""" +702 11 regularizer """no""" +702 11 optimizer """adam""" +702 11 training_loop """lcwa""" +702 11 evaluator """rankbased""" +702 12 dataset """kinships""" +702 12 model """structuredembedding""" +702 12 loss """softplus""" +702 12 
regularizer """no""" +702 12 optimizer """adam""" +702 12 training_loop """lcwa""" +702 12 evaluator """rankbased""" +702 13 dataset """kinships""" +702 13 model """structuredembedding""" +702 13 loss """softplus""" +702 13 regularizer """no""" +702 13 optimizer """adam""" +702 13 training_loop """lcwa""" +702 13 evaluator """rankbased""" +702 14 dataset """kinships""" +702 14 model """structuredembedding""" +702 14 loss """softplus""" +702 14 regularizer """no""" +702 14 optimizer """adam""" +702 14 training_loop """lcwa""" +702 14 evaluator """rankbased""" +702 15 dataset """kinships""" +702 15 model """structuredembedding""" +702 15 loss """softplus""" +702 15 regularizer """no""" +702 15 optimizer """adam""" +702 15 training_loop """lcwa""" +702 15 evaluator """rankbased""" +702 16 dataset """kinships""" +702 16 model """structuredembedding""" +702 16 loss """softplus""" +702 16 regularizer """no""" +702 16 optimizer """adam""" +702 16 training_loop """lcwa""" +702 16 evaluator """rankbased""" +702 17 dataset """kinships""" +702 17 model """structuredembedding""" +702 17 loss """softplus""" +702 17 regularizer """no""" +702 17 optimizer """adam""" +702 17 training_loop """lcwa""" +702 17 evaluator """rankbased""" +702 18 dataset """kinships""" +702 18 model """structuredembedding""" +702 18 loss """softplus""" +702 18 regularizer """no""" +702 18 optimizer """adam""" +702 18 training_loop """lcwa""" +702 18 evaluator """rankbased""" +702 19 dataset """kinships""" +702 19 model """structuredembedding""" +702 19 loss """softplus""" +702 19 regularizer """no""" +702 19 optimizer """adam""" +702 19 training_loop """lcwa""" +702 19 evaluator """rankbased""" +702 20 dataset """kinships""" +702 20 model """structuredembedding""" +702 20 loss """softplus""" +702 20 regularizer """no""" +702 20 optimizer """adam""" +702 20 training_loop """lcwa""" +702 20 evaluator """rankbased""" +702 21 dataset """kinships""" +702 21 model """structuredembedding""" +702 21 loss 
"""softplus""" +702 21 regularizer """no""" +702 21 optimizer """adam""" +702 21 training_loop """lcwa""" +702 21 evaluator """rankbased""" +702 22 dataset """kinships""" +702 22 model """structuredembedding""" +702 22 loss """softplus""" +702 22 regularizer """no""" +702 22 optimizer """adam""" +702 22 training_loop """lcwa""" +702 22 evaluator """rankbased""" +702 23 dataset """kinships""" +702 23 model """structuredembedding""" +702 23 loss """softplus""" +702 23 regularizer """no""" +702 23 optimizer """adam""" +702 23 training_loop """lcwa""" +702 23 evaluator """rankbased""" +702 24 dataset """kinships""" +702 24 model """structuredembedding""" +702 24 loss """softplus""" +702 24 regularizer """no""" +702 24 optimizer """adam""" +702 24 training_loop """lcwa""" +702 24 evaluator """rankbased""" +702 25 dataset """kinships""" +702 25 model """structuredembedding""" +702 25 loss """softplus""" +702 25 regularizer """no""" +702 25 optimizer """adam""" +702 25 training_loop """lcwa""" +702 25 evaluator """rankbased""" +702 26 dataset """kinships""" +702 26 model """structuredembedding""" +702 26 loss """softplus""" +702 26 regularizer """no""" +702 26 optimizer """adam""" +702 26 training_loop """lcwa""" +702 26 evaluator """rankbased""" +702 27 dataset """kinships""" +702 27 model """structuredembedding""" +702 27 loss """softplus""" +702 27 regularizer """no""" +702 27 optimizer """adam""" +702 27 training_loop """lcwa""" +702 27 evaluator """rankbased""" +702 28 dataset """kinships""" +702 28 model """structuredembedding""" +702 28 loss """softplus""" +702 28 regularizer """no""" +702 28 optimizer """adam""" +702 28 training_loop """lcwa""" +702 28 evaluator """rankbased""" +702 29 dataset """kinships""" +702 29 model """structuredembedding""" +702 29 loss """softplus""" +702 29 regularizer """no""" +702 29 optimizer """adam""" +702 29 training_loop """lcwa""" +702 29 evaluator """rankbased""" +702 30 dataset """kinships""" +702 30 model 
"""structuredembedding""" +702 30 loss """softplus""" +702 30 regularizer """no""" +702 30 optimizer """adam""" +702 30 training_loop """lcwa""" +702 30 evaluator """rankbased""" +702 31 dataset """kinships""" +702 31 model """structuredembedding""" +702 31 loss """softplus""" +702 31 regularizer """no""" +702 31 optimizer """adam""" +702 31 training_loop """lcwa""" +702 31 evaluator """rankbased""" +702 32 dataset """kinships""" +702 32 model """structuredembedding""" +702 32 loss """softplus""" +702 32 regularizer """no""" +702 32 optimizer """adam""" +702 32 training_loop """lcwa""" +702 32 evaluator """rankbased""" +702 33 dataset """kinships""" +702 33 model """structuredembedding""" +702 33 loss """softplus""" +702 33 regularizer """no""" +702 33 optimizer """adam""" +702 33 training_loop """lcwa""" +702 33 evaluator """rankbased""" +702 34 dataset """kinships""" +702 34 model """structuredembedding""" +702 34 loss """softplus""" +702 34 regularizer """no""" +702 34 optimizer """adam""" +702 34 training_loop """lcwa""" +702 34 evaluator """rankbased""" +702 35 dataset """kinships""" +702 35 model """structuredembedding""" +702 35 loss """softplus""" +702 35 regularizer """no""" +702 35 optimizer """adam""" +702 35 training_loop """lcwa""" +702 35 evaluator """rankbased""" +702 36 dataset """kinships""" +702 36 model """structuredembedding""" +702 36 loss """softplus""" +702 36 regularizer """no""" +702 36 optimizer """adam""" +702 36 training_loop """lcwa""" +702 36 evaluator """rankbased""" +702 37 dataset """kinships""" +702 37 model """structuredembedding""" +702 37 loss """softplus""" +702 37 regularizer """no""" +702 37 optimizer """adam""" +702 37 training_loop """lcwa""" +702 37 evaluator """rankbased""" +702 38 dataset """kinships""" +702 38 model """structuredembedding""" +702 38 loss """softplus""" +702 38 regularizer """no""" +702 38 optimizer """adam""" +702 38 training_loop """lcwa""" +702 38 evaluator """rankbased""" +702 39 dataset 
"""kinships""" +702 39 model """structuredembedding""" +702 39 loss """softplus""" +702 39 regularizer """no""" +702 39 optimizer """adam""" +702 39 training_loop """lcwa""" +702 39 evaluator """rankbased""" +702 40 dataset """kinships""" +702 40 model """structuredembedding""" +702 40 loss """softplus""" +702 40 regularizer """no""" +702 40 optimizer """adam""" +702 40 training_loop """lcwa""" +702 40 evaluator """rankbased""" +702 41 dataset """kinships""" +702 41 model """structuredembedding""" +702 41 loss """softplus""" +702 41 regularizer """no""" +702 41 optimizer """adam""" +702 41 training_loop """lcwa""" +702 41 evaluator """rankbased""" +702 42 dataset """kinships""" +702 42 model """structuredembedding""" +702 42 loss """softplus""" +702 42 regularizer """no""" +702 42 optimizer """adam""" +702 42 training_loop """lcwa""" +702 42 evaluator """rankbased""" +702 43 dataset """kinships""" +702 43 model """structuredembedding""" +702 43 loss """softplus""" +702 43 regularizer """no""" +702 43 optimizer """adam""" +702 43 training_loop """lcwa""" +702 43 evaluator """rankbased""" +702 44 dataset """kinships""" +702 44 model """structuredembedding""" +702 44 loss """softplus""" +702 44 regularizer """no""" +702 44 optimizer """adam""" +702 44 training_loop """lcwa""" +702 44 evaluator """rankbased""" +702 45 dataset """kinships""" +702 45 model """structuredembedding""" +702 45 loss """softplus""" +702 45 regularizer """no""" +702 45 optimizer """adam""" +702 45 training_loop """lcwa""" +702 45 evaluator """rankbased""" +702 46 dataset """kinships""" +702 46 model """structuredembedding""" +702 46 loss """softplus""" +702 46 regularizer """no""" +702 46 optimizer """adam""" +702 46 training_loop """lcwa""" +702 46 evaluator """rankbased""" +702 47 dataset """kinships""" +702 47 model """structuredembedding""" +702 47 loss """softplus""" +702 47 regularizer """no""" +702 47 optimizer """adam""" +702 47 training_loop """lcwa""" +702 47 evaluator """rankbased""" 
+702 48 dataset """kinships""" +702 48 model """structuredembedding""" +702 48 loss """softplus""" +702 48 regularizer """no""" +702 48 optimizer """adam""" +702 48 training_loop """lcwa""" +702 48 evaluator """rankbased""" +702 49 dataset """kinships""" +702 49 model """structuredembedding""" +702 49 loss """softplus""" +702 49 regularizer """no""" +702 49 optimizer """adam""" +702 49 training_loop """lcwa""" +702 49 evaluator """rankbased""" +702 50 dataset """kinships""" +702 50 model """structuredembedding""" +702 50 loss """softplus""" +702 50 regularizer """no""" +702 50 optimizer """adam""" +702 50 training_loop """lcwa""" +702 50 evaluator """rankbased""" +702 51 dataset """kinships""" +702 51 model """structuredembedding""" +702 51 loss """softplus""" +702 51 regularizer """no""" +702 51 optimizer """adam""" +702 51 training_loop """lcwa""" +702 51 evaluator """rankbased""" +702 52 dataset """kinships""" +702 52 model """structuredembedding""" +702 52 loss """softplus""" +702 52 regularizer """no""" +702 52 optimizer """adam""" +702 52 training_loop """lcwa""" +702 52 evaluator """rankbased""" +702 53 dataset """kinships""" +702 53 model """structuredembedding""" +702 53 loss """softplus""" +702 53 regularizer """no""" +702 53 optimizer """adam""" +702 53 training_loop """lcwa""" +702 53 evaluator """rankbased""" +702 54 dataset """kinships""" +702 54 model """structuredembedding""" +702 54 loss """softplus""" +702 54 regularizer """no""" +702 54 optimizer """adam""" +702 54 training_loop """lcwa""" +702 54 evaluator """rankbased""" +702 55 dataset """kinships""" +702 55 model """structuredembedding""" +702 55 loss """softplus""" +702 55 regularizer """no""" +702 55 optimizer """adam""" +702 55 training_loop """lcwa""" +702 55 evaluator """rankbased""" +702 56 dataset """kinships""" +702 56 model """structuredembedding""" +702 56 loss """softplus""" +702 56 regularizer """no""" +702 56 optimizer """adam""" +702 56 training_loop """lcwa""" +702 56 evaluator 
"""rankbased""" +702 57 dataset """kinships""" +702 57 model """structuredembedding""" +702 57 loss """softplus""" +702 57 regularizer """no""" +702 57 optimizer """adam""" +702 57 training_loop """lcwa""" +702 57 evaluator """rankbased""" +702 58 dataset """kinships""" +702 58 model """structuredembedding""" +702 58 loss """softplus""" +702 58 regularizer """no""" +702 58 optimizer """adam""" +702 58 training_loop """lcwa""" +702 58 evaluator """rankbased""" +702 59 dataset """kinships""" +702 59 model """structuredembedding""" +702 59 loss """softplus""" +702 59 regularizer """no""" +702 59 optimizer """adam""" +702 59 training_loop """lcwa""" +702 59 evaluator """rankbased""" +702 60 dataset """kinships""" +702 60 model """structuredembedding""" +702 60 loss """softplus""" +702 60 regularizer """no""" +702 60 optimizer """adam""" +702 60 training_loop """lcwa""" +702 60 evaluator """rankbased""" +702 61 dataset """kinships""" +702 61 model """structuredembedding""" +702 61 loss """softplus""" +702 61 regularizer """no""" +702 61 optimizer """adam""" +702 61 training_loop """lcwa""" +702 61 evaluator """rankbased""" +702 62 dataset """kinships""" +702 62 model """structuredembedding""" +702 62 loss """softplus""" +702 62 regularizer """no""" +702 62 optimizer """adam""" +702 62 training_loop """lcwa""" +702 62 evaluator """rankbased""" +702 63 dataset """kinships""" +702 63 model """structuredembedding""" +702 63 loss """softplus""" +702 63 regularizer """no""" +702 63 optimizer """adam""" +702 63 training_loop """lcwa""" +702 63 evaluator """rankbased""" +702 64 dataset """kinships""" +702 64 model """structuredembedding""" +702 64 loss """softplus""" +702 64 regularizer """no""" +702 64 optimizer """adam""" +702 64 training_loop """lcwa""" +702 64 evaluator """rankbased""" +702 65 dataset """kinships""" +702 65 model """structuredembedding""" +702 65 loss """softplus""" +702 65 regularizer """no""" +702 65 optimizer """adam""" +702 65 training_loop """lcwa""" 
+702 65 evaluator """rankbased""" +702 66 dataset """kinships""" +702 66 model """structuredembedding""" +702 66 loss """softplus""" +702 66 regularizer """no""" +702 66 optimizer """adam""" +702 66 training_loop """lcwa""" +702 66 evaluator """rankbased""" +702 67 dataset """kinships""" +702 67 model """structuredembedding""" +702 67 loss """softplus""" +702 67 regularizer """no""" +702 67 optimizer """adam""" +702 67 training_loop """lcwa""" +702 67 evaluator """rankbased""" +702 68 dataset """kinships""" +702 68 model """structuredembedding""" +702 68 loss """softplus""" +702 68 regularizer """no""" +702 68 optimizer """adam""" +702 68 training_loop """lcwa""" +702 68 evaluator """rankbased""" +702 69 dataset """kinships""" +702 69 model """structuredembedding""" +702 69 loss """softplus""" +702 69 regularizer """no""" +702 69 optimizer """adam""" +702 69 training_loop """lcwa""" +702 69 evaluator """rankbased""" +702 70 dataset """kinships""" +702 70 model """structuredembedding""" +702 70 loss """softplus""" +702 70 regularizer """no""" +702 70 optimizer """adam""" +702 70 training_loop """lcwa""" +702 70 evaluator """rankbased""" +702 71 dataset """kinships""" +702 71 model """structuredembedding""" +702 71 loss """softplus""" +702 71 regularizer """no""" +702 71 optimizer """adam""" +702 71 training_loop """lcwa""" +702 71 evaluator """rankbased""" +702 72 dataset """kinships""" +702 72 model """structuredembedding""" +702 72 loss """softplus""" +702 72 regularizer """no""" +702 72 optimizer """adam""" +702 72 training_loop """lcwa""" +702 72 evaluator """rankbased""" +702 73 dataset """kinships""" +702 73 model """structuredembedding""" +702 73 loss """softplus""" +702 73 regularizer """no""" +702 73 optimizer """adam""" +702 73 training_loop """lcwa""" +702 73 evaluator """rankbased""" +702 74 dataset """kinships""" +702 74 model """structuredembedding""" +702 74 loss """softplus""" +702 74 regularizer """no""" +702 74 optimizer """adam""" +702 74 
training_loop """lcwa""" +702 74 evaluator """rankbased""" +702 75 dataset """kinships""" +702 75 model """structuredembedding""" +702 75 loss """softplus""" +702 75 regularizer """no""" +702 75 optimizer """adam""" +702 75 training_loop """lcwa""" +702 75 evaluator """rankbased""" +702 76 dataset """kinships""" +702 76 model """structuredembedding""" +702 76 loss """softplus""" +702 76 regularizer """no""" +702 76 optimizer """adam""" +702 76 training_loop """lcwa""" +702 76 evaluator """rankbased""" +702 77 dataset """kinships""" +702 77 model """structuredembedding""" +702 77 loss """softplus""" +702 77 regularizer """no""" +702 77 optimizer """adam""" +702 77 training_loop """lcwa""" +702 77 evaluator """rankbased""" +702 78 dataset """kinships""" +702 78 model """structuredembedding""" +702 78 loss """softplus""" +702 78 regularizer """no""" +702 78 optimizer """adam""" +702 78 training_loop """lcwa""" +702 78 evaluator """rankbased""" +702 79 dataset """kinships""" +702 79 model """structuredembedding""" +702 79 loss """softplus""" +702 79 regularizer """no""" +702 79 optimizer """adam""" +702 79 training_loop """lcwa""" +702 79 evaluator """rankbased""" +702 80 dataset """kinships""" +702 80 model """structuredembedding""" +702 80 loss """softplus""" +702 80 regularizer """no""" +702 80 optimizer """adam""" +702 80 training_loop """lcwa""" +702 80 evaluator """rankbased""" +702 81 dataset """kinships""" +702 81 model """structuredembedding""" +702 81 loss """softplus""" +702 81 regularizer """no""" +702 81 optimizer """adam""" +702 81 training_loop """lcwa""" +702 81 evaluator """rankbased""" +702 82 dataset """kinships""" +702 82 model """structuredembedding""" +702 82 loss """softplus""" +702 82 regularizer """no""" +702 82 optimizer """adam""" +702 82 training_loop """lcwa""" +702 82 evaluator """rankbased""" +702 83 dataset """kinships""" +702 83 model """structuredembedding""" +702 83 loss """softplus""" +702 83 regularizer """no""" +702 83 optimizer 
"""adam""" +702 83 training_loop """lcwa""" +702 83 evaluator """rankbased""" +702 84 dataset """kinships""" +702 84 model """structuredembedding""" +702 84 loss """softplus""" +702 84 regularizer """no""" +702 84 optimizer """adam""" +702 84 training_loop """lcwa""" +702 84 evaluator """rankbased""" +702 85 dataset """kinships""" +702 85 model """structuredembedding""" +702 85 loss """softplus""" +702 85 regularizer """no""" +702 85 optimizer """adam""" +702 85 training_loop """lcwa""" +702 85 evaluator """rankbased""" +702 86 dataset """kinships""" +702 86 model """structuredembedding""" +702 86 loss """softplus""" +702 86 regularizer """no""" +702 86 optimizer """adam""" +702 86 training_loop """lcwa""" +702 86 evaluator """rankbased""" +702 87 dataset """kinships""" +702 87 model """structuredembedding""" +702 87 loss """softplus""" +702 87 regularizer """no""" +702 87 optimizer """adam""" +702 87 training_loop """lcwa""" +702 87 evaluator """rankbased""" +702 88 dataset """kinships""" +702 88 model """structuredembedding""" +702 88 loss """softplus""" +702 88 regularizer """no""" +702 88 optimizer """adam""" +702 88 training_loop """lcwa""" +702 88 evaluator """rankbased""" +702 89 dataset """kinships""" +702 89 model """structuredembedding""" +702 89 loss """softplus""" +702 89 regularizer """no""" +702 89 optimizer """adam""" +702 89 training_loop """lcwa""" +702 89 evaluator """rankbased""" +702 90 dataset """kinships""" +702 90 model """structuredembedding""" +702 90 loss """softplus""" +702 90 regularizer """no""" +702 90 optimizer """adam""" +702 90 training_loop """lcwa""" +702 90 evaluator """rankbased""" +702 91 dataset """kinships""" +702 91 model """structuredembedding""" +702 91 loss """softplus""" +702 91 regularizer """no""" +702 91 optimizer """adam""" +702 91 training_loop """lcwa""" +702 91 evaluator """rankbased""" +702 92 dataset """kinships""" +702 92 model """structuredembedding""" +702 92 loss """softplus""" +702 92 regularizer """no""" 
+702 92 optimizer """adam""" +702 92 training_loop """lcwa""" +702 92 evaluator """rankbased""" +702 93 dataset """kinships""" +702 93 model """structuredembedding""" +702 93 loss """softplus""" +702 93 regularizer """no""" +702 93 optimizer """adam""" +702 93 training_loop """lcwa""" +702 93 evaluator """rankbased""" +702 94 dataset """kinships""" +702 94 model """structuredembedding""" +702 94 loss """softplus""" +702 94 regularizer """no""" +702 94 optimizer """adam""" +702 94 training_loop """lcwa""" +702 94 evaluator """rankbased""" +702 95 dataset """kinships""" +702 95 model """structuredembedding""" +702 95 loss """softplus""" +702 95 regularizer """no""" +702 95 optimizer """adam""" +702 95 training_loop """lcwa""" +702 95 evaluator """rankbased""" +702 96 dataset """kinships""" +702 96 model """structuredembedding""" +702 96 loss """softplus""" +702 96 regularizer """no""" +702 96 optimizer """adam""" +702 96 training_loop """lcwa""" +702 96 evaluator """rankbased""" +702 97 dataset """kinships""" +702 97 model """structuredembedding""" +702 97 loss """softplus""" +702 97 regularizer """no""" +702 97 optimizer """adam""" +702 97 training_loop """lcwa""" +702 97 evaluator """rankbased""" +702 98 dataset """kinships""" +702 98 model """structuredembedding""" +702 98 loss """softplus""" +702 98 regularizer """no""" +702 98 optimizer """adam""" +702 98 training_loop """lcwa""" +702 98 evaluator """rankbased""" +702 99 dataset """kinships""" +702 99 model """structuredembedding""" +702 99 loss """softplus""" +702 99 regularizer """no""" +702 99 optimizer """adam""" +702 99 training_loop """lcwa""" +702 99 evaluator """rankbased""" +702 100 dataset """kinships""" +702 100 model """structuredembedding""" +702 100 loss """softplus""" +702 100 regularizer """no""" +702 100 optimizer """adam""" +702 100 training_loop """lcwa""" +702 100 evaluator """rankbased""" +703 1 model.embedding_dim 0.0 +703 1 model.scoring_fct_norm 1.0 +703 1 optimizer.lr 
0.007480315561547319 +703 1 training.batch_size 0.0 +703 1 training.label_smoothing 0.005025862459453613 +703 2 model.embedding_dim 2.0 +703 2 model.scoring_fct_norm 2.0 +703 2 optimizer.lr 0.007469096334534122 +703 2 training.batch_size 1.0 +703 2 training.label_smoothing 0.00843188787402316 +703 3 model.embedding_dim 1.0 +703 3 model.scoring_fct_norm 2.0 +703 3 optimizer.lr 0.011993351818398394 +703 3 training.batch_size 2.0 +703 3 training.label_smoothing 0.08287871089135138 +703 4 model.embedding_dim 2.0 +703 4 model.scoring_fct_norm 1.0 +703 4 optimizer.lr 0.014626254560681964 +703 4 training.batch_size 2.0 +703 4 training.label_smoothing 0.6114497675133898 +703 5 model.embedding_dim 1.0 +703 5 model.scoring_fct_norm 1.0 +703 5 optimizer.lr 0.0015154335776456618 +703 5 training.batch_size 0.0 +703 5 training.label_smoothing 0.019220694376825866 +703 6 model.embedding_dim 0.0 +703 6 model.scoring_fct_norm 1.0 +703 6 optimizer.lr 0.005213830286369717 +703 6 training.batch_size 0.0 +703 6 training.label_smoothing 0.8332849771547818 +703 7 model.embedding_dim 0.0 +703 7 model.scoring_fct_norm 1.0 +703 7 optimizer.lr 0.016229143383120665 +703 7 training.batch_size 1.0 +703 7 training.label_smoothing 0.006799067175941686 +703 8 model.embedding_dim 1.0 +703 8 model.scoring_fct_norm 2.0 +703 8 optimizer.lr 0.0028682895867792866 +703 8 training.batch_size 0.0 +703 8 training.label_smoothing 0.025800814943841798 +703 9 model.embedding_dim 0.0 +703 9 model.scoring_fct_norm 2.0 +703 9 optimizer.lr 0.0021575009753477463 +703 9 training.batch_size 0.0 +703 9 training.label_smoothing 0.0020582433837775765 +703 10 model.embedding_dim 0.0 +703 10 model.scoring_fct_norm 2.0 +703 10 optimizer.lr 0.00454257885442763 +703 10 training.batch_size 1.0 +703 10 training.label_smoothing 0.00883440388940477 +703 11 model.embedding_dim 2.0 +703 11 model.scoring_fct_norm 2.0 +703 11 optimizer.lr 0.0035561065023687194 +703 11 training.batch_size 2.0 +703 11 training.label_smoothing 
0.008897793014316934 +703 12 model.embedding_dim 0.0 +703 12 model.scoring_fct_norm 2.0 +703 12 optimizer.lr 0.021322849896110378 +703 12 training.batch_size 2.0 +703 12 training.label_smoothing 0.03540765915278387 +703 13 model.embedding_dim 2.0 +703 13 model.scoring_fct_norm 1.0 +703 13 optimizer.lr 0.005943301050327266 +703 13 training.batch_size 0.0 +703 13 training.label_smoothing 0.0732979939467077 +703 14 model.embedding_dim 2.0 +703 14 model.scoring_fct_norm 1.0 +703 14 optimizer.lr 0.0020106256634465766 +703 14 training.batch_size 0.0 +703 14 training.label_smoothing 0.04675281604836577 +703 15 model.embedding_dim 1.0 +703 15 model.scoring_fct_norm 1.0 +703 15 optimizer.lr 0.06389179067341383 +703 15 training.batch_size 0.0 +703 15 training.label_smoothing 0.005588319104044377 +703 16 model.embedding_dim 0.0 +703 16 model.scoring_fct_norm 1.0 +703 16 optimizer.lr 0.010006425865855213 +703 16 training.batch_size 1.0 +703 16 training.label_smoothing 0.6199171001943172 +703 17 model.embedding_dim 0.0 +703 17 model.scoring_fct_norm 2.0 +703 17 optimizer.lr 0.03125907099388415 +703 17 training.batch_size 2.0 +703 17 training.label_smoothing 0.38047842658285247 +703 18 model.embedding_dim 1.0 +703 18 model.scoring_fct_norm 1.0 +703 18 optimizer.lr 0.002842348520665804 +703 18 training.batch_size 2.0 +703 18 training.label_smoothing 0.9230231311475177 +703 19 model.embedding_dim 1.0 +703 19 model.scoring_fct_norm 2.0 +703 19 optimizer.lr 0.001302588918550508 +703 19 training.batch_size 0.0 +703 19 training.label_smoothing 0.49812251896420884 +703 20 model.embedding_dim 1.0 +703 20 model.scoring_fct_norm 2.0 +703 20 optimizer.lr 0.007114364680709932 +703 20 training.batch_size 1.0 +703 20 training.label_smoothing 0.15364273152909674 +703 21 model.embedding_dim 1.0 +703 21 model.scoring_fct_norm 1.0 +703 21 optimizer.lr 0.0023315001322759617 +703 21 training.batch_size 2.0 +703 21 training.label_smoothing 0.0010494710780994065 +703 22 model.embedding_dim 0.0 +703 
22 model.scoring_fct_norm 2.0 +703 22 optimizer.lr 0.007465823965849302 +703 22 training.batch_size 2.0 +703 22 training.label_smoothing 0.0018064706890075293 +703 23 model.embedding_dim 2.0 +703 23 model.scoring_fct_norm 1.0 +703 23 optimizer.lr 0.0017004092303091146 +703 23 training.batch_size 2.0 +703 23 training.label_smoothing 0.45424625209682273 +703 24 model.embedding_dim 2.0 +703 24 model.scoring_fct_norm 1.0 +703 24 optimizer.lr 0.044869714906773196 +703 24 training.batch_size 1.0 +703 24 training.label_smoothing 0.09995222653673377 +703 25 model.embedding_dim 2.0 +703 25 model.scoring_fct_norm 1.0 +703 25 optimizer.lr 0.01987294663638479 +703 25 training.batch_size 0.0 +703 25 training.label_smoothing 0.0017216914169053959 +703 26 model.embedding_dim 2.0 +703 26 model.scoring_fct_norm 2.0 +703 26 optimizer.lr 0.0032932636351275858 +703 26 training.batch_size 1.0 +703 26 training.label_smoothing 0.038232711209127516 +703 27 model.embedding_dim 2.0 +703 27 model.scoring_fct_norm 1.0 +703 27 optimizer.lr 0.028942827735927514 +703 27 training.batch_size 2.0 +703 27 training.label_smoothing 0.011699603112347318 +703 28 model.embedding_dim 1.0 +703 28 model.scoring_fct_norm 2.0 +703 28 optimizer.lr 0.0011975384818882606 +703 28 training.batch_size 1.0 +703 28 training.label_smoothing 0.0030503778060798053 +703 29 model.embedding_dim 2.0 +703 29 model.scoring_fct_norm 2.0 +703 29 optimizer.lr 0.01167193194863512 +703 29 training.batch_size 1.0 +703 29 training.label_smoothing 0.0015556884686714091 +703 30 model.embedding_dim 1.0 +703 30 model.scoring_fct_norm 2.0 +703 30 optimizer.lr 0.08022457885372516 +703 30 training.batch_size 2.0 +703 30 training.label_smoothing 0.006320296365289005 +703 31 model.embedding_dim 1.0 +703 31 model.scoring_fct_norm 2.0 +703 31 optimizer.lr 0.05335573802383208 +703 31 training.batch_size 2.0 +703 31 training.label_smoothing 0.0912326666536324 +703 32 model.embedding_dim 0.0 +703 32 model.scoring_fct_norm 1.0 +703 32 optimizer.lr 
0.02479574456008585 +703 32 training.batch_size 0.0 +703 32 training.label_smoothing 0.12717291514947063 +703 33 model.embedding_dim 1.0 +703 33 model.scoring_fct_norm 2.0 +703 33 optimizer.lr 0.02537703931600426 +703 33 training.batch_size 2.0 +703 33 training.label_smoothing 0.37160972996879066 +703 34 model.embedding_dim 1.0 +703 34 model.scoring_fct_norm 1.0 +703 34 optimizer.lr 0.0010223909233276652 +703 34 training.batch_size 1.0 +703 34 training.label_smoothing 0.30492448402498457 +703 35 model.embedding_dim 1.0 +703 35 model.scoring_fct_norm 1.0 +703 35 optimizer.lr 0.09276636358316624 +703 35 training.batch_size 0.0 +703 35 training.label_smoothing 0.010187869104349153 +703 36 model.embedding_dim 1.0 +703 36 model.scoring_fct_norm 1.0 +703 36 optimizer.lr 0.005020238672826412 +703 36 training.batch_size 1.0 +703 36 training.label_smoothing 0.7258818071799914 +703 37 model.embedding_dim 2.0 +703 37 model.scoring_fct_norm 2.0 +703 37 optimizer.lr 0.015394448047982932 +703 37 training.batch_size 2.0 +703 37 training.label_smoothing 0.0020760581692545028 +703 38 model.embedding_dim 2.0 +703 38 model.scoring_fct_norm 1.0 +703 38 optimizer.lr 0.0017189743897501099 +703 38 training.batch_size 1.0 +703 38 training.label_smoothing 0.013311531319981904 +703 39 model.embedding_dim 2.0 +703 39 model.scoring_fct_norm 2.0 +703 39 optimizer.lr 0.005293693269639158 +703 39 training.batch_size 2.0 +703 39 training.label_smoothing 0.5683949501514948 +703 40 model.embedding_dim 0.0 +703 40 model.scoring_fct_norm 2.0 +703 40 optimizer.lr 0.008065429701204607 +703 40 training.batch_size 2.0 +703 40 training.label_smoothing 0.5751252702334291 +703 41 model.embedding_dim 1.0 +703 41 model.scoring_fct_norm 1.0 +703 41 optimizer.lr 0.005887441584566219 +703 41 training.batch_size 1.0 +703 41 training.label_smoothing 0.0023109437423791232 +703 42 model.embedding_dim 1.0 +703 42 model.scoring_fct_norm 2.0 +703 42 optimizer.lr 0.0012742077746808 +703 42 training.batch_size 2.0 +703 
42 training.label_smoothing 0.07026988673240601 +703 43 model.embedding_dim 0.0 +703 43 model.scoring_fct_norm 2.0 +703 43 optimizer.lr 0.002299353550098714 +703 43 training.batch_size 2.0 +703 43 training.label_smoothing 0.0010858595423957579 +703 44 model.embedding_dim 1.0 +703 44 model.scoring_fct_norm 1.0 +703 44 optimizer.lr 0.08998632515919908 +703 44 training.batch_size 2.0 +703 44 training.label_smoothing 0.0033864788531954835 +703 45 model.embedding_dim 2.0 +703 45 model.scoring_fct_norm 2.0 +703 45 optimizer.lr 0.017691877011466243 +703 45 training.batch_size 0.0 +703 45 training.label_smoothing 0.13096120568717998 +703 46 model.embedding_dim 2.0 +703 46 model.scoring_fct_norm 1.0 +703 46 optimizer.lr 0.08936529440762234 +703 46 training.batch_size 2.0 +703 46 training.label_smoothing 0.010409208858867527 +703 47 model.embedding_dim 2.0 +703 47 model.scoring_fct_norm 2.0 +703 47 optimizer.lr 0.00468447640439154 +703 47 training.batch_size 0.0 +703 47 training.label_smoothing 0.0168152850809063 +703 48 model.embedding_dim 1.0 +703 48 model.scoring_fct_norm 2.0 +703 48 optimizer.lr 0.003578695288456548 +703 48 training.batch_size 0.0 +703 48 training.label_smoothing 0.025672715604145233 +703 49 model.embedding_dim 2.0 +703 49 model.scoring_fct_norm 1.0 +703 49 optimizer.lr 0.001155433624449947 +703 49 training.batch_size 2.0 +703 49 training.label_smoothing 0.005906669258600272 +703 50 model.embedding_dim 1.0 +703 50 model.scoring_fct_norm 2.0 +703 50 optimizer.lr 0.005252596900742412 +703 50 training.batch_size 1.0 +703 50 training.label_smoothing 0.009056314315209098 +703 51 model.embedding_dim 2.0 +703 51 model.scoring_fct_norm 1.0 +703 51 optimizer.lr 0.015818705423667544 +703 51 training.batch_size 1.0 +703 51 training.label_smoothing 0.6767202125154878 +703 52 model.embedding_dim 2.0 +703 52 model.scoring_fct_norm 1.0 +703 52 optimizer.lr 0.012871084573102088 +703 52 training.batch_size 2.0 +703 52 training.label_smoothing 0.001102185885478492 +703 53 
model.embedding_dim 0.0 +703 53 model.scoring_fct_norm 2.0 +703 53 optimizer.lr 0.012482073944236317 +703 53 training.batch_size 0.0 +703 53 training.label_smoothing 0.5209893811507542 +703 54 model.embedding_dim 0.0 +703 54 model.scoring_fct_norm 1.0 +703 54 optimizer.lr 0.0045526943060568 +703 54 training.batch_size 0.0 +703 54 training.label_smoothing 0.14229433051567061 +703 55 model.embedding_dim 2.0 +703 55 model.scoring_fct_norm 2.0 +703 55 optimizer.lr 0.0013188351972109034 +703 55 training.batch_size 0.0 +703 55 training.label_smoothing 0.0010157509889489695 +703 56 model.embedding_dim 1.0 +703 56 model.scoring_fct_norm 2.0 +703 56 optimizer.lr 0.03948061047794801 +703 56 training.batch_size 1.0 +703 56 training.label_smoothing 0.040999028653954356 +703 57 model.embedding_dim 0.0 +703 57 model.scoring_fct_norm 1.0 +703 57 optimizer.lr 0.028274441153455943 +703 57 training.batch_size 2.0 +703 57 training.label_smoothing 0.07267290065996342 +703 58 model.embedding_dim 2.0 +703 58 model.scoring_fct_norm 1.0 +703 58 optimizer.lr 0.0011097210905831737 +703 58 training.batch_size 1.0 +703 58 training.label_smoothing 0.035967545245190406 +703 59 model.embedding_dim 0.0 +703 59 model.scoring_fct_norm 1.0 +703 59 optimizer.lr 0.0030942463660537504 +703 59 training.batch_size 0.0 +703 59 training.label_smoothing 0.005002406304595726 +703 60 model.embedding_dim 0.0 +703 60 model.scoring_fct_norm 1.0 +703 60 optimizer.lr 0.0060785649280499275 +703 60 training.batch_size 0.0 +703 60 training.label_smoothing 0.002582964974478095 +703 61 model.embedding_dim 1.0 +703 61 model.scoring_fct_norm 2.0 +703 61 optimizer.lr 0.007026332208780355 +703 61 training.batch_size 0.0 +703 61 training.label_smoothing 0.010565229643040392 +703 62 model.embedding_dim 1.0 +703 62 model.scoring_fct_norm 2.0 +703 62 optimizer.lr 0.003115712462547015 +703 62 training.batch_size 2.0 +703 62 training.label_smoothing 0.0011511775227335051 +703 63 model.embedding_dim 2.0 +703 63 
model.scoring_fct_norm 1.0 +703 63 optimizer.lr 0.012521880976665335 +703 63 training.batch_size 0.0 +703 63 training.label_smoothing 0.0018050126618179433 +703 64 model.embedding_dim 2.0 +703 64 model.scoring_fct_norm 2.0 +703 64 optimizer.lr 0.0044286077068859675 +703 64 training.batch_size 1.0 +703 64 training.label_smoothing 0.09032099782599225 +703 65 model.embedding_dim 1.0 +703 65 model.scoring_fct_norm 1.0 +703 65 optimizer.lr 0.019888137889939896 +703 65 training.batch_size 0.0 +703 65 training.label_smoothing 0.46977687328777756 +703 66 model.embedding_dim 0.0 +703 66 model.scoring_fct_norm 2.0 +703 66 optimizer.lr 0.0016997900586231396 +703 66 training.batch_size 2.0 +703 66 training.label_smoothing 0.001611272888643971 +703 67 model.embedding_dim 1.0 +703 67 model.scoring_fct_norm 2.0 +703 67 optimizer.lr 0.004426395340061295 +703 67 training.batch_size 1.0 +703 67 training.label_smoothing 0.025648539082741388 +703 68 model.embedding_dim 2.0 +703 68 model.scoring_fct_norm 2.0 +703 68 optimizer.lr 0.0017806943555681756 +703 68 training.batch_size 0.0 +703 68 training.label_smoothing 0.015804040630423476 +703 69 model.embedding_dim 2.0 +703 69 model.scoring_fct_norm 2.0 +703 69 optimizer.lr 0.0024384882088482576 +703 69 training.batch_size 1.0 +703 69 training.label_smoothing 0.031389975400193476 +703 70 model.embedding_dim 1.0 +703 70 model.scoring_fct_norm 1.0 +703 70 optimizer.lr 0.03511919648748176 +703 70 training.batch_size 2.0 +703 70 training.label_smoothing 0.0017367084342550885 +703 71 model.embedding_dim 0.0 +703 71 model.scoring_fct_norm 1.0 +703 71 optimizer.lr 0.0010852660734957276 +703 71 training.batch_size 2.0 +703 71 training.label_smoothing 0.011139118062234868 +703 72 model.embedding_dim 0.0 +703 72 model.scoring_fct_norm 1.0 +703 72 optimizer.lr 0.0016480940675205483 +703 72 training.batch_size 0.0 +703 72 training.label_smoothing 0.0018487584370455716 +703 73 model.embedding_dim 2.0 +703 73 model.scoring_fct_norm 1.0 +703 73 
optimizer.lr 0.0030448464820334066 +703 73 training.batch_size 2.0 +703 73 training.label_smoothing 0.6486674104958439 +703 74 model.embedding_dim 0.0 +703 74 model.scoring_fct_norm 2.0 +703 74 optimizer.lr 0.008409905490454804 +703 74 training.batch_size 0.0 +703 74 training.label_smoothing 0.004306738328257731 +703 75 model.embedding_dim 2.0 +703 75 model.scoring_fct_norm 1.0 +703 75 optimizer.lr 0.0013001636401696864 +703 75 training.batch_size 2.0 +703 75 training.label_smoothing 0.2087185358961002 +703 76 model.embedding_dim 1.0 +703 76 model.scoring_fct_norm 1.0 +703 76 optimizer.lr 0.022844689449234207 +703 76 training.batch_size 1.0 +703 76 training.label_smoothing 0.004315958684916928 +703 77 model.embedding_dim 0.0 +703 77 model.scoring_fct_norm 1.0 +703 77 optimizer.lr 0.03466128390221113 +703 77 training.batch_size 2.0 +703 77 training.label_smoothing 0.39901862489838713 +703 78 model.embedding_dim 2.0 +703 78 model.scoring_fct_norm 1.0 +703 78 optimizer.lr 0.014804733285411735 +703 78 training.batch_size 2.0 +703 78 training.label_smoothing 0.11944838072582603 +703 79 model.embedding_dim 1.0 +703 79 model.scoring_fct_norm 2.0 +703 79 optimizer.lr 0.06405974546676241 +703 79 training.batch_size 0.0 +703 79 training.label_smoothing 0.005174063211282068 +703 80 model.embedding_dim 0.0 +703 80 model.scoring_fct_norm 1.0 +703 80 optimizer.lr 0.05699553374421712 +703 80 training.batch_size 1.0 +703 80 training.label_smoothing 0.01288947015751958 +703 81 model.embedding_dim 0.0 +703 81 model.scoring_fct_norm 1.0 +703 81 optimizer.lr 0.0017970010692952155 +703 81 training.batch_size 0.0 +703 81 training.label_smoothing 0.03326796643743258 +703 82 model.embedding_dim 0.0 +703 82 model.scoring_fct_norm 2.0 +703 82 optimizer.lr 0.0840367908212397 +703 82 training.batch_size 2.0 +703 82 training.label_smoothing 0.9778295117989967 +703 83 model.embedding_dim 0.0 +703 83 model.scoring_fct_norm 1.0 +703 83 optimizer.lr 0.013987364176315902 +703 83 training.batch_size 
0.0 +703 83 training.label_smoothing 0.001232933600932815 +703 84 model.embedding_dim 0.0 +703 84 model.scoring_fct_norm 2.0 +703 84 optimizer.lr 0.002420717304890393 +703 84 training.batch_size 2.0 +703 84 training.label_smoothing 0.029966409427436117 +703 85 model.embedding_dim 0.0 +703 85 model.scoring_fct_norm 1.0 +703 85 optimizer.lr 0.01615832528714878 +703 85 training.batch_size 2.0 +703 85 training.label_smoothing 0.24975237608193104 +703 86 model.embedding_dim 1.0 +703 86 model.scoring_fct_norm 1.0 +703 86 optimizer.lr 0.003323157174369482 +703 86 training.batch_size 1.0 +703 86 training.label_smoothing 0.047076734785213084 +703 87 model.embedding_dim 0.0 +703 87 model.scoring_fct_norm 2.0 +703 87 optimizer.lr 0.08689125472267674 +703 87 training.batch_size 0.0 +703 87 training.label_smoothing 0.6293442501791159 +703 88 model.embedding_dim 2.0 +703 88 model.scoring_fct_norm 2.0 +703 88 optimizer.lr 0.020291186686898444 +703 88 training.batch_size 2.0 +703 88 training.label_smoothing 0.0010162555103294224 +703 89 model.embedding_dim 0.0 +703 89 model.scoring_fct_norm 1.0 +703 89 optimizer.lr 0.0015178734580307556 +703 89 training.batch_size 0.0 +703 89 training.label_smoothing 0.06662950671597472 +703 90 model.embedding_dim 0.0 +703 90 model.scoring_fct_norm 2.0 +703 90 optimizer.lr 0.015459168711972908 +703 90 training.batch_size 0.0 +703 90 training.label_smoothing 0.10105997932843536 +703 91 model.embedding_dim 0.0 +703 91 model.scoring_fct_norm 2.0 +703 91 optimizer.lr 0.0011868834791230088 +703 91 training.batch_size 2.0 +703 91 training.label_smoothing 0.42339356193612476 +703 92 model.embedding_dim 0.0 +703 92 model.scoring_fct_norm 1.0 +703 92 optimizer.lr 0.013695070773498925 +703 92 training.batch_size 2.0 +703 92 training.label_smoothing 0.014038287790761675 +703 93 model.embedding_dim 2.0 +703 93 model.scoring_fct_norm 2.0 +703 93 optimizer.lr 0.019358816900594353 +703 93 training.batch_size 1.0 +703 93 training.label_smoothing 
0.006352769620855139 +703 94 model.embedding_dim 1.0 +703 94 model.scoring_fct_norm 2.0 +703 94 optimizer.lr 0.04876247730733236 +703 94 training.batch_size 1.0 +703 94 training.label_smoothing 0.005844627894236024 +703 95 model.embedding_dim 2.0 +703 95 model.scoring_fct_norm 1.0 +703 95 optimizer.lr 0.001539227252852125 +703 95 training.batch_size 0.0 +703 95 training.label_smoothing 0.0012316618890656139 +703 96 model.embedding_dim 0.0 +703 96 model.scoring_fct_norm 2.0 +703 96 optimizer.lr 0.07897085002277242 +703 96 training.batch_size 0.0 +703 96 training.label_smoothing 0.03058215338630684 +703 97 model.embedding_dim 0.0 +703 97 model.scoring_fct_norm 2.0 +703 97 optimizer.lr 0.0010833628310505698 +703 97 training.batch_size 1.0 +703 97 training.label_smoothing 0.001331433822532817 +703 98 model.embedding_dim 0.0 +703 98 model.scoring_fct_norm 2.0 +703 98 optimizer.lr 0.011218288351535986 +703 98 training.batch_size 0.0 +703 98 training.label_smoothing 0.012499172658805678 +703 99 model.embedding_dim 0.0 +703 99 model.scoring_fct_norm 2.0 +703 99 optimizer.lr 0.00664070107377522 +703 99 training.batch_size 1.0 +703 99 training.label_smoothing 0.003957760694759874 +703 100 model.embedding_dim 0.0 +703 100 model.scoring_fct_norm 2.0 +703 100 optimizer.lr 0.01186165167085213 +703 100 training.batch_size 0.0 +703 100 training.label_smoothing 0.00957825992247842 +703 1 dataset """kinships""" +703 1 model """structuredembedding""" +703 1 loss """crossentropy""" +703 1 regularizer """no""" +703 1 optimizer """adam""" +703 1 training_loop """lcwa""" +703 1 evaluator """rankbased""" +703 2 dataset """kinships""" +703 2 model """structuredembedding""" +703 2 loss """crossentropy""" +703 2 regularizer """no""" +703 2 optimizer """adam""" +703 2 training_loop """lcwa""" +703 2 evaluator """rankbased""" +703 3 dataset """kinships""" +703 3 model """structuredembedding""" +703 3 loss """crossentropy""" +703 3 regularizer """no""" +703 3 optimizer """adam""" +703 3 
training_loop """lcwa""" +703 3 evaluator """rankbased""" +703 4 dataset """kinships""" +703 4 model """structuredembedding""" +703 4 loss """crossentropy""" +703 4 regularizer """no""" +703 4 optimizer """adam""" +703 4 training_loop """lcwa""" +703 4 evaluator """rankbased""" +703 5 dataset """kinships""" +703 5 model """structuredembedding""" +703 5 loss """crossentropy""" +703 5 regularizer """no""" +703 5 optimizer """adam""" +703 5 training_loop """lcwa""" +703 5 evaluator """rankbased""" +703 6 dataset """kinships""" +703 6 model """structuredembedding""" +703 6 loss """crossentropy""" +703 6 regularizer """no""" +703 6 optimizer """adam""" +703 6 training_loop """lcwa""" +703 6 evaluator """rankbased""" +703 7 dataset """kinships""" +703 7 model """structuredembedding""" +703 7 loss """crossentropy""" +703 7 regularizer """no""" +703 7 optimizer """adam""" +703 7 training_loop """lcwa""" +703 7 evaluator """rankbased""" +703 8 dataset """kinships""" +703 8 model """structuredembedding""" +703 8 loss """crossentropy""" +703 8 regularizer """no""" +703 8 optimizer """adam""" +703 8 training_loop """lcwa""" +703 8 evaluator """rankbased""" +703 9 dataset """kinships""" +703 9 model """structuredembedding""" +703 9 loss """crossentropy""" +703 9 regularizer """no""" +703 9 optimizer """adam""" +703 9 training_loop """lcwa""" +703 9 evaluator """rankbased""" +703 10 dataset """kinships""" +703 10 model """structuredembedding""" +703 10 loss """crossentropy""" +703 10 regularizer """no""" +703 10 optimizer """adam""" +703 10 training_loop """lcwa""" +703 10 evaluator """rankbased""" +703 11 dataset """kinships""" +703 11 model """structuredembedding""" +703 11 loss """crossentropy""" +703 11 regularizer """no""" +703 11 optimizer """adam""" +703 11 training_loop """lcwa""" +703 11 evaluator """rankbased""" +703 12 dataset """kinships""" +703 12 model """structuredembedding""" +703 12 loss """crossentropy""" +703 12 regularizer """no""" +703 12 optimizer 
"""adam""" +703 12 training_loop """lcwa""" +703 12 evaluator """rankbased""" +703 13 dataset """kinships""" +703 13 model """structuredembedding""" +703 13 loss """crossentropy""" +703 13 regularizer """no""" +703 13 optimizer """adam""" +703 13 training_loop """lcwa""" +703 13 evaluator """rankbased""" +703 14 dataset """kinships""" +703 14 model """structuredembedding""" +703 14 loss """crossentropy""" +703 14 regularizer """no""" +703 14 optimizer """adam""" +703 14 training_loop """lcwa""" +703 14 evaluator """rankbased""" +703 15 dataset """kinships""" +703 15 model """structuredembedding""" +703 15 loss """crossentropy""" +703 15 regularizer """no""" +703 15 optimizer """adam""" +703 15 training_loop """lcwa""" +703 15 evaluator """rankbased""" +703 16 dataset """kinships""" +703 16 model """structuredembedding""" +703 16 loss """crossentropy""" +703 16 regularizer """no""" +703 16 optimizer """adam""" +703 16 training_loop """lcwa""" +703 16 evaluator """rankbased""" +703 17 dataset """kinships""" +703 17 model """structuredembedding""" +703 17 loss """crossentropy""" +703 17 regularizer """no""" +703 17 optimizer """adam""" +703 17 training_loop """lcwa""" +703 17 evaluator """rankbased""" +703 18 dataset """kinships""" +703 18 model """structuredembedding""" +703 18 loss """crossentropy""" +703 18 regularizer """no""" +703 18 optimizer """adam""" +703 18 training_loop """lcwa""" +703 18 evaluator """rankbased""" +703 19 dataset """kinships""" +703 19 model """structuredembedding""" +703 19 loss """crossentropy""" +703 19 regularizer """no""" +703 19 optimizer """adam""" +703 19 training_loop """lcwa""" +703 19 evaluator """rankbased""" +703 20 dataset """kinships""" +703 20 model """structuredembedding""" +703 20 loss """crossentropy""" +703 20 regularizer """no""" +703 20 optimizer """adam""" +703 20 training_loop """lcwa""" +703 20 evaluator """rankbased""" +703 21 dataset """kinships""" +703 21 model """structuredembedding""" +703 21 loss 
"""crossentropy""" +703 21 regularizer """no""" +703 21 optimizer """adam""" +703 21 training_loop """lcwa""" +703 21 evaluator """rankbased""" +703 22 dataset """kinships""" +703 22 model """structuredembedding""" +703 22 loss """crossentropy""" +703 22 regularizer """no""" +703 22 optimizer """adam""" +703 22 training_loop """lcwa""" +703 22 evaluator """rankbased""" +703 23 dataset """kinships""" +703 23 model """structuredembedding""" +703 23 loss """crossentropy""" +703 23 regularizer """no""" +703 23 optimizer """adam""" +703 23 training_loop """lcwa""" +703 23 evaluator """rankbased""" +703 24 dataset """kinships""" +703 24 model """structuredembedding""" +703 24 loss """crossentropy""" +703 24 regularizer """no""" +703 24 optimizer """adam""" +703 24 training_loop """lcwa""" +703 24 evaluator """rankbased""" +703 25 dataset """kinships""" +703 25 model """structuredembedding""" +703 25 loss """crossentropy""" +703 25 regularizer """no""" +703 25 optimizer """adam""" +703 25 training_loop """lcwa""" +703 25 evaluator """rankbased""" +703 26 dataset """kinships""" +703 26 model """structuredembedding""" +703 26 loss """crossentropy""" +703 26 regularizer """no""" +703 26 optimizer """adam""" +703 26 training_loop """lcwa""" +703 26 evaluator """rankbased""" +703 27 dataset """kinships""" +703 27 model """structuredembedding""" +703 27 loss """crossentropy""" +703 27 regularizer """no""" +703 27 optimizer """adam""" +703 27 training_loop """lcwa""" +703 27 evaluator """rankbased""" +703 28 dataset """kinships""" +703 28 model """structuredembedding""" +703 28 loss """crossentropy""" +703 28 regularizer """no""" +703 28 optimizer """adam""" +703 28 training_loop """lcwa""" +703 28 evaluator """rankbased""" +703 29 dataset """kinships""" +703 29 model """structuredembedding""" +703 29 loss """crossentropy""" +703 29 regularizer """no""" +703 29 optimizer """adam""" +703 29 training_loop """lcwa""" +703 29 evaluator """rankbased""" +703 30 dataset """kinships""" 
+703 30 model """structuredembedding""" +703 30 loss """crossentropy""" +703 30 regularizer """no""" +703 30 optimizer """adam""" +703 30 training_loop """lcwa""" +703 30 evaluator """rankbased""" +703 31 dataset """kinships""" +703 31 model """structuredembedding""" +703 31 loss """crossentropy""" +703 31 regularizer """no""" +703 31 optimizer """adam""" +703 31 training_loop """lcwa""" +703 31 evaluator """rankbased""" +703 32 dataset """kinships""" +703 32 model """structuredembedding""" +703 32 loss """crossentropy""" +703 32 regularizer """no""" +703 32 optimizer """adam""" +703 32 training_loop """lcwa""" +703 32 evaluator """rankbased""" +703 33 dataset """kinships""" +703 33 model """structuredembedding""" +703 33 loss """crossentropy""" +703 33 regularizer """no""" +703 33 optimizer """adam""" +703 33 training_loop """lcwa""" +703 33 evaluator """rankbased""" +703 34 dataset """kinships""" +703 34 model """structuredembedding""" +703 34 loss """crossentropy""" +703 34 regularizer """no""" +703 34 optimizer """adam""" +703 34 training_loop """lcwa""" +703 34 evaluator """rankbased""" +703 35 dataset """kinships""" +703 35 model """structuredembedding""" +703 35 loss """crossentropy""" +703 35 regularizer """no""" +703 35 optimizer """adam""" +703 35 training_loop """lcwa""" +703 35 evaluator """rankbased""" +703 36 dataset """kinships""" +703 36 model """structuredembedding""" +703 36 loss """crossentropy""" +703 36 regularizer """no""" +703 36 optimizer """adam""" +703 36 training_loop """lcwa""" +703 36 evaluator """rankbased""" +703 37 dataset """kinships""" +703 37 model """structuredembedding""" +703 37 loss """crossentropy""" +703 37 regularizer """no""" +703 37 optimizer """adam""" +703 37 training_loop """lcwa""" +703 37 evaluator """rankbased""" +703 38 dataset """kinships""" +703 38 model """structuredembedding""" +703 38 loss """crossentropy""" +703 38 regularizer """no""" +703 38 optimizer """adam""" +703 38 training_loop """lcwa""" +703 38 
evaluator """rankbased""" +703 39 dataset """kinships""" +703 39 model """structuredembedding""" +703 39 loss """crossentropy""" +703 39 regularizer """no""" +703 39 optimizer """adam""" +703 39 training_loop """lcwa""" +703 39 evaluator """rankbased""" +703 40 dataset """kinships""" +703 40 model """structuredembedding""" +703 40 loss """crossentropy""" +703 40 regularizer """no""" +703 40 optimizer """adam""" +703 40 training_loop """lcwa""" +703 40 evaluator """rankbased""" +703 41 dataset """kinships""" +703 41 model """structuredembedding""" +703 41 loss """crossentropy""" +703 41 regularizer """no""" +703 41 optimizer """adam""" +703 41 training_loop """lcwa""" +703 41 evaluator """rankbased""" +703 42 dataset """kinships""" +703 42 model """structuredembedding""" +703 42 loss """crossentropy""" +703 42 regularizer """no""" +703 42 optimizer """adam""" +703 42 training_loop """lcwa""" +703 42 evaluator """rankbased""" +703 43 dataset """kinships""" +703 43 model """structuredembedding""" +703 43 loss """crossentropy""" +703 43 regularizer """no""" +703 43 optimizer """adam""" +703 43 training_loop """lcwa""" +703 43 evaluator """rankbased""" +703 44 dataset """kinships""" +703 44 model """structuredembedding""" +703 44 loss """crossentropy""" +703 44 regularizer """no""" +703 44 optimizer """adam""" +703 44 training_loop """lcwa""" +703 44 evaluator """rankbased""" +703 45 dataset """kinships""" +703 45 model """structuredembedding""" +703 45 loss """crossentropy""" +703 45 regularizer """no""" +703 45 optimizer """adam""" +703 45 training_loop """lcwa""" +703 45 evaluator """rankbased""" +703 46 dataset """kinships""" +703 46 model """structuredembedding""" +703 46 loss """crossentropy""" +703 46 regularizer """no""" +703 46 optimizer """adam""" +703 46 training_loop """lcwa""" +703 46 evaluator """rankbased""" +703 47 dataset """kinships""" +703 47 model """structuredembedding""" +703 47 loss """crossentropy""" +703 47 regularizer """no""" +703 47 optimizer 
"""adam""" +703 47 training_loop """lcwa""" +703 47 evaluator """rankbased""" +703 48 dataset """kinships""" +703 48 model """structuredembedding""" +703 48 loss """crossentropy""" +703 48 regularizer """no""" +703 48 optimizer """adam""" +703 48 training_loop """lcwa""" +703 48 evaluator """rankbased""" +703 49 dataset """kinships""" +703 49 model """structuredembedding""" +703 49 loss """crossentropy""" +703 49 regularizer """no""" +703 49 optimizer """adam""" +703 49 training_loop """lcwa""" +703 49 evaluator """rankbased""" +703 50 dataset """kinships""" +703 50 model """structuredembedding""" +703 50 loss """crossentropy""" +703 50 regularizer """no""" +703 50 optimizer """adam""" +703 50 training_loop """lcwa""" +703 50 evaluator """rankbased""" +703 51 dataset """kinships""" +703 51 model """structuredembedding""" +703 51 loss """crossentropy""" +703 51 regularizer """no""" +703 51 optimizer """adam""" +703 51 training_loop """lcwa""" +703 51 evaluator """rankbased""" +703 52 dataset """kinships""" +703 52 model """structuredembedding""" +703 52 loss """crossentropy""" +703 52 regularizer """no""" +703 52 optimizer """adam""" +703 52 training_loop """lcwa""" +703 52 evaluator """rankbased""" +703 53 dataset """kinships""" +703 53 model """structuredembedding""" +703 53 loss """crossentropy""" +703 53 regularizer """no""" +703 53 optimizer """adam""" +703 53 training_loop """lcwa""" +703 53 evaluator """rankbased""" +703 54 dataset """kinships""" +703 54 model """structuredembedding""" +703 54 loss """crossentropy""" +703 54 regularizer """no""" +703 54 optimizer """adam""" +703 54 training_loop """lcwa""" +703 54 evaluator """rankbased""" +703 55 dataset """kinships""" +703 55 model """structuredembedding""" +703 55 loss """crossentropy""" +703 55 regularizer """no""" +703 55 optimizer """adam""" +703 55 training_loop """lcwa""" +703 55 evaluator """rankbased""" +703 56 dataset """kinships""" +703 56 model """structuredembedding""" +703 56 loss 
"""crossentropy""" +703 56 regularizer """no""" +703 56 optimizer """adam""" +703 56 training_loop """lcwa""" +703 56 evaluator """rankbased""" +703 57 dataset """kinships""" +703 57 model """structuredembedding""" +703 57 loss """crossentropy""" +703 57 regularizer """no""" +703 57 optimizer """adam""" +703 57 training_loop """lcwa""" +703 57 evaluator """rankbased""" +703 58 dataset """kinships""" +703 58 model """structuredembedding""" +703 58 loss """crossentropy""" +703 58 regularizer """no""" +703 58 optimizer """adam""" +703 58 training_loop """lcwa""" +703 58 evaluator """rankbased""" +703 59 dataset """kinships""" +703 59 model """structuredembedding""" +703 59 loss """crossentropy""" +703 59 regularizer """no""" +703 59 optimizer """adam""" +703 59 training_loop """lcwa""" +703 59 evaluator """rankbased""" +703 60 dataset """kinships""" +703 60 model """structuredembedding""" +703 60 loss """crossentropy""" +703 60 regularizer """no""" +703 60 optimizer """adam""" +703 60 training_loop """lcwa""" +703 60 evaluator """rankbased""" +703 61 dataset """kinships""" +703 61 model """structuredembedding""" +703 61 loss """crossentropy""" +703 61 regularizer """no""" +703 61 optimizer """adam""" +703 61 training_loop """lcwa""" +703 61 evaluator """rankbased""" +703 62 dataset """kinships""" +703 62 model """structuredembedding""" +703 62 loss """crossentropy""" +703 62 regularizer """no""" +703 62 optimizer """adam""" +703 62 training_loop """lcwa""" +703 62 evaluator """rankbased""" +703 63 dataset """kinships""" +703 63 model """structuredembedding""" +703 63 loss """crossentropy""" +703 63 regularizer """no""" +703 63 optimizer """adam""" +703 63 training_loop """lcwa""" +703 63 evaluator """rankbased""" +703 64 dataset """kinships""" +703 64 model """structuredembedding""" +703 64 loss """crossentropy""" +703 64 regularizer """no""" +703 64 optimizer """adam""" +703 64 training_loop """lcwa""" +703 64 evaluator """rankbased""" +703 65 dataset """kinships""" 
+703 65 model """structuredembedding""" +703 65 loss """crossentropy""" +703 65 regularizer """no""" +703 65 optimizer """adam""" +703 65 training_loop """lcwa""" +703 65 evaluator """rankbased""" +703 66 dataset """kinships""" +703 66 model """structuredembedding""" +703 66 loss """crossentropy""" +703 66 regularizer """no""" +703 66 optimizer """adam""" +703 66 training_loop """lcwa""" +703 66 evaluator """rankbased""" +703 67 dataset """kinships""" +703 67 model """structuredembedding""" +703 67 loss """crossentropy""" +703 67 regularizer """no""" +703 67 optimizer """adam""" +703 67 training_loop """lcwa""" +703 67 evaluator """rankbased""" +703 68 dataset """kinships""" +703 68 model """structuredembedding""" +703 68 loss """crossentropy""" +703 68 regularizer """no""" +703 68 optimizer """adam""" +703 68 training_loop """lcwa""" +703 68 evaluator """rankbased""" +703 69 dataset """kinships""" +703 69 model """structuredembedding""" +703 69 loss """crossentropy""" +703 69 regularizer """no""" +703 69 optimizer """adam""" +703 69 training_loop """lcwa""" +703 69 evaluator """rankbased""" +703 70 dataset """kinships""" +703 70 model """structuredembedding""" +703 70 loss """crossentropy""" +703 70 regularizer """no""" +703 70 optimizer """adam""" +703 70 training_loop """lcwa""" +703 70 evaluator """rankbased""" +703 71 dataset """kinships""" +703 71 model """structuredembedding""" +703 71 loss """crossentropy""" +703 71 regularizer """no""" +703 71 optimizer """adam""" +703 71 training_loop """lcwa""" +703 71 evaluator """rankbased""" +703 72 dataset """kinships""" +703 72 model """structuredembedding""" +703 72 loss """crossentropy""" +703 72 regularizer """no""" +703 72 optimizer """adam""" +703 72 training_loop """lcwa""" +703 72 evaluator """rankbased""" +703 73 dataset """kinships""" +703 73 model """structuredembedding""" +703 73 loss """crossentropy""" +703 73 regularizer """no""" +703 73 optimizer """adam""" +703 73 training_loop """lcwa""" +703 73 
evaluator """rankbased""" +703 74 dataset """kinships""" +703 74 model """structuredembedding""" +703 74 loss """crossentropy""" +703 74 regularizer """no""" +703 74 optimizer """adam""" +703 74 training_loop """lcwa""" +703 74 evaluator """rankbased""" +703 75 dataset """kinships""" +703 75 model """structuredembedding""" +703 75 loss """crossentropy""" +703 75 regularizer """no""" +703 75 optimizer """adam""" +703 75 training_loop """lcwa""" +703 75 evaluator """rankbased""" +703 76 dataset """kinships""" +703 76 model """structuredembedding""" +703 76 loss """crossentropy""" +703 76 regularizer """no""" +703 76 optimizer """adam""" +703 76 training_loop """lcwa""" +703 76 evaluator """rankbased""" +703 77 dataset """kinships""" +703 77 model """structuredembedding""" +703 77 loss """crossentropy""" +703 77 regularizer """no""" +703 77 optimizer """adam""" +703 77 training_loop """lcwa""" +703 77 evaluator """rankbased""" +703 78 dataset """kinships""" +703 78 model """structuredembedding""" +703 78 loss """crossentropy""" +703 78 regularizer """no""" +703 78 optimizer """adam""" +703 78 training_loop """lcwa""" +703 78 evaluator """rankbased""" +703 79 dataset """kinships""" +703 79 model """structuredembedding""" +703 79 loss """crossentropy""" +703 79 regularizer """no""" +703 79 optimizer """adam""" +703 79 training_loop """lcwa""" +703 79 evaluator """rankbased""" +703 80 dataset """kinships""" +703 80 model """structuredembedding""" +703 80 loss """crossentropy""" +703 80 regularizer """no""" +703 80 optimizer """adam""" +703 80 training_loop """lcwa""" +703 80 evaluator """rankbased""" +703 81 dataset """kinships""" +703 81 model """structuredembedding""" +703 81 loss """crossentropy""" +703 81 regularizer """no""" +703 81 optimizer """adam""" +703 81 training_loop """lcwa""" +703 81 evaluator """rankbased""" +703 82 dataset """kinships""" +703 82 model """structuredembedding""" +703 82 loss """crossentropy""" +703 82 regularizer """no""" +703 82 optimizer 
"""adam""" +703 82 training_loop """lcwa""" +703 82 evaluator """rankbased""" +703 83 dataset """kinships""" +703 83 model """structuredembedding""" +703 83 loss """crossentropy""" +703 83 regularizer """no""" +703 83 optimizer """adam""" +703 83 training_loop """lcwa""" +703 83 evaluator """rankbased""" +703 84 dataset """kinships""" +703 84 model """structuredembedding""" +703 84 loss """crossentropy""" +703 84 regularizer """no""" +703 84 optimizer """adam""" +703 84 training_loop """lcwa""" +703 84 evaluator """rankbased""" +703 85 dataset """kinships""" +703 85 model """structuredembedding""" +703 85 loss """crossentropy""" +703 85 regularizer """no""" +703 85 optimizer """adam""" +703 85 training_loop """lcwa""" +703 85 evaluator """rankbased""" +703 86 dataset """kinships""" +703 86 model """structuredembedding""" +703 86 loss """crossentropy""" +703 86 regularizer """no""" +703 86 optimizer """adam""" +703 86 training_loop """lcwa""" +703 86 evaluator """rankbased""" +703 87 dataset """kinships""" +703 87 model """structuredembedding""" +703 87 loss """crossentropy""" +703 87 regularizer """no""" +703 87 optimizer """adam""" +703 87 training_loop """lcwa""" +703 87 evaluator """rankbased""" +703 88 dataset """kinships""" +703 88 model """structuredembedding""" +703 88 loss """crossentropy""" +703 88 regularizer """no""" +703 88 optimizer """adam""" +703 88 training_loop """lcwa""" +703 88 evaluator """rankbased""" +703 89 dataset """kinships""" +703 89 model """structuredembedding""" +703 89 loss """crossentropy""" +703 89 regularizer """no""" +703 89 optimizer """adam""" +703 89 training_loop """lcwa""" +703 89 evaluator """rankbased""" +703 90 dataset """kinships""" +703 90 model """structuredembedding""" +703 90 loss """crossentropy""" +703 90 regularizer """no""" +703 90 optimizer """adam""" +703 90 training_loop """lcwa""" +703 90 evaluator """rankbased""" +703 91 dataset """kinships""" +703 91 model """structuredembedding""" +703 91 loss 
"""crossentropy""" +703 91 regularizer """no""" +703 91 optimizer """adam""" +703 91 training_loop """lcwa""" +703 91 evaluator """rankbased""" +703 92 dataset """kinships""" +703 92 model """structuredembedding""" +703 92 loss """crossentropy""" +703 92 regularizer """no""" +703 92 optimizer """adam""" +703 92 training_loop """lcwa""" +703 92 evaluator """rankbased""" +703 93 dataset """kinships""" +703 93 model """structuredembedding""" +703 93 loss """crossentropy""" +703 93 regularizer """no""" +703 93 optimizer """adam""" +703 93 training_loop """lcwa""" +703 93 evaluator """rankbased""" +703 94 dataset """kinships""" +703 94 model """structuredembedding""" +703 94 loss """crossentropy""" +703 94 regularizer """no""" +703 94 optimizer """adam""" +703 94 training_loop """lcwa""" +703 94 evaluator """rankbased""" +703 95 dataset """kinships""" +703 95 model """structuredembedding""" +703 95 loss """crossentropy""" +703 95 regularizer """no""" +703 95 optimizer """adam""" +703 95 training_loop """lcwa""" +703 95 evaluator """rankbased""" +703 96 dataset """kinships""" +703 96 model """structuredembedding""" +703 96 loss """crossentropy""" +703 96 regularizer """no""" +703 96 optimizer """adam""" +703 96 training_loop """lcwa""" +703 96 evaluator """rankbased""" +703 97 dataset """kinships""" +703 97 model """structuredembedding""" +703 97 loss """crossentropy""" +703 97 regularizer """no""" +703 97 optimizer """adam""" +703 97 training_loop """lcwa""" +703 97 evaluator """rankbased""" +703 98 dataset """kinships""" +703 98 model """structuredembedding""" +703 98 loss """crossentropy""" +703 98 regularizer """no""" +703 98 optimizer """adam""" +703 98 training_loop """lcwa""" +703 98 evaluator """rankbased""" +703 99 dataset """kinships""" +703 99 model """structuredembedding""" +703 99 loss """crossentropy""" +703 99 regularizer """no""" +703 99 optimizer """adam""" +703 99 training_loop """lcwa""" +703 99 evaluator """rankbased""" +703 100 dataset """kinships""" 
+703 100 model """structuredembedding""" +703 100 loss """crossentropy""" +703 100 regularizer """no""" +703 100 optimizer """adam""" +703 100 training_loop """lcwa""" +703 100 evaluator """rankbased""" +704 1 model.embedding_dim 1.0 +704 1 model.scoring_fct_norm 1.0 +704 1 optimizer.lr 0.005676920405222504 +704 1 training.batch_size 0.0 +704 1 training.label_smoothing 0.17824676421031982 +704 2 model.embedding_dim 2.0 +704 2 model.scoring_fct_norm 2.0 +704 2 optimizer.lr 0.002614729862352933 +704 2 training.batch_size 0.0 +704 2 training.label_smoothing 0.021479325996331438 +704 3 model.embedding_dim 0.0 +704 3 model.scoring_fct_norm 1.0 +704 3 optimizer.lr 0.050274426684065295 +704 3 training.batch_size 2.0 +704 3 training.label_smoothing 0.05270541204758873 +704 4 model.embedding_dim 0.0 +704 4 model.scoring_fct_norm 1.0 +704 4 optimizer.lr 0.005788635172861433 +704 4 training.batch_size 0.0 +704 4 training.label_smoothing 0.006455141497574785 +704 5 model.embedding_dim 0.0 +704 5 model.scoring_fct_norm 1.0 +704 5 optimizer.lr 0.007818505646262049 +704 5 training.batch_size 1.0 +704 5 training.label_smoothing 0.22960022207402897 +704 6 model.embedding_dim 0.0 +704 6 model.scoring_fct_norm 1.0 +704 6 optimizer.lr 0.003905679710673516 +704 6 training.batch_size 0.0 +704 6 training.label_smoothing 0.44612665681590546 +704 7 model.embedding_dim 0.0 +704 7 model.scoring_fct_norm 1.0 +704 7 optimizer.lr 0.008117215628329384 +704 7 training.batch_size 0.0 +704 7 training.label_smoothing 0.13460342960373364 +704 8 model.embedding_dim 0.0 +704 8 model.scoring_fct_norm 1.0 +704 8 optimizer.lr 0.04364657105094891 +704 8 training.batch_size 0.0 +704 8 training.label_smoothing 0.7956046570380985 +704 9 model.embedding_dim 2.0 +704 9 model.scoring_fct_norm 1.0 +704 9 optimizer.lr 0.0027279871511488784 +704 9 training.batch_size 1.0 +704 9 training.label_smoothing 0.029462078172422104 +704 10 model.embedding_dim 2.0 +704 10 model.scoring_fct_norm 2.0 +704 10 optimizer.lr 
0.02449024154361614 +704 10 training.batch_size 2.0 +704 10 training.label_smoothing 0.0018859840618321835 +704 11 model.embedding_dim 2.0 +704 11 model.scoring_fct_norm 1.0 +704 11 optimizer.lr 0.003090065670025759 +704 11 training.batch_size 1.0 +704 11 training.label_smoothing 0.002730626076142024 +704 12 model.embedding_dim 2.0 +704 12 model.scoring_fct_norm 2.0 +704 12 optimizer.lr 0.025534414910748107 +704 12 training.batch_size 0.0 +704 12 training.label_smoothing 0.12917372871706706 +704 13 model.embedding_dim 1.0 +704 13 model.scoring_fct_norm 2.0 +704 13 optimizer.lr 0.0954718106613311 +704 13 training.batch_size 2.0 +704 13 training.label_smoothing 0.017550270821719977 +704 14 model.embedding_dim 1.0 +704 14 model.scoring_fct_norm 1.0 +704 14 optimizer.lr 0.01928480955109906 +704 14 training.batch_size 0.0 +704 14 training.label_smoothing 0.11650582673223146 +704 15 model.embedding_dim 1.0 +704 15 model.scoring_fct_norm 2.0 +704 15 optimizer.lr 0.029834065644140425 +704 15 training.batch_size 1.0 +704 15 training.label_smoothing 0.003361820255284 +704 16 model.embedding_dim 0.0 +704 16 model.scoring_fct_norm 1.0 +704 16 optimizer.lr 0.012475909891638878 +704 16 training.batch_size 1.0 +704 16 training.label_smoothing 0.007929401245719607 +704 17 model.embedding_dim 0.0 +704 17 model.scoring_fct_norm 2.0 +704 17 optimizer.lr 0.050078840470560965 +704 17 training.batch_size 0.0 +704 17 training.label_smoothing 0.08054577888270475 +704 18 model.embedding_dim 0.0 +704 18 model.scoring_fct_norm 2.0 +704 18 optimizer.lr 0.010792766346224423 +704 18 training.batch_size 2.0 +704 18 training.label_smoothing 0.019854072427583032 +704 19 model.embedding_dim 1.0 +704 19 model.scoring_fct_norm 1.0 +704 19 optimizer.lr 0.0010810190107490626 +704 19 training.batch_size 2.0 +704 19 training.label_smoothing 0.25347706300767764 +704 20 model.embedding_dim 0.0 +704 20 model.scoring_fct_norm 1.0 +704 20 optimizer.lr 0.07924953299520296 +704 20 training.batch_size 2.0 +704 
20 training.label_smoothing 0.72870105000128 +704 21 model.embedding_dim 1.0 +704 21 model.scoring_fct_norm 1.0 +704 21 optimizer.lr 0.0027058682375620986 +704 21 training.batch_size 1.0 +704 21 training.label_smoothing 0.0013013086360416537 +704 22 model.embedding_dim 0.0 +704 22 model.scoring_fct_norm 2.0 +704 22 optimizer.lr 0.015104494880933532 +704 22 training.batch_size 0.0 +704 22 training.label_smoothing 0.0011330254342139007 +704 23 model.embedding_dim 1.0 +704 23 model.scoring_fct_norm 1.0 +704 23 optimizer.lr 0.05269940673593617 +704 23 training.batch_size 2.0 +704 23 training.label_smoothing 0.08225757508616799 +704 24 model.embedding_dim 2.0 +704 24 model.scoring_fct_norm 1.0 +704 24 optimizer.lr 0.007697763616002486 +704 24 training.batch_size 2.0 +704 24 training.label_smoothing 0.06251347195121154 +704 25 model.embedding_dim 0.0 +704 25 model.scoring_fct_norm 1.0 +704 25 optimizer.lr 0.01567493189979155 +704 25 training.batch_size 2.0 +704 25 training.label_smoothing 0.00928779196501389 +704 26 model.embedding_dim 2.0 +704 26 model.scoring_fct_norm 2.0 +704 26 optimizer.lr 0.0023330360129113726 +704 26 training.batch_size 0.0 +704 26 training.label_smoothing 0.004429794509387024 +704 27 model.embedding_dim 1.0 +704 27 model.scoring_fct_norm 2.0 +704 27 optimizer.lr 0.010877377171467691 +704 27 training.batch_size 0.0 +704 27 training.label_smoothing 0.7519301003662382 +704 28 model.embedding_dim 2.0 +704 28 model.scoring_fct_norm 2.0 +704 28 optimizer.lr 0.035642053197796716 +704 28 training.batch_size 0.0 +704 28 training.label_smoothing 0.002695631441073705 +704 29 model.embedding_dim 2.0 +704 29 model.scoring_fct_norm 2.0 +704 29 optimizer.lr 0.0011482721963223387 +704 29 training.batch_size 1.0 +704 29 training.label_smoothing 0.007147025320064612 +704 30 model.embedding_dim 1.0 +704 30 model.scoring_fct_norm 2.0 +704 30 optimizer.lr 0.02249288630441 +704 30 training.batch_size 0.0 +704 30 training.label_smoothing 0.0013053374437671805 +704 31 
model.embedding_dim 1.0 +704 31 model.scoring_fct_norm 2.0 +704 31 optimizer.lr 0.04453569171185976 +704 31 training.batch_size 0.0 +704 31 training.label_smoothing 0.00618323152432284 +704 32 model.embedding_dim 2.0 +704 32 model.scoring_fct_norm 1.0 +704 32 optimizer.lr 0.043412040037788056 +704 32 training.batch_size 2.0 +704 32 training.label_smoothing 0.003463127381394769 +704 33 model.embedding_dim 2.0 +704 33 model.scoring_fct_norm 2.0 +704 33 optimizer.lr 0.025076941171868154 +704 33 training.batch_size 0.0 +704 33 training.label_smoothing 0.5522656278288586 +704 34 model.embedding_dim 1.0 +704 34 model.scoring_fct_norm 1.0 +704 34 optimizer.lr 0.0017582220416881597 +704 34 training.batch_size 1.0 +704 34 training.label_smoothing 0.675558755895419 +704 35 model.embedding_dim 1.0 +704 35 model.scoring_fct_norm 2.0 +704 35 optimizer.lr 0.016154532716154624 +704 35 training.batch_size 1.0 +704 35 training.label_smoothing 0.05697741135634676 +704 36 model.embedding_dim 0.0 +704 36 model.scoring_fct_norm 2.0 +704 36 optimizer.lr 0.00341867728330167 +704 36 training.batch_size 2.0 +704 36 training.label_smoothing 0.0034712800273986764 +704 37 model.embedding_dim 2.0 +704 37 model.scoring_fct_norm 2.0 +704 37 optimizer.lr 0.021933977782352593 +704 37 training.batch_size 2.0 +704 37 training.label_smoothing 0.25942485785175545 +704 38 model.embedding_dim 2.0 +704 38 model.scoring_fct_norm 1.0 +704 38 optimizer.lr 0.0026069444792931585 +704 38 training.batch_size 0.0 +704 38 training.label_smoothing 0.006363389780237628 +704 39 model.embedding_dim 2.0 +704 39 model.scoring_fct_norm 2.0 +704 39 optimizer.lr 0.015438705971689823 +704 39 training.batch_size 0.0 +704 39 training.label_smoothing 0.02130487883314677 +704 40 model.embedding_dim 1.0 +704 40 model.scoring_fct_norm 2.0 +704 40 optimizer.lr 0.00788426276815716 +704 40 training.batch_size 0.0 +704 40 training.label_smoothing 0.10874651129262793 +704 41 model.embedding_dim 1.0 +704 41 model.scoring_fct_norm 1.0 
+704 41 optimizer.lr 0.09312568982567775 +704 41 training.batch_size 2.0 +704 41 training.label_smoothing 0.3216305379081254 +704 42 model.embedding_dim 1.0 +704 42 model.scoring_fct_norm 2.0 +704 42 optimizer.lr 0.006334969996726813 +704 42 training.batch_size 0.0 +704 42 training.label_smoothing 0.0013186535818522317 +704 43 model.embedding_dim 0.0 +704 43 model.scoring_fct_norm 2.0 +704 43 optimizer.lr 0.00489458375864006 +704 43 training.batch_size 2.0 +704 43 training.label_smoothing 0.014099830419435369 +704 44 model.embedding_dim 1.0 +704 44 model.scoring_fct_norm 1.0 +704 44 optimizer.lr 0.0011123427913343213 +704 44 training.batch_size 0.0 +704 44 training.label_smoothing 0.008458290465481794 +704 45 model.embedding_dim 2.0 +704 45 model.scoring_fct_norm 1.0 +704 45 optimizer.lr 0.0046556819751659845 +704 45 training.batch_size 1.0 +704 45 training.label_smoothing 0.0028184838188987545 +704 46 model.embedding_dim 2.0 +704 46 model.scoring_fct_norm 2.0 +704 46 optimizer.lr 0.03974451518254978 +704 46 training.batch_size 2.0 +704 46 training.label_smoothing 0.0011602036834285389 +704 47 model.embedding_dim 1.0 +704 47 model.scoring_fct_norm 2.0 +704 47 optimizer.lr 0.015248415022876457 +704 47 training.batch_size 1.0 +704 47 training.label_smoothing 0.3516273257076412 +704 48 model.embedding_dim 2.0 +704 48 model.scoring_fct_norm 1.0 +704 48 optimizer.lr 0.028906512059780412 +704 48 training.batch_size 1.0 +704 48 training.label_smoothing 0.010500701239783352 +704 49 model.embedding_dim 1.0 +704 49 model.scoring_fct_norm 1.0 +704 49 optimizer.lr 0.01339434237753434 +704 49 training.batch_size 0.0 +704 49 training.label_smoothing 0.017360038717001574 +704 50 model.embedding_dim 0.0 +704 50 model.scoring_fct_norm 2.0 +704 50 optimizer.lr 0.016763521733180402 +704 50 training.batch_size 2.0 +704 50 training.label_smoothing 0.24025435665342426 +704 51 model.embedding_dim 0.0 +704 51 model.scoring_fct_norm 2.0 +704 51 optimizer.lr 0.001578190140318615 +704 51 
training.batch_size 2.0 +704 51 training.label_smoothing 0.003188374094589031 +704 52 model.embedding_dim 1.0 +704 52 model.scoring_fct_norm 2.0 +704 52 optimizer.lr 0.004304411077943032 +704 52 training.batch_size 0.0 +704 52 training.label_smoothing 0.9694930072414818 +704 53 model.embedding_dim 1.0 +704 53 model.scoring_fct_norm 2.0 +704 53 optimizer.lr 0.017431914628916165 +704 53 training.batch_size 2.0 +704 53 training.label_smoothing 0.5499833547214031 +704 54 model.embedding_dim 0.0 +704 54 model.scoring_fct_norm 2.0 +704 54 optimizer.lr 0.015448090870188122 +704 54 training.batch_size 0.0 +704 54 training.label_smoothing 0.005118186122781095 +704 55 model.embedding_dim 2.0 +704 55 model.scoring_fct_norm 2.0 +704 55 optimizer.lr 0.008559341137410378 +704 55 training.batch_size 0.0 +704 55 training.label_smoothing 0.00173744393394959 +704 56 model.embedding_dim 1.0 +704 56 model.scoring_fct_norm 1.0 +704 56 optimizer.lr 0.0012203311376236105 +704 56 training.batch_size 2.0 +704 56 training.label_smoothing 0.0024208442712070366 +704 57 model.embedding_dim 2.0 +704 57 model.scoring_fct_norm 1.0 +704 57 optimizer.lr 0.03015755205700906 +704 57 training.batch_size 2.0 +704 57 training.label_smoothing 0.696422277424161 +704 58 model.embedding_dim 0.0 +704 58 model.scoring_fct_norm 2.0 +704 58 optimizer.lr 0.0023335556607238864 +704 58 training.batch_size 0.0 +704 58 training.label_smoothing 0.5362715248947016 +704 59 model.embedding_dim 2.0 +704 59 model.scoring_fct_norm 2.0 +704 59 optimizer.lr 0.013446812781291452 +704 59 training.batch_size 1.0 +704 59 training.label_smoothing 0.027843791950903075 +704 60 model.embedding_dim 0.0 +704 60 model.scoring_fct_norm 1.0 +704 60 optimizer.lr 0.0513814801958637 +704 60 training.batch_size 1.0 +704 60 training.label_smoothing 0.005865563546581211 +704 61 model.embedding_dim 1.0 +704 61 model.scoring_fct_norm 2.0 +704 61 optimizer.lr 0.0039346219745039525 +704 61 training.batch_size 0.0 +704 61 training.label_smoothing 
0.0021739768027514554 +704 62 model.embedding_dim 1.0 +704 62 model.scoring_fct_norm 2.0 +704 62 optimizer.lr 0.0020068037303896985 +704 62 training.batch_size 0.0 +704 62 training.label_smoothing 0.5607080286142677 +704 63 model.embedding_dim 2.0 +704 63 model.scoring_fct_norm 2.0 +704 63 optimizer.lr 0.06763558945222285 +704 63 training.batch_size 2.0 +704 63 training.label_smoothing 0.012222236688029903 +704 64 model.embedding_dim 1.0 +704 64 model.scoring_fct_norm 2.0 +704 64 optimizer.lr 0.0822715428762492 +704 64 training.batch_size 2.0 +704 64 training.label_smoothing 0.004251685616011417 +704 65 model.embedding_dim 2.0 +704 65 model.scoring_fct_norm 2.0 +704 65 optimizer.lr 0.0025349133849774876 +704 65 training.batch_size 1.0 +704 65 training.label_smoothing 0.06177950515823151 +704 66 model.embedding_dim 1.0 +704 66 model.scoring_fct_norm 1.0 +704 66 optimizer.lr 0.005307324544092565 +704 66 training.batch_size 1.0 +704 66 training.label_smoothing 0.41709947674560777 +704 67 model.embedding_dim 2.0 +704 67 model.scoring_fct_norm 1.0 +704 67 optimizer.lr 0.009064690006525235 +704 67 training.batch_size 2.0 +704 67 training.label_smoothing 0.02191508918391242 +704 68 model.embedding_dim 0.0 +704 68 model.scoring_fct_norm 1.0 +704 68 optimizer.lr 0.004412541335319079 +704 68 training.batch_size 1.0 +704 68 training.label_smoothing 0.16800356546095033 +704 69 model.embedding_dim 2.0 +704 69 model.scoring_fct_norm 2.0 +704 69 optimizer.lr 0.0055798618734689154 +704 69 training.batch_size 0.0 +704 69 training.label_smoothing 0.08818348150214068 +704 70 model.embedding_dim 2.0 +704 70 model.scoring_fct_norm 2.0 +704 70 optimizer.lr 0.023085140862281864 +704 70 training.batch_size 1.0 +704 70 training.label_smoothing 0.24143965038122264 +704 71 model.embedding_dim 2.0 +704 71 model.scoring_fct_norm 2.0 +704 71 optimizer.lr 0.00893199241657921 +704 71 training.batch_size 0.0 +704 71 training.label_smoothing 0.2006175465308179 +704 72 model.embedding_dim 1.0 +704 
72 model.scoring_fct_norm 1.0 +704 72 optimizer.lr 0.01595910333062612 +704 72 training.batch_size 0.0 +704 72 training.label_smoothing 0.0010102169358430263 +704 73 model.embedding_dim 1.0 +704 73 model.scoring_fct_norm 1.0 +704 73 optimizer.lr 0.025607324473211124 +704 73 training.batch_size 1.0 +704 73 training.label_smoothing 0.22999144021160553 +704 74 model.embedding_dim 0.0 +704 74 model.scoring_fct_norm 2.0 +704 74 optimizer.lr 0.0026568120952686933 +704 74 training.batch_size 2.0 +704 74 training.label_smoothing 0.006898999264630237 +704 75 model.embedding_dim 0.0 +704 75 model.scoring_fct_norm 1.0 +704 75 optimizer.lr 0.0022933325650169468 +704 75 training.batch_size 2.0 +704 75 training.label_smoothing 0.0013930842131787133 +704 76 model.embedding_dim 2.0 +704 76 model.scoring_fct_norm 1.0 +704 76 optimizer.lr 0.0041213575346097184 +704 76 training.batch_size 0.0 +704 76 training.label_smoothing 0.02552720078659876 +704 77 model.embedding_dim 1.0 +704 77 model.scoring_fct_norm 2.0 +704 77 optimizer.lr 0.002414808804917101 +704 77 training.batch_size 2.0 +704 77 training.label_smoothing 0.06152843550716986 +704 78 model.embedding_dim 2.0 +704 78 model.scoring_fct_norm 2.0 +704 78 optimizer.lr 0.0026988131774884918 +704 78 training.batch_size 2.0 +704 78 training.label_smoothing 0.004789507016946889 +704 79 model.embedding_dim 1.0 +704 79 model.scoring_fct_norm 1.0 +704 79 optimizer.lr 0.013177421488539773 +704 79 training.batch_size 2.0 +704 79 training.label_smoothing 0.0041953553975808975 +704 80 model.embedding_dim 2.0 +704 80 model.scoring_fct_norm 2.0 +704 80 optimizer.lr 0.02971978993193153 +704 80 training.batch_size 0.0 +704 80 training.label_smoothing 0.006248337669018985 +704 81 model.embedding_dim 0.0 +704 81 model.scoring_fct_norm 2.0 +704 81 optimizer.lr 0.02927847406059929 +704 81 training.batch_size 1.0 +704 81 training.label_smoothing 0.04229072134215762 +704 82 model.embedding_dim 0.0 +704 82 model.scoring_fct_norm 1.0 +704 82 
optimizer.lr 0.028971224088836595 +704 82 training.batch_size 0.0 +704 82 training.label_smoothing 0.05083466213251217 +704 83 model.embedding_dim 1.0 +704 83 model.scoring_fct_norm 1.0 +704 83 optimizer.lr 0.07326846454843672 +704 83 training.batch_size 1.0 +704 83 training.label_smoothing 0.7733387545259812 +704 84 model.embedding_dim 0.0 +704 84 model.scoring_fct_norm 1.0 +704 84 optimizer.lr 0.005182917782889573 +704 84 training.batch_size 0.0 +704 84 training.label_smoothing 0.5149895970589633 +704 85 model.embedding_dim 1.0 +704 85 model.scoring_fct_norm 1.0 +704 85 optimizer.lr 0.002629313955270682 +704 85 training.batch_size 1.0 +704 85 training.label_smoothing 0.13852524824570292 +704 86 model.embedding_dim 1.0 +704 86 model.scoring_fct_norm 1.0 +704 86 optimizer.lr 0.05838413274557446 +704 86 training.batch_size 1.0 +704 86 training.label_smoothing 0.05520935271977576 +704 87 model.embedding_dim 0.0 +704 87 model.scoring_fct_norm 1.0 +704 87 optimizer.lr 0.019006518661798557 +704 87 training.batch_size 1.0 +704 87 training.label_smoothing 0.5969530073440513 +704 88 model.embedding_dim 2.0 +704 88 model.scoring_fct_norm 2.0 +704 88 optimizer.lr 0.017100519777824983 +704 88 training.batch_size 0.0 +704 88 training.label_smoothing 0.0018242450417769133 +704 89 model.embedding_dim 2.0 +704 89 model.scoring_fct_norm 2.0 +704 89 optimizer.lr 0.026346276513666264 +704 89 training.batch_size 2.0 +704 89 training.label_smoothing 0.011239467477263766 +704 90 model.embedding_dim 2.0 +704 90 model.scoring_fct_norm 1.0 +704 90 optimizer.lr 0.0045571583271199366 +704 90 training.batch_size 1.0 +704 90 training.label_smoothing 0.0012870059922962809 +704 91 model.embedding_dim 1.0 +704 91 model.scoring_fct_norm 2.0 +704 91 optimizer.lr 0.07127633860529814 +704 91 training.batch_size 0.0 +704 91 training.label_smoothing 0.3631584222126276 +704 92 model.embedding_dim 1.0 +704 92 model.scoring_fct_norm 2.0 +704 92 optimizer.lr 0.0035775948163178878 +704 92 
training.batch_size 2.0 +704 92 training.label_smoothing 0.05654513088360255 +704 93 model.embedding_dim 0.0 +704 93 model.scoring_fct_norm 2.0 +704 93 optimizer.lr 0.0353368861251304 +704 93 training.batch_size 0.0 +704 93 training.label_smoothing 0.060360142169449925 +704 94 model.embedding_dim 1.0 +704 94 model.scoring_fct_norm 2.0 +704 94 optimizer.lr 0.009917158150404969 +704 94 training.batch_size 1.0 +704 94 training.label_smoothing 0.723856402964948 +704 95 model.embedding_dim 0.0 +704 95 model.scoring_fct_norm 2.0 +704 95 optimizer.lr 0.0014950288446936427 +704 95 training.batch_size 0.0 +704 95 training.label_smoothing 0.1315213900147166 +704 96 model.embedding_dim 1.0 +704 96 model.scoring_fct_norm 1.0 +704 96 optimizer.lr 0.0327284707581438 +704 96 training.batch_size 1.0 +704 96 training.label_smoothing 0.0034820713894592684 +704 97 model.embedding_dim 2.0 +704 97 model.scoring_fct_norm 1.0 +704 97 optimizer.lr 0.07048363392830016 +704 97 training.batch_size 2.0 +704 97 training.label_smoothing 0.21112896360957295 +704 98 model.embedding_dim 2.0 +704 98 model.scoring_fct_norm 2.0 +704 98 optimizer.lr 0.004133082382214844 +704 98 training.batch_size 2.0 +704 98 training.label_smoothing 0.3456215298113285 +704 99 model.embedding_dim 2.0 +704 99 model.scoring_fct_norm 1.0 +704 99 optimizer.lr 0.05067872471823832 +704 99 training.batch_size 0.0 +704 99 training.label_smoothing 0.058178023890016785 +704 100 model.embedding_dim 0.0 +704 100 model.scoring_fct_norm 2.0 +704 100 optimizer.lr 0.01836629578856847 +704 100 training.batch_size 2.0 +704 100 training.label_smoothing 0.2953690750817331 +704 1 dataset """kinships""" +704 1 model """structuredembedding""" +704 1 loss """crossentropy""" +704 1 regularizer """no""" +704 1 optimizer """adam""" +704 1 training_loop """lcwa""" +704 1 evaluator """rankbased""" +704 2 dataset """kinships""" +704 2 model """structuredembedding""" +704 2 loss """crossentropy""" +704 2 regularizer """no""" +704 2 optimizer 
"""adam""" +704 2 training_loop """lcwa""" +704 2 evaluator """rankbased""" +704 3 dataset """kinships""" +704 3 model """structuredembedding""" +704 3 loss """crossentropy""" +704 3 regularizer """no""" +704 3 optimizer """adam""" +704 3 training_loop """lcwa""" +704 3 evaluator """rankbased""" +704 4 dataset """kinships""" +704 4 model """structuredembedding""" +704 4 loss """crossentropy""" +704 4 regularizer """no""" +704 4 optimizer """adam""" +704 4 training_loop """lcwa""" +704 4 evaluator """rankbased""" +704 5 dataset """kinships""" +704 5 model """structuredembedding""" +704 5 loss """crossentropy""" +704 5 regularizer """no""" +704 5 optimizer """adam""" +704 5 training_loop """lcwa""" +704 5 evaluator """rankbased""" +704 6 dataset """kinships""" +704 6 model """structuredembedding""" +704 6 loss """crossentropy""" +704 6 regularizer """no""" +704 6 optimizer """adam""" +704 6 training_loop """lcwa""" +704 6 evaluator """rankbased""" +704 7 dataset """kinships""" +704 7 model """structuredembedding""" +704 7 loss """crossentropy""" +704 7 regularizer """no""" +704 7 optimizer """adam""" +704 7 training_loop """lcwa""" +704 7 evaluator """rankbased""" +704 8 dataset """kinships""" +704 8 model """structuredembedding""" +704 8 loss """crossentropy""" +704 8 regularizer """no""" +704 8 optimizer """adam""" +704 8 training_loop """lcwa""" +704 8 evaluator """rankbased""" +704 9 dataset """kinships""" +704 9 model """structuredembedding""" +704 9 loss """crossentropy""" +704 9 regularizer """no""" +704 9 optimizer """adam""" +704 9 training_loop """lcwa""" +704 9 evaluator """rankbased""" +704 10 dataset """kinships""" +704 10 model """structuredembedding""" +704 10 loss """crossentropy""" +704 10 regularizer """no""" +704 10 optimizer """adam""" +704 10 training_loop """lcwa""" +704 10 evaluator """rankbased""" +704 11 dataset """kinships""" +704 11 model """structuredembedding""" +704 11 loss """crossentropy""" +704 11 regularizer """no""" +704 11 
optimizer """adam""" +704 11 training_loop """lcwa""" +704 11 evaluator """rankbased""" +704 12 dataset """kinships""" +704 12 model """structuredembedding""" +704 12 loss """crossentropy""" +704 12 regularizer """no""" +704 12 optimizer """adam""" +704 12 training_loop """lcwa""" +704 12 evaluator """rankbased""" +704 13 dataset """kinships""" +704 13 model """structuredembedding""" +704 13 loss """crossentropy""" +704 13 regularizer """no""" +704 13 optimizer """adam""" +704 13 training_loop """lcwa""" +704 13 evaluator """rankbased""" +704 14 dataset """kinships""" +704 14 model """structuredembedding""" +704 14 loss """crossentropy""" +704 14 regularizer """no""" +704 14 optimizer """adam""" +704 14 training_loop """lcwa""" +704 14 evaluator """rankbased""" +704 15 dataset """kinships""" +704 15 model """structuredembedding""" +704 15 loss """crossentropy""" +704 15 regularizer """no""" +704 15 optimizer """adam""" +704 15 training_loop """lcwa""" +704 15 evaluator """rankbased""" +704 16 dataset """kinships""" +704 16 model """structuredembedding""" +704 16 loss """crossentropy""" +704 16 regularizer """no""" +704 16 optimizer """adam""" +704 16 training_loop """lcwa""" +704 16 evaluator """rankbased""" +704 17 dataset """kinships""" +704 17 model """structuredembedding""" +704 17 loss """crossentropy""" +704 17 regularizer """no""" +704 17 optimizer """adam""" +704 17 training_loop """lcwa""" +704 17 evaluator """rankbased""" +704 18 dataset """kinships""" +704 18 model """structuredembedding""" +704 18 loss """crossentropy""" +704 18 regularizer """no""" +704 18 optimizer """adam""" +704 18 training_loop """lcwa""" +704 18 evaluator """rankbased""" +704 19 dataset """kinships""" +704 19 model """structuredembedding""" +704 19 loss """crossentropy""" +704 19 regularizer """no""" +704 19 optimizer """adam""" +704 19 training_loop """lcwa""" +704 19 evaluator """rankbased""" +704 20 dataset """kinships""" +704 20 model """structuredembedding""" +704 20 loss 
"""crossentropy""" +704 20 regularizer """no""" +704 20 optimizer """adam""" +704 20 training_loop """lcwa""" +704 20 evaluator """rankbased""" +704 21 dataset """kinships""" +704 21 model """structuredembedding""" +704 21 loss """crossentropy""" +704 21 regularizer """no""" +704 21 optimizer """adam""" +704 21 training_loop """lcwa""" +704 21 evaluator """rankbased""" +704 22 dataset """kinships""" +704 22 model """structuredembedding""" +704 22 loss """crossentropy""" +704 22 regularizer """no""" +704 22 optimizer """adam""" +704 22 training_loop """lcwa""" +704 22 evaluator """rankbased""" +704 23 dataset """kinships""" +704 23 model """structuredembedding""" +704 23 loss """crossentropy""" +704 23 regularizer """no""" +704 23 optimizer """adam""" +704 23 training_loop """lcwa""" +704 23 evaluator """rankbased""" +704 24 dataset """kinships""" +704 24 model """structuredembedding""" +704 24 loss """crossentropy""" +704 24 regularizer """no""" +704 24 optimizer """adam""" +704 24 training_loop """lcwa""" +704 24 evaluator """rankbased""" +704 25 dataset """kinships""" +704 25 model """structuredembedding""" +704 25 loss """crossentropy""" +704 25 regularizer """no""" +704 25 optimizer """adam""" +704 25 training_loop """lcwa""" +704 25 evaluator """rankbased""" +704 26 dataset """kinships""" +704 26 model """structuredembedding""" +704 26 loss """crossentropy""" +704 26 regularizer """no""" +704 26 optimizer """adam""" +704 26 training_loop """lcwa""" +704 26 evaluator """rankbased""" +704 27 dataset """kinships""" +704 27 model """structuredembedding""" +704 27 loss """crossentropy""" +704 27 regularizer """no""" +704 27 optimizer """adam""" +704 27 training_loop """lcwa""" +704 27 evaluator """rankbased""" +704 28 dataset """kinships""" +704 28 model """structuredembedding""" +704 28 loss """crossentropy""" +704 28 regularizer """no""" +704 28 optimizer """adam""" +704 28 training_loop """lcwa""" +704 28 evaluator """rankbased""" +704 29 dataset """kinships""" 
+704 29 model """structuredembedding""" +704 29 loss """crossentropy""" +704 29 regularizer """no""" +704 29 optimizer """adam""" +704 29 training_loop """lcwa""" +704 29 evaluator """rankbased""" +704 30 dataset """kinships""" +704 30 model """structuredembedding""" +704 30 loss """crossentropy""" +704 30 regularizer """no""" +704 30 optimizer """adam""" +704 30 training_loop """lcwa""" +704 30 evaluator """rankbased""" +704 31 dataset """kinships""" +704 31 model """structuredembedding""" +704 31 loss """crossentropy""" +704 31 regularizer """no""" +704 31 optimizer """adam""" +704 31 training_loop """lcwa""" +704 31 evaluator """rankbased""" +704 32 dataset """kinships""" +704 32 model """structuredembedding""" +704 32 loss """crossentropy""" +704 32 regularizer """no""" +704 32 optimizer """adam""" +704 32 training_loop """lcwa""" +704 32 evaluator """rankbased""" +704 33 dataset """kinships""" +704 33 model """structuredembedding""" +704 33 loss """crossentropy""" +704 33 regularizer """no""" +704 33 optimizer """adam""" +704 33 training_loop """lcwa""" +704 33 evaluator """rankbased""" +704 34 dataset """kinships""" +704 34 model """structuredembedding""" +704 34 loss """crossentropy""" +704 34 regularizer """no""" +704 34 optimizer """adam""" +704 34 training_loop """lcwa""" +704 34 evaluator """rankbased""" +704 35 dataset """kinships""" +704 35 model """structuredembedding""" +704 35 loss """crossentropy""" +704 35 regularizer """no""" +704 35 optimizer """adam""" +704 35 training_loop """lcwa""" +704 35 evaluator """rankbased""" +704 36 dataset """kinships""" +704 36 model """structuredembedding""" +704 36 loss """crossentropy""" +704 36 regularizer """no""" +704 36 optimizer """adam""" +704 36 training_loop """lcwa""" +704 36 evaluator """rankbased""" +704 37 dataset """kinships""" +704 37 model """structuredembedding""" +704 37 loss """crossentropy""" +704 37 regularizer """no""" +704 37 optimizer """adam""" +704 37 training_loop """lcwa""" +704 37 
evaluator """rankbased""" +704 38 dataset """kinships""" +704 38 model """structuredembedding""" +704 38 loss """crossentropy""" +704 38 regularizer """no""" +704 38 optimizer """adam""" +704 38 training_loop """lcwa""" +704 38 evaluator """rankbased""" +704 39 dataset """kinships""" +704 39 model """structuredembedding""" +704 39 loss """crossentropy""" +704 39 regularizer """no""" +704 39 optimizer """adam""" +704 39 training_loop """lcwa""" +704 39 evaluator """rankbased""" +704 40 dataset """kinships""" +704 40 model """structuredembedding""" +704 40 loss """crossentropy""" +704 40 regularizer """no""" +704 40 optimizer """adam""" +704 40 training_loop """lcwa""" +704 40 evaluator """rankbased""" +704 41 dataset """kinships""" +704 41 model """structuredembedding""" +704 41 loss """crossentropy""" +704 41 regularizer """no""" +704 41 optimizer """adam""" +704 41 training_loop """lcwa""" +704 41 evaluator """rankbased""" +704 42 dataset """kinships""" +704 42 model """structuredembedding""" +704 42 loss """crossentropy""" +704 42 regularizer """no""" +704 42 optimizer """adam""" +704 42 training_loop """lcwa""" +704 42 evaluator """rankbased""" +704 43 dataset """kinships""" +704 43 model """structuredembedding""" +704 43 loss """crossentropy""" +704 43 regularizer """no""" +704 43 optimizer """adam""" +704 43 training_loop """lcwa""" +704 43 evaluator """rankbased""" +704 44 dataset """kinships""" +704 44 model """structuredembedding""" +704 44 loss """crossentropy""" +704 44 regularizer """no""" +704 44 optimizer """adam""" +704 44 training_loop """lcwa""" +704 44 evaluator """rankbased""" +704 45 dataset """kinships""" +704 45 model """structuredembedding""" +704 45 loss """crossentropy""" +704 45 regularizer """no""" +704 45 optimizer """adam""" +704 45 training_loop """lcwa""" +704 45 evaluator """rankbased""" +704 46 dataset """kinships""" +704 46 model """structuredembedding""" +704 46 loss """crossentropy""" +704 46 regularizer """no""" +704 46 optimizer 
"""adam""" +704 46 training_loop """lcwa""" +704 46 evaluator """rankbased""" +704 47 dataset """kinships""" +704 47 model """structuredembedding""" +704 47 loss """crossentropy""" +704 47 regularizer """no""" +704 47 optimizer """adam""" +704 47 training_loop """lcwa""" +704 47 evaluator """rankbased""" +704 48 dataset """kinships""" +704 48 model """structuredembedding""" +704 48 loss """crossentropy""" +704 48 regularizer """no""" +704 48 optimizer """adam""" +704 48 training_loop """lcwa""" +704 48 evaluator """rankbased""" +704 49 dataset """kinships""" +704 49 model """structuredembedding""" +704 49 loss """crossentropy""" +704 49 regularizer """no""" +704 49 optimizer """adam""" +704 49 training_loop """lcwa""" +704 49 evaluator """rankbased""" +704 50 dataset """kinships""" +704 50 model """structuredembedding""" +704 50 loss """crossentropy""" +704 50 regularizer """no""" +704 50 optimizer """adam""" +704 50 training_loop """lcwa""" +704 50 evaluator """rankbased""" +704 51 dataset """kinships""" +704 51 model """structuredembedding""" +704 51 loss """crossentropy""" +704 51 regularizer """no""" +704 51 optimizer """adam""" +704 51 training_loop """lcwa""" +704 51 evaluator """rankbased""" +704 52 dataset """kinships""" +704 52 model """structuredembedding""" +704 52 loss """crossentropy""" +704 52 regularizer """no""" +704 52 optimizer """adam""" +704 52 training_loop """lcwa""" +704 52 evaluator """rankbased""" +704 53 dataset """kinships""" +704 53 model """structuredembedding""" +704 53 loss """crossentropy""" +704 53 regularizer """no""" +704 53 optimizer """adam""" +704 53 training_loop """lcwa""" +704 53 evaluator """rankbased""" +704 54 dataset """kinships""" +704 54 model """structuredembedding""" +704 54 loss """crossentropy""" +704 54 regularizer """no""" +704 54 optimizer """adam""" +704 54 training_loop """lcwa""" +704 54 evaluator """rankbased""" +704 55 dataset """kinships""" +704 55 model """structuredembedding""" +704 55 loss 
"""crossentropy""" +704 55 regularizer """no""" +704 55 optimizer """adam""" +704 55 training_loop """lcwa""" +704 55 evaluator """rankbased""" +704 56 dataset """kinships""" +704 56 model """structuredembedding""" +704 56 loss """crossentropy""" +704 56 regularizer """no""" +704 56 optimizer """adam""" +704 56 training_loop """lcwa""" +704 56 evaluator """rankbased""" +704 57 dataset """kinships""" +704 57 model """structuredembedding""" +704 57 loss """crossentropy""" +704 57 regularizer """no""" +704 57 optimizer """adam""" +704 57 training_loop """lcwa""" +704 57 evaluator """rankbased""" +704 58 dataset """kinships""" +704 58 model """structuredembedding""" +704 58 loss """crossentropy""" +704 58 regularizer """no""" +704 58 optimizer """adam""" +704 58 training_loop """lcwa""" +704 58 evaluator """rankbased""" +704 59 dataset """kinships""" +704 59 model """structuredembedding""" +704 59 loss """crossentropy""" +704 59 regularizer """no""" +704 59 optimizer """adam""" +704 59 training_loop """lcwa""" +704 59 evaluator """rankbased""" +704 60 dataset """kinships""" +704 60 model """structuredembedding""" +704 60 loss """crossentropy""" +704 60 regularizer """no""" +704 60 optimizer """adam""" +704 60 training_loop """lcwa""" +704 60 evaluator """rankbased""" +704 61 dataset """kinships""" +704 61 model """structuredembedding""" +704 61 loss """crossentropy""" +704 61 regularizer """no""" +704 61 optimizer """adam""" +704 61 training_loop """lcwa""" +704 61 evaluator """rankbased""" +704 62 dataset """kinships""" +704 62 model """structuredembedding""" +704 62 loss """crossentropy""" +704 62 regularizer """no""" +704 62 optimizer """adam""" +704 62 training_loop """lcwa""" +704 62 evaluator """rankbased""" +704 63 dataset """kinships""" +704 63 model """structuredembedding""" +704 63 loss """crossentropy""" +704 63 regularizer """no""" +704 63 optimizer """adam""" +704 63 training_loop """lcwa""" +704 63 evaluator """rankbased""" +704 64 dataset """kinships""" 
+704 64 model """structuredembedding""" +704 64 loss """crossentropy""" +704 64 regularizer """no""" +704 64 optimizer """adam""" +704 64 training_loop """lcwa""" +704 64 evaluator """rankbased""" +704 65 dataset """kinships""" +704 65 model """structuredembedding""" +704 65 loss """crossentropy""" +704 65 regularizer """no""" +704 65 optimizer """adam""" +704 65 training_loop """lcwa""" +704 65 evaluator """rankbased""" +704 66 dataset """kinships""" +704 66 model """structuredembedding""" +704 66 loss """crossentropy""" +704 66 regularizer """no""" +704 66 optimizer """adam""" +704 66 training_loop """lcwa""" +704 66 evaluator """rankbased""" +704 67 dataset """kinships""" +704 67 model """structuredembedding""" +704 67 loss """crossentropy""" +704 67 regularizer """no""" +704 67 optimizer """adam""" +704 67 training_loop """lcwa""" +704 67 evaluator """rankbased""" +704 68 dataset """kinships""" +704 68 model """structuredembedding""" +704 68 loss """crossentropy""" +704 68 regularizer """no""" +704 68 optimizer """adam""" +704 68 training_loop """lcwa""" +704 68 evaluator """rankbased""" +704 69 dataset """kinships""" +704 69 model """structuredembedding""" +704 69 loss """crossentropy""" +704 69 regularizer """no""" +704 69 optimizer """adam""" +704 69 training_loop """lcwa""" +704 69 evaluator """rankbased""" +704 70 dataset """kinships""" +704 70 model """structuredembedding""" +704 70 loss """crossentropy""" +704 70 regularizer """no""" +704 70 optimizer """adam""" +704 70 training_loop """lcwa""" +704 70 evaluator """rankbased""" +704 71 dataset """kinships""" +704 71 model """structuredembedding""" +704 71 loss """crossentropy""" +704 71 regularizer """no""" +704 71 optimizer """adam""" +704 71 training_loop """lcwa""" +704 71 evaluator """rankbased""" +704 72 dataset """kinships""" +704 72 model """structuredembedding""" +704 72 loss """crossentropy""" +704 72 regularizer """no""" +704 72 optimizer """adam""" +704 72 training_loop """lcwa""" +704 72 
evaluator """rankbased""" +704 73 dataset """kinships""" +704 73 model """structuredembedding""" +704 73 loss """crossentropy""" +704 73 regularizer """no""" +704 73 optimizer """adam""" +704 73 training_loop """lcwa""" +704 73 evaluator """rankbased""" +704 74 dataset """kinships""" +704 74 model """structuredembedding""" +704 74 loss """crossentropy""" +704 74 regularizer """no""" +704 74 optimizer """adam""" +704 74 training_loop """lcwa""" +704 74 evaluator """rankbased""" +704 75 dataset """kinships""" +704 75 model """structuredembedding""" +704 75 loss """crossentropy""" +704 75 regularizer """no""" +704 75 optimizer """adam""" +704 75 training_loop """lcwa""" +704 75 evaluator """rankbased""" +704 76 dataset """kinships""" +704 76 model """structuredembedding""" +704 76 loss """crossentropy""" +704 76 regularizer """no""" +704 76 optimizer """adam""" +704 76 training_loop """lcwa""" +704 76 evaluator """rankbased""" +704 77 dataset """kinships""" +704 77 model """structuredembedding""" +704 77 loss """crossentropy""" +704 77 regularizer """no""" +704 77 optimizer """adam""" +704 77 training_loop """lcwa""" +704 77 evaluator """rankbased""" +704 78 dataset """kinships""" +704 78 model """structuredembedding""" +704 78 loss """crossentropy""" +704 78 regularizer """no""" +704 78 optimizer """adam""" +704 78 training_loop """lcwa""" +704 78 evaluator """rankbased""" +704 79 dataset """kinships""" +704 79 model """structuredembedding""" +704 79 loss """crossentropy""" +704 79 regularizer """no""" +704 79 optimizer """adam""" +704 79 training_loop """lcwa""" +704 79 evaluator """rankbased""" +704 80 dataset """kinships""" +704 80 model """structuredembedding""" +704 80 loss """crossentropy""" +704 80 regularizer """no""" +704 80 optimizer """adam""" +704 80 training_loop """lcwa""" +704 80 evaluator """rankbased""" +704 81 dataset """kinships""" +704 81 model """structuredembedding""" +704 81 loss """crossentropy""" +704 81 regularizer """no""" +704 81 optimizer 
"""adam""" +704 81 training_loop """lcwa""" +704 81 evaluator """rankbased""" +704 82 dataset """kinships""" +704 82 model """structuredembedding""" +704 82 loss """crossentropy""" +704 82 regularizer """no""" +704 82 optimizer """adam""" +704 82 training_loop """lcwa""" +704 82 evaluator """rankbased""" +704 83 dataset """kinships""" +704 83 model """structuredembedding""" +704 83 loss """crossentropy""" +704 83 regularizer """no""" +704 83 optimizer """adam""" +704 83 training_loop """lcwa""" +704 83 evaluator """rankbased""" +704 84 dataset """kinships""" +704 84 model """structuredembedding""" +704 84 loss """crossentropy""" +704 84 regularizer """no""" +704 84 optimizer """adam""" +704 84 training_loop """lcwa""" +704 84 evaluator """rankbased""" +704 85 dataset """kinships""" +704 85 model """structuredembedding""" +704 85 loss """crossentropy""" +704 85 regularizer """no""" +704 85 optimizer """adam""" +704 85 training_loop """lcwa""" +704 85 evaluator """rankbased""" +704 86 dataset """kinships""" +704 86 model """structuredembedding""" +704 86 loss """crossentropy""" +704 86 regularizer """no""" +704 86 optimizer """adam""" +704 86 training_loop """lcwa""" +704 86 evaluator """rankbased""" +704 87 dataset """kinships""" +704 87 model """structuredembedding""" +704 87 loss """crossentropy""" +704 87 regularizer """no""" +704 87 optimizer """adam""" +704 87 training_loop """lcwa""" +704 87 evaluator """rankbased""" +704 88 dataset """kinships""" +704 88 model """structuredembedding""" +704 88 loss """crossentropy""" +704 88 regularizer """no""" +704 88 optimizer """adam""" +704 88 training_loop """lcwa""" +704 88 evaluator """rankbased""" +704 89 dataset """kinships""" +704 89 model """structuredembedding""" +704 89 loss """crossentropy""" +704 89 regularizer """no""" +704 89 optimizer """adam""" +704 89 training_loop """lcwa""" +704 89 evaluator """rankbased""" +704 90 dataset """kinships""" +704 90 model """structuredembedding""" +704 90 loss 
"""crossentropy""" +704 90 regularizer """no""" +704 90 optimizer """adam""" +704 90 training_loop """lcwa""" +704 90 evaluator """rankbased""" +704 91 dataset """kinships""" +704 91 model """structuredembedding""" +704 91 loss """crossentropy""" +704 91 regularizer """no""" +704 91 optimizer """adam""" +704 91 training_loop """lcwa""" +704 91 evaluator """rankbased""" +704 92 dataset """kinships""" +704 92 model """structuredembedding""" +704 92 loss """crossentropy""" +704 92 regularizer """no""" +704 92 optimizer """adam""" +704 92 training_loop """lcwa""" +704 92 evaluator """rankbased""" +704 93 dataset """kinships""" +704 93 model """structuredembedding""" +704 93 loss """crossentropy""" +704 93 regularizer """no""" +704 93 optimizer """adam""" +704 93 training_loop """lcwa""" +704 93 evaluator """rankbased""" +704 94 dataset """kinships""" +704 94 model """structuredembedding""" +704 94 loss """crossentropy""" +704 94 regularizer """no""" +704 94 optimizer """adam""" +704 94 training_loop """lcwa""" +704 94 evaluator """rankbased""" +704 95 dataset """kinships""" +704 95 model """structuredembedding""" +704 95 loss """crossentropy""" +704 95 regularizer """no""" +704 95 optimizer """adam""" +704 95 training_loop """lcwa""" +704 95 evaluator """rankbased""" +704 96 dataset """kinships""" +704 96 model """structuredembedding""" +704 96 loss """crossentropy""" +704 96 regularizer """no""" +704 96 optimizer """adam""" +704 96 training_loop """lcwa""" +704 96 evaluator """rankbased""" +704 97 dataset """kinships""" +704 97 model """structuredembedding""" +704 97 loss """crossentropy""" +704 97 regularizer """no""" +704 97 optimizer """adam""" +704 97 training_loop """lcwa""" +704 97 evaluator """rankbased""" +704 98 dataset """kinships""" +704 98 model """structuredembedding""" +704 98 loss """crossentropy""" +704 98 regularizer """no""" +704 98 optimizer """adam""" +704 98 training_loop """lcwa""" +704 98 evaluator """rankbased""" +704 99 dataset """kinships""" 
+704 99 model """structuredembedding""" +704 99 loss """crossentropy""" +704 99 regularizer """no""" +704 99 optimizer """adam""" +704 99 training_loop """lcwa""" +704 99 evaluator """rankbased""" +704 100 dataset """kinships""" +704 100 model """structuredembedding""" +704 100 loss """crossentropy""" +704 100 regularizer """no""" +704 100 optimizer """adam""" +704 100 training_loop """lcwa""" +704 100 evaluator """rankbased""" +705 1 model.embedding_dim 0.0 +705 1 model.scoring_fct_norm 2.0 +705 1 optimizer.lr 0.0020632026468181707 +705 1 negative_sampler.num_negs_per_pos 53.0 +705 1 training.batch_size 2.0 +705 2 model.embedding_dim 2.0 +705 2 model.scoring_fct_norm 1.0 +705 2 optimizer.lr 0.008788063170213358 +705 2 negative_sampler.num_negs_per_pos 25.0 +705 2 training.batch_size 1.0 +705 3 model.embedding_dim 2.0 +705 3 model.scoring_fct_norm 2.0 +705 3 optimizer.lr 0.03642148586662618 +705 3 negative_sampler.num_negs_per_pos 37.0 +705 3 training.batch_size 1.0 +705 4 model.embedding_dim 0.0 +705 4 model.scoring_fct_norm 2.0 +705 4 optimizer.lr 0.017636500451822894 +705 4 negative_sampler.num_negs_per_pos 24.0 +705 4 training.batch_size 2.0 +705 5 model.embedding_dim 0.0 +705 5 model.scoring_fct_norm 1.0 +705 5 optimizer.lr 0.09271958937756306 +705 5 negative_sampler.num_negs_per_pos 97.0 +705 5 training.batch_size 2.0 +705 6 model.embedding_dim 2.0 +705 6 model.scoring_fct_norm 2.0 +705 6 optimizer.lr 0.09514130876100141 +705 6 negative_sampler.num_negs_per_pos 76.0 +705 6 training.batch_size 0.0 +705 7 model.embedding_dim 2.0 +705 7 model.scoring_fct_norm 1.0 +705 7 optimizer.lr 0.03829157611655893 +705 7 negative_sampler.num_negs_per_pos 5.0 +705 7 training.batch_size 2.0 +705 8 model.embedding_dim 1.0 +705 8 model.scoring_fct_norm 2.0 +705 8 optimizer.lr 0.0016921055689825932 +705 8 negative_sampler.num_negs_per_pos 93.0 +705 8 training.batch_size 1.0 +705 9 model.embedding_dim 2.0 +705 9 model.scoring_fct_norm 1.0 +705 9 optimizer.lr 0.08706134177140383 
+705 9 negative_sampler.num_negs_per_pos 59.0 +705 9 training.batch_size 2.0 +705 10 model.embedding_dim 1.0 +705 10 model.scoring_fct_norm 2.0 +705 10 optimizer.lr 0.05945509144972223 +705 10 negative_sampler.num_negs_per_pos 94.0 +705 10 training.batch_size 0.0 +705 11 model.embedding_dim 1.0 +705 11 model.scoring_fct_norm 2.0 +705 11 optimizer.lr 0.006923330585191204 +705 11 negative_sampler.num_negs_per_pos 23.0 +705 11 training.batch_size 2.0 +705 12 model.embedding_dim 2.0 +705 12 model.scoring_fct_norm 2.0 +705 12 optimizer.lr 0.010733428766023508 +705 12 negative_sampler.num_negs_per_pos 99.0 +705 12 training.batch_size 2.0 +705 13 model.embedding_dim 0.0 +705 13 model.scoring_fct_norm 1.0 +705 13 optimizer.lr 0.002713829225189306 +705 13 negative_sampler.num_negs_per_pos 73.0 +705 13 training.batch_size 1.0 +705 14 model.embedding_dim 2.0 +705 14 model.scoring_fct_norm 2.0 +705 14 optimizer.lr 0.02430387146393176 +705 14 negative_sampler.num_negs_per_pos 19.0 +705 14 training.batch_size 1.0 +705 15 model.embedding_dim 2.0 +705 15 model.scoring_fct_norm 2.0 +705 15 optimizer.lr 0.013528420426550088 +705 15 negative_sampler.num_negs_per_pos 70.0 +705 15 training.batch_size 1.0 +705 16 model.embedding_dim 0.0 +705 16 model.scoring_fct_norm 2.0 +705 16 optimizer.lr 0.015920199576296125 +705 16 negative_sampler.num_negs_per_pos 53.0 +705 16 training.batch_size 2.0 +705 17 model.embedding_dim 1.0 +705 17 model.scoring_fct_norm 1.0 +705 17 optimizer.lr 0.005930586383566045 +705 17 negative_sampler.num_negs_per_pos 25.0 +705 17 training.batch_size 1.0 +705 18 model.embedding_dim 2.0 +705 18 model.scoring_fct_norm 2.0 +705 18 optimizer.lr 0.03287464372929105 +705 18 negative_sampler.num_negs_per_pos 44.0 +705 18 training.batch_size 1.0 +705 19 model.embedding_dim 0.0 +705 19 model.scoring_fct_norm 1.0 +705 19 optimizer.lr 0.009411073213049808 +705 19 negative_sampler.num_negs_per_pos 39.0 +705 19 training.batch_size 2.0 +705 20 model.embedding_dim 2.0 +705 20 
model.scoring_fct_norm 2.0 +705 20 optimizer.lr 0.041137464023195916 +705 20 negative_sampler.num_negs_per_pos 63.0 +705 20 training.batch_size 0.0 +705 21 model.embedding_dim 0.0 +705 21 model.scoring_fct_norm 2.0 +705 21 optimizer.lr 0.007531164932035038 +705 21 negative_sampler.num_negs_per_pos 11.0 +705 21 training.batch_size 1.0 +705 22 model.embedding_dim 0.0 +705 22 model.scoring_fct_norm 2.0 +705 22 optimizer.lr 0.002046412155892674 +705 22 negative_sampler.num_negs_per_pos 84.0 +705 22 training.batch_size 0.0 +705 23 model.embedding_dim 2.0 +705 23 model.scoring_fct_norm 2.0 +705 23 optimizer.lr 0.03612986960529418 +705 23 negative_sampler.num_negs_per_pos 88.0 +705 23 training.batch_size 0.0 +705 24 model.embedding_dim 1.0 +705 24 model.scoring_fct_norm 2.0 +705 24 optimizer.lr 0.007259022176557511 +705 24 negative_sampler.num_negs_per_pos 79.0 +705 24 training.batch_size 0.0 +705 25 model.embedding_dim 0.0 +705 25 model.scoring_fct_norm 1.0 +705 25 optimizer.lr 0.003046021778574916 +705 25 negative_sampler.num_negs_per_pos 20.0 +705 25 training.batch_size 2.0 +705 26 model.embedding_dim 2.0 +705 26 model.scoring_fct_norm 2.0 +705 26 optimizer.lr 0.06356175088196572 +705 26 negative_sampler.num_negs_per_pos 92.0 +705 26 training.batch_size 0.0 +705 27 model.embedding_dim 1.0 +705 27 model.scoring_fct_norm 1.0 +705 27 optimizer.lr 0.00713095767742759 +705 27 negative_sampler.num_negs_per_pos 70.0 +705 27 training.batch_size 2.0 +705 28 model.embedding_dim 0.0 +705 28 model.scoring_fct_norm 2.0 +705 28 optimizer.lr 0.05345665825279664 +705 28 negative_sampler.num_negs_per_pos 6.0 +705 28 training.batch_size 0.0 +705 29 model.embedding_dim 0.0 +705 29 model.scoring_fct_norm 2.0 +705 29 optimizer.lr 0.004830721572046624 +705 29 negative_sampler.num_negs_per_pos 14.0 +705 29 training.batch_size 1.0 +705 30 model.embedding_dim 1.0 +705 30 model.scoring_fct_norm 1.0 +705 30 optimizer.lr 0.017862503463303932 +705 30 negative_sampler.num_negs_per_pos 55.0 +705 30 
training.batch_size 1.0 +705 31 model.embedding_dim 0.0 +705 31 model.scoring_fct_norm 1.0 +705 31 optimizer.lr 0.011965226353012197 +705 31 negative_sampler.num_negs_per_pos 44.0 +705 31 training.batch_size 0.0 +705 32 model.embedding_dim 0.0 +705 32 model.scoring_fct_norm 2.0 +705 32 optimizer.lr 0.02371699421702851 +705 32 negative_sampler.num_negs_per_pos 28.0 +705 32 training.batch_size 1.0 +705 33 model.embedding_dim 0.0 +705 33 model.scoring_fct_norm 1.0 +705 33 optimizer.lr 0.062202506759802295 +705 33 negative_sampler.num_negs_per_pos 17.0 +705 33 training.batch_size 1.0 +705 34 model.embedding_dim 2.0 +705 34 model.scoring_fct_norm 2.0 +705 34 optimizer.lr 0.0011396097630542476 +705 34 negative_sampler.num_negs_per_pos 32.0 +705 34 training.batch_size 0.0 +705 35 model.embedding_dim 0.0 +705 35 model.scoring_fct_norm 2.0 +705 35 optimizer.lr 0.052992118019570184 +705 35 negative_sampler.num_negs_per_pos 89.0 +705 35 training.batch_size 0.0 +705 36 model.embedding_dim 0.0 +705 36 model.scoring_fct_norm 2.0 +705 36 optimizer.lr 0.002735074498516245 +705 36 negative_sampler.num_negs_per_pos 68.0 +705 36 training.batch_size 0.0 +705 37 model.embedding_dim 0.0 +705 37 model.scoring_fct_norm 1.0 +705 37 optimizer.lr 0.019112583771354788 +705 37 negative_sampler.num_negs_per_pos 37.0 +705 37 training.batch_size 2.0 +705 38 model.embedding_dim 1.0 +705 38 model.scoring_fct_norm 1.0 +705 38 optimizer.lr 0.00683535817056198 +705 38 negative_sampler.num_negs_per_pos 29.0 +705 38 training.batch_size 0.0 +705 39 model.embedding_dim 1.0 +705 39 model.scoring_fct_norm 2.0 +705 39 optimizer.lr 0.029764816870087092 +705 39 negative_sampler.num_negs_per_pos 1.0 +705 39 training.batch_size 2.0 +705 40 model.embedding_dim 1.0 +705 40 model.scoring_fct_norm 1.0 +705 40 optimizer.lr 0.0032293665397326813 +705 40 negative_sampler.num_negs_per_pos 11.0 +705 40 training.batch_size 1.0 +705 41 model.embedding_dim 0.0 +705 41 model.scoring_fct_norm 1.0 +705 41 optimizer.lr 
0.016571128240854487 +705 41 negative_sampler.num_negs_per_pos 35.0 +705 41 training.batch_size 1.0 +705 42 model.embedding_dim 0.0 +705 42 model.scoring_fct_norm 1.0 +705 42 optimizer.lr 0.008747196677099197 +705 42 negative_sampler.num_negs_per_pos 78.0 +705 42 training.batch_size 0.0 +705 43 model.embedding_dim 1.0 +705 43 model.scoring_fct_norm 2.0 +705 43 optimizer.lr 0.09902789080566382 +705 43 negative_sampler.num_negs_per_pos 44.0 +705 43 training.batch_size 0.0 +705 44 model.embedding_dim 0.0 +705 44 model.scoring_fct_norm 1.0 +705 44 optimizer.lr 0.001090635517525277 +705 44 negative_sampler.num_negs_per_pos 84.0 +705 44 training.batch_size 0.0 +705 45 model.embedding_dim 2.0 +705 45 model.scoring_fct_norm 1.0 +705 45 optimizer.lr 0.006448002599863166 +705 45 negative_sampler.num_negs_per_pos 3.0 +705 45 training.batch_size 2.0 +705 46 model.embedding_dim 2.0 +705 46 model.scoring_fct_norm 1.0 +705 46 optimizer.lr 0.0047086696590910365 +705 46 negative_sampler.num_negs_per_pos 21.0 +705 46 training.batch_size 0.0 +705 47 model.embedding_dim 2.0 +705 47 model.scoring_fct_norm 1.0 +705 47 optimizer.lr 0.001666002438993839 +705 47 negative_sampler.num_negs_per_pos 35.0 +705 47 training.batch_size 1.0 +705 48 model.embedding_dim 0.0 +705 48 model.scoring_fct_norm 2.0 +705 48 optimizer.lr 0.05239953649062783 +705 48 negative_sampler.num_negs_per_pos 98.0 +705 48 training.batch_size 0.0 +705 49 model.embedding_dim 1.0 +705 49 model.scoring_fct_norm 2.0 +705 49 optimizer.lr 0.02929557060603943 +705 49 negative_sampler.num_negs_per_pos 11.0 +705 49 training.batch_size 0.0 +705 50 model.embedding_dim 0.0 +705 50 model.scoring_fct_norm 2.0 +705 50 optimizer.lr 0.001960000412189091 +705 50 negative_sampler.num_negs_per_pos 47.0 +705 50 training.batch_size 0.0 +705 51 model.embedding_dim 0.0 +705 51 model.scoring_fct_norm 1.0 +705 51 optimizer.lr 0.04220121100350099 +705 51 negative_sampler.num_negs_per_pos 94.0 +705 51 training.batch_size 2.0 +705 52 
model.embedding_dim 1.0 +705 52 model.scoring_fct_norm 2.0 +705 52 optimizer.lr 0.027544153557005804 +705 52 negative_sampler.num_negs_per_pos 69.0 +705 52 training.batch_size 1.0 +705 53 model.embedding_dim 0.0 +705 53 model.scoring_fct_norm 2.0 +705 53 optimizer.lr 0.0018022753232085872 +705 53 negative_sampler.num_negs_per_pos 53.0 +705 53 training.batch_size 2.0 +705 54 model.embedding_dim 0.0 +705 54 model.scoring_fct_norm 2.0 +705 54 optimizer.lr 0.08091899873090971 +705 54 negative_sampler.num_negs_per_pos 17.0 +705 54 training.batch_size 0.0 +705 55 model.embedding_dim 1.0 +705 55 model.scoring_fct_norm 2.0 +705 55 optimizer.lr 0.013697262709253248 +705 55 negative_sampler.num_negs_per_pos 70.0 +705 55 training.batch_size 2.0 +705 56 model.embedding_dim 2.0 +705 56 model.scoring_fct_norm 1.0 +705 56 optimizer.lr 0.002496617631592479 +705 56 negative_sampler.num_negs_per_pos 49.0 +705 56 training.batch_size 2.0 +705 57 model.embedding_dim 0.0 +705 57 model.scoring_fct_norm 2.0 +705 57 optimizer.lr 0.008760514291698796 +705 57 negative_sampler.num_negs_per_pos 45.0 +705 57 training.batch_size 1.0 +705 58 model.embedding_dim 2.0 +705 58 model.scoring_fct_norm 1.0 +705 58 optimizer.lr 0.001138975991496204 +705 58 negative_sampler.num_negs_per_pos 9.0 +705 58 training.batch_size 0.0 +705 59 model.embedding_dim 1.0 +705 59 model.scoring_fct_norm 1.0 +705 59 optimizer.lr 0.003866600222778898 +705 59 negative_sampler.num_negs_per_pos 24.0 +705 59 training.batch_size 0.0 +705 60 model.embedding_dim 2.0 +705 60 model.scoring_fct_norm 2.0 +705 60 optimizer.lr 0.005385492167263121 +705 60 negative_sampler.num_negs_per_pos 76.0 +705 60 training.batch_size 2.0 +705 61 model.embedding_dim 0.0 +705 61 model.scoring_fct_norm 2.0 +705 61 optimizer.lr 0.00337512342901123 +705 61 negative_sampler.num_negs_per_pos 17.0 +705 61 training.batch_size 0.0 +705 62 model.embedding_dim 0.0 +705 62 model.scoring_fct_norm 2.0 +705 62 optimizer.lr 0.0060858912373395425 +705 62 
negative_sampler.num_negs_per_pos 14.0 +705 62 training.batch_size 1.0 +705 63 model.embedding_dim 1.0 +705 63 model.scoring_fct_norm 2.0 +705 63 optimizer.lr 0.004049940633697595 +705 63 negative_sampler.num_negs_per_pos 32.0 +705 63 training.batch_size 1.0 +705 64 model.embedding_dim 1.0 +705 64 model.scoring_fct_norm 1.0 +705 64 optimizer.lr 0.0025102986772474675 +705 64 negative_sampler.num_negs_per_pos 1.0 +705 64 training.batch_size 0.0 +705 65 model.embedding_dim 1.0 +705 65 model.scoring_fct_norm 1.0 +705 65 optimizer.lr 0.009331267573544995 +705 65 negative_sampler.num_negs_per_pos 45.0 +705 65 training.batch_size 1.0 +705 66 model.embedding_dim 0.0 +705 66 model.scoring_fct_norm 1.0 +705 66 optimizer.lr 0.0423437673416012 +705 66 negative_sampler.num_negs_per_pos 26.0 +705 66 training.batch_size 2.0 +705 67 model.embedding_dim 2.0 +705 67 model.scoring_fct_norm 2.0 +705 67 optimizer.lr 0.015607185252508014 +705 67 negative_sampler.num_negs_per_pos 17.0 +705 67 training.batch_size 0.0 +705 68 model.embedding_dim 1.0 +705 68 model.scoring_fct_norm 2.0 +705 68 optimizer.lr 0.0015158163200217623 +705 68 negative_sampler.num_negs_per_pos 93.0 +705 68 training.batch_size 2.0 +705 69 model.embedding_dim 2.0 +705 69 model.scoring_fct_norm 2.0 +705 69 optimizer.lr 0.02024937170088155 +705 69 negative_sampler.num_negs_per_pos 23.0 +705 69 training.batch_size 1.0 +705 70 model.embedding_dim 2.0 +705 70 model.scoring_fct_norm 2.0 +705 70 optimizer.lr 0.05151815197534929 +705 70 negative_sampler.num_negs_per_pos 68.0 +705 70 training.batch_size 0.0 +705 71 model.embedding_dim 1.0 +705 71 model.scoring_fct_norm 1.0 +705 71 optimizer.lr 0.014414665169203577 +705 71 negative_sampler.num_negs_per_pos 67.0 +705 71 training.batch_size 2.0 +705 72 model.embedding_dim 1.0 +705 72 model.scoring_fct_norm 1.0 +705 72 optimizer.lr 0.07054487870317996 +705 72 negative_sampler.num_negs_per_pos 17.0 +705 72 training.batch_size 2.0 +705 73 model.embedding_dim 1.0 +705 73 
model.scoring_fct_norm 1.0 +705 73 optimizer.lr 0.005557738613181742 +705 73 negative_sampler.num_negs_per_pos 53.0 +705 73 training.batch_size 0.0 +705 74 model.embedding_dim 2.0 +705 74 model.scoring_fct_norm 2.0 +705 74 optimizer.lr 0.01655601559487944 +705 74 negative_sampler.num_negs_per_pos 42.0 +705 74 training.batch_size 2.0 +705 75 model.embedding_dim 1.0 +705 75 model.scoring_fct_norm 1.0 +705 75 optimizer.lr 0.004066561062026266 +705 75 negative_sampler.num_negs_per_pos 74.0 +705 75 training.batch_size 0.0 +705 76 model.embedding_dim 1.0 +705 76 model.scoring_fct_norm 2.0 +705 76 optimizer.lr 0.08293464886901507 +705 76 negative_sampler.num_negs_per_pos 54.0 +705 76 training.batch_size 2.0 +705 1 dataset """kinships""" +705 1 model """structuredembedding""" +705 1 loss """bceaftersigmoid""" +705 1 regularizer """no""" +705 1 optimizer """adam""" +705 1 training_loop """owa""" +705 1 negative_sampler """basic""" +705 1 evaluator """rankbased""" +705 2 dataset """kinships""" +705 2 model """structuredembedding""" +705 2 loss """bceaftersigmoid""" +705 2 regularizer """no""" +705 2 optimizer """adam""" +705 2 training_loop """owa""" +705 2 negative_sampler """basic""" +705 2 evaluator """rankbased""" +705 3 dataset """kinships""" +705 3 model """structuredembedding""" +705 3 loss """bceaftersigmoid""" +705 3 regularizer """no""" +705 3 optimizer """adam""" +705 3 training_loop """owa""" +705 3 negative_sampler """basic""" +705 3 evaluator """rankbased""" +705 4 dataset """kinships""" +705 4 model """structuredembedding""" +705 4 loss """bceaftersigmoid""" +705 4 regularizer """no""" +705 4 optimizer """adam""" +705 4 training_loop """owa""" +705 4 negative_sampler """basic""" +705 4 evaluator """rankbased""" +705 5 dataset """kinships""" +705 5 model """structuredembedding""" +705 5 loss """bceaftersigmoid""" +705 5 regularizer """no""" +705 5 optimizer """adam""" +705 5 training_loop """owa""" +705 5 negative_sampler """basic""" +705 5 evaluator 
"""rankbased""" +705 6 dataset """kinships""" +705 6 model """structuredembedding""" +705 6 loss """bceaftersigmoid""" +705 6 regularizer """no""" +705 6 optimizer """adam""" +705 6 training_loop """owa""" +705 6 negative_sampler """basic""" +705 6 evaluator """rankbased""" +705 7 dataset """kinships""" +705 7 model """structuredembedding""" +705 7 loss """bceaftersigmoid""" +705 7 regularizer """no""" +705 7 optimizer """adam""" +705 7 training_loop """owa""" +705 7 negative_sampler """basic""" +705 7 evaluator """rankbased""" +705 8 dataset """kinships""" +705 8 model """structuredembedding""" +705 8 loss """bceaftersigmoid""" +705 8 regularizer """no""" +705 8 optimizer """adam""" +705 8 training_loop """owa""" +705 8 negative_sampler """basic""" +705 8 evaluator """rankbased""" +705 9 dataset """kinships""" +705 9 model """structuredembedding""" +705 9 loss """bceaftersigmoid""" +705 9 regularizer """no""" +705 9 optimizer """adam""" +705 9 training_loop """owa""" +705 9 negative_sampler """basic""" +705 9 evaluator """rankbased""" +705 10 dataset """kinships""" +705 10 model """structuredembedding""" +705 10 loss """bceaftersigmoid""" +705 10 regularizer """no""" +705 10 optimizer """adam""" +705 10 training_loop """owa""" +705 10 negative_sampler """basic""" +705 10 evaluator """rankbased""" +705 11 dataset """kinships""" +705 11 model """structuredembedding""" +705 11 loss """bceaftersigmoid""" +705 11 regularizer """no""" +705 11 optimizer """adam""" +705 11 training_loop """owa""" +705 11 negative_sampler """basic""" +705 11 evaluator """rankbased""" +705 12 dataset """kinships""" +705 12 model """structuredembedding""" +705 12 loss """bceaftersigmoid""" +705 12 regularizer """no""" +705 12 optimizer """adam""" +705 12 training_loop """owa""" +705 12 negative_sampler """basic""" +705 12 evaluator """rankbased""" +705 13 dataset """kinships""" +705 13 model """structuredembedding""" +705 13 loss """bceaftersigmoid""" +705 13 regularizer """no""" +705 13 
optimizer """adam""" +705 13 training_loop """owa""" +705 13 negative_sampler """basic""" +705 13 evaluator """rankbased""" +705 14 dataset """kinships""" +705 14 model """structuredembedding""" +705 14 loss """bceaftersigmoid""" +705 14 regularizer """no""" +705 14 optimizer """adam""" +705 14 training_loop """owa""" +705 14 negative_sampler """basic""" +705 14 evaluator """rankbased""" +705 15 dataset """kinships""" +705 15 model """structuredembedding""" +705 15 loss """bceaftersigmoid""" +705 15 regularizer """no""" +705 15 optimizer """adam""" +705 15 training_loop """owa""" +705 15 negative_sampler """basic""" +705 15 evaluator """rankbased""" +705 16 dataset """kinships""" +705 16 model """structuredembedding""" +705 16 loss """bceaftersigmoid""" +705 16 regularizer """no""" +705 16 optimizer """adam""" +705 16 training_loop """owa""" +705 16 negative_sampler """basic""" +705 16 evaluator """rankbased""" +705 17 dataset """kinships""" +705 17 model """structuredembedding""" +705 17 loss """bceaftersigmoid""" +705 17 regularizer """no""" +705 17 optimizer """adam""" +705 17 training_loop """owa""" +705 17 negative_sampler """basic""" +705 17 evaluator """rankbased""" +705 18 dataset """kinships""" +705 18 model """structuredembedding""" +705 18 loss """bceaftersigmoid""" +705 18 regularizer """no""" +705 18 optimizer """adam""" +705 18 training_loop """owa""" +705 18 negative_sampler """basic""" +705 18 evaluator """rankbased""" +705 19 dataset """kinships""" +705 19 model """structuredembedding""" +705 19 loss """bceaftersigmoid""" +705 19 regularizer """no""" +705 19 optimizer """adam""" +705 19 training_loop """owa""" +705 19 negative_sampler """basic""" +705 19 evaluator """rankbased""" +705 20 dataset """kinships""" +705 20 model """structuredembedding""" +705 20 loss """bceaftersigmoid""" +705 20 regularizer """no""" +705 20 optimizer """adam""" +705 20 training_loop """owa""" +705 20 negative_sampler """basic""" +705 20 evaluator """rankbased""" +705 
21 dataset """kinships""" +705 21 model """structuredembedding""" +705 21 loss """bceaftersigmoid""" +705 21 regularizer """no""" +705 21 optimizer """adam""" +705 21 training_loop """owa""" +705 21 negative_sampler """basic""" +705 21 evaluator """rankbased""" +705 22 dataset """kinships""" +705 22 model """structuredembedding""" +705 22 loss """bceaftersigmoid""" +705 22 regularizer """no""" +705 22 optimizer """adam""" +705 22 training_loop """owa""" +705 22 negative_sampler """basic""" +705 22 evaluator """rankbased""" +705 23 dataset """kinships""" +705 23 model """structuredembedding""" +705 23 loss """bceaftersigmoid""" +705 23 regularizer """no""" +705 23 optimizer """adam""" +705 23 training_loop """owa""" +705 23 negative_sampler """basic""" +705 23 evaluator """rankbased""" +705 24 dataset """kinships""" +705 24 model """structuredembedding""" +705 24 loss """bceaftersigmoid""" +705 24 regularizer """no""" +705 24 optimizer """adam""" +705 24 training_loop """owa""" +705 24 negative_sampler """basic""" +705 24 evaluator """rankbased""" +705 25 dataset """kinships""" +705 25 model """structuredembedding""" +705 25 loss """bceaftersigmoid""" +705 25 regularizer """no""" +705 25 optimizer """adam""" +705 25 training_loop """owa""" +705 25 negative_sampler """basic""" +705 25 evaluator """rankbased""" +705 26 dataset """kinships""" +705 26 model """structuredembedding""" +705 26 loss """bceaftersigmoid""" +705 26 regularizer """no""" +705 26 optimizer """adam""" +705 26 training_loop """owa""" +705 26 negative_sampler """basic""" +705 26 evaluator """rankbased""" +705 27 dataset """kinships""" +705 27 model """structuredembedding""" +705 27 loss """bceaftersigmoid""" +705 27 regularizer """no""" +705 27 optimizer """adam""" +705 27 training_loop """owa""" +705 27 negative_sampler """basic""" +705 27 evaluator """rankbased""" +705 28 dataset """kinships""" +705 28 model """structuredembedding""" +705 28 loss """bceaftersigmoid""" +705 28 regularizer """no""" 
+705 28 optimizer """adam""" +705 28 training_loop """owa""" +705 28 negative_sampler """basic""" +705 28 evaluator """rankbased""" +705 29 dataset """kinships""" +705 29 model """structuredembedding""" +705 29 loss """bceaftersigmoid""" +705 29 regularizer """no""" +705 29 optimizer """adam""" +705 29 training_loop """owa""" +705 29 negative_sampler """basic""" +705 29 evaluator """rankbased""" +705 30 dataset """kinships""" +705 30 model """structuredembedding""" +705 30 loss """bceaftersigmoid""" +705 30 regularizer """no""" +705 30 optimizer """adam""" +705 30 training_loop """owa""" +705 30 negative_sampler """basic""" +705 30 evaluator """rankbased""" +705 31 dataset """kinships""" +705 31 model """structuredembedding""" +705 31 loss """bceaftersigmoid""" +705 31 regularizer """no""" +705 31 optimizer """adam""" +705 31 training_loop """owa""" +705 31 negative_sampler """basic""" +705 31 evaluator """rankbased""" +705 32 dataset """kinships""" +705 32 model """structuredembedding""" +705 32 loss """bceaftersigmoid""" +705 32 regularizer """no""" +705 32 optimizer """adam""" +705 32 training_loop """owa""" +705 32 negative_sampler """basic""" +705 32 evaluator """rankbased""" +705 33 dataset """kinships""" +705 33 model """structuredembedding""" +705 33 loss """bceaftersigmoid""" +705 33 regularizer """no""" +705 33 optimizer """adam""" +705 33 training_loop """owa""" +705 33 negative_sampler """basic""" +705 33 evaluator """rankbased""" +705 34 dataset """kinships""" +705 34 model """structuredembedding""" +705 34 loss """bceaftersigmoid""" +705 34 regularizer """no""" +705 34 optimizer """adam""" +705 34 training_loop """owa""" +705 34 negative_sampler """basic""" +705 34 evaluator """rankbased""" +705 35 dataset """kinships""" +705 35 model """structuredembedding""" +705 35 loss """bceaftersigmoid""" +705 35 regularizer """no""" +705 35 optimizer """adam""" +705 35 training_loop """owa""" +705 35 negative_sampler """basic""" +705 35 evaluator 
"""rankbased""" +705 36 dataset """kinships""" +705 36 model """structuredembedding""" +705 36 loss """bceaftersigmoid""" +705 36 regularizer """no""" +705 36 optimizer """adam""" +705 36 training_loop """owa""" +705 36 negative_sampler """basic""" +705 36 evaluator """rankbased""" +705 37 dataset """kinships""" +705 37 model """structuredembedding""" +705 37 loss """bceaftersigmoid""" +705 37 regularizer """no""" +705 37 optimizer """adam""" +705 37 training_loop """owa""" +705 37 negative_sampler """basic""" +705 37 evaluator """rankbased""" +705 38 dataset """kinships""" +705 38 model """structuredembedding""" +705 38 loss """bceaftersigmoid""" +705 38 regularizer """no""" +705 38 optimizer """adam""" +705 38 training_loop """owa""" +705 38 negative_sampler """basic""" +705 38 evaluator """rankbased""" +705 39 dataset """kinships""" +705 39 model """structuredembedding""" +705 39 loss """bceaftersigmoid""" +705 39 regularizer """no""" +705 39 optimizer """adam""" +705 39 training_loop """owa""" +705 39 negative_sampler """basic""" +705 39 evaluator """rankbased""" +705 40 dataset """kinships""" +705 40 model """structuredembedding""" +705 40 loss """bceaftersigmoid""" +705 40 regularizer """no""" +705 40 optimizer """adam""" +705 40 training_loop """owa""" +705 40 negative_sampler """basic""" +705 40 evaluator """rankbased""" +705 41 dataset """kinships""" +705 41 model """structuredembedding""" +705 41 loss """bceaftersigmoid""" +705 41 regularizer """no""" +705 41 optimizer """adam""" +705 41 training_loop """owa""" +705 41 negative_sampler """basic""" +705 41 evaluator """rankbased""" +705 42 dataset """kinships""" +705 42 model """structuredembedding""" +705 42 loss """bceaftersigmoid""" +705 42 regularizer """no""" +705 42 optimizer """adam""" +705 42 training_loop """owa""" +705 42 negative_sampler """basic""" +705 42 evaluator """rankbased""" +705 43 dataset """kinships""" +705 43 model """structuredembedding""" +705 43 loss """bceaftersigmoid""" +705 43 
regularizer """no""" +705 43 optimizer """adam""" +705 43 training_loop """owa""" +705 43 negative_sampler """basic""" +705 43 evaluator """rankbased""" +705 44 dataset """kinships""" +705 44 model """structuredembedding""" +705 44 loss """bceaftersigmoid""" +705 44 regularizer """no""" +705 44 optimizer """adam""" +705 44 training_loop """owa""" +705 44 negative_sampler """basic""" +705 44 evaluator """rankbased""" +705 45 dataset """kinships""" +705 45 model """structuredembedding""" +705 45 loss """bceaftersigmoid""" +705 45 regularizer """no""" +705 45 optimizer """adam""" +705 45 training_loop """owa""" +705 45 negative_sampler """basic""" +705 45 evaluator """rankbased""" +705 46 dataset """kinships""" +705 46 model """structuredembedding""" +705 46 loss """bceaftersigmoid""" +705 46 regularizer """no""" +705 46 optimizer """adam""" +705 46 training_loop """owa""" +705 46 negative_sampler """basic""" +705 46 evaluator """rankbased""" +705 47 dataset """kinships""" +705 47 model """structuredembedding""" +705 47 loss """bceaftersigmoid""" +705 47 regularizer """no""" +705 47 optimizer """adam""" +705 47 training_loop """owa""" +705 47 negative_sampler """basic""" +705 47 evaluator """rankbased""" +705 48 dataset """kinships""" +705 48 model """structuredembedding""" +705 48 loss """bceaftersigmoid""" +705 48 regularizer """no""" +705 48 optimizer """adam""" +705 48 training_loop """owa""" +705 48 negative_sampler """basic""" +705 48 evaluator """rankbased""" +705 49 dataset """kinships""" +705 49 model """structuredembedding""" +705 49 loss """bceaftersigmoid""" +705 49 regularizer """no""" +705 49 optimizer """adam""" +705 49 training_loop """owa""" +705 49 negative_sampler """basic""" +705 49 evaluator """rankbased""" +705 50 dataset """kinships""" +705 50 model """structuredembedding""" +705 50 loss """bceaftersigmoid""" +705 50 regularizer """no""" +705 50 optimizer """adam""" +705 50 training_loop """owa""" +705 50 negative_sampler """basic""" +705 50 
evaluator """rankbased""" +705 51 dataset """kinships""" +705 51 model """structuredembedding""" +705 51 loss """bceaftersigmoid""" +705 51 regularizer """no""" +705 51 optimizer """adam""" +705 51 training_loop """owa""" +705 51 negative_sampler """basic""" +705 51 evaluator """rankbased""" +705 52 dataset """kinships""" +705 52 model """structuredembedding""" +705 52 loss """bceaftersigmoid""" +705 52 regularizer """no""" +705 52 optimizer """adam""" +705 52 training_loop """owa""" +705 52 negative_sampler """basic""" +705 52 evaluator """rankbased""" +705 53 dataset """kinships""" +705 53 model """structuredembedding""" +705 53 loss """bceaftersigmoid""" +705 53 regularizer """no""" +705 53 optimizer """adam""" +705 53 training_loop """owa""" +705 53 negative_sampler """basic""" +705 53 evaluator """rankbased""" +705 54 dataset """kinships""" +705 54 model """structuredembedding""" +705 54 loss """bceaftersigmoid""" +705 54 regularizer """no""" +705 54 optimizer """adam""" +705 54 training_loop """owa""" +705 54 negative_sampler """basic""" +705 54 evaluator """rankbased""" +705 55 dataset """kinships""" +705 55 model """structuredembedding""" +705 55 loss """bceaftersigmoid""" +705 55 regularizer """no""" +705 55 optimizer """adam""" +705 55 training_loop """owa""" +705 55 negative_sampler """basic""" +705 55 evaluator """rankbased""" +705 56 dataset """kinships""" +705 56 model """structuredembedding""" +705 56 loss """bceaftersigmoid""" +705 56 regularizer """no""" +705 56 optimizer """adam""" +705 56 training_loop """owa""" +705 56 negative_sampler """basic""" +705 56 evaluator """rankbased""" +705 57 dataset """kinships""" +705 57 model """structuredembedding""" +705 57 loss """bceaftersigmoid""" +705 57 regularizer """no""" +705 57 optimizer """adam""" +705 57 training_loop """owa""" +705 57 negative_sampler """basic""" +705 57 evaluator """rankbased""" +705 58 dataset """kinships""" +705 58 model """structuredembedding""" +705 58 loss 
"""bceaftersigmoid""" +705 58 regularizer """no""" +705 58 optimizer """adam""" +705 58 training_loop """owa""" +705 58 negative_sampler """basic""" +705 58 evaluator """rankbased""" +705 59 dataset """kinships""" +705 59 model """structuredembedding""" +705 59 loss """bceaftersigmoid""" +705 59 regularizer """no""" +705 59 optimizer """adam""" +705 59 training_loop """owa""" +705 59 negative_sampler """basic""" +705 59 evaluator """rankbased""" +705 60 dataset """kinships""" +705 60 model """structuredembedding""" +705 60 loss """bceaftersigmoid""" +705 60 regularizer """no""" +705 60 optimizer """adam""" +705 60 training_loop """owa""" +705 60 negative_sampler """basic""" +705 60 evaluator """rankbased""" +705 61 dataset """kinships""" +705 61 model """structuredembedding""" +705 61 loss """bceaftersigmoid""" +705 61 regularizer """no""" +705 61 optimizer """adam""" +705 61 training_loop """owa""" +705 61 negative_sampler """basic""" +705 61 evaluator """rankbased""" +705 62 dataset """kinships""" +705 62 model """structuredembedding""" +705 62 loss """bceaftersigmoid""" +705 62 regularizer """no""" +705 62 optimizer """adam""" +705 62 training_loop """owa""" +705 62 negative_sampler """basic""" +705 62 evaluator """rankbased""" +705 63 dataset """kinships""" +705 63 model """structuredembedding""" +705 63 loss """bceaftersigmoid""" +705 63 regularizer """no""" +705 63 optimizer """adam""" +705 63 training_loop """owa""" +705 63 negative_sampler """basic""" +705 63 evaluator """rankbased""" +705 64 dataset """kinships""" +705 64 model """structuredembedding""" +705 64 loss """bceaftersigmoid""" +705 64 regularizer """no""" +705 64 optimizer """adam""" +705 64 training_loop """owa""" +705 64 negative_sampler """basic""" +705 64 evaluator """rankbased""" +705 65 dataset """kinships""" +705 65 model """structuredembedding""" +705 65 loss """bceaftersigmoid""" +705 65 regularizer """no""" +705 65 optimizer """adam""" +705 65 training_loop """owa""" +705 65 
negative_sampler """basic""" +705 65 evaluator """rankbased""" +705 66 dataset """kinships""" +705 66 model """structuredembedding""" +705 66 loss """bceaftersigmoid""" +705 66 regularizer """no""" +705 66 optimizer """adam""" +705 66 training_loop """owa""" +705 66 negative_sampler """basic""" +705 66 evaluator """rankbased""" +705 67 dataset """kinships""" +705 67 model """structuredembedding""" +705 67 loss """bceaftersigmoid""" +705 67 regularizer """no""" +705 67 optimizer """adam""" +705 67 training_loop """owa""" +705 67 negative_sampler """basic""" +705 67 evaluator """rankbased""" +705 68 dataset """kinships""" +705 68 model """structuredembedding""" +705 68 loss """bceaftersigmoid""" +705 68 regularizer """no""" +705 68 optimizer """adam""" +705 68 training_loop """owa""" +705 68 negative_sampler """basic""" +705 68 evaluator """rankbased""" +705 69 dataset """kinships""" +705 69 model """structuredembedding""" +705 69 loss """bceaftersigmoid""" +705 69 regularizer """no""" +705 69 optimizer """adam""" +705 69 training_loop """owa""" +705 69 negative_sampler """basic""" +705 69 evaluator """rankbased""" +705 70 dataset """kinships""" +705 70 model """structuredembedding""" +705 70 loss """bceaftersigmoid""" +705 70 regularizer """no""" +705 70 optimizer """adam""" +705 70 training_loop """owa""" +705 70 negative_sampler """basic""" +705 70 evaluator """rankbased""" +705 71 dataset """kinships""" +705 71 model """structuredembedding""" +705 71 loss """bceaftersigmoid""" +705 71 regularizer """no""" +705 71 optimizer """adam""" +705 71 training_loop """owa""" +705 71 negative_sampler """basic""" +705 71 evaluator """rankbased""" +705 72 dataset """kinships""" +705 72 model """structuredembedding""" +705 72 loss """bceaftersigmoid""" +705 72 regularizer """no""" +705 72 optimizer """adam""" +705 72 training_loop """owa""" +705 72 negative_sampler """basic""" +705 72 evaluator """rankbased""" +705 73 dataset """kinships""" +705 73 model 
"""structuredembedding""" +705 73 loss """bceaftersigmoid""" +705 73 regularizer """no""" +705 73 optimizer """adam""" +705 73 training_loop """owa""" +705 73 negative_sampler """basic""" +705 73 evaluator """rankbased""" +705 74 dataset """kinships""" +705 74 model """structuredembedding""" +705 74 loss """bceaftersigmoid""" +705 74 regularizer """no""" +705 74 optimizer """adam""" +705 74 training_loop """owa""" +705 74 negative_sampler """basic""" +705 74 evaluator """rankbased""" +705 75 dataset """kinships""" +705 75 model """structuredembedding""" +705 75 loss """bceaftersigmoid""" +705 75 regularizer """no""" +705 75 optimizer """adam""" +705 75 training_loop """owa""" +705 75 negative_sampler """basic""" +705 75 evaluator """rankbased""" +705 76 dataset """kinships""" +705 76 model """structuredembedding""" +705 76 loss """bceaftersigmoid""" +705 76 regularizer """no""" +705 76 optimizer """adam""" +705 76 training_loop """owa""" +705 76 negative_sampler """basic""" +705 76 evaluator """rankbased""" +706 1 model.embedding_dim 2.0 +706 1 model.scoring_fct_norm 1.0 +706 1 optimizer.lr 0.00828447626601327 +706 1 negative_sampler.num_negs_per_pos 86.0 +706 1 training.batch_size 0.0 +706 2 model.embedding_dim 0.0 +706 2 model.scoring_fct_norm 1.0 +706 2 optimizer.lr 0.007727660788667487 +706 2 negative_sampler.num_negs_per_pos 47.0 +706 2 training.batch_size 1.0 +706 3 model.embedding_dim 0.0 +706 3 model.scoring_fct_norm 1.0 +706 3 optimizer.lr 0.0010081697407698827 +706 3 negative_sampler.num_negs_per_pos 16.0 +706 3 training.batch_size 0.0 +706 4 model.embedding_dim 2.0 +706 4 model.scoring_fct_norm 1.0 +706 4 optimizer.lr 0.0028111453650880597 +706 4 negative_sampler.num_negs_per_pos 16.0 +706 4 training.batch_size 1.0 +706 5 model.embedding_dim 2.0 +706 5 model.scoring_fct_norm 1.0 +706 5 optimizer.lr 0.019500648876298068 +706 5 negative_sampler.num_negs_per_pos 21.0 +706 5 training.batch_size 0.0 +706 6 model.embedding_dim 0.0 +706 6 model.scoring_fct_norm 
2.0 +706 6 optimizer.lr 0.03365464791210958 +706 6 negative_sampler.num_negs_per_pos 63.0 +706 6 training.batch_size 1.0 +706 7 model.embedding_dim 0.0 +706 7 model.scoring_fct_norm 1.0 +706 7 optimizer.lr 0.001697858866554659 +706 7 negative_sampler.num_negs_per_pos 72.0 +706 7 training.batch_size 2.0 +706 8 model.embedding_dim 0.0 +706 8 model.scoring_fct_norm 1.0 +706 8 optimizer.lr 0.0501262152543891 +706 8 negative_sampler.num_negs_per_pos 60.0 +706 8 training.batch_size 0.0 +706 9 model.embedding_dim 1.0 +706 9 model.scoring_fct_norm 2.0 +706 9 optimizer.lr 0.003319984443692036 +706 9 negative_sampler.num_negs_per_pos 8.0 +706 9 training.batch_size 0.0 +706 10 model.embedding_dim 1.0 +706 10 model.scoring_fct_norm 2.0 +706 10 optimizer.lr 0.00601942513740499 +706 10 negative_sampler.num_negs_per_pos 17.0 +706 10 training.batch_size 1.0 +706 11 model.embedding_dim 1.0 +706 11 model.scoring_fct_norm 1.0 +706 11 optimizer.lr 0.038886355239767544 +706 11 negative_sampler.num_negs_per_pos 80.0 +706 11 training.batch_size 1.0 +706 12 model.embedding_dim 2.0 +706 12 model.scoring_fct_norm 1.0 +706 12 optimizer.lr 0.013815554647808283 +706 12 negative_sampler.num_negs_per_pos 70.0 +706 12 training.batch_size 1.0 +706 13 model.embedding_dim 2.0 +706 13 model.scoring_fct_norm 2.0 +706 13 optimizer.lr 0.0011429151256093337 +706 13 negative_sampler.num_negs_per_pos 46.0 +706 13 training.batch_size 0.0 +706 14 model.embedding_dim 1.0 +706 14 model.scoring_fct_norm 2.0 +706 14 optimizer.lr 0.06716424631722813 +706 14 negative_sampler.num_negs_per_pos 51.0 +706 14 training.batch_size 0.0 +706 15 model.embedding_dim 1.0 +706 15 model.scoring_fct_norm 1.0 +706 15 optimizer.lr 0.031338263084090164 +706 15 negative_sampler.num_negs_per_pos 78.0 +706 15 training.batch_size 2.0 +706 16 model.embedding_dim 1.0 +706 16 model.scoring_fct_norm 2.0 +706 16 optimizer.lr 0.05710466373910357 +706 16 negative_sampler.num_negs_per_pos 52.0 +706 16 training.batch_size 2.0 +706 17 
model.embedding_dim 1.0 +706 17 model.scoring_fct_norm 2.0 +706 17 optimizer.lr 0.0013413284349173065 +706 17 negative_sampler.num_negs_per_pos 33.0 +706 17 training.batch_size 1.0 +706 18 model.embedding_dim 2.0 +706 18 model.scoring_fct_norm 1.0 +706 18 optimizer.lr 0.02188771061229163 +706 18 negative_sampler.num_negs_per_pos 41.0 +706 18 training.batch_size 1.0 +706 19 model.embedding_dim 0.0 +706 19 model.scoring_fct_norm 2.0 +706 19 optimizer.lr 0.0012909015700515736 +706 19 negative_sampler.num_negs_per_pos 32.0 +706 19 training.batch_size 0.0 +706 20 model.embedding_dim 1.0 +706 20 model.scoring_fct_norm 1.0 +706 20 optimizer.lr 0.0876585583779341 +706 20 negative_sampler.num_negs_per_pos 10.0 +706 20 training.batch_size 1.0 +706 21 model.embedding_dim 2.0 +706 21 model.scoring_fct_norm 1.0 +706 21 optimizer.lr 0.0034050701973002964 +706 21 negative_sampler.num_negs_per_pos 85.0 +706 21 training.batch_size 0.0 +706 22 model.embedding_dim 0.0 +706 22 model.scoring_fct_norm 1.0 +706 22 optimizer.lr 0.00210776101928904 +706 22 negative_sampler.num_negs_per_pos 43.0 +706 22 training.batch_size 1.0 +706 23 model.embedding_dim 0.0 +706 23 model.scoring_fct_norm 1.0 +706 23 optimizer.lr 0.023721397852903017 +706 23 negative_sampler.num_negs_per_pos 70.0 +706 23 training.batch_size 1.0 +706 24 model.embedding_dim 2.0 +706 24 model.scoring_fct_norm 1.0 +706 24 optimizer.lr 0.028386105193650427 +706 24 negative_sampler.num_negs_per_pos 6.0 +706 24 training.batch_size 0.0 +706 25 model.embedding_dim 1.0 +706 25 model.scoring_fct_norm 1.0 +706 25 optimizer.lr 0.0160470825094249 +706 25 negative_sampler.num_negs_per_pos 34.0 +706 25 training.batch_size 1.0 +706 26 model.embedding_dim 2.0 +706 26 model.scoring_fct_norm 1.0 +706 26 optimizer.lr 0.02570660766029251 +706 26 negative_sampler.num_negs_per_pos 85.0 +706 26 training.batch_size 0.0 +706 27 model.embedding_dim 1.0 +706 27 model.scoring_fct_norm 1.0 +706 27 optimizer.lr 0.003483196153529843 +706 27 
negative_sampler.num_negs_per_pos 12.0 +706 27 training.batch_size 2.0 +706 28 model.embedding_dim 1.0 +706 28 model.scoring_fct_norm 1.0 +706 28 optimizer.lr 0.0015350895193514214 +706 28 negative_sampler.num_negs_per_pos 24.0 +706 28 training.batch_size 1.0 +706 29 model.embedding_dim 2.0 +706 29 model.scoring_fct_norm 1.0 +706 29 optimizer.lr 0.08537696584502516 +706 29 negative_sampler.num_negs_per_pos 6.0 +706 29 training.batch_size 2.0 +706 30 model.embedding_dim 2.0 +706 30 model.scoring_fct_norm 2.0 +706 30 optimizer.lr 0.002530939301933576 +706 30 negative_sampler.num_negs_per_pos 37.0 +706 30 training.batch_size 2.0 +706 31 model.embedding_dim 0.0 +706 31 model.scoring_fct_norm 1.0 +706 31 optimizer.lr 0.0070354830776859344 +706 31 negative_sampler.num_negs_per_pos 84.0 +706 31 training.batch_size 0.0 +706 32 model.embedding_dim 2.0 +706 32 model.scoring_fct_norm 1.0 +706 32 optimizer.lr 0.08854849420530397 +706 32 negative_sampler.num_negs_per_pos 39.0 +706 32 training.batch_size 2.0 +706 33 model.embedding_dim 0.0 +706 33 model.scoring_fct_norm 2.0 +706 33 optimizer.lr 0.012114595052580222 +706 33 negative_sampler.num_negs_per_pos 61.0 +706 33 training.batch_size 1.0 +706 34 model.embedding_dim 2.0 +706 34 model.scoring_fct_norm 2.0 +706 34 optimizer.lr 0.029493243081533067 +706 34 negative_sampler.num_negs_per_pos 8.0 +706 34 training.batch_size 2.0 +706 35 model.embedding_dim 2.0 +706 35 model.scoring_fct_norm 1.0 +706 35 optimizer.lr 0.012001199243131443 +706 35 negative_sampler.num_negs_per_pos 1.0 +706 35 training.batch_size 2.0 +706 36 model.embedding_dim 2.0 +706 36 model.scoring_fct_norm 1.0 +706 36 optimizer.lr 0.04652761384600214 +706 36 negative_sampler.num_negs_per_pos 39.0 +706 36 training.batch_size 2.0 +706 37 model.embedding_dim 1.0 +706 37 model.scoring_fct_norm 2.0 +706 37 optimizer.lr 0.02273436281171545 +706 37 negative_sampler.num_negs_per_pos 5.0 +706 37 training.batch_size 1.0 +706 38 model.embedding_dim 0.0 +706 38 
model.scoring_fct_norm 1.0 +706 38 optimizer.lr 0.0033363539044970088 +706 38 negative_sampler.num_negs_per_pos 71.0 +706 38 training.batch_size 0.0 +706 39 model.embedding_dim 1.0 +706 39 model.scoring_fct_norm 2.0 +706 39 optimizer.lr 0.0012074602290941651 +706 39 negative_sampler.num_negs_per_pos 93.0 +706 39 training.batch_size 2.0 +706 40 model.embedding_dim 2.0 +706 40 model.scoring_fct_norm 2.0 +706 40 optimizer.lr 0.07448992724785618 +706 40 negative_sampler.num_negs_per_pos 72.0 +706 40 training.batch_size 1.0 +706 41 model.embedding_dim 2.0 +706 41 model.scoring_fct_norm 1.0 +706 41 optimizer.lr 0.011297945735392282 +706 41 negative_sampler.num_negs_per_pos 43.0 +706 41 training.batch_size 2.0 +706 42 model.embedding_dim 0.0 +706 42 model.scoring_fct_norm 1.0 +706 42 optimizer.lr 0.0010588323239441226 +706 42 negative_sampler.num_negs_per_pos 43.0 +706 42 training.batch_size 1.0 +706 43 model.embedding_dim 0.0 +706 43 model.scoring_fct_norm 2.0 +706 43 optimizer.lr 0.003844671757100975 +706 43 negative_sampler.num_negs_per_pos 44.0 +706 43 training.batch_size 1.0 +706 44 model.embedding_dim 2.0 +706 44 model.scoring_fct_norm 1.0 +706 44 optimizer.lr 0.0019296976617415999 +706 44 negative_sampler.num_negs_per_pos 45.0 +706 44 training.batch_size 2.0 +706 45 model.embedding_dim 1.0 +706 45 model.scoring_fct_norm 1.0 +706 45 optimizer.lr 0.020981515001512352 +706 45 negative_sampler.num_negs_per_pos 28.0 +706 45 training.batch_size 2.0 +706 46 model.embedding_dim 0.0 +706 46 model.scoring_fct_norm 2.0 +706 46 optimizer.lr 0.001639299249230606 +706 46 negative_sampler.num_negs_per_pos 65.0 +706 46 training.batch_size 2.0 +706 47 model.embedding_dim 0.0 +706 47 model.scoring_fct_norm 2.0 +706 47 optimizer.lr 0.0015906143184084124 +706 47 negative_sampler.num_negs_per_pos 17.0 +706 47 training.batch_size 1.0 +706 48 model.embedding_dim 0.0 +706 48 model.scoring_fct_norm 2.0 +706 48 optimizer.lr 0.05388469817981991 +706 48 negative_sampler.num_negs_per_pos 33.0 
+706 48 training.batch_size 2.0 +706 49 model.embedding_dim 2.0 +706 49 model.scoring_fct_norm 1.0 +706 49 optimizer.lr 0.0055149356290845036 +706 49 negative_sampler.num_negs_per_pos 54.0 +706 49 training.batch_size 2.0 +706 50 model.embedding_dim 0.0 +706 50 model.scoring_fct_norm 2.0 +706 50 optimizer.lr 0.00155213314148985 +706 50 negative_sampler.num_negs_per_pos 36.0 +706 50 training.batch_size 0.0 +706 51 model.embedding_dim 1.0 +706 51 model.scoring_fct_norm 2.0 +706 51 optimizer.lr 0.06804182381539811 +706 51 negative_sampler.num_negs_per_pos 33.0 +706 51 training.batch_size 2.0 +706 52 model.embedding_dim 0.0 +706 52 model.scoring_fct_norm 1.0 +706 52 optimizer.lr 0.013489595302503345 +706 52 negative_sampler.num_negs_per_pos 34.0 +706 52 training.batch_size 0.0 +706 53 model.embedding_dim 0.0 +706 53 model.scoring_fct_norm 2.0 +706 53 optimizer.lr 0.053915482485579466 +706 53 negative_sampler.num_negs_per_pos 55.0 +706 53 training.batch_size 0.0 +706 54 model.embedding_dim 0.0 +706 54 model.scoring_fct_norm 1.0 +706 54 optimizer.lr 0.003308830520941768 +706 54 negative_sampler.num_negs_per_pos 3.0 +706 54 training.batch_size 2.0 +706 55 model.embedding_dim 1.0 +706 55 model.scoring_fct_norm 1.0 +706 55 optimizer.lr 0.0027239613488442473 +706 55 negative_sampler.num_negs_per_pos 20.0 +706 55 training.batch_size 2.0 +706 56 model.embedding_dim 0.0 +706 56 model.scoring_fct_norm 2.0 +706 56 optimizer.lr 0.0017145508374220614 +706 56 negative_sampler.num_negs_per_pos 88.0 +706 56 training.batch_size 0.0 +706 57 model.embedding_dim 0.0 +706 57 model.scoring_fct_norm 2.0 +706 57 optimizer.lr 0.00198708951525198 +706 57 negative_sampler.num_negs_per_pos 23.0 +706 57 training.batch_size 1.0 +706 58 model.embedding_dim 0.0 +706 58 model.scoring_fct_norm 1.0 +706 58 optimizer.lr 0.019574883292067857 +706 58 negative_sampler.num_negs_per_pos 14.0 +706 58 training.batch_size 2.0 +706 59 model.embedding_dim 2.0 +706 59 model.scoring_fct_norm 2.0 +706 59 optimizer.lr 
0.008013171119232249 +706 59 negative_sampler.num_negs_per_pos 10.0 +706 59 training.batch_size 1.0 +706 60 model.embedding_dim 0.0 +706 60 model.scoring_fct_norm 2.0 +706 60 optimizer.lr 0.00803491580758107 +706 60 negative_sampler.num_negs_per_pos 94.0 +706 60 training.batch_size 0.0 +706 61 model.embedding_dim 0.0 +706 61 model.scoring_fct_norm 1.0 +706 61 optimizer.lr 0.03965136482300173 +706 61 negative_sampler.num_negs_per_pos 96.0 +706 61 training.batch_size 0.0 +706 62 model.embedding_dim 0.0 +706 62 model.scoring_fct_norm 1.0 +706 62 optimizer.lr 0.08209809075460305 +706 62 negative_sampler.num_negs_per_pos 84.0 +706 62 training.batch_size 2.0 +706 63 model.embedding_dim 0.0 +706 63 model.scoring_fct_norm 1.0 +706 63 optimizer.lr 0.003013742542670169 +706 63 negative_sampler.num_negs_per_pos 74.0 +706 63 training.batch_size 1.0 +706 64 model.embedding_dim 1.0 +706 64 model.scoring_fct_norm 2.0 +706 64 optimizer.lr 0.08110986322034827 +706 64 negative_sampler.num_negs_per_pos 47.0 +706 64 training.batch_size 2.0 +706 65 model.embedding_dim 2.0 +706 65 model.scoring_fct_norm 2.0 +706 65 optimizer.lr 0.007842584296415727 +706 65 negative_sampler.num_negs_per_pos 44.0 +706 65 training.batch_size 0.0 +706 66 model.embedding_dim 2.0 +706 66 model.scoring_fct_norm 2.0 +706 66 optimizer.lr 0.008517069560136592 +706 66 negative_sampler.num_negs_per_pos 31.0 +706 66 training.batch_size 1.0 +706 67 model.embedding_dim 1.0 +706 67 model.scoring_fct_norm 2.0 +706 67 optimizer.lr 0.014004243113646961 +706 67 negative_sampler.num_negs_per_pos 20.0 +706 67 training.batch_size 2.0 +706 68 model.embedding_dim 2.0 +706 68 model.scoring_fct_norm 2.0 +706 68 optimizer.lr 0.004427195032581471 +706 68 negative_sampler.num_negs_per_pos 11.0 +706 68 training.batch_size 2.0 +706 69 model.embedding_dim 2.0 +706 69 model.scoring_fct_norm 2.0 +706 69 optimizer.lr 0.011349342879553663 +706 69 negative_sampler.num_negs_per_pos 59.0 +706 69 training.batch_size 0.0 +706 70 
model.embedding_dim 1.0 +706 70 model.scoring_fct_norm 1.0 +706 70 optimizer.lr 0.002275597337445019 +706 70 negative_sampler.num_negs_per_pos 48.0 +706 70 training.batch_size 1.0 +706 71 model.embedding_dim 0.0 +706 71 model.scoring_fct_norm 1.0 +706 71 optimizer.lr 0.0011595108440544185 +706 71 negative_sampler.num_negs_per_pos 44.0 +706 71 training.batch_size 1.0 +706 72 model.embedding_dim 1.0 +706 72 model.scoring_fct_norm 1.0 +706 72 optimizer.lr 0.009548471080211087 +706 72 negative_sampler.num_negs_per_pos 17.0 +706 72 training.batch_size 2.0 +706 73 model.embedding_dim 2.0 +706 73 model.scoring_fct_norm 2.0 +706 73 optimizer.lr 0.019262217325557753 +706 73 negative_sampler.num_negs_per_pos 41.0 +706 73 training.batch_size 1.0 +706 74 model.embedding_dim 1.0 +706 74 model.scoring_fct_norm 1.0 +706 74 optimizer.lr 0.0013405797121803708 +706 74 negative_sampler.num_negs_per_pos 62.0 +706 74 training.batch_size 1.0 +706 75 model.embedding_dim 1.0 +706 75 model.scoring_fct_norm 2.0 +706 75 optimizer.lr 0.0016446316791442794 +706 75 negative_sampler.num_negs_per_pos 16.0 +706 75 training.batch_size 1.0 +706 76 model.embedding_dim 2.0 +706 76 model.scoring_fct_norm 2.0 +706 76 optimizer.lr 0.07877054308328195 +706 76 negative_sampler.num_negs_per_pos 12.0 +706 76 training.batch_size 1.0 +706 77 model.embedding_dim 1.0 +706 77 model.scoring_fct_norm 2.0 +706 77 optimizer.lr 0.00460941189909162 +706 77 negative_sampler.num_negs_per_pos 99.0 +706 77 training.batch_size 2.0 +706 78 model.embedding_dim 0.0 +706 78 model.scoring_fct_norm 2.0 +706 78 optimizer.lr 0.04701578092911362 +706 78 negative_sampler.num_negs_per_pos 91.0 +706 78 training.batch_size 1.0 +706 79 model.embedding_dim 2.0 +706 79 model.scoring_fct_norm 1.0 +706 79 optimizer.lr 0.006577770313724526 +706 79 negative_sampler.num_negs_per_pos 83.0 +706 79 training.batch_size 1.0 +706 80 model.embedding_dim 0.0 +706 80 model.scoring_fct_norm 1.0 +706 80 optimizer.lr 0.001889858617360841 +706 80 
negative_sampler.num_negs_per_pos 10.0 +706 80 training.batch_size 2.0 +706 81 model.embedding_dim 1.0 +706 81 model.scoring_fct_norm 1.0 +706 81 optimizer.lr 0.004605345119052166 +706 81 negative_sampler.num_negs_per_pos 69.0 +706 81 training.batch_size 1.0 +706 82 model.embedding_dim 1.0 +706 82 model.scoring_fct_norm 2.0 +706 82 optimizer.lr 0.0013169893214738122 +706 82 negative_sampler.num_negs_per_pos 88.0 +706 82 training.batch_size 2.0 +706 83 model.embedding_dim 2.0 +706 83 model.scoring_fct_norm 1.0 +706 83 optimizer.lr 0.031530738121561534 +706 83 negative_sampler.num_negs_per_pos 72.0 +706 83 training.batch_size 1.0 +706 1 dataset """kinships""" +706 1 model """structuredembedding""" +706 1 loss """softplus""" +706 1 regularizer """no""" +706 1 optimizer """adam""" +706 1 training_loop """owa""" +706 1 negative_sampler """basic""" +706 1 evaluator """rankbased""" +706 2 dataset """kinships""" +706 2 model """structuredembedding""" +706 2 loss """softplus""" +706 2 regularizer """no""" +706 2 optimizer """adam""" +706 2 training_loop """owa""" +706 2 negative_sampler """basic""" +706 2 evaluator """rankbased""" +706 3 dataset """kinships""" +706 3 model """structuredembedding""" +706 3 loss """softplus""" +706 3 regularizer """no""" +706 3 optimizer """adam""" +706 3 training_loop """owa""" +706 3 negative_sampler """basic""" +706 3 evaluator """rankbased""" +706 4 dataset """kinships""" +706 4 model """structuredembedding""" +706 4 loss """softplus""" +706 4 regularizer """no""" +706 4 optimizer """adam""" +706 4 training_loop """owa""" +706 4 negative_sampler """basic""" +706 4 evaluator """rankbased""" +706 5 dataset """kinships""" +706 5 model """structuredembedding""" +706 5 loss """softplus""" +706 5 regularizer """no""" +706 5 optimizer """adam""" +706 5 training_loop """owa""" +706 5 negative_sampler """basic""" +706 5 evaluator """rankbased""" +706 6 dataset """kinships""" +706 6 model """structuredembedding""" +706 6 loss """softplus""" +706 6 
regularizer """no""" +706 6 optimizer """adam""" +706 6 training_loop """owa""" +706 6 negative_sampler """basic""" +706 6 evaluator """rankbased""" +706 7 dataset """kinships""" +706 7 model """structuredembedding""" +706 7 loss """softplus""" +706 7 regularizer """no""" +706 7 optimizer """adam""" +706 7 training_loop """owa""" +706 7 negative_sampler """basic""" +706 7 evaluator """rankbased""" +706 8 dataset """kinships""" +706 8 model """structuredembedding""" +706 8 loss """softplus""" +706 8 regularizer """no""" +706 8 optimizer """adam""" +706 8 training_loop """owa""" +706 8 negative_sampler """basic""" +706 8 evaluator """rankbased""" +706 9 dataset """kinships""" +706 9 model """structuredembedding""" +706 9 loss """softplus""" +706 9 regularizer """no""" +706 9 optimizer """adam""" +706 9 training_loop """owa""" +706 9 negative_sampler """basic""" +706 9 evaluator """rankbased""" +706 10 dataset """kinships""" +706 10 model """structuredembedding""" +706 10 loss """softplus""" +706 10 regularizer """no""" +706 10 optimizer """adam""" +706 10 training_loop """owa""" +706 10 negative_sampler """basic""" +706 10 evaluator """rankbased""" +706 11 dataset """kinships""" +706 11 model """structuredembedding""" +706 11 loss """softplus""" +706 11 regularizer """no""" +706 11 optimizer """adam""" +706 11 training_loop """owa""" +706 11 negative_sampler """basic""" +706 11 evaluator """rankbased""" +706 12 dataset """kinships""" +706 12 model """structuredembedding""" +706 12 loss """softplus""" +706 12 regularizer """no""" +706 12 optimizer """adam""" +706 12 training_loop """owa""" +706 12 negative_sampler """basic""" +706 12 evaluator """rankbased""" +706 13 dataset """kinships""" +706 13 model """structuredembedding""" +706 13 loss """softplus""" +706 13 regularizer """no""" +706 13 optimizer """adam""" +706 13 training_loop """owa""" +706 13 negative_sampler """basic""" +706 13 evaluator """rankbased""" +706 14 dataset """kinships""" +706 14 model 
"""structuredembedding""" +706 14 loss """softplus""" +706 14 regularizer """no""" +706 14 optimizer """adam""" +706 14 training_loop """owa""" +706 14 negative_sampler """basic""" +706 14 evaluator """rankbased""" +706 15 dataset """kinships""" +706 15 model """structuredembedding""" +706 15 loss """softplus""" +706 15 regularizer """no""" +706 15 optimizer """adam""" +706 15 training_loop """owa""" +706 15 negative_sampler """basic""" +706 15 evaluator """rankbased""" +706 16 dataset """kinships""" +706 16 model """structuredembedding""" +706 16 loss """softplus""" +706 16 regularizer """no""" +706 16 optimizer """adam""" +706 16 training_loop """owa""" +706 16 negative_sampler """basic""" +706 16 evaluator """rankbased""" +706 17 dataset """kinships""" +706 17 model """structuredembedding""" +706 17 loss """softplus""" +706 17 regularizer """no""" +706 17 optimizer """adam""" +706 17 training_loop """owa""" +706 17 negative_sampler """basic""" +706 17 evaluator """rankbased""" +706 18 dataset """kinships""" +706 18 model """structuredembedding""" +706 18 loss """softplus""" +706 18 regularizer """no""" +706 18 optimizer """adam""" +706 18 training_loop """owa""" +706 18 negative_sampler """basic""" +706 18 evaluator """rankbased""" +706 19 dataset """kinships""" +706 19 model """structuredembedding""" +706 19 loss """softplus""" +706 19 regularizer """no""" +706 19 optimizer """adam""" +706 19 training_loop """owa""" +706 19 negative_sampler """basic""" +706 19 evaluator """rankbased""" +706 20 dataset """kinships""" +706 20 model """structuredembedding""" +706 20 loss """softplus""" +706 20 regularizer """no""" +706 20 optimizer """adam""" +706 20 training_loop """owa""" +706 20 negative_sampler """basic""" +706 20 evaluator """rankbased""" +706 21 dataset """kinships""" +706 21 model """structuredembedding""" +706 21 loss """softplus""" +706 21 regularizer """no""" +706 21 optimizer """adam""" +706 21 training_loop """owa""" +706 21 negative_sampler 
"""basic""" +706 21 evaluator """rankbased""" +706 22 dataset """kinships""" +706 22 model """structuredembedding""" +706 22 loss """softplus""" +706 22 regularizer """no""" +706 22 optimizer """adam""" +706 22 training_loop """owa""" +706 22 negative_sampler """basic""" +706 22 evaluator """rankbased""" +706 23 dataset """kinships""" +706 23 model """structuredembedding""" +706 23 loss """softplus""" +706 23 regularizer """no""" +706 23 optimizer """adam""" +706 23 training_loop """owa""" +706 23 negative_sampler """basic""" +706 23 evaluator """rankbased""" +706 24 dataset """kinships""" +706 24 model """structuredembedding""" +706 24 loss """softplus""" +706 24 regularizer """no""" +706 24 optimizer """adam""" +706 24 training_loop """owa""" +706 24 negative_sampler """basic""" +706 24 evaluator """rankbased""" +706 25 dataset """kinships""" +706 25 model """structuredembedding""" +706 25 loss """softplus""" +706 25 regularizer """no""" +706 25 optimizer """adam""" +706 25 training_loop """owa""" +706 25 negative_sampler """basic""" +706 25 evaluator """rankbased""" +706 26 dataset """kinships""" +706 26 model """structuredembedding""" +706 26 loss """softplus""" +706 26 regularizer """no""" +706 26 optimizer """adam""" +706 26 training_loop """owa""" +706 26 negative_sampler """basic""" +706 26 evaluator """rankbased""" +706 27 dataset """kinships""" +706 27 model """structuredembedding""" +706 27 loss """softplus""" +706 27 regularizer """no""" +706 27 optimizer """adam""" +706 27 training_loop """owa""" +706 27 negative_sampler """basic""" +706 27 evaluator """rankbased""" +706 28 dataset """kinships""" +706 28 model """structuredembedding""" +706 28 loss """softplus""" +706 28 regularizer """no""" +706 28 optimizer """adam""" +706 28 training_loop """owa""" +706 28 negative_sampler """basic""" +706 28 evaluator """rankbased""" +706 29 dataset """kinships""" +706 29 model """structuredembedding""" +706 29 loss """softplus""" +706 29 regularizer """no""" +706 
29 optimizer """adam""" +706 29 training_loop """owa""" +706 29 negative_sampler """basic""" +706 29 evaluator """rankbased""" +706 30 dataset """kinships""" +706 30 model """structuredembedding""" +706 30 loss """softplus""" +706 30 regularizer """no""" +706 30 optimizer """adam""" +706 30 training_loop """owa""" +706 30 negative_sampler """basic""" +706 30 evaluator """rankbased""" +706 31 dataset """kinships""" +706 31 model """structuredembedding""" +706 31 loss """softplus""" +706 31 regularizer """no""" +706 31 optimizer """adam""" +706 31 training_loop """owa""" +706 31 negative_sampler """basic""" +706 31 evaluator """rankbased""" +706 32 dataset """kinships""" +706 32 model """structuredembedding""" +706 32 loss """softplus""" +706 32 regularizer """no""" +706 32 optimizer """adam""" +706 32 training_loop """owa""" +706 32 negative_sampler """basic""" +706 32 evaluator """rankbased""" +706 33 dataset """kinships""" +706 33 model """structuredembedding""" +706 33 loss """softplus""" +706 33 regularizer """no""" +706 33 optimizer """adam""" +706 33 training_loop """owa""" +706 33 negative_sampler """basic""" +706 33 evaluator """rankbased""" +706 34 dataset """kinships""" +706 34 model """structuredembedding""" +706 34 loss """softplus""" +706 34 regularizer """no""" +706 34 optimizer """adam""" +706 34 training_loop """owa""" +706 34 negative_sampler """basic""" +706 34 evaluator """rankbased""" +706 35 dataset """kinships""" +706 35 model """structuredembedding""" +706 35 loss """softplus""" +706 35 regularizer """no""" +706 35 optimizer """adam""" +706 35 training_loop """owa""" +706 35 negative_sampler """basic""" +706 35 evaluator """rankbased""" +706 36 dataset """kinships""" +706 36 model """structuredembedding""" +706 36 loss """softplus""" +706 36 regularizer """no""" +706 36 optimizer """adam""" +706 36 training_loop """owa""" +706 36 negative_sampler """basic""" +706 36 evaluator """rankbased""" +706 37 dataset """kinships""" +706 37 model 
"""structuredembedding""" +706 37 loss """softplus""" +706 37 regularizer """no""" +706 37 optimizer """adam""" +706 37 training_loop """owa""" +706 37 negative_sampler """basic""" +706 37 evaluator """rankbased""" +706 38 dataset """kinships""" +706 38 model """structuredembedding""" +706 38 loss """softplus""" +706 38 regularizer """no""" +706 38 optimizer """adam""" +706 38 training_loop """owa""" +706 38 negative_sampler """basic""" +706 38 evaluator """rankbased""" +706 39 dataset """kinships""" +706 39 model """structuredembedding""" +706 39 loss """softplus""" +706 39 regularizer """no""" +706 39 optimizer """adam""" +706 39 training_loop """owa""" +706 39 negative_sampler """basic""" +706 39 evaluator """rankbased""" +706 40 dataset """kinships""" +706 40 model """structuredembedding""" +706 40 loss """softplus""" +706 40 regularizer """no""" +706 40 optimizer """adam""" +706 40 training_loop """owa""" +706 40 negative_sampler """basic""" +706 40 evaluator """rankbased""" +706 41 dataset """kinships""" +706 41 model """structuredembedding""" +706 41 loss """softplus""" +706 41 regularizer """no""" +706 41 optimizer """adam""" +706 41 training_loop """owa""" +706 41 negative_sampler """basic""" +706 41 evaluator """rankbased""" +706 42 dataset """kinships""" +706 42 model """structuredembedding""" +706 42 loss """softplus""" +706 42 regularizer """no""" +706 42 optimizer """adam""" +706 42 training_loop """owa""" +706 42 negative_sampler """basic""" +706 42 evaluator """rankbased""" +706 43 dataset """kinships""" +706 43 model """structuredembedding""" +706 43 loss """softplus""" +706 43 regularizer """no""" +706 43 optimizer """adam""" +706 43 training_loop """owa""" +706 43 negative_sampler """basic""" +706 43 evaluator """rankbased""" +706 44 dataset """kinships""" +706 44 model """structuredembedding""" +706 44 loss """softplus""" +706 44 regularizer """no""" +706 44 optimizer """adam""" +706 44 training_loop """owa""" +706 44 negative_sampler 
"""basic""" +706 44 evaluator """rankbased""" +706 45 dataset """kinships""" +706 45 model """structuredembedding""" +706 45 loss """softplus""" +706 45 regularizer """no""" +706 45 optimizer """adam""" +706 45 training_loop """owa""" +706 45 negative_sampler """basic""" +706 45 evaluator """rankbased""" +706 46 dataset """kinships""" +706 46 model """structuredembedding""" +706 46 loss """softplus""" +706 46 regularizer """no""" +706 46 optimizer """adam""" +706 46 training_loop """owa""" +706 46 negative_sampler """basic""" +706 46 evaluator """rankbased""" +706 47 dataset """kinships""" +706 47 model """structuredembedding""" +706 47 loss """softplus""" +706 47 regularizer """no""" +706 47 optimizer """adam""" +706 47 training_loop """owa""" +706 47 negative_sampler """basic""" +706 47 evaluator """rankbased""" +706 48 dataset """kinships""" +706 48 model """structuredembedding""" +706 48 loss """softplus""" +706 48 regularizer """no""" +706 48 optimizer """adam""" +706 48 training_loop """owa""" +706 48 negative_sampler """basic""" +706 48 evaluator """rankbased""" +706 49 dataset """kinships""" +706 49 model """structuredembedding""" +706 49 loss """softplus""" +706 49 regularizer """no""" +706 49 optimizer """adam""" +706 49 training_loop """owa""" +706 49 negative_sampler """basic""" +706 49 evaluator """rankbased""" +706 50 dataset """kinships""" +706 50 model """structuredembedding""" +706 50 loss """softplus""" +706 50 regularizer """no""" +706 50 optimizer """adam""" +706 50 training_loop """owa""" +706 50 negative_sampler """basic""" +706 50 evaluator """rankbased""" +706 51 dataset """kinships""" +706 51 model """structuredembedding""" +706 51 loss """softplus""" +706 51 regularizer """no""" +706 51 optimizer """adam""" +706 51 training_loop """owa""" +706 51 negative_sampler """basic""" +706 51 evaluator """rankbased""" +706 52 dataset """kinships""" +706 52 model """structuredembedding""" +706 52 loss """softplus""" +706 52 regularizer """no""" +706 
52 optimizer """adam""" +706 52 training_loop """owa""" +706 52 negative_sampler """basic""" +706 52 evaluator """rankbased""" +706 53 dataset """kinships""" +706 53 model """structuredembedding""" +706 53 loss """softplus""" +706 53 regularizer """no""" +706 53 optimizer """adam""" +706 53 training_loop """owa""" +706 53 negative_sampler """basic""" +706 53 evaluator """rankbased""" +706 54 dataset """kinships""" +706 54 model """structuredembedding""" +706 54 loss """softplus""" +706 54 regularizer """no""" +706 54 optimizer """adam""" +706 54 training_loop """owa""" +706 54 negative_sampler """basic""" +706 54 evaluator """rankbased""" +706 55 dataset """kinships""" +706 55 model """structuredembedding""" +706 55 loss """softplus""" +706 55 regularizer """no""" +706 55 optimizer """adam""" +706 55 training_loop """owa""" +706 55 negative_sampler """basic""" +706 55 evaluator """rankbased""" +706 56 dataset """kinships""" +706 56 model """structuredembedding""" +706 56 loss """softplus""" +706 56 regularizer """no""" +706 56 optimizer """adam""" +706 56 training_loop """owa""" +706 56 negative_sampler """basic""" +706 56 evaluator """rankbased""" +706 57 dataset """kinships""" +706 57 model """structuredembedding""" +706 57 loss """softplus""" +706 57 regularizer """no""" +706 57 optimizer """adam""" +706 57 training_loop """owa""" +706 57 negative_sampler """basic""" +706 57 evaluator """rankbased""" +706 58 dataset """kinships""" +706 58 model """structuredembedding""" +706 58 loss """softplus""" +706 58 regularizer """no""" +706 58 optimizer """adam""" +706 58 training_loop """owa""" +706 58 negative_sampler """basic""" +706 58 evaluator """rankbased""" +706 59 dataset """kinships""" +706 59 model """structuredembedding""" +706 59 loss """softplus""" +706 59 regularizer """no""" +706 59 optimizer """adam""" +706 59 training_loop """owa""" +706 59 negative_sampler """basic""" +706 59 evaluator """rankbased""" +706 60 dataset """kinships""" +706 60 model 
"""structuredembedding""" +706 60 loss """softplus""" +706 60 regularizer """no""" +706 60 optimizer """adam""" +706 60 training_loop """owa""" +706 60 negative_sampler """basic""" +706 60 evaluator """rankbased""" +706 61 dataset """kinships""" +706 61 model """structuredembedding""" +706 61 loss """softplus""" +706 61 regularizer """no""" +706 61 optimizer """adam""" +706 61 training_loop """owa""" +706 61 negative_sampler """basic""" +706 61 evaluator """rankbased""" +706 62 dataset """kinships""" +706 62 model """structuredembedding""" +706 62 loss """softplus""" +706 62 regularizer """no""" +706 62 optimizer """adam""" +706 62 training_loop """owa""" +706 62 negative_sampler """basic""" +706 62 evaluator """rankbased""" +706 63 dataset """kinships""" +706 63 model """structuredembedding""" +706 63 loss """softplus""" +706 63 regularizer """no""" +706 63 optimizer """adam""" +706 63 training_loop """owa""" +706 63 negative_sampler """basic""" +706 63 evaluator """rankbased""" +706 64 dataset """kinships""" +706 64 model """structuredembedding""" +706 64 loss """softplus""" +706 64 regularizer """no""" +706 64 optimizer """adam""" +706 64 training_loop """owa""" +706 64 negative_sampler """basic""" +706 64 evaluator """rankbased""" +706 65 dataset """kinships""" +706 65 model """structuredembedding""" +706 65 loss """softplus""" +706 65 regularizer """no""" +706 65 optimizer """adam""" +706 65 training_loop """owa""" +706 65 negative_sampler """basic""" +706 65 evaluator """rankbased""" +706 66 dataset """kinships""" +706 66 model """structuredembedding""" +706 66 loss """softplus""" +706 66 regularizer """no""" +706 66 optimizer """adam""" +706 66 training_loop """owa""" +706 66 negative_sampler """basic""" +706 66 evaluator """rankbased""" +706 67 dataset """kinships""" +706 67 model """structuredembedding""" +706 67 loss """softplus""" +706 67 regularizer """no""" +706 67 optimizer """adam""" +706 67 training_loop """owa""" +706 67 negative_sampler 
"""basic""" +706 67 evaluator """rankbased""" +706 68 dataset """kinships""" +706 68 model """structuredembedding""" +706 68 loss """softplus""" +706 68 regularizer """no""" +706 68 optimizer """adam""" +706 68 training_loop """owa""" +706 68 negative_sampler """basic""" +706 68 evaluator """rankbased""" +706 69 dataset """kinships""" +706 69 model """structuredembedding""" +706 69 loss """softplus""" +706 69 regularizer """no""" +706 69 optimizer """adam""" +706 69 training_loop """owa""" +706 69 negative_sampler """basic""" +706 69 evaluator """rankbased""" +706 70 dataset """kinships""" +706 70 model """structuredembedding""" +706 70 loss """softplus""" +706 70 regularizer """no""" +706 70 optimizer """adam""" +706 70 training_loop """owa""" +706 70 negative_sampler """basic""" +706 70 evaluator """rankbased""" +706 71 dataset """kinships""" +706 71 model """structuredembedding""" +706 71 loss """softplus""" +706 71 regularizer """no""" +706 71 optimizer """adam""" +706 71 training_loop """owa""" +706 71 negative_sampler """basic""" +706 71 evaluator """rankbased""" +706 72 dataset """kinships""" +706 72 model """structuredembedding""" +706 72 loss """softplus""" +706 72 regularizer """no""" +706 72 optimizer """adam""" +706 72 training_loop """owa""" +706 72 negative_sampler """basic""" +706 72 evaluator """rankbased""" +706 73 dataset """kinships""" +706 73 model """structuredembedding""" +706 73 loss """softplus""" +706 73 regularizer """no""" +706 73 optimizer """adam""" +706 73 training_loop """owa""" +706 73 negative_sampler """basic""" +706 73 evaluator """rankbased""" +706 74 dataset """kinships""" +706 74 model """structuredembedding""" +706 74 loss """softplus""" +706 74 regularizer """no""" +706 74 optimizer """adam""" +706 74 training_loop """owa""" +706 74 negative_sampler """basic""" +706 74 evaluator """rankbased""" +706 75 dataset """kinships""" +706 75 model """structuredembedding""" +706 75 loss """softplus""" +706 75 regularizer """no""" +706 
75 optimizer """adam""" +706 75 training_loop """owa""" +706 75 negative_sampler """basic""" +706 75 evaluator """rankbased""" +706 76 dataset """kinships""" +706 76 model """structuredembedding""" +706 76 loss """softplus""" +706 76 regularizer """no""" +706 76 optimizer """adam""" +706 76 training_loop """owa""" +706 76 negative_sampler """basic""" +706 76 evaluator """rankbased""" +706 77 dataset """kinships""" +706 77 model """structuredembedding""" +706 77 loss """softplus""" +706 77 regularizer """no""" +706 77 optimizer """adam""" +706 77 training_loop """owa""" +706 77 negative_sampler """basic""" +706 77 evaluator """rankbased""" +706 78 dataset """kinships""" +706 78 model """structuredembedding""" +706 78 loss """softplus""" +706 78 regularizer """no""" +706 78 optimizer """adam""" +706 78 training_loop """owa""" +706 78 negative_sampler """basic""" +706 78 evaluator """rankbased""" +706 79 dataset """kinships""" +706 79 model """structuredembedding""" +706 79 loss """softplus""" +706 79 regularizer """no""" +706 79 optimizer """adam""" +706 79 training_loop """owa""" +706 79 negative_sampler """basic""" +706 79 evaluator """rankbased""" +706 80 dataset """kinships""" +706 80 model """structuredembedding""" +706 80 loss """softplus""" +706 80 regularizer """no""" +706 80 optimizer """adam""" +706 80 training_loop """owa""" +706 80 negative_sampler """basic""" +706 80 evaluator """rankbased""" +706 81 dataset """kinships""" +706 81 model """structuredembedding""" +706 81 loss """softplus""" +706 81 regularizer """no""" +706 81 optimizer """adam""" +706 81 training_loop """owa""" +706 81 negative_sampler """basic""" +706 81 evaluator """rankbased""" +706 82 dataset """kinships""" +706 82 model """structuredembedding""" +706 82 loss """softplus""" +706 82 regularizer """no""" +706 82 optimizer """adam""" +706 82 training_loop """owa""" +706 82 negative_sampler """basic""" +706 82 evaluator """rankbased""" +706 83 dataset """kinships""" +706 83 model 
"""structuredembedding""" +706 83 loss """softplus""" +706 83 regularizer """no""" +706 83 optimizer """adam""" +706 83 training_loop """owa""" +706 83 negative_sampler """basic""" +706 83 evaluator """rankbased""" +707 1 model.embedding_dim 0.0 +707 1 model.scoring_fct_norm 2.0 +707 1 optimizer.lr 0.023477885735311923 +707 1 negative_sampler.num_negs_per_pos 32.0 +707 1 training.batch_size 0.0 +707 2 model.embedding_dim 1.0 +707 2 model.scoring_fct_norm 1.0 +707 2 optimizer.lr 0.0010719190301475934 +707 2 negative_sampler.num_negs_per_pos 47.0 +707 2 training.batch_size 2.0 +707 3 model.embedding_dim 1.0 +707 3 model.scoring_fct_norm 1.0 +707 3 optimizer.lr 0.002589133322906808 +707 3 negative_sampler.num_negs_per_pos 2.0 +707 3 training.batch_size 2.0 +707 4 model.embedding_dim 0.0 +707 4 model.scoring_fct_norm 1.0 +707 4 optimizer.lr 0.0014543289540758166 +707 4 negative_sampler.num_negs_per_pos 22.0 +707 4 training.batch_size 2.0 +707 5 model.embedding_dim 0.0 +707 5 model.scoring_fct_norm 1.0 +707 5 optimizer.lr 0.06791102913690007 +707 5 negative_sampler.num_negs_per_pos 84.0 +707 5 training.batch_size 2.0 +707 6 model.embedding_dim 0.0 +707 6 model.scoring_fct_norm 1.0 +707 6 optimizer.lr 0.0015359270404778028 +707 6 negative_sampler.num_negs_per_pos 92.0 +707 6 training.batch_size 2.0 +707 7 model.embedding_dim 1.0 +707 7 model.scoring_fct_norm 1.0 +707 7 optimizer.lr 0.036849524360778776 +707 7 negative_sampler.num_negs_per_pos 8.0 +707 7 training.batch_size 2.0 +707 8 model.embedding_dim 0.0 +707 8 model.scoring_fct_norm 2.0 +707 8 optimizer.lr 0.00636074388546943 +707 8 negative_sampler.num_negs_per_pos 71.0 +707 8 training.batch_size 0.0 +707 9 model.embedding_dim 2.0 +707 9 model.scoring_fct_norm 1.0 +707 9 optimizer.lr 0.00828813591138831 +707 9 negative_sampler.num_negs_per_pos 59.0 +707 9 training.batch_size 2.0 +707 10 model.embedding_dim 0.0 +707 10 model.scoring_fct_norm 1.0 +707 10 optimizer.lr 0.003562223017312312 +707 10 
negative_sampler.num_negs_per_pos 23.0 +707 10 training.batch_size 1.0 +707 11 model.embedding_dim 2.0 +707 11 model.scoring_fct_norm 2.0 +707 11 optimizer.lr 0.054199092046315754 +707 11 negative_sampler.num_negs_per_pos 21.0 +707 11 training.batch_size 0.0 +707 12 model.embedding_dim 2.0 +707 12 model.scoring_fct_norm 1.0 +707 12 optimizer.lr 0.019539141991766856 +707 12 negative_sampler.num_negs_per_pos 92.0 +707 12 training.batch_size 2.0 +707 13 model.embedding_dim 2.0 +707 13 model.scoring_fct_norm 2.0 +707 13 optimizer.lr 0.013421969866188642 +707 13 negative_sampler.num_negs_per_pos 76.0 +707 13 training.batch_size 0.0 +707 14 model.embedding_dim 0.0 +707 14 model.scoring_fct_norm 2.0 +707 14 optimizer.lr 0.02452708714915721 +707 14 negative_sampler.num_negs_per_pos 14.0 +707 14 training.batch_size 1.0 +707 15 model.embedding_dim 0.0 +707 15 model.scoring_fct_norm 2.0 +707 15 optimizer.lr 0.05718214128395601 +707 15 negative_sampler.num_negs_per_pos 45.0 +707 15 training.batch_size 0.0 +707 16 model.embedding_dim 1.0 +707 16 model.scoring_fct_norm 2.0 +707 16 optimizer.lr 0.004586900354467392 +707 16 negative_sampler.num_negs_per_pos 25.0 +707 16 training.batch_size 1.0 +707 17 model.embedding_dim 2.0 +707 17 model.scoring_fct_norm 1.0 +707 17 optimizer.lr 0.05585995651671062 +707 17 negative_sampler.num_negs_per_pos 9.0 +707 17 training.batch_size 0.0 +707 18 model.embedding_dim 1.0 +707 18 model.scoring_fct_norm 2.0 +707 18 optimizer.lr 0.0010955883764723234 +707 18 negative_sampler.num_negs_per_pos 54.0 +707 18 training.batch_size 1.0 +707 19 model.embedding_dim 2.0 +707 19 model.scoring_fct_norm 2.0 +707 19 optimizer.lr 0.012188185898989734 +707 19 negative_sampler.num_negs_per_pos 24.0 +707 19 training.batch_size 2.0 +707 20 model.embedding_dim 1.0 +707 20 model.scoring_fct_norm 2.0 +707 20 optimizer.lr 0.06626356364768785 +707 20 negative_sampler.num_negs_per_pos 72.0 +707 20 training.batch_size 2.0 +707 21 model.embedding_dim 1.0 +707 21 
model.scoring_fct_norm 1.0 +707 21 optimizer.lr 0.077744421339171 +707 21 negative_sampler.num_negs_per_pos 94.0 +707 21 training.batch_size 2.0 +707 22 model.embedding_dim 1.0 +707 22 model.scoring_fct_norm 2.0 +707 22 optimizer.lr 0.0678037021434402 +707 22 negative_sampler.num_negs_per_pos 87.0 +707 22 training.batch_size 0.0 +707 23 model.embedding_dim 1.0 +707 23 model.scoring_fct_norm 1.0 +707 23 optimizer.lr 0.011575339054878382 +707 23 negative_sampler.num_negs_per_pos 26.0 +707 23 training.batch_size 0.0 +707 24 model.embedding_dim 1.0 +707 24 model.scoring_fct_norm 2.0 +707 24 optimizer.lr 0.02029979219991268 +707 24 negative_sampler.num_negs_per_pos 18.0 +707 24 training.batch_size 2.0 +707 25 model.embedding_dim 1.0 +707 25 model.scoring_fct_norm 1.0 +707 25 optimizer.lr 0.08490417022139363 +707 25 negative_sampler.num_negs_per_pos 50.0 +707 25 training.batch_size 2.0 +707 26 model.embedding_dim 2.0 +707 26 model.scoring_fct_norm 1.0 +707 26 optimizer.lr 0.00214815999220095 +707 26 negative_sampler.num_negs_per_pos 1.0 +707 26 training.batch_size 0.0 +707 27 model.embedding_dim 2.0 +707 27 model.scoring_fct_norm 2.0 +707 27 optimizer.lr 0.0026174648251134592 +707 27 negative_sampler.num_negs_per_pos 42.0 +707 27 training.batch_size 0.0 +707 28 model.embedding_dim 0.0 +707 28 model.scoring_fct_norm 1.0 +707 28 optimizer.lr 0.04880193809135187 +707 28 negative_sampler.num_negs_per_pos 50.0 +707 28 training.batch_size 2.0 +707 29 model.embedding_dim 1.0 +707 29 model.scoring_fct_norm 2.0 +707 29 optimizer.lr 0.02069308609608898 +707 29 negative_sampler.num_negs_per_pos 24.0 +707 29 training.batch_size 1.0 +707 30 model.embedding_dim 1.0 +707 30 model.scoring_fct_norm 1.0 +707 30 optimizer.lr 0.0028793452231036603 +707 30 negative_sampler.num_negs_per_pos 26.0 +707 30 training.batch_size 1.0 +707 31 model.embedding_dim 0.0 +707 31 model.scoring_fct_norm 1.0 +707 31 optimizer.lr 0.02522735312251443 +707 31 negative_sampler.num_negs_per_pos 52.0 +707 31 
training.batch_size 0.0 +707 32 model.embedding_dim 2.0 +707 32 model.scoring_fct_norm 1.0 +707 32 optimizer.lr 0.002284144307814789 +707 32 negative_sampler.num_negs_per_pos 43.0 +707 32 training.batch_size 1.0 +707 33 model.embedding_dim 2.0 +707 33 model.scoring_fct_norm 1.0 +707 33 optimizer.lr 0.0077957446338410155 +707 33 negative_sampler.num_negs_per_pos 99.0 +707 33 training.batch_size 2.0 +707 34 model.embedding_dim 0.0 +707 34 model.scoring_fct_norm 2.0 +707 34 optimizer.lr 0.019160007895283836 +707 34 negative_sampler.num_negs_per_pos 52.0 +707 34 training.batch_size 1.0 +707 35 model.embedding_dim 0.0 +707 35 model.scoring_fct_norm 2.0 +707 35 optimizer.lr 0.055878388696636104 +707 35 negative_sampler.num_negs_per_pos 66.0 +707 35 training.batch_size 0.0 +707 36 model.embedding_dim 1.0 +707 36 model.scoring_fct_norm 1.0 +707 36 optimizer.lr 0.012833396004950377 +707 36 negative_sampler.num_negs_per_pos 46.0 +707 36 training.batch_size 1.0 +707 37 model.embedding_dim 1.0 +707 37 model.scoring_fct_norm 2.0 +707 37 optimizer.lr 0.004651103284256398 +707 37 negative_sampler.num_negs_per_pos 25.0 +707 37 training.batch_size 2.0 +707 38 model.embedding_dim 0.0 +707 38 model.scoring_fct_norm 1.0 +707 38 optimizer.lr 0.007479265818496708 +707 38 negative_sampler.num_negs_per_pos 72.0 +707 38 training.batch_size 2.0 +707 39 model.embedding_dim 1.0 +707 39 model.scoring_fct_norm 2.0 +707 39 optimizer.lr 0.004915481506345495 +707 39 negative_sampler.num_negs_per_pos 35.0 +707 39 training.batch_size 1.0 +707 40 model.embedding_dim 1.0 +707 40 model.scoring_fct_norm 2.0 +707 40 optimizer.lr 0.05648131364511346 +707 40 negative_sampler.num_negs_per_pos 54.0 +707 40 training.batch_size 2.0 +707 41 model.embedding_dim 2.0 +707 41 model.scoring_fct_norm 1.0 +707 41 optimizer.lr 0.028320084755804973 +707 41 negative_sampler.num_negs_per_pos 89.0 +707 41 training.batch_size 1.0 +707 42 model.embedding_dim 1.0 +707 42 model.scoring_fct_norm 1.0 +707 42 optimizer.lr 
0.0028832829757760455 +707 42 negative_sampler.num_negs_per_pos 35.0 +707 42 training.batch_size 0.0 +707 43 model.embedding_dim 2.0 +707 43 model.scoring_fct_norm 2.0 +707 43 optimizer.lr 0.001956730530099927 +707 43 negative_sampler.num_negs_per_pos 83.0 +707 43 training.batch_size 2.0 +707 44 model.embedding_dim 2.0 +707 44 model.scoring_fct_norm 2.0 +707 44 optimizer.lr 0.05254784575189706 +707 44 negative_sampler.num_negs_per_pos 68.0 +707 44 training.batch_size 0.0 +707 45 model.embedding_dim 1.0 +707 45 model.scoring_fct_norm 1.0 +707 45 optimizer.lr 0.06409938476167489 +707 45 negative_sampler.num_negs_per_pos 98.0 +707 45 training.batch_size 2.0 +707 46 model.embedding_dim 0.0 +707 46 model.scoring_fct_norm 2.0 +707 46 optimizer.lr 0.0014317208823291393 +707 46 negative_sampler.num_negs_per_pos 25.0 +707 46 training.batch_size 0.0 +707 47 model.embedding_dim 1.0 +707 47 model.scoring_fct_norm 1.0 +707 47 optimizer.lr 0.0013522435689974396 +707 47 negative_sampler.num_negs_per_pos 31.0 +707 47 training.batch_size 1.0 +707 48 model.embedding_dim 1.0 +707 48 model.scoring_fct_norm 2.0 +707 48 optimizer.lr 0.010061923002941758 +707 48 negative_sampler.num_negs_per_pos 29.0 +707 48 training.batch_size 1.0 +707 49 model.embedding_dim 0.0 +707 49 model.scoring_fct_norm 1.0 +707 49 optimizer.lr 0.008850952433204993 +707 49 negative_sampler.num_negs_per_pos 76.0 +707 49 training.batch_size 2.0 +707 50 model.embedding_dim 0.0 +707 50 model.scoring_fct_norm 1.0 +707 50 optimizer.lr 0.052539347129339914 +707 50 negative_sampler.num_negs_per_pos 40.0 +707 50 training.batch_size 2.0 +707 51 model.embedding_dim 2.0 +707 51 model.scoring_fct_norm 2.0 +707 51 optimizer.lr 0.010118141160410233 +707 51 negative_sampler.num_negs_per_pos 62.0 +707 51 training.batch_size 0.0 +707 52 model.embedding_dim 0.0 +707 52 model.scoring_fct_norm 1.0 +707 52 optimizer.lr 0.09249527800225854 +707 52 negative_sampler.num_negs_per_pos 99.0 +707 52 training.batch_size 1.0 +707 53 
model.embedding_dim 1.0 +707 53 model.scoring_fct_norm 2.0 +707 53 optimizer.lr 0.0065841465240848085 +707 53 negative_sampler.num_negs_per_pos 9.0 +707 53 training.batch_size 0.0 +707 54 model.embedding_dim 2.0 +707 54 model.scoring_fct_norm 1.0 +707 54 optimizer.lr 0.00615025283733492 +707 54 negative_sampler.num_negs_per_pos 44.0 +707 54 training.batch_size 1.0 +707 55 model.embedding_dim 0.0 +707 55 model.scoring_fct_norm 1.0 +707 55 optimizer.lr 0.010132093880845646 +707 55 negative_sampler.num_negs_per_pos 91.0 +707 55 training.batch_size 1.0 +707 56 model.embedding_dim 0.0 +707 56 model.scoring_fct_norm 2.0 +707 56 optimizer.lr 0.004708905678743483 +707 56 negative_sampler.num_negs_per_pos 59.0 +707 56 training.batch_size 1.0 +707 57 model.embedding_dim 2.0 +707 57 model.scoring_fct_norm 2.0 +707 57 optimizer.lr 0.09896849043572423 +707 57 negative_sampler.num_negs_per_pos 79.0 +707 57 training.batch_size 1.0 +707 58 model.embedding_dim 2.0 +707 58 model.scoring_fct_norm 1.0 +707 58 optimizer.lr 0.005516418079524142 +707 58 negative_sampler.num_negs_per_pos 71.0 +707 58 training.batch_size 2.0 +707 59 model.embedding_dim 1.0 +707 59 model.scoring_fct_norm 2.0 +707 59 optimizer.lr 0.011725301650460722 +707 59 negative_sampler.num_negs_per_pos 31.0 +707 59 training.batch_size 2.0 +707 60 model.embedding_dim 1.0 +707 60 model.scoring_fct_norm 2.0 +707 60 optimizer.lr 0.036527043764840776 +707 60 negative_sampler.num_negs_per_pos 30.0 +707 60 training.batch_size 0.0 +707 61 model.embedding_dim 2.0 +707 61 model.scoring_fct_norm 1.0 +707 61 optimizer.lr 0.04270717196694278 +707 61 negative_sampler.num_negs_per_pos 9.0 +707 61 training.batch_size 0.0 +707 62 model.embedding_dim 0.0 +707 62 model.scoring_fct_norm 1.0 +707 62 optimizer.lr 0.013426981769936163 +707 62 negative_sampler.num_negs_per_pos 95.0 +707 62 training.batch_size 2.0 +707 63 model.embedding_dim 1.0 +707 63 model.scoring_fct_norm 1.0 +707 63 optimizer.lr 0.024795535592314583 +707 63 
negative_sampler.num_negs_per_pos 36.0 +707 63 training.batch_size 0.0 +707 64 model.embedding_dim 2.0 +707 64 model.scoring_fct_norm 1.0 +707 64 optimizer.lr 0.0020950338960887827 +707 64 negative_sampler.num_negs_per_pos 53.0 +707 64 training.batch_size 1.0 +707 65 model.embedding_dim 2.0 +707 65 model.scoring_fct_norm 2.0 +707 65 optimizer.lr 0.00898111043094217 +707 65 negative_sampler.num_negs_per_pos 64.0 +707 65 training.batch_size 0.0 +707 66 model.embedding_dim 0.0 +707 66 model.scoring_fct_norm 2.0 +707 66 optimizer.lr 0.0690286643471168 +707 66 negative_sampler.num_negs_per_pos 13.0 +707 66 training.batch_size 0.0 +707 67 model.embedding_dim 0.0 +707 67 model.scoring_fct_norm 2.0 +707 67 optimizer.lr 0.03414943584842613 +707 67 negative_sampler.num_negs_per_pos 93.0 +707 67 training.batch_size 2.0 +707 68 model.embedding_dim 1.0 +707 68 model.scoring_fct_norm 1.0 +707 68 optimizer.lr 0.08842724936897056 +707 68 negative_sampler.num_negs_per_pos 59.0 +707 68 training.batch_size 0.0 +707 69 model.embedding_dim 1.0 +707 69 model.scoring_fct_norm 1.0 +707 69 optimizer.lr 0.001191687240327627 +707 69 negative_sampler.num_negs_per_pos 18.0 +707 69 training.batch_size 0.0 +707 70 model.embedding_dim 1.0 +707 70 model.scoring_fct_norm 2.0 +707 70 optimizer.lr 0.0072908051979285095 +707 70 negative_sampler.num_negs_per_pos 52.0 +707 70 training.batch_size 1.0 +707 71 model.embedding_dim 0.0 +707 71 model.scoring_fct_norm 2.0 +707 71 optimizer.lr 0.03918361451740387 +707 71 negative_sampler.num_negs_per_pos 44.0 +707 71 training.batch_size 0.0 +707 72 model.embedding_dim 2.0 +707 72 model.scoring_fct_norm 1.0 +707 72 optimizer.lr 0.0062331144697177 +707 72 negative_sampler.num_negs_per_pos 73.0 +707 72 training.batch_size 0.0 +707 73 model.embedding_dim 1.0 +707 73 model.scoring_fct_norm 2.0 +707 73 optimizer.lr 0.09592840992559769 +707 73 negative_sampler.num_negs_per_pos 36.0 +707 73 training.batch_size 0.0 +707 74 model.embedding_dim 1.0 +707 74 
model.scoring_fct_norm 2.0 +707 74 optimizer.lr 0.0022432976990289467 +707 74 negative_sampler.num_negs_per_pos 9.0 +707 74 training.batch_size 0.0 +707 75 model.embedding_dim 1.0 +707 75 model.scoring_fct_norm 1.0 +707 75 optimizer.lr 0.03949431022719126 +707 75 negative_sampler.num_negs_per_pos 49.0 +707 75 training.batch_size 0.0 +707 76 model.embedding_dim 0.0 +707 76 model.scoring_fct_norm 1.0 +707 76 optimizer.lr 0.026319110541222074 +707 76 negative_sampler.num_negs_per_pos 2.0 +707 76 training.batch_size 1.0 +707 77 model.embedding_dim 0.0 +707 77 model.scoring_fct_norm 1.0 +707 77 optimizer.lr 0.004840614478718653 +707 77 negative_sampler.num_negs_per_pos 37.0 +707 77 training.batch_size 1.0 +707 78 model.embedding_dim 0.0 +707 78 model.scoring_fct_norm 1.0 +707 78 optimizer.lr 0.015464982665265171 +707 78 negative_sampler.num_negs_per_pos 19.0 +707 78 training.batch_size 1.0 +707 79 model.embedding_dim 1.0 +707 79 model.scoring_fct_norm 1.0 +707 79 optimizer.lr 0.0011526464485168534 +707 79 negative_sampler.num_negs_per_pos 7.0 +707 79 training.batch_size 0.0 +707 80 model.embedding_dim 2.0 +707 80 model.scoring_fct_norm 1.0 +707 80 optimizer.lr 0.0038690162240820145 +707 80 negative_sampler.num_negs_per_pos 17.0 +707 80 training.batch_size 1.0 +707 81 model.embedding_dim 2.0 +707 81 model.scoring_fct_norm 1.0 +707 81 optimizer.lr 0.03140762498536536 +707 81 negative_sampler.num_negs_per_pos 73.0 +707 81 training.batch_size 1.0 +707 82 model.embedding_dim 2.0 +707 82 model.scoring_fct_norm 2.0 +707 82 optimizer.lr 0.07032991553385183 +707 82 negative_sampler.num_negs_per_pos 11.0 +707 82 training.batch_size 0.0 +707 83 model.embedding_dim 0.0 +707 83 model.scoring_fct_norm 2.0 +707 83 optimizer.lr 0.0034767316942701256 +707 83 negative_sampler.num_negs_per_pos 70.0 +707 83 training.batch_size 0.0 +707 84 model.embedding_dim 2.0 +707 84 model.scoring_fct_norm 2.0 +707 84 optimizer.lr 0.00280335867862324 +707 84 negative_sampler.num_negs_per_pos 90.0 +707 
84 training.batch_size 0.0 +707 85 model.embedding_dim 1.0 +707 85 model.scoring_fct_norm 2.0 +707 85 optimizer.lr 0.008572362096826552 +707 85 negative_sampler.num_negs_per_pos 43.0 +707 85 training.batch_size 0.0 +707 86 model.embedding_dim 2.0 +707 86 model.scoring_fct_norm 2.0 +707 86 optimizer.lr 0.006833029522521524 +707 86 negative_sampler.num_negs_per_pos 81.0 +707 86 training.batch_size 0.0 +707 87 model.embedding_dim 1.0 +707 87 model.scoring_fct_norm 2.0 +707 87 optimizer.lr 0.014950505475530847 +707 87 negative_sampler.num_negs_per_pos 30.0 +707 87 training.batch_size 0.0 +707 88 model.embedding_dim 1.0 +707 88 model.scoring_fct_norm 1.0 +707 88 optimizer.lr 0.008263128753770006 +707 88 negative_sampler.num_negs_per_pos 57.0 +707 88 training.batch_size 0.0 +707 89 model.embedding_dim 0.0 +707 89 model.scoring_fct_norm 2.0 +707 89 optimizer.lr 0.04408692667570929 +707 89 negative_sampler.num_negs_per_pos 86.0 +707 89 training.batch_size 0.0 +707 90 model.embedding_dim 0.0 +707 90 model.scoring_fct_norm 1.0 +707 90 optimizer.lr 0.005426087402898307 +707 90 negative_sampler.num_negs_per_pos 18.0 +707 90 training.batch_size 0.0 +707 91 model.embedding_dim 1.0 +707 91 model.scoring_fct_norm 2.0 +707 91 optimizer.lr 0.0017645944132968264 +707 91 negative_sampler.num_negs_per_pos 31.0 +707 91 training.batch_size 0.0 +707 92 model.embedding_dim 1.0 +707 92 model.scoring_fct_norm 1.0 +707 92 optimizer.lr 0.041750567496069535 +707 92 negative_sampler.num_negs_per_pos 82.0 +707 92 training.batch_size 1.0 +707 93 model.embedding_dim 1.0 +707 93 model.scoring_fct_norm 1.0 +707 93 optimizer.lr 0.0028506580664927525 +707 93 negative_sampler.num_negs_per_pos 92.0 +707 93 training.batch_size 1.0 +707 94 model.embedding_dim 1.0 +707 94 model.scoring_fct_norm 2.0 +707 94 optimizer.lr 0.011412047098837374 +707 94 negative_sampler.num_negs_per_pos 70.0 +707 94 training.batch_size 0.0 +707 95 model.embedding_dim 1.0 +707 95 model.scoring_fct_norm 2.0 +707 95 optimizer.lr 
0.009021033531659018 +707 95 negative_sampler.num_negs_per_pos 7.0 +707 95 training.batch_size 1.0 +707 96 model.embedding_dim 1.0 +707 96 model.scoring_fct_norm 1.0 +707 96 optimizer.lr 0.0365232548181128 +707 96 negative_sampler.num_negs_per_pos 82.0 +707 96 training.batch_size 2.0 +707 97 model.embedding_dim 2.0 +707 97 model.scoring_fct_norm 2.0 +707 97 optimizer.lr 0.023484411490435383 +707 97 negative_sampler.num_negs_per_pos 71.0 +707 97 training.batch_size 2.0 +707 98 model.embedding_dim 0.0 +707 98 model.scoring_fct_norm 1.0 +707 98 optimizer.lr 0.02898037510321351 +707 98 negative_sampler.num_negs_per_pos 65.0 +707 98 training.batch_size 2.0 +707 99 model.embedding_dim 0.0 +707 99 model.scoring_fct_norm 1.0 +707 99 optimizer.lr 0.0017456068321806686 +707 99 negative_sampler.num_negs_per_pos 50.0 +707 99 training.batch_size 2.0 +707 100 model.embedding_dim 0.0 +707 100 model.scoring_fct_norm 1.0 +707 100 optimizer.lr 0.0152944155008566 +707 100 negative_sampler.num_negs_per_pos 3.0 +707 100 training.batch_size 2.0 +707 1 dataset """kinships""" +707 1 model """structuredembedding""" +707 1 loss """bceaftersigmoid""" +707 1 regularizer """no""" +707 1 optimizer """adam""" +707 1 training_loop """owa""" +707 1 negative_sampler """basic""" +707 1 evaluator """rankbased""" +707 2 dataset """kinships""" +707 2 model """structuredembedding""" +707 2 loss """bceaftersigmoid""" +707 2 regularizer """no""" +707 2 optimizer """adam""" +707 2 training_loop """owa""" +707 2 negative_sampler """basic""" +707 2 evaluator """rankbased""" +707 3 dataset """kinships""" +707 3 model """structuredembedding""" +707 3 loss """bceaftersigmoid""" +707 3 regularizer """no""" +707 3 optimizer """adam""" +707 3 training_loop """owa""" +707 3 negative_sampler """basic""" +707 3 evaluator """rankbased""" +707 4 dataset """kinships""" +707 4 model """structuredembedding""" +707 4 loss """bceaftersigmoid""" +707 4 regularizer """no""" +707 4 optimizer """adam""" +707 4 training_loop 
"""owa""" +707 4 negative_sampler """basic""" +707 4 evaluator """rankbased""" +707 5 dataset """kinships""" +707 5 model """structuredembedding""" +707 5 loss """bceaftersigmoid""" +707 5 regularizer """no""" +707 5 optimizer """adam""" +707 5 training_loop """owa""" +707 5 negative_sampler """basic""" +707 5 evaluator """rankbased""" +707 6 dataset """kinships""" +707 6 model """structuredembedding""" +707 6 loss """bceaftersigmoid""" +707 6 regularizer """no""" +707 6 optimizer """adam""" +707 6 training_loop """owa""" +707 6 negative_sampler """basic""" +707 6 evaluator """rankbased""" +707 7 dataset """kinships""" +707 7 model """structuredembedding""" +707 7 loss """bceaftersigmoid""" +707 7 regularizer """no""" +707 7 optimizer """adam""" +707 7 training_loop """owa""" +707 7 negative_sampler """basic""" +707 7 evaluator """rankbased""" +707 8 dataset """kinships""" +707 8 model """structuredembedding""" +707 8 loss """bceaftersigmoid""" +707 8 regularizer """no""" +707 8 optimizer """adam""" +707 8 training_loop """owa""" +707 8 negative_sampler """basic""" +707 8 evaluator """rankbased""" +707 9 dataset """kinships""" +707 9 model """structuredembedding""" +707 9 loss """bceaftersigmoid""" +707 9 regularizer """no""" +707 9 optimizer """adam""" +707 9 training_loop """owa""" +707 9 negative_sampler """basic""" +707 9 evaluator """rankbased""" +707 10 dataset """kinships""" +707 10 model """structuredembedding""" +707 10 loss """bceaftersigmoid""" +707 10 regularizer """no""" +707 10 optimizer """adam""" +707 10 training_loop """owa""" +707 10 negative_sampler """basic""" +707 10 evaluator """rankbased""" +707 11 dataset """kinships""" +707 11 model """structuredembedding""" +707 11 loss """bceaftersigmoid""" +707 11 regularizer """no""" +707 11 optimizer """adam""" +707 11 training_loop """owa""" +707 11 negative_sampler """basic""" +707 11 evaluator """rankbased""" +707 12 dataset """kinships""" +707 12 model """structuredembedding""" +707 12 loss 
"""bceaftersigmoid""" +707 12 regularizer """no""" +707 12 optimizer """adam""" +707 12 training_loop """owa""" +707 12 negative_sampler """basic""" +707 12 evaluator """rankbased""" +707 13 dataset """kinships""" +707 13 model """structuredembedding""" +707 13 loss """bceaftersigmoid""" +707 13 regularizer """no""" +707 13 optimizer """adam""" +707 13 training_loop """owa""" +707 13 negative_sampler """basic""" +707 13 evaluator """rankbased""" +707 14 dataset """kinships""" +707 14 model """structuredembedding""" +707 14 loss """bceaftersigmoid""" +707 14 regularizer """no""" +707 14 optimizer """adam""" +707 14 training_loop """owa""" +707 14 negative_sampler """basic""" +707 14 evaluator """rankbased""" +707 15 dataset """kinships""" +707 15 model """structuredembedding""" +707 15 loss """bceaftersigmoid""" +707 15 regularizer """no""" +707 15 optimizer """adam""" +707 15 training_loop """owa""" +707 15 negative_sampler """basic""" +707 15 evaluator """rankbased""" +707 16 dataset """kinships""" +707 16 model """structuredembedding""" +707 16 loss """bceaftersigmoid""" +707 16 regularizer """no""" +707 16 optimizer """adam""" +707 16 training_loop """owa""" +707 16 negative_sampler """basic""" +707 16 evaluator """rankbased""" +707 17 dataset """kinships""" +707 17 model """structuredembedding""" +707 17 loss """bceaftersigmoid""" +707 17 regularizer """no""" +707 17 optimizer """adam""" +707 17 training_loop """owa""" +707 17 negative_sampler """basic""" +707 17 evaluator """rankbased""" +707 18 dataset """kinships""" +707 18 model """structuredembedding""" +707 18 loss """bceaftersigmoid""" +707 18 regularizer """no""" +707 18 optimizer """adam""" +707 18 training_loop """owa""" +707 18 negative_sampler """basic""" +707 18 evaluator """rankbased""" +707 19 dataset """kinships""" +707 19 model """structuredembedding""" +707 19 loss """bceaftersigmoid""" +707 19 regularizer """no""" +707 19 optimizer """adam""" +707 19 training_loop """owa""" +707 19 
negative_sampler """basic""" +707 19 evaluator """rankbased""" +707 20 dataset """kinships""" +707 20 model """structuredembedding""" +707 20 loss """bceaftersigmoid""" +707 20 regularizer """no""" +707 20 optimizer """adam""" +707 20 training_loop """owa""" +707 20 negative_sampler """basic""" +707 20 evaluator """rankbased""" +707 21 dataset """kinships""" +707 21 model """structuredembedding""" +707 21 loss """bceaftersigmoid""" +707 21 regularizer """no""" +707 21 optimizer """adam""" +707 21 training_loop """owa""" +707 21 negative_sampler """basic""" +707 21 evaluator """rankbased""" +707 22 dataset """kinships""" +707 22 model """structuredembedding""" +707 22 loss """bceaftersigmoid""" +707 22 regularizer """no""" +707 22 optimizer """adam""" +707 22 training_loop """owa""" +707 22 negative_sampler """basic""" +707 22 evaluator """rankbased""" +707 23 dataset """kinships""" +707 23 model """structuredembedding""" +707 23 loss """bceaftersigmoid""" +707 23 regularizer """no""" +707 23 optimizer """adam""" +707 23 training_loop """owa""" +707 23 negative_sampler """basic""" +707 23 evaluator """rankbased""" +707 24 dataset """kinships""" +707 24 model """structuredembedding""" +707 24 loss """bceaftersigmoid""" +707 24 regularizer """no""" +707 24 optimizer """adam""" +707 24 training_loop """owa""" +707 24 negative_sampler """basic""" +707 24 evaluator """rankbased""" +707 25 dataset """kinships""" +707 25 model """structuredembedding""" +707 25 loss """bceaftersigmoid""" +707 25 regularizer """no""" +707 25 optimizer """adam""" +707 25 training_loop """owa""" +707 25 negative_sampler """basic""" +707 25 evaluator """rankbased""" +707 26 dataset """kinships""" +707 26 model """structuredembedding""" +707 26 loss """bceaftersigmoid""" +707 26 regularizer """no""" +707 26 optimizer """adam""" +707 26 training_loop """owa""" +707 26 negative_sampler """basic""" +707 26 evaluator """rankbased""" +707 27 dataset """kinships""" +707 27 model 
"""structuredembedding""" +707 27 loss """bceaftersigmoid""" +707 27 regularizer """no""" +707 27 optimizer """adam""" +707 27 training_loop """owa""" +707 27 negative_sampler """basic""" +707 27 evaluator """rankbased""" +707 28 dataset """kinships""" +707 28 model """structuredembedding""" +707 28 loss """bceaftersigmoid""" +707 28 regularizer """no""" +707 28 optimizer """adam""" +707 28 training_loop """owa""" +707 28 negative_sampler """basic""" +707 28 evaluator """rankbased""" +707 29 dataset """kinships""" +707 29 model """structuredembedding""" +707 29 loss """bceaftersigmoid""" +707 29 regularizer """no""" +707 29 optimizer """adam""" +707 29 training_loop """owa""" +707 29 negative_sampler """basic""" +707 29 evaluator """rankbased""" +707 30 dataset """kinships""" +707 30 model """structuredembedding""" +707 30 loss """bceaftersigmoid""" +707 30 regularizer """no""" +707 30 optimizer """adam""" +707 30 training_loop """owa""" +707 30 negative_sampler """basic""" +707 30 evaluator """rankbased""" +707 31 dataset """kinships""" +707 31 model """structuredembedding""" +707 31 loss """bceaftersigmoid""" +707 31 regularizer """no""" +707 31 optimizer """adam""" +707 31 training_loop """owa""" +707 31 negative_sampler """basic""" +707 31 evaluator """rankbased""" +707 32 dataset """kinships""" +707 32 model """structuredembedding""" +707 32 loss """bceaftersigmoid""" +707 32 regularizer """no""" +707 32 optimizer """adam""" +707 32 training_loop """owa""" +707 32 negative_sampler """basic""" +707 32 evaluator """rankbased""" +707 33 dataset """kinships""" +707 33 model """structuredembedding""" +707 33 loss """bceaftersigmoid""" +707 33 regularizer """no""" +707 33 optimizer """adam""" +707 33 training_loop """owa""" +707 33 negative_sampler """basic""" +707 33 evaluator """rankbased""" +707 34 dataset """kinships""" +707 34 model """structuredembedding""" +707 34 loss """bceaftersigmoid""" +707 34 regularizer """no""" +707 34 optimizer """adam""" +707 34 
training_loop """owa""" +707 34 negative_sampler """basic""" +707 34 evaluator """rankbased""" +707 35 dataset """kinships""" +707 35 model """structuredembedding""" +707 35 loss """bceaftersigmoid""" +707 35 regularizer """no""" +707 35 optimizer """adam""" +707 35 training_loop """owa""" +707 35 negative_sampler """basic""" +707 35 evaluator """rankbased""" +707 36 dataset """kinships""" +707 36 model """structuredembedding""" +707 36 loss """bceaftersigmoid""" +707 36 regularizer """no""" +707 36 optimizer """adam""" +707 36 training_loop """owa""" +707 36 negative_sampler """basic""" +707 36 evaluator """rankbased""" +707 37 dataset """kinships""" +707 37 model """structuredembedding""" +707 37 loss """bceaftersigmoid""" +707 37 regularizer """no""" +707 37 optimizer """adam""" +707 37 training_loop """owa""" +707 37 negative_sampler """basic""" +707 37 evaluator """rankbased""" +707 38 dataset """kinships""" +707 38 model """structuredembedding""" +707 38 loss """bceaftersigmoid""" +707 38 regularizer """no""" +707 38 optimizer """adam""" +707 38 training_loop """owa""" +707 38 negative_sampler """basic""" +707 38 evaluator """rankbased""" +707 39 dataset """kinships""" +707 39 model """structuredembedding""" +707 39 loss """bceaftersigmoid""" +707 39 regularizer """no""" +707 39 optimizer """adam""" +707 39 training_loop """owa""" +707 39 negative_sampler """basic""" +707 39 evaluator """rankbased""" +707 40 dataset """kinships""" +707 40 model """structuredembedding""" +707 40 loss """bceaftersigmoid""" +707 40 regularizer """no""" +707 40 optimizer """adam""" +707 40 training_loop """owa""" +707 40 negative_sampler """basic""" +707 40 evaluator """rankbased""" +707 41 dataset """kinships""" +707 41 model """structuredembedding""" +707 41 loss """bceaftersigmoid""" +707 41 regularizer """no""" +707 41 optimizer """adam""" +707 41 training_loop """owa""" +707 41 negative_sampler """basic""" +707 41 evaluator """rankbased""" +707 42 dataset """kinships""" +707 
42 model """structuredembedding""" +707 42 loss """bceaftersigmoid""" +707 42 regularizer """no""" +707 42 optimizer """adam""" +707 42 training_loop """owa""" +707 42 negative_sampler """basic""" +707 42 evaluator """rankbased""" +707 43 dataset """kinships""" +707 43 model """structuredembedding""" +707 43 loss """bceaftersigmoid""" +707 43 regularizer """no""" +707 43 optimizer """adam""" +707 43 training_loop """owa""" +707 43 negative_sampler """basic""" +707 43 evaluator """rankbased""" +707 44 dataset """kinships""" +707 44 model """structuredembedding""" +707 44 loss """bceaftersigmoid""" +707 44 regularizer """no""" +707 44 optimizer """adam""" +707 44 training_loop """owa""" +707 44 negative_sampler """basic""" +707 44 evaluator """rankbased""" +707 45 dataset """kinships""" +707 45 model """structuredembedding""" +707 45 loss """bceaftersigmoid""" +707 45 regularizer """no""" +707 45 optimizer """adam""" +707 45 training_loop """owa""" +707 45 negative_sampler """basic""" +707 45 evaluator """rankbased""" +707 46 dataset """kinships""" +707 46 model """structuredembedding""" +707 46 loss """bceaftersigmoid""" +707 46 regularizer """no""" +707 46 optimizer """adam""" +707 46 training_loop """owa""" +707 46 negative_sampler """basic""" +707 46 evaluator """rankbased""" +707 47 dataset """kinships""" +707 47 model """structuredembedding""" +707 47 loss """bceaftersigmoid""" +707 47 regularizer """no""" +707 47 optimizer """adam""" +707 47 training_loop """owa""" +707 47 negative_sampler """basic""" +707 47 evaluator """rankbased""" +707 48 dataset """kinships""" +707 48 model """structuredembedding""" +707 48 loss """bceaftersigmoid""" +707 48 regularizer """no""" +707 48 optimizer """adam""" +707 48 training_loop """owa""" +707 48 negative_sampler """basic""" +707 48 evaluator """rankbased""" +707 49 dataset """kinships""" +707 49 model """structuredembedding""" +707 49 loss """bceaftersigmoid""" +707 49 regularizer """no""" +707 49 optimizer """adam""" 
+707 49 training_loop """owa""" +707 49 negative_sampler """basic""" +707 49 evaluator """rankbased""" +707 50 dataset """kinships""" +707 50 model """structuredembedding""" +707 50 loss """bceaftersigmoid""" +707 50 regularizer """no""" +707 50 optimizer """adam""" +707 50 training_loop """owa""" +707 50 negative_sampler """basic""" +707 50 evaluator """rankbased""" +707 51 dataset """kinships""" +707 51 model """structuredembedding""" +707 51 loss """bceaftersigmoid""" +707 51 regularizer """no""" +707 51 optimizer """adam""" +707 51 training_loop """owa""" +707 51 negative_sampler """basic""" +707 51 evaluator """rankbased""" +707 52 dataset """kinships""" +707 52 model """structuredembedding""" +707 52 loss """bceaftersigmoid""" +707 52 regularizer """no""" +707 52 optimizer """adam""" +707 52 training_loop """owa""" +707 52 negative_sampler """basic""" +707 52 evaluator """rankbased""" +707 53 dataset """kinships""" +707 53 model """structuredembedding""" +707 53 loss """bceaftersigmoid""" +707 53 regularizer """no""" +707 53 optimizer """adam""" +707 53 training_loop """owa""" +707 53 negative_sampler """basic""" +707 53 evaluator """rankbased""" +707 54 dataset """kinships""" +707 54 model """structuredembedding""" +707 54 loss """bceaftersigmoid""" +707 54 regularizer """no""" +707 54 optimizer """adam""" +707 54 training_loop """owa""" +707 54 negative_sampler """basic""" +707 54 evaluator """rankbased""" +707 55 dataset """kinships""" +707 55 model """structuredembedding""" +707 55 loss """bceaftersigmoid""" +707 55 regularizer """no""" +707 55 optimizer """adam""" +707 55 training_loop """owa""" +707 55 negative_sampler """basic""" +707 55 evaluator """rankbased""" +707 56 dataset """kinships""" +707 56 model """structuredembedding""" +707 56 loss """bceaftersigmoid""" +707 56 regularizer """no""" +707 56 optimizer """adam""" +707 56 training_loop """owa""" +707 56 negative_sampler """basic""" +707 56 evaluator """rankbased""" +707 57 dataset 
"""kinships""" +707 57 model """structuredembedding""" +707 57 loss """bceaftersigmoid""" +707 57 regularizer """no""" +707 57 optimizer """adam""" +707 57 training_loop """owa""" +707 57 negative_sampler """basic""" +707 57 evaluator """rankbased""" +707 58 dataset """kinships""" +707 58 model """structuredembedding""" +707 58 loss """bceaftersigmoid""" +707 58 regularizer """no""" +707 58 optimizer """adam""" +707 58 training_loop """owa""" +707 58 negative_sampler """basic""" +707 58 evaluator """rankbased""" +707 59 dataset """kinships""" +707 59 model """structuredembedding""" +707 59 loss """bceaftersigmoid""" +707 59 regularizer """no""" +707 59 optimizer """adam""" +707 59 training_loop """owa""" +707 59 negative_sampler """basic""" +707 59 evaluator """rankbased""" +707 60 dataset """kinships""" +707 60 model """structuredembedding""" +707 60 loss """bceaftersigmoid""" +707 60 regularizer """no""" +707 60 optimizer """adam""" +707 60 training_loop """owa""" +707 60 negative_sampler """basic""" +707 60 evaluator """rankbased""" +707 61 dataset """kinships""" +707 61 model """structuredembedding""" +707 61 loss """bceaftersigmoid""" +707 61 regularizer """no""" +707 61 optimizer """adam""" +707 61 training_loop """owa""" +707 61 negative_sampler """basic""" +707 61 evaluator """rankbased""" +707 62 dataset """kinships""" +707 62 model """structuredembedding""" +707 62 loss """bceaftersigmoid""" +707 62 regularizer """no""" +707 62 optimizer """adam""" +707 62 training_loop """owa""" +707 62 negative_sampler """basic""" +707 62 evaluator """rankbased""" +707 63 dataset """kinships""" +707 63 model """structuredembedding""" +707 63 loss """bceaftersigmoid""" +707 63 regularizer """no""" +707 63 optimizer """adam""" +707 63 training_loop """owa""" +707 63 negative_sampler """basic""" +707 63 evaluator """rankbased""" +707 64 dataset """kinships""" +707 64 model """structuredembedding""" +707 64 loss """bceaftersigmoid""" +707 64 regularizer """no""" +707 64 
optimizer """adam""" +707 64 training_loop """owa""" +707 64 negative_sampler """basic""" +707 64 evaluator """rankbased""" +707 65 dataset """kinships""" +707 65 model """structuredembedding""" +707 65 loss """bceaftersigmoid""" +707 65 regularizer """no""" +707 65 optimizer """adam""" +707 65 training_loop """owa""" +707 65 negative_sampler """basic""" +707 65 evaluator """rankbased""" +707 66 dataset """kinships""" +707 66 model """structuredembedding""" +707 66 loss """bceaftersigmoid""" +707 66 regularizer """no""" +707 66 optimizer """adam""" +707 66 training_loop """owa""" +707 66 negative_sampler """basic""" +707 66 evaluator """rankbased""" +707 67 dataset """kinships""" +707 67 model """structuredembedding""" +707 67 loss """bceaftersigmoid""" +707 67 regularizer """no""" +707 67 optimizer """adam""" +707 67 training_loop """owa""" +707 67 negative_sampler """basic""" +707 67 evaluator """rankbased""" +707 68 dataset """kinships""" +707 68 model """structuredembedding""" +707 68 loss """bceaftersigmoid""" +707 68 regularizer """no""" +707 68 optimizer """adam""" +707 68 training_loop """owa""" +707 68 negative_sampler """basic""" +707 68 evaluator """rankbased""" +707 69 dataset """kinships""" +707 69 model """structuredembedding""" +707 69 loss """bceaftersigmoid""" +707 69 regularizer """no""" +707 69 optimizer """adam""" +707 69 training_loop """owa""" +707 69 negative_sampler """basic""" +707 69 evaluator """rankbased""" +707 70 dataset """kinships""" +707 70 model """structuredembedding""" +707 70 loss """bceaftersigmoid""" +707 70 regularizer """no""" +707 70 optimizer """adam""" +707 70 training_loop """owa""" +707 70 negative_sampler """basic""" +707 70 evaluator """rankbased""" +707 71 dataset """kinships""" +707 71 model """structuredembedding""" +707 71 loss """bceaftersigmoid""" +707 71 regularizer """no""" +707 71 optimizer """adam""" +707 71 training_loop """owa""" +707 71 negative_sampler """basic""" +707 71 evaluator """rankbased""" +707 
72 dataset """kinships""" +707 72 model """structuredembedding""" +707 72 loss """bceaftersigmoid""" +707 72 regularizer """no""" +707 72 optimizer """adam""" +707 72 training_loop """owa""" +707 72 negative_sampler """basic""" +707 72 evaluator """rankbased""" +707 73 dataset """kinships""" +707 73 model """structuredembedding""" +707 73 loss """bceaftersigmoid""" +707 73 regularizer """no""" +707 73 optimizer """adam""" +707 73 training_loop """owa""" +707 73 negative_sampler """basic""" +707 73 evaluator """rankbased""" +707 74 dataset """kinships""" +707 74 model """structuredembedding""" +707 74 loss """bceaftersigmoid""" +707 74 regularizer """no""" +707 74 optimizer """adam""" +707 74 training_loop """owa""" +707 74 negative_sampler """basic""" +707 74 evaluator """rankbased""" +707 75 dataset """kinships""" +707 75 model """structuredembedding""" +707 75 loss """bceaftersigmoid""" +707 75 regularizer """no""" +707 75 optimizer """adam""" +707 75 training_loop """owa""" +707 75 negative_sampler """basic""" +707 75 evaluator """rankbased""" +707 76 dataset """kinships""" +707 76 model """structuredembedding""" +707 76 loss """bceaftersigmoid""" +707 76 regularizer """no""" +707 76 optimizer """adam""" +707 76 training_loop """owa""" +707 76 negative_sampler """basic""" +707 76 evaluator """rankbased""" +707 77 dataset """kinships""" +707 77 model """structuredembedding""" +707 77 loss """bceaftersigmoid""" +707 77 regularizer """no""" +707 77 optimizer """adam""" +707 77 training_loop """owa""" +707 77 negative_sampler """basic""" +707 77 evaluator """rankbased""" +707 78 dataset """kinships""" +707 78 model """structuredembedding""" +707 78 loss """bceaftersigmoid""" +707 78 regularizer """no""" +707 78 optimizer """adam""" +707 78 training_loop """owa""" +707 78 negative_sampler """basic""" +707 78 evaluator """rankbased""" +707 79 dataset """kinships""" +707 79 model """structuredembedding""" +707 79 loss """bceaftersigmoid""" +707 79 regularizer """no""" 
+707 79 optimizer """adam""" +707 79 training_loop """owa""" +707 79 negative_sampler """basic""" +707 79 evaluator """rankbased""" +707 80 dataset """kinships""" +707 80 model """structuredembedding""" +707 80 loss """bceaftersigmoid""" +707 80 regularizer """no""" +707 80 optimizer """adam""" +707 80 training_loop """owa""" +707 80 negative_sampler """basic""" +707 80 evaluator """rankbased""" +707 81 dataset """kinships""" +707 81 model """structuredembedding""" +707 81 loss """bceaftersigmoid""" +707 81 regularizer """no""" +707 81 optimizer """adam""" +707 81 training_loop """owa""" +707 81 negative_sampler """basic""" +707 81 evaluator """rankbased""" +707 82 dataset """kinships""" +707 82 model """structuredembedding""" +707 82 loss """bceaftersigmoid""" +707 82 regularizer """no""" +707 82 optimizer """adam""" +707 82 training_loop """owa""" +707 82 negative_sampler """basic""" +707 82 evaluator """rankbased""" +707 83 dataset """kinships""" +707 83 model """structuredembedding""" +707 83 loss """bceaftersigmoid""" +707 83 regularizer """no""" +707 83 optimizer """adam""" +707 83 training_loop """owa""" +707 83 negative_sampler """basic""" +707 83 evaluator """rankbased""" +707 84 dataset """kinships""" +707 84 model """structuredembedding""" +707 84 loss """bceaftersigmoid""" +707 84 regularizer """no""" +707 84 optimizer """adam""" +707 84 training_loop """owa""" +707 84 negative_sampler """basic""" +707 84 evaluator """rankbased""" +707 85 dataset """kinships""" +707 85 model """structuredembedding""" +707 85 loss """bceaftersigmoid""" +707 85 regularizer """no""" +707 85 optimizer """adam""" +707 85 training_loop """owa""" +707 85 negative_sampler """basic""" +707 85 evaluator """rankbased""" +707 86 dataset """kinships""" +707 86 model """structuredembedding""" +707 86 loss """bceaftersigmoid""" +707 86 regularizer """no""" +707 86 optimizer """adam""" +707 86 training_loop """owa""" +707 86 negative_sampler """basic""" +707 86 evaluator 
"""rankbased""" +707 87 dataset """kinships""" +707 87 model """structuredembedding""" +707 87 loss """bceaftersigmoid""" +707 87 regularizer """no""" +707 87 optimizer """adam""" +707 87 training_loop """owa""" +707 87 negative_sampler """basic""" +707 87 evaluator """rankbased""" +707 88 dataset """kinships""" +707 88 model """structuredembedding""" +707 88 loss """bceaftersigmoid""" +707 88 regularizer """no""" +707 88 optimizer """adam""" +707 88 training_loop """owa""" +707 88 negative_sampler """basic""" +707 88 evaluator """rankbased""" +707 89 dataset """kinships""" +707 89 model """structuredembedding""" +707 89 loss """bceaftersigmoid""" +707 89 regularizer """no""" +707 89 optimizer """adam""" +707 89 training_loop """owa""" +707 89 negative_sampler """basic""" +707 89 evaluator """rankbased""" +707 90 dataset """kinships""" +707 90 model """structuredembedding""" +707 90 loss """bceaftersigmoid""" +707 90 regularizer """no""" +707 90 optimizer """adam""" +707 90 training_loop """owa""" +707 90 negative_sampler """basic""" +707 90 evaluator """rankbased""" +707 91 dataset """kinships""" +707 91 model """structuredembedding""" +707 91 loss """bceaftersigmoid""" +707 91 regularizer """no""" +707 91 optimizer """adam""" +707 91 training_loop """owa""" +707 91 negative_sampler """basic""" +707 91 evaluator """rankbased""" +707 92 dataset """kinships""" +707 92 model """structuredembedding""" +707 92 loss """bceaftersigmoid""" +707 92 regularizer """no""" +707 92 optimizer """adam""" +707 92 training_loop """owa""" +707 92 negative_sampler """basic""" +707 92 evaluator """rankbased""" +707 93 dataset """kinships""" +707 93 model """structuredembedding""" +707 93 loss """bceaftersigmoid""" +707 93 regularizer """no""" +707 93 optimizer """adam""" +707 93 training_loop """owa""" +707 93 negative_sampler """basic""" +707 93 evaluator """rankbased""" +707 94 dataset """kinships""" +707 94 model """structuredembedding""" +707 94 loss """bceaftersigmoid""" +707 94 
regularizer """no""" +707 94 optimizer """adam""" +707 94 training_loop """owa""" +707 94 negative_sampler """basic""" +707 94 evaluator """rankbased""" +707 95 dataset """kinships""" +707 95 model """structuredembedding""" +707 95 loss """bceaftersigmoid""" +707 95 regularizer """no""" +707 95 optimizer """adam""" +707 95 training_loop """owa""" +707 95 negative_sampler """basic""" +707 95 evaluator """rankbased""" +707 96 dataset """kinships""" +707 96 model """structuredembedding""" +707 96 loss """bceaftersigmoid""" +707 96 regularizer """no""" +707 96 optimizer """adam""" +707 96 training_loop """owa""" +707 96 negative_sampler """basic""" +707 96 evaluator """rankbased""" +707 97 dataset """kinships""" +707 97 model """structuredembedding""" +707 97 loss """bceaftersigmoid""" +707 97 regularizer """no""" +707 97 optimizer """adam""" +707 97 training_loop """owa""" +707 97 negative_sampler """basic""" +707 97 evaluator """rankbased""" +707 98 dataset """kinships""" +707 98 model """structuredembedding""" +707 98 loss """bceaftersigmoid""" +707 98 regularizer """no""" +707 98 optimizer """adam""" +707 98 training_loop """owa""" +707 98 negative_sampler """basic""" +707 98 evaluator """rankbased""" +707 99 dataset """kinships""" +707 99 model """structuredembedding""" +707 99 loss """bceaftersigmoid""" +707 99 regularizer """no""" +707 99 optimizer """adam""" +707 99 training_loop """owa""" +707 99 negative_sampler """basic""" +707 99 evaluator """rankbased""" +707 100 dataset """kinships""" +707 100 model """structuredembedding""" +707 100 loss """bceaftersigmoid""" +707 100 regularizer """no""" +707 100 optimizer """adam""" +707 100 training_loop """owa""" +707 100 negative_sampler """basic""" +707 100 evaluator """rankbased""" +708 1 model.embedding_dim 1.0 +708 1 model.scoring_fct_norm 1.0 +708 1 optimizer.lr 0.0011562878035596524 +708 1 negative_sampler.num_negs_per_pos 44.0 +708 1 training.batch_size 1.0 +708 2 model.embedding_dim 1.0 +708 2 
model.scoring_fct_norm 1.0 +708 2 optimizer.lr 0.05608013264294868 +708 2 negative_sampler.num_negs_per_pos 67.0 +708 2 training.batch_size 0.0 +708 3 model.embedding_dim 2.0 +708 3 model.scoring_fct_norm 1.0 +708 3 optimizer.lr 0.006670656993795028 +708 3 negative_sampler.num_negs_per_pos 49.0 +708 3 training.batch_size 1.0 +708 4 model.embedding_dim 1.0 +708 4 model.scoring_fct_norm 2.0 +708 4 optimizer.lr 0.005807377982116937 +708 4 negative_sampler.num_negs_per_pos 14.0 +708 4 training.batch_size 0.0 +708 5 model.embedding_dim 2.0 +708 5 model.scoring_fct_norm 1.0 +708 5 optimizer.lr 0.0076270752093207834 +708 5 negative_sampler.num_negs_per_pos 86.0 +708 5 training.batch_size 0.0 +708 6 model.embedding_dim 1.0 +708 6 model.scoring_fct_norm 1.0 +708 6 optimizer.lr 0.007567909426615457 +708 6 negative_sampler.num_negs_per_pos 95.0 +708 6 training.batch_size 0.0 +708 7 model.embedding_dim 2.0 +708 7 model.scoring_fct_norm 2.0 +708 7 optimizer.lr 0.0026765642321972057 +708 7 negative_sampler.num_negs_per_pos 31.0 +708 7 training.batch_size 0.0 +708 8 model.embedding_dim 2.0 +708 8 model.scoring_fct_norm 2.0 +708 8 optimizer.lr 0.009934031672667458 +708 8 negative_sampler.num_negs_per_pos 17.0 +708 8 training.batch_size 0.0 +708 9 model.embedding_dim 1.0 +708 9 model.scoring_fct_norm 1.0 +708 9 optimizer.lr 0.0018450346579552386 +708 9 negative_sampler.num_negs_per_pos 10.0 +708 9 training.batch_size 0.0 +708 10 model.embedding_dim 2.0 +708 10 model.scoring_fct_norm 2.0 +708 10 optimizer.lr 0.02770840649978273 +708 10 negative_sampler.num_negs_per_pos 48.0 +708 10 training.batch_size 2.0 +708 11 model.embedding_dim 1.0 +708 11 model.scoring_fct_norm 1.0 +708 11 optimizer.lr 0.00807276127467552 +708 11 negative_sampler.num_negs_per_pos 49.0 +708 11 training.batch_size 2.0 +708 12 model.embedding_dim 2.0 +708 12 model.scoring_fct_norm 1.0 +708 12 optimizer.lr 0.006326223194876317 +708 12 negative_sampler.num_negs_per_pos 58.0 +708 12 training.batch_size 1.0 +708 13 
model.embedding_dim 2.0 +708 13 model.scoring_fct_norm 2.0 +708 13 optimizer.lr 0.023910415877916587 +708 13 negative_sampler.num_negs_per_pos 0.0 +708 13 training.batch_size 2.0 +708 14 model.embedding_dim 1.0 +708 14 model.scoring_fct_norm 2.0 +708 14 optimizer.lr 0.02009253196286837 +708 14 negative_sampler.num_negs_per_pos 93.0 +708 14 training.batch_size 1.0 +708 15 model.embedding_dim 1.0 +708 15 model.scoring_fct_norm 2.0 +708 15 optimizer.lr 0.002277071972612224 +708 15 negative_sampler.num_negs_per_pos 39.0 +708 15 training.batch_size 0.0 +708 16 model.embedding_dim 1.0 +708 16 model.scoring_fct_norm 1.0 +708 16 optimizer.lr 0.004519491703433207 +708 16 negative_sampler.num_negs_per_pos 89.0 +708 16 training.batch_size 1.0 +708 17 model.embedding_dim 2.0 +708 17 model.scoring_fct_norm 1.0 +708 17 optimizer.lr 0.0038185401253596216 +708 17 negative_sampler.num_negs_per_pos 73.0 +708 17 training.batch_size 0.0 +708 18 model.embedding_dim 1.0 +708 18 model.scoring_fct_norm 2.0 +708 18 optimizer.lr 0.002214037561883757 +708 18 negative_sampler.num_negs_per_pos 70.0 +708 18 training.batch_size 1.0 +708 19 model.embedding_dim 0.0 +708 19 model.scoring_fct_norm 1.0 +708 19 optimizer.lr 0.0035609065914586693 +708 19 negative_sampler.num_negs_per_pos 12.0 +708 19 training.batch_size 2.0 +708 20 model.embedding_dim 2.0 +708 20 model.scoring_fct_norm 2.0 +708 20 optimizer.lr 0.010371243573348086 +708 20 negative_sampler.num_negs_per_pos 34.0 +708 20 training.batch_size 0.0 +708 21 model.embedding_dim 0.0 +708 21 model.scoring_fct_norm 1.0 +708 21 optimizer.lr 0.08687751840321258 +708 21 negative_sampler.num_negs_per_pos 21.0 +708 21 training.batch_size 1.0 +708 22 model.embedding_dim 1.0 +708 22 model.scoring_fct_norm 1.0 +708 22 optimizer.lr 0.010557047504464921 +708 22 negative_sampler.num_negs_per_pos 20.0 +708 22 training.batch_size 1.0 +708 23 model.embedding_dim 1.0 +708 23 model.scoring_fct_norm 2.0 +708 23 optimizer.lr 0.004248598267434664 +708 23 
negative_sampler.num_negs_per_pos 31.0 +708 23 training.batch_size 2.0 +708 24 model.embedding_dim 1.0 +708 24 model.scoring_fct_norm 2.0 +708 24 optimizer.lr 0.003533666119833248 +708 24 negative_sampler.num_negs_per_pos 11.0 +708 24 training.batch_size 2.0 +708 25 model.embedding_dim 1.0 +708 25 model.scoring_fct_norm 1.0 +708 25 optimizer.lr 0.0020088507810506104 +708 25 negative_sampler.num_negs_per_pos 61.0 +708 25 training.batch_size 1.0 +708 26 model.embedding_dim 1.0 +708 26 model.scoring_fct_norm 2.0 +708 26 optimizer.lr 0.001842053047289999 +708 26 negative_sampler.num_negs_per_pos 99.0 +708 26 training.batch_size 0.0 +708 27 model.embedding_dim 2.0 +708 27 model.scoring_fct_norm 2.0 +708 27 optimizer.lr 0.0016400111398623264 +708 27 negative_sampler.num_negs_per_pos 7.0 +708 27 training.batch_size 0.0 +708 28 model.embedding_dim 2.0 +708 28 model.scoring_fct_norm 2.0 +708 28 optimizer.lr 0.01285547154583241 +708 28 negative_sampler.num_negs_per_pos 75.0 +708 28 training.batch_size 2.0 +708 29 model.embedding_dim 0.0 +708 29 model.scoring_fct_norm 1.0 +708 29 optimizer.lr 0.0028140140294121174 +708 29 negative_sampler.num_negs_per_pos 95.0 +708 29 training.batch_size 1.0 +708 30 model.embedding_dim 0.0 +708 30 model.scoring_fct_norm 2.0 +708 30 optimizer.lr 0.05661825414354396 +708 30 negative_sampler.num_negs_per_pos 37.0 +708 30 training.batch_size 0.0 +708 31 model.embedding_dim 0.0 +708 31 model.scoring_fct_norm 2.0 +708 31 optimizer.lr 0.005934955288259231 +708 31 negative_sampler.num_negs_per_pos 83.0 +708 31 training.batch_size 2.0 +708 32 model.embedding_dim 2.0 +708 32 model.scoring_fct_norm 1.0 +708 32 optimizer.lr 0.003212713486550083 +708 32 negative_sampler.num_negs_per_pos 27.0 +708 32 training.batch_size 0.0 +708 33 model.embedding_dim 0.0 +708 33 model.scoring_fct_norm 1.0 +708 33 optimizer.lr 0.016413567483450794 +708 33 negative_sampler.num_negs_per_pos 30.0 +708 33 training.batch_size 1.0 +708 34 model.embedding_dim 2.0 +708 34 
model.scoring_fct_norm 2.0 +708 34 optimizer.lr 0.01360780286914141 +708 34 negative_sampler.num_negs_per_pos 35.0 +708 34 training.batch_size 2.0 +708 35 model.embedding_dim 0.0 +708 35 model.scoring_fct_norm 1.0 +708 35 optimizer.lr 0.008844294320201309 +708 35 negative_sampler.num_negs_per_pos 31.0 +708 35 training.batch_size 0.0 +708 36 model.embedding_dim 2.0 +708 36 model.scoring_fct_norm 2.0 +708 36 optimizer.lr 0.013967447312965392 +708 36 negative_sampler.num_negs_per_pos 72.0 +708 36 training.batch_size 1.0 +708 37 model.embedding_dim 0.0 +708 37 model.scoring_fct_norm 1.0 +708 37 optimizer.lr 0.003168404019057714 +708 37 negative_sampler.num_negs_per_pos 22.0 +708 37 training.batch_size 0.0 +708 38 model.embedding_dim 0.0 +708 38 model.scoring_fct_norm 1.0 +708 38 optimizer.lr 0.008633161484423967 +708 38 negative_sampler.num_negs_per_pos 4.0 +708 38 training.batch_size 2.0 +708 39 model.embedding_dim 1.0 +708 39 model.scoring_fct_norm 2.0 +708 39 optimizer.lr 0.029552507750863287 +708 39 negative_sampler.num_negs_per_pos 26.0 +708 39 training.batch_size 2.0 +708 40 model.embedding_dim 1.0 +708 40 model.scoring_fct_norm 1.0 +708 40 optimizer.lr 0.022686594625101475 +708 40 negative_sampler.num_negs_per_pos 39.0 +708 40 training.batch_size 0.0 +708 41 model.embedding_dim 2.0 +708 41 model.scoring_fct_norm 2.0 +708 41 optimizer.lr 0.0040667129976500955 +708 41 negative_sampler.num_negs_per_pos 98.0 +708 41 training.batch_size 1.0 +708 42 model.embedding_dim 1.0 +708 42 model.scoring_fct_norm 2.0 +708 42 optimizer.lr 0.005706780690454092 +708 42 negative_sampler.num_negs_per_pos 18.0 +708 42 training.batch_size 0.0 +708 43 model.embedding_dim 0.0 +708 43 model.scoring_fct_norm 2.0 +708 43 optimizer.lr 0.05808179704980307 +708 43 negative_sampler.num_negs_per_pos 34.0 +708 43 training.batch_size 0.0 +708 44 model.embedding_dim 2.0 +708 44 model.scoring_fct_norm 2.0 +708 44 optimizer.lr 0.07047860734610578 +708 44 negative_sampler.num_negs_per_pos 5.0 +708 44 
training.batch_size 0.0 +708 45 model.embedding_dim 2.0 +708 45 model.scoring_fct_norm 1.0 +708 45 optimizer.lr 0.012207333233803866 +708 45 negative_sampler.num_negs_per_pos 3.0 +708 45 training.batch_size 0.0 +708 46 model.embedding_dim 2.0 +708 46 model.scoring_fct_norm 1.0 +708 46 optimizer.lr 0.0015154650910873214 +708 46 negative_sampler.num_negs_per_pos 95.0 +708 46 training.batch_size 1.0 +708 47 model.embedding_dim 0.0 +708 47 model.scoring_fct_norm 2.0 +708 47 optimizer.lr 0.0033460997967660307 +708 47 negative_sampler.num_negs_per_pos 43.0 +708 47 training.batch_size 1.0 +708 48 model.embedding_dim 2.0 +708 48 model.scoring_fct_norm 2.0 +708 48 optimizer.lr 0.014768375965957358 +708 48 negative_sampler.num_negs_per_pos 88.0 +708 48 training.batch_size 1.0 +708 49 model.embedding_dim 1.0 +708 49 model.scoring_fct_norm 1.0 +708 49 optimizer.lr 0.03145325109465052 +708 49 negative_sampler.num_negs_per_pos 91.0 +708 49 training.batch_size 0.0 +708 50 model.embedding_dim 0.0 +708 50 model.scoring_fct_norm 1.0 +708 50 optimizer.lr 0.011315626882623588 +708 50 negative_sampler.num_negs_per_pos 26.0 +708 50 training.batch_size 2.0 +708 51 model.embedding_dim 2.0 +708 51 model.scoring_fct_norm 2.0 +708 51 optimizer.lr 0.005376164724123115 +708 51 negative_sampler.num_negs_per_pos 30.0 +708 51 training.batch_size 1.0 +708 52 model.embedding_dim 1.0 +708 52 model.scoring_fct_norm 2.0 +708 52 optimizer.lr 0.008330295771081696 +708 52 negative_sampler.num_negs_per_pos 51.0 +708 52 training.batch_size 0.0 +708 53 model.embedding_dim 2.0 +708 53 model.scoring_fct_norm 2.0 +708 53 optimizer.lr 0.013492208394814301 +708 53 negative_sampler.num_negs_per_pos 22.0 +708 53 training.batch_size 0.0 +708 54 model.embedding_dim 1.0 +708 54 model.scoring_fct_norm 2.0 +708 54 optimizer.lr 0.0019390828800998994 +708 54 negative_sampler.num_negs_per_pos 65.0 +708 54 training.batch_size 1.0 +708 55 model.embedding_dim 0.0 +708 55 model.scoring_fct_norm 2.0 +708 55 optimizer.lr 
0.0029865851628287582 +708 55 negative_sampler.num_negs_per_pos 12.0 +708 55 training.batch_size 2.0 +708 56 model.embedding_dim 1.0 +708 56 model.scoring_fct_norm 1.0 +708 56 optimizer.lr 0.019831742332121604 +708 56 negative_sampler.num_negs_per_pos 22.0 +708 56 training.batch_size 1.0 +708 57 model.embedding_dim 2.0 +708 57 model.scoring_fct_norm 1.0 +708 57 optimizer.lr 0.00813062009478638 +708 57 negative_sampler.num_negs_per_pos 82.0 +708 57 training.batch_size 0.0 +708 58 model.embedding_dim 0.0 +708 58 model.scoring_fct_norm 2.0 +708 58 optimizer.lr 0.007891921005446256 +708 58 negative_sampler.num_negs_per_pos 3.0 +708 58 training.batch_size 1.0 +708 59 model.embedding_dim 1.0 +708 59 model.scoring_fct_norm 1.0 +708 59 optimizer.lr 0.009467182739872715 +708 59 negative_sampler.num_negs_per_pos 24.0 +708 59 training.batch_size 2.0 +708 60 model.embedding_dim 1.0 +708 60 model.scoring_fct_norm 1.0 +708 60 optimizer.lr 0.07609488792029717 +708 60 negative_sampler.num_negs_per_pos 30.0 +708 60 training.batch_size 1.0 +708 61 model.embedding_dim 2.0 +708 61 model.scoring_fct_norm 1.0 +708 61 optimizer.lr 0.020288903306307986 +708 61 negative_sampler.num_negs_per_pos 55.0 +708 61 training.batch_size 2.0 +708 62 model.embedding_dim 1.0 +708 62 model.scoring_fct_norm 2.0 +708 62 optimizer.lr 0.061465119176967675 +708 62 negative_sampler.num_negs_per_pos 75.0 +708 62 training.batch_size 2.0 +708 63 model.embedding_dim 1.0 +708 63 model.scoring_fct_norm 2.0 +708 63 optimizer.lr 0.0029208740348354552 +708 63 negative_sampler.num_negs_per_pos 63.0 +708 63 training.batch_size 0.0 +708 64 model.embedding_dim 1.0 +708 64 model.scoring_fct_norm 2.0 +708 64 optimizer.lr 0.08789812357350331 +708 64 negative_sampler.num_negs_per_pos 81.0 +708 64 training.batch_size 0.0 +708 65 model.embedding_dim 1.0 +708 65 model.scoring_fct_norm 1.0 +708 65 optimizer.lr 0.005740321851734442 +708 65 negative_sampler.num_negs_per_pos 31.0 +708 65 training.batch_size 1.0 +708 66 
model.embedding_dim 1.0 +708 66 model.scoring_fct_norm 1.0 +708 66 optimizer.lr 0.0026127886827721283 +708 66 negative_sampler.num_negs_per_pos 2.0 +708 66 training.batch_size 0.0 +708 67 model.embedding_dim 1.0 +708 67 model.scoring_fct_norm 2.0 +708 67 optimizer.lr 0.0051302927556185875 +708 67 negative_sampler.num_negs_per_pos 68.0 +708 67 training.batch_size 0.0 +708 68 model.embedding_dim 0.0 +708 68 model.scoring_fct_norm 2.0 +708 68 optimizer.lr 0.01812129049359274 +708 68 negative_sampler.num_negs_per_pos 33.0 +708 68 training.batch_size 0.0 +708 69 model.embedding_dim 0.0 +708 69 model.scoring_fct_norm 1.0 +708 69 optimizer.lr 0.09332142751719145 +708 69 negative_sampler.num_negs_per_pos 76.0 +708 69 training.batch_size 2.0 +708 70 model.embedding_dim 0.0 +708 70 model.scoring_fct_norm 1.0 +708 70 optimizer.lr 0.04561393630411974 +708 70 negative_sampler.num_negs_per_pos 92.0 +708 70 training.batch_size 1.0 +708 71 model.embedding_dim 2.0 +708 71 model.scoring_fct_norm 2.0 +708 71 optimizer.lr 0.01684532628755521 +708 71 negative_sampler.num_negs_per_pos 26.0 +708 71 training.batch_size 2.0 +708 72 model.embedding_dim 1.0 +708 72 model.scoring_fct_norm 1.0 +708 72 optimizer.lr 0.03385896821666326 +708 72 negative_sampler.num_negs_per_pos 78.0 +708 72 training.batch_size 1.0 +708 73 model.embedding_dim 2.0 +708 73 model.scoring_fct_norm 1.0 +708 73 optimizer.lr 0.002852083034567369 +708 73 negative_sampler.num_negs_per_pos 39.0 +708 73 training.batch_size 1.0 +708 74 model.embedding_dim 1.0 +708 74 model.scoring_fct_norm 1.0 +708 74 optimizer.lr 0.04294491726158767 +708 74 negative_sampler.num_negs_per_pos 55.0 +708 74 training.batch_size 2.0 +708 75 model.embedding_dim 0.0 +708 75 model.scoring_fct_norm 2.0 +708 75 optimizer.lr 0.001470346648592457 +708 75 negative_sampler.num_negs_per_pos 47.0 +708 75 training.batch_size 1.0 +708 76 model.embedding_dim 2.0 +708 76 model.scoring_fct_norm 1.0 +708 76 optimizer.lr 0.005282192021341995 +708 76 
negative_sampler.num_negs_per_pos 98.0 +708 76 training.batch_size 1.0 +708 77 model.embedding_dim 2.0 +708 77 model.scoring_fct_norm 2.0 +708 77 optimizer.lr 0.001268393042015583 +708 77 negative_sampler.num_negs_per_pos 65.0 +708 77 training.batch_size 0.0 +708 78 model.embedding_dim 1.0 +708 78 model.scoring_fct_norm 1.0 +708 78 optimizer.lr 0.01077605127684599 +708 78 negative_sampler.num_negs_per_pos 44.0 +708 78 training.batch_size 2.0 +708 79 model.embedding_dim 1.0 +708 79 model.scoring_fct_norm 1.0 +708 79 optimizer.lr 0.0014062827000435724 +708 79 negative_sampler.num_negs_per_pos 27.0 +708 79 training.batch_size 2.0 +708 80 model.embedding_dim 1.0 +708 80 model.scoring_fct_norm 1.0 +708 80 optimizer.lr 0.0015869086432994028 +708 80 negative_sampler.num_negs_per_pos 54.0 +708 80 training.batch_size 1.0 +708 81 model.embedding_dim 1.0 +708 81 model.scoring_fct_norm 2.0 +708 81 optimizer.lr 0.0056252365647790495 +708 81 negative_sampler.num_negs_per_pos 65.0 +708 81 training.batch_size 1.0 +708 82 model.embedding_dim 1.0 +708 82 model.scoring_fct_norm 2.0 +708 82 optimizer.lr 0.01139202542208884 +708 82 negative_sampler.num_negs_per_pos 78.0 +708 82 training.batch_size 2.0 +708 83 model.embedding_dim 1.0 +708 83 model.scoring_fct_norm 1.0 +708 83 optimizer.lr 0.011765763033037006 +708 83 negative_sampler.num_negs_per_pos 57.0 +708 83 training.batch_size 1.0 +708 84 model.embedding_dim 2.0 +708 84 model.scoring_fct_norm 2.0 +708 84 optimizer.lr 0.004611139135057397 +708 84 negative_sampler.num_negs_per_pos 41.0 +708 84 training.batch_size 0.0 +708 85 model.embedding_dim 0.0 +708 85 model.scoring_fct_norm 2.0 +708 85 optimizer.lr 0.0010131709878710364 +708 85 negative_sampler.num_negs_per_pos 69.0 +708 85 training.batch_size 0.0 +708 86 model.embedding_dim 2.0 +708 86 model.scoring_fct_norm 2.0 +708 86 optimizer.lr 0.0014455842632319436 +708 86 negative_sampler.num_negs_per_pos 16.0 +708 86 training.batch_size 1.0 +708 87 model.embedding_dim 1.0 +708 87 
model.scoring_fct_norm 2.0 +708 87 optimizer.lr 0.0972985786471499 +708 87 negative_sampler.num_negs_per_pos 75.0 +708 87 training.batch_size 0.0 +708 88 model.embedding_dim 1.0 +708 88 model.scoring_fct_norm 1.0 +708 88 optimizer.lr 0.00297020485972113 +708 88 negative_sampler.num_negs_per_pos 60.0 +708 88 training.batch_size 2.0 +708 89 model.embedding_dim 2.0 +708 89 model.scoring_fct_norm 2.0 +708 89 optimizer.lr 0.043180605073589215 +708 89 negative_sampler.num_negs_per_pos 31.0 +708 89 training.batch_size 2.0 +708 90 model.embedding_dim 0.0 +708 90 model.scoring_fct_norm 2.0 +708 90 optimizer.lr 0.0022424930799743972 +708 90 negative_sampler.num_negs_per_pos 27.0 +708 90 training.batch_size 1.0 +708 91 model.embedding_dim 2.0 +708 91 model.scoring_fct_norm 2.0 +708 91 optimizer.lr 0.09471679405449904 +708 91 negative_sampler.num_negs_per_pos 96.0 +708 91 training.batch_size 2.0 +708 92 model.embedding_dim 1.0 +708 92 model.scoring_fct_norm 2.0 +708 92 optimizer.lr 0.0427253536841122 +708 92 negative_sampler.num_negs_per_pos 44.0 +708 92 training.batch_size 2.0 +708 93 model.embedding_dim 2.0 +708 93 model.scoring_fct_norm 2.0 +708 93 optimizer.lr 0.007778419701256106 +708 93 negative_sampler.num_negs_per_pos 88.0 +708 93 training.batch_size 2.0 +708 94 model.embedding_dim 1.0 +708 94 model.scoring_fct_norm 1.0 +708 94 optimizer.lr 0.09451478407466897 +708 94 negative_sampler.num_negs_per_pos 33.0 +708 94 training.batch_size 1.0 +708 95 model.embedding_dim 0.0 +708 95 model.scoring_fct_norm 2.0 +708 95 optimizer.lr 0.02710213625599304 +708 95 negative_sampler.num_negs_per_pos 9.0 +708 95 training.batch_size 1.0 +708 96 model.embedding_dim 0.0 +708 96 model.scoring_fct_norm 2.0 +708 96 optimizer.lr 0.0517478543263593 +708 96 negative_sampler.num_negs_per_pos 94.0 +708 96 training.batch_size 0.0 +708 97 model.embedding_dim 2.0 +708 97 model.scoring_fct_norm 2.0 +708 97 optimizer.lr 0.0027213991425162655 +708 97 negative_sampler.num_negs_per_pos 80.0 +708 97 
training.batch_size 0.0 +708 98 model.embedding_dim 2.0 +708 98 model.scoring_fct_norm 1.0 +708 98 optimizer.lr 0.0019297618239813405 +708 98 negative_sampler.num_negs_per_pos 6.0 +708 98 training.batch_size 1.0 +708 99 model.embedding_dim 2.0 +708 99 model.scoring_fct_norm 2.0 +708 99 optimizer.lr 0.03304089697373722 +708 99 negative_sampler.num_negs_per_pos 56.0 +708 99 training.batch_size 2.0 +708 100 model.embedding_dim 1.0 +708 100 model.scoring_fct_norm 2.0 +708 100 optimizer.lr 0.004813604884853592 +708 100 negative_sampler.num_negs_per_pos 41.0 +708 100 training.batch_size 1.0 +708 1 dataset """kinships""" +708 1 model """structuredembedding""" +708 1 loss """softplus""" +708 1 regularizer """no""" +708 1 optimizer """adam""" +708 1 training_loop """owa""" +708 1 negative_sampler """basic""" +708 1 evaluator """rankbased""" +708 2 dataset """kinships""" +708 2 model """structuredembedding""" +708 2 loss """softplus""" +708 2 regularizer """no""" +708 2 optimizer """adam""" +708 2 training_loop """owa""" +708 2 negative_sampler """basic""" +708 2 evaluator """rankbased""" +708 3 dataset """kinships""" +708 3 model """structuredembedding""" +708 3 loss """softplus""" +708 3 regularizer """no""" +708 3 optimizer """adam""" +708 3 training_loop """owa""" +708 3 negative_sampler """basic""" +708 3 evaluator """rankbased""" +708 4 dataset """kinships""" +708 4 model """structuredembedding""" +708 4 loss """softplus""" +708 4 regularizer """no""" +708 4 optimizer """adam""" +708 4 training_loop """owa""" +708 4 negative_sampler """basic""" +708 4 evaluator """rankbased""" +708 5 dataset """kinships""" +708 5 model """structuredembedding""" +708 5 loss """softplus""" +708 5 regularizer """no""" +708 5 optimizer """adam""" +708 5 training_loop """owa""" +708 5 negative_sampler """basic""" +708 5 evaluator """rankbased""" +708 6 dataset """kinships""" +708 6 model """structuredembedding""" +708 6 loss """softplus""" +708 6 regularizer """no""" +708 6 optimizer 
"""adam""" +708 6 training_loop """owa""" +708 6 negative_sampler """basic""" +708 6 evaluator """rankbased""" +708 7 dataset """kinships""" +708 7 model """structuredembedding""" +708 7 loss """softplus""" +708 7 regularizer """no""" +708 7 optimizer """adam""" +708 7 training_loop """owa""" +708 7 negative_sampler """basic""" +708 7 evaluator """rankbased""" +708 8 dataset """kinships""" +708 8 model """structuredembedding""" +708 8 loss """softplus""" +708 8 regularizer """no""" +708 8 optimizer """adam""" +708 8 training_loop """owa""" +708 8 negative_sampler """basic""" +708 8 evaluator """rankbased""" +708 9 dataset """kinships""" +708 9 model """structuredembedding""" +708 9 loss """softplus""" +708 9 regularizer """no""" +708 9 optimizer """adam""" +708 9 training_loop """owa""" +708 9 negative_sampler """basic""" +708 9 evaluator """rankbased""" +708 10 dataset """kinships""" +708 10 model """structuredembedding""" +708 10 loss """softplus""" +708 10 regularizer """no""" +708 10 optimizer """adam""" +708 10 training_loop """owa""" +708 10 negative_sampler """basic""" +708 10 evaluator """rankbased""" +708 11 dataset """kinships""" +708 11 model """structuredembedding""" +708 11 loss """softplus""" +708 11 regularizer """no""" +708 11 optimizer """adam""" +708 11 training_loop """owa""" +708 11 negative_sampler """basic""" +708 11 evaluator """rankbased""" +708 12 dataset """kinships""" +708 12 model """structuredembedding""" +708 12 loss """softplus""" +708 12 regularizer """no""" +708 12 optimizer """adam""" +708 12 training_loop """owa""" +708 12 negative_sampler """basic""" +708 12 evaluator """rankbased""" +708 13 dataset """kinships""" +708 13 model """structuredembedding""" +708 13 loss """softplus""" +708 13 regularizer """no""" +708 13 optimizer """adam""" +708 13 training_loop """owa""" +708 13 negative_sampler """basic""" +708 13 evaluator """rankbased""" +708 14 dataset """kinships""" +708 14 model """structuredembedding""" +708 14 loss 
"""softplus""" +708 14 regularizer """no""" +708 14 optimizer """adam""" +708 14 training_loop """owa""" +708 14 negative_sampler """basic""" +708 14 evaluator """rankbased""" +708 15 dataset """kinships""" +708 15 model """structuredembedding""" +708 15 loss """softplus""" +708 15 regularizer """no""" +708 15 optimizer """adam""" +708 15 training_loop """owa""" +708 15 negative_sampler """basic""" +708 15 evaluator """rankbased""" +708 16 dataset """kinships""" +708 16 model """structuredembedding""" +708 16 loss """softplus""" +708 16 regularizer """no""" +708 16 optimizer """adam""" +708 16 training_loop """owa""" +708 16 negative_sampler """basic""" +708 16 evaluator """rankbased""" +708 17 dataset """kinships""" +708 17 model """structuredembedding""" +708 17 loss """softplus""" +708 17 regularizer """no""" +708 17 optimizer """adam""" +708 17 training_loop """owa""" +708 17 negative_sampler """basic""" +708 17 evaluator """rankbased""" +708 18 dataset """kinships""" +708 18 model """structuredembedding""" +708 18 loss """softplus""" +708 18 regularizer """no""" +708 18 optimizer """adam""" +708 18 training_loop """owa""" +708 18 negative_sampler """basic""" +708 18 evaluator """rankbased""" +708 19 dataset """kinships""" +708 19 model """structuredembedding""" +708 19 loss """softplus""" +708 19 regularizer """no""" +708 19 optimizer """adam""" +708 19 training_loop """owa""" +708 19 negative_sampler """basic""" +708 19 evaluator """rankbased""" +708 20 dataset """kinships""" +708 20 model """structuredembedding""" +708 20 loss """softplus""" +708 20 regularizer """no""" +708 20 optimizer """adam""" +708 20 training_loop """owa""" +708 20 negative_sampler """basic""" +708 20 evaluator """rankbased""" +708 21 dataset """kinships""" +708 21 model """structuredembedding""" +708 21 loss """softplus""" +708 21 regularizer """no""" +708 21 optimizer """adam""" +708 21 training_loop """owa""" +708 21 negative_sampler """basic""" +708 21 evaluator """rankbased""" 
+708 22 dataset """kinships""" +708 22 model """structuredembedding""" +708 22 loss """softplus""" +708 22 regularizer """no""" +708 22 optimizer """adam""" +708 22 training_loop """owa""" +708 22 negative_sampler """basic""" +708 22 evaluator """rankbased""" +708 23 dataset """kinships""" +708 23 model """structuredembedding""" +708 23 loss """softplus""" +708 23 regularizer """no""" +708 23 optimizer """adam""" +708 23 training_loop """owa""" +708 23 negative_sampler """basic""" +708 23 evaluator """rankbased""" +708 24 dataset """kinships""" +708 24 model """structuredembedding""" +708 24 loss """softplus""" +708 24 regularizer """no""" +708 24 optimizer """adam""" +708 24 training_loop """owa""" +708 24 negative_sampler """basic""" +708 24 evaluator """rankbased""" +708 25 dataset """kinships""" +708 25 model """structuredembedding""" +708 25 loss """softplus""" +708 25 regularizer """no""" +708 25 optimizer """adam""" +708 25 training_loop """owa""" +708 25 negative_sampler """basic""" +708 25 evaluator """rankbased""" +708 26 dataset """kinships""" +708 26 model """structuredembedding""" +708 26 loss """softplus""" +708 26 regularizer """no""" +708 26 optimizer """adam""" +708 26 training_loop """owa""" +708 26 negative_sampler """basic""" +708 26 evaluator """rankbased""" +708 27 dataset """kinships""" +708 27 model """structuredembedding""" +708 27 loss """softplus""" +708 27 regularizer """no""" +708 27 optimizer """adam""" +708 27 training_loop """owa""" +708 27 negative_sampler """basic""" +708 27 evaluator """rankbased""" +708 28 dataset """kinships""" +708 28 model """structuredembedding""" +708 28 loss """softplus""" +708 28 regularizer """no""" +708 28 optimizer """adam""" +708 28 training_loop """owa""" +708 28 negative_sampler """basic""" +708 28 evaluator """rankbased""" +708 29 dataset """kinships""" +708 29 model """structuredembedding""" +708 29 loss """softplus""" +708 29 regularizer """no""" +708 29 optimizer """adam""" +708 29 training_loop 
"""owa""" +708 29 negative_sampler """basic""" +708 29 evaluator """rankbased""" +708 30 dataset """kinships""" +708 30 model """structuredembedding""" +708 30 loss """softplus""" +708 30 regularizer """no""" +708 30 optimizer """adam""" +708 30 training_loop """owa""" +708 30 negative_sampler """basic""" +708 30 evaluator """rankbased""" +708 31 dataset """kinships""" +708 31 model """structuredembedding""" +708 31 loss """softplus""" +708 31 regularizer """no""" +708 31 optimizer """adam""" +708 31 training_loop """owa""" +708 31 negative_sampler """basic""" +708 31 evaluator """rankbased""" +708 32 dataset """kinships""" +708 32 model """structuredembedding""" +708 32 loss """softplus""" +708 32 regularizer """no""" +708 32 optimizer """adam""" +708 32 training_loop """owa""" +708 32 negative_sampler """basic""" +708 32 evaluator """rankbased""" +708 33 dataset """kinships""" +708 33 model """structuredembedding""" +708 33 loss """softplus""" +708 33 regularizer """no""" +708 33 optimizer """adam""" +708 33 training_loop """owa""" +708 33 negative_sampler """basic""" +708 33 evaluator """rankbased""" +708 34 dataset """kinships""" +708 34 model """structuredembedding""" +708 34 loss """softplus""" +708 34 regularizer """no""" +708 34 optimizer """adam""" +708 34 training_loop """owa""" +708 34 negative_sampler """basic""" +708 34 evaluator """rankbased""" +708 35 dataset """kinships""" +708 35 model """structuredembedding""" +708 35 loss """softplus""" +708 35 regularizer """no""" +708 35 optimizer """adam""" +708 35 training_loop """owa""" +708 35 negative_sampler """basic""" +708 35 evaluator """rankbased""" +708 36 dataset """kinships""" +708 36 model """structuredembedding""" +708 36 loss """softplus""" +708 36 regularizer """no""" +708 36 optimizer """adam""" +708 36 training_loop """owa""" +708 36 negative_sampler """basic""" +708 36 evaluator """rankbased""" +708 37 dataset """kinships""" +708 37 model """structuredembedding""" +708 37 loss """softplus""" 
+708 37 regularizer """no""" +708 37 optimizer """adam""" +708 37 training_loop """owa""" +708 37 negative_sampler """basic""" +708 37 evaluator """rankbased""" +708 38 dataset """kinships""" +708 38 model """structuredembedding""" +708 38 loss """softplus""" +708 38 regularizer """no""" +708 38 optimizer """adam""" +708 38 training_loop """owa""" +708 38 negative_sampler """basic""" +708 38 evaluator """rankbased""" +708 39 dataset """kinships""" +708 39 model """structuredembedding""" +708 39 loss """softplus""" +708 39 regularizer """no""" +708 39 optimizer """adam""" +708 39 training_loop """owa""" +708 39 negative_sampler """basic""" +708 39 evaluator """rankbased""" +708 40 dataset """kinships""" +708 40 model """structuredembedding""" +708 40 loss """softplus""" +708 40 regularizer """no""" +708 40 optimizer """adam""" +708 40 training_loop """owa""" +708 40 negative_sampler """basic""" +708 40 evaluator """rankbased""" +708 41 dataset """kinships""" +708 41 model """structuredembedding""" +708 41 loss """softplus""" +708 41 regularizer """no""" +708 41 optimizer """adam""" +708 41 training_loop """owa""" +708 41 negative_sampler """basic""" +708 41 evaluator """rankbased""" +708 42 dataset """kinships""" +708 42 model """structuredembedding""" +708 42 loss """softplus""" +708 42 regularizer """no""" +708 42 optimizer """adam""" +708 42 training_loop """owa""" +708 42 negative_sampler """basic""" +708 42 evaluator """rankbased""" +708 43 dataset """kinships""" +708 43 model """structuredembedding""" +708 43 loss """softplus""" +708 43 regularizer """no""" +708 43 optimizer """adam""" +708 43 training_loop """owa""" +708 43 negative_sampler """basic""" +708 43 evaluator """rankbased""" +708 44 dataset """kinships""" +708 44 model """structuredembedding""" +708 44 loss """softplus""" +708 44 regularizer """no""" +708 44 optimizer """adam""" +708 44 training_loop """owa""" +708 44 negative_sampler """basic""" +708 44 evaluator """rankbased""" +708 45 dataset 
"""kinships""" +708 45 model """structuredembedding""" +708 45 loss """softplus""" +708 45 regularizer """no""" +708 45 optimizer """adam""" +708 45 training_loop """owa""" +708 45 negative_sampler """basic""" +708 45 evaluator """rankbased""" +708 46 dataset """kinships""" +708 46 model """structuredembedding""" +708 46 loss """softplus""" +708 46 regularizer """no""" +708 46 optimizer """adam""" +708 46 training_loop """owa""" +708 46 negative_sampler """basic""" +708 46 evaluator """rankbased""" +708 47 dataset """kinships""" +708 47 model """structuredembedding""" +708 47 loss """softplus""" +708 47 regularizer """no""" +708 47 optimizer """adam""" +708 47 training_loop """owa""" +708 47 negative_sampler """basic""" +708 47 evaluator """rankbased""" +708 48 dataset """kinships""" +708 48 model """structuredembedding""" +708 48 loss """softplus""" +708 48 regularizer """no""" +708 48 optimizer """adam""" +708 48 training_loop """owa""" +708 48 negative_sampler """basic""" +708 48 evaluator """rankbased""" +708 49 dataset """kinships""" +708 49 model """structuredembedding""" +708 49 loss """softplus""" +708 49 regularizer """no""" +708 49 optimizer """adam""" +708 49 training_loop """owa""" +708 49 negative_sampler """basic""" +708 49 evaluator """rankbased""" +708 50 dataset """kinships""" +708 50 model """structuredembedding""" +708 50 loss """softplus""" +708 50 regularizer """no""" +708 50 optimizer """adam""" +708 50 training_loop """owa""" +708 50 negative_sampler """basic""" +708 50 evaluator """rankbased""" +708 51 dataset """kinships""" +708 51 model """structuredembedding""" +708 51 loss """softplus""" +708 51 regularizer """no""" +708 51 optimizer """adam""" +708 51 training_loop """owa""" +708 51 negative_sampler """basic""" +708 51 evaluator """rankbased""" +708 52 dataset """kinships""" +708 52 model """structuredembedding""" +708 52 loss """softplus""" +708 52 regularizer """no""" +708 52 optimizer """adam""" +708 52 training_loop """owa""" +708 
52 negative_sampler """basic""" +708 52 evaluator """rankbased""" +708 53 dataset """kinships""" +708 53 model """structuredembedding""" +708 53 loss """softplus""" +708 53 regularizer """no""" +708 53 optimizer """adam""" +708 53 training_loop """owa""" +708 53 negative_sampler """basic""" +708 53 evaluator """rankbased""" +708 54 dataset """kinships""" +708 54 model """structuredembedding""" +708 54 loss """softplus""" +708 54 regularizer """no""" +708 54 optimizer """adam""" +708 54 training_loop """owa""" +708 54 negative_sampler """basic""" +708 54 evaluator """rankbased""" +708 55 dataset """kinships""" +708 55 model """structuredembedding""" +708 55 loss """softplus""" +708 55 regularizer """no""" +708 55 optimizer """adam""" +708 55 training_loop """owa""" +708 55 negative_sampler """basic""" +708 55 evaluator """rankbased""" +708 56 dataset """kinships""" +708 56 model """structuredembedding""" +708 56 loss """softplus""" +708 56 regularizer """no""" +708 56 optimizer """adam""" +708 56 training_loop """owa""" +708 56 negative_sampler """basic""" +708 56 evaluator """rankbased""" +708 57 dataset """kinships""" +708 57 model """structuredembedding""" +708 57 loss """softplus""" +708 57 regularizer """no""" +708 57 optimizer """adam""" +708 57 training_loop """owa""" +708 57 negative_sampler """basic""" +708 57 evaluator """rankbased""" +708 58 dataset """kinships""" +708 58 model """structuredembedding""" +708 58 loss """softplus""" +708 58 regularizer """no""" +708 58 optimizer """adam""" +708 58 training_loop """owa""" +708 58 negative_sampler """basic""" +708 58 evaluator """rankbased""" +708 59 dataset """kinships""" +708 59 model """structuredembedding""" +708 59 loss """softplus""" +708 59 regularizer """no""" +708 59 optimizer """adam""" +708 59 training_loop """owa""" +708 59 negative_sampler """basic""" +708 59 evaluator """rankbased""" +708 60 dataset """kinships""" +708 60 model """structuredembedding""" +708 60 loss """softplus""" +708 60 
regularizer """no""" +708 60 optimizer """adam""" +708 60 training_loop """owa""" +708 60 negative_sampler """basic""" +708 60 evaluator """rankbased""" +708 61 dataset """kinships""" +708 61 model """structuredembedding""" +708 61 loss """softplus""" +708 61 regularizer """no""" +708 61 optimizer """adam""" +708 61 training_loop """owa""" +708 61 negative_sampler """basic""" +708 61 evaluator """rankbased""" +708 62 dataset """kinships""" +708 62 model """structuredembedding""" +708 62 loss """softplus""" +708 62 regularizer """no""" +708 62 optimizer """adam""" +708 62 training_loop """owa""" +708 62 negative_sampler """basic""" +708 62 evaluator """rankbased""" +708 63 dataset """kinships""" +708 63 model """structuredembedding""" +708 63 loss """softplus""" +708 63 regularizer """no""" +708 63 optimizer """adam""" +708 63 training_loop """owa""" +708 63 negative_sampler """basic""" +708 63 evaluator """rankbased""" +708 64 dataset """kinships""" +708 64 model """structuredembedding""" +708 64 loss """softplus""" +708 64 regularizer """no""" +708 64 optimizer """adam""" +708 64 training_loop """owa""" +708 64 negative_sampler """basic""" +708 64 evaluator """rankbased""" +708 65 dataset """kinships""" +708 65 model """structuredembedding""" +708 65 loss """softplus""" +708 65 regularizer """no""" +708 65 optimizer """adam""" +708 65 training_loop """owa""" +708 65 negative_sampler """basic""" +708 65 evaluator """rankbased""" +708 66 dataset """kinships""" +708 66 model """structuredembedding""" +708 66 loss """softplus""" +708 66 regularizer """no""" +708 66 optimizer """adam""" +708 66 training_loop """owa""" +708 66 negative_sampler """basic""" +708 66 evaluator """rankbased""" +708 67 dataset """kinships""" +708 67 model """structuredembedding""" +708 67 loss """softplus""" +708 67 regularizer """no""" +708 67 optimizer """adam""" +708 67 training_loop """owa""" +708 67 negative_sampler """basic""" +708 67 evaluator """rankbased""" +708 68 dataset 
"""kinships""" +708 68 model """structuredembedding""" +708 68 loss """softplus""" +708 68 regularizer """no""" +708 68 optimizer """adam""" +708 68 training_loop """owa""" +708 68 negative_sampler """basic""" +708 68 evaluator """rankbased""" +708 69 dataset """kinships""" +708 69 model """structuredembedding""" +708 69 loss """softplus""" +708 69 regularizer """no""" +708 69 optimizer """adam""" +708 69 training_loop """owa""" +708 69 negative_sampler """basic""" +708 69 evaluator """rankbased""" +708 70 dataset """kinships""" +708 70 model """structuredembedding""" +708 70 loss """softplus""" +708 70 regularizer """no""" +708 70 optimizer """adam""" +708 70 training_loop """owa""" +708 70 negative_sampler """basic""" +708 70 evaluator """rankbased""" +708 71 dataset """kinships""" +708 71 model """structuredembedding""" +708 71 loss """softplus""" +708 71 regularizer """no""" +708 71 optimizer """adam""" +708 71 training_loop """owa""" +708 71 negative_sampler """basic""" +708 71 evaluator """rankbased""" +708 72 dataset """kinships""" +708 72 model """structuredembedding""" +708 72 loss """softplus""" +708 72 regularizer """no""" +708 72 optimizer """adam""" +708 72 training_loop """owa""" +708 72 negative_sampler """basic""" +708 72 evaluator """rankbased""" +708 73 dataset """kinships""" +708 73 model """structuredembedding""" +708 73 loss """softplus""" +708 73 regularizer """no""" +708 73 optimizer """adam""" +708 73 training_loop """owa""" +708 73 negative_sampler """basic""" +708 73 evaluator """rankbased""" +708 74 dataset """kinships""" +708 74 model """structuredembedding""" +708 74 loss """softplus""" +708 74 regularizer """no""" +708 74 optimizer """adam""" +708 74 training_loop """owa""" +708 74 negative_sampler """basic""" +708 74 evaluator """rankbased""" +708 75 dataset """kinships""" +708 75 model """structuredembedding""" +708 75 loss """softplus""" +708 75 regularizer """no""" +708 75 optimizer """adam""" +708 75 training_loop """owa""" +708 
75 negative_sampler """basic""" +708 75 evaluator """rankbased""" +708 76 dataset """kinships""" +708 76 model """structuredembedding""" +708 76 loss """softplus""" +708 76 regularizer """no""" +708 76 optimizer """adam""" +708 76 training_loop """owa""" +708 76 negative_sampler """basic""" +708 76 evaluator """rankbased""" +708 77 dataset """kinships""" +708 77 model """structuredembedding""" +708 77 loss """softplus""" +708 77 regularizer """no""" +708 77 optimizer """adam""" +708 77 training_loop """owa""" +708 77 negative_sampler """basic""" +708 77 evaluator """rankbased""" +708 78 dataset """kinships""" +708 78 model """structuredembedding""" +708 78 loss """softplus""" +708 78 regularizer """no""" +708 78 optimizer """adam""" +708 78 training_loop """owa""" +708 78 negative_sampler """basic""" +708 78 evaluator """rankbased""" +708 79 dataset """kinships""" +708 79 model """structuredembedding""" +708 79 loss """softplus""" +708 79 regularizer """no""" +708 79 optimizer """adam""" +708 79 training_loop """owa""" +708 79 negative_sampler """basic""" +708 79 evaluator """rankbased""" +708 80 dataset """kinships""" +708 80 model """structuredembedding""" +708 80 loss """softplus""" +708 80 regularizer """no""" +708 80 optimizer """adam""" +708 80 training_loop """owa""" +708 80 negative_sampler """basic""" +708 80 evaluator """rankbased""" +708 81 dataset """kinships""" +708 81 model """structuredembedding""" +708 81 loss """softplus""" +708 81 regularizer """no""" +708 81 optimizer """adam""" +708 81 training_loop """owa""" +708 81 negative_sampler """basic""" +708 81 evaluator """rankbased""" +708 82 dataset """kinships""" +708 82 model """structuredembedding""" +708 82 loss """softplus""" +708 82 regularizer """no""" +708 82 optimizer """adam""" +708 82 training_loop """owa""" +708 82 negative_sampler """basic""" +708 82 evaluator """rankbased""" +708 83 dataset """kinships""" +708 83 model """structuredembedding""" +708 83 loss """softplus""" +708 83 
regularizer """no""" +708 83 optimizer """adam""" +708 83 training_loop """owa""" +708 83 negative_sampler """basic""" +708 83 evaluator """rankbased""" +708 84 dataset """kinships""" +708 84 model """structuredembedding""" +708 84 loss """softplus""" +708 84 regularizer """no""" +708 84 optimizer """adam""" +708 84 training_loop """owa""" +708 84 negative_sampler """basic""" +708 84 evaluator """rankbased""" +708 85 dataset """kinships""" +708 85 model """structuredembedding""" +708 85 loss """softplus""" +708 85 regularizer """no""" +708 85 optimizer """adam""" +708 85 training_loop """owa""" +708 85 negative_sampler """basic""" +708 85 evaluator """rankbased""" +708 86 dataset """kinships""" +708 86 model """structuredembedding""" +708 86 loss """softplus""" +708 86 regularizer """no""" +708 86 optimizer """adam""" +708 86 training_loop """owa""" +708 86 negative_sampler """basic""" +708 86 evaluator """rankbased""" +708 87 dataset """kinships""" +708 87 model """structuredembedding""" +708 87 loss """softplus""" +708 87 regularizer """no""" +708 87 optimizer """adam""" +708 87 training_loop """owa""" +708 87 negative_sampler """basic""" +708 87 evaluator """rankbased""" +708 88 dataset """kinships""" +708 88 model """structuredembedding""" +708 88 loss """softplus""" +708 88 regularizer """no""" +708 88 optimizer """adam""" +708 88 training_loop """owa""" +708 88 negative_sampler """basic""" +708 88 evaluator """rankbased""" +708 89 dataset """kinships""" +708 89 model """structuredembedding""" +708 89 loss """softplus""" +708 89 regularizer """no""" +708 89 optimizer """adam""" +708 89 training_loop """owa""" +708 89 negative_sampler """basic""" +708 89 evaluator """rankbased""" +708 90 dataset """kinships""" +708 90 model """structuredembedding""" +708 90 loss """softplus""" +708 90 regularizer """no""" +708 90 optimizer """adam""" +708 90 training_loop """owa""" +708 90 negative_sampler """basic""" +708 90 evaluator """rankbased""" +708 91 dataset 
"""kinships""" +708 91 model """structuredembedding""" +708 91 loss """softplus""" +708 91 regularizer """no""" +708 91 optimizer """adam""" +708 91 training_loop """owa""" +708 91 negative_sampler """basic""" +708 91 evaluator """rankbased""" +708 92 dataset """kinships""" +708 92 model """structuredembedding""" +708 92 loss """softplus""" +708 92 regularizer """no""" +708 92 optimizer """adam""" +708 92 training_loop """owa""" +708 92 negative_sampler """basic""" +708 92 evaluator """rankbased""" +708 93 dataset """kinships""" +708 93 model """structuredembedding""" +708 93 loss """softplus""" +708 93 regularizer """no""" +708 93 optimizer """adam""" +708 93 training_loop """owa""" +708 93 negative_sampler """basic""" +708 93 evaluator """rankbased""" +708 94 dataset """kinships""" +708 94 model """structuredembedding""" +708 94 loss """softplus""" +708 94 regularizer """no""" +708 94 optimizer """adam""" +708 94 training_loop """owa""" +708 94 negative_sampler """basic""" +708 94 evaluator """rankbased""" +708 95 dataset """kinships""" +708 95 model """structuredembedding""" +708 95 loss """softplus""" +708 95 regularizer """no""" +708 95 optimizer """adam""" +708 95 training_loop """owa""" +708 95 negative_sampler """basic""" +708 95 evaluator """rankbased""" +708 96 dataset """kinships""" +708 96 model """structuredembedding""" +708 96 loss """softplus""" +708 96 regularizer """no""" +708 96 optimizer """adam""" +708 96 training_loop """owa""" +708 96 negative_sampler """basic""" +708 96 evaluator """rankbased""" +708 97 dataset """kinships""" +708 97 model """structuredembedding""" +708 97 loss """softplus""" +708 97 regularizer """no""" +708 97 optimizer """adam""" +708 97 training_loop """owa""" +708 97 negative_sampler """basic""" +708 97 evaluator """rankbased""" +708 98 dataset """kinships""" +708 98 model """structuredembedding""" +708 98 loss """softplus""" +708 98 regularizer """no""" +708 98 optimizer """adam""" +708 98 training_loop """owa""" +708 
98 negative_sampler """basic""" +708 98 evaluator """rankbased""" +708 99 dataset """kinships""" +708 99 model """structuredembedding""" +708 99 loss """softplus""" +708 99 regularizer """no""" +708 99 optimizer """adam""" +708 99 training_loop """owa""" +708 99 negative_sampler """basic""" +708 99 evaluator """rankbased""" +708 100 dataset """kinships""" +708 100 model """structuredembedding""" +708 100 loss """softplus""" +708 100 regularizer """no""" +708 100 optimizer """adam""" +708 100 training_loop """owa""" +708 100 negative_sampler """basic""" +708 100 evaluator """rankbased""" +709 1 model.embedding_dim 0.0 +709 1 model.scoring_fct_norm 2.0 +709 1 loss.margin 7.134537353938564 +709 1 optimizer.lr 0.057341729728303155 +709 1 negative_sampler.num_negs_per_pos 95.0 +709 1 training.batch_size 1.0 +709 2 model.embedding_dim 1.0 +709 2 model.scoring_fct_norm 2.0 +709 2 loss.margin 2.799707257313967 +709 2 optimizer.lr 0.0465582048443481 +709 2 negative_sampler.num_negs_per_pos 81.0 +709 2 training.batch_size 2.0 +709 3 model.embedding_dim 2.0 +709 3 model.scoring_fct_norm 2.0 +709 3 loss.margin 5.61278778057131 +709 3 optimizer.lr 0.0874232182044114 +709 3 negative_sampler.num_negs_per_pos 60.0 +709 3 training.batch_size 1.0 +709 4 model.embedding_dim 1.0 +709 4 model.scoring_fct_norm 2.0 +709 4 loss.margin 8.942209620387919 +709 4 optimizer.lr 0.0049231075780326984 +709 4 negative_sampler.num_negs_per_pos 57.0 +709 4 training.batch_size 1.0 +709 5 model.embedding_dim 0.0 +709 5 model.scoring_fct_norm 2.0 +709 5 loss.margin 1.119168912866467 +709 5 optimizer.lr 0.05666624130215814 +709 5 negative_sampler.num_negs_per_pos 24.0 +709 5 training.batch_size 1.0 +709 6 model.embedding_dim 2.0 +709 6 model.scoring_fct_norm 2.0 +709 6 loss.margin 0.9632802927551214 +709 6 optimizer.lr 0.009846248348916467 +709 6 negative_sampler.num_negs_per_pos 6.0 +709 6 training.batch_size 2.0 +709 7 model.embedding_dim 1.0 +709 7 model.scoring_fct_norm 2.0 +709 7 loss.margin 
3.278000672737598 +709 7 optimizer.lr 0.06801744548166892 +709 7 negative_sampler.num_negs_per_pos 46.0 +709 7 training.batch_size 1.0 +709 8 model.embedding_dim 1.0 +709 8 model.scoring_fct_norm 1.0 +709 8 loss.margin 6.3488967757245645 +709 8 optimizer.lr 0.004211915161293752 +709 8 negative_sampler.num_negs_per_pos 47.0 +709 8 training.batch_size 1.0 +709 9 model.embedding_dim 1.0 +709 9 model.scoring_fct_norm 2.0 +709 9 loss.margin 9.987800066886077 +709 9 optimizer.lr 0.004705078657121023 +709 9 negative_sampler.num_negs_per_pos 60.0 +709 9 training.batch_size 1.0 +709 10 model.embedding_dim 1.0 +709 10 model.scoring_fct_norm 1.0 +709 10 loss.margin 6.423600711675467 +709 10 optimizer.lr 0.0011316852344922853 +709 10 negative_sampler.num_negs_per_pos 9.0 +709 10 training.batch_size 0.0 +709 11 model.embedding_dim 1.0 +709 11 model.scoring_fct_norm 1.0 +709 11 loss.margin 5.236973119255769 +709 11 optimizer.lr 0.01874892025381467 +709 11 negative_sampler.num_negs_per_pos 34.0 +709 11 training.batch_size 0.0 +709 12 model.embedding_dim 0.0 +709 12 model.scoring_fct_norm 1.0 +709 12 loss.margin 0.9006655554469642 +709 12 optimizer.lr 0.0019052521646497815 +709 12 negative_sampler.num_negs_per_pos 40.0 +709 12 training.batch_size 0.0 +709 13 model.embedding_dim 1.0 +709 13 model.scoring_fct_norm 1.0 +709 13 loss.margin 1.4739241296004162 +709 13 optimizer.lr 0.0058025450074115584 +709 13 negative_sampler.num_negs_per_pos 74.0 +709 13 training.batch_size 0.0 +709 14 model.embedding_dim 2.0 +709 14 model.scoring_fct_norm 1.0 +709 14 loss.margin 9.85764348957985 +709 14 optimizer.lr 0.02387891301592518 +709 14 negative_sampler.num_negs_per_pos 27.0 +709 14 training.batch_size 0.0 +709 15 model.embedding_dim 1.0 +709 15 model.scoring_fct_norm 2.0 +709 15 loss.margin 9.99634097531505 +709 15 optimizer.lr 0.036423165439719234 +709 15 negative_sampler.num_negs_per_pos 59.0 +709 15 training.batch_size 2.0 +709 16 model.embedding_dim 2.0 +709 16 model.scoring_fct_norm 1.0 
+709 16 loss.margin 1.2103836125018652 +709 16 optimizer.lr 0.016978354358878324 +709 16 negative_sampler.num_negs_per_pos 43.0 +709 16 training.batch_size 0.0 +709 17 model.embedding_dim 0.0 +709 17 model.scoring_fct_norm 1.0 +709 17 loss.margin 6.986664056433149 +709 17 optimizer.lr 0.013561765143843555 +709 17 negative_sampler.num_negs_per_pos 24.0 +709 17 training.batch_size 1.0 +709 18 model.embedding_dim 2.0 +709 18 model.scoring_fct_norm 2.0 +709 18 loss.margin 9.66691787613923 +709 18 optimizer.lr 0.05736054304975463 +709 18 negative_sampler.num_negs_per_pos 45.0 +709 18 training.batch_size 0.0 +709 19 model.embedding_dim 0.0 +709 19 model.scoring_fct_norm 1.0 +709 19 loss.margin 9.211828302675999 +709 19 optimizer.lr 0.00593821405475724 +709 19 negative_sampler.num_negs_per_pos 63.0 +709 19 training.batch_size 1.0 +709 20 model.embedding_dim 2.0 +709 20 model.scoring_fct_norm 1.0 +709 20 loss.margin 3.0877811099907135 +709 20 optimizer.lr 0.006394649310165823 +709 20 negative_sampler.num_negs_per_pos 68.0 +709 20 training.batch_size 1.0 +709 21 model.embedding_dim 2.0 +709 21 model.scoring_fct_norm 2.0 +709 21 loss.margin 7.359383449453872 +709 21 optimizer.lr 0.0013274536404358093 +709 21 negative_sampler.num_negs_per_pos 10.0 +709 21 training.batch_size 0.0 +709 22 model.embedding_dim 0.0 +709 22 model.scoring_fct_norm 1.0 +709 22 loss.margin 9.576520425371946 +709 22 optimizer.lr 0.0029424934959368172 +709 22 negative_sampler.num_negs_per_pos 4.0 +709 22 training.batch_size 0.0 +709 23 model.embedding_dim 1.0 +709 23 model.scoring_fct_norm 2.0 +709 23 loss.margin 9.700739354912441 +709 23 optimizer.lr 0.0012910492780177525 +709 23 negative_sampler.num_negs_per_pos 41.0 +709 23 training.batch_size 2.0 +709 24 model.embedding_dim 0.0 +709 24 model.scoring_fct_norm 1.0 +709 24 loss.margin 1.627751108221191 +709 24 optimizer.lr 0.004044218388760217 +709 24 negative_sampler.num_negs_per_pos 84.0 +709 24 training.batch_size 2.0 +709 25 model.embedding_dim 2.0 
+709 25 model.scoring_fct_norm 1.0 +709 25 loss.margin 7.385925439069448 +709 25 optimizer.lr 0.05858273302219128 +709 25 negative_sampler.num_negs_per_pos 6.0 +709 25 training.batch_size 0.0 +709 26 model.embedding_dim 0.0 +709 26 model.scoring_fct_norm 1.0 +709 26 loss.margin 8.85303295303542 +709 26 optimizer.lr 0.021746722863293783 +709 26 negative_sampler.num_negs_per_pos 2.0 +709 26 training.batch_size 1.0 +709 27 model.embedding_dim 2.0 +709 27 model.scoring_fct_norm 1.0 +709 27 loss.margin 0.7132266062367656 +709 27 optimizer.lr 0.04866094849582896 +709 27 negative_sampler.num_negs_per_pos 99.0 +709 27 training.batch_size 0.0 +709 28 model.embedding_dim 1.0 +709 28 model.scoring_fct_norm 1.0 +709 28 loss.margin 9.002955805347316 +709 28 optimizer.lr 0.07857179214791778 +709 28 negative_sampler.num_negs_per_pos 22.0 +709 28 training.batch_size 1.0 +709 29 model.embedding_dim 1.0 +709 29 model.scoring_fct_norm 2.0 +709 29 loss.margin 1.0238924760040242 +709 29 optimizer.lr 0.002019698706285521 +709 29 negative_sampler.num_negs_per_pos 53.0 +709 29 training.batch_size 1.0 +709 30 model.embedding_dim 0.0 +709 30 model.scoring_fct_norm 1.0 +709 30 loss.margin 9.041029924371463 +709 30 optimizer.lr 0.0011673737397687963 +709 30 negative_sampler.num_negs_per_pos 84.0 +709 30 training.batch_size 2.0 +709 31 model.embedding_dim 0.0 +709 31 model.scoring_fct_norm 2.0 +709 31 loss.margin 6.161361364083049 +709 31 optimizer.lr 0.05562349887316361 +709 31 negative_sampler.num_negs_per_pos 35.0 +709 31 training.batch_size 2.0 +709 32 model.embedding_dim 1.0 +709 32 model.scoring_fct_norm 2.0 +709 32 loss.margin 3.7877743351563424 +709 32 optimizer.lr 0.033063212098245025 +709 32 negative_sampler.num_negs_per_pos 13.0 +709 32 training.batch_size 2.0 +709 33 model.embedding_dim 0.0 +709 33 model.scoring_fct_norm 1.0 +709 33 loss.margin 1.5041420641560233 +709 33 optimizer.lr 0.07265138990613307 +709 33 negative_sampler.num_negs_per_pos 40.0 +709 33 training.batch_size 0.0 
+709 34 model.embedding_dim 2.0 +709 34 model.scoring_fct_norm 1.0 +709 34 loss.margin 5.429440305938492 +709 34 optimizer.lr 0.0014719031383424847 +709 34 negative_sampler.num_negs_per_pos 49.0 +709 34 training.batch_size 2.0 +709 35 model.embedding_dim 2.0 +709 35 model.scoring_fct_norm 2.0 +709 35 loss.margin 4.464717738125128 +709 35 optimizer.lr 0.07087197761039904 +709 35 negative_sampler.num_negs_per_pos 18.0 +709 35 training.batch_size 1.0 +709 36 model.embedding_dim 0.0 +709 36 model.scoring_fct_norm 2.0 +709 36 loss.margin 0.7373121788694318 +709 36 optimizer.lr 0.0268144720495676 +709 36 negative_sampler.num_negs_per_pos 50.0 +709 36 training.batch_size 0.0 +709 37 model.embedding_dim 1.0 +709 37 model.scoring_fct_norm 1.0 +709 37 loss.margin 5.137580998489386 +709 37 optimizer.lr 0.01846877033780317 +709 37 negative_sampler.num_negs_per_pos 57.0 +709 37 training.batch_size 1.0 +709 38 model.embedding_dim 1.0 +709 38 model.scoring_fct_norm 2.0 +709 38 loss.margin 0.5192486664128841 +709 38 optimizer.lr 0.011577449835420835 +709 38 negative_sampler.num_negs_per_pos 46.0 +709 38 training.batch_size 1.0 +709 39 model.embedding_dim 2.0 +709 39 model.scoring_fct_norm 1.0 +709 39 loss.margin 7.0654567790442915 +709 39 optimizer.lr 0.06718362370528129 +709 39 negative_sampler.num_negs_per_pos 73.0 +709 39 training.batch_size 2.0 +709 40 model.embedding_dim 0.0 +709 40 model.scoring_fct_norm 1.0 +709 40 loss.margin 5.331575445908319 +709 40 optimizer.lr 0.008066409509220072 +709 40 negative_sampler.num_negs_per_pos 15.0 +709 40 training.batch_size 2.0 +709 41 model.embedding_dim 0.0 +709 41 model.scoring_fct_norm 2.0 +709 41 loss.margin 1.6194665377282202 +709 41 optimizer.lr 0.007969475762239433 +709 41 negative_sampler.num_negs_per_pos 64.0 +709 41 training.batch_size 0.0 +709 42 model.embedding_dim 0.0 +709 42 model.scoring_fct_norm 2.0 +709 42 loss.margin 0.8710323305431602 +709 42 optimizer.lr 0.0018932618610723391 +709 42 negative_sampler.num_negs_per_pos 
48.0 +709 42 training.batch_size 2.0 +709 43 model.embedding_dim 1.0 +709 43 model.scoring_fct_norm 2.0 +709 43 loss.margin 8.911986262447225 +709 43 optimizer.lr 0.0015167560119399024 +709 43 negative_sampler.num_negs_per_pos 83.0 +709 43 training.batch_size 1.0 +709 44 model.embedding_dim 1.0 +709 44 model.scoring_fct_norm 2.0 +709 44 loss.margin 5.099166924625316 +709 44 optimizer.lr 0.04135447610357308 +709 44 negative_sampler.num_negs_per_pos 36.0 +709 44 training.batch_size 2.0 +709 45 model.embedding_dim 1.0 +709 45 model.scoring_fct_norm 1.0 +709 45 loss.margin 0.6946659326661007 +709 45 optimizer.lr 0.011488713245760795 +709 45 negative_sampler.num_negs_per_pos 81.0 +709 45 training.batch_size 0.0 +709 46 model.embedding_dim 0.0 +709 46 model.scoring_fct_norm 2.0 +709 46 loss.margin 5.550435336734117 +709 46 optimizer.lr 0.0033801790333036963 +709 46 negative_sampler.num_negs_per_pos 55.0 +709 46 training.batch_size 2.0 +709 47 model.embedding_dim 0.0 +709 47 model.scoring_fct_norm 2.0 +709 47 loss.margin 6.930512172637506 +709 47 optimizer.lr 0.0019229111676618543 +709 47 negative_sampler.num_negs_per_pos 34.0 +709 47 training.batch_size 1.0 +709 48 model.embedding_dim 2.0 +709 48 model.scoring_fct_norm 2.0 +709 48 loss.margin 4.82630311378388 +709 48 optimizer.lr 0.001567773652930965 +709 48 negative_sampler.num_negs_per_pos 68.0 +709 48 training.batch_size 1.0 +709 49 model.embedding_dim 0.0 +709 49 model.scoring_fct_norm 2.0 +709 49 loss.margin 3.1426151041172563 +709 49 optimizer.lr 0.001253955100179481 +709 49 negative_sampler.num_negs_per_pos 38.0 +709 49 training.batch_size 1.0 +709 50 model.embedding_dim 2.0 +709 50 model.scoring_fct_norm 2.0 +709 50 loss.margin 6.308930377405858 +709 50 optimizer.lr 0.07152029031737749 +709 50 negative_sampler.num_negs_per_pos 17.0 +709 50 training.batch_size 2.0 +709 51 model.embedding_dim 0.0 +709 51 model.scoring_fct_norm 1.0 +709 51 loss.margin 7.108276571249876 +709 51 optimizer.lr 0.005620309137810907 +709 
51 negative_sampler.num_negs_per_pos 5.0 +709 51 training.batch_size 0.0 +709 52 model.embedding_dim 0.0 +709 52 model.scoring_fct_norm 2.0 +709 52 loss.margin 8.821850689261055 +709 52 optimizer.lr 0.017309780974868814 +709 52 negative_sampler.num_negs_per_pos 90.0 +709 52 training.batch_size 1.0 +709 53 model.embedding_dim 1.0 +709 53 model.scoring_fct_norm 1.0 +709 53 loss.margin 4.58630182491306 +709 53 optimizer.lr 0.0011938038314281442 +709 53 negative_sampler.num_negs_per_pos 54.0 +709 53 training.batch_size 1.0 +709 54 model.embedding_dim 2.0 +709 54 model.scoring_fct_norm 2.0 +709 54 loss.margin 3.712711362597433 +709 54 optimizer.lr 0.0044373708251509965 +709 54 negative_sampler.num_negs_per_pos 23.0 +709 54 training.batch_size 0.0 +709 55 model.embedding_dim 0.0 +709 55 model.scoring_fct_norm 2.0 +709 55 loss.margin 1.0593552934555033 +709 55 optimizer.lr 0.003158790102584399 +709 55 negative_sampler.num_negs_per_pos 32.0 +709 55 training.batch_size 2.0 +709 56 model.embedding_dim 0.0 +709 56 model.scoring_fct_norm 2.0 +709 56 loss.margin 5.705617947652898 +709 56 optimizer.lr 0.0016485889963998768 +709 56 negative_sampler.num_negs_per_pos 80.0 +709 56 training.batch_size 0.0 +709 57 model.embedding_dim 1.0 +709 57 model.scoring_fct_norm 2.0 +709 57 loss.margin 5.841375558094293 +709 57 optimizer.lr 0.0020858402253833594 +709 57 negative_sampler.num_negs_per_pos 20.0 +709 57 training.batch_size 0.0 +709 58 model.embedding_dim 1.0 +709 58 model.scoring_fct_norm 2.0 +709 58 loss.margin 8.146607693301718 +709 58 optimizer.lr 0.05120358621649812 +709 58 negative_sampler.num_negs_per_pos 89.0 +709 58 training.batch_size 2.0 +709 59 model.embedding_dim 0.0 +709 59 model.scoring_fct_norm 2.0 +709 59 loss.margin 8.194920075698828 +709 59 optimizer.lr 0.0015508230999308602 +709 59 negative_sampler.num_negs_per_pos 35.0 +709 59 training.batch_size 2.0 +709 60 model.embedding_dim 0.0 +709 60 model.scoring_fct_norm 2.0 +709 60 loss.margin 5.190735242345556 +709 60 
optimizer.lr 0.0013863766752019759 +709 60 negative_sampler.num_negs_per_pos 33.0 +709 60 training.batch_size 2.0 +709 61 model.embedding_dim 2.0 +709 61 model.scoring_fct_norm 1.0 +709 61 loss.margin 7.098199777919311 +709 61 optimizer.lr 0.06751889417300667 +709 61 negative_sampler.num_negs_per_pos 17.0 +709 61 training.batch_size 2.0 +709 62 model.embedding_dim 2.0 +709 62 model.scoring_fct_norm 1.0 +709 62 loss.margin 3.005455233245366 +709 62 optimizer.lr 0.025635461018504665 +709 62 negative_sampler.num_negs_per_pos 71.0 +709 62 training.batch_size 1.0 +709 63 model.embedding_dim 2.0 +709 63 model.scoring_fct_norm 1.0 +709 63 loss.margin 5.436555922683756 +709 63 optimizer.lr 0.0010127714097659334 +709 63 negative_sampler.num_negs_per_pos 32.0 +709 63 training.batch_size 2.0 +709 64 model.embedding_dim 2.0 +709 64 model.scoring_fct_norm 2.0 +709 64 loss.margin 1.0115918167525997 +709 64 optimizer.lr 0.04728535757576206 +709 64 negative_sampler.num_negs_per_pos 78.0 +709 64 training.batch_size 2.0 +709 65 model.embedding_dim 2.0 +709 65 model.scoring_fct_norm 2.0 +709 65 loss.margin 8.620259458710775 +709 65 optimizer.lr 0.0019452621274171369 +709 65 negative_sampler.num_negs_per_pos 91.0 +709 65 training.batch_size 2.0 +709 66 model.embedding_dim 1.0 +709 66 model.scoring_fct_norm 2.0 +709 66 loss.margin 7.365881760579255 +709 66 optimizer.lr 0.014804896848902523 +709 66 negative_sampler.num_negs_per_pos 53.0 +709 66 training.batch_size 1.0 +709 67 model.embedding_dim 2.0 +709 67 model.scoring_fct_norm 2.0 +709 67 loss.margin 7.823598951211343 +709 67 optimizer.lr 0.00208994209982042 +709 67 negative_sampler.num_negs_per_pos 57.0 +709 67 training.batch_size 2.0 +709 68 model.embedding_dim 1.0 +709 68 model.scoring_fct_norm 1.0 +709 68 loss.margin 1.246088095127055 +709 68 optimizer.lr 0.016881485374073014 +709 68 negative_sampler.num_negs_per_pos 11.0 +709 68 training.batch_size 2.0 +709 69 model.embedding_dim 1.0 +709 69 model.scoring_fct_norm 1.0 +709 69 
loss.margin 0.9536653119027745 +709 69 optimizer.lr 0.0011192042212434343 +709 69 negative_sampler.num_negs_per_pos 42.0 +709 69 training.batch_size 2.0 +709 70 model.embedding_dim 2.0 +709 70 model.scoring_fct_norm 2.0 +709 70 loss.margin 4.217049539586975 +709 70 optimizer.lr 0.008710620122220633 +709 70 negative_sampler.num_negs_per_pos 61.0 +709 70 training.batch_size 0.0 +709 71 model.embedding_dim 2.0 +709 71 model.scoring_fct_norm 1.0 +709 71 loss.margin 6.853401466576375 +709 71 optimizer.lr 0.021313277180292422 +709 71 negative_sampler.num_negs_per_pos 32.0 +709 71 training.batch_size 1.0 +709 72 model.embedding_dim 2.0 +709 72 model.scoring_fct_norm 1.0 +709 72 loss.margin 3.8862528697784997 +709 72 optimizer.lr 0.007356109872401404 +709 72 negative_sampler.num_negs_per_pos 2.0 +709 72 training.batch_size 1.0 +709 73 model.embedding_dim 0.0 +709 73 model.scoring_fct_norm 2.0 +709 73 loss.margin 1.738480532801358 +709 73 optimizer.lr 0.001243360253974347 +709 73 negative_sampler.num_negs_per_pos 75.0 +709 73 training.batch_size 2.0 +709 74 model.embedding_dim 2.0 +709 74 model.scoring_fct_norm 1.0 +709 74 loss.margin 4.1741013020679505 +709 74 optimizer.lr 0.019102209852621916 +709 74 negative_sampler.num_negs_per_pos 0.0 +709 74 training.batch_size 1.0 +709 75 model.embedding_dim 1.0 +709 75 model.scoring_fct_norm 1.0 +709 75 loss.margin 6.556241598319719 +709 75 optimizer.lr 0.024618677733130992 +709 75 negative_sampler.num_negs_per_pos 7.0 +709 75 training.batch_size 2.0 +709 76 model.embedding_dim 2.0 +709 76 model.scoring_fct_norm 1.0 +709 76 loss.margin 8.107283451018109 +709 76 optimizer.lr 0.00684959644285402 +709 76 negative_sampler.num_negs_per_pos 43.0 +709 76 training.batch_size 0.0 +709 77 model.embedding_dim 0.0 +709 77 model.scoring_fct_norm 2.0 +709 77 loss.margin 6.099672873734833 +709 77 optimizer.lr 0.015242642472560559 +709 77 negative_sampler.num_negs_per_pos 86.0 +709 77 training.batch_size 1.0 +709 78 model.embedding_dim 0.0 +709 78 
model.scoring_fct_norm 1.0 +709 78 loss.margin 2.499055270364512 +709 78 optimizer.lr 0.001708128283096588 +709 78 negative_sampler.num_negs_per_pos 25.0 +709 78 training.batch_size 1.0 +709 79 model.embedding_dim 1.0 +709 79 model.scoring_fct_norm 1.0 +709 79 loss.margin 4.553213239449367 +709 79 optimizer.lr 0.005612951238363324 +709 79 negative_sampler.num_negs_per_pos 42.0 +709 79 training.batch_size 2.0 +709 1 dataset """kinships""" +709 1 model """structuredembedding""" +709 1 loss """marginranking""" +709 1 regularizer """no""" +709 1 optimizer """adam""" +709 1 training_loop """owa""" +709 1 negative_sampler """basic""" +709 1 evaluator """rankbased""" +709 2 dataset """kinships""" +709 2 model """structuredembedding""" +709 2 loss """marginranking""" +709 2 regularizer """no""" +709 2 optimizer """adam""" +709 2 training_loop """owa""" +709 2 negative_sampler """basic""" +709 2 evaluator """rankbased""" +709 3 dataset """kinships""" +709 3 model """structuredembedding""" +709 3 loss """marginranking""" +709 3 regularizer """no""" +709 3 optimizer """adam""" +709 3 training_loop """owa""" +709 3 negative_sampler """basic""" +709 3 evaluator """rankbased""" +709 4 dataset """kinships""" +709 4 model """structuredembedding""" +709 4 loss """marginranking""" +709 4 regularizer """no""" +709 4 optimizer """adam""" +709 4 training_loop """owa""" +709 4 negative_sampler """basic""" +709 4 evaluator """rankbased""" +709 5 dataset """kinships""" +709 5 model """structuredembedding""" +709 5 loss """marginranking""" +709 5 regularizer """no""" +709 5 optimizer """adam""" +709 5 training_loop """owa""" +709 5 negative_sampler """basic""" +709 5 evaluator """rankbased""" +709 6 dataset """kinships""" +709 6 model """structuredembedding""" +709 6 loss """marginranking""" +709 6 regularizer """no""" +709 6 optimizer """adam""" +709 6 training_loop """owa""" +709 6 negative_sampler """basic""" +709 6 evaluator """rankbased""" +709 7 dataset """kinships""" +709 7 model 
"""structuredembedding""" +709 7 loss """marginranking""" +709 7 regularizer """no""" +709 7 optimizer """adam""" +709 7 training_loop """owa""" +709 7 negative_sampler """basic""" +709 7 evaluator """rankbased""" +709 8 dataset """kinships""" +709 8 model """structuredembedding""" +709 8 loss """marginranking""" +709 8 regularizer """no""" +709 8 optimizer """adam""" +709 8 training_loop """owa""" +709 8 negative_sampler """basic""" +709 8 evaluator """rankbased""" +709 9 dataset """kinships""" +709 9 model """structuredembedding""" +709 9 loss """marginranking""" +709 9 regularizer """no""" +709 9 optimizer """adam""" +709 9 training_loop """owa""" +709 9 negative_sampler """basic""" +709 9 evaluator """rankbased""" +709 10 dataset """kinships""" +709 10 model """structuredembedding""" +709 10 loss """marginranking""" +709 10 regularizer """no""" +709 10 optimizer """adam""" +709 10 training_loop """owa""" +709 10 negative_sampler """basic""" +709 10 evaluator """rankbased""" +709 11 dataset """kinships""" +709 11 model """structuredembedding""" +709 11 loss """marginranking""" +709 11 regularizer """no""" +709 11 optimizer """adam""" +709 11 training_loop """owa""" +709 11 negative_sampler """basic""" +709 11 evaluator """rankbased""" +709 12 dataset """kinships""" +709 12 model """structuredembedding""" +709 12 loss """marginranking""" +709 12 regularizer """no""" +709 12 optimizer """adam""" +709 12 training_loop """owa""" +709 12 negative_sampler """basic""" +709 12 evaluator """rankbased""" +709 13 dataset """kinships""" +709 13 model """structuredembedding""" +709 13 loss """marginranking""" +709 13 regularizer """no""" +709 13 optimizer """adam""" +709 13 training_loop """owa""" +709 13 negative_sampler """basic""" +709 13 evaluator """rankbased""" +709 14 dataset """kinships""" +709 14 model """structuredembedding""" +709 14 loss """marginranking""" +709 14 regularizer """no""" +709 14 optimizer """adam""" +709 14 training_loop """owa""" +709 14 
negative_sampler """basic""" +709 14 evaluator """rankbased""" +709 15 dataset """kinships""" +709 15 model """structuredembedding""" +709 15 loss """marginranking""" +709 15 regularizer """no""" +709 15 optimizer """adam""" +709 15 training_loop """owa""" +709 15 negative_sampler """basic""" +709 15 evaluator """rankbased""" +709 16 dataset """kinships""" +709 16 model """structuredembedding""" +709 16 loss """marginranking""" +709 16 regularizer """no""" +709 16 optimizer """adam""" +709 16 training_loop """owa""" +709 16 negative_sampler """basic""" +709 16 evaluator """rankbased""" +709 17 dataset """kinships""" +709 17 model """structuredembedding""" +709 17 loss """marginranking""" +709 17 regularizer """no""" +709 17 optimizer """adam""" +709 17 training_loop """owa""" +709 17 negative_sampler """basic""" +709 17 evaluator """rankbased""" +709 18 dataset """kinships""" +709 18 model """structuredembedding""" +709 18 loss """marginranking""" +709 18 regularizer """no""" +709 18 optimizer """adam""" +709 18 training_loop """owa""" +709 18 negative_sampler """basic""" +709 18 evaluator """rankbased""" +709 19 dataset """kinships""" +709 19 model """structuredembedding""" +709 19 loss """marginranking""" +709 19 regularizer """no""" +709 19 optimizer """adam""" +709 19 training_loop """owa""" +709 19 negative_sampler """basic""" +709 19 evaluator """rankbased""" +709 20 dataset """kinships""" +709 20 model """structuredembedding""" +709 20 loss """marginranking""" +709 20 regularizer """no""" +709 20 optimizer """adam""" +709 20 training_loop """owa""" +709 20 negative_sampler """basic""" +709 20 evaluator """rankbased""" +709 21 dataset """kinships""" +709 21 model """structuredembedding""" +709 21 loss """marginranking""" +709 21 regularizer """no""" +709 21 optimizer """adam""" +709 21 training_loop """owa""" +709 21 negative_sampler """basic""" +709 21 evaluator """rankbased""" +709 22 dataset """kinships""" +709 22 model """structuredembedding""" +709 22 
loss """marginranking""" +709 22 regularizer """no""" +709 22 optimizer """adam""" +709 22 training_loop """owa""" +709 22 negative_sampler """basic""" +709 22 evaluator """rankbased""" +709 23 dataset """kinships""" +709 23 model """structuredembedding""" +709 23 loss """marginranking""" +709 23 regularizer """no""" +709 23 optimizer """adam""" +709 23 training_loop """owa""" +709 23 negative_sampler """basic""" +709 23 evaluator """rankbased""" +709 24 dataset """kinships""" +709 24 model """structuredembedding""" +709 24 loss """marginranking""" +709 24 regularizer """no""" +709 24 optimizer """adam""" +709 24 training_loop """owa""" +709 24 negative_sampler """basic""" +709 24 evaluator """rankbased""" +709 25 dataset """kinships""" +709 25 model """structuredembedding""" +709 25 loss """marginranking""" +709 25 regularizer """no""" +709 25 optimizer """adam""" +709 25 training_loop """owa""" +709 25 negative_sampler """basic""" +709 25 evaluator """rankbased""" +709 26 dataset """kinships""" +709 26 model """structuredembedding""" +709 26 loss """marginranking""" +709 26 regularizer """no""" +709 26 optimizer """adam""" +709 26 training_loop """owa""" +709 26 negative_sampler """basic""" +709 26 evaluator """rankbased""" +709 27 dataset """kinships""" +709 27 model """structuredembedding""" +709 27 loss """marginranking""" +709 27 regularizer """no""" +709 27 optimizer """adam""" +709 27 training_loop """owa""" +709 27 negative_sampler """basic""" +709 27 evaluator """rankbased""" +709 28 dataset """kinships""" +709 28 model """structuredembedding""" +709 28 loss """marginranking""" +709 28 regularizer """no""" +709 28 optimizer """adam""" +709 28 training_loop """owa""" +709 28 negative_sampler """basic""" +709 28 evaluator """rankbased""" +709 29 dataset """kinships""" +709 29 model """structuredembedding""" +709 29 loss """marginranking""" +709 29 regularizer """no""" +709 29 optimizer """adam""" +709 29 training_loop """owa""" +709 29 negative_sampler 
"""basic""" +709 29 evaluator """rankbased""" +709 30 dataset """kinships""" +709 30 model """structuredembedding""" +709 30 loss """marginranking""" +709 30 regularizer """no""" +709 30 optimizer """adam""" +709 30 training_loop """owa""" +709 30 negative_sampler """basic""" +709 30 evaluator """rankbased""" +709 31 dataset """kinships""" +709 31 model """structuredembedding""" +709 31 loss """marginranking""" +709 31 regularizer """no""" +709 31 optimizer """adam""" +709 31 training_loop """owa""" +709 31 negative_sampler """basic""" +709 31 evaluator """rankbased""" +709 32 dataset """kinships""" +709 32 model """structuredembedding""" +709 32 loss """marginranking""" +709 32 regularizer """no""" +709 32 optimizer """adam""" +709 32 training_loop """owa""" +709 32 negative_sampler """basic""" +709 32 evaluator """rankbased""" +709 33 dataset """kinships""" +709 33 model """structuredembedding""" +709 33 loss """marginranking""" +709 33 regularizer """no""" +709 33 optimizer """adam""" +709 33 training_loop """owa""" +709 33 negative_sampler """basic""" +709 33 evaluator """rankbased""" +709 34 dataset """kinships""" +709 34 model """structuredembedding""" +709 34 loss """marginranking""" +709 34 regularizer """no""" +709 34 optimizer """adam""" +709 34 training_loop """owa""" +709 34 negative_sampler """basic""" +709 34 evaluator """rankbased""" +709 35 dataset """kinships""" +709 35 model """structuredembedding""" +709 35 loss """marginranking""" +709 35 regularizer """no""" +709 35 optimizer """adam""" +709 35 training_loop """owa""" +709 35 negative_sampler """basic""" +709 35 evaluator """rankbased""" +709 36 dataset """kinships""" +709 36 model """structuredembedding""" +709 36 loss """marginranking""" +709 36 regularizer """no""" +709 36 optimizer """adam""" +709 36 training_loop """owa""" +709 36 negative_sampler """basic""" +709 36 evaluator """rankbased""" +709 37 dataset """kinships""" +709 37 model """structuredembedding""" +709 37 loss 
"""marginranking""" +709 37 regularizer """no""" +709 37 optimizer """adam""" +709 37 training_loop """owa""" +709 37 negative_sampler """basic""" +709 37 evaluator """rankbased""" +709 38 dataset """kinships""" +709 38 model """structuredembedding""" +709 38 loss """marginranking""" +709 38 regularizer """no""" +709 38 optimizer """adam""" +709 38 training_loop """owa""" +709 38 negative_sampler """basic""" +709 38 evaluator """rankbased""" +709 39 dataset """kinships""" +709 39 model """structuredembedding""" +709 39 loss """marginranking""" +709 39 regularizer """no""" +709 39 optimizer """adam""" +709 39 training_loop """owa""" +709 39 negative_sampler """basic""" +709 39 evaluator """rankbased""" +709 40 dataset """kinships""" +709 40 model """structuredembedding""" +709 40 loss """marginranking""" +709 40 regularizer """no""" +709 40 optimizer """adam""" +709 40 training_loop """owa""" +709 40 negative_sampler """basic""" +709 40 evaluator """rankbased""" +709 41 dataset """kinships""" +709 41 model """structuredembedding""" +709 41 loss """marginranking""" +709 41 regularizer """no""" +709 41 optimizer """adam""" +709 41 training_loop """owa""" +709 41 negative_sampler """basic""" +709 41 evaluator """rankbased""" +709 42 dataset """kinships""" +709 42 model """structuredembedding""" +709 42 loss """marginranking""" +709 42 regularizer """no""" +709 42 optimizer """adam""" +709 42 training_loop """owa""" +709 42 negative_sampler """basic""" +709 42 evaluator """rankbased""" +709 43 dataset """kinships""" +709 43 model """structuredembedding""" +709 43 loss """marginranking""" +709 43 regularizer """no""" +709 43 optimizer """adam""" +709 43 training_loop """owa""" +709 43 negative_sampler """basic""" +709 43 evaluator """rankbased""" +709 44 dataset """kinships""" +709 44 model """structuredembedding""" +709 44 loss """marginranking""" +709 44 regularizer """no""" +709 44 optimizer """adam""" +709 44 training_loop """owa""" +709 44 negative_sampler 
"""basic""" +709 44 evaluator """rankbased""" +709 45 dataset """kinships""" +709 45 model """structuredembedding""" +709 45 loss """marginranking""" +709 45 regularizer """no""" +709 45 optimizer """adam""" +709 45 training_loop """owa""" +709 45 negative_sampler """basic""" +709 45 evaluator """rankbased""" +709 46 dataset """kinships""" +709 46 model """structuredembedding""" +709 46 loss """marginranking""" +709 46 regularizer """no""" +709 46 optimizer """adam""" +709 46 training_loop """owa""" +709 46 negative_sampler """basic""" +709 46 evaluator """rankbased""" +709 47 dataset """kinships""" +709 47 model """structuredembedding""" +709 47 loss """marginranking""" +709 47 regularizer """no""" +709 47 optimizer """adam""" +709 47 training_loop """owa""" +709 47 negative_sampler """basic""" +709 47 evaluator """rankbased""" +709 48 dataset """kinships""" +709 48 model """structuredembedding""" +709 48 loss """marginranking""" +709 48 regularizer """no""" +709 48 optimizer """adam""" +709 48 training_loop """owa""" +709 48 negative_sampler """basic""" +709 48 evaluator """rankbased""" +709 49 dataset """kinships""" +709 49 model """structuredembedding""" +709 49 loss """marginranking""" +709 49 regularizer """no""" +709 49 optimizer """adam""" +709 49 training_loop """owa""" +709 49 negative_sampler """basic""" +709 49 evaluator """rankbased""" +709 50 dataset """kinships""" +709 50 model """structuredembedding""" +709 50 loss """marginranking""" +709 50 regularizer """no""" +709 50 optimizer """adam""" +709 50 training_loop """owa""" +709 50 negative_sampler """basic""" +709 50 evaluator """rankbased""" +709 51 dataset """kinships""" +709 51 model """structuredembedding""" +709 51 loss """marginranking""" +709 51 regularizer """no""" +709 51 optimizer """adam""" +709 51 training_loop """owa""" +709 51 negative_sampler """basic""" +709 51 evaluator """rankbased""" +709 52 dataset """kinships""" +709 52 model """structuredembedding""" +709 52 loss 
"""marginranking""" +709 52 regularizer """no""" +709 52 optimizer """adam""" +709 52 training_loop """owa""" +709 52 negative_sampler """basic""" +709 52 evaluator """rankbased""" +709 53 dataset """kinships""" +709 53 model """structuredembedding""" +709 53 loss """marginranking""" +709 53 regularizer """no""" +709 53 optimizer """adam""" +709 53 training_loop """owa""" +709 53 negative_sampler """basic""" +709 53 evaluator """rankbased""" +709 54 dataset """kinships""" +709 54 model """structuredembedding""" +709 54 loss """marginranking""" +709 54 regularizer """no""" +709 54 optimizer """adam""" +709 54 training_loop """owa""" +709 54 negative_sampler """basic""" +709 54 evaluator """rankbased""" +709 55 dataset """kinships""" +709 55 model """structuredembedding""" +709 55 loss """marginranking""" +709 55 regularizer """no""" +709 55 optimizer """adam""" +709 55 training_loop """owa""" +709 55 negative_sampler """basic""" +709 55 evaluator """rankbased""" +709 56 dataset """kinships""" +709 56 model """structuredembedding""" +709 56 loss """marginranking""" +709 56 regularizer """no""" +709 56 optimizer """adam""" +709 56 training_loop """owa""" +709 56 negative_sampler """basic""" +709 56 evaluator """rankbased""" +709 57 dataset """kinships""" +709 57 model """structuredembedding""" +709 57 loss """marginranking""" +709 57 regularizer """no""" +709 57 optimizer """adam""" +709 57 training_loop """owa""" +709 57 negative_sampler """basic""" +709 57 evaluator """rankbased""" +709 58 dataset """kinships""" +709 58 model """structuredembedding""" +709 58 loss """marginranking""" +709 58 regularizer """no""" +709 58 optimizer """adam""" +709 58 training_loop """owa""" +709 58 negative_sampler """basic""" +709 58 evaluator """rankbased""" +709 59 dataset """kinships""" +709 59 model """structuredembedding""" +709 59 loss """marginranking""" +709 59 regularizer """no""" +709 59 optimizer """adam""" +709 59 training_loop """owa""" +709 59 negative_sampler 
"""basic""" +709 59 evaluator """rankbased""" +709 60 dataset """kinships""" +709 60 model """structuredembedding""" +709 60 loss """marginranking""" +709 60 regularizer """no""" +709 60 optimizer """adam""" +709 60 training_loop """owa""" +709 60 negative_sampler """basic""" +709 60 evaluator """rankbased""" +709 61 dataset """kinships""" +709 61 model """structuredembedding""" +709 61 loss """marginranking""" +709 61 regularizer """no""" +709 61 optimizer """adam""" +709 61 training_loop """owa""" +709 61 negative_sampler """basic""" +709 61 evaluator """rankbased""" +709 62 dataset """kinships""" +709 62 model """structuredembedding""" +709 62 loss """marginranking""" +709 62 regularizer """no""" +709 62 optimizer """adam""" +709 62 training_loop """owa""" +709 62 negative_sampler """basic""" +709 62 evaluator """rankbased""" +709 63 dataset """kinships""" +709 63 model """structuredembedding""" +709 63 loss """marginranking""" +709 63 regularizer """no""" +709 63 optimizer """adam""" +709 63 training_loop """owa""" +709 63 negative_sampler """basic""" +709 63 evaluator """rankbased""" +709 64 dataset """kinships""" +709 64 model """structuredembedding""" +709 64 loss """marginranking""" +709 64 regularizer """no""" +709 64 optimizer """adam""" +709 64 training_loop """owa""" +709 64 negative_sampler """basic""" +709 64 evaluator """rankbased""" +709 65 dataset """kinships""" +709 65 model """structuredembedding""" +709 65 loss """marginranking""" +709 65 regularizer """no""" +709 65 optimizer """adam""" +709 65 training_loop """owa""" +709 65 negative_sampler """basic""" +709 65 evaluator """rankbased""" +709 66 dataset """kinships""" +709 66 model """structuredembedding""" +709 66 loss """marginranking""" +709 66 regularizer """no""" +709 66 optimizer """adam""" +709 66 training_loop """owa""" +709 66 negative_sampler """basic""" +709 66 evaluator """rankbased""" +709 67 dataset """kinships""" +709 67 model """structuredembedding""" +709 67 loss 
"""marginranking""" +709 67 regularizer """no""" +709 67 optimizer """adam""" +709 67 training_loop """owa""" +709 67 negative_sampler """basic""" +709 67 evaluator """rankbased""" +709 68 dataset """kinships""" +709 68 model """structuredembedding""" +709 68 loss """marginranking""" +709 68 regularizer """no""" +709 68 optimizer """adam""" +709 68 training_loop """owa""" +709 68 negative_sampler """basic""" +709 68 evaluator """rankbased""" +709 69 dataset """kinships""" +709 69 model """structuredembedding""" +709 69 loss """marginranking""" +709 69 regularizer """no""" +709 69 optimizer """adam""" +709 69 training_loop """owa""" +709 69 negative_sampler """basic""" +709 69 evaluator """rankbased""" +709 70 dataset """kinships""" +709 70 model """structuredembedding""" +709 70 loss """marginranking""" +709 70 regularizer """no""" +709 70 optimizer """adam""" +709 70 training_loop """owa""" +709 70 negative_sampler """basic""" +709 70 evaluator """rankbased""" +709 71 dataset """kinships""" +709 71 model """structuredembedding""" +709 71 loss """marginranking""" +709 71 regularizer """no""" +709 71 optimizer """adam""" +709 71 training_loop """owa""" +709 71 negative_sampler """basic""" +709 71 evaluator """rankbased""" +709 72 dataset """kinships""" +709 72 model """structuredembedding""" +709 72 loss """marginranking""" +709 72 regularizer """no""" +709 72 optimizer """adam""" +709 72 training_loop """owa""" +709 72 negative_sampler """basic""" +709 72 evaluator """rankbased""" +709 73 dataset """kinships""" +709 73 model """structuredembedding""" +709 73 loss """marginranking""" +709 73 regularizer """no""" +709 73 optimizer """adam""" +709 73 training_loop """owa""" +709 73 negative_sampler """basic""" +709 73 evaluator """rankbased""" +709 74 dataset """kinships""" +709 74 model """structuredembedding""" +709 74 loss """marginranking""" +709 74 regularizer """no""" +709 74 optimizer """adam""" +709 74 training_loop """owa""" +709 74 negative_sampler 
"""basic""" +709 74 evaluator """rankbased""" +709 75 dataset """kinships""" +709 75 model """structuredembedding""" +709 75 loss """marginranking""" +709 75 regularizer """no""" +709 75 optimizer """adam""" +709 75 training_loop """owa""" +709 75 negative_sampler """basic""" +709 75 evaluator """rankbased""" +709 76 dataset """kinships""" +709 76 model """structuredembedding""" +709 76 loss """marginranking""" +709 76 regularizer """no""" +709 76 optimizer """adam""" +709 76 training_loop """owa""" +709 76 negative_sampler """basic""" +709 76 evaluator """rankbased""" +709 77 dataset """kinships""" +709 77 model """structuredembedding""" +709 77 loss """marginranking""" +709 77 regularizer """no""" +709 77 optimizer """adam""" +709 77 training_loop """owa""" +709 77 negative_sampler """basic""" +709 77 evaluator """rankbased""" +709 78 dataset """kinships""" +709 78 model """structuredembedding""" +709 78 loss """marginranking""" +709 78 regularizer """no""" +709 78 optimizer """adam""" +709 78 training_loop """owa""" +709 78 negative_sampler """basic""" +709 78 evaluator """rankbased""" +709 79 dataset """kinships""" +709 79 model """structuredembedding""" +709 79 loss """marginranking""" +709 79 regularizer """no""" +709 79 optimizer """adam""" +709 79 training_loop """owa""" +709 79 negative_sampler """basic""" +709 79 evaluator """rankbased""" +710 1 model.embedding_dim 2.0 +710 1 model.scoring_fct_norm 1.0 +710 1 loss.margin 8.332555550449737 +710 1 optimizer.lr 0.05084416723683874 +710 1 negative_sampler.num_negs_per_pos 85.0 +710 1 training.batch_size 1.0 +710 2 model.embedding_dim 0.0 +710 2 model.scoring_fct_norm 2.0 +710 2 loss.margin 7.083061417236565 +710 2 optimizer.lr 0.02315441316476644 +710 2 negative_sampler.num_negs_per_pos 77.0 +710 2 training.batch_size 0.0 +710 3 model.embedding_dim 1.0 +710 3 model.scoring_fct_norm 1.0 +710 3 loss.margin 6.083512426302655 +710 3 optimizer.lr 0.019201637212824495 +710 3 negative_sampler.num_negs_per_pos 19.0 
+710 3 training.batch_size 2.0 +710 4 model.embedding_dim 0.0 +710 4 model.scoring_fct_norm 1.0 +710 4 loss.margin 3.0230795451670995 +710 4 optimizer.lr 0.010554464162605652 +710 4 negative_sampler.num_negs_per_pos 98.0 +710 4 training.batch_size 2.0 +710 5 model.embedding_dim 1.0 +710 5 model.scoring_fct_norm 2.0 +710 5 loss.margin 2.2254253603706813 +710 5 optimizer.lr 0.015147700229500688 +710 5 negative_sampler.num_negs_per_pos 76.0 +710 5 training.batch_size 0.0 +710 6 model.embedding_dim 2.0 +710 6 model.scoring_fct_norm 2.0 +710 6 loss.margin 9.065369294409248 +710 6 optimizer.lr 0.008660000447368801 +710 6 negative_sampler.num_negs_per_pos 77.0 +710 6 training.batch_size 2.0 +710 7 model.embedding_dim 2.0 +710 7 model.scoring_fct_norm 1.0 +710 7 loss.margin 6.033035545979555 +710 7 optimizer.lr 0.0010418978782404556 +710 7 negative_sampler.num_negs_per_pos 35.0 +710 7 training.batch_size 1.0 +710 8 model.embedding_dim 2.0 +710 8 model.scoring_fct_norm 1.0 +710 8 loss.margin 4.918774245555769 +710 8 optimizer.lr 0.009106764086478255 +710 8 negative_sampler.num_negs_per_pos 66.0 +710 8 training.batch_size 1.0 +710 9 model.embedding_dim 2.0 +710 9 model.scoring_fct_norm 2.0 +710 9 loss.margin 8.290767335804446 +710 9 optimizer.lr 0.015889089504627574 +710 9 negative_sampler.num_negs_per_pos 88.0 +710 9 training.batch_size 0.0 +710 10 model.embedding_dim 2.0 +710 10 model.scoring_fct_norm 1.0 +710 10 loss.margin 4.831586276125722 +710 10 optimizer.lr 0.0027140354442397965 +710 10 negative_sampler.num_negs_per_pos 5.0 +710 10 training.batch_size 1.0 +710 11 model.embedding_dim 1.0 +710 11 model.scoring_fct_norm 2.0 +710 11 loss.margin 8.30177170536242 +710 11 optimizer.lr 0.0015603130021560917 +710 11 negative_sampler.num_negs_per_pos 19.0 +710 11 training.batch_size 2.0 +710 12 model.embedding_dim 2.0 +710 12 model.scoring_fct_norm 2.0 +710 12 loss.margin 7.858941308462179 +710 12 optimizer.lr 0.003449650888846216 +710 12 negative_sampler.num_negs_per_pos 42.0 
+710 12 training.batch_size 2.0 +710 13 model.embedding_dim 0.0 +710 13 model.scoring_fct_norm 1.0 +710 13 loss.margin 2.753565136871832 +710 13 optimizer.lr 0.03171154757338971 +710 13 negative_sampler.num_negs_per_pos 96.0 +710 13 training.batch_size 0.0 +710 14 model.embedding_dim 1.0 +710 14 model.scoring_fct_norm 2.0 +710 14 loss.margin 6.772820470730356 +710 14 optimizer.lr 0.07940377591512283 +710 14 negative_sampler.num_negs_per_pos 10.0 +710 14 training.batch_size 0.0 +710 15 model.embedding_dim 2.0 +710 15 model.scoring_fct_norm 1.0 +710 15 loss.margin 3.1778181809856068 +710 15 optimizer.lr 0.01133231018689293 +710 15 negative_sampler.num_negs_per_pos 87.0 +710 15 training.batch_size 2.0 +710 16 model.embedding_dim 1.0 +710 16 model.scoring_fct_norm 1.0 +710 16 loss.margin 4.099907013249378 +710 16 optimizer.lr 0.03505342575962078 +710 16 negative_sampler.num_negs_per_pos 15.0 +710 16 training.batch_size 2.0 +710 17 model.embedding_dim 2.0 +710 17 model.scoring_fct_norm 1.0 +710 17 loss.margin 2.665803609200574 +710 17 optimizer.lr 0.0016881120369429724 +710 17 negative_sampler.num_negs_per_pos 83.0 +710 17 training.batch_size 2.0 +710 18 model.embedding_dim 0.0 +710 18 model.scoring_fct_norm 1.0 +710 18 loss.margin 9.488965143177376 +710 18 optimizer.lr 0.016164561399895968 +710 18 negative_sampler.num_negs_per_pos 98.0 +710 18 training.batch_size 2.0 +710 19 model.embedding_dim 0.0 +710 19 model.scoring_fct_norm 2.0 +710 19 loss.margin 1.024551988969157 +710 19 optimizer.lr 0.0026421478891462515 +710 19 negative_sampler.num_negs_per_pos 40.0 +710 19 training.batch_size 1.0 +710 20 model.embedding_dim 1.0 +710 20 model.scoring_fct_norm 2.0 +710 20 loss.margin 7.767611552413729 +710 20 optimizer.lr 0.0022660036618563956 +710 20 negative_sampler.num_negs_per_pos 0.0 +710 20 training.batch_size 0.0 +710 21 model.embedding_dim 1.0 +710 21 model.scoring_fct_norm 1.0 +710 21 loss.margin 5.007676257875035 +710 21 optimizer.lr 0.0394487594425979 +710 21 
negative_sampler.num_negs_per_pos 59.0 +710 21 training.batch_size 2.0 +710 22 model.embedding_dim 2.0 +710 22 model.scoring_fct_norm 1.0 +710 22 loss.margin 0.5624568485889104 +710 22 optimizer.lr 0.0018613679121484508 +710 22 negative_sampler.num_negs_per_pos 96.0 +710 22 training.batch_size 1.0 +710 23 model.embedding_dim 1.0 +710 23 model.scoring_fct_norm 2.0 +710 23 loss.margin 3.5252566290612233 +710 23 optimizer.lr 0.045416336792661965 +710 23 negative_sampler.num_negs_per_pos 13.0 +710 23 training.batch_size 0.0 +710 24 model.embedding_dim 2.0 +710 24 model.scoring_fct_norm 1.0 +710 24 loss.margin 0.5862493357678049 +710 24 optimizer.lr 0.08922581616756928 +710 24 negative_sampler.num_negs_per_pos 54.0 +710 24 training.batch_size 0.0 +710 25 model.embedding_dim 0.0 +710 25 model.scoring_fct_norm 2.0 +710 25 loss.margin 1.916763147997856 +710 25 optimizer.lr 0.032067691364465384 +710 25 negative_sampler.num_negs_per_pos 68.0 +710 25 training.batch_size 1.0 +710 26 model.embedding_dim 2.0 +710 26 model.scoring_fct_norm 2.0 +710 26 loss.margin 4.196773987182454 +710 26 optimizer.lr 0.03095104679788053 +710 26 negative_sampler.num_negs_per_pos 45.0 +710 26 training.batch_size 2.0 +710 27 model.embedding_dim 1.0 +710 27 model.scoring_fct_norm 1.0 +710 27 loss.margin 2.423258172147198 +710 27 optimizer.lr 0.0037448341777653483 +710 27 negative_sampler.num_negs_per_pos 6.0 +710 27 training.batch_size 0.0 +710 28 model.embedding_dim 0.0 +710 28 model.scoring_fct_norm 1.0 +710 28 loss.margin 8.933488372779852 +710 28 optimizer.lr 0.016985262635798897 +710 28 negative_sampler.num_negs_per_pos 42.0 +710 28 training.batch_size 1.0 +710 29 model.embedding_dim 1.0 +710 29 model.scoring_fct_norm 2.0 +710 29 loss.margin 6.706544026872152 +710 29 optimizer.lr 0.0016254338742348804 +710 29 negative_sampler.num_negs_per_pos 57.0 +710 29 training.batch_size 1.0 +710 30 model.embedding_dim 0.0 +710 30 model.scoring_fct_norm 2.0 +710 30 loss.margin 9.763035934219007 +710 30 
optimizer.lr 0.009668612844234542 +710 30 negative_sampler.num_negs_per_pos 67.0 +710 30 training.batch_size 1.0 +710 31 model.embedding_dim 2.0 +710 31 model.scoring_fct_norm 1.0 +710 31 loss.margin 6.095253805076433 +710 31 optimizer.lr 0.001000735330996842 +710 31 negative_sampler.num_negs_per_pos 48.0 +710 31 training.batch_size 0.0 +710 32 model.embedding_dim 0.0 +710 32 model.scoring_fct_norm 1.0 +710 32 loss.margin 4.389425363222394 +710 32 optimizer.lr 0.0017737377579110241 +710 32 negative_sampler.num_negs_per_pos 19.0 +710 32 training.batch_size 1.0 +710 33 model.embedding_dim 2.0 +710 33 model.scoring_fct_norm 2.0 +710 33 loss.margin 5.079097204884114 +710 33 optimizer.lr 0.004130167041321107 +710 33 negative_sampler.num_negs_per_pos 8.0 +710 33 training.batch_size 2.0 +710 34 model.embedding_dim 2.0 +710 34 model.scoring_fct_norm 2.0 +710 34 loss.margin 6.108118963086665 +710 34 optimizer.lr 0.007892030261296543 +710 34 negative_sampler.num_negs_per_pos 22.0 +710 34 training.batch_size 1.0 +710 35 model.embedding_dim 0.0 +710 35 model.scoring_fct_norm 2.0 +710 35 loss.margin 3.2630729595829404 +710 35 optimizer.lr 0.004431934348406014 +710 35 negative_sampler.num_negs_per_pos 8.0 +710 35 training.batch_size 2.0 +710 36 model.embedding_dim 0.0 +710 36 model.scoring_fct_norm 1.0 +710 36 loss.margin 1.0016804166280373 +710 36 optimizer.lr 0.010811901692086958 +710 36 negative_sampler.num_negs_per_pos 19.0 +710 36 training.batch_size 0.0 +710 37 model.embedding_dim 1.0 +710 37 model.scoring_fct_norm 1.0 +710 37 loss.margin 7.049799204377205 +710 37 optimizer.lr 0.001589969478065096 +710 37 negative_sampler.num_negs_per_pos 47.0 +710 37 training.batch_size 0.0 +710 38 model.embedding_dim 1.0 +710 38 model.scoring_fct_norm 2.0 +710 38 loss.margin 3.4083170773555653 +710 38 optimizer.lr 0.019432755713387137 +710 38 negative_sampler.num_negs_per_pos 86.0 +710 38 training.batch_size 2.0 +710 39 model.embedding_dim 2.0 +710 39 model.scoring_fct_norm 1.0 +710 39 
loss.margin 1.1378380541692257 +710 39 optimizer.lr 0.04642146831230861 +710 39 negative_sampler.num_negs_per_pos 86.0 +710 39 training.batch_size 1.0 +710 40 model.embedding_dim 2.0 +710 40 model.scoring_fct_norm 1.0 +710 40 loss.margin 7.588477273577166 +710 40 optimizer.lr 0.01512035995565564 +710 40 negative_sampler.num_negs_per_pos 83.0 +710 40 training.batch_size 2.0 +710 41 model.embedding_dim 2.0 +710 41 model.scoring_fct_norm 1.0 +710 41 loss.margin 9.627604234316587 +710 41 optimizer.lr 0.01637128250600675 +710 41 negative_sampler.num_negs_per_pos 76.0 +710 41 training.batch_size 0.0 +710 42 model.embedding_dim 2.0 +710 42 model.scoring_fct_norm 2.0 +710 42 loss.margin 9.257415048393625 +710 42 optimizer.lr 0.022727974024948614 +710 42 negative_sampler.num_negs_per_pos 40.0 +710 42 training.batch_size 1.0 +710 43 model.embedding_dim 2.0 +710 43 model.scoring_fct_norm 1.0 +710 43 loss.margin 9.291390299314376 +710 43 optimizer.lr 0.002676944163409815 +710 43 negative_sampler.num_negs_per_pos 69.0 +710 43 training.batch_size 1.0 +710 44 model.embedding_dim 0.0 +710 44 model.scoring_fct_norm 2.0 +710 44 loss.margin 6.037504753475778 +710 44 optimizer.lr 0.004522359628319755 +710 44 negative_sampler.num_negs_per_pos 11.0 +710 44 training.batch_size 2.0 +710 45 model.embedding_dim 2.0 +710 45 model.scoring_fct_norm 1.0 +710 45 loss.margin 5.358308474171564 +710 45 optimizer.lr 0.006107359499659573 +710 45 negative_sampler.num_negs_per_pos 77.0 +710 45 training.batch_size 0.0 +710 46 model.embedding_dim 1.0 +710 46 model.scoring_fct_norm 2.0 +710 46 loss.margin 9.493010555378609 +710 46 optimizer.lr 0.0066409748519473955 +710 46 negative_sampler.num_negs_per_pos 32.0 +710 46 training.batch_size 1.0 +710 47 model.embedding_dim 0.0 +710 47 model.scoring_fct_norm 1.0 +710 47 loss.margin 6.517308337645854 +710 47 optimizer.lr 0.0014166662337533821 +710 47 negative_sampler.num_negs_per_pos 38.0 +710 47 training.batch_size 2.0 +710 48 model.embedding_dim 0.0 +710 48 
model.scoring_fct_norm 2.0 +710 48 loss.margin 8.75934425982999 +710 48 optimizer.lr 0.07885544701839911 +710 48 negative_sampler.num_negs_per_pos 18.0 +710 48 training.batch_size 2.0 +710 49 model.embedding_dim 2.0 +710 49 model.scoring_fct_norm 1.0 +710 49 loss.margin 5.806285611701773 +710 49 optimizer.lr 0.0024602403168120456 +710 49 negative_sampler.num_negs_per_pos 91.0 +710 49 training.batch_size 2.0 +710 50 model.embedding_dim 1.0 +710 50 model.scoring_fct_norm 2.0 +710 50 loss.margin 8.786492432373619 +710 50 optimizer.lr 0.042172705365750934 +710 50 negative_sampler.num_negs_per_pos 60.0 +710 50 training.batch_size 0.0 +710 51 model.embedding_dim 2.0 +710 51 model.scoring_fct_norm 1.0 +710 51 loss.margin 7.40225767346665 +710 51 optimizer.lr 0.001981362689438073 +710 51 negative_sampler.num_negs_per_pos 11.0 +710 51 training.batch_size 2.0 +710 52 model.embedding_dim 1.0 +710 52 model.scoring_fct_norm 1.0 +710 52 loss.margin 0.7681462673993084 +710 52 optimizer.lr 0.007928952563476928 +710 52 negative_sampler.num_negs_per_pos 47.0 +710 52 training.batch_size 1.0 +710 53 model.embedding_dim 0.0 +710 53 model.scoring_fct_norm 1.0 +710 53 loss.margin 1.508204291571484 +710 53 optimizer.lr 0.0348852240567198 +710 53 negative_sampler.num_negs_per_pos 98.0 +710 53 training.batch_size 0.0 +710 54 model.embedding_dim 0.0 +710 54 model.scoring_fct_norm 2.0 +710 54 loss.margin 7.264731292450091 +710 54 optimizer.lr 0.0031532300873722894 +710 54 negative_sampler.num_negs_per_pos 30.0 +710 54 training.batch_size 2.0 +710 55 model.embedding_dim 1.0 +710 55 model.scoring_fct_norm 2.0 +710 55 loss.margin 7.228287437092806 +710 55 optimizer.lr 0.07984386063702949 +710 55 negative_sampler.num_negs_per_pos 85.0 +710 55 training.batch_size 0.0 +710 56 model.embedding_dim 2.0 +710 56 model.scoring_fct_norm 2.0 +710 56 loss.margin 6.33295589275082 +710 56 optimizer.lr 0.0027307998482097827 +710 56 negative_sampler.num_negs_per_pos 50.0 +710 56 training.batch_size 2.0 +710 57 
model.embedding_dim 2.0 +710 57 model.scoring_fct_norm 1.0 +710 57 loss.margin 6.816303070544908 +710 57 optimizer.lr 0.0015021042838748795 +710 57 negative_sampler.num_negs_per_pos 4.0 +710 57 training.batch_size 2.0 +710 58 model.embedding_dim 0.0 +710 58 model.scoring_fct_norm 2.0 +710 58 loss.margin 3.7324586635348673 +710 58 optimizer.lr 0.0021919836380328342 +710 58 negative_sampler.num_negs_per_pos 9.0 +710 58 training.batch_size 2.0 +710 59 model.embedding_dim 0.0 +710 59 model.scoring_fct_norm 2.0 +710 59 loss.margin 8.873495602519823 +710 59 optimizer.lr 0.05815538109509819 +710 59 negative_sampler.num_negs_per_pos 46.0 +710 59 training.batch_size 1.0 +710 60 model.embedding_dim 0.0 +710 60 model.scoring_fct_norm 1.0 +710 60 loss.margin 5.49068672712166 +710 60 optimizer.lr 0.026640556049159016 +710 60 negative_sampler.num_negs_per_pos 86.0 +710 60 training.batch_size 2.0 +710 61 model.embedding_dim 2.0 +710 61 model.scoring_fct_norm 2.0 +710 61 loss.margin 8.666664660462637 +710 61 optimizer.lr 0.037987580507812986 +710 61 negative_sampler.num_negs_per_pos 65.0 +710 61 training.batch_size 2.0 +710 62 model.embedding_dim 1.0 +710 62 model.scoring_fct_norm 1.0 +710 62 loss.margin 5.678372320145566 +710 62 optimizer.lr 0.00863647784699651 +710 62 negative_sampler.num_negs_per_pos 72.0 +710 62 training.batch_size 1.0 +710 63 model.embedding_dim 2.0 +710 63 model.scoring_fct_norm 1.0 +710 63 loss.margin 8.240852835889193 +710 63 optimizer.lr 0.037226648704848354 +710 63 negative_sampler.num_negs_per_pos 11.0 +710 63 training.batch_size 0.0 +710 64 model.embedding_dim 2.0 +710 64 model.scoring_fct_norm 1.0 +710 64 loss.margin 5.816500437721813 +710 64 optimizer.lr 0.01135446404342131 +710 64 negative_sampler.num_negs_per_pos 51.0 +710 64 training.batch_size 0.0 +710 65 model.embedding_dim 1.0 +710 65 model.scoring_fct_norm 2.0 +710 65 loss.margin 2.7820459963234514 +710 65 optimizer.lr 0.0012491798278618648 +710 65 negative_sampler.num_negs_per_pos 65.0 +710 
65 training.batch_size 2.0 +710 66 model.embedding_dim 0.0 +710 66 model.scoring_fct_norm 2.0 +710 66 loss.margin 4.0064069854477244 +710 66 optimizer.lr 0.0033528617750950905 +710 66 negative_sampler.num_negs_per_pos 43.0 +710 66 training.batch_size 0.0 +710 67 model.embedding_dim 2.0 +710 67 model.scoring_fct_norm 2.0 +710 67 loss.margin 6.689064463171217 +710 67 optimizer.lr 0.005031767099701602 +710 67 negative_sampler.num_negs_per_pos 77.0 +710 67 training.batch_size 2.0 +710 68 model.embedding_dim 0.0 +710 68 model.scoring_fct_norm 2.0 +710 68 loss.margin 4.085361017315505 +710 68 optimizer.lr 0.0017713882957307635 +710 68 negative_sampler.num_negs_per_pos 98.0 +710 68 training.batch_size 1.0 +710 69 model.embedding_dim 0.0 +710 69 model.scoring_fct_norm 2.0 +710 69 loss.margin 4.961134064973297 +710 69 optimizer.lr 0.0014166442870152452 +710 69 negative_sampler.num_negs_per_pos 32.0 +710 69 training.batch_size 1.0 +710 70 model.embedding_dim 0.0 +710 70 model.scoring_fct_norm 1.0 +710 70 loss.margin 3.430608226423844 +710 70 optimizer.lr 0.015411094850725893 +710 70 negative_sampler.num_negs_per_pos 37.0 +710 70 training.batch_size 0.0 +710 71 model.embedding_dim 1.0 +710 71 model.scoring_fct_norm 1.0 +710 71 loss.margin 3.415757785563664 +710 71 optimizer.lr 0.030039673333745506 +710 71 negative_sampler.num_negs_per_pos 20.0 +710 71 training.batch_size 1.0 +710 72 model.embedding_dim 1.0 +710 72 model.scoring_fct_norm 2.0 +710 72 loss.margin 0.9969122335157148 +710 72 optimizer.lr 0.0017996750448744318 +710 72 negative_sampler.num_negs_per_pos 23.0 +710 72 training.batch_size 1.0 +710 73 model.embedding_dim 2.0 +710 73 model.scoring_fct_norm 2.0 +710 73 loss.margin 8.680932400815092 +710 73 optimizer.lr 0.004439459579588965 +710 73 negative_sampler.num_negs_per_pos 3.0 +710 73 training.batch_size 1.0 +710 74 model.embedding_dim 1.0 +710 74 model.scoring_fct_norm 2.0 +710 74 loss.margin 6.449438314317614 +710 74 optimizer.lr 0.01358572810563626 +710 74 
negative_sampler.num_negs_per_pos 12.0 +710 74 training.batch_size 2.0 +710 75 model.embedding_dim 0.0 +710 75 model.scoring_fct_norm 2.0 +710 75 loss.margin 2.704689287984834 +710 75 optimizer.lr 0.013158316702360092 +710 75 negative_sampler.num_negs_per_pos 77.0 +710 75 training.batch_size 2.0 +710 76 model.embedding_dim 2.0 +710 76 model.scoring_fct_norm 1.0 +710 76 loss.margin 5.7610465703540665 +710 76 optimizer.lr 0.004493238358837023 +710 76 negative_sampler.num_negs_per_pos 29.0 +710 76 training.batch_size 2.0 +710 77 model.embedding_dim 2.0 +710 77 model.scoring_fct_norm 1.0 +710 77 loss.margin 4.124045100878584 +710 77 optimizer.lr 0.0048777422265834005 +710 77 negative_sampler.num_negs_per_pos 61.0 +710 77 training.batch_size 0.0 +710 78 model.embedding_dim 2.0 +710 78 model.scoring_fct_norm 2.0 +710 78 loss.margin 3.8513876618065326 +710 78 optimizer.lr 0.001291843474151219 +710 78 negative_sampler.num_negs_per_pos 66.0 +710 78 training.batch_size 0.0 +710 79 model.embedding_dim 2.0 +710 79 model.scoring_fct_norm 2.0 +710 79 loss.margin 9.141523861509027 +710 79 optimizer.lr 0.0010342584157272683 +710 79 negative_sampler.num_negs_per_pos 1.0 +710 79 training.batch_size 0.0 +710 80 model.embedding_dim 0.0 +710 80 model.scoring_fct_norm 2.0 +710 80 loss.margin 1.3375673533403558 +710 80 optimizer.lr 0.0019783180599545406 +710 80 negative_sampler.num_negs_per_pos 13.0 +710 80 training.batch_size 1.0 +710 81 model.embedding_dim 0.0 +710 81 model.scoring_fct_norm 2.0 +710 81 loss.margin 3.9775387079558384 +710 81 optimizer.lr 0.023535080814586676 +710 81 negative_sampler.num_negs_per_pos 10.0 +710 81 training.batch_size 0.0 +710 82 model.embedding_dim 0.0 +710 82 model.scoring_fct_norm 2.0 +710 82 loss.margin 9.949013132191483 +710 82 optimizer.lr 0.047328106245100095 +710 82 negative_sampler.num_negs_per_pos 63.0 +710 82 training.batch_size 0.0 +710 83 model.embedding_dim 1.0 +710 83 model.scoring_fct_norm 2.0 +710 83 loss.margin 3.951637969094402 +710 83 
optimizer.lr 0.0011317046647699397 +710 83 negative_sampler.num_negs_per_pos 87.0 +710 83 training.batch_size 2.0 +710 84 model.embedding_dim 2.0 +710 84 model.scoring_fct_norm 1.0 +710 84 loss.margin 6.889886525677429 +710 84 optimizer.lr 0.057517467226616874 +710 84 negative_sampler.num_negs_per_pos 63.0 +710 84 training.batch_size 2.0 +710 85 model.embedding_dim 2.0 +710 85 model.scoring_fct_norm 1.0 +710 85 loss.margin 8.312136310847649 +710 85 optimizer.lr 0.015134718313672794 +710 85 negative_sampler.num_negs_per_pos 7.0 +710 85 training.batch_size 1.0 +710 86 model.embedding_dim 1.0 +710 86 model.scoring_fct_norm 2.0 +710 86 loss.margin 6.621351821755968 +710 86 optimizer.lr 0.0010644555582970765 +710 86 negative_sampler.num_negs_per_pos 53.0 +710 86 training.batch_size 2.0 +710 87 model.embedding_dim 1.0 +710 87 model.scoring_fct_norm 1.0 +710 87 loss.margin 0.9623405606106239 +710 87 optimizer.lr 0.027457356973599394 +710 87 negative_sampler.num_negs_per_pos 60.0 +710 87 training.batch_size 2.0 +710 88 model.embedding_dim 2.0 +710 88 model.scoring_fct_norm 1.0 +710 88 loss.margin 2.549525778851476 +710 88 optimizer.lr 0.08224634666157649 +710 88 negative_sampler.num_negs_per_pos 32.0 +710 88 training.batch_size 2.0 +710 89 model.embedding_dim 1.0 +710 89 model.scoring_fct_norm 2.0 +710 89 loss.margin 0.8820765578977664 +710 89 optimizer.lr 0.008240335702614602 +710 89 negative_sampler.num_negs_per_pos 83.0 +710 89 training.batch_size 1.0 +710 90 model.embedding_dim 0.0 +710 90 model.scoring_fct_norm 1.0 +710 90 loss.margin 6.118496194692812 +710 90 optimizer.lr 0.081487344501812 +710 90 negative_sampler.num_negs_per_pos 34.0 +710 90 training.batch_size 2.0 +710 91 model.embedding_dim 0.0 +710 91 model.scoring_fct_norm 1.0 +710 91 loss.margin 9.627771288370008 +710 91 optimizer.lr 0.019446294612784184 +710 91 negative_sampler.num_negs_per_pos 83.0 +710 91 training.batch_size 2.0 +710 92 model.embedding_dim 2.0 +710 92 model.scoring_fct_norm 1.0 +710 92 
loss.margin 8.373600932123784 +710 92 optimizer.lr 0.0031535713018298593 +710 92 negative_sampler.num_negs_per_pos 95.0 +710 92 training.batch_size 1.0 +710 93 model.embedding_dim 1.0 +710 93 model.scoring_fct_norm 2.0 +710 93 loss.margin 6.039570102657456 +710 93 optimizer.lr 0.004663077813205786 +710 93 negative_sampler.num_negs_per_pos 84.0 +710 93 training.batch_size 0.0 +710 94 model.embedding_dim 2.0 +710 94 model.scoring_fct_norm 1.0 +710 94 loss.margin 3.7627917629232472 +710 94 optimizer.lr 0.07430836642509422 +710 94 negative_sampler.num_negs_per_pos 96.0 +710 94 training.batch_size 0.0 +710 95 model.embedding_dim 1.0 +710 95 model.scoring_fct_norm 2.0 +710 95 loss.margin 3.3601223272643526 +710 95 optimizer.lr 0.002184115728860181 +710 95 negative_sampler.num_negs_per_pos 36.0 +710 95 training.batch_size 1.0 +710 96 model.embedding_dim 1.0 +710 96 model.scoring_fct_norm 1.0 +710 96 loss.margin 5.278783329413308 +710 96 optimizer.lr 0.02692935909985725 +710 96 negative_sampler.num_negs_per_pos 27.0 +710 96 training.batch_size 2.0 +710 97 model.embedding_dim 1.0 +710 97 model.scoring_fct_norm 2.0 +710 97 loss.margin 3.678491830080764 +710 97 optimizer.lr 0.0010460510788521636 +710 97 negative_sampler.num_negs_per_pos 16.0 +710 97 training.batch_size 0.0 +710 98 model.embedding_dim 0.0 +710 98 model.scoring_fct_norm 1.0 +710 98 loss.margin 5.688367982053983 +710 98 optimizer.lr 0.04583865401359654 +710 98 negative_sampler.num_negs_per_pos 28.0 +710 98 training.batch_size 0.0 +710 99 model.embedding_dim 2.0 +710 99 model.scoring_fct_norm 2.0 +710 99 loss.margin 3.9834714602589445 +710 99 optimizer.lr 0.04382754935827922 +710 99 negative_sampler.num_negs_per_pos 35.0 +710 99 training.batch_size 1.0 +710 100 model.embedding_dim 1.0 +710 100 model.scoring_fct_norm 1.0 +710 100 loss.margin 1.1154962572265998 +710 100 optimizer.lr 0.02675935561214599 +710 100 negative_sampler.num_negs_per_pos 77.0 +710 100 training.batch_size 1.0 +710 1 dataset """kinships""" 
+710 1 model """structuredembedding""" +710 1 loss """marginranking""" +710 1 regularizer """no""" +710 1 optimizer """adam""" +710 1 training_loop """owa""" +710 1 negative_sampler """basic""" +710 1 evaluator """rankbased""" +710 2 dataset """kinships""" +710 2 model """structuredembedding""" +710 2 loss """marginranking""" +710 2 regularizer """no""" +710 2 optimizer """adam""" +710 2 training_loop """owa""" +710 2 negative_sampler """basic""" +710 2 evaluator """rankbased""" +710 3 dataset """kinships""" +710 3 model """structuredembedding""" +710 3 loss """marginranking""" +710 3 regularizer """no""" +710 3 optimizer """adam""" +710 3 training_loop """owa""" +710 3 negative_sampler """basic""" +710 3 evaluator """rankbased""" +710 4 dataset """kinships""" +710 4 model """structuredembedding""" +710 4 loss """marginranking""" +710 4 regularizer """no""" +710 4 optimizer """adam""" +710 4 training_loop """owa""" +710 4 negative_sampler """basic""" +710 4 evaluator """rankbased""" +710 5 dataset """kinships""" +710 5 model """structuredembedding""" +710 5 loss """marginranking""" +710 5 regularizer """no""" +710 5 optimizer """adam""" +710 5 training_loop """owa""" +710 5 negative_sampler """basic""" +710 5 evaluator """rankbased""" +710 6 dataset """kinships""" +710 6 model """structuredembedding""" +710 6 loss """marginranking""" +710 6 regularizer """no""" +710 6 optimizer """adam""" +710 6 training_loop """owa""" +710 6 negative_sampler """basic""" +710 6 evaluator """rankbased""" +710 7 dataset """kinships""" +710 7 model """structuredembedding""" +710 7 loss """marginranking""" +710 7 regularizer """no""" +710 7 optimizer """adam""" +710 7 training_loop """owa""" +710 7 negative_sampler """basic""" +710 7 evaluator """rankbased""" +710 8 dataset """kinships""" +710 8 model """structuredembedding""" +710 8 loss """marginranking""" +710 8 regularizer """no""" +710 8 optimizer """adam""" +710 8 training_loop """owa""" +710 8 negative_sampler """basic""" +710 8 
evaluator """rankbased""" +710 9 dataset """kinships""" +710 9 model """structuredembedding""" +710 9 loss """marginranking""" +710 9 regularizer """no""" +710 9 optimizer """adam""" +710 9 training_loop """owa""" +710 9 negative_sampler """basic""" +710 9 evaluator """rankbased""" +710 10 dataset """kinships""" +710 10 model """structuredembedding""" +710 10 loss """marginranking""" +710 10 regularizer """no""" +710 10 optimizer """adam""" +710 10 training_loop """owa""" +710 10 negative_sampler """basic""" +710 10 evaluator """rankbased""" +710 11 dataset """kinships""" +710 11 model """structuredembedding""" +710 11 loss """marginranking""" +710 11 regularizer """no""" +710 11 optimizer """adam""" +710 11 training_loop """owa""" +710 11 negative_sampler """basic""" +710 11 evaluator """rankbased""" +710 12 dataset """kinships""" +710 12 model """structuredembedding""" +710 12 loss """marginranking""" +710 12 regularizer """no""" +710 12 optimizer """adam""" +710 12 training_loop """owa""" +710 12 negative_sampler """basic""" +710 12 evaluator """rankbased""" +710 13 dataset """kinships""" +710 13 model """structuredembedding""" +710 13 loss """marginranking""" +710 13 regularizer """no""" +710 13 optimizer """adam""" +710 13 training_loop """owa""" +710 13 negative_sampler """basic""" +710 13 evaluator """rankbased""" +710 14 dataset """kinships""" +710 14 model """structuredembedding""" +710 14 loss """marginranking""" +710 14 regularizer """no""" +710 14 optimizer """adam""" +710 14 training_loop """owa""" +710 14 negative_sampler """basic""" +710 14 evaluator """rankbased""" +710 15 dataset """kinships""" +710 15 model """structuredembedding""" +710 15 loss """marginranking""" +710 15 regularizer """no""" +710 15 optimizer """adam""" +710 15 training_loop """owa""" +710 15 negative_sampler """basic""" +710 15 evaluator """rankbased""" +710 16 dataset """kinships""" +710 16 model """structuredembedding""" +710 16 loss """marginranking""" +710 16 regularizer 
"""no""" +710 16 optimizer """adam""" +710 16 training_loop """owa""" +710 16 negative_sampler """basic""" +710 16 evaluator """rankbased""" +710 17 dataset """kinships""" +710 17 model """structuredembedding""" +710 17 loss """marginranking""" +710 17 regularizer """no""" +710 17 optimizer """adam""" +710 17 training_loop """owa""" +710 17 negative_sampler """basic""" +710 17 evaluator """rankbased""" +710 18 dataset """kinships""" +710 18 model """structuredembedding""" +710 18 loss """marginranking""" +710 18 regularizer """no""" +710 18 optimizer """adam""" +710 18 training_loop """owa""" +710 18 negative_sampler """basic""" +710 18 evaluator """rankbased""" +710 19 dataset """kinships""" +710 19 model """structuredembedding""" +710 19 loss """marginranking""" +710 19 regularizer """no""" +710 19 optimizer """adam""" +710 19 training_loop """owa""" +710 19 negative_sampler """basic""" +710 19 evaluator """rankbased""" +710 20 dataset """kinships""" +710 20 model """structuredembedding""" +710 20 loss """marginranking""" +710 20 regularizer """no""" +710 20 optimizer """adam""" +710 20 training_loop """owa""" +710 20 negative_sampler """basic""" +710 20 evaluator """rankbased""" +710 21 dataset """kinships""" +710 21 model """structuredembedding""" +710 21 loss """marginranking""" +710 21 regularizer """no""" +710 21 optimizer """adam""" +710 21 training_loop """owa""" +710 21 negative_sampler """basic""" +710 21 evaluator """rankbased""" +710 22 dataset """kinships""" +710 22 model """structuredembedding""" +710 22 loss """marginranking""" +710 22 regularizer """no""" +710 22 optimizer """adam""" +710 22 training_loop """owa""" +710 22 negative_sampler """basic""" +710 22 evaluator """rankbased""" +710 23 dataset """kinships""" +710 23 model """structuredembedding""" +710 23 loss """marginranking""" +710 23 regularizer """no""" +710 23 optimizer """adam""" +710 23 training_loop """owa""" +710 23 negative_sampler """basic""" +710 23 evaluator """rankbased""" 
+710 24 dataset """kinships""" +710 24 model """structuredembedding""" +710 24 loss """marginranking""" +710 24 regularizer """no""" +710 24 optimizer """adam""" +710 24 training_loop """owa""" +710 24 negative_sampler """basic""" +710 24 evaluator """rankbased""" +710 25 dataset """kinships""" +710 25 model """structuredembedding""" +710 25 loss """marginranking""" +710 25 regularizer """no""" +710 25 optimizer """adam""" +710 25 training_loop """owa""" +710 25 negative_sampler """basic""" +710 25 evaluator """rankbased""" +710 26 dataset """kinships""" +710 26 model """structuredembedding""" +710 26 loss """marginranking""" +710 26 regularizer """no""" +710 26 optimizer """adam""" +710 26 training_loop """owa""" +710 26 negative_sampler """basic""" +710 26 evaluator """rankbased""" +710 27 dataset """kinships""" +710 27 model """structuredembedding""" +710 27 loss """marginranking""" +710 27 regularizer """no""" +710 27 optimizer """adam""" +710 27 training_loop """owa""" +710 27 negative_sampler """basic""" +710 27 evaluator """rankbased""" +710 28 dataset """kinships""" +710 28 model """structuredembedding""" +710 28 loss """marginranking""" +710 28 regularizer """no""" +710 28 optimizer """adam""" +710 28 training_loop """owa""" +710 28 negative_sampler """basic""" +710 28 evaluator """rankbased""" +710 29 dataset """kinships""" +710 29 model """structuredembedding""" +710 29 loss """marginranking""" +710 29 regularizer """no""" +710 29 optimizer """adam""" +710 29 training_loop """owa""" +710 29 negative_sampler """basic""" +710 29 evaluator """rankbased""" +710 30 dataset """kinships""" +710 30 model """structuredembedding""" +710 30 loss """marginranking""" +710 30 regularizer """no""" +710 30 optimizer """adam""" +710 30 training_loop """owa""" +710 30 negative_sampler """basic""" +710 30 evaluator """rankbased""" +710 31 dataset """kinships""" +710 31 model """structuredembedding""" +710 31 loss """marginranking""" +710 31 regularizer """no""" +710 31 
optimizer """adam""" +710 31 training_loop """owa""" +710 31 negative_sampler """basic""" +710 31 evaluator """rankbased""" +710 32 dataset """kinships""" +710 32 model """structuredembedding""" +710 32 loss """marginranking""" +710 32 regularizer """no""" +710 32 optimizer """adam""" +710 32 training_loop """owa""" +710 32 negative_sampler """basic""" +710 32 evaluator """rankbased""" +710 33 dataset """kinships""" +710 33 model """structuredembedding""" +710 33 loss """marginranking""" +710 33 regularizer """no""" +710 33 optimizer """adam""" +710 33 training_loop """owa""" +710 33 negative_sampler """basic""" +710 33 evaluator """rankbased""" +710 34 dataset """kinships""" +710 34 model """structuredembedding""" +710 34 loss """marginranking""" +710 34 regularizer """no""" +710 34 optimizer """adam""" +710 34 training_loop """owa""" +710 34 negative_sampler """basic""" +710 34 evaluator """rankbased""" +710 35 dataset """kinships""" +710 35 model """structuredembedding""" +710 35 loss """marginranking""" +710 35 regularizer """no""" +710 35 optimizer """adam""" +710 35 training_loop """owa""" +710 35 negative_sampler """basic""" +710 35 evaluator """rankbased""" +710 36 dataset """kinships""" +710 36 model """structuredembedding""" +710 36 loss """marginranking""" +710 36 regularizer """no""" +710 36 optimizer """adam""" +710 36 training_loop """owa""" +710 36 negative_sampler """basic""" +710 36 evaluator """rankbased""" +710 37 dataset """kinships""" +710 37 model """structuredembedding""" +710 37 loss """marginranking""" +710 37 regularizer """no""" +710 37 optimizer """adam""" +710 37 training_loop """owa""" +710 37 negative_sampler """basic""" +710 37 evaluator """rankbased""" +710 38 dataset """kinships""" +710 38 model """structuredembedding""" +710 38 loss """marginranking""" +710 38 regularizer """no""" +710 38 optimizer """adam""" +710 38 training_loop """owa""" +710 38 negative_sampler """basic""" +710 38 evaluator """rankbased""" +710 39 dataset 
"""kinships""" +710 39 model """structuredembedding""" +710 39 loss """marginranking""" +710 39 regularizer """no""" +710 39 optimizer """adam""" +710 39 training_loop """owa""" +710 39 negative_sampler """basic""" +710 39 evaluator """rankbased""" +710 40 dataset """kinships""" +710 40 model """structuredembedding""" +710 40 loss """marginranking""" +710 40 regularizer """no""" +710 40 optimizer """adam""" +710 40 training_loop """owa""" +710 40 negative_sampler """basic""" +710 40 evaluator """rankbased""" +710 41 dataset """kinships""" +710 41 model """structuredembedding""" +710 41 loss """marginranking""" +710 41 regularizer """no""" +710 41 optimizer """adam""" +710 41 training_loop """owa""" +710 41 negative_sampler """basic""" +710 41 evaluator """rankbased""" +710 42 dataset """kinships""" +710 42 model """structuredembedding""" +710 42 loss """marginranking""" +710 42 regularizer """no""" +710 42 optimizer """adam""" +710 42 training_loop """owa""" +710 42 negative_sampler """basic""" +710 42 evaluator """rankbased""" +710 43 dataset """kinships""" +710 43 model """structuredembedding""" +710 43 loss """marginranking""" +710 43 regularizer """no""" +710 43 optimizer """adam""" +710 43 training_loop """owa""" +710 43 negative_sampler """basic""" +710 43 evaluator """rankbased""" +710 44 dataset """kinships""" +710 44 model """structuredembedding""" +710 44 loss """marginranking""" +710 44 regularizer """no""" +710 44 optimizer """adam""" +710 44 training_loop """owa""" +710 44 negative_sampler """basic""" +710 44 evaluator """rankbased""" +710 45 dataset """kinships""" +710 45 model """structuredembedding""" +710 45 loss """marginranking""" +710 45 regularizer """no""" +710 45 optimizer """adam""" +710 45 training_loop """owa""" +710 45 negative_sampler """basic""" +710 45 evaluator """rankbased""" +710 46 dataset """kinships""" +710 46 model """structuredembedding""" +710 46 loss """marginranking""" +710 46 regularizer """no""" +710 46 optimizer 
"""adam""" +710 46 training_loop """owa""" +710 46 negative_sampler """basic""" +710 46 evaluator """rankbased""" +710 47 dataset """kinships""" +710 47 model """structuredembedding""" +710 47 loss """marginranking""" +710 47 regularizer """no""" +710 47 optimizer """adam""" +710 47 training_loop """owa""" +710 47 negative_sampler """basic""" +710 47 evaluator """rankbased""" +710 48 dataset """kinships""" +710 48 model """structuredembedding""" +710 48 loss """marginranking""" +710 48 regularizer """no""" +710 48 optimizer """adam""" +710 48 training_loop """owa""" +710 48 negative_sampler """basic""" +710 48 evaluator """rankbased""" +710 49 dataset """kinships""" +710 49 model """structuredembedding""" +710 49 loss """marginranking""" +710 49 regularizer """no""" +710 49 optimizer """adam""" +710 49 training_loop """owa""" +710 49 negative_sampler """basic""" +710 49 evaluator """rankbased""" +710 50 dataset """kinships""" +710 50 model """structuredembedding""" +710 50 loss """marginranking""" +710 50 regularizer """no""" +710 50 optimizer """adam""" +710 50 training_loop """owa""" +710 50 negative_sampler """basic""" +710 50 evaluator """rankbased""" +710 51 dataset """kinships""" +710 51 model """structuredembedding""" +710 51 loss """marginranking""" +710 51 regularizer """no""" +710 51 optimizer """adam""" +710 51 training_loop """owa""" +710 51 negative_sampler """basic""" +710 51 evaluator """rankbased""" +710 52 dataset """kinships""" +710 52 model """structuredembedding""" +710 52 loss """marginranking""" +710 52 regularizer """no""" +710 52 optimizer """adam""" +710 52 training_loop """owa""" +710 52 negative_sampler """basic""" +710 52 evaluator """rankbased""" +710 53 dataset """kinships""" +710 53 model """structuredembedding""" +710 53 loss """marginranking""" +710 53 regularizer """no""" +710 53 optimizer """adam""" +710 53 training_loop """owa""" +710 53 negative_sampler """basic""" +710 53 evaluator """rankbased""" +710 54 dataset """kinships""" 
+710 54 model """structuredembedding""" +710 54 loss """marginranking""" +710 54 regularizer """no""" +710 54 optimizer """adam""" +710 54 training_loop """owa""" +710 54 negative_sampler """basic""" +710 54 evaluator """rankbased""" +710 55 dataset """kinships""" +710 55 model """structuredembedding""" +710 55 loss """marginranking""" +710 55 regularizer """no""" +710 55 optimizer """adam""" +710 55 training_loop """owa""" +710 55 negative_sampler """basic""" +710 55 evaluator """rankbased""" +710 56 dataset """kinships""" +710 56 model """structuredembedding""" +710 56 loss """marginranking""" +710 56 regularizer """no""" +710 56 optimizer """adam""" +710 56 training_loop """owa""" +710 56 negative_sampler """basic""" +710 56 evaluator """rankbased""" +710 57 dataset """kinships""" +710 57 model """structuredembedding""" +710 57 loss """marginranking""" +710 57 regularizer """no""" +710 57 optimizer """adam""" +710 57 training_loop """owa""" +710 57 negative_sampler """basic""" +710 57 evaluator """rankbased""" +710 58 dataset """kinships""" +710 58 model """structuredembedding""" +710 58 loss """marginranking""" +710 58 regularizer """no""" +710 58 optimizer """adam""" +710 58 training_loop """owa""" +710 58 negative_sampler """basic""" +710 58 evaluator """rankbased""" +710 59 dataset """kinships""" +710 59 model """structuredembedding""" +710 59 loss """marginranking""" +710 59 regularizer """no""" +710 59 optimizer """adam""" +710 59 training_loop """owa""" +710 59 negative_sampler """basic""" +710 59 evaluator """rankbased""" +710 60 dataset """kinships""" +710 60 model """structuredembedding""" +710 60 loss """marginranking""" +710 60 regularizer """no""" +710 60 optimizer """adam""" +710 60 training_loop """owa""" +710 60 negative_sampler """basic""" +710 60 evaluator """rankbased""" +710 61 dataset """kinships""" +710 61 model """structuredembedding""" +710 61 loss """marginranking""" +710 61 regularizer """no""" +710 61 optimizer """adam""" +710 61 
training_loop """owa""" +710 61 negative_sampler """basic""" +710 61 evaluator """rankbased""" +710 62 dataset """kinships""" +710 62 model """structuredembedding""" +710 62 loss """marginranking""" +710 62 regularizer """no""" +710 62 optimizer """adam""" +710 62 training_loop """owa""" +710 62 negative_sampler """basic""" +710 62 evaluator """rankbased""" +710 63 dataset """kinships""" +710 63 model """structuredembedding""" +710 63 loss """marginranking""" +710 63 regularizer """no""" +710 63 optimizer """adam""" +710 63 training_loop """owa""" +710 63 negative_sampler """basic""" +710 63 evaluator """rankbased""" +710 64 dataset """kinships""" +710 64 model """structuredembedding""" +710 64 loss """marginranking""" +710 64 regularizer """no""" +710 64 optimizer """adam""" +710 64 training_loop """owa""" +710 64 negative_sampler """basic""" +710 64 evaluator """rankbased""" +710 65 dataset """kinships""" +710 65 model """structuredembedding""" +710 65 loss """marginranking""" +710 65 regularizer """no""" +710 65 optimizer """adam""" +710 65 training_loop """owa""" +710 65 negative_sampler """basic""" +710 65 evaluator """rankbased""" +710 66 dataset """kinships""" +710 66 model """structuredembedding""" +710 66 loss """marginranking""" +710 66 regularizer """no""" +710 66 optimizer """adam""" +710 66 training_loop """owa""" +710 66 negative_sampler """basic""" +710 66 evaluator """rankbased""" +710 67 dataset """kinships""" +710 67 model """structuredembedding""" +710 67 loss """marginranking""" +710 67 regularizer """no""" +710 67 optimizer """adam""" +710 67 training_loop """owa""" +710 67 negative_sampler """basic""" +710 67 evaluator """rankbased""" +710 68 dataset """kinships""" +710 68 model """structuredembedding""" +710 68 loss """marginranking""" +710 68 regularizer """no""" +710 68 optimizer """adam""" +710 68 training_loop """owa""" +710 68 negative_sampler """basic""" +710 68 evaluator """rankbased""" +710 69 dataset """kinships""" +710 69 model 
"""structuredembedding""" +710 69 loss """marginranking""" +710 69 regularizer """no""" +710 69 optimizer """adam""" +710 69 training_loop """owa""" +710 69 negative_sampler """basic""" +710 69 evaluator """rankbased""" +710 70 dataset """kinships""" +710 70 model """structuredembedding""" +710 70 loss """marginranking""" +710 70 regularizer """no""" +710 70 optimizer """adam""" +710 70 training_loop """owa""" +710 70 negative_sampler """basic""" +710 70 evaluator """rankbased""" +710 71 dataset """kinships""" +710 71 model """structuredembedding""" +710 71 loss """marginranking""" +710 71 regularizer """no""" +710 71 optimizer """adam""" +710 71 training_loop """owa""" +710 71 negative_sampler """basic""" +710 71 evaluator """rankbased""" +710 72 dataset """kinships""" +710 72 model """structuredembedding""" +710 72 loss """marginranking""" +710 72 regularizer """no""" +710 72 optimizer """adam""" +710 72 training_loop """owa""" +710 72 negative_sampler """basic""" +710 72 evaluator """rankbased""" +710 73 dataset """kinships""" +710 73 model """structuredembedding""" +710 73 loss """marginranking""" +710 73 regularizer """no""" +710 73 optimizer """adam""" +710 73 training_loop """owa""" +710 73 negative_sampler """basic""" +710 73 evaluator """rankbased""" +710 74 dataset """kinships""" +710 74 model """structuredembedding""" +710 74 loss """marginranking""" +710 74 regularizer """no""" +710 74 optimizer """adam""" +710 74 training_loop """owa""" +710 74 negative_sampler """basic""" +710 74 evaluator """rankbased""" +710 75 dataset """kinships""" +710 75 model """structuredembedding""" +710 75 loss """marginranking""" +710 75 regularizer """no""" +710 75 optimizer """adam""" +710 75 training_loop """owa""" +710 75 negative_sampler """basic""" +710 75 evaluator """rankbased""" +710 76 dataset """kinships""" +710 76 model """structuredembedding""" +710 76 loss """marginranking""" +710 76 regularizer """no""" +710 76 optimizer """adam""" +710 76 training_loop 
"""owa""" +710 76 negative_sampler """basic""" +710 76 evaluator """rankbased""" +710 77 dataset """kinships""" +710 77 model """structuredembedding""" +710 77 loss """marginranking""" +710 77 regularizer """no""" +710 77 optimizer """adam""" +710 77 training_loop """owa""" +710 77 negative_sampler """basic""" +710 77 evaluator """rankbased""" +710 78 dataset """kinships""" +710 78 model """structuredembedding""" +710 78 loss """marginranking""" +710 78 regularizer """no""" +710 78 optimizer """adam""" +710 78 training_loop """owa""" +710 78 negative_sampler """basic""" +710 78 evaluator """rankbased""" +710 79 dataset """kinships""" +710 79 model """structuredembedding""" +710 79 loss """marginranking""" +710 79 regularizer """no""" +710 79 optimizer """adam""" +710 79 training_loop """owa""" +710 79 negative_sampler """basic""" +710 79 evaluator """rankbased""" +710 80 dataset """kinships""" +710 80 model """structuredembedding""" +710 80 loss """marginranking""" +710 80 regularizer """no""" +710 80 optimizer """adam""" +710 80 training_loop """owa""" +710 80 negative_sampler """basic""" +710 80 evaluator """rankbased""" +710 81 dataset """kinships""" +710 81 model """structuredembedding""" +710 81 loss """marginranking""" +710 81 regularizer """no""" +710 81 optimizer """adam""" +710 81 training_loop """owa""" +710 81 negative_sampler """basic""" +710 81 evaluator """rankbased""" +710 82 dataset """kinships""" +710 82 model """structuredembedding""" +710 82 loss """marginranking""" +710 82 regularizer """no""" +710 82 optimizer """adam""" +710 82 training_loop """owa""" +710 82 negative_sampler """basic""" +710 82 evaluator """rankbased""" +710 83 dataset """kinships""" +710 83 model """structuredembedding""" +710 83 loss """marginranking""" +710 83 regularizer """no""" +710 83 optimizer """adam""" +710 83 training_loop """owa""" +710 83 negative_sampler """basic""" +710 83 evaluator """rankbased""" +710 84 dataset """kinships""" +710 84 model 
"""structuredembedding""" +710 84 loss """marginranking""" +710 84 regularizer """no""" +710 84 optimizer """adam""" +710 84 training_loop """owa""" +710 84 negative_sampler """basic""" +710 84 evaluator """rankbased""" +710 85 dataset """kinships""" +710 85 model """structuredembedding""" +710 85 loss """marginranking""" +710 85 regularizer """no""" +710 85 optimizer """adam""" +710 85 training_loop """owa""" +710 85 negative_sampler """basic""" +710 85 evaluator """rankbased""" +710 86 dataset """kinships""" +710 86 model """structuredembedding""" +710 86 loss """marginranking""" +710 86 regularizer """no""" +710 86 optimizer """adam""" +710 86 training_loop """owa""" +710 86 negative_sampler """basic""" +710 86 evaluator """rankbased""" +710 87 dataset """kinships""" +710 87 model """structuredembedding""" +710 87 loss """marginranking""" +710 87 regularizer """no""" +710 87 optimizer """adam""" +710 87 training_loop """owa""" +710 87 negative_sampler """basic""" +710 87 evaluator """rankbased""" +710 88 dataset """kinships""" +710 88 model """structuredembedding""" +710 88 loss """marginranking""" +710 88 regularizer """no""" +710 88 optimizer """adam""" +710 88 training_loop """owa""" +710 88 negative_sampler """basic""" +710 88 evaluator """rankbased""" +710 89 dataset """kinships""" +710 89 model """structuredembedding""" +710 89 loss """marginranking""" +710 89 regularizer """no""" +710 89 optimizer """adam""" +710 89 training_loop """owa""" +710 89 negative_sampler """basic""" +710 89 evaluator """rankbased""" +710 90 dataset """kinships""" +710 90 model """structuredembedding""" +710 90 loss """marginranking""" +710 90 regularizer """no""" +710 90 optimizer """adam""" +710 90 training_loop """owa""" +710 90 negative_sampler """basic""" +710 90 evaluator """rankbased""" +710 91 dataset """kinships""" +710 91 model """structuredembedding""" +710 91 loss """marginranking""" +710 91 regularizer """no""" +710 91 optimizer """adam""" +710 91 training_loop 
"""owa""" +710 91 negative_sampler """basic""" +710 91 evaluator """rankbased""" +710 92 dataset """kinships""" +710 92 model """structuredembedding""" +710 92 loss """marginranking""" +710 92 regularizer """no""" +710 92 optimizer """adam""" +710 92 training_loop """owa""" +710 92 negative_sampler """basic""" +710 92 evaluator """rankbased""" +710 93 dataset """kinships""" +710 93 model """structuredembedding""" +710 93 loss """marginranking""" +710 93 regularizer """no""" +710 93 optimizer """adam""" +710 93 training_loop """owa""" +710 93 negative_sampler """basic""" +710 93 evaluator """rankbased""" +710 94 dataset """kinships""" +710 94 model """structuredembedding""" +710 94 loss """marginranking""" +710 94 regularizer """no""" +710 94 optimizer """adam""" +710 94 training_loop """owa""" +710 94 negative_sampler """basic""" +710 94 evaluator """rankbased""" +710 95 dataset """kinships""" +710 95 model """structuredembedding""" +710 95 loss """marginranking""" +710 95 regularizer """no""" +710 95 optimizer """adam""" +710 95 training_loop """owa""" +710 95 negative_sampler """basic""" +710 95 evaluator """rankbased""" +710 96 dataset """kinships""" +710 96 model """structuredembedding""" +710 96 loss """marginranking""" +710 96 regularizer """no""" +710 96 optimizer """adam""" +710 96 training_loop """owa""" +710 96 negative_sampler """basic""" +710 96 evaluator """rankbased""" +710 97 dataset """kinships""" +710 97 model """structuredembedding""" +710 97 loss """marginranking""" +710 97 regularizer """no""" +710 97 optimizer """adam""" +710 97 training_loop """owa""" +710 97 negative_sampler """basic""" +710 97 evaluator """rankbased""" +710 98 dataset """kinships""" +710 98 model """structuredembedding""" +710 98 loss """marginranking""" +710 98 regularizer """no""" +710 98 optimizer """adam""" +710 98 training_loop """owa""" +710 98 negative_sampler """basic""" +710 98 evaluator """rankbased""" +710 99 dataset """kinships""" +710 99 model 
"""structuredembedding""" +710 99 loss """marginranking""" +710 99 regularizer """no""" +710 99 optimizer """adam""" +710 99 training_loop """owa""" +710 99 negative_sampler """basic""" +710 99 evaluator """rankbased""" +710 100 dataset """kinships""" +710 100 model """structuredembedding""" +710 100 loss """marginranking""" +710 100 regularizer """no""" +710 100 optimizer """adam""" +710 100 training_loop """owa""" +710 100 negative_sampler """basic""" +710 100 evaluator """rankbased""" +711 1 model.embedding_dim 1.0 +711 1 model.scoring_fct_norm 1.0 +711 1 loss.margin 10.677543468579438 +711 1 loss.adversarial_temperature 0.70713401623974 +711 1 optimizer.lr 0.0014185539067246833 +711 1 negative_sampler.num_negs_per_pos 24.0 +711 1 training.batch_size 0.0 +711 2 model.embedding_dim 2.0 +711 2 model.scoring_fct_norm 2.0 +711 2 loss.margin 11.993971355342794 +711 2 loss.adversarial_temperature 0.6106078723911982 +711 2 optimizer.lr 0.07734619793692338 +711 2 negative_sampler.num_negs_per_pos 28.0 +711 2 training.batch_size 2.0 +711 3 model.embedding_dim 1.0 +711 3 model.scoring_fct_norm 2.0 +711 3 loss.margin 24.909282070300552 +711 3 loss.adversarial_temperature 0.9811492600114418 +711 3 optimizer.lr 0.00127307467345237 +711 3 negative_sampler.num_negs_per_pos 81.0 +711 3 training.batch_size 0.0 +711 4 model.embedding_dim 1.0 +711 4 model.scoring_fct_norm 2.0 +711 4 loss.margin 27.381567618247693 +711 4 loss.adversarial_temperature 0.497355076234563 +711 4 optimizer.lr 0.0012867673597840759 +711 4 negative_sampler.num_negs_per_pos 11.0 +711 4 training.batch_size 1.0 +711 5 model.embedding_dim 2.0 +711 5 model.scoring_fct_norm 2.0 +711 5 loss.margin 1.004596326145274 +711 5 loss.adversarial_temperature 0.29031616346571587 +711 5 optimizer.lr 0.029856919079628966 +711 5 negative_sampler.num_negs_per_pos 45.0 +711 5 training.batch_size 1.0 +711 6 model.embedding_dim 2.0 +711 6 model.scoring_fct_norm 2.0 +711 6 loss.margin 24.232629464235004 +711 6 
loss.adversarial_temperature 0.6702288352446113 +711 6 optimizer.lr 0.0028306428396572825 +711 6 negative_sampler.num_negs_per_pos 98.0 +711 6 training.batch_size 1.0 +711 7 model.embedding_dim 1.0 +711 7 model.scoring_fct_norm 2.0 +711 7 loss.margin 5.74771056653009 +711 7 loss.adversarial_temperature 0.7945584605892599 +711 7 optimizer.lr 0.026624075154751896 +711 7 negative_sampler.num_negs_per_pos 24.0 +711 7 training.batch_size 2.0 +711 8 model.embedding_dim 0.0 +711 8 model.scoring_fct_norm 1.0 +711 8 loss.margin 3.963928152630516 +711 8 loss.adversarial_temperature 0.24940488159853744 +711 8 optimizer.lr 0.008743354920280695 +711 8 negative_sampler.num_negs_per_pos 12.0 +711 8 training.batch_size 0.0 +711 9 model.embedding_dim 0.0 +711 9 model.scoring_fct_norm 2.0 +711 9 loss.margin 8.192886001945034 +711 9 loss.adversarial_temperature 0.798595144122372 +711 9 optimizer.lr 0.007765165158137708 +711 9 negative_sampler.num_negs_per_pos 96.0 +711 9 training.batch_size 0.0 +711 10 model.embedding_dim 2.0 +711 10 model.scoring_fct_norm 1.0 +711 10 loss.margin 25.565940518789628 +711 10 loss.adversarial_temperature 0.4637947881681106 +711 10 optimizer.lr 0.08773752997566768 +711 10 negative_sampler.num_negs_per_pos 64.0 +711 10 training.batch_size 0.0 +711 11 model.embedding_dim 0.0 +711 11 model.scoring_fct_norm 2.0 +711 11 loss.margin 16.69651831488653 +711 11 loss.adversarial_temperature 0.13560240894217976 +711 11 optimizer.lr 0.001096086993425862 +711 11 negative_sampler.num_negs_per_pos 54.0 +711 11 training.batch_size 1.0 +711 12 model.embedding_dim 2.0 +711 12 model.scoring_fct_norm 1.0 +711 12 loss.margin 26.157988946424307 +711 12 loss.adversarial_temperature 0.6692058000087869 +711 12 optimizer.lr 0.010266808730232486 +711 12 negative_sampler.num_negs_per_pos 84.0 +711 12 training.batch_size 0.0 +711 13 model.embedding_dim 0.0 +711 13 model.scoring_fct_norm 1.0 +711 13 loss.margin 22.817637740005445 +711 13 loss.adversarial_temperature 
0.2423408040931846 +711 13 optimizer.lr 0.08079515120893783 +711 13 negative_sampler.num_negs_per_pos 98.0 +711 13 training.batch_size 1.0 +711 14 model.embedding_dim 1.0 +711 14 model.scoring_fct_norm 1.0 +711 14 loss.margin 28.707147438789764 +711 14 loss.adversarial_temperature 0.6986554311019844 +711 14 optimizer.lr 0.037638561424909525 +711 14 negative_sampler.num_negs_per_pos 72.0 +711 14 training.batch_size 2.0 +711 15 model.embedding_dim 2.0 +711 15 model.scoring_fct_norm 2.0 +711 15 loss.margin 13.760810509696965 +711 15 loss.adversarial_temperature 0.8367950104854953 +711 15 optimizer.lr 0.0017525020287319885 +711 15 negative_sampler.num_negs_per_pos 11.0 +711 15 training.batch_size 0.0 +711 16 model.embedding_dim 2.0 +711 16 model.scoring_fct_norm 2.0 +711 16 loss.margin 24.093240363593726 +711 16 loss.adversarial_temperature 0.28820697188347966 +711 16 optimizer.lr 0.06057039453141795 +711 16 negative_sampler.num_negs_per_pos 28.0 +711 16 training.batch_size 0.0 +711 17 model.embedding_dim 0.0 +711 17 model.scoring_fct_norm 1.0 +711 17 loss.margin 28.065918750908512 +711 17 loss.adversarial_temperature 0.8440823567630641 +711 17 optimizer.lr 0.0019032267833121337 +711 17 negative_sampler.num_negs_per_pos 92.0 +711 17 training.batch_size 1.0 +711 18 model.embedding_dim 1.0 +711 18 model.scoring_fct_norm 1.0 +711 18 loss.margin 16.970168233843285 +711 18 loss.adversarial_temperature 0.22435457735105657 +711 18 optimizer.lr 0.004623299829887554 +711 18 negative_sampler.num_negs_per_pos 33.0 +711 18 training.batch_size 0.0 +711 19 model.embedding_dim 2.0 +711 19 model.scoring_fct_norm 2.0 +711 19 loss.margin 3.9982935016652936 +711 19 loss.adversarial_temperature 0.5587792845895247 +711 19 optimizer.lr 0.004510217397837058 +711 19 negative_sampler.num_negs_per_pos 90.0 +711 19 training.batch_size 1.0 +711 20 model.embedding_dim 0.0 +711 20 model.scoring_fct_norm 2.0 +711 20 loss.margin 8.91132503631798 +711 20 loss.adversarial_temperature 0.6622315780970168 
+711 20 optimizer.lr 0.03992119905354249 +711 20 negative_sampler.num_negs_per_pos 56.0 +711 20 training.batch_size 1.0 +711 21 model.embedding_dim 1.0 +711 21 model.scoring_fct_norm 1.0 +711 21 loss.margin 5.562314220852452 +711 21 loss.adversarial_temperature 0.35221932879553264 +711 21 optimizer.lr 0.0011995098227979373 +711 21 negative_sampler.num_negs_per_pos 50.0 +711 21 training.batch_size 2.0 +711 22 model.embedding_dim 0.0 +711 22 model.scoring_fct_norm 2.0 +711 22 loss.margin 3.9833713896160767 +711 22 loss.adversarial_temperature 0.6564847228798243 +711 22 optimizer.lr 0.004213307221714746 +711 22 negative_sampler.num_negs_per_pos 91.0 +711 22 training.batch_size 0.0 +711 23 model.embedding_dim 0.0 +711 23 model.scoring_fct_norm 2.0 +711 23 loss.margin 5.375820067083077 +711 23 loss.adversarial_temperature 0.47008423737659744 +711 23 optimizer.lr 0.007444555994784104 +711 23 negative_sampler.num_negs_per_pos 45.0 +711 23 training.batch_size 0.0 +711 24 model.embedding_dim 0.0 +711 24 model.scoring_fct_norm 1.0 +711 24 loss.margin 14.710855290252221 +711 24 loss.adversarial_temperature 0.294234035883235 +711 24 optimizer.lr 0.07667408474626623 +711 24 negative_sampler.num_negs_per_pos 72.0 +711 24 training.batch_size 1.0 +711 25 model.embedding_dim 1.0 +711 25 model.scoring_fct_norm 1.0 +711 25 loss.margin 24.67464190567368 +711 25 loss.adversarial_temperature 0.32794992218807034 +711 25 optimizer.lr 0.06350983248068634 +711 25 negative_sampler.num_negs_per_pos 65.0 +711 25 training.batch_size 1.0 +711 26 model.embedding_dim 0.0 +711 26 model.scoring_fct_norm 1.0 +711 26 loss.margin 4.556701381606486 +711 26 loss.adversarial_temperature 0.21500923072276465 +711 26 optimizer.lr 0.007189572783470809 +711 26 negative_sampler.num_negs_per_pos 97.0 +711 26 training.batch_size 2.0 +711 27 model.embedding_dim 2.0 +711 27 model.scoring_fct_norm 2.0 +711 27 loss.margin 14.286440718794314 +711 27 loss.adversarial_temperature 0.2538482387261505 +711 27 optimizer.lr 
0.06861798954730351 +711 27 negative_sampler.num_negs_per_pos 64.0 +711 27 training.batch_size 0.0 +711 28 model.embedding_dim 1.0 +711 28 model.scoring_fct_norm 1.0 +711 28 loss.margin 26.445243805803894 +711 28 loss.adversarial_temperature 0.3040517201166566 +711 28 optimizer.lr 0.0012060687862144425 +711 28 negative_sampler.num_negs_per_pos 86.0 +711 28 training.batch_size 2.0 +711 29 model.embedding_dim 0.0 +711 29 model.scoring_fct_norm 1.0 +711 29 loss.margin 8.477007678576719 +711 29 loss.adversarial_temperature 0.6590719577918085 +711 29 optimizer.lr 0.014948591208725636 +711 29 negative_sampler.num_negs_per_pos 52.0 +711 29 training.batch_size 1.0 +711 30 model.embedding_dim 0.0 +711 30 model.scoring_fct_norm 1.0 +711 30 loss.margin 26.844276770210836 +711 30 loss.adversarial_temperature 0.6114950864107034 +711 30 optimizer.lr 0.08056989444124409 +711 30 negative_sampler.num_negs_per_pos 11.0 +711 30 training.batch_size 1.0 +711 31 model.embedding_dim 1.0 +711 31 model.scoring_fct_norm 1.0 +711 31 loss.margin 3.2836082941887534 +711 31 loss.adversarial_temperature 0.12643248768043963 +711 31 optimizer.lr 0.02234240101553411 +711 31 negative_sampler.num_negs_per_pos 20.0 +711 31 training.batch_size 0.0 +711 32 model.embedding_dim 2.0 +711 32 model.scoring_fct_norm 1.0 +711 32 loss.margin 22.610286655472347 +711 32 loss.adversarial_temperature 0.5237128733863259 +711 32 optimizer.lr 0.010840128415987667 +711 32 negative_sampler.num_negs_per_pos 41.0 +711 32 training.batch_size 1.0 +711 33 model.embedding_dim 0.0 +711 33 model.scoring_fct_norm 1.0 +711 33 loss.margin 19.870749140333704 +711 33 loss.adversarial_temperature 0.6196554864238452 +711 33 optimizer.lr 0.010989131832552498 +711 33 negative_sampler.num_negs_per_pos 51.0 +711 33 training.batch_size 0.0 +711 34 model.embedding_dim 2.0 +711 34 model.scoring_fct_norm 1.0 +711 34 loss.margin 25.307241484831746 +711 34 loss.adversarial_temperature 0.8783810780616128 +711 34 optimizer.lr 0.02850916154266569 
+711 34 negative_sampler.num_negs_per_pos 72.0 +711 34 training.batch_size 0.0 +711 35 model.embedding_dim 0.0 +711 35 model.scoring_fct_norm 1.0 +711 35 loss.margin 24.853525044603078 +711 35 loss.adversarial_temperature 0.5060308112791088 +711 35 optimizer.lr 0.002169376417932582 +711 35 negative_sampler.num_negs_per_pos 76.0 +711 35 training.batch_size 2.0 +711 36 model.embedding_dim 1.0 +711 36 model.scoring_fct_norm 2.0 +711 36 loss.margin 25.029726605052613 +711 36 loss.adversarial_temperature 0.7574034740362418 +711 36 optimizer.lr 0.00653346156552758 +711 36 negative_sampler.num_negs_per_pos 6.0 +711 36 training.batch_size 2.0 +711 37 model.embedding_dim 1.0 +711 37 model.scoring_fct_norm 2.0 +711 37 loss.margin 25.606529494388223 +711 37 loss.adversarial_temperature 0.42738351878006287 +711 37 optimizer.lr 0.01105577385220867 +711 37 negative_sampler.num_negs_per_pos 46.0 +711 37 training.batch_size 0.0 +711 38 model.embedding_dim 2.0 +711 38 model.scoring_fct_norm 1.0 +711 38 loss.margin 7.7180541579672886 +711 38 loss.adversarial_temperature 0.9286725204499177 +711 38 optimizer.lr 0.08763701895078807 +711 38 negative_sampler.num_negs_per_pos 14.0 +711 38 training.batch_size 0.0 +711 39 model.embedding_dim 1.0 +711 39 model.scoring_fct_norm 2.0 +711 39 loss.margin 11.98565235374455 +711 39 loss.adversarial_temperature 0.13577696718909768 +711 39 optimizer.lr 0.001516787508951743 +711 39 negative_sampler.num_negs_per_pos 58.0 +711 39 training.batch_size 2.0 +711 40 model.embedding_dim 1.0 +711 40 model.scoring_fct_norm 1.0 +711 40 loss.margin 21.106622086524943 +711 40 loss.adversarial_temperature 0.35656841710138987 +711 40 optimizer.lr 0.0037245703133877974 +711 40 negative_sampler.num_negs_per_pos 38.0 +711 40 training.batch_size 1.0 +711 41 model.embedding_dim 1.0 +711 41 model.scoring_fct_norm 2.0 +711 41 loss.margin 18.534953534617053 +711 41 loss.adversarial_temperature 0.31251382944419426 +711 41 optimizer.lr 0.057608151731235076 +711 41 
negative_sampler.num_negs_per_pos 4.0 +711 41 training.batch_size 1.0 +711 42 model.embedding_dim 0.0 +711 42 model.scoring_fct_norm 1.0 +711 42 loss.margin 20.026344474224913 +711 42 loss.adversarial_temperature 0.9049107494667427 +711 42 optimizer.lr 0.007667868105374366 +711 42 negative_sampler.num_negs_per_pos 84.0 +711 42 training.batch_size 0.0 +711 43 model.embedding_dim 0.0 +711 43 model.scoring_fct_norm 2.0 +711 43 loss.margin 29.18630079477612 +711 43 loss.adversarial_temperature 0.8180169739135453 +711 43 optimizer.lr 0.0032160403857441192 +711 43 negative_sampler.num_negs_per_pos 34.0 +711 43 training.batch_size 1.0 +711 44 model.embedding_dim 0.0 +711 44 model.scoring_fct_norm 1.0 +711 44 loss.margin 6.038708907749742 +711 44 loss.adversarial_temperature 0.15329874441790217 +711 44 optimizer.lr 0.006265604006939853 +711 44 negative_sampler.num_negs_per_pos 36.0 +711 44 training.batch_size 2.0 +711 45 model.embedding_dim 0.0 +711 45 model.scoring_fct_norm 2.0 +711 45 loss.margin 14.69804040522614 +711 45 loss.adversarial_temperature 0.22135547228890015 +711 45 optimizer.lr 0.005654900829189074 +711 45 negative_sampler.num_negs_per_pos 9.0 +711 45 training.batch_size 1.0 +711 46 model.embedding_dim 1.0 +711 46 model.scoring_fct_norm 2.0 +711 46 loss.margin 27.1373638271186 +711 46 loss.adversarial_temperature 0.8442363570733177 +711 46 optimizer.lr 0.0052805584790435595 +711 46 negative_sampler.num_negs_per_pos 70.0 +711 46 training.batch_size 2.0 +711 47 model.embedding_dim 2.0 +711 47 model.scoring_fct_norm 2.0 +711 47 loss.margin 1.2779652041151852 +711 47 loss.adversarial_temperature 0.8672783814219931 +711 47 optimizer.lr 0.016747569366735517 +711 47 negative_sampler.num_negs_per_pos 87.0 +711 47 training.batch_size 1.0 +711 48 model.embedding_dim 1.0 +711 48 model.scoring_fct_norm 2.0 +711 48 loss.margin 9.52265548442891 +711 48 loss.adversarial_temperature 0.7557695591026106 +711 48 optimizer.lr 0.00802661236655023 +711 48 
negative_sampler.num_negs_per_pos 76.0 +711 48 training.batch_size 2.0 +711 49 model.embedding_dim 1.0 +711 49 model.scoring_fct_norm 1.0 +711 49 loss.margin 5.603904185297153 +711 49 loss.adversarial_temperature 0.6307080187885875 +711 49 optimizer.lr 0.006008901413200904 +711 49 negative_sampler.num_negs_per_pos 80.0 +711 49 training.batch_size 2.0 +711 50 model.embedding_dim 2.0 +711 50 model.scoring_fct_norm 2.0 +711 50 loss.margin 6.349163105454361 +711 50 loss.adversarial_temperature 0.12344312944026471 +711 50 optimizer.lr 0.025960051962249566 +711 50 negative_sampler.num_negs_per_pos 87.0 +711 50 training.batch_size 0.0 +711 51 model.embedding_dim 0.0 +711 51 model.scoring_fct_norm 2.0 +711 51 loss.margin 11.461450614770044 +711 51 loss.adversarial_temperature 0.9865917916794783 +711 51 optimizer.lr 0.003313580803867225 +711 51 negative_sampler.num_negs_per_pos 57.0 +711 51 training.batch_size 1.0 +711 52 model.embedding_dim 0.0 +711 52 model.scoring_fct_norm 2.0 +711 52 loss.margin 16.300917670801716 +711 52 loss.adversarial_temperature 0.7155340117515429 +711 52 optimizer.lr 0.00357954882922649 +711 52 negative_sampler.num_negs_per_pos 8.0 +711 52 training.batch_size 1.0 +711 53 model.embedding_dim 0.0 +711 53 model.scoring_fct_norm 2.0 +711 53 loss.margin 18.89566429297625 +711 53 loss.adversarial_temperature 0.8864667978104922 +711 53 optimizer.lr 0.013631058945208501 +711 53 negative_sampler.num_negs_per_pos 34.0 +711 53 training.batch_size 0.0 +711 54 model.embedding_dim 2.0 +711 54 model.scoring_fct_norm 2.0 +711 54 loss.margin 28.779118540618136 +711 54 loss.adversarial_temperature 0.47511534605782 +711 54 optimizer.lr 0.09717199682716705 +711 54 negative_sampler.num_negs_per_pos 5.0 +711 54 training.batch_size 0.0 +711 55 model.embedding_dim 1.0 +711 55 model.scoring_fct_norm 2.0 +711 55 loss.margin 28.766194207480154 +711 55 loss.adversarial_temperature 0.4188135720025704 +711 55 optimizer.lr 0.001973423798240974 +711 55 
negative_sampler.num_negs_per_pos 21.0 +711 55 training.batch_size 2.0 +711 56 model.embedding_dim 0.0 +711 56 model.scoring_fct_norm 1.0 +711 56 loss.margin 14.941694530604742 +711 56 loss.adversarial_temperature 0.4169988118633021 +711 56 optimizer.lr 0.0015441563635826333 +711 56 negative_sampler.num_negs_per_pos 18.0 +711 56 training.batch_size 1.0 +711 57 model.embedding_dim 0.0 +711 57 model.scoring_fct_norm 1.0 +711 57 loss.margin 10.716885184402862 +711 57 loss.adversarial_temperature 0.5703819451785892 +711 57 optimizer.lr 0.0012060884713217248 +711 57 negative_sampler.num_negs_per_pos 70.0 +711 57 training.batch_size 1.0 +711 58 model.embedding_dim 2.0 +711 58 model.scoring_fct_norm 1.0 +711 58 loss.margin 13.390999945971783 +711 58 loss.adversarial_temperature 0.663571725068288 +711 58 optimizer.lr 0.0017728538968914288 +711 58 negative_sampler.num_negs_per_pos 88.0 +711 58 training.batch_size 0.0 +711 59 model.embedding_dim 0.0 +711 59 model.scoring_fct_norm 1.0 +711 59 loss.margin 11.823570896601295 +711 59 loss.adversarial_temperature 0.19275989184290948 +711 59 optimizer.lr 0.00144543613468324 +711 59 negative_sampler.num_negs_per_pos 88.0 +711 59 training.batch_size 2.0 +711 60 model.embedding_dim 1.0 +711 60 model.scoring_fct_norm 2.0 +711 60 loss.margin 22.58647700199606 +711 60 loss.adversarial_temperature 0.3053967845280408 +711 60 optimizer.lr 0.026575717723967048 +711 60 negative_sampler.num_negs_per_pos 16.0 +711 60 training.batch_size 1.0 +711 61 model.embedding_dim 2.0 +711 61 model.scoring_fct_norm 2.0 +711 61 loss.margin 9.899036916373811 +711 61 loss.adversarial_temperature 0.8458113404969817 +711 61 optimizer.lr 0.017672055568445887 +711 61 negative_sampler.num_negs_per_pos 12.0 +711 61 training.batch_size 0.0 +711 62 model.embedding_dim 2.0 +711 62 model.scoring_fct_norm 2.0 +711 62 loss.margin 25.38178860852069 +711 62 loss.adversarial_temperature 0.3487932564246744 +711 62 optimizer.lr 0.01818585524403542 +711 62 
negative_sampler.num_negs_per_pos 4.0 +711 62 training.batch_size 1.0 +711 63 model.embedding_dim 0.0 +711 63 model.scoring_fct_norm 2.0 +711 63 loss.margin 29.26403909421993 +711 63 loss.adversarial_temperature 0.31327005481378023 +711 63 optimizer.lr 0.00972467597987553 +711 63 negative_sampler.num_negs_per_pos 89.0 +711 63 training.batch_size 0.0 +711 64 model.embedding_dim 0.0 +711 64 model.scoring_fct_norm 1.0 +711 64 loss.margin 1.1899808818319648 +711 64 loss.adversarial_temperature 0.49011159288609807 +711 64 optimizer.lr 0.009048901682901147 +711 64 negative_sampler.num_negs_per_pos 2.0 +711 64 training.batch_size 0.0 +711 65 model.embedding_dim 1.0 +711 65 model.scoring_fct_norm 1.0 +711 65 loss.margin 24.19063614165084 +711 65 loss.adversarial_temperature 0.9076122520275699 +711 65 optimizer.lr 0.0011592308438664795 +711 65 negative_sampler.num_negs_per_pos 24.0 +711 65 training.batch_size 1.0 +711 66 model.embedding_dim 1.0 +711 66 model.scoring_fct_norm 1.0 +711 66 loss.margin 18.44775913201387 +711 66 loss.adversarial_temperature 0.9697228539508802 +711 66 optimizer.lr 0.004049820945054108 +711 66 negative_sampler.num_negs_per_pos 74.0 +711 66 training.batch_size 2.0 +711 67 model.embedding_dim 0.0 +711 67 model.scoring_fct_norm 1.0 +711 67 loss.margin 11.618507400486672 +711 67 loss.adversarial_temperature 0.5081375891549001 +711 67 optimizer.lr 0.08972526894327276 +711 67 negative_sampler.num_negs_per_pos 41.0 +711 67 training.batch_size 1.0 +711 68 model.embedding_dim 2.0 +711 68 model.scoring_fct_norm 2.0 +711 68 loss.margin 12.569220575889002 +711 68 loss.adversarial_temperature 0.18304922585507286 +711 68 optimizer.lr 0.006475185822958796 +711 68 negative_sampler.num_negs_per_pos 88.0 +711 68 training.batch_size 0.0 +711 69 model.embedding_dim 2.0 +711 69 model.scoring_fct_norm 2.0 +711 69 loss.margin 26.30798595691133 +711 69 loss.adversarial_temperature 0.33600296814342834 +711 69 optimizer.lr 0.00388555887288079 +711 69 
negative_sampler.num_negs_per_pos 71.0 +711 69 training.batch_size 2.0 +711 70 model.embedding_dim 1.0 +711 70 model.scoring_fct_norm 1.0 +711 70 loss.margin 9.522421190145726 +711 70 loss.adversarial_temperature 0.8752625831238625 +711 70 optimizer.lr 0.00330783057448025 +711 70 negative_sampler.num_negs_per_pos 88.0 +711 70 training.batch_size 2.0 +711 71 model.embedding_dim 1.0 +711 71 model.scoring_fct_norm 2.0 +711 71 loss.margin 22.13110071422032 +711 71 loss.adversarial_temperature 0.8314665003184338 +711 71 optimizer.lr 0.02397129584572889 +711 71 negative_sampler.num_negs_per_pos 64.0 +711 71 training.batch_size 0.0 +711 72 model.embedding_dim 0.0 +711 72 model.scoring_fct_norm 1.0 +711 72 loss.margin 29.313841341230297 +711 72 loss.adversarial_temperature 0.9967796712635233 +711 72 optimizer.lr 0.002667116711173826 +711 72 negative_sampler.num_negs_per_pos 59.0 +711 72 training.batch_size 0.0 +711 73 model.embedding_dim 2.0 +711 73 model.scoring_fct_norm 2.0 +711 73 loss.margin 23.895183666546114 +711 73 loss.adversarial_temperature 0.16892993388416405 +711 73 optimizer.lr 0.010430598956387057 +711 73 negative_sampler.num_negs_per_pos 55.0 +711 73 training.batch_size 0.0 +711 74 model.embedding_dim 1.0 +711 74 model.scoring_fct_norm 1.0 +711 74 loss.margin 19.668444777946796 +711 74 loss.adversarial_temperature 0.40465189358779163 +711 74 optimizer.lr 0.00309562500371612 +711 74 negative_sampler.num_negs_per_pos 67.0 +711 74 training.batch_size 1.0 +711 75 model.embedding_dim 1.0 +711 75 model.scoring_fct_norm 2.0 +711 75 loss.margin 27.49138644069356 +711 75 loss.adversarial_temperature 0.38395270807647164 +711 75 optimizer.lr 0.02367132987690185 +711 75 negative_sampler.num_negs_per_pos 41.0 +711 75 training.batch_size 1.0 +711 76 model.embedding_dim 1.0 +711 76 model.scoring_fct_norm 1.0 +711 76 loss.margin 20.525954463056998 +711 76 loss.adversarial_temperature 0.42248507780883826 +711 76 optimizer.lr 0.0012927957589992428 +711 76 
negative_sampler.num_negs_per_pos 5.0 +711 76 training.batch_size 1.0 +711 77 model.embedding_dim 2.0 +711 77 model.scoring_fct_norm 2.0 +711 77 loss.margin 24.871136295864055 +711 77 loss.adversarial_temperature 0.7857303044597796 +711 77 optimizer.lr 0.019944197543334416 +711 77 negative_sampler.num_negs_per_pos 89.0 +711 77 training.batch_size 1.0 +711 1 dataset """kinships""" +711 1 model """structuredembedding""" +711 1 loss """nssa""" +711 1 regularizer """no""" +711 1 optimizer """adam""" +711 1 training_loop """owa""" +711 1 negative_sampler """basic""" +711 1 evaluator """rankbased""" +711 2 dataset """kinships""" +711 2 model """structuredembedding""" +711 2 loss """nssa""" +711 2 regularizer """no""" +711 2 optimizer """adam""" +711 2 training_loop """owa""" +711 2 negative_sampler """basic""" +711 2 evaluator """rankbased""" +711 3 dataset """kinships""" +711 3 model """structuredembedding""" +711 3 loss """nssa""" +711 3 regularizer """no""" +711 3 optimizer """adam""" +711 3 training_loop """owa""" +711 3 negative_sampler """basic""" +711 3 evaluator """rankbased""" +711 4 dataset """kinships""" +711 4 model """structuredembedding""" +711 4 loss """nssa""" +711 4 regularizer """no""" +711 4 optimizer """adam""" +711 4 training_loop """owa""" +711 4 negative_sampler """basic""" +711 4 evaluator """rankbased""" +711 5 dataset """kinships""" +711 5 model """structuredembedding""" +711 5 loss """nssa""" +711 5 regularizer """no""" +711 5 optimizer """adam""" +711 5 training_loop """owa""" +711 5 negative_sampler """basic""" +711 5 evaluator """rankbased""" +711 6 dataset """kinships""" +711 6 model """structuredembedding""" +711 6 loss """nssa""" +711 6 regularizer """no""" +711 6 optimizer """adam""" +711 6 training_loop """owa""" +711 6 negative_sampler """basic""" +711 6 evaluator """rankbased""" +711 7 dataset """kinships""" +711 7 model """structuredembedding""" +711 7 loss """nssa""" +711 7 regularizer """no""" +711 7 optimizer """adam""" +711 7 
training_loop """owa""" +711 7 negative_sampler """basic""" +711 7 evaluator """rankbased""" +711 8 dataset """kinships""" +711 8 model """structuredembedding""" +711 8 loss """nssa""" +711 8 regularizer """no""" +711 8 optimizer """adam""" +711 8 training_loop """owa""" +711 8 negative_sampler """basic""" +711 8 evaluator """rankbased""" +711 9 dataset """kinships""" +711 9 model """structuredembedding""" +711 9 loss """nssa""" +711 9 regularizer """no""" +711 9 optimizer """adam""" +711 9 training_loop """owa""" +711 9 negative_sampler """basic""" +711 9 evaluator """rankbased""" +711 10 dataset """kinships""" +711 10 model """structuredembedding""" +711 10 loss """nssa""" +711 10 regularizer """no""" +711 10 optimizer """adam""" +711 10 training_loop """owa""" +711 10 negative_sampler """basic""" +711 10 evaluator """rankbased""" +711 11 dataset """kinships""" +711 11 model """structuredembedding""" +711 11 loss """nssa""" +711 11 regularizer """no""" +711 11 optimizer """adam""" +711 11 training_loop """owa""" +711 11 negative_sampler """basic""" +711 11 evaluator """rankbased""" +711 12 dataset """kinships""" +711 12 model """structuredembedding""" +711 12 loss """nssa""" +711 12 regularizer """no""" +711 12 optimizer """adam""" +711 12 training_loop """owa""" +711 12 negative_sampler """basic""" +711 12 evaluator """rankbased""" +711 13 dataset """kinships""" +711 13 model """structuredembedding""" +711 13 loss """nssa""" +711 13 regularizer """no""" +711 13 optimizer """adam""" +711 13 training_loop """owa""" +711 13 negative_sampler """basic""" +711 13 evaluator """rankbased""" +711 14 dataset """kinships""" +711 14 model """structuredembedding""" +711 14 loss """nssa""" +711 14 regularizer """no""" +711 14 optimizer """adam""" +711 14 training_loop """owa""" +711 14 negative_sampler """basic""" +711 14 evaluator """rankbased""" +711 15 dataset """kinships""" +711 15 model """structuredembedding""" +711 15 loss """nssa""" +711 15 regularizer """no""" +711 
15 optimizer """adam""" +711 15 training_loop """owa""" +711 15 negative_sampler """basic""" +711 15 evaluator """rankbased""" +711 16 dataset """kinships""" +711 16 model """structuredembedding""" +711 16 loss """nssa""" +711 16 regularizer """no""" +711 16 optimizer """adam""" +711 16 training_loop """owa""" +711 16 negative_sampler """basic""" +711 16 evaluator """rankbased""" +711 17 dataset """kinships""" +711 17 model """structuredembedding""" +711 17 loss """nssa""" +711 17 regularizer """no""" +711 17 optimizer """adam""" +711 17 training_loop """owa""" +711 17 negative_sampler """basic""" +711 17 evaluator """rankbased""" +711 18 dataset """kinships""" +711 18 model """structuredembedding""" +711 18 loss """nssa""" +711 18 regularizer """no""" +711 18 optimizer """adam""" +711 18 training_loop """owa""" +711 18 negative_sampler """basic""" +711 18 evaluator """rankbased""" +711 19 dataset """kinships""" +711 19 model """structuredembedding""" +711 19 loss """nssa""" +711 19 regularizer """no""" +711 19 optimizer """adam""" +711 19 training_loop """owa""" +711 19 negative_sampler """basic""" +711 19 evaluator """rankbased""" +711 20 dataset """kinships""" +711 20 model """structuredembedding""" +711 20 loss """nssa""" +711 20 regularizer """no""" +711 20 optimizer """adam""" +711 20 training_loop """owa""" +711 20 negative_sampler """basic""" +711 20 evaluator """rankbased""" +711 21 dataset """kinships""" +711 21 model """structuredembedding""" +711 21 loss """nssa""" +711 21 regularizer """no""" +711 21 optimizer """adam""" +711 21 training_loop """owa""" +711 21 negative_sampler """basic""" +711 21 evaluator """rankbased""" +711 22 dataset """kinships""" +711 22 model """structuredembedding""" +711 22 loss """nssa""" +711 22 regularizer """no""" +711 22 optimizer """adam""" +711 22 training_loop """owa""" +711 22 negative_sampler """basic""" +711 22 evaluator """rankbased""" +711 23 dataset """kinships""" +711 23 model """structuredembedding""" +711 23 
loss """nssa""" +711 23 regularizer """no""" +711 23 optimizer """adam""" +711 23 training_loop """owa""" +711 23 negative_sampler """basic""" +711 23 evaluator """rankbased""" +711 24 dataset """kinships""" +711 24 model """structuredembedding""" +711 24 loss """nssa""" +711 24 regularizer """no""" +711 24 optimizer """adam""" +711 24 training_loop """owa""" +711 24 negative_sampler """basic""" +711 24 evaluator """rankbased""" +711 25 dataset """kinships""" +711 25 model """structuredembedding""" +711 25 loss """nssa""" +711 25 regularizer """no""" +711 25 optimizer """adam""" +711 25 training_loop """owa""" +711 25 negative_sampler """basic""" +711 25 evaluator """rankbased""" +711 26 dataset """kinships""" +711 26 model """structuredembedding""" +711 26 loss """nssa""" +711 26 regularizer """no""" +711 26 optimizer """adam""" +711 26 training_loop """owa""" +711 26 negative_sampler """basic""" +711 26 evaluator """rankbased""" +711 27 dataset """kinships""" +711 27 model """structuredembedding""" +711 27 loss """nssa""" +711 27 regularizer """no""" +711 27 optimizer """adam""" +711 27 training_loop """owa""" +711 27 negative_sampler """basic""" +711 27 evaluator """rankbased""" +711 28 dataset """kinships""" +711 28 model """structuredembedding""" +711 28 loss """nssa""" +711 28 regularizer """no""" +711 28 optimizer """adam""" +711 28 training_loop """owa""" +711 28 negative_sampler """basic""" +711 28 evaluator """rankbased""" +711 29 dataset """kinships""" +711 29 model """structuredembedding""" +711 29 loss """nssa""" +711 29 regularizer """no""" +711 29 optimizer """adam""" +711 29 training_loop """owa""" +711 29 negative_sampler """basic""" +711 29 evaluator """rankbased""" +711 30 dataset """kinships""" +711 30 model """structuredembedding""" +711 30 loss """nssa""" +711 30 regularizer """no""" +711 30 optimizer """adam""" +711 30 training_loop """owa""" +711 30 negative_sampler """basic""" +711 30 evaluator """rankbased""" +711 31 dataset """kinships""" 
+711 31 model """structuredembedding""" +711 31 loss """nssa""" +711 31 regularizer """no""" +711 31 optimizer """adam""" +711 31 training_loop """owa""" +711 31 negative_sampler """basic""" +711 31 evaluator """rankbased""" +711 32 dataset """kinships""" +711 32 model """structuredembedding""" +711 32 loss """nssa""" +711 32 regularizer """no""" +711 32 optimizer """adam""" +711 32 training_loop """owa""" +711 32 negative_sampler """basic""" +711 32 evaluator """rankbased""" +711 33 dataset """kinships""" +711 33 model """structuredembedding""" +711 33 loss """nssa""" +711 33 regularizer """no""" +711 33 optimizer """adam""" +711 33 training_loop """owa""" +711 33 negative_sampler """basic""" +711 33 evaluator """rankbased""" +711 34 dataset """kinships""" +711 34 model """structuredembedding""" +711 34 loss """nssa""" +711 34 regularizer """no""" +711 34 optimizer """adam""" +711 34 training_loop """owa""" +711 34 negative_sampler """basic""" +711 34 evaluator """rankbased""" +711 35 dataset """kinships""" +711 35 model """structuredembedding""" +711 35 loss """nssa""" +711 35 regularizer """no""" +711 35 optimizer """adam""" +711 35 training_loop """owa""" +711 35 negative_sampler """basic""" +711 35 evaluator """rankbased""" +711 36 dataset """kinships""" +711 36 model """structuredembedding""" +711 36 loss """nssa""" +711 36 regularizer """no""" +711 36 optimizer """adam""" +711 36 training_loop """owa""" +711 36 negative_sampler """basic""" +711 36 evaluator """rankbased""" +711 37 dataset """kinships""" +711 37 model """structuredembedding""" +711 37 loss """nssa""" +711 37 regularizer """no""" +711 37 optimizer """adam""" +711 37 training_loop """owa""" +711 37 negative_sampler """basic""" +711 37 evaluator """rankbased""" +711 38 dataset """kinships""" +711 38 model """structuredembedding""" +711 38 loss """nssa""" +711 38 regularizer """no""" +711 38 optimizer """adam""" +711 38 training_loop """owa""" +711 38 negative_sampler """basic""" +711 38 
evaluator """rankbased""" +711 39 dataset """kinships""" +711 39 model """structuredembedding""" +711 39 loss """nssa""" +711 39 regularizer """no""" +711 39 optimizer """adam""" +711 39 training_loop """owa""" +711 39 negative_sampler """basic""" +711 39 evaluator """rankbased""" +711 40 dataset """kinships""" +711 40 model """structuredembedding""" +711 40 loss """nssa""" +711 40 regularizer """no""" +711 40 optimizer """adam""" +711 40 training_loop """owa""" +711 40 negative_sampler """basic""" +711 40 evaluator """rankbased""" +711 41 dataset """kinships""" +711 41 model """structuredembedding""" +711 41 loss """nssa""" +711 41 regularizer """no""" +711 41 optimizer """adam""" +711 41 training_loop """owa""" +711 41 negative_sampler """basic""" +711 41 evaluator """rankbased""" +711 42 dataset """kinships""" +711 42 model """structuredembedding""" +711 42 loss """nssa""" +711 42 regularizer """no""" +711 42 optimizer """adam""" +711 42 training_loop """owa""" +711 42 negative_sampler """basic""" +711 42 evaluator """rankbased""" +711 43 dataset """kinships""" +711 43 model """structuredembedding""" +711 43 loss """nssa""" +711 43 regularizer """no""" +711 43 optimizer """adam""" +711 43 training_loop """owa""" +711 43 negative_sampler """basic""" +711 43 evaluator """rankbased""" +711 44 dataset """kinships""" +711 44 model """structuredembedding""" +711 44 loss """nssa""" +711 44 regularizer """no""" +711 44 optimizer """adam""" +711 44 training_loop """owa""" +711 44 negative_sampler """basic""" +711 44 evaluator """rankbased""" +711 45 dataset """kinships""" +711 45 model """structuredembedding""" +711 45 loss """nssa""" +711 45 regularizer """no""" +711 45 optimizer """adam""" +711 45 training_loop """owa""" +711 45 negative_sampler """basic""" +711 45 evaluator """rankbased""" +711 46 dataset """kinships""" +711 46 model """structuredembedding""" +711 46 loss """nssa""" +711 46 regularizer """no""" +711 46 optimizer """adam""" +711 46 training_loop 
"""owa""" +711 46 negative_sampler """basic""" +711 46 evaluator """rankbased""" +711 47 dataset """kinships""" +711 47 model """structuredembedding""" +711 47 loss """nssa""" +711 47 regularizer """no""" +711 47 optimizer """adam""" +711 47 training_loop """owa""" +711 47 negative_sampler """basic""" +711 47 evaluator """rankbased""" +711 48 dataset """kinships""" +711 48 model """structuredembedding""" +711 48 loss """nssa""" +711 48 regularizer """no""" +711 48 optimizer """adam""" +711 48 training_loop """owa""" +711 48 negative_sampler """basic""" +711 48 evaluator """rankbased""" +711 49 dataset """kinships""" +711 49 model """structuredembedding""" +711 49 loss """nssa""" +711 49 regularizer """no""" +711 49 optimizer """adam""" +711 49 training_loop """owa""" +711 49 negative_sampler """basic""" +711 49 evaluator """rankbased""" +711 50 dataset """kinships""" +711 50 model """structuredembedding""" +711 50 loss """nssa""" +711 50 regularizer """no""" +711 50 optimizer """adam""" +711 50 training_loop """owa""" +711 50 negative_sampler """basic""" +711 50 evaluator """rankbased""" +711 51 dataset """kinships""" +711 51 model """structuredembedding""" +711 51 loss """nssa""" +711 51 regularizer """no""" +711 51 optimizer """adam""" +711 51 training_loop """owa""" +711 51 negative_sampler """basic""" +711 51 evaluator """rankbased""" +711 52 dataset """kinships""" +711 52 model """structuredembedding""" +711 52 loss """nssa""" +711 52 regularizer """no""" +711 52 optimizer """adam""" +711 52 training_loop """owa""" +711 52 negative_sampler """basic""" +711 52 evaluator """rankbased""" +711 53 dataset """kinships""" +711 53 model """structuredembedding""" +711 53 loss """nssa""" +711 53 regularizer """no""" +711 53 optimizer """adam""" +711 53 training_loop """owa""" +711 53 negative_sampler """basic""" +711 53 evaluator """rankbased""" +711 54 dataset """kinships""" +711 54 model """structuredembedding""" +711 54 loss """nssa""" +711 54 regularizer """no""" 
+711 54 optimizer """adam""" +711 54 training_loop """owa""" +711 54 negative_sampler """basic""" +711 54 evaluator """rankbased""" +711 55 dataset """kinships""" +711 55 model """structuredembedding""" +711 55 loss """nssa""" +711 55 regularizer """no""" +711 55 optimizer """adam""" +711 55 training_loop """owa""" +711 55 negative_sampler """basic""" +711 55 evaluator """rankbased""" +711 56 dataset """kinships""" +711 56 model """structuredembedding""" +711 56 loss """nssa""" +711 56 regularizer """no""" +711 56 optimizer """adam""" +711 56 training_loop """owa""" +711 56 negative_sampler """basic""" +711 56 evaluator """rankbased""" +711 57 dataset """kinships""" +711 57 model """structuredembedding""" +711 57 loss """nssa""" +711 57 regularizer """no""" +711 57 optimizer """adam""" +711 57 training_loop """owa""" +711 57 negative_sampler """basic""" +711 57 evaluator """rankbased""" +711 58 dataset """kinships""" +711 58 model """structuredembedding""" +711 58 loss """nssa""" +711 58 regularizer """no""" +711 58 optimizer """adam""" +711 58 training_loop """owa""" +711 58 negative_sampler """basic""" +711 58 evaluator """rankbased""" +711 59 dataset """kinships""" +711 59 model """structuredembedding""" +711 59 loss """nssa""" +711 59 regularizer """no""" +711 59 optimizer """adam""" +711 59 training_loop """owa""" +711 59 negative_sampler """basic""" +711 59 evaluator """rankbased""" +711 60 dataset """kinships""" +711 60 model """structuredembedding""" +711 60 loss """nssa""" +711 60 regularizer """no""" +711 60 optimizer """adam""" +711 60 training_loop """owa""" +711 60 negative_sampler """basic""" +711 60 evaluator """rankbased""" +711 61 dataset """kinships""" +711 61 model """structuredembedding""" +711 61 loss """nssa""" +711 61 regularizer """no""" +711 61 optimizer """adam""" +711 61 training_loop """owa""" +711 61 negative_sampler """basic""" +711 61 evaluator """rankbased""" +711 62 dataset """kinships""" +711 62 model """structuredembedding""" +711 
62 loss """nssa""" +711 62 regularizer """no""" +711 62 optimizer """adam""" +711 62 training_loop """owa""" +711 62 negative_sampler """basic""" +711 62 evaluator """rankbased""" +711 63 dataset """kinships""" +711 63 model """structuredembedding""" +711 63 loss """nssa""" +711 63 regularizer """no""" +711 63 optimizer """adam""" +711 63 training_loop """owa""" +711 63 negative_sampler """basic""" +711 63 evaluator """rankbased""" +711 64 dataset """kinships""" +711 64 model """structuredembedding""" +711 64 loss """nssa""" +711 64 regularizer """no""" +711 64 optimizer """adam""" +711 64 training_loop """owa""" +711 64 negative_sampler """basic""" +711 64 evaluator """rankbased""" +711 65 dataset """kinships""" +711 65 model """structuredembedding""" +711 65 loss """nssa""" +711 65 regularizer """no""" +711 65 optimizer """adam""" +711 65 training_loop """owa""" +711 65 negative_sampler """basic""" +711 65 evaluator """rankbased""" +711 66 dataset """kinships""" +711 66 model """structuredembedding""" +711 66 loss """nssa""" +711 66 regularizer """no""" +711 66 optimizer """adam""" +711 66 training_loop """owa""" +711 66 negative_sampler """basic""" +711 66 evaluator """rankbased""" +711 67 dataset """kinships""" +711 67 model """structuredembedding""" +711 67 loss """nssa""" +711 67 regularizer """no""" +711 67 optimizer """adam""" +711 67 training_loop """owa""" +711 67 negative_sampler """basic""" +711 67 evaluator """rankbased""" +711 68 dataset """kinships""" +711 68 model """structuredembedding""" +711 68 loss """nssa""" +711 68 regularizer """no""" +711 68 optimizer """adam""" +711 68 training_loop """owa""" +711 68 negative_sampler """basic""" +711 68 evaluator """rankbased""" +711 69 dataset """kinships""" +711 69 model """structuredembedding""" +711 69 loss """nssa""" +711 69 regularizer """no""" +711 69 optimizer """adam""" +711 69 training_loop """owa""" +711 69 negative_sampler """basic""" +711 69 evaluator """rankbased""" +711 70 dataset 
"""kinships""" +711 70 model """structuredembedding""" +711 70 loss """nssa""" +711 70 regularizer """no""" +711 70 optimizer """adam""" +711 70 training_loop """owa""" +711 70 negative_sampler """basic""" +711 70 evaluator """rankbased""" +711 71 dataset """kinships""" +711 71 model """structuredembedding""" +711 71 loss """nssa""" +711 71 regularizer """no""" +711 71 optimizer """adam""" +711 71 training_loop """owa""" +711 71 negative_sampler """basic""" +711 71 evaluator """rankbased""" +711 72 dataset """kinships""" +711 72 model """structuredembedding""" +711 72 loss """nssa""" +711 72 regularizer """no""" +711 72 optimizer """adam""" +711 72 training_loop """owa""" +711 72 negative_sampler """basic""" +711 72 evaluator """rankbased""" +711 73 dataset """kinships""" +711 73 model """structuredembedding""" +711 73 loss """nssa""" +711 73 regularizer """no""" +711 73 optimizer """adam""" +711 73 training_loop """owa""" +711 73 negative_sampler """basic""" +711 73 evaluator """rankbased""" +711 74 dataset """kinships""" +711 74 model """structuredembedding""" +711 74 loss """nssa""" +711 74 regularizer """no""" +711 74 optimizer """adam""" +711 74 training_loop """owa""" +711 74 negative_sampler """basic""" +711 74 evaluator """rankbased""" +711 75 dataset """kinships""" +711 75 model """structuredembedding""" +711 75 loss """nssa""" +711 75 regularizer """no""" +711 75 optimizer """adam""" +711 75 training_loop """owa""" +711 75 negative_sampler """basic""" +711 75 evaluator """rankbased""" +711 76 dataset """kinships""" +711 76 model """structuredembedding""" +711 76 loss """nssa""" +711 76 regularizer """no""" +711 76 optimizer """adam""" +711 76 training_loop """owa""" +711 76 negative_sampler """basic""" +711 76 evaluator """rankbased""" +711 77 dataset """kinships""" +711 77 model """structuredembedding""" +711 77 loss """nssa""" +711 77 regularizer """no""" +711 77 optimizer """adam""" +711 77 training_loop """owa""" +711 77 negative_sampler """basic""" 
+711 77 evaluator """rankbased""" +712 1 model.embedding_dim 0.0 +712 1 model.scoring_fct_norm 2.0 +712 1 loss.margin 15.103773540790362 +712 1 loss.adversarial_temperature 0.16518705145535198 +712 1 optimizer.lr 0.01653168567698929 +712 1 negative_sampler.num_negs_per_pos 66.0 +712 1 training.batch_size 2.0 +712 2 model.embedding_dim 1.0 +712 2 model.scoring_fct_norm 2.0 +712 2 loss.margin 1.8675702874268887 +712 2 loss.adversarial_temperature 0.8270142548646799 +712 2 optimizer.lr 0.08032096633231552 +712 2 negative_sampler.num_negs_per_pos 46.0 +712 2 training.batch_size 2.0 +712 3 model.embedding_dim 2.0 +712 3 model.scoring_fct_norm 2.0 +712 3 loss.margin 24.019729505174087 +712 3 loss.adversarial_temperature 0.973755683442024 +712 3 optimizer.lr 0.03813542596282154 +712 3 negative_sampler.num_negs_per_pos 24.0 +712 3 training.batch_size 2.0 +712 4 model.embedding_dim 0.0 +712 4 model.scoring_fct_norm 2.0 +712 4 loss.margin 3.2242407967688003 +712 4 loss.adversarial_temperature 0.7187518763595272 +712 4 optimizer.lr 0.011858008823790024 +712 4 negative_sampler.num_negs_per_pos 37.0 +712 4 training.batch_size 2.0 +712 5 model.embedding_dim 0.0 +712 5 model.scoring_fct_norm 2.0 +712 5 loss.margin 18.042642530882716 +712 5 loss.adversarial_temperature 0.6681340343983749 +712 5 optimizer.lr 0.02549846355880323 +712 5 negative_sampler.num_negs_per_pos 42.0 +712 5 training.batch_size 0.0 +712 6 model.embedding_dim 0.0 +712 6 model.scoring_fct_norm 1.0 +712 6 loss.margin 12.035903062638312 +712 6 loss.adversarial_temperature 0.7402298332117445 +712 6 optimizer.lr 0.003039101288329351 +712 6 negative_sampler.num_negs_per_pos 67.0 +712 6 training.batch_size 2.0 +712 7 model.embedding_dim 2.0 +712 7 model.scoring_fct_norm 2.0 +712 7 loss.margin 9.643260822711323 +712 7 loss.adversarial_temperature 0.5830551091690389 +712 7 optimizer.lr 0.027880711958008713 +712 7 negative_sampler.num_negs_per_pos 52.0 +712 7 training.batch_size 0.0 +712 8 model.embedding_dim 2.0 +712 8 
model.scoring_fct_norm 1.0 +712 8 loss.margin 23.524036084536302 +712 8 loss.adversarial_temperature 0.8596525909842974 +712 8 optimizer.lr 0.06398558530740027 +712 8 negative_sampler.num_negs_per_pos 27.0 +712 8 training.batch_size 1.0 +712 9 model.embedding_dim 0.0 +712 9 model.scoring_fct_norm 1.0 +712 9 loss.margin 14.8964180311337 +712 9 loss.adversarial_temperature 0.33462351232333465 +712 9 optimizer.lr 0.09606283811566999 +712 9 negative_sampler.num_negs_per_pos 5.0 +712 9 training.batch_size 2.0 +712 10 model.embedding_dim 2.0 +712 10 model.scoring_fct_norm 1.0 +712 10 loss.margin 15.676345596644138 +712 10 loss.adversarial_temperature 0.7587455090326645 +712 10 optimizer.lr 0.0010362070165514125 +712 10 negative_sampler.num_negs_per_pos 5.0 +712 10 training.batch_size 2.0 +712 11 model.embedding_dim 1.0 +712 11 model.scoring_fct_norm 2.0 +712 11 loss.margin 22.41697347298485 +712 11 loss.adversarial_temperature 0.3875830230083248 +712 11 optimizer.lr 0.006150835319873402 +712 11 negative_sampler.num_negs_per_pos 99.0 +712 11 training.batch_size 1.0 +712 12 model.embedding_dim 0.0 +712 12 model.scoring_fct_norm 1.0 +712 12 loss.margin 2.376070906826907 +712 12 loss.adversarial_temperature 0.44541202571886285 +712 12 optimizer.lr 0.007021468128667428 +712 12 negative_sampler.num_negs_per_pos 86.0 +712 12 training.batch_size 1.0 +712 13 model.embedding_dim 1.0 +712 13 model.scoring_fct_norm 2.0 +712 13 loss.margin 29.80489767837648 +712 13 loss.adversarial_temperature 0.16738768528274797 +712 13 optimizer.lr 0.0036465955093276806 +712 13 negative_sampler.num_negs_per_pos 25.0 +712 13 training.batch_size 1.0 +712 14 model.embedding_dim 2.0 +712 14 model.scoring_fct_norm 1.0 +712 14 loss.margin 12.837539008917123 +712 14 loss.adversarial_temperature 0.22362645829191227 +712 14 optimizer.lr 0.001498148922358612 +712 14 negative_sampler.num_negs_per_pos 39.0 +712 14 training.batch_size 0.0 +712 15 model.embedding_dim 2.0 +712 15 model.scoring_fct_norm 2.0 +712 
15 loss.margin 21.231881037632945 +712 15 loss.adversarial_temperature 0.5060057129871923 +712 15 optimizer.lr 0.01069770309531344 +712 15 negative_sampler.num_negs_per_pos 19.0 +712 15 training.batch_size 0.0 +712 16 model.embedding_dim 2.0 +712 16 model.scoring_fct_norm 2.0 +712 16 loss.margin 14.908423256283351 +712 16 loss.adversarial_temperature 0.4136564500514941 +712 16 optimizer.lr 0.007738272146783755 +712 16 negative_sampler.num_negs_per_pos 59.0 +712 16 training.batch_size 1.0 +712 17 model.embedding_dim 2.0 +712 17 model.scoring_fct_norm 2.0 +712 17 loss.margin 28.48192094834987 +712 17 loss.adversarial_temperature 0.5638000711124803 +712 17 optimizer.lr 0.03165664559190627 +712 17 negative_sampler.num_negs_per_pos 59.0 +712 17 training.batch_size 2.0 +712 18 model.embedding_dim 0.0 +712 18 model.scoring_fct_norm 2.0 +712 18 loss.margin 29.786145531939844 +712 18 loss.adversarial_temperature 0.5379716754735278 +712 18 optimizer.lr 0.0020801716236841434 +712 18 negative_sampler.num_negs_per_pos 83.0 +712 18 training.batch_size 1.0 +712 19 model.embedding_dim 0.0 +712 19 model.scoring_fct_norm 2.0 +712 19 loss.margin 6.172437185348896 +712 19 loss.adversarial_temperature 0.5507266775461517 +712 19 optimizer.lr 0.01862790625882295 +712 19 negative_sampler.num_negs_per_pos 8.0 +712 19 training.batch_size 2.0 +712 20 model.embedding_dim 2.0 +712 20 model.scoring_fct_norm 1.0 +712 20 loss.margin 15.167855274709234 +712 20 loss.adversarial_temperature 0.7798555500062789 +712 20 optimizer.lr 0.08125510357831563 +712 20 negative_sampler.num_negs_per_pos 27.0 +712 20 training.batch_size 0.0 +712 21 model.embedding_dim 1.0 +712 21 model.scoring_fct_norm 2.0 +712 21 loss.margin 15.408149172462474 +712 21 loss.adversarial_temperature 0.77938321716048 +712 21 optimizer.lr 0.02404254593145707 +712 21 negative_sampler.num_negs_per_pos 27.0 +712 21 training.batch_size 2.0 +712 22 model.embedding_dim 1.0 +712 22 model.scoring_fct_norm 1.0 +712 22 loss.margin 
8.35687218626805 +712 22 loss.adversarial_temperature 0.9466106469220265 +712 22 optimizer.lr 0.04799664962499558 +712 22 negative_sampler.num_negs_per_pos 93.0 +712 22 training.batch_size 2.0 +712 23 model.embedding_dim 1.0 +712 23 model.scoring_fct_norm 2.0 +712 23 loss.margin 3.9218888475987956 +712 23 loss.adversarial_temperature 0.5098718621599032 +712 23 optimizer.lr 0.033490579466612366 +712 23 negative_sampler.num_negs_per_pos 83.0 +712 23 training.batch_size 1.0 +712 24 model.embedding_dim 1.0 +712 24 model.scoring_fct_norm 2.0 +712 24 loss.margin 26.07805871957183 +712 24 loss.adversarial_temperature 0.4908529898894629 +712 24 optimizer.lr 0.009727617125885825 +712 24 negative_sampler.num_negs_per_pos 87.0 +712 24 training.batch_size 2.0 +712 25 model.embedding_dim 2.0 +712 25 model.scoring_fct_norm 1.0 +712 25 loss.margin 20.156909430434727 +712 25 loss.adversarial_temperature 0.9588346922359887 +712 25 optimizer.lr 0.006372812511568208 +712 25 negative_sampler.num_negs_per_pos 74.0 +712 25 training.batch_size 1.0 +712 26 model.embedding_dim 1.0 +712 26 model.scoring_fct_norm 2.0 +712 26 loss.margin 27.91455290137065 +712 26 loss.adversarial_temperature 0.908323722457235 +712 26 optimizer.lr 0.020037160567602207 +712 26 negative_sampler.num_negs_per_pos 36.0 +712 26 training.batch_size 2.0 +712 27 model.embedding_dim 2.0 +712 27 model.scoring_fct_norm 1.0 +712 27 loss.margin 26.45278864411348 +712 27 loss.adversarial_temperature 0.8183162237082954 +712 27 optimizer.lr 0.007009294649610836 +712 27 negative_sampler.num_negs_per_pos 82.0 +712 27 training.batch_size 0.0 +712 28 model.embedding_dim 2.0 +712 28 model.scoring_fct_norm 1.0 +712 28 loss.margin 26.28220130788677 +712 28 loss.adversarial_temperature 0.5164183322023972 +712 28 optimizer.lr 0.012741896893069019 +712 28 negative_sampler.num_negs_per_pos 9.0 +712 28 training.batch_size 2.0 +712 29 model.embedding_dim 0.0 +712 29 model.scoring_fct_norm 1.0 +712 29 loss.margin 16.125083270042847 +712 29 
loss.adversarial_temperature 0.2951666309471839 +712 29 optimizer.lr 0.04927013206464946 +712 29 negative_sampler.num_negs_per_pos 16.0 +712 29 training.batch_size 1.0 +712 30 model.embedding_dim 2.0 +712 30 model.scoring_fct_norm 1.0 +712 30 loss.margin 21.94654423948354 +712 30 loss.adversarial_temperature 0.24336954700606525 +712 30 optimizer.lr 0.06833755980116046 +712 30 negative_sampler.num_negs_per_pos 12.0 +712 30 training.batch_size 2.0 +712 31 model.embedding_dim 0.0 +712 31 model.scoring_fct_norm 2.0 +712 31 loss.margin 4.677957518149071 +712 31 loss.adversarial_temperature 0.8103008725169379 +712 31 optimizer.lr 0.0595825512308652 +712 31 negative_sampler.num_negs_per_pos 67.0 +712 31 training.batch_size 1.0 +712 32 model.embedding_dim 0.0 +712 32 model.scoring_fct_norm 1.0 +712 32 loss.margin 13.668865368697169 +712 32 loss.adversarial_temperature 0.2762947056115321 +712 32 optimizer.lr 0.049426117983625416 +712 32 negative_sampler.num_negs_per_pos 29.0 +712 32 training.batch_size 2.0 +712 33 model.embedding_dim 2.0 +712 33 model.scoring_fct_norm 2.0 +712 33 loss.margin 8.296042897140094 +712 33 loss.adversarial_temperature 0.8025886371603004 +712 33 optimizer.lr 0.0011639460379756939 +712 33 negative_sampler.num_negs_per_pos 70.0 +712 33 training.batch_size 1.0 +712 34 model.embedding_dim 2.0 +712 34 model.scoring_fct_norm 1.0 +712 34 loss.margin 7.90986616785109 +712 34 loss.adversarial_temperature 0.7046292201814445 +712 34 optimizer.lr 0.0018762101033906514 +712 34 negative_sampler.num_negs_per_pos 9.0 +712 34 training.batch_size 2.0 +712 35 model.embedding_dim 0.0 +712 35 model.scoring_fct_norm 1.0 +712 35 loss.margin 11.55407666488474 +712 35 loss.adversarial_temperature 0.36613371253488824 +712 35 optimizer.lr 0.04154220489834964 +712 35 negative_sampler.num_negs_per_pos 10.0 +712 35 training.batch_size 0.0 +712 36 model.embedding_dim 0.0 +712 36 model.scoring_fct_norm 2.0 +712 36 loss.margin 15.231938994365514 +712 36 
loss.adversarial_temperature 0.4521001332887937 +712 36 optimizer.lr 0.008003432418471382 +712 36 negative_sampler.num_negs_per_pos 36.0 +712 36 training.batch_size 2.0 +712 37 model.embedding_dim 1.0 +712 37 model.scoring_fct_norm 2.0 +712 37 loss.margin 22.102666566474323 +712 37 loss.adversarial_temperature 0.23236461320400723 +712 37 optimizer.lr 0.001115354214977134 +712 37 negative_sampler.num_negs_per_pos 77.0 +712 37 training.batch_size 1.0 +712 38 model.embedding_dim 1.0 +712 38 model.scoring_fct_norm 2.0 +712 38 loss.margin 3.90293755601781 +712 38 loss.adversarial_temperature 0.7163467494365408 +712 38 optimizer.lr 0.007003201112466603 +712 38 negative_sampler.num_negs_per_pos 18.0 +712 38 training.batch_size 1.0 +712 39 model.embedding_dim 2.0 +712 39 model.scoring_fct_norm 2.0 +712 39 loss.margin 15.793761629092609 +712 39 loss.adversarial_temperature 0.31504685212165917 +712 39 optimizer.lr 0.0093654791479374 +712 39 negative_sampler.num_negs_per_pos 20.0 +712 39 training.batch_size 0.0 +712 40 model.embedding_dim 0.0 +712 40 model.scoring_fct_norm 1.0 +712 40 loss.margin 15.582089074418091 +712 40 loss.adversarial_temperature 0.17585714682509762 +712 40 optimizer.lr 0.010196047228733919 +712 40 negative_sampler.num_negs_per_pos 57.0 +712 40 training.batch_size 0.0 +712 41 model.embedding_dim 2.0 +712 41 model.scoring_fct_norm 2.0 +712 41 loss.margin 11.818922037781915 +712 41 loss.adversarial_temperature 0.8492111778780602 +712 41 optimizer.lr 0.0731497638588548 +712 41 negative_sampler.num_negs_per_pos 47.0 +712 41 training.batch_size 2.0 +712 42 model.embedding_dim 0.0 +712 42 model.scoring_fct_norm 1.0 +712 42 loss.margin 17.747096626295647 +712 42 loss.adversarial_temperature 0.7435835947374814 +712 42 optimizer.lr 0.002852741031375574 +712 42 negative_sampler.num_negs_per_pos 84.0 +712 42 training.batch_size 1.0 +712 43 model.embedding_dim 0.0 +712 43 model.scoring_fct_norm 2.0 +712 43 loss.margin 8.4020747749889 +712 43 
loss.adversarial_temperature 0.6326896105922012 +712 43 optimizer.lr 0.026736752985711026 +712 43 negative_sampler.num_negs_per_pos 18.0 +712 43 training.batch_size 0.0 +712 44 model.embedding_dim 0.0 +712 44 model.scoring_fct_norm 2.0 +712 44 loss.margin 21.572013056862907 +712 44 loss.adversarial_temperature 0.23803362053678878 +712 44 optimizer.lr 0.08595798234065488 +712 44 negative_sampler.num_negs_per_pos 0.0 +712 44 training.batch_size 2.0 +712 45 model.embedding_dim 2.0 +712 45 model.scoring_fct_norm 2.0 +712 45 loss.margin 20.37711796785719 +712 45 loss.adversarial_temperature 0.9965086083787709 +712 45 optimizer.lr 0.004955975278360338 +712 45 negative_sampler.num_negs_per_pos 86.0 +712 45 training.batch_size 1.0 +712 46 model.embedding_dim 1.0 +712 46 model.scoring_fct_norm 1.0 +712 46 loss.margin 26.922738565610416 +712 46 loss.adversarial_temperature 0.1763625202193064 +712 46 optimizer.lr 0.03984534872333245 +712 46 negative_sampler.num_negs_per_pos 79.0 +712 46 training.batch_size 0.0 +712 47 model.embedding_dim 0.0 +712 47 model.scoring_fct_norm 1.0 +712 47 loss.margin 4.7356649302685705 +712 47 loss.adversarial_temperature 0.5351589788395845 +712 47 optimizer.lr 0.02200166854807581 +712 47 negative_sampler.num_negs_per_pos 74.0 +712 47 training.batch_size 1.0 +712 48 model.embedding_dim 1.0 +712 48 model.scoring_fct_norm 1.0 +712 48 loss.margin 11.30759253600903 +712 48 loss.adversarial_temperature 0.7734798256811631 +712 48 optimizer.lr 0.0032941522018269664 +712 48 negative_sampler.num_negs_per_pos 49.0 +712 48 training.batch_size 1.0 +712 49 model.embedding_dim 0.0 +712 49 model.scoring_fct_norm 2.0 +712 49 loss.margin 27.887947157309917 +712 49 loss.adversarial_temperature 0.6081800827311138 +712 49 optimizer.lr 0.09323100794777392 +712 49 negative_sampler.num_negs_per_pos 13.0 +712 49 training.batch_size 1.0 +712 50 model.embedding_dim 1.0 +712 50 model.scoring_fct_norm 2.0 +712 50 loss.margin 24.191098087674234 +712 50 
loss.adversarial_temperature 0.6222392102652102 +712 50 optimizer.lr 0.05895837804864453 +712 50 negative_sampler.num_negs_per_pos 89.0 +712 50 training.batch_size 0.0 +712 51 model.embedding_dim 2.0 +712 51 model.scoring_fct_norm 2.0 +712 51 loss.margin 17.391292249981667 +712 51 loss.adversarial_temperature 0.9094647347154013 +712 51 optimizer.lr 0.05000378526468042 +712 51 negative_sampler.num_negs_per_pos 33.0 +712 51 training.batch_size 2.0 +712 52 model.embedding_dim 1.0 +712 52 model.scoring_fct_norm 1.0 +712 52 loss.margin 1.538274242356392 +712 52 loss.adversarial_temperature 0.14059654364266305 +712 52 optimizer.lr 0.010390592251279644 +712 52 negative_sampler.num_negs_per_pos 32.0 +712 52 training.batch_size 2.0 +712 53 model.embedding_dim 0.0 +712 53 model.scoring_fct_norm 2.0 +712 53 loss.margin 21.32219197315536 +712 53 loss.adversarial_temperature 0.8176376681949507 +712 53 optimizer.lr 0.05047821345092288 +712 53 negative_sampler.num_negs_per_pos 71.0 +712 53 training.batch_size 0.0 +712 54 model.embedding_dim 2.0 +712 54 model.scoring_fct_norm 1.0 +712 54 loss.margin 9.677112094178648 +712 54 loss.adversarial_temperature 0.2900017994522216 +712 54 optimizer.lr 0.028107816591411424 +712 54 negative_sampler.num_negs_per_pos 65.0 +712 54 training.batch_size 2.0 +712 55 model.embedding_dim 0.0 +712 55 model.scoring_fct_norm 1.0 +712 55 loss.margin 19.914731913699953 +712 55 loss.adversarial_temperature 0.9345781650189604 +712 55 optimizer.lr 0.09141219568309239 +712 55 negative_sampler.num_negs_per_pos 0.0 +712 55 training.batch_size 1.0 +712 56 model.embedding_dim 1.0 +712 56 model.scoring_fct_norm 2.0 +712 56 loss.margin 28.805778135285667 +712 56 loss.adversarial_temperature 0.703754174319877 +712 56 optimizer.lr 0.041748896352783546 +712 56 negative_sampler.num_negs_per_pos 14.0 +712 56 training.batch_size 0.0 +712 57 model.embedding_dim 2.0 +712 57 model.scoring_fct_norm 1.0 +712 57 loss.margin 11.87252994088988 +712 57 
loss.adversarial_temperature 0.7322538251240355 +712 57 optimizer.lr 0.04039139928213812 +712 57 negative_sampler.num_negs_per_pos 69.0 +712 57 training.batch_size 2.0 +712 58 model.embedding_dim 1.0 +712 58 model.scoring_fct_norm 2.0 +712 58 loss.margin 21.199807009572318 +712 58 loss.adversarial_temperature 0.6084025521650648 +712 58 optimizer.lr 0.0864872825911182 +712 58 negative_sampler.num_negs_per_pos 35.0 +712 58 training.batch_size 0.0 +712 59 model.embedding_dim 0.0 +712 59 model.scoring_fct_norm 2.0 +712 59 loss.margin 11.110748695359714 +712 59 loss.adversarial_temperature 0.8349763130999189 +712 59 optimizer.lr 0.0029581451553687896 +712 59 negative_sampler.num_negs_per_pos 61.0 +712 59 training.batch_size 1.0 +712 60 model.embedding_dim 1.0 +712 60 model.scoring_fct_norm 1.0 +712 60 loss.margin 22.975935601397854 +712 60 loss.adversarial_temperature 0.23529697355315027 +712 60 optimizer.lr 0.0022003538697436317 +712 60 negative_sampler.num_negs_per_pos 93.0 +712 60 training.batch_size 2.0 +712 61 model.embedding_dim 2.0 +712 61 model.scoring_fct_norm 2.0 +712 61 loss.margin 9.530266524315666 +712 61 loss.adversarial_temperature 0.5011071384752506 +712 61 optimizer.lr 0.007924014960405963 +712 61 negative_sampler.num_negs_per_pos 51.0 +712 61 training.batch_size 1.0 +712 62 model.embedding_dim 0.0 +712 62 model.scoring_fct_norm 1.0 +712 62 loss.margin 20.4161046336696 +712 62 loss.adversarial_temperature 0.3658441167975147 +712 62 optimizer.lr 0.01521424488046426 +712 62 negative_sampler.num_negs_per_pos 76.0 +712 62 training.batch_size 0.0 +712 63 model.embedding_dim 2.0 +712 63 model.scoring_fct_norm 1.0 +712 63 loss.margin 15.50178745030418 +712 63 loss.adversarial_temperature 0.12479198671708988 +712 63 optimizer.lr 0.01414710862477659 +712 63 negative_sampler.num_negs_per_pos 31.0 +712 63 training.batch_size 2.0 +712 64 model.embedding_dim 0.0 +712 64 model.scoring_fct_norm 2.0 +712 64 loss.margin 18.698416123609295 +712 64 
loss.adversarial_temperature 0.5928857041731731 +712 64 optimizer.lr 0.004578725123275828 +712 64 negative_sampler.num_negs_per_pos 19.0 +712 64 training.batch_size 2.0 +712 65 model.embedding_dim 2.0 +712 65 model.scoring_fct_norm 2.0 +712 65 loss.margin 21.638577808568094 +712 65 loss.adversarial_temperature 0.14332335474850053 +712 65 optimizer.lr 0.02303458198546587 +712 65 negative_sampler.num_negs_per_pos 56.0 +712 65 training.batch_size 0.0 +712 66 model.embedding_dim 0.0 +712 66 model.scoring_fct_norm 1.0 +712 66 loss.margin 28.400229692102265 +712 66 loss.adversarial_temperature 0.699908139715599 +712 66 optimizer.lr 0.006585772087002458 +712 66 negative_sampler.num_negs_per_pos 43.0 +712 66 training.batch_size 0.0 +712 67 model.embedding_dim 0.0 +712 67 model.scoring_fct_norm 2.0 +712 67 loss.margin 29.57863901657724 +712 67 loss.adversarial_temperature 0.38939687718466187 +712 67 optimizer.lr 0.0010612467779219372 +712 67 negative_sampler.num_negs_per_pos 90.0 +712 67 training.batch_size 2.0 +712 68 model.embedding_dim 2.0 +712 68 model.scoring_fct_norm 2.0 +712 68 loss.margin 8.198195951185621 +712 68 loss.adversarial_temperature 0.1542893678460229 +712 68 optimizer.lr 0.008957380576416788 +712 68 negative_sampler.num_negs_per_pos 82.0 +712 68 training.batch_size 0.0 +712 69 model.embedding_dim 1.0 +712 69 model.scoring_fct_norm 1.0 +712 69 loss.margin 25.686255700598462 +712 69 loss.adversarial_temperature 0.6934115575713866 +712 69 optimizer.lr 0.02182490302809427 +712 69 negative_sampler.num_negs_per_pos 10.0 +712 69 training.batch_size 1.0 +712 70 model.embedding_dim 2.0 +712 70 model.scoring_fct_norm 2.0 +712 70 loss.margin 4.790343596318229 +712 70 loss.adversarial_temperature 0.8500874306006153 +712 70 optimizer.lr 0.0023182758569370034 +712 70 negative_sampler.num_negs_per_pos 9.0 +712 70 training.batch_size 1.0 +712 71 model.embedding_dim 0.0 +712 71 model.scoring_fct_norm 1.0 +712 71 loss.margin 11.747008067725336 +712 71 
loss.adversarial_temperature 0.10251541954896566 +712 71 optimizer.lr 0.037796152320897634 +712 71 negative_sampler.num_negs_per_pos 1.0 +712 71 training.batch_size 2.0 +712 72 model.embedding_dim 2.0 +712 72 model.scoring_fct_norm 2.0 +712 72 loss.margin 27.188189137046972 +712 72 loss.adversarial_temperature 0.7084616515011772 +712 72 optimizer.lr 0.013083644459813353 +712 72 negative_sampler.num_negs_per_pos 26.0 +712 72 training.batch_size 2.0 +712 73 model.embedding_dim 1.0 +712 73 model.scoring_fct_norm 1.0 +712 73 loss.margin 16.16135670759479 +712 73 loss.adversarial_temperature 0.5018037373555526 +712 73 optimizer.lr 0.00105084801497152 +712 73 negative_sampler.num_negs_per_pos 56.0 +712 73 training.batch_size 1.0 +712 74 model.embedding_dim 0.0 +712 74 model.scoring_fct_norm 2.0 +712 74 loss.margin 15.64588066375841 +712 74 loss.adversarial_temperature 0.5342950398997691 +712 74 optimizer.lr 0.027570359305251552 +712 74 negative_sampler.num_negs_per_pos 45.0 +712 74 training.batch_size 2.0 +712 75 model.embedding_dim 1.0 +712 75 model.scoring_fct_norm 1.0 +712 75 loss.margin 6.999282438422002 +712 75 loss.adversarial_temperature 0.4877914130673586 +712 75 optimizer.lr 0.0710410354237786 +712 75 negative_sampler.num_negs_per_pos 50.0 +712 75 training.batch_size 2.0 +712 76 model.embedding_dim 0.0 +712 76 model.scoring_fct_norm 1.0 +712 76 loss.margin 1.5291933393277675 +712 76 loss.adversarial_temperature 0.8202235748756573 +712 76 optimizer.lr 0.010072329058917277 +712 76 negative_sampler.num_negs_per_pos 89.0 +712 76 training.batch_size 1.0 +712 77 model.embedding_dim 2.0 +712 77 model.scoring_fct_norm 1.0 +712 77 loss.margin 13.859238812615377 +712 77 loss.adversarial_temperature 0.1978922399005001 +712 77 optimizer.lr 0.06272275371278772 +712 77 negative_sampler.num_negs_per_pos 34.0 +712 77 training.batch_size 0.0 +712 78 model.embedding_dim 2.0 +712 78 model.scoring_fct_norm 2.0 +712 78 loss.margin 2.18690600778426 +712 78 
loss.adversarial_temperature 0.8627072586864096 +712 78 optimizer.lr 0.0013042389171771128 +712 78 negative_sampler.num_negs_per_pos 83.0 +712 78 training.batch_size 1.0 +712 79 model.embedding_dim 0.0 +712 79 model.scoring_fct_norm 2.0 +712 79 loss.margin 9.531237242547087 +712 79 loss.adversarial_temperature 0.2320516610435603 +712 79 optimizer.lr 0.010106119993218825 +712 79 negative_sampler.num_negs_per_pos 23.0 +712 79 training.batch_size 0.0 +712 80 model.embedding_dim 2.0 +712 80 model.scoring_fct_norm 2.0 +712 80 loss.margin 19.068464808108313 +712 80 loss.adversarial_temperature 0.1791038927025551 +712 80 optimizer.lr 0.0012405214469456652 +712 80 negative_sampler.num_negs_per_pos 38.0 +712 80 training.batch_size 1.0 +712 81 model.embedding_dim 2.0 +712 81 model.scoring_fct_norm 2.0 +712 81 loss.margin 18.77698163750544 +712 81 loss.adversarial_temperature 0.10152538800208406 +712 81 optimizer.lr 0.010541056270616902 +712 81 negative_sampler.num_negs_per_pos 73.0 +712 81 training.batch_size 2.0 +712 82 model.embedding_dim 1.0 +712 82 model.scoring_fct_norm 1.0 +712 82 loss.margin 26.79827635828259 +712 82 loss.adversarial_temperature 0.8526914458472891 +712 82 optimizer.lr 0.01660468652253045 +712 82 negative_sampler.num_negs_per_pos 75.0 +712 82 training.batch_size 2.0 +712 83 model.embedding_dim 0.0 +712 83 model.scoring_fct_norm 2.0 +712 83 loss.margin 21.92534437147009 +712 83 loss.adversarial_temperature 0.4995719199410137 +712 83 optimizer.lr 0.010409855926712295 +712 83 negative_sampler.num_negs_per_pos 16.0 +712 83 training.batch_size 2.0 +712 84 model.embedding_dim 1.0 +712 84 model.scoring_fct_norm 2.0 +712 84 loss.margin 10.897894245573259 +712 84 loss.adversarial_temperature 0.6708386326434154 +712 84 optimizer.lr 0.004694616112012181 +712 84 negative_sampler.num_negs_per_pos 26.0 +712 84 training.batch_size 1.0 +712 85 model.embedding_dim 2.0 +712 85 model.scoring_fct_norm 2.0 +712 85 loss.margin 1.0723732316683174 +712 85 
loss.adversarial_temperature 0.13620739405406537 +712 85 optimizer.lr 0.004052578400641766 +712 85 negative_sampler.num_negs_per_pos 21.0 +712 85 training.batch_size 0.0 +712 86 model.embedding_dim 0.0 +712 86 model.scoring_fct_norm 2.0 +712 86 loss.margin 27.811855351041448 +712 86 loss.adversarial_temperature 0.21020526919566201 +712 86 optimizer.lr 0.0011469910669193097 +712 86 negative_sampler.num_negs_per_pos 38.0 +712 86 training.batch_size 1.0 +712 87 model.embedding_dim 2.0 +712 87 model.scoring_fct_norm 2.0 +712 87 loss.margin 1.6573837349972274 +712 87 loss.adversarial_temperature 0.1906421396294464 +712 87 optimizer.lr 0.01481223015414138 +712 87 negative_sampler.num_negs_per_pos 42.0 +712 87 training.batch_size 0.0 +712 88 model.embedding_dim 2.0 +712 88 model.scoring_fct_norm 1.0 +712 88 loss.margin 29.36442558403525 +712 88 loss.adversarial_temperature 0.48884113019974273 +712 88 optimizer.lr 0.002234152384751349 +712 88 negative_sampler.num_negs_per_pos 37.0 +712 88 training.batch_size 1.0 +712 89 model.embedding_dim 1.0 +712 89 model.scoring_fct_norm 1.0 +712 89 loss.margin 9.106985087010035 +712 89 loss.adversarial_temperature 0.3446978696807904 +712 89 optimizer.lr 0.0038142308816478355 +712 89 negative_sampler.num_negs_per_pos 16.0 +712 89 training.batch_size 1.0 +712 90 model.embedding_dim 2.0 +712 90 model.scoring_fct_norm 2.0 +712 90 loss.margin 15.622502615349704 +712 90 loss.adversarial_temperature 0.48840740160215357 +712 90 optimizer.lr 0.0015816512035939235 +712 90 negative_sampler.num_negs_per_pos 47.0 +712 90 training.batch_size 2.0 +712 91 model.embedding_dim 1.0 +712 91 model.scoring_fct_norm 1.0 +712 91 loss.margin 5.680852299380523 +712 91 loss.adversarial_temperature 0.11089926515336293 +712 91 optimizer.lr 0.0010615140464022583 +712 91 negative_sampler.num_negs_per_pos 22.0 +712 91 training.batch_size 2.0 +712 92 model.embedding_dim 1.0 +712 92 model.scoring_fct_norm 1.0 +712 92 loss.margin 15.020339135468221 +712 92 
loss.adversarial_temperature 0.4144431981812683 +712 92 optimizer.lr 0.08249954777673406 +712 92 negative_sampler.num_negs_per_pos 57.0 +712 92 training.batch_size 2.0 +712 93 model.embedding_dim 1.0 +712 93 model.scoring_fct_norm 1.0 +712 93 loss.margin 26.174360209872575 +712 93 loss.adversarial_temperature 0.2563726648377057 +712 93 optimizer.lr 0.003794575235121439 +712 93 negative_sampler.num_negs_per_pos 27.0 +712 93 training.batch_size 1.0 +712 94 model.embedding_dim 0.0 +712 94 model.scoring_fct_norm 1.0 +712 94 loss.margin 3.8691073967221383 +712 94 loss.adversarial_temperature 0.23881843970823907 +712 94 optimizer.lr 0.0044812815588062965 +712 94 negative_sampler.num_negs_per_pos 7.0 +712 94 training.batch_size 0.0 +712 95 model.embedding_dim 2.0 +712 95 model.scoring_fct_norm 2.0 +712 95 loss.margin 16.052228239498454 +712 95 loss.adversarial_temperature 0.7046295448300823 +712 95 optimizer.lr 0.0032383233560694643 +712 95 negative_sampler.num_negs_per_pos 67.0 +712 95 training.batch_size 2.0 +712 96 model.embedding_dim 2.0 +712 96 model.scoring_fct_norm 2.0 +712 96 loss.margin 15.05338354892141 +712 96 loss.adversarial_temperature 0.9451955623715771 +712 96 optimizer.lr 0.0036039553625699873 +712 96 negative_sampler.num_negs_per_pos 41.0 +712 96 training.batch_size 1.0 +712 97 model.embedding_dim 0.0 +712 97 model.scoring_fct_norm 2.0 +712 97 loss.margin 22.026855108155384 +712 97 loss.adversarial_temperature 0.41345663754568407 +712 97 optimizer.lr 0.0014836876507382131 +712 97 negative_sampler.num_negs_per_pos 85.0 +712 97 training.batch_size 1.0 +712 98 model.embedding_dim 2.0 +712 98 model.scoring_fct_norm 2.0 +712 98 loss.margin 14.62400410677461 +712 98 loss.adversarial_temperature 0.6365766479361349 +712 98 optimizer.lr 0.001051445121460907 +712 98 negative_sampler.num_negs_per_pos 76.0 +712 98 training.batch_size 2.0 +712 99 model.embedding_dim 2.0 +712 99 model.scoring_fct_norm 2.0 +712 99 loss.margin 2.7430908225244965 +712 99 
loss.adversarial_temperature 0.5676218930089887 +712 99 optimizer.lr 0.041078514649769304 +712 99 negative_sampler.num_negs_per_pos 86.0 +712 99 training.batch_size 2.0 +712 100 model.embedding_dim 2.0 +712 100 model.scoring_fct_norm 1.0 +712 100 loss.margin 10.301516028676986 +712 100 loss.adversarial_temperature 0.6909096303926437 +712 100 optimizer.lr 0.002141765946909714 +712 100 negative_sampler.num_negs_per_pos 90.0 +712 100 training.batch_size 0.0 +712 1 dataset """kinships""" +712 1 model """structuredembedding""" +712 1 loss """nssa""" +712 1 regularizer """no""" +712 1 optimizer """adam""" +712 1 training_loop """owa""" +712 1 negative_sampler """basic""" +712 1 evaluator """rankbased""" +712 2 dataset """kinships""" +712 2 model """structuredembedding""" +712 2 loss """nssa""" +712 2 regularizer """no""" +712 2 optimizer """adam""" +712 2 training_loop """owa""" +712 2 negative_sampler """basic""" +712 2 evaluator """rankbased""" +712 3 dataset """kinships""" +712 3 model """structuredembedding""" +712 3 loss """nssa""" +712 3 regularizer """no""" +712 3 optimizer """adam""" +712 3 training_loop """owa""" +712 3 negative_sampler """basic""" +712 3 evaluator """rankbased""" +712 4 dataset """kinships""" +712 4 model """structuredembedding""" +712 4 loss """nssa""" +712 4 regularizer """no""" +712 4 optimizer """adam""" +712 4 training_loop """owa""" +712 4 negative_sampler """basic""" +712 4 evaluator """rankbased""" +712 5 dataset """kinships""" +712 5 model """structuredembedding""" +712 5 loss """nssa""" +712 5 regularizer """no""" +712 5 optimizer """adam""" +712 5 training_loop """owa""" +712 5 negative_sampler """basic""" +712 5 evaluator """rankbased""" +712 6 dataset """kinships""" +712 6 model """structuredembedding""" +712 6 loss """nssa""" +712 6 regularizer """no""" +712 6 optimizer """adam""" +712 6 training_loop """owa""" +712 6 negative_sampler """basic""" +712 6 evaluator """rankbased""" +712 7 dataset """kinships""" +712 7 model 
"""structuredembedding""" +712 7 loss """nssa""" +712 7 regularizer """no""" +712 7 optimizer """adam""" +712 7 training_loop """owa""" +712 7 negative_sampler """basic""" +712 7 evaluator """rankbased""" +712 8 dataset """kinships""" +712 8 model """structuredembedding""" +712 8 loss """nssa""" +712 8 regularizer """no""" +712 8 optimizer """adam""" +712 8 training_loop """owa""" +712 8 negative_sampler """basic""" +712 8 evaluator """rankbased""" +712 9 dataset """kinships""" +712 9 model """structuredembedding""" +712 9 loss """nssa""" +712 9 regularizer """no""" +712 9 optimizer """adam""" +712 9 training_loop """owa""" +712 9 negative_sampler """basic""" +712 9 evaluator """rankbased""" +712 10 dataset """kinships""" +712 10 model """structuredembedding""" +712 10 loss """nssa""" +712 10 regularizer """no""" +712 10 optimizer """adam""" +712 10 training_loop """owa""" +712 10 negative_sampler """basic""" +712 10 evaluator """rankbased""" +712 11 dataset """kinships""" +712 11 model """structuredembedding""" +712 11 loss """nssa""" +712 11 regularizer """no""" +712 11 optimizer """adam""" +712 11 training_loop """owa""" +712 11 negative_sampler """basic""" +712 11 evaluator """rankbased""" +712 12 dataset """kinships""" +712 12 model """structuredembedding""" +712 12 loss """nssa""" +712 12 regularizer """no""" +712 12 optimizer """adam""" +712 12 training_loop """owa""" +712 12 negative_sampler """basic""" +712 12 evaluator """rankbased""" +712 13 dataset """kinships""" +712 13 model """structuredembedding""" +712 13 loss """nssa""" +712 13 regularizer """no""" +712 13 optimizer """adam""" +712 13 training_loop """owa""" +712 13 negative_sampler """basic""" +712 13 evaluator """rankbased""" +712 14 dataset """kinships""" +712 14 model """structuredembedding""" +712 14 loss """nssa""" +712 14 regularizer """no""" +712 14 optimizer """adam""" +712 14 training_loop """owa""" +712 14 negative_sampler """basic""" +712 14 evaluator """rankbased""" +712 15 dataset 
"""kinships""" +712 15 model """structuredembedding""" +712 15 loss """nssa""" +712 15 regularizer """no""" +712 15 optimizer """adam""" +712 15 training_loop """owa""" +712 15 negative_sampler """basic""" +712 15 evaluator """rankbased""" +712 16 dataset """kinships""" +712 16 model """structuredembedding""" +712 16 loss """nssa""" +712 16 regularizer """no""" +712 16 optimizer """adam""" +712 16 training_loop """owa""" +712 16 negative_sampler """basic""" +712 16 evaluator """rankbased""" +712 17 dataset """kinships""" +712 17 model """structuredembedding""" +712 17 loss """nssa""" +712 17 regularizer """no""" +712 17 optimizer """adam""" +712 17 training_loop """owa""" +712 17 negative_sampler """basic""" +712 17 evaluator """rankbased""" +712 18 dataset """kinships""" +712 18 model """structuredembedding""" +712 18 loss """nssa""" +712 18 regularizer """no""" +712 18 optimizer """adam""" +712 18 training_loop """owa""" +712 18 negative_sampler """basic""" +712 18 evaluator """rankbased""" +712 19 dataset """kinships""" +712 19 model """structuredembedding""" +712 19 loss """nssa""" +712 19 regularizer """no""" +712 19 optimizer """adam""" +712 19 training_loop """owa""" +712 19 negative_sampler """basic""" +712 19 evaluator """rankbased""" +712 20 dataset """kinships""" +712 20 model """structuredembedding""" +712 20 loss """nssa""" +712 20 regularizer """no""" +712 20 optimizer """adam""" +712 20 training_loop """owa""" +712 20 negative_sampler """basic""" +712 20 evaluator """rankbased""" +712 21 dataset """kinships""" +712 21 model """structuredembedding""" +712 21 loss """nssa""" +712 21 regularizer """no""" +712 21 optimizer """adam""" +712 21 training_loop """owa""" +712 21 negative_sampler """basic""" +712 21 evaluator """rankbased""" +712 22 dataset """kinships""" +712 22 model """structuredembedding""" +712 22 loss """nssa""" +712 22 regularizer """no""" +712 22 optimizer """adam""" +712 22 training_loop """owa""" +712 22 negative_sampler """basic""" 
+712 22 evaluator """rankbased""" +712 23 dataset """kinships""" +712 23 model """structuredembedding""" +712 23 loss """nssa""" +712 23 regularizer """no""" +712 23 optimizer """adam""" +712 23 training_loop """owa""" +712 23 negative_sampler """basic""" +712 23 evaluator """rankbased""" +712 24 dataset """kinships""" +712 24 model """structuredembedding""" +712 24 loss """nssa""" +712 24 regularizer """no""" +712 24 optimizer """adam""" +712 24 training_loop """owa""" +712 24 negative_sampler """basic""" +712 24 evaluator """rankbased""" +712 25 dataset """kinships""" +712 25 model """structuredembedding""" +712 25 loss """nssa""" +712 25 regularizer """no""" +712 25 optimizer """adam""" +712 25 training_loop """owa""" +712 25 negative_sampler """basic""" +712 25 evaluator """rankbased""" +712 26 dataset """kinships""" +712 26 model """structuredembedding""" +712 26 loss """nssa""" +712 26 regularizer """no""" +712 26 optimizer """adam""" +712 26 training_loop """owa""" +712 26 negative_sampler """basic""" +712 26 evaluator """rankbased""" +712 27 dataset """kinships""" +712 27 model """structuredembedding""" +712 27 loss """nssa""" +712 27 regularizer """no""" +712 27 optimizer """adam""" +712 27 training_loop """owa""" +712 27 negative_sampler """basic""" +712 27 evaluator """rankbased""" +712 28 dataset """kinships""" +712 28 model """structuredembedding""" +712 28 loss """nssa""" +712 28 regularizer """no""" +712 28 optimizer """adam""" +712 28 training_loop """owa""" +712 28 negative_sampler """basic""" +712 28 evaluator """rankbased""" +712 29 dataset """kinships""" +712 29 model """structuredembedding""" +712 29 loss """nssa""" +712 29 regularizer """no""" +712 29 optimizer """adam""" +712 29 training_loop """owa""" +712 29 negative_sampler """basic""" +712 29 evaluator """rankbased""" +712 30 dataset """kinships""" +712 30 model """structuredembedding""" +712 30 loss """nssa""" +712 30 regularizer """no""" +712 30 optimizer """adam""" +712 30 
training_loop """owa""" +712 30 negative_sampler """basic""" +712 30 evaluator """rankbased""" +712 31 dataset """kinships""" +712 31 model """structuredembedding""" +712 31 loss """nssa""" +712 31 regularizer """no""" +712 31 optimizer """adam""" +712 31 training_loop """owa""" +712 31 negative_sampler """basic""" +712 31 evaluator """rankbased""" +712 32 dataset """kinships""" +712 32 model """structuredembedding""" +712 32 loss """nssa""" +712 32 regularizer """no""" +712 32 optimizer """adam""" +712 32 training_loop """owa""" +712 32 negative_sampler """basic""" +712 32 evaluator """rankbased""" +712 33 dataset """kinships""" +712 33 model """structuredembedding""" +712 33 loss """nssa""" +712 33 regularizer """no""" +712 33 optimizer """adam""" +712 33 training_loop """owa""" +712 33 negative_sampler """basic""" +712 33 evaluator """rankbased""" +712 34 dataset """kinships""" +712 34 model """structuredembedding""" +712 34 loss """nssa""" +712 34 regularizer """no""" +712 34 optimizer """adam""" +712 34 training_loop """owa""" +712 34 negative_sampler """basic""" +712 34 evaluator """rankbased""" +712 35 dataset """kinships""" +712 35 model """structuredembedding""" +712 35 loss """nssa""" +712 35 regularizer """no""" +712 35 optimizer """adam""" +712 35 training_loop """owa""" +712 35 negative_sampler """basic""" +712 35 evaluator """rankbased""" +712 36 dataset """kinships""" +712 36 model """structuredembedding""" +712 36 loss """nssa""" +712 36 regularizer """no""" +712 36 optimizer """adam""" +712 36 training_loop """owa""" +712 36 negative_sampler """basic""" +712 36 evaluator """rankbased""" +712 37 dataset """kinships""" +712 37 model """structuredembedding""" +712 37 loss """nssa""" +712 37 regularizer """no""" +712 37 optimizer """adam""" +712 37 training_loop """owa""" +712 37 negative_sampler """basic""" +712 37 evaluator """rankbased""" +712 38 dataset """kinships""" +712 38 model """structuredembedding""" +712 38 loss """nssa""" +712 38 
regularizer """no""" +712 38 optimizer """adam""" +712 38 training_loop """owa""" +712 38 negative_sampler """basic""" +712 38 evaluator """rankbased""" +712 39 dataset """kinships""" +712 39 model """structuredembedding""" +712 39 loss """nssa""" +712 39 regularizer """no""" +712 39 optimizer """adam""" +712 39 training_loop """owa""" +712 39 negative_sampler """basic""" +712 39 evaluator """rankbased""" +712 40 dataset """kinships""" +712 40 model """structuredembedding""" +712 40 loss """nssa""" +712 40 regularizer """no""" +712 40 optimizer """adam""" +712 40 training_loop """owa""" +712 40 negative_sampler """basic""" +712 40 evaluator """rankbased""" +712 41 dataset """kinships""" +712 41 model """structuredembedding""" +712 41 loss """nssa""" +712 41 regularizer """no""" +712 41 optimizer """adam""" +712 41 training_loop """owa""" +712 41 negative_sampler """basic""" +712 41 evaluator """rankbased""" +712 42 dataset """kinships""" +712 42 model """structuredembedding""" +712 42 loss """nssa""" +712 42 regularizer """no""" +712 42 optimizer """adam""" +712 42 training_loop """owa""" +712 42 negative_sampler """basic""" +712 42 evaluator """rankbased""" +712 43 dataset """kinships""" +712 43 model """structuredembedding""" +712 43 loss """nssa""" +712 43 regularizer """no""" +712 43 optimizer """adam""" +712 43 training_loop """owa""" +712 43 negative_sampler """basic""" +712 43 evaluator """rankbased""" +712 44 dataset """kinships""" +712 44 model """structuredembedding""" +712 44 loss """nssa""" +712 44 regularizer """no""" +712 44 optimizer """adam""" +712 44 training_loop """owa""" +712 44 negative_sampler """basic""" +712 44 evaluator """rankbased""" +712 45 dataset """kinships""" +712 45 model """structuredembedding""" +712 45 loss """nssa""" +712 45 regularizer """no""" +712 45 optimizer """adam""" +712 45 training_loop """owa""" +712 45 negative_sampler """basic""" +712 45 evaluator """rankbased""" +712 46 dataset """kinships""" +712 46 model 
"""structuredembedding""" +712 46 loss """nssa""" +712 46 regularizer """no""" +712 46 optimizer """adam""" +712 46 training_loop """owa""" +712 46 negative_sampler """basic""" +712 46 evaluator """rankbased""" +712 47 dataset """kinships""" +712 47 model """structuredembedding""" +712 47 loss """nssa""" +712 47 regularizer """no""" +712 47 optimizer """adam""" +712 47 training_loop """owa""" +712 47 negative_sampler """basic""" +712 47 evaluator """rankbased""" +712 48 dataset """kinships""" +712 48 model """structuredembedding""" +712 48 loss """nssa""" +712 48 regularizer """no""" +712 48 optimizer """adam""" +712 48 training_loop """owa""" +712 48 negative_sampler """basic""" +712 48 evaluator """rankbased""" +712 49 dataset """kinships""" +712 49 model """structuredembedding""" +712 49 loss """nssa""" +712 49 regularizer """no""" +712 49 optimizer """adam""" +712 49 training_loop """owa""" +712 49 negative_sampler """basic""" +712 49 evaluator """rankbased""" +712 50 dataset """kinships""" +712 50 model """structuredembedding""" +712 50 loss """nssa""" +712 50 regularizer """no""" +712 50 optimizer """adam""" +712 50 training_loop """owa""" +712 50 negative_sampler """basic""" +712 50 evaluator """rankbased""" +712 51 dataset """kinships""" +712 51 model """structuredembedding""" +712 51 loss """nssa""" +712 51 regularizer """no""" +712 51 optimizer """adam""" +712 51 training_loop """owa""" +712 51 negative_sampler """basic""" +712 51 evaluator """rankbased""" +712 52 dataset """kinships""" +712 52 model """structuredembedding""" +712 52 loss """nssa""" +712 52 regularizer """no""" +712 52 optimizer """adam""" +712 52 training_loop """owa""" +712 52 negative_sampler """basic""" +712 52 evaluator """rankbased""" +712 53 dataset """kinships""" +712 53 model """structuredembedding""" +712 53 loss """nssa""" +712 53 regularizer """no""" +712 53 optimizer """adam""" +712 53 training_loop """owa""" +712 53 negative_sampler """basic""" +712 53 evaluator 
"""rankbased""" +712 54 dataset """kinships""" +712 54 model """structuredembedding""" +712 54 loss """nssa""" +712 54 regularizer """no""" +712 54 optimizer """adam""" +712 54 training_loop """owa""" +712 54 negative_sampler """basic""" +712 54 evaluator """rankbased""" +712 55 dataset """kinships""" +712 55 model """structuredembedding""" +712 55 loss """nssa""" +712 55 regularizer """no""" +712 55 optimizer """adam""" +712 55 training_loop """owa""" +712 55 negative_sampler """basic""" +712 55 evaluator """rankbased""" +712 56 dataset """kinships""" +712 56 model """structuredembedding""" +712 56 loss """nssa""" +712 56 regularizer """no""" +712 56 optimizer """adam""" +712 56 training_loop """owa""" +712 56 negative_sampler """basic""" +712 56 evaluator """rankbased""" +712 57 dataset """kinships""" +712 57 model """structuredembedding""" +712 57 loss """nssa""" +712 57 regularizer """no""" +712 57 optimizer """adam""" +712 57 training_loop """owa""" +712 57 negative_sampler """basic""" +712 57 evaluator """rankbased""" +712 58 dataset """kinships""" +712 58 model """structuredembedding""" +712 58 loss """nssa""" +712 58 regularizer """no""" +712 58 optimizer """adam""" +712 58 training_loop """owa""" +712 58 negative_sampler """basic""" +712 58 evaluator """rankbased""" +712 59 dataset """kinships""" +712 59 model """structuredembedding""" +712 59 loss """nssa""" +712 59 regularizer """no""" +712 59 optimizer """adam""" +712 59 training_loop """owa""" +712 59 negative_sampler """basic""" +712 59 evaluator """rankbased""" +712 60 dataset """kinships""" +712 60 model """structuredembedding""" +712 60 loss """nssa""" +712 60 regularizer """no""" +712 60 optimizer """adam""" +712 60 training_loop """owa""" +712 60 negative_sampler """basic""" +712 60 evaluator """rankbased""" +712 61 dataset """kinships""" +712 61 model """structuredembedding""" +712 61 loss """nssa""" +712 61 regularizer """no""" +712 61 optimizer """adam""" +712 61 training_loop """owa""" +712 
61 negative_sampler """basic""" +712 61 evaluator """rankbased""" +712 62 dataset """kinships""" +712 62 model """structuredembedding""" +712 62 loss """nssa""" +712 62 regularizer """no""" +712 62 optimizer """adam""" +712 62 training_loop """owa""" +712 62 negative_sampler """basic""" +712 62 evaluator """rankbased""" +712 63 dataset """kinships""" +712 63 model """structuredembedding""" +712 63 loss """nssa""" +712 63 regularizer """no""" +712 63 optimizer """adam""" +712 63 training_loop """owa""" +712 63 negative_sampler """basic""" +712 63 evaluator """rankbased""" +712 64 dataset """kinships""" +712 64 model """structuredembedding""" +712 64 loss """nssa""" +712 64 regularizer """no""" +712 64 optimizer """adam""" +712 64 training_loop """owa""" +712 64 negative_sampler """basic""" +712 64 evaluator """rankbased""" +712 65 dataset """kinships""" +712 65 model """structuredembedding""" +712 65 loss """nssa""" +712 65 regularizer """no""" +712 65 optimizer """adam""" +712 65 training_loop """owa""" +712 65 negative_sampler """basic""" +712 65 evaluator """rankbased""" +712 66 dataset """kinships""" +712 66 model """structuredembedding""" +712 66 loss """nssa""" +712 66 regularizer """no""" +712 66 optimizer """adam""" +712 66 training_loop """owa""" +712 66 negative_sampler """basic""" +712 66 evaluator """rankbased""" +712 67 dataset """kinships""" +712 67 model """structuredembedding""" +712 67 loss """nssa""" +712 67 regularizer """no""" +712 67 optimizer """adam""" +712 67 training_loop """owa""" +712 67 negative_sampler """basic""" +712 67 evaluator """rankbased""" +712 68 dataset """kinships""" +712 68 model """structuredembedding""" +712 68 loss """nssa""" +712 68 regularizer """no""" +712 68 optimizer """adam""" +712 68 training_loop """owa""" +712 68 negative_sampler """basic""" +712 68 evaluator """rankbased""" +712 69 dataset """kinships""" +712 69 model """structuredembedding""" +712 69 loss """nssa""" +712 69 regularizer """no""" +712 69 optimizer 
"""adam""" +712 69 training_loop """owa""" +712 69 negative_sampler """basic""" +712 69 evaluator """rankbased""" +712 70 dataset """kinships""" +712 70 model """structuredembedding""" +712 70 loss """nssa""" +712 70 regularizer """no""" +712 70 optimizer """adam""" +712 70 training_loop """owa""" +712 70 negative_sampler """basic""" +712 70 evaluator """rankbased""" +712 71 dataset """kinships""" +712 71 model """structuredembedding""" +712 71 loss """nssa""" +712 71 regularizer """no""" +712 71 optimizer """adam""" +712 71 training_loop """owa""" +712 71 negative_sampler """basic""" +712 71 evaluator """rankbased""" +712 72 dataset """kinships""" +712 72 model """structuredembedding""" +712 72 loss """nssa""" +712 72 regularizer """no""" +712 72 optimizer """adam""" +712 72 training_loop """owa""" +712 72 negative_sampler """basic""" +712 72 evaluator """rankbased""" +712 73 dataset """kinships""" +712 73 model """structuredembedding""" +712 73 loss """nssa""" +712 73 regularizer """no""" +712 73 optimizer """adam""" +712 73 training_loop """owa""" +712 73 negative_sampler """basic""" +712 73 evaluator """rankbased""" +712 74 dataset """kinships""" +712 74 model """structuredembedding""" +712 74 loss """nssa""" +712 74 regularizer """no""" +712 74 optimizer """adam""" +712 74 training_loop """owa""" +712 74 negative_sampler """basic""" +712 74 evaluator """rankbased""" +712 75 dataset """kinships""" +712 75 model """structuredembedding""" +712 75 loss """nssa""" +712 75 regularizer """no""" +712 75 optimizer """adam""" +712 75 training_loop """owa""" +712 75 negative_sampler """basic""" +712 75 evaluator """rankbased""" +712 76 dataset """kinships""" +712 76 model """structuredembedding""" +712 76 loss """nssa""" +712 76 regularizer """no""" +712 76 optimizer """adam""" +712 76 training_loop """owa""" +712 76 negative_sampler """basic""" +712 76 evaluator """rankbased""" +712 77 dataset """kinships""" +712 77 model """structuredembedding""" +712 77 loss 
"""nssa""" +712 77 regularizer """no""" +712 77 optimizer """adam""" +712 77 training_loop """owa""" +712 77 negative_sampler """basic""" +712 77 evaluator """rankbased""" +712 78 dataset """kinships""" +712 78 model """structuredembedding""" +712 78 loss """nssa""" +712 78 regularizer """no""" +712 78 optimizer """adam""" +712 78 training_loop """owa""" +712 78 negative_sampler """basic""" +712 78 evaluator """rankbased""" +712 79 dataset """kinships""" +712 79 model """structuredembedding""" +712 79 loss """nssa""" +712 79 regularizer """no""" +712 79 optimizer """adam""" +712 79 training_loop """owa""" +712 79 negative_sampler """basic""" +712 79 evaluator """rankbased""" +712 80 dataset """kinships""" +712 80 model """structuredembedding""" +712 80 loss """nssa""" +712 80 regularizer """no""" +712 80 optimizer """adam""" +712 80 training_loop """owa""" +712 80 negative_sampler """basic""" +712 80 evaluator """rankbased""" +712 81 dataset """kinships""" +712 81 model """structuredembedding""" +712 81 loss """nssa""" +712 81 regularizer """no""" +712 81 optimizer """adam""" +712 81 training_loop """owa""" +712 81 negative_sampler """basic""" +712 81 evaluator """rankbased""" +712 82 dataset """kinships""" +712 82 model """structuredembedding""" +712 82 loss """nssa""" +712 82 regularizer """no""" +712 82 optimizer """adam""" +712 82 training_loop """owa""" +712 82 negative_sampler """basic""" +712 82 evaluator """rankbased""" +712 83 dataset """kinships""" +712 83 model """structuredembedding""" +712 83 loss """nssa""" +712 83 regularizer """no""" +712 83 optimizer """adam""" +712 83 training_loop """owa""" +712 83 negative_sampler """basic""" +712 83 evaluator """rankbased""" +712 84 dataset """kinships""" +712 84 model """structuredembedding""" +712 84 loss """nssa""" +712 84 regularizer """no""" +712 84 optimizer """adam""" +712 84 training_loop """owa""" +712 84 negative_sampler """basic""" +712 84 evaluator """rankbased""" +712 85 dataset """kinships""" +712 
85 model """structuredembedding""" +712 85 loss """nssa""" +712 85 regularizer """no""" +712 85 optimizer """adam""" +712 85 training_loop """owa""" +712 85 negative_sampler """basic""" +712 85 evaluator """rankbased""" +712 86 dataset """kinships""" +712 86 model """structuredembedding""" +712 86 loss """nssa""" +712 86 regularizer """no""" +712 86 optimizer """adam""" +712 86 training_loop """owa""" +712 86 negative_sampler """basic""" +712 86 evaluator """rankbased""" +712 87 dataset """kinships""" +712 87 model """structuredembedding""" +712 87 loss """nssa""" +712 87 regularizer """no""" +712 87 optimizer """adam""" +712 87 training_loop """owa""" +712 87 negative_sampler """basic""" +712 87 evaluator """rankbased""" +712 88 dataset """kinships""" +712 88 model """structuredembedding""" +712 88 loss """nssa""" +712 88 regularizer """no""" +712 88 optimizer """adam""" +712 88 training_loop """owa""" +712 88 negative_sampler """basic""" +712 88 evaluator """rankbased""" +712 89 dataset """kinships""" +712 89 model """structuredembedding""" +712 89 loss """nssa""" +712 89 regularizer """no""" +712 89 optimizer """adam""" +712 89 training_loop """owa""" +712 89 negative_sampler """basic""" +712 89 evaluator """rankbased""" +712 90 dataset """kinships""" +712 90 model """structuredembedding""" +712 90 loss """nssa""" +712 90 regularizer """no""" +712 90 optimizer """adam""" +712 90 training_loop """owa""" +712 90 negative_sampler """basic""" +712 90 evaluator """rankbased""" +712 91 dataset """kinships""" +712 91 model """structuredembedding""" +712 91 loss """nssa""" +712 91 regularizer """no""" +712 91 optimizer """adam""" +712 91 training_loop """owa""" +712 91 negative_sampler """basic""" +712 91 evaluator """rankbased""" +712 92 dataset """kinships""" +712 92 model """structuredembedding""" +712 92 loss """nssa""" +712 92 regularizer """no""" +712 92 optimizer """adam""" +712 92 training_loop """owa""" +712 92 negative_sampler """basic""" +712 92 evaluator 
"""rankbased""" +712 93 dataset """kinships""" +712 93 model """structuredembedding""" +712 93 loss """nssa""" +712 93 regularizer """no""" +712 93 optimizer """adam""" +712 93 training_loop """owa""" +712 93 negative_sampler """basic""" +712 93 evaluator """rankbased""" +712 94 dataset """kinships""" +712 94 model """structuredembedding""" +712 94 loss """nssa""" +712 94 regularizer """no""" +712 94 optimizer """adam""" +712 94 training_loop """owa""" +712 94 negative_sampler """basic""" +712 94 evaluator """rankbased""" +712 95 dataset """kinships""" +712 95 model """structuredembedding""" +712 95 loss """nssa""" +712 95 regularizer """no""" +712 95 optimizer """adam""" +712 95 training_loop """owa""" +712 95 negative_sampler """basic""" +712 95 evaluator """rankbased""" +712 96 dataset """kinships""" +712 96 model """structuredembedding""" +712 96 loss """nssa""" +712 96 regularizer """no""" +712 96 optimizer """adam""" +712 96 training_loop """owa""" +712 96 negative_sampler """basic""" +712 96 evaluator """rankbased""" +712 97 dataset """kinships""" +712 97 model """structuredembedding""" +712 97 loss """nssa""" +712 97 regularizer """no""" +712 97 optimizer """adam""" +712 97 training_loop """owa""" +712 97 negative_sampler """basic""" +712 97 evaluator """rankbased""" +712 98 dataset """kinships""" +712 98 model """structuredembedding""" +712 98 loss """nssa""" +712 98 regularizer """no""" +712 98 optimizer """adam""" +712 98 training_loop """owa""" +712 98 negative_sampler """basic""" +712 98 evaluator """rankbased""" +712 99 dataset """kinships""" +712 99 model """structuredembedding""" +712 99 loss """nssa""" +712 99 regularizer """no""" +712 99 optimizer """adam""" +712 99 training_loop """owa""" +712 99 negative_sampler """basic""" +712 99 evaluator """rankbased""" +712 100 dataset """kinships""" +712 100 model """structuredembedding""" +712 100 loss """nssa""" +712 100 regularizer """no""" +712 100 optimizer """adam""" +712 100 training_loop """owa""" 
+712 100 negative_sampler """basic""" +712 100 evaluator """rankbased""" +713 1 model.embedding_dim 0.0 +713 1 model.scoring_fct_norm 2.0 +713 1 optimizer.lr 0.08994093231515869 +713 1 training.batch_size 1.0 +713 1 training.label_smoothing 0.08602725990017862 +713 1 dataset """wn18rr""" +713 1 model """structuredembedding""" +713 1 loss """crossentropy""" +713 1 regularizer """no""" +713 1 optimizer """adam""" +713 1 training_loop """lcwa""" +713 1 evaluator """rankbased""" +714 1 model.embedding_dim 1.0 +714 1 model.scoring_fct_norm 2.0 +714 1 optimizer.lr 0.01240830708528527 +714 1 training.batch_size 0.0 +714 1 training.label_smoothing 0.06800500347961244 +714 1 dataset """wn18rr""" +714 1 model """structuredembedding""" +714 1 loss """crossentropy""" +714 1 regularizer """no""" +714 1 optimizer """adam""" +714 1 training_loop """lcwa""" +714 1 evaluator """rankbased""" +715 1 model.embedding_dim 0.0 +715 1 model.scoring_fct_norm 1.0 +715 1 optimizer.lr 0.0010488143488475432 +715 1 negative_sampler.num_negs_per_pos 25.0 +715 1 training.batch_size 2.0 +715 2 model.embedding_dim 0.0 +715 2 model.scoring_fct_norm 2.0 +715 2 optimizer.lr 0.0019174571071782509 +715 2 negative_sampler.num_negs_per_pos 15.0 +715 2 training.batch_size 1.0 +715 3 model.embedding_dim 2.0 +715 3 model.scoring_fct_norm 2.0 +715 3 optimizer.lr 0.09921484003077194 +715 3 negative_sampler.num_negs_per_pos 56.0 +715 3 training.batch_size 0.0 +715 4 model.embedding_dim 0.0 +715 4 model.scoring_fct_norm 1.0 +715 4 optimizer.lr 0.01135073039673663 +715 4 negative_sampler.num_negs_per_pos 94.0 +715 4 training.batch_size 2.0 +715 5 model.embedding_dim 2.0 +715 5 model.scoring_fct_norm 2.0 +715 5 optimizer.lr 0.004654368904722144 +715 5 negative_sampler.num_negs_per_pos 47.0 +715 5 training.batch_size 1.0 +715 6 model.embedding_dim 2.0 +715 6 model.scoring_fct_norm 1.0 +715 6 optimizer.lr 0.008391718706523608 +715 6 negative_sampler.num_negs_per_pos 13.0 +715 6 training.batch_size 0.0 +715 7 
model.embedding_dim 1.0 +715 7 model.scoring_fct_norm 2.0 +715 7 optimizer.lr 0.036864838924135016 +715 7 negative_sampler.num_negs_per_pos 75.0 +715 7 training.batch_size 2.0 +715 8 model.embedding_dim 0.0 +715 8 model.scoring_fct_norm 1.0 +715 8 optimizer.lr 0.055148510303245434 +715 8 negative_sampler.num_negs_per_pos 56.0 +715 8 training.batch_size 2.0 +715 9 model.embedding_dim 2.0 +715 9 model.scoring_fct_norm 1.0 +715 9 optimizer.lr 0.00680993902326362 +715 9 negative_sampler.num_negs_per_pos 44.0 +715 9 training.batch_size 0.0 +715 10 model.embedding_dim 2.0 +715 10 model.scoring_fct_norm 1.0 +715 10 optimizer.lr 0.0934816250579497 +715 10 negative_sampler.num_negs_per_pos 1.0 +715 10 training.batch_size 0.0 +715 11 model.embedding_dim 1.0 +715 11 model.scoring_fct_norm 1.0 +715 11 optimizer.lr 0.003403015214177235 +715 11 negative_sampler.num_negs_per_pos 80.0 +715 11 training.batch_size 2.0 +715 12 model.embedding_dim 0.0 +715 12 model.scoring_fct_norm 2.0 +715 12 optimizer.lr 0.07271338450633566 +715 12 negative_sampler.num_negs_per_pos 85.0 +715 12 training.batch_size 1.0 +715 13 model.embedding_dim 2.0 +715 13 model.scoring_fct_norm 1.0 +715 13 optimizer.lr 0.08330905314319825 +715 13 negative_sampler.num_negs_per_pos 92.0 +715 13 training.batch_size 0.0 +715 14 model.embedding_dim 0.0 +715 14 model.scoring_fct_norm 1.0 +715 14 optimizer.lr 0.017961641290762734 +715 14 negative_sampler.num_negs_per_pos 25.0 +715 14 training.batch_size 2.0 +715 15 model.embedding_dim 1.0 +715 15 model.scoring_fct_norm 1.0 +715 15 optimizer.lr 0.01411201770514887 +715 15 negative_sampler.num_negs_per_pos 74.0 +715 15 training.batch_size 1.0 +715 16 model.embedding_dim 1.0 +715 16 model.scoring_fct_norm 2.0 +715 16 optimizer.lr 0.0013199616965220704 +715 16 negative_sampler.num_negs_per_pos 96.0 +715 16 training.batch_size 2.0 +715 17 model.embedding_dim 1.0 +715 17 model.scoring_fct_norm 2.0 +715 17 optimizer.lr 0.002814348134900388 +715 17 
negative_sampler.num_negs_per_pos 85.0 +715 17 training.batch_size 0.0 +715 1 dataset """wn18rr""" +715 1 model """structuredembedding""" +715 1 loss """bceaftersigmoid""" +715 1 regularizer """no""" +715 1 optimizer """adam""" +715 1 training_loop """owa""" +715 1 negative_sampler """basic""" +715 1 evaluator """rankbased""" +715 2 dataset """wn18rr""" +715 2 model """structuredembedding""" +715 2 loss """bceaftersigmoid""" +715 2 regularizer """no""" +715 2 optimizer """adam""" +715 2 training_loop """owa""" +715 2 negative_sampler """basic""" +715 2 evaluator """rankbased""" +715 3 dataset """wn18rr""" +715 3 model """structuredembedding""" +715 3 loss """bceaftersigmoid""" +715 3 regularizer """no""" +715 3 optimizer """adam""" +715 3 training_loop """owa""" +715 3 negative_sampler """basic""" +715 3 evaluator """rankbased""" +715 4 dataset """wn18rr""" +715 4 model """structuredembedding""" +715 4 loss """bceaftersigmoid""" +715 4 regularizer """no""" +715 4 optimizer """adam""" +715 4 training_loop """owa""" +715 4 negative_sampler """basic""" +715 4 evaluator """rankbased""" +715 5 dataset """wn18rr""" +715 5 model """structuredembedding""" +715 5 loss """bceaftersigmoid""" +715 5 regularizer """no""" +715 5 optimizer """adam""" +715 5 training_loop """owa""" +715 5 negative_sampler """basic""" +715 5 evaluator """rankbased""" +715 6 dataset """wn18rr""" +715 6 model """structuredembedding""" +715 6 loss """bceaftersigmoid""" +715 6 regularizer """no""" +715 6 optimizer """adam""" +715 6 training_loop """owa""" +715 6 negative_sampler """basic""" +715 6 evaluator """rankbased""" +715 7 dataset """wn18rr""" +715 7 model """structuredembedding""" +715 7 loss """bceaftersigmoid""" +715 7 regularizer """no""" +715 7 optimizer """adam""" +715 7 training_loop """owa""" +715 7 negative_sampler """basic""" +715 7 evaluator """rankbased""" +715 8 dataset """wn18rr""" +715 8 model """structuredembedding""" +715 8 loss """bceaftersigmoid""" +715 8 regularizer """no""" 
+715 8 optimizer """adam""" +715 8 training_loop """owa""" +715 8 negative_sampler """basic""" +715 8 evaluator """rankbased""" +715 9 dataset """wn18rr""" +715 9 model """structuredembedding""" +715 9 loss """bceaftersigmoid""" +715 9 regularizer """no""" +715 9 optimizer """adam""" +715 9 training_loop """owa""" +715 9 negative_sampler """basic""" +715 9 evaluator """rankbased""" +715 10 dataset """wn18rr""" +715 10 model """structuredembedding""" +715 10 loss """bceaftersigmoid""" +715 10 regularizer """no""" +715 10 optimizer """adam""" +715 10 training_loop """owa""" +715 10 negative_sampler """basic""" +715 10 evaluator """rankbased""" +715 11 dataset """wn18rr""" +715 11 model """structuredembedding""" +715 11 loss """bceaftersigmoid""" +715 11 regularizer """no""" +715 11 optimizer """adam""" +715 11 training_loop """owa""" +715 11 negative_sampler """basic""" +715 11 evaluator """rankbased""" +715 12 dataset """wn18rr""" +715 12 model """structuredembedding""" +715 12 loss """bceaftersigmoid""" +715 12 regularizer """no""" +715 12 optimizer """adam""" +715 12 training_loop """owa""" +715 12 negative_sampler """basic""" +715 12 evaluator """rankbased""" +715 13 dataset """wn18rr""" +715 13 model """structuredembedding""" +715 13 loss """bceaftersigmoid""" +715 13 regularizer """no""" +715 13 optimizer """adam""" +715 13 training_loop """owa""" +715 13 negative_sampler """basic""" +715 13 evaluator """rankbased""" +715 14 dataset """wn18rr""" +715 14 model """structuredembedding""" +715 14 loss """bceaftersigmoid""" +715 14 regularizer """no""" +715 14 optimizer """adam""" +715 14 training_loop """owa""" +715 14 negative_sampler """basic""" +715 14 evaluator """rankbased""" +715 15 dataset """wn18rr""" +715 15 model """structuredembedding""" +715 15 loss """bceaftersigmoid""" +715 15 regularizer """no""" +715 15 optimizer """adam""" +715 15 training_loop """owa""" +715 15 negative_sampler """basic""" +715 15 evaluator """rankbased""" +715 16 dataset 
"""wn18rr""" +715 16 model """structuredembedding""" +715 16 loss """bceaftersigmoid""" +715 16 regularizer """no""" +715 16 optimizer """adam""" +715 16 training_loop """owa""" +715 16 negative_sampler """basic""" +715 16 evaluator """rankbased""" +715 17 dataset """wn18rr""" +715 17 model """structuredembedding""" +715 17 loss """bceaftersigmoid""" +715 17 regularizer """no""" +715 17 optimizer """adam""" +715 17 training_loop """owa""" +715 17 negative_sampler """basic""" +715 17 evaluator """rankbased""" +716 1 model.embedding_dim 0.0 +716 1 model.scoring_fct_norm 2.0 +716 1 optimizer.lr 0.017006707155749392 +716 1 negative_sampler.num_negs_per_pos 38.0 +716 1 training.batch_size 2.0 +716 2 model.embedding_dim 1.0 +716 2 model.scoring_fct_norm 2.0 +716 2 optimizer.lr 0.006122505653679754 +716 2 negative_sampler.num_negs_per_pos 0.0 +716 2 training.batch_size 2.0 +716 3 model.embedding_dim 2.0 +716 3 model.scoring_fct_norm 1.0 +716 3 optimizer.lr 0.006137821915228032 +716 3 negative_sampler.num_negs_per_pos 60.0 +716 3 training.batch_size 2.0 +716 4 model.embedding_dim 1.0 +716 4 model.scoring_fct_norm 1.0 +716 4 optimizer.lr 0.08226123536248917 +716 4 negative_sampler.num_negs_per_pos 26.0 +716 4 training.batch_size 1.0 +716 5 model.embedding_dim 2.0 +716 5 model.scoring_fct_norm 1.0 +716 5 optimizer.lr 0.0032443783823801193 +716 5 negative_sampler.num_negs_per_pos 1.0 +716 5 training.batch_size 2.0 +716 6 model.embedding_dim 1.0 +716 6 model.scoring_fct_norm 2.0 +716 6 optimizer.lr 0.01900513793695018 +716 6 negative_sampler.num_negs_per_pos 7.0 +716 6 training.batch_size 0.0 +716 7 model.embedding_dim 2.0 +716 7 model.scoring_fct_norm 2.0 +716 7 optimizer.lr 0.06126557776171719 +716 7 negative_sampler.num_negs_per_pos 1.0 +716 7 training.batch_size 2.0 +716 8 model.embedding_dim 1.0 +716 8 model.scoring_fct_norm 1.0 +716 8 optimizer.lr 0.006077340787880126 +716 8 negative_sampler.num_negs_per_pos 39.0 +716 8 training.batch_size 1.0 +716 9 model.embedding_dim 
0.0 +716 9 model.scoring_fct_norm 2.0 +716 9 optimizer.lr 0.010322485192744814 +716 9 negative_sampler.num_negs_per_pos 86.0 +716 9 training.batch_size 2.0 +716 10 model.embedding_dim 1.0 +716 10 model.scoring_fct_norm 1.0 +716 10 optimizer.lr 0.008524223635142885 +716 10 negative_sampler.num_negs_per_pos 23.0 +716 10 training.batch_size 1.0 +716 11 model.embedding_dim 1.0 +716 11 model.scoring_fct_norm 1.0 +716 11 optimizer.lr 0.07401982741636086 +716 11 negative_sampler.num_negs_per_pos 63.0 +716 11 training.batch_size 0.0 +716 12 model.embedding_dim 1.0 +716 12 model.scoring_fct_norm 1.0 +716 12 optimizer.lr 0.004819885415024507 +716 12 negative_sampler.num_negs_per_pos 62.0 +716 12 training.batch_size 0.0 +716 13 model.embedding_dim 1.0 +716 13 model.scoring_fct_norm 2.0 +716 13 optimizer.lr 0.0019471850639060835 +716 13 negative_sampler.num_negs_per_pos 0.0 +716 13 training.batch_size 2.0 +716 14 model.embedding_dim 1.0 +716 14 model.scoring_fct_norm 2.0 +716 14 optimizer.lr 0.01267005536681811 +716 14 negative_sampler.num_negs_per_pos 25.0 +716 14 training.batch_size 2.0 +716 15 model.embedding_dim 0.0 +716 15 model.scoring_fct_norm 1.0 +716 15 optimizer.lr 0.005484629579932371 +716 15 negative_sampler.num_negs_per_pos 76.0 +716 15 training.batch_size 0.0 +716 16 model.embedding_dim 0.0 +716 16 model.scoring_fct_norm 1.0 +716 16 optimizer.lr 0.0030674273086001946 +716 16 negative_sampler.num_negs_per_pos 2.0 +716 16 training.batch_size 0.0 +716 17 model.embedding_dim 2.0 +716 17 model.scoring_fct_norm 2.0 +716 17 optimizer.lr 0.002404536085940406 +716 17 negative_sampler.num_negs_per_pos 34.0 +716 17 training.batch_size 1.0 +716 18 model.embedding_dim 1.0 +716 18 model.scoring_fct_norm 1.0 +716 18 optimizer.lr 0.0022688585732116282 +716 18 negative_sampler.num_negs_per_pos 62.0 +716 18 training.batch_size 0.0 +716 19 model.embedding_dim 2.0 +716 19 model.scoring_fct_norm 1.0 +716 19 optimizer.lr 0.08426324149980452 +716 19 negative_sampler.num_negs_per_pos 
73.0 +716 19 training.batch_size 0.0 +716 20 model.embedding_dim 1.0 +716 20 model.scoring_fct_norm 2.0 +716 20 optimizer.lr 0.09503087150810478 +716 20 negative_sampler.num_negs_per_pos 38.0 +716 20 training.batch_size 1.0 +716 21 model.embedding_dim 2.0 +716 21 model.scoring_fct_norm 1.0 +716 21 optimizer.lr 0.0070586213790922005 +716 21 negative_sampler.num_negs_per_pos 37.0 +716 21 training.batch_size 2.0 +716 22 model.embedding_dim 0.0 +716 22 model.scoring_fct_norm 1.0 +716 22 optimizer.lr 0.09106183735659494 +716 22 negative_sampler.num_negs_per_pos 19.0 +716 22 training.batch_size 1.0 +716 23 model.embedding_dim 2.0 +716 23 model.scoring_fct_norm 1.0 +716 23 optimizer.lr 0.0896314409482417 +716 23 negative_sampler.num_negs_per_pos 88.0 +716 23 training.batch_size 0.0 +716 24 model.embedding_dim 2.0 +716 24 model.scoring_fct_norm 1.0 +716 24 optimizer.lr 0.0016511857630293007 +716 24 negative_sampler.num_negs_per_pos 59.0 +716 24 training.batch_size 2.0 +716 25 model.embedding_dim 0.0 +716 25 model.scoring_fct_norm 2.0 +716 25 optimizer.lr 0.03031948920367528 +716 25 negative_sampler.num_negs_per_pos 21.0 +716 25 training.batch_size 0.0 +716 26 model.embedding_dim 0.0 +716 26 model.scoring_fct_norm 2.0 +716 26 optimizer.lr 0.005346855927477492 +716 26 negative_sampler.num_negs_per_pos 12.0 +716 26 training.batch_size 2.0 +716 27 model.embedding_dim 0.0 +716 27 model.scoring_fct_norm 2.0 +716 27 optimizer.lr 0.0066173130155942495 +716 27 negative_sampler.num_negs_per_pos 89.0 +716 27 training.batch_size 1.0 +716 28 model.embedding_dim 0.0 +716 28 model.scoring_fct_norm 1.0 +716 28 optimizer.lr 0.09026675004183521 +716 28 negative_sampler.num_negs_per_pos 72.0 +716 28 training.batch_size 1.0 +716 29 model.embedding_dim 1.0 +716 29 model.scoring_fct_norm 1.0 +716 29 optimizer.lr 0.09556479783781388 +716 29 negative_sampler.num_negs_per_pos 92.0 +716 29 training.batch_size 1.0 +716 30 model.embedding_dim 2.0 +716 30 model.scoring_fct_norm 1.0 +716 30 
optimizer.lr 0.021754471086282406 +716 30 negative_sampler.num_negs_per_pos 5.0 +716 30 training.batch_size 0.0 +716 31 model.embedding_dim 1.0 +716 31 model.scoring_fct_norm 2.0 +716 31 optimizer.lr 0.013725784797452059 +716 31 negative_sampler.num_negs_per_pos 38.0 +716 31 training.batch_size 0.0 +716 32 model.embedding_dim 1.0 +716 32 model.scoring_fct_norm 1.0 +716 32 optimizer.lr 0.003431771643059867 +716 32 negative_sampler.num_negs_per_pos 47.0 +716 32 training.batch_size 1.0 +716 33 model.embedding_dim 2.0 +716 33 model.scoring_fct_norm 1.0 +716 33 optimizer.lr 0.039456834043426564 +716 33 negative_sampler.num_negs_per_pos 52.0 +716 33 training.batch_size 0.0 +716 34 model.embedding_dim 0.0 +716 34 model.scoring_fct_norm 1.0 +716 34 optimizer.lr 0.07949280378858878 +716 34 negative_sampler.num_negs_per_pos 0.0 +716 34 training.batch_size 1.0 +716 35 model.embedding_dim 0.0 +716 35 model.scoring_fct_norm 1.0 +716 35 optimizer.lr 0.0016492430299270332 +716 35 negative_sampler.num_negs_per_pos 9.0 +716 35 training.batch_size 1.0 +716 36 model.embedding_dim 2.0 +716 36 model.scoring_fct_norm 1.0 +716 36 optimizer.lr 0.002525534835585049 +716 36 negative_sampler.num_negs_per_pos 76.0 +716 36 training.batch_size 0.0 +716 37 model.embedding_dim 1.0 +716 37 model.scoring_fct_norm 2.0 +716 37 optimizer.lr 0.004706653174656905 +716 37 negative_sampler.num_negs_per_pos 38.0 +716 37 training.batch_size 1.0 +716 38 model.embedding_dim 2.0 +716 38 model.scoring_fct_norm 2.0 +716 38 optimizer.lr 0.026291655329395917 +716 38 negative_sampler.num_negs_per_pos 16.0 +716 38 training.batch_size 0.0 +716 1 dataset """wn18rr""" +716 1 model """structuredembedding""" +716 1 loss """bceaftersigmoid""" +716 1 regularizer """no""" +716 1 optimizer """adam""" +716 1 training_loop """owa""" +716 1 negative_sampler """basic""" +716 1 evaluator """rankbased""" +716 2 dataset """wn18rr""" +716 2 model """structuredembedding""" +716 2 loss """bceaftersigmoid""" +716 2 regularizer """no""" 
+716 2 optimizer """adam""" +716 2 training_loop """owa""" +716 2 negative_sampler """basic""" +716 2 evaluator """rankbased""" +716 3 dataset """wn18rr""" +716 3 model """structuredembedding""" +716 3 loss """bceaftersigmoid""" +716 3 regularizer """no""" +716 3 optimizer """adam""" +716 3 training_loop """owa""" +716 3 negative_sampler """basic""" +716 3 evaluator """rankbased""" +716 4 dataset """wn18rr""" +716 4 model """structuredembedding""" +716 4 loss """bceaftersigmoid""" +716 4 regularizer """no""" +716 4 optimizer """adam""" +716 4 training_loop """owa""" +716 4 negative_sampler """basic""" +716 4 evaluator """rankbased""" +716 5 dataset """wn18rr""" +716 5 model """structuredembedding""" +716 5 loss """bceaftersigmoid""" +716 5 regularizer """no""" +716 5 optimizer """adam""" +716 5 training_loop """owa""" +716 5 negative_sampler """basic""" +716 5 evaluator """rankbased""" +716 6 dataset """wn18rr""" +716 6 model """structuredembedding""" +716 6 loss """bceaftersigmoid""" +716 6 regularizer """no""" +716 6 optimizer """adam""" +716 6 training_loop """owa""" +716 6 negative_sampler """basic""" +716 6 evaluator """rankbased""" +716 7 dataset """wn18rr""" +716 7 model """structuredembedding""" +716 7 loss """bceaftersigmoid""" +716 7 regularizer """no""" +716 7 optimizer """adam""" +716 7 training_loop """owa""" +716 7 negative_sampler """basic""" +716 7 evaluator """rankbased""" +716 8 dataset """wn18rr""" +716 8 model """structuredembedding""" +716 8 loss """bceaftersigmoid""" +716 8 regularizer """no""" +716 8 optimizer """adam""" +716 8 training_loop """owa""" +716 8 negative_sampler """basic""" +716 8 evaluator """rankbased""" +716 9 dataset """wn18rr""" +716 9 model """structuredembedding""" +716 9 loss """bceaftersigmoid""" +716 9 regularizer """no""" +716 9 optimizer """adam""" +716 9 training_loop """owa""" +716 9 negative_sampler """basic""" +716 9 evaluator """rankbased""" +716 10 dataset """wn18rr""" +716 10 model """structuredembedding""" 
+716 10 loss """bceaftersigmoid""" +716 10 regularizer """no""" +716 10 optimizer """adam""" +716 10 training_loop """owa""" +716 10 negative_sampler """basic""" +716 10 evaluator """rankbased""" +716 11 dataset """wn18rr""" +716 11 model """structuredembedding""" +716 11 loss """bceaftersigmoid""" +716 11 regularizer """no""" +716 11 optimizer """adam""" +716 11 training_loop """owa""" +716 11 negative_sampler """basic""" +716 11 evaluator """rankbased""" +716 12 dataset """wn18rr""" +716 12 model """structuredembedding""" +716 12 loss """bceaftersigmoid""" +716 12 regularizer """no""" +716 12 optimizer """adam""" +716 12 training_loop """owa""" +716 12 negative_sampler """basic""" +716 12 evaluator """rankbased""" +716 13 dataset """wn18rr""" +716 13 model """structuredembedding""" +716 13 loss """bceaftersigmoid""" +716 13 regularizer """no""" +716 13 optimizer """adam""" +716 13 training_loop """owa""" +716 13 negative_sampler """basic""" +716 13 evaluator """rankbased""" +716 14 dataset """wn18rr""" +716 14 model """structuredembedding""" +716 14 loss """bceaftersigmoid""" +716 14 regularizer """no""" +716 14 optimizer """adam""" +716 14 training_loop """owa""" +716 14 negative_sampler """basic""" +716 14 evaluator """rankbased""" +716 15 dataset """wn18rr""" +716 15 model """structuredembedding""" +716 15 loss """bceaftersigmoid""" +716 15 regularizer """no""" +716 15 optimizer """adam""" +716 15 training_loop """owa""" +716 15 negative_sampler """basic""" +716 15 evaluator """rankbased""" +716 16 dataset """wn18rr""" +716 16 model """structuredembedding""" +716 16 loss """bceaftersigmoid""" +716 16 regularizer """no""" +716 16 optimizer """adam""" +716 16 training_loop """owa""" +716 16 negative_sampler """basic""" +716 16 evaluator """rankbased""" +716 17 dataset """wn18rr""" +716 17 model """structuredembedding""" +716 17 loss """bceaftersigmoid""" +716 17 regularizer """no""" +716 17 optimizer """adam""" +716 17 training_loop """owa""" +716 17 
negative_sampler """basic""" +716 17 evaluator """rankbased""" +716 18 dataset """wn18rr""" +716 18 model """structuredembedding""" +716 18 loss """bceaftersigmoid""" +716 18 regularizer """no""" +716 18 optimizer """adam""" +716 18 training_loop """owa""" +716 18 negative_sampler """basic""" +716 18 evaluator """rankbased""" +716 19 dataset """wn18rr""" +716 19 model """structuredembedding""" +716 19 loss """bceaftersigmoid""" +716 19 regularizer """no""" +716 19 optimizer """adam""" +716 19 training_loop """owa""" +716 19 negative_sampler """basic""" +716 19 evaluator """rankbased""" +716 20 dataset """wn18rr""" +716 20 model """structuredembedding""" +716 20 loss """bceaftersigmoid""" +716 20 regularizer """no""" +716 20 optimizer """adam""" +716 20 training_loop """owa""" +716 20 negative_sampler """basic""" +716 20 evaluator """rankbased""" +716 21 dataset """wn18rr""" +716 21 model """structuredembedding""" +716 21 loss """bceaftersigmoid""" +716 21 regularizer """no""" +716 21 optimizer """adam""" +716 21 training_loop """owa""" +716 21 negative_sampler """basic""" +716 21 evaluator """rankbased""" +716 22 dataset """wn18rr""" +716 22 model """structuredembedding""" +716 22 loss """bceaftersigmoid""" +716 22 regularizer """no""" +716 22 optimizer """adam""" +716 22 training_loop """owa""" +716 22 negative_sampler """basic""" +716 22 evaluator """rankbased""" +716 23 dataset """wn18rr""" +716 23 model """structuredembedding""" +716 23 loss """bceaftersigmoid""" +716 23 regularizer """no""" +716 23 optimizer """adam""" +716 23 training_loop """owa""" +716 23 negative_sampler """basic""" +716 23 evaluator """rankbased""" +716 24 dataset """wn18rr""" +716 24 model """structuredembedding""" +716 24 loss """bceaftersigmoid""" +716 24 regularizer """no""" +716 24 optimizer """adam""" +716 24 training_loop """owa""" +716 24 negative_sampler """basic""" +716 24 evaluator """rankbased""" +716 25 dataset """wn18rr""" +716 25 model """structuredembedding""" +716 25 loss 
"""bceaftersigmoid""" +716 25 regularizer """no""" +716 25 optimizer """adam""" +716 25 training_loop """owa""" +716 25 negative_sampler """basic""" +716 25 evaluator """rankbased""" +716 26 dataset """wn18rr""" +716 26 model """structuredembedding""" +716 26 loss """bceaftersigmoid""" +716 26 regularizer """no""" +716 26 optimizer """adam""" +716 26 training_loop """owa""" +716 26 negative_sampler """basic""" +716 26 evaluator """rankbased""" +716 27 dataset """wn18rr""" +716 27 model """structuredembedding""" +716 27 loss """bceaftersigmoid""" +716 27 regularizer """no""" +716 27 optimizer """adam""" +716 27 training_loop """owa""" +716 27 negative_sampler """basic""" +716 27 evaluator """rankbased""" +716 28 dataset """wn18rr""" +716 28 model """structuredembedding""" +716 28 loss """bceaftersigmoid""" +716 28 regularizer """no""" +716 28 optimizer """adam""" +716 28 training_loop """owa""" +716 28 negative_sampler """basic""" +716 28 evaluator """rankbased""" +716 29 dataset """wn18rr""" +716 29 model """structuredembedding""" +716 29 loss """bceaftersigmoid""" +716 29 regularizer """no""" +716 29 optimizer """adam""" +716 29 training_loop """owa""" +716 29 negative_sampler """basic""" +716 29 evaluator """rankbased""" +716 30 dataset """wn18rr""" +716 30 model """structuredembedding""" +716 30 loss """bceaftersigmoid""" +716 30 regularizer """no""" +716 30 optimizer """adam""" +716 30 training_loop """owa""" +716 30 negative_sampler """basic""" +716 30 evaluator """rankbased""" +716 31 dataset """wn18rr""" +716 31 model """structuredembedding""" +716 31 loss """bceaftersigmoid""" +716 31 regularizer """no""" +716 31 optimizer """adam""" +716 31 training_loop """owa""" +716 31 negative_sampler """basic""" +716 31 evaluator """rankbased""" +716 32 dataset """wn18rr""" +716 32 model """structuredembedding""" +716 32 loss """bceaftersigmoid""" +716 32 regularizer """no""" +716 32 optimizer """adam""" +716 32 training_loop """owa""" +716 32 negative_sampler 
"""basic""" +716 32 evaluator """rankbased""" +716 33 dataset """wn18rr""" +716 33 model """structuredembedding""" +716 33 loss """bceaftersigmoid""" +716 33 regularizer """no""" +716 33 optimizer """adam""" +716 33 training_loop """owa""" +716 33 negative_sampler """basic""" +716 33 evaluator """rankbased""" +716 34 dataset """wn18rr""" +716 34 model """structuredembedding""" +716 34 loss """bceaftersigmoid""" +716 34 regularizer """no""" +716 34 optimizer """adam""" +716 34 training_loop """owa""" +716 34 negative_sampler """basic""" +716 34 evaluator """rankbased""" +716 35 dataset """wn18rr""" +716 35 model """structuredembedding""" +716 35 loss """bceaftersigmoid""" +716 35 regularizer """no""" +716 35 optimizer """adam""" +716 35 training_loop """owa""" +716 35 negative_sampler """basic""" +716 35 evaluator """rankbased""" +716 36 dataset """wn18rr""" +716 36 model """structuredembedding""" +716 36 loss """bceaftersigmoid""" +716 36 regularizer """no""" +716 36 optimizer """adam""" +716 36 training_loop """owa""" +716 36 negative_sampler """basic""" +716 36 evaluator """rankbased""" +716 37 dataset """wn18rr""" +716 37 model """structuredembedding""" +716 37 loss """bceaftersigmoid""" +716 37 regularizer """no""" +716 37 optimizer """adam""" +716 37 training_loop """owa""" +716 37 negative_sampler """basic""" +716 37 evaluator """rankbased""" +716 38 dataset """wn18rr""" +716 38 model """structuredembedding""" +716 38 loss """bceaftersigmoid""" +716 38 regularizer """no""" +716 38 optimizer """adam""" +716 38 training_loop """owa""" +716 38 negative_sampler """basic""" +716 38 evaluator """rankbased""" +717 1 model.embedding_dim 2.0 +717 1 model.scoring_fct_norm 1.0 +717 1 optimizer.lr 0.00172164120195968 +717 1 negative_sampler.num_negs_per_pos 94.0 +717 1 training.batch_size 1.0 +717 2 model.embedding_dim 0.0 +717 2 model.scoring_fct_norm 1.0 +717 2 optimizer.lr 0.010853465889179577 +717 2 negative_sampler.num_negs_per_pos 20.0 +717 2 training.batch_size 
1.0 +717 3 model.embedding_dim 0.0 +717 3 model.scoring_fct_norm 1.0 +717 3 optimizer.lr 0.0010222652254190158 +717 3 negative_sampler.num_negs_per_pos 83.0 +717 3 training.batch_size 0.0 +717 4 model.embedding_dim 1.0 +717 4 model.scoring_fct_norm 1.0 +717 4 optimizer.lr 0.005363895538799881 +717 4 negative_sampler.num_negs_per_pos 49.0 +717 4 training.batch_size 1.0 +717 5 model.embedding_dim 1.0 +717 5 model.scoring_fct_norm 1.0 +717 5 optimizer.lr 0.04381515938338281 +717 5 negative_sampler.num_negs_per_pos 13.0 +717 5 training.batch_size 1.0 +717 6 model.embedding_dim 1.0 +717 6 model.scoring_fct_norm 2.0 +717 6 optimizer.lr 0.003274309177513038 +717 6 negative_sampler.num_negs_per_pos 15.0 +717 6 training.batch_size 0.0 +717 7 model.embedding_dim 1.0 +717 7 model.scoring_fct_norm 1.0 +717 7 optimizer.lr 0.002115731167672687 +717 7 negative_sampler.num_negs_per_pos 34.0 +717 7 training.batch_size 0.0 +717 8 model.embedding_dim 1.0 +717 8 model.scoring_fct_norm 2.0 +717 8 optimizer.lr 0.041626418957546776 +717 8 negative_sampler.num_negs_per_pos 15.0 +717 8 training.batch_size 0.0 +717 9 model.embedding_dim 2.0 +717 9 model.scoring_fct_norm 1.0 +717 9 optimizer.lr 0.018602647815676977 +717 9 negative_sampler.num_negs_per_pos 55.0 +717 9 training.batch_size 0.0 +717 10 model.embedding_dim 2.0 +717 10 model.scoring_fct_norm 1.0 +717 10 optimizer.lr 0.02069330620000196 +717 10 negative_sampler.num_negs_per_pos 12.0 +717 10 training.batch_size 0.0 +717 11 model.embedding_dim 0.0 +717 11 model.scoring_fct_norm 1.0 +717 11 optimizer.lr 0.07339086207168291 +717 11 negative_sampler.num_negs_per_pos 26.0 +717 11 training.batch_size 1.0 +717 12 model.embedding_dim 1.0 +717 12 model.scoring_fct_norm 2.0 +717 12 optimizer.lr 0.04006823805350628 +717 12 negative_sampler.num_negs_per_pos 10.0 +717 12 training.batch_size 0.0 +717 13 model.embedding_dim 0.0 +717 13 model.scoring_fct_norm 2.0 +717 13 optimizer.lr 0.011567715839101244 +717 13 negative_sampler.num_negs_per_pos 
76.0 +717 13 training.batch_size 1.0 +717 14 model.embedding_dim 0.0 +717 14 model.scoring_fct_norm 1.0 +717 14 optimizer.lr 0.005727823227751607 +717 14 negative_sampler.num_negs_per_pos 69.0 +717 14 training.batch_size 2.0 +717 15 model.embedding_dim 1.0 +717 15 model.scoring_fct_norm 1.0 +717 15 optimizer.lr 0.0011522349367530757 +717 15 negative_sampler.num_negs_per_pos 42.0 +717 15 training.batch_size 2.0 +717 16 model.embedding_dim 1.0 +717 16 model.scoring_fct_norm 1.0 +717 16 optimizer.lr 0.013714287343875366 +717 16 negative_sampler.num_negs_per_pos 86.0 +717 16 training.batch_size 1.0 +717 17 model.embedding_dim 0.0 +717 17 model.scoring_fct_norm 2.0 +717 17 optimizer.lr 0.016233104832806912 +717 17 negative_sampler.num_negs_per_pos 73.0 +717 17 training.batch_size 1.0 +717 18 model.embedding_dim 0.0 +717 18 model.scoring_fct_norm 1.0 +717 18 optimizer.lr 0.0010560786054690208 +717 18 negative_sampler.num_negs_per_pos 65.0 +717 18 training.batch_size 2.0 +717 19 model.embedding_dim 1.0 +717 19 model.scoring_fct_norm 1.0 +717 19 optimizer.lr 0.0022936932256615093 +717 19 negative_sampler.num_negs_per_pos 71.0 +717 19 training.batch_size 1.0 +717 20 model.embedding_dim 0.0 +717 20 model.scoring_fct_norm 2.0 +717 20 optimizer.lr 0.0397074485731541 +717 20 negative_sampler.num_negs_per_pos 86.0 +717 20 training.batch_size 1.0 +717 21 model.embedding_dim 0.0 +717 21 model.scoring_fct_norm 1.0 +717 21 optimizer.lr 0.007451406029517086 +717 21 negative_sampler.num_negs_per_pos 35.0 +717 21 training.batch_size 1.0 +717 22 model.embedding_dim 2.0 +717 22 model.scoring_fct_norm 2.0 +717 22 optimizer.lr 0.07045724170275178 +717 22 negative_sampler.num_negs_per_pos 88.0 +717 22 training.batch_size 0.0 +717 1 dataset """wn18rr""" +717 1 model """structuredembedding""" +717 1 loss """softplus""" +717 1 regularizer """no""" +717 1 optimizer """adam""" +717 1 training_loop """owa""" +717 1 negative_sampler """basic""" +717 1 evaluator """rankbased""" +717 2 dataset 
"""wn18rr""" +717 2 model """structuredembedding""" +717 2 loss """softplus""" +717 2 regularizer """no""" +717 2 optimizer """adam""" +717 2 training_loop """owa""" +717 2 negative_sampler """basic""" +717 2 evaluator """rankbased""" +717 3 dataset """wn18rr""" +717 3 model """structuredembedding""" +717 3 loss """softplus""" +717 3 regularizer """no""" +717 3 optimizer """adam""" +717 3 training_loop """owa""" +717 3 negative_sampler """basic""" +717 3 evaluator """rankbased""" +717 4 dataset """wn18rr""" +717 4 model """structuredembedding""" +717 4 loss """softplus""" +717 4 regularizer """no""" +717 4 optimizer """adam""" +717 4 training_loop """owa""" +717 4 negative_sampler """basic""" +717 4 evaluator """rankbased""" +717 5 dataset """wn18rr""" +717 5 model """structuredembedding""" +717 5 loss """softplus""" +717 5 regularizer """no""" +717 5 optimizer """adam""" +717 5 training_loop """owa""" +717 5 negative_sampler """basic""" +717 5 evaluator """rankbased""" +717 6 dataset """wn18rr""" +717 6 model """structuredembedding""" +717 6 loss """softplus""" +717 6 regularizer """no""" +717 6 optimizer """adam""" +717 6 training_loop """owa""" +717 6 negative_sampler """basic""" +717 6 evaluator """rankbased""" +717 7 dataset """wn18rr""" +717 7 model """structuredembedding""" +717 7 loss """softplus""" +717 7 regularizer """no""" +717 7 optimizer """adam""" +717 7 training_loop """owa""" +717 7 negative_sampler """basic""" +717 7 evaluator """rankbased""" +717 8 dataset """wn18rr""" +717 8 model """structuredembedding""" +717 8 loss """softplus""" +717 8 regularizer """no""" +717 8 optimizer """adam""" +717 8 training_loop """owa""" +717 8 negative_sampler """basic""" +717 8 evaluator """rankbased""" +717 9 dataset """wn18rr""" +717 9 model """structuredembedding""" +717 9 loss """softplus""" +717 9 regularizer """no""" +717 9 optimizer """adam""" +717 9 training_loop """owa""" +717 9 negative_sampler """basic""" +717 9 evaluator """rankbased""" +717 10 
dataset """wn18rr""" +717 10 model """structuredembedding""" +717 10 loss """softplus""" +717 10 regularizer """no""" +717 10 optimizer """adam""" +717 10 training_loop """owa""" +717 10 negative_sampler """basic""" +717 10 evaluator """rankbased""" +717 11 dataset """wn18rr""" +717 11 model """structuredembedding""" +717 11 loss """softplus""" +717 11 regularizer """no""" +717 11 optimizer """adam""" +717 11 training_loop """owa""" +717 11 negative_sampler """basic""" +717 11 evaluator """rankbased""" +717 12 dataset """wn18rr""" +717 12 model """structuredembedding""" +717 12 loss """softplus""" +717 12 regularizer """no""" +717 12 optimizer """adam""" +717 12 training_loop """owa""" +717 12 negative_sampler """basic""" +717 12 evaluator """rankbased""" +717 13 dataset """wn18rr""" +717 13 model """structuredembedding""" +717 13 loss """softplus""" +717 13 regularizer """no""" +717 13 optimizer """adam""" +717 13 training_loop """owa""" +717 13 negative_sampler """basic""" +717 13 evaluator """rankbased""" +717 14 dataset """wn18rr""" +717 14 model """structuredembedding""" +717 14 loss """softplus""" +717 14 regularizer """no""" +717 14 optimizer """adam""" +717 14 training_loop """owa""" +717 14 negative_sampler """basic""" +717 14 evaluator """rankbased""" +717 15 dataset """wn18rr""" +717 15 model """structuredembedding""" +717 15 loss """softplus""" +717 15 regularizer """no""" +717 15 optimizer """adam""" +717 15 training_loop """owa""" +717 15 negative_sampler """basic""" +717 15 evaluator """rankbased""" +717 16 dataset """wn18rr""" +717 16 model """structuredembedding""" +717 16 loss """softplus""" +717 16 regularizer """no""" +717 16 optimizer """adam""" +717 16 training_loop """owa""" +717 16 negative_sampler """basic""" +717 16 evaluator """rankbased""" +717 17 dataset """wn18rr""" +717 17 model """structuredembedding""" +717 17 loss """softplus""" +717 17 regularizer """no""" +717 17 optimizer """adam""" +717 17 training_loop """owa""" +717 17 
negative_sampler """basic""" +717 17 evaluator """rankbased""" +717 18 dataset """wn18rr""" +717 18 model """structuredembedding""" +717 18 loss """softplus""" +717 18 regularizer """no""" +717 18 optimizer """adam""" +717 18 training_loop """owa""" +717 18 negative_sampler """basic""" +717 18 evaluator """rankbased""" +717 19 dataset """wn18rr""" +717 19 model """structuredembedding""" +717 19 loss """softplus""" +717 19 regularizer """no""" +717 19 optimizer """adam""" +717 19 training_loop """owa""" +717 19 negative_sampler """basic""" +717 19 evaluator """rankbased""" +717 20 dataset """wn18rr""" +717 20 model """structuredembedding""" +717 20 loss """softplus""" +717 20 regularizer """no""" +717 20 optimizer """adam""" +717 20 training_loop """owa""" +717 20 negative_sampler """basic""" +717 20 evaluator """rankbased""" +717 21 dataset """wn18rr""" +717 21 model """structuredembedding""" +717 21 loss """softplus""" +717 21 regularizer """no""" +717 21 optimizer """adam""" +717 21 training_loop """owa""" +717 21 negative_sampler """basic""" +717 21 evaluator """rankbased""" +717 22 dataset """wn18rr""" +717 22 model """structuredembedding""" +717 22 loss """softplus""" +717 22 regularizer """no""" +717 22 optimizer """adam""" +717 22 training_loop """owa""" +717 22 negative_sampler """basic""" +717 22 evaluator """rankbased""" +718 1 model.embedding_dim 2.0 +718 1 model.scoring_fct_norm 2.0 +718 1 optimizer.lr 0.003719735859999347 +718 1 negative_sampler.num_negs_per_pos 95.0 +718 1 training.batch_size 2.0 +718 2 model.embedding_dim 2.0 +718 2 model.scoring_fct_norm 2.0 +718 2 optimizer.lr 0.04881984167493121 +718 2 negative_sampler.num_negs_per_pos 4.0 +718 2 training.batch_size 2.0 +718 3 model.embedding_dim 0.0 +718 3 model.scoring_fct_norm 1.0 +718 3 optimizer.lr 0.005263557198243585 +718 3 negative_sampler.num_negs_per_pos 22.0 +718 3 training.batch_size 0.0 +718 4 model.embedding_dim 2.0 +718 4 model.scoring_fct_norm 2.0 +718 4 optimizer.lr 
0.002042857993686939 +718 4 negative_sampler.num_negs_per_pos 86.0 +718 4 training.batch_size 0.0 +718 5 model.embedding_dim 2.0 +718 5 model.scoring_fct_norm 1.0 +718 5 optimizer.lr 0.0855445318648251 +718 5 negative_sampler.num_negs_per_pos 35.0 +718 5 training.batch_size 2.0 +718 6 model.embedding_dim 1.0 +718 6 model.scoring_fct_norm 2.0 +718 6 optimizer.lr 0.004382266101020198 +718 6 negative_sampler.num_negs_per_pos 21.0 +718 6 training.batch_size 0.0 +718 7 model.embedding_dim 1.0 +718 7 model.scoring_fct_norm 1.0 +718 7 optimizer.lr 0.08758987475283596 +718 7 negative_sampler.num_negs_per_pos 91.0 +718 7 training.batch_size 0.0 +718 8 model.embedding_dim 1.0 +718 8 model.scoring_fct_norm 1.0 +718 8 optimizer.lr 0.0011734650851372198 +718 8 negative_sampler.num_negs_per_pos 82.0 +718 8 training.batch_size 0.0 +718 9 model.embedding_dim 2.0 +718 9 model.scoring_fct_norm 1.0 +718 9 optimizer.lr 0.06741472784275057 +718 9 negative_sampler.num_negs_per_pos 46.0 +718 9 training.batch_size 2.0 +718 10 model.embedding_dim 1.0 +718 10 model.scoring_fct_norm 1.0 +718 10 optimizer.lr 0.003183846752164095 +718 10 negative_sampler.num_negs_per_pos 30.0 +718 10 training.batch_size 2.0 +718 11 model.embedding_dim 1.0 +718 11 model.scoring_fct_norm 1.0 +718 11 optimizer.lr 0.006920722426358878 +718 11 negative_sampler.num_negs_per_pos 99.0 +718 11 training.batch_size 0.0 +718 12 model.embedding_dim 2.0 +718 12 model.scoring_fct_norm 1.0 +718 12 optimizer.lr 0.005186477241665115 +718 12 negative_sampler.num_negs_per_pos 92.0 +718 12 training.batch_size 0.0 +718 13 model.embedding_dim 0.0 +718 13 model.scoring_fct_norm 1.0 +718 13 optimizer.lr 0.06627086287413105 +718 13 negative_sampler.num_negs_per_pos 57.0 +718 13 training.batch_size 1.0 +718 14 model.embedding_dim 0.0 +718 14 model.scoring_fct_norm 2.0 +718 14 optimizer.lr 0.07220039433052487 +718 14 negative_sampler.num_negs_per_pos 32.0 +718 14 training.batch_size 1.0 +718 15 model.embedding_dim 0.0 +718 15 
model.scoring_fct_norm 2.0 +718 15 optimizer.lr 0.00962175560244216 +718 15 negative_sampler.num_negs_per_pos 65.0 +718 15 training.batch_size 0.0 +718 16 model.embedding_dim 0.0 +718 16 model.scoring_fct_norm 2.0 +718 16 optimizer.lr 0.013293101434683577 +718 16 negative_sampler.num_negs_per_pos 59.0 +718 16 training.batch_size 2.0 +718 17 model.embedding_dim 0.0 +718 17 model.scoring_fct_norm 2.0 +718 17 optimizer.lr 0.017074071324027495 +718 17 negative_sampler.num_negs_per_pos 38.0 +718 17 training.batch_size 2.0 +718 18 model.embedding_dim 2.0 +718 18 model.scoring_fct_norm 1.0 +718 18 optimizer.lr 0.002597347992626698 +718 18 negative_sampler.num_negs_per_pos 30.0 +718 18 training.batch_size 0.0 +718 19 model.embedding_dim 1.0 +718 19 model.scoring_fct_norm 2.0 +718 19 optimizer.lr 0.0019502082707201654 +718 19 negative_sampler.num_negs_per_pos 65.0 +718 19 training.batch_size 2.0 +718 20 model.embedding_dim 2.0 +718 20 model.scoring_fct_norm 1.0 +718 20 optimizer.lr 0.0014273736479620768 +718 20 negative_sampler.num_negs_per_pos 17.0 +718 20 training.batch_size 0.0 +718 21 model.embedding_dim 0.0 +718 21 model.scoring_fct_norm 2.0 +718 21 optimizer.lr 0.006008225698275927 +718 21 negative_sampler.num_negs_per_pos 87.0 +718 21 training.batch_size 1.0 +718 1 dataset """wn18rr""" +718 1 model """structuredembedding""" +718 1 loss """softplus""" +718 1 regularizer """no""" +718 1 optimizer """adam""" +718 1 training_loop """owa""" +718 1 negative_sampler """basic""" +718 1 evaluator """rankbased""" +718 2 dataset """wn18rr""" +718 2 model """structuredembedding""" +718 2 loss """softplus""" +718 2 regularizer """no""" +718 2 optimizer """adam""" +718 2 training_loop """owa""" +718 2 negative_sampler """basic""" +718 2 evaluator """rankbased""" +718 3 dataset """wn18rr""" +718 3 model """structuredembedding""" +718 3 loss """softplus""" +718 3 regularizer """no""" +718 3 optimizer """adam""" +718 3 training_loop """owa""" +718 3 negative_sampler """basic""" +718 
3 evaluator """rankbased""" +718 4 dataset """wn18rr""" +718 4 model """structuredembedding""" +718 4 loss """softplus""" +718 4 regularizer """no""" +718 4 optimizer """adam""" +718 4 training_loop """owa""" +718 4 negative_sampler """basic""" +718 4 evaluator """rankbased""" +718 5 dataset """wn18rr""" +718 5 model """structuredembedding""" +718 5 loss """softplus""" +718 5 regularizer """no""" +718 5 optimizer """adam""" +718 5 training_loop """owa""" +718 5 negative_sampler """basic""" +718 5 evaluator """rankbased""" +718 6 dataset """wn18rr""" +718 6 model """structuredembedding""" +718 6 loss """softplus""" +718 6 regularizer """no""" +718 6 optimizer """adam""" +718 6 training_loop """owa""" +718 6 negative_sampler """basic""" +718 6 evaluator """rankbased""" +718 7 dataset """wn18rr""" +718 7 model """structuredembedding""" +718 7 loss """softplus""" +718 7 regularizer """no""" +718 7 optimizer """adam""" +718 7 training_loop """owa""" +718 7 negative_sampler """basic""" +718 7 evaluator """rankbased""" +718 8 dataset """wn18rr""" +718 8 model """structuredembedding""" +718 8 loss """softplus""" +718 8 regularizer """no""" +718 8 optimizer """adam""" +718 8 training_loop """owa""" +718 8 negative_sampler """basic""" +718 8 evaluator """rankbased""" +718 9 dataset """wn18rr""" +718 9 model """structuredembedding""" +718 9 loss """softplus""" +718 9 regularizer """no""" +718 9 optimizer """adam""" +718 9 training_loop """owa""" +718 9 negative_sampler """basic""" +718 9 evaluator """rankbased""" +718 10 dataset """wn18rr""" +718 10 model """structuredembedding""" +718 10 loss """softplus""" +718 10 regularizer """no""" +718 10 optimizer """adam""" +718 10 training_loop """owa""" +718 10 negative_sampler """basic""" +718 10 evaluator """rankbased""" +718 11 dataset """wn18rr""" +718 11 model """structuredembedding""" +718 11 loss """softplus""" +718 11 regularizer """no""" +718 11 optimizer """adam""" +718 11 training_loop """owa""" +718 11 negative_sampler 
"""basic""" +718 11 evaluator """rankbased""" +718 12 dataset """wn18rr""" +718 12 model """structuredembedding""" +718 12 loss """softplus""" +718 12 regularizer """no""" +718 12 optimizer """adam""" +718 12 training_loop """owa""" +718 12 negative_sampler """basic""" +718 12 evaluator """rankbased""" +718 13 dataset """wn18rr""" +718 13 model """structuredembedding""" +718 13 loss """softplus""" +718 13 regularizer """no""" +718 13 optimizer """adam""" +718 13 training_loop """owa""" +718 13 negative_sampler """basic""" +718 13 evaluator """rankbased""" +718 14 dataset """wn18rr""" +718 14 model """structuredembedding""" +718 14 loss """softplus""" +718 14 regularizer """no""" +718 14 optimizer """adam""" +718 14 training_loop """owa""" +718 14 negative_sampler """basic""" +718 14 evaluator """rankbased""" +718 15 dataset """wn18rr""" +718 15 model """structuredembedding""" +718 15 loss """softplus""" +718 15 regularizer """no""" +718 15 optimizer """adam""" +718 15 training_loop """owa""" +718 15 negative_sampler """basic""" +718 15 evaluator """rankbased""" +718 16 dataset """wn18rr""" +718 16 model """structuredembedding""" +718 16 loss """softplus""" +718 16 regularizer """no""" +718 16 optimizer """adam""" +718 16 training_loop """owa""" +718 16 negative_sampler """basic""" +718 16 evaluator """rankbased""" +718 17 dataset """wn18rr""" +718 17 model """structuredembedding""" +718 17 loss """softplus""" +718 17 regularizer """no""" +718 17 optimizer """adam""" +718 17 training_loop """owa""" +718 17 negative_sampler """basic""" +718 17 evaluator """rankbased""" +718 18 dataset """wn18rr""" +718 18 model """structuredembedding""" +718 18 loss """softplus""" +718 18 regularizer """no""" +718 18 optimizer """adam""" +718 18 training_loop """owa""" +718 18 negative_sampler """basic""" +718 18 evaluator """rankbased""" +718 19 dataset """wn18rr""" +718 19 model """structuredembedding""" +718 19 loss """softplus""" +718 19 regularizer """no""" +718 19 optimizer 
"""adam""" +718 19 training_loop """owa""" +718 19 negative_sampler """basic""" +718 19 evaluator """rankbased""" +718 20 dataset """wn18rr""" +718 20 model """structuredembedding""" +718 20 loss """softplus""" +718 20 regularizer """no""" +718 20 optimizer """adam""" +718 20 training_loop """owa""" +718 20 negative_sampler """basic""" +718 20 evaluator """rankbased""" +718 21 dataset """wn18rr""" +718 21 model """structuredembedding""" +718 21 loss """softplus""" +718 21 regularizer """no""" +718 21 optimizer """adam""" +718 21 training_loop """owa""" +718 21 negative_sampler """basic""" +718 21 evaluator """rankbased""" +719 1 model.embedding_dim 0.0 +719 1 model.scoring_fct_norm 2.0 +719 1 optimizer.lr 0.015972750029774354 +719 1 negative_sampler.num_negs_per_pos 8.0 +719 1 training.batch_size 0.0 +719 2 model.embedding_dim 1.0 +719 2 model.scoring_fct_norm 2.0 +719 2 optimizer.lr 0.01678953003324919 +719 2 negative_sampler.num_negs_per_pos 37.0 +719 2 training.batch_size 2.0 +719 3 model.embedding_dim 0.0 +719 3 model.scoring_fct_norm 2.0 +719 3 optimizer.lr 0.0034144784252933087 +719 3 negative_sampler.num_negs_per_pos 14.0 +719 3 training.batch_size 2.0 +719 4 model.embedding_dim 1.0 +719 4 model.scoring_fct_norm 1.0 +719 4 optimizer.lr 0.007982715606639127 +719 4 negative_sampler.num_negs_per_pos 4.0 +719 4 training.batch_size 2.0 +719 5 model.embedding_dim 1.0 +719 5 model.scoring_fct_norm 1.0 +719 5 optimizer.lr 0.010822215355716057 +719 5 negative_sampler.num_negs_per_pos 48.0 +719 5 training.batch_size 3.0 +719 6 model.embedding_dim 1.0 +719 6 model.scoring_fct_norm 1.0 +719 6 optimizer.lr 0.018962646808601165 +719 6 negative_sampler.num_negs_per_pos 2.0 +719 6 training.batch_size 1.0 +719 7 model.embedding_dim 1.0 +719 7 model.scoring_fct_norm 2.0 +719 7 optimizer.lr 0.0019685809739572627 +719 7 negative_sampler.num_negs_per_pos 0.0 +719 7 training.batch_size 1.0 +719 8 model.embedding_dim 1.0 +719 8 model.scoring_fct_norm 2.0 +719 8 optimizer.lr 
0.06718871703244565 +719 8 negative_sampler.num_negs_per_pos 6.0 +719 8 training.batch_size 0.0 +719 9 model.embedding_dim 0.0 +719 9 model.scoring_fct_norm 1.0 +719 9 optimizer.lr 0.012395184351068018 +719 9 negative_sampler.num_negs_per_pos 8.0 +719 9 training.batch_size 0.0 +719 1 dataset """yago310""" +719 1 model """structuredembedding""" +719 1 loss """softplus""" +719 1 regularizer """no""" +719 1 optimizer """adam""" +719 1 training_loop """owa""" +719 1 negative_sampler """basic""" +719 1 evaluator """rankbased""" +719 2 dataset """yago310""" +719 2 model """structuredembedding""" +719 2 loss """softplus""" +719 2 regularizer """no""" +719 2 optimizer """adam""" +719 2 training_loop """owa""" +719 2 negative_sampler """basic""" +719 2 evaluator """rankbased""" +719 3 dataset """yago310""" +719 3 model """structuredembedding""" +719 3 loss """softplus""" +719 3 regularizer """no""" +719 3 optimizer """adam""" +719 3 training_loop """owa""" +719 3 negative_sampler """basic""" +719 3 evaluator """rankbased""" +719 4 dataset """yago310""" +719 4 model """structuredembedding""" +719 4 loss """softplus""" +719 4 regularizer """no""" +719 4 optimizer """adam""" +719 4 training_loop """owa""" +719 4 negative_sampler """basic""" +719 4 evaluator """rankbased""" +719 5 dataset """yago310""" +719 5 model """structuredembedding""" +719 5 loss """softplus""" +719 5 regularizer """no""" +719 5 optimizer """adam""" +719 5 training_loop """owa""" +719 5 negative_sampler """basic""" +719 5 evaluator """rankbased""" +719 6 dataset """yago310""" +719 6 model """structuredembedding""" +719 6 loss """softplus""" +719 6 regularizer """no""" +719 6 optimizer """adam""" +719 6 training_loop """owa""" +719 6 negative_sampler """basic""" +719 6 evaluator """rankbased""" +719 7 dataset """yago310""" +719 7 model """structuredembedding""" +719 7 loss """softplus""" +719 7 regularizer """no""" +719 7 optimizer """adam""" +719 7 training_loop """owa""" +719 7 negative_sampler 
"""basic""" +719 7 evaluator """rankbased""" +719 8 dataset """yago310""" +719 8 model """structuredembedding""" +719 8 loss """softplus""" +719 8 regularizer """no""" +719 8 optimizer """adam""" +719 8 training_loop """owa""" +719 8 negative_sampler """basic""" +719 8 evaluator """rankbased""" +719 9 dataset """yago310""" +719 9 model """structuredembedding""" +719 9 loss """softplus""" +719 9 regularizer """no""" +719 9 optimizer """adam""" +719 9 training_loop """owa""" +719 9 negative_sampler """basic""" +719 9 evaluator """rankbased""" +720 1 model.embedding_dim 1.0 +720 1 model.scoring_fct_norm 1.0 +720 1 optimizer.lr 0.08467230844803912 +720 1 negative_sampler.num_negs_per_pos 42.0 +720 1 training.batch_size 1.0 +720 2 model.embedding_dim 1.0 +720 2 model.scoring_fct_norm 2.0 +720 2 optimizer.lr 0.00984162077590568 +720 2 negative_sampler.num_negs_per_pos 38.0 +720 2 training.batch_size 2.0 +720 3 model.embedding_dim 2.0 +720 3 model.scoring_fct_norm 1.0 +720 3 optimizer.lr 0.01886642257613194 +720 3 negative_sampler.num_negs_per_pos 8.0 +720 3 training.batch_size 0.0 +720 4 model.embedding_dim 0.0 +720 4 model.scoring_fct_norm 1.0 +720 4 optimizer.lr 0.017465196906489566 +720 4 negative_sampler.num_negs_per_pos 28.0 +720 4 training.batch_size 0.0 +720 5 model.embedding_dim 2.0 +720 5 model.scoring_fct_norm 1.0 +720 5 optimizer.lr 0.0010665444000247791 +720 5 negative_sampler.num_negs_per_pos 13.0 +720 5 training.batch_size 1.0 +720 1 dataset """yago310""" +720 1 model """structuredembedding""" +720 1 loss """softplus""" +720 1 regularizer """no""" +720 1 optimizer """adam""" +720 1 training_loop """owa""" +720 1 negative_sampler """basic""" +720 1 evaluator """rankbased""" +720 2 dataset """yago310""" +720 2 model """structuredembedding""" +720 2 loss """softplus""" +720 2 regularizer """no""" +720 2 optimizer """adam""" +720 2 training_loop """owa""" +720 2 negative_sampler """basic""" +720 2 evaluator """rankbased""" +720 3 dataset """yago310""" +720 3 
model """structuredembedding""" +720 3 loss """softplus""" +720 3 regularizer """no""" +720 3 optimizer """adam""" +720 3 training_loop """owa""" +720 3 negative_sampler """basic""" +720 3 evaluator """rankbased""" +720 4 dataset """yago310""" +720 4 model """structuredembedding""" +720 4 loss """softplus""" +720 4 regularizer """no""" +720 4 optimizer """adam""" +720 4 training_loop """owa""" +720 4 negative_sampler """basic""" +720 4 evaluator """rankbased""" +720 5 dataset """yago310""" +720 5 model """structuredembedding""" +720 5 loss """softplus""" +720 5 regularizer """no""" +720 5 optimizer """adam""" +720 5 training_loop """owa""" +720 5 negative_sampler """basic""" +720 5 evaluator """rankbased""" +721 1 model.embedding_dim 0.0 +721 1 model.scoring_fct_norm 1.0 +721 1 loss.margin 6.068993140544543 +721 1 optimizer.lr 0.0018017977611011818 +721 1 negative_sampler.num_negs_per_pos 31.0 +721 1 training.batch_size 2.0 +721 2 model.embedding_dim 0.0 +721 2 model.scoring_fct_norm 1.0 +721 2 loss.margin 1.669866146591912 +721 2 optimizer.lr 0.019133084676397738 +721 2 negative_sampler.num_negs_per_pos 10.0 +721 2 training.batch_size 3.0 +721 3 model.embedding_dim 1.0 +721 3 model.scoring_fct_norm 2.0 +721 3 loss.margin 2.871179517702345 +721 3 optimizer.lr 0.0015548490236452763 +721 3 negative_sampler.num_negs_per_pos 0.0 +721 3 training.batch_size 0.0 +721 4 model.embedding_dim 2.0 +721 4 model.scoring_fct_norm 1.0 +721 4 loss.margin 2.1772371412388702 +721 4 optimizer.lr 0.012281120608834274 +721 4 negative_sampler.num_negs_per_pos 19.0 +721 4 training.batch_size 2.0 +721 5 model.embedding_dim 0.0 +721 5 model.scoring_fct_norm 2.0 +721 5 loss.margin 5.005863843177034 +721 5 optimizer.lr 0.040419934472352134 +721 5 negative_sampler.num_negs_per_pos 26.0 +721 5 training.batch_size 2.0 +721 1 dataset """yago310""" +721 1 model """structuredembedding""" +721 1 loss """marginranking""" +721 1 regularizer """no""" +721 1 optimizer """adam""" +721 1 training_loop 
"""owa""" +721 1 negative_sampler """basic""" +721 1 evaluator """rankbased""" +721 2 dataset """yago310""" +721 2 model """structuredembedding""" +721 2 loss """marginranking""" +721 2 regularizer """no""" +721 2 optimizer """adam""" +721 2 training_loop """owa""" +721 2 negative_sampler """basic""" +721 2 evaluator """rankbased""" +721 3 dataset """yago310""" +721 3 model """structuredembedding""" +721 3 loss """marginranking""" +721 3 regularizer """no""" +721 3 optimizer """adam""" +721 3 training_loop """owa""" +721 3 negative_sampler """basic""" +721 3 evaluator """rankbased""" +721 4 dataset """yago310""" +721 4 model """structuredembedding""" +721 4 loss """marginranking""" +721 4 regularizer """no""" +721 4 optimizer """adam""" +721 4 training_loop """owa""" +721 4 negative_sampler """basic""" +721 4 evaluator """rankbased""" +721 5 dataset """yago310""" +721 5 model """structuredembedding""" +721 5 loss """marginranking""" +721 5 regularizer """no""" +721 5 optimizer """adam""" +721 5 training_loop """owa""" +721 5 negative_sampler """basic""" +721 5 evaluator """rankbased""" +722 1 model.embedding_dim 2.0 +722 1 model.scoring_fct_norm 1.0 +722 1 loss.margin 8.582170816835367 +722 1 optimizer.lr 0.04557160511391739 +722 1 negative_sampler.num_negs_per_pos 17.0 +722 1 training.batch_size 2.0 +722 2 model.embedding_dim 1.0 +722 2 model.scoring_fct_norm 1.0 +722 2 loss.margin 7.57659439620785 +722 2 optimizer.lr 0.0155397666021531 +722 2 negative_sampler.num_negs_per_pos 30.0 +722 2 training.batch_size 1.0 +722 3 model.embedding_dim 0.0 +722 3 model.scoring_fct_norm 2.0 +722 3 loss.margin 9.45375312934323 +722 3 optimizer.lr 0.00973091847891648 +722 3 negative_sampler.num_negs_per_pos 24.0 +722 3 training.batch_size 3.0 +722 4 model.embedding_dim 2.0 +722 4 model.scoring_fct_norm 2.0 +722 4 loss.margin 0.8462623318340206 +722 4 optimizer.lr 0.019115835906925914 +722 4 negative_sampler.num_negs_per_pos 28.0 +722 4 training.batch_size 0.0 +722 1 dataset 
"""yago310""" +722 1 model """structuredembedding""" +722 1 loss """marginranking""" +722 1 regularizer """no""" +722 1 optimizer """adam""" +722 1 training_loop """owa""" +722 1 negative_sampler """basic""" +722 1 evaluator """rankbased""" +722 2 dataset """yago310""" +722 2 model """structuredembedding""" +722 2 loss """marginranking""" +722 2 regularizer """no""" +722 2 optimizer """adam""" +722 2 training_loop """owa""" +722 2 negative_sampler """basic""" +722 2 evaluator """rankbased""" +722 3 dataset """yago310""" +722 3 model """structuredembedding""" +722 3 loss """marginranking""" +722 3 regularizer """no""" +722 3 optimizer """adam""" +722 3 training_loop """owa""" +722 3 negative_sampler """basic""" +722 3 evaluator """rankbased""" +722 4 dataset """yago310""" +722 4 model """structuredembedding""" +722 4 loss """marginranking""" +722 4 regularizer """no""" +722 4 optimizer """adam""" +722 4 training_loop """owa""" +722 4 negative_sampler """basic""" +722 4 evaluator """rankbased""" +723 1 model.embedding_dim 2.0 +723 1 model.scoring_fct_norm 2.0 +723 1 loss.margin 14.134895791251052 +723 1 loss.adversarial_temperature 0.31459621833232365 +723 1 optimizer.lr 0.00979939204572891 +723 1 negative_sampler.num_negs_per_pos 11.0 +723 1 training.batch_size 1.0 +723 2 model.embedding_dim 1.0 +723 2 model.scoring_fct_norm 1.0 +723 2 loss.margin 22.669510564446266 +723 2 loss.adversarial_temperature 0.9435446154494682 +723 2 optimizer.lr 0.012392354330803407 +723 2 negative_sampler.num_negs_per_pos 46.0 +723 2 training.batch_size 1.0 +723 3 model.embedding_dim 2.0 +723 3 model.scoring_fct_norm 1.0 +723 3 loss.margin 26.11178945377174 +723 3 loss.adversarial_temperature 0.9629416256851135 +723 3 optimizer.lr 0.002342852507071819 +723 3 negative_sampler.num_negs_per_pos 20.0 +723 3 training.batch_size 2.0 +723 1 dataset """yago310""" +723 1 model """structuredembedding""" +723 1 loss """nssa""" +723 1 regularizer """no""" +723 1 optimizer """adam""" +723 1 
training_loop """owa""" +723 1 negative_sampler """basic""" +723 1 evaluator """rankbased""" +723 2 dataset """yago310""" +723 2 model """structuredembedding""" +723 2 loss """nssa""" +723 2 regularizer """no""" +723 2 optimizer """adam""" +723 2 training_loop """owa""" +723 2 negative_sampler """basic""" +723 2 evaluator """rankbased""" +723 3 dataset """yago310""" +723 3 model """structuredembedding""" +723 3 loss """nssa""" +723 3 regularizer """no""" +723 3 optimizer """adam""" +723 3 training_loop """owa""" +723 3 negative_sampler """basic""" +723 3 evaluator """rankbased""" +724 1 model.embedding_dim 0.0 +724 1 model.scoring_fct_norm 1.0 +724 1 loss.margin 10.064317898819239 +724 1 loss.adversarial_temperature 0.39541315509874775 +724 1 optimizer.lr 0.0041320600758553965 +724 1 negative_sampler.num_negs_per_pos 29.0 +724 1 training.batch_size 3.0 +724 2 model.embedding_dim 1.0 +724 2 model.scoring_fct_norm 1.0 +724 2 loss.margin 1.4163696572459086 +724 2 loss.adversarial_temperature 0.4284372323592367 +724 2 optimizer.lr 0.05529070510319142 +724 2 negative_sampler.num_negs_per_pos 27.0 +724 2 training.batch_size 2.0 +724 3 model.embedding_dim 0.0 +724 3 model.scoring_fct_norm 1.0 +724 3 loss.margin 29.186663800790882 +724 3 loss.adversarial_temperature 0.29710410211725236 +724 3 optimizer.lr 0.00287552580480508 +724 3 negative_sampler.num_negs_per_pos 0.0 +724 3 training.batch_size 3.0 +724 4 model.embedding_dim 0.0 +724 4 model.scoring_fct_norm 2.0 +724 4 loss.margin 1.899291924239285 +724 4 loss.adversarial_temperature 0.861504240951377 +724 4 optimizer.lr 0.0081524278857543 +724 4 negative_sampler.num_negs_per_pos 9.0 +724 4 training.batch_size 2.0 +724 5 model.embedding_dim 1.0 +724 5 model.scoring_fct_norm 2.0 +724 5 loss.margin 9.472405371642507 +724 5 loss.adversarial_temperature 0.24155060312909754 +724 5 optimizer.lr 0.00222364016217102 +724 5 negative_sampler.num_negs_per_pos 8.0 +724 5 training.batch_size 2.0 +724 6 model.embedding_dim 0.0 +724 6 
model.scoring_fct_norm 2.0 +724 6 loss.margin 28.096646471276685 +724 6 loss.adversarial_temperature 0.5219309359821014 +724 6 optimizer.lr 0.0011195253646232976 +724 6 negative_sampler.num_negs_per_pos 45.0 +724 6 training.batch_size 2.0 +724 7 model.embedding_dim 2.0 +724 7 model.scoring_fct_norm 1.0 +724 7 loss.margin 29.510468939675906 +724 7 loss.adversarial_temperature 0.7127477003413546 +724 7 optimizer.lr 0.034731193467356904 +724 7 negative_sampler.num_negs_per_pos 39.0 +724 7 training.batch_size 1.0 +724 1 dataset """yago310""" +724 1 model """structuredembedding""" +724 1 loss """nssa""" +724 1 regularizer """no""" +724 1 optimizer """adam""" +724 1 training_loop """owa""" +724 1 negative_sampler """basic""" +724 1 evaluator """rankbased""" +724 2 dataset """yago310""" +724 2 model """structuredembedding""" +724 2 loss """nssa""" +724 2 regularizer """no""" +724 2 optimizer """adam""" +724 2 training_loop """owa""" +724 2 negative_sampler """basic""" +724 2 evaluator """rankbased""" +724 3 dataset """yago310""" +724 3 model """structuredembedding""" +724 3 loss """nssa""" +724 3 regularizer """no""" +724 3 optimizer """adam""" +724 3 training_loop """owa""" +724 3 negative_sampler """basic""" +724 3 evaluator """rankbased""" +724 4 dataset """yago310""" +724 4 model """structuredembedding""" +724 4 loss """nssa""" +724 4 regularizer """no""" +724 4 optimizer """adam""" +724 4 training_loop """owa""" +724 4 negative_sampler """basic""" +724 4 evaluator """rankbased""" +724 5 dataset """yago310""" +724 5 model """structuredembedding""" +724 5 loss """nssa""" +724 5 regularizer """no""" +724 5 optimizer """adam""" +724 5 training_loop """owa""" +724 5 negative_sampler """basic""" +724 5 evaluator """rankbased""" +724 6 dataset """yago310""" +724 6 model """structuredembedding""" +724 6 loss """nssa""" +724 6 regularizer """no""" +724 6 optimizer """adam""" +724 6 training_loop """owa""" +724 6 negative_sampler """basic""" +724 6 evaluator """rankbased""" 
+724 7 dataset """yago310""" +724 7 model """structuredembedding""" +724 7 loss """nssa""" +724 7 regularizer """no""" +724 7 optimizer """adam""" +724 7 training_loop """owa""" +724 7 negative_sampler """basic""" +724 7 evaluator """rankbased""" +725 1 model.embedding_dim 1.0 +725 1 model.scoring_fct_norm 2.0 +725 1 optimizer.lr 0.0027373694115002314 +725 1 negative_sampler.num_negs_per_pos 25.0 +725 1 training.batch_size 2.0 +725 2 model.embedding_dim 1.0 +725 2 model.scoring_fct_norm 2.0 +725 2 optimizer.lr 0.0018072679052508426 +725 2 negative_sampler.num_negs_per_pos 43.0 +725 2 training.batch_size 1.0 +725 3 model.embedding_dim 0.0 +725 3 model.scoring_fct_norm 2.0 +725 3 optimizer.lr 0.002158359392194414 +725 3 negative_sampler.num_negs_per_pos 37.0 +725 3 training.batch_size 1.0 +725 4 model.embedding_dim 1.0 +725 4 model.scoring_fct_norm 2.0 +725 4 optimizer.lr 0.00934557085668436 +725 4 negative_sampler.num_negs_per_pos 2.0 +725 4 training.batch_size 1.0 +725 5 model.embedding_dim 0.0 +725 5 model.scoring_fct_norm 2.0 +725 5 optimizer.lr 0.04382821259892461 +725 5 negative_sampler.num_negs_per_pos 49.0 +725 5 training.batch_size 1.0 +725 6 model.embedding_dim 2.0 +725 6 model.scoring_fct_norm 2.0 +725 6 optimizer.lr 0.008087516562964407 +725 6 negative_sampler.num_negs_per_pos 30.0 +725 6 training.batch_size 3.0 +725 1 dataset """yago310""" +725 1 model """structuredembedding""" +725 1 loss """bceaftersigmoid""" +725 1 regularizer """no""" +725 1 optimizer """adam""" +725 1 training_loop """owa""" +725 1 negative_sampler """basic""" +725 1 evaluator """rankbased""" +725 2 dataset """yago310""" +725 2 model """structuredembedding""" +725 2 loss """bceaftersigmoid""" +725 2 regularizer """no""" +725 2 optimizer """adam""" +725 2 training_loop """owa""" +725 2 negative_sampler """basic""" +725 2 evaluator """rankbased""" +725 3 dataset """yago310""" +725 3 model """structuredembedding""" +725 3 loss """bceaftersigmoid""" +725 3 regularizer """no""" +725 3 
optimizer """adam""" +725 3 training_loop """owa""" +725 3 negative_sampler """basic""" +725 3 evaluator """rankbased""" +725 4 dataset """yago310""" +725 4 model """structuredembedding""" +725 4 loss """bceaftersigmoid""" +725 4 regularizer """no""" +725 4 optimizer """adam""" +725 4 training_loop """owa""" +725 4 negative_sampler """basic""" +725 4 evaluator """rankbased""" +725 5 dataset """yago310""" +725 5 model """structuredembedding""" +725 5 loss """bceaftersigmoid""" +725 5 regularizer """no""" +725 5 optimizer """adam""" +725 5 training_loop """owa""" +725 5 negative_sampler """basic""" +725 5 evaluator """rankbased""" +725 6 dataset """yago310""" +725 6 model """structuredembedding""" +725 6 loss """bceaftersigmoid""" +725 6 regularizer """no""" +725 6 optimizer """adam""" +725 6 training_loop """owa""" +725 6 negative_sampler """basic""" +725 6 evaluator """rankbased""" +726 1 model.embedding_dim 1.0 +726 1 model.scoring_fct_norm 2.0 +726 1 optimizer.lr 0.07290306874550083 +726 1 negative_sampler.num_negs_per_pos 7.0 +726 1 training.batch_size 2.0 +726 2 model.embedding_dim 2.0 +726 2 model.scoring_fct_norm 2.0 +726 2 optimizer.lr 0.00900869767212453 +726 2 negative_sampler.num_negs_per_pos 40.0 +726 2 training.batch_size 0.0 +726 3 model.embedding_dim 0.0 +726 3 model.scoring_fct_norm 1.0 +726 3 optimizer.lr 0.06344363027887617 +726 3 negative_sampler.num_negs_per_pos 43.0 +726 3 training.batch_size 1.0 +726 4 model.embedding_dim 2.0 +726 4 model.scoring_fct_norm 1.0 +726 4 optimizer.lr 0.011122350361976391 +726 4 negative_sampler.num_negs_per_pos 35.0 +726 4 training.batch_size 1.0 +726 1 dataset """yago310""" +726 1 model """structuredembedding""" +726 1 loss """bceaftersigmoid""" +726 1 regularizer """no""" +726 1 optimizer """adam""" +726 1 training_loop """owa""" +726 1 negative_sampler """basic""" +726 1 evaluator """rankbased""" +726 2 dataset """yago310""" +726 2 model """structuredembedding""" +726 2 loss """bceaftersigmoid""" +726 2 
regularizer """no""" +726 2 optimizer """adam""" +726 2 training_loop """owa""" +726 2 negative_sampler """basic""" +726 2 evaluator """rankbased""" +726 3 dataset """yago310""" +726 3 model """structuredembedding""" +726 3 loss """bceaftersigmoid""" +726 3 regularizer """no""" +726 3 optimizer """adam""" +726 3 training_loop """owa""" +726 3 negative_sampler """basic""" +726 3 evaluator """rankbased""" +726 4 dataset """yago310""" +726 4 model """structuredembedding""" +726 4 loss """bceaftersigmoid""" +726 4 regularizer """no""" +726 4 optimizer """adam""" +726 4 training_loop """owa""" +726 4 negative_sampler """basic""" +726 4 evaluator """rankbased""" +727 1 model.embedding_dim 2.0 +727 1 model.relation_dim 1.0 +727 1 optimizer.lr 0.001013569337719823 +727 1 negative_sampler.num_negs_per_pos 61.0 +727 1 training.batch_size 2.0 +727 2 model.embedding_dim 1.0 +727 2 model.relation_dim 0.0 +727 2 optimizer.lr 0.029302767694974464 +727 2 negative_sampler.num_negs_per_pos 75.0 +727 2 training.batch_size 0.0 +727 3 model.embedding_dim 1.0 +727 3 model.relation_dim 2.0 +727 3 optimizer.lr 0.0019257464798803753 +727 3 negative_sampler.num_negs_per_pos 57.0 +727 3 training.batch_size 1.0 +727 4 model.embedding_dim 1.0 +727 4 model.relation_dim 0.0 +727 4 optimizer.lr 0.0017125294149547955 +727 4 negative_sampler.num_negs_per_pos 87.0 +727 4 training.batch_size 0.0 +727 5 model.embedding_dim 1.0 +727 5 model.relation_dim 0.0 +727 5 optimizer.lr 0.005365247685485154 +727 5 negative_sampler.num_negs_per_pos 81.0 +727 5 training.batch_size 2.0 +727 6 model.embedding_dim 2.0 +727 6 model.relation_dim 0.0 +727 6 optimizer.lr 0.0064838868918032175 +727 6 negative_sampler.num_negs_per_pos 55.0 +727 6 training.batch_size 2.0 +727 7 model.embedding_dim 0.0 +727 7 model.relation_dim 2.0 +727 7 optimizer.lr 0.016458366844581615 +727 7 negative_sampler.num_negs_per_pos 23.0 +727 7 training.batch_size 2.0 +727 8 model.embedding_dim 1.0 +727 8 model.relation_dim 2.0 +727 8 
optimizer.lr 0.027995710193783618 +727 8 negative_sampler.num_negs_per_pos 70.0 +727 8 training.batch_size 1.0 +727 9 model.embedding_dim 1.0 +727 9 model.relation_dim 1.0 +727 9 optimizer.lr 0.001364223230804083 +727 9 negative_sampler.num_negs_per_pos 0.0 +727 9 training.batch_size 1.0 +727 10 model.embedding_dim 1.0 +727 10 model.relation_dim 2.0 +727 10 optimizer.lr 0.011298270527542722 +727 10 negative_sampler.num_negs_per_pos 62.0 +727 10 training.batch_size 0.0 +727 11 model.embedding_dim 2.0 +727 11 model.relation_dim 2.0 +727 11 optimizer.lr 0.0023072047837696696 +727 11 negative_sampler.num_negs_per_pos 64.0 +727 11 training.batch_size 0.0 +727 12 model.embedding_dim 0.0 +727 12 model.relation_dim 0.0 +727 12 optimizer.lr 0.045405545341769604 +727 12 negative_sampler.num_negs_per_pos 2.0 +727 12 training.batch_size 1.0 +727 13 model.embedding_dim 0.0 +727 13 model.relation_dim 2.0 +727 13 optimizer.lr 0.004113350330053756 +727 13 negative_sampler.num_negs_per_pos 30.0 +727 13 training.batch_size 2.0 +727 14 model.embedding_dim 1.0 +727 14 model.relation_dim 1.0 +727 14 optimizer.lr 0.012058366865641604 +727 14 negative_sampler.num_negs_per_pos 79.0 +727 14 training.batch_size 2.0 +727 15 model.embedding_dim 2.0 +727 15 model.relation_dim 2.0 +727 15 optimizer.lr 0.009078815912180445 +727 15 negative_sampler.num_negs_per_pos 86.0 +727 15 training.batch_size 1.0 +727 16 model.embedding_dim 0.0 +727 16 model.relation_dim 0.0 +727 16 optimizer.lr 0.009963463590156003 +727 16 negative_sampler.num_negs_per_pos 86.0 +727 16 training.batch_size 2.0 +727 17 model.embedding_dim 0.0 +727 17 model.relation_dim 2.0 +727 17 optimizer.lr 0.0020778516402332166 +727 17 negative_sampler.num_negs_per_pos 18.0 +727 17 training.batch_size 2.0 +727 18 model.embedding_dim 0.0 +727 18 model.relation_dim 0.0 +727 18 optimizer.lr 0.01857710868968354 +727 18 negative_sampler.num_negs_per_pos 29.0 +727 18 training.batch_size 1.0 +727 19 model.embedding_dim 1.0 +727 19 
model.relation_dim 0.0 +727 19 optimizer.lr 0.0026967476975521504 +727 19 negative_sampler.num_negs_per_pos 57.0 +727 19 training.batch_size 0.0 +727 1 dataset """fb15k237""" +727 1 model """transd""" +727 1 loss """bceaftersigmoid""" +727 1 regularizer """no""" +727 1 optimizer """adam""" +727 1 training_loop """owa""" +727 1 negative_sampler """basic""" +727 1 evaluator """rankbased""" +727 2 dataset """fb15k237""" +727 2 model """transd""" +727 2 loss """bceaftersigmoid""" +727 2 regularizer """no""" +727 2 optimizer """adam""" +727 2 training_loop """owa""" +727 2 negative_sampler """basic""" +727 2 evaluator """rankbased""" +727 3 dataset """fb15k237""" +727 3 model """transd""" +727 3 loss """bceaftersigmoid""" +727 3 regularizer """no""" +727 3 optimizer """adam""" +727 3 training_loop """owa""" +727 3 negative_sampler """basic""" +727 3 evaluator """rankbased""" +727 4 dataset """fb15k237""" +727 4 model """transd""" +727 4 loss """bceaftersigmoid""" +727 4 regularizer """no""" +727 4 optimizer """adam""" +727 4 training_loop """owa""" +727 4 negative_sampler """basic""" +727 4 evaluator """rankbased""" +727 5 dataset """fb15k237""" +727 5 model """transd""" +727 5 loss """bceaftersigmoid""" +727 5 regularizer """no""" +727 5 optimizer """adam""" +727 5 training_loop """owa""" +727 5 negative_sampler """basic""" +727 5 evaluator """rankbased""" +727 6 dataset """fb15k237""" +727 6 model """transd""" +727 6 loss """bceaftersigmoid""" +727 6 regularizer """no""" +727 6 optimizer """adam""" +727 6 training_loop """owa""" +727 6 negative_sampler """basic""" +727 6 evaluator """rankbased""" +727 7 dataset """fb15k237""" +727 7 model """transd""" +727 7 loss """bceaftersigmoid""" +727 7 regularizer """no""" +727 7 optimizer """adam""" +727 7 training_loop """owa""" +727 7 negative_sampler """basic""" +727 7 evaluator """rankbased""" +727 8 dataset """fb15k237""" +727 8 model """transd""" +727 8 loss """bceaftersigmoid""" +727 8 regularizer """no""" +727 8 
optimizer """adam""" +727 8 training_loop """owa""" +727 8 negative_sampler """basic""" +727 8 evaluator """rankbased""" +727 9 dataset """fb15k237""" +727 9 model """transd""" +727 9 loss """bceaftersigmoid""" +727 9 regularizer """no""" +727 9 optimizer """adam""" +727 9 training_loop """owa""" +727 9 negative_sampler """basic""" +727 9 evaluator """rankbased""" +727 10 dataset """fb15k237""" +727 10 model """transd""" +727 10 loss """bceaftersigmoid""" +727 10 regularizer """no""" +727 10 optimizer """adam""" +727 10 training_loop """owa""" +727 10 negative_sampler """basic""" +727 10 evaluator """rankbased""" +727 11 dataset """fb15k237""" +727 11 model """transd""" +727 11 loss """bceaftersigmoid""" +727 11 regularizer """no""" +727 11 optimizer """adam""" +727 11 training_loop """owa""" +727 11 negative_sampler """basic""" +727 11 evaluator """rankbased""" +727 12 dataset """fb15k237""" +727 12 model """transd""" +727 12 loss """bceaftersigmoid""" +727 12 regularizer """no""" +727 12 optimizer """adam""" +727 12 training_loop """owa""" +727 12 negative_sampler """basic""" +727 12 evaluator """rankbased""" +727 13 dataset """fb15k237""" +727 13 model """transd""" +727 13 loss """bceaftersigmoid""" +727 13 regularizer """no""" +727 13 optimizer """adam""" +727 13 training_loop """owa""" +727 13 negative_sampler """basic""" +727 13 evaluator """rankbased""" +727 14 dataset """fb15k237""" +727 14 model """transd""" +727 14 loss """bceaftersigmoid""" +727 14 regularizer """no""" +727 14 optimizer """adam""" +727 14 training_loop """owa""" +727 14 negative_sampler """basic""" +727 14 evaluator """rankbased""" +727 15 dataset """fb15k237""" +727 15 model """transd""" +727 15 loss """bceaftersigmoid""" +727 15 regularizer """no""" +727 15 optimizer """adam""" +727 15 training_loop """owa""" +727 15 negative_sampler """basic""" +727 15 evaluator """rankbased""" +727 16 dataset """fb15k237""" +727 16 model """transd""" +727 16 loss """bceaftersigmoid""" +727 16 
regularizer """no""" +727 16 optimizer """adam""" +727 16 training_loop """owa""" +727 16 negative_sampler """basic""" +727 16 evaluator """rankbased""" +727 17 dataset """fb15k237""" +727 17 model """transd""" +727 17 loss """bceaftersigmoid""" +727 17 regularizer """no""" +727 17 optimizer """adam""" +727 17 training_loop """owa""" +727 17 negative_sampler """basic""" +727 17 evaluator """rankbased""" +727 18 dataset """fb15k237""" +727 18 model """transd""" +727 18 loss """bceaftersigmoid""" +727 18 regularizer """no""" +727 18 optimizer """adam""" +727 18 training_loop """owa""" +727 18 negative_sampler """basic""" +727 18 evaluator """rankbased""" +727 19 dataset """fb15k237""" +727 19 model """transd""" +727 19 loss """bceaftersigmoid""" +727 19 regularizer """no""" +727 19 optimizer """adam""" +727 19 training_loop """owa""" +727 19 negative_sampler """basic""" +727 19 evaluator """rankbased""" +728 1 model.embedding_dim 0.0 +728 1 model.relation_dim 1.0 +728 1 optimizer.lr 0.007045474872201827 +728 1 negative_sampler.num_negs_per_pos 64.0 +728 1 training.batch_size 0.0 +728 2 model.embedding_dim 2.0 +728 2 model.relation_dim 1.0 +728 2 optimizer.lr 0.0062018400100636905 +728 2 negative_sampler.num_negs_per_pos 39.0 +728 2 training.batch_size 0.0 +728 3 model.embedding_dim 0.0 +728 3 model.relation_dim 1.0 +728 3 optimizer.lr 0.01194710522153168 +728 3 negative_sampler.num_negs_per_pos 86.0 +728 3 training.batch_size 1.0 +728 4 model.embedding_dim 2.0 +728 4 model.relation_dim 2.0 +728 4 optimizer.lr 0.003073790607523357 +728 4 negative_sampler.num_negs_per_pos 50.0 +728 4 training.batch_size 1.0 +728 5 model.embedding_dim 1.0 +728 5 model.relation_dim 2.0 +728 5 optimizer.lr 0.02215559974383508 +728 5 negative_sampler.num_negs_per_pos 0.0 +728 5 training.batch_size 0.0 +728 6 model.embedding_dim 1.0 +728 6 model.relation_dim 1.0 +728 6 optimizer.lr 0.01642319705133522 +728 6 negative_sampler.num_negs_per_pos 62.0 +728 6 training.batch_size 1.0 +728 7 
model.embedding_dim 1.0 +728 7 model.relation_dim 2.0 +728 7 optimizer.lr 0.021081206412280624 +728 7 negative_sampler.num_negs_per_pos 76.0 +728 7 training.batch_size 0.0 +728 8 model.embedding_dim 1.0 +728 8 model.relation_dim 0.0 +728 8 optimizer.lr 0.019195618734468206 +728 8 negative_sampler.num_negs_per_pos 4.0 +728 8 training.batch_size 1.0 +728 9 model.embedding_dim 0.0 +728 9 model.relation_dim 2.0 +728 9 optimizer.lr 0.0046412586670643895 +728 9 negative_sampler.num_negs_per_pos 95.0 +728 9 training.batch_size 0.0 +728 10 model.embedding_dim 1.0 +728 10 model.relation_dim 2.0 +728 10 optimizer.lr 0.0037101122956383235 +728 10 negative_sampler.num_negs_per_pos 93.0 +728 10 training.batch_size 0.0 +728 11 model.embedding_dim 1.0 +728 11 model.relation_dim 1.0 +728 11 optimizer.lr 0.020675177147730588 +728 11 negative_sampler.num_negs_per_pos 88.0 +728 11 training.batch_size 2.0 +728 12 model.embedding_dim 0.0 +728 12 model.relation_dim 1.0 +728 12 optimizer.lr 0.004323132943436389 +728 12 negative_sampler.num_negs_per_pos 77.0 +728 12 training.batch_size 0.0 +728 13 model.embedding_dim 0.0 +728 13 model.relation_dim 1.0 +728 13 optimizer.lr 0.0012928652603466778 +728 13 negative_sampler.num_negs_per_pos 47.0 +728 13 training.batch_size 1.0 +728 14 model.embedding_dim 0.0 +728 14 model.relation_dim 2.0 +728 14 optimizer.lr 0.031080943255505075 +728 14 negative_sampler.num_negs_per_pos 90.0 +728 14 training.batch_size 1.0 +728 15 model.embedding_dim 0.0 +728 15 model.relation_dim 0.0 +728 15 optimizer.lr 0.01031698511983653 +728 15 negative_sampler.num_negs_per_pos 39.0 +728 15 training.batch_size 2.0 +728 16 model.embedding_dim 0.0 +728 16 model.relation_dim 1.0 +728 16 optimizer.lr 0.04837175159074366 +728 16 negative_sampler.num_negs_per_pos 96.0 +728 16 training.batch_size 0.0 +728 17 model.embedding_dim 1.0 +728 17 model.relation_dim 1.0 +728 17 optimizer.lr 0.0011610892921095838 +728 17 negative_sampler.num_negs_per_pos 7.0 +728 17 training.batch_size 
0.0 +728 18 model.embedding_dim 0.0 +728 18 model.relation_dim 0.0 +728 18 optimizer.lr 0.0046669553211537845 +728 18 negative_sampler.num_negs_per_pos 99.0 +728 18 training.batch_size 1.0 +728 19 model.embedding_dim 2.0 +728 19 model.relation_dim 0.0 +728 19 optimizer.lr 0.0019654164554150162 +728 19 negative_sampler.num_negs_per_pos 72.0 +728 19 training.batch_size 0.0 +728 1 dataset """fb15k237""" +728 1 model """transd""" +728 1 loss """softplus""" +728 1 regularizer """no""" +728 1 optimizer """adam""" +728 1 training_loop """owa""" +728 1 negative_sampler """basic""" +728 1 evaluator """rankbased""" +728 2 dataset """fb15k237""" +728 2 model """transd""" +728 2 loss """softplus""" +728 2 regularizer """no""" +728 2 optimizer """adam""" +728 2 training_loop """owa""" +728 2 negative_sampler """basic""" +728 2 evaluator """rankbased""" +728 3 dataset """fb15k237""" +728 3 model """transd""" +728 3 loss """softplus""" +728 3 regularizer """no""" +728 3 optimizer """adam""" +728 3 training_loop """owa""" +728 3 negative_sampler """basic""" +728 3 evaluator """rankbased""" +728 4 dataset """fb15k237""" +728 4 model """transd""" +728 4 loss """softplus""" +728 4 regularizer """no""" +728 4 optimizer """adam""" +728 4 training_loop """owa""" +728 4 negative_sampler """basic""" +728 4 evaluator """rankbased""" +728 5 dataset """fb15k237""" +728 5 model """transd""" +728 5 loss """softplus""" +728 5 regularizer """no""" +728 5 optimizer """adam""" +728 5 training_loop """owa""" +728 5 negative_sampler """basic""" +728 5 evaluator """rankbased""" +728 6 dataset """fb15k237""" +728 6 model """transd""" +728 6 loss """softplus""" +728 6 regularizer """no""" +728 6 optimizer """adam""" +728 6 training_loop """owa""" +728 6 negative_sampler """basic""" +728 6 evaluator """rankbased""" +728 7 dataset """fb15k237""" +728 7 model """transd""" +728 7 loss """softplus""" +728 7 regularizer """no""" +728 7 optimizer """adam""" +728 7 training_loop """owa""" +728 7 
negative_sampler """basic""" +728 7 evaluator """rankbased""" +728 8 dataset """fb15k237""" +728 8 model """transd""" +728 8 loss """softplus""" +728 8 regularizer """no""" +728 8 optimizer """adam""" +728 8 training_loop """owa""" +728 8 negative_sampler """basic""" +728 8 evaluator """rankbased""" +728 9 dataset """fb15k237""" +728 9 model """transd""" +728 9 loss """softplus""" +728 9 regularizer """no""" +728 9 optimizer """adam""" +728 9 training_loop """owa""" +728 9 negative_sampler """basic""" +728 9 evaluator """rankbased""" +728 10 dataset """fb15k237""" +728 10 model """transd""" +728 10 loss """softplus""" +728 10 regularizer """no""" +728 10 optimizer """adam""" +728 10 training_loop """owa""" +728 10 negative_sampler """basic""" +728 10 evaluator """rankbased""" +728 11 dataset """fb15k237""" +728 11 model """transd""" +728 11 loss """softplus""" +728 11 regularizer """no""" +728 11 optimizer """adam""" +728 11 training_loop """owa""" +728 11 negative_sampler """basic""" +728 11 evaluator """rankbased""" +728 12 dataset """fb15k237""" +728 12 model """transd""" +728 12 loss """softplus""" +728 12 regularizer """no""" +728 12 optimizer """adam""" +728 12 training_loop """owa""" +728 12 negative_sampler """basic""" +728 12 evaluator """rankbased""" +728 13 dataset """fb15k237""" +728 13 model """transd""" +728 13 loss """softplus""" +728 13 regularizer """no""" +728 13 optimizer """adam""" +728 13 training_loop """owa""" +728 13 negative_sampler """basic""" +728 13 evaluator """rankbased""" +728 14 dataset """fb15k237""" +728 14 model """transd""" +728 14 loss """softplus""" +728 14 regularizer """no""" +728 14 optimizer """adam""" +728 14 training_loop """owa""" +728 14 negative_sampler """basic""" +728 14 evaluator """rankbased""" +728 15 dataset """fb15k237""" +728 15 model """transd""" +728 15 loss """softplus""" +728 15 regularizer """no""" +728 15 optimizer """adam""" +728 15 training_loop """owa""" +728 15 negative_sampler """basic""" +728 15 
evaluator """rankbased""" +728 16 dataset """fb15k237""" +728 16 model """transd""" +728 16 loss """softplus""" +728 16 regularizer """no""" +728 16 optimizer """adam""" +728 16 training_loop """owa""" +728 16 negative_sampler """basic""" +728 16 evaluator """rankbased""" +728 17 dataset """fb15k237""" +728 17 model """transd""" +728 17 loss """softplus""" +728 17 regularizer """no""" +728 17 optimizer """adam""" +728 17 training_loop """owa""" +728 17 negative_sampler """basic""" +728 17 evaluator """rankbased""" +728 18 dataset """fb15k237""" +728 18 model """transd""" +728 18 loss """softplus""" +728 18 regularizer """no""" +728 18 optimizer """adam""" +728 18 training_loop """owa""" +728 18 negative_sampler """basic""" +728 18 evaluator """rankbased""" +728 19 dataset """fb15k237""" +728 19 model """transd""" +728 19 loss """softplus""" +728 19 regularizer """no""" +728 19 optimizer """adam""" +728 19 training_loop """owa""" +728 19 negative_sampler """basic""" +728 19 evaluator """rankbased""" +729 1 model.embedding_dim 2.0 +729 1 model.relation_dim 1.0 +729 1 optimizer.lr 0.00131786388725451 +729 1 negative_sampler.num_negs_per_pos 14.0 +729 1 training.batch_size 1.0 +729 2 model.embedding_dim 1.0 +729 2 model.relation_dim 1.0 +729 2 optimizer.lr 0.04333597268793517 +729 2 negative_sampler.num_negs_per_pos 58.0 +729 2 training.batch_size 0.0 +729 3 model.embedding_dim 0.0 +729 3 model.relation_dim 1.0 +729 3 optimizer.lr 0.004933758490253641 +729 3 negative_sampler.num_negs_per_pos 4.0 +729 3 training.batch_size 0.0 +729 4 model.embedding_dim 1.0 +729 4 model.relation_dim 2.0 +729 4 optimizer.lr 0.035520364131597884 +729 4 negative_sampler.num_negs_per_pos 5.0 +729 4 training.batch_size 0.0 +729 5 model.embedding_dim 0.0 +729 5 model.relation_dim 2.0 +729 5 optimizer.lr 0.0011658344015602522 +729 5 negative_sampler.num_negs_per_pos 82.0 +729 5 training.batch_size 2.0 +729 6 model.embedding_dim 1.0 +729 6 model.relation_dim 0.0 +729 6 optimizer.lr 
0.0035863332057260735 +729 6 negative_sampler.num_negs_per_pos 3.0 +729 6 training.batch_size 0.0 +729 7 model.embedding_dim 0.0 +729 7 model.relation_dim 2.0 +729 7 optimizer.lr 0.001928364305632877 +729 7 negative_sampler.num_negs_per_pos 33.0 +729 7 training.batch_size 0.0 +729 8 model.embedding_dim 1.0 +729 8 model.relation_dim 0.0 +729 8 optimizer.lr 0.001368136914278615 +729 8 negative_sampler.num_negs_per_pos 37.0 +729 8 training.batch_size 0.0 +729 9 model.embedding_dim 1.0 +729 9 model.relation_dim 2.0 +729 9 optimizer.lr 0.04559055303864605 +729 9 negative_sampler.num_negs_per_pos 27.0 +729 9 training.batch_size 0.0 +729 10 model.embedding_dim 0.0 +729 10 model.relation_dim 0.0 +729 10 optimizer.lr 0.037896869613936096 +729 10 negative_sampler.num_negs_per_pos 27.0 +729 10 training.batch_size 2.0 +729 11 model.embedding_dim 2.0 +729 11 model.relation_dim 2.0 +729 11 optimizer.lr 0.08389520476837535 +729 11 negative_sampler.num_negs_per_pos 98.0 +729 11 training.batch_size 2.0 +729 12 model.embedding_dim 0.0 +729 12 model.relation_dim 2.0 +729 12 optimizer.lr 0.037765582527095054 +729 12 negative_sampler.num_negs_per_pos 56.0 +729 12 training.batch_size 1.0 +729 13 model.embedding_dim 2.0 +729 13 model.relation_dim 0.0 +729 13 optimizer.lr 0.008853545422503403 +729 13 negative_sampler.num_negs_per_pos 0.0 +729 13 training.batch_size 1.0 +729 14 model.embedding_dim 0.0 +729 14 model.relation_dim 0.0 +729 14 optimizer.lr 0.003704865002448057 +729 14 negative_sampler.num_negs_per_pos 73.0 +729 14 training.batch_size 2.0 +729 15 model.embedding_dim 2.0 +729 15 model.relation_dim 2.0 +729 15 optimizer.lr 0.01824194563275479 +729 15 negative_sampler.num_negs_per_pos 98.0 +729 15 training.batch_size 2.0 +729 16 model.embedding_dim 1.0 +729 16 model.relation_dim 1.0 +729 16 optimizer.lr 0.006494197388245435 +729 16 negative_sampler.num_negs_per_pos 5.0 +729 16 training.batch_size 0.0 +729 17 model.embedding_dim 0.0 +729 17 model.relation_dim 0.0 +729 17 
optimizer.lr 0.00264597264960524 +729 17 negative_sampler.num_negs_per_pos 20.0 +729 17 training.batch_size 1.0 +729 18 model.embedding_dim 0.0 +729 18 model.relation_dim 1.0 +729 18 optimizer.lr 0.00911118385220883 +729 18 negative_sampler.num_negs_per_pos 11.0 +729 18 training.batch_size 1.0 +729 19 model.embedding_dim 1.0 +729 19 model.relation_dim 2.0 +729 19 optimizer.lr 0.04486546839047806 +729 19 negative_sampler.num_negs_per_pos 4.0 +729 19 training.batch_size 2.0 +729 20 model.embedding_dim 1.0 +729 20 model.relation_dim 0.0 +729 20 optimizer.lr 0.006109717688273554 +729 20 negative_sampler.num_negs_per_pos 92.0 +729 20 training.batch_size 1.0 +729 21 model.embedding_dim 2.0 +729 21 model.relation_dim 0.0 +729 21 optimizer.lr 0.00964737896877651 +729 21 negative_sampler.num_negs_per_pos 47.0 +729 21 training.batch_size 1.0 +729 22 model.embedding_dim 1.0 +729 22 model.relation_dim 1.0 +729 22 optimizer.lr 0.0031461816151329407 +729 22 negative_sampler.num_negs_per_pos 88.0 +729 22 training.batch_size 2.0 +729 23 model.embedding_dim 2.0 +729 23 model.relation_dim 1.0 +729 23 optimizer.lr 0.045824063545105834 +729 23 negative_sampler.num_negs_per_pos 40.0 +729 23 training.batch_size 0.0 +729 24 model.embedding_dim 1.0 +729 24 model.relation_dim 1.0 +729 24 optimizer.lr 0.0506690314566898 +729 24 negative_sampler.num_negs_per_pos 41.0 +729 24 training.batch_size 1.0 +729 25 model.embedding_dim 1.0 +729 25 model.relation_dim 0.0 +729 25 optimizer.lr 0.004630265100714751 +729 25 negative_sampler.num_negs_per_pos 67.0 +729 25 training.batch_size 2.0 +729 26 model.embedding_dim 1.0 +729 26 model.relation_dim 0.0 +729 26 optimizer.lr 0.004853705800688128 +729 26 negative_sampler.num_negs_per_pos 48.0 +729 26 training.batch_size 0.0 +729 27 model.embedding_dim 0.0 +729 27 model.relation_dim 2.0 +729 27 optimizer.lr 0.053438364094428155 +729 27 negative_sampler.num_negs_per_pos 60.0 +729 27 training.batch_size 0.0 +729 28 model.embedding_dim 0.0 +729 28 
model.relation_dim 2.0 +729 28 optimizer.lr 0.022004945427652224 +729 28 negative_sampler.num_negs_per_pos 67.0 +729 28 training.batch_size 0.0 +729 29 model.embedding_dim 1.0 +729 29 model.relation_dim 0.0 +729 29 optimizer.lr 0.0017481398646763378 +729 29 negative_sampler.num_negs_per_pos 40.0 +729 29 training.batch_size 0.0 +729 30 model.embedding_dim 1.0 +729 30 model.relation_dim 1.0 +729 30 optimizer.lr 0.009888387488864983 +729 30 negative_sampler.num_negs_per_pos 92.0 +729 30 training.batch_size 1.0 +729 31 model.embedding_dim 2.0 +729 31 model.relation_dim 1.0 +729 31 optimizer.lr 0.07934636779303984 +729 31 negative_sampler.num_negs_per_pos 83.0 +729 31 training.batch_size 2.0 +729 32 model.embedding_dim 0.0 +729 32 model.relation_dim 0.0 +729 32 optimizer.lr 0.003939274426119773 +729 32 negative_sampler.num_negs_per_pos 9.0 +729 32 training.batch_size 0.0 +729 33 model.embedding_dim 0.0 +729 33 model.relation_dim 0.0 +729 33 optimizer.lr 0.016373844163986156 +729 33 negative_sampler.num_negs_per_pos 48.0 +729 33 training.batch_size 0.0 +729 34 model.embedding_dim 1.0 +729 34 model.relation_dim 1.0 +729 34 optimizer.lr 0.0023417365136652246 +729 34 negative_sampler.num_negs_per_pos 33.0 +729 34 training.batch_size 2.0 +729 35 model.embedding_dim 0.0 +729 35 model.relation_dim 1.0 +729 35 optimizer.lr 0.002717662082044427 +729 35 negative_sampler.num_negs_per_pos 12.0 +729 35 training.batch_size 0.0 +729 1 dataset """fb15k237""" +729 1 model """transd""" +729 1 loss """bceaftersigmoid""" +729 1 regularizer """no""" +729 1 optimizer """adam""" +729 1 training_loop """owa""" +729 1 negative_sampler """basic""" +729 1 evaluator """rankbased""" +729 2 dataset """fb15k237""" +729 2 model """transd""" +729 2 loss """bceaftersigmoid""" +729 2 regularizer """no""" +729 2 optimizer """adam""" +729 2 training_loop """owa""" +729 2 negative_sampler """basic""" +729 2 evaluator """rankbased""" +729 3 dataset """fb15k237""" +729 3 model """transd""" +729 3 loss 
"""bceaftersigmoid""" +729 3 regularizer """no""" +729 3 optimizer """adam""" +729 3 training_loop """owa""" +729 3 negative_sampler """basic""" +729 3 evaluator """rankbased""" +729 4 dataset """fb15k237""" +729 4 model """transd""" +729 4 loss """bceaftersigmoid""" +729 4 regularizer """no""" +729 4 optimizer """adam""" +729 4 training_loop """owa""" +729 4 negative_sampler """basic""" +729 4 evaluator """rankbased""" +729 5 dataset """fb15k237""" +729 5 model """transd""" +729 5 loss """bceaftersigmoid""" +729 5 regularizer """no""" +729 5 optimizer """adam""" +729 5 training_loop """owa""" +729 5 negative_sampler """basic""" +729 5 evaluator """rankbased""" +729 6 dataset """fb15k237""" +729 6 model """transd""" +729 6 loss """bceaftersigmoid""" +729 6 regularizer """no""" +729 6 optimizer """adam""" +729 6 training_loop """owa""" +729 6 negative_sampler """basic""" +729 6 evaluator """rankbased""" +729 7 dataset """fb15k237""" +729 7 model """transd""" +729 7 loss """bceaftersigmoid""" +729 7 regularizer """no""" +729 7 optimizer """adam""" +729 7 training_loop """owa""" +729 7 negative_sampler """basic""" +729 7 evaluator """rankbased""" +729 8 dataset """fb15k237""" +729 8 model """transd""" +729 8 loss """bceaftersigmoid""" +729 8 regularizer """no""" +729 8 optimizer """adam""" +729 8 training_loop """owa""" +729 8 negative_sampler """basic""" +729 8 evaluator """rankbased""" +729 9 dataset """fb15k237""" +729 9 model """transd""" +729 9 loss """bceaftersigmoid""" +729 9 regularizer """no""" +729 9 optimizer """adam""" +729 9 training_loop """owa""" +729 9 negative_sampler """basic""" +729 9 evaluator """rankbased""" +729 10 dataset """fb15k237""" +729 10 model """transd""" +729 10 loss """bceaftersigmoid""" +729 10 regularizer """no""" +729 10 optimizer """adam""" +729 10 training_loop """owa""" +729 10 negative_sampler """basic""" +729 10 evaluator """rankbased""" +729 11 dataset """fb15k237""" +729 11 model """transd""" +729 11 loss 
"""bceaftersigmoid""" +729 11 regularizer """no""" +729 11 optimizer """adam""" +729 11 training_loop """owa""" +729 11 negative_sampler """basic""" +729 11 evaluator """rankbased""" +729 12 dataset """fb15k237""" +729 12 model """transd""" +729 12 loss """bceaftersigmoid""" +729 12 regularizer """no""" +729 12 optimizer """adam""" +729 12 training_loop """owa""" +729 12 negative_sampler """basic""" +729 12 evaluator """rankbased""" +729 13 dataset """fb15k237""" +729 13 model """transd""" +729 13 loss """bceaftersigmoid""" +729 13 regularizer """no""" +729 13 optimizer """adam""" +729 13 training_loop """owa""" +729 13 negative_sampler """basic""" +729 13 evaluator """rankbased""" +729 14 dataset """fb15k237""" +729 14 model """transd""" +729 14 loss """bceaftersigmoid""" +729 14 regularizer """no""" +729 14 optimizer """adam""" +729 14 training_loop """owa""" +729 14 negative_sampler """basic""" +729 14 evaluator """rankbased""" +729 15 dataset """fb15k237""" +729 15 model """transd""" +729 15 loss """bceaftersigmoid""" +729 15 regularizer """no""" +729 15 optimizer """adam""" +729 15 training_loop """owa""" +729 15 negative_sampler """basic""" +729 15 evaluator """rankbased""" +729 16 dataset """fb15k237""" +729 16 model """transd""" +729 16 loss """bceaftersigmoid""" +729 16 regularizer """no""" +729 16 optimizer """adam""" +729 16 training_loop """owa""" +729 16 negative_sampler """basic""" +729 16 evaluator """rankbased""" +729 17 dataset """fb15k237""" +729 17 model """transd""" +729 17 loss """bceaftersigmoid""" +729 17 regularizer """no""" +729 17 optimizer """adam""" +729 17 training_loop """owa""" +729 17 negative_sampler """basic""" +729 17 evaluator """rankbased""" +729 18 dataset """fb15k237""" +729 18 model """transd""" +729 18 loss """bceaftersigmoid""" +729 18 regularizer """no""" +729 18 optimizer """adam""" +729 18 training_loop """owa""" +729 18 negative_sampler """basic""" +729 18 evaluator """rankbased""" +729 19 dataset """fb15k237""" +729 19 
model """transd""" +729 19 loss """bceaftersigmoid""" +729 19 regularizer """no""" +729 19 optimizer """adam""" +729 19 training_loop """owa""" +729 19 negative_sampler """basic""" +729 19 evaluator """rankbased""" +729 20 dataset """fb15k237""" +729 20 model """transd""" +729 20 loss """bceaftersigmoid""" +729 20 regularizer """no""" +729 20 optimizer """adam""" +729 20 training_loop """owa""" +729 20 negative_sampler """basic""" +729 20 evaluator """rankbased""" +729 21 dataset """fb15k237""" +729 21 model """transd""" +729 21 loss """bceaftersigmoid""" +729 21 regularizer """no""" +729 21 optimizer """adam""" +729 21 training_loop """owa""" +729 21 negative_sampler """basic""" +729 21 evaluator """rankbased""" +729 22 dataset """fb15k237""" +729 22 model """transd""" +729 22 loss """bceaftersigmoid""" +729 22 regularizer """no""" +729 22 optimizer """adam""" +729 22 training_loop """owa""" +729 22 negative_sampler """basic""" +729 22 evaluator """rankbased""" +729 23 dataset """fb15k237""" +729 23 model """transd""" +729 23 loss """bceaftersigmoid""" +729 23 regularizer """no""" +729 23 optimizer """adam""" +729 23 training_loop """owa""" +729 23 negative_sampler """basic""" +729 23 evaluator """rankbased""" +729 24 dataset """fb15k237""" +729 24 model """transd""" +729 24 loss """bceaftersigmoid""" +729 24 regularizer """no""" +729 24 optimizer """adam""" +729 24 training_loop """owa""" +729 24 negative_sampler """basic""" +729 24 evaluator """rankbased""" +729 25 dataset """fb15k237""" +729 25 model """transd""" +729 25 loss """bceaftersigmoid""" +729 25 regularizer """no""" +729 25 optimizer """adam""" +729 25 training_loop """owa""" +729 25 negative_sampler """basic""" +729 25 evaluator """rankbased""" +729 26 dataset """fb15k237""" +729 26 model """transd""" +729 26 loss """bceaftersigmoid""" +729 26 regularizer """no""" +729 26 optimizer """adam""" +729 26 training_loop """owa""" +729 26 negative_sampler """basic""" +729 26 evaluator """rankbased""" +729 
27 dataset """fb15k237""" +729 27 model """transd""" +729 27 loss """bceaftersigmoid""" +729 27 regularizer """no""" +729 27 optimizer """adam""" +729 27 training_loop """owa""" +729 27 negative_sampler """basic""" +729 27 evaluator """rankbased""" +729 28 dataset """fb15k237""" +729 28 model """transd""" +729 28 loss """bceaftersigmoid""" +729 28 regularizer """no""" +729 28 optimizer """adam""" +729 28 training_loop """owa""" +729 28 negative_sampler """basic""" +729 28 evaluator """rankbased""" +729 29 dataset """fb15k237""" +729 29 model """transd""" +729 29 loss """bceaftersigmoid""" +729 29 regularizer """no""" +729 29 optimizer """adam""" +729 29 training_loop """owa""" +729 29 negative_sampler """basic""" +729 29 evaluator """rankbased""" +729 30 dataset """fb15k237""" +729 30 model """transd""" +729 30 loss """bceaftersigmoid""" +729 30 regularizer """no""" +729 30 optimizer """adam""" +729 30 training_loop """owa""" +729 30 negative_sampler """basic""" +729 30 evaluator """rankbased""" +729 31 dataset """fb15k237""" +729 31 model """transd""" +729 31 loss """bceaftersigmoid""" +729 31 regularizer """no""" +729 31 optimizer """adam""" +729 31 training_loop """owa""" +729 31 negative_sampler """basic""" +729 31 evaluator """rankbased""" +729 32 dataset """fb15k237""" +729 32 model """transd""" +729 32 loss """bceaftersigmoid""" +729 32 regularizer """no""" +729 32 optimizer """adam""" +729 32 training_loop """owa""" +729 32 negative_sampler """basic""" +729 32 evaluator """rankbased""" +729 33 dataset """fb15k237""" +729 33 model """transd""" +729 33 loss """bceaftersigmoid""" +729 33 regularizer """no""" +729 33 optimizer """adam""" +729 33 training_loop """owa""" +729 33 negative_sampler """basic""" +729 33 evaluator """rankbased""" +729 34 dataset """fb15k237""" +729 34 model """transd""" +729 34 loss """bceaftersigmoid""" +729 34 regularizer """no""" +729 34 optimizer """adam""" +729 34 training_loop """owa""" +729 34 negative_sampler """basic""" +729 
34 evaluator """rankbased""" +729 35 dataset """fb15k237""" +729 35 model """transd""" +729 35 loss """bceaftersigmoid""" +729 35 regularizer """no""" +729 35 optimizer """adam""" +729 35 training_loop """owa""" +729 35 negative_sampler """basic""" +729 35 evaluator """rankbased""" +730 1 model.embedding_dim 1.0 +730 1 model.relation_dim 0.0 +730 1 optimizer.lr 0.0010237111268298815 +730 1 negative_sampler.num_negs_per_pos 76.0 +730 1 training.batch_size 0.0 +730 2 model.embedding_dim 0.0 +730 2 model.relation_dim 2.0 +730 2 optimizer.lr 0.0014534819344272192 +730 2 negative_sampler.num_negs_per_pos 7.0 +730 2 training.batch_size 0.0 +730 3 model.embedding_dim 1.0 +730 3 model.relation_dim 0.0 +730 3 optimizer.lr 0.009977950744372558 +730 3 negative_sampler.num_negs_per_pos 21.0 +730 3 training.batch_size 1.0 +730 4 model.embedding_dim 1.0 +730 4 model.relation_dim 2.0 +730 4 optimizer.lr 0.018950472254050717 +730 4 negative_sampler.num_negs_per_pos 34.0 +730 4 training.batch_size 0.0 +730 5 model.embedding_dim 2.0 +730 5 model.relation_dim 1.0 +730 5 optimizer.lr 0.0038059297447053915 +730 5 negative_sampler.num_negs_per_pos 58.0 +730 5 training.batch_size 1.0 +730 6 model.embedding_dim 0.0 +730 6 model.relation_dim 0.0 +730 6 optimizer.lr 0.0012965333884339583 +730 6 negative_sampler.num_negs_per_pos 31.0 +730 6 training.batch_size 0.0 +730 7 model.embedding_dim 2.0 +730 7 model.relation_dim 2.0 +730 7 optimizer.lr 0.004997619482502393 +730 7 negative_sampler.num_negs_per_pos 90.0 +730 7 training.batch_size 1.0 +730 8 model.embedding_dim 1.0 +730 8 model.relation_dim 1.0 +730 8 optimizer.lr 0.0014530766483502092 +730 8 negative_sampler.num_negs_per_pos 85.0 +730 8 training.batch_size 2.0 +730 9 model.embedding_dim 2.0 +730 9 model.relation_dim 2.0 +730 9 optimizer.lr 0.016676218360379216 +730 9 negative_sampler.num_negs_per_pos 58.0 +730 9 training.batch_size 1.0 +730 10 model.embedding_dim 2.0 +730 10 model.relation_dim 1.0 +730 10 optimizer.lr 
0.013996001904048103 +730 10 negative_sampler.num_negs_per_pos 23.0 +730 10 training.batch_size 2.0 +730 11 model.embedding_dim 1.0 +730 11 model.relation_dim 2.0 +730 11 optimizer.lr 0.0119132048802813 +730 11 negative_sampler.num_negs_per_pos 6.0 +730 11 training.batch_size 1.0 +730 12 model.embedding_dim 1.0 +730 12 model.relation_dim 2.0 +730 12 optimizer.lr 0.016364039292794855 +730 12 negative_sampler.num_negs_per_pos 37.0 +730 12 training.batch_size 0.0 +730 13 model.embedding_dim 0.0 +730 13 model.relation_dim 1.0 +730 13 optimizer.lr 0.0044554267159348645 +730 13 negative_sampler.num_negs_per_pos 92.0 +730 13 training.batch_size 1.0 +730 14 model.embedding_dim 2.0 +730 14 model.relation_dim 2.0 +730 14 optimizer.lr 0.04983198408676674 +730 14 negative_sampler.num_negs_per_pos 3.0 +730 14 training.batch_size 1.0 +730 15 model.embedding_dim 2.0 +730 15 model.relation_dim 1.0 +730 15 optimizer.lr 0.0036439181672794706 +730 15 negative_sampler.num_negs_per_pos 11.0 +730 15 training.batch_size 2.0 +730 16 model.embedding_dim 1.0 +730 16 model.relation_dim 1.0 +730 16 optimizer.lr 0.001595442851387253 +730 16 negative_sampler.num_negs_per_pos 93.0 +730 16 training.batch_size 0.0 +730 17 model.embedding_dim 2.0 +730 17 model.relation_dim 1.0 +730 17 optimizer.lr 0.004481889867785331 +730 17 negative_sampler.num_negs_per_pos 13.0 +730 17 training.batch_size 2.0 +730 18 model.embedding_dim 1.0 +730 18 model.relation_dim 1.0 +730 18 optimizer.lr 0.0012297073656920411 +730 18 negative_sampler.num_negs_per_pos 61.0 +730 18 training.batch_size 1.0 +730 19 model.embedding_dim 0.0 +730 19 model.relation_dim 2.0 +730 19 optimizer.lr 0.004291012141712565 +730 19 negative_sampler.num_negs_per_pos 39.0 +730 19 training.batch_size 2.0 +730 20 model.embedding_dim 2.0 +730 20 model.relation_dim 2.0 +730 20 optimizer.lr 0.004804773192071635 +730 20 negative_sampler.num_negs_per_pos 2.0 +730 20 training.batch_size 1.0 +730 21 model.embedding_dim 1.0 +730 21 model.relation_dim 1.0 
+730 21 optimizer.lr 0.0878205884612226 +730 21 negative_sampler.num_negs_per_pos 98.0 +730 21 training.batch_size 1.0 +730 22 model.embedding_dim 1.0 +730 22 model.relation_dim 1.0 +730 22 optimizer.lr 0.0016440815824945456 +730 22 negative_sampler.num_negs_per_pos 12.0 +730 22 training.batch_size 0.0 +730 23 model.embedding_dim 1.0 +730 23 model.relation_dim 0.0 +730 23 optimizer.lr 0.02073201624881075 +730 23 negative_sampler.num_negs_per_pos 29.0 +730 23 training.batch_size 2.0 +730 24 model.embedding_dim 2.0 +730 24 model.relation_dim 1.0 +730 24 optimizer.lr 0.0342412980064329 +730 24 negative_sampler.num_negs_per_pos 52.0 +730 24 training.batch_size 0.0 +730 25 model.embedding_dim 1.0 +730 25 model.relation_dim 0.0 +730 25 optimizer.lr 0.009860205555875788 +730 25 negative_sampler.num_negs_per_pos 57.0 +730 25 training.batch_size 1.0 +730 26 model.embedding_dim 1.0 +730 26 model.relation_dim 0.0 +730 26 optimizer.lr 0.015284541206029168 +730 26 negative_sampler.num_negs_per_pos 30.0 +730 26 training.batch_size 2.0 +730 27 model.embedding_dim 0.0 +730 27 model.relation_dim 0.0 +730 27 optimizer.lr 0.04683661678523915 +730 27 negative_sampler.num_negs_per_pos 31.0 +730 27 training.batch_size 2.0 +730 28 model.embedding_dim 0.0 +730 28 model.relation_dim 1.0 +730 28 optimizer.lr 0.025969065217673983 +730 28 negative_sampler.num_negs_per_pos 23.0 +730 28 training.batch_size 0.0 +730 29 model.embedding_dim 1.0 +730 29 model.relation_dim 0.0 +730 29 optimizer.lr 0.0028365723112955196 +730 29 negative_sampler.num_negs_per_pos 66.0 +730 29 training.batch_size 0.0 +730 30 model.embedding_dim 1.0 +730 30 model.relation_dim 1.0 +730 30 optimizer.lr 0.0011115454185777914 +730 30 negative_sampler.num_negs_per_pos 27.0 +730 30 training.batch_size 0.0 +730 31 model.embedding_dim 2.0 +730 31 model.relation_dim 0.0 +730 31 optimizer.lr 0.0012890102110366343 +730 31 negative_sampler.num_negs_per_pos 64.0 +730 31 training.batch_size 1.0 +730 32 model.embedding_dim 2.0 +730 32 
model.relation_dim 2.0 +730 32 optimizer.lr 0.0018111737391843573 +730 32 negative_sampler.num_negs_per_pos 55.0 +730 32 training.batch_size 0.0 +730 1 dataset """fb15k237""" +730 1 model """transd""" +730 1 loss """softplus""" +730 1 regularizer """no""" +730 1 optimizer """adam""" +730 1 training_loop """owa""" +730 1 negative_sampler """basic""" +730 1 evaluator """rankbased""" +730 2 dataset """fb15k237""" +730 2 model """transd""" +730 2 loss """softplus""" +730 2 regularizer """no""" +730 2 optimizer """adam""" +730 2 training_loop """owa""" +730 2 negative_sampler """basic""" +730 2 evaluator """rankbased""" +730 3 dataset """fb15k237""" +730 3 model """transd""" +730 3 loss """softplus""" +730 3 regularizer """no""" +730 3 optimizer """adam""" +730 3 training_loop """owa""" +730 3 negative_sampler """basic""" +730 3 evaluator """rankbased""" +730 4 dataset """fb15k237""" +730 4 model """transd""" +730 4 loss """softplus""" +730 4 regularizer """no""" +730 4 optimizer """adam""" +730 4 training_loop """owa""" +730 4 negative_sampler """basic""" +730 4 evaluator """rankbased""" +730 5 dataset """fb15k237""" +730 5 model """transd""" +730 5 loss """softplus""" +730 5 regularizer """no""" +730 5 optimizer """adam""" +730 5 training_loop """owa""" +730 5 negative_sampler """basic""" +730 5 evaluator """rankbased""" +730 6 dataset """fb15k237""" +730 6 model """transd""" +730 6 loss """softplus""" +730 6 regularizer """no""" +730 6 optimizer """adam""" +730 6 training_loop """owa""" +730 6 negative_sampler """basic""" +730 6 evaluator """rankbased""" +730 7 dataset """fb15k237""" +730 7 model """transd""" +730 7 loss """softplus""" +730 7 regularizer """no""" +730 7 optimizer """adam""" +730 7 training_loop """owa""" +730 7 negative_sampler """basic""" +730 7 evaluator """rankbased""" +730 8 dataset """fb15k237""" +730 8 model """transd""" +730 8 loss """softplus""" +730 8 regularizer """no""" +730 8 optimizer """adam""" +730 8 training_loop """owa""" +730 8 
negative_sampler """basic""" +730 8 evaluator """rankbased""" +730 9 dataset """fb15k237""" +730 9 model """transd""" +730 9 loss """softplus""" +730 9 regularizer """no""" +730 9 optimizer """adam""" +730 9 training_loop """owa""" +730 9 negative_sampler """basic""" +730 9 evaluator """rankbased""" +730 10 dataset """fb15k237""" +730 10 model """transd""" +730 10 loss """softplus""" +730 10 regularizer """no""" +730 10 optimizer """adam""" +730 10 training_loop """owa""" +730 10 negative_sampler """basic""" +730 10 evaluator """rankbased""" +730 11 dataset """fb15k237""" +730 11 model """transd""" +730 11 loss """softplus""" +730 11 regularizer """no""" +730 11 optimizer """adam""" +730 11 training_loop """owa""" +730 11 negative_sampler """basic""" +730 11 evaluator """rankbased""" +730 12 dataset """fb15k237""" +730 12 model """transd""" +730 12 loss """softplus""" +730 12 regularizer """no""" +730 12 optimizer """adam""" +730 12 training_loop """owa""" +730 12 negative_sampler """basic""" +730 12 evaluator """rankbased""" +730 13 dataset """fb15k237""" +730 13 model """transd""" +730 13 loss """softplus""" +730 13 regularizer """no""" +730 13 optimizer """adam""" +730 13 training_loop """owa""" +730 13 negative_sampler """basic""" +730 13 evaluator """rankbased""" +730 14 dataset """fb15k237""" +730 14 model """transd""" +730 14 loss """softplus""" +730 14 regularizer """no""" +730 14 optimizer """adam""" +730 14 training_loop """owa""" +730 14 negative_sampler """basic""" +730 14 evaluator """rankbased""" +730 15 dataset """fb15k237""" +730 15 model """transd""" +730 15 loss """softplus""" +730 15 regularizer """no""" +730 15 optimizer """adam""" +730 15 training_loop """owa""" +730 15 negative_sampler """basic""" +730 15 evaluator """rankbased""" +730 16 dataset """fb15k237""" +730 16 model """transd""" +730 16 loss """softplus""" +730 16 regularizer """no""" +730 16 optimizer """adam""" +730 16 training_loop """owa""" +730 16 negative_sampler """basic""" 
+730 16 evaluator """rankbased""" +730 17 dataset """fb15k237""" +730 17 model """transd""" +730 17 loss """softplus""" +730 17 regularizer """no""" +730 17 optimizer """adam""" +730 17 training_loop """owa""" +730 17 negative_sampler """basic""" +730 17 evaluator """rankbased""" +730 18 dataset """fb15k237""" +730 18 model """transd""" +730 18 loss """softplus""" +730 18 regularizer """no""" +730 18 optimizer """adam""" +730 18 training_loop """owa""" +730 18 negative_sampler """basic""" +730 18 evaluator """rankbased""" +730 19 dataset """fb15k237""" +730 19 model """transd""" +730 19 loss """softplus""" +730 19 regularizer """no""" +730 19 optimizer """adam""" +730 19 training_loop """owa""" +730 19 negative_sampler """basic""" +730 19 evaluator """rankbased""" +730 20 dataset """fb15k237""" +730 20 model """transd""" +730 20 loss """softplus""" +730 20 regularizer """no""" +730 20 optimizer """adam""" +730 20 training_loop """owa""" +730 20 negative_sampler """basic""" +730 20 evaluator """rankbased""" +730 21 dataset """fb15k237""" +730 21 model """transd""" +730 21 loss """softplus""" +730 21 regularizer """no""" +730 21 optimizer """adam""" +730 21 training_loop """owa""" +730 21 negative_sampler """basic""" +730 21 evaluator """rankbased""" +730 22 dataset """fb15k237""" +730 22 model """transd""" +730 22 loss """softplus""" +730 22 regularizer """no""" +730 22 optimizer """adam""" +730 22 training_loop """owa""" +730 22 negative_sampler """basic""" +730 22 evaluator """rankbased""" +730 23 dataset """fb15k237""" +730 23 model """transd""" +730 23 loss """softplus""" +730 23 regularizer """no""" +730 23 optimizer """adam""" +730 23 training_loop """owa""" +730 23 negative_sampler """basic""" +730 23 evaluator """rankbased""" +730 24 dataset """fb15k237""" +730 24 model """transd""" +730 24 loss """softplus""" +730 24 regularizer """no""" +730 24 optimizer """adam""" +730 24 training_loop """owa""" +730 24 negative_sampler """basic""" +730 24 evaluator 
"""rankbased""" +730 25 dataset """fb15k237""" +730 25 model """transd""" +730 25 loss """softplus""" +730 25 regularizer """no""" +730 25 optimizer """adam""" +730 25 training_loop """owa""" +730 25 negative_sampler """basic""" +730 25 evaluator """rankbased""" +730 26 dataset """fb15k237""" +730 26 model """transd""" +730 26 loss """softplus""" +730 26 regularizer """no""" +730 26 optimizer """adam""" +730 26 training_loop """owa""" +730 26 negative_sampler """basic""" +730 26 evaluator """rankbased""" +730 27 dataset """fb15k237""" +730 27 model """transd""" +730 27 loss """softplus""" +730 27 regularizer """no""" +730 27 optimizer """adam""" +730 27 training_loop """owa""" +730 27 negative_sampler """basic""" +730 27 evaluator """rankbased""" +730 28 dataset """fb15k237""" +730 28 model """transd""" +730 28 loss """softplus""" +730 28 regularizer """no""" +730 28 optimizer """adam""" +730 28 training_loop """owa""" +730 28 negative_sampler """basic""" +730 28 evaluator """rankbased""" +730 29 dataset """fb15k237""" +730 29 model """transd""" +730 29 loss """softplus""" +730 29 regularizer """no""" +730 29 optimizer """adam""" +730 29 training_loop """owa""" +730 29 negative_sampler """basic""" +730 29 evaluator """rankbased""" +730 30 dataset """fb15k237""" +730 30 model """transd""" +730 30 loss """softplus""" +730 30 regularizer """no""" +730 30 optimizer """adam""" +730 30 training_loop """owa""" +730 30 negative_sampler """basic""" +730 30 evaluator """rankbased""" +730 31 dataset """fb15k237""" +730 31 model """transd""" +730 31 loss """softplus""" +730 31 regularizer """no""" +730 31 optimizer """adam""" +730 31 training_loop """owa""" +730 31 negative_sampler """basic""" +730 31 evaluator """rankbased""" +730 32 dataset """fb15k237""" +730 32 model """transd""" +730 32 loss """softplus""" +730 32 regularizer """no""" +730 32 optimizer """adam""" +730 32 training_loop """owa""" +730 32 negative_sampler """basic""" +730 32 evaluator """rankbased""" +731 1 
model.embedding_dim 2.0 +731 1 model.relation_dim 0.0 +731 1 loss.margin 18.118155299115557 +731 1 loss.adversarial_temperature 0.34482057832545226 +731 1 optimizer.lr 0.030445021031361385 +731 1 negative_sampler.num_negs_per_pos 27.0 +731 1 training.batch_size 1.0 +731 2 model.embedding_dim 2.0 +731 2 model.relation_dim 1.0 +731 2 loss.margin 26.614099365711155 +731 2 loss.adversarial_temperature 0.2820894402334417 +731 2 optimizer.lr 0.0030125560575524546 +731 2 negative_sampler.num_negs_per_pos 26.0 +731 2 training.batch_size 1.0 +731 3 model.embedding_dim 1.0 +731 3 model.relation_dim 1.0 +731 3 loss.margin 19.5927810897286 +731 3 loss.adversarial_temperature 0.5179513941926958 +731 3 optimizer.lr 0.09468897092168309 +731 3 negative_sampler.num_negs_per_pos 23.0 +731 3 training.batch_size 1.0 +731 4 model.embedding_dim 2.0 +731 4 model.relation_dim 1.0 +731 4 loss.margin 3.474612997330336 +731 4 loss.adversarial_temperature 0.22661885129223408 +731 4 optimizer.lr 0.001728989126042389 +731 4 negative_sampler.num_negs_per_pos 98.0 +731 4 training.batch_size 0.0 +731 5 model.embedding_dim 0.0 +731 5 model.relation_dim 0.0 +731 5 loss.margin 1.6796558889990687 +731 5 loss.adversarial_temperature 0.8185148216515823 +731 5 optimizer.lr 0.008249108410604904 +731 5 negative_sampler.num_negs_per_pos 61.0 +731 5 training.batch_size 2.0 +731 6 model.embedding_dim 0.0 +731 6 model.relation_dim 1.0 +731 6 loss.margin 19.40236814732296 +731 6 loss.adversarial_temperature 0.7912663250416285 +731 6 optimizer.lr 0.04252708861061792 +731 6 negative_sampler.num_negs_per_pos 85.0 +731 6 training.batch_size 1.0 +731 7 model.embedding_dim 2.0 +731 7 model.relation_dim 2.0 +731 7 loss.margin 1.583457713277141 +731 7 loss.adversarial_temperature 0.8492715557948053 +731 7 optimizer.lr 0.04507923801213805 +731 7 negative_sampler.num_negs_per_pos 37.0 +731 7 training.batch_size 0.0 +731 8 model.embedding_dim 1.0 +731 8 model.relation_dim 2.0 +731 8 loss.margin 12.178924546259854 +731 8 
loss.adversarial_temperature 0.4205053170974284 +731 8 optimizer.lr 0.0023043116001537954 +731 8 negative_sampler.num_negs_per_pos 70.0 +731 8 training.batch_size 0.0 +731 9 model.embedding_dim 2.0 +731 9 model.relation_dim 0.0 +731 9 loss.margin 10.884226936474857 +731 9 loss.adversarial_temperature 0.17864117357927387 +731 9 optimizer.lr 0.009253840302356323 +731 9 negative_sampler.num_negs_per_pos 57.0 +731 9 training.batch_size 2.0 +731 10 model.embedding_dim 2.0 +731 10 model.relation_dim 2.0 +731 10 loss.margin 27.555029579945725 +731 10 loss.adversarial_temperature 0.21440305140985627 +731 10 optimizer.lr 0.012910853162278246 +731 10 negative_sampler.num_negs_per_pos 14.0 +731 10 training.batch_size 1.0 +731 11 model.embedding_dim 0.0 +731 11 model.relation_dim 0.0 +731 11 loss.margin 2.6492337051662456 +731 11 loss.adversarial_temperature 0.9686197108327953 +731 11 optimizer.lr 0.003770067122148111 +731 11 negative_sampler.num_negs_per_pos 21.0 +731 11 training.batch_size 0.0 +731 12 model.embedding_dim 0.0 +731 12 model.relation_dim 0.0 +731 12 loss.margin 1.7949604973245608 +731 12 loss.adversarial_temperature 0.26169546455506465 +731 12 optimizer.lr 0.017242457054796115 +731 12 negative_sampler.num_negs_per_pos 76.0 +731 12 training.batch_size 1.0 +731 13 model.embedding_dim 2.0 +731 13 model.relation_dim 2.0 +731 13 loss.margin 26.214000236604395 +731 13 loss.adversarial_temperature 0.7538064255449483 +731 13 optimizer.lr 0.0026965841932938255 +731 13 negative_sampler.num_negs_per_pos 37.0 +731 13 training.batch_size 1.0 +731 14 model.embedding_dim 2.0 +731 14 model.relation_dim 2.0 +731 14 loss.margin 17.81665158854272 +731 14 loss.adversarial_temperature 0.9934351741717026 +731 14 optimizer.lr 0.0015028268088530577 +731 14 negative_sampler.num_negs_per_pos 26.0 +731 14 training.batch_size 1.0 +731 15 model.embedding_dim 2.0 +731 15 model.relation_dim 0.0 +731 15 loss.margin 25.951569236790245 +731 15 loss.adversarial_temperature 0.9465462139809563 
+731 15 optimizer.lr 0.001057947505714195 +731 15 negative_sampler.num_negs_per_pos 28.0 +731 15 training.batch_size 1.0 +731 16 model.embedding_dim 0.0 +731 16 model.relation_dim 2.0 +731 16 loss.margin 12.695682580929983 +731 16 loss.adversarial_temperature 0.3079565082597436 +731 16 optimizer.lr 0.005996584260121339 +731 16 negative_sampler.num_negs_per_pos 68.0 +731 16 training.batch_size 0.0 +731 17 model.embedding_dim 0.0 +731 17 model.relation_dim 0.0 +731 17 loss.margin 1.5544249561090426 +731 17 loss.adversarial_temperature 0.8079030337137975 +731 17 optimizer.lr 0.04982869048893334 +731 17 negative_sampler.num_negs_per_pos 41.0 +731 17 training.batch_size 2.0 +731 18 model.embedding_dim 1.0 +731 18 model.relation_dim 1.0 +731 18 loss.margin 3.0492966588398493 +731 18 loss.adversarial_temperature 0.9425092938462247 +731 18 optimizer.lr 0.004266498649375975 +731 18 negative_sampler.num_negs_per_pos 48.0 +731 18 training.batch_size 1.0 +731 19 model.embedding_dim 1.0 +731 19 model.relation_dim 2.0 +731 19 loss.margin 2.625949308764748 +731 19 loss.adversarial_temperature 0.46686279479997145 +731 19 optimizer.lr 0.02016825484031089 +731 19 negative_sampler.num_negs_per_pos 41.0 +731 19 training.batch_size 0.0 +731 20 model.embedding_dim 0.0 +731 20 model.relation_dim 1.0 +731 20 loss.margin 12.870504119945378 +731 20 loss.adversarial_temperature 0.9588000070344408 +731 20 optimizer.lr 0.0010725089635788517 +731 20 negative_sampler.num_negs_per_pos 38.0 +731 20 training.batch_size 0.0 +731 1 dataset """fb15k237""" +731 1 model """transd""" +731 1 loss """nssa""" +731 1 regularizer """no""" +731 1 optimizer """adam""" +731 1 training_loop """owa""" +731 1 negative_sampler """basic""" +731 1 evaluator """rankbased""" +731 2 dataset """fb15k237""" +731 2 model """transd""" +731 2 loss """nssa""" +731 2 regularizer """no""" +731 2 optimizer """adam""" +731 2 training_loop """owa""" +731 2 negative_sampler """basic""" +731 2 evaluator """rankbased""" +731 3 dataset 
"""fb15k237""" +731 3 model """transd""" +731 3 loss """nssa""" +731 3 regularizer """no""" +731 3 optimizer """adam""" +731 3 training_loop """owa""" +731 3 negative_sampler """basic""" +731 3 evaluator """rankbased""" +731 4 dataset """fb15k237""" +731 4 model """transd""" +731 4 loss """nssa""" +731 4 regularizer """no""" +731 4 optimizer """adam""" +731 4 training_loop """owa""" +731 4 negative_sampler """basic""" +731 4 evaluator """rankbased""" +731 5 dataset """fb15k237""" +731 5 model """transd""" +731 5 loss """nssa""" +731 5 regularizer """no""" +731 5 optimizer """adam""" +731 5 training_loop """owa""" +731 5 negative_sampler """basic""" +731 5 evaluator """rankbased""" +731 6 dataset """fb15k237""" +731 6 model """transd""" +731 6 loss """nssa""" +731 6 regularizer """no""" +731 6 optimizer """adam""" +731 6 training_loop """owa""" +731 6 negative_sampler """basic""" +731 6 evaluator """rankbased""" +731 7 dataset """fb15k237""" +731 7 model """transd""" +731 7 loss """nssa""" +731 7 regularizer """no""" +731 7 optimizer """adam""" +731 7 training_loop """owa""" +731 7 negative_sampler """basic""" +731 7 evaluator """rankbased""" +731 8 dataset """fb15k237""" +731 8 model """transd""" +731 8 loss """nssa""" +731 8 regularizer """no""" +731 8 optimizer """adam""" +731 8 training_loop """owa""" +731 8 negative_sampler """basic""" +731 8 evaluator """rankbased""" +731 9 dataset """fb15k237""" +731 9 model """transd""" +731 9 loss """nssa""" +731 9 regularizer """no""" +731 9 optimizer """adam""" +731 9 training_loop """owa""" +731 9 negative_sampler """basic""" +731 9 evaluator """rankbased""" +731 10 dataset """fb15k237""" +731 10 model """transd""" +731 10 loss """nssa""" +731 10 regularizer """no""" +731 10 optimizer """adam""" +731 10 training_loop """owa""" +731 10 negative_sampler """basic""" +731 10 evaluator """rankbased""" +731 11 dataset """fb15k237""" +731 11 model """transd""" +731 11 loss """nssa""" +731 11 regularizer """no""" +731 11 
optimizer """adam""" +731 11 training_loop """owa""" +731 11 negative_sampler """basic""" +731 11 evaluator """rankbased""" +731 12 dataset """fb15k237""" +731 12 model """transd""" +731 12 loss """nssa""" +731 12 regularizer """no""" +731 12 optimizer """adam""" +731 12 training_loop """owa""" +731 12 negative_sampler """basic""" +731 12 evaluator """rankbased""" +731 13 dataset """fb15k237""" +731 13 model """transd""" +731 13 loss """nssa""" +731 13 regularizer """no""" +731 13 optimizer """adam""" +731 13 training_loop """owa""" +731 13 negative_sampler """basic""" +731 13 evaluator """rankbased""" +731 14 dataset """fb15k237""" +731 14 model """transd""" +731 14 loss """nssa""" +731 14 regularizer """no""" +731 14 optimizer """adam""" +731 14 training_loop """owa""" +731 14 negative_sampler """basic""" +731 14 evaluator """rankbased""" +731 15 dataset """fb15k237""" +731 15 model """transd""" +731 15 loss """nssa""" +731 15 regularizer """no""" +731 15 optimizer """adam""" +731 15 training_loop """owa""" +731 15 negative_sampler """basic""" +731 15 evaluator """rankbased""" +731 16 dataset """fb15k237""" +731 16 model """transd""" +731 16 loss """nssa""" +731 16 regularizer """no""" +731 16 optimizer """adam""" +731 16 training_loop """owa""" +731 16 negative_sampler """basic""" +731 16 evaluator """rankbased""" +731 17 dataset """fb15k237""" +731 17 model """transd""" +731 17 loss """nssa""" +731 17 regularizer """no""" +731 17 optimizer """adam""" +731 17 training_loop """owa""" +731 17 negative_sampler """basic""" +731 17 evaluator """rankbased""" +731 18 dataset """fb15k237""" +731 18 model """transd""" +731 18 loss """nssa""" +731 18 regularizer """no""" +731 18 optimizer """adam""" +731 18 training_loop """owa""" +731 18 negative_sampler """basic""" +731 18 evaluator """rankbased""" +731 19 dataset """fb15k237""" +731 19 model """transd""" +731 19 loss """nssa""" +731 19 regularizer """no""" +731 19 optimizer """adam""" +731 19 training_loop """owa""" 
+731 19 negative_sampler """basic""" +731 19 evaluator """rankbased""" +731 20 dataset """fb15k237""" +731 20 model """transd""" +731 20 loss """nssa""" +731 20 regularizer """no""" +731 20 optimizer """adam""" +731 20 training_loop """owa""" +731 20 negative_sampler """basic""" +731 20 evaluator """rankbased""" +732 1 model.embedding_dim 0.0 +732 1 model.relation_dim 0.0 +732 1 loss.margin 6.161977245232779 +732 1 loss.adversarial_temperature 0.20886720452683555 +732 1 optimizer.lr 0.06738854074113639 +732 1 negative_sampler.num_negs_per_pos 5.0 +732 1 training.batch_size 2.0 +732 2 model.embedding_dim 0.0 +732 2 model.relation_dim 2.0 +732 2 loss.margin 12.68851335775079 +732 2 loss.adversarial_temperature 0.7975911336756789 +732 2 optimizer.lr 0.05227186805270418 +732 2 negative_sampler.num_negs_per_pos 77.0 +732 2 training.batch_size 0.0 +732 3 model.embedding_dim 2.0 +732 3 model.relation_dim 1.0 +732 3 loss.margin 22.90163589471881 +732 3 loss.adversarial_temperature 0.3303244865937642 +732 3 optimizer.lr 0.0014274112427868383 +732 3 negative_sampler.num_negs_per_pos 4.0 +732 3 training.batch_size 0.0 +732 4 model.embedding_dim 2.0 +732 4 model.relation_dim 2.0 +732 4 loss.margin 4.0267999497598534 +732 4 loss.adversarial_temperature 0.5361366675255955 +732 4 optimizer.lr 0.02257375778687799 +732 4 negative_sampler.num_negs_per_pos 65.0 +732 4 training.batch_size 1.0 +732 5 model.embedding_dim 0.0 +732 5 model.relation_dim 2.0 +732 5 loss.margin 1.7661538801796106 +732 5 loss.adversarial_temperature 0.8858943691139058 +732 5 optimizer.lr 0.04040376954251624 +732 5 negative_sampler.num_negs_per_pos 44.0 +732 5 training.batch_size 1.0 +732 6 model.embedding_dim 2.0 +732 6 model.relation_dim 0.0 +732 6 loss.margin 12.730501252099774 +732 6 loss.adversarial_temperature 0.9137574233995257 +732 6 optimizer.lr 0.03456719157870878 +732 6 negative_sampler.num_negs_per_pos 52.0 +732 6 training.batch_size 0.0 +732 7 model.embedding_dim 0.0 +732 7 model.relation_dim 0.0 
+732 7 loss.margin 15.603361320182987 +732 7 loss.adversarial_temperature 0.22848169086033404 +732 7 optimizer.lr 0.0032038080401120797 +732 7 negative_sampler.num_negs_per_pos 73.0 +732 7 training.batch_size 2.0 +732 8 model.embedding_dim 1.0 +732 8 model.relation_dim 1.0 +732 8 loss.margin 4.764568081704675 +732 8 loss.adversarial_temperature 0.9274332025083974 +732 8 optimizer.lr 0.010281176254712131 +732 8 negative_sampler.num_negs_per_pos 28.0 +732 8 training.batch_size 0.0 +732 9 model.embedding_dim 0.0 +732 9 model.relation_dim 2.0 +732 9 loss.margin 11.32768037431579 +732 9 loss.adversarial_temperature 0.6619277654271589 +732 9 optimizer.lr 0.004026929711684876 +732 9 negative_sampler.num_negs_per_pos 61.0 +732 9 training.batch_size 0.0 +732 10 model.embedding_dim 0.0 +732 10 model.relation_dim 2.0 +732 10 loss.margin 17.83863419966447 +732 10 loss.adversarial_temperature 0.9339751447634703 +732 10 optimizer.lr 0.0014600647419991447 +732 10 negative_sampler.num_negs_per_pos 31.0 +732 10 training.batch_size 0.0 +732 11 model.embedding_dim 2.0 +732 11 model.relation_dim 0.0 +732 11 loss.margin 2.4373357133431846 +732 11 loss.adversarial_temperature 0.41187805118794 +732 11 optimizer.lr 0.04824964738703971 +732 11 negative_sampler.num_negs_per_pos 90.0 +732 11 training.batch_size 1.0 +732 12 model.embedding_dim 1.0 +732 12 model.relation_dim 1.0 +732 12 loss.margin 4.465739153953332 +732 12 loss.adversarial_temperature 0.3030995616939125 +732 12 optimizer.lr 0.009187323399988618 +732 12 negative_sampler.num_negs_per_pos 71.0 +732 12 training.batch_size 2.0 +732 13 model.embedding_dim 1.0 +732 13 model.relation_dim 1.0 +732 13 loss.margin 8.343653239767566 +732 13 loss.adversarial_temperature 0.7330161820465384 +732 13 optimizer.lr 0.011258259463091844 +732 13 negative_sampler.num_negs_per_pos 92.0 +732 13 training.batch_size 2.0 +732 14 model.embedding_dim 2.0 +732 14 model.relation_dim 2.0 +732 14 loss.margin 9.89930706861949 +732 14 
loss.adversarial_temperature 0.22919956992053886 +732 14 optimizer.lr 0.019771133024047705 +732 14 negative_sampler.num_negs_per_pos 12.0 +732 14 training.batch_size 2.0 +732 15 model.embedding_dim 0.0 +732 15 model.relation_dim 2.0 +732 15 loss.margin 12.514825225279584 +732 15 loss.adversarial_temperature 0.7516513176003876 +732 15 optimizer.lr 0.0014443986237981745 +732 15 negative_sampler.num_negs_per_pos 0.0 +732 15 training.batch_size 1.0 +732 16 model.embedding_dim 1.0 +732 16 model.relation_dim 1.0 +732 16 loss.margin 15.942388241126814 +732 16 loss.adversarial_temperature 0.11359079624346807 +732 16 optimizer.lr 0.005574980606133838 +732 16 negative_sampler.num_negs_per_pos 0.0 +732 16 training.batch_size 2.0 +732 17 model.embedding_dim 2.0 +732 17 model.relation_dim 2.0 +732 17 loss.margin 6.010648698814912 +732 17 loss.adversarial_temperature 0.5300188928983279 +732 17 optimizer.lr 0.0017410517827524494 +732 17 negative_sampler.num_negs_per_pos 10.0 +732 17 training.batch_size 2.0 +732 18 model.embedding_dim 0.0 +732 18 model.relation_dim 0.0 +732 18 loss.margin 18.644049250914012 +732 18 loss.adversarial_temperature 0.7502491313446136 +732 18 optimizer.lr 0.09722564265184806 +732 18 negative_sampler.num_negs_per_pos 34.0 +732 18 training.batch_size 1.0 +732 19 model.embedding_dim 1.0 +732 19 model.relation_dim 1.0 +732 19 loss.margin 14.06484379088205 +732 19 loss.adversarial_temperature 0.818532840317779 +732 19 optimizer.lr 0.00275417329717138 +732 19 negative_sampler.num_negs_per_pos 56.0 +732 19 training.batch_size 2.0 +732 20 model.embedding_dim 0.0 +732 20 model.relation_dim 1.0 +732 20 loss.margin 24.52102715361133 +732 20 loss.adversarial_temperature 0.5250479040935115 +732 20 optimizer.lr 0.07477043078446141 +732 20 negative_sampler.num_negs_per_pos 85.0 +732 20 training.batch_size 1.0 +732 21 model.embedding_dim 2.0 +732 21 model.relation_dim 2.0 +732 21 loss.margin 6.217102835626869 +732 21 loss.adversarial_temperature 0.5360229451704377 +732 
21 optimizer.lr 0.0010172666325273206 +732 21 negative_sampler.num_negs_per_pos 91.0 +732 21 training.batch_size 1.0 +732 22 model.embedding_dim 0.0 +732 22 model.relation_dim 0.0 +732 22 loss.margin 2.3809623360611325 +732 22 loss.adversarial_temperature 0.18711719715073882 +732 22 optimizer.lr 0.03921942889090885 +732 22 negative_sampler.num_negs_per_pos 77.0 +732 22 training.batch_size 0.0 +732 23 model.embedding_dim 0.0 +732 23 model.relation_dim 1.0 +732 23 loss.margin 14.753836659075288 +732 23 loss.adversarial_temperature 0.703067772393956 +732 23 optimizer.lr 0.016277860045381547 +732 23 negative_sampler.num_negs_per_pos 10.0 +732 23 training.batch_size 0.0 +732 24 model.embedding_dim 2.0 +732 24 model.relation_dim 1.0 +732 24 loss.margin 26.96178925828914 +732 24 loss.adversarial_temperature 0.7545264255045686 +732 24 optimizer.lr 0.0011665432255065244 +732 24 negative_sampler.num_negs_per_pos 55.0 +732 24 training.batch_size 1.0 +732 25 model.embedding_dim 0.0 +732 25 model.relation_dim 1.0 +732 25 loss.margin 8.376968300090422 +732 25 loss.adversarial_temperature 0.1789197465042947 +732 25 optimizer.lr 0.05124538879561378 +732 25 negative_sampler.num_negs_per_pos 99.0 +732 25 training.batch_size 2.0 +732 26 model.embedding_dim 0.0 +732 26 model.relation_dim 0.0 +732 26 loss.margin 13.882696304771356 +732 26 loss.adversarial_temperature 0.8469511037959288 +732 26 optimizer.lr 0.001386755461308103 +732 26 negative_sampler.num_negs_per_pos 82.0 +732 26 training.batch_size 2.0 +732 27 model.embedding_dim 1.0 +732 27 model.relation_dim 1.0 +732 27 loss.margin 11.62098076149873 +732 27 loss.adversarial_temperature 0.7527989855280688 +732 27 optimizer.lr 0.04638842043829376 +732 27 negative_sampler.num_negs_per_pos 22.0 +732 27 training.batch_size 2.0 +732 28 model.embedding_dim 2.0 +732 28 model.relation_dim 0.0 +732 28 loss.margin 3.443372810560209 +732 28 loss.adversarial_temperature 0.24861743430493533 +732 28 optimizer.lr 0.001467461700831933 +732 28 
negative_sampler.num_negs_per_pos 20.0 +732 28 training.batch_size 2.0 +732 29 model.embedding_dim 0.0 +732 29 model.relation_dim 2.0 +732 29 loss.margin 9.998758019006985 +732 29 loss.adversarial_temperature 0.6903844683702829 +732 29 optimizer.lr 0.003877225098157635 +732 29 negative_sampler.num_negs_per_pos 38.0 +732 29 training.batch_size 1.0 +732 30 model.embedding_dim 0.0 +732 30 model.relation_dim 2.0 +732 30 loss.margin 4.5199745293447915 +732 30 loss.adversarial_temperature 0.18899961958943087 +732 30 optimizer.lr 0.04123407369279989 +732 30 negative_sampler.num_negs_per_pos 72.0 +732 30 training.batch_size 2.0 +732 31 model.embedding_dim 1.0 +732 31 model.relation_dim 2.0 +732 31 loss.margin 28.61124193080256 +732 31 loss.adversarial_temperature 0.42978945913843475 +732 31 optimizer.lr 0.023481726946579306 +732 31 negative_sampler.num_negs_per_pos 54.0 +732 31 training.batch_size 0.0 +732 32 model.embedding_dim 1.0 +732 32 model.relation_dim 0.0 +732 32 loss.margin 13.764700776466158 +732 32 loss.adversarial_temperature 0.4205982639314761 +732 32 optimizer.lr 0.0033339134466793844 +732 32 negative_sampler.num_negs_per_pos 78.0 +732 32 training.batch_size 0.0 +732 33 model.embedding_dim 2.0 +732 33 model.relation_dim 0.0 +732 33 loss.margin 16.70466787189458 +732 33 loss.adversarial_temperature 0.511661435316472 +732 33 optimizer.lr 0.002265411666670062 +732 33 negative_sampler.num_negs_per_pos 11.0 +732 33 training.batch_size 1.0 +732 34 model.embedding_dim 0.0 +732 34 model.relation_dim 1.0 +732 34 loss.margin 9.59753948615828 +732 34 loss.adversarial_temperature 0.7370264519631333 +732 34 optimizer.lr 0.010792524660817917 +732 34 negative_sampler.num_negs_per_pos 17.0 +732 34 training.batch_size 2.0 +732 35 model.embedding_dim 2.0 +732 35 model.relation_dim 0.0 +732 35 loss.margin 13.536403417748472 +732 35 loss.adversarial_temperature 0.945352291638136 +732 35 optimizer.lr 0.0012900451940526729 +732 35 negative_sampler.num_negs_per_pos 77.0 +732 35 
training.batch_size 0.0 +732 36 model.embedding_dim 0.0 +732 36 model.relation_dim 2.0 +732 36 loss.margin 5.73191213688796 +732 36 loss.adversarial_temperature 0.5054332661149515 +732 36 optimizer.lr 0.06943640225876178 +732 36 negative_sampler.num_negs_per_pos 28.0 +732 36 training.batch_size 2.0 +732 37 model.embedding_dim 2.0 +732 37 model.relation_dim 0.0 +732 37 loss.margin 1.4426239109253538 +732 37 loss.adversarial_temperature 0.10873222308211404 +732 37 optimizer.lr 0.05474905648945902 +732 37 negative_sampler.num_negs_per_pos 84.0 +732 37 training.batch_size 1.0 +732 38 model.embedding_dim 2.0 +732 38 model.relation_dim 1.0 +732 38 loss.margin 17.56581395692488 +732 38 loss.adversarial_temperature 0.7189887569618939 +732 38 optimizer.lr 0.07522214185272734 +732 38 negative_sampler.num_negs_per_pos 74.0 +732 38 training.batch_size 0.0 +732 39 model.embedding_dim 1.0 +732 39 model.relation_dim 1.0 +732 39 loss.margin 21.348125167139223 +732 39 loss.adversarial_temperature 0.39182916437831183 +732 39 optimizer.lr 0.02304306696631378 +732 39 negative_sampler.num_negs_per_pos 25.0 +732 39 training.batch_size 2.0 +732 40 model.embedding_dim 0.0 +732 40 model.relation_dim 1.0 +732 40 loss.margin 9.381036177546042 +732 40 loss.adversarial_temperature 0.5515403050050464 +732 40 optimizer.lr 0.00514290003062865 +732 40 negative_sampler.num_negs_per_pos 9.0 +732 40 training.batch_size 0.0 +732 41 model.embedding_dim 0.0 +732 41 model.relation_dim 2.0 +732 41 loss.margin 22.569508766077245 +732 41 loss.adversarial_temperature 0.9627324984868718 +732 41 optimizer.lr 0.0026192142083879214 +732 41 negative_sampler.num_negs_per_pos 91.0 +732 41 training.batch_size 2.0 +732 42 model.embedding_dim 2.0 +732 42 model.relation_dim 1.0 +732 42 loss.margin 22.78272782449251 +732 42 loss.adversarial_temperature 0.6711579971056932 +732 42 optimizer.lr 0.014882778094282678 +732 42 negative_sampler.num_negs_per_pos 40.0 +732 42 training.batch_size 2.0 +732 43 model.embedding_dim 
2.0 +732 43 model.relation_dim 1.0 +732 43 loss.margin 23.786393059172596 +732 43 loss.adversarial_temperature 0.28202141591590035 +732 43 optimizer.lr 0.0010172862665900834 +732 43 negative_sampler.num_negs_per_pos 59.0 +732 43 training.batch_size 0.0 +732 44 model.embedding_dim 2.0 +732 44 model.relation_dim 1.0 +732 44 loss.margin 2.0716304043241798 +732 44 loss.adversarial_temperature 0.2989564862626402 +732 44 optimizer.lr 0.0016464414898114076 +732 44 negative_sampler.num_negs_per_pos 87.0 +732 44 training.batch_size 0.0 +732 45 model.embedding_dim 2.0 +732 45 model.relation_dim 1.0 +732 45 loss.margin 20.739093616529857 +732 45 loss.adversarial_temperature 0.46247414746089566 +732 45 optimizer.lr 0.06937458209707886 +732 45 negative_sampler.num_negs_per_pos 24.0 +732 45 training.batch_size 1.0 +732 46 model.embedding_dim 2.0 +732 46 model.relation_dim 2.0 +732 46 loss.margin 5.750355932021616 +732 46 loss.adversarial_temperature 0.6597062833811698 +732 46 optimizer.lr 0.02823997898562071 +732 46 negative_sampler.num_negs_per_pos 59.0 +732 46 training.batch_size 0.0 +732 1 dataset """fb15k237""" +732 1 model """transd""" +732 1 loss """nssa""" +732 1 regularizer """no""" +732 1 optimizer """adam""" +732 1 training_loop """owa""" +732 1 negative_sampler """basic""" +732 1 evaluator """rankbased""" +732 2 dataset """fb15k237""" +732 2 model """transd""" +732 2 loss """nssa""" +732 2 regularizer """no""" +732 2 optimizer """adam""" +732 2 training_loop """owa""" +732 2 negative_sampler """basic""" +732 2 evaluator """rankbased""" +732 3 dataset """fb15k237""" +732 3 model """transd""" +732 3 loss """nssa""" +732 3 regularizer """no""" +732 3 optimizer """adam""" +732 3 training_loop """owa""" +732 3 negative_sampler """basic""" +732 3 evaluator """rankbased""" +732 4 dataset """fb15k237""" +732 4 model """transd""" +732 4 loss """nssa""" +732 4 regularizer """no""" +732 4 optimizer """adam""" +732 4 training_loop """owa""" +732 4 negative_sampler """basic""" 
+732 4 evaluator """rankbased""" +732 5 dataset """fb15k237""" +732 5 model """transd""" +732 5 loss """nssa""" +732 5 regularizer """no""" +732 5 optimizer """adam""" +732 5 training_loop """owa""" +732 5 negative_sampler """basic""" +732 5 evaluator """rankbased""" +732 6 dataset """fb15k237""" +732 6 model """transd""" +732 6 loss """nssa""" +732 6 regularizer """no""" +732 6 optimizer """adam""" +732 6 training_loop """owa""" +732 6 negative_sampler """basic""" +732 6 evaluator """rankbased""" +732 7 dataset """fb15k237""" +732 7 model """transd""" +732 7 loss """nssa""" +732 7 regularizer """no""" +732 7 optimizer """adam""" +732 7 training_loop """owa""" +732 7 negative_sampler """basic""" +732 7 evaluator """rankbased""" +732 8 dataset """fb15k237""" +732 8 model """transd""" +732 8 loss """nssa""" +732 8 regularizer """no""" +732 8 optimizer """adam""" +732 8 training_loop """owa""" +732 8 negative_sampler """basic""" +732 8 evaluator """rankbased""" +732 9 dataset """fb15k237""" +732 9 model """transd""" +732 9 loss """nssa""" +732 9 regularizer """no""" +732 9 optimizer """adam""" +732 9 training_loop """owa""" +732 9 negative_sampler """basic""" +732 9 evaluator """rankbased""" +732 10 dataset """fb15k237""" +732 10 model """transd""" +732 10 loss """nssa""" +732 10 regularizer """no""" +732 10 optimizer """adam""" +732 10 training_loop """owa""" +732 10 negative_sampler """basic""" +732 10 evaluator """rankbased""" +732 11 dataset """fb15k237""" +732 11 model """transd""" +732 11 loss """nssa""" +732 11 regularizer """no""" +732 11 optimizer """adam""" +732 11 training_loop """owa""" +732 11 negative_sampler """basic""" +732 11 evaluator """rankbased""" +732 12 dataset """fb15k237""" +732 12 model """transd""" +732 12 loss """nssa""" +732 12 regularizer """no""" +732 12 optimizer """adam""" +732 12 training_loop """owa""" +732 12 negative_sampler """basic""" +732 12 evaluator """rankbased""" +732 13 dataset """fb15k237""" +732 13 model """transd""" +732 
13 loss """nssa""" +732 13 regularizer """no""" +732 13 optimizer """adam""" +732 13 training_loop """owa""" +732 13 negative_sampler """basic""" +732 13 evaluator """rankbased""" +732 14 dataset """fb15k237""" +732 14 model """transd""" +732 14 loss """nssa""" +732 14 regularizer """no""" +732 14 optimizer """adam""" +732 14 training_loop """owa""" +732 14 negative_sampler """basic""" +732 14 evaluator """rankbased""" +732 15 dataset """fb15k237""" +732 15 model """transd""" +732 15 loss """nssa""" +732 15 regularizer """no""" +732 15 optimizer """adam""" +732 15 training_loop """owa""" +732 15 negative_sampler """basic""" +732 15 evaluator """rankbased""" +732 16 dataset """fb15k237""" +732 16 model """transd""" +732 16 loss """nssa""" +732 16 regularizer """no""" +732 16 optimizer """adam""" +732 16 training_loop """owa""" +732 16 negative_sampler """basic""" +732 16 evaluator """rankbased""" +732 17 dataset """fb15k237""" +732 17 model """transd""" +732 17 loss """nssa""" +732 17 regularizer """no""" +732 17 optimizer """adam""" +732 17 training_loop """owa""" +732 17 negative_sampler """basic""" +732 17 evaluator """rankbased""" +732 18 dataset """fb15k237""" +732 18 model """transd""" +732 18 loss """nssa""" +732 18 regularizer """no""" +732 18 optimizer """adam""" +732 18 training_loop """owa""" +732 18 negative_sampler """basic""" +732 18 evaluator """rankbased""" +732 19 dataset """fb15k237""" +732 19 model """transd""" +732 19 loss """nssa""" +732 19 regularizer """no""" +732 19 optimizer """adam""" +732 19 training_loop """owa""" +732 19 negative_sampler """basic""" +732 19 evaluator """rankbased""" +732 20 dataset """fb15k237""" +732 20 model """transd""" +732 20 loss """nssa""" +732 20 regularizer """no""" +732 20 optimizer """adam""" +732 20 training_loop """owa""" +732 20 negative_sampler """basic""" +732 20 evaluator """rankbased""" +732 21 dataset """fb15k237""" +732 21 model """transd""" +732 21 loss """nssa""" +732 21 regularizer """no""" +732 21 
optimizer """adam""" +732 21 training_loop """owa""" +732 21 negative_sampler """basic""" +732 21 evaluator """rankbased""" +732 22 dataset """fb15k237""" +732 22 model """transd""" +732 22 loss """nssa""" +732 22 regularizer """no""" +732 22 optimizer """adam""" +732 22 training_loop """owa""" +732 22 negative_sampler """basic""" +732 22 evaluator """rankbased""" +732 23 dataset """fb15k237""" +732 23 model """transd""" +732 23 loss """nssa""" +732 23 regularizer """no""" +732 23 optimizer """adam""" +732 23 training_loop """owa""" +732 23 negative_sampler """basic""" +732 23 evaluator """rankbased""" +732 24 dataset """fb15k237""" +732 24 model """transd""" +732 24 loss """nssa""" +732 24 regularizer """no""" +732 24 optimizer """adam""" +732 24 training_loop """owa""" +732 24 negative_sampler """basic""" +732 24 evaluator """rankbased""" +732 25 dataset """fb15k237""" +732 25 model """transd""" +732 25 loss """nssa""" +732 25 regularizer """no""" +732 25 optimizer """adam""" +732 25 training_loop """owa""" +732 25 negative_sampler """basic""" +732 25 evaluator """rankbased""" +732 26 dataset """fb15k237""" +732 26 model """transd""" +732 26 loss """nssa""" +732 26 regularizer """no""" +732 26 optimizer """adam""" +732 26 training_loop """owa""" +732 26 negative_sampler """basic""" +732 26 evaluator """rankbased""" +732 27 dataset """fb15k237""" +732 27 model """transd""" +732 27 loss """nssa""" +732 27 regularizer """no""" +732 27 optimizer """adam""" +732 27 training_loop """owa""" +732 27 negative_sampler """basic""" +732 27 evaluator """rankbased""" +732 28 dataset """fb15k237""" +732 28 model """transd""" +732 28 loss """nssa""" +732 28 regularizer """no""" +732 28 optimizer """adam""" +732 28 training_loop """owa""" +732 28 negative_sampler """basic""" +732 28 evaluator """rankbased""" +732 29 dataset """fb15k237""" +732 29 model """transd""" +732 29 loss """nssa""" +732 29 regularizer """no""" +732 29 optimizer """adam""" +732 29 training_loop """owa""" 
+732 29 negative_sampler """basic""" +732 29 evaluator """rankbased""" +732 30 dataset """fb15k237""" +732 30 model """transd""" +732 30 loss """nssa""" +732 30 regularizer """no""" +732 30 optimizer """adam""" +732 30 training_loop """owa""" +732 30 negative_sampler """basic""" +732 30 evaluator """rankbased""" +732 31 dataset """fb15k237""" +732 31 model """transd""" +732 31 loss """nssa""" +732 31 regularizer """no""" +732 31 optimizer """adam""" +732 31 training_loop """owa""" +732 31 negative_sampler """basic""" +732 31 evaluator """rankbased""" +732 32 dataset """fb15k237""" +732 32 model """transd""" +732 32 loss """nssa""" +732 32 regularizer """no""" +732 32 optimizer """adam""" +732 32 training_loop """owa""" +732 32 negative_sampler """basic""" +732 32 evaluator """rankbased""" +732 33 dataset """fb15k237""" +732 33 model """transd""" +732 33 loss """nssa""" +732 33 regularizer """no""" +732 33 optimizer """adam""" +732 33 training_loop """owa""" +732 33 negative_sampler """basic""" +732 33 evaluator """rankbased""" +732 34 dataset """fb15k237""" +732 34 model """transd""" +732 34 loss """nssa""" +732 34 regularizer """no""" +732 34 optimizer """adam""" +732 34 training_loop """owa""" +732 34 negative_sampler """basic""" +732 34 evaluator """rankbased""" +732 35 dataset """fb15k237""" +732 35 model """transd""" +732 35 loss """nssa""" +732 35 regularizer """no""" +732 35 optimizer """adam""" +732 35 training_loop """owa""" +732 35 negative_sampler """basic""" +732 35 evaluator """rankbased""" +732 36 dataset """fb15k237""" +732 36 model """transd""" +732 36 loss """nssa""" +732 36 regularizer """no""" +732 36 optimizer """adam""" +732 36 training_loop """owa""" +732 36 negative_sampler """basic""" +732 36 evaluator """rankbased""" +732 37 dataset """fb15k237""" +732 37 model """transd""" +732 37 loss """nssa""" +732 37 regularizer """no""" +732 37 optimizer """adam""" +732 37 training_loop """owa""" +732 37 negative_sampler """basic""" +732 37 evaluator 
"""rankbased""" +732 38 dataset """fb15k237""" +732 38 model """transd""" +732 38 loss """nssa""" +732 38 regularizer """no""" +732 38 optimizer """adam""" +732 38 training_loop """owa""" +732 38 negative_sampler """basic""" +732 38 evaluator """rankbased""" +732 39 dataset """fb15k237""" +732 39 model """transd""" +732 39 loss """nssa""" +732 39 regularizer """no""" +732 39 optimizer """adam""" +732 39 training_loop """owa""" +732 39 negative_sampler """basic""" +732 39 evaluator """rankbased""" +732 40 dataset """fb15k237""" +732 40 model """transd""" +732 40 loss """nssa""" +732 40 regularizer """no""" +732 40 optimizer """adam""" +732 40 training_loop """owa""" +732 40 negative_sampler """basic""" +732 40 evaluator """rankbased""" +732 41 dataset """fb15k237""" +732 41 model """transd""" +732 41 loss """nssa""" +732 41 regularizer """no""" +732 41 optimizer """adam""" +732 41 training_loop """owa""" +732 41 negative_sampler """basic""" +732 41 evaluator """rankbased""" +732 42 dataset """fb15k237""" +732 42 model """transd""" +732 42 loss """nssa""" +732 42 regularizer """no""" +732 42 optimizer """adam""" +732 42 training_loop """owa""" +732 42 negative_sampler """basic""" +732 42 evaluator """rankbased""" +732 43 dataset """fb15k237""" +732 43 model """transd""" +732 43 loss """nssa""" +732 43 regularizer """no""" +732 43 optimizer """adam""" +732 43 training_loop """owa""" +732 43 negative_sampler """basic""" +732 43 evaluator """rankbased""" +732 44 dataset """fb15k237""" +732 44 model """transd""" +732 44 loss """nssa""" +732 44 regularizer """no""" +732 44 optimizer """adam""" +732 44 training_loop """owa""" +732 44 negative_sampler """basic""" +732 44 evaluator """rankbased""" +732 45 dataset """fb15k237""" +732 45 model """transd""" +732 45 loss """nssa""" +732 45 regularizer """no""" +732 45 optimizer """adam""" +732 45 training_loop """owa""" +732 45 negative_sampler """basic""" +732 45 evaluator """rankbased""" +732 46 dataset """fb15k237""" +732 46 
model """transd""" +732 46 loss """nssa""" +732 46 regularizer """no""" +732 46 optimizer """adam""" +732 46 training_loop """owa""" +732 46 negative_sampler """basic""" +732 46 evaluator """rankbased""" +733 1 model.embedding_dim 2.0 +733 1 model.relation_dim 1.0 +733 1 optimizer.lr 0.002915038652172211 +733 1 training.batch_size 2.0 +733 1 training.label_smoothing 0.003191193961238121 +733 2 model.embedding_dim 2.0 +733 2 model.relation_dim 0.0 +733 2 optimizer.lr 0.0692487959623441 +733 2 training.batch_size 2.0 +733 2 training.label_smoothing 0.013220504126769055 +733 3 model.embedding_dim 0.0 +733 3 model.relation_dim 1.0 +733 3 optimizer.lr 0.02892702318380992 +733 3 training.batch_size 2.0 +733 3 training.label_smoothing 0.0020096202150075106 +733 4 model.embedding_dim 1.0 +733 4 model.relation_dim 1.0 +733 4 optimizer.lr 0.03740601538358211 +733 4 training.batch_size 1.0 +733 4 training.label_smoothing 0.007694683560822328 +733 5 model.embedding_dim 1.0 +733 5 model.relation_dim 0.0 +733 5 optimizer.lr 0.05331367439347468 +733 5 training.batch_size 2.0 +733 5 training.label_smoothing 0.013524317810949728 +733 6 model.embedding_dim 2.0 +733 6 model.relation_dim 0.0 +733 6 optimizer.lr 0.004895502393644244 +733 6 training.batch_size 1.0 +733 6 training.label_smoothing 0.5353091910214609 +733 7 model.embedding_dim 2.0 +733 7 model.relation_dim 1.0 +733 7 optimizer.lr 0.0016247977396494993 +733 7 training.batch_size 1.0 +733 7 training.label_smoothing 0.8270254024049767 +733 8 model.embedding_dim 0.0 +733 8 model.relation_dim 0.0 +733 8 optimizer.lr 0.014712903706016012 +733 8 training.batch_size 1.0 +733 8 training.label_smoothing 0.08136088710780424 +733 9 model.embedding_dim 0.0 +733 9 model.relation_dim 2.0 +733 9 optimizer.lr 0.021327762217471038 +733 9 training.batch_size 2.0 +733 9 training.label_smoothing 0.02138235801213182 +733 1 dataset """fb15k237""" +733 1 model """transd""" +733 1 loss """bceaftersigmoid""" +733 1 regularizer """no""" +733 1 
optimizer """adam""" +733 1 training_loop """lcwa""" +733 1 evaluator """rankbased""" +733 2 dataset """fb15k237""" +733 2 model """transd""" +733 2 loss """bceaftersigmoid""" +733 2 regularizer """no""" +733 2 optimizer """adam""" +733 2 training_loop """lcwa""" +733 2 evaluator """rankbased""" +733 3 dataset """fb15k237""" +733 3 model """transd""" +733 3 loss """bceaftersigmoid""" +733 3 regularizer """no""" +733 3 optimizer """adam""" +733 3 training_loop """lcwa""" +733 3 evaluator """rankbased""" +733 4 dataset """fb15k237""" +733 4 model """transd""" +733 4 loss """bceaftersigmoid""" +733 4 regularizer """no""" +733 4 optimizer """adam""" +733 4 training_loop """lcwa""" +733 4 evaluator """rankbased""" +733 5 dataset """fb15k237""" +733 5 model """transd""" +733 5 loss """bceaftersigmoid""" +733 5 regularizer """no""" +733 5 optimizer """adam""" +733 5 training_loop """lcwa""" +733 5 evaluator """rankbased""" +733 6 dataset """fb15k237""" +733 6 model """transd""" +733 6 loss """bceaftersigmoid""" +733 6 regularizer """no""" +733 6 optimizer """adam""" +733 6 training_loop """lcwa""" +733 6 evaluator """rankbased""" +733 7 dataset """fb15k237""" +733 7 model """transd""" +733 7 loss """bceaftersigmoid""" +733 7 regularizer """no""" +733 7 optimizer """adam""" +733 7 training_loop """lcwa""" +733 7 evaluator """rankbased""" +733 8 dataset """fb15k237""" +733 8 model """transd""" +733 8 loss """bceaftersigmoid""" +733 8 regularizer """no""" +733 8 optimizer """adam""" +733 8 training_loop """lcwa""" +733 8 evaluator """rankbased""" +733 9 dataset """fb15k237""" +733 9 model """transd""" +733 9 loss """bceaftersigmoid""" +733 9 regularizer """no""" +733 9 optimizer """adam""" +733 9 training_loop """lcwa""" +733 9 evaluator """rankbased""" +734 1 model.embedding_dim 0.0 +734 1 model.relation_dim 0.0 +734 1 optimizer.lr 0.0014341597525936477 +734 1 training.batch_size 0.0 +734 1 training.label_smoothing 0.0032825232257803444 +734 2 model.embedding_dim 2.0 +734 2 
model.relation_dim 2.0 +734 2 optimizer.lr 0.017443483620647193 +734 2 training.batch_size 0.0 +734 2 training.label_smoothing 0.03602950831680717 +734 3 model.embedding_dim 2.0 +734 3 model.relation_dim 2.0 +734 3 optimizer.lr 0.0031955869117022283 +734 3 training.batch_size 2.0 +734 3 training.label_smoothing 0.0019379168499960948 +734 4 model.embedding_dim 2.0 +734 4 model.relation_dim 2.0 +734 4 optimizer.lr 0.00638449550813365 +734 4 training.batch_size 2.0 +734 4 training.label_smoothing 0.056965687992595625 +734 1 dataset """fb15k237""" +734 1 model """transd""" +734 1 loss """softplus""" +734 1 regularizer """no""" +734 1 optimizer """adam""" +734 1 training_loop """lcwa""" +734 1 evaluator """rankbased""" +734 2 dataset """fb15k237""" +734 2 model """transd""" +734 2 loss """softplus""" +734 2 regularizer """no""" +734 2 optimizer """adam""" +734 2 training_loop """lcwa""" +734 2 evaluator """rankbased""" +734 3 dataset """fb15k237""" +734 3 model """transd""" +734 3 loss """softplus""" +734 3 regularizer """no""" +734 3 optimizer """adam""" +734 3 training_loop """lcwa""" +734 3 evaluator """rankbased""" +734 4 dataset """fb15k237""" +734 4 model """transd""" +734 4 loss """softplus""" +734 4 regularizer """no""" +734 4 optimizer """adam""" +734 4 training_loop """lcwa""" +734 4 evaluator """rankbased""" +735 1 model.embedding_dim 2.0 +735 1 model.relation_dim 2.0 +735 1 optimizer.lr 0.0010222935986564754 +735 1 training.batch_size 2.0 +735 1 training.label_smoothing 0.06414772041208469 +735 2 model.embedding_dim 2.0 +735 2 model.relation_dim 0.0 +735 2 optimizer.lr 0.009925547885863265 +735 2 training.batch_size 0.0 +735 2 training.label_smoothing 0.32208764830027614 +735 3 model.embedding_dim 2.0 +735 3 model.relation_dim 2.0 +735 3 optimizer.lr 0.0021975069957006084 +735 3 training.batch_size 1.0 +735 3 training.label_smoothing 0.6288249137204619 +735 4 model.embedding_dim 0.0 +735 4 model.relation_dim 2.0 +735 4 optimizer.lr 0.003833370940881609 +735 
4 training.batch_size 0.0 +735 4 training.label_smoothing 0.18255490977965025 +735 5 model.embedding_dim 0.0 +735 5 model.relation_dim 1.0 +735 5 optimizer.lr 0.08312847325979027 +735 5 training.batch_size 0.0 +735 5 training.label_smoothing 0.09407633325025481 +735 6 model.embedding_dim 0.0 +735 6 model.relation_dim 0.0 +735 6 optimizer.lr 0.018914322300703695 +735 6 training.batch_size 2.0 +735 6 training.label_smoothing 0.1699451417336441 +735 7 model.embedding_dim 2.0 +735 7 model.relation_dim 0.0 +735 7 optimizer.lr 0.0014440206731841745 +735 7 training.batch_size 1.0 +735 7 training.label_smoothing 0.0698000559669202 +735 8 model.embedding_dim 1.0 +735 8 model.relation_dim 1.0 +735 8 optimizer.lr 0.0055536861118790914 +735 8 training.batch_size 2.0 +735 8 training.label_smoothing 0.03328929139729963 +735 9 model.embedding_dim 2.0 +735 9 model.relation_dim 0.0 +735 9 optimizer.lr 0.06177474637837965 +735 9 training.batch_size 0.0 +735 9 training.label_smoothing 0.655372095868655 +735 1 dataset """fb15k237""" +735 1 model """transd""" +735 1 loss """bceaftersigmoid""" +735 1 regularizer """no""" +735 1 optimizer """adam""" +735 1 training_loop """lcwa""" +735 1 evaluator """rankbased""" +735 2 dataset """fb15k237""" +735 2 model """transd""" +735 2 loss """bceaftersigmoid""" +735 2 regularizer """no""" +735 2 optimizer """adam""" +735 2 training_loop """lcwa""" +735 2 evaluator """rankbased""" +735 3 dataset """fb15k237""" +735 3 model """transd""" +735 3 loss """bceaftersigmoid""" +735 3 regularizer """no""" +735 3 optimizer """adam""" +735 3 training_loop """lcwa""" +735 3 evaluator """rankbased""" +735 4 dataset """fb15k237""" +735 4 model """transd""" +735 4 loss """bceaftersigmoid""" +735 4 regularizer """no""" +735 4 optimizer """adam""" +735 4 training_loop """lcwa""" +735 4 evaluator """rankbased""" +735 5 dataset """fb15k237""" +735 5 model """transd""" +735 5 loss """bceaftersigmoid""" +735 5 regularizer """no""" +735 5 optimizer """adam""" +735 5 
training_loop """lcwa""" +735 5 evaluator """rankbased""" +735 6 dataset """fb15k237""" +735 6 model """transd""" +735 6 loss """bceaftersigmoid""" +735 6 regularizer """no""" +735 6 optimizer """adam""" +735 6 training_loop """lcwa""" +735 6 evaluator """rankbased""" +735 7 dataset """fb15k237""" +735 7 model """transd""" +735 7 loss """bceaftersigmoid""" +735 7 regularizer """no""" +735 7 optimizer """adam""" +735 7 training_loop """lcwa""" +735 7 evaluator """rankbased""" +735 8 dataset """fb15k237""" +735 8 model """transd""" +735 8 loss """bceaftersigmoid""" +735 8 regularizer """no""" +735 8 optimizer """adam""" +735 8 training_loop """lcwa""" +735 8 evaluator """rankbased""" +735 9 dataset """fb15k237""" +735 9 model """transd""" +735 9 loss """bceaftersigmoid""" +735 9 regularizer """no""" +735 9 optimizer """adam""" +735 9 training_loop """lcwa""" +735 9 evaluator """rankbased""" +736 1 model.embedding_dim 0.0 +736 1 model.relation_dim 2.0 +736 1 optimizer.lr 0.007443491831494881 +736 1 training.batch_size 1.0 +736 1 training.label_smoothing 0.02449750949573845 +736 2 model.embedding_dim 0.0 +736 2 model.relation_dim 2.0 +736 2 optimizer.lr 0.002515121552807088 +736 2 training.batch_size 0.0 +736 2 training.label_smoothing 0.03112062902691912 +736 3 model.embedding_dim 1.0 +736 3 model.relation_dim 2.0 +736 3 optimizer.lr 0.016784283134615264 +736 3 training.batch_size 0.0 +736 3 training.label_smoothing 0.0013395664227200575 +736 4 model.embedding_dim 2.0 +736 4 model.relation_dim 0.0 +736 4 optimizer.lr 0.013021609204881898 +736 4 training.batch_size 2.0 +736 4 training.label_smoothing 0.00551614748007953 +736 5 model.embedding_dim 0.0 +736 5 model.relation_dim 2.0 +736 5 optimizer.lr 0.005088109146628144 +736 5 training.batch_size 0.0 +736 5 training.label_smoothing 0.10447165999576238 +736 6 model.embedding_dim 2.0 +736 6 model.relation_dim 0.0 +736 6 optimizer.lr 0.00966540575188868 +736 6 training.batch_size 1.0 +736 6 training.label_smoothing 
0.5224434839930069 +736 7 model.embedding_dim 0.0 +736 7 model.relation_dim 1.0 +736 7 optimizer.lr 0.002945251216444858 +736 7 training.batch_size 1.0 +736 7 training.label_smoothing 0.06402576014170906 +736 8 model.embedding_dim 0.0 +736 8 model.relation_dim 1.0 +736 8 optimizer.lr 0.0012359780031287958 +736 8 training.batch_size 0.0 +736 8 training.label_smoothing 0.22927269161826178 +736 9 model.embedding_dim 1.0 +736 9 model.relation_dim 1.0 +736 9 optimizer.lr 0.007291369862357717 +736 9 training.batch_size 0.0 +736 9 training.label_smoothing 0.01053142538533499 +736 1 dataset """fb15k237""" +736 1 model """transd""" +736 1 loss """softplus""" +736 1 regularizer """no""" +736 1 optimizer """adam""" +736 1 training_loop """lcwa""" +736 1 evaluator """rankbased""" +736 2 dataset """fb15k237""" +736 2 model """transd""" +736 2 loss """softplus""" +736 2 regularizer """no""" +736 2 optimizer """adam""" +736 2 training_loop """lcwa""" +736 2 evaluator """rankbased""" +736 3 dataset """fb15k237""" +736 3 model """transd""" +736 3 loss """softplus""" +736 3 regularizer """no""" +736 3 optimizer """adam""" +736 3 training_loop """lcwa""" +736 3 evaluator """rankbased""" +736 4 dataset """fb15k237""" +736 4 model """transd""" +736 4 loss """softplus""" +736 4 regularizer """no""" +736 4 optimizer """adam""" +736 4 training_loop """lcwa""" +736 4 evaluator """rankbased""" +736 5 dataset """fb15k237""" +736 5 model """transd""" +736 5 loss """softplus""" +736 5 regularizer """no""" +736 5 optimizer """adam""" +736 5 training_loop """lcwa""" +736 5 evaluator """rankbased""" +736 6 dataset """fb15k237""" +736 6 model """transd""" +736 6 loss """softplus""" +736 6 regularizer """no""" +736 6 optimizer """adam""" +736 6 training_loop """lcwa""" +736 6 evaluator """rankbased""" +736 7 dataset """fb15k237""" +736 7 model """transd""" +736 7 loss """softplus""" +736 7 regularizer """no""" +736 7 optimizer """adam""" +736 7 training_loop """lcwa""" +736 7 evaluator 
"""rankbased""" +736 8 dataset """fb15k237""" +736 8 model """transd""" +736 8 loss """softplus""" +736 8 regularizer """no""" +736 8 optimizer """adam""" +736 8 training_loop """lcwa""" +736 8 evaluator """rankbased""" +736 9 dataset """fb15k237""" +736 9 model """transd""" +736 9 loss """softplus""" +736 9 regularizer """no""" +736 9 optimizer """adam""" +736 9 training_loop """lcwa""" +736 9 evaluator """rankbased""" +737 1 model.embedding_dim 2.0 +737 1 model.relation_dim 2.0 +737 1 loss.margin 9.352880896428081 +737 1 optimizer.lr 0.001190525451308328 +737 1 negative_sampler.num_negs_per_pos 84.0 +737 1 training.batch_size 0.0 +737 2 model.embedding_dim 1.0 +737 2 model.relation_dim 1.0 +737 2 loss.margin 1.351281528731218 +737 2 optimizer.lr 0.0013283507035338166 +737 2 negative_sampler.num_negs_per_pos 42.0 +737 2 training.batch_size 2.0 +737 3 model.embedding_dim 2.0 +737 3 model.relation_dim 2.0 +737 3 loss.margin 2.5884520645455873 +737 3 optimizer.lr 0.04603667466580664 +737 3 negative_sampler.num_negs_per_pos 32.0 +737 3 training.batch_size 0.0 +737 4 model.embedding_dim 0.0 +737 4 model.relation_dim 2.0 +737 4 loss.margin 4.566566879697216 +737 4 optimizer.lr 0.00173259990419334 +737 4 negative_sampler.num_negs_per_pos 1.0 +737 4 training.batch_size 0.0 +737 5 model.embedding_dim 1.0 +737 5 model.relation_dim 2.0 +737 5 loss.margin 4.156323005317221 +737 5 optimizer.lr 0.0024681919150714997 +737 5 negative_sampler.num_negs_per_pos 42.0 +737 5 training.batch_size 2.0 +737 6 model.embedding_dim 1.0 +737 6 model.relation_dim 2.0 +737 6 loss.margin 4.448183408624657 +737 6 optimizer.lr 0.0013208175700984064 +737 6 negative_sampler.num_negs_per_pos 74.0 +737 6 training.batch_size 2.0 +737 7 model.embedding_dim 1.0 +737 7 model.relation_dim 1.0 +737 7 loss.margin 8.886272482225586 +737 7 optimizer.lr 0.036128841806379985 +737 7 negative_sampler.num_negs_per_pos 65.0 +737 7 training.batch_size 0.0 +737 8 model.embedding_dim 2.0 +737 8 model.relation_dim 1.0 
+737 8 loss.margin 6.928340556293909 +737 8 optimizer.lr 0.010152084572431174 +737 8 negative_sampler.num_negs_per_pos 63.0 +737 8 training.batch_size 2.0 +737 9 model.embedding_dim 2.0 +737 9 model.relation_dim 0.0 +737 9 loss.margin 4.241934882802819 +737 9 optimizer.lr 0.002687926364119518 +737 9 negative_sampler.num_negs_per_pos 69.0 +737 9 training.batch_size 2.0 +737 10 model.embedding_dim 0.0 +737 10 model.relation_dim 0.0 +737 10 loss.margin 1.6333227254642928 +737 10 optimizer.lr 0.001240257994243753 +737 10 negative_sampler.num_negs_per_pos 11.0 +737 10 training.batch_size 0.0 +737 11 model.embedding_dim 0.0 +737 11 model.relation_dim 0.0 +737 11 loss.margin 3.089337933557279 +737 11 optimizer.lr 0.0014420415633299708 +737 11 negative_sampler.num_negs_per_pos 44.0 +737 11 training.batch_size 2.0 +737 12 model.embedding_dim 0.0 +737 12 model.relation_dim 1.0 +737 12 loss.margin 9.289851986276528 +737 12 optimizer.lr 0.03596139907183311 +737 12 negative_sampler.num_negs_per_pos 86.0 +737 12 training.batch_size 1.0 +737 13 model.embedding_dim 2.0 +737 13 model.relation_dim 2.0 +737 13 loss.margin 8.377653090555372 +737 13 optimizer.lr 0.06815739287676607 +737 13 negative_sampler.num_negs_per_pos 65.0 +737 13 training.batch_size 2.0 +737 14 model.embedding_dim 1.0 +737 14 model.relation_dim 2.0 +737 14 loss.margin 6.000147404196369 +737 14 optimizer.lr 0.0560759240319757 +737 14 negative_sampler.num_negs_per_pos 22.0 +737 14 training.batch_size 1.0 +737 15 model.embedding_dim 0.0 +737 15 model.relation_dim 2.0 +737 15 loss.margin 1.757435105532116 +737 15 optimizer.lr 0.002364014848503287 +737 15 negative_sampler.num_negs_per_pos 31.0 +737 15 training.batch_size 0.0 +737 16 model.embedding_dim 0.0 +737 16 model.relation_dim 0.0 +737 16 loss.margin 9.899578903556202 +737 16 optimizer.lr 0.09857682278516286 +737 16 negative_sampler.num_negs_per_pos 81.0 +737 16 training.batch_size 2.0 +737 17 model.embedding_dim 1.0 +737 17 model.relation_dim 0.0 +737 17 
loss.margin 3.5939971688327828 +737 17 optimizer.lr 0.06892773081767074 +737 17 negative_sampler.num_negs_per_pos 73.0 +737 17 training.batch_size 1.0 +737 1 dataset """fb15k237""" +737 1 model """transd""" +737 1 loss """marginranking""" +737 1 regularizer """no""" +737 1 optimizer """adam""" +737 1 training_loop """owa""" +737 1 negative_sampler """basic""" +737 1 evaluator """rankbased""" +737 2 dataset """fb15k237""" +737 2 model """transd""" +737 2 loss """marginranking""" +737 2 regularizer """no""" +737 2 optimizer """adam""" +737 2 training_loop """owa""" +737 2 negative_sampler """basic""" +737 2 evaluator """rankbased""" +737 3 dataset """fb15k237""" +737 3 model """transd""" +737 3 loss """marginranking""" +737 3 regularizer """no""" +737 3 optimizer """adam""" +737 3 training_loop """owa""" +737 3 negative_sampler """basic""" +737 3 evaluator """rankbased""" +737 4 dataset """fb15k237""" +737 4 model """transd""" +737 4 loss """marginranking""" +737 4 regularizer """no""" +737 4 optimizer """adam""" +737 4 training_loop """owa""" +737 4 negative_sampler """basic""" +737 4 evaluator """rankbased""" +737 5 dataset """fb15k237""" +737 5 model """transd""" +737 5 loss """marginranking""" +737 5 regularizer """no""" +737 5 optimizer """adam""" +737 5 training_loop """owa""" +737 5 negative_sampler """basic""" +737 5 evaluator """rankbased""" +737 6 dataset """fb15k237""" +737 6 model """transd""" +737 6 loss """marginranking""" +737 6 regularizer """no""" +737 6 optimizer """adam""" +737 6 training_loop """owa""" +737 6 negative_sampler """basic""" +737 6 evaluator """rankbased""" +737 7 dataset """fb15k237""" +737 7 model """transd""" +737 7 loss """marginranking""" +737 7 regularizer """no""" +737 7 optimizer """adam""" +737 7 training_loop """owa""" +737 7 negative_sampler """basic""" +737 7 evaluator """rankbased""" +737 8 dataset """fb15k237""" +737 8 model """transd""" +737 8 loss """marginranking""" +737 8 regularizer """no""" +737 8 optimizer 
"""adam""" +737 8 training_loop """owa""" +737 8 negative_sampler """basic""" +737 8 evaluator """rankbased""" +737 9 dataset """fb15k237""" +737 9 model """transd""" +737 9 loss """marginranking""" +737 9 regularizer """no""" +737 9 optimizer """adam""" +737 9 training_loop """owa""" +737 9 negative_sampler """basic""" +737 9 evaluator """rankbased""" +737 10 dataset """fb15k237""" +737 10 model """transd""" +737 10 loss """marginranking""" +737 10 regularizer """no""" +737 10 optimizer """adam""" +737 10 training_loop """owa""" +737 10 negative_sampler """basic""" +737 10 evaluator """rankbased""" +737 11 dataset """fb15k237""" +737 11 model """transd""" +737 11 loss """marginranking""" +737 11 regularizer """no""" +737 11 optimizer """adam""" +737 11 training_loop """owa""" +737 11 negative_sampler """basic""" +737 11 evaluator """rankbased""" +737 12 dataset """fb15k237""" +737 12 model """transd""" +737 12 loss """marginranking""" +737 12 regularizer """no""" +737 12 optimizer """adam""" +737 12 training_loop """owa""" +737 12 negative_sampler """basic""" +737 12 evaluator """rankbased""" +737 13 dataset """fb15k237""" +737 13 model """transd""" +737 13 loss """marginranking""" +737 13 regularizer """no""" +737 13 optimizer """adam""" +737 13 training_loop """owa""" +737 13 negative_sampler """basic""" +737 13 evaluator """rankbased""" +737 14 dataset """fb15k237""" +737 14 model """transd""" +737 14 loss """marginranking""" +737 14 regularizer """no""" +737 14 optimizer """adam""" +737 14 training_loop """owa""" +737 14 negative_sampler """basic""" +737 14 evaluator """rankbased""" +737 15 dataset """fb15k237""" +737 15 model """transd""" +737 15 loss """marginranking""" +737 15 regularizer """no""" +737 15 optimizer """adam""" +737 15 training_loop """owa""" +737 15 negative_sampler """basic""" +737 15 evaluator """rankbased""" +737 16 dataset """fb15k237""" +737 16 model """transd""" +737 16 loss """marginranking""" +737 16 regularizer """no""" +737 16 
optimizer """adam""" +737 16 training_loop """owa""" +737 16 negative_sampler """basic""" +737 16 evaluator """rankbased""" +737 17 dataset """fb15k237""" +737 17 model """transd""" +737 17 loss """marginranking""" +737 17 regularizer """no""" +737 17 optimizer """adam""" +737 17 training_loop """owa""" +737 17 negative_sampler """basic""" +737 17 evaluator """rankbased""" +738 1 model.embedding_dim 2.0 +738 1 model.relation_dim 2.0 +738 1 loss.margin 7.125107389812595 +738 1 optimizer.lr 0.03416281802731655 +738 1 negative_sampler.num_negs_per_pos 81.0 +738 1 training.batch_size 1.0 +738 2 model.embedding_dim 0.0 +738 2 model.relation_dim 0.0 +738 2 loss.margin 1.8399695454876015 +738 2 optimizer.lr 0.001367013492065416 +738 2 negative_sampler.num_negs_per_pos 0.0 +738 2 training.batch_size 2.0 +738 3 model.embedding_dim 2.0 +738 3 model.relation_dim 2.0 +738 3 loss.margin 0.9735987051471 +738 3 optimizer.lr 0.002852047267688886 +738 3 negative_sampler.num_negs_per_pos 72.0 +738 3 training.batch_size 0.0 +738 4 model.embedding_dim 1.0 +738 4 model.relation_dim 2.0 +738 4 loss.margin 5.77979518156897 +738 4 optimizer.lr 0.04170778911994108 +738 4 negative_sampler.num_negs_per_pos 51.0 +738 4 training.batch_size 2.0 +738 5 model.embedding_dim 0.0 +738 5 model.relation_dim 2.0 +738 5 loss.margin 7.80825384347227 +738 5 optimizer.lr 0.0021908711942015546 +738 5 negative_sampler.num_negs_per_pos 32.0 +738 5 training.batch_size 0.0 +738 6 model.embedding_dim 2.0 +738 6 model.relation_dim 2.0 +738 6 loss.margin 4.192723965257631 +738 6 optimizer.lr 0.004607727914246341 +738 6 negative_sampler.num_negs_per_pos 55.0 +738 6 training.batch_size 2.0 +738 7 model.embedding_dim 0.0 +738 7 model.relation_dim 1.0 +738 7 loss.margin 3.093980027517886 +738 7 optimizer.lr 0.02876331821998608 +738 7 negative_sampler.num_negs_per_pos 84.0 +738 7 training.batch_size 1.0 +738 8 model.embedding_dim 1.0 +738 8 model.relation_dim 1.0 +738 8 loss.margin 2.243310648189661 +738 8 optimizer.lr 
0.004722612760015105 +738 8 negative_sampler.num_negs_per_pos 99.0 +738 8 training.batch_size 1.0 +738 9 model.embedding_dim 1.0 +738 9 model.relation_dim 1.0 +738 9 loss.margin 3.333180035240522 +738 9 optimizer.lr 0.016728049463534667 +738 9 negative_sampler.num_negs_per_pos 50.0 +738 9 training.batch_size 1.0 +738 10 model.embedding_dim 0.0 +738 10 model.relation_dim 0.0 +738 10 loss.margin 2.770657922983497 +738 10 optimizer.lr 0.00244756288175708 +738 10 negative_sampler.num_negs_per_pos 3.0 +738 10 training.batch_size 2.0 +738 11 model.embedding_dim 2.0 +738 11 model.relation_dim 2.0 +738 11 loss.margin 2.041735261202499 +738 11 optimizer.lr 0.0034015825694987535 +738 11 negative_sampler.num_negs_per_pos 14.0 +738 11 training.batch_size 2.0 +738 12 model.embedding_dim 1.0 +738 12 model.relation_dim 2.0 +738 12 loss.margin 5.971563935143757 +738 12 optimizer.lr 0.00189619163636707 +738 12 negative_sampler.num_negs_per_pos 66.0 +738 12 training.batch_size 0.0 +738 13 model.embedding_dim 1.0 +738 13 model.relation_dim 0.0 +738 13 loss.margin 8.73983534218853 +738 13 optimizer.lr 0.0042040490068969046 +738 13 negative_sampler.num_negs_per_pos 27.0 +738 13 training.batch_size 0.0 +738 14 model.embedding_dim 2.0 +738 14 model.relation_dim 2.0 +738 14 loss.margin 1.7753942061980912 +738 14 optimizer.lr 0.004833089881312001 +738 14 negative_sampler.num_negs_per_pos 50.0 +738 14 training.batch_size 0.0 +738 15 model.embedding_dim 0.0 +738 15 model.relation_dim 0.0 +738 15 loss.margin 6.009298200309643 +738 15 optimizer.lr 0.011079772007207118 +738 15 negative_sampler.num_negs_per_pos 60.0 +738 15 training.batch_size 2.0 +738 16 model.embedding_dim 2.0 +738 16 model.relation_dim 1.0 +738 16 loss.margin 2.2457733666112545 +738 16 optimizer.lr 0.04300881605511724 +738 16 negative_sampler.num_negs_per_pos 41.0 +738 16 training.batch_size 0.0 +738 17 model.embedding_dim 0.0 +738 17 model.relation_dim 2.0 +738 17 loss.margin 4.443698243933513 +738 17 optimizer.lr 
0.03466981349341189 +738 17 negative_sampler.num_negs_per_pos 84.0 +738 17 training.batch_size 0.0 +738 18 model.embedding_dim 2.0 +738 18 model.relation_dim 0.0 +738 18 loss.margin 4.945679266132877 +738 18 optimizer.lr 0.004480691211785598 +738 18 negative_sampler.num_negs_per_pos 14.0 +738 18 training.batch_size 2.0 +738 19 model.embedding_dim 2.0 +738 19 model.relation_dim 2.0 +738 19 loss.margin 2.6108385433638017 +738 19 optimizer.lr 0.07175285716464828 +738 19 negative_sampler.num_negs_per_pos 53.0 +738 19 training.batch_size 0.0 +738 20 model.embedding_dim 0.0 +738 20 model.relation_dim 2.0 +738 20 loss.margin 5.02028135824866 +738 20 optimizer.lr 0.02578062541932938 +738 20 negative_sampler.num_negs_per_pos 41.0 +738 20 training.batch_size 2.0 +738 21 model.embedding_dim 1.0 +738 21 model.relation_dim 2.0 +738 21 loss.margin 1.8939411389621719 +738 21 optimizer.lr 0.08151686373926116 +738 21 negative_sampler.num_negs_per_pos 68.0 +738 21 training.batch_size 0.0 +738 22 model.embedding_dim 2.0 +738 22 model.relation_dim 0.0 +738 22 loss.margin 3.2680483005146748 +738 22 optimizer.lr 0.0013899382263886794 +738 22 negative_sampler.num_negs_per_pos 52.0 +738 22 training.batch_size 1.0 +738 23 model.embedding_dim 2.0 +738 23 model.relation_dim 2.0 +738 23 loss.margin 3.7837093505766317 +738 23 optimizer.lr 0.0028231883424966763 +738 23 negative_sampler.num_negs_per_pos 1.0 +738 23 training.batch_size 0.0 +738 24 model.embedding_dim 2.0 +738 24 model.relation_dim 2.0 +738 24 loss.margin 7.883181428191147 +738 24 optimizer.lr 0.005893208238749667 +738 24 negative_sampler.num_negs_per_pos 58.0 +738 24 training.batch_size 2.0 +738 25 model.embedding_dim 2.0 +738 25 model.relation_dim 2.0 +738 25 loss.margin 9.995925257476474 +738 25 optimizer.lr 0.04133088719663984 +738 25 negative_sampler.num_negs_per_pos 46.0 +738 25 training.batch_size 2.0 +738 26 model.embedding_dim 2.0 +738 26 model.relation_dim 2.0 +738 26 loss.margin 2.8785633855206005 +738 26 optimizer.lr 
0.054643479994826424 +738 26 negative_sampler.num_negs_per_pos 55.0 +738 26 training.batch_size 2.0 +738 27 model.embedding_dim 1.0 +738 27 model.relation_dim 1.0 +738 27 loss.margin 4.700089146647768 +738 27 optimizer.lr 0.008528409176059416 +738 27 negative_sampler.num_negs_per_pos 2.0 +738 27 training.batch_size 0.0 +738 28 model.embedding_dim 2.0 +738 28 model.relation_dim 0.0 +738 28 loss.margin 1.103551210439421 +738 28 optimizer.lr 0.02149895593844172 +738 28 negative_sampler.num_negs_per_pos 65.0 +738 28 training.batch_size 1.0 +738 29 model.embedding_dim 2.0 +738 29 model.relation_dim 1.0 +738 29 loss.margin 2.917162369699569 +738 29 optimizer.lr 0.04255535685807391 +738 29 negative_sampler.num_negs_per_pos 17.0 +738 29 training.batch_size 0.0 +738 30 model.embedding_dim 0.0 +738 30 model.relation_dim 2.0 +738 30 loss.margin 9.706771333212917 +738 30 optimizer.lr 0.006559331629088787 +738 30 negative_sampler.num_negs_per_pos 95.0 +738 30 training.batch_size 2.0 +738 31 model.embedding_dim 1.0 +738 31 model.relation_dim 2.0 +738 31 loss.margin 1.9272724202314337 +738 31 optimizer.lr 0.013096314553697669 +738 31 negative_sampler.num_negs_per_pos 21.0 +738 31 training.batch_size 2.0 +738 32 model.embedding_dim 2.0 +738 32 model.relation_dim 0.0 +738 32 loss.margin 8.06062642336762 +738 32 optimizer.lr 0.004081315491148275 +738 32 negative_sampler.num_negs_per_pos 70.0 +738 32 training.batch_size 1.0 +738 33 model.embedding_dim 0.0 +738 33 model.relation_dim 2.0 +738 33 loss.margin 6.3058188206454115 +738 33 optimizer.lr 0.06481051858826364 +738 33 negative_sampler.num_negs_per_pos 4.0 +738 33 training.batch_size 1.0 +738 34 model.embedding_dim 1.0 +738 34 model.relation_dim 2.0 +738 34 loss.margin 3.270178653172592 +738 34 optimizer.lr 0.09865543531225408 +738 34 negative_sampler.num_negs_per_pos 64.0 +738 34 training.batch_size 1.0 +738 35 model.embedding_dim 0.0 +738 35 model.relation_dim 2.0 +738 35 loss.margin 0.5917364555031858 +738 35 optimizer.lr 
0.00676803252340595 +738 35 negative_sampler.num_negs_per_pos 50.0 +738 35 training.batch_size 2.0 +738 36 model.embedding_dim 1.0 +738 36 model.relation_dim 0.0 +738 36 loss.margin 3.405789228507571 +738 36 optimizer.lr 0.005009435854173934 +738 36 negative_sampler.num_negs_per_pos 27.0 +738 36 training.batch_size 1.0 +738 37 model.embedding_dim 1.0 +738 37 model.relation_dim 2.0 +738 37 loss.margin 5.361623494520499 +738 37 optimizer.lr 0.005353648371284717 +738 37 negative_sampler.num_negs_per_pos 43.0 +738 37 training.batch_size 1.0 +738 1 dataset """fb15k237""" +738 1 model """transd""" +738 1 loss """marginranking""" +738 1 regularizer """no""" +738 1 optimizer """adam""" +738 1 training_loop """owa""" +738 1 negative_sampler """basic""" +738 1 evaluator """rankbased""" +738 2 dataset """fb15k237""" +738 2 model """transd""" +738 2 loss """marginranking""" +738 2 regularizer """no""" +738 2 optimizer """adam""" +738 2 training_loop """owa""" +738 2 negative_sampler """basic""" +738 2 evaluator """rankbased""" +738 3 dataset """fb15k237""" +738 3 model """transd""" +738 3 loss """marginranking""" +738 3 regularizer """no""" +738 3 optimizer """adam""" +738 3 training_loop """owa""" +738 3 negative_sampler """basic""" +738 3 evaluator """rankbased""" +738 4 dataset """fb15k237""" +738 4 model """transd""" +738 4 loss """marginranking""" +738 4 regularizer """no""" +738 4 optimizer """adam""" +738 4 training_loop """owa""" +738 4 negative_sampler """basic""" +738 4 evaluator """rankbased""" +738 5 dataset """fb15k237""" +738 5 model """transd""" +738 5 loss """marginranking""" +738 5 regularizer """no""" +738 5 optimizer """adam""" +738 5 training_loop """owa""" +738 5 negative_sampler """basic""" +738 5 evaluator """rankbased""" +738 6 dataset """fb15k237""" +738 6 model """transd""" +738 6 loss """marginranking""" +738 6 regularizer """no""" +738 6 optimizer """adam""" +738 6 training_loop """owa""" +738 6 negative_sampler """basic""" +738 6 evaluator 
"""rankbased""" +738 7 dataset """fb15k237""" +738 7 model """transd""" +738 7 loss """marginranking""" +738 7 regularizer """no""" +738 7 optimizer """adam""" +738 7 training_loop """owa""" +738 7 negative_sampler """basic""" +738 7 evaluator """rankbased""" +738 8 dataset """fb15k237""" +738 8 model """transd""" +738 8 loss """marginranking""" +738 8 regularizer """no""" +738 8 optimizer """adam""" +738 8 training_loop """owa""" +738 8 negative_sampler """basic""" +738 8 evaluator """rankbased""" +738 9 dataset """fb15k237""" +738 9 model """transd""" +738 9 loss """marginranking""" +738 9 regularizer """no""" +738 9 optimizer """adam""" +738 9 training_loop """owa""" +738 9 negative_sampler """basic""" +738 9 evaluator """rankbased""" +738 10 dataset """fb15k237""" +738 10 model """transd""" +738 10 loss """marginranking""" +738 10 regularizer """no""" +738 10 optimizer """adam""" +738 10 training_loop """owa""" +738 10 negative_sampler """basic""" +738 10 evaluator """rankbased""" +738 11 dataset """fb15k237""" +738 11 model """transd""" +738 11 loss """marginranking""" +738 11 regularizer """no""" +738 11 optimizer """adam""" +738 11 training_loop """owa""" +738 11 negative_sampler """basic""" +738 11 evaluator """rankbased""" +738 12 dataset """fb15k237""" +738 12 model """transd""" +738 12 loss """marginranking""" +738 12 regularizer """no""" +738 12 optimizer """adam""" +738 12 training_loop """owa""" +738 12 negative_sampler """basic""" +738 12 evaluator """rankbased""" +738 13 dataset """fb15k237""" +738 13 model """transd""" +738 13 loss """marginranking""" +738 13 regularizer """no""" +738 13 optimizer """adam""" +738 13 training_loop """owa""" +738 13 negative_sampler """basic""" +738 13 evaluator """rankbased""" +738 14 dataset """fb15k237""" +738 14 model """transd""" +738 14 loss """marginranking""" +738 14 regularizer """no""" +738 14 optimizer """adam""" +738 14 training_loop """owa""" +738 14 negative_sampler """basic""" +738 14 evaluator 
"""rankbased""" +738 15 dataset """fb15k237""" +738 15 model """transd""" +738 15 loss """marginranking""" +738 15 regularizer """no""" +738 15 optimizer """adam""" +738 15 training_loop """owa""" +738 15 negative_sampler """basic""" +738 15 evaluator """rankbased""" +738 16 dataset """fb15k237""" +738 16 model """transd""" +738 16 loss """marginranking""" +738 16 regularizer """no""" +738 16 optimizer """adam""" +738 16 training_loop """owa""" +738 16 negative_sampler """basic""" +738 16 evaluator """rankbased""" +738 17 dataset """fb15k237""" +738 17 model """transd""" +738 17 loss """marginranking""" +738 17 regularizer """no""" +738 17 optimizer """adam""" +738 17 training_loop """owa""" +738 17 negative_sampler """basic""" +738 17 evaluator """rankbased""" +738 18 dataset """fb15k237""" +738 18 model """transd""" +738 18 loss """marginranking""" +738 18 regularizer """no""" +738 18 optimizer """adam""" +738 18 training_loop """owa""" +738 18 negative_sampler """basic""" +738 18 evaluator """rankbased""" +738 19 dataset """fb15k237""" +738 19 model """transd""" +738 19 loss """marginranking""" +738 19 regularizer """no""" +738 19 optimizer """adam""" +738 19 training_loop """owa""" +738 19 negative_sampler """basic""" +738 19 evaluator """rankbased""" +738 20 dataset """fb15k237""" +738 20 model """transd""" +738 20 loss """marginranking""" +738 20 regularizer """no""" +738 20 optimizer """adam""" +738 20 training_loop """owa""" +738 20 negative_sampler """basic""" +738 20 evaluator """rankbased""" +738 21 dataset """fb15k237""" +738 21 model """transd""" +738 21 loss """marginranking""" +738 21 regularizer """no""" +738 21 optimizer """adam""" +738 21 training_loop """owa""" +738 21 negative_sampler """basic""" +738 21 evaluator """rankbased""" +738 22 dataset """fb15k237""" +738 22 model """transd""" +738 22 loss """marginranking""" +738 22 regularizer """no""" +738 22 optimizer """adam""" +738 22 training_loop """owa""" +738 22 negative_sampler """basic""" 
+738 22 evaluator """rankbased""" +738 23 dataset """fb15k237""" +738 23 model """transd""" +738 23 loss """marginranking""" +738 23 regularizer """no""" +738 23 optimizer """adam""" +738 23 training_loop """owa""" +738 23 negative_sampler """basic""" +738 23 evaluator """rankbased""" +738 24 dataset """fb15k237""" +738 24 model """transd""" +738 24 loss """marginranking""" +738 24 regularizer """no""" +738 24 optimizer """adam""" +738 24 training_loop """owa""" +738 24 negative_sampler """basic""" +738 24 evaluator """rankbased""" +738 25 dataset """fb15k237""" +738 25 model """transd""" +738 25 loss """marginranking""" +738 25 regularizer """no""" +738 25 optimizer """adam""" +738 25 training_loop """owa""" +738 25 negative_sampler """basic""" +738 25 evaluator """rankbased""" +738 26 dataset """fb15k237""" +738 26 model """transd""" +738 26 loss """marginranking""" +738 26 regularizer """no""" +738 26 optimizer """adam""" +738 26 training_loop """owa""" +738 26 negative_sampler """basic""" +738 26 evaluator """rankbased""" +738 27 dataset """fb15k237""" +738 27 model """transd""" +738 27 loss """marginranking""" +738 27 regularizer """no""" +738 27 optimizer """adam""" +738 27 training_loop """owa""" +738 27 negative_sampler """basic""" +738 27 evaluator """rankbased""" +738 28 dataset """fb15k237""" +738 28 model """transd""" +738 28 loss """marginranking""" +738 28 regularizer """no""" +738 28 optimizer """adam""" +738 28 training_loop """owa""" +738 28 negative_sampler """basic""" +738 28 evaluator """rankbased""" +738 29 dataset """fb15k237""" +738 29 model """transd""" +738 29 loss """marginranking""" +738 29 regularizer """no""" +738 29 optimizer """adam""" +738 29 training_loop """owa""" +738 29 negative_sampler """basic""" +738 29 evaluator """rankbased""" +738 30 dataset """fb15k237""" +738 30 model """transd""" +738 30 loss """marginranking""" +738 30 regularizer """no""" +738 30 optimizer """adam""" +738 30 training_loop """owa""" +738 30 
negative_sampler """basic""" +738 30 evaluator """rankbased""" +738 31 dataset """fb15k237""" +738 31 model """transd""" +738 31 loss """marginranking""" +738 31 regularizer """no""" +738 31 optimizer """adam""" +738 31 training_loop """owa""" +738 31 negative_sampler """basic""" +738 31 evaluator """rankbased""" +738 32 dataset """fb15k237""" +738 32 model """transd""" +738 32 loss """marginranking""" +738 32 regularizer """no""" +738 32 optimizer """adam""" +738 32 training_loop """owa""" +738 32 negative_sampler """basic""" +738 32 evaluator """rankbased""" +738 33 dataset """fb15k237""" +738 33 model """transd""" +738 33 loss """marginranking""" +738 33 regularizer """no""" +738 33 optimizer """adam""" +738 33 training_loop """owa""" +738 33 negative_sampler """basic""" +738 33 evaluator """rankbased""" +738 34 dataset """fb15k237""" +738 34 model """transd""" +738 34 loss """marginranking""" +738 34 regularizer """no""" +738 34 optimizer """adam""" +738 34 training_loop """owa""" +738 34 negative_sampler """basic""" +738 34 evaluator """rankbased""" +738 35 dataset """fb15k237""" +738 35 model """transd""" +738 35 loss """marginranking""" +738 35 regularizer """no""" +738 35 optimizer """adam""" +738 35 training_loop """owa""" +738 35 negative_sampler """basic""" +738 35 evaluator """rankbased""" +738 36 dataset """fb15k237""" +738 36 model """transd""" +738 36 loss """marginranking""" +738 36 regularizer """no""" +738 36 optimizer """adam""" +738 36 training_loop """owa""" +738 36 negative_sampler """basic""" +738 36 evaluator """rankbased""" +738 37 dataset """fb15k237""" +738 37 model """transd""" +738 37 loss """marginranking""" +738 37 regularizer """no""" +738 37 optimizer """adam""" +738 37 training_loop """owa""" +738 37 negative_sampler """basic""" +738 37 evaluator """rankbased""" +739 1 model.embedding_dim 2.0 +739 1 model.relation_dim 1.0 +739 1 optimizer.lr 0.01092384391389878 +739 1 training.batch_size 2.0 +739 1 training.label_smoothing 
0.3790804400335497 +739 2 model.embedding_dim 1.0 +739 2 model.relation_dim 2.0 +739 2 optimizer.lr 0.03620466149960045 +739 2 training.batch_size 0.0 +739 2 training.label_smoothing 0.23136384270541646 +739 3 model.embedding_dim 1.0 +739 3 model.relation_dim 1.0 +739 3 optimizer.lr 0.03016572167301373 +739 3 training.batch_size 0.0 +739 3 training.label_smoothing 0.03433899156397703 +739 1 dataset """fb15k237""" +739 1 model """transd""" +739 1 loss """crossentropy""" +739 1 regularizer """no""" +739 1 optimizer """adam""" +739 1 training_loop """lcwa""" +739 1 evaluator """rankbased""" +739 2 dataset """fb15k237""" +739 2 model """transd""" +739 2 loss """crossentropy""" +739 2 regularizer """no""" +739 2 optimizer """adam""" +739 2 training_loop """lcwa""" +739 2 evaluator """rankbased""" +739 3 dataset """fb15k237""" +739 3 model """transd""" +739 3 loss """crossentropy""" +739 3 regularizer """no""" +739 3 optimizer """adam""" +739 3 training_loop """lcwa""" +739 3 evaluator """rankbased""" +740 1 model.embedding_dim 0.0 +740 1 model.relation_dim 2.0 +740 1 optimizer.lr 0.0050405447359075295 +740 1 training.batch_size 2.0 +740 1 training.label_smoothing 0.0966532947183878 +740 2 model.embedding_dim 0.0 +740 2 model.relation_dim 0.0 +740 2 optimizer.lr 0.014729225943149259 +740 2 training.batch_size 1.0 +740 2 training.label_smoothing 0.0030046534444029436 +740 3 model.embedding_dim 0.0 +740 3 model.relation_dim 0.0 +740 3 optimizer.lr 0.0027501481834493104 +740 3 training.batch_size 1.0 +740 3 training.label_smoothing 0.0012483107582065246 +740 4 model.embedding_dim 1.0 +740 4 model.relation_dim 0.0 +740 4 optimizer.lr 0.004229834482102386 +740 4 training.batch_size 0.0 +740 4 training.label_smoothing 0.002351090893649495 +740 5 model.embedding_dim 0.0 +740 5 model.relation_dim 1.0 +740 5 optimizer.lr 0.0824039678133659 +740 5 training.batch_size 0.0 +740 5 training.label_smoothing 0.08690035425787498 +740 6 model.embedding_dim 2.0 +740 6 model.relation_dim 
0.0 +740 6 optimizer.lr 0.0020593428970631152 +740 6 training.batch_size 1.0 +740 6 training.label_smoothing 0.0027656289752540117 +740 1 dataset """fb15k237""" +740 1 model """transd""" +740 1 loss """crossentropy""" +740 1 regularizer """no""" +740 1 optimizer """adam""" +740 1 training_loop """lcwa""" +740 1 evaluator """rankbased""" +740 2 dataset """fb15k237""" +740 2 model """transd""" +740 2 loss """crossentropy""" +740 2 regularizer """no""" +740 2 optimizer """adam""" +740 2 training_loop """lcwa""" +740 2 evaluator """rankbased""" +740 3 dataset """fb15k237""" +740 3 model """transd""" +740 3 loss """crossentropy""" +740 3 regularizer """no""" +740 3 optimizer """adam""" +740 3 training_loop """lcwa""" +740 3 evaluator """rankbased""" +740 4 dataset """fb15k237""" +740 4 model """transd""" +740 4 loss """crossentropy""" +740 4 regularizer """no""" +740 4 optimizer """adam""" +740 4 training_loop """lcwa""" +740 4 evaluator """rankbased""" +740 5 dataset """fb15k237""" +740 5 model """transd""" +740 5 loss """crossentropy""" +740 5 regularizer """no""" +740 5 optimizer """adam""" +740 5 training_loop """lcwa""" +740 5 evaluator """rankbased""" +740 6 dataset """fb15k237""" +740 6 model """transd""" +740 6 loss """crossentropy""" +740 6 regularizer """no""" +740 6 optimizer """adam""" +740 6 training_loop """lcwa""" +740 6 evaluator """rankbased""" +741 1 model.embedding_dim 2.0 +741 1 model.relation_dim 1.0 +741 1 training.batch_size 2.0 +741 1 training.label_smoothing 0.0016797626176567582 +741 2 model.embedding_dim 1.0 +741 2 model.relation_dim 2.0 +741 2 training.batch_size 1.0 +741 2 training.label_smoothing 0.004617482071236429 +741 3 model.embedding_dim 0.0 +741 3 model.relation_dim 2.0 +741 3 training.batch_size 0.0 +741 3 training.label_smoothing 0.6270137294978971 +741 4 model.embedding_dim 1.0 +741 4 model.relation_dim 0.0 +741 4 training.batch_size 1.0 +741 4 training.label_smoothing 0.03441995483831971 +741 5 model.embedding_dim 1.0 +741 5 
model.relation_dim 0.0 +741 5 training.batch_size 0.0 +741 5 training.label_smoothing 0.3326265134141573 +741 6 model.embedding_dim 0.0 +741 6 model.relation_dim 1.0 +741 6 training.batch_size 1.0 +741 6 training.label_smoothing 0.0010878378359201414 +741 7 model.embedding_dim 2.0 +741 7 model.relation_dim 2.0 +741 7 training.batch_size 2.0 +741 7 training.label_smoothing 0.030992079301910943 +741 8 model.embedding_dim 1.0 +741 8 model.relation_dim 1.0 +741 8 training.batch_size 1.0 +741 8 training.label_smoothing 0.10774784257676624 +741 9 model.embedding_dim 2.0 +741 9 model.relation_dim 2.0 +741 9 training.batch_size 2.0 +741 9 training.label_smoothing 0.061153243928499246 +741 10 model.embedding_dim 1.0 +741 10 model.relation_dim 2.0 +741 10 training.batch_size 0.0 +741 10 training.label_smoothing 0.8205855570098521 +741 11 model.embedding_dim 0.0 +741 11 model.relation_dim 1.0 +741 11 training.batch_size 1.0 +741 11 training.label_smoothing 0.07155417087857201 +741 12 model.embedding_dim 0.0 +741 12 model.relation_dim 2.0 +741 12 training.batch_size 2.0 +741 12 training.label_smoothing 0.8590967615719918 +741 13 model.embedding_dim 1.0 +741 13 model.relation_dim 0.0 +741 13 training.batch_size 1.0 +741 13 training.label_smoothing 0.0014911126173867573 +741 14 model.embedding_dim 1.0 +741 14 model.relation_dim 0.0 +741 14 training.batch_size 0.0 +741 14 training.label_smoothing 0.1918626518527919 +741 15 model.embedding_dim 1.0 +741 15 model.relation_dim 0.0 +741 15 training.batch_size 0.0 +741 15 training.label_smoothing 0.04487737900367326 +741 16 model.embedding_dim 2.0 +741 16 model.relation_dim 2.0 +741 16 training.batch_size 0.0 +741 16 training.label_smoothing 0.030924579454468717 +741 17 model.embedding_dim 2.0 +741 17 model.relation_dim 0.0 +741 17 training.batch_size 0.0 +741 17 training.label_smoothing 0.011547428918253546 +741 18 model.embedding_dim 1.0 +741 18 model.relation_dim 1.0 +741 18 training.batch_size 1.0 +741 18 training.label_smoothing 
0.06686205899451185 +741 19 model.embedding_dim 0.0 +741 19 model.relation_dim 0.0 +741 19 training.batch_size 1.0 +741 19 training.label_smoothing 0.011702384859633564 +741 20 model.embedding_dim 0.0 +741 20 model.relation_dim 1.0 +741 20 training.batch_size 0.0 +741 20 training.label_smoothing 0.1090407835205567 +741 21 model.embedding_dim 2.0 +741 21 model.relation_dim 0.0 +741 21 training.batch_size 2.0 +741 21 training.label_smoothing 0.21431730872242516 +741 22 model.embedding_dim 2.0 +741 22 model.relation_dim 1.0 +741 22 training.batch_size 1.0 +741 22 training.label_smoothing 0.005205813650050889 +741 23 model.embedding_dim 0.0 +741 23 model.relation_dim 0.0 +741 23 training.batch_size 0.0 +741 23 training.label_smoothing 0.29979453120767674 +741 24 model.embedding_dim 0.0 +741 24 model.relation_dim 2.0 +741 24 training.batch_size 0.0 +741 24 training.label_smoothing 0.023671221905933126 +741 25 model.embedding_dim 1.0 +741 25 model.relation_dim 1.0 +741 25 training.batch_size 0.0 +741 25 training.label_smoothing 0.15389080843798775 +741 26 model.embedding_dim 2.0 +741 26 model.relation_dim 2.0 +741 26 training.batch_size 2.0 +741 26 training.label_smoothing 0.006435821570496714 +741 27 model.embedding_dim 1.0 +741 27 model.relation_dim 0.0 +741 27 training.batch_size 2.0 +741 27 training.label_smoothing 0.002076812840290546 +741 28 model.embedding_dim 1.0 +741 28 model.relation_dim 0.0 +741 28 training.batch_size 2.0 +741 28 training.label_smoothing 0.09713013065746699 +741 29 model.embedding_dim 1.0 +741 29 model.relation_dim 0.0 +741 29 training.batch_size 2.0 +741 29 training.label_smoothing 0.004010802150475316 +741 30 model.embedding_dim 2.0 +741 30 model.relation_dim 2.0 +741 30 training.batch_size 1.0 +741 30 training.label_smoothing 0.19654222758634624 +741 31 model.embedding_dim 0.0 +741 31 model.relation_dim 1.0 +741 31 training.batch_size 0.0 +741 31 training.label_smoothing 0.3849423500769207 +741 32 model.embedding_dim 0.0 +741 32 
model.relation_dim 0.0 +741 32 training.batch_size 2.0 +741 32 training.label_smoothing 0.0010667475766603352 +741 33 model.embedding_dim 2.0 +741 33 model.relation_dim 1.0 +741 33 training.batch_size 2.0 +741 33 training.label_smoothing 0.004911884389046079 +741 34 model.embedding_dim 2.0 +741 34 model.relation_dim 1.0 +741 34 training.batch_size 1.0 +741 34 training.label_smoothing 0.35215333798421056 +741 35 model.embedding_dim 0.0 +741 35 model.relation_dim 2.0 +741 35 training.batch_size 2.0 +741 35 training.label_smoothing 0.18611439076995162 +741 36 model.embedding_dim 1.0 +741 36 model.relation_dim 0.0 +741 36 training.batch_size 2.0 +741 36 training.label_smoothing 0.045885669629009716 +741 37 model.embedding_dim 0.0 +741 37 model.relation_dim 0.0 +741 37 training.batch_size 2.0 +741 37 training.label_smoothing 0.06540168500077825 +741 38 model.embedding_dim 0.0 +741 38 model.relation_dim 0.0 +741 38 training.batch_size 0.0 +741 38 training.label_smoothing 0.3842267704126066 +741 39 model.embedding_dim 2.0 +741 39 model.relation_dim 0.0 +741 39 training.batch_size 0.0 +741 39 training.label_smoothing 0.004271006480907451 +741 40 model.embedding_dim 2.0 +741 40 model.relation_dim 1.0 +741 40 training.batch_size 2.0 +741 40 training.label_smoothing 0.7591455376865973 +741 41 model.embedding_dim 2.0 +741 41 model.relation_dim 2.0 +741 41 training.batch_size 2.0 +741 41 training.label_smoothing 0.021846901376718283 +741 42 model.embedding_dim 1.0 +741 42 model.relation_dim 1.0 +741 42 training.batch_size 0.0 +741 42 training.label_smoothing 0.025344937870464573 +741 43 model.embedding_dim 1.0 +741 43 model.relation_dim 1.0 +741 43 training.batch_size 2.0 +741 43 training.label_smoothing 0.021733556710765205 +741 44 model.embedding_dim 1.0 +741 44 model.relation_dim 0.0 +741 44 training.batch_size 2.0 +741 44 training.label_smoothing 0.7215580590863968 +741 45 model.embedding_dim 2.0 +741 45 model.relation_dim 2.0 +741 45 training.batch_size 2.0 +741 45 
training.label_smoothing 0.14157700703692255 +741 46 model.embedding_dim 2.0 +741 46 model.relation_dim 1.0 +741 46 training.batch_size 1.0 +741 46 training.label_smoothing 0.003176529245059377 +741 47 model.embedding_dim 0.0 +741 47 model.relation_dim 2.0 +741 47 training.batch_size 1.0 +741 47 training.label_smoothing 0.009177359282244458 +741 48 model.embedding_dim 1.0 +741 48 model.relation_dim 0.0 +741 48 training.batch_size 0.0 +741 48 training.label_smoothing 0.1879093755995903 +741 49 model.embedding_dim 1.0 +741 49 model.relation_dim 2.0 +741 49 training.batch_size 1.0 +741 49 training.label_smoothing 0.39782767071585173 +741 50 model.embedding_dim 1.0 +741 50 model.relation_dim 0.0 +741 50 training.batch_size 1.0 +741 50 training.label_smoothing 0.035695848153083104 +741 51 model.embedding_dim 0.0 +741 51 model.relation_dim 1.0 +741 51 training.batch_size 2.0 +741 51 training.label_smoothing 0.735533047763185 +741 52 model.embedding_dim 2.0 +741 52 model.relation_dim 1.0 +741 52 training.batch_size 2.0 +741 52 training.label_smoothing 0.6843311062306102 +741 53 model.embedding_dim 0.0 +741 53 model.relation_dim 0.0 +741 53 training.batch_size 0.0 +741 53 training.label_smoothing 0.0011940524115879537 +741 54 model.embedding_dim 0.0 +741 54 model.relation_dim 1.0 +741 54 training.batch_size 1.0 +741 54 training.label_smoothing 0.016913961783551403 +741 55 model.embedding_dim 2.0 +741 55 model.relation_dim 0.0 +741 55 training.batch_size 0.0 +741 55 training.label_smoothing 0.052875716252303494 +741 56 model.embedding_dim 1.0 +741 56 model.relation_dim 0.0 +741 56 training.batch_size 0.0 +741 56 training.label_smoothing 0.15712942423835086 +741 57 model.embedding_dim 2.0 +741 57 model.relation_dim 1.0 +741 57 training.batch_size 1.0 +741 57 training.label_smoothing 0.007492067953049835 +741 58 model.embedding_dim 2.0 +741 58 model.relation_dim 1.0 +741 58 training.batch_size 0.0 +741 58 training.label_smoothing 0.4337418822457999 +741 59 model.embedding_dim 
1.0 +741 59 model.relation_dim 0.0 +741 59 training.batch_size 1.0 +741 59 training.label_smoothing 0.15947551642582633 +741 60 model.embedding_dim 2.0 +741 60 model.relation_dim 1.0 +741 60 training.batch_size 0.0 +741 60 training.label_smoothing 0.16485703464331783 +741 61 model.embedding_dim 0.0 +741 61 model.relation_dim 0.0 +741 61 training.batch_size 0.0 +741 61 training.label_smoothing 0.0017956695964158585 +741 62 model.embedding_dim 1.0 +741 62 model.relation_dim 1.0 +741 62 training.batch_size 0.0 +741 62 training.label_smoothing 0.06648081841942603 +741 63 model.embedding_dim 1.0 +741 63 model.relation_dim 0.0 +741 63 training.batch_size 0.0 +741 63 training.label_smoothing 0.11971764499127735 +741 64 model.embedding_dim 1.0 +741 64 model.relation_dim 1.0 +741 64 training.batch_size 1.0 +741 64 training.label_smoothing 0.012836564295140662 +741 65 model.embedding_dim 2.0 +741 65 model.relation_dim 2.0 +741 65 training.batch_size 2.0 +741 65 training.label_smoothing 0.14004362121554345 +741 66 model.embedding_dim 0.0 +741 66 model.relation_dim 2.0 +741 66 training.batch_size 1.0 +741 66 training.label_smoothing 0.0011375169677350543 +741 67 model.embedding_dim 0.0 +741 67 model.relation_dim 1.0 +741 67 training.batch_size 2.0 +741 67 training.label_smoothing 0.6665366109563879 +741 68 model.embedding_dim 0.0 +741 68 model.relation_dim 1.0 +741 68 training.batch_size 0.0 +741 68 training.label_smoothing 0.20251560764967091 +741 69 model.embedding_dim 1.0 +741 69 model.relation_dim 1.0 +741 69 training.batch_size 0.0 +741 69 training.label_smoothing 0.3933019710039762 +741 70 model.embedding_dim 0.0 +741 70 model.relation_dim 0.0 +741 70 training.batch_size 1.0 +741 70 training.label_smoothing 0.008070351482739069 +741 71 model.embedding_dim 1.0 +741 71 model.relation_dim 2.0 +741 71 training.batch_size 1.0 +741 71 training.label_smoothing 0.008895203876832151 +741 72 model.embedding_dim 2.0 +741 72 model.relation_dim 2.0 +741 72 training.batch_size 2.0 
+741 72 training.label_smoothing 0.004191242604005341 +741 73 model.embedding_dim 1.0 +741 73 model.relation_dim 1.0 +741 73 training.batch_size 2.0 +741 73 training.label_smoothing 0.29149897030162136 +741 74 model.embedding_dim 0.0 +741 74 model.relation_dim 0.0 +741 74 training.batch_size 1.0 +741 74 training.label_smoothing 0.43514169458257024 +741 75 model.embedding_dim 1.0 +741 75 model.relation_dim 2.0 +741 75 training.batch_size 0.0 +741 75 training.label_smoothing 0.018484666683804947 +741 76 model.embedding_dim 0.0 +741 76 model.relation_dim 1.0 +741 76 training.batch_size 2.0 +741 76 training.label_smoothing 0.010094572952008824 +741 77 model.embedding_dim 1.0 +741 77 model.relation_dim 0.0 +741 77 training.batch_size 0.0 +741 77 training.label_smoothing 0.037513557298704835 +741 78 model.embedding_dim 1.0 +741 78 model.relation_dim 2.0 +741 78 training.batch_size 1.0 +741 78 training.label_smoothing 0.0013209580362772255 +741 79 model.embedding_dim 0.0 +741 79 model.relation_dim 0.0 +741 79 training.batch_size 2.0 +741 79 training.label_smoothing 0.07613709355831288 +741 80 model.embedding_dim 2.0 +741 80 model.relation_dim 0.0 +741 80 training.batch_size 1.0 +741 80 training.label_smoothing 0.22993883783104052 +741 81 model.embedding_dim 1.0 +741 81 model.relation_dim 1.0 +741 81 training.batch_size 0.0 +741 81 training.label_smoothing 0.08344389772522808 +741 82 model.embedding_dim 0.0 +741 82 model.relation_dim 0.0 +741 82 training.batch_size 1.0 +741 82 training.label_smoothing 0.007118424518463871 +741 83 model.embedding_dim 0.0 +741 83 model.relation_dim 1.0 +741 83 training.batch_size 1.0 +741 83 training.label_smoothing 0.3352179916885291 +741 84 model.embedding_dim 0.0 +741 84 model.relation_dim 2.0 +741 84 training.batch_size 0.0 +741 84 training.label_smoothing 0.010854801052393982 +741 85 model.embedding_dim 0.0 +741 85 model.relation_dim 2.0 +741 85 training.batch_size 1.0 +741 85 training.label_smoothing 0.0346552752400732 +741 86 
model.embedding_dim 0.0 +741 86 model.relation_dim 2.0 +741 86 training.batch_size 1.0 +741 86 training.label_smoothing 0.02342617237520877 +741 87 model.embedding_dim 1.0 +741 87 model.relation_dim 0.0 +741 87 training.batch_size 0.0 +741 87 training.label_smoothing 0.686677855325263 +741 88 model.embedding_dim 0.0 +741 88 model.relation_dim 2.0 +741 88 training.batch_size 2.0 +741 88 training.label_smoothing 0.011582180288895702 +741 89 model.embedding_dim 1.0 +741 89 model.relation_dim 1.0 +741 89 training.batch_size 1.0 +741 89 training.label_smoothing 0.011628594245107027 +741 90 model.embedding_dim 2.0 +741 90 model.relation_dim 0.0 +741 90 training.batch_size 1.0 +741 90 training.label_smoothing 0.961636806312234 +741 91 model.embedding_dim 1.0 +741 91 model.relation_dim 0.0 +741 91 training.batch_size 2.0 +741 91 training.label_smoothing 0.018094774140938737 +741 92 model.embedding_dim 1.0 +741 92 model.relation_dim 2.0 +741 92 training.batch_size 0.0 +741 92 training.label_smoothing 0.002474178723601139 +741 93 model.embedding_dim 2.0 +741 93 model.relation_dim 0.0 +741 93 training.batch_size 0.0 +741 93 training.label_smoothing 0.6476023132523517 +741 94 model.embedding_dim 0.0 +741 94 model.relation_dim 2.0 +741 94 training.batch_size 0.0 +741 94 training.label_smoothing 0.0014940830219754544 +741 95 model.embedding_dim 0.0 +741 95 model.relation_dim 0.0 +741 95 training.batch_size 2.0 +741 95 training.label_smoothing 0.0035450267538567493 +741 96 model.embedding_dim 2.0 +741 96 model.relation_dim 2.0 +741 96 training.batch_size 1.0 +741 96 training.label_smoothing 0.05162082458588875 +741 97 model.embedding_dim 1.0 +741 97 model.relation_dim 0.0 +741 97 training.batch_size 1.0 +741 97 training.label_smoothing 0.19580745115273768 +741 98 model.embedding_dim 2.0 +741 98 model.relation_dim 0.0 +741 98 training.batch_size 2.0 +741 98 training.label_smoothing 0.002135493656396981 +741 99 model.embedding_dim 1.0 +741 99 model.relation_dim 1.0 +741 99 
training.batch_size 0.0 +741 99 training.label_smoothing 0.013227062810376507 +741 100 model.embedding_dim 2.0 +741 100 model.relation_dim 2.0 +741 100 training.batch_size 2.0 +741 100 training.label_smoothing 0.11392035499388171 +741 1 dataset """kinships""" +741 1 model """transd""" +741 1 loss """bceaftersigmoid""" +741 1 regularizer """no""" +741 1 optimizer """adadelta""" +741 1 training_loop """lcwa""" +741 1 evaluator """rankbased""" +741 2 dataset """kinships""" +741 2 model """transd""" +741 2 loss """bceaftersigmoid""" +741 2 regularizer """no""" +741 2 optimizer """adadelta""" +741 2 training_loop """lcwa""" +741 2 evaluator """rankbased""" +741 3 dataset """kinships""" +741 3 model """transd""" +741 3 loss """bceaftersigmoid""" +741 3 regularizer """no""" +741 3 optimizer """adadelta""" +741 3 training_loop """lcwa""" +741 3 evaluator """rankbased""" +741 4 dataset """kinships""" +741 4 model """transd""" +741 4 loss """bceaftersigmoid""" +741 4 regularizer """no""" +741 4 optimizer """adadelta""" +741 4 training_loop """lcwa""" +741 4 evaluator """rankbased""" +741 5 dataset """kinships""" +741 5 model """transd""" +741 5 loss """bceaftersigmoid""" +741 5 regularizer """no""" +741 5 optimizer """adadelta""" +741 5 training_loop """lcwa""" +741 5 evaluator """rankbased""" +741 6 dataset """kinships""" +741 6 model """transd""" +741 6 loss """bceaftersigmoid""" +741 6 regularizer """no""" +741 6 optimizer """adadelta""" +741 6 training_loop """lcwa""" +741 6 evaluator """rankbased""" +741 7 dataset """kinships""" +741 7 model """transd""" +741 7 loss """bceaftersigmoid""" +741 7 regularizer """no""" +741 7 optimizer """adadelta""" +741 7 training_loop """lcwa""" +741 7 evaluator """rankbased""" +741 8 dataset """kinships""" +741 8 model """transd""" +741 8 loss """bceaftersigmoid""" +741 8 regularizer """no""" +741 8 optimizer """adadelta""" +741 8 training_loop """lcwa""" +741 8 evaluator """rankbased""" +741 9 dataset """kinships""" +741 9 model 
"""transd""" +741 9 loss """bceaftersigmoid""" +741 9 regularizer """no""" +741 9 optimizer """adadelta""" +741 9 training_loop """lcwa""" +741 9 evaluator """rankbased""" +741 10 dataset """kinships""" +741 10 model """transd""" +741 10 loss """bceaftersigmoid""" +741 10 regularizer """no""" +741 10 optimizer """adadelta""" +741 10 training_loop """lcwa""" +741 10 evaluator """rankbased""" +741 11 dataset """kinships""" +741 11 model """transd""" +741 11 loss """bceaftersigmoid""" +741 11 regularizer """no""" +741 11 optimizer """adadelta""" +741 11 training_loop """lcwa""" +741 11 evaluator """rankbased""" +741 12 dataset """kinships""" +741 12 model """transd""" +741 12 loss """bceaftersigmoid""" +741 12 regularizer """no""" +741 12 optimizer """adadelta""" +741 12 training_loop """lcwa""" +741 12 evaluator """rankbased""" +741 13 dataset """kinships""" +741 13 model """transd""" +741 13 loss """bceaftersigmoid""" +741 13 regularizer """no""" +741 13 optimizer """adadelta""" +741 13 training_loop """lcwa""" +741 13 evaluator """rankbased""" +741 14 dataset """kinships""" +741 14 model """transd""" +741 14 loss """bceaftersigmoid""" +741 14 regularizer """no""" +741 14 optimizer """adadelta""" +741 14 training_loop """lcwa""" +741 14 evaluator """rankbased""" +741 15 dataset """kinships""" +741 15 model """transd""" +741 15 loss """bceaftersigmoid""" +741 15 regularizer """no""" +741 15 optimizer """adadelta""" +741 15 training_loop """lcwa""" +741 15 evaluator """rankbased""" +741 16 dataset """kinships""" +741 16 model """transd""" +741 16 loss """bceaftersigmoid""" +741 16 regularizer """no""" +741 16 optimizer """adadelta""" +741 16 training_loop """lcwa""" +741 16 evaluator """rankbased""" +741 17 dataset """kinships""" +741 17 model """transd""" +741 17 loss """bceaftersigmoid""" +741 17 regularizer """no""" +741 17 optimizer """adadelta""" +741 17 training_loop """lcwa""" +741 17 evaluator """rankbased""" +741 18 dataset """kinships""" +741 18 model 
"""transd""" +741 18 loss """bceaftersigmoid""" +741 18 regularizer """no""" +741 18 optimizer """adadelta""" +741 18 training_loop """lcwa""" +741 18 evaluator """rankbased""" +741 19 dataset """kinships""" +741 19 model """transd""" +741 19 loss """bceaftersigmoid""" +741 19 regularizer """no""" +741 19 optimizer """adadelta""" +741 19 training_loop """lcwa""" +741 19 evaluator """rankbased""" +741 20 dataset """kinships""" +741 20 model """transd""" +741 20 loss """bceaftersigmoid""" +741 20 regularizer """no""" +741 20 optimizer """adadelta""" +741 20 training_loop """lcwa""" +741 20 evaluator """rankbased""" +741 21 dataset """kinships""" +741 21 model """transd""" +741 21 loss """bceaftersigmoid""" +741 21 regularizer """no""" +741 21 optimizer """adadelta""" +741 21 training_loop """lcwa""" +741 21 evaluator """rankbased""" +741 22 dataset """kinships""" +741 22 model """transd""" +741 22 loss """bceaftersigmoid""" +741 22 regularizer """no""" +741 22 optimizer """adadelta""" +741 22 training_loop """lcwa""" +741 22 evaluator """rankbased""" +741 23 dataset """kinships""" +741 23 model """transd""" +741 23 loss """bceaftersigmoid""" +741 23 regularizer """no""" +741 23 optimizer """adadelta""" +741 23 training_loop """lcwa""" +741 23 evaluator """rankbased""" +741 24 dataset """kinships""" +741 24 model """transd""" +741 24 loss """bceaftersigmoid""" +741 24 regularizer """no""" +741 24 optimizer """adadelta""" +741 24 training_loop """lcwa""" +741 24 evaluator """rankbased""" +741 25 dataset """kinships""" +741 25 model """transd""" +741 25 loss """bceaftersigmoid""" +741 25 regularizer """no""" +741 25 optimizer """adadelta""" +741 25 training_loop """lcwa""" +741 25 evaluator """rankbased""" +741 26 dataset """kinships""" +741 26 model """transd""" +741 26 loss """bceaftersigmoid""" +741 26 regularizer """no""" +741 26 optimizer """adadelta""" +741 26 training_loop """lcwa""" +741 26 evaluator """rankbased""" +741 27 dataset """kinships""" +741 27 model 
"""transd""" +741 27 loss """bceaftersigmoid""" +741 27 regularizer """no""" +741 27 optimizer """adadelta""" +741 27 training_loop """lcwa""" +741 27 evaluator """rankbased""" +741 28 dataset """kinships""" +741 28 model """transd""" +741 28 loss """bceaftersigmoid""" +741 28 regularizer """no""" +741 28 optimizer """adadelta""" +741 28 training_loop """lcwa""" +741 28 evaluator """rankbased""" +741 29 dataset """kinships""" +741 29 model """transd""" +741 29 loss """bceaftersigmoid""" +741 29 regularizer """no""" +741 29 optimizer """adadelta""" +741 29 training_loop """lcwa""" +741 29 evaluator """rankbased""" +741 30 dataset """kinships""" +741 30 model """transd""" +741 30 loss """bceaftersigmoid""" +741 30 regularizer """no""" +741 30 optimizer """adadelta""" +741 30 training_loop """lcwa""" +741 30 evaluator """rankbased""" +741 31 dataset """kinships""" +741 31 model """transd""" +741 31 loss """bceaftersigmoid""" +741 31 regularizer """no""" +741 31 optimizer """adadelta""" +741 31 training_loop """lcwa""" +741 31 evaluator """rankbased""" +741 32 dataset """kinships""" +741 32 model """transd""" +741 32 loss """bceaftersigmoid""" +741 32 regularizer """no""" +741 32 optimizer """adadelta""" +741 32 training_loop """lcwa""" +741 32 evaluator """rankbased""" +741 33 dataset """kinships""" +741 33 model """transd""" +741 33 loss """bceaftersigmoid""" +741 33 regularizer """no""" +741 33 optimizer """adadelta""" +741 33 training_loop """lcwa""" +741 33 evaluator """rankbased""" +741 34 dataset """kinships""" +741 34 model """transd""" +741 34 loss """bceaftersigmoid""" +741 34 regularizer """no""" +741 34 optimizer """adadelta""" +741 34 training_loop """lcwa""" +741 34 evaluator """rankbased""" +741 35 dataset """kinships""" +741 35 model """transd""" +741 35 loss """bceaftersigmoid""" +741 35 regularizer """no""" +741 35 optimizer """adadelta""" +741 35 training_loop """lcwa""" +741 35 evaluator """rankbased""" +741 36 dataset """kinships""" +741 36 model 
"""transd""" +741 36 loss """bceaftersigmoid""" +741 36 regularizer """no""" +741 36 optimizer """adadelta""" +741 36 training_loop """lcwa""" +741 36 evaluator """rankbased""" +741 37 dataset """kinships""" +741 37 model """transd""" +741 37 loss """bceaftersigmoid""" +741 37 regularizer """no""" +741 37 optimizer """adadelta""" +741 37 training_loop """lcwa""" +741 37 evaluator """rankbased""" +741 38 dataset """kinships""" +741 38 model """transd""" +741 38 loss """bceaftersigmoid""" +741 38 regularizer """no""" +741 38 optimizer """adadelta""" +741 38 training_loop """lcwa""" +741 38 evaluator """rankbased""" +741 39 dataset """kinships""" +741 39 model """transd""" +741 39 loss """bceaftersigmoid""" +741 39 regularizer """no""" +741 39 optimizer """adadelta""" +741 39 training_loop """lcwa""" +741 39 evaluator """rankbased""" +741 40 dataset """kinships""" +741 40 model """transd""" +741 40 loss """bceaftersigmoid""" +741 40 regularizer """no""" +741 40 optimizer """adadelta""" +741 40 training_loop """lcwa""" +741 40 evaluator """rankbased""" +741 41 dataset """kinships""" +741 41 model """transd""" +741 41 loss """bceaftersigmoid""" +741 41 regularizer """no""" +741 41 optimizer """adadelta""" +741 41 training_loop """lcwa""" +741 41 evaluator """rankbased""" +741 42 dataset """kinships""" +741 42 model """transd""" +741 42 loss """bceaftersigmoid""" +741 42 regularizer """no""" +741 42 optimizer """adadelta""" +741 42 training_loop """lcwa""" +741 42 evaluator """rankbased""" +741 43 dataset """kinships""" +741 43 model """transd""" +741 43 loss """bceaftersigmoid""" +741 43 regularizer """no""" +741 43 optimizer """adadelta""" +741 43 training_loop """lcwa""" +741 43 evaluator """rankbased""" +741 44 dataset """kinships""" +741 44 model """transd""" +741 44 loss """bceaftersigmoid""" +741 44 regularizer """no""" +741 44 optimizer """adadelta""" +741 44 training_loop """lcwa""" +741 44 evaluator """rankbased""" +741 45 dataset """kinships""" +741 45 model 
"""transd""" +741 45 loss """bceaftersigmoid""" +741 45 regularizer """no""" +741 45 optimizer """adadelta""" +741 45 training_loop """lcwa""" +741 45 evaluator """rankbased""" +741 46 dataset """kinships""" +741 46 model """transd""" +741 46 loss """bceaftersigmoid""" +741 46 regularizer """no""" +741 46 optimizer """adadelta""" +741 46 training_loop """lcwa""" +741 46 evaluator """rankbased""" +741 47 dataset """kinships""" +741 47 model """transd""" +741 47 loss """bceaftersigmoid""" +741 47 regularizer """no""" +741 47 optimizer """adadelta""" +741 47 training_loop """lcwa""" +741 47 evaluator """rankbased""" +741 48 dataset """kinships""" +741 48 model """transd""" +741 48 loss """bceaftersigmoid""" +741 48 regularizer """no""" +741 48 optimizer """adadelta""" +741 48 training_loop """lcwa""" +741 48 evaluator """rankbased""" +741 49 dataset """kinships""" +741 49 model """transd""" +741 49 loss """bceaftersigmoid""" +741 49 regularizer """no""" +741 49 optimizer """adadelta""" +741 49 training_loop """lcwa""" +741 49 evaluator """rankbased""" +741 50 dataset """kinships""" +741 50 model """transd""" +741 50 loss """bceaftersigmoid""" +741 50 regularizer """no""" +741 50 optimizer """adadelta""" +741 50 training_loop """lcwa""" +741 50 evaluator """rankbased""" +741 51 dataset """kinships""" +741 51 model """transd""" +741 51 loss """bceaftersigmoid""" +741 51 regularizer """no""" +741 51 optimizer """adadelta""" +741 51 training_loop """lcwa""" +741 51 evaluator """rankbased""" +741 52 dataset """kinships""" +741 52 model """transd""" +741 52 loss """bceaftersigmoid""" +741 52 regularizer """no""" +741 52 optimizer """adadelta""" +741 52 training_loop """lcwa""" +741 52 evaluator """rankbased""" +741 53 dataset """kinships""" +741 53 model """transd""" +741 53 loss """bceaftersigmoid""" +741 53 regularizer """no""" +741 53 optimizer """adadelta""" +741 53 training_loop """lcwa""" +741 53 evaluator """rankbased""" +741 54 dataset """kinships""" +741 54 model 
"""transd""" +741 54 loss """bceaftersigmoid""" +741 54 regularizer """no""" +741 54 optimizer """adadelta""" +741 54 training_loop """lcwa""" +741 54 evaluator """rankbased""" +741 55 dataset """kinships""" +741 55 model """transd""" +741 55 loss """bceaftersigmoid""" +741 55 regularizer """no""" +741 55 optimizer """adadelta""" +741 55 training_loop """lcwa""" +741 55 evaluator """rankbased""" +741 56 dataset """kinships""" +741 56 model """transd""" +741 56 loss """bceaftersigmoid""" +741 56 regularizer """no""" +741 56 optimizer """adadelta""" +741 56 training_loop """lcwa""" +741 56 evaluator """rankbased""" +741 57 dataset """kinships""" +741 57 model """transd""" +741 57 loss """bceaftersigmoid""" +741 57 regularizer """no""" +741 57 optimizer """adadelta""" +741 57 training_loop """lcwa""" +741 57 evaluator """rankbased""" +741 58 dataset """kinships""" +741 58 model """transd""" +741 58 loss """bceaftersigmoid""" +741 58 regularizer """no""" +741 58 optimizer """adadelta""" +741 58 training_loop """lcwa""" +741 58 evaluator """rankbased""" +741 59 dataset """kinships""" +741 59 model """transd""" +741 59 loss """bceaftersigmoid""" +741 59 regularizer """no""" +741 59 optimizer """adadelta""" +741 59 training_loop """lcwa""" +741 59 evaluator """rankbased""" +741 60 dataset """kinships""" +741 60 model """transd""" +741 60 loss """bceaftersigmoid""" +741 60 regularizer """no""" +741 60 optimizer """adadelta""" +741 60 training_loop """lcwa""" +741 60 evaluator """rankbased""" +741 61 dataset """kinships""" +741 61 model """transd""" +741 61 loss """bceaftersigmoid""" +741 61 regularizer """no""" +741 61 optimizer """adadelta""" +741 61 training_loop """lcwa""" +741 61 evaluator """rankbased""" +741 62 dataset """kinships""" +741 62 model """transd""" +741 62 loss """bceaftersigmoid""" +741 62 regularizer """no""" +741 62 optimizer """adadelta""" +741 62 training_loop """lcwa""" +741 62 evaluator """rankbased""" +741 63 dataset """kinships""" +741 63 model 
"""transd""" +741 63 loss """bceaftersigmoid""" +741 63 regularizer """no""" +741 63 optimizer """adadelta""" +741 63 training_loop """lcwa""" +741 63 evaluator """rankbased""" +741 64 dataset """kinships""" +741 64 model """transd""" +741 64 loss """bceaftersigmoid""" +741 64 regularizer """no""" +741 64 optimizer """adadelta""" +741 64 training_loop """lcwa""" +741 64 evaluator """rankbased""" +741 65 dataset """kinships""" +741 65 model """transd""" +741 65 loss """bceaftersigmoid""" +741 65 regularizer """no""" +741 65 optimizer """adadelta""" +741 65 training_loop """lcwa""" +741 65 evaluator """rankbased""" +741 66 dataset """kinships""" +741 66 model """transd""" +741 66 loss """bceaftersigmoid""" +741 66 regularizer """no""" +741 66 optimizer """adadelta""" +741 66 training_loop """lcwa""" +741 66 evaluator """rankbased""" +741 67 dataset """kinships""" +741 67 model """transd""" +741 67 loss """bceaftersigmoid""" +741 67 regularizer """no""" +741 67 optimizer """adadelta""" +741 67 training_loop """lcwa""" +741 67 evaluator """rankbased""" +741 68 dataset """kinships""" +741 68 model """transd""" +741 68 loss """bceaftersigmoid""" +741 68 regularizer """no""" +741 68 optimizer """adadelta""" +741 68 training_loop """lcwa""" +741 68 evaluator """rankbased""" +741 69 dataset """kinships""" +741 69 model """transd""" +741 69 loss """bceaftersigmoid""" +741 69 regularizer """no""" +741 69 optimizer """adadelta""" +741 69 training_loop """lcwa""" +741 69 evaluator """rankbased""" +741 70 dataset """kinships""" +741 70 model """transd""" +741 70 loss """bceaftersigmoid""" +741 70 regularizer """no""" +741 70 optimizer """adadelta""" +741 70 training_loop """lcwa""" +741 70 evaluator """rankbased""" +741 71 dataset """kinships""" +741 71 model """transd""" +741 71 loss """bceaftersigmoid""" +741 71 regularizer """no""" +741 71 optimizer """adadelta""" +741 71 training_loop """lcwa""" +741 71 evaluator """rankbased""" +741 72 dataset """kinships""" +741 72 model 
"""transd""" +741 72 loss """bceaftersigmoid""" +741 72 regularizer """no""" +741 72 optimizer """adadelta""" +741 72 training_loop """lcwa""" +741 72 evaluator """rankbased""" +741 73 dataset """kinships""" +741 73 model """transd""" +741 73 loss """bceaftersigmoid""" +741 73 regularizer """no""" +741 73 optimizer """adadelta""" +741 73 training_loop """lcwa""" +741 73 evaluator """rankbased""" +741 74 dataset """kinships""" +741 74 model """transd""" +741 74 loss """bceaftersigmoid""" +741 74 regularizer """no""" +741 74 optimizer """adadelta""" +741 74 training_loop """lcwa""" +741 74 evaluator """rankbased""" +741 75 dataset """kinships""" +741 75 model """transd""" +741 75 loss """bceaftersigmoid""" +741 75 regularizer """no""" +741 75 optimizer """adadelta""" +741 75 training_loop """lcwa""" +741 75 evaluator """rankbased""" +741 76 dataset """kinships""" +741 76 model """transd""" +741 76 loss """bceaftersigmoid""" +741 76 regularizer """no""" +741 76 optimizer """adadelta""" +741 76 training_loop """lcwa""" +741 76 evaluator """rankbased""" +741 77 dataset """kinships""" +741 77 model """transd""" +741 77 loss """bceaftersigmoid""" +741 77 regularizer """no""" +741 77 optimizer """adadelta""" +741 77 training_loop """lcwa""" +741 77 evaluator """rankbased""" +741 78 dataset """kinships""" +741 78 model """transd""" +741 78 loss """bceaftersigmoid""" +741 78 regularizer """no""" +741 78 optimizer """adadelta""" +741 78 training_loop """lcwa""" +741 78 evaluator """rankbased""" +741 79 dataset """kinships""" +741 79 model """transd""" +741 79 loss """bceaftersigmoid""" +741 79 regularizer """no""" +741 79 optimizer """adadelta""" +741 79 training_loop """lcwa""" +741 79 evaluator """rankbased""" +741 80 dataset """kinships""" +741 80 model """transd""" +741 80 loss """bceaftersigmoid""" +741 80 regularizer """no""" +741 80 optimizer """adadelta""" +741 80 training_loop """lcwa""" +741 80 evaluator """rankbased""" +741 81 dataset """kinships""" +741 81 model 
"""transd""" +741 81 loss """bceaftersigmoid""" +741 81 regularizer """no""" +741 81 optimizer """adadelta""" +741 81 training_loop """lcwa""" +741 81 evaluator """rankbased""" +741 82 dataset """kinships""" +741 82 model """transd""" +741 82 loss """bceaftersigmoid""" +741 82 regularizer """no""" +741 82 optimizer """adadelta""" +741 82 training_loop """lcwa""" +741 82 evaluator """rankbased""" +741 83 dataset """kinships""" +741 83 model """transd""" +741 83 loss """bceaftersigmoid""" +741 83 regularizer """no""" +741 83 optimizer """adadelta""" +741 83 training_loop """lcwa""" +741 83 evaluator """rankbased""" +741 84 dataset """kinships""" +741 84 model """transd""" +741 84 loss """bceaftersigmoid""" +741 84 regularizer """no""" +741 84 optimizer """adadelta""" +741 84 training_loop """lcwa""" +741 84 evaluator """rankbased""" +741 85 dataset """kinships""" +741 85 model """transd""" +741 85 loss """bceaftersigmoid""" +741 85 regularizer """no""" +741 85 optimizer """adadelta""" +741 85 training_loop """lcwa""" +741 85 evaluator """rankbased""" +741 86 dataset """kinships""" +741 86 model """transd""" +741 86 loss """bceaftersigmoid""" +741 86 regularizer """no""" +741 86 optimizer """adadelta""" +741 86 training_loop """lcwa""" +741 86 evaluator """rankbased""" +741 87 dataset """kinships""" +741 87 model """transd""" +741 87 loss """bceaftersigmoid""" +741 87 regularizer """no""" +741 87 optimizer """adadelta""" +741 87 training_loop """lcwa""" +741 87 evaluator """rankbased""" +741 88 dataset """kinships""" +741 88 model """transd""" +741 88 loss """bceaftersigmoid""" +741 88 regularizer """no""" +741 88 optimizer """adadelta""" +741 88 training_loop """lcwa""" +741 88 evaluator """rankbased""" +741 89 dataset """kinships""" +741 89 model """transd""" +741 89 loss """bceaftersigmoid""" +741 89 regularizer """no""" +741 89 optimizer """adadelta""" +741 89 training_loop """lcwa""" +741 89 evaluator """rankbased""" +741 90 dataset """kinships""" +741 90 model 
"""transd""" +741 90 loss """bceaftersigmoid""" +741 90 regularizer """no""" +741 90 optimizer """adadelta""" +741 90 training_loop """lcwa""" +741 90 evaluator """rankbased""" +741 91 dataset """kinships""" +741 91 model """transd""" +741 91 loss """bceaftersigmoid""" +741 91 regularizer """no""" +741 91 optimizer """adadelta""" +741 91 training_loop """lcwa""" +741 91 evaluator """rankbased""" +741 92 dataset """kinships""" +741 92 model """transd""" +741 92 loss """bceaftersigmoid""" +741 92 regularizer """no""" +741 92 optimizer """adadelta""" +741 92 training_loop """lcwa""" +741 92 evaluator """rankbased""" +741 93 dataset """kinships""" +741 93 model """transd""" +741 93 loss """bceaftersigmoid""" +741 93 regularizer """no""" +741 93 optimizer """adadelta""" +741 93 training_loop """lcwa""" +741 93 evaluator """rankbased""" +741 94 dataset """kinships""" +741 94 model """transd""" +741 94 loss """bceaftersigmoid""" +741 94 regularizer """no""" +741 94 optimizer """adadelta""" +741 94 training_loop """lcwa""" +741 94 evaluator """rankbased""" +741 95 dataset """kinships""" +741 95 model """transd""" +741 95 loss """bceaftersigmoid""" +741 95 regularizer """no""" +741 95 optimizer """adadelta""" +741 95 training_loop """lcwa""" +741 95 evaluator """rankbased""" +741 96 dataset """kinships""" +741 96 model """transd""" +741 96 loss """bceaftersigmoid""" +741 96 regularizer """no""" +741 96 optimizer """adadelta""" +741 96 training_loop """lcwa""" +741 96 evaluator """rankbased""" +741 97 dataset """kinships""" +741 97 model """transd""" +741 97 loss """bceaftersigmoid""" +741 97 regularizer """no""" +741 97 optimizer """adadelta""" +741 97 training_loop """lcwa""" +741 97 evaluator """rankbased""" +741 98 dataset """kinships""" +741 98 model """transd""" +741 98 loss """bceaftersigmoid""" +741 98 regularizer """no""" +741 98 optimizer """adadelta""" +741 98 training_loop """lcwa""" +741 98 evaluator """rankbased""" +741 99 dataset """kinships""" +741 99 model 
"""transd""" +741 99 loss """bceaftersigmoid""" +741 99 regularizer """no""" +741 99 optimizer """adadelta""" +741 99 training_loop """lcwa""" +741 99 evaluator """rankbased""" +741 100 dataset """kinships""" +741 100 model """transd""" +741 100 loss """bceaftersigmoid""" +741 100 regularizer """no""" +741 100 optimizer """adadelta""" +741 100 training_loop """lcwa""" +741 100 evaluator """rankbased""" +742 1 model.embedding_dim 0.0 +742 1 model.relation_dim 2.0 +742 1 training.batch_size 0.0 +742 1 training.label_smoothing 0.0031956981564393388 +742 2 model.embedding_dim 1.0 +742 2 model.relation_dim 1.0 +742 2 training.batch_size 0.0 +742 2 training.label_smoothing 0.09325730251361317 +742 3 model.embedding_dim 2.0 +742 3 model.relation_dim 1.0 +742 3 training.batch_size 1.0 +742 3 training.label_smoothing 0.028209959825177346 +742 4 model.embedding_dim 1.0 +742 4 model.relation_dim 0.0 +742 4 training.batch_size 2.0 +742 4 training.label_smoothing 0.5008011160064098 +742 5 model.embedding_dim 2.0 +742 5 model.relation_dim 1.0 +742 5 training.batch_size 1.0 +742 5 training.label_smoothing 0.002817517300514748 +742 6 model.embedding_dim 2.0 +742 6 model.relation_dim 1.0 +742 6 training.batch_size 2.0 +742 6 training.label_smoothing 0.0045738629835131925 +742 7 model.embedding_dim 1.0 +742 7 model.relation_dim 2.0 +742 7 training.batch_size 2.0 +742 7 training.label_smoothing 0.06815064877386447 +742 8 model.embedding_dim 1.0 +742 8 model.relation_dim 2.0 +742 8 training.batch_size 0.0 +742 8 training.label_smoothing 0.0019288349650312087 +742 9 model.embedding_dim 0.0 +742 9 model.relation_dim 1.0 +742 9 training.batch_size 0.0 +742 9 training.label_smoothing 0.3569358055548948 +742 10 model.embedding_dim 0.0 +742 10 model.relation_dim 0.0 +742 10 training.batch_size 1.0 +742 10 training.label_smoothing 0.032841237399474886 +742 11 model.embedding_dim 2.0 +742 11 model.relation_dim 0.0 +742 11 training.batch_size 2.0 +742 11 training.label_smoothing 
0.0036026003207710925 +742 12 model.embedding_dim 2.0 +742 12 model.relation_dim 0.0 +742 12 training.batch_size 0.0 +742 12 training.label_smoothing 0.007504626810320615 +742 13 model.embedding_dim 0.0 +742 13 model.relation_dim 1.0 +742 13 training.batch_size 0.0 +742 13 training.label_smoothing 0.039664927726103284 +742 14 model.embedding_dim 2.0 +742 14 model.relation_dim 2.0 +742 14 training.batch_size 2.0 +742 14 training.label_smoothing 0.03978890270534104 +742 15 model.embedding_dim 1.0 +742 15 model.relation_dim 0.0 +742 15 training.batch_size 2.0 +742 15 training.label_smoothing 0.4910195157021587 +742 16 model.embedding_dim 2.0 +742 16 model.relation_dim 2.0 +742 16 training.batch_size 0.0 +742 16 training.label_smoothing 0.0014060208110784405 +742 17 model.embedding_dim 2.0 +742 17 model.relation_dim 0.0 +742 17 training.batch_size 1.0 +742 17 training.label_smoothing 0.1119572741013726 +742 18 model.embedding_dim 2.0 +742 18 model.relation_dim 0.0 +742 18 training.batch_size 1.0 +742 18 training.label_smoothing 0.0011240399754619205 +742 19 model.embedding_dim 0.0 +742 19 model.relation_dim 0.0 +742 19 training.batch_size 1.0 +742 19 training.label_smoothing 0.9343259782550902 +742 20 model.embedding_dim 0.0 +742 20 model.relation_dim 2.0 +742 20 training.batch_size 0.0 +742 20 training.label_smoothing 0.14029436314732527 +742 21 model.embedding_dim 0.0 +742 21 model.relation_dim 1.0 +742 21 training.batch_size 0.0 +742 21 training.label_smoothing 0.11086367287221723 +742 22 model.embedding_dim 1.0 +742 22 model.relation_dim 0.0 +742 22 training.batch_size 2.0 +742 22 training.label_smoothing 0.3378107352898699 +742 23 model.embedding_dim 1.0 +742 23 model.relation_dim 2.0 +742 23 training.batch_size 2.0 +742 23 training.label_smoothing 0.20327157302439788 +742 24 model.embedding_dim 2.0 +742 24 model.relation_dim 1.0 +742 24 training.batch_size 2.0 +742 24 training.label_smoothing 0.005546090593108021 +742 25 model.embedding_dim 1.0 +742 25 
model.relation_dim 1.0 +742 25 training.batch_size 1.0 +742 25 training.label_smoothing 0.8486284858899974 +742 26 model.embedding_dim 0.0 +742 26 model.relation_dim 1.0 +742 26 training.batch_size 1.0 +742 26 training.label_smoothing 0.0015372634944898983 +742 27 model.embedding_dim 0.0 +742 27 model.relation_dim 1.0 +742 27 training.batch_size 1.0 +742 27 training.label_smoothing 0.014777486205198415 +742 28 model.embedding_dim 1.0 +742 28 model.relation_dim 0.0 +742 28 training.batch_size 0.0 +742 28 training.label_smoothing 0.03871900356112479 +742 29 model.embedding_dim 2.0 +742 29 model.relation_dim 1.0 +742 29 training.batch_size 0.0 +742 29 training.label_smoothing 0.005690573252481183 +742 30 model.embedding_dim 2.0 +742 30 model.relation_dim 2.0 +742 30 training.batch_size 0.0 +742 30 training.label_smoothing 0.0014829347356333353 +742 31 model.embedding_dim 0.0 +742 31 model.relation_dim 1.0 +742 31 training.batch_size 1.0 +742 31 training.label_smoothing 0.607917023276792 +742 32 model.embedding_dim 1.0 +742 32 model.relation_dim 0.0 +742 32 training.batch_size 1.0 +742 32 training.label_smoothing 0.009132896596836205 +742 33 model.embedding_dim 0.0 +742 33 model.relation_dim 2.0 +742 33 training.batch_size 1.0 +742 33 training.label_smoothing 0.03851914721079918 +742 34 model.embedding_dim 1.0 +742 34 model.relation_dim 1.0 +742 34 training.batch_size 0.0 +742 34 training.label_smoothing 0.08026216497937053 +742 35 model.embedding_dim 1.0 +742 35 model.relation_dim 2.0 +742 35 training.batch_size 1.0 +742 35 training.label_smoothing 0.004779821559720603 +742 36 model.embedding_dim 0.0 +742 36 model.relation_dim 1.0 +742 36 training.batch_size 2.0 +742 36 training.label_smoothing 0.08596418653734943 +742 37 model.embedding_dim 2.0 +742 37 model.relation_dim 2.0 +742 37 training.batch_size 0.0 +742 37 training.label_smoothing 0.004880862356214714 +742 38 model.embedding_dim 2.0 +742 38 model.relation_dim 0.0 +742 38 training.batch_size 0.0 +742 38 
training.label_smoothing 0.005195274434329373 +742 39 model.embedding_dim 1.0 +742 39 model.relation_dim 0.0 +742 39 training.batch_size 1.0 +742 39 training.label_smoothing 0.008864094630949587 +742 40 model.embedding_dim 2.0 +742 40 model.relation_dim 2.0 +742 40 training.batch_size 0.0 +742 40 training.label_smoothing 0.004031591342892227 +742 41 model.embedding_dim 0.0 +742 41 model.relation_dim 2.0 +742 41 training.batch_size 2.0 +742 41 training.label_smoothing 0.043492242535722286 +742 42 model.embedding_dim 1.0 +742 42 model.relation_dim 1.0 +742 42 training.batch_size 0.0 +742 42 training.label_smoothing 0.1321399344314511 +742 43 model.embedding_dim 1.0 +742 43 model.relation_dim 1.0 +742 43 training.batch_size 1.0 +742 43 training.label_smoothing 0.022744634363678636 +742 44 model.embedding_dim 2.0 +742 44 model.relation_dim 1.0 +742 44 training.batch_size 2.0 +742 44 training.label_smoothing 0.13189253431121367 +742 45 model.embedding_dim 1.0 +742 45 model.relation_dim 1.0 +742 45 training.batch_size 0.0 +742 45 training.label_smoothing 0.0015295034663317614 +742 46 model.embedding_dim 2.0 +742 46 model.relation_dim 0.0 +742 46 training.batch_size 1.0 +742 46 training.label_smoothing 0.3797731545644768 +742 47 model.embedding_dim 1.0 +742 47 model.relation_dim 1.0 +742 47 training.batch_size 2.0 +742 47 training.label_smoothing 0.05211105701215339 +742 48 model.embedding_dim 0.0 +742 48 model.relation_dim 1.0 +742 48 training.batch_size 1.0 +742 48 training.label_smoothing 0.31719713550363215 +742 49 model.embedding_dim 1.0 +742 49 model.relation_dim 2.0 +742 49 training.batch_size 0.0 +742 49 training.label_smoothing 0.013889696929344105 +742 50 model.embedding_dim 1.0 +742 50 model.relation_dim 0.0 +742 50 training.batch_size 1.0 +742 50 training.label_smoothing 0.008425730826420185 +742 51 model.embedding_dim 0.0 +742 51 model.relation_dim 2.0 +742 51 training.batch_size 2.0 +742 51 training.label_smoothing 0.3851556177832266 +742 52 
model.embedding_dim 2.0 +742 52 model.relation_dim 0.0 +742 52 training.batch_size 1.0 +742 52 training.label_smoothing 0.185110979146005 +742 53 model.embedding_dim 2.0 +742 53 model.relation_dim 0.0 +742 53 training.batch_size 0.0 +742 53 training.label_smoothing 0.09133118605409955 +742 54 model.embedding_dim 1.0 +742 54 model.relation_dim 1.0 +742 54 training.batch_size 0.0 +742 54 training.label_smoothing 0.0014944785042990758 +742 55 model.embedding_dim 1.0 +742 55 model.relation_dim 2.0 +742 55 training.batch_size 2.0 +742 55 training.label_smoothing 0.1847146286250205 +742 56 model.embedding_dim 0.0 +742 56 model.relation_dim 0.0 +742 56 training.batch_size 1.0 +742 56 training.label_smoothing 0.003589293478099322 +742 57 model.embedding_dim 2.0 +742 57 model.relation_dim 0.0 +742 57 training.batch_size 2.0 +742 57 training.label_smoothing 0.9310978557971196 +742 58 model.embedding_dim 0.0 +742 58 model.relation_dim 1.0 +742 58 training.batch_size 2.0 +742 58 training.label_smoothing 0.19239998007178755 +742 59 model.embedding_dim 1.0 +742 59 model.relation_dim 0.0 +742 59 training.batch_size 1.0 +742 59 training.label_smoothing 0.7928021901559704 +742 60 model.embedding_dim 1.0 +742 60 model.relation_dim 0.0 +742 60 training.batch_size 1.0 +742 60 training.label_smoothing 0.005749405286746061 +742 61 model.embedding_dim 1.0 +742 61 model.relation_dim 0.0 +742 61 training.batch_size 2.0 +742 61 training.label_smoothing 0.8697650500900841 +742 62 model.embedding_dim 0.0 +742 62 model.relation_dim 2.0 +742 62 training.batch_size 2.0 +742 62 training.label_smoothing 0.11577963678954228 +742 63 model.embedding_dim 0.0 +742 63 model.relation_dim 1.0 +742 63 training.batch_size 2.0 +742 63 training.label_smoothing 0.0012952476597012648 +742 64 model.embedding_dim 0.0 +742 64 model.relation_dim 1.0 +742 64 training.batch_size 0.0 +742 64 training.label_smoothing 0.010477578501686166 +742 65 model.embedding_dim 2.0 +742 65 model.relation_dim 2.0 +742 65 
training.batch_size 2.0 +742 65 training.label_smoothing 0.2565150889603915 +742 66 model.embedding_dim 2.0 +742 66 model.relation_dim 0.0 +742 66 training.batch_size 2.0 +742 66 training.label_smoothing 0.008850027679389309 +742 67 model.embedding_dim 1.0 +742 67 model.relation_dim 1.0 +742 67 training.batch_size 1.0 +742 67 training.label_smoothing 0.004330844195811356 +742 68 model.embedding_dim 2.0 +742 68 model.relation_dim 0.0 +742 68 training.batch_size 0.0 +742 68 training.label_smoothing 0.5995146421581922 +742 69 model.embedding_dim 0.0 +742 69 model.relation_dim 1.0 +742 69 training.batch_size 2.0 +742 69 training.label_smoothing 0.0013290766655709263 +742 70 model.embedding_dim 2.0 +742 70 model.relation_dim 2.0 +742 70 training.batch_size 0.0 +742 70 training.label_smoothing 0.1451651633891028 +742 71 model.embedding_dim 2.0 +742 71 model.relation_dim 0.0 +742 71 training.batch_size 2.0 +742 71 training.label_smoothing 0.0288329048096525 +742 72 model.embedding_dim 0.0 +742 72 model.relation_dim 2.0 +742 72 training.batch_size 1.0 +742 72 training.label_smoothing 0.32359428169342774 +742 73 model.embedding_dim 0.0 +742 73 model.relation_dim 1.0 +742 73 training.batch_size 1.0 +742 73 training.label_smoothing 0.0434407342681247 +742 74 model.embedding_dim 1.0 +742 74 model.relation_dim 0.0 +742 74 training.batch_size 1.0 +742 74 training.label_smoothing 0.3490866621048778 +742 75 model.embedding_dim 2.0 +742 75 model.relation_dim 0.0 +742 75 training.batch_size 2.0 +742 75 training.label_smoothing 0.040278235406422065 +742 76 model.embedding_dim 0.0 +742 76 model.relation_dim 1.0 +742 76 training.batch_size 1.0 +742 76 training.label_smoothing 0.09146408336834624 +742 77 model.embedding_dim 1.0 +742 77 model.relation_dim 0.0 +742 77 training.batch_size 1.0 +742 77 training.label_smoothing 0.04605159396112985 +742 78 model.embedding_dim 2.0 +742 78 model.relation_dim 2.0 +742 78 training.batch_size 1.0 +742 78 training.label_smoothing 
0.029390311409403524 +742 79 model.embedding_dim 0.0 +742 79 model.relation_dim 2.0 +742 79 training.batch_size 0.0 +742 79 training.label_smoothing 0.05956898936944729 +742 80 model.embedding_dim 0.0 +742 80 model.relation_dim 1.0 +742 80 training.batch_size 1.0 +742 80 training.label_smoothing 0.00223586531403608 +742 81 model.embedding_dim 0.0 +742 81 model.relation_dim 2.0 +742 81 training.batch_size 1.0 +742 81 training.label_smoothing 0.0837015008886886 +742 82 model.embedding_dim 2.0 +742 82 model.relation_dim 2.0 +742 82 training.batch_size 1.0 +742 82 training.label_smoothing 0.5414084832884337 +742 83 model.embedding_dim 0.0 +742 83 model.relation_dim 0.0 +742 83 training.batch_size 2.0 +742 83 training.label_smoothing 0.016568840906279605 +742 84 model.embedding_dim 2.0 +742 84 model.relation_dim 1.0 +742 84 training.batch_size 0.0 +742 84 training.label_smoothing 0.04277383495604921 +742 85 model.embedding_dim 1.0 +742 85 model.relation_dim 0.0 +742 85 training.batch_size 0.0 +742 85 training.label_smoothing 0.01156941205166702 +742 86 model.embedding_dim 2.0 +742 86 model.relation_dim 0.0 +742 86 training.batch_size 1.0 +742 86 training.label_smoothing 0.009695671211050895 +742 87 model.embedding_dim 2.0 +742 87 model.relation_dim 0.0 +742 87 training.batch_size 2.0 +742 87 training.label_smoothing 0.0011545646722069104 +742 88 model.embedding_dim 2.0 +742 88 model.relation_dim 2.0 +742 88 training.batch_size 2.0 +742 88 training.label_smoothing 0.057620906654740026 +742 89 model.embedding_dim 1.0 +742 89 model.relation_dim 0.0 +742 89 training.batch_size 2.0 +742 89 training.label_smoothing 0.9318934987514906 +742 90 model.embedding_dim 2.0 +742 90 model.relation_dim 2.0 +742 90 training.batch_size 1.0 +742 90 training.label_smoothing 0.0024780884977524315 +742 91 model.embedding_dim 0.0 +742 91 model.relation_dim 1.0 +742 91 training.batch_size 1.0 +742 91 training.label_smoothing 0.561003181184559 +742 92 model.embedding_dim 2.0 +742 92 
model.relation_dim 0.0 +742 92 training.batch_size 1.0 +742 92 training.label_smoothing 0.10926949587600501 +742 93 model.embedding_dim 2.0 +742 93 model.relation_dim 0.0 +742 93 training.batch_size 0.0 +742 93 training.label_smoothing 0.018387531231255846 +742 94 model.embedding_dim 2.0 +742 94 model.relation_dim 0.0 +742 94 training.batch_size 1.0 +742 94 training.label_smoothing 0.037850966729426734 +742 95 model.embedding_dim 0.0 +742 95 model.relation_dim 2.0 +742 95 training.batch_size 0.0 +742 95 training.label_smoothing 0.5759075892032026 +742 96 model.embedding_dim 0.0 +742 96 model.relation_dim 2.0 +742 96 training.batch_size 2.0 +742 96 training.label_smoothing 0.12913720725076785 +742 97 model.embedding_dim 2.0 +742 97 model.relation_dim 1.0 +742 97 training.batch_size 0.0 +742 97 training.label_smoothing 0.0020372564804168147 +742 98 model.embedding_dim 1.0 +742 98 model.relation_dim 2.0 +742 98 training.batch_size 1.0 +742 98 training.label_smoothing 0.2541608378594429 +742 99 model.embedding_dim 2.0 +742 99 model.relation_dim 2.0 +742 99 training.batch_size 1.0 +742 99 training.label_smoothing 0.6958708786495682 +742 100 model.embedding_dim 0.0 +742 100 model.relation_dim 1.0 +742 100 training.batch_size 2.0 +742 100 training.label_smoothing 0.2042070254100327 +742 1 dataset """kinships""" +742 1 model """transd""" +742 1 loss """softplus""" +742 1 regularizer """no""" +742 1 optimizer """adadelta""" +742 1 training_loop """lcwa""" +742 1 evaluator """rankbased""" +742 2 dataset """kinships""" +742 2 model """transd""" +742 2 loss """softplus""" +742 2 regularizer """no""" +742 2 optimizer """adadelta""" +742 2 training_loop """lcwa""" +742 2 evaluator """rankbased""" +742 3 dataset """kinships""" +742 3 model """transd""" +742 3 loss """softplus""" +742 3 regularizer """no""" +742 3 optimizer """adadelta""" +742 3 training_loop """lcwa""" +742 3 evaluator """rankbased""" +742 4 dataset """kinships""" +742 4 model """transd""" +742 4 loss 
"""softplus""" +742 4 regularizer """no""" +742 4 optimizer """adadelta""" +742 4 training_loop """lcwa""" +742 4 evaluator """rankbased""" +742 5 dataset """kinships""" +742 5 model """transd""" +742 5 loss """softplus""" +742 5 regularizer """no""" +742 5 optimizer """adadelta""" +742 5 training_loop """lcwa""" +742 5 evaluator """rankbased""" +742 6 dataset """kinships""" +742 6 model """transd""" +742 6 loss """softplus""" +742 6 regularizer """no""" +742 6 optimizer """adadelta""" +742 6 training_loop """lcwa""" +742 6 evaluator """rankbased""" +742 7 dataset """kinships""" +742 7 model """transd""" +742 7 loss """softplus""" +742 7 regularizer """no""" +742 7 optimizer """adadelta""" +742 7 training_loop """lcwa""" +742 7 evaluator """rankbased""" +742 8 dataset """kinships""" +742 8 model """transd""" +742 8 loss """softplus""" +742 8 regularizer """no""" +742 8 optimizer """adadelta""" +742 8 training_loop """lcwa""" +742 8 evaluator """rankbased""" +742 9 dataset """kinships""" +742 9 model """transd""" +742 9 loss """softplus""" +742 9 regularizer """no""" +742 9 optimizer """adadelta""" +742 9 training_loop """lcwa""" +742 9 evaluator """rankbased""" +742 10 dataset """kinships""" +742 10 model """transd""" +742 10 loss """softplus""" +742 10 regularizer """no""" +742 10 optimizer """adadelta""" +742 10 training_loop """lcwa""" +742 10 evaluator """rankbased""" +742 11 dataset """kinships""" +742 11 model """transd""" +742 11 loss """softplus""" +742 11 regularizer """no""" +742 11 optimizer """adadelta""" +742 11 training_loop """lcwa""" +742 11 evaluator """rankbased""" +742 12 dataset """kinships""" +742 12 model """transd""" +742 12 loss """softplus""" +742 12 regularizer """no""" +742 12 optimizer """adadelta""" +742 12 training_loop """lcwa""" +742 12 evaluator """rankbased""" +742 13 dataset """kinships""" +742 13 model """transd""" +742 13 loss """softplus""" +742 13 regularizer """no""" +742 13 optimizer """adadelta""" +742 13 training_loop 
"""lcwa""" +742 13 evaluator """rankbased""" +742 14 dataset """kinships""" +742 14 model """transd""" +742 14 loss """softplus""" +742 14 regularizer """no""" +742 14 optimizer """adadelta""" +742 14 training_loop """lcwa""" +742 14 evaluator """rankbased""" +742 15 dataset """kinships""" +742 15 model """transd""" +742 15 loss """softplus""" +742 15 regularizer """no""" +742 15 optimizer """adadelta""" +742 15 training_loop """lcwa""" +742 15 evaluator """rankbased""" +742 16 dataset """kinships""" +742 16 model """transd""" +742 16 loss """softplus""" +742 16 regularizer """no""" +742 16 optimizer """adadelta""" +742 16 training_loop """lcwa""" +742 16 evaluator """rankbased""" +742 17 dataset """kinships""" +742 17 model """transd""" +742 17 loss """softplus""" +742 17 regularizer """no""" +742 17 optimizer """adadelta""" +742 17 training_loop """lcwa""" +742 17 evaluator """rankbased""" +742 18 dataset """kinships""" +742 18 model """transd""" +742 18 loss """softplus""" +742 18 regularizer """no""" +742 18 optimizer """adadelta""" +742 18 training_loop """lcwa""" +742 18 evaluator """rankbased""" +742 19 dataset """kinships""" +742 19 model """transd""" +742 19 loss """softplus""" +742 19 regularizer """no""" +742 19 optimizer """adadelta""" +742 19 training_loop """lcwa""" +742 19 evaluator """rankbased""" +742 20 dataset """kinships""" +742 20 model """transd""" +742 20 loss """softplus""" +742 20 regularizer """no""" +742 20 optimizer """adadelta""" +742 20 training_loop """lcwa""" +742 20 evaluator """rankbased""" +742 21 dataset """kinships""" +742 21 model """transd""" +742 21 loss """softplus""" +742 21 regularizer """no""" +742 21 optimizer """adadelta""" +742 21 training_loop """lcwa""" +742 21 evaluator """rankbased""" +742 22 dataset """kinships""" +742 22 model """transd""" +742 22 loss """softplus""" +742 22 regularizer """no""" +742 22 optimizer """adadelta""" +742 22 training_loop """lcwa""" +742 22 evaluator """rankbased""" +742 23 dataset 
"""kinships""" +742 23 model """transd""" +742 23 loss """softplus""" +742 23 regularizer """no""" +742 23 optimizer """adadelta""" +742 23 training_loop """lcwa""" +742 23 evaluator """rankbased""" +742 24 dataset """kinships""" +742 24 model """transd""" +742 24 loss """softplus""" +742 24 regularizer """no""" +742 24 optimizer """adadelta""" +742 24 training_loop """lcwa""" +742 24 evaluator """rankbased""" +742 25 dataset """kinships""" +742 25 model """transd""" +742 25 loss """softplus""" +742 25 regularizer """no""" +742 25 optimizer """adadelta""" +742 25 training_loop """lcwa""" +742 25 evaluator """rankbased""" +742 26 dataset """kinships""" +742 26 model """transd""" +742 26 loss """softplus""" +742 26 regularizer """no""" +742 26 optimizer """adadelta""" +742 26 training_loop """lcwa""" +742 26 evaluator """rankbased""" +742 27 dataset """kinships""" +742 27 model """transd""" +742 27 loss """softplus""" +742 27 regularizer """no""" +742 27 optimizer """adadelta""" +742 27 training_loop """lcwa""" +742 27 evaluator """rankbased""" +742 28 dataset """kinships""" +742 28 model """transd""" +742 28 loss """softplus""" +742 28 regularizer """no""" +742 28 optimizer """adadelta""" +742 28 training_loop """lcwa""" +742 28 evaluator """rankbased""" +742 29 dataset """kinships""" +742 29 model """transd""" +742 29 loss """softplus""" +742 29 regularizer """no""" +742 29 optimizer """adadelta""" +742 29 training_loop """lcwa""" +742 29 evaluator """rankbased""" +742 30 dataset """kinships""" +742 30 model """transd""" +742 30 loss """softplus""" +742 30 regularizer """no""" +742 30 optimizer """adadelta""" +742 30 training_loop """lcwa""" +742 30 evaluator """rankbased""" +742 31 dataset """kinships""" +742 31 model """transd""" +742 31 loss """softplus""" +742 31 regularizer """no""" +742 31 optimizer """adadelta""" +742 31 training_loop """lcwa""" +742 31 evaluator """rankbased""" +742 32 dataset """kinships""" +742 32 model """transd""" +742 32 loss 
"""softplus""" +742 32 regularizer """no""" +742 32 optimizer """adadelta""" +742 32 training_loop """lcwa""" +742 32 evaluator """rankbased""" +742 33 dataset """kinships""" +742 33 model """transd""" +742 33 loss """softplus""" +742 33 regularizer """no""" +742 33 optimizer """adadelta""" +742 33 training_loop """lcwa""" +742 33 evaluator """rankbased""" +742 34 dataset """kinships""" +742 34 model """transd""" +742 34 loss """softplus""" +742 34 regularizer """no""" +742 34 optimizer """adadelta""" +742 34 training_loop """lcwa""" +742 34 evaluator """rankbased""" +742 35 dataset """kinships""" +742 35 model """transd""" +742 35 loss """softplus""" +742 35 regularizer """no""" +742 35 optimizer """adadelta""" +742 35 training_loop """lcwa""" +742 35 evaluator """rankbased""" +742 36 dataset """kinships""" +742 36 model """transd""" +742 36 loss """softplus""" +742 36 regularizer """no""" +742 36 optimizer """adadelta""" +742 36 training_loop """lcwa""" +742 36 evaluator """rankbased""" +742 37 dataset """kinships""" +742 37 model """transd""" +742 37 loss """softplus""" +742 37 regularizer """no""" +742 37 optimizer """adadelta""" +742 37 training_loop """lcwa""" +742 37 evaluator """rankbased""" +742 38 dataset """kinships""" +742 38 model """transd""" +742 38 loss """softplus""" +742 38 regularizer """no""" +742 38 optimizer """adadelta""" +742 38 training_loop """lcwa""" +742 38 evaluator """rankbased""" +742 39 dataset """kinships""" +742 39 model """transd""" +742 39 loss """softplus""" +742 39 regularizer """no""" +742 39 optimizer """adadelta""" +742 39 training_loop """lcwa""" +742 39 evaluator """rankbased""" +742 40 dataset """kinships""" +742 40 model """transd""" +742 40 loss """softplus""" +742 40 regularizer """no""" +742 40 optimizer """adadelta""" +742 40 training_loop """lcwa""" +742 40 evaluator """rankbased""" +742 41 dataset """kinships""" +742 41 model """transd""" +742 41 loss """softplus""" +742 41 regularizer """no""" +742 41 optimizer 
"""adadelta""" +742 41 training_loop """lcwa""" +742 41 evaluator """rankbased""" +742 42 dataset """kinships""" +742 42 model """transd""" +742 42 loss """softplus""" +742 42 regularizer """no""" +742 42 optimizer """adadelta""" +742 42 training_loop """lcwa""" +742 42 evaluator """rankbased""" +742 43 dataset """kinships""" +742 43 model """transd""" +742 43 loss """softplus""" +742 43 regularizer """no""" +742 43 optimizer """adadelta""" +742 43 training_loop """lcwa""" +742 43 evaluator """rankbased""" +742 44 dataset """kinships""" +742 44 model """transd""" +742 44 loss """softplus""" +742 44 regularizer """no""" +742 44 optimizer """adadelta""" +742 44 training_loop """lcwa""" +742 44 evaluator """rankbased""" +742 45 dataset """kinships""" +742 45 model """transd""" +742 45 loss """softplus""" +742 45 regularizer """no""" +742 45 optimizer """adadelta""" +742 45 training_loop """lcwa""" +742 45 evaluator """rankbased""" +742 46 dataset """kinships""" +742 46 model """transd""" +742 46 loss """softplus""" +742 46 regularizer """no""" +742 46 optimizer """adadelta""" +742 46 training_loop """lcwa""" +742 46 evaluator """rankbased""" +742 47 dataset """kinships""" +742 47 model """transd""" +742 47 loss """softplus""" +742 47 regularizer """no""" +742 47 optimizer """adadelta""" +742 47 training_loop """lcwa""" +742 47 evaluator """rankbased""" +742 48 dataset """kinships""" +742 48 model """transd""" +742 48 loss """softplus""" +742 48 regularizer """no""" +742 48 optimizer """adadelta""" +742 48 training_loop """lcwa""" +742 48 evaluator """rankbased""" +742 49 dataset """kinships""" +742 49 model """transd""" +742 49 loss """softplus""" +742 49 regularizer """no""" +742 49 optimizer """adadelta""" +742 49 training_loop """lcwa""" +742 49 evaluator """rankbased""" +742 50 dataset """kinships""" +742 50 model """transd""" +742 50 loss """softplus""" +742 50 regularizer """no""" +742 50 optimizer """adadelta""" +742 50 training_loop """lcwa""" +742 50 
evaluator """rankbased""" +742 51 dataset """kinships""" +742 51 model """transd""" +742 51 loss """softplus""" +742 51 regularizer """no""" +742 51 optimizer """adadelta""" +742 51 training_loop """lcwa""" +742 51 evaluator """rankbased""" +742 52 dataset """kinships""" +742 52 model """transd""" +742 52 loss """softplus""" +742 52 regularizer """no""" +742 52 optimizer """adadelta""" +742 52 training_loop """lcwa""" +742 52 evaluator """rankbased""" +742 53 dataset """kinships""" +742 53 model """transd""" +742 53 loss """softplus""" +742 53 regularizer """no""" +742 53 optimizer """adadelta""" +742 53 training_loop """lcwa""" +742 53 evaluator """rankbased""" +742 54 dataset """kinships""" +742 54 model """transd""" +742 54 loss """softplus""" +742 54 regularizer """no""" +742 54 optimizer """adadelta""" +742 54 training_loop """lcwa""" +742 54 evaluator """rankbased""" +742 55 dataset """kinships""" +742 55 model """transd""" +742 55 loss """softplus""" +742 55 regularizer """no""" +742 55 optimizer """adadelta""" +742 55 training_loop """lcwa""" +742 55 evaluator """rankbased""" +742 56 dataset """kinships""" +742 56 model """transd""" +742 56 loss """softplus""" +742 56 regularizer """no""" +742 56 optimizer """adadelta""" +742 56 training_loop """lcwa""" +742 56 evaluator """rankbased""" +742 57 dataset """kinships""" +742 57 model """transd""" +742 57 loss """softplus""" +742 57 regularizer """no""" +742 57 optimizer """adadelta""" +742 57 training_loop """lcwa""" +742 57 evaluator """rankbased""" +742 58 dataset """kinships""" +742 58 model """transd""" +742 58 loss """softplus""" +742 58 regularizer """no""" +742 58 optimizer """adadelta""" +742 58 training_loop """lcwa""" +742 58 evaluator """rankbased""" +742 59 dataset """kinships""" +742 59 model """transd""" +742 59 loss """softplus""" +742 59 regularizer """no""" +742 59 optimizer """adadelta""" +742 59 training_loop """lcwa""" +742 59 evaluator """rankbased""" +742 60 dataset """kinships""" +742 60 
model """transd""" +742 60 loss """softplus""" +742 60 regularizer """no""" +742 60 optimizer """adadelta""" +742 60 training_loop """lcwa""" +742 60 evaluator """rankbased""" +742 61 dataset """kinships""" +742 61 model """transd""" +742 61 loss """softplus""" +742 61 regularizer """no""" +742 61 optimizer """adadelta""" +742 61 training_loop """lcwa""" +742 61 evaluator """rankbased""" +742 62 dataset """kinships""" +742 62 model """transd""" +742 62 loss """softplus""" +742 62 regularizer """no""" +742 62 optimizer """adadelta""" +742 62 training_loop """lcwa""" +742 62 evaluator """rankbased""" +742 63 dataset """kinships""" +742 63 model """transd""" +742 63 loss """softplus""" +742 63 regularizer """no""" +742 63 optimizer """adadelta""" +742 63 training_loop """lcwa""" +742 63 evaluator """rankbased""" +742 64 dataset """kinships""" +742 64 model """transd""" +742 64 loss """softplus""" +742 64 regularizer """no""" +742 64 optimizer """adadelta""" +742 64 training_loop """lcwa""" +742 64 evaluator """rankbased""" +742 65 dataset """kinships""" +742 65 model """transd""" +742 65 loss """softplus""" +742 65 regularizer """no""" +742 65 optimizer """adadelta""" +742 65 training_loop """lcwa""" +742 65 evaluator """rankbased""" +742 66 dataset """kinships""" +742 66 model """transd""" +742 66 loss """softplus""" +742 66 regularizer """no""" +742 66 optimizer """adadelta""" +742 66 training_loop """lcwa""" +742 66 evaluator """rankbased""" +742 67 dataset """kinships""" +742 67 model """transd""" +742 67 loss """softplus""" +742 67 regularizer """no""" +742 67 optimizer """adadelta""" +742 67 training_loop """lcwa""" +742 67 evaluator """rankbased""" +742 68 dataset """kinships""" +742 68 model """transd""" +742 68 loss """softplus""" +742 68 regularizer """no""" +742 68 optimizer """adadelta""" +742 68 training_loop """lcwa""" +742 68 evaluator """rankbased""" +742 69 dataset """kinships""" +742 69 model """transd""" +742 69 loss """softplus""" +742 69 
regularizer """no""" +742 69 optimizer """adadelta""" +742 69 training_loop """lcwa""" +742 69 evaluator """rankbased""" +742 70 dataset """kinships""" +742 70 model """transd""" +742 70 loss """softplus""" +742 70 regularizer """no""" +742 70 optimizer """adadelta""" +742 70 training_loop """lcwa""" +742 70 evaluator """rankbased""" +742 71 dataset """kinships""" +742 71 model """transd""" +742 71 loss """softplus""" +742 71 regularizer """no""" +742 71 optimizer """adadelta""" +742 71 training_loop """lcwa""" +742 71 evaluator """rankbased""" +742 72 dataset """kinships""" +742 72 model """transd""" +742 72 loss """softplus""" +742 72 regularizer """no""" +742 72 optimizer """adadelta""" +742 72 training_loop """lcwa""" +742 72 evaluator """rankbased""" +742 73 dataset """kinships""" +742 73 model """transd""" +742 73 loss """softplus""" +742 73 regularizer """no""" +742 73 optimizer """adadelta""" +742 73 training_loop """lcwa""" +742 73 evaluator """rankbased""" +742 74 dataset """kinships""" +742 74 model """transd""" +742 74 loss """softplus""" +742 74 regularizer """no""" +742 74 optimizer """adadelta""" +742 74 training_loop """lcwa""" +742 74 evaluator """rankbased""" +742 75 dataset """kinships""" +742 75 model """transd""" +742 75 loss """softplus""" +742 75 regularizer """no""" +742 75 optimizer """adadelta""" +742 75 training_loop """lcwa""" +742 75 evaluator """rankbased""" +742 76 dataset """kinships""" +742 76 model """transd""" +742 76 loss """softplus""" +742 76 regularizer """no""" +742 76 optimizer """adadelta""" +742 76 training_loop """lcwa""" +742 76 evaluator """rankbased""" +742 77 dataset """kinships""" +742 77 model """transd""" +742 77 loss """softplus""" +742 77 regularizer """no""" +742 77 optimizer """adadelta""" +742 77 training_loop """lcwa""" +742 77 evaluator """rankbased""" +742 78 dataset """kinships""" +742 78 model """transd""" +742 78 loss """softplus""" +742 78 regularizer """no""" +742 78 optimizer """adadelta""" +742 78 
training_loop """lcwa""" +742 78 evaluator """rankbased""" +742 79 dataset """kinships""" +742 79 model """transd""" +742 79 loss """softplus""" +742 79 regularizer """no""" +742 79 optimizer """adadelta""" +742 79 training_loop """lcwa""" +742 79 evaluator """rankbased""" +742 80 dataset """kinships""" +742 80 model """transd""" +742 80 loss """softplus""" +742 80 regularizer """no""" +742 80 optimizer """adadelta""" +742 80 training_loop """lcwa""" +742 80 evaluator """rankbased""" +742 81 dataset """kinships""" +742 81 model """transd""" +742 81 loss """softplus""" +742 81 regularizer """no""" +742 81 optimizer """adadelta""" +742 81 training_loop """lcwa""" +742 81 evaluator """rankbased""" +742 82 dataset """kinships""" +742 82 model """transd""" +742 82 loss """softplus""" +742 82 regularizer """no""" +742 82 optimizer """adadelta""" +742 82 training_loop """lcwa""" +742 82 evaluator """rankbased""" +742 83 dataset """kinships""" +742 83 model """transd""" +742 83 loss """softplus""" +742 83 regularizer """no""" +742 83 optimizer """adadelta""" +742 83 training_loop """lcwa""" +742 83 evaluator """rankbased""" +742 84 dataset """kinships""" +742 84 model """transd""" +742 84 loss """softplus""" +742 84 regularizer """no""" +742 84 optimizer """adadelta""" +742 84 training_loop """lcwa""" +742 84 evaluator """rankbased""" +742 85 dataset """kinships""" +742 85 model """transd""" +742 85 loss """softplus""" +742 85 regularizer """no""" +742 85 optimizer """adadelta""" +742 85 training_loop """lcwa""" +742 85 evaluator """rankbased""" +742 86 dataset """kinships""" +742 86 model """transd""" +742 86 loss """softplus""" +742 86 regularizer """no""" +742 86 optimizer """adadelta""" +742 86 training_loop """lcwa""" +742 86 evaluator """rankbased""" +742 87 dataset """kinships""" +742 87 model """transd""" +742 87 loss """softplus""" +742 87 regularizer """no""" +742 87 optimizer """adadelta""" +742 87 training_loop """lcwa""" +742 87 evaluator """rankbased""" +742 
88 dataset """kinships""" +742 88 model """transd""" +742 88 loss """softplus""" +742 88 regularizer """no""" +742 88 optimizer """adadelta""" +742 88 training_loop """lcwa""" +742 88 evaluator """rankbased""" +742 89 dataset """kinships""" +742 89 model """transd""" +742 89 loss """softplus""" +742 89 regularizer """no""" +742 89 optimizer """adadelta""" +742 89 training_loop """lcwa""" +742 89 evaluator """rankbased""" +742 90 dataset """kinships""" +742 90 model """transd""" +742 90 loss """softplus""" +742 90 regularizer """no""" +742 90 optimizer """adadelta""" +742 90 training_loop """lcwa""" +742 90 evaluator """rankbased""" +742 91 dataset """kinships""" +742 91 model """transd""" +742 91 loss """softplus""" +742 91 regularizer """no""" +742 91 optimizer """adadelta""" +742 91 training_loop """lcwa""" +742 91 evaluator """rankbased""" +742 92 dataset """kinships""" +742 92 model """transd""" +742 92 loss """softplus""" +742 92 regularizer """no""" +742 92 optimizer """adadelta""" +742 92 training_loop """lcwa""" +742 92 evaluator """rankbased""" +742 93 dataset """kinships""" +742 93 model """transd""" +742 93 loss """softplus""" +742 93 regularizer """no""" +742 93 optimizer """adadelta""" +742 93 training_loop """lcwa""" +742 93 evaluator """rankbased""" +742 94 dataset """kinships""" +742 94 model """transd""" +742 94 loss """softplus""" +742 94 regularizer """no""" +742 94 optimizer """adadelta""" +742 94 training_loop """lcwa""" +742 94 evaluator """rankbased""" +742 95 dataset """kinships""" +742 95 model """transd""" +742 95 loss """softplus""" +742 95 regularizer """no""" +742 95 optimizer """adadelta""" +742 95 training_loop """lcwa""" +742 95 evaluator """rankbased""" +742 96 dataset """kinships""" +742 96 model """transd""" +742 96 loss """softplus""" +742 96 regularizer """no""" +742 96 optimizer """adadelta""" +742 96 training_loop """lcwa""" +742 96 evaluator """rankbased""" +742 97 dataset """kinships""" +742 97 model """transd""" +742 97 
loss """softplus""" +742 97 regularizer """no""" +742 97 optimizer """adadelta""" +742 97 training_loop """lcwa""" +742 97 evaluator """rankbased""" +742 98 dataset """kinships""" +742 98 model """transd""" +742 98 loss """softplus""" +742 98 regularizer """no""" +742 98 optimizer """adadelta""" +742 98 training_loop """lcwa""" +742 98 evaluator """rankbased""" +742 99 dataset """kinships""" +742 99 model """transd""" +742 99 loss """softplus""" +742 99 regularizer """no""" +742 99 optimizer """adadelta""" +742 99 training_loop """lcwa""" +742 99 evaluator """rankbased""" +742 100 dataset """kinships""" +742 100 model """transd""" +742 100 loss """softplus""" +742 100 regularizer """no""" +742 100 optimizer """adadelta""" +742 100 training_loop """lcwa""" +742 100 evaluator """rankbased""" +743 1 model.embedding_dim 1.0 +743 1 model.relation_dim 2.0 +743 1 training.batch_size 1.0 +743 1 training.label_smoothing 0.7076583218999991 +743 2 model.embedding_dim 1.0 +743 2 model.relation_dim 2.0 +743 2 training.batch_size 1.0 +743 2 training.label_smoothing 0.011971117494970521 +743 3 model.embedding_dim 1.0 +743 3 model.relation_dim 0.0 +743 3 training.batch_size 2.0 +743 3 training.label_smoothing 0.00813400523098092 +743 4 model.embedding_dim 0.0 +743 4 model.relation_dim 1.0 +743 4 training.batch_size 1.0 +743 4 training.label_smoothing 0.056938920655897264 +743 5 model.embedding_dim 2.0 +743 5 model.relation_dim 1.0 +743 5 training.batch_size 2.0 +743 5 training.label_smoothing 0.5482139548836915 +743 6 model.embedding_dim 0.0 +743 6 model.relation_dim 2.0 +743 6 training.batch_size 0.0 +743 6 training.label_smoothing 0.9412346079878455 +743 7 model.embedding_dim 1.0 +743 7 model.relation_dim 0.0 +743 7 training.batch_size 2.0 +743 7 training.label_smoothing 0.007391720189476129 +743 8 model.embedding_dim 1.0 +743 8 model.relation_dim 1.0 +743 8 training.batch_size 1.0 +743 8 training.label_smoothing 0.001640371162085156 +743 9 model.embedding_dim 1.0 +743 9 
model.relation_dim 2.0 +743 9 training.batch_size 2.0 +743 9 training.label_smoothing 0.001727190897729847 +743 10 model.embedding_dim 2.0 +743 10 model.relation_dim 2.0 +743 10 training.batch_size 2.0 +743 10 training.label_smoothing 0.007069678438182918 +743 11 model.embedding_dim 1.0 +743 11 model.relation_dim 2.0 +743 11 training.batch_size 1.0 +743 11 training.label_smoothing 0.03036729489893123 +743 12 model.embedding_dim 0.0 +743 12 model.relation_dim 2.0 +743 12 training.batch_size 2.0 +743 12 training.label_smoothing 0.037237421326105614 +743 13 model.embedding_dim 1.0 +743 13 model.relation_dim 1.0 +743 13 training.batch_size 1.0 +743 13 training.label_smoothing 0.1256458299378806 +743 14 model.embedding_dim 0.0 +743 14 model.relation_dim 0.0 +743 14 training.batch_size 0.0 +743 14 training.label_smoothing 0.0015539135733997386 +743 15 model.embedding_dim 2.0 +743 15 model.relation_dim 1.0 +743 15 training.batch_size 0.0 +743 15 training.label_smoothing 0.003140126950711394 +743 16 model.embedding_dim 1.0 +743 16 model.relation_dim 2.0 +743 16 training.batch_size 1.0 +743 16 training.label_smoothing 0.1449362151493442 +743 17 model.embedding_dim 2.0 +743 17 model.relation_dim 0.0 +743 17 training.batch_size 2.0 +743 17 training.label_smoothing 0.0010622012238418771 +743 18 model.embedding_dim 1.0 +743 18 model.relation_dim 2.0 +743 18 training.batch_size 2.0 +743 18 training.label_smoothing 0.04726433492312893 +743 19 model.embedding_dim 1.0 +743 19 model.relation_dim 2.0 +743 19 training.batch_size 2.0 +743 19 training.label_smoothing 0.0016388879052592637 +743 20 model.embedding_dim 2.0 +743 20 model.relation_dim 2.0 +743 20 training.batch_size 1.0 +743 20 training.label_smoothing 0.3681292805399071 +743 21 model.embedding_dim 2.0 +743 21 model.relation_dim 0.0 +743 21 training.batch_size 2.0 +743 21 training.label_smoothing 0.03248123873985295 +743 22 model.embedding_dim 1.0 +743 22 model.relation_dim 2.0 +743 22 training.batch_size 0.0 +743 22 
training.label_smoothing 0.4159296927783917 +743 23 model.embedding_dim 1.0 +743 23 model.relation_dim 0.0 +743 23 training.batch_size 0.0 +743 23 training.label_smoothing 0.005267631332434725 +743 24 model.embedding_dim 1.0 +743 24 model.relation_dim 1.0 +743 24 training.batch_size 2.0 +743 24 training.label_smoothing 0.01391280459166106 +743 25 model.embedding_dim 1.0 +743 25 model.relation_dim 2.0 +743 25 training.batch_size 1.0 +743 25 training.label_smoothing 0.02151425049383292 +743 26 model.embedding_dim 2.0 +743 26 model.relation_dim 0.0 +743 26 training.batch_size 0.0 +743 26 training.label_smoothing 0.00466566983485418 +743 27 model.embedding_dim 2.0 +743 27 model.relation_dim 2.0 +743 27 training.batch_size 0.0 +743 27 training.label_smoothing 0.022906236963244068 +743 28 model.embedding_dim 0.0 +743 28 model.relation_dim 2.0 +743 28 training.batch_size 2.0 +743 28 training.label_smoothing 0.001132661361101127 +743 29 model.embedding_dim 0.0 +743 29 model.relation_dim 2.0 +743 29 training.batch_size 1.0 +743 29 training.label_smoothing 0.6121456887761753 +743 30 model.embedding_dim 0.0 +743 30 model.relation_dim 0.0 +743 30 training.batch_size 0.0 +743 30 training.label_smoothing 0.7343523481990682 +743 31 model.embedding_dim 0.0 +743 31 model.relation_dim 1.0 +743 31 training.batch_size 2.0 +743 31 training.label_smoothing 0.03851014807054278 +743 32 model.embedding_dim 1.0 +743 32 model.relation_dim 0.0 +743 32 training.batch_size 0.0 +743 32 training.label_smoothing 0.007823785291759217 +743 33 model.embedding_dim 1.0 +743 33 model.relation_dim 0.0 +743 33 training.batch_size 0.0 +743 33 training.label_smoothing 0.200486673116168 +743 34 model.embedding_dim 2.0 +743 34 model.relation_dim 0.0 +743 34 training.batch_size 1.0 +743 34 training.label_smoothing 0.12053182448525172 +743 35 model.embedding_dim 0.0 +743 35 model.relation_dim 1.0 +743 35 training.batch_size 1.0 +743 35 training.label_smoothing 0.19052589871753411 +743 36 model.embedding_dim 2.0 
+743 36 model.relation_dim 2.0 +743 36 training.batch_size 2.0 +743 36 training.label_smoothing 0.03859018536562353 +743 37 model.embedding_dim 1.0 +743 37 model.relation_dim 2.0 +743 37 training.batch_size 0.0 +743 37 training.label_smoothing 0.001832695312806848 +743 38 model.embedding_dim 2.0 +743 38 model.relation_dim 2.0 +743 38 training.batch_size 2.0 +743 38 training.label_smoothing 0.011506582682683644 +743 39 model.embedding_dim 0.0 +743 39 model.relation_dim 0.0 +743 39 training.batch_size 2.0 +743 39 training.label_smoothing 0.02008485376468154 +743 40 model.embedding_dim 2.0 +743 40 model.relation_dim 1.0 +743 40 training.batch_size 0.0 +743 40 training.label_smoothing 0.004620845200244581 +743 41 model.embedding_dim 2.0 +743 41 model.relation_dim 0.0 +743 41 training.batch_size 2.0 +743 41 training.label_smoothing 0.13568974355860702 +743 42 model.embedding_dim 2.0 +743 42 model.relation_dim 2.0 +743 42 training.batch_size 2.0 +743 42 training.label_smoothing 0.08539867153793962 +743 43 model.embedding_dim 2.0 +743 43 model.relation_dim 1.0 +743 43 training.batch_size 0.0 +743 43 training.label_smoothing 0.10651252135883037 +743 44 model.embedding_dim 1.0 +743 44 model.relation_dim 0.0 +743 44 training.batch_size 2.0 +743 44 training.label_smoothing 0.4176319999342181 +743 45 model.embedding_dim 2.0 +743 45 model.relation_dim 0.0 +743 45 training.batch_size 0.0 +743 45 training.label_smoothing 0.001449817600578767 +743 46 model.embedding_dim 0.0 +743 46 model.relation_dim 1.0 +743 46 training.batch_size 2.0 +743 46 training.label_smoothing 0.10013743300952556 +743 47 model.embedding_dim 2.0 +743 47 model.relation_dim 1.0 +743 47 training.batch_size 1.0 +743 47 training.label_smoothing 0.004760631756122991 +743 48 model.embedding_dim 1.0 +743 48 model.relation_dim 2.0 +743 48 training.batch_size 1.0 +743 48 training.label_smoothing 0.002103507600490532 +743 49 model.embedding_dim 1.0 +743 49 model.relation_dim 1.0 +743 49 training.batch_size 0.0 +743 49 
training.label_smoothing 0.003270929220598497 +743 50 model.embedding_dim 1.0 +743 50 model.relation_dim 0.0 +743 50 training.batch_size 0.0 +743 50 training.label_smoothing 0.23686706924905077 +743 51 model.embedding_dim 1.0 +743 51 model.relation_dim 0.0 +743 51 training.batch_size 1.0 +743 51 training.label_smoothing 0.007395492027553136 +743 52 model.embedding_dim 1.0 +743 52 model.relation_dim 2.0 +743 52 training.batch_size 2.0 +743 52 training.label_smoothing 0.03196451998547364 +743 53 model.embedding_dim 1.0 +743 53 model.relation_dim 1.0 +743 53 training.batch_size 2.0 +743 53 training.label_smoothing 0.03453655099342201 +743 54 model.embedding_dim 1.0 +743 54 model.relation_dim 1.0 +743 54 training.batch_size 2.0 +743 54 training.label_smoothing 0.01762509319844245 +743 55 model.embedding_dim 0.0 +743 55 model.relation_dim 1.0 +743 55 training.batch_size 0.0 +743 55 training.label_smoothing 0.4959243299653684 +743 56 model.embedding_dim 0.0 +743 56 model.relation_dim 2.0 +743 56 training.batch_size 0.0 +743 56 training.label_smoothing 0.11987343744609674 +743 57 model.embedding_dim 0.0 +743 57 model.relation_dim 2.0 +743 57 training.batch_size 0.0 +743 57 training.label_smoothing 0.3553008298760273 +743 58 model.embedding_dim 1.0 +743 58 model.relation_dim 0.0 +743 58 training.batch_size 2.0 +743 58 training.label_smoothing 0.0029865651497820193 +743 59 model.embedding_dim 2.0 +743 59 model.relation_dim 0.0 +743 59 training.batch_size 1.0 +743 59 training.label_smoothing 0.7204862703620007 +743 60 model.embedding_dim 2.0 +743 60 model.relation_dim 0.0 +743 60 training.batch_size 0.0 +743 60 training.label_smoothing 0.03484846857290294 +743 61 model.embedding_dim 2.0 +743 61 model.relation_dim 0.0 +743 61 training.batch_size 2.0 +743 61 training.label_smoothing 0.014377841314603257 +743 62 model.embedding_dim 1.0 +743 62 model.relation_dim 0.0 +743 62 training.batch_size 0.0 +743 62 training.label_smoothing 0.621697166582611 +743 63 model.embedding_dim 
0.0 +743 63 model.relation_dim 1.0 +743 63 training.batch_size 0.0 +743 63 training.label_smoothing 0.020495450294923546 +743 64 model.embedding_dim 1.0 +743 64 model.relation_dim 1.0 +743 64 training.batch_size 0.0 +743 64 training.label_smoothing 0.6192051531496977 +743 65 model.embedding_dim 1.0 +743 65 model.relation_dim 2.0 +743 65 training.batch_size 2.0 +743 65 training.label_smoothing 0.49238623908838525 +743 66 model.embedding_dim 2.0 +743 66 model.relation_dim 0.0 +743 66 training.batch_size 0.0 +743 66 training.label_smoothing 0.027017398073505252 +743 67 model.embedding_dim 1.0 +743 67 model.relation_dim 1.0 +743 67 training.batch_size 0.0 +743 67 training.label_smoothing 0.011119132306564665 +743 68 model.embedding_dim 0.0 +743 68 model.relation_dim 2.0 +743 68 training.batch_size 0.0 +743 68 training.label_smoothing 0.002049729963855693 +743 69 model.embedding_dim 2.0 +743 69 model.relation_dim 1.0 +743 69 training.batch_size 1.0 +743 69 training.label_smoothing 0.06939536992714138 +743 70 model.embedding_dim 0.0 +743 70 model.relation_dim 1.0 +743 70 training.batch_size 0.0 +743 70 training.label_smoothing 0.0015117401207266085 +743 71 model.embedding_dim 1.0 +743 71 model.relation_dim 1.0 +743 71 training.batch_size 1.0 +743 71 training.label_smoothing 0.5167998747227299 +743 72 model.embedding_dim 1.0 +743 72 model.relation_dim 0.0 +743 72 training.batch_size 2.0 +743 72 training.label_smoothing 0.0022650974812516876 +743 73 model.embedding_dim 2.0 +743 73 model.relation_dim 0.0 +743 73 training.batch_size 0.0 +743 73 training.label_smoothing 0.040141863336887455 +743 74 model.embedding_dim 0.0 +743 74 model.relation_dim 1.0 +743 74 training.batch_size 1.0 +743 74 training.label_smoothing 0.06348515233480866 +743 75 model.embedding_dim 0.0 +743 75 model.relation_dim 1.0 +743 75 training.batch_size 0.0 +743 75 training.label_smoothing 0.0772969705836466 +743 76 model.embedding_dim 2.0 +743 76 model.relation_dim 0.0 +743 76 training.batch_size 0.0 
+743 76 training.label_smoothing 0.0806061457481057 +743 77 model.embedding_dim 0.0 +743 77 model.relation_dim 0.0 +743 77 training.batch_size 2.0 +743 77 training.label_smoothing 0.22127738293752133 +743 78 model.embedding_dim 0.0 +743 78 model.relation_dim 1.0 +743 78 training.batch_size 0.0 +743 78 training.label_smoothing 0.007193128195789545 +743 79 model.embedding_dim 0.0 +743 79 model.relation_dim 0.0 +743 79 training.batch_size 1.0 +743 79 training.label_smoothing 0.001659713663560653 +743 80 model.embedding_dim 0.0 +743 80 model.relation_dim 1.0 +743 80 training.batch_size 0.0 +743 80 training.label_smoothing 0.1083871064646119 +743 81 model.embedding_dim 2.0 +743 81 model.relation_dim 0.0 +743 81 training.batch_size 2.0 +743 81 training.label_smoothing 0.002182709646556114 +743 82 model.embedding_dim 2.0 +743 82 model.relation_dim 1.0 +743 82 training.batch_size 2.0 +743 82 training.label_smoothing 0.003383996706543015 +743 83 model.embedding_dim 0.0 +743 83 model.relation_dim 1.0 +743 83 training.batch_size 2.0 +743 83 training.label_smoothing 0.031193717660596053 +743 84 model.embedding_dim 0.0 +743 84 model.relation_dim 0.0 +743 84 training.batch_size 2.0 +743 84 training.label_smoothing 0.20110671740742503 +743 85 model.embedding_dim 2.0 +743 85 model.relation_dim 2.0 +743 85 training.batch_size 2.0 +743 85 training.label_smoothing 0.041355321587802384 +743 86 model.embedding_dim 0.0 +743 86 model.relation_dim 2.0 +743 86 training.batch_size 2.0 +743 86 training.label_smoothing 0.3859816566016748 +743 87 model.embedding_dim 0.0 +743 87 model.relation_dim 2.0 +743 87 training.batch_size 1.0 +743 87 training.label_smoothing 0.4894544398697159 +743 88 model.embedding_dim 0.0 +743 88 model.relation_dim 0.0 +743 88 training.batch_size 1.0 +743 88 training.label_smoothing 0.9040979082499049 +743 89 model.embedding_dim 0.0 +743 89 model.relation_dim 2.0 +743 89 training.batch_size 1.0 +743 89 training.label_smoothing 0.08358174596844449 +743 90 
model.embedding_dim 0.0 +743 90 model.relation_dim 0.0 +743 90 training.batch_size 2.0 +743 90 training.label_smoothing 0.0031607245042153237 +743 91 model.embedding_dim 1.0 +743 91 model.relation_dim 1.0 +743 91 training.batch_size 2.0 +743 91 training.label_smoothing 0.015138257882588567 +743 92 model.embedding_dim 0.0 +743 92 model.relation_dim 1.0 +743 92 training.batch_size 0.0 +743 92 training.label_smoothing 0.058788245576647136 +743 93 model.embedding_dim 2.0 +743 93 model.relation_dim 1.0 +743 93 training.batch_size 0.0 +743 93 training.label_smoothing 0.41910955954932294 +743 94 model.embedding_dim 0.0 +743 94 model.relation_dim 0.0 +743 94 training.batch_size 2.0 +743 94 training.label_smoothing 0.06295706567609075 +743 95 model.embedding_dim 1.0 +743 95 model.relation_dim 1.0 +743 95 training.batch_size 2.0 +743 95 training.label_smoothing 0.02347308083677899 +743 96 model.embedding_dim 1.0 +743 96 model.relation_dim 2.0 +743 96 training.batch_size 0.0 +743 96 training.label_smoothing 0.07868960039741481 +743 97 model.embedding_dim 1.0 +743 97 model.relation_dim 1.0 +743 97 training.batch_size 1.0 +743 97 training.label_smoothing 0.040219457572289466 +743 98 model.embedding_dim 2.0 +743 98 model.relation_dim 2.0 +743 98 training.batch_size 1.0 +743 98 training.label_smoothing 0.0056388024568689 +743 99 model.embedding_dim 0.0 +743 99 model.relation_dim 2.0 +743 99 training.batch_size 0.0 +743 99 training.label_smoothing 0.0038603067251321227 +743 100 model.embedding_dim 1.0 +743 100 model.relation_dim 1.0 +743 100 training.batch_size 2.0 +743 100 training.label_smoothing 0.7486781508088638 +743 1 dataset """kinships""" +743 1 model """transd""" +743 1 loss """bceaftersigmoid""" +743 1 regularizer """no""" +743 1 optimizer """adadelta""" +743 1 training_loop """lcwa""" +743 1 evaluator """rankbased""" +743 2 dataset """kinships""" +743 2 model """transd""" +743 2 loss """bceaftersigmoid""" +743 2 regularizer """no""" +743 2 optimizer """adadelta""" +743 
2 training_loop """lcwa""" +743 2 evaluator """rankbased""" +743 3 dataset """kinships""" +743 3 model """transd""" +743 3 loss """bceaftersigmoid""" +743 3 regularizer """no""" +743 3 optimizer """adadelta""" +743 3 training_loop """lcwa""" +743 3 evaluator """rankbased""" +743 4 dataset """kinships""" +743 4 model """transd""" +743 4 loss """bceaftersigmoid""" +743 4 regularizer """no""" +743 4 optimizer """adadelta""" +743 4 training_loop """lcwa""" +743 4 evaluator """rankbased""" +743 5 dataset """kinships""" +743 5 model """transd""" +743 5 loss """bceaftersigmoid""" +743 5 regularizer """no""" +743 5 optimizer """adadelta""" +743 5 training_loop """lcwa""" +743 5 evaluator """rankbased""" +743 6 dataset """kinships""" +743 6 model """transd""" +743 6 loss """bceaftersigmoid""" +743 6 regularizer """no""" +743 6 optimizer """adadelta""" +743 6 training_loop """lcwa""" +743 6 evaluator """rankbased""" +743 7 dataset """kinships""" +743 7 model """transd""" +743 7 loss """bceaftersigmoid""" +743 7 regularizer """no""" +743 7 optimizer """adadelta""" +743 7 training_loop """lcwa""" +743 7 evaluator """rankbased""" +743 8 dataset """kinships""" +743 8 model """transd""" +743 8 loss """bceaftersigmoid""" +743 8 regularizer """no""" +743 8 optimizer """adadelta""" +743 8 training_loop """lcwa""" +743 8 evaluator """rankbased""" +743 9 dataset """kinships""" +743 9 model """transd""" +743 9 loss """bceaftersigmoid""" +743 9 regularizer """no""" +743 9 optimizer """adadelta""" +743 9 training_loop """lcwa""" +743 9 evaluator """rankbased""" +743 10 dataset """kinships""" +743 10 model """transd""" +743 10 loss """bceaftersigmoid""" +743 10 regularizer """no""" +743 10 optimizer """adadelta""" +743 10 training_loop """lcwa""" +743 10 evaluator """rankbased""" +743 11 dataset """kinships""" +743 11 model """transd""" +743 11 loss """bceaftersigmoid""" +743 11 regularizer """no""" +743 11 optimizer """adadelta""" +743 11 training_loop """lcwa""" +743 11 evaluator 
"""rankbased""" +743 12 dataset """kinships""" +743 12 model """transd""" +743 12 loss """bceaftersigmoid""" +743 12 regularizer """no""" +743 12 optimizer """adadelta""" +743 12 training_loop """lcwa""" +743 12 evaluator """rankbased""" +743 13 dataset """kinships""" +743 13 model """transd""" +743 13 loss """bceaftersigmoid""" +743 13 regularizer """no""" +743 13 optimizer """adadelta""" +743 13 training_loop """lcwa""" +743 13 evaluator """rankbased""" +743 14 dataset """kinships""" +743 14 model """transd""" +743 14 loss """bceaftersigmoid""" +743 14 regularizer """no""" +743 14 optimizer """adadelta""" +743 14 training_loop """lcwa""" +743 14 evaluator """rankbased""" +743 15 dataset """kinships""" +743 15 model """transd""" +743 15 loss """bceaftersigmoid""" +743 15 regularizer """no""" +743 15 optimizer """adadelta""" +743 15 training_loop """lcwa""" +743 15 evaluator """rankbased""" +743 16 dataset """kinships""" +743 16 model """transd""" +743 16 loss """bceaftersigmoid""" +743 16 regularizer """no""" +743 16 optimizer """adadelta""" +743 16 training_loop """lcwa""" +743 16 evaluator """rankbased""" +743 17 dataset """kinships""" +743 17 model """transd""" +743 17 loss """bceaftersigmoid""" +743 17 regularizer """no""" +743 17 optimizer """adadelta""" +743 17 training_loop """lcwa""" +743 17 evaluator """rankbased""" +743 18 dataset """kinships""" +743 18 model """transd""" +743 18 loss """bceaftersigmoid""" +743 18 regularizer """no""" +743 18 optimizer """adadelta""" +743 18 training_loop """lcwa""" +743 18 evaluator """rankbased""" +743 19 dataset """kinships""" +743 19 model """transd""" +743 19 loss """bceaftersigmoid""" +743 19 regularizer """no""" +743 19 optimizer """adadelta""" +743 19 training_loop """lcwa""" +743 19 evaluator """rankbased""" +743 20 dataset """kinships""" +743 20 model """transd""" +743 20 loss """bceaftersigmoid""" +743 20 regularizer """no""" +743 20 optimizer """adadelta""" +743 20 training_loop """lcwa""" +743 20 evaluator 
"""rankbased""" +743 21 dataset """kinships""" +743 21 model """transd""" +743 21 loss """bceaftersigmoid""" +743 21 regularizer """no""" +743 21 optimizer """adadelta""" +743 21 training_loop """lcwa""" +743 21 evaluator """rankbased""" +743 22 dataset """kinships""" +743 22 model """transd""" +743 22 loss """bceaftersigmoid""" +743 22 regularizer """no""" +743 22 optimizer """adadelta""" +743 22 training_loop """lcwa""" +743 22 evaluator """rankbased""" +743 23 dataset """kinships""" +743 23 model """transd""" +743 23 loss """bceaftersigmoid""" +743 23 regularizer """no""" +743 23 optimizer """adadelta""" +743 23 training_loop """lcwa""" +743 23 evaluator """rankbased""" +743 24 dataset """kinships""" +743 24 model """transd""" +743 24 loss """bceaftersigmoid""" +743 24 regularizer """no""" +743 24 optimizer """adadelta""" +743 24 training_loop """lcwa""" +743 24 evaluator """rankbased""" +743 25 dataset """kinships""" +743 25 model """transd""" +743 25 loss """bceaftersigmoid""" +743 25 regularizer """no""" +743 25 optimizer """adadelta""" +743 25 training_loop """lcwa""" +743 25 evaluator """rankbased""" +743 26 dataset """kinships""" +743 26 model """transd""" +743 26 loss """bceaftersigmoid""" +743 26 regularizer """no""" +743 26 optimizer """adadelta""" +743 26 training_loop """lcwa""" +743 26 evaluator """rankbased""" +743 27 dataset """kinships""" +743 27 model """transd""" +743 27 loss """bceaftersigmoid""" +743 27 regularizer """no""" +743 27 optimizer """adadelta""" +743 27 training_loop """lcwa""" +743 27 evaluator """rankbased""" +743 28 dataset """kinships""" +743 28 model """transd""" +743 28 loss """bceaftersigmoid""" +743 28 regularizer """no""" +743 28 optimizer """adadelta""" +743 28 training_loop """lcwa""" +743 28 evaluator """rankbased""" +743 29 dataset """kinships""" +743 29 model """transd""" +743 29 loss """bceaftersigmoid""" +743 29 regularizer """no""" +743 29 optimizer """adadelta""" +743 29 training_loop """lcwa""" +743 29 evaluator 
"""rankbased""" +743 30 dataset """kinships""" +743 30 model """transd""" +743 30 loss """bceaftersigmoid""" +743 30 regularizer """no""" +743 30 optimizer """adadelta""" +743 30 training_loop """lcwa""" +743 30 evaluator """rankbased""" +743 31 dataset """kinships""" +743 31 model """transd""" +743 31 loss """bceaftersigmoid""" +743 31 regularizer """no""" +743 31 optimizer """adadelta""" +743 31 training_loop """lcwa""" +743 31 evaluator """rankbased""" +743 32 dataset """kinships""" +743 32 model """transd""" +743 32 loss """bceaftersigmoid""" +743 32 regularizer """no""" +743 32 optimizer """adadelta""" +743 32 training_loop """lcwa""" +743 32 evaluator """rankbased""" +743 33 dataset """kinships""" +743 33 model """transd""" +743 33 loss """bceaftersigmoid""" +743 33 regularizer """no""" +743 33 optimizer """adadelta""" +743 33 training_loop """lcwa""" +743 33 evaluator """rankbased""" +743 34 dataset """kinships""" +743 34 model """transd""" +743 34 loss """bceaftersigmoid""" +743 34 regularizer """no""" +743 34 optimizer """adadelta""" +743 34 training_loop """lcwa""" +743 34 evaluator """rankbased""" +743 35 dataset """kinships""" +743 35 model """transd""" +743 35 loss """bceaftersigmoid""" +743 35 regularizer """no""" +743 35 optimizer """adadelta""" +743 35 training_loop """lcwa""" +743 35 evaluator """rankbased""" +743 36 dataset """kinships""" +743 36 model """transd""" +743 36 loss """bceaftersigmoid""" +743 36 regularizer """no""" +743 36 optimizer """adadelta""" +743 36 training_loop """lcwa""" +743 36 evaluator """rankbased""" +743 37 dataset """kinships""" +743 37 model """transd""" +743 37 loss """bceaftersigmoid""" +743 37 regularizer """no""" +743 37 optimizer """adadelta""" +743 37 training_loop """lcwa""" +743 37 evaluator """rankbased""" +743 38 dataset """kinships""" +743 38 model """transd""" +743 38 loss """bceaftersigmoid""" +743 38 regularizer """no""" +743 38 optimizer """adadelta""" +743 38 training_loop """lcwa""" +743 38 evaluator 
"""rankbased""" +743 39 dataset """kinships""" +743 39 model """transd""" +743 39 loss """bceaftersigmoid""" +743 39 regularizer """no""" +743 39 optimizer """adadelta""" +743 39 training_loop """lcwa""" +743 39 evaluator """rankbased""" +743 40 dataset """kinships""" +743 40 model """transd""" +743 40 loss """bceaftersigmoid""" +743 40 regularizer """no""" +743 40 optimizer """adadelta""" +743 40 training_loop """lcwa""" +743 40 evaluator """rankbased""" +743 41 dataset """kinships""" +743 41 model """transd""" +743 41 loss """bceaftersigmoid""" +743 41 regularizer """no""" +743 41 optimizer """adadelta""" +743 41 training_loop """lcwa""" +743 41 evaluator """rankbased""" +743 42 dataset """kinships""" +743 42 model """transd""" +743 42 loss """bceaftersigmoid""" +743 42 regularizer """no""" +743 42 optimizer """adadelta""" +743 42 training_loop """lcwa""" +743 42 evaluator """rankbased""" +743 43 dataset """kinships""" +743 43 model """transd""" +743 43 loss """bceaftersigmoid""" +743 43 regularizer """no""" +743 43 optimizer """adadelta""" +743 43 training_loop """lcwa""" +743 43 evaluator """rankbased""" +743 44 dataset """kinships""" +743 44 model """transd""" +743 44 loss """bceaftersigmoid""" +743 44 regularizer """no""" +743 44 optimizer """adadelta""" +743 44 training_loop """lcwa""" +743 44 evaluator """rankbased""" +743 45 dataset """kinships""" +743 45 model """transd""" +743 45 loss """bceaftersigmoid""" +743 45 regularizer """no""" +743 45 optimizer """adadelta""" +743 45 training_loop """lcwa""" +743 45 evaluator """rankbased""" +743 46 dataset """kinships""" +743 46 model """transd""" +743 46 loss """bceaftersigmoid""" +743 46 regularizer """no""" +743 46 optimizer """adadelta""" +743 46 training_loop """lcwa""" +743 46 evaluator """rankbased""" +743 47 dataset """kinships""" +743 47 model """transd""" +743 47 loss """bceaftersigmoid""" +743 47 regularizer """no""" +743 47 optimizer """adadelta""" +743 47 training_loop """lcwa""" +743 47 evaluator 
"""rankbased""" +743 48 dataset """kinships""" +743 48 model """transd""" +743 48 loss """bceaftersigmoid""" +743 48 regularizer """no""" +743 48 optimizer """adadelta""" +743 48 training_loop """lcwa""" +743 48 evaluator """rankbased""" +743 49 dataset """kinships""" +743 49 model """transd""" +743 49 loss """bceaftersigmoid""" +743 49 regularizer """no""" +743 49 optimizer """adadelta""" +743 49 training_loop """lcwa""" +743 49 evaluator """rankbased""" +743 50 dataset """kinships""" +743 50 model """transd""" +743 50 loss """bceaftersigmoid""" +743 50 regularizer """no""" +743 50 optimizer """adadelta""" +743 50 training_loop """lcwa""" +743 50 evaluator """rankbased""" +743 51 dataset """kinships""" +743 51 model """transd""" +743 51 loss """bceaftersigmoid""" +743 51 regularizer """no""" +743 51 optimizer """adadelta""" +743 51 training_loop """lcwa""" +743 51 evaluator """rankbased""" +743 52 dataset """kinships""" +743 52 model """transd""" +743 52 loss """bceaftersigmoid""" +743 52 regularizer """no""" +743 52 optimizer """adadelta""" +743 52 training_loop """lcwa""" +743 52 evaluator """rankbased""" +743 53 dataset """kinships""" +743 53 model """transd""" +743 53 loss """bceaftersigmoid""" +743 53 regularizer """no""" +743 53 optimizer """adadelta""" +743 53 training_loop """lcwa""" +743 53 evaluator """rankbased""" +743 54 dataset """kinships""" +743 54 model """transd""" +743 54 loss """bceaftersigmoid""" +743 54 regularizer """no""" +743 54 optimizer """adadelta""" +743 54 training_loop """lcwa""" +743 54 evaluator """rankbased""" +743 55 dataset """kinships""" +743 55 model """transd""" +743 55 loss """bceaftersigmoid""" +743 55 regularizer """no""" +743 55 optimizer """adadelta""" +743 55 training_loop """lcwa""" +743 55 evaluator """rankbased""" +743 56 dataset """kinships""" +743 56 model """transd""" +743 56 loss """bceaftersigmoid""" +743 56 regularizer """no""" +743 56 optimizer """adadelta""" +743 56 training_loop """lcwa""" +743 56 evaluator 
"""rankbased""" +743 57 dataset """kinships""" +743 57 model """transd""" +743 57 loss """bceaftersigmoid""" +743 57 regularizer """no""" +743 57 optimizer """adadelta""" +743 57 training_loop """lcwa""" +743 57 evaluator """rankbased""" +743 58 dataset """kinships""" +743 58 model """transd""" +743 58 loss """bceaftersigmoid""" +743 58 regularizer """no""" +743 58 optimizer """adadelta""" +743 58 training_loop """lcwa""" +743 58 evaluator """rankbased""" +743 59 dataset """kinships""" +743 59 model """transd""" +743 59 loss """bceaftersigmoid""" +743 59 regularizer """no""" +743 59 optimizer """adadelta""" +743 59 training_loop """lcwa""" +743 59 evaluator """rankbased""" +743 60 dataset """kinships""" +743 60 model """transd""" +743 60 loss """bceaftersigmoid""" +743 60 regularizer """no""" +743 60 optimizer """adadelta""" +743 60 training_loop """lcwa""" +743 60 evaluator """rankbased""" +743 61 dataset """kinships""" +743 61 model """transd""" +743 61 loss """bceaftersigmoid""" +743 61 regularizer """no""" +743 61 optimizer """adadelta""" +743 61 training_loop """lcwa""" +743 61 evaluator """rankbased""" +743 62 dataset """kinships""" +743 62 model """transd""" +743 62 loss """bceaftersigmoid""" +743 62 regularizer """no""" +743 62 optimizer """adadelta""" +743 62 training_loop """lcwa""" +743 62 evaluator """rankbased""" +743 63 dataset """kinships""" +743 63 model """transd""" +743 63 loss """bceaftersigmoid""" +743 63 regularizer """no""" +743 63 optimizer """adadelta""" +743 63 training_loop """lcwa""" +743 63 evaluator """rankbased""" +743 64 dataset """kinships""" +743 64 model """transd""" +743 64 loss """bceaftersigmoid""" +743 64 regularizer """no""" +743 64 optimizer """adadelta""" +743 64 training_loop """lcwa""" +743 64 evaluator """rankbased""" +743 65 dataset """kinships""" +743 65 model """transd""" +743 65 loss """bceaftersigmoid""" +743 65 regularizer """no""" +743 65 optimizer """adadelta""" +743 65 training_loop """lcwa""" +743 65 evaluator 
"""rankbased""" +743 66 dataset """kinships""" +743 66 model """transd""" +743 66 loss """bceaftersigmoid""" +743 66 regularizer """no""" +743 66 optimizer """adadelta""" +743 66 training_loop """lcwa""" +743 66 evaluator """rankbased""" +743 67 dataset """kinships""" +743 67 model """transd""" +743 67 loss """bceaftersigmoid""" +743 67 regularizer """no""" +743 67 optimizer """adadelta""" +743 67 training_loop """lcwa""" +743 67 evaluator """rankbased""" +743 68 dataset """kinships""" +743 68 model """transd""" +743 68 loss """bceaftersigmoid""" +743 68 regularizer """no""" +743 68 optimizer """adadelta""" +743 68 training_loop """lcwa""" +743 68 evaluator """rankbased""" +743 69 dataset """kinships""" +743 69 model """transd""" +743 69 loss """bceaftersigmoid""" +743 69 regularizer """no""" +743 69 optimizer """adadelta""" +743 69 training_loop """lcwa""" +743 69 evaluator """rankbased""" +743 70 dataset """kinships""" +743 70 model """transd""" +743 70 loss """bceaftersigmoid""" +743 70 regularizer """no""" +743 70 optimizer """adadelta""" +743 70 training_loop """lcwa""" +743 70 evaluator """rankbased""" +743 71 dataset """kinships""" +743 71 model """transd""" +743 71 loss """bceaftersigmoid""" +743 71 regularizer """no""" +743 71 optimizer """adadelta""" +743 71 training_loop """lcwa""" +743 71 evaluator """rankbased""" +743 72 dataset """kinships""" +743 72 model """transd""" +743 72 loss """bceaftersigmoid""" +743 72 regularizer """no""" +743 72 optimizer """adadelta""" +743 72 training_loop """lcwa""" +743 72 evaluator """rankbased""" +743 73 dataset """kinships""" +743 73 model """transd""" +743 73 loss """bceaftersigmoid""" +743 73 regularizer """no""" +743 73 optimizer """adadelta""" +743 73 training_loop """lcwa""" +743 73 evaluator """rankbased""" +743 74 dataset """kinships""" +743 74 model """transd""" +743 74 loss """bceaftersigmoid""" +743 74 regularizer """no""" +743 74 optimizer """adadelta""" +743 74 training_loop """lcwa""" +743 74 evaluator 
"""rankbased""" +743 75 dataset """kinships""" +743 75 model """transd""" +743 75 loss """bceaftersigmoid""" +743 75 regularizer """no""" +743 75 optimizer """adadelta""" +743 75 training_loop """lcwa""" +743 75 evaluator """rankbased""" +743 76 dataset """kinships""" +743 76 model """transd""" +743 76 loss """bceaftersigmoid""" +743 76 regularizer """no""" +743 76 optimizer """adadelta""" +743 76 training_loop """lcwa""" +743 76 evaluator """rankbased""" +743 77 dataset """kinships""" +743 77 model """transd""" +743 77 loss """bceaftersigmoid""" +743 77 regularizer """no""" +743 77 optimizer """adadelta""" +743 77 training_loop """lcwa""" +743 77 evaluator """rankbased""" +743 78 dataset """kinships""" +743 78 model """transd""" +743 78 loss """bceaftersigmoid""" +743 78 regularizer """no""" +743 78 optimizer """adadelta""" +743 78 training_loop """lcwa""" +743 78 evaluator """rankbased""" +743 79 dataset """kinships""" +743 79 model """transd""" +743 79 loss """bceaftersigmoid""" +743 79 regularizer """no""" +743 79 optimizer """adadelta""" +743 79 training_loop """lcwa""" +743 79 evaluator """rankbased""" +743 80 dataset """kinships""" +743 80 model """transd""" +743 80 loss """bceaftersigmoid""" +743 80 regularizer """no""" +743 80 optimizer """adadelta""" +743 80 training_loop """lcwa""" +743 80 evaluator """rankbased""" +743 81 dataset """kinships""" +743 81 model """transd""" +743 81 loss """bceaftersigmoid""" +743 81 regularizer """no""" +743 81 optimizer """adadelta""" +743 81 training_loop """lcwa""" +743 81 evaluator """rankbased""" +743 82 dataset """kinships""" +743 82 model """transd""" +743 82 loss """bceaftersigmoid""" +743 82 regularizer """no""" +743 82 optimizer """adadelta""" +743 82 training_loop """lcwa""" +743 82 evaluator """rankbased""" +743 83 dataset """kinships""" +743 83 model """transd""" +743 83 loss """bceaftersigmoid""" +743 83 regularizer """no""" +743 83 optimizer """adadelta""" +743 83 training_loop """lcwa""" +743 83 evaluator 
"""rankbased""" +743 84 dataset """kinships""" +743 84 model """transd""" +743 84 loss """bceaftersigmoid""" +743 84 regularizer """no""" +743 84 optimizer """adadelta""" +743 84 training_loop """lcwa""" +743 84 evaluator """rankbased""" +743 85 dataset """kinships""" +743 85 model """transd""" +743 85 loss """bceaftersigmoid""" +743 85 regularizer """no""" +743 85 optimizer """adadelta""" +743 85 training_loop """lcwa""" +743 85 evaluator """rankbased""" +743 86 dataset """kinships""" +743 86 model """transd""" +743 86 loss """bceaftersigmoid""" +743 86 regularizer """no""" +743 86 optimizer """adadelta""" +743 86 training_loop """lcwa""" +743 86 evaluator """rankbased""" +743 87 dataset """kinships""" +743 87 model """transd""" +743 87 loss """bceaftersigmoid""" +743 87 regularizer """no""" +743 87 optimizer """adadelta""" +743 87 training_loop """lcwa""" +743 87 evaluator """rankbased""" +743 88 dataset """kinships""" +743 88 model """transd""" +743 88 loss """bceaftersigmoid""" +743 88 regularizer """no""" +743 88 optimizer """adadelta""" +743 88 training_loop """lcwa""" +743 88 evaluator """rankbased""" +743 89 dataset """kinships""" +743 89 model """transd""" +743 89 loss """bceaftersigmoid""" +743 89 regularizer """no""" +743 89 optimizer """adadelta""" +743 89 training_loop """lcwa""" +743 89 evaluator """rankbased""" +743 90 dataset """kinships""" +743 90 model """transd""" +743 90 loss """bceaftersigmoid""" +743 90 regularizer """no""" +743 90 optimizer """adadelta""" +743 90 training_loop """lcwa""" +743 90 evaluator """rankbased""" +743 91 dataset """kinships""" +743 91 model """transd""" +743 91 loss """bceaftersigmoid""" +743 91 regularizer """no""" +743 91 optimizer """adadelta""" +743 91 training_loop """lcwa""" +743 91 evaluator """rankbased""" +743 92 dataset """kinships""" +743 92 model """transd""" +743 92 loss """bceaftersigmoid""" +743 92 regularizer """no""" +743 92 optimizer """adadelta""" +743 92 training_loop """lcwa""" +743 92 evaluator 
"""rankbased""" +743 93 dataset """kinships""" +743 93 model """transd""" +743 93 loss """bceaftersigmoid""" +743 93 regularizer """no""" +743 93 optimizer """adadelta""" +743 93 training_loop """lcwa""" +743 93 evaluator """rankbased""" +743 94 dataset """kinships""" +743 94 model """transd""" +743 94 loss """bceaftersigmoid""" +743 94 regularizer """no""" +743 94 optimizer """adadelta""" +743 94 training_loop """lcwa""" +743 94 evaluator """rankbased""" +743 95 dataset """kinships""" +743 95 model """transd""" +743 95 loss """bceaftersigmoid""" +743 95 regularizer """no""" +743 95 optimizer """adadelta""" +743 95 training_loop """lcwa""" +743 95 evaluator """rankbased""" +743 96 dataset """kinships""" +743 96 model """transd""" +743 96 loss """bceaftersigmoid""" +743 96 regularizer """no""" +743 96 optimizer """adadelta""" +743 96 training_loop """lcwa""" +743 96 evaluator """rankbased""" +743 97 dataset """kinships""" +743 97 model """transd""" +743 97 loss """bceaftersigmoid""" +743 97 regularizer """no""" +743 97 optimizer """adadelta""" +743 97 training_loop """lcwa""" +743 97 evaluator """rankbased""" +743 98 dataset """kinships""" +743 98 model """transd""" +743 98 loss """bceaftersigmoid""" +743 98 regularizer """no""" +743 98 optimizer """adadelta""" +743 98 training_loop """lcwa""" +743 98 evaluator """rankbased""" +743 99 dataset """kinships""" +743 99 model """transd""" +743 99 loss """bceaftersigmoid""" +743 99 regularizer """no""" +743 99 optimizer """adadelta""" +743 99 training_loop """lcwa""" +743 99 evaluator """rankbased""" +743 100 dataset """kinships""" +743 100 model """transd""" +743 100 loss """bceaftersigmoid""" +743 100 regularizer """no""" +743 100 optimizer """adadelta""" +743 100 training_loop """lcwa""" +743 100 evaluator """rankbased""" +744 1 model.embedding_dim 2.0 +744 1 model.relation_dim 2.0 +744 1 training.batch_size 1.0 +744 1 training.label_smoothing 0.29952714618496756 +744 2 model.embedding_dim 1.0 +744 2 model.relation_dim 
0.0 +744 2 training.batch_size 2.0 +744 2 training.label_smoothing 0.0027614601179318846 +744 3 model.embedding_dim 1.0 +744 3 model.relation_dim 0.0 +744 3 training.batch_size 0.0 +744 3 training.label_smoothing 0.007699022904281539 +744 4 model.embedding_dim 1.0 +744 4 model.relation_dim 0.0 +744 4 training.batch_size 0.0 +744 4 training.label_smoothing 0.0027571271054445436 +744 5 model.embedding_dim 1.0 +744 5 model.relation_dim 2.0 +744 5 training.batch_size 2.0 +744 5 training.label_smoothing 0.801609925567631 +744 6 model.embedding_dim 0.0 +744 6 model.relation_dim 1.0 +744 6 training.batch_size 1.0 +744 6 training.label_smoothing 0.008787956569048943 +744 7 model.embedding_dim 0.0 +744 7 model.relation_dim 2.0 +744 7 training.batch_size 2.0 +744 7 training.label_smoothing 0.0012445874069574848 +744 8 model.embedding_dim 0.0 +744 8 model.relation_dim 1.0 +744 8 training.batch_size 2.0 +744 8 training.label_smoothing 0.2511064598013243 +744 9 model.embedding_dim 2.0 +744 9 model.relation_dim 2.0 +744 9 training.batch_size 0.0 +744 9 training.label_smoothing 0.23496158432829242 +744 10 model.embedding_dim 0.0 +744 10 model.relation_dim 1.0 +744 10 training.batch_size 2.0 +744 10 training.label_smoothing 0.007414181667228711 +744 11 model.embedding_dim 1.0 +744 11 model.relation_dim 0.0 +744 11 training.batch_size 1.0 +744 11 training.label_smoothing 0.0014624632220950203 +744 12 model.embedding_dim 1.0 +744 12 model.relation_dim 2.0 +744 12 training.batch_size 1.0 +744 12 training.label_smoothing 0.009431660453817582 +744 13 model.embedding_dim 0.0 +744 13 model.relation_dim 1.0 +744 13 training.batch_size 2.0 +744 13 training.label_smoothing 0.015610710982781573 +744 14 model.embedding_dim 0.0 +744 14 model.relation_dim 0.0 +744 14 training.batch_size 2.0 +744 14 training.label_smoothing 0.1077854692002879 +744 15 model.embedding_dim 2.0 +744 15 model.relation_dim 2.0 +744 15 training.batch_size 1.0 +744 15 training.label_smoothing 0.011443347765673816 +744 
16 model.embedding_dim 1.0 +744 16 model.relation_dim 2.0 +744 16 training.batch_size 1.0 +744 16 training.label_smoothing 0.005871784034682153 +744 17 model.embedding_dim 1.0 +744 17 model.relation_dim 0.0 +744 17 training.batch_size 1.0 +744 17 training.label_smoothing 0.017757311011716112 +744 18 model.embedding_dim 2.0 +744 18 model.relation_dim 2.0 +744 18 training.batch_size 1.0 +744 18 training.label_smoothing 0.00105975971487189 +744 19 model.embedding_dim 0.0 +744 19 model.relation_dim 1.0 +744 19 training.batch_size 0.0 +744 19 training.label_smoothing 0.009548677175037842 +744 20 model.embedding_dim 0.0 +744 20 model.relation_dim 1.0 +744 20 training.batch_size 2.0 +744 20 training.label_smoothing 0.008224363787882872 +744 21 model.embedding_dim 1.0 +744 21 model.relation_dim 0.0 +744 21 training.batch_size 1.0 +744 21 training.label_smoothing 0.1089837869565399 +744 22 model.embedding_dim 2.0 +744 22 model.relation_dim 2.0 +744 22 training.batch_size 2.0 +744 22 training.label_smoothing 0.48325126270251717 +744 23 model.embedding_dim 0.0 +744 23 model.relation_dim 0.0 +744 23 training.batch_size 1.0 +744 23 training.label_smoothing 0.4963514197363013 +744 24 model.embedding_dim 2.0 +744 24 model.relation_dim 1.0 +744 24 training.batch_size 0.0 +744 24 training.label_smoothing 0.0038732684598781963 +744 25 model.embedding_dim 1.0 +744 25 model.relation_dim 1.0 +744 25 training.batch_size 1.0 +744 25 training.label_smoothing 0.4358005747257172 +744 26 model.embedding_dim 1.0 +744 26 model.relation_dim 2.0 +744 26 training.batch_size 2.0 +744 26 training.label_smoothing 0.004116598257005517 +744 27 model.embedding_dim 2.0 +744 27 model.relation_dim 1.0 +744 27 training.batch_size 1.0 +744 27 training.label_smoothing 0.0031776347554719423 +744 28 model.embedding_dim 0.0 +744 28 model.relation_dim 0.0 +744 28 training.batch_size 2.0 +744 28 training.label_smoothing 0.0034088723826911083 +744 29 model.embedding_dim 2.0 +744 29 model.relation_dim 1.0 +744 29 
training.batch_size 0.0 +744 29 training.label_smoothing 0.002693073393287035 +744 30 model.embedding_dim 1.0 +744 30 model.relation_dim 0.0 +744 30 training.batch_size 1.0 +744 30 training.label_smoothing 0.43275154958764406 +744 31 model.embedding_dim 2.0 +744 31 model.relation_dim 0.0 +744 31 training.batch_size 2.0 +744 31 training.label_smoothing 0.02365037757234394 +744 32 model.embedding_dim 0.0 +744 32 model.relation_dim 2.0 +744 32 training.batch_size 1.0 +744 32 training.label_smoothing 0.0010655140737874345 +744 33 model.embedding_dim 2.0 +744 33 model.relation_dim 1.0 +744 33 training.batch_size 0.0 +744 33 training.label_smoothing 0.062260674117144005 +744 34 model.embedding_dim 0.0 +744 34 model.relation_dim 0.0 +744 34 training.batch_size 2.0 +744 34 training.label_smoothing 0.009790830531706764 +744 35 model.embedding_dim 2.0 +744 35 model.relation_dim 2.0 +744 35 training.batch_size 0.0 +744 35 training.label_smoothing 0.0038975734799775257 +744 36 model.embedding_dim 1.0 +744 36 model.relation_dim 1.0 +744 36 training.batch_size 0.0 +744 36 training.label_smoothing 0.0014199475282409702 +744 37 model.embedding_dim 2.0 +744 37 model.relation_dim 2.0 +744 37 training.batch_size 0.0 +744 37 training.label_smoothing 0.35420923439071705 +744 38 model.embedding_dim 0.0 +744 38 model.relation_dim 2.0 +744 38 training.batch_size 0.0 +744 38 training.label_smoothing 0.016326307979907754 +744 39 model.embedding_dim 0.0 +744 39 model.relation_dim 2.0 +744 39 training.batch_size 2.0 +744 39 training.label_smoothing 0.08651854355252127 +744 40 model.embedding_dim 0.0 +744 40 model.relation_dim 1.0 +744 40 training.batch_size 1.0 +744 40 training.label_smoothing 0.07823790577768235 +744 41 model.embedding_dim 0.0 +744 41 model.relation_dim 2.0 +744 41 training.batch_size 1.0 +744 41 training.label_smoothing 0.5379113331402289 +744 42 model.embedding_dim 2.0 +744 42 model.relation_dim 0.0 +744 42 training.batch_size 1.0 +744 42 training.label_smoothing 
0.0034217910658561348 +744 43 model.embedding_dim 1.0 +744 43 model.relation_dim 1.0 +744 43 training.batch_size 0.0 +744 43 training.label_smoothing 0.4738084746436346 +744 44 model.embedding_dim 0.0 +744 44 model.relation_dim 2.0 +744 44 training.batch_size 0.0 +744 44 training.label_smoothing 0.13850823850493424 +744 45 model.embedding_dim 0.0 +744 45 model.relation_dim 1.0 +744 45 training.batch_size 2.0 +744 45 training.label_smoothing 0.12826905313146458 +744 46 model.embedding_dim 0.0 +744 46 model.relation_dim 1.0 +744 46 training.batch_size 1.0 +744 46 training.label_smoothing 0.00644621087629619 +744 47 model.embedding_dim 1.0 +744 47 model.relation_dim 1.0 +744 47 training.batch_size 2.0 +744 47 training.label_smoothing 0.01892390492709724 +744 48 model.embedding_dim 2.0 +744 48 model.relation_dim 1.0 +744 48 training.batch_size 0.0 +744 48 training.label_smoothing 0.36379401077549245 +744 49 model.embedding_dim 0.0 +744 49 model.relation_dim 0.0 +744 49 training.batch_size 1.0 +744 49 training.label_smoothing 0.21946902358238535 +744 50 model.embedding_dim 0.0 +744 50 model.relation_dim 2.0 +744 50 training.batch_size 0.0 +744 50 training.label_smoothing 0.0893389653794981 +744 51 model.embedding_dim 1.0 +744 51 model.relation_dim 2.0 +744 51 training.batch_size 0.0 +744 51 training.label_smoothing 0.12030229373221644 +744 52 model.embedding_dim 2.0 +744 52 model.relation_dim 2.0 +744 52 training.batch_size 0.0 +744 52 training.label_smoothing 0.14864305835662056 +744 53 model.embedding_dim 2.0 +744 53 model.relation_dim 1.0 +744 53 training.batch_size 1.0 +744 53 training.label_smoothing 0.09339114122948193 +744 54 model.embedding_dim 2.0 +744 54 model.relation_dim 0.0 +744 54 training.batch_size 0.0 +744 54 training.label_smoothing 0.1381070021041761 +744 55 model.embedding_dim 2.0 +744 55 model.relation_dim 0.0 +744 55 training.batch_size 1.0 +744 55 training.label_smoothing 0.2440497801502081 +744 56 model.embedding_dim 2.0 +744 56 
model.relation_dim 1.0 +744 56 training.batch_size 0.0 +744 56 training.label_smoothing 0.4970936894648594 +744 57 model.embedding_dim 2.0 +744 57 model.relation_dim 1.0 +744 57 training.batch_size 2.0 +744 57 training.label_smoothing 0.006403462859410513 +744 58 model.embedding_dim 0.0 +744 58 model.relation_dim 1.0 +744 58 training.batch_size 0.0 +744 58 training.label_smoothing 0.00964079913530871 +744 59 model.embedding_dim 0.0 +744 59 model.relation_dim 0.0 +744 59 training.batch_size 2.0 +744 59 training.label_smoothing 0.00105532877322252 +744 60 model.embedding_dim 0.0 +744 60 model.relation_dim 1.0 +744 60 training.batch_size 2.0 +744 60 training.label_smoothing 0.42988177209477646 +744 61 model.embedding_dim 2.0 +744 61 model.relation_dim 2.0 +744 61 training.batch_size 2.0 +744 61 training.label_smoothing 0.8540160857339341 +744 62 model.embedding_dim 2.0 +744 62 model.relation_dim 2.0 +744 62 training.batch_size 2.0 +744 62 training.label_smoothing 0.0031179880350366953 +744 63 model.embedding_dim 1.0 +744 63 model.relation_dim 1.0 +744 63 training.batch_size 2.0 +744 63 training.label_smoothing 0.022126878442416644 +744 64 model.embedding_dim 1.0 +744 64 model.relation_dim 0.0 +744 64 training.batch_size 0.0 +744 64 training.label_smoothing 0.6616928302050203 +744 65 model.embedding_dim 2.0 +744 65 model.relation_dim 1.0 +744 65 training.batch_size 2.0 +744 65 training.label_smoothing 0.007065987495403659 +744 66 model.embedding_dim 1.0 +744 66 model.relation_dim 0.0 +744 66 training.batch_size 2.0 +744 66 training.label_smoothing 0.0026016566055962293 +744 67 model.embedding_dim 2.0 +744 67 model.relation_dim 0.0 +744 67 training.batch_size 0.0 +744 67 training.label_smoothing 0.002492194308483513 +744 68 model.embedding_dim 2.0 +744 68 model.relation_dim 1.0 +744 68 training.batch_size 2.0 +744 68 training.label_smoothing 0.032148376521814924 +744 69 model.embedding_dim 1.0 +744 69 model.relation_dim 2.0 +744 69 training.batch_size 1.0 +744 69 
training.label_smoothing 0.00979309078009211 +744 70 model.embedding_dim 0.0 +744 70 model.relation_dim 0.0 +744 70 training.batch_size 2.0 +744 70 training.label_smoothing 0.00830096839927538 +744 71 model.embedding_dim 2.0 +744 71 model.relation_dim 1.0 +744 71 training.batch_size 2.0 +744 71 training.label_smoothing 0.013319527204099351 +744 72 model.embedding_dim 2.0 +744 72 model.relation_dim 1.0 +744 72 training.batch_size 0.0 +744 72 training.label_smoothing 0.06880261139510976 +744 73 model.embedding_dim 2.0 +744 73 model.relation_dim 0.0 +744 73 training.batch_size 1.0 +744 73 training.label_smoothing 0.0012390644108546105 +744 74 model.embedding_dim 1.0 +744 74 model.relation_dim 2.0 +744 74 training.batch_size 0.0 +744 74 training.label_smoothing 0.17809363079319718 +744 75 model.embedding_dim 1.0 +744 75 model.relation_dim 2.0 +744 75 training.batch_size 2.0 +744 75 training.label_smoothing 0.0012240705954264619 +744 76 model.embedding_dim 2.0 +744 76 model.relation_dim 1.0 +744 76 training.batch_size 2.0 +744 76 training.label_smoothing 0.12159579896125186 +744 77 model.embedding_dim 0.0 +744 77 model.relation_dim 1.0 +744 77 training.batch_size 2.0 +744 77 training.label_smoothing 0.09320599986511968 +744 78 model.embedding_dim 0.0 +744 78 model.relation_dim 2.0 +744 78 training.batch_size 1.0 +744 78 training.label_smoothing 0.18196127140086238 +744 79 model.embedding_dim 2.0 +744 79 model.relation_dim 0.0 +744 79 training.batch_size 0.0 +744 79 training.label_smoothing 0.0036121378225179163 +744 80 model.embedding_dim 2.0 +744 80 model.relation_dim 0.0 +744 80 training.batch_size 2.0 +744 80 training.label_smoothing 0.009757105532281903 +744 81 model.embedding_dim 2.0 +744 81 model.relation_dim 2.0 +744 81 training.batch_size 2.0 +744 81 training.label_smoothing 0.002996044561200947 +744 82 model.embedding_dim 2.0 +744 82 model.relation_dim 0.0 +744 82 training.batch_size 1.0 +744 82 training.label_smoothing 0.3414697046455172 +744 83 
model.embedding_dim 0.0 +744 83 model.relation_dim 0.0 +744 83 training.batch_size 2.0 +744 83 training.label_smoothing 0.2093480638846888 +744 84 model.embedding_dim 1.0 +744 84 model.relation_dim 2.0 +744 84 training.batch_size 0.0 +744 84 training.label_smoothing 0.0037339798514823314 +744 85 model.embedding_dim 2.0 +744 85 model.relation_dim 1.0 +744 85 training.batch_size 2.0 +744 85 training.label_smoothing 0.0025771723548811266 +744 86 model.embedding_dim 2.0 +744 86 model.relation_dim 2.0 +744 86 training.batch_size 2.0 +744 86 training.label_smoothing 0.003633923307828607 +744 87 model.embedding_dim 0.0 +744 87 model.relation_dim 1.0 +744 87 training.batch_size 0.0 +744 87 training.label_smoothing 0.035940867040898 +744 88 model.embedding_dim 1.0 +744 88 model.relation_dim 1.0 +744 88 training.batch_size 2.0 +744 88 training.label_smoothing 0.0025286220800820026 +744 89 model.embedding_dim 1.0 +744 89 model.relation_dim 0.0 +744 89 training.batch_size 2.0 +744 89 training.label_smoothing 0.006646654787287206 +744 90 model.embedding_dim 2.0 +744 90 model.relation_dim 2.0 +744 90 training.batch_size 2.0 +744 90 training.label_smoothing 0.5043516576341898 +744 91 model.embedding_dim 2.0 +744 91 model.relation_dim 2.0 +744 91 training.batch_size 2.0 +744 91 training.label_smoothing 0.041448575031945496 +744 92 model.embedding_dim 0.0 +744 92 model.relation_dim 0.0 +744 92 training.batch_size 2.0 +744 92 training.label_smoothing 0.05167720190516423 +744 93 model.embedding_dim 0.0 +744 93 model.relation_dim 0.0 +744 93 training.batch_size 2.0 +744 93 training.label_smoothing 0.013185076959709137 +744 94 model.embedding_dim 0.0 +744 94 model.relation_dim 2.0 +744 94 training.batch_size 0.0 +744 94 training.label_smoothing 0.0017287068951924497 +744 95 model.embedding_dim 2.0 +744 95 model.relation_dim 2.0 +744 95 training.batch_size 2.0 +744 95 training.label_smoothing 0.034717886310390855 +744 96 model.embedding_dim 1.0 +744 96 model.relation_dim 0.0 +744 96 
training.batch_size 2.0 +744 96 training.label_smoothing 0.3801850778030011 +744 97 model.embedding_dim 0.0 +744 97 model.relation_dim 0.0 +744 97 training.batch_size 1.0 +744 97 training.label_smoothing 0.03719594015008681 +744 98 model.embedding_dim 1.0 +744 98 model.relation_dim 0.0 +744 98 training.batch_size 2.0 +744 98 training.label_smoothing 0.0020535713522442556 +744 99 model.embedding_dim 2.0 +744 99 model.relation_dim 2.0 +744 99 training.batch_size 2.0 +744 99 training.label_smoothing 0.06173606997313385 +744 100 model.embedding_dim 0.0 +744 100 model.relation_dim 2.0 +744 100 training.batch_size 1.0 +744 100 training.label_smoothing 0.6520538863423585 +744 1 dataset """kinships""" +744 1 model """transd""" +744 1 loss """softplus""" +744 1 regularizer """no""" +744 1 optimizer """adadelta""" +744 1 training_loop """lcwa""" +744 1 evaluator """rankbased""" +744 2 dataset """kinships""" +744 2 model """transd""" +744 2 loss """softplus""" +744 2 regularizer """no""" +744 2 optimizer """adadelta""" +744 2 training_loop """lcwa""" +744 2 evaluator """rankbased""" +744 3 dataset """kinships""" +744 3 model """transd""" +744 3 loss """softplus""" +744 3 regularizer """no""" +744 3 optimizer """adadelta""" +744 3 training_loop """lcwa""" +744 3 evaluator """rankbased""" +744 4 dataset """kinships""" +744 4 model """transd""" +744 4 loss """softplus""" +744 4 regularizer """no""" +744 4 optimizer """adadelta""" +744 4 training_loop """lcwa""" +744 4 evaluator """rankbased""" +744 5 dataset """kinships""" +744 5 model """transd""" +744 5 loss """softplus""" +744 5 regularizer """no""" +744 5 optimizer """adadelta""" +744 5 training_loop """lcwa""" +744 5 evaluator """rankbased""" +744 6 dataset """kinships""" +744 6 model """transd""" +744 6 loss """softplus""" +744 6 regularizer """no""" +744 6 optimizer """adadelta""" +744 6 training_loop """lcwa""" +744 6 evaluator """rankbased""" +744 7 dataset """kinships""" +744 7 model """transd""" +744 7 loss 
"""softplus""" +744 7 regularizer """no""" +744 7 optimizer """adadelta""" +744 7 training_loop """lcwa""" +744 7 evaluator """rankbased""" +744 8 dataset """kinships""" +744 8 model """transd""" +744 8 loss """softplus""" +744 8 regularizer """no""" +744 8 optimizer """adadelta""" +744 8 training_loop """lcwa""" +744 8 evaluator """rankbased""" +744 9 dataset """kinships""" +744 9 model """transd""" +744 9 loss """softplus""" +744 9 regularizer """no""" +744 9 optimizer """adadelta""" +744 9 training_loop """lcwa""" +744 9 evaluator """rankbased""" +744 10 dataset """kinships""" +744 10 model """transd""" +744 10 loss """softplus""" +744 10 regularizer """no""" +744 10 optimizer """adadelta""" +744 10 training_loop """lcwa""" +744 10 evaluator """rankbased""" +744 11 dataset """kinships""" +744 11 model """transd""" +744 11 loss """softplus""" +744 11 regularizer """no""" +744 11 optimizer """adadelta""" +744 11 training_loop """lcwa""" +744 11 evaluator """rankbased""" +744 12 dataset """kinships""" +744 12 model """transd""" +744 12 loss """softplus""" +744 12 regularizer """no""" +744 12 optimizer """adadelta""" +744 12 training_loop """lcwa""" +744 12 evaluator """rankbased""" +744 13 dataset """kinships""" +744 13 model """transd""" +744 13 loss """softplus""" +744 13 regularizer """no""" +744 13 optimizer """adadelta""" +744 13 training_loop """lcwa""" +744 13 evaluator """rankbased""" +744 14 dataset """kinships""" +744 14 model """transd""" +744 14 loss """softplus""" +744 14 regularizer """no""" +744 14 optimizer """adadelta""" +744 14 training_loop """lcwa""" +744 14 evaluator """rankbased""" +744 15 dataset """kinships""" +744 15 model """transd""" +744 15 loss """softplus""" +744 15 regularizer """no""" +744 15 optimizer """adadelta""" +744 15 training_loop """lcwa""" +744 15 evaluator """rankbased""" +744 16 dataset """kinships""" +744 16 model """transd""" +744 16 loss """softplus""" +744 16 regularizer """no""" +744 16 optimizer """adadelta""" +744 
16 training_loop """lcwa""" +744 16 evaluator """rankbased""" +744 17 dataset """kinships""" +744 17 model """transd""" +744 17 loss """softplus""" +744 17 regularizer """no""" +744 17 optimizer """adadelta""" +744 17 training_loop """lcwa""" +744 17 evaluator """rankbased""" +744 18 dataset """kinships""" +744 18 model """transd""" +744 18 loss """softplus""" +744 18 regularizer """no""" +744 18 optimizer """adadelta""" +744 18 training_loop """lcwa""" +744 18 evaluator """rankbased""" +744 19 dataset """kinships""" +744 19 model """transd""" +744 19 loss """softplus""" +744 19 regularizer """no""" +744 19 optimizer """adadelta""" +744 19 training_loop """lcwa""" +744 19 evaluator """rankbased""" +744 20 dataset """kinships""" +744 20 model """transd""" +744 20 loss """softplus""" +744 20 regularizer """no""" +744 20 optimizer """adadelta""" +744 20 training_loop """lcwa""" +744 20 evaluator """rankbased""" +744 21 dataset """kinships""" +744 21 model """transd""" +744 21 loss """softplus""" +744 21 regularizer """no""" +744 21 optimizer """adadelta""" +744 21 training_loop """lcwa""" +744 21 evaluator """rankbased""" +744 22 dataset """kinships""" +744 22 model """transd""" +744 22 loss """softplus""" +744 22 regularizer """no""" +744 22 optimizer """adadelta""" +744 22 training_loop """lcwa""" +744 22 evaluator """rankbased""" +744 23 dataset """kinships""" +744 23 model """transd""" +744 23 loss """softplus""" +744 23 regularizer """no""" +744 23 optimizer """adadelta""" +744 23 training_loop """lcwa""" +744 23 evaluator """rankbased""" +744 24 dataset """kinships""" +744 24 model """transd""" +744 24 loss """softplus""" +744 24 regularizer """no""" +744 24 optimizer """adadelta""" +744 24 training_loop """lcwa""" +744 24 evaluator """rankbased""" +744 25 dataset """kinships""" +744 25 model """transd""" +744 25 loss """softplus""" +744 25 regularizer """no""" +744 25 optimizer """adadelta""" +744 25 training_loop """lcwa""" +744 25 evaluator """rankbased""" 
+744 26 dataset """kinships""" +744 26 model """transd""" +744 26 loss """softplus""" +744 26 regularizer """no""" +744 26 optimizer """adadelta""" +744 26 training_loop """lcwa""" +744 26 evaluator """rankbased""" +744 27 dataset """kinships""" +744 27 model """transd""" +744 27 loss """softplus""" +744 27 regularizer """no""" +744 27 optimizer """adadelta""" +744 27 training_loop """lcwa""" +744 27 evaluator """rankbased""" +744 28 dataset """kinships""" +744 28 model """transd""" +744 28 loss """softplus""" +744 28 regularizer """no""" +744 28 optimizer """adadelta""" +744 28 training_loop """lcwa""" +744 28 evaluator """rankbased""" +744 29 dataset """kinships""" +744 29 model """transd""" +744 29 loss """softplus""" +744 29 regularizer """no""" +744 29 optimizer """adadelta""" +744 29 training_loop """lcwa""" +744 29 evaluator """rankbased""" +744 30 dataset """kinships""" +744 30 model """transd""" +744 30 loss """softplus""" +744 30 regularizer """no""" +744 30 optimizer """adadelta""" +744 30 training_loop """lcwa""" +744 30 evaluator """rankbased""" +744 31 dataset """kinships""" +744 31 model """transd""" +744 31 loss """softplus""" +744 31 regularizer """no""" +744 31 optimizer """adadelta""" +744 31 training_loop """lcwa""" +744 31 evaluator """rankbased""" +744 32 dataset """kinships""" +744 32 model """transd""" +744 32 loss """softplus""" +744 32 regularizer """no""" +744 32 optimizer """adadelta""" +744 32 training_loop """lcwa""" +744 32 evaluator """rankbased""" +744 33 dataset """kinships""" +744 33 model """transd""" +744 33 loss """softplus""" +744 33 regularizer """no""" +744 33 optimizer """adadelta""" +744 33 training_loop """lcwa""" +744 33 evaluator """rankbased""" +744 34 dataset """kinships""" +744 34 model """transd""" +744 34 loss """softplus""" +744 34 regularizer """no""" +744 34 optimizer """adadelta""" +744 34 training_loop """lcwa""" +744 34 evaluator """rankbased""" +744 35 dataset """kinships""" +744 35 model """transd""" +744 
35 loss """softplus""" +744 35 regularizer """no""" +744 35 optimizer """adadelta""" +744 35 training_loop """lcwa""" +744 35 evaluator """rankbased""" +744 36 dataset """kinships""" +744 36 model """transd""" +744 36 loss """softplus""" +744 36 regularizer """no""" +744 36 optimizer """adadelta""" +744 36 training_loop """lcwa""" +744 36 evaluator """rankbased""" +744 37 dataset """kinships""" +744 37 model """transd""" +744 37 loss """softplus""" +744 37 regularizer """no""" +744 37 optimizer """adadelta""" +744 37 training_loop """lcwa""" +744 37 evaluator """rankbased""" +744 38 dataset """kinships""" +744 38 model """transd""" +744 38 loss """softplus""" +744 38 regularizer """no""" +744 38 optimizer """adadelta""" +744 38 training_loop """lcwa""" +744 38 evaluator """rankbased""" +744 39 dataset """kinships""" +744 39 model """transd""" +744 39 loss """softplus""" +744 39 regularizer """no""" +744 39 optimizer """adadelta""" +744 39 training_loop """lcwa""" +744 39 evaluator """rankbased""" +744 40 dataset """kinships""" +744 40 model """transd""" +744 40 loss """softplus""" +744 40 regularizer """no""" +744 40 optimizer """adadelta""" +744 40 training_loop """lcwa""" +744 40 evaluator """rankbased""" +744 41 dataset """kinships""" +744 41 model """transd""" +744 41 loss """softplus""" +744 41 regularizer """no""" +744 41 optimizer """adadelta""" +744 41 training_loop """lcwa""" +744 41 evaluator """rankbased""" +744 42 dataset """kinships""" +744 42 model """transd""" +744 42 loss """softplus""" +744 42 regularizer """no""" +744 42 optimizer """adadelta""" +744 42 training_loop """lcwa""" +744 42 evaluator """rankbased""" +744 43 dataset """kinships""" +744 43 model """transd""" +744 43 loss """softplus""" +744 43 regularizer """no""" +744 43 optimizer """adadelta""" +744 43 training_loop """lcwa""" +744 43 evaluator """rankbased""" +744 44 dataset """kinships""" +744 44 model """transd""" +744 44 loss """softplus""" +744 44 regularizer """no""" +744 44 
optimizer """adadelta""" +744 44 training_loop """lcwa""" +744 44 evaluator """rankbased""" +744 45 dataset """kinships""" +744 45 model """transd""" +744 45 loss """softplus""" +744 45 regularizer """no""" +744 45 optimizer """adadelta""" +744 45 training_loop """lcwa""" +744 45 evaluator """rankbased""" +744 46 dataset """kinships""" +744 46 model """transd""" +744 46 loss """softplus""" +744 46 regularizer """no""" +744 46 optimizer """adadelta""" +744 46 training_loop """lcwa""" +744 46 evaluator """rankbased""" +744 47 dataset """kinships""" +744 47 model """transd""" +744 47 loss """softplus""" +744 47 regularizer """no""" +744 47 optimizer """adadelta""" +744 47 training_loop """lcwa""" +744 47 evaluator """rankbased""" +744 48 dataset """kinships""" +744 48 model """transd""" +744 48 loss """softplus""" +744 48 regularizer """no""" +744 48 optimizer """adadelta""" +744 48 training_loop """lcwa""" +744 48 evaluator """rankbased""" +744 49 dataset """kinships""" +744 49 model """transd""" +744 49 loss """softplus""" +744 49 regularizer """no""" +744 49 optimizer """adadelta""" +744 49 training_loop """lcwa""" +744 49 evaluator """rankbased""" +744 50 dataset """kinships""" +744 50 model """transd""" +744 50 loss """softplus""" +744 50 regularizer """no""" +744 50 optimizer """adadelta""" +744 50 training_loop """lcwa""" +744 50 evaluator """rankbased""" +744 51 dataset """kinships""" +744 51 model """transd""" +744 51 loss """softplus""" +744 51 regularizer """no""" +744 51 optimizer """adadelta""" +744 51 training_loop """lcwa""" +744 51 evaluator """rankbased""" +744 52 dataset """kinships""" +744 52 model """transd""" +744 52 loss """softplus""" +744 52 regularizer """no""" +744 52 optimizer """adadelta""" +744 52 training_loop """lcwa""" +744 52 evaluator """rankbased""" +744 53 dataset """kinships""" +744 53 model """transd""" +744 53 loss """softplus""" +744 53 regularizer """no""" +744 53 optimizer """adadelta""" +744 53 training_loop """lcwa""" +744 
53 evaluator """rankbased""" +744 54 dataset """kinships""" +744 54 model """transd""" +744 54 loss """softplus""" +744 54 regularizer """no""" +744 54 optimizer """adadelta""" +744 54 training_loop """lcwa""" +744 54 evaluator """rankbased""" +744 55 dataset """kinships""" +744 55 model """transd""" +744 55 loss """softplus""" +744 55 regularizer """no""" +744 55 optimizer """adadelta""" +744 55 training_loop """lcwa""" +744 55 evaluator """rankbased""" +744 56 dataset """kinships""" +744 56 model """transd""" +744 56 loss """softplus""" +744 56 regularizer """no""" +744 56 optimizer """adadelta""" +744 56 training_loop """lcwa""" +744 56 evaluator """rankbased""" +744 57 dataset """kinships""" +744 57 model """transd""" +744 57 loss """softplus""" +744 57 regularizer """no""" +744 57 optimizer """adadelta""" +744 57 training_loop """lcwa""" +744 57 evaluator """rankbased""" +744 58 dataset """kinships""" +744 58 model """transd""" +744 58 loss """softplus""" +744 58 regularizer """no""" +744 58 optimizer """adadelta""" +744 58 training_loop """lcwa""" +744 58 evaluator """rankbased""" +744 59 dataset """kinships""" +744 59 model """transd""" +744 59 loss """softplus""" +744 59 regularizer """no""" +744 59 optimizer """adadelta""" +744 59 training_loop """lcwa""" +744 59 evaluator """rankbased""" +744 60 dataset """kinships""" +744 60 model """transd""" +744 60 loss """softplus""" +744 60 regularizer """no""" +744 60 optimizer """adadelta""" +744 60 training_loop """lcwa""" +744 60 evaluator """rankbased""" +744 61 dataset """kinships""" +744 61 model """transd""" +744 61 loss """softplus""" +744 61 regularizer """no""" +744 61 optimizer """adadelta""" +744 61 training_loop """lcwa""" +744 61 evaluator """rankbased""" +744 62 dataset """kinships""" +744 62 model """transd""" +744 62 loss """softplus""" +744 62 regularizer """no""" +744 62 optimizer """adadelta""" +744 62 training_loop """lcwa""" +744 62 evaluator """rankbased""" +744 63 dataset """kinships""" +744 
63 model """transd""" +744 63 loss """softplus""" +744 63 regularizer """no""" +744 63 optimizer """adadelta""" +744 63 training_loop """lcwa""" +744 63 evaluator """rankbased""" +744 64 dataset """kinships""" +744 64 model """transd""" +744 64 loss """softplus""" +744 64 regularizer """no""" +744 64 optimizer """adadelta""" +744 64 training_loop """lcwa""" +744 64 evaluator """rankbased""" +744 65 dataset """kinships""" +744 65 model """transd""" +744 65 loss """softplus""" +744 65 regularizer """no""" +744 65 optimizer """adadelta""" +744 65 training_loop """lcwa""" +744 65 evaluator """rankbased""" +744 66 dataset """kinships""" +744 66 model """transd""" +744 66 loss """softplus""" +744 66 regularizer """no""" +744 66 optimizer """adadelta""" +744 66 training_loop """lcwa""" +744 66 evaluator """rankbased""" +744 67 dataset """kinships""" +744 67 model """transd""" +744 67 loss """softplus""" +744 67 regularizer """no""" +744 67 optimizer """adadelta""" +744 67 training_loop """lcwa""" +744 67 evaluator """rankbased""" +744 68 dataset """kinships""" +744 68 model """transd""" +744 68 loss """softplus""" +744 68 regularizer """no""" +744 68 optimizer """adadelta""" +744 68 training_loop """lcwa""" +744 68 evaluator """rankbased""" +744 69 dataset """kinships""" +744 69 model """transd""" +744 69 loss """softplus""" +744 69 regularizer """no""" +744 69 optimizer """adadelta""" +744 69 training_loop """lcwa""" +744 69 evaluator """rankbased""" +744 70 dataset """kinships""" +744 70 model """transd""" +744 70 loss """softplus""" +744 70 regularizer """no""" +744 70 optimizer """adadelta""" +744 70 training_loop """lcwa""" +744 70 evaluator """rankbased""" +744 71 dataset """kinships""" +744 71 model """transd""" +744 71 loss """softplus""" +744 71 regularizer """no""" +744 71 optimizer """adadelta""" +744 71 training_loop """lcwa""" +744 71 evaluator """rankbased""" +744 72 dataset """kinships""" +744 72 model """transd""" +744 72 loss """softplus""" +744 72 
regularizer """no""" +744 72 optimizer """adadelta""" +744 72 training_loop """lcwa""" +744 72 evaluator """rankbased""" +744 73 dataset """kinships""" +744 73 model """transd""" +744 73 loss """softplus""" +744 73 regularizer """no""" +744 73 optimizer """adadelta""" +744 73 training_loop """lcwa""" +744 73 evaluator """rankbased""" +744 74 dataset """kinships""" +744 74 model """transd""" +744 74 loss """softplus""" +744 74 regularizer """no""" +744 74 optimizer """adadelta""" +744 74 training_loop """lcwa""" +744 74 evaluator """rankbased""" +744 75 dataset """kinships""" +744 75 model """transd""" +744 75 loss """softplus""" +744 75 regularizer """no""" +744 75 optimizer """adadelta""" +744 75 training_loop """lcwa""" +744 75 evaluator """rankbased""" +744 76 dataset """kinships""" +744 76 model """transd""" +744 76 loss """softplus""" +744 76 regularizer """no""" +744 76 optimizer """adadelta""" +744 76 training_loop """lcwa""" +744 76 evaluator """rankbased""" +744 77 dataset """kinships""" +744 77 model """transd""" +744 77 loss """softplus""" +744 77 regularizer """no""" +744 77 optimizer """adadelta""" +744 77 training_loop """lcwa""" +744 77 evaluator """rankbased""" +744 78 dataset """kinships""" +744 78 model """transd""" +744 78 loss """softplus""" +744 78 regularizer """no""" +744 78 optimizer """adadelta""" +744 78 training_loop """lcwa""" +744 78 evaluator """rankbased""" +744 79 dataset """kinships""" +744 79 model """transd""" +744 79 loss """softplus""" +744 79 regularizer """no""" +744 79 optimizer """adadelta""" +744 79 training_loop """lcwa""" +744 79 evaluator """rankbased""" +744 80 dataset """kinships""" +744 80 model """transd""" +744 80 loss """softplus""" +744 80 regularizer """no""" +744 80 optimizer """adadelta""" +744 80 training_loop """lcwa""" +744 80 evaluator """rankbased""" +744 81 dataset """kinships""" +744 81 model """transd""" +744 81 loss """softplus""" +744 81 regularizer """no""" +744 81 optimizer """adadelta""" +744 81 
training_loop """lcwa""" +744 81 evaluator """rankbased""" +744 82 dataset """kinships""" +744 82 model """transd""" +744 82 loss """softplus""" +744 82 regularizer """no""" +744 82 optimizer """adadelta""" +744 82 training_loop """lcwa""" +744 82 evaluator """rankbased""" +744 83 dataset """kinships""" +744 83 model """transd""" +744 83 loss """softplus""" +744 83 regularizer """no""" +744 83 optimizer """adadelta""" +744 83 training_loop """lcwa""" +744 83 evaluator """rankbased""" +744 84 dataset """kinships""" +744 84 model """transd""" +744 84 loss """softplus""" +744 84 regularizer """no""" +744 84 optimizer """adadelta""" +744 84 training_loop """lcwa""" +744 84 evaluator """rankbased""" +744 85 dataset """kinships""" +744 85 model """transd""" +744 85 loss """softplus""" +744 85 regularizer """no""" +744 85 optimizer """adadelta""" +744 85 training_loop """lcwa""" +744 85 evaluator """rankbased""" +744 86 dataset """kinships""" +744 86 model """transd""" +744 86 loss """softplus""" +744 86 regularizer """no""" +744 86 optimizer """adadelta""" +744 86 training_loop """lcwa""" +744 86 evaluator """rankbased""" +744 87 dataset """kinships""" +744 87 model """transd""" +744 87 loss """softplus""" +744 87 regularizer """no""" +744 87 optimizer """adadelta""" +744 87 training_loop """lcwa""" +744 87 evaluator """rankbased""" +744 88 dataset """kinships""" +744 88 model """transd""" +744 88 loss """softplus""" +744 88 regularizer """no""" +744 88 optimizer """adadelta""" +744 88 training_loop """lcwa""" +744 88 evaluator """rankbased""" +744 89 dataset """kinships""" +744 89 model """transd""" +744 89 loss """softplus""" +744 89 regularizer """no""" +744 89 optimizer """adadelta""" +744 89 training_loop """lcwa""" +744 89 evaluator """rankbased""" +744 90 dataset """kinships""" +744 90 model """transd""" +744 90 loss """softplus""" +744 90 regularizer """no""" +744 90 optimizer """adadelta""" +744 90 training_loop """lcwa""" +744 90 evaluator """rankbased""" +744 
91 dataset """kinships""" +744 91 model """transd""" +744 91 loss """softplus""" +744 91 regularizer """no""" +744 91 optimizer """adadelta""" +744 91 training_loop """lcwa""" +744 91 evaluator """rankbased""" +744 92 dataset """kinships""" +744 92 model """transd""" +744 92 loss """softplus""" +744 92 regularizer """no""" +744 92 optimizer """adadelta""" +744 92 training_loop """lcwa""" +744 92 evaluator """rankbased""" +744 93 dataset """kinships""" +744 93 model """transd""" +744 93 loss """softplus""" +744 93 regularizer """no""" +744 93 optimizer """adadelta""" +744 93 training_loop """lcwa""" +744 93 evaluator """rankbased""" +744 94 dataset """kinships""" +744 94 model """transd""" +744 94 loss """softplus""" +744 94 regularizer """no""" +744 94 optimizer """adadelta""" +744 94 training_loop """lcwa""" +744 94 evaluator """rankbased""" +744 95 dataset """kinships""" +744 95 model """transd""" +744 95 loss """softplus""" +744 95 regularizer """no""" +744 95 optimizer """adadelta""" +744 95 training_loop """lcwa""" +744 95 evaluator """rankbased""" +744 96 dataset """kinships""" +744 96 model """transd""" +744 96 loss """softplus""" +744 96 regularizer """no""" +744 96 optimizer """adadelta""" +744 96 training_loop """lcwa""" +744 96 evaluator """rankbased""" +744 97 dataset """kinships""" +744 97 model """transd""" +744 97 loss """softplus""" +744 97 regularizer """no""" +744 97 optimizer """adadelta""" +744 97 training_loop """lcwa""" +744 97 evaluator """rankbased""" +744 98 dataset """kinships""" +744 98 model """transd""" +744 98 loss """softplus""" +744 98 regularizer """no""" +744 98 optimizer """adadelta""" +744 98 training_loop """lcwa""" +744 98 evaluator """rankbased""" +744 99 dataset """kinships""" +744 99 model """transd""" +744 99 loss """softplus""" +744 99 regularizer """no""" +744 99 optimizer """adadelta""" +744 99 training_loop """lcwa""" +744 99 evaluator """rankbased""" +744 100 dataset """kinships""" +744 100 model """transd""" +744 100 
loss """softplus""" +744 100 regularizer """no""" +744 100 optimizer """adadelta""" +744 100 training_loop """lcwa""" +744 100 evaluator """rankbased""" +745 1 model.embedding_dim 0.0 +745 1 model.relation_dim 0.0 +745 1 training.batch_size 0.0 +745 1 training.label_smoothing 0.0023813894760564692 +745 2 model.embedding_dim 2.0 +745 2 model.relation_dim 1.0 +745 2 training.batch_size 0.0 +745 2 training.label_smoothing 0.005042258463856074 +745 3 model.embedding_dim 1.0 +745 3 model.relation_dim 2.0 +745 3 training.batch_size 0.0 +745 3 training.label_smoothing 0.5902973752202284 +745 4 model.embedding_dim 1.0 +745 4 model.relation_dim 1.0 +745 4 training.batch_size 0.0 +745 4 training.label_smoothing 0.09349725384803483 +745 5 model.embedding_dim 2.0 +745 5 model.relation_dim 0.0 +745 5 training.batch_size 0.0 +745 5 training.label_smoothing 0.18099949609065064 +745 6 model.embedding_dim 0.0 +745 6 model.relation_dim 0.0 +745 6 training.batch_size 0.0 +745 6 training.label_smoothing 0.0034700297671629824 +745 7 model.embedding_dim 2.0 +745 7 model.relation_dim 0.0 +745 7 training.batch_size 1.0 +745 7 training.label_smoothing 0.002805755837941893 +745 8 model.embedding_dim 2.0 +745 8 model.relation_dim 1.0 +745 8 training.batch_size 0.0 +745 8 training.label_smoothing 0.008176682186313802 +745 9 model.embedding_dim 0.0 +745 9 model.relation_dim 0.0 +745 9 training.batch_size 0.0 +745 9 training.label_smoothing 0.010909212440637656 +745 10 model.embedding_dim 2.0 +745 10 model.relation_dim 1.0 +745 10 training.batch_size 0.0 +745 10 training.label_smoothing 0.2573513424159904 +745 11 model.embedding_dim 1.0 +745 11 model.relation_dim 1.0 +745 11 training.batch_size 2.0 +745 11 training.label_smoothing 0.10179457796600641 +745 12 model.embedding_dim 2.0 +745 12 model.relation_dim 2.0 +745 12 training.batch_size 1.0 +745 12 training.label_smoothing 0.0064282583999800535 +745 13 model.embedding_dim 0.0 +745 13 model.relation_dim 0.0 +745 13 training.batch_size 0.0 
+745 13 training.label_smoothing 0.13229097923047756 +745 14 model.embedding_dim 2.0 +745 14 model.relation_dim 1.0 +745 14 training.batch_size 2.0 +745 14 training.label_smoothing 0.009347210686604015 +745 15 model.embedding_dim 2.0 +745 15 model.relation_dim 2.0 +745 15 training.batch_size 1.0 +745 15 training.label_smoothing 0.0011282123218945338 +745 16 model.embedding_dim 2.0 +745 16 model.relation_dim 0.0 +745 16 training.batch_size 1.0 +745 16 training.label_smoothing 0.008171709250744598 +745 17 model.embedding_dim 2.0 +745 17 model.relation_dim 1.0 +745 17 training.batch_size 1.0 +745 17 training.label_smoothing 0.001073258726705112 +745 18 model.embedding_dim 2.0 +745 18 model.relation_dim 1.0 +745 18 training.batch_size 0.0 +745 18 training.label_smoothing 0.002323551785243227 +745 19 model.embedding_dim 1.0 +745 19 model.relation_dim 0.0 +745 19 training.batch_size 1.0 +745 19 training.label_smoothing 0.0036978819341380604 +745 20 model.embedding_dim 1.0 +745 20 model.relation_dim 2.0 +745 20 training.batch_size 2.0 +745 20 training.label_smoothing 0.12709451908398367 +745 21 model.embedding_dim 1.0 +745 21 model.relation_dim 0.0 +745 21 training.batch_size 2.0 +745 21 training.label_smoothing 0.020909641056547305 +745 22 model.embedding_dim 0.0 +745 22 model.relation_dim 2.0 +745 22 training.batch_size 1.0 +745 22 training.label_smoothing 0.009901710955815097 +745 23 model.embedding_dim 2.0 +745 23 model.relation_dim 2.0 +745 23 training.batch_size 1.0 +745 23 training.label_smoothing 0.00561511195605538 +745 24 model.embedding_dim 1.0 +745 24 model.relation_dim 2.0 +745 24 training.batch_size 0.0 +745 24 training.label_smoothing 0.005911696360413741 +745 25 model.embedding_dim 2.0 +745 25 model.relation_dim 0.0 +745 25 training.batch_size 2.0 +745 25 training.label_smoothing 0.005809060009461572 +745 26 model.embedding_dim 2.0 +745 26 model.relation_dim 2.0 +745 26 training.batch_size 1.0 +745 26 training.label_smoothing 0.0029138562351025727 +745 27 
model.embedding_dim 2.0 +745 27 model.relation_dim 2.0 +745 27 training.batch_size 2.0 +745 27 training.label_smoothing 0.0014635235448860792 +745 28 model.embedding_dim 2.0 +745 28 model.relation_dim 2.0 +745 28 training.batch_size 0.0 +745 28 training.label_smoothing 0.0034215059978954885 +745 29 model.embedding_dim 0.0 +745 29 model.relation_dim 1.0 +745 29 training.batch_size 0.0 +745 29 training.label_smoothing 0.001014501746365453 +745 30 model.embedding_dim 1.0 +745 30 model.relation_dim 2.0 +745 30 training.batch_size 1.0 +745 30 training.label_smoothing 0.2252828698420794 +745 31 model.embedding_dim 2.0 +745 31 model.relation_dim 2.0 +745 31 training.batch_size 0.0 +745 31 training.label_smoothing 0.20438085563268787 +745 32 model.embedding_dim 1.0 +745 32 model.relation_dim 0.0 +745 32 training.batch_size 0.0 +745 32 training.label_smoothing 0.07027713694030802 +745 33 model.embedding_dim 1.0 +745 33 model.relation_dim 1.0 +745 33 training.batch_size 1.0 +745 33 training.label_smoothing 0.09122895331072567 +745 34 model.embedding_dim 2.0 +745 34 model.relation_dim 0.0 +745 34 training.batch_size 1.0 +745 34 training.label_smoothing 0.02852626443957636 +745 35 model.embedding_dim 0.0 +745 35 model.relation_dim 2.0 +745 35 training.batch_size 2.0 +745 35 training.label_smoothing 0.005771088428005842 +745 36 model.embedding_dim 1.0 +745 36 model.relation_dim 2.0 +745 36 training.batch_size 1.0 +745 36 training.label_smoothing 0.6590059726463088 +745 37 model.embedding_dim 2.0 +745 37 model.relation_dim 0.0 +745 37 training.batch_size 0.0 +745 37 training.label_smoothing 0.00623956562895058 +745 38 model.embedding_dim 2.0 +745 38 model.relation_dim 2.0 +745 38 training.batch_size 0.0 +745 38 training.label_smoothing 0.46978288671738416 +745 39 model.embedding_dim 0.0 +745 39 model.relation_dim 0.0 +745 39 training.batch_size 0.0 +745 39 training.label_smoothing 0.007054104548072532 +745 40 model.embedding_dim 2.0 +745 40 model.relation_dim 0.0 +745 40 
training.batch_size 1.0 +745 40 training.label_smoothing 0.5223564743972439 +745 41 model.embedding_dim 0.0 +745 41 model.relation_dim 2.0 +745 41 training.batch_size 0.0 +745 41 training.label_smoothing 0.07108931701574554 +745 42 model.embedding_dim 1.0 +745 42 model.relation_dim 1.0 +745 42 training.batch_size 0.0 +745 42 training.label_smoothing 0.27683063558570575 +745 43 model.embedding_dim 1.0 +745 43 model.relation_dim 2.0 +745 43 training.batch_size 1.0 +745 43 training.label_smoothing 0.8948306230692973 +745 44 model.embedding_dim 0.0 +745 44 model.relation_dim 2.0 +745 44 training.batch_size 1.0 +745 44 training.label_smoothing 0.008049337247923559 +745 45 model.embedding_dim 0.0 +745 45 model.relation_dim 2.0 +745 45 training.batch_size 2.0 +745 45 training.label_smoothing 0.7996655936345018 +745 46 model.embedding_dim 2.0 +745 46 model.relation_dim 1.0 +745 46 training.batch_size 1.0 +745 46 training.label_smoothing 0.028656789105735606 +745 47 model.embedding_dim 2.0 +745 47 model.relation_dim 1.0 +745 47 training.batch_size 2.0 +745 47 training.label_smoothing 0.15914922456533076 +745 48 model.embedding_dim 0.0 +745 48 model.relation_dim 2.0 +745 48 training.batch_size 1.0 +745 48 training.label_smoothing 0.09597854938186391 +745 49 model.embedding_dim 1.0 +745 49 model.relation_dim 2.0 +745 49 training.batch_size 0.0 +745 49 training.label_smoothing 0.0037087910989873824 +745 50 model.embedding_dim 2.0 +745 50 model.relation_dim 2.0 +745 50 training.batch_size 0.0 +745 50 training.label_smoothing 0.06824355794244968 +745 51 model.embedding_dim 2.0 +745 51 model.relation_dim 0.0 +745 51 training.batch_size 2.0 +745 51 training.label_smoothing 0.0381033377295934 +745 52 model.embedding_dim 1.0 +745 52 model.relation_dim 2.0 +745 52 training.batch_size 1.0 +745 52 training.label_smoothing 0.03002836198700225 +745 53 model.embedding_dim 0.0 +745 53 model.relation_dim 0.0 +745 53 training.batch_size 1.0 +745 53 training.label_smoothing 
0.003732530393414855 +745 54 model.embedding_dim 0.0 +745 54 model.relation_dim 1.0 +745 54 training.batch_size 0.0 +745 54 training.label_smoothing 0.04121109440900869 +745 55 model.embedding_dim 1.0 +745 55 model.relation_dim 0.0 +745 55 training.batch_size 1.0 +745 55 training.label_smoothing 0.008540797448532596 +745 56 model.embedding_dim 2.0 +745 56 model.relation_dim 0.0 +745 56 training.batch_size 0.0 +745 56 training.label_smoothing 0.31560376625796704 +745 57 model.embedding_dim 1.0 +745 57 model.relation_dim 2.0 +745 57 training.batch_size 2.0 +745 57 training.label_smoothing 0.011909744763421702 +745 58 model.embedding_dim 2.0 +745 58 model.relation_dim 2.0 +745 58 training.batch_size 0.0 +745 58 training.label_smoothing 0.10185726512358527 +745 59 model.embedding_dim 1.0 +745 59 model.relation_dim 2.0 +745 59 training.batch_size 0.0 +745 59 training.label_smoothing 0.0052369244835161095 +745 60 model.embedding_dim 0.0 +745 60 model.relation_dim 0.0 +745 60 training.batch_size 1.0 +745 60 training.label_smoothing 0.04803119205145536 +745 61 model.embedding_dim 2.0 +745 61 model.relation_dim 2.0 +745 61 training.batch_size 0.0 +745 61 training.label_smoothing 0.0064660623645291185 +745 62 model.embedding_dim 2.0 +745 62 model.relation_dim 0.0 +745 62 training.batch_size 0.0 +745 62 training.label_smoothing 0.06944698275211579 +745 63 model.embedding_dim 1.0 +745 63 model.relation_dim 2.0 +745 63 training.batch_size 2.0 +745 63 training.label_smoothing 0.01794064354324568 +745 64 model.embedding_dim 0.0 +745 64 model.relation_dim 1.0 +745 64 training.batch_size 1.0 +745 64 training.label_smoothing 0.034932698916115563 +745 65 model.embedding_dim 2.0 +745 65 model.relation_dim 2.0 +745 65 training.batch_size 0.0 +745 65 training.label_smoothing 0.0980020978946827 +745 66 model.embedding_dim 0.0 +745 66 model.relation_dim 2.0 +745 66 training.batch_size 0.0 +745 66 training.label_smoothing 0.006765347116181111 +745 67 model.embedding_dim 1.0 +745 67 
model.relation_dim 0.0 +745 67 training.batch_size 2.0 +745 67 training.label_smoothing 0.009564916481377488 +745 68 model.embedding_dim 2.0 +745 68 model.relation_dim 0.0 +745 68 training.batch_size 1.0 +745 68 training.label_smoothing 0.15806371799665683 +745 69 model.embedding_dim 2.0 +745 69 model.relation_dim 0.0 +745 69 training.batch_size 1.0 +745 69 training.label_smoothing 0.0014651313437527736 +745 70 model.embedding_dim 0.0 +745 70 model.relation_dim 0.0 +745 70 training.batch_size 1.0 +745 70 training.label_smoothing 0.008560784298149084 +745 71 model.embedding_dim 2.0 +745 71 model.relation_dim 1.0 +745 71 training.batch_size 1.0 +745 71 training.label_smoothing 0.06550440070067057 +745 72 model.embedding_dim 0.0 +745 72 model.relation_dim 1.0 +745 72 training.batch_size 1.0 +745 72 training.label_smoothing 0.057650397768887765 +745 73 model.embedding_dim 0.0 +745 73 model.relation_dim 2.0 +745 73 training.batch_size 1.0 +745 73 training.label_smoothing 0.46130644112102825 +745 74 model.embedding_dim 2.0 +745 74 model.relation_dim 1.0 +745 74 training.batch_size 1.0 +745 74 training.label_smoothing 0.02049824841124284 +745 75 model.embedding_dim 0.0 +745 75 model.relation_dim 1.0 +745 75 training.batch_size 2.0 +745 75 training.label_smoothing 0.0071563498730531985 +745 76 model.embedding_dim 1.0 +745 76 model.relation_dim 2.0 +745 76 training.batch_size 2.0 +745 76 training.label_smoothing 0.0016545511026372602 +745 77 model.embedding_dim 0.0 +745 77 model.relation_dim 1.0 +745 77 training.batch_size 0.0 +745 77 training.label_smoothing 0.0021924175703012597 +745 78 model.embedding_dim 2.0 +745 78 model.relation_dim 0.0 +745 78 training.batch_size 1.0 +745 78 training.label_smoothing 0.24664449832444454 +745 79 model.embedding_dim 0.0 +745 79 model.relation_dim 1.0 +745 79 training.batch_size 2.0 +745 79 training.label_smoothing 0.26087023906523044 +745 80 model.embedding_dim 2.0 +745 80 model.relation_dim 0.0 +745 80 training.batch_size 0.0 +745 80 
training.label_smoothing 0.23289271425154415 +745 81 model.embedding_dim 2.0 +745 81 model.relation_dim 2.0 +745 81 training.batch_size 1.0 +745 81 training.label_smoothing 0.003620403859292184 +745 82 model.embedding_dim 1.0 +745 82 model.relation_dim 1.0 +745 82 training.batch_size 2.0 +745 82 training.label_smoothing 0.1999581950574364 +745 83 model.embedding_dim 0.0 +745 83 model.relation_dim 1.0 +745 83 training.batch_size 1.0 +745 83 training.label_smoothing 0.0014880417337978119 +745 84 model.embedding_dim 1.0 +745 84 model.relation_dim 0.0 +745 84 training.batch_size 2.0 +745 84 training.label_smoothing 0.1954983303714687 +745 85 model.embedding_dim 2.0 +745 85 model.relation_dim 2.0 +745 85 training.batch_size 1.0 +745 85 training.label_smoothing 0.04754514621349559 +745 86 model.embedding_dim 2.0 +745 86 model.relation_dim 0.0 +745 86 training.batch_size 2.0 +745 86 training.label_smoothing 0.08212786281828549 +745 87 model.embedding_dim 1.0 +745 87 model.relation_dim 2.0 +745 87 training.batch_size 0.0 +745 87 training.label_smoothing 0.06971242835874181 +745 88 model.embedding_dim 1.0 +745 88 model.relation_dim 0.0 +745 88 training.batch_size 1.0 +745 88 training.label_smoothing 0.015534344624578679 +745 89 model.embedding_dim 2.0 +745 89 model.relation_dim 0.0 +745 89 training.batch_size 2.0 +745 89 training.label_smoothing 0.005195716752544603 +745 90 model.embedding_dim 2.0 +745 90 model.relation_dim 2.0 +745 90 training.batch_size 0.0 +745 90 training.label_smoothing 0.006719495651235344 +745 91 model.embedding_dim 2.0 +745 91 model.relation_dim 1.0 +745 91 training.batch_size 0.0 +745 91 training.label_smoothing 0.0400964129115665 +745 92 model.embedding_dim 2.0 +745 92 model.relation_dim 2.0 +745 92 training.batch_size 2.0 +745 92 training.label_smoothing 0.4874165225592182 +745 93 model.embedding_dim 2.0 +745 93 model.relation_dim 0.0 +745 93 training.batch_size 1.0 +745 93 training.label_smoothing 0.051751062013250165 +745 94 model.embedding_dim 
1.0 +745 94 model.relation_dim 2.0 +745 94 training.batch_size 0.0 +745 94 training.label_smoothing 0.05116897742179426 +745 95 model.embedding_dim 0.0 +745 95 model.relation_dim 0.0 +745 95 training.batch_size 1.0 +745 95 training.label_smoothing 0.0015687938432693504 +745 96 model.embedding_dim 0.0 +745 96 model.relation_dim 0.0 +745 96 training.batch_size 2.0 +745 96 training.label_smoothing 0.30694998106721916 +745 97 model.embedding_dim 1.0 +745 97 model.relation_dim 0.0 +745 97 training.batch_size 1.0 +745 97 training.label_smoothing 0.05857253046229077 +745 98 model.embedding_dim 2.0 +745 98 model.relation_dim 2.0 +745 98 training.batch_size 1.0 +745 98 training.label_smoothing 0.17229903912022304 +745 99 model.embedding_dim 0.0 +745 99 model.relation_dim 2.0 +745 99 training.batch_size 0.0 +745 99 training.label_smoothing 0.001832487807763694 +745 100 model.embedding_dim 0.0 +745 100 model.relation_dim 0.0 +745 100 training.batch_size 0.0 +745 100 training.label_smoothing 0.0013294340931630285 +745 1 dataset """kinships""" +745 1 model """transd""" +745 1 loss """crossentropy""" +745 1 regularizer """no""" +745 1 optimizer """adadelta""" +745 1 training_loop """lcwa""" +745 1 evaluator """rankbased""" +745 2 dataset """kinships""" +745 2 model """transd""" +745 2 loss """crossentropy""" +745 2 regularizer """no""" +745 2 optimizer """adadelta""" +745 2 training_loop """lcwa""" +745 2 evaluator """rankbased""" +745 3 dataset """kinships""" +745 3 model """transd""" +745 3 loss """crossentropy""" +745 3 regularizer """no""" +745 3 optimizer """adadelta""" +745 3 training_loop """lcwa""" +745 3 evaluator """rankbased""" +745 4 dataset """kinships""" +745 4 model """transd""" +745 4 loss """crossentropy""" +745 4 regularizer """no""" +745 4 optimizer """adadelta""" +745 4 training_loop """lcwa""" +745 4 evaluator """rankbased""" +745 5 dataset """kinships""" +745 5 model """transd""" +745 5 loss """crossentropy""" +745 5 regularizer """no""" +745 5 optimizer 
"""adadelta""" +745 5 training_loop """lcwa""" +745 5 evaluator """rankbased""" +745 6 dataset """kinships""" +745 6 model """transd""" +745 6 loss """crossentropy""" +745 6 regularizer """no""" +745 6 optimizer """adadelta""" +745 6 training_loop """lcwa""" +745 6 evaluator """rankbased""" +745 7 dataset """kinships""" +745 7 model """transd""" +745 7 loss """crossentropy""" +745 7 regularizer """no""" +745 7 optimizer """adadelta""" +745 7 training_loop """lcwa""" +745 7 evaluator """rankbased""" +745 8 dataset """kinships""" +745 8 model """transd""" +745 8 loss """crossentropy""" +745 8 regularizer """no""" +745 8 optimizer """adadelta""" +745 8 training_loop """lcwa""" +745 8 evaluator """rankbased""" +745 9 dataset """kinships""" +745 9 model """transd""" +745 9 loss """crossentropy""" +745 9 regularizer """no""" +745 9 optimizer """adadelta""" +745 9 training_loop """lcwa""" +745 9 evaluator """rankbased""" +745 10 dataset """kinships""" +745 10 model """transd""" +745 10 loss """crossentropy""" +745 10 regularizer """no""" +745 10 optimizer """adadelta""" +745 10 training_loop """lcwa""" +745 10 evaluator """rankbased""" +745 11 dataset """kinships""" +745 11 model """transd""" +745 11 loss """crossentropy""" +745 11 regularizer """no""" +745 11 optimizer """adadelta""" +745 11 training_loop """lcwa""" +745 11 evaluator """rankbased""" +745 12 dataset """kinships""" +745 12 model """transd""" +745 12 loss """crossentropy""" +745 12 regularizer """no""" +745 12 optimizer """adadelta""" +745 12 training_loop """lcwa""" +745 12 evaluator """rankbased""" +745 13 dataset """kinships""" +745 13 model """transd""" +745 13 loss """crossentropy""" +745 13 regularizer """no""" +745 13 optimizer """adadelta""" +745 13 training_loop """lcwa""" +745 13 evaluator """rankbased""" +745 14 dataset """kinships""" +745 14 model """transd""" +745 14 loss """crossentropy""" +745 14 regularizer """no""" +745 14 optimizer """adadelta""" +745 14 training_loop """lcwa""" +745 14 
evaluator """rankbased""" +745 15 dataset """kinships""" +745 15 model """transd""" +745 15 loss """crossentropy""" +745 15 regularizer """no""" +745 15 optimizer """adadelta""" +745 15 training_loop """lcwa""" +745 15 evaluator """rankbased""" +745 16 dataset """kinships""" +745 16 model """transd""" +745 16 loss """crossentropy""" +745 16 regularizer """no""" +745 16 optimizer """adadelta""" +745 16 training_loop """lcwa""" +745 16 evaluator """rankbased""" +745 17 dataset """kinships""" +745 17 model """transd""" +745 17 loss """crossentropy""" +745 17 regularizer """no""" +745 17 optimizer """adadelta""" +745 17 training_loop """lcwa""" +745 17 evaluator """rankbased""" +745 18 dataset """kinships""" +745 18 model """transd""" +745 18 loss """crossentropy""" +745 18 regularizer """no""" +745 18 optimizer """adadelta""" +745 18 training_loop """lcwa""" +745 18 evaluator """rankbased""" +745 19 dataset """kinships""" +745 19 model """transd""" +745 19 loss """crossentropy""" +745 19 regularizer """no""" +745 19 optimizer """adadelta""" +745 19 training_loop """lcwa""" +745 19 evaluator """rankbased""" +745 20 dataset """kinships""" +745 20 model """transd""" +745 20 loss """crossentropy""" +745 20 regularizer """no""" +745 20 optimizer """adadelta""" +745 20 training_loop """lcwa""" +745 20 evaluator """rankbased""" +745 21 dataset """kinships""" +745 21 model """transd""" +745 21 loss """crossentropy""" +745 21 regularizer """no""" +745 21 optimizer """adadelta""" +745 21 training_loop """lcwa""" +745 21 evaluator """rankbased""" +745 22 dataset """kinships""" +745 22 model """transd""" +745 22 loss """crossentropy""" +745 22 regularizer """no""" +745 22 optimizer """adadelta""" +745 22 training_loop """lcwa""" +745 22 evaluator """rankbased""" +745 23 dataset """kinships""" +745 23 model """transd""" +745 23 loss """crossentropy""" +745 23 regularizer """no""" +745 23 optimizer """adadelta""" +745 23 training_loop """lcwa""" +745 23 evaluator """rankbased""" 
+745 24 dataset """kinships""" +745 24 model """transd""" +745 24 loss """crossentropy""" +745 24 regularizer """no""" +745 24 optimizer """adadelta""" +745 24 training_loop """lcwa""" +745 24 evaluator """rankbased""" +745 25 dataset """kinships""" +745 25 model """transd""" +745 25 loss """crossentropy""" +745 25 regularizer """no""" +745 25 optimizer """adadelta""" +745 25 training_loop """lcwa""" +745 25 evaluator """rankbased""" +745 26 dataset """kinships""" +745 26 model """transd""" +745 26 loss """crossentropy""" +745 26 regularizer """no""" +745 26 optimizer """adadelta""" +745 26 training_loop """lcwa""" +745 26 evaluator """rankbased""" +745 27 dataset """kinships""" +745 27 model """transd""" +745 27 loss """crossentropy""" +745 27 regularizer """no""" +745 27 optimizer """adadelta""" +745 27 training_loop """lcwa""" +745 27 evaluator """rankbased""" +745 28 dataset """kinships""" +745 28 model """transd""" +745 28 loss """crossentropy""" +745 28 regularizer """no""" +745 28 optimizer """adadelta""" +745 28 training_loop """lcwa""" +745 28 evaluator """rankbased""" +745 29 dataset """kinships""" +745 29 model """transd""" +745 29 loss """crossentropy""" +745 29 regularizer """no""" +745 29 optimizer """adadelta""" +745 29 training_loop """lcwa""" +745 29 evaluator """rankbased""" +745 30 dataset """kinships""" +745 30 model """transd""" +745 30 loss """crossentropy""" +745 30 regularizer """no""" +745 30 optimizer """adadelta""" +745 30 training_loop """lcwa""" +745 30 evaluator """rankbased""" +745 31 dataset """kinships""" +745 31 model """transd""" +745 31 loss """crossentropy""" +745 31 regularizer """no""" +745 31 optimizer """adadelta""" +745 31 training_loop """lcwa""" +745 31 evaluator """rankbased""" +745 32 dataset """kinships""" +745 32 model """transd""" +745 32 loss """crossentropy""" +745 32 regularizer """no""" +745 32 optimizer """adadelta""" +745 32 training_loop """lcwa""" +745 32 evaluator """rankbased""" +745 33 dataset 
"""kinships""" +745 33 model """transd""" +745 33 loss """crossentropy""" +745 33 regularizer """no""" +745 33 optimizer """adadelta""" +745 33 training_loop """lcwa""" +745 33 evaluator """rankbased""" +745 34 dataset """kinships""" +745 34 model """transd""" +745 34 loss """crossentropy""" +745 34 regularizer """no""" +745 34 optimizer """adadelta""" +745 34 training_loop """lcwa""" +745 34 evaluator """rankbased""" +745 35 dataset """kinships""" +745 35 model """transd""" +745 35 loss """crossentropy""" +745 35 regularizer """no""" +745 35 optimizer """adadelta""" +745 35 training_loop """lcwa""" +745 35 evaluator """rankbased""" +745 36 dataset """kinships""" +745 36 model """transd""" +745 36 loss """crossentropy""" +745 36 regularizer """no""" +745 36 optimizer """adadelta""" +745 36 training_loop """lcwa""" +745 36 evaluator """rankbased""" +745 37 dataset """kinships""" +745 37 model """transd""" +745 37 loss """crossentropy""" +745 37 regularizer """no""" +745 37 optimizer """adadelta""" +745 37 training_loop """lcwa""" +745 37 evaluator """rankbased""" +745 38 dataset """kinships""" +745 38 model """transd""" +745 38 loss """crossentropy""" +745 38 regularizer """no""" +745 38 optimizer """adadelta""" +745 38 training_loop """lcwa""" +745 38 evaluator """rankbased""" +745 39 dataset """kinships""" +745 39 model """transd""" +745 39 loss """crossentropy""" +745 39 regularizer """no""" +745 39 optimizer """adadelta""" +745 39 training_loop """lcwa""" +745 39 evaluator """rankbased""" +745 40 dataset """kinships""" +745 40 model """transd""" +745 40 loss """crossentropy""" +745 40 regularizer """no""" +745 40 optimizer """adadelta""" +745 40 training_loop """lcwa""" +745 40 evaluator """rankbased""" +745 41 dataset """kinships""" +745 41 model """transd""" +745 41 loss """crossentropy""" +745 41 regularizer """no""" +745 41 optimizer """adadelta""" +745 41 training_loop """lcwa""" +745 41 evaluator """rankbased""" +745 42 dataset """kinships""" +745 42 model 
"""transd""" +745 42 loss """crossentropy""" +745 42 regularizer """no""" +745 42 optimizer """adadelta""" +745 42 training_loop """lcwa""" +745 42 evaluator """rankbased""" +745 43 dataset """kinships""" +745 43 model """transd""" +745 43 loss """crossentropy""" +745 43 regularizer """no""" +745 43 optimizer """adadelta""" +745 43 training_loop """lcwa""" +745 43 evaluator """rankbased""" +745 44 dataset """kinships""" +745 44 model """transd""" +745 44 loss """crossentropy""" +745 44 regularizer """no""" +745 44 optimizer """adadelta""" +745 44 training_loop """lcwa""" +745 44 evaluator """rankbased""" +745 45 dataset """kinships""" +745 45 model """transd""" +745 45 loss """crossentropy""" +745 45 regularizer """no""" +745 45 optimizer """adadelta""" +745 45 training_loop """lcwa""" +745 45 evaluator """rankbased""" +745 46 dataset """kinships""" +745 46 model """transd""" +745 46 loss """crossentropy""" +745 46 regularizer """no""" +745 46 optimizer """adadelta""" +745 46 training_loop """lcwa""" +745 46 evaluator """rankbased""" +745 47 dataset """kinships""" +745 47 model """transd""" +745 47 loss """crossentropy""" +745 47 regularizer """no""" +745 47 optimizer """adadelta""" +745 47 training_loop """lcwa""" +745 47 evaluator """rankbased""" +745 48 dataset """kinships""" +745 48 model """transd""" +745 48 loss """crossentropy""" +745 48 regularizer """no""" +745 48 optimizer """adadelta""" +745 48 training_loop """lcwa""" +745 48 evaluator """rankbased""" +745 49 dataset """kinships""" +745 49 model """transd""" +745 49 loss """crossentropy""" +745 49 regularizer """no""" +745 49 optimizer """adadelta""" +745 49 training_loop """lcwa""" +745 49 evaluator """rankbased""" +745 50 dataset """kinships""" +745 50 model """transd""" +745 50 loss """crossentropy""" +745 50 regularizer """no""" +745 50 optimizer """adadelta""" +745 50 training_loop """lcwa""" +745 50 evaluator """rankbased""" +745 51 dataset """kinships""" +745 51 model """transd""" +745 51 loss 
"""crossentropy""" +745 51 regularizer """no""" +745 51 optimizer """adadelta""" +745 51 training_loop """lcwa""" +745 51 evaluator """rankbased""" +745 52 dataset """kinships""" +745 52 model """transd""" +745 52 loss """crossentropy""" +745 52 regularizer """no""" +745 52 optimizer """adadelta""" +745 52 training_loop """lcwa""" +745 52 evaluator """rankbased""" +745 53 dataset """kinships""" +745 53 model """transd""" +745 53 loss """crossentropy""" +745 53 regularizer """no""" +745 53 optimizer """adadelta""" +745 53 training_loop """lcwa""" +745 53 evaluator """rankbased""" +745 54 dataset """kinships""" +745 54 model """transd""" +745 54 loss """crossentropy""" +745 54 regularizer """no""" +745 54 optimizer """adadelta""" +745 54 training_loop """lcwa""" +745 54 evaluator """rankbased""" +745 55 dataset """kinships""" +745 55 model """transd""" +745 55 loss """crossentropy""" +745 55 regularizer """no""" +745 55 optimizer """adadelta""" +745 55 training_loop """lcwa""" +745 55 evaluator """rankbased""" +745 56 dataset """kinships""" +745 56 model """transd""" +745 56 loss """crossentropy""" +745 56 regularizer """no""" +745 56 optimizer """adadelta""" +745 56 training_loop """lcwa""" +745 56 evaluator """rankbased""" +745 57 dataset """kinships""" +745 57 model """transd""" +745 57 loss """crossentropy""" +745 57 regularizer """no""" +745 57 optimizer """adadelta""" +745 57 training_loop """lcwa""" +745 57 evaluator """rankbased""" +745 58 dataset """kinships""" +745 58 model """transd""" +745 58 loss """crossentropy""" +745 58 regularizer """no""" +745 58 optimizer """adadelta""" +745 58 training_loop """lcwa""" +745 58 evaluator """rankbased""" +745 59 dataset """kinships""" +745 59 model """transd""" +745 59 loss """crossentropy""" +745 59 regularizer """no""" +745 59 optimizer """adadelta""" +745 59 training_loop """lcwa""" +745 59 evaluator """rankbased""" +745 60 dataset """kinships""" +745 60 model """transd""" +745 60 loss """crossentropy""" +745 60 
regularizer """no""" +745 60 optimizer """adadelta""" +745 60 training_loop """lcwa""" +745 60 evaluator """rankbased""" +745 61 dataset """kinships""" +745 61 model """transd""" +745 61 loss """crossentropy""" +745 61 regularizer """no""" +745 61 optimizer """adadelta""" +745 61 training_loop """lcwa""" +745 61 evaluator """rankbased""" +745 62 dataset """kinships""" +745 62 model """transd""" +745 62 loss """crossentropy""" +745 62 regularizer """no""" +745 62 optimizer """adadelta""" +745 62 training_loop """lcwa""" +745 62 evaluator """rankbased""" +745 63 dataset """kinships""" +745 63 model """transd""" +745 63 loss """crossentropy""" +745 63 regularizer """no""" +745 63 optimizer """adadelta""" +745 63 training_loop """lcwa""" +745 63 evaluator """rankbased""" +745 64 dataset """kinships""" +745 64 model """transd""" +745 64 loss """crossentropy""" +745 64 regularizer """no""" +745 64 optimizer """adadelta""" +745 64 training_loop """lcwa""" +745 64 evaluator """rankbased""" +745 65 dataset """kinships""" +745 65 model """transd""" +745 65 loss """crossentropy""" +745 65 regularizer """no""" +745 65 optimizer """adadelta""" +745 65 training_loop """lcwa""" +745 65 evaluator """rankbased""" +745 66 dataset """kinships""" +745 66 model """transd""" +745 66 loss """crossentropy""" +745 66 regularizer """no""" +745 66 optimizer """adadelta""" +745 66 training_loop """lcwa""" +745 66 evaluator """rankbased""" +745 67 dataset """kinships""" +745 67 model """transd""" +745 67 loss """crossentropy""" +745 67 regularizer """no""" +745 67 optimizer """adadelta""" +745 67 training_loop """lcwa""" +745 67 evaluator """rankbased""" +745 68 dataset """kinships""" +745 68 model """transd""" +745 68 loss """crossentropy""" +745 68 regularizer """no""" +745 68 optimizer """adadelta""" +745 68 training_loop """lcwa""" +745 68 evaluator """rankbased""" +745 69 dataset """kinships""" +745 69 model """transd""" +745 69 loss """crossentropy""" +745 69 regularizer """no""" +745 69 
optimizer """adadelta""" +745 69 training_loop """lcwa""" +745 69 evaluator """rankbased""" +745 70 dataset """kinships""" +745 70 model """transd""" +745 70 loss """crossentropy""" +745 70 regularizer """no""" +745 70 optimizer """adadelta""" +745 70 training_loop """lcwa""" +745 70 evaluator """rankbased""" +745 71 dataset """kinships""" +745 71 model """transd""" +745 71 loss """crossentropy""" +745 71 regularizer """no""" +745 71 optimizer """adadelta""" +745 71 training_loop """lcwa""" +745 71 evaluator """rankbased""" +745 72 dataset """kinships""" +745 72 model """transd""" +745 72 loss """crossentropy""" +745 72 regularizer """no""" +745 72 optimizer """adadelta""" +745 72 training_loop """lcwa""" +745 72 evaluator """rankbased""" +745 73 dataset """kinships""" +745 73 model """transd""" +745 73 loss """crossentropy""" +745 73 regularizer """no""" +745 73 optimizer """adadelta""" +745 73 training_loop """lcwa""" +745 73 evaluator """rankbased""" +745 74 dataset """kinships""" +745 74 model """transd""" +745 74 loss """crossentropy""" +745 74 regularizer """no""" +745 74 optimizer """adadelta""" +745 74 training_loop """lcwa""" +745 74 evaluator """rankbased""" +745 75 dataset """kinships""" +745 75 model """transd""" +745 75 loss """crossentropy""" +745 75 regularizer """no""" +745 75 optimizer """adadelta""" +745 75 training_loop """lcwa""" +745 75 evaluator """rankbased""" +745 76 dataset """kinships""" +745 76 model """transd""" +745 76 loss """crossentropy""" +745 76 regularizer """no""" +745 76 optimizer """adadelta""" +745 76 training_loop """lcwa""" +745 76 evaluator """rankbased""" +745 77 dataset """kinships""" +745 77 model """transd""" +745 77 loss """crossentropy""" +745 77 regularizer """no""" +745 77 optimizer """adadelta""" +745 77 training_loop """lcwa""" +745 77 evaluator """rankbased""" +745 78 dataset """kinships""" +745 78 model """transd""" +745 78 loss """crossentropy""" +745 78 regularizer """no""" +745 78 optimizer """adadelta""" 
+745 78 training_loop """lcwa""" +745 78 evaluator """rankbased""" +745 79 dataset """kinships""" +745 79 model """transd""" +745 79 loss """crossentropy""" +745 79 regularizer """no""" +745 79 optimizer """adadelta""" +745 79 training_loop """lcwa""" +745 79 evaluator """rankbased""" +745 80 dataset """kinships""" +745 80 model """transd""" +745 80 loss """crossentropy""" +745 80 regularizer """no""" +745 80 optimizer """adadelta""" +745 80 training_loop """lcwa""" +745 80 evaluator """rankbased""" +745 81 dataset """kinships""" +745 81 model """transd""" +745 81 loss """crossentropy""" +745 81 regularizer """no""" +745 81 optimizer """adadelta""" +745 81 training_loop """lcwa""" +745 81 evaluator """rankbased""" +745 82 dataset """kinships""" +745 82 model """transd""" +745 82 loss """crossentropy""" +745 82 regularizer """no""" +745 82 optimizer """adadelta""" +745 82 training_loop """lcwa""" +745 82 evaluator """rankbased""" +745 83 dataset """kinships""" +745 83 model """transd""" +745 83 loss """crossentropy""" +745 83 regularizer """no""" +745 83 optimizer """adadelta""" +745 83 training_loop """lcwa""" +745 83 evaluator """rankbased""" +745 84 dataset """kinships""" +745 84 model """transd""" +745 84 loss """crossentropy""" +745 84 regularizer """no""" +745 84 optimizer """adadelta""" +745 84 training_loop """lcwa""" +745 84 evaluator """rankbased""" +745 85 dataset """kinships""" +745 85 model """transd""" +745 85 loss """crossentropy""" +745 85 regularizer """no""" +745 85 optimizer """adadelta""" +745 85 training_loop """lcwa""" +745 85 evaluator """rankbased""" +745 86 dataset """kinships""" +745 86 model """transd""" +745 86 loss """crossentropy""" +745 86 regularizer """no""" +745 86 optimizer """adadelta""" +745 86 training_loop """lcwa""" +745 86 evaluator """rankbased""" +745 87 dataset """kinships""" +745 87 model """transd""" +745 87 loss """crossentropy""" +745 87 regularizer """no""" +745 87 optimizer """adadelta""" +745 87 training_loop 
"""lcwa""" +745 87 evaluator """rankbased""" +745 88 dataset """kinships""" +745 88 model """transd""" +745 88 loss """crossentropy""" +745 88 regularizer """no""" +745 88 optimizer """adadelta""" +745 88 training_loop """lcwa""" +745 88 evaluator """rankbased""" +745 89 dataset """kinships""" +745 89 model """transd""" +745 89 loss """crossentropy""" +745 89 regularizer """no""" +745 89 optimizer """adadelta""" +745 89 training_loop """lcwa""" +745 89 evaluator """rankbased""" +745 90 dataset """kinships""" +745 90 model """transd""" +745 90 loss """crossentropy""" +745 90 regularizer """no""" +745 90 optimizer """adadelta""" +745 90 training_loop """lcwa""" +745 90 evaluator """rankbased""" +745 91 dataset """kinships""" +745 91 model """transd""" +745 91 loss """crossentropy""" +745 91 regularizer """no""" +745 91 optimizer """adadelta""" +745 91 training_loop """lcwa""" +745 91 evaluator """rankbased""" +745 92 dataset """kinships""" +745 92 model """transd""" +745 92 loss """crossentropy""" +745 92 regularizer """no""" +745 92 optimizer """adadelta""" +745 92 training_loop """lcwa""" +745 92 evaluator """rankbased""" +745 93 dataset """kinships""" +745 93 model """transd""" +745 93 loss """crossentropy""" +745 93 regularizer """no""" +745 93 optimizer """adadelta""" +745 93 training_loop """lcwa""" +745 93 evaluator """rankbased""" +745 94 dataset """kinships""" +745 94 model """transd""" +745 94 loss """crossentropy""" +745 94 regularizer """no""" +745 94 optimizer """adadelta""" +745 94 training_loop """lcwa""" +745 94 evaluator """rankbased""" +745 95 dataset """kinships""" +745 95 model """transd""" +745 95 loss """crossentropy""" +745 95 regularizer """no""" +745 95 optimizer """adadelta""" +745 95 training_loop """lcwa""" +745 95 evaluator """rankbased""" +745 96 dataset """kinships""" +745 96 model """transd""" +745 96 loss """crossentropy""" +745 96 regularizer """no""" +745 96 optimizer """adadelta""" +745 96 training_loop """lcwa""" +745 96 evaluator 
"""rankbased""" +745 97 dataset """kinships""" +745 97 model """transd""" +745 97 loss """crossentropy""" +745 97 regularizer """no""" +745 97 optimizer """adadelta""" +745 97 training_loop """lcwa""" +745 97 evaluator """rankbased""" +745 98 dataset """kinships""" +745 98 model """transd""" +745 98 loss """crossentropy""" +745 98 regularizer """no""" +745 98 optimizer """adadelta""" +745 98 training_loop """lcwa""" +745 98 evaluator """rankbased""" +745 99 dataset """kinships""" +745 99 model """transd""" +745 99 loss """crossentropy""" +745 99 regularizer """no""" +745 99 optimizer """adadelta""" +745 99 training_loop """lcwa""" +745 99 evaluator """rankbased""" +745 100 dataset """kinships""" +745 100 model """transd""" +745 100 loss """crossentropy""" +745 100 regularizer """no""" +745 100 optimizer """adadelta""" +745 100 training_loop """lcwa""" +745 100 evaluator """rankbased""" +746 1 model.embedding_dim 1.0 +746 1 model.relation_dim 0.0 +746 1 training.batch_size 0.0 +746 1 training.label_smoothing 0.21205711246542902 +746 2 model.embedding_dim 0.0 +746 2 model.relation_dim 1.0 +746 2 training.batch_size 1.0 +746 2 training.label_smoothing 0.08789200356693339 +746 3 model.embedding_dim 2.0 +746 3 model.relation_dim 2.0 +746 3 training.batch_size 0.0 +746 3 training.label_smoothing 0.19201610073851458 +746 4 model.embedding_dim 0.0 +746 4 model.relation_dim 1.0 +746 4 training.batch_size 0.0 +746 4 training.label_smoothing 0.7309141226666058 +746 5 model.embedding_dim 1.0 +746 5 model.relation_dim 0.0 +746 5 training.batch_size 2.0 +746 5 training.label_smoothing 0.0011122431304917836 +746 6 model.embedding_dim 1.0 +746 6 model.relation_dim 0.0 +746 6 training.batch_size 1.0 +746 6 training.label_smoothing 0.010402279062182685 +746 7 model.embedding_dim 0.0 +746 7 model.relation_dim 1.0 +746 7 training.batch_size 2.0 +746 7 training.label_smoothing 0.02678499296717893 +746 8 model.embedding_dim 2.0 +746 8 model.relation_dim 2.0 +746 8 training.batch_size 
0.0 +746 8 training.label_smoothing 0.027539186950845347 +746 9 model.embedding_dim 0.0 +746 9 model.relation_dim 2.0 +746 9 training.batch_size 1.0 +746 9 training.label_smoothing 0.005373853974532999 +746 10 model.embedding_dim 1.0 +746 10 model.relation_dim 2.0 +746 10 training.batch_size 2.0 +746 10 training.label_smoothing 0.008043105353599525 +746 11 model.embedding_dim 2.0 +746 11 model.relation_dim 2.0 +746 11 training.batch_size 1.0 +746 11 training.label_smoothing 0.6147609176224363 +746 12 model.embedding_dim 0.0 +746 12 model.relation_dim 2.0 +746 12 training.batch_size 1.0 +746 12 training.label_smoothing 0.432817554346988 +746 13 model.embedding_dim 0.0 +746 13 model.relation_dim 1.0 +746 13 training.batch_size 0.0 +746 13 training.label_smoothing 0.2544123768887648 +746 14 model.embedding_dim 1.0 +746 14 model.relation_dim 1.0 +746 14 training.batch_size 0.0 +746 14 training.label_smoothing 0.0021066068566814 +746 15 model.embedding_dim 2.0 +746 15 model.relation_dim 0.0 +746 15 training.batch_size 0.0 +746 15 training.label_smoothing 0.675724233271204 +746 16 model.embedding_dim 1.0 +746 16 model.relation_dim 1.0 +746 16 training.batch_size 2.0 +746 16 training.label_smoothing 0.005875814963234664 +746 17 model.embedding_dim 0.0 +746 17 model.relation_dim 2.0 +746 17 training.batch_size 1.0 +746 17 training.label_smoothing 0.017819085366728445 +746 18 model.embedding_dim 0.0 +746 18 model.relation_dim 2.0 +746 18 training.batch_size 2.0 +746 18 training.label_smoothing 0.1362006442160635 +746 19 model.embedding_dim 2.0 +746 19 model.relation_dim 2.0 +746 19 training.batch_size 1.0 +746 19 training.label_smoothing 0.2588343710935218 +746 20 model.embedding_dim 1.0 +746 20 model.relation_dim 2.0 +746 20 training.batch_size 0.0 +746 20 training.label_smoothing 0.015859796695035438 +746 21 model.embedding_dim 2.0 +746 21 model.relation_dim 2.0 +746 21 training.batch_size 2.0 +746 21 training.label_smoothing 0.4721169417624175 +746 22 model.embedding_dim 
0.0 +746 22 model.relation_dim 0.0 +746 22 training.batch_size 0.0 +746 22 training.label_smoothing 0.15114604676902918 +746 23 model.embedding_dim 2.0 +746 23 model.relation_dim 0.0 +746 23 training.batch_size 0.0 +746 23 training.label_smoothing 0.0011817080907673028 +746 24 model.embedding_dim 1.0 +746 24 model.relation_dim 1.0 +746 24 training.batch_size 1.0 +746 24 training.label_smoothing 0.17751989123523496 +746 25 model.embedding_dim 1.0 +746 25 model.relation_dim 1.0 +746 25 training.batch_size 1.0 +746 25 training.label_smoothing 0.7437090685634045 +746 26 model.embedding_dim 0.0 +746 26 model.relation_dim 2.0 +746 26 training.batch_size 2.0 +746 26 training.label_smoothing 0.025595568700562715 +746 27 model.embedding_dim 2.0 +746 27 model.relation_dim 2.0 +746 27 training.batch_size 2.0 +746 27 training.label_smoothing 0.008461890755188616 +746 28 model.embedding_dim 2.0 +746 28 model.relation_dim 1.0 +746 28 training.batch_size 2.0 +746 28 training.label_smoothing 0.003622943078989667 +746 29 model.embedding_dim 0.0 +746 29 model.relation_dim 1.0 +746 29 training.batch_size 0.0 +746 29 training.label_smoothing 0.005793147193498854 +746 30 model.embedding_dim 2.0 +746 30 model.relation_dim 1.0 +746 30 training.batch_size 2.0 +746 30 training.label_smoothing 0.017363812778381366 +746 31 model.embedding_dim 2.0 +746 31 model.relation_dim 1.0 +746 31 training.batch_size 1.0 +746 31 training.label_smoothing 0.0060569219541595325 +746 32 model.embedding_dim 2.0 +746 32 model.relation_dim 2.0 +746 32 training.batch_size 1.0 +746 32 training.label_smoothing 0.05550234277837157 +746 33 model.embedding_dim 1.0 +746 33 model.relation_dim 0.0 +746 33 training.batch_size 0.0 +746 33 training.label_smoothing 0.0225075970910146 +746 34 model.embedding_dim 1.0 +746 34 model.relation_dim 1.0 +746 34 training.batch_size 1.0 +746 34 training.label_smoothing 0.017796619816691287 +746 35 model.embedding_dim 0.0 +746 35 model.relation_dim 1.0 +746 35 training.batch_size 0.0 
+746 35 training.label_smoothing 0.004952728574620004 +746 36 model.embedding_dim 2.0 +746 36 model.relation_dim 0.0 +746 36 training.batch_size 0.0 +746 36 training.label_smoothing 0.645727536161494 +746 37 model.embedding_dim 1.0 +746 37 model.relation_dim 2.0 +746 37 training.batch_size 2.0 +746 37 training.label_smoothing 0.01288907657845351 +746 38 model.embedding_dim 0.0 +746 38 model.relation_dim 2.0 +746 38 training.batch_size 1.0 +746 38 training.label_smoothing 0.00550457820982087 +746 39 model.embedding_dim 0.0 +746 39 model.relation_dim 0.0 +746 39 training.batch_size 2.0 +746 39 training.label_smoothing 0.47406285549803256 +746 40 model.embedding_dim 2.0 +746 40 model.relation_dim 0.0 +746 40 training.batch_size 0.0 +746 40 training.label_smoothing 0.007989969110822392 +746 41 model.embedding_dim 0.0 +746 41 model.relation_dim 1.0 +746 41 training.batch_size 0.0 +746 41 training.label_smoothing 0.21620642545098695 +746 42 model.embedding_dim 2.0 +746 42 model.relation_dim 0.0 +746 42 training.batch_size 1.0 +746 42 training.label_smoothing 0.36397795897970586 +746 43 model.embedding_dim 2.0 +746 43 model.relation_dim 0.0 +746 43 training.batch_size 2.0 +746 43 training.label_smoothing 0.016713216624596603 +746 44 model.embedding_dim 2.0 +746 44 model.relation_dim 0.0 +746 44 training.batch_size 0.0 +746 44 training.label_smoothing 0.007868507397439484 +746 45 model.embedding_dim 2.0 +746 45 model.relation_dim 1.0 +746 45 training.batch_size 2.0 +746 45 training.label_smoothing 0.003659831806431678 +746 46 model.embedding_dim 1.0 +746 46 model.relation_dim 2.0 +746 46 training.batch_size 1.0 +746 46 training.label_smoothing 0.0014931222885303324 +746 47 model.embedding_dim 2.0 +746 47 model.relation_dim 2.0 +746 47 training.batch_size 2.0 +746 47 training.label_smoothing 0.0010585022028629589 +746 48 model.embedding_dim 2.0 +746 48 model.relation_dim 2.0 +746 48 training.batch_size 1.0 +746 48 training.label_smoothing 0.05012066361815678 +746 49 
model.embedding_dim 1.0 +746 49 model.relation_dim 1.0 +746 49 training.batch_size 1.0 +746 49 training.label_smoothing 0.10834227045549366 +746 50 model.embedding_dim 2.0 +746 50 model.relation_dim 0.0 +746 50 training.batch_size 1.0 +746 50 training.label_smoothing 0.0014263576438926708 +746 51 model.embedding_dim 1.0 +746 51 model.relation_dim 2.0 +746 51 training.batch_size 0.0 +746 51 training.label_smoothing 0.002219883251730831 +746 52 model.embedding_dim 2.0 +746 52 model.relation_dim 0.0 +746 52 training.batch_size 2.0 +746 52 training.label_smoothing 0.00545374527765822 +746 53 model.embedding_dim 1.0 +746 53 model.relation_dim 1.0 +746 53 training.batch_size 0.0 +746 53 training.label_smoothing 0.001017741350041322 +746 54 model.embedding_dim 2.0 +746 54 model.relation_dim 2.0 +746 54 training.batch_size 1.0 +746 54 training.label_smoothing 0.07831453370663184 +746 55 model.embedding_dim 0.0 +746 55 model.relation_dim 2.0 +746 55 training.batch_size 1.0 +746 55 training.label_smoothing 0.01558417697498905 +746 56 model.embedding_dim 2.0 +746 56 model.relation_dim 1.0 +746 56 training.batch_size 1.0 +746 56 training.label_smoothing 0.00811989738556048 +746 57 model.embedding_dim 0.0 +746 57 model.relation_dim 0.0 +746 57 training.batch_size 2.0 +746 57 training.label_smoothing 0.4200477865893804 +746 58 model.embedding_dim 0.0 +746 58 model.relation_dim 2.0 +746 58 training.batch_size 2.0 +746 58 training.label_smoothing 0.20472584902876415 +746 59 model.embedding_dim 0.0 +746 59 model.relation_dim 0.0 +746 59 training.batch_size 1.0 +746 59 training.label_smoothing 0.3669939220687502 +746 60 model.embedding_dim 2.0 +746 60 model.relation_dim 0.0 +746 60 training.batch_size 2.0 +746 60 training.label_smoothing 0.22092274953969437 +746 61 model.embedding_dim 2.0 +746 61 model.relation_dim 0.0 +746 61 training.batch_size 1.0 +746 61 training.label_smoothing 0.002201775322818918 +746 62 model.embedding_dim 1.0 +746 62 model.relation_dim 0.0 +746 62 
training.batch_size 0.0 +746 62 training.label_smoothing 0.5568475691783105 +746 63 model.embedding_dim 1.0 +746 63 model.relation_dim 0.0 +746 63 training.batch_size 1.0 +746 63 training.label_smoothing 0.21713357908612893 +746 64 model.embedding_dim 2.0 +746 64 model.relation_dim 1.0 +746 64 training.batch_size 1.0 +746 64 training.label_smoothing 0.028468782663644387 +746 65 model.embedding_dim 0.0 +746 65 model.relation_dim 1.0 +746 65 training.batch_size 1.0 +746 65 training.label_smoothing 0.5197066695113557 +746 66 model.embedding_dim 2.0 +746 66 model.relation_dim 2.0 +746 66 training.batch_size 1.0 +746 66 training.label_smoothing 0.0038038339627910057 +746 67 model.embedding_dim 0.0 +746 67 model.relation_dim 0.0 +746 67 training.batch_size 0.0 +746 67 training.label_smoothing 0.028234486901629854 +746 68 model.embedding_dim 2.0 +746 68 model.relation_dim 0.0 +746 68 training.batch_size 2.0 +746 68 training.label_smoothing 0.02314550918676373 +746 69 model.embedding_dim 2.0 +746 69 model.relation_dim 0.0 +746 69 training.batch_size 1.0 +746 69 training.label_smoothing 0.48440443570956365 +746 70 model.embedding_dim 0.0 +746 70 model.relation_dim 0.0 +746 70 training.batch_size 1.0 +746 70 training.label_smoothing 0.011017683152951162 +746 71 model.embedding_dim 0.0 +746 71 model.relation_dim 0.0 +746 71 training.batch_size 2.0 +746 71 training.label_smoothing 0.0029103745059279847 +746 72 model.embedding_dim 2.0 +746 72 model.relation_dim 1.0 +746 72 training.batch_size 0.0 +746 72 training.label_smoothing 0.0462877491973818 +746 73 model.embedding_dim 1.0 +746 73 model.relation_dim 0.0 +746 73 training.batch_size 1.0 +746 73 training.label_smoothing 0.05303446771634057 +746 74 model.embedding_dim 0.0 +746 74 model.relation_dim 0.0 +746 74 training.batch_size 0.0 +746 74 training.label_smoothing 0.4486313076122553 +746 75 model.embedding_dim 1.0 +746 75 model.relation_dim 2.0 +746 75 training.batch_size 2.0 +746 75 training.label_smoothing 
0.12896716643797787 +746 76 model.embedding_dim 2.0 +746 76 model.relation_dim 2.0 +746 76 training.batch_size 2.0 +746 76 training.label_smoothing 0.0022138990419418147 +746 77 model.embedding_dim 1.0 +746 77 model.relation_dim 2.0 +746 77 training.batch_size 0.0 +746 77 training.label_smoothing 0.061359396072479186 +746 78 model.embedding_dim 1.0 +746 78 model.relation_dim 0.0 +746 78 training.batch_size 1.0 +746 78 training.label_smoothing 0.005247451862770871 +746 79 model.embedding_dim 0.0 +746 79 model.relation_dim 2.0 +746 79 training.batch_size 0.0 +746 79 training.label_smoothing 0.01836842271547747 +746 80 model.embedding_dim 1.0 +746 80 model.relation_dim 0.0 +746 80 training.batch_size 1.0 +746 80 training.label_smoothing 0.0011782461486227946 +746 81 model.embedding_dim 2.0 +746 81 model.relation_dim 0.0 +746 81 training.batch_size 0.0 +746 81 training.label_smoothing 0.35271204757422203 +746 82 model.embedding_dim 2.0 +746 82 model.relation_dim 0.0 +746 82 training.batch_size 1.0 +746 82 training.label_smoothing 0.2508135237250112 +746 83 model.embedding_dim 1.0 +746 83 model.relation_dim 1.0 +746 83 training.batch_size 1.0 +746 83 training.label_smoothing 0.0019864152076388905 +746 84 model.embedding_dim 2.0 +746 84 model.relation_dim 1.0 +746 84 training.batch_size 1.0 +746 84 training.label_smoothing 0.27399097728217603 +746 85 model.embedding_dim 1.0 +746 85 model.relation_dim 0.0 +746 85 training.batch_size 2.0 +746 85 training.label_smoothing 0.004890168361965502 +746 86 model.embedding_dim 2.0 +746 86 model.relation_dim 2.0 +746 86 training.batch_size 2.0 +746 86 training.label_smoothing 0.00297404984502797 +746 87 model.embedding_dim 2.0 +746 87 model.relation_dim 0.0 +746 87 training.batch_size 0.0 +746 87 training.label_smoothing 0.001165510401600265 +746 88 model.embedding_dim 2.0 +746 88 model.relation_dim 2.0 +746 88 training.batch_size 0.0 +746 88 training.label_smoothing 0.042149741056964306 +746 89 model.embedding_dim 2.0 +746 89 
model.relation_dim 1.0 +746 89 training.batch_size 1.0 +746 89 training.label_smoothing 0.7954965836061539 +746 90 model.embedding_dim 2.0 +746 90 model.relation_dim 1.0 +746 90 training.batch_size 0.0 +746 90 training.label_smoothing 0.055906202148192786 +746 91 model.embedding_dim 2.0 +746 91 model.relation_dim 2.0 +746 91 training.batch_size 2.0 +746 91 training.label_smoothing 0.16455500871468526 +746 92 model.embedding_dim 1.0 +746 92 model.relation_dim 2.0 +746 92 training.batch_size 2.0 +746 92 training.label_smoothing 0.29232582697725884 +746 93 model.embedding_dim 2.0 +746 93 model.relation_dim 1.0 +746 93 training.batch_size 2.0 +746 93 training.label_smoothing 0.0093153963798776 +746 94 model.embedding_dim 1.0 +746 94 model.relation_dim 0.0 +746 94 training.batch_size 2.0 +746 94 training.label_smoothing 0.0027478919956650705 +746 95 model.embedding_dim 2.0 +746 95 model.relation_dim 0.0 +746 95 training.batch_size 1.0 +746 95 training.label_smoothing 0.984283251752092 +746 96 model.embedding_dim 0.0 +746 96 model.relation_dim 0.0 +746 96 training.batch_size 1.0 +746 96 training.label_smoothing 0.03706110188250793 +746 97 model.embedding_dim 0.0 +746 97 model.relation_dim 2.0 +746 97 training.batch_size 1.0 +746 97 training.label_smoothing 0.0017635534320547756 +746 98 model.embedding_dim 0.0 +746 98 model.relation_dim 2.0 +746 98 training.batch_size 1.0 +746 98 training.label_smoothing 0.007860881053471493 +746 99 model.embedding_dim 2.0 +746 99 model.relation_dim 0.0 +746 99 training.batch_size 2.0 +746 99 training.label_smoothing 0.6730982325321099 +746 100 model.embedding_dim 2.0 +746 100 model.relation_dim 1.0 +746 100 training.batch_size 1.0 +746 100 training.label_smoothing 0.004011839396163046 +746 1 dataset """kinships""" +746 1 model """transd""" +746 1 loss """crossentropy""" +746 1 regularizer """no""" +746 1 optimizer """adadelta""" +746 1 training_loop """lcwa""" +746 1 evaluator """rankbased""" +746 2 dataset """kinships""" +746 2 model 
"""transd""" +746 2 loss """crossentropy""" +746 2 regularizer """no""" +746 2 optimizer """adadelta""" +746 2 training_loop """lcwa""" +746 2 evaluator """rankbased""" +746 3 dataset """kinships""" +746 3 model """transd""" +746 3 loss """crossentropy""" +746 3 regularizer """no""" +746 3 optimizer """adadelta""" +746 3 training_loop """lcwa""" +746 3 evaluator """rankbased""" +746 4 dataset """kinships""" +746 4 model """transd""" +746 4 loss """crossentropy""" +746 4 regularizer """no""" +746 4 optimizer """adadelta""" +746 4 training_loop """lcwa""" +746 4 evaluator """rankbased""" +746 5 dataset """kinships""" +746 5 model """transd""" +746 5 loss """crossentropy""" +746 5 regularizer """no""" +746 5 optimizer """adadelta""" +746 5 training_loop """lcwa""" +746 5 evaluator """rankbased""" +746 6 dataset """kinships""" +746 6 model """transd""" +746 6 loss """crossentropy""" +746 6 regularizer """no""" +746 6 optimizer """adadelta""" +746 6 training_loop """lcwa""" +746 6 evaluator """rankbased""" +746 7 dataset """kinships""" +746 7 model """transd""" +746 7 loss """crossentropy""" +746 7 regularizer """no""" +746 7 optimizer """adadelta""" +746 7 training_loop """lcwa""" +746 7 evaluator """rankbased""" +746 8 dataset """kinships""" +746 8 model """transd""" +746 8 loss """crossentropy""" +746 8 regularizer """no""" +746 8 optimizer """adadelta""" +746 8 training_loop """lcwa""" +746 8 evaluator """rankbased""" +746 9 dataset """kinships""" +746 9 model """transd""" +746 9 loss """crossentropy""" +746 9 regularizer """no""" +746 9 optimizer """adadelta""" +746 9 training_loop """lcwa""" +746 9 evaluator """rankbased""" +746 10 dataset """kinships""" +746 10 model """transd""" +746 10 loss """crossentropy""" +746 10 regularizer """no""" +746 10 optimizer """adadelta""" +746 10 training_loop """lcwa""" +746 10 evaluator """rankbased""" +746 11 dataset """kinships""" +746 11 model """transd""" +746 11 loss """crossentropy""" +746 11 regularizer """no""" +746 11 
optimizer """adadelta""" +746 11 training_loop """lcwa""" +746 11 evaluator """rankbased""" +746 12 dataset """kinships""" +746 12 model """transd""" +746 12 loss """crossentropy""" +746 12 regularizer """no""" +746 12 optimizer """adadelta""" +746 12 training_loop """lcwa""" +746 12 evaluator """rankbased""" +746 13 dataset """kinships""" +746 13 model """transd""" +746 13 loss """crossentropy""" +746 13 regularizer """no""" +746 13 optimizer """adadelta""" +746 13 training_loop """lcwa""" +746 13 evaluator """rankbased""" +746 14 dataset """kinships""" +746 14 model """transd""" +746 14 loss """crossentropy""" +746 14 regularizer """no""" +746 14 optimizer """adadelta""" +746 14 training_loop """lcwa""" +746 14 evaluator """rankbased""" +746 15 dataset """kinships""" +746 15 model """transd""" +746 15 loss """crossentropy""" +746 15 regularizer """no""" +746 15 optimizer """adadelta""" +746 15 training_loop """lcwa""" +746 15 evaluator """rankbased""" +746 16 dataset """kinships""" +746 16 model """transd""" +746 16 loss """crossentropy""" +746 16 regularizer """no""" +746 16 optimizer """adadelta""" +746 16 training_loop """lcwa""" +746 16 evaluator """rankbased""" +746 17 dataset """kinships""" +746 17 model """transd""" +746 17 loss """crossentropy""" +746 17 regularizer """no""" +746 17 optimizer """adadelta""" +746 17 training_loop """lcwa""" +746 17 evaluator """rankbased""" +746 18 dataset """kinships""" +746 18 model """transd""" +746 18 loss """crossentropy""" +746 18 regularizer """no""" +746 18 optimizer """adadelta""" +746 18 training_loop """lcwa""" +746 18 evaluator """rankbased""" +746 19 dataset """kinships""" +746 19 model """transd""" +746 19 loss """crossentropy""" +746 19 regularizer """no""" +746 19 optimizer """adadelta""" +746 19 training_loop """lcwa""" +746 19 evaluator """rankbased""" +746 20 dataset """kinships""" +746 20 model """transd""" +746 20 loss """crossentropy""" +746 20 regularizer """no""" +746 20 optimizer """adadelta""" 
+746 20 training_loop """lcwa""" +746 20 evaluator """rankbased""" +746 21 dataset """kinships""" +746 21 model """transd""" +746 21 loss """crossentropy""" +746 21 regularizer """no""" +746 21 optimizer """adadelta""" +746 21 training_loop """lcwa""" +746 21 evaluator """rankbased""" +746 22 dataset """kinships""" +746 22 model """transd""" +746 22 loss """crossentropy""" +746 22 regularizer """no""" +746 22 optimizer """adadelta""" +746 22 training_loop """lcwa""" +746 22 evaluator """rankbased""" +746 23 dataset """kinships""" +746 23 model """transd""" +746 23 loss """crossentropy""" +746 23 regularizer """no""" +746 23 optimizer """adadelta""" +746 23 training_loop """lcwa""" +746 23 evaluator """rankbased""" +746 24 dataset """kinships""" +746 24 model """transd""" +746 24 loss """crossentropy""" +746 24 regularizer """no""" +746 24 optimizer """adadelta""" +746 24 training_loop """lcwa""" +746 24 evaluator """rankbased""" +746 25 dataset """kinships""" +746 25 model """transd""" +746 25 loss """crossentropy""" +746 25 regularizer """no""" +746 25 optimizer """adadelta""" +746 25 training_loop """lcwa""" +746 25 evaluator """rankbased""" +746 26 dataset """kinships""" +746 26 model """transd""" +746 26 loss """crossentropy""" +746 26 regularizer """no""" +746 26 optimizer """adadelta""" +746 26 training_loop """lcwa""" +746 26 evaluator """rankbased""" +746 27 dataset """kinships""" +746 27 model """transd""" +746 27 loss """crossentropy""" +746 27 regularizer """no""" +746 27 optimizer """adadelta""" +746 27 training_loop """lcwa""" +746 27 evaluator """rankbased""" +746 28 dataset """kinships""" +746 28 model """transd""" +746 28 loss """crossentropy""" +746 28 regularizer """no""" +746 28 optimizer """adadelta""" +746 28 training_loop """lcwa""" +746 28 evaluator """rankbased""" +746 29 dataset """kinships""" +746 29 model """transd""" +746 29 loss """crossentropy""" +746 29 regularizer """no""" +746 29 optimizer """adadelta""" +746 29 training_loop 
"""lcwa""" +746 29 evaluator """rankbased""" +746 30 dataset """kinships""" +746 30 model """transd""" +746 30 loss """crossentropy""" +746 30 regularizer """no""" +746 30 optimizer """adadelta""" +746 30 training_loop """lcwa""" +746 30 evaluator """rankbased""" +746 31 dataset """kinships""" +746 31 model """transd""" +746 31 loss """crossentropy""" +746 31 regularizer """no""" +746 31 optimizer """adadelta""" +746 31 training_loop """lcwa""" +746 31 evaluator """rankbased""" +746 32 dataset """kinships""" +746 32 model """transd""" +746 32 loss """crossentropy""" +746 32 regularizer """no""" +746 32 optimizer """adadelta""" +746 32 training_loop """lcwa""" +746 32 evaluator """rankbased""" +746 33 dataset """kinships""" +746 33 model """transd""" +746 33 loss """crossentropy""" +746 33 regularizer """no""" +746 33 optimizer """adadelta""" +746 33 training_loop """lcwa""" +746 33 evaluator """rankbased""" +746 34 dataset """kinships""" +746 34 model """transd""" +746 34 loss """crossentropy""" +746 34 regularizer """no""" +746 34 optimizer """adadelta""" +746 34 training_loop """lcwa""" +746 34 evaluator """rankbased""" +746 35 dataset """kinships""" +746 35 model """transd""" +746 35 loss """crossentropy""" +746 35 regularizer """no""" +746 35 optimizer """adadelta""" +746 35 training_loop """lcwa""" +746 35 evaluator """rankbased""" +746 36 dataset """kinships""" +746 36 model """transd""" +746 36 loss """crossentropy""" +746 36 regularizer """no""" +746 36 optimizer """adadelta""" +746 36 training_loop """lcwa""" +746 36 evaluator """rankbased""" +746 37 dataset """kinships""" +746 37 model """transd""" +746 37 loss """crossentropy""" +746 37 regularizer """no""" +746 37 optimizer """adadelta""" +746 37 training_loop """lcwa""" +746 37 evaluator """rankbased""" +746 38 dataset """kinships""" +746 38 model """transd""" +746 38 loss """crossentropy""" +746 38 regularizer """no""" +746 38 optimizer """adadelta""" +746 38 training_loop """lcwa""" +746 38 evaluator 
"""rankbased""" +746 39 dataset """kinships""" +746 39 model """transd""" +746 39 loss """crossentropy""" +746 39 regularizer """no""" +746 39 optimizer """adadelta""" +746 39 training_loop """lcwa""" +746 39 evaluator """rankbased""" +746 40 dataset """kinships""" +746 40 model """transd""" +746 40 loss """crossentropy""" +746 40 regularizer """no""" +746 40 optimizer """adadelta""" +746 40 training_loop """lcwa""" +746 40 evaluator """rankbased""" +746 41 dataset """kinships""" +746 41 model """transd""" +746 41 loss """crossentropy""" +746 41 regularizer """no""" +746 41 optimizer """adadelta""" +746 41 training_loop """lcwa""" +746 41 evaluator """rankbased""" +746 42 dataset """kinships""" +746 42 model """transd""" +746 42 loss """crossentropy""" +746 42 regularizer """no""" +746 42 optimizer """adadelta""" +746 42 training_loop """lcwa""" +746 42 evaluator """rankbased""" +746 43 dataset """kinships""" +746 43 model """transd""" +746 43 loss """crossentropy""" +746 43 regularizer """no""" +746 43 optimizer """adadelta""" +746 43 training_loop """lcwa""" +746 43 evaluator """rankbased""" +746 44 dataset """kinships""" +746 44 model """transd""" +746 44 loss """crossentropy""" +746 44 regularizer """no""" +746 44 optimizer """adadelta""" +746 44 training_loop """lcwa""" +746 44 evaluator """rankbased""" +746 45 dataset """kinships""" +746 45 model """transd""" +746 45 loss """crossentropy""" +746 45 regularizer """no""" +746 45 optimizer """adadelta""" +746 45 training_loop """lcwa""" +746 45 evaluator """rankbased""" +746 46 dataset """kinships""" +746 46 model """transd""" +746 46 loss """crossentropy""" +746 46 regularizer """no""" +746 46 optimizer """adadelta""" +746 46 training_loop """lcwa""" +746 46 evaluator """rankbased""" +746 47 dataset """kinships""" +746 47 model """transd""" +746 47 loss """crossentropy""" +746 47 regularizer """no""" +746 47 optimizer """adadelta""" +746 47 training_loop """lcwa""" +746 47 evaluator """rankbased""" +746 48 
dataset """kinships""" +746 48 model """transd""" +746 48 loss """crossentropy""" +746 48 regularizer """no""" +746 48 optimizer """adadelta""" +746 48 training_loop """lcwa""" +746 48 evaluator """rankbased""" +746 49 dataset """kinships""" +746 49 model """transd""" +746 49 loss """crossentropy""" +746 49 regularizer """no""" +746 49 optimizer """adadelta""" +746 49 training_loop """lcwa""" +746 49 evaluator """rankbased""" +746 50 dataset """kinships""" +746 50 model """transd""" +746 50 loss """crossentropy""" +746 50 regularizer """no""" +746 50 optimizer """adadelta""" +746 50 training_loop """lcwa""" +746 50 evaluator """rankbased""" +746 51 dataset """kinships""" +746 51 model """transd""" +746 51 loss """crossentropy""" +746 51 regularizer """no""" +746 51 optimizer """adadelta""" +746 51 training_loop """lcwa""" +746 51 evaluator """rankbased""" +746 52 dataset """kinships""" +746 52 model """transd""" +746 52 loss """crossentropy""" +746 52 regularizer """no""" +746 52 optimizer """adadelta""" +746 52 training_loop """lcwa""" +746 52 evaluator """rankbased""" +746 53 dataset """kinships""" +746 53 model """transd""" +746 53 loss """crossentropy""" +746 53 regularizer """no""" +746 53 optimizer """adadelta""" +746 53 training_loop """lcwa""" +746 53 evaluator """rankbased""" +746 54 dataset """kinships""" +746 54 model """transd""" +746 54 loss """crossentropy""" +746 54 regularizer """no""" +746 54 optimizer """adadelta""" +746 54 training_loop """lcwa""" +746 54 evaluator """rankbased""" +746 55 dataset """kinships""" +746 55 model """transd""" +746 55 loss """crossentropy""" +746 55 regularizer """no""" +746 55 optimizer """adadelta""" +746 55 training_loop """lcwa""" +746 55 evaluator """rankbased""" +746 56 dataset """kinships""" +746 56 model """transd""" +746 56 loss """crossentropy""" +746 56 regularizer """no""" +746 56 optimizer """adadelta""" +746 56 training_loop """lcwa""" +746 56 evaluator """rankbased""" +746 57 dataset """kinships""" +746 
57 model """transd""" +746 57 loss """crossentropy""" +746 57 regularizer """no""" +746 57 optimizer """adadelta""" +746 57 training_loop """lcwa""" +746 57 evaluator """rankbased""" +746 58 dataset """kinships""" +746 58 model """transd""" +746 58 loss """crossentropy""" +746 58 regularizer """no""" +746 58 optimizer """adadelta""" +746 58 training_loop """lcwa""" +746 58 evaluator """rankbased""" +746 59 dataset """kinships""" +746 59 model """transd""" +746 59 loss """crossentropy""" +746 59 regularizer """no""" +746 59 optimizer """adadelta""" +746 59 training_loop """lcwa""" +746 59 evaluator """rankbased""" +746 60 dataset """kinships""" +746 60 model """transd""" +746 60 loss """crossentropy""" +746 60 regularizer """no""" +746 60 optimizer """adadelta""" +746 60 training_loop """lcwa""" +746 60 evaluator """rankbased""" +746 61 dataset """kinships""" +746 61 model """transd""" +746 61 loss """crossentropy""" +746 61 regularizer """no""" +746 61 optimizer """adadelta""" +746 61 training_loop """lcwa""" +746 61 evaluator """rankbased""" +746 62 dataset """kinships""" +746 62 model """transd""" +746 62 loss """crossentropy""" +746 62 regularizer """no""" +746 62 optimizer """adadelta""" +746 62 training_loop """lcwa""" +746 62 evaluator """rankbased""" +746 63 dataset """kinships""" +746 63 model """transd""" +746 63 loss """crossentropy""" +746 63 regularizer """no""" +746 63 optimizer """adadelta""" +746 63 training_loop """lcwa""" +746 63 evaluator """rankbased""" +746 64 dataset """kinships""" +746 64 model """transd""" +746 64 loss """crossentropy""" +746 64 regularizer """no""" +746 64 optimizer """adadelta""" +746 64 training_loop """lcwa""" +746 64 evaluator """rankbased""" +746 65 dataset """kinships""" +746 65 model """transd""" +746 65 loss """crossentropy""" +746 65 regularizer """no""" +746 65 optimizer """adadelta""" +746 65 training_loop """lcwa""" +746 65 evaluator """rankbased""" +746 66 dataset """kinships""" +746 66 model """transd""" +746 
66 loss """crossentropy""" +746 66 regularizer """no""" +746 66 optimizer """adadelta""" +746 66 training_loop """lcwa""" +746 66 evaluator """rankbased""" +746 67 dataset """kinships""" +746 67 model """transd""" +746 67 loss """crossentropy""" +746 67 regularizer """no""" +746 67 optimizer """adadelta""" +746 67 training_loop """lcwa""" +746 67 evaluator """rankbased""" +746 68 dataset """kinships""" +746 68 model """transd""" +746 68 loss """crossentropy""" +746 68 regularizer """no""" +746 68 optimizer """adadelta""" +746 68 training_loop """lcwa""" +746 68 evaluator """rankbased""" +746 69 dataset """kinships""" +746 69 model """transd""" +746 69 loss """crossentropy""" +746 69 regularizer """no""" +746 69 optimizer """adadelta""" +746 69 training_loop """lcwa""" +746 69 evaluator """rankbased""" +746 70 dataset """kinships""" +746 70 model """transd""" +746 70 loss """crossentropy""" +746 70 regularizer """no""" +746 70 optimizer """adadelta""" +746 70 training_loop """lcwa""" +746 70 evaluator """rankbased""" +746 71 dataset """kinships""" +746 71 model """transd""" +746 71 loss """crossentropy""" +746 71 regularizer """no""" +746 71 optimizer """adadelta""" +746 71 training_loop """lcwa""" +746 71 evaluator """rankbased""" +746 72 dataset """kinships""" +746 72 model """transd""" +746 72 loss """crossentropy""" +746 72 regularizer """no""" +746 72 optimizer """adadelta""" +746 72 training_loop """lcwa""" +746 72 evaluator """rankbased""" +746 73 dataset """kinships""" +746 73 model """transd""" +746 73 loss """crossentropy""" +746 73 regularizer """no""" +746 73 optimizer """adadelta""" +746 73 training_loop """lcwa""" +746 73 evaluator """rankbased""" +746 74 dataset """kinships""" +746 74 model """transd""" +746 74 loss """crossentropy""" +746 74 regularizer """no""" +746 74 optimizer """adadelta""" +746 74 training_loop """lcwa""" +746 74 evaluator """rankbased""" +746 75 dataset """kinships""" +746 75 model """transd""" +746 75 loss """crossentropy""" 
+746 75 regularizer """no""" +746 75 optimizer """adadelta""" +746 75 training_loop """lcwa""" +746 75 evaluator """rankbased""" +746 76 dataset """kinships""" +746 76 model """transd""" +746 76 loss """crossentropy""" +746 76 regularizer """no""" +746 76 optimizer """adadelta""" +746 76 training_loop """lcwa""" +746 76 evaluator """rankbased""" +746 77 dataset """kinships""" +746 77 model """transd""" +746 77 loss """crossentropy""" +746 77 regularizer """no""" +746 77 optimizer """adadelta""" +746 77 training_loop """lcwa""" +746 77 evaluator """rankbased""" +746 78 dataset """kinships""" +746 78 model """transd""" +746 78 loss """crossentropy""" +746 78 regularizer """no""" +746 78 optimizer """adadelta""" +746 78 training_loop """lcwa""" +746 78 evaluator """rankbased""" +746 79 dataset """kinships""" +746 79 model """transd""" +746 79 loss """crossentropy""" +746 79 regularizer """no""" +746 79 optimizer """adadelta""" +746 79 training_loop """lcwa""" +746 79 evaluator """rankbased""" +746 80 dataset """kinships""" +746 80 model """transd""" +746 80 loss """crossentropy""" +746 80 regularizer """no""" +746 80 optimizer """adadelta""" +746 80 training_loop """lcwa""" +746 80 evaluator """rankbased""" +746 81 dataset """kinships""" +746 81 model """transd""" +746 81 loss """crossentropy""" +746 81 regularizer """no""" +746 81 optimizer """adadelta""" +746 81 training_loop """lcwa""" +746 81 evaluator """rankbased""" +746 82 dataset """kinships""" +746 82 model """transd""" +746 82 loss """crossentropy""" +746 82 regularizer """no""" +746 82 optimizer """adadelta""" +746 82 training_loop """lcwa""" +746 82 evaluator """rankbased""" +746 83 dataset """kinships""" +746 83 model """transd""" +746 83 loss """crossentropy""" +746 83 regularizer """no""" +746 83 optimizer """adadelta""" +746 83 training_loop """lcwa""" +746 83 evaluator """rankbased""" +746 84 dataset """kinships""" +746 84 model """transd""" +746 84 loss """crossentropy""" +746 84 regularizer """no""" 
+746 84 optimizer """adadelta""" +746 84 training_loop """lcwa""" +746 84 evaluator """rankbased""" +746 85 dataset """kinships""" +746 85 model """transd""" +746 85 loss """crossentropy""" +746 85 regularizer """no""" +746 85 optimizer """adadelta""" +746 85 training_loop """lcwa""" +746 85 evaluator """rankbased""" +746 86 dataset """kinships""" +746 86 model """transd""" +746 86 loss """crossentropy""" +746 86 regularizer """no""" +746 86 optimizer """adadelta""" +746 86 training_loop """lcwa""" +746 86 evaluator """rankbased""" +746 87 dataset """kinships""" +746 87 model """transd""" +746 87 loss """crossentropy""" +746 87 regularizer """no""" +746 87 optimizer """adadelta""" +746 87 training_loop """lcwa""" +746 87 evaluator """rankbased""" +746 88 dataset """kinships""" +746 88 model """transd""" +746 88 loss """crossentropy""" +746 88 regularizer """no""" +746 88 optimizer """adadelta""" +746 88 training_loop """lcwa""" +746 88 evaluator """rankbased""" +746 89 dataset """kinships""" +746 89 model """transd""" +746 89 loss """crossentropy""" +746 89 regularizer """no""" +746 89 optimizer """adadelta""" +746 89 training_loop """lcwa""" +746 89 evaluator """rankbased""" +746 90 dataset """kinships""" +746 90 model """transd""" +746 90 loss """crossentropy""" +746 90 regularizer """no""" +746 90 optimizer """adadelta""" +746 90 training_loop """lcwa""" +746 90 evaluator """rankbased""" +746 91 dataset """kinships""" +746 91 model """transd""" +746 91 loss """crossentropy""" +746 91 regularizer """no""" +746 91 optimizer """adadelta""" +746 91 training_loop """lcwa""" +746 91 evaluator """rankbased""" +746 92 dataset """kinships""" +746 92 model """transd""" +746 92 loss """crossentropy""" +746 92 regularizer """no""" +746 92 optimizer """adadelta""" +746 92 training_loop """lcwa""" +746 92 evaluator """rankbased""" +746 93 dataset """kinships""" +746 93 model """transd""" +746 93 loss """crossentropy""" +746 93 regularizer """no""" +746 93 optimizer 
"""adadelta""" +746 93 training_loop """lcwa""" +746 93 evaluator """rankbased""" +746 94 dataset """kinships""" +746 94 model """transd""" +746 94 loss """crossentropy""" +746 94 regularizer """no""" +746 94 optimizer """adadelta""" +746 94 training_loop """lcwa""" +746 94 evaluator """rankbased""" +746 95 dataset """kinships""" +746 95 model """transd""" +746 95 loss """crossentropy""" +746 95 regularizer """no""" +746 95 optimizer """adadelta""" +746 95 training_loop """lcwa""" +746 95 evaluator """rankbased""" +746 96 dataset """kinships""" +746 96 model """transd""" +746 96 loss """crossentropy""" +746 96 regularizer """no""" +746 96 optimizer """adadelta""" +746 96 training_loop """lcwa""" +746 96 evaluator """rankbased""" +746 97 dataset """kinships""" +746 97 model """transd""" +746 97 loss """crossentropy""" +746 97 regularizer """no""" +746 97 optimizer """adadelta""" +746 97 training_loop """lcwa""" +746 97 evaluator """rankbased""" +746 98 dataset """kinships""" +746 98 model """transd""" +746 98 loss """crossentropy""" +746 98 regularizer """no""" +746 98 optimizer """adadelta""" +746 98 training_loop """lcwa""" +746 98 evaluator """rankbased""" +746 99 dataset """kinships""" +746 99 model """transd""" +746 99 loss """crossentropy""" +746 99 regularizer """no""" +746 99 optimizer """adadelta""" +746 99 training_loop """lcwa""" +746 99 evaluator """rankbased""" +746 100 dataset """kinships""" +746 100 model """transd""" +746 100 loss """crossentropy""" +746 100 regularizer """no""" +746 100 optimizer """adadelta""" +746 100 training_loop """lcwa""" +746 100 evaluator """rankbased""" +747 1 model.embedding_dim 0.0 +747 1 model.relation_dim 2.0 +747 1 negative_sampler.num_negs_per_pos 15.0 +747 1 training.batch_size 1.0 +747 2 model.embedding_dim 0.0 +747 2 model.relation_dim 1.0 +747 2 negative_sampler.num_negs_per_pos 97.0 +747 2 training.batch_size 2.0 +747 3 model.embedding_dim 1.0 +747 3 model.relation_dim 2.0 +747 3 negative_sampler.num_negs_per_pos 
99.0 +747 3 training.batch_size 2.0 +747 4 model.embedding_dim 2.0 +747 4 model.relation_dim 2.0 +747 4 negative_sampler.num_negs_per_pos 13.0 +747 4 training.batch_size 2.0 +747 5 model.embedding_dim 0.0 +747 5 model.relation_dim 0.0 +747 5 negative_sampler.num_negs_per_pos 78.0 +747 5 training.batch_size 1.0 +747 6 model.embedding_dim 0.0 +747 6 model.relation_dim 0.0 +747 6 negative_sampler.num_negs_per_pos 42.0 +747 6 training.batch_size 2.0 +747 7 model.embedding_dim 2.0 +747 7 model.relation_dim 2.0 +747 7 negative_sampler.num_negs_per_pos 40.0 +747 7 training.batch_size 0.0 +747 8 model.embedding_dim 1.0 +747 8 model.relation_dim 0.0 +747 8 negative_sampler.num_negs_per_pos 29.0 +747 8 training.batch_size 1.0 +747 9 model.embedding_dim 1.0 +747 9 model.relation_dim 0.0 +747 9 negative_sampler.num_negs_per_pos 15.0 +747 9 training.batch_size 0.0 +747 10 model.embedding_dim 2.0 +747 10 model.relation_dim 1.0 +747 10 negative_sampler.num_negs_per_pos 95.0 +747 10 training.batch_size 2.0 +747 11 model.embedding_dim 0.0 +747 11 model.relation_dim 2.0 +747 11 negative_sampler.num_negs_per_pos 51.0 +747 11 training.batch_size 1.0 +747 12 model.embedding_dim 1.0 +747 12 model.relation_dim 1.0 +747 12 negative_sampler.num_negs_per_pos 46.0 +747 12 training.batch_size 1.0 +747 13 model.embedding_dim 0.0 +747 13 model.relation_dim 0.0 +747 13 negative_sampler.num_negs_per_pos 28.0 +747 13 training.batch_size 0.0 +747 14 model.embedding_dim 2.0 +747 14 model.relation_dim 0.0 +747 14 negative_sampler.num_negs_per_pos 86.0 +747 14 training.batch_size 1.0 +747 15 model.embedding_dim 2.0 +747 15 model.relation_dim 2.0 +747 15 negative_sampler.num_negs_per_pos 48.0 +747 15 training.batch_size 2.0 +747 16 model.embedding_dim 2.0 +747 16 model.relation_dim 0.0 +747 16 negative_sampler.num_negs_per_pos 15.0 +747 16 training.batch_size 1.0 +747 17 model.embedding_dim 1.0 +747 17 model.relation_dim 0.0 +747 17 negative_sampler.num_negs_per_pos 91.0 +747 17 training.batch_size 0.0 
+747 18 model.embedding_dim 0.0 +747 18 model.relation_dim 2.0 +747 18 negative_sampler.num_negs_per_pos 28.0 +747 18 training.batch_size 1.0 +747 19 model.embedding_dim 1.0 +747 19 model.relation_dim 0.0 +747 19 negative_sampler.num_negs_per_pos 49.0 +747 19 training.batch_size 1.0 +747 20 model.embedding_dim 2.0 +747 20 model.relation_dim 1.0 +747 20 negative_sampler.num_negs_per_pos 79.0 +747 20 training.batch_size 1.0 +747 21 model.embedding_dim 0.0 +747 21 model.relation_dim 0.0 +747 21 negative_sampler.num_negs_per_pos 87.0 +747 21 training.batch_size 0.0 +747 22 model.embedding_dim 1.0 +747 22 model.relation_dim 2.0 +747 22 negative_sampler.num_negs_per_pos 83.0 +747 22 training.batch_size 0.0 +747 23 model.embedding_dim 0.0 +747 23 model.relation_dim 2.0 +747 23 negative_sampler.num_negs_per_pos 2.0 +747 23 training.batch_size 1.0 +747 24 model.embedding_dim 1.0 +747 24 model.relation_dim 1.0 +747 24 negative_sampler.num_negs_per_pos 81.0 +747 24 training.batch_size 2.0 +747 25 model.embedding_dim 1.0 +747 25 model.relation_dim 1.0 +747 25 negative_sampler.num_negs_per_pos 58.0 +747 25 training.batch_size 2.0 +747 26 model.embedding_dim 0.0 +747 26 model.relation_dim 2.0 +747 26 negative_sampler.num_negs_per_pos 22.0 +747 26 training.batch_size 1.0 +747 27 model.embedding_dim 0.0 +747 27 model.relation_dim 0.0 +747 27 negative_sampler.num_negs_per_pos 75.0 +747 27 training.batch_size 1.0 +747 28 model.embedding_dim 2.0 +747 28 model.relation_dim 2.0 +747 28 negative_sampler.num_negs_per_pos 79.0 +747 28 training.batch_size 1.0 +747 29 model.embedding_dim 2.0 +747 29 model.relation_dim 1.0 +747 29 negative_sampler.num_negs_per_pos 60.0 +747 29 training.batch_size 1.0 +747 30 model.embedding_dim 0.0 +747 30 model.relation_dim 2.0 +747 30 negative_sampler.num_negs_per_pos 40.0 +747 30 training.batch_size 1.0 +747 31 model.embedding_dim 0.0 +747 31 model.relation_dim 0.0 +747 31 negative_sampler.num_negs_per_pos 83.0 +747 31 training.batch_size 0.0 +747 32 
model.embedding_dim 2.0 +747 32 model.relation_dim 2.0 +747 32 negative_sampler.num_negs_per_pos 73.0 +747 32 training.batch_size 1.0 +747 33 model.embedding_dim 2.0 +747 33 model.relation_dim 2.0 +747 33 negative_sampler.num_negs_per_pos 99.0 +747 33 training.batch_size 2.0 +747 34 model.embedding_dim 1.0 +747 34 model.relation_dim 2.0 +747 34 negative_sampler.num_negs_per_pos 22.0 +747 34 training.batch_size 0.0 +747 35 model.embedding_dim 1.0 +747 35 model.relation_dim 2.0 +747 35 negative_sampler.num_negs_per_pos 16.0 +747 35 training.batch_size 1.0 +747 36 model.embedding_dim 0.0 +747 36 model.relation_dim 2.0 +747 36 negative_sampler.num_negs_per_pos 47.0 +747 36 training.batch_size 1.0 +747 37 model.embedding_dim 1.0 +747 37 model.relation_dim 0.0 +747 37 negative_sampler.num_negs_per_pos 68.0 +747 37 training.batch_size 1.0 +747 38 model.embedding_dim 1.0 +747 38 model.relation_dim 2.0 +747 38 negative_sampler.num_negs_per_pos 44.0 +747 38 training.batch_size 0.0 +747 39 model.embedding_dim 1.0 +747 39 model.relation_dim 0.0 +747 39 negative_sampler.num_negs_per_pos 15.0 +747 39 training.batch_size 0.0 +747 40 model.embedding_dim 2.0 +747 40 model.relation_dim 1.0 +747 40 negative_sampler.num_negs_per_pos 97.0 +747 40 training.batch_size 1.0 +747 41 model.embedding_dim 0.0 +747 41 model.relation_dim 1.0 +747 41 negative_sampler.num_negs_per_pos 14.0 +747 41 training.batch_size 1.0 +747 42 model.embedding_dim 2.0 +747 42 model.relation_dim 2.0 +747 42 negative_sampler.num_negs_per_pos 25.0 +747 42 training.batch_size 2.0 +747 43 model.embedding_dim 0.0 +747 43 model.relation_dim 1.0 +747 43 negative_sampler.num_negs_per_pos 22.0 +747 43 training.batch_size 2.0 +747 44 model.embedding_dim 0.0 +747 44 model.relation_dim 2.0 +747 44 negative_sampler.num_negs_per_pos 19.0 +747 44 training.batch_size 0.0 +747 45 model.embedding_dim 0.0 +747 45 model.relation_dim 1.0 +747 45 negative_sampler.num_negs_per_pos 29.0 +747 45 training.batch_size 1.0 +747 46 
model.embedding_dim 2.0 +747 46 model.relation_dim 1.0 +747 46 negative_sampler.num_negs_per_pos 65.0 +747 46 training.batch_size 1.0 +747 47 model.embedding_dim 2.0 +747 47 model.relation_dim 1.0 +747 47 negative_sampler.num_negs_per_pos 89.0 +747 47 training.batch_size 0.0 +747 48 model.embedding_dim 1.0 +747 48 model.relation_dim 0.0 +747 48 negative_sampler.num_negs_per_pos 13.0 +747 48 training.batch_size 0.0 +747 49 model.embedding_dim 0.0 +747 49 model.relation_dim 0.0 +747 49 negative_sampler.num_negs_per_pos 42.0 +747 49 training.batch_size 2.0 +747 50 model.embedding_dim 2.0 +747 50 model.relation_dim 2.0 +747 50 negative_sampler.num_negs_per_pos 75.0 +747 50 training.batch_size 0.0 +747 51 model.embedding_dim 1.0 +747 51 model.relation_dim 0.0 +747 51 negative_sampler.num_negs_per_pos 57.0 +747 51 training.batch_size 1.0 +747 52 model.embedding_dim 1.0 +747 52 model.relation_dim 1.0 +747 52 negative_sampler.num_negs_per_pos 60.0 +747 52 training.batch_size 2.0 +747 53 model.embedding_dim 1.0 +747 53 model.relation_dim 2.0 +747 53 negative_sampler.num_negs_per_pos 63.0 +747 53 training.batch_size 0.0 +747 54 model.embedding_dim 2.0 +747 54 model.relation_dim 1.0 +747 54 negative_sampler.num_negs_per_pos 61.0 +747 54 training.batch_size 0.0 +747 55 model.embedding_dim 1.0 +747 55 model.relation_dim 1.0 +747 55 negative_sampler.num_negs_per_pos 11.0 +747 55 training.batch_size 1.0 +747 56 model.embedding_dim 1.0 +747 56 model.relation_dim 1.0 +747 56 negative_sampler.num_negs_per_pos 27.0 +747 56 training.batch_size 1.0 +747 57 model.embedding_dim 2.0 +747 57 model.relation_dim 0.0 +747 57 negative_sampler.num_negs_per_pos 0.0 +747 57 training.batch_size 1.0 +747 58 model.embedding_dim 1.0 +747 58 model.relation_dim 1.0 +747 58 negative_sampler.num_negs_per_pos 41.0 +747 58 training.batch_size 1.0 +747 59 model.embedding_dim 1.0 +747 59 model.relation_dim 0.0 +747 59 negative_sampler.num_negs_per_pos 42.0 +747 59 training.batch_size 1.0 +747 60 
model.embedding_dim 2.0 +747 60 model.relation_dim 2.0 +747 60 negative_sampler.num_negs_per_pos 59.0 +747 60 training.batch_size 2.0 +747 61 model.embedding_dim 2.0 +747 61 model.relation_dim 0.0 +747 61 negative_sampler.num_negs_per_pos 18.0 +747 61 training.batch_size 0.0 +747 62 model.embedding_dim 1.0 +747 62 model.relation_dim 1.0 +747 62 negative_sampler.num_negs_per_pos 33.0 +747 62 training.batch_size 2.0 +747 63 model.embedding_dim 0.0 +747 63 model.relation_dim 2.0 +747 63 negative_sampler.num_negs_per_pos 1.0 +747 63 training.batch_size 1.0 +747 64 model.embedding_dim 2.0 +747 64 model.relation_dim 1.0 +747 64 negative_sampler.num_negs_per_pos 1.0 +747 64 training.batch_size 1.0 +747 65 model.embedding_dim 1.0 +747 65 model.relation_dim 0.0 +747 65 negative_sampler.num_negs_per_pos 38.0 +747 65 training.batch_size 1.0 +747 66 model.embedding_dim 1.0 +747 66 model.relation_dim 1.0 +747 66 negative_sampler.num_negs_per_pos 59.0 +747 66 training.batch_size 2.0 +747 67 model.embedding_dim 1.0 +747 67 model.relation_dim 1.0 +747 67 negative_sampler.num_negs_per_pos 62.0 +747 67 training.batch_size 2.0 +747 68 model.embedding_dim 2.0 +747 68 model.relation_dim 1.0 +747 68 negative_sampler.num_negs_per_pos 30.0 +747 68 training.batch_size 2.0 +747 69 model.embedding_dim 0.0 +747 69 model.relation_dim 2.0 +747 69 negative_sampler.num_negs_per_pos 42.0 +747 69 training.batch_size 1.0 +747 70 model.embedding_dim 0.0 +747 70 model.relation_dim 1.0 +747 70 negative_sampler.num_negs_per_pos 57.0 +747 70 training.batch_size 0.0 +747 71 model.embedding_dim 1.0 +747 71 model.relation_dim 2.0 +747 71 negative_sampler.num_negs_per_pos 52.0 +747 71 training.batch_size 1.0 +747 72 model.embedding_dim 1.0 +747 72 model.relation_dim 0.0 +747 72 negative_sampler.num_negs_per_pos 57.0 +747 72 training.batch_size 0.0 +747 73 model.embedding_dim 0.0 +747 73 model.relation_dim 2.0 +747 73 negative_sampler.num_negs_per_pos 29.0 +747 73 training.batch_size 2.0 +747 74 
model.embedding_dim 2.0 +747 74 model.relation_dim 2.0 +747 74 negative_sampler.num_negs_per_pos 75.0 +747 74 training.batch_size 2.0 +747 75 model.embedding_dim 1.0 +747 75 model.relation_dim 1.0 +747 75 negative_sampler.num_negs_per_pos 70.0 +747 75 training.batch_size 2.0 +747 76 model.embedding_dim 1.0 +747 76 model.relation_dim 0.0 +747 76 negative_sampler.num_negs_per_pos 73.0 +747 76 training.batch_size 1.0 +747 77 model.embedding_dim 2.0 +747 77 model.relation_dim 2.0 +747 77 negative_sampler.num_negs_per_pos 47.0 +747 77 training.batch_size 1.0 +747 78 model.embedding_dim 1.0 +747 78 model.relation_dim 1.0 +747 78 negative_sampler.num_negs_per_pos 93.0 +747 78 training.batch_size 2.0 +747 79 model.embedding_dim 2.0 +747 79 model.relation_dim 1.0 +747 79 negative_sampler.num_negs_per_pos 80.0 +747 79 training.batch_size 2.0 +747 80 model.embedding_dim 2.0 +747 80 model.relation_dim 2.0 +747 80 negative_sampler.num_negs_per_pos 76.0 +747 80 training.batch_size 0.0 +747 81 model.embedding_dim 0.0 +747 81 model.relation_dim 2.0 +747 81 negative_sampler.num_negs_per_pos 28.0 +747 81 training.batch_size 0.0 +747 82 model.embedding_dim 0.0 +747 82 model.relation_dim 1.0 +747 82 negative_sampler.num_negs_per_pos 55.0 +747 82 training.batch_size 1.0 +747 83 model.embedding_dim 0.0 +747 83 model.relation_dim 2.0 +747 83 negative_sampler.num_negs_per_pos 51.0 +747 83 training.batch_size 2.0 +747 84 model.embedding_dim 2.0 +747 84 model.relation_dim 2.0 +747 84 negative_sampler.num_negs_per_pos 93.0 +747 84 training.batch_size 1.0 +747 85 model.embedding_dim 0.0 +747 85 model.relation_dim 2.0 +747 85 negative_sampler.num_negs_per_pos 96.0 +747 85 training.batch_size 1.0 +747 86 model.embedding_dim 0.0 +747 86 model.relation_dim 0.0 +747 86 negative_sampler.num_negs_per_pos 53.0 +747 86 training.batch_size 2.0 +747 87 model.embedding_dim 1.0 +747 87 model.relation_dim 2.0 +747 87 negative_sampler.num_negs_per_pos 75.0 +747 87 training.batch_size 2.0 +747 88 
model.embedding_dim 1.0 +747 88 model.relation_dim 1.0 +747 88 negative_sampler.num_negs_per_pos 25.0 +747 88 training.batch_size 2.0 +747 89 model.embedding_dim 2.0 +747 89 model.relation_dim 1.0 +747 89 negative_sampler.num_negs_per_pos 58.0 +747 89 training.batch_size 0.0 +747 90 model.embedding_dim 1.0 +747 90 model.relation_dim 2.0 +747 90 negative_sampler.num_negs_per_pos 14.0 +747 90 training.batch_size 0.0 +747 91 model.embedding_dim 2.0 +747 91 model.relation_dim 2.0 +747 91 negative_sampler.num_negs_per_pos 39.0 +747 91 training.batch_size 0.0 +747 92 model.embedding_dim 0.0 +747 92 model.relation_dim 1.0 +747 92 negative_sampler.num_negs_per_pos 13.0 +747 92 training.batch_size 2.0 +747 93 model.embedding_dim 0.0 +747 93 model.relation_dim 1.0 +747 93 negative_sampler.num_negs_per_pos 99.0 +747 93 training.batch_size 1.0 +747 94 model.embedding_dim 2.0 +747 94 model.relation_dim 2.0 +747 94 negative_sampler.num_negs_per_pos 9.0 +747 94 training.batch_size 2.0 +747 95 model.embedding_dim 1.0 +747 95 model.relation_dim 0.0 +747 95 negative_sampler.num_negs_per_pos 52.0 +747 95 training.batch_size 2.0 +747 96 model.embedding_dim 1.0 +747 96 model.relation_dim 2.0 +747 96 negative_sampler.num_negs_per_pos 53.0 +747 96 training.batch_size 2.0 +747 97 model.embedding_dim 2.0 +747 97 model.relation_dim 1.0 +747 97 negative_sampler.num_negs_per_pos 66.0 +747 97 training.batch_size 2.0 +747 98 model.embedding_dim 1.0 +747 98 model.relation_dim 2.0 +747 98 negative_sampler.num_negs_per_pos 79.0 +747 98 training.batch_size 2.0 +747 99 model.embedding_dim 0.0 +747 99 model.relation_dim 1.0 +747 99 negative_sampler.num_negs_per_pos 64.0 +747 99 training.batch_size 2.0 +747 100 model.embedding_dim 0.0 +747 100 model.relation_dim 1.0 +747 100 negative_sampler.num_negs_per_pos 20.0 +747 100 training.batch_size 0.0 +747 1 dataset """kinships""" +747 1 model """transd""" +747 1 loss """bceaftersigmoid""" +747 1 regularizer """no""" +747 1 optimizer """adadelta""" +747 1 
training_loop """owa""" +747 1 negative_sampler """basic""" +747 1 evaluator """rankbased""" +747 2 dataset """kinships""" +747 2 model """transd""" +747 2 loss """bceaftersigmoid""" +747 2 regularizer """no""" +747 2 optimizer """adadelta""" +747 2 training_loop """owa""" +747 2 negative_sampler """basic""" +747 2 evaluator """rankbased""" +747 3 dataset """kinships""" +747 3 model """transd""" +747 3 loss """bceaftersigmoid""" +747 3 regularizer """no""" +747 3 optimizer """adadelta""" +747 3 training_loop """owa""" +747 3 negative_sampler """basic""" +747 3 evaluator """rankbased""" +747 4 dataset """kinships""" +747 4 model """transd""" +747 4 loss """bceaftersigmoid""" +747 4 regularizer """no""" +747 4 optimizer """adadelta""" +747 4 training_loop """owa""" +747 4 negative_sampler """basic""" +747 4 evaluator """rankbased""" +747 5 dataset """kinships""" +747 5 model """transd""" +747 5 loss """bceaftersigmoid""" +747 5 regularizer """no""" +747 5 optimizer """adadelta""" +747 5 training_loop """owa""" +747 5 negative_sampler """basic""" +747 5 evaluator """rankbased""" +747 6 dataset """kinships""" +747 6 model """transd""" +747 6 loss """bceaftersigmoid""" +747 6 regularizer """no""" +747 6 optimizer """adadelta""" +747 6 training_loop """owa""" +747 6 negative_sampler """basic""" +747 6 evaluator """rankbased""" +747 7 dataset """kinships""" +747 7 model """transd""" +747 7 loss """bceaftersigmoid""" +747 7 regularizer """no""" +747 7 optimizer """adadelta""" +747 7 training_loop """owa""" +747 7 negative_sampler """basic""" +747 7 evaluator """rankbased""" +747 8 dataset """kinships""" +747 8 model """transd""" +747 8 loss """bceaftersigmoid""" +747 8 regularizer """no""" +747 8 optimizer """adadelta""" +747 8 training_loop """owa""" +747 8 negative_sampler """basic""" +747 8 evaluator """rankbased""" +747 9 dataset """kinships""" +747 9 model """transd""" +747 9 loss """bceaftersigmoid""" +747 9 regularizer """no""" +747 9 optimizer """adadelta""" +747 9 
training_loop """owa""" +747 9 negative_sampler """basic""" +747 9 evaluator """rankbased""" +747 10 dataset """kinships""" +747 10 model """transd""" +747 10 loss """bceaftersigmoid""" +747 10 regularizer """no""" +747 10 optimizer """adadelta""" +747 10 training_loop """owa""" +747 10 negative_sampler """basic""" +747 10 evaluator """rankbased""" +747 11 dataset """kinships""" +747 11 model """transd""" +747 11 loss """bceaftersigmoid""" +747 11 regularizer """no""" +747 11 optimizer """adadelta""" +747 11 training_loop """owa""" +747 11 negative_sampler """basic""" +747 11 evaluator """rankbased""" +747 12 dataset """kinships""" +747 12 model """transd""" +747 12 loss """bceaftersigmoid""" +747 12 regularizer """no""" +747 12 optimizer """adadelta""" +747 12 training_loop """owa""" +747 12 negative_sampler """basic""" +747 12 evaluator """rankbased""" +747 13 dataset """kinships""" +747 13 model """transd""" +747 13 loss """bceaftersigmoid""" +747 13 regularizer """no""" +747 13 optimizer """adadelta""" +747 13 training_loop """owa""" +747 13 negative_sampler """basic""" +747 13 evaluator """rankbased""" +747 14 dataset """kinships""" +747 14 model """transd""" +747 14 loss """bceaftersigmoid""" +747 14 regularizer """no""" +747 14 optimizer """adadelta""" +747 14 training_loop """owa""" +747 14 negative_sampler """basic""" +747 14 evaluator """rankbased""" +747 15 dataset """kinships""" +747 15 model """transd""" +747 15 loss """bceaftersigmoid""" +747 15 regularizer """no""" +747 15 optimizer """adadelta""" +747 15 training_loop """owa""" +747 15 negative_sampler """basic""" +747 15 evaluator """rankbased""" +747 16 dataset """kinships""" +747 16 model """transd""" +747 16 loss """bceaftersigmoid""" +747 16 regularizer """no""" +747 16 optimizer """adadelta""" +747 16 training_loop """owa""" +747 16 negative_sampler """basic""" +747 16 evaluator """rankbased""" +747 17 dataset """kinships""" +747 17 model """transd""" +747 17 loss """bceaftersigmoid""" +747 17 
regularizer """no""" +747 17 optimizer """adadelta""" +747 17 training_loop """owa""" +747 17 negative_sampler """basic""" +747 17 evaluator """rankbased""" +747 18 dataset """kinships""" +747 18 model """transd""" +747 18 loss """bceaftersigmoid""" +747 18 regularizer """no""" +747 18 optimizer """adadelta""" +747 18 training_loop """owa""" +747 18 negative_sampler """basic""" +747 18 evaluator """rankbased""" +747 19 dataset """kinships""" +747 19 model """transd""" +747 19 loss """bceaftersigmoid""" +747 19 regularizer """no""" +747 19 optimizer """adadelta""" +747 19 training_loop """owa""" +747 19 negative_sampler """basic""" +747 19 evaluator """rankbased""" +747 20 dataset """kinships""" +747 20 model """transd""" +747 20 loss """bceaftersigmoid""" +747 20 regularizer """no""" +747 20 optimizer """adadelta""" +747 20 training_loop """owa""" +747 20 negative_sampler """basic""" +747 20 evaluator """rankbased""" +747 21 dataset """kinships""" +747 21 model """transd""" +747 21 loss """bceaftersigmoid""" +747 21 regularizer """no""" +747 21 optimizer """adadelta""" +747 21 training_loop """owa""" +747 21 negative_sampler """basic""" +747 21 evaluator """rankbased""" +747 22 dataset """kinships""" +747 22 model """transd""" +747 22 loss """bceaftersigmoid""" +747 22 regularizer """no""" +747 22 optimizer """adadelta""" +747 22 training_loop """owa""" +747 22 negative_sampler """basic""" +747 22 evaluator """rankbased""" +747 23 dataset """kinships""" +747 23 model """transd""" +747 23 loss """bceaftersigmoid""" +747 23 regularizer """no""" +747 23 optimizer """adadelta""" +747 23 training_loop """owa""" +747 23 negative_sampler """basic""" +747 23 evaluator """rankbased""" +747 24 dataset """kinships""" +747 24 model """transd""" +747 24 loss """bceaftersigmoid""" +747 24 regularizer """no""" +747 24 optimizer """adadelta""" +747 24 training_loop """owa""" +747 24 negative_sampler """basic""" +747 24 evaluator """rankbased""" +747 25 dataset """kinships""" +747 
25 model """transd""" +747 25 loss """bceaftersigmoid""" +747 25 regularizer """no""" +747 25 optimizer """adadelta""" +747 25 training_loop """owa""" +747 25 negative_sampler """basic""" +747 25 evaluator """rankbased""" +747 26 dataset """kinships""" +747 26 model """transd""" +747 26 loss """bceaftersigmoid""" +747 26 regularizer """no""" +747 26 optimizer """adadelta""" +747 26 training_loop """owa""" +747 26 negative_sampler """basic""" +747 26 evaluator """rankbased""" +747 27 dataset """kinships""" +747 27 model """transd""" +747 27 loss """bceaftersigmoid""" +747 27 regularizer """no""" +747 27 optimizer """adadelta""" +747 27 training_loop """owa""" +747 27 negative_sampler """basic""" +747 27 evaluator """rankbased""" +747 28 dataset """kinships""" +747 28 model """transd""" +747 28 loss """bceaftersigmoid""" +747 28 regularizer """no""" +747 28 optimizer """adadelta""" +747 28 training_loop """owa""" +747 28 negative_sampler """basic""" +747 28 evaluator """rankbased""" +747 29 dataset """kinships""" +747 29 model """transd""" +747 29 loss """bceaftersigmoid""" +747 29 regularizer """no""" +747 29 optimizer """adadelta""" +747 29 training_loop """owa""" +747 29 negative_sampler """basic""" +747 29 evaluator """rankbased""" +747 30 dataset """kinships""" +747 30 model """transd""" +747 30 loss """bceaftersigmoid""" +747 30 regularizer """no""" +747 30 optimizer """adadelta""" +747 30 training_loop """owa""" +747 30 negative_sampler """basic""" +747 30 evaluator """rankbased""" +747 31 dataset """kinships""" +747 31 model """transd""" +747 31 loss """bceaftersigmoid""" +747 31 regularizer """no""" +747 31 optimizer """adadelta""" +747 31 training_loop """owa""" +747 31 negative_sampler """basic""" +747 31 evaluator """rankbased""" +747 32 dataset """kinships""" +747 32 model """transd""" +747 32 loss """bceaftersigmoid""" +747 32 regularizer """no""" +747 32 optimizer """adadelta""" +747 32 training_loop """owa""" +747 32 negative_sampler """basic""" +747 
32 evaluator """rankbased""" +747 33 dataset """kinships""" +747 33 model """transd""" +747 33 loss """bceaftersigmoid""" +747 33 regularizer """no""" +747 33 optimizer """adadelta""" +747 33 training_loop """owa""" +747 33 negative_sampler """basic""" +747 33 evaluator """rankbased""" +747 34 dataset """kinships""" +747 34 model """transd""" +747 34 loss """bceaftersigmoid""" +747 34 regularizer """no""" +747 34 optimizer """adadelta""" +747 34 training_loop """owa""" +747 34 negative_sampler """basic""" +747 34 evaluator """rankbased""" +747 35 dataset """kinships""" +747 35 model """transd""" +747 35 loss """bceaftersigmoid""" +747 35 regularizer """no""" +747 35 optimizer """adadelta""" +747 35 training_loop """owa""" +747 35 negative_sampler """basic""" +747 35 evaluator """rankbased""" +747 36 dataset """kinships""" +747 36 model """transd""" +747 36 loss """bceaftersigmoid""" +747 36 regularizer """no""" +747 36 optimizer """adadelta""" +747 36 training_loop """owa""" +747 36 negative_sampler """basic""" +747 36 evaluator """rankbased""" +747 37 dataset """kinships""" +747 37 model """transd""" +747 37 loss """bceaftersigmoid""" +747 37 regularizer """no""" +747 37 optimizer """adadelta""" +747 37 training_loop """owa""" +747 37 negative_sampler """basic""" +747 37 evaluator """rankbased""" +747 38 dataset """kinships""" +747 38 model """transd""" +747 38 loss """bceaftersigmoid""" +747 38 regularizer """no""" +747 38 optimizer """adadelta""" +747 38 training_loop """owa""" +747 38 negative_sampler """basic""" +747 38 evaluator """rankbased""" +747 39 dataset """kinships""" +747 39 model """transd""" +747 39 loss """bceaftersigmoid""" +747 39 regularizer """no""" +747 39 optimizer """adadelta""" +747 39 training_loop """owa""" +747 39 negative_sampler """basic""" +747 39 evaluator """rankbased""" +747 40 dataset """kinships""" +747 40 model """transd""" +747 40 loss """bceaftersigmoid""" +747 40 regularizer """no""" +747 40 optimizer """adadelta""" +747 40 
training_loop """owa""" +747 40 negative_sampler """basic""" +747 40 evaluator """rankbased""" +747 41 dataset """kinships""" +747 41 model """transd""" +747 41 loss """bceaftersigmoid""" +747 41 regularizer """no""" +747 41 optimizer """adadelta""" +747 41 training_loop """owa""" +747 41 negative_sampler """basic""" +747 41 evaluator """rankbased""" +747 42 dataset """kinships""" +747 42 model """transd""" +747 42 loss """bceaftersigmoid""" +747 42 regularizer """no""" +747 42 optimizer """adadelta""" +747 42 training_loop """owa""" +747 42 negative_sampler """basic""" +747 42 evaluator """rankbased""" +747 43 dataset """kinships""" +747 43 model """transd""" +747 43 loss """bceaftersigmoid""" +747 43 regularizer """no""" +747 43 optimizer """adadelta""" +747 43 training_loop """owa""" +747 43 negative_sampler """basic""" +747 43 evaluator """rankbased""" +747 44 dataset """kinships""" +747 44 model """transd""" +747 44 loss """bceaftersigmoid""" +747 44 regularizer """no""" +747 44 optimizer """adadelta""" +747 44 training_loop """owa""" +747 44 negative_sampler """basic""" +747 44 evaluator """rankbased""" +747 45 dataset """kinships""" +747 45 model """transd""" +747 45 loss """bceaftersigmoid""" +747 45 regularizer """no""" +747 45 optimizer """adadelta""" +747 45 training_loop """owa""" +747 45 negative_sampler """basic""" +747 45 evaluator """rankbased""" +747 46 dataset """kinships""" +747 46 model """transd""" +747 46 loss """bceaftersigmoid""" +747 46 regularizer """no""" +747 46 optimizer """adadelta""" +747 46 training_loop """owa""" +747 46 negative_sampler """basic""" +747 46 evaluator """rankbased""" +747 47 dataset """kinships""" +747 47 model """transd""" +747 47 loss """bceaftersigmoid""" +747 47 regularizer """no""" +747 47 optimizer """adadelta""" +747 47 training_loop """owa""" +747 47 negative_sampler """basic""" +747 47 evaluator """rankbased""" +747 48 dataset """kinships""" +747 48 model """transd""" +747 48 loss """bceaftersigmoid""" +747 
48 regularizer """no""" +747 48 optimizer """adadelta""" +747 48 training_loop """owa""" +747 48 negative_sampler """basic""" +747 48 evaluator """rankbased""" +747 49 dataset """kinships""" +747 49 model """transd""" +747 49 loss """bceaftersigmoid""" +747 49 regularizer """no""" +747 49 optimizer """adadelta""" +747 49 training_loop """owa""" +747 49 negative_sampler """basic""" +747 49 evaluator """rankbased""" +747 50 dataset """kinships""" +747 50 model """transd""" +747 50 loss """bceaftersigmoid""" +747 50 regularizer """no""" +747 50 optimizer """adadelta""" +747 50 training_loop """owa""" +747 50 negative_sampler """basic""" +747 50 evaluator """rankbased""" +747 51 dataset """kinships""" +747 51 model """transd""" +747 51 loss """bceaftersigmoid""" +747 51 regularizer """no""" +747 51 optimizer """adadelta""" +747 51 training_loop """owa""" +747 51 negative_sampler """basic""" +747 51 evaluator """rankbased""" +747 52 dataset """kinships""" +747 52 model """transd""" +747 52 loss """bceaftersigmoid""" +747 52 regularizer """no""" +747 52 optimizer """adadelta""" +747 52 training_loop """owa""" +747 52 negative_sampler """basic""" +747 52 evaluator """rankbased""" +747 53 dataset """kinships""" +747 53 model """transd""" +747 53 loss """bceaftersigmoid""" +747 53 regularizer """no""" +747 53 optimizer """adadelta""" +747 53 training_loop """owa""" +747 53 negative_sampler """basic""" +747 53 evaluator """rankbased""" +747 54 dataset """kinships""" +747 54 model """transd""" +747 54 loss """bceaftersigmoid""" +747 54 regularizer """no""" +747 54 optimizer """adadelta""" +747 54 training_loop """owa""" +747 54 negative_sampler """basic""" +747 54 evaluator """rankbased""" +747 55 dataset """kinships""" +747 55 model """transd""" +747 55 loss """bceaftersigmoid""" +747 55 regularizer """no""" +747 55 optimizer """adadelta""" +747 55 training_loop """owa""" +747 55 negative_sampler """basic""" +747 55 evaluator """rankbased""" +747 56 dataset """kinships""" 
+747 56 model """transd""" +747 56 loss """bceaftersigmoid""" +747 56 regularizer """no""" +747 56 optimizer """adadelta""" +747 56 training_loop """owa""" +747 56 negative_sampler """basic""" +747 56 evaluator """rankbased""" +747 57 dataset """kinships""" +747 57 model """transd""" +747 57 loss """bceaftersigmoid""" +747 57 regularizer """no""" +747 57 optimizer """adadelta""" +747 57 training_loop """owa""" +747 57 negative_sampler """basic""" +747 57 evaluator """rankbased""" +747 58 dataset """kinships""" +747 58 model """transd""" +747 58 loss """bceaftersigmoid""" +747 58 regularizer """no""" +747 58 optimizer """adadelta""" +747 58 training_loop """owa""" +747 58 negative_sampler """basic""" +747 58 evaluator """rankbased""" +747 59 dataset """kinships""" +747 59 model """transd""" +747 59 loss """bceaftersigmoid""" +747 59 regularizer """no""" +747 59 optimizer """adadelta""" +747 59 training_loop """owa""" +747 59 negative_sampler """basic""" +747 59 evaluator """rankbased""" +747 60 dataset """kinships""" +747 60 model """transd""" +747 60 loss """bceaftersigmoid""" +747 60 regularizer """no""" +747 60 optimizer """adadelta""" +747 60 training_loop """owa""" +747 60 negative_sampler """basic""" +747 60 evaluator """rankbased""" +747 61 dataset """kinships""" +747 61 model """transd""" +747 61 loss """bceaftersigmoid""" +747 61 regularizer """no""" +747 61 optimizer """adadelta""" +747 61 training_loop """owa""" +747 61 negative_sampler """basic""" +747 61 evaluator """rankbased""" +747 62 dataset """kinships""" +747 62 model """transd""" +747 62 loss """bceaftersigmoid""" +747 62 regularizer """no""" +747 62 optimizer """adadelta""" +747 62 training_loop """owa""" +747 62 negative_sampler """basic""" +747 62 evaluator """rankbased""" +747 63 dataset """kinships""" +747 63 model """transd""" +747 63 loss """bceaftersigmoid""" +747 63 regularizer """no""" +747 63 optimizer """adadelta""" +747 63 training_loop """owa""" +747 63 negative_sampler """basic""" 
+747 63 evaluator """rankbased""" +747 64 dataset """kinships""" +747 64 model """transd""" +747 64 loss """bceaftersigmoid""" +747 64 regularizer """no""" +747 64 optimizer """adadelta""" +747 64 training_loop """owa""" +747 64 negative_sampler """basic""" +747 64 evaluator """rankbased""" +747 65 dataset """kinships""" +747 65 model """transd""" +747 65 loss """bceaftersigmoid""" +747 65 regularizer """no""" +747 65 optimizer """adadelta""" +747 65 training_loop """owa""" +747 65 negative_sampler """basic""" +747 65 evaluator """rankbased""" +747 66 dataset """kinships""" +747 66 model """transd""" +747 66 loss """bceaftersigmoid""" +747 66 regularizer """no""" +747 66 optimizer """adadelta""" +747 66 training_loop """owa""" +747 66 negative_sampler """basic""" +747 66 evaluator """rankbased""" +747 67 dataset """kinships""" +747 67 model """transd""" +747 67 loss """bceaftersigmoid""" +747 67 regularizer """no""" +747 67 optimizer """adadelta""" +747 67 training_loop """owa""" +747 67 negative_sampler """basic""" +747 67 evaluator """rankbased""" +747 68 dataset """kinships""" +747 68 model """transd""" +747 68 loss """bceaftersigmoid""" +747 68 regularizer """no""" +747 68 optimizer """adadelta""" +747 68 training_loop """owa""" +747 68 negative_sampler """basic""" +747 68 evaluator """rankbased""" +747 69 dataset """kinships""" +747 69 model """transd""" +747 69 loss """bceaftersigmoid""" +747 69 regularizer """no""" +747 69 optimizer """adadelta""" +747 69 training_loop """owa""" +747 69 negative_sampler """basic""" +747 69 evaluator """rankbased""" +747 70 dataset """kinships""" +747 70 model """transd""" +747 70 loss """bceaftersigmoid""" +747 70 regularizer """no""" +747 70 optimizer """adadelta""" +747 70 training_loop """owa""" +747 70 negative_sampler """basic""" +747 70 evaluator """rankbased""" +747 71 dataset """kinships""" +747 71 model """transd""" +747 71 loss """bceaftersigmoid""" +747 71 regularizer """no""" +747 71 optimizer """adadelta""" +747 
71 training_loop """owa""" +747 71 negative_sampler """basic""" +747 71 evaluator """rankbased""" +747 72 dataset """kinships""" +747 72 model """transd""" +747 72 loss """bceaftersigmoid""" +747 72 regularizer """no""" +747 72 optimizer """adadelta""" +747 72 training_loop """owa""" +747 72 negative_sampler """basic""" +747 72 evaluator """rankbased""" +747 73 dataset """kinships""" +747 73 model """transd""" +747 73 loss """bceaftersigmoid""" +747 73 regularizer """no""" +747 73 optimizer """adadelta""" +747 73 training_loop """owa""" +747 73 negative_sampler """basic""" +747 73 evaluator """rankbased""" +747 74 dataset """kinships""" +747 74 model """transd""" +747 74 loss """bceaftersigmoid""" +747 74 regularizer """no""" +747 74 optimizer """adadelta""" +747 74 training_loop """owa""" +747 74 negative_sampler """basic""" +747 74 evaluator """rankbased""" +747 75 dataset """kinships""" +747 75 model """transd""" +747 75 loss """bceaftersigmoid""" +747 75 regularizer """no""" +747 75 optimizer """adadelta""" +747 75 training_loop """owa""" +747 75 negative_sampler """basic""" +747 75 evaluator """rankbased""" +747 76 dataset """kinships""" +747 76 model """transd""" +747 76 loss """bceaftersigmoid""" +747 76 regularizer """no""" +747 76 optimizer """adadelta""" +747 76 training_loop """owa""" +747 76 negative_sampler """basic""" +747 76 evaluator """rankbased""" +747 77 dataset """kinships""" +747 77 model """transd""" +747 77 loss """bceaftersigmoid""" +747 77 regularizer """no""" +747 77 optimizer """adadelta""" +747 77 training_loop """owa""" +747 77 negative_sampler """basic""" +747 77 evaluator """rankbased""" +747 78 dataset """kinships""" +747 78 model """transd""" +747 78 loss """bceaftersigmoid""" +747 78 regularizer """no""" +747 78 optimizer """adadelta""" +747 78 training_loop """owa""" +747 78 negative_sampler """basic""" +747 78 evaluator """rankbased""" +747 79 dataset """kinships""" +747 79 model """transd""" +747 79 loss """bceaftersigmoid""" 
+747 79 regularizer """no""" +747 79 optimizer """adadelta""" +747 79 training_loop """owa""" +747 79 negative_sampler """basic""" +747 79 evaluator """rankbased""" +747 80 dataset """kinships""" +747 80 model """transd""" +747 80 loss """bceaftersigmoid""" +747 80 regularizer """no""" +747 80 optimizer """adadelta""" +747 80 training_loop """owa""" +747 80 negative_sampler """basic""" +747 80 evaluator """rankbased""" +747 81 dataset """kinships""" +747 81 model """transd""" +747 81 loss """bceaftersigmoid""" +747 81 regularizer """no""" +747 81 optimizer """adadelta""" +747 81 training_loop """owa""" +747 81 negative_sampler """basic""" +747 81 evaluator """rankbased""" +747 82 dataset """kinships""" +747 82 model """transd""" +747 82 loss """bceaftersigmoid""" +747 82 regularizer """no""" +747 82 optimizer """adadelta""" +747 82 training_loop """owa""" +747 82 negative_sampler """basic""" +747 82 evaluator """rankbased""" +747 83 dataset """kinships""" +747 83 model """transd""" +747 83 loss """bceaftersigmoid""" +747 83 regularizer """no""" +747 83 optimizer """adadelta""" +747 83 training_loop """owa""" +747 83 negative_sampler """basic""" +747 83 evaluator """rankbased""" +747 84 dataset """kinships""" +747 84 model """transd""" +747 84 loss """bceaftersigmoid""" +747 84 regularizer """no""" +747 84 optimizer """adadelta""" +747 84 training_loop """owa""" +747 84 negative_sampler """basic""" +747 84 evaluator """rankbased""" +747 85 dataset """kinships""" +747 85 model """transd""" +747 85 loss """bceaftersigmoid""" +747 85 regularizer """no""" +747 85 optimizer """adadelta""" +747 85 training_loop """owa""" +747 85 negative_sampler """basic""" +747 85 evaluator """rankbased""" +747 86 dataset """kinships""" +747 86 model """transd""" +747 86 loss """bceaftersigmoid""" +747 86 regularizer """no""" +747 86 optimizer """adadelta""" +747 86 training_loop """owa""" +747 86 negative_sampler """basic""" +747 86 evaluator """rankbased""" +747 87 dataset 
"""kinships""" +747 87 model """transd""" +747 87 loss """bceaftersigmoid""" +747 87 regularizer """no""" +747 87 optimizer """adadelta""" +747 87 training_loop """owa""" +747 87 negative_sampler """basic""" +747 87 evaluator """rankbased""" +747 88 dataset """kinships""" +747 88 model """transd""" +747 88 loss """bceaftersigmoid""" +747 88 regularizer """no""" +747 88 optimizer """adadelta""" +747 88 training_loop """owa""" +747 88 negative_sampler """basic""" +747 88 evaluator """rankbased""" +747 89 dataset """kinships""" +747 89 model """transd""" +747 89 loss """bceaftersigmoid""" +747 89 regularizer """no""" +747 89 optimizer """adadelta""" +747 89 training_loop """owa""" +747 89 negative_sampler """basic""" +747 89 evaluator """rankbased""" +747 90 dataset """kinships""" +747 90 model """transd""" +747 90 loss """bceaftersigmoid""" +747 90 regularizer """no""" +747 90 optimizer """adadelta""" +747 90 training_loop """owa""" +747 90 negative_sampler """basic""" +747 90 evaluator """rankbased""" +747 91 dataset """kinships""" +747 91 model """transd""" +747 91 loss """bceaftersigmoid""" +747 91 regularizer """no""" +747 91 optimizer """adadelta""" +747 91 training_loop """owa""" +747 91 negative_sampler """basic""" +747 91 evaluator """rankbased""" +747 92 dataset """kinships""" +747 92 model """transd""" +747 92 loss """bceaftersigmoid""" +747 92 regularizer """no""" +747 92 optimizer """adadelta""" +747 92 training_loop """owa""" +747 92 negative_sampler """basic""" +747 92 evaluator """rankbased""" +747 93 dataset """kinships""" +747 93 model """transd""" +747 93 loss """bceaftersigmoid""" +747 93 regularizer """no""" +747 93 optimizer """adadelta""" +747 93 training_loop """owa""" +747 93 negative_sampler """basic""" +747 93 evaluator """rankbased""" +747 94 dataset """kinships""" +747 94 model """transd""" +747 94 loss """bceaftersigmoid""" +747 94 regularizer """no""" +747 94 optimizer """adadelta""" +747 94 training_loop """owa""" +747 94 
negative_sampler """basic""" +747 94 evaluator """rankbased""" +747 95 dataset """kinships""" +747 95 model """transd""" +747 95 loss """bceaftersigmoid""" +747 95 regularizer """no""" +747 95 optimizer """adadelta""" +747 95 training_loop """owa""" +747 95 negative_sampler """basic""" +747 95 evaluator """rankbased""" +747 96 dataset """kinships""" +747 96 model """transd""" +747 96 loss """bceaftersigmoid""" +747 96 regularizer """no""" +747 96 optimizer """adadelta""" +747 96 training_loop """owa""" +747 96 negative_sampler """basic""" +747 96 evaluator """rankbased""" +747 97 dataset """kinships""" +747 97 model """transd""" +747 97 loss """bceaftersigmoid""" +747 97 regularizer """no""" +747 97 optimizer """adadelta""" +747 97 training_loop """owa""" +747 97 negative_sampler """basic""" +747 97 evaluator """rankbased""" +747 98 dataset """kinships""" +747 98 model """transd""" +747 98 loss """bceaftersigmoid""" +747 98 regularizer """no""" +747 98 optimizer """adadelta""" +747 98 training_loop """owa""" +747 98 negative_sampler """basic""" +747 98 evaluator """rankbased""" +747 99 dataset """kinships""" +747 99 model """transd""" +747 99 loss """bceaftersigmoid""" +747 99 regularizer """no""" +747 99 optimizer """adadelta""" +747 99 training_loop """owa""" +747 99 negative_sampler """basic""" +747 99 evaluator """rankbased""" +747 100 dataset """kinships""" +747 100 model """transd""" +747 100 loss """bceaftersigmoid""" +747 100 regularizer """no""" +747 100 optimizer """adadelta""" +747 100 training_loop """owa""" +747 100 negative_sampler """basic""" +747 100 evaluator """rankbased""" +748 1 model.embedding_dim 2.0 +748 1 model.relation_dim 1.0 +748 1 negative_sampler.num_negs_per_pos 91.0 +748 1 training.batch_size 1.0 +748 2 model.embedding_dim 2.0 +748 2 model.relation_dim 2.0 +748 2 negative_sampler.num_negs_per_pos 46.0 +748 2 training.batch_size 1.0 +748 3 model.embedding_dim 2.0 +748 3 model.relation_dim 2.0 +748 3 negative_sampler.num_negs_per_pos 
47.0 +748 3 training.batch_size 2.0 +748 4 model.embedding_dim 0.0 +748 4 model.relation_dim 0.0 +748 4 negative_sampler.num_negs_per_pos 81.0 +748 4 training.batch_size 2.0 +748 5 model.embedding_dim 0.0 +748 5 model.relation_dim 2.0 +748 5 negative_sampler.num_negs_per_pos 72.0 +748 5 training.batch_size 0.0 +748 6 model.embedding_dim 2.0 +748 6 model.relation_dim 1.0 +748 6 negative_sampler.num_negs_per_pos 38.0 +748 6 training.batch_size 2.0 +748 7 model.embedding_dim 1.0 +748 7 model.relation_dim 0.0 +748 7 negative_sampler.num_negs_per_pos 59.0 +748 7 training.batch_size 1.0 +748 8 model.embedding_dim 0.0 +748 8 model.relation_dim 0.0 +748 8 negative_sampler.num_negs_per_pos 45.0 +748 8 training.batch_size 2.0 +748 9 model.embedding_dim 2.0 +748 9 model.relation_dim 1.0 +748 9 negative_sampler.num_negs_per_pos 68.0 +748 9 training.batch_size 2.0 +748 10 model.embedding_dim 2.0 +748 10 model.relation_dim 2.0 +748 10 negative_sampler.num_negs_per_pos 36.0 +748 10 training.batch_size 0.0 +748 11 model.embedding_dim 1.0 +748 11 model.relation_dim 1.0 +748 11 negative_sampler.num_negs_per_pos 29.0 +748 11 training.batch_size 1.0 +748 12 model.embedding_dim 1.0 +748 12 model.relation_dim 1.0 +748 12 negative_sampler.num_negs_per_pos 85.0 +748 12 training.batch_size 0.0 +748 13 model.embedding_dim 1.0 +748 13 model.relation_dim 2.0 +748 13 negative_sampler.num_negs_per_pos 17.0 +748 13 training.batch_size 1.0 +748 14 model.embedding_dim 1.0 +748 14 model.relation_dim 0.0 +748 14 negative_sampler.num_negs_per_pos 49.0 +748 14 training.batch_size 1.0 +748 15 model.embedding_dim 1.0 +748 15 model.relation_dim 0.0 +748 15 negative_sampler.num_negs_per_pos 69.0 +748 15 training.batch_size 2.0 +748 16 model.embedding_dim 0.0 +748 16 model.relation_dim 1.0 +748 16 negative_sampler.num_negs_per_pos 77.0 +748 16 training.batch_size 1.0 +748 17 model.embedding_dim 2.0 +748 17 model.relation_dim 0.0 +748 17 negative_sampler.num_negs_per_pos 51.0 +748 17 training.batch_size 2.0 
+748 18 model.embedding_dim 2.0 +748 18 model.relation_dim 2.0 +748 18 negative_sampler.num_negs_per_pos 89.0 +748 18 training.batch_size 1.0 +748 19 model.embedding_dim 2.0 +748 19 model.relation_dim 1.0 +748 19 negative_sampler.num_negs_per_pos 72.0 +748 19 training.batch_size 1.0 +748 20 model.embedding_dim 2.0 +748 20 model.relation_dim 1.0 +748 20 negative_sampler.num_negs_per_pos 0.0 +748 20 training.batch_size 1.0 +748 21 model.embedding_dim 2.0 +748 21 model.relation_dim 2.0 +748 21 negative_sampler.num_negs_per_pos 6.0 +748 21 training.batch_size 2.0 +748 22 model.embedding_dim 2.0 +748 22 model.relation_dim 0.0 +748 22 negative_sampler.num_negs_per_pos 95.0 +748 22 training.batch_size 0.0 +748 23 model.embedding_dim 0.0 +748 23 model.relation_dim 2.0 +748 23 negative_sampler.num_negs_per_pos 44.0 +748 23 training.batch_size 1.0 +748 24 model.embedding_dim 0.0 +748 24 model.relation_dim 2.0 +748 24 negative_sampler.num_negs_per_pos 11.0 +748 24 training.batch_size 0.0 +748 25 model.embedding_dim 1.0 +748 25 model.relation_dim 1.0 +748 25 negative_sampler.num_negs_per_pos 87.0 +748 25 training.batch_size 2.0 +748 26 model.embedding_dim 2.0 +748 26 model.relation_dim 1.0 +748 26 negative_sampler.num_negs_per_pos 52.0 +748 26 training.batch_size 2.0 +748 27 model.embedding_dim 2.0 +748 27 model.relation_dim 0.0 +748 27 negative_sampler.num_negs_per_pos 26.0 +748 27 training.batch_size 0.0 +748 28 model.embedding_dim 2.0 +748 28 model.relation_dim 1.0 +748 28 negative_sampler.num_negs_per_pos 18.0 +748 28 training.batch_size 1.0 +748 29 model.embedding_dim 2.0 +748 29 model.relation_dim 1.0 +748 29 negative_sampler.num_negs_per_pos 11.0 +748 29 training.batch_size 0.0 +748 30 model.embedding_dim 1.0 +748 30 model.relation_dim 2.0 +748 30 negative_sampler.num_negs_per_pos 14.0 +748 30 training.batch_size 0.0 +748 31 model.embedding_dim 2.0 +748 31 model.relation_dim 0.0 +748 31 negative_sampler.num_negs_per_pos 22.0 +748 31 training.batch_size 1.0 +748 32 
model.embedding_dim 0.0 +748 32 model.relation_dim 1.0 +748 32 negative_sampler.num_negs_per_pos 81.0 +748 32 training.batch_size 0.0 +748 33 model.embedding_dim 0.0 +748 33 model.relation_dim 2.0 +748 33 negative_sampler.num_negs_per_pos 13.0 +748 33 training.batch_size 0.0 +748 34 model.embedding_dim 2.0 +748 34 model.relation_dim 0.0 +748 34 negative_sampler.num_negs_per_pos 65.0 +748 34 training.batch_size 0.0 +748 35 model.embedding_dim 0.0 +748 35 model.relation_dim 2.0 +748 35 negative_sampler.num_negs_per_pos 81.0 +748 35 training.batch_size 0.0 +748 36 model.embedding_dim 0.0 +748 36 model.relation_dim 0.0 +748 36 negative_sampler.num_negs_per_pos 89.0 +748 36 training.batch_size 1.0 +748 37 model.embedding_dim 2.0 +748 37 model.relation_dim 0.0 +748 37 negative_sampler.num_negs_per_pos 30.0 +748 37 training.batch_size 2.0 +748 38 model.embedding_dim 1.0 +748 38 model.relation_dim 2.0 +748 38 negative_sampler.num_negs_per_pos 15.0 +748 38 training.batch_size 0.0 +748 39 model.embedding_dim 2.0 +748 39 model.relation_dim 1.0 +748 39 negative_sampler.num_negs_per_pos 44.0 +748 39 training.batch_size 0.0 +748 40 model.embedding_dim 0.0 +748 40 model.relation_dim 0.0 +748 40 negative_sampler.num_negs_per_pos 6.0 +748 40 training.batch_size 0.0 +748 41 model.embedding_dim 1.0 +748 41 model.relation_dim 2.0 +748 41 negative_sampler.num_negs_per_pos 42.0 +748 41 training.batch_size 1.0 +748 42 model.embedding_dim 2.0 +748 42 model.relation_dim 2.0 +748 42 negative_sampler.num_negs_per_pos 94.0 +748 42 training.batch_size 0.0 +748 43 model.embedding_dim 0.0 +748 43 model.relation_dim 2.0 +748 43 negative_sampler.num_negs_per_pos 37.0 +748 43 training.batch_size 1.0 +748 44 model.embedding_dim 2.0 +748 44 model.relation_dim 1.0 +748 44 negative_sampler.num_negs_per_pos 80.0 +748 44 training.batch_size 2.0 +748 45 model.embedding_dim 1.0 +748 45 model.relation_dim 1.0 +748 45 negative_sampler.num_negs_per_pos 89.0 +748 45 training.batch_size 0.0 +748 46 
model.embedding_dim 0.0 +748 46 model.relation_dim 0.0 +748 46 negative_sampler.num_negs_per_pos 87.0 +748 46 training.batch_size 2.0 +748 47 model.embedding_dim 1.0 +748 47 model.relation_dim 0.0 +748 47 negative_sampler.num_negs_per_pos 41.0 +748 47 training.batch_size 1.0 +748 48 model.embedding_dim 1.0 +748 48 model.relation_dim 0.0 +748 48 negative_sampler.num_negs_per_pos 58.0 +748 48 training.batch_size 2.0 +748 49 model.embedding_dim 0.0 +748 49 model.relation_dim 0.0 +748 49 negative_sampler.num_negs_per_pos 14.0 +748 49 training.batch_size 1.0 +748 50 model.embedding_dim 2.0 +748 50 model.relation_dim 0.0 +748 50 negative_sampler.num_negs_per_pos 13.0 +748 50 training.batch_size 2.0 +748 51 model.embedding_dim 1.0 +748 51 model.relation_dim 1.0 +748 51 negative_sampler.num_negs_per_pos 63.0 +748 51 training.batch_size 2.0 +748 52 model.embedding_dim 0.0 +748 52 model.relation_dim 0.0 +748 52 negative_sampler.num_negs_per_pos 30.0 +748 52 training.batch_size 1.0 +748 53 model.embedding_dim 0.0 +748 53 model.relation_dim 0.0 +748 53 negative_sampler.num_negs_per_pos 50.0 +748 53 training.batch_size 0.0 +748 54 model.embedding_dim 2.0 +748 54 model.relation_dim 1.0 +748 54 negative_sampler.num_negs_per_pos 52.0 +748 54 training.batch_size 0.0 +748 55 model.embedding_dim 2.0 +748 55 model.relation_dim 1.0 +748 55 negative_sampler.num_negs_per_pos 74.0 +748 55 training.batch_size 2.0 +748 56 model.embedding_dim 0.0 +748 56 model.relation_dim 1.0 +748 56 negative_sampler.num_negs_per_pos 85.0 +748 56 training.batch_size 0.0 +748 57 model.embedding_dim 1.0 +748 57 model.relation_dim 0.0 +748 57 negative_sampler.num_negs_per_pos 91.0 +748 57 training.batch_size 1.0 +748 58 model.embedding_dim 2.0 +748 58 model.relation_dim 1.0 +748 58 negative_sampler.num_negs_per_pos 76.0 +748 58 training.batch_size 2.0 +748 59 model.embedding_dim 2.0 +748 59 model.relation_dim 2.0 +748 59 negative_sampler.num_negs_per_pos 20.0 +748 59 training.batch_size 2.0 +748 60 
model.embedding_dim 2.0 +748 60 model.relation_dim 0.0 +748 60 negative_sampler.num_negs_per_pos 55.0 +748 60 training.batch_size 0.0 +748 61 model.embedding_dim 0.0 +748 61 model.relation_dim 1.0 +748 61 negative_sampler.num_negs_per_pos 98.0 +748 61 training.batch_size 2.0 +748 62 model.embedding_dim 1.0 +748 62 model.relation_dim 1.0 +748 62 negative_sampler.num_negs_per_pos 43.0 +748 62 training.batch_size 2.0 +748 63 model.embedding_dim 2.0 +748 63 model.relation_dim 1.0 +748 63 negative_sampler.num_negs_per_pos 38.0 +748 63 training.batch_size 1.0 +748 64 model.embedding_dim 0.0 +748 64 model.relation_dim 1.0 +748 64 negative_sampler.num_negs_per_pos 14.0 +748 64 training.batch_size 2.0 +748 65 model.embedding_dim 1.0 +748 65 model.relation_dim 0.0 +748 65 negative_sampler.num_negs_per_pos 17.0 +748 65 training.batch_size 1.0 +748 66 model.embedding_dim 0.0 +748 66 model.relation_dim 1.0 +748 66 negative_sampler.num_negs_per_pos 56.0 +748 66 training.batch_size 2.0 +748 67 model.embedding_dim 0.0 +748 67 model.relation_dim 0.0 +748 67 negative_sampler.num_negs_per_pos 48.0 +748 67 training.batch_size 1.0 +748 68 model.embedding_dim 1.0 +748 68 model.relation_dim 0.0 +748 68 negative_sampler.num_negs_per_pos 52.0 +748 68 training.batch_size 2.0 +748 69 model.embedding_dim 2.0 +748 69 model.relation_dim 2.0 +748 69 negative_sampler.num_negs_per_pos 12.0 +748 69 training.batch_size 1.0 +748 70 model.embedding_dim 1.0 +748 70 model.relation_dim 2.0 +748 70 negative_sampler.num_negs_per_pos 64.0 +748 70 training.batch_size 0.0 +748 71 model.embedding_dim 1.0 +748 71 model.relation_dim 0.0 +748 71 negative_sampler.num_negs_per_pos 64.0 +748 71 training.batch_size 1.0 +748 72 model.embedding_dim 2.0 +748 72 model.relation_dim 1.0 +748 72 negative_sampler.num_negs_per_pos 20.0 +748 72 training.batch_size 1.0 +748 73 model.embedding_dim 1.0 +748 73 model.relation_dim 1.0 +748 73 negative_sampler.num_negs_per_pos 62.0 +748 73 training.batch_size 0.0 +748 74 
model.embedding_dim 0.0 +748 74 model.relation_dim 1.0 +748 74 negative_sampler.num_negs_per_pos 58.0 +748 74 training.batch_size 2.0 +748 75 model.embedding_dim 0.0 +748 75 model.relation_dim 0.0 +748 75 negative_sampler.num_negs_per_pos 82.0 +748 75 training.batch_size 1.0 +748 76 model.embedding_dim 2.0 +748 76 model.relation_dim 0.0 +748 76 negative_sampler.num_negs_per_pos 49.0 +748 76 training.batch_size 0.0 +748 77 model.embedding_dim 2.0 +748 77 model.relation_dim 0.0 +748 77 negative_sampler.num_negs_per_pos 17.0 +748 77 training.batch_size 2.0 +748 78 model.embedding_dim 2.0 +748 78 model.relation_dim 0.0 +748 78 negative_sampler.num_negs_per_pos 16.0 +748 78 training.batch_size 1.0 +748 79 model.embedding_dim 2.0 +748 79 model.relation_dim 1.0 +748 79 negative_sampler.num_negs_per_pos 7.0 +748 79 training.batch_size 2.0 +748 80 model.embedding_dim 0.0 +748 80 model.relation_dim 1.0 +748 80 negative_sampler.num_negs_per_pos 87.0 +748 80 training.batch_size 0.0 +748 81 model.embedding_dim 1.0 +748 81 model.relation_dim 0.0 +748 81 negative_sampler.num_negs_per_pos 45.0 +748 81 training.batch_size 1.0 +748 82 model.embedding_dim 0.0 +748 82 model.relation_dim 0.0 +748 82 negative_sampler.num_negs_per_pos 0.0 +748 82 training.batch_size 0.0 +748 83 model.embedding_dim 1.0 +748 83 model.relation_dim 1.0 +748 83 negative_sampler.num_negs_per_pos 32.0 +748 83 training.batch_size 0.0 +748 84 model.embedding_dim 2.0 +748 84 model.relation_dim 1.0 +748 84 negative_sampler.num_negs_per_pos 57.0 +748 84 training.batch_size 2.0 +748 85 model.embedding_dim 1.0 +748 85 model.relation_dim 0.0 +748 85 negative_sampler.num_negs_per_pos 6.0 +748 85 training.batch_size 2.0 +748 86 model.embedding_dim 2.0 +748 86 model.relation_dim 2.0 +748 86 negative_sampler.num_negs_per_pos 59.0 +748 86 training.batch_size 1.0 +748 87 model.embedding_dim 0.0 +748 87 model.relation_dim 2.0 +748 87 negative_sampler.num_negs_per_pos 9.0 +748 87 training.batch_size 2.0 +748 88 
model.embedding_dim 1.0 +748 88 model.relation_dim 0.0 +748 88 negative_sampler.num_negs_per_pos 65.0 +748 88 training.batch_size 1.0 +748 89 model.embedding_dim 1.0 +748 89 model.relation_dim 2.0 +748 89 negative_sampler.num_negs_per_pos 66.0 +748 89 training.batch_size 0.0 +748 90 model.embedding_dim 0.0 +748 90 model.relation_dim 2.0 +748 90 negative_sampler.num_negs_per_pos 89.0 +748 90 training.batch_size 2.0 +748 91 model.embedding_dim 2.0 +748 91 model.relation_dim 2.0 +748 91 negative_sampler.num_negs_per_pos 57.0 +748 91 training.batch_size 2.0 +748 92 model.embedding_dim 1.0 +748 92 model.relation_dim 1.0 +748 92 negative_sampler.num_negs_per_pos 71.0 +748 92 training.batch_size 2.0 +748 93 model.embedding_dim 0.0 +748 93 model.relation_dim 0.0 +748 93 negative_sampler.num_negs_per_pos 91.0 +748 93 training.batch_size 1.0 +748 94 model.embedding_dim 2.0 +748 94 model.relation_dim 0.0 +748 94 negative_sampler.num_negs_per_pos 16.0 +748 94 training.batch_size 0.0 +748 95 model.embedding_dim 2.0 +748 95 model.relation_dim 1.0 +748 95 negative_sampler.num_negs_per_pos 32.0 +748 95 training.batch_size 0.0 +748 96 model.embedding_dim 1.0 +748 96 model.relation_dim 0.0 +748 96 negative_sampler.num_negs_per_pos 77.0 +748 96 training.batch_size 1.0 +748 97 model.embedding_dim 2.0 +748 97 model.relation_dim 1.0 +748 97 negative_sampler.num_negs_per_pos 62.0 +748 97 training.batch_size 1.0 +748 98 model.embedding_dim 0.0 +748 98 model.relation_dim 1.0 +748 98 negative_sampler.num_negs_per_pos 42.0 +748 98 training.batch_size 0.0 +748 99 model.embedding_dim 0.0 +748 99 model.relation_dim 0.0 +748 99 negative_sampler.num_negs_per_pos 53.0 +748 99 training.batch_size 1.0 +748 100 model.embedding_dim 1.0 +748 100 model.relation_dim 2.0 +748 100 negative_sampler.num_negs_per_pos 62.0 +748 100 training.batch_size 2.0 +748 1 dataset """kinships""" +748 1 model """transd""" +748 1 loss """softplus""" +748 1 regularizer """no""" +748 1 optimizer """adadelta""" +748 1 
training_loop """owa""" +748 1 negative_sampler """basic""" +748 1 evaluator """rankbased""" +748 2 dataset """kinships""" +748 2 model """transd""" +748 2 loss """softplus""" +748 2 regularizer """no""" +748 2 optimizer """adadelta""" +748 2 training_loop """owa""" +748 2 negative_sampler """basic""" +748 2 evaluator """rankbased""" +748 3 dataset """kinships""" +748 3 model """transd""" +748 3 loss """softplus""" +748 3 regularizer """no""" +748 3 optimizer """adadelta""" +748 3 training_loop """owa""" +748 3 negative_sampler """basic""" +748 3 evaluator """rankbased""" +748 4 dataset """kinships""" +748 4 model """transd""" +748 4 loss """softplus""" +748 4 regularizer """no""" +748 4 optimizer """adadelta""" +748 4 training_loop """owa""" +748 4 negative_sampler """basic""" +748 4 evaluator """rankbased""" +748 5 dataset """kinships""" +748 5 model """transd""" +748 5 loss """softplus""" +748 5 regularizer """no""" +748 5 optimizer """adadelta""" +748 5 training_loop """owa""" +748 5 negative_sampler """basic""" +748 5 evaluator """rankbased""" +748 6 dataset """kinships""" +748 6 model """transd""" +748 6 loss """softplus""" +748 6 regularizer """no""" +748 6 optimizer """adadelta""" +748 6 training_loop """owa""" +748 6 negative_sampler """basic""" +748 6 evaluator """rankbased""" +748 7 dataset """kinships""" +748 7 model """transd""" +748 7 loss """softplus""" +748 7 regularizer """no""" +748 7 optimizer """adadelta""" +748 7 training_loop """owa""" +748 7 negative_sampler """basic""" +748 7 evaluator """rankbased""" +748 8 dataset """kinships""" +748 8 model """transd""" +748 8 loss """softplus""" +748 8 regularizer """no""" +748 8 optimizer """adadelta""" +748 8 training_loop """owa""" +748 8 negative_sampler """basic""" +748 8 evaluator """rankbased""" +748 9 dataset """kinships""" +748 9 model """transd""" +748 9 loss """softplus""" +748 9 regularizer """no""" +748 9 optimizer """adadelta""" +748 9 training_loop """owa""" +748 9 negative_sampler 
"""basic""" +748 9 evaluator """rankbased""" +748 10 dataset """kinships""" +748 10 model """transd""" +748 10 loss """softplus""" +748 10 regularizer """no""" +748 10 optimizer """adadelta""" +748 10 training_loop """owa""" +748 10 negative_sampler """basic""" +748 10 evaluator """rankbased""" +748 11 dataset """kinships""" +748 11 model """transd""" +748 11 loss """softplus""" +748 11 regularizer """no""" +748 11 optimizer """adadelta""" +748 11 training_loop """owa""" +748 11 negative_sampler """basic""" +748 11 evaluator """rankbased""" +748 12 dataset """kinships""" +748 12 model """transd""" +748 12 loss """softplus""" +748 12 regularizer """no""" +748 12 optimizer """adadelta""" +748 12 training_loop """owa""" +748 12 negative_sampler """basic""" +748 12 evaluator """rankbased""" +748 13 dataset """kinships""" +748 13 model """transd""" +748 13 loss """softplus""" +748 13 regularizer """no""" +748 13 optimizer """adadelta""" +748 13 training_loop """owa""" +748 13 negative_sampler """basic""" +748 13 evaluator """rankbased""" +748 14 dataset """kinships""" +748 14 model """transd""" +748 14 loss """softplus""" +748 14 regularizer """no""" +748 14 optimizer """adadelta""" +748 14 training_loop """owa""" +748 14 negative_sampler """basic""" +748 14 evaluator """rankbased""" +748 15 dataset """kinships""" +748 15 model """transd""" +748 15 loss """softplus""" +748 15 regularizer """no""" +748 15 optimizer """adadelta""" +748 15 training_loop """owa""" +748 15 negative_sampler """basic""" +748 15 evaluator """rankbased""" +748 16 dataset """kinships""" +748 16 model """transd""" +748 16 loss """softplus""" +748 16 regularizer """no""" +748 16 optimizer """adadelta""" +748 16 training_loop """owa""" +748 16 negative_sampler """basic""" +748 16 evaluator """rankbased""" +748 17 dataset """kinships""" +748 17 model """transd""" +748 17 loss """softplus""" +748 17 regularizer """no""" +748 17 optimizer """adadelta""" +748 17 training_loop """owa""" +748 17 
negative_sampler """basic""" +748 17 evaluator """rankbased""" +748 18 dataset """kinships""" +748 18 model """transd""" +748 18 loss """softplus""" +748 18 regularizer """no""" +748 18 optimizer """adadelta""" +748 18 training_loop """owa""" +748 18 negative_sampler """basic""" +748 18 evaluator """rankbased""" +748 19 dataset """kinships""" +748 19 model """transd""" +748 19 loss """softplus""" +748 19 regularizer """no""" +748 19 optimizer """adadelta""" +748 19 training_loop """owa""" +748 19 negative_sampler """basic""" +748 19 evaluator """rankbased""" +748 20 dataset """kinships""" +748 20 model """transd""" +748 20 loss """softplus""" +748 20 regularizer """no""" +748 20 optimizer """adadelta""" +748 20 training_loop """owa""" +748 20 negative_sampler """basic""" +748 20 evaluator """rankbased""" +748 21 dataset """kinships""" +748 21 model """transd""" +748 21 loss """softplus""" +748 21 regularizer """no""" +748 21 optimizer """adadelta""" +748 21 training_loop """owa""" +748 21 negative_sampler """basic""" +748 21 evaluator """rankbased""" +748 22 dataset """kinships""" +748 22 model """transd""" +748 22 loss """softplus""" +748 22 regularizer """no""" +748 22 optimizer """adadelta""" +748 22 training_loop """owa""" +748 22 negative_sampler """basic""" +748 22 evaluator """rankbased""" +748 23 dataset """kinships""" +748 23 model """transd""" +748 23 loss """softplus""" +748 23 regularizer """no""" +748 23 optimizer """adadelta""" +748 23 training_loop """owa""" +748 23 negative_sampler """basic""" +748 23 evaluator """rankbased""" +748 24 dataset """kinships""" +748 24 model """transd""" +748 24 loss """softplus""" +748 24 regularizer """no""" +748 24 optimizer """adadelta""" +748 24 training_loop """owa""" +748 24 negative_sampler """basic""" +748 24 evaluator """rankbased""" +748 25 dataset """kinships""" +748 25 model """transd""" +748 25 loss """softplus""" +748 25 regularizer """no""" +748 25 optimizer """adadelta""" +748 25 training_loop """owa""" 
+748 25 negative_sampler """basic""" +748 25 evaluator """rankbased""" +748 26 dataset """kinships""" +748 26 model """transd""" +748 26 loss """softplus""" +748 26 regularizer """no""" +748 26 optimizer """adadelta""" +748 26 training_loop """owa""" +748 26 negative_sampler """basic""" +748 26 evaluator """rankbased""" +748 27 dataset """kinships""" +748 27 model """transd""" +748 27 loss """softplus""" +748 27 regularizer """no""" +748 27 optimizer """adadelta""" +748 27 training_loop """owa""" +748 27 negative_sampler """basic""" +748 27 evaluator """rankbased""" +748 28 dataset """kinships""" +748 28 model """transd""" +748 28 loss """softplus""" +748 28 regularizer """no""" +748 28 optimizer """adadelta""" +748 28 training_loop """owa""" +748 28 negative_sampler """basic""" +748 28 evaluator """rankbased""" +748 29 dataset """kinships""" +748 29 model """transd""" +748 29 loss """softplus""" +748 29 regularizer """no""" +748 29 optimizer """adadelta""" +748 29 training_loop """owa""" +748 29 negative_sampler """basic""" +748 29 evaluator """rankbased""" +748 30 dataset """kinships""" +748 30 model """transd""" +748 30 loss """softplus""" +748 30 regularizer """no""" +748 30 optimizer """adadelta""" +748 30 training_loop """owa""" +748 30 negative_sampler """basic""" +748 30 evaluator """rankbased""" +748 31 dataset """kinships""" +748 31 model """transd""" +748 31 loss """softplus""" +748 31 regularizer """no""" +748 31 optimizer """adadelta""" +748 31 training_loop """owa""" +748 31 negative_sampler """basic""" +748 31 evaluator """rankbased""" +748 32 dataset """kinships""" +748 32 model """transd""" +748 32 loss """softplus""" +748 32 regularizer """no""" +748 32 optimizer """adadelta""" +748 32 training_loop """owa""" +748 32 negative_sampler """basic""" +748 32 evaluator """rankbased""" +748 33 dataset """kinships""" +748 33 model """transd""" +748 33 loss """softplus""" +748 33 regularizer """no""" +748 33 optimizer """adadelta""" +748 33 training_loop 
"""owa""" +748 33 negative_sampler """basic""" +748 33 evaluator """rankbased""" +748 34 dataset """kinships""" +748 34 model """transd""" +748 34 loss """softplus""" +748 34 regularizer """no""" +748 34 optimizer """adadelta""" +748 34 training_loop """owa""" +748 34 negative_sampler """basic""" +748 34 evaluator """rankbased""" +748 35 dataset """kinships""" +748 35 model """transd""" +748 35 loss """softplus""" +748 35 regularizer """no""" +748 35 optimizer """adadelta""" +748 35 training_loop """owa""" +748 35 negative_sampler """basic""" +748 35 evaluator """rankbased""" +748 36 dataset """kinships""" +748 36 model """transd""" +748 36 loss """softplus""" +748 36 regularizer """no""" +748 36 optimizer """adadelta""" +748 36 training_loop """owa""" +748 36 negative_sampler """basic""" +748 36 evaluator """rankbased""" +748 37 dataset """kinships""" +748 37 model """transd""" +748 37 loss """softplus""" +748 37 regularizer """no""" +748 37 optimizer """adadelta""" +748 37 training_loop """owa""" +748 37 negative_sampler """basic""" +748 37 evaluator """rankbased""" +748 38 dataset """kinships""" +748 38 model """transd""" +748 38 loss """softplus""" +748 38 regularizer """no""" +748 38 optimizer """adadelta""" +748 38 training_loop """owa""" +748 38 negative_sampler """basic""" +748 38 evaluator """rankbased""" +748 39 dataset """kinships""" +748 39 model """transd""" +748 39 loss """softplus""" +748 39 regularizer """no""" +748 39 optimizer """adadelta""" +748 39 training_loop """owa""" +748 39 negative_sampler """basic""" +748 39 evaluator """rankbased""" +748 40 dataset """kinships""" +748 40 model """transd""" +748 40 loss """softplus""" +748 40 regularizer """no""" +748 40 optimizer """adadelta""" +748 40 training_loop """owa""" +748 40 negative_sampler """basic""" +748 40 evaluator """rankbased""" +748 41 dataset """kinships""" +748 41 model """transd""" +748 41 loss """softplus""" +748 41 regularizer """no""" +748 41 optimizer """adadelta""" +748 41 
training_loop """owa""" +748 41 negative_sampler """basic""" +748 41 evaluator """rankbased""" +748 42 dataset """kinships""" +748 42 model """transd""" +748 42 loss """softplus""" +748 42 regularizer """no""" +748 42 optimizer """adadelta""" +748 42 training_loop """owa""" +748 42 negative_sampler """basic""" +748 42 evaluator """rankbased""" +748 43 dataset """kinships""" +748 43 model """transd""" +748 43 loss """softplus""" +748 43 regularizer """no""" +748 43 optimizer """adadelta""" +748 43 training_loop """owa""" +748 43 negative_sampler """basic""" +748 43 evaluator """rankbased""" +748 44 dataset """kinships""" +748 44 model """transd""" +748 44 loss """softplus""" +748 44 regularizer """no""" +748 44 optimizer """adadelta""" +748 44 training_loop """owa""" +748 44 negative_sampler """basic""" +748 44 evaluator """rankbased""" +748 45 dataset """kinships""" +748 45 model """transd""" +748 45 loss """softplus""" +748 45 regularizer """no""" +748 45 optimizer """adadelta""" +748 45 training_loop """owa""" +748 45 negative_sampler """basic""" +748 45 evaluator """rankbased""" +748 46 dataset """kinships""" +748 46 model """transd""" +748 46 loss """softplus""" +748 46 regularizer """no""" +748 46 optimizer """adadelta""" +748 46 training_loop """owa""" +748 46 negative_sampler """basic""" +748 46 evaluator """rankbased""" +748 47 dataset """kinships""" +748 47 model """transd""" +748 47 loss """softplus""" +748 47 regularizer """no""" +748 47 optimizer """adadelta""" +748 47 training_loop """owa""" +748 47 negative_sampler """basic""" +748 47 evaluator """rankbased""" +748 48 dataset """kinships""" +748 48 model """transd""" +748 48 loss """softplus""" +748 48 regularizer """no""" +748 48 optimizer """adadelta""" +748 48 training_loop """owa""" +748 48 negative_sampler """basic""" +748 48 evaluator """rankbased""" +748 49 dataset """kinships""" +748 49 model """transd""" +748 49 loss """softplus""" +748 49 regularizer """no""" +748 49 optimizer """adadelta""" 
+748 49 training_loop """owa""" +748 49 negative_sampler """basic""" +748 49 evaluator """rankbased""" +748 50 dataset """kinships""" +748 50 model """transd""" +748 50 loss """softplus""" +748 50 regularizer """no""" +748 50 optimizer """adadelta""" +748 50 training_loop """owa""" +748 50 negative_sampler """basic""" +748 50 evaluator """rankbased""" +748 51 dataset """kinships""" +748 51 model """transd""" +748 51 loss """softplus""" +748 51 regularizer """no""" +748 51 optimizer """adadelta""" +748 51 training_loop """owa""" +748 51 negative_sampler """basic""" +748 51 evaluator """rankbased""" +748 52 dataset """kinships""" +748 52 model """transd""" +748 52 loss """softplus""" +748 52 regularizer """no""" +748 52 optimizer """adadelta""" +748 52 training_loop """owa""" +748 52 negative_sampler """basic""" +748 52 evaluator """rankbased""" +748 53 dataset """kinships""" +748 53 model """transd""" +748 53 loss """softplus""" +748 53 regularizer """no""" +748 53 optimizer """adadelta""" +748 53 training_loop """owa""" +748 53 negative_sampler """basic""" +748 53 evaluator """rankbased""" +748 54 dataset """kinships""" +748 54 model """transd""" +748 54 loss """softplus""" +748 54 regularizer """no""" +748 54 optimizer """adadelta""" +748 54 training_loop """owa""" +748 54 negative_sampler """basic""" +748 54 evaluator """rankbased""" +748 55 dataset """kinships""" +748 55 model """transd""" +748 55 loss """softplus""" +748 55 regularizer """no""" +748 55 optimizer """adadelta""" +748 55 training_loop """owa""" +748 55 negative_sampler """basic""" +748 55 evaluator """rankbased""" +748 56 dataset """kinships""" +748 56 model """transd""" +748 56 loss """softplus""" +748 56 regularizer """no""" +748 56 optimizer """adadelta""" +748 56 training_loop """owa""" +748 56 negative_sampler """basic""" +748 56 evaluator """rankbased""" +748 57 dataset """kinships""" +748 57 model """transd""" +748 57 loss """softplus""" +748 57 regularizer """no""" +748 57 optimizer 
"""adadelta""" +748 57 training_loop """owa""" +748 57 negative_sampler """basic""" +748 57 evaluator """rankbased""" +748 58 dataset """kinships""" +748 58 model """transd""" +748 58 loss """softplus""" +748 58 regularizer """no""" +748 58 optimizer """adadelta""" +748 58 training_loop """owa""" +748 58 negative_sampler """basic""" +748 58 evaluator """rankbased""" +748 59 dataset """kinships""" +748 59 model """transd""" +748 59 loss """softplus""" +748 59 regularizer """no""" +748 59 optimizer """adadelta""" +748 59 training_loop """owa""" +748 59 negative_sampler """basic""" +748 59 evaluator """rankbased""" +748 60 dataset """kinships""" +748 60 model """transd""" +748 60 loss """softplus""" +748 60 regularizer """no""" +748 60 optimizer """adadelta""" +748 60 training_loop """owa""" +748 60 negative_sampler """basic""" +748 60 evaluator """rankbased""" +748 61 dataset """kinships""" +748 61 model """transd""" +748 61 loss """softplus""" +748 61 regularizer """no""" +748 61 optimizer """adadelta""" +748 61 training_loop """owa""" +748 61 negative_sampler """basic""" +748 61 evaluator """rankbased""" +748 62 dataset """kinships""" +748 62 model """transd""" +748 62 loss """softplus""" +748 62 regularizer """no""" +748 62 optimizer """adadelta""" +748 62 training_loop """owa""" +748 62 negative_sampler """basic""" +748 62 evaluator """rankbased""" +748 63 dataset """kinships""" +748 63 model """transd""" +748 63 loss """softplus""" +748 63 regularizer """no""" +748 63 optimizer """adadelta""" +748 63 training_loop """owa""" +748 63 negative_sampler """basic""" +748 63 evaluator """rankbased""" +748 64 dataset """kinships""" +748 64 model """transd""" +748 64 loss """softplus""" +748 64 regularizer """no""" +748 64 optimizer """adadelta""" +748 64 training_loop """owa""" +748 64 negative_sampler """basic""" +748 64 evaluator """rankbased""" +748 65 dataset """kinships""" +748 65 model """transd""" +748 65 loss """softplus""" +748 65 regularizer """no""" +748 65 
optimizer """adadelta""" +748 65 training_loop """owa""" +748 65 negative_sampler """basic""" +748 65 evaluator """rankbased""" +748 66 dataset """kinships""" +748 66 model """transd""" +748 66 loss """softplus""" +748 66 regularizer """no""" +748 66 optimizer """adadelta""" +748 66 training_loop """owa""" +748 66 negative_sampler """basic""" +748 66 evaluator """rankbased""" +748 67 dataset """kinships""" +748 67 model """transd""" +748 67 loss """softplus""" +748 67 regularizer """no""" +748 67 optimizer """adadelta""" +748 67 training_loop """owa""" +748 67 negative_sampler """basic""" +748 67 evaluator """rankbased""" +748 68 dataset """kinships""" +748 68 model """transd""" +748 68 loss """softplus""" +748 68 regularizer """no""" +748 68 optimizer """adadelta""" +748 68 training_loop """owa""" +748 68 negative_sampler """basic""" +748 68 evaluator """rankbased""" +748 69 dataset """kinships""" +748 69 model """transd""" +748 69 loss """softplus""" +748 69 regularizer """no""" +748 69 optimizer """adadelta""" +748 69 training_loop """owa""" +748 69 negative_sampler """basic""" +748 69 evaluator """rankbased""" +748 70 dataset """kinships""" +748 70 model """transd""" +748 70 loss """softplus""" +748 70 regularizer """no""" +748 70 optimizer """adadelta""" +748 70 training_loop """owa""" +748 70 negative_sampler """basic""" +748 70 evaluator """rankbased""" +748 71 dataset """kinships""" +748 71 model """transd""" +748 71 loss """softplus""" +748 71 regularizer """no""" +748 71 optimizer """adadelta""" +748 71 training_loop """owa""" +748 71 negative_sampler """basic""" +748 71 evaluator """rankbased""" +748 72 dataset """kinships""" +748 72 model """transd""" +748 72 loss """softplus""" +748 72 regularizer """no""" +748 72 optimizer """adadelta""" +748 72 training_loop """owa""" +748 72 negative_sampler """basic""" +748 72 evaluator """rankbased""" +748 73 dataset """kinships""" +748 73 model """transd""" +748 73 loss """softplus""" +748 73 regularizer """no""" 
+748 73 optimizer """adadelta""" +748 73 training_loop """owa""" +748 73 negative_sampler """basic""" +748 73 evaluator """rankbased""" +748 74 dataset """kinships""" +748 74 model """transd""" +748 74 loss """softplus""" +748 74 regularizer """no""" +748 74 optimizer """adadelta""" +748 74 training_loop """owa""" +748 74 negative_sampler """basic""" +748 74 evaluator """rankbased""" +748 75 dataset """kinships""" +748 75 model """transd""" +748 75 loss """softplus""" +748 75 regularizer """no""" +748 75 optimizer """adadelta""" +748 75 training_loop """owa""" +748 75 negative_sampler """basic""" +748 75 evaluator """rankbased""" +748 76 dataset """kinships""" +748 76 model """transd""" +748 76 loss """softplus""" +748 76 regularizer """no""" +748 76 optimizer """adadelta""" +748 76 training_loop """owa""" +748 76 negative_sampler """basic""" +748 76 evaluator """rankbased""" +748 77 dataset """kinships""" +748 77 model """transd""" +748 77 loss """softplus""" +748 77 regularizer """no""" +748 77 optimizer """adadelta""" +748 77 training_loop """owa""" +748 77 negative_sampler """basic""" +748 77 evaluator """rankbased""" +748 78 dataset """kinships""" +748 78 model """transd""" +748 78 loss """softplus""" +748 78 regularizer """no""" +748 78 optimizer """adadelta""" +748 78 training_loop """owa""" +748 78 negative_sampler """basic""" +748 78 evaluator """rankbased""" +748 79 dataset """kinships""" +748 79 model """transd""" +748 79 loss """softplus""" +748 79 regularizer """no""" +748 79 optimizer """adadelta""" +748 79 training_loop """owa""" +748 79 negative_sampler """basic""" +748 79 evaluator """rankbased""" +748 80 dataset """kinships""" +748 80 model """transd""" +748 80 loss """softplus""" +748 80 regularizer """no""" +748 80 optimizer """adadelta""" +748 80 training_loop """owa""" +748 80 negative_sampler """basic""" +748 80 evaluator """rankbased""" +748 81 dataset """kinships""" +748 81 model """transd""" +748 81 loss """softplus""" +748 81 regularizer 
"""no""" +748 81 optimizer """adadelta""" +748 81 training_loop """owa""" +748 81 negative_sampler """basic""" +748 81 evaluator """rankbased""" +748 82 dataset """kinships""" +748 82 model """transd""" +748 82 loss """softplus""" +748 82 regularizer """no""" +748 82 optimizer """adadelta""" +748 82 training_loop """owa""" +748 82 negative_sampler """basic""" +748 82 evaluator """rankbased""" +748 83 dataset """kinships""" +748 83 model """transd""" +748 83 loss """softplus""" +748 83 regularizer """no""" +748 83 optimizer """adadelta""" +748 83 training_loop """owa""" +748 83 negative_sampler """basic""" +748 83 evaluator """rankbased""" +748 84 dataset """kinships""" +748 84 model """transd""" +748 84 loss """softplus""" +748 84 regularizer """no""" +748 84 optimizer """adadelta""" +748 84 training_loop """owa""" +748 84 negative_sampler """basic""" +748 84 evaluator """rankbased""" +748 85 dataset """kinships""" +748 85 model """transd""" +748 85 loss """softplus""" +748 85 regularizer """no""" +748 85 optimizer """adadelta""" +748 85 training_loop """owa""" +748 85 negative_sampler """basic""" +748 85 evaluator """rankbased""" +748 86 dataset """kinships""" +748 86 model """transd""" +748 86 loss """softplus""" +748 86 regularizer """no""" +748 86 optimizer """adadelta""" +748 86 training_loop """owa""" +748 86 negative_sampler """basic""" +748 86 evaluator """rankbased""" +748 87 dataset """kinships""" +748 87 model """transd""" +748 87 loss """softplus""" +748 87 regularizer """no""" +748 87 optimizer """adadelta""" +748 87 training_loop """owa""" +748 87 negative_sampler """basic""" +748 87 evaluator """rankbased""" +748 88 dataset """kinships""" +748 88 model """transd""" +748 88 loss """softplus""" +748 88 regularizer """no""" +748 88 optimizer """adadelta""" +748 88 training_loop """owa""" +748 88 negative_sampler """basic""" +748 88 evaluator """rankbased""" +748 89 dataset """kinships""" +748 89 model """transd""" +748 89 loss """softplus""" +748 89 
regularizer """no""" +748 89 optimizer """adadelta""" +748 89 training_loop """owa""" +748 89 negative_sampler """basic""" +748 89 evaluator """rankbased""" +748 90 dataset """kinships""" +748 90 model """transd""" +748 90 loss """softplus""" +748 90 regularizer """no""" +748 90 optimizer """adadelta""" +748 90 training_loop """owa""" +748 90 negative_sampler """basic""" +748 90 evaluator """rankbased""" +748 91 dataset """kinships""" +748 91 model """transd""" +748 91 loss """softplus""" +748 91 regularizer """no""" +748 91 optimizer """adadelta""" +748 91 training_loop """owa""" +748 91 negative_sampler """basic""" +748 91 evaluator """rankbased""" +748 92 dataset """kinships""" +748 92 model """transd""" +748 92 loss """softplus""" +748 92 regularizer """no""" +748 92 optimizer """adadelta""" +748 92 training_loop """owa""" +748 92 negative_sampler """basic""" +748 92 evaluator """rankbased""" +748 93 dataset """kinships""" +748 93 model """transd""" +748 93 loss """softplus""" +748 93 regularizer """no""" +748 93 optimizer """adadelta""" +748 93 training_loop """owa""" +748 93 negative_sampler """basic""" +748 93 evaluator """rankbased""" +748 94 dataset """kinships""" +748 94 model """transd""" +748 94 loss """softplus""" +748 94 regularizer """no""" +748 94 optimizer """adadelta""" +748 94 training_loop """owa""" +748 94 negative_sampler """basic""" +748 94 evaluator """rankbased""" +748 95 dataset """kinships""" +748 95 model """transd""" +748 95 loss """softplus""" +748 95 regularizer """no""" +748 95 optimizer """adadelta""" +748 95 training_loop """owa""" +748 95 negative_sampler """basic""" +748 95 evaluator """rankbased""" +748 96 dataset """kinships""" +748 96 model """transd""" +748 96 loss """softplus""" +748 96 regularizer """no""" +748 96 optimizer """adadelta""" +748 96 training_loop """owa""" +748 96 negative_sampler """basic""" +748 96 evaluator """rankbased""" +748 97 dataset """kinships""" +748 97 model """transd""" +748 97 loss """softplus""" 
+748 97 regularizer """no""" +748 97 optimizer """adadelta""" +748 97 training_loop """owa""" +748 97 negative_sampler """basic""" +748 97 evaluator """rankbased""" +748 98 dataset """kinships""" +748 98 model """transd""" +748 98 loss """softplus""" +748 98 regularizer """no""" +748 98 optimizer """adadelta""" +748 98 training_loop """owa""" +748 98 negative_sampler """basic""" +748 98 evaluator """rankbased""" +748 99 dataset """kinships""" +748 99 model """transd""" +748 99 loss """softplus""" +748 99 regularizer """no""" +748 99 optimizer """adadelta""" +748 99 training_loop """owa""" +748 99 negative_sampler """basic""" +748 99 evaluator """rankbased""" +748 100 dataset """kinships""" +748 100 model """transd""" +748 100 loss """softplus""" +748 100 regularizer """no""" +748 100 optimizer """adadelta""" +748 100 training_loop """owa""" +748 100 negative_sampler """basic""" +748 100 evaluator """rankbased""" +749 1 model.embedding_dim 1.0 +749 1 model.relation_dim 2.0 +749 1 negative_sampler.num_negs_per_pos 73.0 +749 1 training.batch_size 2.0 +749 2 model.embedding_dim 1.0 +749 2 model.relation_dim 2.0 +749 2 negative_sampler.num_negs_per_pos 19.0 +749 2 training.batch_size 2.0 +749 3 model.embedding_dim 2.0 +749 3 model.relation_dim 1.0 +749 3 negative_sampler.num_negs_per_pos 63.0 +749 3 training.batch_size 2.0 +749 4 model.embedding_dim 0.0 +749 4 model.relation_dim 1.0 +749 4 negative_sampler.num_negs_per_pos 37.0 +749 4 training.batch_size 2.0 +749 5 model.embedding_dim 0.0 +749 5 model.relation_dim 0.0 +749 5 negative_sampler.num_negs_per_pos 89.0 +749 5 training.batch_size 2.0 +749 6 model.embedding_dim 0.0 +749 6 model.relation_dim 1.0 +749 6 negative_sampler.num_negs_per_pos 43.0 +749 6 training.batch_size 1.0 +749 7 model.embedding_dim 2.0 +749 7 model.relation_dim 0.0 +749 7 negative_sampler.num_negs_per_pos 63.0 +749 7 training.batch_size 0.0 +749 8 model.embedding_dim 1.0 +749 8 model.relation_dim 0.0 +749 8 negative_sampler.num_negs_per_pos 36.0 
+749 8 training.batch_size 1.0 +749 9 model.embedding_dim 0.0 +749 9 model.relation_dim 2.0 +749 9 negative_sampler.num_negs_per_pos 45.0 +749 9 training.batch_size 1.0 +749 10 model.embedding_dim 1.0 +749 10 model.relation_dim 2.0 +749 10 negative_sampler.num_negs_per_pos 82.0 +749 10 training.batch_size 1.0 +749 11 model.embedding_dim 2.0 +749 11 model.relation_dim 1.0 +749 11 negative_sampler.num_negs_per_pos 74.0 +749 11 training.batch_size 1.0 +749 12 model.embedding_dim 2.0 +749 12 model.relation_dim 2.0 +749 12 negative_sampler.num_negs_per_pos 54.0 +749 12 training.batch_size 1.0 +749 13 model.embedding_dim 2.0 +749 13 model.relation_dim 1.0 +749 13 negative_sampler.num_negs_per_pos 71.0 +749 13 training.batch_size 2.0 +749 14 model.embedding_dim 2.0 +749 14 model.relation_dim 2.0 +749 14 negative_sampler.num_negs_per_pos 69.0 +749 14 training.batch_size 2.0 +749 15 model.embedding_dim 2.0 +749 15 model.relation_dim 0.0 +749 15 negative_sampler.num_negs_per_pos 25.0 +749 15 training.batch_size 0.0 +749 16 model.embedding_dim 1.0 +749 16 model.relation_dim 0.0 +749 16 negative_sampler.num_negs_per_pos 27.0 +749 16 training.batch_size 2.0 +749 17 model.embedding_dim 2.0 +749 17 model.relation_dim 1.0 +749 17 negative_sampler.num_negs_per_pos 78.0 +749 17 training.batch_size 1.0 +749 18 model.embedding_dim 2.0 +749 18 model.relation_dim 1.0 +749 18 negative_sampler.num_negs_per_pos 23.0 +749 18 training.batch_size 2.0 +749 19 model.embedding_dim 0.0 +749 19 model.relation_dim 1.0 +749 19 negative_sampler.num_negs_per_pos 60.0 +749 19 training.batch_size 2.0 +749 20 model.embedding_dim 2.0 +749 20 model.relation_dim 0.0 +749 20 negative_sampler.num_negs_per_pos 64.0 +749 20 training.batch_size 0.0 +749 21 model.embedding_dim 2.0 +749 21 model.relation_dim 1.0 +749 21 negative_sampler.num_negs_per_pos 64.0 +749 21 training.batch_size 0.0 +749 22 model.embedding_dim 1.0 +749 22 model.relation_dim 2.0 +749 22 negative_sampler.num_negs_per_pos 64.0 +749 22 
training.batch_size 1.0 +749 23 model.embedding_dim 0.0 +749 23 model.relation_dim 0.0 +749 23 negative_sampler.num_negs_per_pos 78.0 +749 23 training.batch_size 2.0 +749 24 model.embedding_dim 1.0 +749 24 model.relation_dim 2.0 +749 24 negative_sampler.num_negs_per_pos 42.0 +749 24 training.batch_size 0.0 +749 25 model.embedding_dim 1.0 +749 25 model.relation_dim 1.0 +749 25 negative_sampler.num_negs_per_pos 20.0 +749 25 training.batch_size 2.0 +749 26 model.embedding_dim 1.0 +749 26 model.relation_dim 0.0 +749 26 negative_sampler.num_negs_per_pos 0.0 +749 26 training.batch_size 2.0 +749 27 model.embedding_dim 2.0 +749 27 model.relation_dim 0.0 +749 27 negative_sampler.num_negs_per_pos 89.0 +749 27 training.batch_size 0.0 +749 28 model.embedding_dim 0.0 +749 28 model.relation_dim 0.0 +749 28 negative_sampler.num_negs_per_pos 26.0 +749 28 training.batch_size 1.0 +749 29 model.embedding_dim 2.0 +749 29 model.relation_dim 0.0 +749 29 negative_sampler.num_negs_per_pos 5.0 +749 29 training.batch_size 1.0 +749 30 model.embedding_dim 2.0 +749 30 model.relation_dim 0.0 +749 30 negative_sampler.num_negs_per_pos 52.0 +749 30 training.batch_size 0.0 +749 31 model.embedding_dim 2.0 +749 31 model.relation_dim 1.0 +749 31 negative_sampler.num_negs_per_pos 96.0 +749 31 training.batch_size 2.0 +749 32 model.embedding_dim 0.0 +749 32 model.relation_dim 1.0 +749 32 negative_sampler.num_negs_per_pos 51.0 +749 32 training.batch_size 2.0 +749 33 model.embedding_dim 2.0 +749 33 model.relation_dim 0.0 +749 33 negative_sampler.num_negs_per_pos 69.0 +749 33 training.batch_size 0.0 +749 34 model.embedding_dim 2.0 +749 34 model.relation_dim 2.0 +749 34 negative_sampler.num_negs_per_pos 82.0 +749 34 training.batch_size 1.0 +749 35 model.embedding_dim 1.0 +749 35 model.relation_dim 2.0 +749 35 negative_sampler.num_negs_per_pos 0.0 +749 35 training.batch_size 0.0 +749 36 model.embedding_dim 0.0 +749 36 model.relation_dim 2.0 +749 36 negative_sampler.num_negs_per_pos 71.0 +749 36 
training.batch_size 0.0 +749 37 model.embedding_dim 2.0 +749 37 model.relation_dim 1.0 +749 37 negative_sampler.num_negs_per_pos 90.0 +749 37 training.batch_size 2.0 +749 38 model.embedding_dim 0.0 +749 38 model.relation_dim 0.0 +749 38 negative_sampler.num_negs_per_pos 50.0 +749 38 training.batch_size 1.0 +749 39 model.embedding_dim 2.0 +749 39 model.relation_dim 1.0 +749 39 negative_sampler.num_negs_per_pos 18.0 +749 39 training.batch_size 1.0 +749 40 model.embedding_dim 0.0 +749 40 model.relation_dim 0.0 +749 40 negative_sampler.num_negs_per_pos 54.0 +749 40 training.batch_size 0.0 +749 41 model.embedding_dim 0.0 +749 41 model.relation_dim 1.0 +749 41 negative_sampler.num_negs_per_pos 2.0 +749 41 training.batch_size 1.0 +749 42 model.embedding_dim 0.0 +749 42 model.relation_dim 0.0 +749 42 negative_sampler.num_negs_per_pos 52.0 +749 42 training.batch_size 0.0 +749 43 model.embedding_dim 2.0 +749 43 model.relation_dim 1.0 +749 43 negative_sampler.num_negs_per_pos 69.0 +749 43 training.batch_size 0.0 +749 44 model.embedding_dim 1.0 +749 44 model.relation_dim 0.0 +749 44 negative_sampler.num_negs_per_pos 75.0 +749 44 training.batch_size 2.0 +749 45 model.embedding_dim 2.0 +749 45 model.relation_dim 1.0 +749 45 negative_sampler.num_negs_per_pos 0.0 +749 45 training.batch_size 0.0 +749 46 model.embedding_dim 0.0 +749 46 model.relation_dim 1.0 +749 46 negative_sampler.num_negs_per_pos 25.0 +749 46 training.batch_size 1.0 +749 47 model.embedding_dim 2.0 +749 47 model.relation_dim 1.0 +749 47 negative_sampler.num_negs_per_pos 81.0 +749 47 training.batch_size 0.0 +749 48 model.embedding_dim 0.0 +749 48 model.relation_dim 1.0 +749 48 negative_sampler.num_negs_per_pos 79.0 +749 48 training.batch_size 2.0 +749 49 model.embedding_dim 1.0 +749 49 model.relation_dim 2.0 +749 49 negative_sampler.num_negs_per_pos 93.0 +749 49 training.batch_size 0.0 +749 50 model.embedding_dim 0.0 +749 50 model.relation_dim 1.0 +749 50 negative_sampler.num_negs_per_pos 66.0 +749 50 
training.batch_size 2.0 +749 51 model.embedding_dim 2.0 +749 51 model.relation_dim 0.0 +749 51 negative_sampler.num_negs_per_pos 48.0 +749 51 training.batch_size 1.0 +749 52 model.embedding_dim 1.0 +749 52 model.relation_dim 0.0 +749 52 negative_sampler.num_negs_per_pos 34.0 +749 52 training.batch_size 2.0 +749 53 model.embedding_dim 2.0 +749 53 model.relation_dim 1.0 +749 53 negative_sampler.num_negs_per_pos 60.0 +749 53 training.batch_size 0.0 +749 54 model.embedding_dim 1.0 +749 54 model.relation_dim 0.0 +749 54 negative_sampler.num_negs_per_pos 87.0 +749 54 training.batch_size 0.0 +749 55 model.embedding_dim 1.0 +749 55 model.relation_dim 2.0 +749 55 negative_sampler.num_negs_per_pos 38.0 +749 55 training.batch_size 0.0 +749 56 model.embedding_dim 1.0 +749 56 model.relation_dim 1.0 +749 56 negative_sampler.num_negs_per_pos 30.0 +749 56 training.batch_size 0.0 +749 57 model.embedding_dim 1.0 +749 57 model.relation_dim 2.0 +749 57 negative_sampler.num_negs_per_pos 72.0 +749 57 training.batch_size 2.0 +749 58 model.embedding_dim 0.0 +749 58 model.relation_dim 0.0 +749 58 negative_sampler.num_negs_per_pos 60.0 +749 58 training.batch_size 0.0 +749 59 model.embedding_dim 1.0 +749 59 model.relation_dim 0.0 +749 59 negative_sampler.num_negs_per_pos 68.0 +749 59 training.batch_size 1.0 +749 60 model.embedding_dim 0.0 +749 60 model.relation_dim 0.0 +749 60 negative_sampler.num_negs_per_pos 57.0 +749 60 training.batch_size 2.0 +749 61 model.embedding_dim 0.0 +749 61 model.relation_dim 0.0 +749 61 negative_sampler.num_negs_per_pos 62.0 +749 61 training.batch_size 1.0 +749 62 model.embedding_dim 2.0 +749 62 model.relation_dim 2.0 +749 62 negative_sampler.num_negs_per_pos 3.0 +749 62 training.batch_size 0.0 +749 63 model.embedding_dim 2.0 +749 63 model.relation_dim 1.0 +749 63 negative_sampler.num_negs_per_pos 19.0 +749 63 training.batch_size 0.0 +749 64 model.embedding_dim 0.0 +749 64 model.relation_dim 0.0 +749 64 negative_sampler.num_negs_per_pos 1.0 +749 64 
training.batch_size 0.0 +749 65 model.embedding_dim 1.0 +749 65 model.relation_dim 1.0 +749 65 negative_sampler.num_negs_per_pos 42.0 +749 65 training.batch_size 0.0 +749 66 model.embedding_dim 0.0 +749 66 model.relation_dim 0.0 +749 66 negative_sampler.num_negs_per_pos 35.0 +749 66 training.batch_size 2.0 +749 67 model.embedding_dim 0.0 +749 67 model.relation_dim 0.0 +749 67 negative_sampler.num_negs_per_pos 65.0 +749 67 training.batch_size 1.0 +749 68 model.embedding_dim 0.0 +749 68 model.relation_dim 2.0 +749 68 negative_sampler.num_negs_per_pos 58.0 +749 68 training.batch_size 2.0 +749 69 model.embedding_dim 0.0 +749 69 model.relation_dim 1.0 +749 69 negative_sampler.num_negs_per_pos 66.0 +749 69 training.batch_size 0.0 +749 70 model.embedding_dim 0.0 +749 70 model.relation_dim 1.0 +749 70 negative_sampler.num_negs_per_pos 3.0 +749 70 training.batch_size 2.0 +749 71 model.embedding_dim 0.0 +749 71 model.relation_dim 1.0 +749 71 negative_sampler.num_negs_per_pos 23.0 +749 71 training.batch_size 1.0 +749 72 model.embedding_dim 1.0 +749 72 model.relation_dim 2.0 +749 72 negative_sampler.num_negs_per_pos 1.0 +749 72 training.batch_size 0.0 +749 73 model.embedding_dim 2.0 +749 73 model.relation_dim 1.0 +749 73 negative_sampler.num_negs_per_pos 5.0 +749 73 training.batch_size 0.0 +749 74 model.embedding_dim 2.0 +749 74 model.relation_dim 0.0 +749 74 negative_sampler.num_negs_per_pos 30.0 +749 74 training.batch_size 2.0 +749 75 model.embedding_dim 1.0 +749 75 model.relation_dim 0.0 +749 75 negative_sampler.num_negs_per_pos 96.0 +749 75 training.batch_size 2.0 +749 76 model.embedding_dim 2.0 +749 76 model.relation_dim 0.0 +749 76 negative_sampler.num_negs_per_pos 0.0 +749 76 training.batch_size 1.0 +749 77 model.embedding_dim 0.0 +749 77 model.relation_dim 0.0 +749 77 negative_sampler.num_negs_per_pos 93.0 +749 77 training.batch_size 2.0 +749 78 model.embedding_dim 2.0 +749 78 model.relation_dim 2.0 +749 78 negative_sampler.num_negs_per_pos 22.0 +749 78 
training.batch_size 2.0 +749 79 model.embedding_dim 0.0 +749 79 model.relation_dim 2.0 +749 79 negative_sampler.num_negs_per_pos 90.0 +749 79 training.batch_size 2.0 +749 80 model.embedding_dim 0.0 +749 80 model.relation_dim 0.0 +749 80 negative_sampler.num_negs_per_pos 24.0 +749 80 training.batch_size 2.0 +749 81 model.embedding_dim 1.0 +749 81 model.relation_dim 1.0 +749 81 negative_sampler.num_negs_per_pos 3.0 +749 81 training.batch_size 1.0 +749 82 model.embedding_dim 2.0 +749 82 model.relation_dim 0.0 +749 82 negative_sampler.num_negs_per_pos 50.0 +749 82 training.batch_size 0.0 +749 83 model.embedding_dim 0.0 +749 83 model.relation_dim 2.0 +749 83 negative_sampler.num_negs_per_pos 4.0 +749 83 training.batch_size 0.0 +749 84 model.embedding_dim 2.0 +749 84 model.relation_dim 2.0 +749 84 negative_sampler.num_negs_per_pos 69.0 +749 84 training.batch_size 2.0 +749 85 model.embedding_dim 0.0 +749 85 model.relation_dim 0.0 +749 85 negative_sampler.num_negs_per_pos 24.0 +749 85 training.batch_size 0.0 +749 86 model.embedding_dim 0.0 +749 86 model.relation_dim 2.0 +749 86 negative_sampler.num_negs_per_pos 11.0 +749 86 training.batch_size 2.0 +749 87 model.embedding_dim 1.0 +749 87 model.relation_dim 0.0 +749 87 negative_sampler.num_negs_per_pos 54.0 +749 87 training.batch_size 1.0 +749 88 model.embedding_dim 0.0 +749 88 model.relation_dim 2.0 +749 88 negative_sampler.num_negs_per_pos 80.0 +749 88 training.batch_size 0.0 +749 89 model.embedding_dim 0.0 +749 89 model.relation_dim 1.0 +749 89 negative_sampler.num_negs_per_pos 19.0 +749 89 training.batch_size 1.0 +749 90 model.embedding_dim 0.0 +749 90 model.relation_dim 1.0 +749 90 negative_sampler.num_negs_per_pos 31.0 +749 90 training.batch_size 1.0 +749 91 model.embedding_dim 1.0 +749 91 model.relation_dim 2.0 +749 91 negative_sampler.num_negs_per_pos 33.0 +749 91 training.batch_size 1.0 +749 92 model.embedding_dim 0.0 +749 92 model.relation_dim 2.0 +749 92 negative_sampler.num_negs_per_pos 84.0 +749 92 
training.batch_size 1.0 +749 93 model.embedding_dim 2.0 +749 93 model.relation_dim 2.0 +749 93 negative_sampler.num_negs_per_pos 37.0 +749 93 training.batch_size 0.0 +749 94 model.embedding_dim 2.0 +749 94 model.relation_dim 2.0 +749 94 negative_sampler.num_negs_per_pos 79.0 +749 94 training.batch_size 2.0 +749 95 model.embedding_dim 1.0 +749 95 model.relation_dim 0.0 +749 95 negative_sampler.num_negs_per_pos 23.0 +749 95 training.batch_size 0.0 +749 96 model.embedding_dim 1.0 +749 96 model.relation_dim 1.0 +749 96 negative_sampler.num_negs_per_pos 46.0 +749 96 training.batch_size 2.0 +749 97 model.embedding_dim 2.0 +749 97 model.relation_dim 1.0 +749 97 negative_sampler.num_negs_per_pos 7.0 +749 97 training.batch_size 2.0 +749 98 model.embedding_dim 1.0 +749 98 model.relation_dim 1.0 +749 98 negative_sampler.num_negs_per_pos 41.0 +749 98 training.batch_size 1.0 +749 99 model.embedding_dim 1.0 +749 99 model.relation_dim 2.0 +749 99 negative_sampler.num_negs_per_pos 80.0 +749 99 training.batch_size 2.0 +749 100 model.embedding_dim 1.0 +749 100 model.relation_dim 2.0 +749 100 negative_sampler.num_negs_per_pos 83.0 +749 100 training.batch_size 0.0 +749 1 dataset """kinships""" +749 1 model """transd""" +749 1 loss """bceaftersigmoid""" +749 1 regularizer """no""" +749 1 optimizer """adadelta""" +749 1 training_loop """owa""" +749 1 negative_sampler """basic""" +749 1 evaluator """rankbased""" +749 2 dataset """kinships""" +749 2 model """transd""" +749 2 loss """bceaftersigmoid""" +749 2 regularizer """no""" +749 2 optimizer """adadelta""" +749 2 training_loop """owa""" +749 2 negative_sampler """basic""" +749 2 evaluator """rankbased""" +749 3 dataset """kinships""" +749 3 model """transd""" +749 3 loss """bceaftersigmoid""" +749 3 regularizer """no""" +749 3 optimizer """adadelta""" +749 3 training_loop """owa""" +749 3 negative_sampler """basic""" +749 3 evaluator """rankbased""" +749 4 dataset """kinships""" +749 4 model """transd""" +749 4 loss 
"""bceaftersigmoid""" +749 4 regularizer """no""" +749 4 optimizer """adadelta""" +749 4 training_loop """owa""" +749 4 negative_sampler """basic""" +749 4 evaluator """rankbased""" +749 5 dataset """kinships""" +749 5 model """transd""" +749 5 loss """bceaftersigmoid""" +749 5 regularizer """no""" +749 5 optimizer """adadelta""" +749 5 training_loop """owa""" +749 5 negative_sampler """basic""" +749 5 evaluator """rankbased""" +749 6 dataset """kinships""" +749 6 model """transd""" +749 6 loss """bceaftersigmoid""" +749 6 regularizer """no""" +749 6 optimizer """adadelta""" +749 6 training_loop """owa""" +749 6 negative_sampler """basic""" +749 6 evaluator """rankbased""" +749 7 dataset """kinships""" +749 7 model """transd""" +749 7 loss """bceaftersigmoid""" +749 7 regularizer """no""" +749 7 optimizer """adadelta""" +749 7 training_loop """owa""" +749 7 negative_sampler """basic""" +749 7 evaluator """rankbased""" +749 8 dataset """kinships""" +749 8 model """transd""" +749 8 loss """bceaftersigmoid""" +749 8 regularizer """no""" +749 8 optimizer """adadelta""" +749 8 training_loop """owa""" +749 8 negative_sampler """basic""" +749 8 evaluator """rankbased""" +749 9 dataset """kinships""" +749 9 model """transd""" +749 9 loss """bceaftersigmoid""" +749 9 regularizer """no""" +749 9 optimizer """adadelta""" +749 9 training_loop """owa""" +749 9 negative_sampler """basic""" +749 9 evaluator """rankbased""" +749 10 dataset """kinships""" +749 10 model """transd""" +749 10 loss """bceaftersigmoid""" +749 10 regularizer """no""" +749 10 optimizer """adadelta""" +749 10 training_loop """owa""" +749 10 negative_sampler """basic""" +749 10 evaluator """rankbased""" +749 11 dataset """kinships""" +749 11 model """transd""" +749 11 loss """bceaftersigmoid""" +749 11 regularizer """no""" +749 11 optimizer """adadelta""" +749 11 training_loop """owa""" +749 11 negative_sampler """basic""" +749 11 evaluator """rankbased""" +749 12 dataset """kinships""" +749 12 model 
"""transd""" +749 12 loss """bceaftersigmoid""" +749 12 regularizer """no""" +749 12 optimizer """adadelta""" +749 12 training_loop """owa""" +749 12 negative_sampler """basic""" +749 12 evaluator """rankbased""" +749 13 dataset """kinships""" +749 13 model """transd""" +749 13 loss """bceaftersigmoid""" +749 13 regularizer """no""" +749 13 optimizer """adadelta""" +749 13 training_loop """owa""" +749 13 negative_sampler """basic""" +749 13 evaluator """rankbased""" +749 14 dataset """kinships""" +749 14 model """transd""" +749 14 loss """bceaftersigmoid""" +749 14 regularizer """no""" +749 14 optimizer """adadelta""" +749 14 training_loop """owa""" +749 14 negative_sampler """basic""" +749 14 evaluator """rankbased""" +749 15 dataset """kinships""" +749 15 model """transd""" +749 15 loss """bceaftersigmoid""" +749 15 regularizer """no""" +749 15 optimizer """adadelta""" +749 15 training_loop """owa""" +749 15 negative_sampler """basic""" +749 15 evaluator """rankbased""" +749 16 dataset """kinships""" +749 16 model """transd""" +749 16 loss """bceaftersigmoid""" +749 16 regularizer """no""" +749 16 optimizer """adadelta""" +749 16 training_loop """owa""" +749 16 negative_sampler """basic""" +749 16 evaluator """rankbased""" +749 17 dataset """kinships""" +749 17 model """transd""" +749 17 loss """bceaftersigmoid""" +749 17 regularizer """no""" +749 17 optimizer """adadelta""" +749 17 training_loop """owa""" +749 17 negative_sampler """basic""" +749 17 evaluator """rankbased""" +749 18 dataset """kinships""" +749 18 model """transd""" +749 18 loss """bceaftersigmoid""" +749 18 regularizer """no""" +749 18 optimizer """adadelta""" +749 18 training_loop """owa""" +749 18 negative_sampler """basic""" +749 18 evaluator """rankbased""" +749 19 dataset """kinships""" +749 19 model """transd""" +749 19 loss """bceaftersigmoid""" +749 19 regularizer """no""" +749 19 optimizer """adadelta""" +749 19 training_loop """owa""" +749 19 negative_sampler """basic""" +749 19 
evaluator """rankbased""" +749 20 dataset """kinships""" +749 20 model """transd""" +749 20 loss """bceaftersigmoid""" +749 20 regularizer """no""" +749 20 optimizer """adadelta""" +749 20 training_loop """owa""" +749 20 negative_sampler """basic""" +749 20 evaluator """rankbased""" +749 21 dataset """kinships""" +749 21 model """transd""" +749 21 loss """bceaftersigmoid""" +749 21 regularizer """no""" +749 21 optimizer """adadelta""" +749 21 training_loop """owa""" +749 21 negative_sampler """basic""" +749 21 evaluator """rankbased""" +749 22 dataset """kinships""" +749 22 model """transd""" +749 22 loss """bceaftersigmoid""" +749 22 regularizer """no""" +749 22 optimizer """adadelta""" +749 22 training_loop """owa""" +749 22 negative_sampler """basic""" +749 22 evaluator """rankbased""" +749 23 dataset """kinships""" +749 23 model """transd""" +749 23 loss """bceaftersigmoid""" +749 23 regularizer """no""" +749 23 optimizer """adadelta""" +749 23 training_loop """owa""" +749 23 negative_sampler """basic""" +749 23 evaluator """rankbased""" +749 24 dataset """kinships""" +749 24 model """transd""" +749 24 loss """bceaftersigmoid""" +749 24 regularizer """no""" +749 24 optimizer """adadelta""" +749 24 training_loop """owa""" +749 24 negative_sampler """basic""" +749 24 evaluator """rankbased""" +749 25 dataset """kinships""" +749 25 model """transd""" +749 25 loss """bceaftersigmoid""" +749 25 regularizer """no""" +749 25 optimizer """adadelta""" +749 25 training_loop """owa""" +749 25 negative_sampler """basic""" +749 25 evaluator """rankbased""" +749 26 dataset """kinships""" +749 26 model """transd""" +749 26 loss """bceaftersigmoid""" +749 26 regularizer """no""" +749 26 optimizer """adadelta""" +749 26 training_loop """owa""" +749 26 negative_sampler """basic""" +749 26 evaluator """rankbased""" +749 27 dataset """kinships""" +749 27 model """transd""" +749 27 loss """bceaftersigmoid""" +749 27 regularizer """no""" +749 27 optimizer """adadelta""" +749 27 
training_loop """owa""" +749 27 negative_sampler """basic""" +749 27 evaluator """rankbased""" +749 28 dataset """kinships""" +749 28 model """transd""" +749 28 loss """bceaftersigmoid""" +749 28 regularizer """no""" +749 28 optimizer """adadelta""" +749 28 training_loop """owa""" +749 28 negative_sampler """basic""" +749 28 evaluator """rankbased""" +749 29 dataset """kinships""" +749 29 model """transd""" +749 29 loss """bceaftersigmoid""" +749 29 regularizer """no""" +749 29 optimizer """adadelta""" +749 29 training_loop """owa""" +749 29 negative_sampler """basic""" +749 29 evaluator """rankbased""" +749 30 dataset """kinships""" +749 30 model """transd""" +749 30 loss """bceaftersigmoid""" +749 30 regularizer """no""" +749 30 optimizer """adadelta""" +749 30 training_loop """owa""" +749 30 negative_sampler """basic""" +749 30 evaluator """rankbased""" +749 31 dataset """kinships""" +749 31 model """transd""" +749 31 loss """bceaftersigmoid""" +749 31 regularizer """no""" +749 31 optimizer """adadelta""" +749 31 training_loop """owa""" +749 31 negative_sampler """basic""" +749 31 evaluator """rankbased""" +749 32 dataset """kinships""" +749 32 model """transd""" +749 32 loss """bceaftersigmoid""" +749 32 regularizer """no""" +749 32 optimizer """adadelta""" +749 32 training_loop """owa""" +749 32 negative_sampler """basic""" +749 32 evaluator """rankbased""" +749 33 dataset """kinships""" +749 33 model """transd""" +749 33 loss """bceaftersigmoid""" +749 33 regularizer """no""" +749 33 optimizer """adadelta""" +749 33 training_loop """owa""" +749 33 negative_sampler """basic""" +749 33 evaluator """rankbased""" +749 34 dataset """kinships""" +749 34 model """transd""" +749 34 loss """bceaftersigmoid""" +749 34 regularizer """no""" +749 34 optimizer """adadelta""" +749 34 training_loop """owa""" +749 34 negative_sampler """basic""" +749 34 evaluator """rankbased""" +749 35 dataset """kinships""" +749 35 model """transd""" +749 35 loss """bceaftersigmoid""" +749 
35 regularizer """no""" +749 35 optimizer """adadelta""" +749 35 training_loop """owa""" +749 35 negative_sampler """basic""" +749 35 evaluator """rankbased""" +749 36 dataset """kinships""" +749 36 model """transd""" +749 36 loss """bceaftersigmoid""" +749 36 regularizer """no""" +749 36 optimizer """adadelta""" +749 36 training_loop """owa""" +749 36 negative_sampler """basic""" +749 36 evaluator """rankbased""" +749 37 dataset """kinships""" +749 37 model """transd""" +749 37 loss """bceaftersigmoid""" +749 37 regularizer """no""" +749 37 optimizer """adadelta""" +749 37 training_loop """owa""" +749 37 negative_sampler """basic""" +749 37 evaluator """rankbased""" +749 38 dataset """kinships""" +749 38 model """transd""" +749 38 loss """bceaftersigmoid""" +749 38 regularizer """no""" +749 38 optimizer """adadelta""" +749 38 training_loop """owa""" +749 38 negative_sampler """basic""" +749 38 evaluator """rankbased""" +749 39 dataset """kinships""" +749 39 model """transd""" +749 39 loss """bceaftersigmoid""" +749 39 regularizer """no""" +749 39 optimizer """adadelta""" +749 39 training_loop """owa""" +749 39 negative_sampler """basic""" +749 39 evaluator """rankbased""" +749 40 dataset """kinships""" +749 40 model """transd""" +749 40 loss """bceaftersigmoid""" +749 40 regularizer """no""" +749 40 optimizer """adadelta""" +749 40 training_loop """owa""" +749 40 negative_sampler """basic""" +749 40 evaluator """rankbased""" +749 41 dataset """kinships""" +749 41 model """transd""" +749 41 loss """bceaftersigmoid""" +749 41 regularizer """no""" +749 41 optimizer """adadelta""" +749 41 training_loop """owa""" +749 41 negative_sampler """basic""" +749 41 evaluator """rankbased""" +749 42 dataset """kinships""" +749 42 model """transd""" +749 42 loss """bceaftersigmoid""" +749 42 regularizer """no""" +749 42 optimizer """adadelta""" +749 42 training_loop """owa""" +749 42 negative_sampler """basic""" +749 42 evaluator """rankbased""" +749 43 dataset """kinships""" 
+749 43 model """transd""" +749 43 loss """bceaftersigmoid""" +749 43 regularizer """no""" +749 43 optimizer """adadelta""" +749 43 training_loop """owa""" +749 43 negative_sampler """basic""" +749 43 evaluator """rankbased""" +749 44 dataset """kinships""" +749 44 model """transd""" +749 44 loss """bceaftersigmoid""" +749 44 regularizer """no""" +749 44 optimizer """adadelta""" +749 44 training_loop """owa""" +749 44 negative_sampler """basic""" +749 44 evaluator """rankbased""" +749 45 dataset """kinships""" +749 45 model """transd""" +749 45 loss """bceaftersigmoid""" +749 45 regularizer """no""" +749 45 optimizer """adadelta""" +749 45 training_loop """owa""" +749 45 negative_sampler """basic""" +749 45 evaluator """rankbased""" +749 46 dataset """kinships""" +749 46 model """transd""" +749 46 loss """bceaftersigmoid""" +749 46 regularizer """no""" +749 46 optimizer """adadelta""" +749 46 training_loop """owa""" +749 46 negative_sampler """basic""" +749 46 evaluator """rankbased""" +749 47 dataset """kinships""" +749 47 model """transd""" +749 47 loss """bceaftersigmoid""" +749 47 regularizer """no""" +749 47 optimizer """adadelta""" +749 47 training_loop """owa""" +749 47 negative_sampler """basic""" +749 47 evaluator """rankbased""" +749 48 dataset """kinships""" +749 48 model """transd""" +749 48 loss """bceaftersigmoid""" +749 48 regularizer """no""" +749 48 optimizer """adadelta""" +749 48 training_loop """owa""" +749 48 negative_sampler """basic""" +749 48 evaluator """rankbased""" +749 49 dataset """kinships""" +749 49 model """transd""" +749 49 loss """bceaftersigmoid""" +749 49 regularizer """no""" +749 49 optimizer """adadelta""" +749 49 training_loop """owa""" +749 49 negative_sampler """basic""" +749 49 evaluator """rankbased""" +749 50 dataset """kinships""" +749 50 model """transd""" +749 50 loss """bceaftersigmoid""" +749 50 regularizer """no""" +749 50 optimizer """adadelta""" +749 50 training_loop """owa""" +749 50 negative_sampler """basic""" 
+749 50 evaluator """rankbased""" +749 51 dataset """kinships""" +749 51 model """transd""" +749 51 loss """bceaftersigmoid""" +749 51 regularizer """no""" +749 51 optimizer """adadelta""" +749 51 training_loop """owa""" +749 51 negative_sampler """basic""" +749 51 evaluator """rankbased""" +749 52 dataset """kinships""" +749 52 model """transd""" +749 52 loss """bceaftersigmoid""" +749 52 regularizer """no""" +749 52 optimizer """adadelta""" +749 52 training_loop """owa""" +749 52 negative_sampler """basic""" +749 52 evaluator """rankbased""" +749 53 dataset """kinships""" +749 53 model """transd""" +749 53 loss """bceaftersigmoid""" +749 53 regularizer """no""" +749 53 optimizer """adadelta""" +749 53 training_loop """owa""" +749 53 negative_sampler """basic""" +749 53 evaluator """rankbased""" +749 54 dataset """kinships""" +749 54 model """transd""" +749 54 loss """bceaftersigmoid""" +749 54 regularizer """no""" +749 54 optimizer """adadelta""" +749 54 training_loop """owa""" +749 54 negative_sampler """basic""" +749 54 evaluator """rankbased""" +749 55 dataset """kinships""" +749 55 model """transd""" +749 55 loss """bceaftersigmoid""" +749 55 regularizer """no""" +749 55 optimizer """adadelta""" +749 55 training_loop """owa""" +749 55 negative_sampler """basic""" +749 55 evaluator """rankbased""" +749 56 dataset """kinships""" +749 56 model """transd""" +749 56 loss """bceaftersigmoid""" +749 56 regularizer """no""" +749 56 optimizer """adadelta""" +749 56 training_loop """owa""" +749 56 negative_sampler """basic""" +749 56 evaluator """rankbased""" +749 57 dataset """kinships""" +749 57 model """transd""" +749 57 loss """bceaftersigmoid""" +749 57 regularizer """no""" +749 57 optimizer """adadelta""" +749 57 training_loop """owa""" +749 57 negative_sampler """basic""" +749 57 evaluator """rankbased""" +749 58 dataset """kinships""" +749 58 model """transd""" +749 58 loss """bceaftersigmoid""" +749 58 regularizer """no""" +749 58 optimizer """adadelta""" +749 
58 training_loop """owa""" +749 58 negative_sampler """basic""" +749 58 evaluator """rankbased""" +749 59 dataset """kinships""" +749 59 model """transd""" +749 59 loss """bceaftersigmoid""" +749 59 regularizer """no""" +749 59 optimizer """adadelta""" +749 59 training_loop """owa""" +749 59 negative_sampler """basic""" +749 59 evaluator """rankbased""" +749 60 dataset """kinships""" +749 60 model """transd""" +749 60 loss """bceaftersigmoid""" +749 60 regularizer """no""" +749 60 optimizer """adadelta""" +749 60 training_loop """owa""" +749 60 negative_sampler """basic""" +749 60 evaluator """rankbased""" +749 61 dataset """kinships""" +749 61 model """transd""" +749 61 loss """bceaftersigmoid""" +749 61 regularizer """no""" +749 61 optimizer """adadelta""" +749 61 training_loop """owa""" +749 61 negative_sampler """basic""" +749 61 evaluator """rankbased""" +749 62 dataset """kinships""" +749 62 model """transd""" +749 62 loss """bceaftersigmoid""" +749 62 regularizer """no""" +749 62 optimizer """adadelta""" +749 62 training_loop """owa""" +749 62 negative_sampler """basic""" +749 62 evaluator """rankbased""" +749 63 dataset """kinships""" +749 63 model """transd""" +749 63 loss """bceaftersigmoid""" +749 63 regularizer """no""" +749 63 optimizer """adadelta""" +749 63 training_loop """owa""" +749 63 negative_sampler """basic""" +749 63 evaluator """rankbased""" +749 64 dataset """kinships""" +749 64 model """transd""" +749 64 loss """bceaftersigmoid""" +749 64 regularizer """no""" +749 64 optimizer """adadelta""" +749 64 training_loop """owa""" +749 64 negative_sampler """basic""" +749 64 evaluator """rankbased""" +749 65 dataset """kinships""" +749 65 model """transd""" +749 65 loss """bceaftersigmoid""" +749 65 regularizer """no""" +749 65 optimizer """adadelta""" +749 65 training_loop """owa""" +749 65 negative_sampler """basic""" +749 65 evaluator """rankbased""" +749 66 dataset """kinships""" +749 66 model """transd""" +749 66 loss """bceaftersigmoid""" 
+749 66 regularizer """no""" +749 66 optimizer """adadelta""" +749 66 training_loop """owa""" +749 66 negative_sampler """basic""" +749 66 evaluator """rankbased""" +749 67 dataset """kinships""" +749 67 model """transd""" +749 67 loss """bceaftersigmoid""" +749 67 regularizer """no""" +749 67 optimizer """adadelta""" +749 67 training_loop """owa""" +749 67 negative_sampler """basic""" +749 67 evaluator """rankbased""" +749 68 dataset """kinships""" +749 68 model """transd""" +749 68 loss """bceaftersigmoid""" +749 68 regularizer """no""" +749 68 optimizer """adadelta""" +749 68 training_loop """owa""" +749 68 negative_sampler """basic""" +749 68 evaluator """rankbased""" +749 69 dataset """kinships""" +749 69 model """transd""" +749 69 loss """bceaftersigmoid""" +749 69 regularizer """no""" +749 69 optimizer """adadelta""" +749 69 training_loop """owa""" +749 69 negative_sampler """basic""" +749 69 evaluator """rankbased""" +749 70 dataset """kinships""" +749 70 model """transd""" +749 70 loss """bceaftersigmoid""" +749 70 regularizer """no""" +749 70 optimizer """adadelta""" +749 70 training_loop """owa""" +749 70 negative_sampler """basic""" +749 70 evaluator """rankbased""" +749 71 dataset """kinships""" +749 71 model """transd""" +749 71 loss """bceaftersigmoid""" +749 71 regularizer """no""" +749 71 optimizer """adadelta""" +749 71 training_loop """owa""" +749 71 negative_sampler """basic""" +749 71 evaluator """rankbased""" +749 72 dataset """kinships""" +749 72 model """transd""" +749 72 loss """bceaftersigmoid""" +749 72 regularizer """no""" +749 72 optimizer """adadelta""" +749 72 training_loop """owa""" +749 72 negative_sampler """basic""" +749 72 evaluator """rankbased""" +749 73 dataset """kinships""" +749 73 model """transd""" +749 73 loss """bceaftersigmoid""" +749 73 regularizer """no""" +749 73 optimizer """adadelta""" +749 73 training_loop """owa""" +749 73 negative_sampler """basic""" +749 73 evaluator """rankbased""" +749 74 dataset 
"""kinships""" +749 74 model """transd""" +749 74 loss """bceaftersigmoid""" +749 74 regularizer """no""" +749 74 optimizer """adadelta""" +749 74 training_loop """owa""" +749 74 negative_sampler """basic""" +749 74 evaluator """rankbased""" +749 75 dataset """kinships""" +749 75 model """transd""" +749 75 loss """bceaftersigmoid""" +749 75 regularizer """no""" +749 75 optimizer """adadelta""" +749 75 training_loop """owa""" +749 75 negative_sampler """basic""" +749 75 evaluator """rankbased""" +749 76 dataset """kinships""" +749 76 model """transd""" +749 76 loss """bceaftersigmoid""" +749 76 regularizer """no""" +749 76 optimizer """adadelta""" +749 76 training_loop """owa""" +749 76 negative_sampler """basic""" +749 76 evaluator """rankbased""" +749 77 dataset """kinships""" +749 77 model """transd""" +749 77 loss """bceaftersigmoid""" +749 77 regularizer """no""" +749 77 optimizer """adadelta""" +749 77 training_loop """owa""" +749 77 negative_sampler """basic""" +749 77 evaluator """rankbased""" +749 78 dataset """kinships""" +749 78 model """transd""" +749 78 loss """bceaftersigmoid""" +749 78 regularizer """no""" +749 78 optimizer """adadelta""" +749 78 training_loop """owa""" +749 78 negative_sampler """basic""" +749 78 evaluator """rankbased""" +749 79 dataset """kinships""" +749 79 model """transd""" +749 79 loss """bceaftersigmoid""" +749 79 regularizer """no""" +749 79 optimizer """adadelta""" +749 79 training_loop """owa""" +749 79 negative_sampler """basic""" +749 79 evaluator """rankbased""" +749 80 dataset """kinships""" +749 80 model """transd""" +749 80 loss """bceaftersigmoid""" +749 80 regularizer """no""" +749 80 optimizer """adadelta""" +749 80 training_loop """owa""" +749 80 negative_sampler """basic""" +749 80 evaluator """rankbased""" +749 81 dataset """kinships""" +749 81 model """transd""" +749 81 loss """bceaftersigmoid""" +749 81 regularizer """no""" +749 81 optimizer """adadelta""" +749 81 training_loop """owa""" +749 81 
negative_sampler """basic""" +749 81 evaluator """rankbased""" +749 82 dataset """kinships""" +749 82 model """transd""" +749 82 loss """bceaftersigmoid""" +749 82 regularizer """no""" +749 82 optimizer """adadelta""" +749 82 training_loop """owa""" +749 82 negative_sampler """basic""" +749 82 evaluator """rankbased""" +749 83 dataset """kinships""" +749 83 model """transd""" +749 83 loss """bceaftersigmoid""" +749 83 regularizer """no""" +749 83 optimizer """adadelta""" +749 83 training_loop """owa""" +749 83 negative_sampler """basic""" +749 83 evaluator """rankbased""" +749 84 dataset """kinships""" +749 84 model """transd""" +749 84 loss """bceaftersigmoid""" +749 84 regularizer """no""" +749 84 optimizer """adadelta""" +749 84 training_loop """owa""" +749 84 negative_sampler """basic""" +749 84 evaluator """rankbased""" +749 85 dataset """kinships""" +749 85 model """transd""" +749 85 loss """bceaftersigmoid""" +749 85 regularizer """no""" +749 85 optimizer """adadelta""" +749 85 training_loop """owa""" +749 85 negative_sampler """basic""" +749 85 evaluator """rankbased""" +749 86 dataset """kinships""" +749 86 model """transd""" +749 86 loss """bceaftersigmoid""" +749 86 regularizer """no""" +749 86 optimizer """adadelta""" +749 86 training_loop """owa""" +749 86 negative_sampler """basic""" +749 86 evaluator """rankbased""" +749 87 dataset """kinships""" +749 87 model """transd""" +749 87 loss """bceaftersigmoid""" +749 87 regularizer """no""" +749 87 optimizer """adadelta""" +749 87 training_loop """owa""" +749 87 negative_sampler """basic""" +749 87 evaluator """rankbased""" +749 88 dataset """kinships""" +749 88 model """transd""" +749 88 loss """bceaftersigmoid""" +749 88 regularizer """no""" +749 88 optimizer """adadelta""" +749 88 training_loop """owa""" +749 88 negative_sampler """basic""" +749 88 evaluator """rankbased""" +749 89 dataset """kinships""" +749 89 model """transd""" +749 89 loss """bceaftersigmoid""" +749 89 regularizer """no""" +749 89 
optimizer """adadelta""" +749 89 training_loop """owa""" +749 89 negative_sampler """basic""" +749 89 evaluator """rankbased""" +749 90 dataset """kinships""" +749 90 model """transd""" +749 90 loss """bceaftersigmoid""" +749 90 regularizer """no""" +749 90 optimizer """adadelta""" +749 90 training_loop """owa""" +749 90 negative_sampler """basic""" +749 90 evaluator """rankbased""" +749 91 dataset """kinships""" +749 91 model """transd""" +749 91 loss """bceaftersigmoid""" +749 91 regularizer """no""" +749 91 optimizer """adadelta""" +749 91 training_loop """owa""" +749 91 negative_sampler """basic""" +749 91 evaluator """rankbased""" +749 92 dataset """kinships""" +749 92 model """transd""" +749 92 loss """bceaftersigmoid""" +749 92 regularizer """no""" +749 92 optimizer """adadelta""" +749 92 training_loop """owa""" +749 92 negative_sampler """basic""" +749 92 evaluator """rankbased""" +749 93 dataset """kinships""" +749 93 model """transd""" +749 93 loss """bceaftersigmoid""" +749 93 regularizer """no""" +749 93 optimizer """adadelta""" +749 93 training_loop """owa""" +749 93 negative_sampler """basic""" +749 93 evaluator """rankbased""" +749 94 dataset """kinships""" +749 94 model """transd""" +749 94 loss """bceaftersigmoid""" +749 94 regularizer """no""" +749 94 optimizer """adadelta""" +749 94 training_loop """owa""" +749 94 negative_sampler """basic""" +749 94 evaluator """rankbased""" +749 95 dataset """kinships""" +749 95 model """transd""" +749 95 loss """bceaftersigmoid""" +749 95 regularizer """no""" +749 95 optimizer """adadelta""" +749 95 training_loop """owa""" +749 95 negative_sampler """basic""" +749 95 evaluator """rankbased""" +749 96 dataset """kinships""" +749 96 model """transd""" +749 96 loss """bceaftersigmoid""" +749 96 regularizer """no""" +749 96 optimizer """adadelta""" +749 96 training_loop """owa""" +749 96 negative_sampler """basic""" +749 96 evaluator """rankbased""" +749 97 dataset """kinships""" +749 97 model """transd""" +749 97 
loss """bceaftersigmoid""" +749 97 regularizer """no""" +749 97 optimizer """adadelta""" +749 97 training_loop """owa""" +749 97 negative_sampler """basic""" +749 97 evaluator """rankbased""" +749 98 dataset """kinships""" +749 98 model """transd""" +749 98 loss """bceaftersigmoid""" +749 98 regularizer """no""" +749 98 optimizer """adadelta""" +749 98 training_loop """owa""" +749 98 negative_sampler """basic""" +749 98 evaluator """rankbased""" +749 99 dataset """kinships""" +749 99 model """transd""" +749 99 loss """bceaftersigmoid""" +749 99 regularizer """no""" +749 99 optimizer """adadelta""" +749 99 training_loop """owa""" +749 99 negative_sampler """basic""" +749 99 evaluator """rankbased""" +749 100 dataset """kinships""" +749 100 model """transd""" +749 100 loss """bceaftersigmoid""" +749 100 regularizer """no""" +749 100 optimizer """adadelta""" +749 100 training_loop """owa""" +749 100 negative_sampler """basic""" +749 100 evaluator """rankbased""" +750 1 model.embedding_dim 1.0 +750 1 model.relation_dim 1.0 +750 1 negative_sampler.num_negs_per_pos 17.0 +750 1 training.batch_size 2.0 +750 2 model.embedding_dim 1.0 +750 2 model.relation_dim 2.0 +750 2 negative_sampler.num_negs_per_pos 10.0 +750 2 training.batch_size 2.0 +750 3 model.embedding_dim 0.0 +750 3 model.relation_dim 1.0 +750 3 negative_sampler.num_negs_per_pos 64.0 +750 3 training.batch_size 2.0 +750 4 model.embedding_dim 0.0 +750 4 model.relation_dim 0.0 +750 4 negative_sampler.num_negs_per_pos 80.0 +750 4 training.batch_size 1.0 +750 5 model.embedding_dim 1.0 +750 5 model.relation_dim 0.0 +750 5 negative_sampler.num_negs_per_pos 87.0 +750 5 training.batch_size 2.0 +750 6 model.embedding_dim 2.0 +750 6 model.relation_dim 0.0 +750 6 negative_sampler.num_negs_per_pos 13.0 +750 6 training.batch_size 0.0 +750 7 model.embedding_dim 1.0 +750 7 model.relation_dim 0.0 +750 7 negative_sampler.num_negs_per_pos 55.0 +750 7 training.batch_size 2.0 +750 8 model.embedding_dim 1.0 +750 8 model.relation_dim 
1.0 +750 8 negative_sampler.num_negs_per_pos 7.0 +750 8 training.batch_size 0.0 +750 9 model.embedding_dim 0.0 +750 9 model.relation_dim 2.0 +750 9 negative_sampler.num_negs_per_pos 1.0 +750 9 training.batch_size 0.0 +750 10 model.embedding_dim 0.0 +750 10 model.relation_dim 1.0 +750 10 negative_sampler.num_negs_per_pos 8.0 +750 10 training.batch_size 2.0 +750 11 model.embedding_dim 0.0 +750 11 model.relation_dim 2.0 +750 11 negative_sampler.num_negs_per_pos 87.0 +750 11 training.batch_size 0.0 +750 12 model.embedding_dim 0.0 +750 12 model.relation_dim 0.0 +750 12 negative_sampler.num_negs_per_pos 30.0 +750 12 training.batch_size 1.0 +750 13 model.embedding_dim 0.0 +750 13 model.relation_dim 0.0 +750 13 negative_sampler.num_negs_per_pos 6.0 +750 13 training.batch_size 1.0 +750 14 model.embedding_dim 2.0 +750 14 model.relation_dim 2.0 +750 14 negative_sampler.num_negs_per_pos 87.0 +750 14 training.batch_size 0.0 +750 15 model.embedding_dim 2.0 +750 15 model.relation_dim 0.0 +750 15 negative_sampler.num_negs_per_pos 75.0 +750 15 training.batch_size 2.0 +750 16 model.embedding_dim 2.0 +750 16 model.relation_dim 1.0 +750 16 negative_sampler.num_negs_per_pos 87.0 +750 16 training.batch_size 0.0 +750 17 model.embedding_dim 0.0 +750 17 model.relation_dim 0.0 +750 17 negative_sampler.num_negs_per_pos 64.0 +750 17 training.batch_size 1.0 +750 18 model.embedding_dim 1.0 +750 18 model.relation_dim 1.0 +750 18 negative_sampler.num_negs_per_pos 14.0 +750 18 training.batch_size 0.0 +750 19 model.embedding_dim 2.0 +750 19 model.relation_dim 0.0 +750 19 negative_sampler.num_negs_per_pos 52.0 +750 19 training.batch_size 0.0 +750 20 model.embedding_dim 1.0 +750 20 model.relation_dim 0.0 +750 20 negative_sampler.num_negs_per_pos 43.0 +750 20 training.batch_size 2.0 +750 21 model.embedding_dim 0.0 +750 21 model.relation_dim 0.0 +750 21 negative_sampler.num_negs_per_pos 24.0 +750 21 training.batch_size 2.0 +750 22 model.embedding_dim 2.0 +750 22 model.relation_dim 0.0 +750 22 
negative_sampler.num_negs_per_pos 63.0 +750 22 training.batch_size 2.0 +750 23 model.embedding_dim 2.0 +750 23 model.relation_dim 2.0 +750 23 negative_sampler.num_negs_per_pos 94.0 +750 23 training.batch_size 0.0 +750 24 model.embedding_dim 2.0 +750 24 model.relation_dim 0.0 +750 24 negative_sampler.num_negs_per_pos 90.0 +750 24 training.batch_size 1.0 +750 25 model.embedding_dim 1.0 +750 25 model.relation_dim 0.0 +750 25 negative_sampler.num_negs_per_pos 15.0 +750 25 training.batch_size 0.0 +750 26 model.embedding_dim 1.0 +750 26 model.relation_dim 2.0 +750 26 negative_sampler.num_negs_per_pos 70.0 +750 26 training.batch_size 1.0 +750 27 model.embedding_dim 0.0 +750 27 model.relation_dim 1.0 +750 27 negative_sampler.num_negs_per_pos 64.0 +750 27 training.batch_size 2.0 +750 28 model.embedding_dim 1.0 +750 28 model.relation_dim 2.0 +750 28 negative_sampler.num_negs_per_pos 25.0 +750 28 training.batch_size 2.0 +750 29 model.embedding_dim 1.0 +750 29 model.relation_dim 2.0 +750 29 negative_sampler.num_negs_per_pos 42.0 +750 29 training.batch_size 0.0 +750 30 model.embedding_dim 1.0 +750 30 model.relation_dim 1.0 +750 30 negative_sampler.num_negs_per_pos 53.0 +750 30 training.batch_size 1.0 +750 31 model.embedding_dim 2.0 +750 31 model.relation_dim 2.0 +750 31 negative_sampler.num_negs_per_pos 75.0 +750 31 training.batch_size 1.0 +750 32 model.embedding_dim 2.0 +750 32 model.relation_dim 2.0 +750 32 negative_sampler.num_negs_per_pos 47.0 +750 32 training.batch_size 0.0 +750 33 model.embedding_dim 1.0 +750 33 model.relation_dim 0.0 +750 33 negative_sampler.num_negs_per_pos 33.0 +750 33 training.batch_size 0.0 +750 34 model.embedding_dim 2.0 +750 34 model.relation_dim 0.0 +750 34 negative_sampler.num_negs_per_pos 11.0 +750 34 training.batch_size 1.0 +750 35 model.embedding_dim 1.0 +750 35 model.relation_dim 0.0 +750 35 negative_sampler.num_negs_per_pos 23.0 +750 35 training.batch_size 1.0 +750 36 model.embedding_dim 2.0 +750 36 model.relation_dim 0.0 +750 36 
negative_sampler.num_negs_per_pos 95.0 +750 36 training.batch_size 2.0 +750 37 model.embedding_dim 2.0 +750 37 model.relation_dim 2.0 +750 37 negative_sampler.num_negs_per_pos 70.0 +750 37 training.batch_size 1.0 +750 38 model.embedding_dim 2.0 +750 38 model.relation_dim 2.0 +750 38 negative_sampler.num_negs_per_pos 87.0 +750 38 training.batch_size 2.0 +750 39 model.embedding_dim 2.0 +750 39 model.relation_dim 0.0 +750 39 negative_sampler.num_negs_per_pos 80.0 +750 39 training.batch_size 0.0 +750 40 model.embedding_dim 2.0 +750 40 model.relation_dim 2.0 +750 40 negative_sampler.num_negs_per_pos 52.0 +750 40 training.batch_size 2.0 +750 41 model.embedding_dim 2.0 +750 41 model.relation_dim 1.0 +750 41 negative_sampler.num_negs_per_pos 55.0 +750 41 training.batch_size 2.0 +750 42 model.embedding_dim 1.0 +750 42 model.relation_dim 0.0 +750 42 negative_sampler.num_negs_per_pos 84.0 +750 42 training.batch_size 0.0 +750 43 model.embedding_dim 1.0 +750 43 model.relation_dim 0.0 +750 43 negative_sampler.num_negs_per_pos 39.0 +750 43 training.batch_size 2.0 +750 44 model.embedding_dim 0.0 +750 44 model.relation_dim 2.0 +750 44 negative_sampler.num_negs_per_pos 8.0 +750 44 training.batch_size 1.0 +750 45 model.embedding_dim 2.0 +750 45 model.relation_dim 0.0 +750 45 negative_sampler.num_negs_per_pos 71.0 +750 45 training.batch_size 0.0 +750 46 model.embedding_dim 1.0 +750 46 model.relation_dim 2.0 +750 46 negative_sampler.num_negs_per_pos 95.0 +750 46 training.batch_size 0.0 +750 47 model.embedding_dim 0.0 +750 47 model.relation_dim 2.0 +750 47 negative_sampler.num_negs_per_pos 76.0 +750 47 training.batch_size 0.0 +750 48 model.embedding_dim 2.0 +750 48 model.relation_dim 1.0 +750 48 negative_sampler.num_negs_per_pos 43.0 +750 48 training.batch_size 1.0 +750 49 model.embedding_dim 0.0 +750 49 model.relation_dim 2.0 +750 49 negative_sampler.num_negs_per_pos 80.0 +750 49 training.batch_size 0.0 +750 50 model.embedding_dim 1.0 +750 50 model.relation_dim 1.0 +750 50 
negative_sampler.num_negs_per_pos 46.0 +750 50 training.batch_size 1.0 +750 51 model.embedding_dim 2.0 +750 51 model.relation_dim 1.0 +750 51 negative_sampler.num_negs_per_pos 43.0 +750 51 training.batch_size 0.0 +750 52 model.embedding_dim 0.0 +750 52 model.relation_dim 1.0 +750 52 negative_sampler.num_negs_per_pos 59.0 +750 52 training.batch_size 0.0 +750 53 model.embedding_dim 2.0 +750 53 model.relation_dim 2.0 +750 53 negative_sampler.num_negs_per_pos 2.0 +750 53 training.batch_size 1.0 +750 54 model.embedding_dim 0.0 +750 54 model.relation_dim 2.0 +750 54 negative_sampler.num_negs_per_pos 3.0 +750 54 training.batch_size 2.0 +750 55 model.embedding_dim 0.0 +750 55 model.relation_dim 2.0 +750 55 negative_sampler.num_negs_per_pos 50.0 +750 55 training.batch_size 0.0 +750 56 model.embedding_dim 1.0 +750 56 model.relation_dim 1.0 +750 56 negative_sampler.num_negs_per_pos 69.0 +750 56 training.batch_size 0.0 +750 57 model.embedding_dim 0.0 +750 57 model.relation_dim 0.0 +750 57 negative_sampler.num_negs_per_pos 96.0 +750 57 training.batch_size 2.0 +750 58 model.embedding_dim 0.0 +750 58 model.relation_dim 2.0 +750 58 negative_sampler.num_negs_per_pos 42.0 +750 58 training.batch_size 2.0 +750 59 model.embedding_dim 2.0 +750 59 model.relation_dim 2.0 +750 59 negative_sampler.num_negs_per_pos 86.0 +750 59 training.batch_size 2.0 +750 60 model.embedding_dim 0.0 +750 60 model.relation_dim 1.0 +750 60 negative_sampler.num_negs_per_pos 66.0 +750 60 training.batch_size 1.0 +750 61 model.embedding_dim 2.0 +750 61 model.relation_dim 0.0 +750 61 negative_sampler.num_negs_per_pos 55.0 +750 61 training.batch_size 0.0 +750 62 model.embedding_dim 1.0 +750 62 model.relation_dim 1.0 +750 62 negative_sampler.num_negs_per_pos 13.0 +750 62 training.batch_size 1.0 +750 63 model.embedding_dim 1.0 +750 63 model.relation_dim 1.0 +750 63 negative_sampler.num_negs_per_pos 33.0 +750 63 training.batch_size 2.0 +750 64 model.embedding_dim 2.0 +750 64 model.relation_dim 0.0 +750 64 
negative_sampler.num_negs_per_pos 64.0 +750 64 training.batch_size 2.0 +750 65 model.embedding_dim 2.0 +750 65 model.relation_dim 0.0 +750 65 negative_sampler.num_negs_per_pos 93.0 +750 65 training.batch_size 2.0 +750 66 model.embedding_dim 2.0 +750 66 model.relation_dim 1.0 +750 66 negative_sampler.num_negs_per_pos 22.0 +750 66 training.batch_size 2.0 +750 67 model.embedding_dim 0.0 +750 67 model.relation_dim 0.0 +750 67 negative_sampler.num_negs_per_pos 80.0 +750 67 training.batch_size 1.0 +750 68 model.embedding_dim 0.0 +750 68 model.relation_dim 2.0 +750 68 negative_sampler.num_negs_per_pos 23.0 +750 68 training.batch_size 2.0 +750 69 model.embedding_dim 2.0 +750 69 model.relation_dim 2.0 +750 69 negative_sampler.num_negs_per_pos 44.0 +750 69 training.batch_size 2.0 +750 70 model.embedding_dim 1.0 +750 70 model.relation_dim 1.0 +750 70 negative_sampler.num_negs_per_pos 91.0 +750 70 training.batch_size 0.0 +750 71 model.embedding_dim 0.0 +750 71 model.relation_dim 1.0 +750 71 negative_sampler.num_negs_per_pos 7.0 +750 71 training.batch_size 1.0 +750 72 model.embedding_dim 2.0 +750 72 model.relation_dim 1.0 +750 72 negative_sampler.num_negs_per_pos 13.0 +750 72 training.batch_size 0.0 +750 73 model.embedding_dim 1.0 +750 73 model.relation_dim 0.0 +750 73 negative_sampler.num_negs_per_pos 7.0 +750 73 training.batch_size 0.0 +750 74 model.embedding_dim 2.0 +750 74 model.relation_dim 2.0 +750 74 negative_sampler.num_negs_per_pos 28.0 +750 74 training.batch_size 2.0 +750 75 model.embedding_dim 2.0 +750 75 model.relation_dim 1.0 +750 75 negative_sampler.num_negs_per_pos 49.0 +750 75 training.batch_size 0.0 +750 76 model.embedding_dim 2.0 +750 76 model.relation_dim 1.0 +750 76 negative_sampler.num_negs_per_pos 7.0 +750 76 training.batch_size 2.0 +750 77 model.embedding_dim 1.0 +750 77 model.relation_dim 1.0 +750 77 negative_sampler.num_negs_per_pos 92.0 +750 77 training.batch_size 0.0 +750 78 model.embedding_dim 1.0 +750 78 model.relation_dim 2.0 +750 78 
negative_sampler.num_negs_per_pos 30.0 +750 78 training.batch_size 2.0 +750 79 model.embedding_dim 0.0 +750 79 model.relation_dim 1.0 +750 79 negative_sampler.num_negs_per_pos 77.0 +750 79 training.batch_size 2.0 +750 80 model.embedding_dim 2.0 +750 80 model.relation_dim 1.0 +750 80 negative_sampler.num_negs_per_pos 46.0 +750 80 training.batch_size 0.0 +750 81 model.embedding_dim 2.0 +750 81 model.relation_dim 1.0 +750 81 negative_sampler.num_negs_per_pos 28.0 +750 81 training.batch_size 0.0 +750 82 model.embedding_dim 0.0 +750 82 model.relation_dim 2.0 +750 82 negative_sampler.num_negs_per_pos 65.0 +750 82 training.batch_size 1.0 +750 83 model.embedding_dim 1.0 +750 83 model.relation_dim 1.0 +750 83 negative_sampler.num_negs_per_pos 8.0 +750 83 training.batch_size 1.0 +750 84 model.embedding_dim 0.0 +750 84 model.relation_dim 2.0 +750 84 negative_sampler.num_negs_per_pos 31.0 +750 84 training.batch_size 0.0 +750 85 model.embedding_dim 2.0 +750 85 model.relation_dim 2.0 +750 85 negative_sampler.num_negs_per_pos 68.0 +750 85 training.batch_size 0.0 +750 86 model.embedding_dim 2.0 +750 86 model.relation_dim 1.0 +750 86 negative_sampler.num_negs_per_pos 77.0 +750 86 training.batch_size 1.0 +750 87 model.embedding_dim 0.0 +750 87 model.relation_dim 2.0 +750 87 negative_sampler.num_negs_per_pos 58.0 +750 87 training.batch_size 1.0 +750 88 model.embedding_dim 2.0 +750 88 model.relation_dim 2.0 +750 88 negative_sampler.num_negs_per_pos 6.0 +750 88 training.batch_size 2.0 +750 89 model.embedding_dim 1.0 +750 89 model.relation_dim 2.0 +750 89 negative_sampler.num_negs_per_pos 95.0 +750 89 training.batch_size 0.0 +750 90 model.embedding_dim 2.0 +750 90 model.relation_dim 0.0 +750 90 negative_sampler.num_negs_per_pos 32.0 +750 90 training.batch_size 0.0 +750 91 model.embedding_dim 2.0 +750 91 model.relation_dim 1.0 +750 91 negative_sampler.num_negs_per_pos 38.0 +750 91 training.batch_size 1.0 +750 92 model.embedding_dim 1.0 +750 92 model.relation_dim 0.0 +750 92 
negative_sampler.num_negs_per_pos 23.0 +750 92 training.batch_size 2.0 +750 93 model.embedding_dim 2.0 +750 93 model.relation_dim 1.0 +750 93 negative_sampler.num_negs_per_pos 58.0 +750 93 training.batch_size 2.0 +750 94 model.embedding_dim 1.0 +750 94 model.relation_dim 1.0 +750 94 negative_sampler.num_negs_per_pos 16.0 +750 94 training.batch_size 2.0 +750 95 model.embedding_dim 0.0 +750 95 model.relation_dim 1.0 +750 95 negative_sampler.num_negs_per_pos 3.0 +750 95 training.batch_size 2.0 +750 96 model.embedding_dim 2.0 +750 96 model.relation_dim 1.0 +750 96 negative_sampler.num_negs_per_pos 90.0 +750 96 training.batch_size 2.0 +750 97 model.embedding_dim 0.0 +750 97 model.relation_dim 1.0 +750 97 negative_sampler.num_negs_per_pos 38.0 +750 97 training.batch_size 1.0 +750 98 model.embedding_dim 1.0 +750 98 model.relation_dim 0.0 +750 98 negative_sampler.num_negs_per_pos 32.0 +750 98 training.batch_size 1.0 +750 99 model.embedding_dim 0.0 +750 99 model.relation_dim 0.0 +750 99 negative_sampler.num_negs_per_pos 75.0 +750 99 training.batch_size 1.0 +750 100 model.embedding_dim 2.0 +750 100 model.relation_dim 2.0 +750 100 negative_sampler.num_negs_per_pos 11.0 +750 100 training.batch_size 1.0 +750 1 dataset """kinships""" +750 1 model """transd""" +750 1 loss """softplus""" +750 1 regularizer """no""" +750 1 optimizer """adadelta""" +750 1 training_loop """owa""" +750 1 negative_sampler """basic""" +750 1 evaluator """rankbased""" +750 2 dataset """kinships""" +750 2 model """transd""" +750 2 loss """softplus""" +750 2 regularizer """no""" +750 2 optimizer """adadelta""" +750 2 training_loop """owa""" +750 2 negative_sampler """basic""" +750 2 evaluator """rankbased""" +750 3 dataset """kinships""" +750 3 model """transd""" +750 3 loss """softplus""" +750 3 regularizer """no""" +750 3 optimizer """adadelta""" +750 3 training_loop """owa""" +750 3 negative_sampler """basic""" +750 3 evaluator """rankbased""" +750 4 dataset """kinships""" +750 4 model """transd""" +750 
4 loss """softplus""" +750 4 regularizer """no""" +750 4 optimizer """adadelta""" +750 4 training_loop """owa""" +750 4 negative_sampler """basic""" +750 4 evaluator """rankbased""" +750 5 dataset """kinships""" +750 5 model """transd""" +750 5 loss """softplus""" +750 5 regularizer """no""" +750 5 optimizer """adadelta""" +750 5 training_loop """owa""" +750 5 negative_sampler """basic""" +750 5 evaluator """rankbased""" +750 6 dataset """kinships""" +750 6 model """transd""" +750 6 loss """softplus""" +750 6 regularizer """no""" +750 6 optimizer """adadelta""" +750 6 training_loop """owa""" +750 6 negative_sampler """basic""" +750 6 evaluator """rankbased""" +750 7 dataset """kinships""" +750 7 model """transd""" +750 7 loss """softplus""" +750 7 regularizer """no""" +750 7 optimizer """adadelta""" +750 7 training_loop """owa""" +750 7 negative_sampler """basic""" +750 7 evaluator """rankbased""" +750 8 dataset """kinships""" +750 8 model """transd""" +750 8 loss """softplus""" +750 8 regularizer """no""" +750 8 optimizer """adadelta""" +750 8 training_loop """owa""" +750 8 negative_sampler """basic""" +750 8 evaluator """rankbased""" +750 9 dataset """kinships""" +750 9 model """transd""" +750 9 loss """softplus""" +750 9 regularizer """no""" +750 9 optimizer """adadelta""" +750 9 training_loop """owa""" +750 9 negative_sampler """basic""" +750 9 evaluator """rankbased""" +750 10 dataset """kinships""" +750 10 model """transd""" +750 10 loss """softplus""" +750 10 regularizer """no""" +750 10 optimizer """adadelta""" +750 10 training_loop """owa""" +750 10 negative_sampler """basic""" +750 10 evaluator """rankbased""" +750 11 dataset """kinships""" +750 11 model """transd""" +750 11 loss """softplus""" +750 11 regularizer """no""" +750 11 optimizer """adadelta""" +750 11 training_loop """owa""" +750 11 negative_sampler """basic""" +750 11 evaluator """rankbased""" +750 12 dataset """kinships""" +750 12 model """transd""" +750 12 loss """softplus""" +750 12 
regularizer """no""" +750 12 optimizer """adadelta""" +750 12 training_loop """owa""" +750 12 negative_sampler """basic""" +750 12 evaluator """rankbased""" +750 13 dataset """kinships""" +750 13 model """transd""" +750 13 loss """softplus""" +750 13 regularizer """no""" +750 13 optimizer """adadelta""" +750 13 training_loop """owa""" +750 13 negative_sampler """basic""" +750 13 evaluator """rankbased""" +750 14 dataset """kinships""" +750 14 model """transd""" +750 14 loss """softplus""" +750 14 regularizer """no""" +750 14 optimizer """adadelta""" +750 14 training_loop """owa""" +750 14 negative_sampler """basic""" +750 14 evaluator """rankbased""" +750 15 dataset """kinships""" +750 15 model """transd""" +750 15 loss """softplus""" +750 15 regularizer """no""" +750 15 optimizer """adadelta""" +750 15 training_loop """owa""" +750 15 negative_sampler """basic""" +750 15 evaluator """rankbased""" +750 16 dataset """kinships""" +750 16 model """transd""" +750 16 loss """softplus""" +750 16 regularizer """no""" +750 16 optimizer """adadelta""" +750 16 training_loop """owa""" +750 16 negative_sampler """basic""" +750 16 evaluator """rankbased""" +750 17 dataset """kinships""" +750 17 model """transd""" +750 17 loss """softplus""" +750 17 regularizer """no""" +750 17 optimizer """adadelta""" +750 17 training_loop """owa""" +750 17 negative_sampler """basic""" +750 17 evaluator """rankbased""" +750 18 dataset """kinships""" +750 18 model """transd""" +750 18 loss """softplus""" +750 18 regularizer """no""" +750 18 optimizer """adadelta""" +750 18 training_loop """owa""" +750 18 negative_sampler """basic""" +750 18 evaluator """rankbased""" +750 19 dataset """kinships""" +750 19 model """transd""" +750 19 loss """softplus""" +750 19 regularizer """no""" +750 19 optimizer """adadelta""" +750 19 training_loop """owa""" +750 19 negative_sampler """basic""" +750 19 evaluator """rankbased""" +750 20 dataset """kinships""" +750 20 model """transd""" +750 20 loss """softplus""" 
+750 20 regularizer """no""" +750 20 optimizer """adadelta""" +750 20 training_loop """owa""" +750 20 negative_sampler """basic""" +750 20 evaluator """rankbased""" +750 21 dataset """kinships""" +750 21 model """transd""" +750 21 loss """softplus""" +750 21 regularizer """no""" +750 21 optimizer """adadelta""" +750 21 training_loop """owa""" +750 21 negative_sampler """basic""" +750 21 evaluator """rankbased""" +750 22 dataset """kinships""" +750 22 model """transd""" +750 22 loss """softplus""" +750 22 regularizer """no""" +750 22 optimizer """adadelta""" +750 22 training_loop """owa""" +750 22 negative_sampler """basic""" +750 22 evaluator """rankbased""" +750 23 dataset """kinships""" +750 23 model """transd""" +750 23 loss """softplus""" +750 23 regularizer """no""" +750 23 optimizer """adadelta""" +750 23 training_loop """owa""" +750 23 negative_sampler """basic""" +750 23 evaluator """rankbased""" +750 24 dataset """kinships""" +750 24 model """transd""" +750 24 loss """softplus""" +750 24 regularizer """no""" +750 24 optimizer """adadelta""" +750 24 training_loop """owa""" +750 24 negative_sampler """basic""" +750 24 evaluator """rankbased""" +750 25 dataset """kinships""" +750 25 model """transd""" +750 25 loss """softplus""" +750 25 regularizer """no""" +750 25 optimizer """adadelta""" +750 25 training_loop """owa""" +750 25 negative_sampler """basic""" +750 25 evaluator """rankbased""" +750 26 dataset """kinships""" +750 26 model """transd""" +750 26 loss """softplus""" +750 26 regularizer """no""" +750 26 optimizer """adadelta""" +750 26 training_loop """owa""" +750 26 negative_sampler """basic""" +750 26 evaluator """rankbased""" +750 27 dataset """kinships""" +750 27 model """transd""" +750 27 loss """softplus""" +750 27 regularizer """no""" +750 27 optimizer """adadelta""" +750 27 training_loop """owa""" +750 27 negative_sampler """basic""" +750 27 evaluator """rankbased""" +750 28 dataset """kinships""" +750 28 model """transd""" +750 28 loss 
"""softplus""" +750 28 regularizer """no""" +750 28 optimizer """adadelta""" +750 28 training_loop """owa""" +750 28 negative_sampler """basic""" +750 28 evaluator """rankbased""" +750 29 dataset """kinships""" +750 29 model """transd""" +750 29 loss """softplus""" +750 29 regularizer """no""" +750 29 optimizer """adadelta""" +750 29 training_loop """owa""" +750 29 negative_sampler """basic""" +750 29 evaluator """rankbased""" +750 30 dataset """kinships""" +750 30 model """transd""" +750 30 loss """softplus""" +750 30 regularizer """no""" +750 30 optimizer """adadelta""" +750 30 training_loop """owa""" +750 30 negative_sampler """basic""" +750 30 evaluator """rankbased""" +750 31 dataset """kinships""" +750 31 model """transd""" +750 31 loss """softplus""" +750 31 regularizer """no""" +750 31 optimizer """adadelta""" +750 31 training_loop """owa""" +750 31 negative_sampler """basic""" +750 31 evaluator """rankbased""" +750 32 dataset """kinships""" +750 32 model """transd""" +750 32 loss """softplus""" +750 32 regularizer """no""" +750 32 optimizer """adadelta""" +750 32 training_loop """owa""" +750 32 negative_sampler """basic""" +750 32 evaluator """rankbased""" +750 33 dataset """kinships""" +750 33 model """transd""" +750 33 loss """softplus""" +750 33 regularizer """no""" +750 33 optimizer """adadelta""" +750 33 training_loop """owa""" +750 33 negative_sampler """basic""" +750 33 evaluator """rankbased""" +750 34 dataset """kinships""" +750 34 model """transd""" +750 34 loss """softplus""" +750 34 regularizer """no""" +750 34 optimizer """adadelta""" +750 34 training_loop """owa""" +750 34 negative_sampler """basic""" +750 34 evaluator """rankbased""" +750 35 dataset """kinships""" +750 35 model """transd""" +750 35 loss """softplus""" +750 35 regularizer """no""" +750 35 optimizer """adadelta""" +750 35 training_loop """owa""" +750 35 negative_sampler """basic""" +750 35 evaluator """rankbased""" +750 36 dataset """kinships""" +750 36 model """transd""" +750 
36 loss """softplus""" +750 36 regularizer """no""" +750 36 optimizer """adadelta""" +750 36 training_loop """owa""" +750 36 negative_sampler """basic""" +750 36 evaluator """rankbased""" +750 37 dataset """kinships""" +750 37 model """transd""" +750 37 loss """softplus""" +750 37 regularizer """no""" +750 37 optimizer """adadelta""" +750 37 training_loop """owa""" +750 37 negative_sampler """basic""" +750 37 evaluator """rankbased""" +750 38 dataset """kinships""" +750 38 model """transd""" +750 38 loss """softplus""" +750 38 regularizer """no""" +750 38 optimizer """adadelta""" +750 38 training_loop """owa""" +750 38 negative_sampler """basic""" +750 38 evaluator """rankbased""" +750 39 dataset """kinships""" +750 39 model """transd""" +750 39 loss """softplus""" +750 39 regularizer """no""" +750 39 optimizer """adadelta""" +750 39 training_loop """owa""" +750 39 negative_sampler """basic""" +750 39 evaluator """rankbased""" +750 40 dataset """kinships""" +750 40 model """transd""" +750 40 loss """softplus""" +750 40 regularizer """no""" +750 40 optimizer """adadelta""" +750 40 training_loop """owa""" +750 40 negative_sampler """basic""" +750 40 evaluator """rankbased""" +750 41 dataset """kinships""" +750 41 model """transd""" +750 41 loss """softplus""" +750 41 regularizer """no""" +750 41 optimizer """adadelta""" +750 41 training_loop """owa""" +750 41 negative_sampler """basic""" +750 41 evaluator """rankbased""" +750 42 dataset """kinships""" +750 42 model """transd""" +750 42 loss """softplus""" +750 42 regularizer """no""" +750 42 optimizer """adadelta""" +750 42 training_loop """owa""" +750 42 negative_sampler """basic""" +750 42 evaluator """rankbased""" +750 43 dataset """kinships""" +750 43 model """transd""" +750 43 loss """softplus""" +750 43 regularizer """no""" +750 43 optimizer """adadelta""" +750 43 training_loop """owa""" +750 43 negative_sampler """basic""" +750 43 evaluator """rankbased""" +750 44 dataset """kinships""" +750 44 model 
"""transd""" +750 44 loss """softplus""" +750 44 regularizer """no""" +750 44 optimizer """adadelta""" +750 44 training_loop """owa""" +750 44 negative_sampler """basic""" +750 44 evaluator """rankbased""" +750 45 dataset """kinships""" +750 45 model """transd""" +750 45 loss """softplus""" +750 45 regularizer """no""" +750 45 optimizer """adadelta""" +750 45 training_loop """owa""" +750 45 negative_sampler """basic""" +750 45 evaluator """rankbased""" +750 46 dataset """kinships""" +750 46 model """transd""" +750 46 loss """softplus""" +750 46 regularizer """no""" +750 46 optimizer """adadelta""" +750 46 training_loop """owa""" +750 46 negative_sampler """basic""" +750 46 evaluator """rankbased""" +750 47 dataset """kinships""" +750 47 model """transd""" +750 47 loss """softplus""" +750 47 regularizer """no""" +750 47 optimizer """adadelta""" +750 47 training_loop """owa""" +750 47 negative_sampler """basic""" +750 47 evaluator """rankbased""" +750 48 dataset """kinships""" +750 48 model """transd""" +750 48 loss """softplus""" +750 48 regularizer """no""" +750 48 optimizer """adadelta""" +750 48 training_loop """owa""" +750 48 negative_sampler """basic""" +750 48 evaluator """rankbased""" +750 49 dataset """kinships""" +750 49 model """transd""" +750 49 loss """softplus""" +750 49 regularizer """no""" +750 49 optimizer """adadelta""" +750 49 training_loop """owa""" +750 49 negative_sampler """basic""" +750 49 evaluator """rankbased""" +750 50 dataset """kinships""" +750 50 model """transd""" +750 50 loss """softplus""" +750 50 regularizer """no""" +750 50 optimizer """adadelta""" +750 50 training_loop """owa""" +750 50 negative_sampler """basic""" +750 50 evaluator """rankbased""" +750 51 dataset """kinships""" +750 51 model """transd""" +750 51 loss """softplus""" +750 51 regularizer """no""" +750 51 optimizer """adadelta""" +750 51 training_loop """owa""" +750 51 negative_sampler """basic""" +750 51 evaluator """rankbased""" +750 52 dataset """kinships""" +750 
52 model """transd""" +750 52 loss """softplus""" +750 52 regularizer """no""" +750 52 optimizer """adadelta""" +750 52 training_loop """owa""" +750 52 negative_sampler """basic""" +750 52 evaluator """rankbased""" +750 53 dataset """kinships""" +750 53 model """transd""" +750 53 loss """softplus""" +750 53 regularizer """no""" +750 53 optimizer """adadelta""" +750 53 training_loop """owa""" +750 53 negative_sampler """basic""" +750 53 evaluator """rankbased""" +750 54 dataset """kinships""" +750 54 model """transd""" +750 54 loss """softplus""" +750 54 regularizer """no""" +750 54 optimizer """adadelta""" +750 54 training_loop """owa""" +750 54 negative_sampler """basic""" +750 54 evaluator """rankbased""" +750 55 dataset """kinships""" +750 55 model """transd""" +750 55 loss """softplus""" +750 55 regularizer """no""" +750 55 optimizer """adadelta""" +750 55 training_loop """owa""" +750 55 negative_sampler """basic""" +750 55 evaluator """rankbased""" +750 56 dataset """kinships""" +750 56 model """transd""" +750 56 loss """softplus""" +750 56 regularizer """no""" +750 56 optimizer """adadelta""" +750 56 training_loop """owa""" +750 56 negative_sampler """basic""" +750 56 evaluator """rankbased""" +750 57 dataset """kinships""" +750 57 model """transd""" +750 57 loss """softplus""" +750 57 regularizer """no""" +750 57 optimizer """adadelta""" +750 57 training_loop """owa""" +750 57 negative_sampler """basic""" +750 57 evaluator """rankbased""" +750 58 dataset """kinships""" +750 58 model """transd""" +750 58 loss """softplus""" +750 58 regularizer """no""" +750 58 optimizer """adadelta""" +750 58 training_loop """owa""" +750 58 negative_sampler """basic""" +750 58 evaluator """rankbased""" +750 59 dataset """kinships""" +750 59 model """transd""" +750 59 loss """softplus""" +750 59 regularizer """no""" +750 59 optimizer """adadelta""" +750 59 training_loop """owa""" +750 59 negative_sampler """basic""" +750 59 evaluator """rankbased""" +750 60 dataset 
"""kinships""" +750 60 model """transd""" +750 60 loss """softplus""" +750 60 regularizer """no""" +750 60 optimizer """adadelta""" +750 60 training_loop """owa""" +750 60 negative_sampler """basic""" +750 60 evaluator """rankbased""" +750 61 dataset """kinships""" +750 61 model """transd""" +750 61 loss """softplus""" +750 61 regularizer """no""" +750 61 optimizer """adadelta""" +750 61 training_loop """owa""" +750 61 negative_sampler """basic""" +750 61 evaluator """rankbased""" +750 62 dataset """kinships""" +750 62 model """transd""" +750 62 loss """softplus""" +750 62 regularizer """no""" +750 62 optimizer """adadelta""" +750 62 training_loop """owa""" +750 62 negative_sampler """basic""" +750 62 evaluator """rankbased""" +750 63 dataset """kinships""" +750 63 model """transd""" +750 63 loss """softplus""" +750 63 regularizer """no""" +750 63 optimizer """adadelta""" +750 63 training_loop """owa""" +750 63 negative_sampler """basic""" +750 63 evaluator """rankbased""" +750 64 dataset """kinships""" +750 64 model """transd""" +750 64 loss """softplus""" +750 64 regularizer """no""" +750 64 optimizer """adadelta""" +750 64 training_loop """owa""" +750 64 negative_sampler """basic""" +750 64 evaluator """rankbased""" +750 65 dataset """kinships""" +750 65 model """transd""" +750 65 loss """softplus""" +750 65 regularizer """no""" +750 65 optimizer """adadelta""" +750 65 training_loop """owa""" +750 65 negative_sampler """basic""" +750 65 evaluator """rankbased""" +750 66 dataset """kinships""" +750 66 model """transd""" +750 66 loss """softplus""" +750 66 regularizer """no""" +750 66 optimizer """adadelta""" +750 66 training_loop """owa""" +750 66 negative_sampler """basic""" +750 66 evaluator """rankbased""" +750 67 dataset """kinships""" +750 67 model """transd""" +750 67 loss """softplus""" +750 67 regularizer """no""" +750 67 optimizer """adadelta""" +750 67 training_loop """owa""" +750 67 negative_sampler """basic""" +750 67 evaluator """rankbased""" +750 68 
dataset """kinships""" +750 68 model """transd""" +750 68 loss """softplus""" +750 68 regularizer """no""" +750 68 optimizer """adadelta""" +750 68 training_loop """owa""" +750 68 negative_sampler """basic""" +750 68 evaluator """rankbased""" +750 69 dataset """kinships""" +750 69 model """transd""" +750 69 loss """softplus""" +750 69 regularizer """no""" +750 69 optimizer """adadelta""" +750 69 training_loop """owa""" +750 69 negative_sampler """basic""" +750 69 evaluator """rankbased""" +750 70 dataset """kinships""" +750 70 model """transd""" +750 70 loss """softplus""" +750 70 regularizer """no""" +750 70 optimizer """adadelta""" +750 70 training_loop """owa""" +750 70 negative_sampler """basic""" +750 70 evaluator """rankbased""" +750 71 dataset """kinships""" +750 71 model """transd""" +750 71 loss """softplus""" +750 71 regularizer """no""" +750 71 optimizer """adadelta""" +750 71 training_loop """owa""" +750 71 negative_sampler """basic""" +750 71 evaluator """rankbased""" +750 72 dataset """kinships""" +750 72 model """transd""" +750 72 loss """softplus""" +750 72 regularizer """no""" +750 72 optimizer """adadelta""" +750 72 training_loop """owa""" +750 72 negative_sampler """basic""" +750 72 evaluator """rankbased""" +750 73 dataset """kinships""" +750 73 model """transd""" +750 73 loss """softplus""" +750 73 regularizer """no""" +750 73 optimizer """adadelta""" +750 73 training_loop """owa""" +750 73 negative_sampler """basic""" +750 73 evaluator """rankbased""" +750 74 dataset """kinships""" +750 74 model """transd""" +750 74 loss """softplus""" +750 74 regularizer """no""" +750 74 optimizer """adadelta""" +750 74 training_loop """owa""" +750 74 negative_sampler """basic""" +750 74 evaluator """rankbased""" +750 75 dataset """kinships""" +750 75 model """transd""" +750 75 loss """softplus""" +750 75 regularizer """no""" +750 75 optimizer """adadelta""" +750 75 training_loop """owa""" +750 75 negative_sampler """basic""" +750 75 evaluator """rankbased""" 
+750 76 dataset """kinships""" +750 76 model """transd""" +750 76 loss """softplus""" +750 76 regularizer """no""" +750 76 optimizer """adadelta""" +750 76 training_loop """owa""" +750 76 negative_sampler """basic""" +750 76 evaluator """rankbased""" +750 77 dataset """kinships""" +750 77 model """transd""" +750 77 loss """softplus""" +750 77 regularizer """no""" +750 77 optimizer """adadelta""" +750 77 training_loop """owa""" +750 77 negative_sampler """basic""" +750 77 evaluator """rankbased""" +750 78 dataset """kinships""" +750 78 model """transd""" +750 78 loss """softplus""" +750 78 regularizer """no""" +750 78 optimizer """adadelta""" +750 78 training_loop """owa""" +750 78 negative_sampler """basic""" +750 78 evaluator """rankbased""" +750 79 dataset """kinships""" +750 79 model """transd""" +750 79 loss """softplus""" +750 79 regularizer """no""" +750 79 optimizer """adadelta""" +750 79 training_loop """owa""" +750 79 negative_sampler """basic""" +750 79 evaluator """rankbased""" +750 80 dataset """kinships""" +750 80 model """transd""" +750 80 loss """softplus""" +750 80 regularizer """no""" +750 80 optimizer """adadelta""" +750 80 training_loop """owa""" +750 80 negative_sampler """basic""" +750 80 evaluator """rankbased""" +750 81 dataset """kinships""" +750 81 model """transd""" +750 81 loss """softplus""" +750 81 regularizer """no""" +750 81 optimizer """adadelta""" +750 81 training_loop """owa""" +750 81 negative_sampler """basic""" +750 81 evaluator """rankbased""" +750 82 dataset """kinships""" +750 82 model """transd""" +750 82 loss """softplus""" +750 82 regularizer """no""" +750 82 optimizer """adadelta""" +750 82 training_loop """owa""" +750 82 negative_sampler """basic""" +750 82 evaluator """rankbased""" +750 83 dataset """kinships""" +750 83 model """transd""" +750 83 loss """softplus""" +750 83 regularizer """no""" +750 83 optimizer """adadelta""" +750 83 training_loop """owa""" +750 83 negative_sampler """basic""" +750 83 evaluator 
"""rankbased""" +750 84 dataset """kinships""" +750 84 model """transd""" +750 84 loss """softplus""" +750 84 regularizer """no""" +750 84 optimizer """adadelta""" +750 84 training_loop """owa""" +750 84 negative_sampler """basic""" +750 84 evaluator """rankbased""" +750 85 dataset """kinships""" +750 85 model """transd""" +750 85 loss """softplus""" +750 85 regularizer """no""" +750 85 optimizer """adadelta""" +750 85 training_loop """owa""" +750 85 negative_sampler """basic""" +750 85 evaluator """rankbased""" +750 86 dataset """kinships""" +750 86 model """transd""" +750 86 loss """softplus""" +750 86 regularizer """no""" +750 86 optimizer """adadelta""" +750 86 training_loop """owa""" +750 86 negative_sampler """basic""" +750 86 evaluator """rankbased""" +750 87 dataset """kinships""" +750 87 model """transd""" +750 87 loss """softplus""" +750 87 regularizer """no""" +750 87 optimizer """adadelta""" +750 87 training_loop """owa""" +750 87 negative_sampler """basic""" +750 87 evaluator """rankbased""" +750 88 dataset """kinships""" +750 88 model """transd""" +750 88 loss """softplus""" +750 88 regularizer """no""" +750 88 optimizer """adadelta""" +750 88 training_loop """owa""" +750 88 negative_sampler """basic""" +750 88 evaluator """rankbased""" +750 89 dataset """kinships""" +750 89 model """transd""" +750 89 loss """softplus""" +750 89 regularizer """no""" +750 89 optimizer """adadelta""" +750 89 training_loop """owa""" +750 89 negative_sampler """basic""" +750 89 evaluator """rankbased""" +750 90 dataset """kinships""" +750 90 model """transd""" +750 90 loss """softplus""" +750 90 regularizer """no""" +750 90 optimizer """adadelta""" +750 90 training_loop """owa""" +750 90 negative_sampler """basic""" +750 90 evaluator """rankbased""" +750 91 dataset """kinships""" +750 91 model """transd""" +750 91 loss """softplus""" +750 91 regularizer """no""" +750 91 optimizer """adadelta""" +750 91 training_loop """owa""" +750 91 negative_sampler """basic""" +750 91 
evaluator """rankbased""" +750 92 dataset """kinships""" +750 92 model """transd""" +750 92 loss """softplus""" +750 92 regularizer """no""" +750 92 optimizer """adadelta""" +750 92 training_loop """owa""" +750 92 negative_sampler """basic""" +750 92 evaluator """rankbased""" +750 93 dataset """kinships""" +750 93 model """transd""" +750 93 loss """softplus""" +750 93 regularizer """no""" +750 93 optimizer """adadelta""" +750 93 training_loop """owa""" +750 93 negative_sampler """basic""" +750 93 evaluator """rankbased""" +750 94 dataset """kinships""" +750 94 model """transd""" +750 94 loss """softplus""" +750 94 regularizer """no""" +750 94 optimizer """adadelta""" +750 94 training_loop """owa""" +750 94 negative_sampler """basic""" +750 94 evaluator """rankbased""" +750 95 dataset """kinships""" +750 95 model """transd""" +750 95 loss """softplus""" +750 95 regularizer """no""" +750 95 optimizer """adadelta""" +750 95 training_loop """owa""" +750 95 negative_sampler """basic""" +750 95 evaluator """rankbased""" +750 96 dataset """kinships""" +750 96 model """transd""" +750 96 loss """softplus""" +750 96 regularizer """no""" +750 96 optimizer """adadelta""" +750 96 training_loop """owa""" +750 96 negative_sampler """basic""" +750 96 evaluator """rankbased""" +750 97 dataset """kinships""" +750 97 model """transd""" +750 97 loss """softplus""" +750 97 regularizer """no""" +750 97 optimizer """adadelta""" +750 97 training_loop """owa""" +750 97 negative_sampler """basic""" +750 97 evaluator """rankbased""" +750 98 dataset """kinships""" +750 98 model """transd""" +750 98 loss """softplus""" +750 98 regularizer """no""" +750 98 optimizer """adadelta""" +750 98 training_loop """owa""" +750 98 negative_sampler """basic""" +750 98 evaluator """rankbased""" +750 99 dataset """kinships""" +750 99 model """transd""" +750 99 loss """softplus""" +750 99 regularizer """no""" +750 99 optimizer """adadelta""" +750 99 training_loop """owa""" +750 99 negative_sampler """basic""" 
+750 99 evaluator """rankbased""" +750 100 dataset """kinships""" +750 100 model """transd""" +750 100 loss """softplus""" +750 100 regularizer """no""" +750 100 optimizer """adadelta""" +750 100 training_loop """owa""" +750 100 negative_sampler """basic""" +750 100 evaluator """rankbased""" +751 1 model.embedding_dim 1.0 +751 1 model.relation_dim 0.0 +751 1 loss.margin 4.484854610322663 +751 1 negative_sampler.num_negs_per_pos 45.0 +751 1 training.batch_size 2.0 +751 2 model.embedding_dim 0.0 +751 2 model.relation_dim 0.0 +751 2 loss.margin 8.0122254667884 +751 2 negative_sampler.num_negs_per_pos 76.0 +751 2 training.batch_size 2.0 +751 3 model.embedding_dim 0.0 +751 3 model.relation_dim 0.0 +751 3 loss.margin 4.110969555414307 +751 3 negative_sampler.num_negs_per_pos 47.0 +751 3 training.batch_size 0.0 +751 4 model.embedding_dim 2.0 +751 4 model.relation_dim 0.0 +751 4 loss.margin 1.348897883002968 +751 4 negative_sampler.num_negs_per_pos 6.0 +751 4 training.batch_size 2.0 +751 5 model.embedding_dim 1.0 +751 5 model.relation_dim 2.0 +751 5 loss.margin 3.6795604826716724 +751 5 negative_sampler.num_negs_per_pos 72.0 +751 5 training.batch_size 2.0 +751 6 model.embedding_dim 0.0 +751 6 model.relation_dim 0.0 +751 6 loss.margin 6.730371862503779 +751 6 negative_sampler.num_negs_per_pos 44.0 +751 6 training.batch_size 0.0 +751 7 model.embedding_dim 1.0 +751 7 model.relation_dim 2.0 +751 7 loss.margin 2.394408722672157 +751 7 negative_sampler.num_negs_per_pos 81.0 +751 7 training.batch_size 2.0 +751 8 model.embedding_dim 1.0 +751 8 model.relation_dim 1.0 +751 8 loss.margin 0.6199940383652016 +751 8 negative_sampler.num_negs_per_pos 73.0 +751 8 training.batch_size 0.0 +751 9 model.embedding_dim 1.0 +751 9 model.relation_dim 0.0 +751 9 loss.margin 5.559659479810824 +751 9 negative_sampler.num_negs_per_pos 61.0 +751 9 training.batch_size 1.0 +751 10 model.embedding_dim 2.0 +751 10 model.relation_dim 1.0 +751 10 loss.margin 3.0757426496187787 +751 10 
negative_sampler.num_negs_per_pos 67.0 +751 10 training.batch_size 0.0 +751 11 model.embedding_dim 2.0 +751 11 model.relation_dim 1.0 +751 11 loss.margin 8.842081280670753 +751 11 negative_sampler.num_negs_per_pos 20.0 +751 11 training.batch_size 2.0 +751 12 model.embedding_dim 0.0 +751 12 model.relation_dim 2.0 +751 12 loss.margin 2.6336913267308972 +751 12 negative_sampler.num_negs_per_pos 39.0 +751 12 training.batch_size 2.0 +751 13 model.embedding_dim 0.0 +751 13 model.relation_dim 1.0 +751 13 loss.margin 3.8101375082843543 +751 13 negative_sampler.num_negs_per_pos 48.0 +751 13 training.batch_size 1.0 +751 14 model.embedding_dim 0.0 +751 14 model.relation_dim 0.0 +751 14 loss.margin 5.9377345771422965 +751 14 negative_sampler.num_negs_per_pos 57.0 +751 14 training.batch_size 0.0 +751 15 model.embedding_dim 1.0 +751 15 model.relation_dim 0.0 +751 15 loss.margin 6.221364447095166 +751 15 negative_sampler.num_negs_per_pos 86.0 +751 15 training.batch_size 1.0 +751 16 model.embedding_dim 2.0 +751 16 model.relation_dim 0.0 +751 16 loss.margin 1.795130491876092 +751 16 negative_sampler.num_negs_per_pos 52.0 +751 16 training.batch_size 2.0 +751 17 model.embedding_dim 0.0 +751 17 model.relation_dim 2.0 +751 17 loss.margin 7.191258449972141 +751 17 negative_sampler.num_negs_per_pos 21.0 +751 17 training.batch_size 2.0 +751 18 model.embedding_dim 0.0 +751 18 model.relation_dim 2.0 +751 18 loss.margin 1.4976890123160935 +751 18 negative_sampler.num_negs_per_pos 86.0 +751 18 training.batch_size 1.0 +751 19 model.embedding_dim 0.0 +751 19 model.relation_dim 0.0 +751 19 loss.margin 8.425173944530133 +751 19 negative_sampler.num_negs_per_pos 39.0 +751 19 training.batch_size 1.0 +751 20 model.embedding_dim 0.0 +751 20 model.relation_dim 2.0 +751 20 loss.margin 5.646727962956294 +751 20 negative_sampler.num_negs_per_pos 1.0 +751 20 training.batch_size 1.0 +751 21 model.embedding_dim 0.0 +751 21 model.relation_dim 2.0 +751 21 loss.margin 7.308967451351337 +751 21 
negative_sampler.num_negs_per_pos 21.0 +751 21 training.batch_size 1.0 +751 22 model.embedding_dim 1.0 +751 22 model.relation_dim 0.0 +751 22 loss.margin 9.120344251275002 +751 22 negative_sampler.num_negs_per_pos 9.0 +751 22 training.batch_size 2.0 +751 23 model.embedding_dim 2.0 +751 23 model.relation_dim 1.0 +751 23 loss.margin 1.6566916829031708 +751 23 negative_sampler.num_negs_per_pos 29.0 +751 23 training.batch_size 0.0 +751 24 model.embedding_dim 2.0 +751 24 model.relation_dim 0.0 +751 24 loss.margin 4.829002711995585 +751 24 negative_sampler.num_negs_per_pos 68.0 +751 24 training.batch_size 1.0 +751 25 model.embedding_dim 1.0 +751 25 model.relation_dim 1.0 +751 25 loss.margin 8.474231774473166 +751 25 negative_sampler.num_negs_per_pos 57.0 +751 25 training.batch_size 1.0 +751 26 model.embedding_dim 2.0 +751 26 model.relation_dim 0.0 +751 26 loss.margin 4.228829423259727 +751 26 negative_sampler.num_negs_per_pos 62.0 +751 26 training.batch_size 0.0 +751 27 model.embedding_dim 1.0 +751 27 model.relation_dim 1.0 +751 27 loss.margin 2.061486236428581 +751 27 negative_sampler.num_negs_per_pos 63.0 +751 27 training.batch_size 2.0 +751 28 model.embedding_dim 2.0 +751 28 model.relation_dim 2.0 +751 28 loss.margin 2.945263511461781 +751 28 negative_sampler.num_negs_per_pos 13.0 +751 28 training.batch_size 2.0 +751 29 model.embedding_dim 1.0 +751 29 model.relation_dim 2.0 +751 29 loss.margin 9.865075351654621 +751 29 negative_sampler.num_negs_per_pos 35.0 +751 29 training.batch_size 2.0 +751 30 model.embedding_dim 2.0 +751 30 model.relation_dim 2.0 +751 30 loss.margin 8.375739334348328 +751 30 negative_sampler.num_negs_per_pos 67.0 +751 30 training.batch_size 0.0 +751 31 model.embedding_dim 2.0 +751 31 model.relation_dim 1.0 +751 31 loss.margin 7.462910029217231 +751 31 negative_sampler.num_negs_per_pos 42.0 +751 31 training.batch_size 0.0 +751 32 model.embedding_dim 0.0 +751 32 model.relation_dim 0.0 +751 32 loss.margin 4.197844740959795 +751 32 
negative_sampler.num_negs_per_pos 58.0 +751 32 training.batch_size 1.0 +751 33 model.embedding_dim 2.0 +751 33 model.relation_dim 1.0 +751 33 loss.margin 0.6084836949860001 +751 33 negative_sampler.num_negs_per_pos 19.0 +751 33 training.batch_size 0.0 +751 34 model.embedding_dim 1.0 +751 34 model.relation_dim 1.0 +751 34 loss.margin 1.404000786765464 +751 34 negative_sampler.num_negs_per_pos 51.0 +751 34 training.batch_size 1.0 +751 35 model.embedding_dim 0.0 +751 35 model.relation_dim 0.0 +751 35 loss.margin 6.208572209015352 +751 35 negative_sampler.num_negs_per_pos 28.0 +751 35 training.batch_size 0.0 +751 36 model.embedding_dim 0.0 +751 36 model.relation_dim 2.0 +751 36 loss.margin 9.033264719238103 +751 36 negative_sampler.num_negs_per_pos 68.0 +751 36 training.batch_size 2.0 +751 37 model.embedding_dim 0.0 +751 37 model.relation_dim 0.0 +751 37 loss.margin 7.502666287143644 +751 37 negative_sampler.num_negs_per_pos 65.0 +751 37 training.batch_size 1.0 +751 38 model.embedding_dim 1.0 +751 38 model.relation_dim 2.0 +751 38 loss.margin 2.309931014168141 +751 38 negative_sampler.num_negs_per_pos 3.0 +751 38 training.batch_size 1.0 +751 39 model.embedding_dim 2.0 +751 39 model.relation_dim 2.0 +751 39 loss.margin 6.761769790330927 +751 39 negative_sampler.num_negs_per_pos 72.0 +751 39 training.batch_size 1.0 +751 40 model.embedding_dim 0.0 +751 40 model.relation_dim 2.0 +751 40 loss.margin 5.15171882232926 +751 40 negative_sampler.num_negs_per_pos 51.0 +751 40 training.batch_size 1.0 +751 41 model.embedding_dim 1.0 +751 41 model.relation_dim 2.0 +751 41 loss.margin 8.998963666906656 +751 41 negative_sampler.num_negs_per_pos 64.0 +751 41 training.batch_size 2.0 +751 42 model.embedding_dim 1.0 +751 42 model.relation_dim 1.0 +751 42 loss.margin 8.888346723648484 +751 42 negative_sampler.num_negs_per_pos 77.0 +751 42 training.batch_size 0.0 +751 43 model.embedding_dim 1.0 +751 43 model.relation_dim 1.0 +751 43 loss.margin 1.2054884573366613 +751 43 
negative_sampler.num_negs_per_pos 81.0 +751 43 training.batch_size 0.0 +751 44 model.embedding_dim 1.0 +751 44 model.relation_dim 1.0 +751 44 loss.margin 3.994781543886503 +751 44 negative_sampler.num_negs_per_pos 62.0 +751 44 training.batch_size 2.0 +751 45 model.embedding_dim 0.0 +751 45 model.relation_dim 2.0 +751 45 loss.margin 1.9432816818381256 +751 45 negative_sampler.num_negs_per_pos 42.0 +751 45 training.batch_size 0.0 +751 46 model.embedding_dim 1.0 +751 46 model.relation_dim 2.0 +751 46 loss.margin 6.1553494406909515 +751 46 negative_sampler.num_negs_per_pos 83.0 +751 46 training.batch_size 1.0 +751 47 model.embedding_dim 2.0 +751 47 model.relation_dim 0.0 +751 47 loss.margin 3.8588527034225515 +751 47 negative_sampler.num_negs_per_pos 93.0 +751 47 training.batch_size 0.0 +751 48 model.embedding_dim 1.0 +751 48 model.relation_dim 1.0 +751 48 loss.margin 3.8741464785046613 +751 48 negative_sampler.num_negs_per_pos 3.0 +751 48 training.batch_size 0.0 +751 49 model.embedding_dim 0.0 +751 49 model.relation_dim 1.0 +751 49 loss.margin 4.877292508116639 +751 49 negative_sampler.num_negs_per_pos 18.0 +751 49 training.batch_size 0.0 +751 50 model.embedding_dim 1.0 +751 50 model.relation_dim 2.0 +751 50 loss.margin 3.9416687207826477 +751 50 negative_sampler.num_negs_per_pos 5.0 +751 50 training.batch_size 1.0 +751 51 model.embedding_dim 1.0 +751 51 model.relation_dim 1.0 +751 51 loss.margin 9.306132900194301 +751 51 negative_sampler.num_negs_per_pos 13.0 +751 51 training.batch_size 1.0 +751 52 model.embedding_dim 0.0 +751 52 model.relation_dim 0.0 +751 52 loss.margin 4.822394668753611 +751 52 negative_sampler.num_negs_per_pos 14.0 +751 52 training.batch_size 2.0 +751 53 model.embedding_dim 1.0 +751 53 model.relation_dim 2.0 +751 53 loss.margin 7.178662299285751 +751 53 negative_sampler.num_negs_per_pos 4.0 +751 53 training.batch_size 2.0 +751 54 model.embedding_dim 1.0 +751 54 model.relation_dim 1.0 +751 54 loss.margin 6.932920289206448 +751 54 
negative_sampler.num_negs_per_pos 32.0 +751 54 training.batch_size 1.0 +751 55 model.embedding_dim 0.0 +751 55 model.relation_dim 2.0 +751 55 loss.margin 3.9268228927894455 +751 55 negative_sampler.num_negs_per_pos 84.0 +751 55 training.batch_size 2.0 +751 56 model.embedding_dim 1.0 +751 56 model.relation_dim 2.0 +751 56 loss.margin 5.756703986039129 +751 56 negative_sampler.num_negs_per_pos 15.0 +751 56 training.batch_size 2.0 +751 57 model.embedding_dim 0.0 +751 57 model.relation_dim 2.0 +751 57 loss.margin 6.383745788272559 +751 57 negative_sampler.num_negs_per_pos 2.0 +751 57 training.batch_size 0.0 +751 58 model.embedding_dim 1.0 +751 58 model.relation_dim 2.0 +751 58 loss.margin 7.4130548123515565 +751 58 negative_sampler.num_negs_per_pos 19.0 +751 58 training.batch_size 0.0 +751 59 model.embedding_dim 1.0 +751 59 model.relation_dim 1.0 +751 59 loss.margin 9.940863842112341 +751 59 negative_sampler.num_negs_per_pos 93.0 +751 59 training.batch_size 0.0 +751 60 model.embedding_dim 2.0 +751 60 model.relation_dim 1.0 +751 60 loss.margin 6.237263574035293 +751 60 negative_sampler.num_negs_per_pos 42.0 +751 60 training.batch_size 2.0 +751 61 model.embedding_dim 0.0 +751 61 model.relation_dim 2.0 +751 61 loss.margin 8.163792884738317 +751 61 negative_sampler.num_negs_per_pos 92.0 +751 61 training.batch_size 1.0 +751 62 model.embedding_dim 0.0 +751 62 model.relation_dim 1.0 +751 62 loss.margin 7.045502145537303 +751 62 negative_sampler.num_negs_per_pos 3.0 +751 62 training.batch_size 2.0 +751 63 model.embedding_dim 1.0 +751 63 model.relation_dim 1.0 +751 63 loss.margin 8.698149267942503 +751 63 negative_sampler.num_negs_per_pos 22.0 +751 63 training.batch_size 0.0 +751 64 model.embedding_dim 2.0 +751 64 model.relation_dim 1.0 +751 64 loss.margin 4.584265250446702 +751 64 negative_sampler.num_negs_per_pos 65.0 +751 64 training.batch_size 0.0 +751 65 model.embedding_dim 1.0 +751 65 model.relation_dim 0.0 +751 65 loss.margin 0.7126489880156932 +751 65 
negative_sampler.num_negs_per_pos 94.0 +751 65 training.batch_size 1.0 +751 66 model.embedding_dim 2.0 +751 66 model.relation_dim 1.0 +751 66 loss.margin 7.96690465835281 +751 66 negative_sampler.num_negs_per_pos 14.0 +751 66 training.batch_size 0.0 +751 67 model.embedding_dim 0.0 +751 67 model.relation_dim 0.0 +751 67 loss.margin 7.331595396567867 +751 67 negative_sampler.num_negs_per_pos 59.0 +751 67 training.batch_size 2.0 +751 68 model.embedding_dim 1.0 +751 68 model.relation_dim 0.0 +751 68 loss.margin 4.774272847801875 +751 68 negative_sampler.num_negs_per_pos 26.0 +751 68 training.batch_size 1.0 +751 69 model.embedding_dim 0.0 +751 69 model.relation_dim 2.0 +751 69 loss.margin 7.4289643155785825 +751 69 negative_sampler.num_negs_per_pos 39.0 +751 69 training.batch_size 1.0 +751 70 model.embedding_dim 0.0 +751 70 model.relation_dim 2.0 +751 70 loss.margin 4.397198606543606 +751 70 negative_sampler.num_negs_per_pos 63.0 +751 70 training.batch_size 2.0 +751 71 model.embedding_dim 1.0 +751 71 model.relation_dim 0.0 +751 71 loss.margin 3.1219988023238057 +751 71 negative_sampler.num_negs_per_pos 32.0 +751 71 training.batch_size 1.0 +751 72 model.embedding_dim 0.0 +751 72 model.relation_dim 2.0 +751 72 loss.margin 2.5875051596325753 +751 72 negative_sampler.num_negs_per_pos 48.0 +751 72 training.batch_size 0.0 +751 73 model.embedding_dim 2.0 +751 73 model.relation_dim 2.0 +751 73 loss.margin 4.58032282977208 +751 73 negative_sampler.num_negs_per_pos 34.0 +751 73 training.batch_size 2.0 +751 74 model.embedding_dim 2.0 +751 74 model.relation_dim 0.0 +751 74 loss.margin 4.057406215249729 +751 74 negative_sampler.num_negs_per_pos 23.0 +751 74 training.batch_size 0.0 +751 75 model.embedding_dim 1.0 +751 75 model.relation_dim 2.0 +751 75 loss.margin 3.570827201113011 +751 75 negative_sampler.num_negs_per_pos 34.0 +751 75 training.batch_size 0.0 +751 76 model.embedding_dim 1.0 +751 76 model.relation_dim 0.0 +751 76 loss.margin 9.4413600368628 +751 76 
negative_sampler.num_negs_per_pos 34.0 +751 76 training.batch_size 2.0 +751 77 model.embedding_dim 0.0 +751 77 model.relation_dim 2.0 +751 77 loss.margin 9.578061425493917 +751 77 negative_sampler.num_negs_per_pos 8.0 +751 77 training.batch_size 1.0 +751 78 model.embedding_dim 2.0 +751 78 model.relation_dim 1.0 +751 78 loss.margin 6.250399670558576 +751 78 negative_sampler.num_negs_per_pos 87.0 +751 78 training.batch_size 0.0 +751 79 model.embedding_dim 1.0 +751 79 model.relation_dim 0.0 +751 79 loss.margin 3.300743832324316 +751 79 negative_sampler.num_negs_per_pos 38.0 +751 79 training.batch_size 1.0 +751 80 model.embedding_dim 0.0 +751 80 model.relation_dim 2.0 +751 80 loss.margin 6.3461859477365286 +751 80 negative_sampler.num_negs_per_pos 7.0 +751 80 training.batch_size 0.0 +751 81 model.embedding_dim 0.0 +751 81 model.relation_dim 1.0 +751 81 loss.margin 2.3949544913393446 +751 81 negative_sampler.num_negs_per_pos 45.0 +751 81 training.batch_size 2.0 +751 82 model.embedding_dim 2.0 +751 82 model.relation_dim 2.0 +751 82 loss.margin 7.58568479619484 +751 82 negative_sampler.num_negs_per_pos 9.0 +751 82 training.batch_size 1.0 +751 83 model.embedding_dim 1.0 +751 83 model.relation_dim 1.0 +751 83 loss.margin 3.196997283699098 +751 83 negative_sampler.num_negs_per_pos 13.0 +751 83 training.batch_size 0.0 +751 84 model.embedding_dim 2.0 +751 84 model.relation_dim 1.0 +751 84 loss.margin 3.696214733142668 +751 84 negative_sampler.num_negs_per_pos 25.0 +751 84 training.batch_size 2.0 +751 85 model.embedding_dim 2.0 +751 85 model.relation_dim 2.0 +751 85 loss.margin 4.753764812281664 +751 85 negative_sampler.num_negs_per_pos 57.0 +751 85 training.batch_size 1.0 +751 86 model.embedding_dim 2.0 +751 86 model.relation_dim 2.0 +751 86 loss.margin 9.058847864781235 +751 86 negative_sampler.num_negs_per_pos 60.0 +751 86 training.batch_size 0.0 +751 87 model.embedding_dim 2.0 +751 87 model.relation_dim 0.0 +751 87 loss.margin 2.9754176034100075 +751 87 
negative_sampler.num_negs_per_pos 20.0 +751 87 training.batch_size 0.0 +751 88 model.embedding_dim 1.0 +751 88 model.relation_dim 0.0 +751 88 loss.margin 9.636811351445303 +751 88 negative_sampler.num_negs_per_pos 52.0 +751 88 training.batch_size 2.0 +751 89 model.embedding_dim 0.0 +751 89 model.relation_dim 0.0 +751 89 loss.margin 4.306999358262175 +751 89 negative_sampler.num_negs_per_pos 70.0 +751 89 training.batch_size 2.0 +751 90 model.embedding_dim 1.0 +751 90 model.relation_dim 1.0 +751 90 loss.margin 6.769773826515235 +751 90 negative_sampler.num_negs_per_pos 83.0 +751 90 training.batch_size 1.0 +751 91 model.embedding_dim 2.0 +751 91 model.relation_dim 1.0 +751 91 loss.margin 6.421104075058684 +751 91 negative_sampler.num_negs_per_pos 64.0 +751 91 training.batch_size 2.0 +751 92 model.embedding_dim 2.0 +751 92 model.relation_dim 0.0 +751 92 loss.margin 9.379262782945679 +751 92 negative_sampler.num_negs_per_pos 60.0 +751 92 training.batch_size 0.0 +751 93 model.embedding_dim 1.0 +751 93 model.relation_dim 0.0 +751 93 loss.margin 9.185285802307646 +751 93 negative_sampler.num_negs_per_pos 92.0 +751 93 training.batch_size 2.0 +751 94 model.embedding_dim 0.0 +751 94 model.relation_dim 1.0 +751 94 loss.margin 5.245093531017308 +751 94 negative_sampler.num_negs_per_pos 53.0 +751 94 training.batch_size 1.0 +751 95 model.embedding_dim 1.0 +751 95 model.relation_dim 1.0 +751 95 loss.margin 3.67035372665789 +751 95 negative_sampler.num_negs_per_pos 90.0 +751 95 training.batch_size 0.0 +751 96 model.embedding_dim 0.0 +751 96 model.relation_dim 0.0 +751 96 loss.margin 4.166320400729294 +751 96 negative_sampler.num_negs_per_pos 64.0 +751 96 training.batch_size 2.0 +751 97 model.embedding_dim 2.0 +751 97 model.relation_dim 2.0 +751 97 loss.margin 6.368835996400952 +751 97 negative_sampler.num_negs_per_pos 64.0 +751 97 training.batch_size 2.0 +751 98 model.embedding_dim 1.0 +751 98 model.relation_dim 1.0 +751 98 loss.margin 0.5323333853929582 +751 98 
negative_sampler.num_negs_per_pos 11.0 +751 98 training.batch_size 1.0 +751 99 model.embedding_dim 0.0 +751 99 model.relation_dim 1.0 +751 99 loss.margin 4.056187692952371 +751 99 negative_sampler.num_negs_per_pos 39.0 +751 99 training.batch_size 0.0 +751 100 model.embedding_dim 2.0 +751 100 model.relation_dim 1.0 +751 100 loss.margin 1.1783728552930366 +751 100 negative_sampler.num_negs_per_pos 92.0 +751 100 training.batch_size 0.0 +751 1 dataset """kinships""" +751 1 model """transd""" +751 1 loss """marginranking""" +751 1 regularizer """no""" +751 1 optimizer """adadelta""" +751 1 training_loop """owa""" +751 1 negative_sampler """basic""" +751 1 evaluator """rankbased""" +751 2 dataset """kinships""" +751 2 model """transd""" +751 2 loss """marginranking""" +751 2 regularizer """no""" +751 2 optimizer """adadelta""" +751 2 training_loop """owa""" +751 2 negative_sampler """basic""" +751 2 evaluator """rankbased""" +751 3 dataset """kinships""" +751 3 model """transd""" +751 3 loss """marginranking""" +751 3 regularizer """no""" +751 3 optimizer """adadelta""" +751 3 training_loop """owa""" +751 3 negative_sampler """basic""" +751 3 evaluator """rankbased""" +751 4 dataset """kinships""" +751 4 model """transd""" +751 4 loss """marginranking""" +751 4 regularizer """no""" +751 4 optimizer """adadelta""" +751 4 training_loop """owa""" +751 4 negative_sampler """basic""" +751 4 evaluator """rankbased""" +751 5 dataset """kinships""" +751 5 model """transd""" +751 5 loss """marginranking""" +751 5 regularizer """no""" +751 5 optimizer """adadelta""" +751 5 training_loop """owa""" +751 5 negative_sampler """basic""" +751 5 evaluator """rankbased""" +751 6 dataset """kinships""" +751 6 model """transd""" +751 6 loss """marginranking""" +751 6 regularizer """no""" +751 6 optimizer """adadelta""" +751 6 training_loop """owa""" +751 6 negative_sampler """basic""" +751 6 evaluator """rankbased""" +751 7 dataset """kinships""" +751 7 model """transd""" +751 7 loss 
"""marginranking""" +751 7 regularizer """no""" +751 7 optimizer """adadelta""" +751 7 training_loop """owa""" +751 7 negative_sampler """basic""" +751 7 evaluator """rankbased""" +751 8 dataset """kinships""" +751 8 model """transd""" +751 8 loss """marginranking""" +751 8 regularizer """no""" +751 8 optimizer """adadelta""" +751 8 training_loop """owa""" +751 8 negative_sampler """basic""" +751 8 evaluator """rankbased""" +751 9 dataset """kinships""" +751 9 model """transd""" +751 9 loss """marginranking""" +751 9 regularizer """no""" +751 9 optimizer """adadelta""" +751 9 training_loop """owa""" +751 9 negative_sampler """basic""" +751 9 evaluator """rankbased""" +751 10 dataset """kinships""" +751 10 model """transd""" +751 10 loss """marginranking""" +751 10 regularizer """no""" +751 10 optimizer """adadelta""" +751 10 training_loop """owa""" +751 10 negative_sampler """basic""" +751 10 evaluator """rankbased""" +751 11 dataset """kinships""" +751 11 model """transd""" +751 11 loss """marginranking""" +751 11 regularizer """no""" +751 11 optimizer """adadelta""" +751 11 training_loop """owa""" +751 11 negative_sampler """basic""" +751 11 evaluator """rankbased""" +751 12 dataset """kinships""" +751 12 model """transd""" +751 12 loss """marginranking""" +751 12 regularizer """no""" +751 12 optimizer """adadelta""" +751 12 training_loop """owa""" +751 12 negative_sampler """basic""" +751 12 evaluator """rankbased""" +751 13 dataset """kinships""" +751 13 model """transd""" +751 13 loss """marginranking""" +751 13 regularizer """no""" +751 13 optimizer """adadelta""" +751 13 training_loop """owa""" +751 13 negative_sampler """basic""" +751 13 evaluator """rankbased""" +751 14 dataset """kinships""" +751 14 model """transd""" +751 14 loss """marginranking""" +751 14 regularizer """no""" +751 14 optimizer """adadelta""" +751 14 training_loop """owa""" +751 14 negative_sampler """basic""" +751 14 evaluator """rankbased""" +751 15 dataset """kinships""" +751 15 
model """transd""" +751 15 loss """marginranking""" +751 15 regularizer """no""" +751 15 optimizer """adadelta""" +751 15 training_loop """owa""" +751 15 negative_sampler """basic""" +751 15 evaluator """rankbased""" +751 16 dataset """kinships""" +751 16 model """transd""" +751 16 loss """marginranking""" +751 16 regularizer """no""" +751 16 optimizer """adadelta""" +751 16 training_loop """owa""" +751 16 negative_sampler """basic""" +751 16 evaluator """rankbased""" +751 17 dataset """kinships""" +751 17 model """transd""" +751 17 loss """marginranking""" +751 17 regularizer """no""" +751 17 optimizer """adadelta""" +751 17 training_loop """owa""" +751 17 negative_sampler """basic""" +751 17 evaluator """rankbased""" +751 18 dataset """kinships""" +751 18 model """transd""" +751 18 loss """marginranking""" +751 18 regularizer """no""" +751 18 optimizer """adadelta""" +751 18 training_loop """owa""" +751 18 negative_sampler """basic""" +751 18 evaluator """rankbased""" +751 19 dataset """kinships""" +751 19 model """transd""" +751 19 loss """marginranking""" +751 19 regularizer """no""" +751 19 optimizer """adadelta""" +751 19 training_loop """owa""" +751 19 negative_sampler """basic""" +751 19 evaluator """rankbased""" +751 20 dataset """kinships""" +751 20 model """transd""" +751 20 loss """marginranking""" +751 20 regularizer """no""" +751 20 optimizer """adadelta""" +751 20 training_loop """owa""" +751 20 negative_sampler """basic""" +751 20 evaluator """rankbased""" +751 21 dataset """kinships""" +751 21 model """transd""" +751 21 loss """marginranking""" +751 21 regularizer """no""" +751 21 optimizer """adadelta""" +751 21 training_loop """owa""" +751 21 negative_sampler """basic""" +751 21 evaluator """rankbased""" +751 22 dataset """kinships""" +751 22 model """transd""" +751 22 loss """marginranking""" +751 22 regularizer """no""" +751 22 optimizer """adadelta""" +751 22 training_loop """owa""" +751 22 negative_sampler """basic""" +751 22 evaluator 
"""rankbased""" +751 23 dataset """kinships""" +751 23 model """transd""" +751 23 loss """marginranking""" +751 23 regularizer """no""" +751 23 optimizer """adadelta""" +751 23 training_loop """owa""" +751 23 negative_sampler """basic""" +751 23 evaluator """rankbased""" +751 24 dataset """kinships""" +751 24 model """transd""" +751 24 loss """marginranking""" +751 24 regularizer """no""" +751 24 optimizer """adadelta""" +751 24 training_loop """owa""" +751 24 negative_sampler """basic""" +751 24 evaluator """rankbased""" +751 25 dataset """kinships""" +751 25 model """transd""" +751 25 loss """marginranking""" +751 25 regularizer """no""" +751 25 optimizer """adadelta""" +751 25 training_loop """owa""" +751 25 negative_sampler """basic""" +751 25 evaluator """rankbased""" +751 26 dataset """kinships""" +751 26 model """transd""" +751 26 loss """marginranking""" +751 26 regularizer """no""" +751 26 optimizer """adadelta""" +751 26 training_loop """owa""" +751 26 negative_sampler """basic""" +751 26 evaluator """rankbased""" +751 27 dataset """kinships""" +751 27 model """transd""" +751 27 loss """marginranking""" +751 27 regularizer """no""" +751 27 optimizer """adadelta""" +751 27 training_loop """owa""" +751 27 negative_sampler """basic""" +751 27 evaluator """rankbased""" +751 28 dataset """kinships""" +751 28 model """transd""" +751 28 loss """marginranking""" +751 28 regularizer """no""" +751 28 optimizer """adadelta""" +751 28 training_loop """owa""" +751 28 negative_sampler """basic""" +751 28 evaluator """rankbased""" +751 29 dataset """kinships""" +751 29 model """transd""" +751 29 loss """marginranking""" +751 29 regularizer """no""" +751 29 optimizer """adadelta""" +751 29 training_loop """owa""" +751 29 negative_sampler """basic""" +751 29 evaluator """rankbased""" +751 30 dataset """kinships""" +751 30 model """transd""" +751 30 loss """marginranking""" +751 30 regularizer """no""" +751 30 optimizer """adadelta""" +751 30 training_loop """owa""" +751 
30 negative_sampler """basic""" +751 30 evaluator """rankbased""" +751 31 dataset """kinships""" +751 31 model """transd""" +751 31 loss """marginranking""" +751 31 regularizer """no""" +751 31 optimizer """adadelta""" +751 31 training_loop """owa""" +751 31 negative_sampler """basic""" +751 31 evaluator """rankbased""" +751 32 dataset """kinships""" +751 32 model """transd""" +751 32 loss """marginranking""" +751 32 regularizer """no""" +751 32 optimizer """adadelta""" +751 32 training_loop """owa""" +751 32 negative_sampler """basic""" +751 32 evaluator """rankbased""" +751 33 dataset """kinships""" +751 33 model """transd""" +751 33 loss """marginranking""" +751 33 regularizer """no""" +751 33 optimizer """adadelta""" +751 33 training_loop """owa""" +751 33 negative_sampler """basic""" +751 33 evaluator """rankbased""" +751 34 dataset """kinships""" +751 34 model """transd""" +751 34 loss """marginranking""" +751 34 regularizer """no""" +751 34 optimizer """adadelta""" +751 34 training_loop """owa""" +751 34 negative_sampler """basic""" +751 34 evaluator """rankbased""" +751 35 dataset """kinships""" +751 35 model """transd""" +751 35 loss """marginranking""" +751 35 regularizer """no""" +751 35 optimizer """adadelta""" +751 35 training_loop """owa""" +751 35 negative_sampler """basic""" +751 35 evaluator """rankbased""" +751 36 dataset """kinships""" +751 36 model """transd""" +751 36 loss """marginranking""" +751 36 regularizer """no""" +751 36 optimizer """adadelta""" +751 36 training_loop """owa""" +751 36 negative_sampler """basic""" +751 36 evaluator """rankbased""" +751 37 dataset """kinships""" +751 37 model """transd""" +751 37 loss """marginranking""" +751 37 regularizer """no""" +751 37 optimizer """adadelta""" +751 37 training_loop """owa""" +751 37 negative_sampler """basic""" +751 37 evaluator """rankbased""" +751 38 dataset """kinships""" +751 38 model """transd""" +751 38 loss """marginranking""" +751 38 regularizer """no""" +751 38 optimizer 
"""adadelta""" +751 38 training_loop """owa""" +751 38 negative_sampler """basic""" +751 38 evaluator """rankbased""" +751 39 dataset """kinships""" +751 39 model """transd""" +751 39 loss """marginranking""" +751 39 regularizer """no""" +751 39 optimizer """adadelta""" +751 39 training_loop """owa""" +751 39 negative_sampler """basic""" +751 39 evaluator """rankbased""" +751 40 dataset """kinships""" +751 40 model """transd""" +751 40 loss """marginranking""" +751 40 regularizer """no""" +751 40 optimizer """adadelta""" +751 40 training_loop """owa""" +751 40 negative_sampler """basic""" +751 40 evaluator """rankbased""" +751 41 dataset """kinships""" +751 41 model """transd""" +751 41 loss """marginranking""" +751 41 regularizer """no""" +751 41 optimizer """adadelta""" +751 41 training_loop """owa""" +751 41 negative_sampler """basic""" +751 41 evaluator """rankbased""" +751 42 dataset """kinships""" +751 42 model """transd""" +751 42 loss """marginranking""" +751 42 regularizer """no""" +751 42 optimizer """adadelta""" +751 42 training_loop """owa""" +751 42 negative_sampler """basic""" +751 42 evaluator """rankbased""" +751 43 dataset """kinships""" +751 43 model """transd""" +751 43 loss """marginranking""" +751 43 regularizer """no""" +751 43 optimizer """adadelta""" +751 43 training_loop """owa""" +751 43 negative_sampler """basic""" +751 43 evaluator """rankbased""" +751 44 dataset """kinships""" +751 44 model """transd""" +751 44 loss """marginranking""" +751 44 regularizer """no""" +751 44 optimizer """adadelta""" +751 44 training_loop """owa""" +751 44 negative_sampler """basic""" +751 44 evaluator """rankbased""" +751 45 dataset """kinships""" +751 45 model """transd""" +751 45 loss """marginranking""" +751 45 regularizer """no""" +751 45 optimizer """adadelta""" +751 45 training_loop """owa""" +751 45 negative_sampler """basic""" +751 45 evaluator """rankbased""" +751 46 dataset """kinships""" +751 46 model """transd""" +751 46 loss 
"""marginranking""" +751 46 regularizer """no""" +751 46 optimizer """adadelta""" +751 46 training_loop """owa""" +751 46 negative_sampler """basic""" +751 46 evaluator """rankbased""" +751 47 dataset """kinships""" +751 47 model """transd""" +751 47 loss """marginranking""" +751 47 regularizer """no""" +751 47 optimizer """adadelta""" +751 47 training_loop """owa""" +751 47 negative_sampler """basic""" +751 47 evaluator """rankbased""" +751 48 dataset """kinships""" +751 48 model """transd""" +751 48 loss """marginranking""" +751 48 regularizer """no""" +751 48 optimizer """adadelta""" +751 48 training_loop """owa""" +751 48 negative_sampler """basic""" +751 48 evaluator """rankbased""" +751 49 dataset """kinships""" +751 49 model """transd""" +751 49 loss """marginranking""" +751 49 regularizer """no""" +751 49 optimizer """adadelta""" +751 49 training_loop """owa""" +751 49 negative_sampler """basic""" +751 49 evaluator """rankbased""" +751 50 dataset """kinships""" +751 50 model """transd""" +751 50 loss """marginranking""" +751 50 regularizer """no""" +751 50 optimizer """adadelta""" +751 50 training_loop """owa""" +751 50 negative_sampler """basic""" +751 50 evaluator """rankbased""" +751 51 dataset """kinships""" +751 51 model """transd""" +751 51 loss """marginranking""" +751 51 regularizer """no""" +751 51 optimizer """adadelta""" +751 51 training_loop """owa""" +751 51 negative_sampler """basic""" +751 51 evaluator """rankbased""" +751 52 dataset """kinships""" +751 52 model """transd""" +751 52 loss """marginranking""" +751 52 regularizer """no""" +751 52 optimizer """adadelta""" +751 52 training_loop """owa""" +751 52 negative_sampler """basic""" +751 52 evaluator """rankbased""" +751 53 dataset """kinships""" +751 53 model """transd""" +751 53 loss """marginranking""" +751 53 regularizer """no""" +751 53 optimizer """adadelta""" +751 53 training_loop """owa""" +751 53 negative_sampler """basic""" +751 53 evaluator """rankbased""" +751 54 dataset 
"""kinships""" +751 54 model """transd""" +751 54 loss """marginranking""" +751 54 regularizer """no""" +751 54 optimizer """adadelta""" +751 54 training_loop """owa""" +751 54 negative_sampler """basic""" +751 54 evaluator """rankbased""" +751 55 dataset """kinships""" +751 55 model """transd""" +751 55 loss """marginranking""" +751 55 regularizer """no""" +751 55 optimizer """adadelta""" +751 55 training_loop """owa""" +751 55 negative_sampler """basic""" +751 55 evaluator """rankbased""" +751 56 dataset """kinships""" +751 56 model """transd""" +751 56 loss """marginranking""" +751 56 regularizer """no""" +751 56 optimizer """adadelta""" +751 56 training_loop """owa""" +751 56 negative_sampler """basic""" +751 56 evaluator """rankbased""" +751 57 dataset """kinships""" +751 57 model """transd""" +751 57 loss """marginranking""" +751 57 regularizer """no""" +751 57 optimizer """adadelta""" +751 57 training_loop """owa""" +751 57 negative_sampler """basic""" +751 57 evaluator """rankbased""" +751 58 dataset """kinships""" +751 58 model """transd""" +751 58 loss """marginranking""" +751 58 regularizer """no""" +751 58 optimizer """adadelta""" +751 58 training_loop """owa""" +751 58 negative_sampler """basic""" +751 58 evaluator """rankbased""" +751 59 dataset """kinships""" +751 59 model """transd""" +751 59 loss """marginranking""" +751 59 regularizer """no""" +751 59 optimizer """adadelta""" +751 59 training_loop """owa""" +751 59 negative_sampler """basic""" +751 59 evaluator """rankbased""" +751 60 dataset """kinships""" +751 60 model """transd""" +751 60 loss """marginranking""" +751 60 regularizer """no""" +751 60 optimizer """adadelta""" +751 60 training_loop """owa""" +751 60 negative_sampler """basic""" +751 60 evaluator """rankbased""" +751 61 dataset """kinships""" +751 61 model """transd""" +751 61 loss """marginranking""" +751 61 regularizer """no""" +751 61 optimizer """adadelta""" +751 61 training_loop """owa""" +751 61 negative_sampler """basic""" 
+751 61 evaluator """rankbased""" +751 62 dataset """kinships""" +751 62 model """transd""" +751 62 loss """marginranking""" +751 62 regularizer """no""" +751 62 optimizer """adadelta""" +751 62 training_loop """owa""" +751 62 negative_sampler """basic""" +751 62 evaluator """rankbased""" +751 63 dataset """kinships""" +751 63 model """transd""" +751 63 loss """marginranking""" +751 63 regularizer """no""" +751 63 optimizer """adadelta""" +751 63 training_loop """owa""" +751 63 negative_sampler """basic""" +751 63 evaluator """rankbased""" +751 64 dataset """kinships""" +751 64 model """transd""" +751 64 loss """marginranking""" +751 64 regularizer """no""" +751 64 optimizer """adadelta""" +751 64 training_loop """owa""" +751 64 negative_sampler """basic""" +751 64 evaluator """rankbased""" +751 65 dataset """kinships""" +751 65 model """transd""" +751 65 loss """marginranking""" +751 65 regularizer """no""" +751 65 optimizer """adadelta""" +751 65 training_loop """owa""" +751 65 negative_sampler """basic""" +751 65 evaluator """rankbased""" +751 66 dataset """kinships""" +751 66 model """transd""" +751 66 loss """marginranking""" +751 66 regularizer """no""" +751 66 optimizer """adadelta""" +751 66 training_loop """owa""" +751 66 negative_sampler """basic""" +751 66 evaluator """rankbased""" +751 67 dataset """kinships""" +751 67 model """transd""" +751 67 loss """marginranking""" +751 67 regularizer """no""" +751 67 optimizer """adadelta""" +751 67 training_loop """owa""" +751 67 negative_sampler """basic""" +751 67 evaluator """rankbased""" +751 68 dataset """kinships""" +751 68 model """transd""" +751 68 loss """marginranking""" +751 68 regularizer """no""" +751 68 optimizer """adadelta""" +751 68 training_loop """owa""" +751 68 negative_sampler """basic""" +751 68 evaluator """rankbased""" +751 69 dataset """kinships""" +751 69 model """transd""" +751 69 loss """marginranking""" +751 69 regularizer """no""" +751 69 optimizer """adadelta""" +751 69 
training_loop """owa""" +751 69 negative_sampler """basic""" +751 69 evaluator """rankbased""" +751 70 dataset """kinships""" +751 70 model """transd""" +751 70 loss """marginranking""" +751 70 regularizer """no""" +751 70 optimizer """adadelta""" +751 70 training_loop """owa""" +751 70 negative_sampler """basic""" +751 70 evaluator """rankbased""" +751 71 dataset """kinships""" +751 71 model """transd""" +751 71 loss """marginranking""" +751 71 regularizer """no""" +751 71 optimizer """adadelta""" +751 71 training_loop """owa""" +751 71 negative_sampler """basic""" +751 71 evaluator """rankbased""" +751 72 dataset """kinships""" +751 72 model """transd""" +751 72 loss """marginranking""" +751 72 regularizer """no""" +751 72 optimizer """adadelta""" +751 72 training_loop """owa""" +751 72 negative_sampler """basic""" +751 72 evaluator """rankbased""" +751 73 dataset """kinships""" +751 73 model """transd""" +751 73 loss """marginranking""" +751 73 regularizer """no""" +751 73 optimizer """adadelta""" +751 73 training_loop """owa""" +751 73 negative_sampler """basic""" +751 73 evaluator """rankbased""" +751 74 dataset """kinships""" +751 74 model """transd""" +751 74 loss """marginranking""" +751 74 regularizer """no""" +751 74 optimizer """adadelta""" +751 74 training_loop """owa""" +751 74 negative_sampler """basic""" +751 74 evaluator """rankbased""" +751 75 dataset """kinships""" +751 75 model """transd""" +751 75 loss """marginranking""" +751 75 regularizer """no""" +751 75 optimizer """adadelta""" +751 75 training_loop """owa""" +751 75 negative_sampler """basic""" +751 75 evaluator """rankbased""" +751 76 dataset """kinships""" +751 76 model """transd""" +751 76 loss """marginranking""" +751 76 regularizer """no""" +751 76 optimizer """adadelta""" +751 76 training_loop """owa""" +751 76 negative_sampler """basic""" +751 76 evaluator """rankbased""" +751 77 dataset """kinships""" +751 77 model """transd""" +751 77 loss """marginranking""" +751 77 regularizer 
"""no""" +751 77 optimizer """adadelta""" +751 77 training_loop """owa""" +751 77 negative_sampler """basic""" +751 77 evaluator """rankbased""" +751 78 dataset """kinships""" +751 78 model """transd""" +751 78 loss """marginranking""" +751 78 regularizer """no""" +751 78 optimizer """adadelta""" +751 78 training_loop """owa""" +751 78 negative_sampler """basic""" +751 78 evaluator """rankbased""" +751 79 dataset """kinships""" +751 79 model """transd""" +751 79 loss """marginranking""" +751 79 regularizer """no""" +751 79 optimizer """adadelta""" +751 79 training_loop """owa""" +751 79 negative_sampler """basic""" +751 79 evaluator """rankbased""" +751 80 dataset """kinships""" +751 80 model """transd""" +751 80 loss """marginranking""" +751 80 regularizer """no""" +751 80 optimizer """adadelta""" +751 80 training_loop """owa""" +751 80 negative_sampler """basic""" +751 80 evaluator """rankbased""" +751 81 dataset """kinships""" +751 81 model """transd""" +751 81 loss """marginranking""" +751 81 regularizer """no""" +751 81 optimizer """adadelta""" +751 81 training_loop """owa""" +751 81 negative_sampler """basic""" +751 81 evaluator """rankbased""" +751 82 dataset """kinships""" +751 82 model """transd""" +751 82 loss """marginranking""" +751 82 regularizer """no""" +751 82 optimizer """adadelta""" +751 82 training_loop """owa""" +751 82 negative_sampler """basic""" +751 82 evaluator """rankbased""" +751 83 dataset """kinships""" +751 83 model """transd""" +751 83 loss """marginranking""" +751 83 regularizer """no""" +751 83 optimizer """adadelta""" +751 83 training_loop """owa""" +751 83 negative_sampler """basic""" +751 83 evaluator """rankbased""" +751 84 dataset """kinships""" +751 84 model """transd""" +751 84 loss """marginranking""" +751 84 regularizer """no""" +751 84 optimizer """adadelta""" +751 84 training_loop """owa""" +751 84 negative_sampler """basic""" +751 84 evaluator """rankbased""" +751 85 dataset """kinships""" +751 85 model """transd""" +751 
85 loss """marginranking""" +751 85 regularizer """no""" +751 85 optimizer """adadelta""" +751 85 training_loop """owa""" +751 85 negative_sampler """basic""" +751 85 evaluator """rankbased""" +751 86 dataset """kinships""" +751 86 model """transd""" +751 86 loss """marginranking""" +751 86 regularizer """no""" +751 86 optimizer """adadelta""" +751 86 training_loop """owa""" +751 86 negative_sampler """basic""" +751 86 evaluator """rankbased""" +751 87 dataset """kinships""" +751 87 model """transd""" +751 87 loss """marginranking""" +751 87 regularizer """no""" +751 87 optimizer """adadelta""" +751 87 training_loop """owa""" +751 87 negative_sampler """basic""" +751 87 evaluator """rankbased""" +751 88 dataset """kinships""" +751 88 model """transd""" +751 88 loss """marginranking""" +751 88 regularizer """no""" +751 88 optimizer """adadelta""" +751 88 training_loop """owa""" +751 88 negative_sampler """basic""" +751 88 evaluator """rankbased""" +751 89 dataset """kinships""" +751 89 model """transd""" +751 89 loss """marginranking""" +751 89 regularizer """no""" +751 89 optimizer """adadelta""" +751 89 training_loop """owa""" +751 89 negative_sampler """basic""" +751 89 evaluator """rankbased""" +751 90 dataset """kinships""" +751 90 model """transd""" +751 90 loss """marginranking""" +751 90 regularizer """no""" +751 90 optimizer """adadelta""" +751 90 training_loop """owa""" +751 90 negative_sampler """basic""" +751 90 evaluator """rankbased""" +751 91 dataset """kinships""" +751 91 model """transd""" +751 91 loss """marginranking""" +751 91 regularizer """no""" +751 91 optimizer """adadelta""" +751 91 training_loop """owa""" +751 91 negative_sampler """basic""" +751 91 evaluator """rankbased""" +751 92 dataset """kinships""" +751 92 model """transd""" +751 92 loss """marginranking""" +751 92 regularizer """no""" +751 92 optimizer """adadelta""" +751 92 training_loop """owa""" +751 92 negative_sampler """basic""" +751 92 evaluator """rankbased""" +751 93 
dataset """kinships""" +751 93 model """transd""" +751 93 loss """marginranking""" +751 93 regularizer """no""" +751 93 optimizer """adadelta""" +751 93 training_loop """owa""" +751 93 negative_sampler """basic""" +751 93 evaluator """rankbased""" +751 94 dataset """kinships""" +751 94 model """transd""" +751 94 loss """marginranking""" +751 94 regularizer """no""" +751 94 optimizer """adadelta""" +751 94 training_loop """owa""" +751 94 negative_sampler """basic""" +751 94 evaluator """rankbased""" +751 95 dataset """kinships""" +751 95 model """transd""" +751 95 loss """marginranking""" +751 95 regularizer """no""" +751 95 optimizer """adadelta""" +751 95 training_loop """owa""" +751 95 negative_sampler """basic""" +751 95 evaluator """rankbased""" +751 96 dataset """kinships""" +751 96 model """transd""" +751 96 loss """marginranking""" +751 96 regularizer """no""" +751 96 optimizer """adadelta""" +751 96 training_loop """owa""" +751 96 negative_sampler """basic""" +751 96 evaluator """rankbased""" +751 97 dataset """kinships""" +751 97 model """transd""" +751 97 loss """marginranking""" +751 97 regularizer """no""" +751 97 optimizer """adadelta""" +751 97 training_loop """owa""" +751 97 negative_sampler """basic""" +751 97 evaluator """rankbased""" +751 98 dataset """kinships""" +751 98 model """transd""" +751 98 loss """marginranking""" +751 98 regularizer """no""" +751 98 optimizer """adadelta""" +751 98 training_loop """owa""" +751 98 negative_sampler """basic""" +751 98 evaluator """rankbased""" +751 99 dataset """kinships""" +751 99 model """transd""" +751 99 loss """marginranking""" +751 99 regularizer """no""" +751 99 optimizer """adadelta""" +751 99 training_loop """owa""" +751 99 negative_sampler """basic""" +751 99 evaluator """rankbased""" +751 100 dataset """kinships""" +751 100 model """transd""" +751 100 loss """marginranking""" +751 100 regularizer """no""" +751 100 optimizer """adadelta""" +751 100 training_loop """owa""" +751 100 
negative_sampler """basic""" +751 100 evaluator """rankbased""" +752 1 model.embedding_dim 2.0 +752 1 model.relation_dim 0.0 +752 1 loss.margin 5.312614067089488 +752 1 negative_sampler.num_negs_per_pos 58.0 +752 1 training.batch_size 1.0 +752 2 model.embedding_dim 0.0 +752 2 model.relation_dim 0.0 +752 2 loss.margin 7.211801431824611 +752 2 negative_sampler.num_negs_per_pos 63.0 +752 2 training.batch_size 1.0 +752 3 model.embedding_dim 2.0 +752 3 model.relation_dim 1.0 +752 3 loss.margin 6.398738291885751 +752 3 negative_sampler.num_negs_per_pos 40.0 +752 3 training.batch_size 1.0 +752 4 model.embedding_dim 2.0 +752 4 model.relation_dim 2.0 +752 4 loss.margin 6.3598110211895476 +752 4 negative_sampler.num_negs_per_pos 53.0 +752 4 training.batch_size 2.0 +752 5 model.embedding_dim 2.0 +752 5 model.relation_dim 0.0 +752 5 loss.margin 5.039496035974322 +752 5 negative_sampler.num_negs_per_pos 70.0 +752 5 training.batch_size 1.0 +752 6 model.embedding_dim 2.0 +752 6 model.relation_dim 0.0 +752 6 loss.margin 6.698738901352866 +752 6 negative_sampler.num_negs_per_pos 35.0 +752 6 training.batch_size 2.0 +752 7 model.embedding_dim 2.0 +752 7 model.relation_dim 2.0 +752 7 loss.margin 6.288921653933212 +752 7 negative_sampler.num_negs_per_pos 84.0 +752 7 training.batch_size 1.0 +752 8 model.embedding_dim 0.0 +752 8 model.relation_dim 0.0 +752 8 loss.margin 4.907629102600123 +752 8 negative_sampler.num_negs_per_pos 87.0 +752 8 training.batch_size 1.0 +752 9 model.embedding_dim 1.0 +752 9 model.relation_dim 2.0 +752 9 loss.margin 4.121016996411193 +752 9 negative_sampler.num_negs_per_pos 80.0 +752 9 training.batch_size 2.0 +752 10 model.embedding_dim 1.0 +752 10 model.relation_dim 0.0 +752 10 loss.margin 4.242770291854869 +752 10 negative_sampler.num_negs_per_pos 99.0 +752 10 training.batch_size 0.0 +752 11 model.embedding_dim 1.0 +752 11 model.relation_dim 0.0 +752 11 loss.margin 9.159675884753371 +752 11 negative_sampler.num_negs_per_pos 67.0 +752 11 training.batch_size 1.0 
+752 12 model.embedding_dim 2.0 +752 12 model.relation_dim 0.0 +752 12 loss.margin 3.3764310753958013 +752 12 negative_sampler.num_negs_per_pos 94.0 +752 12 training.batch_size 1.0 +752 13 model.embedding_dim 1.0 +752 13 model.relation_dim 2.0 +752 13 loss.margin 8.082244994862645 +752 13 negative_sampler.num_negs_per_pos 9.0 +752 13 training.batch_size 0.0 +752 14 model.embedding_dim 1.0 +752 14 model.relation_dim 0.0 +752 14 loss.margin 1.1387779688875073 +752 14 negative_sampler.num_negs_per_pos 14.0 +752 14 training.batch_size 2.0 +752 15 model.embedding_dim 1.0 +752 15 model.relation_dim 0.0 +752 15 loss.margin 3.710894265095095 +752 15 negative_sampler.num_negs_per_pos 65.0 +752 15 training.batch_size 0.0 +752 16 model.embedding_dim 2.0 +752 16 model.relation_dim 2.0 +752 16 loss.margin 2.1957041953699266 +752 16 negative_sampler.num_negs_per_pos 80.0 +752 16 training.batch_size 0.0 +752 17 model.embedding_dim 2.0 +752 17 model.relation_dim 1.0 +752 17 loss.margin 0.7032508170014997 +752 17 negative_sampler.num_negs_per_pos 90.0 +752 17 training.batch_size 1.0 +752 18 model.embedding_dim 0.0 +752 18 model.relation_dim 2.0 +752 18 loss.margin 6.264200288555552 +752 18 negative_sampler.num_negs_per_pos 90.0 +752 18 training.batch_size 1.0 +752 19 model.embedding_dim 1.0 +752 19 model.relation_dim 0.0 +752 19 loss.margin 0.6971236943775061 +752 19 negative_sampler.num_negs_per_pos 25.0 +752 19 training.batch_size 2.0 +752 20 model.embedding_dim 2.0 +752 20 model.relation_dim 1.0 +752 20 loss.margin 2.67853238093337 +752 20 negative_sampler.num_negs_per_pos 92.0 +752 20 training.batch_size 1.0 +752 21 model.embedding_dim 0.0 +752 21 model.relation_dim 1.0 +752 21 loss.margin 4.4835479545478 +752 21 negative_sampler.num_negs_per_pos 11.0 +752 21 training.batch_size 0.0 +752 22 model.embedding_dim 0.0 +752 22 model.relation_dim 0.0 +752 22 loss.margin 5.852205968691001 +752 22 negative_sampler.num_negs_per_pos 60.0 +752 22 training.batch_size 1.0 +752 23 
model.embedding_dim 0.0 +752 23 model.relation_dim 1.0 +752 23 loss.margin 3.349865103867666 +752 23 negative_sampler.num_negs_per_pos 68.0 +752 23 training.batch_size 0.0 +752 24 model.embedding_dim 2.0 +752 24 model.relation_dim 2.0 +752 24 loss.margin 3.4839111614072755 +752 24 negative_sampler.num_negs_per_pos 36.0 +752 24 training.batch_size 2.0 +752 25 model.embedding_dim 1.0 +752 25 model.relation_dim 2.0 +752 25 loss.margin 7.250941569948477 +752 25 negative_sampler.num_negs_per_pos 25.0 +752 25 training.batch_size 1.0 +752 26 model.embedding_dim 1.0 +752 26 model.relation_dim 1.0 +752 26 loss.margin 0.568032294308005 +752 26 negative_sampler.num_negs_per_pos 5.0 +752 26 training.batch_size 1.0 +752 27 model.embedding_dim 1.0 +752 27 model.relation_dim 2.0 +752 27 loss.margin 7.8850547474199555 +752 27 negative_sampler.num_negs_per_pos 18.0 +752 27 training.batch_size 1.0 +752 28 model.embedding_dim 1.0 +752 28 model.relation_dim 0.0 +752 28 loss.margin 3.4418722184858264 +752 28 negative_sampler.num_negs_per_pos 17.0 +752 28 training.batch_size 2.0 +752 29 model.embedding_dim 0.0 +752 29 model.relation_dim 2.0 +752 29 loss.margin 2.595223737434344 +752 29 negative_sampler.num_negs_per_pos 32.0 +752 29 training.batch_size 2.0 +752 30 model.embedding_dim 2.0 +752 30 model.relation_dim 1.0 +752 30 loss.margin 7.338147816461222 +752 30 negative_sampler.num_negs_per_pos 30.0 +752 30 training.batch_size 0.0 +752 31 model.embedding_dim 2.0 +752 31 model.relation_dim 1.0 +752 31 loss.margin 9.64534753941332 +752 31 negative_sampler.num_negs_per_pos 28.0 +752 31 training.batch_size 2.0 +752 32 model.embedding_dim 0.0 +752 32 model.relation_dim 2.0 +752 32 loss.margin 0.8280104069554642 +752 32 negative_sampler.num_negs_per_pos 34.0 +752 32 training.batch_size 2.0 +752 33 model.embedding_dim 1.0 +752 33 model.relation_dim 1.0 +752 33 loss.margin 0.7174911717516623 +752 33 negative_sampler.num_negs_per_pos 51.0 +752 33 training.batch_size 2.0 +752 34 
model.embedding_dim 2.0 +752 34 model.relation_dim 0.0 +752 34 loss.margin 4.540207988734662 +752 34 negative_sampler.num_negs_per_pos 69.0 +752 34 training.batch_size 1.0 +752 35 model.embedding_dim 0.0 +752 35 model.relation_dim 1.0 +752 35 loss.margin 6.945437731051791 +752 35 negative_sampler.num_negs_per_pos 25.0 +752 35 training.batch_size 2.0 +752 36 model.embedding_dim 1.0 +752 36 model.relation_dim 2.0 +752 36 loss.margin 7.759977431011437 +752 36 negative_sampler.num_negs_per_pos 93.0 +752 36 training.batch_size 2.0 +752 37 model.embedding_dim 2.0 +752 37 model.relation_dim 1.0 +752 37 loss.margin 1.3948783167684937 +752 37 negative_sampler.num_negs_per_pos 84.0 +752 37 training.batch_size 1.0 +752 38 model.embedding_dim 2.0 +752 38 model.relation_dim 0.0 +752 38 loss.margin 8.312769819393498 +752 38 negative_sampler.num_negs_per_pos 1.0 +752 38 training.batch_size 0.0 +752 39 model.embedding_dim 0.0 +752 39 model.relation_dim 1.0 +752 39 loss.margin 4.5634067454224105 +752 39 negative_sampler.num_negs_per_pos 24.0 +752 39 training.batch_size 0.0 +752 40 model.embedding_dim 0.0 +752 40 model.relation_dim 0.0 +752 40 loss.margin 6.210433281327785 +752 40 negative_sampler.num_negs_per_pos 65.0 +752 40 training.batch_size 2.0 +752 41 model.embedding_dim 0.0 +752 41 model.relation_dim 0.0 +752 41 loss.margin 9.396314292916633 +752 41 negative_sampler.num_negs_per_pos 26.0 +752 41 training.batch_size 1.0 +752 42 model.embedding_dim 0.0 +752 42 model.relation_dim 0.0 +752 42 loss.margin 7.799428628597295 +752 42 negative_sampler.num_negs_per_pos 62.0 +752 42 training.batch_size 2.0 +752 43 model.embedding_dim 1.0 +752 43 model.relation_dim 2.0 +752 43 loss.margin 7.053224091316007 +752 43 negative_sampler.num_negs_per_pos 59.0 +752 43 training.batch_size 2.0 +752 44 model.embedding_dim 0.0 +752 44 model.relation_dim 1.0 +752 44 loss.margin 7.223874860664918 +752 44 negative_sampler.num_negs_per_pos 52.0 +752 44 training.batch_size 1.0 +752 45 
model.embedding_dim 0.0 +752 45 model.relation_dim 0.0 +752 45 loss.margin 3.2654929808504263 +752 45 negative_sampler.num_negs_per_pos 82.0 +752 45 training.batch_size 2.0 +752 46 model.embedding_dim 2.0 +752 46 model.relation_dim 1.0 +752 46 loss.margin 3.2772545148361654 +752 46 negative_sampler.num_negs_per_pos 9.0 +752 46 training.batch_size 0.0 +752 47 model.embedding_dim 2.0 +752 47 model.relation_dim 2.0 +752 47 loss.margin 3.043944585476698 +752 47 negative_sampler.num_negs_per_pos 75.0 +752 47 training.batch_size 2.0 +752 48 model.embedding_dim 1.0 +752 48 model.relation_dim 2.0 +752 48 loss.margin 5.641106866215587 +752 48 negative_sampler.num_negs_per_pos 35.0 +752 48 training.batch_size 2.0 +752 49 model.embedding_dim 1.0 +752 49 model.relation_dim 0.0 +752 49 loss.margin 3.4519519260058287 +752 49 negative_sampler.num_negs_per_pos 95.0 +752 49 training.batch_size 0.0 +752 50 model.embedding_dim 2.0 +752 50 model.relation_dim 2.0 +752 50 loss.margin 2.9887337622551753 +752 50 negative_sampler.num_negs_per_pos 34.0 +752 50 training.batch_size 0.0 +752 51 model.embedding_dim 1.0 +752 51 model.relation_dim 0.0 +752 51 loss.margin 8.166635052331305 +752 51 negative_sampler.num_negs_per_pos 47.0 +752 51 training.batch_size 0.0 +752 52 model.embedding_dim 1.0 +752 52 model.relation_dim 2.0 +752 52 loss.margin 8.788264054828362 +752 52 negative_sampler.num_negs_per_pos 59.0 +752 52 training.batch_size 0.0 +752 53 model.embedding_dim 2.0 +752 53 model.relation_dim 0.0 +752 53 loss.margin 7.211961033482972 +752 53 negative_sampler.num_negs_per_pos 55.0 +752 53 training.batch_size 0.0 +752 54 model.embedding_dim 2.0 +752 54 model.relation_dim 1.0 +752 54 loss.margin 2.0381520940076188 +752 54 negative_sampler.num_negs_per_pos 97.0 +752 54 training.batch_size 1.0 +752 55 model.embedding_dim 0.0 +752 55 model.relation_dim 2.0 +752 55 loss.margin 7.894342359914088 +752 55 negative_sampler.num_negs_per_pos 49.0 +752 55 training.batch_size 2.0 +752 56 
model.embedding_dim 2.0 +752 56 model.relation_dim 2.0 +752 56 loss.margin 8.706832636259914 +752 56 negative_sampler.num_negs_per_pos 65.0 +752 56 training.batch_size 0.0 +752 57 model.embedding_dim 2.0 +752 57 model.relation_dim 0.0 +752 57 loss.margin 4.885057468317898 +752 57 negative_sampler.num_negs_per_pos 15.0 +752 57 training.batch_size 1.0 +752 58 model.embedding_dim 0.0 +752 58 model.relation_dim 1.0 +752 58 loss.margin 5.472460870283605 +752 58 negative_sampler.num_negs_per_pos 20.0 +752 58 training.batch_size 2.0 +752 59 model.embedding_dim 1.0 +752 59 model.relation_dim 2.0 +752 59 loss.margin 2.1567194341389833 +752 59 negative_sampler.num_negs_per_pos 97.0 +752 59 training.batch_size 0.0 +752 60 model.embedding_dim 1.0 +752 60 model.relation_dim 1.0 +752 60 loss.margin 8.961829589298159 +752 60 negative_sampler.num_negs_per_pos 36.0 +752 60 training.batch_size 2.0 +752 61 model.embedding_dim 1.0 +752 61 model.relation_dim 2.0 +752 61 loss.margin 2.9268827381766123 +752 61 negative_sampler.num_negs_per_pos 90.0 +752 61 training.batch_size 2.0 +752 62 model.embedding_dim 1.0 +752 62 model.relation_dim 2.0 +752 62 loss.margin 0.8290119221825071 +752 62 negative_sampler.num_negs_per_pos 64.0 +752 62 training.batch_size 1.0 +752 63 model.embedding_dim 1.0 +752 63 model.relation_dim 0.0 +752 63 loss.margin 6.034574228596014 +752 63 negative_sampler.num_negs_per_pos 1.0 +752 63 training.batch_size 0.0 +752 64 model.embedding_dim 1.0 +752 64 model.relation_dim 0.0 +752 64 loss.margin 5.831329878022056 +752 64 negative_sampler.num_negs_per_pos 29.0 +752 64 training.batch_size 0.0 +752 65 model.embedding_dim 2.0 +752 65 model.relation_dim 0.0 +752 65 loss.margin 7.061122375808963 +752 65 negative_sampler.num_negs_per_pos 64.0 +752 65 training.batch_size 2.0 +752 66 model.embedding_dim 2.0 +752 66 model.relation_dim 1.0 +752 66 loss.margin 8.28196596163027 +752 66 negative_sampler.num_negs_per_pos 55.0 +752 66 training.batch_size 2.0 +752 67 
model.embedding_dim 1.0 +752 67 model.relation_dim 2.0 +752 67 loss.margin 4.248903908279713 +752 67 negative_sampler.num_negs_per_pos 91.0 +752 67 training.batch_size 0.0 +752 68 model.embedding_dim 0.0 +752 68 model.relation_dim 1.0 +752 68 loss.margin 3.142717232656525 +752 68 negative_sampler.num_negs_per_pos 34.0 +752 68 training.batch_size 1.0 +752 69 model.embedding_dim 2.0 +752 69 model.relation_dim 1.0 +752 69 loss.margin 1.6913158466882265 +752 69 negative_sampler.num_negs_per_pos 33.0 +752 69 training.batch_size 2.0 +752 70 model.embedding_dim 1.0 +752 70 model.relation_dim 2.0 +752 70 loss.margin 8.73164275687241 +752 70 negative_sampler.num_negs_per_pos 49.0 +752 70 training.batch_size 2.0 +752 71 model.embedding_dim 0.0 +752 71 model.relation_dim 1.0 +752 71 loss.margin 5.142375084065796 +752 71 negative_sampler.num_negs_per_pos 48.0 +752 71 training.batch_size 0.0 +752 72 model.embedding_dim 1.0 +752 72 model.relation_dim 1.0 +752 72 loss.margin 4.621761153480268 +752 72 negative_sampler.num_negs_per_pos 38.0 +752 72 training.batch_size 2.0 +752 73 model.embedding_dim 1.0 +752 73 model.relation_dim 0.0 +752 73 loss.margin 5.2169692088968205 +752 73 negative_sampler.num_negs_per_pos 39.0 +752 73 training.batch_size 0.0 +752 74 model.embedding_dim 1.0 +752 74 model.relation_dim 1.0 +752 74 loss.margin 3.9464527292785627 +752 74 negative_sampler.num_negs_per_pos 6.0 +752 74 training.batch_size 0.0 +752 75 model.embedding_dim 1.0 +752 75 model.relation_dim 1.0 +752 75 loss.margin 1.6132752460417223 +752 75 negative_sampler.num_negs_per_pos 96.0 +752 75 training.batch_size 0.0 +752 76 model.embedding_dim 0.0 +752 76 model.relation_dim 2.0 +752 76 loss.margin 2.2968589556707446 +752 76 negative_sampler.num_negs_per_pos 21.0 +752 76 training.batch_size 1.0 +752 77 model.embedding_dim 2.0 +752 77 model.relation_dim 2.0 +752 77 loss.margin 2.9541318337516658 +752 77 negative_sampler.num_negs_per_pos 57.0 +752 77 training.batch_size 2.0 +752 78 
model.embedding_dim 1.0 +752 78 model.relation_dim 0.0 +752 78 loss.margin 9.467439182833456 +752 78 negative_sampler.num_negs_per_pos 35.0 +752 78 training.batch_size 2.0 +752 79 model.embedding_dim 1.0 +752 79 model.relation_dim 1.0 +752 79 loss.margin 2.6620793726640404 +752 79 negative_sampler.num_negs_per_pos 60.0 +752 79 training.batch_size 1.0 +752 80 model.embedding_dim 1.0 +752 80 model.relation_dim 2.0 +752 80 loss.margin 5.747136571120773 +752 80 negative_sampler.num_negs_per_pos 32.0 +752 80 training.batch_size 2.0 +752 81 model.embedding_dim 1.0 +752 81 model.relation_dim 0.0 +752 81 loss.margin 4.049272523621465 +752 81 negative_sampler.num_negs_per_pos 86.0 +752 81 training.batch_size 1.0 +752 82 model.embedding_dim 1.0 +752 82 model.relation_dim 2.0 +752 82 loss.margin 3.1575257928080367 +752 82 negative_sampler.num_negs_per_pos 48.0 +752 82 training.batch_size 0.0 +752 83 model.embedding_dim 1.0 +752 83 model.relation_dim 1.0 +752 83 loss.margin 5.729923633779876 +752 83 negative_sampler.num_negs_per_pos 96.0 +752 83 training.batch_size 0.0 +752 84 model.embedding_dim 2.0 +752 84 model.relation_dim 2.0 +752 84 loss.margin 9.394521437666706 +752 84 negative_sampler.num_negs_per_pos 33.0 +752 84 training.batch_size 1.0 +752 85 model.embedding_dim 1.0 +752 85 model.relation_dim 0.0 +752 85 loss.margin 2.0153102392214386 +752 85 negative_sampler.num_negs_per_pos 71.0 +752 85 training.batch_size 1.0 +752 86 model.embedding_dim 0.0 +752 86 model.relation_dim 0.0 +752 86 loss.margin 8.245461694235425 +752 86 negative_sampler.num_negs_per_pos 95.0 +752 86 training.batch_size 0.0 +752 87 model.embedding_dim 2.0 +752 87 model.relation_dim 1.0 +752 87 loss.margin 3.537885746046028 +752 87 negative_sampler.num_negs_per_pos 56.0 +752 87 training.batch_size 0.0 +752 88 model.embedding_dim 1.0 +752 88 model.relation_dim 1.0 +752 88 loss.margin 9.613859804003962 +752 88 negative_sampler.num_negs_per_pos 87.0 +752 88 training.batch_size 2.0 +752 89 
model.embedding_dim 2.0 +752 89 model.relation_dim 0.0 +752 89 loss.margin 1.232879508554663 +752 89 negative_sampler.num_negs_per_pos 57.0 +752 89 training.batch_size 1.0 +752 90 model.embedding_dim 1.0 +752 90 model.relation_dim 0.0 +752 90 loss.margin 6.220375706678711 +752 90 negative_sampler.num_negs_per_pos 64.0 +752 90 training.batch_size 2.0 +752 91 model.embedding_dim 2.0 +752 91 model.relation_dim 2.0 +752 91 loss.margin 4.2519607985434025 +752 91 negative_sampler.num_negs_per_pos 23.0 +752 91 training.batch_size 1.0 +752 92 model.embedding_dim 1.0 +752 92 model.relation_dim 0.0 +752 92 loss.margin 2.4067236530196627 +752 92 negative_sampler.num_negs_per_pos 16.0 +752 92 training.batch_size 2.0 +752 93 model.embedding_dim 0.0 +752 93 model.relation_dim 0.0 +752 93 loss.margin 8.204579200875397 +752 93 negative_sampler.num_negs_per_pos 77.0 +752 93 training.batch_size 0.0 +752 94 model.embedding_dim 0.0 +752 94 model.relation_dim 2.0 +752 94 loss.margin 2.508717405992251 +752 94 negative_sampler.num_negs_per_pos 95.0 +752 94 training.batch_size 0.0 +752 95 model.embedding_dim 2.0 +752 95 model.relation_dim 0.0 +752 95 loss.margin 9.213366286349055 +752 95 negative_sampler.num_negs_per_pos 81.0 +752 95 training.batch_size 0.0 +752 96 model.embedding_dim 0.0 +752 96 model.relation_dim 2.0 +752 96 loss.margin 5.796175267280781 +752 96 negative_sampler.num_negs_per_pos 82.0 +752 96 training.batch_size 1.0 +752 97 model.embedding_dim 2.0 +752 97 model.relation_dim 0.0 +752 97 loss.margin 8.2092939174426 +752 97 negative_sampler.num_negs_per_pos 84.0 +752 97 training.batch_size 0.0 +752 98 model.embedding_dim 1.0 +752 98 model.relation_dim 1.0 +752 98 loss.margin 3.822373438863134 +752 98 negative_sampler.num_negs_per_pos 77.0 +752 98 training.batch_size 1.0 +752 99 model.embedding_dim 1.0 +752 99 model.relation_dim 0.0 +752 99 loss.margin 8.160325587866367 +752 99 negative_sampler.num_negs_per_pos 77.0 +752 99 training.batch_size 0.0 +752 100 
model.embedding_dim 0.0 +752 100 model.relation_dim 1.0 +752 100 loss.margin 7.202573316461484 +752 100 negative_sampler.num_negs_per_pos 49.0 +752 100 training.batch_size 0.0 +752 1 dataset """kinships""" +752 1 model """transd""" +752 1 loss """marginranking""" +752 1 regularizer """no""" +752 1 optimizer """adadelta""" +752 1 training_loop """owa""" +752 1 negative_sampler """basic""" +752 1 evaluator """rankbased""" +752 2 dataset """kinships""" +752 2 model """transd""" +752 2 loss """marginranking""" +752 2 regularizer """no""" +752 2 optimizer """adadelta""" +752 2 training_loop """owa""" +752 2 negative_sampler """basic""" +752 2 evaluator """rankbased""" +752 3 dataset """kinships""" +752 3 model """transd""" +752 3 loss """marginranking""" +752 3 regularizer """no""" +752 3 optimizer """adadelta""" +752 3 training_loop """owa""" +752 3 negative_sampler """basic""" +752 3 evaluator """rankbased""" +752 4 dataset """kinships""" +752 4 model """transd""" +752 4 loss """marginranking""" +752 4 regularizer """no""" +752 4 optimizer """adadelta""" +752 4 training_loop """owa""" +752 4 negative_sampler """basic""" +752 4 evaluator """rankbased""" +752 5 dataset """kinships""" +752 5 model """transd""" +752 5 loss """marginranking""" +752 5 regularizer """no""" +752 5 optimizer """adadelta""" +752 5 training_loop """owa""" +752 5 negative_sampler """basic""" +752 5 evaluator """rankbased""" +752 6 dataset """kinships""" +752 6 model """transd""" +752 6 loss """marginranking""" +752 6 regularizer """no""" +752 6 optimizer """adadelta""" +752 6 training_loop """owa""" +752 6 negative_sampler """basic""" +752 6 evaluator """rankbased""" +752 7 dataset """kinships""" +752 7 model """transd""" +752 7 loss """marginranking""" +752 7 regularizer """no""" +752 7 optimizer """adadelta""" +752 7 training_loop """owa""" +752 7 negative_sampler """basic""" +752 7 evaluator """rankbased""" +752 8 dataset """kinships""" +752 8 model """transd""" +752 8 loss """marginranking""" 
+752 8 regularizer """no""" +752 8 optimizer """adadelta""" +752 8 training_loop """owa""" +752 8 negative_sampler """basic""" +752 8 evaluator """rankbased""" +752 9 dataset """kinships""" +752 9 model """transd""" +752 9 loss """marginranking""" +752 9 regularizer """no""" +752 9 optimizer """adadelta""" +752 9 training_loop """owa""" +752 9 negative_sampler """basic""" +752 9 evaluator """rankbased""" +752 10 dataset """kinships""" +752 10 model """transd""" +752 10 loss """marginranking""" +752 10 regularizer """no""" +752 10 optimizer """adadelta""" +752 10 training_loop """owa""" +752 10 negative_sampler """basic""" +752 10 evaluator """rankbased""" +752 11 dataset """kinships""" +752 11 model """transd""" +752 11 loss """marginranking""" +752 11 regularizer """no""" +752 11 optimizer """adadelta""" +752 11 training_loop """owa""" +752 11 negative_sampler """basic""" +752 11 evaluator """rankbased""" +752 12 dataset """kinships""" +752 12 model """transd""" +752 12 loss """marginranking""" +752 12 regularizer """no""" +752 12 optimizer """adadelta""" +752 12 training_loop """owa""" +752 12 negative_sampler """basic""" +752 12 evaluator """rankbased""" +752 13 dataset """kinships""" +752 13 model """transd""" +752 13 loss """marginranking""" +752 13 regularizer """no""" +752 13 optimizer """adadelta""" +752 13 training_loop """owa""" +752 13 negative_sampler """basic""" +752 13 evaluator """rankbased""" +752 14 dataset """kinships""" +752 14 model """transd""" +752 14 loss """marginranking""" +752 14 regularizer """no""" +752 14 optimizer """adadelta""" +752 14 training_loop """owa""" +752 14 negative_sampler """basic""" +752 14 evaluator """rankbased""" +752 15 dataset """kinships""" +752 15 model """transd""" +752 15 loss """marginranking""" +752 15 regularizer """no""" +752 15 optimizer """adadelta""" +752 15 training_loop """owa""" +752 15 negative_sampler """basic""" +752 15 evaluator """rankbased""" +752 16 dataset """kinships""" +752 16 model 
"""transd""" +752 16 loss """marginranking""" +752 16 regularizer """no""" +752 16 optimizer """adadelta""" +752 16 training_loop """owa""" +752 16 negative_sampler """basic""" +752 16 evaluator """rankbased""" +752 17 dataset """kinships""" +752 17 model """transd""" +752 17 loss """marginranking""" +752 17 regularizer """no""" +752 17 optimizer """adadelta""" +752 17 training_loop """owa""" +752 17 negative_sampler """basic""" +752 17 evaluator """rankbased""" +752 18 dataset """kinships""" +752 18 model """transd""" +752 18 loss """marginranking""" +752 18 regularizer """no""" +752 18 optimizer """adadelta""" +752 18 training_loop """owa""" +752 18 negative_sampler """basic""" +752 18 evaluator """rankbased""" +752 19 dataset """kinships""" +752 19 model """transd""" +752 19 loss """marginranking""" +752 19 regularizer """no""" +752 19 optimizer """adadelta""" +752 19 training_loop """owa""" +752 19 negative_sampler """basic""" +752 19 evaluator """rankbased""" +752 20 dataset """kinships""" +752 20 model """transd""" +752 20 loss """marginranking""" +752 20 regularizer """no""" +752 20 optimizer """adadelta""" +752 20 training_loop """owa""" +752 20 negative_sampler """basic""" +752 20 evaluator """rankbased""" +752 21 dataset """kinships""" +752 21 model """transd""" +752 21 loss """marginranking""" +752 21 regularizer """no""" +752 21 optimizer """adadelta""" +752 21 training_loop """owa""" +752 21 negative_sampler """basic""" +752 21 evaluator """rankbased""" +752 22 dataset """kinships""" +752 22 model """transd""" +752 22 loss """marginranking""" +752 22 regularizer """no""" +752 22 optimizer """adadelta""" +752 22 training_loop """owa""" +752 22 negative_sampler """basic""" +752 22 evaluator """rankbased""" +752 23 dataset """kinships""" +752 23 model """transd""" +752 23 loss """marginranking""" +752 23 regularizer """no""" +752 23 optimizer """adadelta""" +752 23 training_loop """owa""" +752 23 negative_sampler """basic""" +752 23 evaluator 
"""rankbased""" +752 24 dataset """kinships""" +752 24 model """transd""" +752 24 loss """marginranking""" +752 24 regularizer """no""" +752 24 optimizer """adadelta""" +752 24 training_loop """owa""" +752 24 negative_sampler """basic""" +752 24 evaluator """rankbased""" +752 25 dataset """kinships""" +752 25 model """transd""" +752 25 loss """marginranking""" +752 25 regularizer """no""" +752 25 optimizer """adadelta""" +752 25 training_loop """owa""" +752 25 negative_sampler """basic""" +752 25 evaluator """rankbased""" +752 26 dataset """kinships""" +752 26 model """transd""" +752 26 loss """marginranking""" +752 26 regularizer """no""" +752 26 optimizer """adadelta""" +752 26 training_loop """owa""" +752 26 negative_sampler """basic""" +752 26 evaluator """rankbased""" +752 27 dataset """kinships""" +752 27 model """transd""" +752 27 loss """marginranking""" +752 27 regularizer """no""" +752 27 optimizer """adadelta""" +752 27 training_loop """owa""" +752 27 negative_sampler """basic""" +752 27 evaluator """rankbased""" +752 28 dataset """kinships""" +752 28 model """transd""" +752 28 loss """marginranking""" +752 28 regularizer """no""" +752 28 optimizer """adadelta""" +752 28 training_loop """owa""" +752 28 negative_sampler """basic""" +752 28 evaluator """rankbased""" +752 29 dataset """kinships""" +752 29 model """transd""" +752 29 loss """marginranking""" +752 29 regularizer """no""" +752 29 optimizer """adadelta""" +752 29 training_loop """owa""" +752 29 negative_sampler """basic""" +752 29 evaluator """rankbased""" +752 30 dataset """kinships""" +752 30 model """transd""" +752 30 loss """marginranking""" +752 30 regularizer """no""" +752 30 optimizer """adadelta""" +752 30 training_loop """owa""" +752 30 negative_sampler """basic""" +752 30 evaluator """rankbased""" +752 31 dataset """kinships""" +752 31 model """transd""" +752 31 loss """marginranking""" +752 31 regularizer """no""" +752 31 optimizer """adadelta""" +752 31 training_loop """owa""" +752 
31 negative_sampler """basic""" +752 31 evaluator """rankbased""" +752 32 dataset """kinships""" +752 32 model """transd""" +752 32 loss """marginranking""" +752 32 regularizer """no""" +752 32 optimizer """adadelta""" +752 32 training_loop """owa""" +752 32 negative_sampler """basic""" +752 32 evaluator """rankbased""" +752 33 dataset """kinships""" +752 33 model """transd""" +752 33 loss """marginranking""" +752 33 regularizer """no""" +752 33 optimizer """adadelta""" +752 33 training_loop """owa""" +752 33 negative_sampler """basic""" +752 33 evaluator """rankbased""" +752 34 dataset """kinships""" +752 34 model """transd""" +752 34 loss """marginranking""" +752 34 regularizer """no""" +752 34 optimizer """adadelta""" +752 34 training_loop """owa""" +752 34 negative_sampler """basic""" +752 34 evaluator """rankbased""" +752 35 dataset """kinships""" +752 35 model """transd""" +752 35 loss """marginranking""" +752 35 regularizer """no""" +752 35 optimizer """adadelta""" +752 35 training_loop """owa""" +752 35 negative_sampler """basic""" +752 35 evaluator """rankbased""" +752 36 dataset """kinships""" +752 36 model """transd""" +752 36 loss """marginranking""" +752 36 regularizer """no""" +752 36 optimizer """adadelta""" +752 36 training_loop """owa""" +752 36 negative_sampler """basic""" +752 36 evaluator """rankbased""" +752 37 dataset """kinships""" +752 37 model """transd""" +752 37 loss """marginranking""" +752 37 regularizer """no""" +752 37 optimizer """adadelta""" +752 37 training_loop """owa""" +752 37 negative_sampler """basic""" +752 37 evaluator """rankbased""" +752 38 dataset """kinships""" +752 38 model """transd""" +752 38 loss """marginranking""" +752 38 regularizer """no""" +752 38 optimizer """adadelta""" +752 38 training_loop """owa""" +752 38 negative_sampler """basic""" +752 38 evaluator """rankbased""" +752 39 dataset """kinships""" +752 39 model """transd""" +752 39 loss """marginranking""" +752 39 regularizer """no""" +752 39 optimizer 
"""adadelta""" +752 39 training_loop """owa""" +752 39 negative_sampler """basic""" +752 39 evaluator """rankbased""" +752 40 dataset """kinships""" +752 40 model """transd""" +752 40 loss """marginranking""" +752 40 regularizer """no""" +752 40 optimizer """adadelta""" +752 40 training_loop """owa""" +752 40 negative_sampler """basic""" +752 40 evaluator """rankbased""" +752 41 dataset """kinships""" +752 41 model """transd""" +752 41 loss """marginranking""" +752 41 regularizer """no""" +752 41 optimizer """adadelta""" +752 41 training_loop """owa""" +752 41 negative_sampler """basic""" +752 41 evaluator """rankbased""" +752 42 dataset """kinships""" +752 42 model """transd""" +752 42 loss """marginranking""" +752 42 regularizer """no""" +752 42 optimizer """adadelta""" +752 42 training_loop """owa""" +752 42 negative_sampler """basic""" +752 42 evaluator """rankbased""" +752 43 dataset """kinships""" +752 43 model """transd""" +752 43 loss """marginranking""" +752 43 regularizer """no""" +752 43 optimizer """adadelta""" +752 43 training_loop """owa""" +752 43 negative_sampler """basic""" +752 43 evaluator """rankbased""" +752 44 dataset """kinships""" +752 44 model """transd""" +752 44 loss """marginranking""" +752 44 regularizer """no""" +752 44 optimizer """adadelta""" +752 44 training_loop """owa""" +752 44 negative_sampler """basic""" +752 44 evaluator """rankbased""" +752 45 dataset """kinships""" +752 45 model """transd""" +752 45 loss """marginranking""" +752 45 regularizer """no""" +752 45 optimizer """adadelta""" +752 45 training_loop """owa""" +752 45 negative_sampler """basic""" +752 45 evaluator """rankbased""" +752 46 dataset """kinships""" +752 46 model """transd""" +752 46 loss """marginranking""" +752 46 regularizer """no""" +752 46 optimizer """adadelta""" +752 46 training_loop """owa""" +752 46 negative_sampler """basic""" +752 46 evaluator """rankbased""" +752 47 dataset """kinships""" +752 47 model """transd""" +752 47 loss 
"""marginranking""" +752 47 regularizer """no""" +752 47 optimizer """adadelta""" +752 47 training_loop """owa""" +752 47 negative_sampler """basic""" +752 47 evaluator """rankbased""" +752 48 dataset """kinships""" +752 48 model """transd""" +752 48 loss """marginranking""" +752 48 regularizer """no""" +752 48 optimizer """adadelta""" +752 48 training_loop """owa""" +752 48 negative_sampler """basic""" +752 48 evaluator """rankbased""" +752 49 dataset """kinships""" +752 49 model """transd""" +752 49 loss """marginranking""" +752 49 regularizer """no""" +752 49 optimizer """adadelta""" +752 49 training_loop """owa""" +752 49 negative_sampler """basic""" +752 49 evaluator """rankbased""" +752 50 dataset """kinships""" +752 50 model """transd""" +752 50 loss """marginranking""" +752 50 regularizer """no""" +752 50 optimizer """adadelta""" +752 50 training_loop """owa""" +752 50 negative_sampler """basic""" +752 50 evaluator """rankbased""" +752 51 dataset """kinships""" +752 51 model """transd""" +752 51 loss """marginranking""" +752 51 regularizer """no""" +752 51 optimizer """adadelta""" +752 51 training_loop """owa""" +752 51 negative_sampler """basic""" +752 51 evaluator """rankbased""" +752 52 dataset """kinships""" +752 52 model """transd""" +752 52 loss """marginranking""" +752 52 regularizer """no""" +752 52 optimizer """adadelta""" +752 52 training_loop """owa""" +752 52 negative_sampler """basic""" +752 52 evaluator """rankbased""" +752 53 dataset """kinships""" +752 53 model """transd""" +752 53 loss """marginranking""" +752 53 regularizer """no""" +752 53 optimizer """adadelta""" +752 53 training_loop """owa""" +752 53 negative_sampler """basic""" +752 53 evaluator """rankbased""" +752 54 dataset """kinships""" +752 54 model """transd""" +752 54 loss """marginranking""" +752 54 regularizer """no""" +752 54 optimizer """adadelta""" +752 54 training_loop """owa""" +752 54 negative_sampler """basic""" +752 54 evaluator """rankbased""" +752 55 dataset 
"""kinships""" +752 55 model """transd""" +752 55 loss """marginranking""" +752 55 regularizer """no""" +752 55 optimizer """adadelta""" +752 55 training_loop """owa""" +752 55 negative_sampler """basic""" +752 55 evaluator """rankbased""" +752 56 dataset """kinships""" +752 56 model """transd""" +752 56 loss """marginranking""" +752 56 regularizer """no""" +752 56 optimizer """adadelta""" +752 56 training_loop """owa""" +752 56 negative_sampler """basic""" +752 56 evaluator """rankbased""" +752 57 dataset """kinships""" +752 57 model """transd""" +752 57 loss """marginranking""" +752 57 regularizer """no""" +752 57 optimizer """adadelta""" +752 57 training_loop """owa""" +752 57 negative_sampler """basic""" +752 57 evaluator """rankbased""" +752 58 dataset """kinships""" +752 58 model """transd""" +752 58 loss """marginranking""" +752 58 regularizer """no""" +752 58 optimizer """adadelta""" +752 58 training_loop """owa""" +752 58 negative_sampler """basic""" +752 58 evaluator """rankbased""" +752 59 dataset """kinships""" +752 59 model """transd""" +752 59 loss """marginranking""" +752 59 regularizer """no""" +752 59 optimizer """adadelta""" +752 59 training_loop """owa""" +752 59 negative_sampler """basic""" +752 59 evaluator """rankbased""" +752 60 dataset """kinships""" +752 60 model """transd""" +752 60 loss """marginranking""" +752 60 regularizer """no""" +752 60 optimizer """adadelta""" +752 60 training_loop """owa""" +752 60 negative_sampler """basic""" +752 60 evaluator """rankbased""" +752 61 dataset """kinships""" +752 61 model """transd""" +752 61 loss """marginranking""" +752 61 regularizer """no""" +752 61 optimizer """adadelta""" +752 61 training_loop """owa""" +752 61 negative_sampler """basic""" +752 61 evaluator """rankbased""" +752 62 dataset """kinships""" +752 62 model """transd""" +752 62 loss """marginranking""" +752 62 regularizer """no""" +752 62 optimizer """adadelta""" +752 62 training_loop """owa""" +752 62 negative_sampler """basic""" 
+752 62 evaluator """rankbased""" +752 63 dataset """kinships""" +752 63 model """transd""" +752 63 loss """marginranking""" +752 63 regularizer """no""" +752 63 optimizer """adadelta""" +752 63 training_loop """owa""" +752 63 negative_sampler """basic""" +752 63 evaluator """rankbased""" +752 64 dataset """kinships""" +752 64 model """transd""" +752 64 loss """marginranking""" +752 64 regularizer """no""" +752 64 optimizer """adadelta""" +752 64 training_loop """owa""" +752 64 negative_sampler """basic""" +752 64 evaluator """rankbased""" +752 65 dataset """kinships""" +752 65 model """transd""" +752 65 loss """marginranking""" +752 65 regularizer """no""" +752 65 optimizer """adadelta""" +752 65 training_loop """owa""" +752 65 negative_sampler """basic""" +752 65 evaluator """rankbased""" +752 66 dataset """kinships""" +752 66 model """transd""" +752 66 loss """marginranking""" +752 66 regularizer """no""" +752 66 optimizer """adadelta""" +752 66 training_loop """owa""" +752 66 negative_sampler """basic""" +752 66 evaluator """rankbased""" +752 67 dataset """kinships""" +752 67 model """transd""" +752 67 loss """marginranking""" +752 67 regularizer """no""" +752 67 optimizer """adadelta""" +752 67 training_loop """owa""" +752 67 negative_sampler """basic""" +752 67 evaluator """rankbased""" +752 68 dataset """kinships""" +752 68 model """transd""" +752 68 loss """marginranking""" +752 68 regularizer """no""" +752 68 optimizer """adadelta""" +752 68 training_loop """owa""" +752 68 negative_sampler """basic""" +752 68 evaluator """rankbased""" +752 69 dataset """kinships""" +752 69 model """transd""" +752 69 loss """marginranking""" +752 69 regularizer """no""" +752 69 optimizer """adadelta""" +752 69 training_loop """owa""" +752 69 negative_sampler """basic""" +752 69 evaluator """rankbased""" +752 70 dataset """kinships""" +752 70 model """transd""" +752 70 loss """marginranking""" +752 70 regularizer """no""" +752 70 optimizer """adadelta""" +752 70 
training_loop """owa""" +752 70 negative_sampler """basic""" +752 70 evaluator """rankbased""" +752 71 dataset """kinships""" +752 71 model """transd""" +752 71 loss """marginranking""" +752 71 regularizer """no""" +752 71 optimizer """adadelta""" +752 71 training_loop """owa""" +752 71 negative_sampler """basic""" +752 71 evaluator """rankbased""" +752 72 dataset """kinships""" +752 72 model """transd""" +752 72 loss """marginranking""" +752 72 regularizer """no""" +752 72 optimizer """adadelta""" +752 72 training_loop """owa""" +752 72 negative_sampler """basic""" +752 72 evaluator """rankbased""" +752 73 dataset """kinships""" +752 73 model """transd""" +752 73 loss """marginranking""" +752 73 regularizer """no""" +752 73 optimizer """adadelta""" +752 73 training_loop """owa""" +752 73 negative_sampler """basic""" +752 73 evaluator """rankbased""" +752 74 dataset """kinships""" +752 74 model """transd""" +752 74 loss """marginranking""" +752 74 regularizer """no""" +752 74 optimizer """adadelta""" +752 74 training_loop """owa""" +752 74 negative_sampler """basic""" +752 74 evaluator """rankbased""" +752 75 dataset """kinships""" +752 75 model """transd""" +752 75 loss """marginranking""" +752 75 regularizer """no""" +752 75 optimizer """adadelta""" +752 75 training_loop """owa""" +752 75 negative_sampler """basic""" +752 75 evaluator """rankbased""" +752 76 dataset """kinships""" +752 76 model """transd""" +752 76 loss """marginranking""" +752 76 regularizer """no""" +752 76 optimizer """adadelta""" +752 76 training_loop """owa""" +752 76 negative_sampler """basic""" +752 76 evaluator """rankbased""" +752 77 dataset """kinships""" +752 77 model """transd""" +752 77 loss """marginranking""" +752 77 regularizer """no""" +752 77 optimizer """adadelta""" +752 77 training_loop """owa""" +752 77 negative_sampler """basic""" +752 77 evaluator """rankbased""" +752 78 dataset """kinships""" +752 78 model """transd""" +752 78 loss """marginranking""" +752 78 regularizer 
"""no""" +752 78 optimizer """adadelta""" +752 78 training_loop """owa""" +752 78 negative_sampler """basic""" +752 78 evaluator """rankbased""" +752 79 dataset """kinships""" +752 79 model """transd""" +752 79 loss """marginranking""" +752 79 regularizer """no""" +752 79 optimizer """adadelta""" +752 79 training_loop """owa""" +752 79 negative_sampler """basic""" +752 79 evaluator """rankbased""" +752 80 dataset """kinships""" +752 80 model """transd""" +752 80 loss """marginranking""" +752 80 regularizer """no""" +752 80 optimizer """adadelta""" +752 80 training_loop """owa""" +752 80 negative_sampler """basic""" +752 80 evaluator """rankbased""" +752 81 dataset """kinships""" +752 81 model """transd""" +752 81 loss """marginranking""" +752 81 regularizer """no""" +752 81 optimizer """adadelta""" +752 81 training_loop """owa""" +752 81 negative_sampler """basic""" +752 81 evaluator """rankbased""" +752 82 dataset """kinships""" +752 82 model """transd""" +752 82 loss """marginranking""" +752 82 regularizer """no""" +752 82 optimizer """adadelta""" +752 82 training_loop """owa""" +752 82 negative_sampler """basic""" +752 82 evaluator """rankbased""" +752 83 dataset """kinships""" +752 83 model """transd""" +752 83 loss """marginranking""" +752 83 regularizer """no""" +752 83 optimizer """adadelta""" +752 83 training_loop """owa""" +752 83 negative_sampler """basic""" +752 83 evaluator """rankbased""" +752 84 dataset """kinships""" +752 84 model """transd""" +752 84 loss """marginranking""" +752 84 regularizer """no""" +752 84 optimizer """adadelta""" +752 84 training_loop """owa""" +752 84 negative_sampler """basic""" +752 84 evaluator """rankbased""" +752 85 dataset """kinships""" +752 85 model """transd""" +752 85 loss """marginranking""" +752 85 regularizer """no""" +752 85 optimizer """adadelta""" +752 85 training_loop """owa""" +752 85 negative_sampler """basic""" +752 85 evaluator """rankbased""" +752 86 dataset """kinships""" +752 86 model """transd""" +752 
86 loss """marginranking""" +752 86 regularizer """no""" +752 86 optimizer """adadelta""" +752 86 training_loop """owa""" +752 86 negative_sampler """basic""" +752 86 evaluator """rankbased""" +752 87 dataset """kinships""" +752 87 model """transd""" +752 87 loss """marginranking""" +752 87 regularizer """no""" +752 87 optimizer """adadelta""" +752 87 training_loop """owa""" +752 87 negative_sampler """basic""" +752 87 evaluator """rankbased""" +752 88 dataset """kinships""" +752 88 model """transd""" +752 88 loss """marginranking""" +752 88 regularizer """no""" +752 88 optimizer """adadelta""" +752 88 training_loop """owa""" +752 88 negative_sampler """basic""" +752 88 evaluator """rankbased""" +752 89 dataset """kinships""" +752 89 model """transd""" +752 89 loss """marginranking""" +752 89 regularizer """no""" +752 89 optimizer """adadelta""" +752 89 training_loop """owa""" +752 89 negative_sampler """basic""" +752 89 evaluator """rankbased""" +752 90 dataset """kinships""" +752 90 model """transd""" +752 90 loss """marginranking""" +752 90 regularizer """no""" +752 90 optimizer """adadelta""" +752 90 training_loop """owa""" +752 90 negative_sampler """basic""" +752 90 evaluator """rankbased""" +752 91 dataset """kinships""" +752 91 model """transd""" +752 91 loss """marginranking""" +752 91 regularizer """no""" +752 91 optimizer """adadelta""" +752 91 training_loop """owa""" +752 91 negative_sampler """basic""" +752 91 evaluator """rankbased""" +752 92 dataset """kinships""" +752 92 model """transd""" +752 92 loss """marginranking""" +752 92 regularizer """no""" +752 92 optimizer """adadelta""" +752 92 training_loop """owa""" +752 92 negative_sampler """basic""" +752 92 evaluator """rankbased""" +752 93 dataset """kinships""" +752 93 model """transd""" +752 93 loss """marginranking""" +752 93 regularizer """no""" +752 93 optimizer """adadelta""" +752 93 training_loop """owa""" +752 93 negative_sampler """basic""" +752 93 evaluator """rankbased""" +752 94 
dataset """kinships""" +752 94 model """transd""" +752 94 loss """marginranking""" +752 94 regularizer """no""" +752 94 optimizer """adadelta""" +752 94 training_loop """owa""" +752 94 negative_sampler """basic""" +752 94 evaluator """rankbased""" +752 95 dataset """kinships""" +752 95 model """transd""" +752 95 loss """marginranking""" +752 95 regularizer """no""" +752 95 optimizer """adadelta""" +752 95 training_loop """owa""" +752 95 negative_sampler """basic""" +752 95 evaluator """rankbased""" +752 96 dataset """kinships""" +752 96 model """transd""" +752 96 loss """marginranking""" +752 96 regularizer """no""" +752 96 optimizer """adadelta""" +752 96 training_loop """owa""" +752 96 negative_sampler """basic""" +752 96 evaluator """rankbased""" +752 97 dataset """kinships""" +752 97 model """transd""" +752 97 loss """marginranking""" +752 97 regularizer """no""" +752 97 optimizer """adadelta""" +752 97 training_loop """owa""" +752 97 negative_sampler """basic""" +752 97 evaluator """rankbased""" +752 98 dataset """kinships""" +752 98 model """transd""" +752 98 loss """marginranking""" +752 98 regularizer """no""" +752 98 optimizer """adadelta""" +752 98 training_loop """owa""" +752 98 negative_sampler """basic""" +752 98 evaluator """rankbased""" +752 99 dataset """kinships""" +752 99 model """transd""" +752 99 loss """marginranking""" +752 99 regularizer """no""" +752 99 optimizer """adadelta""" +752 99 training_loop """owa""" +752 99 negative_sampler """basic""" +752 99 evaluator """rankbased""" +752 100 dataset """kinships""" +752 100 model """transd""" +752 100 loss """marginranking""" +752 100 regularizer """no""" +752 100 optimizer """adadelta""" +752 100 training_loop """owa""" +752 100 negative_sampler """basic""" +752 100 evaluator """rankbased""" +753 1 model.embedding_dim 1.0 +753 1 model.relation_dim 0.0 +753 1 loss.margin 27.03864300768448 +753 1 loss.adversarial_temperature 0.5567542672526358 +753 1 negative_sampler.num_negs_per_pos 28.0 +753 1 
training.batch_size 0.0 +753 2 model.embedding_dim 2.0 +753 2 model.relation_dim 1.0 +753 2 loss.margin 21.241207494792093 +753 2 loss.adversarial_temperature 0.2622843890162532 +753 2 negative_sampler.num_negs_per_pos 31.0 +753 2 training.batch_size 1.0 +753 3 model.embedding_dim 2.0 +753 3 model.relation_dim 2.0 +753 3 loss.margin 21.388226072181507 +753 3 loss.adversarial_temperature 0.353751481949241 +753 3 negative_sampler.num_negs_per_pos 91.0 +753 3 training.batch_size 2.0 +753 4 model.embedding_dim 1.0 +753 4 model.relation_dim 1.0 +753 4 loss.margin 3.276783170377065 +753 4 loss.adversarial_temperature 0.648129112755177 +753 4 negative_sampler.num_negs_per_pos 9.0 +753 4 training.batch_size 2.0 +753 5 model.embedding_dim 1.0 +753 5 model.relation_dim 2.0 +753 5 loss.margin 8.569699183562033 +753 5 loss.adversarial_temperature 0.17141273796749934 +753 5 negative_sampler.num_negs_per_pos 34.0 +753 5 training.batch_size 1.0 +753 6 model.embedding_dim 1.0 +753 6 model.relation_dim 1.0 +753 6 loss.margin 22.56983797624322 +753 6 loss.adversarial_temperature 0.5710011946819098 +753 6 negative_sampler.num_negs_per_pos 71.0 +753 6 training.batch_size 2.0 +753 7 model.embedding_dim 0.0 +753 7 model.relation_dim 1.0 +753 7 loss.margin 12.038235267703591 +753 7 loss.adversarial_temperature 0.39403350443204865 +753 7 negative_sampler.num_negs_per_pos 46.0 +753 7 training.batch_size 2.0 +753 8 model.embedding_dim 0.0 +753 8 model.relation_dim 0.0 +753 8 loss.margin 11.631433766348511 +753 8 loss.adversarial_temperature 0.42719241355320337 +753 8 negative_sampler.num_negs_per_pos 33.0 +753 8 training.batch_size 2.0 +753 9 model.embedding_dim 1.0 +753 9 model.relation_dim 1.0 +753 9 loss.margin 25.36975632201236 +753 9 loss.adversarial_temperature 0.9033823536933238 +753 9 negative_sampler.num_negs_per_pos 12.0 +753 9 training.batch_size 0.0 +753 10 model.embedding_dim 1.0 +753 10 model.relation_dim 2.0 +753 10 loss.margin 8.798557807596207 +753 10 
loss.adversarial_temperature 0.39226296780169356 +753 10 negative_sampler.num_negs_per_pos 72.0 +753 10 training.batch_size 2.0 +753 11 model.embedding_dim 2.0 +753 11 model.relation_dim 0.0 +753 11 loss.margin 6.1363574867956805 +753 11 loss.adversarial_temperature 0.7639579168700988 +753 11 negative_sampler.num_negs_per_pos 69.0 +753 11 training.batch_size 2.0 +753 12 model.embedding_dim 1.0 +753 12 model.relation_dim 2.0 +753 12 loss.margin 7.743883065519134 +753 12 loss.adversarial_temperature 0.7178562862397311 +753 12 negative_sampler.num_negs_per_pos 22.0 +753 12 training.batch_size 2.0 +753 13 model.embedding_dim 0.0 +753 13 model.relation_dim 0.0 +753 13 loss.margin 3.192943108048261 +753 13 loss.adversarial_temperature 0.886791404364131 +753 13 negative_sampler.num_negs_per_pos 51.0 +753 13 training.batch_size 2.0 +753 14 model.embedding_dim 0.0 +753 14 model.relation_dim 0.0 +753 14 loss.margin 2.8886882304108807 +753 14 loss.adversarial_temperature 0.7954972391086637 +753 14 negative_sampler.num_negs_per_pos 54.0 +753 14 training.batch_size 0.0 +753 15 model.embedding_dim 2.0 +753 15 model.relation_dim 0.0 +753 15 loss.margin 11.182583334302015 +753 15 loss.adversarial_temperature 0.5459538340568802 +753 15 negative_sampler.num_negs_per_pos 10.0 +753 15 training.batch_size 0.0 +753 16 model.embedding_dim 2.0 +753 16 model.relation_dim 2.0 +753 16 loss.margin 2.478538367505073 +753 16 loss.adversarial_temperature 0.6499762208099034 +753 16 negative_sampler.num_negs_per_pos 12.0 +753 16 training.batch_size 0.0 +753 17 model.embedding_dim 0.0 +753 17 model.relation_dim 2.0 +753 17 loss.margin 5.969657135593659 +753 17 loss.adversarial_temperature 0.43088122244107485 +753 17 negative_sampler.num_negs_per_pos 34.0 +753 17 training.batch_size 2.0 +753 18 model.embedding_dim 0.0 +753 18 model.relation_dim 1.0 +753 18 loss.margin 4.765841286843324 +753 18 loss.adversarial_temperature 0.7690639990777014 +753 18 negative_sampler.num_negs_per_pos 50.0 +753 18 
training.batch_size 1.0 +753 19 model.embedding_dim 2.0 +753 19 model.relation_dim 2.0 +753 19 loss.margin 4.4539284473362315 +753 19 loss.adversarial_temperature 0.22152937056439467 +753 19 negative_sampler.num_negs_per_pos 49.0 +753 19 training.batch_size 2.0 +753 20 model.embedding_dim 1.0 +753 20 model.relation_dim 1.0 +753 20 loss.margin 24.99645542470553 +753 20 loss.adversarial_temperature 0.5736183428495535 +753 20 negative_sampler.num_negs_per_pos 19.0 +753 20 training.batch_size 1.0 +753 21 model.embedding_dim 1.0 +753 21 model.relation_dim 2.0 +753 21 loss.margin 23.920667888318153 +753 21 loss.adversarial_temperature 0.8098258087871709 +753 21 negative_sampler.num_negs_per_pos 84.0 +753 21 training.batch_size 0.0 +753 22 model.embedding_dim 1.0 +753 22 model.relation_dim 1.0 +753 22 loss.margin 16.643945381319096 +753 22 loss.adversarial_temperature 0.26040516602272146 +753 22 negative_sampler.num_negs_per_pos 52.0 +753 22 training.batch_size 0.0 +753 23 model.embedding_dim 1.0 +753 23 model.relation_dim 1.0 +753 23 loss.margin 6.1835275618312044 +753 23 loss.adversarial_temperature 0.4419397058437011 +753 23 negative_sampler.num_negs_per_pos 89.0 +753 23 training.batch_size 2.0 +753 24 model.embedding_dim 0.0 +753 24 model.relation_dim 0.0 +753 24 loss.margin 15.459102652533518 +753 24 loss.adversarial_temperature 0.9202503076180967 +753 24 negative_sampler.num_negs_per_pos 92.0 +753 24 training.batch_size 0.0 +753 25 model.embedding_dim 2.0 +753 25 model.relation_dim 0.0 +753 25 loss.margin 12.481621159121342 +753 25 loss.adversarial_temperature 0.928623672711716 +753 25 negative_sampler.num_negs_per_pos 45.0 +753 25 training.batch_size 2.0 +753 26 model.embedding_dim 1.0 +753 26 model.relation_dim 1.0 +753 26 loss.margin 5.942623037714505 +753 26 loss.adversarial_temperature 0.46323910209952246 +753 26 negative_sampler.num_negs_per_pos 61.0 +753 26 training.batch_size 2.0 +753 27 model.embedding_dim 2.0 +753 27 model.relation_dim 2.0 +753 27 
loss.margin 11.707203879646931 +753 27 loss.adversarial_temperature 0.658082247712131 +753 27 negative_sampler.num_negs_per_pos 93.0 +753 27 training.batch_size 2.0 +753 28 model.embedding_dim 2.0 +753 28 model.relation_dim 1.0 +753 28 loss.margin 7.2621816687221585 +753 28 loss.adversarial_temperature 0.646211969646781 +753 28 negative_sampler.num_negs_per_pos 96.0 +753 28 training.batch_size 0.0 +753 29 model.embedding_dim 2.0 +753 29 model.relation_dim 0.0 +753 29 loss.margin 7.08516189267772 +753 29 loss.adversarial_temperature 0.24269062561538496 +753 29 negative_sampler.num_negs_per_pos 42.0 +753 29 training.batch_size 1.0 +753 30 model.embedding_dim 1.0 +753 30 model.relation_dim 0.0 +753 30 loss.margin 16.813505536966936 +753 30 loss.adversarial_temperature 0.42299771203077796 +753 30 negative_sampler.num_negs_per_pos 21.0 +753 30 training.batch_size 0.0 +753 31 model.embedding_dim 0.0 +753 31 model.relation_dim 0.0 +753 31 loss.margin 28.333535445101003 +753 31 loss.adversarial_temperature 0.8881515459436988 +753 31 negative_sampler.num_negs_per_pos 39.0 +753 31 training.batch_size 1.0 +753 32 model.embedding_dim 0.0 +753 32 model.relation_dim 2.0 +753 32 loss.margin 1.6599760022865964 +753 32 loss.adversarial_temperature 0.41534618882593277 +753 32 negative_sampler.num_negs_per_pos 6.0 +753 32 training.batch_size 1.0 +753 33 model.embedding_dim 2.0 +753 33 model.relation_dim 2.0 +753 33 loss.margin 12.318136253338922 +753 33 loss.adversarial_temperature 0.5370556314216133 +753 33 negative_sampler.num_negs_per_pos 98.0 +753 33 training.batch_size 2.0 +753 34 model.embedding_dim 0.0 +753 34 model.relation_dim 0.0 +753 34 loss.margin 17.860583683724965 +753 34 loss.adversarial_temperature 0.6973996215315899 +753 34 negative_sampler.num_negs_per_pos 42.0 +753 34 training.batch_size 1.0 +753 35 model.embedding_dim 1.0 +753 35 model.relation_dim 1.0 +753 35 loss.margin 4.690205782484253 +753 35 loss.adversarial_temperature 0.14268920749216255 +753 35 
negative_sampler.num_negs_per_pos 7.0 +753 35 training.batch_size 1.0 +753 36 model.embedding_dim 1.0 +753 36 model.relation_dim 0.0 +753 36 loss.margin 9.828987054691268 +753 36 loss.adversarial_temperature 0.9292821549788519 +753 36 negative_sampler.num_negs_per_pos 39.0 +753 36 training.batch_size 2.0 +753 37 model.embedding_dim 1.0 +753 37 model.relation_dim 2.0 +753 37 loss.margin 11.176200811386678 +753 37 loss.adversarial_temperature 0.1047756891138329 +753 37 negative_sampler.num_negs_per_pos 28.0 +753 37 training.batch_size 1.0 +753 38 model.embedding_dim 1.0 +753 38 model.relation_dim 2.0 +753 38 loss.margin 1.2815623120070565 +753 38 loss.adversarial_temperature 0.1814341042705733 +753 38 negative_sampler.num_negs_per_pos 1.0 +753 38 training.batch_size 2.0 +753 39 model.embedding_dim 0.0 +753 39 model.relation_dim 1.0 +753 39 loss.margin 14.743905918975605 +753 39 loss.adversarial_temperature 0.24571752489587526 +753 39 negative_sampler.num_negs_per_pos 5.0 +753 39 training.batch_size 2.0 +753 40 model.embedding_dim 2.0 +753 40 model.relation_dim 0.0 +753 40 loss.margin 19.248249120777665 +753 40 loss.adversarial_temperature 0.24527735521874233 +753 40 negative_sampler.num_negs_per_pos 96.0 +753 40 training.batch_size 2.0 +753 41 model.embedding_dim 1.0 +753 41 model.relation_dim 1.0 +753 41 loss.margin 1.9208146665869086 +753 41 loss.adversarial_temperature 0.4827706703204515 +753 41 negative_sampler.num_negs_per_pos 61.0 +753 41 training.batch_size 1.0 +753 42 model.embedding_dim 2.0 +753 42 model.relation_dim 2.0 +753 42 loss.margin 6.197323169342622 +753 42 loss.adversarial_temperature 0.7402244183761547 +753 42 negative_sampler.num_negs_per_pos 59.0 +753 42 training.batch_size 1.0 +753 43 model.embedding_dim 2.0 +753 43 model.relation_dim 1.0 +753 43 loss.margin 12.968634693264866 +753 43 loss.adversarial_temperature 0.8931127474190078 +753 43 negative_sampler.num_negs_per_pos 96.0 +753 43 training.batch_size 0.0 +753 44 model.embedding_dim 0.0 
+753 44 model.relation_dim 2.0 +753 44 loss.margin 9.413343105419841 +753 44 loss.adversarial_temperature 0.5737566093120678 +753 44 negative_sampler.num_negs_per_pos 30.0 +753 44 training.batch_size 1.0 +753 45 model.embedding_dim 1.0 +753 45 model.relation_dim 0.0 +753 45 loss.margin 24.33644572102241 +753 45 loss.adversarial_temperature 0.8058221875931628 +753 45 negative_sampler.num_negs_per_pos 33.0 +753 45 training.batch_size 1.0 +753 46 model.embedding_dim 2.0 +753 46 model.relation_dim 1.0 +753 46 loss.margin 25.3692050613278 +753 46 loss.adversarial_temperature 0.26728204853447524 +753 46 negative_sampler.num_negs_per_pos 18.0 +753 46 training.batch_size 1.0 +753 47 model.embedding_dim 1.0 +753 47 model.relation_dim 2.0 +753 47 loss.margin 17.932595709024085 +753 47 loss.adversarial_temperature 0.40611855014405784 +753 47 negative_sampler.num_negs_per_pos 48.0 +753 47 training.batch_size 2.0 +753 48 model.embedding_dim 1.0 +753 48 model.relation_dim 0.0 +753 48 loss.margin 19.230110299845617 +753 48 loss.adversarial_temperature 0.42769476251538097 +753 48 negative_sampler.num_negs_per_pos 99.0 +753 48 training.batch_size 2.0 +753 49 model.embedding_dim 1.0 +753 49 model.relation_dim 1.0 +753 49 loss.margin 9.302281572121524 +753 49 loss.adversarial_temperature 0.25176910341062825 +753 49 negative_sampler.num_negs_per_pos 83.0 +753 49 training.batch_size 1.0 +753 50 model.embedding_dim 0.0 +753 50 model.relation_dim 2.0 +753 50 loss.margin 7.287197314800867 +753 50 loss.adversarial_temperature 0.20836192270912388 +753 50 negative_sampler.num_negs_per_pos 37.0 +753 50 training.batch_size 2.0 +753 51 model.embedding_dim 2.0 +753 51 model.relation_dim 1.0 +753 51 loss.margin 29.966740933986053 +753 51 loss.adversarial_temperature 0.4283583175236969 +753 51 negative_sampler.num_negs_per_pos 68.0 +753 51 training.batch_size 1.0 +753 52 model.embedding_dim 0.0 +753 52 model.relation_dim 1.0 +753 52 loss.margin 7.281940916850361 +753 52 
loss.adversarial_temperature 0.6357400174187142 +753 52 negative_sampler.num_negs_per_pos 19.0 +753 52 training.batch_size 1.0 +753 53 model.embedding_dim 1.0 +753 53 model.relation_dim 2.0 +753 53 loss.margin 27.829549776187626 +753 53 loss.adversarial_temperature 0.6598794187691749 +753 53 negative_sampler.num_negs_per_pos 19.0 +753 53 training.batch_size 0.0 +753 54 model.embedding_dim 1.0 +753 54 model.relation_dim 0.0 +753 54 loss.margin 15.274681399322416 +753 54 loss.adversarial_temperature 0.8912011223797021 +753 54 negative_sampler.num_negs_per_pos 57.0 +753 54 training.batch_size 0.0 +753 55 model.embedding_dim 2.0 +753 55 model.relation_dim 0.0 +753 55 loss.margin 9.87415837120325 +753 55 loss.adversarial_temperature 0.5479612454490599 +753 55 negative_sampler.num_negs_per_pos 10.0 +753 55 training.batch_size 1.0 +753 56 model.embedding_dim 1.0 +753 56 model.relation_dim 0.0 +753 56 loss.margin 9.49561234319232 +753 56 loss.adversarial_temperature 0.8560978499606474 +753 56 negative_sampler.num_negs_per_pos 79.0 +753 56 training.batch_size 2.0 +753 57 model.embedding_dim 1.0 +753 57 model.relation_dim 0.0 +753 57 loss.margin 9.548652172378993 +753 57 loss.adversarial_temperature 0.46278407405177 +753 57 negative_sampler.num_negs_per_pos 58.0 +753 57 training.batch_size 1.0 +753 58 model.embedding_dim 2.0 +753 58 model.relation_dim 0.0 +753 58 loss.margin 8.577373003672763 +753 58 loss.adversarial_temperature 0.6511023056191516 +753 58 negative_sampler.num_negs_per_pos 1.0 +753 58 training.batch_size 2.0 +753 59 model.embedding_dim 0.0 +753 59 model.relation_dim 1.0 +753 59 loss.margin 25.041670077838784 +753 59 loss.adversarial_temperature 0.9233035343776841 +753 59 negative_sampler.num_negs_per_pos 58.0 +753 59 training.batch_size 1.0 +753 60 model.embedding_dim 1.0 +753 60 model.relation_dim 2.0 +753 60 loss.margin 29.395724016696125 +753 60 loss.adversarial_temperature 0.4031246737863572 +753 60 negative_sampler.num_negs_per_pos 90.0 +753 60 
training.batch_size 1.0 +753 61 model.embedding_dim 0.0 +753 61 model.relation_dim 0.0 +753 61 loss.margin 6.950272543765835 +753 61 loss.adversarial_temperature 0.47754120834372016 +753 61 negative_sampler.num_negs_per_pos 74.0 +753 61 training.batch_size 2.0 +753 62 model.embedding_dim 0.0 +753 62 model.relation_dim 1.0 +753 62 loss.margin 20.881460760587597 +753 62 loss.adversarial_temperature 0.5267427953645328 +753 62 negative_sampler.num_negs_per_pos 98.0 +753 62 training.batch_size 1.0 +753 63 model.embedding_dim 2.0 +753 63 model.relation_dim 1.0 +753 63 loss.margin 12.711498931008181 +753 63 loss.adversarial_temperature 0.4136382996459752 +753 63 negative_sampler.num_negs_per_pos 40.0 +753 63 training.batch_size 1.0 +753 64 model.embedding_dim 0.0 +753 64 model.relation_dim 1.0 +753 64 loss.margin 10.974156926672073 +753 64 loss.adversarial_temperature 0.2552710855799368 +753 64 negative_sampler.num_negs_per_pos 61.0 +753 64 training.batch_size 2.0 +753 65 model.embedding_dim 1.0 +753 65 model.relation_dim 0.0 +753 65 loss.margin 27.306384123718594 +753 65 loss.adversarial_temperature 0.24441564188245354 +753 65 negative_sampler.num_negs_per_pos 94.0 +753 65 training.batch_size 1.0 +753 66 model.embedding_dim 1.0 +753 66 model.relation_dim 1.0 +753 66 loss.margin 24.57207846815186 +753 66 loss.adversarial_temperature 0.8065395699474839 +753 66 negative_sampler.num_negs_per_pos 83.0 +753 66 training.batch_size 1.0 +753 67 model.embedding_dim 2.0 +753 67 model.relation_dim 2.0 +753 67 loss.margin 17.589457568415412 +753 67 loss.adversarial_temperature 0.9838304483052959 +753 67 negative_sampler.num_negs_per_pos 80.0 +753 67 training.batch_size 2.0 +753 68 model.embedding_dim 2.0 +753 68 model.relation_dim 1.0 +753 68 loss.margin 18.925193561950067 +753 68 loss.adversarial_temperature 0.5660655648430438 +753 68 negative_sampler.num_negs_per_pos 21.0 +753 68 training.batch_size 2.0 +753 69 model.embedding_dim 2.0 +753 69 model.relation_dim 0.0 +753 69 
loss.margin 3.6907841653378695 +753 69 loss.adversarial_temperature 0.3990084103915814 +753 69 negative_sampler.num_negs_per_pos 26.0 +753 69 training.batch_size 2.0 +753 70 model.embedding_dim 1.0 +753 70 model.relation_dim 0.0 +753 70 loss.margin 28.5737763346283 +753 70 loss.adversarial_temperature 0.3303292312948863 +753 70 negative_sampler.num_negs_per_pos 48.0 +753 70 training.batch_size 2.0 +753 71 model.embedding_dim 2.0 +753 71 model.relation_dim 0.0 +753 71 loss.margin 17.895716216666862 +753 71 loss.adversarial_temperature 0.8504090302773829 +753 71 negative_sampler.num_negs_per_pos 5.0 +753 71 training.batch_size 2.0 +753 72 model.embedding_dim 2.0 +753 72 model.relation_dim 0.0 +753 72 loss.margin 26.479225033476393 +753 72 loss.adversarial_temperature 0.20102205700103842 +753 72 negative_sampler.num_negs_per_pos 30.0 +753 72 training.batch_size 0.0 +753 73 model.embedding_dim 0.0 +753 73 model.relation_dim 1.0 +753 73 loss.margin 1.0663239580741868 +753 73 loss.adversarial_temperature 0.1705697678950996 +753 73 negative_sampler.num_negs_per_pos 1.0 +753 73 training.batch_size 2.0 +753 74 model.embedding_dim 0.0 +753 74 model.relation_dim 0.0 +753 74 loss.margin 20.432283926673847 +753 74 loss.adversarial_temperature 0.9951800266404246 +753 74 negative_sampler.num_negs_per_pos 39.0 +753 74 training.batch_size 2.0 +753 75 model.embedding_dim 1.0 +753 75 model.relation_dim 0.0 +753 75 loss.margin 13.945975648529654 +753 75 loss.adversarial_temperature 0.3254085449189227 +753 75 negative_sampler.num_negs_per_pos 34.0 +753 75 training.batch_size 2.0 +753 76 model.embedding_dim 2.0 +753 76 model.relation_dim 0.0 +753 76 loss.margin 12.185180401528157 +753 76 loss.adversarial_temperature 0.4575581652063112 +753 76 negative_sampler.num_negs_per_pos 83.0 +753 76 training.batch_size 0.0 +753 77 model.embedding_dim 0.0 +753 77 model.relation_dim 1.0 +753 77 loss.margin 15.365665034883513 +753 77 loss.adversarial_temperature 0.3559155960858771 +753 77 
negative_sampler.num_negs_per_pos 26.0 +753 77 training.batch_size 0.0 +753 78 model.embedding_dim 0.0 +753 78 model.relation_dim 2.0 +753 78 loss.margin 15.893918892932286 +753 78 loss.adversarial_temperature 0.4550421562601191 +753 78 negative_sampler.num_negs_per_pos 41.0 +753 78 training.batch_size 1.0 +753 79 model.embedding_dim 2.0 +753 79 model.relation_dim 2.0 +753 79 loss.margin 7.883714586482358 +753 79 loss.adversarial_temperature 0.7224043406400247 +753 79 negative_sampler.num_negs_per_pos 8.0 +753 79 training.batch_size 2.0 +753 80 model.embedding_dim 0.0 +753 80 model.relation_dim 1.0 +753 80 loss.margin 22.16666132190592 +753 80 loss.adversarial_temperature 0.7219729138000014 +753 80 negative_sampler.num_negs_per_pos 53.0 +753 80 training.batch_size 0.0 +753 81 model.embedding_dim 2.0 +753 81 model.relation_dim 0.0 +753 81 loss.margin 26.477250617747018 +753 81 loss.adversarial_temperature 0.4078623727814452 +753 81 negative_sampler.num_negs_per_pos 90.0 +753 81 training.batch_size 0.0 +753 82 model.embedding_dim 1.0 +753 82 model.relation_dim 1.0 +753 82 loss.margin 1.2919999966293154 +753 82 loss.adversarial_temperature 0.4149108549816577 +753 82 negative_sampler.num_negs_per_pos 78.0 +753 82 training.batch_size 2.0 +753 83 model.embedding_dim 1.0 +753 83 model.relation_dim 2.0 +753 83 loss.margin 17.418790688206983 +753 83 loss.adversarial_temperature 0.8380113051976843 +753 83 negative_sampler.num_negs_per_pos 21.0 +753 83 training.batch_size 2.0 +753 84 model.embedding_dim 1.0 +753 84 model.relation_dim 1.0 +753 84 loss.margin 5.548252682005421 +753 84 loss.adversarial_temperature 0.5026421970657887 +753 84 negative_sampler.num_negs_per_pos 15.0 +753 84 training.batch_size 1.0 +753 85 model.embedding_dim 2.0 +753 85 model.relation_dim 2.0 +753 85 loss.margin 22.927213149083144 +753 85 loss.adversarial_temperature 0.6563454741609768 +753 85 negative_sampler.num_negs_per_pos 54.0 +753 85 training.batch_size 0.0 +753 86 model.embedding_dim 2.0 +753 
86 model.relation_dim 1.0 +753 86 loss.margin 16.71736875546243 +753 86 loss.adversarial_temperature 0.39405409548870907 +753 86 negative_sampler.num_negs_per_pos 64.0 +753 86 training.batch_size 2.0 +753 87 model.embedding_dim 0.0 +753 87 model.relation_dim 0.0 +753 87 loss.margin 10.425036044870625 +753 87 loss.adversarial_temperature 0.32221983461043685 +753 87 negative_sampler.num_negs_per_pos 72.0 +753 87 training.batch_size 2.0 +753 88 model.embedding_dim 0.0 +753 88 model.relation_dim 0.0 +753 88 loss.margin 8.683780465876396 +753 88 loss.adversarial_temperature 0.7021959536386616 +753 88 negative_sampler.num_negs_per_pos 12.0 +753 88 training.batch_size 0.0 +753 89 model.embedding_dim 1.0 +753 89 model.relation_dim 1.0 +753 89 loss.margin 6.373224755789835 +753 89 loss.adversarial_temperature 0.4798798078226575 +753 89 negative_sampler.num_negs_per_pos 36.0 +753 89 training.batch_size 2.0 +753 90 model.embedding_dim 2.0 +753 90 model.relation_dim 1.0 +753 90 loss.margin 9.15820823069375 +753 90 loss.adversarial_temperature 0.7037587801474392 +753 90 negative_sampler.num_negs_per_pos 73.0 +753 90 training.batch_size 0.0 +753 91 model.embedding_dim 1.0 +753 91 model.relation_dim 1.0 +753 91 loss.margin 17.69603360311097 +753 91 loss.adversarial_temperature 0.36000395460827783 +753 91 negative_sampler.num_negs_per_pos 41.0 +753 91 training.batch_size 1.0 +753 92 model.embedding_dim 0.0 +753 92 model.relation_dim 1.0 +753 92 loss.margin 14.236213011403379 +753 92 loss.adversarial_temperature 0.4901152292883133 +753 92 negative_sampler.num_negs_per_pos 31.0 +753 92 training.batch_size 2.0 +753 93 model.embedding_dim 2.0 +753 93 model.relation_dim 2.0 +753 93 loss.margin 11.3803661507527 +753 93 loss.adversarial_temperature 0.7892148550140436 +753 93 negative_sampler.num_negs_per_pos 46.0 +753 93 training.batch_size 1.0 +753 94 model.embedding_dim 2.0 +753 94 model.relation_dim 0.0 +753 94 loss.margin 27.68349844735354 +753 94 loss.adversarial_temperature 
0.6885511377643917 +753 94 negative_sampler.num_negs_per_pos 74.0 +753 94 training.batch_size 1.0 +753 95 model.embedding_dim 1.0 +753 95 model.relation_dim 2.0 +753 95 loss.margin 16.597018296782426 +753 95 loss.adversarial_temperature 0.889382224110737 +753 95 negative_sampler.num_negs_per_pos 80.0 +753 95 training.batch_size 2.0 +753 96 model.embedding_dim 1.0 +753 96 model.relation_dim 0.0 +753 96 loss.margin 18.546939434820057 +753 96 loss.adversarial_temperature 0.8050752818714425 +753 96 negative_sampler.num_negs_per_pos 69.0 +753 96 training.batch_size 1.0 +753 97 model.embedding_dim 1.0 +753 97 model.relation_dim 0.0 +753 97 loss.margin 26.31569961110286 +753 97 loss.adversarial_temperature 0.9534114629738094 +753 97 negative_sampler.num_negs_per_pos 58.0 +753 97 training.batch_size 1.0 +753 98 model.embedding_dim 1.0 +753 98 model.relation_dim 0.0 +753 98 loss.margin 12.47388262072205 +753 98 loss.adversarial_temperature 0.9411035802722738 +753 98 negative_sampler.num_negs_per_pos 88.0 +753 98 training.batch_size 1.0 +753 99 model.embedding_dim 0.0 +753 99 model.relation_dim 1.0 +753 99 loss.margin 28.938513146085192 +753 99 loss.adversarial_temperature 0.7100255893874639 +753 99 negative_sampler.num_negs_per_pos 61.0 +753 99 training.batch_size 0.0 +753 100 model.embedding_dim 0.0 +753 100 model.relation_dim 2.0 +753 100 loss.margin 12.573011025507055 +753 100 loss.adversarial_temperature 0.9738274577137314 +753 100 negative_sampler.num_negs_per_pos 2.0 +753 100 training.batch_size 2.0 +753 1 dataset """kinships""" +753 1 model """transd""" +753 1 loss """nssa""" +753 1 regularizer """no""" +753 1 optimizer """adadelta""" +753 1 training_loop """owa""" +753 1 negative_sampler """basic""" +753 1 evaluator """rankbased""" +753 2 dataset """kinships""" +753 2 model """transd""" +753 2 loss """nssa""" +753 2 regularizer """no""" +753 2 optimizer """adadelta""" +753 2 training_loop """owa""" +753 2 negative_sampler """basic""" +753 2 evaluator """rankbased""" 
+753 3 dataset """kinships""" +753 3 model """transd""" +753 3 loss """nssa""" +753 3 regularizer """no""" +753 3 optimizer """adadelta""" +753 3 training_loop """owa""" +753 3 negative_sampler """basic""" +753 3 evaluator """rankbased""" +753 4 dataset """kinships""" +753 4 model """transd""" +753 4 loss """nssa""" +753 4 regularizer """no""" +753 4 optimizer """adadelta""" +753 4 training_loop """owa""" +753 4 negative_sampler """basic""" +753 4 evaluator """rankbased""" +753 5 dataset """kinships""" +753 5 model """transd""" +753 5 loss """nssa""" +753 5 regularizer """no""" +753 5 optimizer """adadelta""" +753 5 training_loop """owa""" +753 5 negative_sampler """basic""" +753 5 evaluator """rankbased""" +753 6 dataset """kinships""" +753 6 model """transd""" +753 6 loss """nssa""" +753 6 regularizer """no""" +753 6 optimizer """adadelta""" +753 6 training_loop """owa""" +753 6 negative_sampler """basic""" +753 6 evaluator """rankbased""" +753 7 dataset """kinships""" +753 7 model """transd""" +753 7 loss """nssa""" +753 7 regularizer """no""" +753 7 optimizer """adadelta""" +753 7 training_loop """owa""" +753 7 negative_sampler """basic""" +753 7 evaluator """rankbased""" +753 8 dataset """kinships""" +753 8 model """transd""" +753 8 loss """nssa""" +753 8 regularizer """no""" +753 8 optimizer """adadelta""" +753 8 training_loop """owa""" +753 8 negative_sampler """basic""" +753 8 evaluator """rankbased""" +753 9 dataset """kinships""" +753 9 model """transd""" +753 9 loss """nssa""" +753 9 regularizer """no""" +753 9 optimizer """adadelta""" +753 9 training_loop """owa""" +753 9 negative_sampler """basic""" +753 9 evaluator """rankbased""" +753 10 dataset """kinships""" +753 10 model """transd""" +753 10 loss """nssa""" +753 10 regularizer """no""" +753 10 optimizer """adadelta""" +753 10 training_loop """owa""" +753 10 negative_sampler """basic""" +753 10 evaluator """rankbased""" +753 11 dataset """kinships""" +753 11 model """transd""" +753 11 loss 
"""nssa""" +753 11 regularizer """no""" +753 11 optimizer """adadelta""" +753 11 training_loop """owa""" +753 11 negative_sampler """basic""" +753 11 evaluator """rankbased""" +753 12 dataset """kinships""" +753 12 model """transd""" +753 12 loss """nssa""" +753 12 regularizer """no""" +753 12 optimizer """adadelta""" +753 12 training_loop """owa""" +753 12 negative_sampler """basic""" +753 12 evaluator """rankbased""" +753 13 dataset """kinships""" +753 13 model """transd""" +753 13 loss """nssa""" +753 13 regularizer """no""" +753 13 optimizer """adadelta""" +753 13 training_loop """owa""" +753 13 negative_sampler """basic""" +753 13 evaluator """rankbased""" +753 14 dataset """kinships""" +753 14 model """transd""" +753 14 loss """nssa""" +753 14 regularizer """no""" +753 14 optimizer """adadelta""" +753 14 training_loop """owa""" +753 14 negative_sampler """basic""" +753 14 evaluator """rankbased""" +753 15 dataset """kinships""" +753 15 model """transd""" +753 15 loss """nssa""" +753 15 regularizer """no""" +753 15 optimizer """adadelta""" +753 15 training_loop """owa""" +753 15 negative_sampler """basic""" +753 15 evaluator """rankbased""" +753 16 dataset """kinships""" +753 16 model """transd""" +753 16 loss """nssa""" +753 16 regularizer """no""" +753 16 optimizer """adadelta""" +753 16 training_loop """owa""" +753 16 negative_sampler """basic""" +753 16 evaluator """rankbased""" +753 17 dataset """kinships""" +753 17 model """transd""" +753 17 loss """nssa""" +753 17 regularizer """no""" +753 17 optimizer """adadelta""" +753 17 training_loop """owa""" +753 17 negative_sampler """basic""" +753 17 evaluator """rankbased""" +753 18 dataset """kinships""" +753 18 model """transd""" +753 18 loss """nssa""" +753 18 regularizer """no""" +753 18 optimizer """adadelta""" +753 18 training_loop """owa""" +753 18 negative_sampler """basic""" +753 18 evaluator """rankbased""" +753 19 dataset """kinships""" +753 19 model """transd""" +753 19 loss """nssa""" +753 19 
regularizer """no""" +753 19 optimizer """adadelta""" +753 19 training_loop """owa""" +753 19 negative_sampler """basic""" +753 19 evaluator """rankbased""" +753 20 dataset """kinships""" +753 20 model """transd""" +753 20 loss """nssa""" +753 20 regularizer """no""" +753 20 optimizer """adadelta""" +753 20 training_loop """owa""" +753 20 negative_sampler """basic""" +753 20 evaluator """rankbased""" +753 21 dataset """kinships""" +753 21 model """transd""" +753 21 loss """nssa""" +753 21 regularizer """no""" +753 21 optimizer """adadelta""" +753 21 training_loop """owa""" +753 21 negative_sampler """basic""" +753 21 evaluator """rankbased""" +753 22 dataset """kinships""" +753 22 model """transd""" +753 22 loss """nssa""" +753 22 regularizer """no""" +753 22 optimizer """adadelta""" +753 22 training_loop """owa""" +753 22 negative_sampler """basic""" +753 22 evaluator """rankbased""" +753 23 dataset """kinships""" +753 23 model """transd""" +753 23 loss """nssa""" +753 23 regularizer """no""" +753 23 optimizer """adadelta""" +753 23 training_loop """owa""" +753 23 negative_sampler """basic""" +753 23 evaluator """rankbased""" +753 24 dataset """kinships""" +753 24 model """transd""" +753 24 loss """nssa""" +753 24 regularizer """no""" +753 24 optimizer """adadelta""" +753 24 training_loop """owa""" +753 24 negative_sampler """basic""" +753 24 evaluator """rankbased""" +753 25 dataset """kinships""" +753 25 model """transd""" +753 25 loss """nssa""" +753 25 regularizer """no""" +753 25 optimizer """adadelta""" +753 25 training_loop """owa""" +753 25 negative_sampler """basic""" +753 25 evaluator """rankbased""" +753 26 dataset """kinships""" +753 26 model """transd""" +753 26 loss """nssa""" +753 26 regularizer """no""" +753 26 optimizer """adadelta""" +753 26 training_loop """owa""" +753 26 negative_sampler """basic""" +753 26 evaluator """rankbased""" +753 27 dataset """kinships""" +753 27 model """transd""" +753 27 loss """nssa""" +753 27 regularizer """no""" 
+753 27 optimizer """adadelta""" +753 27 training_loop """owa""" +753 27 negative_sampler """basic""" +753 27 evaluator """rankbased""" +753 28 dataset """kinships""" +753 28 model """transd""" +753 28 loss """nssa""" +753 28 regularizer """no""" +753 28 optimizer """adadelta""" +753 28 training_loop """owa""" +753 28 negative_sampler """basic""" +753 28 evaluator """rankbased""" +753 29 dataset """kinships""" +753 29 model """transd""" +753 29 loss """nssa""" +753 29 regularizer """no""" +753 29 optimizer """adadelta""" +753 29 training_loop """owa""" +753 29 negative_sampler """basic""" +753 29 evaluator """rankbased""" +753 30 dataset """kinships""" +753 30 model """transd""" +753 30 loss """nssa""" +753 30 regularizer """no""" +753 30 optimizer """adadelta""" +753 30 training_loop """owa""" +753 30 negative_sampler """basic""" +753 30 evaluator """rankbased""" +753 31 dataset """kinships""" +753 31 model """transd""" +753 31 loss """nssa""" +753 31 regularizer """no""" +753 31 optimizer """adadelta""" +753 31 training_loop """owa""" +753 31 negative_sampler """basic""" +753 31 evaluator """rankbased""" +753 32 dataset """kinships""" +753 32 model """transd""" +753 32 loss """nssa""" +753 32 regularizer """no""" +753 32 optimizer """adadelta""" +753 32 training_loop """owa""" +753 32 negative_sampler """basic""" +753 32 evaluator """rankbased""" +753 33 dataset """kinships""" +753 33 model """transd""" +753 33 loss """nssa""" +753 33 regularizer """no""" +753 33 optimizer """adadelta""" +753 33 training_loop """owa""" +753 33 negative_sampler """basic""" +753 33 evaluator """rankbased""" +753 34 dataset """kinships""" +753 34 model """transd""" +753 34 loss """nssa""" +753 34 regularizer """no""" +753 34 optimizer """adadelta""" +753 34 training_loop """owa""" +753 34 negative_sampler """basic""" +753 34 evaluator """rankbased""" +753 35 dataset """kinships""" +753 35 model """transd""" +753 35 loss """nssa""" +753 35 regularizer """no""" +753 35 optimizer 
"""adadelta""" +753 35 training_loop """owa""" +753 35 negative_sampler """basic""" +753 35 evaluator """rankbased""" +753 36 dataset """kinships""" +753 36 model """transd""" +753 36 loss """nssa""" +753 36 regularizer """no""" +753 36 optimizer """adadelta""" +753 36 training_loop """owa""" +753 36 negative_sampler """basic""" +753 36 evaluator """rankbased""" +753 37 dataset """kinships""" +753 37 model """transd""" +753 37 loss """nssa""" +753 37 regularizer """no""" +753 37 optimizer """adadelta""" +753 37 training_loop """owa""" +753 37 negative_sampler """basic""" +753 37 evaluator """rankbased""" +753 38 dataset """kinships""" +753 38 model """transd""" +753 38 loss """nssa""" +753 38 regularizer """no""" +753 38 optimizer """adadelta""" +753 38 training_loop """owa""" +753 38 negative_sampler """basic""" +753 38 evaluator """rankbased""" +753 39 dataset """kinships""" +753 39 model """transd""" +753 39 loss """nssa""" +753 39 regularizer """no""" +753 39 optimizer """adadelta""" +753 39 training_loop """owa""" +753 39 negative_sampler """basic""" +753 39 evaluator """rankbased""" +753 40 dataset """kinships""" +753 40 model """transd""" +753 40 loss """nssa""" +753 40 regularizer """no""" +753 40 optimizer """adadelta""" +753 40 training_loop """owa""" +753 40 negative_sampler """basic""" +753 40 evaluator """rankbased""" +753 41 dataset """kinships""" +753 41 model """transd""" +753 41 loss """nssa""" +753 41 regularizer """no""" +753 41 optimizer """adadelta""" +753 41 training_loop """owa""" +753 41 negative_sampler """basic""" +753 41 evaluator """rankbased""" +753 42 dataset """kinships""" +753 42 model """transd""" +753 42 loss """nssa""" +753 42 regularizer """no""" +753 42 optimizer """adadelta""" +753 42 training_loop """owa""" +753 42 negative_sampler """basic""" +753 42 evaluator """rankbased""" +753 43 dataset """kinships""" +753 43 model """transd""" +753 43 loss """nssa""" +753 43 regularizer """no""" +753 43 optimizer """adadelta""" +753 43 
training_loop """owa""" +753 43 negative_sampler """basic""" +753 43 evaluator """rankbased""" +753 44 dataset """kinships""" +753 44 model """transd""" +753 44 loss """nssa""" +753 44 regularizer """no""" +753 44 optimizer """adadelta""" +753 44 training_loop """owa""" +753 44 negative_sampler """basic""" +753 44 evaluator """rankbased""" +753 45 dataset """kinships""" +753 45 model """transd""" +753 45 loss """nssa""" +753 45 regularizer """no""" +753 45 optimizer """adadelta""" +753 45 training_loop """owa""" +753 45 negative_sampler """basic""" +753 45 evaluator """rankbased""" +753 46 dataset """kinships""" +753 46 model """transd""" +753 46 loss """nssa""" +753 46 regularizer """no""" +753 46 optimizer """adadelta""" +753 46 training_loop """owa""" +753 46 negative_sampler """basic""" +753 46 evaluator """rankbased""" +753 47 dataset """kinships""" +753 47 model """transd""" +753 47 loss """nssa""" +753 47 regularizer """no""" +753 47 optimizer """adadelta""" +753 47 training_loop """owa""" +753 47 negative_sampler """basic""" +753 47 evaluator """rankbased""" +753 48 dataset """kinships""" +753 48 model """transd""" +753 48 loss """nssa""" +753 48 regularizer """no""" +753 48 optimizer """adadelta""" +753 48 training_loop """owa""" +753 48 negative_sampler """basic""" +753 48 evaluator """rankbased""" +753 49 dataset """kinships""" +753 49 model """transd""" +753 49 loss """nssa""" +753 49 regularizer """no""" +753 49 optimizer """adadelta""" +753 49 training_loop """owa""" +753 49 negative_sampler """basic""" +753 49 evaluator """rankbased""" +753 50 dataset """kinships""" +753 50 model """transd""" +753 50 loss """nssa""" +753 50 regularizer """no""" +753 50 optimizer """adadelta""" +753 50 training_loop """owa""" +753 50 negative_sampler """basic""" +753 50 evaluator """rankbased""" +753 51 dataset """kinships""" +753 51 model """transd""" +753 51 loss """nssa""" +753 51 regularizer """no""" +753 51 optimizer """adadelta""" +753 51 training_loop """owa""" 
+753 51 negative_sampler """basic""" +753 51 evaluator """rankbased""" +753 52 dataset """kinships""" +753 52 model """transd""" +753 52 loss """nssa""" +753 52 regularizer """no""" +753 52 optimizer """adadelta""" +753 52 training_loop """owa""" +753 52 negative_sampler """basic""" +753 52 evaluator """rankbased""" +753 53 dataset """kinships""" +753 53 model """transd""" +753 53 loss """nssa""" +753 53 regularizer """no""" +753 53 optimizer """adadelta""" +753 53 training_loop """owa""" +753 53 negative_sampler """basic""" +753 53 evaluator """rankbased""" +753 54 dataset """kinships""" +753 54 model """transd""" +753 54 loss """nssa""" +753 54 regularizer """no""" +753 54 optimizer """adadelta""" +753 54 training_loop """owa""" +753 54 negative_sampler """basic""" +753 54 evaluator """rankbased""" +753 55 dataset """kinships""" +753 55 model """transd""" +753 55 loss """nssa""" +753 55 regularizer """no""" +753 55 optimizer """adadelta""" +753 55 training_loop """owa""" +753 55 negative_sampler """basic""" +753 55 evaluator """rankbased""" +753 56 dataset """kinships""" +753 56 model """transd""" +753 56 loss """nssa""" +753 56 regularizer """no""" +753 56 optimizer """adadelta""" +753 56 training_loop """owa""" +753 56 negative_sampler """basic""" +753 56 evaluator """rankbased""" +753 57 dataset """kinships""" +753 57 model """transd""" +753 57 loss """nssa""" +753 57 regularizer """no""" +753 57 optimizer """adadelta""" +753 57 training_loop """owa""" +753 57 negative_sampler """basic""" +753 57 evaluator """rankbased""" +753 58 dataset """kinships""" +753 58 model """transd""" +753 58 loss """nssa""" +753 58 regularizer """no""" +753 58 optimizer """adadelta""" +753 58 training_loop """owa""" +753 58 negative_sampler """basic""" +753 58 evaluator """rankbased""" +753 59 dataset """kinships""" +753 59 model """transd""" +753 59 loss """nssa""" +753 59 regularizer """no""" +753 59 optimizer """adadelta""" +753 59 training_loop """owa""" +753 59 
negative_sampler """basic""" +753 59 evaluator """rankbased""" +753 60 dataset """kinships""" +753 60 model """transd""" +753 60 loss """nssa""" +753 60 regularizer """no""" +753 60 optimizer """adadelta""" +753 60 training_loop """owa""" +753 60 negative_sampler """basic""" +753 60 evaluator """rankbased""" +753 61 dataset """kinships""" +753 61 model """transd""" +753 61 loss """nssa""" +753 61 regularizer """no""" +753 61 optimizer """adadelta""" +753 61 training_loop """owa""" +753 61 negative_sampler """basic""" +753 61 evaluator """rankbased""" +753 62 dataset """kinships""" +753 62 model """transd""" +753 62 loss """nssa""" +753 62 regularizer """no""" +753 62 optimizer """adadelta""" +753 62 training_loop """owa""" +753 62 negative_sampler """basic""" +753 62 evaluator """rankbased""" +753 63 dataset """kinships""" +753 63 model """transd""" +753 63 loss """nssa""" +753 63 regularizer """no""" +753 63 optimizer """adadelta""" +753 63 training_loop """owa""" +753 63 negative_sampler """basic""" +753 63 evaluator """rankbased""" +753 64 dataset """kinships""" +753 64 model """transd""" +753 64 loss """nssa""" +753 64 regularizer """no""" +753 64 optimizer """adadelta""" +753 64 training_loop """owa""" +753 64 negative_sampler """basic""" +753 64 evaluator """rankbased""" +753 65 dataset """kinships""" +753 65 model """transd""" +753 65 loss """nssa""" +753 65 regularizer """no""" +753 65 optimizer """adadelta""" +753 65 training_loop """owa""" +753 65 negative_sampler """basic""" +753 65 evaluator """rankbased""" +753 66 dataset """kinships""" +753 66 model """transd""" +753 66 loss """nssa""" +753 66 regularizer """no""" +753 66 optimizer """adadelta""" +753 66 training_loop """owa""" +753 66 negative_sampler """basic""" +753 66 evaluator """rankbased""" +753 67 dataset """kinships""" +753 67 model """transd""" +753 67 loss """nssa""" +753 67 regularizer """no""" +753 67 optimizer """adadelta""" +753 67 training_loop """owa""" +753 67 negative_sampler 
"""basic""" +753 67 evaluator """rankbased""" +753 68 dataset """kinships""" +753 68 model """transd""" +753 68 loss """nssa""" +753 68 regularizer """no""" +753 68 optimizer """adadelta""" +753 68 training_loop """owa""" +753 68 negative_sampler """basic""" +753 68 evaluator """rankbased""" +753 69 dataset """kinships""" +753 69 model """transd""" +753 69 loss """nssa""" +753 69 regularizer """no""" +753 69 optimizer """adadelta""" +753 69 training_loop """owa""" +753 69 negative_sampler """basic""" +753 69 evaluator """rankbased""" +753 70 dataset """kinships""" +753 70 model """transd""" +753 70 loss """nssa""" +753 70 regularizer """no""" +753 70 optimizer """adadelta""" +753 70 training_loop """owa""" +753 70 negative_sampler """basic""" +753 70 evaluator """rankbased""" +753 71 dataset """kinships""" +753 71 model """transd""" +753 71 loss """nssa""" +753 71 regularizer """no""" +753 71 optimizer """adadelta""" +753 71 training_loop """owa""" +753 71 negative_sampler """basic""" +753 71 evaluator """rankbased""" +753 72 dataset """kinships""" +753 72 model """transd""" +753 72 loss """nssa""" +753 72 regularizer """no""" +753 72 optimizer """adadelta""" +753 72 training_loop """owa""" +753 72 negative_sampler """basic""" +753 72 evaluator """rankbased""" +753 73 dataset """kinships""" +753 73 model """transd""" +753 73 loss """nssa""" +753 73 regularizer """no""" +753 73 optimizer """adadelta""" +753 73 training_loop """owa""" +753 73 negative_sampler """basic""" +753 73 evaluator """rankbased""" +753 74 dataset """kinships""" +753 74 model """transd""" +753 74 loss """nssa""" +753 74 regularizer """no""" +753 74 optimizer """adadelta""" +753 74 training_loop """owa""" +753 74 negative_sampler """basic""" +753 74 evaluator """rankbased""" +753 75 dataset """kinships""" +753 75 model """transd""" +753 75 loss """nssa""" +753 75 regularizer """no""" +753 75 optimizer """adadelta""" +753 75 training_loop """owa""" +753 75 negative_sampler """basic""" +753 75 
evaluator """rankbased""" +753 76 dataset """kinships""" +753 76 model """transd""" +753 76 loss """nssa""" +753 76 regularizer """no""" +753 76 optimizer """adadelta""" +753 76 training_loop """owa""" +753 76 negative_sampler """basic""" +753 76 evaluator """rankbased""" +753 77 dataset """kinships""" +753 77 model """transd""" +753 77 loss """nssa""" +753 77 regularizer """no""" +753 77 optimizer """adadelta""" +753 77 training_loop """owa""" +753 77 negative_sampler """basic""" +753 77 evaluator """rankbased""" +753 78 dataset """kinships""" +753 78 model """transd""" +753 78 loss """nssa""" +753 78 regularizer """no""" +753 78 optimizer """adadelta""" +753 78 training_loop """owa""" +753 78 negative_sampler """basic""" +753 78 evaluator """rankbased""" +753 79 dataset """kinships""" +753 79 model """transd""" +753 79 loss """nssa""" +753 79 regularizer """no""" +753 79 optimizer """adadelta""" +753 79 training_loop """owa""" +753 79 negative_sampler """basic""" +753 79 evaluator """rankbased""" +753 80 dataset """kinships""" +753 80 model """transd""" +753 80 loss """nssa""" +753 80 regularizer """no""" +753 80 optimizer """adadelta""" +753 80 training_loop """owa""" +753 80 negative_sampler """basic""" +753 80 evaluator """rankbased""" +753 81 dataset """kinships""" +753 81 model """transd""" +753 81 loss """nssa""" +753 81 regularizer """no""" +753 81 optimizer """adadelta""" +753 81 training_loop """owa""" +753 81 negative_sampler """basic""" +753 81 evaluator """rankbased""" +753 82 dataset """kinships""" +753 82 model """transd""" +753 82 loss """nssa""" +753 82 regularizer """no""" +753 82 optimizer """adadelta""" +753 82 training_loop """owa""" +753 82 negative_sampler """basic""" +753 82 evaluator """rankbased""" +753 83 dataset """kinships""" +753 83 model """transd""" +753 83 loss """nssa""" +753 83 regularizer """no""" +753 83 optimizer """adadelta""" +753 83 training_loop """owa""" +753 83 negative_sampler """basic""" +753 83 evaluator 
"""rankbased""" +753 84 dataset """kinships""" +753 84 model """transd""" +753 84 loss """nssa""" +753 84 regularizer """no""" +753 84 optimizer """adadelta""" +753 84 training_loop """owa""" +753 84 negative_sampler """basic""" +753 84 evaluator """rankbased""" +753 85 dataset """kinships""" +753 85 model """transd""" +753 85 loss """nssa""" +753 85 regularizer """no""" +753 85 optimizer """adadelta""" +753 85 training_loop """owa""" +753 85 negative_sampler """basic""" +753 85 evaluator """rankbased""" +753 86 dataset """kinships""" +753 86 model """transd""" +753 86 loss """nssa""" +753 86 regularizer """no""" +753 86 optimizer """adadelta""" +753 86 training_loop """owa""" +753 86 negative_sampler """basic""" +753 86 evaluator """rankbased""" +753 87 dataset """kinships""" +753 87 model """transd""" +753 87 loss """nssa""" +753 87 regularizer """no""" +753 87 optimizer """adadelta""" +753 87 training_loop """owa""" +753 87 negative_sampler """basic""" +753 87 evaluator """rankbased""" +753 88 dataset """kinships""" +753 88 model """transd""" +753 88 loss """nssa""" +753 88 regularizer """no""" +753 88 optimizer """adadelta""" +753 88 training_loop """owa""" +753 88 negative_sampler """basic""" +753 88 evaluator """rankbased""" +753 89 dataset """kinships""" +753 89 model """transd""" +753 89 loss """nssa""" +753 89 regularizer """no""" +753 89 optimizer """adadelta""" +753 89 training_loop """owa""" +753 89 negative_sampler """basic""" +753 89 evaluator """rankbased""" +753 90 dataset """kinships""" +753 90 model """transd""" +753 90 loss """nssa""" +753 90 regularizer """no""" +753 90 optimizer """adadelta""" +753 90 training_loop """owa""" +753 90 negative_sampler """basic""" +753 90 evaluator """rankbased""" +753 91 dataset """kinships""" +753 91 model """transd""" +753 91 loss """nssa""" +753 91 regularizer """no""" +753 91 optimizer """adadelta""" +753 91 training_loop """owa""" +753 91 negative_sampler """basic""" +753 91 evaluator """rankbased""" +753 92 
dataset """kinships""" +753 92 model """transd""" +753 92 loss """nssa""" +753 92 regularizer """no""" +753 92 optimizer """adadelta""" +753 92 training_loop """owa""" +753 92 negative_sampler """basic""" +753 92 evaluator """rankbased""" +753 93 dataset """kinships""" +753 93 model """transd""" +753 93 loss """nssa""" +753 93 regularizer """no""" +753 93 optimizer """adadelta""" +753 93 training_loop """owa""" +753 93 negative_sampler """basic""" +753 93 evaluator """rankbased""" +753 94 dataset """kinships""" +753 94 model """transd""" +753 94 loss """nssa""" +753 94 regularizer """no""" +753 94 optimizer """adadelta""" +753 94 training_loop """owa""" +753 94 negative_sampler """basic""" +753 94 evaluator """rankbased""" +753 95 dataset """kinships""" +753 95 model """transd""" +753 95 loss """nssa""" +753 95 regularizer """no""" +753 95 optimizer """adadelta""" +753 95 training_loop """owa""" +753 95 negative_sampler """basic""" +753 95 evaluator """rankbased""" +753 96 dataset """kinships""" +753 96 model """transd""" +753 96 loss """nssa""" +753 96 regularizer """no""" +753 96 optimizer """adadelta""" +753 96 training_loop """owa""" +753 96 negative_sampler """basic""" +753 96 evaluator """rankbased""" +753 97 dataset """kinships""" +753 97 model """transd""" +753 97 loss """nssa""" +753 97 regularizer """no""" +753 97 optimizer """adadelta""" +753 97 training_loop """owa""" +753 97 negative_sampler """basic""" +753 97 evaluator """rankbased""" +753 98 dataset """kinships""" +753 98 model """transd""" +753 98 loss """nssa""" +753 98 regularizer """no""" +753 98 optimizer """adadelta""" +753 98 training_loop """owa""" +753 98 negative_sampler """basic""" +753 98 evaluator """rankbased""" +753 99 dataset """kinships""" +753 99 model """transd""" +753 99 loss """nssa""" +753 99 regularizer """no""" +753 99 optimizer """adadelta""" +753 99 training_loop """owa""" +753 99 negative_sampler """basic""" +753 99 evaluator """rankbased""" +753 100 dataset """kinships""" 
+753 100 model """transd""" +753 100 loss """nssa""" +753 100 regularizer """no""" +753 100 optimizer """adadelta""" +753 100 training_loop """owa""" +753 100 negative_sampler """basic""" +753 100 evaluator """rankbased""" +754 1 model.embedding_dim 2.0 +754 1 model.relation_dim 0.0 +754 1 loss.margin 12.172361370322605 +754 1 loss.adversarial_temperature 0.5170975145133294 +754 1 negative_sampler.num_negs_per_pos 68.0 +754 1 training.batch_size 1.0 +754 2 model.embedding_dim 1.0 +754 2 model.relation_dim 2.0 +754 2 loss.margin 13.777437979662526 +754 2 loss.adversarial_temperature 0.8685994427472288 +754 2 negative_sampler.num_negs_per_pos 5.0 +754 2 training.batch_size 1.0 +754 3 model.embedding_dim 0.0 +754 3 model.relation_dim 1.0 +754 3 loss.margin 5.297311232929578 +754 3 loss.adversarial_temperature 0.8433140590024717 +754 3 negative_sampler.num_negs_per_pos 36.0 +754 3 training.batch_size 2.0 +754 4 model.embedding_dim 1.0 +754 4 model.relation_dim 1.0 +754 4 loss.margin 6.893755942934323 +754 4 loss.adversarial_temperature 0.4344398150252061 +754 4 negative_sampler.num_negs_per_pos 59.0 +754 4 training.batch_size 2.0 +754 5 model.embedding_dim 0.0 +754 5 model.relation_dim 1.0 +754 5 loss.margin 11.321995511012213 +754 5 loss.adversarial_temperature 0.6129401608864621 +754 5 negative_sampler.num_negs_per_pos 57.0 +754 5 training.batch_size 1.0 +754 6 model.embedding_dim 2.0 +754 6 model.relation_dim 1.0 +754 6 loss.margin 19.293706538083587 +754 6 loss.adversarial_temperature 0.533126701794326 +754 6 negative_sampler.num_negs_per_pos 82.0 +754 6 training.batch_size 2.0 +754 7 model.embedding_dim 2.0 +754 7 model.relation_dim 0.0 +754 7 loss.margin 3.600893345813258 +754 7 loss.adversarial_temperature 0.9317309593754755 +754 7 negative_sampler.num_negs_per_pos 86.0 +754 7 training.batch_size 2.0 +754 8 model.embedding_dim 0.0 +754 8 model.relation_dim 2.0 +754 8 loss.margin 3.055545155577433 +754 8 loss.adversarial_temperature 0.26547915088581453 +754 8 
negative_sampler.num_negs_per_pos 28.0 +754 8 training.batch_size 2.0 +754 9 model.embedding_dim 1.0 +754 9 model.relation_dim 2.0 +754 9 loss.margin 21.915259479269874 +754 9 loss.adversarial_temperature 0.6876239549983905 +754 9 negative_sampler.num_negs_per_pos 10.0 +754 9 training.batch_size 1.0 +754 10 model.embedding_dim 1.0 +754 10 model.relation_dim 0.0 +754 10 loss.margin 7.094950756461018 +754 10 loss.adversarial_temperature 0.8822581790636641 +754 10 negative_sampler.num_negs_per_pos 31.0 +754 10 training.batch_size 0.0 +754 11 model.embedding_dim 0.0 +754 11 model.relation_dim 0.0 +754 11 loss.margin 8.58263203335737 +754 11 loss.adversarial_temperature 0.6507848960496532 +754 11 negative_sampler.num_negs_per_pos 30.0 +754 11 training.batch_size 0.0 +754 12 model.embedding_dim 0.0 +754 12 model.relation_dim 0.0 +754 12 loss.margin 7.814998267588389 +754 12 loss.adversarial_temperature 0.3612842204694896 +754 12 negative_sampler.num_negs_per_pos 50.0 +754 12 training.batch_size 2.0 +754 13 model.embedding_dim 2.0 +754 13 model.relation_dim 2.0 +754 13 loss.margin 7.436935513973371 +754 13 loss.adversarial_temperature 0.5454293133659348 +754 13 negative_sampler.num_negs_per_pos 98.0 +754 13 training.batch_size 1.0 +754 14 model.embedding_dim 1.0 +754 14 model.relation_dim 1.0 +754 14 loss.margin 14.110305226708293 +754 14 loss.adversarial_temperature 0.5998187463377086 +754 14 negative_sampler.num_negs_per_pos 45.0 +754 14 training.batch_size 0.0 +754 15 model.embedding_dim 2.0 +754 15 model.relation_dim 0.0 +754 15 loss.margin 10.16699573997595 +754 15 loss.adversarial_temperature 0.11628891576439841 +754 15 negative_sampler.num_negs_per_pos 5.0 +754 15 training.batch_size 2.0 +754 16 model.embedding_dim 2.0 +754 16 model.relation_dim 0.0 +754 16 loss.margin 17.780017632989072 +754 16 loss.adversarial_temperature 0.2006637172253818 +754 16 negative_sampler.num_negs_per_pos 67.0 +754 16 training.batch_size 2.0 +754 17 model.embedding_dim 0.0 +754 17 
model.relation_dim 1.0 +754 17 loss.margin 27.57275789619813 +754 17 loss.adversarial_temperature 0.6616473771057735 +754 17 negative_sampler.num_negs_per_pos 46.0 +754 17 training.batch_size 1.0 +754 18 model.embedding_dim 2.0 +754 18 model.relation_dim 0.0 +754 18 loss.margin 5.971731202639175 +754 18 loss.adversarial_temperature 0.2175282715995734 +754 18 negative_sampler.num_negs_per_pos 56.0 +754 18 training.batch_size 0.0 +754 19 model.embedding_dim 0.0 +754 19 model.relation_dim 0.0 +754 19 loss.margin 23.99756026886053 +754 19 loss.adversarial_temperature 0.5769182878876083 +754 19 negative_sampler.num_negs_per_pos 34.0 +754 19 training.batch_size 2.0 +754 20 model.embedding_dim 1.0 +754 20 model.relation_dim 2.0 +754 20 loss.margin 21.1826701164016 +754 20 loss.adversarial_temperature 0.38102110352839214 +754 20 negative_sampler.num_negs_per_pos 80.0 +754 20 training.batch_size 0.0 +754 21 model.embedding_dim 1.0 +754 21 model.relation_dim 1.0 +754 21 loss.margin 19.538451384955636 +754 21 loss.adversarial_temperature 0.5342399019734128 +754 21 negative_sampler.num_negs_per_pos 59.0 +754 21 training.batch_size 2.0 +754 22 model.embedding_dim 2.0 +754 22 model.relation_dim 1.0 +754 22 loss.margin 17.556041520555176 +754 22 loss.adversarial_temperature 0.5078974750364902 +754 22 negative_sampler.num_negs_per_pos 58.0 +754 22 training.batch_size 0.0 +754 23 model.embedding_dim 2.0 +754 23 model.relation_dim 0.0 +754 23 loss.margin 2.699158896171068 +754 23 loss.adversarial_temperature 0.7654661404834596 +754 23 negative_sampler.num_negs_per_pos 60.0 +754 23 training.batch_size 0.0 +754 24 model.embedding_dim 1.0 +754 24 model.relation_dim 0.0 +754 24 loss.margin 17.31074561471062 +754 24 loss.adversarial_temperature 0.7494395287404098 +754 24 negative_sampler.num_negs_per_pos 2.0 +754 24 training.batch_size 1.0 +754 25 model.embedding_dim 0.0 +754 25 model.relation_dim 1.0 +754 25 loss.margin 25.636142078954137 +754 25 loss.adversarial_temperature 
0.2952633641322884 +754 25 negative_sampler.num_negs_per_pos 59.0 +754 25 training.batch_size 2.0 +754 26 model.embedding_dim 0.0 +754 26 model.relation_dim 2.0 +754 26 loss.margin 7.810301532289267 +754 26 loss.adversarial_temperature 0.6017212003718612 +754 26 negative_sampler.num_negs_per_pos 86.0 +754 26 training.batch_size 1.0 +754 27 model.embedding_dim 2.0 +754 27 model.relation_dim 0.0 +754 27 loss.margin 5.062732926552204 +754 27 loss.adversarial_temperature 0.5699374868138628 +754 27 negative_sampler.num_negs_per_pos 35.0 +754 27 training.batch_size 0.0 +754 28 model.embedding_dim 2.0 +754 28 model.relation_dim 2.0 +754 28 loss.margin 18.29609313318568 +754 28 loss.adversarial_temperature 0.5965485862578067 +754 28 negative_sampler.num_negs_per_pos 78.0 +754 28 training.batch_size 2.0 +754 29 model.embedding_dim 1.0 +754 29 model.relation_dim 2.0 +754 29 loss.margin 1.3450678495699737 +754 29 loss.adversarial_temperature 0.22567698526519264 +754 29 negative_sampler.num_negs_per_pos 21.0 +754 29 training.batch_size 2.0 +754 30 model.embedding_dim 2.0 +754 30 model.relation_dim 2.0 +754 30 loss.margin 2.082485746878653 +754 30 loss.adversarial_temperature 0.16039347906538473 +754 30 negative_sampler.num_negs_per_pos 96.0 +754 30 training.batch_size 0.0 +754 31 model.embedding_dim 0.0 +754 31 model.relation_dim 1.0 +754 31 loss.margin 27.234804889891393 +754 31 loss.adversarial_temperature 0.15270917747007612 +754 31 negative_sampler.num_negs_per_pos 82.0 +754 31 training.batch_size 2.0 +754 32 model.embedding_dim 0.0 +754 32 model.relation_dim 2.0 +754 32 loss.margin 26.170675426886923 +754 32 loss.adversarial_temperature 0.6183633422125187 +754 32 negative_sampler.num_negs_per_pos 73.0 +754 32 training.batch_size 2.0 +754 33 model.embedding_dim 1.0 +754 33 model.relation_dim 0.0 +754 33 loss.margin 4.485961415788195 +754 33 loss.adversarial_temperature 0.6672033521138062 +754 33 negative_sampler.num_negs_per_pos 92.0 +754 33 training.batch_size 1.0 +754 34 
model.embedding_dim 1.0 +754 34 model.relation_dim 2.0 +754 34 loss.margin 28.268970042159243 +754 34 loss.adversarial_temperature 0.27128419399473813 +754 34 negative_sampler.num_negs_per_pos 61.0 +754 34 training.batch_size 0.0 +754 35 model.embedding_dim 1.0 +754 35 model.relation_dim 2.0 +754 35 loss.margin 5.443652361269704 +754 35 loss.adversarial_temperature 0.8936020686219878 +754 35 negative_sampler.num_negs_per_pos 45.0 +754 35 training.batch_size 0.0 +754 36 model.embedding_dim 0.0 +754 36 model.relation_dim 0.0 +754 36 loss.margin 25.289901568807494 +754 36 loss.adversarial_temperature 0.6407413946349525 +754 36 negative_sampler.num_negs_per_pos 89.0 +754 36 training.batch_size 2.0 +754 37 model.embedding_dim 0.0 +754 37 model.relation_dim 2.0 +754 37 loss.margin 14.491058085729886 +754 37 loss.adversarial_temperature 0.24026251445123245 +754 37 negative_sampler.num_negs_per_pos 41.0 +754 37 training.batch_size 0.0 +754 38 model.embedding_dim 1.0 +754 38 model.relation_dim 2.0 +754 38 loss.margin 14.020303079909972 +754 38 loss.adversarial_temperature 0.6541042409306768 +754 38 negative_sampler.num_negs_per_pos 46.0 +754 38 training.batch_size 1.0 +754 39 model.embedding_dim 1.0 +754 39 model.relation_dim 0.0 +754 39 loss.margin 13.768913135243377 +754 39 loss.adversarial_temperature 0.9150175590854926 +754 39 negative_sampler.num_negs_per_pos 52.0 +754 39 training.batch_size 2.0 +754 40 model.embedding_dim 2.0 +754 40 model.relation_dim 0.0 +754 40 loss.margin 6.712666165929251 +754 40 loss.adversarial_temperature 0.7595400162614331 +754 40 negative_sampler.num_negs_per_pos 20.0 +754 40 training.batch_size 0.0 +754 41 model.embedding_dim 0.0 +754 41 model.relation_dim 0.0 +754 41 loss.margin 15.956422610157842 +754 41 loss.adversarial_temperature 0.5325040561866556 +754 41 negative_sampler.num_negs_per_pos 49.0 +754 41 training.batch_size 1.0 +754 42 model.embedding_dim 2.0 +754 42 model.relation_dim 2.0 +754 42 loss.margin 2.9608012076364862 +754 42 
loss.adversarial_temperature 0.6584249315385189 +754 42 negative_sampler.num_negs_per_pos 21.0 +754 42 training.batch_size 0.0 +754 43 model.embedding_dim 2.0 +754 43 model.relation_dim 0.0 +754 43 loss.margin 6.502628649035051 +754 43 loss.adversarial_temperature 0.1698923650012103 +754 43 negative_sampler.num_negs_per_pos 13.0 +754 43 training.batch_size 2.0 +754 44 model.embedding_dim 2.0 +754 44 model.relation_dim 0.0 +754 44 loss.margin 10.564674284165365 +754 44 loss.adversarial_temperature 0.8370538912298308 +754 44 negative_sampler.num_negs_per_pos 67.0 +754 44 training.batch_size 0.0 +754 45 model.embedding_dim 1.0 +754 45 model.relation_dim 2.0 +754 45 loss.margin 24.2616002320911 +754 45 loss.adversarial_temperature 0.6689265159656774 +754 45 negative_sampler.num_negs_per_pos 10.0 +754 45 training.batch_size 2.0 +754 46 model.embedding_dim 2.0 +754 46 model.relation_dim 1.0 +754 46 loss.margin 11.235332389855975 +754 46 loss.adversarial_temperature 0.23466188720931772 +754 46 negative_sampler.num_negs_per_pos 89.0 +754 46 training.batch_size 1.0 +754 47 model.embedding_dim 0.0 +754 47 model.relation_dim 1.0 +754 47 loss.margin 2.587476454828083 +754 47 loss.adversarial_temperature 0.7004701426743153 +754 47 negative_sampler.num_negs_per_pos 50.0 +754 47 training.batch_size 0.0 +754 48 model.embedding_dim 1.0 +754 48 model.relation_dim 0.0 +754 48 loss.margin 8.280187855144813 +754 48 loss.adversarial_temperature 0.9526959977419136 +754 48 negative_sampler.num_negs_per_pos 24.0 +754 48 training.batch_size 1.0 +754 49 model.embedding_dim 1.0 +754 49 model.relation_dim 0.0 +754 49 loss.margin 5.858740637628152 +754 49 loss.adversarial_temperature 0.5443040389191669 +754 49 negative_sampler.num_negs_per_pos 80.0 +754 49 training.batch_size 2.0 +754 50 model.embedding_dim 1.0 +754 50 model.relation_dim 2.0 +754 50 loss.margin 20.360447412547366 +754 50 loss.adversarial_temperature 0.684852612707676 +754 50 negative_sampler.num_negs_per_pos 67.0 +754 50 
training.batch_size 2.0 +754 51 model.embedding_dim 0.0 +754 51 model.relation_dim 0.0 +754 51 loss.margin 27.55061371445766 +754 51 loss.adversarial_temperature 0.5128035203302864 +754 51 negative_sampler.num_negs_per_pos 95.0 +754 51 training.batch_size 1.0 +754 52 model.embedding_dim 0.0 +754 52 model.relation_dim 0.0 +754 52 loss.margin 19.01218838174369 +754 52 loss.adversarial_temperature 0.22225943628233955 +754 52 negative_sampler.num_negs_per_pos 40.0 +754 52 training.batch_size 2.0 +754 53 model.embedding_dim 1.0 +754 53 model.relation_dim 0.0 +754 53 loss.margin 4.767018723225959 +754 53 loss.adversarial_temperature 0.24858404938276257 +754 53 negative_sampler.num_negs_per_pos 86.0 +754 53 training.batch_size 1.0 +754 54 model.embedding_dim 0.0 +754 54 model.relation_dim 1.0 +754 54 loss.margin 25.531970151889144 +754 54 loss.adversarial_temperature 0.5621568750549902 +754 54 negative_sampler.num_negs_per_pos 75.0 +754 54 training.batch_size 2.0 +754 55 model.embedding_dim 2.0 +754 55 model.relation_dim 2.0 +754 55 loss.margin 17.119209657310527 +754 55 loss.adversarial_temperature 0.57120118379113 +754 55 negative_sampler.num_negs_per_pos 88.0 +754 55 training.batch_size 1.0 +754 56 model.embedding_dim 2.0 +754 56 model.relation_dim 0.0 +754 56 loss.margin 12.786350077035586 +754 56 loss.adversarial_temperature 0.641394648257955 +754 56 negative_sampler.num_negs_per_pos 18.0 +754 56 training.batch_size 1.0 +754 57 model.embedding_dim 0.0 +754 57 model.relation_dim 1.0 +754 57 loss.margin 21.711982679717465 +754 57 loss.adversarial_temperature 0.6401046303595349 +754 57 negative_sampler.num_negs_per_pos 51.0 +754 57 training.batch_size 0.0 +754 58 model.embedding_dim 0.0 +754 58 model.relation_dim 2.0 +754 58 loss.margin 17.21669004475747 +754 58 loss.adversarial_temperature 0.6365648599367367 +754 58 negative_sampler.num_negs_per_pos 0.0 +754 58 training.batch_size 2.0 +754 59 model.embedding_dim 1.0 +754 59 model.relation_dim 2.0 +754 59 loss.margin 
9.50008049981491 +754 59 loss.adversarial_temperature 0.9998770904564539 +754 59 negative_sampler.num_negs_per_pos 29.0 +754 59 training.batch_size 2.0 +754 60 model.embedding_dim 1.0 +754 60 model.relation_dim 2.0 +754 60 loss.margin 14.766031574727329 +754 60 loss.adversarial_temperature 0.6441122263159227 +754 60 negative_sampler.num_negs_per_pos 36.0 +754 60 training.batch_size 2.0 +754 61 model.embedding_dim 2.0 +754 61 model.relation_dim 1.0 +754 61 loss.margin 18.21756439844457 +754 61 loss.adversarial_temperature 0.9713634993776231 +754 61 negative_sampler.num_negs_per_pos 87.0 +754 61 training.batch_size 2.0 +754 62 model.embedding_dim 1.0 +754 62 model.relation_dim 0.0 +754 62 loss.margin 15.920546454983533 +754 62 loss.adversarial_temperature 0.4777754858681491 +754 62 negative_sampler.num_negs_per_pos 35.0 +754 62 training.batch_size 2.0 +754 63 model.embedding_dim 0.0 +754 63 model.relation_dim 0.0 +754 63 loss.margin 15.433982489907564 +754 63 loss.adversarial_temperature 0.43743637891884213 +754 63 negative_sampler.num_negs_per_pos 58.0 +754 63 training.batch_size 0.0 +754 64 model.embedding_dim 1.0 +754 64 model.relation_dim 1.0 +754 64 loss.margin 27.083206346781626 +754 64 loss.adversarial_temperature 0.991870755224424 +754 64 negative_sampler.num_negs_per_pos 77.0 +754 64 training.batch_size 1.0 +754 65 model.embedding_dim 0.0 +754 65 model.relation_dim 2.0 +754 65 loss.margin 23.549420968177255 +754 65 loss.adversarial_temperature 0.9678839337260671 +754 65 negative_sampler.num_negs_per_pos 89.0 +754 65 training.batch_size 2.0 +754 66 model.embedding_dim 0.0 +754 66 model.relation_dim 0.0 +754 66 loss.margin 8.879088676104686 +754 66 loss.adversarial_temperature 0.7273400533577286 +754 66 negative_sampler.num_negs_per_pos 40.0 +754 66 training.batch_size 0.0 +754 67 model.embedding_dim 0.0 +754 67 model.relation_dim 0.0 +754 67 loss.margin 13.005167009134134 +754 67 loss.adversarial_temperature 0.14862045077369446 +754 67 
negative_sampler.num_negs_per_pos 53.0 +754 67 training.batch_size 2.0 +754 68 model.embedding_dim 0.0 +754 68 model.relation_dim 2.0 +754 68 loss.margin 12.064322399113365 +754 68 loss.adversarial_temperature 0.35855535143277695 +754 68 negative_sampler.num_negs_per_pos 60.0 +754 68 training.batch_size 2.0 +754 69 model.embedding_dim 1.0 +754 69 model.relation_dim 0.0 +754 69 loss.margin 22.206493125612223 +754 69 loss.adversarial_temperature 0.6726767352703158 +754 69 negative_sampler.num_negs_per_pos 64.0 +754 69 training.batch_size 2.0 +754 70 model.embedding_dim 0.0 +754 70 model.relation_dim 1.0 +754 70 loss.margin 2.587901114823971 +754 70 loss.adversarial_temperature 0.33177818753912025 +754 70 negative_sampler.num_negs_per_pos 98.0 +754 70 training.batch_size 2.0 +754 71 model.embedding_dim 1.0 +754 71 model.relation_dim 0.0 +754 71 loss.margin 17.691070508882817 +754 71 loss.adversarial_temperature 0.5539076520685072 +754 71 negative_sampler.num_negs_per_pos 1.0 +754 71 training.batch_size 0.0 +754 72 model.embedding_dim 1.0 +754 72 model.relation_dim 0.0 +754 72 loss.margin 22.604931922364347 +754 72 loss.adversarial_temperature 0.3614178861918189 +754 72 negative_sampler.num_negs_per_pos 81.0 +754 72 training.batch_size 0.0 +754 73 model.embedding_dim 0.0 +754 73 model.relation_dim 2.0 +754 73 loss.margin 15.922323162194484 +754 73 loss.adversarial_temperature 0.7481152540345681 +754 73 negative_sampler.num_negs_per_pos 97.0 +754 73 training.batch_size 2.0 +754 74 model.embedding_dim 1.0 +754 74 model.relation_dim 1.0 +754 74 loss.margin 7.913062272007842 +754 74 loss.adversarial_temperature 0.8422760894980135 +754 74 negative_sampler.num_negs_per_pos 55.0 +754 74 training.batch_size 2.0 +754 75 model.embedding_dim 2.0 +754 75 model.relation_dim 0.0 +754 75 loss.margin 5.319487693618531 +754 75 loss.adversarial_temperature 0.8481767002152196 +754 75 negative_sampler.num_negs_per_pos 95.0 +754 75 training.batch_size 2.0 +754 76 model.embedding_dim 2.0 
+754 76 model.relation_dim 0.0 +754 76 loss.margin 18.15103700985302 +754 76 loss.adversarial_temperature 0.1867160051717282 +754 76 negative_sampler.num_negs_per_pos 72.0 +754 76 training.batch_size 2.0 +754 77 model.embedding_dim 1.0 +754 77 model.relation_dim 1.0 +754 77 loss.margin 10.36433073000647 +754 77 loss.adversarial_temperature 0.6075491934521074 +754 77 negative_sampler.num_negs_per_pos 7.0 +754 77 training.batch_size 0.0 +754 78 model.embedding_dim 2.0 +754 78 model.relation_dim 0.0 +754 78 loss.margin 29.011433082366295 +754 78 loss.adversarial_temperature 0.20108981431580178 +754 78 negative_sampler.num_negs_per_pos 81.0 +754 78 training.batch_size 1.0 +754 79 model.embedding_dim 2.0 +754 79 model.relation_dim 0.0 +754 79 loss.margin 6.846518943466144 +754 79 loss.adversarial_temperature 0.3919519621150217 +754 79 negative_sampler.num_negs_per_pos 97.0 +754 79 training.batch_size 1.0 +754 80 model.embedding_dim 0.0 +754 80 model.relation_dim 0.0 +754 80 loss.margin 27.40404252838995 +754 80 loss.adversarial_temperature 0.2406780039656365 +754 80 negative_sampler.num_negs_per_pos 11.0 +754 80 training.batch_size 2.0 +754 81 model.embedding_dim 2.0 +754 81 model.relation_dim 1.0 +754 81 loss.margin 1.719993341495051 +754 81 loss.adversarial_temperature 0.11834614242319523 +754 81 negative_sampler.num_negs_per_pos 92.0 +754 81 training.batch_size 0.0 +754 82 model.embedding_dim 0.0 +754 82 model.relation_dim 0.0 +754 82 loss.margin 15.78619241687241 +754 82 loss.adversarial_temperature 0.37627328866040444 +754 82 negative_sampler.num_negs_per_pos 19.0 +754 82 training.batch_size 2.0 +754 83 model.embedding_dim 2.0 +754 83 model.relation_dim 1.0 +754 83 loss.margin 11.236966745489282 +754 83 loss.adversarial_temperature 0.8377064220683931 +754 83 negative_sampler.num_negs_per_pos 49.0 +754 83 training.batch_size 1.0 +754 84 model.embedding_dim 1.0 +754 84 model.relation_dim 1.0 +754 84 loss.margin 19.584976936367806 +754 84 loss.adversarial_temperature 
0.35574580849035886 +754 84 negative_sampler.num_negs_per_pos 77.0 +754 84 training.batch_size 0.0 +754 85 model.embedding_dim 2.0 +754 85 model.relation_dim 0.0 +754 85 loss.margin 15.137668653925862 +754 85 loss.adversarial_temperature 0.1512806513510585 +754 85 negative_sampler.num_negs_per_pos 30.0 +754 85 training.batch_size 2.0 +754 86 model.embedding_dim 1.0 +754 86 model.relation_dim 0.0 +754 86 loss.margin 9.989056964805105 +754 86 loss.adversarial_temperature 0.40361690884029255 +754 86 negative_sampler.num_negs_per_pos 34.0 +754 86 training.batch_size 1.0 +754 87 model.embedding_dim 1.0 +754 87 model.relation_dim 2.0 +754 87 loss.margin 29.331648072365383 +754 87 loss.adversarial_temperature 0.9721952268790504 +754 87 negative_sampler.num_negs_per_pos 26.0 +754 87 training.batch_size 1.0 +754 88 model.embedding_dim 0.0 +754 88 model.relation_dim 1.0 +754 88 loss.margin 18.28138832096377 +754 88 loss.adversarial_temperature 0.12399241428587782 +754 88 negative_sampler.num_negs_per_pos 71.0 +754 88 training.batch_size 2.0 +754 89 model.embedding_dim 1.0 +754 89 model.relation_dim 1.0 +754 89 loss.margin 2.417770691549063 +754 89 loss.adversarial_temperature 0.5569225844944585 +754 89 negative_sampler.num_negs_per_pos 66.0 +754 89 training.batch_size 1.0 +754 90 model.embedding_dim 2.0 +754 90 model.relation_dim 1.0 +754 90 loss.margin 13.520827166300393 +754 90 loss.adversarial_temperature 0.6614144890493723 +754 90 negative_sampler.num_negs_per_pos 28.0 +754 90 training.batch_size 0.0 +754 91 model.embedding_dim 0.0 +754 91 model.relation_dim 2.0 +754 91 loss.margin 19.910274549928307 +754 91 loss.adversarial_temperature 0.14935134497592412 +754 91 negative_sampler.num_negs_per_pos 44.0 +754 91 training.batch_size 2.0 +754 92 model.embedding_dim 0.0 +754 92 model.relation_dim 0.0 +754 92 loss.margin 19.191951988131773 +754 92 loss.adversarial_temperature 0.5824467076066401 +754 92 negative_sampler.num_negs_per_pos 87.0 +754 92 training.batch_size 0.0 +754 
93 model.embedding_dim 0.0 +754 93 model.relation_dim 2.0 +754 93 loss.margin 15.789331444540437 +754 93 loss.adversarial_temperature 0.9955154331291348 +754 93 negative_sampler.num_negs_per_pos 7.0 +754 93 training.batch_size 2.0 +754 94 model.embedding_dim 2.0 +754 94 model.relation_dim 2.0 +754 94 loss.margin 1.2913091051013628 +754 94 loss.adversarial_temperature 0.4599555799583547 +754 94 negative_sampler.num_negs_per_pos 48.0 +754 94 training.batch_size 1.0 +754 95 model.embedding_dim 0.0 +754 95 model.relation_dim 1.0 +754 95 loss.margin 14.192358861498766 +754 95 loss.adversarial_temperature 0.9830564090432693 +754 95 negative_sampler.num_negs_per_pos 39.0 +754 95 training.batch_size 0.0 +754 96 model.embedding_dim 2.0 +754 96 model.relation_dim 2.0 +754 96 loss.margin 11.541862084497279 +754 96 loss.adversarial_temperature 0.4298336389704873 +754 96 negative_sampler.num_negs_per_pos 65.0 +754 96 training.batch_size 0.0 +754 97 model.embedding_dim 2.0 +754 97 model.relation_dim 2.0 +754 97 loss.margin 19.91663336916444 +754 97 loss.adversarial_temperature 0.8056655118203938 +754 97 negative_sampler.num_negs_per_pos 54.0 +754 97 training.batch_size 2.0 +754 98 model.embedding_dim 2.0 +754 98 model.relation_dim 0.0 +754 98 loss.margin 6.908444835486245 +754 98 loss.adversarial_temperature 0.197032816276634 +754 98 negative_sampler.num_negs_per_pos 94.0 +754 98 training.batch_size 1.0 +754 99 model.embedding_dim 1.0 +754 99 model.relation_dim 0.0 +754 99 loss.margin 6.705521563692951 +754 99 loss.adversarial_temperature 0.7391590916618257 +754 99 negative_sampler.num_negs_per_pos 19.0 +754 99 training.batch_size 2.0 +754 100 model.embedding_dim 2.0 +754 100 model.relation_dim 2.0 +754 100 loss.margin 3.837934541577524 +754 100 loss.adversarial_temperature 0.5738862928859437 +754 100 negative_sampler.num_negs_per_pos 92.0 +754 100 training.batch_size 1.0 +754 1 dataset """kinships""" +754 1 model """transd""" +754 1 loss """nssa""" +754 1 regularizer """no""" 
+754 1 optimizer """adadelta""" +754 1 training_loop """owa""" +754 1 negative_sampler """basic""" +754 1 evaluator """rankbased""" +754 2 dataset """kinships""" +754 2 model """transd""" +754 2 loss """nssa""" +754 2 regularizer """no""" +754 2 optimizer """adadelta""" +754 2 training_loop """owa""" +754 2 negative_sampler """basic""" +754 2 evaluator """rankbased""" +754 3 dataset """kinships""" +754 3 model """transd""" +754 3 loss """nssa""" +754 3 regularizer """no""" +754 3 optimizer """adadelta""" +754 3 training_loop """owa""" +754 3 negative_sampler """basic""" +754 3 evaluator """rankbased""" +754 4 dataset """kinships""" +754 4 model """transd""" +754 4 loss """nssa""" +754 4 regularizer """no""" +754 4 optimizer """adadelta""" +754 4 training_loop """owa""" +754 4 negative_sampler """basic""" +754 4 evaluator """rankbased""" +754 5 dataset """kinships""" +754 5 model """transd""" +754 5 loss """nssa""" +754 5 regularizer """no""" +754 5 optimizer """adadelta""" +754 5 training_loop """owa""" +754 5 negative_sampler """basic""" +754 5 evaluator """rankbased""" +754 6 dataset """kinships""" +754 6 model """transd""" +754 6 loss """nssa""" +754 6 regularizer """no""" +754 6 optimizer """adadelta""" +754 6 training_loop """owa""" +754 6 negative_sampler """basic""" +754 6 evaluator """rankbased""" +754 7 dataset """kinships""" +754 7 model """transd""" +754 7 loss """nssa""" +754 7 regularizer """no""" +754 7 optimizer """adadelta""" +754 7 training_loop """owa""" +754 7 negative_sampler """basic""" +754 7 evaluator """rankbased""" +754 8 dataset """kinships""" +754 8 model """transd""" +754 8 loss """nssa""" +754 8 regularizer """no""" +754 8 optimizer """adadelta""" +754 8 training_loop """owa""" +754 8 negative_sampler """basic""" +754 8 evaluator """rankbased""" +754 9 dataset """kinships""" +754 9 model """transd""" +754 9 loss """nssa""" +754 9 regularizer """no""" +754 9 optimizer """adadelta""" +754 9 training_loop """owa""" +754 9 negative_sampler 
"""basic""" +754 9 evaluator """rankbased""" +754 10 dataset """kinships""" +754 10 model """transd""" +754 10 loss """nssa""" +754 10 regularizer """no""" +754 10 optimizer """adadelta""" +754 10 training_loop """owa""" +754 10 negative_sampler """basic""" +754 10 evaluator """rankbased""" +754 11 dataset """kinships""" +754 11 model """transd""" +754 11 loss """nssa""" +754 11 regularizer """no""" +754 11 optimizer """adadelta""" +754 11 training_loop """owa""" +754 11 negative_sampler """basic""" +754 11 evaluator """rankbased""" +754 12 dataset """kinships""" +754 12 model """transd""" +754 12 loss """nssa""" +754 12 regularizer """no""" +754 12 optimizer """adadelta""" +754 12 training_loop """owa""" +754 12 negative_sampler """basic""" +754 12 evaluator """rankbased""" +754 13 dataset """kinships""" +754 13 model """transd""" +754 13 loss """nssa""" +754 13 regularizer """no""" +754 13 optimizer """adadelta""" +754 13 training_loop """owa""" +754 13 negative_sampler """basic""" +754 13 evaluator """rankbased""" +754 14 dataset """kinships""" +754 14 model """transd""" +754 14 loss """nssa""" +754 14 regularizer """no""" +754 14 optimizer """adadelta""" +754 14 training_loop """owa""" +754 14 negative_sampler """basic""" +754 14 evaluator """rankbased""" +754 15 dataset """kinships""" +754 15 model """transd""" +754 15 loss """nssa""" +754 15 regularizer """no""" +754 15 optimizer """adadelta""" +754 15 training_loop """owa""" +754 15 negative_sampler """basic""" +754 15 evaluator """rankbased""" +754 16 dataset """kinships""" +754 16 model """transd""" +754 16 loss """nssa""" +754 16 regularizer """no""" +754 16 optimizer """adadelta""" +754 16 training_loop """owa""" +754 16 negative_sampler """basic""" +754 16 evaluator """rankbased""" +754 17 dataset """kinships""" +754 17 model """transd""" +754 17 loss """nssa""" +754 17 regularizer """no""" +754 17 optimizer """adadelta""" +754 17 training_loop """owa""" +754 17 negative_sampler """basic""" +754 17 
evaluator """rankbased""" +754 18 dataset """kinships""" +754 18 model """transd""" +754 18 loss """nssa""" +754 18 regularizer """no""" +754 18 optimizer """adadelta""" +754 18 training_loop """owa""" +754 18 negative_sampler """basic""" +754 18 evaluator """rankbased""" +754 19 dataset """kinships""" +754 19 model """transd""" +754 19 loss """nssa""" +754 19 regularizer """no""" +754 19 optimizer """adadelta""" +754 19 training_loop """owa""" +754 19 negative_sampler """basic""" +754 19 evaluator """rankbased""" +754 20 dataset """kinships""" +754 20 model """transd""" +754 20 loss """nssa""" +754 20 regularizer """no""" +754 20 optimizer """adadelta""" +754 20 training_loop """owa""" +754 20 negative_sampler """basic""" +754 20 evaluator """rankbased""" +754 21 dataset """kinships""" +754 21 model """transd""" +754 21 loss """nssa""" +754 21 regularizer """no""" +754 21 optimizer """adadelta""" +754 21 training_loop """owa""" +754 21 negative_sampler """basic""" +754 21 evaluator """rankbased""" +754 22 dataset """kinships""" +754 22 model """transd""" +754 22 loss """nssa""" +754 22 regularizer """no""" +754 22 optimizer """adadelta""" +754 22 training_loop """owa""" +754 22 negative_sampler """basic""" +754 22 evaluator """rankbased""" +754 23 dataset """kinships""" +754 23 model """transd""" +754 23 loss """nssa""" +754 23 regularizer """no""" +754 23 optimizer """adadelta""" +754 23 training_loop """owa""" +754 23 negative_sampler """basic""" +754 23 evaluator """rankbased""" +754 24 dataset """kinships""" +754 24 model """transd""" +754 24 loss """nssa""" +754 24 regularizer """no""" +754 24 optimizer """adadelta""" +754 24 training_loop """owa""" +754 24 negative_sampler """basic""" +754 24 evaluator """rankbased""" +754 25 dataset """kinships""" +754 25 model """transd""" +754 25 loss """nssa""" +754 25 regularizer """no""" +754 25 optimizer """adadelta""" +754 25 training_loop """owa""" +754 25 negative_sampler """basic""" +754 25 evaluator 
"""rankbased""" +754 26 dataset """kinships""" +754 26 model """transd""" +754 26 loss """nssa""" +754 26 regularizer """no""" +754 26 optimizer """adadelta""" +754 26 training_loop """owa""" +754 26 negative_sampler """basic""" +754 26 evaluator """rankbased""" +754 27 dataset """kinships""" +754 27 model """transd""" +754 27 loss """nssa""" +754 27 regularizer """no""" +754 27 optimizer """adadelta""" +754 27 training_loop """owa""" +754 27 negative_sampler """basic""" +754 27 evaluator """rankbased""" +754 28 dataset """kinships""" +754 28 model """transd""" +754 28 loss """nssa""" +754 28 regularizer """no""" +754 28 optimizer """adadelta""" +754 28 training_loop """owa""" +754 28 negative_sampler """basic""" +754 28 evaluator """rankbased""" +754 29 dataset """kinships""" +754 29 model """transd""" +754 29 loss """nssa""" +754 29 regularizer """no""" +754 29 optimizer """adadelta""" +754 29 training_loop """owa""" +754 29 negative_sampler """basic""" +754 29 evaluator """rankbased""" +754 30 dataset """kinships""" +754 30 model """transd""" +754 30 loss """nssa""" +754 30 regularizer """no""" +754 30 optimizer """adadelta""" +754 30 training_loop """owa""" +754 30 negative_sampler """basic""" +754 30 evaluator """rankbased""" +754 31 dataset """kinships""" +754 31 model """transd""" +754 31 loss """nssa""" +754 31 regularizer """no""" +754 31 optimizer """adadelta""" +754 31 training_loop """owa""" +754 31 negative_sampler """basic""" +754 31 evaluator """rankbased""" +754 32 dataset """kinships""" +754 32 model """transd""" +754 32 loss """nssa""" +754 32 regularizer """no""" +754 32 optimizer """adadelta""" +754 32 training_loop """owa""" +754 32 negative_sampler """basic""" +754 32 evaluator """rankbased""" +754 33 dataset """kinships""" +754 33 model """transd""" +754 33 loss """nssa""" +754 33 regularizer """no""" +754 33 optimizer """adadelta""" +754 33 training_loop """owa""" +754 33 negative_sampler """basic""" +754 33 evaluator """rankbased""" +754 34 
dataset """kinships""" +754 34 model """transd""" +754 34 loss """nssa""" +754 34 regularizer """no""" +754 34 optimizer """adadelta""" +754 34 training_loop """owa""" +754 34 negative_sampler """basic""" +754 34 evaluator """rankbased""" +754 35 dataset """kinships""" +754 35 model """transd""" +754 35 loss """nssa""" +754 35 regularizer """no""" +754 35 optimizer """adadelta""" +754 35 training_loop """owa""" +754 35 negative_sampler """basic""" +754 35 evaluator """rankbased""" +754 36 dataset """kinships""" +754 36 model """transd""" +754 36 loss """nssa""" +754 36 regularizer """no""" +754 36 optimizer """adadelta""" +754 36 training_loop """owa""" +754 36 negative_sampler """basic""" +754 36 evaluator """rankbased""" +754 37 dataset """kinships""" +754 37 model """transd""" +754 37 loss """nssa""" +754 37 regularizer """no""" +754 37 optimizer """adadelta""" +754 37 training_loop """owa""" +754 37 negative_sampler """basic""" +754 37 evaluator """rankbased""" +754 38 dataset """kinships""" +754 38 model """transd""" +754 38 loss """nssa""" +754 38 regularizer """no""" +754 38 optimizer """adadelta""" +754 38 training_loop """owa""" +754 38 negative_sampler """basic""" +754 38 evaluator """rankbased""" +754 39 dataset """kinships""" +754 39 model """transd""" +754 39 loss """nssa""" +754 39 regularizer """no""" +754 39 optimizer """adadelta""" +754 39 training_loop """owa""" +754 39 negative_sampler """basic""" +754 39 evaluator """rankbased""" +754 40 dataset """kinships""" +754 40 model """transd""" +754 40 loss """nssa""" +754 40 regularizer """no""" +754 40 optimizer """adadelta""" +754 40 training_loop """owa""" +754 40 negative_sampler """basic""" +754 40 evaluator """rankbased""" +754 41 dataset """kinships""" +754 41 model """transd""" +754 41 loss """nssa""" +754 41 regularizer """no""" +754 41 optimizer """adadelta""" +754 41 training_loop """owa""" +754 41 negative_sampler """basic""" +754 41 evaluator """rankbased""" +754 42 dataset """kinships""" 
+754 42 model """transd""" +754 42 loss """nssa""" +754 42 regularizer """no""" +754 42 optimizer """adadelta""" +754 42 training_loop """owa""" +754 42 negative_sampler """basic""" +754 42 evaluator """rankbased""" +754 43 dataset """kinships""" +754 43 model """transd""" +754 43 loss """nssa""" +754 43 regularizer """no""" +754 43 optimizer """adadelta""" +754 43 training_loop """owa""" +754 43 negative_sampler """basic""" +754 43 evaluator """rankbased""" +754 44 dataset """kinships""" +754 44 model """transd""" +754 44 loss """nssa""" +754 44 regularizer """no""" +754 44 optimizer """adadelta""" +754 44 training_loop """owa""" +754 44 negative_sampler """basic""" +754 44 evaluator """rankbased""" +754 45 dataset """kinships""" +754 45 model """transd""" +754 45 loss """nssa""" +754 45 regularizer """no""" +754 45 optimizer """adadelta""" +754 45 training_loop """owa""" +754 45 negative_sampler """basic""" +754 45 evaluator """rankbased""" +754 46 dataset """kinships""" +754 46 model """transd""" +754 46 loss """nssa""" +754 46 regularizer """no""" +754 46 optimizer """adadelta""" +754 46 training_loop """owa""" +754 46 negative_sampler """basic""" +754 46 evaluator """rankbased""" +754 47 dataset """kinships""" +754 47 model """transd""" +754 47 loss """nssa""" +754 47 regularizer """no""" +754 47 optimizer """adadelta""" +754 47 training_loop """owa""" +754 47 negative_sampler """basic""" +754 47 evaluator """rankbased""" +754 48 dataset """kinships""" +754 48 model """transd""" +754 48 loss """nssa""" +754 48 regularizer """no""" +754 48 optimizer """adadelta""" +754 48 training_loop """owa""" +754 48 negative_sampler """basic""" +754 48 evaluator """rankbased""" +754 49 dataset """kinships""" +754 49 model """transd""" +754 49 loss """nssa""" +754 49 regularizer """no""" +754 49 optimizer """adadelta""" +754 49 training_loop """owa""" +754 49 negative_sampler """basic""" +754 49 evaluator """rankbased""" +754 50 dataset """kinships""" +754 50 model 
"""transd""" +754 50 loss """nssa""" +754 50 regularizer """no""" +754 50 optimizer """adadelta""" +754 50 training_loop """owa""" +754 50 negative_sampler """basic""" +754 50 evaluator """rankbased""" +754 51 dataset """kinships""" +754 51 model """transd""" +754 51 loss """nssa""" +754 51 regularizer """no""" +754 51 optimizer """adadelta""" +754 51 training_loop """owa""" +754 51 negative_sampler """basic""" +754 51 evaluator """rankbased""" +754 52 dataset """kinships""" +754 52 model """transd""" +754 52 loss """nssa""" +754 52 regularizer """no""" +754 52 optimizer """adadelta""" +754 52 training_loop """owa""" +754 52 negative_sampler """basic""" +754 52 evaluator """rankbased""" +754 53 dataset """kinships""" +754 53 model """transd""" +754 53 loss """nssa""" +754 53 regularizer """no""" +754 53 optimizer """adadelta""" +754 53 training_loop """owa""" +754 53 negative_sampler """basic""" +754 53 evaluator """rankbased""" +754 54 dataset """kinships""" +754 54 model """transd""" +754 54 loss """nssa""" +754 54 regularizer """no""" +754 54 optimizer """adadelta""" +754 54 training_loop """owa""" +754 54 negative_sampler """basic""" +754 54 evaluator """rankbased""" +754 55 dataset """kinships""" +754 55 model """transd""" +754 55 loss """nssa""" +754 55 regularizer """no""" +754 55 optimizer """adadelta""" +754 55 training_loop """owa""" +754 55 negative_sampler """basic""" +754 55 evaluator """rankbased""" +754 56 dataset """kinships""" +754 56 model """transd""" +754 56 loss """nssa""" +754 56 regularizer """no""" +754 56 optimizer """adadelta""" +754 56 training_loop """owa""" +754 56 negative_sampler """basic""" +754 56 evaluator """rankbased""" +754 57 dataset """kinships""" +754 57 model """transd""" +754 57 loss """nssa""" +754 57 regularizer """no""" +754 57 optimizer """adadelta""" +754 57 training_loop """owa""" +754 57 negative_sampler """basic""" +754 57 evaluator """rankbased""" +754 58 dataset """kinships""" +754 58 model """transd""" +754 58 
loss """nssa""" +754 58 regularizer """no""" +754 58 optimizer """adadelta""" +754 58 training_loop """owa""" +754 58 negative_sampler """basic""" +754 58 evaluator """rankbased""" +754 59 dataset """kinships""" +754 59 model """transd""" +754 59 loss """nssa""" +754 59 regularizer """no""" +754 59 optimizer """adadelta""" +754 59 training_loop """owa""" +754 59 negative_sampler """basic""" +754 59 evaluator """rankbased""" +754 60 dataset """kinships""" +754 60 model """transd""" +754 60 loss """nssa""" +754 60 regularizer """no""" +754 60 optimizer """adadelta""" +754 60 training_loop """owa""" +754 60 negative_sampler """basic""" +754 60 evaluator """rankbased""" +754 61 dataset """kinships""" +754 61 model """transd""" +754 61 loss """nssa""" +754 61 regularizer """no""" +754 61 optimizer """adadelta""" +754 61 training_loop """owa""" +754 61 negative_sampler """basic""" +754 61 evaluator """rankbased""" +754 62 dataset """kinships""" +754 62 model """transd""" +754 62 loss """nssa""" +754 62 regularizer """no""" +754 62 optimizer """adadelta""" +754 62 training_loop """owa""" +754 62 negative_sampler """basic""" +754 62 evaluator """rankbased""" +754 63 dataset """kinships""" +754 63 model """transd""" +754 63 loss """nssa""" +754 63 regularizer """no""" +754 63 optimizer """adadelta""" +754 63 training_loop """owa""" +754 63 negative_sampler """basic""" +754 63 evaluator """rankbased""" +754 64 dataset """kinships""" +754 64 model """transd""" +754 64 loss """nssa""" +754 64 regularizer """no""" +754 64 optimizer """adadelta""" +754 64 training_loop """owa""" +754 64 negative_sampler """basic""" +754 64 evaluator """rankbased""" +754 65 dataset """kinships""" +754 65 model """transd""" +754 65 loss """nssa""" +754 65 regularizer """no""" +754 65 optimizer """adadelta""" +754 65 training_loop """owa""" +754 65 negative_sampler """basic""" +754 65 evaluator """rankbased""" +754 66 dataset """kinships""" +754 66 model """transd""" +754 66 loss """nssa""" +754 66 
regularizer """no""" +754 66 optimizer """adadelta""" +754 66 training_loop """owa""" +754 66 negative_sampler """basic""" +754 66 evaluator """rankbased""" +754 67 dataset """kinships""" +754 67 model """transd""" +754 67 loss """nssa""" +754 67 regularizer """no""" +754 67 optimizer """adadelta""" +754 67 training_loop """owa""" +754 67 negative_sampler """basic""" +754 67 evaluator """rankbased""" +754 68 dataset """kinships""" +754 68 model """transd""" +754 68 loss """nssa""" +754 68 regularizer """no""" +754 68 optimizer """adadelta""" +754 68 training_loop """owa""" +754 68 negative_sampler """basic""" +754 68 evaluator """rankbased""" +754 69 dataset """kinships""" +754 69 model """transd""" +754 69 loss """nssa""" +754 69 regularizer """no""" +754 69 optimizer """adadelta""" +754 69 training_loop """owa""" +754 69 negative_sampler """basic""" +754 69 evaluator """rankbased""" +754 70 dataset """kinships""" +754 70 model """transd""" +754 70 loss """nssa""" +754 70 regularizer """no""" +754 70 optimizer """adadelta""" +754 70 training_loop """owa""" +754 70 negative_sampler """basic""" +754 70 evaluator """rankbased""" +754 71 dataset """kinships""" +754 71 model """transd""" +754 71 loss """nssa""" +754 71 regularizer """no""" +754 71 optimizer """adadelta""" +754 71 training_loop """owa""" +754 71 negative_sampler """basic""" +754 71 evaluator """rankbased""" +754 72 dataset """kinships""" +754 72 model """transd""" +754 72 loss """nssa""" +754 72 regularizer """no""" +754 72 optimizer """adadelta""" +754 72 training_loop """owa""" +754 72 negative_sampler """basic""" +754 72 evaluator """rankbased""" +754 73 dataset """kinships""" +754 73 model """transd""" +754 73 loss """nssa""" +754 73 regularizer """no""" +754 73 optimizer """adadelta""" +754 73 training_loop """owa""" +754 73 negative_sampler """basic""" +754 73 evaluator """rankbased""" +754 74 dataset """kinships""" +754 74 model """transd""" +754 74 loss """nssa""" +754 74 regularizer """no""" 
+754 74 optimizer """adadelta""" +754 74 training_loop """owa""" +754 74 negative_sampler """basic""" +754 74 evaluator """rankbased""" +754 75 dataset """kinships""" +754 75 model """transd""" +754 75 loss """nssa""" +754 75 regularizer """no""" +754 75 optimizer """adadelta""" +754 75 training_loop """owa""" +754 75 negative_sampler """basic""" +754 75 evaluator """rankbased""" +754 76 dataset """kinships""" +754 76 model """transd""" +754 76 loss """nssa""" +754 76 regularizer """no""" +754 76 optimizer """adadelta""" +754 76 training_loop """owa""" +754 76 negative_sampler """basic""" +754 76 evaluator """rankbased""" +754 77 dataset """kinships""" +754 77 model """transd""" +754 77 loss """nssa""" +754 77 regularizer """no""" +754 77 optimizer """adadelta""" +754 77 training_loop """owa""" +754 77 negative_sampler """basic""" +754 77 evaluator """rankbased""" +754 78 dataset """kinships""" +754 78 model """transd""" +754 78 loss """nssa""" +754 78 regularizer """no""" +754 78 optimizer """adadelta""" +754 78 training_loop """owa""" +754 78 negative_sampler """basic""" +754 78 evaluator """rankbased""" +754 79 dataset """kinships""" +754 79 model """transd""" +754 79 loss """nssa""" +754 79 regularizer """no""" +754 79 optimizer """adadelta""" +754 79 training_loop """owa""" +754 79 negative_sampler """basic""" +754 79 evaluator """rankbased""" +754 80 dataset """kinships""" +754 80 model """transd""" +754 80 loss """nssa""" +754 80 regularizer """no""" +754 80 optimizer """adadelta""" +754 80 training_loop """owa""" +754 80 negative_sampler """basic""" +754 80 evaluator """rankbased""" +754 81 dataset """kinships""" +754 81 model """transd""" +754 81 loss """nssa""" +754 81 regularizer """no""" +754 81 optimizer """adadelta""" +754 81 training_loop """owa""" +754 81 negative_sampler """basic""" +754 81 evaluator """rankbased""" +754 82 dataset """kinships""" +754 82 model """transd""" +754 82 loss """nssa""" +754 82 regularizer """no""" +754 82 optimizer 
"""adadelta""" +754 82 training_loop """owa""" +754 82 negative_sampler """basic""" +754 82 evaluator """rankbased""" +754 83 dataset """kinships""" +754 83 model """transd""" +754 83 loss """nssa""" +754 83 regularizer """no""" +754 83 optimizer """adadelta""" +754 83 training_loop """owa""" +754 83 negative_sampler """basic""" +754 83 evaluator """rankbased""" +754 84 dataset """kinships""" +754 84 model """transd""" +754 84 loss """nssa""" +754 84 regularizer """no""" +754 84 optimizer """adadelta""" +754 84 training_loop """owa""" +754 84 negative_sampler """basic""" +754 84 evaluator """rankbased""" +754 85 dataset """kinships""" +754 85 model """transd""" +754 85 loss """nssa""" +754 85 regularizer """no""" +754 85 optimizer """adadelta""" +754 85 training_loop """owa""" +754 85 negative_sampler """basic""" +754 85 evaluator """rankbased""" +754 86 dataset """kinships""" +754 86 model """transd""" +754 86 loss """nssa""" +754 86 regularizer """no""" +754 86 optimizer """adadelta""" +754 86 training_loop """owa""" +754 86 negative_sampler """basic""" +754 86 evaluator """rankbased""" +754 87 dataset """kinships""" +754 87 model """transd""" +754 87 loss """nssa""" +754 87 regularizer """no""" +754 87 optimizer """adadelta""" +754 87 training_loop """owa""" +754 87 negative_sampler """basic""" +754 87 evaluator """rankbased""" +754 88 dataset """kinships""" +754 88 model """transd""" +754 88 loss """nssa""" +754 88 regularizer """no""" +754 88 optimizer """adadelta""" +754 88 training_loop """owa""" +754 88 negative_sampler """basic""" +754 88 evaluator """rankbased""" +754 89 dataset """kinships""" +754 89 model """transd""" +754 89 loss """nssa""" +754 89 regularizer """no""" +754 89 optimizer """adadelta""" +754 89 training_loop """owa""" +754 89 negative_sampler """basic""" +754 89 evaluator """rankbased""" +754 90 dataset """kinships""" +754 90 model """transd""" +754 90 loss """nssa""" +754 90 regularizer """no""" +754 90 optimizer """adadelta""" +754 90 
training_loop """owa""" +754 90 negative_sampler """basic""" +754 90 evaluator """rankbased""" +754 91 dataset """kinships""" +754 91 model """transd""" +754 91 loss """nssa""" +754 91 regularizer """no""" +754 91 optimizer """adadelta""" +754 91 training_loop """owa""" +754 91 negative_sampler """basic""" +754 91 evaluator """rankbased""" +754 92 dataset """kinships""" +754 92 model """transd""" +754 92 loss """nssa""" +754 92 regularizer """no""" +754 92 optimizer """adadelta""" +754 92 training_loop """owa""" +754 92 negative_sampler """basic""" +754 92 evaluator """rankbased""" +754 93 dataset """kinships""" +754 93 model """transd""" +754 93 loss """nssa""" +754 93 regularizer """no""" +754 93 optimizer """adadelta""" +754 93 training_loop """owa""" +754 93 negative_sampler """basic""" +754 93 evaluator """rankbased""" +754 94 dataset """kinships""" +754 94 model """transd""" +754 94 loss """nssa""" +754 94 regularizer """no""" +754 94 optimizer """adadelta""" +754 94 training_loop """owa""" +754 94 negative_sampler """basic""" +754 94 evaluator """rankbased""" +754 95 dataset """kinships""" +754 95 model """transd""" +754 95 loss """nssa""" +754 95 regularizer """no""" +754 95 optimizer """adadelta""" +754 95 training_loop """owa""" +754 95 negative_sampler """basic""" +754 95 evaluator """rankbased""" +754 96 dataset """kinships""" +754 96 model """transd""" +754 96 loss """nssa""" +754 96 regularizer """no""" +754 96 optimizer """adadelta""" +754 96 training_loop """owa""" +754 96 negative_sampler """basic""" +754 96 evaluator """rankbased""" +754 97 dataset """kinships""" +754 97 model """transd""" +754 97 loss """nssa""" +754 97 regularizer """no""" +754 97 optimizer """adadelta""" +754 97 training_loop """owa""" +754 97 negative_sampler """basic""" +754 97 evaluator """rankbased""" +754 98 dataset """kinships""" +754 98 model """transd""" +754 98 loss """nssa""" +754 98 regularizer """no""" +754 98 optimizer """adadelta""" +754 98 training_loop """owa""" 
+754 98 negative_sampler """basic""" +754 98 evaluator """rankbased""" +754 99 dataset """kinships""" +754 99 model """transd""" +754 99 loss """nssa""" +754 99 regularizer """no""" +754 99 optimizer """adadelta""" +754 99 training_loop """owa""" +754 99 negative_sampler """basic""" +754 99 evaluator """rankbased""" +754 100 dataset """kinships""" +754 100 model """transd""" +754 100 loss """nssa""" +754 100 regularizer """no""" +754 100 optimizer """adadelta""" +754 100 training_loop """owa""" +754 100 negative_sampler """basic""" +754 100 evaluator """rankbased""" +755 1 model.embedding_dim 2.0 +755 1 model.relation_dim 0.0 +755 1 optimizer.lr 0.001493988346158344 +755 1 training.batch_size 2.0 +755 1 training.label_smoothing 0.0013250713444244184 +755 2 model.embedding_dim 2.0 +755 2 model.relation_dim 1.0 +755 2 optimizer.lr 0.012493270008233172 +755 2 training.batch_size 0.0 +755 2 training.label_smoothing 0.42037219508679363 +755 3 model.embedding_dim 2.0 +755 3 model.relation_dim 1.0 +755 3 optimizer.lr 0.0018189373853572989 +755 3 training.batch_size 1.0 +755 3 training.label_smoothing 0.004464475013334754 +755 4 model.embedding_dim 2.0 +755 4 model.relation_dim 2.0 +755 4 optimizer.lr 0.036598893676490205 +755 4 training.batch_size 2.0 +755 4 training.label_smoothing 0.01911254787880349 +755 5 model.embedding_dim 2.0 +755 5 model.relation_dim 0.0 +755 5 optimizer.lr 0.002073461039497446 +755 5 training.batch_size 0.0 +755 5 training.label_smoothing 0.009349129861316651 +755 6 model.embedding_dim 1.0 +755 6 model.relation_dim 1.0 +755 6 optimizer.lr 0.09562655169603286 +755 6 training.batch_size 1.0 +755 6 training.label_smoothing 0.3428729456052039 +755 7 model.embedding_dim 2.0 +755 7 model.relation_dim 1.0 +755 7 optimizer.lr 0.010527024063235543 +755 7 training.batch_size 2.0 +755 7 training.label_smoothing 0.059783983846965955 +755 8 model.embedding_dim 2.0 +755 8 model.relation_dim 2.0 +755 8 optimizer.lr 0.005048019216366632 +755 8 
training.batch_size 1.0 +755 8 training.label_smoothing 0.014654683284399596 +755 9 model.embedding_dim 0.0 +755 9 model.relation_dim 0.0 +755 9 optimizer.lr 0.028767077492083213 +755 9 training.batch_size 2.0 +755 9 training.label_smoothing 0.0055361516993225985 +755 10 model.embedding_dim 2.0 +755 10 model.relation_dim 2.0 +755 10 optimizer.lr 0.002385653865113628 +755 10 training.batch_size 1.0 +755 10 training.label_smoothing 0.1567124112721617 +755 11 model.embedding_dim 2.0 +755 11 model.relation_dim 1.0 +755 11 optimizer.lr 0.07068384583138508 +755 11 training.batch_size 1.0 +755 11 training.label_smoothing 0.0022739711429685377 +755 12 model.embedding_dim 2.0 +755 12 model.relation_dim 2.0 +755 12 optimizer.lr 0.022344888856334934 +755 12 training.batch_size 0.0 +755 12 training.label_smoothing 0.012637133093460857 +755 13 model.embedding_dim 2.0 +755 13 model.relation_dim 1.0 +755 13 optimizer.lr 0.0023527340959794522 +755 13 training.batch_size 0.0 +755 13 training.label_smoothing 0.11158913630550897 +755 14 model.embedding_dim 1.0 +755 14 model.relation_dim 2.0 +755 14 optimizer.lr 0.06376553031196494 +755 14 training.batch_size 0.0 +755 14 training.label_smoothing 0.5957709059203923 +755 15 model.embedding_dim 0.0 +755 15 model.relation_dim 1.0 +755 15 optimizer.lr 0.010464781538571563 +755 15 training.batch_size 1.0 +755 15 training.label_smoothing 0.016596631553345485 +755 16 model.embedding_dim 1.0 +755 16 model.relation_dim 2.0 +755 16 optimizer.lr 0.005126629023556148 +755 16 training.batch_size 1.0 +755 16 training.label_smoothing 0.4344331429101513 +755 17 model.embedding_dim 1.0 +755 17 model.relation_dim 0.0 +755 17 optimizer.lr 0.02871858402874429 +755 17 training.batch_size 0.0 +755 17 training.label_smoothing 0.00398902823205909 +755 18 model.embedding_dim 0.0 +755 18 model.relation_dim 1.0 +755 18 optimizer.lr 0.05540305711470221 +755 18 training.batch_size 2.0 +755 18 training.label_smoothing 0.31017200768963005 +755 19 model.embedding_dim 
2.0 +755 19 model.relation_dim 0.0 +755 19 optimizer.lr 0.0011167992654569677 +755 19 training.batch_size 2.0 +755 19 training.label_smoothing 0.6374499302780009 +755 20 model.embedding_dim 2.0 +755 20 model.relation_dim 2.0 +755 20 optimizer.lr 0.061293402523479805 +755 20 training.batch_size 2.0 +755 20 training.label_smoothing 0.013823483542446087 +755 21 model.embedding_dim 0.0 +755 21 model.relation_dim 2.0 +755 21 optimizer.lr 0.0069735068666488004 +755 21 training.batch_size 0.0 +755 21 training.label_smoothing 0.21700571687714865 +755 22 model.embedding_dim 2.0 +755 22 model.relation_dim 0.0 +755 22 optimizer.lr 0.030851516892992512 +755 22 training.batch_size 1.0 +755 22 training.label_smoothing 0.006341838936412855 +755 23 model.embedding_dim 1.0 +755 23 model.relation_dim 2.0 +755 23 optimizer.lr 0.020692972055602263 +755 23 training.batch_size 0.0 +755 23 training.label_smoothing 0.001027832821950402 +755 24 model.embedding_dim 1.0 +755 24 model.relation_dim 0.0 +755 24 optimizer.lr 0.002402629506177435 +755 24 training.batch_size 0.0 +755 24 training.label_smoothing 0.007963707991370007 +755 25 model.embedding_dim 1.0 +755 25 model.relation_dim 0.0 +755 25 optimizer.lr 0.03642894361167617 +755 25 training.batch_size 1.0 +755 25 training.label_smoothing 0.013897075445993123 +755 26 model.embedding_dim 2.0 +755 26 model.relation_dim 1.0 +755 26 optimizer.lr 0.0948443384403442 +755 26 training.batch_size 2.0 +755 26 training.label_smoothing 0.010925493769999356 +755 27 model.embedding_dim 2.0 +755 27 model.relation_dim 1.0 +755 27 optimizer.lr 0.0015713983650701114 +755 27 training.batch_size 1.0 +755 27 training.label_smoothing 0.5551459769896309 +755 28 model.embedding_dim 2.0 +755 28 model.relation_dim 2.0 +755 28 optimizer.lr 0.0352289379532456 +755 28 training.batch_size 2.0 +755 28 training.label_smoothing 0.2475188443241826 +755 29 model.embedding_dim 1.0 +755 29 model.relation_dim 1.0 +755 29 optimizer.lr 0.05086212812247308 +755 29 
training.batch_size 0.0 +755 29 training.label_smoothing 0.8019418688569621 +755 30 model.embedding_dim 2.0 +755 30 model.relation_dim 2.0 +755 30 optimizer.lr 0.009256354402312282 +755 30 training.batch_size 2.0 +755 30 training.label_smoothing 0.9577103035350913 +755 31 model.embedding_dim 0.0 +755 31 model.relation_dim 2.0 +755 31 optimizer.lr 0.022988017061895755 +755 31 training.batch_size 2.0 +755 31 training.label_smoothing 0.6034493588481038 +755 32 model.embedding_dim 0.0 +755 32 model.relation_dim 2.0 +755 32 optimizer.lr 0.007710370993383753 +755 32 training.batch_size 1.0 +755 32 training.label_smoothing 0.16690618537840318 +755 33 model.embedding_dim 1.0 +755 33 model.relation_dim 0.0 +755 33 optimizer.lr 0.010661647788632286 +755 33 training.batch_size 0.0 +755 33 training.label_smoothing 0.006784727529951828 +755 34 model.embedding_dim 2.0 +755 34 model.relation_dim 2.0 +755 34 optimizer.lr 0.0644316669802219 +755 34 training.batch_size 0.0 +755 34 training.label_smoothing 0.5947608880853482 +755 35 model.embedding_dim 0.0 +755 35 model.relation_dim 2.0 +755 35 optimizer.lr 0.029635556748916374 +755 35 training.batch_size 2.0 +755 35 training.label_smoothing 0.005671509488582512 +755 36 model.embedding_dim 0.0 +755 36 model.relation_dim 0.0 +755 36 optimizer.lr 0.023088635668431733 +755 36 training.batch_size 0.0 +755 36 training.label_smoothing 0.0016692080636842672 +755 37 model.embedding_dim 0.0 +755 37 model.relation_dim 2.0 +755 37 optimizer.lr 0.0057722538923873894 +755 37 training.batch_size 2.0 +755 37 training.label_smoothing 0.3797843844693643 +755 38 model.embedding_dim 2.0 +755 38 model.relation_dim 0.0 +755 38 optimizer.lr 0.027716350309543916 +755 38 training.batch_size 2.0 +755 38 training.label_smoothing 0.011474978978174353 +755 39 model.embedding_dim 1.0 +755 39 model.relation_dim 0.0 +755 39 optimizer.lr 0.0031968151366419313 +755 39 training.batch_size 2.0 +755 39 training.label_smoothing 0.0024547128469405966 +755 40 
model.embedding_dim 0.0 +755 40 model.relation_dim 2.0 +755 40 optimizer.lr 0.00992497180763803 +755 40 training.batch_size 2.0 +755 40 training.label_smoothing 0.029887580967820362 +755 41 model.embedding_dim 2.0 +755 41 model.relation_dim 0.0 +755 41 optimizer.lr 0.0027082571428500356 +755 41 training.batch_size 0.0 +755 41 training.label_smoothing 0.010132603648625652 +755 42 model.embedding_dim 1.0 +755 42 model.relation_dim 0.0 +755 42 optimizer.lr 0.005409053786939015 +755 42 training.batch_size 0.0 +755 42 training.label_smoothing 0.001843975838681293 +755 43 model.embedding_dim 1.0 +755 43 model.relation_dim 1.0 +755 43 optimizer.lr 0.013739481517303037 +755 43 training.batch_size 2.0 +755 43 training.label_smoothing 0.0024687206936260227 +755 44 model.embedding_dim 1.0 +755 44 model.relation_dim 1.0 +755 44 optimizer.lr 0.0514833761329891 +755 44 training.batch_size 0.0 +755 44 training.label_smoothing 0.0010626555531993208 +755 45 model.embedding_dim 1.0 +755 45 model.relation_dim 1.0 +755 45 optimizer.lr 0.042022368126829 +755 45 training.batch_size 1.0 +755 45 training.label_smoothing 0.03136838331575611 +755 46 model.embedding_dim 0.0 +755 46 model.relation_dim 0.0 +755 46 optimizer.lr 0.012030184869429595 +755 46 training.batch_size 0.0 +755 46 training.label_smoothing 0.004688592127711058 +755 47 model.embedding_dim 2.0 +755 47 model.relation_dim 0.0 +755 47 optimizer.lr 0.05120283477234227 +755 47 training.batch_size 0.0 +755 47 training.label_smoothing 0.04107211311455502 +755 48 model.embedding_dim 2.0 +755 48 model.relation_dim 0.0 +755 48 optimizer.lr 0.0033599374596116546 +755 48 training.batch_size 1.0 +755 48 training.label_smoothing 0.02482640130720849 +755 49 model.embedding_dim 0.0 +755 49 model.relation_dim 2.0 +755 49 optimizer.lr 0.01582724852825125 +755 49 training.batch_size 1.0 +755 49 training.label_smoothing 0.180662195421385 +755 50 model.embedding_dim 0.0 +755 50 model.relation_dim 2.0 +755 50 optimizer.lr 0.005824563946713725 
+755 50 training.batch_size 0.0 +755 50 training.label_smoothing 0.06982096187937847 +755 51 model.embedding_dim 0.0 +755 51 model.relation_dim 1.0 +755 51 optimizer.lr 0.013456632539929242 +755 51 training.batch_size 2.0 +755 51 training.label_smoothing 0.0018794432945574138 +755 52 model.embedding_dim 0.0 +755 52 model.relation_dim 0.0 +755 52 optimizer.lr 0.007746782948828486 +755 52 training.batch_size 2.0 +755 52 training.label_smoothing 0.044997825386560206 +755 53 model.embedding_dim 2.0 +755 53 model.relation_dim 0.0 +755 53 optimizer.lr 0.005423638744946067 +755 53 training.batch_size 2.0 +755 53 training.label_smoothing 0.008538009253231043 +755 54 model.embedding_dim 2.0 +755 54 model.relation_dim 1.0 +755 54 optimizer.lr 0.0037599560294237287 +755 54 training.batch_size 1.0 +755 54 training.label_smoothing 0.012503628672559592 +755 55 model.embedding_dim 1.0 +755 55 model.relation_dim 0.0 +755 55 optimizer.lr 0.004505374569803264 +755 55 training.batch_size 0.0 +755 55 training.label_smoothing 0.012467260265194718 +755 56 model.embedding_dim 2.0 +755 56 model.relation_dim 2.0 +755 56 optimizer.lr 0.0017400945547929717 +755 56 training.batch_size 0.0 +755 56 training.label_smoothing 0.04888591703942717 +755 57 model.embedding_dim 0.0 +755 57 model.relation_dim 2.0 +755 57 optimizer.lr 0.03887777918087084 +755 57 training.batch_size 1.0 +755 57 training.label_smoothing 0.009262407781400198 +755 58 model.embedding_dim 2.0 +755 58 model.relation_dim 1.0 +755 58 optimizer.lr 0.06573588824855248 +755 58 training.batch_size 2.0 +755 58 training.label_smoothing 0.0026803519927557575 +755 59 model.embedding_dim 0.0 +755 59 model.relation_dim 2.0 +755 59 optimizer.lr 0.001354068158522629 +755 59 training.batch_size 1.0 +755 59 training.label_smoothing 0.032825442421077156 +755 60 model.embedding_dim 2.0 +755 60 model.relation_dim 1.0 +755 60 optimizer.lr 0.03255788916587599 +755 60 training.batch_size 1.0 +755 60 training.label_smoothing 0.033833695113239436 +755 
61 model.embedding_dim 1.0 +755 61 model.relation_dim 2.0 +755 61 optimizer.lr 0.014261991594627014 +755 61 training.batch_size 2.0 +755 61 training.label_smoothing 0.0014109016925955705 +755 62 model.embedding_dim 2.0 +755 62 model.relation_dim 1.0 +755 62 optimizer.lr 0.0027680604551805544 +755 62 training.batch_size 1.0 +755 62 training.label_smoothing 0.0363520177588848 +755 63 model.embedding_dim 1.0 +755 63 model.relation_dim 0.0 +755 63 optimizer.lr 0.04332744604867442 +755 63 training.batch_size 1.0 +755 63 training.label_smoothing 0.004190035577698201 +755 64 model.embedding_dim 2.0 +755 64 model.relation_dim 1.0 +755 64 optimizer.lr 0.05819306841297606 +755 64 training.batch_size 2.0 +755 64 training.label_smoothing 0.035462783040151144 +755 65 model.embedding_dim 0.0 +755 65 model.relation_dim 2.0 +755 65 optimizer.lr 0.0010933402024077838 +755 65 training.batch_size 1.0 +755 65 training.label_smoothing 0.006205951927001175 +755 66 model.embedding_dim 0.0 +755 66 model.relation_dim 0.0 +755 66 optimizer.lr 0.0972495431974216 +755 66 training.batch_size 1.0 +755 66 training.label_smoothing 0.01716042930830095 +755 67 model.embedding_dim 1.0 +755 67 model.relation_dim 1.0 +755 67 optimizer.lr 0.0016263840644766792 +755 67 training.batch_size 1.0 +755 67 training.label_smoothing 0.01454722652838286 +755 68 model.embedding_dim 0.0 +755 68 model.relation_dim 2.0 +755 68 optimizer.lr 0.017913821979265488 +755 68 training.batch_size 0.0 +755 68 training.label_smoothing 0.030884891346935884 +755 69 model.embedding_dim 0.0 +755 69 model.relation_dim 2.0 +755 69 optimizer.lr 0.0017888736467435314 +755 69 training.batch_size 1.0 +755 69 training.label_smoothing 0.06736844158662834 +755 70 model.embedding_dim 2.0 +755 70 model.relation_dim 1.0 +755 70 optimizer.lr 0.04678191785940446 +755 70 training.batch_size 1.0 +755 70 training.label_smoothing 0.004891830563299388 +755 71 model.embedding_dim 2.0 +755 71 model.relation_dim 0.0 +755 71 optimizer.lr 
0.0037752096974089148 +755 71 training.batch_size 2.0 +755 71 training.label_smoothing 0.03991841096807905 +755 72 model.embedding_dim 0.0 +755 72 model.relation_dim 2.0 +755 72 optimizer.lr 0.020473203303445296 +755 72 training.batch_size 1.0 +755 72 training.label_smoothing 0.24988828122416032 +755 73 model.embedding_dim 1.0 +755 73 model.relation_dim 2.0 +755 73 optimizer.lr 0.023978874660866442 +755 73 training.batch_size 0.0 +755 73 training.label_smoothing 0.0014067525403655505 +755 74 model.embedding_dim 1.0 +755 74 model.relation_dim 2.0 +755 74 optimizer.lr 0.0015267938889406045 +755 74 training.batch_size 1.0 +755 74 training.label_smoothing 0.06872759477049568 +755 75 model.embedding_dim 1.0 +755 75 model.relation_dim 2.0 +755 75 optimizer.lr 0.016441342220906412 +755 75 training.batch_size 2.0 +755 75 training.label_smoothing 0.9235883896362816 +755 76 model.embedding_dim 1.0 +755 76 model.relation_dim 1.0 +755 76 optimizer.lr 0.017997423373878087 +755 76 training.batch_size 1.0 +755 76 training.label_smoothing 0.054308423316576024 +755 77 model.embedding_dim 2.0 +755 77 model.relation_dim 2.0 +755 77 optimizer.lr 0.09985308880356153 +755 77 training.batch_size 0.0 +755 77 training.label_smoothing 0.005474129195441722 +755 78 model.embedding_dim 1.0 +755 78 model.relation_dim 2.0 +755 78 optimizer.lr 0.0025298327944315623 +755 78 training.batch_size 1.0 +755 78 training.label_smoothing 0.13365553454010123 +755 79 model.embedding_dim 2.0 +755 79 model.relation_dim 0.0 +755 79 optimizer.lr 0.034148995771343185 +755 79 training.batch_size 0.0 +755 79 training.label_smoothing 0.07033896333850119 +755 80 model.embedding_dim 0.0 +755 80 model.relation_dim 2.0 +755 80 optimizer.lr 0.0034036642302144892 +755 80 training.batch_size 1.0 +755 80 training.label_smoothing 0.0036106382535978383 +755 81 model.embedding_dim 1.0 +755 81 model.relation_dim 2.0 +755 81 optimizer.lr 0.0164611289631001 +755 81 training.batch_size 1.0 +755 81 training.label_smoothing 
0.021468543891522154 +755 82 model.embedding_dim 1.0 +755 82 model.relation_dim 2.0 +755 82 optimizer.lr 0.0864844232503452 +755 82 training.batch_size 2.0 +755 82 training.label_smoothing 0.04816322139857118 +755 83 model.embedding_dim 0.0 +755 83 model.relation_dim 0.0 +755 83 optimizer.lr 0.08183869702014486 +755 83 training.batch_size 1.0 +755 83 training.label_smoothing 0.06950879336973814 +755 84 model.embedding_dim 0.0 +755 84 model.relation_dim 1.0 +755 84 optimizer.lr 0.010208923961992276 +755 84 training.batch_size 2.0 +755 84 training.label_smoothing 0.004014421195146977 +755 85 model.embedding_dim 2.0 +755 85 model.relation_dim 0.0 +755 85 optimizer.lr 0.03543627711014426 +755 85 training.batch_size 1.0 +755 85 training.label_smoothing 0.0018143603039474535 +755 86 model.embedding_dim 2.0 +755 86 model.relation_dim 1.0 +755 86 optimizer.lr 0.045913728422794106 +755 86 training.batch_size 1.0 +755 86 training.label_smoothing 0.0712988514891711 +755 87 model.embedding_dim 1.0 +755 87 model.relation_dim 2.0 +755 87 optimizer.lr 0.04546586060676337 +755 87 training.batch_size 1.0 +755 87 training.label_smoothing 0.16563205554619648 +755 88 model.embedding_dim 2.0 +755 88 model.relation_dim 2.0 +755 88 optimizer.lr 0.037158302673418506 +755 88 training.batch_size 0.0 +755 88 training.label_smoothing 0.09714182484659692 +755 89 model.embedding_dim 1.0 +755 89 model.relation_dim 0.0 +755 89 optimizer.lr 0.0030171157331198195 +755 89 training.batch_size 0.0 +755 89 training.label_smoothing 0.23405793894525823 +755 90 model.embedding_dim 2.0 +755 90 model.relation_dim 0.0 +755 90 optimizer.lr 0.0023624025899532898 +755 90 training.batch_size 0.0 +755 90 training.label_smoothing 0.03387858792436756 +755 91 model.embedding_dim 2.0 +755 91 model.relation_dim 0.0 +755 91 optimizer.lr 0.003851823491603023 +755 91 training.batch_size 0.0 +755 91 training.label_smoothing 0.02153336103608299 +755 92 model.embedding_dim 1.0 +755 92 model.relation_dim 2.0 +755 92 
optimizer.lr 0.03773094504266877 +755 92 training.batch_size 2.0 +755 92 training.label_smoothing 0.5404707397674756 +755 93 model.embedding_dim 0.0 +755 93 model.relation_dim 0.0 +755 93 optimizer.lr 0.04872564495081588 +755 93 training.batch_size 1.0 +755 93 training.label_smoothing 0.0016858020169576258 +755 94 model.embedding_dim 1.0 +755 94 model.relation_dim 2.0 +755 94 optimizer.lr 0.0031515609094669335 +755 94 training.batch_size 1.0 +755 94 training.label_smoothing 0.187753422480912 +755 95 model.embedding_dim 0.0 +755 95 model.relation_dim 0.0 +755 95 optimizer.lr 0.032154383186795164 +755 95 training.batch_size 0.0 +755 95 training.label_smoothing 0.13025223531296135 +755 96 model.embedding_dim 2.0 +755 96 model.relation_dim 0.0 +755 96 optimizer.lr 0.010094669231469797 +755 96 training.batch_size 0.0 +755 96 training.label_smoothing 0.07039796599082077 +755 97 model.embedding_dim 2.0 +755 97 model.relation_dim 0.0 +755 97 optimizer.lr 0.006239436914184279 +755 97 training.batch_size 1.0 +755 97 training.label_smoothing 0.046842456872465056 +755 98 model.embedding_dim 2.0 +755 98 model.relation_dim 1.0 +755 98 optimizer.lr 0.004026992527369919 +755 98 training.batch_size 2.0 +755 98 training.label_smoothing 0.0021825882438721274 +755 99 model.embedding_dim 1.0 +755 99 model.relation_dim 0.0 +755 99 optimizer.lr 0.08426069173122512 +755 99 training.batch_size 1.0 +755 99 training.label_smoothing 0.005089407712583788 +755 100 model.embedding_dim 1.0 +755 100 model.relation_dim 1.0 +755 100 optimizer.lr 0.002041737722161803 +755 100 training.batch_size 0.0 +755 100 training.label_smoothing 0.020292301875528083 +755 1 dataset """kinships""" +755 1 model """transd""" +755 1 loss """crossentropy""" +755 1 regularizer """no""" +755 1 optimizer """adam""" +755 1 training_loop """lcwa""" +755 1 evaluator """rankbased""" +755 2 dataset """kinships""" +755 2 model """transd""" +755 2 loss """crossentropy""" +755 2 regularizer """no""" +755 2 optimizer """adam""" 
+755 2 training_loop """lcwa""" +755 2 evaluator """rankbased""" +755 3 dataset """kinships""" +755 3 model """transd""" +755 3 loss """crossentropy""" +755 3 regularizer """no""" +755 3 optimizer """adam""" +755 3 training_loop """lcwa""" +755 3 evaluator """rankbased""" +755 4 dataset """kinships""" +755 4 model """transd""" +755 4 loss """crossentropy""" +755 4 regularizer """no""" +755 4 optimizer """adam""" +755 4 training_loop """lcwa""" +755 4 evaluator """rankbased""" +755 5 dataset """kinships""" +755 5 model """transd""" +755 5 loss """crossentropy""" +755 5 regularizer """no""" +755 5 optimizer """adam""" +755 5 training_loop """lcwa""" +755 5 evaluator """rankbased""" +755 6 dataset """kinships""" +755 6 model """transd""" +755 6 loss """crossentropy""" +755 6 regularizer """no""" +755 6 optimizer """adam""" +755 6 training_loop """lcwa""" +755 6 evaluator """rankbased""" +755 7 dataset """kinships""" +755 7 model """transd""" +755 7 loss """crossentropy""" +755 7 regularizer """no""" +755 7 optimizer """adam""" +755 7 training_loop """lcwa""" +755 7 evaluator """rankbased""" +755 8 dataset """kinships""" +755 8 model """transd""" +755 8 loss """crossentropy""" +755 8 regularizer """no""" +755 8 optimizer """adam""" +755 8 training_loop """lcwa""" +755 8 evaluator """rankbased""" +755 9 dataset """kinships""" +755 9 model """transd""" +755 9 loss """crossentropy""" +755 9 regularizer """no""" +755 9 optimizer """adam""" +755 9 training_loop """lcwa""" +755 9 evaluator """rankbased""" +755 10 dataset """kinships""" +755 10 model """transd""" +755 10 loss """crossentropy""" +755 10 regularizer """no""" +755 10 optimizer """adam""" +755 10 training_loop """lcwa""" +755 10 evaluator """rankbased""" +755 11 dataset """kinships""" +755 11 model """transd""" +755 11 loss """crossentropy""" +755 11 regularizer """no""" +755 11 optimizer """adam""" +755 11 training_loop """lcwa""" +755 11 evaluator """rankbased""" +755 12 dataset """kinships""" +755 12 model 
"""transd""" +755 12 loss """crossentropy""" +755 12 regularizer """no""" +755 12 optimizer """adam""" +755 12 training_loop """lcwa""" +755 12 evaluator """rankbased""" +755 13 dataset """kinships""" +755 13 model """transd""" +755 13 loss """crossentropy""" +755 13 regularizer """no""" +755 13 optimizer """adam""" +755 13 training_loop """lcwa""" +755 13 evaluator """rankbased""" +755 14 dataset """kinships""" +755 14 model """transd""" +755 14 loss """crossentropy""" +755 14 regularizer """no""" +755 14 optimizer """adam""" +755 14 training_loop """lcwa""" +755 14 evaluator """rankbased""" +755 15 dataset """kinships""" +755 15 model """transd""" +755 15 loss """crossentropy""" +755 15 regularizer """no""" +755 15 optimizer """adam""" +755 15 training_loop """lcwa""" +755 15 evaluator """rankbased""" +755 16 dataset """kinships""" +755 16 model """transd""" +755 16 loss """crossentropy""" +755 16 regularizer """no""" +755 16 optimizer """adam""" +755 16 training_loop """lcwa""" +755 16 evaluator """rankbased""" +755 17 dataset """kinships""" +755 17 model """transd""" +755 17 loss """crossentropy""" +755 17 regularizer """no""" +755 17 optimizer """adam""" +755 17 training_loop """lcwa""" +755 17 evaluator """rankbased""" +755 18 dataset """kinships""" +755 18 model """transd""" +755 18 loss """crossentropy""" +755 18 regularizer """no""" +755 18 optimizer """adam""" +755 18 training_loop """lcwa""" +755 18 evaluator """rankbased""" +755 19 dataset """kinships""" +755 19 model """transd""" +755 19 loss """crossentropy""" +755 19 regularizer """no""" +755 19 optimizer """adam""" +755 19 training_loop """lcwa""" +755 19 evaluator """rankbased""" +755 20 dataset """kinships""" +755 20 model """transd""" +755 20 loss """crossentropy""" +755 20 regularizer """no""" +755 20 optimizer """adam""" +755 20 training_loop """lcwa""" +755 20 evaluator """rankbased""" +755 21 dataset """kinships""" +755 21 model """transd""" +755 21 loss """crossentropy""" +755 21 regularizer 
"""no""" +755 21 optimizer """adam""" +755 21 training_loop """lcwa""" +755 21 evaluator """rankbased""" +755 22 dataset """kinships""" +755 22 model """transd""" +755 22 loss """crossentropy""" +755 22 regularizer """no""" +755 22 optimizer """adam""" +755 22 training_loop """lcwa""" +755 22 evaluator """rankbased""" +755 23 dataset """kinships""" +755 23 model """transd""" +755 23 loss """crossentropy""" +755 23 regularizer """no""" +755 23 optimizer """adam""" +755 23 training_loop """lcwa""" +755 23 evaluator """rankbased""" +755 24 dataset """kinships""" +755 24 model """transd""" +755 24 loss """crossentropy""" +755 24 regularizer """no""" +755 24 optimizer """adam""" +755 24 training_loop """lcwa""" +755 24 evaluator """rankbased""" +755 25 dataset """kinships""" +755 25 model """transd""" +755 25 loss """crossentropy""" +755 25 regularizer """no""" +755 25 optimizer """adam""" +755 25 training_loop """lcwa""" +755 25 evaluator """rankbased""" +755 26 dataset """kinships""" +755 26 model """transd""" +755 26 loss """crossentropy""" +755 26 regularizer """no""" +755 26 optimizer """adam""" +755 26 training_loop """lcwa""" +755 26 evaluator """rankbased""" +755 27 dataset """kinships""" +755 27 model """transd""" +755 27 loss """crossentropy""" +755 27 regularizer """no""" +755 27 optimizer """adam""" +755 27 training_loop """lcwa""" +755 27 evaluator """rankbased""" +755 28 dataset """kinships""" +755 28 model """transd""" +755 28 loss """crossentropy""" +755 28 regularizer """no""" +755 28 optimizer """adam""" +755 28 training_loop """lcwa""" +755 28 evaluator """rankbased""" +755 29 dataset """kinships""" +755 29 model """transd""" +755 29 loss """crossentropy""" +755 29 regularizer """no""" +755 29 optimizer """adam""" +755 29 training_loop """lcwa""" +755 29 evaluator """rankbased""" +755 30 dataset """kinships""" +755 30 model """transd""" +755 30 loss """crossentropy""" +755 30 regularizer """no""" +755 30 optimizer """adam""" +755 30 training_loop 
"""lcwa""" +755 30 evaluator """rankbased""" +755 31 dataset """kinships""" +755 31 model """transd""" +755 31 loss """crossentropy""" +755 31 regularizer """no""" +755 31 optimizer """adam""" +755 31 training_loop """lcwa""" +755 31 evaluator """rankbased""" +755 32 dataset """kinships""" +755 32 model """transd""" +755 32 loss """crossentropy""" +755 32 regularizer """no""" +755 32 optimizer """adam""" +755 32 training_loop """lcwa""" +755 32 evaluator """rankbased""" +755 33 dataset """kinships""" +755 33 model """transd""" +755 33 loss """crossentropy""" +755 33 regularizer """no""" +755 33 optimizer """adam""" +755 33 training_loop """lcwa""" +755 33 evaluator """rankbased""" +755 34 dataset """kinships""" +755 34 model """transd""" +755 34 loss """crossentropy""" +755 34 regularizer """no""" +755 34 optimizer """adam""" +755 34 training_loop """lcwa""" +755 34 evaluator """rankbased""" +755 35 dataset """kinships""" +755 35 model """transd""" +755 35 loss """crossentropy""" +755 35 regularizer """no""" +755 35 optimizer """adam""" +755 35 training_loop """lcwa""" +755 35 evaluator """rankbased""" +755 36 dataset """kinships""" +755 36 model """transd""" +755 36 loss """crossentropy""" +755 36 regularizer """no""" +755 36 optimizer """adam""" +755 36 training_loop """lcwa""" +755 36 evaluator """rankbased""" +755 37 dataset """kinships""" +755 37 model """transd""" +755 37 loss """crossentropy""" +755 37 regularizer """no""" +755 37 optimizer """adam""" +755 37 training_loop """lcwa""" +755 37 evaluator """rankbased""" +755 38 dataset """kinships""" +755 38 model """transd""" +755 38 loss """crossentropy""" +755 38 regularizer """no""" +755 38 optimizer """adam""" +755 38 training_loop """lcwa""" +755 38 evaluator """rankbased""" +755 39 dataset """kinships""" +755 39 model """transd""" +755 39 loss """crossentropy""" +755 39 regularizer """no""" +755 39 optimizer """adam""" +755 39 training_loop """lcwa""" +755 39 evaluator """rankbased""" +755 40 dataset 
"""kinships""" +755 40 model """transd""" +755 40 loss """crossentropy""" +755 40 regularizer """no""" +755 40 optimizer """adam""" +755 40 training_loop """lcwa""" +755 40 evaluator """rankbased""" +755 41 dataset """kinships""" +755 41 model """transd""" +755 41 loss """crossentropy""" +755 41 regularizer """no""" +755 41 optimizer """adam""" +755 41 training_loop """lcwa""" +755 41 evaluator """rankbased""" +755 42 dataset """kinships""" +755 42 model """transd""" +755 42 loss """crossentropy""" +755 42 regularizer """no""" +755 42 optimizer """adam""" +755 42 training_loop """lcwa""" +755 42 evaluator """rankbased""" +755 43 dataset """kinships""" +755 43 model """transd""" +755 43 loss """crossentropy""" +755 43 regularizer """no""" +755 43 optimizer """adam""" +755 43 training_loop """lcwa""" +755 43 evaluator """rankbased""" +755 44 dataset """kinships""" +755 44 model """transd""" +755 44 loss """crossentropy""" +755 44 regularizer """no""" +755 44 optimizer """adam""" +755 44 training_loop """lcwa""" +755 44 evaluator """rankbased""" +755 45 dataset """kinships""" +755 45 model """transd""" +755 45 loss """crossentropy""" +755 45 regularizer """no""" +755 45 optimizer """adam""" +755 45 training_loop """lcwa""" +755 45 evaluator """rankbased""" +755 46 dataset """kinships""" +755 46 model """transd""" +755 46 loss """crossentropy""" +755 46 regularizer """no""" +755 46 optimizer """adam""" +755 46 training_loop """lcwa""" +755 46 evaluator """rankbased""" +755 47 dataset """kinships""" +755 47 model """transd""" +755 47 loss """crossentropy""" +755 47 regularizer """no""" +755 47 optimizer """adam""" +755 47 training_loop """lcwa""" +755 47 evaluator """rankbased""" +755 48 dataset """kinships""" +755 48 model """transd""" +755 48 loss """crossentropy""" +755 48 regularizer """no""" +755 48 optimizer """adam""" +755 48 training_loop """lcwa""" +755 48 evaluator """rankbased""" +755 49 dataset """kinships""" +755 49 model """transd""" +755 49 loss 
"""crossentropy""" +755 49 regularizer """no""" +755 49 optimizer """adam""" +755 49 training_loop """lcwa""" +755 49 evaluator """rankbased""" +755 50 dataset """kinships""" +755 50 model """transd""" +755 50 loss """crossentropy""" +755 50 regularizer """no""" +755 50 optimizer """adam""" +755 50 training_loop """lcwa""" +755 50 evaluator """rankbased""" +755 51 dataset """kinships""" +755 51 model """transd""" +755 51 loss """crossentropy""" +755 51 regularizer """no""" +755 51 optimizer """adam""" +755 51 training_loop """lcwa""" +755 51 evaluator """rankbased""" +755 52 dataset """kinships""" +755 52 model """transd""" +755 52 loss """crossentropy""" +755 52 regularizer """no""" +755 52 optimizer """adam""" +755 52 training_loop """lcwa""" +755 52 evaluator """rankbased""" +755 53 dataset """kinships""" +755 53 model """transd""" +755 53 loss """crossentropy""" +755 53 regularizer """no""" +755 53 optimizer """adam""" +755 53 training_loop """lcwa""" +755 53 evaluator """rankbased""" +755 54 dataset """kinships""" +755 54 model """transd""" +755 54 loss """crossentropy""" +755 54 regularizer """no""" +755 54 optimizer """adam""" +755 54 training_loop """lcwa""" +755 54 evaluator """rankbased""" +755 55 dataset """kinships""" +755 55 model """transd""" +755 55 loss """crossentropy""" +755 55 regularizer """no""" +755 55 optimizer """adam""" +755 55 training_loop """lcwa""" +755 55 evaluator """rankbased""" +755 56 dataset """kinships""" +755 56 model """transd""" +755 56 loss """crossentropy""" +755 56 regularizer """no""" +755 56 optimizer """adam""" +755 56 training_loop """lcwa""" +755 56 evaluator """rankbased""" +755 57 dataset """kinships""" +755 57 model """transd""" +755 57 loss """crossentropy""" +755 57 regularizer """no""" +755 57 optimizer """adam""" +755 57 training_loop """lcwa""" +755 57 evaluator """rankbased""" +755 58 dataset """kinships""" +755 58 model """transd""" +755 58 loss """crossentropy""" +755 58 regularizer """no""" +755 58 
optimizer """adam""" +755 58 training_loop """lcwa""" +755 58 evaluator """rankbased""" +755 59 dataset """kinships""" +755 59 model """transd""" +755 59 loss """crossentropy""" +755 59 regularizer """no""" +755 59 optimizer """adam""" +755 59 training_loop """lcwa""" +755 59 evaluator """rankbased""" +755 60 dataset """kinships""" +755 60 model """transd""" +755 60 loss """crossentropy""" +755 60 regularizer """no""" +755 60 optimizer """adam""" +755 60 training_loop """lcwa""" +755 60 evaluator """rankbased""" +755 61 dataset """kinships""" +755 61 model """transd""" +755 61 loss """crossentropy""" +755 61 regularizer """no""" +755 61 optimizer """adam""" +755 61 training_loop """lcwa""" +755 61 evaluator """rankbased""" +755 62 dataset """kinships""" +755 62 model """transd""" +755 62 loss """crossentropy""" +755 62 regularizer """no""" +755 62 optimizer """adam""" +755 62 training_loop """lcwa""" +755 62 evaluator """rankbased""" +755 63 dataset """kinships""" +755 63 model """transd""" +755 63 loss """crossentropy""" +755 63 regularizer """no""" +755 63 optimizer """adam""" +755 63 training_loop """lcwa""" +755 63 evaluator """rankbased""" +755 64 dataset """kinships""" +755 64 model """transd""" +755 64 loss """crossentropy""" +755 64 regularizer """no""" +755 64 optimizer """adam""" +755 64 training_loop """lcwa""" +755 64 evaluator """rankbased""" +755 65 dataset """kinships""" +755 65 model """transd""" +755 65 loss """crossentropy""" +755 65 regularizer """no""" +755 65 optimizer """adam""" +755 65 training_loop """lcwa""" +755 65 evaluator """rankbased""" +755 66 dataset """kinships""" +755 66 model """transd""" +755 66 loss """crossentropy""" +755 66 regularizer """no""" +755 66 optimizer """adam""" +755 66 training_loop """lcwa""" +755 66 evaluator """rankbased""" +755 67 dataset """kinships""" +755 67 model """transd""" +755 67 loss """crossentropy""" +755 67 regularizer """no""" +755 67 optimizer """adam""" +755 67 training_loop """lcwa""" +755 67 
evaluator """rankbased""" +755 68 dataset """kinships""" +755 68 model """transd""" +755 68 loss """crossentropy""" +755 68 regularizer """no""" +755 68 optimizer """adam""" +755 68 training_loop """lcwa""" +755 68 evaluator """rankbased""" +755 69 dataset """kinships""" +755 69 model """transd""" +755 69 loss """crossentropy""" +755 69 regularizer """no""" +755 69 optimizer """adam""" +755 69 training_loop """lcwa""" +755 69 evaluator """rankbased""" +755 70 dataset """kinships""" +755 70 model """transd""" +755 70 loss """crossentropy""" +755 70 regularizer """no""" +755 70 optimizer """adam""" +755 70 training_loop """lcwa""" +755 70 evaluator """rankbased""" +755 71 dataset """kinships""" +755 71 model """transd""" +755 71 loss """crossentropy""" +755 71 regularizer """no""" +755 71 optimizer """adam""" +755 71 training_loop """lcwa""" +755 71 evaluator """rankbased""" +755 72 dataset """kinships""" +755 72 model """transd""" +755 72 loss """crossentropy""" +755 72 regularizer """no""" +755 72 optimizer """adam""" +755 72 training_loop """lcwa""" +755 72 evaluator """rankbased""" +755 73 dataset """kinships""" +755 73 model """transd""" +755 73 loss """crossentropy""" +755 73 regularizer """no""" +755 73 optimizer """adam""" +755 73 training_loop """lcwa""" +755 73 evaluator """rankbased""" +755 74 dataset """kinships""" +755 74 model """transd""" +755 74 loss """crossentropy""" +755 74 regularizer """no""" +755 74 optimizer """adam""" +755 74 training_loop """lcwa""" +755 74 evaluator """rankbased""" +755 75 dataset """kinships""" +755 75 model """transd""" +755 75 loss """crossentropy""" +755 75 regularizer """no""" +755 75 optimizer """adam""" +755 75 training_loop """lcwa""" +755 75 evaluator """rankbased""" +755 76 dataset """kinships""" +755 76 model """transd""" +755 76 loss """crossentropy""" +755 76 regularizer """no""" +755 76 optimizer """adam""" +755 76 training_loop """lcwa""" +755 76 evaluator """rankbased""" +755 77 dataset """kinships""" +755 77 
model """transd""" +755 77 loss """crossentropy""" +755 77 regularizer """no""" +755 77 optimizer """adam""" +755 77 training_loop """lcwa""" +755 77 evaluator """rankbased""" +755 78 dataset """kinships""" +755 78 model """transd""" +755 78 loss """crossentropy""" +755 78 regularizer """no""" +755 78 optimizer """adam""" +755 78 training_loop """lcwa""" +755 78 evaluator """rankbased""" +755 79 dataset """kinships""" +755 79 model """transd""" +755 79 loss """crossentropy""" +755 79 regularizer """no""" +755 79 optimizer """adam""" +755 79 training_loop """lcwa""" +755 79 evaluator """rankbased""" +755 80 dataset """kinships""" +755 80 model """transd""" +755 80 loss """crossentropy""" +755 80 regularizer """no""" +755 80 optimizer """adam""" +755 80 training_loop """lcwa""" +755 80 evaluator """rankbased""" +755 81 dataset """kinships""" +755 81 model """transd""" +755 81 loss """crossentropy""" +755 81 regularizer """no""" +755 81 optimizer """adam""" +755 81 training_loop """lcwa""" +755 81 evaluator """rankbased""" +755 82 dataset """kinships""" +755 82 model """transd""" +755 82 loss """crossentropy""" +755 82 regularizer """no""" +755 82 optimizer """adam""" +755 82 training_loop """lcwa""" +755 82 evaluator """rankbased""" +755 83 dataset """kinships""" +755 83 model """transd""" +755 83 loss """crossentropy""" +755 83 regularizer """no""" +755 83 optimizer """adam""" +755 83 training_loop """lcwa""" +755 83 evaluator """rankbased""" +755 84 dataset """kinships""" +755 84 model """transd""" +755 84 loss """crossentropy""" +755 84 regularizer """no""" +755 84 optimizer """adam""" +755 84 training_loop """lcwa""" +755 84 evaluator """rankbased""" +755 85 dataset """kinships""" +755 85 model """transd""" +755 85 loss """crossentropy""" +755 85 regularizer """no""" +755 85 optimizer """adam""" +755 85 training_loop """lcwa""" +755 85 evaluator """rankbased""" +755 86 dataset """kinships""" +755 86 model """transd""" +755 86 loss """crossentropy""" +755 86 
regularizer """no""" +755 86 optimizer """adam""" +755 86 training_loop """lcwa""" +755 86 evaluator """rankbased""" +755 87 dataset """kinships""" +755 87 model """transd""" +755 87 loss """crossentropy""" +755 87 regularizer """no""" +755 87 optimizer """adam""" +755 87 training_loop """lcwa""" +755 87 evaluator """rankbased""" +755 88 dataset """kinships""" +755 88 model """transd""" +755 88 loss """crossentropy""" +755 88 regularizer """no""" +755 88 optimizer """adam""" +755 88 training_loop """lcwa""" +755 88 evaluator """rankbased""" +755 89 dataset """kinships""" +755 89 model """transd""" +755 89 loss """crossentropy""" +755 89 regularizer """no""" +755 89 optimizer """adam""" +755 89 training_loop """lcwa""" +755 89 evaluator """rankbased""" +755 90 dataset """kinships""" +755 90 model """transd""" +755 90 loss """crossentropy""" +755 90 regularizer """no""" +755 90 optimizer """adam""" +755 90 training_loop """lcwa""" +755 90 evaluator """rankbased""" +755 91 dataset """kinships""" +755 91 model """transd""" +755 91 loss """crossentropy""" +755 91 regularizer """no""" +755 91 optimizer """adam""" +755 91 training_loop """lcwa""" +755 91 evaluator """rankbased""" +755 92 dataset """kinships""" +755 92 model """transd""" +755 92 loss """crossentropy""" +755 92 regularizer """no""" +755 92 optimizer """adam""" +755 92 training_loop """lcwa""" +755 92 evaluator """rankbased""" +755 93 dataset """kinships""" +755 93 model """transd""" +755 93 loss """crossentropy""" +755 93 regularizer """no""" +755 93 optimizer """adam""" +755 93 training_loop """lcwa""" +755 93 evaluator """rankbased""" +755 94 dataset """kinships""" +755 94 model """transd""" +755 94 loss """crossentropy""" +755 94 regularizer """no""" +755 94 optimizer """adam""" +755 94 training_loop """lcwa""" +755 94 evaluator """rankbased""" +755 95 dataset """kinships""" +755 95 model """transd""" +755 95 loss """crossentropy""" +755 95 regularizer """no""" +755 95 optimizer """adam""" +755 95 
training_loop """lcwa""" +755 95 evaluator """rankbased""" +755 96 dataset """kinships""" +755 96 model """transd""" +755 96 loss """crossentropy""" +755 96 regularizer """no""" +755 96 optimizer """adam""" +755 96 training_loop """lcwa""" +755 96 evaluator """rankbased""" +755 97 dataset """kinships""" +755 97 model """transd""" +755 97 loss """crossentropy""" +755 97 regularizer """no""" +755 97 optimizer """adam""" +755 97 training_loop """lcwa""" +755 97 evaluator """rankbased""" +755 98 dataset """kinships""" +755 98 model """transd""" +755 98 loss """crossentropy""" +755 98 regularizer """no""" +755 98 optimizer """adam""" +755 98 training_loop """lcwa""" +755 98 evaluator """rankbased""" +755 99 dataset """kinships""" +755 99 model """transd""" +755 99 loss """crossentropy""" +755 99 regularizer """no""" +755 99 optimizer """adam""" +755 99 training_loop """lcwa""" +755 99 evaluator """rankbased""" +755 100 dataset """kinships""" +755 100 model """transd""" +755 100 loss """crossentropy""" +755 100 regularizer """no""" +755 100 optimizer """adam""" +755 100 training_loop """lcwa""" +755 100 evaluator """rankbased""" +756 1 model.embedding_dim 1.0 +756 1 model.relation_dim 1.0 +756 1 optimizer.lr 0.001989979323812233 +756 1 training.batch_size 1.0 +756 1 training.label_smoothing 0.0013608360992556359 +756 2 model.embedding_dim 0.0 +756 2 model.relation_dim 1.0 +756 2 optimizer.lr 0.043916778266516326 +756 2 training.batch_size 1.0 +756 2 training.label_smoothing 0.004137805617455833 +756 3 model.embedding_dim 0.0 +756 3 model.relation_dim 2.0 +756 3 optimizer.lr 0.04793534389508952 +756 3 training.batch_size 2.0 +756 3 training.label_smoothing 0.820795117064871 +756 4 model.embedding_dim 2.0 +756 4 model.relation_dim 0.0 +756 4 optimizer.lr 0.018857893354871303 +756 4 training.batch_size 2.0 +756 4 training.label_smoothing 0.008347501350746238 +756 5 model.embedding_dim 2.0 +756 5 model.relation_dim 0.0 +756 5 optimizer.lr 0.02858456176907949 +756 5 
training.batch_size 0.0 +756 5 training.label_smoothing 0.0016872417258602637 +756 6 model.embedding_dim 1.0 +756 6 model.relation_dim 0.0 +756 6 optimizer.lr 0.0029514105796401347 +756 6 training.batch_size 0.0 +756 6 training.label_smoothing 0.356550718010471 +756 7 model.embedding_dim 2.0 +756 7 model.relation_dim 2.0 +756 7 optimizer.lr 0.0018271331958975717 +756 7 training.batch_size 1.0 +756 7 training.label_smoothing 0.029200223068341996 +756 8 model.embedding_dim 0.0 +756 8 model.relation_dim 0.0 +756 8 optimizer.lr 0.0070635836931505986 +756 8 training.batch_size 1.0 +756 8 training.label_smoothing 0.21285192143877493 +756 9 model.embedding_dim 2.0 +756 9 model.relation_dim 1.0 +756 9 optimizer.lr 0.015470376419738136 +756 9 training.batch_size 0.0 +756 9 training.label_smoothing 0.0035069975226743643 +756 10 model.embedding_dim 1.0 +756 10 model.relation_dim 0.0 +756 10 optimizer.lr 0.0021325993912344313 +756 10 training.batch_size 0.0 +756 10 training.label_smoothing 0.0010856855804585294 +756 11 model.embedding_dim 1.0 +756 11 model.relation_dim 0.0 +756 11 optimizer.lr 0.0067498928452934225 +756 11 training.batch_size 0.0 +756 11 training.label_smoothing 0.0016948663945194242 +756 12 model.embedding_dim 2.0 +756 12 model.relation_dim 2.0 +756 12 optimizer.lr 0.018069000134707004 +756 12 training.batch_size 0.0 +756 12 training.label_smoothing 0.19260693302071713 +756 13 model.embedding_dim 1.0 +756 13 model.relation_dim 2.0 +756 13 optimizer.lr 0.08143831752259457 +756 13 training.batch_size 1.0 +756 13 training.label_smoothing 0.07179525048269049 +756 14 model.embedding_dim 2.0 +756 14 model.relation_dim 2.0 +756 14 optimizer.lr 0.01764367154618085 +756 14 training.batch_size 1.0 +756 14 training.label_smoothing 0.0010805467892953935 +756 15 model.embedding_dim 2.0 +756 15 model.relation_dim 2.0 +756 15 optimizer.lr 0.02429022324025659 +756 15 training.batch_size 1.0 +756 15 training.label_smoothing 0.01549827319790096 +756 16 model.embedding_dim 2.0 
+756 16 model.relation_dim 2.0 +756 16 optimizer.lr 0.05142767261907339 +756 16 training.batch_size 2.0 +756 16 training.label_smoothing 0.30968562498042457 +756 17 model.embedding_dim 2.0 +756 17 model.relation_dim 1.0 +756 17 optimizer.lr 0.04472262189757376 +756 17 training.batch_size 0.0 +756 17 training.label_smoothing 0.009397905742428055 +756 18 model.embedding_dim 1.0 +756 18 model.relation_dim 0.0 +756 18 optimizer.lr 0.024575502718426862 +756 18 training.batch_size 1.0 +756 18 training.label_smoothing 0.05589845215547268 +756 19 model.embedding_dim 0.0 +756 19 model.relation_dim 0.0 +756 19 optimizer.lr 0.00568219236882535 +756 19 training.batch_size 1.0 +756 19 training.label_smoothing 0.007130070949939051 +756 20 model.embedding_dim 0.0 +756 20 model.relation_dim 2.0 +756 20 optimizer.lr 0.007571747777942523 +756 20 training.batch_size 0.0 +756 20 training.label_smoothing 0.27742277783219177 +756 21 model.embedding_dim 0.0 +756 21 model.relation_dim 2.0 +756 21 optimizer.lr 0.018766137755039203 +756 21 training.batch_size 1.0 +756 21 training.label_smoothing 0.024414957510962686 +756 22 model.embedding_dim 1.0 +756 22 model.relation_dim 0.0 +756 22 optimizer.lr 0.02015714950610464 +756 22 training.batch_size 1.0 +756 22 training.label_smoothing 0.004700341699813737 +756 23 model.embedding_dim 1.0 +756 23 model.relation_dim 2.0 +756 23 optimizer.lr 0.03466414971182401 +756 23 training.batch_size 0.0 +756 23 training.label_smoothing 0.0011854652051407062 +756 24 model.embedding_dim 2.0 +756 24 model.relation_dim 0.0 +756 24 optimizer.lr 0.0024476586508474243 +756 24 training.batch_size 0.0 +756 24 training.label_smoothing 0.38471221146119017 +756 25 model.embedding_dim 2.0 +756 25 model.relation_dim 2.0 +756 25 optimizer.lr 0.0036710061320514636 +756 25 training.batch_size 2.0 +756 25 training.label_smoothing 0.0020530308902048824 +756 26 model.embedding_dim 1.0 +756 26 model.relation_dim 2.0 +756 26 optimizer.lr 0.0736200465610559 +756 26 
training.batch_size 1.0 +756 26 training.label_smoothing 0.0012060889904980457 +756 27 model.embedding_dim 1.0 +756 27 model.relation_dim 1.0 +756 27 optimizer.lr 0.0050203767857120146 +756 27 training.batch_size 0.0 +756 27 training.label_smoothing 0.0014617072308203492 +756 28 model.embedding_dim 1.0 +756 28 model.relation_dim 2.0 +756 28 optimizer.lr 0.014644576679265296 +756 28 training.batch_size 0.0 +756 28 training.label_smoothing 0.008933545176566117 +756 29 model.embedding_dim 2.0 +756 29 model.relation_dim 2.0 +756 29 optimizer.lr 0.012284972357527109 +756 29 training.batch_size 2.0 +756 29 training.label_smoothing 0.026205098965972465 +756 30 model.embedding_dim 2.0 +756 30 model.relation_dim 1.0 +756 30 optimizer.lr 0.0030960305922252047 +756 30 training.batch_size 2.0 +756 30 training.label_smoothing 0.010517714727372682 +756 31 model.embedding_dim 1.0 +756 31 model.relation_dim 2.0 +756 31 optimizer.lr 0.016771297538185133 +756 31 training.batch_size 2.0 +756 31 training.label_smoothing 0.02264866473881437 +756 32 model.embedding_dim 1.0 +756 32 model.relation_dim 1.0 +756 32 optimizer.lr 0.03815985154646961 +756 32 training.batch_size 2.0 +756 32 training.label_smoothing 0.1265758425367667 +756 33 model.embedding_dim 0.0 +756 33 model.relation_dim 0.0 +756 33 optimizer.lr 0.0011995837651429338 +756 33 training.batch_size 1.0 +756 33 training.label_smoothing 0.28491378616491514 +756 34 model.embedding_dim 2.0 +756 34 model.relation_dim 0.0 +756 34 optimizer.lr 0.00985158854801548 +756 34 training.batch_size 2.0 +756 34 training.label_smoothing 0.010644070008556881 +756 35 model.embedding_dim 1.0 +756 35 model.relation_dim 1.0 +756 35 optimizer.lr 0.09776063202149951 +756 35 training.batch_size 2.0 +756 35 training.label_smoothing 0.7159723807724898 +756 36 model.embedding_dim 2.0 +756 36 model.relation_dim 1.0 +756 36 optimizer.lr 0.02171921935269042 +756 36 training.batch_size 1.0 +756 36 training.label_smoothing 0.8502782065855168 +756 37 
model.embedding_dim 1.0 +756 37 model.relation_dim 1.0 +756 37 optimizer.lr 0.02917507407431974 +756 37 training.batch_size 0.0 +756 37 training.label_smoothing 0.012788405055473911 +756 38 model.embedding_dim 2.0 +756 38 model.relation_dim 0.0 +756 38 optimizer.lr 0.00558942516433573 +756 38 training.batch_size 0.0 +756 38 training.label_smoothing 0.016472533197167138 +756 39 model.embedding_dim 1.0 +756 39 model.relation_dim 2.0 +756 39 optimizer.lr 0.003377252362856027 +756 39 training.batch_size 1.0 +756 39 training.label_smoothing 0.09025991416389997 +756 40 model.embedding_dim 1.0 +756 40 model.relation_dim 2.0 +756 40 optimizer.lr 0.028754349245442234 +756 40 training.batch_size 0.0 +756 40 training.label_smoothing 0.028812800132651036 +756 41 model.embedding_dim 2.0 +756 41 model.relation_dim 0.0 +756 41 optimizer.lr 0.01417124218119973 +756 41 training.batch_size 0.0 +756 41 training.label_smoothing 0.004481730814430178 +756 42 model.embedding_dim 2.0 +756 42 model.relation_dim 2.0 +756 42 optimizer.lr 0.008367693241066312 +756 42 training.batch_size 0.0 +756 42 training.label_smoothing 0.2912207454441774 +756 43 model.embedding_dim 0.0 +756 43 model.relation_dim 1.0 +756 43 optimizer.lr 0.04532765312867461 +756 43 training.batch_size 1.0 +756 43 training.label_smoothing 0.0058480204922709365 +756 44 model.embedding_dim 1.0 +756 44 model.relation_dim 1.0 +756 44 optimizer.lr 0.012383402704172475 +756 44 training.batch_size 1.0 +756 44 training.label_smoothing 0.12221482384712362 +756 45 model.embedding_dim 0.0 +756 45 model.relation_dim 2.0 +756 45 optimizer.lr 0.09132736277080794 +756 45 training.batch_size 2.0 +756 45 training.label_smoothing 0.8233971786491678 +756 46 model.embedding_dim 2.0 +756 46 model.relation_dim 0.0 +756 46 optimizer.lr 0.018370523450539446 +756 46 training.batch_size 2.0 +756 46 training.label_smoothing 0.0012072880736013808 +756 47 model.embedding_dim 2.0 +756 47 model.relation_dim 0.0 +756 47 optimizer.lr 0.0012994509380595833 
+756 47 training.batch_size 0.0 +756 47 training.label_smoothing 0.05166610511326583 +756 48 model.embedding_dim 1.0 +756 48 model.relation_dim 2.0 +756 48 optimizer.lr 0.06662066384560836 +756 48 training.batch_size 2.0 +756 48 training.label_smoothing 0.004240176010353487 +756 49 model.embedding_dim 0.0 +756 49 model.relation_dim 1.0 +756 49 optimizer.lr 0.008103191737786961 +756 49 training.batch_size 2.0 +756 49 training.label_smoothing 0.4479592356656648 +756 50 model.embedding_dim 1.0 +756 50 model.relation_dim 2.0 +756 50 optimizer.lr 0.0035996830890867586 +756 50 training.batch_size 2.0 +756 50 training.label_smoothing 0.5853558204062059 +756 51 model.embedding_dim 2.0 +756 51 model.relation_dim 0.0 +756 51 optimizer.lr 0.003588789739743749 +756 51 training.batch_size 2.0 +756 51 training.label_smoothing 0.10458482971052771 +756 52 model.embedding_dim 2.0 +756 52 model.relation_dim 1.0 +756 52 optimizer.lr 0.0018738195523772087 +756 52 training.batch_size 0.0 +756 52 training.label_smoothing 0.1540831824308243 +756 53 model.embedding_dim 1.0 +756 53 model.relation_dim 0.0 +756 53 optimizer.lr 0.0016373149298599923 +756 53 training.batch_size 2.0 +756 53 training.label_smoothing 0.19332869136581465 +756 54 model.embedding_dim 0.0 +756 54 model.relation_dim 0.0 +756 54 optimizer.lr 0.007939796897231453 +756 54 training.batch_size 2.0 +756 54 training.label_smoothing 0.1285696977558025 +756 55 model.embedding_dim 2.0 +756 55 model.relation_dim 0.0 +756 55 optimizer.lr 0.0016248323058473306 +756 55 training.batch_size 2.0 +756 55 training.label_smoothing 0.05623544130455594 +756 56 model.embedding_dim 2.0 +756 56 model.relation_dim 2.0 +756 56 optimizer.lr 0.01805240927024346 +756 56 training.batch_size 1.0 +756 56 training.label_smoothing 0.0023098557495871 +756 57 model.embedding_dim 1.0 +756 57 model.relation_dim 1.0 +756 57 optimizer.lr 0.001724601027196999 +756 57 training.batch_size 0.0 +756 57 training.label_smoothing 0.006512369403975683 +756 58 
model.embedding_dim 2.0 +756 58 model.relation_dim 2.0 +756 58 optimizer.lr 0.015856395689761105 +756 58 training.batch_size 2.0 +756 58 training.label_smoothing 0.5631702661855655 +756 59 model.embedding_dim 2.0 +756 59 model.relation_dim 1.0 +756 59 optimizer.lr 0.00412489862664369 +756 59 training.batch_size 1.0 +756 59 training.label_smoothing 0.6446513791369591 +756 60 model.embedding_dim 2.0 +756 60 model.relation_dim 2.0 +756 60 optimizer.lr 0.0010324521695765914 +756 60 training.batch_size 0.0 +756 60 training.label_smoothing 0.002325168109894106 +756 61 model.embedding_dim 2.0 +756 61 model.relation_dim 0.0 +756 61 optimizer.lr 0.006139853863984979 +756 61 training.batch_size 2.0 +756 61 training.label_smoothing 0.0011395716345203339 +756 62 model.embedding_dim 1.0 +756 62 model.relation_dim 0.0 +756 62 optimizer.lr 0.05590010576698267 +756 62 training.batch_size 0.0 +756 62 training.label_smoothing 0.001400811155028624 +756 63 model.embedding_dim 1.0 +756 63 model.relation_dim 1.0 +756 63 optimizer.lr 0.00560188498262829 +756 63 training.batch_size 2.0 +756 63 training.label_smoothing 0.5071992760843768 +756 64 model.embedding_dim 0.0 +756 64 model.relation_dim 2.0 +756 64 optimizer.lr 0.026523064497185275 +756 64 training.batch_size 0.0 +756 64 training.label_smoothing 0.011985421768871045 +756 65 model.embedding_dim 1.0 +756 65 model.relation_dim 0.0 +756 65 optimizer.lr 0.00928190720053467 +756 65 training.batch_size 0.0 +756 65 training.label_smoothing 0.031022857471180915 +756 66 model.embedding_dim 2.0 +756 66 model.relation_dim 0.0 +756 66 optimizer.lr 0.012883573048846651 +756 66 training.batch_size 0.0 +756 66 training.label_smoothing 0.07589240369405056 +756 67 model.embedding_dim 2.0 +756 67 model.relation_dim 1.0 +756 67 optimizer.lr 0.0766096444007647 +756 67 training.batch_size 0.0 +756 67 training.label_smoothing 0.0039955832376674175 +756 68 model.embedding_dim 0.0 +756 68 model.relation_dim 2.0 +756 68 optimizer.lr 0.012687367372281811 
+756 68 training.batch_size 2.0 +756 68 training.label_smoothing 0.9887514907449411 +756 69 model.embedding_dim 0.0 +756 69 model.relation_dim 0.0 +756 69 optimizer.lr 0.007767192115946603 +756 69 training.batch_size 1.0 +756 69 training.label_smoothing 0.0036427004377048698 +756 70 model.embedding_dim 1.0 +756 70 model.relation_dim 0.0 +756 70 optimizer.lr 0.004660503437653367 +756 70 training.batch_size 1.0 +756 70 training.label_smoothing 0.02298733629371651 +756 71 model.embedding_dim 1.0 +756 71 model.relation_dim 0.0 +756 71 optimizer.lr 0.0020151219110483595 +756 71 training.batch_size 2.0 +756 71 training.label_smoothing 0.5686613068115509 +756 72 model.embedding_dim 1.0 +756 72 model.relation_dim 2.0 +756 72 optimizer.lr 0.011981517347491468 +756 72 training.batch_size 0.0 +756 72 training.label_smoothing 0.04467482250323052 +756 73 model.embedding_dim 1.0 +756 73 model.relation_dim 0.0 +756 73 optimizer.lr 0.03299339732154393 +756 73 training.batch_size 2.0 +756 73 training.label_smoothing 0.04585103206841533 +756 74 model.embedding_dim 0.0 +756 74 model.relation_dim 1.0 +756 74 optimizer.lr 0.0011203964028348615 +756 74 training.batch_size 1.0 +756 74 training.label_smoothing 0.014524925426242227 +756 75 model.embedding_dim 0.0 +756 75 model.relation_dim 2.0 +756 75 optimizer.lr 0.0902584491232973 +756 75 training.batch_size 1.0 +756 75 training.label_smoothing 0.077263240273702 +756 76 model.embedding_dim 1.0 +756 76 model.relation_dim 2.0 +756 76 optimizer.lr 0.003837962685609794 +756 76 training.batch_size 2.0 +756 76 training.label_smoothing 0.05297157476222079 +756 77 model.embedding_dim 0.0 +756 77 model.relation_dim 2.0 +756 77 optimizer.lr 0.0017239659303374328 +756 77 training.batch_size 0.0 +756 77 training.label_smoothing 0.0058426388660558345 +756 78 model.embedding_dim 0.0 +756 78 model.relation_dim 2.0 +756 78 optimizer.lr 0.014255655687260548 +756 78 training.batch_size 1.0 +756 78 training.label_smoothing 0.08572123211506733 +756 79 
model.embedding_dim 1.0 +756 79 model.relation_dim 1.0 +756 79 optimizer.lr 0.006325539501682585 +756 79 training.batch_size 0.0 +756 79 training.label_smoothing 0.0014127120918385706 +756 80 model.embedding_dim 2.0 +756 80 model.relation_dim 1.0 +756 80 optimizer.lr 0.007599969193645645 +756 80 training.batch_size 0.0 +756 80 training.label_smoothing 0.7675359422601546 +756 81 model.embedding_dim 1.0 +756 81 model.relation_dim 0.0 +756 81 optimizer.lr 0.03198481466772297 +756 81 training.batch_size 1.0 +756 81 training.label_smoothing 0.10502625287811237 +756 82 model.embedding_dim 2.0 +756 82 model.relation_dim 2.0 +756 82 optimizer.lr 0.04046279855077127 +756 82 training.batch_size 2.0 +756 82 training.label_smoothing 0.8065138675465541 +756 83 model.embedding_dim 2.0 +756 83 model.relation_dim 2.0 +756 83 optimizer.lr 0.07675709418007097 +756 83 training.batch_size 2.0 +756 83 training.label_smoothing 0.031019773091715587 +756 84 model.embedding_dim 0.0 +756 84 model.relation_dim 1.0 +756 84 optimizer.lr 0.060424524306773454 +756 84 training.batch_size 1.0 +756 84 training.label_smoothing 0.5518174835661038 +756 85 model.embedding_dim 2.0 +756 85 model.relation_dim 1.0 +756 85 optimizer.lr 0.020468996987025135 +756 85 training.batch_size 0.0 +756 85 training.label_smoothing 0.0010970350477466117 +756 86 model.embedding_dim 1.0 +756 86 model.relation_dim 0.0 +756 86 optimizer.lr 0.022007618449238184 +756 86 training.batch_size 1.0 +756 86 training.label_smoothing 0.5053536201607073 +756 87 model.embedding_dim 2.0 +756 87 model.relation_dim 1.0 +756 87 optimizer.lr 0.0045922729329212235 +756 87 training.batch_size 2.0 +756 87 training.label_smoothing 0.35340635607515 +756 88 model.embedding_dim 2.0 +756 88 model.relation_dim 1.0 +756 88 optimizer.lr 0.03840619035848535 +756 88 training.batch_size 2.0 +756 88 training.label_smoothing 0.6697027681358672 +756 89 model.embedding_dim 1.0 +756 89 model.relation_dim 0.0 +756 89 optimizer.lr 0.032435689730268544 +756 89 
training.batch_size 0.0 +756 89 training.label_smoothing 0.004163024880825713 +756 90 model.embedding_dim 1.0 +756 90 model.relation_dim 2.0 +756 90 optimizer.lr 0.09192183382123868 +756 90 training.batch_size 0.0 +756 90 training.label_smoothing 0.32957405590414135 +756 91 model.embedding_dim 2.0 +756 91 model.relation_dim 1.0 +756 91 optimizer.lr 0.01976606951479267 +756 91 training.batch_size 1.0 +756 91 training.label_smoothing 0.07642674154117361 +756 92 model.embedding_dim 2.0 +756 92 model.relation_dim 1.0 +756 92 optimizer.lr 0.004978491328079165 +756 92 training.batch_size 1.0 +756 92 training.label_smoothing 0.3803897270470078 +756 93 model.embedding_dim 2.0 +756 93 model.relation_dim 0.0 +756 93 optimizer.lr 0.026018560498792382 +756 93 training.batch_size 0.0 +756 93 training.label_smoothing 0.0319539967483408 +756 94 model.embedding_dim 0.0 +756 94 model.relation_dim 0.0 +756 94 optimizer.lr 0.0164202451166976 +756 94 training.batch_size 2.0 +756 94 training.label_smoothing 0.018774301558619085 +756 95 model.embedding_dim 1.0 +756 95 model.relation_dim 1.0 +756 95 optimizer.lr 0.007403920188440649 +756 95 training.batch_size 1.0 +756 95 training.label_smoothing 0.021402448419364853 +756 96 model.embedding_dim 1.0 +756 96 model.relation_dim 1.0 +756 96 optimizer.lr 0.04904953035043253 +756 96 training.batch_size 1.0 +756 96 training.label_smoothing 0.232421313695741 +756 97 model.embedding_dim 1.0 +756 97 model.relation_dim 2.0 +756 97 optimizer.lr 0.007575984744984425 +756 97 training.batch_size 0.0 +756 97 training.label_smoothing 0.0021264004633348055 +756 98 model.embedding_dim 1.0 +756 98 model.relation_dim 2.0 +756 98 optimizer.lr 0.02575422497344201 +756 98 training.batch_size 2.0 +756 98 training.label_smoothing 0.054501514480421044 +756 99 model.embedding_dim 2.0 +756 99 model.relation_dim 1.0 +756 99 optimizer.lr 0.0010164582403454153 +756 99 training.batch_size 1.0 +756 99 training.label_smoothing 0.09480923827759745 +756 100 
model.embedding_dim 1.0 +756 100 model.relation_dim 2.0 +756 100 optimizer.lr 0.009330390265409173 +756 100 training.batch_size 2.0 +756 100 training.label_smoothing 0.013234507306118757 +756 1 dataset """kinships""" +756 1 model """transd""" +756 1 loss """crossentropy""" +756 1 regularizer """no""" +756 1 optimizer """adam""" +756 1 training_loop """lcwa""" +756 1 evaluator """rankbased""" +756 2 dataset """kinships""" +756 2 model """transd""" +756 2 loss """crossentropy""" +756 2 regularizer """no""" +756 2 optimizer """adam""" +756 2 training_loop """lcwa""" +756 2 evaluator """rankbased""" +756 3 dataset """kinships""" +756 3 model """transd""" +756 3 loss """crossentropy""" +756 3 regularizer """no""" +756 3 optimizer """adam""" +756 3 training_loop """lcwa""" +756 3 evaluator """rankbased""" +756 4 dataset """kinships""" +756 4 model """transd""" +756 4 loss """crossentropy""" +756 4 regularizer """no""" +756 4 optimizer """adam""" +756 4 training_loop """lcwa""" +756 4 evaluator """rankbased""" +756 5 dataset """kinships""" +756 5 model """transd""" +756 5 loss """crossentropy""" +756 5 regularizer """no""" +756 5 optimizer """adam""" +756 5 training_loop """lcwa""" +756 5 evaluator """rankbased""" +756 6 dataset """kinships""" +756 6 model """transd""" +756 6 loss """crossentropy""" +756 6 regularizer """no""" +756 6 optimizer """adam""" +756 6 training_loop """lcwa""" +756 6 evaluator """rankbased""" +756 7 dataset """kinships""" +756 7 model """transd""" +756 7 loss """crossentropy""" +756 7 regularizer """no""" +756 7 optimizer """adam""" +756 7 training_loop """lcwa""" +756 7 evaluator """rankbased""" +756 8 dataset """kinships""" +756 8 model """transd""" +756 8 loss """crossentropy""" +756 8 regularizer """no""" +756 8 optimizer """adam""" +756 8 training_loop """lcwa""" +756 8 evaluator """rankbased""" +756 9 dataset """kinships""" +756 9 model """transd""" +756 9 loss """crossentropy""" +756 9 regularizer """no""" +756 9 optimizer """adam""" +756 
9 training_loop """lcwa""" +756 9 evaluator """rankbased""" +756 10 dataset """kinships""" +756 10 model """transd""" +756 10 loss """crossentropy""" +756 10 regularizer """no""" +756 10 optimizer """adam""" +756 10 training_loop """lcwa""" +756 10 evaluator """rankbased""" +756 11 dataset """kinships""" +756 11 model """transd""" +756 11 loss """crossentropy""" +756 11 regularizer """no""" +756 11 optimizer """adam""" +756 11 training_loop """lcwa""" +756 11 evaluator """rankbased""" +756 12 dataset """kinships""" +756 12 model """transd""" +756 12 loss """crossentropy""" +756 12 regularizer """no""" +756 12 optimizer """adam""" +756 12 training_loop """lcwa""" +756 12 evaluator """rankbased""" +756 13 dataset """kinships""" +756 13 model """transd""" +756 13 loss """crossentropy""" +756 13 regularizer """no""" +756 13 optimizer """adam""" +756 13 training_loop """lcwa""" +756 13 evaluator """rankbased""" +756 14 dataset """kinships""" +756 14 model """transd""" +756 14 loss """crossentropy""" +756 14 regularizer """no""" +756 14 optimizer """adam""" +756 14 training_loop """lcwa""" +756 14 evaluator """rankbased""" +756 15 dataset """kinships""" +756 15 model """transd""" +756 15 loss """crossentropy""" +756 15 regularizer """no""" +756 15 optimizer """adam""" +756 15 training_loop """lcwa""" +756 15 evaluator """rankbased""" +756 16 dataset """kinships""" +756 16 model """transd""" +756 16 loss """crossentropy""" +756 16 regularizer """no""" +756 16 optimizer """adam""" +756 16 training_loop """lcwa""" +756 16 evaluator """rankbased""" +756 17 dataset """kinships""" +756 17 model """transd""" +756 17 loss """crossentropy""" +756 17 regularizer """no""" +756 17 optimizer """adam""" +756 17 training_loop """lcwa""" +756 17 evaluator """rankbased""" +756 18 dataset """kinships""" +756 18 model """transd""" +756 18 loss """crossentropy""" +756 18 regularizer """no""" +756 18 optimizer """adam""" +756 18 training_loop """lcwa""" +756 18 evaluator """rankbased""" +756 
19 dataset """kinships""" +756 19 model """transd""" +756 19 loss """crossentropy""" +756 19 regularizer """no""" +756 19 optimizer """adam""" +756 19 training_loop """lcwa""" +756 19 evaluator """rankbased""" +756 20 dataset """kinships""" +756 20 model """transd""" +756 20 loss """crossentropy""" +756 20 regularizer """no""" +756 20 optimizer """adam""" +756 20 training_loop """lcwa""" +756 20 evaluator """rankbased""" +756 21 dataset """kinships""" +756 21 model """transd""" +756 21 loss """crossentropy""" +756 21 regularizer """no""" +756 21 optimizer """adam""" +756 21 training_loop """lcwa""" +756 21 evaluator """rankbased""" +756 22 dataset """kinships""" +756 22 model """transd""" +756 22 loss """crossentropy""" +756 22 regularizer """no""" +756 22 optimizer """adam""" +756 22 training_loop """lcwa""" +756 22 evaluator """rankbased""" +756 23 dataset """kinships""" +756 23 model """transd""" +756 23 loss """crossentropy""" +756 23 regularizer """no""" +756 23 optimizer """adam""" +756 23 training_loop """lcwa""" +756 23 evaluator """rankbased""" +756 24 dataset """kinships""" +756 24 model """transd""" +756 24 loss """crossentropy""" +756 24 regularizer """no""" +756 24 optimizer """adam""" +756 24 training_loop """lcwa""" +756 24 evaluator """rankbased""" +756 25 dataset """kinships""" +756 25 model """transd""" +756 25 loss """crossentropy""" +756 25 regularizer """no""" +756 25 optimizer """adam""" +756 25 training_loop """lcwa""" +756 25 evaluator """rankbased""" +756 26 dataset """kinships""" +756 26 model """transd""" +756 26 loss """crossentropy""" +756 26 regularizer """no""" +756 26 optimizer """adam""" +756 26 training_loop """lcwa""" +756 26 evaluator """rankbased""" +756 27 dataset """kinships""" +756 27 model """transd""" +756 27 loss """crossentropy""" +756 27 regularizer """no""" +756 27 optimizer """adam""" +756 27 training_loop """lcwa""" +756 27 evaluator """rankbased""" +756 28 dataset """kinships""" +756 28 model """transd""" +756 28 
loss """crossentropy""" +756 28 regularizer """no""" +756 28 optimizer """adam""" +756 28 training_loop """lcwa""" +756 28 evaluator """rankbased""" +756 29 dataset """kinships""" +756 29 model """transd""" +756 29 loss """crossentropy""" +756 29 regularizer """no""" +756 29 optimizer """adam""" +756 29 training_loop """lcwa""" +756 29 evaluator """rankbased""" +756 30 dataset """kinships""" +756 30 model """transd""" +756 30 loss """crossentropy""" +756 30 regularizer """no""" +756 30 optimizer """adam""" +756 30 training_loop """lcwa""" +756 30 evaluator """rankbased""" +756 31 dataset """kinships""" +756 31 model """transd""" +756 31 loss """crossentropy""" +756 31 regularizer """no""" +756 31 optimizer """adam""" +756 31 training_loop """lcwa""" +756 31 evaluator """rankbased""" +756 32 dataset """kinships""" +756 32 model """transd""" +756 32 loss """crossentropy""" +756 32 regularizer """no""" +756 32 optimizer """adam""" +756 32 training_loop """lcwa""" +756 32 evaluator """rankbased""" +756 33 dataset """kinships""" +756 33 model """transd""" +756 33 loss """crossentropy""" +756 33 regularizer """no""" +756 33 optimizer """adam""" +756 33 training_loop """lcwa""" +756 33 evaluator """rankbased""" +756 34 dataset """kinships""" +756 34 model """transd""" +756 34 loss """crossentropy""" +756 34 regularizer """no""" +756 34 optimizer """adam""" +756 34 training_loop """lcwa""" +756 34 evaluator """rankbased""" +756 35 dataset """kinships""" +756 35 model """transd""" +756 35 loss """crossentropy""" +756 35 regularizer """no""" +756 35 optimizer """adam""" +756 35 training_loop """lcwa""" +756 35 evaluator """rankbased""" +756 36 dataset """kinships""" +756 36 model """transd""" +756 36 loss """crossentropy""" +756 36 regularizer """no""" +756 36 optimizer """adam""" +756 36 training_loop """lcwa""" +756 36 evaluator """rankbased""" +756 37 dataset """kinships""" +756 37 model """transd""" +756 37 loss """crossentropy""" +756 37 regularizer """no""" +756 37 
optimizer """adam""" +756 37 training_loop """lcwa""" +756 37 evaluator """rankbased""" +756 38 dataset """kinships""" +756 38 model """transd""" +756 38 loss """crossentropy""" +756 38 regularizer """no""" +756 38 optimizer """adam""" +756 38 training_loop """lcwa""" +756 38 evaluator """rankbased""" +756 39 dataset """kinships""" +756 39 model """transd""" +756 39 loss """crossentropy""" +756 39 regularizer """no""" +756 39 optimizer """adam""" +756 39 training_loop """lcwa""" +756 39 evaluator """rankbased""" +756 40 dataset """kinships""" +756 40 model """transd""" +756 40 loss """crossentropy""" +756 40 regularizer """no""" +756 40 optimizer """adam""" +756 40 training_loop """lcwa""" +756 40 evaluator """rankbased""" +756 41 dataset """kinships""" +756 41 model """transd""" +756 41 loss """crossentropy""" +756 41 regularizer """no""" +756 41 optimizer """adam""" +756 41 training_loop """lcwa""" +756 41 evaluator """rankbased""" +756 42 dataset """kinships""" +756 42 model """transd""" +756 42 loss """crossentropy""" +756 42 regularizer """no""" +756 42 optimizer """adam""" +756 42 training_loop """lcwa""" +756 42 evaluator """rankbased""" +756 43 dataset """kinships""" +756 43 model """transd""" +756 43 loss """crossentropy""" +756 43 regularizer """no""" +756 43 optimizer """adam""" +756 43 training_loop """lcwa""" +756 43 evaluator """rankbased""" +756 44 dataset """kinships""" +756 44 model """transd""" +756 44 loss """crossentropy""" +756 44 regularizer """no""" +756 44 optimizer """adam""" +756 44 training_loop """lcwa""" +756 44 evaluator """rankbased""" +756 45 dataset """kinships""" +756 45 model """transd""" +756 45 loss """crossentropy""" +756 45 regularizer """no""" +756 45 optimizer """adam""" +756 45 training_loop """lcwa""" +756 45 evaluator """rankbased""" +756 46 dataset """kinships""" +756 46 model """transd""" +756 46 loss """crossentropy""" +756 46 regularizer """no""" +756 46 optimizer """adam""" +756 46 training_loop """lcwa""" +756 46 
evaluator """rankbased""" +756 47 dataset """kinships""" +756 47 model """transd""" +756 47 loss """crossentropy""" +756 47 regularizer """no""" +756 47 optimizer """adam""" +756 47 training_loop """lcwa""" +756 47 evaluator """rankbased""" +756 48 dataset """kinships""" +756 48 model """transd""" +756 48 loss """crossentropy""" +756 48 regularizer """no""" +756 48 optimizer """adam""" +756 48 training_loop """lcwa""" +756 48 evaluator """rankbased""" +756 49 dataset """kinships""" +756 49 model """transd""" +756 49 loss """crossentropy""" +756 49 regularizer """no""" +756 49 optimizer """adam""" +756 49 training_loop """lcwa""" +756 49 evaluator """rankbased""" +756 50 dataset """kinships""" +756 50 model """transd""" +756 50 loss """crossentropy""" +756 50 regularizer """no""" +756 50 optimizer """adam""" +756 50 training_loop """lcwa""" +756 50 evaluator """rankbased""" +756 51 dataset """kinships""" +756 51 model """transd""" +756 51 loss """crossentropy""" +756 51 regularizer """no""" +756 51 optimizer """adam""" +756 51 training_loop """lcwa""" +756 51 evaluator """rankbased""" +756 52 dataset """kinships""" +756 52 model """transd""" +756 52 loss """crossentropy""" +756 52 regularizer """no""" +756 52 optimizer """adam""" +756 52 training_loop """lcwa""" +756 52 evaluator """rankbased""" +756 53 dataset """kinships""" +756 53 model """transd""" +756 53 loss """crossentropy""" +756 53 regularizer """no""" +756 53 optimizer """adam""" +756 53 training_loop """lcwa""" +756 53 evaluator """rankbased""" +756 54 dataset """kinships""" +756 54 model """transd""" +756 54 loss """crossentropy""" +756 54 regularizer """no""" +756 54 optimizer """adam""" +756 54 training_loop """lcwa""" +756 54 evaluator """rankbased""" +756 55 dataset """kinships""" +756 55 model """transd""" +756 55 loss """crossentropy""" +756 55 regularizer """no""" +756 55 optimizer """adam""" +756 55 training_loop """lcwa""" +756 55 evaluator """rankbased""" +756 56 dataset """kinships""" +756 56 
model """transd""" +756 56 loss """crossentropy""" +756 56 regularizer """no""" +756 56 optimizer """adam""" +756 56 training_loop """lcwa""" +756 56 evaluator """rankbased""" +756 57 dataset """kinships""" +756 57 model """transd""" +756 57 loss """crossentropy""" +756 57 regularizer """no""" +756 57 optimizer """adam""" +756 57 training_loop """lcwa""" +756 57 evaluator """rankbased""" +756 58 dataset """kinships""" +756 58 model """transd""" +756 58 loss """crossentropy""" +756 58 regularizer """no""" +756 58 optimizer """adam""" +756 58 training_loop """lcwa""" +756 58 evaluator """rankbased""" +756 59 dataset """kinships""" +756 59 model """transd""" +756 59 loss """crossentropy""" +756 59 regularizer """no""" +756 59 optimizer """adam""" +756 59 training_loop """lcwa""" +756 59 evaluator """rankbased""" +756 60 dataset """kinships""" +756 60 model """transd""" +756 60 loss """crossentropy""" +756 60 regularizer """no""" +756 60 optimizer """adam""" +756 60 training_loop """lcwa""" +756 60 evaluator """rankbased""" +756 61 dataset """kinships""" +756 61 model """transd""" +756 61 loss """crossentropy""" +756 61 regularizer """no""" +756 61 optimizer """adam""" +756 61 training_loop """lcwa""" +756 61 evaluator """rankbased""" +756 62 dataset """kinships""" +756 62 model """transd""" +756 62 loss """crossentropy""" +756 62 regularizer """no""" +756 62 optimizer """adam""" +756 62 training_loop """lcwa""" +756 62 evaluator """rankbased""" +756 63 dataset """kinships""" +756 63 model """transd""" +756 63 loss """crossentropy""" +756 63 regularizer """no""" +756 63 optimizer """adam""" +756 63 training_loop """lcwa""" +756 63 evaluator """rankbased""" +756 64 dataset """kinships""" +756 64 model """transd""" +756 64 loss """crossentropy""" +756 64 regularizer """no""" +756 64 optimizer """adam""" +756 64 training_loop """lcwa""" +756 64 evaluator """rankbased""" +756 65 dataset """kinships""" +756 65 model """transd""" +756 65 loss """crossentropy""" +756 65 
regularizer """no""" +756 65 optimizer """adam""" +756 65 training_loop """lcwa""" +756 65 evaluator """rankbased""" +756 66 dataset """kinships""" +756 66 model """transd""" +756 66 loss """crossentropy""" +756 66 regularizer """no""" +756 66 optimizer """adam""" +756 66 training_loop """lcwa""" +756 66 evaluator """rankbased""" +756 67 dataset """kinships""" +756 67 model """transd""" +756 67 loss """crossentropy""" +756 67 regularizer """no""" +756 67 optimizer """adam""" +756 67 training_loop """lcwa""" +756 67 evaluator """rankbased""" +756 68 dataset """kinships""" +756 68 model """transd""" +756 68 loss """crossentropy""" +756 68 regularizer """no""" +756 68 optimizer """adam""" +756 68 training_loop """lcwa""" +756 68 evaluator """rankbased""" +756 69 dataset """kinships""" +756 69 model """transd""" +756 69 loss """crossentropy""" +756 69 regularizer """no""" +756 69 optimizer """adam""" +756 69 training_loop """lcwa""" +756 69 evaluator """rankbased""" +756 70 dataset """kinships""" +756 70 model """transd""" +756 70 loss """crossentropy""" +756 70 regularizer """no""" +756 70 optimizer """adam""" +756 70 training_loop """lcwa""" +756 70 evaluator """rankbased""" +756 71 dataset """kinships""" +756 71 model """transd""" +756 71 loss """crossentropy""" +756 71 regularizer """no""" +756 71 optimizer """adam""" +756 71 training_loop """lcwa""" +756 71 evaluator """rankbased""" +756 72 dataset """kinships""" +756 72 model """transd""" +756 72 loss """crossentropy""" +756 72 regularizer """no""" +756 72 optimizer """adam""" +756 72 training_loop """lcwa""" +756 72 evaluator """rankbased""" +756 73 dataset """kinships""" +756 73 model """transd""" +756 73 loss """crossentropy""" +756 73 regularizer """no""" +756 73 optimizer """adam""" +756 73 training_loop """lcwa""" +756 73 evaluator """rankbased""" +756 74 dataset """kinships""" +756 74 model """transd""" +756 74 loss """crossentropy""" +756 74 regularizer """no""" +756 74 optimizer """adam""" +756 74 
training_loop """lcwa""" +756 74 evaluator """rankbased""" +756 75 dataset """kinships""" +756 75 model """transd""" +756 75 loss """crossentropy""" +756 75 regularizer """no""" +756 75 optimizer """adam""" +756 75 training_loop """lcwa""" +756 75 evaluator """rankbased""" +756 76 dataset """kinships""" +756 76 model """transd""" +756 76 loss """crossentropy""" +756 76 regularizer """no""" +756 76 optimizer """adam""" +756 76 training_loop """lcwa""" +756 76 evaluator """rankbased""" +756 77 dataset """kinships""" +756 77 model """transd""" +756 77 loss """crossentropy""" +756 77 regularizer """no""" +756 77 optimizer """adam""" +756 77 training_loop """lcwa""" +756 77 evaluator """rankbased""" +756 78 dataset """kinships""" +756 78 model """transd""" +756 78 loss """crossentropy""" +756 78 regularizer """no""" +756 78 optimizer """adam""" +756 78 training_loop """lcwa""" +756 78 evaluator """rankbased""" +756 79 dataset """kinships""" +756 79 model """transd""" +756 79 loss """crossentropy""" +756 79 regularizer """no""" +756 79 optimizer """adam""" +756 79 training_loop """lcwa""" +756 79 evaluator """rankbased""" +756 80 dataset """kinships""" +756 80 model """transd""" +756 80 loss """crossentropy""" +756 80 regularizer """no""" +756 80 optimizer """adam""" +756 80 training_loop """lcwa""" +756 80 evaluator """rankbased""" +756 81 dataset """kinships""" +756 81 model """transd""" +756 81 loss """crossentropy""" +756 81 regularizer """no""" +756 81 optimizer """adam""" +756 81 training_loop """lcwa""" +756 81 evaluator """rankbased""" +756 82 dataset """kinships""" +756 82 model """transd""" +756 82 loss """crossentropy""" +756 82 regularizer """no""" +756 82 optimizer """adam""" +756 82 training_loop """lcwa""" +756 82 evaluator """rankbased""" +756 83 dataset """kinships""" +756 83 model """transd""" +756 83 loss """crossentropy""" +756 83 regularizer """no""" +756 83 optimizer """adam""" +756 83 training_loop """lcwa""" +756 83 evaluator """rankbased""" +756 
84 dataset """kinships""" +756 84 model """transd""" +756 84 loss """crossentropy""" +756 84 regularizer """no""" +756 84 optimizer """adam""" +756 84 training_loop """lcwa""" +756 84 evaluator """rankbased""" +756 85 dataset """kinships""" +756 85 model """transd""" +756 85 loss """crossentropy""" +756 85 regularizer """no""" +756 85 optimizer """adam""" +756 85 training_loop """lcwa""" +756 85 evaluator """rankbased""" +756 86 dataset """kinships""" +756 86 model """transd""" +756 86 loss """crossentropy""" +756 86 regularizer """no""" +756 86 optimizer """adam""" +756 86 training_loop """lcwa""" +756 86 evaluator """rankbased""" +756 87 dataset """kinships""" +756 87 model """transd""" +756 87 loss """crossentropy""" +756 87 regularizer """no""" +756 87 optimizer """adam""" +756 87 training_loop """lcwa""" +756 87 evaluator """rankbased""" +756 88 dataset """kinships""" +756 88 model """transd""" +756 88 loss """crossentropy""" +756 88 regularizer """no""" +756 88 optimizer """adam""" +756 88 training_loop """lcwa""" +756 88 evaluator """rankbased""" +756 89 dataset """kinships""" +756 89 model """transd""" +756 89 loss """crossentropy""" +756 89 regularizer """no""" +756 89 optimizer """adam""" +756 89 training_loop """lcwa""" +756 89 evaluator """rankbased""" +756 90 dataset """kinships""" +756 90 model """transd""" +756 90 loss """crossentropy""" +756 90 regularizer """no""" +756 90 optimizer """adam""" +756 90 training_loop """lcwa""" +756 90 evaluator """rankbased""" +756 91 dataset """kinships""" +756 91 model """transd""" +756 91 loss """crossentropy""" +756 91 regularizer """no""" +756 91 optimizer """adam""" +756 91 training_loop """lcwa""" +756 91 evaluator """rankbased""" +756 92 dataset """kinships""" +756 92 model """transd""" +756 92 loss """crossentropy""" +756 92 regularizer """no""" +756 92 optimizer """adam""" +756 92 training_loop """lcwa""" +756 92 evaluator """rankbased""" +756 93 dataset """kinships""" +756 93 model """transd""" +756 93 
loss """crossentropy""" +756 93 regularizer """no""" +756 93 optimizer """adam""" +756 93 training_loop """lcwa""" +756 93 evaluator """rankbased""" +756 94 dataset """kinships""" +756 94 model """transd""" +756 94 loss """crossentropy""" +756 94 regularizer """no""" +756 94 optimizer """adam""" +756 94 training_loop """lcwa""" +756 94 evaluator """rankbased""" +756 95 dataset """kinships""" +756 95 model """transd""" +756 95 loss """crossentropy""" +756 95 regularizer """no""" +756 95 optimizer """adam""" +756 95 training_loop """lcwa""" +756 95 evaluator """rankbased""" +756 96 dataset """kinships""" +756 96 model """transd""" +756 96 loss """crossentropy""" +756 96 regularizer """no""" +756 96 optimizer """adam""" +756 96 training_loop """lcwa""" +756 96 evaluator """rankbased""" +756 97 dataset """kinships""" +756 97 model """transd""" +756 97 loss """crossentropy""" +756 97 regularizer """no""" +756 97 optimizer """adam""" +756 97 training_loop """lcwa""" +756 97 evaluator """rankbased""" +756 98 dataset """kinships""" +756 98 model """transd""" +756 98 loss """crossentropy""" +756 98 regularizer """no""" +756 98 optimizer """adam""" +756 98 training_loop """lcwa""" +756 98 evaluator """rankbased""" +756 99 dataset """kinships""" +756 99 model """transd""" +756 99 loss """crossentropy""" +756 99 regularizer """no""" +756 99 optimizer """adam""" +756 99 training_loop """lcwa""" +756 99 evaluator """rankbased""" +756 100 dataset """kinships""" +756 100 model """transd""" +756 100 loss """crossentropy""" +756 100 regularizer """no""" +756 100 optimizer """adam""" +756 100 training_loop """lcwa""" +756 100 evaluator """rankbased""" +757 1 model.embedding_dim 2.0 +757 1 model.relation_dim 1.0 +757 1 optimizer.lr 0.009828756368638846 +757 1 training.batch_size 0.0 +757 1 training.label_smoothing 0.9976908496523974 +757 2 model.embedding_dim 1.0 +757 2 model.relation_dim 1.0 +757 2 optimizer.lr 0.005336086350712613 +757 2 training.batch_size 2.0 +757 2 
training.label_smoothing 0.009137356626811874 +757 3 model.embedding_dim 0.0 +757 3 model.relation_dim 1.0 +757 3 optimizer.lr 0.004383173279329251 +757 3 training.batch_size 1.0 +757 3 training.label_smoothing 0.09706555270240681 +757 4 model.embedding_dim 2.0 +757 4 model.relation_dim 1.0 +757 4 optimizer.lr 0.004379868402595908 +757 4 training.batch_size 1.0 +757 4 training.label_smoothing 0.3790995039606477 +757 5 model.embedding_dim 1.0 +757 5 model.relation_dim 1.0 +757 5 optimizer.lr 0.019977312522212187 +757 5 training.batch_size 2.0 +757 5 training.label_smoothing 0.11364471858919897 +757 6 model.embedding_dim 1.0 +757 6 model.relation_dim 1.0 +757 6 optimizer.lr 0.0022846068462980666 +757 6 training.batch_size 2.0 +757 6 training.label_smoothing 0.02819563261125524 +757 7 model.embedding_dim 1.0 +757 7 model.relation_dim 1.0 +757 7 optimizer.lr 0.0013896007363008412 +757 7 training.batch_size 2.0 +757 7 training.label_smoothing 0.3410827020323739 +757 8 model.embedding_dim 0.0 +757 8 model.relation_dim 2.0 +757 8 optimizer.lr 0.006661989368658413 +757 8 training.batch_size 1.0 +757 8 training.label_smoothing 0.10427573886642397 +757 9 model.embedding_dim 2.0 +757 9 model.relation_dim 1.0 +757 9 optimizer.lr 0.04495520811071659 +757 9 training.batch_size 2.0 +757 9 training.label_smoothing 0.004447054251677138 +757 10 model.embedding_dim 2.0 +757 10 model.relation_dim 0.0 +757 10 optimizer.lr 0.0014467105296618711 +757 10 training.batch_size 2.0 +757 10 training.label_smoothing 0.011234738140826607 +757 11 model.embedding_dim 0.0 +757 11 model.relation_dim 1.0 +757 11 optimizer.lr 0.004978953297297549 +757 11 training.batch_size 0.0 +757 11 training.label_smoothing 0.7837307380280766 +757 12 model.embedding_dim 2.0 +757 12 model.relation_dim 2.0 +757 12 optimizer.lr 0.0033961491659591685 +757 12 training.batch_size 2.0 +757 12 training.label_smoothing 0.05082787157296052 +757 13 model.embedding_dim 1.0 +757 13 model.relation_dim 0.0 +757 13 optimizer.lr 
0.006605447452209474 +757 13 training.batch_size 2.0 +757 13 training.label_smoothing 0.004116802342873251 +757 14 model.embedding_dim 1.0 +757 14 model.relation_dim 1.0 +757 14 optimizer.lr 0.004743525795138773 +757 14 training.batch_size 0.0 +757 14 training.label_smoothing 0.008641007321303745 +757 15 model.embedding_dim 0.0 +757 15 model.relation_dim 0.0 +757 15 optimizer.lr 0.006198588967193918 +757 15 training.batch_size 0.0 +757 15 training.label_smoothing 0.012167241674033154 +757 16 model.embedding_dim 1.0 +757 16 model.relation_dim 2.0 +757 16 optimizer.lr 0.004077899338244584 +757 16 training.batch_size 0.0 +757 16 training.label_smoothing 0.00994584168725638 +757 17 model.embedding_dim 2.0 +757 17 model.relation_dim 2.0 +757 17 optimizer.lr 0.008665365943035625 +757 17 training.batch_size 0.0 +757 17 training.label_smoothing 0.0018525204971356817 +757 18 model.embedding_dim 0.0 +757 18 model.relation_dim 0.0 +757 18 optimizer.lr 0.0038731688731304076 +757 18 training.batch_size 0.0 +757 18 training.label_smoothing 0.004367176207246945 +757 19 model.embedding_dim 0.0 +757 19 model.relation_dim 0.0 +757 19 optimizer.lr 0.0152452638808767 +757 19 training.batch_size 2.0 +757 19 training.label_smoothing 0.136647609526689 +757 20 model.embedding_dim 2.0 +757 20 model.relation_dim 0.0 +757 20 optimizer.lr 0.006864231354533422 +757 20 training.batch_size 1.0 +757 20 training.label_smoothing 0.0367698415118742 +757 21 model.embedding_dim 0.0 +757 21 model.relation_dim 2.0 +757 21 optimizer.lr 0.010116825830884286 +757 21 training.batch_size 1.0 +757 21 training.label_smoothing 0.16720847858093113 +757 22 model.embedding_dim 0.0 +757 22 model.relation_dim 1.0 +757 22 optimizer.lr 0.008450842030785017 +757 22 training.batch_size 1.0 +757 22 training.label_smoothing 0.011061694496684057 +757 23 model.embedding_dim 0.0 +757 23 model.relation_dim 2.0 +757 23 optimizer.lr 0.07883800009582545 +757 23 training.batch_size 2.0 +757 23 training.label_smoothing 
0.014843955094746475 +757 24 model.embedding_dim 0.0 +757 24 model.relation_dim 0.0 +757 24 optimizer.lr 0.01692817588718486 +757 24 training.batch_size 2.0 +757 24 training.label_smoothing 0.691711989192951 +757 25 model.embedding_dim 2.0 +757 25 model.relation_dim 2.0 +757 25 optimizer.lr 0.004308411082994072 +757 25 training.batch_size 1.0 +757 25 training.label_smoothing 0.03916489368890048 +757 26 model.embedding_dim 0.0 +757 26 model.relation_dim 0.0 +757 26 optimizer.lr 0.015126282829729908 +757 26 training.batch_size 2.0 +757 26 training.label_smoothing 0.023564134432083272 +757 27 model.embedding_dim 1.0 +757 27 model.relation_dim 1.0 +757 27 optimizer.lr 0.0671171879467949 +757 27 training.batch_size 2.0 +757 27 training.label_smoothing 0.010891409097732778 +757 28 model.embedding_dim 2.0 +757 28 model.relation_dim 2.0 +757 28 optimizer.lr 0.06935996105970488 +757 28 training.batch_size 1.0 +757 28 training.label_smoothing 0.0059719794261505746 +757 29 model.embedding_dim 0.0 +757 29 model.relation_dim 2.0 +757 29 optimizer.lr 0.007308352716483397 +757 29 training.batch_size 2.0 +757 29 training.label_smoothing 0.026215641524326764 +757 30 model.embedding_dim 2.0 +757 30 model.relation_dim 2.0 +757 30 optimizer.lr 0.003338004159990456 +757 30 training.batch_size 0.0 +757 30 training.label_smoothing 0.057516171320557294 +757 31 model.embedding_dim 0.0 +757 31 model.relation_dim 1.0 +757 31 optimizer.lr 0.03719916016811506 +757 31 training.batch_size 0.0 +757 31 training.label_smoothing 0.009019214439887747 +757 32 model.embedding_dim 1.0 +757 32 model.relation_dim 1.0 +757 32 optimizer.lr 0.030906326095292116 +757 32 training.batch_size 2.0 +757 32 training.label_smoothing 0.001670509307950067 +757 33 model.embedding_dim 1.0 +757 33 model.relation_dim 2.0 +757 33 optimizer.lr 0.0018317313554283873 +757 33 training.batch_size 0.0 +757 33 training.label_smoothing 0.008022711250232968 +757 34 model.embedding_dim 1.0 +757 34 model.relation_dim 0.0 +757 34 
optimizer.lr 0.07402302921931804 +757 34 training.batch_size 1.0 +757 34 training.label_smoothing 0.002288457725724891 +757 35 model.embedding_dim 1.0 +757 35 model.relation_dim 1.0 +757 35 optimizer.lr 0.014732951783175961 +757 35 training.batch_size 2.0 +757 35 training.label_smoothing 0.1668711252877409 +757 36 model.embedding_dim 2.0 +757 36 model.relation_dim 1.0 +757 36 optimizer.lr 0.0012962330008106434 +757 36 training.batch_size 0.0 +757 36 training.label_smoothing 0.5756943224478988 +757 37 model.embedding_dim 1.0 +757 37 model.relation_dim 0.0 +757 37 optimizer.lr 0.0053594189129944425 +757 37 training.batch_size 1.0 +757 37 training.label_smoothing 0.09107155708962293 +757 38 model.embedding_dim 1.0 +757 38 model.relation_dim 0.0 +757 38 optimizer.lr 0.0017538716509023452 +757 38 training.batch_size 2.0 +757 38 training.label_smoothing 0.5506318007655334 +757 39 model.embedding_dim 2.0 +757 39 model.relation_dim 2.0 +757 39 optimizer.lr 0.0022949042520682174 +757 39 training.batch_size 2.0 +757 39 training.label_smoothing 0.7918086398311432 +757 40 model.embedding_dim 2.0 +757 40 model.relation_dim 1.0 +757 40 optimizer.lr 0.0100297336203312 +757 40 training.batch_size 2.0 +757 40 training.label_smoothing 0.017291238942087905 +757 41 model.embedding_dim 0.0 +757 41 model.relation_dim 1.0 +757 41 optimizer.lr 0.09383769920248619 +757 41 training.batch_size 2.0 +757 41 training.label_smoothing 0.0017839695885367602 +757 42 model.embedding_dim 0.0 +757 42 model.relation_dim 0.0 +757 42 optimizer.lr 0.0013251143591406563 +757 42 training.batch_size 1.0 +757 42 training.label_smoothing 0.0028846324124319448 +757 43 model.embedding_dim 2.0 +757 43 model.relation_dim 2.0 +757 43 optimizer.lr 0.003469933174361488 +757 43 training.batch_size 1.0 +757 43 training.label_smoothing 0.0038844860223183277 +757 44 model.embedding_dim 1.0 +757 44 model.relation_dim 2.0 +757 44 optimizer.lr 0.08539529810742294 +757 44 training.batch_size 0.0 +757 44 
training.label_smoothing 0.5802773644792355 +757 45 model.embedding_dim 2.0 +757 45 model.relation_dim 2.0 +757 45 optimizer.lr 0.020336887117553503 +757 45 training.batch_size 1.0 +757 45 training.label_smoothing 0.022771296235426588 +757 46 model.embedding_dim 0.0 +757 46 model.relation_dim 1.0 +757 46 optimizer.lr 0.0010042902878884005 +757 46 training.batch_size 0.0 +757 46 training.label_smoothing 0.5670468367345171 +757 47 model.embedding_dim 1.0 +757 47 model.relation_dim 0.0 +757 47 optimizer.lr 0.0025002896832875786 +757 47 training.batch_size 1.0 +757 47 training.label_smoothing 0.0027185805063503275 +757 48 model.embedding_dim 0.0 +757 48 model.relation_dim 2.0 +757 48 optimizer.lr 0.00653239734976055 +757 48 training.batch_size 1.0 +757 48 training.label_smoothing 0.4158838086317109 +757 49 model.embedding_dim 0.0 +757 49 model.relation_dim 0.0 +757 49 optimizer.lr 0.013736733536698735 +757 49 training.batch_size 0.0 +757 49 training.label_smoothing 0.17784117081604459 +757 50 model.embedding_dim 1.0 +757 50 model.relation_dim 2.0 +757 50 optimizer.lr 0.017396127861903685 +757 50 training.batch_size 2.0 +757 50 training.label_smoothing 0.045913038359259216 +757 51 model.embedding_dim 0.0 +757 51 model.relation_dim 1.0 +757 51 optimizer.lr 0.017304150082105278 +757 51 training.batch_size 2.0 +757 51 training.label_smoothing 0.0020049208331331747 +757 52 model.embedding_dim 0.0 +757 52 model.relation_dim 2.0 +757 52 optimizer.lr 0.019730283293286053 +757 52 training.batch_size 1.0 +757 52 training.label_smoothing 0.06934453188948526 +757 53 model.embedding_dim 2.0 +757 53 model.relation_dim 2.0 +757 53 optimizer.lr 0.004669048778216655 +757 53 training.batch_size 1.0 +757 53 training.label_smoothing 0.09136785137020147 +757 54 model.embedding_dim 2.0 +757 54 model.relation_dim 0.0 +757 54 optimizer.lr 0.020562920096127178 +757 54 training.batch_size 1.0 +757 54 training.label_smoothing 0.008169671995464017 +757 55 model.embedding_dim 1.0 +757 55 
model.relation_dim 2.0 +757 55 optimizer.lr 0.0011170974070972295 +757 55 training.batch_size 1.0 +757 55 training.label_smoothing 0.00834649036159367 +757 56 model.embedding_dim 2.0 +757 56 model.relation_dim 0.0 +757 56 optimizer.lr 0.003908945976738849 +757 56 training.batch_size 1.0 +757 56 training.label_smoothing 0.28941917910455117 +757 57 model.embedding_dim 2.0 +757 57 model.relation_dim 1.0 +757 57 optimizer.lr 0.0020042939451413917 +757 57 training.batch_size 1.0 +757 57 training.label_smoothing 0.0034686809823025987 +757 58 model.embedding_dim 0.0 +757 58 model.relation_dim 1.0 +757 58 optimizer.lr 0.001393816911679729 +757 58 training.batch_size 2.0 +757 58 training.label_smoothing 0.29903849395203413 +757 59 model.embedding_dim 1.0 +757 59 model.relation_dim 2.0 +757 59 optimizer.lr 0.00918802845676046 +757 59 training.batch_size 2.0 +757 59 training.label_smoothing 0.00813040572153551 +757 60 model.embedding_dim 0.0 +757 60 model.relation_dim 0.0 +757 60 optimizer.lr 0.003094022873208277 +757 60 training.batch_size 1.0 +757 60 training.label_smoothing 0.046239566717139725 +757 61 model.embedding_dim 0.0 +757 61 model.relation_dim 0.0 +757 61 optimizer.lr 0.011064491076541753 +757 61 training.batch_size 2.0 +757 61 training.label_smoothing 0.010578587575334978 +757 62 model.embedding_dim 1.0 +757 62 model.relation_dim 1.0 +757 62 optimizer.lr 0.05577766797587612 +757 62 training.batch_size 1.0 +757 62 training.label_smoothing 0.003335165026006121 +757 63 model.embedding_dim 1.0 +757 63 model.relation_dim 2.0 +757 63 optimizer.lr 0.012446595941957735 +757 63 training.batch_size 2.0 +757 63 training.label_smoothing 0.257531993162072 +757 64 model.embedding_dim 0.0 +757 64 model.relation_dim 2.0 +757 64 optimizer.lr 0.0029919218328278044 +757 64 training.batch_size 0.0 +757 64 training.label_smoothing 0.0765766321046897 +757 65 model.embedding_dim 1.0 +757 65 model.relation_dim 2.0 +757 65 optimizer.lr 0.01433959486892799 +757 65 training.batch_size 2.0 
+757 65 training.label_smoothing 0.009028053439592683 +757 66 model.embedding_dim 0.0 +757 66 model.relation_dim 2.0 +757 66 optimizer.lr 0.001084130178497232 +757 66 training.batch_size 1.0 +757 66 training.label_smoothing 0.01902671462635192 +757 67 model.embedding_dim 0.0 +757 67 model.relation_dim 2.0 +757 67 optimizer.lr 0.002927848616253947 +757 67 training.batch_size 0.0 +757 67 training.label_smoothing 0.04385476040147939 +757 68 model.embedding_dim 0.0 +757 68 model.relation_dim 0.0 +757 68 optimizer.lr 0.040842037871208776 +757 68 training.batch_size 2.0 +757 68 training.label_smoothing 0.0558975637722056 +757 69 model.embedding_dim 0.0 +757 69 model.relation_dim 0.0 +757 69 optimizer.lr 0.028833818252375392 +757 69 training.batch_size 0.0 +757 69 training.label_smoothing 0.32439310323586823 +757 70 model.embedding_dim 0.0 +757 70 model.relation_dim 0.0 +757 70 optimizer.lr 0.0013484663545623773 +757 70 training.batch_size 2.0 +757 70 training.label_smoothing 0.0014585205052159097 +757 71 model.embedding_dim 2.0 +757 71 model.relation_dim 2.0 +757 71 optimizer.lr 0.04669146229441718 +757 71 training.batch_size 1.0 +757 71 training.label_smoothing 0.11817334152168024 +757 72 model.embedding_dim 1.0 +757 72 model.relation_dim 0.0 +757 72 optimizer.lr 0.00937681536842061 +757 72 training.batch_size 0.0 +757 72 training.label_smoothing 0.040798071531318196 +757 73 model.embedding_dim 0.0 +757 73 model.relation_dim 0.0 +757 73 optimizer.lr 0.001863642448186246 +757 73 training.batch_size 0.0 +757 73 training.label_smoothing 0.01837456688414266 +757 74 model.embedding_dim 2.0 +757 74 model.relation_dim 2.0 +757 74 optimizer.lr 0.007257089563311205 +757 74 training.batch_size 1.0 +757 74 training.label_smoothing 0.04961350607193646 +757 75 model.embedding_dim 0.0 +757 75 model.relation_dim 2.0 +757 75 optimizer.lr 0.011828638652404116 +757 75 training.batch_size 2.0 +757 75 training.label_smoothing 0.32112074605830937 +757 76 model.embedding_dim 0.0 +757 76 
model.relation_dim 0.0 +757 76 optimizer.lr 0.027262857082087234 +757 76 training.batch_size 0.0 +757 76 training.label_smoothing 0.0020365309449120487 +757 77 model.embedding_dim 1.0 +757 77 model.relation_dim 0.0 +757 77 optimizer.lr 0.009784322180631431 +757 77 training.batch_size 0.0 +757 77 training.label_smoothing 0.05863059573682852 +757 78 model.embedding_dim 0.0 +757 78 model.relation_dim 2.0 +757 78 optimizer.lr 0.01215314024897596 +757 78 training.batch_size 2.0 +757 78 training.label_smoothing 0.005221663351145766 +757 79 model.embedding_dim 2.0 +757 79 model.relation_dim 0.0 +757 79 optimizer.lr 0.0010395231331291786 +757 79 training.batch_size 2.0 +757 79 training.label_smoothing 0.08624126179740724 +757 80 model.embedding_dim 2.0 +757 80 model.relation_dim 0.0 +757 80 optimizer.lr 0.0013659483644698478 +757 80 training.batch_size 0.0 +757 80 training.label_smoothing 0.47364278843246466 +757 81 model.embedding_dim 2.0 +757 81 model.relation_dim 0.0 +757 81 optimizer.lr 0.027649183061258402 +757 81 training.batch_size 1.0 +757 81 training.label_smoothing 0.9378483093054496 +757 82 model.embedding_dim 1.0 +757 82 model.relation_dim 2.0 +757 82 optimizer.lr 0.004533305157517073 +757 82 training.batch_size 0.0 +757 82 training.label_smoothing 0.8660287662342199 +757 83 model.embedding_dim 2.0 +757 83 model.relation_dim 2.0 +757 83 optimizer.lr 0.0022891217659953785 +757 83 training.batch_size 0.0 +757 83 training.label_smoothing 0.12516860172251945 +757 84 model.embedding_dim 1.0 +757 84 model.relation_dim 1.0 +757 84 optimizer.lr 0.004165013773018883 +757 84 training.batch_size 1.0 +757 84 training.label_smoothing 0.023381303426122997 +757 85 model.embedding_dim 0.0 +757 85 model.relation_dim 2.0 +757 85 optimizer.lr 0.011333223495459236 +757 85 training.batch_size 1.0 +757 85 training.label_smoothing 0.011044528476040524 +757 86 model.embedding_dim 0.0 +757 86 model.relation_dim 2.0 +757 86 optimizer.lr 0.05732977227719488 +757 86 training.batch_size 
0.0 +757 86 training.label_smoothing 0.002412491926213496 +757 87 model.embedding_dim 0.0 +757 87 model.relation_dim 0.0 +757 87 optimizer.lr 0.011596788947757041 +757 87 training.batch_size 2.0 +757 87 training.label_smoothing 0.20840959315719126 +757 88 model.embedding_dim 0.0 +757 88 model.relation_dim 2.0 +757 88 optimizer.lr 0.07579729195113202 +757 88 training.batch_size 1.0 +757 88 training.label_smoothing 0.07825592028674154 +757 89 model.embedding_dim 0.0 +757 89 model.relation_dim 1.0 +757 89 optimizer.lr 0.007400818683835481 +757 89 training.batch_size 1.0 +757 89 training.label_smoothing 0.012246016686317168 +757 90 model.embedding_dim 1.0 +757 90 model.relation_dim 1.0 +757 90 optimizer.lr 0.005623497783896512 +757 90 training.batch_size 2.0 +757 90 training.label_smoothing 0.16138076108215585 +757 91 model.embedding_dim 2.0 +757 91 model.relation_dim 0.0 +757 91 optimizer.lr 0.001058770925909261 +757 91 training.batch_size 0.0 +757 91 training.label_smoothing 0.41545745089745467 +757 92 model.embedding_dim 1.0 +757 92 model.relation_dim 1.0 +757 92 optimizer.lr 0.012590464893993049 +757 92 training.batch_size 1.0 +757 92 training.label_smoothing 0.00363036276824538 +757 93 model.embedding_dim 2.0 +757 93 model.relation_dim 0.0 +757 93 optimizer.lr 0.022223502309738203 +757 93 training.batch_size 0.0 +757 93 training.label_smoothing 0.08520203001561444 +757 94 model.embedding_dim 1.0 +757 94 model.relation_dim 0.0 +757 94 optimizer.lr 0.01776128572547025 +757 94 training.batch_size 0.0 +757 94 training.label_smoothing 0.003680409397069642 +757 95 model.embedding_dim 0.0 +757 95 model.relation_dim 2.0 +757 95 optimizer.lr 0.005423850285986365 +757 95 training.batch_size 0.0 +757 95 training.label_smoothing 0.001104234716007383 +757 96 model.embedding_dim 2.0 +757 96 model.relation_dim 2.0 +757 96 optimizer.lr 0.06985902506159107 +757 96 training.batch_size 1.0 +757 96 training.label_smoothing 0.6114863559049906 +757 97 model.embedding_dim 2.0 +757 97 
model.relation_dim 0.0 +757 97 optimizer.lr 0.05541176310825413 +757 97 training.batch_size 1.0 +757 97 training.label_smoothing 0.012942725391242228 +757 98 model.embedding_dim 0.0 +757 98 model.relation_dim 0.0 +757 98 optimizer.lr 0.05700624805223457 +757 98 training.batch_size 2.0 +757 98 training.label_smoothing 0.0015734454623046775 +757 99 model.embedding_dim 1.0 +757 99 model.relation_dim 0.0 +757 99 optimizer.lr 0.0053845218797256515 +757 99 training.batch_size 1.0 +757 99 training.label_smoothing 0.02703766727038122 +757 100 model.embedding_dim 0.0 +757 100 model.relation_dim 1.0 +757 100 optimizer.lr 0.007755925873900502 +757 100 training.batch_size 0.0 +757 100 training.label_smoothing 0.006591114727676946 +757 1 dataset """kinships""" +757 1 model """transd""" +757 1 loss """bceaftersigmoid""" +757 1 regularizer """no""" +757 1 optimizer """adam""" +757 1 training_loop """lcwa""" +757 1 evaluator """rankbased""" +757 2 dataset """kinships""" +757 2 model """transd""" +757 2 loss """bceaftersigmoid""" +757 2 regularizer """no""" +757 2 optimizer """adam""" +757 2 training_loop """lcwa""" +757 2 evaluator """rankbased""" +757 3 dataset """kinships""" +757 3 model """transd""" +757 3 loss """bceaftersigmoid""" +757 3 regularizer """no""" +757 3 optimizer """adam""" +757 3 training_loop """lcwa""" +757 3 evaluator """rankbased""" +757 4 dataset """kinships""" +757 4 model """transd""" +757 4 loss """bceaftersigmoid""" +757 4 regularizer """no""" +757 4 optimizer """adam""" +757 4 training_loop """lcwa""" +757 4 evaluator """rankbased""" +757 5 dataset """kinships""" +757 5 model """transd""" +757 5 loss """bceaftersigmoid""" +757 5 regularizer """no""" +757 5 optimizer """adam""" +757 5 training_loop """lcwa""" +757 5 evaluator """rankbased""" +757 6 dataset """kinships""" +757 6 model """transd""" +757 6 loss """bceaftersigmoid""" +757 6 regularizer """no""" +757 6 optimizer """adam""" +757 6 training_loop """lcwa""" +757 6 evaluator """rankbased""" +757 
7 dataset """kinships""" +757 7 model """transd""" +757 7 loss """bceaftersigmoid""" +757 7 regularizer """no""" +757 7 optimizer """adam""" +757 7 training_loop """lcwa""" +757 7 evaluator """rankbased""" +757 8 dataset """kinships""" +757 8 model """transd""" +757 8 loss """bceaftersigmoid""" +757 8 regularizer """no""" +757 8 optimizer """adam""" +757 8 training_loop """lcwa""" +757 8 evaluator """rankbased""" +757 9 dataset """kinships""" +757 9 model """transd""" +757 9 loss """bceaftersigmoid""" +757 9 regularizer """no""" +757 9 optimizer """adam""" +757 9 training_loop """lcwa""" +757 9 evaluator """rankbased""" +757 10 dataset """kinships""" +757 10 model """transd""" +757 10 loss """bceaftersigmoid""" +757 10 regularizer """no""" +757 10 optimizer """adam""" +757 10 training_loop """lcwa""" +757 10 evaluator """rankbased""" +757 11 dataset """kinships""" +757 11 model """transd""" +757 11 loss """bceaftersigmoid""" +757 11 regularizer """no""" +757 11 optimizer """adam""" +757 11 training_loop """lcwa""" +757 11 evaluator """rankbased""" +757 12 dataset """kinships""" +757 12 model """transd""" +757 12 loss """bceaftersigmoid""" +757 12 regularizer """no""" +757 12 optimizer """adam""" +757 12 training_loop """lcwa""" +757 12 evaluator """rankbased""" +757 13 dataset """kinships""" +757 13 model """transd""" +757 13 loss """bceaftersigmoid""" +757 13 regularizer """no""" +757 13 optimizer """adam""" +757 13 training_loop """lcwa""" +757 13 evaluator """rankbased""" +757 14 dataset """kinships""" +757 14 model """transd""" +757 14 loss """bceaftersigmoid""" +757 14 regularizer """no""" +757 14 optimizer """adam""" +757 14 training_loop """lcwa""" +757 14 evaluator """rankbased""" +757 15 dataset """kinships""" +757 15 model """transd""" +757 15 loss """bceaftersigmoid""" +757 15 regularizer """no""" +757 15 optimizer """adam""" +757 15 training_loop """lcwa""" +757 15 evaluator """rankbased""" +757 16 dataset """kinships""" +757 16 model """transd""" +757 
16 loss """bceaftersigmoid""" +757 16 regularizer """no""" +757 16 optimizer """adam""" +757 16 training_loop """lcwa""" +757 16 evaluator """rankbased""" +757 17 dataset """kinships""" +757 17 model """transd""" +757 17 loss """bceaftersigmoid""" +757 17 regularizer """no""" +757 17 optimizer """adam""" +757 17 training_loop """lcwa""" +757 17 evaluator """rankbased""" +757 18 dataset """kinships""" +757 18 model """transd""" +757 18 loss """bceaftersigmoid""" +757 18 regularizer """no""" +757 18 optimizer """adam""" +757 18 training_loop """lcwa""" +757 18 evaluator """rankbased""" +757 19 dataset """kinships""" +757 19 model """transd""" +757 19 loss """bceaftersigmoid""" +757 19 regularizer """no""" +757 19 optimizer """adam""" +757 19 training_loop """lcwa""" +757 19 evaluator """rankbased""" +757 20 dataset """kinships""" +757 20 model """transd""" +757 20 loss """bceaftersigmoid""" +757 20 regularizer """no""" +757 20 optimizer """adam""" +757 20 training_loop """lcwa""" +757 20 evaluator """rankbased""" +757 21 dataset """kinships""" +757 21 model """transd""" +757 21 loss """bceaftersigmoid""" +757 21 regularizer """no""" +757 21 optimizer """adam""" +757 21 training_loop """lcwa""" +757 21 evaluator """rankbased""" +757 22 dataset """kinships""" +757 22 model """transd""" +757 22 loss """bceaftersigmoid""" +757 22 regularizer """no""" +757 22 optimizer """adam""" +757 22 training_loop """lcwa""" +757 22 evaluator """rankbased""" +757 23 dataset """kinships""" +757 23 model """transd""" +757 23 loss """bceaftersigmoid""" +757 23 regularizer """no""" +757 23 optimizer """adam""" +757 23 training_loop """lcwa""" +757 23 evaluator """rankbased""" +757 24 dataset """kinships""" +757 24 model """transd""" +757 24 loss """bceaftersigmoid""" +757 24 regularizer """no""" +757 24 optimizer """adam""" +757 24 training_loop """lcwa""" +757 24 evaluator """rankbased""" +757 25 dataset """kinships""" +757 25 model """transd""" +757 25 loss """bceaftersigmoid""" +757 25 
regularizer """no""" +757 25 optimizer """adam""" +757 25 training_loop """lcwa""" +757 25 evaluator """rankbased""" +757 26 dataset """kinships""" +757 26 model """transd""" +757 26 loss """bceaftersigmoid""" +757 26 regularizer """no""" +757 26 optimizer """adam""" +757 26 training_loop """lcwa""" +757 26 evaluator """rankbased""" +757 27 dataset """kinships""" +757 27 model """transd""" +757 27 loss """bceaftersigmoid""" +757 27 regularizer """no""" +757 27 optimizer """adam""" +757 27 training_loop """lcwa""" +757 27 evaluator """rankbased""" +757 28 dataset """kinships""" +757 28 model """transd""" +757 28 loss """bceaftersigmoid""" +757 28 regularizer """no""" +757 28 optimizer """adam""" +757 28 training_loop """lcwa""" +757 28 evaluator """rankbased""" +757 29 dataset """kinships""" +757 29 model """transd""" +757 29 loss """bceaftersigmoid""" +757 29 regularizer """no""" +757 29 optimizer """adam""" +757 29 training_loop """lcwa""" +757 29 evaluator """rankbased""" +757 30 dataset """kinships""" +757 30 model """transd""" +757 30 loss """bceaftersigmoid""" +757 30 regularizer """no""" +757 30 optimizer """adam""" +757 30 training_loop """lcwa""" +757 30 evaluator """rankbased""" +757 31 dataset """kinships""" +757 31 model """transd""" +757 31 loss """bceaftersigmoid""" +757 31 regularizer """no""" +757 31 optimizer """adam""" +757 31 training_loop """lcwa""" +757 31 evaluator """rankbased""" +757 32 dataset """kinships""" +757 32 model """transd""" +757 32 loss """bceaftersigmoid""" +757 32 regularizer """no""" +757 32 optimizer """adam""" +757 32 training_loop """lcwa""" +757 32 evaluator """rankbased""" +757 33 dataset """kinships""" +757 33 model """transd""" +757 33 loss """bceaftersigmoid""" +757 33 regularizer """no""" +757 33 optimizer """adam""" +757 33 training_loop """lcwa""" +757 33 evaluator """rankbased""" +757 34 dataset """kinships""" +757 34 model """transd""" +757 34 loss """bceaftersigmoid""" +757 34 regularizer """no""" +757 34 
optimizer """adam""" +757 34 training_loop """lcwa""" +757 34 evaluator """rankbased""" +757 35 dataset """kinships""" +757 35 model """transd""" +757 35 loss """bceaftersigmoid""" +757 35 regularizer """no""" +757 35 optimizer """adam""" +757 35 training_loop """lcwa""" +757 35 evaluator """rankbased""" +757 36 dataset """kinships""" +757 36 model """transd""" +757 36 loss """bceaftersigmoid""" +757 36 regularizer """no""" +757 36 optimizer """adam""" +757 36 training_loop """lcwa""" +757 36 evaluator """rankbased""" +757 37 dataset """kinships""" +757 37 model """transd""" +757 37 loss """bceaftersigmoid""" +757 37 regularizer """no""" +757 37 optimizer """adam""" +757 37 training_loop """lcwa""" +757 37 evaluator """rankbased""" +757 38 dataset """kinships""" +757 38 model """transd""" +757 38 loss """bceaftersigmoid""" +757 38 regularizer """no""" +757 38 optimizer """adam""" +757 38 training_loop """lcwa""" +757 38 evaluator """rankbased""" +757 39 dataset """kinships""" +757 39 model """transd""" +757 39 loss """bceaftersigmoid""" +757 39 regularizer """no""" +757 39 optimizer """adam""" +757 39 training_loop """lcwa""" +757 39 evaluator """rankbased""" +757 40 dataset """kinships""" +757 40 model """transd""" +757 40 loss """bceaftersigmoid""" +757 40 regularizer """no""" +757 40 optimizer """adam""" +757 40 training_loop """lcwa""" +757 40 evaluator """rankbased""" +757 41 dataset """kinships""" +757 41 model """transd""" +757 41 loss """bceaftersigmoid""" +757 41 regularizer """no""" +757 41 optimizer """adam""" +757 41 training_loop """lcwa""" +757 41 evaluator """rankbased""" +757 42 dataset """kinships""" +757 42 model """transd""" +757 42 loss """bceaftersigmoid""" +757 42 regularizer """no""" +757 42 optimizer """adam""" +757 42 training_loop """lcwa""" +757 42 evaluator """rankbased""" +757 43 dataset """kinships""" +757 43 model """transd""" +757 43 loss """bceaftersigmoid""" +757 43 regularizer """no""" +757 43 optimizer """adam""" +757 43 
training_loop """lcwa""" +757 43 evaluator """rankbased""" +757 44 dataset """kinships""" +757 44 model """transd""" +757 44 loss """bceaftersigmoid""" +757 44 regularizer """no""" +757 44 optimizer """adam""" +757 44 training_loop """lcwa""" +757 44 evaluator """rankbased""" +757 45 dataset """kinships""" +757 45 model """transd""" +757 45 loss """bceaftersigmoid""" +757 45 regularizer """no""" +757 45 optimizer """adam""" +757 45 training_loop """lcwa""" +757 45 evaluator """rankbased""" +757 46 dataset """kinships""" +757 46 model """transd""" +757 46 loss """bceaftersigmoid""" +757 46 regularizer """no""" +757 46 optimizer """adam""" +757 46 training_loop """lcwa""" +757 46 evaluator """rankbased""" +757 47 dataset """kinships""" +757 47 model """transd""" +757 47 loss """bceaftersigmoid""" +757 47 regularizer """no""" +757 47 optimizer """adam""" +757 47 training_loop """lcwa""" +757 47 evaluator """rankbased""" +757 48 dataset """kinships""" +757 48 model """transd""" +757 48 loss """bceaftersigmoid""" +757 48 regularizer """no""" +757 48 optimizer """adam""" +757 48 training_loop """lcwa""" +757 48 evaluator """rankbased""" +757 49 dataset """kinships""" +757 49 model """transd""" +757 49 loss """bceaftersigmoid""" +757 49 regularizer """no""" +757 49 optimizer """adam""" +757 49 training_loop """lcwa""" +757 49 evaluator """rankbased""" +757 50 dataset """kinships""" +757 50 model """transd""" +757 50 loss """bceaftersigmoid""" +757 50 regularizer """no""" +757 50 optimizer """adam""" +757 50 training_loop """lcwa""" +757 50 evaluator """rankbased""" +757 51 dataset """kinships""" +757 51 model """transd""" +757 51 loss """bceaftersigmoid""" +757 51 regularizer """no""" +757 51 optimizer """adam""" +757 51 training_loop """lcwa""" +757 51 evaluator """rankbased""" +757 52 dataset """kinships""" +757 52 model """transd""" +757 52 loss """bceaftersigmoid""" +757 52 regularizer """no""" +757 52 optimizer """adam""" +757 52 training_loop """lcwa""" +757 52 
evaluator """rankbased""" +757 53 dataset """kinships""" +757 53 model """transd""" +757 53 loss """bceaftersigmoid""" +757 53 regularizer """no""" +757 53 optimizer """adam""" +757 53 training_loop """lcwa""" +757 53 evaluator """rankbased""" +757 54 dataset """kinships""" +757 54 model """transd""" +757 54 loss """bceaftersigmoid""" +757 54 regularizer """no""" +757 54 optimizer """adam""" +757 54 training_loop """lcwa""" +757 54 evaluator """rankbased""" +757 55 dataset """kinships""" +757 55 model """transd""" +757 55 loss """bceaftersigmoid""" +757 55 regularizer """no""" +757 55 optimizer """adam""" +757 55 training_loop """lcwa""" +757 55 evaluator """rankbased""" +757 56 dataset """kinships""" +757 56 model """transd""" +757 56 loss """bceaftersigmoid""" +757 56 regularizer """no""" +757 56 optimizer """adam""" +757 56 training_loop """lcwa""" +757 56 evaluator """rankbased""" +757 57 dataset """kinships""" +757 57 model """transd""" +757 57 loss """bceaftersigmoid""" +757 57 regularizer """no""" +757 57 optimizer """adam""" +757 57 training_loop """lcwa""" +757 57 evaluator """rankbased""" +757 58 dataset """kinships""" +757 58 model """transd""" +757 58 loss """bceaftersigmoid""" +757 58 regularizer """no""" +757 58 optimizer """adam""" +757 58 training_loop """lcwa""" +757 58 evaluator """rankbased""" +757 59 dataset """kinships""" +757 59 model """transd""" +757 59 loss """bceaftersigmoid""" +757 59 regularizer """no""" +757 59 optimizer """adam""" +757 59 training_loop """lcwa""" +757 59 evaluator """rankbased""" +757 60 dataset """kinships""" +757 60 model """transd""" +757 60 loss """bceaftersigmoid""" +757 60 regularizer """no""" +757 60 optimizer """adam""" +757 60 training_loop """lcwa""" +757 60 evaluator """rankbased""" +757 61 dataset """kinships""" +757 61 model """transd""" +757 61 loss """bceaftersigmoid""" +757 61 regularizer """no""" +757 61 optimizer """adam""" +757 61 training_loop """lcwa""" +757 61 evaluator """rankbased""" +757 62 
dataset """kinships""" +757 62 model """transd""" +757 62 loss """bceaftersigmoid""" +757 62 regularizer """no""" +757 62 optimizer """adam""" +757 62 training_loop """lcwa""" +757 62 evaluator """rankbased""" +757 63 dataset """kinships""" +757 63 model """transd""" +757 63 loss """bceaftersigmoid""" +757 63 regularizer """no""" +757 63 optimizer """adam""" +757 63 training_loop """lcwa""" +757 63 evaluator """rankbased""" +757 64 dataset """kinships""" +757 64 model """transd""" +757 64 loss """bceaftersigmoid""" +757 64 regularizer """no""" +757 64 optimizer """adam""" +757 64 training_loop """lcwa""" +757 64 evaluator """rankbased""" +757 65 dataset """kinships""" +757 65 model """transd""" +757 65 loss """bceaftersigmoid""" +757 65 regularizer """no""" +757 65 optimizer """adam""" +757 65 training_loop """lcwa""" +757 65 evaluator """rankbased""" +757 66 dataset """kinships""" +757 66 model """transd""" +757 66 loss """bceaftersigmoid""" +757 66 regularizer """no""" +757 66 optimizer """adam""" +757 66 training_loop """lcwa""" +757 66 evaluator """rankbased""" +757 67 dataset """kinships""" +757 67 model """transd""" +757 67 loss """bceaftersigmoid""" +757 67 regularizer """no""" +757 67 optimizer """adam""" +757 67 training_loop """lcwa""" +757 67 evaluator """rankbased""" +757 68 dataset """kinships""" +757 68 model """transd""" +757 68 loss """bceaftersigmoid""" +757 68 regularizer """no""" +757 68 optimizer """adam""" +757 68 training_loop """lcwa""" +757 68 evaluator """rankbased""" +757 69 dataset """kinships""" +757 69 model """transd""" +757 69 loss """bceaftersigmoid""" +757 69 regularizer """no""" +757 69 optimizer """adam""" +757 69 training_loop """lcwa""" +757 69 evaluator """rankbased""" +757 70 dataset """kinships""" +757 70 model """transd""" +757 70 loss """bceaftersigmoid""" +757 70 regularizer """no""" +757 70 optimizer """adam""" +757 70 training_loop """lcwa""" +757 70 evaluator """rankbased""" +757 71 dataset """kinships""" +757 71 model 
"""transd""" +757 71 loss """bceaftersigmoid""" +757 71 regularizer """no""" +757 71 optimizer """adam""" +757 71 training_loop """lcwa""" +757 71 evaluator """rankbased""" +757 72 dataset """kinships""" +757 72 model """transd""" +757 72 loss """bceaftersigmoid""" +757 72 regularizer """no""" +757 72 optimizer """adam""" +757 72 training_loop """lcwa""" +757 72 evaluator """rankbased""" +757 73 dataset """kinships""" +757 73 model """transd""" +757 73 loss """bceaftersigmoid""" +757 73 regularizer """no""" +757 73 optimizer """adam""" +757 73 training_loop """lcwa""" +757 73 evaluator """rankbased""" +757 74 dataset """kinships""" +757 74 model """transd""" +757 74 loss """bceaftersigmoid""" +757 74 regularizer """no""" +757 74 optimizer """adam""" +757 74 training_loop """lcwa""" +757 74 evaluator """rankbased""" +757 75 dataset """kinships""" +757 75 model """transd""" +757 75 loss """bceaftersigmoid""" +757 75 regularizer """no""" +757 75 optimizer """adam""" +757 75 training_loop """lcwa""" +757 75 evaluator """rankbased""" +757 76 dataset """kinships""" +757 76 model """transd""" +757 76 loss """bceaftersigmoid""" +757 76 regularizer """no""" +757 76 optimizer """adam""" +757 76 training_loop """lcwa""" +757 76 evaluator """rankbased""" +757 77 dataset """kinships""" +757 77 model """transd""" +757 77 loss """bceaftersigmoid""" +757 77 regularizer """no""" +757 77 optimizer """adam""" +757 77 training_loop """lcwa""" +757 77 evaluator """rankbased""" +757 78 dataset """kinships""" +757 78 model """transd""" +757 78 loss """bceaftersigmoid""" +757 78 regularizer """no""" +757 78 optimizer """adam""" +757 78 training_loop """lcwa""" +757 78 evaluator """rankbased""" +757 79 dataset """kinships""" +757 79 model """transd""" +757 79 loss """bceaftersigmoid""" +757 79 regularizer """no""" +757 79 optimizer """adam""" +757 79 training_loop """lcwa""" +757 79 evaluator """rankbased""" +757 80 dataset """kinships""" +757 80 model """transd""" +757 80 loss 
"""bceaftersigmoid""" +757 80 regularizer """no""" +757 80 optimizer """adam""" +757 80 training_loop """lcwa""" +757 80 evaluator """rankbased""" +757 81 dataset """kinships""" +757 81 model """transd""" +757 81 loss """bceaftersigmoid""" +757 81 regularizer """no""" +757 81 optimizer """adam""" +757 81 training_loop """lcwa""" +757 81 evaluator """rankbased""" +757 82 dataset """kinships""" +757 82 model """transd""" +757 82 loss """bceaftersigmoid""" +757 82 regularizer """no""" +757 82 optimizer """adam""" +757 82 training_loop """lcwa""" +757 82 evaluator """rankbased""" +757 83 dataset """kinships""" +757 83 model """transd""" +757 83 loss """bceaftersigmoid""" +757 83 regularizer """no""" +757 83 optimizer """adam""" +757 83 training_loop """lcwa""" +757 83 evaluator """rankbased""" +757 84 dataset """kinships""" +757 84 model """transd""" +757 84 loss """bceaftersigmoid""" +757 84 regularizer """no""" +757 84 optimizer """adam""" +757 84 training_loop """lcwa""" +757 84 evaluator """rankbased""" +757 85 dataset """kinships""" +757 85 model """transd""" +757 85 loss """bceaftersigmoid""" +757 85 regularizer """no""" +757 85 optimizer """adam""" +757 85 training_loop """lcwa""" +757 85 evaluator """rankbased""" +757 86 dataset """kinships""" +757 86 model """transd""" +757 86 loss """bceaftersigmoid""" +757 86 regularizer """no""" +757 86 optimizer """adam""" +757 86 training_loop """lcwa""" +757 86 evaluator """rankbased""" +757 87 dataset """kinships""" +757 87 model """transd""" +757 87 loss """bceaftersigmoid""" +757 87 regularizer """no""" +757 87 optimizer """adam""" +757 87 training_loop """lcwa""" +757 87 evaluator """rankbased""" +757 88 dataset """kinships""" +757 88 model """transd""" +757 88 loss """bceaftersigmoid""" +757 88 regularizer """no""" +757 88 optimizer """adam""" +757 88 training_loop """lcwa""" +757 88 evaluator """rankbased""" +757 89 dataset """kinships""" +757 89 model """transd""" +757 89 loss """bceaftersigmoid""" +757 89 
regularizer """no""" +757 89 optimizer """adam""" +757 89 training_loop """lcwa""" +757 89 evaluator """rankbased""" +757 90 dataset """kinships""" +757 90 model """transd""" +757 90 loss """bceaftersigmoid""" +757 90 regularizer """no""" +757 90 optimizer """adam""" +757 90 training_loop """lcwa""" +757 90 evaluator """rankbased""" +757 91 dataset """kinships""" +757 91 model """transd""" +757 91 loss """bceaftersigmoid""" +757 91 regularizer """no""" +757 91 optimizer """adam""" +757 91 training_loop """lcwa""" +757 91 evaluator """rankbased""" +757 92 dataset """kinships""" +757 92 model """transd""" +757 92 loss """bceaftersigmoid""" +757 92 regularizer """no""" +757 92 optimizer """adam""" +757 92 training_loop """lcwa""" +757 92 evaluator """rankbased""" +757 93 dataset """kinships""" +757 93 model """transd""" +757 93 loss """bceaftersigmoid""" +757 93 regularizer """no""" +757 93 optimizer """adam""" +757 93 training_loop """lcwa""" +757 93 evaluator """rankbased""" +757 94 dataset """kinships""" +757 94 model """transd""" +757 94 loss """bceaftersigmoid""" +757 94 regularizer """no""" +757 94 optimizer """adam""" +757 94 training_loop """lcwa""" +757 94 evaluator """rankbased""" +757 95 dataset """kinships""" +757 95 model """transd""" +757 95 loss """bceaftersigmoid""" +757 95 regularizer """no""" +757 95 optimizer """adam""" +757 95 training_loop """lcwa""" +757 95 evaluator """rankbased""" +757 96 dataset """kinships""" +757 96 model """transd""" +757 96 loss """bceaftersigmoid""" +757 96 regularizer """no""" +757 96 optimizer """adam""" +757 96 training_loop """lcwa""" +757 96 evaluator """rankbased""" +757 97 dataset """kinships""" +757 97 model """transd""" +757 97 loss """bceaftersigmoid""" +757 97 regularizer """no""" +757 97 optimizer """adam""" +757 97 training_loop """lcwa""" +757 97 evaluator """rankbased""" +757 98 dataset """kinships""" +757 98 model """transd""" +757 98 loss """bceaftersigmoid""" +757 98 regularizer """no""" +757 98 
optimizer """adam""" +757 98 training_loop """lcwa""" +757 98 evaluator """rankbased""" +757 99 dataset """kinships""" +757 99 model """transd""" +757 99 loss """bceaftersigmoid""" +757 99 regularizer """no""" +757 99 optimizer """adam""" +757 99 training_loop """lcwa""" +757 99 evaluator """rankbased""" +757 100 dataset """kinships""" +757 100 model """transd""" +757 100 loss """bceaftersigmoid""" +757 100 regularizer """no""" +757 100 optimizer """adam""" +757 100 training_loop """lcwa""" +757 100 evaluator """rankbased""" +758 1 model.embedding_dim 2.0 +758 1 model.relation_dim 2.0 +758 1 optimizer.lr 0.005232057626995663 +758 1 training.batch_size 0.0 +758 1 training.label_smoothing 0.5929980388366212 +758 2 model.embedding_dim 2.0 +758 2 model.relation_dim 0.0 +758 2 optimizer.lr 0.010683894412428408 +758 2 training.batch_size 1.0 +758 2 training.label_smoothing 0.11880236974558431 +758 3 model.embedding_dim 2.0 +758 3 model.relation_dim 0.0 +758 3 optimizer.lr 0.012530934670122175 +758 3 training.batch_size 0.0 +758 3 training.label_smoothing 0.1833013865884229 +758 4 model.embedding_dim 1.0 +758 4 model.relation_dim 2.0 +758 4 optimizer.lr 0.0069035796434006634 +758 4 training.batch_size 0.0 +758 4 training.label_smoothing 0.14827709661176194 +758 5 model.embedding_dim 2.0 +758 5 model.relation_dim 0.0 +758 5 optimizer.lr 0.002203129183810632 +758 5 training.batch_size 0.0 +758 5 training.label_smoothing 0.5294427021667414 +758 6 model.embedding_dim 0.0 +758 6 model.relation_dim 2.0 +758 6 optimizer.lr 0.008187396418722091 +758 6 training.batch_size 1.0 +758 6 training.label_smoothing 0.029807137288022718 +758 7 model.embedding_dim 0.0 +758 7 model.relation_dim 2.0 +758 7 optimizer.lr 0.03977939462910892 +758 7 training.batch_size 2.0 +758 7 training.label_smoothing 0.1338579132130103 +758 8 model.embedding_dim 2.0 +758 8 model.relation_dim 1.0 +758 8 optimizer.lr 0.016729316464115577 +758 8 training.batch_size 2.0 +758 8 training.label_smoothing 
0.007160822006664527 +758 9 model.embedding_dim 2.0 +758 9 model.relation_dim 1.0 +758 9 optimizer.lr 0.0017682516047751577 +758 9 training.batch_size 1.0 +758 9 training.label_smoothing 0.5431853166530628 +758 10 model.embedding_dim 0.0 +758 10 model.relation_dim 2.0 +758 10 optimizer.lr 0.0015461439416182836 +758 10 training.batch_size 1.0 +758 10 training.label_smoothing 0.003999221358016672 +758 11 model.embedding_dim 1.0 +758 11 model.relation_dim 0.0 +758 11 optimizer.lr 0.007064342654189287 +758 11 training.batch_size 1.0 +758 11 training.label_smoothing 0.08105651952924581 +758 12 model.embedding_dim 0.0 +758 12 model.relation_dim 2.0 +758 12 optimizer.lr 0.04799480835769825 +758 12 training.batch_size 2.0 +758 12 training.label_smoothing 0.16309597695226566 +758 13 model.embedding_dim 0.0 +758 13 model.relation_dim 2.0 +758 13 optimizer.lr 0.03817600312913394 +758 13 training.batch_size 2.0 +758 13 training.label_smoothing 0.19114523106501505 +758 14 model.embedding_dim 1.0 +758 14 model.relation_dim 0.0 +758 14 optimizer.lr 0.05762277054651154 +758 14 training.batch_size 1.0 +758 14 training.label_smoothing 0.7462214434433898 +758 15 model.embedding_dim 2.0 +758 15 model.relation_dim 0.0 +758 15 optimizer.lr 0.024191654567025957 +758 15 training.batch_size 0.0 +758 15 training.label_smoothing 0.01315398175451934 +758 16 model.embedding_dim 1.0 +758 16 model.relation_dim 0.0 +758 16 optimizer.lr 0.0014594898071644956 +758 16 training.batch_size 2.0 +758 16 training.label_smoothing 0.5452857546061738 +758 17 model.embedding_dim 2.0 +758 17 model.relation_dim 2.0 +758 17 optimizer.lr 0.015830289934351805 +758 17 training.batch_size 0.0 +758 17 training.label_smoothing 0.3438837722976869 +758 18 model.embedding_dim 2.0 +758 18 model.relation_dim 2.0 +758 18 optimizer.lr 0.029599389069938604 +758 18 training.batch_size 2.0 +758 18 training.label_smoothing 0.012678104452085734 +758 19 model.embedding_dim 0.0 +758 19 model.relation_dim 0.0 +758 19 optimizer.lr 
0.006198613855362518 +758 19 training.batch_size 1.0 +758 19 training.label_smoothing 0.0033112385544605224 +758 20 model.embedding_dim 2.0 +758 20 model.relation_dim 0.0 +758 20 optimizer.lr 0.002165735405803446 +758 20 training.batch_size 0.0 +758 20 training.label_smoothing 0.01333158630548112 +758 21 model.embedding_dim 1.0 +758 21 model.relation_dim 1.0 +758 21 optimizer.lr 0.0014171235556028608 +758 21 training.batch_size 1.0 +758 21 training.label_smoothing 0.0237068160173498 +758 22 model.embedding_dim 1.0 +758 22 model.relation_dim 2.0 +758 22 optimizer.lr 0.002344526072980484 +758 22 training.batch_size 0.0 +758 22 training.label_smoothing 0.0013115219045792108 +758 23 model.embedding_dim 2.0 +758 23 model.relation_dim 1.0 +758 23 optimizer.lr 0.0022069297406412413 +758 23 training.batch_size 0.0 +758 23 training.label_smoothing 0.5592179394852478 +758 24 model.embedding_dim 0.0 +758 24 model.relation_dim 0.0 +758 24 optimizer.lr 0.08469420395595767 +758 24 training.batch_size 2.0 +758 24 training.label_smoothing 0.17998710455479736 +758 25 model.embedding_dim 0.0 +758 25 model.relation_dim 2.0 +758 25 optimizer.lr 0.09191542474408577 +758 25 training.batch_size 0.0 +758 25 training.label_smoothing 0.3545874475169404 +758 26 model.embedding_dim 1.0 +758 26 model.relation_dim 0.0 +758 26 optimizer.lr 0.00357539609666707 +758 26 training.batch_size 1.0 +758 26 training.label_smoothing 0.006513647145555801 +758 27 model.embedding_dim 1.0 +758 27 model.relation_dim 1.0 +758 27 optimizer.lr 0.026330479181798955 +758 27 training.batch_size 2.0 +758 27 training.label_smoothing 0.0274640685499419 +758 28 model.embedding_dim 1.0 +758 28 model.relation_dim 1.0 +758 28 optimizer.lr 0.02779738090346217 +758 28 training.batch_size 1.0 +758 28 training.label_smoothing 0.16934119679088344 +758 29 model.embedding_dim 2.0 +758 29 model.relation_dim 0.0 +758 29 optimizer.lr 0.006287781194990509 +758 29 training.batch_size 1.0 +758 29 training.label_smoothing 
0.001506107261390962 +758 30 model.embedding_dim 0.0 +758 30 model.relation_dim 1.0 +758 30 optimizer.lr 0.045786301588275666 +758 30 training.batch_size 0.0 +758 30 training.label_smoothing 0.042121596969172614 +758 31 model.embedding_dim 2.0 +758 31 model.relation_dim 0.0 +758 31 optimizer.lr 0.0014339656326301615 +758 31 training.batch_size 2.0 +758 31 training.label_smoothing 0.25844938678521073 +758 32 model.embedding_dim 0.0 +758 32 model.relation_dim 0.0 +758 32 optimizer.lr 0.040144140953127835 +758 32 training.batch_size 2.0 +758 32 training.label_smoothing 0.0044365460922764334 +758 33 model.embedding_dim 0.0 +758 33 model.relation_dim 2.0 +758 33 optimizer.lr 0.003761010565615671 +758 33 training.batch_size 2.0 +758 33 training.label_smoothing 0.009309621296650828 +758 34 model.embedding_dim 0.0 +758 34 model.relation_dim 2.0 +758 34 optimizer.lr 0.06277377073502315 +758 34 training.batch_size 0.0 +758 34 training.label_smoothing 0.00308240522857837 +758 35 model.embedding_dim 1.0 +758 35 model.relation_dim 1.0 +758 35 optimizer.lr 0.0010889735621468525 +758 35 training.batch_size 2.0 +758 35 training.label_smoothing 0.01806079403172 +758 36 model.embedding_dim 2.0 +758 36 model.relation_dim 1.0 +758 36 optimizer.lr 0.04217459160625923 +758 36 training.batch_size 1.0 +758 36 training.label_smoothing 0.0030776477965276648 +758 37 model.embedding_dim 1.0 +758 37 model.relation_dim 0.0 +758 37 optimizer.lr 0.0024572457845334207 +758 37 training.batch_size 1.0 +758 37 training.label_smoothing 0.0020102567624179536 +758 38 model.embedding_dim 1.0 +758 38 model.relation_dim 2.0 +758 38 optimizer.lr 0.0011242479404863942 +758 38 training.batch_size 1.0 +758 38 training.label_smoothing 0.10937778482416229 +758 39 model.embedding_dim 2.0 +758 39 model.relation_dim 2.0 +758 39 optimizer.lr 0.05922741001938021 +758 39 training.batch_size 1.0 +758 39 training.label_smoothing 0.035586042777476096 +758 40 model.embedding_dim 1.0 +758 40 model.relation_dim 1.0 +758 40 
optimizer.lr 0.03372459070341862 +758 40 training.batch_size 1.0 +758 40 training.label_smoothing 0.007167577777382488 +758 41 model.embedding_dim 1.0 +758 41 model.relation_dim 1.0 +758 41 optimizer.lr 0.07343298085713827 +758 41 training.batch_size 1.0 +758 41 training.label_smoothing 0.8398715296233453 +758 42 model.embedding_dim 2.0 +758 42 model.relation_dim 2.0 +758 42 optimizer.lr 0.0019805369720927645 +758 42 training.batch_size 1.0 +758 42 training.label_smoothing 0.23015097528195752 +758 43 model.embedding_dim 1.0 +758 43 model.relation_dim 0.0 +758 43 optimizer.lr 0.017765273473748153 +758 43 training.batch_size 2.0 +758 43 training.label_smoothing 0.0019169563805577355 +758 44 model.embedding_dim 0.0 +758 44 model.relation_dim 2.0 +758 44 optimizer.lr 0.0014730510904269235 +758 44 training.batch_size 1.0 +758 44 training.label_smoothing 0.0560483131848434 +758 45 model.embedding_dim 1.0 +758 45 model.relation_dim 2.0 +758 45 optimizer.lr 0.015331171108326025 +758 45 training.batch_size 0.0 +758 45 training.label_smoothing 0.0036643285509503176 +758 46 model.embedding_dim 1.0 +758 46 model.relation_dim 2.0 +758 46 optimizer.lr 0.06465368242377781 +758 46 training.batch_size 2.0 +758 46 training.label_smoothing 0.02886729801332099 +758 47 model.embedding_dim 0.0 +758 47 model.relation_dim 2.0 +758 47 optimizer.lr 0.001975567137515764 +758 47 training.batch_size 0.0 +758 47 training.label_smoothing 0.83251846585385 +758 48 model.embedding_dim 0.0 +758 48 model.relation_dim 0.0 +758 48 optimizer.lr 0.06475708572942807 +758 48 training.batch_size 0.0 +758 48 training.label_smoothing 0.9111402230835297 +758 49 model.embedding_dim 2.0 +758 49 model.relation_dim 0.0 +758 49 optimizer.lr 0.011278000331201604 +758 49 training.batch_size 2.0 +758 49 training.label_smoothing 0.003523045311321291 +758 50 model.embedding_dim 1.0 +758 50 model.relation_dim 1.0 +758 50 optimizer.lr 0.08731573917084387 +758 50 training.batch_size 0.0 +758 50 training.label_smoothing 
0.03889327650927154 +758 51 model.embedding_dim 2.0 +758 51 model.relation_dim 0.0 +758 51 optimizer.lr 0.07815978151607898 +758 51 training.batch_size 1.0 +758 51 training.label_smoothing 0.06306257353806687 +758 52 model.embedding_dim 2.0 +758 52 model.relation_dim 2.0 +758 52 optimizer.lr 0.032274597711733015 +758 52 training.batch_size 1.0 +758 52 training.label_smoothing 0.9313535160037655 +758 53 model.embedding_dim 2.0 +758 53 model.relation_dim 0.0 +758 53 optimizer.lr 0.002198217526306787 +758 53 training.batch_size 0.0 +758 53 training.label_smoothing 0.7107097137198558 +758 54 model.embedding_dim 2.0 +758 54 model.relation_dim 0.0 +758 54 optimizer.lr 0.0015535212174940024 +758 54 training.batch_size 2.0 +758 54 training.label_smoothing 0.0015705849412798445 +758 55 model.embedding_dim 0.0 +758 55 model.relation_dim 2.0 +758 55 optimizer.lr 0.012074833560497832 +758 55 training.batch_size 2.0 +758 55 training.label_smoothing 0.0021117161884907113 +758 56 model.embedding_dim 2.0 +758 56 model.relation_dim 2.0 +758 56 optimizer.lr 0.002815428508943307 +758 56 training.batch_size 0.0 +758 56 training.label_smoothing 0.019040573320260493 +758 57 model.embedding_dim 1.0 +758 57 model.relation_dim 0.0 +758 57 optimizer.lr 0.002723116447083742 +758 57 training.batch_size 2.0 +758 57 training.label_smoothing 0.002172770392787809 +758 58 model.embedding_dim 1.0 +758 58 model.relation_dim 1.0 +758 58 optimizer.lr 0.006564910580451176 +758 58 training.batch_size 1.0 +758 58 training.label_smoothing 0.010111181255116381 +758 59 model.embedding_dim 2.0 +758 59 model.relation_dim 1.0 +758 59 optimizer.lr 0.06006186774231243 +758 59 training.batch_size 0.0 +758 59 training.label_smoothing 0.002765595947340368 +758 60 model.embedding_dim 0.0 +758 60 model.relation_dim 0.0 +758 60 optimizer.lr 0.0019794658550807017 +758 60 training.batch_size 0.0 +758 60 training.label_smoothing 0.035589438347828385 +758 61 model.embedding_dim 1.0 +758 61 model.relation_dim 2.0 +758 61 
optimizer.lr 0.044599369290051795 +758 61 training.batch_size 2.0 +758 61 training.label_smoothing 0.22344325981680677 +758 62 model.embedding_dim 1.0 +758 62 model.relation_dim 2.0 +758 62 optimizer.lr 0.0013346719217605422 +758 62 training.batch_size 0.0 +758 62 training.label_smoothing 0.0258058711569417 +758 63 model.embedding_dim 0.0 +758 63 model.relation_dim 1.0 +758 63 optimizer.lr 0.036684534415297926 +758 63 training.batch_size 1.0 +758 63 training.label_smoothing 0.02262432174905648 +758 64 model.embedding_dim 0.0 +758 64 model.relation_dim 1.0 +758 64 optimizer.lr 0.006041474441898678 +758 64 training.batch_size 1.0 +758 64 training.label_smoothing 0.006247759737777755 +758 65 model.embedding_dim 1.0 +758 65 model.relation_dim 1.0 +758 65 optimizer.lr 0.02293891958234521 +758 65 training.batch_size 0.0 +758 65 training.label_smoothing 0.30926649282898305 +758 66 model.embedding_dim 2.0 +758 66 model.relation_dim 0.0 +758 66 optimizer.lr 0.0638860111481556 +758 66 training.batch_size 2.0 +758 66 training.label_smoothing 0.06389972869378224 +758 67 model.embedding_dim 0.0 +758 67 model.relation_dim 0.0 +758 67 optimizer.lr 0.004994342832951191 +758 67 training.batch_size 0.0 +758 67 training.label_smoothing 0.0013144327475457908 +758 68 model.embedding_dim 1.0 +758 68 model.relation_dim 2.0 +758 68 optimizer.lr 0.0017754567965059702 +758 68 training.batch_size 1.0 +758 68 training.label_smoothing 0.31832966698621823 +758 69 model.embedding_dim 2.0 +758 69 model.relation_dim 0.0 +758 69 optimizer.lr 0.054433654053690564 +758 69 training.batch_size 2.0 +758 69 training.label_smoothing 0.0021057595142398846 +758 70 model.embedding_dim 1.0 +758 70 model.relation_dim 0.0 +758 70 optimizer.lr 0.0025815205973751167 +758 70 training.batch_size 0.0 +758 70 training.label_smoothing 0.0029681643461172483 +758 71 model.embedding_dim 2.0 +758 71 model.relation_dim 2.0 +758 71 optimizer.lr 0.05163122591310536 +758 71 training.batch_size 2.0 +758 71 
training.label_smoothing 0.012144502193218233 +758 72 model.embedding_dim 1.0 +758 72 model.relation_dim 1.0 +758 72 optimizer.lr 0.046234333648132106 +758 72 training.batch_size 0.0 +758 72 training.label_smoothing 0.13429142348366202 +758 73 model.embedding_dim 2.0 +758 73 model.relation_dim 0.0 +758 73 optimizer.lr 0.04783551941872848 +758 73 training.batch_size 2.0 +758 73 training.label_smoothing 0.21035702466716294 +758 74 model.embedding_dim 0.0 +758 74 model.relation_dim 0.0 +758 74 optimizer.lr 0.0010544520133205862 +758 74 training.batch_size 2.0 +758 74 training.label_smoothing 0.03510776315771906 +758 75 model.embedding_dim 2.0 +758 75 model.relation_dim 2.0 +758 75 optimizer.lr 0.0013176203500394163 +758 75 training.batch_size 0.0 +758 75 training.label_smoothing 0.21820248636376352 +758 76 model.embedding_dim 2.0 +758 76 model.relation_dim 2.0 +758 76 optimizer.lr 0.005388722726297215 +758 76 training.batch_size 2.0 +758 76 training.label_smoothing 0.021982330015876007 +758 77 model.embedding_dim 0.0 +758 77 model.relation_dim 1.0 +758 77 optimizer.lr 0.032630095531851255 +758 77 training.batch_size 0.0 +758 77 training.label_smoothing 0.0026189576774622255 +758 78 model.embedding_dim 2.0 +758 78 model.relation_dim 1.0 +758 78 optimizer.lr 0.0013798467287631981 +758 78 training.batch_size 1.0 +758 78 training.label_smoothing 0.020779003612343663 +758 79 model.embedding_dim 2.0 +758 79 model.relation_dim 2.0 +758 79 optimizer.lr 0.024787052393061858 +758 79 training.batch_size 2.0 +758 79 training.label_smoothing 0.5043674356098765 +758 80 model.embedding_dim 0.0 +758 80 model.relation_dim 2.0 +758 80 optimizer.lr 0.07861687766837491 +758 80 training.batch_size 1.0 +758 80 training.label_smoothing 0.12462269406574263 +758 81 model.embedding_dim 0.0 +758 81 model.relation_dim 2.0 +758 81 optimizer.lr 0.003275178516326428 +758 81 training.batch_size 0.0 +758 81 training.label_smoothing 0.018423210642282487 +758 82 model.embedding_dim 1.0 +758 82 
model.relation_dim 1.0 +758 82 optimizer.lr 0.029275639917647374 +758 82 training.batch_size 2.0 +758 82 training.label_smoothing 0.01225680159326436 +758 83 model.embedding_dim 2.0 +758 83 model.relation_dim 2.0 +758 83 optimizer.lr 0.00312350518862645 +758 83 training.batch_size 1.0 +758 83 training.label_smoothing 0.00888318550700434 +758 84 model.embedding_dim 1.0 +758 84 model.relation_dim 0.0 +758 84 optimizer.lr 0.015113967466568342 +758 84 training.batch_size 1.0 +758 84 training.label_smoothing 0.012463209269499051 +758 85 model.embedding_dim 1.0 +758 85 model.relation_dim 1.0 +758 85 optimizer.lr 0.004381496252301899 +758 85 training.batch_size 0.0 +758 85 training.label_smoothing 0.0026938815616564466 +758 86 model.embedding_dim 1.0 +758 86 model.relation_dim 1.0 +758 86 optimizer.lr 0.08898359739539753 +758 86 training.batch_size 2.0 +758 86 training.label_smoothing 0.5209047105330058 +758 87 model.embedding_dim 1.0 +758 87 model.relation_dim 2.0 +758 87 optimizer.lr 0.04785262219496979 +758 87 training.batch_size 2.0 +758 87 training.label_smoothing 0.13122466249394435 +758 88 model.embedding_dim 0.0 +758 88 model.relation_dim 0.0 +758 88 optimizer.lr 0.006877017499549495 +758 88 training.batch_size 0.0 +758 88 training.label_smoothing 0.009539739570345492 +758 89 model.embedding_dim 2.0 +758 89 model.relation_dim 2.0 +758 89 optimizer.lr 0.026671660517495335 +758 89 training.batch_size 1.0 +758 89 training.label_smoothing 0.008998561199953592 +758 90 model.embedding_dim 0.0 +758 90 model.relation_dim 0.0 +758 90 optimizer.lr 0.0025509727795714515 +758 90 training.batch_size 0.0 +758 90 training.label_smoothing 0.0016625705966900695 +758 91 model.embedding_dim 1.0 +758 91 model.relation_dim 1.0 +758 91 optimizer.lr 0.017464652329043106 +758 91 training.batch_size 0.0 +758 91 training.label_smoothing 0.21540523485394247 +758 92 model.embedding_dim 0.0 +758 92 model.relation_dim 0.0 +758 92 optimizer.lr 0.04130551966972048 +758 92 training.batch_size 2.0 
+758 92 training.label_smoothing 0.8143621196218275 +758 93 model.embedding_dim 0.0 +758 93 model.relation_dim 1.0 +758 93 optimizer.lr 0.0011004621058840675 +758 93 training.batch_size 2.0 +758 93 training.label_smoothing 0.0035763717343914917 +758 94 model.embedding_dim 2.0 +758 94 model.relation_dim 2.0 +758 94 optimizer.lr 0.023974550460385706 +758 94 training.batch_size 0.0 +758 94 training.label_smoothing 0.028607629842948658 +758 95 model.embedding_dim 0.0 +758 95 model.relation_dim 1.0 +758 95 optimizer.lr 0.002445966771527132 +758 95 training.batch_size 0.0 +758 95 training.label_smoothing 0.3144777960091777 +758 96 model.embedding_dim 1.0 +758 96 model.relation_dim 0.0 +758 96 optimizer.lr 0.049233149303758654 +758 96 training.batch_size 0.0 +758 96 training.label_smoothing 0.007957943299304509 +758 97 model.embedding_dim 0.0 +758 97 model.relation_dim 1.0 +758 97 optimizer.lr 0.07073785119258688 +758 97 training.batch_size 1.0 +758 97 training.label_smoothing 0.7516157451248476 +758 98 model.embedding_dim 1.0 +758 98 model.relation_dim 2.0 +758 98 optimizer.lr 0.00909576123655378 +758 98 training.batch_size 0.0 +758 98 training.label_smoothing 0.09187009857957466 +758 99 model.embedding_dim 1.0 +758 99 model.relation_dim 1.0 +758 99 optimizer.lr 0.031752848938002745 +758 99 training.batch_size 0.0 +758 99 training.label_smoothing 0.008280142848076736 +758 100 model.embedding_dim 0.0 +758 100 model.relation_dim 2.0 +758 100 optimizer.lr 0.02436401301671904 +758 100 training.batch_size 2.0 +758 100 training.label_smoothing 0.011158572781868359 +758 1 dataset """kinships""" +758 1 model """transd""" +758 1 loss """softplus""" +758 1 regularizer """no""" +758 1 optimizer """adam""" +758 1 training_loop """lcwa""" +758 1 evaluator """rankbased""" +758 2 dataset """kinships""" +758 2 model """transd""" +758 2 loss """softplus""" +758 2 regularizer """no""" +758 2 optimizer """adam""" +758 2 training_loop """lcwa""" +758 2 evaluator """rankbased""" +758 3 
dataset """kinships""" +758 3 model """transd""" +758 3 loss """softplus""" +758 3 regularizer """no""" +758 3 optimizer """adam""" +758 3 training_loop """lcwa""" +758 3 evaluator """rankbased""" +758 4 dataset """kinships""" +758 4 model """transd""" +758 4 loss """softplus""" +758 4 regularizer """no""" +758 4 optimizer """adam""" +758 4 training_loop """lcwa""" +758 4 evaluator """rankbased""" +758 5 dataset """kinships""" +758 5 model """transd""" +758 5 loss """softplus""" +758 5 regularizer """no""" +758 5 optimizer """adam""" +758 5 training_loop """lcwa""" +758 5 evaluator """rankbased""" +758 6 dataset """kinships""" +758 6 model """transd""" +758 6 loss """softplus""" +758 6 regularizer """no""" +758 6 optimizer """adam""" +758 6 training_loop """lcwa""" +758 6 evaluator """rankbased""" +758 7 dataset """kinships""" +758 7 model """transd""" +758 7 loss """softplus""" +758 7 regularizer """no""" +758 7 optimizer """adam""" +758 7 training_loop """lcwa""" +758 7 evaluator """rankbased""" +758 8 dataset """kinships""" +758 8 model """transd""" +758 8 loss """softplus""" +758 8 regularizer """no""" +758 8 optimizer """adam""" +758 8 training_loop """lcwa""" +758 8 evaluator """rankbased""" +758 9 dataset """kinships""" +758 9 model """transd""" +758 9 loss """softplus""" +758 9 regularizer """no""" +758 9 optimizer """adam""" +758 9 training_loop """lcwa""" +758 9 evaluator """rankbased""" +758 10 dataset """kinships""" +758 10 model """transd""" +758 10 loss """softplus""" +758 10 regularizer """no""" +758 10 optimizer """adam""" +758 10 training_loop """lcwa""" +758 10 evaluator """rankbased""" +758 11 dataset """kinships""" +758 11 model """transd""" +758 11 loss """softplus""" +758 11 regularizer """no""" +758 11 optimizer """adam""" +758 11 training_loop """lcwa""" +758 11 evaluator """rankbased""" +758 12 dataset """kinships""" +758 12 model """transd""" +758 12 loss """softplus""" +758 12 regularizer """no""" +758 12 optimizer """adam""" +758 12 
training_loop """lcwa""" +758 12 evaluator """rankbased""" +758 13 dataset """kinships""" +758 13 model """transd""" +758 13 loss """softplus""" +758 13 regularizer """no""" +758 13 optimizer """adam""" +758 13 training_loop """lcwa""" +758 13 evaluator """rankbased""" +758 14 dataset """kinships""" +758 14 model """transd""" +758 14 loss """softplus""" +758 14 regularizer """no""" +758 14 optimizer """adam""" +758 14 training_loop """lcwa""" +758 14 evaluator """rankbased""" +758 15 dataset """kinships""" +758 15 model """transd""" +758 15 loss """softplus""" +758 15 regularizer """no""" +758 15 optimizer """adam""" +758 15 training_loop """lcwa""" +758 15 evaluator """rankbased""" +758 16 dataset """kinships""" +758 16 model """transd""" +758 16 loss """softplus""" +758 16 regularizer """no""" +758 16 optimizer """adam""" +758 16 training_loop """lcwa""" +758 16 evaluator """rankbased""" +758 17 dataset """kinships""" +758 17 model """transd""" +758 17 loss """softplus""" +758 17 regularizer """no""" +758 17 optimizer """adam""" +758 17 training_loop """lcwa""" +758 17 evaluator """rankbased""" +758 18 dataset """kinships""" +758 18 model """transd""" +758 18 loss """softplus""" +758 18 regularizer """no""" +758 18 optimizer """adam""" +758 18 training_loop """lcwa""" +758 18 evaluator """rankbased""" +758 19 dataset """kinships""" +758 19 model """transd""" +758 19 loss """softplus""" +758 19 regularizer """no""" +758 19 optimizer """adam""" +758 19 training_loop """lcwa""" +758 19 evaluator """rankbased""" +758 20 dataset """kinships""" +758 20 model """transd""" +758 20 loss """softplus""" +758 20 regularizer """no""" +758 20 optimizer """adam""" +758 20 training_loop """lcwa""" +758 20 evaluator """rankbased""" +758 21 dataset """kinships""" +758 21 model """transd""" +758 21 loss """softplus""" +758 21 regularizer """no""" +758 21 optimizer """adam""" +758 21 training_loop """lcwa""" +758 21 evaluator """rankbased""" +758 22 dataset """kinships""" +758 22 
model """transd""" +758 22 loss """softplus""" +758 22 regularizer """no""" +758 22 optimizer """adam""" +758 22 training_loop """lcwa""" +758 22 evaluator """rankbased""" +758 23 dataset """kinships""" +758 23 model """transd""" +758 23 loss """softplus""" +758 23 regularizer """no""" +758 23 optimizer """adam""" +758 23 training_loop """lcwa""" +758 23 evaluator """rankbased""" +758 24 dataset """kinships""" +758 24 model """transd""" +758 24 loss """softplus""" +758 24 regularizer """no""" +758 24 optimizer """adam""" +758 24 training_loop """lcwa""" +758 24 evaluator """rankbased""" +758 25 dataset """kinships""" +758 25 model """transd""" +758 25 loss """softplus""" +758 25 regularizer """no""" +758 25 optimizer """adam""" +758 25 training_loop """lcwa""" +758 25 evaluator """rankbased""" +758 26 dataset """kinships""" +758 26 model """transd""" +758 26 loss """softplus""" +758 26 regularizer """no""" +758 26 optimizer """adam""" +758 26 training_loop """lcwa""" +758 26 evaluator """rankbased""" +758 27 dataset """kinships""" +758 27 model """transd""" +758 27 loss """softplus""" +758 27 regularizer """no""" +758 27 optimizer """adam""" +758 27 training_loop """lcwa""" +758 27 evaluator """rankbased""" +758 28 dataset """kinships""" +758 28 model """transd""" +758 28 loss """softplus""" +758 28 regularizer """no""" +758 28 optimizer """adam""" +758 28 training_loop """lcwa""" +758 28 evaluator """rankbased""" +758 29 dataset """kinships""" +758 29 model """transd""" +758 29 loss """softplus""" +758 29 regularizer """no""" +758 29 optimizer """adam""" +758 29 training_loop """lcwa""" +758 29 evaluator """rankbased""" +758 30 dataset """kinships""" +758 30 model """transd""" +758 30 loss """softplus""" +758 30 regularizer """no""" +758 30 optimizer """adam""" +758 30 training_loop """lcwa""" +758 30 evaluator """rankbased""" +758 31 dataset """kinships""" +758 31 model """transd""" +758 31 loss """softplus""" +758 31 regularizer """no""" +758 31 optimizer 
"""adam""" +758 31 training_loop """lcwa""" +758 31 evaluator """rankbased""" +758 32 dataset """kinships""" +758 32 model """transd""" +758 32 loss """softplus""" +758 32 regularizer """no""" +758 32 optimizer """adam""" +758 32 training_loop """lcwa""" +758 32 evaluator """rankbased""" +758 33 dataset """kinships""" +758 33 model """transd""" +758 33 loss """softplus""" +758 33 regularizer """no""" +758 33 optimizer """adam""" +758 33 training_loop """lcwa""" +758 33 evaluator """rankbased""" +758 34 dataset """kinships""" +758 34 model """transd""" +758 34 loss """softplus""" +758 34 regularizer """no""" +758 34 optimizer """adam""" +758 34 training_loop """lcwa""" +758 34 evaluator """rankbased""" +758 35 dataset """kinships""" +758 35 model """transd""" +758 35 loss """softplus""" +758 35 regularizer """no""" +758 35 optimizer """adam""" +758 35 training_loop """lcwa""" +758 35 evaluator """rankbased""" +758 36 dataset """kinships""" +758 36 model """transd""" +758 36 loss """softplus""" +758 36 regularizer """no""" +758 36 optimizer """adam""" +758 36 training_loop """lcwa""" +758 36 evaluator """rankbased""" +758 37 dataset """kinships""" +758 37 model """transd""" +758 37 loss """softplus""" +758 37 regularizer """no""" +758 37 optimizer """adam""" +758 37 training_loop """lcwa""" +758 37 evaluator """rankbased""" +758 38 dataset """kinships""" +758 38 model """transd""" +758 38 loss """softplus""" +758 38 regularizer """no""" +758 38 optimizer """adam""" +758 38 training_loop """lcwa""" +758 38 evaluator """rankbased""" +758 39 dataset """kinships""" +758 39 model """transd""" +758 39 loss """softplus""" +758 39 regularizer """no""" +758 39 optimizer """adam""" +758 39 training_loop """lcwa""" +758 39 evaluator """rankbased""" +758 40 dataset """kinships""" +758 40 model """transd""" +758 40 loss """softplus""" +758 40 regularizer """no""" +758 40 optimizer """adam""" +758 40 training_loop """lcwa""" +758 40 evaluator """rankbased""" +758 41 dataset 
"""kinships""" +758 41 model """transd""" +758 41 loss """softplus""" +758 41 regularizer """no""" +758 41 optimizer """adam""" +758 41 training_loop """lcwa""" +758 41 evaluator """rankbased""" +758 42 dataset """kinships""" +758 42 model """transd""" +758 42 loss """softplus""" +758 42 regularizer """no""" +758 42 optimizer """adam""" +758 42 training_loop """lcwa""" +758 42 evaluator """rankbased""" +758 43 dataset """kinships""" +758 43 model """transd""" +758 43 loss """softplus""" +758 43 regularizer """no""" +758 43 optimizer """adam""" +758 43 training_loop """lcwa""" +758 43 evaluator """rankbased""" +758 44 dataset """kinships""" +758 44 model """transd""" +758 44 loss """softplus""" +758 44 regularizer """no""" +758 44 optimizer """adam""" +758 44 training_loop """lcwa""" +758 44 evaluator """rankbased""" +758 45 dataset """kinships""" +758 45 model """transd""" +758 45 loss """softplus""" +758 45 regularizer """no""" +758 45 optimizer """adam""" +758 45 training_loop """lcwa""" +758 45 evaluator """rankbased""" +758 46 dataset """kinships""" +758 46 model """transd""" +758 46 loss """softplus""" +758 46 regularizer """no""" +758 46 optimizer """adam""" +758 46 training_loop """lcwa""" +758 46 evaluator """rankbased""" +758 47 dataset """kinships""" +758 47 model """transd""" +758 47 loss """softplus""" +758 47 regularizer """no""" +758 47 optimizer """adam""" +758 47 training_loop """lcwa""" +758 47 evaluator """rankbased""" +758 48 dataset """kinships""" +758 48 model """transd""" +758 48 loss """softplus""" +758 48 regularizer """no""" +758 48 optimizer """adam""" +758 48 training_loop """lcwa""" +758 48 evaluator """rankbased""" +758 49 dataset """kinships""" +758 49 model """transd""" +758 49 loss """softplus""" +758 49 regularizer """no""" +758 49 optimizer """adam""" +758 49 training_loop """lcwa""" +758 49 evaluator """rankbased""" +758 50 dataset """kinships""" +758 50 model """transd""" +758 50 loss """softplus""" +758 50 regularizer """no""" 
+758 50 optimizer """adam""" +758 50 training_loop """lcwa""" +758 50 evaluator """rankbased""" +758 51 dataset """kinships""" +758 51 model """transd""" +758 51 loss """softplus""" +758 51 regularizer """no""" +758 51 optimizer """adam""" +758 51 training_loop """lcwa""" +758 51 evaluator """rankbased""" +758 52 dataset """kinships""" +758 52 model """transd""" +758 52 loss """softplus""" +758 52 regularizer """no""" +758 52 optimizer """adam""" +758 52 training_loop """lcwa""" +758 52 evaluator """rankbased""" +758 53 dataset """kinships""" +758 53 model """transd""" +758 53 loss """softplus""" +758 53 regularizer """no""" +758 53 optimizer """adam""" +758 53 training_loop """lcwa""" +758 53 evaluator """rankbased""" +758 54 dataset """kinships""" +758 54 model """transd""" +758 54 loss """softplus""" +758 54 regularizer """no""" +758 54 optimizer """adam""" +758 54 training_loop """lcwa""" +758 54 evaluator """rankbased""" +758 55 dataset """kinships""" +758 55 model """transd""" +758 55 loss """softplus""" +758 55 regularizer """no""" +758 55 optimizer """adam""" +758 55 training_loop """lcwa""" +758 55 evaluator """rankbased""" +758 56 dataset """kinships""" +758 56 model """transd""" +758 56 loss """softplus""" +758 56 regularizer """no""" +758 56 optimizer """adam""" +758 56 training_loop """lcwa""" +758 56 evaluator """rankbased""" +758 57 dataset """kinships""" +758 57 model """transd""" +758 57 loss """softplus""" +758 57 regularizer """no""" +758 57 optimizer """adam""" +758 57 training_loop """lcwa""" +758 57 evaluator """rankbased""" +758 58 dataset """kinships""" +758 58 model """transd""" +758 58 loss """softplus""" +758 58 regularizer """no""" +758 58 optimizer """adam""" +758 58 training_loop """lcwa""" +758 58 evaluator """rankbased""" +758 59 dataset """kinships""" +758 59 model """transd""" +758 59 loss """softplus""" +758 59 regularizer """no""" +758 59 optimizer """adam""" +758 59 training_loop """lcwa""" +758 59 evaluator """rankbased""" +758 
60 dataset """kinships""" +758 60 model """transd""" +758 60 loss """softplus""" +758 60 regularizer """no""" +758 60 optimizer """adam""" +758 60 training_loop """lcwa""" +758 60 evaluator """rankbased""" +758 61 dataset """kinships""" +758 61 model """transd""" +758 61 loss """softplus""" +758 61 regularizer """no""" +758 61 optimizer """adam""" +758 61 training_loop """lcwa""" +758 61 evaluator """rankbased""" +758 62 dataset """kinships""" +758 62 model """transd""" +758 62 loss """softplus""" +758 62 regularizer """no""" +758 62 optimizer """adam""" +758 62 training_loop """lcwa""" +758 62 evaluator """rankbased""" +758 63 dataset """kinships""" +758 63 model """transd""" +758 63 loss """softplus""" +758 63 regularizer """no""" +758 63 optimizer """adam""" +758 63 training_loop """lcwa""" +758 63 evaluator """rankbased""" +758 64 dataset """kinships""" +758 64 model """transd""" +758 64 loss """softplus""" +758 64 regularizer """no""" +758 64 optimizer """adam""" +758 64 training_loop """lcwa""" +758 64 evaluator """rankbased""" +758 65 dataset """kinships""" +758 65 model """transd""" +758 65 loss """softplus""" +758 65 regularizer """no""" +758 65 optimizer """adam""" +758 65 training_loop """lcwa""" +758 65 evaluator """rankbased""" +758 66 dataset """kinships""" +758 66 model """transd""" +758 66 loss """softplus""" +758 66 regularizer """no""" +758 66 optimizer """adam""" +758 66 training_loop """lcwa""" +758 66 evaluator """rankbased""" +758 67 dataset """kinships""" +758 67 model """transd""" +758 67 loss """softplus""" +758 67 regularizer """no""" +758 67 optimizer """adam""" +758 67 training_loop """lcwa""" +758 67 evaluator """rankbased""" +758 68 dataset """kinships""" +758 68 model """transd""" +758 68 loss """softplus""" +758 68 regularizer """no""" +758 68 optimizer """adam""" +758 68 training_loop """lcwa""" +758 68 evaluator """rankbased""" +758 69 dataset """kinships""" +758 69 model """transd""" +758 69 loss """softplus""" +758 69 regularizer 
"""no""" +758 69 optimizer """adam""" +758 69 training_loop """lcwa""" +758 69 evaluator """rankbased""" +758 70 dataset """kinships""" +758 70 model """transd""" +758 70 loss """softplus""" +758 70 regularizer """no""" +758 70 optimizer """adam""" +758 70 training_loop """lcwa""" +758 70 evaluator """rankbased""" +758 71 dataset """kinships""" +758 71 model """transd""" +758 71 loss """softplus""" +758 71 regularizer """no""" +758 71 optimizer """adam""" +758 71 training_loop """lcwa""" +758 71 evaluator """rankbased""" +758 72 dataset """kinships""" +758 72 model """transd""" +758 72 loss """softplus""" +758 72 regularizer """no""" +758 72 optimizer """adam""" +758 72 training_loop """lcwa""" +758 72 evaluator """rankbased""" +758 73 dataset """kinships""" +758 73 model """transd""" +758 73 loss """softplus""" +758 73 regularizer """no""" +758 73 optimizer """adam""" +758 73 training_loop """lcwa""" +758 73 evaluator """rankbased""" +758 74 dataset """kinships""" +758 74 model """transd""" +758 74 loss """softplus""" +758 74 regularizer """no""" +758 74 optimizer """adam""" +758 74 training_loop """lcwa""" +758 74 evaluator """rankbased""" +758 75 dataset """kinships""" +758 75 model """transd""" +758 75 loss """softplus""" +758 75 regularizer """no""" +758 75 optimizer """adam""" +758 75 training_loop """lcwa""" +758 75 evaluator """rankbased""" +758 76 dataset """kinships""" +758 76 model """transd""" +758 76 loss """softplus""" +758 76 regularizer """no""" +758 76 optimizer """adam""" +758 76 training_loop """lcwa""" +758 76 evaluator """rankbased""" +758 77 dataset """kinships""" +758 77 model """transd""" +758 77 loss """softplus""" +758 77 regularizer """no""" +758 77 optimizer """adam""" +758 77 training_loop """lcwa""" +758 77 evaluator """rankbased""" +758 78 dataset """kinships""" +758 78 model """transd""" +758 78 loss """softplus""" +758 78 regularizer """no""" +758 78 optimizer """adam""" +758 78 training_loop """lcwa""" +758 78 evaluator 
"""rankbased""" +758 79 dataset """kinships""" +758 79 model """transd""" +758 79 loss """softplus""" +758 79 regularizer """no""" +758 79 optimizer """adam""" +758 79 training_loop """lcwa""" +758 79 evaluator """rankbased""" +758 80 dataset """kinships""" +758 80 model """transd""" +758 80 loss """softplus""" +758 80 regularizer """no""" +758 80 optimizer """adam""" +758 80 training_loop """lcwa""" +758 80 evaluator """rankbased""" +758 81 dataset """kinships""" +758 81 model """transd""" +758 81 loss """softplus""" +758 81 regularizer """no""" +758 81 optimizer """adam""" +758 81 training_loop """lcwa""" +758 81 evaluator """rankbased""" +758 82 dataset """kinships""" +758 82 model """transd""" +758 82 loss """softplus""" +758 82 regularizer """no""" +758 82 optimizer """adam""" +758 82 training_loop """lcwa""" +758 82 evaluator """rankbased""" +758 83 dataset """kinships""" +758 83 model """transd""" +758 83 loss """softplus""" +758 83 regularizer """no""" +758 83 optimizer """adam""" +758 83 training_loop """lcwa""" +758 83 evaluator """rankbased""" +758 84 dataset """kinships""" +758 84 model """transd""" +758 84 loss """softplus""" +758 84 regularizer """no""" +758 84 optimizer """adam""" +758 84 training_loop """lcwa""" +758 84 evaluator """rankbased""" +758 85 dataset """kinships""" +758 85 model """transd""" +758 85 loss """softplus""" +758 85 regularizer """no""" +758 85 optimizer """adam""" +758 85 training_loop """lcwa""" +758 85 evaluator """rankbased""" +758 86 dataset """kinships""" +758 86 model """transd""" +758 86 loss """softplus""" +758 86 regularizer """no""" +758 86 optimizer """adam""" +758 86 training_loop """lcwa""" +758 86 evaluator """rankbased""" +758 87 dataset """kinships""" +758 87 model """transd""" +758 87 loss """softplus""" +758 87 regularizer """no""" +758 87 optimizer """adam""" +758 87 training_loop """lcwa""" +758 87 evaluator """rankbased""" +758 88 dataset """kinships""" +758 88 model """transd""" +758 88 loss 
"""softplus""" +758 88 regularizer """no""" +758 88 optimizer """adam""" +758 88 training_loop """lcwa""" +758 88 evaluator """rankbased""" +758 89 dataset """kinships""" +758 89 model """transd""" +758 89 loss """softplus""" +758 89 regularizer """no""" +758 89 optimizer """adam""" +758 89 training_loop """lcwa""" +758 89 evaluator """rankbased""" +758 90 dataset """kinships""" +758 90 model """transd""" +758 90 loss """softplus""" +758 90 regularizer """no""" +758 90 optimizer """adam""" +758 90 training_loop """lcwa""" +758 90 evaluator """rankbased""" +758 91 dataset """kinships""" +758 91 model """transd""" +758 91 loss """softplus""" +758 91 regularizer """no""" +758 91 optimizer """adam""" +758 91 training_loop """lcwa""" +758 91 evaluator """rankbased""" +758 92 dataset """kinships""" +758 92 model """transd""" +758 92 loss """softplus""" +758 92 regularizer """no""" +758 92 optimizer """adam""" +758 92 training_loop """lcwa""" +758 92 evaluator """rankbased""" +758 93 dataset """kinships""" +758 93 model """transd""" +758 93 loss """softplus""" +758 93 regularizer """no""" +758 93 optimizer """adam""" +758 93 training_loop """lcwa""" +758 93 evaluator """rankbased""" +758 94 dataset """kinships""" +758 94 model """transd""" +758 94 loss """softplus""" +758 94 regularizer """no""" +758 94 optimizer """adam""" +758 94 training_loop """lcwa""" +758 94 evaluator """rankbased""" +758 95 dataset """kinships""" +758 95 model """transd""" +758 95 loss """softplus""" +758 95 regularizer """no""" +758 95 optimizer """adam""" +758 95 training_loop """lcwa""" +758 95 evaluator """rankbased""" +758 96 dataset """kinships""" +758 96 model """transd""" +758 96 loss """softplus""" +758 96 regularizer """no""" +758 96 optimizer """adam""" +758 96 training_loop """lcwa""" +758 96 evaluator """rankbased""" +758 97 dataset """kinships""" +758 97 model """transd""" +758 97 loss """softplus""" +758 97 regularizer """no""" +758 97 optimizer """adam""" +758 97 training_loop 
"""lcwa""" +758 97 evaluator """rankbased""" +758 98 dataset """kinships""" +758 98 model """transd""" +758 98 loss """softplus""" +758 98 regularizer """no""" +758 98 optimizer """adam""" +758 98 training_loop """lcwa""" +758 98 evaluator """rankbased""" +758 99 dataset """kinships""" +758 99 model """transd""" +758 99 loss """softplus""" +758 99 regularizer """no""" +758 99 optimizer """adam""" +758 99 training_loop """lcwa""" +758 99 evaluator """rankbased""" +758 100 dataset """kinships""" +758 100 model """transd""" +758 100 loss """softplus""" +758 100 regularizer """no""" +758 100 optimizer """adam""" +758 100 training_loop """lcwa""" +758 100 evaluator """rankbased""" +759 1 model.embedding_dim 1.0 +759 1 model.relation_dim 2.0 +759 1 optimizer.lr 0.009469671517409756 +759 1 training.batch_size 0.0 +759 1 training.label_smoothing 0.5733484193030421 +759 2 model.embedding_dim 2.0 +759 2 model.relation_dim 2.0 +759 2 optimizer.lr 0.05806891303700878 +759 2 training.batch_size 2.0 +759 2 training.label_smoothing 0.006641245174242639 +759 3 model.embedding_dim 1.0 +759 3 model.relation_dim 0.0 +759 3 optimizer.lr 0.0037519533448211457 +759 3 training.batch_size 2.0 +759 3 training.label_smoothing 0.1780037921609391 +759 4 model.embedding_dim 1.0 +759 4 model.relation_dim 2.0 +759 4 optimizer.lr 0.0021015733114509846 +759 4 training.batch_size 0.0 +759 4 training.label_smoothing 0.5162862307591715 +759 5 model.embedding_dim 0.0 +759 5 model.relation_dim 2.0 +759 5 optimizer.lr 0.017704098565794385 +759 5 training.batch_size 2.0 +759 5 training.label_smoothing 0.038415986766118744 +759 6 model.embedding_dim 0.0 +759 6 model.relation_dim 1.0 +759 6 optimizer.lr 0.01749518575634997 +759 6 training.batch_size 1.0 +759 6 training.label_smoothing 0.05144978392449603 +759 7 model.embedding_dim 0.0 +759 7 model.relation_dim 0.0 +759 7 optimizer.lr 0.014677240824271024 +759 7 training.batch_size 1.0 +759 7 training.label_smoothing 0.045389022914603096 +759 8 
model.embedding_dim 2.0 +759 8 model.relation_dim 1.0 +759 8 optimizer.lr 0.06437704060432582 +759 8 training.batch_size 2.0 +759 8 training.label_smoothing 0.0030809613629327556 +759 9 model.embedding_dim 2.0 +759 9 model.relation_dim 2.0 +759 9 optimizer.lr 0.003298929425627355 +759 9 training.batch_size 0.0 +759 9 training.label_smoothing 0.06082833033579209 +759 10 model.embedding_dim 1.0 +759 10 model.relation_dim 2.0 +759 10 optimizer.lr 0.0011160376040551725 +759 10 training.batch_size 2.0 +759 10 training.label_smoothing 0.030341354667891174 +759 11 model.embedding_dim 0.0 +759 11 model.relation_dim 0.0 +759 11 optimizer.lr 0.0015287487226489098 +759 11 training.batch_size 1.0 +759 11 training.label_smoothing 0.0020379944970516225 +759 12 model.embedding_dim 1.0 +759 12 model.relation_dim 0.0 +759 12 optimizer.lr 0.0019343681699034512 +759 12 training.batch_size 0.0 +759 12 training.label_smoothing 0.03681268837517536 +759 13 model.embedding_dim 0.0 +759 13 model.relation_dim 1.0 +759 13 optimizer.lr 0.0019919902288778424 +759 13 training.batch_size 0.0 +759 13 training.label_smoothing 0.0010309760912903757 +759 14 model.embedding_dim 2.0 +759 14 model.relation_dim 2.0 +759 14 optimizer.lr 0.001994495195251677 +759 14 training.batch_size 1.0 +759 14 training.label_smoothing 0.0012330910848833243 +759 15 model.embedding_dim 1.0 +759 15 model.relation_dim 1.0 +759 15 optimizer.lr 0.0021035772560791975 +759 15 training.batch_size 1.0 +759 15 training.label_smoothing 0.03709789261684902 +759 16 model.embedding_dim 0.0 +759 16 model.relation_dim 2.0 +759 16 optimizer.lr 0.007143324871531049 +759 16 training.batch_size 0.0 +759 16 training.label_smoothing 0.7662006301498517 +759 17 model.embedding_dim 0.0 +759 17 model.relation_dim 0.0 +759 17 optimizer.lr 0.0826452079561417 +759 17 training.batch_size 1.0 +759 17 training.label_smoothing 0.9150204817087565 +759 18 model.embedding_dim 1.0 +759 18 model.relation_dim 0.0 +759 18 optimizer.lr 0.009220660016075485 
+759 18 training.batch_size 2.0 +759 18 training.label_smoothing 0.24933588115843464 +759 19 model.embedding_dim 0.0 +759 19 model.relation_dim 0.0 +759 19 optimizer.lr 0.007216634657295828 +759 19 training.batch_size 2.0 +759 19 training.label_smoothing 0.003805298781830832 +759 20 model.embedding_dim 0.0 +759 20 model.relation_dim 0.0 +759 20 optimizer.lr 0.0035672397304463226 +759 20 training.batch_size 1.0 +759 20 training.label_smoothing 0.0019370235596277326 +759 21 model.embedding_dim 0.0 +759 21 model.relation_dim 2.0 +759 21 optimizer.lr 0.04649228801264188 +759 21 training.batch_size 0.0 +759 21 training.label_smoothing 0.015368534803348613 +759 22 model.embedding_dim 2.0 +759 22 model.relation_dim 1.0 +759 22 optimizer.lr 0.0011702164797517528 +759 22 training.batch_size 1.0 +759 22 training.label_smoothing 0.0013589442052751046 +759 23 model.embedding_dim 0.0 +759 23 model.relation_dim 1.0 +759 23 optimizer.lr 0.07076461440308578 +759 23 training.batch_size 1.0 +759 23 training.label_smoothing 0.5629828300662295 +759 24 model.embedding_dim 0.0 +759 24 model.relation_dim 2.0 +759 24 optimizer.lr 0.0022568273229519728 +759 24 training.batch_size 2.0 +759 24 training.label_smoothing 0.001122818817664394 +759 25 model.embedding_dim 2.0 +759 25 model.relation_dim 0.0 +759 25 optimizer.lr 0.003120044536981049 +759 25 training.batch_size 0.0 +759 25 training.label_smoothing 0.03745111263880751 +759 26 model.embedding_dim 1.0 +759 26 model.relation_dim 0.0 +759 26 optimizer.lr 0.022984824354497925 +759 26 training.batch_size 0.0 +759 26 training.label_smoothing 0.00869073491115651 +759 27 model.embedding_dim 0.0 +759 27 model.relation_dim 1.0 +759 27 optimizer.lr 0.018630508705340387 +759 27 training.batch_size 1.0 +759 27 training.label_smoothing 0.38331843794469694 +759 28 model.embedding_dim 2.0 +759 28 model.relation_dim 0.0 +759 28 optimizer.lr 0.07194778238092403 +759 28 training.batch_size 0.0 +759 28 training.label_smoothing 0.015687520344506035 +759 29 
model.embedding_dim 1.0 +759 29 model.relation_dim 1.0 +759 29 optimizer.lr 0.06775341226153454 +759 29 training.batch_size 0.0 +759 29 training.label_smoothing 0.026057646349381867 +759 30 model.embedding_dim 0.0 +759 30 model.relation_dim 2.0 +759 30 optimizer.lr 0.005902216921552761 +759 30 training.batch_size 2.0 +759 30 training.label_smoothing 0.005002704482568276 +759 31 model.embedding_dim 2.0 +759 31 model.relation_dim 1.0 +759 31 optimizer.lr 0.027136575722397936 +759 31 training.batch_size 2.0 +759 31 training.label_smoothing 0.01978566305329951 +759 32 model.embedding_dim 0.0 +759 32 model.relation_dim 0.0 +759 32 optimizer.lr 0.00611378050769732 +759 32 training.batch_size 0.0 +759 32 training.label_smoothing 0.004469142471560798 +759 33 model.embedding_dim 1.0 +759 33 model.relation_dim 0.0 +759 33 optimizer.lr 0.002744191504106857 +759 33 training.batch_size 0.0 +759 33 training.label_smoothing 0.007477700443298887 +759 34 model.embedding_dim 0.0 +759 34 model.relation_dim 2.0 +759 34 optimizer.lr 0.005667795159959424 +759 34 training.batch_size 0.0 +759 34 training.label_smoothing 0.001441110542331929 +759 35 model.embedding_dim 0.0 +759 35 model.relation_dim 0.0 +759 35 optimizer.lr 0.007949629240275556 +759 35 training.batch_size 1.0 +759 35 training.label_smoothing 0.0015310193565662308 +759 36 model.embedding_dim 1.0 +759 36 model.relation_dim 2.0 +759 36 optimizer.lr 0.006936008437352833 +759 36 training.batch_size 0.0 +759 36 training.label_smoothing 0.052631453572277956 +759 37 model.embedding_dim 2.0 +759 37 model.relation_dim 0.0 +759 37 optimizer.lr 0.003915510349998752 +759 37 training.batch_size 1.0 +759 37 training.label_smoothing 0.006733035250764106 +759 38 model.embedding_dim 0.0 +759 38 model.relation_dim 2.0 +759 38 optimizer.lr 0.01668016827437344 +759 38 training.batch_size 1.0 +759 38 training.label_smoothing 0.0010727117306445731 +759 39 model.embedding_dim 0.0 +759 39 model.relation_dim 1.0 +759 39 optimizer.lr 
0.0981003518644233 +759 39 training.batch_size 1.0 +759 39 training.label_smoothing 0.35292327345082497 +759 40 model.embedding_dim 0.0 +759 40 model.relation_dim 0.0 +759 40 optimizer.lr 0.023560560936150013 +759 40 training.batch_size 0.0 +759 40 training.label_smoothing 0.0017106685950579233 +759 41 model.embedding_dim 2.0 +759 41 model.relation_dim 2.0 +759 41 optimizer.lr 0.020885846267317116 +759 41 training.batch_size 0.0 +759 41 training.label_smoothing 0.01062347561087141 +759 42 model.embedding_dim 2.0 +759 42 model.relation_dim 2.0 +759 42 optimizer.lr 0.0244197798497223 +759 42 training.batch_size 1.0 +759 42 training.label_smoothing 0.08758902127600157 +759 43 model.embedding_dim 0.0 +759 43 model.relation_dim 0.0 +759 43 optimizer.lr 0.043571279283434156 +759 43 training.batch_size 2.0 +759 43 training.label_smoothing 0.003948677468781057 +759 44 model.embedding_dim 2.0 +759 44 model.relation_dim 1.0 +759 44 optimizer.lr 0.005483030942968756 +759 44 training.batch_size 1.0 +759 44 training.label_smoothing 0.002573888497238529 +759 45 model.embedding_dim 1.0 +759 45 model.relation_dim 0.0 +759 45 optimizer.lr 0.0016919617082192492 +759 45 training.batch_size 2.0 +759 45 training.label_smoothing 0.7287173820817585 +759 46 model.embedding_dim 2.0 +759 46 model.relation_dim 2.0 +759 46 optimizer.lr 0.0015985054551444812 +759 46 training.batch_size 0.0 +759 46 training.label_smoothing 0.0026599701733361165 +759 47 model.embedding_dim 1.0 +759 47 model.relation_dim 1.0 +759 47 optimizer.lr 0.0020591833032738874 +759 47 training.batch_size 2.0 +759 47 training.label_smoothing 0.013550121710395883 +759 48 model.embedding_dim 2.0 +759 48 model.relation_dim 0.0 +759 48 optimizer.lr 0.01453315631804154 +759 48 training.batch_size 1.0 +759 48 training.label_smoothing 0.06468061684220001 +759 49 model.embedding_dim 2.0 +759 49 model.relation_dim 0.0 +759 49 optimizer.lr 0.04176120103045482 +759 49 training.batch_size 1.0 +759 49 training.label_smoothing 
0.0028510205259362864 +759 50 model.embedding_dim 0.0 +759 50 model.relation_dim 0.0 +759 50 optimizer.lr 0.002875184842993406 +759 50 training.batch_size 2.0 +759 50 training.label_smoothing 0.017983515755406528 +759 51 model.embedding_dim 1.0 +759 51 model.relation_dim 2.0 +759 51 optimizer.lr 0.08266503326373385 +759 51 training.batch_size 2.0 +759 51 training.label_smoothing 0.001390989569003293 +759 52 model.embedding_dim 0.0 +759 52 model.relation_dim 0.0 +759 52 optimizer.lr 0.053167825920993055 +759 52 training.batch_size 2.0 +759 52 training.label_smoothing 0.09263882694863752 +759 53 model.embedding_dim 0.0 +759 53 model.relation_dim 0.0 +759 53 optimizer.lr 0.0462340508445675 +759 53 training.batch_size 2.0 +759 53 training.label_smoothing 0.0737249387157711 +759 54 model.embedding_dim 1.0 +759 54 model.relation_dim 1.0 +759 54 optimizer.lr 0.0018097782110239804 +759 54 training.batch_size 1.0 +759 54 training.label_smoothing 0.008093321854208926 +759 55 model.embedding_dim 0.0 +759 55 model.relation_dim 1.0 +759 55 optimizer.lr 0.009010226909314877 +759 55 training.batch_size 1.0 +759 55 training.label_smoothing 0.009729845563791984 +759 56 model.embedding_dim 2.0 +759 56 model.relation_dim 2.0 +759 56 optimizer.lr 0.03660682530311666 +759 56 training.batch_size 0.0 +759 56 training.label_smoothing 0.005935856009887894 +759 57 model.embedding_dim 2.0 +759 57 model.relation_dim 2.0 +759 57 optimizer.lr 0.0030929349841762824 +759 57 training.batch_size 0.0 +759 57 training.label_smoothing 0.011917824146261866 +759 58 model.embedding_dim 1.0 +759 58 model.relation_dim 2.0 +759 58 optimizer.lr 0.01158736869439507 +759 58 training.batch_size 0.0 +759 58 training.label_smoothing 0.09241352504635585 +759 59 model.embedding_dim 1.0 +759 59 model.relation_dim 0.0 +759 59 optimizer.lr 0.014975896102797825 +759 59 training.batch_size 1.0 +759 59 training.label_smoothing 0.08013858829284407 +759 60 model.embedding_dim 0.0 +759 60 model.relation_dim 2.0 +759 60 
optimizer.lr 0.005334418261408634 +759 60 training.batch_size 0.0 +759 60 training.label_smoothing 0.01362513686013652 +759 61 model.embedding_dim 0.0 +759 61 model.relation_dim 1.0 +759 61 optimizer.lr 0.006353001496203831 +759 61 training.batch_size 2.0 +759 61 training.label_smoothing 0.08085561772352413 +759 62 model.embedding_dim 0.0 +759 62 model.relation_dim 0.0 +759 62 optimizer.lr 0.012503027431178454 +759 62 training.batch_size 1.0 +759 62 training.label_smoothing 0.0012771327758195957 +759 63 model.embedding_dim 0.0 +759 63 model.relation_dim 0.0 +759 63 optimizer.lr 0.0016973750645265034 +759 63 training.batch_size 1.0 +759 63 training.label_smoothing 0.002404920735057434 +759 64 model.embedding_dim 1.0 +759 64 model.relation_dim 2.0 +759 64 optimizer.lr 0.017958453050693105 +759 64 training.batch_size 0.0 +759 64 training.label_smoothing 0.044379302310168275 +759 65 model.embedding_dim 0.0 +759 65 model.relation_dim 1.0 +759 65 optimizer.lr 0.04564788733804999 +759 65 training.batch_size 1.0 +759 65 training.label_smoothing 0.002137777477648074 +759 66 model.embedding_dim 0.0 +759 66 model.relation_dim 1.0 +759 66 optimizer.lr 0.012398493812506229 +759 66 training.batch_size 0.0 +759 66 training.label_smoothing 0.2692310508830286 +759 67 model.embedding_dim 1.0 +759 67 model.relation_dim 0.0 +759 67 optimizer.lr 0.0019670164290053584 +759 67 training.batch_size 2.0 +759 67 training.label_smoothing 0.008566563474256682 +759 68 model.embedding_dim 2.0 +759 68 model.relation_dim 0.0 +759 68 optimizer.lr 0.08528123461299823 +759 68 training.batch_size 0.0 +759 68 training.label_smoothing 0.11659572016912798 +759 69 model.embedding_dim 2.0 +759 69 model.relation_dim 1.0 +759 69 optimizer.lr 0.019844642693389897 +759 69 training.batch_size 2.0 +759 69 training.label_smoothing 0.030916625317663194 +759 70 model.embedding_dim 1.0 +759 70 model.relation_dim 1.0 +759 70 optimizer.lr 0.06775702864669879 +759 70 training.batch_size 2.0 +759 70 
training.label_smoothing 0.4517277579098147 +759 71 model.embedding_dim 2.0 +759 71 model.relation_dim 2.0 +759 71 optimizer.lr 0.0021005770509460392 +759 71 training.batch_size 0.0 +759 71 training.label_smoothing 0.00918675071827444 +759 72 model.embedding_dim 1.0 +759 72 model.relation_dim 1.0 +759 72 optimizer.lr 0.028579049035495924 +759 72 training.batch_size 1.0 +759 72 training.label_smoothing 0.002219517883975891 +759 73 model.embedding_dim 2.0 +759 73 model.relation_dim 1.0 +759 73 optimizer.lr 0.008040307927464331 +759 73 training.batch_size 1.0 +759 73 training.label_smoothing 0.006044904323369847 +759 74 model.embedding_dim 0.0 +759 74 model.relation_dim 1.0 +759 74 optimizer.lr 0.03562884990319554 +759 74 training.batch_size 2.0 +759 74 training.label_smoothing 0.021520428311932627 +759 75 model.embedding_dim 0.0 +759 75 model.relation_dim 0.0 +759 75 optimizer.lr 0.0935124508576304 +759 75 training.batch_size 0.0 +759 75 training.label_smoothing 0.8909934130883325 +759 76 model.embedding_dim 1.0 +759 76 model.relation_dim 2.0 +759 76 optimizer.lr 0.011725478794051166 +759 76 training.batch_size 2.0 +759 76 training.label_smoothing 0.10965396436423691 +759 77 model.embedding_dim 2.0 +759 77 model.relation_dim 0.0 +759 77 optimizer.lr 0.002477838818723693 +759 77 training.batch_size 0.0 +759 77 training.label_smoothing 0.43499723594674045 +759 78 model.embedding_dim 2.0 +759 78 model.relation_dim 2.0 +759 78 optimizer.lr 0.0021692155385943895 +759 78 training.batch_size 1.0 +759 78 training.label_smoothing 0.03310204272890963 +759 79 model.embedding_dim 1.0 +759 79 model.relation_dim 0.0 +759 79 optimizer.lr 0.0033310787930642266 +759 79 training.batch_size 1.0 +759 79 training.label_smoothing 0.0057332936860986965 +759 80 model.embedding_dim 2.0 +759 80 model.relation_dim 2.0 +759 80 optimizer.lr 0.08691922594048113 +759 80 training.batch_size 1.0 +759 80 training.label_smoothing 0.057607299440466865 +759 81 model.embedding_dim 2.0 +759 81 
model.relation_dim 1.0 +759 81 optimizer.lr 0.006315821550873691 +759 81 training.batch_size 2.0 +759 81 training.label_smoothing 0.9529429743111584 +759 82 model.embedding_dim 2.0 +759 82 model.relation_dim 1.0 +759 82 optimizer.lr 0.03931418789393891 +759 82 training.batch_size 0.0 +759 82 training.label_smoothing 0.017003803683629064 +759 83 model.embedding_dim 0.0 +759 83 model.relation_dim 1.0 +759 83 optimizer.lr 0.004164438824334873 +759 83 training.batch_size 2.0 +759 83 training.label_smoothing 0.011062972987225703 +759 84 model.embedding_dim 1.0 +759 84 model.relation_dim 1.0 +759 84 optimizer.lr 0.0021859098036606027 +759 84 training.batch_size 1.0 +759 84 training.label_smoothing 0.0785081843216346 +759 85 model.embedding_dim 2.0 +759 85 model.relation_dim 0.0 +759 85 optimizer.lr 0.013465923151618334 +759 85 training.batch_size 2.0 +759 85 training.label_smoothing 0.41901043650112474 +759 86 model.embedding_dim 0.0 +759 86 model.relation_dim 0.0 +759 86 optimizer.lr 0.08304985144592737 +759 86 training.batch_size 0.0 +759 86 training.label_smoothing 0.01578436105062534 +759 87 model.embedding_dim 0.0 +759 87 model.relation_dim 2.0 +759 87 optimizer.lr 0.0056575684806281 +759 87 training.batch_size 0.0 +759 87 training.label_smoothing 0.7025659367904367 +759 88 model.embedding_dim 2.0 +759 88 model.relation_dim 2.0 +759 88 optimizer.lr 0.04233217584844375 +759 88 training.batch_size 2.0 +759 88 training.label_smoothing 0.013698256604553757 +759 89 model.embedding_dim 1.0 +759 89 model.relation_dim 0.0 +759 89 optimizer.lr 0.009970463454124782 +759 89 training.batch_size 0.0 +759 89 training.label_smoothing 0.04590249869244016 +759 90 model.embedding_dim 0.0 +759 90 model.relation_dim 0.0 +759 90 optimizer.lr 0.012530742597073014 +759 90 training.batch_size 0.0 +759 90 training.label_smoothing 0.2626364125161651 +759 91 model.embedding_dim 2.0 +759 91 model.relation_dim 2.0 +759 91 optimizer.lr 0.002854197688652109 +759 91 training.batch_size 1.0 +759 91 
training.label_smoothing 0.01252268106570916 +759 92 model.embedding_dim 1.0 +759 92 model.relation_dim 2.0 +759 92 optimizer.lr 0.06067590828136181 +759 92 training.batch_size 0.0 +759 92 training.label_smoothing 0.09243201018934742 +759 93 model.embedding_dim 2.0 +759 93 model.relation_dim 2.0 +759 93 optimizer.lr 0.0011866273054076957 +759 93 training.batch_size 2.0 +759 93 training.label_smoothing 0.09871228881819215 +759 94 model.embedding_dim 0.0 +759 94 model.relation_dim 2.0 +759 94 optimizer.lr 0.09626886992716771 +759 94 training.batch_size 1.0 +759 94 training.label_smoothing 0.03603319118636714 +759 95 model.embedding_dim 1.0 +759 95 model.relation_dim 1.0 +759 95 optimizer.lr 0.004131039672868028 +759 95 training.batch_size 0.0 +759 95 training.label_smoothing 0.00994753621019219 +759 96 model.embedding_dim 1.0 +759 96 model.relation_dim 0.0 +759 96 optimizer.lr 0.005484272063458165 +759 96 training.batch_size 1.0 +759 96 training.label_smoothing 0.06684842013138088 +759 97 model.embedding_dim 0.0 +759 97 model.relation_dim 2.0 +759 97 optimizer.lr 0.050390668305280946 +759 97 training.batch_size 1.0 +759 97 training.label_smoothing 0.30684796610433046 +759 98 model.embedding_dim 1.0 +759 98 model.relation_dim 1.0 +759 98 optimizer.lr 0.006966762648873892 +759 98 training.batch_size 1.0 +759 98 training.label_smoothing 0.4000366864374486 +759 99 model.embedding_dim 2.0 +759 99 model.relation_dim 1.0 +759 99 optimizer.lr 0.0734158710767675 +759 99 training.batch_size 0.0 +759 99 training.label_smoothing 0.3703427149293606 +759 100 model.embedding_dim 2.0 +759 100 model.relation_dim 1.0 +759 100 optimizer.lr 0.04824929968610453 +759 100 training.batch_size 0.0 +759 100 training.label_smoothing 0.23256759038843552 +759 1 dataset """kinships""" +759 1 model """transd""" +759 1 loss """bceaftersigmoid""" +759 1 regularizer """no""" +759 1 optimizer """adam""" +759 1 training_loop """lcwa""" +759 1 evaluator """rankbased""" +759 2 dataset """kinships""" +759 
2 model """transd""" +759 2 loss """bceaftersigmoid""" +759 2 regularizer """no""" +759 2 optimizer """adam""" +759 2 training_loop """lcwa""" +759 2 evaluator """rankbased""" +759 3 dataset """kinships""" +759 3 model """transd""" +759 3 loss """bceaftersigmoid""" +759 3 regularizer """no""" +759 3 optimizer """adam""" +759 3 training_loop """lcwa""" +759 3 evaluator """rankbased""" +759 4 dataset """kinships""" +759 4 model """transd""" +759 4 loss """bceaftersigmoid""" +759 4 regularizer """no""" +759 4 optimizer """adam""" +759 4 training_loop """lcwa""" +759 4 evaluator """rankbased""" +759 5 dataset """kinships""" +759 5 model """transd""" +759 5 loss """bceaftersigmoid""" +759 5 regularizer """no""" +759 5 optimizer """adam""" +759 5 training_loop """lcwa""" +759 5 evaluator """rankbased""" +759 6 dataset """kinships""" +759 6 model """transd""" +759 6 loss """bceaftersigmoid""" +759 6 regularizer """no""" +759 6 optimizer """adam""" +759 6 training_loop """lcwa""" +759 6 evaluator """rankbased""" +759 7 dataset """kinships""" +759 7 model """transd""" +759 7 loss """bceaftersigmoid""" +759 7 regularizer """no""" +759 7 optimizer """adam""" +759 7 training_loop """lcwa""" +759 7 evaluator """rankbased""" +759 8 dataset """kinships""" +759 8 model """transd""" +759 8 loss """bceaftersigmoid""" +759 8 regularizer """no""" +759 8 optimizer """adam""" +759 8 training_loop """lcwa""" +759 8 evaluator """rankbased""" +759 9 dataset """kinships""" +759 9 model """transd""" +759 9 loss """bceaftersigmoid""" +759 9 regularizer """no""" +759 9 optimizer """adam""" +759 9 training_loop """lcwa""" +759 9 evaluator """rankbased""" +759 10 dataset """kinships""" +759 10 model """transd""" +759 10 loss """bceaftersigmoid""" +759 10 regularizer """no""" +759 10 optimizer """adam""" +759 10 training_loop """lcwa""" +759 10 evaluator """rankbased""" +759 11 dataset """kinships""" +759 11 model """transd""" +759 11 loss """bceaftersigmoid""" +759 11 regularizer """no""" +759 
11 optimizer """adam""" +759 11 training_loop """lcwa""" +759 11 evaluator """rankbased""" +759 12 dataset """kinships""" +759 12 model """transd""" +759 12 loss """bceaftersigmoid""" +759 12 regularizer """no""" +759 12 optimizer """adam""" +759 12 training_loop """lcwa""" +759 12 evaluator """rankbased""" +759 13 dataset """kinships""" +759 13 model """transd""" +759 13 loss """bceaftersigmoid""" +759 13 regularizer """no""" +759 13 optimizer """adam""" +759 13 training_loop """lcwa""" +759 13 evaluator """rankbased""" +759 14 dataset """kinships""" +759 14 model """transd""" +759 14 loss """bceaftersigmoid""" +759 14 regularizer """no""" +759 14 optimizer """adam""" +759 14 training_loop """lcwa""" +759 14 evaluator """rankbased""" +759 15 dataset """kinships""" +759 15 model """transd""" +759 15 loss """bceaftersigmoid""" +759 15 regularizer """no""" +759 15 optimizer """adam""" +759 15 training_loop """lcwa""" +759 15 evaluator """rankbased""" +759 16 dataset """kinships""" +759 16 model """transd""" +759 16 loss """bceaftersigmoid""" +759 16 regularizer """no""" +759 16 optimizer """adam""" +759 16 training_loop """lcwa""" +759 16 evaluator """rankbased""" +759 17 dataset """kinships""" +759 17 model """transd""" +759 17 loss """bceaftersigmoid""" +759 17 regularizer """no""" +759 17 optimizer """adam""" +759 17 training_loop """lcwa""" +759 17 evaluator """rankbased""" +759 18 dataset """kinships""" +759 18 model """transd""" +759 18 loss """bceaftersigmoid""" +759 18 regularizer """no""" +759 18 optimizer """adam""" +759 18 training_loop """lcwa""" +759 18 evaluator """rankbased""" +759 19 dataset """kinships""" +759 19 model """transd""" +759 19 loss """bceaftersigmoid""" +759 19 regularizer """no""" +759 19 optimizer """adam""" +759 19 training_loop """lcwa""" +759 19 evaluator """rankbased""" +759 20 dataset """kinships""" +759 20 model """transd""" +759 20 loss """bceaftersigmoid""" +759 20 regularizer """no""" +759 20 optimizer """adam""" +759 20 
training_loop """lcwa""" +759 20 evaluator """rankbased""" +759 21 dataset """kinships""" +759 21 model """transd""" +759 21 loss """bceaftersigmoid""" +759 21 regularizer """no""" +759 21 optimizer """adam""" +759 21 training_loop """lcwa""" +759 21 evaluator """rankbased""" +759 22 dataset """kinships""" +759 22 model """transd""" +759 22 loss """bceaftersigmoid""" +759 22 regularizer """no""" +759 22 optimizer """adam""" +759 22 training_loop """lcwa""" +759 22 evaluator """rankbased""" +759 23 dataset """kinships""" +759 23 model """transd""" +759 23 loss """bceaftersigmoid""" +759 23 regularizer """no""" +759 23 optimizer """adam""" +759 23 training_loop """lcwa""" +759 23 evaluator """rankbased""" +759 24 dataset """kinships""" +759 24 model """transd""" +759 24 loss """bceaftersigmoid""" +759 24 regularizer """no""" +759 24 optimizer """adam""" +759 24 training_loop """lcwa""" +759 24 evaluator """rankbased""" +759 25 dataset """kinships""" +759 25 model """transd""" +759 25 loss """bceaftersigmoid""" +759 25 regularizer """no""" +759 25 optimizer """adam""" +759 25 training_loop """lcwa""" +759 25 evaluator """rankbased""" +759 26 dataset """kinships""" +759 26 model """transd""" +759 26 loss """bceaftersigmoid""" +759 26 regularizer """no""" +759 26 optimizer """adam""" +759 26 training_loop """lcwa""" +759 26 evaluator """rankbased""" +759 27 dataset """kinships""" +759 27 model """transd""" +759 27 loss """bceaftersigmoid""" +759 27 regularizer """no""" +759 27 optimizer """adam""" +759 27 training_loop """lcwa""" +759 27 evaluator """rankbased""" +759 28 dataset """kinships""" +759 28 model """transd""" +759 28 loss """bceaftersigmoid""" +759 28 regularizer """no""" +759 28 optimizer """adam""" +759 28 training_loop """lcwa""" +759 28 evaluator """rankbased""" +759 29 dataset """kinships""" +759 29 model """transd""" +759 29 loss """bceaftersigmoid""" +759 29 regularizer """no""" +759 29 optimizer """adam""" +759 29 training_loop """lcwa""" +759 29 
evaluator """rankbased""" +759 30 dataset """kinships""" +759 30 model """transd""" +759 30 loss """bceaftersigmoid""" +759 30 regularizer """no""" +759 30 optimizer """adam""" +759 30 training_loop """lcwa""" +759 30 evaluator """rankbased""" +759 31 dataset """kinships""" +759 31 model """transd""" +759 31 loss """bceaftersigmoid""" +759 31 regularizer """no""" +759 31 optimizer """adam""" +759 31 training_loop """lcwa""" +759 31 evaluator """rankbased""" +759 32 dataset """kinships""" +759 32 model """transd""" +759 32 loss """bceaftersigmoid""" +759 32 regularizer """no""" +759 32 optimizer """adam""" +759 32 training_loop """lcwa""" +759 32 evaluator """rankbased""" +759 33 dataset """kinships""" +759 33 model """transd""" +759 33 loss """bceaftersigmoid""" +759 33 regularizer """no""" +759 33 optimizer """adam""" +759 33 training_loop """lcwa""" +759 33 evaluator """rankbased""" +759 34 dataset """kinships""" +759 34 model """transd""" +759 34 loss """bceaftersigmoid""" +759 34 regularizer """no""" +759 34 optimizer """adam""" +759 34 training_loop """lcwa""" +759 34 evaluator """rankbased""" +759 35 dataset """kinships""" +759 35 model """transd""" +759 35 loss """bceaftersigmoid""" +759 35 regularizer """no""" +759 35 optimizer """adam""" +759 35 training_loop """lcwa""" +759 35 evaluator """rankbased""" +759 36 dataset """kinships""" +759 36 model """transd""" +759 36 loss """bceaftersigmoid""" +759 36 regularizer """no""" +759 36 optimizer """adam""" +759 36 training_loop """lcwa""" +759 36 evaluator """rankbased""" +759 37 dataset """kinships""" +759 37 model """transd""" +759 37 loss """bceaftersigmoid""" +759 37 regularizer """no""" +759 37 optimizer """adam""" +759 37 training_loop """lcwa""" +759 37 evaluator """rankbased""" +759 38 dataset """kinships""" +759 38 model """transd""" +759 38 loss """bceaftersigmoid""" +759 38 regularizer """no""" +759 38 optimizer """adam""" +759 38 training_loop """lcwa""" +759 38 evaluator """rankbased""" +759 39 
dataset """kinships""" +759 39 model """transd""" +759 39 loss """bceaftersigmoid""" +759 39 regularizer """no""" +759 39 optimizer """adam""" +759 39 training_loop """lcwa""" +759 39 evaluator """rankbased""" +759 40 dataset """kinships""" +759 40 model """transd""" +759 40 loss """bceaftersigmoid""" +759 40 regularizer """no""" +759 40 optimizer """adam""" +759 40 training_loop """lcwa""" +759 40 evaluator """rankbased""" +759 41 dataset """kinships""" +759 41 model """transd""" +759 41 loss """bceaftersigmoid""" +759 41 regularizer """no""" +759 41 optimizer """adam""" +759 41 training_loop """lcwa""" +759 41 evaluator """rankbased""" +759 42 dataset """kinships""" +759 42 model """transd""" +759 42 loss """bceaftersigmoid""" +759 42 regularizer """no""" +759 42 optimizer """adam""" +759 42 training_loop """lcwa""" +759 42 evaluator """rankbased""" +759 43 dataset """kinships""" +759 43 model """transd""" +759 43 loss """bceaftersigmoid""" +759 43 regularizer """no""" +759 43 optimizer """adam""" +759 43 training_loop """lcwa""" +759 43 evaluator """rankbased""" +759 44 dataset """kinships""" +759 44 model """transd""" +759 44 loss """bceaftersigmoid""" +759 44 regularizer """no""" +759 44 optimizer """adam""" +759 44 training_loop """lcwa""" +759 44 evaluator """rankbased""" +759 45 dataset """kinships""" +759 45 model """transd""" +759 45 loss """bceaftersigmoid""" +759 45 regularizer """no""" +759 45 optimizer """adam""" +759 45 training_loop """lcwa""" +759 45 evaluator """rankbased""" +759 46 dataset """kinships""" +759 46 model """transd""" +759 46 loss """bceaftersigmoid""" +759 46 regularizer """no""" +759 46 optimizer """adam""" +759 46 training_loop """lcwa""" +759 46 evaluator """rankbased""" +759 47 dataset """kinships""" +759 47 model """transd""" +759 47 loss """bceaftersigmoid""" +759 47 regularizer """no""" +759 47 optimizer """adam""" +759 47 training_loop """lcwa""" +759 47 evaluator """rankbased""" +759 48 dataset """kinships""" +759 48 model 
"""transd""" +759 48 loss """bceaftersigmoid""" +759 48 regularizer """no""" +759 48 optimizer """adam""" +759 48 training_loop """lcwa""" +759 48 evaluator """rankbased""" +759 49 dataset """kinships""" +759 49 model """transd""" +759 49 loss """bceaftersigmoid""" +759 49 regularizer """no""" +759 49 optimizer """adam""" +759 49 training_loop """lcwa""" +759 49 evaluator """rankbased""" +759 50 dataset """kinships""" +759 50 model """transd""" +759 50 loss """bceaftersigmoid""" +759 50 regularizer """no""" +759 50 optimizer """adam""" +759 50 training_loop """lcwa""" +759 50 evaluator """rankbased""" +759 51 dataset """kinships""" +759 51 model """transd""" +759 51 loss """bceaftersigmoid""" +759 51 regularizer """no""" +759 51 optimizer """adam""" +759 51 training_loop """lcwa""" +759 51 evaluator """rankbased""" +759 52 dataset """kinships""" +759 52 model """transd""" +759 52 loss """bceaftersigmoid""" +759 52 regularizer """no""" +759 52 optimizer """adam""" +759 52 training_loop """lcwa""" +759 52 evaluator """rankbased""" +759 53 dataset """kinships""" +759 53 model """transd""" +759 53 loss """bceaftersigmoid""" +759 53 regularizer """no""" +759 53 optimizer """adam""" +759 53 training_loop """lcwa""" +759 53 evaluator """rankbased""" +759 54 dataset """kinships""" +759 54 model """transd""" +759 54 loss """bceaftersigmoid""" +759 54 regularizer """no""" +759 54 optimizer """adam""" +759 54 training_loop """lcwa""" +759 54 evaluator """rankbased""" +759 55 dataset """kinships""" +759 55 model """transd""" +759 55 loss """bceaftersigmoid""" +759 55 regularizer """no""" +759 55 optimizer """adam""" +759 55 training_loop """lcwa""" +759 55 evaluator """rankbased""" +759 56 dataset """kinships""" +759 56 model """transd""" +759 56 loss """bceaftersigmoid""" +759 56 regularizer """no""" +759 56 optimizer """adam""" +759 56 training_loop """lcwa""" +759 56 evaluator """rankbased""" +759 57 dataset """kinships""" +759 57 model """transd""" +759 57 loss 
"""bceaftersigmoid""" +759 57 regularizer """no""" +759 57 optimizer """adam""" +759 57 training_loop """lcwa""" +759 57 evaluator """rankbased""" +759 58 dataset """kinships""" +759 58 model """transd""" +759 58 loss """bceaftersigmoid""" +759 58 regularizer """no""" +759 58 optimizer """adam""" +759 58 training_loop """lcwa""" +759 58 evaluator """rankbased""" +759 59 dataset """kinships""" +759 59 model """transd""" +759 59 loss """bceaftersigmoid""" +759 59 regularizer """no""" +759 59 optimizer """adam""" +759 59 training_loop """lcwa""" +759 59 evaluator """rankbased""" +759 60 dataset """kinships""" +759 60 model """transd""" +759 60 loss """bceaftersigmoid""" +759 60 regularizer """no""" +759 60 optimizer """adam""" +759 60 training_loop """lcwa""" +759 60 evaluator """rankbased""" +759 61 dataset """kinships""" +759 61 model """transd""" +759 61 loss """bceaftersigmoid""" +759 61 regularizer """no""" +759 61 optimizer """adam""" +759 61 training_loop """lcwa""" +759 61 evaluator """rankbased""" +759 62 dataset """kinships""" +759 62 model """transd""" +759 62 loss """bceaftersigmoid""" +759 62 regularizer """no""" +759 62 optimizer """adam""" +759 62 training_loop """lcwa""" +759 62 evaluator """rankbased""" +759 63 dataset """kinships""" +759 63 model """transd""" +759 63 loss """bceaftersigmoid""" +759 63 regularizer """no""" +759 63 optimizer """adam""" +759 63 training_loop """lcwa""" +759 63 evaluator """rankbased""" +759 64 dataset """kinships""" +759 64 model """transd""" +759 64 loss """bceaftersigmoid""" +759 64 regularizer """no""" +759 64 optimizer """adam""" +759 64 training_loop """lcwa""" +759 64 evaluator """rankbased""" +759 65 dataset """kinships""" +759 65 model """transd""" +759 65 loss """bceaftersigmoid""" +759 65 regularizer """no""" +759 65 optimizer """adam""" +759 65 training_loop """lcwa""" +759 65 evaluator """rankbased""" +759 66 dataset """kinships""" +759 66 model """transd""" +759 66 loss """bceaftersigmoid""" +759 66 
regularizer """no""" +759 66 optimizer """adam""" +759 66 training_loop """lcwa""" +759 66 evaluator """rankbased""" +759 67 dataset """kinships""" +759 67 model """transd""" +759 67 loss """bceaftersigmoid""" +759 67 regularizer """no""" +759 67 optimizer """adam""" +759 67 training_loop """lcwa""" +759 67 evaluator """rankbased""" +759 68 dataset """kinships""" +759 68 model """transd""" +759 68 loss """bceaftersigmoid""" +759 68 regularizer """no""" +759 68 optimizer """adam""" +759 68 training_loop """lcwa""" +759 68 evaluator """rankbased""" +759 69 dataset """kinships""" +759 69 model """transd""" +759 69 loss """bceaftersigmoid""" +759 69 regularizer """no""" +759 69 optimizer """adam""" +759 69 training_loop """lcwa""" +759 69 evaluator """rankbased""" +759 70 dataset """kinships""" +759 70 model """transd""" +759 70 loss """bceaftersigmoid""" +759 70 regularizer """no""" +759 70 optimizer """adam""" +759 70 training_loop """lcwa""" +759 70 evaluator """rankbased""" +759 71 dataset """kinships""" +759 71 model """transd""" +759 71 loss """bceaftersigmoid""" +759 71 regularizer """no""" +759 71 optimizer """adam""" +759 71 training_loop """lcwa""" +759 71 evaluator """rankbased""" +759 72 dataset """kinships""" +759 72 model """transd""" +759 72 loss """bceaftersigmoid""" +759 72 regularizer """no""" +759 72 optimizer """adam""" +759 72 training_loop """lcwa""" +759 72 evaluator """rankbased""" +759 73 dataset """kinships""" +759 73 model """transd""" +759 73 loss """bceaftersigmoid""" +759 73 regularizer """no""" +759 73 optimizer """adam""" +759 73 training_loop """lcwa""" +759 73 evaluator """rankbased""" +759 74 dataset """kinships""" +759 74 model """transd""" +759 74 loss """bceaftersigmoid""" +759 74 regularizer """no""" +759 74 optimizer """adam""" +759 74 training_loop """lcwa""" +759 74 evaluator """rankbased""" +759 75 dataset """kinships""" +759 75 model """transd""" +759 75 loss """bceaftersigmoid""" +759 75 regularizer """no""" +759 75 
optimizer """adam""" +759 75 training_loop """lcwa""" +759 75 evaluator """rankbased""" +759 76 dataset """kinships""" +759 76 model """transd""" +759 76 loss """bceaftersigmoid""" +759 76 regularizer """no""" +759 76 optimizer """adam""" +759 76 training_loop """lcwa""" +759 76 evaluator """rankbased""" +759 77 dataset """kinships""" +759 77 model """transd""" +759 77 loss """bceaftersigmoid""" +759 77 regularizer """no""" +759 77 optimizer """adam""" +759 77 training_loop """lcwa""" +759 77 evaluator """rankbased""" +759 78 dataset """kinships""" +759 78 model """transd""" +759 78 loss """bceaftersigmoid""" +759 78 regularizer """no""" +759 78 optimizer """adam""" +759 78 training_loop """lcwa""" +759 78 evaluator """rankbased""" +759 79 dataset """kinships""" +759 79 model """transd""" +759 79 loss """bceaftersigmoid""" +759 79 regularizer """no""" +759 79 optimizer """adam""" +759 79 training_loop """lcwa""" +759 79 evaluator """rankbased""" +759 80 dataset """kinships""" +759 80 model """transd""" +759 80 loss """bceaftersigmoid""" +759 80 regularizer """no""" +759 80 optimizer """adam""" +759 80 training_loop """lcwa""" +759 80 evaluator """rankbased""" +759 81 dataset """kinships""" +759 81 model """transd""" +759 81 loss """bceaftersigmoid""" +759 81 regularizer """no""" +759 81 optimizer """adam""" +759 81 training_loop """lcwa""" +759 81 evaluator """rankbased""" +759 82 dataset """kinships""" +759 82 model """transd""" +759 82 loss """bceaftersigmoid""" +759 82 regularizer """no""" +759 82 optimizer """adam""" +759 82 training_loop """lcwa""" +759 82 evaluator """rankbased""" +759 83 dataset """kinships""" +759 83 model """transd""" +759 83 loss """bceaftersigmoid""" +759 83 regularizer """no""" +759 83 optimizer """adam""" +759 83 training_loop """lcwa""" +759 83 evaluator """rankbased""" +759 84 dataset """kinships""" +759 84 model """transd""" +759 84 loss """bceaftersigmoid""" +759 84 regularizer """no""" +759 84 optimizer """adam""" +759 84 
training_loop """lcwa""" +759 84 evaluator """rankbased""" +759 85 dataset """kinships""" +759 85 model """transd""" +759 85 loss """bceaftersigmoid""" +759 85 regularizer """no""" +759 85 optimizer """adam""" +759 85 training_loop """lcwa""" +759 85 evaluator """rankbased""" +759 86 dataset """kinships""" +759 86 model """transd""" +759 86 loss """bceaftersigmoid""" +759 86 regularizer """no""" +759 86 optimizer """adam""" +759 86 training_loop """lcwa""" +759 86 evaluator """rankbased""" +759 87 dataset """kinships""" +759 87 model """transd""" +759 87 loss """bceaftersigmoid""" +759 87 regularizer """no""" +759 87 optimizer """adam""" +759 87 training_loop """lcwa""" +759 87 evaluator """rankbased""" +759 88 dataset """kinships""" +759 88 model """transd""" +759 88 loss """bceaftersigmoid""" +759 88 regularizer """no""" +759 88 optimizer """adam""" +759 88 training_loop """lcwa""" +759 88 evaluator """rankbased""" +759 89 dataset """kinships""" +759 89 model """transd""" +759 89 loss """bceaftersigmoid""" +759 89 regularizer """no""" +759 89 optimizer """adam""" +759 89 training_loop """lcwa""" +759 89 evaluator """rankbased""" +759 90 dataset """kinships""" +759 90 model """transd""" +759 90 loss """bceaftersigmoid""" +759 90 regularizer """no""" +759 90 optimizer """adam""" +759 90 training_loop """lcwa""" +759 90 evaluator """rankbased""" +759 91 dataset """kinships""" +759 91 model """transd""" +759 91 loss """bceaftersigmoid""" +759 91 regularizer """no""" +759 91 optimizer """adam""" +759 91 training_loop """lcwa""" +759 91 evaluator """rankbased""" +759 92 dataset """kinships""" +759 92 model """transd""" +759 92 loss """bceaftersigmoid""" +759 92 regularizer """no""" +759 92 optimizer """adam""" +759 92 training_loop """lcwa""" +759 92 evaluator """rankbased""" +759 93 dataset """kinships""" +759 93 model """transd""" +759 93 loss """bceaftersigmoid""" +759 93 regularizer """no""" +759 93 optimizer """adam""" +759 93 training_loop """lcwa""" +759 93 
evaluator """rankbased""" +759 94 dataset """kinships""" +759 94 model """transd""" +759 94 loss """bceaftersigmoid""" +759 94 regularizer """no""" +759 94 optimizer """adam""" +759 94 training_loop """lcwa""" +759 94 evaluator """rankbased""" +759 95 dataset """kinships""" +759 95 model """transd""" +759 95 loss """bceaftersigmoid""" +759 95 regularizer """no""" +759 95 optimizer """adam""" +759 95 training_loop """lcwa""" +759 95 evaluator """rankbased""" +759 96 dataset """kinships""" +759 96 model """transd""" +759 96 loss """bceaftersigmoid""" +759 96 regularizer """no""" +759 96 optimizer """adam""" +759 96 training_loop """lcwa""" +759 96 evaluator """rankbased""" +759 97 dataset """kinships""" +759 97 model """transd""" +759 97 loss """bceaftersigmoid""" +759 97 regularizer """no""" +759 97 optimizer """adam""" +759 97 training_loop """lcwa""" +759 97 evaluator """rankbased""" +759 98 dataset """kinships""" +759 98 model """transd""" +759 98 loss """bceaftersigmoid""" +759 98 regularizer """no""" +759 98 optimizer """adam""" +759 98 training_loop """lcwa""" +759 98 evaluator """rankbased""" +759 99 dataset """kinships""" +759 99 model """transd""" +759 99 loss """bceaftersigmoid""" +759 99 regularizer """no""" +759 99 optimizer """adam""" +759 99 training_loop """lcwa""" +759 99 evaluator """rankbased""" +759 100 dataset """kinships""" +759 100 model """transd""" +759 100 loss """bceaftersigmoid""" +759 100 regularizer """no""" +759 100 optimizer """adam""" +759 100 training_loop """lcwa""" +759 100 evaluator """rankbased""" +760 1 model.embedding_dim 1.0 +760 1 model.relation_dim 1.0 +760 1 optimizer.lr 0.031109556373548754 +760 1 training.batch_size 2.0 +760 1 training.label_smoothing 0.2680501235354796 +760 2 model.embedding_dim 0.0 +760 2 model.relation_dim 1.0 +760 2 optimizer.lr 0.014276287784464412 +760 2 training.batch_size 0.0 +760 2 training.label_smoothing 0.004476610751869968 +760 3 model.embedding_dim 1.0 +760 3 model.relation_dim 1.0 +760 3 
optimizer.lr 0.006173560859576614 +760 3 training.batch_size 0.0 +760 3 training.label_smoothing 0.0037854164749694247 +760 4 model.embedding_dim 1.0 +760 4 model.relation_dim 2.0 +760 4 optimizer.lr 0.013199540833905203 +760 4 training.batch_size 0.0 +760 4 training.label_smoothing 0.0022031730434892384 +760 5 model.embedding_dim 1.0 +760 5 model.relation_dim 2.0 +760 5 optimizer.lr 0.02534160457751597 +760 5 training.batch_size 1.0 +760 5 training.label_smoothing 0.42102292609098596 +760 6 model.embedding_dim 2.0 +760 6 model.relation_dim 1.0 +760 6 optimizer.lr 0.004743893309982769 +760 6 training.batch_size 0.0 +760 6 training.label_smoothing 0.05253626824931992 +760 7 model.embedding_dim 1.0 +760 7 model.relation_dim 2.0 +760 7 optimizer.lr 0.01197847431520585 +760 7 training.batch_size 2.0 +760 7 training.label_smoothing 0.0019449474051741764 +760 8 model.embedding_dim 1.0 +760 8 model.relation_dim 0.0 +760 8 optimizer.lr 0.09166182274856388 +760 8 training.batch_size 2.0 +760 8 training.label_smoothing 0.5483467017183666 +760 9 model.embedding_dim 0.0 +760 9 model.relation_dim 0.0 +760 9 optimizer.lr 0.09394237959036388 +760 9 training.batch_size 1.0 +760 9 training.label_smoothing 0.6158517923122305 +760 10 model.embedding_dim 1.0 +760 10 model.relation_dim 0.0 +760 10 optimizer.lr 0.014913600910208318 +760 10 training.batch_size 0.0 +760 10 training.label_smoothing 0.16587372233185232 +760 11 model.embedding_dim 2.0 +760 11 model.relation_dim 1.0 +760 11 optimizer.lr 0.022820116076306934 +760 11 training.batch_size 0.0 +760 11 training.label_smoothing 0.06745222599962546 +760 12 model.embedding_dim 0.0 +760 12 model.relation_dim 2.0 +760 12 optimizer.lr 0.01781279599956232 +760 12 training.batch_size 0.0 +760 12 training.label_smoothing 0.35321127022905924 +760 13 model.embedding_dim 2.0 +760 13 model.relation_dim 1.0 +760 13 optimizer.lr 0.06513339236393627 +760 13 training.batch_size 2.0 +760 13 training.label_smoothing 0.014847901962112723 +760 14 
model.embedding_dim 2.0 +760 14 model.relation_dim 0.0 +760 14 optimizer.lr 0.0246767461611043 +760 14 training.batch_size 2.0 +760 14 training.label_smoothing 0.002115731287264398 +760 15 model.embedding_dim 2.0 +760 15 model.relation_dim 1.0 +760 15 optimizer.lr 0.0029548225539785915 +760 15 training.batch_size 0.0 +760 15 training.label_smoothing 0.004039044825210574 +760 16 model.embedding_dim 2.0 +760 16 model.relation_dim 0.0 +760 16 optimizer.lr 0.0015548827792656302 +760 16 training.batch_size 2.0 +760 16 training.label_smoothing 0.03930917045359432 +760 17 model.embedding_dim 2.0 +760 17 model.relation_dim 2.0 +760 17 optimizer.lr 0.002410265008357932 +760 17 training.batch_size 1.0 +760 17 training.label_smoothing 0.001561112817653107 +760 18 model.embedding_dim 1.0 +760 18 model.relation_dim 0.0 +760 18 optimizer.lr 0.02774995634886979 +760 18 training.batch_size 0.0 +760 18 training.label_smoothing 0.326945386323376 +760 19 model.embedding_dim 1.0 +760 19 model.relation_dim 0.0 +760 19 optimizer.lr 0.0430639373941629 +760 19 training.batch_size 0.0 +760 19 training.label_smoothing 0.8257726212700708 +760 20 model.embedding_dim 0.0 +760 20 model.relation_dim 1.0 +760 20 optimizer.lr 0.022682994241366407 +760 20 training.batch_size 0.0 +760 20 training.label_smoothing 0.009250220066007115 +760 21 model.embedding_dim 0.0 +760 21 model.relation_dim 2.0 +760 21 optimizer.lr 0.010723033216629901 +760 21 training.batch_size 2.0 +760 21 training.label_smoothing 0.05506220811407672 +760 22 model.embedding_dim 1.0 +760 22 model.relation_dim 2.0 +760 22 optimizer.lr 0.03172146627646359 +760 22 training.batch_size 2.0 +760 22 training.label_smoothing 0.009890471443284064 +760 23 model.embedding_dim 1.0 +760 23 model.relation_dim 0.0 +760 23 optimizer.lr 0.0019431062246265762 +760 23 training.batch_size 2.0 +760 23 training.label_smoothing 0.5154609344532677 +760 24 model.embedding_dim 0.0 +760 24 model.relation_dim 2.0 +760 24 optimizer.lr 0.04036953446284529 +760 
24 training.batch_size 2.0 +760 24 training.label_smoothing 0.001150765481943693 +760 25 model.embedding_dim 1.0 +760 25 model.relation_dim 2.0 +760 25 optimizer.lr 0.07635472800365461 +760 25 training.batch_size 2.0 +760 25 training.label_smoothing 0.010420352191755038 +760 26 model.embedding_dim 1.0 +760 26 model.relation_dim 0.0 +760 26 optimizer.lr 0.007718362482630833 +760 26 training.batch_size 1.0 +760 26 training.label_smoothing 0.0014058702060450606 +760 27 model.embedding_dim 0.0 +760 27 model.relation_dim 0.0 +760 27 optimizer.lr 0.016598048971908558 +760 27 training.batch_size 2.0 +760 27 training.label_smoothing 0.22361311399288783 +760 28 model.embedding_dim 0.0 +760 28 model.relation_dim 2.0 +760 28 optimizer.lr 0.009223923966266048 +760 28 training.batch_size 0.0 +760 28 training.label_smoothing 0.0021490248652359157 +760 29 model.embedding_dim 0.0 +760 29 model.relation_dim 0.0 +760 29 optimizer.lr 0.004946451969338571 +760 29 training.batch_size 2.0 +760 29 training.label_smoothing 0.24954577308200068 +760 30 model.embedding_dim 1.0 +760 30 model.relation_dim 0.0 +760 30 optimizer.lr 0.010267776615567735 +760 30 training.batch_size 2.0 +760 30 training.label_smoothing 0.8097786714222004 +760 31 model.embedding_dim 2.0 +760 31 model.relation_dim 1.0 +760 31 optimizer.lr 0.002149716205640415 +760 31 training.batch_size 1.0 +760 31 training.label_smoothing 0.0026207225114199656 +760 32 model.embedding_dim 1.0 +760 32 model.relation_dim 0.0 +760 32 optimizer.lr 0.016465358137163506 +760 32 training.batch_size 1.0 +760 32 training.label_smoothing 0.08976533420337218 +760 33 model.embedding_dim 0.0 +760 33 model.relation_dim 1.0 +760 33 optimizer.lr 0.001694646345251417 +760 33 training.batch_size 1.0 +760 33 training.label_smoothing 0.5500462087210037 +760 34 model.embedding_dim 1.0 +760 34 model.relation_dim 0.0 +760 34 optimizer.lr 0.023367225725931088 +760 34 training.batch_size 0.0 +760 34 training.label_smoothing 0.03924862133275021 +760 35 
model.embedding_dim 2.0 +760 35 model.relation_dim 0.0 +760 35 optimizer.lr 0.0012189489879835362 +760 35 training.batch_size 0.0 +760 35 training.label_smoothing 0.06504968681804149 +760 36 model.embedding_dim 1.0 +760 36 model.relation_dim 1.0 +760 36 optimizer.lr 0.002481671258273293 +760 36 training.batch_size 1.0 +760 36 training.label_smoothing 0.06167986129909765 +760 37 model.embedding_dim 0.0 +760 37 model.relation_dim 2.0 +760 37 optimizer.lr 0.040066737989465834 +760 37 training.batch_size 2.0 +760 37 training.label_smoothing 0.06101850632881266 +760 38 model.embedding_dim 1.0 +760 38 model.relation_dim 1.0 +760 38 optimizer.lr 0.0012940180439276387 +760 38 training.batch_size 1.0 +760 38 training.label_smoothing 0.018764913449745197 +760 39 model.embedding_dim 2.0 +760 39 model.relation_dim 1.0 +760 39 optimizer.lr 0.01638407320004348 +760 39 training.batch_size 2.0 +760 39 training.label_smoothing 0.05920305858932865 +760 40 model.embedding_dim 2.0 +760 40 model.relation_dim 1.0 +760 40 optimizer.lr 0.04844185518098443 +760 40 training.batch_size 1.0 +760 40 training.label_smoothing 0.008502106001400963 +760 41 model.embedding_dim 2.0 +760 41 model.relation_dim 1.0 +760 41 optimizer.lr 0.07521160268025534 +760 41 training.batch_size 2.0 +760 41 training.label_smoothing 0.0038806684040389667 +760 42 model.embedding_dim 2.0 +760 42 model.relation_dim 1.0 +760 42 optimizer.lr 0.009216419414896154 +760 42 training.batch_size 2.0 +760 42 training.label_smoothing 0.4214859034137634 +760 43 model.embedding_dim 2.0 +760 43 model.relation_dim 2.0 +760 43 optimizer.lr 0.0092027048459615 +760 43 training.batch_size 2.0 +760 43 training.label_smoothing 0.010916488837531126 +760 44 model.embedding_dim 0.0 +760 44 model.relation_dim 2.0 +760 44 optimizer.lr 0.08259778961532697 +760 44 training.batch_size 2.0 +760 44 training.label_smoothing 0.010717065241881072 +760 45 model.embedding_dim 0.0 +760 45 model.relation_dim 2.0 +760 45 optimizer.lr 0.05417763243643069 
+760 45 training.batch_size 0.0 +760 45 training.label_smoothing 0.002924260571459962 +760 46 model.embedding_dim 1.0 +760 46 model.relation_dim 0.0 +760 46 optimizer.lr 0.003413030176522502 +760 46 training.batch_size 2.0 +760 46 training.label_smoothing 0.0012639116802779032 +760 47 model.embedding_dim 1.0 +760 47 model.relation_dim 2.0 +760 47 optimizer.lr 0.01054081283030058 +760 47 training.batch_size 0.0 +760 47 training.label_smoothing 0.2169410999922189 +760 48 model.embedding_dim 0.0 +760 48 model.relation_dim 0.0 +760 48 optimizer.lr 0.011092755722598295 +760 48 training.batch_size 1.0 +760 48 training.label_smoothing 0.006983318338828925 +760 49 model.embedding_dim 0.0 +760 49 model.relation_dim 0.0 +760 49 optimizer.lr 0.012515511052848543 +760 49 training.batch_size 0.0 +760 49 training.label_smoothing 0.006367447103449618 +760 50 model.embedding_dim 1.0 +760 50 model.relation_dim 2.0 +760 50 optimizer.lr 0.004574876901890787 +760 50 training.batch_size 1.0 +760 50 training.label_smoothing 0.17003427758473216 +760 51 model.embedding_dim 2.0 +760 51 model.relation_dim 0.0 +760 51 optimizer.lr 0.0724877764203281 +760 51 training.batch_size 0.0 +760 51 training.label_smoothing 0.005083012593364282 +760 52 model.embedding_dim 1.0 +760 52 model.relation_dim 1.0 +760 52 optimizer.lr 0.03570636327816492 +760 52 training.batch_size 2.0 +760 52 training.label_smoothing 0.5577208041562146 +760 53 model.embedding_dim 2.0 +760 53 model.relation_dim 0.0 +760 53 optimizer.lr 0.05566730012228277 +760 53 training.batch_size 1.0 +760 53 training.label_smoothing 0.0025055353439604406 +760 54 model.embedding_dim 2.0 +760 54 model.relation_dim 0.0 +760 54 optimizer.lr 0.002591157524398123 +760 54 training.batch_size 2.0 +760 54 training.label_smoothing 0.23618268485435456 +760 55 model.embedding_dim 1.0 +760 55 model.relation_dim 1.0 +760 55 optimizer.lr 0.018743286857248624 +760 55 training.batch_size 2.0 +760 55 training.label_smoothing 0.011743601233020406 +760 56 
model.embedding_dim 1.0 +760 56 model.relation_dim 2.0 +760 56 optimizer.lr 0.05270901272681915 +760 56 training.batch_size 0.0 +760 56 training.label_smoothing 0.007523978989973691 +760 57 model.embedding_dim 0.0 +760 57 model.relation_dim 1.0 +760 57 optimizer.lr 0.02640707089088382 +760 57 training.batch_size 2.0 +760 57 training.label_smoothing 0.04953091424681848 +760 58 model.embedding_dim 0.0 +760 58 model.relation_dim 0.0 +760 58 optimizer.lr 0.002409863530226325 +760 58 training.batch_size 0.0 +760 58 training.label_smoothing 0.12986414693834142 +760 59 model.embedding_dim 1.0 +760 59 model.relation_dim 0.0 +760 59 optimizer.lr 0.001343461642205654 +760 59 training.batch_size 2.0 +760 59 training.label_smoothing 0.021348659616953012 +760 60 model.embedding_dim 2.0 +760 60 model.relation_dim 2.0 +760 60 optimizer.lr 0.018612065472413043 +760 60 training.batch_size 2.0 +760 60 training.label_smoothing 0.19272620159826748 +760 61 model.embedding_dim 2.0 +760 61 model.relation_dim 1.0 +760 61 optimizer.lr 0.01074407210460497 +760 61 training.batch_size 2.0 +760 61 training.label_smoothing 0.00148702673242437 +760 62 model.embedding_dim 0.0 +760 62 model.relation_dim 1.0 +760 62 optimizer.lr 0.0640479587674931 +760 62 training.batch_size 2.0 +760 62 training.label_smoothing 0.005138916385572254 +760 63 model.embedding_dim 0.0 +760 63 model.relation_dim 0.0 +760 63 optimizer.lr 0.0025346305925827723 +760 63 training.batch_size 0.0 +760 63 training.label_smoothing 0.04274146141097194 +760 64 model.embedding_dim 2.0 +760 64 model.relation_dim 0.0 +760 64 optimizer.lr 0.02328146550426144 +760 64 training.batch_size 2.0 +760 64 training.label_smoothing 0.01760284536778901 +760 65 model.embedding_dim 0.0 +760 65 model.relation_dim 2.0 +760 65 optimizer.lr 0.0012755457587381377 +760 65 training.batch_size 0.0 +760 65 training.label_smoothing 0.21100206110841496 +760 66 model.embedding_dim 0.0 +760 66 model.relation_dim 0.0 +760 66 optimizer.lr 0.0016683545697056194 
+760 66 training.batch_size 0.0 +760 66 training.label_smoothing 0.4098986051664627 +760 67 model.embedding_dim 2.0 +760 67 model.relation_dim 2.0 +760 67 optimizer.lr 0.006515699703028927 +760 67 training.batch_size 0.0 +760 67 training.label_smoothing 0.009003031520757586 +760 68 model.embedding_dim 2.0 +760 68 model.relation_dim 2.0 +760 68 optimizer.lr 0.0012652712330553605 +760 68 training.batch_size 0.0 +760 68 training.label_smoothing 0.07313955038859922 +760 69 model.embedding_dim 2.0 +760 69 model.relation_dim 0.0 +760 69 optimizer.lr 0.025277491612822767 +760 69 training.batch_size 0.0 +760 69 training.label_smoothing 0.06971258180267279 +760 70 model.embedding_dim 1.0 +760 70 model.relation_dim 1.0 +760 70 optimizer.lr 0.07026462096591948 +760 70 training.batch_size 2.0 +760 70 training.label_smoothing 0.09857205386763239 +760 71 model.embedding_dim 2.0 +760 71 model.relation_dim 2.0 +760 71 optimizer.lr 0.0017006380458592276 +760 71 training.batch_size 1.0 +760 71 training.label_smoothing 0.17009397102469928 +760 72 model.embedding_dim 1.0 +760 72 model.relation_dim 0.0 +760 72 optimizer.lr 0.0049496918604964605 +760 72 training.batch_size 2.0 +760 72 training.label_smoothing 0.003009525358409297 +760 73 model.embedding_dim 0.0 +760 73 model.relation_dim 1.0 +760 73 optimizer.lr 0.0086647311211209 +760 73 training.batch_size 2.0 +760 73 training.label_smoothing 0.016937796431671735 +760 74 model.embedding_dim 0.0 +760 74 model.relation_dim 2.0 +760 74 optimizer.lr 0.0028216781621532437 +760 74 training.batch_size 0.0 +760 74 training.label_smoothing 0.6474039039438978 +760 75 model.embedding_dim 1.0 +760 75 model.relation_dim 0.0 +760 75 optimizer.lr 0.002601318904320069 +760 75 training.batch_size 1.0 +760 75 training.label_smoothing 0.0018580414743226922 +760 76 model.embedding_dim 1.0 +760 76 model.relation_dim 0.0 +760 76 optimizer.lr 0.0018769278312718053 +760 76 training.batch_size 2.0 +760 76 training.label_smoothing 0.0019128903891739454 +760 77 
model.embedding_dim 0.0 +760 77 model.relation_dim 2.0 +760 77 optimizer.lr 0.009348978231895249 +760 77 training.batch_size 0.0 +760 77 training.label_smoothing 0.24840906541733918 +760 78 model.embedding_dim 1.0 +760 78 model.relation_dim 1.0 +760 78 optimizer.lr 0.006496311206238137 +760 78 training.batch_size 1.0 +760 78 training.label_smoothing 0.04831970683137394 +760 79 model.embedding_dim 0.0 +760 79 model.relation_dim 0.0 +760 79 optimizer.lr 0.0951432222570622 +760 79 training.batch_size 1.0 +760 79 training.label_smoothing 0.0022833452183977795 +760 80 model.embedding_dim 1.0 +760 80 model.relation_dim 1.0 +760 80 optimizer.lr 0.004588789568921117 +760 80 training.batch_size 2.0 +760 80 training.label_smoothing 0.10805515371025319 +760 81 model.embedding_dim 0.0 +760 81 model.relation_dim 1.0 +760 81 optimizer.lr 0.06550785807430312 +760 81 training.batch_size 2.0 +760 81 training.label_smoothing 0.005443764008957721 +760 82 model.embedding_dim 0.0 +760 82 model.relation_dim 2.0 +760 82 optimizer.lr 0.0033608673603267958 +760 82 training.batch_size 1.0 +760 82 training.label_smoothing 0.28627209919024743 +760 83 model.embedding_dim 1.0 +760 83 model.relation_dim 1.0 +760 83 optimizer.lr 0.0011365738792016807 +760 83 training.batch_size 0.0 +760 83 training.label_smoothing 0.0015415883098389945 +760 84 model.embedding_dim 1.0 +760 84 model.relation_dim 2.0 +760 84 optimizer.lr 0.09382689464876504 +760 84 training.batch_size 1.0 +760 84 training.label_smoothing 0.10096589166568666 +760 85 model.embedding_dim 2.0 +760 85 model.relation_dim 1.0 +760 85 optimizer.lr 0.01612040556912275 +760 85 training.batch_size 1.0 +760 85 training.label_smoothing 0.684394920555944 +760 86 model.embedding_dim 2.0 +760 86 model.relation_dim 2.0 +760 86 optimizer.lr 0.002333004020331572 +760 86 training.batch_size 0.0 +760 86 training.label_smoothing 0.04313186171525064 +760 87 model.embedding_dim 0.0 +760 87 model.relation_dim 1.0 +760 87 optimizer.lr 0.0038901833823604044 
+760 87 training.batch_size 1.0 +760 87 training.label_smoothing 0.018436776246655847 +760 88 model.embedding_dim 0.0 +760 88 model.relation_dim 2.0 +760 88 optimizer.lr 0.0024075782978453595 +760 88 training.batch_size 1.0 +760 88 training.label_smoothing 0.008903865221691122 +760 89 model.embedding_dim 1.0 +760 89 model.relation_dim 1.0 +760 89 optimizer.lr 0.001234087858876408 +760 89 training.batch_size 0.0 +760 89 training.label_smoothing 0.047305718968761894 +760 90 model.embedding_dim 0.0 +760 90 model.relation_dim 1.0 +760 90 optimizer.lr 0.0440625723951609 +760 90 training.batch_size 0.0 +760 90 training.label_smoothing 0.038766018818198474 +760 91 model.embedding_dim 1.0 +760 91 model.relation_dim 1.0 +760 91 optimizer.lr 0.002798559300753038 +760 91 training.batch_size 2.0 +760 91 training.label_smoothing 0.048676180574395926 +760 92 model.embedding_dim 1.0 +760 92 model.relation_dim 1.0 +760 92 optimizer.lr 0.08700168886929076 +760 92 training.batch_size 2.0 +760 92 training.label_smoothing 0.0028543229863924415 +760 93 model.embedding_dim 2.0 +760 93 model.relation_dim 2.0 +760 93 optimizer.lr 0.005322973523853944 +760 93 training.batch_size 2.0 +760 93 training.label_smoothing 0.0115454502640974 +760 94 model.embedding_dim 1.0 +760 94 model.relation_dim 0.0 +760 94 optimizer.lr 0.006911082134446495 +760 94 training.batch_size 0.0 +760 94 training.label_smoothing 0.011737313862778237 +760 95 model.embedding_dim 1.0 +760 95 model.relation_dim 2.0 +760 95 optimizer.lr 0.025757264854095612 +760 95 training.batch_size 1.0 +760 95 training.label_smoothing 0.0021389887437771064 +760 96 model.embedding_dim 1.0 +760 96 model.relation_dim 0.0 +760 96 optimizer.lr 0.001898207059029613 +760 96 training.batch_size 0.0 +760 96 training.label_smoothing 0.0011161157933511697 +760 97 model.embedding_dim 0.0 +760 97 model.relation_dim 0.0 +760 97 optimizer.lr 0.0574924831718474 +760 97 training.batch_size 1.0 +760 97 training.label_smoothing 0.004789395610606127 +760 
98 model.embedding_dim 1.0 +760 98 model.relation_dim 0.0 +760 98 optimizer.lr 0.016595398697093453 +760 98 training.batch_size 2.0 +760 98 training.label_smoothing 0.21838831704014322 +760 99 model.embedding_dim 0.0 +760 99 model.relation_dim 1.0 +760 99 optimizer.lr 0.06491210577223669 +760 99 training.batch_size 1.0 +760 99 training.label_smoothing 0.0017001976373268023 +760 100 model.embedding_dim 1.0 +760 100 model.relation_dim 2.0 +760 100 optimizer.lr 0.031157782019434616 +760 100 training.batch_size 0.0 +760 100 training.label_smoothing 0.11238891361468646 +760 1 dataset """kinships""" +760 1 model """transd""" +760 1 loss """softplus""" +760 1 regularizer """no""" +760 1 optimizer """adam""" +760 1 training_loop """lcwa""" +760 1 evaluator """rankbased""" +760 2 dataset """kinships""" +760 2 model """transd""" +760 2 loss """softplus""" +760 2 regularizer """no""" +760 2 optimizer """adam""" +760 2 training_loop """lcwa""" +760 2 evaluator """rankbased""" +760 3 dataset """kinships""" +760 3 model """transd""" +760 3 loss """softplus""" +760 3 regularizer """no""" +760 3 optimizer """adam""" +760 3 training_loop """lcwa""" +760 3 evaluator """rankbased""" +760 4 dataset """kinships""" +760 4 model """transd""" +760 4 loss """softplus""" +760 4 regularizer """no""" +760 4 optimizer """adam""" +760 4 training_loop """lcwa""" +760 4 evaluator """rankbased""" +760 5 dataset """kinships""" +760 5 model """transd""" +760 5 loss """softplus""" +760 5 regularizer """no""" +760 5 optimizer """adam""" +760 5 training_loop """lcwa""" +760 5 evaluator """rankbased""" +760 6 dataset """kinships""" +760 6 model """transd""" +760 6 loss """softplus""" +760 6 regularizer """no""" +760 6 optimizer """adam""" +760 6 training_loop """lcwa""" +760 6 evaluator """rankbased""" +760 7 dataset """kinships""" +760 7 model """transd""" +760 7 loss """softplus""" +760 7 regularizer """no""" +760 7 optimizer """adam""" +760 7 training_loop """lcwa""" +760 7 evaluator """rankbased""" 
+760 8 dataset """kinships""" +760 8 model """transd""" +760 8 loss """softplus""" +760 8 regularizer """no""" +760 8 optimizer """adam""" +760 8 training_loop """lcwa""" +760 8 evaluator """rankbased""" +760 9 dataset """kinships""" +760 9 model """transd""" +760 9 loss """softplus""" +760 9 regularizer """no""" +760 9 optimizer """adam""" +760 9 training_loop """lcwa""" +760 9 evaluator """rankbased""" +760 10 dataset """kinships""" +760 10 model """transd""" +760 10 loss """softplus""" +760 10 regularizer """no""" +760 10 optimizer """adam""" +760 10 training_loop """lcwa""" +760 10 evaluator """rankbased""" +760 11 dataset """kinships""" +760 11 model """transd""" +760 11 loss """softplus""" +760 11 regularizer """no""" +760 11 optimizer """adam""" +760 11 training_loop """lcwa""" +760 11 evaluator """rankbased""" +760 12 dataset """kinships""" +760 12 model """transd""" +760 12 loss """softplus""" +760 12 regularizer """no""" +760 12 optimizer """adam""" +760 12 training_loop """lcwa""" +760 12 evaluator """rankbased""" +760 13 dataset """kinships""" +760 13 model """transd""" +760 13 loss """softplus""" +760 13 regularizer """no""" +760 13 optimizer """adam""" +760 13 training_loop """lcwa""" +760 13 evaluator """rankbased""" +760 14 dataset """kinships""" +760 14 model """transd""" +760 14 loss """softplus""" +760 14 regularizer """no""" +760 14 optimizer """adam""" +760 14 training_loop """lcwa""" +760 14 evaluator """rankbased""" +760 15 dataset """kinships""" +760 15 model """transd""" +760 15 loss """softplus""" +760 15 regularizer """no""" +760 15 optimizer """adam""" +760 15 training_loop """lcwa""" +760 15 evaluator """rankbased""" +760 16 dataset """kinships""" +760 16 model """transd""" +760 16 loss """softplus""" +760 16 regularizer """no""" +760 16 optimizer """adam""" +760 16 training_loop """lcwa""" +760 16 evaluator """rankbased""" +760 17 dataset """kinships""" +760 17 model """transd""" +760 17 loss """softplus""" +760 17 regularizer """no""" 
+760 17 optimizer """adam""" +760 17 training_loop """lcwa""" +760 17 evaluator """rankbased""" +760 18 dataset """kinships""" +760 18 model """transd""" +760 18 loss """softplus""" +760 18 regularizer """no""" +760 18 optimizer """adam""" +760 18 training_loop """lcwa""" +760 18 evaluator """rankbased""" +760 19 dataset """kinships""" +760 19 model """transd""" +760 19 loss """softplus""" +760 19 regularizer """no""" +760 19 optimizer """adam""" +760 19 training_loop """lcwa""" +760 19 evaluator """rankbased""" +760 20 dataset """kinships""" +760 20 model """transd""" +760 20 loss """softplus""" +760 20 regularizer """no""" +760 20 optimizer """adam""" +760 20 training_loop """lcwa""" +760 20 evaluator """rankbased""" +760 21 dataset """kinships""" +760 21 model """transd""" +760 21 loss """softplus""" +760 21 regularizer """no""" +760 21 optimizer """adam""" +760 21 training_loop """lcwa""" +760 21 evaluator """rankbased""" +760 22 dataset """kinships""" +760 22 model """transd""" +760 22 loss """softplus""" +760 22 regularizer """no""" +760 22 optimizer """adam""" +760 22 training_loop """lcwa""" +760 22 evaluator """rankbased""" +760 23 dataset """kinships""" +760 23 model """transd""" +760 23 loss """softplus""" +760 23 regularizer """no""" +760 23 optimizer """adam""" +760 23 training_loop """lcwa""" +760 23 evaluator """rankbased""" +760 24 dataset """kinships""" +760 24 model """transd""" +760 24 loss """softplus""" +760 24 regularizer """no""" +760 24 optimizer """adam""" +760 24 training_loop """lcwa""" +760 24 evaluator """rankbased""" +760 25 dataset """kinships""" +760 25 model """transd""" +760 25 loss """softplus""" +760 25 regularizer """no""" +760 25 optimizer """adam""" +760 25 training_loop """lcwa""" +760 25 evaluator """rankbased""" +760 26 dataset """kinships""" +760 26 model """transd""" +760 26 loss """softplus""" +760 26 regularizer """no""" +760 26 optimizer """adam""" +760 26 training_loop """lcwa""" +760 26 evaluator """rankbased""" +760 
27 dataset """kinships""" +760 27 model """transd""" +760 27 loss """softplus""" +760 27 regularizer """no""" +760 27 optimizer """adam""" +760 27 training_loop """lcwa""" +760 27 evaluator """rankbased""" +760 28 dataset """kinships""" +760 28 model """transd""" +760 28 loss """softplus""" +760 28 regularizer """no""" +760 28 optimizer """adam""" +760 28 training_loop """lcwa""" +760 28 evaluator """rankbased""" +760 29 dataset """kinships""" +760 29 model """transd""" +760 29 loss """softplus""" +760 29 regularizer """no""" +760 29 optimizer """adam""" +760 29 training_loop """lcwa""" +760 29 evaluator """rankbased""" +760 30 dataset """kinships""" +760 30 model """transd""" +760 30 loss """softplus""" +760 30 regularizer """no""" +760 30 optimizer """adam""" +760 30 training_loop """lcwa""" +760 30 evaluator """rankbased""" +760 31 dataset """kinships""" +760 31 model """transd""" +760 31 loss """softplus""" +760 31 regularizer """no""" +760 31 optimizer """adam""" +760 31 training_loop """lcwa""" +760 31 evaluator """rankbased""" +760 32 dataset """kinships""" +760 32 model """transd""" +760 32 loss """softplus""" +760 32 regularizer """no""" +760 32 optimizer """adam""" +760 32 training_loop """lcwa""" +760 32 evaluator """rankbased""" +760 33 dataset """kinships""" +760 33 model """transd""" +760 33 loss """softplus""" +760 33 regularizer """no""" +760 33 optimizer """adam""" +760 33 training_loop """lcwa""" +760 33 evaluator """rankbased""" +760 34 dataset """kinships""" +760 34 model """transd""" +760 34 loss """softplus""" +760 34 regularizer """no""" +760 34 optimizer """adam""" +760 34 training_loop """lcwa""" +760 34 evaluator """rankbased""" +760 35 dataset """kinships""" +760 35 model """transd""" +760 35 loss """softplus""" +760 35 regularizer """no""" +760 35 optimizer """adam""" +760 35 training_loop """lcwa""" +760 35 evaluator """rankbased""" +760 36 dataset """kinships""" +760 36 model """transd""" +760 36 loss """softplus""" +760 36 regularizer 
"""no""" +760 36 optimizer """adam""" +760 36 training_loop """lcwa""" +760 36 evaluator """rankbased""" +760 37 dataset """kinships""" +760 37 model """transd""" +760 37 loss """softplus""" +760 37 regularizer """no""" +760 37 optimizer """adam""" +760 37 training_loop """lcwa""" +760 37 evaluator """rankbased""" +760 38 dataset """kinships""" +760 38 model """transd""" +760 38 loss """softplus""" +760 38 regularizer """no""" +760 38 optimizer """adam""" +760 38 training_loop """lcwa""" +760 38 evaluator """rankbased""" +760 39 dataset """kinships""" +760 39 model """transd""" +760 39 loss """softplus""" +760 39 regularizer """no""" +760 39 optimizer """adam""" +760 39 training_loop """lcwa""" +760 39 evaluator """rankbased""" +760 40 dataset """kinships""" +760 40 model """transd""" +760 40 loss """softplus""" +760 40 regularizer """no""" +760 40 optimizer """adam""" +760 40 training_loop """lcwa""" +760 40 evaluator """rankbased""" +760 41 dataset """kinships""" +760 41 model """transd""" +760 41 loss """softplus""" +760 41 regularizer """no""" +760 41 optimizer """adam""" +760 41 training_loop """lcwa""" +760 41 evaluator """rankbased""" +760 42 dataset """kinships""" +760 42 model """transd""" +760 42 loss """softplus""" +760 42 regularizer """no""" +760 42 optimizer """adam""" +760 42 training_loop """lcwa""" +760 42 evaluator """rankbased""" +760 43 dataset """kinships""" +760 43 model """transd""" +760 43 loss """softplus""" +760 43 regularizer """no""" +760 43 optimizer """adam""" +760 43 training_loop """lcwa""" +760 43 evaluator """rankbased""" +760 44 dataset """kinships""" +760 44 model """transd""" +760 44 loss """softplus""" +760 44 regularizer """no""" +760 44 optimizer """adam""" +760 44 training_loop """lcwa""" +760 44 evaluator """rankbased""" +760 45 dataset """kinships""" +760 45 model """transd""" +760 45 loss """softplus""" +760 45 regularizer """no""" +760 45 optimizer """adam""" +760 45 training_loop """lcwa""" +760 45 evaluator 
"""rankbased""" +760 46 dataset """kinships""" +760 46 model """transd""" +760 46 loss """softplus""" +760 46 regularizer """no""" +760 46 optimizer """adam""" +760 46 training_loop """lcwa""" +760 46 evaluator """rankbased""" +760 47 dataset """kinships""" +760 47 model """transd""" +760 47 loss """softplus""" +760 47 regularizer """no""" +760 47 optimizer """adam""" +760 47 training_loop """lcwa""" +760 47 evaluator """rankbased""" +760 48 dataset """kinships""" +760 48 model """transd""" +760 48 loss """softplus""" +760 48 regularizer """no""" +760 48 optimizer """adam""" +760 48 training_loop """lcwa""" +760 48 evaluator """rankbased""" +760 49 dataset """kinships""" +760 49 model """transd""" +760 49 loss """softplus""" +760 49 regularizer """no""" +760 49 optimizer """adam""" +760 49 training_loop """lcwa""" +760 49 evaluator """rankbased""" +760 50 dataset """kinships""" +760 50 model """transd""" +760 50 loss """softplus""" +760 50 regularizer """no""" +760 50 optimizer """adam""" +760 50 training_loop """lcwa""" +760 50 evaluator """rankbased""" +760 51 dataset """kinships""" +760 51 model """transd""" +760 51 loss """softplus""" +760 51 regularizer """no""" +760 51 optimizer """adam""" +760 51 training_loop """lcwa""" +760 51 evaluator """rankbased""" +760 52 dataset """kinships""" +760 52 model """transd""" +760 52 loss """softplus""" +760 52 regularizer """no""" +760 52 optimizer """adam""" +760 52 training_loop """lcwa""" +760 52 evaluator """rankbased""" +760 53 dataset """kinships""" +760 53 model """transd""" +760 53 loss """softplus""" +760 53 regularizer """no""" +760 53 optimizer """adam""" +760 53 training_loop """lcwa""" +760 53 evaluator """rankbased""" +760 54 dataset """kinships""" +760 54 model """transd""" +760 54 loss """softplus""" +760 54 regularizer """no""" +760 54 optimizer """adam""" +760 54 training_loop """lcwa""" +760 54 evaluator """rankbased""" +760 55 dataset """kinships""" +760 55 model """transd""" +760 55 loss 
"""softplus""" +760 55 regularizer """no""" +760 55 optimizer """adam""" +760 55 training_loop """lcwa""" +760 55 evaluator """rankbased""" +760 56 dataset """kinships""" +760 56 model """transd""" +760 56 loss """softplus""" +760 56 regularizer """no""" +760 56 optimizer """adam""" +760 56 training_loop """lcwa""" +760 56 evaluator """rankbased""" +760 57 dataset """kinships""" +760 57 model """transd""" +760 57 loss """softplus""" +760 57 regularizer """no""" +760 57 optimizer """adam""" +760 57 training_loop """lcwa""" +760 57 evaluator """rankbased""" +760 58 dataset """kinships""" +760 58 model """transd""" +760 58 loss """softplus""" +760 58 regularizer """no""" +760 58 optimizer """adam""" +760 58 training_loop """lcwa""" +760 58 evaluator """rankbased""" +760 59 dataset """kinships""" +760 59 model """transd""" +760 59 loss """softplus""" +760 59 regularizer """no""" +760 59 optimizer """adam""" +760 59 training_loop """lcwa""" +760 59 evaluator """rankbased""" +760 60 dataset """kinships""" +760 60 model """transd""" +760 60 loss """softplus""" +760 60 regularizer """no""" +760 60 optimizer """adam""" +760 60 training_loop """lcwa""" +760 60 evaluator """rankbased""" +760 61 dataset """kinships""" +760 61 model """transd""" +760 61 loss """softplus""" +760 61 regularizer """no""" +760 61 optimizer """adam""" +760 61 training_loop """lcwa""" +760 61 evaluator """rankbased""" +760 62 dataset """kinships""" +760 62 model """transd""" +760 62 loss """softplus""" +760 62 regularizer """no""" +760 62 optimizer """adam""" +760 62 training_loop """lcwa""" +760 62 evaluator """rankbased""" +760 63 dataset """kinships""" +760 63 model """transd""" +760 63 loss """softplus""" +760 63 regularizer """no""" +760 63 optimizer """adam""" +760 63 training_loop """lcwa""" +760 63 evaluator """rankbased""" +760 64 dataset """kinships""" +760 64 model """transd""" +760 64 loss """softplus""" +760 64 regularizer """no""" +760 64 optimizer """adam""" +760 64 training_loop 
"""lcwa""" +760 64 evaluator """rankbased""" +760 65 dataset """kinships""" +760 65 model """transd""" +760 65 loss """softplus""" +760 65 regularizer """no""" +760 65 optimizer """adam""" +760 65 training_loop """lcwa""" +760 65 evaluator """rankbased""" +760 66 dataset """kinships""" +760 66 model """transd""" +760 66 loss """softplus""" +760 66 regularizer """no""" +760 66 optimizer """adam""" +760 66 training_loop """lcwa""" +760 66 evaluator """rankbased""" +760 67 dataset """kinships""" +760 67 model """transd""" +760 67 loss """softplus""" +760 67 regularizer """no""" +760 67 optimizer """adam""" +760 67 training_loop """lcwa""" +760 67 evaluator """rankbased""" +760 68 dataset """kinships""" +760 68 model """transd""" +760 68 loss """softplus""" +760 68 regularizer """no""" +760 68 optimizer """adam""" +760 68 training_loop """lcwa""" +760 68 evaluator """rankbased""" +760 69 dataset """kinships""" +760 69 model """transd""" +760 69 loss """softplus""" +760 69 regularizer """no""" +760 69 optimizer """adam""" +760 69 training_loop """lcwa""" +760 69 evaluator """rankbased""" +760 70 dataset """kinships""" +760 70 model """transd""" +760 70 loss """softplus""" +760 70 regularizer """no""" +760 70 optimizer """adam""" +760 70 training_loop """lcwa""" +760 70 evaluator """rankbased""" +760 71 dataset """kinships""" +760 71 model """transd""" +760 71 loss """softplus""" +760 71 regularizer """no""" +760 71 optimizer """adam""" +760 71 training_loop """lcwa""" +760 71 evaluator """rankbased""" +760 72 dataset """kinships""" +760 72 model """transd""" +760 72 loss """softplus""" +760 72 regularizer """no""" +760 72 optimizer """adam""" +760 72 training_loop """lcwa""" +760 72 evaluator """rankbased""" +760 73 dataset """kinships""" +760 73 model """transd""" +760 73 loss """softplus""" +760 73 regularizer """no""" +760 73 optimizer """adam""" +760 73 training_loop """lcwa""" +760 73 evaluator """rankbased""" +760 74 dataset """kinships""" +760 74 model 
"""transd""" +760 74 loss """softplus""" +760 74 regularizer """no""" +760 74 optimizer """adam""" +760 74 training_loop """lcwa""" +760 74 evaluator """rankbased""" +760 75 dataset """kinships""" +760 75 model """transd""" +760 75 loss """softplus""" +760 75 regularizer """no""" +760 75 optimizer """adam""" +760 75 training_loop """lcwa""" +760 75 evaluator """rankbased""" +760 76 dataset """kinships""" +760 76 model """transd""" +760 76 loss """softplus""" +760 76 regularizer """no""" +760 76 optimizer """adam""" +760 76 training_loop """lcwa""" +760 76 evaluator """rankbased""" +760 77 dataset """kinships""" +760 77 model """transd""" +760 77 loss """softplus""" +760 77 regularizer """no""" +760 77 optimizer """adam""" +760 77 training_loop """lcwa""" +760 77 evaluator """rankbased""" +760 78 dataset """kinships""" +760 78 model """transd""" +760 78 loss """softplus""" +760 78 regularizer """no""" +760 78 optimizer """adam""" +760 78 training_loop """lcwa""" +760 78 evaluator """rankbased""" +760 79 dataset """kinships""" +760 79 model """transd""" +760 79 loss """softplus""" +760 79 regularizer """no""" +760 79 optimizer """adam""" +760 79 training_loop """lcwa""" +760 79 evaluator """rankbased""" +760 80 dataset """kinships""" +760 80 model """transd""" +760 80 loss """softplus""" +760 80 regularizer """no""" +760 80 optimizer """adam""" +760 80 training_loop """lcwa""" +760 80 evaluator """rankbased""" +760 81 dataset """kinships""" +760 81 model """transd""" +760 81 loss """softplus""" +760 81 regularizer """no""" +760 81 optimizer """adam""" +760 81 training_loop """lcwa""" +760 81 evaluator """rankbased""" +760 82 dataset """kinships""" +760 82 model """transd""" +760 82 loss """softplus""" +760 82 regularizer """no""" +760 82 optimizer """adam""" +760 82 training_loop """lcwa""" +760 82 evaluator """rankbased""" +760 83 dataset """kinships""" +760 83 model """transd""" +760 83 loss """softplus""" +760 83 regularizer """no""" +760 83 optimizer """adam""" 
+760 83 training_loop """lcwa""" +760 83 evaluator """rankbased""" +760 84 dataset """kinships""" +760 84 model """transd""" +760 84 loss """softplus""" +760 84 regularizer """no""" +760 84 optimizer """adam""" +760 84 training_loop """lcwa""" +760 84 evaluator """rankbased""" +760 85 dataset """kinships""" +760 85 model """transd""" +760 85 loss """softplus""" +760 85 regularizer """no""" +760 85 optimizer """adam""" +760 85 training_loop """lcwa""" +760 85 evaluator """rankbased""" +760 86 dataset """kinships""" +760 86 model """transd""" +760 86 loss """softplus""" +760 86 regularizer """no""" +760 86 optimizer """adam""" +760 86 training_loop """lcwa""" +760 86 evaluator """rankbased""" +760 87 dataset """kinships""" +760 87 model """transd""" +760 87 loss """softplus""" +760 87 regularizer """no""" +760 87 optimizer """adam""" +760 87 training_loop """lcwa""" +760 87 evaluator """rankbased""" +760 88 dataset """kinships""" +760 88 model """transd""" +760 88 loss """softplus""" +760 88 regularizer """no""" +760 88 optimizer """adam""" +760 88 training_loop """lcwa""" +760 88 evaluator """rankbased""" +760 89 dataset """kinships""" +760 89 model """transd""" +760 89 loss """softplus""" +760 89 regularizer """no""" +760 89 optimizer """adam""" +760 89 training_loop """lcwa""" +760 89 evaluator """rankbased""" +760 90 dataset """kinships""" +760 90 model """transd""" +760 90 loss """softplus""" +760 90 regularizer """no""" +760 90 optimizer """adam""" +760 90 training_loop """lcwa""" +760 90 evaluator """rankbased""" +760 91 dataset """kinships""" +760 91 model """transd""" +760 91 loss """softplus""" +760 91 regularizer """no""" +760 91 optimizer """adam""" +760 91 training_loop """lcwa""" +760 91 evaluator """rankbased""" +760 92 dataset """kinships""" +760 92 model """transd""" +760 92 loss """softplus""" +760 92 regularizer """no""" +760 92 optimizer """adam""" +760 92 training_loop """lcwa""" +760 92 evaluator """rankbased""" +760 93 dataset """kinships""" 
+760 93 model """transd""" +760 93 loss """softplus""" +760 93 regularizer """no""" +760 93 optimizer """adam""" +760 93 training_loop """lcwa""" +760 93 evaluator """rankbased""" +760 94 dataset """kinships""" +760 94 model """transd""" +760 94 loss """softplus""" +760 94 regularizer """no""" +760 94 optimizer """adam""" +760 94 training_loop """lcwa""" +760 94 evaluator """rankbased""" +760 95 dataset """kinships""" +760 95 model """transd""" +760 95 loss """softplus""" +760 95 regularizer """no""" +760 95 optimizer """adam""" +760 95 training_loop """lcwa""" +760 95 evaluator """rankbased""" +760 96 dataset """kinships""" +760 96 model """transd""" +760 96 loss """softplus""" +760 96 regularizer """no""" +760 96 optimizer """adam""" +760 96 training_loop """lcwa""" +760 96 evaluator """rankbased""" +760 97 dataset """kinships""" +760 97 model """transd""" +760 97 loss """softplus""" +760 97 regularizer """no""" +760 97 optimizer """adam""" +760 97 training_loop """lcwa""" +760 97 evaluator """rankbased""" +760 98 dataset """kinships""" +760 98 model """transd""" +760 98 loss """softplus""" +760 98 regularizer """no""" +760 98 optimizer """adam""" +760 98 training_loop """lcwa""" +760 98 evaluator """rankbased""" +760 99 dataset """kinships""" +760 99 model """transd""" +760 99 loss """softplus""" +760 99 regularizer """no""" +760 99 optimizer """adam""" +760 99 training_loop """lcwa""" +760 99 evaluator """rankbased""" +760 100 dataset """kinships""" +760 100 model """transd""" +760 100 loss """softplus""" +760 100 regularizer """no""" +760 100 optimizer """adam""" +760 100 training_loop """lcwa""" +760 100 evaluator """rankbased""" +761 1 model.embedding_dim 2.0 +761 1 model.relation_dim 1.0 +761 1 optimizer.lr 0.008869799851605756 +761 1 negative_sampler.num_negs_per_pos 88.0 +761 1 training.batch_size 2.0 +761 2 model.embedding_dim 0.0 +761 2 model.relation_dim 1.0 +761 2 optimizer.lr 0.08480988183259315 +761 2 negative_sampler.num_negs_per_pos 8.0 +761 2 
training.batch_size 1.0 +761 3 model.embedding_dim 2.0 +761 3 model.relation_dim 0.0 +761 3 optimizer.lr 0.001939184602981017 +761 3 negative_sampler.num_negs_per_pos 89.0 +761 3 training.batch_size 1.0 +761 4 model.embedding_dim 2.0 +761 4 model.relation_dim 2.0 +761 4 optimizer.lr 0.003979300740661503 +761 4 negative_sampler.num_negs_per_pos 26.0 +761 4 training.batch_size 1.0 +761 5 model.embedding_dim 0.0 +761 5 model.relation_dim 1.0 +761 5 optimizer.lr 0.0023394746242471486 +761 5 negative_sampler.num_negs_per_pos 10.0 +761 5 training.batch_size 0.0 +761 6 model.embedding_dim 1.0 +761 6 model.relation_dim 1.0 +761 6 optimizer.lr 0.01353664901160462 +761 6 negative_sampler.num_negs_per_pos 66.0 +761 6 training.batch_size 0.0 +761 7 model.embedding_dim 1.0 +761 7 model.relation_dim 1.0 +761 7 optimizer.lr 0.0017577047658695708 +761 7 negative_sampler.num_negs_per_pos 37.0 +761 7 training.batch_size 0.0 +761 8 model.embedding_dim 2.0 +761 8 model.relation_dim 1.0 +761 8 optimizer.lr 0.006171320566030233 +761 8 negative_sampler.num_negs_per_pos 16.0 +761 8 training.batch_size 1.0 +761 9 model.embedding_dim 2.0 +761 9 model.relation_dim 0.0 +761 9 optimizer.lr 0.003534068868709956 +761 9 negative_sampler.num_negs_per_pos 89.0 +761 9 training.batch_size 1.0 +761 10 model.embedding_dim 2.0 +761 10 model.relation_dim 0.0 +761 10 optimizer.lr 0.004078987771102832 +761 10 negative_sampler.num_negs_per_pos 26.0 +761 10 training.batch_size 2.0 +761 11 model.embedding_dim 2.0 +761 11 model.relation_dim 1.0 +761 11 optimizer.lr 0.031228548341777903 +761 11 negative_sampler.num_negs_per_pos 57.0 +761 11 training.batch_size 0.0 +761 12 model.embedding_dim 2.0 +761 12 model.relation_dim 2.0 +761 12 optimizer.lr 0.033740767794715326 +761 12 negative_sampler.num_negs_per_pos 76.0 +761 12 training.batch_size 1.0 +761 13 model.embedding_dim 1.0 +761 13 model.relation_dim 0.0 +761 13 optimizer.lr 0.03709317401096869 +761 13 negative_sampler.num_negs_per_pos 79.0 +761 13 
training.batch_size 0.0 +761 14 model.embedding_dim 2.0 +761 14 model.relation_dim 2.0 +761 14 optimizer.lr 0.023778496610333107 +761 14 negative_sampler.num_negs_per_pos 74.0 +761 14 training.batch_size 0.0 +761 15 model.embedding_dim 1.0 +761 15 model.relation_dim 2.0 +761 15 optimizer.lr 0.0028332349869100502 +761 15 negative_sampler.num_negs_per_pos 50.0 +761 15 training.batch_size 1.0 +761 16 model.embedding_dim 2.0 +761 16 model.relation_dim 2.0 +761 16 optimizer.lr 0.001967961026887558 +761 16 negative_sampler.num_negs_per_pos 21.0 +761 16 training.batch_size 2.0 +761 17 model.embedding_dim 1.0 +761 17 model.relation_dim 0.0 +761 17 optimizer.lr 0.013791303913376329 +761 17 negative_sampler.num_negs_per_pos 77.0 +761 17 training.batch_size 1.0 +761 18 model.embedding_dim 0.0 +761 18 model.relation_dim 2.0 +761 18 optimizer.lr 0.0071172216304428906 +761 18 negative_sampler.num_negs_per_pos 1.0 +761 18 training.batch_size 1.0 +761 19 model.embedding_dim 1.0 +761 19 model.relation_dim 0.0 +761 19 optimizer.lr 0.0067559891815780326 +761 19 negative_sampler.num_negs_per_pos 51.0 +761 19 training.batch_size 2.0 +761 20 model.embedding_dim 2.0 +761 20 model.relation_dim 0.0 +761 20 optimizer.lr 0.04577273916852265 +761 20 negative_sampler.num_negs_per_pos 47.0 +761 20 training.batch_size 1.0 +761 21 model.embedding_dim 0.0 +761 21 model.relation_dim 2.0 +761 21 optimizer.lr 0.0028178035658772264 +761 21 negative_sampler.num_negs_per_pos 66.0 +761 21 training.batch_size 0.0 +761 22 model.embedding_dim 1.0 +761 22 model.relation_dim 0.0 +761 22 optimizer.lr 0.006350427484747762 +761 22 negative_sampler.num_negs_per_pos 93.0 +761 22 training.batch_size 2.0 +761 23 model.embedding_dim 1.0 +761 23 model.relation_dim 0.0 +761 23 optimizer.lr 0.052265018377580894 +761 23 negative_sampler.num_negs_per_pos 58.0 +761 23 training.batch_size 1.0 +761 24 model.embedding_dim 0.0 +761 24 model.relation_dim 2.0 +761 24 optimizer.lr 0.07870082476574078 +761 24 
negative_sampler.num_negs_per_pos 37.0 +761 24 training.batch_size 0.0 +761 25 model.embedding_dim 1.0 +761 25 model.relation_dim 1.0 +761 25 optimizer.lr 0.042784520798738804 +761 25 negative_sampler.num_negs_per_pos 63.0 +761 25 training.batch_size 0.0 +761 26 model.embedding_dim 1.0 +761 26 model.relation_dim 1.0 +761 26 optimizer.lr 0.01501873724727216 +761 26 negative_sampler.num_negs_per_pos 22.0 +761 26 training.batch_size 2.0 +761 27 model.embedding_dim 2.0 +761 27 model.relation_dim 0.0 +761 27 optimizer.lr 0.07352257168857639 +761 27 negative_sampler.num_negs_per_pos 17.0 +761 27 training.batch_size 1.0 +761 28 model.embedding_dim 2.0 +761 28 model.relation_dim 1.0 +761 28 optimizer.lr 0.03962659859551015 +761 28 negative_sampler.num_negs_per_pos 74.0 +761 28 training.batch_size 2.0 +761 29 model.embedding_dim 1.0 +761 29 model.relation_dim 2.0 +761 29 optimizer.lr 0.016337327243919092 +761 29 negative_sampler.num_negs_per_pos 70.0 +761 29 training.batch_size 0.0 +761 30 model.embedding_dim 1.0 +761 30 model.relation_dim 1.0 +761 30 optimizer.lr 0.011294507015587282 +761 30 negative_sampler.num_negs_per_pos 32.0 +761 30 training.batch_size 0.0 +761 31 model.embedding_dim 2.0 +761 31 model.relation_dim 0.0 +761 31 optimizer.lr 0.07563930658949537 +761 31 negative_sampler.num_negs_per_pos 77.0 +761 31 training.batch_size 0.0 +761 32 model.embedding_dim 1.0 +761 32 model.relation_dim 2.0 +761 32 optimizer.lr 0.00546878049704477 +761 32 negative_sampler.num_negs_per_pos 83.0 +761 32 training.batch_size 2.0 +761 33 model.embedding_dim 0.0 +761 33 model.relation_dim 0.0 +761 33 optimizer.lr 0.009083155766254984 +761 33 negative_sampler.num_negs_per_pos 54.0 +761 33 training.batch_size 1.0 +761 34 model.embedding_dim 1.0 +761 34 model.relation_dim 0.0 +761 34 optimizer.lr 0.0011324812458811252 +761 34 negative_sampler.num_negs_per_pos 1.0 +761 34 training.batch_size 0.0 +761 35 model.embedding_dim 2.0 +761 35 model.relation_dim 0.0 +761 35 optimizer.lr 
0.012212825744728349 +761 35 negative_sampler.num_negs_per_pos 95.0 +761 35 training.batch_size 2.0 +761 36 model.embedding_dim 2.0 +761 36 model.relation_dim 0.0 +761 36 optimizer.lr 0.018639630489428325 +761 36 negative_sampler.num_negs_per_pos 50.0 +761 36 training.batch_size 2.0 +761 37 model.embedding_dim 0.0 +761 37 model.relation_dim 1.0 +761 37 optimizer.lr 0.0016985331774877815 +761 37 negative_sampler.num_negs_per_pos 39.0 +761 37 training.batch_size 1.0 +761 38 model.embedding_dim 0.0 +761 38 model.relation_dim 1.0 +761 38 optimizer.lr 0.004763179785854429 +761 38 negative_sampler.num_negs_per_pos 68.0 +761 38 training.batch_size 1.0 +761 39 model.embedding_dim 2.0 +761 39 model.relation_dim 2.0 +761 39 optimizer.lr 0.002727870887514185 +761 39 negative_sampler.num_negs_per_pos 33.0 +761 39 training.batch_size 2.0 +761 40 model.embedding_dim 1.0 +761 40 model.relation_dim 1.0 +761 40 optimizer.lr 0.05577496188447149 +761 40 negative_sampler.num_negs_per_pos 98.0 +761 40 training.batch_size 1.0 +761 41 model.embedding_dim 0.0 +761 41 model.relation_dim 0.0 +761 41 optimizer.lr 0.01928792991748528 +761 41 negative_sampler.num_negs_per_pos 89.0 +761 41 training.batch_size 0.0 +761 42 model.embedding_dim 1.0 +761 42 model.relation_dim 2.0 +761 42 optimizer.lr 0.0026014076256206916 +761 42 negative_sampler.num_negs_per_pos 57.0 +761 42 training.batch_size 2.0 +761 43 model.embedding_dim 0.0 +761 43 model.relation_dim 1.0 +761 43 optimizer.lr 0.056049080523720866 +761 43 negative_sampler.num_negs_per_pos 47.0 +761 43 training.batch_size 0.0 +761 44 model.embedding_dim 0.0 +761 44 model.relation_dim 2.0 +761 44 optimizer.lr 0.001958091687095039 +761 44 negative_sampler.num_negs_per_pos 39.0 +761 44 training.batch_size 1.0 +761 45 model.embedding_dim 1.0 +761 45 model.relation_dim 2.0 +761 45 optimizer.lr 0.0011734637737224403 +761 45 negative_sampler.num_negs_per_pos 94.0 +761 45 training.batch_size 0.0 +761 46 model.embedding_dim 2.0 +761 46 model.relation_dim 
0.0 +761 46 optimizer.lr 0.08436264001211032 +761 46 negative_sampler.num_negs_per_pos 14.0 +761 46 training.batch_size 2.0 +761 47 model.embedding_dim 2.0 +761 47 model.relation_dim 1.0 +761 47 optimizer.lr 0.0012724977746273979 +761 47 negative_sampler.num_negs_per_pos 48.0 +761 47 training.batch_size 2.0 +761 48 model.embedding_dim 2.0 +761 48 model.relation_dim 1.0 +761 48 optimizer.lr 0.008508836929511453 +761 48 negative_sampler.num_negs_per_pos 34.0 +761 48 training.batch_size 2.0 +761 49 model.embedding_dim 0.0 +761 49 model.relation_dim 1.0 +761 49 optimizer.lr 0.038501482591478384 +761 49 negative_sampler.num_negs_per_pos 25.0 +761 49 training.batch_size 0.0 +761 50 model.embedding_dim 2.0 +761 50 model.relation_dim 2.0 +761 50 optimizer.lr 0.006163968753715005 +761 50 negative_sampler.num_negs_per_pos 68.0 +761 50 training.batch_size 0.0 +761 51 model.embedding_dim 1.0 +761 51 model.relation_dim 1.0 +761 51 optimizer.lr 0.0010519236698304422 +761 51 negative_sampler.num_negs_per_pos 3.0 +761 51 training.batch_size 1.0 +761 52 model.embedding_dim 2.0 +761 52 model.relation_dim 0.0 +761 52 optimizer.lr 0.07786564958940217 +761 52 negative_sampler.num_negs_per_pos 88.0 +761 52 training.batch_size 2.0 +761 53 model.embedding_dim 1.0 +761 53 model.relation_dim 0.0 +761 53 optimizer.lr 0.0034796249331173366 +761 53 negative_sampler.num_negs_per_pos 43.0 +761 53 training.batch_size 0.0 +761 54 model.embedding_dim 2.0 +761 54 model.relation_dim 0.0 +761 54 optimizer.lr 0.0018761171690330708 +761 54 negative_sampler.num_negs_per_pos 6.0 +761 54 training.batch_size 0.0 +761 55 model.embedding_dim 1.0 +761 55 model.relation_dim 0.0 +761 55 optimizer.lr 0.0018687389483633646 +761 55 negative_sampler.num_negs_per_pos 23.0 +761 55 training.batch_size 1.0 +761 56 model.embedding_dim 2.0 +761 56 model.relation_dim 2.0 +761 56 optimizer.lr 0.05706550998400495 +761 56 negative_sampler.num_negs_per_pos 73.0 +761 56 training.batch_size 1.0 +761 57 model.embedding_dim 0.0 
+761 57 model.relation_dim 2.0 +761 57 optimizer.lr 0.0019085070038389995 +761 57 negative_sampler.num_negs_per_pos 91.0 +761 57 training.batch_size 2.0 +761 58 model.embedding_dim 1.0 +761 58 model.relation_dim 2.0 +761 58 optimizer.lr 0.013187499691284398 +761 58 negative_sampler.num_negs_per_pos 61.0 +761 58 training.batch_size 0.0 +761 59 model.embedding_dim 1.0 +761 59 model.relation_dim 1.0 +761 59 optimizer.lr 0.008178797452170263 +761 59 negative_sampler.num_negs_per_pos 99.0 +761 59 training.batch_size 2.0 +761 60 model.embedding_dim 2.0 +761 60 model.relation_dim 1.0 +761 60 optimizer.lr 0.03207702661140178 +761 60 negative_sampler.num_negs_per_pos 65.0 +761 60 training.batch_size 0.0 +761 61 model.embedding_dim 0.0 +761 61 model.relation_dim 2.0 +761 61 optimizer.lr 0.022311623019661074 +761 61 negative_sampler.num_negs_per_pos 56.0 +761 61 training.batch_size 2.0 +761 62 model.embedding_dim 0.0 +761 62 model.relation_dim 0.0 +761 62 optimizer.lr 0.028618607479736297 +761 62 negative_sampler.num_negs_per_pos 57.0 +761 62 training.batch_size 1.0 +761 63 model.embedding_dim 1.0 +761 63 model.relation_dim 2.0 +761 63 optimizer.lr 0.04495568161525054 +761 63 negative_sampler.num_negs_per_pos 40.0 +761 63 training.batch_size 2.0 +761 64 model.embedding_dim 0.0 +761 64 model.relation_dim 0.0 +761 64 optimizer.lr 0.0084751743166467 +761 64 negative_sampler.num_negs_per_pos 76.0 +761 64 training.batch_size 0.0 +761 65 model.embedding_dim 1.0 +761 65 model.relation_dim 2.0 +761 65 optimizer.lr 0.02261096146659395 +761 65 negative_sampler.num_negs_per_pos 72.0 +761 65 training.batch_size 0.0 +761 66 model.embedding_dim 0.0 +761 66 model.relation_dim 1.0 +761 66 optimizer.lr 0.025981190943790138 +761 66 negative_sampler.num_negs_per_pos 35.0 +761 66 training.batch_size 2.0 +761 67 model.embedding_dim 2.0 +761 67 model.relation_dim 2.0 +761 67 optimizer.lr 0.0022472882646368333 +761 67 negative_sampler.num_negs_per_pos 17.0 +761 67 training.batch_size 0.0 +761 68 
model.embedding_dim 2.0 +761 68 model.relation_dim 1.0 +761 68 optimizer.lr 0.00638452855388065 +761 68 negative_sampler.num_negs_per_pos 13.0 +761 68 training.batch_size 2.0 +761 69 model.embedding_dim 0.0 +761 69 model.relation_dim 0.0 +761 69 optimizer.lr 0.007734860309551783 +761 69 negative_sampler.num_negs_per_pos 72.0 +761 69 training.batch_size 2.0 +761 70 model.embedding_dim 2.0 +761 70 model.relation_dim 0.0 +761 70 optimizer.lr 0.01146621117764101 +761 70 negative_sampler.num_negs_per_pos 41.0 +761 70 training.batch_size 2.0 +761 71 model.embedding_dim 0.0 +761 71 model.relation_dim 2.0 +761 71 optimizer.lr 0.020154310305241698 +761 71 negative_sampler.num_negs_per_pos 10.0 +761 71 training.batch_size 2.0 +761 72 model.embedding_dim 1.0 +761 72 model.relation_dim 2.0 +761 72 optimizer.lr 0.03860084885662641 +761 72 negative_sampler.num_negs_per_pos 3.0 +761 72 training.batch_size 1.0 +761 73 model.embedding_dim 2.0 +761 73 model.relation_dim 0.0 +761 73 optimizer.lr 0.0014719423467584594 +761 73 negative_sampler.num_negs_per_pos 8.0 +761 73 training.batch_size 1.0 +761 74 model.embedding_dim 0.0 +761 74 model.relation_dim 0.0 +761 74 optimizer.lr 0.03566240929759047 +761 74 negative_sampler.num_negs_per_pos 32.0 +761 74 training.batch_size 0.0 +761 75 model.embedding_dim 2.0 +761 75 model.relation_dim 2.0 +761 75 optimizer.lr 0.06905853973678484 +761 75 negative_sampler.num_negs_per_pos 6.0 +761 75 training.batch_size 0.0 +761 76 model.embedding_dim 2.0 +761 76 model.relation_dim 2.0 +761 76 optimizer.lr 0.009436384885480502 +761 76 negative_sampler.num_negs_per_pos 95.0 +761 76 training.batch_size 0.0 +761 77 model.embedding_dim 2.0 +761 77 model.relation_dim 0.0 +761 77 optimizer.lr 0.026546711781350995 +761 77 negative_sampler.num_negs_per_pos 33.0 +761 77 training.batch_size 0.0 +761 78 model.embedding_dim 2.0 +761 78 model.relation_dim 1.0 +761 78 optimizer.lr 0.003335058620188785 +761 78 negative_sampler.num_negs_per_pos 59.0 +761 78 
training.batch_size 0.0 +761 79 model.embedding_dim 0.0 +761 79 model.relation_dim 0.0 +761 79 optimizer.lr 0.0027431758180055488 +761 79 negative_sampler.num_negs_per_pos 86.0 +761 79 training.batch_size 2.0 +761 80 model.embedding_dim 2.0 +761 80 model.relation_dim 0.0 +761 80 optimizer.lr 0.03210762327067416 +761 80 negative_sampler.num_negs_per_pos 15.0 +761 80 training.batch_size 0.0 +761 81 model.embedding_dim 1.0 +761 81 model.relation_dim 1.0 +761 81 optimizer.lr 0.0066033206095457144 +761 81 negative_sampler.num_negs_per_pos 59.0 +761 81 training.batch_size 0.0 +761 82 model.embedding_dim 0.0 +761 82 model.relation_dim 1.0 +761 82 optimizer.lr 0.001898482426139475 +761 82 negative_sampler.num_negs_per_pos 62.0 +761 82 training.batch_size 2.0 +761 83 model.embedding_dim 1.0 +761 83 model.relation_dim 1.0 +761 83 optimizer.lr 0.026229749413554035 +761 83 negative_sampler.num_negs_per_pos 52.0 +761 83 training.batch_size 2.0 +761 84 model.embedding_dim 0.0 +761 84 model.relation_dim 0.0 +761 84 optimizer.lr 0.0010297530466707632 +761 84 negative_sampler.num_negs_per_pos 46.0 +761 84 training.batch_size 2.0 +761 85 model.embedding_dim 2.0 +761 85 model.relation_dim 1.0 +761 85 optimizer.lr 0.059622216016660075 +761 85 negative_sampler.num_negs_per_pos 24.0 +761 85 training.batch_size 0.0 +761 86 model.embedding_dim 2.0 +761 86 model.relation_dim 2.0 +761 86 optimizer.lr 0.03783170925303139 +761 86 negative_sampler.num_negs_per_pos 60.0 +761 86 training.batch_size 0.0 +761 87 model.embedding_dim 1.0 +761 87 model.relation_dim 1.0 +761 87 optimizer.lr 0.019176795204365875 +761 87 negative_sampler.num_negs_per_pos 1.0 +761 87 training.batch_size 1.0 +761 88 model.embedding_dim 1.0 +761 88 model.relation_dim 0.0 +761 88 optimizer.lr 0.004494589461126787 +761 88 negative_sampler.num_negs_per_pos 67.0 +761 88 training.batch_size 0.0 +761 89 model.embedding_dim 0.0 +761 89 model.relation_dim 2.0 +761 89 optimizer.lr 0.012546457801505085 +761 89 
negative_sampler.num_negs_per_pos 23.0 +761 89 training.batch_size 2.0 +761 90 model.embedding_dim 1.0 +761 90 model.relation_dim 1.0 +761 90 optimizer.lr 0.01686029235759979 +761 90 negative_sampler.num_negs_per_pos 50.0 +761 90 training.batch_size 0.0 +761 91 model.embedding_dim 1.0 +761 91 model.relation_dim 1.0 +761 91 optimizer.lr 0.00895080883502514 +761 91 negative_sampler.num_negs_per_pos 93.0 +761 91 training.batch_size 0.0 +761 92 model.embedding_dim 2.0 +761 92 model.relation_dim 2.0 +761 92 optimizer.lr 0.06721201189276298 +761 92 negative_sampler.num_negs_per_pos 94.0 +761 92 training.batch_size 2.0 +761 93 model.embedding_dim 2.0 +761 93 model.relation_dim 2.0 +761 93 optimizer.lr 0.013744942388029197 +761 93 negative_sampler.num_negs_per_pos 88.0 +761 93 training.batch_size 2.0 +761 94 model.embedding_dim 2.0 +761 94 model.relation_dim 2.0 +761 94 optimizer.lr 0.0016787243422124376 +761 94 negative_sampler.num_negs_per_pos 30.0 +761 94 training.batch_size 2.0 +761 95 model.embedding_dim 0.0 +761 95 model.relation_dim 1.0 +761 95 optimizer.lr 0.006826001950836299 +761 95 negative_sampler.num_negs_per_pos 92.0 +761 95 training.batch_size 0.0 +761 96 model.embedding_dim 1.0 +761 96 model.relation_dim 2.0 +761 96 optimizer.lr 0.0031908987030425497 +761 96 negative_sampler.num_negs_per_pos 42.0 +761 96 training.batch_size 2.0 +761 97 model.embedding_dim 2.0 +761 97 model.relation_dim 1.0 +761 97 optimizer.lr 0.001711988072729442 +761 97 negative_sampler.num_negs_per_pos 61.0 +761 97 training.batch_size 1.0 +761 98 model.embedding_dim 0.0 +761 98 model.relation_dim 0.0 +761 98 optimizer.lr 0.008764476350252352 +761 98 negative_sampler.num_negs_per_pos 60.0 +761 98 training.batch_size 2.0 +761 99 model.embedding_dim 0.0 +761 99 model.relation_dim 1.0 +761 99 optimizer.lr 0.003398937918098779 +761 99 negative_sampler.num_negs_per_pos 28.0 +761 99 training.batch_size 1.0 +761 100 model.embedding_dim 0.0 +761 100 model.relation_dim 1.0 +761 100 optimizer.lr 
0.02753044240815527 +761 100 negative_sampler.num_negs_per_pos 38.0 +761 100 training.batch_size 1.0 +761 1 dataset """kinships""" +761 1 model """transd""" +761 1 loss """bceaftersigmoid""" +761 1 regularizer """no""" +761 1 optimizer """adam""" +761 1 training_loop """owa""" +761 1 negative_sampler """basic""" +761 1 evaluator """rankbased""" +761 2 dataset """kinships""" +761 2 model """transd""" +761 2 loss """bceaftersigmoid""" +761 2 regularizer """no""" +761 2 optimizer """adam""" +761 2 training_loop """owa""" +761 2 negative_sampler """basic""" +761 2 evaluator """rankbased""" +761 3 dataset """kinships""" +761 3 model """transd""" +761 3 loss """bceaftersigmoid""" +761 3 regularizer """no""" +761 3 optimizer """adam""" +761 3 training_loop """owa""" +761 3 negative_sampler """basic""" +761 3 evaluator """rankbased""" +761 4 dataset """kinships""" +761 4 model """transd""" +761 4 loss """bceaftersigmoid""" +761 4 regularizer """no""" +761 4 optimizer """adam""" +761 4 training_loop """owa""" +761 4 negative_sampler """basic""" +761 4 evaluator """rankbased""" +761 5 dataset """kinships""" +761 5 model """transd""" +761 5 loss """bceaftersigmoid""" +761 5 regularizer """no""" +761 5 optimizer """adam""" +761 5 training_loop """owa""" +761 5 negative_sampler """basic""" +761 5 evaluator """rankbased""" +761 6 dataset """kinships""" +761 6 model """transd""" +761 6 loss """bceaftersigmoid""" +761 6 regularizer """no""" +761 6 optimizer """adam""" +761 6 training_loop """owa""" +761 6 negative_sampler """basic""" +761 6 evaluator """rankbased""" +761 7 dataset """kinships""" +761 7 model """transd""" +761 7 loss """bceaftersigmoid""" +761 7 regularizer """no""" +761 7 optimizer """adam""" +761 7 training_loop """owa""" +761 7 negative_sampler """basic""" +761 7 evaluator """rankbased""" +761 8 dataset """kinships""" +761 8 model """transd""" +761 8 loss """bceaftersigmoid""" +761 8 regularizer """no""" +761 8 optimizer """adam""" +761 8 training_loop """owa""" 
+761 8 negative_sampler """basic""" +761 8 evaluator """rankbased""" +761 9 dataset """kinships""" +761 9 model """transd""" +761 9 loss """bceaftersigmoid""" +761 9 regularizer """no""" +761 9 optimizer """adam""" +761 9 training_loop """owa""" +761 9 negative_sampler """basic""" +761 9 evaluator """rankbased""" +761 10 dataset """kinships""" +761 10 model """transd""" +761 10 loss """bceaftersigmoid""" +761 10 regularizer """no""" +761 10 optimizer """adam""" +761 10 training_loop """owa""" +761 10 negative_sampler """basic""" +761 10 evaluator """rankbased""" +761 11 dataset """kinships""" +761 11 model """transd""" +761 11 loss """bceaftersigmoid""" +761 11 regularizer """no""" +761 11 optimizer """adam""" +761 11 training_loop """owa""" +761 11 negative_sampler """basic""" +761 11 evaluator """rankbased""" +761 12 dataset """kinships""" +761 12 model """transd""" +761 12 loss """bceaftersigmoid""" +761 12 regularizer """no""" +761 12 optimizer """adam""" +761 12 training_loop """owa""" +761 12 negative_sampler """basic""" +761 12 evaluator """rankbased""" +761 13 dataset """kinships""" +761 13 model """transd""" +761 13 loss """bceaftersigmoid""" +761 13 regularizer """no""" +761 13 optimizer """adam""" +761 13 training_loop """owa""" +761 13 negative_sampler """basic""" +761 13 evaluator """rankbased""" +761 14 dataset """kinships""" +761 14 model """transd""" +761 14 loss """bceaftersigmoid""" +761 14 regularizer """no""" +761 14 optimizer """adam""" +761 14 training_loop """owa""" +761 14 negative_sampler """basic""" +761 14 evaluator """rankbased""" +761 15 dataset """kinships""" +761 15 model """transd""" +761 15 loss """bceaftersigmoid""" +761 15 regularizer """no""" +761 15 optimizer """adam""" +761 15 training_loop """owa""" +761 15 negative_sampler """basic""" +761 15 evaluator """rankbased""" +761 16 dataset """kinships""" +761 16 model """transd""" +761 16 loss """bceaftersigmoid""" +761 16 regularizer """no""" +761 16 optimizer """adam""" +761 16 
training_loop """owa""" +761 16 negative_sampler """basic""" +761 16 evaluator """rankbased""" +761 17 dataset """kinships""" +761 17 model """transd""" +761 17 loss """bceaftersigmoid""" +761 17 regularizer """no""" +761 17 optimizer """adam""" +761 17 training_loop """owa""" +761 17 negative_sampler """basic""" +761 17 evaluator """rankbased""" +761 18 dataset """kinships""" +761 18 model """transd""" +761 18 loss """bceaftersigmoid""" +761 18 regularizer """no""" +761 18 optimizer """adam""" +761 18 training_loop """owa""" +761 18 negative_sampler """basic""" +761 18 evaluator """rankbased""" +761 19 dataset """kinships""" +761 19 model """transd""" +761 19 loss """bceaftersigmoid""" +761 19 regularizer """no""" +761 19 optimizer """adam""" +761 19 training_loop """owa""" +761 19 negative_sampler """basic""" +761 19 evaluator """rankbased""" +761 20 dataset """kinships""" +761 20 model """transd""" +761 20 loss """bceaftersigmoid""" +761 20 regularizer """no""" +761 20 optimizer """adam""" +761 20 training_loop """owa""" +761 20 negative_sampler """basic""" +761 20 evaluator """rankbased""" +761 21 dataset """kinships""" +761 21 model """transd""" +761 21 loss """bceaftersigmoid""" +761 21 regularizer """no""" +761 21 optimizer """adam""" +761 21 training_loop """owa""" +761 21 negative_sampler """basic""" +761 21 evaluator """rankbased""" +761 22 dataset """kinships""" +761 22 model """transd""" +761 22 loss """bceaftersigmoid""" +761 22 regularizer """no""" +761 22 optimizer """adam""" +761 22 training_loop """owa""" +761 22 negative_sampler """basic""" +761 22 evaluator """rankbased""" +761 23 dataset """kinships""" +761 23 model """transd""" +761 23 loss """bceaftersigmoid""" +761 23 regularizer """no""" +761 23 optimizer """adam""" +761 23 training_loop """owa""" +761 23 negative_sampler """basic""" +761 23 evaluator """rankbased""" +761 24 dataset """kinships""" +761 24 model """transd""" +761 24 loss """bceaftersigmoid""" +761 24 regularizer """no""" +761 
24 optimizer """adam""" +761 24 training_loop """owa""" +761 24 negative_sampler """basic""" +761 24 evaluator """rankbased""" +761 25 dataset """kinships""" +761 25 model """transd""" +761 25 loss """bceaftersigmoid""" +761 25 regularizer """no""" +761 25 optimizer """adam""" +761 25 training_loop """owa""" +761 25 negative_sampler """basic""" +761 25 evaluator """rankbased""" +761 26 dataset """kinships""" +761 26 model """transd""" +761 26 loss """bceaftersigmoid""" +761 26 regularizer """no""" +761 26 optimizer """adam""" +761 26 training_loop """owa""" +761 26 negative_sampler """basic""" +761 26 evaluator """rankbased""" +761 27 dataset """kinships""" +761 27 model """transd""" +761 27 loss """bceaftersigmoid""" +761 27 regularizer """no""" +761 27 optimizer """adam""" +761 27 training_loop """owa""" +761 27 negative_sampler """basic""" +761 27 evaluator """rankbased""" +761 28 dataset """kinships""" +761 28 model """transd""" +761 28 loss """bceaftersigmoid""" +761 28 regularizer """no""" +761 28 optimizer """adam""" +761 28 training_loop """owa""" +761 28 negative_sampler """basic""" +761 28 evaluator """rankbased""" +761 29 dataset """kinships""" +761 29 model """transd""" +761 29 loss """bceaftersigmoid""" +761 29 regularizer """no""" +761 29 optimizer """adam""" +761 29 training_loop """owa""" +761 29 negative_sampler """basic""" +761 29 evaluator """rankbased""" +761 30 dataset """kinships""" +761 30 model """transd""" +761 30 loss """bceaftersigmoid""" +761 30 regularizer """no""" +761 30 optimizer """adam""" +761 30 training_loop """owa""" +761 30 negative_sampler """basic""" +761 30 evaluator """rankbased""" +761 31 dataset """kinships""" +761 31 model """transd""" +761 31 loss """bceaftersigmoid""" +761 31 regularizer """no""" +761 31 optimizer """adam""" +761 31 training_loop """owa""" +761 31 negative_sampler """basic""" +761 31 evaluator """rankbased""" +761 32 dataset """kinships""" +761 32 model """transd""" +761 32 loss """bceaftersigmoid""" 
+761 32 regularizer """no""" +761 32 optimizer """adam""" +761 32 training_loop """owa""" +761 32 negative_sampler """basic""" +761 32 evaluator """rankbased""" +761 33 dataset """kinships""" +761 33 model """transd""" +761 33 loss """bceaftersigmoid""" +761 33 regularizer """no""" +761 33 optimizer """adam""" +761 33 training_loop """owa""" +761 33 negative_sampler """basic""" +761 33 evaluator """rankbased""" +761 34 dataset """kinships""" +761 34 model """transd""" +761 34 loss """bceaftersigmoid""" +761 34 regularizer """no""" +761 34 optimizer """adam""" +761 34 training_loop """owa""" +761 34 negative_sampler """basic""" +761 34 evaluator """rankbased""" +761 35 dataset """kinships""" +761 35 model """transd""" +761 35 loss """bceaftersigmoid""" +761 35 regularizer """no""" +761 35 optimizer """adam""" +761 35 training_loop """owa""" +761 35 negative_sampler """basic""" +761 35 evaluator """rankbased""" +761 36 dataset """kinships""" +761 36 model """transd""" +761 36 loss """bceaftersigmoid""" +761 36 regularizer """no""" +761 36 optimizer """adam""" +761 36 training_loop """owa""" +761 36 negative_sampler """basic""" +761 36 evaluator """rankbased""" +761 37 dataset """kinships""" +761 37 model """transd""" +761 37 loss """bceaftersigmoid""" +761 37 regularizer """no""" +761 37 optimizer """adam""" +761 37 training_loop """owa""" +761 37 negative_sampler """basic""" +761 37 evaluator """rankbased""" +761 38 dataset """kinships""" +761 38 model """transd""" +761 38 loss """bceaftersigmoid""" +761 38 regularizer """no""" +761 38 optimizer """adam""" +761 38 training_loop """owa""" +761 38 negative_sampler """basic""" +761 38 evaluator """rankbased""" +761 39 dataset """kinships""" +761 39 model """transd""" +761 39 loss """bceaftersigmoid""" +761 39 regularizer """no""" +761 39 optimizer """adam""" +761 39 training_loop """owa""" +761 39 negative_sampler """basic""" +761 39 evaluator """rankbased""" +761 40 dataset """kinships""" +761 40 model """transd""" 
+761 40 loss """bceaftersigmoid""" +761 40 regularizer """no""" +761 40 optimizer """adam""" +761 40 training_loop """owa""" +761 40 negative_sampler """basic""" +761 40 evaluator """rankbased""" +761 41 dataset """kinships""" +761 41 model """transd""" +761 41 loss """bceaftersigmoid""" +761 41 regularizer """no""" +761 41 optimizer """adam""" +761 41 training_loop """owa""" +761 41 negative_sampler """basic""" +761 41 evaluator """rankbased""" +761 42 dataset """kinships""" +761 42 model """transd""" +761 42 loss """bceaftersigmoid""" +761 42 regularizer """no""" +761 42 optimizer """adam""" +761 42 training_loop """owa""" +761 42 negative_sampler """basic""" +761 42 evaluator """rankbased""" +761 43 dataset """kinships""" +761 43 model """transd""" +761 43 loss """bceaftersigmoid""" +761 43 regularizer """no""" +761 43 optimizer """adam""" +761 43 training_loop """owa""" +761 43 negative_sampler """basic""" +761 43 evaluator """rankbased""" +761 44 dataset """kinships""" +761 44 model """transd""" +761 44 loss """bceaftersigmoid""" +761 44 regularizer """no""" +761 44 optimizer """adam""" +761 44 training_loop """owa""" +761 44 negative_sampler """basic""" +761 44 evaluator """rankbased""" +761 45 dataset """kinships""" +761 45 model """transd""" +761 45 loss """bceaftersigmoid""" +761 45 regularizer """no""" +761 45 optimizer """adam""" +761 45 training_loop """owa""" +761 45 negative_sampler """basic""" +761 45 evaluator """rankbased""" +761 46 dataset """kinships""" +761 46 model """transd""" +761 46 loss """bceaftersigmoid""" +761 46 regularizer """no""" +761 46 optimizer """adam""" +761 46 training_loop """owa""" +761 46 negative_sampler """basic""" +761 46 evaluator """rankbased""" +761 47 dataset """kinships""" +761 47 model """transd""" +761 47 loss """bceaftersigmoid""" +761 47 regularizer """no""" +761 47 optimizer """adam""" +761 47 training_loop """owa""" +761 47 negative_sampler """basic""" +761 47 evaluator """rankbased""" +761 48 dataset 
"""kinships""" +761 48 model """transd""" +761 48 loss """bceaftersigmoid""" +761 48 regularizer """no""" +761 48 optimizer """adam""" +761 48 training_loop """owa""" +761 48 negative_sampler """basic""" +761 48 evaluator """rankbased""" +761 49 dataset """kinships""" +761 49 model """transd""" +761 49 loss """bceaftersigmoid""" +761 49 regularizer """no""" +761 49 optimizer """adam""" +761 49 training_loop """owa""" +761 49 negative_sampler """basic""" +761 49 evaluator """rankbased""" +761 50 dataset """kinships""" +761 50 model """transd""" +761 50 loss """bceaftersigmoid""" +761 50 regularizer """no""" +761 50 optimizer """adam""" +761 50 training_loop """owa""" +761 50 negative_sampler """basic""" +761 50 evaluator """rankbased""" +761 51 dataset """kinships""" +761 51 model """transd""" +761 51 loss """bceaftersigmoid""" +761 51 regularizer """no""" +761 51 optimizer """adam""" +761 51 training_loop """owa""" +761 51 negative_sampler """basic""" +761 51 evaluator """rankbased""" +761 52 dataset """kinships""" +761 52 model """transd""" +761 52 loss """bceaftersigmoid""" +761 52 regularizer """no""" +761 52 optimizer """adam""" +761 52 training_loop """owa""" +761 52 negative_sampler """basic""" +761 52 evaluator """rankbased""" +761 53 dataset """kinships""" +761 53 model """transd""" +761 53 loss """bceaftersigmoid""" +761 53 regularizer """no""" +761 53 optimizer """adam""" +761 53 training_loop """owa""" +761 53 negative_sampler """basic""" +761 53 evaluator """rankbased""" +761 54 dataset """kinships""" +761 54 model """transd""" +761 54 loss """bceaftersigmoid""" +761 54 regularizer """no""" +761 54 optimizer """adam""" +761 54 training_loop """owa""" +761 54 negative_sampler """basic""" +761 54 evaluator """rankbased""" +761 55 dataset """kinships""" +761 55 model """transd""" +761 55 loss """bceaftersigmoid""" +761 55 regularizer """no""" +761 55 optimizer """adam""" +761 55 training_loop """owa""" +761 55 negative_sampler """basic""" +761 55 evaluator 
"""rankbased""" +761 56 dataset """kinships""" +761 56 model """transd""" +761 56 loss """bceaftersigmoid""" +761 56 regularizer """no""" +761 56 optimizer """adam""" +761 56 training_loop """owa""" +761 56 negative_sampler """basic""" +761 56 evaluator """rankbased""" +761 57 dataset """kinships""" +761 57 model """transd""" +761 57 loss """bceaftersigmoid""" +761 57 regularizer """no""" +761 57 optimizer """adam""" +761 57 training_loop """owa""" +761 57 negative_sampler """basic""" +761 57 evaluator """rankbased""" +761 58 dataset """kinships""" +761 58 model """transd""" +761 58 loss """bceaftersigmoid""" +761 58 regularizer """no""" +761 58 optimizer """adam""" +761 58 training_loop """owa""" +761 58 negative_sampler """basic""" +761 58 evaluator """rankbased""" +761 59 dataset """kinships""" +761 59 model """transd""" +761 59 loss """bceaftersigmoid""" +761 59 regularizer """no""" +761 59 optimizer """adam""" +761 59 training_loop """owa""" +761 59 negative_sampler """basic""" +761 59 evaluator """rankbased""" +761 60 dataset """kinships""" +761 60 model """transd""" +761 60 loss """bceaftersigmoid""" +761 60 regularizer """no""" +761 60 optimizer """adam""" +761 60 training_loop """owa""" +761 60 negative_sampler """basic""" +761 60 evaluator """rankbased""" +761 61 dataset """kinships""" +761 61 model """transd""" +761 61 loss """bceaftersigmoid""" +761 61 regularizer """no""" +761 61 optimizer """adam""" +761 61 training_loop """owa""" +761 61 negative_sampler """basic""" +761 61 evaluator """rankbased""" +761 62 dataset """kinships""" +761 62 model """transd""" +761 62 loss """bceaftersigmoid""" +761 62 regularizer """no""" +761 62 optimizer """adam""" +761 62 training_loop """owa""" +761 62 negative_sampler """basic""" +761 62 evaluator """rankbased""" +761 63 dataset """kinships""" +761 63 model """transd""" +761 63 loss """bceaftersigmoid""" +761 63 regularizer """no""" +761 63 optimizer """adam""" +761 63 training_loop """owa""" +761 63 
negative_sampler """basic""" +761 63 evaluator """rankbased""" +761 64 dataset """kinships""" +761 64 model """transd""" +761 64 loss """bceaftersigmoid""" +761 64 regularizer """no""" +761 64 optimizer """adam""" +761 64 training_loop """owa""" +761 64 negative_sampler """basic""" +761 64 evaluator """rankbased""" +761 65 dataset """kinships""" +761 65 model """transd""" +761 65 loss """bceaftersigmoid""" +761 65 regularizer """no""" +761 65 optimizer """adam""" +761 65 training_loop """owa""" +761 65 negative_sampler """basic""" +761 65 evaluator """rankbased""" +761 66 dataset """kinships""" +761 66 model """transd""" +761 66 loss """bceaftersigmoid""" +761 66 regularizer """no""" +761 66 optimizer """adam""" +761 66 training_loop """owa""" +761 66 negative_sampler """basic""" +761 66 evaluator """rankbased""" +761 67 dataset """kinships""" +761 67 model """transd""" +761 67 loss """bceaftersigmoid""" +761 67 regularizer """no""" +761 67 optimizer """adam""" +761 67 training_loop """owa""" +761 67 negative_sampler """basic""" +761 67 evaluator """rankbased""" +761 68 dataset """kinships""" +761 68 model """transd""" +761 68 loss """bceaftersigmoid""" +761 68 regularizer """no""" +761 68 optimizer """adam""" +761 68 training_loop """owa""" +761 68 negative_sampler """basic""" +761 68 evaluator """rankbased""" +761 69 dataset """kinships""" +761 69 model """transd""" +761 69 loss """bceaftersigmoid""" +761 69 regularizer """no""" +761 69 optimizer """adam""" +761 69 training_loop """owa""" +761 69 negative_sampler """basic""" +761 69 evaluator """rankbased""" +761 70 dataset """kinships""" +761 70 model """transd""" +761 70 loss """bceaftersigmoid""" +761 70 regularizer """no""" +761 70 optimizer """adam""" +761 70 training_loop """owa""" +761 70 negative_sampler """basic""" +761 70 evaluator """rankbased""" +761 71 dataset """kinships""" +761 71 model """transd""" +761 71 loss """bceaftersigmoid""" +761 71 regularizer """no""" +761 71 optimizer """adam""" +761 71 
training_loop """owa""" +761 71 negative_sampler """basic""" +761 71 evaluator """rankbased""" +761 72 dataset """kinships""" +761 72 model """transd""" +761 72 loss """bceaftersigmoid""" +761 72 regularizer """no""" +761 72 optimizer """adam""" +761 72 training_loop """owa""" +761 72 negative_sampler """basic""" +761 72 evaluator """rankbased""" +761 73 dataset """kinships""" +761 73 model """transd""" +761 73 loss """bceaftersigmoid""" +761 73 regularizer """no""" +761 73 optimizer """adam""" +761 73 training_loop """owa""" +761 73 negative_sampler """basic""" +761 73 evaluator """rankbased""" +761 74 dataset """kinships""" +761 74 model """transd""" +761 74 loss """bceaftersigmoid""" +761 74 regularizer """no""" +761 74 optimizer """adam""" +761 74 training_loop """owa""" +761 74 negative_sampler """basic""" +761 74 evaluator """rankbased""" +761 75 dataset """kinships""" +761 75 model """transd""" +761 75 loss """bceaftersigmoid""" +761 75 regularizer """no""" +761 75 optimizer """adam""" +761 75 training_loop """owa""" +761 75 negative_sampler """basic""" +761 75 evaluator """rankbased""" +761 76 dataset """kinships""" +761 76 model """transd""" +761 76 loss """bceaftersigmoid""" +761 76 regularizer """no""" +761 76 optimizer """adam""" +761 76 training_loop """owa""" +761 76 negative_sampler """basic""" +761 76 evaluator """rankbased""" +761 77 dataset """kinships""" +761 77 model """transd""" +761 77 loss """bceaftersigmoid""" +761 77 regularizer """no""" +761 77 optimizer """adam""" +761 77 training_loop """owa""" +761 77 negative_sampler """basic""" +761 77 evaluator """rankbased""" +761 78 dataset """kinships""" +761 78 model """transd""" +761 78 loss """bceaftersigmoid""" +761 78 regularizer """no""" +761 78 optimizer """adam""" +761 78 training_loop """owa""" +761 78 negative_sampler """basic""" +761 78 evaluator """rankbased""" +761 79 dataset """kinships""" +761 79 model """transd""" +761 79 loss """bceaftersigmoid""" +761 79 regularizer """no""" +761 
79 optimizer """adam""" +761 79 training_loop """owa""" +761 79 negative_sampler """basic""" +761 79 evaluator """rankbased""" +761 80 dataset """kinships""" +761 80 model """transd""" +761 80 loss """bceaftersigmoid""" +761 80 regularizer """no""" +761 80 optimizer """adam""" +761 80 training_loop """owa""" +761 80 negative_sampler """basic""" +761 80 evaluator """rankbased""" +761 81 dataset """kinships""" +761 81 model """transd""" +761 81 loss """bceaftersigmoid""" +761 81 regularizer """no""" +761 81 optimizer """adam""" +761 81 training_loop """owa""" +761 81 negative_sampler """basic""" +761 81 evaluator """rankbased""" +761 82 dataset """kinships""" +761 82 model """transd""" +761 82 loss """bceaftersigmoid""" +761 82 regularizer """no""" +761 82 optimizer """adam""" +761 82 training_loop """owa""" +761 82 negative_sampler """basic""" +761 82 evaluator """rankbased""" +761 83 dataset """kinships""" +761 83 model """transd""" +761 83 loss """bceaftersigmoid""" +761 83 regularizer """no""" +761 83 optimizer """adam""" +761 83 training_loop """owa""" +761 83 negative_sampler """basic""" +761 83 evaluator """rankbased""" +761 84 dataset """kinships""" +761 84 model """transd""" +761 84 loss """bceaftersigmoid""" +761 84 regularizer """no""" +761 84 optimizer """adam""" +761 84 training_loop """owa""" +761 84 negative_sampler """basic""" +761 84 evaluator """rankbased""" +761 85 dataset """kinships""" +761 85 model """transd""" +761 85 loss """bceaftersigmoid""" +761 85 regularizer """no""" +761 85 optimizer """adam""" +761 85 training_loop """owa""" +761 85 negative_sampler """basic""" +761 85 evaluator """rankbased""" +761 86 dataset """kinships""" +761 86 model """transd""" +761 86 loss """bceaftersigmoid""" +761 86 regularizer """no""" +761 86 optimizer """adam""" +761 86 training_loop """owa""" +761 86 negative_sampler """basic""" +761 86 evaluator """rankbased""" +761 87 dataset """kinships""" +761 87 model """transd""" +761 87 loss """bceaftersigmoid""" 
+761 87 regularizer """no""" +761 87 optimizer """adam""" +761 87 training_loop """owa""" +761 87 negative_sampler """basic""" +761 87 evaluator """rankbased""" +761 88 dataset """kinships""" +761 88 model """transd""" +761 88 loss """bceaftersigmoid""" +761 88 regularizer """no""" +761 88 optimizer """adam""" +761 88 training_loop """owa""" +761 88 negative_sampler """basic""" +761 88 evaluator """rankbased""" +761 89 dataset """kinships""" +761 89 model """transd""" +761 89 loss """bceaftersigmoid""" +761 89 regularizer """no""" +761 89 optimizer """adam""" +761 89 training_loop """owa""" +761 89 negative_sampler """basic""" +761 89 evaluator """rankbased""" +761 90 dataset """kinships""" +761 90 model """transd""" +761 90 loss """bceaftersigmoid""" +761 90 regularizer """no""" +761 90 optimizer """adam""" +761 90 training_loop """owa""" +761 90 negative_sampler """basic""" +761 90 evaluator """rankbased""" +761 91 dataset """kinships""" +761 91 model """transd""" +761 91 loss """bceaftersigmoid""" +761 91 regularizer """no""" +761 91 optimizer """adam""" +761 91 training_loop """owa""" +761 91 negative_sampler """basic""" +761 91 evaluator """rankbased""" +761 92 dataset """kinships""" +761 92 model """transd""" +761 92 loss """bceaftersigmoid""" +761 92 regularizer """no""" +761 92 optimizer """adam""" +761 92 training_loop """owa""" +761 92 negative_sampler """basic""" +761 92 evaluator """rankbased""" +761 93 dataset """kinships""" +761 93 model """transd""" +761 93 loss """bceaftersigmoid""" +761 93 regularizer """no""" +761 93 optimizer """adam""" +761 93 training_loop """owa""" +761 93 negative_sampler """basic""" +761 93 evaluator """rankbased""" +761 94 dataset """kinships""" +761 94 model """transd""" +761 94 loss """bceaftersigmoid""" +761 94 regularizer """no""" +761 94 optimizer """adam""" +761 94 training_loop """owa""" +761 94 negative_sampler """basic""" +761 94 evaluator """rankbased""" +761 95 dataset """kinships""" +761 95 model """transd""" 
+761 95 loss """bceaftersigmoid""" +761 95 regularizer """no""" +761 95 optimizer """adam""" +761 95 training_loop """owa""" +761 95 negative_sampler """basic""" +761 95 evaluator """rankbased""" +761 96 dataset """kinships""" +761 96 model """transd""" +761 96 loss """bceaftersigmoid""" +761 96 regularizer """no""" +761 96 optimizer """adam""" +761 96 training_loop """owa""" +761 96 negative_sampler """basic""" +761 96 evaluator """rankbased""" +761 97 dataset """kinships""" +761 97 model """transd""" +761 97 loss """bceaftersigmoid""" +761 97 regularizer """no""" +761 97 optimizer """adam""" +761 97 training_loop """owa""" +761 97 negative_sampler """basic""" +761 97 evaluator """rankbased""" +761 98 dataset """kinships""" +761 98 model """transd""" +761 98 loss """bceaftersigmoid""" +761 98 regularizer """no""" +761 98 optimizer """adam""" +761 98 training_loop """owa""" +761 98 negative_sampler """basic""" +761 98 evaluator """rankbased""" +761 99 dataset """kinships""" +761 99 model """transd""" +761 99 loss """bceaftersigmoid""" +761 99 regularizer """no""" +761 99 optimizer """adam""" +761 99 training_loop """owa""" +761 99 negative_sampler """basic""" +761 99 evaluator """rankbased""" +761 100 dataset """kinships""" +761 100 model """transd""" +761 100 loss """bceaftersigmoid""" +761 100 regularizer """no""" +761 100 optimizer """adam""" +761 100 training_loop """owa""" +761 100 negative_sampler """basic""" +761 100 evaluator """rankbased""" +762 1 model.embedding_dim 1.0 +762 1 model.relation_dim 1.0 +762 1 optimizer.lr 0.004644473737312585 +762 1 negative_sampler.num_negs_per_pos 57.0 +762 1 training.batch_size 0.0 +762 2 model.embedding_dim 2.0 +762 2 model.relation_dim 0.0 +762 2 optimizer.lr 0.00665587602561266 +762 2 negative_sampler.num_negs_per_pos 72.0 +762 2 training.batch_size 1.0 +762 3 model.embedding_dim 2.0 +762 3 model.relation_dim 1.0 +762 3 optimizer.lr 0.0011151752969117547 +762 3 negative_sampler.num_negs_per_pos 77.0 +762 3 
training.batch_size 1.0 +762 4 model.embedding_dim 1.0 +762 4 model.relation_dim 2.0 +762 4 optimizer.lr 0.0013731387202400133 +762 4 negative_sampler.num_negs_per_pos 83.0 +762 4 training.batch_size 1.0 +762 5 model.embedding_dim 1.0 +762 5 model.relation_dim 0.0 +762 5 optimizer.lr 0.06941898353205687 +762 5 negative_sampler.num_negs_per_pos 55.0 +762 5 training.batch_size 2.0 +762 6 model.embedding_dim 2.0 +762 6 model.relation_dim 0.0 +762 6 optimizer.lr 0.04432313782827617 +762 6 negative_sampler.num_negs_per_pos 45.0 +762 6 training.batch_size 2.0 +762 7 model.embedding_dim 0.0 +762 7 model.relation_dim 0.0 +762 7 optimizer.lr 0.001273317976683014 +762 7 negative_sampler.num_negs_per_pos 49.0 +762 7 training.batch_size 1.0 +762 8 model.embedding_dim 0.0 +762 8 model.relation_dim 1.0 +762 8 optimizer.lr 0.010204981412378453 +762 8 negative_sampler.num_negs_per_pos 26.0 +762 8 training.batch_size 2.0 +762 9 model.embedding_dim 1.0 +762 9 model.relation_dim 2.0 +762 9 optimizer.lr 0.006809735273144616 +762 9 negative_sampler.num_negs_per_pos 98.0 +762 9 training.batch_size 1.0 +762 10 model.embedding_dim 1.0 +762 10 model.relation_dim 2.0 +762 10 optimizer.lr 0.07039022168539373 +762 10 negative_sampler.num_negs_per_pos 38.0 +762 10 training.batch_size 0.0 +762 11 model.embedding_dim 1.0 +762 11 model.relation_dim 0.0 +762 11 optimizer.lr 0.003788967962334674 +762 11 negative_sampler.num_negs_per_pos 48.0 +762 11 training.batch_size 1.0 +762 12 model.embedding_dim 1.0 +762 12 model.relation_dim 2.0 +762 12 optimizer.lr 0.09046361642388458 +762 12 negative_sampler.num_negs_per_pos 95.0 +762 12 training.batch_size 2.0 +762 13 model.embedding_dim 2.0 +762 13 model.relation_dim 0.0 +762 13 optimizer.lr 0.0029412414787317437 +762 13 negative_sampler.num_negs_per_pos 99.0 +762 13 training.batch_size 2.0 +762 14 model.embedding_dim 1.0 +762 14 model.relation_dim 1.0 +762 14 optimizer.lr 0.045219468828292225 +762 14 negative_sampler.num_negs_per_pos 46.0 +762 14 
training.batch_size 0.0 +762 15 model.embedding_dim 2.0 +762 15 model.relation_dim 0.0 +762 15 optimizer.lr 0.0019926710903871514 +762 15 negative_sampler.num_negs_per_pos 72.0 +762 15 training.batch_size 1.0 +762 16 model.embedding_dim 2.0 +762 16 model.relation_dim 2.0 +762 16 optimizer.lr 0.004363811764864076 +762 16 negative_sampler.num_negs_per_pos 92.0 +762 16 training.batch_size 1.0 +762 17 model.embedding_dim 2.0 +762 17 model.relation_dim 1.0 +762 17 optimizer.lr 0.008212160117093383 +762 17 negative_sampler.num_negs_per_pos 0.0 +762 17 training.batch_size 0.0 +762 18 model.embedding_dim 0.0 +762 18 model.relation_dim 1.0 +762 18 optimizer.lr 0.06771108634111193 +762 18 negative_sampler.num_negs_per_pos 48.0 +762 18 training.batch_size 2.0 +762 19 model.embedding_dim 1.0 +762 19 model.relation_dim 0.0 +762 19 optimizer.lr 0.0036908033253438676 +762 19 negative_sampler.num_negs_per_pos 91.0 +762 19 training.batch_size 0.0 +762 20 model.embedding_dim 1.0 +762 20 model.relation_dim 0.0 +762 20 optimizer.lr 0.0040199297551693 +762 20 negative_sampler.num_negs_per_pos 5.0 +762 20 training.batch_size 0.0 +762 21 model.embedding_dim 0.0 +762 21 model.relation_dim 0.0 +762 21 optimizer.lr 0.04771046178758553 +762 21 negative_sampler.num_negs_per_pos 35.0 +762 21 training.batch_size 0.0 +762 22 model.embedding_dim 0.0 +762 22 model.relation_dim 1.0 +762 22 optimizer.lr 0.007152584449403826 +762 22 negative_sampler.num_negs_per_pos 25.0 +762 22 training.batch_size 0.0 +762 23 model.embedding_dim 1.0 +762 23 model.relation_dim 1.0 +762 23 optimizer.lr 0.0027222841280289765 +762 23 negative_sampler.num_negs_per_pos 43.0 +762 23 training.batch_size 1.0 +762 24 model.embedding_dim 2.0 +762 24 model.relation_dim 0.0 +762 24 optimizer.lr 0.044366654419308496 +762 24 negative_sampler.num_negs_per_pos 6.0 +762 24 training.batch_size 1.0 +762 25 model.embedding_dim 1.0 +762 25 model.relation_dim 0.0 +762 25 optimizer.lr 0.07153587608352169 +762 25 
negative_sampler.num_negs_per_pos 24.0 +762 25 training.batch_size 0.0 +762 26 model.embedding_dim 2.0 +762 26 model.relation_dim 2.0 +762 26 optimizer.lr 0.006495741397918748 +762 26 negative_sampler.num_negs_per_pos 47.0 +762 26 training.batch_size 2.0 +762 27 model.embedding_dim 1.0 +762 27 model.relation_dim 0.0 +762 27 optimizer.lr 0.00502073662396926 +762 27 negative_sampler.num_negs_per_pos 43.0 +762 27 training.batch_size 2.0 +762 28 model.embedding_dim 2.0 +762 28 model.relation_dim 0.0 +762 28 optimizer.lr 0.0026482735902529546 +762 28 negative_sampler.num_negs_per_pos 88.0 +762 28 training.batch_size 0.0 +762 29 model.embedding_dim 2.0 +762 29 model.relation_dim 2.0 +762 29 optimizer.lr 0.004749145697530395 +762 29 negative_sampler.num_negs_per_pos 16.0 +762 29 training.batch_size 1.0 +762 30 model.embedding_dim 0.0 +762 30 model.relation_dim 0.0 +762 30 optimizer.lr 0.0396181107761281 +762 30 negative_sampler.num_negs_per_pos 75.0 +762 30 training.batch_size 2.0 +762 31 model.embedding_dim 0.0 +762 31 model.relation_dim 1.0 +762 31 optimizer.lr 0.0042749195795035305 +762 31 negative_sampler.num_negs_per_pos 38.0 +762 31 training.batch_size 1.0 +762 32 model.embedding_dim 0.0 +762 32 model.relation_dim 2.0 +762 32 optimizer.lr 0.008799598406941509 +762 32 negative_sampler.num_negs_per_pos 98.0 +762 32 training.batch_size 1.0 +762 33 model.embedding_dim 0.0 +762 33 model.relation_dim 1.0 +762 33 optimizer.lr 0.0871776598669945 +762 33 negative_sampler.num_negs_per_pos 51.0 +762 33 training.batch_size 1.0 +762 34 model.embedding_dim 1.0 +762 34 model.relation_dim 2.0 +762 34 optimizer.lr 0.022899839648193962 +762 34 negative_sampler.num_negs_per_pos 7.0 +762 34 training.batch_size 2.0 +762 35 model.embedding_dim 0.0 +762 35 model.relation_dim 2.0 +762 35 optimizer.lr 0.047764394314283226 +762 35 negative_sampler.num_negs_per_pos 0.0 +762 35 training.batch_size 2.0 +762 36 model.embedding_dim 2.0 +762 36 model.relation_dim 2.0 +762 36 optimizer.lr 
0.0045827809053873405 +762 36 negative_sampler.num_negs_per_pos 59.0 +762 36 training.batch_size 2.0 +762 37 model.embedding_dim 2.0 +762 37 model.relation_dim 1.0 +762 37 optimizer.lr 0.05556599558812088 +762 37 negative_sampler.num_negs_per_pos 52.0 +762 37 training.batch_size 0.0 +762 38 model.embedding_dim 1.0 +762 38 model.relation_dim 1.0 +762 38 optimizer.lr 0.0014760073188895466 +762 38 negative_sampler.num_negs_per_pos 61.0 +762 38 training.batch_size 0.0 +762 39 model.embedding_dim 0.0 +762 39 model.relation_dim 2.0 +762 39 optimizer.lr 0.02635945437406197 +762 39 negative_sampler.num_negs_per_pos 0.0 +762 39 training.batch_size 1.0 +762 40 model.embedding_dim 0.0 +762 40 model.relation_dim 2.0 +762 40 optimizer.lr 0.08732986200538623 +762 40 negative_sampler.num_negs_per_pos 51.0 +762 40 training.batch_size 2.0 +762 41 model.embedding_dim 0.0 +762 41 model.relation_dim 1.0 +762 41 optimizer.lr 0.07767029761976985 +762 41 negative_sampler.num_negs_per_pos 37.0 +762 41 training.batch_size 2.0 +762 42 model.embedding_dim 0.0 +762 42 model.relation_dim 1.0 +762 42 optimizer.lr 0.04202856755609964 +762 42 negative_sampler.num_negs_per_pos 88.0 +762 42 training.batch_size 2.0 +762 43 model.embedding_dim 0.0 +762 43 model.relation_dim 0.0 +762 43 optimizer.lr 0.002043086872918683 +762 43 negative_sampler.num_negs_per_pos 0.0 +762 43 training.batch_size 2.0 +762 44 model.embedding_dim 2.0 +762 44 model.relation_dim 1.0 +762 44 optimizer.lr 0.024488901610204717 +762 44 negative_sampler.num_negs_per_pos 97.0 +762 44 training.batch_size 2.0 +762 45 model.embedding_dim 2.0 +762 45 model.relation_dim 0.0 +762 45 optimizer.lr 0.0545967862703397 +762 45 negative_sampler.num_negs_per_pos 77.0 +762 45 training.batch_size 1.0 +762 46 model.embedding_dim 2.0 +762 46 model.relation_dim 0.0 +762 46 optimizer.lr 0.06211355141630654 +762 46 negative_sampler.num_negs_per_pos 18.0 +762 46 training.batch_size 0.0 +762 47 model.embedding_dim 0.0 +762 47 model.relation_dim 1.0 +762 
47 optimizer.lr 0.008579165238029826 +762 47 negative_sampler.num_negs_per_pos 53.0 +762 47 training.batch_size 0.0 +762 48 model.embedding_dim 1.0 +762 48 model.relation_dim 2.0 +762 48 optimizer.lr 0.004524055091769412 +762 48 negative_sampler.num_negs_per_pos 80.0 +762 48 training.batch_size 2.0 +762 49 model.embedding_dim 1.0 +762 49 model.relation_dim 0.0 +762 49 optimizer.lr 0.008056731695902764 +762 49 negative_sampler.num_negs_per_pos 87.0 +762 49 training.batch_size 0.0 +762 50 model.embedding_dim 2.0 +762 50 model.relation_dim 2.0 +762 50 optimizer.lr 0.003464714635663963 +762 50 negative_sampler.num_negs_per_pos 42.0 +762 50 training.batch_size 0.0 +762 51 model.embedding_dim 1.0 +762 51 model.relation_dim 1.0 +762 51 optimizer.lr 0.005808781861670596 +762 51 negative_sampler.num_negs_per_pos 49.0 +762 51 training.batch_size 1.0 +762 52 model.embedding_dim 0.0 +762 52 model.relation_dim 1.0 +762 52 optimizer.lr 0.0017553021627623427 +762 52 negative_sampler.num_negs_per_pos 65.0 +762 52 training.batch_size 0.0 +762 53 model.embedding_dim 0.0 +762 53 model.relation_dim 2.0 +762 53 optimizer.lr 0.001654244888047197 +762 53 negative_sampler.num_negs_per_pos 80.0 +762 53 training.batch_size 2.0 +762 54 model.embedding_dim 2.0 +762 54 model.relation_dim 0.0 +762 54 optimizer.lr 0.021996307178305856 +762 54 negative_sampler.num_negs_per_pos 1.0 +762 54 training.batch_size 2.0 +762 55 model.embedding_dim 0.0 +762 55 model.relation_dim 0.0 +762 55 optimizer.lr 0.001039418455162999 +762 55 negative_sampler.num_negs_per_pos 35.0 +762 55 training.batch_size 1.0 +762 56 model.embedding_dim 0.0 +762 56 model.relation_dim 1.0 +762 56 optimizer.lr 0.002457643731815938 +762 56 negative_sampler.num_negs_per_pos 9.0 +762 56 training.batch_size 2.0 +762 57 model.embedding_dim 0.0 +762 57 model.relation_dim 1.0 +762 57 optimizer.lr 0.0010720825914980413 +762 57 negative_sampler.num_negs_per_pos 50.0 +762 57 training.batch_size 0.0 +762 58 model.embedding_dim 0.0 +762 58 
model.relation_dim 2.0 +762 58 optimizer.lr 0.0010641823655522312 +762 58 negative_sampler.num_negs_per_pos 26.0 +762 58 training.batch_size 0.0 +762 59 model.embedding_dim 2.0 +762 59 model.relation_dim 1.0 +762 59 optimizer.lr 0.05575624939562811 +762 59 negative_sampler.num_negs_per_pos 3.0 +762 59 training.batch_size 2.0 +762 60 model.embedding_dim 1.0 +762 60 model.relation_dim 0.0 +762 60 optimizer.lr 0.0021584877873013155 +762 60 negative_sampler.num_negs_per_pos 36.0 +762 60 training.batch_size 0.0 +762 61 model.embedding_dim 1.0 +762 61 model.relation_dim 1.0 +762 61 optimizer.lr 0.006909242175671699 +762 61 negative_sampler.num_negs_per_pos 73.0 +762 61 training.batch_size 1.0 +762 62 model.embedding_dim 2.0 +762 62 model.relation_dim 2.0 +762 62 optimizer.lr 0.04611377857782901 +762 62 negative_sampler.num_negs_per_pos 55.0 +762 62 training.batch_size 0.0 +762 63 model.embedding_dim 2.0 +762 63 model.relation_dim 2.0 +762 63 optimizer.lr 0.010183155078722925 +762 63 negative_sampler.num_negs_per_pos 75.0 +762 63 training.batch_size 2.0 +762 64 model.embedding_dim 1.0 +762 64 model.relation_dim 1.0 +762 64 optimizer.lr 0.003574989196312323 +762 64 negative_sampler.num_negs_per_pos 31.0 +762 64 training.batch_size 1.0 +762 65 model.embedding_dim 0.0 +762 65 model.relation_dim 2.0 +762 65 optimizer.lr 0.005051667557963868 +762 65 negative_sampler.num_negs_per_pos 26.0 +762 65 training.batch_size 0.0 +762 66 model.embedding_dim 1.0 +762 66 model.relation_dim 2.0 +762 66 optimizer.lr 0.029149498129149642 +762 66 negative_sampler.num_negs_per_pos 78.0 +762 66 training.batch_size 2.0 +762 67 model.embedding_dim 1.0 +762 67 model.relation_dim 1.0 +762 67 optimizer.lr 0.0015879488533783713 +762 67 negative_sampler.num_negs_per_pos 71.0 +762 67 training.batch_size 2.0 +762 68 model.embedding_dim 0.0 +762 68 model.relation_dim 1.0 +762 68 optimizer.lr 0.004833539495812273 +762 68 negative_sampler.num_negs_per_pos 18.0 +762 68 training.batch_size 2.0 +762 69 
model.embedding_dim 0.0 +762 69 model.relation_dim 2.0 +762 69 optimizer.lr 0.0015521839271033086 +762 69 negative_sampler.num_negs_per_pos 84.0 +762 69 training.batch_size 0.0 +762 70 model.embedding_dim 2.0 +762 70 model.relation_dim 2.0 +762 70 optimizer.lr 0.04740656135205994 +762 70 negative_sampler.num_negs_per_pos 47.0 +762 70 training.batch_size 1.0 +762 71 model.embedding_dim 2.0 +762 71 model.relation_dim 1.0 +762 71 optimizer.lr 0.060237256528974326 +762 71 negative_sampler.num_negs_per_pos 79.0 +762 71 training.batch_size 2.0 +762 72 model.embedding_dim 2.0 +762 72 model.relation_dim 1.0 +762 72 optimizer.lr 0.0769274302472198 +762 72 negative_sampler.num_negs_per_pos 67.0 +762 72 training.batch_size 0.0 +762 73 model.embedding_dim 0.0 +762 73 model.relation_dim 2.0 +762 73 optimizer.lr 0.005072942951386719 +762 73 negative_sampler.num_negs_per_pos 70.0 +762 73 training.batch_size 2.0 +762 74 model.embedding_dim 1.0 +762 74 model.relation_dim 0.0 +762 74 optimizer.lr 0.001807306083493889 +762 74 negative_sampler.num_negs_per_pos 73.0 +762 74 training.batch_size 0.0 +762 75 model.embedding_dim 0.0 +762 75 model.relation_dim 0.0 +762 75 optimizer.lr 0.006613975581125588 +762 75 negative_sampler.num_negs_per_pos 42.0 +762 75 training.batch_size 0.0 +762 76 model.embedding_dim 2.0 +762 76 model.relation_dim 1.0 +762 76 optimizer.lr 0.012560731300297176 +762 76 negative_sampler.num_negs_per_pos 19.0 +762 76 training.batch_size 1.0 +762 77 model.embedding_dim 0.0 +762 77 model.relation_dim 2.0 +762 77 optimizer.lr 0.08234427567321416 +762 77 negative_sampler.num_negs_per_pos 27.0 +762 77 training.batch_size 2.0 +762 78 model.embedding_dim 0.0 +762 78 model.relation_dim 0.0 +762 78 optimizer.lr 0.0026794345517407174 +762 78 negative_sampler.num_negs_per_pos 74.0 +762 78 training.batch_size 2.0 +762 79 model.embedding_dim 0.0 +762 79 model.relation_dim 0.0 +762 79 optimizer.lr 0.0012983205174840451 +762 79 negative_sampler.num_negs_per_pos 60.0 +762 79 
training.batch_size 0.0 +762 80 model.embedding_dim 2.0 +762 80 model.relation_dim 2.0 +762 80 optimizer.lr 0.012987123141604658 +762 80 negative_sampler.num_negs_per_pos 55.0 +762 80 training.batch_size 1.0 +762 81 model.embedding_dim 2.0 +762 81 model.relation_dim 2.0 +762 81 optimizer.lr 0.04139182398372517 +762 81 negative_sampler.num_negs_per_pos 61.0 +762 81 training.batch_size 0.0 +762 82 model.embedding_dim 2.0 +762 82 model.relation_dim 0.0 +762 82 optimizer.lr 0.0046025414931364795 +762 82 negative_sampler.num_negs_per_pos 37.0 +762 82 training.batch_size 1.0 +762 83 model.embedding_dim 0.0 +762 83 model.relation_dim 2.0 +762 83 optimizer.lr 0.01842765717136672 +762 83 negative_sampler.num_negs_per_pos 51.0 +762 83 training.batch_size 2.0 +762 84 model.embedding_dim 2.0 +762 84 model.relation_dim 2.0 +762 84 optimizer.lr 0.0012818780291392292 +762 84 negative_sampler.num_negs_per_pos 60.0 +762 84 training.batch_size 1.0 +762 85 model.embedding_dim 0.0 +762 85 model.relation_dim 0.0 +762 85 optimizer.lr 0.014287549408511693 +762 85 negative_sampler.num_negs_per_pos 34.0 +762 85 training.batch_size 0.0 +762 86 model.embedding_dim 0.0 +762 86 model.relation_dim 1.0 +762 86 optimizer.lr 0.0020895008786290348 +762 86 negative_sampler.num_negs_per_pos 40.0 +762 86 training.batch_size 1.0 +762 87 model.embedding_dim 0.0 +762 87 model.relation_dim 0.0 +762 87 optimizer.lr 0.0033439081773616376 +762 87 negative_sampler.num_negs_per_pos 62.0 +762 87 training.batch_size 0.0 +762 88 model.embedding_dim 1.0 +762 88 model.relation_dim 2.0 +762 88 optimizer.lr 0.02627433206114209 +762 88 negative_sampler.num_negs_per_pos 24.0 +762 88 training.batch_size 0.0 +762 89 model.embedding_dim 2.0 +762 89 model.relation_dim 0.0 +762 89 optimizer.lr 0.003886202860684781 +762 89 negative_sampler.num_negs_per_pos 34.0 +762 89 training.batch_size 1.0 +762 90 model.embedding_dim 0.0 +762 90 model.relation_dim 2.0 +762 90 optimizer.lr 0.05939751732782327 +762 90 
negative_sampler.num_negs_per_pos 49.0 +762 90 training.batch_size 1.0 +762 91 model.embedding_dim 1.0 +762 91 model.relation_dim 0.0 +762 91 optimizer.lr 0.0029251204468902637 +762 91 negative_sampler.num_negs_per_pos 37.0 +762 91 training.batch_size 2.0 +762 92 model.embedding_dim 0.0 +762 92 model.relation_dim 2.0 +762 92 optimizer.lr 0.05342676972427412 +762 92 negative_sampler.num_negs_per_pos 19.0 +762 92 training.batch_size 0.0 +762 93 model.embedding_dim 0.0 +762 93 model.relation_dim 2.0 +762 93 optimizer.lr 0.010163514875118067 +762 93 negative_sampler.num_negs_per_pos 99.0 +762 93 training.batch_size 0.0 +762 94 model.embedding_dim 0.0 +762 94 model.relation_dim 2.0 +762 94 optimizer.lr 0.002680068198403629 +762 94 negative_sampler.num_negs_per_pos 88.0 +762 94 training.batch_size 0.0 +762 95 model.embedding_dim 0.0 +762 95 model.relation_dim 1.0 +762 95 optimizer.lr 0.03114069829345108 +762 95 negative_sampler.num_negs_per_pos 9.0 +762 95 training.batch_size 1.0 +762 96 model.embedding_dim 0.0 +762 96 model.relation_dim 0.0 +762 96 optimizer.lr 0.00788528912909028 +762 96 negative_sampler.num_negs_per_pos 23.0 +762 96 training.batch_size 2.0 +762 97 model.embedding_dim 2.0 +762 97 model.relation_dim 1.0 +762 97 optimizer.lr 0.002953371845608875 +762 97 negative_sampler.num_negs_per_pos 82.0 +762 97 training.batch_size 2.0 +762 98 model.embedding_dim 2.0 +762 98 model.relation_dim 1.0 +762 98 optimizer.lr 0.00976229932790349 +762 98 negative_sampler.num_negs_per_pos 19.0 +762 98 training.batch_size 0.0 +762 99 model.embedding_dim 1.0 +762 99 model.relation_dim 1.0 +762 99 optimizer.lr 0.0019295984514096367 +762 99 negative_sampler.num_negs_per_pos 10.0 +762 99 training.batch_size 1.0 +762 100 model.embedding_dim 1.0 +762 100 model.relation_dim 1.0 +762 100 optimizer.lr 0.0044086162644812405 +762 100 negative_sampler.num_negs_per_pos 7.0 +762 100 training.batch_size 1.0 +762 1 dataset """kinships""" +762 1 model """transd""" +762 1 loss """softplus""" 
+762 1 regularizer """no""" +762 1 optimizer """adam""" +762 1 training_loop """owa""" +762 1 negative_sampler """basic""" +762 1 evaluator """rankbased""" +762 2 dataset """kinships""" +762 2 model """transd""" +762 2 loss """softplus""" +762 2 regularizer """no""" +762 2 optimizer """adam""" +762 2 training_loop """owa""" +762 2 negative_sampler """basic""" +762 2 evaluator """rankbased""" +762 3 dataset """kinships""" +762 3 model """transd""" +762 3 loss """softplus""" +762 3 regularizer """no""" +762 3 optimizer """adam""" +762 3 training_loop """owa""" +762 3 negative_sampler """basic""" +762 3 evaluator """rankbased""" +762 4 dataset """kinships""" +762 4 model """transd""" +762 4 loss """softplus""" +762 4 regularizer """no""" +762 4 optimizer """adam""" +762 4 training_loop """owa""" +762 4 negative_sampler """basic""" +762 4 evaluator """rankbased""" +762 5 dataset """kinships""" +762 5 model """transd""" +762 5 loss """softplus""" +762 5 regularizer """no""" +762 5 optimizer """adam""" +762 5 training_loop """owa""" +762 5 negative_sampler """basic""" +762 5 evaluator """rankbased""" +762 6 dataset """kinships""" +762 6 model """transd""" +762 6 loss """softplus""" +762 6 regularizer """no""" +762 6 optimizer """adam""" +762 6 training_loop """owa""" +762 6 negative_sampler """basic""" +762 6 evaluator """rankbased""" +762 7 dataset """kinships""" +762 7 model """transd""" +762 7 loss """softplus""" +762 7 regularizer """no""" +762 7 optimizer """adam""" +762 7 training_loop """owa""" +762 7 negative_sampler """basic""" +762 7 evaluator """rankbased""" +762 8 dataset """kinships""" +762 8 model """transd""" +762 8 loss """softplus""" +762 8 regularizer """no""" +762 8 optimizer """adam""" +762 8 training_loop """owa""" +762 8 negative_sampler """basic""" +762 8 evaluator """rankbased""" +762 9 dataset """kinships""" +762 9 model """transd""" +762 9 loss """softplus""" +762 9 regularizer """no""" +762 9 optimizer """adam""" +762 9 training_loop """owa""" 
+762 9 negative_sampler """basic""" +762 9 evaluator """rankbased""" +762 10 dataset """kinships""" +762 10 model """transd""" +762 10 loss """softplus""" +762 10 regularizer """no""" +762 10 optimizer """adam""" +762 10 training_loop """owa""" +762 10 negative_sampler """basic""" +762 10 evaluator """rankbased""" +762 11 dataset """kinships""" +762 11 model """transd""" +762 11 loss """softplus""" +762 11 regularizer """no""" +762 11 optimizer """adam""" +762 11 training_loop """owa""" +762 11 negative_sampler """basic""" +762 11 evaluator """rankbased""" +762 12 dataset """kinships""" +762 12 model """transd""" +762 12 loss """softplus""" +762 12 regularizer """no""" +762 12 optimizer """adam""" +762 12 training_loop """owa""" +762 12 negative_sampler """basic""" +762 12 evaluator """rankbased""" +762 13 dataset """kinships""" +762 13 model """transd""" +762 13 loss """softplus""" +762 13 regularizer """no""" +762 13 optimizer """adam""" +762 13 training_loop """owa""" +762 13 negative_sampler """basic""" +762 13 evaluator """rankbased""" +762 14 dataset """kinships""" +762 14 model """transd""" +762 14 loss """softplus""" +762 14 regularizer """no""" +762 14 optimizer """adam""" +762 14 training_loop """owa""" +762 14 negative_sampler """basic""" +762 14 evaluator """rankbased""" +762 15 dataset """kinships""" +762 15 model """transd""" +762 15 loss """softplus""" +762 15 regularizer """no""" +762 15 optimizer """adam""" +762 15 training_loop """owa""" +762 15 negative_sampler """basic""" +762 15 evaluator """rankbased""" +762 16 dataset """kinships""" +762 16 model """transd""" +762 16 loss """softplus""" +762 16 regularizer """no""" +762 16 optimizer """adam""" +762 16 training_loop """owa""" +762 16 negative_sampler """basic""" +762 16 evaluator """rankbased""" +762 17 dataset """kinships""" +762 17 model """transd""" +762 17 loss """softplus""" +762 17 regularizer """no""" +762 17 optimizer """adam""" +762 17 training_loop """owa""" +762 17 negative_sampler 
"""basic""" +762 17 evaluator """rankbased""" +762 18 dataset """kinships""" +762 18 model """transd""" +762 18 loss """softplus""" +762 18 regularizer """no""" +762 18 optimizer """adam""" +762 18 training_loop """owa""" +762 18 negative_sampler """basic""" +762 18 evaluator """rankbased""" +762 19 dataset """kinships""" +762 19 model """transd""" +762 19 loss """softplus""" +762 19 regularizer """no""" +762 19 optimizer """adam""" +762 19 training_loop """owa""" +762 19 negative_sampler """basic""" +762 19 evaluator """rankbased""" +762 20 dataset """kinships""" +762 20 model """transd""" +762 20 loss """softplus""" +762 20 regularizer """no""" +762 20 optimizer """adam""" +762 20 training_loop """owa""" +762 20 negative_sampler """basic""" +762 20 evaluator """rankbased""" +762 21 dataset """kinships""" +762 21 model """transd""" +762 21 loss """softplus""" +762 21 regularizer """no""" +762 21 optimizer """adam""" +762 21 training_loop """owa""" +762 21 negative_sampler """basic""" +762 21 evaluator """rankbased""" +762 22 dataset """kinships""" +762 22 model """transd""" +762 22 loss """softplus""" +762 22 regularizer """no""" +762 22 optimizer """adam""" +762 22 training_loop """owa""" +762 22 negative_sampler """basic""" +762 22 evaluator """rankbased""" +762 23 dataset """kinships""" +762 23 model """transd""" +762 23 loss """softplus""" +762 23 regularizer """no""" +762 23 optimizer """adam""" +762 23 training_loop """owa""" +762 23 negative_sampler """basic""" +762 23 evaluator """rankbased""" +762 24 dataset """kinships""" +762 24 model """transd""" +762 24 loss """softplus""" +762 24 regularizer """no""" +762 24 optimizer """adam""" +762 24 training_loop """owa""" +762 24 negative_sampler """basic""" +762 24 evaluator """rankbased""" +762 25 dataset """kinships""" +762 25 model """transd""" +762 25 loss """softplus""" +762 25 regularizer """no""" +762 25 optimizer """adam""" +762 25 training_loop """owa""" +762 25 negative_sampler """basic""" +762 25 
evaluator """rankbased""" +762 26 dataset """kinships""" +762 26 model """transd""" +762 26 loss """softplus""" +762 26 regularizer """no""" +762 26 optimizer """adam""" +762 26 training_loop """owa""" +762 26 negative_sampler """basic""" +762 26 evaluator """rankbased""" +762 27 dataset """kinships""" +762 27 model """transd""" +762 27 loss """softplus""" +762 27 regularizer """no""" +762 27 optimizer """adam""" +762 27 training_loop """owa""" +762 27 negative_sampler """basic""" +762 27 evaluator """rankbased""" +762 28 dataset """kinships""" +762 28 model """transd""" +762 28 loss """softplus""" +762 28 regularizer """no""" +762 28 optimizer """adam""" +762 28 training_loop """owa""" +762 28 negative_sampler """basic""" +762 28 evaluator """rankbased""" +762 29 dataset """kinships""" +762 29 model """transd""" +762 29 loss """softplus""" +762 29 regularizer """no""" +762 29 optimizer """adam""" +762 29 training_loop """owa""" +762 29 negative_sampler """basic""" +762 29 evaluator """rankbased""" +762 30 dataset """kinships""" +762 30 model """transd""" +762 30 loss """softplus""" +762 30 regularizer """no""" +762 30 optimizer """adam""" +762 30 training_loop """owa""" +762 30 negative_sampler """basic""" +762 30 evaluator """rankbased""" +762 31 dataset """kinships""" +762 31 model """transd""" +762 31 loss """softplus""" +762 31 regularizer """no""" +762 31 optimizer """adam""" +762 31 training_loop """owa""" +762 31 negative_sampler """basic""" +762 31 evaluator """rankbased""" +762 32 dataset """kinships""" +762 32 model """transd""" +762 32 loss """softplus""" +762 32 regularizer """no""" +762 32 optimizer """adam""" +762 32 training_loop """owa""" +762 32 negative_sampler """basic""" +762 32 evaluator """rankbased""" +762 33 dataset """kinships""" +762 33 model """transd""" +762 33 loss """softplus""" +762 33 regularizer """no""" +762 33 optimizer """adam""" +762 33 training_loop """owa""" +762 33 negative_sampler """basic""" +762 33 evaluator 
"""rankbased""" +762 34 dataset """kinships""" +762 34 model """transd""" +762 34 loss """softplus""" +762 34 regularizer """no""" +762 34 optimizer """adam""" +762 34 training_loop """owa""" +762 34 negative_sampler """basic""" +762 34 evaluator """rankbased""" +762 35 dataset """kinships""" +762 35 model """transd""" +762 35 loss """softplus""" +762 35 regularizer """no""" +762 35 optimizer """adam""" +762 35 training_loop """owa""" +762 35 negative_sampler """basic""" +762 35 evaluator """rankbased""" +762 36 dataset """kinships""" +762 36 model """transd""" +762 36 loss """softplus""" +762 36 regularizer """no""" +762 36 optimizer """adam""" +762 36 training_loop """owa""" +762 36 negative_sampler """basic""" +762 36 evaluator """rankbased""" +762 37 dataset """kinships""" +762 37 model """transd""" +762 37 loss """softplus""" +762 37 regularizer """no""" +762 37 optimizer """adam""" +762 37 training_loop """owa""" +762 37 negative_sampler """basic""" +762 37 evaluator """rankbased""" +762 38 dataset """kinships""" +762 38 model """transd""" +762 38 loss """softplus""" +762 38 regularizer """no""" +762 38 optimizer """adam""" +762 38 training_loop """owa""" +762 38 negative_sampler """basic""" +762 38 evaluator """rankbased""" +762 39 dataset """kinships""" +762 39 model """transd""" +762 39 loss """softplus""" +762 39 regularizer """no""" +762 39 optimizer """adam""" +762 39 training_loop """owa""" +762 39 negative_sampler """basic""" +762 39 evaluator """rankbased""" +762 40 dataset """kinships""" +762 40 model """transd""" +762 40 loss """softplus""" +762 40 regularizer """no""" +762 40 optimizer """adam""" +762 40 training_loop """owa""" +762 40 negative_sampler """basic""" +762 40 evaluator """rankbased""" +762 41 dataset """kinships""" +762 41 model """transd""" +762 41 loss """softplus""" +762 41 regularizer """no""" +762 41 optimizer """adam""" +762 41 training_loop """owa""" +762 41 negative_sampler """basic""" +762 41 evaluator """rankbased""" +762 42 
dataset """kinships""" +762 42 model """transd""" +762 42 loss """softplus""" +762 42 regularizer """no""" +762 42 optimizer """adam""" +762 42 training_loop """owa""" +762 42 negative_sampler """basic""" +762 42 evaluator """rankbased""" +762 43 dataset """kinships""" +762 43 model """transd""" +762 43 loss """softplus""" +762 43 regularizer """no""" +762 43 optimizer """adam""" +762 43 training_loop """owa""" +762 43 negative_sampler """basic""" +762 43 evaluator """rankbased""" +762 44 dataset """kinships""" +762 44 model """transd""" +762 44 loss """softplus""" +762 44 regularizer """no""" +762 44 optimizer """adam""" +762 44 training_loop """owa""" +762 44 negative_sampler """basic""" +762 44 evaluator """rankbased""" +762 45 dataset """kinships""" +762 45 model """transd""" +762 45 loss """softplus""" +762 45 regularizer """no""" +762 45 optimizer """adam""" +762 45 training_loop """owa""" +762 45 negative_sampler """basic""" +762 45 evaluator """rankbased""" +762 46 dataset """kinships""" +762 46 model """transd""" +762 46 loss """softplus""" +762 46 regularizer """no""" +762 46 optimizer """adam""" +762 46 training_loop """owa""" +762 46 negative_sampler """basic""" +762 46 evaluator """rankbased""" +762 47 dataset """kinships""" +762 47 model """transd""" +762 47 loss """softplus""" +762 47 regularizer """no""" +762 47 optimizer """adam""" +762 47 training_loop """owa""" +762 47 negative_sampler """basic""" +762 47 evaluator """rankbased""" +762 48 dataset """kinships""" +762 48 model """transd""" +762 48 loss """softplus""" +762 48 regularizer """no""" +762 48 optimizer """adam""" +762 48 training_loop """owa""" +762 48 negative_sampler """basic""" +762 48 evaluator """rankbased""" +762 49 dataset """kinships""" +762 49 model """transd""" +762 49 loss """softplus""" +762 49 regularizer """no""" +762 49 optimizer """adam""" +762 49 training_loop """owa""" +762 49 negative_sampler """basic""" +762 49 evaluator """rankbased""" +762 50 dataset """kinships""" 
+762 50 model """transd""" +762 50 loss """softplus""" +762 50 regularizer """no""" +762 50 optimizer """adam""" +762 50 training_loop """owa""" +762 50 negative_sampler """basic""" +762 50 evaluator """rankbased""" +762 51 dataset """kinships""" +762 51 model """transd""" +762 51 loss """softplus""" +762 51 regularizer """no""" +762 51 optimizer """adam""" +762 51 training_loop """owa""" +762 51 negative_sampler """basic""" +762 51 evaluator """rankbased""" +762 52 dataset """kinships""" +762 52 model """transd""" +762 52 loss """softplus""" +762 52 regularizer """no""" +762 52 optimizer """adam""" +762 52 training_loop """owa""" +762 52 negative_sampler """basic""" +762 52 evaluator """rankbased""" +762 53 dataset """kinships""" +762 53 model """transd""" +762 53 loss """softplus""" +762 53 regularizer """no""" +762 53 optimizer """adam""" +762 53 training_loop """owa""" +762 53 negative_sampler """basic""" +762 53 evaluator """rankbased""" +762 54 dataset """kinships""" +762 54 model """transd""" +762 54 loss """softplus""" +762 54 regularizer """no""" +762 54 optimizer """adam""" +762 54 training_loop """owa""" +762 54 negative_sampler """basic""" +762 54 evaluator """rankbased""" +762 55 dataset """kinships""" +762 55 model """transd""" +762 55 loss """softplus""" +762 55 regularizer """no""" +762 55 optimizer """adam""" +762 55 training_loop """owa""" +762 55 negative_sampler """basic""" +762 55 evaluator """rankbased""" +762 56 dataset """kinships""" +762 56 model """transd""" +762 56 loss """softplus""" +762 56 regularizer """no""" +762 56 optimizer """adam""" +762 56 training_loop """owa""" +762 56 negative_sampler """basic""" +762 56 evaluator """rankbased""" +762 57 dataset """kinships""" +762 57 model """transd""" +762 57 loss """softplus""" +762 57 regularizer """no""" +762 57 optimizer """adam""" +762 57 training_loop """owa""" +762 57 negative_sampler """basic""" +762 57 evaluator """rankbased""" +762 58 dataset """kinships""" +762 58 model 
"""transd""" +762 58 loss """softplus""" +762 58 regularizer """no""" +762 58 optimizer """adam""" +762 58 training_loop """owa""" +762 58 negative_sampler """basic""" +762 58 evaluator """rankbased""" +762 59 dataset """kinships""" +762 59 model """transd""" +762 59 loss """softplus""" +762 59 regularizer """no""" +762 59 optimizer """adam""" +762 59 training_loop """owa""" +762 59 negative_sampler """basic""" +762 59 evaluator """rankbased""" +762 60 dataset """kinships""" +762 60 model """transd""" +762 60 loss """softplus""" +762 60 regularizer """no""" +762 60 optimizer """adam""" +762 60 training_loop """owa""" +762 60 negative_sampler """basic""" +762 60 evaluator """rankbased""" +762 61 dataset """kinships""" +762 61 model """transd""" +762 61 loss """softplus""" +762 61 regularizer """no""" +762 61 optimizer """adam""" +762 61 training_loop """owa""" +762 61 negative_sampler """basic""" +762 61 evaluator """rankbased""" +762 62 dataset """kinships""" +762 62 model """transd""" +762 62 loss """softplus""" +762 62 regularizer """no""" +762 62 optimizer """adam""" +762 62 training_loop """owa""" +762 62 negative_sampler """basic""" +762 62 evaluator """rankbased""" +762 63 dataset """kinships""" +762 63 model """transd""" +762 63 loss """softplus""" +762 63 regularizer """no""" +762 63 optimizer """adam""" +762 63 training_loop """owa""" +762 63 negative_sampler """basic""" +762 63 evaluator """rankbased""" +762 64 dataset """kinships""" +762 64 model """transd""" +762 64 loss """softplus""" +762 64 regularizer """no""" +762 64 optimizer """adam""" +762 64 training_loop """owa""" +762 64 negative_sampler """basic""" +762 64 evaluator """rankbased""" +762 65 dataset """kinships""" +762 65 model """transd""" +762 65 loss """softplus""" +762 65 regularizer """no""" +762 65 optimizer """adam""" +762 65 training_loop """owa""" +762 65 negative_sampler """basic""" +762 65 evaluator """rankbased""" +762 66 dataset """kinships""" +762 66 model """transd""" +762 66 
loss """softplus""" +762 66 regularizer """no""" +762 66 optimizer """adam""" +762 66 training_loop """owa""" +762 66 negative_sampler """basic""" +762 66 evaluator """rankbased""" +762 67 dataset """kinships""" +762 67 model """transd""" +762 67 loss """softplus""" +762 67 regularizer """no""" +762 67 optimizer """adam""" +762 67 training_loop """owa""" +762 67 negative_sampler """basic""" +762 67 evaluator """rankbased""" +762 68 dataset """kinships""" +762 68 model """transd""" +762 68 loss """softplus""" +762 68 regularizer """no""" +762 68 optimizer """adam""" +762 68 training_loop """owa""" +762 68 negative_sampler """basic""" +762 68 evaluator """rankbased""" +762 69 dataset """kinships""" +762 69 model """transd""" +762 69 loss """softplus""" +762 69 regularizer """no""" +762 69 optimizer """adam""" +762 69 training_loop """owa""" +762 69 negative_sampler """basic""" +762 69 evaluator """rankbased""" +762 70 dataset """kinships""" +762 70 model """transd""" +762 70 loss """softplus""" +762 70 regularizer """no""" +762 70 optimizer """adam""" +762 70 training_loop """owa""" +762 70 negative_sampler """basic""" +762 70 evaluator """rankbased""" +762 71 dataset """kinships""" +762 71 model """transd""" +762 71 loss """softplus""" +762 71 regularizer """no""" +762 71 optimizer """adam""" +762 71 training_loop """owa""" +762 71 negative_sampler """basic""" +762 71 evaluator """rankbased""" +762 72 dataset """kinships""" +762 72 model """transd""" +762 72 loss """softplus""" +762 72 regularizer """no""" +762 72 optimizer """adam""" +762 72 training_loop """owa""" +762 72 negative_sampler """basic""" +762 72 evaluator """rankbased""" +762 73 dataset """kinships""" +762 73 model """transd""" +762 73 loss """softplus""" +762 73 regularizer """no""" +762 73 optimizer """adam""" +762 73 training_loop """owa""" +762 73 negative_sampler """basic""" +762 73 evaluator """rankbased""" +762 74 dataset """kinships""" +762 74 model """transd""" +762 74 loss """softplus""" 
+762 74 regularizer """no""" +762 74 optimizer """adam""" +762 74 training_loop """owa""" +762 74 negative_sampler """basic""" +762 74 evaluator """rankbased""" +762 75 dataset """kinships""" +762 75 model """transd""" +762 75 loss """softplus""" +762 75 regularizer """no""" +762 75 optimizer """adam""" +762 75 training_loop """owa""" +762 75 negative_sampler """basic""" +762 75 evaluator """rankbased""" +762 76 dataset """kinships""" +762 76 model """transd""" +762 76 loss """softplus""" +762 76 regularizer """no""" +762 76 optimizer """adam""" +762 76 training_loop """owa""" +762 76 negative_sampler """basic""" +762 76 evaluator """rankbased""" +762 77 dataset """kinships""" +762 77 model """transd""" +762 77 loss """softplus""" +762 77 regularizer """no""" +762 77 optimizer """adam""" +762 77 training_loop """owa""" +762 77 negative_sampler """basic""" +762 77 evaluator """rankbased""" +762 78 dataset """kinships""" +762 78 model """transd""" +762 78 loss """softplus""" +762 78 regularizer """no""" +762 78 optimizer """adam""" +762 78 training_loop """owa""" +762 78 negative_sampler """basic""" +762 78 evaluator """rankbased""" +762 79 dataset """kinships""" +762 79 model """transd""" +762 79 loss """softplus""" +762 79 regularizer """no""" +762 79 optimizer """adam""" +762 79 training_loop """owa""" +762 79 negative_sampler """basic""" +762 79 evaluator """rankbased""" +762 80 dataset """kinships""" +762 80 model """transd""" +762 80 loss """softplus""" +762 80 regularizer """no""" +762 80 optimizer """adam""" +762 80 training_loop """owa""" +762 80 negative_sampler """basic""" +762 80 evaluator """rankbased""" +762 81 dataset """kinships""" +762 81 model """transd""" +762 81 loss """softplus""" +762 81 regularizer """no""" +762 81 optimizer """adam""" +762 81 training_loop """owa""" +762 81 negative_sampler """basic""" +762 81 evaluator """rankbased""" +762 82 dataset """kinships""" +762 82 model """transd""" +762 82 loss """softplus""" +762 82 regularizer 
"""no""" +762 82 optimizer """adam""" +762 82 training_loop """owa""" +762 82 negative_sampler """basic""" +762 82 evaluator """rankbased""" +762 83 dataset """kinships""" +762 83 model """transd""" +762 83 loss """softplus""" +762 83 regularizer """no""" +762 83 optimizer """adam""" +762 83 training_loop """owa""" +762 83 negative_sampler """basic""" +762 83 evaluator """rankbased""" +762 84 dataset """kinships""" +762 84 model """transd""" +762 84 loss """softplus""" +762 84 regularizer """no""" +762 84 optimizer """adam""" +762 84 training_loop """owa""" +762 84 negative_sampler """basic""" +762 84 evaluator """rankbased""" +762 85 dataset """kinships""" +762 85 model """transd""" +762 85 loss """softplus""" +762 85 regularizer """no""" +762 85 optimizer """adam""" +762 85 training_loop """owa""" +762 85 negative_sampler """basic""" +762 85 evaluator """rankbased""" +762 86 dataset """kinships""" +762 86 model """transd""" +762 86 loss """softplus""" +762 86 regularizer """no""" +762 86 optimizer """adam""" +762 86 training_loop """owa""" +762 86 negative_sampler """basic""" +762 86 evaluator """rankbased""" +762 87 dataset """kinships""" +762 87 model """transd""" +762 87 loss """softplus""" +762 87 regularizer """no""" +762 87 optimizer """adam""" +762 87 training_loop """owa""" +762 87 negative_sampler """basic""" +762 87 evaluator """rankbased""" +762 88 dataset """kinships""" +762 88 model """transd""" +762 88 loss """softplus""" +762 88 regularizer """no""" +762 88 optimizer """adam""" +762 88 training_loop """owa""" +762 88 negative_sampler """basic""" +762 88 evaluator """rankbased""" +762 89 dataset """kinships""" +762 89 model """transd""" +762 89 loss """softplus""" +762 89 regularizer """no""" +762 89 optimizer """adam""" +762 89 training_loop """owa""" +762 89 negative_sampler """basic""" +762 89 evaluator """rankbased""" +762 90 dataset """kinships""" +762 90 model """transd""" +762 90 loss """softplus""" +762 90 regularizer """no""" +762 90 
optimizer """adam""" +762 90 training_loop """owa""" +762 90 negative_sampler """basic""" +762 90 evaluator """rankbased""" +762 91 dataset """kinships""" +762 91 model """transd""" +762 91 loss """softplus""" +762 91 regularizer """no""" +762 91 optimizer """adam""" +762 91 training_loop """owa""" +762 91 negative_sampler """basic""" +762 91 evaluator """rankbased""" +762 92 dataset """kinships""" +762 92 model """transd""" +762 92 loss """softplus""" +762 92 regularizer """no""" +762 92 optimizer """adam""" +762 92 training_loop """owa""" +762 92 negative_sampler """basic""" +762 92 evaluator """rankbased""" +762 93 dataset """kinships""" +762 93 model """transd""" +762 93 loss """softplus""" +762 93 regularizer """no""" +762 93 optimizer """adam""" +762 93 training_loop """owa""" +762 93 negative_sampler """basic""" +762 93 evaluator """rankbased""" +762 94 dataset """kinships""" +762 94 model """transd""" +762 94 loss """softplus""" +762 94 regularizer """no""" +762 94 optimizer """adam""" +762 94 training_loop """owa""" +762 94 negative_sampler """basic""" +762 94 evaluator """rankbased""" +762 95 dataset """kinships""" +762 95 model """transd""" +762 95 loss """softplus""" +762 95 regularizer """no""" +762 95 optimizer """adam""" +762 95 training_loop """owa""" +762 95 negative_sampler """basic""" +762 95 evaluator """rankbased""" +762 96 dataset """kinships""" +762 96 model """transd""" +762 96 loss """softplus""" +762 96 regularizer """no""" +762 96 optimizer """adam""" +762 96 training_loop """owa""" +762 96 negative_sampler """basic""" +762 96 evaluator """rankbased""" +762 97 dataset """kinships""" +762 97 model """transd""" +762 97 loss """softplus""" +762 97 regularizer """no""" +762 97 optimizer """adam""" +762 97 training_loop """owa""" +762 97 negative_sampler """basic""" +762 97 evaluator """rankbased""" +762 98 dataset """kinships""" +762 98 model """transd""" +762 98 loss """softplus""" +762 98 regularizer """no""" +762 98 optimizer """adam""" 
+762 98 training_loop """owa""" +762 98 negative_sampler """basic""" +762 98 evaluator """rankbased""" +762 99 dataset """kinships""" +762 99 model """transd""" +762 99 loss """softplus""" +762 99 regularizer """no""" +762 99 optimizer """adam""" +762 99 training_loop """owa""" +762 99 negative_sampler """basic""" +762 99 evaluator """rankbased""" +762 100 dataset """kinships""" +762 100 model """transd""" +762 100 loss """softplus""" +762 100 regularizer """no""" +762 100 optimizer """adam""" +762 100 training_loop """owa""" +762 100 negative_sampler """basic""" +762 100 evaluator """rankbased""" +763 1 model.embedding_dim 1.0 +763 1 model.relation_dim 1.0 +763 1 optimizer.lr 0.008191974684016591 +763 1 negative_sampler.num_negs_per_pos 0.0 +763 1 training.batch_size 2.0 +763 2 model.embedding_dim 0.0 +763 2 model.relation_dim 0.0 +763 2 optimizer.lr 0.012965000976639758 +763 2 negative_sampler.num_negs_per_pos 56.0 +763 2 training.batch_size 2.0 +763 3 model.embedding_dim 1.0 +763 3 model.relation_dim 0.0 +763 3 optimizer.lr 0.0011561563920157827 +763 3 negative_sampler.num_negs_per_pos 29.0 +763 3 training.batch_size 2.0 +763 4 model.embedding_dim 2.0 +763 4 model.relation_dim 0.0 +763 4 optimizer.lr 0.0022829780088269515 +763 4 negative_sampler.num_negs_per_pos 21.0 +763 4 training.batch_size 2.0 +763 5 model.embedding_dim 1.0 +763 5 model.relation_dim 1.0 +763 5 optimizer.lr 0.06739233488882813 +763 5 negative_sampler.num_negs_per_pos 23.0 +763 5 training.batch_size 2.0 +763 6 model.embedding_dim 0.0 +763 6 model.relation_dim 1.0 +763 6 optimizer.lr 0.07440257357942302 +763 6 negative_sampler.num_negs_per_pos 14.0 +763 6 training.batch_size 0.0 +763 7 model.embedding_dim 2.0 +763 7 model.relation_dim 0.0 +763 7 optimizer.lr 0.004218618773572807 +763 7 negative_sampler.num_negs_per_pos 47.0 +763 7 training.batch_size 2.0 +763 8 model.embedding_dim 0.0 +763 8 model.relation_dim 1.0 +763 8 optimizer.lr 0.021639371788206865 +763 8 negative_sampler.num_negs_per_pos 
69.0 +763 8 training.batch_size 0.0 +763 9 model.embedding_dim 2.0 +763 9 model.relation_dim 2.0 +763 9 optimizer.lr 0.01238759869317226 +763 9 negative_sampler.num_negs_per_pos 62.0 +763 9 training.batch_size 2.0 +763 10 model.embedding_dim 0.0 +763 10 model.relation_dim 0.0 +763 10 optimizer.lr 0.0010649330847224896 +763 10 negative_sampler.num_negs_per_pos 40.0 +763 10 training.batch_size 1.0 +763 11 model.embedding_dim 0.0 +763 11 model.relation_dim 2.0 +763 11 optimizer.lr 0.0019848407175707015 +763 11 negative_sampler.num_negs_per_pos 12.0 +763 11 training.batch_size 1.0 +763 12 model.embedding_dim 2.0 +763 12 model.relation_dim 2.0 +763 12 optimizer.lr 0.003979480254718183 +763 12 negative_sampler.num_negs_per_pos 56.0 +763 12 training.batch_size 0.0 +763 13 model.embedding_dim 2.0 +763 13 model.relation_dim 2.0 +763 13 optimizer.lr 0.016700816196293475 +763 13 negative_sampler.num_negs_per_pos 22.0 +763 13 training.batch_size 1.0 +763 14 model.embedding_dim 0.0 +763 14 model.relation_dim 2.0 +763 14 optimizer.lr 0.003593291833469147 +763 14 negative_sampler.num_negs_per_pos 81.0 +763 14 training.batch_size 2.0 +763 15 model.embedding_dim 1.0 +763 15 model.relation_dim 2.0 +763 15 optimizer.lr 0.04283361354929792 +763 15 negative_sampler.num_negs_per_pos 45.0 +763 15 training.batch_size 1.0 +763 16 model.embedding_dim 0.0 +763 16 model.relation_dim 1.0 +763 16 optimizer.lr 0.04105850191045634 +763 16 negative_sampler.num_negs_per_pos 49.0 +763 16 training.batch_size 2.0 +763 17 model.embedding_dim 1.0 +763 17 model.relation_dim 2.0 +763 17 optimizer.lr 0.09101658604562943 +763 17 negative_sampler.num_negs_per_pos 45.0 +763 17 training.batch_size 1.0 +763 18 model.embedding_dim 2.0 +763 18 model.relation_dim 2.0 +763 18 optimizer.lr 0.01892587514042787 +763 18 negative_sampler.num_negs_per_pos 57.0 +763 18 training.batch_size 2.0 +763 19 model.embedding_dim 0.0 +763 19 model.relation_dim 1.0 +763 19 optimizer.lr 0.059642388148887074 +763 19 
negative_sampler.num_negs_per_pos 20.0 +763 19 training.batch_size 0.0 +763 20 model.embedding_dim 0.0 +763 20 model.relation_dim 1.0 +763 20 optimizer.lr 0.0037668745604412988 +763 20 negative_sampler.num_negs_per_pos 59.0 +763 20 training.batch_size 2.0 +763 21 model.embedding_dim 0.0 +763 21 model.relation_dim 0.0 +763 21 optimizer.lr 0.045487227813123976 +763 21 negative_sampler.num_negs_per_pos 52.0 +763 21 training.batch_size 2.0 +763 22 model.embedding_dim 2.0 +763 22 model.relation_dim 1.0 +763 22 optimizer.lr 0.010452086099512979 +763 22 negative_sampler.num_negs_per_pos 20.0 +763 22 training.batch_size 2.0 +763 23 model.embedding_dim 1.0 +763 23 model.relation_dim 2.0 +763 23 optimizer.lr 0.013784647441696954 +763 23 negative_sampler.num_negs_per_pos 24.0 +763 23 training.batch_size 0.0 +763 24 model.embedding_dim 2.0 +763 24 model.relation_dim 1.0 +763 24 optimizer.lr 0.053768400272539374 +763 24 negative_sampler.num_negs_per_pos 96.0 +763 24 training.batch_size 0.0 +763 25 model.embedding_dim 1.0 +763 25 model.relation_dim 0.0 +763 25 optimizer.lr 0.05432256514254952 +763 25 negative_sampler.num_negs_per_pos 39.0 +763 25 training.batch_size 0.0 +763 26 model.embedding_dim 0.0 +763 26 model.relation_dim 0.0 +763 26 optimizer.lr 0.0038419833246398265 +763 26 negative_sampler.num_negs_per_pos 86.0 +763 26 training.batch_size 2.0 +763 27 model.embedding_dim 2.0 +763 27 model.relation_dim 2.0 +763 27 optimizer.lr 0.010316782346559372 +763 27 negative_sampler.num_negs_per_pos 16.0 +763 27 training.batch_size 1.0 +763 28 model.embedding_dim 0.0 +763 28 model.relation_dim 0.0 +763 28 optimizer.lr 0.009305569299439752 +763 28 negative_sampler.num_negs_per_pos 70.0 +763 28 training.batch_size 1.0 +763 29 model.embedding_dim 0.0 +763 29 model.relation_dim 1.0 +763 29 optimizer.lr 0.07331434747526715 +763 29 negative_sampler.num_negs_per_pos 43.0 +763 29 training.batch_size 1.0 +763 30 model.embedding_dim 1.0 +763 30 model.relation_dim 0.0 +763 30 optimizer.lr 
0.0024027319753881053 +763 30 negative_sampler.num_negs_per_pos 69.0 +763 30 training.batch_size 1.0 +763 31 model.embedding_dim 0.0 +763 31 model.relation_dim 0.0 +763 31 optimizer.lr 0.049178168261553905 +763 31 negative_sampler.num_negs_per_pos 91.0 +763 31 training.batch_size 0.0 +763 32 model.embedding_dim 2.0 +763 32 model.relation_dim 1.0 +763 32 optimizer.lr 0.009265963432870011 +763 32 negative_sampler.num_negs_per_pos 89.0 +763 32 training.batch_size 2.0 +763 33 model.embedding_dim 2.0 +763 33 model.relation_dim 1.0 +763 33 optimizer.lr 0.06807624411897306 +763 33 negative_sampler.num_negs_per_pos 26.0 +763 33 training.batch_size 2.0 +763 34 model.embedding_dim 0.0 +763 34 model.relation_dim 1.0 +763 34 optimizer.lr 0.0026993807810316377 +763 34 negative_sampler.num_negs_per_pos 32.0 +763 34 training.batch_size 1.0 +763 35 model.embedding_dim 2.0 +763 35 model.relation_dim 1.0 +763 35 optimizer.lr 0.0673198787381456 +763 35 negative_sampler.num_negs_per_pos 73.0 +763 35 training.batch_size 0.0 +763 36 model.embedding_dim 0.0 +763 36 model.relation_dim 1.0 +763 36 optimizer.lr 0.012428239245165942 +763 36 negative_sampler.num_negs_per_pos 37.0 +763 36 training.batch_size 0.0 +763 37 model.embedding_dim 1.0 +763 37 model.relation_dim 0.0 +763 37 optimizer.lr 0.07521413185632518 +763 37 negative_sampler.num_negs_per_pos 50.0 +763 37 training.batch_size 1.0 +763 38 model.embedding_dim 0.0 +763 38 model.relation_dim 2.0 +763 38 optimizer.lr 0.020623829785513137 +763 38 negative_sampler.num_negs_per_pos 32.0 +763 38 training.batch_size 1.0 +763 39 model.embedding_dim 1.0 +763 39 model.relation_dim 2.0 +763 39 optimizer.lr 0.052159811601354265 +763 39 negative_sampler.num_negs_per_pos 75.0 +763 39 training.batch_size 1.0 +763 40 model.embedding_dim 2.0 +763 40 model.relation_dim 2.0 +763 40 optimizer.lr 0.0032954918220373893 +763 40 negative_sampler.num_negs_per_pos 0.0 +763 40 training.batch_size 2.0 +763 41 model.embedding_dim 2.0 +763 41 model.relation_dim 
0.0 +763 41 optimizer.lr 0.006776272454905251 +763 41 negative_sampler.num_negs_per_pos 61.0 +763 41 training.batch_size 1.0 +763 42 model.embedding_dim 1.0 +763 42 model.relation_dim 0.0 +763 42 optimizer.lr 0.0022104778964484946 +763 42 negative_sampler.num_negs_per_pos 3.0 +763 42 training.batch_size 1.0 +763 43 model.embedding_dim 1.0 +763 43 model.relation_dim 2.0 +763 43 optimizer.lr 0.03949472275832109 +763 43 negative_sampler.num_negs_per_pos 50.0 +763 43 training.batch_size 0.0 +763 44 model.embedding_dim 0.0 +763 44 model.relation_dim 1.0 +763 44 optimizer.lr 0.015073343674863465 +763 44 negative_sampler.num_negs_per_pos 60.0 +763 44 training.batch_size 0.0 +763 45 model.embedding_dim 0.0 +763 45 model.relation_dim 2.0 +763 45 optimizer.lr 0.022663214968026986 +763 45 negative_sampler.num_negs_per_pos 47.0 +763 45 training.batch_size 1.0 +763 46 model.embedding_dim 0.0 +763 46 model.relation_dim 1.0 +763 46 optimizer.lr 0.007705899868463062 +763 46 negative_sampler.num_negs_per_pos 52.0 +763 46 training.batch_size 1.0 +763 47 model.embedding_dim 2.0 +763 47 model.relation_dim 1.0 +763 47 optimizer.lr 0.008512503516031654 +763 47 negative_sampler.num_negs_per_pos 14.0 +763 47 training.batch_size 0.0 +763 48 model.embedding_dim 0.0 +763 48 model.relation_dim 1.0 +763 48 optimizer.lr 0.002543797725927736 +763 48 negative_sampler.num_negs_per_pos 7.0 +763 48 training.batch_size 2.0 +763 49 model.embedding_dim 2.0 +763 49 model.relation_dim 0.0 +763 49 optimizer.lr 0.005281958658174918 +763 49 negative_sampler.num_negs_per_pos 0.0 +763 49 training.batch_size 1.0 +763 50 model.embedding_dim 1.0 +763 50 model.relation_dim 1.0 +763 50 optimizer.lr 0.016997728473199306 +763 50 negative_sampler.num_negs_per_pos 73.0 +763 50 training.batch_size 1.0 +763 51 model.embedding_dim 2.0 +763 51 model.relation_dim 2.0 +763 51 optimizer.lr 0.0792949421142277 +763 51 negative_sampler.num_negs_per_pos 54.0 +763 51 training.batch_size 1.0 +763 52 model.embedding_dim 1.0 +763 52 
model.relation_dim 1.0 +763 52 optimizer.lr 0.0075330691609867915 +763 52 negative_sampler.num_negs_per_pos 1.0 +763 52 training.batch_size 1.0 +763 53 model.embedding_dim 1.0 +763 53 model.relation_dim 2.0 +763 53 optimizer.lr 0.00368985475709028 +763 53 negative_sampler.num_negs_per_pos 56.0 +763 53 training.batch_size 2.0 +763 54 model.embedding_dim 2.0 +763 54 model.relation_dim 1.0 +763 54 optimizer.lr 0.014694239966552678 +763 54 negative_sampler.num_negs_per_pos 27.0 +763 54 training.batch_size 1.0 +763 55 model.embedding_dim 1.0 +763 55 model.relation_dim 0.0 +763 55 optimizer.lr 0.043421145296355136 +763 55 negative_sampler.num_negs_per_pos 29.0 +763 55 training.batch_size 1.0 +763 56 model.embedding_dim 1.0 +763 56 model.relation_dim 2.0 +763 56 optimizer.lr 0.0015736217079024982 +763 56 negative_sampler.num_negs_per_pos 49.0 +763 56 training.batch_size 0.0 +763 57 model.embedding_dim 0.0 +763 57 model.relation_dim 2.0 +763 57 optimizer.lr 0.04750374879478881 +763 57 negative_sampler.num_negs_per_pos 91.0 +763 57 training.batch_size 0.0 +763 58 model.embedding_dim 1.0 +763 58 model.relation_dim 2.0 +763 58 optimizer.lr 0.014824593579862102 +763 58 negative_sampler.num_negs_per_pos 24.0 +763 58 training.batch_size 1.0 +763 59 model.embedding_dim 0.0 +763 59 model.relation_dim 1.0 +763 59 optimizer.lr 0.003884140526688362 +763 59 negative_sampler.num_negs_per_pos 4.0 +763 59 training.batch_size 2.0 +763 60 model.embedding_dim 0.0 +763 60 model.relation_dim 1.0 +763 60 optimizer.lr 0.0013248803909948152 +763 60 negative_sampler.num_negs_per_pos 67.0 +763 60 training.batch_size 1.0 +763 61 model.embedding_dim 0.0 +763 61 model.relation_dim 2.0 +763 61 optimizer.lr 0.00221298762762063 +763 61 negative_sampler.num_negs_per_pos 4.0 +763 61 training.batch_size 0.0 +763 62 model.embedding_dim 0.0 +763 62 model.relation_dim 2.0 +763 62 optimizer.lr 0.008615528502211728 +763 62 negative_sampler.num_negs_per_pos 8.0 +763 62 training.batch_size 0.0 +763 63 
model.embedding_dim 2.0 +763 63 model.relation_dim 1.0 +763 63 optimizer.lr 0.0010798625667755339 +763 63 negative_sampler.num_negs_per_pos 52.0 +763 63 training.batch_size 1.0 +763 64 model.embedding_dim 2.0 +763 64 model.relation_dim 0.0 +763 64 optimizer.lr 0.007624637688524912 +763 64 negative_sampler.num_negs_per_pos 24.0 +763 64 training.batch_size 2.0 +763 65 model.embedding_dim 1.0 +763 65 model.relation_dim 0.0 +763 65 optimizer.lr 0.0011880521518241002 +763 65 negative_sampler.num_negs_per_pos 89.0 +763 65 training.batch_size 2.0 +763 66 model.embedding_dim 1.0 +763 66 model.relation_dim 1.0 +763 66 optimizer.lr 0.02454031351515309 +763 66 negative_sampler.num_negs_per_pos 64.0 +763 66 training.batch_size 0.0 +763 67 model.embedding_dim 2.0 +763 67 model.relation_dim 1.0 +763 67 optimizer.lr 0.003650529792335321 +763 67 negative_sampler.num_negs_per_pos 22.0 +763 67 training.batch_size 1.0 +763 68 model.embedding_dim 0.0 +763 68 model.relation_dim 2.0 +763 68 optimizer.lr 0.010036805305048522 +763 68 negative_sampler.num_negs_per_pos 32.0 +763 68 training.batch_size 1.0 +763 69 model.embedding_dim 1.0 +763 69 model.relation_dim 1.0 +763 69 optimizer.lr 0.006337806876384719 +763 69 negative_sampler.num_negs_per_pos 58.0 +763 69 training.batch_size 0.0 +763 70 model.embedding_dim 0.0 +763 70 model.relation_dim 2.0 +763 70 optimizer.lr 0.003106524980421596 +763 70 negative_sampler.num_negs_per_pos 59.0 +763 70 training.batch_size 2.0 +763 71 model.embedding_dim 1.0 +763 71 model.relation_dim 0.0 +763 71 optimizer.lr 0.001016753368142614 +763 71 negative_sampler.num_negs_per_pos 87.0 +763 71 training.batch_size 2.0 +763 72 model.embedding_dim 2.0 +763 72 model.relation_dim 1.0 +763 72 optimizer.lr 0.003082496981895894 +763 72 negative_sampler.num_negs_per_pos 30.0 +763 72 training.batch_size 0.0 +763 73 model.embedding_dim 2.0 +763 73 model.relation_dim 2.0 +763 73 optimizer.lr 0.03745524143993067 +763 73 negative_sampler.num_negs_per_pos 49.0 +763 73 
training.batch_size 0.0 +763 74 model.embedding_dim 2.0 +763 74 model.relation_dim 2.0 +763 74 optimizer.lr 0.011029379765971958 +763 74 negative_sampler.num_negs_per_pos 99.0 +763 74 training.batch_size 0.0 +763 75 model.embedding_dim 2.0 +763 75 model.relation_dim 1.0 +763 75 optimizer.lr 0.01371906222064327 +763 75 negative_sampler.num_negs_per_pos 28.0 +763 75 training.batch_size 2.0 +763 76 model.embedding_dim 0.0 +763 76 model.relation_dim 0.0 +763 76 optimizer.lr 0.0029084657075324756 +763 76 negative_sampler.num_negs_per_pos 40.0 +763 76 training.batch_size 1.0 +763 77 model.embedding_dim 0.0 +763 77 model.relation_dim 2.0 +763 77 optimizer.lr 0.04026867356145444 +763 77 negative_sampler.num_negs_per_pos 95.0 +763 77 training.batch_size 1.0 +763 78 model.embedding_dim 1.0 +763 78 model.relation_dim 0.0 +763 78 optimizer.lr 0.0017719702202510597 +763 78 negative_sampler.num_negs_per_pos 12.0 +763 78 training.batch_size 2.0 +763 79 model.embedding_dim 0.0 +763 79 model.relation_dim 2.0 +763 79 optimizer.lr 0.003687243610772221 +763 79 negative_sampler.num_negs_per_pos 16.0 +763 79 training.batch_size 2.0 +763 80 model.embedding_dim 2.0 +763 80 model.relation_dim 0.0 +763 80 optimizer.lr 0.009960722294433379 +763 80 negative_sampler.num_negs_per_pos 55.0 +763 80 training.batch_size 2.0 +763 81 model.embedding_dim 2.0 +763 81 model.relation_dim 0.0 +763 81 optimizer.lr 0.006228525416907635 +763 81 negative_sampler.num_negs_per_pos 22.0 +763 81 training.batch_size 1.0 +763 82 model.embedding_dim 2.0 +763 82 model.relation_dim 1.0 +763 82 optimizer.lr 0.006790702014920462 +763 82 negative_sampler.num_negs_per_pos 10.0 +763 82 training.batch_size 2.0 +763 83 model.embedding_dim 2.0 +763 83 model.relation_dim 2.0 +763 83 optimizer.lr 0.022082992507422065 +763 83 negative_sampler.num_negs_per_pos 9.0 +763 83 training.batch_size 0.0 +763 84 model.embedding_dim 0.0 +763 84 model.relation_dim 2.0 +763 84 optimizer.lr 0.013056276067315535 +763 84 
negative_sampler.num_negs_per_pos 93.0 +763 84 training.batch_size 0.0 +763 85 model.embedding_dim 2.0 +763 85 model.relation_dim 1.0 +763 85 optimizer.lr 0.004965593569025482 +763 85 negative_sampler.num_negs_per_pos 0.0 +763 85 training.batch_size 1.0 +763 86 model.embedding_dim 2.0 +763 86 model.relation_dim 1.0 +763 86 optimizer.lr 0.030094215610893506 +763 86 negative_sampler.num_negs_per_pos 37.0 +763 86 training.batch_size 1.0 +763 87 model.embedding_dim 2.0 +763 87 model.relation_dim 0.0 +763 87 optimizer.lr 0.004749329723634257 +763 87 negative_sampler.num_negs_per_pos 97.0 +763 87 training.batch_size 2.0 +763 88 model.embedding_dim 0.0 +763 88 model.relation_dim 1.0 +763 88 optimizer.lr 0.019390817590613438 +763 88 negative_sampler.num_negs_per_pos 13.0 +763 88 training.batch_size 2.0 +763 89 model.embedding_dim 0.0 +763 89 model.relation_dim 0.0 +763 89 optimizer.lr 0.08101263471471062 +763 89 negative_sampler.num_negs_per_pos 69.0 +763 89 training.batch_size 0.0 +763 90 model.embedding_dim 2.0 +763 90 model.relation_dim 0.0 +763 90 optimizer.lr 0.004079455621480968 +763 90 negative_sampler.num_negs_per_pos 57.0 +763 90 training.batch_size 2.0 +763 91 model.embedding_dim 0.0 +763 91 model.relation_dim 2.0 +763 91 optimizer.lr 0.013423076153008686 +763 91 negative_sampler.num_negs_per_pos 92.0 +763 91 training.batch_size 2.0 +763 92 model.embedding_dim 2.0 +763 92 model.relation_dim 0.0 +763 92 optimizer.lr 0.001255883368492047 +763 92 negative_sampler.num_negs_per_pos 83.0 +763 92 training.batch_size 0.0 +763 93 model.embedding_dim 1.0 +763 93 model.relation_dim 2.0 +763 93 optimizer.lr 0.028100824264551103 +763 93 negative_sampler.num_negs_per_pos 4.0 +763 93 training.batch_size 0.0 +763 94 model.embedding_dim 0.0 +763 94 model.relation_dim 0.0 +763 94 optimizer.lr 0.05648305809293452 +763 94 negative_sampler.num_negs_per_pos 82.0 +763 94 training.batch_size 0.0 +763 95 model.embedding_dim 2.0 +763 95 model.relation_dim 0.0 +763 95 optimizer.lr 
0.001977937551786762 +763 95 negative_sampler.num_negs_per_pos 60.0 +763 95 training.batch_size 1.0 +763 96 model.embedding_dim 2.0 +763 96 model.relation_dim 1.0 +763 96 optimizer.lr 0.019372752689751385 +763 96 negative_sampler.num_negs_per_pos 64.0 +763 96 training.batch_size 2.0 +763 97 model.embedding_dim 1.0 +763 97 model.relation_dim 2.0 +763 97 optimizer.lr 0.004200203258711317 +763 97 negative_sampler.num_negs_per_pos 40.0 +763 97 training.batch_size 0.0 +763 98 model.embedding_dim 0.0 +763 98 model.relation_dim 1.0 +763 98 optimizer.lr 0.05495389858740534 +763 98 negative_sampler.num_negs_per_pos 48.0 +763 98 training.batch_size 1.0 +763 99 model.embedding_dim 1.0 +763 99 model.relation_dim 0.0 +763 99 optimizer.lr 0.0017137814105366873 +763 99 negative_sampler.num_negs_per_pos 42.0 +763 99 training.batch_size 1.0 +763 100 model.embedding_dim 2.0 +763 100 model.relation_dim 0.0 +763 100 optimizer.lr 0.002642539490259898 +763 100 negative_sampler.num_negs_per_pos 78.0 +763 100 training.batch_size 0.0 +763 1 dataset """kinships""" +763 1 model """transd""" +763 1 loss """bceaftersigmoid""" +763 1 regularizer """no""" +763 1 optimizer """adam""" +763 1 training_loop """owa""" +763 1 negative_sampler """basic""" +763 1 evaluator """rankbased""" +763 2 dataset """kinships""" +763 2 model """transd""" +763 2 loss """bceaftersigmoid""" +763 2 regularizer """no""" +763 2 optimizer """adam""" +763 2 training_loop """owa""" +763 2 negative_sampler """basic""" +763 2 evaluator """rankbased""" +763 3 dataset """kinships""" +763 3 model """transd""" +763 3 loss """bceaftersigmoid""" +763 3 regularizer """no""" +763 3 optimizer """adam""" +763 3 training_loop """owa""" +763 3 negative_sampler """basic""" +763 3 evaluator """rankbased""" +763 4 dataset """kinships""" +763 4 model """transd""" +763 4 loss """bceaftersigmoid""" +763 4 regularizer """no""" +763 4 optimizer """adam""" +763 4 training_loop """owa""" +763 4 negative_sampler """basic""" +763 4 evaluator 
"""rankbased""" +763 5 dataset """kinships""" +763 5 model """transd""" +763 5 loss """bceaftersigmoid""" +763 5 regularizer """no""" +763 5 optimizer """adam""" +763 5 training_loop """owa""" +763 5 negative_sampler """basic""" +763 5 evaluator """rankbased""" +763 6 dataset """kinships""" +763 6 model """transd""" +763 6 loss """bceaftersigmoid""" +763 6 regularizer """no""" +763 6 optimizer """adam""" +763 6 training_loop """owa""" +763 6 negative_sampler """basic""" +763 6 evaluator """rankbased""" +763 7 dataset """kinships""" +763 7 model """transd""" +763 7 loss """bceaftersigmoid""" +763 7 regularizer """no""" +763 7 optimizer """adam""" +763 7 training_loop """owa""" +763 7 negative_sampler """basic""" +763 7 evaluator """rankbased""" +763 8 dataset """kinships""" +763 8 model """transd""" +763 8 loss """bceaftersigmoid""" +763 8 regularizer """no""" +763 8 optimizer """adam""" +763 8 training_loop """owa""" +763 8 negative_sampler """basic""" +763 8 evaluator """rankbased""" +763 9 dataset """kinships""" +763 9 model """transd""" +763 9 loss """bceaftersigmoid""" +763 9 regularizer """no""" +763 9 optimizer """adam""" +763 9 training_loop """owa""" +763 9 negative_sampler """basic""" +763 9 evaluator """rankbased""" +763 10 dataset """kinships""" +763 10 model """transd""" +763 10 loss """bceaftersigmoid""" +763 10 regularizer """no""" +763 10 optimizer """adam""" +763 10 training_loop """owa""" +763 10 negative_sampler """basic""" +763 10 evaluator """rankbased""" +763 11 dataset """kinships""" +763 11 model """transd""" +763 11 loss """bceaftersigmoid""" +763 11 regularizer """no""" +763 11 optimizer """adam""" +763 11 training_loop """owa""" +763 11 negative_sampler """basic""" +763 11 evaluator """rankbased""" +763 12 dataset """kinships""" +763 12 model """transd""" +763 12 loss """bceaftersigmoid""" +763 12 regularizer """no""" +763 12 optimizer """adam""" +763 12 training_loop """owa""" +763 12 negative_sampler """basic""" +763 12 evaluator 
"""rankbased""" +763 13 dataset """kinships""" +763 13 model """transd""" +763 13 loss """bceaftersigmoid""" +763 13 regularizer """no""" +763 13 optimizer """adam""" +763 13 training_loop """owa""" +763 13 negative_sampler """basic""" +763 13 evaluator """rankbased""" +763 14 dataset """kinships""" +763 14 model """transd""" +763 14 loss """bceaftersigmoid""" +763 14 regularizer """no""" +763 14 optimizer """adam""" +763 14 training_loop """owa""" +763 14 negative_sampler """basic""" +763 14 evaluator """rankbased""" +763 15 dataset """kinships""" +763 15 model """transd""" +763 15 loss """bceaftersigmoid""" +763 15 regularizer """no""" +763 15 optimizer """adam""" +763 15 training_loop """owa""" +763 15 negative_sampler """basic""" +763 15 evaluator """rankbased""" +763 16 dataset """kinships""" +763 16 model """transd""" +763 16 loss """bceaftersigmoid""" +763 16 regularizer """no""" +763 16 optimizer """adam""" +763 16 training_loop """owa""" +763 16 negative_sampler """basic""" +763 16 evaluator """rankbased""" +763 17 dataset """kinships""" +763 17 model """transd""" +763 17 loss """bceaftersigmoid""" +763 17 regularizer """no""" +763 17 optimizer """adam""" +763 17 training_loop """owa""" +763 17 negative_sampler """basic""" +763 17 evaluator """rankbased""" +763 18 dataset """kinships""" +763 18 model """transd""" +763 18 loss """bceaftersigmoid""" +763 18 regularizer """no""" +763 18 optimizer """adam""" +763 18 training_loop """owa""" +763 18 negative_sampler """basic""" +763 18 evaluator """rankbased""" +763 19 dataset """kinships""" +763 19 model """transd""" +763 19 loss """bceaftersigmoid""" +763 19 regularizer """no""" +763 19 optimizer """adam""" +763 19 training_loop """owa""" +763 19 negative_sampler """basic""" +763 19 evaluator """rankbased""" +763 20 dataset """kinships""" +763 20 model """transd""" +763 20 loss """bceaftersigmoid""" +763 20 regularizer """no""" +763 20 optimizer """adam""" +763 20 training_loop """owa""" +763 20 
negative_sampler """basic""" +763 20 evaluator """rankbased""" +763 21 dataset """kinships""" +763 21 model """transd""" +763 21 loss """bceaftersigmoid""" +763 21 regularizer """no""" +763 21 optimizer """adam""" +763 21 training_loop """owa""" +763 21 negative_sampler """basic""" +763 21 evaluator """rankbased""" +763 22 dataset """kinships""" +763 22 model """transd""" +763 22 loss """bceaftersigmoid""" +763 22 regularizer """no""" +763 22 optimizer """adam""" +763 22 training_loop """owa""" +763 22 negative_sampler """basic""" +763 22 evaluator """rankbased""" +763 23 dataset """kinships""" +763 23 model """transd""" +763 23 loss """bceaftersigmoid""" +763 23 regularizer """no""" +763 23 optimizer """adam""" +763 23 training_loop """owa""" +763 23 negative_sampler """basic""" +763 23 evaluator """rankbased""" +763 24 dataset """kinships""" +763 24 model """transd""" +763 24 loss """bceaftersigmoid""" +763 24 regularizer """no""" +763 24 optimizer """adam""" +763 24 training_loop """owa""" +763 24 negative_sampler """basic""" +763 24 evaluator """rankbased""" +763 25 dataset """kinships""" +763 25 model """transd""" +763 25 loss """bceaftersigmoid""" +763 25 regularizer """no""" +763 25 optimizer """adam""" +763 25 training_loop """owa""" +763 25 negative_sampler """basic""" +763 25 evaluator """rankbased""" +763 26 dataset """kinships""" +763 26 model """transd""" +763 26 loss """bceaftersigmoid""" +763 26 regularizer """no""" +763 26 optimizer """adam""" +763 26 training_loop """owa""" +763 26 negative_sampler """basic""" +763 26 evaluator """rankbased""" +763 27 dataset """kinships""" +763 27 model """transd""" +763 27 loss """bceaftersigmoid""" +763 27 regularizer """no""" +763 27 optimizer """adam""" +763 27 training_loop """owa""" +763 27 negative_sampler """basic""" +763 27 evaluator """rankbased""" +763 28 dataset """kinships""" +763 28 model """transd""" +763 28 loss """bceaftersigmoid""" +763 28 regularizer """no""" +763 28 optimizer """adam""" +763 28 
training_loop """owa""" +763 28 negative_sampler """basic""" +763 28 evaluator """rankbased""" +763 29 dataset """kinships""" +763 29 model """transd""" +763 29 loss """bceaftersigmoid""" +763 29 regularizer """no""" +763 29 optimizer """adam""" +763 29 training_loop """owa""" +763 29 negative_sampler """basic""" +763 29 evaluator """rankbased""" +763 30 dataset """kinships""" +763 30 model """transd""" +763 30 loss """bceaftersigmoid""" +763 30 regularizer """no""" +763 30 optimizer """adam""" +763 30 training_loop """owa""" +763 30 negative_sampler """basic""" +763 30 evaluator """rankbased""" +763 31 dataset """kinships""" +763 31 model """transd""" +763 31 loss """bceaftersigmoid""" +763 31 regularizer """no""" +763 31 optimizer """adam""" +763 31 training_loop """owa""" +763 31 negative_sampler """basic""" +763 31 evaluator """rankbased""" +763 32 dataset """kinships""" +763 32 model """transd""" +763 32 loss """bceaftersigmoid""" +763 32 regularizer """no""" +763 32 optimizer """adam""" +763 32 training_loop """owa""" +763 32 negative_sampler """basic""" +763 32 evaluator """rankbased""" +763 33 dataset """kinships""" +763 33 model """transd""" +763 33 loss """bceaftersigmoid""" +763 33 regularizer """no""" +763 33 optimizer """adam""" +763 33 training_loop """owa""" +763 33 negative_sampler """basic""" +763 33 evaluator """rankbased""" +763 34 dataset """kinships""" +763 34 model """transd""" +763 34 loss """bceaftersigmoid""" +763 34 regularizer """no""" +763 34 optimizer """adam""" +763 34 training_loop """owa""" +763 34 negative_sampler """basic""" +763 34 evaluator """rankbased""" +763 35 dataset """kinships""" +763 35 model """transd""" +763 35 loss """bceaftersigmoid""" +763 35 regularizer """no""" +763 35 optimizer """adam""" +763 35 training_loop """owa""" +763 35 negative_sampler """basic""" +763 35 evaluator """rankbased""" +763 36 dataset """kinships""" +763 36 model """transd""" +763 36 loss """bceaftersigmoid""" +763 36 regularizer """no""" +763 
36 optimizer """adam""" +763 36 training_loop """owa""" +763 36 negative_sampler """basic""" +763 36 evaluator """rankbased""" +763 37 dataset """kinships""" +763 37 model """transd""" +763 37 loss """bceaftersigmoid""" +763 37 regularizer """no""" +763 37 optimizer """adam""" +763 37 training_loop """owa""" +763 37 negative_sampler """basic""" +763 37 evaluator """rankbased""" +763 38 dataset """kinships""" +763 38 model """transd""" +763 38 loss """bceaftersigmoid""" +763 38 regularizer """no""" +763 38 optimizer """adam""" +763 38 training_loop """owa""" +763 38 negative_sampler """basic""" +763 38 evaluator """rankbased""" +763 39 dataset """kinships""" +763 39 model """transd""" +763 39 loss """bceaftersigmoid""" +763 39 regularizer """no""" +763 39 optimizer """adam""" +763 39 training_loop """owa""" +763 39 negative_sampler """basic""" +763 39 evaluator """rankbased""" +763 40 dataset """kinships""" +763 40 model """transd""" +763 40 loss """bceaftersigmoid""" +763 40 regularizer """no""" +763 40 optimizer """adam""" +763 40 training_loop """owa""" +763 40 negative_sampler """basic""" +763 40 evaluator """rankbased""" +763 41 dataset """kinships""" +763 41 model """transd""" +763 41 loss """bceaftersigmoid""" +763 41 regularizer """no""" +763 41 optimizer """adam""" +763 41 training_loop """owa""" +763 41 negative_sampler """basic""" +763 41 evaluator """rankbased""" +763 42 dataset """kinships""" +763 42 model """transd""" +763 42 loss """bceaftersigmoid""" +763 42 regularizer """no""" +763 42 optimizer """adam""" +763 42 training_loop """owa""" +763 42 negative_sampler """basic""" +763 42 evaluator """rankbased""" +763 43 dataset """kinships""" +763 43 model """transd""" +763 43 loss """bceaftersigmoid""" +763 43 regularizer """no""" +763 43 optimizer """adam""" +763 43 training_loop """owa""" +763 43 negative_sampler """basic""" +763 43 evaluator """rankbased""" +763 44 dataset """kinships""" +763 44 model """transd""" +763 44 loss """bceaftersigmoid""" 
+763 44 regularizer """no""" +763 44 optimizer """adam""" +763 44 training_loop """owa""" +763 44 negative_sampler """basic""" +763 44 evaluator """rankbased""" +763 45 dataset """kinships""" +763 45 model """transd""" +763 45 loss """bceaftersigmoid""" +763 45 regularizer """no""" +763 45 optimizer """adam""" +763 45 training_loop """owa""" +763 45 negative_sampler """basic""" +763 45 evaluator """rankbased""" +763 46 dataset """kinships""" +763 46 model """transd""" +763 46 loss """bceaftersigmoid""" +763 46 regularizer """no""" +763 46 optimizer """adam""" +763 46 training_loop """owa""" +763 46 negative_sampler """basic""" +763 46 evaluator """rankbased""" +763 47 dataset """kinships""" +763 47 model """transd""" +763 47 loss """bceaftersigmoid""" +763 47 regularizer """no""" +763 47 optimizer """adam""" +763 47 training_loop """owa""" +763 47 negative_sampler """basic""" +763 47 evaluator """rankbased""" +763 48 dataset """kinships""" +763 48 model """transd""" +763 48 loss """bceaftersigmoid""" +763 48 regularizer """no""" +763 48 optimizer """adam""" +763 48 training_loop """owa""" +763 48 negative_sampler """basic""" +763 48 evaluator """rankbased""" +763 49 dataset """kinships""" +763 49 model """transd""" +763 49 loss """bceaftersigmoid""" +763 49 regularizer """no""" +763 49 optimizer """adam""" +763 49 training_loop """owa""" +763 49 negative_sampler """basic""" +763 49 evaluator """rankbased""" +763 50 dataset """kinships""" +763 50 model """transd""" +763 50 loss """bceaftersigmoid""" +763 50 regularizer """no""" +763 50 optimizer """adam""" +763 50 training_loop """owa""" +763 50 negative_sampler """basic""" +763 50 evaluator """rankbased""" +763 51 dataset """kinships""" +763 51 model """transd""" +763 51 loss """bceaftersigmoid""" +763 51 regularizer """no""" +763 51 optimizer """adam""" +763 51 training_loop """owa""" +763 51 negative_sampler """basic""" +763 51 evaluator """rankbased""" +763 52 dataset """kinships""" +763 52 model """transd""" 
+763 52 loss """bceaftersigmoid""" +763 52 regularizer """no""" +763 52 optimizer """adam""" +763 52 training_loop """owa""" +763 52 negative_sampler """basic""" +763 52 evaluator """rankbased""" +763 53 dataset """kinships""" +763 53 model """transd""" +763 53 loss """bceaftersigmoid""" +763 53 regularizer """no""" +763 53 optimizer """adam""" +763 53 training_loop """owa""" +763 53 negative_sampler """basic""" +763 53 evaluator """rankbased""" +763 54 dataset """kinships""" +763 54 model """transd""" +763 54 loss """bceaftersigmoid""" +763 54 regularizer """no""" +763 54 optimizer """adam""" +763 54 training_loop """owa""" +763 54 negative_sampler """basic""" +763 54 evaluator """rankbased""" +763 55 dataset """kinships""" +763 55 model """transd""" +763 55 loss """bceaftersigmoid""" +763 55 regularizer """no""" +763 55 optimizer """adam""" +763 55 training_loop """owa""" +763 55 negative_sampler """basic""" +763 55 evaluator """rankbased""" +763 56 dataset """kinships""" +763 56 model """transd""" +763 56 loss """bceaftersigmoid""" +763 56 regularizer """no""" +763 56 optimizer """adam""" +763 56 training_loop """owa""" +763 56 negative_sampler """basic""" +763 56 evaluator """rankbased""" +763 57 dataset """kinships""" +763 57 model """transd""" +763 57 loss """bceaftersigmoid""" +763 57 regularizer """no""" +763 57 optimizer """adam""" +763 57 training_loop """owa""" +763 57 negative_sampler """basic""" +763 57 evaluator """rankbased""" +763 58 dataset """kinships""" +763 58 model """transd""" +763 58 loss """bceaftersigmoid""" +763 58 regularizer """no""" +763 58 optimizer """adam""" +763 58 training_loop """owa""" +763 58 negative_sampler """basic""" +763 58 evaluator """rankbased""" +763 59 dataset """kinships""" +763 59 model """transd""" +763 59 loss """bceaftersigmoid""" +763 59 regularizer """no""" +763 59 optimizer """adam""" +763 59 training_loop """owa""" +763 59 negative_sampler """basic""" +763 59 evaluator """rankbased""" +763 60 dataset 
"""kinships""" +763 60 model """transd""" +763 60 loss """bceaftersigmoid""" +763 60 regularizer """no""" +763 60 optimizer """adam""" +763 60 training_loop """owa""" +763 60 negative_sampler """basic""" +763 60 evaluator """rankbased""" +763 61 dataset """kinships""" +763 61 model """transd""" +763 61 loss """bceaftersigmoid""" +763 61 regularizer """no""" +763 61 optimizer """adam""" +763 61 training_loop """owa""" +763 61 negative_sampler """basic""" +763 61 evaluator """rankbased""" +763 62 dataset """kinships""" +763 62 model """transd""" +763 62 loss """bceaftersigmoid""" +763 62 regularizer """no""" +763 62 optimizer """adam""" +763 62 training_loop """owa""" +763 62 negative_sampler """basic""" +763 62 evaluator """rankbased""" +763 63 dataset """kinships""" +763 63 model """transd""" +763 63 loss """bceaftersigmoid""" +763 63 regularizer """no""" +763 63 optimizer """adam""" +763 63 training_loop """owa""" +763 63 negative_sampler """basic""" +763 63 evaluator """rankbased""" +763 64 dataset """kinships""" +763 64 model """transd""" +763 64 loss """bceaftersigmoid""" +763 64 regularizer """no""" +763 64 optimizer """adam""" +763 64 training_loop """owa""" +763 64 negative_sampler """basic""" +763 64 evaluator """rankbased""" +763 65 dataset """kinships""" +763 65 model """transd""" +763 65 loss """bceaftersigmoid""" +763 65 regularizer """no""" +763 65 optimizer """adam""" +763 65 training_loop """owa""" +763 65 negative_sampler """basic""" +763 65 evaluator """rankbased""" +763 66 dataset """kinships""" +763 66 model """transd""" +763 66 loss """bceaftersigmoid""" +763 66 regularizer """no""" +763 66 optimizer """adam""" +763 66 training_loop """owa""" +763 66 negative_sampler """basic""" +763 66 evaluator """rankbased""" +763 67 dataset """kinships""" +763 67 model """transd""" +763 67 loss """bceaftersigmoid""" +763 67 regularizer """no""" +763 67 optimizer """adam""" +763 67 training_loop """owa""" +763 67 negative_sampler """basic""" +763 67 evaluator 
"""rankbased""" +763 68 dataset """kinships""" +763 68 model """transd""" +763 68 loss """bceaftersigmoid""" +763 68 regularizer """no""" +763 68 optimizer """adam""" +763 68 training_loop """owa""" +763 68 negative_sampler """basic""" +763 68 evaluator """rankbased""" +763 69 dataset """kinships""" +763 69 model """transd""" +763 69 loss """bceaftersigmoid""" +763 69 regularizer """no""" +763 69 optimizer """adam""" +763 69 training_loop """owa""" +763 69 negative_sampler """basic""" +763 69 evaluator """rankbased""" +763 70 dataset """kinships""" +763 70 model """transd""" +763 70 loss """bceaftersigmoid""" +763 70 regularizer """no""" +763 70 optimizer """adam""" +763 70 training_loop """owa""" +763 70 negative_sampler """basic""" +763 70 evaluator """rankbased""" +763 71 dataset """kinships""" +763 71 model """transd""" +763 71 loss """bceaftersigmoid""" +763 71 regularizer """no""" +763 71 optimizer """adam""" +763 71 training_loop """owa""" +763 71 negative_sampler """basic""" +763 71 evaluator """rankbased""" +763 72 dataset """kinships""" +763 72 model """transd""" +763 72 loss """bceaftersigmoid""" +763 72 regularizer """no""" +763 72 optimizer """adam""" +763 72 training_loop """owa""" +763 72 negative_sampler """basic""" +763 72 evaluator """rankbased""" +763 73 dataset """kinships""" +763 73 model """transd""" +763 73 loss """bceaftersigmoid""" +763 73 regularizer """no""" +763 73 optimizer """adam""" +763 73 training_loop """owa""" +763 73 negative_sampler """basic""" +763 73 evaluator """rankbased""" +763 74 dataset """kinships""" +763 74 model """transd""" +763 74 loss """bceaftersigmoid""" +763 74 regularizer """no""" +763 74 optimizer """adam""" +763 74 training_loop """owa""" +763 74 negative_sampler """basic""" +763 74 evaluator """rankbased""" +763 75 dataset """kinships""" +763 75 model """transd""" +763 75 loss """bceaftersigmoid""" +763 75 regularizer """no""" +763 75 optimizer """adam""" +763 75 training_loop """owa""" +763 75 
negative_sampler """basic""" +763 75 evaluator """rankbased""" +763 76 dataset """kinships""" +763 76 model """transd""" +763 76 loss """bceaftersigmoid""" +763 76 regularizer """no""" +763 76 optimizer """adam""" +763 76 training_loop """owa""" +763 76 negative_sampler """basic""" +763 76 evaluator """rankbased""" +763 77 dataset """kinships""" +763 77 model """transd""" +763 77 loss """bceaftersigmoid""" +763 77 regularizer """no""" +763 77 optimizer """adam""" +763 77 training_loop """owa""" +763 77 negative_sampler """basic""" +763 77 evaluator """rankbased""" +763 78 dataset """kinships""" +763 78 model """transd""" +763 78 loss """bceaftersigmoid""" +763 78 regularizer """no""" +763 78 optimizer """adam""" +763 78 training_loop """owa""" +763 78 negative_sampler """basic""" +763 78 evaluator """rankbased""" +763 79 dataset """kinships""" +763 79 model """transd""" +763 79 loss """bceaftersigmoid""" +763 79 regularizer """no""" +763 79 optimizer """adam""" +763 79 training_loop """owa""" +763 79 negative_sampler """basic""" +763 79 evaluator """rankbased""" +763 80 dataset """kinships""" +763 80 model """transd""" +763 80 loss """bceaftersigmoid""" +763 80 regularizer """no""" +763 80 optimizer """adam""" +763 80 training_loop """owa""" +763 80 negative_sampler """basic""" +763 80 evaluator """rankbased""" +763 81 dataset """kinships""" +763 81 model """transd""" +763 81 loss """bceaftersigmoid""" +763 81 regularizer """no""" +763 81 optimizer """adam""" +763 81 training_loop """owa""" +763 81 negative_sampler """basic""" +763 81 evaluator """rankbased""" +763 82 dataset """kinships""" +763 82 model """transd""" +763 82 loss """bceaftersigmoid""" +763 82 regularizer """no""" +763 82 optimizer """adam""" +763 82 training_loop """owa""" +763 82 negative_sampler """basic""" +763 82 evaluator """rankbased""" +763 83 dataset """kinships""" +763 83 model """transd""" +763 83 loss """bceaftersigmoid""" +763 83 regularizer """no""" +763 83 optimizer """adam""" +763 83 
training_loop """owa""" +763 83 negative_sampler """basic""" +763 83 evaluator """rankbased""" +763 84 dataset """kinships""" +763 84 model """transd""" +763 84 loss """bceaftersigmoid""" +763 84 regularizer """no""" +763 84 optimizer """adam""" +763 84 training_loop """owa""" +763 84 negative_sampler """basic""" +763 84 evaluator """rankbased""" +763 85 dataset """kinships""" +763 85 model """transd""" +763 85 loss """bceaftersigmoid""" +763 85 regularizer """no""" +763 85 optimizer """adam""" +763 85 training_loop """owa""" +763 85 negative_sampler """basic""" +763 85 evaluator """rankbased""" +763 86 dataset """kinships""" +763 86 model """transd""" +763 86 loss """bceaftersigmoid""" +763 86 regularizer """no""" +763 86 optimizer """adam""" +763 86 training_loop """owa""" +763 86 negative_sampler """basic""" +763 86 evaluator """rankbased""" +763 87 dataset """kinships""" +763 87 model """transd""" +763 87 loss """bceaftersigmoid""" +763 87 regularizer """no""" +763 87 optimizer """adam""" +763 87 training_loop """owa""" +763 87 negative_sampler """basic""" +763 87 evaluator """rankbased""" +763 88 dataset """kinships""" +763 88 model """transd""" +763 88 loss """bceaftersigmoid""" +763 88 regularizer """no""" +763 88 optimizer """adam""" +763 88 training_loop """owa""" +763 88 negative_sampler """basic""" +763 88 evaluator """rankbased""" +763 89 dataset """kinships""" +763 89 model """transd""" +763 89 loss """bceaftersigmoid""" +763 89 regularizer """no""" +763 89 optimizer """adam""" +763 89 training_loop """owa""" +763 89 negative_sampler """basic""" +763 89 evaluator """rankbased""" +763 90 dataset """kinships""" +763 90 model """transd""" +763 90 loss """bceaftersigmoid""" +763 90 regularizer """no""" +763 90 optimizer """adam""" +763 90 training_loop """owa""" +763 90 negative_sampler """basic""" +763 90 evaluator """rankbased""" +763 91 dataset """kinships""" +763 91 model """transd""" +763 91 loss """bceaftersigmoid""" +763 91 regularizer """no""" +763 
91 optimizer """adam""" +763 91 training_loop """owa""" +763 91 negative_sampler """basic""" +763 91 evaluator """rankbased""" +763 92 dataset """kinships""" +763 92 model """transd""" +763 92 loss """bceaftersigmoid""" +763 92 regularizer """no""" +763 92 optimizer """adam""" +763 92 training_loop """owa""" +763 92 negative_sampler """basic""" +763 92 evaluator """rankbased""" +763 93 dataset """kinships""" +763 93 model """transd""" +763 93 loss """bceaftersigmoid""" +763 93 regularizer """no""" +763 93 optimizer """adam""" +763 93 training_loop """owa""" +763 93 negative_sampler """basic""" +763 93 evaluator """rankbased""" +763 94 dataset """kinships""" +763 94 model """transd""" +763 94 loss """bceaftersigmoid""" +763 94 regularizer """no""" +763 94 optimizer """adam""" +763 94 training_loop """owa""" +763 94 negative_sampler """basic""" +763 94 evaluator """rankbased""" +763 95 dataset """kinships""" +763 95 model """transd""" +763 95 loss """bceaftersigmoid""" +763 95 regularizer """no""" +763 95 optimizer """adam""" +763 95 training_loop """owa""" +763 95 negative_sampler """basic""" +763 95 evaluator """rankbased""" +763 96 dataset """kinships""" +763 96 model """transd""" +763 96 loss """bceaftersigmoid""" +763 96 regularizer """no""" +763 96 optimizer """adam""" +763 96 training_loop """owa""" +763 96 negative_sampler """basic""" +763 96 evaluator """rankbased""" +763 97 dataset """kinships""" +763 97 model """transd""" +763 97 loss """bceaftersigmoid""" +763 97 regularizer """no""" +763 97 optimizer """adam""" +763 97 training_loop """owa""" +763 97 negative_sampler """basic""" +763 97 evaluator """rankbased""" +763 98 dataset """kinships""" +763 98 model """transd""" +763 98 loss """bceaftersigmoid""" +763 98 regularizer """no""" +763 98 optimizer """adam""" +763 98 training_loop """owa""" +763 98 negative_sampler """basic""" +763 98 evaluator """rankbased""" +763 99 dataset """kinships""" +763 99 model """transd""" +763 99 loss """bceaftersigmoid""" 
+763 99 regularizer """no""" +763 99 optimizer """adam""" +763 99 training_loop """owa""" +763 99 negative_sampler """basic""" +763 99 evaluator """rankbased""" +763 100 dataset """kinships""" +763 100 model """transd""" +763 100 loss """bceaftersigmoid""" +763 100 regularizer """no""" +763 100 optimizer """adam""" +763 100 training_loop """owa""" +763 100 negative_sampler """basic""" +763 100 evaluator """rankbased""" +764 1 model.embedding_dim 2.0 +764 1 model.relation_dim 0.0 +764 1 optimizer.lr 0.07251886084463387 +764 1 negative_sampler.num_negs_per_pos 77.0 +764 1 training.batch_size 1.0 +764 2 model.embedding_dim 1.0 +764 2 model.relation_dim 0.0 +764 2 optimizer.lr 0.014350148396171304 +764 2 negative_sampler.num_negs_per_pos 42.0 +764 2 training.batch_size 1.0 +764 3 model.embedding_dim 1.0 +764 3 model.relation_dim 0.0 +764 3 optimizer.lr 0.09985218091985065 +764 3 negative_sampler.num_negs_per_pos 77.0 +764 3 training.batch_size 2.0 +764 4 model.embedding_dim 2.0 +764 4 model.relation_dim 2.0 +764 4 optimizer.lr 0.0010352552878830688 +764 4 negative_sampler.num_negs_per_pos 15.0 +764 4 training.batch_size 1.0 +764 5 model.embedding_dim 2.0 +764 5 model.relation_dim 2.0 +764 5 optimizer.lr 0.08928527325995304 +764 5 negative_sampler.num_negs_per_pos 74.0 +764 5 training.batch_size 0.0 +764 6 model.embedding_dim 2.0 +764 6 model.relation_dim 1.0 +764 6 optimizer.lr 0.002278004088979192 +764 6 negative_sampler.num_negs_per_pos 33.0 +764 6 training.batch_size 0.0 +764 7 model.embedding_dim 2.0 +764 7 model.relation_dim 2.0 +764 7 optimizer.lr 0.0016770314883063065 +764 7 negative_sampler.num_negs_per_pos 48.0 +764 7 training.batch_size 2.0 +764 8 model.embedding_dim 0.0 +764 8 model.relation_dim 2.0 +764 8 optimizer.lr 0.002044023700765123 +764 8 negative_sampler.num_negs_per_pos 37.0 +764 8 training.batch_size 2.0 +764 9 model.embedding_dim 0.0 +764 9 model.relation_dim 2.0 +764 9 optimizer.lr 0.0010701159851913466 +764 9 negative_sampler.num_negs_per_pos 
45.0 +764 9 training.batch_size 0.0 +764 10 model.embedding_dim 0.0 +764 10 model.relation_dim 2.0 +764 10 optimizer.lr 0.042416473398356636 +764 10 negative_sampler.num_negs_per_pos 38.0 +764 10 training.batch_size 2.0 +764 11 model.embedding_dim 2.0 +764 11 model.relation_dim 1.0 +764 11 optimizer.lr 0.0018715894968966218 +764 11 negative_sampler.num_negs_per_pos 74.0 +764 11 training.batch_size 0.0 +764 12 model.embedding_dim 1.0 +764 12 model.relation_dim 0.0 +764 12 optimizer.lr 0.07175582635430267 +764 12 negative_sampler.num_negs_per_pos 82.0 +764 12 training.batch_size 1.0 +764 13 model.embedding_dim 0.0 +764 13 model.relation_dim 1.0 +764 13 optimizer.lr 0.011854192140784342 +764 13 negative_sampler.num_negs_per_pos 65.0 +764 13 training.batch_size 1.0 +764 14 model.embedding_dim 2.0 +764 14 model.relation_dim 1.0 +764 14 optimizer.lr 0.039706904437590756 +764 14 negative_sampler.num_negs_per_pos 70.0 +764 14 training.batch_size 2.0 +764 15 model.embedding_dim 1.0 +764 15 model.relation_dim 2.0 +764 15 optimizer.lr 0.05626286489727147 +764 15 negative_sampler.num_negs_per_pos 59.0 +764 15 training.batch_size 0.0 +764 16 model.embedding_dim 1.0 +764 16 model.relation_dim 0.0 +764 16 optimizer.lr 0.00492151486461443 +764 16 negative_sampler.num_negs_per_pos 25.0 +764 16 training.batch_size 0.0 +764 17 model.embedding_dim 2.0 +764 17 model.relation_dim 0.0 +764 17 optimizer.lr 0.028581251722322037 +764 17 negative_sampler.num_negs_per_pos 5.0 +764 17 training.batch_size 2.0 +764 18 model.embedding_dim 1.0 +764 18 model.relation_dim 1.0 +764 18 optimizer.lr 0.0032964815352341447 +764 18 negative_sampler.num_negs_per_pos 30.0 +764 18 training.batch_size 0.0 +764 19 model.embedding_dim 2.0 +764 19 model.relation_dim 1.0 +764 19 optimizer.lr 0.022595191380272262 +764 19 negative_sampler.num_negs_per_pos 74.0 +764 19 training.batch_size 0.0 +764 20 model.embedding_dim 1.0 +764 20 model.relation_dim 1.0 +764 20 optimizer.lr 0.01515105377819195 +764 20 
negative_sampler.num_negs_per_pos 98.0 +764 20 training.batch_size 1.0 +764 21 model.embedding_dim 1.0 +764 21 model.relation_dim 1.0 +764 21 optimizer.lr 0.0029982079467701186 +764 21 negative_sampler.num_negs_per_pos 72.0 +764 21 training.batch_size 0.0 +764 22 model.embedding_dim 1.0 +764 22 model.relation_dim 0.0 +764 22 optimizer.lr 0.007094905197841041 +764 22 negative_sampler.num_negs_per_pos 64.0 +764 22 training.batch_size 2.0 +764 23 model.embedding_dim 1.0 +764 23 model.relation_dim 1.0 +764 23 optimizer.lr 0.05226594661507914 +764 23 negative_sampler.num_negs_per_pos 67.0 +764 23 training.batch_size 2.0 +764 24 model.embedding_dim 2.0 +764 24 model.relation_dim 1.0 +764 24 optimizer.lr 0.0038672363982709445 +764 24 negative_sampler.num_negs_per_pos 1.0 +764 24 training.batch_size 0.0 +764 25 model.embedding_dim 1.0 +764 25 model.relation_dim 2.0 +764 25 optimizer.lr 0.00415286183247801 +764 25 negative_sampler.num_negs_per_pos 5.0 +764 25 training.batch_size 0.0 +764 26 model.embedding_dim 2.0 +764 26 model.relation_dim 1.0 +764 26 optimizer.lr 0.001112888069856387 +764 26 negative_sampler.num_negs_per_pos 23.0 +764 26 training.batch_size 1.0 +764 27 model.embedding_dim 1.0 +764 27 model.relation_dim 0.0 +764 27 optimizer.lr 0.011663741699246647 +764 27 negative_sampler.num_negs_per_pos 44.0 +764 27 training.batch_size 0.0 +764 28 model.embedding_dim 1.0 +764 28 model.relation_dim 0.0 +764 28 optimizer.lr 0.01567647120218431 +764 28 negative_sampler.num_negs_per_pos 93.0 +764 28 training.batch_size 0.0 +764 29 model.embedding_dim 0.0 +764 29 model.relation_dim 1.0 +764 29 optimizer.lr 0.002424631096863686 +764 29 negative_sampler.num_negs_per_pos 39.0 +764 29 training.batch_size 1.0 +764 30 model.embedding_dim 2.0 +764 30 model.relation_dim 1.0 +764 30 optimizer.lr 0.0013305997626400492 +764 30 negative_sampler.num_negs_per_pos 33.0 +764 30 training.batch_size 0.0 +764 31 model.embedding_dim 2.0 +764 31 model.relation_dim 2.0 +764 31 optimizer.lr 
0.01743766322263446 +764 31 negative_sampler.num_negs_per_pos 46.0 +764 31 training.batch_size 2.0 +764 32 model.embedding_dim 2.0 +764 32 model.relation_dim 1.0 +764 32 optimizer.lr 0.0026529302323872244 +764 32 negative_sampler.num_negs_per_pos 90.0 +764 32 training.batch_size 2.0 +764 33 model.embedding_dim 1.0 +764 33 model.relation_dim 0.0 +764 33 optimizer.lr 0.005340293849886117 +764 33 negative_sampler.num_negs_per_pos 72.0 +764 33 training.batch_size 1.0 +764 34 model.embedding_dim 1.0 +764 34 model.relation_dim 1.0 +764 34 optimizer.lr 0.0015650757857382427 +764 34 negative_sampler.num_negs_per_pos 76.0 +764 34 training.batch_size 1.0 +764 35 model.embedding_dim 2.0 +764 35 model.relation_dim 0.0 +764 35 optimizer.lr 0.008367892544522177 +764 35 negative_sampler.num_negs_per_pos 88.0 +764 35 training.batch_size 0.0 +764 36 model.embedding_dim 2.0 +764 36 model.relation_dim 2.0 +764 36 optimizer.lr 0.010538387281478366 +764 36 negative_sampler.num_negs_per_pos 97.0 +764 36 training.batch_size 1.0 +764 37 model.embedding_dim 2.0 +764 37 model.relation_dim 2.0 +764 37 optimizer.lr 0.0012103252624467537 +764 37 negative_sampler.num_negs_per_pos 10.0 +764 37 training.batch_size 0.0 +764 38 model.embedding_dim 1.0 +764 38 model.relation_dim 1.0 +764 38 optimizer.lr 0.012610527741269336 +764 38 negative_sampler.num_negs_per_pos 4.0 +764 38 training.batch_size 2.0 +764 39 model.embedding_dim 0.0 +764 39 model.relation_dim 2.0 +764 39 optimizer.lr 0.008898320762467037 +764 39 negative_sampler.num_negs_per_pos 56.0 +764 39 training.batch_size 1.0 +764 40 model.embedding_dim 2.0 +764 40 model.relation_dim 1.0 +764 40 optimizer.lr 0.001030176270340188 +764 40 negative_sampler.num_negs_per_pos 81.0 +764 40 training.batch_size 0.0 +764 41 model.embedding_dim 0.0 +764 41 model.relation_dim 2.0 +764 41 optimizer.lr 0.003839522604125581 +764 41 negative_sampler.num_negs_per_pos 93.0 +764 41 training.batch_size 0.0 +764 42 model.embedding_dim 0.0 +764 42 model.relation_dim 
1.0 +764 42 optimizer.lr 0.0020462321849641484 +764 42 negative_sampler.num_negs_per_pos 11.0 +764 42 training.batch_size 1.0 +764 43 model.embedding_dim 0.0 +764 43 model.relation_dim 1.0 +764 43 optimizer.lr 0.008696056170880912 +764 43 negative_sampler.num_negs_per_pos 69.0 +764 43 training.batch_size 1.0 +764 44 model.embedding_dim 2.0 +764 44 model.relation_dim 1.0 +764 44 optimizer.lr 0.0013747539330068686 +764 44 negative_sampler.num_negs_per_pos 1.0 +764 44 training.batch_size 2.0 +764 45 model.embedding_dim 2.0 +764 45 model.relation_dim 1.0 +764 45 optimizer.lr 0.0030110149688886813 +764 45 negative_sampler.num_negs_per_pos 8.0 +764 45 training.batch_size 2.0 +764 46 model.embedding_dim 1.0 +764 46 model.relation_dim 0.0 +764 46 optimizer.lr 0.009828724362009404 +764 46 negative_sampler.num_negs_per_pos 81.0 +764 46 training.batch_size 1.0 +764 47 model.embedding_dim 1.0 +764 47 model.relation_dim 2.0 +764 47 optimizer.lr 0.013033368107928817 +764 47 negative_sampler.num_negs_per_pos 28.0 +764 47 training.batch_size 0.0 +764 48 model.embedding_dim 1.0 +764 48 model.relation_dim 1.0 +764 48 optimizer.lr 0.016008793825774317 +764 48 negative_sampler.num_negs_per_pos 93.0 +764 48 training.batch_size 1.0 +764 49 model.embedding_dim 1.0 +764 49 model.relation_dim 1.0 +764 49 optimizer.lr 0.004616839161639486 +764 49 negative_sampler.num_negs_per_pos 35.0 +764 49 training.batch_size 0.0 +764 50 model.embedding_dim 1.0 +764 50 model.relation_dim 0.0 +764 50 optimizer.lr 0.016661187335602255 +764 50 negative_sampler.num_negs_per_pos 69.0 +764 50 training.batch_size 2.0 +764 51 model.embedding_dim 0.0 +764 51 model.relation_dim 2.0 +764 51 optimizer.lr 0.01931449600056349 +764 51 negative_sampler.num_negs_per_pos 60.0 +764 51 training.batch_size 1.0 +764 52 model.embedding_dim 1.0 +764 52 model.relation_dim 2.0 +764 52 optimizer.lr 0.0016982569639659097 +764 52 negative_sampler.num_negs_per_pos 31.0 +764 52 training.batch_size 2.0 +764 53 model.embedding_dim 2.0 
+764 53 model.relation_dim 2.0 +764 53 optimizer.lr 0.001192515451665037 +764 53 negative_sampler.num_negs_per_pos 56.0 +764 53 training.batch_size 1.0 +764 54 model.embedding_dim 1.0 +764 54 model.relation_dim 1.0 +764 54 optimizer.lr 0.0024592280361681765 +764 54 negative_sampler.num_negs_per_pos 71.0 +764 54 training.batch_size 2.0 +764 55 model.embedding_dim 1.0 +764 55 model.relation_dim 2.0 +764 55 optimizer.lr 0.0036856988782131106 +764 55 negative_sampler.num_negs_per_pos 22.0 +764 55 training.batch_size 0.0 +764 56 model.embedding_dim 0.0 +764 56 model.relation_dim 0.0 +764 56 optimizer.lr 0.06788052102755046 +764 56 negative_sampler.num_negs_per_pos 24.0 +764 56 training.batch_size 2.0 +764 57 model.embedding_dim 2.0 +764 57 model.relation_dim 2.0 +764 57 optimizer.lr 0.04803483567977034 +764 57 negative_sampler.num_negs_per_pos 13.0 +764 57 training.batch_size 1.0 +764 58 model.embedding_dim 1.0 +764 58 model.relation_dim 1.0 +764 58 optimizer.lr 0.026917198639529825 +764 58 negative_sampler.num_negs_per_pos 7.0 +764 58 training.batch_size 0.0 +764 59 model.embedding_dim 2.0 +764 59 model.relation_dim 1.0 +764 59 optimizer.lr 0.02265354546944498 +764 59 negative_sampler.num_negs_per_pos 85.0 +764 59 training.batch_size 1.0 +764 60 model.embedding_dim 0.0 +764 60 model.relation_dim 1.0 +764 60 optimizer.lr 0.026933450078545362 +764 60 negative_sampler.num_negs_per_pos 14.0 +764 60 training.batch_size 2.0 +764 61 model.embedding_dim 1.0 +764 61 model.relation_dim 0.0 +764 61 optimizer.lr 0.03152051021192497 +764 61 negative_sampler.num_negs_per_pos 86.0 +764 61 training.batch_size 0.0 +764 62 model.embedding_dim 1.0 +764 62 model.relation_dim 1.0 +764 62 optimizer.lr 0.07313746900066694 +764 62 negative_sampler.num_negs_per_pos 22.0 +764 62 training.batch_size 1.0 +764 63 model.embedding_dim 0.0 +764 63 model.relation_dim 2.0 +764 63 optimizer.lr 0.0034717695181974103 +764 63 negative_sampler.num_negs_per_pos 96.0 +764 63 training.batch_size 1.0 +764 64 
model.embedding_dim 2.0 +764 64 model.relation_dim 2.0 +764 64 optimizer.lr 0.001441552532009181 +764 64 negative_sampler.num_negs_per_pos 77.0 +764 64 training.batch_size 1.0 +764 65 model.embedding_dim 1.0 +764 65 model.relation_dim 2.0 +764 65 optimizer.lr 0.0049556928631912175 +764 65 negative_sampler.num_negs_per_pos 59.0 +764 65 training.batch_size 1.0 +764 66 model.embedding_dim 0.0 +764 66 model.relation_dim 2.0 +764 66 optimizer.lr 0.004307727443370012 +764 66 negative_sampler.num_negs_per_pos 92.0 +764 66 training.batch_size 2.0 +764 67 model.embedding_dim 2.0 +764 67 model.relation_dim 2.0 +764 67 optimizer.lr 0.0011290103060975496 +764 67 negative_sampler.num_negs_per_pos 57.0 +764 67 training.batch_size 2.0 +764 68 model.embedding_dim 0.0 +764 68 model.relation_dim 1.0 +764 68 optimizer.lr 0.01260393746811858 +764 68 negative_sampler.num_negs_per_pos 33.0 +764 68 training.batch_size 1.0 +764 69 model.embedding_dim 1.0 +764 69 model.relation_dim 2.0 +764 69 optimizer.lr 0.006285477165621848 +764 69 negative_sampler.num_negs_per_pos 30.0 +764 69 training.batch_size 1.0 +764 70 model.embedding_dim 0.0 +764 70 model.relation_dim 0.0 +764 70 optimizer.lr 0.07158250235843244 +764 70 negative_sampler.num_negs_per_pos 13.0 +764 70 training.batch_size 1.0 +764 71 model.embedding_dim 0.0 +764 71 model.relation_dim 2.0 +764 71 optimizer.lr 0.0353300336821324 +764 71 negative_sampler.num_negs_per_pos 70.0 +764 71 training.batch_size 2.0 +764 72 model.embedding_dim 0.0 +764 72 model.relation_dim 1.0 +764 72 optimizer.lr 0.02527733225905959 +764 72 negative_sampler.num_negs_per_pos 47.0 +764 72 training.batch_size 1.0 +764 73 model.embedding_dim 0.0 +764 73 model.relation_dim 0.0 +764 73 optimizer.lr 0.0027440864613085176 +764 73 negative_sampler.num_negs_per_pos 7.0 +764 73 training.batch_size 2.0 +764 74 model.embedding_dim 0.0 +764 74 model.relation_dim 2.0 +764 74 optimizer.lr 0.06084596702472777 +764 74 negative_sampler.num_negs_per_pos 87.0 +764 74 
training.batch_size 2.0 +764 75 model.embedding_dim 2.0 +764 75 model.relation_dim 1.0 +764 75 optimizer.lr 0.003420207619926654 +764 75 negative_sampler.num_negs_per_pos 60.0 +764 75 training.batch_size 0.0 +764 76 model.embedding_dim 2.0 +764 76 model.relation_dim 1.0 +764 76 optimizer.lr 0.00885932343828682 +764 76 negative_sampler.num_negs_per_pos 46.0 +764 76 training.batch_size 2.0 +764 77 model.embedding_dim 1.0 +764 77 model.relation_dim 2.0 +764 77 optimizer.lr 0.008121583955946593 +764 77 negative_sampler.num_negs_per_pos 18.0 +764 77 training.batch_size 1.0 +764 78 model.embedding_dim 1.0 +764 78 model.relation_dim 2.0 +764 78 optimizer.lr 0.0013176147805486788 +764 78 negative_sampler.num_negs_per_pos 23.0 +764 78 training.batch_size 2.0 +764 79 model.embedding_dim 1.0 +764 79 model.relation_dim 2.0 +764 79 optimizer.lr 0.01314762895563906 +764 79 negative_sampler.num_negs_per_pos 96.0 +764 79 training.batch_size 1.0 +764 80 model.embedding_dim 0.0 +764 80 model.relation_dim 1.0 +764 80 optimizer.lr 0.007195725514876908 +764 80 negative_sampler.num_negs_per_pos 61.0 +764 80 training.batch_size 0.0 +764 81 model.embedding_dim 1.0 +764 81 model.relation_dim 2.0 +764 81 optimizer.lr 0.03765987200010963 +764 81 negative_sampler.num_negs_per_pos 12.0 +764 81 training.batch_size 2.0 +764 82 model.embedding_dim 1.0 +764 82 model.relation_dim 2.0 +764 82 optimizer.lr 0.0016789606195991476 +764 82 negative_sampler.num_negs_per_pos 95.0 +764 82 training.batch_size 2.0 +764 83 model.embedding_dim 1.0 +764 83 model.relation_dim 1.0 +764 83 optimizer.lr 0.005573514108164046 +764 83 negative_sampler.num_negs_per_pos 96.0 +764 83 training.batch_size 1.0 +764 84 model.embedding_dim 2.0 +764 84 model.relation_dim 0.0 +764 84 optimizer.lr 0.012288792714551336 +764 84 negative_sampler.num_negs_per_pos 26.0 +764 84 training.batch_size 1.0 +764 85 model.embedding_dim 2.0 +764 85 model.relation_dim 0.0 +764 85 optimizer.lr 0.028746916201710482 +764 85 
negative_sampler.num_negs_per_pos 93.0 +764 85 training.batch_size 0.0 +764 86 model.embedding_dim 0.0 +764 86 model.relation_dim 2.0 +764 86 optimizer.lr 0.0042759223409901456 +764 86 negative_sampler.num_negs_per_pos 13.0 +764 86 training.batch_size 0.0 +764 87 model.embedding_dim 0.0 +764 87 model.relation_dim 2.0 +764 87 optimizer.lr 0.040604577046335855 +764 87 negative_sampler.num_negs_per_pos 66.0 +764 87 training.batch_size 0.0 +764 88 model.embedding_dim 1.0 +764 88 model.relation_dim 0.0 +764 88 optimizer.lr 0.008032291868739935 +764 88 negative_sampler.num_negs_per_pos 27.0 +764 88 training.batch_size 1.0 +764 89 model.embedding_dim 2.0 +764 89 model.relation_dim 0.0 +764 89 optimizer.lr 0.016128306741207132 +764 89 negative_sampler.num_negs_per_pos 15.0 +764 89 training.batch_size 2.0 +764 90 model.embedding_dim 1.0 +764 90 model.relation_dim 1.0 +764 90 optimizer.lr 0.01994067187049327 +764 90 negative_sampler.num_negs_per_pos 24.0 +764 90 training.batch_size 0.0 +764 91 model.embedding_dim 1.0 +764 91 model.relation_dim 2.0 +764 91 optimizer.lr 0.05648396812769976 +764 91 negative_sampler.num_negs_per_pos 41.0 +764 91 training.batch_size 1.0 +764 92 model.embedding_dim 0.0 +764 92 model.relation_dim 2.0 +764 92 optimizer.lr 0.0263465234187041 +764 92 negative_sampler.num_negs_per_pos 0.0 +764 92 training.batch_size 1.0 +764 93 model.embedding_dim 2.0 +764 93 model.relation_dim 2.0 +764 93 optimizer.lr 0.0014108899198374304 +764 93 negative_sampler.num_negs_per_pos 75.0 +764 93 training.batch_size 0.0 +764 94 model.embedding_dim 0.0 +764 94 model.relation_dim 2.0 +764 94 optimizer.lr 0.03142496817374605 +764 94 negative_sampler.num_negs_per_pos 80.0 +764 94 training.batch_size 1.0 +764 95 model.embedding_dim 2.0 +764 95 model.relation_dim 1.0 +764 95 optimizer.lr 0.009680968188451915 +764 95 negative_sampler.num_negs_per_pos 8.0 +764 95 training.batch_size 2.0 +764 96 model.embedding_dim 2.0 +764 96 model.relation_dim 2.0 +764 96 optimizer.lr 
0.07694615650536896 +764 96 negative_sampler.num_negs_per_pos 1.0 +764 96 training.batch_size 0.0 +764 97 model.embedding_dim 0.0 +764 97 model.relation_dim 1.0 +764 97 optimizer.lr 0.0010752488848009313 +764 97 negative_sampler.num_negs_per_pos 23.0 +764 97 training.batch_size 0.0 +764 98 model.embedding_dim 0.0 +764 98 model.relation_dim 2.0 +764 98 optimizer.lr 0.04698671213428226 +764 98 negative_sampler.num_negs_per_pos 66.0 +764 98 training.batch_size 1.0 +764 99 model.embedding_dim 2.0 +764 99 model.relation_dim 0.0 +764 99 optimizer.lr 0.0016437344362933862 +764 99 negative_sampler.num_negs_per_pos 78.0 +764 99 training.batch_size 0.0 +764 100 model.embedding_dim 0.0 +764 100 model.relation_dim 0.0 +764 100 optimizer.lr 0.07388750421790388 +764 100 negative_sampler.num_negs_per_pos 89.0 +764 100 training.batch_size 1.0 +764 1 dataset """kinships""" +764 1 model """transd""" +764 1 loss """softplus""" +764 1 regularizer """no""" +764 1 optimizer """adam""" +764 1 training_loop """owa""" +764 1 negative_sampler """basic""" +764 1 evaluator """rankbased""" +764 2 dataset """kinships""" +764 2 model """transd""" +764 2 loss """softplus""" +764 2 regularizer """no""" +764 2 optimizer """adam""" +764 2 training_loop """owa""" +764 2 negative_sampler """basic""" +764 2 evaluator """rankbased""" +764 3 dataset """kinships""" +764 3 model """transd""" +764 3 loss """softplus""" +764 3 regularizer """no""" +764 3 optimizer """adam""" +764 3 training_loop """owa""" +764 3 negative_sampler """basic""" +764 3 evaluator """rankbased""" +764 4 dataset """kinships""" +764 4 model """transd""" +764 4 loss """softplus""" +764 4 regularizer """no""" +764 4 optimizer """adam""" +764 4 training_loop """owa""" +764 4 negative_sampler """basic""" +764 4 evaluator """rankbased""" +764 5 dataset """kinships""" +764 5 model """transd""" +764 5 loss """softplus""" +764 5 regularizer """no""" +764 5 optimizer """adam""" +764 5 training_loop """owa""" +764 5 negative_sampler 
"""basic""" +764 5 evaluator """rankbased""" +764 6 dataset """kinships""" +764 6 model """transd""" +764 6 loss """softplus""" +764 6 regularizer """no""" +764 6 optimizer """adam""" +764 6 training_loop """owa""" +764 6 negative_sampler """basic""" +764 6 evaluator """rankbased""" +764 7 dataset """kinships""" +764 7 model """transd""" +764 7 loss """softplus""" +764 7 regularizer """no""" +764 7 optimizer """adam""" +764 7 training_loop """owa""" +764 7 negative_sampler """basic""" +764 7 evaluator """rankbased""" +764 8 dataset """kinships""" +764 8 model """transd""" +764 8 loss """softplus""" +764 8 regularizer """no""" +764 8 optimizer """adam""" +764 8 training_loop """owa""" +764 8 negative_sampler """basic""" +764 8 evaluator """rankbased""" +764 9 dataset """kinships""" +764 9 model """transd""" +764 9 loss """softplus""" +764 9 regularizer """no""" +764 9 optimizer """adam""" +764 9 training_loop """owa""" +764 9 negative_sampler """basic""" +764 9 evaluator """rankbased""" +764 10 dataset """kinships""" +764 10 model """transd""" +764 10 loss """softplus""" +764 10 regularizer """no""" +764 10 optimizer """adam""" +764 10 training_loop """owa""" +764 10 negative_sampler """basic""" +764 10 evaluator """rankbased""" +764 11 dataset """kinships""" +764 11 model """transd""" +764 11 loss """softplus""" +764 11 regularizer """no""" +764 11 optimizer """adam""" +764 11 training_loop """owa""" +764 11 negative_sampler """basic""" +764 11 evaluator """rankbased""" +764 12 dataset """kinships""" +764 12 model """transd""" +764 12 loss """softplus""" +764 12 regularizer """no""" +764 12 optimizer """adam""" +764 12 training_loop """owa""" +764 12 negative_sampler """basic""" +764 12 evaluator """rankbased""" +764 13 dataset """kinships""" +764 13 model """transd""" +764 13 loss """softplus""" +764 13 regularizer """no""" +764 13 optimizer """adam""" +764 13 training_loop """owa""" +764 13 negative_sampler """basic""" +764 13 evaluator """rankbased""" +764 14 
dataset """kinships""" +764 14 model """transd""" +764 14 loss """softplus""" +764 14 regularizer """no""" +764 14 optimizer """adam""" +764 14 training_loop """owa""" +764 14 negative_sampler """basic""" +764 14 evaluator """rankbased""" +764 15 dataset """kinships""" +764 15 model """transd""" +764 15 loss """softplus""" +764 15 regularizer """no""" +764 15 optimizer """adam""" +764 15 training_loop """owa""" +764 15 negative_sampler """basic""" +764 15 evaluator """rankbased""" +764 16 dataset """kinships""" +764 16 model """transd""" +764 16 loss """softplus""" +764 16 regularizer """no""" +764 16 optimizer """adam""" +764 16 training_loop """owa""" +764 16 negative_sampler """basic""" +764 16 evaluator """rankbased""" +764 17 dataset """kinships""" +764 17 model """transd""" +764 17 loss """softplus""" +764 17 regularizer """no""" +764 17 optimizer """adam""" +764 17 training_loop """owa""" +764 17 negative_sampler """basic""" +764 17 evaluator """rankbased""" +764 18 dataset """kinships""" +764 18 model """transd""" +764 18 loss """softplus""" +764 18 regularizer """no""" +764 18 optimizer """adam""" +764 18 training_loop """owa""" +764 18 negative_sampler """basic""" +764 18 evaluator """rankbased""" +764 19 dataset """kinships""" +764 19 model """transd""" +764 19 loss """softplus""" +764 19 regularizer """no""" +764 19 optimizer """adam""" +764 19 training_loop """owa""" +764 19 negative_sampler """basic""" +764 19 evaluator """rankbased""" +764 20 dataset """kinships""" +764 20 model """transd""" +764 20 loss """softplus""" +764 20 regularizer """no""" +764 20 optimizer """adam""" +764 20 training_loop """owa""" +764 20 negative_sampler """basic""" +764 20 evaluator """rankbased""" +764 21 dataset """kinships""" +764 21 model """transd""" +764 21 loss """softplus""" +764 21 regularizer """no""" +764 21 optimizer """adam""" +764 21 training_loop """owa""" +764 21 negative_sampler """basic""" +764 21 evaluator """rankbased""" +764 22 dataset """kinships""" 
+764 22 model """transd""" +764 22 loss """softplus""" +764 22 regularizer """no""" +764 22 optimizer """adam""" +764 22 training_loop """owa""" +764 22 negative_sampler """basic""" +764 22 evaluator """rankbased""" +764 23 dataset """kinships""" +764 23 model """transd""" +764 23 loss """softplus""" +764 23 regularizer """no""" +764 23 optimizer """adam""" +764 23 training_loop """owa""" +764 23 negative_sampler """basic""" +764 23 evaluator """rankbased""" +764 24 dataset """kinships""" +764 24 model """transd""" +764 24 loss """softplus""" +764 24 regularizer """no""" +764 24 optimizer """adam""" +764 24 training_loop """owa""" +764 24 negative_sampler """basic""" +764 24 evaluator """rankbased""" +764 25 dataset """kinships""" +764 25 model """transd""" +764 25 loss """softplus""" +764 25 regularizer """no""" +764 25 optimizer """adam""" +764 25 training_loop """owa""" +764 25 negative_sampler """basic""" +764 25 evaluator """rankbased""" +764 26 dataset """kinships""" +764 26 model """transd""" +764 26 loss """softplus""" +764 26 regularizer """no""" +764 26 optimizer """adam""" +764 26 training_loop """owa""" +764 26 negative_sampler """basic""" +764 26 evaluator """rankbased""" +764 27 dataset """kinships""" +764 27 model """transd""" +764 27 loss """softplus""" +764 27 regularizer """no""" +764 27 optimizer """adam""" +764 27 training_loop """owa""" +764 27 negative_sampler """basic""" +764 27 evaluator """rankbased""" +764 28 dataset """kinships""" +764 28 model """transd""" +764 28 loss """softplus""" +764 28 regularizer """no""" +764 28 optimizer """adam""" +764 28 training_loop """owa""" +764 28 negative_sampler """basic""" +764 28 evaluator """rankbased""" +764 29 dataset """kinships""" +764 29 model """transd""" +764 29 loss """softplus""" +764 29 regularizer """no""" +764 29 optimizer """adam""" +764 29 training_loop """owa""" +764 29 negative_sampler """basic""" +764 29 evaluator """rankbased""" +764 30 dataset """kinships""" +764 30 model 
"""transd""" +764 30 loss """softplus""" +764 30 regularizer """no""" +764 30 optimizer """adam""" +764 30 training_loop """owa""" +764 30 negative_sampler """basic""" +764 30 evaluator """rankbased""" +764 31 dataset """kinships""" +764 31 model """transd""" +764 31 loss """softplus""" +764 31 regularizer """no""" +764 31 optimizer """adam""" +764 31 training_loop """owa""" +764 31 negative_sampler """basic""" +764 31 evaluator """rankbased""" +764 32 dataset """kinships""" +764 32 model """transd""" +764 32 loss """softplus""" +764 32 regularizer """no""" +764 32 optimizer """adam""" +764 32 training_loop """owa""" +764 32 negative_sampler """basic""" +764 32 evaluator """rankbased""" +764 33 dataset """kinships""" +764 33 model """transd""" +764 33 loss """softplus""" +764 33 regularizer """no""" +764 33 optimizer """adam""" +764 33 training_loop """owa""" +764 33 negative_sampler """basic""" +764 33 evaluator """rankbased""" +764 34 dataset """kinships""" +764 34 model """transd""" +764 34 loss """softplus""" +764 34 regularizer """no""" +764 34 optimizer """adam""" +764 34 training_loop """owa""" +764 34 negative_sampler """basic""" +764 34 evaluator """rankbased""" +764 35 dataset """kinships""" +764 35 model """transd""" +764 35 loss """softplus""" +764 35 regularizer """no""" +764 35 optimizer """adam""" +764 35 training_loop """owa""" +764 35 negative_sampler """basic""" +764 35 evaluator """rankbased""" +764 36 dataset """kinships""" +764 36 model """transd""" +764 36 loss """softplus""" +764 36 regularizer """no""" +764 36 optimizer """adam""" +764 36 training_loop """owa""" +764 36 negative_sampler """basic""" +764 36 evaluator """rankbased""" +764 37 dataset """kinships""" +764 37 model """transd""" +764 37 loss """softplus""" +764 37 regularizer """no""" +764 37 optimizer """adam""" +764 37 training_loop """owa""" +764 37 negative_sampler """basic""" +764 37 evaluator """rankbased""" +764 38 dataset """kinships""" +764 38 model """transd""" +764 38 
loss """softplus""" +764 38 regularizer """no""" +764 38 optimizer """adam""" +764 38 training_loop """owa""" +764 38 negative_sampler """basic""" +764 38 evaluator """rankbased""" +764 39 dataset """kinships""" +764 39 model """transd""" +764 39 loss """softplus""" +764 39 regularizer """no""" +764 39 optimizer """adam""" +764 39 training_loop """owa""" +764 39 negative_sampler """basic""" +764 39 evaluator """rankbased""" +764 40 dataset """kinships""" +764 40 model """transd""" +764 40 loss """softplus""" +764 40 regularizer """no""" +764 40 optimizer """adam""" +764 40 training_loop """owa""" +764 40 negative_sampler """basic""" +764 40 evaluator """rankbased""" +764 41 dataset """kinships""" +764 41 model """transd""" +764 41 loss """softplus""" +764 41 regularizer """no""" +764 41 optimizer """adam""" +764 41 training_loop """owa""" +764 41 negative_sampler """basic""" +764 41 evaluator """rankbased""" +764 42 dataset """kinships""" +764 42 model """transd""" +764 42 loss """softplus""" +764 42 regularizer """no""" +764 42 optimizer """adam""" +764 42 training_loop """owa""" +764 42 negative_sampler """basic""" +764 42 evaluator """rankbased""" +764 43 dataset """kinships""" +764 43 model """transd""" +764 43 loss """softplus""" +764 43 regularizer """no""" +764 43 optimizer """adam""" +764 43 training_loop """owa""" +764 43 negative_sampler """basic""" +764 43 evaluator """rankbased""" +764 44 dataset """kinships""" +764 44 model """transd""" +764 44 loss """softplus""" +764 44 regularizer """no""" +764 44 optimizer """adam""" +764 44 training_loop """owa""" +764 44 negative_sampler """basic""" +764 44 evaluator """rankbased""" +764 45 dataset """kinships""" +764 45 model """transd""" +764 45 loss """softplus""" +764 45 regularizer """no""" +764 45 optimizer """adam""" +764 45 training_loop """owa""" +764 45 negative_sampler """basic""" +764 45 evaluator """rankbased""" +764 46 dataset """kinships""" +764 46 model """transd""" +764 46 loss """softplus""" 
+764 46 regularizer """no""" +764 46 optimizer """adam""" +764 46 training_loop """owa""" +764 46 negative_sampler """basic""" +764 46 evaluator """rankbased""" +764 47 dataset """kinships""" +764 47 model """transd""" +764 47 loss """softplus""" +764 47 regularizer """no""" +764 47 optimizer """adam""" +764 47 training_loop """owa""" +764 47 negative_sampler """basic""" +764 47 evaluator """rankbased""" +764 48 dataset """kinships""" +764 48 model """transd""" +764 48 loss """softplus""" +764 48 regularizer """no""" +764 48 optimizer """adam""" +764 48 training_loop """owa""" +764 48 negative_sampler """basic""" +764 48 evaluator """rankbased""" +764 49 dataset """kinships""" +764 49 model """transd""" +764 49 loss """softplus""" +764 49 regularizer """no""" +764 49 optimizer """adam""" +764 49 training_loop """owa""" +764 49 negative_sampler """basic""" +764 49 evaluator """rankbased""" +764 50 dataset """kinships""" +764 50 model """transd""" +764 50 loss """softplus""" +764 50 regularizer """no""" +764 50 optimizer """adam""" +764 50 training_loop """owa""" +764 50 negative_sampler """basic""" +764 50 evaluator """rankbased""" +764 51 dataset """kinships""" +764 51 model """transd""" +764 51 loss """softplus""" +764 51 regularizer """no""" +764 51 optimizer """adam""" +764 51 training_loop """owa""" +764 51 negative_sampler """basic""" +764 51 evaluator """rankbased""" +764 52 dataset """kinships""" +764 52 model """transd""" +764 52 loss """softplus""" +764 52 regularizer """no""" +764 52 optimizer """adam""" +764 52 training_loop """owa""" +764 52 negative_sampler """basic""" +764 52 evaluator """rankbased""" +764 53 dataset """kinships""" +764 53 model """transd""" +764 53 loss """softplus""" +764 53 regularizer """no""" +764 53 optimizer """adam""" +764 53 training_loop """owa""" +764 53 negative_sampler """basic""" +764 53 evaluator """rankbased""" +764 54 dataset """kinships""" +764 54 model """transd""" +764 54 loss """softplus""" +764 54 regularizer 
"""no""" +764 54 optimizer """adam""" +764 54 training_loop """owa""" +764 54 negative_sampler """basic""" +764 54 evaluator """rankbased""" +764 55 dataset """kinships""" +764 55 model """transd""" +764 55 loss """softplus""" +764 55 regularizer """no""" +764 55 optimizer """adam""" +764 55 training_loop """owa""" +764 55 negative_sampler """basic""" +764 55 evaluator """rankbased""" +764 56 dataset """kinships""" +764 56 model """transd""" +764 56 loss """softplus""" +764 56 regularizer """no""" +764 56 optimizer """adam""" +764 56 training_loop """owa""" +764 56 negative_sampler """basic""" +764 56 evaluator """rankbased""" +764 57 dataset """kinships""" +764 57 model """transd""" +764 57 loss """softplus""" +764 57 regularizer """no""" +764 57 optimizer """adam""" +764 57 training_loop """owa""" +764 57 negative_sampler """basic""" +764 57 evaluator """rankbased""" +764 58 dataset """kinships""" +764 58 model """transd""" +764 58 loss """softplus""" +764 58 regularizer """no""" +764 58 optimizer """adam""" +764 58 training_loop """owa""" +764 58 negative_sampler """basic""" +764 58 evaluator """rankbased""" +764 59 dataset """kinships""" +764 59 model """transd""" +764 59 loss """softplus""" +764 59 regularizer """no""" +764 59 optimizer """adam""" +764 59 training_loop """owa""" +764 59 negative_sampler """basic""" +764 59 evaluator """rankbased""" +764 60 dataset """kinships""" +764 60 model """transd""" +764 60 loss """softplus""" +764 60 regularizer """no""" +764 60 optimizer """adam""" +764 60 training_loop """owa""" +764 60 negative_sampler """basic""" +764 60 evaluator """rankbased""" +764 61 dataset """kinships""" +764 61 model """transd""" +764 61 loss """softplus""" +764 61 regularizer """no""" +764 61 optimizer """adam""" +764 61 training_loop """owa""" +764 61 negative_sampler """basic""" +764 61 evaluator """rankbased""" +764 62 dataset """kinships""" +764 62 model """transd""" +764 62 loss """softplus""" +764 62 regularizer """no""" +764 62 
optimizer """adam""" +764 62 training_loop """owa""" +764 62 negative_sampler """basic""" +764 62 evaluator """rankbased""" +764 63 dataset """kinships""" +764 63 model """transd""" +764 63 loss """softplus""" +764 63 regularizer """no""" +764 63 optimizer """adam""" +764 63 training_loop """owa""" +764 63 negative_sampler """basic""" +764 63 evaluator """rankbased""" +764 64 dataset """kinships""" +764 64 model """transd""" +764 64 loss """softplus""" +764 64 regularizer """no""" +764 64 optimizer """adam""" +764 64 training_loop """owa""" +764 64 negative_sampler """basic""" +764 64 evaluator """rankbased""" +764 65 dataset """kinships""" +764 65 model """transd""" +764 65 loss """softplus""" +764 65 regularizer """no""" +764 65 optimizer """adam""" +764 65 training_loop """owa""" +764 65 negative_sampler """basic""" +764 65 evaluator """rankbased""" +764 66 dataset """kinships""" +764 66 model """transd""" +764 66 loss """softplus""" +764 66 regularizer """no""" +764 66 optimizer """adam""" +764 66 training_loop """owa""" +764 66 negative_sampler """basic""" +764 66 evaluator """rankbased""" +764 67 dataset """kinships""" +764 67 model """transd""" +764 67 loss """softplus""" +764 67 regularizer """no""" +764 67 optimizer """adam""" +764 67 training_loop """owa""" +764 67 negative_sampler """basic""" +764 67 evaluator """rankbased""" +764 68 dataset """kinships""" +764 68 model """transd""" +764 68 loss """softplus""" +764 68 regularizer """no""" +764 68 optimizer """adam""" +764 68 training_loop """owa""" +764 68 negative_sampler """basic""" +764 68 evaluator """rankbased""" +764 69 dataset """kinships""" +764 69 model """transd""" +764 69 loss """softplus""" +764 69 regularizer """no""" +764 69 optimizer """adam""" +764 69 training_loop """owa""" +764 69 negative_sampler """basic""" +764 69 evaluator """rankbased""" +764 70 dataset """kinships""" +764 70 model """transd""" +764 70 loss """softplus""" +764 70 regularizer """no""" +764 70 optimizer """adam""" 
+764 70 training_loop """owa""" +764 70 negative_sampler """basic""" +764 70 evaluator """rankbased""" +764 71 dataset """kinships""" +764 71 model """transd""" +764 71 loss """softplus""" +764 71 regularizer """no""" +764 71 optimizer """adam""" +764 71 training_loop """owa""" +764 71 negative_sampler """basic""" +764 71 evaluator """rankbased""" +764 72 dataset """kinships""" +764 72 model """transd""" +764 72 loss """softplus""" +764 72 regularizer """no""" +764 72 optimizer """adam""" +764 72 training_loop """owa""" +764 72 negative_sampler """basic""" +764 72 evaluator """rankbased""" +764 73 dataset """kinships""" +764 73 model """transd""" +764 73 loss """softplus""" +764 73 regularizer """no""" +764 73 optimizer """adam""" +764 73 training_loop """owa""" +764 73 negative_sampler """basic""" +764 73 evaluator """rankbased""" +764 74 dataset """kinships""" +764 74 model """transd""" +764 74 loss """softplus""" +764 74 regularizer """no""" +764 74 optimizer """adam""" +764 74 training_loop """owa""" +764 74 negative_sampler """basic""" +764 74 evaluator """rankbased""" +764 75 dataset """kinships""" +764 75 model """transd""" +764 75 loss """softplus""" +764 75 regularizer """no""" +764 75 optimizer """adam""" +764 75 training_loop """owa""" +764 75 negative_sampler """basic""" +764 75 evaluator """rankbased""" +764 76 dataset """kinships""" +764 76 model """transd""" +764 76 loss """softplus""" +764 76 regularizer """no""" +764 76 optimizer """adam""" +764 76 training_loop """owa""" +764 76 negative_sampler """basic""" +764 76 evaluator """rankbased""" +764 77 dataset """kinships""" +764 77 model """transd""" +764 77 loss """softplus""" +764 77 regularizer """no""" +764 77 optimizer """adam""" +764 77 training_loop """owa""" +764 77 negative_sampler """basic""" +764 77 evaluator """rankbased""" +764 78 dataset """kinships""" +764 78 model """transd""" +764 78 loss """softplus""" +764 78 regularizer """no""" +764 78 optimizer """adam""" +764 78 training_loop 
"""owa""" +764 78 negative_sampler """basic""" +764 78 evaluator """rankbased""" +764 79 dataset """kinships""" +764 79 model """transd""" +764 79 loss """softplus""" +764 79 regularizer """no""" +764 79 optimizer """adam""" +764 79 training_loop """owa""" +764 79 negative_sampler """basic""" +764 79 evaluator """rankbased""" +764 80 dataset """kinships""" +764 80 model """transd""" +764 80 loss """softplus""" +764 80 regularizer """no""" +764 80 optimizer """adam""" +764 80 training_loop """owa""" +764 80 negative_sampler """basic""" +764 80 evaluator """rankbased""" +764 81 dataset """kinships""" +764 81 model """transd""" +764 81 loss """softplus""" +764 81 regularizer """no""" +764 81 optimizer """adam""" +764 81 training_loop """owa""" +764 81 negative_sampler """basic""" +764 81 evaluator """rankbased""" +764 82 dataset """kinships""" +764 82 model """transd""" +764 82 loss """softplus""" +764 82 regularizer """no""" +764 82 optimizer """adam""" +764 82 training_loop """owa""" +764 82 negative_sampler """basic""" +764 82 evaluator """rankbased""" +764 83 dataset """kinships""" +764 83 model """transd""" +764 83 loss """softplus""" +764 83 regularizer """no""" +764 83 optimizer """adam""" +764 83 training_loop """owa""" +764 83 negative_sampler """basic""" +764 83 evaluator """rankbased""" +764 84 dataset """kinships""" +764 84 model """transd""" +764 84 loss """softplus""" +764 84 regularizer """no""" +764 84 optimizer """adam""" +764 84 training_loop """owa""" +764 84 negative_sampler """basic""" +764 84 evaluator """rankbased""" +764 85 dataset """kinships""" +764 85 model """transd""" +764 85 loss """softplus""" +764 85 regularizer """no""" +764 85 optimizer """adam""" +764 85 training_loop """owa""" +764 85 negative_sampler """basic""" +764 85 evaluator """rankbased""" +764 86 dataset """kinships""" +764 86 model """transd""" +764 86 loss """softplus""" +764 86 regularizer """no""" +764 86 optimizer """adam""" +764 86 training_loop """owa""" +764 86 
negative_sampler """basic""" +764 86 evaluator """rankbased""" +764 87 dataset """kinships""" +764 87 model """transd""" +764 87 loss """softplus""" +764 87 regularizer """no""" +764 87 optimizer """adam""" +764 87 training_loop """owa""" +764 87 negative_sampler """basic""" +764 87 evaluator """rankbased""" +764 88 dataset """kinships""" +764 88 model """transd""" +764 88 loss """softplus""" +764 88 regularizer """no""" +764 88 optimizer """adam""" +764 88 training_loop """owa""" +764 88 negative_sampler """basic""" +764 88 evaluator """rankbased""" +764 89 dataset """kinships""" +764 89 model """transd""" +764 89 loss """softplus""" +764 89 regularizer """no""" +764 89 optimizer """adam""" +764 89 training_loop """owa""" +764 89 negative_sampler """basic""" +764 89 evaluator """rankbased""" +764 90 dataset """kinships""" +764 90 model """transd""" +764 90 loss """softplus""" +764 90 regularizer """no""" +764 90 optimizer """adam""" +764 90 training_loop """owa""" +764 90 negative_sampler """basic""" +764 90 evaluator """rankbased""" +764 91 dataset """kinships""" +764 91 model """transd""" +764 91 loss """softplus""" +764 91 regularizer """no""" +764 91 optimizer """adam""" +764 91 training_loop """owa""" +764 91 negative_sampler """basic""" +764 91 evaluator """rankbased""" +764 92 dataset """kinships""" +764 92 model """transd""" +764 92 loss """softplus""" +764 92 regularizer """no""" +764 92 optimizer """adam""" +764 92 training_loop """owa""" +764 92 negative_sampler """basic""" +764 92 evaluator """rankbased""" +764 93 dataset """kinships""" +764 93 model """transd""" +764 93 loss """softplus""" +764 93 regularizer """no""" +764 93 optimizer """adam""" +764 93 training_loop """owa""" +764 93 negative_sampler """basic""" +764 93 evaluator """rankbased""" +764 94 dataset """kinships""" +764 94 model """transd""" +764 94 loss """softplus""" +764 94 regularizer """no""" +764 94 optimizer """adam""" +764 94 training_loop """owa""" +764 94 negative_sampler 
"""basic""" +764 94 evaluator """rankbased""" +764 95 dataset """kinships""" +764 95 model """transd""" +764 95 loss """softplus""" +764 95 regularizer """no""" +764 95 optimizer """adam""" +764 95 training_loop """owa""" +764 95 negative_sampler """basic""" +764 95 evaluator """rankbased""" +764 96 dataset """kinships""" +764 96 model """transd""" +764 96 loss """softplus""" +764 96 regularizer """no""" +764 96 optimizer """adam""" +764 96 training_loop """owa""" +764 96 negative_sampler """basic""" +764 96 evaluator """rankbased""" +764 97 dataset """kinships""" +764 97 model """transd""" +764 97 loss """softplus""" +764 97 regularizer """no""" +764 97 optimizer """adam""" +764 97 training_loop """owa""" +764 97 negative_sampler """basic""" +764 97 evaluator """rankbased""" +764 98 dataset """kinships""" +764 98 model """transd""" +764 98 loss """softplus""" +764 98 regularizer """no""" +764 98 optimizer """adam""" +764 98 training_loop """owa""" +764 98 negative_sampler """basic""" +764 98 evaluator """rankbased""" +764 99 dataset """kinships""" +764 99 model """transd""" +764 99 loss """softplus""" +764 99 regularizer """no""" +764 99 optimizer """adam""" +764 99 training_loop """owa""" +764 99 negative_sampler """basic""" +764 99 evaluator """rankbased""" +764 100 dataset """kinships""" +764 100 model """transd""" +764 100 loss """softplus""" +764 100 regularizer """no""" +764 100 optimizer """adam""" +764 100 training_loop """owa""" +764 100 negative_sampler """basic""" +764 100 evaluator """rankbased""" +765 1 model.embedding_dim 0.0 +765 1 model.relation_dim 1.0 +765 1 loss.margin 6.844649192731517 +765 1 optimizer.lr 0.001777452331666547 +765 1 negative_sampler.num_negs_per_pos 10.0 +765 1 training.batch_size 2.0 +765 2 model.embedding_dim 0.0 +765 2 model.relation_dim 0.0 +765 2 loss.margin 2.8845224357335724 +765 2 optimizer.lr 0.002499671057077136 +765 2 negative_sampler.num_negs_per_pos 6.0 +765 2 training.batch_size 1.0 +765 3 model.embedding_dim 0.0 
+765 3 model.relation_dim 0.0 +765 3 loss.margin 7.3241112024637545 +765 3 optimizer.lr 0.0011399888397701605 +765 3 negative_sampler.num_negs_per_pos 67.0 +765 3 training.batch_size 2.0 +765 4 model.embedding_dim 0.0 +765 4 model.relation_dim 2.0 +765 4 loss.margin 4.951647177974523 +765 4 optimizer.lr 0.0016725243450575356 +765 4 negative_sampler.num_negs_per_pos 29.0 +765 4 training.batch_size 2.0 +765 5 model.embedding_dim 0.0 +765 5 model.relation_dim 2.0 +765 5 loss.margin 2.0977188631929904 +765 5 optimizer.lr 0.07780999135652207 +765 5 negative_sampler.num_negs_per_pos 36.0 +765 5 training.batch_size 2.0 +765 6 model.embedding_dim 1.0 +765 6 model.relation_dim 0.0 +765 6 loss.margin 8.299218814532502 +765 6 optimizer.lr 0.02296008086814717 +765 6 negative_sampler.num_negs_per_pos 91.0 +765 6 training.batch_size 1.0 +765 7 model.embedding_dim 1.0 +765 7 model.relation_dim 1.0 +765 7 loss.margin 4.570585777653123 +765 7 optimizer.lr 0.001621636327294477 +765 7 negative_sampler.num_negs_per_pos 22.0 +765 7 training.batch_size 2.0 +765 8 model.embedding_dim 1.0 +765 8 model.relation_dim 1.0 +765 8 loss.margin 7.2026435903374315 +765 8 optimizer.lr 0.032760604145708354 +765 8 negative_sampler.num_negs_per_pos 81.0 +765 8 training.batch_size 1.0 +765 9 model.embedding_dim 1.0 +765 9 model.relation_dim 0.0 +765 9 loss.margin 8.73202850604812 +765 9 optimizer.lr 0.0021589730538839357 +765 9 negative_sampler.num_negs_per_pos 3.0 +765 9 training.batch_size 1.0 +765 10 model.embedding_dim 1.0 +765 10 model.relation_dim 0.0 +765 10 loss.margin 1.8392132077901269 +765 10 optimizer.lr 0.004259874339755325 +765 10 negative_sampler.num_negs_per_pos 63.0 +765 10 training.batch_size 2.0 +765 11 model.embedding_dim 0.0 +765 11 model.relation_dim 1.0 +765 11 loss.margin 9.750806062682303 +765 11 optimizer.lr 0.06483088216817881 +765 11 negative_sampler.num_negs_per_pos 30.0 +765 11 training.batch_size 1.0 +765 12 model.embedding_dim 2.0 +765 12 model.relation_dim 0.0 +765 12 
loss.margin 6.601792220014335 +765 12 optimizer.lr 0.01755964181504486 +765 12 negative_sampler.num_negs_per_pos 92.0 +765 12 training.batch_size 0.0 +765 13 model.embedding_dim 2.0 +765 13 model.relation_dim 2.0 +765 13 loss.margin 2.8737258329929567 +765 13 optimizer.lr 0.038007469177862595 +765 13 negative_sampler.num_negs_per_pos 9.0 +765 13 training.batch_size 0.0 +765 14 model.embedding_dim 0.0 +765 14 model.relation_dim 1.0 +765 14 loss.margin 0.6582007281822987 +765 14 optimizer.lr 0.040133437600665296 +765 14 negative_sampler.num_negs_per_pos 40.0 +765 14 training.batch_size 2.0 +765 15 model.embedding_dim 0.0 +765 15 model.relation_dim 0.0 +765 15 loss.margin 0.9596665463676886 +765 15 optimizer.lr 0.011166463096469948 +765 15 negative_sampler.num_negs_per_pos 9.0 +765 15 training.batch_size 2.0 +765 16 model.embedding_dim 0.0 +765 16 model.relation_dim 0.0 +765 16 loss.margin 3.2053269397297344 +765 16 optimizer.lr 0.07069719940679496 +765 16 negative_sampler.num_negs_per_pos 66.0 +765 16 training.batch_size 2.0 +765 17 model.embedding_dim 0.0 +765 17 model.relation_dim 1.0 +765 17 loss.margin 1.00637100464765 +765 17 optimizer.lr 0.03816662443808744 +765 17 negative_sampler.num_negs_per_pos 20.0 +765 17 training.batch_size 0.0 +765 18 model.embedding_dim 0.0 +765 18 model.relation_dim 2.0 +765 18 loss.margin 8.526715037527701 +765 18 optimizer.lr 0.0012003606059674263 +765 18 negative_sampler.num_negs_per_pos 7.0 +765 18 training.batch_size 0.0 +765 19 model.embedding_dim 0.0 +765 19 model.relation_dim 2.0 +765 19 loss.margin 8.593842799248243 +765 19 optimizer.lr 0.0747920443752896 +765 19 negative_sampler.num_negs_per_pos 38.0 +765 19 training.batch_size 2.0 +765 20 model.embedding_dim 0.0 +765 20 model.relation_dim 2.0 +765 20 loss.margin 7.480253457626631 +765 20 optimizer.lr 0.003683481151964611 +765 20 negative_sampler.num_negs_per_pos 29.0 +765 20 training.batch_size 0.0 +765 21 model.embedding_dim 2.0 +765 21 model.relation_dim 1.0 +765 21 
loss.margin 0.8221588985161448 +765 21 optimizer.lr 0.0018313693730364533 +765 21 negative_sampler.num_negs_per_pos 15.0 +765 21 training.batch_size 0.0 +765 22 model.embedding_dim 2.0 +765 22 model.relation_dim 2.0 +765 22 loss.margin 8.111853179728477 +765 22 optimizer.lr 0.0050615577392138095 +765 22 negative_sampler.num_negs_per_pos 86.0 +765 22 training.batch_size 1.0 +765 23 model.embedding_dim 2.0 +765 23 model.relation_dim 2.0 +765 23 loss.margin 8.80322577224968 +765 23 optimizer.lr 0.02034344106633194 +765 23 negative_sampler.num_negs_per_pos 95.0 +765 23 training.batch_size 1.0 +765 24 model.embedding_dim 0.0 +765 24 model.relation_dim 1.0 +765 24 loss.margin 7.055470606314022 +765 24 optimizer.lr 0.029142049114032627 +765 24 negative_sampler.num_negs_per_pos 78.0 +765 24 training.batch_size 1.0 +765 25 model.embedding_dim 1.0 +765 25 model.relation_dim 1.0 +765 25 loss.margin 3.748774392979845 +765 25 optimizer.lr 0.0019979608600584555 +765 25 negative_sampler.num_negs_per_pos 84.0 +765 25 training.batch_size 1.0 +765 26 model.embedding_dim 2.0 +765 26 model.relation_dim 1.0 +765 26 loss.margin 6.437863750164386 +765 26 optimizer.lr 0.011629391251504556 +765 26 negative_sampler.num_negs_per_pos 28.0 +765 26 training.batch_size 2.0 +765 27 model.embedding_dim 2.0 +765 27 model.relation_dim 1.0 +765 27 loss.margin 3.87460417981167 +765 27 optimizer.lr 0.0027089291202826767 +765 27 negative_sampler.num_negs_per_pos 53.0 +765 27 training.batch_size 1.0 +765 28 model.embedding_dim 0.0 +765 28 model.relation_dim 1.0 +765 28 loss.margin 1.7934005092278154 +765 28 optimizer.lr 0.002698077125344536 +765 28 negative_sampler.num_negs_per_pos 12.0 +765 28 training.batch_size 0.0 +765 29 model.embedding_dim 2.0 +765 29 model.relation_dim 2.0 +765 29 loss.margin 8.24664100521176 +765 29 optimizer.lr 0.0033910300874031773 +765 29 negative_sampler.num_negs_per_pos 84.0 +765 29 training.batch_size 0.0 +765 30 model.embedding_dim 1.0 +765 30 model.relation_dim 2.0 +765 
30 loss.margin 1.6473729447467749 +765 30 optimizer.lr 0.009236752027973253 +765 30 negative_sampler.num_negs_per_pos 79.0 +765 30 training.batch_size 1.0 +765 31 model.embedding_dim 1.0 +765 31 model.relation_dim 2.0 +765 31 loss.margin 8.290356189251831 +765 31 optimizer.lr 0.016648743532518395 +765 31 negative_sampler.num_negs_per_pos 20.0 +765 31 training.batch_size 0.0 +765 32 model.embedding_dim 1.0 +765 32 model.relation_dim 1.0 +765 32 loss.margin 1.3310986711443191 +765 32 optimizer.lr 0.050742577460577296 +765 32 negative_sampler.num_negs_per_pos 8.0 +765 32 training.batch_size 1.0 +765 33 model.embedding_dim 2.0 +765 33 model.relation_dim 2.0 +765 33 loss.margin 9.919212837411484 +765 33 optimizer.lr 0.030645980824436612 +765 33 negative_sampler.num_negs_per_pos 94.0 +765 33 training.batch_size 2.0 +765 34 model.embedding_dim 2.0 +765 34 model.relation_dim 1.0 +765 34 loss.margin 5.495795088678221 +765 34 optimizer.lr 0.007898459202082769 +765 34 negative_sampler.num_negs_per_pos 9.0 +765 34 training.batch_size 0.0 +765 35 model.embedding_dim 1.0 +765 35 model.relation_dim 2.0 +765 35 loss.margin 7.222938391495795 +765 35 optimizer.lr 0.0012458649892110076 +765 35 negative_sampler.num_negs_per_pos 67.0 +765 35 training.batch_size 0.0 +765 36 model.embedding_dim 1.0 +765 36 model.relation_dim 1.0 +765 36 loss.margin 2.566904550704638 +765 36 optimizer.lr 0.05314416316150862 +765 36 negative_sampler.num_negs_per_pos 4.0 +765 36 training.batch_size 2.0 +765 37 model.embedding_dim 0.0 +765 37 model.relation_dim 1.0 +765 37 loss.margin 7.0293560403070705 +765 37 optimizer.lr 0.027499690996495532 +765 37 negative_sampler.num_negs_per_pos 87.0 +765 37 training.batch_size 1.0 +765 38 model.embedding_dim 0.0 +765 38 model.relation_dim 0.0 +765 38 loss.margin 8.553439996905103 +765 38 optimizer.lr 0.016543851765566983 +765 38 negative_sampler.num_negs_per_pos 52.0 +765 38 training.batch_size 1.0 +765 39 model.embedding_dim 2.0 +765 39 model.relation_dim 0.0 +765 
39 loss.margin 9.77751525337001 +765 39 optimizer.lr 0.068819126794878 +765 39 negative_sampler.num_negs_per_pos 94.0 +765 39 training.batch_size 1.0 +765 40 model.embedding_dim 1.0 +765 40 model.relation_dim 1.0 +765 40 loss.margin 8.145381694425378 +765 40 optimizer.lr 0.0020185243368169227 +765 40 negative_sampler.num_negs_per_pos 76.0 +765 40 training.batch_size 2.0 +765 41 model.embedding_dim 2.0 +765 41 model.relation_dim 1.0 +765 41 loss.margin 7.224391722208571 +765 41 optimizer.lr 0.04463218548013346 +765 41 negative_sampler.num_negs_per_pos 91.0 +765 41 training.batch_size 1.0 +765 42 model.embedding_dim 0.0 +765 42 model.relation_dim 0.0 +765 42 loss.margin 6.1636802584994665 +765 42 optimizer.lr 0.04380146623427381 +765 42 negative_sampler.num_negs_per_pos 3.0 +765 42 training.batch_size 2.0 +765 43 model.embedding_dim 1.0 +765 43 model.relation_dim 2.0 +765 43 loss.margin 7.719838435894715 +765 43 optimizer.lr 0.007679011387642758 +765 43 negative_sampler.num_negs_per_pos 74.0 +765 43 training.batch_size 1.0 +765 44 model.embedding_dim 0.0 +765 44 model.relation_dim 0.0 +765 44 loss.margin 8.697831154873349 +765 44 optimizer.lr 0.018494339472774034 +765 44 negative_sampler.num_negs_per_pos 73.0 +765 44 training.batch_size 0.0 +765 45 model.embedding_dim 1.0 +765 45 model.relation_dim 0.0 +765 45 loss.margin 7.705825458787996 +765 45 optimizer.lr 0.007423797730800441 +765 45 negative_sampler.num_negs_per_pos 84.0 +765 45 training.batch_size 2.0 +765 46 model.embedding_dim 1.0 +765 46 model.relation_dim 1.0 +765 46 loss.margin 4.420883343834309 +765 46 optimizer.lr 0.011116327318647756 +765 46 negative_sampler.num_negs_per_pos 57.0 +765 46 training.batch_size 2.0 +765 47 model.embedding_dim 1.0 +765 47 model.relation_dim 1.0 +765 47 loss.margin 6.714936793266383 +765 47 optimizer.lr 0.0053784596852560086 +765 47 negative_sampler.num_negs_per_pos 36.0 +765 47 training.batch_size 1.0 +765 48 model.embedding_dim 0.0 +765 48 model.relation_dim 2.0 +765 48 
loss.margin 5.786616385408326 +765 48 optimizer.lr 0.02828119626281139 +765 48 negative_sampler.num_negs_per_pos 66.0 +765 48 training.batch_size 1.0 +765 49 model.embedding_dim 0.0 +765 49 model.relation_dim 0.0 +765 49 loss.margin 9.268199667689847 +765 49 optimizer.lr 0.09073607886316541 +765 49 negative_sampler.num_negs_per_pos 4.0 +765 49 training.batch_size 1.0 +765 50 model.embedding_dim 2.0 +765 50 model.relation_dim 2.0 +765 50 loss.margin 5.522647462321608 +765 50 optimizer.lr 0.016548748933595186 +765 50 negative_sampler.num_negs_per_pos 50.0 +765 50 training.batch_size 0.0 +765 51 model.embedding_dim 0.0 +765 51 model.relation_dim 2.0 +765 51 loss.margin 3.4136213188021056 +765 51 optimizer.lr 0.007271552535633156 +765 51 negative_sampler.num_negs_per_pos 56.0 +765 51 training.batch_size 0.0 +765 52 model.embedding_dim 0.0 +765 52 model.relation_dim 0.0 +765 52 loss.margin 4.65392641742341 +765 52 optimizer.lr 0.0013079370925669128 +765 52 negative_sampler.num_negs_per_pos 24.0 +765 52 training.batch_size 2.0 +765 53 model.embedding_dim 2.0 +765 53 model.relation_dim 2.0 +765 53 loss.margin 8.47695921047828 +765 53 optimizer.lr 0.0010433205476470988 +765 53 negative_sampler.num_negs_per_pos 4.0 +765 53 training.batch_size 2.0 +765 54 model.embedding_dim 0.0 +765 54 model.relation_dim 2.0 +765 54 loss.margin 7.1384999901801 +765 54 optimizer.lr 0.0410477781966369 +765 54 negative_sampler.num_negs_per_pos 41.0 +765 54 training.batch_size 1.0 +765 55 model.embedding_dim 1.0 +765 55 model.relation_dim 2.0 +765 55 loss.margin 4.480893425917136 +765 55 optimizer.lr 0.035724272698137154 +765 55 negative_sampler.num_negs_per_pos 14.0 +765 55 training.batch_size 0.0 +765 56 model.embedding_dim 0.0 +765 56 model.relation_dim 0.0 +765 56 loss.margin 6.075543798779507 +765 56 optimizer.lr 0.026341391901535224 +765 56 negative_sampler.num_negs_per_pos 91.0 +765 56 training.batch_size 2.0 +765 57 model.embedding_dim 1.0 +765 57 model.relation_dim 1.0 +765 57 
loss.margin 8.248737826564074 +765 57 optimizer.lr 0.01346795742280916 +765 57 negative_sampler.num_negs_per_pos 22.0 +765 57 training.batch_size 1.0 +765 58 model.embedding_dim 1.0 +765 58 model.relation_dim 2.0 +765 58 loss.margin 9.337148106004863 +765 58 optimizer.lr 0.05924147574978677 +765 58 negative_sampler.num_negs_per_pos 51.0 +765 58 training.batch_size 2.0 +765 59 model.embedding_dim 0.0 +765 59 model.relation_dim 0.0 +765 59 loss.margin 2.369994488510732 +765 59 optimizer.lr 0.012307925709424388 +765 59 negative_sampler.num_negs_per_pos 7.0 +765 59 training.batch_size 0.0 +765 60 model.embedding_dim 0.0 +765 60 model.relation_dim 2.0 +765 60 loss.margin 9.460417777906192 +765 60 optimizer.lr 0.0011877439436399784 +765 60 negative_sampler.num_negs_per_pos 55.0 +765 60 training.batch_size 0.0 +765 61 model.embedding_dim 1.0 +765 61 model.relation_dim 2.0 +765 61 loss.margin 5.828492087737802 +765 61 optimizer.lr 0.09143439432611547 +765 61 negative_sampler.num_negs_per_pos 26.0 +765 61 training.batch_size 0.0 +765 62 model.embedding_dim 0.0 +765 62 model.relation_dim 0.0 +765 62 loss.margin 8.774299422719844 +765 62 optimizer.lr 0.0633658965826788 +765 62 negative_sampler.num_negs_per_pos 2.0 +765 62 training.batch_size 1.0 +765 63 model.embedding_dim 1.0 +765 63 model.relation_dim 0.0 +765 63 loss.margin 2.3160921567116306 +765 63 optimizer.lr 0.012202352693552106 +765 63 negative_sampler.num_negs_per_pos 56.0 +765 63 training.batch_size 0.0 +765 64 model.embedding_dim 1.0 +765 64 model.relation_dim 2.0 +765 64 loss.margin 6.082529785554765 +765 64 optimizer.lr 0.05029276630378217 +765 64 negative_sampler.num_negs_per_pos 34.0 +765 64 training.batch_size 1.0 +765 65 model.embedding_dim 1.0 +765 65 model.relation_dim 0.0 +765 65 loss.margin 0.8837487613512529 +765 65 optimizer.lr 0.0080692466223527 +765 65 negative_sampler.num_negs_per_pos 38.0 +765 65 training.batch_size 1.0 +765 66 model.embedding_dim 2.0 +765 66 model.relation_dim 0.0 +765 66 
loss.margin 7.303759906819413 +765 66 optimizer.lr 0.0010200340148607493 +765 66 negative_sampler.num_negs_per_pos 11.0 +765 66 training.batch_size 2.0 +765 67 model.embedding_dim 0.0 +765 67 model.relation_dim 0.0 +765 67 loss.margin 6.175879354396467 +765 67 optimizer.lr 0.03604799448638368 +765 67 negative_sampler.num_negs_per_pos 80.0 +765 67 training.batch_size 1.0 +765 68 model.embedding_dim 0.0 +765 68 model.relation_dim 0.0 +765 68 loss.margin 5.34788227251331 +765 68 optimizer.lr 0.030060648320580093 +765 68 negative_sampler.num_negs_per_pos 13.0 +765 68 training.batch_size 0.0 +765 69 model.embedding_dim 1.0 +765 69 model.relation_dim 2.0 +765 69 loss.margin 4.347603762204243 +765 69 optimizer.lr 0.06153769318997738 +765 69 negative_sampler.num_negs_per_pos 40.0 +765 69 training.batch_size 0.0 +765 70 model.embedding_dim 0.0 +765 70 model.relation_dim 1.0 +765 70 loss.margin 6.484741012650104 +765 70 optimizer.lr 0.0577880740281324 +765 70 negative_sampler.num_negs_per_pos 6.0 +765 70 training.batch_size 2.0 +765 71 model.embedding_dim 2.0 +765 71 model.relation_dim 1.0 +765 71 loss.margin 0.5981589736471478 +765 71 optimizer.lr 0.012660553079354866 +765 71 negative_sampler.num_negs_per_pos 55.0 +765 71 training.batch_size 2.0 +765 72 model.embedding_dim 2.0 +765 72 model.relation_dim 2.0 +765 72 loss.margin 3.5296205442675674 +765 72 optimizer.lr 0.013133577720206874 +765 72 negative_sampler.num_negs_per_pos 26.0 +765 72 training.batch_size 2.0 +765 73 model.embedding_dim 1.0 +765 73 model.relation_dim 2.0 +765 73 loss.margin 5.515127406627539 +765 73 optimizer.lr 0.001496849548822656 +765 73 negative_sampler.num_negs_per_pos 60.0 +765 73 training.batch_size 1.0 +765 74 model.embedding_dim 0.0 +765 74 model.relation_dim 2.0 +765 74 loss.margin 0.6879851286312179 +765 74 optimizer.lr 0.004944210687345293 +765 74 negative_sampler.num_negs_per_pos 69.0 +765 74 training.batch_size 2.0 +765 75 model.embedding_dim 2.0 +765 75 model.relation_dim 2.0 +765 75 
loss.margin 9.156050660906088 +765 75 optimizer.lr 0.01633057308259405 +765 75 negative_sampler.num_negs_per_pos 62.0 +765 75 training.batch_size 0.0 +765 76 model.embedding_dim 0.0 +765 76 model.relation_dim 1.0 +765 76 loss.margin 5.437358727199629 +765 76 optimizer.lr 0.03549760270195937 +765 76 negative_sampler.num_negs_per_pos 47.0 +765 76 training.batch_size 2.0 +765 77 model.embedding_dim 1.0 +765 77 model.relation_dim 1.0 +765 77 loss.margin 8.56513812654052 +765 77 optimizer.lr 0.0033788039295407682 +765 77 negative_sampler.num_negs_per_pos 88.0 +765 77 training.batch_size 0.0 +765 78 model.embedding_dim 2.0 +765 78 model.relation_dim 2.0 +765 78 loss.margin 1.3947201133108775 +765 78 optimizer.lr 0.0015274035556274556 +765 78 negative_sampler.num_negs_per_pos 98.0 +765 78 training.batch_size 0.0 +765 79 model.embedding_dim 2.0 +765 79 model.relation_dim 2.0 +765 79 loss.margin 5.862014427417158 +765 79 optimizer.lr 0.06057347589128597 +765 79 negative_sampler.num_negs_per_pos 3.0 +765 79 training.batch_size 1.0 +765 80 model.embedding_dim 2.0 +765 80 model.relation_dim 2.0 +765 80 loss.margin 8.3569006514135 +765 80 optimizer.lr 0.011722730936538767 +765 80 negative_sampler.num_negs_per_pos 28.0 +765 80 training.batch_size 2.0 +765 81 model.embedding_dim 1.0 +765 81 model.relation_dim 0.0 +765 81 loss.margin 7.024273381567151 +765 81 optimizer.lr 0.06047939094732166 +765 81 negative_sampler.num_negs_per_pos 44.0 +765 81 training.batch_size 0.0 +765 82 model.embedding_dim 0.0 +765 82 model.relation_dim 0.0 +765 82 loss.margin 0.8173916982780591 +765 82 optimizer.lr 0.0014507732543721362 +765 82 negative_sampler.num_negs_per_pos 97.0 +765 82 training.batch_size 0.0 +765 83 model.embedding_dim 0.0 +765 83 model.relation_dim 2.0 +765 83 loss.margin 5.569814748444475 +765 83 optimizer.lr 0.018525376471617033 +765 83 negative_sampler.num_negs_per_pos 58.0 +765 83 training.batch_size 1.0 +765 84 model.embedding_dim 0.0 +765 84 model.relation_dim 1.0 +765 84 
loss.margin 9.867996510462445 +765 84 optimizer.lr 0.0026784333476732305 +765 84 negative_sampler.num_negs_per_pos 15.0 +765 84 training.batch_size 1.0 +765 85 model.embedding_dim 0.0 +765 85 model.relation_dim 1.0 +765 85 loss.margin 4.3028253319981005 +765 85 optimizer.lr 0.05810430000999431 +765 85 negative_sampler.num_negs_per_pos 7.0 +765 85 training.batch_size 1.0 +765 86 model.embedding_dim 0.0 +765 86 model.relation_dim 0.0 +765 86 loss.margin 2.827958805845582 +765 86 optimizer.lr 0.0037590034821148354 +765 86 negative_sampler.num_negs_per_pos 46.0 +765 86 training.batch_size 1.0 +765 87 model.embedding_dim 1.0 +765 87 model.relation_dim 2.0 +765 87 loss.margin 1.0271594962134412 +765 87 optimizer.lr 0.0713638377633371 +765 87 negative_sampler.num_negs_per_pos 0.0 +765 87 training.batch_size 0.0 +765 88 model.embedding_dim 2.0 +765 88 model.relation_dim 2.0 +765 88 loss.margin 4.795176446573254 +765 88 optimizer.lr 0.025148209975314837 +765 88 negative_sampler.num_negs_per_pos 47.0 +765 88 training.batch_size 2.0 +765 89 model.embedding_dim 2.0 +765 89 model.relation_dim 0.0 +765 89 loss.margin 8.747385559943842 +765 89 optimizer.lr 0.0037452061589297236 +765 89 negative_sampler.num_negs_per_pos 17.0 +765 89 training.batch_size 1.0 +765 90 model.embedding_dim 1.0 +765 90 model.relation_dim 2.0 +765 90 loss.margin 3.379502508702362 +765 90 optimizer.lr 0.0020633730188912374 +765 90 negative_sampler.num_negs_per_pos 54.0 +765 90 training.batch_size 1.0 +765 91 model.embedding_dim 1.0 +765 91 model.relation_dim 1.0 +765 91 loss.margin 3.7167498299213158 +765 91 optimizer.lr 0.008047674776993653 +765 91 negative_sampler.num_negs_per_pos 12.0 +765 91 training.batch_size 1.0 +765 92 model.embedding_dim 2.0 +765 92 model.relation_dim 0.0 +765 92 loss.margin 4.542167665767999 +765 92 optimizer.lr 0.0011160612356181396 +765 92 negative_sampler.num_negs_per_pos 53.0 +765 92 training.batch_size 1.0 +765 93 model.embedding_dim 0.0 +765 93 model.relation_dim 1.0 +765 
93 loss.margin 1.0116966110541872 +765 93 optimizer.lr 0.0027716983438311944 +765 93 negative_sampler.num_negs_per_pos 91.0 +765 93 training.batch_size 2.0 +765 94 model.embedding_dim 2.0 +765 94 model.relation_dim 2.0 +765 94 loss.margin 2.4325329372993245 +765 94 optimizer.lr 0.031113100033349717 +765 94 negative_sampler.num_negs_per_pos 37.0 +765 94 training.batch_size 2.0 +765 95 model.embedding_dim 1.0 +765 95 model.relation_dim 2.0 +765 95 loss.margin 3.199273507149098 +765 95 optimizer.lr 0.001709034147014544 +765 95 negative_sampler.num_negs_per_pos 63.0 +765 95 training.batch_size 2.0 +765 96 model.embedding_dim 2.0 +765 96 model.relation_dim 0.0 +765 96 loss.margin 9.758660556797278 +765 96 optimizer.lr 0.03184398587629556 +765 96 negative_sampler.num_negs_per_pos 30.0 +765 96 training.batch_size 2.0 +765 97 model.embedding_dim 2.0 +765 97 model.relation_dim 2.0 +765 97 loss.margin 4.7189456232743625 +765 97 optimizer.lr 0.0011837753362459323 +765 97 negative_sampler.num_negs_per_pos 44.0 +765 97 training.batch_size 1.0 +765 98 model.embedding_dim 0.0 +765 98 model.relation_dim 1.0 +765 98 loss.margin 8.938080509420939 +765 98 optimizer.lr 0.011852638067152789 +765 98 negative_sampler.num_negs_per_pos 6.0 +765 98 training.batch_size 0.0 +765 99 model.embedding_dim 2.0 +765 99 model.relation_dim 1.0 +765 99 loss.margin 7.3866254844930275 +765 99 optimizer.lr 0.0020021943056135064 +765 99 negative_sampler.num_negs_per_pos 69.0 +765 99 training.batch_size 1.0 +765 100 model.embedding_dim 2.0 +765 100 model.relation_dim 2.0 +765 100 loss.margin 1.7806220101794068 +765 100 optimizer.lr 0.002685166459333749 +765 100 negative_sampler.num_negs_per_pos 91.0 +765 100 training.batch_size 2.0 +765 1 dataset """kinships""" +765 1 model """transd""" +765 1 loss """marginranking""" +765 1 regularizer """no""" +765 1 optimizer """adam""" +765 1 training_loop """owa""" +765 1 negative_sampler """basic""" +765 1 evaluator """rankbased""" +765 2 dataset """kinships""" +765 
2 model """transd""" +765 2 loss """marginranking""" +765 2 regularizer """no""" +765 2 optimizer """adam""" +765 2 training_loop """owa""" +765 2 negative_sampler """basic""" +765 2 evaluator """rankbased""" +765 3 dataset """kinships""" +765 3 model """transd""" +765 3 loss """marginranking""" +765 3 regularizer """no""" +765 3 optimizer """adam""" +765 3 training_loop """owa""" +765 3 negative_sampler """basic""" +765 3 evaluator """rankbased""" +765 4 dataset """kinships""" +765 4 model """transd""" +765 4 loss """marginranking""" +765 4 regularizer """no""" +765 4 optimizer """adam""" +765 4 training_loop """owa""" +765 4 negative_sampler """basic""" +765 4 evaluator """rankbased""" +765 5 dataset """kinships""" +765 5 model """transd""" +765 5 loss """marginranking""" +765 5 regularizer """no""" +765 5 optimizer """adam""" +765 5 training_loop """owa""" +765 5 negative_sampler """basic""" +765 5 evaluator """rankbased""" +765 6 dataset """kinships""" +765 6 model """transd""" +765 6 loss """marginranking""" +765 6 regularizer """no""" +765 6 optimizer """adam""" +765 6 training_loop """owa""" +765 6 negative_sampler """basic""" +765 6 evaluator """rankbased""" +765 7 dataset """kinships""" +765 7 model """transd""" +765 7 loss """marginranking""" +765 7 regularizer """no""" +765 7 optimizer """adam""" +765 7 training_loop """owa""" +765 7 negative_sampler """basic""" +765 7 evaluator """rankbased""" +765 8 dataset """kinships""" +765 8 model """transd""" +765 8 loss """marginranking""" +765 8 regularizer """no""" +765 8 optimizer """adam""" +765 8 training_loop """owa""" +765 8 negative_sampler """basic""" +765 8 evaluator """rankbased""" +765 9 dataset """kinships""" +765 9 model """transd""" +765 9 loss """marginranking""" +765 9 regularizer """no""" +765 9 optimizer """adam""" +765 9 training_loop """owa""" +765 9 negative_sampler """basic""" +765 9 evaluator """rankbased""" +765 10 dataset """kinships""" +765 10 model """transd""" +765 10 loss 
"""marginranking""" +765 10 regularizer """no""" +765 10 optimizer """adam""" +765 10 training_loop """owa""" +765 10 negative_sampler """basic""" +765 10 evaluator """rankbased""" +765 11 dataset """kinships""" +765 11 model """transd""" +765 11 loss """marginranking""" +765 11 regularizer """no""" +765 11 optimizer """adam""" +765 11 training_loop """owa""" +765 11 negative_sampler """basic""" +765 11 evaluator """rankbased""" +765 12 dataset """kinships""" +765 12 model """transd""" +765 12 loss """marginranking""" +765 12 regularizer """no""" +765 12 optimizer """adam""" +765 12 training_loop """owa""" +765 12 negative_sampler """basic""" +765 12 evaluator """rankbased""" +765 13 dataset """kinships""" +765 13 model """transd""" +765 13 loss """marginranking""" +765 13 regularizer """no""" +765 13 optimizer """adam""" +765 13 training_loop """owa""" +765 13 negative_sampler """basic""" +765 13 evaluator """rankbased""" +765 14 dataset """kinships""" +765 14 model """transd""" +765 14 loss """marginranking""" +765 14 regularizer """no""" +765 14 optimizer """adam""" +765 14 training_loop """owa""" +765 14 negative_sampler """basic""" +765 14 evaluator """rankbased""" +765 15 dataset """kinships""" +765 15 model """transd""" +765 15 loss """marginranking""" +765 15 regularizer """no""" +765 15 optimizer """adam""" +765 15 training_loop """owa""" +765 15 negative_sampler """basic""" +765 15 evaluator """rankbased""" +765 16 dataset """kinships""" +765 16 model """transd""" +765 16 loss """marginranking""" +765 16 regularizer """no""" +765 16 optimizer """adam""" +765 16 training_loop """owa""" +765 16 negative_sampler """basic""" +765 16 evaluator """rankbased""" +765 17 dataset """kinships""" +765 17 model """transd""" +765 17 loss """marginranking""" +765 17 regularizer """no""" +765 17 optimizer """adam""" +765 17 training_loop """owa""" +765 17 negative_sampler """basic""" +765 17 evaluator """rankbased""" +765 18 dataset """kinships""" +765 18 model 
"""transd""" +765 18 loss """marginranking""" +765 18 regularizer """no""" +765 18 optimizer """adam""" +765 18 training_loop """owa""" +765 18 negative_sampler """basic""" +765 18 evaluator """rankbased""" +765 19 dataset """kinships""" +765 19 model """transd""" +765 19 loss """marginranking""" +765 19 regularizer """no""" +765 19 optimizer """adam""" +765 19 training_loop """owa""" +765 19 negative_sampler """basic""" +765 19 evaluator """rankbased""" +765 20 dataset """kinships""" +765 20 model """transd""" +765 20 loss """marginranking""" +765 20 regularizer """no""" +765 20 optimizer """adam""" +765 20 training_loop """owa""" +765 20 negative_sampler """basic""" +765 20 evaluator """rankbased""" +765 21 dataset """kinships""" +765 21 model """transd""" +765 21 loss """marginranking""" +765 21 regularizer """no""" +765 21 optimizer """adam""" +765 21 training_loop """owa""" +765 21 negative_sampler """basic""" +765 21 evaluator """rankbased""" +765 22 dataset """kinships""" +765 22 model """transd""" +765 22 loss """marginranking""" +765 22 regularizer """no""" +765 22 optimizer """adam""" +765 22 training_loop """owa""" +765 22 negative_sampler """basic""" +765 22 evaluator """rankbased""" +765 23 dataset """kinships""" +765 23 model """transd""" +765 23 loss """marginranking""" +765 23 regularizer """no""" +765 23 optimizer """adam""" +765 23 training_loop """owa""" +765 23 negative_sampler """basic""" +765 23 evaluator """rankbased""" +765 24 dataset """kinships""" +765 24 model """transd""" +765 24 loss """marginranking""" +765 24 regularizer """no""" +765 24 optimizer """adam""" +765 24 training_loop """owa""" +765 24 negative_sampler """basic""" +765 24 evaluator """rankbased""" +765 25 dataset """kinships""" +765 25 model """transd""" +765 25 loss """marginranking""" +765 25 regularizer """no""" +765 25 optimizer """adam""" +765 25 training_loop """owa""" +765 25 negative_sampler """basic""" +765 25 evaluator """rankbased""" +765 26 dataset 
"""kinships""" +765 26 model """transd""" +765 26 loss """marginranking""" +765 26 regularizer """no""" +765 26 optimizer """adam""" +765 26 training_loop """owa""" +765 26 negative_sampler """basic""" +765 26 evaluator """rankbased""" +765 27 dataset """kinships""" +765 27 model """transd""" +765 27 loss """marginranking""" +765 27 regularizer """no""" +765 27 optimizer """adam""" +765 27 training_loop """owa""" +765 27 negative_sampler """basic""" +765 27 evaluator """rankbased""" +765 28 dataset """kinships""" +765 28 model """transd""" +765 28 loss """marginranking""" +765 28 regularizer """no""" +765 28 optimizer """adam""" +765 28 training_loop """owa""" +765 28 negative_sampler """basic""" +765 28 evaluator """rankbased""" +765 29 dataset """kinships""" +765 29 model """transd""" +765 29 loss """marginranking""" +765 29 regularizer """no""" +765 29 optimizer """adam""" +765 29 training_loop """owa""" +765 29 negative_sampler """basic""" +765 29 evaluator """rankbased""" +765 30 dataset """kinships""" +765 30 model """transd""" +765 30 loss """marginranking""" +765 30 regularizer """no""" +765 30 optimizer """adam""" +765 30 training_loop """owa""" +765 30 negative_sampler """basic""" +765 30 evaluator """rankbased""" +765 31 dataset """kinships""" +765 31 model """transd""" +765 31 loss """marginranking""" +765 31 regularizer """no""" +765 31 optimizer """adam""" +765 31 training_loop """owa""" +765 31 negative_sampler """basic""" +765 31 evaluator """rankbased""" +765 32 dataset """kinships""" +765 32 model """transd""" +765 32 loss """marginranking""" +765 32 regularizer """no""" +765 32 optimizer """adam""" +765 32 training_loop """owa""" +765 32 negative_sampler """basic""" +765 32 evaluator """rankbased""" +765 33 dataset """kinships""" +765 33 model """transd""" +765 33 loss """marginranking""" +765 33 regularizer """no""" +765 33 optimizer """adam""" +765 33 training_loop """owa""" +765 33 negative_sampler """basic""" +765 33 evaluator """rankbased""" 
+765 34 dataset """kinships""" +765 34 model """transd""" +765 34 loss """marginranking""" +765 34 regularizer """no""" +765 34 optimizer """adam""" +765 34 training_loop """owa""" +765 34 negative_sampler """basic""" +765 34 evaluator """rankbased""" +765 35 dataset """kinships""" +765 35 model """transd""" +765 35 loss """marginranking""" +765 35 regularizer """no""" +765 35 optimizer """adam""" +765 35 training_loop """owa""" +765 35 negative_sampler """basic""" +765 35 evaluator """rankbased""" +765 36 dataset """kinships""" +765 36 model """transd""" +765 36 loss """marginranking""" +765 36 regularizer """no""" +765 36 optimizer """adam""" +765 36 training_loop """owa""" +765 36 negative_sampler """basic""" +765 36 evaluator """rankbased""" +765 37 dataset """kinships""" +765 37 model """transd""" +765 37 loss """marginranking""" +765 37 regularizer """no""" +765 37 optimizer """adam""" +765 37 training_loop """owa""" +765 37 negative_sampler """basic""" +765 37 evaluator """rankbased""" +765 38 dataset """kinships""" +765 38 model """transd""" +765 38 loss """marginranking""" +765 38 regularizer """no""" +765 38 optimizer """adam""" +765 38 training_loop """owa""" +765 38 negative_sampler """basic""" +765 38 evaluator """rankbased""" +765 39 dataset """kinships""" +765 39 model """transd""" +765 39 loss """marginranking""" +765 39 regularizer """no""" +765 39 optimizer """adam""" +765 39 training_loop """owa""" +765 39 negative_sampler """basic""" +765 39 evaluator """rankbased""" +765 40 dataset """kinships""" +765 40 model """transd""" +765 40 loss """marginranking""" +765 40 regularizer """no""" +765 40 optimizer """adam""" +765 40 training_loop """owa""" +765 40 negative_sampler """basic""" +765 40 evaluator """rankbased""" +765 41 dataset """kinships""" +765 41 model """transd""" +765 41 loss """marginranking""" +765 41 regularizer """no""" +765 41 optimizer """adam""" +765 41 training_loop """owa""" +765 41 negative_sampler """basic""" +765 41 evaluator 
"""rankbased""" +765 42 dataset """kinships""" +765 42 model """transd""" +765 42 loss """marginranking""" +765 42 regularizer """no""" +765 42 optimizer """adam""" +765 42 training_loop """owa""" +765 42 negative_sampler """basic""" +765 42 evaluator """rankbased""" +765 43 dataset """kinships""" +765 43 model """transd""" +765 43 loss """marginranking""" +765 43 regularizer """no""" +765 43 optimizer """adam""" +765 43 training_loop """owa""" +765 43 negative_sampler """basic""" +765 43 evaluator """rankbased""" +765 44 dataset """kinships""" +765 44 model """transd""" +765 44 loss """marginranking""" +765 44 regularizer """no""" +765 44 optimizer """adam""" +765 44 training_loop """owa""" +765 44 negative_sampler """basic""" +765 44 evaluator """rankbased""" +765 45 dataset """kinships""" +765 45 model """transd""" +765 45 loss """marginranking""" +765 45 regularizer """no""" +765 45 optimizer """adam""" +765 45 training_loop """owa""" +765 45 negative_sampler """basic""" +765 45 evaluator """rankbased""" +765 46 dataset """kinships""" +765 46 model """transd""" +765 46 loss """marginranking""" +765 46 regularizer """no""" +765 46 optimizer """adam""" +765 46 training_loop """owa""" +765 46 negative_sampler """basic""" +765 46 evaluator """rankbased""" +765 47 dataset """kinships""" +765 47 model """transd""" +765 47 loss """marginranking""" +765 47 regularizer """no""" +765 47 optimizer """adam""" +765 47 training_loop """owa""" +765 47 negative_sampler """basic""" +765 47 evaluator """rankbased""" +765 48 dataset """kinships""" +765 48 model """transd""" +765 48 loss """marginranking""" +765 48 regularizer """no""" +765 48 optimizer """adam""" +765 48 training_loop """owa""" +765 48 negative_sampler """basic""" +765 48 evaluator """rankbased""" +765 49 dataset """kinships""" +765 49 model """transd""" +765 49 loss """marginranking""" +765 49 regularizer """no""" +765 49 optimizer """adam""" +765 49 training_loop """owa""" +765 49 negative_sampler """basic""" 
+765 49 evaluator """rankbased""" +765 50 dataset """kinships""" +765 50 model """transd""" +765 50 loss """marginranking""" +765 50 regularizer """no""" +765 50 optimizer """adam""" +765 50 training_loop """owa""" +765 50 negative_sampler """basic""" +765 50 evaluator """rankbased""" +765 51 dataset """kinships""" +765 51 model """transd""" +765 51 loss """marginranking""" +765 51 regularizer """no""" +765 51 optimizer """adam""" +765 51 training_loop """owa""" +765 51 negative_sampler """basic""" +765 51 evaluator """rankbased""" +765 52 dataset """kinships""" +765 52 model """transd""" +765 52 loss """marginranking""" +765 52 regularizer """no""" +765 52 optimizer """adam""" +765 52 training_loop """owa""" +765 52 negative_sampler """basic""" +765 52 evaluator """rankbased""" +765 53 dataset """kinships""" +765 53 model """transd""" +765 53 loss """marginranking""" +765 53 regularizer """no""" +765 53 optimizer """adam""" +765 53 training_loop """owa""" +765 53 negative_sampler """basic""" +765 53 evaluator """rankbased""" +765 54 dataset """kinships""" +765 54 model """transd""" +765 54 loss """marginranking""" +765 54 regularizer """no""" +765 54 optimizer """adam""" +765 54 training_loop """owa""" +765 54 negative_sampler """basic""" +765 54 evaluator """rankbased""" +765 55 dataset """kinships""" +765 55 model """transd""" +765 55 loss """marginranking""" +765 55 regularizer """no""" +765 55 optimizer """adam""" +765 55 training_loop """owa""" +765 55 negative_sampler """basic""" +765 55 evaluator """rankbased""" +765 56 dataset """kinships""" +765 56 model """transd""" +765 56 loss """marginranking""" +765 56 regularizer """no""" +765 56 optimizer """adam""" +765 56 training_loop """owa""" +765 56 negative_sampler """basic""" +765 56 evaluator """rankbased""" +765 57 dataset """kinships""" +765 57 model """transd""" +765 57 loss """marginranking""" +765 57 regularizer """no""" +765 57 optimizer """adam""" +765 57 training_loop """owa""" +765 57 
negative_sampler """basic""" +765 57 evaluator """rankbased""" +765 58 dataset """kinships""" +765 58 model """transd""" +765 58 loss """marginranking""" +765 58 regularizer """no""" +765 58 optimizer """adam""" +765 58 training_loop """owa""" +765 58 negative_sampler """basic""" +765 58 evaluator """rankbased""" +765 59 dataset """kinships""" +765 59 model """transd""" +765 59 loss """marginranking""" +765 59 regularizer """no""" +765 59 optimizer """adam""" +765 59 training_loop """owa""" +765 59 negative_sampler """basic""" +765 59 evaluator """rankbased""" +765 60 dataset """kinships""" +765 60 model """transd""" +765 60 loss """marginranking""" +765 60 regularizer """no""" +765 60 optimizer """adam""" +765 60 training_loop """owa""" +765 60 negative_sampler """basic""" +765 60 evaluator """rankbased""" +765 61 dataset """kinships""" +765 61 model """transd""" +765 61 loss """marginranking""" +765 61 regularizer """no""" +765 61 optimizer """adam""" +765 61 training_loop """owa""" +765 61 negative_sampler """basic""" +765 61 evaluator """rankbased""" +765 62 dataset """kinships""" +765 62 model """transd""" +765 62 loss """marginranking""" +765 62 regularizer """no""" +765 62 optimizer """adam""" +765 62 training_loop """owa""" +765 62 negative_sampler """basic""" +765 62 evaluator """rankbased""" +765 63 dataset """kinships""" +765 63 model """transd""" +765 63 loss """marginranking""" +765 63 regularizer """no""" +765 63 optimizer """adam""" +765 63 training_loop """owa""" +765 63 negative_sampler """basic""" +765 63 evaluator """rankbased""" +765 64 dataset """kinships""" +765 64 model """transd""" +765 64 loss """marginranking""" +765 64 regularizer """no""" +765 64 optimizer """adam""" +765 64 training_loop """owa""" +765 64 negative_sampler """basic""" +765 64 evaluator """rankbased""" +765 65 dataset """kinships""" +765 65 model """transd""" +765 65 loss """marginranking""" +765 65 regularizer """no""" +765 65 optimizer """adam""" +765 65 training_loop 
"""owa""" +765 65 negative_sampler """basic""" +765 65 evaluator """rankbased""" +765 66 dataset """kinships""" +765 66 model """transd""" +765 66 loss """marginranking""" +765 66 regularizer """no""" +765 66 optimizer """adam""" +765 66 training_loop """owa""" +765 66 negative_sampler """basic""" +765 66 evaluator """rankbased""" +765 67 dataset """kinships""" +765 67 model """transd""" +765 67 loss """marginranking""" +765 67 regularizer """no""" +765 67 optimizer """adam""" +765 67 training_loop """owa""" +765 67 negative_sampler """basic""" +765 67 evaluator """rankbased""" +765 68 dataset """kinships""" +765 68 model """transd""" +765 68 loss """marginranking""" +765 68 regularizer """no""" +765 68 optimizer """adam""" +765 68 training_loop """owa""" +765 68 negative_sampler """basic""" +765 68 evaluator """rankbased""" +765 69 dataset """kinships""" +765 69 model """transd""" +765 69 loss """marginranking""" +765 69 regularizer """no""" +765 69 optimizer """adam""" +765 69 training_loop """owa""" +765 69 negative_sampler """basic""" +765 69 evaluator """rankbased""" +765 70 dataset """kinships""" +765 70 model """transd""" +765 70 loss """marginranking""" +765 70 regularizer """no""" +765 70 optimizer """adam""" +765 70 training_loop """owa""" +765 70 negative_sampler """basic""" +765 70 evaluator """rankbased""" +765 71 dataset """kinships""" +765 71 model """transd""" +765 71 loss """marginranking""" +765 71 regularizer """no""" +765 71 optimizer """adam""" +765 71 training_loop """owa""" +765 71 negative_sampler """basic""" +765 71 evaluator """rankbased""" +765 72 dataset """kinships""" +765 72 model """transd""" +765 72 loss """marginranking""" +765 72 regularizer """no""" +765 72 optimizer """adam""" +765 72 training_loop """owa""" +765 72 negative_sampler """basic""" +765 72 evaluator """rankbased""" +765 73 dataset """kinships""" +765 73 model """transd""" +765 73 loss """marginranking""" +765 73 regularizer """no""" +765 73 optimizer """adam""" +765 
73 training_loop """owa""" +765 73 negative_sampler """basic""" +765 73 evaluator """rankbased""" +765 74 dataset """kinships""" +765 74 model """transd""" +765 74 loss """marginranking""" +765 74 regularizer """no""" +765 74 optimizer """adam""" +765 74 training_loop """owa""" +765 74 negative_sampler """basic""" +765 74 evaluator """rankbased""" +765 75 dataset """kinships""" +765 75 model """transd""" +765 75 loss """marginranking""" +765 75 regularizer """no""" +765 75 optimizer """adam""" +765 75 training_loop """owa""" +765 75 negative_sampler """basic""" +765 75 evaluator """rankbased""" +765 76 dataset """kinships""" +765 76 model """transd""" +765 76 loss """marginranking""" +765 76 regularizer """no""" +765 76 optimizer """adam""" +765 76 training_loop """owa""" +765 76 negative_sampler """basic""" +765 76 evaluator """rankbased""" +765 77 dataset """kinships""" +765 77 model """transd""" +765 77 loss """marginranking""" +765 77 regularizer """no""" +765 77 optimizer """adam""" +765 77 training_loop """owa""" +765 77 negative_sampler """basic""" +765 77 evaluator """rankbased""" +765 78 dataset """kinships""" +765 78 model """transd""" +765 78 loss """marginranking""" +765 78 regularizer """no""" +765 78 optimizer """adam""" +765 78 training_loop """owa""" +765 78 negative_sampler """basic""" +765 78 evaluator """rankbased""" +765 79 dataset """kinships""" +765 79 model """transd""" +765 79 loss """marginranking""" +765 79 regularizer """no""" +765 79 optimizer """adam""" +765 79 training_loop """owa""" +765 79 negative_sampler """basic""" +765 79 evaluator """rankbased""" +765 80 dataset """kinships""" +765 80 model """transd""" +765 80 loss """marginranking""" +765 80 regularizer """no""" +765 80 optimizer """adam""" +765 80 training_loop """owa""" +765 80 negative_sampler """basic""" +765 80 evaluator """rankbased""" +765 81 dataset """kinships""" +765 81 model """transd""" +765 81 loss """marginranking""" +765 81 regularizer """no""" +765 81 optimizer 
"""adam""" +765 81 training_loop """owa""" +765 81 negative_sampler """basic""" +765 81 evaluator """rankbased""" +765 82 dataset """kinships""" +765 82 model """transd""" +765 82 loss """marginranking""" +765 82 regularizer """no""" +765 82 optimizer """adam""" +765 82 training_loop """owa""" +765 82 negative_sampler """basic""" +765 82 evaluator """rankbased""" +765 83 dataset """kinships""" +765 83 model """transd""" +765 83 loss """marginranking""" +765 83 regularizer """no""" +765 83 optimizer """adam""" +765 83 training_loop """owa""" +765 83 negative_sampler """basic""" +765 83 evaluator """rankbased""" +765 84 dataset """kinships""" +765 84 model """transd""" +765 84 loss """marginranking""" +765 84 regularizer """no""" +765 84 optimizer """adam""" +765 84 training_loop """owa""" +765 84 negative_sampler """basic""" +765 84 evaluator """rankbased""" +765 85 dataset """kinships""" +765 85 model """transd""" +765 85 loss """marginranking""" +765 85 regularizer """no""" +765 85 optimizer """adam""" +765 85 training_loop """owa""" +765 85 negative_sampler """basic""" +765 85 evaluator """rankbased""" +765 86 dataset """kinships""" +765 86 model """transd""" +765 86 loss """marginranking""" +765 86 regularizer """no""" +765 86 optimizer """adam""" +765 86 training_loop """owa""" +765 86 negative_sampler """basic""" +765 86 evaluator """rankbased""" +765 87 dataset """kinships""" +765 87 model """transd""" +765 87 loss """marginranking""" +765 87 regularizer """no""" +765 87 optimizer """adam""" +765 87 training_loop """owa""" +765 87 negative_sampler """basic""" +765 87 evaluator """rankbased""" +765 88 dataset """kinships""" +765 88 model """transd""" +765 88 loss """marginranking""" +765 88 regularizer """no""" +765 88 optimizer """adam""" +765 88 training_loop """owa""" +765 88 negative_sampler """basic""" +765 88 evaluator """rankbased""" +765 89 dataset """kinships""" +765 89 model """transd""" +765 89 loss """marginranking""" +765 89 regularizer """no""" 
+765 89 optimizer """adam""" +765 89 training_loop """owa""" +765 89 negative_sampler """basic""" +765 89 evaluator """rankbased""" +765 90 dataset """kinships""" +765 90 model """transd""" +765 90 loss """marginranking""" +765 90 regularizer """no""" +765 90 optimizer """adam""" +765 90 training_loop """owa""" +765 90 negative_sampler """basic""" +765 90 evaluator """rankbased""" +765 91 dataset """kinships""" +765 91 model """transd""" +765 91 loss """marginranking""" +765 91 regularizer """no""" +765 91 optimizer """adam""" +765 91 training_loop """owa""" +765 91 negative_sampler """basic""" +765 91 evaluator """rankbased""" +765 92 dataset """kinships""" +765 92 model """transd""" +765 92 loss """marginranking""" +765 92 regularizer """no""" +765 92 optimizer """adam""" +765 92 training_loop """owa""" +765 92 negative_sampler """basic""" +765 92 evaluator """rankbased""" +765 93 dataset """kinships""" +765 93 model """transd""" +765 93 loss """marginranking""" +765 93 regularizer """no""" +765 93 optimizer """adam""" +765 93 training_loop """owa""" +765 93 negative_sampler """basic""" +765 93 evaluator """rankbased""" +765 94 dataset """kinships""" +765 94 model """transd""" +765 94 loss """marginranking""" +765 94 regularizer """no""" +765 94 optimizer """adam""" +765 94 training_loop """owa""" +765 94 negative_sampler """basic""" +765 94 evaluator """rankbased""" +765 95 dataset """kinships""" +765 95 model """transd""" +765 95 loss """marginranking""" +765 95 regularizer """no""" +765 95 optimizer """adam""" +765 95 training_loop """owa""" +765 95 negative_sampler """basic""" +765 95 evaluator """rankbased""" +765 96 dataset """kinships""" +765 96 model """transd""" +765 96 loss """marginranking""" +765 96 regularizer """no""" +765 96 optimizer """adam""" +765 96 training_loop """owa""" +765 96 negative_sampler """basic""" +765 96 evaluator """rankbased""" +765 97 dataset """kinships""" +765 97 model """transd""" +765 97 loss """marginranking""" +765 97 
regularizer """no""" +765 97 optimizer """adam""" +765 97 training_loop """owa""" +765 97 negative_sampler """basic""" +765 97 evaluator """rankbased""" +765 98 dataset """kinships""" +765 98 model """transd""" +765 98 loss """marginranking""" +765 98 regularizer """no""" +765 98 optimizer """adam""" +765 98 training_loop """owa""" +765 98 negative_sampler """basic""" +765 98 evaluator """rankbased""" +765 99 dataset """kinships""" +765 99 model """transd""" +765 99 loss """marginranking""" +765 99 regularizer """no""" +765 99 optimizer """adam""" +765 99 training_loop """owa""" +765 99 negative_sampler """basic""" +765 99 evaluator """rankbased""" +765 100 dataset """kinships""" +765 100 model """transd""" +765 100 loss """marginranking""" +765 100 regularizer """no""" +765 100 optimizer """adam""" +765 100 training_loop """owa""" +765 100 negative_sampler """basic""" +765 100 evaluator """rankbased""" +766 1 model.embedding_dim 2.0 +766 1 model.relation_dim 0.0 +766 1 loss.margin 8.611422631774538 +766 1 optimizer.lr 0.010829569964031572 +766 1 negative_sampler.num_negs_per_pos 50.0 +766 1 training.batch_size 1.0 +766 2 model.embedding_dim 2.0 +766 2 model.relation_dim 1.0 +766 2 loss.margin 4.812335390856056 +766 2 optimizer.lr 0.010718540458946409 +766 2 negative_sampler.num_negs_per_pos 59.0 +766 2 training.batch_size 1.0 +766 3 model.embedding_dim 2.0 +766 3 model.relation_dim 1.0 +766 3 loss.margin 2.4926487362896106 +766 3 optimizer.lr 0.08116449920097897 +766 3 negative_sampler.num_negs_per_pos 62.0 +766 3 training.batch_size 0.0 +766 4 model.embedding_dim 0.0 +766 4 model.relation_dim 0.0 +766 4 loss.margin 8.32151527359785 +766 4 optimizer.lr 0.029253562447925822 +766 4 negative_sampler.num_negs_per_pos 76.0 +766 4 training.batch_size 1.0 +766 5 model.embedding_dim 1.0 +766 5 model.relation_dim 0.0 +766 5 loss.margin 9.590329810597893 +766 5 optimizer.lr 0.004344792448625502 +766 5 negative_sampler.num_negs_per_pos 25.0 +766 5 training.batch_size 0.0 
+766 6 model.embedding_dim 2.0 +766 6 model.relation_dim 1.0 +766 6 loss.margin 7.763233667240267 +766 6 optimizer.lr 0.012740407808437389 +766 6 negative_sampler.num_negs_per_pos 59.0 +766 6 training.batch_size 2.0 +766 7 model.embedding_dim 1.0 +766 7 model.relation_dim 2.0 +766 7 loss.margin 6.111739166131301 +766 7 optimizer.lr 0.003638213737135419 +766 7 negative_sampler.num_negs_per_pos 0.0 +766 7 training.batch_size 2.0 +766 8 model.embedding_dim 1.0 +766 8 model.relation_dim 1.0 +766 8 loss.margin 7.479242471335856 +766 8 optimizer.lr 0.005461119662982662 +766 8 negative_sampler.num_negs_per_pos 93.0 +766 8 training.batch_size 1.0 +766 9 model.embedding_dim 0.0 +766 9 model.relation_dim 1.0 +766 9 loss.margin 7.467616576090942 +766 9 optimizer.lr 0.09702000188377967 +766 9 negative_sampler.num_negs_per_pos 10.0 +766 9 training.batch_size 1.0 +766 10 model.embedding_dim 0.0 +766 10 model.relation_dim 2.0 +766 10 loss.margin 9.671981245359165 +766 10 optimizer.lr 0.0010191052342498224 +766 10 negative_sampler.num_negs_per_pos 60.0 +766 10 training.batch_size 1.0 +766 11 model.embedding_dim 0.0 +766 11 model.relation_dim 2.0 +766 11 loss.margin 6.014222529056392 +766 11 optimizer.lr 0.0496238970449322 +766 11 negative_sampler.num_negs_per_pos 58.0 +766 11 training.batch_size 0.0 +766 12 model.embedding_dim 0.0 +766 12 model.relation_dim 1.0 +766 12 loss.margin 2.261101108196395 +766 12 optimizer.lr 0.0416350098954829 +766 12 negative_sampler.num_negs_per_pos 2.0 +766 12 training.batch_size 1.0 +766 13 model.embedding_dim 1.0 +766 13 model.relation_dim 1.0 +766 13 loss.margin 6.859329310861488 +766 13 optimizer.lr 0.001110716851548354 +766 13 negative_sampler.num_negs_per_pos 92.0 +766 13 training.batch_size 1.0 +766 14 model.embedding_dim 1.0 +766 14 model.relation_dim 1.0 +766 14 loss.margin 7.2147295410654415 +766 14 optimizer.lr 0.0017624090967655697 +766 14 negative_sampler.num_negs_per_pos 51.0 +766 14 training.batch_size 1.0 +766 15 model.embedding_dim 
2.0 +766 15 model.relation_dim 1.0 +766 15 loss.margin 4.062758085560116 +766 15 optimizer.lr 0.004572555565594369 +766 15 negative_sampler.num_negs_per_pos 7.0 +766 15 training.batch_size 2.0 +766 16 model.embedding_dim 1.0 +766 16 model.relation_dim 2.0 +766 16 loss.margin 5.577474022172194 +766 16 optimizer.lr 0.007697124981053708 +766 16 negative_sampler.num_negs_per_pos 39.0 +766 16 training.batch_size 2.0 +766 17 model.embedding_dim 2.0 +766 17 model.relation_dim 2.0 +766 17 loss.margin 5.889748738242003 +766 17 optimizer.lr 0.014365984887070124 +766 17 negative_sampler.num_negs_per_pos 44.0 +766 17 training.batch_size 1.0 +766 18 model.embedding_dim 0.0 +766 18 model.relation_dim 2.0 +766 18 loss.margin 5.727312798656303 +766 18 optimizer.lr 0.04496340086987008 +766 18 negative_sampler.num_negs_per_pos 24.0 +766 18 training.batch_size 2.0 +766 19 model.embedding_dim 2.0 +766 19 model.relation_dim 2.0 +766 19 loss.margin 5.460905616562281 +766 19 optimizer.lr 0.004864250591816349 +766 19 negative_sampler.num_negs_per_pos 24.0 +766 19 training.batch_size 1.0 +766 20 model.embedding_dim 2.0 +766 20 model.relation_dim 2.0 +766 20 loss.margin 3.1181377968267254 +766 20 optimizer.lr 0.051438777140435064 +766 20 negative_sampler.num_negs_per_pos 42.0 +766 20 training.batch_size 1.0 +766 21 model.embedding_dim 0.0 +766 21 model.relation_dim 2.0 +766 21 loss.margin 0.6092756824470894 +766 21 optimizer.lr 0.05369999233362779 +766 21 negative_sampler.num_negs_per_pos 96.0 +766 21 training.batch_size 1.0 +766 22 model.embedding_dim 1.0 +766 22 model.relation_dim 0.0 +766 22 loss.margin 8.427709485841852 +766 22 optimizer.lr 0.04845667562751092 +766 22 negative_sampler.num_negs_per_pos 50.0 +766 22 training.batch_size 2.0 +766 23 model.embedding_dim 0.0 +766 23 model.relation_dim 0.0 +766 23 loss.margin 1.0416422823742224 +766 23 optimizer.lr 0.054125004515434755 +766 23 negative_sampler.num_negs_per_pos 80.0 +766 23 training.batch_size 0.0 +766 24 model.embedding_dim 
1.0 +766 24 model.relation_dim 2.0 +766 24 loss.margin 8.796476634477704 +766 24 optimizer.lr 0.060397608568368975 +766 24 negative_sampler.num_negs_per_pos 1.0 +766 24 training.batch_size 2.0 +766 25 model.embedding_dim 1.0 +766 25 model.relation_dim 1.0 +766 25 loss.margin 8.7773814619422 +766 25 optimizer.lr 0.005158055820130697 +766 25 negative_sampler.num_negs_per_pos 86.0 +766 25 training.batch_size 2.0 +766 26 model.embedding_dim 2.0 +766 26 model.relation_dim 2.0 +766 26 loss.margin 8.083599601291098 +766 26 optimizer.lr 0.0010360983728542337 +766 26 negative_sampler.num_negs_per_pos 58.0 +766 26 training.batch_size 1.0 +766 27 model.embedding_dim 1.0 +766 27 model.relation_dim 2.0 +766 27 loss.margin 7.776492234312233 +766 27 optimizer.lr 0.014883998494787694 +766 27 negative_sampler.num_negs_per_pos 87.0 +766 27 training.batch_size 2.0 +766 28 model.embedding_dim 0.0 +766 28 model.relation_dim 1.0 +766 28 loss.margin 7.153545592785299 +766 28 optimizer.lr 0.046810171069449465 +766 28 negative_sampler.num_negs_per_pos 61.0 +766 28 training.batch_size 0.0 +766 29 model.embedding_dim 2.0 +766 29 model.relation_dim 1.0 +766 29 loss.margin 7.058447397256093 +766 29 optimizer.lr 0.07572480952226271 +766 29 negative_sampler.num_negs_per_pos 49.0 +766 29 training.batch_size 2.0 +766 30 model.embedding_dim 0.0 +766 30 model.relation_dim 0.0 +766 30 loss.margin 2.873917171131691 +766 30 optimizer.lr 0.002185347218855407 +766 30 negative_sampler.num_negs_per_pos 84.0 +766 30 training.batch_size 1.0 +766 31 model.embedding_dim 1.0 +766 31 model.relation_dim 1.0 +766 31 loss.margin 1.8363477435421731 +766 31 optimizer.lr 0.012406370948406133 +766 31 negative_sampler.num_negs_per_pos 68.0 +766 31 training.batch_size 2.0 +766 32 model.embedding_dim 0.0 +766 32 model.relation_dim 1.0 +766 32 loss.margin 8.449161183427467 +766 32 optimizer.lr 0.002127211914642046 +766 32 negative_sampler.num_negs_per_pos 77.0 +766 32 training.batch_size 0.0 +766 33 model.embedding_dim 1.0 
+766 33 model.relation_dim 0.0 +766 33 loss.margin 2.8041159093118933 +766 33 optimizer.lr 0.011497644770348208 +766 33 negative_sampler.num_negs_per_pos 12.0 +766 33 training.batch_size 0.0 +766 34 model.embedding_dim 2.0 +766 34 model.relation_dim 0.0 +766 34 loss.margin 9.671458450806211 +766 34 optimizer.lr 0.018578131064875873 +766 34 negative_sampler.num_negs_per_pos 1.0 +766 34 training.batch_size 0.0 +766 35 model.embedding_dim 0.0 +766 35 model.relation_dim 2.0 +766 35 loss.margin 4.22259512282649 +766 35 optimizer.lr 0.0026045013185069855 +766 35 negative_sampler.num_negs_per_pos 99.0 +766 35 training.batch_size 0.0 +766 36 model.embedding_dim 1.0 +766 36 model.relation_dim 0.0 +766 36 loss.margin 2.0965014983055985 +766 36 optimizer.lr 0.0025540192701106676 +766 36 negative_sampler.num_negs_per_pos 44.0 +766 36 training.batch_size 0.0 +766 37 model.embedding_dim 0.0 +766 37 model.relation_dim 2.0 +766 37 loss.margin 2.686881034905029 +766 37 optimizer.lr 0.024673709297312676 +766 37 negative_sampler.num_negs_per_pos 23.0 +766 37 training.batch_size 1.0 +766 38 model.embedding_dim 0.0 +766 38 model.relation_dim 1.0 +766 38 loss.margin 9.370881934022758 +766 38 optimizer.lr 0.004406619146940106 +766 38 negative_sampler.num_negs_per_pos 87.0 +766 38 training.batch_size 1.0 +766 39 model.embedding_dim 2.0 +766 39 model.relation_dim 2.0 +766 39 loss.margin 9.242078960724365 +766 39 optimizer.lr 0.014760305545800058 +766 39 negative_sampler.num_negs_per_pos 80.0 +766 39 training.batch_size 0.0 +766 40 model.embedding_dim 1.0 +766 40 model.relation_dim 2.0 +766 40 loss.margin 1.979837948267281 +766 40 optimizer.lr 0.0028202655878852086 +766 40 negative_sampler.num_negs_per_pos 63.0 +766 40 training.batch_size 0.0 +766 41 model.embedding_dim 1.0 +766 41 model.relation_dim 1.0 +766 41 loss.margin 6.63325592600889 +766 41 optimizer.lr 0.011762843539780462 +766 41 negative_sampler.num_negs_per_pos 49.0 +766 41 training.batch_size 1.0 +766 42 model.embedding_dim 1.0 
+766 42 model.relation_dim 0.0 +766 42 loss.margin 0.9509296130386572 +766 42 optimizer.lr 0.031131498643162746 +766 42 negative_sampler.num_negs_per_pos 68.0 +766 42 training.batch_size 0.0 +766 43 model.embedding_dim 2.0 +766 43 model.relation_dim 2.0 +766 43 loss.margin 6.972030137968368 +766 43 optimizer.lr 0.002600938507698959 +766 43 negative_sampler.num_negs_per_pos 87.0 +766 43 training.batch_size 1.0 +766 44 model.embedding_dim 1.0 +766 44 model.relation_dim 2.0 +766 44 loss.margin 5.3185884954072264 +766 44 optimizer.lr 0.003802029695163053 +766 44 negative_sampler.num_negs_per_pos 54.0 +766 44 training.batch_size 1.0 +766 45 model.embedding_dim 1.0 +766 45 model.relation_dim 0.0 +766 45 loss.margin 9.042214822601931 +766 45 optimizer.lr 0.008776211685505894 +766 45 negative_sampler.num_negs_per_pos 41.0 +766 45 training.batch_size 2.0 +766 46 model.embedding_dim 2.0 +766 46 model.relation_dim 2.0 +766 46 loss.margin 8.877078866691019 +766 46 optimizer.lr 0.004889833413902204 +766 46 negative_sampler.num_negs_per_pos 99.0 +766 46 training.batch_size 2.0 +766 47 model.embedding_dim 0.0 +766 47 model.relation_dim 0.0 +766 47 loss.margin 1.6829602255170233 +766 47 optimizer.lr 0.0015666447446059748 +766 47 negative_sampler.num_negs_per_pos 44.0 +766 47 training.batch_size 2.0 +766 48 model.embedding_dim 2.0 +766 48 model.relation_dim 1.0 +766 48 loss.margin 2.1046357254630097 +766 48 optimizer.lr 0.001622171446531319 +766 48 negative_sampler.num_negs_per_pos 53.0 +766 48 training.batch_size 1.0 +766 49 model.embedding_dim 1.0 +766 49 model.relation_dim 2.0 +766 49 loss.margin 7.010095764202445 +766 49 optimizer.lr 0.048425860655248364 +766 49 negative_sampler.num_negs_per_pos 10.0 +766 49 training.batch_size 2.0 +766 50 model.embedding_dim 1.0 +766 50 model.relation_dim 2.0 +766 50 loss.margin 9.41645998862097 +766 50 optimizer.lr 0.04706417233644989 +766 50 negative_sampler.num_negs_per_pos 22.0 +766 50 training.batch_size 2.0 +766 51 model.embedding_dim 
1.0 +766 51 model.relation_dim 1.0 +766 51 loss.margin 5.906304067263589 +766 51 optimizer.lr 0.006683819377112252 +766 51 negative_sampler.num_negs_per_pos 34.0 +766 51 training.batch_size 1.0 +766 52 model.embedding_dim 1.0 +766 52 model.relation_dim 2.0 +766 52 loss.margin 6.644758060045761 +766 52 optimizer.lr 0.0024605353194189143 +766 52 negative_sampler.num_negs_per_pos 1.0 +766 52 training.batch_size 0.0 +766 53 model.embedding_dim 2.0 +766 53 model.relation_dim 2.0 +766 53 loss.margin 2.355855416536957 +766 53 optimizer.lr 0.0674564331372652 +766 53 negative_sampler.num_negs_per_pos 12.0 +766 53 training.batch_size 0.0 +766 54 model.embedding_dim 2.0 +766 54 model.relation_dim 0.0 +766 54 loss.margin 3.3940001213357345 +766 54 optimizer.lr 0.0010041232060433497 +766 54 negative_sampler.num_negs_per_pos 39.0 +766 54 training.batch_size 1.0 +766 55 model.embedding_dim 0.0 +766 55 model.relation_dim 0.0 +766 55 loss.margin 8.387232755858737 +766 55 optimizer.lr 0.053549593151184245 +766 55 negative_sampler.num_negs_per_pos 62.0 +766 55 training.batch_size 2.0 +766 56 model.embedding_dim 0.0 +766 56 model.relation_dim 2.0 +766 56 loss.margin 5.540280843112622 +766 56 optimizer.lr 0.0011351335655322495 +766 56 negative_sampler.num_negs_per_pos 50.0 +766 56 training.batch_size 0.0 +766 57 model.embedding_dim 2.0 +766 57 model.relation_dim 1.0 +766 57 loss.margin 7.9064457162951705 +766 57 optimizer.lr 0.05789027180385524 +766 57 negative_sampler.num_negs_per_pos 1.0 +766 57 training.batch_size 2.0 +766 58 model.embedding_dim 2.0 +766 58 model.relation_dim 0.0 +766 58 loss.margin 9.00323897802827 +766 58 optimizer.lr 0.030154533837684485 +766 58 negative_sampler.num_negs_per_pos 94.0 +766 58 training.batch_size 0.0 +766 59 model.embedding_dim 2.0 +766 59 model.relation_dim 1.0 +766 59 loss.margin 6.950227793451865 +766 59 optimizer.lr 0.09946359759646371 +766 59 negative_sampler.num_negs_per_pos 98.0 +766 59 training.batch_size 2.0 +766 60 model.embedding_dim 2.0 
+766 60 model.relation_dim 0.0 +766 60 loss.margin 7.917225047883007 +766 60 optimizer.lr 0.0036776807632718884 +766 60 negative_sampler.num_negs_per_pos 67.0 +766 60 training.batch_size 0.0 +766 61 model.embedding_dim 2.0 +766 61 model.relation_dim 1.0 +766 61 loss.margin 7.999964042210857 +766 61 optimizer.lr 0.0011990349581452713 +766 61 negative_sampler.num_negs_per_pos 61.0 +766 61 training.batch_size 0.0 +766 62 model.embedding_dim 0.0 +766 62 model.relation_dim 0.0 +766 62 loss.margin 9.067421232762754 +766 62 optimizer.lr 0.0038838024118907095 +766 62 negative_sampler.num_negs_per_pos 19.0 +766 62 training.batch_size 2.0 +766 63 model.embedding_dim 0.0 +766 63 model.relation_dim 1.0 +766 63 loss.margin 6.797461732869635 +766 63 optimizer.lr 0.01432095916622105 +766 63 negative_sampler.num_negs_per_pos 73.0 +766 63 training.batch_size 0.0 +766 64 model.embedding_dim 0.0 +766 64 model.relation_dim 1.0 +766 64 loss.margin 0.5799193136604983 +766 64 optimizer.lr 0.0012492290998041785 +766 64 negative_sampler.num_negs_per_pos 71.0 +766 64 training.batch_size 2.0 +766 65 model.embedding_dim 2.0 +766 65 model.relation_dim 0.0 +766 65 loss.margin 1.3682197218748349 +766 65 optimizer.lr 0.0010324789947886953 +766 65 negative_sampler.num_negs_per_pos 95.0 +766 65 training.batch_size 2.0 +766 66 model.embedding_dim 2.0 +766 66 model.relation_dim 0.0 +766 66 loss.margin 9.729068747015074 +766 66 optimizer.lr 0.04601848906157686 +766 66 negative_sampler.num_negs_per_pos 1.0 +766 66 training.batch_size 0.0 +766 67 model.embedding_dim 0.0 +766 67 model.relation_dim 1.0 +766 67 loss.margin 8.935153042806823 +766 67 optimizer.lr 0.03213278299279214 +766 67 negative_sampler.num_negs_per_pos 77.0 +766 67 training.batch_size 0.0 +766 68 model.embedding_dim 1.0 +766 68 model.relation_dim 0.0 +766 68 loss.margin 9.740564579330792 +766 68 optimizer.lr 0.024340632685808317 +766 68 negative_sampler.num_negs_per_pos 88.0 +766 68 training.batch_size 1.0 +766 69 model.embedding_dim 
1.0 +766 69 model.relation_dim 0.0 +766 69 loss.margin 6.785155326972271 +766 69 optimizer.lr 0.016643667476481418 +766 69 negative_sampler.num_negs_per_pos 77.0 +766 69 training.batch_size 0.0 +766 70 model.embedding_dim 0.0 +766 70 model.relation_dim 1.0 +766 70 loss.margin 6.181722620049221 +766 70 optimizer.lr 0.00993123020638675 +766 70 negative_sampler.num_negs_per_pos 96.0 +766 70 training.batch_size 0.0 +766 71 model.embedding_dim 1.0 +766 71 model.relation_dim 2.0 +766 71 loss.margin 8.430342469473864 +766 71 optimizer.lr 0.0021014282425895333 +766 71 negative_sampler.num_negs_per_pos 23.0 +766 71 training.batch_size 0.0 +766 72 model.embedding_dim 1.0 +766 72 model.relation_dim 0.0 +766 72 loss.margin 4.165855884652327 +766 72 optimizer.lr 0.07352417358421726 +766 72 negative_sampler.num_negs_per_pos 57.0 +766 72 training.batch_size 0.0 +766 73 model.embedding_dim 1.0 +766 73 model.relation_dim 2.0 +766 73 loss.margin 4.381522593922366 +766 73 optimizer.lr 0.0016564743593015865 +766 73 negative_sampler.num_negs_per_pos 64.0 +766 73 training.batch_size 1.0 +766 74 model.embedding_dim 0.0 +766 74 model.relation_dim 1.0 +766 74 loss.margin 5.782574742580818 +766 74 optimizer.lr 0.003188630532233079 +766 74 negative_sampler.num_negs_per_pos 89.0 +766 74 training.batch_size 2.0 +766 75 model.embedding_dim 0.0 +766 75 model.relation_dim 1.0 +766 75 loss.margin 0.8990348444398533 +766 75 optimizer.lr 0.01099417722483196 +766 75 negative_sampler.num_negs_per_pos 20.0 +766 75 training.batch_size 0.0 +766 76 model.embedding_dim 2.0 +766 76 model.relation_dim 1.0 +766 76 loss.margin 6.117728073032053 +766 76 optimizer.lr 0.0019850173455017808 +766 76 negative_sampler.num_negs_per_pos 74.0 +766 76 training.batch_size 0.0 +766 77 model.embedding_dim 2.0 +766 77 model.relation_dim 1.0 +766 77 loss.margin 7.749572471880759 +766 77 optimizer.lr 0.044310908863412074 +766 77 negative_sampler.num_negs_per_pos 81.0 +766 77 training.batch_size 2.0 +766 78 model.embedding_dim 
0.0 +766 78 model.relation_dim 2.0 +766 78 loss.margin 4.388647897249641 +766 78 optimizer.lr 0.001887359010377344 +766 78 negative_sampler.num_negs_per_pos 87.0 +766 78 training.batch_size 0.0 +766 79 model.embedding_dim 0.0 +766 79 model.relation_dim 0.0 +766 79 loss.margin 9.394269924015052 +766 79 optimizer.lr 0.003421075712960981 +766 79 negative_sampler.num_negs_per_pos 20.0 +766 79 training.batch_size 1.0 +766 80 model.embedding_dim 0.0 +766 80 model.relation_dim 0.0 +766 80 loss.margin 2.4442586854808166 +766 80 optimizer.lr 0.029385916488621596 +766 80 negative_sampler.num_negs_per_pos 77.0 +766 80 training.batch_size 1.0 +766 81 model.embedding_dim 1.0 +766 81 model.relation_dim 2.0 +766 81 loss.margin 0.5788147596784011 +766 81 optimizer.lr 0.020980244036261995 +766 81 negative_sampler.num_negs_per_pos 88.0 +766 81 training.batch_size 1.0 +766 82 model.embedding_dim 1.0 +766 82 model.relation_dim 1.0 +766 82 loss.margin 9.025647511226987 +766 82 optimizer.lr 0.02525583217702398 +766 82 negative_sampler.num_negs_per_pos 80.0 +766 82 training.batch_size 1.0 +766 83 model.embedding_dim 2.0 +766 83 model.relation_dim 0.0 +766 83 loss.margin 9.497053823837957 +766 83 optimizer.lr 0.002563111293468743 +766 83 negative_sampler.num_negs_per_pos 45.0 +766 83 training.batch_size 1.0 +766 84 model.embedding_dim 0.0 +766 84 model.relation_dim 0.0 +766 84 loss.margin 3.5440936267937535 +766 84 optimizer.lr 0.0012363497835286873 +766 84 negative_sampler.num_negs_per_pos 55.0 +766 84 training.batch_size 0.0 +766 85 model.embedding_dim 1.0 +766 85 model.relation_dim 2.0 +766 85 loss.margin 7.748678404348409 +766 85 optimizer.lr 0.0017094128896470537 +766 85 negative_sampler.num_negs_per_pos 67.0 +766 85 training.batch_size 1.0 +766 86 model.embedding_dim 2.0 +766 86 model.relation_dim 0.0 +766 86 loss.margin 2.276353692779715 +766 86 optimizer.lr 0.044760900568770044 +766 86 negative_sampler.num_negs_per_pos 28.0 +766 86 training.batch_size 2.0 +766 87 
model.embedding_dim 0.0 +766 87 model.relation_dim 0.0 +766 87 loss.margin 3.1294836759768523 +766 87 optimizer.lr 0.004014022823372455 +766 87 negative_sampler.num_negs_per_pos 34.0 +766 87 training.batch_size 2.0 +766 88 model.embedding_dim 1.0 +766 88 model.relation_dim 2.0 +766 88 loss.margin 1.3718769283317556 +766 88 optimizer.lr 0.002910472129813977 +766 88 negative_sampler.num_negs_per_pos 87.0 +766 88 training.batch_size 1.0 +766 89 model.embedding_dim 1.0 +766 89 model.relation_dim 2.0 +766 89 loss.margin 3.3585029704832707 +766 89 optimizer.lr 0.003009066849232337 +766 89 negative_sampler.num_negs_per_pos 56.0 +766 89 training.batch_size 2.0 +766 90 model.embedding_dim 2.0 +766 90 model.relation_dim 2.0 +766 90 loss.margin 3.2426719072888637 +766 90 optimizer.lr 0.04504517929753943 +766 90 negative_sampler.num_negs_per_pos 11.0 +766 90 training.batch_size 0.0 +766 91 model.embedding_dim 2.0 +766 91 model.relation_dim 2.0 +766 91 loss.margin 1.6784301167789013 +766 91 optimizer.lr 0.0712777886901936 +766 91 negative_sampler.num_negs_per_pos 90.0 +766 91 training.batch_size 1.0 +766 92 model.embedding_dim 0.0 +766 92 model.relation_dim 0.0 +766 92 loss.margin 6.161913651425568 +766 92 optimizer.lr 0.0068378905303444594 +766 92 negative_sampler.num_negs_per_pos 27.0 +766 92 training.batch_size 2.0 +766 93 model.embedding_dim 1.0 +766 93 model.relation_dim 0.0 +766 93 loss.margin 2.119341976941211 +766 93 optimizer.lr 0.09326865305277933 +766 93 negative_sampler.num_negs_per_pos 24.0 +766 93 training.batch_size 0.0 +766 94 model.embedding_dim 2.0 +766 94 model.relation_dim 1.0 +766 94 loss.margin 2.067466456925021 +766 94 optimizer.lr 0.044285770075867946 +766 94 negative_sampler.num_negs_per_pos 89.0 +766 94 training.batch_size 0.0 +766 95 model.embedding_dim 2.0 +766 95 model.relation_dim 0.0 +766 95 loss.margin 1.1906516963492502 +766 95 optimizer.lr 0.03403261877867631 +766 95 negative_sampler.num_negs_per_pos 1.0 +766 95 training.batch_size 2.0 +766 96 
model.embedding_dim 1.0 +766 96 model.relation_dim 0.0 +766 96 loss.margin 9.719318013026012 +766 96 optimizer.lr 0.04193231806452095 +766 96 negative_sampler.num_negs_per_pos 92.0 +766 96 training.batch_size 2.0 +766 97 model.embedding_dim 2.0 +766 97 model.relation_dim 1.0 +766 97 loss.margin 7.803465609976464 +766 97 optimizer.lr 0.01383885037019137 +766 97 negative_sampler.num_negs_per_pos 83.0 +766 97 training.batch_size 2.0 +766 98 model.embedding_dim 0.0 +766 98 model.relation_dim 0.0 +766 98 loss.margin 2.11572123268307 +766 98 optimizer.lr 0.006194546116206164 +766 98 negative_sampler.num_negs_per_pos 37.0 +766 98 training.batch_size 2.0 +766 99 model.embedding_dim 2.0 +766 99 model.relation_dim 2.0 +766 99 loss.margin 3.423211231474151 +766 99 optimizer.lr 0.001541686829568377 +766 99 negative_sampler.num_negs_per_pos 92.0 +766 99 training.batch_size 1.0 +766 100 model.embedding_dim 1.0 +766 100 model.relation_dim 0.0 +766 100 loss.margin 5.019954226606956 +766 100 optimizer.lr 0.0027602322497613033 +766 100 negative_sampler.num_negs_per_pos 80.0 +766 100 training.batch_size 0.0 +766 1 dataset """kinships""" +766 1 model """transd""" +766 1 loss """marginranking""" +766 1 regularizer """no""" +766 1 optimizer """adam""" +766 1 training_loop """owa""" +766 1 negative_sampler """basic""" +766 1 evaluator """rankbased""" +766 2 dataset """kinships""" +766 2 model """transd""" +766 2 loss """marginranking""" +766 2 regularizer """no""" +766 2 optimizer """adam""" +766 2 training_loop """owa""" +766 2 negative_sampler """basic""" +766 2 evaluator """rankbased""" +766 3 dataset """kinships""" +766 3 model """transd""" +766 3 loss """marginranking""" +766 3 regularizer """no""" +766 3 optimizer """adam""" +766 3 training_loop """owa""" +766 3 negative_sampler """basic""" +766 3 evaluator """rankbased""" +766 4 dataset """kinships""" +766 4 model """transd""" +766 4 loss """marginranking""" +766 4 regularizer """no""" +766 4 optimizer """adam""" +766 4 
training_loop """owa""" +766 4 negative_sampler """basic""" +766 4 evaluator """rankbased""" +766 5 dataset """kinships""" +766 5 model """transd""" +766 5 loss """marginranking""" +766 5 regularizer """no""" +766 5 optimizer """adam""" +766 5 training_loop """owa""" +766 5 negative_sampler """basic""" +766 5 evaluator """rankbased""" +766 6 dataset """kinships""" +766 6 model """transd""" +766 6 loss """marginranking""" +766 6 regularizer """no""" +766 6 optimizer """adam""" +766 6 training_loop """owa""" +766 6 negative_sampler """basic""" +766 6 evaluator """rankbased""" +766 7 dataset """kinships""" +766 7 model """transd""" +766 7 loss """marginranking""" +766 7 regularizer """no""" +766 7 optimizer """adam""" +766 7 training_loop """owa""" +766 7 negative_sampler """basic""" +766 7 evaluator """rankbased""" +766 8 dataset """kinships""" +766 8 model """transd""" +766 8 loss """marginranking""" +766 8 regularizer """no""" +766 8 optimizer """adam""" +766 8 training_loop """owa""" +766 8 negative_sampler """basic""" +766 8 evaluator """rankbased""" +766 9 dataset """kinships""" +766 9 model """transd""" +766 9 loss """marginranking""" +766 9 regularizer """no""" +766 9 optimizer """adam""" +766 9 training_loop """owa""" +766 9 negative_sampler """basic""" +766 9 evaluator """rankbased""" +766 10 dataset """kinships""" +766 10 model """transd""" +766 10 loss """marginranking""" +766 10 regularizer """no""" +766 10 optimizer """adam""" +766 10 training_loop """owa""" +766 10 negative_sampler """basic""" +766 10 evaluator """rankbased""" +766 11 dataset """kinships""" +766 11 model """transd""" +766 11 loss """marginranking""" +766 11 regularizer """no""" +766 11 optimizer """adam""" +766 11 training_loop """owa""" +766 11 negative_sampler """basic""" +766 11 evaluator """rankbased""" +766 12 dataset """kinships""" +766 12 model """transd""" +766 12 loss """marginranking""" +766 12 regularizer """no""" +766 12 optimizer """adam""" +766 12 training_loop """owa""" 
+766 12 negative_sampler """basic""" +766 12 evaluator """rankbased""" +766 13 dataset """kinships""" +766 13 model """transd""" +766 13 loss """marginranking""" +766 13 regularizer """no""" +766 13 optimizer """adam""" +766 13 training_loop """owa""" +766 13 negative_sampler """basic""" +766 13 evaluator """rankbased""" +766 14 dataset """kinships""" +766 14 model """transd""" +766 14 loss """marginranking""" +766 14 regularizer """no""" +766 14 optimizer """adam""" +766 14 training_loop """owa""" +766 14 negative_sampler """basic""" +766 14 evaluator """rankbased""" +766 15 dataset """kinships""" +766 15 model """transd""" +766 15 loss """marginranking""" +766 15 regularizer """no""" +766 15 optimizer """adam""" +766 15 training_loop """owa""" +766 15 negative_sampler """basic""" +766 15 evaluator """rankbased""" +766 16 dataset """kinships""" +766 16 model """transd""" +766 16 loss """marginranking""" +766 16 regularizer """no""" +766 16 optimizer """adam""" +766 16 training_loop """owa""" +766 16 negative_sampler """basic""" +766 16 evaluator """rankbased""" +766 17 dataset """kinships""" +766 17 model """transd""" +766 17 loss """marginranking""" +766 17 regularizer """no""" +766 17 optimizer """adam""" +766 17 training_loop """owa""" +766 17 negative_sampler """basic""" +766 17 evaluator """rankbased""" +766 18 dataset """kinships""" +766 18 model """transd""" +766 18 loss """marginranking""" +766 18 regularizer """no""" +766 18 optimizer """adam""" +766 18 training_loop """owa""" +766 18 negative_sampler """basic""" +766 18 evaluator """rankbased""" +766 19 dataset """kinships""" +766 19 model """transd""" +766 19 loss """marginranking""" +766 19 regularizer """no""" +766 19 optimizer """adam""" +766 19 training_loop """owa""" +766 19 negative_sampler """basic""" +766 19 evaluator """rankbased""" +766 20 dataset """kinships""" +766 20 model """transd""" +766 20 loss """marginranking""" +766 20 regularizer """no""" +766 20 optimizer """adam""" +766 20 
training_loop """owa""" +766 20 negative_sampler """basic""" +766 20 evaluator """rankbased""" +766 21 dataset """kinships""" +766 21 model """transd""" +766 21 loss """marginranking""" +766 21 regularizer """no""" +766 21 optimizer """adam""" +766 21 training_loop """owa""" +766 21 negative_sampler """basic""" +766 21 evaluator """rankbased""" +766 22 dataset """kinships""" +766 22 model """transd""" +766 22 loss """marginranking""" +766 22 regularizer """no""" +766 22 optimizer """adam""" +766 22 training_loop """owa""" +766 22 negative_sampler """basic""" +766 22 evaluator """rankbased""" +766 23 dataset """kinships""" +766 23 model """transd""" +766 23 loss """marginranking""" +766 23 regularizer """no""" +766 23 optimizer """adam""" +766 23 training_loop """owa""" +766 23 negative_sampler """basic""" +766 23 evaluator """rankbased""" +766 24 dataset """kinships""" +766 24 model """transd""" +766 24 loss """marginranking""" +766 24 regularizer """no""" +766 24 optimizer """adam""" +766 24 training_loop """owa""" +766 24 negative_sampler """basic""" +766 24 evaluator """rankbased""" +766 25 dataset """kinships""" +766 25 model """transd""" +766 25 loss """marginranking""" +766 25 regularizer """no""" +766 25 optimizer """adam""" +766 25 training_loop """owa""" +766 25 negative_sampler """basic""" +766 25 evaluator """rankbased""" +766 26 dataset """kinships""" +766 26 model """transd""" +766 26 loss """marginranking""" +766 26 regularizer """no""" +766 26 optimizer """adam""" +766 26 training_loop """owa""" +766 26 negative_sampler """basic""" +766 26 evaluator """rankbased""" +766 27 dataset """kinships""" +766 27 model """transd""" +766 27 loss """marginranking""" +766 27 regularizer """no""" +766 27 optimizer """adam""" +766 27 training_loop """owa""" +766 27 negative_sampler """basic""" +766 27 evaluator """rankbased""" +766 28 dataset """kinships""" +766 28 model """transd""" +766 28 loss """marginranking""" +766 28 regularizer """no""" +766 28 optimizer 
"""adam""" +766 28 training_loop """owa""" +766 28 negative_sampler """basic""" +766 28 evaluator """rankbased""" +766 29 dataset """kinships""" +766 29 model """transd""" +766 29 loss """marginranking""" +766 29 regularizer """no""" +766 29 optimizer """adam""" +766 29 training_loop """owa""" +766 29 negative_sampler """basic""" +766 29 evaluator """rankbased""" +766 30 dataset """kinships""" +766 30 model """transd""" +766 30 loss """marginranking""" +766 30 regularizer """no""" +766 30 optimizer """adam""" +766 30 training_loop """owa""" +766 30 negative_sampler """basic""" +766 30 evaluator """rankbased""" +766 31 dataset """kinships""" +766 31 model """transd""" +766 31 loss """marginranking""" +766 31 regularizer """no""" +766 31 optimizer """adam""" +766 31 training_loop """owa""" +766 31 negative_sampler """basic""" +766 31 evaluator """rankbased""" +766 32 dataset """kinships""" +766 32 model """transd""" +766 32 loss """marginranking""" +766 32 regularizer """no""" +766 32 optimizer """adam""" +766 32 training_loop """owa""" +766 32 negative_sampler """basic""" +766 32 evaluator """rankbased""" +766 33 dataset """kinships""" +766 33 model """transd""" +766 33 loss """marginranking""" +766 33 regularizer """no""" +766 33 optimizer """adam""" +766 33 training_loop """owa""" +766 33 negative_sampler """basic""" +766 33 evaluator """rankbased""" +766 34 dataset """kinships""" +766 34 model """transd""" +766 34 loss """marginranking""" +766 34 regularizer """no""" +766 34 optimizer """adam""" +766 34 training_loop """owa""" +766 34 negative_sampler """basic""" +766 34 evaluator """rankbased""" +766 35 dataset """kinships""" +766 35 model """transd""" +766 35 loss """marginranking""" +766 35 regularizer """no""" +766 35 optimizer """adam""" +766 35 training_loop """owa""" +766 35 negative_sampler """basic""" +766 35 evaluator """rankbased""" +766 36 dataset """kinships""" +766 36 model """transd""" +766 36 loss """marginranking""" +766 36 regularizer """no""" 
+766 36 optimizer """adam""" +766 36 training_loop """owa""" +766 36 negative_sampler """basic""" +766 36 evaluator """rankbased""" +766 37 dataset """kinships""" +766 37 model """transd""" +766 37 loss """marginranking""" +766 37 regularizer """no""" +766 37 optimizer """adam""" +766 37 training_loop """owa""" +766 37 negative_sampler """basic""" +766 37 evaluator """rankbased""" +766 38 dataset """kinships""" +766 38 model """transd""" +766 38 loss """marginranking""" +766 38 regularizer """no""" +766 38 optimizer """adam""" +766 38 training_loop """owa""" +766 38 negative_sampler """basic""" +766 38 evaluator """rankbased""" +766 39 dataset """kinships""" +766 39 model """transd""" +766 39 loss """marginranking""" +766 39 regularizer """no""" +766 39 optimizer """adam""" +766 39 training_loop """owa""" +766 39 negative_sampler """basic""" +766 39 evaluator """rankbased""" +766 40 dataset """kinships""" +766 40 model """transd""" +766 40 loss """marginranking""" +766 40 regularizer """no""" +766 40 optimizer """adam""" +766 40 training_loop """owa""" +766 40 negative_sampler """basic""" +766 40 evaluator """rankbased""" +766 41 dataset """kinships""" +766 41 model """transd""" +766 41 loss """marginranking""" +766 41 regularizer """no""" +766 41 optimizer """adam""" +766 41 training_loop """owa""" +766 41 negative_sampler """basic""" +766 41 evaluator """rankbased""" +766 42 dataset """kinships""" +766 42 model """transd""" +766 42 loss """marginranking""" +766 42 regularizer """no""" +766 42 optimizer """adam""" +766 42 training_loop """owa""" +766 42 negative_sampler """basic""" +766 42 evaluator """rankbased""" +766 43 dataset """kinships""" +766 43 model """transd""" +766 43 loss """marginranking""" +766 43 regularizer """no""" +766 43 optimizer """adam""" +766 43 training_loop """owa""" +766 43 negative_sampler """basic""" +766 43 evaluator """rankbased""" +766 44 dataset """kinships""" +766 44 model """transd""" +766 44 loss """marginranking""" +766 44 
regularizer """no""" +766 44 optimizer """adam""" +766 44 training_loop """owa""" +766 44 negative_sampler """basic""" +766 44 evaluator """rankbased""" +766 45 dataset """kinships""" +766 45 model """transd""" +766 45 loss """marginranking""" +766 45 regularizer """no""" +766 45 optimizer """adam""" +766 45 training_loop """owa""" +766 45 negative_sampler """basic""" +766 45 evaluator """rankbased""" +766 46 dataset """kinships""" +766 46 model """transd""" +766 46 loss """marginranking""" +766 46 regularizer """no""" +766 46 optimizer """adam""" +766 46 training_loop """owa""" +766 46 negative_sampler """basic""" +766 46 evaluator """rankbased""" +766 47 dataset """kinships""" +766 47 model """transd""" +766 47 loss """marginranking""" +766 47 regularizer """no""" +766 47 optimizer """adam""" +766 47 training_loop """owa""" +766 47 negative_sampler """basic""" +766 47 evaluator """rankbased""" +766 48 dataset """kinships""" +766 48 model """transd""" +766 48 loss """marginranking""" +766 48 regularizer """no""" +766 48 optimizer """adam""" +766 48 training_loop """owa""" +766 48 negative_sampler """basic""" +766 48 evaluator """rankbased""" +766 49 dataset """kinships""" +766 49 model """transd""" +766 49 loss """marginranking""" +766 49 regularizer """no""" +766 49 optimizer """adam""" +766 49 training_loop """owa""" +766 49 negative_sampler """basic""" +766 49 evaluator """rankbased""" +766 50 dataset """kinships""" +766 50 model """transd""" +766 50 loss """marginranking""" +766 50 regularizer """no""" +766 50 optimizer """adam""" +766 50 training_loop """owa""" +766 50 negative_sampler """basic""" +766 50 evaluator """rankbased""" +766 51 dataset """kinships""" +766 51 model """transd""" +766 51 loss """marginranking""" +766 51 regularizer """no""" +766 51 optimizer """adam""" +766 51 training_loop """owa""" +766 51 negative_sampler """basic""" +766 51 evaluator """rankbased""" +766 52 dataset """kinships""" +766 52 model """transd""" +766 52 loss 
"""marginranking""" +766 52 regularizer """no""" +766 52 optimizer """adam""" +766 52 training_loop """owa""" +766 52 negative_sampler """basic""" +766 52 evaluator """rankbased""" +766 53 dataset """kinships""" +766 53 model """transd""" +766 53 loss """marginranking""" +766 53 regularizer """no""" +766 53 optimizer """adam""" +766 53 training_loop """owa""" +766 53 negative_sampler """basic""" +766 53 evaluator """rankbased""" +766 54 dataset """kinships""" +766 54 model """transd""" +766 54 loss """marginranking""" +766 54 regularizer """no""" +766 54 optimizer """adam""" +766 54 training_loop """owa""" +766 54 negative_sampler """basic""" +766 54 evaluator """rankbased""" +766 55 dataset """kinships""" +766 55 model """transd""" +766 55 loss """marginranking""" +766 55 regularizer """no""" +766 55 optimizer """adam""" +766 55 training_loop """owa""" +766 55 negative_sampler """basic""" +766 55 evaluator """rankbased""" +766 56 dataset """kinships""" +766 56 model """transd""" +766 56 loss """marginranking""" +766 56 regularizer """no""" +766 56 optimizer """adam""" +766 56 training_loop """owa""" +766 56 negative_sampler """basic""" +766 56 evaluator """rankbased""" +766 57 dataset """kinships""" +766 57 model """transd""" +766 57 loss """marginranking""" +766 57 regularizer """no""" +766 57 optimizer """adam""" +766 57 training_loop """owa""" +766 57 negative_sampler """basic""" +766 57 evaluator """rankbased""" +766 58 dataset """kinships""" +766 58 model """transd""" +766 58 loss """marginranking""" +766 58 regularizer """no""" +766 58 optimizer """adam""" +766 58 training_loop """owa""" +766 58 negative_sampler """basic""" +766 58 evaluator """rankbased""" +766 59 dataset """kinships""" +766 59 model """transd""" +766 59 loss """marginranking""" +766 59 regularizer """no""" +766 59 optimizer """adam""" +766 59 training_loop """owa""" +766 59 negative_sampler """basic""" +766 59 evaluator """rankbased""" +766 60 dataset """kinships""" +766 60 model 
"""transd""" +766 60 loss """marginranking""" +766 60 regularizer """no""" +766 60 optimizer """adam""" +766 60 training_loop """owa""" +766 60 negative_sampler """basic""" +766 60 evaluator """rankbased""" +766 61 dataset """kinships""" +766 61 model """transd""" +766 61 loss """marginranking""" +766 61 regularizer """no""" +766 61 optimizer """adam""" +766 61 training_loop """owa""" +766 61 negative_sampler """basic""" +766 61 evaluator """rankbased""" +766 62 dataset """kinships""" +766 62 model """transd""" +766 62 loss """marginranking""" +766 62 regularizer """no""" +766 62 optimizer """adam""" +766 62 training_loop """owa""" +766 62 negative_sampler """basic""" +766 62 evaluator """rankbased""" +766 63 dataset """kinships""" +766 63 model """transd""" +766 63 loss """marginranking""" +766 63 regularizer """no""" +766 63 optimizer """adam""" +766 63 training_loop """owa""" +766 63 negative_sampler """basic""" +766 63 evaluator """rankbased""" +766 64 dataset """kinships""" +766 64 model """transd""" +766 64 loss """marginranking""" +766 64 regularizer """no""" +766 64 optimizer """adam""" +766 64 training_loop """owa""" +766 64 negative_sampler """basic""" +766 64 evaluator """rankbased""" +766 65 dataset """kinships""" +766 65 model """transd""" +766 65 loss """marginranking""" +766 65 regularizer """no""" +766 65 optimizer """adam""" +766 65 training_loop """owa""" +766 65 negative_sampler """basic""" +766 65 evaluator """rankbased""" +766 66 dataset """kinships""" +766 66 model """transd""" +766 66 loss """marginranking""" +766 66 regularizer """no""" +766 66 optimizer """adam""" +766 66 training_loop """owa""" +766 66 negative_sampler """basic""" +766 66 evaluator """rankbased""" +766 67 dataset """kinships""" +766 67 model """transd""" +766 67 loss """marginranking""" +766 67 regularizer """no""" +766 67 optimizer """adam""" +766 67 training_loop """owa""" +766 67 negative_sampler """basic""" +766 67 evaluator """rankbased""" +766 68 dataset 
"""kinships""" +766 68 model """transd""" +766 68 loss """marginranking""" +766 68 regularizer """no""" +766 68 optimizer """adam""" +766 68 training_loop """owa""" +766 68 negative_sampler """basic""" +766 68 evaluator """rankbased""" +766 69 dataset """kinships""" +766 69 model """transd""" +766 69 loss """marginranking""" +766 69 regularizer """no""" +766 69 optimizer """adam""" +766 69 training_loop """owa""" +766 69 negative_sampler """basic""" +766 69 evaluator """rankbased""" +766 70 dataset """kinships""" +766 70 model """transd""" +766 70 loss """marginranking""" +766 70 regularizer """no""" +766 70 optimizer """adam""" +766 70 training_loop """owa""" +766 70 negative_sampler """basic""" +766 70 evaluator """rankbased""" +766 71 dataset """kinships""" +766 71 model """transd""" +766 71 loss """marginranking""" +766 71 regularizer """no""" +766 71 optimizer """adam""" +766 71 training_loop """owa""" +766 71 negative_sampler """basic""" +766 71 evaluator """rankbased""" +766 72 dataset """kinships""" +766 72 model """transd""" +766 72 loss """marginranking""" +766 72 regularizer """no""" +766 72 optimizer """adam""" +766 72 training_loop """owa""" +766 72 negative_sampler """basic""" +766 72 evaluator """rankbased""" +766 73 dataset """kinships""" +766 73 model """transd""" +766 73 loss """marginranking""" +766 73 regularizer """no""" +766 73 optimizer """adam""" +766 73 training_loop """owa""" +766 73 negative_sampler """basic""" +766 73 evaluator """rankbased""" +766 74 dataset """kinships""" +766 74 model """transd""" +766 74 loss """marginranking""" +766 74 regularizer """no""" +766 74 optimizer """adam""" +766 74 training_loop """owa""" +766 74 negative_sampler """basic""" +766 74 evaluator """rankbased""" +766 75 dataset """kinships""" +766 75 model """transd""" +766 75 loss """marginranking""" +766 75 regularizer """no""" +766 75 optimizer """adam""" +766 75 training_loop """owa""" +766 75 negative_sampler """basic""" +766 75 evaluator """rankbased""" 
+766 76 dataset """kinships""" +766 76 model """transd""" +766 76 loss """marginranking""" +766 76 regularizer """no""" +766 76 optimizer """adam""" +766 76 training_loop """owa""" +766 76 negative_sampler """basic""" +766 76 evaluator """rankbased""" +766 77 dataset """kinships""" +766 77 model """transd""" +766 77 loss """marginranking""" +766 77 regularizer """no""" +766 77 optimizer """adam""" +766 77 training_loop """owa""" +766 77 negative_sampler """basic""" +766 77 evaluator """rankbased""" +766 78 dataset """kinships""" +766 78 model """transd""" +766 78 loss """marginranking""" +766 78 regularizer """no""" +766 78 optimizer """adam""" +766 78 training_loop """owa""" +766 78 negative_sampler """basic""" +766 78 evaluator """rankbased""" +766 79 dataset """kinships""" +766 79 model """transd""" +766 79 loss """marginranking""" +766 79 regularizer """no""" +766 79 optimizer """adam""" +766 79 training_loop """owa""" +766 79 negative_sampler """basic""" +766 79 evaluator """rankbased""" +766 80 dataset """kinships""" +766 80 model """transd""" +766 80 loss """marginranking""" +766 80 regularizer """no""" +766 80 optimizer """adam""" +766 80 training_loop """owa""" +766 80 negative_sampler """basic""" +766 80 evaluator """rankbased""" +766 81 dataset """kinships""" +766 81 model """transd""" +766 81 loss """marginranking""" +766 81 regularizer """no""" +766 81 optimizer """adam""" +766 81 training_loop """owa""" +766 81 negative_sampler """basic""" +766 81 evaluator """rankbased""" +766 82 dataset """kinships""" +766 82 model """transd""" +766 82 loss """marginranking""" +766 82 regularizer """no""" +766 82 optimizer """adam""" +766 82 training_loop """owa""" +766 82 negative_sampler """basic""" +766 82 evaluator """rankbased""" +766 83 dataset """kinships""" +766 83 model """transd""" +766 83 loss """marginranking""" +766 83 regularizer """no""" +766 83 optimizer """adam""" +766 83 training_loop """owa""" +766 83 negative_sampler """basic""" +766 83 evaluator 
"""rankbased""" +766 84 dataset """kinships""" +766 84 model """transd""" +766 84 loss """marginranking""" +766 84 regularizer """no""" +766 84 optimizer """adam""" +766 84 training_loop """owa""" +766 84 negative_sampler """basic""" +766 84 evaluator """rankbased""" +766 85 dataset """kinships""" +766 85 model """transd""" +766 85 loss """marginranking""" +766 85 regularizer """no""" +766 85 optimizer """adam""" +766 85 training_loop """owa""" +766 85 negative_sampler """basic""" +766 85 evaluator """rankbased""" +766 86 dataset """kinships""" +766 86 model """transd""" +766 86 loss """marginranking""" +766 86 regularizer """no""" +766 86 optimizer """adam""" +766 86 training_loop """owa""" +766 86 negative_sampler """basic""" +766 86 evaluator """rankbased""" +766 87 dataset """kinships""" +766 87 model """transd""" +766 87 loss """marginranking""" +766 87 regularizer """no""" +766 87 optimizer """adam""" +766 87 training_loop """owa""" +766 87 negative_sampler """basic""" +766 87 evaluator """rankbased""" +766 88 dataset """kinships""" +766 88 model """transd""" +766 88 loss """marginranking""" +766 88 regularizer """no""" +766 88 optimizer """adam""" +766 88 training_loop """owa""" +766 88 negative_sampler """basic""" +766 88 evaluator """rankbased""" +766 89 dataset """kinships""" +766 89 model """transd""" +766 89 loss """marginranking""" +766 89 regularizer """no""" +766 89 optimizer """adam""" +766 89 training_loop """owa""" +766 89 negative_sampler """basic""" +766 89 evaluator """rankbased""" +766 90 dataset """kinships""" +766 90 model """transd""" +766 90 loss """marginranking""" +766 90 regularizer """no""" +766 90 optimizer """adam""" +766 90 training_loop """owa""" +766 90 negative_sampler """basic""" +766 90 evaluator """rankbased""" +766 91 dataset """kinships""" +766 91 model """transd""" +766 91 loss """marginranking""" +766 91 regularizer """no""" +766 91 optimizer """adam""" +766 91 training_loop """owa""" +766 91 negative_sampler """basic""" 
+766 91 evaluator """rankbased""" +766 92 dataset """kinships""" +766 92 model """transd""" +766 92 loss """marginranking""" +766 92 regularizer """no""" +766 92 optimizer """adam""" +766 92 training_loop """owa""" +766 92 negative_sampler """basic""" +766 92 evaluator """rankbased""" +766 93 dataset """kinships""" +766 93 model """transd""" +766 93 loss """marginranking""" +766 93 regularizer """no""" +766 93 optimizer """adam""" +766 93 training_loop """owa""" +766 93 negative_sampler """basic""" +766 93 evaluator """rankbased""" +766 94 dataset """kinships""" +766 94 model """transd""" +766 94 loss """marginranking""" +766 94 regularizer """no""" +766 94 optimizer """adam""" +766 94 training_loop """owa""" +766 94 negative_sampler """basic""" +766 94 evaluator """rankbased""" +766 95 dataset """kinships""" +766 95 model """transd""" +766 95 loss """marginranking""" +766 95 regularizer """no""" +766 95 optimizer """adam""" +766 95 training_loop """owa""" +766 95 negative_sampler """basic""" +766 95 evaluator """rankbased""" +766 96 dataset """kinships""" +766 96 model """transd""" +766 96 loss """marginranking""" +766 96 regularizer """no""" +766 96 optimizer """adam""" +766 96 training_loop """owa""" +766 96 negative_sampler """basic""" +766 96 evaluator """rankbased""" +766 97 dataset """kinships""" +766 97 model """transd""" +766 97 loss """marginranking""" +766 97 regularizer """no""" +766 97 optimizer """adam""" +766 97 training_loop """owa""" +766 97 negative_sampler """basic""" +766 97 evaluator """rankbased""" +766 98 dataset """kinships""" +766 98 model """transd""" +766 98 loss """marginranking""" +766 98 regularizer """no""" +766 98 optimizer """adam""" +766 98 training_loop """owa""" +766 98 negative_sampler """basic""" +766 98 evaluator """rankbased""" +766 99 dataset """kinships""" +766 99 model """transd""" +766 99 loss """marginranking""" +766 99 regularizer """no""" +766 99 optimizer """adam""" +766 99 training_loop """owa""" +766 99 
negative_sampler """basic""" +766 99 evaluator """rankbased""" +766 100 dataset """kinships""" +766 100 model """transd""" +766 100 loss """marginranking""" +766 100 regularizer """no""" +766 100 optimizer """adam""" +766 100 training_loop """owa""" +766 100 negative_sampler """basic""" +766 100 evaluator """rankbased""" +767 1 model.embedding_dim 0.0 +767 1 model.relation_dim 0.0 +767 1 loss.margin 6.589260355153135 +767 1 loss.adversarial_temperature 0.37640051294609966 +767 1 optimizer.lr 0.0791602396903598 +767 1 negative_sampler.num_negs_per_pos 73.0 +767 1 training.batch_size 2.0 +767 2 model.embedding_dim 2.0 +767 2 model.relation_dim 0.0 +767 2 loss.margin 16.379346138412973 +767 2 loss.adversarial_temperature 0.9544500207361434 +767 2 optimizer.lr 0.02998491560590519 +767 2 negative_sampler.num_negs_per_pos 35.0 +767 2 training.batch_size 2.0 +767 3 model.embedding_dim 1.0 +767 3 model.relation_dim 2.0 +767 3 loss.margin 19.06063850602806 +767 3 loss.adversarial_temperature 0.6850760503638444 +767 3 optimizer.lr 0.0011533018435824197 +767 3 negative_sampler.num_negs_per_pos 2.0 +767 3 training.batch_size 0.0 +767 4 model.embedding_dim 0.0 +767 4 model.relation_dim 2.0 +767 4 loss.margin 26.364447473412838 +767 4 loss.adversarial_temperature 0.9678603043343157 +767 4 optimizer.lr 0.026727087312310528 +767 4 negative_sampler.num_negs_per_pos 46.0 +767 4 training.batch_size 0.0 +767 5 model.embedding_dim 1.0 +767 5 model.relation_dim 1.0 +767 5 loss.margin 8.079031797372483 +767 5 loss.adversarial_temperature 0.6086916713951344 +767 5 optimizer.lr 0.0012520114055087229 +767 5 negative_sampler.num_negs_per_pos 66.0 +767 5 training.batch_size 2.0 +767 6 model.embedding_dim 2.0 +767 6 model.relation_dim 0.0 +767 6 loss.margin 15.502364696817972 +767 6 loss.adversarial_temperature 0.5510225871158011 +767 6 optimizer.lr 0.008655379498525195 +767 6 negative_sampler.num_negs_per_pos 99.0 +767 6 training.batch_size 2.0 +767 7 model.embedding_dim 1.0 +767 7 
model.relation_dim 1.0 +767 7 loss.margin 6.72217202212269 +767 7 loss.adversarial_temperature 0.25732489432961325 +767 7 optimizer.lr 0.0010739591704123572 +767 7 negative_sampler.num_negs_per_pos 48.0 +767 7 training.batch_size 2.0 +767 8 model.embedding_dim 1.0 +767 8 model.relation_dim 1.0 +767 8 loss.margin 14.373625763459982 +767 8 loss.adversarial_temperature 0.2018702117431777 +767 8 optimizer.lr 0.04213107096026231 +767 8 negative_sampler.num_negs_per_pos 85.0 +767 8 training.batch_size 0.0 +767 9 model.embedding_dim 2.0 +767 9 model.relation_dim 0.0 +767 9 loss.margin 14.578764851992844 +767 9 loss.adversarial_temperature 0.26743314902635595 +767 9 optimizer.lr 0.005776664655090126 +767 9 negative_sampler.num_negs_per_pos 68.0 +767 9 training.batch_size 0.0 +767 10 model.embedding_dim 0.0 +767 10 model.relation_dim 1.0 +767 10 loss.margin 29.58894144182024 +767 10 loss.adversarial_temperature 0.18188732817356573 +767 10 optimizer.lr 0.0019043643188598463 +767 10 negative_sampler.num_negs_per_pos 25.0 +767 10 training.batch_size 1.0 +767 11 model.embedding_dim 1.0 +767 11 model.relation_dim 1.0 +767 11 loss.margin 5.573598052034457 +767 11 loss.adversarial_temperature 0.21173955681418616 +767 11 optimizer.lr 0.07799374040467702 +767 11 negative_sampler.num_negs_per_pos 88.0 +767 11 training.batch_size 0.0 +767 12 model.embedding_dim 2.0 +767 12 model.relation_dim 1.0 +767 12 loss.margin 14.61055168035593 +767 12 loss.adversarial_temperature 0.9697640444358656 +767 12 optimizer.lr 0.02930772449071098 +767 12 negative_sampler.num_negs_per_pos 6.0 +767 12 training.batch_size 2.0 +767 13 model.embedding_dim 0.0 +767 13 model.relation_dim 0.0 +767 13 loss.margin 11.843980683857554 +767 13 loss.adversarial_temperature 0.2920445536456421 +767 13 optimizer.lr 0.0031123054944609267 +767 13 negative_sampler.num_negs_per_pos 25.0 +767 13 training.batch_size 2.0 +767 14 model.embedding_dim 0.0 +767 14 model.relation_dim 0.0 +767 14 loss.margin 29.118204132304573 +767 
14 loss.adversarial_temperature 0.3845797546626526 +767 14 optimizer.lr 0.016428524290065864 +767 14 negative_sampler.num_negs_per_pos 5.0 +767 14 training.batch_size 1.0 +767 15 model.embedding_dim 0.0 +767 15 model.relation_dim 2.0 +767 15 loss.margin 8.938779738824627 +767 15 loss.adversarial_temperature 0.41529664960261503 +767 15 optimizer.lr 0.006144789750486491 +767 15 negative_sampler.num_negs_per_pos 99.0 +767 15 training.batch_size 2.0 +767 16 model.embedding_dim 2.0 +767 16 model.relation_dim 1.0 +767 16 loss.margin 29.85353330591358 +767 16 loss.adversarial_temperature 0.9452408587867301 +767 16 optimizer.lr 0.01757272629075156 +767 16 negative_sampler.num_negs_per_pos 38.0 +767 16 training.batch_size 2.0 +767 17 model.embedding_dim 2.0 +767 17 model.relation_dim 2.0 +767 17 loss.margin 17.115657382910307 +767 17 loss.adversarial_temperature 0.816778427571472 +767 17 optimizer.lr 0.05122645399843404 +767 17 negative_sampler.num_negs_per_pos 2.0 +767 17 training.batch_size 2.0 +767 18 model.embedding_dim 0.0 +767 18 model.relation_dim 2.0 +767 18 loss.margin 22.84078943496788 +767 18 loss.adversarial_temperature 0.4068272429777683 +767 18 optimizer.lr 0.07955689482452606 +767 18 negative_sampler.num_negs_per_pos 81.0 +767 18 training.batch_size 0.0 +767 19 model.embedding_dim 0.0 +767 19 model.relation_dim 1.0 +767 19 loss.margin 6.302490630588734 +767 19 loss.adversarial_temperature 0.734427525724088 +767 19 optimizer.lr 0.028321044106288523 +767 19 negative_sampler.num_negs_per_pos 1.0 +767 19 training.batch_size 2.0 +767 20 model.embedding_dim 2.0 +767 20 model.relation_dim 2.0 +767 20 loss.margin 19.149196183192558 +767 20 loss.adversarial_temperature 0.4534887198341263 +767 20 optimizer.lr 0.002872513645105764 +767 20 negative_sampler.num_negs_per_pos 49.0 +767 20 training.batch_size 0.0 +767 21 model.embedding_dim 0.0 +767 21 model.relation_dim 2.0 +767 21 loss.margin 19.068176574543237 +767 21 loss.adversarial_temperature 0.41611358473900706 +767 
21 optimizer.lr 0.012814338200920768 +767 21 negative_sampler.num_negs_per_pos 79.0 +767 21 training.batch_size 1.0 +767 22 model.embedding_dim 0.0 +767 22 model.relation_dim 2.0 +767 22 loss.margin 27.9861726494572 +767 22 loss.adversarial_temperature 0.44471489575549794 +767 22 optimizer.lr 0.05675871160724136 +767 22 negative_sampler.num_negs_per_pos 18.0 +767 22 training.batch_size 2.0 +767 23 model.embedding_dim 1.0 +767 23 model.relation_dim 1.0 +767 23 loss.margin 1.3845599345896415 +767 23 loss.adversarial_temperature 0.735156322156079 +767 23 optimizer.lr 0.09127188568437694 +767 23 negative_sampler.num_negs_per_pos 63.0 +767 23 training.batch_size 2.0 +767 24 model.embedding_dim 0.0 +767 24 model.relation_dim 0.0 +767 24 loss.margin 19.80357565955191 +767 24 loss.adversarial_temperature 0.5803810704457498 +767 24 optimizer.lr 0.0016230688020222957 +767 24 negative_sampler.num_negs_per_pos 46.0 +767 24 training.batch_size 2.0 +767 25 model.embedding_dim 1.0 +767 25 model.relation_dim 0.0 +767 25 loss.margin 11.005576066894397 +767 25 loss.adversarial_temperature 0.4419196420169863 +767 25 optimizer.lr 0.0016530767258834413 +767 25 negative_sampler.num_negs_per_pos 74.0 +767 25 training.batch_size 1.0 +767 26 model.embedding_dim 0.0 +767 26 model.relation_dim 2.0 +767 26 loss.margin 12.516901676029846 +767 26 loss.adversarial_temperature 0.7589948961945574 +767 26 optimizer.lr 0.0038460856848400063 +767 26 negative_sampler.num_negs_per_pos 26.0 +767 26 training.batch_size 0.0 +767 27 model.embedding_dim 0.0 +767 27 model.relation_dim 1.0 +767 27 loss.margin 6.9548858337915185 +767 27 loss.adversarial_temperature 0.2448446691951221 +767 27 optimizer.lr 0.0010144408782887181 +767 27 negative_sampler.num_negs_per_pos 86.0 +767 27 training.batch_size 1.0 +767 28 model.embedding_dim 0.0 +767 28 model.relation_dim 1.0 +767 28 loss.margin 18.375803456869626 +767 28 loss.adversarial_temperature 0.45737601200876077 +767 28 optimizer.lr 0.015812858292485486 +767 28 
negative_sampler.num_negs_per_pos 60.0 +767 28 training.batch_size 0.0 +767 29 model.embedding_dim 0.0 +767 29 model.relation_dim 0.0 +767 29 loss.margin 18.13648034101599 +767 29 loss.adversarial_temperature 0.5949671511073801 +767 29 optimizer.lr 0.009137012797248666 +767 29 negative_sampler.num_negs_per_pos 18.0 +767 29 training.batch_size 2.0 +767 30 model.embedding_dim 1.0 +767 30 model.relation_dim 1.0 +767 30 loss.margin 28.508826506751877 +767 30 loss.adversarial_temperature 0.5653011104361423 +767 30 optimizer.lr 0.018969501474641902 +767 30 negative_sampler.num_negs_per_pos 22.0 +767 30 training.batch_size 2.0 +767 31 model.embedding_dim 1.0 +767 31 model.relation_dim 2.0 +767 31 loss.margin 6.3982263183812496 +767 31 loss.adversarial_temperature 0.3867384603502022 +767 31 optimizer.lr 0.012722038113910851 +767 31 negative_sampler.num_negs_per_pos 67.0 +767 31 training.batch_size 2.0 +767 32 model.embedding_dim 1.0 +767 32 model.relation_dim 2.0 +767 32 loss.margin 6.888927038529764 +767 32 loss.adversarial_temperature 0.8537253144672045 +767 32 optimizer.lr 0.009296423894241955 +767 32 negative_sampler.num_negs_per_pos 81.0 +767 32 training.batch_size 2.0 +767 33 model.embedding_dim 2.0 +767 33 model.relation_dim 2.0 +767 33 loss.margin 18.247793555675273 +767 33 loss.adversarial_temperature 0.4343135070434897 +767 33 optimizer.lr 0.02013111641155688 +767 33 negative_sampler.num_negs_per_pos 54.0 +767 33 training.batch_size 2.0 +767 34 model.embedding_dim 1.0 +767 34 model.relation_dim 2.0 +767 34 loss.margin 28.627309149247353 +767 34 loss.adversarial_temperature 0.39218044201542934 +767 34 optimizer.lr 0.005603508258862261 +767 34 negative_sampler.num_negs_per_pos 0.0 +767 34 training.batch_size 2.0 +767 35 model.embedding_dim 2.0 +767 35 model.relation_dim 0.0 +767 35 loss.margin 11.872976649899817 +767 35 loss.adversarial_temperature 0.41593775694186885 +767 35 optimizer.lr 0.0015811232482087557 +767 35 negative_sampler.num_negs_per_pos 9.0 +767 35 
training.batch_size 1.0 +767 36 model.embedding_dim 1.0 +767 36 model.relation_dim 0.0 +767 36 loss.margin 22.938591251164414 +767 36 loss.adversarial_temperature 0.5513153113640186 +767 36 optimizer.lr 0.007524120461553376 +767 36 negative_sampler.num_negs_per_pos 13.0 +767 36 training.batch_size 2.0 +767 37 model.embedding_dim 2.0 +767 37 model.relation_dim 0.0 +767 37 loss.margin 4.195947147436787 +767 37 loss.adversarial_temperature 0.10420733030185775 +767 37 optimizer.lr 0.0029349790812597054 +767 37 negative_sampler.num_negs_per_pos 9.0 +767 37 training.batch_size 2.0 +767 38 model.embedding_dim 2.0 +767 38 model.relation_dim 0.0 +767 38 loss.margin 17.784166661052804 +767 38 loss.adversarial_temperature 0.22704449404378185 +767 38 optimizer.lr 0.0014400117840373068 +767 38 negative_sampler.num_negs_per_pos 82.0 +767 38 training.batch_size 2.0 +767 39 model.embedding_dim 2.0 +767 39 model.relation_dim 0.0 +767 39 loss.margin 12.573426083730737 +767 39 loss.adversarial_temperature 0.2739997935222109 +767 39 optimizer.lr 0.01004657036890988 +767 39 negative_sampler.num_negs_per_pos 69.0 +767 39 training.batch_size 0.0 +767 40 model.embedding_dim 2.0 +767 40 model.relation_dim 1.0 +767 40 loss.margin 14.220408111969117 +767 40 loss.adversarial_temperature 0.8023347626602336 +767 40 optimizer.lr 0.005365762942312157 +767 40 negative_sampler.num_negs_per_pos 34.0 +767 40 training.batch_size 2.0 +767 41 model.embedding_dim 0.0 +767 41 model.relation_dim 1.0 +767 41 loss.margin 21.478142707262496 +767 41 loss.adversarial_temperature 0.5921246007852357 +767 41 optimizer.lr 0.02647438426976817 +767 41 negative_sampler.num_negs_per_pos 51.0 +767 41 training.batch_size 2.0 +767 42 model.embedding_dim 0.0 +767 42 model.relation_dim 1.0 +767 42 loss.margin 9.674742242996897 +767 42 loss.adversarial_temperature 0.37311253047391013 +767 42 optimizer.lr 0.0039081407066387935 +767 42 negative_sampler.num_negs_per_pos 94.0 +767 42 training.batch_size 0.0 +767 43 
model.embedding_dim 2.0 +767 43 model.relation_dim 0.0 +767 43 loss.margin 10.715316259172987 +767 43 loss.adversarial_temperature 0.3319741325519055 +767 43 optimizer.lr 0.025904546858751255 +767 43 negative_sampler.num_negs_per_pos 22.0 +767 43 training.batch_size 2.0 +767 44 model.embedding_dim 2.0 +767 44 model.relation_dim 0.0 +767 44 loss.margin 15.238020617808665 +767 44 loss.adversarial_temperature 0.8953992372575397 +767 44 optimizer.lr 0.02759180672309559 +767 44 negative_sampler.num_negs_per_pos 18.0 +767 44 training.batch_size 2.0 +767 45 model.embedding_dim 1.0 +767 45 model.relation_dim 2.0 +767 45 loss.margin 3.86689854062928 +767 45 loss.adversarial_temperature 0.2457522770709569 +767 45 optimizer.lr 0.00505663545119842 +767 45 negative_sampler.num_negs_per_pos 61.0 +767 45 training.batch_size 0.0 +767 46 model.embedding_dim 2.0 +767 46 model.relation_dim 1.0 +767 46 loss.margin 3.682304586065496 +767 46 loss.adversarial_temperature 0.4980845612661411 +767 46 optimizer.lr 0.022985431138130317 +767 46 negative_sampler.num_negs_per_pos 24.0 +767 46 training.batch_size 0.0 +767 47 model.embedding_dim 2.0 +767 47 model.relation_dim 1.0 +767 47 loss.margin 27.24304894019028 +767 47 loss.adversarial_temperature 0.9021948105082798 +767 47 optimizer.lr 0.04055428277338735 +767 47 negative_sampler.num_negs_per_pos 44.0 +767 47 training.batch_size 2.0 +767 48 model.embedding_dim 2.0 +767 48 model.relation_dim 1.0 +767 48 loss.margin 24.6604820933555 +767 48 loss.adversarial_temperature 0.608137312738659 +767 48 optimizer.lr 0.012151756788324478 +767 48 negative_sampler.num_negs_per_pos 65.0 +767 48 training.batch_size 0.0 +767 49 model.embedding_dim 1.0 +767 49 model.relation_dim 0.0 +767 49 loss.margin 23.769318293884485 +767 49 loss.adversarial_temperature 0.5201113853909173 +767 49 optimizer.lr 0.0036396689604938403 +767 49 negative_sampler.num_negs_per_pos 35.0 +767 49 training.batch_size 0.0 +767 50 model.embedding_dim 0.0 +767 50 model.relation_dim 2.0 
+767 50 loss.margin 18.821100924599108 +767 50 loss.adversarial_temperature 0.8354437679157974 +767 50 optimizer.lr 0.050172232534453465 +767 50 negative_sampler.num_negs_per_pos 37.0 +767 50 training.batch_size 2.0 +767 51 model.embedding_dim 0.0 +767 51 model.relation_dim 1.0 +767 51 loss.margin 7.054118679702013 +767 51 loss.adversarial_temperature 0.7668093435328273 +767 51 optimizer.lr 0.0018396851166596609 +767 51 negative_sampler.num_negs_per_pos 85.0 +767 51 training.batch_size 2.0 +767 52 model.embedding_dim 1.0 +767 52 model.relation_dim 1.0 +767 52 loss.margin 19.175979658669494 +767 52 loss.adversarial_temperature 0.6668690825956572 +767 52 optimizer.lr 0.061820206782591175 +767 52 negative_sampler.num_negs_per_pos 7.0 +767 52 training.batch_size 2.0 +767 53 model.embedding_dim 0.0 +767 53 model.relation_dim 1.0 +767 53 loss.margin 6.636422790199942 +767 53 loss.adversarial_temperature 0.5493225231933323 +767 53 optimizer.lr 0.0034217411273175656 +767 53 negative_sampler.num_negs_per_pos 64.0 +767 53 training.batch_size 2.0 +767 54 model.embedding_dim 0.0 +767 54 model.relation_dim 0.0 +767 54 loss.margin 1.2860172399776988 +767 54 loss.adversarial_temperature 0.9973743415806867 +767 54 optimizer.lr 0.008289987103855746 +767 54 negative_sampler.num_negs_per_pos 4.0 +767 54 training.batch_size 1.0 +767 55 model.embedding_dim 1.0 +767 55 model.relation_dim 2.0 +767 55 loss.margin 14.1514456763031 +767 55 loss.adversarial_temperature 0.6032687571403964 +767 55 optimizer.lr 0.0031528974100542275 +767 55 negative_sampler.num_negs_per_pos 94.0 +767 55 training.batch_size 0.0 +767 56 model.embedding_dim 2.0 +767 56 model.relation_dim 0.0 +767 56 loss.margin 4.5148134751017315 +767 56 loss.adversarial_temperature 0.34524323911774857 +767 56 optimizer.lr 0.051128032051051306 +767 56 negative_sampler.num_negs_per_pos 31.0 +767 56 training.batch_size 2.0 +767 57 model.embedding_dim 1.0 +767 57 model.relation_dim 2.0 +767 57 loss.margin 20.730370303826323 +767 57 
loss.adversarial_temperature 0.41264044555533397 +767 57 optimizer.lr 0.011396078770301502 +767 57 negative_sampler.num_negs_per_pos 33.0 +767 57 training.batch_size 0.0 +767 58 model.embedding_dim 2.0 +767 58 model.relation_dim 2.0 +767 58 loss.margin 29.764003711855402 +767 58 loss.adversarial_temperature 0.3819982691540066 +767 58 optimizer.lr 0.001549273019564096 +767 58 negative_sampler.num_negs_per_pos 17.0 +767 58 training.batch_size 0.0 +767 59 model.embedding_dim 0.0 +767 59 model.relation_dim 2.0 +767 59 loss.margin 15.123047014324369 +767 59 loss.adversarial_temperature 0.9297417617363244 +767 59 optimizer.lr 0.09039413958919872 +767 59 negative_sampler.num_negs_per_pos 48.0 +767 59 training.batch_size 2.0 +767 60 model.embedding_dim 1.0 +767 60 model.relation_dim 2.0 +767 60 loss.margin 11.075216909595511 +767 60 loss.adversarial_temperature 0.755277513863384 +767 60 optimizer.lr 0.007655091305454119 +767 60 negative_sampler.num_negs_per_pos 64.0 +767 60 training.batch_size 1.0 +767 61 model.embedding_dim 2.0 +767 61 model.relation_dim 2.0 +767 61 loss.margin 4.896610016871895 +767 61 loss.adversarial_temperature 0.26516585429698314 +767 61 optimizer.lr 0.007304591320891545 +767 61 negative_sampler.num_negs_per_pos 27.0 +767 61 training.batch_size 1.0 +767 62 model.embedding_dim 0.0 +767 62 model.relation_dim 0.0 +767 62 loss.margin 10.403127387965542 +767 62 loss.adversarial_temperature 0.34916366957725076 +767 62 optimizer.lr 0.09764841809992053 +767 62 negative_sampler.num_negs_per_pos 75.0 +767 62 training.batch_size 2.0 +767 63 model.embedding_dim 2.0 +767 63 model.relation_dim 1.0 +767 63 loss.margin 6.9090137765898145 +767 63 loss.adversarial_temperature 0.5428249279005217 +767 63 optimizer.lr 0.039956009801094115 +767 63 negative_sampler.num_negs_per_pos 40.0 +767 63 training.batch_size 1.0 +767 64 model.embedding_dim 0.0 +767 64 model.relation_dim 0.0 +767 64 loss.margin 22.525698071875137 +767 64 loss.adversarial_temperature 
0.22549391901759142 +767 64 optimizer.lr 0.0015007548558912671 +767 64 negative_sampler.num_negs_per_pos 37.0 +767 64 training.batch_size 1.0 +767 65 model.embedding_dim 0.0 +767 65 model.relation_dim 2.0 +767 65 loss.margin 8.857474682728654 +767 65 loss.adversarial_temperature 0.41929899350217326 +767 65 optimizer.lr 0.008879106703377085 +767 65 negative_sampler.num_negs_per_pos 75.0 +767 65 training.batch_size 2.0 +767 66 model.embedding_dim 0.0 +767 66 model.relation_dim 0.0 +767 66 loss.margin 23.634553696354626 +767 66 loss.adversarial_temperature 0.5106843515085757 +767 66 optimizer.lr 0.09116687251971087 +767 66 negative_sampler.num_negs_per_pos 22.0 +767 66 training.batch_size 1.0 +767 67 model.embedding_dim 0.0 +767 67 model.relation_dim 1.0 +767 67 loss.margin 2.3613154816647 +767 67 loss.adversarial_temperature 0.23507634011928757 +767 67 optimizer.lr 0.0014495560441275576 +767 67 negative_sampler.num_negs_per_pos 3.0 +767 67 training.batch_size 1.0 +767 68 model.embedding_dim 0.0 +767 68 model.relation_dim 2.0 +767 68 loss.margin 1.559193146015247 +767 68 loss.adversarial_temperature 0.4670147341570262 +767 68 optimizer.lr 0.06830670344683382 +767 68 negative_sampler.num_negs_per_pos 68.0 +767 68 training.batch_size 0.0 +767 69 model.embedding_dim 1.0 +767 69 model.relation_dim 2.0 +767 69 loss.margin 9.76749957453086 +767 69 loss.adversarial_temperature 0.43368311826660366 +767 69 optimizer.lr 0.001346742741788523 +767 69 negative_sampler.num_negs_per_pos 67.0 +767 69 training.batch_size 2.0 +767 70 model.embedding_dim 2.0 +767 70 model.relation_dim 1.0 +767 70 loss.margin 10.180997992594971 +767 70 loss.adversarial_temperature 0.751475123590447 +767 70 optimizer.lr 0.03249219873168812 +767 70 negative_sampler.num_negs_per_pos 69.0 +767 70 training.batch_size 0.0 +767 71 model.embedding_dim 0.0 +767 71 model.relation_dim 0.0 +767 71 loss.margin 19.31225173203041 +767 71 loss.adversarial_temperature 0.521744761937925 +767 71 optimizer.lr 
0.03127816709010877 +767 71 negative_sampler.num_negs_per_pos 20.0 +767 71 training.batch_size 2.0 +767 72 model.embedding_dim 1.0 +767 72 model.relation_dim 2.0 +767 72 loss.margin 7.2488337528987055 +767 72 loss.adversarial_temperature 0.5917120213950671 +767 72 optimizer.lr 0.030377670075025454 +767 72 negative_sampler.num_negs_per_pos 31.0 +767 72 training.batch_size 1.0 +767 73 model.embedding_dim 0.0 +767 73 model.relation_dim 0.0 +767 73 loss.margin 1.6642325016679393 +767 73 loss.adversarial_temperature 0.8289164857007214 +767 73 optimizer.lr 0.005351056670634944 +767 73 negative_sampler.num_negs_per_pos 12.0 +767 73 training.batch_size 2.0 +767 74 model.embedding_dim 2.0 +767 74 model.relation_dim 1.0 +767 74 loss.margin 3.920234359896165 +767 74 loss.adversarial_temperature 0.2689634358214258 +767 74 optimizer.lr 0.04897785833980517 +767 74 negative_sampler.num_negs_per_pos 9.0 +767 74 training.batch_size 0.0 +767 75 model.embedding_dim 1.0 +767 75 model.relation_dim 2.0 +767 75 loss.margin 4.03803645343958 +767 75 loss.adversarial_temperature 0.7681862139993938 +767 75 optimizer.lr 0.0028206889146746798 +767 75 negative_sampler.num_negs_per_pos 95.0 +767 75 training.batch_size 0.0 +767 76 model.embedding_dim 2.0 +767 76 model.relation_dim 0.0 +767 76 loss.margin 28.011557151146846 +767 76 loss.adversarial_temperature 0.8094573595026399 +767 76 optimizer.lr 0.04866732553928987 +767 76 negative_sampler.num_negs_per_pos 8.0 +767 76 training.batch_size 0.0 +767 77 model.embedding_dim 0.0 +767 77 model.relation_dim 1.0 +767 77 loss.margin 26.00112944415803 +767 77 loss.adversarial_temperature 0.950482293107228 +767 77 optimizer.lr 0.022740119324725923 +767 77 negative_sampler.num_negs_per_pos 85.0 +767 77 training.batch_size 2.0 +767 78 model.embedding_dim 1.0 +767 78 model.relation_dim 2.0 +767 78 loss.margin 26.43045668682757 +767 78 loss.adversarial_temperature 0.21845632796585635 +767 78 optimizer.lr 0.012392564352243502 +767 78 
negative_sampler.num_negs_per_pos 75.0 +767 78 training.batch_size 1.0 +767 79 model.embedding_dim 2.0 +767 79 model.relation_dim 2.0 +767 79 loss.margin 26.724041883681537 +767 79 loss.adversarial_temperature 0.5808463654033346 +767 79 optimizer.lr 0.024084863795261573 +767 79 negative_sampler.num_negs_per_pos 70.0 +767 79 training.batch_size 1.0 +767 80 model.embedding_dim 2.0 +767 80 model.relation_dim 2.0 +767 80 loss.margin 17.129020744156318 +767 80 loss.adversarial_temperature 0.1439308583648941 +767 80 optimizer.lr 0.01468883158052467 +767 80 negative_sampler.num_negs_per_pos 64.0 +767 80 training.batch_size 0.0 +767 81 model.embedding_dim 1.0 +767 81 model.relation_dim 2.0 +767 81 loss.margin 29.754151830551837 +767 81 loss.adversarial_temperature 0.6089122109540117 +767 81 optimizer.lr 0.0018724090804646543 +767 81 negative_sampler.num_negs_per_pos 56.0 +767 81 training.batch_size 2.0 +767 82 model.embedding_dim 1.0 +767 82 model.relation_dim 2.0 +767 82 loss.margin 29.141275964791404 +767 82 loss.adversarial_temperature 0.32552232297524897 +767 82 optimizer.lr 0.050851032457941224 +767 82 negative_sampler.num_negs_per_pos 81.0 +767 82 training.batch_size 0.0 +767 83 model.embedding_dim 2.0 +767 83 model.relation_dim 0.0 +767 83 loss.margin 4.612079785725093 +767 83 loss.adversarial_temperature 0.21080840155637848 +767 83 optimizer.lr 0.09488944342242064 +767 83 negative_sampler.num_negs_per_pos 15.0 +767 83 training.batch_size 2.0 +767 84 model.embedding_dim 1.0 +767 84 model.relation_dim 2.0 +767 84 loss.margin 17.982284178992682 +767 84 loss.adversarial_temperature 0.6239583603360441 +767 84 optimizer.lr 0.003488378212003012 +767 84 negative_sampler.num_negs_per_pos 26.0 +767 84 training.batch_size 0.0 +767 85 model.embedding_dim 1.0 +767 85 model.relation_dim 2.0 +767 85 loss.margin 14.42076707440658 +767 85 loss.adversarial_temperature 0.41546039310716154 +767 85 optimizer.lr 0.04387016421715068 +767 85 negative_sampler.num_negs_per_pos 32.0 +767 85 
training.batch_size 0.0 +767 86 model.embedding_dim 0.0 +767 86 model.relation_dim 1.0 +767 86 loss.margin 26.853910985175915 +767 86 loss.adversarial_temperature 0.7046377074541683 +767 86 optimizer.lr 0.006182460946768844 +767 86 negative_sampler.num_negs_per_pos 46.0 +767 86 training.batch_size 1.0 +767 87 model.embedding_dim 0.0 +767 87 model.relation_dim 0.0 +767 87 loss.margin 14.131221971441496 +767 87 loss.adversarial_temperature 0.3074155219396295 +767 87 optimizer.lr 0.003353385335615487 +767 87 negative_sampler.num_negs_per_pos 73.0 +767 87 training.batch_size 2.0 +767 88 model.embedding_dim 2.0 +767 88 model.relation_dim 2.0 +767 88 loss.margin 29.178399567704634 +767 88 loss.adversarial_temperature 0.788237435311032 +767 88 optimizer.lr 0.02683225322474912 +767 88 negative_sampler.num_negs_per_pos 75.0 +767 88 training.batch_size 1.0 +767 89 model.embedding_dim 0.0 +767 89 model.relation_dim 2.0 +767 89 loss.margin 24.375037169606745 +767 89 loss.adversarial_temperature 0.9661750492073109 +767 89 optimizer.lr 0.014422353796602448 +767 89 negative_sampler.num_negs_per_pos 71.0 +767 89 training.batch_size 1.0 +767 90 model.embedding_dim 1.0 +767 90 model.relation_dim 2.0 +767 90 loss.margin 13.633994374268712 +767 90 loss.adversarial_temperature 0.6027946706757663 +767 90 optimizer.lr 0.002643507342742443 +767 90 negative_sampler.num_negs_per_pos 0.0 +767 90 training.batch_size 1.0 +767 91 model.embedding_dim 2.0 +767 91 model.relation_dim 0.0 +767 91 loss.margin 29.12793203766931 +767 91 loss.adversarial_temperature 0.8233529119972817 +767 91 optimizer.lr 0.0011980082750311053 +767 91 negative_sampler.num_negs_per_pos 95.0 +767 91 training.batch_size 0.0 +767 92 model.embedding_dim 0.0 +767 92 model.relation_dim 2.0 +767 92 loss.margin 6.176377752626159 +767 92 loss.adversarial_temperature 0.15102150435577238 +767 92 optimizer.lr 0.02098397708166658 +767 92 negative_sampler.num_negs_per_pos 59.0 +767 92 training.batch_size 0.0 +767 93 
model.embedding_dim 0.0 +767 93 model.relation_dim 2.0 +767 93 loss.margin 25.89448805246626 +767 93 loss.adversarial_temperature 0.9734372995761557 +767 93 optimizer.lr 0.0023587037949406487 +767 93 negative_sampler.num_negs_per_pos 69.0 +767 93 training.batch_size 1.0 +767 94 model.embedding_dim 0.0 +767 94 model.relation_dim 1.0 +767 94 loss.margin 1.8881783765995535 +767 94 loss.adversarial_temperature 0.26899816779820473 +767 94 optimizer.lr 0.0022596902183589676 +767 94 negative_sampler.num_negs_per_pos 23.0 +767 94 training.batch_size 0.0 +767 95 model.embedding_dim 0.0 +767 95 model.relation_dim 1.0 +767 95 loss.margin 19.766599293377368 +767 95 loss.adversarial_temperature 0.4512218835426557 +767 95 optimizer.lr 0.010261156168272124 +767 95 negative_sampler.num_negs_per_pos 15.0 +767 95 training.batch_size 2.0 +767 96 model.embedding_dim 2.0 +767 96 model.relation_dim 0.0 +767 96 loss.margin 3.3594763328674344 +767 96 loss.adversarial_temperature 0.4779980291886986 +767 96 optimizer.lr 0.021871858109211394 +767 96 negative_sampler.num_negs_per_pos 87.0 +767 96 training.batch_size 0.0 +767 97 model.embedding_dim 2.0 +767 97 model.relation_dim 1.0 +767 97 loss.margin 23.455126400442133 +767 97 loss.adversarial_temperature 0.4859400500509965 +767 97 optimizer.lr 0.08350863716364483 +767 97 negative_sampler.num_negs_per_pos 16.0 +767 97 training.batch_size 1.0 +767 98 model.embedding_dim 1.0 +767 98 model.relation_dim 2.0 +767 98 loss.margin 24.83577250362917 +767 98 loss.adversarial_temperature 0.6677809938125876 +767 98 optimizer.lr 0.07746923723363203 +767 98 negative_sampler.num_negs_per_pos 3.0 +767 98 training.batch_size 0.0 +767 99 model.embedding_dim 0.0 +767 99 model.relation_dim 0.0 +767 99 loss.margin 18.813421311365992 +767 99 loss.adversarial_temperature 0.2774660670076553 +767 99 optimizer.lr 0.032544315843795905 +767 99 negative_sampler.num_negs_per_pos 54.0 +767 99 training.batch_size 2.0 +767 100 model.embedding_dim 0.0 +767 100 
model.relation_dim 1.0 +767 100 loss.margin 14.58789380331674 +767 100 loss.adversarial_temperature 0.22894165140022457 +767 100 optimizer.lr 0.003437925925010862 +767 100 negative_sampler.num_negs_per_pos 62.0 +767 100 training.batch_size 0.0 +767 1 dataset """kinships""" +767 1 model """transd""" +767 1 loss """nssa""" +767 1 regularizer """no""" +767 1 optimizer """adam""" +767 1 training_loop """owa""" +767 1 negative_sampler """basic""" +767 1 evaluator """rankbased""" +767 2 dataset """kinships""" +767 2 model """transd""" +767 2 loss """nssa""" +767 2 regularizer """no""" +767 2 optimizer """adam""" +767 2 training_loop """owa""" +767 2 negative_sampler """basic""" +767 2 evaluator """rankbased""" +767 3 dataset """kinships""" +767 3 model """transd""" +767 3 loss """nssa""" +767 3 regularizer """no""" +767 3 optimizer """adam""" +767 3 training_loop """owa""" +767 3 negative_sampler """basic""" +767 3 evaluator """rankbased""" +767 4 dataset """kinships""" +767 4 model """transd""" +767 4 loss """nssa""" +767 4 regularizer """no""" +767 4 optimizer """adam""" +767 4 training_loop """owa""" +767 4 negative_sampler """basic""" +767 4 evaluator """rankbased""" +767 5 dataset """kinships""" +767 5 model """transd""" +767 5 loss """nssa""" +767 5 regularizer """no""" +767 5 optimizer """adam""" +767 5 training_loop """owa""" +767 5 negative_sampler """basic""" +767 5 evaluator """rankbased""" +767 6 dataset """kinships""" +767 6 model """transd""" +767 6 loss """nssa""" +767 6 regularizer """no""" +767 6 optimizer """adam""" +767 6 training_loop """owa""" +767 6 negative_sampler """basic""" +767 6 evaluator """rankbased""" +767 7 dataset """kinships""" +767 7 model """transd""" +767 7 loss """nssa""" +767 7 regularizer """no""" +767 7 optimizer """adam""" +767 7 training_loop """owa""" +767 7 negative_sampler """basic""" +767 7 evaluator """rankbased""" +767 8 dataset """kinships""" +767 8 model """transd""" +767 8 loss """nssa""" +767 8 regularizer """no""" 
+767 8 optimizer """adam""" +767 8 training_loop """owa""" +767 8 negative_sampler """basic""" +767 8 evaluator """rankbased""" +767 9 dataset """kinships""" +767 9 model """transd""" +767 9 loss """nssa""" +767 9 regularizer """no""" +767 9 optimizer """adam""" +767 9 training_loop """owa""" +767 9 negative_sampler """basic""" +767 9 evaluator """rankbased""" +767 10 dataset """kinships""" +767 10 model """transd""" +767 10 loss """nssa""" +767 10 regularizer """no""" +767 10 optimizer """adam""" +767 10 training_loop """owa""" +767 10 negative_sampler """basic""" +767 10 evaluator """rankbased""" +767 11 dataset """kinships""" +767 11 model """transd""" +767 11 loss """nssa""" +767 11 regularizer """no""" +767 11 optimizer """adam""" +767 11 training_loop """owa""" +767 11 negative_sampler """basic""" +767 11 evaluator """rankbased""" +767 12 dataset """kinships""" +767 12 model """transd""" +767 12 loss """nssa""" +767 12 regularizer """no""" +767 12 optimizer """adam""" +767 12 training_loop """owa""" +767 12 negative_sampler """basic""" +767 12 evaluator """rankbased""" +767 13 dataset """kinships""" +767 13 model """transd""" +767 13 loss """nssa""" +767 13 regularizer """no""" +767 13 optimizer """adam""" +767 13 training_loop """owa""" +767 13 negative_sampler """basic""" +767 13 evaluator """rankbased""" +767 14 dataset """kinships""" +767 14 model """transd""" +767 14 loss """nssa""" +767 14 regularizer """no""" +767 14 optimizer """adam""" +767 14 training_loop """owa""" +767 14 negative_sampler """basic""" +767 14 evaluator """rankbased""" +767 15 dataset """kinships""" +767 15 model """transd""" +767 15 loss """nssa""" +767 15 regularizer """no""" +767 15 optimizer """adam""" +767 15 training_loop """owa""" +767 15 negative_sampler """basic""" +767 15 evaluator """rankbased""" +767 16 dataset """kinships""" +767 16 model """transd""" +767 16 loss """nssa""" +767 16 regularizer """no""" +767 16 optimizer """adam""" +767 16 training_loop """owa""" +767 
16 negative_sampler """basic""" +767 16 evaluator """rankbased""" +767 17 dataset """kinships""" +767 17 model """transd""" +767 17 loss """nssa""" +767 17 regularizer """no""" +767 17 optimizer """adam""" +767 17 training_loop """owa""" +767 17 negative_sampler """basic""" +767 17 evaluator """rankbased""" +767 18 dataset """kinships""" +767 18 model """transd""" +767 18 loss """nssa""" +767 18 regularizer """no""" +767 18 optimizer """adam""" +767 18 training_loop """owa""" +767 18 negative_sampler """basic""" +767 18 evaluator """rankbased""" +767 19 dataset """kinships""" +767 19 model """transd""" +767 19 loss """nssa""" +767 19 regularizer """no""" +767 19 optimizer """adam""" +767 19 training_loop """owa""" +767 19 negative_sampler """basic""" +767 19 evaluator """rankbased""" +767 20 dataset """kinships""" +767 20 model """transd""" +767 20 loss """nssa""" +767 20 regularizer """no""" +767 20 optimizer """adam""" +767 20 training_loop """owa""" +767 20 negative_sampler """basic""" +767 20 evaluator """rankbased""" +767 21 dataset """kinships""" +767 21 model """transd""" +767 21 loss """nssa""" +767 21 regularizer """no""" +767 21 optimizer """adam""" +767 21 training_loop """owa""" +767 21 negative_sampler """basic""" +767 21 evaluator """rankbased""" +767 22 dataset """kinships""" +767 22 model """transd""" +767 22 loss """nssa""" +767 22 regularizer """no""" +767 22 optimizer """adam""" +767 22 training_loop """owa""" +767 22 negative_sampler """basic""" +767 22 evaluator """rankbased""" +767 23 dataset """kinships""" +767 23 model """transd""" +767 23 loss """nssa""" +767 23 regularizer """no""" +767 23 optimizer """adam""" +767 23 training_loop """owa""" +767 23 negative_sampler """basic""" +767 23 evaluator """rankbased""" +767 24 dataset """kinships""" +767 24 model """transd""" +767 24 loss """nssa""" +767 24 regularizer """no""" +767 24 optimizer """adam""" +767 24 training_loop """owa""" +767 24 negative_sampler """basic""" +767 24 evaluator 
"""rankbased""" +767 25 dataset """kinships""" +767 25 model """transd""" +767 25 loss """nssa""" +767 25 regularizer """no""" +767 25 optimizer """adam""" +767 25 training_loop """owa""" +767 25 negative_sampler """basic""" +767 25 evaluator """rankbased""" +767 26 dataset """kinships""" +767 26 model """transd""" +767 26 loss """nssa""" +767 26 regularizer """no""" +767 26 optimizer """adam""" +767 26 training_loop """owa""" +767 26 negative_sampler """basic""" +767 26 evaluator """rankbased""" +767 27 dataset """kinships""" +767 27 model """transd""" +767 27 loss """nssa""" +767 27 regularizer """no""" +767 27 optimizer """adam""" +767 27 training_loop """owa""" +767 27 negative_sampler """basic""" +767 27 evaluator """rankbased""" +767 28 dataset """kinships""" +767 28 model """transd""" +767 28 loss """nssa""" +767 28 regularizer """no""" +767 28 optimizer """adam""" +767 28 training_loop """owa""" +767 28 negative_sampler """basic""" +767 28 evaluator """rankbased""" +767 29 dataset """kinships""" +767 29 model """transd""" +767 29 loss """nssa""" +767 29 regularizer """no""" +767 29 optimizer """adam""" +767 29 training_loop """owa""" +767 29 negative_sampler """basic""" +767 29 evaluator """rankbased""" +767 30 dataset """kinships""" +767 30 model """transd""" +767 30 loss """nssa""" +767 30 regularizer """no""" +767 30 optimizer """adam""" +767 30 training_loop """owa""" +767 30 negative_sampler """basic""" +767 30 evaluator """rankbased""" +767 31 dataset """kinships""" +767 31 model """transd""" +767 31 loss """nssa""" +767 31 regularizer """no""" +767 31 optimizer """adam""" +767 31 training_loop """owa""" +767 31 negative_sampler """basic""" +767 31 evaluator """rankbased""" +767 32 dataset """kinships""" +767 32 model """transd""" +767 32 loss """nssa""" +767 32 regularizer """no""" +767 32 optimizer """adam""" +767 32 training_loop """owa""" +767 32 negative_sampler """basic""" +767 32 evaluator """rankbased""" +767 33 dataset """kinships""" +767 33 
model """transd""" +767 33 loss """nssa""" +767 33 regularizer """no""" +767 33 optimizer """adam""" +767 33 training_loop """owa""" +767 33 negative_sampler """basic""" +767 33 evaluator """rankbased""" +767 34 dataset """kinships""" +767 34 model """transd""" +767 34 loss """nssa""" +767 34 regularizer """no""" +767 34 optimizer """adam""" +767 34 training_loop """owa""" +767 34 negative_sampler """basic""" +767 34 evaluator """rankbased""" +767 35 dataset """kinships""" +767 35 model """transd""" +767 35 loss """nssa""" +767 35 regularizer """no""" +767 35 optimizer """adam""" +767 35 training_loop """owa""" +767 35 negative_sampler """basic""" +767 35 evaluator """rankbased""" +767 36 dataset """kinships""" +767 36 model """transd""" +767 36 loss """nssa""" +767 36 regularizer """no""" +767 36 optimizer """adam""" +767 36 training_loop """owa""" +767 36 negative_sampler """basic""" +767 36 evaluator """rankbased""" +767 37 dataset """kinships""" +767 37 model """transd""" +767 37 loss """nssa""" +767 37 regularizer """no""" +767 37 optimizer """adam""" +767 37 training_loop """owa""" +767 37 negative_sampler """basic""" +767 37 evaluator """rankbased""" +767 38 dataset """kinships""" +767 38 model """transd""" +767 38 loss """nssa""" +767 38 regularizer """no""" +767 38 optimizer """adam""" +767 38 training_loop """owa""" +767 38 negative_sampler """basic""" +767 38 evaluator """rankbased""" +767 39 dataset """kinships""" +767 39 model """transd""" +767 39 loss """nssa""" +767 39 regularizer """no""" +767 39 optimizer """adam""" +767 39 training_loop """owa""" +767 39 negative_sampler """basic""" +767 39 evaluator """rankbased""" +767 40 dataset """kinships""" +767 40 model """transd""" +767 40 loss """nssa""" +767 40 regularizer """no""" +767 40 optimizer """adam""" +767 40 training_loop """owa""" +767 40 negative_sampler """basic""" +767 40 evaluator """rankbased""" +767 41 dataset """kinships""" +767 41 model """transd""" +767 41 loss """nssa""" +767 41 
regularizer """no""" +767 41 optimizer """adam""" +767 41 training_loop """owa""" +767 41 negative_sampler """basic""" +767 41 evaluator """rankbased""" +767 42 dataset """kinships""" +767 42 model """transd""" +767 42 loss """nssa""" +767 42 regularizer """no""" +767 42 optimizer """adam""" +767 42 training_loop """owa""" +767 42 negative_sampler """basic""" +767 42 evaluator """rankbased""" +767 43 dataset """kinships""" +767 43 model """transd""" +767 43 loss """nssa""" +767 43 regularizer """no""" +767 43 optimizer """adam""" +767 43 training_loop """owa""" +767 43 negative_sampler """basic""" +767 43 evaluator """rankbased""" +767 44 dataset """kinships""" +767 44 model """transd""" +767 44 loss """nssa""" +767 44 regularizer """no""" +767 44 optimizer """adam""" +767 44 training_loop """owa""" +767 44 negative_sampler """basic""" +767 44 evaluator """rankbased""" +767 45 dataset """kinships""" +767 45 model """transd""" +767 45 loss """nssa""" +767 45 regularizer """no""" +767 45 optimizer """adam""" +767 45 training_loop """owa""" +767 45 negative_sampler """basic""" +767 45 evaluator """rankbased""" +767 46 dataset """kinships""" +767 46 model """transd""" +767 46 loss """nssa""" +767 46 regularizer """no""" +767 46 optimizer """adam""" +767 46 training_loop """owa""" +767 46 negative_sampler """basic""" +767 46 evaluator """rankbased""" +767 47 dataset """kinships""" +767 47 model """transd""" +767 47 loss """nssa""" +767 47 regularizer """no""" +767 47 optimizer """adam""" +767 47 training_loop """owa""" +767 47 negative_sampler """basic""" +767 47 evaluator """rankbased""" +767 48 dataset """kinships""" +767 48 model """transd""" +767 48 loss """nssa""" +767 48 regularizer """no""" +767 48 optimizer """adam""" +767 48 training_loop """owa""" +767 48 negative_sampler """basic""" +767 48 evaluator """rankbased""" +767 49 dataset """kinships""" +767 49 model """transd""" +767 49 loss """nssa""" +767 49 regularizer """no""" +767 49 optimizer """adam""" +767 
49 training_loop """owa""" +767 49 negative_sampler """basic""" +767 49 evaluator """rankbased""" +767 50 dataset """kinships""" +767 50 model """transd""" +767 50 loss """nssa""" +767 50 regularizer """no""" +767 50 optimizer """adam""" +767 50 training_loop """owa""" +767 50 negative_sampler """basic""" +767 50 evaluator """rankbased""" +767 51 dataset """kinships""" +767 51 model """transd""" +767 51 loss """nssa""" +767 51 regularizer """no""" +767 51 optimizer """adam""" +767 51 training_loop """owa""" +767 51 negative_sampler """basic""" +767 51 evaluator """rankbased""" +767 52 dataset """kinships""" +767 52 model """transd""" +767 52 loss """nssa""" +767 52 regularizer """no""" +767 52 optimizer """adam""" +767 52 training_loop """owa""" +767 52 negative_sampler """basic""" +767 52 evaluator """rankbased""" +767 53 dataset """kinships""" +767 53 model """transd""" +767 53 loss """nssa""" +767 53 regularizer """no""" +767 53 optimizer """adam""" +767 53 training_loop """owa""" +767 53 negative_sampler """basic""" +767 53 evaluator """rankbased""" +767 54 dataset """kinships""" +767 54 model """transd""" +767 54 loss """nssa""" +767 54 regularizer """no""" +767 54 optimizer """adam""" +767 54 training_loop """owa""" +767 54 negative_sampler """basic""" +767 54 evaluator """rankbased""" +767 55 dataset """kinships""" +767 55 model """transd""" +767 55 loss """nssa""" +767 55 regularizer """no""" +767 55 optimizer """adam""" +767 55 training_loop """owa""" +767 55 negative_sampler """basic""" +767 55 evaluator """rankbased""" +767 56 dataset """kinships""" +767 56 model """transd""" +767 56 loss """nssa""" +767 56 regularizer """no""" +767 56 optimizer """adam""" +767 56 training_loop """owa""" +767 56 negative_sampler """basic""" +767 56 evaluator """rankbased""" +767 57 dataset """kinships""" +767 57 model """transd""" +767 57 loss """nssa""" +767 57 regularizer """no""" +767 57 optimizer """adam""" +767 57 training_loop """owa""" +767 57 negative_sampler 
"""basic""" +767 57 evaluator """rankbased""" +767 58 dataset """kinships""" +767 58 model """transd""" +767 58 loss """nssa""" +767 58 regularizer """no""" +767 58 optimizer """adam""" +767 58 training_loop """owa""" +767 58 negative_sampler """basic""" +767 58 evaluator """rankbased""" +767 59 dataset """kinships""" +767 59 model """transd""" +767 59 loss """nssa""" +767 59 regularizer """no""" +767 59 optimizer """adam""" +767 59 training_loop """owa""" +767 59 negative_sampler """basic""" +767 59 evaluator """rankbased""" +767 60 dataset """kinships""" +767 60 model """transd""" +767 60 loss """nssa""" +767 60 regularizer """no""" +767 60 optimizer """adam""" +767 60 training_loop """owa""" +767 60 negative_sampler """basic""" +767 60 evaluator """rankbased""" +767 61 dataset """kinships""" +767 61 model """transd""" +767 61 loss """nssa""" +767 61 regularizer """no""" +767 61 optimizer """adam""" +767 61 training_loop """owa""" +767 61 negative_sampler """basic""" +767 61 evaluator """rankbased""" +767 62 dataset """kinships""" +767 62 model """transd""" +767 62 loss """nssa""" +767 62 regularizer """no""" +767 62 optimizer """adam""" +767 62 training_loop """owa""" +767 62 negative_sampler """basic""" +767 62 evaluator """rankbased""" +767 63 dataset """kinships""" +767 63 model """transd""" +767 63 loss """nssa""" +767 63 regularizer """no""" +767 63 optimizer """adam""" +767 63 training_loop """owa""" +767 63 negative_sampler """basic""" +767 63 evaluator """rankbased""" +767 64 dataset """kinships""" +767 64 model """transd""" +767 64 loss """nssa""" +767 64 regularizer """no""" +767 64 optimizer """adam""" +767 64 training_loop """owa""" +767 64 negative_sampler """basic""" +767 64 evaluator """rankbased""" +767 65 dataset """kinships""" +767 65 model """transd""" +767 65 loss """nssa""" +767 65 regularizer """no""" +767 65 optimizer """adam""" +767 65 training_loop """owa""" +767 65 negative_sampler """basic""" +767 65 evaluator """rankbased""" +767 66 
dataset """kinships""" +767 66 model """transd""" +767 66 loss """nssa""" +767 66 regularizer """no""" +767 66 optimizer """adam""" +767 66 training_loop """owa""" +767 66 negative_sampler """basic""" +767 66 evaluator """rankbased""" +767 67 dataset """kinships""" +767 67 model """transd""" +767 67 loss """nssa""" +767 67 regularizer """no""" +767 67 optimizer """adam""" +767 67 training_loop """owa""" +767 67 negative_sampler """basic""" +767 67 evaluator """rankbased""" +767 68 dataset """kinships""" +767 68 model """transd""" +767 68 loss """nssa""" +767 68 regularizer """no""" +767 68 optimizer """adam""" +767 68 training_loop """owa""" +767 68 negative_sampler """basic""" +767 68 evaluator """rankbased""" +767 69 dataset """kinships""" +767 69 model """transd""" +767 69 loss """nssa""" +767 69 regularizer """no""" +767 69 optimizer """adam""" +767 69 training_loop """owa""" +767 69 negative_sampler """basic""" +767 69 evaluator """rankbased""" +767 70 dataset """kinships""" +767 70 model """transd""" +767 70 loss """nssa""" +767 70 regularizer """no""" +767 70 optimizer """adam""" +767 70 training_loop """owa""" +767 70 negative_sampler """basic""" +767 70 evaluator """rankbased""" +767 71 dataset """kinships""" +767 71 model """transd""" +767 71 loss """nssa""" +767 71 regularizer """no""" +767 71 optimizer """adam""" +767 71 training_loop """owa""" +767 71 negative_sampler """basic""" +767 71 evaluator """rankbased""" +767 72 dataset """kinships""" +767 72 model """transd""" +767 72 loss """nssa""" +767 72 regularizer """no""" +767 72 optimizer """adam""" +767 72 training_loop """owa""" +767 72 negative_sampler """basic""" +767 72 evaluator """rankbased""" +767 73 dataset """kinships""" +767 73 model """transd""" +767 73 loss """nssa""" +767 73 regularizer """no""" +767 73 optimizer """adam""" +767 73 training_loop """owa""" +767 73 negative_sampler """basic""" +767 73 evaluator """rankbased""" +767 74 dataset """kinships""" +767 74 model """transd""" +767 
74 loss """nssa""" +767 74 regularizer """no""" +767 74 optimizer """adam""" +767 74 training_loop """owa""" +767 74 negative_sampler """basic""" +767 74 evaluator """rankbased""" +767 75 dataset """kinships""" +767 75 model """transd""" +767 75 loss """nssa""" +767 75 regularizer """no""" +767 75 optimizer """adam""" +767 75 training_loop """owa""" +767 75 negative_sampler """basic""" +767 75 evaluator """rankbased""" +767 76 dataset """kinships""" +767 76 model """transd""" +767 76 loss """nssa""" +767 76 regularizer """no""" +767 76 optimizer """adam""" +767 76 training_loop """owa""" +767 76 negative_sampler """basic""" +767 76 evaluator """rankbased""" +767 77 dataset """kinships""" +767 77 model """transd""" +767 77 loss """nssa""" +767 77 regularizer """no""" +767 77 optimizer """adam""" +767 77 training_loop """owa""" +767 77 negative_sampler """basic""" +767 77 evaluator """rankbased""" +767 78 dataset """kinships""" +767 78 model """transd""" +767 78 loss """nssa""" +767 78 regularizer """no""" +767 78 optimizer """adam""" +767 78 training_loop """owa""" +767 78 negative_sampler """basic""" +767 78 evaluator """rankbased""" +767 79 dataset """kinships""" +767 79 model """transd""" +767 79 loss """nssa""" +767 79 regularizer """no""" +767 79 optimizer """adam""" +767 79 training_loop """owa""" +767 79 negative_sampler """basic""" +767 79 evaluator """rankbased""" +767 80 dataset """kinships""" +767 80 model """transd""" +767 80 loss """nssa""" +767 80 regularizer """no""" +767 80 optimizer """adam""" +767 80 training_loop """owa""" +767 80 negative_sampler """basic""" +767 80 evaluator """rankbased""" +767 81 dataset """kinships""" +767 81 model """transd""" +767 81 loss """nssa""" +767 81 regularizer """no""" +767 81 optimizer """adam""" +767 81 training_loop """owa""" +767 81 negative_sampler """basic""" +767 81 evaluator """rankbased""" +767 82 dataset """kinships""" +767 82 model """transd""" +767 82 loss """nssa""" +767 82 regularizer """no""" +767 82 
optimizer """adam""" +767 82 training_loop """owa""" +767 82 negative_sampler """basic""" +767 82 evaluator """rankbased""" +767 83 dataset """kinships""" +767 83 model """transd""" +767 83 loss """nssa""" +767 83 regularizer """no""" +767 83 optimizer """adam""" +767 83 training_loop """owa""" +767 83 negative_sampler """basic""" +767 83 evaluator """rankbased""" +767 84 dataset """kinships""" +767 84 model """transd""" +767 84 loss """nssa""" +767 84 regularizer """no""" +767 84 optimizer """adam""" +767 84 training_loop """owa""" +767 84 negative_sampler """basic""" +767 84 evaluator """rankbased""" +767 85 dataset """kinships""" +767 85 model """transd""" +767 85 loss """nssa""" +767 85 regularizer """no""" +767 85 optimizer """adam""" +767 85 training_loop """owa""" +767 85 negative_sampler """basic""" +767 85 evaluator """rankbased""" +767 86 dataset """kinships""" +767 86 model """transd""" +767 86 loss """nssa""" +767 86 regularizer """no""" +767 86 optimizer """adam""" +767 86 training_loop """owa""" +767 86 negative_sampler """basic""" +767 86 evaluator """rankbased""" +767 87 dataset """kinships""" +767 87 model """transd""" +767 87 loss """nssa""" +767 87 regularizer """no""" +767 87 optimizer """adam""" +767 87 training_loop """owa""" +767 87 negative_sampler """basic""" +767 87 evaluator """rankbased""" +767 88 dataset """kinships""" +767 88 model """transd""" +767 88 loss """nssa""" +767 88 regularizer """no""" +767 88 optimizer """adam""" +767 88 training_loop """owa""" +767 88 negative_sampler """basic""" +767 88 evaluator """rankbased""" +767 89 dataset """kinships""" +767 89 model """transd""" +767 89 loss """nssa""" +767 89 regularizer """no""" +767 89 optimizer """adam""" +767 89 training_loop """owa""" +767 89 negative_sampler """basic""" +767 89 evaluator """rankbased""" +767 90 dataset """kinships""" +767 90 model """transd""" +767 90 loss """nssa""" +767 90 regularizer """no""" +767 90 optimizer """adam""" +767 90 training_loop """owa""" 
+767 90 negative_sampler """basic""" +767 90 evaluator """rankbased""" +767 91 dataset """kinships""" +767 91 model """transd""" +767 91 loss """nssa""" +767 91 regularizer """no""" +767 91 optimizer """adam""" +767 91 training_loop """owa""" +767 91 negative_sampler """basic""" +767 91 evaluator """rankbased""" +767 92 dataset """kinships""" +767 92 model """transd""" +767 92 loss """nssa""" +767 92 regularizer """no""" +767 92 optimizer """adam""" +767 92 training_loop """owa""" +767 92 negative_sampler """basic""" +767 92 evaluator """rankbased""" +767 93 dataset """kinships""" +767 93 model """transd""" +767 93 loss """nssa""" +767 93 regularizer """no""" +767 93 optimizer """adam""" +767 93 training_loop """owa""" +767 93 negative_sampler """basic""" +767 93 evaluator """rankbased""" +767 94 dataset """kinships""" +767 94 model """transd""" +767 94 loss """nssa""" +767 94 regularizer """no""" +767 94 optimizer """adam""" +767 94 training_loop """owa""" +767 94 negative_sampler """basic""" +767 94 evaluator """rankbased""" +767 95 dataset """kinships""" +767 95 model """transd""" +767 95 loss """nssa""" +767 95 regularizer """no""" +767 95 optimizer """adam""" +767 95 training_loop """owa""" +767 95 negative_sampler """basic""" +767 95 evaluator """rankbased""" +767 96 dataset """kinships""" +767 96 model """transd""" +767 96 loss """nssa""" +767 96 regularizer """no""" +767 96 optimizer """adam""" +767 96 training_loop """owa""" +767 96 negative_sampler """basic""" +767 96 evaluator """rankbased""" +767 97 dataset """kinships""" +767 97 model """transd""" +767 97 loss """nssa""" +767 97 regularizer """no""" +767 97 optimizer """adam""" +767 97 training_loop """owa""" +767 97 negative_sampler """basic""" +767 97 evaluator """rankbased""" +767 98 dataset """kinships""" +767 98 model """transd""" +767 98 loss """nssa""" +767 98 regularizer """no""" +767 98 optimizer """adam""" +767 98 training_loop """owa""" +767 98 negative_sampler """basic""" +767 98 evaluator 
"""rankbased""" +767 99 dataset """kinships""" +767 99 model """transd""" +767 99 loss """nssa""" +767 99 regularizer """no""" +767 99 optimizer """adam""" +767 99 training_loop """owa""" +767 99 negative_sampler """basic""" +767 99 evaluator """rankbased""" +767 100 dataset """kinships""" +767 100 model """transd""" +767 100 loss """nssa""" +767 100 regularizer """no""" +767 100 optimizer """adam""" +767 100 training_loop """owa""" +767 100 negative_sampler """basic""" +767 100 evaluator """rankbased""" +768 1 model.embedding_dim 2.0 +768 1 model.relation_dim 1.0 +768 1 loss.margin 12.933255173912586 +768 1 loss.adversarial_temperature 0.7626542096414617 +768 1 optimizer.lr 0.0035761269503535842 +768 1 negative_sampler.num_negs_per_pos 37.0 +768 1 training.batch_size 1.0 +768 2 model.embedding_dim 1.0 +768 2 model.relation_dim 0.0 +768 2 loss.margin 9.024498793130224 +768 2 loss.adversarial_temperature 0.3554634847336653 +768 2 optimizer.lr 0.016623735041929454 +768 2 negative_sampler.num_negs_per_pos 5.0 +768 2 training.batch_size 0.0 +768 3 model.embedding_dim 1.0 +768 3 model.relation_dim 1.0 +768 3 loss.margin 18.49297691445796 +768 3 loss.adversarial_temperature 0.3510308254100827 +768 3 optimizer.lr 0.0022802780631319144 +768 3 negative_sampler.num_negs_per_pos 58.0 +768 3 training.batch_size 2.0 +768 4 model.embedding_dim 2.0 +768 4 model.relation_dim 0.0 +768 4 loss.margin 20.44351284830747 +768 4 loss.adversarial_temperature 0.1686474669017999 +768 4 optimizer.lr 0.08789798510285649 +768 4 negative_sampler.num_negs_per_pos 64.0 +768 4 training.batch_size 2.0 +768 5 model.embedding_dim 0.0 +768 5 model.relation_dim 0.0 +768 5 loss.margin 1.2976489386126393 +768 5 loss.adversarial_temperature 0.8463110626910566 +768 5 optimizer.lr 0.009374566511832409 +768 5 negative_sampler.num_negs_per_pos 19.0 +768 5 training.batch_size 1.0 +768 6 model.embedding_dim 0.0 +768 6 model.relation_dim 2.0 +768 6 loss.margin 26.936967348424368 +768 6 
loss.adversarial_temperature 0.36583943259051643 +768 6 optimizer.lr 0.0021240196096320766 +768 6 negative_sampler.num_negs_per_pos 22.0 +768 6 training.batch_size 2.0 +768 7 model.embedding_dim 0.0 +768 7 model.relation_dim 0.0 +768 7 loss.margin 15.476351946734415 +768 7 loss.adversarial_temperature 0.5753959799222422 +768 7 optimizer.lr 0.003535657026450083 +768 7 negative_sampler.num_negs_per_pos 76.0 +768 7 training.batch_size 1.0 +768 8 model.embedding_dim 0.0 +768 8 model.relation_dim 1.0 +768 8 loss.margin 15.400812525141161 +768 8 loss.adversarial_temperature 0.9489695662374766 +768 8 optimizer.lr 0.0021920499347243376 +768 8 negative_sampler.num_negs_per_pos 41.0 +768 8 training.batch_size 2.0 +768 9 model.embedding_dim 0.0 +768 9 model.relation_dim 2.0 +768 9 loss.margin 6.9951986653134295 +768 9 loss.adversarial_temperature 0.1403545236901627 +768 9 optimizer.lr 0.00102821888432896 +768 9 negative_sampler.num_negs_per_pos 86.0 +768 9 training.batch_size 2.0 +768 10 model.embedding_dim 0.0 +768 10 model.relation_dim 0.0 +768 10 loss.margin 18.411899445885847 +768 10 loss.adversarial_temperature 0.45165927470187206 +768 10 optimizer.lr 0.017021580797632444 +768 10 negative_sampler.num_negs_per_pos 12.0 +768 10 training.batch_size 2.0 +768 11 model.embedding_dim 0.0 +768 11 model.relation_dim 2.0 +768 11 loss.margin 8.62469004564528 +768 11 loss.adversarial_temperature 0.6212276213880569 +768 11 optimizer.lr 0.0018074955117375346 +768 11 negative_sampler.num_negs_per_pos 20.0 +768 11 training.batch_size 2.0 +768 12 model.embedding_dim 0.0 +768 12 model.relation_dim 2.0 +768 12 loss.margin 29.42869840124905 +768 12 loss.adversarial_temperature 0.5577320759834161 +768 12 optimizer.lr 0.05672747967685033 +768 12 negative_sampler.num_negs_per_pos 13.0 +768 12 training.batch_size 0.0 +768 13 model.embedding_dim 0.0 +768 13 model.relation_dim 2.0 +768 13 loss.margin 26.45236204512203 +768 13 loss.adversarial_temperature 0.4270755336691612 +768 13 optimizer.lr 
0.0038592152728769437 +768 13 negative_sampler.num_negs_per_pos 81.0 +768 13 training.batch_size 0.0 +768 14 model.embedding_dim 2.0 +768 14 model.relation_dim 2.0 +768 14 loss.margin 1.3904093850453765 +768 14 loss.adversarial_temperature 0.8868346087288919 +768 14 optimizer.lr 0.008086856576095458 +768 14 negative_sampler.num_negs_per_pos 54.0 +768 14 training.batch_size 2.0 +768 15 model.embedding_dim 2.0 +768 15 model.relation_dim 2.0 +768 15 loss.margin 15.445587404553153 +768 15 loss.adversarial_temperature 0.20472358263864965 +768 15 optimizer.lr 0.0011551616248436439 +768 15 negative_sampler.num_negs_per_pos 68.0 +768 15 training.batch_size 2.0 +768 16 model.embedding_dim 2.0 +768 16 model.relation_dim 0.0 +768 16 loss.margin 10.7050046167769 +768 16 loss.adversarial_temperature 0.6688849233278534 +768 16 optimizer.lr 0.09474811241920224 +768 16 negative_sampler.num_negs_per_pos 79.0 +768 16 training.batch_size 0.0 +768 17 model.embedding_dim 0.0 +768 17 model.relation_dim 1.0 +768 17 loss.margin 24.58258675644109 +768 17 loss.adversarial_temperature 0.30163604776437225 +768 17 optimizer.lr 0.01939820798817671 +768 17 negative_sampler.num_negs_per_pos 72.0 +768 17 training.batch_size 1.0 +768 18 model.embedding_dim 2.0 +768 18 model.relation_dim 0.0 +768 18 loss.margin 17.04931544448023 +768 18 loss.adversarial_temperature 0.5120981797741262 +768 18 optimizer.lr 0.09701240308993622 +768 18 negative_sampler.num_negs_per_pos 71.0 +768 18 training.batch_size 1.0 +768 19 model.embedding_dim 0.0 +768 19 model.relation_dim 0.0 +768 19 loss.margin 10.042380590971696 +768 19 loss.adversarial_temperature 0.9250051646919778 +768 19 optimizer.lr 0.0011583911528513363 +768 19 negative_sampler.num_negs_per_pos 19.0 +768 19 training.batch_size 0.0 +768 20 model.embedding_dim 0.0 +768 20 model.relation_dim 2.0 +768 20 loss.margin 27.26849847564328 +768 20 loss.adversarial_temperature 0.8347092523483318 +768 20 optimizer.lr 0.04600624244597897 +768 20 
negative_sampler.num_negs_per_pos 61.0 +768 20 training.batch_size 1.0 +768 21 model.embedding_dim 1.0 +768 21 model.relation_dim 1.0 +768 21 loss.margin 21.686471917576814 +768 21 loss.adversarial_temperature 0.6542898412790558 +768 21 optimizer.lr 0.0015355415996792213 +768 21 negative_sampler.num_negs_per_pos 8.0 +768 21 training.batch_size 1.0 +768 22 model.embedding_dim 1.0 +768 22 model.relation_dim 1.0 +768 22 loss.margin 20.833074775388802 +768 22 loss.adversarial_temperature 0.975040201459651 +768 22 optimizer.lr 0.0013432230754403195 +768 22 negative_sampler.num_negs_per_pos 16.0 +768 22 training.batch_size 0.0 +768 23 model.embedding_dim 2.0 +768 23 model.relation_dim 0.0 +768 23 loss.margin 11.475146469881238 +768 23 loss.adversarial_temperature 0.36289667594269204 +768 23 optimizer.lr 0.008196388168171516 +768 23 negative_sampler.num_negs_per_pos 39.0 +768 23 training.batch_size 0.0 +768 24 model.embedding_dim 1.0 +768 24 model.relation_dim 1.0 +768 24 loss.margin 5.63522466110746 +768 24 loss.adversarial_temperature 0.8251017437385336 +768 24 optimizer.lr 0.002247619795429754 +768 24 negative_sampler.num_negs_per_pos 51.0 +768 24 training.batch_size 1.0 +768 25 model.embedding_dim 2.0 +768 25 model.relation_dim 1.0 +768 25 loss.margin 3.5876939016612894 +768 25 loss.adversarial_temperature 0.749243906703706 +768 25 optimizer.lr 0.034363839406320935 +768 25 negative_sampler.num_negs_per_pos 92.0 +768 25 training.batch_size 0.0 +768 26 model.embedding_dim 1.0 +768 26 model.relation_dim 0.0 +768 26 loss.margin 24.053611067687665 +768 26 loss.adversarial_temperature 0.7299503847231618 +768 26 optimizer.lr 0.00281950787626907 +768 26 negative_sampler.num_negs_per_pos 48.0 +768 26 training.batch_size 2.0 +768 27 model.embedding_dim 1.0 +768 27 model.relation_dim 0.0 +768 27 loss.margin 23.307517910258287 +768 27 loss.adversarial_temperature 0.39464171035359497 +768 27 optimizer.lr 0.00882346839576537 +768 27 negative_sampler.num_negs_per_pos 4.0 +768 27 
training.batch_size 2.0 +768 28 model.embedding_dim 1.0 +768 28 model.relation_dim 1.0 +768 28 loss.margin 11.574703399198128 +768 28 loss.adversarial_temperature 0.5441868401505318 +768 28 optimizer.lr 0.002816576310543132 +768 28 negative_sampler.num_negs_per_pos 35.0 +768 28 training.batch_size 0.0 +768 29 model.embedding_dim 2.0 +768 29 model.relation_dim 1.0 +768 29 loss.margin 5.878918841607121 +768 29 loss.adversarial_temperature 0.5162089379186149 +768 29 optimizer.lr 0.0025817697963611357 +768 29 negative_sampler.num_negs_per_pos 72.0 +768 29 training.batch_size 1.0 +768 30 model.embedding_dim 0.0 +768 30 model.relation_dim 1.0 +768 30 loss.margin 13.671949432753294 +768 30 loss.adversarial_temperature 0.16967570172836594 +768 30 optimizer.lr 0.005269689758794867 +768 30 negative_sampler.num_negs_per_pos 33.0 +768 30 training.batch_size 0.0 +768 31 model.embedding_dim 1.0 +768 31 model.relation_dim 1.0 +768 31 loss.margin 2.5605379013597886 +768 31 loss.adversarial_temperature 0.20096147703699466 +768 31 optimizer.lr 0.008719090488177067 +768 31 negative_sampler.num_negs_per_pos 36.0 +768 31 training.batch_size 0.0 +768 32 model.embedding_dim 2.0 +768 32 model.relation_dim 0.0 +768 32 loss.margin 21.260842735277294 +768 32 loss.adversarial_temperature 0.6684257987553259 +768 32 optimizer.lr 0.015266892393699386 +768 32 negative_sampler.num_negs_per_pos 89.0 +768 32 training.batch_size 2.0 +768 33 model.embedding_dim 0.0 +768 33 model.relation_dim 2.0 +768 33 loss.margin 2.310250820690098 +768 33 loss.adversarial_temperature 0.36376158011948645 +768 33 optimizer.lr 0.0015319780317808844 +768 33 negative_sampler.num_negs_per_pos 91.0 +768 33 training.batch_size 2.0 +768 34 model.embedding_dim 1.0 +768 34 model.relation_dim 2.0 +768 34 loss.margin 7.718718425138456 +768 34 loss.adversarial_temperature 0.6428933157990622 +768 34 optimizer.lr 0.025952669891299556 +768 34 negative_sampler.num_negs_per_pos 61.0 +768 34 training.batch_size 1.0 +768 35 
model.embedding_dim 0.0 +768 35 model.relation_dim 0.0 +768 35 loss.margin 10.058392468288085 +768 35 loss.adversarial_temperature 0.23912404002626947 +768 35 optimizer.lr 0.07901295092073561 +768 35 negative_sampler.num_negs_per_pos 54.0 +768 35 training.batch_size 1.0 +768 36 model.embedding_dim 2.0 +768 36 model.relation_dim 2.0 +768 36 loss.margin 20.5433648705953 +768 36 loss.adversarial_temperature 0.5289126706332088 +768 36 optimizer.lr 0.0034007938213966664 +768 36 negative_sampler.num_negs_per_pos 89.0 +768 36 training.batch_size 2.0 +768 37 model.embedding_dim 1.0 +768 37 model.relation_dim 2.0 +768 37 loss.margin 23.57446379531474 +768 37 loss.adversarial_temperature 0.4414929437134648 +768 37 optimizer.lr 0.007272440911233297 +768 37 negative_sampler.num_negs_per_pos 23.0 +768 37 training.batch_size 0.0 +768 38 model.embedding_dim 0.0 +768 38 model.relation_dim 0.0 +768 38 loss.margin 29.15234550832603 +768 38 loss.adversarial_temperature 0.5302175097889439 +768 38 optimizer.lr 0.0653255676194985 +768 38 negative_sampler.num_negs_per_pos 6.0 +768 38 training.batch_size 2.0 +768 39 model.embedding_dim 0.0 +768 39 model.relation_dim 1.0 +768 39 loss.margin 16.786784257405788 +768 39 loss.adversarial_temperature 0.16473905226544883 +768 39 optimizer.lr 0.014342977241321818 +768 39 negative_sampler.num_negs_per_pos 98.0 +768 39 training.batch_size 1.0 +768 40 model.embedding_dim 2.0 +768 40 model.relation_dim 2.0 +768 40 loss.margin 22.699897585672222 +768 40 loss.adversarial_temperature 0.4952309869950927 +768 40 optimizer.lr 0.007022144148784475 +768 40 negative_sampler.num_negs_per_pos 91.0 +768 40 training.batch_size 0.0 +768 41 model.embedding_dim 1.0 +768 41 model.relation_dim 1.0 +768 41 loss.margin 28.93816654628136 +768 41 loss.adversarial_temperature 0.5775481299826669 +768 41 optimizer.lr 0.029049137681476593 +768 41 negative_sampler.num_negs_per_pos 84.0 +768 41 training.batch_size 1.0 +768 42 model.embedding_dim 0.0 +768 42 model.relation_dim 
2.0 +768 42 loss.margin 7.744882091288005 +768 42 loss.adversarial_temperature 0.343736575873798 +768 42 optimizer.lr 0.05014168166165998 +768 42 negative_sampler.num_negs_per_pos 85.0 +768 42 training.batch_size 1.0 +768 43 model.embedding_dim 2.0 +768 43 model.relation_dim 1.0 +768 43 loss.margin 19.13975049397722 +768 43 loss.adversarial_temperature 0.30363409947586706 +768 43 optimizer.lr 0.019064614399768268 +768 43 negative_sampler.num_negs_per_pos 8.0 +768 43 training.batch_size 1.0 +768 44 model.embedding_dim 2.0 +768 44 model.relation_dim 0.0 +768 44 loss.margin 1.9237584845181437 +768 44 loss.adversarial_temperature 0.106382495562604 +768 44 optimizer.lr 0.0032878546238584434 +768 44 negative_sampler.num_negs_per_pos 28.0 +768 44 training.batch_size 0.0 +768 45 model.embedding_dim 1.0 +768 45 model.relation_dim 1.0 +768 45 loss.margin 18.57906696443607 +768 45 loss.adversarial_temperature 0.27954184529426523 +768 45 optimizer.lr 0.0031556752081780912 +768 45 negative_sampler.num_negs_per_pos 49.0 +768 45 training.batch_size 0.0 +768 46 model.embedding_dim 1.0 +768 46 model.relation_dim 2.0 +768 46 loss.margin 1.5358036628134235 +768 46 loss.adversarial_temperature 0.45900169061741036 +768 46 optimizer.lr 0.007567532450546106 +768 46 negative_sampler.num_negs_per_pos 77.0 +768 46 training.batch_size 1.0 +768 47 model.embedding_dim 0.0 +768 47 model.relation_dim 0.0 +768 47 loss.margin 26.054587858060067 +768 47 loss.adversarial_temperature 0.7546446531629487 +768 47 optimizer.lr 0.01612968215299347 +768 47 negative_sampler.num_negs_per_pos 21.0 +768 47 training.batch_size 2.0 +768 48 model.embedding_dim 2.0 +768 48 model.relation_dim 1.0 +768 48 loss.margin 15.712939725997458 +768 48 loss.adversarial_temperature 0.3337620201305032 +768 48 optimizer.lr 0.0011909187966213835 +768 48 negative_sampler.num_negs_per_pos 55.0 +768 48 training.batch_size 1.0 +768 49 model.embedding_dim 0.0 +768 49 model.relation_dim 1.0 +768 49 loss.margin 24.375929822224712 +768 
49 loss.adversarial_temperature 0.677617308088886 +768 49 optimizer.lr 0.028905194579948047 +768 49 negative_sampler.num_negs_per_pos 79.0 +768 49 training.batch_size 1.0 +768 50 model.embedding_dim 1.0 +768 50 model.relation_dim 0.0 +768 50 loss.margin 19.046950921892993 +768 50 loss.adversarial_temperature 0.9137867164633988 +768 50 optimizer.lr 0.0026126386443909794 +768 50 negative_sampler.num_negs_per_pos 64.0 +768 50 training.batch_size 1.0 +768 51 model.embedding_dim 2.0 +768 51 model.relation_dim 0.0 +768 51 loss.margin 4.8142745122092885 +768 51 loss.adversarial_temperature 0.12189785610590444 +768 51 optimizer.lr 0.06340496493202864 +768 51 negative_sampler.num_negs_per_pos 27.0 +768 51 training.batch_size 2.0 +768 52 model.embedding_dim 1.0 +768 52 model.relation_dim 1.0 +768 52 loss.margin 9.314972946653635 +768 52 loss.adversarial_temperature 0.46660207150634203 +768 52 optimizer.lr 0.0040743287364379275 +768 52 negative_sampler.num_negs_per_pos 22.0 +768 52 training.batch_size 0.0 +768 53 model.embedding_dim 0.0 +768 53 model.relation_dim 2.0 +768 53 loss.margin 20.774297658310086 +768 53 loss.adversarial_temperature 0.9331119240195651 +768 53 optimizer.lr 0.002429252198970793 +768 53 negative_sampler.num_negs_per_pos 85.0 +768 53 training.batch_size 2.0 +768 54 model.embedding_dim 2.0 +768 54 model.relation_dim 1.0 +768 54 loss.margin 3.6554226706138007 +768 54 loss.adversarial_temperature 0.5232391649585287 +768 54 optimizer.lr 0.02904655428018026 +768 54 negative_sampler.num_negs_per_pos 60.0 +768 54 training.batch_size 0.0 +768 55 model.embedding_dim 0.0 +768 55 model.relation_dim 1.0 +768 55 loss.margin 16.79838076511529 +768 55 loss.adversarial_temperature 0.34753299104310476 +768 55 optimizer.lr 0.048188216692725994 +768 55 negative_sampler.num_negs_per_pos 95.0 +768 55 training.batch_size 2.0 +768 56 model.embedding_dim 0.0 +768 56 model.relation_dim 2.0 +768 56 loss.margin 28.633568558126335 +768 56 loss.adversarial_temperature 
0.78575512623701 +768 56 optimizer.lr 0.02860993418903748 +768 56 negative_sampler.num_negs_per_pos 48.0 +768 56 training.batch_size 2.0 +768 57 model.embedding_dim 1.0 +768 57 model.relation_dim 2.0 +768 57 loss.margin 7.541710566812238 +768 57 loss.adversarial_temperature 0.3268258808965548 +768 57 optimizer.lr 0.0019057748138770886 +768 57 negative_sampler.num_negs_per_pos 26.0 +768 57 training.batch_size 0.0 +768 58 model.embedding_dim 1.0 +768 58 model.relation_dim 0.0 +768 58 loss.margin 17.845803990998018 +768 58 loss.adversarial_temperature 0.4211466957380733 +768 58 optimizer.lr 0.01709377973055311 +768 58 negative_sampler.num_negs_per_pos 87.0 +768 58 training.batch_size 1.0 +768 59 model.embedding_dim 0.0 +768 59 model.relation_dim 2.0 +768 59 loss.margin 5.936611533202991 +768 59 loss.adversarial_temperature 0.28596834840183405 +768 59 optimizer.lr 0.006601810351230177 +768 59 negative_sampler.num_negs_per_pos 89.0 +768 59 training.batch_size 1.0 +768 60 model.embedding_dim 2.0 +768 60 model.relation_dim 2.0 +768 60 loss.margin 24.638677467631112 +768 60 loss.adversarial_temperature 0.7050763204640638 +768 60 optimizer.lr 0.0012182861700606822 +768 60 negative_sampler.num_negs_per_pos 30.0 +768 60 training.batch_size 2.0 +768 61 model.embedding_dim 1.0 +768 61 model.relation_dim 0.0 +768 61 loss.margin 22.552332741553485 +768 61 loss.adversarial_temperature 0.7644358831280731 +768 61 optimizer.lr 0.0010176736881694506 +768 61 negative_sampler.num_negs_per_pos 15.0 +768 61 training.batch_size 1.0 +768 62 model.embedding_dim 1.0 +768 62 model.relation_dim 2.0 +768 62 loss.margin 14.068914303248272 +768 62 loss.adversarial_temperature 0.4845431225411768 +768 62 optimizer.lr 0.013643657685077281 +768 62 negative_sampler.num_negs_per_pos 3.0 +768 62 training.batch_size 0.0 +768 63 model.embedding_dim 0.0 +768 63 model.relation_dim 2.0 +768 63 loss.margin 19.414048322199825 +768 63 loss.adversarial_temperature 0.7137350653697233 +768 63 optimizer.lr 
0.003755440808658387 +768 63 negative_sampler.num_negs_per_pos 7.0 +768 63 training.batch_size 2.0 +768 64 model.embedding_dim 2.0 +768 64 model.relation_dim 0.0 +768 64 loss.margin 22.48716080802457 +768 64 loss.adversarial_temperature 0.6510729149567398 +768 64 optimizer.lr 0.007909389221664183 +768 64 negative_sampler.num_negs_per_pos 4.0 +768 64 training.batch_size 0.0 +768 65 model.embedding_dim 2.0 +768 65 model.relation_dim 1.0 +768 65 loss.margin 18.044066135031354 +768 65 loss.adversarial_temperature 0.5640314336101531 +768 65 optimizer.lr 0.006738335381835233 +768 65 negative_sampler.num_negs_per_pos 19.0 +768 65 training.batch_size 2.0 +768 66 model.embedding_dim 1.0 +768 66 model.relation_dim 0.0 +768 66 loss.margin 22.435427602003667 +768 66 loss.adversarial_temperature 0.7200097957646667 +768 66 optimizer.lr 0.010852682193423592 +768 66 negative_sampler.num_negs_per_pos 70.0 +768 66 training.batch_size 1.0 +768 67 model.embedding_dim 2.0 +768 67 model.relation_dim 1.0 +768 67 loss.margin 10.61014635073985 +768 67 loss.adversarial_temperature 0.9012403791669167 +768 67 optimizer.lr 0.015582244279026942 +768 67 negative_sampler.num_negs_per_pos 10.0 +768 67 training.batch_size 2.0 +768 68 model.embedding_dim 0.0 +768 68 model.relation_dim 1.0 +768 68 loss.margin 7.972863481343366 +768 68 loss.adversarial_temperature 0.8258859237605667 +768 68 optimizer.lr 0.011057570780587736 +768 68 negative_sampler.num_negs_per_pos 87.0 +768 68 training.batch_size 1.0 +768 69 model.embedding_dim 0.0 +768 69 model.relation_dim 2.0 +768 69 loss.margin 5.948957229769672 +768 69 loss.adversarial_temperature 0.9165398724913101 +768 69 optimizer.lr 0.0260175979824281 +768 69 negative_sampler.num_negs_per_pos 58.0 +768 69 training.batch_size 2.0 +768 70 model.embedding_dim 1.0 +768 70 model.relation_dim 1.0 +768 70 loss.margin 3.182631122352655 +768 70 loss.adversarial_temperature 0.5104166521687336 +768 70 optimizer.lr 0.09677264242989451 +768 70 
negative_sampler.num_negs_per_pos 92.0 +768 70 training.batch_size 0.0 +768 71 model.embedding_dim 1.0 +768 71 model.relation_dim 1.0 +768 71 loss.margin 13.465965454654535 +768 71 loss.adversarial_temperature 0.3752255218140461 +768 71 optimizer.lr 0.006220389445172056 +768 71 negative_sampler.num_negs_per_pos 7.0 +768 71 training.batch_size 2.0 +768 72 model.embedding_dim 2.0 +768 72 model.relation_dim 0.0 +768 72 loss.margin 19.83274498072087 +768 72 loss.adversarial_temperature 0.7996213381085366 +768 72 optimizer.lr 0.062320325721763374 +768 72 negative_sampler.num_negs_per_pos 42.0 +768 72 training.batch_size 2.0 +768 73 model.embedding_dim 2.0 +768 73 model.relation_dim 2.0 +768 73 loss.margin 2.2710033915168535 +768 73 loss.adversarial_temperature 0.11436596735744643 +768 73 optimizer.lr 0.019185499313969918 +768 73 negative_sampler.num_negs_per_pos 65.0 +768 73 training.batch_size 2.0 +768 74 model.embedding_dim 1.0 +768 74 model.relation_dim 2.0 +768 74 loss.margin 27.323503912015305 +768 74 loss.adversarial_temperature 0.5060459716352164 +768 74 optimizer.lr 0.03099202262062468 +768 74 negative_sampler.num_negs_per_pos 67.0 +768 74 training.batch_size 1.0 +768 75 model.embedding_dim 0.0 +768 75 model.relation_dim 0.0 +768 75 loss.margin 15.358739742975377 +768 75 loss.adversarial_temperature 0.4532186450547623 +768 75 optimizer.lr 0.01567137119380298 +768 75 negative_sampler.num_negs_per_pos 64.0 +768 75 training.batch_size 1.0 +768 76 model.embedding_dim 0.0 +768 76 model.relation_dim 1.0 +768 76 loss.margin 12.336313632561604 +768 76 loss.adversarial_temperature 0.8978827710454627 +768 76 optimizer.lr 0.016623315081782997 +768 76 negative_sampler.num_negs_per_pos 70.0 +768 76 training.batch_size 0.0 +768 77 model.embedding_dim 1.0 +768 77 model.relation_dim 0.0 +768 77 loss.margin 19.838788305287146 +768 77 loss.adversarial_temperature 0.593860809107464 +768 77 optimizer.lr 0.031171733270554804 +768 77 negative_sampler.num_negs_per_pos 62.0 +768 77 
training.batch_size 2.0 +768 78 model.embedding_dim 0.0 +768 78 model.relation_dim 2.0 +768 78 loss.margin 5.384385622476919 +768 78 loss.adversarial_temperature 0.11915220439635502 +768 78 optimizer.lr 0.09277559997043763 +768 78 negative_sampler.num_negs_per_pos 39.0 +768 78 training.batch_size 2.0 +768 79 model.embedding_dim 1.0 +768 79 model.relation_dim 1.0 +768 79 loss.margin 12.367496290818261 +768 79 loss.adversarial_temperature 0.3611974337122127 +768 79 optimizer.lr 0.08133183360507683 +768 79 negative_sampler.num_negs_per_pos 73.0 +768 79 training.batch_size 2.0 +768 80 model.embedding_dim 0.0 +768 80 model.relation_dim 0.0 +768 80 loss.margin 13.910856558950002 +768 80 loss.adversarial_temperature 0.85467103138278 +768 80 optimizer.lr 0.0028917547387894236 +768 80 negative_sampler.num_negs_per_pos 21.0 +768 80 training.batch_size 2.0 +768 81 model.embedding_dim 0.0 +768 81 model.relation_dim 0.0 +768 81 loss.margin 2.192172576788841 +768 81 loss.adversarial_temperature 0.565825052424934 +768 81 optimizer.lr 0.0018922464237060357 +768 81 negative_sampler.num_negs_per_pos 45.0 +768 81 training.batch_size 2.0 +768 82 model.embedding_dim 0.0 +768 82 model.relation_dim 2.0 +768 82 loss.margin 10.40784296852113 +768 82 loss.adversarial_temperature 0.278047059404833 +768 82 optimizer.lr 0.004186746456934605 +768 82 negative_sampler.num_negs_per_pos 71.0 +768 82 training.batch_size 2.0 +768 83 model.embedding_dim 1.0 +768 83 model.relation_dim 1.0 +768 83 loss.margin 12.07832742636637 +768 83 loss.adversarial_temperature 0.21902830567905746 +768 83 optimizer.lr 0.01873357894842586 +768 83 negative_sampler.num_negs_per_pos 12.0 +768 83 training.batch_size 0.0 +768 84 model.embedding_dim 2.0 +768 84 model.relation_dim 0.0 +768 84 loss.margin 21.050523272689382 +768 84 loss.adversarial_temperature 0.45759764016537285 +768 84 optimizer.lr 0.004542391031188281 +768 84 negative_sampler.num_negs_per_pos 48.0 +768 84 training.batch_size 1.0 +768 85 model.embedding_dim 
0.0 +768 85 model.relation_dim 1.0 +768 85 loss.margin 9.65956987067729 +768 85 loss.adversarial_temperature 0.8605982583285068 +768 85 optimizer.lr 0.04524209627839044 +768 85 negative_sampler.num_negs_per_pos 96.0 +768 85 training.batch_size 0.0 +768 86 model.embedding_dim 2.0 +768 86 model.relation_dim 2.0 +768 86 loss.margin 18.484850022610743 +768 86 loss.adversarial_temperature 0.3743088677918597 +768 86 optimizer.lr 0.010232631111881219 +768 86 negative_sampler.num_negs_per_pos 47.0 +768 86 training.batch_size 0.0 +768 87 model.embedding_dim 1.0 +768 87 model.relation_dim 0.0 +768 87 loss.margin 5.645893228299162 +768 87 loss.adversarial_temperature 0.5840094747152869 +768 87 optimizer.lr 0.003224324254994337 +768 87 negative_sampler.num_negs_per_pos 9.0 +768 87 training.batch_size 2.0 +768 88 model.embedding_dim 1.0 +768 88 model.relation_dim 1.0 +768 88 loss.margin 24.38502533913856 +768 88 loss.adversarial_temperature 0.7219593338489741 +768 88 optimizer.lr 0.024274816587992028 +768 88 negative_sampler.num_negs_per_pos 3.0 +768 88 training.batch_size 2.0 +768 89 model.embedding_dim 1.0 +768 89 model.relation_dim 1.0 +768 89 loss.margin 5.5620719944028565 +768 89 loss.adversarial_temperature 0.4869164586825153 +768 89 optimizer.lr 0.02106843300368248 +768 89 negative_sampler.num_negs_per_pos 33.0 +768 89 training.batch_size 0.0 +768 90 model.embedding_dim 2.0 +768 90 model.relation_dim 2.0 +768 90 loss.margin 1.777449998620745 +768 90 loss.adversarial_temperature 0.11725183007325764 +768 90 optimizer.lr 0.0013422626063923537 +768 90 negative_sampler.num_negs_per_pos 13.0 +768 90 training.batch_size 2.0 +768 91 model.embedding_dim 0.0 +768 91 model.relation_dim 2.0 +768 91 loss.margin 13.148672093058238 +768 91 loss.adversarial_temperature 0.8912675931719752 +768 91 optimizer.lr 0.001121211799558947 +768 91 negative_sampler.num_negs_per_pos 9.0 +768 91 training.batch_size 1.0 +768 92 model.embedding_dim 1.0 +768 92 model.relation_dim 1.0 +768 92 loss.margin 
5.770566741480316 +768 92 loss.adversarial_temperature 0.4547270828778308 +768 92 optimizer.lr 0.0011424515165358892 +768 92 negative_sampler.num_negs_per_pos 94.0 +768 92 training.batch_size 0.0 +768 93 model.embedding_dim 0.0 +768 93 model.relation_dim 2.0 +768 93 loss.margin 2.596160940682004 +768 93 loss.adversarial_temperature 0.5720036261105703 +768 93 optimizer.lr 0.0029046633693177205 +768 93 negative_sampler.num_negs_per_pos 6.0 +768 93 training.batch_size 1.0 +768 94 model.embedding_dim 2.0 +768 94 model.relation_dim 1.0 +768 94 loss.margin 3.640024555796135 +768 94 loss.adversarial_temperature 0.25651435497639674 +768 94 optimizer.lr 0.014343586861285321 +768 94 negative_sampler.num_negs_per_pos 92.0 +768 94 training.batch_size 2.0 +768 95 model.embedding_dim 2.0 +768 95 model.relation_dim 2.0 +768 95 loss.margin 26.00126365343007 +768 95 loss.adversarial_temperature 0.8620191650171114 +768 95 optimizer.lr 0.082880024071934 +768 95 negative_sampler.num_negs_per_pos 63.0 +768 95 training.batch_size 2.0 +768 96 model.embedding_dim 1.0 +768 96 model.relation_dim 2.0 +768 96 loss.margin 19.40589121832474 +768 96 loss.adversarial_temperature 0.6221784428115003 +768 96 optimizer.lr 0.016827293811780757 +768 96 negative_sampler.num_negs_per_pos 3.0 +768 96 training.batch_size 0.0 +768 97 model.embedding_dim 2.0 +768 97 model.relation_dim 0.0 +768 97 loss.margin 15.190009328863479 +768 97 loss.adversarial_temperature 0.8071161178293124 +768 97 optimizer.lr 0.01008824519769576 +768 97 negative_sampler.num_negs_per_pos 85.0 +768 97 training.batch_size 1.0 +768 98 model.embedding_dim 2.0 +768 98 model.relation_dim 1.0 +768 98 loss.margin 22.934044539280862 +768 98 loss.adversarial_temperature 0.3948863403432019 +768 98 optimizer.lr 0.002044156507850091 +768 98 negative_sampler.num_negs_per_pos 20.0 +768 98 training.batch_size 0.0 +768 99 model.embedding_dim 1.0 +768 99 model.relation_dim 1.0 +768 99 loss.margin 13.6947661200924 +768 99 loss.adversarial_temperature 
0.34821841068631143 +768 99 optimizer.lr 0.0025516342637609828 +768 99 negative_sampler.num_negs_per_pos 22.0 +768 99 training.batch_size 0.0 +768 100 model.embedding_dim 1.0 +768 100 model.relation_dim 0.0 +768 100 loss.margin 25.52117688213169 +768 100 loss.adversarial_temperature 0.36887026445425963 +768 100 optimizer.lr 0.04604276784094685 +768 100 negative_sampler.num_negs_per_pos 50.0 +768 100 training.batch_size 2.0 +768 1 dataset """kinships""" +768 1 model """transd""" +768 1 loss """nssa""" +768 1 regularizer """no""" +768 1 optimizer """adam""" +768 1 training_loop """owa""" +768 1 negative_sampler """basic""" +768 1 evaluator """rankbased""" +768 2 dataset """kinships""" +768 2 model """transd""" +768 2 loss """nssa""" +768 2 regularizer """no""" +768 2 optimizer """adam""" +768 2 training_loop """owa""" +768 2 negative_sampler """basic""" +768 2 evaluator """rankbased""" +768 3 dataset """kinships""" +768 3 model """transd""" +768 3 loss """nssa""" +768 3 regularizer """no""" +768 3 optimizer """adam""" +768 3 training_loop """owa""" +768 3 negative_sampler """basic""" +768 3 evaluator """rankbased""" +768 4 dataset """kinships""" +768 4 model """transd""" +768 4 loss """nssa""" +768 4 regularizer """no""" +768 4 optimizer """adam""" +768 4 training_loop """owa""" +768 4 negative_sampler """basic""" +768 4 evaluator """rankbased""" +768 5 dataset """kinships""" +768 5 model """transd""" +768 5 loss """nssa""" +768 5 regularizer """no""" +768 5 optimizer """adam""" +768 5 training_loop """owa""" +768 5 negative_sampler """basic""" +768 5 evaluator """rankbased""" +768 6 dataset """kinships""" +768 6 model """transd""" +768 6 loss """nssa""" +768 6 regularizer """no""" +768 6 optimizer """adam""" +768 6 training_loop """owa""" +768 6 negative_sampler """basic""" +768 6 evaluator """rankbased""" +768 7 dataset """kinships""" +768 7 model """transd""" +768 7 loss """nssa""" +768 7 regularizer """no""" +768 7 optimizer """adam""" +768 7 training_loop 
"""owa""" +768 7 negative_sampler """basic""" +768 7 evaluator """rankbased""" +768 8 dataset """kinships""" +768 8 model """transd""" +768 8 loss """nssa""" +768 8 regularizer """no""" +768 8 optimizer """adam""" +768 8 training_loop """owa""" +768 8 negative_sampler """basic""" +768 8 evaluator """rankbased""" +768 9 dataset """kinships""" +768 9 model """transd""" +768 9 loss """nssa""" +768 9 regularizer """no""" +768 9 optimizer """adam""" +768 9 training_loop """owa""" +768 9 negative_sampler """basic""" +768 9 evaluator """rankbased""" +768 10 dataset """kinships""" +768 10 model """transd""" +768 10 loss """nssa""" +768 10 regularizer """no""" +768 10 optimizer """adam""" +768 10 training_loop """owa""" +768 10 negative_sampler """basic""" +768 10 evaluator """rankbased""" +768 11 dataset """kinships""" +768 11 model """transd""" +768 11 loss """nssa""" +768 11 regularizer """no""" +768 11 optimizer """adam""" +768 11 training_loop """owa""" +768 11 negative_sampler """basic""" +768 11 evaluator """rankbased""" +768 12 dataset """kinships""" +768 12 model """transd""" +768 12 loss """nssa""" +768 12 regularizer """no""" +768 12 optimizer """adam""" +768 12 training_loop """owa""" +768 12 negative_sampler """basic""" +768 12 evaluator """rankbased""" +768 13 dataset """kinships""" +768 13 model """transd""" +768 13 loss """nssa""" +768 13 regularizer """no""" +768 13 optimizer """adam""" +768 13 training_loop """owa""" +768 13 negative_sampler """basic""" +768 13 evaluator """rankbased""" +768 14 dataset """kinships""" +768 14 model """transd""" +768 14 loss """nssa""" +768 14 regularizer """no""" +768 14 optimizer """adam""" +768 14 training_loop """owa""" +768 14 negative_sampler """basic""" +768 14 evaluator """rankbased""" +768 15 dataset """kinships""" +768 15 model """transd""" +768 15 loss """nssa""" +768 15 regularizer """no""" +768 15 optimizer """adam""" +768 15 training_loop """owa""" +768 15 negative_sampler """basic""" +768 15 evaluator 
"""rankbased""" +768 16 dataset """kinships""" +768 16 model """transd""" +768 16 loss """nssa""" +768 16 regularizer """no""" +768 16 optimizer """adam""" +768 16 training_loop """owa""" +768 16 negative_sampler """basic""" +768 16 evaluator """rankbased""" +768 17 dataset """kinships""" +768 17 model """transd""" +768 17 loss """nssa""" +768 17 regularizer """no""" +768 17 optimizer """adam""" +768 17 training_loop """owa""" +768 17 negative_sampler """basic""" +768 17 evaluator """rankbased""" +768 18 dataset """kinships""" +768 18 model """transd""" +768 18 loss """nssa""" +768 18 regularizer """no""" +768 18 optimizer """adam""" +768 18 training_loop """owa""" +768 18 negative_sampler """basic""" +768 18 evaluator """rankbased""" +768 19 dataset """kinships""" +768 19 model """transd""" +768 19 loss """nssa""" +768 19 regularizer """no""" +768 19 optimizer """adam""" +768 19 training_loop """owa""" +768 19 negative_sampler """basic""" +768 19 evaluator """rankbased""" +768 20 dataset """kinships""" +768 20 model """transd""" +768 20 loss """nssa""" +768 20 regularizer """no""" +768 20 optimizer """adam""" +768 20 training_loop """owa""" +768 20 negative_sampler """basic""" +768 20 evaluator """rankbased""" +768 21 dataset """kinships""" +768 21 model """transd""" +768 21 loss """nssa""" +768 21 regularizer """no""" +768 21 optimizer """adam""" +768 21 training_loop """owa""" +768 21 negative_sampler """basic""" +768 21 evaluator """rankbased""" +768 22 dataset """kinships""" +768 22 model """transd""" +768 22 loss """nssa""" +768 22 regularizer """no""" +768 22 optimizer """adam""" +768 22 training_loop """owa""" +768 22 negative_sampler """basic""" +768 22 evaluator """rankbased""" +768 23 dataset """kinships""" +768 23 model """transd""" +768 23 loss """nssa""" +768 23 regularizer """no""" +768 23 optimizer """adam""" +768 23 training_loop """owa""" +768 23 negative_sampler """basic""" +768 23 evaluator """rankbased""" +768 24 dataset """kinships""" +768 24 
model """transd""" +768 24 loss """nssa""" +768 24 regularizer """no""" +768 24 optimizer """adam""" +768 24 training_loop """owa""" +768 24 negative_sampler """basic""" +768 24 evaluator """rankbased""" +768 25 dataset """kinships""" +768 25 model """transd""" +768 25 loss """nssa""" +768 25 regularizer """no""" +768 25 optimizer """adam""" +768 25 training_loop """owa""" +768 25 negative_sampler """basic""" +768 25 evaluator """rankbased""" +768 26 dataset """kinships""" +768 26 model """transd""" +768 26 loss """nssa""" +768 26 regularizer """no""" +768 26 optimizer """adam""" +768 26 training_loop """owa""" +768 26 negative_sampler """basic""" +768 26 evaluator """rankbased""" +768 27 dataset """kinships""" +768 27 model """transd""" +768 27 loss """nssa""" +768 27 regularizer """no""" +768 27 optimizer """adam""" +768 27 training_loop """owa""" +768 27 negative_sampler """basic""" +768 27 evaluator """rankbased""" +768 28 dataset """kinships""" +768 28 model """transd""" +768 28 loss """nssa""" +768 28 regularizer """no""" +768 28 optimizer """adam""" +768 28 training_loop """owa""" +768 28 negative_sampler """basic""" +768 28 evaluator """rankbased""" +768 29 dataset """kinships""" +768 29 model """transd""" +768 29 loss """nssa""" +768 29 regularizer """no""" +768 29 optimizer """adam""" +768 29 training_loop """owa""" +768 29 negative_sampler """basic""" +768 29 evaluator """rankbased""" +768 30 dataset """kinships""" +768 30 model """transd""" +768 30 loss """nssa""" +768 30 regularizer """no""" +768 30 optimizer """adam""" +768 30 training_loop """owa""" +768 30 negative_sampler """basic""" +768 30 evaluator """rankbased""" +768 31 dataset """kinships""" +768 31 model """transd""" +768 31 loss """nssa""" +768 31 regularizer """no""" +768 31 optimizer """adam""" +768 31 training_loop """owa""" +768 31 negative_sampler """basic""" +768 31 evaluator """rankbased""" +768 32 dataset """kinships""" +768 32 model """transd""" +768 32 loss """nssa""" +768 32 
regularizer """no""" +768 32 optimizer """adam""" +768 32 training_loop """owa""" +768 32 negative_sampler """basic""" +768 32 evaluator """rankbased""" +768 33 dataset """kinships""" +768 33 model """transd""" +768 33 loss """nssa""" +768 33 regularizer """no""" +768 33 optimizer """adam""" +768 33 training_loop """owa""" +768 33 negative_sampler """basic""" +768 33 evaluator """rankbased""" +768 34 dataset """kinships""" +768 34 model """transd""" +768 34 loss """nssa""" +768 34 regularizer """no""" +768 34 optimizer """adam""" +768 34 training_loop """owa""" +768 34 negative_sampler """basic""" +768 34 evaluator """rankbased""" +768 35 dataset """kinships""" +768 35 model """transd""" +768 35 loss """nssa""" +768 35 regularizer """no""" +768 35 optimizer """adam""" +768 35 training_loop """owa""" +768 35 negative_sampler """basic""" +768 35 evaluator """rankbased""" +768 36 dataset """kinships""" +768 36 model """transd""" +768 36 loss """nssa""" +768 36 regularizer """no""" +768 36 optimizer """adam""" +768 36 training_loop """owa""" +768 36 negative_sampler """basic""" +768 36 evaluator """rankbased""" +768 37 dataset """kinships""" +768 37 model """transd""" +768 37 loss """nssa""" +768 37 regularizer """no""" +768 37 optimizer """adam""" +768 37 training_loop """owa""" +768 37 negative_sampler """basic""" +768 37 evaluator """rankbased""" +768 38 dataset """kinships""" +768 38 model """transd""" +768 38 loss """nssa""" +768 38 regularizer """no""" +768 38 optimizer """adam""" +768 38 training_loop """owa""" +768 38 negative_sampler """basic""" +768 38 evaluator """rankbased""" +768 39 dataset """kinships""" +768 39 model """transd""" +768 39 loss """nssa""" +768 39 regularizer """no""" +768 39 optimizer """adam""" +768 39 training_loop """owa""" +768 39 negative_sampler """basic""" +768 39 evaluator """rankbased""" +768 40 dataset """kinships""" +768 40 model """transd""" +768 40 loss """nssa""" +768 40 regularizer """no""" +768 40 optimizer """adam""" +768 
40 training_loop """owa""" +768 40 negative_sampler """basic""" +768 40 evaluator """rankbased""" +768 41 dataset """kinships""" +768 41 model """transd""" +768 41 loss """nssa""" +768 41 regularizer """no""" +768 41 optimizer """adam""" +768 41 training_loop """owa""" +768 41 negative_sampler """basic""" +768 41 evaluator """rankbased""" +768 42 dataset """kinships""" +768 42 model """transd""" +768 42 loss """nssa""" +768 42 regularizer """no""" +768 42 optimizer """adam""" +768 42 training_loop """owa""" +768 42 negative_sampler """basic""" +768 42 evaluator """rankbased""" +768 43 dataset """kinships""" +768 43 model """transd""" +768 43 loss """nssa""" +768 43 regularizer """no""" +768 43 optimizer """adam""" +768 43 training_loop """owa""" +768 43 negative_sampler """basic""" +768 43 evaluator """rankbased""" +768 44 dataset """kinships""" +768 44 model """transd""" +768 44 loss """nssa""" +768 44 regularizer """no""" +768 44 optimizer """adam""" +768 44 training_loop """owa""" +768 44 negative_sampler """basic""" +768 44 evaluator """rankbased""" +768 45 dataset """kinships""" +768 45 model """transd""" +768 45 loss """nssa""" +768 45 regularizer """no""" +768 45 optimizer """adam""" +768 45 training_loop """owa""" +768 45 negative_sampler """basic""" +768 45 evaluator """rankbased""" +768 46 dataset """kinships""" +768 46 model """transd""" +768 46 loss """nssa""" +768 46 regularizer """no""" +768 46 optimizer """adam""" +768 46 training_loop """owa""" +768 46 negative_sampler """basic""" +768 46 evaluator """rankbased""" +768 47 dataset """kinships""" +768 47 model """transd""" +768 47 loss """nssa""" +768 47 regularizer """no""" +768 47 optimizer """adam""" +768 47 training_loop """owa""" +768 47 negative_sampler """basic""" +768 47 evaluator """rankbased""" +768 48 dataset """kinships""" +768 48 model """transd""" +768 48 loss """nssa""" +768 48 regularizer """no""" +768 48 optimizer """adam""" +768 48 training_loop """owa""" +768 48 negative_sampler 
"""basic""" +768 48 evaluator """rankbased""" +768 49 dataset """kinships""" +768 49 model """transd""" +768 49 loss """nssa""" +768 49 regularizer """no""" +768 49 optimizer """adam""" +768 49 training_loop """owa""" +768 49 negative_sampler """basic""" +768 49 evaluator """rankbased""" +768 50 dataset """kinships""" +768 50 model """transd""" +768 50 loss """nssa""" +768 50 regularizer """no""" +768 50 optimizer """adam""" +768 50 training_loop """owa""" +768 50 negative_sampler """basic""" +768 50 evaluator """rankbased""" +768 51 dataset """kinships""" +768 51 model """transd""" +768 51 loss """nssa""" +768 51 regularizer """no""" +768 51 optimizer """adam""" +768 51 training_loop """owa""" +768 51 negative_sampler """basic""" +768 51 evaluator """rankbased""" +768 52 dataset """kinships""" +768 52 model """transd""" +768 52 loss """nssa""" +768 52 regularizer """no""" +768 52 optimizer """adam""" +768 52 training_loop """owa""" +768 52 negative_sampler """basic""" +768 52 evaluator """rankbased""" +768 53 dataset """kinships""" +768 53 model """transd""" +768 53 loss """nssa""" +768 53 regularizer """no""" +768 53 optimizer """adam""" +768 53 training_loop """owa""" +768 53 negative_sampler """basic""" +768 53 evaluator """rankbased""" +768 54 dataset """kinships""" +768 54 model """transd""" +768 54 loss """nssa""" +768 54 regularizer """no""" +768 54 optimizer """adam""" +768 54 training_loop """owa""" +768 54 negative_sampler """basic""" +768 54 evaluator """rankbased""" +768 55 dataset """kinships""" +768 55 model """transd""" +768 55 loss """nssa""" +768 55 regularizer """no""" +768 55 optimizer """adam""" +768 55 training_loop """owa""" +768 55 negative_sampler """basic""" +768 55 evaluator """rankbased""" +768 56 dataset """kinships""" +768 56 model """transd""" +768 56 loss """nssa""" +768 56 regularizer """no""" +768 56 optimizer """adam""" +768 56 training_loop """owa""" +768 56 negative_sampler """basic""" +768 56 evaluator """rankbased""" +768 57 
dataset """kinships""" +768 57 model """transd""" +768 57 loss """nssa""" +768 57 regularizer """no""" +768 57 optimizer """adam""" +768 57 training_loop """owa""" +768 57 negative_sampler """basic""" +768 57 evaluator """rankbased""" +768 58 dataset """kinships""" +768 58 model """transd""" +768 58 loss """nssa""" +768 58 regularizer """no""" +768 58 optimizer """adam""" +768 58 training_loop """owa""" +768 58 negative_sampler """basic""" +768 58 evaluator """rankbased""" +768 59 dataset """kinships""" +768 59 model """transd""" +768 59 loss """nssa""" +768 59 regularizer """no""" +768 59 optimizer """adam""" +768 59 training_loop """owa""" +768 59 negative_sampler """basic""" +768 59 evaluator """rankbased""" +768 60 dataset """kinships""" +768 60 model """transd""" +768 60 loss """nssa""" +768 60 regularizer """no""" +768 60 optimizer """adam""" +768 60 training_loop """owa""" +768 60 negative_sampler """basic""" +768 60 evaluator """rankbased""" +768 61 dataset """kinships""" +768 61 model """transd""" +768 61 loss """nssa""" +768 61 regularizer """no""" +768 61 optimizer """adam""" +768 61 training_loop """owa""" +768 61 negative_sampler """basic""" +768 61 evaluator """rankbased""" +768 62 dataset """kinships""" +768 62 model """transd""" +768 62 loss """nssa""" +768 62 regularizer """no""" +768 62 optimizer """adam""" +768 62 training_loop """owa""" +768 62 negative_sampler """basic""" +768 62 evaluator """rankbased""" +768 63 dataset """kinships""" +768 63 model """transd""" +768 63 loss """nssa""" +768 63 regularizer """no""" +768 63 optimizer """adam""" +768 63 training_loop """owa""" +768 63 negative_sampler """basic""" +768 63 evaluator """rankbased""" +768 64 dataset """kinships""" +768 64 model """transd""" +768 64 loss """nssa""" +768 64 regularizer """no""" +768 64 optimizer """adam""" +768 64 training_loop """owa""" +768 64 negative_sampler """basic""" +768 64 evaluator """rankbased""" +768 65 dataset """kinships""" +768 65 model """transd""" +768 
65 loss """nssa""" +768 65 regularizer """no""" +768 65 optimizer """adam""" +768 65 training_loop """owa""" +768 65 negative_sampler """basic""" +768 65 evaluator """rankbased""" +768 66 dataset """kinships""" +768 66 model """transd""" +768 66 loss """nssa""" +768 66 regularizer """no""" +768 66 optimizer """adam""" +768 66 training_loop """owa""" +768 66 negative_sampler """basic""" +768 66 evaluator """rankbased""" +768 67 dataset """kinships""" +768 67 model """transd""" +768 67 loss """nssa""" +768 67 regularizer """no""" +768 67 optimizer """adam""" +768 67 training_loop """owa""" +768 67 negative_sampler """basic""" +768 67 evaluator """rankbased""" +768 68 dataset """kinships""" +768 68 model """transd""" +768 68 loss """nssa""" +768 68 regularizer """no""" +768 68 optimizer """adam""" +768 68 training_loop """owa""" +768 68 negative_sampler """basic""" +768 68 evaluator """rankbased""" +768 69 dataset """kinships""" +768 69 model """transd""" +768 69 loss """nssa""" +768 69 regularizer """no""" +768 69 optimizer """adam""" +768 69 training_loop """owa""" +768 69 negative_sampler """basic""" +768 69 evaluator """rankbased""" +768 70 dataset """kinships""" +768 70 model """transd""" +768 70 loss """nssa""" +768 70 regularizer """no""" +768 70 optimizer """adam""" +768 70 training_loop """owa""" +768 70 negative_sampler """basic""" +768 70 evaluator """rankbased""" +768 71 dataset """kinships""" +768 71 model """transd""" +768 71 loss """nssa""" +768 71 regularizer """no""" +768 71 optimizer """adam""" +768 71 training_loop """owa""" +768 71 negative_sampler """basic""" +768 71 evaluator """rankbased""" +768 72 dataset """kinships""" +768 72 model """transd""" +768 72 loss """nssa""" +768 72 regularizer """no""" +768 72 optimizer """adam""" +768 72 training_loop """owa""" +768 72 negative_sampler """basic""" +768 72 evaluator """rankbased""" +768 73 dataset """kinships""" +768 73 model """transd""" +768 73 loss """nssa""" +768 73 regularizer """no""" +768 73 
optimizer """adam""" +768 73 training_loop """owa""" +768 73 negative_sampler """basic""" +768 73 evaluator """rankbased""" +768 74 dataset """kinships""" +768 74 model """transd""" +768 74 loss """nssa""" +768 74 regularizer """no""" +768 74 optimizer """adam""" +768 74 training_loop """owa""" +768 74 negative_sampler """basic""" +768 74 evaluator """rankbased""" +768 75 dataset """kinships""" +768 75 model """transd""" +768 75 loss """nssa""" +768 75 regularizer """no""" +768 75 optimizer """adam""" +768 75 training_loop """owa""" +768 75 negative_sampler """basic""" +768 75 evaluator """rankbased""" +768 76 dataset """kinships""" +768 76 model """transd""" +768 76 loss """nssa""" +768 76 regularizer """no""" +768 76 optimizer """adam""" +768 76 training_loop """owa""" +768 76 negative_sampler """basic""" +768 76 evaluator """rankbased""" +768 77 dataset """kinships""" +768 77 model """transd""" +768 77 loss """nssa""" +768 77 regularizer """no""" +768 77 optimizer """adam""" +768 77 training_loop """owa""" +768 77 negative_sampler """basic""" +768 77 evaluator """rankbased""" +768 78 dataset """kinships""" +768 78 model """transd""" +768 78 loss """nssa""" +768 78 regularizer """no""" +768 78 optimizer """adam""" +768 78 training_loop """owa""" +768 78 negative_sampler """basic""" +768 78 evaluator """rankbased""" +768 79 dataset """kinships""" +768 79 model """transd""" +768 79 loss """nssa""" +768 79 regularizer """no""" +768 79 optimizer """adam""" +768 79 training_loop """owa""" +768 79 negative_sampler """basic""" +768 79 evaluator """rankbased""" +768 80 dataset """kinships""" +768 80 model """transd""" +768 80 loss """nssa""" +768 80 regularizer """no""" +768 80 optimizer """adam""" +768 80 training_loop """owa""" +768 80 negative_sampler """basic""" +768 80 evaluator """rankbased""" +768 81 dataset """kinships""" +768 81 model """transd""" +768 81 loss """nssa""" +768 81 regularizer """no""" +768 81 optimizer """adam""" +768 81 training_loop """owa""" 
+768 81 negative_sampler """basic""" +768 81 evaluator """rankbased""" +768 82 dataset """kinships""" +768 82 model """transd""" +768 82 loss """nssa""" +768 82 regularizer """no""" +768 82 optimizer """adam""" +768 82 training_loop """owa""" +768 82 negative_sampler """basic""" +768 82 evaluator """rankbased""" +768 83 dataset """kinships""" +768 83 model """transd""" +768 83 loss """nssa""" +768 83 regularizer """no""" +768 83 optimizer """adam""" +768 83 training_loop """owa""" +768 83 negative_sampler """basic""" +768 83 evaluator """rankbased""" +768 84 dataset """kinships""" +768 84 model """transd""" +768 84 loss """nssa""" +768 84 regularizer """no""" +768 84 optimizer """adam""" +768 84 training_loop """owa""" +768 84 negative_sampler """basic""" +768 84 evaluator """rankbased""" +768 85 dataset """kinships""" +768 85 model """transd""" +768 85 loss """nssa""" +768 85 regularizer """no""" +768 85 optimizer """adam""" +768 85 training_loop """owa""" +768 85 negative_sampler """basic""" +768 85 evaluator """rankbased""" +768 86 dataset """kinships""" +768 86 model """transd""" +768 86 loss """nssa""" +768 86 regularizer """no""" +768 86 optimizer """adam""" +768 86 training_loop """owa""" +768 86 negative_sampler """basic""" +768 86 evaluator """rankbased""" +768 87 dataset """kinships""" +768 87 model """transd""" +768 87 loss """nssa""" +768 87 regularizer """no""" +768 87 optimizer """adam""" +768 87 training_loop """owa""" +768 87 negative_sampler """basic""" +768 87 evaluator """rankbased""" +768 88 dataset """kinships""" +768 88 model """transd""" +768 88 loss """nssa""" +768 88 regularizer """no""" +768 88 optimizer """adam""" +768 88 training_loop """owa""" +768 88 negative_sampler """basic""" +768 88 evaluator """rankbased""" +768 89 dataset """kinships""" +768 89 model """transd""" +768 89 loss """nssa""" +768 89 regularizer """no""" +768 89 optimizer """adam""" +768 89 training_loop """owa""" +768 89 negative_sampler """basic""" +768 89 evaluator 
"""rankbased""" +768 90 dataset """kinships""" +768 90 model """transd""" +768 90 loss """nssa""" +768 90 regularizer """no""" +768 90 optimizer """adam""" +768 90 training_loop """owa""" +768 90 negative_sampler """basic""" +768 90 evaluator """rankbased""" +768 91 dataset """kinships""" +768 91 model """transd""" +768 91 loss """nssa""" +768 91 regularizer """no""" +768 91 optimizer """adam""" +768 91 training_loop """owa""" +768 91 negative_sampler """basic""" +768 91 evaluator """rankbased""" +768 92 dataset """kinships""" +768 92 model """transd""" +768 92 loss """nssa""" +768 92 regularizer """no""" +768 92 optimizer """adam""" +768 92 training_loop """owa""" +768 92 negative_sampler """basic""" +768 92 evaluator """rankbased""" +768 93 dataset """kinships""" +768 93 model """transd""" +768 93 loss """nssa""" +768 93 regularizer """no""" +768 93 optimizer """adam""" +768 93 training_loop """owa""" +768 93 negative_sampler """basic""" +768 93 evaluator """rankbased""" +768 94 dataset """kinships""" +768 94 model """transd""" +768 94 loss """nssa""" +768 94 regularizer """no""" +768 94 optimizer """adam""" +768 94 training_loop """owa""" +768 94 negative_sampler """basic""" +768 94 evaluator """rankbased""" +768 95 dataset """kinships""" +768 95 model """transd""" +768 95 loss """nssa""" +768 95 regularizer """no""" +768 95 optimizer """adam""" +768 95 training_loop """owa""" +768 95 negative_sampler """basic""" +768 95 evaluator """rankbased""" +768 96 dataset """kinships""" +768 96 model """transd""" +768 96 loss """nssa""" +768 96 regularizer """no""" +768 96 optimizer """adam""" +768 96 training_loop """owa""" +768 96 negative_sampler """basic""" +768 96 evaluator """rankbased""" +768 97 dataset """kinships""" +768 97 model """transd""" +768 97 loss """nssa""" +768 97 regularizer """no""" +768 97 optimizer """adam""" +768 97 training_loop """owa""" +768 97 negative_sampler """basic""" +768 97 evaluator """rankbased""" +768 98 dataset """kinships""" +768 98 
model """transd""" +768 98 loss """nssa""" +768 98 regularizer """no""" +768 98 optimizer """adam""" +768 98 training_loop """owa""" +768 98 negative_sampler """basic""" +768 98 evaluator """rankbased""" +768 99 dataset """kinships""" +768 99 model """transd""" +768 99 loss """nssa""" +768 99 regularizer """no""" +768 99 optimizer """adam""" +768 99 training_loop """owa""" +768 99 negative_sampler """basic""" +768 99 evaluator """rankbased""" +768 100 dataset """kinships""" +768 100 model """transd""" +768 100 loss """nssa""" +768 100 regularizer """no""" +768 100 optimizer """adam""" +768 100 training_loop """owa""" +768 100 negative_sampler """basic""" +768 100 evaluator """rankbased""" +769 1 model.embedding_dim 2.0 +769 1 model.relation_dim 2.0 +769 1 loss.margin 2.5531031750862163 +769 1 optimizer.lr 0.001137032604807849 +769 1 negative_sampler.num_negs_per_pos 7.0 +769 1 training.batch_size 0.0 +769 2 model.embedding_dim 1.0 +769 2 model.relation_dim 1.0 +769 2 loss.margin 0.732154654397835 +769 2 optimizer.lr 0.0011463659686005926 +769 2 negative_sampler.num_negs_per_pos 26.0 +769 2 training.batch_size 0.0 +769 3 model.embedding_dim 2.0 +769 3 model.relation_dim 1.0 +769 3 loss.margin 3.177894045469525 +769 3 optimizer.lr 0.002753211522023224 +769 3 negative_sampler.num_negs_per_pos 83.0 +769 3 training.batch_size 1.0 +769 4 model.embedding_dim 2.0 +769 4 model.relation_dim 2.0 +769 4 loss.margin 4.099359363630253 +769 4 optimizer.lr 0.002503814158804663 +769 4 negative_sampler.num_negs_per_pos 75.0 +769 4 training.batch_size 1.0 +769 5 model.embedding_dim 2.0 +769 5 model.relation_dim 0.0 +769 5 loss.margin 4.199475352958118 +769 5 optimizer.lr 0.06314433965106128 +769 5 negative_sampler.num_negs_per_pos 17.0 +769 5 training.batch_size 1.0 +769 6 model.embedding_dim 0.0 +769 6 model.relation_dim 0.0 +769 6 loss.margin 8.706902769865366 +769 6 optimizer.lr 0.07260578938820048 +769 6 negative_sampler.num_negs_per_pos 21.0 +769 6 training.batch_size 0.0 +769 7 
model.embedding_dim 0.0 +769 7 model.relation_dim 0.0 +769 7 loss.margin 5.814963486396514 +769 7 optimizer.lr 0.007462686539898794 +769 7 negative_sampler.num_negs_per_pos 65.0 +769 7 training.batch_size 0.0 +769 8 model.embedding_dim 0.0 +769 8 model.relation_dim 2.0 +769 8 loss.margin 5.801475743100356 +769 8 optimizer.lr 0.020479071488426093 +769 8 negative_sampler.num_negs_per_pos 56.0 +769 8 training.batch_size 2.0 +769 9 model.embedding_dim 1.0 +769 9 model.relation_dim 2.0 +769 9 loss.margin 7.8561725811381145 +769 9 optimizer.lr 0.013087558220671069 +769 9 negative_sampler.num_negs_per_pos 73.0 +769 9 training.batch_size 2.0 +769 10 model.embedding_dim 1.0 +769 10 model.relation_dim 0.0 +769 10 loss.margin 9.552247962810872 +769 10 optimizer.lr 0.0033886770568531755 +769 10 negative_sampler.num_negs_per_pos 67.0 +769 10 training.batch_size 2.0 +769 11 model.embedding_dim 1.0 +769 11 model.relation_dim 0.0 +769 11 loss.margin 2.0234469021004715 +769 11 optimizer.lr 0.016462598798770704 +769 11 negative_sampler.num_negs_per_pos 64.0 +769 11 training.batch_size 0.0 +769 12 model.embedding_dim 0.0 +769 12 model.relation_dim 0.0 +769 12 loss.margin 4.109675899017777 +769 12 optimizer.lr 0.007146853147777094 +769 12 negative_sampler.num_negs_per_pos 53.0 +769 12 training.batch_size 1.0 +769 13 model.embedding_dim 1.0 +769 13 model.relation_dim 1.0 +769 13 loss.margin 8.168547838456755 +769 13 optimizer.lr 0.0030527825082863245 +769 13 negative_sampler.num_negs_per_pos 76.0 +769 13 training.batch_size 0.0 +769 14 model.embedding_dim 1.0 +769 14 model.relation_dim 0.0 +769 14 loss.margin 1.6733398555420673 +769 14 optimizer.lr 0.0015039326818714965 +769 14 negative_sampler.num_negs_per_pos 59.0 +769 14 training.batch_size 1.0 +769 15 model.embedding_dim 2.0 +769 15 model.relation_dim 0.0 +769 15 loss.margin 3.364962631226262 +769 15 optimizer.lr 0.0011657310808858878 +769 15 negative_sampler.num_negs_per_pos 18.0 +769 15 training.batch_size 1.0 +769 16 
model.embedding_dim 0.0 +769 16 model.relation_dim 0.0 +769 16 loss.margin 1.6581982606686545 +769 16 optimizer.lr 0.0011195978635335584 +769 16 negative_sampler.num_negs_per_pos 70.0 +769 16 training.batch_size 0.0 +769 17 model.embedding_dim 0.0 +769 17 model.relation_dim 2.0 +769 17 loss.margin 1.8569861245730421 +769 17 optimizer.lr 0.005753999574489474 +769 17 negative_sampler.num_negs_per_pos 70.0 +769 17 training.batch_size 1.0 +769 18 model.embedding_dim 1.0 +769 18 model.relation_dim 0.0 +769 18 loss.margin 1.0817350468824227 +769 18 optimizer.lr 0.00633688894094597 +769 18 negative_sampler.num_negs_per_pos 14.0 +769 18 training.batch_size 0.0 +769 19 model.embedding_dim 1.0 +769 19 model.relation_dim 1.0 +769 19 loss.margin 5.644592573657433 +769 19 optimizer.lr 0.015696146795452122 +769 19 negative_sampler.num_negs_per_pos 36.0 +769 19 training.batch_size 1.0 +769 20 model.embedding_dim 1.0 +769 20 model.relation_dim 1.0 +769 20 loss.margin 2.0334389547403418 +769 20 optimizer.lr 0.09312085768855216 +769 20 negative_sampler.num_negs_per_pos 35.0 +769 20 training.batch_size 2.0 +769 21 model.embedding_dim 1.0 +769 21 model.relation_dim 0.0 +769 21 loss.margin 7.09675253631311 +769 21 optimizer.lr 0.0025044733674030885 +769 21 negative_sampler.num_negs_per_pos 67.0 +769 21 training.batch_size 1.0 +769 22 model.embedding_dim 2.0 +769 22 model.relation_dim 0.0 +769 22 loss.margin 9.568879321434139 +769 22 optimizer.lr 0.02921764925070724 +769 22 negative_sampler.num_negs_per_pos 36.0 +769 22 training.batch_size 0.0 +769 23 model.embedding_dim 2.0 +769 23 model.relation_dim 1.0 +769 23 loss.margin 1.3410058038062762 +769 23 optimizer.lr 0.0022736591895690275 +769 23 negative_sampler.num_negs_per_pos 68.0 +769 23 training.batch_size 1.0 +769 24 model.embedding_dim 2.0 +769 24 model.relation_dim 0.0 +769 24 loss.margin 9.990203723556512 +769 24 optimizer.lr 0.012439052515742467 +769 24 negative_sampler.num_negs_per_pos 62.0 +769 24 training.batch_size 1.0 +769 
25 model.embedding_dim 2.0 +769 25 model.relation_dim 0.0 +769 25 loss.margin 7.027389844505425 +769 25 optimizer.lr 0.0016892430923670268 +769 25 negative_sampler.num_negs_per_pos 54.0 +769 25 training.batch_size 0.0 +769 26 model.embedding_dim 1.0 +769 26 model.relation_dim 0.0 +769 26 loss.margin 9.634579964600977 +769 26 optimizer.lr 0.00685130367896253 +769 26 negative_sampler.num_negs_per_pos 95.0 +769 26 training.batch_size 0.0 +769 27 model.embedding_dim 2.0 +769 27 model.relation_dim 1.0 +769 27 loss.margin 6.461984120255138 +769 27 optimizer.lr 0.031145575567620275 +769 27 negative_sampler.num_negs_per_pos 35.0 +769 27 training.batch_size 1.0 +769 28 model.embedding_dim 2.0 +769 28 model.relation_dim 1.0 +769 28 loss.margin 8.554846927086375 +769 28 optimizer.lr 0.03632159765853973 +769 28 negative_sampler.num_negs_per_pos 9.0 +769 28 training.batch_size 1.0 +769 29 model.embedding_dim 2.0 +769 29 model.relation_dim 2.0 +769 29 loss.margin 4.133638415123981 +769 29 optimizer.lr 0.002034589587965593 +769 29 negative_sampler.num_negs_per_pos 36.0 +769 29 training.batch_size 0.0 +769 30 model.embedding_dim 0.0 +769 30 model.relation_dim 1.0 +769 30 loss.margin 0.8547745381638333 +769 30 optimizer.lr 0.01329362190194058 +769 30 negative_sampler.num_negs_per_pos 86.0 +769 30 training.batch_size 2.0 +769 31 model.embedding_dim 0.0 +769 31 model.relation_dim 2.0 +769 31 loss.margin 8.986156688235415 +769 31 optimizer.lr 0.001201379050427823 +769 31 negative_sampler.num_negs_per_pos 12.0 +769 31 training.batch_size 1.0 +769 32 model.embedding_dim 1.0 +769 32 model.relation_dim 0.0 +769 32 loss.margin 4.773263370771134 +769 32 optimizer.lr 0.05797429963025976 +769 32 negative_sampler.num_negs_per_pos 73.0 +769 32 training.batch_size 1.0 +769 33 model.embedding_dim 2.0 +769 33 model.relation_dim 2.0 +769 33 loss.margin 9.311664332236033 +769 33 optimizer.lr 0.0020383965975098615 +769 33 negative_sampler.num_negs_per_pos 68.0 +769 33 training.batch_size 1.0 +769 34 
model.embedding_dim 1.0 +769 34 model.relation_dim 0.0 +769 34 loss.margin 7.066948544067121 +769 34 optimizer.lr 0.005804053479024811 +769 34 negative_sampler.num_negs_per_pos 95.0 +769 34 training.batch_size 0.0 +769 35 model.embedding_dim 0.0 +769 35 model.relation_dim 1.0 +769 35 loss.margin 7.099304070927865 +769 35 optimizer.lr 0.0010489349529074034 +769 35 negative_sampler.num_negs_per_pos 86.0 +769 35 training.batch_size 1.0 +769 36 model.embedding_dim 0.0 +769 36 model.relation_dim 2.0 +769 36 loss.margin 4.636253093807332 +769 36 optimizer.lr 0.0014183875936459552 +769 36 negative_sampler.num_negs_per_pos 3.0 +769 36 training.batch_size 0.0 +769 37 model.embedding_dim 1.0 +769 37 model.relation_dim 1.0 +769 37 loss.margin 2.5874900406665997 +769 37 optimizer.lr 0.0020019757982066015 +769 37 negative_sampler.num_negs_per_pos 26.0 +769 37 training.batch_size 2.0 +769 38 model.embedding_dim 1.0 +769 38 model.relation_dim 1.0 +769 38 loss.margin 2.550678629082921 +769 38 optimizer.lr 0.0063712110603505595 +769 38 negative_sampler.num_negs_per_pos 8.0 +769 38 training.batch_size 1.0 +769 39 model.embedding_dim 1.0 +769 39 model.relation_dim 0.0 +769 39 loss.margin 3.2392688959751106 +769 39 optimizer.lr 0.006409583865374007 +769 39 negative_sampler.num_negs_per_pos 37.0 +769 39 training.batch_size 2.0 +769 40 model.embedding_dim 0.0 +769 40 model.relation_dim 2.0 +769 40 loss.margin 8.73976873787815 +769 40 optimizer.lr 0.006480378355689994 +769 40 negative_sampler.num_negs_per_pos 11.0 +769 40 training.batch_size 0.0 +769 41 model.embedding_dim 1.0 +769 41 model.relation_dim 0.0 +769 41 loss.margin 7.504207711973481 +769 41 optimizer.lr 0.010630486899040094 +769 41 negative_sampler.num_negs_per_pos 42.0 +769 41 training.batch_size 2.0 +769 42 model.embedding_dim 1.0 +769 42 model.relation_dim 0.0 +769 42 loss.margin 4.284085249893313 +769 42 optimizer.lr 0.07885404546984387 +769 42 negative_sampler.num_negs_per_pos 50.0 +769 42 training.batch_size 1.0 +769 43 
model.embedding_dim 1.0 +769 43 model.relation_dim 0.0 +769 43 loss.margin 6.744358640027618 +769 43 optimizer.lr 0.001277892604549061 +769 43 negative_sampler.num_negs_per_pos 92.0 +769 43 training.batch_size 2.0 +769 44 model.embedding_dim 0.0 +769 44 model.relation_dim 0.0 +769 44 loss.margin 8.52640506080156 +769 44 optimizer.lr 0.036889185379732306 +769 44 negative_sampler.num_negs_per_pos 84.0 +769 44 training.batch_size 1.0 +769 45 model.embedding_dim 1.0 +769 45 model.relation_dim 0.0 +769 45 loss.margin 6.88744015832889 +769 45 optimizer.lr 0.0024893349085316004 +769 45 negative_sampler.num_negs_per_pos 85.0 +769 45 training.batch_size 1.0 +769 46 model.embedding_dim 2.0 +769 46 model.relation_dim 2.0 +769 46 loss.margin 7.825215173535988 +769 46 optimizer.lr 0.006131040811005288 +769 46 negative_sampler.num_negs_per_pos 12.0 +769 46 training.batch_size 2.0 +769 47 model.embedding_dim 2.0 +769 47 model.relation_dim 2.0 +769 47 loss.margin 2.6412710255858634 +769 47 optimizer.lr 0.008957923732057674 +769 47 negative_sampler.num_negs_per_pos 24.0 +769 47 training.batch_size 2.0 +769 48 model.embedding_dim 1.0 +769 48 model.relation_dim 2.0 +769 48 loss.margin 2.8306153645681773 +769 48 optimizer.lr 0.0065366883845055955 +769 48 negative_sampler.num_negs_per_pos 32.0 +769 48 training.batch_size 1.0 +769 49 model.embedding_dim 2.0 +769 49 model.relation_dim 0.0 +769 49 loss.margin 7.405899939605474 +769 49 optimizer.lr 0.00834618096189366 +769 49 negative_sampler.num_negs_per_pos 64.0 +769 49 training.batch_size 2.0 +769 50 model.embedding_dim 0.0 +769 50 model.relation_dim 1.0 +769 50 loss.margin 8.43974673656755 +769 50 optimizer.lr 0.029256041057422057 +769 50 negative_sampler.num_negs_per_pos 35.0 +769 50 training.batch_size 2.0 +769 51 model.embedding_dim 0.0 +769 51 model.relation_dim 0.0 +769 51 loss.margin 4.634851804470142 +769 51 optimizer.lr 0.027091386074052256 +769 51 negative_sampler.num_negs_per_pos 77.0 +769 51 training.batch_size 0.0 +769 1 
dataset """wn18rr""" +769 1 model """transd""" +769 1 loss """marginranking""" +769 1 regularizer """no""" +769 1 optimizer """adam""" +769 1 training_loop """owa""" +769 1 negative_sampler """basic""" +769 1 evaluator """rankbased""" +769 2 dataset """wn18rr""" +769 2 model """transd""" +769 2 loss """marginranking""" +769 2 regularizer """no""" +769 2 optimizer """adam""" +769 2 training_loop """owa""" +769 2 negative_sampler """basic""" +769 2 evaluator """rankbased""" +769 3 dataset """wn18rr""" +769 3 model """transd""" +769 3 loss """marginranking""" +769 3 regularizer """no""" +769 3 optimizer """adam""" +769 3 training_loop """owa""" +769 3 negative_sampler """basic""" +769 3 evaluator """rankbased""" +769 4 dataset """wn18rr""" +769 4 model """transd""" +769 4 loss """marginranking""" +769 4 regularizer """no""" +769 4 optimizer """adam""" +769 4 training_loop """owa""" +769 4 negative_sampler """basic""" +769 4 evaluator """rankbased""" +769 5 dataset """wn18rr""" +769 5 model """transd""" +769 5 loss """marginranking""" +769 5 regularizer """no""" +769 5 optimizer """adam""" +769 5 training_loop """owa""" +769 5 negative_sampler """basic""" +769 5 evaluator """rankbased""" +769 6 dataset """wn18rr""" +769 6 model """transd""" +769 6 loss """marginranking""" +769 6 regularizer """no""" +769 6 optimizer """adam""" +769 6 training_loop """owa""" +769 6 negative_sampler """basic""" +769 6 evaluator """rankbased""" +769 7 dataset """wn18rr""" +769 7 model """transd""" +769 7 loss """marginranking""" +769 7 regularizer """no""" +769 7 optimizer """adam""" +769 7 training_loop """owa""" +769 7 negative_sampler """basic""" +769 7 evaluator """rankbased""" +769 8 dataset """wn18rr""" +769 8 model """transd""" +769 8 loss """marginranking""" +769 8 regularizer """no""" +769 8 optimizer """adam""" +769 8 training_loop """owa""" +769 8 negative_sampler """basic""" +769 8 evaluator """rankbased""" +769 9 dataset """wn18rr""" +769 9 model """transd""" +769 9 loss 
"""marginranking""" +769 9 regularizer """no""" +769 9 optimizer """adam""" +769 9 training_loop """owa""" +769 9 negative_sampler """basic""" +769 9 evaluator """rankbased""" +769 10 dataset """wn18rr""" +769 10 model """transd""" +769 10 loss """marginranking""" +769 10 regularizer """no""" +769 10 optimizer """adam""" +769 10 training_loop """owa""" +769 10 negative_sampler """basic""" +769 10 evaluator """rankbased""" +769 11 dataset """wn18rr""" +769 11 model """transd""" +769 11 loss """marginranking""" +769 11 regularizer """no""" +769 11 optimizer """adam""" +769 11 training_loop """owa""" +769 11 negative_sampler """basic""" +769 11 evaluator """rankbased""" +769 12 dataset """wn18rr""" +769 12 model """transd""" +769 12 loss """marginranking""" +769 12 regularizer """no""" +769 12 optimizer """adam""" +769 12 training_loop """owa""" +769 12 negative_sampler """basic""" +769 12 evaluator """rankbased""" +769 13 dataset """wn18rr""" +769 13 model """transd""" +769 13 loss """marginranking""" +769 13 regularizer """no""" +769 13 optimizer """adam""" +769 13 training_loop """owa""" +769 13 negative_sampler """basic""" +769 13 evaluator """rankbased""" +769 14 dataset """wn18rr""" +769 14 model """transd""" +769 14 loss """marginranking""" +769 14 regularizer """no""" +769 14 optimizer """adam""" +769 14 training_loop """owa""" +769 14 negative_sampler """basic""" +769 14 evaluator """rankbased""" +769 15 dataset """wn18rr""" +769 15 model """transd""" +769 15 loss """marginranking""" +769 15 regularizer """no""" +769 15 optimizer """adam""" +769 15 training_loop """owa""" +769 15 negative_sampler """basic""" +769 15 evaluator """rankbased""" +769 16 dataset """wn18rr""" +769 16 model """transd""" +769 16 loss """marginranking""" +769 16 regularizer """no""" +769 16 optimizer """adam""" +769 16 training_loop """owa""" +769 16 negative_sampler """basic""" +769 16 evaluator """rankbased""" +769 17 dataset """wn18rr""" +769 17 model """transd""" +769 17 loss 
"""marginranking""" +769 17 regularizer """no""" +769 17 optimizer """adam""" +769 17 training_loop """owa""" +769 17 negative_sampler """basic""" +769 17 evaluator """rankbased""" +769 18 dataset """wn18rr""" +769 18 model """transd""" +769 18 loss """marginranking""" +769 18 regularizer """no""" +769 18 optimizer """adam""" +769 18 training_loop """owa""" +769 18 negative_sampler """basic""" +769 18 evaluator """rankbased""" +769 19 dataset """wn18rr""" +769 19 model """transd""" +769 19 loss """marginranking""" +769 19 regularizer """no""" +769 19 optimizer """adam""" +769 19 training_loop """owa""" +769 19 negative_sampler """basic""" +769 19 evaluator """rankbased""" +769 20 dataset """wn18rr""" +769 20 model """transd""" +769 20 loss """marginranking""" +769 20 regularizer """no""" +769 20 optimizer """adam""" +769 20 training_loop """owa""" +769 20 negative_sampler """basic""" +769 20 evaluator """rankbased""" +769 21 dataset """wn18rr""" +769 21 model """transd""" +769 21 loss """marginranking""" +769 21 regularizer """no""" +769 21 optimizer """adam""" +769 21 training_loop """owa""" +769 21 negative_sampler """basic""" +769 21 evaluator """rankbased""" +769 22 dataset """wn18rr""" +769 22 model """transd""" +769 22 loss """marginranking""" +769 22 regularizer """no""" +769 22 optimizer """adam""" +769 22 training_loop """owa""" +769 22 negative_sampler """basic""" +769 22 evaluator """rankbased""" +769 23 dataset """wn18rr""" +769 23 model """transd""" +769 23 loss """marginranking""" +769 23 regularizer """no""" +769 23 optimizer """adam""" +769 23 training_loop """owa""" +769 23 negative_sampler """basic""" +769 23 evaluator """rankbased""" +769 24 dataset """wn18rr""" +769 24 model """transd""" +769 24 loss """marginranking""" +769 24 regularizer """no""" +769 24 optimizer """adam""" +769 24 training_loop """owa""" +769 24 negative_sampler """basic""" +769 24 evaluator """rankbased""" +769 25 dataset """wn18rr""" +769 25 model """transd""" +769 25 loss 
"""marginranking""" +769 25 regularizer """no""" +769 25 optimizer """adam""" +769 25 training_loop """owa""" +769 25 negative_sampler """basic""" +769 25 evaluator """rankbased""" +769 26 dataset """wn18rr""" +769 26 model """transd""" +769 26 loss """marginranking""" +769 26 regularizer """no""" +769 26 optimizer """adam""" +769 26 training_loop """owa""" +769 26 negative_sampler """basic""" +769 26 evaluator """rankbased""" +769 27 dataset """wn18rr""" +769 27 model """transd""" +769 27 loss """marginranking""" +769 27 regularizer """no""" +769 27 optimizer """adam""" +769 27 training_loop """owa""" +769 27 negative_sampler """basic""" +769 27 evaluator """rankbased""" +769 28 dataset """wn18rr""" +769 28 model """transd""" +769 28 loss """marginranking""" +769 28 regularizer """no""" +769 28 optimizer """adam""" +769 28 training_loop """owa""" +769 28 negative_sampler """basic""" +769 28 evaluator """rankbased""" +769 29 dataset """wn18rr""" +769 29 model """transd""" +769 29 loss """marginranking""" +769 29 regularizer """no""" +769 29 optimizer """adam""" +769 29 training_loop """owa""" +769 29 negative_sampler """basic""" +769 29 evaluator """rankbased""" +769 30 dataset """wn18rr""" +769 30 model """transd""" +769 30 loss """marginranking""" +769 30 regularizer """no""" +769 30 optimizer """adam""" +769 30 training_loop """owa""" +769 30 negative_sampler """basic""" +769 30 evaluator """rankbased""" +769 31 dataset """wn18rr""" +769 31 model """transd""" +769 31 loss """marginranking""" +769 31 regularizer """no""" +769 31 optimizer """adam""" +769 31 training_loop """owa""" +769 31 negative_sampler """basic""" +769 31 evaluator """rankbased""" +769 32 dataset """wn18rr""" +769 32 model """transd""" +769 32 loss """marginranking""" +769 32 regularizer """no""" +769 32 optimizer """adam""" +769 32 training_loop """owa""" +769 32 negative_sampler """basic""" +769 32 evaluator """rankbased""" +769 33 dataset """wn18rr""" +769 33 model """transd""" +769 33 loss 
"""marginranking""" +769 33 regularizer """no""" +769 33 optimizer """adam""" +769 33 training_loop """owa""" +769 33 negative_sampler """basic""" +769 33 evaluator """rankbased""" +769 34 dataset """wn18rr""" +769 34 model """transd""" +769 34 loss """marginranking""" +769 34 regularizer """no""" +769 34 optimizer """adam""" +769 34 training_loop """owa""" +769 34 negative_sampler """basic""" +769 34 evaluator """rankbased""" +769 35 dataset """wn18rr""" +769 35 model """transd""" +769 35 loss """marginranking""" +769 35 regularizer """no""" +769 35 optimizer """adam""" +769 35 training_loop """owa""" +769 35 negative_sampler """basic""" +769 35 evaluator """rankbased""" +769 36 dataset """wn18rr""" +769 36 model """transd""" +769 36 loss """marginranking""" +769 36 regularizer """no""" +769 36 optimizer """adam""" +769 36 training_loop """owa""" +769 36 negative_sampler """basic""" +769 36 evaluator """rankbased""" +769 37 dataset """wn18rr""" +769 37 model """transd""" +769 37 loss """marginranking""" +769 37 regularizer """no""" +769 37 optimizer """adam""" +769 37 training_loop """owa""" +769 37 negative_sampler """basic""" +769 37 evaluator """rankbased""" +769 38 dataset """wn18rr""" +769 38 model """transd""" +769 38 loss """marginranking""" +769 38 regularizer """no""" +769 38 optimizer """adam""" +769 38 training_loop """owa""" +769 38 negative_sampler """basic""" +769 38 evaluator """rankbased""" +769 39 dataset """wn18rr""" +769 39 model """transd""" +769 39 loss """marginranking""" +769 39 regularizer """no""" +769 39 optimizer """adam""" +769 39 training_loop """owa""" +769 39 negative_sampler """basic""" +769 39 evaluator """rankbased""" +769 40 dataset """wn18rr""" +769 40 model """transd""" +769 40 loss """marginranking""" +769 40 regularizer """no""" +769 40 optimizer """adam""" +769 40 training_loop """owa""" +769 40 negative_sampler """basic""" +769 40 evaluator """rankbased""" +769 41 dataset """wn18rr""" +769 41 model """transd""" +769 41 loss 
"""marginranking""" +769 41 regularizer """no""" +769 41 optimizer """adam""" +769 41 training_loop """owa""" +769 41 negative_sampler """basic""" +769 41 evaluator """rankbased""" +769 42 dataset """wn18rr""" +769 42 model """transd""" +769 42 loss """marginranking""" +769 42 regularizer """no""" +769 42 optimizer """adam""" +769 42 training_loop """owa""" +769 42 negative_sampler """basic""" +769 42 evaluator """rankbased""" +769 43 dataset """wn18rr""" +769 43 model """transd""" +769 43 loss """marginranking""" +769 43 regularizer """no""" +769 43 optimizer """adam""" +769 43 training_loop """owa""" +769 43 negative_sampler """basic""" +769 43 evaluator """rankbased""" +769 44 dataset """wn18rr""" +769 44 model """transd""" +769 44 loss """marginranking""" +769 44 regularizer """no""" +769 44 optimizer """adam""" +769 44 training_loop """owa""" +769 44 negative_sampler """basic""" +769 44 evaluator """rankbased""" +769 45 dataset """wn18rr""" +769 45 model """transd""" +769 45 loss """marginranking""" +769 45 regularizer """no""" +769 45 optimizer """adam""" +769 45 training_loop """owa""" +769 45 negative_sampler """basic""" +769 45 evaluator """rankbased""" +769 46 dataset """wn18rr""" +769 46 model """transd""" +769 46 loss """marginranking""" +769 46 regularizer """no""" +769 46 optimizer """adam""" +769 46 training_loop """owa""" +769 46 negative_sampler """basic""" +769 46 evaluator """rankbased""" +769 47 dataset """wn18rr""" +769 47 model """transd""" +769 47 loss """marginranking""" +769 47 regularizer """no""" +769 47 optimizer """adam""" +769 47 training_loop """owa""" +769 47 negative_sampler """basic""" +769 47 evaluator """rankbased""" +769 48 dataset """wn18rr""" +769 48 model """transd""" +769 48 loss """marginranking""" +769 48 regularizer """no""" +769 48 optimizer """adam""" +769 48 training_loop """owa""" +769 48 negative_sampler """basic""" +769 48 evaluator """rankbased""" +769 49 dataset """wn18rr""" +769 49 model """transd""" +769 49 loss 
"""marginranking""" +769 49 regularizer """no""" +769 49 optimizer """adam""" +769 49 training_loop """owa""" +769 49 negative_sampler """basic""" +769 49 evaluator """rankbased""" +769 50 dataset """wn18rr""" +769 50 model """transd""" +769 50 loss """marginranking""" +769 50 regularizer """no""" +769 50 optimizer """adam""" +769 50 training_loop """owa""" +769 50 negative_sampler """basic""" +769 50 evaluator """rankbased""" +769 51 dataset """wn18rr""" +769 51 model """transd""" +769 51 loss """marginranking""" +769 51 regularizer """no""" +769 51 optimizer """adam""" +769 51 training_loop """owa""" +769 51 negative_sampler """basic""" +769 51 evaluator """rankbased""" +770 1 model.embedding_dim 0.0 +770 1 model.relation_dim 2.0 +770 1 loss.margin 4.462884358252522 +770 1 optimizer.lr 0.06487210765252686 +770 1 negative_sampler.num_negs_per_pos 11.0 +770 1 training.batch_size 1.0 +770 2 model.embedding_dim 2.0 +770 2 model.relation_dim 0.0 +770 2 loss.margin 9.537188572471582 +770 2 optimizer.lr 0.001413798378064593 +770 2 negative_sampler.num_negs_per_pos 88.0 +770 2 training.batch_size 0.0 +770 3 model.embedding_dim 2.0 +770 3 model.relation_dim 1.0 +770 3 loss.margin 9.665391503105669 +770 3 optimizer.lr 0.001738849637713911 +770 3 negative_sampler.num_negs_per_pos 4.0 +770 3 training.batch_size 2.0 +770 4 model.embedding_dim 2.0 +770 4 model.relation_dim 1.0 +770 4 loss.margin 4.824082172063877 +770 4 optimizer.lr 0.0817972470941259 +770 4 negative_sampler.num_negs_per_pos 0.0 +770 4 training.batch_size 0.0 +770 5 model.embedding_dim 0.0 +770 5 model.relation_dim 1.0 +770 5 loss.margin 4.2230462414220655 +770 5 optimizer.lr 0.0014581635328521532 +770 5 negative_sampler.num_negs_per_pos 47.0 +770 5 training.batch_size 2.0 +770 6 model.embedding_dim 1.0 +770 6 model.relation_dim 2.0 +770 6 loss.margin 6.736017800739766 +770 6 optimizer.lr 0.02976187523180481 +770 6 negative_sampler.num_negs_per_pos 13.0 +770 6 training.batch_size 2.0 +770 7 model.embedding_dim 
2.0 +770 7 model.relation_dim 1.0 +770 7 loss.margin 8.54285787842925 +770 7 optimizer.lr 0.002756979984480509 +770 7 negative_sampler.num_negs_per_pos 78.0 +770 7 training.batch_size 2.0 +770 8 model.embedding_dim 0.0 +770 8 model.relation_dim 1.0 +770 8 loss.margin 6.185646985146428 +770 8 optimizer.lr 0.01882963244468696 +770 8 negative_sampler.num_negs_per_pos 94.0 +770 8 training.batch_size 1.0 +770 9 model.embedding_dim 1.0 +770 9 model.relation_dim 1.0 +770 9 loss.margin 5.4685716411358944 +770 9 optimizer.lr 0.05828571546845168 +770 9 negative_sampler.num_negs_per_pos 71.0 +770 9 training.batch_size 2.0 +770 10 model.embedding_dim 2.0 +770 10 model.relation_dim 0.0 +770 10 loss.margin 3.5092072430211534 +770 10 optimizer.lr 0.026571136486646933 +770 10 negative_sampler.num_negs_per_pos 45.0 +770 10 training.batch_size 2.0 +770 11 model.embedding_dim 1.0 +770 11 model.relation_dim 1.0 +770 11 loss.margin 7.164315320701899 +770 11 optimizer.lr 0.0024363919697578 +770 11 negative_sampler.num_negs_per_pos 7.0 +770 11 training.batch_size 1.0 +770 12 model.embedding_dim 2.0 +770 12 model.relation_dim 2.0 +770 12 loss.margin 3.4136222600850714 +770 12 optimizer.lr 0.0364663250373309 +770 12 negative_sampler.num_negs_per_pos 19.0 +770 12 training.batch_size 0.0 +770 13 model.embedding_dim 1.0 +770 13 model.relation_dim 1.0 +770 13 loss.margin 5.480630373139842 +770 13 optimizer.lr 0.0011141911819373652 +770 13 negative_sampler.num_negs_per_pos 59.0 +770 13 training.batch_size 2.0 +770 14 model.embedding_dim 2.0 +770 14 model.relation_dim 1.0 +770 14 loss.margin 6.618712636755118 +770 14 optimizer.lr 0.0011309584260355166 +770 14 negative_sampler.num_negs_per_pos 1.0 +770 14 training.batch_size 0.0 +770 15 model.embedding_dim 0.0 +770 15 model.relation_dim 0.0 +770 15 loss.margin 6.611042658824389 +770 15 optimizer.lr 0.007564497337595613 +770 15 negative_sampler.num_negs_per_pos 49.0 +770 15 training.batch_size 0.0 +770 16 model.embedding_dim 1.0 +770 16 
model.relation_dim 2.0 +770 16 loss.margin 8.130916797133501 +770 16 optimizer.lr 0.0055723858887730526 +770 16 negative_sampler.num_negs_per_pos 46.0 +770 16 training.batch_size 2.0 +770 17 model.embedding_dim 2.0 +770 17 model.relation_dim 0.0 +770 17 loss.margin 8.496279823175874 +770 17 optimizer.lr 0.013097807183990413 +770 17 negative_sampler.num_negs_per_pos 33.0 +770 17 training.batch_size 1.0 +770 18 model.embedding_dim 1.0 +770 18 model.relation_dim 0.0 +770 18 loss.margin 0.9795411699514283 +770 18 optimizer.lr 0.0101525440764641 +770 18 negative_sampler.num_negs_per_pos 23.0 +770 18 training.batch_size 1.0 +770 19 model.embedding_dim 2.0 +770 19 model.relation_dim 2.0 +770 19 loss.margin 6.029624071865077 +770 19 optimizer.lr 0.050910766540214175 +770 19 negative_sampler.num_negs_per_pos 97.0 +770 19 training.batch_size 1.0 +770 20 model.embedding_dim 0.0 +770 20 model.relation_dim 1.0 +770 20 loss.margin 0.780021817206271 +770 20 optimizer.lr 0.007729949248971433 +770 20 negative_sampler.num_negs_per_pos 61.0 +770 20 training.batch_size 2.0 +770 21 model.embedding_dim 1.0 +770 21 model.relation_dim 2.0 +770 21 loss.margin 4.748907019187054 +770 21 optimizer.lr 0.05440729064891454 +770 21 negative_sampler.num_negs_per_pos 56.0 +770 21 training.batch_size 0.0 +770 22 model.embedding_dim 1.0 +770 22 model.relation_dim 1.0 +770 22 loss.margin 5.8145909058965195 +770 22 optimizer.lr 0.0020924149665387994 +770 22 negative_sampler.num_negs_per_pos 5.0 +770 22 training.batch_size 1.0 +770 23 model.embedding_dim 2.0 +770 23 model.relation_dim 2.0 +770 23 loss.margin 9.545223750457264 +770 23 optimizer.lr 0.003445680543325828 +770 23 negative_sampler.num_negs_per_pos 86.0 +770 23 training.batch_size 0.0 +770 24 model.embedding_dim 0.0 +770 24 model.relation_dim 1.0 +770 24 loss.margin 4.375205125384738 +770 24 optimizer.lr 0.04768943708655236 +770 24 negative_sampler.num_negs_per_pos 89.0 +770 24 training.batch_size 2.0 +770 25 model.embedding_dim 1.0 +770 25 
model.relation_dim 2.0 +770 25 loss.margin 8.551992365197343 +770 25 optimizer.lr 0.0010398376809507865 +770 25 negative_sampler.num_negs_per_pos 2.0 +770 25 training.batch_size 1.0 +770 26 model.embedding_dim 2.0 +770 26 model.relation_dim 0.0 +770 26 loss.margin 5.834127262610166 +770 26 optimizer.lr 0.0035427236539080655 +770 26 negative_sampler.num_negs_per_pos 58.0 +770 26 training.batch_size 0.0 +770 27 model.embedding_dim 0.0 +770 27 model.relation_dim 2.0 +770 27 loss.margin 9.915574100563104 +770 27 optimizer.lr 0.06044337085568791 +770 27 negative_sampler.num_negs_per_pos 48.0 +770 27 training.batch_size 1.0 +770 28 model.embedding_dim 2.0 +770 28 model.relation_dim 1.0 +770 28 loss.margin 7.6680567702963 +770 28 optimizer.lr 0.006816340936765948 +770 28 negative_sampler.num_negs_per_pos 10.0 +770 28 training.batch_size 0.0 +770 29 model.embedding_dim 1.0 +770 29 model.relation_dim 2.0 +770 29 loss.margin 6.37280164128483 +770 29 optimizer.lr 0.031926824499028096 +770 29 negative_sampler.num_negs_per_pos 35.0 +770 29 training.batch_size 2.0 +770 30 model.embedding_dim 1.0 +770 30 model.relation_dim 0.0 +770 30 loss.margin 8.942109714857931 +770 30 optimizer.lr 0.008911418212695631 +770 30 negative_sampler.num_negs_per_pos 25.0 +770 30 training.batch_size 0.0 +770 31 model.embedding_dim 1.0 +770 31 model.relation_dim 1.0 +770 31 loss.margin 1.0186645344915004 +770 31 optimizer.lr 0.02894211809033973 +770 31 negative_sampler.num_negs_per_pos 1.0 +770 31 training.batch_size 2.0 +770 32 model.embedding_dim 2.0 +770 32 model.relation_dim 1.0 +770 32 loss.margin 3.6000805636965763 +770 32 optimizer.lr 0.00936060229670512 +770 32 negative_sampler.num_negs_per_pos 10.0 +770 32 training.batch_size 1.0 +770 33 model.embedding_dim 0.0 +770 33 model.relation_dim 0.0 +770 33 loss.margin 7.525327495354002 +770 33 optimizer.lr 0.05675721289439305 +770 33 negative_sampler.num_negs_per_pos 74.0 +770 33 training.batch_size 0.0 +770 34 model.embedding_dim 2.0 +770 34 
model.relation_dim 2.0 +770 34 loss.margin 1.8501380978845319 +770 34 optimizer.lr 0.017351339791426324 +770 34 negative_sampler.num_negs_per_pos 75.0 +770 34 training.batch_size 2.0 +770 35 model.embedding_dim 2.0 +770 35 model.relation_dim 0.0 +770 35 loss.margin 9.195902773119284 +770 35 optimizer.lr 0.0018378457597150986 +770 35 negative_sampler.num_negs_per_pos 35.0 +770 35 training.batch_size 2.0 +770 36 model.embedding_dim 2.0 +770 36 model.relation_dim 2.0 +770 36 loss.margin 1.036508832677204 +770 36 optimizer.lr 0.007511917975228004 +770 36 negative_sampler.num_negs_per_pos 97.0 +770 36 training.batch_size 2.0 +770 37 model.embedding_dim 1.0 +770 37 model.relation_dim 2.0 +770 37 loss.margin 1.5179543951212444 +770 37 optimizer.lr 0.0028214578708036214 +770 37 negative_sampler.num_negs_per_pos 5.0 +770 37 training.batch_size 2.0 +770 38 model.embedding_dim 1.0 +770 38 model.relation_dim 0.0 +770 38 loss.margin 5.592789572128131 +770 38 optimizer.lr 0.06309764144254035 +770 38 negative_sampler.num_negs_per_pos 39.0 +770 38 training.batch_size 1.0 +770 39 model.embedding_dim 2.0 +770 39 model.relation_dim 0.0 +770 39 loss.margin 9.31194259830983 +770 39 optimizer.lr 0.010388568973788476 +770 39 negative_sampler.num_negs_per_pos 17.0 +770 39 training.batch_size 0.0 +770 40 model.embedding_dim 2.0 +770 40 model.relation_dim 0.0 +770 40 loss.margin 9.0386064232064 +770 40 optimizer.lr 0.05737098284986039 +770 40 negative_sampler.num_negs_per_pos 68.0 +770 40 training.batch_size 0.0 +770 41 model.embedding_dim 0.0 +770 41 model.relation_dim 0.0 +770 41 loss.margin 5.94019230950629 +770 41 optimizer.lr 0.001510759462852154 +770 41 negative_sampler.num_negs_per_pos 89.0 +770 41 training.batch_size 2.0 +770 42 model.embedding_dim 0.0 +770 42 model.relation_dim 1.0 +770 42 loss.margin 3.3605290941765187 +770 42 optimizer.lr 0.030117180650331855 +770 42 negative_sampler.num_negs_per_pos 11.0 +770 42 training.batch_size 2.0 +770 43 model.embedding_dim 1.0 +770 43 
model.relation_dim 1.0 +770 43 loss.margin 4.241540144806033 +770 43 optimizer.lr 0.011495958556089585 +770 43 negative_sampler.num_negs_per_pos 46.0 +770 43 training.batch_size 1.0 +770 44 model.embedding_dim 2.0 +770 44 model.relation_dim 2.0 +770 44 loss.margin 2.5621628136304886 +770 44 optimizer.lr 0.029508874468907913 +770 44 negative_sampler.num_negs_per_pos 65.0 +770 44 training.batch_size 1.0 +770 45 model.embedding_dim 1.0 +770 45 model.relation_dim 1.0 +770 45 loss.margin 3.1133102907898453 +770 45 optimizer.lr 0.0018644591445514977 +770 45 negative_sampler.num_negs_per_pos 81.0 +770 45 training.batch_size 1.0 +770 46 model.embedding_dim 2.0 +770 46 model.relation_dim 2.0 +770 46 loss.margin 8.800534177638465 +770 46 optimizer.lr 0.0040253131958036 +770 46 negative_sampler.num_negs_per_pos 72.0 +770 46 training.batch_size 2.0 +770 47 model.embedding_dim 0.0 +770 47 model.relation_dim 1.0 +770 47 loss.margin 5.916259404809796 +770 47 optimizer.lr 0.01083192283619182 +770 47 negative_sampler.num_negs_per_pos 75.0 +770 47 training.batch_size 0.0 +770 48 model.embedding_dim 2.0 +770 48 model.relation_dim 1.0 +770 48 loss.margin 8.325487439111333 +770 48 optimizer.lr 0.04683372754009884 +770 48 negative_sampler.num_negs_per_pos 3.0 +770 48 training.batch_size 2.0 +770 49 model.embedding_dim 2.0 +770 49 model.relation_dim 1.0 +770 49 loss.margin 4.318806053016042 +770 49 optimizer.lr 0.0025321690476598833 +770 49 negative_sampler.num_negs_per_pos 99.0 +770 49 training.batch_size 0.0 +770 50 model.embedding_dim 1.0 +770 50 model.relation_dim 1.0 +770 50 loss.margin 3.4781978989953526 +770 50 optimizer.lr 0.06371847754533537 +770 50 negative_sampler.num_negs_per_pos 76.0 +770 50 training.batch_size 1.0 +770 51 model.embedding_dim 1.0 +770 51 model.relation_dim 2.0 +770 51 loss.margin 9.255921497722381 +770 51 optimizer.lr 0.02088411553367463 +770 51 negative_sampler.num_negs_per_pos 84.0 +770 51 training.batch_size 1.0 +770 52 model.embedding_dim 2.0 +770 52 
model.relation_dim 2.0 +770 52 loss.margin 6.358870801139255 +770 52 optimizer.lr 0.019460186096533248 +770 52 negative_sampler.num_negs_per_pos 70.0 +770 52 training.batch_size 1.0 +770 53 model.embedding_dim 0.0 +770 53 model.relation_dim 2.0 +770 53 loss.margin 9.423247433724777 +770 53 optimizer.lr 0.0012072924932577773 +770 53 negative_sampler.num_negs_per_pos 57.0 +770 53 training.batch_size 0.0 +770 54 model.embedding_dim 0.0 +770 54 model.relation_dim 2.0 +770 54 loss.margin 6.522114335900864 +770 54 optimizer.lr 0.001491464434113654 +770 54 negative_sampler.num_negs_per_pos 3.0 +770 54 training.batch_size 2.0 +770 55 model.embedding_dim 0.0 +770 55 model.relation_dim 0.0 +770 55 loss.margin 6.674822128254516 +770 55 optimizer.lr 0.003393839564237732 +770 55 negative_sampler.num_negs_per_pos 24.0 +770 55 training.batch_size 1.0 +770 56 model.embedding_dim 2.0 +770 56 model.relation_dim 1.0 +770 56 loss.margin 9.608187450293064 +770 56 optimizer.lr 0.037154114724318364 +770 56 negative_sampler.num_negs_per_pos 22.0 +770 56 training.batch_size 0.0 +770 57 model.embedding_dim 0.0 +770 57 model.relation_dim 0.0 +770 57 loss.margin 1.8101088764452578 +770 57 optimizer.lr 0.0408591139043235 +770 57 negative_sampler.num_negs_per_pos 7.0 +770 57 training.batch_size 2.0 +770 58 model.embedding_dim 0.0 +770 58 model.relation_dim 0.0 +770 58 loss.margin 7.532311740718343 +770 58 optimizer.lr 0.0077554033328829505 +770 58 negative_sampler.num_negs_per_pos 31.0 +770 58 training.batch_size 0.0 +770 59 model.embedding_dim 1.0 +770 59 model.relation_dim 0.0 +770 59 loss.margin 0.713883929169705 +770 59 optimizer.lr 0.00410993301938647 +770 59 negative_sampler.num_negs_per_pos 47.0 +770 59 training.batch_size 2.0 +770 60 model.embedding_dim 2.0 +770 60 model.relation_dim 1.0 +770 60 loss.margin 1.886180080753677 +770 60 optimizer.lr 0.07506285912063493 +770 60 negative_sampler.num_negs_per_pos 47.0 +770 60 training.batch_size 1.0 +770 61 model.embedding_dim 1.0 +770 61 
model.relation_dim 2.0 +770 61 loss.margin 6.434418365589076 +770 61 optimizer.lr 0.0541560589564601 +770 61 negative_sampler.num_negs_per_pos 0.0 +770 61 training.batch_size 2.0 +770 62 model.embedding_dim 0.0 +770 62 model.relation_dim 2.0 +770 62 loss.margin 7.435989064741146 +770 62 optimizer.lr 0.0072849653127096976 +770 62 negative_sampler.num_negs_per_pos 17.0 +770 62 training.batch_size 2.0 +770 63 model.embedding_dim 1.0 +770 63 model.relation_dim 0.0 +770 63 loss.margin 3.4303045083147383 +770 63 optimizer.lr 0.0015883267801620894 +770 63 negative_sampler.num_negs_per_pos 39.0 +770 63 training.batch_size 0.0 +770 64 model.embedding_dim 1.0 +770 64 model.relation_dim 1.0 +770 64 loss.margin 5.365367057533413 +770 64 optimizer.lr 0.08981457099674736 +770 64 negative_sampler.num_negs_per_pos 53.0 +770 64 training.batch_size 1.0 +770 65 model.embedding_dim 0.0 +770 65 model.relation_dim 2.0 +770 65 loss.margin 5.229591339467585 +770 65 optimizer.lr 0.001135759256680837 +770 65 negative_sampler.num_negs_per_pos 37.0 +770 65 training.batch_size 2.0 +770 66 model.embedding_dim 0.0 +770 66 model.relation_dim 0.0 +770 66 loss.margin 8.577267952031871 +770 66 optimizer.lr 0.0020201918395141723 +770 66 negative_sampler.num_negs_per_pos 69.0 +770 66 training.batch_size 2.0 +770 67 model.embedding_dim 2.0 +770 67 model.relation_dim 0.0 +770 67 loss.margin 0.6058276988531519 +770 67 optimizer.lr 0.0011116161107055178 +770 67 negative_sampler.num_negs_per_pos 26.0 +770 67 training.batch_size 2.0 +770 68 model.embedding_dim 0.0 +770 68 model.relation_dim 0.0 +770 68 loss.margin 9.653524519755734 +770 68 optimizer.lr 0.05443489406010746 +770 68 negative_sampler.num_negs_per_pos 88.0 +770 68 training.batch_size 1.0 +770 69 model.embedding_dim 1.0 +770 69 model.relation_dim 1.0 +770 69 loss.margin 7.937737803967621 +770 69 optimizer.lr 0.0036818467354159455 +770 69 negative_sampler.num_negs_per_pos 50.0 +770 69 training.batch_size 0.0 +770 70 model.embedding_dim 0.0 +770 70 
model.relation_dim 1.0 +770 70 loss.margin 3.9347591797943755 +770 70 optimizer.lr 0.006468675224321166 +770 70 negative_sampler.num_negs_per_pos 53.0 +770 70 training.batch_size 0.0 +770 71 model.embedding_dim 2.0 +770 71 model.relation_dim 2.0 +770 71 loss.margin 3.765820489686442 +770 71 optimizer.lr 0.0025865595704902943 +770 71 negative_sampler.num_negs_per_pos 60.0 +770 71 training.batch_size 1.0 +770 72 model.embedding_dim 0.0 +770 72 model.relation_dim 0.0 +770 72 loss.margin 3.8190754615541422 +770 72 optimizer.lr 0.008831615243034371 +770 72 negative_sampler.num_negs_per_pos 8.0 +770 72 training.batch_size 0.0 +770 73 model.embedding_dim 0.0 +770 73 model.relation_dim 1.0 +770 73 loss.margin 7.783263707399262 +770 73 optimizer.lr 0.014116226793776991 +770 73 negative_sampler.num_negs_per_pos 65.0 +770 73 training.batch_size 2.0 +770 74 model.embedding_dim 0.0 +770 74 model.relation_dim 1.0 +770 74 loss.margin 4.450964307177354 +770 74 optimizer.lr 0.0013917592144325837 +770 74 negative_sampler.num_negs_per_pos 14.0 +770 74 training.batch_size 2.0 +770 75 model.embedding_dim 2.0 +770 75 model.relation_dim 1.0 +770 75 loss.margin 7.1051832560609105 +770 75 optimizer.lr 0.0010208047975262648 +770 75 negative_sampler.num_negs_per_pos 74.0 +770 75 training.batch_size 0.0 +770 76 model.embedding_dim 2.0 +770 76 model.relation_dim 1.0 +770 76 loss.margin 4.44573589916753 +770 76 optimizer.lr 0.03788351623166107 +770 76 negative_sampler.num_negs_per_pos 86.0 +770 76 training.batch_size 2.0 +770 77 model.embedding_dim 1.0 +770 77 model.relation_dim 1.0 +770 77 loss.margin 1.64510445847394 +770 77 optimizer.lr 0.001056570580864202 +770 77 negative_sampler.num_negs_per_pos 22.0 +770 77 training.batch_size 1.0 +770 78 model.embedding_dim 1.0 +770 78 model.relation_dim 0.0 +770 78 loss.margin 2.46240618259608 +770 78 optimizer.lr 0.0011450956433622865 +770 78 negative_sampler.num_negs_per_pos 92.0 +770 78 training.batch_size 0.0 +770 79 model.embedding_dim 1.0 +770 79 
model.relation_dim 1.0 +770 79 loss.margin 2.1438007283678555 +770 79 optimizer.lr 0.02874908989857435 +770 79 negative_sampler.num_negs_per_pos 83.0 +770 79 training.batch_size 1.0 +770 80 model.embedding_dim 0.0 +770 80 model.relation_dim 1.0 +770 80 loss.margin 8.971899026560177 +770 80 optimizer.lr 0.0029564019260262197 +770 80 negative_sampler.num_negs_per_pos 52.0 +770 80 training.batch_size 2.0 +770 81 model.embedding_dim 0.0 +770 81 model.relation_dim 0.0 +770 81 loss.margin 4.940909643906266 +770 81 optimizer.lr 0.012007486059056448 +770 81 negative_sampler.num_negs_per_pos 95.0 +770 81 training.batch_size 0.0 +770 82 model.embedding_dim 2.0 +770 82 model.relation_dim 2.0 +770 82 loss.margin 3.2329150787906724 +770 82 optimizer.lr 0.03646942560711962 +770 82 negative_sampler.num_negs_per_pos 62.0 +770 82 training.batch_size 0.0 +770 83 model.embedding_dim 0.0 +770 83 model.relation_dim 1.0 +770 83 loss.margin 7.141962930618837 +770 83 optimizer.lr 0.004387585963946972 +770 83 negative_sampler.num_negs_per_pos 51.0 +770 83 training.batch_size 2.0 +770 84 model.embedding_dim 2.0 +770 84 model.relation_dim 2.0 +770 84 loss.margin 2.284698054408225 +770 84 optimizer.lr 0.028094418472292555 +770 84 negative_sampler.num_negs_per_pos 34.0 +770 84 training.batch_size 1.0 +770 85 model.embedding_dim 2.0 +770 85 model.relation_dim 1.0 +770 85 loss.margin 6.320164937836197 +770 85 optimizer.lr 0.002191976118703858 +770 85 negative_sampler.num_negs_per_pos 60.0 +770 85 training.batch_size 1.0 +770 86 model.embedding_dim 0.0 +770 86 model.relation_dim 1.0 +770 86 loss.margin 8.372325260805392 +770 86 optimizer.lr 0.07961372440795085 +770 86 negative_sampler.num_negs_per_pos 88.0 +770 86 training.batch_size 0.0 +770 87 model.embedding_dim 2.0 +770 87 model.relation_dim 1.0 +770 87 loss.margin 5.9943196568409185 +770 87 optimizer.lr 0.0011578992334292078 +770 87 negative_sampler.num_negs_per_pos 82.0 +770 87 training.batch_size 0.0 +770 88 model.embedding_dim 1.0 +770 88 
model.relation_dim 0.0 +770 88 loss.margin 7.98348500357924 +770 88 optimizer.lr 0.016282992052005403 +770 88 negative_sampler.num_negs_per_pos 65.0 +770 88 training.batch_size 0.0 +770 89 model.embedding_dim 2.0 +770 89 model.relation_dim 1.0 +770 89 loss.margin 1.3102790145790857 +770 89 optimizer.lr 0.008941368106538583 +770 89 negative_sampler.num_negs_per_pos 34.0 +770 89 training.batch_size 2.0 +770 90 model.embedding_dim 0.0 +770 90 model.relation_dim 1.0 +770 90 loss.margin 9.16601203432757 +770 90 optimizer.lr 0.030890569953540607 +770 90 negative_sampler.num_negs_per_pos 82.0 +770 90 training.batch_size 1.0 +770 91 model.embedding_dim 0.0 +770 91 model.relation_dim 0.0 +770 91 loss.margin 0.815279834911416 +770 91 optimizer.lr 0.00680835738325777 +770 91 negative_sampler.num_negs_per_pos 71.0 +770 91 training.batch_size 1.0 +770 92 model.embedding_dim 2.0 +770 92 model.relation_dim 1.0 +770 92 loss.margin 8.734597394074596 +770 92 optimizer.lr 0.048034511764960174 +770 92 negative_sampler.num_negs_per_pos 36.0 +770 92 training.batch_size 1.0 +770 93 model.embedding_dim 0.0 +770 93 model.relation_dim 1.0 +770 93 loss.margin 8.198151377248022 +770 93 optimizer.lr 0.0013441303690387488 +770 93 negative_sampler.num_negs_per_pos 58.0 +770 93 training.batch_size 1.0 +770 94 model.embedding_dim 0.0 +770 94 model.relation_dim 1.0 +770 94 loss.margin 6.164918196932232 +770 94 optimizer.lr 0.07465380294124402 +770 94 negative_sampler.num_negs_per_pos 79.0 +770 94 training.batch_size 0.0 +770 95 model.embedding_dim 1.0 +770 95 model.relation_dim 1.0 +770 95 loss.margin 4.759363549032702 +770 95 optimizer.lr 0.010413787909622947 +770 95 negative_sampler.num_negs_per_pos 40.0 +770 95 training.batch_size 0.0 +770 96 model.embedding_dim 2.0 +770 96 model.relation_dim 1.0 +770 96 loss.margin 8.234253927489792 +770 96 optimizer.lr 0.03443007745465244 +770 96 negative_sampler.num_negs_per_pos 70.0 +770 96 training.batch_size 0.0 +770 97 model.embedding_dim 1.0 +770 97 
model.relation_dim 2.0 +770 97 loss.margin 9.81836093661255 +770 97 optimizer.lr 0.01216662868248264 +770 97 negative_sampler.num_negs_per_pos 30.0 +770 97 training.batch_size 2.0 +770 98 model.embedding_dim 0.0 +770 98 model.relation_dim 2.0 +770 98 loss.margin 9.955673182495747 +770 98 optimizer.lr 0.07329578579320338 +770 98 negative_sampler.num_negs_per_pos 93.0 +770 98 training.batch_size 1.0 +770 99 model.embedding_dim 2.0 +770 99 model.relation_dim 0.0 +770 99 loss.margin 1.3620665796047366 +770 99 optimizer.lr 0.037093426684494536 +770 99 negative_sampler.num_negs_per_pos 4.0 +770 99 training.batch_size 1.0 +770 100 model.embedding_dim 0.0 +770 100 model.relation_dim 0.0 +770 100 loss.margin 5.86413466067194 +770 100 optimizer.lr 0.006945316389805193 +770 100 negative_sampler.num_negs_per_pos 18.0 +770 100 training.batch_size 0.0 +770 1 dataset """wn18rr""" +770 1 model """transd""" +770 1 loss """marginranking""" +770 1 regularizer """no""" +770 1 optimizer """adam""" +770 1 training_loop """owa""" +770 1 negative_sampler """basic""" +770 1 evaluator """rankbased""" +770 2 dataset """wn18rr""" +770 2 model """transd""" +770 2 loss """marginranking""" +770 2 regularizer """no""" +770 2 optimizer """adam""" +770 2 training_loop """owa""" +770 2 negative_sampler """basic""" +770 2 evaluator """rankbased""" +770 3 dataset """wn18rr""" +770 3 model """transd""" +770 3 loss """marginranking""" +770 3 regularizer """no""" +770 3 optimizer """adam""" +770 3 training_loop """owa""" +770 3 negative_sampler """basic""" +770 3 evaluator """rankbased""" +770 4 dataset """wn18rr""" +770 4 model """transd""" +770 4 loss """marginranking""" +770 4 regularizer """no""" +770 4 optimizer """adam""" +770 4 training_loop """owa""" +770 4 negative_sampler """basic""" +770 4 evaluator """rankbased""" +770 5 dataset """wn18rr""" +770 5 model """transd""" +770 5 loss """marginranking""" +770 5 regularizer """no""" +770 5 optimizer """adam""" +770 5 training_loop """owa""" +770 5 
negative_sampler """basic""" +770 5 evaluator """rankbased""" +770 6 dataset """wn18rr""" +770 6 model """transd""" +770 6 loss """marginranking""" +770 6 regularizer """no""" +770 6 optimizer """adam""" +770 6 training_loop """owa""" +770 6 negative_sampler """basic""" +770 6 evaluator """rankbased""" +770 7 dataset """wn18rr""" +770 7 model """transd""" +770 7 loss """marginranking""" +770 7 regularizer """no""" +770 7 optimizer """adam""" +770 7 training_loop """owa""" +770 7 negative_sampler """basic""" +770 7 evaluator """rankbased""" +770 8 dataset """wn18rr""" +770 8 model """transd""" +770 8 loss """marginranking""" +770 8 regularizer """no""" +770 8 optimizer """adam""" +770 8 training_loop """owa""" +770 8 negative_sampler """basic""" +770 8 evaluator """rankbased""" +770 9 dataset """wn18rr""" +770 9 model """transd""" +770 9 loss """marginranking""" +770 9 regularizer """no""" +770 9 optimizer """adam""" +770 9 training_loop """owa""" +770 9 negative_sampler """basic""" +770 9 evaluator """rankbased""" +770 10 dataset """wn18rr""" +770 10 model """transd""" +770 10 loss """marginranking""" +770 10 regularizer """no""" +770 10 optimizer """adam""" +770 10 training_loop """owa""" +770 10 negative_sampler """basic""" +770 10 evaluator """rankbased""" +770 11 dataset """wn18rr""" +770 11 model """transd""" +770 11 loss """marginranking""" +770 11 regularizer """no""" +770 11 optimizer """adam""" +770 11 training_loop """owa""" +770 11 negative_sampler """basic""" +770 11 evaluator """rankbased""" +770 12 dataset """wn18rr""" +770 12 model """transd""" +770 12 loss """marginranking""" +770 12 regularizer """no""" +770 12 optimizer """adam""" +770 12 training_loop """owa""" +770 12 negative_sampler """basic""" +770 12 evaluator """rankbased""" +770 13 dataset """wn18rr""" +770 13 model """transd""" +770 13 loss """marginranking""" +770 13 regularizer """no""" +770 13 optimizer """adam""" +770 13 training_loop """owa""" +770 13 negative_sampler """basic""" 
+770 13 evaluator """rankbased""" +770 14 dataset """wn18rr""" +770 14 model """transd""" +770 14 loss """marginranking""" +770 14 regularizer """no""" +770 14 optimizer """adam""" +770 14 training_loop """owa""" +770 14 negative_sampler """basic""" +770 14 evaluator """rankbased""" +770 15 dataset """wn18rr""" +770 15 model """transd""" +770 15 loss """marginranking""" +770 15 regularizer """no""" +770 15 optimizer """adam""" +770 15 training_loop """owa""" +770 15 negative_sampler """basic""" +770 15 evaluator """rankbased""" +770 16 dataset """wn18rr""" +770 16 model """transd""" +770 16 loss """marginranking""" +770 16 regularizer """no""" +770 16 optimizer """adam""" +770 16 training_loop """owa""" +770 16 negative_sampler """basic""" +770 16 evaluator """rankbased""" +770 17 dataset """wn18rr""" +770 17 model """transd""" +770 17 loss """marginranking""" +770 17 regularizer """no""" +770 17 optimizer """adam""" +770 17 training_loop """owa""" +770 17 negative_sampler """basic""" +770 17 evaluator """rankbased""" +770 18 dataset """wn18rr""" +770 18 model """transd""" +770 18 loss """marginranking""" +770 18 regularizer """no""" +770 18 optimizer """adam""" +770 18 training_loop """owa""" +770 18 negative_sampler """basic""" +770 18 evaluator """rankbased""" +770 19 dataset """wn18rr""" +770 19 model """transd""" +770 19 loss """marginranking""" +770 19 regularizer """no""" +770 19 optimizer """adam""" +770 19 training_loop """owa""" +770 19 negative_sampler """basic""" +770 19 evaluator """rankbased""" +770 20 dataset """wn18rr""" +770 20 model """transd""" +770 20 loss """marginranking""" +770 20 regularizer """no""" +770 20 optimizer """adam""" +770 20 training_loop """owa""" +770 20 negative_sampler """basic""" +770 20 evaluator """rankbased""" +770 21 dataset """wn18rr""" +770 21 model """transd""" +770 21 loss """marginranking""" +770 21 regularizer """no""" +770 21 optimizer """adam""" +770 21 training_loop """owa""" +770 21 negative_sampler """basic""" 
+770 21 evaluator """rankbased""" +770 22 dataset """wn18rr""" +770 22 model """transd""" +770 22 loss """marginranking""" +770 22 regularizer """no""" +770 22 optimizer """adam""" +770 22 training_loop """owa""" +770 22 negative_sampler """basic""" +770 22 evaluator """rankbased""" +770 23 dataset """wn18rr""" +770 23 model """transd""" +770 23 loss """marginranking""" +770 23 regularizer """no""" +770 23 optimizer """adam""" +770 23 training_loop """owa""" +770 23 negative_sampler """basic""" +770 23 evaluator """rankbased""" +770 24 dataset """wn18rr""" +770 24 model """transd""" +770 24 loss """marginranking""" +770 24 regularizer """no""" +770 24 optimizer """adam""" +770 24 training_loop """owa""" +770 24 negative_sampler """basic""" +770 24 evaluator """rankbased""" +770 25 dataset """wn18rr""" +770 25 model """transd""" +770 25 loss """marginranking""" +770 25 regularizer """no""" +770 25 optimizer """adam""" +770 25 training_loop """owa""" +770 25 negative_sampler """basic""" +770 25 evaluator """rankbased""" +770 26 dataset """wn18rr""" +770 26 model """transd""" +770 26 loss """marginranking""" +770 26 regularizer """no""" +770 26 optimizer """adam""" +770 26 training_loop """owa""" +770 26 negative_sampler """basic""" +770 26 evaluator """rankbased""" +770 27 dataset """wn18rr""" +770 27 model """transd""" +770 27 loss """marginranking""" +770 27 regularizer """no""" +770 27 optimizer """adam""" +770 27 training_loop """owa""" +770 27 negative_sampler """basic""" +770 27 evaluator """rankbased""" +770 28 dataset """wn18rr""" +770 28 model """transd""" +770 28 loss """marginranking""" +770 28 regularizer """no""" +770 28 optimizer """adam""" +770 28 training_loop """owa""" +770 28 negative_sampler """basic""" +770 28 evaluator """rankbased""" +770 29 dataset """wn18rr""" +770 29 model """transd""" +770 29 loss """marginranking""" +770 29 regularizer """no""" +770 29 optimizer """adam""" +770 29 training_loop """owa""" +770 29 negative_sampler """basic""" 
+770 29 evaluator """rankbased""" +770 30 dataset """wn18rr""" +770 30 model """transd""" +770 30 loss """marginranking""" +770 30 regularizer """no""" +770 30 optimizer """adam""" +770 30 training_loop """owa""" +770 30 negative_sampler """basic""" +770 30 evaluator """rankbased""" +770 31 dataset """wn18rr""" +770 31 model """transd""" +770 31 loss """marginranking""" +770 31 regularizer """no""" +770 31 optimizer """adam""" +770 31 training_loop """owa""" +770 31 negative_sampler """basic""" +770 31 evaluator """rankbased""" +770 32 dataset """wn18rr""" +770 32 model """transd""" +770 32 loss """marginranking""" +770 32 regularizer """no""" +770 32 optimizer """adam""" +770 32 training_loop """owa""" +770 32 negative_sampler """basic""" +770 32 evaluator """rankbased""" +770 33 dataset """wn18rr""" +770 33 model """transd""" +770 33 loss """marginranking""" +770 33 regularizer """no""" +770 33 optimizer """adam""" +770 33 training_loop """owa""" +770 33 negative_sampler """basic""" +770 33 evaluator """rankbased""" +770 34 dataset """wn18rr""" +770 34 model """transd""" +770 34 loss """marginranking""" +770 34 regularizer """no""" +770 34 optimizer """adam""" +770 34 training_loop """owa""" +770 34 negative_sampler """basic""" +770 34 evaluator """rankbased""" +770 35 dataset """wn18rr""" +770 35 model """transd""" +770 35 loss """marginranking""" +770 35 regularizer """no""" +770 35 optimizer """adam""" +770 35 training_loop """owa""" +770 35 negative_sampler """basic""" +770 35 evaluator """rankbased""" +770 36 dataset """wn18rr""" +770 36 model """transd""" +770 36 loss """marginranking""" +770 36 regularizer """no""" +770 36 optimizer """adam""" +770 36 training_loop """owa""" +770 36 negative_sampler """basic""" +770 36 evaluator """rankbased""" +770 37 dataset """wn18rr""" +770 37 model """transd""" +770 37 loss """marginranking""" +770 37 regularizer """no""" +770 37 optimizer """adam""" +770 37 training_loop """owa""" +770 37 negative_sampler """basic""" 
+770 37 evaluator """rankbased""" +770 38 dataset """wn18rr""" +770 38 model """transd""" +770 38 loss """marginranking""" +770 38 regularizer """no""" +770 38 optimizer """adam""" +770 38 training_loop """owa""" +770 38 negative_sampler """basic""" +770 38 evaluator """rankbased""" +770 39 dataset """wn18rr""" +770 39 model """transd""" +770 39 loss """marginranking""" +770 39 regularizer """no""" +770 39 optimizer """adam""" +770 39 training_loop """owa""" +770 39 negative_sampler """basic""" +770 39 evaluator """rankbased""" +770 40 dataset """wn18rr""" +770 40 model """transd""" +770 40 loss """marginranking""" +770 40 regularizer """no""" +770 40 optimizer """adam""" +770 40 training_loop """owa""" +770 40 negative_sampler """basic""" +770 40 evaluator """rankbased""" +770 41 dataset """wn18rr""" +770 41 model """transd""" +770 41 loss """marginranking""" +770 41 regularizer """no""" +770 41 optimizer """adam""" +770 41 training_loop """owa""" +770 41 negative_sampler """basic""" +770 41 evaluator """rankbased""" +770 42 dataset """wn18rr""" +770 42 model """transd""" +770 42 loss """marginranking""" +770 42 regularizer """no""" +770 42 optimizer """adam""" +770 42 training_loop """owa""" +770 42 negative_sampler """basic""" +770 42 evaluator """rankbased""" +770 43 dataset """wn18rr""" +770 43 model """transd""" +770 43 loss """marginranking""" +770 43 regularizer """no""" +770 43 optimizer """adam""" +770 43 training_loop """owa""" +770 43 negative_sampler """basic""" +770 43 evaluator """rankbased""" +770 44 dataset """wn18rr""" +770 44 model """transd""" +770 44 loss """marginranking""" +770 44 regularizer """no""" +770 44 optimizer """adam""" +770 44 training_loop """owa""" +770 44 negative_sampler """basic""" +770 44 evaluator """rankbased""" +770 45 dataset """wn18rr""" +770 45 model """transd""" +770 45 loss """marginranking""" +770 45 regularizer """no""" +770 45 optimizer """adam""" +770 45 training_loop """owa""" +770 45 negative_sampler """basic""" 
+770 45 evaluator """rankbased""" +770 46 dataset """wn18rr""" +770 46 model """transd""" +770 46 loss """marginranking""" +770 46 regularizer """no""" +770 46 optimizer """adam""" +770 46 training_loop """owa""" +770 46 negative_sampler """basic""" +770 46 evaluator """rankbased""" +770 47 dataset """wn18rr""" +770 47 model """transd""" +770 47 loss """marginranking""" +770 47 regularizer """no""" +770 47 optimizer """adam""" +770 47 training_loop """owa""" +770 47 negative_sampler """basic""" +770 47 evaluator """rankbased""" +770 48 dataset """wn18rr""" +770 48 model """transd""" +770 48 loss """marginranking""" +770 48 regularizer """no""" +770 48 optimizer """adam""" +770 48 training_loop """owa""" +770 48 negative_sampler """basic""" +770 48 evaluator """rankbased""" +770 49 dataset """wn18rr""" +770 49 model """transd""" +770 49 loss """marginranking""" +770 49 regularizer """no""" +770 49 optimizer """adam""" +770 49 training_loop """owa""" +770 49 negative_sampler """basic""" +770 49 evaluator """rankbased""" +770 50 dataset """wn18rr""" +770 50 model """transd""" +770 50 loss """marginranking""" +770 50 regularizer """no""" +770 50 optimizer """adam""" +770 50 training_loop """owa""" +770 50 negative_sampler """basic""" +770 50 evaluator """rankbased""" +770 51 dataset """wn18rr""" +770 51 model """transd""" +770 51 loss """marginranking""" +770 51 regularizer """no""" +770 51 optimizer """adam""" +770 51 training_loop """owa""" +770 51 negative_sampler """basic""" +770 51 evaluator """rankbased""" +770 52 dataset """wn18rr""" +770 52 model """transd""" +770 52 loss """marginranking""" +770 52 regularizer """no""" +770 52 optimizer """adam""" +770 52 training_loop """owa""" +770 52 negative_sampler """basic""" +770 52 evaluator """rankbased""" +770 53 dataset """wn18rr""" +770 53 model """transd""" +770 53 loss """marginranking""" +770 53 regularizer """no""" +770 53 optimizer """adam""" +770 53 training_loop """owa""" +770 53 negative_sampler """basic""" 
+770 53 evaluator """rankbased""" +770 54 dataset """wn18rr""" +770 54 model """transd""" +770 54 loss """marginranking""" +770 54 regularizer """no""" +770 54 optimizer """adam""" +770 54 training_loop """owa""" +770 54 negative_sampler """basic""" +770 54 evaluator """rankbased""" +770 55 dataset """wn18rr""" +770 55 model """transd""" +770 55 loss """marginranking""" +770 55 regularizer """no""" +770 55 optimizer """adam""" +770 55 training_loop """owa""" +770 55 negative_sampler """basic""" +770 55 evaluator """rankbased""" +770 56 dataset """wn18rr""" +770 56 model """transd""" +770 56 loss """marginranking""" +770 56 regularizer """no""" +770 56 optimizer """adam""" +770 56 training_loop """owa""" +770 56 negative_sampler """basic""" +770 56 evaluator """rankbased""" +770 57 dataset """wn18rr""" +770 57 model """transd""" +770 57 loss """marginranking""" +770 57 regularizer """no""" +770 57 optimizer """adam""" +770 57 training_loop """owa""" +770 57 negative_sampler """basic""" +770 57 evaluator """rankbased""" +770 58 dataset """wn18rr""" +770 58 model """transd""" +770 58 loss """marginranking""" +770 58 regularizer """no""" +770 58 optimizer """adam""" +770 58 training_loop """owa""" +770 58 negative_sampler """basic""" +770 58 evaluator """rankbased""" +770 59 dataset """wn18rr""" +770 59 model """transd""" +770 59 loss """marginranking""" +770 59 regularizer """no""" +770 59 optimizer """adam""" +770 59 training_loop """owa""" +770 59 negative_sampler """basic""" +770 59 evaluator """rankbased""" +770 60 dataset """wn18rr""" +770 60 model """transd""" +770 60 loss """marginranking""" +770 60 regularizer """no""" +770 60 optimizer """adam""" +770 60 training_loop """owa""" +770 60 negative_sampler """basic""" +770 60 evaluator """rankbased""" +770 61 dataset """wn18rr""" +770 61 model """transd""" +770 61 loss """marginranking""" +770 61 regularizer """no""" +770 61 optimizer """adam""" +770 61 training_loop """owa""" +770 61 negative_sampler """basic""" 
+770 61 evaluator """rankbased""" +770 62 dataset """wn18rr""" +770 62 model """transd""" +770 62 loss """marginranking""" +770 62 regularizer """no""" +770 62 optimizer """adam""" +770 62 training_loop """owa""" +770 62 negative_sampler """basic""" +770 62 evaluator """rankbased""" +770 63 dataset """wn18rr""" +770 63 model """transd""" +770 63 loss """marginranking""" +770 63 regularizer """no""" +770 63 optimizer """adam""" +770 63 training_loop """owa""" +770 63 negative_sampler """basic""" +770 63 evaluator """rankbased""" +770 64 dataset """wn18rr""" +770 64 model """transd""" +770 64 loss """marginranking""" +770 64 regularizer """no""" +770 64 optimizer """adam""" +770 64 training_loop """owa""" +770 64 negative_sampler """basic""" +770 64 evaluator """rankbased""" +770 65 dataset """wn18rr""" +770 65 model """transd""" +770 65 loss """marginranking""" +770 65 regularizer """no""" +770 65 optimizer """adam""" +770 65 training_loop """owa""" +770 65 negative_sampler """basic""" +770 65 evaluator """rankbased""" +770 66 dataset """wn18rr""" +770 66 model """transd""" +770 66 loss """marginranking""" +770 66 regularizer """no""" +770 66 optimizer """adam""" +770 66 training_loop """owa""" +770 66 negative_sampler """basic""" +770 66 evaluator """rankbased""" +770 67 dataset """wn18rr""" +770 67 model """transd""" +770 67 loss """marginranking""" +770 67 regularizer """no""" +770 67 optimizer """adam""" +770 67 training_loop """owa""" +770 67 negative_sampler """basic""" +770 67 evaluator """rankbased""" +770 68 dataset """wn18rr""" +770 68 model """transd""" +770 68 loss """marginranking""" +770 68 regularizer """no""" +770 68 optimizer """adam""" +770 68 training_loop """owa""" +770 68 negative_sampler """basic""" +770 68 evaluator """rankbased""" +770 69 dataset """wn18rr""" +770 69 model """transd""" +770 69 loss """marginranking""" +770 69 regularizer """no""" +770 69 optimizer """adam""" +770 69 training_loop """owa""" +770 69 negative_sampler """basic""" 
+770 69 evaluator """rankbased""" +770 70 dataset """wn18rr""" +770 70 model """transd""" +770 70 loss """marginranking""" +770 70 regularizer """no""" +770 70 optimizer """adam""" +770 70 training_loop """owa""" +770 70 negative_sampler """basic""" +770 70 evaluator """rankbased""" +770 71 dataset """wn18rr""" +770 71 model """transd""" +770 71 loss """marginranking""" +770 71 regularizer """no""" +770 71 optimizer """adam""" +770 71 training_loop """owa""" +770 71 negative_sampler """basic""" +770 71 evaluator """rankbased""" +770 72 dataset """wn18rr""" +770 72 model """transd""" +770 72 loss """marginranking""" +770 72 regularizer """no""" +770 72 optimizer """adam""" +770 72 training_loop """owa""" +770 72 negative_sampler """basic""" +770 72 evaluator """rankbased""" +770 73 dataset """wn18rr""" +770 73 model """transd""" +770 73 loss """marginranking""" +770 73 regularizer """no""" +770 73 optimizer """adam""" +770 73 training_loop """owa""" +770 73 negative_sampler """basic""" +770 73 evaluator """rankbased""" +770 74 dataset """wn18rr""" +770 74 model """transd""" +770 74 loss """marginranking""" +770 74 regularizer """no""" +770 74 optimizer """adam""" +770 74 training_loop """owa""" +770 74 negative_sampler """basic""" +770 74 evaluator """rankbased""" +770 75 dataset """wn18rr""" +770 75 model """transd""" +770 75 loss """marginranking""" +770 75 regularizer """no""" +770 75 optimizer """adam""" +770 75 training_loop """owa""" +770 75 negative_sampler """basic""" +770 75 evaluator """rankbased""" +770 76 dataset """wn18rr""" +770 76 model """transd""" +770 76 loss """marginranking""" +770 76 regularizer """no""" +770 76 optimizer """adam""" +770 76 training_loop """owa""" +770 76 negative_sampler """basic""" +770 76 evaluator """rankbased""" +770 77 dataset """wn18rr""" +770 77 model """transd""" +770 77 loss """marginranking""" +770 77 regularizer """no""" +770 77 optimizer """adam""" +770 77 training_loop """owa""" +770 77 negative_sampler """basic""" 
+770 77 evaluator """rankbased""" +770 78 dataset """wn18rr""" +770 78 model """transd""" +770 78 loss """marginranking""" +770 78 regularizer """no""" +770 78 optimizer """adam""" +770 78 training_loop """owa""" +770 78 negative_sampler """basic""" +770 78 evaluator """rankbased""" +770 79 dataset """wn18rr""" +770 79 model """transd""" +770 79 loss """marginranking""" +770 79 regularizer """no""" +770 79 optimizer """adam""" +770 79 training_loop """owa""" +770 79 negative_sampler """basic""" +770 79 evaluator """rankbased""" +770 80 dataset """wn18rr""" +770 80 model """transd""" +770 80 loss """marginranking""" +770 80 regularizer """no""" +770 80 optimizer """adam""" +770 80 training_loop """owa""" +770 80 negative_sampler """basic""" +770 80 evaluator """rankbased""" +770 81 dataset """wn18rr""" +770 81 model """transd""" +770 81 loss """marginranking""" +770 81 regularizer """no""" +770 81 optimizer """adam""" +770 81 training_loop """owa""" +770 81 negative_sampler """basic""" +770 81 evaluator """rankbased""" +770 82 dataset """wn18rr""" +770 82 model """transd""" +770 82 loss """marginranking""" +770 82 regularizer """no""" +770 82 optimizer """adam""" +770 82 training_loop """owa""" +770 82 negative_sampler """basic""" +770 82 evaluator """rankbased""" +770 83 dataset """wn18rr""" +770 83 model """transd""" +770 83 loss """marginranking""" +770 83 regularizer """no""" +770 83 optimizer """adam""" +770 83 training_loop """owa""" +770 83 negative_sampler """basic""" +770 83 evaluator """rankbased""" +770 84 dataset """wn18rr""" +770 84 model """transd""" +770 84 loss """marginranking""" +770 84 regularizer """no""" +770 84 optimizer """adam""" +770 84 training_loop """owa""" +770 84 negative_sampler """basic""" +770 84 evaluator """rankbased""" +770 85 dataset """wn18rr""" +770 85 model """transd""" +770 85 loss """marginranking""" +770 85 regularizer """no""" +770 85 optimizer """adam""" +770 85 training_loop """owa""" +770 85 negative_sampler """basic""" 
+770 85 evaluator """rankbased""" +770 86 dataset """wn18rr""" +770 86 model """transd""" +770 86 loss """marginranking""" +770 86 regularizer """no""" +770 86 optimizer """adam""" +770 86 training_loop """owa""" +770 86 negative_sampler """basic""" +770 86 evaluator """rankbased""" +770 87 dataset """wn18rr""" +770 87 model """transd""" +770 87 loss """marginranking""" +770 87 regularizer """no""" +770 87 optimizer """adam""" +770 87 training_loop """owa""" +770 87 negative_sampler """basic""" +770 87 evaluator """rankbased""" +770 88 dataset """wn18rr""" +770 88 model """transd""" +770 88 loss """marginranking""" +770 88 regularizer """no""" +770 88 optimizer """adam""" +770 88 training_loop """owa""" +770 88 negative_sampler """basic""" +770 88 evaluator """rankbased""" +770 89 dataset """wn18rr""" +770 89 model """transd""" +770 89 loss """marginranking""" +770 89 regularizer """no""" +770 89 optimizer """adam""" +770 89 training_loop """owa""" +770 89 negative_sampler """basic""" +770 89 evaluator """rankbased""" +770 90 dataset """wn18rr""" +770 90 model """transd""" +770 90 loss """marginranking""" +770 90 regularizer """no""" +770 90 optimizer """adam""" +770 90 training_loop """owa""" +770 90 negative_sampler """basic""" +770 90 evaluator """rankbased""" +770 91 dataset """wn18rr""" +770 91 model """transd""" +770 91 loss """marginranking""" +770 91 regularizer """no""" +770 91 optimizer """adam""" +770 91 training_loop """owa""" +770 91 negative_sampler """basic""" +770 91 evaluator """rankbased""" +770 92 dataset """wn18rr""" +770 92 model """transd""" +770 92 loss """marginranking""" +770 92 regularizer """no""" +770 92 optimizer """adam""" +770 92 training_loop """owa""" +770 92 negative_sampler """basic""" +770 92 evaluator """rankbased""" +770 93 dataset """wn18rr""" +770 93 model """transd""" +770 93 loss """marginranking""" +770 93 regularizer """no""" +770 93 optimizer """adam""" +770 93 training_loop """owa""" +770 93 negative_sampler """basic""" 
+770 93 evaluator """rankbased""" +770 94 dataset """wn18rr""" +770 94 model """transd""" +770 94 loss """marginranking""" +770 94 regularizer """no""" +770 94 optimizer """adam""" +770 94 training_loop """owa""" +770 94 negative_sampler """basic""" +770 94 evaluator """rankbased""" +770 95 dataset """wn18rr""" +770 95 model """transd""" +770 95 loss """marginranking""" +770 95 regularizer """no""" +770 95 optimizer """adam""" +770 95 training_loop """owa""" +770 95 negative_sampler """basic""" +770 95 evaluator """rankbased""" +770 96 dataset """wn18rr""" +770 96 model """transd""" +770 96 loss """marginranking""" +770 96 regularizer """no""" +770 96 optimizer """adam""" +770 96 training_loop """owa""" +770 96 negative_sampler """basic""" +770 96 evaluator """rankbased""" +770 97 dataset """wn18rr""" +770 97 model """transd""" +770 97 loss """marginranking""" +770 97 regularizer """no""" +770 97 optimizer """adam""" +770 97 training_loop """owa""" +770 97 negative_sampler """basic""" +770 97 evaluator """rankbased""" +770 98 dataset """wn18rr""" +770 98 model """transd""" +770 98 loss """marginranking""" +770 98 regularizer """no""" +770 98 optimizer """adam""" +770 98 training_loop """owa""" +770 98 negative_sampler """basic""" +770 98 evaluator """rankbased""" +770 99 dataset """wn18rr""" +770 99 model """transd""" +770 99 loss """marginranking""" +770 99 regularizer """no""" +770 99 optimizer """adam""" +770 99 training_loop """owa""" +770 99 negative_sampler """basic""" +770 99 evaluator """rankbased""" +770 100 dataset """wn18rr""" +770 100 model """transd""" +770 100 loss """marginranking""" +770 100 regularizer """no""" +770 100 optimizer """adam""" +770 100 training_loop """owa""" +770 100 negative_sampler """basic""" +770 100 evaluator """rankbased""" +771 1 model.embedding_dim 1.0 +771 1 model.relation_dim 2.0 +771 1 optimizer.lr 0.005197156695302792 +771 1 negative_sampler.num_negs_per_pos 61.0 +771 1 training.batch_size 0.0 +771 2 model.embedding_dim 
1.0 +771 2 model.relation_dim 2.0 +771 2 optimizer.lr 0.00155346319611732 +771 2 negative_sampler.num_negs_per_pos 47.0 +771 2 training.batch_size 1.0 +771 3 model.embedding_dim 0.0 +771 3 model.relation_dim 0.0 +771 3 optimizer.lr 0.003495973582743322 +771 3 negative_sampler.num_negs_per_pos 44.0 +771 3 training.batch_size 1.0 +771 4 model.embedding_dim 1.0 +771 4 model.relation_dim 2.0 +771 4 optimizer.lr 0.001922989243584882 +771 4 negative_sampler.num_negs_per_pos 67.0 +771 4 training.batch_size 1.0 +771 5 model.embedding_dim 0.0 +771 5 model.relation_dim 1.0 +771 5 optimizer.lr 0.0017219847649972409 +771 5 negative_sampler.num_negs_per_pos 13.0 +771 5 training.batch_size 0.0 +771 6 model.embedding_dim 0.0 +771 6 model.relation_dim 1.0 +771 6 optimizer.lr 0.0015881379927625704 +771 6 negative_sampler.num_negs_per_pos 0.0 +771 6 training.batch_size 1.0 +771 7 model.embedding_dim 0.0 +771 7 model.relation_dim 0.0 +771 7 optimizer.lr 0.004279987198038307 +771 7 negative_sampler.num_negs_per_pos 65.0 +771 7 training.batch_size 1.0 +771 8 model.embedding_dim 0.0 +771 8 model.relation_dim 2.0 +771 8 optimizer.lr 0.001985908819713858 +771 8 negative_sampler.num_negs_per_pos 72.0 +771 8 training.batch_size 2.0 +771 9 model.embedding_dim 0.0 +771 9 model.relation_dim 0.0 +771 9 optimizer.lr 0.007724450446733624 +771 9 negative_sampler.num_negs_per_pos 17.0 +771 9 training.batch_size 0.0 +771 10 model.embedding_dim 1.0 +771 10 model.relation_dim 1.0 +771 10 optimizer.lr 0.0778180412986388 +771 10 negative_sampler.num_negs_per_pos 63.0 +771 10 training.batch_size 0.0 +771 11 model.embedding_dim 2.0 +771 11 model.relation_dim 2.0 +771 11 optimizer.lr 0.020400776129771765 +771 11 negative_sampler.num_negs_per_pos 22.0 +771 11 training.batch_size 1.0 +771 12 model.embedding_dim 0.0 +771 12 model.relation_dim 0.0 +771 12 optimizer.lr 0.0183073617990868 +771 12 negative_sampler.num_negs_per_pos 82.0 +771 12 training.batch_size 0.0 +771 13 model.embedding_dim 1.0 +771 13 
model.relation_dim 2.0 +771 13 optimizer.lr 0.002987293169315846 +771 13 negative_sampler.num_negs_per_pos 13.0 +771 13 training.batch_size 2.0 +771 14 model.embedding_dim 1.0 +771 14 model.relation_dim 0.0 +771 14 optimizer.lr 0.0019171673575564534 +771 14 negative_sampler.num_negs_per_pos 45.0 +771 14 training.batch_size 1.0 +771 15 model.embedding_dim 1.0 +771 15 model.relation_dim 1.0 +771 15 optimizer.lr 0.04821222126458825 +771 15 negative_sampler.num_negs_per_pos 92.0 +771 15 training.batch_size 1.0 +771 16 model.embedding_dim 0.0 +771 16 model.relation_dim 0.0 +771 16 optimizer.lr 0.04000049731101421 +771 16 negative_sampler.num_negs_per_pos 97.0 +771 16 training.batch_size 2.0 +771 17 model.embedding_dim 1.0 +771 17 model.relation_dim 1.0 +771 17 optimizer.lr 0.0012932200010005812 +771 17 negative_sampler.num_negs_per_pos 32.0 +771 17 training.batch_size 1.0 +771 18 model.embedding_dim 0.0 +771 18 model.relation_dim 0.0 +771 18 optimizer.lr 0.001598279418258294 +771 18 negative_sampler.num_negs_per_pos 56.0 +771 18 training.batch_size 1.0 +771 19 model.embedding_dim 1.0 +771 19 model.relation_dim 2.0 +771 19 optimizer.lr 0.004906281577742174 +771 19 negative_sampler.num_negs_per_pos 88.0 +771 19 training.batch_size 2.0 +771 20 model.embedding_dim 2.0 +771 20 model.relation_dim 0.0 +771 20 optimizer.lr 0.0010877079764025927 +771 20 negative_sampler.num_negs_per_pos 34.0 +771 20 training.batch_size 2.0 +771 21 model.embedding_dim 2.0 +771 21 model.relation_dim 1.0 +771 21 optimizer.lr 0.00892024772572672 +771 21 negative_sampler.num_negs_per_pos 81.0 +771 21 training.batch_size 0.0 +771 22 model.embedding_dim 1.0 +771 22 model.relation_dim 2.0 +771 22 optimizer.lr 0.0021187751525043068 +771 22 negative_sampler.num_negs_per_pos 58.0 +771 22 training.batch_size 2.0 +771 23 model.embedding_dim 2.0 +771 23 model.relation_dim 0.0 +771 23 optimizer.lr 0.03163137187559644 +771 23 negative_sampler.num_negs_per_pos 50.0 +771 23 training.batch_size 1.0 +771 24 
model.embedding_dim 0.0 +771 24 model.relation_dim 2.0 +771 24 optimizer.lr 0.008697300306311715 +771 24 negative_sampler.num_negs_per_pos 53.0 +771 24 training.batch_size 0.0 +771 25 model.embedding_dim 0.0 +771 25 model.relation_dim 0.0 +771 25 optimizer.lr 0.05325409778527392 +771 25 negative_sampler.num_negs_per_pos 49.0 +771 25 training.batch_size 2.0 +771 26 model.embedding_dim 0.0 +771 26 model.relation_dim 0.0 +771 26 optimizer.lr 0.0025435204921451173 +771 26 negative_sampler.num_negs_per_pos 3.0 +771 26 training.batch_size 2.0 +771 27 model.embedding_dim 1.0 +771 27 model.relation_dim 0.0 +771 27 optimizer.lr 0.01395636320983812 +771 27 negative_sampler.num_negs_per_pos 86.0 +771 27 training.batch_size 1.0 +771 28 model.embedding_dim 1.0 +771 28 model.relation_dim 2.0 +771 28 optimizer.lr 0.0010658945819148541 +771 28 negative_sampler.num_negs_per_pos 65.0 +771 28 training.batch_size 1.0 +771 29 model.embedding_dim 2.0 +771 29 model.relation_dim 0.0 +771 29 optimizer.lr 0.09663471665457168 +771 29 negative_sampler.num_negs_per_pos 83.0 +771 29 training.batch_size 2.0 +771 30 model.embedding_dim 0.0 +771 30 model.relation_dim 0.0 +771 30 optimizer.lr 0.04370881842417911 +771 30 negative_sampler.num_negs_per_pos 99.0 +771 30 training.batch_size 1.0 +771 31 model.embedding_dim 2.0 +771 31 model.relation_dim 2.0 +771 31 optimizer.lr 0.01678769970141795 +771 31 negative_sampler.num_negs_per_pos 79.0 +771 31 training.batch_size 1.0 +771 32 model.embedding_dim 2.0 +771 32 model.relation_dim 0.0 +771 32 optimizer.lr 0.001732283653324968 +771 32 negative_sampler.num_negs_per_pos 57.0 +771 32 training.batch_size 0.0 +771 33 model.embedding_dim 2.0 +771 33 model.relation_dim 0.0 +771 33 optimizer.lr 0.007603769897179958 +771 33 negative_sampler.num_negs_per_pos 14.0 +771 33 training.batch_size 1.0 +771 34 model.embedding_dim 0.0 +771 34 model.relation_dim 2.0 +771 34 optimizer.lr 0.04169810088028495 +771 34 negative_sampler.num_negs_per_pos 26.0 +771 34 
training.batch_size 1.0 +771 35 model.embedding_dim 2.0 +771 35 model.relation_dim 2.0 +771 35 optimizer.lr 0.0018710579713894168 +771 35 negative_sampler.num_negs_per_pos 77.0 +771 35 training.batch_size 2.0 +771 36 model.embedding_dim 0.0 +771 36 model.relation_dim 0.0 +771 36 optimizer.lr 0.00176589662674971 +771 36 negative_sampler.num_negs_per_pos 25.0 +771 36 training.batch_size 1.0 +771 37 model.embedding_dim 0.0 +771 37 model.relation_dim 0.0 +771 37 optimizer.lr 0.012802301672634051 +771 37 negative_sampler.num_negs_per_pos 52.0 +771 37 training.batch_size 0.0 +771 38 model.embedding_dim 0.0 +771 38 model.relation_dim 0.0 +771 38 optimizer.lr 0.012618682487572929 +771 38 negative_sampler.num_negs_per_pos 53.0 +771 38 training.batch_size 2.0 +771 39 model.embedding_dim 2.0 +771 39 model.relation_dim 2.0 +771 39 optimizer.lr 0.02206551439292042 +771 39 negative_sampler.num_negs_per_pos 15.0 +771 39 training.batch_size 0.0 +771 40 model.embedding_dim 0.0 +771 40 model.relation_dim 2.0 +771 40 optimizer.lr 0.004852089647938538 +771 40 negative_sampler.num_negs_per_pos 83.0 +771 40 training.batch_size 2.0 +771 41 model.embedding_dim 2.0 +771 41 model.relation_dim 1.0 +771 41 optimizer.lr 0.011450428401319093 +771 41 negative_sampler.num_negs_per_pos 64.0 +771 41 training.batch_size 1.0 +771 42 model.embedding_dim 0.0 +771 42 model.relation_dim 1.0 +771 42 optimizer.lr 0.0024557319237120913 +771 42 negative_sampler.num_negs_per_pos 57.0 +771 42 training.batch_size 0.0 +771 1 dataset """wn18rr""" +771 1 model """transd""" +771 1 loss """bceaftersigmoid""" +771 1 regularizer """no""" +771 1 optimizer """adam""" +771 1 training_loop """owa""" +771 1 negative_sampler """basic""" +771 1 evaluator """rankbased""" +771 2 dataset """wn18rr""" +771 2 model """transd""" +771 2 loss """bceaftersigmoid""" +771 2 regularizer """no""" +771 2 optimizer """adam""" +771 2 training_loop """owa""" +771 2 negative_sampler """basic""" +771 2 evaluator """rankbased""" +771 3 dataset 
"""wn18rr""" +771 3 model """transd""" +771 3 loss """bceaftersigmoid""" +771 3 regularizer """no""" +771 3 optimizer """adam""" +771 3 training_loop """owa""" +771 3 negative_sampler """basic""" +771 3 evaluator """rankbased""" +771 4 dataset """wn18rr""" +771 4 model """transd""" +771 4 loss """bceaftersigmoid""" +771 4 regularizer """no""" +771 4 optimizer """adam""" +771 4 training_loop """owa""" +771 4 negative_sampler """basic""" +771 4 evaluator """rankbased""" +771 5 dataset """wn18rr""" +771 5 model """transd""" +771 5 loss """bceaftersigmoid""" +771 5 regularizer """no""" +771 5 optimizer """adam""" +771 5 training_loop """owa""" +771 5 negative_sampler """basic""" +771 5 evaluator """rankbased""" +771 6 dataset """wn18rr""" +771 6 model """transd""" +771 6 loss """bceaftersigmoid""" +771 6 regularizer """no""" +771 6 optimizer """adam""" +771 6 training_loop """owa""" +771 6 negative_sampler """basic""" +771 6 evaluator """rankbased""" +771 7 dataset """wn18rr""" +771 7 model """transd""" +771 7 loss """bceaftersigmoid""" +771 7 regularizer """no""" +771 7 optimizer """adam""" +771 7 training_loop """owa""" +771 7 negative_sampler """basic""" +771 7 evaluator """rankbased""" +771 8 dataset """wn18rr""" +771 8 model """transd""" +771 8 loss """bceaftersigmoid""" +771 8 regularizer """no""" +771 8 optimizer """adam""" +771 8 training_loop """owa""" +771 8 negative_sampler """basic""" +771 8 evaluator """rankbased""" +771 9 dataset """wn18rr""" +771 9 model """transd""" +771 9 loss """bceaftersigmoid""" +771 9 regularizer """no""" +771 9 optimizer """adam""" +771 9 training_loop """owa""" +771 9 negative_sampler """basic""" +771 9 evaluator """rankbased""" +771 10 dataset """wn18rr""" +771 10 model """transd""" +771 10 loss """bceaftersigmoid""" +771 10 regularizer """no""" +771 10 optimizer """adam""" +771 10 training_loop """owa""" +771 10 negative_sampler """basic""" +771 10 evaluator """rankbased""" +771 11 dataset """wn18rr""" +771 11 model 
"""transd""" +771 11 loss """bceaftersigmoid""" +771 11 regularizer """no""" +771 11 optimizer """adam""" +771 11 training_loop """owa""" +771 11 negative_sampler """basic""" +771 11 evaluator """rankbased""" +771 12 dataset """wn18rr""" +771 12 model """transd""" +771 12 loss """bceaftersigmoid""" +771 12 regularizer """no""" +771 12 optimizer """adam""" +771 12 training_loop """owa""" +771 12 negative_sampler """basic""" +771 12 evaluator """rankbased""" +771 13 dataset """wn18rr""" +771 13 model """transd""" +771 13 loss """bceaftersigmoid""" +771 13 regularizer """no""" +771 13 optimizer """adam""" +771 13 training_loop """owa""" +771 13 negative_sampler """basic""" +771 13 evaluator """rankbased""" +771 14 dataset """wn18rr""" +771 14 model """transd""" +771 14 loss """bceaftersigmoid""" +771 14 regularizer """no""" +771 14 optimizer """adam""" +771 14 training_loop """owa""" +771 14 negative_sampler """basic""" +771 14 evaluator """rankbased""" +771 15 dataset """wn18rr""" +771 15 model """transd""" +771 15 loss """bceaftersigmoid""" +771 15 regularizer """no""" +771 15 optimizer """adam""" +771 15 training_loop """owa""" +771 15 negative_sampler """basic""" +771 15 evaluator """rankbased""" +771 16 dataset """wn18rr""" +771 16 model """transd""" +771 16 loss """bceaftersigmoid""" +771 16 regularizer """no""" +771 16 optimizer """adam""" +771 16 training_loop """owa""" +771 16 negative_sampler """basic""" +771 16 evaluator """rankbased""" +771 17 dataset """wn18rr""" +771 17 model """transd""" +771 17 loss """bceaftersigmoid""" +771 17 regularizer """no""" +771 17 optimizer """adam""" +771 17 training_loop """owa""" +771 17 negative_sampler """basic""" +771 17 evaluator """rankbased""" +771 18 dataset """wn18rr""" +771 18 model """transd""" +771 18 loss """bceaftersigmoid""" +771 18 regularizer """no""" +771 18 optimizer """adam""" +771 18 training_loop """owa""" +771 18 negative_sampler """basic""" +771 18 evaluator """rankbased""" +771 19 dataset 
"""wn18rr""" +771 19 model """transd""" +771 19 loss """bceaftersigmoid""" +771 19 regularizer """no""" +771 19 optimizer """adam""" +771 19 training_loop """owa""" +771 19 negative_sampler """basic""" +771 19 evaluator """rankbased""" +771 20 dataset """wn18rr""" +771 20 model """transd""" +771 20 loss """bceaftersigmoid""" +771 20 regularizer """no""" +771 20 optimizer """adam""" +771 20 training_loop """owa""" +771 20 negative_sampler """basic""" +771 20 evaluator """rankbased""" +771 21 dataset """wn18rr""" +771 21 model """transd""" +771 21 loss """bceaftersigmoid""" +771 21 regularizer """no""" +771 21 optimizer """adam""" +771 21 training_loop """owa""" +771 21 negative_sampler """basic""" +771 21 evaluator """rankbased""" +771 22 dataset """wn18rr""" +771 22 model """transd""" +771 22 loss """bceaftersigmoid""" +771 22 regularizer """no""" +771 22 optimizer """adam""" +771 22 training_loop """owa""" +771 22 negative_sampler """basic""" +771 22 evaluator """rankbased""" +771 23 dataset """wn18rr""" +771 23 model """transd""" +771 23 loss """bceaftersigmoid""" +771 23 regularizer """no""" +771 23 optimizer """adam""" +771 23 training_loop """owa""" +771 23 negative_sampler """basic""" +771 23 evaluator """rankbased""" +771 24 dataset """wn18rr""" +771 24 model """transd""" +771 24 loss """bceaftersigmoid""" +771 24 regularizer """no""" +771 24 optimizer """adam""" +771 24 training_loop """owa""" +771 24 negative_sampler """basic""" +771 24 evaluator """rankbased""" +771 25 dataset """wn18rr""" +771 25 model """transd""" +771 25 loss """bceaftersigmoid""" +771 25 regularizer """no""" +771 25 optimizer """adam""" +771 25 training_loop """owa""" +771 25 negative_sampler """basic""" +771 25 evaluator """rankbased""" +771 26 dataset """wn18rr""" +771 26 model """transd""" +771 26 loss """bceaftersigmoid""" +771 26 regularizer """no""" +771 26 optimizer """adam""" +771 26 training_loop """owa""" +771 26 negative_sampler """basic""" +771 26 evaluator """rankbased""" 
+771 27 dataset """wn18rr""" +771 27 model """transd""" +771 27 loss """bceaftersigmoid""" +771 27 regularizer """no""" +771 27 optimizer """adam""" +771 27 training_loop """owa""" +771 27 negative_sampler """basic""" +771 27 evaluator """rankbased""" +771 28 dataset """wn18rr""" +771 28 model """transd""" +771 28 loss """bceaftersigmoid""" +771 28 regularizer """no""" +771 28 optimizer """adam""" +771 28 training_loop """owa""" +771 28 negative_sampler """basic""" +771 28 evaluator """rankbased""" +771 29 dataset """wn18rr""" +771 29 model """transd""" +771 29 loss """bceaftersigmoid""" +771 29 regularizer """no""" +771 29 optimizer """adam""" +771 29 training_loop """owa""" +771 29 negative_sampler """basic""" +771 29 evaluator """rankbased""" +771 30 dataset """wn18rr""" +771 30 model """transd""" +771 30 loss """bceaftersigmoid""" +771 30 regularizer """no""" +771 30 optimizer """adam""" +771 30 training_loop """owa""" +771 30 negative_sampler """basic""" +771 30 evaluator """rankbased""" +771 31 dataset """wn18rr""" +771 31 model """transd""" +771 31 loss """bceaftersigmoid""" +771 31 regularizer """no""" +771 31 optimizer """adam""" +771 31 training_loop """owa""" +771 31 negative_sampler """basic""" +771 31 evaluator """rankbased""" +771 32 dataset """wn18rr""" +771 32 model """transd""" +771 32 loss """bceaftersigmoid""" +771 32 regularizer """no""" +771 32 optimizer """adam""" +771 32 training_loop """owa""" +771 32 negative_sampler """basic""" +771 32 evaluator """rankbased""" +771 33 dataset """wn18rr""" +771 33 model """transd""" +771 33 loss """bceaftersigmoid""" +771 33 regularizer """no""" +771 33 optimizer """adam""" +771 33 training_loop """owa""" +771 33 negative_sampler """basic""" +771 33 evaluator """rankbased""" +771 34 dataset """wn18rr""" +771 34 model """transd""" +771 34 loss """bceaftersigmoid""" +771 34 regularizer """no""" +771 34 optimizer """adam""" +771 34 training_loop """owa""" +771 34 negative_sampler """basic""" +771 34 evaluator 
"""rankbased""" +771 35 dataset """wn18rr""" +771 35 model """transd""" +771 35 loss """bceaftersigmoid""" +771 35 regularizer """no""" +771 35 optimizer """adam""" +771 35 training_loop """owa""" +771 35 negative_sampler """basic""" +771 35 evaluator """rankbased""" +771 36 dataset """wn18rr""" +771 36 model """transd""" +771 36 loss """bceaftersigmoid""" +771 36 regularizer """no""" +771 36 optimizer """adam""" +771 36 training_loop """owa""" +771 36 negative_sampler """basic""" +771 36 evaluator """rankbased""" +771 37 dataset """wn18rr""" +771 37 model """transd""" +771 37 loss """bceaftersigmoid""" +771 37 regularizer """no""" +771 37 optimizer """adam""" +771 37 training_loop """owa""" +771 37 negative_sampler """basic""" +771 37 evaluator """rankbased""" +771 38 dataset """wn18rr""" +771 38 model """transd""" +771 38 loss """bceaftersigmoid""" +771 38 regularizer """no""" +771 38 optimizer """adam""" +771 38 training_loop """owa""" +771 38 negative_sampler """basic""" +771 38 evaluator """rankbased""" +771 39 dataset """wn18rr""" +771 39 model """transd""" +771 39 loss """bceaftersigmoid""" +771 39 regularizer """no""" +771 39 optimizer """adam""" +771 39 training_loop """owa""" +771 39 negative_sampler """basic""" +771 39 evaluator """rankbased""" +771 40 dataset """wn18rr""" +771 40 model """transd""" +771 40 loss """bceaftersigmoid""" +771 40 regularizer """no""" +771 40 optimizer """adam""" +771 40 training_loop """owa""" +771 40 negative_sampler """basic""" +771 40 evaluator """rankbased""" +771 41 dataset """wn18rr""" +771 41 model """transd""" +771 41 loss """bceaftersigmoid""" +771 41 regularizer """no""" +771 41 optimizer """adam""" +771 41 training_loop """owa""" +771 41 negative_sampler """basic""" +771 41 evaluator """rankbased""" +771 42 dataset """wn18rr""" +771 42 model """transd""" +771 42 loss """bceaftersigmoid""" +771 42 regularizer """no""" +771 42 optimizer """adam""" +771 42 training_loop """owa""" +771 42 negative_sampler """basic""" 
+771 42 evaluator """rankbased""" +772 1 model.embedding_dim 2.0 +772 1 model.relation_dim 1.0 +772 1 optimizer.lr 0.04280671872271012 +772 1 negative_sampler.num_negs_per_pos 91.0 +772 1 training.batch_size 0.0 +772 2 model.embedding_dim 1.0 +772 2 model.relation_dim 1.0 +772 2 optimizer.lr 0.024479048896208794 +772 2 negative_sampler.num_negs_per_pos 84.0 +772 2 training.batch_size 1.0 +772 3 model.embedding_dim 1.0 +772 3 model.relation_dim 0.0 +772 3 optimizer.lr 0.009323827230221963 +772 3 negative_sampler.num_negs_per_pos 53.0 +772 3 training.batch_size 2.0 +772 4 model.embedding_dim 0.0 +772 4 model.relation_dim 0.0 +772 4 optimizer.lr 0.012360551771090127 +772 4 negative_sampler.num_negs_per_pos 7.0 +772 4 training.batch_size 1.0 +772 5 model.embedding_dim 1.0 +772 5 model.relation_dim 0.0 +772 5 optimizer.lr 0.004639963985390875 +772 5 negative_sampler.num_negs_per_pos 82.0 +772 5 training.batch_size 2.0 +772 6 model.embedding_dim 0.0 +772 6 model.relation_dim 0.0 +772 6 optimizer.lr 0.009287153973231975 +772 6 negative_sampler.num_negs_per_pos 33.0 +772 6 training.batch_size 2.0 +772 7 model.embedding_dim 0.0 +772 7 model.relation_dim 1.0 +772 7 optimizer.lr 0.0039948844406318944 +772 7 negative_sampler.num_negs_per_pos 25.0 +772 7 training.batch_size 0.0 +772 8 model.embedding_dim 2.0 +772 8 model.relation_dim 0.0 +772 8 optimizer.lr 0.016813197760621474 +772 8 negative_sampler.num_negs_per_pos 50.0 +772 8 training.batch_size 0.0 +772 9 model.embedding_dim 1.0 +772 9 model.relation_dim 2.0 +772 9 optimizer.lr 0.02320177116799741 +772 9 negative_sampler.num_negs_per_pos 62.0 +772 9 training.batch_size 2.0 +772 10 model.embedding_dim 1.0 +772 10 model.relation_dim 2.0 +772 10 optimizer.lr 0.004519629522641718 +772 10 negative_sampler.num_negs_per_pos 97.0 +772 10 training.batch_size 2.0 +772 11 model.embedding_dim 1.0 +772 11 model.relation_dim 1.0 +772 11 optimizer.lr 0.002240160063954891 +772 11 negative_sampler.num_negs_per_pos 11.0 +772 11 
training.batch_size 1.0 +772 12 model.embedding_dim 1.0 +772 12 model.relation_dim 2.0 +772 12 optimizer.lr 0.004146397196148842 +772 12 negative_sampler.num_negs_per_pos 98.0 +772 12 training.batch_size 0.0 +772 13 model.embedding_dim 2.0 +772 13 model.relation_dim 2.0 +772 13 optimizer.lr 0.0025850601903308137 +772 13 negative_sampler.num_negs_per_pos 19.0 +772 13 training.batch_size 0.0 +772 14 model.embedding_dim 1.0 +772 14 model.relation_dim 1.0 +772 14 optimizer.lr 0.020133985847842158 +772 14 negative_sampler.num_negs_per_pos 79.0 +772 14 training.batch_size 1.0 +772 15 model.embedding_dim 0.0 +772 15 model.relation_dim 1.0 +772 15 optimizer.lr 0.03340573242457493 +772 15 negative_sampler.num_negs_per_pos 53.0 +772 15 training.batch_size 0.0 +772 16 model.embedding_dim 2.0 +772 16 model.relation_dim 1.0 +772 16 optimizer.lr 0.027041881667882045 +772 16 negative_sampler.num_negs_per_pos 85.0 +772 16 training.batch_size 2.0 +772 17 model.embedding_dim 0.0 +772 17 model.relation_dim 2.0 +772 17 optimizer.lr 0.013348460689996722 +772 17 negative_sampler.num_negs_per_pos 32.0 +772 17 training.batch_size 0.0 +772 18 model.embedding_dim 0.0 +772 18 model.relation_dim 2.0 +772 18 optimizer.lr 0.012611422643352865 +772 18 negative_sampler.num_negs_per_pos 40.0 +772 18 training.batch_size 2.0 +772 19 model.embedding_dim 2.0 +772 19 model.relation_dim 0.0 +772 19 optimizer.lr 0.0026423624001111505 +772 19 negative_sampler.num_negs_per_pos 94.0 +772 19 training.batch_size 1.0 +772 20 model.embedding_dim 0.0 +772 20 model.relation_dim 1.0 +772 20 optimizer.lr 0.05877943218474118 +772 20 negative_sampler.num_negs_per_pos 48.0 +772 20 training.batch_size 1.0 +772 21 model.embedding_dim 1.0 +772 21 model.relation_dim 0.0 +772 21 optimizer.lr 0.006903979374387103 +772 21 negative_sampler.num_negs_per_pos 58.0 +772 21 training.batch_size 2.0 +772 22 model.embedding_dim 1.0 +772 22 model.relation_dim 1.0 +772 22 optimizer.lr 0.007877614615777696 +772 22 
negative_sampler.num_negs_per_pos 93.0 +772 22 training.batch_size 2.0 +772 23 model.embedding_dim 1.0 +772 23 model.relation_dim 1.0 +772 23 optimizer.lr 0.0011359319070484014 +772 23 negative_sampler.num_negs_per_pos 11.0 +772 23 training.batch_size 1.0 +772 24 model.embedding_dim 0.0 +772 24 model.relation_dim 0.0 +772 24 optimizer.lr 0.0029337775428723003 +772 24 negative_sampler.num_negs_per_pos 12.0 +772 24 training.batch_size 0.0 +772 25 model.embedding_dim 0.0 +772 25 model.relation_dim 2.0 +772 25 optimizer.lr 0.040725054193825966 +772 25 negative_sampler.num_negs_per_pos 4.0 +772 25 training.batch_size 0.0 +772 26 model.embedding_dim 0.0 +772 26 model.relation_dim 2.0 +772 26 optimizer.lr 0.008370753201356535 +772 26 negative_sampler.num_negs_per_pos 76.0 +772 26 training.batch_size 2.0 +772 27 model.embedding_dim 2.0 +772 27 model.relation_dim 2.0 +772 27 optimizer.lr 0.001562116595050147 +772 27 negative_sampler.num_negs_per_pos 82.0 +772 27 training.batch_size 2.0 +772 28 model.embedding_dim 2.0 +772 28 model.relation_dim 2.0 +772 28 optimizer.lr 0.01126075375469074 +772 28 negative_sampler.num_negs_per_pos 96.0 +772 28 training.batch_size 1.0 +772 29 model.embedding_dim 2.0 +772 29 model.relation_dim 2.0 +772 29 optimizer.lr 0.032894433635461136 +772 29 negative_sampler.num_negs_per_pos 13.0 +772 29 training.batch_size 1.0 +772 30 model.embedding_dim 0.0 +772 30 model.relation_dim 1.0 +772 30 optimizer.lr 0.023104255077789355 +772 30 negative_sampler.num_negs_per_pos 13.0 +772 30 training.batch_size 0.0 +772 31 model.embedding_dim 0.0 +772 31 model.relation_dim 1.0 +772 31 optimizer.lr 0.008363552794550031 +772 31 negative_sampler.num_negs_per_pos 55.0 +772 31 training.batch_size 1.0 +772 32 model.embedding_dim 0.0 +772 32 model.relation_dim 2.0 +772 32 optimizer.lr 0.004746489446328297 +772 32 negative_sampler.num_negs_per_pos 66.0 +772 32 training.batch_size 1.0 +772 33 model.embedding_dim 0.0 +772 33 model.relation_dim 2.0 +772 33 optimizer.lr 
0.046414935388645454 +772 33 negative_sampler.num_negs_per_pos 92.0 +772 33 training.batch_size 1.0 +772 34 model.embedding_dim 1.0 +772 34 model.relation_dim 1.0 +772 34 optimizer.lr 0.011752880262947315 +772 34 negative_sampler.num_negs_per_pos 98.0 +772 34 training.batch_size 1.0 +772 35 model.embedding_dim 1.0 +772 35 model.relation_dim 1.0 +772 35 optimizer.lr 0.007114833988984465 +772 35 negative_sampler.num_negs_per_pos 39.0 +772 35 training.batch_size 0.0 +772 36 model.embedding_dim 1.0 +772 36 model.relation_dim 0.0 +772 36 optimizer.lr 0.004000518736645208 +772 36 negative_sampler.num_negs_per_pos 68.0 +772 36 training.batch_size 1.0 +772 37 model.embedding_dim 0.0 +772 37 model.relation_dim 1.0 +772 37 optimizer.lr 0.05437876375082197 +772 37 negative_sampler.num_negs_per_pos 85.0 +772 37 training.batch_size 1.0 +772 38 model.embedding_dim 0.0 +772 38 model.relation_dim 0.0 +772 38 optimizer.lr 0.0338758535357814 +772 38 negative_sampler.num_negs_per_pos 4.0 +772 38 training.batch_size 0.0 +772 39 model.embedding_dim 2.0 +772 39 model.relation_dim 1.0 +772 39 optimizer.lr 0.007286851757977181 +772 39 negative_sampler.num_negs_per_pos 38.0 +772 39 training.batch_size 0.0 +772 40 model.embedding_dim 1.0 +772 40 model.relation_dim 2.0 +772 40 optimizer.lr 0.04077507180517174 +772 40 negative_sampler.num_negs_per_pos 92.0 +772 40 training.batch_size 2.0 +772 41 model.embedding_dim 0.0 +772 41 model.relation_dim 2.0 +772 41 optimizer.lr 0.05834489979861209 +772 41 negative_sampler.num_negs_per_pos 49.0 +772 41 training.batch_size 2.0 +772 42 model.embedding_dim 2.0 +772 42 model.relation_dim 1.0 +772 42 optimizer.lr 0.007680784278185355 +772 42 negative_sampler.num_negs_per_pos 75.0 +772 42 training.batch_size 0.0 +772 1 dataset """wn18rr""" +772 1 model """transd""" +772 1 loss """softplus""" +772 1 regularizer """no""" +772 1 optimizer """adam""" +772 1 training_loop """owa""" +772 1 negative_sampler """basic""" +772 1 evaluator """rankbased""" +772 2 
dataset """wn18rr""" +772 2 model """transd""" +772 2 loss """softplus""" +772 2 regularizer """no""" +772 2 optimizer """adam""" +772 2 training_loop """owa""" +772 2 negative_sampler """basic""" +772 2 evaluator """rankbased""" +772 3 dataset """wn18rr""" +772 3 model """transd""" +772 3 loss """softplus""" +772 3 regularizer """no""" +772 3 optimizer """adam""" +772 3 training_loop """owa""" +772 3 negative_sampler """basic""" +772 3 evaluator """rankbased""" +772 4 dataset """wn18rr""" +772 4 model """transd""" +772 4 loss """softplus""" +772 4 regularizer """no""" +772 4 optimizer """adam""" +772 4 training_loop """owa""" +772 4 negative_sampler """basic""" +772 4 evaluator """rankbased""" +772 5 dataset """wn18rr""" +772 5 model """transd""" +772 5 loss """softplus""" +772 5 regularizer """no""" +772 5 optimizer """adam""" +772 5 training_loop """owa""" +772 5 negative_sampler """basic""" +772 5 evaluator """rankbased""" +772 6 dataset """wn18rr""" +772 6 model """transd""" +772 6 loss """softplus""" +772 6 regularizer """no""" +772 6 optimizer """adam""" +772 6 training_loop """owa""" +772 6 negative_sampler """basic""" +772 6 evaluator """rankbased""" +772 7 dataset """wn18rr""" +772 7 model """transd""" +772 7 loss """softplus""" +772 7 regularizer """no""" +772 7 optimizer """adam""" +772 7 training_loop """owa""" +772 7 negative_sampler """basic""" +772 7 evaluator """rankbased""" +772 8 dataset """wn18rr""" +772 8 model """transd""" +772 8 loss """softplus""" +772 8 regularizer """no""" +772 8 optimizer """adam""" +772 8 training_loop """owa""" +772 8 negative_sampler """basic""" +772 8 evaluator """rankbased""" +772 9 dataset """wn18rr""" +772 9 model """transd""" +772 9 loss """softplus""" +772 9 regularizer """no""" +772 9 optimizer """adam""" +772 9 training_loop """owa""" +772 9 negative_sampler """basic""" +772 9 evaluator """rankbased""" +772 10 dataset """wn18rr""" +772 10 model """transd""" +772 10 loss """softplus""" +772 10 regularizer 
"""no""" +772 10 optimizer """adam""" +772 10 training_loop """owa""" +772 10 negative_sampler """basic""" +772 10 evaluator """rankbased""" +772 11 dataset """wn18rr""" +772 11 model """transd""" +772 11 loss """softplus""" +772 11 regularizer """no""" +772 11 optimizer """adam""" +772 11 training_loop """owa""" +772 11 negative_sampler """basic""" +772 11 evaluator """rankbased""" +772 12 dataset """wn18rr""" +772 12 model """transd""" +772 12 loss """softplus""" +772 12 regularizer """no""" +772 12 optimizer """adam""" +772 12 training_loop """owa""" +772 12 negative_sampler """basic""" +772 12 evaluator """rankbased""" +772 13 dataset """wn18rr""" +772 13 model """transd""" +772 13 loss """softplus""" +772 13 regularizer """no""" +772 13 optimizer """adam""" +772 13 training_loop """owa""" +772 13 negative_sampler """basic""" +772 13 evaluator """rankbased""" +772 14 dataset """wn18rr""" +772 14 model """transd""" +772 14 loss """softplus""" +772 14 regularizer """no""" +772 14 optimizer """adam""" +772 14 training_loop """owa""" +772 14 negative_sampler """basic""" +772 14 evaluator """rankbased""" +772 15 dataset """wn18rr""" +772 15 model """transd""" +772 15 loss """softplus""" +772 15 regularizer """no""" +772 15 optimizer """adam""" +772 15 training_loop """owa""" +772 15 negative_sampler """basic""" +772 15 evaluator """rankbased""" +772 16 dataset """wn18rr""" +772 16 model """transd""" +772 16 loss """softplus""" +772 16 regularizer """no""" +772 16 optimizer """adam""" +772 16 training_loop """owa""" +772 16 negative_sampler """basic""" +772 16 evaluator """rankbased""" +772 17 dataset """wn18rr""" +772 17 model """transd""" +772 17 loss """softplus""" +772 17 regularizer """no""" +772 17 optimizer """adam""" +772 17 training_loop """owa""" +772 17 negative_sampler """basic""" +772 17 evaluator """rankbased""" +772 18 dataset """wn18rr""" +772 18 model """transd""" +772 18 loss """softplus""" +772 18 regularizer """no""" +772 18 optimizer """adam""" 
+772 18 training_loop """owa""" +772 18 negative_sampler """basic""" +772 18 evaluator """rankbased""" +772 19 dataset """wn18rr""" +772 19 model """transd""" +772 19 loss """softplus""" +772 19 regularizer """no""" +772 19 optimizer """adam""" +772 19 training_loop """owa""" +772 19 negative_sampler """basic""" +772 19 evaluator """rankbased""" +772 20 dataset """wn18rr""" +772 20 model """transd""" +772 20 loss """softplus""" +772 20 regularizer """no""" +772 20 optimizer """adam""" +772 20 training_loop """owa""" +772 20 negative_sampler """basic""" +772 20 evaluator """rankbased""" +772 21 dataset """wn18rr""" +772 21 model """transd""" +772 21 loss """softplus""" +772 21 regularizer """no""" +772 21 optimizer """adam""" +772 21 training_loop """owa""" +772 21 negative_sampler """basic""" +772 21 evaluator """rankbased""" +772 22 dataset """wn18rr""" +772 22 model """transd""" +772 22 loss """softplus""" +772 22 regularizer """no""" +772 22 optimizer """adam""" +772 22 training_loop """owa""" +772 22 negative_sampler """basic""" +772 22 evaluator """rankbased""" +772 23 dataset """wn18rr""" +772 23 model """transd""" +772 23 loss """softplus""" +772 23 regularizer """no""" +772 23 optimizer """adam""" +772 23 training_loop """owa""" +772 23 negative_sampler """basic""" +772 23 evaluator """rankbased""" +772 24 dataset """wn18rr""" +772 24 model """transd""" +772 24 loss """softplus""" +772 24 regularizer """no""" +772 24 optimizer """adam""" +772 24 training_loop """owa""" +772 24 negative_sampler """basic""" +772 24 evaluator """rankbased""" +772 25 dataset """wn18rr""" +772 25 model """transd""" +772 25 loss """softplus""" +772 25 regularizer """no""" +772 25 optimizer """adam""" +772 25 training_loop """owa""" +772 25 negative_sampler """basic""" +772 25 evaluator """rankbased""" +772 26 dataset """wn18rr""" +772 26 model """transd""" +772 26 loss """softplus""" +772 26 regularizer """no""" +772 26 optimizer """adam""" +772 26 training_loop """owa""" +772 26 
negative_sampler """basic""" +772 26 evaluator """rankbased""" +772 27 dataset """wn18rr""" +772 27 model """transd""" +772 27 loss """softplus""" +772 27 regularizer """no""" +772 27 optimizer """adam""" +772 27 training_loop """owa""" +772 27 negative_sampler """basic""" +772 27 evaluator """rankbased""" +772 28 dataset """wn18rr""" +772 28 model """transd""" +772 28 loss """softplus""" +772 28 regularizer """no""" +772 28 optimizer """adam""" +772 28 training_loop """owa""" +772 28 negative_sampler """basic""" +772 28 evaluator """rankbased""" +772 29 dataset """wn18rr""" +772 29 model """transd""" +772 29 loss """softplus""" +772 29 regularizer """no""" +772 29 optimizer """adam""" +772 29 training_loop """owa""" +772 29 negative_sampler """basic""" +772 29 evaluator """rankbased""" +772 30 dataset """wn18rr""" +772 30 model """transd""" +772 30 loss """softplus""" +772 30 regularizer """no""" +772 30 optimizer """adam""" +772 30 training_loop """owa""" +772 30 negative_sampler """basic""" +772 30 evaluator """rankbased""" +772 31 dataset """wn18rr""" +772 31 model """transd""" +772 31 loss """softplus""" +772 31 regularizer """no""" +772 31 optimizer """adam""" +772 31 training_loop """owa""" +772 31 negative_sampler """basic""" +772 31 evaluator """rankbased""" +772 32 dataset """wn18rr""" +772 32 model """transd""" +772 32 loss """softplus""" +772 32 regularizer """no""" +772 32 optimizer """adam""" +772 32 training_loop """owa""" +772 32 negative_sampler """basic""" +772 32 evaluator """rankbased""" +772 33 dataset """wn18rr""" +772 33 model """transd""" +772 33 loss """softplus""" +772 33 regularizer """no""" +772 33 optimizer """adam""" +772 33 training_loop """owa""" +772 33 negative_sampler """basic""" +772 33 evaluator """rankbased""" +772 34 dataset """wn18rr""" +772 34 model """transd""" +772 34 loss """softplus""" +772 34 regularizer """no""" +772 34 optimizer """adam""" +772 34 training_loop """owa""" +772 34 negative_sampler """basic""" +772 34 
evaluator """rankbased""" +772 35 dataset """wn18rr""" +772 35 model """transd""" +772 35 loss """softplus""" +772 35 regularizer """no""" +772 35 optimizer """adam""" +772 35 training_loop """owa""" +772 35 negative_sampler """basic""" +772 35 evaluator """rankbased""" +772 36 dataset """wn18rr""" +772 36 model """transd""" +772 36 loss """softplus""" +772 36 regularizer """no""" +772 36 optimizer """adam""" +772 36 training_loop """owa""" +772 36 negative_sampler """basic""" +772 36 evaluator """rankbased""" +772 37 dataset """wn18rr""" +772 37 model """transd""" +772 37 loss """softplus""" +772 37 regularizer """no""" +772 37 optimizer """adam""" +772 37 training_loop """owa""" +772 37 negative_sampler """basic""" +772 37 evaluator """rankbased""" +772 38 dataset """wn18rr""" +772 38 model """transd""" +772 38 loss """softplus""" +772 38 regularizer """no""" +772 38 optimizer """adam""" +772 38 training_loop """owa""" +772 38 negative_sampler """basic""" +772 38 evaluator """rankbased""" +772 39 dataset """wn18rr""" +772 39 model """transd""" +772 39 loss """softplus""" +772 39 regularizer """no""" +772 39 optimizer """adam""" +772 39 training_loop """owa""" +772 39 negative_sampler """basic""" +772 39 evaluator """rankbased""" +772 40 dataset """wn18rr""" +772 40 model """transd""" +772 40 loss """softplus""" +772 40 regularizer """no""" +772 40 optimizer """adam""" +772 40 training_loop """owa""" +772 40 negative_sampler """basic""" +772 40 evaluator """rankbased""" +772 41 dataset """wn18rr""" +772 41 model """transd""" +772 41 loss """softplus""" +772 41 regularizer """no""" +772 41 optimizer """adam""" +772 41 training_loop """owa""" +772 41 negative_sampler """basic""" +772 41 evaluator """rankbased""" +772 42 dataset """wn18rr""" +772 42 model """transd""" +772 42 loss """softplus""" +772 42 regularizer """no""" +772 42 optimizer """adam""" +772 42 training_loop """owa""" +772 42 negative_sampler """basic""" +772 42 evaluator """rankbased""" +773 1 
model.embedding_dim 0.0 +773 1 model.relation_dim 1.0 +773 1 optimizer.lr 0.09296836316841871 +773 1 negative_sampler.num_negs_per_pos 99.0 +773 1 training.batch_size 2.0 +773 2 model.embedding_dim 0.0 +773 2 model.relation_dim 2.0 +773 2 optimizer.lr 0.06226571643304153 +773 2 negative_sampler.num_negs_per_pos 89.0 +773 2 training.batch_size 0.0 +773 3 model.embedding_dim 2.0 +773 3 model.relation_dim 1.0 +773 3 optimizer.lr 0.007160107359938472 +773 3 negative_sampler.num_negs_per_pos 40.0 +773 3 training.batch_size 1.0 +773 4 model.embedding_dim 1.0 +773 4 model.relation_dim 1.0 +773 4 optimizer.lr 0.0035418120071094273 +773 4 negative_sampler.num_negs_per_pos 11.0 +773 4 training.batch_size 1.0 +773 5 model.embedding_dim 0.0 +773 5 model.relation_dim 1.0 +773 5 optimizer.lr 0.06340152715617366 +773 5 negative_sampler.num_negs_per_pos 59.0 +773 5 training.batch_size 2.0 +773 6 model.embedding_dim 2.0 +773 6 model.relation_dim 1.0 +773 6 optimizer.lr 0.051533966969139394 +773 6 negative_sampler.num_negs_per_pos 10.0 +773 6 training.batch_size 0.0 +773 7 model.embedding_dim 0.0 +773 7 model.relation_dim 1.0 +773 7 optimizer.lr 0.041354136548076785 +773 7 negative_sampler.num_negs_per_pos 92.0 +773 7 training.batch_size 2.0 +773 8 model.embedding_dim 0.0 +773 8 model.relation_dim 2.0 +773 8 optimizer.lr 0.0030299993859120196 +773 8 negative_sampler.num_negs_per_pos 7.0 +773 8 training.batch_size 1.0 +773 9 model.embedding_dim 1.0 +773 9 model.relation_dim 0.0 +773 9 optimizer.lr 0.0018277351218190592 +773 9 negative_sampler.num_negs_per_pos 56.0 +773 9 training.batch_size 2.0 +773 10 model.embedding_dim 2.0 +773 10 model.relation_dim 1.0 +773 10 optimizer.lr 0.007215436576855939 +773 10 negative_sampler.num_negs_per_pos 45.0 +773 10 training.batch_size 0.0 +773 11 model.embedding_dim 1.0 +773 11 model.relation_dim 1.0 +773 11 optimizer.lr 0.01436766345620852 +773 11 negative_sampler.num_negs_per_pos 57.0 +773 11 training.batch_size 1.0 +773 12 model.embedding_dim 
0.0 +773 12 model.relation_dim 2.0 +773 12 optimizer.lr 0.007414507086377709 +773 12 negative_sampler.num_negs_per_pos 68.0 +773 12 training.batch_size 1.0 +773 13 model.embedding_dim 1.0 +773 13 model.relation_dim 0.0 +773 13 optimizer.lr 0.06908131790548716 +773 13 negative_sampler.num_negs_per_pos 88.0 +773 13 training.batch_size 0.0 +773 14 model.embedding_dim 1.0 +773 14 model.relation_dim 0.0 +773 14 optimizer.lr 0.011149362389134334 +773 14 negative_sampler.num_negs_per_pos 99.0 +773 14 training.batch_size 2.0 +773 15 model.embedding_dim 1.0 +773 15 model.relation_dim 1.0 +773 15 optimizer.lr 0.07994066696554906 +773 15 negative_sampler.num_negs_per_pos 48.0 +773 15 training.batch_size 2.0 +773 16 model.embedding_dim 0.0 +773 16 model.relation_dim 0.0 +773 16 optimizer.lr 0.06661572745184849 +773 16 negative_sampler.num_negs_per_pos 83.0 +773 16 training.batch_size 1.0 +773 17 model.embedding_dim 0.0 +773 17 model.relation_dim 0.0 +773 17 optimizer.lr 0.03577466150522248 +773 17 negative_sampler.num_negs_per_pos 63.0 +773 17 training.batch_size 0.0 +773 18 model.embedding_dim 0.0 +773 18 model.relation_dim 0.0 +773 18 optimizer.lr 0.0029347680514882784 +773 18 negative_sampler.num_negs_per_pos 32.0 +773 18 training.batch_size 2.0 +773 19 model.embedding_dim 2.0 +773 19 model.relation_dim 1.0 +773 19 optimizer.lr 0.0610957265906764 +773 19 negative_sampler.num_negs_per_pos 84.0 +773 19 training.batch_size 0.0 +773 20 model.embedding_dim 1.0 +773 20 model.relation_dim 1.0 +773 20 optimizer.lr 0.001113600231761952 +773 20 negative_sampler.num_negs_per_pos 64.0 +773 20 training.batch_size 0.0 +773 21 model.embedding_dim 2.0 +773 21 model.relation_dim 2.0 +773 21 optimizer.lr 0.030173398208408874 +773 21 negative_sampler.num_negs_per_pos 22.0 +773 21 training.batch_size 2.0 +773 22 model.embedding_dim 2.0 +773 22 model.relation_dim 1.0 +773 22 optimizer.lr 0.0026413165269281455 +773 22 negative_sampler.num_negs_per_pos 74.0 +773 22 training.batch_size 0.0 +773 23 
model.embedding_dim 1.0 +773 23 model.relation_dim 2.0 +773 23 optimizer.lr 0.0727932661599707 +773 23 negative_sampler.num_negs_per_pos 8.0 +773 23 training.batch_size 2.0 +773 24 model.embedding_dim 1.0 +773 24 model.relation_dim 1.0 +773 24 optimizer.lr 0.013833330225467257 +773 24 negative_sampler.num_negs_per_pos 53.0 +773 24 training.batch_size 1.0 +773 25 model.embedding_dim 1.0 +773 25 model.relation_dim 0.0 +773 25 optimizer.lr 0.087552083018603 +773 25 negative_sampler.num_negs_per_pos 48.0 +773 25 training.batch_size 2.0 +773 26 model.embedding_dim 1.0 +773 26 model.relation_dim 2.0 +773 26 optimizer.lr 0.03171777867806622 +773 26 negative_sampler.num_negs_per_pos 29.0 +773 26 training.batch_size 1.0 +773 27 model.embedding_dim 0.0 +773 27 model.relation_dim 1.0 +773 27 optimizer.lr 0.07978319492844207 +773 27 negative_sampler.num_negs_per_pos 99.0 +773 27 training.batch_size 1.0 +773 28 model.embedding_dim 0.0 +773 28 model.relation_dim 2.0 +773 28 optimizer.lr 0.0017853749010677217 +773 28 negative_sampler.num_negs_per_pos 64.0 +773 28 training.batch_size 1.0 +773 29 model.embedding_dim 2.0 +773 29 model.relation_dim 0.0 +773 29 optimizer.lr 0.013520321322635176 +773 29 negative_sampler.num_negs_per_pos 88.0 +773 29 training.batch_size 0.0 +773 30 model.embedding_dim 2.0 +773 30 model.relation_dim 2.0 +773 30 optimizer.lr 0.004113776615350135 +773 30 negative_sampler.num_negs_per_pos 35.0 +773 30 training.batch_size 0.0 +773 31 model.embedding_dim 1.0 +773 31 model.relation_dim 2.0 +773 31 optimizer.lr 0.0313967853566441 +773 31 negative_sampler.num_negs_per_pos 81.0 +773 31 training.batch_size 0.0 +773 32 model.embedding_dim 0.0 +773 32 model.relation_dim 1.0 +773 32 optimizer.lr 0.0478159231569039 +773 32 negative_sampler.num_negs_per_pos 11.0 +773 32 training.batch_size 1.0 +773 33 model.embedding_dim 2.0 +773 33 model.relation_dim 0.0 +773 33 optimizer.lr 0.006960591534277521 +773 33 negative_sampler.num_negs_per_pos 14.0 +773 33 
training.batch_size 2.0 +773 34 model.embedding_dim 2.0 +773 34 model.relation_dim 0.0 +773 34 optimizer.lr 0.0028429469404486632 +773 34 negative_sampler.num_negs_per_pos 92.0 +773 34 training.batch_size 1.0 +773 35 model.embedding_dim 1.0 +773 35 model.relation_dim 1.0 +773 35 optimizer.lr 0.0060317346785729985 +773 35 negative_sampler.num_negs_per_pos 16.0 +773 35 training.batch_size 2.0 +773 36 model.embedding_dim 2.0 +773 36 model.relation_dim 2.0 +773 36 optimizer.lr 0.002718972691866723 +773 36 negative_sampler.num_negs_per_pos 81.0 +773 36 training.batch_size 0.0 +773 37 model.embedding_dim 0.0 +773 37 model.relation_dim 0.0 +773 37 optimizer.lr 0.001785427322627648 +773 37 negative_sampler.num_negs_per_pos 36.0 +773 37 training.batch_size 2.0 +773 38 model.embedding_dim 2.0 +773 38 model.relation_dim 0.0 +773 38 optimizer.lr 0.015092928145046593 +773 38 negative_sampler.num_negs_per_pos 45.0 +773 38 training.batch_size 1.0 +773 39 model.embedding_dim 0.0 +773 39 model.relation_dim 0.0 +773 39 optimizer.lr 0.09574477528963339 +773 39 negative_sampler.num_negs_per_pos 80.0 +773 39 training.batch_size 1.0 +773 40 model.embedding_dim 1.0 +773 40 model.relation_dim 2.0 +773 40 optimizer.lr 0.06003209041106074 +773 40 negative_sampler.num_negs_per_pos 91.0 +773 40 training.batch_size 1.0 +773 41 model.embedding_dim 2.0 +773 41 model.relation_dim 2.0 +773 41 optimizer.lr 0.003486523556735364 +773 41 negative_sampler.num_negs_per_pos 1.0 +773 41 training.batch_size 0.0 +773 42 model.embedding_dim 0.0 +773 42 model.relation_dim 1.0 +773 42 optimizer.lr 0.009441239050472332 +773 42 negative_sampler.num_negs_per_pos 30.0 +773 42 training.batch_size 1.0 +773 43 model.embedding_dim 1.0 +773 43 model.relation_dim 2.0 +773 43 optimizer.lr 0.01526081574650943 +773 43 negative_sampler.num_negs_per_pos 64.0 +773 43 training.batch_size 0.0 +773 44 model.embedding_dim 0.0 +773 44 model.relation_dim 0.0 +773 44 optimizer.lr 0.004129848222458586 +773 44 
negative_sampler.num_negs_per_pos 64.0 +773 44 training.batch_size 1.0 +773 45 model.embedding_dim 1.0 +773 45 model.relation_dim 2.0 +773 45 optimizer.lr 0.0012008880253529168 +773 45 negative_sampler.num_negs_per_pos 7.0 +773 45 training.batch_size 0.0 +773 46 model.embedding_dim 1.0 +773 46 model.relation_dim 0.0 +773 46 optimizer.lr 0.033892633276310355 +773 46 negative_sampler.num_negs_per_pos 37.0 +773 46 training.batch_size 1.0 +773 47 model.embedding_dim 2.0 +773 47 model.relation_dim 0.0 +773 47 optimizer.lr 0.010486923900228666 +773 47 negative_sampler.num_negs_per_pos 74.0 +773 47 training.batch_size 0.0 +773 48 model.embedding_dim 1.0 +773 48 model.relation_dim 1.0 +773 48 optimizer.lr 0.007139332131671566 +773 48 negative_sampler.num_negs_per_pos 44.0 +773 48 training.batch_size 2.0 +773 49 model.embedding_dim 1.0 +773 49 model.relation_dim 1.0 +773 49 optimizer.lr 0.03176910550045012 +773 49 negative_sampler.num_negs_per_pos 5.0 +773 49 training.batch_size 2.0 +773 50 model.embedding_dim 2.0 +773 50 model.relation_dim 1.0 +773 50 optimizer.lr 0.0057531208492498445 +773 50 negative_sampler.num_negs_per_pos 35.0 +773 50 training.batch_size 2.0 +773 51 model.embedding_dim 1.0 +773 51 model.relation_dim 2.0 +773 51 optimizer.lr 0.017953133429987646 +773 51 negative_sampler.num_negs_per_pos 37.0 +773 51 training.batch_size 0.0 +773 52 model.embedding_dim 2.0 +773 52 model.relation_dim 1.0 +773 52 optimizer.lr 0.0030066243520779756 +773 52 negative_sampler.num_negs_per_pos 99.0 +773 52 training.batch_size 0.0 +773 53 model.embedding_dim 2.0 +773 53 model.relation_dim 1.0 +773 53 optimizer.lr 0.034705533266552166 +773 53 negative_sampler.num_negs_per_pos 21.0 +773 53 training.batch_size 0.0 +773 54 model.embedding_dim 2.0 +773 54 model.relation_dim 1.0 +773 54 optimizer.lr 0.006566035965779931 +773 54 negative_sampler.num_negs_per_pos 57.0 +773 54 training.batch_size 0.0 +773 55 model.embedding_dim 0.0 +773 55 model.relation_dim 1.0 +773 55 optimizer.lr 
0.001332419517477388 +773 55 negative_sampler.num_negs_per_pos 8.0 +773 55 training.batch_size 1.0 +773 56 model.embedding_dim 1.0 +773 56 model.relation_dim 1.0 +773 56 optimizer.lr 0.0035124412861571503 +773 56 negative_sampler.num_negs_per_pos 10.0 +773 56 training.batch_size 1.0 +773 57 model.embedding_dim 0.0 +773 57 model.relation_dim 0.0 +773 57 optimizer.lr 0.018964677205075053 +773 57 negative_sampler.num_negs_per_pos 90.0 +773 57 training.batch_size 0.0 +773 58 model.embedding_dim 1.0 +773 58 model.relation_dim 0.0 +773 58 optimizer.lr 0.002096898078713167 +773 58 negative_sampler.num_negs_per_pos 74.0 +773 58 training.batch_size 2.0 +773 59 model.embedding_dim 2.0 +773 59 model.relation_dim 2.0 +773 59 optimizer.lr 0.003612337528300702 +773 59 negative_sampler.num_negs_per_pos 97.0 +773 59 training.batch_size 0.0 +773 60 model.embedding_dim 1.0 +773 60 model.relation_dim 2.0 +773 60 optimizer.lr 0.08985383227084107 +773 60 negative_sampler.num_negs_per_pos 99.0 +773 60 training.batch_size 2.0 +773 61 model.embedding_dim 2.0 +773 61 model.relation_dim 0.0 +773 61 optimizer.lr 0.005243000284179337 +773 61 negative_sampler.num_negs_per_pos 8.0 +773 61 training.batch_size 1.0 +773 62 model.embedding_dim 1.0 +773 62 model.relation_dim 2.0 +773 62 optimizer.lr 0.0024054435243721158 +773 62 negative_sampler.num_negs_per_pos 26.0 +773 62 training.batch_size 2.0 +773 63 model.embedding_dim 2.0 +773 63 model.relation_dim 1.0 +773 63 optimizer.lr 0.09534626033265617 +773 63 negative_sampler.num_negs_per_pos 55.0 +773 63 training.batch_size 2.0 +773 64 model.embedding_dim 2.0 +773 64 model.relation_dim 0.0 +773 64 optimizer.lr 0.005537772322844078 +773 64 negative_sampler.num_negs_per_pos 18.0 +773 64 training.batch_size 2.0 +773 65 model.embedding_dim 1.0 +773 65 model.relation_dim 1.0 +773 65 optimizer.lr 0.006857071281453611 +773 65 negative_sampler.num_negs_per_pos 70.0 +773 65 training.batch_size 0.0 +773 66 model.embedding_dim 2.0 +773 66 model.relation_dim 
2.0 +773 66 optimizer.lr 0.0011204854664606833 +773 66 negative_sampler.num_negs_per_pos 94.0 +773 66 training.batch_size 1.0 +773 67 model.embedding_dim 0.0 +773 67 model.relation_dim 0.0 +773 67 optimizer.lr 0.007793741981829261 +773 67 negative_sampler.num_negs_per_pos 88.0 +773 67 training.batch_size 2.0 +773 68 model.embedding_dim 2.0 +773 68 model.relation_dim 1.0 +773 68 optimizer.lr 0.0012946792598192301 +773 68 negative_sampler.num_negs_per_pos 30.0 +773 68 training.batch_size 0.0 +773 69 model.embedding_dim 1.0 +773 69 model.relation_dim 0.0 +773 69 optimizer.lr 0.002798298178245527 +773 69 negative_sampler.num_negs_per_pos 30.0 +773 69 training.batch_size 2.0 +773 70 model.embedding_dim 1.0 +773 70 model.relation_dim 0.0 +773 70 optimizer.lr 0.001507521730632302 +773 70 negative_sampler.num_negs_per_pos 15.0 +773 70 training.batch_size 0.0 +773 71 model.embedding_dim 0.0 +773 71 model.relation_dim 2.0 +773 71 optimizer.lr 0.0022154716132367666 +773 71 negative_sampler.num_negs_per_pos 25.0 +773 71 training.batch_size 2.0 +773 72 model.embedding_dim 2.0 +773 72 model.relation_dim 1.0 +773 72 optimizer.lr 0.002737454384350524 +773 72 negative_sampler.num_negs_per_pos 80.0 +773 72 training.batch_size 1.0 +773 73 model.embedding_dim 2.0 +773 73 model.relation_dim 0.0 +773 73 optimizer.lr 0.007908783843001699 +773 73 negative_sampler.num_negs_per_pos 27.0 +773 73 training.batch_size 2.0 +773 74 model.embedding_dim 2.0 +773 74 model.relation_dim 2.0 +773 74 optimizer.lr 0.03631745484280135 +773 74 negative_sampler.num_negs_per_pos 11.0 +773 74 training.batch_size 0.0 +773 75 model.embedding_dim 0.0 +773 75 model.relation_dim 1.0 +773 75 optimizer.lr 0.0022091460669578745 +773 75 negative_sampler.num_negs_per_pos 65.0 +773 75 training.batch_size 0.0 +773 76 model.embedding_dim 0.0 +773 76 model.relation_dim 0.0 +773 76 optimizer.lr 0.002989072468503601 +773 76 negative_sampler.num_negs_per_pos 45.0 +773 76 training.batch_size 1.0 +773 77 model.embedding_dim 0.0 
+773 77 model.relation_dim 2.0 +773 77 optimizer.lr 0.004622897108300257 +773 77 negative_sampler.num_negs_per_pos 14.0 +773 77 training.batch_size 1.0 +773 78 model.embedding_dim 2.0 +773 78 model.relation_dim 1.0 +773 78 optimizer.lr 0.001075877799734766 +773 78 negative_sampler.num_negs_per_pos 32.0 +773 78 training.batch_size 1.0 +773 79 model.embedding_dim 0.0 +773 79 model.relation_dim 1.0 +773 79 optimizer.lr 0.004736774879950932 +773 79 negative_sampler.num_negs_per_pos 67.0 +773 79 training.batch_size 1.0 +773 80 model.embedding_dim 0.0 +773 80 model.relation_dim 1.0 +773 80 optimizer.lr 0.0013183909280586675 +773 80 negative_sampler.num_negs_per_pos 19.0 +773 80 training.batch_size 0.0 +773 81 model.embedding_dim 2.0 +773 81 model.relation_dim 1.0 +773 81 optimizer.lr 0.004079218430840594 +773 81 negative_sampler.num_negs_per_pos 8.0 +773 81 training.batch_size 0.0 +773 1 dataset """wn18rr""" +773 1 model """transd""" +773 1 loss """bceaftersigmoid""" +773 1 regularizer """no""" +773 1 optimizer """adam""" +773 1 training_loop """owa""" +773 1 negative_sampler """basic""" +773 1 evaluator """rankbased""" +773 2 dataset """wn18rr""" +773 2 model """transd""" +773 2 loss """bceaftersigmoid""" +773 2 regularizer """no""" +773 2 optimizer """adam""" +773 2 training_loop """owa""" +773 2 negative_sampler """basic""" +773 2 evaluator """rankbased""" +773 3 dataset """wn18rr""" +773 3 model """transd""" +773 3 loss """bceaftersigmoid""" +773 3 regularizer """no""" +773 3 optimizer """adam""" +773 3 training_loop """owa""" +773 3 negative_sampler """basic""" +773 3 evaluator """rankbased""" +773 4 dataset """wn18rr""" +773 4 model """transd""" +773 4 loss """bceaftersigmoid""" +773 4 regularizer """no""" +773 4 optimizer """adam""" +773 4 training_loop """owa""" +773 4 negative_sampler """basic""" +773 4 evaluator """rankbased""" +773 5 dataset """wn18rr""" +773 5 model """transd""" +773 5 loss """bceaftersigmoid""" +773 5 regularizer """no""" +773 5 optimizer 
"""adam""" +773 5 training_loop """owa""" +773 5 negative_sampler """basic""" +773 5 evaluator """rankbased""" +773 6 dataset """wn18rr""" +773 6 model """transd""" +773 6 loss """bceaftersigmoid""" +773 6 regularizer """no""" +773 6 optimizer """adam""" +773 6 training_loop """owa""" +773 6 negative_sampler """basic""" +773 6 evaluator """rankbased""" +773 7 dataset """wn18rr""" +773 7 model """transd""" +773 7 loss """bceaftersigmoid""" +773 7 regularizer """no""" +773 7 optimizer """adam""" +773 7 training_loop """owa""" +773 7 negative_sampler """basic""" +773 7 evaluator """rankbased""" +773 8 dataset """wn18rr""" +773 8 model """transd""" +773 8 loss """bceaftersigmoid""" +773 8 regularizer """no""" +773 8 optimizer """adam""" +773 8 training_loop """owa""" +773 8 negative_sampler """basic""" +773 8 evaluator """rankbased""" +773 9 dataset """wn18rr""" +773 9 model """transd""" +773 9 loss """bceaftersigmoid""" +773 9 regularizer """no""" +773 9 optimizer """adam""" +773 9 training_loop """owa""" +773 9 negative_sampler """basic""" +773 9 evaluator """rankbased""" +773 10 dataset """wn18rr""" +773 10 model """transd""" +773 10 loss """bceaftersigmoid""" +773 10 regularizer """no""" +773 10 optimizer """adam""" +773 10 training_loop """owa""" +773 10 negative_sampler """basic""" +773 10 evaluator """rankbased""" +773 11 dataset """wn18rr""" +773 11 model """transd""" +773 11 loss """bceaftersigmoid""" +773 11 regularizer """no""" +773 11 optimizer """adam""" +773 11 training_loop """owa""" +773 11 negative_sampler """basic""" +773 11 evaluator """rankbased""" +773 12 dataset """wn18rr""" +773 12 model """transd""" +773 12 loss """bceaftersigmoid""" +773 12 regularizer """no""" +773 12 optimizer """adam""" +773 12 training_loop """owa""" +773 12 negative_sampler """basic""" +773 12 evaluator """rankbased""" +773 13 dataset """wn18rr""" +773 13 model """transd""" +773 13 loss """bceaftersigmoid""" +773 13 regularizer """no""" +773 13 optimizer """adam""" +773 13 
training_loop """owa""" +773 13 negative_sampler """basic""" +773 13 evaluator """rankbased""" +773 14 dataset """wn18rr""" +773 14 model """transd""" +773 14 loss """bceaftersigmoid""" +773 14 regularizer """no""" +773 14 optimizer """adam""" +773 14 training_loop """owa""" +773 14 negative_sampler """basic""" +773 14 evaluator """rankbased""" +773 15 dataset """wn18rr""" +773 15 model """transd""" +773 15 loss """bceaftersigmoid""" +773 15 regularizer """no""" +773 15 optimizer """adam""" +773 15 training_loop """owa""" +773 15 negative_sampler """basic""" +773 15 evaluator """rankbased""" +773 16 dataset """wn18rr""" +773 16 model """transd""" +773 16 loss """bceaftersigmoid""" +773 16 regularizer """no""" +773 16 optimizer """adam""" +773 16 training_loop """owa""" +773 16 negative_sampler """basic""" +773 16 evaluator """rankbased""" +773 17 dataset """wn18rr""" +773 17 model """transd""" +773 17 loss """bceaftersigmoid""" +773 17 regularizer """no""" +773 17 optimizer """adam""" +773 17 training_loop """owa""" +773 17 negative_sampler """basic""" +773 17 evaluator """rankbased""" +773 18 dataset """wn18rr""" +773 18 model """transd""" +773 18 loss """bceaftersigmoid""" +773 18 regularizer """no""" +773 18 optimizer """adam""" +773 18 training_loop """owa""" +773 18 negative_sampler """basic""" +773 18 evaluator """rankbased""" +773 19 dataset """wn18rr""" +773 19 model """transd""" +773 19 loss """bceaftersigmoid""" +773 19 regularizer """no""" +773 19 optimizer """adam""" +773 19 training_loop """owa""" +773 19 negative_sampler """basic""" +773 19 evaluator """rankbased""" +773 20 dataset """wn18rr""" +773 20 model """transd""" +773 20 loss """bceaftersigmoid""" +773 20 regularizer """no""" +773 20 optimizer """adam""" +773 20 training_loop """owa""" +773 20 negative_sampler """basic""" +773 20 evaluator """rankbased""" +773 21 dataset """wn18rr""" +773 21 model """transd""" +773 21 loss """bceaftersigmoid""" +773 21 regularizer """no""" +773 21 optimizer 
"""adam""" +773 21 training_loop """owa""" +773 21 negative_sampler """basic""" +773 21 evaluator """rankbased""" +773 22 dataset """wn18rr""" +773 22 model """transd""" +773 22 loss """bceaftersigmoid""" +773 22 regularizer """no""" +773 22 optimizer """adam""" +773 22 training_loop """owa""" +773 22 negative_sampler """basic""" +773 22 evaluator """rankbased""" +773 23 dataset """wn18rr""" +773 23 model """transd""" +773 23 loss """bceaftersigmoid""" +773 23 regularizer """no""" +773 23 optimizer """adam""" +773 23 training_loop """owa""" +773 23 negative_sampler """basic""" +773 23 evaluator """rankbased""" +773 24 dataset """wn18rr""" +773 24 model """transd""" +773 24 loss """bceaftersigmoid""" +773 24 regularizer """no""" +773 24 optimizer """adam""" +773 24 training_loop """owa""" +773 24 negative_sampler """basic""" +773 24 evaluator """rankbased""" +773 25 dataset """wn18rr""" +773 25 model """transd""" +773 25 loss """bceaftersigmoid""" +773 25 regularizer """no""" +773 25 optimizer """adam""" +773 25 training_loop """owa""" +773 25 negative_sampler """basic""" +773 25 evaluator """rankbased""" +773 26 dataset """wn18rr""" +773 26 model """transd""" +773 26 loss """bceaftersigmoid""" +773 26 regularizer """no""" +773 26 optimizer """adam""" +773 26 training_loop """owa""" +773 26 negative_sampler """basic""" +773 26 evaluator """rankbased""" +773 27 dataset """wn18rr""" +773 27 model """transd""" +773 27 loss """bceaftersigmoid""" +773 27 regularizer """no""" +773 27 optimizer """adam""" +773 27 training_loop """owa""" +773 27 negative_sampler """basic""" +773 27 evaluator """rankbased""" +773 28 dataset """wn18rr""" +773 28 model """transd""" +773 28 loss """bceaftersigmoid""" +773 28 regularizer """no""" +773 28 optimizer """adam""" +773 28 training_loop """owa""" +773 28 negative_sampler """basic""" +773 28 evaluator """rankbased""" +773 29 dataset """wn18rr""" +773 29 model """transd""" +773 29 loss """bceaftersigmoid""" +773 29 regularizer """no""" 
+773 29 optimizer """adam""" +773 29 training_loop """owa""" +773 29 negative_sampler """basic""" +773 29 evaluator """rankbased""" +773 30 dataset """wn18rr""" +773 30 model """transd""" +773 30 loss """bceaftersigmoid""" +773 30 regularizer """no""" +773 30 optimizer """adam""" +773 30 training_loop """owa""" +773 30 negative_sampler """basic""" +773 30 evaluator """rankbased""" +773 31 dataset """wn18rr""" +773 31 model """transd""" +773 31 loss """bceaftersigmoid""" +773 31 regularizer """no""" +773 31 optimizer """adam""" +773 31 training_loop """owa""" +773 31 negative_sampler """basic""" +773 31 evaluator """rankbased""" +773 32 dataset """wn18rr""" +773 32 model """transd""" +773 32 loss """bceaftersigmoid""" +773 32 regularizer """no""" +773 32 optimizer """adam""" +773 32 training_loop """owa""" +773 32 negative_sampler """basic""" +773 32 evaluator """rankbased""" +773 33 dataset """wn18rr""" +773 33 model """transd""" +773 33 loss """bceaftersigmoid""" +773 33 regularizer """no""" +773 33 optimizer """adam""" +773 33 training_loop """owa""" +773 33 negative_sampler """basic""" +773 33 evaluator """rankbased""" +773 34 dataset """wn18rr""" +773 34 model """transd""" +773 34 loss """bceaftersigmoid""" +773 34 regularizer """no""" +773 34 optimizer """adam""" +773 34 training_loop """owa""" +773 34 negative_sampler """basic""" +773 34 evaluator """rankbased""" +773 35 dataset """wn18rr""" +773 35 model """transd""" +773 35 loss """bceaftersigmoid""" +773 35 regularizer """no""" +773 35 optimizer """adam""" +773 35 training_loop """owa""" +773 35 negative_sampler """basic""" +773 35 evaluator """rankbased""" +773 36 dataset """wn18rr""" +773 36 model """transd""" +773 36 loss """bceaftersigmoid""" +773 36 regularizer """no""" +773 36 optimizer """adam""" +773 36 training_loop """owa""" +773 36 negative_sampler """basic""" +773 36 evaluator """rankbased""" +773 37 dataset """wn18rr""" +773 37 model """transd""" +773 37 loss """bceaftersigmoid""" +773 37 
regularizer """no""" +773 37 optimizer """adam""" +773 37 training_loop """owa""" +773 37 negative_sampler """basic""" +773 37 evaluator """rankbased""" +773 38 dataset """wn18rr""" +773 38 model """transd""" +773 38 loss """bceaftersigmoid""" +773 38 regularizer """no""" +773 38 optimizer """adam""" +773 38 training_loop """owa""" +773 38 negative_sampler """basic""" +773 38 evaluator """rankbased""" +773 39 dataset """wn18rr""" +773 39 model """transd""" +773 39 loss """bceaftersigmoid""" +773 39 regularizer """no""" +773 39 optimizer """adam""" +773 39 training_loop """owa""" +773 39 negative_sampler """basic""" +773 39 evaluator """rankbased""" +773 40 dataset """wn18rr""" +773 40 model """transd""" +773 40 loss """bceaftersigmoid""" +773 40 regularizer """no""" +773 40 optimizer """adam""" +773 40 training_loop """owa""" +773 40 negative_sampler """basic""" +773 40 evaluator """rankbased""" +773 41 dataset """wn18rr""" +773 41 model """transd""" +773 41 loss """bceaftersigmoid""" +773 41 regularizer """no""" +773 41 optimizer """adam""" +773 41 training_loop """owa""" +773 41 negative_sampler """basic""" +773 41 evaluator """rankbased""" +773 42 dataset """wn18rr""" +773 42 model """transd""" +773 42 loss """bceaftersigmoid""" +773 42 regularizer """no""" +773 42 optimizer """adam""" +773 42 training_loop """owa""" +773 42 negative_sampler """basic""" +773 42 evaluator """rankbased""" +773 43 dataset """wn18rr""" +773 43 model """transd""" +773 43 loss """bceaftersigmoid""" +773 43 regularizer """no""" +773 43 optimizer """adam""" +773 43 training_loop """owa""" +773 43 negative_sampler """basic""" +773 43 evaluator """rankbased""" +773 44 dataset """wn18rr""" +773 44 model """transd""" +773 44 loss """bceaftersigmoid""" +773 44 regularizer """no""" +773 44 optimizer """adam""" +773 44 training_loop """owa""" +773 44 negative_sampler """basic""" +773 44 evaluator """rankbased""" +773 45 dataset """wn18rr""" +773 45 model """transd""" +773 45 loss 
"""bceaftersigmoid""" +773 45 regularizer """no""" +773 45 optimizer """adam""" +773 45 training_loop """owa""" +773 45 negative_sampler """basic""" +773 45 evaluator """rankbased""" +773 46 dataset """wn18rr""" +773 46 model """transd""" +773 46 loss """bceaftersigmoid""" +773 46 regularizer """no""" +773 46 optimizer """adam""" +773 46 training_loop """owa""" +773 46 negative_sampler """basic""" +773 46 evaluator """rankbased""" +773 47 dataset """wn18rr""" +773 47 model """transd""" +773 47 loss """bceaftersigmoid""" +773 47 regularizer """no""" +773 47 optimizer """adam""" +773 47 training_loop """owa""" +773 47 negative_sampler """basic""" +773 47 evaluator """rankbased""" +773 48 dataset """wn18rr""" +773 48 model """transd""" +773 48 loss """bceaftersigmoid""" +773 48 regularizer """no""" +773 48 optimizer """adam""" +773 48 training_loop """owa""" +773 48 negative_sampler """basic""" +773 48 evaluator """rankbased""" +773 49 dataset """wn18rr""" +773 49 model """transd""" +773 49 loss """bceaftersigmoid""" +773 49 regularizer """no""" +773 49 optimizer """adam""" +773 49 training_loop """owa""" +773 49 negative_sampler """basic""" +773 49 evaluator """rankbased""" +773 50 dataset """wn18rr""" +773 50 model """transd""" +773 50 loss """bceaftersigmoid""" +773 50 regularizer """no""" +773 50 optimizer """adam""" +773 50 training_loop """owa""" +773 50 negative_sampler """basic""" +773 50 evaluator """rankbased""" +773 51 dataset """wn18rr""" +773 51 model """transd""" +773 51 loss """bceaftersigmoid""" +773 51 regularizer """no""" +773 51 optimizer """adam""" +773 51 training_loop """owa""" +773 51 negative_sampler """basic""" +773 51 evaluator """rankbased""" +773 52 dataset """wn18rr""" +773 52 model """transd""" +773 52 loss """bceaftersigmoid""" +773 52 regularizer """no""" +773 52 optimizer """adam""" +773 52 training_loop """owa""" +773 52 negative_sampler """basic""" +773 52 evaluator """rankbased""" +773 53 dataset """wn18rr""" +773 53 model 
"""transd""" +773 53 loss """bceaftersigmoid""" +773 53 regularizer """no""" +773 53 optimizer """adam""" +773 53 training_loop """owa""" +773 53 negative_sampler """basic""" +773 53 evaluator """rankbased""" +773 54 dataset """wn18rr""" +773 54 model """transd""" +773 54 loss """bceaftersigmoid""" +773 54 regularizer """no""" +773 54 optimizer """adam""" +773 54 training_loop """owa""" +773 54 negative_sampler """basic""" +773 54 evaluator """rankbased""" +773 55 dataset """wn18rr""" +773 55 model """transd""" +773 55 loss """bceaftersigmoid""" +773 55 regularizer """no""" +773 55 optimizer """adam""" +773 55 training_loop """owa""" +773 55 negative_sampler """basic""" +773 55 evaluator """rankbased""" +773 56 dataset """wn18rr""" +773 56 model """transd""" +773 56 loss """bceaftersigmoid""" +773 56 regularizer """no""" +773 56 optimizer """adam""" +773 56 training_loop """owa""" +773 56 negative_sampler """basic""" +773 56 evaluator """rankbased""" +773 57 dataset """wn18rr""" +773 57 model """transd""" +773 57 loss """bceaftersigmoid""" +773 57 regularizer """no""" +773 57 optimizer """adam""" +773 57 training_loop """owa""" +773 57 negative_sampler """basic""" +773 57 evaluator """rankbased""" +773 58 dataset """wn18rr""" +773 58 model """transd""" +773 58 loss """bceaftersigmoid""" +773 58 regularizer """no""" +773 58 optimizer """adam""" +773 58 training_loop """owa""" +773 58 negative_sampler """basic""" +773 58 evaluator """rankbased""" +773 59 dataset """wn18rr""" +773 59 model """transd""" +773 59 loss """bceaftersigmoid""" +773 59 regularizer """no""" +773 59 optimizer """adam""" +773 59 training_loop """owa""" +773 59 negative_sampler """basic""" +773 59 evaluator """rankbased""" +773 60 dataset """wn18rr""" +773 60 model """transd""" +773 60 loss """bceaftersigmoid""" +773 60 regularizer """no""" +773 60 optimizer """adam""" +773 60 training_loop """owa""" +773 60 negative_sampler """basic""" +773 60 evaluator """rankbased""" +773 61 dataset 
"""wn18rr""" +773 61 model """transd""" +773 61 loss """bceaftersigmoid""" +773 61 regularizer """no""" +773 61 optimizer """adam""" +773 61 training_loop """owa""" +773 61 negative_sampler """basic""" +773 61 evaluator """rankbased""" +773 62 dataset """wn18rr""" +773 62 model """transd""" +773 62 loss """bceaftersigmoid""" +773 62 regularizer """no""" +773 62 optimizer """adam""" +773 62 training_loop """owa""" +773 62 negative_sampler """basic""" +773 62 evaluator """rankbased""" +773 63 dataset """wn18rr""" +773 63 model """transd""" +773 63 loss """bceaftersigmoid""" +773 63 regularizer """no""" +773 63 optimizer """adam""" +773 63 training_loop """owa""" +773 63 negative_sampler """basic""" +773 63 evaluator """rankbased""" +773 64 dataset """wn18rr""" +773 64 model """transd""" +773 64 loss """bceaftersigmoid""" +773 64 regularizer """no""" +773 64 optimizer """adam""" +773 64 training_loop """owa""" +773 64 negative_sampler """basic""" +773 64 evaluator """rankbased""" +773 65 dataset """wn18rr""" +773 65 model """transd""" +773 65 loss """bceaftersigmoid""" +773 65 regularizer """no""" +773 65 optimizer """adam""" +773 65 training_loop """owa""" +773 65 negative_sampler """basic""" +773 65 evaluator """rankbased""" +773 66 dataset """wn18rr""" +773 66 model """transd""" +773 66 loss """bceaftersigmoid""" +773 66 regularizer """no""" +773 66 optimizer """adam""" +773 66 training_loop """owa""" +773 66 negative_sampler """basic""" +773 66 evaluator """rankbased""" +773 67 dataset """wn18rr""" +773 67 model """transd""" +773 67 loss """bceaftersigmoid""" +773 67 regularizer """no""" +773 67 optimizer """adam""" +773 67 training_loop """owa""" +773 67 negative_sampler """basic""" +773 67 evaluator """rankbased""" +773 68 dataset """wn18rr""" +773 68 model """transd""" +773 68 loss """bceaftersigmoid""" +773 68 regularizer """no""" +773 68 optimizer """adam""" +773 68 training_loop """owa""" +773 68 negative_sampler """basic""" +773 68 evaluator """rankbased""" 
+773 69 dataset """wn18rr""" +773 69 model """transd""" +773 69 loss """bceaftersigmoid""" +773 69 regularizer """no""" +773 69 optimizer """adam""" +773 69 training_loop """owa""" +773 69 negative_sampler """basic""" +773 69 evaluator """rankbased""" +773 70 dataset """wn18rr""" +773 70 model """transd""" +773 70 loss """bceaftersigmoid""" +773 70 regularizer """no""" +773 70 optimizer """adam""" +773 70 training_loop """owa""" +773 70 negative_sampler """basic""" +773 70 evaluator """rankbased""" +773 71 dataset """wn18rr""" +773 71 model """transd""" +773 71 loss """bceaftersigmoid""" +773 71 regularizer """no""" +773 71 optimizer """adam""" +773 71 training_loop """owa""" +773 71 negative_sampler """basic""" +773 71 evaluator """rankbased""" +773 72 dataset """wn18rr""" +773 72 model """transd""" +773 72 loss """bceaftersigmoid""" +773 72 regularizer """no""" +773 72 optimizer """adam""" +773 72 training_loop """owa""" +773 72 negative_sampler """basic""" +773 72 evaluator """rankbased""" +773 73 dataset """wn18rr""" +773 73 model """transd""" +773 73 loss """bceaftersigmoid""" +773 73 regularizer """no""" +773 73 optimizer """adam""" +773 73 training_loop """owa""" +773 73 negative_sampler """basic""" +773 73 evaluator """rankbased""" +773 74 dataset """wn18rr""" +773 74 model """transd""" +773 74 loss """bceaftersigmoid""" +773 74 regularizer """no""" +773 74 optimizer """adam""" +773 74 training_loop """owa""" +773 74 negative_sampler """basic""" +773 74 evaluator """rankbased""" +773 75 dataset """wn18rr""" +773 75 model """transd""" +773 75 loss """bceaftersigmoid""" +773 75 regularizer """no""" +773 75 optimizer """adam""" +773 75 training_loop """owa""" +773 75 negative_sampler """basic""" +773 75 evaluator """rankbased""" +773 76 dataset """wn18rr""" +773 76 model """transd""" +773 76 loss """bceaftersigmoid""" +773 76 regularizer """no""" +773 76 optimizer """adam""" +773 76 training_loop """owa""" +773 76 negative_sampler """basic""" +773 76 evaluator 
"""rankbased""" +773 77 dataset """wn18rr""" +773 77 model """transd""" +773 77 loss """bceaftersigmoid""" +773 77 regularizer """no""" +773 77 optimizer """adam""" +773 77 training_loop """owa""" +773 77 negative_sampler """basic""" +773 77 evaluator """rankbased""" +773 78 dataset """wn18rr""" +773 78 model """transd""" +773 78 loss """bceaftersigmoid""" +773 78 regularizer """no""" +773 78 optimizer """adam""" +773 78 training_loop """owa""" +773 78 negative_sampler """basic""" +773 78 evaluator """rankbased""" +773 79 dataset """wn18rr""" +773 79 model """transd""" +773 79 loss """bceaftersigmoid""" +773 79 regularizer """no""" +773 79 optimizer """adam""" +773 79 training_loop """owa""" +773 79 negative_sampler """basic""" +773 79 evaluator """rankbased""" +773 80 dataset """wn18rr""" +773 80 model """transd""" +773 80 loss """bceaftersigmoid""" +773 80 regularizer """no""" +773 80 optimizer """adam""" +773 80 training_loop """owa""" +773 80 negative_sampler """basic""" +773 80 evaluator """rankbased""" +773 81 dataset """wn18rr""" +773 81 model """transd""" +773 81 loss """bceaftersigmoid""" +773 81 regularizer """no""" +773 81 optimizer """adam""" +773 81 training_loop """owa""" +773 81 negative_sampler """basic""" +773 81 evaluator """rankbased""" +774 1 model.embedding_dim 1.0 +774 1 model.relation_dim 1.0 +774 1 optimizer.lr 0.0036921534110423452 +774 1 negative_sampler.num_negs_per_pos 91.0 +774 1 training.batch_size 2.0 +774 2 model.embedding_dim 1.0 +774 2 model.relation_dim 0.0 +774 2 optimizer.lr 0.04263929656372607 +774 2 negative_sampler.num_negs_per_pos 13.0 +774 2 training.batch_size 0.0 +774 3 model.embedding_dim 0.0 +774 3 model.relation_dim 2.0 +774 3 optimizer.lr 0.003169663616122863 +774 3 negative_sampler.num_negs_per_pos 26.0 +774 3 training.batch_size 1.0 +774 4 model.embedding_dim 0.0 +774 4 model.relation_dim 2.0 +774 4 optimizer.lr 0.0560558730471072 +774 4 negative_sampler.num_negs_per_pos 80.0 +774 4 training.batch_size 0.0 +774 5 
model.embedding_dim 1.0 +774 5 model.relation_dim 2.0 +774 5 optimizer.lr 0.004919560382607988 +774 5 negative_sampler.num_negs_per_pos 30.0 +774 5 training.batch_size 0.0 +774 6 model.embedding_dim 2.0 +774 6 model.relation_dim 2.0 +774 6 optimizer.lr 0.0014972515055107641 +774 6 negative_sampler.num_negs_per_pos 55.0 +774 6 training.batch_size 1.0 +774 7 model.embedding_dim 1.0 +774 7 model.relation_dim 0.0 +774 7 optimizer.lr 0.0010122476446242225 +774 7 negative_sampler.num_negs_per_pos 24.0 +774 7 training.batch_size 2.0 +774 8 model.embedding_dim 2.0 +774 8 model.relation_dim 2.0 +774 8 optimizer.lr 0.0015936826433653004 +774 8 negative_sampler.num_negs_per_pos 55.0 +774 8 training.batch_size 0.0 +774 9 model.embedding_dim 0.0 +774 9 model.relation_dim 0.0 +774 9 optimizer.lr 0.002310427857160109 +774 9 negative_sampler.num_negs_per_pos 68.0 +774 9 training.batch_size 0.0 +774 10 model.embedding_dim 0.0 +774 10 model.relation_dim 0.0 +774 10 optimizer.lr 0.03914423198911012 +774 10 negative_sampler.num_negs_per_pos 2.0 +774 10 training.batch_size 0.0 +774 11 model.embedding_dim 0.0 +774 11 model.relation_dim 0.0 +774 11 optimizer.lr 0.002791479112783323 +774 11 negative_sampler.num_negs_per_pos 89.0 +774 11 training.batch_size 0.0 +774 12 model.embedding_dim 1.0 +774 12 model.relation_dim 2.0 +774 12 optimizer.lr 0.009623692420898729 +774 12 negative_sampler.num_negs_per_pos 40.0 +774 12 training.batch_size 1.0 +774 13 model.embedding_dim 2.0 +774 13 model.relation_dim 2.0 +774 13 optimizer.lr 0.04936490303743201 +774 13 negative_sampler.num_negs_per_pos 61.0 +774 13 training.batch_size 2.0 +774 14 model.embedding_dim 1.0 +774 14 model.relation_dim 1.0 +774 14 optimizer.lr 0.02451969595877861 +774 14 negative_sampler.num_negs_per_pos 12.0 +774 14 training.batch_size 0.0 +774 15 model.embedding_dim 0.0 +774 15 model.relation_dim 1.0 +774 15 optimizer.lr 0.0019231585489885654 +774 15 negative_sampler.num_negs_per_pos 74.0 +774 15 training.batch_size 2.0 +774 16 
model.embedding_dim 1.0 +774 16 model.relation_dim 1.0 +774 16 optimizer.lr 0.006695440818678001 +774 16 negative_sampler.num_negs_per_pos 24.0 +774 16 training.batch_size 1.0 +774 17 model.embedding_dim 0.0 +774 17 model.relation_dim 0.0 +774 17 optimizer.lr 0.001383707583481088 +774 17 negative_sampler.num_negs_per_pos 40.0 +774 17 training.batch_size 0.0 +774 18 model.embedding_dim 0.0 +774 18 model.relation_dim 1.0 +774 18 optimizer.lr 0.034079988719586965 +774 18 negative_sampler.num_negs_per_pos 5.0 +774 18 training.batch_size 2.0 +774 19 model.embedding_dim 1.0 +774 19 model.relation_dim 1.0 +774 19 optimizer.lr 0.023838429805305504 +774 19 negative_sampler.num_negs_per_pos 76.0 +774 19 training.batch_size 2.0 +774 20 model.embedding_dim 0.0 +774 20 model.relation_dim 0.0 +774 20 optimizer.lr 0.05225779418818757 +774 20 negative_sampler.num_negs_per_pos 71.0 +774 20 training.batch_size 2.0 +774 21 model.embedding_dim 1.0 +774 21 model.relation_dim 1.0 +774 21 optimizer.lr 0.003969998604528463 +774 21 negative_sampler.num_negs_per_pos 99.0 +774 21 training.batch_size 1.0 +774 22 model.embedding_dim 0.0 +774 22 model.relation_dim 0.0 +774 22 optimizer.lr 0.04406900831873826 +774 22 negative_sampler.num_negs_per_pos 56.0 +774 22 training.batch_size 0.0 +774 23 model.embedding_dim 2.0 +774 23 model.relation_dim 1.0 +774 23 optimizer.lr 0.017656610984249508 +774 23 negative_sampler.num_negs_per_pos 54.0 +774 23 training.batch_size 1.0 +774 24 model.embedding_dim 2.0 +774 24 model.relation_dim 1.0 +774 24 optimizer.lr 0.0014633388640761856 +774 24 negative_sampler.num_negs_per_pos 42.0 +774 24 training.batch_size 0.0 +774 25 model.embedding_dim 1.0 +774 25 model.relation_dim 1.0 +774 25 optimizer.lr 0.033447684919894825 +774 25 negative_sampler.num_negs_per_pos 58.0 +774 25 training.batch_size 0.0 +774 26 model.embedding_dim 2.0 +774 26 model.relation_dim 1.0 +774 26 optimizer.lr 0.0012237662113861166 +774 26 negative_sampler.num_negs_per_pos 53.0 +774 26 
training.batch_size 0.0 +774 27 model.embedding_dim 0.0 +774 27 model.relation_dim 1.0 +774 27 optimizer.lr 0.017188746997633324 +774 27 negative_sampler.num_negs_per_pos 0.0 +774 27 training.batch_size 2.0 +774 28 model.embedding_dim 1.0 +774 28 model.relation_dim 2.0 +774 28 optimizer.lr 0.0012784886708059858 +774 28 negative_sampler.num_negs_per_pos 65.0 +774 28 training.batch_size 0.0 +774 29 model.embedding_dim 2.0 +774 29 model.relation_dim 1.0 +774 29 optimizer.lr 0.006180294619697761 +774 29 negative_sampler.num_negs_per_pos 20.0 +774 29 training.batch_size 0.0 +774 30 model.embedding_dim 2.0 +774 30 model.relation_dim 1.0 +774 30 optimizer.lr 0.04522338555622012 +774 30 negative_sampler.num_negs_per_pos 63.0 +774 30 training.batch_size 0.0 +774 31 model.embedding_dim 2.0 +774 31 model.relation_dim 1.0 +774 31 optimizer.lr 0.002400510552003967 +774 31 negative_sampler.num_negs_per_pos 13.0 +774 31 training.batch_size 2.0 +774 32 model.embedding_dim 0.0 +774 32 model.relation_dim 1.0 +774 32 optimizer.lr 0.0012794892171801818 +774 32 negative_sampler.num_negs_per_pos 82.0 +774 32 training.batch_size 0.0 +774 33 model.embedding_dim 1.0 +774 33 model.relation_dim 2.0 +774 33 optimizer.lr 0.008893281767644323 +774 33 negative_sampler.num_negs_per_pos 61.0 +774 33 training.batch_size 0.0 +774 34 model.embedding_dim 2.0 +774 34 model.relation_dim 0.0 +774 34 optimizer.lr 0.014079797991360323 +774 34 negative_sampler.num_negs_per_pos 76.0 +774 34 training.batch_size 1.0 +774 35 model.embedding_dim 2.0 +774 35 model.relation_dim 1.0 +774 35 optimizer.lr 0.08479613906575506 +774 35 negative_sampler.num_negs_per_pos 65.0 +774 35 training.batch_size 2.0 +774 36 model.embedding_dim 0.0 +774 36 model.relation_dim 1.0 +774 36 optimizer.lr 0.015080306436536314 +774 36 negative_sampler.num_negs_per_pos 58.0 +774 36 training.batch_size 1.0 +774 37 model.embedding_dim 0.0 +774 37 model.relation_dim 0.0 +774 37 optimizer.lr 0.002575903527252597 +774 37 
negative_sampler.num_negs_per_pos 76.0 +774 37 training.batch_size 2.0 +774 38 model.embedding_dim 2.0 +774 38 model.relation_dim 1.0 +774 38 optimizer.lr 0.0016881374736331142 +774 38 negative_sampler.num_negs_per_pos 46.0 +774 38 training.batch_size 2.0 +774 39 model.embedding_dim 2.0 +774 39 model.relation_dim 2.0 +774 39 optimizer.lr 0.01129121951504296 +774 39 negative_sampler.num_negs_per_pos 84.0 +774 39 training.batch_size 2.0 +774 40 model.embedding_dim 1.0 +774 40 model.relation_dim 0.0 +774 40 optimizer.lr 0.03156155131948831 +774 40 negative_sampler.num_negs_per_pos 98.0 +774 40 training.batch_size 1.0 +774 41 model.embedding_dim 1.0 +774 41 model.relation_dim 1.0 +774 41 optimizer.lr 0.004007869519461367 +774 41 negative_sampler.num_negs_per_pos 68.0 +774 41 training.batch_size 2.0 +774 42 model.embedding_dim 2.0 +774 42 model.relation_dim 0.0 +774 42 optimizer.lr 0.09210744043769792 +774 42 negative_sampler.num_negs_per_pos 63.0 +774 42 training.batch_size 0.0 +774 43 model.embedding_dim 0.0 +774 43 model.relation_dim 1.0 +774 43 optimizer.lr 0.05310056631037254 +774 43 negative_sampler.num_negs_per_pos 38.0 +774 43 training.batch_size 0.0 +774 44 model.embedding_dim 0.0 +774 44 model.relation_dim 0.0 +774 44 optimizer.lr 0.06424992058909886 +774 44 negative_sampler.num_negs_per_pos 56.0 +774 44 training.batch_size 0.0 +774 45 model.embedding_dim 0.0 +774 45 model.relation_dim 1.0 +774 45 optimizer.lr 0.014736030726710035 +774 45 negative_sampler.num_negs_per_pos 98.0 +774 45 training.batch_size 0.0 +774 46 model.embedding_dim 2.0 +774 46 model.relation_dim 0.0 +774 46 optimizer.lr 0.02980582595943577 +774 46 negative_sampler.num_negs_per_pos 53.0 +774 46 training.batch_size 1.0 +774 47 model.embedding_dim 1.0 +774 47 model.relation_dim 2.0 +774 47 optimizer.lr 0.0954433263161808 +774 47 negative_sampler.num_negs_per_pos 60.0 +774 47 training.batch_size 1.0 +774 48 model.embedding_dim 1.0 +774 48 model.relation_dim 1.0 +774 48 optimizer.lr 
0.01494357066992842 +774 48 negative_sampler.num_negs_per_pos 69.0 +774 48 training.batch_size 1.0 +774 49 model.embedding_dim 2.0 +774 49 model.relation_dim 1.0 +774 49 optimizer.lr 0.07825117909059108 +774 49 negative_sampler.num_negs_per_pos 18.0 +774 49 training.batch_size 1.0 +774 50 model.embedding_dim 2.0 +774 50 model.relation_dim 0.0 +774 50 optimizer.lr 0.0021664433362975632 +774 50 negative_sampler.num_negs_per_pos 48.0 +774 50 training.batch_size 2.0 +774 51 model.embedding_dim 0.0 +774 51 model.relation_dim 0.0 +774 51 optimizer.lr 0.0557661379067399 +774 51 negative_sampler.num_negs_per_pos 28.0 +774 51 training.batch_size 1.0 +774 52 model.embedding_dim 0.0 +774 52 model.relation_dim 2.0 +774 52 optimizer.lr 0.00837668661412319 +774 52 negative_sampler.num_negs_per_pos 12.0 +774 52 training.batch_size 0.0 +774 53 model.embedding_dim 0.0 +774 53 model.relation_dim 2.0 +774 53 optimizer.lr 0.0021166157684678047 +774 53 negative_sampler.num_negs_per_pos 10.0 +774 53 training.batch_size 1.0 +774 54 model.embedding_dim 2.0 +774 54 model.relation_dim 2.0 +774 54 optimizer.lr 0.02045477544100034 +774 54 negative_sampler.num_negs_per_pos 84.0 +774 54 training.batch_size 2.0 +774 55 model.embedding_dim 0.0 +774 55 model.relation_dim 2.0 +774 55 optimizer.lr 0.03142606593803422 +774 55 negative_sampler.num_negs_per_pos 3.0 +774 55 training.batch_size 1.0 +774 56 model.embedding_dim 2.0 +774 56 model.relation_dim 2.0 +774 56 optimizer.lr 0.07346114289628464 +774 56 negative_sampler.num_negs_per_pos 11.0 +774 56 training.batch_size 2.0 +774 57 model.embedding_dim 2.0 +774 57 model.relation_dim 0.0 +774 57 optimizer.lr 0.00759953278960672 +774 57 negative_sampler.num_negs_per_pos 29.0 +774 57 training.batch_size 1.0 +774 58 model.embedding_dim 0.0 +774 58 model.relation_dim 1.0 +774 58 optimizer.lr 0.06218786008181039 +774 58 negative_sampler.num_negs_per_pos 49.0 +774 58 training.batch_size 2.0 +774 59 model.embedding_dim 2.0 +774 59 model.relation_dim 0.0 +774 
59 optimizer.lr 0.018050901062709408 +774 59 negative_sampler.num_negs_per_pos 80.0 +774 59 training.batch_size 2.0 +774 60 model.embedding_dim 2.0 +774 60 model.relation_dim 1.0 +774 60 optimizer.lr 0.0955595083893129 +774 60 negative_sampler.num_negs_per_pos 87.0 +774 60 training.batch_size 1.0 +774 61 model.embedding_dim 1.0 +774 61 model.relation_dim 0.0 +774 61 optimizer.lr 0.003954369839500921 +774 61 negative_sampler.num_negs_per_pos 45.0 +774 61 training.batch_size 2.0 +774 62 model.embedding_dim 0.0 +774 62 model.relation_dim 1.0 +774 62 optimizer.lr 0.007160942140486111 +774 62 negative_sampler.num_negs_per_pos 52.0 +774 62 training.batch_size 2.0 +774 63 model.embedding_dim 1.0 +774 63 model.relation_dim 1.0 +774 63 optimizer.lr 0.08305045574524562 +774 63 negative_sampler.num_negs_per_pos 55.0 +774 63 training.batch_size 2.0 +774 64 model.embedding_dim 1.0 +774 64 model.relation_dim 0.0 +774 64 optimizer.lr 0.011543760907772517 +774 64 negative_sampler.num_negs_per_pos 20.0 +774 64 training.batch_size 0.0 +774 65 model.embedding_dim 2.0 +774 65 model.relation_dim 1.0 +774 65 optimizer.lr 0.001336547757781906 +774 65 negative_sampler.num_negs_per_pos 31.0 +774 65 training.batch_size 2.0 +774 66 model.embedding_dim 0.0 +774 66 model.relation_dim 2.0 +774 66 optimizer.lr 0.004677717840129558 +774 66 negative_sampler.num_negs_per_pos 29.0 +774 66 training.batch_size 1.0 +774 67 model.embedding_dim 2.0 +774 67 model.relation_dim 0.0 +774 67 optimizer.lr 0.018442049198662686 +774 67 negative_sampler.num_negs_per_pos 13.0 +774 67 training.batch_size 0.0 +774 68 model.embedding_dim 0.0 +774 68 model.relation_dim 0.0 +774 68 optimizer.lr 0.023669436119372188 +774 68 negative_sampler.num_negs_per_pos 94.0 +774 68 training.batch_size 1.0 +774 69 model.embedding_dim 2.0 +774 69 model.relation_dim 2.0 +774 69 optimizer.lr 0.007416860454010311 +774 69 negative_sampler.num_negs_per_pos 81.0 +774 69 training.batch_size 2.0 +774 70 model.embedding_dim 1.0 +774 70 
model.relation_dim 0.0 +774 70 optimizer.lr 0.0034474642551160476 +774 70 negative_sampler.num_negs_per_pos 53.0 +774 70 training.batch_size 1.0 +774 71 model.embedding_dim 1.0 +774 71 model.relation_dim 0.0 +774 71 optimizer.lr 0.038460996729250824 +774 71 negative_sampler.num_negs_per_pos 47.0 +774 71 training.batch_size 1.0 +774 72 model.embedding_dim 0.0 +774 72 model.relation_dim 1.0 +774 72 optimizer.lr 0.02170884637225288 +774 72 negative_sampler.num_negs_per_pos 53.0 +774 72 training.batch_size 1.0 +774 73 model.embedding_dim 0.0 +774 73 model.relation_dim 1.0 +774 73 optimizer.lr 0.003543931867939887 +774 73 negative_sampler.num_negs_per_pos 1.0 +774 73 training.batch_size 0.0 +774 74 model.embedding_dim 2.0 +774 74 model.relation_dim 0.0 +774 74 optimizer.lr 0.06537435593792092 +774 74 negative_sampler.num_negs_per_pos 56.0 +774 74 training.batch_size 1.0 +774 75 model.embedding_dim 1.0 +774 75 model.relation_dim 0.0 +774 75 optimizer.lr 0.06345260623115163 +774 75 negative_sampler.num_negs_per_pos 99.0 +774 75 training.batch_size 0.0 +774 76 model.embedding_dim 1.0 +774 76 model.relation_dim 0.0 +774 76 optimizer.lr 0.012214429217767081 +774 76 negative_sampler.num_negs_per_pos 47.0 +774 76 training.batch_size 0.0 +774 77 model.embedding_dim 1.0 +774 77 model.relation_dim 0.0 +774 77 optimizer.lr 0.009931322364880176 +774 77 negative_sampler.num_negs_per_pos 11.0 +774 77 training.batch_size 0.0 +774 78 model.embedding_dim 2.0 +774 78 model.relation_dim 1.0 +774 78 optimizer.lr 0.024049804814098286 +774 78 negative_sampler.num_negs_per_pos 75.0 +774 78 training.batch_size 2.0 +774 79 model.embedding_dim 0.0 +774 79 model.relation_dim 1.0 +774 79 optimizer.lr 0.05442781897697725 +774 79 negative_sampler.num_negs_per_pos 18.0 +774 79 training.batch_size 1.0 +774 80 model.embedding_dim 2.0 +774 80 model.relation_dim 1.0 +774 80 optimizer.lr 0.0037810778173240113 +774 80 negative_sampler.num_negs_per_pos 52.0 +774 80 training.batch_size 1.0 +774 81 
model.embedding_dim 1.0 +774 81 model.relation_dim 0.0 +774 81 optimizer.lr 0.0027398270548658317 +774 81 negative_sampler.num_negs_per_pos 12.0 +774 81 training.batch_size 2.0 +774 82 model.embedding_dim 0.0 +774 82 model.relation_dim 2.0 +774 82 optimizer.lr 0.032050631681107444 +774 82 negative_sampler.num_negs_per_pos 54.0 +774 82 training.batch_size 2.0 +774 1 dataset """wn18rr""" +774 1 model """transd""" +774 1 loss """softplus""" +774 1 regularizer """no""" +774 1 optimizer """adam""" +774 1 training_loop """owa""" +774 1 negative_sampler """basic""" +774 1 evaluator """rankbased""" +774 2 dataset """wn18rr""" +774 2 model """transd""" +774 2 loss """softplus""" +774 2 regularizer """no""" +774 2 optimizer """adam""" +774 2 training_loop """owa""" +774 2 negative_sampler """basic""" +774 2 evaluator """rankbased""" +774 3 dataset """wn18rr""" +774 3 model """transd""" +774 3 loss """softplus""" +774 3 regularizer """no""" +774 3 optimizer """adam""" +774 3 training_loop """owa""" +774 3 negative_sampler """basic""" +774 3 evaluator """rankbased""" +774 4 dataset """wn18rr""" +774 4 model """transd""" +774 4 loss """softplus""" +774 4 regularizer """no""" +774 4 optimizer """adam""" +774 4 training_loop """owa""" +774 4 negative_sampler """basic""" +774 4 evaluator """rankbased""" +774 5 dataset """wn18rr""" +774 5 model """transd""" +774 5 loss """softplus""" +774 5 regularizer """no""" +774 5 optimizer """adam""" +774 5 training_loop """owa""" +774 5 negative_sampler """basic""" +774 5 evaluator """rankbased""" +774 6 dataset """wn18rr""" +774 6 model """transd""" +774 6 loss """softplus""" +774 6 regularizer """no""" +774 6 optimizer """adam""" +774 6 training_loop """owa""" +774 6 negative_sampler """basic""" +774 6 evaluator """rankbased""" +774 7 dataset """wn18rr""" +774 7 model """transd""" +774 7 loss """softplus""" +774 7 regularizer """no""" +774 7 optimizer """adam""" +774 7 training_loop """owa""" +774 7 negative_sampler """basic""" +774 7 
evaluator """rankbased""" +774 8 dataset """wn18rr""" +774 8 model """transd""" +774 8 loss """softplus""" +774 8 regularizer """no""" +774 8 optimizer """adam""" +774 8 training_loop """owa""" +774 8 negative_sampler """basic""" +774 8 evaluator """rankbased""" +774 9 dataset """wn18rr""" +774 9 model """transd""" +774 9 loss """softplus""" +774 9 regularizer """no""" +774 9 optimizer """adam""" +774 9 training_loop """owa""" +774 9 negative_sampler """basic""" +774 9 evaluator """rankbased""" +774 10 dataset """wn18rr""" +774 10 model """transd""" +774 10 loss """softplus""" +774 10 regularizer """no""" +774 10 optimizer """adam""" +774 10 training_loop """owa""" +774 10 negative_sampler """basic""" +774 10 evaluator """rankbased""" +774 11 dataset """wn18rr""" +774 11 model """transd""" +774 11 loss """softplus""" +774 11 regularizer """no""" +774 11 optimizer """adam""" +774 11 training_loop """owa""" +774 11 negative_sampler """basic""" +774 11 evaluator """rankbased""" +774 12 dataset """wn18rr""" +774 12 model """transd""" +774 12 loss """softplus""" +774 12 regularizer """no""" +774 12 optimizer """adam""" +774 12 training_loop """owa""" +774 12 negative_sampler """basic""" +774 12 evaluator """rankbased""" +774 13 dataset """wn18rr""" +774 13 model """transd""" +774 13 loss """softplus""" +774 13 regularizer """no""" +774 13 optimizer """adam""" +774 13 training_loop """owa""" +774 13 negative_sampler """basic""" +774 13 evaluator """rankbased""" +774 14 dataset """wn18rr""" +774 14 model """transd""" +774 14 loss """softplus""" +774 14 regularizer """no""" +774 14 optimizer """adam""" +774 14 training_loop """owa""" +774 14 negative_sampler """basic""" +774 14 evaluator """rankbased""" +774 15 dataset """wn18rr""" +774 15 model """transd""" +774 15 loss """softplus""" +774 15 regularizer """no""" +774 15 optimizer """adam""" +774 15 training_loop """owa""" +774 15 negative_sampler """basic""" +774 15 evaluator """rankbased""" +774 16 dataset """wn18rr""" 
+774 16 model """transd""" +774 16 loss """softplus""" +774 16 regularizer """no""" +774 16 optimizer """adam""" +774 16 training_loop """owa""" +774 16 negative_sampler """basic""" +774 16 evaluator """rankbased""" +774 17 dataset """wn18rr""" +774 17 model """transd""" +774 17 loss """softplus""" +774 17 regularizer """no""" +774 17 optimizer """adam""" +774 17 training_loop """owa""" +774 17 negative_sampler """basic""" +774 17 evaluator """rankbased""" +774 18 dataset """wn18rr""" +774 18 model """transd""" +774 18 loss """softplus""" +774 18 regularizer """no""" +774 18 optimizer """adam""" +774 18 training_loop """owa""" +774 18 negative_sampler """basic""" +774 18 evaluator """rankbased""" +774 19 dataset """wn18rr""" +774 19 model """transd""" +774 19 loss """softplus""" +774 19 regularizer """no""" +774 19 optimizer """adam""" +774 19 training_loop """owa""" +774 19 negative_sampler """basic""" +774 19 evaluator """rankbased""" +774 20 dataset """wn18rr""" +774 20 model """transd""" +774 20 loss """softplus""" +774 20 regularizer """no""" +774 20 optimizer """adam""" +774 20 training_loop """owa""" +774 20 negative_sampler """basic""" +774 20 evaluator """rankbased""" +774 21 dataset """wn18rr""" +774 21 model """transd""" +774 21 loss """softplus""" +774 21 regularizer """no""" +774 21 optimizer """adam""" +774 21 training_loop """owa""" +774 21 negative_sampler """basic""" +774 21 evaluator """rankbased""" +774 22 dataset """wn18rr""" +774 22 model """transd""" +774 22 loss """softplus""" +774 22 regularizer """no""" +774 22 optimizer """adam""" +774 22 training_loop """owa""" +774 22 negative_sampler """basic""" +774 22 evaluator """rankbased""" +774 23 dataset """wn18rr""" +774 23 model """transd""" +774 23 loss """softplus""" +774 23 regularizer """no""" +774 23 optimizer """adam""" +774 23 training_loop """owa""" +774 23 negative_sampler """basic""" +774 23 evaluator """rankbased""" +774 24 dataset """wn18rr""" +774 24 model """transd""" +774 24 loss 
"""softplus""" +774 24 regularizer """no""" +774 24 optimizer """adam""" +774 24 training_loop """owa""" +774 24 negative_sampler """basic""" +774 24 evaluator """rankbased""" +774 25 dataset """wn18rr""" +774 25 model """transd""" +774 25 loss """softplus""" +774 25 regularizer """no""" +774 25 optimizer """adam""" +774 25 training_loop """owa""" +774 25 negative_sampler """basic""" +774 25 evaluator """rankbased""" +774 26 dataset """wn18rr""" +774 26 model """transd""" +774 26 loss """softplus""" +774 26 regularizer """no""" +774 26 optimizer """adam""" +774 26 training_loop """owa""" +774 26 negative_sampler """basic""" +774 26 evaluator """rankbased""" +774 27 dataset """wn18rr""" +774 27 model """transd""" +774 27 loss """softplus""" +774 27 regularizer """no""" +774 27 optimizer """adam""" +774 27 training_loop """owa""" +774 27 negative_sampler """basic""" +774 27 evaluator """rankbased""" +774 28 dataset """wn18rr""" +774 28 model """transd""" +774 28 loss """softplus""" +774 28 regularizer """no""" +774 28 optimizer """adam""" +774 28 training_loop """owa""" +774 28 negative_sampler """basic""" +774 28 evaluator """rankbased""" +774 29 dataset """wn18rr""" +774 29 model """transd""" +774 29 loss """softplus""" +774 29 regularizer """no""" +774 29 optimizer """adam""" +774 29 training_loop """owa""" +774 29 negative_sampler """basic""" +774 29 evaluator """rankbased""" +774 30 dataset """wn18rr""" +774 30 model """transd""" +774 30 loss """softplus""" +774 30 regularizer """no""" +774 30 optimizer """adam""" +774 30 training_loop """owa""" +774 30 negative_sampler """basic""" +774 30 evaluator """rankbased""" +774 31 dataset """wn18rr""" +774 31 model """transd""" +774 31 loss """softplus""" +774 31 regularizer """no""" +774 31 optimizer """adam""" +774 31 training_loop """owa""" +774 31 negative_sampler """basic""" +774 31 evaluator """rankbased""" +774 32 dataset """wn18rr""" +774 32 model """transd""" +774 32 loss """softplus""" +774 32 regularizer 
"""no""" +774 32 optimizer """adam""" +774 32 training_loop """owa""" +774 32 negative_sampler """basic""" +774 32 evaluator """rankbased""" +774 33 dataset """wn18rr""" +774 33 model """transd""" +774 33 loss """softplus""" +774 33 regularizer """no""" +774 33 optimizer """adam""" +774 33 training_loop """owa""" +774 33 negative_sampler """basic""" +774 33 evaluator """rankbased""" +774 34 dataset """wn18rr""" +774 34 model """transd""" +774 34 loss """softplus""" +774 34 regularizer """no""" +774 34 optimizer """adam""" +774 34 training_loop """owa""" +774 34 negative_sampler """basic""" +774 34 evaluator """rankbased""" +774 35 dataset """wn18rr""" +774 35 model """transd""" +774 35 loss """softplus""" +774 35 regularizer """no""" +774 35 optimizer """adam""" +774 35 training_loop """owa""" +774 35 negative_sampler """basic""" +774 35 evaluator """rankbased""" +774 36 dataset """wn18rr""" +774 36 model """transd""" +774 36 loss """softplus""" +774 36 regularizer """no""" +774 36 optimizer """adam""" +774 36 training_loop """owa""" +774 36 negative_sampler """basic""" +774 36 evaluator """rankbased""" +774 37 dataset """wn18rr""" +774 37 model """transd""" +774 37 loss """softplus""" +774 37 regularizer """no""" +774 37 optimizer """adam""" +774 37 training_loop """owa""" +774 37 negative_sampler """basic""" +774 37 evaluator """rankbased""" +774 38 dataset """wn18rr""" +774 38 model """transd""" +774 38 loss """softplus""" +774 38 regularizer """no""" +774 38 optimizer """adam""" +774 38 training_loop """owa""" +774 38 negative_sampler """basic""" +774 38 evaluator """rankbased""" +774 39 dataset """wn18rr""" +774 39 model """transd""" +774 39 loss """softplus""" +774 39 regularizer """no""" +774 39 optimizer """adam""" +774 39 training_loop """owa""" +774 39 negative_sampler """basic""" +774 39 evaluator """rankbased""" +774 40 dataset """wn18rr""" +774 40 model """transd""" +774 40 loss """softplus""" +774 40 regularizer """no""" +774 40 optimizer """adam""" 
+774 40 training_loop """owa""" +774 40 negative_sampler """basic""" +774 40 evaluator """rankbased""" +774 41 dataset """wn18rr""" +774 41 model """transd""" +774 41 loss """softplus""" +774 41 regularizer """no""" +774 41 optimizer """adam""" +774 41 training_loop """owa""" +774 41 negative_sampler """basic""" +774 41 evaluator """rankbased""" +774 42 dataset """wn18rr""" +774 42 model """transd""" +774 42 loss """softplus""" +774 42 regularizer """no""" +774 42 optimizer """adam""" +774 42 training_loop """owa""" +774 42 negative_sampler """basic""" +774 42 evaluator """rankbased""" +774 43 dataset """wn18rr""" +774 43 model """transd""" +774 43 loss """softplus""" +774 43 regularizer """no""" +774 43 optimizer """adam""" +774 43 training_loop """owa""" +774 43 negative_sampler """basic""" +774 43 evaluator """rankbased""" +774 44 dataset """wn18rr""" +774 44 model """transd""" +774 44 loss """softplus""" +774 44 regularizer """no""" +774 44 optimizer """adam""" +774 44 training_loop """owa""" +774 44 negative_sampler """basic""" +774 44 evaluator """rankbased""" +774 45 dataset """wn18rr""" +774 45 model """transd""" +774 45 loss """softplus""" +774 45 regularizer """no""" +774 45 optimizer """adam""" +774 45 training_loop """owa""" +774 45 negative_sampler """basic""" +774 45 evaluator """rankbased""" +774 46 dataset """wn18rr""" +774 46 model """transd""" +774 46 loss """softplus""" +774 46 regularizer """no""" +774 46 optimizer """adam""" +774 46 training_loop """owa""" +774 46 negative_sampler """basic""" +774 46 evaluator """rankbased""" +774 47 dataset """wn18rr""" +774 47 model """transd""" +774 47 loss """softplus""" +774 47 regularizer """no""" +774 47 optimizer """adam""" +774 47 training_loop """owa""" +774 47 negative_sampler """basic""" +774 47 evaluator """rankbased""" +774 48 dataset """wn18rr""" +774 48 model """transd""" +774 48 loss """softplus""" +774 48 regularizer """no""" +774 48 optimizer """adam""" +774 48 training_loop """owa""" +774 48 
negative_sampler """basic""" +774 48 evaluator """rankbased""" +774 49 dataset """wn18rr""" +774 49 model """transd""" +774 49 loss """softplus""" +774 49 regularizer """no""" +774 49 optimizer """adam""" +774 49 training_loop """owa""" +774 49 negative_sampler """basic""" +774 49 evaluator """rankbased""" +774 50 dataset """wn18rr""" +774 50 model """transd""" +774 50 loss """softplus""" +774 50 regularizer """no""" +774 50 optimizer """adam""" +774 50 training_loop """owa""" +774 50 negative_sampler """basic""" +774 50 evaluator """rankbased""" +774 51 dataset """wn18rr""" +774 51 model """transd""" +774 51 loss """softplus""" +774 51 regularizer """no""" +774 51 optimizer """adam""" +774 51 training_loop """owa""" +774 51 negative_sampler """basic""" +774 51 evaluator """rankbased""" +774 52 dataset """wn18rr""" +774 52 model """transd""" +774 52 loss """softplus""" +774 52 regularizer """no""" +774 52 optimizer """adam""" +774 52 training_loop """owa""" +774 52 negative_sampler """basic""" +774 52 evaluator """rankbased""" +774 53 dataset """wn18rr""" +774 53 model """transd""" +774 53 loss """softplus""" +774 53 regularizer """no""" +774 53 optimizer """adam""" +774 53 training_loop """owa""" +774 53 negative_sampler """basic""" +774 53 evaluator """rankbased""" +774 54 dataset """wn18rr""" +774 54 model """transd""" +774 54 loss """softplus""" +774 54 regularizer """no""" +774 54 optimizer """adam""" +774 54 training_loop """owa""" +774 54 negative_sampler """basic""" +774 54 evaluator """rankbased""" +774 55 dataset """wn18rr""" +774 55 model """transd""" +774 55 loss """softplus""" +774 55 regularizer """no""" +774 55 optimizer """adam""" +774 55 training_loop """owa""" +774 55 negative_sampler """basic""" +774 55 evaluator """rankbased""" +774 56 dataset """wn18rr""" +774 56 model """transd""" +774 56 loss """softplus""" +774 56 regularizer """no""" +774 56 optimizer """adam""" +774 56 training_loop """owa""" +774 56 negative_sampler """basic""" +774 56 
evaluator """rankbased""" +774 57 dataset """wn18rr""" +774 57 model """transd""" +774 57 loss """softplus""" +774 57 regularizer """no""" +774 57 optimizer """adam""" +774 57 training_loop """owa""" +774 57 negative_sampler """basic""" +774 57 evaluator """rankbased""" +774 58 dataset """wn18rr""" +774 58 model """transd""" +774 58 loss """softplus""" +774 58 regularizer """no""" +774 58 optimizer """adam""" +774 58 training_loop """owa""" +774 58 negative_sampler """basic""" +774 58 evaluator """rankbased""" +774 59 dataset """wn18rr""" +774 59 model """transd""" +774 59 loss """softplus""" +774 59 regularizer """no""" +774 59 optimizer """adam""" +774 59 training_loop """owa""" +774 59 negative_sampler """basic""" +774 59 evaluator """rankbased""" +774 60 dataset """wn18rr""" +774 60 model """transd""" +774 60 loss """softplus""" +774 60 regularizer """no""" +774 60 optimizer """adam""" +774 60 training_loop """owa""" +774 60 negative_sampler """basic""" +774 60 evaluator """rankbased""" +774 61 dataset """wn18rr""" +774 61 model """transd""" +774 61 loss """softplus""" +774 61 regularizer """no""" +774 61 optimizer """adam""" +774 61 training_loop """owa""" +774 61 negative_sampler """basic""" +774 61 evaluator """rankbased""" +774 62 dataset """wn18rr""" +774 62 model """transd""" +774 62 loss """softplus""" +774 62 regularizer """no""" +774 62 optimizer """adam""" +774 62 training_loop """owa""" +774 62 negative_sampler """basic""" +774 62 evaluator """rankbased""" +774 63 dataset """wn18rr""" +774 63 model """transd""" +774 63 loss """softplus""" +774 63 regularizer """no""" +774 63 optimizer """adam""" +774 63 training_loop """owa""" +774 63 negative_sampler """basic""" +774 63 evaluator """rankbased""" +774 64 dataset """wn18rr""" +774 64 model """transd""" +774 64 loss """softplus""" +774 64 regularizer """no""" +774 64 optimizer """adam""" +774 64 training_loop """owa""" +774 64 negative_sampler """basic""" +774 64 evaluator """rankbased""" +774 65 
dataset """wn18rr""" +774 65 model """transd""" +774 65 loss """softplus""" +774 65 regularizer """no""" +774 65 optimizer """adam""" +774 65 training_loop """owa""" +774 65 negative_sampler """basic""" +774 65 evaluator """rankbased""" +774 66 dataset """wn18rr""" +774 66 model """transd""" +774 66 loss """softplus""" +774 66 regularizer """no""" +774 66 optimizer """adam""" +774 66 training_loop """owa""" +774 66 negative_sampler """basic""" +774 66 evaluator """rankbased""" +774 67 dataset """wn18rr""" +774 67 model """transd""" +774 67 loss """softplus""" +774 67 regularizer """no""" +774 67 optimizer """adam""" +774 67 training_loop """owa""" +774 67 negative_sampler """basic""" +774 67 evaluator """rankbased""" +774 68 dataset """wn18rr""" +774 68 model """transd""" +774 68 loss """softplus""" +774 68 regularizer """no""" +774 68 optimizer """adam""" +774 68 training_loop """owa""" +774 68 negative_sampler """basic""" +774 68 evaluator """rankbased""" +774 69 dataset """wn18rr""" +774 69 model """transd""" +774 69 loss """softplus""" +774 69 regularizer """no""" +774 69 optimizer """adam""" +774 69 training_loop """owa""" +774 69 negative_sampler """basic""" +774 69 evaluator """rankbased""" +774 70 dataset """wn18rr""" +774 70 model """transd""" +774 70 loss """softplus""" +774 70 regularizer """no""" +774 70 optimizer """adam""" +774 70 training_loop """owa""" +774 70 negative_sampler """basic""" +774 70 evaluator """rankbased""" +774 71 dataset """wn18rr""" +774 71 model """transd""" +774 71 loss """softplus""" +774 71 regularizer """no""" +774 71 optimizer """adam""" +774 71 training_loop """owa""" +774 71 negative_sampler """basic""" +774 71 evaluator """rankbased""" +774 72 dataset """wn18rr""" +774 72 model """transd""" +774 72 loss """softplus""" +774 72 regularizer """no""" +774 72 optimizer """adam""" +774 72 training_loop """owa""" +774 72 negative_sampler """basic""" +774 72 evaluator """rankbased""" +774 73 dataset """wn18rr""" +774 73 model 
"""transd""" +774 73 loss """softplus""" +774 73 regularizer """no""" +774 73 optimizer """adam""" +774 73 training_loop """owa""" +774 73 negative_sampler """basic""" +774 73 evaluator """rankbased""" +774 74 dataset """wn18rr""" +774 74 model """transd""" +774 74 loss """softplus""" +774 74 regularizer """no""" +774 74 optimizer """adam""" +774 74 training_loop """owa""" +774 74 negative_sampler """basic""" +774 74 evaluator """rankbased""" +774 75 dataset """wn18rr""" +774 75 model """transd""" +774 75 loss """softplus""" +774 75 regularizer """no""" +774 75 optimizer """adam""" +774 75 training_loop """owa""" +774 75 negative_sampler """basic""" +774 75 evaluator """rankbased""" +774 76 dataset """wn18rr""" +774 76 model """transd""" +774 76 loss """softplus""" +774 76 regularizer """no""" +774 76 optimizer """adam""" +774 76 training_loop """owa""" +774 76 negative_sampler """basic""" +774 76 evaluator """rankbased""" +774 77 dataset """wn18rr""" +774 77 model """transd""" +774 77 loss """softplus""" +774 77 regularizer """no""" +774 77 optimizer """adam""" +774 77 training_loop """owa""" +774 77 negative_sampler """basic""" +774 77 evaluator """rankbased""" +774 78 dataset """wn18rr""" +774 78 model """transd""" +774 78 loss """softplus""" +774 78 regularizer """no""" +774 78 optimizer """adam""" +774 78 training_loop """owa""" +774 78 negative_sampler """basic""" +774 78 evaluator """rankbased""" +774 79 dataset """wn18rr""" +774 79 model """transd""" +774 79 loss """softplus""" +774 79 regularizer """no""" +774 79 optimizer """adam""" +774 79 training_loop """owa""" +774 79 negative_sampler """basic""" +774 79 evaluator """rankbased""" +774 80 dataset """wn18rr""" +774 80 model """transd""" +774 80 loss """softplus""" +774 80 regularizer """no""" +774 80 optimizer """adam""" +774 80 training_loop """owa""" +774 80 negative_sampler """basic""" +774 80 evaluator """rankbased""" +774 81 dataset """wn18rr""" +774 81 model """transd""" +774 81 loss 
"""softplus""" +774 81 regularizer """no""" +774 81 optimizer """adam""" +774 81 training_loop """owa""" +774 81 negative_sampler """basic""" +774 81 evaluator """rankbased""" +774 82 dataset """wn18rr""" +774 82 model """transd""" +774 82 loss """softplus""" +774 82 regularizer """no""" +774 82 optimizer """adam""" +774 82 training_loop """owa""" +774 82 negative_sampler """basic""" +774 82 evaluator """rankbased""" +775 1 model.embedding_dim 1.0 +775 1 model.relation_dim 0.0 +775 1 loss.margin 11.615858892772227 +775 1 loss.adversarial_temperature 0.8147855666613885 +775 1 optimizer.lr 0.00862513043274269 +775 1 negative_sampler.num_negs_per_pos 54.0 +775 1 training.batch_size 2.0 +775 2 model.embedding_dim 1.0 +775 2 model.relation_dim 0.0 +775 2 loss.margin 22.56182585888319 +775 2 loss.adversarial_temperature 0.42495798766915216 +775 2 optimizer.lr 0.0015281179087381041 +775 2 negative_sampler.num_negs_per_pos 58.0 +775 2 training.batch_size 1.0 +775 3 model.embedding_dim 1.0 +775 3 model.relation_dim 0.0 +775 3 loss.margin 2.7038612405638047 +775 3 loss.adversarial_temperature 0.6338169727004318 +775 3 optimizer.lr 0.014884798849338124 +775 3 negative_sampler.num_negs_per_pos 14.0 +775 3 training.batch_size 2.0 +775 4 model.embedding_dim 1.0 +775 4 model.relation_dim 1.0 +775 4 loss.margin 11.342159409042019 +775 4 loss.adversarial_temperature 0.1942022603279844 +775 4 optimizer.lr 0.04576049265950804 +775 4 negative_sampler.num_negs_per_pos 25.0 +775 4 training.batch_size 1.0 +775 5 model.embedding_dim 2.0 +775 5 model.relation_dim 2.0 +775 5 loss.margin 4.134266767363548 +775 5 loss.adversarial_temperature 0.24289892613793607 +775 5 optimizer.lr 0.007446685968119542 +775 5 negative_sampler.num_negs_per_pos 55.0 +775 5 training.batch_size 2.0 +775 6 model.embedding_dim 1.0 +775 6 model.relation_dim 1.0 +775 6 loss.margin 7.137590913876868 +775 6 loss.adversarial_temperature 0.7646907337424179 +775 6 optimizer.lr 0.007396365991415703 +775 6 
negative_sampler.num_negs_per_pos 62.0 +775 6 training.batch_size 2.0 +775 7 model.embedding_dim 0.0 +775 7 model.relation_dim 2.0 +775 7 loss.margin 27.752998412560313 +775 7 loss.adversarial_temperature 0.4891328489417907 +775 7 optimizer.lr 0.04725202630836239 +775 7 negative_sampler.num_negs_per_pos 7.0 +775 7 training.batch_size 1.0 +775 8 model.embedding_dim 1.0 +775 8 model.relation_dim 1.0 +775 8 loss.margin 20.438900677802096 +775 8 loss.adversarial_temperature 0.20767072850025295 +775 8 optimizer.lr 0.09696604390450349 +775 8 negative_sampler.num_negs_per_pos 14.0 +775 8 training.batch_size 1.0 +775 9 model.embedding_dim 2.0 +775 9 model.relation_dim 1.0 +775 9 loss.margin 12.532178688391932 +775 9 loss.adversarial_temperature 0.3191139248089415 +775 9 optimizer.lr 0.017577347679427206 +775 9 negative_sampler.num_negs_per_pos 62.0 +775 9 training.batch_size 1.0 +775 10 model.embedding_dim 2.0 +775 10 model.relation_dim 0.0 +775 10 loss.margin 1.180634693153602 +775 10 loss.adversarial_temperature 0.447934681004807 +775 10 optimizer.lr 0.0549335499570913 +775 10 negative_sampler.num_negs_per_pos 41.0 +775 10 training.batch_size 0.0 +775 11 model.embedding_dim 2.0 +775 11 model.relation_dim 0.0 +775 11 loss.margin 14.337311182261075 +775 11 loss.adversarial_temperature 0.5252470986801379 +775 11 optimizer.lr 0.0019283814552369268 +775 11 negative_sampler.num_negs_per_pos 71.0 +775 11 training.batch_size 2.0 +775 12 model.embedding_dim 1.0 +775 12 model.relation_dim 2.0 +775 12 loss.margin 26.663949467097865 +775 12 loss.adversarial_temperature 0.6029916195927018 +775 12 optimizer.lr 0.005331069664070446 +775 12 negative_sampler.num_negs_per_pos 66.0 +775 12 training.batch_size 0.0 +775 13 model.embedding_dim 1.0 +775 13 model.relation_dim 2.0 +775 13 loss.margin 16.604027604836983 +775 13 loss.adversarial_temperature 0.767410396476467 +775 13 optimizer.lr 0.0013782910264197115 +775 13 negative_sampler.num_negs_per_pos 19.0 +775 13 training.batch_size 2.0 
+775 14 model.embedding_dim 1.0 +775 14 model.relation_dim 2.0 +775 14 loss.margin 1.007481824697287 +775 14 loss.adversarial_temperature 0.5921836154608449 +775 14 optimizer.lr 0.015003656490233963 +775 14 negative_sampler.num_negs_per_pos 24.0 +775 14 training.batch_size 2.0 +775 15 model.embedding_dim 2.0 +775 15 model.relation_dim 0.0 +775 15 loss.margin 5.237248777274368 +775 15 loss.adversarial_temperature 0.6066876921255464 +775 15 optimizer.lr 0.008677146946561543 +775 15 negative_sampler.num_negs_per_pos 83.0 +775 15 training.batch_size 0.0 +775 16 model.embedding_dim 1.0 +775 16 model.relation_dim 2.0 +775 16 loss.margin 28.1956654252748 +775 16 loss.adversarial_temperature 0.7555258032025333 +775 16 optimizer.lr 0.05410461769456329 +775 16 negative_sampler.num_negs_per_pos 42.0 +775 16 training.batch_size 0.0 +775 17 model.embedding_dim 2.0 +775 17 model.relation_dim 1.0 +775 17 loss.margin 13.893970615836142 +775 17 loss.adversarial_temperature 0.6725954201232025 +775 17 optimizer.lr 0.06963938850373767 +775 17 negative_sampler.num_negs_per_pos 63.0 +775 17 training.batch_size 1.0 +775 18 model.embedding_dim 2.0 +775 18 model.relation_dim 0.0 +775 18 loss.margin 20.676005228474764 +775 18 loss.adversarial_temperature 0.18803969009438057 +775 18 optimizer.lr 0.024237399210677267 +775 18 negative_sampler.num_negs_per_pos 77.0 +775 18 training.batch_size 2.0 +775 19 model.embedding_dim 1.0 +775 19 model.relation_dim 1.0 +775 19 loss.margin 29.374353016521866 +775 19 loss.adversarial_temperature 0.4241556568630039 +775 19 optimizer.lr 0.006023927077618379 +775 19 negative_sampler.num_negs_per_pos 36.0 +775 19 training.batch_size 2.0 +775 20 model.embedding_dim 2.0 +775 20 model.relation_dim 0.0 +775 20 loss.margin 8.254849058387988 +775 20 loss.adversarial_temperature 0.8880514404357807 +775 20 optimizer.lr 0.0031067934632619584 +775 20 negative_sampler.num_negs_per_pos 54.0 +775 20 training.batch_size 2.0 +775 21 model.embedding_dim 0.0 +775 21 
model.relation_dim 0.0 +775 21 loss.margin 9.144961181802955 +775 21 loss.adversarial_temperature 0.8368397446587376 +775 21 optimizer.lr 0.0865506107610798 +775 21 negative_sampler.num_negs_per_pos 40.0 +775 21 training.batch_size 0.0 +775 22 model.embedding_dim 1.0 +775 22 model.relation_dim 0.0 +775 22 loss.margin 1.0849417579349816 +775 22 loss.adversarial_temperature 0.6060107411251419 +775 22 optimizer.lr 0.0471574125625399 +775 22 negative_sampler.num_negs_per_pos 60.0 +775 22 training.batch_size 1.0 +775 23 model.embedding_dim 0.0 +775 23 model.relation_dim 1.0 +775 23 loss.margin 2.189520167071951 +775 23 loss.adversarial_temperature 0.3796186643478972 +775 23 optimizer.lr 0.005110888206697883 +775 23 negative_sampler.num_negs_per_pos 26.0 +775 23 training.batch_size 1.0 +775 24 model.embedding_dim 2.0 +775 24 model.relation_dim 0.0 +775 24 loss.margin 5.837289905825229 +775 24 loss.adversarial_temperature 0.42843473385427633 +775 24 optimizer.lr 0.03085329234609514 +775 24 negative_sampler.num_negs_per_pos 62.0 +775 24 training.batch_size 0.0 +775 25 model.embedding_dim 0.0 +775 25 model.relation_dim 1.0 +775 25 loss.margin 27.42171656075078 +775 25 loss.adversarial_temperature 0.6577744963891431 +775 25 optimizer.lr 0.0016719108475792498 +775 25 negative_sampler.num_negs_per_pos 84.0 +775 25 training.batch_size 0.0 +775 26 model.embedding_dim 2.0 +775 26 model.relation_dim 1.0 +775 26 loss.margin 2.9429946454113964 +775 26 loss.adversarial_temperature 0.4976510761933835 +775 26 optimizer.lr 0.005032673603065977 +775 26 negative_sampler.num_negs_per_pos 78.0 +775 26 training.batch_size 0.0 +775 27 model.embedding_dim 0.0 +775 27 model.relation_dim 2.0 +775 27 loss.margin 28.746655630175592 +775 27 loss.adversarial_temperature 0.4975600586858106 +775 27 optimizer.lr 0.005761907435469243 +775 27 negative_sampler.num_negs_per_pos 51.0 +775 27 training.batch_size 1.0 +775 28 model.embedding_dim 0.0 +775 28 model.relation_dim 2.0 +775 28 loss.margin 
2.845456284643431 +775 28 loss.adversarial_temperature 0.7806777657160269 +775 28 optimizer.lr 0.016262851865473462 +775 28 negative_sampler.num_negs_per_pos 83.0 +775 28 training.batch_size 2.0 +775 29 model.embedding_dim 1.0 +775 29 model.relation_dim 1.0 +775 29 loss.margin 20.75749079849937 +775 29 loss.adversarial_temperature 0.9188680030938674 +775 29 optimizer.lr 0.0021842460535036667 +775 29 negative_sampler.num_negs_per_pos 27.0 +775 29 training.batch_size 2.0 +775 30 model.embedding_dim 0.0 +775 30 model.relation_dim 0.0 +775 30 loss.margin 24.032293210759 +775 30 loss.adversarial_temperature 0.3482327036933073 +775 30 optimizer.lr 0.0014577561286162827 +775 30 negative_sampler.num_negs_per_pos 71.0 +775 30 training.batch_size 0.0 +775 31 model.embedding_dim 1.0 +775 31 model.relation_dim 1.0 +775 31 loss.margin 18.800391553984397 +775 31 loss.adversarial_temperature 0.5940783222620569 +775 31 optimizer.lr 0.01453137900977265 +775 31 negative_sampler.num_negs_per_pos 53.0 +775 31 training.batch_size 0.0 +775 32 model.embedding_dim 0.0 +775 32 model.relation_dim 0.0 +775 32 loss.margin 28.89098191648445 +775 32 loss.adversarial_temperature 0.1121511374800678 +775 32 optimizer.lr 0.09569448421961906 +775 32 negative_sampler.num_negs_per_pos 10.0 +775 32 training.batch_size 2.0 +775 33 model.embedding_dim 0.0 +775 33 model.relation_dim 2.0 +775 33 loss.margin 7.418608746874178 +775 33 loss.adversarial_temperature 0.806860517706702 +775 33 optimizer.lr 0.0010793691382787892 +775 33 negative_sampler.num_negs_per_pos 33.0 +775 33 training.batch_size 0.0 +775 34 model.embedding_dim 2.0 +775 34 model.relation_dim 0.0 +775 34 loss.margin 14.153815808986046 +775 34 loss.adversarial_temperature 0.6364261596631888 +775 34 optimizer.lr 0.020662640040221572 +775 34 negative_sampler.num_negs_per_pos 65.0 +775 34 training.batch_size 1.0 +775 35 model.embedding_dim 1.0 +775 35 model.relation_dim 1.0 +775 35 loss.margin 26.84457589019881 +775 35 
loss.adversarial_temperature 0.9098857944881859 +775 35 optimizer.lr 0.006635737107379909 +775 35 negative_sampler.num_negs_per_pos 29.0 +775 35 training.batch_size 1.0 +775 36 model.embedding_dim 2.0 +775 36 model.relation_dim 2.0 +775 36 loss.margin 21.683743301404697 +775 36 loss.adversarial_temperature 0.7483162715528745 +775 36 optimizer.lr 0.0071331311423652385 +775 36 negative_sampler.num_negs_per_pos 61.0 +775 36 training.batch_size 2.0 +775 37 model.embedding_dim 0.0 +775 37 model.relation_dim 0.0 +775 37 loss.margin 20.585928155344348 +775 37 loss.adversarial_temperature 0.7106000253239692 +775 37 optimizer.lr 0.022111329028697127 +775 37 negative_sampler.num_negs_per_pos 1.0 +775 37 training.batch_size 1.0 +775 38 model.embedding_dim 0.0 +775 38 model.relation_dim 1.0 +775 38 loss.margin 27.027310600185046 +775 38 loss.adversarial_temperature 0.9216134554005563 +775 38 optimizer.lr 0.03642160494169556 +775 38 negative_sampler.num_negs_per_pos 2.0 +775 38 training.batch_size 2.0 +775 39 model.embedding_dim 0.0 +775 39 model.relation_dim 1.0 +775 39 loss.margin 27.588806799609266 +775 39 loss.adversarial_temperature 0.7019593330306237 +775 39 optimizer.lr 0.020360974635798744 +775 39 negative_sampler.num_negs_per_pos 26.0 +775 39 training.batch_size 1.0 +775 40 model.embedding_dim 2.0 +775 40 model.relation_dim 0.0 +775 40 loss.margin 6.342557848333636 +775 40 loss.adversarial_temperature 0.6776445140093202 +775 40 optimizer.lr 0.031265134310183657 +775 40 negative_sampler.num_negs_per_pos 58.0 +775 40 training.batch_size 2.0 +775 41 model.embedding_dim 1.0 +775 41 model.relation_dim 2.0 +775 41 loss.margin 13.166664783965123 +775 41 loss.adversarial_temperature 0.43462562090942536 +775 41 optimizer.lr 0.09805390626102979 +775 41 negative_sampler.num_negs_per_pos 2.0 +775 41 training.batch_size 2.0 +775 42 model.embedding_dim 1.0 +775 42 model.relation_dim 1.0 +775 42 loss.margin 10.378206783336342 +775 42 loss.adversarial_temperature 0.20309401291394993 
+775 42 optimizer.lr 0.017569011652751366 +775 42 negative_sampler.num_negs_per_pos 73.0 +775 42 training.batch_size 1.0 +775 43 model.embedding_dim 2.0 +775 43 model.relation_dim 2.0 +775 43 loss.margin 26.213125941532656 +775 43 loss.adversarial_temperature 0.9366910216063662 +775 43 optimizer.lr 0.011321694759394159 +775 43 negative_sampler.num_negs_per_pos 57.0 +775 43 training.batch_size 2.0 +775 44 model.embedding_dim 1.0 +775 44 model.relation_dim 2.0 +775 44 loss.margin 4.592679981042761 +775 44 loss.adversarial_temperature 0.23874858069442845 +775 44 optimizer.lr 0.002731954313993087 +775 44 negative_sampler.num_negs_per_pos 46.0 +775 44 training.batch_size 1.0 +775 45 model.embedding_dim 1.0 +775 45 model.relation_dim 1.0 +775 45 loss.margin 17.635065877094412 +775 45 loss.adversarial_temperature 0.9723639719417437 +775 45 optimizer.lr 0.050212390058182885 +775 45 negative_sampler.num_negs_per_pos 41.0 +775 45 training.batch_size 1.0 +775 46 model.embedding_dim 2.0 +775 46 model.relation_dim 0.0 +775 46 loss.margin 11.093103582360117 +775 46 loss.adversarial_temperature 0.5080895593945458 +775 46 optimizer.lr 0.018310208537203002 +775 46 negative_sampler.num_negs_per_pos 15.0 +775 46 training.batch_size 0.0 +775 47 model.embedding_dim 2.0 +775 47 model.relation_dim 1.0 +775 47 loss.margin 11.526253248585437 +775 47 loss.adversarial_temperature 0.8069964145877399 +775 47 optimizer.lr 0.0015618803118032565 +775 47 negative_sampler.num_negs_per_pos 33.0 +775 47 training.batch_size 1.0 +775 48 model.embedding_dim 1.0 +775 48 model.relation_dim 1.0 +775 48 loss.margin 12.972722966790258 +775 48 loss.adversarial_temperature 0.10620482852414027 +775 48 optimizer.lr 0.024496905495952737 +775 48 negative_sampler.num_negs_per_pos 31.0 +775 48 training.batch_size 2.0 +775 49 model.embedding_dim 2.0 +775 49 model.relation_dim 0.0 +775 49 loss.margin 24.3847525226868 +775 49 loss.adversarial_temperature 0.49873835458299826 +775 49 optimizer.lr 0.006881464570488157 
+775 49 negative_sampler.num_negs_per_pos 25.0 +775 49 training.batch_size 1.0 +775 50 model.embedding_dim 1.0 +775 50 model.relation_dim 1.0 +775 50 loss.margin 24.00571276475893 +775 50 loss.adversarial_temperature 0.2147189962053049 +775 50 optimizer.lr 0.025022948052923905 +775 50 negative_sampler.num_negs_per_pos 6.0 +775 50 training.batch_size 0.0 +775 51 model.embedding_dim 0.0 +775 51 model.relation_dim 1.0 +775 51 loss.margin 26.52082022690994 +775 51 loss.adversarial_temperature 0.6853497752143769 +775 51 optimizer.lr 0.028369577524431283 +775 51 negative_sampler.num_negs_per_pos 30.0 +775 51 training.batch_size 1.0 +775 52 model.embedding_dim 0.0 +775 52 model.relation_dim 2.0 +775 52 loss.margin 2.0679507130655868 +775 52 loss.adversarial_temperature 0.2325626082383816 +775 52 optimizer.lr 0.023427911734460967 +775 52 negative_sampler.num_negs_per_pos 34.0 +775 52 training.batch_size 0.0 +775 53 model.embedding_dim 1.0 +775 53 model.relation_dim 2.0 +775 53 loss.margin 17.724740387489472 +775 53 loss.adversarial_temperature 0.45886677768397893 +775 53 optimizer.lr 0.02199649343290674 +775 53 negative_sampler.num_negs_per_pos 69.0 +775 53 training.batch_size 1.0 +775 54 model.embedding_dim 1.0 +775 54 model.relation_dim 2.0 +775 54 loss.margin 10.528592281660849 +775 54 loss.adversarial_temperature 0.10390106860373317 +775 54 optimizer.lr 0.034806719186872156 +775 54 negative_sampler.num_negs_per_pos 47.0 +775 54 training.batch_size 2.0 +775 55 model.embedding_dim 2.0 +775 55 model.relation_dim 0.0 +775 55 loss.margin 8.164164288460734 +775 55 loss.adversarial_temperature 0.8599523377581644 +775 55 optimizer.lr 0.0010412079251812653 +775 55 negative_sampler.num_negs_per_pos 19.0 +775 55 training.batch_size 1.0 +775 56 model.embedding_dim 0.0 +775 56 model.relation_dim 1.0 +775 56 loss.margin 12.284373013078355 +775 56 loss.adversarial_temperature 0.4303462203954573 +775 56 optimizer.lr 0.0250089952785784 +775 56 negative_sampler.num_negs_per_pos 13.0 
+775 56 training.batch_size 0.0 +775 57 model.embedding_dim 2.0 +775 57 model.relation_dim 2.0 +775 57 loss.margin 6.875550447462901 +775 57 loss.adversarial_temperature 0.4463630432019179 +775 57 optimizer.lr 0.0018479118248529557 +775 57 negative_sampler.num_negs_per_pos 14.0 +775 57 training.batch_size 0.0 +775 58 model.embedding_dim 0.0 +775 58 model.relation_dim 2.0 +775 58 loss.margin 24.686699868782192 +775 58 loss.adversarial_temperature 0.6605147472122779 +775 58 optimizer.lr 0.03903413425861867 +775 58 negative_sampler.num_negs_per_pos 4.0 +775 58 training.batch_size 2.0 +775 59 model.embedding_dim 2.0 +775 59 model.relation_dim 0.0 +775 59 loss.margin 8.911821575998767 +775 59 loss.adversarial_temperature 0.31841342657990224 +775 59 optimizer.lr 0.05133658643976303 +775 59 negative_sampler.num_negs_per_pos 31.0 +775 59 training.batch_size 0.0 +775 60 model.embedding_dim 1.0 +775 60 model.relation_dim 2.0 +775 60 loss.margin 17.319940115672733 +775 60 loss.adversarial_temperature 0.5661733722092162 +775 60 optimizer.lr 0.05641145129973268 +775 60 negative_sampler.num_negs_per_pos 55.0 +775 60 training.batch_size 0.0 +775 61 model.embedding_dim 0.0 +775 61 model.relation_dim 2.0 +775 61 loss.margin 18.941809935890912 +775 61 loss.adversarial_temperature 0.3644206125873893 +775 61 optimizer.lr 0.0033641721450164376 +775 61 negative_sampler.num_negs_per_pos 75.0 +775 61 training.batch_size 0.0 +775 62 model.embedding_dim 0.0 +775 62 model.relation_dim 0.0 +775 62 loss.margin 4.241300965268879 +775 62 loss.adversarial_temperature 0.9704565391648731 +775 62 optimizer.lr 0.041225445267625574 +775 62 negative_sampler.num_negs_per_pos 80.0 +775 62 training.batch_size 2.0 +775 63 model.embedding_dim 0.0 +775 63 model.relation_dim 1.0 +775 63 loss.margin 27.923532956039473 +775 63 loss.adversarial_temperature 0.9189735009526259 +775 63 optimizer.lr 0.007455078471073295 +775 63 negative_sampler.num_negs_per_pos 11.0 +775 63 training.batch_size 1.0 +775 1 dataset 
"""wn18rr""" +775 1 model """transd""" +775 1 loss """nssa""" +775 1 regularizer """no""" +775 1 optimizer """adam""" +775 1 training_loop """owa""" +775 1 negative_sampler """basic""" +775 1 evaluator """rankbased""" +775 2 dataset """wn18rr""" +775 2 model """transd""" +775 2 loss """nssa""" +775 2 regularizer """no""" +775 2 optimizer """adam""" +775 2 training_loop """owa""" +775 2 negative_sampler """basic""" +775 2 evaluator """rankbased""" +775 3 dataset """wn18rr""" +775 3 model """transd""" +775 3 loss """nssa""" +775 3 regularizer """no""" +775 3 optimizer """adam""" +775 3 training_loop """owa""" +775 3 negative_sampler """basic""" +775 3 evaluator """rankbased""" +775 4 dataset """wn18rr""" +775 4 model """transd""" +775 4 loss """nssa""" +775 4 regularizer """no""" +775 4 optimizer """adam""" +775 4 training_loop """owa""" +775 4 negative_sampler """basic""" +775 4 evaluator """rankbased""" +775 5 dataset """wn18rr""" +775 5 model """transd""" +775 5 loss """nssa""" +775 5 regularizer """no""" +775 5 optimizer """adam""" +775 5 training_loop """owa""" +775 5 negative_sampler """basic""" +775 5 evaluator """rankbased""" +775 6 dataset """wn18rr""" +775 6 model """transd""" +775 6 loss """nssa""" +775 6 regularizer """no""" +775 6 optimizer """adam""" +775 6 training_loop """owa""" +775 6 negative_sampler """basic""" +775 6 evaluator """rankbased""" +775 7 dataset """wn18rr""" +775 7 model """transd""" +775 7 loss """nssa""" +775 7 regularizer """no""" +775 7 optimizer """adam""" +775 7 training_loop """owa""" +775 7 negative_sampler """basic""" +775 7 evaluator """rankbased""" +775 8 dataset """wn18rr""" +775 8 model """transd""" +775 8 loss """nssa""" +775 8 regularizer """no""" +775 8 optimizer """adam""" +775 8 training_loop """owa""" +775 8 negative_sampler """basic""" +775 8 evaluator """rankbased""" +775 9 dataset """wn18rr""" +775 9 model """transd""" +775 9 loss """nssa""" +775 9 regularizer """no""" +775 9 optimizer """adam""" +775 9 
training_loop """owa""" +775 9 negative_sampler """basic""" +775 9 evaluator """rankbased""" +775 10 dataset """wn18rr""" +775 10 model """transd""" +775 10 loss """nssa""" +775 10 regularizer """no""" +775 10 optimizer """adam""" +775 10 training_loop """owa""" +775 10 negative_sampler """basic""" +775 10 evaluator """rankbased""" +775 11 dataset """wn18rr""" +775 11 model """transd""" +775 11 loss """nssa""" +775 11 regularizer """no""" +775 11 optimizer """adam""" +775 11 training_loop """owa""" +775 11 negative_sampler """basic""" +775 11 evaluator """rankbased""" +775 12 dataset """wn18rr""" +775 12 model """transd""" +775 12 loss """nssa""" +775 12 regularizer """no""" +775 12 optimizer """adam""" +775 12 training_loop """owa""" +775 12 negative_sampler """basic""" +775 12 evaluator """rankbased""" +775 13 dataset """wn18rr""" +775 13 model """transd""" +775 13 loss """nssa""" +775 13 regularizer """no""" +775 13 optimizer """adam""" +775 13 training_loop """owa""" +775 13 negative_sampler """basic""" +775 13 evaluator """rankbased""" +775 14 dataset """wn18rr""" +775 14 model """transd""" +775 14 loss """nssa""" +775 14 regularizer """no""" +775 14 optimizer """adam""" +775 14 training_loop """owa""" +775 14 negative_sampler """basic""" +775 14 evaluator """rankbased""" +775 15 dataset """wn18rr""" +775 15 model """transd""" +775 15 loss """nssa""" +775 15 regularizer """no""" +775 15 optimizer """adam""" +775 15 training_loop """owa""" +775 15 negative_sampler """basic""" +775 15 evaluator """rankbased""" +775 16 dataset """wn18rr""" +775 16 model """transd""" +775 16 loss """nssa""" +775 16 regularizer """no""" +775 16 optimizer """adam""" +775 16 training_loop """owa""" +775 16 negative_sampler """basic""" +775 16 evaluator """rankbased""" +775 17 dataset """wn18rr""" +775 17 model """transd""" +775 17 loss """nssa""" +775 17 regularizer """no""" +775 17 optimizer """adam""" +775 17 training_loop """owa""" +775 17 negative_sampler """basic""" +775 17 
evaluator """rankbased""" +775 18 dataset """wn18rr""" +775 18 model """transd""" +775 18 loss """nssa""" +775 18 regularizer """no""" +775 18 optimizer """adam""" +775 18 training_loop """owa""" +775 18 negative_sampler """basic""" +775 18 evaluator """rankbased""" +775 19 dataset """wn18rr""" +775 19 model """transd""" +775 19 loss """nssa""" +775 19 regularizer """no""" +775 19 optimizer """adam""" +775 19 training_loop """owa""" +775 19 negative_sampler """basic""" +775 19 evaluator """rankbased""" +775 20 dataset """wn18rr""" +775 20 model """transd""" +775 20 loss """nssa""" +775 20 regularizer """no""" +775 20 optimizer """adam""" +775 20 training_loop """owa""" +775 20 negative_sampler """basic""" +775 20 evaluator """rankbased""" +775 21 dataset """wn18rr""" +775 21 model """transd""" +775 21 loss """nssa""" +775 21 regularizer """no""" +775 21 optimizer """adam""" +775 21 training_loop """owa""" +775 21 negative_sampler """basic""" +775 21 evaluator """rankbased""" +775 22 dataset """wn18rr""" +775 22 model """transd""" +775 22 loss """nssa""" +775 22 regularizer """no""" +775 22 optimizer """adam""" +775 22 training_loop """owa""" +775 22 negative_sampler """basic""" +775 22 evaluator """rankbased""" +775 23 dataset """wn18rr""" +775 23 model """transd""" +775 23 loss """nssa""" +775 23 regularizer """no""" +775 23 optimizer """adam""" +775 23 training_loop """owa""" +775 23 negative_sampler """basic""" +775 23 evaluator """rankbased""" +775 24 dataset """wn18rr""" +775 24 model """transd""" +775 24 loss """nssa""" +775 24 regularizer """no""" +775 24 optimizer """adam""" +775 24 training_loop """owa""" +775 24 negative_sampler """basic""" +775 24 evaluator """rankbased""" +775 25 dataset """wn18rr""" +775 25 model """transd""" +775 25 loss """nssa""" +775 25 regularizer """no""" +775 25 optimizer """adam""" +775 25 training_loop """owa""" +775 25 negative_sampler """basic""" +775 25 evaluator """rankbased""" +775 26 dataset """wn18rr""" +775 26 model 
"""transd""" +775 26 loss """nssa""" +775 26 regularizer """no""" +775 26 optimizer """adam""" +775 26 training_loop """owa""" +775 26 negative_sampler """basic""" +775 26 evaluator """rankbased""" +775 27 dataset """wn18rr""" +775 27 model """transd""" +775 27 loss """nssa""" +775 27 regularizer """no""" +775 27 optimizer """adam""" +775 27 training_loop """owa""" +775 27 negative_sampler """basic""" +775 27 evaluator """rankbased""" +775 28 dataset """wn18rr""" +775 28 model """transd""" +775 28 loss """nssa""" +775 28 regularizer """no""" +775 28 optimizer """adam""" +775 28 training_loop """owa""" +775 28 negative_sampler """basic""" +775 28 evaluator """rankbased""" +775 29 dataset """wn18rr""" +775 29 model """transd""" +775 29 loss """nssa""" +775 29 regularizer """no""" +775 29 optimizer """adam""" +775 29 training_loop """owa""" +775 29 negative_sampler """basic""" +775 29 evaluator """rankbased""" +775 30 dataset """wn18rr""" +775 30 model """transd""" +775 30 loss """nssa""" +775 30 regularizer """no""" +775 30 optimizer """adam""" +775 30 training_loop """owa""" +775 30 negative_sampler """basic""" +775 30 evaluator """rankbased""" +775 31 dataset """wn18rr""" +775 31 model """transd""" +775 31 loss """nssa""" +775 31 regularizer """no""" +775 31 optimizer """adam""" +775 31 training_loop """owa""" +775 31 negative_sampler """basic""" +775 31 evaluator """rankbased""" +775 32 dataset """wn18rr""" +775 32 model """transd""" +775 32 loss """nssa""" +775 32 regularizer """no""" +775 32 optimizer """adam""" +775 32 training_loop """owa""" +775 32 negative_sampler """basic""" +775 32 evaluator """rankbased""" +775 33 dataset """wn18rr""" +775 33 model """transd""" +775 33 loss """nssa""" +775 33 regularizer """no""" +775 33 optimizer """adam""" +775 33 training_loop """owa""" +775 33 negative_sampler """basic""" +775 33 evaluator """rankbased""" +775 34 dataset """wn18rr""" +775 34 model """transd""" +775 34 loss """nssa""" +775 34 regularizer """no""" +775 
34 optimizer """adam""" +775 34 training_loop """owa""" +775 34 negative_sampler """basic""" +775 34 evaluator """rankbased""" +775 35 dataset """wn18rr""" +775 35 model """transd""" +775 35 loss """nssa""" +775 35 regularizer """no""" +775 35 optimizer """adam""" +775 35 training_loop """owa""" +775 35 negative_sampler """basic""" +775 35 evaluator """rankbased""" +775 36 dataset """wn18rr""" +775 36 model """transd""" +775 36 loss """nssa""" +775 36 regularizer """no""" +775 36 optimizer """adam""" +775 36 training_loop """owa""" +775 36 negative_sampler """basic""" +775 36 evaluator """rankbased""" +775 37 dataset """wn18rr""" +775 37 model """transd""" +775 37 loss """nssa""" +775 37 regularizer """no""" +775 37 optimizer """adam""" +775 37 training_loop """owa""" +775 37 negative_sampler """basic""" +775 37 evaluator """rankbased""" +775 38 dataset """wn18rr""" +775 38 model """transd""" +775 38 loss """nssa""" +775 38 regularizer """no""" +775 38 optimizer """adam""" +775 38 training_loop """owa""" +775 38 negative_sampler """basic""" +775 38 evaluator """rankbased""" +775 39 dataset """wn18rr""" +775 39 model """transd""" +775 39 loss """nssa""" +775 39 regularizer """no""" +775 39 optimizer """adam""" +775 39 training_loop """owa""" +775 39 negative_sampler """basic""" +775 39 evaluator """rankbased""" +775 40 dataset """wn18rr""" +775 40 model """transd""" +775 40 loss """nssa""" +775 40 regularizer """no""" +775 40 optimizer """adam""" +775 40 training_loop """owa""" +775 40 negative_sampler """basic""" +775 40 evaluator """rankbased""" +775 41 dataset """wn18rr""" +775 41 model """transd""" +775 41 loss """nssa""" +775 41 regularizer """no""" +775 41 optimizer """adam""" +775 41 training_loop """owa""" +775 41 negative_sampler """basic""" +775 41 evaluator """rankbased""" +775 42 dataset """wn18rr""" +775 42 model """transd""" +775 42 loss """nssa""" +775 42 regularizer """no""" +775 42 optimizer """adam""" +775 42 training_loop """owa""" +775 42 
negative_sampler """basic""" +775 42 evaluator """rankbased""" +775 43 dataset """wn18rr""" +775 43 model """transd""" +775 43 loss """nssa""" +775 43 regularizer """no""" +775 43 optimizer """adam""" +775 43 training_loop """owa""" +775 43 negative_sampler """basic""" +775 43 evaluator """rankbased""" +775 44 dataset """wn18rr""" +775 44 model """transd""" +775 44 loss """nssa""" +775 44 regularizer """no""" +775 44 optimizer """adam""" +775 44 training_loop """owa""" +775 44 negative_sampler """basic""" +775 44 evaluator """rankbased""" +775 45 dataset """wn18rr""" +775 45 model """transd""" +775 45 loss """nssa""" +775 45 regularizer """no""" +775 45 optimizer """adam""" +775 45 training_loop """owa""" +775 45 negative_sampler """basic""" +775 45 evaluator """rankbased""" +775 46 dataset """wn18rr""" +775 46 model """transd""" +775 46 loss """nssa""" +775 46 regularizer """no""" +775 46 optimizer """adam""" +775 46 training_loop """owa""" +775 46 negative_sampler """basic""" +775 46 evaluator """rankbased""" +775 47 dataset """wn18rr""" +775 47 model """transd""" +775 47 loss """nssa""" +775 47 regularizer """no""" +775 47 optimizer """adam""" +775 47 training_loop """owa""" +775 47 negative_sampler """basic""" +775 47 evaluator """rankbased""" +775 48 dataset """wn18rr""" +775 48 model """transd""" +775 48 loss """nssa""" +775 48 regularizer """no""" +775 48 optimizer """adam""" +775 48 training_loop """owa""" +775 48 negative_sampler """basic""" +775 48 evaluator """rankbased""" +775 49 dataset """wn18rr""" +775 49 model """transd""" +775 49 loss """nssa""" +775 49 regularizer """no""" +775 49 optimizer """adam""" +775 49 training_loop """owa""" +775 49 negative_sampler """basic""" +775 49 evaluator """rankbased""" +775 50 dataset """wn18rr""" +775 50 model """transd""" +775 50 loss """nssa""" +775 50 regularizer """no""" +775 50 optimizer """adam""" +775 50 training_loop """owa""" +775 50 negative_sampler """basic""" +775 50 evaluator """rankbased""" +775 51 
dataset """wn18rr""" +775 51 model """transd""" +775 51 loss """nssa""" +775 51 regularizer """no""" +775 51 optimizer """adam""" +775 51 training_loop """owa""" +775 51 negative_sampler """basic""" +775 51 evaluator """rankbased""" +775 52 dataset """wn18rr""" +775 52 model """transd""" +775 52 loss """nssa""" +775 52 regularizer """no""" +775 52 optimizer """adam""" +775 52 training_loop """owa""" +775 52 negative_sampler """basic""" +775 52 evaluator """rankbased""" +775 53 dataset """wn18rr""" +775 53 model """transd""" +775 53 loss """nssa""" +775 53 regularizer """no""" +775 53 optimizer """adam""" +775 53 training_loop """owa""" +775 53 negative_sampler """basic""" +775 53 evaluator """rankbased""" +775 54 dataset """wn18rr""" +775 54 model """transd""" +775 54 loss """nssa""" +775 54 regularizer """no""" +775 54 optimizer """adam""" +775 54 training_loop """owa""" +775 54 negative_sampler """basic""" +775 54 evaluator """rankbased""" +775 55 dataset """wn18rr""" +775 55 model """transd""" +775 55 loss """nssa""" +775 55 regularizer """no""" +775 55 optimizer """adam""" +775 55 training_loop """owa""" +775 55 negative_sampler """basic""" +775 55 evaluator """rankbased""" +775 56 dataset """wn18rr""" +775 56 model """transd""" +775 56 loss """nssa""" +775 56 regularizer """no""" +775 56 optimizer """adam""" +775 56 training_loop """owa""" +775 56 negative_sampler """basic""" +775 56 evaluator """rankbased""" +775 57 dataset """wn18rr""" +775 57 model """transd""" +775 57 loss """nssa""" +775 57 regularizer """no""" +775 57 optimizer """adam""" +775 57 training_loop """owa""" +775 57 negative_sampler """basic""" +775 57 evaluator """rankbased""" +775 58 dataset """wn18rr""" +775 58 model """transd""" +775 58 loss """nssa""" +775 58 regularizer """no""" +775 58 optimizer """adam""" +775 58 training_loop """owa""" +775 58 negative_sampler """basic""" +775 58 evaluator """rankbased""" +775 59 dataset """wn18rr""" +775 59 model """transd""" +775 59 loss """nssa""" 
+775 59 regularizer """no""" +775 59 optimizer """adam""" +775 59 training_loop """owa""" +775 59 negative_sampler """basic""" +775 59 evaluator """rankbased""" +775 60 dataset """wn18rr""" +775 60 model """transd""" +775 60 loss """nssa""" +775 60 regularizer """no""" +775 60 optimizer """adam""" +775 60 training_loop """owa""" +775 60 negative_sampler """basic""" +775 60 evaluator """rankbased""" +775 61 dataset """wn18rr""" +775 61 model """transd""" +775 61 loss """nssa""" +775 61 regularizer """no""" +775 61 optimizer """adam""" +775 61 training_loop """owa""" +775 61 negative_sampler """basic""" +775 61 evaluator """rankbased""" +775 62 dataset """wn18rr""" +775 62 model """transd""" +775 62 loss """nssa""" +775 62 regularizer """no""" +775 62 optimizer """adam""" +775 62 training_loop """owa""" +775 62 negative_sampler """basic""" +775 62 evaluator """rankbased""" +775 63 dataset """wn18rr""" +775 63 model """transd""" +775 63 loss """nssa""" +775 63 regularizer """no""" +775 63 optimizer """adam""" +775 63 training_loop """owa""" +775 63 negative_sampler """basic""" +775 63 evaluator """rankbased""" +776 1 model.embedding_dim 1.0 +776 1 model.relation_dim 0.0 +776 1 loss.margin 20.192474409621983 +776 1 loss.adversarial_temperature 0.48880130056815924 +776 1 optimizer.lr 0.0033672088733027556 +776 1 negative_sampler.num_negs_per_pos 99.0 +776 1 training.batch_size 1.0 +776 2 model.embedding_dim 2.0 +776 2 model.relation_dim 2.0 +776 2 loss.margin 4.58999235038025 +776 2 loss.adversarial_temperature 0.6845888761654942 +776 2 optimizer.lr 0.03605820581090481 +776 2 negative_sampler.num_negs_per_pos 49.0 +776 2 training.batch_size 1.0 +776 3 model.embedding_dim 2.0 +776 3 model.relation_dim 0.0 +776 3 loss.margin 11.527138608139111 +776 3 loss.adversarial_temperature 0.28597473324883077 +776 3 optimizer.lr 0.0726200952076266 +776 3 negative_sampler.num_negs_per_pos 51.0 +776 3 training.batch_size 1.0 +776 4 model.embedding_dim 1.0 +776 4 model.relation_dim 1.0 
+776 4 loss.margin 24.974719809461376 +776 4 loss.adversarial_temperature 0.11595987514127369 +776 4 optimizer.lr 0.02454467972868098 +776 4 negative_sampler.num_negs_per_pos 17.0 +776 4 training.batch_size 0.0 +776 5 model.embedding_dim 2.0 +776 5 model.relation_dim 0.0 +776 5 loss.margin 2.542205104531413 +776 5 loss.adversarial_temperature 0.425336028004379 +776 5 optimizer.lr 0.0018998103316522064 +776 5 negative_sampler.num_negs_per_pos 76.0 +776 5 training.batch_size 1.0 +776 6 model.embedding_dim 1.0 +776 6 model.relation_dim 2.0 +776 6 loss.margin 4.370658642103073 +776 6 loss.adversarial_temperature 0.5857712445533961 +776 6 optimizer.lr 0.014849539290670465 +776 6 negative_sampler.num_negs_per_pos 65.0 +776 6 training.batch_size 0.0 +776 7 model.embedding_dim 1.0 +776 7 model.relation_dim 1.0 +776 7 loss.margin 1.7002288377185943 +776 7 loss.adversarial_temperature 0.5422217193109227 +776 7 optimizer.lr 0.0019983684928589714 +776 7 negative_sampler.num_negs_per_pos 34.0 +776 7 training.batch_size 0.0 +776 8 model.embedding_dim 2.0 +776 8 model.relation_dim 0.0 +776 8 loss.margin 25.678268942475793 +776 8 loss.adversarial_temperature 0.5090187351518505 +776 8 optimizer.lr 0.022754424179568977 +776 8 negative_sampler.num_negs_per_pos 24.0 +776 8 training.batch_size 1.0 +776 9 model.embedding_dim 2.0 +776 9 model.relation_dim 2.0 +776 9 loss.margin 3.924956768953395 +776 9 loss.adversarial_temperature 0.965485240166641 +776 9 optimizer.lr 0.001417194279280215 +776 9 negative_sampler.num_negs_per_pos 10.0 +776 9 training.batch_size 0.0 +776 10 model.embedding_dim 1.0 +776 10 model.relation_dim 1.0 +776 10 loss.margin 17.073182660198842 +776 10 loss.adversarial_temperature 0.595290336907702 +776 10 optimizer.lr 0.040370050757616745 +776 10 negative_sampler.num_negs_per_pos 93.0 +776 10 training.batch_size 0.0 +776 11 model.embedding_dim 0.0 +776 11 model.relation_dim 2.0 +776 11 loss.margin 10.10496516914597 +776 11 loss.adversarial_temperature 
0.10507349414236919 +776 11 optimizer.lr 0.004593292591414657 +776 11 negative_sampler.num_negs_per_pos 83.0 +776 11 training.batch_size 2.0 +776 12 model.embedding_dim 0.0 +776 12 model.relation_dim 2.0 +776 12 loss.margin 21.47470003123524 +776 12 loss.adversarial_temperature 0.28755529319415785 +776 12 optimizer.lr 0.019889280610387354 +776 12 negative_sampler.num_negs_per_pos 79.0 +776 12 training.batch_size 2.0 +776 13 model.embedding_dim 0.0 +776 13 model.relation_dim 2.0 +776 13 loss.margin 26.797870633145564 +776 13 loss.adversarial_temperature 0.381700202149096 +776 13 optimizer.lr 0.0034736673859236946 +776 13 negative_sampler.num_negs_per_pos 11.0 +776 13 training.batch_size 1.0 +776 14 model.embedding_dim 1.0 +776 14 model.relation_dim 1.0 +776 14 loss.margin 23.846801878729316 +776 14 loss.adversarial_temperature 0.7901567847167567 +776 14 optimizer.lr 0.008493460455510362 +776 14 negative_sampler.num_negs_per_pos 4.0 +776 14 training.batch_size 2.0 +776 15 model.embedding_dim 1.0 +776 15 model.relation_dim 1.0 +776 15 loss.margin 12.354738328960202 +776 15 loss.adversarial_temperature 0.7510260424219221 +776 15 optimizer.lr 0.0013159028115031308 +776 15 negative_sampler.num_negs_per_pos 55.0 +776 15 training.batch_size 2.0 +776 16 model.embedding_dim 0.0 +776 16 model.relation_dim 1.0 +776 16 loss.margin 21.85861994256385 +776 16 loss.adversarial_temperature 0.4922026610048556 +776 16 optimizer.lr 0.02219245948916671 +776 16 negative_sampler.num_negs_per_pos 20.0 +776 16 training.batch_size 0.0 +776 17 model.embedding_dim 0.0 +776 17 model.relation_dim 1.0 +776 17 loss.margin 26.18597644832357 +776 17 loss.adversarial_temperature 0.6083399092286973 +776 17 optimizer.lr 0.0014482385043832918 +776 17 negative_sampler.num_negs_per_pos 3.0 +776 17 training.batch_size 1.0 +776 18 model.embedding_dim 0.0 +776 18 model.relation_dim 1.0 +776 18 loss.margin 25.671177964881515 +776 18 loss.adversarial_temperature 0.7211548124039633 +776 18 optimizer.lr 
0.01392350313919237 +776 18 negative_sampler.num_negs_per_pos 16.0 +776 18 training.batch_size 1.0 +776 19 model.embedding_dim 1.0 +776 19 model.relation_dim 2.0 +776 19 loss.margin 11.623931479813528 +776 19 loss.adversarial_temperature 0.6883732226678246 +776 19 optimizer.lr 0.0011601652865133748 +776 19 negative_sampler.num_negs_per_pos 80.0 +776 19 training.batch_size 1.0 +776 20 model.embedding_dim 0.0 +776 20 model.relation_dim 0.0 +776 20 loss.margin 10.724707752953334 +776 20 loss.adversarial_temperature 0.6779314071429962 +776 20 optimizer.lr 0.0031354830010020584 +776 20 negative_sampler.num_negs_per_pos 3.0 +776 20 training.batch_size 2.0 +776 21 model.embedding_dim 0.0 +776 21 model.relation_dim 1.0 +776 21 loss.margin 2.469447534408218 +776 21 loss.adversarial_temperature 0.8148948908604592 +776 21 optimizer.lr 0.0997593004030044 +776 21 negative_sampler.num_negs_per_pos 82.0 +776 21 training.batch_size 0.0 +776 22 model.embedding_dim 0.0 +776 22 model.relation_dim 0.0 +776 22 loss.margin 18.71979786213176 +776 22 loss.adversarial_temperature 0.6584664726404363 +776 22 optimizer.lr 0.014460527760896257 +776 22 negative_sampler.num_negs_per_pos 58.0 +776 22 training.batch_size 1.0 +776 23 model.embedding_dim 1.0 +776 23 model.relation_dim 0.0 +776 23 loss.margin 23.18071132307082 +776 23 loss.adversarial_temperature 0.6052603459477651 +776 23 optimizer.lr 0.05994752900628411 +776 23 negative_sampler.num_negs_per_pos 41.0 +776 23 training.batch_size 0.0 +776 24 model.embedding_dim 0.0 +776 24 model.relation_dim 1.0 +776 24 loss.margin 4.7798806648411505 +776 24 loss.adversarial_temperature 0.5496371435138968 +776 24 optimizer.lr 0.03437009016622512 +776 24 negative_sampler.num_negs_per_pos 36.0 +776 24 training.batch_size 2.0 +776 25 model.embedding_dim 0.0 +776 25 model.relation_dim 2.0 +776 25 loss.margin 27.138049799709922 +776 25 loss.adversarial_temperature 0.6441643902566684 +776 25 optimizer.lr 0.002451747288082349 +776 25 
negative_sampler.num_negs_per_pos 29.0 +776 25 training.batch_size 0.0 +776 26 model.embedding_dim 2.0 +776 26 model.relation_dim 1.0 +776 26 loss.margin 26.215670852220413 +776 26 loss.adversarial_temperature 0.8960176628219509 +776 26 optimizer.lr 0.008801814397929536 +776 26 negative_sampler.num_negs_per_pos 81.0 +776 26 training.batch_size 2.0 +776 27 model.embedding_dim 1.0 +776 27 model.relation_dim 2.0 +776 27 loss.margin 9.946152361759312 +776 27 loss.adversarial_temperature 0.795007147780941 +776 27 optimizer.lr 0.003262865095047945 +776 27 negative_sampler.num_negs_per_pos 90.0 +776 27 training.batch_size 1.0 +776 28 model.embedding_dim 1.0 +776 28 model.relation_dim 1.0 +776 28 loss.margin 3.597977488381146 +776 28 loss.adversarial_temperature 0.7625685255863613 +776 28 optimizer.lr 0.0011552273840838346 +776 28 negative_sampler.num_negs_per_pos 83.0 +776 28 training.batch_size 1.0 +776 29 model.embedding_dim 2.0 +776 29 model.relation_dim 0.0 +776 29 loss.margin 23.862903940744015 +776 29 loss.adversarial_temperature 0.3247272114881666 +776 29 optimizer.lr 0.00921823730971038 +776 29 negative_sampler.num_negs_per_pos 72.0 +776 29 training.batch_size 2.0 +776 30 model.embedding_dim 2.0 +776 30 model.relation_dim 0.0 +776 30 loss.margin 16.415388560294605 +776 30 loss.adversarial_temperature 0.6144577422989397 +776 30 optimizer.lr 0.0014675488535233839 +776 30 negative_sampler.num_negs_per_pos 89.0 +776 30 training.batch_size 2.0 +776 31 model.embedding_dim 2.0 +776 31 model.relation_dim 0.0 +776 31 loss.margin 22.04055628033809 +776 31 loss.adversarial_temperature 0.4365760667968586 +776 31 optimizer.lr 0.0028051425711131195 +776 31 negative_sampler.num_negs_per_pos 56.0 +776 31 training.batch_size 1.0 +776 32 model.embedding_dim 1.0 +776 32 model.relation_dim 1.0 +776 32 loss.margin 24.768918358612485 +776 32 loss.adversarial_temperature 0.7045734454777469 +776 32 optimizer.lr 0.0015185111280226538 +776 32 negative_sampler.num_negs_per_pos 16.0 +776 32 
training.batch_size 2.0 +776 33 model.embedding_dim 2.0 +776 33 model.relation_dim 0.0 +776 33 loss.margin 25.53064778869922 +776 33 loss.adversarial_temperature 0.32543635310258945 +776 33 optimizer.lr 0.00848157057455822 +776 33 negative_sampler.num_negs_per_pos 29.0 +776 33 training.batch_size 1.0 +776 34 model.embedding_dim 2.0 +776 34 model.relation_dim 2.0 +776 34 loss.margin 28.607440182493026 +776 34 loss.adversarial_temperature 0.9374970624295008 +776 34 optimizer.lr 0.009874016381095356 +776 34 negative_sampler.num_negs_per_pos 8.0 +776 34 training.batch_size 1.0 +776 35 model.embedding_dim 2.0 +776 35 model.relation_dim 2.0 +776 35 loss.margin 15.328800804050115 +776 35 loss.adversarial_temperature 0.33240236598759487 +776 35 optimizer.lr 0.004915200539561215 +776 35 negative_sampler.num_negs_per_pos 18.0 +776 35 training.batch_size 2.0 +776 36 model.embedding_dim 2.0 +776 36 model.relation_dim 2.0 +776 36 loss.margin 8.797387695822792 +776 36 loss.adversarial_temperature 0.5282707586031644 +776 36 optimizer.lr 0.006709713266348662 +776 36 negative_sampler.num_negs_per_pos 77.0 +776 36 training.batch_size 1.0 +776 37 model.embedding_dim 0.0 +776 37 model.relation_dim 1.0 +776 37 loss.margin 5.1981727275709835 +776 37 loss.adversarial_temperature 0.7306112524431352 +776 37 optimizer.lr 0.07851190941134595 +776 37 negative_sampler.num_negs_per_pos 34.0 +776 37 training.batch_size 0.0 +776 38 model.embedding_dim 2.0 +776 38 model.relation_dim 2.0 +776 38 loss.margin 25.922206741873637 +776 38 loss.adversarial_temperature 0.5799434566286098 +776 38 optimizer.lr 0.01521287748931801 +776 38 negative_sampler.num_negs_per_pos 77.0 +776 38 training.batch_size 2.0 +776 39 model.embedding_dim 1.0 +776 39 model.relation_dim 1.0 +776 39 loss.margin 29.182736062215096 +776 39 loss.adversarial_temperature 0.4466670074799148 +776 39 optimizer.lr 0.006865659496960095 +776 39 negative_sampler.num_negs_per_pos 93.0 +776 39 training.batch_size 1.0 +776 40 
model.embedding_dim 2.0 +776 40 model.relation_dim 1.0 +776 40 loss.margin 21.903998863612276 +776 40 loss.adversarial_temperature 0.4401813689739219 +776 40 optimizer.lr 0.009363514856788555 +776 40 negative_sampler.num_negs_per_pos 3.0 +776 40 training.batch_size 1.0 +776 41 model.embedding_dim 0.0 +776 41 model.relation_dim 1.0 +776 41 loss.margin 17.108335685691294 +776 41 loss.adversarial_temperature 0.8384399000960877 +776 41 optimizer.lr 0.0043304148369893245 +776 41 negative_sampler.num_negs_per_pos 90.0 +776 41 training.batch_size 0.0 +776 42 model.embedding_dim 0.0 +776 42 model.relation_dim 2.0 +776 42 loss.margin 3.4084379802356852 +776 42 loss.adversarial_temperature 0.109799524526856 +776 42 optimizer.lr 0.007271525978424704 +776 42 negative_sampler.num_negs_per_pos 25.0 +776 42 training.batch_size 2.0 +776 43 model.embedding_dim 0.0 +776 43 model.relation_dim 1.0 +776 43 loss.margin 16.26191994613367 +776 43 loss.adversarial_temperature 0.28270642419603503 +776 43 optimizer.lr 0.03552871594976318 +776 43 negative_sampler.num_negs_per_pos 34.0 +776 43 training.batch_size 2.0 +776 44 model.embedding_dim 0.0 +776 44 model.relation_dim 1.0 +776 44 loss.margin 26.857525378284418 +776 44 loss.adversarial_temperature 0.21155703829277817 +776 44 optimizer.lr 0.005740850933719595 +776 44 negative_sampler.num_negs_per_pos 73.0 +776 44 training.batch_size 2.0 +776 45 model.embedding_dim 2.0 +776 45 model.relation_dim 2.0 +776 45 loss.margin 27.98641986111533 +776 45 loss.adversarial_temperature 0.8897175710906735 +776 45 optimizer.lr 0.0034314991641434377 +776 45 negative_sampler.num_negs_per_pos 95.0 +776 45 training.batch_size 0.0 +776 46 model.embedding_dim 2.0 +776 46 model.relation_dim 0.0 +776 46 loss.margin 21.541806883874923 +776 46 loss.adversarial_temperature 0.31901994073491696 +776 46 optimizer.lr 0.07177647210260046 +776 46 negative_sampler.num_negs_per_pos 79.0 +776 46 training.batch_size 1.0 +776 47 model.embedding_dim 1.0 +776 47 
model.relation_dim 1.0 +776 47 loss.margin 11.035278331324209 +776 47 loss.adversarial_temperature 0.4950628185222045 +776 47 optimizer.lr 0.06003858084939581 +776 47 negative_sampler.num_negs_per_pos 36.0 +776 47 training.batch_size 0.0 +776 48 model.embedding_dim 1.0 +776 48 model.relation_dim 2.0 +776 48 loss.margin 20.027828818472848 +776 48 loss.adversarial_temperature 0.796336169045395 +776 48 optimizer.lr 0.011272526613416774 +776 48 negative_sampler.num_negs_per_pos 18.0 +776 48 training.batch_size 0.0 +776 49 model.embedding_dim 1.0 +776 49 model.relation_dim 1.0 +776 49 loss.margin 16.337920883067444 +776 49 loss.adversarial_temperature 0.47991097264518545 +776 49 optimizer.lr 0.014629706643285322 +776 49 negative_sampler.num_negs_per_pos 58.0 +776 49 training.batch_size 1.0 +776 50 model.embedding_dim 2.0 +776 50 model.relation_dim 0.0 +776 50 loss.margin 8.693522241738666 +776 50 loss.adversarial_temperature 0.8902849153044008 +776 50 optimizer.lr 0.027219448388392138 +776 50 negative_sampler.num_negs_per_pos 29.0 +776 50 training.batch_size 0.0 +776 51 model.embedding_dim 1.0 +776 51 model.relation_dim 1.0 +776 51 loss.margin 23.364062212373355 +776 51 loss.adversarial_temperature 0.49638324882027723 +776 51 optimizer.lr 0.023415100010964765 +776 51 negative_sampler.num_negs_per_pos 56.0 +776 51 training.batch_size 0.0 +776 52 model.embedding_dim 2.0 +776 52 model.relation_dim 2.0 +776 52 loss.margin 7.097839697403124 +776 52 loss.adversarial_temperature 0.23454480952166873 +776 52 optimizer.lr 0.03697315423722269 +776 52 negative_sampler.num_negs_per_pos 53.0 +776 52 training.batch_size 1.0 +776 53 model.embedding_dim 2.0 +776 53 model.relation_dim 2.0 +776 53 loss.margin 24.150905829774697 +776 53 loss.adversarial_temperature 0.9463186166150163 +776 53 optimizer.lr 0.001373245499675612 +776 53 negative_sampler.num_negs_per_pos 72.0 +776 53 training.batch_size 1.0 +776 54 model.embedding_dim 0.0 +776 54 model.relation_dim 1.0 +776 54 loss.margin 
28.795046000632627 +776 54 loss.adversarial_temperature 0.6254678498527328 +776 54 optimizer.lr 0.04248203094823703 +776 54 negative_sampler.num_negs_per_pos 0.0 +776 54 training.batch_size 0.0 +776 55 model.embedding_dim 1.0 +776 55 model.relation_dim 1.0 +776 55 loss.margin 14.910998126561319 +776 55 loss.adversarial_temperature 0.3666379604539287 +776 55 optimizer.lr 0.0022663814820626615 +776 55 negative_sampler.num_negs_per_pos 26.0 +776 55 training.batch_size 1.0 +776 56 model.embedding_dim 1.0 +776 56 model.relation_dim 1.0 +776 56 loss.margin 17.60935058009851 +776 56 loss.adversarial_temperature 0.12722952530906198 +776 56 optimizer.lr 0.004785959737928636 +776 56 negative_sampler.num_negs_per_pos 4.0 +776 56 training.batch_size 0.0 +776 57 model.embedding_dim 1.0 +776 57 model.relation_dim 0.0 +776 57 loss.margin 24.792031071995535 +776 57 loss.adversarial_temperature 0.5955156171403981 +776 57 optimizer.lr 0.0169278675781551 +776 57 negative_sampler.num_negs_per_pos 34.0 +776 57 training.batch_size 0.0 +776 58 model.embedding_dim 0.0 +776 58 model.relation_dim 2.0 +776 58 loss.margin 3.6455793577785776 +776 58 loss.adversarial_temperature 0.5187502036640553 +776 58 optimizer.lr 0.018640854555929634 +776 58 negative_sampler.num_negs_per_pos 1.0 +776 58 training.batch_size 2.0 +776 59 model.embedding_dim 2.0 +776 59 model.relation_dim 0.0 +776 59 loss.margin 10.043457468853212 +776 59 loss.adversarial_temperature 0.3997564186674514 +776 59 optimizer.lr 0.007691905673561352 +776 59 negative_sampler.num_negs_per_pos 84.0 +776 59 training.batch_size 1.0 +776 60 model.embedding_dim 2.0 +776 60 model.relation_dim 0.0 +776 60 loss.margin 11.732700271470518 +776 60 loss.adversarial_temperature 0.9206065373334508 +776 60 optimizer.lr 0.07508501013687174 +776 60 negative_sampler.num_negs_per_pos 8.0 +776 60 training.batch_size 2.0 +776 61 model.embedding_dim 2.0 +776 61 model.relation_dim 0.0 +776 61 loss.margin 3.330357674082847 +776 61 
loss.adversarial_temperature 0.896589613320132 +776 61 optimizer.lr 0.06196745067762068 +776 61 negative_sampler.num_negs_per_pos 90.0 +776 61 training.batch_size 2.0 +776 62 model.embedding_dim 2.0 +776 62 model.relation_dim 2.0 +776 62 loss.margin 9.73952902992353 +776 62 loss.adversarial_temperature 0.9504804453373441 +776 62 optimizer.lr 0.03646793409017446 +776 62 negative_sampler.num_negs_per_pos 1.0 +776 62 training.batch_size 0.0 +776 63 model.embedding_dim 2.0 +776 63 model.relation_dim 1.0 +776 63 loss.margin 25.09191875545837 +776 63 loss.adversarial_temperature 0.655680970754863 +776 63 optimizer.lr 0.016118734273117045 +776 63 negative_sampler.num_negs_per_pos 56.0 +776 63 training.batch_size 0.0 +776 64 model.embedding_dim 0.0 +776 64 model.relation_dim 2.0 +776 64 loss.margin 15.421448540653133 +776 64 loss.adversarial_temperature 0.17153672232063463 +776 64 optimizer.lr 0.008419095879398491 +776 64 negative_sampler.num_negs_per_pos 89.0 +776 64 training.batch_size 1.0 +776 65 model.embedding_dim 0.0 +776 65 model.relation_dim 1.0 +776 65 loss.margin 3.151583398830286 +776 65 loss.adversarial_temperature 0.92769435087966 +776 65 optimizer.lr 0.0014910682630235816 +776 65 negative_sampler.num_negs_per_pos 72.0 +776 65 training.batch_size 0.0 +776 66 model.embedding_dim 0.0 +776 66 model.relation_dim 2.0 +776 66 loss.margin 12.716984439074585 +776 66 loss.adversarial_temperature 0.6148120662952672 +776 66 optimizer.lr 0.009896872795016333 +776 66 negative_sampler.num_negs_per_pos 21.0 +776 66 training.batch_size 0.0 +776 67 model.embedding_dim 1.0 +776 67 model.relation_dim 2.0 +776 67 loss.margin 4.568081256098366 +776 67 loss.adversarial_temperature 0.7098392570668058 +776 67 optimizer.lr 0.001234984157493458 +776 67 negative_sampler.num_negs_per_pos 60.0 +776 67 training.batch_size 0.0 +776 68 model.embedding_dim 0.0 +776 68 model.relation_dim 0.0 +776 68 loss.margin 10.624310193562579 +776 68 loss.adversarial_temperature 0.11530492721936024 +776 68 
optimizer.lr 0.013130489828765248 +776 68 negative_sampler.num_negs_per_pos 72.0 +776 68 training.batch_size 1.0 +776 69 model.embedding_dim 2.0 +776 69 model.relation_dim 1.0 +776 69 loss.margin 17.155811008463335 +776 69 loss.adversarial_temperature 0.9914508760358575 +776 69 optimizer.lr 0.003331802464878749 +776 69 negative_sampler.num_negs_per_pos 82.0 +776 69 training.batch_size 0.0 +776 70 model.embedding_dim 1.0 +776 70 model.relation_dim 0.0 +776 70 loss.margin 29.52871773503237 +776 70 loss.adversarial_temperature 0.7691977820002686 +776 70 optimizer.lr 0.0048262808074240994 +776 70 negative_sampler.num_negs_per_pos 19.0 +776 70 training.batch_size 0.0 +776 71 model.embedding_dim 0.0 +776 71 model.relation_dim 1.0 +776 71 loss.margin 21.626375831537658 +776 71 loss.adversarial_temperature 0.9837288261246707 +776 71 optimizer.lr 0.0026948102234427176 +776 71 negative_sampler.num_negs_per_pos 12.0 +776 71 training.batch_size 1.0 +776 72 model.embedding_dim 1.0 +776 72 model.relation_dim 1.0 +776 72 loss.margin 28.81986318401228 +776 72 loss.adversarial_temperature 0.581663309718052 +776 72 optimizer.lr 0.0031185646202629897 +776 72 negative_sampler.num_negs_per_pos 14.0 +776 72 training.batch_size 2.0 +776 73 model.embedding_dim 1.0 +776 73 model.relation_dim 1.0 +776 73 loss.margin 26.696483233479672 +776 73 loss.adversarial_temperature 0.2991821975034788 +776 73 optimizer.lr 0.07555549326540659 +776 73 negative_sampler.num_negs_per_pos 20.0 +776 73 training.batch_size 1.0 +776 74 model.embedding_dim 1.0 +776 74 model.relation_dim 2.0 +776 74 loss.margin 17.08922646101235 +776 74 loss.adversarial_temperature 0.7608640121006585 +776 74 optimizer.lr 0.06159037458448027 +776 74 negative_sampler.num_negs_per_pos 98.0 +776 74 training.batch_size 0.0 +776 75 model.embedding_dim 2.0 +776 75 model.relation_dim 1.0 +776 75 loss.margin 14.429166672559619 +776 75 loss.adversarial_temperature 0.7961071914688838 +776 75 optimizer.lr 0.01170217022881092 +776 75 
negative_sampler.num_negs_per_pos 0.0 +776 75 training.batch_size 1.0 +776 76 model.embedding_dim 1.0 +776 76 model.relation_dim 2.0 +776 76 loss.margin 7.503993742978089 +776 76 loss.adversarial_temperature 0.2732169288347333 +776 76 optimizer.lr 0.005748637896996722 +776 76 negative_sampler.num_negs_per_pos 50.0 +776 76 training.batch_size 2.0 +776 77 model.embedding_dim 1.0 +776 77 model.relation_dim 2.0 +776 77 loss.margin 10.263802224474539 +776 77 loss.adversarial_temperature 0.5322612407443842 +776 77 optimizer.lr 0.010970736464762914 +776 77 negative_sampler.num_negs_per_pos 31.0 +776 77 training.batch_size 2.0 +776 78 model.embedding_dim 2.0 +776 78 model.relation_dim 1.0 +776 78 loss.margin 13.425534547606198 +776 78 loss.adversarial_temperature 0.2876747760165836 +776 78 optimizer.lr 0.0014756762805828707 +776 78 negative_sampler.num_negs_per_pos 75.0 +776 78 training.batch_size 2.0 +776 79 model.embedding_dim 0.0 +776 79 model.relation_dim 2.0 +776 79 loss.margin 14.519727785806605 +776 79 loss.adversarial_temperature 0.4040009449227709 +776 79 optimizer.lr 0.02301624744798664 +776 79 negative_sampler.num_negs_per_pos 30.0 +776 79 training.batch_size 0.0 +776 80 model.embedding_dim 0.0 +776 80 model.relation_dim 0.0 +776 80 loss.margin 15.200687790893388 +776 80 loss.adversarial_temperature 0.9929938188250215 +776 80 optimizer.lr 0.0015482925341100196 +776 80 negative_sampler.num_negs_per_pos 83.0 +776 80 training.batch_size 2.0 +776 81 model.embedding_dim 1.0 +776 81 model.relation_dim 1.0 +776 81 loss.margin 18.88919928487928 +776 81 loss.adversarial_temperature 0.9471007654532596 +776 81 optimizer.lr 0.06414075784244364 +776 81 negative_sampler.num_negs_per_pos 54.0 +776 81 training.batch_size 0.0 +776 82 model.embedding_dim 2.0 +776 82 model.relation_dim 0.0 +776 82 loss.margin 12.401261219420675 +776 82 loss.adversarial_temperature 0.7476166098726971 +776 82 optimizer.lr 0.0024569221085763166 +776 82 negative_sampler.num_negs_per_pos 43.0 +776 82 
training.batch_size 2.0 +776 83 model.embedding_dim 0.0 +776 83 model.relation_dim 2.0 +776 83 loss.margin 4.115737204034651 +776 83 loss.adversarial_temperature 0.8285897075601742 +776 83 optimizer.lr 0.009896473994370476 +776 83 negative_sampler.num_negs_per_pos 54.0 +776 83 training.batch_size 0.0 +776 84 model.embedding_dim 2.0 +776 84 model.relation_dim 0.0 +776 84 loss.margin 8.587529345376169 +776 84 loss.adversarial_temperature 0.21441016193637386 +776 84 optimizer.lr 0.0015360761624871168 +776 84 negative_sampler.num_negs_per_pos 26.0 +776 84 training.batch_size 2.0 +776 85 model.embedding_dim 1.0 +776 85 model.relation_dim 2.0 +776 85 loss.margin 9.65816959851431 +776 85 loss.adversarial_temperature 0.7414414756866773 +776 85 optimizer.lr 0.0024171804980560674 +776 85 negative_sampler.num_negs_per_pos 13.0 +776 85 training.batch_size 0.0 +776 86 model.embedding_dim 1.0 +776 86 model.relation_dim 0.0 +776 86 loss.margin 11.73124515107551 +776 86 loss.adversarial_temperature 0.8894272444209889 +776 86 optimizer.lr 0.009546857928761535 +776 86 negative_sampler.num_negs_per_pos 17.0 +776 86 training.batch_size 2.0 +776 87 model.embedding_dim 1.0 +776 87 model.relation_dim 2.0 +776 87 loss.margin 9.073067597545856 +776 87 loss.adversarial_temperature 0.5691793230079124 +776 87 optimizer.lr 0.0023725934987100464 +776 87 negative_sampler.num_negs_per_pos 3.0 +776 87 training.batch_size 2.0 +776 88 model.embedding_dim 1.0 +776 88 model.relation_dim 1.0 +776 88 loss.margin 20.500374345756114 +776 88 loss.adversarial_temperature 0.7835231473007531 +776 88 optimizer.lr 0.00450485835070599 +776 88 negative_sampler.num_negs_per_pos 87.0 +776 88 training.batch_size 1.0 +776 89 model.embedding_dim 0.0 +776 89 model.relation_dim 1.0 +776 89 loss.margin 1.672473764055245 +776 89 loss.adversarial_temperature 0.12779939352423605 +776 89 optimizer.lr 0.02850125202458576 +776 89 negative_sampler.num_negs_per_pos 46.0 +776 89 training.batch_size 1.0 +776 90 model.embedding_dim 
0.0 +776 90 model.relation_dim 2.0 +776 90 loss.margin 27.256849356759208 +776 90 loss.adversarial_temperature 0.8611893706279404 +776 90 optimizer.lr 0.03994698724463701 +776 90 negative_sampler.num_negs_per_pos 40.0 +776 90 training.batch_size 0.0 +776 91 model.embedding_dim 1.0 +776 91 model.relation_dim 1.0 +776 91 loss.margin 21.472353202801667 +776 91 loss.adversarial_temperature 0.2049602023846605 +776 91 optimizer.lr 0.018525650188540615 +776 91 negative_sampler.num_negs_per_pos 0.0 +776 91 training.batch_size 0.0 +776 92 model.embedding_dim 1.0 +776 92 model.relation_dim 0.0 +776 92 loss.margin 26.928775307078364 +776 92 loss.adversarial_temperature 0.19704783082388547 +776 92 optimizer.lr 0.001880981254457969 +776 92 negative_sampler.num_negs_per_pos 67.0 +776 92 training.batch_size 0.0 +776 93 model.embedding_dim 2.0 +776 93 model.relation_dim 2.0 +776 93 loss.margin 14.672685233449943 +776 93 loss.adversarial_temperature 0.2786444905724227 +776 93 optimizer.lr 0.09365772671350615 +776 93 negative_sampler.num_negs_per_pos 96.0 +776 93 training.batch_size 1.0 +776 94 model.embedding_dim 2.0 +776 94 model.relation_dim 1.0 +776 94 loss.margin 17.1846457046779 +776 94 loss.adversarial_temperature 0.4389075284310752 +776 94 optimizer.lr 0.0025687375400341326 +776 94 negative_sampler.num_negs_per_pos 94.0 +776 94 training.batch_size 1.0 +776 95 model.embedding_dim 1.0 +776 95 model.relation_dim 1.0 +776 95 loss.margin 14.407217610075904 +776 95 loss.adversarial_temperature 0.30486535341425575 +776 95 optimizer.lr 0.0012221873778060122 +776 95 negative_sampler.num_negs_per_pos 9.0 +776 95 training.batch_size 2.0 +776 96 model.embedding_dim 1.0 +776 96 model.relation_dim 1.0 +776 96 loss.margin 8.892244597379886 +776 96 loss.adversarial_temperature 0.7646217528929832 +776 96 optimizer.lr 0.061346002263420706 +776 96 negative_sampler.num_negs_per_pos 35.0 +776 96 training.batch_size 0.0 +776 97 model.embedding_dim 0.0 +776 97 model.relation_dim 0.0 +776 97 
loss.margin 8.692675493045172 +776 97 loss.adversarial_temperature 0.3734688280791867 +776 97 optimizer.lr 0.0014414807730706552 +776 97 negative_sampler.num_negs_per_pos 39.0 +776 97 training.batch_size 1.0 +776 98 model.embedding_dim 1.0 +776 98 model.relation_dim 0.0 +776 98 loss.margin 22.083604167459043 +776 98 loss.adversarial_temperature 0.7082500975949896 +776 98 optimizer.lr 0.0023882769304995914 +776 98 negative_sampler.num_negs_per_pos 33.0 +776 98 training.batch_size 2.0 +776 99 model.embedding_dim 1.0 +776 99 model.relation_dim 0.0 +776 99 loss.margin 21.229744278995888 +776 99 loss.adversarial_temperature 0.22808909252116724 +776 99 optimizer.lr 0.003743675082281017 +776 99 negative_sampler.num_negs_per_pos 25.0 +776 99 training.batch_size 0.0 +776 100 model.embedding_dim 0.0 +776 100 model.relation_dim 1.0 +776 100 loss.margin 26.30413774585697 +776 100 loss.adversarial_temperature 0.7273721183534029 +776 100 optimizer.lr 0.022898403885550006 +776 100 negative_sampler.num_negs_per_pos 69.0 +776 100 training.batch_size 2.0 +776 1 dataset """wn18rr""" +776 1 model """transd""" +776 1 loss """nssa""" +776 1 regularizer """no""" +776 1 optimizer """adam""" +776 1 training_loop """owa""" +776 1 negative_sampler """basic""" +776 1 evaluator """rankbased""" +776 2 dataset """wn18rr""" +776 2 model """transd""" +776 2 loss """nssa""" +776 2 regularizer """no""" +776 2 optimizer """adam""" +776 2 training_loop """owa""" +776 2 negative_sampler """basic""" +776 2 evaluator """rankbased""" +776 3 dataset """wn18rr""" +776 3 model """transd""" +776 3 loss """nssa""" +776 3 regularizer """no""" +776 3 optimizer """adam""" +776 3 training_loop """owa""" +776 3 negative_sampler """basic""" +776 3 evaluator """rankbased""" +776 4 dataset """wn18rr""" +776 4 model """transd""" +776 4 loss """nssa""" +776 4 regularizer """no""" +776 4 optimizer """adam""" +776 4 training_loop """owa""" +776 4 negative_sampler """basic""" +776 4 evaluator """rankbased""" +776 5 dataset 
"""wn18rr""" +776 5 model """transd""" +776 5 loss """nssa""" +776 5 regularizer """no""" +776 5 optimizer """adam""" +776 5 training_loop """owa""" +776 5 negative_sampler """basic""" +776 5 evaluator """rankbased""" +776 6 dataset """wn18rr""" +776 6 model """transd""" +776 6 loss """nssa""" +776 6 regularizer """no""" +776 6 optimizer """adam""" +776 6 training_loop """owa""" +776 6 negative_sampler """basic""" +776 6 evaluator """rankbased""" +776 7 dataset """wn18rr""" +776 7 model """transd""" +776 7 loss """nssa""" +776 7 regularizer """no""" +776 7 optimizer """adam""" +776 7 training_loop """owa""" +776 7 negative_sampler """basic""" +776 7 evaluator """rankbased""" +776 8 dataset """wn18rr""" +776 8 model """transd""" +776 8 loss """nssa""" +776 8 regularizer """no""" +776 8 optimizer """adam""" +776 8 training_loop """owa""" +776 8 negative_sampler """basic""" +776 8 evaluator """rankbased""" +776 9 dataset """wn18rr""" +776 9 model """transd""" +776 9 loss """nssa""" +776 9 regularizer """no""" +776 9 optimizer """adam""" +776 9 training_loop """owa""" +776 9 negative_sampler """basic""" +776 9 evaluator """rankbased""" +776 10 dataset """wn18rr""" +776 10 model """transd""" +776 10 loss """nssa""" +776 10 regularizer """no""" +776 10 optimizer """adam""" +776 10 training_loop """owa""" +776 10 negative_sampler """basic""" +776 10 evaluator """rankbased""" +776 11 dataset """wn18rr""" +776 11 model """transd""" +776 11 loss """nssa""" +776 11 regularizer """no""" +776 11 optimizer """adam""" +776 11 training_loop """owa""" +776 11 negative_sampler """basic""" +776 11 evaluator """rankbased""" +776 12 dataset """wn18rr""" +776 12 model """transd""" +776 12 loss """nssa""" +776 12 regularizer """no""" +776 12 optimizer """adam""" +776 12 training_loop """owa""" +776 12 negative_sampler """basic""" +776 12 evaluator """rankbased""" +776 13 dataset """wn18rr""" +776 13 model """transd""" +776 13 loss """nssa""" +776 13 regularizer """no""" +776 13 optimizer 
"""adam""" +776 13 training_loop """owa""" +776 13 negative_sampler """basic""" +776 13 evaluator """rankbased""" +776 14 dataset """wn18rr""" +776 14 model """transd""" +776 14 loss """nssa""" +776 14 regularizer """no""" +776 14 optimizer """adam""" +776 14 training_loop """owa""" +776 14 negative_sampler """basic""" +776 14 evaluator """rankbased""" +776 15 dataset """wn18rr""" +776 15 model """transd""" +776 15 loss """nssa""" +776 15 regularizer """no""" +776 15 optimizer """adam""" +776 15 training_loop """owa""" +776 15 negative_sampler """basic""" +776 15 evaluator """rankbased""" +776 16 dataset """wn18rr""" +776 16 model """transd""" +776 16 loss """nssa""" +776 16 regularizer """no""" +776 16 optimizer """adam""" +776 16 training_loop """owa""" +776 16 negative_sampler """basic""" +776 16 evaluator """rankbased""" +776 17 dataset """wn18rr""" +776 17 model """transd""" +776 17 loss """nssa""" +776 17 regularizer """no""" +776 17 optimizer """adam""" +776 17 training_loop """owa""" +776 17 negative_sampler """basic""" +776 17 evaluator """rankbased""" +776 18 dataset """wn18rr""" +776 18 model """transd""" +776 18 loss """nssa""" +776 18 regularizer """no""" +776 18 optimizer """adam""" +776 18 training_loop """owa""" +776 18 negative_sampler """basic""" +776 18 evaluator """rankbased""" +776 19 dataset """wn18rr""" +776 19 model """transd""" +776 19 loss """nssa""" +776 19 regularizer """no""" +776 19 optimizer """adam""" +776 19 training_loop """owa""" +776 19 negative_sampler """basic""" +776 19 evaluator """rankbased""" +776 20 dataset """wn18rr""" +776 20 model """transd""" +776 20 loss """nssa""" +776 20 regularizer """no""" +776 20 optimizer """adam""" +776 20 training_loop """owa""" +776 20 negative_sampler """basic""" +776 20 evaluator """rankbased""" +776 21 dataset """wn18rr""" +776 21 model """transd""" +776 21 loss """nssa""" +776 21 regularizer """no""" +776 21 optimizer """adam""" +776 21 training_loop """owa""" +776 21 negative_sampler 
"""basic""" +776 21 evaluator """rankbased""" +776 22 dataset """wn18rr""" +776 22 model """transd""" +776 22 loss """nssa""" +776 22 regularizer """no""" +776 22 optimizer """adam""" +776 22 training_loop """owa""" +776 22 negative_sampler """basic""" +776 22 evaluator """rankbased""" +776 23 dataset """wn18rr""" +776 23 model """transd""" +776 23 loss """nssa""" +776 23 regularizer """no""" +776 23 optimizer """adam""" +776 23 training_loop """owa""" +776 23 negative_sampler """basic""" +776 23 evaluator """rankbased""" +776 24 dataset """wn18rr""" +776 24 model """transd""" +776 24 loss """nssa""" +776 24 regularizer """no""" +776 24 optimizer """adam""" +776 24 training_loop """owa""" +776 24 negative_sampler """basic""" +776 24 evaluator """rankbased""" +776 25 dataset """wn18rr""" +776 25 model """transd""" +776 25 loss """nssa""" +776 25 regularizer """no""" +776 25 optimizer """adam""" +776 25 training_loop """owa""" +776 25 negative_sampler """basic""" +776 25 evaluator """rankbased""" +776 26 dataset """wn18rr""" +776 26 model """transd""" +776 26 loss """nssa""" +776 26 regularizer """no""" +776 26 optimizer """adam""" +776 26 training_loop """owa""" +776 26 negative_sampler """basic""" +776 26 evaluator """rankbased""" +776 27 dataset """wn18rr""" +776 27 model """transd""" +776 27 loss """nssa""" +776 27 regularizer """no""" +776 27 optimizer """adam""" +776 27 training_loop """owa""" +776 27 negative_sampler """basic""" +776 27 evaluator """rankbased""" +776 28 dataset """wn18rr""" +776 28 model """transd""" +776 28 loss """nssa""" +776 28 regularizer """no""" +776 28 optimizer """adam""" +776 28 training_loop """owa""" +776 28 negative_sampler """basic""" +776 28 evaluator """rankbased""" +776 29 dataset """wn18rr""" +776 29 model """transd""" +776 29 loss """nssa""" +776 29 regularizer """no""" +776 29 optimizer """adam""" +776 29 training_loop """owa""" +776 29 negative_sampler """basic""" +776 29 evaluator """rankbased""" +776 30 dataset 
"""wn18rr""" +776 30 model """transd""" +776 30 loss """nssa""" +776 30 regularizer """no""" +776 30 optimizer """adam""" +776 30 training_loop """owa""" +776 30 negative_sampler """basic""" +776 30 evaluator """rankbased""" +776 31 dataset """wn18rr""" +776 31 model """transd""" +776 31 loss """nssa""" +776 31 regularizer """no""" +776 31 optimizer """adam""" +776 31 training_loop """owa""" +776 31 negative_sampler """basic""" +776 31 evaluator """rankbased""" +776 32 dataset """wn18rr""" +776 32 model """transd""" +776 32 loss """nssa""" +776 32 regularizer """no""" +776 32 optimizer """adam""" +776 32 training_loop """owa""" +776 32 negative_sampler """basic""" +776 32 evaluator """rankbased""" +776 33 dataset """wn18rr""" +776 33 model """transd""" +776 33 loss """nssa""" +776 33 regularizer """no""" +776 33 optimizer """adam""" +776 33 training_loop """owa""" +776 33 negative_sampler """basic""" +776 33 evaluator """rankbased""" +776 34 dataset """wn18rr""" +776 34 model """transd""" +776 34 loss """nssa""" +776 34 regularizer """no""" +776 34 optimizer """adam""" +776 34 training_loop """owa""" +776 34 negative_sampler """basic""" +776 34 evaluator """rankbased""" +776 35 dataset """wn18rr""" +776 35 model """transd""" +776 35 loss """nssa""" +776 35 regularizer """no""" +776 35 optimizer """adam""" +776 35 training_loop """owa""" +776 35 negative_sampler """basic""" +776 35 evaluator """rankbased""" +776 36 dataset """wn18rr""" +776 36 model """transd""" +776 36 loss """nssa""" +776 36 regularizer """no""" +776 36 optimizer """adam""" +776 36 training_loop """owa""" +776 36 negative_sampler """basic""" +776 36 evaluator """rankbased""" +776 37 dataset """wn18rr""" +776 37 model """transd""" +776 37 loss """nssa""" +776 37 regularizer """no""" +776 37 optimizer """adam""" +776 37 training_loop """owa""" +776 37 negative_sampler """basic""" +776 37 evaluator """rankbased""" +776 38 dataset """wn18rr""" +776 38 model """transd""" +776 38 loss """nssa""" +776 38 
regularizer """no""" +776 38 optimizer """adam""" +776 38 training_loop """owa""" +776 38 negative_sampler """basic""" +776 38 evaluator """rankbased""" +776 39 dataset """wn18rr""" +776 39 model """transd""" +776 39 loss """nssa""" +776 39 regularizer """no""" +776 39 optimizer """adam""" +776 39 training_loop """owa""" +776 39 negative_sampler """basic""" +776 39 evaluator """rankbased""" +776 40 dataset """wn18rr""" +776 40 model """transd""" +776 40 loss """nssa""" +776 40 regularizer """no""" +776 40 optimizer """adam""" +776 40 training_loop """owa""" +776 40 negative_sampler """basic""" +776 40 evaluator """rankbased""" +776 41 dataset """wn18rr""" +776 41 model """transd""" +776 41 loss """nssa""" +776 41 regularizer """no""" +776 41 optimizer """adam""" +776 41 training_loop """owa""" +776 41 negative_sampler """basic""" +776 41 evaluator """rankbased""" +776 42 dataset """wn18rr""" +776 42 model """transd""" +776 42 loss """nssa""" +776 42 regularizer """no""" +776 42 optimizer """adam""" +776 42 training_loop """owa""" +776 42 negative_sampler """basic""" +776 42 evaluator """rankbased""" +776 43 dataset """wn18rr""" +776 43 model """transd""" +776 43 loss """nssa""" +776 43 regularizer """no""" +776 43 optimizer """adam""" +776 43 training_loop """owa""" +776 43 negative_sampler """basic""" +776 43 evaluator """rankbased""" +776 44 dataset """wn18rr""" +776 44 model """transd""" +776 44 loss """nssa""" +776 44 regularizer """no""" +776 44 optimizer """adam""" +776 44 training_loop """owa""" +776 44 negative_sampler """basic""" +776 44 evaluator """rankbased""" +776 45 dataset """wn18rr""" +776 45 model """transd""" +776 45 loss """nssa""" +776 45 regularizer """no""" +776 45 optimizer """adam""" +776 45 training_loop """owa""" +776 45 negative_sampler """basic""" +776 45 evaluator """rankbased""" +776 46 dataset """wn18rr""" +776 46 model """transd""" +776 46 loss """nssa""" +776 46 regularizer """no""" +776 46 optimizer """adam""" +776 46 training_loop 
"""owa""" +776 46 negative_sampler """basic""" +776 46 evaluator """rankbased""" +776 47 dataset """wn18rr""" +776 47 model """transd""" +776 47 loss """nssa""" +776 47 regularizer """no""" +776 47 optimizer """adam""" +776 47 training_loop """owa""" +776 47 negative_sampler """basic""" +776 47 evaluator """rankbased""" +776 48 dataset """wn18rr""" +776 48 model """transd""" +776 48 loss """nssa""" +776 48 regularizer """no""" +776 48 optimizer """adam""" +776 48 training_loop """owa""" +776 48 negative_sampler """basic""" +776 48 evaluator """rankbased""" +776 49 dataset """wn18rr""" +776 49 model """transd""" +776 49 loss """nssa""" +776 49 regularizer """no""" +776 49 optimizer """adam""" +776 49 training_loop """owa""" +776 49 negative_sampler """basic""" +776 49 evaluator """rankbased""" +776 50 dataset """wn18rr""" +776 50 model """transd""" +776 50 loss """nssa""" +776 50 regularizer """no""" +776 50 optimizer """adam""" +776 50 training_loop """owa""" +776 50 negative_sampler """basic""" +776 50 evaluator """rankbased""" +776 51 dataset """wn18rr""" +776 51 model """transd""" +776 51 loss """nssa""" +776 51 regularizer """no""" +776 51 optimizer """adam""" +776 51 training_loop """owa""" +776 51 negative_sampler """basic""" +776 51 evaluator """rankbased""" +776 52 dataset """wn18rr""" +776 52 model """transd""" +776 52 loss """nssa""" +776 52 regularizer """no""" +776 52 optimizer """adam""" +776 52 training_loop """owa""" +776 52 negative_sampler """basic""" +776 52 evaluator """rankbased""" +776 53 dataset """wn18rr""" +776 53 model """transd""" +776 53 loss """nssa""" +776 53 regularizer """no""" +776 53 optimizer """adam""" +776 53 training_loop """owa""" +776 53 negative_sampler """basic""" +776 53 evaluator """rankbased""" +776 54 dataset """wn18rr""" +776 54 model """transd""" +776 54 loss """nssa""" +776 54 regularizer """no""" +776 54 optimizer """adam""" +776 54 training_loop """owa""" +776 54 negative_sampler """basic""" +776 54 evaluator 
"""rankbased""" +776 55 dataset """wn18rr""" +776 55 model """transd""" +776 55 loss """nssa""" +776 55 regularizer """no""" +776 55 optimizer """adam""" +776 55 training_loop """owa""" +776 55 negative_sampler """basic""" +776 55 evaluator """rankbased""" +776 56 dataset """wn18rr""" +776 56 model """transd""" +776 56 loss """nssa""" +776 56 regularizer """no""" +776 56 optimizer """adam""" +776 56 training_loop """owa""" +776 56 negative_sampler """basic""" +776 56 evaluator """rankbased""" +776 57 dataset """wn18rr""" +776 57 model """transd""" +776 57 loss """nssa""" +776 57 regularizer """no""" +776 57 optimizer """adam""" +776 57 training_loop """owa""" +776 57 negative_sampler """basic""" +776 57 evaluator """rankbased""" +776 58 dataset """wn18rr""" +776 58 model """transd""" +776 58 loss """nssa""" +776 58 regularizer """no""" +776 58 optimizer """adam""" +776 58 training_loop """owa""" +776 58 negative_sampler """basic""" +776 58 evaluator """rankbased""" +776 59 dataset """wn18rr""" +776 59 model """transd""" +776 59 loss """nssa""" +776 59 regularizer """no""" +776 59 optimizer """adam""" +776 59 training_loop """owa""" +776 59 negative_sampler """basic""" +776 59 evaluator """rankbased""" +776 60 dataset """wn18rr""" +776 60 model """transd""" +776 60 loss """nssa""" +776 60 regularizer """no""" +776 60 optimizer """adam""" +776 60 training_loop """owa""" +776 60 negative_sampler """basic""" +776 60 evaluator """rankbased""" +776 61 dataset """wn18rr""" +776 61 model """transd""" +776 61 loss """nssa""" +776 61 regularizer """no""" +776 61 optimizer """adam""" +776 61 training_loop """owa""" +776 61 negative_sampler """basic""" +776 61 evaluator """rankbased""" +776 62 dataset """wn18rr""" +776 62 model """transd""" +776 62 loss """nssa""" +776 62 regularizer """no""" +776 62 optimizer """adam""" +776 62 training_loop """owa""" +776 62 negative_sampler """basic""" +776 62 evaluator """rankbased""" +776 63 dataset """wn18rr""" +776 63 model """transd""" 
+776 63 loss """nssa""" +776 63 regularizer """no""" +776 63 optimizer """adam""" +776 63 training_loop """owa""" +776 63 negative_sampler """basic""" +776 63 evaluator """rankbased""" +776 64 dataset """wn18rr""" +776 64 model """transd""" +776 64 loss """nssa""" +776 64 regularizer """no""" +776 64 optimizer """adam""" +776 64 training_loop """owa""" +776 64 negative_sampler """basic""" +776 64 evaluator """rankbased""" +776 65 dataset """wn18rr""" +776 65 model """transd""" +776 65 loss """nssa""" +776 65 regularizer """no""" +776 65 optimizer """adam""" +776 65 training_loop """owa""" +776 65 negative_sampler """basic""" +776 65 evaluator """rankbased""" +776 66 dataset """wn18rr""" +776 66 model """transd""" +776 66 loss """nssa""" +776 66 regularizer """no""" +776 66 optimizer """adam""" +776 66 training_loop """owa""" +776 66 negative_sampler """basic""" +776 66 evaluator """rankbased""" +776 67 dataset """wn18rr""" +776 67 model """transd""" +776 67 loss """nssa""" +776 67 regularizer """no""" +776 67 optimizer """adam""" +776 67 training_loop """owa""" +776 67 negative_sampler """basic""" +776 67 evaluator """rankbased""" +776 68 dataset """wn18rr""" +776 68 model """transd""" +776 68 loss """nssa""" +776 68 regularizer """no""" +776 68 optimizer """adam""" +776 68 training_loop """owa""" +776 68 negative_sampler """basic""" +776 68 evaluator """rankbased""" +776 69 dataset """wn18rr""" +776 69 model """transd""" +776 69 loss """nssa""" +776 69 regularizer """no""" +776 69 optimizer """adam""" +776 69 training_loop """owa""" +776 69 negative_sampler """basic""" +776 69 evaluator """rankbased""" +776 70 dataset """wn18rr""" +776 70 model """transd""" +776 70 loss """nssa""" +776 70 regularizer """no""" +776 70 optimizer """adam""" +776 70 training_loop """owa""" +776 70 negative_sampler """basic""" +776 70 evaluator """rankbased""" +776 71 dataset """wn18rr""" +776 71 model """transd""" +776 71 loss """nssa""" +776 71 regularizer """no""" +776 71 optimizer 
"""adam""" +776 71 training_loop """owa""" +776 71 negative_sampler """basic""" +776 71 evaluator """rankbased""" +776 72 dataset """wn18rr""" +776 72 model """transd""" +776 72 loss """nssa""" +776 72 regularizer """no""" +776 72 optimizer """adam""" +776 72 training_loop """owa""" +776 72 negative_sampler """basic""" +776 72 evaluator """rankbased""" +776 73 dataset """wn18rr""" +776 73 model """transd""" +776 73 loss """nssa""" +776 73 regularizer """no""" +776 73 optimizer """adam""" +776 73 training_loop """owa""" +776 73 negative_sampler """basic""" +776 73 evaluator """rankbased""" +776 74 dataset """wn18rr""" +776 74 model """transd""" +776 74 loss """nssa""" +776 74 regularizer """no""" +776 74 optimizer """adam""" +776 74 training_loop """owa""" +776 74 negative_sampler """basic""" +776 74 evaluator """rankbased""" +776 75 dataset """wn18rr""" +776 75 model """transd""" +776 75 loss """nssa""" +776 75 regularizer """no""" +776 75 optimizer """adam""" +776 75 training_loop """owa""" +776 75 negative_sampler """basic""" +776 75 evaluator """rankbased""" +776 76 dataset """wn18rr""" +776 76 model """transd""" +776 76 loss """nssa""" +776 76 regularizer """no""" +776 76 optimizer """adam""" +776 76 training_loop """owa""" +776 76 negative_sampler """basic""" +776 76 evaluator """rankbased""" +776 77 dataset """wn18rr""" +776 77 model """transd""" +776 77 loss """nssa""" +776 77 regularizer """no""" +776 77 optimizer """adam""" +776 77 training_loop """owa""" +776 77 negative_sampler """basic""" +776 77 evaluator """rankbased""" +776 78 dataset """wn18rr""" +776 78 model """transd""" +776 78 loss """nssa""" +776 78 regularizer """no""" +776 78 optimizer """adam""" +776 78 training_loop """owa""" +776 78 negative_sampler """basic""" +776 78 evaluator """rankbased""" +776 79 dataset """wn18rr""" +776 79 model """transd""" +776 79 loss """nssa""" +776 79 regularizer """no""" +776 79 optimizer """adam""" +776 79 training_loop """owa""" +776 79 negative_sampler 
"""basic""" +776 79 evaluator """rankbased""" +776 80 dataset """wn18rr""" +776 80 model """transd""" +776 80 loss """nssa""" +776 80 regularizer """no""" +776 80 optimizer """adam""" +776 80 training_loop """owa""" +776 80 negative_sampler """basic""" +776 80 evaluator """rankbased""" +776 81 dataset """wn18rr""" +776 81 model """transd""" +776 81 loss """nssa""" +776 81 regularizer """no""" +776 81 optimizer """adam""" +776 81 training_loop """owa""" +776 81 negative_sampler """basic""" +776 81 evaluator """rankbased""" +776 82 dataset """wn18rr""" +776 82 model """transd""" +776 82 loss """nssa""" +776 82 regularizer """no""" +776 82 optimizer """adam""" +776 82 training_loop """owa""" +776 82 negative_sampler """basic""" +776 82 evaluator """rankbased""" +776 83 dataset """wn18rr""" +776 83 model """transd""" +776 83 loss """nssa""" +776 83 regularizer """no""" +776 83 optimizer """adam""" +776 83 training_loop """owa""" +776 83 negative_sampler """basic""" +776 83 evaluator """rankbased""" +776 84 dataset """wn18rr""" +776 84 model """transd""" +776 84 loss """nssa""" +776 84 regularizer """no""" +776 84 optimizer """adam""" +776 84 training_loop """owa""" +776 84 negative_sampler """basic""" +776 84 evaluator """rankbased""" +776 85 dataset """wn18rr""" +776 85 model """transd""" +776 85 loss """nssa""" +776 85 regularizer """no""" +776 85 optimizer """adam""" +776 85 training_loop """owa""" +776 85 negative_sampler """basic""" +776 85 evaluator """rankbased""" +776 86 dataset """wn18rr""" +776 86 model """transd""" +776 86 loss """nssa""" +776 86 regularizer """no""" +776 86 optimizer """adam""" +776 86 training_loop """owa""" +776 86 negative_sampler """basic""" +776 86 evaluator """rankbased""" +776 87 dataset """wn18rr""" +776 87 model """transd""" +776 87 loss """nssa""" +776 87 regularizer """no""" +776 87 optimizer """adam""" +776 87 training_loop """owa""" +776 87 negative_sampler """basic""" +776 87 evaluator """rankbased""" +776 88 dataset 
"""wn18rr""" +776 88 model """transd""" +776 88 loss """nssa""" +776 88 regularizer """no""" +776 88 optimizer """adam""" +776 88 training_loop """owa""" +776 88 negative_sampler """basic""" +776 88 evaluator """rankbased""" +776 89 dataset """wn18rr""" +776 89 model """transd""" +776 89 loss """nssa""" +776 89 regularizer """no""" +776 89 optimizer """adam""" +776 89 training_loop """owa""" +776 89 negative_sampler """basic""" +776 89 evaluator """rankbased""" +776 90 dataset """wn18rr""" +776 90 model """transd""" +776 90 loss """nssa""" +776 90 regularizer """no""" +776 90 optimizer """adam""" +776 90 training_loop """owa""" +776 90 negative_sampler """basic""" +776 90 evaluator """rankbased""" +776 91 dataset """wn18rr""" +776 91 model """transd""" +776 91 loss """nssa""" +776 91 regularizer """no""" +776 91 optimizer """adam""" +776 91 training_loop """owa""" +776 91 negative_sampler """basic""" +776 91 evaluator """rankbased""" +776 92 dataset """wn18rr""" +776 92 model """transd""" +776 92 loss """nssa""" +776 92 regularizer """no""" +776 92 optimizer """adam""" +776 92 training_loop """owa""" +776 92 negative_sampler """basic""" +776 92 evaluator """rankbased""" +776 93 dataset """wn18rr""" +776 93 model """transd""" +776 93 loss """nssa""" +776 93 regularizer """no""" +776 93 optimizer """adam""" +776 93 training_loop """owa""" +776 93 negative_sampler """basic""" +776 93 evaluator """rankbased""" +776 94 dataset """wn18rr""" +776 94 model """transd""" +776 94 loss """nssa""" +776 94 regularizer """no""" +776 94 optimizer """adam""" +776 94 training_loop """owa""" +776 94 negative_sampler """basic""" +776 94 evaluator """rankbased""" +776 95 dataset """wn18rr""" +776 95 model """transd""" +776 95 loss """nssa""" +776 95 regularizer """no""" +776 95 optimizer """adam""" +776 95 training_loop """owa""" +776 95 negative_sampler """basic""" +776 95 evaluator """rankbased""" +776 96 dataset """wn18rr""" +776 96 model """transd""" +776 96 loss """nssa""" +776 96 
regularizer """no""" +776 96 optimizer """adam""" +776 96 training_loop """owa""" +776 96 negative_sampler """basic""" +776 96 evaluator """rankbased""" +776 97 dataset """wn18rr""" +776 97 model """transd""" +776 97 loss """nssa""" +776 97 regularizer """no""" +776 97 optimizer """adam""" +776 97 training_loop """owa""" +776 97 negative_sampler """basic""" +776 97 evaluator """rankbased""" +776 98 dataset """wn18rr""" +776 98 model """transd""" +776 98 loss """nssa""" +776 98 regularizer """no""" +776 98 optimizer """adam""" +776 98 training_loop """owa""" +776 98 negative_sampler """basic""" +776 98 evaluator """rankbased""" +776 99 dataset """wn18rr""" +776 99 model """transd""" +776 99 loss """nssa""" +776 99 regularizer """no""" +776 99 optimizer """adam""" +776 99 training_loop """owa""" +776 99 negative_sampler """basic""" +776 99 evaluator """rankbased""" +776 100 dataset """wn18rr""" +776 100 model """transd""" +776 100 loss """nssa""" +776 100 regularizer """no""" +776 100 optimizer """adam""" +776 100 training_loop """owa""" +776 100 negative_sampler """basic""" +776 100 evaluator """rankbased""" +777 1 model.embedding_dim 0.0 +777 1 model.relation_dim 1.0 +777 1 optimizer.lr 0.003450684409531524 +777 1 training.batch_size 1.0 +777 1 training.label_smoothing 0.009757692863718127 +777 2 model.embedding_dim 1.0 +777 2 model.relation_dim 1.0 +777 2 optimizer.lr 0.0047443111915656125 +777 2 training.batch_size 1.0 +777 2 training.label_smoothing 0.02891863808183801 +777 1 dataset """wn18rr""" +777 1 model """transd""" +777 1 loss """crossentropy""" +777 1 regularizer """no""" +777 1 optimizer """adam""" +777 1 training_loop """lcwa""" +777 1 evaluator """rankbased""" +777 2 dataset """wn18rr""" +777 2 model """transd""" +777 2 loss """crossentropy""" +777 2 regularizer """no""" +777 2 optimizer """adam""" +777 2 training_loop """lcwa""" +777 2 evaluator """rankbased""" +778 1 model.embedding_dim 1.0 +778 1 model.relation_dim 0.0 +778 1 optimizer.lr 
0.01857080344182711 +778 1 training.batch_size 1.0 +778 1 training.label_smoothing 0.9823591721185486 +778 2 model.embedding_dim 0.0 +778 2 model.relation_dim 1.0 +778 2 optimizer.lr 0.002622623800057903 +778 2 training.batch_size 2.0 +778 2 training.label_smoothing 0.0029098551676545368 +778 3 model.embedding_dim 0.0 +778 3 model.relation_dim 2.0 +778 3 optimizer.lr 0.03385904369629495 +778 3 training.batch_size 2.0 +778 3 training.label_smoothing 0.020278265009316523 +778 4 model.embedding_dim 2.0 +778 4 model.relation_dim 2.0 +778 4 optimizer.lr 0.013739222739236338 +778 4 training.batch_size 1.0 +778 4 training.label_smoothing 0.00236685289161138 +778 1 dataset """wn18rr""" +778 1 model """transd""" +778 1 loss """crossentropy""" +778 1 regularizer """no""" +778 1 optimizer """adam""" +778 1 training_loop """lcwa""" +778 1 evaluator """rankbased""" +778 2 dataset """wn18rr""" +778 2 model """transd""" +778 2 loss """crossentropy""" +778 2 regularizer """no""" +778 2 optimizer """adam""" +778 2 training_loop """lcwa""" +778 2 evaluator """rankbased""" +778 3 dataset """wn18rr""" +778 3 model """transd""" +778 3 loss """crossentropy""" +778 3 regularizer """no""" +778 3 optimizer """adam""" +778 3 training_loop """lcwa""" +778 3 evaluator """rankbased""" +778 4 dataset """wn18rr""" +778 4 model """transd""" +778 4 loss """crossentropy""" +778 4 regularizer """no""" +778 4 optimizer """adam""" +778 4 training_loop """lcwa""" +778 4 evaluator """rankbased""" +779 1 model.embedding_dim 2.0 +779 1 model.relation_dim 2.0 +779 1 optimizer.lr 0.0011217734681831064 +779 1 training.batch_size 2.0 +779 1 training.label_smoothing 0.001076745682109876 +779 2 model.embedding_dim 1.0 +779 2 model.relation_dim 1.0 +779 2 optimizer.lr 0.0013806546504724901 +779 2 training.batch_size 0.0 +779 2 training.label_smoothing 0.6818330136620889 +779 3 model.embedding_dim 2.0 +779 3 model.relation_dim 1.0 +779 3 optimizer.lr 0.02655170709020887 +779 3 training.batch_size 0.0 +779 3 
training.label_smoothing 0.2960648328432187 +779 1 dataset """wn18rr""" +779 1 model """transd""" +779 1 loss """bceaftersigmoid""" +779 1 regularizer """no""" +779 1 optimizer """adam""" +779 1 training_loop """lcwa""" +779 1 evaluator """rankbased""" +779 2 dataset """wn18rr""" +779 2 model """transd""" +779 2 loss """bceaftersigmoid""" +779 2 regularizer """no""" +779 2 optimizer """adam""" +779 2 training_loop """lcwa""" +779 2 evaluator """rankbased""" +779 3 dataset """wn18rr""" +779 3 model """transd""" +779 3 loss """bceaftersigmoid""" +779 3 regularizer """no""" +779 3 optimizer """adam""" +779 3 training_loop """lcwa""" +779 3 evaluator """rankbased""" +780 1 model.embedding_dim 2.0 +780 1 model.relation_dim 0.0 +780 1 optimizer.lr 0.044874551543283875 +780 1 training.batch_size 1.0 +780 1 training.label_smoothing 0.011758345051638204 +780 2 model.embedding_dim 0.0 +780 2 model.relation_dim 1.0 +780 2 optimizer.lr 0.09962017423889076 +780 2 training.batch_size 1.0 +780 2 training.label_smoothing 0.005406204583495286 +780 3 model.embedding_dim 0.0 +780 3 model.relation_dim 1.0 +780 3 optimizer.lr 0.03184874028569379 +780 3 training.batch_size 0.0 +780 3 training.label_smoothing 0.004426336286096612 +780 4 model.embedding_dim 1.0 +780 4 model.relation_dim 2.0 +780 4 optimizer.lr 0.06133868572541591 +780 4 training.batch_size 2.0 +780 4 training.label_smoothing 0.08925864800439 +780 1 dataset """wn18rr""" +780 1 model """transd""" +780 1 loss """softplus""" +780 1 regularizer """no""" +780 1 optimizer """adam""" +780 1 training_loop """lcwa""" +780 1 evaluator """rankbased""" +780 2 dataset """wn18rr""" +780 2 model """transd""" +780 2 loss """softplus""" +780 2 regularizer """no""" +780 2 optimizer """adam""" +780 2 training_loop """lcwa""" +780 2 evaluator """rankbased""" +780 3 dataset """wn18rr""" +780 3 model """transd""" +780 3 loss """softplus""" +780 3 regularizer """no""" +780 3 optimizer """adam""" +780 3 training_loop """lcwa""" +780 3 evaluator 
"""rankbased""" +780 4 dataset """wn18rr""" +780 4 model """transd""" +780 4 loss """softplus""" +780 4 regularizer """no""" +780 4 optimizer """adam""" +780 4 training_loop """lcwa""" +780 4 evaluator """rankbased""" +781 1 model.embedding_dim 0.0 +781 1 model.relation_dim 2.0 +781 1 optimizer.lr 0.0031842376792570724 +781 1 training.batch_size 2.0 +781 1 training.label_smoothing 0.4301298485250423 +781 2 model.embedding_dim 2.0 +781 2 model.relation_dim 0.0 +781 2 optimizer.lr 0.0671073693439012 +781 2 training.batch_size 2.0 +781 2 training.label_smoothing 0.0069003529519881986 +781 3 model.embedding_dim 2.0 +781 3 model.relation_dim 1.0 +781 3 optimizer.lr 0.018621076187274637 +781 3 training.batch_size 1.0 +781 3 training.label_smoothing 0.031703159542342776 +781 4 model.embedding_dim 1.0 +781 4 model.relation_dim 2.0 +781 4 optimizer.lr 0.001947388982073561 +781 4 training.batch_size 0.0 +781 4 training.label_smoothing 0.004539746061190467 +781 5 model.embedding_dim 0.0 +781 5 model.relation_dim 2.0 +781 5 optimizer.lr 0.014294726272684272 +781 5 training.batch_size 0.0 +781 5 training.label_smoothing 0.8553607223225504 +781 1 dataset """wn18rr""" +781 1 model """transd""" +781 1 loss """bceaftersigmoid""" +781 1 regularizer """no""" +781 1 optimizer """adam""" +781 1 training_loop """lcwa""" +781 1 evaluator """rankbased""" +781 2 dataset """wn18rr""" +781 2 model """transd""" +781 2 loss """bceaftersigmoid""" +781 2 regularizer """no""" +781 2 optimizer """adam""" +781 2 training_loop """lcwa""" +781 2 evaluator """rankbased""" +781 3 dataset """wn18rr""" +781 3 model """transd""" +781 3 loss """bceaftersigmoid""" +781 3 regularizer """no""" +781 3 optimizer """adam""" +781 3 training_loop """lcwa""" +781 3 evaluator """rankbased""" +781 4 dataset """wn18rr""" +781 4 model """transd""" +781 4 loss """bceaftersigmoid""" +781 4 regularizer """no""" +781 4 optimizer """adam""" +781 4 training_loop """lcwa""" +781 4 evaluator """rankbased""" +781 5 dataset 
"""wn18rr""" +781 5 model """transd""" +781 5 loss """bceaftersigmoid""" +781 5 regularizer """no""" +781 5 optimizer """adam""" +781 5 training_loop """lcwa""" +781 5 evaluator """rankbased""" +782 1 model.embedding_dim 2.0 +782 1 model.relation_dim 0.0 +782 1 optimizer.lr 0.009774572089487017 +782 1 training.batch_size 1.0 +782 1 training.label_smoothing 0.07563589681128778 +782 2 model.embedding_dim 0.0 +782 2 model.relation_dim 2.0 +782 2 optimizer.lr 0.001424053759319833 +782 2 training.batch_size 0.0 +782 2 training.label_smoothing 0.050447239894023686 +782 3 model.embedding_dim 1.0 +782 3 model.relation_dim 2.0 +782 3 optimizer.lr 0.008789125303470003 +782 3 training.batch_size 2.0 +782 3 training.label_smoothing 0.0038106849125221012 +782 4 model.embedding_dim 1.0 +782 4 model.relation_dim 2.0 +782 4 optimizer.lr 0.00366289576776108 +782 4 training.batch_size 2.0 +782 4 training.label_smoothing 0.07522194328613746 +782 5 model.embedding_dim 1.0 +782 5 model.relation_dim 0.0 +782 5 optimizer.lr 0.013715295506528963 +782 5 training.batch_size 0.0 +782 5 training.label_smoothing 0.0623207594778007 +782 1 dataset """wn18rr""" +782 1 model """transd""" +782 1 loss """softplus""" +782 1 regularizer """no""" +782 1 optimizer """adam""" +782 1 training_loop """lcwa""" +782 1 evaluator """rankbased""" +782 2 dataset """wn18rr""" +782 2 model """transd""" +782 2 loss """softplus""" +782 2 regularizer """no""" +782 2 optimizer """adam""" +782 2 training_loop """lcwa""" +782 2 evaluator """rankbased""" +782 3 dataset """wn18rr""" +782 3 model """transd""" +782 3 loss """softplus""" +782 3 regularizer """no""" +782 3 optimizer """adam""" +782 3 training_loop """lcwa""" +782 3 evaluator """rankbased""" +782 4 dataset """wn18rr""" +782 4 model """transd""" +782 4 loss """softplus""" +782 4 regularizer """no""" +782 4 optimizer """adam""" +782 4 training_loop """lcwa""" +782 4 evaluator """rankbased""" +782 5 dataset """wn18rr""" +782 5 model """transd""" +782 5 loss 
"""softplus""" +782 5 regularizer """no""" +782 5 optimizer """adam""" +782 5 training_loop """lcwa""" +782 5 evaluator """rankbased""" +783 1 model.embedding_dim 0.0 +783 1 model.relation_dim 0.0 +783 1 optimizer.lr 0.09736124799091088 +783 1 negative_sampler.num_negs_per_pos 2.0 +783 1 training.batch_size 0.0 +783 2 model.embedding_dim 1.0 +783 2 model.relation_dim 0.0 +783 2 optimizer.lr 0.0012227477742931423 +783 2 negative_sampler.num_negs_per_pos 14.0 +783 2 training.batch_size 0.0 +783 3 model.embedding_dim 0.0 +783 3 model.relation_dim 0.0 +783 3 optimizer.lr 0.0023383871930562896 +783 3 negative_sampler.num_negs_per_pos 39.0 +783 3 training.batch_size 1.0 +783 4 model.embedding_dim 0.0 +783 4 model.relation_dim 1.0 +783 4 optimizer.lr 0.003420312191964583 +783 4 negative_sampler.num_negs_per_pos 27.0 +783 4 training.batch_size 3.0 +783 5 model.embedding_dim 0.0 +783 5 model.relation_dim 2.0 +783 5 optimizer.lr 0.0030800720969845386 +783 5 negative_sampler.num_negs_per_pos 44.0 +783 5 training.batch_size 2.0 +783 6 model.embedding_dim 0.0 +783 6 model.relation_dim 1.0 +783 6 optimizer.lr 0.015213660614076335 +783 6 negative_sampler.num_negs_per_pos 30.0 +783 6 training.batch_size 0.0 +783 7 model.embedding_dim 1.0 +783 7 model.relation_dim 1.0 +783 7 optimizer.lr 0.09657304710713104 +783 7 negative_sampler.num_negs_per_pos 18.0 +783 7 training.batch_size 0.0 +783 8 model.embedding_dim 2.0 +783 8 model.relation_dim 2.0 +783 8 optimizer.lr 0.011819921548573628 +783 8 negative_sampler.num_negs_per_pos 48.0 +783 8 training.batch_size 3.0 +783 9 model.embedding_dim 0.0 +783 9 model.relation_dim 1.0 +783 9 optimizer.lr 0.02340573101839127 +783 9 negative_sampler.num_negs_per_pos 14.0 +783 9 training.batch_size 0.0 +783 10 model.embedding_dim 2.0 +783 10 model.relation_dim 1.0 +783 10 optimizer.lr 0.00680207150713927 +783 10 negative_sampler.num_negs_per_pos 1.0 +783 10 training.batch_size 1.0 +783 11 model.embedding_dim 2.0 +783 11 model.relation_dim 2.0 +783 11 
optimizer.lr 0.0018715344161161702 +783 11 negative_sampler.num_negs_per_pos 41.0 +783 11 training.batch_size 1.0 +783 12 model.embedding_dim 2.0 +783 12 model.relation_dim 2.0 +783 12 optimizer.lr 0.010369217483126896 +783 12 negative_sampler.num_negs_per_pos 24.0 +783 12 training.batch_size 1.0 +783 13 model.embedding_dim 2.0 +783 13 model.relation_dim 0.0 +783 13 optimizer.lr 0.054040035390541696 +783 13 negative_sampler.num_negs_per_pos 6.0 +783 13 training.batch_size 1.0 +783 14 model.embedding_dim 2.0 +783 14 model.relation_dim 2.0 +783 14 optimizer.lr 0.017007490928716163 +783 14 negative_sampler.num_negs_per_pos 20.0 +783 14 training.batch_size 2.0 +783 1 dataset """yago310""" +783 1 model """transd""" +783 1 loss """softplus""" +783 1 regularizer """no""" +783 1 optimizer """adam""" +783 1 training_loop """owa""" +783 1 negative_sampler """basic""" +783 1 evaluator """rankbased""" +783 2 dataset """yago310""" +783 2 model """transd""" +783 2 loss """softplus""" +783 2 regularizer """no""" +783 2 optimizer """adam""" +783 2 training_loop """owa""" +783 2 negative_sampler """basic""" +783 2 evaluator """rankbased""" +783 3 dataset """yago310""" +783 3 model """transd""" +783 3 loss """softplus""" +783 3 regularizer """no""" +783 3 optimizer """adam""" +783 3 training_loop """owa""" +783 3 negative_sampler """basic""" +783 3 evaluator """rankbased""" +783 4 dataset """yago310""" +783 4 model """transd""" +783 4 loss """softplus""" +783 4 regularizer """no""" +783 4 optimizer """adam""" +783 4 training_loop """owa""" +783 4 negative_sampler """basic""" +783 4 evaluator """rankbased""" +783 5 dataset """yago310""" +783 5 model """transd""" +783 5 loss """softplus""" +783 5 regularizer """no""" +783 5 optimizer """adam""" +783 5 training_loop """owa""" +783 5 negative_sampler """basic""" +783 5 evaluator """rankbased""" +783 6 dataset """yago310""" +783 6 model """transd""" +783 6 loss """softplus""" +783 6 regularizer """no""" +783 6 optimizer """adam""" +783 6 
training_loop """owa""" +783 6 negative_sampler """basic""" +783 6 evaluator """rankbased""" +783 7 dataset """yago310""" +783 7 model """transd""" +783 7 loss """softplus""" +783 7 regularizer """no""" +783 7 optimizer """adam""" +783 7 training_loop """owa""" +783 7 negative_sampler """basic""" +783 7 evaluator """rankbased""" +783 8 dataset """yago310""" +783 8 model """transd""" +783 8 loss """softplus""" +783 8 regularizer """no""" +783 8 optimizer """adam""" +783 8 training_loop """owa""" +783 8 negative_sampler """basic""" +783 8 evaluator """rankbased""" +783 9 dataset """yago310""" +783 9 model """transd""" +783 9 loss """softplus""" +783 9 regularizer """no""" +783 9 optimizer """adam""" +783 9 training_loop """owa""" +783 9 negative_sampler """basic""" +783 9 evaluator """rankbased""" +783 10 dataset """yago310""" +783 10 model """transd""" +783 10 loss """softplus""" +783 10 regularizer """no""" +783 10 optimizer """adam""" +783 10 training_loop """owa""" +783 10 negative_sampler """basic""" +783 10 evaluator """rankbased""" +783 11 dataset """yago310""" +783 11 model """transd""" +783 11 loss """softplus""" +783 11 regularizer """no""" +783 11 optimizer """adam""" +783 11 training_loop """owa""" +783 11 negative_sampler """basic""" +783 11 evaluator """rankbased""" +783 12 dataset """yago310""" +783 12 model """transd""" +783 12 loss """softplus""" +783 12 regularizer """no""" +783 12 optimizer """adam""" +783 12 training_loop """owa""" +783 12 negative_sampler """basic""" +783 12 evaluator """rankbased""" +783 13 dataset """yago310""" +783 13 model """transd""" +783 13 loss """softplus""" +783 13 regularizer """no""" +783 13 optimizer """adam""" +783 13 training_loop """owa""" +783 13 negative_sampler """basic""" +783 13 evaluator """rankbased""" +783 14 dataset """yago310""" +783 14 model """transd""" +783 14 loss """softplus""" +783 14 regularizer """no""" +783 14 optimizer """adam""" +783 14 training_loop """owa""" +783 14 negative_sampler 
"""basic""" +783 14 evaluator """rankbased""" +784 1 model.embedding_dim 1.0 +784 1 model.relation_dim 2.0 +784 1 optimizer.lr 0.0019019640002320103 +784 1 negative_sampler.num_negs_per_pos 15.0 +784 1 training.batch_size 1.0 +784 2 model.embedding_dim 1.0 +784 2 model.relation_dim 2.0 +784 2 optimizer.lr 0.0012975621134454802 +784 2 negative_sampler.num_negs_per_pos 27.0 +784 2 training.batch_size 3.0 +784 3 model.embedding_dim 2.0 +784 3 model.relation_dim 2.0 +784 3 optimizer.lr 0.005983916856896893 +784 3 negative_sampler.num_negs_per_pos 26.0 +784 3 training.batch_size 1.0 +784 4 model.embedding_dim 2.0 +784 4 model.relation_dim 0.0 +784 4 optimizer.lr 0.0012170141311002467 +784 4 negative_sampler.num_negs_per_pos 49.0 +784 4 training.batch_size 2.0 +784 5 model.embedding_dim 2.0 +784 5 model.relation_dim 0.0 +784 5 optimizer.lr 0.03517703334038353 +784 5 negative_sampler.num_negs_per_pos 43.0 +784 5 training.batch_size 0.0 +784 6 model.embedding_dim 0.0 +784 6 model.relation_dim 0.0 +784 6 optimizer.lr 0.005885739320245044 +784 6 negative_sampler.num_negs_per_pos 45.0 +784 6 training.batch_size 0.0 +784 7 model.embedding_dim 2.0 +784 7 model.relation_dim 2.0 +784 7 optimizer.lr 0.002187458621499947 +784 7 negative_sampler.num_negs_per_pos 17.0 +784 7 training.batch_size 0.0 +784 8 model.embedding_dim 0.0 +784 8 model.relation_dim 1.0 +784 8 optimizer.lr 0.03370578863222941 +784 8 negative_sampler.num_negs_per_pos 19.0 +784 8 training.batch_size 0.0 +784 9 model.embedding_dim 2.0 +784 9 model.relation_dim 2.0 +784 9 optimizer.lr 0.0064265003683907805 +784 9 negative_sampler.num_negs_per_pos 48.0 +784 9 training.batch_size 3.0 +784 10 model.embedding_dim 0.0 +784 10 model.relation_dim 2.0 +784 10 optimizer.lr 0.035809981475264395 +784 10 negative_sampler.num_negs_per_pos 40.0 +784 10 training.batch_size 1.0 +784 11 model.embedding_dim 1.0 +784 11 model.relation_dim 0.0 +784 11 optimizer.lr 0.04738396254041899 +784 11 negative_sampler.num_negs_per_pos 32.0 +784 
11 training.batch_size 0.0 +784 12 model.embedding_dim 2.0 +784 12 model.relation_dim 0.0 +784 12 optimizer.lr 0.0031186645370812594 +784 12 negative_sampler.num_negs_per_pos 49.0 +784 12 training.batch_size 0.0 +784 13 model.embedding_dim 1.0 +784 13 model.relation_dim 0.0 +784 13 optimizer.lr 0.0017967724248949752 +784 13 negative_sampler.num_negs_per_pos 23.0 +784 13 training.batch_size 0.0 +784 14 model.embedding_dim 0.0 +784 14 model.relation_dim 0.0 +784 14 optimizer.lr 0.03380198942788811 +784 14 negative_sampler.num_negs_per_pos 43.0 +784 14 training.batch_size 2.0 +784 15 model.embedding_dim 2.0 +784 15 model.relation_dim 2.0 +784 15 optimizer.lr 0.044783024244825366 +784 15 negative_sampler.num_negs_per_pos 48.0 +784 15 training.batch_size 1.0 +784 16 model.embedding_dim 2.0 +784 16 model.relation_dim 0.0 +784 16 optimizer.lr 0.00454490882846136 +784 16 negative_sampler.num_negs_per_pos 39.0 +784 16 training.batch_size 0.0 +784 17 model.embedding_dim 2.0 +784 17 model.relation_dim 1.0 +784 17 optimizer.lr 0.03039031037214082 +784 17 negative_sampler.num_negs_per_pos 44.0 +784 17 training.batch_size 3.0 +784 18 model.embedding_dim 1.0 +784 18 model.relation_dim 0.0 +784 18 optimizer.lr 0.006233310718933523 +784 18 negative_sampler.num_negs_per_pos 23.0 +784 18 training.batch_size 3.0 +784 19 model.embedding_dim 1.0 +784 19 model.relation_dim 2.0 +784 19 optimizer.lr 0.0440168657718269 +784 19 negative_sampler.num_negs_per_pos 24.0 +784 19 training.batch_size 0.0 +784 1 dataset """yago310""" +784 1 model """transd""" +784 1 loss """softplus""" +784 1 regularizer """no""" +784 1 optimizer """adam""" +784 1 training_loop """owa""" +784 1 negative_sampler """basic""" +784 1 evaluator """rankbased""" +784 2 dataset """yago310""" +784 2 model """transd""" +784 2 loss """softplus""" +784 2 regularizer """no""" +784 2 optimizer """adam""" +784 2 training_loop """owa""" +784 2 negative_sampler """basic""" +784 2 evaluator """rankbased""" +784 3 dataset 
"""yago310""" +784 3 model """transd""" +784 3 loss """softplus""" +784 3 regularizer """no""" +784 3 optimizer """adam""" +784 3 training_loop """owa""" +784 3 negative_sampler """basic""" +784 3 evaluator """rankbased""" +784 4 dataset """yago310""" +784 4 model """transd""" +784 4 loss """softplus""" +784 4 regularizer """no""" +784 4 optimizer """adam""" +784 4 training_loop """owa""" +784 4 negative_sampler """basic""" +784 4 evaluator """rankbased""" +784 5 dataset """yago310""" +784 5 model """transd""" +784 5 loss """softplus""" +784 5 regularizer """no""" +784 5 optimizer """adam""" +784 5 training_loop """owa""" +784 5 negative_sampler """basic""" +784 5 evaluator """rankbased""" +784 6 dataset """yago310""" +784 6 model """transd""" +784 6 loss """softplus""" +784 6 regularizer """no""" +784 6 optimizer """adam""" +784 6 training_loop """owa""" +784 6 negative_sampler """basic""" +784 6 evaluator """rankbased""" +784 7 dataset """yago310""" +784 7 model """transd""" +784 7 loss """softplus""" +784 7 regularizer """no""" +784 7 optimizer """adam""" +784 7 training_loop """owa""" +784 7 negative_sampler """basic""" +784 7 evaluator """rankbased""" +784 8 dataset """yago310""" +784 8 model """transd""" +784 8 loss """softplus""" +784 8 regularizer """no""" +784 8 optimizer """adam""" +784 8 training_loop """owa""" +784 8 negative_sampler """basic""" +784 8 evaluator """rankbased""" +784 9 dataset """yago310""" +784 9 model """transd""" +784 9 loss """softplus""" +784 9 regularizer """no""" +784 9 optimizer """adam""" +784 9 training_loop """owa""" +784 9 negative_sampler """basic""" +784 9 evaluator """rankbased""" +784 10 dataset """yago310""" +784 10 model """transd""" +784 10 loss """softplus""" +784 10 regularizer """no""" +784 10 optimizer """adam""" +784 10 training_loop """owa""" +784 10 negative_sampler """basic""" +784 10 evaluator """rankbased""" +784 11 dataset """yago310""" +784 11 model """transd""" +784 11 loss """softplus""" +784 11 
regularizer """no""" +784 11 optimizer """adam""" +784 11 training_loop """owa""" +784 11 negative_sampler """basic""" +784 11 evaluator """rankbased""" +784 12 dataset """yago310""" +784 12 model """transd""" +784 12 loss """softplus""" +784 12 regularizer """no""" +784 12 optimizer """adam""" +784 12 training_loop """owa""" +784 12 negative_sampler """basic""" +784 12 evaluator """rankbased""" +784 13 dataset """yago310""" +784 13 model """transd""" +784 13 loss """softplus""" +784 13 regularizer """no""" +784 13 optimizer """adam""" +784 13 training_loop """owa""" +784 13 negative_sampler """basic""" +784 13 evaluator """rankbased""" +784 14 dataset """yago310""" +784 14 model """transd""" +784 14 loss """softplus""" +784 14 regularizer """no""" +784 14 optimizer """adam""" +784 14 training_loop """owa""" +784 14 negative_sampler """basic""" +784 14 evaluator """rankbased""" +784 15 dataset """yago310""" +784 15 model """transd""" +784 15 loss """softplus""" +784 15 regularizer """no""" +784 15 optimizer """adam""" +784 15 training_loop """owa""" +784 15 negative_sampler """basic""" +784 15 evaluator """rankbased""" +784 16 dataset """yago310""" +784 16 model """transd""" +784 16 loss """softplus""" +784 16 regularizer """no""" +784 16 optimizer """adam""" +784 16 training_loop """owa""" +784 16 negative_sampler """basic""" +784 16 evaluator """rankbased""" +784 17 dataset """yago310""" +784 17 model """transd""" +784 17 loss """softplus""" +784 17 regularizer """no""" +784 17 optimizer """adam""" +784 17 training_loop """owa""" +784 17 negative_sampler """basic""" +784 17 evaluator """rankbased""" +784 18 dataset """yago310""" +784 18 model """transd""" +784 18 loss """softplus""" +784 18 regularizer """no""" +784 18 optimizer """adam""" +784 18 training_loop """owa""" +784 18 negative_sampler """basic""" +784 18 evaluator """rankbased""" +784 19 dataset """yago310""" +784 19 model """transd""" +784 19 loss """softplus""" +784 19 regularizer """no""" +784 19 
optimizer """adam""" +784 19 training_loop """owa""" +784 19 negative_sampler """basic""" +784 19 evaluator """rankbased""" +785 1 model.embedding_dim 1.0 +785 1 model.relation_dim 2.0 +785 1 optimizer.lr 0.01078892146620525 +785 1 negative_sampler.num_negs_per_pos 0.0 +785 1 training.batch_size 1.0 +785 2 model.embedding_dim 1.0 +785 2 model.relation_dim 1.0 +785 2 optimizer.lr 0.0015418499694753527 +785 2 negative_sampler.num_negs_per_pos 29.0 +785 2 training.batch_size 0.0 +785 3 model.embedding_dim 2.0 +785 3 model.relation_dim 1.0 +785 3 optimizer.lr 0.03962511085492083 +785 3 negative_sampler.num_negs_per_pos 25.0 +785 3 training.batch_size 1.0 +785 4 model.embedding_dim 2.0 +785 4 model.relation_dim 0.0 +785 4 optimizer.lr 0.0013717410935385832 +785 4 negative_sampler.num_negs_per_pos 18.0 +785 4 training.batch_size 3.0 +785 5 model.embedding_dim 1.0 +785 5 model.relation_dim 2.0 +785 5 optimizer.lr 0.03740605230130987 +785 5 negative_sampler.num_negs_per_pos 49.0 +785 5 training.batch_size 2.0 +785 6 model.embedding_dim 2.0 +785 6 model.relation_dim 2.0 +785 6 optimizer.lr 0.07647095535298247 +785 6 negative_sampler.num_negs_per_pos 43.0 +785 6 training.batch_size 0.0 +785 7 model.embedding_dim 1.0 +785 7 model.relation_dim 0.0 +785 7 optimizer.lr 0.0011214142300043364 +785 7 negative_sampler.num_negs_per_pos 22.0 +785 7 training.batch_size 1.0 +785 8 model.embedding_dim 1.0 +785 8 model.relation_dim 0.0 +785 8 optimizer.lr 0.0037551981700672263 +785 8 negative_sampler.num_negs_per_pos 16.0 +785 8 training.batch_size 0.0 +785 9 model.embedding_dim 1.0 +785 9 model.relation_dim 0.0 +785 9 optimizer.lr 0.016499134616192762 +785 9 negative_sampler.num_negs_per_pos 22.0 +785 9 training.batch_size 1.0 +785 10 model.embedding_dim 0.0 +785 10 model.relation_dim 2.0 +785 10 optimizer.lr 0.02509236994531242 +785 10 negative_sampler.num_negs_per_pos 32.0 +785 10 training.batch_size 2.0 +785 11 model.embedding_dim 0.0 +785 11 model.relation_dim 2.0 +785 11 
optimizer.lr 0.001470137055291324 +785 11 negative_sampler.num_negs_per_pos 47.0 +785 11 training.batch_size 2.0 +785 12 model.embedding_dim 0.0 +785 12 model.relation_dim 2.0 +785 12 optimizer.lr 0.03276244025965943 +785 12 negative_sampler.num_negs_per_pos 19.0 +785 12 training.batch_size 3.0 +785 13 model.embedding_dim 1.0 +785 13 model.relation_dim 2.0 +785 13 optimizer.lr 0.015763473658416176 +785 13 negative_sampler.num_negs_per_pos 11.0 +785 13 training.batch_size 0.0 +785 14 model.embedding_dim 1.0 +785 14 model.relation_dim 1.0 +785 14 optimizer.lr 0.08467933169450878 +785 14 negative_sampler.num_negs_per_pos 22.0 +785 14 training.batch_size 2.0 +785 1 dataset """yago310""" +785 1 model """transd""" +785 1 loss """bceaftersigmoid""" +785 1 regularizer """no""" +785 1 optimizer """adam""" +785 1 training_loop """owa""" +785 1 negative_sampler """basic""" +785 1 evaluator """rankbased""" +785 2 dataset """yago310""" +785 2 model """transd""" +785 2 loss """bceaftersigmoid""" +785 2 regularizer """no""" +785 2 optimizer """adam""" +785 2 training_loop """owa""" +785 2 negative_sampler """basic""" +785 2 evaluator """rankbased""" +785 3 dataset """yago310""" +785 3 model """transd""" +785 3 loss """bceaftersigmoid""" +785 3 regularizer """no""" +785 3 optimizer """adam""" +785 3 training_loop """owa""" +785 3 negative_sampler """basic""" +785 3 evaluator """rankbased""" +785 4 dataset """yago310""" +785 4 model """transd""" +785 4 loss """bceaftersigmoid""" +785 4 regularizer """no""" +785 4 optimizer """adam""" +785 4 training_loop """owa""" +785 4 negative_sampler """basic""" +785 4 evaluator """rankbased""" +785 5 dataset """yago310""" +785 5 model """transd""" +785 5 loss """bceaftersigmoid""" +785 5 regularizer """no""" +785 5 optimizer """adam""" +785 5 training_loop """owa""" +785 5 negative_sampler """basic""" +785 5 evaluator """rankbased""" +785 6 dataset """yago310""" +785 6 model """transd""" +785 6 loss """bceaftersigmoid""" +785 6 regularizer 
"""no""" +785 6 optimizer """adam""" +785 6 training_loop """owa""" +785 6 negative_sampler """basic""" +785 6 evaluator """rankbased""" +785 7 dataset """yago310""" +785 7 model """transd""" +785 7 loss """bceaftersigmoid""" +785 7 regularizer """no""" +785 7 optimizer """adam""" +785 7 training_loop """owa""" +785 7 negative_sampler """basic""" +785 7 evaluator """rankbased""" +785 8 dataset """yago310""" +785 8 model """transd""" +785 8 loss """bceaftersigmoid""" +785 8 regularizer """no""" +785 8 optimizer """adam""" +785 8 training_loop """owa""" +785 8 negative_sampler """basic""" +785 8 evaluator """rankbased""" +785 9 dataset """yago310""" +785 9 model """transd""" +785 9 loss """bceaftersigmoid""" +785 9 regularizer """no""" +785 9 optimizer """adam""" +785 9 training_loop """owa""" +785 9 negative_sampler """basic""" +785 9 evaluator """rankbased""" +785 10 dataset """yago310""" +785 10 model """transd""" +785 10 loss """bceaftersigmoid""" +785 10 regularizer """no""" +785 10 optimizer """adam""" +785 10 training_loop """owa""" +785 10 negative_sampler """basic""" +785 10 evaluator """rankbased""" +785 11 dataset """yago310""" +785 11 model """transd""" +785 11 loss """bceaftersigmoid""" +785 11 regularizer """no""" +785 11 optimizer """adam""" +785 11 training_loop """owa""" +785 11 negative_sampler """basic""" +785 11 evaluator """rankbased""" +785 12 dataset """yago310""" +785 12 model """transd""" +785 12 loss """bceaftersigmoid""" +785 12 regularizer """no""" +785 12 optimizer """adam""" +785 12 training_loop """owa""" +785 12 negative_sampler """basic""" +785 12 evaluator """rankbased""" +785 13 dataset """yago310""" +785 13 model """transd""" +785 13 loss """bceaftersigmoid""" +785 13 regularizer """no""" +785 13 optimizer """adam""" +785 13 training_loop """owa""" +785 13 negative_sampler """basic""" +785 13 evaluator """rankbased""" +785 14 dataset """yago310""" +785 14 model """transd""" +785 14 loss """bceaftersigmoid""" +785 14 regularizer 
"""no""" +785 14 optimizer """adam""" +785 14 training_loop """owa""" +785 14 negative_sampler """basic""" +785 14 evaluator """rankbased""" +786 1 model.embedding_dim 2.0 +786 1 model.relation_dim 2.0 +786 1 optimizer.lr 0.003220147755290676 +786 1 negative_sampler.num_negs_per_pos 17.0 +786 1 training.batch_size 3.0 +786 2 model.embedding_dim 2.0 +786 2 model.relation_dim 1.0 +786 2 optimizer.lr 0.0011045307591264334 +786 2 negative_sampler.num_negs_per_pos 13.0 +786 2 training.batch_size 1.0 +786 3 model.embedding_dim 2.0 +786 3 model.relation_dim 0.0 +786 3 optimizer.lr 0.002222657252677634 +786 3 negative_sampler.num_negs_per_pos 44.0 +786 3 training.batch_size 3.0 +786 4 model.embedding_dim 0.0 +786 4 model.relation_dim 1.0 +786 4 optimizer.lr 0.00571282238497401 +786 4 negative_sampler.num_negs_per_pos 48.0 +786 4 training.batch_size 3.0 +786 5 model.embedding_dim 2.0 +786 5 model.relation_dim 2.0 +786 5 optimizer.lr 0.026705688349219107 +786 5 negative_sampler.num_negs_per_pos 16.0 +786 5 training.batch_size 0.0 +786 6 model.embedding_dim 0.0 +786 6 model.relation_dim 0.0 +786 6 optimizer.lr 0.008435058567022586 +786 6 negative_sampler.num_negs_per_pos 22.0 +786 6 training.batch_size 0.0 +786 7 model.embedding_dim 1.0 +786 7 model.relation_dim 1.0 +786 7 optimizer.lr 0.0016613520794544392 +786 7 negative_sampler.num_negs_per_pos 24.0 +786 7 training.batch_size 2.0 +786 8 model.embedding_dim 0.0 +786 8 model.relation_dim 2.0 +786 8 optimizer.lr 0.001508230833957381 +786 8 negative_sampler.num_negs_per_pos 19.0 +786 8 training.batch_size 1.0 +786 9 model.embedding_dim 1.0 +786 9 model.relation_dim 2.0 +786 9 optimizer.lr 0.001456509548544549 +786 9 negative_sampler.num_negs_per_pos 15.0 +786 9 training.batch_size 3.0 +786 10 model.embedding_dim 1.0 +786 10 model.relation_dim 0.0 +786 10 optimizer.lr 0.031011595281073615 +786 10 negative_sampler.num_negs_per_pos 28.0 +786 10 training.batch_size 1.0 +786 11 model.embedding_dim 2.0 +786 11 model.relation_dim 2.0 
+786 11 optimizer.lr 0.006265176692275326 +786 11 negative_sampler.num_negs_per_pos 8.0 +786 11 training.batch_size 0.0 +786 12 model.embedding_dim 0.0 +786 12 model.relation_dim 2.0 +786 12 optimizer.lr 0.029705363726791514 +786 12 negative_sampler.num_negs_per_pos 33.0 +786 12 training.batch_size 3.0 +786 13 model.embedding_dim 1.0 +786 13 model.relation_dim 1.0 +786 13 optimizer.lr 0.06042127810543155 +786 13 negative_sampler.num_negs_per_pos 22.0 +786 13 training.batch_size 2.0 +786 14 model.embedding_dim 0.0 +786 14 model.relation_dim 1.0 +786 14 optimizer.lr 0.0011294283962657073 +786 14 negative_sampler.num_negs_per_pos 41.0 +786 14 training.batch_size 0.0 +786 15 model.embedding_dim 2.0 +786 15 model.relation_dim 2.0 +786 15 optimizer.lr 0.01074340030809403 +786 15 negative_sampler.num_negs_per_pos 8.0 +786 15 training.batch_size 2.0 +786 16 model.embedding_dim 2.0 +786 16 model.relation_dim 2.0 +786 16 optimizer.lr 0.03387036088454663 +786 16 negative_sampler.num_negs_per_pos 30.0 +786 16 training.batch_size 0.0 +786 17 model.embedding_dim 0.0 +786 17 model.relation_dim 0.0 +786 17 optimizer.lr 0.007737292847869411 +786 17 negative_sampler.num_negs_per_pos 44.0 +786 17 training.batch_size 3.0 +786 18 model.embedding_dim 0.0 +786 18 model.relation_dim 1.0 +786 18 optimizer.lr 0.006134671977417899 +786 18 negative_sampler.num_negs_per_pos 39.0 +786 18 training.batch_size 0.0 +786 19 model.embedding_dim 1.0 +786 19 model.relation_dim 2.0 +786 19 optimizer.lr 0.003409832435544247 +786 19 negative_sampler.num_negs_per_pos 26.0 +786 19 training.batch_size 1.0 +786 20 model.embedding_dim 0.0 +786 20 model.relation_dim 0.0 +786 20 optimizer.lr 0.01567006875286318 +786 20 negative_sampler.num_negs_per_pos 41.0 +786 20 training.batch_size 1.0 +786 21 model.embedding_dim 1.0 +786 21 model.relation_dim 1.0 +786 21 optimizer.lr 0.0017269682257493695 +786 21 negative_sampler.num_negs_per_pos 41.0 +786 21 training.batch_size 1.0 +786 22 model.embedding_dim 0.0 +786 22 
model.relation_dim 1.0 +786 22 optimizer.lr 0.007659502899096321 +786 22 negative_sampler.num_negs_per_pos 11.0 +786 22 training.batch_size 3.0 +786 23 model.embedding_dim 1.0 +786 23 model.relation_dim 2.0 +786 23 optimizer.lr 0.031202028076437006 +786 23 negative_sampler.num_negs_per_pos 46.0 +786 23 training.batch_size 2.0 +786 1 dataset """yago310""" +786 1 model """transd""" +786 1 loss """bceaftersigmoid""" +786 1 regularizer """no""" +786 1 optimizer """adam""" +786 1 training_loop """owa""" +786 1 negative_sampler """basic""" +786 1 evaluator """rankbased""" +786 2 dataset """yago310""" +786 2 model """transd""" +786 2 loss """bceaftersigmoid""" +786 2 regularizer """no""" +786 2 optimizer """adam""" +786 2 training_loop """owa""" +786 2 negative_sampler """basic""" +786 2 evaluator """rankbased""" +786 3 dataset """yago310""" +786 3 model """transd""" +786 3 loss """bceaftersigmoid""" +786 3 regularizer """no""" +786 3 optimizer """adam""" +786 3 training_loop """owa""" +786 3 negative_sampler """basic""" +786 3 evaluator """rankbased""" +786 4 dataset """yago310""" +786 4 model """transd""" +786 4 loss """bceaftersigmoid""" +786 4 regularizer """no""" +786 4 optimizer """adam""" +786 4 training_loop """owa""" +786 4 negative_sampler """basic""" +786 4 evaluator """rankbased""" +786 5 dataset """yago310""" +786 5 model """transd""" +786 5 loss """bceaftersigmoid""" +786 5 regularizer """no""" +786 5 optimizer """adam""" +786 5 training_loop """owa""" +786 5 negative_sampler """basic""" +786 5 evaluator """rankbased""" +786 6 dataset """yago310""" +786 6 model """transd""" +786 6 loss """bceaftersigmoid""" +786 6 regularizer """no""" +786 6 optimizer """adam""" +786 6 training_loop """owa""" +786 6 negative_sampler """basic""" +786 6 evaluator """rankbased""" +786 7 dataset """yago310""" +786 7 model """transd""" +786 7 loss """bceaftersigmoid""" +786 7 regularizer """no""" +786 7 optimizer """adam""" +786 7 training_loop """owa""" +786 7 negative_sampler 
"""basic""" +786 7 evaluator """rankbased""" +786 8 dataset """yago310""" +786 8 model """transd""" +786 8 loss """bceaftersigmoid""" +786 8 regularizer """no""" +786 8 optimizer """adam""" +786 8 training_loop """owa""" +786 8 negative_sampler """basic""" +786 8 evaluator """rankbased""" +786 9 dataset """yago310""" +786 9 model """transd""" +786 9 loss """bceaftersigmoid""" +786 9 regularizer """no""" +786 9 optimizer """adam""" +786 9 training_loop """owa""" +786 9 negative_sampler """basic""" +786 9 evaluator """rankbased""" +786 10 dataset """yago310""" +786 10 model """transd""" +786 10 loss """bceaftersigmoid""" +786 10 regularizer """no""" +786 10 optimizer """adam""" +786 10 training_loop """owa""" +786 10 negative_sampler """basic""" +786 10 evaluator """rankbased""" +786 11 dataset """yago310""" +786 11 model """transd""" +786 11 loss """bceaftersigmoid""" +786 11 regularizer """no""" +786 11 optimizer """adam""" +786 11 training_loop """owa""" +786 11 negative_sampler """basic""" +786 11 evaluator """rankbased""" +786 12 dataset """yago310""" +786 12 model """transd""" +786 12 loss """bceaftersigmoid""" +786 12 regularizer """no""" +786 12 optimizer """adam""" +786 12 training_loop """owa""" +786 12 negative_sampler """basic""" +786 12 evaluator """rankbased""" +786 13 dataset """yago310""" +786 13 model """transd""" +786 13 loss """bceaftersigmoid""" +786 13 regularizer """no""" +786 13 optimizer """adam""" +786 13 training_loop """owa""" +786 13 negative_sampler """basic""" +786 13 evaluator """rankbased""" +786 14 dataset """yago310""" +786 14 model """transd""" +786 14 loss """bceaftersigmoid""" +786 14 regularizer """no""" +786 14 optimizer """adam""" +786 14 training_loop """owa""" +786 14 negative_sampler """basic""" +786 14 evaluator """rankbased""" +786 15 dataset """yago310""" +786 15 model """transd""" +786 15 loss """bceaftersigmoid""" +786 15 regularizer """no""" +786 15 optimizer """adam""" +786 15 training_loop """owa""" +786 15 
negative_sampler """basic""" +786 15 evaluator """rankbased""" +786 16 dataset """yago310""" +786 16 model """transd""" +786 16 loss """bceaftersigmoid""" +786 16 regularizer """no""" +786 16 optimizer """adam""" +786 16 training_loop """owa""" +786 16 negative_sampler """basic""" +786 16 evaluator """rankbased""" +786 17 dataset """yago310""" +786 17 model """transd""" +786 17 loss """bceaftersigmoid""" +786 17 regularizer """no""" +786 17 optimizer """adam""" +786 17 training_loop """owa""" +786 17 negative_sampler """basic""" +786 17 evaluator """rankbased""" +786 18 dataset """yago310""" +786 18 model """transd""" +786 18 loss """bceaftersigmoid""" +786 18 regularizer """no""" +786 18 optimizer """adam""" +786 18 training_loop """owa""" +786 18 negative_sampler """basic""" +786 18 evaluator """rankbased""" +786 19 dataset """yago310""" +786 19 model """transd""" +786 19 loss """bceaftersigmoid""" +786 19 regularizer """no""" +786 19 optimizer """adam""" +786 19 training_loop """owa""" +786 19 negative_sampler """basic""" +786 19 evaluator """rankbased""" +786 20 dataset """yago310""" +786 20 model """transd""" +786 20 loss """bceaftersigmoid""" +786 20 regularizer """no""" +786 20 optimizer """adam""" +786 20 training_loop """owa""" +786 20 negative_sampler """basic""" +786 20 evaluator """rankbased""" +786 21 dataset """yago310""" +786 21 model """transd""" +786 21 loss """bceaftersigmoid""" +786 21 regularizer """no""" +786 21 optimizer """adam""" +786 21 training_loop """owa""" +786 21 negative_sampler """basic""" +786 21 evaluator """rankbased""" +786 22 dataset """yago310""" +786 22 model """transd""" +786 22 loss """bceaftersigmoid""" +786 22 regularizer """no""" +786 22 optimizer """adam""" +786 22 training_loop """owa""" +786 22 negative_sampler """basic""" +786 22 evaluator """rankbased""" +786 23 dataset """yago310""" +786 23 model """transd""" +786 23 loss """bceaftersigmoid""" +786 23 regularizer """no""" +786 23 optimizer """adam""" +786 23 
training_loop """owa""" +786 23 negative_sampler """basic""" +786 23 evaluator """rankbased""" +787 1 model.embedding_dim 2.0 +787 1 model.relation_dim 2.0 +787 1 loss.margin 8.817393278874041 +787 1 optimizer.lr 0.030402140806644686 +787 1 negative_sampler.num_negs_per_pos 14.0 +787 1 training.batch_size 0.0 +787 2 model.embedding_dim 1.0 +787 2 model.relation_dim 1.0 +787 2 loss.margin 2.0111015933871474 +787 2 optimizer.lr 0.006309238843392257 +787 2 negative_sampler.num_negs_per_pos 6.0 +787 2 training.batch_size 2.0 +787 3 model.embedding_dim 2.0 +787 3 model.relation_dim 1.0 +787 3 loss.margin 8.50421229738888 +787 3 optimizer.lr 0.004356437099553912 +787 3 negative_sampler.num_negs_per_pos 18.0 +787 3 training.batch_size 1.0 +787 4 model.embedding_dim 2.0 +787 4 model.relation_dim 0.0 +787 4 loss.margin 9.635221378278958 +787 4 optimizer.lr 0.026327534567999775 +787 4 negative_sampler.num_negs_per_pos 10.0 +787 4 training.batch_size 1.0 +787 5 model.embedding_dim 1.0 +787 5 model.relation_dim 2.0 +787 5 loss.margin 7.793857422378816 +787 5 optimizer.lr 0.052209148070469086 +787 5 negative_sampler.num_negs_per_pos 33.0 +787 5 training.batch_size 2.0 +787 6 model.embedding_dim 0.0 +787 6 model.relation_dim 0.0 +787 6 loss.margin 6.457061758188102 +787 6 optimizer.lr 0.04506620026552796 +787 6 negative_sampler.num_negs_per_pos 41.0 +787 6 training.batch_size 3.0 +787 7 model.embedding_dim 1.0 +787 7 model.relation_dim 2.0 +787 7 loss.margin 4.076935551155991 +787 7 optimizer.lr 0.0011035347084023825 +787 7 negative_sampler.num_negs_per_pos 3.0 +787 7 training.batch_size 2.0 +787 8 model.embedding_dim 0.0 +787 8 model.relation_dim 2.0 +787 8 loss.margin 8.78713022789106 +787 8 optimizer.lr 0.013515840011815458 +787 8 negative_sampler.num_negs_per_pos 31.0 +787 8 training.batch_size 2.0 +787 9 model.embedding_dim 0.0 +787 9 model.relation_dim 1.0 +787 9 loss.margin 5.979778634428614 +787 9 optimizer.lr 0.014911275919434534 +787 9 negative_sampler.num_negs_per_pos 
35.0 +787 9 training.batch_size 2.0 +787 10 model.embedding_dim 2.0 +787 10 model.relation_dim 1.0 +787 10 loss.margin 3.9636432008535936 +787 10 optimizer.lr 0.0010349841756267476 +787 10 negative_sampler.num_negs_per_pos 37.0 +787 10 training.batch_size 3.0 +787 11 model.embedding_dim 1.0 +787 11 model.relation_dim 1.0 +787 11 loss.margin 3.039714116292188 +787 11 optimizer.lr 0.010579091912961162 +787 11 negative_sampler.num_negs_per_pos 39.0 +787 11 training.batch_size 2.0 +787 12 model.embedding_dim 0.0 +787 12 model.relation_dim 0.0 +787 12 loss.margin 3.0681300058916112 +787 12 optimizer.lr 0.005481989842003084 +787 12 negative_sampler.num_negs_per_pos 19.0 +787 12 training.batch_size 0.0 +787 13 model.embedding_dim 1.0 +787 13 model.relation_dim 0.0 +787 13 loss.margin 5.814420121340884 +787 13 optimizer.lr 0.03517164460981712 +787 13 negative_sampler.num_negs_per_pos 46.0 +787 13 training.batch_size 3.0 +787 14 model.embedding_dim 1.0 +787 14 model.relation_dim 2.0 +787 14 loss.margin 4.25479919150473 +787 14 optimizer.lr 0.027302956751948243 +787 14 negative_sampler.num_negs_per_pos 42.0 +787 14 training.batch_size 3.0 +787 15 model.embedding_dim 2.0 +787 15 model.relation_dim 2.0 +787 15 loss.margin 6.9705490684901505 +787 15 optimizer.lr 0.004237833940994826 +787 15 negative_sampler.num_negs_per_pos 10.0 +787 15 training.batch_size 3.0 +787 1 dataset """yago310""" +787 1 model """transd""" +787 1 loss """marginranking""" +787 1 regularizer """no""" +787 1 optimizer """adam""" +787 1 training_loop """owa""" +787 1 negative_sampler """basic""" +787 1 evaluator """rankbased""" +787 2 dataset """yago310""" +787 2 model """transd""" +787 2 loss """marginranking""" +787 2 regularizer """no""" +787 2 optimizer """adam""" +787 2 training_loop """owa""" +787 2 negative_sampler """basic""" +787 2 evaluator """rankbased""" +787 3 dataset """yago310""" +787 3 model """transd""" +787 3 loss """marginranking""" +787 3 regularizer """no""" +787 3 optimizer """adam""" 
+787 3 training_loop """owa""" +787 3 negative_sampler """basic""" +787 3 evaluator """rankbased""" +787 4 dataset """yago310""" +787 4 model """transd""" +787 4 loss """marginranking""" +787 4 regularizer """no""" +787 4 optimizer """adam""" +787 4 training_loop """owa""" +787 4 negative_sampler """basic""" +787 4 evaluator """rankbased""" +787 5 dataset """yago310""" +787 5 model """transd""" +787 5 loss """marginranking""" +787 5 regularizer """no""" +787 5 optimizer """adam""" +787 5 training_loop """owa""" +787 5 negative_sampler """basic""" +787 5 evaluator """rankbased""" +787 6 dataset """yago310""" +787 6 model """transd""" +787 6 loss """marginranking""" +787 6 regularizer """no""" +787 6 optimizer """adam""" +787 6 training_loop """owa""" +787 6 negative_sampler """basic""" +787 6 evaluator """rankbased""" +787 7 dataset """yago310""" +787 7 model """transd""" +787 7 loss """marginranking""" +787 7 regularizer """no""" +787 7 optimizer """adam""" +787 7 training_loop """owa""" +787 7 negative_sampler """basic""" +787 7 evaluator """rankbased""" +787 8 dataset """yago310""" +787 8 model """transd""" +787 8 loss """marginranking""" +787 8 regularizer """no""" +787 8 optimizer """adam""" +787 8 training_loop """owa""" +787 8 negative_sampler """basic""" +787 8 evaluator """rankbased""" +787 9 dataset """yago310""" +787 9 model """transd""" +787 9 loss """marginranking""" +787 9 regularizer """no""" +787 9 optimizer """adam""" +787 9 training_loop """owa""" +787 9 negative_sampler """basic""" +787 9 evaluator """rankbased""" +787 10 dataset """yago310""" +787 10 model """transd""" +787 10 loss """marginranking""" +787 10 regularizer """no""" +787 10 optimizer """adam""" +787 10 training_loop """owa""" +787 10 negative_sampler """basic""" +787 10 evaluator """rankbased""" +787 11 dataset """yago310""" +787 11 model """transd""" +787 11 loss """marginranking""" +787 11 regularizer """no""" +787 11 optimizer """adam""" +787 11 training_loop """owa""" +787 11 
negative_sampler """basic""" +787 11 evaluator """rankbased""" +787 12 dataset """yago310""" +787 12 model """transd""" +787 12 loss """marginranking""" +787 12 regularizer """no""" +787 12 optimizer """adam""" +787 12 training_loop """owa""" +787 12 negative_sampler """basic""" +787 12 evaluator """rankbased""" +787 13 dataset """yago310""" +787 13 model """transd""" +787 13 loss """marginranking""" +787 13 regularizer """no""" +787 13 optimizer """adam""" +787 13 training_loop """owa""" +787 13 negative_sampler """basic""" +787 13 evaluator """rankbased""" +787 14 dataset """yago310""" +787 14 model """transd""" +787 14 loss """marginranking""" +787 14 regularizer """no""" +787 14 optimizer """adam""" +787 14 training_loop """owa""" +787 14 negative_sampler """basic""" +787 14 evaluator """rankbased""" +787 15 dataset """yago310""" +787 15 model """transd""" +787 15 loss """marginranking""" +787 15 regularizer """no""" +787 15 optimizer """adam""" +787 15 training_loop """owa""" +787 15 negative_sampler """basic""" +787 15 evaluator """rankbased""" +788 1 model.embedding_dim 2.0 +788 1 model.relation_dim 1.0 +788 1 loss.margin 1.3605413908172759 +788 1 optimizer.lr 0.002754180506465828 +788 1 negative_sampler.num_negs_per_pos 16.0 +788 1 training.batch_size 1.0 +788 2 model.embedding_dim 2.0 +788 2 model.relation_dim 2.0 +788 2 loss.margin 5.029919779342807 +788 2 optimizer.lr 0.04301985482031513 +788 2 negative_sampler.num_negs_per_pos 30.0 +788 2 training.batch_size 2.0 +788 3 model.embedding_dim 0.0 +788 3 model.relation_dim 0.0 +788 3 loss.margin 7.277181484722612 +788 3 optimizer.lr 0.00496693699991772 +788 3 negative_sampler.num_negs_per_pos 12.0 +788 3 training.batch_size 1.0 +788 4 model.embedding_dim 0.0 +788 4 model.relation_dim 1.0 +788 4 loss.margin 0.6099341466689443 +788 4 optimizer.lr 0.025719548972496988 +788 4 negative_sampler.num_negs_per_pos 43.0 +788 4 training.batch_size 3.0 +788 5 model.embedding_dim 2.0 +788 5 model.relation_dim 0.0 +788 5 
loss.margin 8.244507105840952 +788 5 optimizer.lr 0.0010045233568346265 +788 5 negative_sampler.num_negs_per_pos 13.0 +788 5 training.batch_size 0.0 +788 6 model.embedding_dim 2.0 +788 6 model.relation_dim 0.0 +788 6 loss.margin 5.086724010439799 +788 6 optimizer.lr 0.0035315613662201656 +788 6 negative_sampler.num_negs_per_pos 33.0 +788 6 training.batch_size 2.0 +788 7 model.embedding_dim 2.0 +788 7 model.relation_dim 0.0 +788 7 loss.margin 6.29548434076408 +788 7 optimizer.lr 0.003072316895576071 +788 7 negative_sampler.num_negs_per_pos 37.0 +788 7 training.batch_size 3.0 +788 8 model.embedding_dim 2.0 +788 8 model.relation_dim 2.0 +788 8 loss.margin 0.6176541026557305 +788 8 optimizer.lr 0.005391760002132472 +788 8 negative_sampler.num_negs_per_pos 18.0 +788 8 training.batch_size 3.0 +788 9 model.embedding_dim 0.0 +788 9 model.relation_dim 1.0 +788 9 loss.margin 5.688859201024304 +788 9 optimizer.lr 0.007160368395682607 +788 9 negative_sampler.num_negs_per_pos 8.0 +788 9 training.batch_size 1.0 +788 10 model.embedding_dim 1.0 +788 10 model.relation_dim 0.0 +788 10 loss.margin 4.9271831064295855 +788 10 optimizer.lr 0.010490107832482868 +788 10 negative_sampler.num_negs_per_pos 35.0 +788 10 training.batch_size 1.0 +788 11 model.embedding_dim 2.0 +788 11 model.relation_dim 0.0 +788 11 loss.margin 9.471904991174995 +788 11 optimizer.lr 0.0038601725866194577 +788 11 negative_sampler.num_negs_per_pos 47.0 +788 11 training.batch_size 2.0 +788 12 model.embedding_dim 2.0 +788 12 model.relation_dim 2.0 +788 12 loss.margin 3.298368650761703 +788 12 optimizer.lr 0.0029112604256109577 +788 12 negative_sampler.num_negs_per_pos 32.0 +788 12 training.batch_size 2.0 +788 13 model.embedding_dim 1.0 +788 13 model.relation_dim 2.0 +788 13 loss.margin 4.446153575622836 +788 13 optimizer.lr 0.03014772181522983 +788 13 negative_sampler.num_negs_per_pos 33.0 +788 13 training.batch_size 1.0 +788 14 model.embedding_dim 1.0 +788 14 model.relation_dim 2.0 +788 14 loss.margin 
0.8094439613112858 +788 14 optimizer.lr 0.0014654356045804761 +788 14 negative_sampler.num_negs_per_pos 9.0 +788 14 training.batch_size 1.0 +788 15 model.embedding_dim 1.0 +788 15 model.relation_dim 1.0 +788 15 loss.margin 2.5979670248830975 +788 15 optimizer.lr 0.040142630655473154 +788 15 negative_sampler.num_negs_per_pos 39.0 +788 15 training.batch_size 1.0 +788 16 model.embedding_dim 0.0 +788 16 model.relation_dim 0.0 +788 16 loss.margin 2.3506990094814624 +788 16 optimizer.lr 0.005145886151766797 +788 16 negative_sampler.num_negs_per_pos 15.0 +788 16 training.batch_size 3.0 +788 17 model.embedding_dim 2.0 +788 17 model.relation_dim 2.0 +788 17 loss.margin 8.231965925045198 +788 17 optimizer.lr 0.02915102735173528 +788 17 negative_sampler.num_negs_per_pos 45.0 +788 17 training.batch_size 1.0 +788 18 model.embedding_dim 0.0 +788 18 model.relation_dim 1.0 +788 18 loss.margin 5.415644670358828 +788 18 optimizer.lr 0.07883029847110734 +788 18 negative_sampler.num_negs_per_pos 48.0 +788 18 training.batch_size 2.0 +788 19 model.embedding_dim 2.0 +788 19 model.relation_dim 0.0 +788 19 loss.margin 2.2231001965586175 +788 19 optimizer.lr 0.07844866180802036 +788 19 negative_sampler.num_negs_per_pos 15.0 +788 19 training.batch_size 1.0 +788 20 model.embedding_dim 1.0 +788 20 model.relation_dim 0.0 +788 20 loss.margin 4.022799088672894 +788 20 optimizer.lr 0.017207088589011824 +788 20 negative_sampler.num_negs_per_pos 42.0 +788 20 training.batch_size 1.0 +788 21 model.embedding_dim 0.0 +788 21 model.relation_dim 0.0 +788 21 loss.margin 9.115604370129674 +788 21 optimizer.lr 0.04592447018973836 +788 21 negative_sampler.num_negs_per_pos 14.0 +788 21 training.batch_size 0.0 +788 22 model.embedding_dim 2.0 +788 22 model.relation_dim 2.0 +788 22 loss.margin 7.815279059098201 +788 22 optimizer.lr 0.011887973994144064 +788 22 negative_sampler.num_negs_per_pos 44.0 +788 22 training.batch_size 1.0 +788 1 dataset """yago310""" +788 1 model """transd""" +788 1 loss 
"""marginranking""" +788 1 regularizer """no""" +788 1 optimizer """adam""" +788 1 training_loop """owa""" +788 1 negative_sampler """basic""" +788 1 evaluator """rankbased""" +788 2 dataset """yago310""" +788 2 model """transd""" +788 2 loss """marginranking""" +788 2 regularizer """no""" +788 2 optimizer """adam""" +788 2 training_loop """owa""" +788 2 negative_sampler """basic""" +788 2 evaluator """rankbased""" +788 3 dataset """yago310""" +788 3 model """transd""" +788 3 loss """marginranking""" +788 3 regularizer """no""" +788 3 optimizer """adam""" +788 3 training_loop """owa""" +788 3 negative_sampler """basic""" +788 3 evaluator """rankbased""" +788 4 dataset """yago310""" +788 4 model """transd""" +788 4 loss """marginranking""" +788 4 regularizer """no""" +788 4 optimizer """adam""" +788 4 training_loop """owa""" +788 4 negative_sampler """basic""" +788 4 evaluator """rankbased""" +788 5 dataset """yago310""" +788 5 model """transd""" +788 5 loss """marginranking""" +788 5 regularizer """no""" +788 5 optimizer """adam""" +788 5 training_loop """owa""" +788 5 negative_sampler """basic""" +788 5 evaluator """rankbased""" +788 6 dataset """yago310""" +788 6 model """transd""" +788 6 loss """marginranking""" +788 6 regularizer """no""" +788 6 optimizer """adam""" +788 6 training_loop """owa""" +788 6 negative_sampler """basic""" +788 6 evaluator """rankbased""" +788 7 dataset """yago310""" +788 7 model """transd""" +788 7 loss """marginranking""" +788 7 regularizer """no""" +788 7 optimizer """adam""" +788 7 training_loop """owa""" +788 7 negative_sampler """basic""" +788 7 evaluator """rankbased""" +788 8 dataset """yago310""" +788 8 model """transd""" +788 8 loss """marginranking""" +788 8 regularizer """no""" +788 8 optimizer """adam""" +788 8 training_loop """owa""" +788 8 negative_sampler """basic""" +788 8 evaluator """rankbased""" +788 9 dataset """yago310""" +788 9 model """transd""" +788 9 loss """marginranking""" +788 9 regularizer """no""" +788 9 
optimizer """adam""" +788 9 training_loop """owa""" +788 9 negative_sampler """basic""" +788 9 evaluator """rankbased""" +788 10 dataset """yago310""" +788 10 model """transd""" +788 10 loss """marginranking""" +788 10 regularizer """no""" +788 10 optimizer """adam""" +788 10 training_loop """owa""" +788 10 negative_sampler """basic""" +788 10 evaluator """rankbased""" +788 11 dataset """yago310""" +788 11 model """transd""" +788 11 loss """marginranking""" +788 11 regularizer """no""" +788 11 optimizer """adam""" +788 11 training_loop """owa""" +788 11 negative_sampler """basic""" +788 11 evaluator """rankbased""" +788 12 dataset """yago310""" +788 12 model """transd""" +788 12 loss """marginranking""" +788 12 regularizer """no""" +788 12 optimizer """adam""" +788 12 training_loop """owa""" +788 12 negative_sampler """basic""" +788 12 evaluator """rankbased""" +788 13 dataset """yago310""" +788 13 model """transd""" +788 13 loss """marginranking""" +788 13 regularizer """no""" +788 13 optimizer """adam""" +788 13 training_loop """owa""" +788 13 negative_sampler """basic""" +788 13 evaluator """rankbased""" +788 14 dataset """yago310""" +788 14 model """transd""" +788 14 loss """marginranking""" +788 14 regularizer """no""" +788 14 optimizer """adam""" +788 14 training_loop """owa""" +788 14 negative_sampler """basic""" +788 14 evaluator """rankbased""" +788 15 dataset """yago310""" +788 15 model """transd""" +788 15 loss """marginranking""" +788 15 regularizer """no""" +788 15 optimizer """adam""" +788 15 training_loop """owa""" +788 15 negative_sampler """basic""" +788 15 evaluator """rankbased""" +788 16 dataset """yago310""" +788 16 model """transd""" +788 16 loss """marginranking""" +788 16 regularizer """no""" +788 16 optimizer """adam""" +788 16 training_loop """owa""" +788 16 negative_sampler """basic""" +788 16 evaluator """rankbased""" +788 17 dataset """yago310""" +788 17 model """transd""" +788 17 loss """marginranking""" +788 17 regularizer """no""" 
+788 17 optimizer """adam""" +788 17 training_loop """owa""" +788 17 negative_sampler """basic""" +788 17 evaluator """rankbased""" +788 18 dataset """yago310""" +788 18 model """transd""" +788 18 loss """marginranking""" +788 18 regularizer """no""" +788 18 optimizer """adam""" +788 18 training_loop """owa""" +788 18 negative_sampler """basic""" +788 18 evaluator """rankbased""" +788 19 dataset """yago310""" +788 19 model """transd""" +788 19 loss """marginranking""" +788 19 regularizer """no""" +788 19 optimizer """adam""" +788 19 training_loop """owa""" +788 19 negative_sampler """basic""" +788 19 evaluator """rankbased""" +788 20 dataset """yago310""" +788 20 model """transd""" +788 20 loss """marginranking""" +788 20 regularizer """no""" +788 20 optimizer """adam""" +788 20 training_loop """owa""" +788 20 negative_sampler """basic""" +788 20 evaluator """rankbased""" +788 21 dataset """yago310""" +788 21 model """transd""" +788 21 loss """marginranking""" +788 21 regularizer """no""" +788 21 optimizer """adam""" +788 21 training_loop """owa""" +788 21 negative_sampler """basic""" +788 21 evaluator """rankbased""" +788 22 dataset """yago310""" +788 22 model """transd""" +788 22 loss """marginranking""" +788 22 regularizer """no""" +788 22 optimizer """adam""" +788 22 training_loop """owa""" +788 22 negative_sampler """basic""" +788 22 evaluator """rankbased""" +789 1 model.embedding_dim 0.0 +789 1 model.relation_dim 0.0 +789 1 loss.margin 14.345330572895405 +789 1 loss.adversarial_temperature 0.47401701814590025 +789 1 optimizer.lr 0.06653027418252427 +789 1 negative_sampler.num_negs_per_pos 0.0 +789 1 training.batch_size 2.0 +789 2 model.embedding_dim 2.0 +789 2 model.relation_dim 1.0 +789 2 loss.margin 10.486108418341734 +789 2 loss.adversarial_temperature 0.8954762864809677 +789 2 optimizer.lr 0.007623717355820685 +789 2 negative_sampler.num_negs_per_pos 25.0 +789 2 training.batch_size 1.0 +789 3 model.embedding_dim 0.0 +789 3 model.relation_dim 2.0 +789 3 
loss.margin 14.938574348135653 +789 3 loss.adversarial_temperature 0.5827294403069498 +789 3 optimizer.lr 0.03923838472807481 +789 3 negative_sampler.num_negs_per_pos 42.0 +789 3 training.batch_size 3.0 +789 4 model.embedding_dim 2.0 +789 4 model.relation_dim 1.0 +789 4 loss.margin 9.446580148192723 +789 4 loss.adversarial_temperature 0.6959707069361439 +789 4 optimizer.lr 0.0012971462712995396 +789 4 negative_sampler.num_negs_per_pos 46.0 +789 4 training.batch_size 0.0 +789 5 model.embedding_dim 2.0 +789 5 model.relation_dim 2.0 +789 5 loss.margin 24.776157783808955 +789 5 loss.adversarial_temperature 0.4607638239000501 +789 5 optimizer.lr 0.03054196615729587 +789 5 negative_sampler.num_negs_per_pos 44.0 +789 5 training.batch_size 3.0 +789 6 model.embedding_dim 1.0 +789 6 model.relation_dim 0.0 +789 6 loss.margin 16.623849657879013 +789 6 loss.adversarial_temperature 0.1774321306765147 +789 6 optimizer.lr 0.003988218543928864 +789 6 negative_sampler.num_negs_per_pos 28.0 +789 6 training.batch_size 0.0 +789 7 model.embedding_dim 0.0 +789 7 model.relation_dim 0.0 +789 7 loss.margin 6.898841205224224 +789 7 loss.adversarial_temperature 0.46901856622123184 +789 7 optimizer.lr 0.0014373355579453428 +789 7 negative_sampler.num_negs_per_pos 38.0 +789 7 training.batch_size 0.0 +789 8 model.embedding_dim 2.0 +789 8 model.relation_dim 1.0 +789 8 loss.margin 18.844582391527418 +789 8 loss.adversarial_temperature 0.5620362705138643 +789 8 optimizer.lr 0.004476558585832925 +789 8 negative_sampler.num_negs_per_pos 18.0 +789 8 training.batch_size 2.0 +789 9 model.embedding_dim 0.0 +789 9 model.relation_dim 0.0 +789 9 loss.margin 13.22323218061696 +789 9 loss.adversarial_temperature 0.6850591254291207 +789 9 optimizer.lr 0.005285041447688541 +789 9 negative_sampler.num_negs_per_pos 16.0 +789 9 training.batch_size 3.0 +789 10 model.embedding_dim 2.0 +789 10 model.relation_dim 0.0 +789 10 loss.margin 23.958297677715734 +789 10 loss.adversarial_temperature 0.6958222042452381 +789 10 
optimizer.lr 0.0014062196777874319 +789 10 negative_sampler.num_negs_per_pos 39.0 +789 10 training.batch_size 1.0 +789 11 model.embedding_dim 1.0 +789 11 model.relation_dim 0.0 +789 11 loss.margin 4.460955153937564 +789 11 loss.adversarial_temperature 0.8368775370696678 +789 11 optimizer.lr 0.012261621418847294 +789 11 negative_sampler.num_negs_per_pos 43.0 +789 11 training.batch_size 3.0 +789 12 model.embedding_dim 0.0 +789 12 model.relation_dim 1.0 +789 12 loss.margin 2.956844126710467 +789 12 loss.adversarial_temperature 0.21418813995365774 +789 12 optimizer.lr 0.009123554337709311 +789 12 negative_sampler.num_negs_per_pos 6.0 +789 12 training.batch_size 3.0 +789 13 model.embedding_dim 0.0 +789 13 model.relation_dim 2.0 +789 13 loss.margin 18.81988257605348 +789 13 loss.adversarial_temperature 0.2156419378084296 +789 13 optimizer.lr 0.0034327994886654773 +789 13 negative_sampler.num_negs_per_pos 17.0 +789 13 training.batch_size 2.0 +789 14 model.embedding_dim 0.0 +789 14 model.relation_dim 0.0 +789 14 loss.margin 13.121691350807565 +789 14 loss.adversarial_temperature 0.6773167772775458 +789 14 optimizer.lr 0.019260239691009932 +789 14 negative_sampler.num_negs_per_pos 33.0 +789 14 training.batch_size 3.0 +789 15 model.embedding_dim 2.0 +789 15 model.relation_dim 1.0 +789 15 loss.margin 29.751538175174446 +789 15 loss.adversarial_temperature 0.7316822159641458 +789 15 optimizer.lr 0.0010601718354479649 +789 15 negative_sampler.num_negs_per_pos 30.0 +789 15 training.batch_size 2.0 +789 16 model.embedding_dim 2.0 +789 16 model.relation_dim 2.0 +789 16 loss.margin 7.582169526134346 +789 16 loss.adversarial_temperature 0.8271192694207179 +789 16 optimizer.lr 0.0088898506297653 +789 16 negative_sampler.num_negs_per_pos 9.0 +789 16 training.batch_size 3.0 +789 17 model.embedding_dim 1.0 +789 17 model.relation_dim 0.0 +789 17 loss.margin 19.048845361628892 +789 17 loss.adversarial_temperature 0.3084630837591346 +789 17 optimizer.lr 0.08421359061206385 +789 17 
negative_sampler.num_negs_per_pos 9.0 +789 17 training.batch_size 1.0 +789 18 model.embedding_dim 1.0 +789 18 model.relation_dim 2.0 +789 18 loss.margin 6.744402981237087 +789 18 loss.adversarial_temperature 0.49262933605503967 +789 18 optimizer.lr 0.04602206671667431 +789 18 negative_sampler.num_negs_per_pos 49.0 +789 18 training.batch_size 0.0 +789 1 dataset """yago310""" +789 1 model """transd""" +789 1 loss """nssa""" +789 1 regularizer """no""" +789 1 optimizer """adam""" +789 1 training_loop """owa""" +789 1 negative_sampler """basic""" +789 1 evaluator """rankbased""" +789 2 dataset """yago310""" +789 2 model """transd""" +789 2 loss """nssa""" +789 2 regularizer """no""" +789 2 optimizer """adam""" +789 2 training_loop """owa""" +789 2 negative_sampler """basic""" +789 2 evaluator """rankbased""" +789 3 dataset """yago310""" +789 3 model """transd""" +789 3 loss """nssa""" +789 3 regularizer """no""" +789 3 optimizer """adam""" +789 3 training_loop """owa""" +789 3 negative_sampler """basic""" +789 3 evaluator """rankbased""" +789 4 dataset """yago310""" +789 4 model """transd""" +789 4 loss """nssa""" +789 4 regularizer """no""" +789 4 optimizer """adam""" +789 4 training_loop """owa""" +789 4 negative_sampler """basic""" +789 4 evaluator """rankbased""" +789 5 dataset """yago310""" +789 5 model """transd""" +789 5 loss """nssa""" +789 5 regularizer """no""" +789 5 optimizer """adam""" +789 5 training_loop """owa""" +789 5 negative_sampler """basic""" +789 5 evaluator """rankbased""" +789 6 dataset """yago310""" +789 6 model """transd""" +789 6 loss """nssa""" +789 6 regularizer """no""" +789 6 optimizer """adam""" +789 6 training_loop """owa""" +789 6 negative_sampler """basic""" +789 6 evaluator """rankbased""" +789 7 dataset """yago310""" +789 7 model """transd""" +789 7 loss """nssa""" +789 7 regularizer """no""" +789 7 optimizer """adam""" +789 7 training_loop """owa""" +789 7 negative_sampler """basic""" +789 7 evaluator """rankbased""" +789 8 
dataset """yago310""" +789 8 model """transd""" +789 8 loss """nssa""" +789 8 regularizer """no""" +789 8 optimizer """adam""" +789 8 training_loop """owa""" +789 8 negative_sampler """basic""" +789 8 evaluator """rankbased""" +789 9 dataset """yago310""" +789 9 model """transd""" +789 9 loss """nssa""" +789 9 regularizer """no""" +789 9 optimizer """adam""" +789 9 training_loop """owa""" +789 9 negative_sampler """basic""" +789 9 evaluator """rankbased""" +789 10 dataset """yago310""" +789 10 model """transd""" +789 10 loss """nssa""" +789 10 regularizer """no""" +789 10 optimizer """adam""" +789 10 training_loop """owa""" +789 10 negative_sampler """basic""" +789 10 evaluator """rankbased""" +789 11 dataset """yago310""" +789 11 model """transd""" +789 11 loss """nssa""" +789 11 regularizer """no""" +789 11 optimizer """adam""" +789 11 training_loop """owa""" +789 11 negative_sampler """basic""" +789 11 evaluator """rankbased""" +789 12 dataset """yago310""" +789 12 model """transd""" +789 12 loss """nssa""" +789 12 regularizer """no""" +789 12 optimizer """adam""" +789 12 training_loop """owa""" +789 12 negative_sampler """basic""" +789 12 evaluator """rankbased""" +789 13 dataset """yago310""" +789 13 model """transd""" +789 13 loss """nssa""" +789 13 regularizer """no""" +789 13 optimizer """adam""" +789 13 training_loop """owa""" +789 13 negative_sampler """basic""" +789 13 evaluator """rankbased""" +789 14 dataset """yago310""" +789 14 model """transd""" +789 14 loss """nssa""" +789 14 regularizer """no""" +789 14 optimizer """adam""" +789 14 training_loop """owa""" +789 14 negative_sampler """basic""" +789 14 evaluator """rankbased""" +789 15 dataset """yago310""" +789 15 model """transd""" +789 15 loss """nssa""" +789 15 regularizer """no""" +789 15 optimizer """adam""" +789 15 training_loop """owa""" +789 15 negative_sampler """basic""" +789 15 evaluator """rankbased""" +789 16 dataset """yago310""" +789 16 model """transd""" +789 16 loss """nssa""" +789 
16 regularizer """no""" +789 16 optimizer """adam""" +789 16 training_loop """owa""" +789 16 negative_sampler """basic""" +789 16 evaluator """rankbased""" +789 17 dataset """yago310""" +789 17 model """transd""" +789 17 loss """nssa""" +789 17 regularizer """no""" +789 17 optimizer """adam""" +789 17 training_loop """owa""" +789 17 negative_sampler """basic""" +789 17 evaluator """rankbased""" +789 18 dataset """yago310""" +789 18 model """transd""" +789 18 loss """nssa""" +789 18 regularizer """no""" +789 18 optimizer """adam""" +789 18 training_loop """owa""" +789 18 negative_sampler """basic""" +789 18 evaluator """rankbased""" +790 1 model.embedding_dim 0.0 +790 1 model.relation_dim 2.0 +790 1 loss.margin 7.410926875761916 +790 1 loss.adversarial_temperature 0.768086759600852 +790 1 optimizer.lr 0.0032076926656590256 +790 1 negative_sampler.num_negs_per_pos 18.0 +790 1 training.batch_size 3.0 +790 2 model.embedding_dim 2.0 +790 2 model.relation_dim 0.0 +790 2 loss.margin 14.434486669830076 +790 2 loss.adversarial_temperature 0.16824853590023106 +790 2 optimizer.lr 0.015275627044348943 +790 2 negative_sampler.num_negs_per_pos 40.0 +790 2 training.batch_size 3.0 +790 3 model.embedding_dim 2.0 +790 3 model.relation_dim 2.0 +790 3 loss.margin 2.8150121535387624 +790 3 loss.adversarial_temperature 0.2992893335895591 +790 3 optimizer.lr 0.00549652867073074 +790 3 negative_sampler.num_negs_per_pos 5.0 +790 3 training.batch_size 1.0 +790 4 model.embedding_dim 0.0 +790 4 model.relation_dim 1.0 +790 4 loss.margin 4.776988125177913 +790 4 loss.adversarial_temperature 0.7238571159491533 +790 4 optimizer.lr 0.03176825267099617 +790 4 negative_sampler.num_negs_per_pos 37.0 +790 4 training.batch_size 2.0 +790 5 model.embedding_dim 1.0 +790 5 model.relation_dim 1.0 +790 5 loss.margin 2.8237358637851244 +790 5 loss.adversarial_temperature 0.2997070247296517 +790 5 optimizer.lr 0.004290523751351746 +790 5 negative_sampler.num_negs_per_pos 45.0 +790 5 training.batch_size 1.0 
+790 6 model.embedding_dim 1.0 +790 6 model.relation_dim 0.0 +790 6 loss.margin 1.1430315517528156 +790 6 loss.adversarial_temperature 0.9756034714289531 +790 6 optimizer.lr 0.04722874807097709 +790 6 negative_sampler.num_negs_per_pos 11.0 +790 6 training.batch_size 0.0 +790 7 model.embedding_dim 0.0 +790 7 model.relation_dim 0.0 +790 7 loss.margin 17.9148016737304 +790 7 loss.adversarial_temperature 0.6819903802976588 +790 7 optimizer.lr 0.03946418052932834 +790 7 negative_sampler.num_negs_per_pos 19.0 +790 7 training.batch_size 3.0 +790 8 model.embedding_dim 0.0 +790 8 model.relation_dim 1.0 +790 8 loss.margin 18.562820497563752 +790 8 loss.adversarial_temperature 0.7123199774567645 +790 8 optimizer.lr 0.007518733089909469 +790 8 negative_sampler.num_negs_per_pos 31.0 +790 8 training.batch_size 1.0 +790 9 model.embedding_dim 2.0 +790 9 model.relation_dim 1.0 +790 9 loss.margin 5.385641368626031 +790 9 loss.adversarial_temperature 0.15956621041994812 +790 9 optimizer.lr 0.09946013695901008 +790 9 negative_sampler.num_negs_per_pos 4.0 +790 9 training.batch_size 2.0 +790 10 model.embedding_dim 2.0 +790 10 model.relation_dim 0.0 +790 10 loss.margin 23.006672905238737 +790 10 loss.adversarial_temperature 0.8376045937956398 +790 10 optimizer.lr 0.002468057786811411 +790 10 negative_sampler.num_negs_per_pos 43.0 +790 10 training.batch_size 2.0 +790 11 model.embedding_dim 0.0 +790 11 model.relation_dim 1.0 +790 11 loss.margin 9.18096470985681 +790 11 loss.adversarial_temperature 0.6151038711620944 +790 11 optimizer.lr 0.004691288254760139 +790 11 negative_sampler.num_negs_per_pos 24.0 +790 11 training.batch_size 0.0 +790 12 model.embedding_dim 1.0 +790 12 model.relation_dim 1.0 +790 12 loss.margin 25.192823558991638 +790 12 loss.adversarial_temperature 0.3649420057871743 +790 12 optimizer.lr 0.0019887729872007285 +790 12 negative_sampler.num_negs_per_pos 18.0 +790 12 training.batch_size 0.0 +790 13 model.embedding_dim 2.0 +790 13 model.relation_dim 2.0 +790 13 
loss.margin 8.366376571638545 +790 13 loss.adversarial_temperature 0.777325782090118 +790 13 optimizer.lr 0.002071327722555253 +790 13 negative_sampler.num_negs_per_pos 1.0 +790 13 training.batch_size 1.0 +790 14 model.embedding_dim 1.0 +790 14 model.relation_dim 0.0 +790 14 loss.margin 2.4388228956063394 +790 14 loss.adversarial_temperature 0.9238849892039823 +790 14 optimizer.lr 0.0023963924523095234 +790 14 negative_sampler.num_negs_per_pos 6.0 +790 14 training.batch_size 2.0 +790 15 model.embedding_dim 2.0 +790 15 model.relation_dim 0.0 +790 15 loss.margin 3.8397921283312444 +790 15 loss.adversarial_temperature 0.19782363240145998 +790 15 optimizer.lr 0.07398545800079687 +790 15 negative_sampler.num_negs_per_pos 12.0 +790 15 training.batch_size 3.0 +790 16 model.embedding_dim 0.0 +790 16 model.relation_dim 1.0 +790 16 loss.margin 11.833484900426356 +790 16 loss.adversarial_temperature 0.9813662759150743 +790 16 optimizer.lr 0.058439649500342083 +790 16 negative_sampler.num_negs_per_pos 19.0 +790 16 training.batch_size 3.0 +790 17 model.embedding_dim 2.0 +790 17 model.relation_dim 2.0 +790 17 loss.margin 24.843217975368358 +790 17 loss.adversarial_temperature 0.49642505610163445 +790 17 optimizer.lr 0.0040223217116584905 +790 17 negative_sampler.num_negs_per_pos 33.0 +790 17 training.batch_size 3.0 +790 18 model.embedding_dim 0.0 +790 18 model.relation_dim 2.0 +790 18 loss.margin 23.501215076769792 +790 18 loss.adversarial_temperature 0.38145465998449757 +790 18 optimizer.lr 0.019935903620578746 +790 18 negative_sampler.num_negs_per_pos 27.0 +790 18 training.batch_size 1.0 +790 19 model.embedding_dim 1.0 +790 19 model.relation_dim 0.0 +790 19 loss.margin 19.65175349375782 +790 19 loss.adversarial_temperature 0.3906323363789266 +790 19 optimizer.lr 0.02154460215341473 +790 19 negative_sampler.num_negs_per_pos 32.0 +790 19 training.batch_size 1.0 +790 20 model.embedding_dim 0.0 +790 20 model.relation_dim 2.0 +790 20 loss.margin 23.331428831512284 +790 20 
loss.adversarial_temperature 0.2829071814173629 +790 20 optimizer.lr 0.015237783299904523 +790 20 negative_sampler.num_negs_per_pos 29.0 +790 20 training.batch_size 0.0 +790 21 model.embedding_dim 1.0 +790 21 model.relation_dim 1.0 +790 21 loss.margin 22.773192945137144 +790 21 loss.adversarial_temperature 0.7717769377768706 +790 21 optimizer.lr 0.004349770169328528 +790 21 negative_sampler.num_negs_per_pos 3.0 +790 21 training.batch_size 1.0 +790 22 model.embedding_dim 1.0 +790 22 model.relation_dim 2.0 +790 22 loss.margin 25.737408841949705 +790 22 loss.adversarial_temperature 0.6953265637113692 +790 22 optimizer.lr 0.06974720337668289 +790 22 negative_sampler.num_negs_per_pos 3.0 +790 22 training.batch_size 0.0 +790 23 model.embedding_dim 0.0 +790 23 model.relation_dim 1.0 +790 23 loss.margin 16.13758523639697 +790 23 loss.adversarial_temperature 0.7917841236782941 +790 23 optimizer.lr 0.004267755521695608 +790 23 negative_sampler.num_negs_per_pos 14.0 +790 23 training.batch_size 2.0 +790 24 model.embedding_dim 0.0 +790 24 model.relation_dim 0.0 +790 24 loss.margin 5.521675592074471 +790 24 loss.adversarial_temperature 0.6135326098750065 +790 24 optimizer.lr 0.01000095309757487 +790 24 negative_sampler.num_negs_per_pos 28.0 +790 24 training.batch_size 2.0 +790 25 model.embedding_dim 1.0 +790 25 model.relation_dim 1.0 +790 25 loss.margin 21.690987250655784 +790 25 loss.adversarial_temperature 0.46845951647895 +790 25 optimizer.lr 0.002596656575319538 +790 25 negative_sampler.num_negs_per_pos 33.0 +790 25 training.batch_size 0.0 +790 26 model.embedding_dim 2.0 +790 26 model.relation_dim 0.0 +790 26 loss.margin 11.609181744324856 +790 26 loss.adversarial_temperature 0.8716865088423646 +790 26 optimizer.lr 0.00799521653818563 +790 26 negative_sampler.num_negs_per_pos 16.0 +790 26 training.batch_size 3.0 +790 27 model.embedding_dim 0.0 +790 27 model.relation_dim 0.0 +790 27 loss.margin 8.690576114442708 +790 27 loss.adversarial_temperature 0.9184573543079336 +790 27 
optimizer.lr 0.02747410596184215 +790 27 negative_sampler.num_negs_per_pos 4.0 +790 27 training.batch_size 0.0 +790 28 model.embedding_dim 1.0 +790 28 model.relation_dim 0.0 +790 28 loss.margin 5.139321210605151 +790 28 loss.adversarial_temperature 0.4175649117520551 +790 28 optimizer.lr 0.08285103336045772 +790 28 negative_sampler.num_negs_per_pos 15.0 +790 28 training.batch_size 0.0 +790 29 model.embedding_dim 0.0 +790 29 model.relation_dim 1.0 +790 29 loss.margin 19.841035939197877 +790 29 loss.adversarial_temperature 0.8508890909825637 +790 29 optimizer.lr 0.0013431313823607756 +790 29 negative_sampler.num_negs_per_pos 41.0 +790 29 training.batch_size 2.0 +790 30 model.embedding_dim 2.0 +790 30 model.relation_dim 0.0 +790 30 loss.margin 14.358810385081835 +790 30 loss.adversarial_temperature 0.302805277154156 +790 30 optimizer.lr 0.0014221713724641655 +790 30 negative_sampler.num_negs_per_pos 13.0 +790 30 training.batch_size 3.0 +790 31 model.embedding_dim 2.0 +790 31 model.relation_dim 1.0 +790 31 loss.margin 1.4450716102629955 +790 31 loss.adversarial_temperature 0.7189738097331192 +790 31 optimizer.lr 0.021770800006074457 +790 31 negative_sampler.num_negs_per_pos 35.0 +790 31 training.batch_size 2.0 +790 32 model.embedding_dim 0.0 +790 32 model.relation_dim 2.0 +790 32 loss.margin 15.200052147807353 +790 32 loss.adversarial_temperature 0.7613002608823647 +790 32 optimizer.lr 0.006269090031537787 +790 32 negative_sampler.num_negs_per_pos 23.0 +790 32 training.batch_size 0.0 +790 1 dataset """yago310""" +790 1 model """transd""" +790 1 loss """nssa""" +790 1 regularizer """no""" +790 1 optimizer """adam""" +790 1 training_loop """owa""" +790 1 negative_sampler """basic""" +790 1 evaluator """rankbased""" +790 2 dataset """yago310""" +790 2 model """transd""" +790 2 loss """nssa""" +790 2 regularizer """no""" +790 2 optimizer """adam""" +790 2 training_loop """owa""" +790 2 negative_sampler """basic""" +790 2 evaluator """rankbased""" +790 3 dataset 
"""yago310""" +790 3 model """transd""" +790 3 loss """nssa""" +790 3 regularizer """no""" +790 3 optimizer """adam""" +790 3 training_loop """owa""" +790 3 negative_sampler """basic""" +790 3 evaluator """rankbased""" +790 4 dataset """yago310""" +790 4 model """transd""" +790 4 loss """nssa""" +790 4 regularizer """no""" +790 4 optimizer """adam""" +790 4 training_loop """owa""" +790 4 negative_sampler """basic""" +790 4 evaluator """rankbased""" +790 5 dataset """yago310""" +790 5 model """transd""" +790 5 loss """nssa""" +790 5 regularizer """no""" +790 5 optimizer """adam""" +790 5 training_loop """owa""" +790 5 negative_sampler """basic""" +790 5 evaluator """rankbased""" +790 6 dataset """yago310""" +790 6 model """transd""" +790 6 loss """nssa""" +790 6 regularizer """no""" +790 6 optimizer """adam""" +790 6 training_loop """owa""" +790 6 negative_sampler """basic""" +790 6 evaluator """rankbased""" +790 7 dataset """yago310""" +790 7 model """transd""" +790 7 loss """nssa""" +790 7 regularizer """no""" +790 7 optimizer """adam""" +790 7 training_loop """owa""" +790 7 negative_sampler """basic""" +790 7 evaluator """rankbased""" +790 8 dataset """yago310""" +790 8 model """transd""" +790 8 loss """nssa""" +790 8 regularizer """no""" +790 8 optimizer """adam""" +790 8 training_loop """owa""" +790 8 negative_sampler """basic""" +790 8 evaluator """rankbased""" +790 9 dataset """yago310""" +790 9 model """transd""" +790 9 loss """nssa""" +790 9 regularizer """no""" +790 9 optimizer """adam""" +790 9 training_loop """owa""" +790 9 negative_sampler """basic""" +790 9 evaluator """rankbased""" +790 10 dataset """yago310""" +790 10 model """transd""" +790 10 loss """nssa""" +790 10 regularizer """no""" +790 10 optimizer """adam""" +790 10 training_loop """owa""" +790 10 negative_sampler """basic""" +790 10 evaluator """rankbased""" +790 11 dataset """yago310""" +790 11 model """transd""" +790 11 loss """nssa""" +790 11 regularizer """no""" +790 11 optimizer 
"""adam""" +790 11 training_loop """owa""" +790 11 negative_sampler """basic""" +790 11 evaluator """rankbased""" +790 12 dataset """yago310""" +790 12 model """transd""" +790 12 loss """nssa""" +790 12 regularizer """no""" +790 12 optimizer """adam""" +790 12 training_loop """owa""" +790 12 negative_sampler """basic""" +790 12 evaluator """rankbased""" +790 13 dataset """yago310""" +790 13 model """transd""" +790 13 loss """nssa""" +790 13 regularizer """no""" +790 13 optimizer """adam""" +790 13 training_loop """owa""" +790 13 negative_sampler """basic""" +790 13 evaluator """rankbased""" +790 14 dataset """yago310""" +790 14 model """transd""" +790 14 loss """nssa""" +790 14 regularizer """no""" +790 14 optimizer """adam""" +790 14 training_loop """owa""" +790 14 negative_sampler """basic""" +790 14 evaluator """rankbased""" +790 15 dataset """yago310""" +790 15 model """transd""" +790 15 loss """nssa""" +790 15 regularizer """no""" +790 15 optimizer """adam""" +790 15 training_loop """owa""" +790 15 negative_sampler """basic""" +790 15 evaluator """rankbased""" +790 16 dataset """yago310""" +790 16 model """transd""" +790 16 loss """nssa""" +790 16 regularizer """no""" +790 16 optimizer """adam""" +790 16 training_loop """owa""" +790 16 negative_sampler """basic""" +790 16 evaluator """rankbased""" +790 17 dataset """yago310""" +790 17 model """transd""" +790 17 loss """nssa""" +790 17 regularizer """no""" +790 17 optimizer """adam""" +790 17 training_loop """owa""" +790 17 negative_sampler """basic""" +790 17 evaluator """rankbased""" +790 18 dataset """yago310""" +790 18 model """transd""" +790 18 loss """nssa""" +790 18 regularizer """no""" +790 18 optimizer """adam""" +790 18 training_loop """owa""" +790 18 negative_sampler """basic""" +790 18 evaluator """rankbased""" +790 19 dataset """yago310""" +790 19 model """transd""" +790 19 loss """nssa""" +790 19 regularizer """no""" +790 19 optimizer """adam""" +790 19 training_loop """owa""" +790 19 
negative_sampler """basic""" +790 19 evaluator """rankbased""" +790 20 dataset """yago310""" +790 20 model """transd""" +790 20 loss """nssa""" +790 20 regularizer """no""" +790 20 optimizer """adam""" +790 20 training_loop """owa""" +790 20 negative_sampler """basic""" +790 20 evaluator """rankbased""" +790 21 dataset """yago310""" +790 21 model """transd""" +790 21 loss """nssa""" +790 21 regularizer """no""" +790 21 optimizer """adam""" +790 21 training_loop """owa""" +790 21 negative_sampler """basic""" +790 21 evaluator """rankbased""" +790 22 dataset """yago310""" +790 22 model """transd""" +790 22 loss """nssa""" +790 22 regularizer """no""" +790 22 optimizer """adam""" +790 22 training_loop """owa""" +790 22 negative_sampler """basic""" +790 22 evaluator """rankbased""" +790 23 dataset """yago310""" +790 23 model """transd""" +790 23 loss """nssa""" +790 23 regularizer """no""" +790 23 optimizer """adam""" +790 23 training_loop """owa""" +790 23 negative_sampler """basic""" +790 23 evaluator """rankbased""" +790 24 dataset """yago310""" +790 24 model """transd""" +790 24 loss """nssa""" +790 24 regularizer """no""" +790 24 optimizer """adam""" +790 24 training_loop """owa""" +790 24 negative_sampler """basic""" +790 24 evaluator """rankbased""" +790 25 dataset """yago310""" +790 25 model """transd""" +790 25 loss """nssa""" +790 25 regularizer """no""" +790 25 optimizer """adam""" +790 25 training_loop """owa""" +790 25 negative_sampler """basic""" +790 25 evaluator """rankbased""" +790 26 dataset """yago310""" +790 26 model """transd""" +790 26 loss """nssa""" +790 26 regularizer """no""" +790 26 optimizer """adam""" +790 26 training_loop """owa""" +790 26 negative_sampler """basic""" +790 26 evaluator """rankbased""" +790 27 dataset """yago310""" +790 27 model """transd""" +790 27 loss """nssa""" +790 27 regularizer """no""" +790 27 optimizer """adam""" +790 27 training_loop """owa""" +790 27 negative_sampler """basic""" +790 27 evaluator """rankbased""" 
+790 28 dataset """yago310""" +790 28 model """transd""" +790 28 loss """nssa""" +790 28 regularizer """no""" +790 28 optimizer """adam""" +790 28 training_loop """owa""" +790 28 negative_sampler """basic""" +790 28 evaluator """rankbased""" +790 29 dataset """yago310""" +790 29 model """transd""" +790 29 loss """nssa""" +790 29 regularizer """no""" +790 29 optimizer """adam""" +790 29 training_loop """owa""" +790 29 negative_sampler """basic""" +790 29 evaluator """rankbased""" +790 30 dataset """yago310""" +790 30 model """transd""" +790 30 loss """nssa""" +790 30 regularizer """no""" +790 30 optimizer """adam""" +790 30 training_loop """owa""" +790 30 negative_sampler """basic""" +790 30 evaluator """rankbased""" +790 31 dataset """yago310""" +790 31 model """transd""" +790 31 loss """nssa""" +790 31 regularizer """no""" +790 31 optimizer """adam""" +790 31 training_loop """owa""" +790 31 negative_sampler """basic""" +790 31 evaluator """rankbased""" +790 32 dataset """yago310""" +790 32 model """transd""" +790 32 loss """nssa""" +790 32 regularizer """no""" +790 32 optimizer """adam""" +790 32 training_loop """owa""" +790 32 negative_sampler """basic""" +790 32 evaluator """rankbased""" +791 1 model.embedding_dim 0.0 +791 1 model.scoring_fct_norm 2.0 +791 1 optimizer.lr 0.007296183933219858 +791 1 negative_sampler.num_negs_per_pos 61.0 +791 1 training.batch_size 1.0 +791 2 model.embedding_dim 2.0 +791 2 model.scoring_fct_norm 2.0 +791 2 optimizer.lr 0.07573419199560545 +791 2 negative_sampler.num_negs_per_pos 38.0 +791 2 training.batch_size 1.0 +791 3 model.embedding_dim 0.0 +791 3 model.scoring_fct_norm 1.0 +791 3 optimizer.lr 0.026982137249657947 +791 3 negative_sampler.num_negs_per_pos 12.0 +791 3 training.batch_size 0.0 +791 4 model.embedding_dim 1.0 +791 4 model.scoring_fct_norm 1.0 +791 4 optimizer.lr 0.001624748718277314 +791 4 negative_sampler.num_negs_per_pos 9.0 +791 4 training.batch_size 1.0 +791 5 model.embedding_dim 0.0 +791 5 
model.scoring_fct_norm 1.0 +791 5 optimizer.lr 0.008585649573789636 +791 5 negative_sampler.num_negs_per_pos 86.0 +791 5 training.batch_size 2.0 +791 6 model.embedding_dim 1.0 +791 6 model.scoring_fct_norm 1.0 +791 6 optimizer.lr 0.018461865112549648 +791 6 negative_sampler.num_negs_per_pos 41.0 +791 6 training.batch_size 2.0 +791 7 model.embedding_dim 1.0 +791 7 model.scoring_fct_norm 2.0 +791 7 optimizer.lr 0.03525227872327567 +791 7 negative_sampler.num_negs_per_pos 61.0 +791 7 training.batch_size 2.0 +791 8 model.embedding_dim 0.0 +791 8 model.scoring_fct_norm 1.0 +791 8 optimizer.lr 0.003463717651892269 +791 8 negative_sampler.num_negs_per_pos 41.0 +791 8 training.batch_size 2.0 +791 9 model.embedding_dim 0.0 +791 9 model.scoring_fct_norm 2.0 +791 9 optimizer.lr 0.019742872904983947 +791 9 negative_sampler.num_negs_per_pos 72.0 +791 9 training.batch_size 2.0 +791 10 model.embedding_dim 2.0 +791 10 model.scoring_fct_norm 1.0 +791 10 optimizer.lr 0.015173993736805754 +791 10 negative_sampler.num_negs_per_pos 67.0 +791 10 training.batch_size 2.0 +791 11 model.embedding_dim 2.0 +791 11 model.scoring_fct_norm 1.0 +791 11 optimizer.lr 0.003089799907488747 +791 11 negative_sampler.num_negs_per_pos 76.0 +791 11 training.batch_size 2.0 +791 12 model.embedding_dim 0.0 +791 12 model.scoring_fct_norm 2.0 +791 12 optimizer.lr 0.04027642146048711 +791 12 negative_sampler.num_negs_per_pos 2.0 +791 12 training.batch_size 1.0 +791 13 model.embedding_dim 1.0 +791 13 model.scoring_fct_norm 1.0 +791 13 optimizer.lr 0.010623471026672425 +791 13 negative_sampler.num_negs_per_pos 65.0 +791 13 training.batch_size 1.0 +791 14 model.embedding_dim 0.0 +791 14 model.scoring_fct_norm 2.0 +791 14 optimizer.lr 0.041683642491422514 +791 14 negative_sampler.num_negs_per_pos 31.0 +791 14 training.batch_size 0.0 +791 15 model.embedding_dim 1.0 +791 15 model.scoring_fct_norm 2.0 +791 15 optimizer.lr 0.00228567313614886 +791 15 negative_sampler.num_negs_per_pos 59.0 +791 15 training.batch_size 
0.0 +791 16 model.embedding_dim 0.0 +791 16 model.scoring_fct_norm 2.0 +791 16 optimizer.lr 0.006478622373369363 +791 16 negative_sampler.num_negs_per_pos 73.0 +791 16 training.batch_size 2.0 +791 17 model.embedding_dim 1.0 +791 17 model.scoring_fct_norm 1.0 +791 17 optimizer.lr 0.004387801698041953 +791 17 negative_sampler.num_negs_per_pos 12.0 +791 17 training.batch_size 2.0 +791 18 model.embedding_dim 0.0 +791 18 model.scoring_fct_norm 1.0 +791 18 optimizer.lr 0.018588299201409367 +791 18 negative_sampler.num_negs_per_pos 46.0 +791 18 training.batch_size 1.0 +791 19 model.embedding_dim 2.0 +791 19 model.scoring_fct_norm 2.0 +791 19 optimizer.lr 0.03211555111897548 +791 19 negative_sampler.num_negs_per_pos 99.0 +791 19 training.batch_size 2.0 +791 20 model.embedding_dim 2.0 +791 20 model.scoring_fct_norm 2.0 +791 20 optimizer.lr 0.002301166402193264 +791 20 negative_sampler.num_negs_per_pos 3.0 +791 20 training.batch_size 1.0 +791 21 model.embedding_dim 0.0 +791 21 model.scoring_fct_norm 2.0 +791 21 optimizer.lr 0.025953135723296706 +791 21 negative_sampler.num_negs_per_pos 34.0 +791 21 training.batch_size 1.0 +791 22 model.embedding_dim 2.0 +791 22 model.scoring_fct_norm 1.0 +791 22 optimizer.lr 0.007326669796483468 +791 22 negative_sampler.num_negs_per_pos 59.0 +791 22 training.batch_size 0.0 +791 23 model.embedding_dim 0.0 +791 23 model.scoring_fct_norm 2.0 +791 23 optimizer.lr 0.029983846502224513 +791 23 negative_sampler.num_negs_per_pos 30.0 +791 23 training.batch_size 1.0 +791 24 model.embedding_dim 0.0 +791 24 model.scoring_fct_norm 2.0 +791 24 optimizer.lr 0.01584102197177633 +791 24 negative_sampler.num_negs_per_pos 82.0 +791 24 training.batch_size 2.0 +791 25 model.embedding_dim 0.0 +791 25 model.scoring_fct_norm 1.0 +791 25 optimizer.lr 0.030449933996455732 +791 25 negative_sampler.num_negs_per_pos 79.0 +791 25 training.batch_size 0.0 +791 26 model.embedding_dim 1.0 +791 26 model.scoring_fct_norm 2.0 +791 26 optimizer.lr 0.09860728592593633 +791 26 
negative_sampler.num_negs_per_pos 51.0 +791 26 training.batch_size 2.0 +791 27 model.embedding_dim 2.0 +791 27 model.scoring_fct_norm 2.0 +791 27 optimizer.lr 0.034055966784867206 +791 27 negative_sampler.num_negs_per_pos 61.0 +791 27 training.batch_size 1.0 +791 28 model.embedding_dim 1.0 +791 28 model.scoring_fct_norm 2.0 +791 28 optimizer.lr 0.0017642875028315637 +791 28 negative_sampler.num_negs_per_pos 56.0 +791 28 training.batch_size 0.0 +791 29 model.embedding_dim 2.0 +791 29 model.scoring_fct_norm 1.0 +791 29 optimizer.lr 0.0029656575262924675 +791 29 negative_sampler.num_negs_per_pos 38.0 +791 29 training.batch_size 1.0 +791 30 model.embedding_dim 2.0 +791 30 model.scoring_fct_norm 1.0 +791 30 optimizer.lr 0.015244228621031516 +791 30 negative_sampler.num_negs_per_pos 64.0 +791 30 training.batch_size 1.0 +791 31 model.embedding_dim 2.0 +791 31 model.scoring_fct_norm 2.0 +791 31 optimizer.lr 0.03811538410738589 +791 31 negative_sampler.num_negs_per_pos 11.0 +791 31 training.batch_size 1.0 +791 32 model.embedding_dim 1.0 +791 32 model.scoring_fct_norm 2.0 +791 32 optimizer.lr 0.07629857789567104 +791 32 negative_sampler.num_negs_per_pos 87.0 +791 32 training.batch_size 0.0 +791 33 model.embedding_dim 0.0 +791 33 model.scoring_fct_norm 1.0 +791 33 optimizer.lr 0.01606894426300453 +791 33 negative_sampler.num_negs_per_pos 46.0 +791 33 training.batch_size 2.0 +791 34 model.embedding_dim 2.0 +791 34 model.scoring_fct_norm 2.0 +791 34 optimizer.lr 0.0030967445584817775 +791 34 negative_sampler.num_negs_per_pos 62.0 +791 34 training.batch_size 0.0 +791 35 model.embedding_dim 1.0 +791 35 model.scoring_fct_norm 2.0 +791 35 optimizer.lr 0.002422438291339068 +791 35 negative_sampler.num_negs_per_pos 30.0 +791 35 training.batch_size 2.0 +791 36 model.embedding_dim 2.0 +791 36 model.scoring_fct_norm 2.0 +791 36 optimizer.lr 0.01689667948584523 +791 36 negative_sampler.num_negs_per_pos 89.0 +791 36 training.batch_size 0.0 +791 37 model.embedding_dim 1.0 +791 37 
model.scoring_fct_norm 1.0 +791 37 optimizer.lr 0.023485886829918083 +791 37 negative_sampler.num_negs_per_pos 91.0 +791 37 training.batch_size 2.0 +791 38 model.embedding_dim 1.0 +791 38 model.scoring_fct_norm 1.0 +791 38 optimizer.lr 0.0019872996933825576 +791 38 negative_sampler.num_negs_per_pos 72.0 +791 38 training.batch_size 1.0 +791 39 model.embedding_dim 1.0 +791 39 model.scoring_fct_norm 1.0 +791 39 optimizer.lr 0.0032334080364247245 +791 39 negative_sampler.num_negs_per_pos 93.0 +791 39 training.batch_size 0.0 +791 40 model.embedding_dim 1.0 +791 40 model.scoring_fct_norm 2.0 +791 40 optimizer.lr 0.002955569837693043 +791 40 negative_sampler.num_negs_per_pos 81.0 +791 40 training.batch_size 1.0 +791 41 model.embedding_dim 1.0 +791 41 model.scoring_fct_norm 1.0 +791 41 optimizer.lr 0.022407774453148503 +791 41 negative_sampler.num_negs_per_pos 56.0 +791 41 training.batch_size 0.0 +791 42 model.embedding_dim 1.0 +791 42 model.scoring_fct_norm 2.0 +791 42 optimizer.lr 0.0016435712103398685 +791 42 negative_sampler.num_negs_per_pos 62.0 +791 42 training.batch_size 1.0 +791 43 model.embedding_dim 2.0 +791 43 model.scoring_fct_norm 1.0 +791 43 optimizer.lr 0.007728692469464051 +791 43 negative_sampler.num_negs_per_pos 78.0 +791 43 training.batch_size 1.0 +791 44 model.embedding_dim 1.0 +791 44 model.scoring_fct_norm 2.0 +791 44 optimizer.lr 0.005074613750770505 +791 44 negative_sampler.num_negs_per_pos 88.0 +791 44 training.batch_size 2.0 +791 45 model.embedding_dim 2.0 +791 45 model.scoring_fct_norm 2.0 +791 45 optimizer.lr 0.007532875459200767 +791 45 negative_sampler.num_negs_per_pos 97.0 +791 45 training.batch_size 1.0 +791 46 model.embedding_dim 1.0 +791 46 model.scoring_fct_norm 2.0 +791 46 optimizer.lr 0.014743214678441854 +791 46 negative_sampler.num_negs_per_pos 16.0 +791 46 training.batch_size 1.0 +791 47 model.embedding_dim 2.0 +791 47 model.scoring_fct_norm 1.0 +791 47 optimizer.lr 0.00601616334913111 +791 47 negative_sampler.num_negs_per_pos 41.0 
+791 47 training.batch_size 1.0 +791 48 model.embedding_dim 1.0 +791 48 model.scoring_fct_norm 1.0 +791 48 optimizer.lr 0.002225292899002083 +791 48 negative_sampler.num_negs_per_pos 23.0 +791 48 training.batch_size 0.0 +791 49 model.embedding_dim 0.0 +791 49 model.scoring_fct_norm 2.0 +791 49 optimizer.lr 0.019345206367334358 +791 49 negative_sampler.num_negs_per_pos 65.0 +791 49 training.batch_size 2.0 +791 50 model.embedding_dim 2.0 +791 50 model.scoring_fct_norm 2.0 +791 50 optimizer.lr 0.09113452669629556 +791 50 negative_sampler.num_negs_per_pos 64.0 +791 50 training.batch_size 0.0 +791 51 model.embedding_dim 0.0 +791 51 model.scoring_fct_norm 1.0 +791 51 optimizer.lr 0.0071545226123774725 +791 51 negative_sampler.num_negs_per_pos 69.0 +791 51 training.batch_size 1.0 +791 52 model.embedding_dim 2.0 +791 52 model.scoring_fct_norm 2.0 +791 52 optimizer.lr 0.0031076674612343087 +791 52 negative_sampler.num_negs_per_pos 5.0 +791 52 training.batch_size 2.0 +791 53 model.embedding_dim 0.0 +791 53 model.scoring_fct_norm 1.0 +791 53 optimizer.lr 0.0021796633423490606 +791 53 negative_sampler.num_negs_per_pos 10.0 +791 53 training.batch_size 1.0 +791 54 model.embedding_dim 0.0 +791 54 model.scoring_fct_norm 2.0 +791 54 optimizer.lr 0.0024657822963796584 +791 54 negative_sampler.num_negs_per_pos 87.0 +791 54 training.batch_size 2.0 +791 1 dataset """fb15k237""" +791 1 model """transe""" +791 1 loss """bceaftersigmoid""" +791 1 regularizer """no""" +791 1 optimizer """adam""" +791 1 training_loop """owa""" +791 1 negative_sampler """basic""" +791 1 evaluator """rankbased""" +791 2 dataset """fb15k237""" +791 2 model """transe""" +791 2 loss """bceaftersigmoid""" +791 2 regularizer """no""" +791 2 optimizer """adam""" +791 2 training_loop """owa""" +791 2 negative_sampler """basic""" +791 2 evaluator """rankbased""" +791 3 dataset """fb15k237""" +791 3 model """transe""" +791 3 loss """bceaftersigmoid""" +791 3 regularizer """no""" +791 3 optimizer """adam""" +791 3 
training_loop """owa""" +791 3 negative_sampler """basic""" +791 3 evaluator """rankbased""" +791 4 dataset """fb15k237""" +791 4 model """transe""" +791 4 loss """bceaftersigmoid""" +791 4 regularizer """no""" +791 4 optimizer """adam""" +791 4 training_loop """owa""" +791 4 negative_sampler """basic""" +791 4 evaluator """rankbased""" +791 5 dataset """fb15k237""" +791 5 model """transe""" +791 5 loss """bceaftersigmoid""" +791 5 regularizer """no""" +791 5 optimizer """adam""" +791 5 training_loop """owa""" +791 5 negative_sampler """basic""" +791 5 evaluator """rankbased""" +791 6 dataset """fb15k237""" +791 6 model """transe""" +791 6 loss """bceaftersigmoid""" +791 6 regularizer """no""" +791 6 optimizer """adam""" +791 6 training_loop """owa""" +791 6 negative_sampler """basic""" +791 6 evaluator """rankbased""" +791 7 dataset """fb15k237""" +791 7 model """transe""" +791 7 loss """bceaftersigmoid""" +791 7 regularizer """no""" +791 7 optimizer """adam""" +791 7 training_loop """owa""" +791 7 negative_sampler """basic""" +791 7 evaluator """rankbased""" +791 8 dataset """fb15k237""" +791 8 model """transe""" +791 8 loss """bceaftersigmoid""" +791 8 regularizer """no""" +791 8 optimizer """adam""" +791 8 training_loop """owa""" +791 8 negative_sampler """basic""" +791 8 evaluator """rankbased""" +791 9 dataset """fb15k237""" +791 9 model """transe""" +791 9 loss """bceaftersigmoid""" +791 9 regularizer """no""" +791 9 optimizer """adam""" +791 9 training_loop """owa""" +791 9 negative_sampler """basic""" +791 9 evaluator """rankbased""" +791 10 dataset """fb15k237""" +791 10 model """transe""" +791 10 loss """bceaftersigmoid""" +791 10 regularizer """no""" +791 10 optimizer """adam""" +791 10 training_loop """owa""" +791 10 negative_sampler """basic""" +791 10 evaluator """rankbased""" +791 11 dataset """fb15k237""" +791 11 model """transe""" +791 11 loss """bceaftersigmoid""" +791 11 regularizer """no""" +791 11 optimizer """adam""" +791 11 training_loop 
"""owa""" +791 11 negative_sampler """basic""" +791 11 evaluator """rankbased""" +791 12 dataset """fb15k237""" +791 12 model """transe""" +791 12 loss """bceaftersigmoid""" +791 12 regularizer """no""" +791 12 optimizer """adam""" +791 12 training_loop """owa""" +791 12 negative_sampler """basic""" +791 12 evaluator """rankbased""" +791 13 dataset """fb15k237""" +791 13 model """transe""" +791 13 loss """bceaftersigmoid""" +791 13 regularizer """no""" +791 13 optimizer """adam""" +791 13 training_loop """owa""" +791 13 negative_sampler """basic""" +791 13 evaluator """rankbased""" +791 14 dataset """fb15k237""" +791 14 model """transe""" +791 14 loss """bceaftersigmoid""" +791 14 regularizer """no""" +791 14 optimizer """adam""" +791 14 training_loop """owa""" +791 14 negative_sampler """basic""" +791 14 evaluator """rankbased""" +791 15 dataset """fb15k237""" +791 15 model """transe""" +791 15 loss """bceaftersigmoid""" +791 15 regularizer """no""" +791 15 optimizer """adam""" +791 15 training_loop """owa""" +791 15 negative_sampler """basic""" +791 15 evaluator """rankbased""" +791 16 dataset """fb15k237""" +791 16 model """transe""" +791 16 loss """bceaftersigmoid""" +791 16 regularizer """no""" +791 16 optimizer """adam""" +791 16 training_loop """owa""" +791 16 negative_sampler """basic""" +791 16 evaluator """rankbased""" +791 17 dataset """fb15k237""" +791 17 model """transe""" +791 17 loss """bceaftersigmoid""" +791 17 regularizer """no""" +791 17 optimizer """adam""" +791 17 training_loop """owa""" +791 17 negative_sampler """basic""" +791 17 evaluator """rankbased""" +791 18 dataset """fb15k237""" +791 18 model """transe""" +791 18 loss """bceaftersigmoid""" +791 18 regularizer """no""" +791 18 optimizer """adam""" +791 18 training_loop """owa""" +791 18 negative_sampler """basic""" +791 18 evaluator """rankbased""" +791 19 dataset """fb15k237""" +791 19 model """transe""" +791 19 loss """bceaftersigmoid""" +791 19 regularizer """no""" +791 19 optimizer 
"""adam""" +791 19 training_loop """owa""" +791 19 negative_sampler """basic""" +791 19 evaluator """rankbased""" +791 20 dataset """fb15k237""" +791 20 model """transe""" +791 20 loss """bceaftersigmoid""" +791 20 regularizer """no""" +791 20 optimizer """adam""" +791 20 training_loop """owa""" +791 20 negative_sampler """basic""" +791 20 evaluator """rankbased""" +791 21 dataset """fb15k237""" +791 21 model """transe""" +791 21 loss """bceaftersigmoid""" +791 21 regularizer """no""" +791 21 optimizer """adam""" +791 21 training_loop """owa""" +791 21 negative_sampler """basic""" +791 21 evaluator """rankbased""" +791 22 dataset """fb15k237""" +791 22 model """transe""" +791 22 loss """bceaftersigmoid""" +791 22 regularizer """no""" +791 22 optimizer """adam""" +791 22 training_loop """owa""" +791 22 negative_sampler """basic""" +791 22 evaluator """rankbased""" +791 23 dataset """fb15k237""" +791 23 model """transe""" +791 23 loss """bceaftersigmoid""" +791 23 regularizer """no""" +791 23 optimizer """adam""" +791 23 training_loop """owa""" +791 23 negative_sampler """basic""" +791 23 evaluator """rankbased""" +791 24 dataset """fb15k237""" +791 24 model """transe""" +791 24 loss """bceaftersigmoid""" +791 24 regularizer """no""" +791 24 optimizer """adam""" +791 24 training_loop """owa""" +791 24 negative_sampler """basic""" +791 24 evaluator """rankbased""" +791 25 dataset """fb15k237""" +791 25 model """transe""" +791 25 loss """bceaftersigmoid""" +791 25 regularizer """no""" +791 25 optimizer """adam""" +791 25 training_loop """owa""" +791 25 negative_sampler """basic""" +791 25 evaluator """rankbased""" +791 26 dataset """fb15k237""" +791 26 model """transe""" +791 26 loss """bceaftersigmoid""" +791 26 regularizer """no""" +791 26 optimizer """adam""" +791 26 training_loop """owa""" +791 26 negative_sampler """basic""" +791 26 evaluator """rankbased""" +791 27 dataset """fb15k237""" +791 27 model """transe""" +791 27 loss """bceaftersigmoid""" +791 27 
regularizer """no""" +791 27 optimizer """adam""" +791 27 training_loop """owa""" +791 27 negative_sampler """basic""" +791 27 evaluator """rankbased""" +791 28 dataset """fb15k237""" +791 28 model """transe""" +791 28 loss """bceaftersigmoid""" +791 28 regularizer """no""" +791 28 optimizer """adam""" +791 28 training_loop """owa""" +791 28 negative_sampler """basic""" +791 28 evaluator """rankbased""" +791 29 dataset """fb15k237""" +791 29 model """transe""" +791 29 loss """bceaftersigmoid""" +791 29 regularizer """no""" +791 29 optimizer """adam""" +791 29 training_loop """owa""" +791 29 negative_sampler """basic""" +791 29 evaluator """rankbased""" +791 30 dataset """fb15k237""" +791 30 model """transe""" +791 30 loss """bceaftersigmoid""" +791 30 regularizer """no""" +791 30 optimizer """adam""" +791 30 training_loop """owa""" +791 30 negative_sampler """basic""" +791 30 evaluator """rankbased""" +791 31 dataset """fb15k237""" +791 31 model """transe""" +791 31 loss """bceaftersigmoid""" +791 31 regularizer """no""" +791 31 optimizer """adam""" +791 31 training_loop """owa""" +791 31 negative_sampler """basic""" +791 31 evaluator """rankbased""" +791 32 dataset """fb15k237""" +791 32 model """transe""" +791 32 loss """bceaftersigmoid""" +791 32 regularizer """no""" +791 32 optimizer """adam""" +791 32 training_loop """owa""" +791 32 negative_sampler """basic""" +791 32 evaluator """rankbased""" +791 33 dataset """fb15k237""" +791 33 model """transe""" +791 33 loss """bceaftersigmoid""" +791 33 regularizer """no""" +791 33 optimizer """adam""" +791 33 training_loop """owa""" +791 33 negative_sampler """basic""" +791 33 evaluator """rankbased""" +791 34 dataset """fb15k237""" +791 34 model """transe""" +791 34 loss """bceaftersigmoid""" +791 34 regularizer """no""" +791 34 optimizer """adam""" +791 34 training_loop """owa""" +791 34 negative_sampler """basic""" +791 34 evaluator """rankbased""" +791 35 dataset """fb15k237""" +791 35 model """transe""" +791 35 
loss """bceaftersigmoid""" +791 35 regularizer """no""" +791 35 optimizer """adam""" +791 35 training_loop """owa""" +791 35 negative_sampler """basic""" +791 35 evaluator """rankbased""" +791 36 dataset """fb15k237""" +791 36 model """transe""" +791 36 loss """bceaftersigmoid""" +791 36 regularizer """no""" +791 36 optimizer """adam""" +791 36 training_loop """owa""" +791 36 negative_sampler """basic""" +791 36 evaluator """rankbased""" +791 37 dataset """fb15k237""" +791 37 model """transe""" +791 37 loss """bceaftersigmoid""" +791 37 regularizer """no""" +791 37 optimizer """adam""" +791 37 training_loop """owa""" +791 37 negative_sampler """basic""" +791 37 evaluator """rankbased""" +791 38 dataset """fb15k237""" +791 38 model """transe""" +791 38 loss """bceaftersigmoid""" +791 38 regularizer """no""" +791 38 optimizer """adam""" +791 38 training_loop """owa""" +791 38 negative_sampler """basic""" +791 38 evaluator """rankbased""" +791 39 dataset """fb15k237""" +791 39 model """transe""" +791 39 loss """bceaftersigmoid""" +791 39 regularizer """no""" +791 39 optimizer """adam""" +791 39 training_loop """owa""" +791 39 negative_sampler """basic""" +791 39 evaluator """rankbased""" +791 40 dataset """fb15k237""" +791 40 model """transe""" +791 40 loss """bceaftersigmoid""" +791 40 regularizer """no""" +791 40 optimizer """adam""" +791 40 training_loop """owa""" +791 40 negative_sampler """basic""" +791 40 evaluator """rankbased""" +791 41 dataset """fb15k237""" +791 41 model """transe""" +791 41 loss """bceaftersigmoid""" +791 41 regularizer """no""" +791 41 optimizer """adam""" +791 41 training_loop """owa""" +791 41 negative_sampler """basic""" +791 41 evaluator """rankbased""" +791 42 dataset """fb15k237""" +791 42 model """transe""" +791 42 loss """bceaftersigmoid""" +791 42 regularizer """no""" +791 42 optimizer """adam""" +791 42 training_loop """owa""" +791 42 negative_sampler """basic""" +791 42 evaluator """rankbased""" +791 43 dataset """fb15k237""" 
+791 43 model """transe""" +791 43 loss """bceaftersigmoid""" +791 43 regularizer """no""" +791 43 optimizer """adam""" +791 43 training_loop """owa""" +791 43 negative_sampler """basic""" +791 43 evaluator """rankbased""" +791 44 dataset """fb15k237""" +791 44 model """transe""" +791 44 loss """bceaftersigmoid""" +791 44 regularizer """no""" +791 44 optimizer """adam""" +791 44 training_loop """owa""" +791 44 negative_sampler """basic""" +791 44 evaluator """rankbased""" +791 45 dataset """fb15k237""" +791 45 model """transe""" +791 45 loss """bceaftersigmoid""" +791 45 regularizer """no""" +791 45 optimizer """adam""" +791 45 training_loop """owa""" +791 45 negative_sampler """basic""" +791 45 evaluator """rankbased""" +791 46 dataset """fb15k237""" +791 46 model """transe""" +791 46 loss """bceaftersigmoid""" +791 46 regularizer """no""" +791 46 optimizer """adam""" +791 46 training_loop """owa""" +791 46 negative_sampler """basic""" +791 46 evaluator """rankbased""" +791 47 dataset """fb15k237""" +791 47 model """transe""" +791 47 loss """bceaftersigmoid""" +791 47 regularizer """no""" +791 47 optimizer """adam""" +791 47 training_loop """owa""" +791 47 negative_sampler """basic""" +791 47 evaluator """rankbased""" +791 48 dataset """fb15k237""" +791 48 model """transe""" +791 48 loss """bceaftersigmoid""" +791 48 regularizer """no""" +791 48 optimizer """adam""" +791 48 training_loop """owa""" +791 48 negative_sampler """basic""" +791 48 evaluator """rankbased""" +791 49 dataset """fb15k237""" +791 49 model """transe""" +791 49 loss """bceaftersigmoid""" +791 49 regularizer """no""" +791 49 optimizer """adam""" +791 49 training_loop """owa""" +791 49 negative_sampler """basic""" +791 49 evaluator """rankbased""" +791 50 dataset """fb15k237""" +791 50 model """transe""" +791 50 loss """bceaftersigmoid""" +791 50 regularizer """no""" +791 50 optimizer """adam""" +791 50 training_loop """owa""" +791 50 negative_sampler """basic""" +791 50 evaluator 
"""rankbased""" +791 51 dataset """fb15k237""" +791 51 model """transe""" +791 51 loss """bceaftersigmoid""" +791 51 regularizer """no""" +791 51 optimizer """adam""" +791 51 training_loop """owa""" +791 51 negative_sampler """basic""" +791 51 evaluator """rankbased""" +791 52 dataset """fb15k237""" +791 52 model """transe""" +791 52 loss """bceaftersigmoid""" +791 52 regularizer """no""" +791 52 optimizer """adam""" +791 52 training_loop """owa""" +791 52 negative_sampler """basic""" +791 52 evaluator """rankbased""" +791 53 dataset """fb15k237""" +791 53 model """transe""" +791 53 loss """bceaftersigmoid""" +791 53 regularizer """no""" +791 53 optimizer """adam""" +791 53 training_loop """owa""" +791 53 negative_sampler """basic""" +791 53 evaluator """rankbased""" +791 54 dataset """fb15k237""" +791 54 model """transe""" +791 54 loss """bceaftersigmoid""" +791 54 regularizer """no""" +791 54 optimizer """adam""" +791 54 training_loop """owa""" +791 54 negative_sampler """basic""" +791 54 evaluator """rankbased""" +792 1 model.embedding_dim 2.0 +792 1 model.scoring_fct_norm 1.0 +792 1 optimizer.lr 0.08158197429162571 +792 1 negative_sampler.num_negs_per_pos 28.0 +792 1 training.batch_size 0.0 +792 2 model.embedding_dim 2.0 +792 2 model.scoring_fct_norm 2.0 +792 2 optimizer.lr 0.0045342128834247476 +792 2 negative_sampler.num_negs_per_pos 93.0 +792 2 training.batch_size 2.0 +792 3 model.embedding_dim 1.0 +792 3 model.scoring_fct_norm 1.0 +792 3 optimizer.lr 0.02459700292277927 +792 3 negative_sampler.num_negs_per_pos 7.0 +792 3 training.batch_size 1.0 +792 4 model.embedding_dim 1.0 +792 4 model.scoring_fct_norm 1.0 +792 4 optimizer.lr 0.005270036658339627 +792 4 negative_sampler.num_negs_per_pos 47.0 +792 4 training.batch_size 0.0 +792 5 model.embedding_dim 2.0 +792 5 model.scoring_fct_norm 1.0 +792 5 optimizer.lr 0.009594312166223019 +792 5 negative_sampler.num_negs_per_pos 42.0 +792 5 training.batch_size 2.0 +792 6 model.embedding_dim 1.0 +792 6 
model.scoring_fct_norm 2.0 +792 6 optimizer.lr 0.003957096937181429 +792 6 negative_sampler.num_negs_per_pos 96.0 +792 6 training.batch_size 1.0 +792 7 model.embedding_dim 1.0 +792 7 model.scoring_fct_norm 2.0 +792 7 optimizer.lr 0.023961200445729518 +792 7 negative_sampler.num_negs_per_pos 22.0 +792 7 training.batch_size 2.0 +792 8 model.embedding_dim 0.0 +792 8 model.scoring_fct_norm 2.0 +792 8 optimizer.lr 0.015589138905699787 +792 8 negative_sampler.num_negs_per_pos 96.0 +792 8 training.batch_size 1.0 +792 9 model.embedding_dim 2.0 +792 9 model.scoring_fct_norm 2.0 +792 9 optimizer.lr 0.01339263589432841 +792 9 negative_sampler.num_negs_per_pos 97.0 +792 9 training.batch_size 0.0 +792 10 model.embedding_dim 0.0 +792 10 model.scoring_fct_norm 1.0 +792 10 optimizer.lr 0.0015728386727946173 +792 10 negative_sampler.num_negs_per_pos 52.0 +792 10 training.batch_size 2.0 +792 11 model.embedding_dim 1.0 +792 11 model.scoring_fct_norm 2.0 +792 11 optimizer.lr 0.018149967820211655 +792 11 negative_sampler.num_negs_per_pos 6.0 +792 11 training.batch_size 0.0 +792 12 model.embedding_dim 1.0 +792 12 model.scoring_fct_norm 2.0 +792 12 optimizer.lr 0.032260382127019524 +792 12 negative_sampler.num_negs_per_pos 46.0 +792 12 training.batch_size 0.0 +792 13 model.embedding_dim 2.0 +792 13 model.scoring_fct_norm 1.0 +792 13 optimizer.lr 0.018661027073520062 +792 13 negative_sampler.num_negs_per_pos 0.0 +792 13 training.batch_size 0.0 +792 14 model.embedding_dim 0.0 +792 14 model.scoring_fct_norm 1.0 +792 14 optimizer.lr 0.006356935589504993 +792 14 negative_sampler.num_negs_per_pos 84.0 +792 14 training.batch_size 1.0 +792 15 model.embedding_dim 0.0 +792 15 model.scoring_fct_norm 1.0 +792 15 optimizer.lr 0.029728835341437946 +792 15 negative_sampler.num_negs_per_pos 10.0 +792 15 training.batch_size 0.0 +792 16 model.embedding_dim 0.0 +792 16 model.scoring_fct_norm 2.0 +792 16 optimizer.lr 0.0373006881967094 +792 16 negative_sampler.num_negs_per_pos 76.0 +792 16 
training.batch_size 2.0 +792 17 model.embedding_dim 2.0 +792 17 model.scoring_fct_norm 1.0 +792 17 optimizer.lr 0.04946411180752351 +792 17 negative_sampler.num_negs_per_pos 0.0 +792 17 training.batch_size 0.0 +792 18 model.embedding_dim 2.0 +792 18 model.scoring_fct_norm 2.0 +792 18 optimizer.lr 0.04259447611968374 +792 18 negative_sampler.num_negs_per_pos 10.0 +792 18 training.batch_size 0.0 +792 19 model.embedding_dim 0.0 +792 19 model.scoring_fct_norm 1.0 +792 19 optimizer.lr 0.01350919792512841 +792 19 negative_sampler.num_negs_per_pos 99.0 +792 19 training.batch_size 0.0 +792 20 model.embedding_dim 1.0 +792 20 model.scoring_fct_norm 1.0 +792 20 optimizer.lr 0.0015121632599338856 +792 20 negative_sampler.num_negs_per_pos 67.0 +792 20 training.batch_size 1.0 +792 21 model.embedding_dim 0.0 +792 21 model.scoring_fct_norm 2.0 +792 21 optimizer.lr 0.0936765652731332 +792 21 negative_sampler.num_negs_per_pos 15.0 +792 21 training.batch_size 2.0 +792 22 model.embedding_dim 1.0 +792 22 model.scoring_fct_norm 1.0 +792 22 optimizer.lr 0.0035835366457011263 +792 22 negative_sampler.num_negs_per_pos 41.0 +792 22 training.batch_size 0.0 +792 23 model.embedding_dim 2.0 +792 23 model.scoring_fct_norm 2.0 +792 23 optimizer.lr 0.09535204129746941 +792 23 negative_sampler.num_negs_per_pos 33.0 +792 23 training.batch_size 2.0 +792 24 model.embedding_dim 1.0 +792 24 model.scoring_fct_norm 1.0 +792 24 optimizer.lr 0.0012314839386348018 +792 24 negative_sampler.num_negs_per_pos 94.0 +792 24 training.batch_size 2.0 +792 25 model.embedding_dim 1.0 +792 25 model.scoring_fct_norm 2.0 +792 25 optimizer.lr 0.04989165958334188 +792 25 negative_sampler.num_negs_per_pos 64.0 +792 25 training.batch_size 0.0 +792 26 model.embedding_dim 0.0 +792 26 model.scoring_fct_norm 1.0 +792 26 optimizer.lr 0.014867480401506731 +792 26 negative_sampler.num_negs_per_pos 53.0 +792 26 training.batch_size 1.0 +792 27 model.embedding_dim 2.0 +792 27 model.scoring_fct_norm 2.0 +792 27 optimizer.lr 
0.005572619379591748 +792 27 negative_sampler.num_negs_per_pos 28.0 +792 27 training.batch_size 0.0 +792 28 model.embedding_dim 1.0 +792 28 model.scoring_fct_norm 1.0 +792 28 optimizer.lr 0.001921539106645831 +792 28 negative_sampler.num_negs_per_pos 84.0 +792 28 training.batch_size 1.0 +792 29 model.embedding_dim 2.0 +792 29 model.scoring_fct_norm 2.0 +792 29 optimizer.lr 0.008192049600719566 +792 29 negative_sampler.num_negs_per_pos 86.0 +792 29 training.batch_size 2.0 +792 30 model.embedding_dim 2.0 +792 30 model.scoring_fct_norm 1.0 +792 30 optimizer.lr 0.04473373137890407 +792 30 negative_sampler.num_negs_per_pos 8.0 +792 30 training.batch_size 2.0 +792 31 model.embedding_dim 2.0 +792 31 model.scoring_fct_norm 1.0 +792 31 optimizer.lr 0.0028231431829788267 +792 31 negative_sampler.num_negs_per_pos 2.0 +792 31 training.batch_size 1.0 +792 32 model.embedding_dim 0.0 +792 32 model.scoring_fct_norm 1.0 +792 32 optimizer.lr 0.08802657230137093 +792 32 negative_sampler.num_negs_per_pos 50.0 +792 32 training.batch_size 0.0 +792 33 model.embedding_dim 1.0 +792 33 model.scoring_fct_norm 2.0 +792 33 optimizer.lr 0.057504818105784124 +792 33 negative_sampler.num_negs_per_pos 31.0 +792 33 training.batch_size 1.0 +792 34 model.embedding_dim 0.0 +792 34 model.scoring_fct_norm 1.0 +792 34 optimizer.lr 0.005246219040801113 +792 34 negative_sampler.num_negs_per_pos 35.0 +792 34 training.batch_size 2.0 +792 35 model.embedding_dim 0.0 +792 35 model.scoring_fct_norm 1.0 +792 35 optimizer.lr 0.0019153342328588076 +792 35 negative_sampler.num_negs_per_pos 40.0 +792 35 training.batch_size 2.0 +792 36 model.embedding_dim 0.0 +792 36 model.scoring_fct_norm 2.0 +792 36 optimizer.lr 0.0010647226632486911 +792 36 negative_sampler.num_negs_per_pos 73.0 +792 36 training.batch_size 1.0 +792 37 model.embedding_dim 2.0 +792 37 model.scoring_fct_norm 1.0 +792 37 optimizer.lr 0.011916968135002808 +792 37 negative_sampler.num_negs_per_pos 52.0 +792 37 training.batch_size 1.0 +792 38 
model.embedding_dim 2.0 +792 38 model.scoring_fct_norm 2.0 +792 38 optimizer.lr 0.003925763454598406 +792 38 negative_sampler.num_negs_per_pos 79.0 +792 38 training.batch_size 1.0 +792 39 model.embedding_dim 2.0 +792 39 model.scoring_fct_norm 1.0 +792 39 optimizer.lr 0.040337554944639205 +792 39 negative_sampler.num_negs_per_pos 42.0 +792 39 training.batch_size 2.0 +792 40 model.embedding_dim 2.0 +792 40 model.scoring_fct_norm 2.0 +792 40 optimizer.lr 0.03617199207506391 +792 40 negative_sampler.num_negs_per_pos 74.0 +792 40 training.batch_size 2.0 +792 41 model.embedding_dim 0.0 +792 41 model.scoring_fct_norm 2.0 +792 41 optimizer.lr 0.03125379726738825 +792 41 negative_sampler.num_negs_per_pos 86.0 +792 41 training.batch_size 0.0 +792 42 model.embedding_dim 2.0 +792 42 model.scoring_fct_norm 2.0 +792 42 optimizer.lr 0.00535847277730836 +792 42 negative_sampler.num_negs_per_pos 47.0 +792 42 training.batch_size 2.0 +792 43 model.embedding_dim 1.0 +792 43 model.scoring_fct_norm 1.0 +792 43 optimizer.lr 0.0013912228741953837 +792 43 negative_sampler.num_negs_per_pos 37.0 +792 43 training.batch_size 1.0 +792 44 model.embedding_dim 0.0 +792 44 model.scoring_fct_norm 1.0 +792 44 optimizer.lr 0.059012970969131495 +792 44 negative_sampler.num_negs_per_pos 83.0 +792 44 training.batch_size 0.0 +792 1 dataset """fb15k237""" +792 1 model """transe""" +792 1 loss """softplus""" +792 1 regularizer """no""" +792 1 optimizer """adam""" +792 1 training_loop """owa""" +792 1 negative_sampler """basic""" +792 1 evaluator """rankbased""" +792 2 dataset """fb15k237""" +792 2 model """transe""" +792 2 loss """softplus""" +792 2 regularizer """no""" +792 2 optimizer """adam""" +792 2 training_loop """owa""" +792 2 negative_sampler """basic""" +792 2 evaluator """rankbased""" +792 3 dataset """fb15k237""" +792 3 model """transe""" +792 3 loss """softplus""" +792 3 regularizer """no""" +792 3 optimizer """adam""" +792 3 training_loop """owa""" +792 3 negative_sampler """basic""" +792 3 
evaluator """rankbased""" +792 4 dataset """fb15k237""" +792 4 model """transe""" +792 4 loss """softplus""" +792 4 regularizer """no""" +792 4 optimizer """adam""" +792 4 training_loop """owa""" +792 4 negative_sampler """basic""" +792 4 evaluator """rankbased""" +792 5 dataset """fb15k237""" +792 5 model """transe""" +792 5 loss """softplus""" +792 5 regularizer """no""" +792 5 optimizer """adam""" +792 5 training_loop """owa""" +792 5 negative_sampler """basic""" +792 5 evaluator """rankbased""" +792 6 dataset """fb15k237""" +792 6 model """transe""" +792 6 loss """softplus""" +792 6 regularizer """no""" +792 6 optimizer """adam""" +792 6 training_loop """owa""" +792 6 negative_sampler """basic""" +792 6 evaluator """rankbased""" +792 7 dataset """fb15k237""" +792 7 model """transe""" +792 7 loss """softplus""" +792 7 regularizer """no""" +792 7 optimizer """adam""" +792 7 training_loop """owa""" +792 7 negative_sampler """basic""" +792 7 evaluator """rankbased""" +792 8 dataset """fb15k237""" +792 8 model """transe""" +792 8 loss """softplus""" +792 8 regularizer """no""" +792 8 optimizer """adam""" +792 8 training_loop """owa""" +792 8 negative_sampler """basic""" +792 8 evaluator """rankbased""" +792 9 dataset """fb15k237""" +792 9 model """transe""" +792 9 loss """softplus""" +792 9 regularizer """no""" +792 9 optimizer """adam""" +792 9 training_loop """owa""" +792 9 negative_sampler """basic""" +792 9 evaluator """rankbased""" +792 10 dataset """fb15k237""" +792 10 model """transe""" +792 10 loss """softplus""" +792 10 regularizer """no""" +792 10 optimizer """adam""" +792 10 training_loop """owa""" +792 10 negative_sampler """basic""" +792 10 evaluator """rankbased""" +792 11 dataset """fb15k237""" +792 11 model """transe""" +792 11 loss """softplus""" +792 11 regularizer """no""" +792 11 optimizer """adam""" +792 11 training_loop """owa""" +792 11 negative_sampler """basic""" +792 11 evaluator """rankbased""" +792 12 dataset """fb15k237""" +792 12 model 
"""transe""" +792 12 loss """softplus""" +792 12 regularizer """no""" +792 12 optimizer """adam""" +792 12 training_loop """owa""" +792 12 negative_sampler """basic""" +792 12 evaluator """rankbased""" +792 13 dataset """fb15k237""" +792 13 model """transe""" +792 13 loss """softplus""" +792 13 regularizer """no""" +792 13 optimizer """adam""" +792 13 training_loop """owa""" +792 13 negative_sampler """basic""" +792 13 evaluator """rankbased""" +792 14 dataset """fb15k237""" +792 14 model """transe""" +792 14 loss """softplus""" +792 14 regularizer """no""" +792 14 optimizer """adam""" +792 14 training_loop """owa""" +792 14 negative_sampler """basic""" +792 14 evaluator """rankbased""" +792 15 dataset """fb15k237""" +792 15 model """transe""" +792 15 loss """softplus""" +792 15 regularizer """no""" +792 15 optimizer """adam""" +792 15 training_loop """owa""" +792 15 negative_sampler """basic""" +792 15 evaluator """rankbased""" +792 16 dataset """fb15k237""" +792 16 model """transe""" +792 16 loss """softplus""" +792 16 regularizer """no""" +792 16 optimizer """adam""" +792 16 training_loop """owa""" +792 16 negative_sampler """basic""" +792 16 evaluator """rankbased""" +792 17 dataset """fb15k237""" +792 17 model """transe""" +792 17 loss """softplus""" +792 17 regularizer """no""" +792 17 optimizer """adam""" +792 17 training_loop """owa""" +792 17 negative_sampler """basic""" +792 17 evaluator """rankbased""" +792 18 dataset """fb15k237""" +792 18 model """transe""" +792 18 loss """softplus""" +792 18 regularizer """no""" +792 18 optimizer """adam""" +792 18 training_loop """owa""" +792 18 negative_sampler """basic""" +792 18 evaluator """rankbased""" +792 19 dataset """fb15k237""" +792 19 model """transe""" +792 19 loss """softplus""" +792 19 regularizer """no""" +792 19 optimizer """adam""" +792 19 training_loop """owa""" +792 19 negative_sampler """basic""" +792 19 evaluator """rankbased""" +792 20 dataset """fb15k237""" +792 20 model """transe""" +792 20 
loss """softplus""" +792 20 regularizer """no""" +792 20 optimizer """adam""" +792 20 training_loop """owa""" +792 20 negative_sampler """basic""" +792 20 evaluator """rankbased""" +792 21 dataset """fb15k237""" +792 21 model """transe""" +792 21 loss """softplus""" +792 21 regularizer """no""" +792 21 optimizer """adam""" +792 21 training_loop """owa""" +792 21 negative_sampler """basic""" +792 21 evaluator """rankbased""" +792 22 dataset """fb15k237""" +792 22 model """transe""" +792 22 loss """softplus""" +792 22 regularizer """no""" +792 22 optimizer """adam""" +792 22 training_loop """owa""" +792 22 negative_sampler """basic""" +792 22 evaluator """rankbased""" +792 23 dataset """fb15k237""" +792 23 model """transe""" +792 23 loss """softplus""" +792 23 regularizer """no""" +792 23 optimizer """adam""" +792 23 training_loop """owa""" +792 23 negative_sampler """basic""" +792 23 evaluator """rankbased""" +792 24 dataset """fb15k237""" +792 24 model """transe""" +792 24 loss """softplus""" +792 24 regularizer """no""" +792 24 optimizer """adam""" +792 24 training_loop """owa""" +792 24 negative_sampler """basic""" +792 24 evaluator """rankbased""" +792 25 dataset """fb15k237""" +792 25 model """transe""" +792 25 loss """softplus""" +792 25 regularizer """no""" +792 25 optimizer """adam""" +792 25 training_loop """owa""" +792 25 negative_sampler """basic""" +792 25 evaluator """rankbased""" +792 26 dataset """fb15k237""" +792 26 model """transe""" +792 26 loss """softplus""" +792 26 regularizer """no""" +792 26 optimizer """adam""" +792 26 training_loop """owa""" +792 26 negative_sampler """basic""" +792 26 evaluator """rankbased""" +792 27 dataset """fb15k237""" +792 27 model """transe""" +792 27 loss """softplus""" +792 27 regularizer """no""" +792 27 optimizer """adam""" +792 27 training_loop """owa""" +792 27 negative_sampler """basic""" +792 27 evaluator """rankbased""" +792 28 dataset """fb15k237""" +792 28 model """transe""" +792 28 loss """softplus""" 
+792 28 regularizer """no""" +792 28 optimizer """adam""" +792 28 training_loop """owa""" +792 28 negative_sampler """basic""" +792 28 evaluator """rankbased""" +792 29 dataset """fb15k237""" +792 29 model """transe""" +792 29 loss """softplus""" +792 29 regularizer """no""" +792 29 optimizer """adam""" +792 29 training_loop """owa""" +792 29 negative_sampler """basic""" +792 29 evaluator """rankbased""" +792 30 dataset """fb15k237""" +792 30 model """transe""" +792 30 loss """softplus""" +792 30 regularizer """no""" +792 30 optimizer """adam""" +792 30 training_loop """owa""" +792 30 negative_sampler """basic""" +792 30 evaluator """rankbased""" +792 31 dataset """fb15k237""" +792 31 model """transe""" +792 31 loss """softplus""" +792 31 regularizer """no""" +792 31 optimizer """adam""" +792 31 training_loop """owa""" +792 31 negative_sampler """basic""" +792 31 evaluator """rankbased""" +792 32 dataset """fb15k237""" +792 32 model """transe""" +792 32 loss """softplus""" +792 32 regularizer """no""" +792 32 optimizer """adam""" +792 32 training_loop """owa""" +792 32 negative_sampler """basic""" +792 32 evaluator """rankbased""" +792 33 dataset """fb15k237""" +792 33 model """transe""" +792 33 loss """softplus""" +792 33 regularizer """no""" +792 33 optimizer """adam""" +792 33 training_loop """owa""" +792 33 negative_sampler """basic""" +792 33 evaluator """rankbased""" +792 34 dataset """fb15k237""" +792 34 model """transe""" +792 34 loss """softplus""" +792 34 regularizer """no""" +792 34 optimizer """adam""" +792 34 training_loop """owa""" +792 34 negative_sampler """basic""" +792 34 evaluator """rankbased""" +792 35 dataset """fb15k237""" +792 35 model """transe""" +792 35 loss """softplus""" +792 35 regularizer """no""" +792 35 optimizer """adam""" +792 35 training_loop """owa""" +792 35 negative_sampler """basic""" +792 35 evaluator """rankbased""" +792 36 dataset """fb15k237""" +792 36 model """transe""" +792 36 loss """softplus""" +792 36 regularizer 
"""no""" +792 36 optimizer """adam""" +792 36 training_loop """owa""" +792 36 negative_sampler """basic""" +792 36 evaluator """rankbased""" +792 37 dataset """fb15k237""" +792 37 model """transe""" +792 37 loss """softplus""" +792 37 regularizer """no""" +792 37 optimizer """adam""" +792 37 training_loop """owa""" +792 37 negative_sampler """basic""" +792 37 evaluator """rankbased""" +792 38 dataset """fb15k237""" +792 38 model """transe""" +792 38 loss """softplus""" +792 38 regularizer """no""" +792 38 optimizer """adam""" +792 38 training_loop """owa""" +792 38 negative_sampler """basic""" +792 38 evaluator """rankbased""" +792 39 dataset """fb15k237""" +792 39 model """transe""" +792 39 loss """softplus""" +792 39 regularizer """no""" +792 39 optimizer """adam""" +792 39 training_loop """owa""" +792 39 negative_sampler """basic""" +792 39 evaluator """rankbased""" +792 40 dataset """fb15k237""" +792 40 model """transe""" +792 40 loss """softplus""" +792 40 regularizer """no""" +792 40 optimizer """adam""" +792 40 training_loop """owa""" +792 40 negative_sampler """basic""" +792 40 evaluator """rankbased""" +792 41 dataset """fb15k237""" +792 41 model """transe""" +792 41 loss """softplus""" +792 41 regularizer """no""" +792 41 optimizer """adam""" +792 41 training_loop """owa""" +792 41 negative_sampler """basic""" +792 41 evaluator """rankbased""" +792 42 dataset """fb15k237""" +792 42 model """transe""" +792 42 loss """softplus""" +792 42 regularizer """no""" +792 42 optimizer """adam""" +792 42 training_loop """owa""" +792 42 negative_sampler """basic""" +792 42 evaluator """rankbased""" +792 43 dataset """fb15k237""" +792 43 model """transe""" +792 43 loss """softplus""" +792 43 regularizer """no""" +792 43 optimizer """adam""" +792 43 training_loop """owa""" +792 43 negative_sampler """basic""" +792 43 evaluator """rankbased""" +792 44 dataset """fb15k237""" +792 44 model """transe""" +792 44 loss """softplus""" +792 44 regularizer """no""" +792 44 
optimizer """adam""" +792 44 training_loop """owa""" +792 44 negative_sampler """basic""" +792 44 evaluator """rankbased""" +793 1 model.embedding_dim 1.0 +793 1 model.scoring_fct_norm 1.0 +793 1 optimizer.lr 0.0042163366724353975 +793 1 negative_sampler.num_negs_per_pos 19.0 +793 1 training.batch_size 0.0 +793 2 model.embedding_dim 1.0 +793 2 model.scoring_fct_norm 1.0 +793 2 optimizer.lr 0.012999203168338557 +793 2 negative_sampler.num_negs_per_pos 25.0 +793 2 training.batch_size 1.0 +793 3 model.embedding_dim 2.0 +793 3 model.scoring_fct_norm 1.0 +793 3 optimizer.lr 0.0841000074440536 +793 3 negative_sampler.num_negs_per_pos 67.0 +793 3 training.batch_size 0.0 +793 4 model.embedding_dim 0.0 +793 4 model.scoring_fct_norm 2.0 +793 4 optimizer.lr 0.010383098297065223 +793 4 negative_sampler.num_negs_per_pos 97.0 +793 4 training.batch_size 1.0 +793 5 model.embedding_dim 0.0 +793 5 model.scoring_fct_norm 2.0 +793 5 optimizer.lr 0.09985719055130987 +793 5 negative_sampler.num_negs_per_pos 97.0 +793 5 training.batch_size 1.0 +793 6 model.embedding_dim 0.0 +793 6 model.scoring_fct_norm 2.0 +793 6 optimizer.lr 0.058850678093113604 +793 6 negative_sampler.num_negs_per_pos 66.0 +793 6 training.batch_size 0.0 +793 7 model.embedding_dim 1.0 +793 7 model.scoring_fct_norm 1.0 +793 7 optimizer.lr 0.01725956042726339 +793 7 negative_sampler.num_negs_per_pos 37.0 +793 7 training.batch_size 2.0 +793 8 model.embedding_dim 1.0 +793 8 model.scoring_fct_norm 2.0 +793 8 optimizer.lr 0.027147533001458023 +793 8 negative_sampler.num_negs_per_pos 25.0 +793 8 training.batch_size 2.0 +793 9 model.embedding_dim 1.0 +793 9 model.scoring_fct_norm 2.0 +793 9 optimizer.lr 0.008555741343940219 +793 9 negative_sampler.num_negs_per_pos 19.0 +793 9 training.batch_size 0.0 +793 10 model.embedding_dim 0.0 +793 10 model.scoring_fct_norm 2.0 +793 10 optimizer.lr 0.07575145947990082 +793 10 negative_sampler.num_negs_per_pos 3.0 +793 10 training.batch_size 1.0 +793 11 model.embedding_dim 1.0 +793 11 
model.scoring_fct_norm 1.0 +793 11 optimizer.lr 0.018094813196262466 +793 11 negative_sampler.num_negs_per_pos 18.0 +793 11 training.batch_size 2.0 +793 12 model.embedding_dim 2.0 +793 12 model.scoring_fct_norm 1.0 +793 12 optimizer.lr 0.0048128196975208495 +793 12 negative_sampler.num_negs_per_pos 69.0 +793 12 training.batch_size 2.0 +793 13 model.embedding_dim 0.0 +793 13 model.scoring_fct_norm 1.0 +793 13 optimizer.lr 0.045118521232932494 +793 13 negative_sampler.num_negs_per_pos 15.0 +793 13 training.batch_size 2.0 +793 14 model.embedding_dim 2.0 +793 14 model.scoring_fct_norm 2.0 +793 14 optimizer.lr 0.023130368329059327 +793 14 negative_sampler.num_negs_per_pos 48.0 +793 14 training.batch_size 1.0 +793 15 model.embedding_dim 1.0 +793 15 model.scoring_fct_norm 1.0 +793 15 optimizer.lr 0.05245205031119885 +793 15 negative_sampler.num_negs_per_pos 70.0 +793 15 training.batch_size 0.0 +793 16 model.embedding_dim 2.0 +793 16 model.scoring_fct_norm 2.0 +793 16 optimizer.lr 0.0010265909546570068 +793 16 negative_sampler.num_negs_per_pos 45.0 +793 16 training.batch_size 2.0 +793 17 model.embedding_dim 1.0 +793 17 model.scoring_fct_norm 2.0 +793 17 optimizer.lr 0.006741399132264039 +793 17 negative_sampler.num_negs_per_pos 79.0 +793 17 training.batch_size 2.0 +793 18 model.embedding_dim 1.0 +793 18 model.scoring_fct_norm 2.0 +793 18 optimizer.lr 0.07204698271245177 +793 18 negative_sampler.num_negs_per_pos 56.0 +793 18 training.batch_size 1.0 +793 19 model.embedding_dim 2.0 +793 19 model.scoring_fct_norm 1.0 +793 19 optimizer.lr 0.009108348987066035 +793 19 negative_sampler.num_negs_per_pos 0.0 +793 19 training.batch_size 0.0 +793 20 model.embedding_dim 0.0 +793 20 model.scoring_fct_norm 1.0 +793 20 optimizer.lr 0.0016629717839982965 +793 20 negative_sampler.num_negs_per_pos 9.0 +793 20 training.batch_size 1.0 +793 21 model.embedding_dim 2.0 +793 21 model.scoring_fct_norm 2.0 +793 21 optimizer.lr 0.0019536139216425486 +793 21 negative_sampler.num_negs_per_pos 37.0 
+793 21 training.batch_size 1.0 +793 22 model.embedding_dim 1.0 +793 22 model.scoring_fct_norm 2.0 +793 22 optimizer.lr 0.0025155496608419703 +793 22 negative_sampler.num_negs_per_pos 80.0 +793 22 training.batch_size 0.0 +793 23 model.embedding_dim 0.0 +793 23 model.scoring_fct_norm 1.0 +793 23 optimizer.lr 0.0430976765474957 +793 23 negative_sampler.num_negs_per_pos 87.0 +793 23 training.batch_size 1.0 +793 24 model.embedding_dim 2.0 +793 24 model.scoring_fct_norm 1.0 +793 24 optimizer.lr 0.00392924497884588 +793 24 negative_sampler.num_negs_per_pos 31.0 +793 24 training.batch_size 0.0 +793 25 model.embedding_dim 1.0 +793 25 model.scoring_fct_norm 2.0 +793 25 optimizer.lr 0.025131411603900793 +793 25 negative_sampler.num_negs_per_pos 27.0 +793 25 training.batch_size 0.0 +793 26 model.embedding_dim 0.0 +793 26 model.scoring_fct_norm 2.0 +793 26 optimizer.lr 0.009870380878984559 +793 26 negative_sampler.num_negs_per_pos 13.0 +793 26 training.batch_size 0.0 +793 27 model.embedding_dim 0.0 +793 27 model.scoring_fct_norm 1.0 +793 27 optimizer.lr 0.04928854376186927 +793 27 negative_sampler.num_negs_per_pos 92.0 +793 27 training.batch_size 0.0 +793 28 model.embedding_dim 1.0 +793 28 model.scoring_fct_norm 2.0 +793 28 optimizer.lr 0.0014200235619682196 +793 28 negative_sampler.num_negs_per_pos 69.0 +793 28 training.batch_size 1.0 +793 29 model.embedding_dim 2.0 +793 29 model.scoring_fct_norm 2.0 +793 29 optimizer.lr 0.004967391307912496 +793 29 negative_sampler.num_negs_per_pos 67.0 +793 29 training.batch_size 1.0 +793 30 model.embedding_dim 0.0 +793 30 model.scoring_fct_norm 2.0 +793 30 optimizer.lr 0.040960774079158095 +793 30 negative_sampler.num_negs_per_pos 6.0 +793 30 training.batch_size 1.0 +793 31 model.embedding_dim 2.0 +793 31 model.scoring_fct_norm 1.0 +793 31 optimizer.lr 0.008231357037849158 +793 31 negative_sampler.num_negs_per_pos 73.0 +793 31 training.batch_size 1.0 +793 32 model.embedding_dim 0.0 +793 32 model.scoring_fct_norm 2.0 +793 32 optimizer.lr 
0.002687356422360399 +793 32 negative_sampler.num_negs_per_pos 52.0 +793 32 training.batch_size 0.0 +793 33 model.embedding_dim 0.0 +793 33 model.scoring_fct_norm 1.0 +793 33 optimizer.lr 0.003578176896336627 +793 33 negative_sampler.num_negs_per_pos 56.0 +793 33 training.batch_size 1.0 +793 34 model.embedding_dim 1.0 +793 34 model.scoring_fct_norm 1.0 +793 34 optimizer.lr 0.008426590141832906 +793 34 negative_sampler.num_negs_per_pos 3.0 +793 34 training.batch_size 2.0 +793 35 model.embedding_dim 1.0 +793 35 model.scoring_fct_norm 2.0 +793 35 optimizer.lr 0.0854229196270154 +793 35 negative_sampler.num_negs_per_pos 0.0 +793 35 training.batch_size 0.0 +793 36 model.embedding_dim 0.0 +793 36 model.scoring_fct_norm 1.0 +793 36 optimizer.lr 0.05763583174201258 +793 36 negative_sampler.num_negs_per_pos 66.0 +793 36 training.batch_size 0.0 +793 37 model.embedding_dim 0.0 +793 37 model.scoring_fct_norm 1.0 +793 37 optimizer.lr 0.0022004950423035632 +793 37 negative_sampler.num_negs_per_pos 76.0 +793 37 training.batch_size 0.0 +793 38 model.embedding_dim 2.0 +793 38 model.scoring_fct_norm 2.0 +793 38 optimizer.lr 0.014489033072341104 +793 38 negative_sampler.num_negs_per_pos 48.0 +793 38 training.batch_size 1.0 +793 39 model.embedding_dim 2.0 +793 39 model.scoring_fct_norm 2.0 +793 39 optimizer.lr 0.0040244931472624155 +793 39 negative_sampler.num_negs_per_pos 82.0 +793 39 training.batch_size 1.0 +793 40 model.embedding_dim 2.0 +793 40 model.scoring_fct_norm 1.0 +793 40 optimizer.lr 0.0016974956592895156 +793 40 negative_sampler.num_negs_per_pos 27.0 +793 40 training.batch_size 1.0 +793 41 model.embedding_dim 1.0 +793 41 model.scoring_fct_norm 1.0 +793 41 optimizer.lr 0.0032419474772660273 +793 41 negative_sampler.num_negs_per_pos 87.0 +793 41 training.batch_size 1.0 +793 42 model.embedding_dim 0.0 +793 42 model.scoring_fct_norm 1.0 +793 42 optimizer.lr 0.009772641650104915 +793 42 negative_sampler.num_negs_per_pos 56.0 +793 42 training.batch_size 1.0 +793 43 
model.embedding_dim 1.0 +793 43 model.scoring_fct_norm 2.0 +793 43 optimizer.lr 0.002134191327179359 +793 43 negative_sampler.num_negs_per_pos 73.0 +793 43 training.batch_size 0.0 +793 44 model.embedding_dim 1.0 +793 44 model.scoring_fct_norm 2.0 +793 44 optimizer.lr 0.024805622100565302 +793 44 negative_sampler.num_negs_per_pos 70.0 +793 44 training.batch_size 1.0 +793 45 model.embedding_dim 2.0 +793 45 model.scoring_fct_norm 2.0 +793 45 optimizer.lr 0.004201996861374535 +793 45 negative_sampler.num_negs_per_pos 46.0 +793 45 training.batch_size 1.0 +793 46 model.embedding_dim 0.0 +793 46 model.scoring_fct_norm 2.0 +793 46 optimizer.lr 0.008339162531559306 +793 46 negative_sampler.num_negs_per_pos 62.0 +793 46 training.batch_size 1.0 +793 47 model.embedding_dim 1.0 +793 47 model.scoring_fct_norm 1.0 +793 47 optimizer.lr 0.00313476879660112 +793 47 negative_sampler.num_negs_per_pos 91.0 +793 47 training.batch_size 2.0 +793 48 model.embedding_dim 1.0 +793 48 model.scoring_fct_norm 2.0 +793 48 optimizer.lr 0.09923982393303292 +793 48 negative_sampler.num_negs_per_pos 21.0 +793 48 training.batch_size 0.0 +793 49 model.embedding_dim 2.0 +793 49 model.scoring_fct_norm 2.0 +793 49 optimizer.lr 0.0012862760742398938 +793 49 negative_sampler.num_negs_per_pos 18.0 +793 49 training.batch_size 0.0 +793 50 model.embedding_dim 2.0 +793 50 model.scoring_fct_norm 1.0 +793 50 optimizer.lr 0.00930678741014442 +793 50 negative_sampler.num_negs_per_pos 19.0 +793 50 training.batch_size 1.0 +793 51 model.embedding_dim 1.0 +793 51 model.scoring_fct_norm 1.0 +793 51 optimizer.lr 0.0017685081936957955 +793 51 negative_sampler.num_negs_per_pos 58.0 +793 51 training.batch_size 0.0 +793 52 model.embedding_dim 0.0 +793 52 model.scoring_fct_norm 1.0 +793 52 optimizer.lr 0.018796688905453152 +793 52 negative_sampler.num_negs_per_pos 84.0 +793 52 training.batch_size 0.0 +793 53 model.embedding_dim 1.0 +793 53 model.scoring_fct_norm 1.0 +793 53 optimizer.lr 0.025489961353104842 +793 53 
negative_sampler.num_negs_per_pos 23.0 +793 53 training.batch_size 2.0 +793 54 model.embedding_dim 0.0 +793 54 model.scoring_fct_norm 1.0 +793 54 optimizer.lr 0.02463001533412959 +793 54 negative_sampler.num_negs_per_pos 61.0 +793 54 training.batch_size 0.0 +793 55 model.embedding_dim 2.0 +793 55 model.scoring_fct_norm 2.0 +793 55 optimizer.lr 0.0013194418116780353 +793 55 negative_sampler.num_negs_per_pos 45.0 +793 55 training.batch_size 2.0 +793 56 model.embedding_dim 0.0 +793 56 model.scoring_fct_norm 2.0 +793 56 optimizer.lr 0.07871805330604857 +793 56 negative_sampler.num_negs_per_pos 45.0 +793 56 training.batch_size 1.0 +793 57 model.embedding_dim 2.0 +793 57 model.scoring_fct_norm 1.0 +793 57 optimizer.lr 0.0025027639759183985 +793 57 negative_sampler.num_negs_per_pos 16.0 +793 57 training.batch_size 1.0 +793 58 model.embedding_dim 2.0 +793 58 model.scoring_fct_norm 2.0 +793 58 optimizer.lr 0.002302097094129657 +793 58 negative_sampler.num_negs_per_pos 1.0 +793 58 training.batch_size 2.0 +793 59 model.embedding_dim 1.0 +793 59 model.scoring_fct_norm 2.0 +793 59 optimizer.lr 0.00234213699483008 +793 59 negative_sampler.num_negs_per_pos 66.0 +793 59 training.batch_size 0.0 +793 60 model.embedding_dim 1.0 +793 60 model.scoring_fct_norm 1.0 +793 60 optimizer.lr 0.01672375213519235 +793 60 negative_sampler.num_negs_per_pos 85.0 +793 60 training.batch_size 1.0 +793 61 model.embedding_dim 2.0 +793 61 model.scoring_fct_norm 2.0 +793 61 optimizer.lr 0.004096801769058982 +793 61 negative_sampler.num_negs_per_pos 97.0 +793 61 training.batch_size 2.0 +793 62 model.embedding_dim 2.0 +793 62 model.scoring_fct_norm 1.0 +793 62 optimizer.lr 0.02282274049010747 +793 62 negative_sampler.num_negs_per_pos 36.0 +793 62 training.batch_size 1.0 +793 63 model.embedding_dim 2.0 +793 63 model.scoring_fct_norm 2.0 +793 63 optimizer.lr 0.0029948566861745618 +793 63 negative_sampler.num_negs_per_pos 9.0 +793 63 training.batch_size 0.0 +793 64 model.embedding_dim 0.0 +793 64 
model.scoring_fct_norm 1.0 +793 64 optimizer.lr 0.002882748545401419 +793 64 negative_sampler.num_negs_per_pos 39.0 +793 64 training.batch_size 0.0 +793 65 model.embedding_dim 1.0 +793 65 model.scoring_fct_norm 2.0 +793 65 optimizer.lr 0.0018411077580604515 +793 65 negative_sampler.num_negs_per_pos 55.0 +793 65 training.batch_size 1.0 +793 66 model.embedding_dim 0.0 +793 66 model.scoring_fct_norm 2.0 +793 66 optimizer.lr 0.0012495829613670105 +793 66 negative_sampler.num_negs_per_pos 93.0 +793 66 training.batch_size 0.0 +793 67 model.embedding_dim 0.0 +793 67 model.scoring_fct_norm 2.0 +793 67 optimizer.lr 0.0860993369645198 +793 67 negative_sampler.num_negs_per_pos 0.0 +793 67 training.batch_size 0.0 +793 68 model.embedding_dim 0.0 +793 68 model.scoring_fct_norm 2.0 +793 68 optimizer.lr 0.04619800586221148 +793 68 negative_sampler.num_negs_per_pos 19.0 +793 68 training.batch_size 1.0 +793 69 model.embedding_dim 1.0 +793 69 model.scoring_fct_norm 2.0 +793 69 optimizer.lr 0.0033045346637113372 +793 69 negative_sampler.num_negs_per_pos 30.0 +793 69 training.batch_size 1.0 +793 70 model.embedding_dim 0.0 +793 70 model.scoring_fct_norm 1.0 +793 70 optimizer.lr 0.003327362975819532 +793 70 negative_sampler.num_negs_per_pos 6.0 +793 70 training.batch_size 2.0 +793 71 model.embedding_dim 0.0 +793 71 model.scoring_fct_norm 2.0 +793 71 optimizer.lr 0.030408672037704074 +793 71 negative_sampler.num_negs_per_pos 34.0 +793 71 training.batch_size 1.0 +793 72 model.embedding_dim 0.0 +793 72 model.scoring_fct_norm 2.0 +793 72 optimizer.lr 0.013982095814981402 +793 72 negative_sampler.num_negs_per_pos 35.0 +793 72 training.batch_size 1.0 +793 73 model.embedding_dim 0.0 +793 73 model.scoring_fct_norm 2.0 +793 73 optimizer.lr 0.002740375675046727 +793 73 negative_sampler.num_negs_per_pos 90.0 +793 73 training.batch_size 0.0 +793 74 model.embedding_dim 1.0 +793 74 model.scoring_fct_norm 2.0 +793 74 optimizer.lr 0.0013588259020126882 +793 74 negative_sampler.num_negs_per_pos 61.0 +793 
74 training.batch_size 2.0 +793 75 model.embedding_dim 2.0 +793 75 model.scoring_fct_norm 1.0 +793 75 optimizer.lr 0.028610137528827313 +793 75 negative_sampler.num_negs_per_pos 18.0 +793 75 training.batch_size 1.0 +793 76 model.embedding_dim 2.0 +793 76 model.scoring_fct_norm 1.0 +793 76 optimizer.lr 0.006295637913525729 +793 76 negative_sampler.num_negs_per_pos 93.0 +793 76 training.batch_size 0.0 +793 77 model.embedding_dim 1.0 +793 77 model.scoring_fct_norm 1.0 +793 77 optimizer.lr 0.05818775387203868 +793 77 negative_sampler.num_negs_per_pos 61.0 +793 77 training.batch_size 0.0 +793 78 model.embedding_dim 2.0 +793 78 model.scoring_fct_norm 2.0 +793 78 optimizer.lr 0.0266021716971038 +793 78 negative_sampler.num_negs_per_pos 23.0 +793 78 training.batch_size 0.0 +793 79 model.embedding_dim 1.0 +793 79 model.scoring_fct_norm 2.0 +793 79 optimizer.lr 0.04718938052390075 +793 79 negative_sampler.num_negs_per_pos 99.0 +793 79 training.batch_size 0.0 +793 80 model.embedding_dim 2.0 +793 80 model.scoring_fct_norm 1.0 +793 80 optimizer.lr 0.0037497783008810263 +793 80 negative_sampler.num_negs_per_pos 95.0 +793 80 training.batch_size 0.0 +793 81 model.embedding_dim 2.0 +793 81 model.scoring_fct_norm 1.0 +793 81 optimizer.lr 0.0069886841851592 +793 81 negative_sampler.num_negs_per_pos 32.0 +793 81 training.batch_size 1.0 +793 82 model.embedding_dim 2.0 +793 82 model.scoring_fct_norm 1.0 +793 82 optimizer.lr 0.007148097496649678 +793 82 negative_sampler.num_negs_per_pos 10.0 +793 82 training.batch_size 0.0 +793 83 model.embedding_dim 2.0 +793 83 model.scoring_fct_norm 1.0 +793 83 optimizer.lr 0.005994568533733085 +793 83 negative_sampler.num_negs_per_pos 5.0 +793 83 training.batch_size 2.0 +793 84 model.embedding_dim 2.0 +793 84 model.scoring_fct_norm 1.0 +793 84 optimizer.lr 0.003703255944235552 +793 84 negative_sampler.num_negs_per_pos 62.0 +793 84 training.batch_size 0.0 +793 85 model.embedding_dim 2.0 +793 85 model.scoring_fct_norm 1.0 +793 85 optimizer.lr 
0.016034045942990145 +793 85 negative_sampler.num_negs_per_pos 48.0 +793 85 training.batch_size 2.0 +793 1 dataset """fb15k237""" +793 1 model """transe""" +793 1 loss """bceaftersigmoid""" +793 1 regularizer """no""" +793 1 optimizer """adam""" +793 1 training_loop """owa""" +793 1 negative_sampler """basic""" +793 1 evaluator """rankbased""" +793 2 dataset """fb15k237""" +793 2 model """transe""" +793 2 loss """bceaftersigmoid""" +793 2 regularizer """no""" +793 2 optimizer """adam""" +793 2 training_loop """owa""" +793 2 negative_sampler """basic""" +793 2 evaluator """rankbased""" +793 3 dataset """fb15k237""" +793 3 model """transe""" +793 3 loss """bceaftersigmoid""" +793 3 regularizer """no""" +793 3 optimizer """adam""" +793 3 training_loop """owa""" +793 3 negative_sampler """basic""" +793 3 evaluator """rankbased""" +793 4 dataset """fb15k237""" +793 4 model """transe""" +793 4 loss """bceaftersigmoid""" +793 4 regularizer """no""" +793 4 optimizer """adam""" +793 4 training_loop """owa""" +793 4 negative_sampler """basic""" +793 4 evaluator """rankbased""" +793 5 dataset """fb15k237""" +793 5 model """transe""" +793 5 loss """bceaftersigmoid""" +793 5 regularizer """no""" +793 5 optimizer """adam""" +793 5 training_loop """owa""" +793 5 negative_sampler """basic""" +793 5 evaluator """rankbased""" +793 6 dataset """fb15k237""" +793 6 model """transe""" +793 6 loss """bceaftersigmoid""" +793 6 regularizer """no""" +793 6 optimizer """adam""" +793 6 training_loop """owa""" +793 6 negative_sampler """basic""" +793 6 evaluator """rankbased""" +793 7 dataset """fb15k237""" +793 7 model """transe""" +793 7 loss """bceaftersigmoid""" +793 7 regularizer """no""" +793 7 optimizer """adam""" +793 7 training_loop """owa""" +793 7 negative_sampler """basic""" +793 7 evaluator """rankbased""" +793 8 dataset """fb15k237""" +793 8 model """transe""" +793 8 loss """bceaftersigmoid""" +793 8 regularizer """no""" +793 8 optimizer """adam""" +793 8 training_loop """owa""" 
+793 8 negative_sampler """basic""" +793 8 evaluator """rankbased""" +793 9 dataset """fb15k237""" +793 9 model """transe""" +793 9 loss """bceaftersigmoid""" +793 9 regularizer """no""" +793 9 optimizer """adam""" +793 9 training_loop """owa""" +793 9 negative_sampler """basic""" +793 9 evaluator """rankbased""" +793 10 dataset """fb15k237""" +793 10 model """transe""" +793 10 loss """bceaftersigmoid""" +793 10 regularizer """no""" +793 10 optimizer """adam""" +793 10 training_loop """owa""" +793 10 negative_sampler """basic""" +793 10 evaluator """rankbased""" +793 11 dataset """fb15k237""" +793 11 model """transe""" +793 11 loss """bceaftersigmoid""" +793 11 regularizer """no""" +793 11 optimizer """adam""" +793 11 training_loop """owa""" +793 11 negative_sampler """basic""" +793 11 evaluator """rankbased""" +793 12 dataset """fb15k237""" +793 12 model """transe""" +793 12 loss """bceaftersigmoid""" +793 12 regularizer """no""" +793 12 optimizer """adam""" +793 12 training_loop """owa""" +793 12 negative_sampler """basic""" +793 12 evaluator """rankbased""" +793 13 dataset """fb15k237""" +793 13 model """transe""" +793 13 loss """bceaftersigmoid""" +793 13 regularizer """no""" +793 13 optimizer """adam""" +793 13 training_loop """owa""" +793 13 negative_sampler """basic""" +793 13 evaluator """rankbased""" +793 14 dataset """fb15k237""" +793 14 model """transe""" +793 14 loss """bceaftersigmoid""" +793 14 regularizer """no""" +793 14 optimizer """adam""" +793 14 training_loop """owa""" +793 14 negative_sampler """basic""" +793 14 evaluator """rankbased""" +793 15 dataset """fb15k237""" +793 15 model """transe""" +793 15 loss """bceaftersigmoid""" +793 15 regularizer """no""" +793 15 optimizer """adam""" +793 15 training_loop """owa""" +793 15 negative_sampler """basic""" +793 15 evaluator """rankbased""" +793 16 dataset """fb15k237""" +793 16 model """transe""" +793 16 loss """bceaftersigmoid""" +793 16 regularizer """no""" +793 16 optimizer """adam""" +793 16 
training_loop """owa""" +793 16 negative_sampler """basic""" +793 16 evaluator """rankbased""" +793 17 dataset """fb15k237""" +793 17 model """transe""" +793 17 loss """bceaftersigmoid""" +793 17 regularizer """no""" +793 17 optimizer """adam""" +793 17 training_loop """owa""" +793 17 negative_sampler """basic""" +793 17 evaluator """rankbased""" +793 18 dataset """fb15k237""" +793 18 model """transe""" +793 18 loss """bceaftersigmoid""" +793 18 regularizer """no""" +793 18 optimizer """adam""" +793 18 training_loop """owa""" +793 18 negative_sampler """basic""" +793 18 evaluator """rankbased""" +793 19 dataset """fb15k237""" +793 19 model """transe""" +793 19 loss """bceaftersigmoid""" +793 19 regularizer """no""" +793 19 optimizer """adam""" +793 19 training_loop """owa""" +793 19 negative_sampler """basic""" +793 19 evaluator """rankbased""" +793 20 dataset """fb15k237""" +793 20 model """transe""" +793 20 loss """bceaftersigmoid""" +793 20 regularizer """no""" +793 20 optimizer """adam""" +793 20 training_loop """owa""" +793 20 negative_sampler """basic""" +793 20 evaluator """rankbased""" +793 21 dataset """fb15k237""" +793 21 model """transe""" +793 21 loss """bceaftersigmoid""" +793 21 regularizer """no""" +793 21 optimizer """adam""" +793 21 training_loop """owa""" +793 21 negative_sampler """basic""" +793 21 evaluator """rankbased""" +793 22 dataset """fb15k237""" +793 22 model """transe""" +793 22 loss """bceaftersigmoid""" +793 22 regularizer """no""" +793 22 optimizer """adam""" +793 22 training_loop """owa""" +793 22 negative_sampler """basic""" +793 22 evaluator """rankbased""" +793 23 dataset """fb15k237""" +793 23 model """transe""" +793 23 loss """bceaftersigmoid""" +793 23 regularizer """no""" +793 23 optimizer """adam""" +793 23 training_loop """owa""" +793 23 negative_sampler """basic""" +793 23 evaluator """rankbased""" +793 24 dataset """fb15k237""" +793 24 model """transe""" +793 24 loss """bceaftersigmoid""" +793 24 regularizer """no""" +793 
24 optimizer """adam""" +793 24 training_loop """owa""" +793 24 negative_sampler """basic""" +793 24 evaluator """rankbased""" +793 25 dataset """fb15k237""" +793 25 model """transe""" +793 25 loss """bceaftersigmoid""" +793 25 regularizer """no""" +793 25 optimizer """adam""" +793 25 training_loop """owa""" +793 25 negative_sampler """basic""" +793 25 evaluator """rankbased""" +793 26 dataset """fb15k237""" +793 26 model """transe""" +793 26 loss """bceaftersigmoid""" +793 26 regularizer """no""" +793 26 optimizer """adam""" +793 26 training_loop """owa""" +793 26 negative_sampler """basic""" +793 26 evaluator """rankbased""" +793 27 dataset """fb15k237""" +793 27 model """transe""" +793 27 loss """bceaftersigmoid""" +793 27 regularizer """no""" +793 27 optimizer """adam""" +793 27 training_loop """owa""" +793 27 negative_sampler """basic""" +793 27 evaluator """rankbased""" +793 28 dataset """fb15k237""" +793 28 model """transe""" +793 28 loss """bceaftersigmoid""" +793 28 regularizer """no""" +793 28 optimizer """adam""" +793 28 training_loop """owa""" +793 28 negative_sampler """basic""" +793 28 evaluator """rankbased""" +793 29 dataset """fb15k237""" +793 29 model """transe""" +793 29 loss """bceaftersigmoid""" +793 29 regularizer """no""" +793 29 optimizer """adam""" +793 29 training_loop """owa""" +793 29 negative_sampler """basic""" +793 29 evaluator """rankbased""" +793 30 dataset """fb15k237""" +793 30 model """transe""" +793 30 loss """bceaftersigmoid""" +793 30 regularizer """no""" +793 30 optimizer """adam""" +793 30 training_loop """owa""" +793 30 negative_sampler """basic""" +793 30 evaluator """rankbased""" +793 31 dataset """fb15k237""" +793 31 model """transe""" +793 31 loss """bceaftersigmoid""" +793 31 regularizer """no""" +793 31 optimizer """adam""" +793 31 training_loop """owa""" +793 31 negative_sampler """basic""" +793 31 evaluator """rankbased""" +793 32 dataset """fb15k237""" +793 32 model """transe""" +793 32 loss """bceaftersigmoid""" 
+793 32 regularizer """no""" +793 32 optimizer """adam""" +793 32 training_loop """owa""" +793 32 negative_sampler """basic""" +793 32 evaluator """rankbased""" +793 33 dataset """fb15k237""" +793 33 model """transe""" +793 33 loss """bceaftersigmoid""" +793 33 regularizer """no""" +793 33 optimizer """adam""" +793 33 training_loop """owa""" +793 33 negative_sampler """basic""" +793 33 evaluator """rankbased""" +793 34 dataset """fb15k237""" +793 34 model """transe""" +793 34 loss """bceaftersigmoid""" +793 34 regularizer """no""" +793 34 optimizer """adam""" +793 34 training_loop """owa""" +793 34 negative_sampler """basic""" +793 34 evaluator """rankbased""" +793 35 dataset """fb15k237""" +793 35 model """transe""" +793 35 loss """bceaftersigmoid""" +793 35 regularizer """no""" +793 35 optimizer """adam""" +793 35 training_loop """owa""" +793 35 negative_sampler """basic""" +793 35 evaluator """rankbased""" +793 36 dataset """fb15k237""" +793 36 model """transe""" +793 36 loss """bceaftersigmoid""" +793 36 regularizer """no""" +793 36 optimizer """adam""" +793 36 training_loop """owa""" +793 36 negative_sampler """basic""" +793 36 evaluator """rankbased""" +793 37 dataset """fb15k237""" +793 37 model """transe""" +793 37 loss """bceaftersigmoid""" +793 37 regularizer """no""" +793 37 optimizer """adam""" +793 37 training_loop """owa""" +793 37 negative_sampler """basic""" +793 37 evaluator """rankbased""" +793 38 dataset """fb15k237""" +793 38 model """transe""" +793 38 loss """bceaftersigmoid""" +793 38 regularizer """no""" +793 38 optimizer """adam""" +793 38 training_loop """owa""" +793 38 negative_sampler """basic""" +793 38 evaluator """rankbased""" +793 39 dataset """fb15k237""" +793 39 model """transe""" +793 39 loss """bceaftersigmoid""" +793 39 regularizer """no""" +793 39 optimizer """adam""" +793 39 training_loop """owa""" +793 39 negative_sampler """basic""" +793 39 evaluator """rankbased""" +793 40 dataset """fb15k237""" +793 40 model """transe""" 
+793 40 loss """bceaftersigmoid""" +793 40 regularizer """no""" +793 40 optimizer """adam""" +793 40 training_loop """owa""" +793 40 negative_sampler """basic""" +793 40 evaluator """rankbased""" +793 41 dataset """fb15k237""" +793 41 model """transe""" +793 41 loss """bceaftersigmoid""" +793 41 regularizer """no""" +793 41 optimizer """adam""" +793 41 training_loop """owa""" +793 41 negative_sampler """basic""" +793 41 evaluator """rankbased""" +793 42 dataset """fb15k237""" +793 42 model """transe""" +793 42 loss """bceaftersigmoid""" +793 42 regularizer """no""" +793 42 optimizer """adam""" +793 42 training_loop """owa""" +793 42 negative_sampler """basic""" +793 42 evaluator """rankbased""" +793 43 dataset """fb15k237""" +793 43 model """transe""" +793 43 loss """bceaftersigmoid""" +793 43 regularizer """no""" +793 43 optimizer """adam""" +793 43 training_loop """owa""" +793 43 negative_sampler """basic""" +793 43 evaluator """rankbased""" +793 44 dataset """fb15k237""" +793 44 model """transe""" +793 44 loss """bceaftersigmoid""" +793 44 regularizer """no""" +793 44 optimizer """adam""" +793 44 training_loop """owa""" +793 44 negative_sampler """basic""" +793 44 evaluator """rankbased""" +793 45 dataset """fb15k237""" +793 45 model """transe""" +793 45 loss """bceaftersigmoid""" +793 45 regularizer """no""" +793 45 optimizer """adam""" +793 45 training_loop """owa""" +793 45 negative_sampler """basic""" +793 45 evaluator """rankbased""" +793 46 dataset """fb15k237""" +793 46 model """transe""" +793 46 loss """bceaftersigmoid""" +793 46 regularizer """no""" +793 46 optimizer """adam""" +793 46 training_loop """owa""" +793 46 negative_sampler """basic""" +793 46 evaluator """rankbased""" +793 47 dataset """fb15k237""" +793 47 model """transe""" +793 47 loss """bceaftersigmoid""" +793 47 regularizer """no""" +793 47 optimizer """adam""" +793 47 training_loop """owa""" +793 47 negative_sampler """basic""" +793 47 evaluator """rankbased""" +793 48 dataset 
"""fb15k237""" +793 48 model """transe""" +793 48 loss """bceaftersigmoid""" +793 48 regularizer """no""" +793 48 optimizer """adam""" +793 48 training_loop """owa""" +793 48 negative_sampler """basic""" +793 48 evaluator """rankbased""" +793 49 dataset """fb15k237""" +793 49 model """transe""" +793 49 loss """bceaftersigmoid""" +793 49 regularizer """no""" +793 49 optimizer """adam""" +793 49 training_loop """owa""" +793 49 negative_sampler """basic""" +793 49 evaluator """rankbased""" +793 50 dataset """fb15k237""" +793 50 model """transe""" +793 50 loss """bceaftersigmoid""" +793 50 regularizer """no""" +793 50 optimizer """adam""" +793 50 training_loop """owa""" +793 50 negative_sampler """basic""" +793 50 evaluator """rankbased""" +793 51 dataset """fb15k237""" +793 51 model """transe""" +793 51 loss """bceaftersigmoid""" +793 51 regularizer """no""" +793 51 optimizer """adam""" +793 51 training_loop """owa""" +793 51 negative_sampler """basic""" +793 51 evaluator """rankbased""" +793 52 dataset """fb15k237""" +793 52 model """transe""" +793 52 loss """bceaftersigmoid""" +793 52 regularizer """no""" +793 52 optimizer """adam""" +793 52 training_loop """owa""" +793 52 negative_sampler """basic""" +793 52 evaluator """rankbased""" +793 53 dataset """fb15k237""" +793 53 model """transe""" +793 53 loss """bceaftersigmoid""" +793 53 regularizer """no""" +793 53 optimizer """adam""" +793 53 training_loop """owa""" +793 53 negative_sampler """basic""" +793 53 evaluator """rankbased""" +793 54 dataset """fb15k237""" +793 54 model """transe""" +793 54 loss """bceaftersigmoid""" +793 54 regularizer """no""" +793 54 optimizer """adam""" +793 54 training_loop """owa""" +793 54 negative_sampler """basic""" +793 54 evaluator """rankbased""" +793 55 dataset """fb15k237""" +793 55 model """transe""" +793 55 loss """bceaftersigmoid""" +793 55 regularizer """no""" +793 55 optimizer """adam""" +793 55 training_loop """owa""" +793 55 negative_sampler """basic""" +793 55 evaluator 
"""rankbased""" +793 56 dataset """fb15k237""" +793 56 model """transe""" +793 56 loss """bceaftersigmoid""" +793 56 regularizer """no""" +793 56 optimizer """adam""" +793 56 training_loop """owa""" +793 56 negative_sampler """basic""" +793 56 evaluator """rankbased""" +793 57 dataset """fb15k237""" +793 57 model """transe""" +793 57 loss """bceaftersigmoid""" +793 57 regularizer """no""" +793 57 optimizer """adam""" +793 57 training_loop """owa""" +793 57 negative_sampler """basic""" +793 57 evaluator """rankbased""" +793 58 dataset """fb15k237""" +793 58 model """transe""" +793 58 loss """bceaftersigmoid""" +793 58 regularizer """no""" +793 58 optimizer """adam""" +793 58 training_loop """owa""" +793 58 negative_sampler """basic""" +793 58 evaluator """rankbased""" +793 59 dataset """fb15k237""" +793 59 model """transe""" +793 59 loss """bceaftersigmoid""" +793 59 regularizer """no""" +793 59 optimizer """adam""" +793 59 training_loop """owa""" +793 59 negative_sampler """basic""" +793 59 evaluator """rankbased""" +793 60 dataset """fb15k237""" +793 60 model """transe""" +793 60 loss """bceaftersigmoid""" +793 60 regularizer """no""" +793 60 optimizer """adam""" +793 60 training_loop """owa""" +793 60 negative_sampler """basic""" +793 60 evaluator """rankbased""" +793 61 dataset """fb15k237""" +793 61 model """transe""" +793 61 loss """bceaftersigmoid""" +793 61 regularizer """no""" +793 61 optimizer """adam""" +793 61 training_loop """owa""" +793 61 negative_sampler """basic""" +793 61 evaluator """rankbased""" +793 62 dataset """fb15k237""" +793 62 model """transe""" +793 62 loss """bceaftersigmoid""" +793 62 regularizer """no""" +793 62 optimizer """adam""" +793 62 training_loop """owa""" +793 62 negative_sampler """basic""" +793 62 evaluator """rankbased""" +793 63 dataset """fb15k237""" +793 63 model """transe""" +793 63 loss """bceaftersigmoid""" +793 63 regularizer """no""" +793 63 optimizer """adam""" +793 63 training_loop """owa""" +793 63 
negative_sampler """basic""" +793 63 evaluator """rankbased""" +793 64 dataset """fb15k237""" +793 64 model """transe""" +793 64 loss """bceaftersigmoid""" +793 64 regularizer """no""" +793 64 optimizer """adam""" +793 64 training_loop """owa""" +793 64 negative_sampler """basic""" +793 64 evaluator """rankbased""" +793 65 dataset """fb15k237""" +793 65 model """transe""" +793 65 loss """bceaftersigmoid""" +793 65 regularizer """no""" +793 65 optimizer """adam""" +793 65 training_loop """owa""" +793 65 negative_sampler """basic""" +793 65 evaluator """rankbased""" +793 66 dataset """fb15k237""" +793 66 model """transe""" +793 66 loss """bceaftersigmoid""" +793 66 regularizer """no""" +793 66 optimizer """adam""" +793 66 training_loop """owa""" +793 66 negative_sampler """basic""" +793 66 evaluator """rankbased""" +793 67 dataset """fb15k237""" +793 67 model """transe""" +793 67 loss """bceaftersigmoid""" +793 67 regularizer """no""" +793 67 optimizer """adam""" +793 67 training_loop """owa""" +793 67 negative_sampler """basic""" +793 67 evaluator """rankbased""" +793 68 dataset """fb15k237""" +793 68 model """transe""" +793 68 loss """bceaftersigmoid""" +793 68 regularizer """no""" +793 68 optimizer """adam""" +793 68 training_loop """owa""" +793 68 negative_sampler """basic""" +793 68 evaluator """rankbased""" +793 69 dataset """fb15k237""" +793 69 model """transe""" +793 69 loss """bceaftersigmoid""" +793 69 regularizer """no""" +793 69 optimizer """adam""" +793 69 training_loop """owa""" +793 69 negative_sampler """basic""" +793 69 evaluator """rankbased""" +793 70 dataset """fb15k237""" +793 70 model """transe""" +793 70 loss """bceaftersigmoid""" +793 70 regularizer """no""" +793 70 optimizer """adam""" +793 70 training_loop """owa""" +793 70 negative_sampler """basic""" +793 70 evaluator """rankbased""" +793 71 dataset """fb15k237""" +793 71 model """transe""" +793 71 loss """bceaftersigmoid""" +793 71 regularizer """no""" +793 71 optimizer """adam""" +793 71 
training_loop """owa""" +793 71 negative_sampler """basic""" +793 71 evaluator """rankbased""" +793 72 dataset """fb15k237""" +793 72 model """transe""" +793 72 loss """bceaftersigmoid""" +793 72 regularizer """no""" +793 72 optimizer """adam""" +793 72 training_loop """owa""" +793 72 negative_sampler """basic""" +793 72 evaluator """rankbased""" +793 73 dataset """fb15k237""" +793 73 model """transe""" +793 73 loss """bceaftersigmoid""" +793 73 regularizer """no""" +793 73 optimizer """adam""" +793 73 training_loop """owa""" +793 73 negative_sampler """basic""" +793 73 evaluator """rankbased""" +793 74 dataset """fb15k237""" +793 74 model """transe""" +793 74 loss """bceaftersigmoid""" +793 74 regularizer """no""" +793 74 optimizer """adam""" +793 74 training_loop """owa""" +793 74 negative_sampler """basic""" +793 74 evaluator """rankbased""" +793 75 dataset """fb15k237""" +793 75 model """transe""" +793 75 loss """bceaftersigmoid""" +793 75 regularizer """no""" +793 75 optimizer """adam""" +793 75 training_loop """owa""" +793 75 negative_sampler """basic""" +793 75 evaluator """rankbased""" +793 76 dataset """fb15k237""" +793 76 model """transe""" +793 76 loss """bceaftersigmoid""" +793 76 regularizer """no""" +793 76 optimizer """adam""" +793 76 training_loop """owa""" +793 76 negative_sampler """basic""" +793 76 evaluator """rankbased""" +793 77 dataset """fb15k237""" +793 77 model """transe""" +793 77 loss """bceaftersigmoid""" +793 77 regularizer """no""" +793 77 optimizer """adam""" +793 77 training_loop """owa""" +793 77 negative_sampler """basic""" +793 77 evaluator """rankbased""" +793 78 dataset """fb15k237""" +793 78 model """transe""" +793 78 loss """bceaftersigmoid""" +793 78 regularizer """no""" +793 78 optimizer """adam""" +793 78 training_loop """owa""" +793 78 negative_sampler """basic""" +793 78 evaluator """rankbased""" +793 79 dataset """fb15k237""" +793 79 model """transe""" +793 79 loss """bceaftersigmoid""" +793 79 regularizer """no""" +793 
79 optimizer """adam""" +793 79 training_loop """owa""" +793 79 negative_sampler """basic""" +793 79 evaluator """rankbased""" +793 80 dataset """fb15k237""" +793 80 model """transe""" +793 80 loss """bceaftersigmoid""" +793 80 regularizer """no""" +793 80 optimizer """adam""" +793 80 training_loop """owa""" +793 80 negative_sampler """basic""" +793 80 evaluator """rankbased""" +793 81 dataset """fb15k237""" +793 81 model """transe""" +793 81 loss """bceaftersigmoid""" +793 81 regularizer """no""" +793 81 optimizer """adam""" +793 81 training_loop """owa""" +793 81 negative_sampler """basic""" +793 81 evaluator """rankbased""" +793 82 dataset """fb15k237""" +793 82 model """transe""" +793 82 loss """bceaftersigmoid""" +793 82 regularizer """no""" +793 82 optimizer """adam""" +793 82 training_loop """owa""" +793 82 negative_sampler """basic""" +793 82 evaluator """rankbased""" +793 83 dataset """fb15k237""" +793 83 model """transe""" +793 83 loss """bceaftersigmoid""" +793 83 regularizer """no""" +793 83 optimizer """adam""" +793 83 training_loop """owa""" +793 83 negative_sampler """basic""" +793 83 evaluator """rankbased""" +793 84 dataset """fb15k237""" +793 84 model """transe""" +793 84 loss """bceaftersigmoid""" +793 84 regularizer """no""" +793 84 optimizer """adam""" +793 84 training_loop """owa""" +793 84 negative_sampler """basic""" +793 84 evaluator """rankbased""" +793 85 dataset """fb15k237""" +793 85 model """transe""" +793 85 loss """bceaftersigmoid""" +793 85 regularizer """no""" +793 85 optimizer """adam""" +793 85 training_loop """owa""" +793 85 negative_sampler """basic""" +793 85 evaluator """rankbased""" +794 1 model.embedding_dim 1.0 +794 1 model.scoring_fct_norm 2.0 +794 1 optimizer.lr 0.0026547108844451562 +794 1 negative_sampler.num_negs_per_pos 48.0 +794 1 training.batch_size 2.0 +794 2 model.embedding_dim 0.0 +794 2 model.scoring_fct_norm 1.0 +794 2 optimizer.lr 0.0025692928161674274 +794 2 negative_sampler.num_negs_per_pos 51.0 +794 2 
training.batch_size 2.0 +794 3 model.embedding_dim 1.0 +794 3 model.scoring_fct_norm 2.0 +794 3 optimizer.lr 0.003536593921188249 +794 3 negative_sampler.num_negs_per_pos 19.0 +794 3 training.batch_size 1.0 +794 4 model.embedding_dim 0.0 +794 4 model.scoring_fct_norm 2.0 +794 4 optimizer.lr 0.02053722201338109 +794 4 negative_sampler.num_negs_per_pos 37.0 +794 4 training.batch_size 1.0 +794 5 model.embedding_dim 0.0 +794 5 model.scoring_fct_norm 1.0 +794 5 optimizer.lr 0.003626784456685818 +794 5 negative_sampler.num_negs_per_pos 36.0 +794 5 training.batch_size 2.0 +794 6 model.embedding_dim 0.0 +794 6 model.scoring_fct_norm 2.0 +794 6 optimizer.lr 0.004522785564061274 +794 6 negative_sampler.num_negs_per_pos 54.0 +794 6 training.batch_size 2.0 +794 7 model.embedding_dim 0.0 +794 7 model.scoring_fct_norm 1.0 +794 7 optimizer.lr 0.015170186758991719 +794 7 negative_sampler.num_negs_per_pos 1.0 +794 7 training.batch_size 0.0 +794 8 model.embedding_dim 0.0 +794 8 model.scoring_fct_norm 2.0 +794 8 optimizer.lr 0.0021525281834587195 +794 8 negative_sampler.num_negs_per_pos 24.0 +794 8 training.batch_size 2.0 +794 9 model.embedding_dim 2.0 +794 9 model.scoring_fct_norm 1.0 +794 9 optimizer.lr 0.02515612503617327 +794 9 negative_sampler.num_negs_per_pos 38.0 +794 9 training.batch_size 0.0 +794 10 model.embedding_dim 2.0 +794 10 model.scoring_fct_norm 1.0 +794 10 optimizer.lr 0.01521855326452068 +794 10 negative_sampler.num_negs_per_pos 46.0 +794 10 training.batch_size 1.0 +794 11 model.embedding_dim 0.0 +794 11 model.scoring_fct_norm 2.0 +794 11 optimizer.lr 0.005505287665441185 +794 11 negative_sampler.num_negs_per_pos 89.0 +794 11 training.batch_size 2.0 +794 12 model.embedding_dim 0.0 +794 12 model.scoring_fct_norm 2.0 +794 12 optimizer.lr 0.028423355136873173 +794 12 negative_sampler.num_negs_per_pos 6.0 +794 12 training.batch_size 1.0 +794 13 model.embedding_dim 2.0 +794 13 model.scoring_fct_norm 1.0 +794 13 optimizer.lr 0.0021475614189548067 +794 13 
negative_sampler.num_negs_per_pos 20.0 +794 13 training.batch_size 2.0 +794 14 model.embedding_dim 2.0 +794 14 model.scoring_fct_norm 1.0 +794 14 optimizer.lr 0.0014120939168257513 +794 14 negative_sampler.num_negs_per_pos 86.0 +794 14 training.batch_size 0.0 +794 15 model.embedding_dim 1.0 +794 15 model.scoring_fct_norm 2.0 +794 15 optimizer.lr 0.02292738612970511 +794 15 negative_sampler.num_negs_per_pos 23.0 +794 15 training.batch_size 0.0 +794 16 model.embedding_dim 2.0 +794 16 model.scoring_fct_norm 1.0 +794 16 optimizer.lr 0.07704247922791614 +794 16 negative_sampler.num_negs_per_pos 21.0 +794 16 training.batch_size 0.0 +794 17 model.embedding_dim 2.0 +794 17 model.scoring_fct_norm 2.0 +794 17 optimizer.lr 0.0027488049919365065 +794 17 negative_sampler.num_negs_per_pos 92.0 +794 17 training.batch_size 2.0 +794 18 model.embedding_dim 2.0 +794 18 model.scoring_fct_norm 2.0 +794 18 optimizer.lr 0.0013095301300684499 +794 18 negative_sampler.num_negs_per_pos 35.0 +794 18 training.batch_size 2.0 +794 19 model.embedding_dim 1.0 +794 19 model.scoring_fct_norm 1.0 +794 19 optimizer.lr 0.0420675052960439 +794 19 negative_sampler.num_negs_per_pos 78.0 +794 19 training.batch_size 1.0 +794 20 model.embedding_dim 0.0 +794 20 model.scoring_fct_norm 2.0 +794 20 optimizer.lr 0.016017828377778855 +794 20 negative_sampler.num_negs_per_pos 46.0 +794 20 training.batch_size 1.0 +794 21 model.embedding_dim 1.0 +794 21 model.scoring_fct_norm 1.0 +794 21 optimizer.lr 0.0022879526245491817 +794 21 negative_sampler.num_negs_per_pos 87.0 +794 21 training.batch_size 2.0 +794 22 model.embedding_dim 1.0 +794 22 model.scoring_fct_norm 1.0 +794 22 optimizer.lr 0.0011911433566879463 +794 22 negative_sampler.num_negs_per_pos 81.0 +794 22 training.batch_size 1.0 +794 23 model.embedding_dim 1.0 +794 23 model.scoring_fct_norm 1.0 +794 23 optimizer.lr 0.012323887318442474 +794 23 negative_sampler.num_negs_per_pos 50.0 +794 23 training.batch_size 2.0 +794 24 model.embedding_dim 1.0 +794 24 
model.scoring_fct_norm 2.0 +794 24 optimizer.lr 0.008041693506660267 +794 24 negative_sampler.num_negs_per_pos 44.0 +794 24 training.batch_size 1.0 +794 25 model.embedding_dim 2.0 +794 25 model.scoring_fct_norm 2.0 +794 25 optimizer.lr 0.001513687665304469 +794 25 negative_sampler.num_negs_per_pos 22.0 +794 25 training.batch_size 1.0 +794 26 model.embedding_dim 1.0 +794 26 model.scoring_fct_norm 1.0 +794 26 optimizer.lr 0.06621391862277991 +794 26 negative_sampler.num_negs_per_pos 0.0 +794 26 training.batch_size 0.0 +794 27 model.embedding_dim 0.0 +794 27 model.scoring_fct_norm 2.0 +794 27 optimizer.lr 0.003262244212223906 +794 27 negative_sampler.num_negs_per_pos 52.0 +794 27 training.batch_size 1.0 +794 28 model.embedding_dim 0.0 +794 28 model.scoring_fct_norm 1.0 +794 28 optimizer.lr 0.025326291377227892 +794 28 negative_sampler.num_negs_per_pos 72.0 +794 28 training.batch_size 2.0 +794 29 model.embedding_dim 1.0 +794 29 model.scoring_fct_norm 2.0 +794 29 optimizer.lr 0.0011383845818105488 +794 29 negative_sampler.num_negs_per_pos 89.0 +794 29 training.batch_size 2.0 +794 30 model.embedding_dim 1.0 +794 30 model.scoring_fct_norm 2.0 +794 30 optimizer.lr 0.007367782671546626 +794 30 negative_sampler.num_negs_per_pos 42.0 +794 30 training.batch_size 0.0 +794 31 model.embedding_dim 0.0 +794 31 model.scoring_fct_norm 1.0 +794 31 optimizer.lr 0.08890155173707977 +794 31 negative_sampler.num_negs_per_pos 74.0 +794 31 training.batch_size 1.0 +794 32 model.embedding_dim 2.0 +794 32 model.scoring_fct_norm 2.0 +794 32 optimizer.lr 0.00706428523478732 +794 32 negative_sampler.num_negs_per_pos 8.0 +794 32 training.batch_size 0.0 +794 33 model.embedding_dim 1.0 +794 33 model.scoring_fct_norm 1.0 +794 33 optimizer.lr 0.028550421226102955 +794 33 negative_sampler.num_negs_per_pos 58.0 +794 33 training.batch_size 0.0 +794 34 model.embedding_dim 1.0 +794 34 model.scoring_fct_norm 1.0 +794 34 optimizer.lr 0.0017030426206358935 +794 34 negative_sampler.num_negs_per_pos 75.0 +794 
34 training.batch_size 1.0 +794 35 model.embedding_dim 0.0 +794 35 model.scoring_fct_norm 2.0 +794 35 optimizer.lr 0.04691432079722269 +794 35 negative_sampler.num_negs_per_pos 72.0 +794 35 training.batch_size 1.0 +794 36 model.embedding_dim 0.0 +794 36 model.scoring_fct_norm 2.0 +794 36 optimizer.lr 0.06335169630517429 +794 36 negative_sampler.num_negs_per_pos 81.0 +794 36 training.batch_size 0.0 +794 37 model.embedding_dim 0.0 +794 37 model.scoring_fct_norm 2.0 +794 37 optimizer.lr 0.0020109697991805026 +794 37 negative_sampler.num_negs_per_pos 95.0 +794 37 training.batch_size 2.0 +794 38 model.embedding_dim 1.0 +794 38 model.scoring_fct_norm 1.0 +794 38 optimizer.lr 0.01422069599620201 +794 38 negative_sampler.num_negs_per_pos 24.0 +794 38 training.batch_size 0.0 +794 39 model.embedding_dim 1.0 +794 39 model.scoring_fct_norm 1.0 +794 39 optimizer.lr 0.012029775939213205 +794 39 negative_sampler.num_negs_per_pos 18.0 +794 39 training.batch_size 1.0 +794 40 model.embedding_dim 1.0 +794 40 model.scoring_fct_norm 2.0 +794 40 optimizer.lr 0.0011024859688390717 +794 40 negative_sampler.num_negs_per_pos 63.0 +794 40 training.batch_size 1.0 +794 41 model.embedding_dim 1.0 +794 41 model.scoring_fct_norm 1.0 +794 41 optimizer.lr 0.0012092366343932509 +794 41 negative_sampler.num_negs_per_pos 19.0 +794 41 training.batch_size 1.0 +794 42 model.embedding_dim 0.0 +794 42 model.scoring_fct_norm 2.0 +794 42 optimizer.lr 0.027271673068824633 +794 42 negative_sampler.num_negs_per_pos 56.0 +794 42 training.batch_size 2.0 +794 43 model.embedding_dim 1.0 +794 43 model.scoring_fct_norm 1.0 +794 43 optimizer.lr 0.0319993333912897 +794 43 negative_sampler.num_negs_per_pos 52.0 +794 43 training.batch_size 1.0 +794 44 model.embedding_dim 0.0 +794 44 model.scoring_fct_norm 1.0 +794 44 optimizer.lr 0.006783998419636872 +794 44 negative_sampler.num_negs_per_pos 29.0 +794 44 training.batch_size 0.0 +794 45 model.embedding_dim 2.0 +794 45 model.scoring_fct_norm 1.0 +794 45 optimizer.lr 
0.011620923734810003 +794 45 negative_sampler.num_negs_per_pos 46.0 +794 45 training.batch_size 0.0 +794 46 model.embedding_dim 0.0 +794 46 model.scoring_fct_norm 2.0 +794 46 optimizer.lr 0.011272494511665546 +794 46 negative_sampler.num_negs_per_pos 14.0 +794 46 training.batch_size 0.0 +794 47 model.embedding_dim 0.0 +794 47 model.scoring_fct_norm 1.0 +794 47 optimizer.lr 0.001766845532986542 +794 47 negative_sampler.num_negs_per_pos 76.0 +794 47 training.batch_size 2.0 +794 48 model.embedding_dim 2.0 +794 48 model.scoring_fct_norm 1.0 +794 48 optimizer.lr 0.015202169436464524 +794 48 negative_sampler.num_negs_per_pos 83.0 +794 48 training.batch_size 2.0 +794 49 model.embedding_dim 2.0 +794 49 model.scoring_fct_norm 1.0 +794 49 optimizer.lr 0.019626178554656967 +794 49 negative_sampler.num_negs_per_pos 81.0 +794 49 training.batch_size 2.0 +794 50 model.embedding_dim 0.0 +794 50 model.scoring_fct_norm 1.0 +794 50 optimizer.lr 0.0015435336255520179 +794 50 negative_sampler.num_negs_per_pos 13.0 +794 50 training.batch_size 2.0 +794 51 model.embedding_dim 1.0 +794 51 model.scoring_fct_norm 1.0 +794 51 optimizer.lr 0.028873129208871536 +794 51 negative_sampler.num_negs_per_pos 87.0 +794 51 training.batch_size 1.0 +794 52 model.embedding_dim 2.0 +794 52 model.scoring_fct_norm 2.0 +794 52 optimizer.lr 0.0025912177856407545 +794 52 negative_sampler.num_negs_per_pos 0.0 +794 52 training.batch_size 2.0 +794 53 model.embedding_dim 2.0 +794 53 model.scoring_fct_norm 2.0 +794 53 optimizer.lr 0.0037185308327748047 +794 53 negative_sampler.num_negs_per_pos 30.0 +794 53 training.batch_size 1.0 +794 54 model.embedding_dim 1.0 +794 54 model.scoring_fct_norm 1.0 +794 54 optimizer.lr 0.013720163989437298 +794 54 negative_sampler.num_negs_per_pos 96.0 +794 54 training.batch_size 1.0 +794 55 model.embedding_dim 0.0 +794 55 model.scoring_fct_norm 2.0 +794 55 optimizer.lr 0.0018484613676358042 +794 55 negative_sampler.num_negs_per_pos 18.0 +794 55 training.batch_size 0.0 +794 56 
model.embedding_dim 0.0 +794 56 model.scoring_fct_norm 2.0 +794 56 optimizer.lr 0.004230983324484438 +794 56 negative_sampler.num_negs_per_pos 28.0 +794 56 training.batch_size 1.0 +794 57 model.embedding_dim 1.0 +794 57 model.scoring_fct_norm 2.0 +794 57 optimizer.lr 0.00963706260496459 +794 57 negative_sampler.num_negs_per_pos 23.0 +794 57 training.batch_size 1.0 +794 58 model.embedding_dim 0.0 +794 58 model.scoring_fct_norm 1.0 +794 58 optimizer.lr 0.04443169638485827 +794 58 negative_sampler.num_negs_per_pos 97.0 +794 58 training.batch_size 0.0 +794 59 model.embedding_dim 0.0 +794 59 model.scoring_fct_norm 1.0 +794 59 optimizer.lr 0.0020918220613323547 +794 59 negative_sampler.num_negs_per_pos 75.0 +794 59 training.batch_size 2.0 +794 60 model.embedding_dim 1.0 +794 60 model.scoring_fct_norm 2.0 +794 60 optimizer.lr 0.002093594612890463 +794 60 negative_sampler.num_negs_per_pos 23.0 +794 60 training.batch_size 1.0 +794 61 model.embedding_dim 2.0 +794 61 model.scoring_fct_norm 2.0 +794 61 optimizer.lr 0.01583672598736287 +794 61 negative_sampler.num_negs_per_pos 88.0 +794 61 training.batch_size 1.0 +794 62 model.embedding_dim 0.0 +794 62 model.scoring_fct_norm 2.0 +794 62 optimizer.lr 0.015232946877538827 +794 62 negative_sampler.num_negs_per_pos 28.0 +794 62 training.batch_size 0.0 +794 63 model.embedding_dim 2.0 +794 63 model.scoring_fct_norm 2.0 +794 63 optimizer.lr 0.01588549557526308 +794 63 negative_sampler.num_negs_per_pos 16.0 +794 63 training.batch_size 2.0 +794 64 model.embedding_dim 0.0 +794 64 model.scoring_fct_norm 2.0 +794 64 optimizer.lr 0.005603472809848146 +794 64 negative_sampler.num_negs_per_pos 86.0 +794 64 training.batch_size 1.0 +794 65 model.embedding_dim 0.0 +794 65 model.scoring_fct_norm 2.0 +794 65 optimizer.lr 0.09130168806705932 +794 65 negative_sampler.num_negs_per_pos 60.0 +794 65 training.batch_size 2.0 +794 66 model.embedding_dim 0.0 +794 66 model.scoring_fct_norm 1.0 +794 66 optimizer.lr 0.014642488369719302 +794 66 
negative_sampler.num_negs_per_pos 44.0 +794 66 training.batch_size 1.0 +794 67 model.embedding_dim 1.0 +794 67 model.scoring_fct_norm 1.0 +794 67 optimizer.lr 0.05247972256658868 +794 67 negative_sampler.num_negs_per_pos 70.0 +794 67 training.batch_size 0.0 +794 68 model.embedding_dim 0.0 +794 68 model.scoring_fct_norm 1.0 +794 68 optimizer.lr 0.0024752068372830747 +794 68 negative_sampler.num_negs_per_pos 68.0 +794 68 training.batch_size 0.0 +794 69 model.embedding_dim 1.0 +794 69 model.scoring_fct_norm 1.0 +794 69 optimizer.lr 0.05052856936139801 +794 69 negative_sampler.num_negs_per_pos 57.0 +794 69 training.batch_size 2.0 +794 70 model.embedding_dim 1.0 +794 70 model.scoring_fct_norm 2.0 +794 70 optimizer.lr 0.0016051413803985003 +794 70 negative_sampler.num_negs_per_pos 28.0 +794 70 training.batch_size 2.0 +794 71 model.embedding_dim 1.0 +794 71 model.scoring_fct_norm 1.0 +794 71 optimizer.lr 0.02978920227727338 +794 71 negative_sampler.num_negs_per_pos 24.0 +794 71 training.batch_size 0.0 +794 72 model.embedding_dim 2.0 +794 72 model.scoring_fct_norm 1.0 +794 72 optimizer.lr 0.04192324778693118 +794 72 negative_sampler.num_negs_per_pos 93.0 +794 72 training.batch_size 2.0 +794 73 model.embedding_dim 0.0 +794 73 model.scoring_fct_norm 2.0 +794 73 optimizer.lr 0.00459380582852189 +794 73 negative_sampler.num_negs_per_pos 9.0 +794 73 training.batch_size 0.0 +794 74 model.embedding_dim 2.0 +794 74 model.scoring_fct_norm 1.0 +794 74 optimizer.lr 0.08541923916756089 +794 74 negative_sampler.num_negs_per_pos 64.0 +794 74 training.batch_size 1.0 +794 75 model.embedding_dim 2.0 +794 75 model.scoring_fct_norm 2.0 +794 75 optimizer.lr 0.002591684602566673 +794 75 negative_sampler.num_negs_per_pos 66.0 +794 75 training.batch_size 1.0 +794 76 model.embedding_dim 0.0 +794 76 model.scoring_fct_norm 2.0 +794 76 optimizer.lr 0.020779282783146974 +794 76 negative_sampler.num_negs_per_pos 32.0 +794 76 training.batch_size 0.0 +794 77 model.embedding_dim 2.0 +794 77 
model.scoring_fct_norm 1.0 +794 77 optimizer.lr 0.005680108907072166 +794 77 negative_sampler.num_negs_per_pos 58.0 +794 77 training.batch_size 2.0 +794 78 model.embedding_dim 0.0 +794 78 model.scoring_fct_norm 2.0 +794 78 optimizer.lr 0.04172829601682044 +794 78 negative_sampler.num_negs_per_pos 38.0 +794 78 training.batch_size 0.0 +794 79 model.embedding_dim 2.0 +794 79 model.scoring_fct_norm 2.0 +794 79 optimizer.lr 0.0016398086306281614 +794 79 negative_sampler.num_negs_per_pos 65.0 +794 79 training.batch_size 1.0 +794 80 model.embedding_dim 0.0 +794 80 model.scoring_fct_norm 1.0 +794 80 optimizer.lr 0.02057185739981351 +794 80 negative_sampler.num_negs_per_pos 61.0 +794 80 training.batch_size 2.0 +794 81 model.embedding_dim 2.0 +794 81 model.scoring_fct_norm 1.0 +794 81 optimizer.lr 0.012943876542509882 +794 81 negative_sampler.num_negs_per_pos 70.0 +794 81 training.batch_size 1.0 +794 82 model.embedding_dim 2.0 +794 82 model.scoring_fct_norm 1.0 +794 82 optimizer.lr 0.0374091550010636 +794 82 negative_sampler.num_negs_per_pos 28.0 +794 82 training.batch_size 1.0 +794 83 model.embedding_dim 1.0 +794 83 model.scoring_fct_norm 1.0 +794 83 optimizer.lr 0.0017630103533293722 +794 83 negative_sampler.num_negs_per_pos 23.0 +794 83 training.batch_size 1.0 +794 84 model.embedding_dim 1.0 +794 84 model.scoring_fct_norm 2.0 +794 84 optimizer.lr 0.018293809398296584 +794 84 negative_sampler.num_negs_per_pos 26.0 +794 84 training.batch_size 2.0 +794 1 dataset """fb15k237""" +794 1 model """transe""" +794 1 loss """softplus""" +794 1 regularizer """no""" +794 1 optimizer """adam""" +794 1 training_loop """owa""" +794 1 negative_sampler """basic""" +794 1 evaluator """rankbased""" +794 2 dataset """fb15k237""" +794 2 model """transe""" +794 2 loss """softplus""" +794 2 regularizer """no""" +794 2 optimizer """adam""" +794 2 training_loop """owa""" +794 2 negative_sampler """basic""" +794 2 evaluator """rankbased""" +794 3 dataset """fb15k237""" +794 3 model """transe""" 
+794 3 loss """softplus""" +794 3 regularizer """no""" +794 3 optimizer """adam""" +794 3 training_loop """owa""" +794 3 negative_sampler """basic""" +794 3 evaluator """rankbased""" +794 4 dataset """fb15k237""" +794 4 model """transe""" +794 4 loss """softplus""" +794 4 regularizer """no""" +794 4 optimizer """adam""" +794 4 training_loop """owa""" +794 4 negative_sampler """basic""" +794 4 evaluator """rankbased""" +794 5 dataset """fb15k237""" +794 5 model """transe""" +794 5 loss """softplus""" +794 5 regularizer """no""" +794 5 optimizer """adam""" +794 5 training_loop """owa""" +794 5 negative_sampler """basic""" +794 5 evaluator """rankbased""" +794 6 dataset """fb15k237""" +794 6 model """transe""" +794 6 loss """softplus""" +794 6 regularizer """no""" +794 6 optimizer """adam""" +794 6 training_loop """owa""" +794 6 negative_sampler """basic""" +794 6 evaluator """rankbased""" +794 7 dataset """fb15k237""" +794 7 model """transe""" +794 7 loss """softplus""" +794 7 regularizer """no""" +794 7 optimizer """adam""" +794 7 training_loop """owa""" +794 7 negative_sampler """basic""" +794 7 evaluator """rankbased""" +794 8 dataset """fb15k237""" +794 8 model """transe""" +794 8 loss """softplus""" +794 8 regularizer """no""" +794 8 optimizer """adam""" +794 8 training_loop """owa""" +794 8 negative_sampler """basic""" +794 8 evaluator """rankbased""" +794 9 dataset """fb15k237""" +794 9 model """transe""" +794 9 loss """softplus""" +794 9 regularizer """no""" +794 9 optimizer """adam""" +794 9 training_loop """owa""" +794 9 negative_sampler """basic""" +794 9 evaluator """rankbased""" +794 10 dataset """fb15k237""" +794 10 model """transe""" +794 10 loss """softplus""" +794 10 regularizer """no""" +794 10 optimizer """adam""" +794 10 training_loop """owa""" +794 10 negative_sampler """basic""" +794 10 evaluator """rankbased""" +794 11 dataset """fb15k237""" +794 11 model """transe""" +794 11 loss """softplus""" +794 11 regularizer """no""" +794 11 optimizer 
"""adam""" +794 11 training_loop """owa""" +794 11 negative_sampler """basic""" +794 11 evaluator """rankbased""" +794 12 dataset """fb15k237""" +794 12 model """transe""" +794 12 loss """softplus""" +794 12 regularizer """no""" +794 12 optimizer """adam""" +794 12 training_loop """owa""" +794 12 negative_sampler """basic""" +794 12 evaluator """rankbased""" +794 13 dataset """fb15k237""" +794 13 model """transe""" +794 13 loss """softplus""" +794 13 regularizer """no""" +794 13 optimizer """adam""" +794 13 training_loop """owa""" +794 13 negative_sampler """basic""" +794 13 evaluator """rankbased""" +794 14 dataset """fb15k237""" +794 14 model """transe""" +794 14 loss """softplus""" +794 14 regularizer """no""" +794 14 optimizer """adam""" +794 14 training_loop """owa""" +794 14 negative_sampler """basic""" +794 14 evaluator """rankbased""" +794 15 dataset """fb15k237""" +794 15 model """transe""" +794 15 loss """softplus""" +794 15 regularizer """no""" +794 15 optimizer """adam""" +794 15 training_loop """owa""" +794 15 negative_sampler """basic""" +794 15 evaluator """rankbased""" +794 16 dataset """fb15k237""" +794 16 model """transe""" +794 16 loss """softplus""" +794 16 regularizer """no""" +794 16 optimizer """adam""" +794 16 training_loop """owa""" +794 16 negative_sampler """basic""" +794 16 evaluator """rankbased""" +794 17 dataset """fb15k237""" +794 17 model """transe""" +794 17 loss """softplus""" +794 17 regularizer """no""" +794 17 optimizer """adam""" +794 17 training_loop """owa""" +794 17 negative_sampler """basic""" +794 17 evaluator """rankbased""" +794 18 dataset """fb15k237""" +794 18 model """transe""" +794 18 loss """softplus""" +794 18 regularizer """no""" +794 18 optimizer """adam""" +794 18 training_loop """owa""" +794 18 negative_sampler """basic""" +794 18 evaluator """rankbased""" +794 19 dataset """fb15k237""" +794 19 model """transe""" +794 19 loss """softplus""" +794 19 regularizer """no""" +794 19 optimizer """adam""" +794 19 
training_loop """owa""" +794 19 negative_sampler """basic""" +794 19 evaluator """rankbased""" +794 20 dataset """fb15k237""" +794 20 model """transe""" +794 20 loss """softplus""" +794 20 regularizer """no""" +794 20 optimizer """adam""" +794 20 training_loop """owa""" +794 20 negative_sampler """basic""" +794 20 evaluator """rankbased""" +794 21 dataset """fb15k237""" +794 21 model """transe""" +794 21 loss """softplus""" +794 21 regularizer """no""" +794 21 optimizer """adam""" +794 21 training_loop """owa""" +794 21 negative_sampler """basic""" +794 21 evaluator """rankbased""" +794 22 dataset """fb15k237""" +794 22 model """transe""" +794 22 loss """softplus""" +794 22 regularizer """no""" +794 22 optimizer """adam""" +794 22 training_loop """owa""" +794 22 negative_sampler """basic""" +794 22 evaluator """rankbased""" +794 23 dataset """fb15k237""" +794 23 model """transe""" +794 23 loss """softplus""" +794 23 regularizer """no""" +794 23 optimizer """adam""" +794 23 training_loop """owa""" +794 23 negative_sampler """basic""" +794 23 evaluator """rankbased""" +794 24 dataset """fb15k237""" +794 24 model """transe""" +794 24 loss """softplus""" +794 24 regularizer """no""" +794 24 optimizer """adam""" +794 24 training_loop """owa""" +794 24 negative_sampler """basic""" +794 24 evaluator """rankbased""" +794 25 dataset """fb15k237""" +794 25 model """transe""" +794 25 loss """softplus""" +794 25 regularizer """no""" +794 25 optimizer """adam""" +794 25 training_loop """owa""" +794 25 negative_sampler """basic""" +794 25 evaluator """rankbased""" +794 26 dataset """fb15k237""" +794 26 model """transe""" +794 26 loss """softplus""" +794 26 regularizer """no""" +794 26 optimizer """adam""" +794 26 training_loop """owa""" +794 26 negative_sampler """basic""" +794 26 evaluator """rankbased""" +794 27 dataset """fb15k237""" +794 27 model """transe""" +794 27 loss """softplus""" +794 27 regularizer """no""" +794 27 optimizer """adam""" +794 27 training_loop """owa""" 
+794 27 negative_sampler """basic""" +794 27 evaluator """rankbased""" +794 28 dataset """fb15k237""" +794 28 model """transe""" +794 28 loss """softplus""" +794 28 regularizer """no""" +794 28 optimizer """adam""" +794 28 training_loop """owa""" +794 28 negative_sampler """basic""" +794 28 evaluator """rankbased""" +794 29 dataset """fb15k237""" +794 29 model """transe""" +794 29 loss """softplus""" +794 29 regularizer """no""" +794 29 optimizer """adam""" +794 29 training_loop """owa""" +794 29 negative_sampler """basic""" +794 29 evaluator """rankbased""" +794 30 dataset """fb15k237""" +794 30 model """transe""" +794 30 loss """softplus""" +794 30 regularizer """no""" +794 30 optimizer """adam""" +794 30 training_loop """owa""" +794 30 negative_sampler """basic""" +794 30 evaluator """rankbased""" +794 31 dataset """fb15k237""" +794 31 model """transe""" +794 31 loss """softplus""" +794 31 regularizer """no""" +794 31 optimizer """adam""" +794 31 training_loop """owa""" +794 31 negative_sampler """basic""" +794 31 evaluator """rankbased""" +794 32 dataset """fb15k237""" +794 32 model """transe""" +794 32 loss """softplus""" +794 32 regularizer """no""" +794 32 optimizer """adam""" +794 32 training_loop """owa""" +794 32 negative_sampler """basic""" +794 32 evaluator """rankbased""" +794 33 dataset """fb15k237""" +794 33 model """transe""" +794 33 loss """softplus""" +794 33 regularizer """no""" +794 33 optimizer """adam""" +794 33 training_loop """owa""" +794 33 negative_sampler """basic""" +794 33 evaluator """rankbased""" +794 34 dataset """fb15k237""" +794 34 model """transe""" +794 34 loss """softplus""" +794 34 regularizer """no""" +794 34 optimizer """adam""" +794 34 training_loop """owa""" +794 34 negative_sampler """basic""" +794 34 evaluator """rankbased""" +794 35 dataset """fb15k237""" +794 35 model """transe""" +794 35 loss """softplus""" +794 35 regularizer """no""" +794 35 optimizer """adam""" +794 35 training_loop """owa""" +794 35 
negative_sampler """basic""" +794 35 evaluator """rankbased""" +794 36 dataset """fb15k237""" +794 36 model """transe""" +794 36 loss """softplus""" +794 36 regularizer """no""" +794 36 optimizer """adam""" +794 36 training_loop """owa""" +794 36 negative_sampler """basic""" +794 36 evaluator """rankbased""" +794 37 dataset """fb15k237""" +794 37 model """transe""" +794 37 loss """softplus""" +794 37 regularizer """no""" +794 37 optimizer """adam""" +794 37 training_loop """owa""" +794 37 negative_sampler """basic""" +794 37 evaluator """rankbased""" +794 38 dataset """fb15k237""" +794 38 model """transe""" +794 38 loss """softplus""" +794 38 regularizer """no""" +794 38 optimizer """adam""" +794 38 training_loop """owa""" +794 38 negative_sampler """basic""" +794 38 evaluator """rankbased""" +794 39 dataset """fb15k237""" +794 39 model """transe""" +794 39 loss """softplus""" +794 39 regularizer """no""" +794 39 optimizer """adam""" +794 39 training_loop """owa""" +794 39 negative_sampler """basic""" +794 39 evaluator """rankbased""" +794 40 dataset """fb15k237""" +794 40 model """transe""" +794 40 loss """softplus""" +794 40 regularizer """no""" +794 40 optimizer """adam""" +794 40 training_loop """owa""" +794 40 negative_sampler """basic""" +794 40 evaluator """rankbased""" +794 41 dataset """fb15k237""" +794 41 model """transe""" +794 41 loss """softplus""" +794 41 regularizer """no""" +794 41 optimizer """adam""" +794 41 training_loop """owa""" +794 41 negative_sampler """basic""" +794 41 evaluator """rankbased""" +794 42 dataset """fb15k237""" +794 42 model """transe""" +794 42 loss """softplus""" +794 42 regularizer """no""" +794 42 optimizer """adam""" +794 42 training_loop """owa""" +794 42 negative_sampler """basic""" +794 42 evaluator """rankbased""" +794 43 dataset """fb15k237""" +794 43 model """transe""" +794 43 loss """softplus""" +794 43 regularizer """no""" +794 43 optimizer """adam""" +794 43 training_loop """owa""" +794 43 negative_sampler 
"""basic""" +794 43 evaluator """rankbased""" +794 44 dataset """fb15k237""" +794 44 model """transe""" +794 44 loss """softplus""" +794 44 regularizer """no""" +794 44 optimizer """adam""" +794 44 training_loop """owa""" +794 44 negative_sampler """basic""" +794 44 evaluator """rankbased""" +794 45 dataset """fb15k237""" +794 45 model """transe""" +794 45 loss """softplus""" +794 45 regularizer """no""" +794 45 optimizer """adam""" +794 45 training_loop """owa""" +794 45 negative_sampler """basic""" +794 45 evaluator """rankbased""" +794 46 dataset """fb15k237""" +794 46 model """transe""" +794 46 loss """softplus""" +794 46 regularizer """no""" +794 46 optimizer """adam""" +794 46 training_loop """owa""" +794 46 negative_sampler """basic""" +794 46 evaluator """rankbased""" +794 47 dataset """fb15k237""" +794 47 model """transe""" +794 47 loss """softplus""" +794 47 regularizer """no""" +794 47 optimizer """adam""" +794 47 training_loop """owa""" +794 47 negative_sampler """basic""" +794 47 evaluator """rankbased""" +794 48 dataset """fb15k237""" +794 48 model """transe""" +794 48 loss """softplus""" +794 48 regularizer """no""" +794 48 optimizer """adam""" +794 48 training_loop """owa""" +794 48 negative_sampler """basic""" +794 48 evaluator """rankbased""" +794 49 dataset """fb15k237""" +794 49 model """transe""" +794 49 loss """softplus""" +794 49 regularizer """no""" +794 49 optimizer """adam""" +794 49 training_loop """owa""" +794 49 negative_sampler """basic""" +794 49 evaluator """rankbased""" +794 50 dataset """fb15k237""" +794 50 model """transe""" +794 50 loss """softplus""" +794 50 regularizer """no""" +794 50 optimizer """adam""" +794 50 training_loop """owa""" +794 50 negative_sampler """basic""" +794 50 evaluator """rankbased""" +794 51 dataset """fb15k237""" +794 51 model """transe""" +794 51 loss """softplus""" +794 51 regularizer """no""" +794 51 optimizer """adam""" +794 51 training_loop """owa""" +794 51 negative_sampler """basic""" +794 51 
evaluator """rankbased""" +794 52 dataset """fb15k237""" +794 52 model """transe""" +794 52 loss """softplus""" +794 52 regularizer """no""" +794 52 optimizer """adam""" +794 52 training_loop """owa""" +794 52 negative_sampler """basic""" +794 52 evaluator """rankbased""" +794 53 dataset """fb15k237""" +794 53 model """transe""" +794 53 loss """softplus""" +794 53 regularizer """no""" +794 53 optimizer """adam""" +794 53 training_loop """owa""" +794 53 negative_sampler """basic""" +794 53 evaluator """rankbased""" +794 54 dataset """fb15k237""" +794 54 model """transe""" +794 54 loss """softplus""" +794 54 regularizer """no""" +794 54 optimizer """adam""" +794 54 training_loop """owa""" +794 54 negative_sampler """basic""" +794 54 evaluator """rankbased""" +794 55 dataset """fb15k237""" +794 55 model """transe""" +794 55 loss """softplus""" +794 55 regularizer """no""" +794 55 optimizer """adam""" +794 55 training_loop """owa""" +794 55 negative_sampler """basic""" +794 55 evaluator """rankbased""" +794 56 dataset """fb15k237""" +794 56 model """transe""" +794 56 loss """softplus""" +794 56 regularizer """no""" +794 56 optimizer """adam""" +794 56 training_loop """owa""" +794 56 negative_sampler """basic""" +794 56 evaluator """rankbased""" +794 57 dataset """fb15k237""" +794 57 model """transe""" +794 57 loss """softplus""" +794 57 regularizer """no""" +794 57 optimizer """adam""" +794 57 training_loop """owa""" +794 57 negative_sampler """basic""" +794 57 evaluator """rankbased""" +794 58 dataset """fb15k237""" +794 58 model """transe""" +794 58 loss """softplus""" +794 58 regularizer """no""" +794 58 optimizer """adam""" +794 58 training_loop """owa""" +794 58 negative_sampler """basic""" +794 58 evaluator """rankbased""" +794 59 dataset """fb15k237""" +794 59 model """transe""" +794 59 loss """softplus""" +794 59 regularizer """no""" +794 59 optimizer """adam""" +794 59 training_loop """owa""" +794 59 negative_sampler """basic""" +794 59 evaluator 
"""rankbased""" +794 60 dataset """fb15k237""" +794 60 model """transe""" +794 60 loss """softplus""" +794 60 regularizer """no""" +794 60 optimizer """adam""" +794 60 training_loop """owa""" +794 60 negative_sampler """basic""" +794 60 evaluator """rankbased""" +794 61 dataset """fb15k237""" +794 61 model """transe""" +794 61 loss """softplus""" +794 61 regularizer """no""" +794 61 optimizer """adam""" +794 61 training_loop """owa""" +794 61 negative_sampler """basic""" +794 61 evaluator """rankbased""" +794 62 dataset """fb15k237""" +794 62 model """transe""" +794 62 loss """softplus""" +794 62 regularizer """no""" +794 62 optimizer """adam""" +794 62 training_loop """owa""" +794 62 negative_sampler """basic""" +794 62 evaluator """rankbased""" +794 63 dataset """fb15k237""" +794 63 model """transe""" +794 63 loss """softplus""" +794 63 regularizer """no""" +794 63 optimizer """adam""" +794 63 training_loop """owa""" +794 63 negative_sampler """basic""" +794 63 evaluator """rankbased""" +794 64 dataset """fb15k237""" +794 64 model """transe""" +794 64 loss """softplus""" +794 64 regularizer """no""" +794 64 optimizer """adam""" +794 64 training_loop """owa""" +794 64 negative_sampler """basic""" +794 64 evaluator """rankbased""" +794 65 dataset """fb15k237""" +794 65 model """transe""" +794 65 loss """softplus""" +794 65 regularizer """no""" +794 65 optimizer """adam""" +794 65 training_loop """owa""" +794 65 negative_sampler """basic""" +794 65 evaluator """rankbased""" +794 66 dataset """fb15k237""" +794 66 model """transe""" +794 66 loss """softplus""" +794 66 regularizer """no""" +794 66 optimizer """adam""" +794 66 training_loop """owa""" +794 66 negative_sampler """basic""" +794 66 evaluator """rankbased""" +794 67 dataset """fb15k237""" +794 67 model """transe""" +794 67 loss """softplus""" +794 67 regularizer """no""" +794 67 optimizer """adam""" +794 67 training_loop """owa""" +794 67 negative_sampler """basic""" +794 67 evaluator """rankbased""" +794 68 
dataset """fb15k237""" +794 68 model """transe""" +794 68 loss """softplus""" +794 68 regularizer """no""" +794 68 optimizer """adam""" +794 68 training_loop """owa""" +794 68 negative_sampler """basic""" +794 68 evaluator """rankbased""" +794 69 dataset """fb15k237""" +794 69 model """transe""" +794 69 loss """softplus""" +794 69 regularizer """no""" +794 69 optimizer """adam""" +794 69 training_loop """owa""" +794 69 negative_sampler """basic""" +794 69 evaluator """rankbased""" +794 70 dataset """fb15k237""" +794 70 model """transe""" +794 70 loss """softplus""" +794 70 regularizer """no""" +794 70 optimizer """adam""" +794 70 training_loop """owa""" +794 70 negative_sampler """basic""" +794 70 evaluator """rankbased""" +794 71 dataset """fb15k237""" +794 71 model """transe""" +794 71 loss """softplus""" +794 71 regularizer """no""" +794 71 optimizer """adam""" +794 71 training_loop """owa""" +794 71 negative_sampler """basic""" +794 71 evaluator """rankbased""" +794 72 dataset """fb15k237""" +794 72 model """transe""" +794 72 loss """softplus""" +794 72 regularizer """no""" +794 72 optimizer """adam""" +794 72 training_loop """owa""" +794 72 negative_sampler """basic""" +794 72 evaluator """rankbased""" +794 73 dataset """fb15k237""" +794 73 model """transe""" +794 73 loss """softplus""" +794 73 regularizer """no""" +794 73 optimizer """adam""" +794 73 training_loop """owa""" +794 73 negative_sampler """basic""" +794 73 evaluator """rankbased""" +794 74 dataset """fb15k237""" +794 74 model """transe""" +794 74 loss """softplus""" +794 74 regularizer """no""" +794 74 optimizer """adam""" +794 74 training_loop """owa""" +794 74 negative_sampler """basic""" +794 74 evaluator """rankbased""" +794 75 dataset """fb15k237""" +794 75 model """transe""" +794 75 loss """softplus""" +794 75 regularizer """no""" +794 75 optimizer """adam""" +794 75 training_loop """owa""" +794 75 negative_sampler """basic""" +794 75 evaluator """rankbased""" +794 76 dataset """fb15k237""" 
+794 76 model """transe""" +794 76 loss """softplus""" +794 76 regularizer """no""" +794 76 optimizer """adam""" +794 76 training_loop """owa""" +794 76 negative_sampler """basic""" +794 76 evaluator """rankbased""" +794 77 dataset """fb15k237""" +794 77 model """transe""" +794 77 loss """softplus""" +794 77 regularizer """no""" +794 77 optimizer """adam""" +794 77 training_loop """owa""" +794 77 negative_sampler """basic""" +794 77 evaluator """rankbased""" +794 78 dataset """fb15k237""" +794 78 model """transe""" +794 78 loss """softplus""" +794 78 regularizer """no""" +794 78 optimizer """adam""" +794 78 training_loop """owa""" +794 78 negative_sampler """basic""" +794 78 evaluator """rankbased""" +794 79 dataset """fb15k237""" +794 79 model """transe""" +794 79 loss """softplus""" +794 79 regularizer """no""" +794 79 optimizer """adam""" +794 79 training_loop """owa""" +794 79 negative_sampler """basic""" +794 79 evaluator """rankbased""" +794 80 dataset """fb15k237""" +794 80 model """transe""" +794 80 loss """softplus""" +794 80 regularizer """no""" +794 80 optimizer """adam""" +794 80 training_loop """owa""" +794 80 negative_sampler """basic""" +794 80 evaluator """rankbased""" +794 81 dataset """fb15k237""" +794 81 model """transe""" +794 81 loss """softplus""" +794 81 regularizer """no""" +794 81 optimizer """adam""" +794 81 training_loop """owa""" +794 81 negative_sampler """basic""" +794 81 evaluator """rankbased""" +794 82 dataset """fb15k237""" +794 82 model """transe""" +794 82 loss """softplus""" +794 82 regularizer """no""" +794 82 optimizer """adam""" +794 82 training_loop """owa""" +794 82 negative_sampler """basic""" +794 82 evaluator """rankbased""" +794 83 dataset """fb15k237""" +794 83 model """transe""" +794 83 loss """softplus""" +794 83 regularizer """no""" +794 83 optimizer """adam""" +794 83 training_loop """owa""" +794 83 negative_sampler """basic""" +794 83 evaluator """rankbased""" +794 84 dataset """fb15k237""" +794 84 model 
"""transe""" +794 84 loss """softplus""" +794 84 regularizer """no""" +794 84 optimizer """adam""" +794 84 training_loop """owa""" +794 84 negative_sampler """basic""" +794 84 evaluator """rankbased""" +795 1 model.embedding_dim 2.0 +795 1 model.scoring_fct_norm 2.0 +795 1 loss.margin 6.204767587799228 +795 1 optimizer.lr 0.0028271270256175524 +795 1 negative_sampler.num_negs_per_pos 29.0 +795 1 training.batch_size 1.0 +795 2 model.embedding_dim 1.0 +795 2 model.scoring_fct_norm 1.0 +795 2 loss.margin 8.601094342717614 +795 2 optimizer.lr 0.04175091411521486 +795 2 negative_sampler.num_negs_per_pos 5.0 +795 2 training.batch_size 1.0 +795 3 model.embedding_dim 0.0 +795 3 model.scoring_fct_norm 1.0 +795 3 loss.margin 6.335717982584735 +795 3 optimizer.lr 0.07170040012320666 +795 3 negative_sampler.num_negs_per_pos 53.0 +795 3 training.batch_size 2.0 +795 4 model.embedding_dim 1.0 +795 4 model.scoring_fct_norm 1.0 +795 4 loss.margin 3.8714136257398777 +795 4 optimizer.lr 0.0015531064021217394 +795 4 negative_sampler.num_negs_per_pos 26.0 +795 4 training.batch_size 1.0 +795 5 model.embedding_dim 1.0 +795 5 model.scoring_fct_norm 2.0 +795 5 loss.margin 3.5296308140140353 +795 5 optimizer.lr 0.006853386193262446 +795 5 negative_sampler.num_negs_per_pos 52.0 +795 5 training.batch_size 2.0 +795 6 model.embedding_dim 1.0 +795 6 model.scoring_fct_norm 1.0 +795 6 loss.margin 2.6737887173642054 +795 6 optimizer.lr 0.09200707408440191 +795 6 negative_sampler.num_negs_per_pos 95.0 +795 6 training.batch_size 2.0 +795 7 model.embedding_dim 1.0 +795 7 model.scoring_fct_norm 2.0 +795 7 loss.margin 9.241691122242212 +795 7 optimizer.lr 0.0022389059803828142 +795 7 negative_sampler.num_negs_per_pos 81.0 +795 7 training.batch_size 2.0 +795 8 model.embedding_dim 2.0 +795 8 model.scoring_fct_norm 2.0 +795 8 loss.margin 7.824068474395756 +795 8 optimizer.lr 0.02354612090274854 +795 8 negative_sampler.num_negs_per_pos 4.0 +795 8 training.batch_size 1.0 +795 9 model.embedding_dim 1.0 +795 9 
model.scoring_fct_norm 1.0 +795 9 loss.margin 6.078489816476001 +795 9 optimizer.lr 0.0022957781082542862 +795 9 negative_sampler.num_negs_per_pos 61.0 +795 9 training.batch_size 0.0 +795 10 model.embedding_dim 0.0 +795 10 model.scoring_fct_norm 2.0 +795 10 loss.margin 7.8693702914465495 +795 10 optimizer.lr 0.00255039570772438 +795 10 negative_sampler.num_negs_per_pos 73.0 +795 10 training.batch_size 0.0 +795 11 model.embedding_dim 1.0 +795 11 model.scoring_fct_norm 1.0 +795 11 loss.margin 4.7398067718936945 +795 11 optimizer.lr 0.0696375556911848 +795 11 negative_sampler.num_negs_per_pos 13.0 +795 11 training.batch_size 1.0 +795 12 model.embedding_dim 2.0 +795 12 model.scoring_fct_norm 1.0 +795 12 loss.margin 6.986103179098777 +795 12 optimizer.lr 0.0065514227920805065 +795 12 negative_sampler.num_negs_per_pos 26.0 +795 12 training.batch_size 0.0 +795 13 model.embedding_dim 0.0 +795 13 model.scoring_fct_norm 1.0 +795 13 loss.margin 8.65610984165705 +795 13 optimizer.lr 0.0010092097991288445 +795 13 negative_sampler.num_negs_per_pos 95.0 +795 13 training.batch_size 2.0 +795 14 model.embedding_dim 1.0 +795 14 model.scoring_fct_norm 1.0 +795 14 loss.margin 9.247376068671356 +795 14 optimizer.lr 0.001994006365815487 +795 14 negative_sampler.num_negs_per_pos 52.0 +795 14 training.batch_size 0.0 +795 15 model.embedding_dim 2.0 +795 15 model.scoring_fct_norm 2.0 +795 15 loss.margin 7.877694416643254 +795 15 optimizer.lr 0.00860024596583916 +795 15 negative_sampler.num_negs_per_pos 18.0 +795 15 training.batch_size 2.0 +795 16 model.embedding_dim 2.0 +795 16 model.scoring_fct_norm 1.0 +795 16 loss.margin 3.2533705761831504 +795 16 optimizer.lr 0.0044815131783761866 +795 16 negative_sampler.num_negs_per_pos 50.0 +795 16 training.batch_size 2.0 +795 17 model.embedding_dim 0.0 +795 17 model.scoring_fct_norm 2.0 +795 17 loss.margin 8.484812708135609 +795 17 optimizer.lr 0.03384891924129323 +795 17 negative_sampler.num_negs_per_pos 2.0 +795 17 training.batch_size 0.0 +795 18 
model.embedding_dim 0.0 +795 18 model.scoring_fct_norm 2.0 +795 18 loss.margin 2.7867517925064367 +795 18 optimizer.lr 0.023596336813047767 +795 18 negative_sampler.num_negs_per_pos 90.0 +795 18 training.batch_size 1.0 +795 19 model.embedding_dim 2.0 +795 19 model.scoring_fct_norm 2.0 +795 19 loss.margin 0.6626029685862578 +795 19 optimizer.lr 0.04860271960591781 +795 19 negative_sampler.num_negs_per_pos 15.0 +795 19 training.batch_size 0.0 +795 20 model.embedding_dim 2.0 +795 20 model.scoring_fct_norm 1.0 +795 20 loss.margin 8.330345453289757 +795 20 optimizer.lr 0.004085417960232666 +795 20 negative_sampler.num_negs_per_pos 17.0 +795 20 training.batch_size 2.0 +795 21 model.embedding_dim 2.0 +795 21 model.scoring_fct_norm 2.0 +795 21 loss.margin 9.118701865474033 +795 21 optimizer.lr 0.003911385740895433 +795 21 negative_sampler.num_negs_per_pos 94.0 +795 21 training.batch_size 1.0 +795 22 model.embedding_dim 2.0 +795 22 model.scoring_fct_norm 2.0 +795 22 loss.margin 6.260829173949444 +795 22 optimizer.lr 0.04218778873390823 +795 22 negative_sampler.num_negs_per_pos 85.0 +795 22 training.batch_size 0.0 +795 23 model.embedding_dim 0.0 +795 23 model.scoring_fct_norm 1.0 +795 23 loss.margin 9.581303661021295 +795 23 optimizer.lr 0.004529124662634509 +795 23 negative_sampler.num_negs_per_pos 34.0 +795 23 training.batch_size 2.0 +795 24 model.embedding_dim 1.0 +795 24 model.scoring_fct_norm 1.0 +795 24 loss.margin 1.8555077966289397 +795 24 optimizer.lr 0.02254178487926572 +795 24 negative_sampler.num_negs_per_pos 55.0 +795 24 training.batch_size 1.0 +795 25 model.embedding_dim 1.0 +795 25 model.scoring_fct_norm 2.0 +795 25 loss.margin 7.284260793820981 +795 25 optimizer.lr 0.006653912033172262 +795 25 negative_sampler.num_negs_per_pos 27.0 +795 25 training.batch_size 0.0 +795 26 model.embedding_dim 0.0 +795 26 model.scoring_fct_norm 2.0 +795 26 loss.margin 8.771046880954584 +795 26 optimizer.lr 0.0010367087186278535 +795 26 negative_sampler.num_negs_per_pos 70.0 +795 
26 training.batch_size 1.0 +795 27 model.embedding_dim 1.0 +795 27 model.scoring_fct_norm 1.0 +795 27 loss.margin 6.3111960747437985 +795 27 optimizer.lr 0.013875192378830009 +795 27 negative_sampler.num_negs_per_pos 13.0 +795 27 training.batch_size 0.0 +795 28 model.embedding_dim 1.0 +795 28 model.scoring_fct_norm 2.0 +795 28 loss.margin 6.084874370835556 +795 28 optimizer.lr 0.001309035723871488 +795 28 negative_sampler.num_negs_per_pos 69.0 +795 28 training.batch_size 0.0 +795 29 model.embedding_dim 1.0 +795 29 model.scoring_fct_norm 1.0 +795 29 loss.margin 5.638466048845766 +795 29 optimizer.lr 0.09630168618081625 +795 29 negative_sampler.num_negs_per_pos 16.0 +795 29 training.batch_size 1.0 +795 30 model.embedding_dim 0.0 +795 30 model.scoring_fct_norm 1.0 +795 30 loss.margin 5.704417770145979 +795 30 optimizer.lr 0.03981963328935068 +795 30 negative_sampler.num_negs_per_pos 62.0 +795 30 training.batch_size 0.0 +795 31 model.embedding_dim 2.0 +795 31 model.scoring_fct_norm 2.0 +795 31 loss.margin 5.630408687551509 +795 31 optimizer.lr 0.09479513513454378 +795 31 negative_sampler.num_negs_per_pos 26.0 +795 31 training.batch_size 2.0 +795 32 model.embedding_dim 1.0 +795 32 model.scoring_fct_norm 2.0 +795 32 loss.margin 0.9772520202167321 +795 32 optimizer.lr 0.013804777656264299 +795 32 negative_sampler.num_negs_per_pos 16.0 +795 32 training.batch_size 0.0 +795 33 model.embedding_dim 0.0 +795 33 model.scoring_fct_norm 1.0 +795 33 loss.margin 7.433050247040152 +795 33 optimizer.lr 0.0031265595514463193 +795 33 negative_sampler.num_negs_per_pos 17.0 +795 33 training.batch_size 1.0 +795 34 model.embedding_dim 2.0 +795 34 model.scoring_fct_norm 2.0 +795 34 loss.margin 8.9535744016431 +795 34 optimizer.lr 0.04738339473674044 +795 34 negative_sampler.num_negs_per_pos 17.0 +795 34 training.batch_size 1.0 +795 35 model.embedding_dim 1.0 +795 35 model.scoring_fct_norm 2.0 +795 35 loss.margin 3.859008405735003 +795 35 optimizer.lr 0.0018377906512751898 +795 35 
negative_sampler.num_negs_per_pos 78.0 +795 35 training.batch_size 2.0 +795 36 model.embedding_dim 2.0 +795 36 model.scoring_fct_norm 2.0 +795 36 loss.margin 2.1315216400900967 +795 36 optimizer.lr 0.002881103793066077 +795 36 negative_sampler.num_negs_per_pos 90.0 +795 36 training.batch_size 0.0 +795 37 model.embedding_dim 1.0 +795 37 model.scoring_fct_norm 1.0 +795 37 loss.margin 6.484197473086945 +795 37 optimizer.lr 0.0097719957237764 +795 37 negative_sampler.num_negs_per_pos 94.0 +795 37 training.batch_size 1.0 +795 1 dataset """fb15k237""" +795 1 model """transe""" +795 1 loss """marginranking""" +795 1 regularizer """no""" +795 1 optimizer """adam""" +795 1 training_loop """owa""" +795 1 negative_sampler """basic""" +795 1 evaluator """rankbased""" +795 2 dataset """fb15k237""" +795 2 model """transe""" +795 2 loss """marginranking""" +795 2 regularizer """no""" +795 2 optimizer """adam""" +795 2 training_loop """owa""" +795 2 negative_sampler """basic""" +795 2 evaluator """rankbased""" +795 3 dataset """fb15k237""" +795 3 model """transe""" +795 3 loss """marginranking""" +795 3 regularizer """no""" +795 3 optimizer """adam""" +795 3 training_loop """owa""" +795 3 negative_sampler """basic""" +795 3 evaluator """rankbased""" +795 4 dataset """fb15k237""" +795 4 model """transe""" +795 4 loss """marginranking""" +795 4 regularizer """no""" +795 4 optimizer """adam""" +795 4 training_loop """owa""" +795 4 negative_sampler """basic""" +795 4 evaluator """rankbased""" +795 5 dataset """fb15k237""" +795 5 model """transe""" +795 5 loss """marginranking""" +795 5 regularizer """no""" +795 5 optimizer """adam""" +795 5 training_loop """owa""" +795 5 negative_sampler """basic""" +795 5 evaluator """rankbased""" +795 6 dataset """fb15k237""" +795 6 model """transe""" +795 6 loss """marginranking""" +795 6 regularizer """no""" +795 6 optimizer """adam""" +795 6 training_loop """owa""" +795 6 negative_sampler """basic""" +795 6 evaluator """rankbased""" +795 7 
dataset """fb15k237""" +795 7 model """transe""" +795 7 loss """marginranking""" +795 7 regularizer """no""" +795 7 optimizer """adam""" +795 7 training_loop """owa""" +795 7 negative_sampler """basic""" +795 7 evaluator """rankbased""" +795 8 dataset """fb15k237""" +795 8 model """transe""" +795 8 loss """marginranking""" +795 8 regularizer """no""" +795 8 optimizer """adam""" +795 8 training_loop """owa""" +795 8 negative_sampler """basic""" +795 8 evaluator """rankbased""" +795 9 dataset """fb15k237""" +795 9 model """transe""" +795 9 loss """marginranking""" +795 9 regularizer """no""" +795 9 optimizer """adam""" +795 9 training_loop """owa""" +795 9 negative_sampler """basic""" +795 9 evaluator """rankbased""" +795 10 dataset """fb15k237""" +795 10 model """transe""" +795 10 loss """marginranking""" +795 10 regularizer """no""" +795 10 optimizer """adam""" +795 10 training_loop """owa""" +795 10 negative_sampler """basic""" +795 10 evaluator """rankbased""" +795 11 dataset """fb15k237""" +795 11 model """transe""" +795 11 loss """marginranking""" +795 11 regularizer """no""" +795 11 optimizer """adam""" +795 11 training_loop """owa""" +795 11 negative_sampler """basic""" +795 11 evaluator """rankbased""" +795 12 dataset """fb15k237""" +795 12 model """transe""" +795 12 loss """marginranking""" +795 12 regularizer """no""" +795 12 optimizer """adam""" +795 12 training_loop """owa""" +795 12 negative_sampler """basic""" +795 12 evaluator """rankbased""" +795 13 dataset """fb15k237""" +795 13 model """transe""" +795 13 loss """marginranking""" +795 13 regularizer """no""" +795 13 optimizer """adam""" +795 13 training_loop """owa""" +795 13 negative_sampler """basic""" +795 13 evaluator """rankbased""" +795 14 dataset """fb15k237""" +795 14 model """transe""" +795 14 loss """marginranking""" +795 14 regularizer """no""" +795 14 optimizer """adam""" +795 14 training_loop """owa""" +795 14 negative_sampler """basic""" +795 14 evaluator """rankbased""" +795 15 
dataset """fb15k237""" +795 15 model """transe""" +795 15 loss """marginranking""" +795 15 regularizer """no""" +795 15 optimizer """adam""" +795 15 training_loop """owa""" +795 15 negative_sampler """basic""" +795 15 evaluator """rankbased""" +795 16 dataset """fb15k237""" +795 16 model """transe""" +795 16 loss """marginranking""" +795 16 regularizer """no""" +795 16 optimizer """adam""" +795 16 training_loop """owa""" +795 16 negative_sampler """basic""" +795 16 evaluator """rankbased""" +795 17 dataset """fb15k237""" +795 17 model """transe""" +795 17 loss """marginranking""" +795 17 regularizer """no""" +795 17 optimizer """adam""" +795 17 training_loop """owa""" +795 17 negative_sampler """basic""" +795 17 evaluator """rankbased""" +795 18 dataset """fb15k237""" +795 18 model """transe""" +795 18 loss """marginranking""" +795 18 regularizer """no""" +795 18 optimizer """adam""" +795 18 training_loop """owa""" +795 18 negative_sampler """basic""" +795 18 evaluator """rankbased""" +795 19 dataset """fb15k237""" +795 19 model """transe""" +795 19 loss """marginranking""" +795 19 regularizer """no""" +795 19 optimizer """adam""" +795 19 training_loop """owa""" +795 19 negative_sampler """basic""" +795 19 evaluator """rankbased""" +795 20 dataset """fb15k237""" +795 20 model """transe""" +795 20 loss """marginranking""" +795 20 regularizer """no""" +795 20 optimizer """adam""" +795 20 training_loop """owa""" +795 20 negative_sampler """basic""" +795 20 evaluator """rankbased""" +795 21 dataset """fb15k237""" +795 21 model """transe""" +795 21 loss """marginranking""" +795 21 regularizer """no""" +795 21 optimizer """adam""" +795 21 training_loop """owa""" +795 21 negative_sampler """basic""" +795 21 evaluator """rankbased""" +795 22 dataset """fb15k237""" +795 22 model """transe""" +795 22 loss """marginranking""" +795 22 regularizer """no""" +795 22 optimizer """adam""" +795 22 training_loop """owa""" +795 22 negative_sampler """basic""" +795 22 evaluator 
"""rankbased""" +795 23 dataset """fb15k237""" +795 23 model """transe""" +795 23 loss """marginranking""" +795 23 regularizer """no""" +795 23 optimizer """adam""" +795 23 training_loop """owa""" +795 23 negative_sampler """basic""" +795 23 evaluator """rankbased""" +795 24 dataset """fb15k237""" +795 24 model """transe""" +795 24 loss """marginranking""" +795 24 regularizer """no""" +795 24 optimizer """adam""" +795 24 training_loop """owa""" +795 24 negative_sampler """basic""" +795 24 evaluator """rankbased""" +795 25 dataset """fb15k237""" +795 25 model """transe""" +795 25 loss """marginranking""" +795 25 regularizer """no""" +795 25 optimizer """adam""" +795 25 training_loop """owa""" +795 25 negative_sampler """basic""" +795 25 evaluator """rankbased""" +795 26 dataset """fb15k237""" +795 26 model """transe""" +795 26 loss """marginranking""" +795 26 regularizer """no""" +795 26 optimizer """adam""" +795 26 training_loop """owa""" +795 26 negative_sampler """basic""" +795 26 evaluator """rankbased""" +795 27 dataset """fb15k237""" +795 27 model """transe""" +795 27 loss """marginranking""" +795 27 regularizer """no""" +795 27 optimizer """adam""" +795 27 training_loop """owa""" +795 27 negative_sampler """basic""" +795 27 evaluator """rankbased""" +795 28 dataset """fb15k237""" +795 28 model """transe""" +795 28 loss """marginranking""" +795 28 regularizer """no""" +795 28 optimizer """adam""" +795 28 training_loop """owa""" +795 28 negative_sampler """basic""" +795 28 evaluator """rankbased""" +795 29 dataset """fb15k237""" +795 29 model """transe""" +795 29 loss """marginranking""" +795 29 regularizer """no""" +795 29 optimizer """adam""" +795 29 training_loop """owa""" +795 29 negative_sampler """basic""" +795 29 evaluator """rankbased""" +795 30 dataset """fb15k237""" +795 30 model """transe""" +795 30 loss """marginranking""" +795 30 regularizer """no""" +795 30 optimizer """adam""" +795 30 training_loop """owa""" +795 30 negative_sampler """basic""" 
+795 30 evaluator """rankbased""" +795 31 dataset """fb15k237""" +795 31 model """transe""" +795 31 loss """marginranking""" +795 31 regularizer """no""" +795 31 optimizer """adam""" +795 31 training_loop """owa""" +795 31 negative_sampler """basic""" +795 31 evaluator """rankbased""" +795 32 dataset """fb15k237""" +795 32 model """transe""" +795 32 loss """marginranking""" +795 32 regularizer """no""" +795 32 optimizer """adam""" +795 32 training_loop """owa""" +795 32 negative_sampler """basic""" +795 32 evaluator """rankbased""" +795 33 dataset """fb15k237""" +795 33 model """transe""" +795 33 loss """marginranking""" +795 33 regularizer """no""" +795 33 optimizer """adam""" +795 33 training_loop """owa""" +795 33 negative_sampler """basic""" +795 33 evaluator """rankbased""" +795 34 dataset """fb15k237""" +795 34 model """transe""" +795 34 loss """marginranking""" +795 34 regularizer """no""" +795 34 optimizer """adam""" +795 34 training_loop """owa""" +795 34 negative_sampler """basic""" +795 34 evaluator """rankbased""" +795 35 dataset """fb15k237""" +795 35 model """transe""" +795 35 loss """marginranking""" +795 35 regularizer """no""" +795 35 optimizer """adam""" +795 35 training_loop """owa""" +795 35 negative_sampler """basic""" +795 35 evaluator """rankbased""" +795 36 dataset """fb15k237""" +795 36 model """transe""" +795 36 loss """marginranking""" +795 36 regularizer """no""" +795 36 optimizer """adam""" +795 36 training_loop """owa""" +795 36 negative_sampler """basic""" +795 36 evaluator """rankbased""" +795 37 dataset """fb15k237""" +795 37 model """transe""" +795 37 loss """marginranking""" +795 37 regularizer """no""" +795 37 optimizer """adam""" +795 37 training_loop """owa""" +795 37 negative_sampler """basic""" +795 37 evaluator """rankbased""" +796 1 model.embedding_dim 1.0 +796 1 model.scoring_fct_norm 1.0 +796 1 loss.margin 2.6861406188094135 +796 1 optimizer.lr 0.0011936858534857958 +796 1 negative_sampler.num_negs_per_pos 79.0 +796 1 
training.batch_size 2.0 +796 2 model.embedding_dim 2.0 +796 2 model.scoring_fct_norm 2.0 +796 2 loss.margin 5.075693119414476 +796 2 optimizer.lr 0.002508274023883108 +796 2 negative_sampler.num_negs_per_pos 47.0 +796 2 training.batch_size 1.0 +796 3 model.embedding_dim 2.0 +796 3 model.scoring_fct_norm 2.0 +796 3 loss.margin 6.227696031050969 +796 3 optimizer.lr 0.05306161517094671 +796 3 negative_sampler.num_negs_per_pos 92.0 +796 3 training.batch_size 1.0 +796 4 model.embedding_dim 1.0 +796 4 model.scoring_fct_norm 2.0 +796 4 loss.margin 4.2912337239028275 +796 4 optimizer.lr 0.00676446082967399 +796 4 negative_sampler.num_negs_per_pos 27.0 +796 4 training.batch_size 2.0 +796 5 model.embedding_dim 2.0 +796 5 model.scoring_fct_norm 1.0 +796 5 loss.margin 8.2501154987533 +796 5 optimizer.lr 0.08528868090284135 +796 5 negative_sampler.num_negs_per_pos 41.0 +796 5 training.batch_size 2.0 +796 6 model.embedding_dim 2.0 +796 6 model.scoring_fct_norm 2.0 +796 6 loss.margin 3.3784904138163943 +796 6 optimizer.lr 0.023683940554351676 +796 6 negative_sampler.num_negs_per_pos 11.0 +796 6 training.batch_size 0.0 +796 7 model.embedding_dim 0.0 +796 7 model.scoring_fct_norm 2.0 +796 7 loss.margin 7.259062956735917 +796 7 optimizer.lr 0.041127007690095974 +796 7 negative_sampler.num_negs_per_pos 50.0 +796 7 training.batch_size 2.0 +796 8 model.embedding_dim 2.0 +796 8 model.scoring_fct_norm 2.0 +796 8 loss.margin 1.8150696197035645 +796 8 optimizer.lr 0.08340476488134185 +796 8 negative_sampler.num_negs_per_pos 17.0 +796 8 training.batch_size 1.0 +796 9 model.embedding_dim 2.0 +796 9 model.scoring_fct_norm 1.0 +796 9 loss.margin 8.245199747419061 +796 9 optimizer.lr 0.04085711193173013 +796 9 negative_sampler.num_negs_per_pos 15.0 +796 9 training.batch_size 0.0 +796 10 model.embedding_dim 0.0 +796 10 model.scoring_fct_norm 2.0 +796 10 loss.margin 8.14801466248793 +796 10 optimizer.lr 0.005155825624570182 +796 10 negative_sampler.num_negs_per_pos 85.0 +796 10 
training.batch_size 0.0 +796 11 model.embedding_dim 0.0 +796 11 model.scoring_fct_norm 2.0 +796 11 loss.margin 5.241299076050715 +796 11 optimizer.lr 0.003805999255726479 +796 11 negative_sampler.num_negs_per_pos 36.0 +796 11 training.batch_size 0.0 +796 12 model.embedding_dim 0.0 +796 12 model.scoring_fct_norm 1.0 +796 12 loss.margin 3.8396581309834104 +796 12 optimizer.lr 0.003543360483934507 +796 12 negative_sampler.num_negs_per_pos 52.0 +796 12 training.batch_size 2.0 +796 13 model.embedding_dim 1.0 +796 13 model.scoring_fct_norm 2.0 +796 13 loss.margin 6.485398663363933 +796 13 optimizer.lr 0.010460188397306258 +796 13 negative_sampler.num_negs_per_pos 90.0 +796 13 training.batch_size 0.0 +796 14 model.embedding_dim 0.0 +796 14 model.scoring_fct_norm 2.0 +796 14 loss.margin 4.897744516082164 +796 14 optimizer.lr 0.007123801269056768 +796 14 negative_sampler.num_negs_per_pos 22.0 +796 14 training.batch_size 1.0 +796 15 model.embedding_dim 2.0 +796 15 model.scoring_fct_norm 1.0 +796 15 loss.margin 7.096359766745495 +796 15 optimizer.lr 0.023183875059337215 +796 15 negative_sampler.num_negs_per_pos 5.0 +796 15 training.batch_size 1.0 +796 16 model.embedding_dim 2.0 +796 16 model.scoring_fct_norm 1.0 +796 16 loss.margin 1.255170589863815 +796 16 optimizer.lr 0.01368729186852972 +796 16 negative_sampler.num_negs_per_pos 98.0 +796 16 training.batch_size 0.0 +796 17 model.embedding_dim 1.0 +796 17 model.scoring_fct_norm 2.0 +796 17 loss.margin 6.2376970549278905 +796 17 optimizer.lr 0.0025080914178039287 +796 17 negative_sampler.num_negs_per_pos 34.0 +796 17 training.batch_size 0.0 +796 18 model.embedding_dim 2.0 +796 18 model.scoring_fct_norm 2.0 +796 18 loss.margin 6.818140057474593 +796 18 optimizer.lr 0.08266927974563062 +796 18 negative_sampler.num_negs_per_pos 60.0 +796 18 training.batch_size 1.0 +796 19 model.embedding_dim 1.0 +796 19 model.scoring_fct_norm 2.0 +796 19 loss.margin 1.347709529597518 +796 19 optimizer.lr 0.009685761827652477 +796 19 
negative_sampler.num_negs_per_pos 85.0 +796 19 training.batch_size 1.0 +796 20 model.embedding_dim 1.0 +796 20 model.scoring_fct_norm 1.0 +796 20 loss.margin 2.385783553933879 +796 20 optimizer.lr 0.018471418913452588 +796 20 negative_sampler.num_negs_per_pos 77.0 +796 20 training.batch_size 2.0 +796 21 model.embedding_dim 0.0 +796 21 model.scoring_fct_norm 1.0 +796 21 loss.margin 9.723795054802805 +796 21 optimizer.lr 0.0375992183348211 +796 21 negative_sampler.num_negs_per_pos 35.0 +796 21 training.batch_size 0.0 +796 22 model.embedding_dim 0.0 +796 22 model.scoring_fct_norm 1.0 +796 22 loss.margin 3.255974291086174 +796 22 optimizer.lr 0.0764301445996422 +796 22 negative_sampler.num_negs_per_pos 65.0 +796 22 training.batch_size 0.0 +796 23 model.embedding_dim 0.0 +796 23 model.scoring_fct_norm 2.0 +796 23 loss.margin 6.365100407705369 +796 23 optimizer.lr 0.0016802455090682079 +796 23 negative_sampler.num_negs_per_pos 90.0 +796 23 training.batch_size 0.0 +796 24 model.embedding_dim 1.0 +796 24 model.scoring_fct_norm 1.0 +796 24 loss.margin 6.628419589549495 +796 24 optimizer.lr 0.0023266555939560596 +796 24 negative_sampler.num_negs_per_pos 65.0 +796 24 training.batch_size 1.0 +796 25 model.embedding_dim 1.0 +796 25 model.scoring_fct_norm 1.0 +796 25 loss.margin 4.539444354216664 +796 25 optimizer.lr 0.026720882818265044 +796 25 negative_sampler.num_negs_per_pos 34.0 +796 25 training.batch_size 2.0 +796 26 model.embedding_dim 0.0 +796 26 model.scoring_fct_norm 2.0 +796 26 loss.margin 1.8084566357988032 +796 26 optimizer.lr 0.0019697768713502445 +796 26 negative_sampler.num_negs_per_pos 96.0 +796 26 training.batch_size 1.0 +796 27 model.embedding_dim 2.0 +796 27 model.scoring_fct_norm 1.0 +796 27 loss.margin 3.3564273191562664 +796 27 optimizer.lr 0.03826901819275957 +796 27 negative_sampler.num_negs_per_pos 80.0 +796 27 training.batch_size 2.0 +796 28 model.embedding_dim 1.0 +796 28 model.scoring_fct_norm 2.0 +796 28 loss.margin 4.512586466093297 +796 28 
optimizer.lr 0.032163221106572146 +796 28 negative_sampler.num_negs_per_pos 55.0 +796 28 training.batch_size 0.0 +796 29 model.embedding_dim 1.0 +796 29 model.scoring_fct_norm 1.0 +796 29 loss.margin 4.4340375038456 +796 29 optimizer.lr 0.025839907627757036 +796 29 negative_sampler.num_negs_per_pos 41.0 +796 29 training.batch_size 1.0 +796 30 model.embedding_dim 0.0 +796 30 model.scoring_fct_norm 1.0 +796 30 loss.margin 7.562324473840193 +796 30 optimizer.lr 0.001965615248062524 +796 30 negative_sampler.num_negs_per_pos 77.0 +796 30 training.batch_size 0.0 +796 31 model.embedding_dim 2.0 +796 31 model.scoring_fct_norm 2.0 +796 31 loss.margin 8.271008738797157 +796 31 optimizer.lr 0.07324717528079631 +796 31 negative_sampler.num_negs_per_pos 79.0 +796 31 training.batch_size 0.0 +796 32 model.embedding_dim 1.0 +796 32 model.scoring_fct_norm 2.0 +796 32 loss.margin 3.2876492054719435 +796 32 optimizer.lr 0.0024950091802805863 +796 32 negative_sampler.num_negs_per_pos 21.0 +796 32 training.batch_size 1.0 +796 33 model.embedding_dim 0.0 +796 33 model.scoring_fct_norm 1.0 +796 33 loss.margin 5.160295806524705 +796 33 optimizer.lr 0.01061274146578472 +796 33 negative_sampler.num_negs_per_pos 67.0 +796 33 training.batch_size 1.0 +796 34 model.embedding_dim 2.0 +796 34 model.scoring_fct_norm 2.0 +796 34 loss.margin 3.9106344925726635 +796 34 optimizer.lr 0.0017127372507393674 +796 34 negative_sampler.num_negs_per_pos 2.0 +796 34 training.batch_size 1.0 +796 35 model.embedding_dim 2.0 +796 35 model.scoring_fct_norm 1.0 +796 35 loss.margin 2.4395566775388966 +796 35 optimizer.lr 0.022102931437855522 +796 35 negative_sampler.num_negs_per_pos 74.0 +796 35 training.batch_size 2.0 +796 36 model.embedding_dim 2.0 +796 36 model.scoring_fct_norm 1.0 +796 36 loss.margin 2.246300521373011 +796 36 optimizer.lr 0.006260156232472634 +796 36 negative_sampler.num_negs_per_pos 58.0 +796 36 training.batch_size 2.0 +796 37 model.embedding_dim 0.0 +796 37 model.scoring_fct_norm 2.0 +796 37 
loss.margin 5.903268510252965 +796 37 optimizer.lr 0.009222228101617599 +796 37 negative_sampler.num_negs_per_pos 62.0 +796 37 training.batch_size 1.0 +796 38 model.embedding_dim 2.0 +796 38 model.scoring_fct_norm 2.0 +796 38 loss.margin 9.43399758052833 +796 38 optimizer.lr 0.0340778712507046 +796 38 negative_sampler.num_negs_per_pos 7.0 +796 38 training.batch_size 0.0 +796 39 model.embedding_dim 1.0 +796 39 model.scoring_fct_norm 1.0 +796 39 loss.margin 2.077259748268928 +796 39 optimizer.lr 0.001024669290173932 +796 39 negative_sampler.num_negs_per_pos 22.0 +796 39 training.batch_size 1.0 +796 40 model.embedding_dim 0.0 +796 40 model.scoring_fct_norm 1.0 +796 40 loss.margin 5.138370818807303 +796 40 optimizer.lr 0.02006262901801613 +796 40 negative_sampler.num_negs_per_pos 13.0 +796 40 training.batch_size 2.0 +796 41 model.embedding_dim 2.0 +796 41 model.scoring_fct_norm 1.0 +796 41 loss.margin 5.554447468865772 +796 41 optimizer.lr 0.0010166092989655905 +796 41 negative_sampler.num_negs_per_pos 26.0 +796 41 training.batch_size 1.0 +796 42 model.embedding_dim 2.0 +796 42 model.scoring_fct_norm 1.0 +796 42 loss.margin 6.931182820191306 +796 42 optimizer.lr 0.005097896215328468 +796 42 negative_sampler.num_negs_per_pos 76.0 +796 42 training.batch_size 1.0 +796 43 model.embedding_dim 1.0 +796 43 model.scoring_fct_norm 1.0 +796 43 loss.margin 1.270374939669458 +796 43 optimizer.lr 0.07237418160110645 +796 43 negative_sampler.num_negs_per_pos 0.0 +796 43 training.batch_size 1.0 +796 44 model.embedding_dim 1.0 +796 44 model.scoring_fct_norm 2.0 +796 44 loss.margin 1.5045258923985318 +796 44 optimizer.lr 0.001169662124377808 +796 44 negative_sampler.num_negs_per_pos 42.0 +796 44 training.batch_size 2.0 +796 45 model.embedding_dim 0.0 +796 45 model.scoring_fct_norm 2.0 +796 45 loss.margin 7.876728500975527 +796 45 optimizer.lr 0.005837227063282995 +796 45 negative_sampler.num_negs_per_pos 13.0 +796 45 training.batch_size 1.0 +796 46 model.embedding_dim 2.0 +796 46 
model.scoring_fct_norm 2.0 +796 46 loss.margin 1.36213735148428 +796 46 optimizer.lr 0.005629444703984288 +796 46 negative_sampler.num_negs_per_pos 28.0 +796 46 training.batch_size 0.0 +796 47 model.embedding_dim 0.0 +796 47 model.scoring_fct_norm 1.0 +796 47 loss.margin 6.611204636821774 +796 47 optimizer.lr 0.04672806165824492 +796 47 negative_sampler.num_negs_per_pos 38.0 +796 47 training.batch_size 2.0 +796 48 model.embedding_dim 0.0 +796 48 model.scoring_fct_norm 1.0 +796 48 loss.margin 2.524195734057245 +796 48 optimizer.lr 0.014861149155540824 +796 48 negative_sampler.num_negs_per_pos 36.0 +796 48 training.batch_size 1.0 +796 49 model.embedding_dim 1.0 +796 49 model.scoring_fct_norm 1.0 +796 49 loss.margin 6.662094238650759 +796 49 optimizer.lr 0.011938994184029203 +796 49 negative_sampler.num_negs_per_pos 35.0 +796 49 training.batch_size 2.0 +796 50 model.embedding_dim 1.0 +796 50 model.scoring_fct_norm 1.0 +796 50 loss.margin 7.929963766217732 +796 50 optimizer.lr 0.017771191836330778 +796 50 negative_sampler.num_negs_per_pos 24.0 +796 50 training.batch_size 2.0 +796 51 model.embedding_dim 1.0 +796 51 model.scoring_fct_norm 2.0 +796 51 loss.margin 8.46358488919297 +796 51 optimizer.lr 0.04994041280896878 +796 51 negative_sampler.num_negs_per_pos 79.0 +796 51 training.batch_size 0.0 +796 52 model.embedding_dim 0.0 +796 52 model.scoring_fct_norm 1.0 +796 52 loss.margin 3.832824907498363 +796 52 optimizer.lr 0.015188620779061482 +796 52 negative_sampler.num_negs_per_pos 35.0 +796 52 training.batch_size 0.0 +796 53 model.embedding_dim 0.0 +796 53 model.scoring_fct_norm 2.0 +796 53 loss.margin 5.831511127392224 +796 53 optimizer.lr 0.024160925093169448 +796 53 negative_sampler.num_negs_per_pos 67.0 +796 53 training.batch_size 0.0 +796 54 model.embedding_dim 0.0 +796 54 model.scoring_fct_norm 1.0 +796 54 loss.margin 9.88974032754888 +796 54 optimizer.lr 0.0023106830771674833 +796 54 negative_sampler.num_negs_per_pos 61.0 +796 54 training.batch_size 0.0 +796 55 
model.embedding_dim 0.0 +796 55 model.scoring_fct_norm 2.0 +796 55 loss.margin 8.664484431822796 +796 55 optimizer.lr 0.002337758172493496 +796 55 negative_sampler.num_negs_per_pos 40.0 +796 55 training.batch_size 1.0 +796 56 model.embedding_dim 2.0 +796 56 model.scoring_fct_norm 2.0 +796 56 loss.margin 0.6230343720307657 +796 56 optimizer.lr 0.043660066298490445 +796 56 negative_sampler.num_negs_per_pos 29.0 +796 56 training.batch_size 1.0 +796 57 model.embedding_dim 1.0 +796 57 model.scoring_fct_norm 1.0 +796 57 loss.margin 9.523738016364748 +796 57 optimizer.lr 0.002841166329712127 +796 57 negative_sampler.num_negs_per_pos 31.0 +796 57 training.batch_size 1.0 +796 58 model.embedding_dim 0.0 +796 58 model.scoring_fct_norm 2.0 +796 58 loss.margin 3.7064984086828066 +796 58 optimizer.lr 0.025000914462153617 +796 58 negative_sampler.num_negs_per_pos 24.0 +796 58 training.batch_size 1.0 +796 59 model.embedding_dim 0.0 +796 59 model.scoring_fct_norm 1.0 +796 59 loss.margin 1.232511902836404 +796 59 optimizer.lr 0.001250837917830573 +796 59 negative_sampler.num_negs_per_pos 17.0 +796 59 training.batch_size 0.0 +796 60 model.embedding_dim 2.0 +796 60 model.scoring_fct_norm 2.0 +796 60 loss.margin 7.353513863735534 +796 60 optimizer.lr 0.0031389157527937353 +796 60 negative_sampler.num_negs_per_pos 1.0 +796 60 training.batch_size 0.0 +796 61 model.embedding_dim 0.0 +796 61 model.scoring_fct_norm 2.0 +796 61 loss.margin 0.9273012747204148 +796 61 optimizer.lr 0.05131326982947715 +796 61 negative_sampler.num_negs_per_pos 27.0 +796 61 training.batch_size 0.0 +796 62 model.embedding_dim 1.0 +796 62 model.scoring_fct_norm 2.0 +796 62 loss.margin 1.2978833119273498 +796 62 optimizer.lr 0.002064883255943833 +796 62 negative_sampler.num_negs_per_pos 68.0 +796 62 training.batch_size 0.0 +796 63 model.embedding_dim 2.0 +796 63 model.scoring_fct_norm 1.0 +796 63 loss.margin 5.206177358842874 +796 63 optimizer.lr 0.0480257366235864 +796 63 negative_sampler.num_negs_per_pos 10.0 +796 
63 training.batch_size 1.0 +796 64 model.embedding_dim 1.0 +796 64 model.scoring_fct_norm 2.0 +796 64 loss.margin 2.922796416771786 +796 64 optimizer.lr 0.006176935327850202 +796 64 negative_sampler.num_negs_per_pos 47.0 +796 64 training.batch_size 2.0 +796 65 model.embedding_dim 1.0 +796 65 model.scoring_fct_norm 2.0 +796 65 loss.margin 2.029465488964488 +796 65 optimizer.lr 0.0018846841142739679 +796 65 negative_sampler.num_negs_per_pos 49.0 +796 65 training.batch_size 2.0 +796 66 model.embedding_dim 0.0 +796 66 model.scoring_fct_norm 1.0 +796 66 loss.margin 6.698247184981409 +796 66 optimizer.lr 0.006990492860697387 +796 66 negative_sampler.num_negs_per_pos 60.0 +796 66 training.batch_size 0.0 +796 67 model.embedding_dim 1.0 +796 67 model.scoring_fct_norm 2.0 +796 67 loss.margin 4.376177369068011 +796 67 optimizer.lr 0.005963384124377635 +796 67 negative_sampler.num_negs_per_pos 14.0 +796 67 training.batch_size 0.0 +796 68 model.embedding_dim 0.0 +796 68 model.scoring_fct_norm 1.0 +796 68 loss.margin 8.406461849293313 +796 68 optimizer.lr 0.05790901201711178 +796 68 negative_sampler.num_negs_per_pos 94.0 +796 68 training.batch_size 2.0 +796 69 model.embedding_dim 0.0 +796 69 model.scoring_fct_norm 1.0 +796 69 loss.margin 5.680237570822337 +796 69 optimizer.lr 0.002519871223633358 +796 69 negative_sampler.num_negs_per_pos 17.0 +796 69 training.batch_size 2.0 +796 70 model.embedding_dim 0.0 +796 70 model.scoring_fct_norm 2.0 +796 70 loss.margin 9.6010701499221 +796 70 optimizer.lr 0.005657418347726788 +796 70 negative_sampler.num_negs_per_pos 31.0 +796 70 training.batch_size 0.0 +796 1 dataset """fb15k237""" +796 1 model """transe""" +796 1 loss """marginranking""" +796 1 regularizer """no""" +796 1 optimizer """adam""" +796 1 training_loop """owa""" +796 1 negative_sampler """basic""" +796 1 evaluator """rankbased""" +796 2 dataset """fb15k237""" +796 2 model """transe""" +796 2 loss """marginranking""" +796 2 regularizer """no""" +796 2 optimizer """adam""" +796 
2 training_loop """owa""" +796 2 negative_sampler """basic""" +796 2 evaluator """rankbased""" +796 3 dataset """fb15k237""" +796 3 model """transe""" +796 3 loss """marginranking""" +796 3 regularizer """no""" +796 3 optimizer """adam""" +796 3 training_loop """owa""" +796 3 negative_sampler """basic""" +796 3 evaluator """rankbased""" +796 4 dataset """fb15k237""" +796 4 model """transe""" +796 4 loss """marginranking""" +796 4 regularizer """no""" +796 4 optimizer """adam""" +796 4 training_loop """owa""" +796 4 negative_sampler """basic""" +796 4 evaluator """rankbased""" +796 5 dataset """fb15k237""" +796 5 model """transe""" +796 5 loss """marginranking""" +796 5 regularizer """no""" +796 5 optimizer """adam""" +796 5 training_loop """owa""" +796 5 negative_sampler """basic""" +796 5 evaluator """rankbased""" +796 6 dataset """fb15k237""" +796 6 model """transe""" +796 6 loss """marginranking""" +796 6 regularizer """no""" +796 6 optimizer """adam""" +796 6 training_loop """owa""" +796 6 negative_sampler """basic""" +796 6 evaluator """rankbased""" +796 7 dataset """fb15k237""" +796 7 model """transe""" +796 7 loss """marginranking""" +796 7 regularizer """no""" +796 7 optimizer """adam""" +796 7 training_loop """owa""" +796 7 negative_sampler """basic""" +796 7 evaluator """rankbased""" +796 8 dataset """fb15k237""" +796 8 model """transe""" +796 8 loss """marginranking""" +796 8 regularizer """no""" +796 8 optimizer """adam""" +796 8 training_loop """owa""" +796 8 negative_sampler """basic""" +796 8 evaluator """rankbased""" +796 9 dataset """fb15k237""" +796 9 model """transe""" +796 9 loss """marginranking""" +796 9 regularizer """no""" +796 9 optimizer """adam""" +796 9 training_loop """owa""" +796 9 negative_sampler """basic""" +796 9 evaluator """rankbased""" +796 10 dataset """fb15k237""" +796 10 model """transe""" +796 10 loss """marginranking""" +796 10 regularizer """no""" +796 10 optimizer """adam""" +796 10 training_loop """owa""" +796 10 
negative_sampler """basic""" +796 10 evaluator """rankbased""" +796 11 dataset """fb15k237""" +796 11 model """transe""" +796 11 loss """marginranking""" +796 11 regularizer """no""" +796 11 optimizer """adam""" +796 11 training_loop """owa""" +796 11 negative_sampler """basic""" +796 11 evaluator """rankbased""" +796 12 dataset """fb15k237""" +796 12 model """transe""" +796 12 loss """marginranking""" +796 12 regularizer """no""" +796 12 optimizer """adam""" +796 12 training_loop """owa""" +796 12 negative_sampler """basic""" +796 12 evaluator """rankbased""" +796 13 dataset """fb15k237""" +796 13 model """transe""" +796 13 loss """marginranking""" +796 13 regularizer """no""" +796 13 optimizer """adam""" +796 13 training_loop """owa""" +796 13 negative_sampler """basic""" +796 13 evaluator """rankbased""" +796 14 dataset """fb15k237""" +796 14 model """transe""" +796 14 loss """marginranking""" +796 14 regularizer """no""" +796 14 optimizer """adam""" +796 14 training_loop """owa""" +796 14 negative_sampler """basic""" +796 14 evaluator """rankbased""" +796 15 dataset """fb15k237""" +796 15 model """transe""" +796 15 loss """marginranking""" +796 15 regularizer """no""" +796 15 optimizer """adam""" +796 15 training_loop """owa""" +796 15 negative_sampler """basic""" +796 15 evaluator """rankbased""" +796 16 dataset """fb15k237""" +796 16 model """transe""" +796 16 loss """marginranking""" +796 16 regularizer """no""" +796 16 optimizer """adam""" +796 16 training_loop """owa""" +796 16 negative_sampler """basic""" +796 16 evaluator """rankbased""" +796 17 dataset """fb15k237""" +796 17 model """transe""" +796 17 loss """marginranking""" +796 17 regularizer """no""" +796 17 optimizer """adam""" +796 17 training_loop """owa""" +796 17 negative_sampler """basic""" +796 17 evaluator """rankbased""" +796 18 dataset """fb15k237""" +796 18 model """transe""" +796 18 loss """marginranking""" +796 18 regularizer """no""" +796 18 optimizer """adam""" +796 18 training_loop 
"""owa""" +796 18 negative_sampler """basic""" +796 18 evaluator """rankbased""" +796 19 dataset """fb15k237""" +796 19 model """transe""" +796 19 loss """marginranking""" +796 19 regularizer """no""" +796 19 optimizer """adam""" +796 19 training_loop """owa""" +796 19 negative_sampler """basic""" +796 19 evaluator """rankbased""" +796 20 dataset """fb15k237""" +796 20 model """transe""" +796 20 loss """marginranking""" +796 20 regularizer """no""" +796 20 optimizer """adam""" +796 20 training_loop """owa""" +796 20 negative_sampler """basic""" +796 20 evaluator """rankbased""" +796 21 dataset """fb15k237""" +796 21 model """transe""" +796 21 loss """marginranking""" +796 21 regularizer """no""" +796 21 optimizer """adam""" +796 21 training_loop """owa""" +796 21 negative_sampler """basic""" +796 21 evaluator """rankbased""" +796 22 dataset """fb15k237""" +796 22 model """transe""" +796 22 loss """marginranking""" +796 22 regularizer """no""" +796 22 optimizer """adam""" +796 22 training_loop """owa""" +796 22 negative_sampler """basic""" +796 22 evaluator """rankbased""" +796 23 dataset """fb15k237""" +796 23 model """transe""" +796 23 loss """marginranking""" +796 23 regularizer """no""" +796 23 optimizer """adam""" +796 23 training_loop """owa""" +796 23 negative_sampler """basic""" +796 23 evaluator """rankbased""" +796 24 dataset """fb15k237""" +796 24 model """transe""" +796 24 loss """marginranking""" +796 24 regularizer """no""" +796 24 optimizer """adam""" +796 24 training_loop """owa""" +796 24 negative_sampler """basic""" +796 24 evaluator """rankbased""" +796 25 dataset """fb15k237""" +796 25 model """transe""" +796 25 loss """marginranking""" +796 25 regularizer """no""" +796 25 optimizer """adam""" +796 25 training_loop """owa""" +796 25 negative_sampler """basic""" +796 25 evaluator """rankbased""" +796 26 dataset """fb15k237""" +796 26 model """transe""" +796 26 loss """marginranking""" +796 26 regularizer """no""" +796 26 optimizer """adam""" +796 
26 training_loop """owa""" +796 26 negative_sampler """basic""" +796 26 evaluator """rankbased""" +796 27 dataset """fb15k237""" +796 27 model """transe""" +796 27 loss """marginranking""" +796 27 regularizer """no""" +796 27 optimizer """adam""" +796 27 training_loop """owa""" +796 27 negative_sampler """basic""" +796 27 evaluator """rankbased""" +796 28 dataset """fb15k237""" +796 28 model """transe""" +796 28 loss """marginranking""" +796 28 regularizer """no""" +796 28 optimizer """adam""" +796 28 training_loop """owa""" +796 28 negative_sampler """basic""" +796 28 evaluator """rankbased""" +796 29 dataset """fb15k237""" +796 29 model """transe""" +796 29 loss """marginranking""" +796 29 regularizer """no""" +796 29 optimizer """adam""" +796 29 training_loop """owa""" +796 29 negative_sampler """basic""" +796 29 evaluator """rankbased""" +796 30 dataset """fb15k237""" +796 30 model """transe""" +796 30 loss """marginranking""" +796 30 regularizer """no""" +796 30 optimizer """adam""" +796 30 training_loop """owa""" +796 30 negative_sampler """basic""" +796 30 evaluator """rankbased""" +796 31 dataset """fb15k237""" +796 31 model """transe""" +796 31 loss """marginranking""" +796 31 regularizer """no""" +796 31 optimizer """adam""" +796 31 training_loop """owa""" +796 31 negative_sampler """basic""" +796 31 evaluator """rankbased""" +796 32 dataset """fb15k237""" +796 32 model """transe""" +796 32 loss """marginranking""" +796 32 regularizer """no""" +796 32 optimizer """adam""" +796 32 training_loop """owa""" +796 32 negative_sampler """basic""" +796 32 evaluator """rankbased""" +796 33 dataset """fb15k237""" +796 33 model """transe""" +796 33 loss """marginranking""" +796 33 regularizer """no""" +796 33 optimizer """adam""" +796 33 training_loop """owa""" +796 33 negative_sampler """basic""" +796 33 evaluator """rankbased""" +796 34 dataset """fb15k237""" +796 34 model """transe""" +796 34 loss """marginranking""" +796 34 regularizer """no""" +796 34 optimizer 
"""adam""" +796 34 training_loop """owa""" +796 34 negative_sampler """basic""" +796 34 evaluator """rankbased""" +796 35 dataset """fb15k237""" +796 35 model """transe""" +796 35 loss """marginranking""" +796 35 regularizer """no""" +796 35 optimizer """adam""" +796 35 training_loop """owa""" +796 35 negative_sampler """basic""" +796 35 evaluator """rankbased""" +796 36 dataset """fb15k237""" +796 36 model """transe""" +796 36 loss """marginranking""" +796 36 regularizer """no""" +796 36 optimizer """adam""" +796 36 training_loop """owa""" +796 36 negative_sampler """basic""" +796 36 evaluator """rankbased""" +796 37 dataset """fb15k237""" +796 37 model """transe""" +796 37 loss """marginranking""" +796 37 regularizer """no""" +796 37 optimizer """adam""" +796 37 training_loop """owa""" +796 37 negative_sampler """basic""" +796 37 evaluator """rankbased""" +796 38 dataset """fb15k237""" +796 38 model """transe""" +796 38 loss """marginranking""" +796 38 regularizer """no""" +796 38 optimizer """adam""" +796 38 training_loop """owa""" +796 38 negative_sampler """basic""" +796 38 evaluator """rankbased""" +796 39 dataset """fb15k237""" +796 39 model """transe""" +796 39 loss """marginranking""" +796 39 regularizer """no""" +796 39 optimizer """adam""" +796 39 training_loop """owa""" +796 39 negative_sampler """basic""" +796 39 evaluator """rankbased""" +796 40 dataset """fb15k237""" +796 40 model """transe""" +796 40 loss """marginranking""" +796 40 regularizer """no""" +796 40 optimizer """adam""" +796 40 training_loop """owa""" +796 40 negative_sampler """basic""" +796 40 evaluator """rankbased""" +796 41 dataset """fb15k237""" +796 41 model """transe""" +796 41 loss """marginranking""" +796 41 regularizer """no""" +796 41 optimizer """adam""" +796 41 training_loop """owa""" +796 41 negative_sampler """basic""" +796 41 evaluator """rankbased""" +796 42 dataset """fb15k237""" +796 42 model """transe""" +796 42 loss """marginranking""" +796 42 regularizer """no""" 
+796 42 optimizer """adam""" +796 42 training_loop """owa""" +796 42 negative_sampler """basic""" +796 42 evaluator """rankbased""" +796 43 dataset """fb15k237""" +796 43 model """transe""" +796 43 loss """marginranking""" +796 43 regularizer """no""" +796 43 optimizer """adam""" +796 43 training_loop """owa""" +796 43 negative_sampler """basic""" +796 43 evaluator """rankbased""" +796 44 dataset """fb15k237""" +796 44 model """transe""" +796 44 loss """marginranking""" +796 44 regularizer """no""" +796 44 optimizer """adam""" +796 44 training_loop """owa""" +796 44 negative_sampler """basic""" +796 44 evaluator """rankbased""" +796 45 dataset """fb15k237""" +796 45 model """transe""" +796 45 loss """marginranking""" +796 45 regularizer """no""" +796 45 optimizer """adam""" +796 45 training_loop """owa""" +796 45 negative_sampler """basic""" +796 45 evaluator """rankbased""" +796 46 dataset """fb15k237""" +796 46 model """transe""" +796 46 loss """marginranking""" +796 46 regularizer """no""" +796 46 optimizer """adam""" +796 46 training_loop """owa""" +796 46 negative_sampler """basic""" +796 46 evaluator """rankbased""" +796 47 dataset """fb15k237""" +796 47 model """transe""" +796 47 loss """marginranking""" +796 47 regularizer """no""" +796 47 optimizer """adam""" +796 47 training_loop """owa""" +796 47 negative_sampler """basic""" +796 47 evaluator """rankbased""" +796 48 dataset """fb15k237""" +796 48 model """transe""" +796 48 loss """marginranking""" +796 48 regularizer """no""" +796 48 optimizer """adam""" +796 48 training_loop """owa""" +796 48 negative_sampler """basic""" +796 48 evaluator """rankbased""" +796 49 dataset """fb15k237""" +796 49 model """transe""" +796 49 loss """marginranking""" +796 49 regularizer """no""" +796 49 optimizer """adam""" +796 49 training_loop """owa""" +796 49 negative_sampler """basic""" +796 49 evaluator """rankbased""" +796 50 dataset """fb15k237""" +796 50 model """transe""" +796 50 loss """marginranking""" +796 50 
regularizer """no""" +796 50 optimizer """adam""" +796 50 training_loop """owa""" +796 50 negative_sampler """basic""" +796 50 evaluator """rankbased""" +796 51 dataset """fb15k237""" +796 51 model """transe""" +796 51 loss """marginranking""" +796 51 regularizer """no""" +796 51 optimizer """adam""" +796 51 training_loop """owa""" +796 51 negative_sampler """basic""" +796 51 evaluator """rankbased""" +796 52 dataset """fb15k237""" +796 52 model """transe""" +796 52 loss """marginranking""" +796 52 regularizer """no""" +796 52 optimizer """adam""" +796 52 training_loop """owa""" +796 52 negative_sampler """basic""" +796 52 evaluator """rankbased""" +796 53 dataset """fb15k237""" +796 53 model """transe""" +796 53 loss """marginranking""" +796 53 regularizer """no""" +796 53 optimizer """adam""" +796 53 training_loop """owa""" +796 53 negative_sampler """basic""" +796 53 evaluator """rankbased""" +796 54 dataset """fb15k237""" +796 54 model """transe""" +796 54 loss """marginranking""" +796 54 regularizer """no""" +796 54 optimizer """adam""" +796 54 training_loop """owa""" +796 54 negative_sampler """basic""" +796 54 evaluator """rankbased""" +796 55 dataset """fb15k237""" +796 55 model """transe""" +796 55 loss """marginranking""" +796 55 regularizer """no""" +796 55 optimizer """adam""" +796 55 training_loop """owa""" +796 55 negative_sampler """basic""" +796 55 evaluator """rankbased""" +796 56 dataset """fb15k237""" +796 56 model """transe""" +796 56 loss """marginranking""" +796 56 regularizer """no""" +796 56 optimizer """adam""" +796 56 training_loop """owa""" +796 56 negative_sampler """basic""" +796 56 evaluator """rankbased""" +796 57 dataset """fb15k237""" +796 57 model """transe""" +796 57 loss """marginranking""" +796 57 regularizer """no""" +796 57 optimizer """adam""" +796 57 training_loop """owa""" +796 57 negative_sampler """basic""" +796 57 evaluator """rankbased""" +796 58 dataset """fb15k237""" +796 58 model """transe""" +796 58 loss 
"""marginranking""" +796 58 regularizer """no""" +796 58 optimizer """adam""" +796 58 training_loop """owa""" +796 58 negative_sampler """basic""" +796 58 evaluator """rankbased""" +796 59 dataset """fb15k237""" +796 59 model """transe""" +796 59 loss """marginranking""" +796 59 regularizer """no""" +796 59 optimizer """adam""" +796 59 training_loop """owa""" +796 59 negative_sampler """basic""" +796 59 evaluator """rankbased""" +796 60 dataset """fb15k237""" +796 60 model """transe""" +796 60 loss """marginranking""" +796 60 regularizer """no""" +796 60 optimizer """adam""" +796 60 training_loop """owa""" +796 60 negative_sampler """basic""" +796 60 evaluator """rankbased""" +796 61 dataset """fb15k237""" +796 61 model """transe""" +796 61 loss """marginranking""" +796 61 regularizer """no""" +796 61 optimizer """adam""" +796 61 training_loop """owa""" +796 61 negative_sampler """basic""" +796 61 evaluator """rankbased""" +796 62 dataset """fb15k237""" +796 62 model """transe""" +796 62 loss """marginranking""" +796 62 regularizer """no""" +796 62 optimizer """adam""" +796 62 training_loop """owa""" +796 62 negative_sampler """basic""" +796 62 evaluator """rankbased""" +796 63 dataset """fb15k237""" +796 63 model """transe""" +796 63 loss """marginranking""" +796 63 regularizer """no""" +796 63 optimizer """adam""" +796 63 training_loop """owa""" +796 63 negative_sampler """basic""" +796 63 evaluator """rankbased""" +796 64 dataset """fb15k237""" +796 64 model """transe""" +796 64 loss """marginranking""" +796 64 regularizer """no""" +796 64 optimizer """adam""" +796 64 training_loop """owa""" +796 64 negative_sampler """basic""" +796 64 evaluator """rankbased""" +796 65 dataset """fb15k237""" +796 65 model """transe""" +796 65 loss """marginranking""" +796 65 regularizer """no""" +796 65 optimizer """adam""" +796 65 training_loop """owa""" +796 65 negative_sampler """basic""" +796 65 evaluator """rankbased""" +796 66 dataset """fb15k237""" +796 66 model 
"""transe""" +796 66 loss """marginranking""" +796 66 regularizer """no""" +796 66 optimizer """adam""" +796 66 training_loop """owa""" +796 66 negative_sampler """basic""" +796 66 evaluator """rankbased""" +796 67 dataset """fb15k237""" +796 67 model """transe""" +796 67 loss """marginranking""" +796 67 regularizer """no""" +796 67 optimizer """adam""" +796 67 training_loop """owa""" +796 67 negative_sampler """basic""" +796 67 evaluator """rankbased""" +796 68 dataset """fb15k237""" +796 68 model """transe""" +796 68 loss """marginranking""" +796 68 regularizer """no""" +796 68 optimizer """adam""" +796 68 training_loop """owa""" +796 68 negative_sampler """basic""" +796 68 evaluator """rankbased""" +796 69 dataset """fb15k237""" +796 69 model """transe""" +796 69 loss """marginranking""" +796 69 regularizer """no""" +796 69 optimizer """adam""" +796 69 training_loop """owa""" +796 69 negative_sampler """basic""" +796 69 evaluator """rankbased""" +796 70 dataset """fb15k237""" +796 70 model """transe""" +796 70 loss """marginranking""" +796 70 regularizer """no""" +796 70 optimizer """adam""" +796 70 training_loop """owa""" +796 70 negative_sampler """basic""" +796 70 evaluator """rankbased""" +797 1 model.embedding_dim 2.0 +797 1 model.scoring_fct_norm 2.0 +797 1 loss.margin 18.21238757577245 +797 1 loss.adversarial_temperature 0.18473421397430423 +797 1 optimizer.lr 0.030643964497844758 +797 1 negative_sampler.num_negs_per_pos 36.0 +797 1 training.batch_size 1.0 +797 2 model.embedding_dim 1.0 +797 2 model.scoring_fct_norm 2.0 +797 2 loss.margin 11.156282340587603 +797 2 loss.adversarial_temperature 0.6088314747564343 +797 2 optimizer.lr 0.008345285378519456 +797 2 negative_sampler.num_negs_per_pos 49.0 +797 2 training.batch_size 0.0 +797 3 model.embedding_dim 1.0 +797 3 model.scoring_fct_norm 1.0 +797 3 loss.margin 21.55449523648531 +797 3 loss.adversarial_temperature 0.4359855425426145 +797 3 optimizer.lr 0.00264454500669529 +797 3 
negative_sampler.num_negs_per_pos 21.0 +797 3 training.batch_size 2.0 +797 4 model.embedding_dim 1.0 +797 4 model.scoring_fct_norm 2.0 +797 4 loss.margin 23.223821129500397 +797 4 loss.adversarial_temperature 0.43065506219446603 +797 4 optimizer.lr 0.05532670863070594 +797 4 negative_sampler.num_negs_per_pos 11.0 +797 4 training.batch_size 1.0 +797 5 model.embedding_dim 0.0 +797 5 model.scoring_fct_norm 2.0 +797 5 loss.margin 29.53337157936595 +797 5 loss.adversarial_temperature 0.7993831936055923 +797 5 optimizer.lr 0.01869544686203605 +797 5 negative_sampler.num_negs_per_pos 75.0 +797 5 training.batch_size 0.0 +797 6 model.embedding_dim 1.0 +797 6 model.scoring_fct_norm 1.0 +797 6 loss.margin 2.0909732010634223 +797 6 loss.adversarial_temperature 0.7747981550521564 +797 6 optimizer.lr 0.09185895368917457 +797 6 negative_sampler.num_negs_per_pos 89.0 +797 6 training.batch_size 0.0 +797 7 model.embedding_dim 0.0 +797 7 model.scoring_fct_norm 2.0 +797 7 loss.margin 11.521020575012216 +797 7 loss.adversarial_temperature 0.17460707505588996 +797 7 optimizer.lr 0.009223169483512142 +797 7 negative_sampler.num_negs_per_pos 32.0 +797 7 training.batch_size 1.0 +797 8 model.embedding_dim 0.0 +797 8 model.scoring_fct_norm 2.0 +797 8 loss.margin 3.936496363300953 +797 8 loss.adversarial_temperature 0.7528074296337285 +797 8 optimizer.lr 0.06499230209428711 +797 8 negative_sampler.num_negs_per_pos 39.0 +797 8 training.batch_size 0.0 +797 9 model.embedding_dim 1.0 +797 9 model.scoring_fct_norm 1.0 +797 9 loss.margin 29.191275838035924 +797 9 loss.adversarial_temperature 0.20416131158425793 +797 9 optimizer.lr 0.06035338319919015 +797 9 negative_sampler.num_negs_per_pos 74.0 +797 9 training.batch_size 1.0 +797 10 model.embedding_dim 0.0 +797 10 model.scoring_fct_norm 2.0 +797 10 loss.margin 21.507290318230023 +797 10 loss.adversarial_temperature 0.5965298211833745 +797 10 optimizer.lr 0.09808540031329283 +797 10 negative_sampler.num_negs_per_pos 3.0 +797 10 training.batch_size 
2.0 +797 11 model.embedding_dim 1.0 +797 11 model.scoring_fct_norm 1.0 +797 11 loss.margin 18.845689748903816 +797 11 loss.adversarial_temperature 0.9141262359628357 +797 11 optimizer.lr 0.0022000372205192758 +797 11 negative_sampler.num_negs_per_pos 92.0 +797 11 training.batch_size 0.0 +797 12 model.embedding_dim 0.0 +797 12 model.scoring_fct_norm 2.0 +797 12 loss.margin 27.001778628678334 +797 12 loss.adversarial_temperature 0.8195398271676761 +797 12 optimizer.lr 0.007424924448535009 +797 12 negative_sampler.num_negs_per_pos 89.0 +797 12 training.batch_size 2.0 +797 13 model.embedding_dim 2.0 +797 13 model.scoring_fct_norm 2.0 +797 13 loss.margin 8.230599683340094 +797 13 loss.adversarial_temperature 0.23603734894020337 +797 13 optimizer.lr 0.02107366215063539 +797 13 negative_sampler.num_negs_per_pos 59.0 +797 13 training.batch_size 0.0 +797 14 model.embedding_dim 0.0 +797 14 model.scoring_fct_norm 1.0 +797 14 loss.margin 19.496216481364044 +797 14 loss.adversarial_temperature 0.8033054030112803 +797 14 optimizer.lr 0.05100676425796705 +797 14 negative_sampler.num_negs_per_pos 90.0 +797 14 training.batch_size 1.0 +797 15 model.embedding_dim 2.0 +797 15 model.scoring_fct_norm 2.0 +797 15 loss.margin 3.4854385839939055 +797 15 loss.adversarial_temperature 0.2179633543724384 +797 15 optimizer.lr 0.09632004042577999 +797 15 negative_sampler.num_negs_per_pos 74.0 +797 15 training.batch_size 0.0 +797 16 model.embedding_dim 2.0 +797 16 model.scoring_fct_norm 2.0 +797 16 loss.margin 23.392941897322558 +797 16 loss.adversarial_temperature 0.6820930109717063 +797 16 optimizer.lr 0.010729627760645698 +797 16 negative_sampler.num_negs_per_pos 79.0 +797 16 training.batch_size 0.0 +797 17 model.embedding_dim 0.0 +797 17 model.scoring_fct_norm 1.0 +797 17 loss.margin 13.57042065119099 +797 17 loss.adversarial_temperature 0.2711915903363731 +797 17 optimizer.lr 0.09873287256946758 +797 17 negative_sampler.num_negs_per_pos 69.0 +797 17 training.batch_size 1.0 +797 18 
model.embedding_dim 2.0 +797 18 model.scoring_fct_norm 2.0 +797 18 loss.margin 10.647999918981743 +797 18 loss.adversarial_temperature 0.13893894677225418 +797 18 optimizer.lr 0.0577952231933186 +797 18 negative_sampler.num_negs_per_pos 34.0 +797 18 training.batch_size 0.0 +797 19 model.embedding_dim 0.0 +797 19 model.scoring_fct_norm 1.0 +797 19 loss.margin 5.112646484683381 +797 19 loss.adversarial_temperature 0.5199057106400072 +797 19 optimizer.lr 0.0022764175727737596 +797 19 negative_sampler.num_negs_per_pos 81.0 +797 19 training.batch_size 2.0 +797 20 model.embedding_dim 0.0 +797 20 model.scoring_fct_norm 2.0 +797 20 loss.margin 8.922276060172877 +797 20 loss.adversarial_temperature 0.8618688478961126 +797 20 optimizer.lr 0.017154921175414693 +797 20 negative_sampler.num_negs_per_pos 41.0 +797 20 training.batch_size 2.0 +797 21 model.embedding_dim 0.0 +797 21 model.scoring_fct_norm 1.0 +797 21 loss.margin 17.610493359356347 +797 21 loss.adversarial_temperature 0.37487425535627633 +797 21 optimizer.lr 0.001084305368393955 +797 21 negative_sampler.num_negs_per_pos 48.0 +797 21 training.batch_size 2.0 +797 22 model.embedding_dim 2.0 +797 22 model.scoring_fct_norm 2.0 +797 22 loss.margin 17.150949929851745 +797 22 loss.adversarial_temperature 0.40423981199676357 +797 22 optimizer.lr 0.0013994216141919918 +797 22 negative_sampler.num_negs_per_pos 13.0 +797 22 training.batch_size 1.0 +797 23 model.embedding_dim 0.0 +797 23 model.scoring_fct_norm 2.0 +797 23 loss.margin 26.719435685533128 +797 23 loss.adversarial_temperature 0.47788628762574914 +797 23 optimizer.lr 0.004921528899649711 +797 23 negative_sampler.num_negs_per_pos 58.0 +797 23 training.batch_size 0.0 +797 24 model.embedding_dim 1.0 +797 24 model.scoring_fct_norm 1.0 +797 24 loss.margin 6.3188541662896 +797 24 loss.adversarial_temperature 0.23898024814456553 +797 24 optimizer.lr 0.003439172090332136 +797 24 negative_sampler.num_negs_per_pos 15.0 +797 24 training.batch_size 0.0 +797 25 
model.embedding_dim 2.0 +797 25 model.scoring_fct_norm 2.0 +797 25 loss.margin 6.241257313742837 +797 25 loss.adversarial_temperature 0.8697939426335344 +797 25 optimizer.lr 0.04397837964550903 +797 25 negative_sampler.num_negs_per_pos 32.0 +797 25 training.batch_size 0.0 +797 26 model.embedding_dim 2.0 +797 26 model.scoring_fct_norm 1.0 +797 26 loss.margin 29.789413764784456 +797 26 loss.adversarial_temperature 0.5092731420121591 +797 26 optimizer.lr 0.04864956888758979 +797 26 negative_sampler.num_negs_per_pos 57.0 +797 26 training.batch_size 1.0 +797 27 model.embedding_dim 0.0 +797 27 model.scoring_fct_norm 2.0 +797 27 loss.margin 15.47025470317693 +797 27 loss.adversarial_temperature 0.2975128988095161 +797 27 optimizer.lr 0.011490584364754699 +797 27 negative_sampler.num_negs_per_pos 66.0 +797 27 training.batch_size 1.0 +797 28 model.embedding_dim 0.0 +797 28 model.scoring_fct_norm 1.0 +797 28 loss.margin 11.056967119007538 +797 28 loss.adversarial_temperature 0.1078864336247688 +797 28 optimizer.lr 0.005658667648108996 +797 28 negative_sampler.num_negs_per_pos 48.0 +797 28 training.batch_size 1.0 +797 29 model.embedding_dim 1.0 +797 29 model.scoring_fct_norm 1.0 +797 29 loss.margin 13.501643652102453 +797 29 loss.adversarial_temperature 0.6487591112437467 +797 29 optimizer.lr 0.04382161496835647 +797 29 negative_sampler.num_negs_per_pos 5.0 +797 29 training.batch_size 2.0 +797 30 model.embedding_dim 2.0 +797 30 model.scoring_fct_norm 1.0 +797 30 loss.margin 29.805420518383336 +797 30 loss.adversarial_temperature 0.33037556357233544 +797 30 optimizer.lr 0.0040511391365953085 +797 30 negative_sampler.num_negs_per_pos 7.0 +797 30 training.batch_size 2.0 +797 31 model.embedding_dim 0.0 +797 31 model.scoring_fct_norm 1.0 +797 31 loss.margin 5.879920582450611 +797 31 loss.adversarial_temperature 0.8352676073087252 +797 31 optimizer.lr 0.03549059359756671 +797 31 negative_sampler.num_negs_per_pos 87.0 +797 31 training.batch_size 2.0 +797 32 model.embedding_dim 0.0 
+797 32 model.scoring_fct_norm 1.0 +797 32 loss.margin 3.780274726533037 +797 32 loss.adversarial_temperature 0.22002754984543574 +797 32 optimizer.lr 0.01273618182079008 +797 32 negative_sampler.num_negs_per_pos 53.0 +797 32 training.batch_size 0.0 +797 33 model.embedding_dim 0.0 +797 33 model.scoring_fct_norm 2.0 +797 33 loss.margin 13.65022390622371 +797 33 loss.adversarial_temperature 0.5047859165739171 +797 33 optimizer.lr 0.05402851678164836 +797 33 negative_sampler.num_negs_per_pos 50.0 +797 33 training.batch_size 0.0 +797 34 model.embedding_dim 1.0 +797 34 model.scoring_fct_norm 1.0 +797 34 loss.margin 7.091322102545423 +797 34 loss.adversarial_temperature 0.2425847268321797 +797 34 optimizer.lr 0.010285161037482098 +797 34 negative_sampler.num_negs_per_pos 54.0 +797 34 training.batch_size 2.0 +797 35 model.embedding_dim 2.0 +797 35 model.scoring_fct_norm 2.0 +797 35 loss.margin 11.005843667570131 +797 35 loss.adversarial_temperature 0.2739573209525119 +797 35 optimizer.lr 0.0045273851998748876 +797 35 negative_sampler.num_negs_per_pos 52.0 +797 35 training.batch_size 1.0 +797 36 model.embedding_dim 1.0 +797 36 model.scoring_fct_norm 2.0 +797 36 loss.margin 29.280597747627716 +797 36 loss.adversarial_temperature 0.23039455788302252 +797 36 optimizer.lr 0.04974672092388586 +797 36 negative_sampler.num_negs_per_pos 10.0 +797 36 training.batch_size 2.0 +797 1 dataset """fb15k237""" +797 1 model """transe""" +797 1 loss """nssa""" +797 1 regularizer """no""" +797 1 optimizer """adam""" +797 1 training_loop """owa""" +797 1 negative_sampler """basic""" +797 1 evaluator """rankbased""" +797 2 dataset """fb15k237""" +797 2 model """transe""" +797 2 loss """nssa""" +797 2 regularizer """no""" +797 2 optimizer """adam""" +797 2 training_loop """owa""" +797 2 negative_sampler """basic""" +797 2 evaluator """rankbased""" +797 3 dataset """fb15k237""" +797 3 model """transe""" +797 3 loss """nssa""" +797 3 regularizer """no""" +797 3 optimizer """adam""" +797 3 
training_loop """owa""" +797 3 negative_sampler """basic""" +797 3 evaluator """rankbased""" +797 4 dataset """fb15k237""" +797 4 model """transe""" +797 4 loss """nssa""" +797 4 regularizer """no""" +797 4 optimizer """adam""" +797 4 training_loop """owa""" +797 4 negative_sampler """basic""" +797 4 evaluator """rankbased""" +797 5 dataset """fb15k237""" +797 5 model """transe""" +797 5 loss """nssa""" +797 5 regularizer """no""" +797 5 optimizer """adam""" +797 5 training_loop """owa""" +797 5 negative_sampler """basic""" +797 5 evaluator """rankbased""" +797 6 dataset """fb15k237""" +797 6 model """transe""" +797 6 loss """nssa""" +797 6 regularizer """no""" +797 6 optimizer """adam""" +797 6 training_loop """owa""" +797 6 negative_sampler """basic""" +797 6 evaluator """rankbased""" +797 7 dataset """fb15k237""" +797 7 model """transe""" +797 7 loss """nssa""" +797 7 regularizer """no""" +797 7 optimizer """adam""" +797 7 training_loop """owa""" +797 7 negative_sampler """basic""" +797 7 evaluator """rankbased""" +797 8 dataset """fb15k237""" +797 8 model """transe""" +797 8 loss """nssa""" +797 8 regularizer """no""" +797 8 optimizer """adam""" +797 8 training_loop """owa""" +797 8 negative_sampler """basic""" +797 8 evaluator """rankbased""" +797 9 dataset """fb15k237""" +797 9 model """transe""" +797 9 loss """nssa""" +797 9 regularizer """no""" +797 9 optimizer """adam""" +797 9 training_loop """owa""" +797 9 negative_sampler """basic""" +797 9 evaluator """rankbased""" +797 10 dataset """fb15k237""" +797 10 model """transe""" +797 10 loss """nssa""" +797 10 regularizer """no""" +797 10 optimizer """adam""" +797 10 training_loop """owa""" +797 10 negative_sampler """basic""" +797 10 evaluator """rankbased""" +797 11 dataset """fb15k237""" +797 11 model """transe""" +797 11 loss """nssa""" +797 11 regularizer """no""" +797 11 optimizer """adam""" +797 11 training_loop """owa""" +797 11 negative_sampler """basic""" +797 11 evaluator """rankbased""" +797 12 
dataset """fb15k237""" +797 12 model """transe""" +797 12 loss """nssa""" +797 12 regularizer """no""" +797 12 optimizer """adam""" +797 12 training_loop """owa""" +797 12 negative_sampler """basic""" +797 12 evaluator """rankbased""" +797 13 dataset """fb15k237""" +797 13 model """transe""" +797 13 loss """nssa""" +797 13 regularizer """no""" +797 13 optimizer """adam""" +797 13 training_loop """owa""" +797 13 negative_sampler """basic""" +797 13 evaluator """rankbased""" +797 14 dataset """fb15k237""" +797 14 model """transe""" +797 14 loss """nssa""" +797 14 regularizer """no""" +797 14 optimizer """adam""" +797 14 training_loop """owa""" +797 14 negative_sampler """basic""" +797 14 evaluator """rankbased""" +797 15 dataset """fb15k237""" +797 15 model """transe""" +797 15 loss """nssa""" +797 15 regularizer """no""" +797 15 optimizer """adam""" +797 15 training_loop """owa""" +797 15 negative_sampler """basic""" +797 15 evaluator """rankbased""" +797 16 dataset """fb15k237""" +797 16 model """transe""" +797 16 loss """nssa""" +797 16 regularizer """no""" +797 16 optimizer """adam""" +797 16 training_loop """owa""" +797 16 negative_sampler """basic""" +797 16 evaluator """rankbased""" +797 17 dataset """fb15k237""" +797 17 model """transe""" +797 17 loss """nssa""" +797 17 regularizer """no""" +797 17 optimizer """adam""" +797 17 training_loop """owa""" +797 17 negative_sampler """basic""" +797 17 evaluator """rankbased""" +797 18 dataset """fb15k237""" +797 18 model """transe""" +797 18 loss """nssa""" +797 18 regularizer """no""" +797 18 optimizer """adam""" +797 18 training_loop """owa""" +797 18 negative_sampler """basic""" +797 18 evaluator """rankbased""" +797 19 dataset """fb15k237""" +797 19 model """transe""" +797 19 loss """nssa""" +797 19 regularizer """no""" +797 19 optimizer """adam""" +797 19 training_loop """owa""" +797 19 negative_sampler """basic""" +797 19 evaluator """rankbased""" +797 20 dataset """fb15k237""" +797 20 model """transe""" +797 
20 loss """nssa""" +797 20 regularizer """no""" +797 20 optimizer """adam""" +797 20 training_loop """owa""" +797 20 negative_sampler """basic""" +797 20 evaluator """rankbased""" +797 21 dataset """fb15k237""" +797 21 model """transe""" +797 21 loss """nssa""" +797 21 regularizer """no""" +797 21 optimizer """adam""" +797 21 training_loop """owa""" +797 21 negative_sampler """basic""" +797 21 evaluator """rankbased""" +797 22 dataset """fb15k237""" +797 22 model """transe""" +797 22 loss """nssa""" +797 22 regularizer """no""" +797 22 optimizer """adam""" +797 22 training_loop """owa""" +797 22 negative_sampler """basic""" +797 22 evaluator """rankbased""" +797 23 dataset """fb15k237""" +797 23 model """transe""" +797 23 loss """nssa""" +797 23 regularizer """no""" +797 23 optimizer """adam""" +797 23 training_loop """owa""" +797 23 negative_sampler """basic""" +797 23 evaluator """rankbased""" +797 24 dataset """fb15k237""" +797 24 model """transe""" +797 24 loss """nssa""" +797 24 regularizer """no""" +797 24 optimizer """adam""" +797 24 training_loop """owa""" +797 24 negative_sampler """basic""" +797 24 evaluator """rankbased""" +797 25 dataset """fb15k237""" +797 25 model """transe""" +797 25 loss """nssa""" +797 25 regularizer """no""" +797 25 optimizer """adam""" +797 25 training_loop """owa""" +797 25 negative_sampler """basic""" +797 25 evaluator """rankbased""" +797 26 dataset """fb15k237""" +797 26 model """transe""" +797 26 loss """nssa""" +797 26 regularizer """no""" +797 26 optimizer """adam""" +797 26 training_loop """owa""" +797 26 negative_sampler """basic""" +797 26 evaluator """rankbased""" +797 27 dataset """fb15k237""" +797 27 model """transe""" +797 27 loss """nssa""" +797 27 regularizer """no""" +797 27 optimizer """adam""" +797 27 training_loop """owa""" +797 27 negative_sampler """basic""" +797 27 evaluator """rankbased""" +797 28 dataset """fb15k237""" +797 28 model """transe""" +797 28 loss """nssa""" +797 28 regularizer """no""" +797 28 
optimizer """adam""" +797 28 training_loop """owa""" +797 28 negative_sampler """basic""" +797 28 evaluator """rankbased""" +797 29 dataset """fb15k237""" +797 29 model """transe""" +797 29 loss """nssa""" +797 29 regularizer """no""" +797 29 optimizer """adam""" +797 29 training_loop """owa""" +797 29 negative_sampler """basic""" +797 29 evaluator """rankbased""" +797 30 dataset """fb15k237""" +797 30 model """transe""" +797 30 loss """nssa""" +797 30 regularizer """no""" +797 30 optimizer """adam""" +797 30 training_loop """owa""" +797 30 negative_sampler """basic""" +797 30 evaluator """rankbased""" +797 31 dataset """fb15k237""" +797 31 model """transe""" +797 31 loss """nssa""" +797 31 regularizer """no""" +797 31 optimizer """adam""" +797 31 training_loop """owa""" +797 31 negative_sampler """basic""" +797 31 evaluator """rankbased""" +797 32 dataset """fb15k237""" +797 32 model """transe""" +797 32 loss """nssa""" +797 32 regularizer """no""" +797 32 optimizer """adam""" +797 32 training_loop """owa""" +797 32 negative_sampler """basic""" +797 32 evaluator """rankbased""" +797 33 dataset """fb15k237""" +797 33 model """transe""" +797 33 loss """nssa""" +797 33 regularizer """no""" +797 33 optimizer """adam""" +797 33 training_loop """owa""" +797 33 negative_sampler """basic""" +797 33 evaluator """rankbased""" +797 34 dataset """fb15k237""" +797 34 model """transe""" +797 34 loss """nssa""" +797 34 regularizer """no""" +797 34 optimizer """adam""" +797 34 training_loop """owa""" +797 34 negative_sampler """basic""" +797 34 evaluator """rankbased""" +797 35 dataset """fb15k237""" +797 35 model """transe""" +797 35 loss """nssa""" +797 35 regularizer """no""" +797 35 optimizer """adam""" +797 35 training_loop """owa""" +797 35 negative_sampler """basic""" +797 35 evaluator """rankbased""" +797 36 dataset """fb15k237""" +797 36 model """transe""" +797 36 loss """nssa""" +797 36 regularizer """no""" +797 36 optimizer """adam""" +797 36 training_loop """owa""" 
+797 36 negative_sampler """basic""" +797 36 evaluator """rankbased""" +798 1 model.embedding_dim 0.0 +798 1 model.scoring_fct_norm 2.0 +798 1 loss.margin 5.0857029715490665 +798 1 loss.adversarial_temperature 0.6932780471652727 +798 1 optimizer.lr 0.005349981623982054 +798 1 negative_sampler.num_negs_per_pos 14.0 +798 1 training.batch_size 1.0 +798 2 model.embedding_dim 2.0 +798 2 model.scoring_fct_norm 2.0 +798 2 loss.margin 28.892439156607498 +798 2 loss.adversarial_temperature 0.9185796964743326 +798 2 optimizer.lr 0.05524623598906733 +798 2 negative_sampler.num_negs_per_pos 65.0 +798 2 training.batch_size 2.0 +798 3 model.embedding_dim 2.0 +798 3 model.scoring_fct_norm 2.0 +798 3 loss.margin 20.62127373537284 +798 3 loss.adversarial_temperature 0.8781013630159064 +798 3 optimizer.lr 0.03693077941010353 +798 3 negative_sampler.num_negs_per_pos 31.0 +798 3 training.batch_size 1.0 +798 4 model.embedding_dim 0.0 +798 4 model.scoring_fct_norm 2.0 +798 4 loss.margin 24.66891198049868 +798 4 loss.adversarial_temperature 0.10804387118809211 +798 4 optimizer.lr 0.0017984061882502275 +798 4 negative_sampler.num_negs_per_pos 62.0 +798 4 training.batch_size 1.0 +798 5 model.embedding_dim 2.0 +798 5 model.scoring_fct_norm 2.0 +798 5 loss.margin 26.003082837899488 +798 5 loss.adversarial_temperature 0.7531300387404685 +798 5 optimizer.lr 0.01851467447777171 +798 5 negative_sampler.num_negs_per_pos 94.0 +798 5 training.batch_size 2.0 +798 6 model.embedding_dim 1.0 +798 6 model.scoring_fct_norm 2.0 +798 6 loss.margin 27.056663592446384 +798 6 loss.adversarial_temperature 0.2231481728227968 +798 6 optimizer.lr 0.00362728565259561 +798 6 negative_sampler.num_negs_per_pos 59.0 +798 6 training.batch_size 0.0 +798 7 model.embedding_dim 0.0 +798 7 model.scoring_fct_norm 1.0 +798 7 loss.margin 12.84335549728122 +798 7 loss.adversarial_temperature 0.28476734320183156 +798 7 optimizer.lr 0.0010283999961899508 +798 7 negative_sampler.num_negs_per_pos 32.0 +798 7 training.batch_size 0.0 
+798 8 model.embedding_dim 1.0 +798 8 model.scoring_fct_norm 2.0 +798 8 loss.margin 28.786767295070256 +798 8 loss.adversarial_temperature 0.21927743588822535 +798 8 optimizer.lr 0.001399336448266566 +798 8 negative_sampler.num_negs_per_pos 66.0 +798 8 training.batch_size 2.0 +798 9 model.embedding_dim 1.0 +798 9 model.scoring_fct_norm 1.0 +798 9 loss.margin 6.222462834154314 +798 9 loss.adversarial_temperature 0.9651398797239642 +798 9 optimizer.lr 0.01750379247573172 +798 9 negative_sampler.num_negs_per_pos 59.0 +798 9 training.batch_size 1.0 +798 10 model.embedding_dim 2.0 +798 10 model.scoring_fct_norm 2.0 +798 10 loss.margin 8.712444041866638 +798 10 loss.adversarial_temperature 0.2117889109882152 +798 10 optimizer.lr 0.025699372280997217 +798 10 negative_sampler.num_negs_per_pos 31.0 +798 10 training.batch_size 0.0 +798 11 model.embedding_dim 2.0 +798 11 model.scoring_fct_norm 2.0 +798 11 loss.margin 22.080480031133277 +798 11 loss.adversarial_temperature 0.36579088219209993 +798 11 optimizer.lr 0.06770043890787955 +798 11 negative_sampler.num_negs_per_pos 42.0 +798 11 training.batch_size 2.0 +798 12 model.embedding_dim 2.0 +798 12 model.scoring_fct_norm 2.0 +798 12 loss.margin 14.049506185423427 +798 12 loss.adversarial_temperature 0.5664107047011567 +798 12 optimizer.lr 0.030316939965710877 +798 12 negative_sampler.num_negs_per_pos 57.0 +798 12 training.batch_size 0.0 +798 13 model.embedding_dim 0.0 +798 13 model.scoring_fct_norm 2.0 +798 13 loss.margin 11.89188535162052 +798 13 loss.adversarial_temperature 0.29676807077062406 +798 13 optimizer.lr 0.0029782795910063974 +798 13 negative_sampler.num_negs_per_pos 97.0 +798 13 training.batch_size 1.0 +798 14 model.embedding_dim 2.0 +798 14 model.scoring_fct_norm 2.0 +798 14 loss.margin 29.63030862377981 +798 14 loss.adversarial_temperature 0.9654610480330647 +798 14 optimizer.lr 0.004474262065387818 +798 14 negative_sampler.num_negs_per_pos 53.0 +798 14 training.batch_size 2.0 +798 15 model.embedding_dim 1.0 
+798 15 model.scoring_fct_norm 1.0 +798 15 loss.margin 27.53292331924969 +798 15 loss.adversarial_temperature 0.5590025630448576 +798 15 optimizer.lr 0.0203395121107825 +798 15 negative_sampler.num_negs_per_pos 47.0 +798 15 training.batch_size 2.0 +798 16 model.embedding_dim 2.0 +798 16 model.scoring_fct_norm 1.0 +798 16 loss.margin 27.70493225668645 +798 16 loss.adversarial_temperature 0.4216291061829563 +798 16 optimizer.lr 0.0015307638261543267 +798 16 negative_sampler.num_negs_per_pos 10.0 +798 16 training.batch_size 2.0 +798 17 model.embedding_dim 1.0 +798 17 model.scoring_fct_norm 1.0 +798 17 loss.margin 16.028833671375118 +798 17 loss.adversarial_temperature 0.30967213613794015 +798 17 optimizer.lr 0.0037396189500512934 +798 17 negative_sampler.num_negs_per_pos 97.0 +798 17 training.batch_size 1.0 +798 18 model.embedding_dim 1.0 +798 18 model.scoring_fct_norm 2.0 +798 18 loss.margin 19.459191609284602 +798 18 loss.adversarial_temperature 0.9266211186367849 +798 18 optimizer.lr 0.0026664355816644345 +798 18 negative_sampler.num_negs_per_pos 0.0 +798 18 training.batch_size 0.0 +798 19 model.embedding_dim 1.0 +798 19 model.scoring_fct_norm 2.0 +798 19 loss.margin 14.028516235669358 +798 19 loss.adversarial_temperature 0.31292359295497235 +798 19 optimizer.lr 0.0019209359300197926 +798 19 negative_sampler.num_negs_per_pos 55.0 +798 19 training.batch_size 2.0 +798 20 model.embedding_dim 2.0 +798 20 model.scoring_fct_norm 1.0 +798 20 loss.margin 8.07241907545967 +798 20 loss.adversarial_temperature 0.22117079012519714 +798 20 optimizer.lr 0.012624693591160495 +798 20 negative_sampler.num_negs_per_pos 82.0 +798 20 training.batch_size 1.0 +798 21 model.embedding_dim 0.0 +798 21 model.scoring_fct_norm 2.0 +798 21 loss.margin 7.212765688857548 +798 21 loss.adversarial_temperature 0.8378664318375645 +798 21 optimizer.lr 0.0015935723049742333 +798 21 negative_sampler.num_negs_per_pos 28.0 +798 21 training.batch_size 2.0 +798 22 model.embedding_dim 2.0 +798 22 
model.scoring_fct_norm 1.0 +798 22 loss.margin 21.806784999557607 +798 22 loss.adversarial_temperature 0.7254143668636784 +798 22 optimizer.lr 0.003061666509461479 +798 22 negative_sampler.num_negs_per_pos 29.0 +798 22 training.batch_size 2.0 +798 23 model.embedding_dim 2.0 +798 23 model.scoring_fct_norm 1.0 +798 23 loss.margin 27.13871151999014 +798 23 loss.adversarial_temperature 0.7065498023279663 +798 23 optimizer.lr 0.016840105558049618 +798 23 negative_sampler.num_negs_per_pos 45.0 +798 23 training.batch_size 2.0 +798 24 model.embedding_dim 0.0 +798 24 model.scoring_fct_norm 2.0 +798 24 loss.margin 27.2578498760812 +798 24 loss.adversarial_temperature 0.22889566227088243 +798 24 optimizer.lr 0.004960123186162039 +798 24 negative_sampler.num_negs_per_pos 53.0 +798 24 training.batch_size 0.0 +798 25 model.embedding_dim 0.0 +798 25 model.scoring_fct_norm 1.0 +798 25 loss.margin 6.55420494350458 +798 25 loss.adversarial_temperature 0.7534738877800157 +798 25 optimizer.lr 0.009086327543217991 +798 25 negative_sampler.num_negs_per_pos 19.0 +798 25 training.batch_size 2.0 +798 26 model.embedding_dim 2.0 +798 26 model.scoring_fct_norm 2.0 +798 26 loss.margin 26.612712415519695 +798 26 loss.adversarial_temperature 0.3223009955055116 +798 26 optimizer.lr 0.05110710064283006 +798 26 negative_sampler.num_negs_per_pos 69.0 +798 26 training.batch_size 0.0 +798 27 model.embedding_dim 2.0 +798 27 model.scoring_fct_norm 2.0 +798 27 loss.margin 25.019324480647594 +798 27 loss.adversarial_temperature 0.7940591440035176 +798 27 optimizer.lr 0.030588925934995827 +798 27 negative_sampler.num_negs_per_pos 45.0 +798 27 training.batch_size 0.0 +798 28 model.embedding_dim 2.0 +798 28 model.scoring_fct_norm 1.0 +798 28 loss.margin 21.608946517751477 +798 28 loss.adversarial_temperature 0.5035361574622292 +798 28 optimizer.lr 0.026789973164756994 +798 28 negative_sampler.num_negs_per_pos 51.0 +798 28 training.batch_size 0.0 +798 29 model.embedding_dim 1.0 +798 29 model.scoring_fct_norm 
2.0 +798 29 loss.margin 17.51133038152929 +798 29 loss.adversarial_temperature 0.29679662293404563 +798 29 optimizer.lr 0.001161615079827962 +798 29 negative_sampler.num_negs_per_pos 73.0 +798 29 training.batch_size 1.0 +798 30 model.embedding_dim 2.0 +798 30 model.scoring_fct_norm 2.0 +798 30 loss.margin 14.394960059365756 +798 30 loss.adversarial_temperature 0.3034809648030886 +798 30 optimizer.lr 0.0603608970877514 +798 30 negative_sampler.num_negs_per_pos 14.0 +798 30 training.batch_size 1.0 +798 31 model.embedding_dim 2.0 +798 31 model.scoring_fct_norm 1.0 +798 31 loss.margin 28.937359547726114 +798 31 loss.adversarial_temperature 0.5536781492837201 +798 31 optimizer.lr 0.032787408162655185 +798 31 negative_sampler.num_negs_per_pos 31.0 +798 31 training.batch_size 0.0 +798 32 model.embedding_dim 1.0 +798 32 model.scoring_fct_norm 1.0 +798 32 loss.margin 25.695825437534815 +798 32 loss.adversarial_temperature 0.26248320718359003 +798 32 optimizer.lr 0.008114986344045162 +798 32 negative_sampler.num_negs_per_pos 11.0 +798 32 training.batch_size 2.0 +798 33 model.embedding_dim 0.0 +798 33 model.scoring_fct_norm 1.0 +798 33 loss.margin 20.507225663067608 +798 33 loss.adversarial_temperature 0.22661658263722093 +798 33 optimizer.lr 0.0013509623498345006 +798 33 negative_sampler.num_negs_per_pos 86.0 +798 33 training.batch_size 0.0 +798 34 model.embedding_dim 2.0 +798 34 model.scoring_fct_norm 2.0 +798 34 loss.margin 2.1930364016797492 +798 34 loss.adversarial_temperature 0.7425704718310147 +798 34 optimizer.lr 0.006959150158639705 +798 34 negative_sampler.num_negs_per_pos 32.0 +798 34 training.batch_size 1.0 +798 35 model.embedding_dim 2.0 +798 35 model.scoring_fct_norm 1.0 +798 35 loss.margin 6.603974865042725 +798 35 loss.adversarial_temperature 0.8926187397340102 +798 35 optimizer.lr 0.026598209803015558 +798 35 negative_sampler.num_negs_per_pos 71.0 +798 35 training.batch_size 1.0 +798 36 model.embedding_dim 0.0 +798 36 model.scoring_fct_norm 1.0 +798 36 
loss.margin 24.45495826350835 +798 36 loss.adversarial_temperature 0.8256636862618685 +798 36 optimizer.lr 0.006710565091942002 +798 36 negative_sampler.num_negs_per_pos 75.0 +798 36 training.batch_size 0.0 +798 37 model.embedding_dim 0.0 +798 37 model.scoring_fct_norm 1.0 +798 37 loss.margin 18.642051009415734 +798 37 loss.adversarial_temperature 0.5982195660966113 +798 37 optimizer.lr 0.006144695409413477 +798 37 negative_sampler.num_negs_per_pos 59.0 +798 37 training.batch_size 1.0 +798 38 model.embedding_dim 0.0 +798 38 model.scoring_fct_norm 2.0 +798 38 loss.margin 16.361802596685422 +798 38 loss.adversarial_temperature 0.4060312018929046 +798 38 optimizer.lr 0.002558149519737553 +798 38 negative_sampler.num_negs_per_pos 83.0 +798 38 training.batch_size 2.0 +798 39 model.embedding_dim 1.0 +798 39 model.scoring_fct_norm 1.0 +798 39 loss.margin 15.412269844775926 +798 39 loss.adversarial_temperature 0.1925600845013561 +798 39 optimizer.lr 0.0026022401554210584 +798 39 negative_sampler.num_negs_per_pos 97.0 +798 39 training.batch_size 2.0 +798 40 model.embedding_dim 1.0 +798 40 model.scoring_fct_norm 2.0 +798 40 loss.margin 2.9911528376712107 +798 40 loss.adversarial_temperature 0.5163027036991048 +798 40 optimizer.lr 0.02325613412813909 +798 40 negative_sampler.num_negs_per_pos 83.0 +798 40 training.batch_size 0.0 +798 41 model.embedding_dim 2.0 +798 41 model.scoring_fct_norm 1.0 +798 41 loss.margin 18.668587907584733 +798 41 loss.adversarial_temperature 0.47588671492017265 +798 41 optimizer.lr 0.007198005955635944 +798 41 negative_sampler.num_negs_per_pos 2.0 +798 41 training.batch_size 1.0 +798 42 model.embedding_dim 0.0 +798 42 model.scoring_fct_norm 2.0 +798 42 loss.margin 21.9434456655149 +798 42 loss.adversarial_temperature 0.4868271363326313 +798 42 optimizer.lr 0.010378066397871547 +798 42 negative_sampler.num_negs_per_pos 89.0 +798 42 training.batch_size 2.0 +798 43 model.embedding_dim 0.0 +798 43 model.scoring_fct_norm 1.0 +798 43 loss.margin 
4.491930159216469 +798 43 loss.adversarial_temperature 0.5736127807800498 +798 43 optimizer.lr 0.0045920276346610766 +798 43 negative_sampler.num_negs_per_pos 12.0 +798 43 training.batch_size 2.0 +798 44 model.embedding_dim 1.0 +798 44 model.scoring_fct_norm 2.0 +798 44 loss.margin 26.354277797777964 +798 44 loss.adversarial_temperature 0.4981006036657584 +798 44 optimizer.lr 0.007924034693025675 +798 44 negative_sampler.num_negs_per_pos 21.0 +798 44 training.batch_size 2.0 +798 45 model.embedding_dim 0.0 +798 45 model.scoring_fct_norm 1.0 +798 45 loss.margin 23.30876322697583 +798 45 loss.adversarial_temperature 0.8198822653822987 +798 45 optimizer.lr 0.02352381422839476 +798 45 negative_sampler.num_negs_per_pos 77.0 +798 45 training.batch_size 1.0 +798 46 model.embedding_dim 1.0 +798 46 model.scoring_fct_norm 2.0 +798 46 loss.margin 21.754974421554785 +798 46 loss.adversarial_temperature 0.5326347725394953 +798 46 optimizer.lr 0.0043386928741829265 +798 46 negative_sampler.num_negs_per_pos 45.0 +798 46 training.batch_size 2.0 +798 47 model.embedding_dim 0.0 +798 47 model.scoring_fct_norm 2.0 +798 47 loss.margin 18.87832079544546 +798 47 loss.adversarial_temperature 0.12356980575773986 +798 47 optimizer.lr 0.002595039436303664 +798 47 negative_sampler.num_negs_per_pos 47.0 +798 47 training.batch_size 1.0 +798 48 model.embedding_dim 2.0 +798 48 model.scoring_fct_norm 2.0 +798 48 loss.margin 3.635146990450451 +798 48 loss.adversarial_temperature 0.7573713846225792 +798 48 optimizer.lr 0.04167416318216598 +798 48 negative_sampler.num_negs_per_pos 70.0 +798 48 training.batch_size 0.0 +798 49 model.embedding_dim 0.0 +798 49 model.scoring_fct_norm 1.0 +798 49 loss.margin 18.549826264066702 +798 49 loss.adversarial_temperature 0.6438667109775065 +798 49 optimizer.lr 0.02704285816483514 +798 49 negative_sampler.num_negs_per_pos 52.0 +798 49 training.batch_size 1.0 +798 50 model.embedding_dim 0.0 +798 50 model.scoring_fct_norm 1.0 +798 50 loss.margin 13.6378458142396 +798 
50 loss.adversarial_temperature 0.3441780791526199 +798 50 optimizer.lr 0.0016298097795920388 +798 50 negative_sampler.num_negs_per_pos 87.0 +798 50 training.batch_size 2.0 +798 51 model.embedding_dim 0.0 +798 51 model.scoring_fct_norm 1.0 +798 51 loss.margin 17.80174127304558 +798 51 loss.adversarial_temperature 0.3320527176885818 +798 51 optimizer.lr 0.0374251291987397 +798 51 negative_sampler.num_negs_per_pos 63.0 +798 51 training.batch_size 2.0 +798 52 model.embedding_dim 2.0 +798 52 model.scoring_fct_norm 2.0 +798 52 loss.margin 3.569143757247623 +798 52 loss.adversarial_temperature 0.262548672883304 +798 52 optimizer.lr 0.0039601177165267685 +798 52 negative_sampler.num_negs_per_pos 59.0 +798 52 training.batch_size 0.0 +798 53 model.embedding_dim 0.0 +798 53 model.scoring_fct_norm 2.0 +798 53 loss.margin 15.318892538985798 +798 53 loss.adversarial_temperature 0.8753133260181869 +798 53 optimizer.lr 0.007962202421308127 +798 53 negative_sampler.num_negs_per_pos 55.0 +798 53 training.batch_size 0.0 +798 54 model.embedding_dim 1.0 +798 54 model.scoring_fct_norm 2.0 +798 54 loss.margin 20.324401354045744 +798 54 loss.adversarial_temperature 0.3412754716380694 +798 54 optimizer.lr 0.01868007924099361 +798 54 negative_sampler.num_negs_per_pos 27.0 +798 54 training.batch_size 0.0 +798 55 model.embedding_dim 0.0 +798 55 model.scoring_fct_norm 1.0 +798 55 loss.margin 4.659481740871016 +798 55 loss.adversarial_temperature 0.515045798155219 +798 55 optimizer.lr 0.008337195130058716 +798 55 negative_sampler.num_negs_per_pos 74.0 +798 55 training.batch_size 0.0 +798 56 model.embedding_dim 0.0 +798 56 model.scoring_fct_norm 2.0 +798 56 loss.margin 15.267011891627384 +798 56 loss.adversarial_temperature 0.31235473629153676 +798 56 optimizer.lr 0.005521589540117911 +798 56 negative_sampler.num_negs_per_pos 87.0 +798 56 training.batch_size 0.0 +798 57 model.embedding_dim 0.0 +798 57 model.scoring_fct_norm 1.0 +798 57 loss.margin 4.084253876738307 +798 57 
loss.adversarial_temperature 0.2880680046946936 +798 57 optimizer.lr 0.006952270416108141 +798 57 negative_sampler.num_negs_per_pos 53.0 +798 57 training.batch_size 0.0 +798 58 model.embedding_dim 1.0 +798 58 model.scoring_fct_norm 2.0 +798 58 loss.margin 25.487867040483206 +798 58 loss.adversarial_temperature 0.6979995639477082 +798 58 optimizer.lr 0.029236426810976702 +798 58 negative_sampler.num_negs_per_pos 43.0 +798 58 training.batch_size 2.0 +798 59 model.embedding_dim 2.0 +798 59 model.scoring_fct_norm 1.0 +798 59 loss.margin 17.27847115415718 +798 59 loss.adversarial_temperature 0.7178593597978841 +798 59 optimizer.lr 0.06452740035425558 +798 59 negative_sampler.num_negs_per_pos 57.0 +798 59 training.batch_size 0.0 +798 60 model.embedding_dim 0.0 +798 60 model.scoring_fct_norm 1.0 +798 60 loss.margin 21.30845788503996 +798 60 loss.adversarial_temperature 0.15699111841493713 +798 60 optimizer.lr 0.0112818191042405 +798 60 negative_sampler.num_negs_per_pos 75.0 +798 60 training.batch_size 0.0 +798 61 model.embedding_dim 1.0 +798 61 model.scoring_fct_norm 1.0 +798 61 loss.margin 18.903818834908634 +798 61 loss.adversarial_temperature 0.9198779570160376 +798 61 optimizer.lr 0.0028336804651300372 +798 61 negative_sampler.num_negs_per_pos 22.0 +798 61 training.batch_size 0.0 +798 62 model.embedding_dim 0.0 +798 62 model.scoring_fct_norm 1.0 +798 62 loss.margin 25.43701404970191 +798 62 loss.adversarial_temperature 0.9979941015372485 +798 62 optimizer.lr 0.03130668860900941 +798 62 negative_sampler.num_negs_per_pos 78.0 +798 62 training.batch_size 0.0 +798 63 model.embedding_dim 1.0 +798 63 model.scoring_fct_norm 1.0 +798 63 loss.margin 10.600599442398922 +798 63 loss.adversarial_temperature 0.43238000283558303 +798 63 optimizer.lr 0.05129707148533214 +798 63 negative_sampler.num_negs_per_pos 30.0 +798 63 training.batch_size 0.0 +798 64 model.embedding_dim 0.0 +798 64 model.scoring_fct_norm 2.0 +798 64 loss.margin 12.607273107716818 +798 64 
loss.adversarial_temperature 0.5408997930499312 +798 64 optimizer.lr 0.008598880862879625 +798 64 negative_sampler.num_negs_per_pos 64.0 +798 64 training.batch_size 0.0 +798 65 model.embedding_dim 2.0 +798 65 model.scoring_fct_norm 1.0 +798 65 loss.margin 26.74940897306672 +798 65 loss.adversarial_temperature 0.47389974129175866 +798 65 optimizer.lr 0.00833366221533752 +798 65 negative_sampler.num_negs_per_pos 0.0 +798 65 training.batch_size 1.0 +798 66 model.embedding_dim 1.0 +798 66 model.scoring_fct_norm 2.0 +798 66 loss.margin 19.99645824745581 +798 66 loss.adversarial_temperature 0.16237053813431374 +798 66 optimizer.lr 0.0011393544198848196 +798 66 negative_sampler.num_negs_per_pos 20.0 +798 66 training.batch_size 1.0 +798 67 model.embedding_dim 1.0 +798 67 model.scoring_fct_norm 2.0 +798 67 loss.margin 25.279449875271983 +798 67 loss.adversarial_temperature 0.2256316498311523 +798 67 optimizer.lr 0.08399895371876609 +798 67 negative_sampler.num_negs_per_pos 24.0 +798 67 training.batch_size 2.0 +798 68 model.embedding_dim 2.0 +798 68 model.scoring_fct_norm 1.0 +798 68 loss.margin 5.51612378043217 +798 68 loss.adversarial_temperature 0.7720872943335902 +798 68 optimizer.lr 0.003425593024514708 +798 68 negative_sampler.num_negs_per_pos 83.0 +798 68 training.batch_size 1.0 +798 69 model.embedding_dim 0.0 +798 69 model.scoring_fct_norm 2.0 +798 69 loss.margin 1.8591105598761042 +798 69 loss.adversarial_temperature 0.5247505379917748 +798 69 optimizer.lr 0.00830356836265456 +798 69 negative_sampler.num_negs_per_pos 54.0 +798 69 training.batch_size 2.0 +798 1 dataset """fb15k237""" +798 1 model """transe""" +798 1 loss """nssa""" +798 1 regularizer """no""" +798 1 optimizer """adam""" +798 1 training_loop """owa""" +798 1 negative_sampler """basic""" +798 1 evaluator """rankbased""" +798 2 dataset """fb15k237""" +798 2 model """transe""" +798 2 loss """nssa""" +798 2 regularizer """no""" +798 2 optimizer """adam""" +798 2 training_loop """owa""" +798 2 
negative_sampler """basic""" +798 2 evaluator """rankbased""" +798 3 dataset """fb15k237""" +798 3 model """transe""" +798 3 loss """nssa""" +798 3 regularizer """no""" +798 3 optimizer """adam""" +798 3 training_loop """owa""" +798 3 negative_sampler """basic""" +798 3 evaluator """rankbased""" +798 4 dataset """fb15k237""" +798 4 model """transe""" +798 4 loss """nssa""" +798 4 regularizer """no""" +798 4 optimizer """adam""" +798 4 training_loop """owa""" +798 4 negative_sampler """basic""" +798 4 evaluator """rankbased""" +798 5 dataset """fb15k237""" +798 5 model """transe""" +798 5 loss """nssa""" +798 5 regularizer """no""" +798 5 optimizer """adam""" +798 5 training_loop """owa""" +798 5 negative_sampler """basic""" +798 5 evaluator """rankbased""" +798 6 dataset """fb15k237""" +798 6 model """transe""" +798 6 loss """nssa""" +798 6 regularizer """no""" +798 6 optimizer """adam""" +798 6 training_loop """owa""" +798 6 negative_sampler """basic""" +798 6 evaluator """rankbased""" +798 7 dataset """fb15k237""" +798 7 model """transe""" +798 7 loss """nssa""" +798 7 regularizer """no""" +798 7 optimizer """adam""" +798 7 training_loop """owa""" +798 7 negative_sampler """basic""" +798 7 evaluator """rankbased""" +798 8 dataset """fb15k237""" +798 8 model """transe""" +798 8 loss """nssa""" +798 8 regularizer """no""" +798 8 optimizer """adam""" +798 8 training_loop """owa""" +798 8 negative_sampler """basic""" +798 8 evaluator """rankbased""" +798 9 dataset """fb15k237""" +798 9 model """transe""" +798 9 loss """nssa""" +798 9 regularizer """no""" +798 9 optimizer """adam""" +798 9 training_loop """owa""" +798 9 negative_sampler """basic""" +798 9 evaluator """rankbased""" +798 10 dataset """fb15k237""" +798 10 model """transe""" +798 10 loss """nssa""" +798 10 regularizer """no""" +798 10 optimizer """adam""" +798 10 training_loop """owa""" +798 10 negative_sampler """basic""" +798 10 evaluator """rankbased""" +798 11 dataset """fb15k237""" +798 11 model 
"""transe""" +798 11 loss """nssa""" +798 11 regularizer """no""" +798 11 optimizer """adam""" +798 11 training_loop """owa""" +798 11 negative_sampler """basic""" +798 11 evaluator """rankbased""" +798 12 dataset """fb15k237""" +798 12 model """transe""" +798 12 loss """nssa""" +798 12 regularizer """no""" +798 12 optimizer """adam""" +798 12 training_loop """owa""" +798 12 negative_sampler """basic""" +798 12 evaluator """rankbased""" +798 13 dataset """fb15k237""" +798 13 model """transe""" +798 13 loss """nssa""" +798 13 regularizer """no""" +798 13 optimizer """adam""" +798 13 training_loop """owa""" +798 13 negative_sampler """basic""" +798 13 evaluator """rankbased""" +798 14 dataset """fb15k237""" +798 14 model """transe""" +798 14 loss """nssa""" +798 14 regularizer """no""" +798 14 optimizer """adam""" +798 14 training_loop """owa""" +798 14 negative_sampler """basic""" +798 14 evaluator """rankbased""" +798 15 dataset """fb15k237""" +798 15 model """transe""" +798 15 loss """nssa""" +798 15 regularizer """no""" +798 15 optimizer """adam""" +798 15 training_loop """owa""" +798 15 negative_sampler """basic""" +798 15 evaluator """rankbased""" +798 16 dataset """fb15k237""" +798 16 model """transe""" +798 16 loss """nssa""" +798 16 regularizer """no""" +798 16 optimizer """adam""" +798 16 training_loop """owa""" +798 16 negative_sampler """basic""" +798 16 evaluator """rankbased""" +798 17 dataset """fb15k237""" +798 17 model """transe""" +798 17 loss """nssa""" +798 17 regularizer """no""" +798 17 optimizer """adam""" +798 17 training_loop """owa""" +798 17 negative_sampler """basic""" +798 17 evaluator """rankbased""" +798 18 dataset """fb15k237""" +798 18 model """transe""" +798 18 loss """nssa""" +798 18 regularizer """no""" +798 18 optimizer """adam""" +798 18 training_loop """owa""" +798 18 negative_sampler """basic""" +798 18 evaluator """rankbased""" +798 19 dataset """fb15k237""" +798 19 model """transe""" +798 19 loss """nssa""" +798 19 
regularizer """no""" +798 19 optimizer """adam""" +798 19 training_loop """owa""" +798 19 negative_sampler """basic""" +798 19 evaluator """rankbased""" +798 20 dataset """fb15k237""" +798 20 model """transe""" +798 20 loss """nssa""" +798 20 regularizer """no""" +798 20 optimizer """adam""" +798 20 training_loop """owa""" +798 20 negative_sampler """basic""" +798 20 evaluator """rankbased""" +798 21 dataset """fb15k237""" +798 21 model """transe""" +798 21 loss """nssa""" +798 21 regularizer """no""" +798 21 optimizer """adam""" +798 21 training_loop """owa""" +798 21 negative_sampler """basic""" +798 21 evaluator """rankbased""" +798 22 dataset """fb15k237""" +798 22 model """transe""" +798 22 loss """nssa""" +798 22 regularizer """no""" +798 22 optimizer """adam""" +798 22 training_loop """owa""" +798 22 negative_sampler """basic""" +798 22 evaluator """rankbased""" +798 23 dataset """fb15k237""" +798 23 model """transe""" +798 23 loss """nssa""" +798 23 regularizer """no""" +798 23 optimizer """adam""" +798 23 training_loop """owa""" +798 23 negative_sampler """basic""" +798 23 evaluator """rankbased""" +798 24 dataset """fb15k237""" +798 24 model """transe""" +798 24 loss """nssa""" +798 24 regularizer """no""" +798 24 optimizer """adam""" +798 24 training_loop """owa""" +798 24 negative_sampler """basic""" +798 24 evaluator """rankbased""" +798 25 dataset """fb15k237""" +798 25 model """transe""" +798 25 loss """nssa""" +798 25 regularizer """no""" +798 25 optimizer """adam""" +798 25 training_loop """owa""" +798 25 negative_sampler """basic""" +798 25 evaluator """rankbased""" +798 26 dataset """fb15k237""" +798 26 model """transe""" +798 26 loss """nssa""" +798 26 regularizer """no""" +798 26 optimizer """adam""" +798 26 training_loop """owa""" +798 26 negative_sampler """basic""" +798 26 evaluator """rankbased""" +798 27 dataset """fb15k237""" +798 27 model """transe""" +798 27 loss """nssa""" +798 27 regularizer """no""" +798 27 optimizer """adam""" +798 
27 training_loop """owa""" +798 27 negative_sampler """basic""" +798 27 evaluator """rankbased""" +798 28 dataset """fb15k237""" +798 28 model """transe""" +798 28 loss """nssa""" +798 28 regularizer """no""" +798 28 optimizer """adam""" +798 28 training_loop """owa""" +798 28 negative_sampler """basic""" +798 28 evaluator """rankbased""" +798 29 dataset """fb15k237""" +798 29 model """transe""" +798 29 loss """nssa""" +798 29 regularizer """no""" +798 29 optimizer """adam""" +798 29 training_loop """owa""" +798 29 negative_sampler """basic""" +798 29 evaluator """rankbased""" +798 30 dataset """fb15k237""" +798 30 model """transe""" +798 30 loss """nssa""" +798 30 regularizer """no""" +798 30 optimizer """adam""" +798 30 training_loop """owa""" +798 30 negative_sampler """basic""" +798 30 evaluator """rankbased""" +798 31 dataset """fb15k237""" +798 31 model """transe""" +798 31 loss """nssa""" +798 31 regularizer """no""" +798 31 optimizer """adam""" +798 31 training_loop """owa""" +798 31 negative_sampler """basic""" +798 31 evaluator """rankbased""" +798 32 dataset """fb15k237""" +798 32 model """transe""" +798 32 loss """nssa""" +798 32 regularizer """no""" +798 32 optimizer """adam""" +798 32 training_loop """owa""" +798 32 negative_sampler """basic""" +798 32 evaluator """rankbased""" +798 33 dataset """fb15k237""" +798 33 model """transe""" +798 33 loss """nssa""" +798 33 regularizer """no""" +798 33 optimizer """adam""" +798 33 training_loop """owa""" +798 33 negative_sampler """basic""" +798 33 evaluator """rankbased""" +798 34 dataset """fb15k237""" +798 34 model """transe""" +798 34 loss """nssa""" +798 34 regularizer """no""" +798 34 optimizer """adam""" +798 34 training_loop """owa""" +798 34 negative_sampler """basic""" +798 34 evaluator """rankbased""" +798 35 dataset """fb15k237""" +798 35 model """transe""" +798 35 loss """nssa""" +798 35 regularizer """no""" +798 35 optimizer """adam""" +798 35 training_loop """owa""" +798 35 negative_sampler 
"""basic""" +798 35 evaluator """rankbased""" +798 36 dataset """fb15k237""" +798 36 model """transe""" +798 36 loss """nssa""" +798 36 regularizer """no""" +798 36 optimizer """adam""" +798 36 training_loop """owa""" +798 36 negative_sampler """basic""" +798 36 evaluator """rankbased""" +798 37 dataset """fb15k237""" +798 37 model """transe""" +798 37 loss """nssa""" +798 37 regularizer """no""" +798 37 optimizer """adam""" +798 37 training_loop """owa""" +798 37 negative_sampler """basic""" +798 37 evaluator """rankbased""" +798 38 dataset """fb15k237""" +798 38 model """transe""" +798 38 loss """nssa""" +798 38 regularizer """no""" +798 38 optimizer """adam""" +798 38 training_loop """owa""" +798 38 negative_sampler """basic""" +798 38 evaluator """rankbased""" +798 39 dataset """fb15k237""" +798 39 model """transe""" +798 39 loss """nssa""" +798 39 regularizer """no""" +798 39 optimizer """adam""" +798 39 training_loop """owa""" +798 39 negative_sampler """basic""" +798 39 evaluator """rankbased""" +798 40 dataset """fb15k237""" +798 40 model """transe""" +798 40 loss """nssa""" +798 40 regularizer """no""" +798 40 optimizer """adam""" +798 40 training_loop """owa""" +798 40 negative_sampler """basic""" +798 40 evaluator """rankbased""" +798 41 dataset """fb15k237""" +798 41 model """transe""" +798 41 loss """nssa""" +798 41 regularizer """no""" +798 41 optimizer """adam""" +798 41 training_loop """owa""" +798 41 negative_sampler """basic""" +798 41 evaluator """rankbased""" +798 42 dataset """fb15k237""" +798 42 model """transe""" +798 42 loss """nssa""" +798 42 regularizer """no""" +798 42 optimizer """adam""" +798 42 training_loop """owa""" +798 42 negative_sampler """basic""" +798 42 evaluator """rankbased""" +798 43 dataset """fb15k237""" +798 43 model """transe""" +798 43 loss """nssa""" +798 43 regularizer """no""" +798 43 optimizer """adam""" +798 43 training_loop """owa""" +798 43 negative_sampler """basic""" +798 43 evaluator """rankbased""" +798 44 
dataset """fb15k237""" +798 44 model """transe""" +798 44 loss """nssa""" +798 44 regularizer """no""" +798 44 optimizer """adam""" +798 44 training_loop """owa""" +798 44 negative_sampler """basic""" +798 44 evaluator """rankbased""" +798 45 dataset """fb15k237""" +798 45 model """transe""" +798 45 loss """nssa""" +798 45 regularizer """no""" +798 45 optimizer """adam""" +798 45 training_loop """owa""" +798 45 negative_sampler """basic""" +798 45 evaluator """rankbased""" +798 46 dataset """fb15k237""" +798 46 model """transe""" +798 46 loss """nssa""" +798 46 regularizer """no""" +798 46 optimizer """adam""" +798 46 training_loop """owa""" +798 46 negative_sampler """basic""" +798 46 evaluator """rankbased""" +798 47 dataset """fb15k237""" +798 47 model """transe""" +798 47 loss """nssa""" +798 47 regularizer """no""" +798 47 optimizer """adam""" +798 47 training_loop """owa""" +798 47 negative_sampler """basic""" +798 47 evaluator """rankbased""" +798 48 dataset """fb15k237""" +798 48 model """transe""" +798 48 loss """nssa""" +798 48 regularizer """no""" +798 48 optimizer """adam""" +798 48 training_loop """owa""" +798 48 negative_sampler """basic""" +798 48 evaluator """rankbased""" +798 49 dataset """fb15k237""" +798 49 model """transe""" +798 49 loss """nssa""" +798 49 regularizer """no""" +798 49 optimizer """adam""" +798 49 training_loop """owa""" +798 49 negative_sampler """basic""" +798 49 evaluator """rankbased""" +798 50 dataset """fb15k237""" +798 50 model """transe""" +798 50 loss """nssa""" +798 50 regularizer """no""" +798 50 optimizer """adam""" +798 50 training_loop """owa""" +798 50 negative_sampler """basic""" +798 50 evaluator """rankbased""" +798 51 dataset """fb15k237""" +798 51 model """transe""" +798 51 loss """nssa""" +798 51 regularizer """no""" +798 51 optimizer """adam""" +798 51 training_loop """owa""" +798 51 negative_sampler """basic""" +798 51 evaluator """rankbased""" +798 52 dataset """fb15k237""" +798 52 model """transe""" +798 
52 loss """nssa""" +798 52 regularizer """no""" +798 52 optimizer """adam""" +798 52 training_loop """owa""" +798 52 negative_sampler """basic""" +798 52 evaluator """rankbased""" +798 53 dataset """fb15k237""" +798 53 model """transe""" +798 53 loss """nssa""" +798 53 regularizer """no""" +798 53 optimizer """adam""" +798 53 training_loop """owa""" +798 53 negative_sampler """basic""" +798 53 evaluator """rankbased""" +798 54 dataset """fb15k237""" +798 54 model """transe""" +798 54 loss """nssa""" +798 54 regularizer """no""" +798 54 optimizer """adam""" +798 54 training_loop """owa""" +798 54 negative_sampler """basic""" +798 54 evaluator """rankbased""" +798 55 dataset """fb15k237""" +798 55 model """transe""" +798 55 loss """nssa""" +798 55 regularizer """no""" +798 55 optimizer """adam""" +798 55 training_loop """owa""" +798 55 negative_sampler """basic""" +798 55 evaluator """rankbased""" +798 56 dataset """fb15k237""" +798 56 model """transe""" +798 56 loss """nssa""" +798 56 regularizer """no""" +798 56 optimizer """adam""" +798 56 training_loop """owa""" +798 56 negative_sampler """basic""" +798 56 evaluator """rankbased""" +798 57 dataset """fb15k237""" +798 57 model """transe""" +798 57 loss """nssa""" +798 57 regularizer """no""" +798 57 optimizer """adam""" +798 57 training_loop """owa""" +798 57 negative_sampler """basic""" +798 57 evaluator """rankbased""" +798 58 dataset """fb15k237""" +798 58 model """transe""" +798 58 loss """nssa""" +798 58 regularizer """no""" +798 58 optimizer """adam""" +798 58 training_loop """owa""" +798 58 negative_sampler """basic""" +798 58 evaluator """rankbased""" +798 59 dataset """fb15k237""" +798 59 model """transe""" +798 59 loss """nssa""" +798 59 regularizer """no""" +798 59 optimizer """adam""" +798 59 training_loop """owa""" +798 59 negative_sampler """basic""" +798 59 evaluator """rankbased""" +798 60 dataset """fb15k237""" +798 60 model """transe""" +798 60 loss """nssa""" +798 60 regularizer """no""" +798 60 
optimizer """adam""" +798 60 training_loop """owa""" +798 60 negative_sampler """basic""" +798 60 evaluator """rankbased""" +798 61 dataset """fb15k237""" +798 61 model """transe""" +798 61 loss """nssa""" +798 61 regularizer """no""" +798 61 optimizer """adam""" +798 61 training_loop """owa""" +798 61 negative_sampler """basic""" +798 61 evaluator """rankbased""" +798 62 dataset """fb15k237""" +798 62 model """transe""" +798 62 loss """nssa""" +798 62 regularizer """no""" +798 62 optimizer """adam""" +798 62 training_loop """owa""" +798 62 negative_sampler """basic""" +798 62 evaluator """rankbased""" +798 63 dataset """fb15k237""" +798 63 model """transe""" +798 63 loss """nssa""" +798 63 regularizer """no""" +798 63 optimizer """adam""" +798 63 training_loop """owa""" +798 63 negative_sampler """basic""" +798 63 evaluator """rankbased""" +798 64 dataset """fb15k237""" +798 64 model """transe""" +798 64 loss """nssa""" +798 64 regularizer """no""" +798 64 optimizer """adam""" +798 64 training_loop """owa""" +798 64 negative_sampler """basic""" +798 64 evaluator """rankbased""" +798 65 dataset """fb15k237""" +798 65 model """transe""" +798 65 loss """nssa""" +798 65 regularizer """no""" +798 65 optimizer """adam""" +798 65 training_loop """owa""" +798 65 negative_sampler """basic""" +798 65 evaluator """rankbased""" +798 66 dataset """fb15k237""" +798 66 model """transe""" +798 66 loss """nssa""" +798 66 regularizer """no""" +798 66 optimizer """adam""" +798 66 training_loop """owa""" +798 66 negative_sampler """basic""" +798 66 evaluator """rankbased""" +798 67 dataset """fb15k237""" +798 67 model """transe""" +798 67 loss """nssa""" +798 67 regularizer """no""" +798 67 optimizer """adam""" +798 67 training_loop """owa""" +798 67 negative_sampler """basic""" +798 67 evaluator """rankbased""" +798 68 dataset """fb15k237""" +798 68 model """transe""" +798 68 loss """nssa""" +798 68 regularizer """no""" +798 68 optimizer """adam""" +798 68 training_loop """owa""" 
+798 68 negative_sampler """basic""" +798 68 evaluator """rankbased""" +798 69 dataset """fb15k237""" +798 69 model """transe""" +798 69 loss """nssa""" +798 69 regularizer """no""" +798 69 optimizer """adam""" +798 69 training_loop """owa""" +798 69 negative_sampler """basic""" +798 69 evaluator """rankbased""" +799 1 model.embedding_dim 0.0 +799 1 model.scoring_fct_norm 1.0 +799 1 optimizer.lr 0.07096088173636696 +799 1 training.batch_size 2.0 +799 1 training.label_smoothing 0.07840261278723511 +799 2 model.embedding_dim 0.0 +799 2 model.scoring_fct_norm 2.0 +799 2 optimizer.lr 0.060365580079893 +799 2 training.batch_size 2.0 +799 2 training.label_smoothing 0.0014963898714073216 +799 3 model.embedding_dim 2.0 +799 3 model.scoring_fct_norm 1.0 +799 3 optimizer.lr 0.003942848225963993 +799 3 training.batch_size 0.0 +799 3 training.label_smoothing 0.11141978103187071 +799 4 model.embedding_dim 0.0 +799 4 model.scoring_fct_norm 2.0 +799 4 optimizer.lr 0.03027875367998259 +799 4 training.batch_size 0.0 +799 4 training.label_smoothing 0.03652397551830769 +799 5 model.embedding_dim 0.0 +799 5 model.scoring_fct_norm 2.0 +799 5 optimizer.lr 0.002077550018729659 +799 5 training.batch_size 0.0 +799 5 training.label_smoothing 0.0016759211853020408 +799 6 model.embedding_dim 2.0 +799 6 model.scoring_fct_norm 2.0 +799 6 optimizer.lr 0.0033475115643519546 +799 6 training.batch_size 0.0 +799 6 training.label_smoothing 0.1225845651729152 +799 7 model.embedding_dim 2.0 +799 7 model.scoring_fct_norm 1.0 +799 7 optimizer.lr 0.02846404042521643 +799 7 training.batch_size 1.0 +799 7 training.label_smoothing 0.09551780111282077 +799 8 model.embedding_dim 0.0 +799 8 model.scoring_fct_norm 2.0 +799 8 optimizer.lr 0.010471310016691193 +799 8 training.batch_size 2.0 +799 8 training.label_smoothing 0.047555146935148805 +799 9 model.embedding_dim 1.0 +799 9 model.scoring_fct_norm 1.0 +799 9 optimizer.lr 0.032676761529688014 +799 9 training.batch_size 1.0 +799 9 training.label_smoothing 
0.004797722538422334 +799 10 model.embedding_dim 1.0 +799 10 model.scoring_fct_norm 2.0 +799 10 optimizer.lr 0.06623843065377495 +799 10 training.batch_size 1.0 +799 10 training.label_smoothing 0.9804448439103293 +799 11 model.embedding_dim 0.0 +799 11 model.scoring_fct_norm 2.0 +799 11 optimizer.lr 0.07645916807707699 +799 11 training.batch_size 2.0 +799 11 training.label_smoothing 0.0212087202435948 +799 12 model.embedding_dim 0.0 +799 12 model.scoring_fct_norm 2.0 +799 12 optimizer.lr 0.011361572229523557 +799 12 training.batch_size 1.0 +799 12 training.label_smoothing 0.0011089323705974635 +799 13 model.embedding_dim 0.0 +799 13 model.scoring_fct_norm 2.0 +799 13 optimizer.lr 0.0034740176451214123 +799 13 training.batch_size 0.0 +799 13 training.label_smoothing 0.4072251614828615 +799 14 model.embedding_dim 0.0 +799 14 model.scoring_fct_norm 2.0 +799 14 optimizer.lr 0.020690391707091978 +799 14 training.batch_size 1.0 +799 14 training.label_smoothing 0.14457026691305686 +799 15 model.embedding_dim 1.0 +799 15 model.scoring_fct_norm 1.0 +799 15 optimizer.lr 0.05494399794288299 +799 15 training.batch_size 1.0 +799 15 training.label_smoothing 0.5047844362535909 +799 16 model.embedding_dim 0.0 +799 16 model.scoring_fct_norm 2.0 +799 16 optimizer.lr 0.016171561801619865 +799 16 training.batch_size 2.0 +799 16 training.label_smoothing 0.0011761571259087664 +799 17 model.embedding_dim 2.0 +799 17 model.scoring_fct_norm 1.0 +799 17 optimizer.lr 0.02382941308676484 +799 17 training.batch_size 1.0 +799 17 training.label_smoothing 0.2481930515295104 +799 18 model.embedding_dim 2.0 +799 18 model.scoring_fct_norm 1.0 +799 18 optimizer.lr 0.0017673634003615888 +799 18 training.batch_size 2.0 +799 18 training.label_smoothing 0.00996062627876656 +799 1 dataset """fb15k237""" +799 1 model """transe""" +799 1 loss """bceaftersigmoid""" +799 1 regularizer """no""" +799 1 optimizer """adam""" +799 1 training_loop """lcwa""" +799 1 evaluator """rankbased""" +799 2 dataset 
"""fb15k237""" +799 2 model """transe""" +799 2 loss """bceaftersigmoid""" +799 2 regularizer """no""" +799 2 optimizer """adam""" +799 2 training_loop """lcwa""" +799 2 evaluator """rankbased""" +799 3 dataset """fb15k237""" +799 3 model """transe""" +799 3 loss """bceaftersigmoid""" +799 3 regularizer """no""" +799 3 optimizer """adam""" +799 3 training_loop """lcwa""" +799 3 evaluator """rankbased""" +799 4 dataset """fb15k237""" +799 4 model """transe""" +799 4 loss """bceaftersigmoid""" +799 4 regularizer """no""" +799 4 optimizer """adam""" +799 4 training_loop """lcwa""" +799 4 evaluator """rankbased""" +799 5 dataset """fb15k237""" +799 5 model """transe""" +799 5 loss """bceaftersigmoid""" +799 5 regularizer """no""" +799 5 optimizer """adam""" +799 5 training_loop """lcwa""" +799 5 evaluator """rankbased""" +799 6 dataset """fb15k237""" +799 6 model """transe""" +799 6 loss """bceaftersigmoid""" +799 6 regularizer """no""" +799 6 optimizer """adam""" +799 6 training_loop """lcwa""" +799 6 evaluator """rankbased""" +799 7 dataset """fb15k237""" +799 7 model """transe""" +799 7 loss """bceaftersigmoid""" +799 7 regularizer """no""" +799 7 optimizer """adam""" +799 7 training_loop """lcwa""" +799 7 evaluator """rankbased""" +799 8 dataset """fb15k237""" +799 8 model """transe""" +799 8 loss """bceaftersigmoid""" +799 8 regularizer """no""" +799 8 optimizer """adam""" +799 8 training_loop """lcwa""" +799 8 evaluator """rankbased""" +799 9 dataset """fb15k237""" +799 9 model """transe""" +799 9 loss """bceaftersigmoid""" +799 9 regularizer """no""" +799 9 optimizer """adam""" +799 9 training_loop """lcwa""" +799 9 evaluator """rankbased""" +799 10 dataset """fb15k237""" +799 10 model """transe""" +799 10 loss """bceaftersigmoid""" +799 10 regularizer """no""" +799 10 optimizer """adam""" +799 10 training_loop """lcwa""" +799 10 evaluator """rankbased""" +799 11 dataset """fb15k237""" +799 11 model """transe""" +799 11 loss """bceaftersigmoid""" +799 11 
regularizer """no""" +799 11 optimizer """adam""" +799 11 training_loop """lcwa""" +799 11 evaluator """rankbased""" +799 12 dataset """fb15k237""" +799 12 model """transe""" +799 12 loss """bceaftersigmoid""" +799 12 regularizer """no""" +799 12 optimizer """adam""" +799 12 training_loop """lcwa""" +799 12 evaluator """rankbased""" +799 13 dataset """fb15k237""" +799 13 model """transe""" +799 13 loss """bceaftersigmoid""" +799 13 regularizer """no""" +799 13 optimizer """adam""" +799 13 training_loop """lcwa""" +799 13 evaluator """rankbased""" +799 14 dataset """fb15k237""" +799 14 model """transe""" +799 14 loss """bceaftersigmoid""" +799 14 regularizer """no""" +799 14 optimizer """adam""" +799 14 training_loop """lcwa""" +799 14 evaluator """rankbased""" +799 15 dataset """fb15k237""" +799 15 model """transe""" +799 15 loss """bceaftersigmoid""" +799 15 regularizer """no""" +799 15 optimizer """adam""" +799 15 training_loop """lcwa""" +799 15 evaluator """rankbased""" +799 16 dataset """fb15k237""" +799 16 model """transe""" +799 16 loss """bceaftersigmoid""" +799 16 regularizer """no""" +799 16 optimizer """adam""" +799 16 training_loop """lcwa""" +799 16 evaluator """rankbased""" +799 17 dataset """fb15k237""" +799 17 model """transe""" +799 17 loss """bceaftersigmoid""" +799 17 regularizer """no""" +799 17 optimizer """adam""" +799 17 training_loop """lcwa""" +799 17 evaluator """rankbased""" +799 18 dataset """fb15k237""" +799 18 model """transe""" +799 18 loss """bceaftersigmoid""" +799 18 regularizer """no""" +799 18 optimizer """adam""" +799 18 training_loop """lcwa""" +799 18 evaluator """rankbased""" +800 1 model.embedding_dim 1.0 +800 1 model.scoring_fct_norm 2.0 +800 1 optimizer.lr 0.003529252389180424 +800 1 training.batch_size 2.0 +800 1 training.label_smoothing 0.19056474068870824 +800 2 model.embedding_dim 1.0 +800 2 model.scoring_fct_norm 1.0 +800 2 optimizer.lr 0.006431784186420906 +800 2 training.batch_size 0.0 +800 2 
training.label_smoothing 0.006233699621076187 +800 3 model.embedding_dim 0.0 +800 3 model.scoring_fct_norm 2.0 +800 3 optimizer.lr 0.0012224740838359457 +800 3 training.batch_size 0.0 +800 3 training.label_smoothing 0.08103014055992823 +800 4 model.embedding_dim 2.0 +800 4 model.scoring_fct_norm 2.0 +800 4 optimizer.lr 0.03727130648557559 +800 4 training.batch_size 1.0 +800 4 training.label_smoothing 0.20509627344454326 +800 5 model.embedding_dim 0.0 +800 5 model.scoring_fct_norm 1.0 +800 5 optimizer.lr 0.003882643683009246 +800 5 training.batch_size 0.0 +800 5 training.label_smoothing 0.06190755075206799 +800 6 model.embedding_dim 0.0 +800 6 model.scoring_fct_norm 1.0 +800 6 optimizer.lr 0.05539332670928133 +800 6 training.batch_size 2.0 +800 6 training.label_smoothing 0.21877872289620656 +800 7 model.embedding_dim 2.0 +800 7 model.scoring_fct_norm 2.0 +800 7 optimizer.lr 0.02585272034357958 +800 7 training.batch_size 2.0 +800 7 training.label_smoothing 0.41512051110560655 +800 8 model.embedding_dim 1.0 +800 8 model.scoring_fct_norm 2.0 +800 8 optimizer.lr 0.0513199592940198 +800 8 training.batch_size 1.0 +800 8 training.label_smoothing 0.0077800521958632865 +800 9 model.embedding_dim 2.0 +800 9 model.scoring_fct_norm 1.0 +800 9 optimizer.lr 0.037685963132477164 +800 9 training.batch_size 0.0 +800 9 training.label_smoothing 0.1821979385528264 +800 10 model.embedding_dim 0.0 +800 10 model.scoring_fct_norm 1.0 +800 10 optimizer.lr 0.033704157446593476 +800 10 training.batch_size 1.0 +800 10 training.label_smoothing 0.005974821658714106 +800 11 model.embedding_dim 0.0 +800 11 model.scoring_fct_norm 1.0 +800 11 optimizer.lr 0.0020772677760358734 +800 11 training.batch_size 0.0 +800 11 training.label_smoothing 0.037127830116210625 +800 12 model.embedding_dim 0.0 +800 12 model.scoring_fct_norm 2.0 +800 12 optimizer.lr 0.001771608143595813 +800 12 training.batch_size 0.0 +800 12 training.label_smoothing 0.02053676102011525 +800 13 model.embedding_dim 0.0 +800 13 
model.scoring_fct_norm 1.0 +800 13 optimizer.lr 0.08875956204456095 +800 13 training.batch_size 2.0 +800 13 training.label_smoothing 0.0011375744463955038 +800 14 model.embedding_dim 2.0 +800 14 model.scoring_fct_norm 2.0 +800 14 optimizer.lr 0.018784579405564994 +800 14 training.batch_size 1.0 +800 14 training.label_smoothing 0.38087271628331615 +800 15 model.embedding_dim 0.0 +800 15 model.scoring_fct_norm 2.0 +800 15 optimizer.lr 0.0017136290928695915 +800 15 training.batch_size 0.0 +800 15 training.label_smoothing 0.0052055817366723905 +800 1 dataset """fb15k237""" +800 1 model """transe""" +800 1 loss """softplus""" +800 1 regularizer """no""" +800 1 optimizer """adam""" +800 1 training_loop """lcwa""" +800 1 evaluator """rankbased""" +800 2 dataset """fb15k237""" +800 2 model """transe""" +800 2 loss """softplus""" +800 2 regularizer """no""" +800 2 optimizer """adam""" +800 2 training_loop """lcwa""" +800 2 evaluator """rankbased""" +800 3 dataset """fb15k237""" +800 3 model """transe""" +800 3 loss """softplus""" +800 3 regularizer """no""" +800 3 optimizer """adam""" +800 3 training_loop """lcwa""" +800 3 evaluator """rankbased""" +800 4 dataset """fb15k237""" +800 4 model """transe""" +800 4 loss """softplus""" +800 4 regularizer """no""" +800 4 optimizer """adam""" +800 4 training_loop """lcwa""" +800 4 evaluator """rankbased""" +800 5 dataset """fb15k237""" +800 5 model """transe""" +800 5 loss """softplus""" +800 5 regularizer """no""" +800 5 optimizer """adam""" +800 5 training_loop """lcwa""" +800 5 evaluator """rankbased""" +800 6 dataset """fb15k237""" +800 6 model """transe""" +800 6 loss """softplus""" +800 6 regularizer """no""" +800 6 optimizer """adam""" +800 6 training_loop """lcwa""" +800 6 evaluator """rankbased""" +800 7 dataset """fb15k237""" +800 7 model """transe""" +800 7 loss """softplus""" +800 7 regularizer """no""" +800 7 optimizer """adam""" +800 7 training_loop """lcwa""" +800 7 evaluator """rankbased""" +800 8 dataset 
"""fb15k237""" +800 8 model """transe""" +800 8 loss """softplus""" +800 8 regularizer """no""" +800 8 optimizer """adam""" +800 8 training_loop """lcwa""" +800 8 evaluator """rankbased""" +800 9 dataset """fb15k237""" +800 9 model """transe""" +800 9 loss """softplus""" +800 9 regularizer """no""" +800 9 optimizer """adam""" +800 9 training_loop """lcwa""" +800 9 evaluator """rankbased""" +800 10 dataset """fb15k237""" +800 10 model """transe""" +800 10 loss """softplus""" +800 10 regularizer """no""" +800 10 optimizer """adam""" +800 10 training_loop """lcwa""" +800 10 evaluator """rankbased""" +800 11 dataset """fb15k237""" +800 11 model """transe""" +800 11 loss """softplus""" +800 11 regularizer """no""" +800 11 optimizer """adam""" +800 11 training_loop """lcwa""" +800 11 evaluator """rankbased""" +800 12 dataset """fb15k237""" +800 12 model """transe""" +800 12 loss """softplus""" +800 12 regularizer """no""" +800 12 optimizer """adam""" +800 12 training_loop """lcwa""" +800 12 evaluator """rankbased""" +800 13 dataset """fb15k237""" +800 13 model """transe""" +800 13 loss """softplus""" +800 13 regularizer """no""" +800 13 optimizer """adam""" +800 13 training_loop """lcwa""" +800 13 evaluator """rankbased""" +800 14 dataset """fb15k237""" +800 14 model """transe""" +800 14 loss """softplus""" +800 14 regularizer """no""" +800 14 optimizer """adam""" +800 14 training_loop """lcwa""" +800 14 evaluator """rankbased""" +800 15 dataset """fb15k237""" +800 15 model """transe""" +800 15 loss """softplus""" +800 15 regularizer """no""" +800 15 optimizer """adam""" +800 15 training_loop """lcwa""" +800 15 evaluator """rankbased""" +801 1 model.embedding_dim 0.0 +801 1 model.scoring_fct_norm 1.0 +801 1 optimizer.lr 0.06781029356830567 +801 1 training.batch_size 1.0 +801 1 training.label_smoothing 0.9958865170462867 +801 2 model.embedding_dim 1.0 +801 2 model.scoring_fct_norm 1.0 +801 2 optimizer.lr 0.003140196489809229 +801 2 training.batch_size 0.0 +801 2 
training.label_smoothing 0.23549159303016753 +801 3 model.embedding_dim 1.0 +801 3 model.scoring_fct_norm 1.0 +801 3 optimizer.lr 0.022369291751466173 +801 3 training.batch_size 0.0 +801 3 training.label_smoothing 0.7709084528599467 +801 4 model.embedding_dim 2.0 +801 4 model.scoring_fct_norm 2.0 +801 4 optimizer.lr 0.009546063573923656 +801 4 training.batch_size 2.0 +801 4 training.label_smoothing 0.36705303090025393 +801 5 model.embedding_dim 0.0 +801 5 model.scoring_fct_norm 2.0 +801 5 optimizer.lr 0.014862714320946055 +801 5 training.batch_size 1.0 +801 5 training.label_smoothing 0.07575224131971475 +801 6 model.embedding_dim 1.0 +801 6 model.scoring_fct_norm 2.0 +801 6 optimizer.lr 0.0014464953430491493 +801 6 training.batch_size 2.0 +801 6 training.label_smoothing 0.06442064164044309 +801 7 model.embedding_dim 1.0 +801 7 model.scoring_fct_norm 2.0 +801 7 optimizer.lr 0.04168168789728073 +801 7 training.batch_size 1.0 +801 7 training.label_smoothing 0.09330805390845971 +801 8 model.embedding_dim 1.0 +801 8 model.scoring_fct_norm 1.0 +801 8 optimizer.lr 0.003160956670520717 +801 8 training.batch_size 1.0 +801 8 training.label_smoothing 0.01936772238356839 +801 9 model.embedding_dim 0.0 +801 9 model.scoring_fct_norm 2.0 +801 9 optimizer.lr 0.04191842276385816 +801 9 training.batch_size 0.0 +801 9 training.label_smoothing 0.0022465742890348307 +801 10 model.embedding_dim 2.0 +801 10 model.scoring_fct_norm 2.0 +801 10 optimizer.lr 0.009290743966174238 +801 10 training.batch_size 2.0 +801 10 training.label_smoothing 0.4256992775183833 +801 11 model.embedding_dim 1.0 +801 11 model.scoring_fct_norm 2.0 +801 11 optimizer.lr 0.06821154555724068 +801 11 training.batch_size 2.0 +801 11 training.label_smoothing 0.038253859563867565 +801 12 model.embedding_dim 1.0 +801 12 model.scoring_fct_norm 1.0 +801 12 optimizer.lr 0.00112394330530804 +801 12 training.batch_size 2.0 +801 12 training.label_smoothing 0.06449661347588692 +801 13 model.embedding_dim 0.0 +801 13 
model.scoring_fct_norm 2.0 +801 13 optimizer.lr 0.037768209007957465 +801 13 training.batch_size 0.0 +801 13 training.label_smoothing 0.09499910211872496 +801 14 model.embedding_dim 2.0 +801 14 model.scoring_fct_norm 2.0 +801 14 optimizer.lr 0.022847614550281628 +801 14 training.batch_size 2.0 +801 14 training.label_smoothing 0.2308096123925005 +801 15 model.embedding_dim 0.0 +801 15 model.scoring_fct_norm 1.0 +801 15 optimizer.lr 0.009815756774986296 +801 15 training.batch_size 0.0 +801 15 training.label_smoothing 0.022666830178373346 +801 16 model.embedding_dim 1.0 +801 16 model.scoring_fct_norm 1.0 +801 16 optimizer.lr 0.0319816937401702 +801 16 training.batch_size 1.0 +801 16 training.label_smoothing 0.002254695309613193 +801 17 model.embedding_dim 1.0 +801 17 model.scoring_fct_norm 2.0 +801 17 optimizer.lr 0.09045567882818119 +801 17 training.batch_size 0.0 +801 17 training.label_smoothing 0.08664918856150038 +801 18 model.embedding_dim 1.0 +801 18 model.scoring_fct_norm 2.0 +801 18 optimizer.lr 0.007597547608089895 +801 18 training.batch_size 0.0 +801 18 training.label_smoothing 0.02111388672350056 +801 19 model.embedding_dim 1.0 +801 19 model.scoring_fct_norm 2.0 +801 19 optimizer.lr 0.0015450867693807544 +801 19 training.batch_size 2.0 +801 19 training.label_smoothing 0.9417371606860337 +801 20 model.embedding_dim 1.0 +801 20 model.scoring_fct_norm 1.0 +801 20 optimizer.lr 0.09166541996238482 +801 20 training.batch_size 2.0 +801 20 training.label_smoothing 0.37762084790240374 +801 21 model.embedding_dim 1.0 +801 21 model.scoring_fct_norm 1.0 +801 21 optimizer.lr 0.05468915492041683 +801 21 training.batch_size 0.0 +801 21 training.label_smoothing 0.28565817280205724 +801 22 model.embedding_dim 1.0 +801 22 model.scoring_fct_norm 2.0 +801 22 optimizer.lr 0.005287938556173898 +801 22 training.batch_size 1.0 +801 22 training.label_smoothing 0.011020673168495459 +801 23 model.embedding_dim 2.0 +801 23 model.scoring_fct_norm 1.0 +801 23 optimizer.lr 
0.01641290211848061 +801 23 training.batch_size 2.0 +801 23 training.label_smoothing 0.1796303172657714 +801 24 model.embedding_dim 1.0 +801 24 model.scoring_fct_norm 1.0 +801 24 optimizer.lr 0.005811853995345648 +801 24 training.batch_size 2.0 +801 24 training.label_smoothing 0.060966491645601265 +801 25 model.embedding_dim 2.0 +801 25 model.scoring_fct_norm 2.0 +801 25 optimizer.lr 0.01096247217431444 +801 25 training.batch_size 2.0 +801 25 training.label_smoothing 0.09090689467894833 +801 1 dataset """fb15k237""" +801 1 model """transe""" +801 1 loss """bceaftersigmoid""" +801 1 regularizer """no""" +801 1 optimizer """adam""" +801 1 training_loop """lcwa""" +801 1 evaluator """rankbased""" +801 2 dataset """fb15k237""" +801 2 model """transe""" +801 2 loss """bceaftersigmoid""" +801 2 regularizer """no""" +801 2 optimizer """adam""" +801 2 training_loop """lcwa""" +801 2 evaluator """rankbased""" +801 3 dataset """fb15k237""" +801 3 model """transe""" +801 3 loss """bceaftersigmoid""" +801 3 regularizer """no""" +801 3 optimizer """adam""" +801 3 training_loop """lcwa""" +801 3 evaluator """rankbased""" +801 4 dataset """fb15k237""" +801 4 model """transe""" +801 4 loss """bceaftersigmoid""" +801 4 regularizer """no""" +801 4 optimizer """adam""" +801 4 training_loop """lcwa""" +801 4 evaluator """rankbased""" +801 5 dataset """fb15k237""" +801 5 model """transe""" +801 5 loss """bceaftersigmoid""" +801 5 regularizer """no""" +801 5 optimizer """adam""" +801 5 training_loop """lcwa""" +801 5 evaluator """rankbased""" +801 6 dataset """fb15k237""" +801 6 model """transe""" +801 6 loss """bceaftersigmoid""" +801 6 regularizer """no""" +801 6 optimizer """adam""" +801 6 training_loop """lcwa""" +801 6 evaluator """rankbased""" +801 7 dataset """fb15k237""" +801 7 model """transe""" +801 7 loss """bceaftersigmoid""" +801 7 regularizer """no""" +801 7 optimizer """adam""" +801 7 training_loop """lcwa""" +801 7 evaluator """rankbased""" +801 8 dataset """fb15k237""" 
+801 8 model """transe""" +801 8 loss """bceaftersigmoid""" +801 8 regularizer """no""" +801 8 optimizer """adam""" +801 8 training_loop """lcwa""" +801 8 evaluator """rankbased""" +801 9 dataset """fb15k237""" +801 9 model """transe""" +801 9 loss """bceaftersigmoid""" +801 9 regularizer """no""" +801 9 optimizer """adam""" +801 9 training_loop """lcwa""" +801 9 evaluator """rankbased""" +801 10 dataset """fb15k237""" +801 10 model """transe""" +801 10 loss """bceaftersigmoid""" +801 10 regularizer """no""" +801 10 optimizer """adam""" +801 10 training_loop """lcwa""" +801 10 evaluator """rankbased""" +801 11 dataset """fb15k237""" +801 11 model """transe""" +801 11 loss """bceaftersigmoid""" +801 11 regularizer """no""" +801 11 optimizer """adam""" +801 11 training_loop """lcwa""" +801 11 evaluator """rankbased""" +801 12 dataset """fb15k237""" +801 12 model """transe""" +801 12 loss """bceaftersigmoid""" +801 12 regularizer """no""" +801 12 optimizer """adam""" +801 12 training_loop """lcwa""" +801 12 evaluator """rankbased""" +801 13 dataset """fb15k237""" +801 13 model """transe""" +801 13 loss """bceaftersigmoid""" +801 13 regularizer """no""" +801 13 optimizer """adam""" +801 13 training_loop """lcwa""" +801 13 evaluator """rankbased""" +801 14 dataset """fb15k237""" +801 14 model """transe""" +801 14 loss """bceaftersigmoid""" +801 14 regularizer """no""" +801 14 optimizer """adam""" +801 14 training_loop """lcwa""" +801 14 evaluator """rankbased""" +801 15 dataset """fb15k237""" +801 15 model """transe""" +801 15 loss """bceaftersigmoid""" +801 15 regularizer """no""" +801 15 optimizer """adam""" +801 15 training_loop """lcwa""" +801 15 evaluator """rankbased""" +801 16 dataset """fb15k237""" +801 16 model """transe""" +801 16 loss """bceaftersigmoid""" +801 16 regularizer """no""" +801 16 optimizer """adam""" +801 16 training_loop """lcwa""" +801 16 evaluator """rankbased""" +801 17 dataset """fb15k237""" +801 17 model """transe""" +801 17 loss 
"""bceaftersigmoid""" +801 17 regularizer """no""" +801 17 optimizer """adam""" +801 17 training_loop """lcwa""" +801 17 evaluator """rankbased""" +801 18 dataset """fb15k237""" +801 18 model """transe""" +801 18 loss """bceaftersigmoid""" +801 18 regularizer """no""" +801 18 optimizer """adam""" +801 18 training_loop """lcwa""" +801 18 evaluator """rankbased""" +801 19 dataset """fb15k237""" +801 19 model """transe""" +801 19 loss """bceaftersigmoid""" +801 19 regularizer """no""" +801 19 optimizer """adam""" +801 19 training_loop """lcwa""" +801 19 evaluator """rankbased""" +801 20 dataset """fb15k237""" +801 20 model """transe""" +801 20 loss """bceaftersigmoid""" +801 20 regularizer """no""" +801 20 optimizer """adam""" +801 20 training_loop """lcwa""" +801 20 evaluator """rankbased""" +801 21 dataset """fb15k237""" +801 21 model """transe""" +801 21 loss """bceaftersigmoid""" +801 21 regularizer """no""" +801 21 optimizer """adam""" +801 21 training_loop """lcwa""" +801 21 evaluator """rankbased""" +801 22 dataset """fb15k237""" +801 22 model """transe""" +801 22 loss """bceaftersigmoid""" +801 22 regularizer """no""" +801 22 optimizer """adam""" +801 22 training_loop """lcwa""" +801 22 evaluator """rankbased""" +801 23 dataset """fb15k237""" +801 23 model """transe""" +801 23 loss """bceaftersigmoid""" +801 23 regularizer """no""" +801 23 optimizer """adam""" +801 23 training_loop """lcwa""" +801 23 evaluator """rankbased""" +801 24 dataset """fb15k237""" +801 24 model """transe""" +801 24 loss """bceaftersigmoid""" +801 24 regularizer """no""" +801 24 optimizer """adam""" +801 24 training_loop """lcwa""" +801 24 evaluator """rankbased""" +801 25 dataset """fb15k237""" +801 25 model """transe""" +801 25 loss """bceaftersigmoid""" +801 25 regularizer """no""" +801 25 optimizer """adam""" +801 25 training_loop """lcwa""" +801 25 evaluator """rankbased""" +802 1 model.embedding_dim 2.0 +802 1 model.scoring_fct_norm 2.0 +802 1 optimizer.lr 0.0025191728485839436 
+802 1 training.batch_size 1.0 +802 1 training.label_smoothing 0.0012746724315371783 +802 2 model.embedding_dim 2.0 +802 2 model.scoring_fct_norm 1.0 +802 2 optimizer.lr 0.027016201114892287 +802 2 training.batch_size 0.0 +802 2 training.label_smoothing 0.004625359547775231 +802 3 model.embedding_dim 1.0 +802 3 model.scoring_fct_norm 2.0 +802 3 optimizer.lr 0.035944095063619855 +802 3 training.batch_size 2.0 +802 3 training.label_smoothing 0.00827477224134234 +802 4 model.embedding_dim 0.0 +802 4 model.scoring_fct_norm 1.0 +802 4 optimizer.lr 0.0013122037217788653 +802 4 training.batch_size 1.0 +802 4 training.label_smoothing 0.007949852705481403 +802 5 model.embedding_dim 0.0 +802 5 model.scoring_fct_norm 1.0 +802 5 optimizer.lr 0.013224172989092706 +802 5 training.batch_size 1.0 +802 5 training.label_smoothing 0.014468741730059827 +802 6 model.embedding_dim 0.0 +802 6 model.scoring_fct_norm 1.0 +802 6 optimizer.lr 0.01809034883883113 +802 6 training.batch_size 1.0 +802 6 training.label_smoothing 0.0024153687314366398 +802 7 model.embedding_dim 1.0 +802 7 model.scoring_fct_norm 1.0 +802 7 optimizer.lr 0.01172262297532517 +802 7 training.batch_size 1.0 +802 7 training.label_smoothing 0.002647466928588572 +802 8 model.embedding_dim 0.0 +802 8 model.scoring_fct_norm 2.0 +802 8 optimizer.lr 0.027861479016695792 +802 8 training.batch_size 0.0 +802 8 training.label_smoothing 0.4861676664833375 +802 9 model.embedding_dim 2.0 +802 9 model.scoring_fct_norm 2.0 +802 9 optimizer.lr 0.0029065051358326465 +802 9 training.batch_size 2.0 +802 9 training.label_smoothing 0.1425271082743406 +802 10 model.embedding_dim 0.0 +802 10 model.scoring_fct_norm 1.0 +802 10 optimizer.lr 0.0021412156508744183 +802 10 training.batch_size 1.0 +802 10 training.label_smoothing 0.3308971193179854 +802 11 model.embedding_dim 2.0 +802 11 model.scoring_fct_norm 2.0 +802 11 optimizer.lr 0.00480000512811405 +802 11 training.batch_size 1.0 +802 11 training.label_smoothing 0.006902747785639608 +802 12 
model.embedding_dim 2.0 +802 12 model.scoring_fct_norm 2.0 +802 12 optimizer.lr 0.021215386827422012 +802 12 training.batch_size 2.0 +802 12 training.label_smoothing 0.0012773464923695156 +802 13 model.embedding_dim 0.0 +802 13 model.scoring_fct_norm 1.0 +802 13 optimizer.lr 0.08584248362752332 +802 13 training.batch_size 0.0 +802 13 training.label_smoothing 0.004258865539889366 +802 14 model.embedding_dim 1.0 +802 14 model.scoring_fct_norm 1.0 +802 14 optimizer.lr 0.0013466470572521955 +802 14 training.batch_size 1.0 +802 14 training.label_smoothing 0.0036154811737901067 +802 15 model.embedding_dim 0.0 +802 15 model.scoring_fct_norm 2.0 +802 15 optimizer.lr 0.005113778021289214 +802 15 training.batch_size 2.0 +802 15 training.label_smoothing 0.0029682392241330863 +802 16 model.embedding_dim 1.0 +802 16 model.scoring_fct_norm 1.0 +802 16 optimizer.lr 0.0058704006328260784 +802 16 training.batch_size 0.0 +802 16 training.label_smoothing 0.7168826432609374 +802 17 model.embedding_dim 0.0 +802 17 model.scoring_fct_norm 2.0 +802 17 optimizer.lr 0.03634526988195302 +802 17 training.batch_size 2.0 +802 17 training.label_smoothing 0.0010761093413752339 +802 18 model.embedding_dim 0.0 +802 18 model.scoring_fct_norm 1.0 +802 18 optimizer.lr 0.00414824360975234 +802 18 training.batch_size 2.0 +802 18 training.label_smoothing 0.002511207581639875 +802 19 model.embedding_dim 1.0 +802 19 model.scoring_fct_norm 1.0 +802 19 optimizer.lr 0.04669356163812981 +802 19 training.batch_size 2.0 +802 19 training.label_smoothing 0.19098530175354136 +802 20 model.embedding_dim 1.0 +802 20 model.scoring_fct_norm 1.0 +802 20 optimizer.lr 0.008732293154645025 +802 20 training.batch_size 0.0 +802 20 training.label_smoothing 0.003464636029224256 +802 21 model.embedding_dim 1.0 +802 21 model.scoring_fct_norm 1.0 +802 21 optimizer.lr 0.05556701592518461 +802 21 training.batch_size 1.0 +802 21 training.label_smoothing 0.12148136807666231 +802 22 model.embedding_dim 0.0 +802 22 
model.scoring_fct_norm 2.0 +802 22 optimizer.lr 0.002444892017626687 +802 22 training.batch_size 2.0 +802 22 training.label_smoothing 0.007228402824499937 +802 23 model.embedding_dim 2.0 +802 23 model.scoring_fct_norm 1.0 +802 23 optimizer.lr 0.0014787363429177812 +802 23 training.batch_size 0.0 +802 23 training.label_smoothing 0.0013486462341355632 +802 24 model.embedding_dim 1.0 +802 24 model.scoring_fct_norm 1.0 +802 24 optimizer.lr 0.019799354385884715 +802 24 training.batch_size 1.0 +802 24 training.label_smoothing 0.050059666414041144 +802 25 model.embedding_dim 1.0 +802 25 model.scoring_fct_norm 2.0 +802 25 optimizer.lr 0.043264740484148495 +802 25 training.batch_size 0.0 +802 25 training.label_smoothing 0.01588752542314055 +802 26 model.embedding_dim 0.0 +802 26 model.scoring_fct_norm 2.0 +802 26 optimizer.lr 0.03597085193512498 +802 26 training.batch_size 2.0 +802 26 training.label_smoothing 0.0016071736694684676 +802 27 model.embedding_dim 0.0 +802 27 model.scoring_fct_norm 2.0 +802 27 optimizer.lr 0.006812665840505006 +802 27 training.batch_size 1.0 +802 27 training.label_smoothing 0.09721380825528471 +802 28 model.embedding_dim 1.0 +802 28 model.scoring_fct_norm 1.0 +802 28 optimizer.lr 0.005688224110314003 +802 28 training.batch_size 2.0 +802 28 training.label_smoothing 0.18387100683810587 +802 29 model.embedding_dim 2.0 +802 29 model.scoring_fct_norm 1.0 +802 29 optimizer.lr 0.01665846186498699 +802 29 training.batch_size 0.0 +802 29 training.label_smoothing 0.004459354555449201 +802 30 model.embedding_dim 0.0 +802 30 model.scoring_fct_norm 2.0 +802 30 optimizer.lr 0.0997868174057334 +802 30 training.batch_size 0.0 +802 30 training.label_smoothing 0.8591446236741509 +802 31 model.embedding_dim 1.0 +802 31 model.scoring_fct_norm 1.0 +802 31 optimizer.lr 0.09347880502219173 +802 31 training.batch_size 0.0 +802 31 training.label_smoothing 0.23017786849104432 +802 1 dataset """fb15k237""" +802 1 model """transe""" +802 1 loss """softplus""" +802 1 
regularizer """no""" +802 1 optimizer """adam""" +802 1 training_loop """lcwa""" +802 1 evaluator """rankbased""" +802 2 dataset """fb15k237""" +802 2 model """transe""" +802 2 loss """softplus""" +802 2 regularizer """no""" +802 2 optimizer """adam""" +802 2 training_loop """lcwa""" +802 2 evaluator """rankbased""" +802 3 dataset """fb15k237""" +802 3 model """transe""" +802 3 loss """softplus""" +802 3 regularizer """no""" +802 3 optimizer """adam""" +802 3 training_loop """lcwa""" +802 3 evaluator """rankbased""" +802 4 dataset """fb15k237""" +802 4 model """transe""" +802 4 loss """softplus""" +802 4 regularizer """no""" +802 4 optimizer """adam""" +802 4 training_loop """lcwa""" +802 4 evaluator """rankbased""" +802 5 dataset """fb15k237""" +802 5 model """transe""" +802 5 loss """softplus""" +802 5 regularizer """no""" +802 5 optimizer """adam""" +802 5 training_loop """lcwa""" +802 5 evaluator """rankbased""" +802 6 dataset """fb15k237""" +802 6 model """transe""" +802 6 loss """softplus""" +802 6 regularizer """no""" +802 6 optimizer """adam""" +802 6 training_loop """lcwa""" +802 6 evaluator """rankbased""" +802 7 dataset """fb15k237""" +802 7 model """transe""" +802 7 loss """softplus""" +802 7 regularizer """no""" +802 7 optimizer """adam""" +802 7 training_loop """lcwa""" +802 7 evaluator """rankbased""" +802 8 dataset """fb15k237""" +802 8 model """transe""" +802 8 loss """softplus""" +802 8 regularizer """no""" +802 8 optimizer """adam""" +802 8 training_loop """lcwa""" +802 8 evaluator """rankbased""" +802 9 dataset """fb15k237""" +802 9 model """transe""" +802 9 loss """softplus""" +802 9 regularizer """no""" +802 9 optimizer """adam""" +802 9 training_loop """lcwa""" +802 9 evaluator """rankbased""" +802 10 dataset """fb15k237""" +802 10 model """transe""" +802 10 loss """softplus""" +802 10 regularizer """no""" +802 10 optimizer """adam""" +802 10 training_loop """lcwa""" +802 10 evaluator """rankbased""" +802 11 dataset """fb15k237""" +802 11 
model """transe""" +802 11 loss """softplus""" +802 11 regularizer """no""" +802 11 optimizer """adam""" +802 11 training_loop """lcwa""" +802 11 evaluator """rankbased""" +802 12 dataset """fb15k237""" +802 12 model """transe""" +802 12 loss """softplus""" +802 12 regularizer """no""" +802 12 optimizer """adam""" +802 12 training_loop """lcwa""" +802 12 evaluator """rankbased""" +802 13 dataset """fb15k237""" +802 13 model """transe""" +802 13 loss """softplus""" +802 13 regularizer """no""" +802 13 optimizer """adam""" +802 13 training_loop """lcwa""" +802 13 evaluator """rankbased""" +802 14 dataset """fb15k237""" +802 14 model """transe""" +802 14 loss """softplus""" +802 14 regularizer """no""" +802 14 optimizer """adam""" +802 14 training_loop """lcwa""" +802 14 evaluator """rankbased""" +802 15 dataset """fb15k237""" +802 15 model """transe""" +802 15 loss """softplus""" +802 15 regularizer """no""" +802 15 optimizer """adam""" +802 15 training_loop """lcwa""" +802 15 evaluator """rankbased""" +802 16 dataset """fb15k237""" +802 16 model """transe""" +802 16 loss """softplus""" +802 16 regularizer """no""" +802 16 optimizer """adam""" +802 16 training_loop """lcwa""" +802 16 evaluator """rankbased""" +802 17 dataset """fb15k237""" +802 17 model """transe""" +802 17 loss """softplus""" +802 17 regularizer """no""" +802 17 optimizer """adam""" +802 17 training_loop """lcwa""" +802 17 evaluator """rankbased""" +802 18 dataset """fb15k237""" +802 18 model """transe""" +802 18 loss """softplus""" +802 18 regularizer """no""" +802 18 optimizer """adam""" +802 18 training_loop """lcwa""" +802 18 evaluator """rankbased""" +802 19 dataset """fb15k237""" +802 19 model """transe""" +802 19 loss """softplus""" +802 19 regularizer """no""" +802 19 optimizer """adam""" +802 19 training_loop """lcwa""" +802 19 evaluator """rankbased""" +802 20 dataset """fb15k237""" +802 20 model """transe""" +802 20 loss """softplus""" +802 20 regularizer """no""" +802 20 optimizer 
"""adam""" +802 20 training_loop """lcwa""" +802 20 evaluator """rankbased""" +802 21 dataset """fb15k237""" +802 21 model """transe""" +802 21 loss """softplus""" +802 21 regularizer """no""" +802 21 optimizer """adam""" +802 21 training_loop """lcwa""" +802 21 evaluator """rankbased""" +802 22 dataset """fb15k237""" +802 22 model """transe""" +802 22 loss """softplus""" +802 22 regularizer """no""" +802 22 optimizer """adam""" +802 22 training_loop """lcwa""" +802 22 evaluator """rankbased""" +802 23 dataset """fb15k237""" +802 23 model """transe""" +802 23 loss """softplus""" +802 23 regularizer """no""" +802 23 optimizer """adam""" +802 23 training_loop """lcwa""" +802 23 evaluator """rankbased""" +802 24 dataset """fb15k237""" +802 24 model """transe""" +802 24 loss """softplus""" +802 24 regularizer """no""" +802 24 optimizer """adam""" +802 24 training_loop """lcwa""" +802 24 evaluator """rankbased""" +802 25 dataset """fb15k237""" +802 25 model """transe""" +802 25 loss """softplus""" +802 25 regularizer """no""" +802 25 optimizer """adam""" +802 25 training_loop """lcwa""" +802 25 evaluator """rankbased""" +802 26 dataset """fb15k237""" +802 26 model """transe""" +802 26 loss """softplus""" +802 26 regularizer """no""" +802 26 optimizer """adam""" +802 26 training_loop """lcwa""" +802 26 evaluator """rankbased""" +802 27 dataset """fb15k237""" +802 27 model """transe""" +802 27 loss """softplus""" +802 27 regularizer """no""" +802 27 optimizer """adam""" +802 27 training_loop """lcwa""" +802 27 evaluator """rankbased""" +802 28 dataset """fb15k237""" +802 28 model """transe""" +802 28 loss """softplus""" +802 28 regularizer """no""" +802 28 optimizer """adam""" +802 28 training_loop """lcwa""" +802 28 evaluator """rankbased""" +802 29 dataset """fb15k237""" +802 29 model """transe""" +802 29 loss """softplus""" +802 29 regularizer """no""" +802 29 optimizer """adam""" +802 29 training_loop """lcwa""" +802 29 evaluator """rankbased""" +802 30 dataset 
"""fb15k237""" +802 30 model """transe""" +802 30 loss """softplus""" +802 30 regularizer """no""" +802 30 optimizer """adam""" +802 30 training_loop """lcwa""" +802 30 evaluator """rankbased""" +802 31 dataset """fb15k237""" +802 31 model """transe""" +802 31 loss """softplus""" +802 31 regularizer """no""" +802 31 optimizer """adam""" +802 31 training_loop """lcwa""" +802 31 evaluator """rankbased""" +803 1 model.embedding_dim 0.0 +803 1 model.scoring_fct_norm 2.0 +803 1 optimizer.lr 0.07089327439185111 +803 1 training.batch_size 2.0 +803 1 training.label_smoothing 0.006051685512437962 +803 2 model.embedding_dim 1.0 +803 2 model.scoring_fct_norm 2.0 +803 2 optimizer.lr 0.007778280764436805 +803 2 training.batch_size 0.0 +803 2 training.label_smoothing 0.2242773718490954 +803 3 model.embedding_dim 1.0 +803 3 model.scoring_fct_norm 2.0 +803 3 optimizer.lr 0.01426530008407064 +803 3 training.batch_size 1.0 +803 3 training.label_smoothing 0.1340222861428854 +803 4 model.embedding_dim 0.0 +803 4 model.scoring_fct_norm 1.0 +803 4 optimizer.lr 0.0033667338964843813 +803 4 training.batch_size 1.0 +803 4 training.label_smoothing 0.04882752994744317 +803 5 model.embedding_dim 0.0 +803 5 model.scoring_fct_norm 2.0 +803 5 optimizer.lr 0.047871576738492 +803 5 training.batch_size 2.0 +803 5 training.label_smoothing 0.0118158611440994 +803 6 model.embedding_dim 2.0 +803 6 model.scoring_fct_norm 2.0 +803 6 optimizer.lr 0.0032765523659978877 +803 6 training.batch_size 2.0 +803 6 training.label_smoothing 0.3414715843527228 +803 7 model.embedding_dim 0.0 +803 7 model.scoring_fct_norm 1.0 +803 7 optimizer.lr 0.017788037529342608 +803 7 training.batch_size 2.0 +803 7 training.label_smoothing 0.003000971032227267 +803 8 model.embedding_dim 1.0 +803 8 model.scoring_fct_norm 2.0 +803 8 optimizer.lr 0.0726852071361765 +803 8 training.batch_size 1.0 +803 8 training.label_smoothing 0.9575530315358933 +803 9 model.embedding_dim 0.0 +803 9 model.scoring_fct_norm 2.0 +803 9 optimizer.lr 
0.03039505487802802 +803 9 training.batch_size 2.0 +803 9 training.label_smoothing 0.005494232586656886 +803 10 model.embedding_dim 1.0 +803 10 model.scoring_fct_norm 2.0 +803 10 optimizer.lr 0.0034103374778274493 +803 10 training.batch_size 0.0 +803 10 training.label_smoothing 0.10368677614245284 +803 11 model.embedding_dim 0.0 +803 11 model.scoring_fct_norm 2.0 +803 11 optimizer.lr 0.008400623592823338 +803 11 training.batch_size 2.0 +803 11 training.label_smoothing 0.10582434964649852 +803 12 model.embedding_dim 1.0 +803 12 model.scoring_fct_norm 2.0 +803 12 optimizer.lr 0.008727399000251794 +803 12 training.batch_size 1.0 +803 12 training.label_smoothing 0.0014501827636972527 +803 13 model.embedding_dim 2.0 +803 13 model.scoring_fct_norm 2.0 +803 13 optimizer.lr 0.006034581188107317 +803 13 training.batch_size 0.0 +803 13 training.label_smoothing 0.008933810866796269 +803 14 model.embedding_dim 0.0 +803 14 model.scoring_fct_norm 1.0 +803 14 optimizer.lr 0.0019478354321018447 +803 14 training.batch_size 0.0 +803 14 training.label_smoothing 0.03535839161637959 +803 15 model.embedding_dim 0.0 +803 15 model.scoring_fct_norm 2.0 +803 15 optimizer.lr 0.0012277485149767766 +803 15 training.batch_size 1.0 +803 15 training.label_smoothing 0.008572849003207757 +803 16 model.embedding_dim 0.0 +803 16 model.scoring_fct_norm 2.0 +803 16 optimizer.lr 0.0022344411208538714 +803 16 training.batch_size 2.0 +803 16 training.label_smoothing 0.0015283757694597934 +803 17 model.embedding_dim 2.0 +803 17 model.scoring_fct_norm 1.0 +803 17 optimizer.lr 0.02489064405249178 +803 17 training.batch_size 1.0 +803 17 training.label_smoothing 0.0015827992359523162 +803 18 model.embedding_dim 0.0 +803 18 model.scoring_fct_norm 1.0 +803 18 optimizer.lr 0.0016608460884079603 +803 18 training.batch_size 1.0 +803 18 training.label_smoothing 0.717650072390557 +803 19 model.embedding_dim 0.0 +803 19 model.scoring_fct_norm 2.0 +803 19 optimizer.lr 0.0018074904773349042 +803 19 training.batch_size 
0.0 +803 19 training.label_smoothing 0.534095131025855 +803 20 model.embedding_dim 1.0 +803 20 model.scoring_fct_norm 2.0 +803 20 optimizer.lr 0.0021093562953444756 +803 20 training.batch_size 1.0 +803 20 training.label_smoothing 0.004418134506564592 +803 21 model.embedding_dim 2.0 +803 21 model.scoring_fct_norm 2.0 +803 21 optimizer.lr 0.0015528426529138628 +803 21 training.batch_size 1.0 +803 21 training.label_smoothing 0.0016044928611268537 +803 22 model.embedding_dim 2.0 +803 22 model.scoring_fct_norm 2.0 +803 22 optimizer.lr 0.011003336719627636 +803 22 training.batch_size 0.0 +803 22 training.label_smoothing 0.20540587553618792 +803 23 model.embedding_dim 1.0 +803 23 model.scoring_fct_norm 2.0 +803 23 optimizer.lr 0.0038007277190367987 +803 23 training.batch_size 1.0 +803 23 training.label_smoothing 0.020110206390643405 +803 24 model.embedding_dim 0.0 +803 24 model.scoring_fct_norm 2.0 +803 24 optimizer.lr 0.07147498921489587 +803 24 training.batch_size 1.0 +803 24 training.label_smoothing 0.004588087541616142 +803 1 dataset """fb15k237""" +803 1 model """transe""" +803 1 loss """crossentropy""" +803 1 regularizer """no""" +803 1 optimizer """adam""" +803 1 training_loop """lcwa""" +803 1 evaluator """rankbased""" +803 2 dataset """fb15k237""" +803 2 model """transe""" +803 2 loss """crossentropy""" +803 2 regularizer """no""" +803 2 optimizer """adam""" +803 2 training_loop """lcwa""" +803 2 evaluator """rankbased""" +803 3 dataset """fb15k237""" +803 3 model """transe""" +803 3 loss """crossentropy""" +803 3 regularizer """no""" +803 3 optimizer """adam""" +803 3 training_loop """lcwa""" +803 3 evaluator """rankbased""" +803 4 dataset """fb15k237""" +803 4 model """transe""" +803 4 loss """crossentropy""" +803 4 regularizer """no""" +803 4 optimizer """adam""" +803 4 training_loop """lcwa""" +803 4 evaluator """rankbased""" +803 5 dataset """fb15k237""" +803 5 model """transe""" +803 5 loss """crossentropy""" +803 5 regularizer """no""" +803 5 optimizer 
"""adam""" +803 5 training_loop """lcwa""" +803 5 evaluator """rankbased""" +803 6 dataset """fb15k237""" +803 6 model """transe""" +803 6 loss """crossentropy""" +803 6 regularizer """no""" +803 6 optimizer """adam""" +803 6 training_loop """lcwa""" +803 6 evaluator """rankbased""" +803 7 dataset """fb15k237""" +803 7 model """transe""" +803 7 loss """crossentropy""" +803 7 regularizer """no""" +803 7 optimizer """adam""" +803 7 training_loop """lcwa""" +803 7 evaluator """rankbased""" +803 8 dataset """fb15k237""" +803 8 model """transe""" +803 8 loss """crossentropy""" +803 8 regularizer """no""" +803 8 optimizer """adam""" +803 8 training_loop """lcwa""" +803 8 evaluator """rankbased""" +803 9 dataset """fb15k237""" +803 9 model """transe""" +803 9 loss """crossentropy""" +803 9 regularizer """no""" +803 9 optimizer """adam""" +803 9 training_loop """lcwa""" +803 9 evaluator """rankbased""" +803 10 dataset """fb15k237""" +803 10 model """transe""" +803 10 loss """crossentropy""" +803 10 regularizer """no""" +803 10 optimizer """adam""" +803 10 training_loop """lcwa""" +803 10 evaluator """rankbased""" +803 11 dataset """fb15k237""" +803 11 model """transe""" +803 11 loss """crossentropy""" +803 11 regularizer """no""" +803 11 optimizer """adam""" +803 11 training_loop """lcwa""" +803 11 evaluator """rankbased""" +803 12 dataset """fb15k237""" +803 12 model """transe""" +803 12 loss """crossentropy""" +803 12 regularizer """no""" +803 12 optimizer """adam""" +803 12 training_loop """lcwa""" +803 12 evaluator """rankbased""" +803 13 dataset """fb15k237""" +803 13 model """transe""" +803 13 loss """crossentropy""" +803 13 regularizer """no""" +803 13 optimizer """adam""" +803 13 training_loop """lcwa""" +803 13 evaluator """rankbased""" +803 14 dataset """fb15k237""" +803 14 model """transe""" +803 14 loss """crossentropy""" +803 14 regularizer """no""" +803 14 optimizer """adam""" +803 14 training_loop """lcwa""" +803 14 evaluator """rankbased""" +803 15 dataset 
"""fb15k237""" +803 15 model """transe""" +803 15 loss """crossentropy""" +803 15 regularizer """no""" +803 15 optimizer """adam""" +803 15 training_loop """lcwa""" +803 15 evaluator """rankbased""" +803 16 dataset """fb15k237""" +803 16 model """transe""" +803 16 loss """crossentropy""" +803 16 regularizer """no""" +803 16 optimizer """adam""" +803 16 training_loop """lcwa""" +803 16 evaluator """rankbased""" +803 17 dataset """fb15k237""" +803 17 model """transe""" +803 17 loss """crossentropy""" +803 17 regularizer """no""" +803 17 optimizer """adam""" +803 17 training_loop """lcwa""" +803 17 evaluator """rankbased""" +803 18 dataset """fb15k237""" +803 18 model """transe""" +803 18 loss """crossentropy""" +803 18 regularizer """no""" +803 18 optimizer """adam""" +803 18 training_loop """lcwa""" +803 18 evaluator """rankbased""" +803 19 dataset """fb15k237""" +803 19 model """transe""" +803 19 loss """crossentropy""" +803 19 regularizer """no""" +803 19 optimizer """adam""" +803 19 training_loop """lcwa""" +803 19 evaluator """rankbased""" +803 20 dataset """fb15k237""" +803 20 model """transe""" +803 20 loss """crossentropy""" +803 20 regularizer """no""" +803 20 optimizer """adam""" +803 20 training_loop """lcwa""" +803 20 evaluator """rankbased""" +803 21 dataset """fb15k237""" +803 21 model """transe""" +803 21 loss """crossentropy""" +803 21 regularizer """no""" +803 21 optimizer """adam""" +803 21 training_loop """lcwa""" +803 21 evaluator """rankbased""" +803 22 dataset """fb15k237""" +803 22 model """transe""" +803 22 loss """crossentropy""" +803 22 regularizer """no""" +803 22 optimizer """adam""" +803 22 training_loop """lcwa""" +803 22 evaluator """rankbased""" +803 23 dataset """fb15k237""" +803 23 model """transe""" +803 23 loss """crossentropy""" +803 23 regularizer """no""" +803 23 optimizer """adam""" +803 23 training_loop """lcwa""" +803 23 evaluator """rankbased""" +803 24 dataset """fb15k237""" +803 24 model """transe""" +803 24 loss 
"""crossentropy""" +803 24 regularizer """no""" +803 24 optimizer """adam""" +803 24 training_loop """lcwa""" +803 24 evaluator """rankbased""" +804 1 model.embedding_dim 1.0 +804 1 model.scoring_fct_norm 2.0 +804 1 optimizer.lr 0.027708364928982755 +804 1 training.batch_size 0.0 +804 1 training.label_smoothing 0.35326876707152743 +804 2 model.embedding_dim 1.0 +804 2 model.scoring_fct_norm 1.0 +804 2 optimizer.lr 0.017158633792178413 +804 2 training.batch_size 1.0 +804 2 training.label_smoothing 0.040888204534514465 +804 3 model.embedding_dim 2.0 +804 3 model.scoring_fct_norm 2.0 +804 3 optimizer.lr 0.00308833460291651 +804 3 training.batch_size 2.0 +804 3 training.label_smoothing 0.007313123437827674 +804 4 model.embedding_dim 1.0 +804 4 model.scoring_fct_norm 1.0 +804 4 optimizer.lr 0.08294064728649368 +804 4 training.batch_size 0.0 +804 4 training.label_smoothing 0.3101923947449872 +804 5 model.embedding_dim 0.0 +804 5 model.scoring_fct_norm 2.0 +804 5 optimizer.lr 0.03414250851188775 +804 5 training.batch_size 1.0 +804 5 training.label_smoothing 0.002141244149937035 +804 6 model.embedding_dim 0.0 +804 6 model.scoring_fct_norm 1.0 +804 6 optimizer.lr 0.09708714222988873 +804 6 training.batch_size 0.0 +804 6 training.label_smoothing 0.007703326934713119 +804 7 model.embedding_dim 2.0 +804 7 model.scoring_fct_norm 2.0 +804 7 optimizer.lr 0.04399810920747263 +804 7 training.batch_size 1.0 +804 7 training.label_smoothing 0.9372163068426727 +804 8 model.embedding_dim 0.0 +804 8 model.scoring_fct_norm 2.0 +804 8 optimizer.lr 0.07334544977320603 +804 8 training.batch_size 2.0 +804 8 training.label_smoothing 0.0022513589942981895 +804 9 model.embedding_dim 1.0 +804 9 model.scoring_fct_norm 2.0 +804 9 optimizer.lr 0.0321522616595471 +804 9 training.batch_size 1.0 +804 9 training.label_smoothing 0.478649518577278 +804 10 model.embedding_dim 2.0 +804 10 model.scoring_fct_norm 1.0 +804 10 optimizer.lr 0.09320114969077811 +804 10 training.batch_size 1.0 +804 10 
training.label_smoothing 0.007313298452638953 +804 11 model.embedding_dim 0.0 +804 11 model.scoring_fct_norm 1.0 +804 11 optimizer.lr 0.08451546831872203 +804 11 training.batch_size 1.0 +804 11 training.label_smoothing 0.0031486795021437785 +804 12 model.embedding_dim 1.0 +804 12 model.scoring_fct_norm 1.0 +804 12 optimizer.lr 0.012814048594809314 +804 12 training.batch_size 1.0 +804 12 training.label_smoothing 0.007900815659762138 +804 13 model.embedding_dim 1.0 +804 13 model.scoring_fct_norm 1.0 +804 13 optimizer.lr 0.009457164102620721 +804 13 training.batch_size 0.0 +804 13 training.label_smoothing 0.008801756750134516 +804 14 model.embedding_dim 0.0 +804 14 model.scoring_fct_norm 2.0 +804 14 optimizer.lr 0.0010707974859435786 +804 14 training.batch_size 0.0 +804 14 training.label_smoothing 0.010580750703460977 +804 15 model.embedding_dim 1.0 +804 15 model.scoring_fct_norm 2.0 +804 15 optimizer.lr 0.0255723098489214 +804 15 training.batch_size 1.0 +804 15 training.label_smoothing 0.01972464856704757 +804 16 model.embedding_dim 2.0 +804 16 model.scoring_fct_norm 1.0 +804 16 optimizer.lr 0.0098970507534465 +804 16 training.batch_size 0.0 +804 16 training.label_smoothing 0.19131616354310002 +804 17 model.embedding_dim 1.0 +804 17 model.scoring_fct_norm 1.0 +804 17 optimizer.lr 0.02496022868929992 +804 17 training.batch_size 2.0 +804 17 training.label_smoothing 0.08047579423033781 +804 18 model.embedding_dim 0.0 +804 18 model.scoring_fct_norm 2.0 +804 18 optimizer.lr 0.001578355494915229 +804 18 training.batch_size 2.0 +804 18 training.label_smoothing 0.04517399404612766 +804 19 model.embedding_dim 0.0 +804 19 model.scoring_fct_norm 2.0 +804 19 optimizer.lr 0.001622632009528519 +804 19 training.batch_size 2.0 +804 19 training.label_smoothing 0.10688764317359589 +804 20 model.embedding_dim 0.0 +804 20 model.scoring_fct_norm 1.0 +804 20 optimizer.lr 0.0027952798205781444 +804 20 training.batch_size 1.0 +804 20 training.label_smoothing 0.0019343790717515335 +804 21 
model.embedding_dim 0.0 +804 21 model.scoring_fct_norm 2.0 +804 21 optimizer.lr 0.06563729533910587 +804 21 training.batch_size 1.0 +804 21 training.label_smoothing 0.0018782970070921625 +804 22 model.embedding_dim 0.0 +804 22 model.scoring_fct_norm 1.0 +804 22 optimizer.lr 0.03777945436692528 +804 22 training.batch_size 2.0 +804 22 training.label_smoothing 0.0070323890312735445 +804 23 model.embedding_dim 1.0 +804 23 model.scoring_fct_norm 2.0 +804 23 optimizer.lr 0.013982842006713308 +804 23 training.batch_size 0.0 +804 23 training.label_smoothing 0.004530797175385601 +804 24 model.embedding_dim 2.0 +804 24 model.scoring_fct_norm 1.0 +804 24 optimizer.lr 0.04417590654507678 +804 24 training.batch_size 0.0 +804 24 training.label_smoothing 0.0799656443591383 +804 25 model.embedding_dim 2.0 +804 25 model.scoring_fct_norm 1.0 +804 25 optimizer.lr 0.002256570208126583 +804 25 training.batch_size 0.0 +804 25 training.label_smoothing 0.7644642800393661 +804 26 model.embedding_dim 0.0 +804 26 model.scoring_fct_norm 1.0 +804 26 optimizer.lr 0.002546345857633867 +804 26 training.batch_size 0.0 +804 26 training.label_smoothing 0.2686016664885805 +804 27 model.embedding_dim 0.0 +804 27 model.scoring_fct_norm 1.0 +804 27 optimizer.lr 0.00229784459077261 +804 27 training.batch_size 0.0 +804 27 training.label_smoothing 0.006385341974303131 +804 28 model.embedding_dim 0.0 +804 28 model.scoring_fct_norm 2.0 +804 28 optimizer.lr 0.0016694087517340676 +804 28 training.batch_size 2.0 +804 28 training.label_smoothing 0.21441950483211938 +804 29 model.embedding_dim 0.0 +804 29 model.scoring_fct_norm 1.0 +804 29 optimizer.lr 0.0014278429455803136 +804 29 training.batch_size 1.0 +804 29 training.label_smoothing 0.005255278920095671 +804 30 model.embedding_dim 0.0 +804 30 model.scoring_fct_norm 1.0 +804 30 optimizer.lr 0.0012238413181193894 +804 30 training.batch_size 0.0 +804 30 training.label_smoothing 0.36833248462902707 +804 1 dataset """fb15k237""" +804 1 model """transe""" +804 1 
loss """crossentropy""" +804 1 regularizer """no""" +804 1 optimizer """adam""" +804 1 training_loop """lcwa""" +804 1 evaluator """rankbased""" +804 2 dataset """fb15k237""" +804 2 model """transe""" +804 2 loss """crossentropy""" +804 2 regularizer """no""" +804 2 optimizer """adam""" +804 2 training_loop """lcwa""" +804 2 evaluator """rankbased""" +804 3 dataset """fb15k237""" +804 3 model """transe""" +804 3 loss """crossentropy""" +804 3 regularizer """no""" +804 3 optimizer """adam""" +804 3 training_loop """lcwa""" +804 3 evaluator """rankbased""" +804 4 dataset """fb15k237""" +804 4 model """transe""" +804 4 loss """crossentropy""" +804 4 regularizer """no""" +804 4 optimizer """adam""" +804 4 training_loop """lcwa""" +804 4 evaluator """rankbased""" +804 5 dataset """fb15k237""" +804 5 model """transe""" +804 5 loss """crossentropy""" +804 5 regularizer """no""" +804 5 optimizer """adam""" +804 5 training_loop """lcwa""" +804 5 evaluator """rankbased""" +804 6 dataset """fb15k237""" +804 6 model """transe""" +804 6 loss """crossentropy""" +804 6 regularizer """no""" +804 6 optimizer """adam""" +804 6 training_loop """lcwa""" +804 6 evaluator """rankbased""" +804 7 dataset """fb15k237""" +804 7 model """transe""" +804 7 loss """crossentropy""" +804 7 regularizer """no""" +804 7 optimizer """adam""" +804 7 training_loop """lcwa""" +804 7 evaluator """rankbased""" +804 8 dataset """fb15k237""" +804 8 model """transe""" +804 8 loss """crossentropy""" +804 8 regularizer """no""" +804 8 optimizer """adam""" +804 8 training_loop """lcwa""" +804 8 evaluator """rankbased""" +804 9 dataset """fb15k237""" +804 9 model """transe""" +804 9 loss """crossentropy""" +804 9 regularizer """no""" +804 9 optimizer """adam""" +804 9 training_loop """lcwa""" +804 9 evaluator """rankbased""" +804 10 dataset """fb15k237""" +804 10 model """transe""" +804 10 loss """crossentropy""" +804 10 regularizer """no""" +804 10 optimizer """adam""" +804 10 training_loop """lcwa""" +804 10 
evaluator """rankbased""" +804 11 dataset """fb15k237""" +804 11 model """transe""" +804 11 loss """crossentropy""" +804 11 regularizer """no""" +804 11 optimizer """adam""" +804 11 training_loop """lcwa""" +804 11 evaluator """rankbased""" +804 12 dataset """fb15k237""" +804 12 model """transe""" +804 12 loss """crossentropy""" +804 12 regularizer """no""" +804 12 optimizer """adam""" +804 12 training_loop """lcwa""" +804 12 evaluator """rankbased""" +804 13 dataset """fb15k237""" +804 13 model """transe""" +804 13 loss """crossentropy""" +804 13 regularizer """no""" +804 13 optimizer """adam""" +804 13 training_loop """lcwa""" +804 13 evaluator """rankbased""" +804 14 dataset """fb15k237""" +804 14 model """transe""" +804 14 loss """crossentropy""" +804 14 regularizer """no""" +804 14 optimizer """adam""" +804 14 training_loop """lcwa""" +804 14 evaluator """rankbased""" +804 15 dataset """fb15k237""" +804 15 model """transe""" +804 15 loss """crossentropy""" +804 15 regularizer """no""" +804 15 optimizer """adam""" +804 15 training_loop """lcwa""" +804 15 evaluator """rankbased""" +804 16 dataset """fb15k237""" +804 16 model """transe""" +804 16 loss """crossentropy""" +804 16 regularizer """no""" +804 16 optimizer """adam""" +804 16 training_loop """lcwa""" +804 16 evaluator """rankbased""" +804 17 dataset """fb15k237""" +804 17 model """transe""" +804 17 loss """crossentropy""" +804 17 regularizer """no""" +804 17 optimizer """adam""" +804 17 training_loop """lcwa""" +804 17 evaluator """rankbased""" +804 18 dataset """fb15k237""" +804 18 model """transe""" +804 18 loss """crossentropy""" +804 18 regularizer """no""" +804 18 optimizer """adam""" +804 18 training_loop """lcwa""" +804 18 evaluator """rankbased""" +804 19 dataset """fb15k237""" +804 19 model """transe""" +804 19 loss """crossentropy""" +804 19 regularizer """no""" +804 19 optimizer """adam""" +804 19 training_loop """lcwa""" +804 19 evaluator """rankbased""" +804 20 dataset """fb15k237""" +804 20 
model """transe""" +804 20 loss """crossentropy""" +804 20 regularizer """no""" +804 20 optimizer """adam""" +804 20 training_loop """lcwa""" +804 20 evaluator """rankbased""" +804 21 dataset """fb15k237""" +804 21 model """transe""" +804 21 loss """crossentropy""" +804 21 regularizer """no""" +804 21 optimizer """adam""" +804 21 training_loop """lcwa""" +804 21 evaluator """rankbased""" +804 22 dataset """fb15k237""" +804 22 model """transe""" +804 22 loss """crossentropy""" +804 22 regularizer """no""" +804 22 optimizer """adam""" +804 22 training_loop """lcwa""" +804 22 evaluator """rankbased""" +804 23 dataset """fb15k237""" +804 23 model """transe""" +804 23 loss """crossentropy""" +804 23 regularizer """no""" +804 23 optimizer """adam""" +804 23 training_loop """lcwa""" +804 23 evaluator """rankbased""" +804 24 dataset """fb15k237""" +804 24 model """transe""" +804 24 loss """crossentropy""" +804 24 regularizer """no""" +804 24 optimizer """adam""" +804 24 training_loop """lcwa""" +804 24 evaluator """rankbased""" +804 25 dataset """fb15k237""" +804 25 model """transe""" +804 25 loss """crossentropy""" +804 25 regularizer """no""" +804 25 optimizer """adam""" +804 25 training_loop """lcwa""" +804 25 evaluator """rankbased""" +804 26 dataset """fb15k237""" +804 26 model """transe""" +804 26 loss """crossentropy""" +804 26 regularizer """no""" +804 26 optimizer """adam""" +804 26 training_loop """lcwa""" +804 26 evaluator """rankbased""" +804 27 dataset """fb15k237""" +804 27 model """transe""" +804 27 loss """crossentropy""" +804 27 regularizer """no""" +804 27 optimizer """adam""" +804 27 training_loop """lcwa""" +804 27 evaluator """rankbased""" +804 28 dataset """fb15k237""" +804 28 model """transe""" +804 28 loss """crossentropy""" +804 28 regularizer """no""" +804 28 optimizer """adam""" +804 28 training_loop """lcwa""" +804 28 evaluator """rankbased""" +804 29 dataset """fb15k237""" +804 29 model """transe""" +804 29 loss """crossentropy""" +804 29 
regularizer """no""" +804 29 optimizer """adam""" +804 29 training_loop """lcwa""" +804 29 evaluator """rankbased""" +804 30 dataset """fb15k237""" +804 30 model """transe""" +804 30 loss """crossentropy""" +804 30 regularizer """no""" +804 30 optimizer """adam""" +804 30 training_loop """lcwa""" +804 30 evaluator """rankbased""" +805 1 model.embedding_dim 2.0 +805 1 model.scoring_fct_norm 1.0 +805 1 training.batch_size 2.0 +805 1 training.label_smoothing 0.04981297486066849 +805 2 model.embedding_dim 1.0 +805 2 model.scoring_fct_norm 1.0 +805 2 training.batch_size 0.0 +805 2 training.label_smoothing 0.006657812500904424 +805 3 model.embedding_dim 2.0 +805 3 model.scoring_fct_norm 1.0 +805 3 training.batch_size 1.0 +805 3 training.label_smoothing 0.904772688095509 +805 4 model.embedding_dim 0.0 +805 4 model.scoring_fct_norm 1.0 +805 4 training.batch_size 0.0 +805 4 training.label_smoothing 0.0030245935257119136 +805 5 model.embedding_dim 0.0 +805 5 model.scoring_fct_norm 1.0 +805 5 training.batch_size 2.0 +805 5 training.label_smoothing 0.03789885457079255 +805 6 model.embedding_dim 2.0 +805 6 model.scoring_fct_norm 1.0 +805 6 training.batch_size 2.0 +805 6 training.label_smoothing 0.02826575860958945 +805 7 model.embedding_dim 0.0 +805 7 model.scoring_fct_norm 1.0 +805 7 training.batch_size 0.0 +805 7 training.label_smoothing 0.05159857480539562 +805 8 model.embedding_dim 1.0 +805 8 model.scoring_fct_norm 1.0 +805 8 training.batch_size 2.0 +805 8 training.label_smoothing 0.5179660435119161 +805 9 model.embedding_dim 1.0 +805 9 model.scoring_fct_norm 1.0 +805 9 training.batch_size 0.0 +805 9 training.label_smoothing 0.0873479142711164 +805 10 model.embedding_dim 1.0 +805 10 model.scoring_fct_norm 1.0 +805 10 training.batch_size 1.0 +805 10 training.label_smoothing 0.4122925195086909 +805 11 model.embedding_dim 2.0 +805 11 model.scoring_fct_norm 2.0 +805 11 training.batch_size 1.0 +805 11 training.label_smoothing 0.803172254322781 +805 12 model.embedding_dim 1.0 
+805 12 model.scoring_fct_norm 1.0 +805 12 training.batch_size 2.0 +805 12 training.label_smoothing 0.0016236538351028522 +805 13 model.embedding_dim 2.0 +805 13 model.scoring_fct_norm 1.0 +805 13 training.batch_size 2.0 +805 13 training.label_smoothing 0.00845570656110604 +805 14 model.embedding_dim 1.0 +805 14 model.scoring_fct_norm 1.0 +805 14 training.batch_size 1.0 +805 14 training.label_smoothing 0.8182183921512087 +805 15 model.embedding_dim 2.0 +805 15 model.scoring_fct_norm 1.0 +805 15 training.batch_size 1.0 +805 15 training.label_smoothing 0.01636581301876182 +805 16 model.embedding_dim 2.0 +805 16 model.scoring_fct_norm 2.0 +805 16 training.batch_size 2.0 +805 16 training.label_smoothing 0.04771436272485402 +805 17 model.embedding_dim 1.0 +805 17 model.scoring_fct_norm 1.0 +805 17 training.batch_size 1.0 +805 17 training.label_smoothing 0.0026719962291904594 +805 18 model.embedding_dim 2.0 +805 18 model.scoring_fct_norm 1.0 +805 18 training.batch_size 0.0 +805 18 training.label_smoothing 0.7693148923070645 +805 19 model.embedding_dim 1.0 +805 19 model.scoring_fct_norm 2.0 +805 19 training.batch_size 1.0 +805 19 training.label_smoothing 0.0046086379737607175 +805 20 model.embedding_dim 2.0 +805 20 model.scoring_fct_norm 1.0 +805 20 training.batch_size 2.0 +805 20 training.label_smoothing 0.033367460815741275 +805 21 model.embedding_dim 2.0 +805 21 model.scoring_fct_norm 2.0 +805 21 training.batch_size 1.0 +805 21 training.label_smoothing 0.0011236661299391518 +805 22 model.embedding_dim 0.0 +805 22 model.scoring_fct_norm 2.0 +805 22 training.batch_size 1.0 +805 22 training.label_smoothing 0.04118648299197582 +805 23 model.embedding_dim 2.0 +805 23 model.scoring_fct_norm 2.0 +805 23 training.batch_size 2.0 +805 23 training.label_smoothing 0.2237870057333487 +805 24 model.embedding_dim 0.0 +805 24 model.scoring_fct_norm 1.0 +805 24 training.batch_size 1.0 +805 24 training.label_smoothing 0.07301171153210474 +805 25 model.embedding_dim 2.0 +805 25 
model.scoring_fct_norm 2.0 +805 25 training.batch_size 0.0 +805 25 training.label_smoothing 0.2442301042828054 +805 26 model.embedding_dim 1.0 +805 26 model.scoring_fct_norm 1.0 +805 26 training.batch_size 0.0 +805 26 training.label_smoothing 0.0012848503634580455 +805 27 model.embedding_dim 0.0 +805 27 model.scoring_fct_norm 1.0 +805 27 training.batch_size 2.0 +805 27 training.label_smoothing 0.1450953402079852 +805 28 model.embedding_dim 2.0 +805 28 model.scoring_fct_norm 2.0 +805 28 training.batch_size 2.0 +805 28 training.label_smoothing 0.012081779800158291 +805 29 model.embedding_dim 0.0 +805 29 model.scoring_fct_norm 1.0 +805 29 training.batch_size 2.0 +805 29 training.label_smoothing 0.6951810221454273 +805 30 model.embedding_dim 1.0 +805 30 model.scoring_fct_norm 1.0 +805 30 training.batch_size 1.0 +805 30 training.label_smoothing 0.0055616374224248125 +805 31 model.embedding_dim 0.0 +805 31 model.scoring_fct_norm 2.0 +805 31 training.batch_size 0.0 +805 31 training.label_smoothing 0.4803983122639341 +805 32 model.embedding_dim 2.0 +805 32 model.scoring_fct_norm 2.0 +805 32 training.batch_size 0.0 +805 32 training.label_smoothing 0.003165964875332203 +805 33 model.embedding_dim 2.0 +805 33 model.scoring_fct_norm 1.0 +805 33 training.batch_size 0.0 +805 33 training.label_smoothing 0.6136163196717237 +805 34 model.embedding_dim 0.0 +805 34 model.scoring_fct_norm 1.0 +805 34 training.batch_size 0.0 +805 34 training.label_smoothing 0.004487508279221285 +805 35 model.embedding_dim 0.0 +805 35 model.scoring_fct_norm 2.0 +805 35 training.batch_size 0.0 +805 35 training.label_smoothing 0.5261008589128336 +805 36 model.embedding_dim 1.0 +805 36 model.scoring_fct_norm 2.0 +805 36 training.batch_size 2.0 +805 36 training.label_smoothing 0.08445410462515254 +805 37 model.embedding_dim 1.0 +805 37 model.scoring_fct_norm 1.0 +805 37 training.batch_size 0.0 +805 37 training.label_smoothing 0.295553106746268 +805 38 model.embedding_dim 1.0 +805 38 model.scoring_fct_norm 
1.0 +805 38 training.batch_size 2.0 +805 38 training.label_smoothing 0.022234478536356704 +805 39 model.embedding_dim 2.0 +805 39 model.scoring_fct_norm 2.0 +805 39 training.batch_size 0.0 +805 39 training.label_smoothing 0.7424254926772119 +805 40 model.embedding_dim 1.0 +805 40 model.scoring_fct_norm 2.0 +805 40 training.batch_size 1.0 +805 40 training.label_smoothing 0.003402755334298212 +805 41 model.embedding_dim 1.0 +805 41 model.scoring_fct_norm 2.0 +805 41 training.batch_size 1.0 +805 41 training.label_smoothing 0.013931011992922647 +805 42 model.embedding_dim 0.0 +805 42 model.scoring_fct_norm 2.0 +805 42 training.batch_size 0.0 +805 42 training.label_smoothing 0.0013362811415330899 +805 43 model.embedding_dim 0.0 +805 43 model.scoring_fct_norm 1.0 +805 43 training.batch_size 2.0 +805 43 training.label_smoothing 0.03557480400163799 +805 44 model.embedding_dim 1.0 +805 44 model.scoring_fct_norm 1.0 +805 44 training.batch_size 2.0 +805 44 training.label_smoothing 0.0010350660226937066 +805 45 model.embedding_dim 1.0 +805 45 model.scoring_fct_norm 1.0 +805 45 training.batch_size 1.0 +805 45 training.label_smoothing 0.02609165157233796 +805 46 model.embedding_dim 1.0 +805 46 model.scoring_fct_norm 1.0 +805 46 training.batch_size 0.0 +805 46 training.label_smoothing 0.5135243861500904 +805 47 model.embedding_dim 0.0 +805 47 model.scoring_fct_norm 1.0 +805 47 training.batch_size 2.0 +805 47 training.label_smoothing 0.08045233113861096 +805 48 model.embedding_dim 1.0 +805 48 model.scoring_fct_norm 1.0 +805 48 training.batch_size 0.0 +805 48 training.label_smoothing 0.0026230619222567847 +805 49 model.embedding_dim 2.0 +805 49 model.scoring_fct_norm 1.0 +805 49 training.batch_size 0.0 +805 49 training.label_smoothing 0.01678439085731058 +805 50 model.embedding_dim 1.0 +805 50 model.scoring_fct_norm 1.0 +805 50 training.batch_size 0.0 +805 50 training.label_smoothing 0.019476903639674876 +805 51 model.embedding_dim 0.0 +805 51 model.scoring_fct_norm 1.0 +805 51 
training.batch_size 2.0 +805 51 training.label_smoothing 0.0054032090704667106 +805 52 model.embedding_dim 1.0 +805 52 model.scoring_fct_norm 2.0 +805 52 training.batch_size 1.0 +805 52 training.label_smoothing 0.00547008647108746 +805 53 model.embedding_dim 0.0 +805 53 model.scoring_fct_norm 1.0 +805 53 training.batch_size 2.0 +805 53 training.label_smoothing 0.006386137333191722 +805 54 model.embedding_dim 2.0 +805 54 model.scoring_fct_norm 1.0 +805 54 training.batch_size 2.0 +805 54 training.label_smoothing 0.061807258195723205 +805 55 model.embedding_dim 1.0 +805 55 model.scoring_fct_norm 2.0 +805 55 training.batch_size 1.0 +805 55 training.label_smoothing 0.6715260688237115 +805 56 model.embedding_dim 1.0 +805 56 model.scoring_fct_norm 1.0 +805 56 training.batch_size 1.0 +805 56 training.label_smoothing 0.19777124389022782 +805 57 model.embedding_dim 1.0 +805 57 model.scoring_fct_norm 1.0 +805 57 training.batch_size 2.0 +805 57 training.label_smoothing 0.03993594667115217 +805 58 model.embedding_dim 1.0 +805 58 model.scoring_fct_norm 1.0 +805 58 training.batch_size 0.0 +805 58 training.label_smoothing 0.6412892477999756 +805 59 model.embedding_dim 0.0 +805 59 model.scoring_fct_norm 2.0 +805 59 training.batch_size 1.0 +805 59 training.label_smoothing 0.005121572491564912 +805 60 model.embedding_dim 2.0 +805 60 model.scoring_fct_norm 2.0 +805 60 training.batch_size 1.0 +805 60 training.label_smoothing 0.13175112603737488 +805 61 model.embedding_dim 1.0 +805 61 model.scoring_fct_norm 1.0 +805 61 training.batch_size 1.0 +805 61 training.label_smoothing 0.0044935720918627825 +805 62 model.embedding_dim 0.0 +805 62 model.scoring_fct_norm 2.0 +805 62 training.batch_size 2.0 +805 62 training.label_smoothing 0.014199433052711003 +805 63 model.embedding_dim 0.0 +805 63 model.scoring_fct_norm 2.0 +805 63 training.batch_size 1.0 +805 63 training.label_smoothing 0.04641242655994712 +805 64 model.embedding_dim 2.0 +805 64 model.scoring_fct_norm 2.0 +805 64 
training.batch_size 1.0 +805 64 training.label_smoothing 0.10347654996814797 +805 65 model.embedding_dim 2.0 +805 65 model.scoring_fct_norm 1.0 +805 65 training.batch_size 2.0 +805 65 training.label_smoothing 0.001089631967514231 +805 66 model.embedding_dim 1.0 +805 66 model.scoring_fct_norm 1.0 +805 66 training.batch_size 2.0 +805 66 training.label_smoothing 0.009120722001227143 +805 67 model.embedding_dim 0.0 +805 67 model.scoring_fct_norm 1.0 +805 67 training.batch_size 1.0 +805 67 training.label_smoothing 0.1160317277886189 +805 68 model.embedding_dim 2.0 +805 68 model.scoring_fct_norm 2.0 +805 68 training.batch_size 1.0 +805 68 training.label_smoothing 0.06213775169167941 +805 69 model.embedding_dim 2.0 +805 69 model.scoring_fct_norm 2.0 +805 69 training.batch_size 1.0 +805 69 training.label_smoothing 0.0013664173077681673 +805 70 model.embedding_dim 0.0 +805 70 model.scoring_fct_norm 2.0 +805 70 training.batch_size 2.0 +805 70 training.label_smoothing 0.0822046359309552 +805 71 model.embedding_dim 0.0 +805 71 model.scoring_fct_norm 2.0 +805 71 training.batch_size 0.0 +805 71 training.label_smoothing 0.12359732340610204 +805 72 model.embedding_dim 2.0 +805 72 model.scoring_fct_norm 1.0 +805 72 training.batch_size 1.0 +805 72 training.label_smoothing 0.0015186597470209574 +805 73 model.embedding_dim 1.0 +805 73 model.scoring_fct_norm 2.0 +805 73 training.batch_size 2.0 +805 73 training.label_smoothing 0.0050299098450468215 +805 74 model.embedding_dim 1.0 +805 74 model.scoring_fct_norm 2.0 +805 74 training.batch_size 1.0 +805 74 training.label_smoothing 0.21733841626959474 +805 75 model.embedding_dim 0.0 +805 75 model.scoring_fct_norm 1.0 +805 75 training.batch_size 1.0 +805 75 training.label_smoothing 0.8463507488684061 +805 76 model.embedding_dim 2.0 +805 76 model.scoring_fct_norm 2.0 +805 76 training.batch_size 1.0 +805 76 training.label_smoothing 0.009644709375698226 +805 77 model.embedding_dim 2.0 +805 77 model.scoring_fct_norm 2.0 +805 77 
training.batch_size 2.0 +805 77 training.label_smoothing 0.10309143551419767 +805 78 model.embedding_dim 2.0 +805 78 model.scoring_fct_norm 1.0 +805 78 training.batch_size 0.0 +805 78 training.label_smoothing 0.14662287944913072 +805 79 model.embedding_dim 0.0 +805 79 model.scoring_fct_norm 2.0 +805 79 training.batch_size 0.0 +805 79 training.label_smoothing 0.012513531482662607 +805 80 model.embedding_dim 0.0 +805 80 model.scoring_fct_norm 1.0 +805 80 training.batch_size 1.0 +805 80 training.label_smoothing 0.16553029764729643 +805 81 model.embedding_dim 2.0 +805 81 model.scoring_fct_norm 2.0 +805 81 training.batch_size 1.0 +805 81 training.label_smoothing 0.041929939272922434 +805 82 model.embedding_dim 1.0 +805 82 model.scoring_fct_norm 2.0 +805 82 training.batch_size 1.0 +805 82 training.label_smoothing 0.17485510044219782 +805 83 model.embedding_dim 1.0 +805 83 model.scoring_fct_norm 2.0 +805 83 training.batch_size 1.0 +805 83 training.label_smoothing 0.0021864350643026006 +805 84 model.embedding_dim 0.0 +805 84 model.scoring_fct_norm 2.0 +805 84 training.batch_size 2.0 +805 84 training.label_smoothing 0.027377644427044403 +805 85 model.embedding_dim 0.0 +805 85 model.scoring_fct_norm 1.0 +805 85 training.batch_size 0.0 +805 85 training.label_smoothing 0.06873530111323291 +805 86 model.embedding_dim 1.0 +805 86 model.scoring_fct_norm 1.0 +805 86 training.batch_size 1.0 +805 86 training.label_smoothing 0.9603074379120086 +805 87 model.embedding_dim 2.0 +805 87 model.scoring_fct_norm 2.0 +805 87 training.batch_size 2.0 +805 87 training.label_smoothing 0.617457359446447 +805 88 model.embedding_dim 2.0 +805 88 model.scoring_fct_norm 2.0 +805 88 training.batch_size 1.0 +805 88 training.label_smoothing 0.01930792865707782 +805 89 model.embedding_dim 0.0 +805 89 model.scoring_fct_norm 2.0 +805 89 training.batch_size 1.0 +805 89 training.label_smoothing 0.1360540139032007 +805 90 model.embedding_dim 2.0 +805 90 model.scoring_fct_norm 2.0 +805 90 training.batch_size 
1.0 +805 90 training.label_smoothing 0.01463212986039168 +805 91 model.embedding_dim 0.0 +805 91 model.scoring_fct_norm 1.0 +805 91 training.batch_size 2.0 +805 91 training.label_smoothing 0.0037257312560719613 +805 92 model.embedding_dim 1.0 +805 92 model.scoring_fct_norm 1.0 +805 92 training.batch_size 0.0 +805 92 training.label_smoothing 0.19809095380604974 +805 93 model.embedding_dim 0.0 +805 93 model.scoring_fct_norm 1.0 +805 93 training.batch_size 0.0 +805 93 training.label_smoothing 0.8681910166679297 +805 94 model.embedding_dim 2.0 +805 94 model.scoring_fct_norm 2.0 +805 94 training.batch_size 1.0 +805 94 training.label_smoothing 0.0015260808449743257 +805 95 model.embedding_dim 1.0 +805 95 model.scoring_fct_norm 1.0 +805 95 training.batch_size 0.0 +805 95 training.label_smoothing 0.0041623048584466344 +805 96 model.embedding_dim 2.0 +805 96 model.scoring_fct_norm 2.0 +805 96 training.batch_size 1.0 +805 96 training.label_smoothing 0.014747516008442034 +805 97 model.embedding_dim 1.0 +805 97 model.scoring_fct_norm 1.0 +805 97 training.batch_size 1.0 +805 97 training.label_smoothing 0.018759151004085987 +805 98 model.embedding_dim 0.0 +805 98 model.scoring_fct_norm 1.0 +805 98 training.batch_size 0.0 +805 98 training.label_smoothing 0.0029697600676993527 +805 99 model.embedding_dim 2.0 +805 99 model.scoring_fct_norm 1.0 +805 99 training.batch_size 1.0 +805 99 training.label_smoothing 0.001979661842477208 +805 100 model.embedding_dim 1.0 +805 100 model.scoring_fct_norm 1.0 +805 100 training.batch_size 1.0 +805 100 training.label_smoothing 0.9425524022992617 +805 1 dataset """kinships""" +805 1 model """transe""" +805 1 loss """bceaftersigmoid""" +805 1 regularizer """no""" +805 1 optimizer """adadelta""" +805 1 training_loop """lcwa""" +805 1 evaluator """rankbased""" +805 2 dataset """kinships""" +805 2 model """transe""" +805 2 loss """bceaftersigmoid""" +805 2 regularizer """no""" +805 2 optimizer """adadelta""" +805 2 training_loop """lcwa""" +805 2 
evaluator """rankbased""" +805 3 dataset """kinships""" +805 3 model """transe""" +805 3 loss """bceaftersigmoid""" +805 3 regularizer """no""" +805 3 optimizer """adadelta""" +805 3 training_loop """lcwa""" +805 3 evaluator """rankbased""" +805 4 dataset """kinships""" +805 4 model """transe""" +805 4 loss """bceaftersigmoid""" +805 4 regularizer """no""" +805 4 optimizer """adadelta""" +805 4 training_loop """lcwa""" +805 4 evaluator """rankbased""" +805 5 dataset """kinships""" +805 5 model """transe""" +805 5 loss """bceaftersigmoid""" +805 5 regularizer """no""" +805 5 optimizer """adadelta""" +805 5 training_loop """lcwa""" +805 5 evaluator """rankbased""" +805 6 dataset """kinships""" +805 6 model """transe""" +805 6 loss """bceaftersigmoid""" +805 6 regularizer """no""" +805 6 optimizer """adadelta""" +805 6 training_loop """lcwa""" +805 6 evaluator """rankbased""" +805 7 dataset """kinships""" +805 7 model """transe""" +805 7 loss """bceaftersigmoid""" +805 7 regularizer """no""" +805 7 optimizer """adadelta""" +805 7 training_loop """lcwa""" +805 7 evaluator """rankbased""" +805 8 dataset """kinships""" +805 8 model """transe""" +805 8 loss """bceaftersigmoid""" +805 8 regularizer """no""" +805 8 optimizer """adadelta""" +805 8 training_loop """lcwa""" +805 8 evaluator """rankbased""" +805 9 dataset """kinships""" +805 9 model """transe""" +805 9 loss """bceaftersigmoid""" +805 9 regularizer """no""" +805 9 optimizer """adadelta""" +805 9 training_loop """lcwa""" +805 9 evaluator """rankbased""" +805 10 dataset """kinships""" +805 10 model """transe""" +805 10 loss """bceaftersigmoid""" +805 10 regularizer """no""" +805 10 optimizer """adadelta""" +805 10 training_loop """lcwa""" +805 10 evaluator """rankbased""" +805 11 dataset """kinships""" +805 11 model """transe""" +805 11 loss """bceaftersigmoid""" +805 11 regularizer """no""" +805 11 optimizer """adadelta""" +805 11 training_loop """lcwa""" +805 11 evaluator """rankbased""" +805 12 dataset 
"""kinships""" +805 12 model """transe""" +805 12 loss """bceaftersigmoid""" +805 12 regularizer """no""" +805 12 optimizer """adadelta""" +805 12 training_loop """lcwa""" +805 12 evaluator """rankbased""" +805 13 dataset """kinships""" +805 13 model """transe""" +805 13 loss """bceaftersigmoid""" +805 13 regularizer """no""" +805 13 optimizer """adadelta""" +805 13 training_loop """lcwa""" +805 13 evaluator """rankbased""" +805 14 dataset """kinships""" +805 14 model """transe""" +805 14 loss """bceaftersigmoid""" +805 14 regularizer """no""" +805 14 optimizer """adadelta""" +805 14 training_loop """lcwa""" +805 14 evaluator """rankbased""" +805 15 dataset """kinships""" +805 15 model """transe""" +805 15 loss """bceaftersigmoid""" +805 15 regularizer """no""" +805 15 optimizer """adadelta""" +805 15 training_loop """lcwa""" +805 15 evaluator """rankbased""" +805 16 dataset """kinships""" +805 16 model """transe""" +805 16 loss """bceaftersigmoid""" +805 16 regularizer """no""" +805 16 optimizer """adadelta""" +805 16 training_loop """lcwa""" +805 16 evaluator """rankbased""" +805 17 dataset """kinships""" +805 17 model """transe""" +805 17 loss """bceaftersigmoid""" +805 17 regularizer """no""" +805 17 optimizer """adadelta""" +805 17 training_loop """lcwa""" +805 17 evaluator """rankbased""" +805 18 dataset """kinships""" +805 18 model """transe""" +805 18 loss """bceaftersigmoid""" +805 18 regularizer """no""" +805 18 optimizer """adadelta""" +805 18 training_loop """lcwa""" +805 18 evaluator """rankbased""" +805 19 dataset """kinships""" +805 19 model """transe""" +805 19 loss """bceaftersigmoid""" +805 19 regularizer """no""" +805 19 optimizer """adadelta""" +805 19 training_loop """lcwa""" +805 19 evaluator """rankbased""" +805 20 dataset """kinships""" +805 20 model """transe""" +805 20 loss """bceaftersigmoid""" +805 20 regularizer """no""" +805 20 optimizer """adadelta""" +805 20 training_loop """lcwa""" +805 20 evaluator """rankbased""" +805 21 dataset 
"""kinships""" +805 21 model """transe""" +805 21 loss """bceaftersigmoid""" +805 21 regularizer """no""" +805 21 optimizer """adadelta""" +805 21 training_loop """lcwa""" +805 21 evaluator """rankbased""" +805 22 dataset """kinships""" +805 22 model """transe""" +805 22 loss """bceaftersigmoid""" +805 22 regularizer """no""" +805 22 optimizer """adadelta""" +805 22 training_loop """lcwa""" +805 22 evaluator """rankbased""" +805 23 dataset """kinships""" +805 23 model """transe""" +805 23 loss """bceaftersigmoid""" +805 23 regularizer """no""" +805 23 optimizer """adadelta""" +805 23 training_loop """lcwa""" +805 23 evaluator """rankbased""" +805 24 dataset """kinships""" +805 24 model """transe""" +805 24 loss """bceaftersigmoid""" +805 24 regularizer """no""" +805 24 optimizer """adadelta""" +805 24 training_loop """lcwa""" +805 24 evaluator """rankbased""" +805 25 dataset """kinships""" +805 25 model """transe""" +805 25 loss """bceaftersigmoid""" +805 25 regularizer """no""" +805 25 optimizer """adadelta""" +805 25 training_loop """lcwa""" +805 25 evaluator """rankbased""" +805 26 dataset """kinships""" +805 26 model """transe""" +805 26 loss """bceaftersigmoid""" +805 26 regularizer """no""" +805 26 optimizer """adadelta""" +805 26 training_loop """lcwa""" +805 26 evaluator """rankbased""" +805 27 dataset """kinships""" +805 27 model """transe""" +805 27 loss """bceaftersigmoid""" +805 27 regularizer """no""" +805 27 optimizer """adadelta""" +805 27 training_loop """lcwa""" +805 27 evaluator """rankbased""" +805 28 dataset """kinships""" +805 28 model """transe""" +805 28 loss """bceaftersigmoid""" +805 28 regularizer """no""" +805 28 optimizer """adadelta""" +805 28 training_loop """lcwa""" +805 28 evaluator """rankbased""" +805 29 dataset """kinships""" +805 29 model """transe""" +805 29 loss """bceaftersigmoid""" +805 29 regularizer """no""" +805 29 optimizer """adadelta""" +805 29 training_loop """lcwa""" +805 29 evaluator """rankbased""" +805 30 dataset 
"""kinships""" +805 30 model """transe""" +805 30 loss """bceaftersigmoid""" +805 30 regularizer """no""" +805 30 optimizer """adadelta""" +805 30 training_loop """lcwa""" +805 30 evaluator """rankbased""" +805 31 dataset """kinships""" +805 31 model """transe""" +805 31 loss """bceaftersigmoid""" +805 31 regularizer """no""" +805 31 optimizer """adadelta""" +805 31 training_loop """lcwa""" +805 31 evaluator """rankbased""" +805 32 dataset """kinships""" +805 32 model """transe""" +805 32 loss """bceaftersigmoid""" +805 32 regularizer """no""" +805 32 optimizer """adadelta""" +805 32 training_loop """lcwa""" +805 32 evaluator """rankbased""" +805 33 dataset """kinships""" +805 33 model """transe""" +805 33 loss """bceaftersigmoid""" +805 33 regularizer """no""" +805 33 optimizer """adadelta""" +805 33 training_loop """lcwa""" +805 33 evaluator """rankbased""" +805 34 dataset """kinships""" +805 34 model """transe""" +805 34 loss """bceaftersigmoid""" +805 34 regularizer """no""" +805 34 optimizer """adadelta""" +805 34 training_loop """lcwa""" +805 34 evaluator """rankbased""" +805 35 dataset """kinships""" +805 35 model """transe""" +805 35 loss """bceaftersigmoid""" +805 35 regularizer """no""" +805 35 optimizer """adadelta""" +805 35 training_loop """lcwa""" +805 35 evaluator """rankbased""" +805 36 dataset """kinships""" +805 36 model """transe""" +805 36 loss """bceaftersigmoid""" +805 36 regularizer """no""" +805 36 optimizer """adadelta""" +805 36 training_loop """lcwa""" +805 36 evaluator """rankbased""" +805 37 dataset """kinships""" +805 37 model """transe""" +805 37 loss """bceaftersigmoid""" +805 37 regularizer """no""" +805 37 optimizer """adadelta""" +805 37 training_loop """lcwa""" +805 37 evaluator """rankbased""" +805 38 dataset """kinships""" +805 38 model """transe""" +805 38 loss """bceaftersigmoid""" +805 38 regularizer """no""" +805 38 optimizer """adadelta""" +805 38 training_loop """lcwa""" +805 38 evaluator """rankbased""" +805 39 dataset 
"""kinships""" +805 39 model """transe""" +805 39 loss """bceaftersigmoid""" +805 39 regularizer """no""" +805 39 optimizer """adadelta""" +805 39 training_loop """lcwa""" +805 39 evaluator """rankbased""" +805 40 dataset """kinships""" +805 40 model """transe""" +805 40 loss """bceaftersigmoid""" +805 40 regularizer """no""" +805 40 optimizer """adadelta""" +805 40 training_loop """lcwa""" +805 40 evaluator """rankbased""" +805 41 dataset """kinships""" +805 41 model """transe""" +805 41 loss """bceaftersigmoid""" +805 41 regularizer """no""" +805 41 optimizer """adadelta""" +805 41 training_loop """lcwa""" +805 41 evaluator """rankbased""" +805 42 dataset """kinships""" +805 42 model """transe""" +805 42 loss """bceaftersigmoid""" +805 42 regularizer """no""" +805 42 optimizer """adadelta""" +805 42 training_loop """lcwa""" +805 42 evaluator """rankbased""" +805 43 dataset """kinships""" +805 43 model """transe""" +805 43 loss """bceaftersigmoid""" +805 43 regularizer """no""" +805 43 optimizer """adadelta""" +805 43 training_loop """lcwa""" +805 43 evaluator """rankbased""" +805 44 dataset """kinships""" +805 44 model """transe""" +805 44 loss """bceaftersigmoid""" +805 44 regularizer """no""" +805 44 optimizer """adadelta""" +805 44 training_loop """lcwa""" +805 44 evaluator """rankbased""" +805 45 dataset """kinships""" +805 45 model """transe""" +805 45 loss """bceaftersigmoid""" +805 45 regularizer """no""" +805 45 optimizer """adadelta""" +805 45 training_loop """lcwa""" +805 45 evaluator """rankbased""" +805 46 dataset """kinships""" +805 46 model """transe""" +805 46 loss """bceaftersigmoid""" +805 46 regularizer """no""" +805 46 optimizer """adadelta""" +805 46 training_loop """lcwa""" +805 46 evaluator """rankbased""" +805 47 dataset """kinships""" +805 47 model """transe""" +805 47 loss """bceaftersigmoid""" +805 47 regularizer """no""" +805 47 optimizer """adadelta""" +805 47 training_loop """lcwa""" +805 47 evaluator """rankbased""" +805 48 dataset 
"""kinships""" +805 48 model """transe""" +805 48 loss """bceaftersigmoid""" +805 48 regularizer """no""" +805 48 optimizer """adadelta""" +805 48 training_loop """lcwa""" +805 48 evaluator """rankbased""" +805 49 dataset """kinships""" +805 49 model """transe""" +805 49 loss """bceaftersigmoid""" +805 49 regularizer """no""" +805 49 optimizer """adadelta""" +805 49 training_loop """lcwa""" +805 49 evaluator """rankbased""" +805 50 dataset """kinships""" +805 50 model """transe""" +805 50 loss """bceaftersigmoid""" +805 50 regularizer """no""" +805 50 optimizer """adadelta""" +805 50 training_loop """lcwa""" +805 50 evaluator """rankbased""" +805 51 dataset """kinships""" +805 51 model """transe""" +805 51 loss """bceaftersigmoid""" +805 51 regularizer """no""" +805 51 optimizer """adadelta""" +805 51 training_loop """lcwa""" +805 51 evaluator """rankbased""" +805 52 dataset """kinships""" +805 52 model """transe""" +805 52 loss """bceaftersigmoid""" +805 52 regularizer """no""" +805 52 optimizer """adadelta""" +805 52 training_loop """lcwa""" +805 52 evaluator """rankbased""" +805 53 dataset """kinships""" +805 53 model """transe""" +805 53 loss """bceaftersigmoid""" +805 53 regularizer """no""" +805 53 optimizer """adadelta""" +805 53 training_loop """lcwa""" +805 53 evaluator """rankbased""" +805 54 dataset """kinships""" +805 54 model """transe""" +805 54 loss """bceaftersigmoid""" +805 54 regularizer """no""" +805 54 optimizer """adadelta""" +805 54 training_loop """lcwa""" +805 54 evaluator """rankbased""" +805 55 dataset """kinships""" +805 55 model """transe""" +805 55 loss """bceaftersigmoid""" +805 55 regularizer """no""" +805 55 optimizer """adadelta""" +805 55 training_loop """lcwa""" +805 55 evaluator """rankbased""" +805 56 dataset """kinships""" +805 56 model """transe""" +805 56 loss """bceaftersigmoid""" +805 56 regularizer """no""" +805 56 optimizer """adadelta""" +805 56 training_loop """lcwa""" +805 56 evaluator """rankbased""" +805 57 dataset 
"""kinships""" +805 57 model """transe""" +805 57 loss """bceaftersigmoid""" +805 57 regularizer """no""" +805 57 optimizer """adadelta""" +805 57 training_loop """lcwa""" +805 57 evaluator """rankbased""" +805 58 dataset """kinships""" +805 58 model """transe""" +805 58 loss """bceaftersigmoid""" +805 58 regularizer """no""" +805 58 optimizer """adadelta""" +805 58 training_loop """lcwa""" +805 58 evaluator """rankbased""" +805 59 dataset """kinships""" +805 59 model """transe""" +805 59 loss """bceaftersigmoid""" +805 59 regularizer """no""" +805 59 optimizer """adadelta""" +805 59 training_loop """lcwa""" +805 59 evaluator """rankbased""" +805 60 dataset """kinships""" +805 60 model """transe""" +805 60 loss """bceaftersigmoid""" +805 60 regularizer """no""" +805 60 optimizer """adadelta""" +805 60 training_loop """lcwa""" +805 60 evaluator """rankbased""" +805 61 dataset """kinships""" +805 61 model """transe""" +805 61 loss """bceaftersigmoid""" +805 61 regularizer """no""" +805 61 optimizer """adadelta""" +805 61 training_loop """lcwa""" +805 61 evaluator """rankbased""" +805 62 dataset """kinships""" +805 62 model """transe""" +805 62 loss """bceaftersigmoid""" +805 62 regularizer """no""" +805 62 optimizer """adadelta""" +805 62 training_loop """lcwa""" +805 62 evaluator """rankbased""" +805 63 dataset """kinships""" +805 63 model """transe""" +805 63 loss """bceaftersigmoid""" +805 63 regularizer """no""" +805 63 optimizer """adadelta""" +805 63 training_loop """lcwa""" +805 63 evaluator """rankbased""" +805 64 dataset """kinships""" +805 64 model """transe""" +805 64 loss """bceaftersigmoid""" +805 64 regularizer """no""" +805 64 optimizer """adadelta""" +805 64 training_loop """lcwa""" +805 64 evaluator """rankbased""" +805 65 dataset """kinships""" +805 65 model """transe""" +805 65 loss """bceaftersigmoid""" +805 65 regularizer """no""" +805 65 optimizer """adadelta""" +805 65 training_loop """lcwa""" +805 65 evaluator """rankbased""" +805 66 dataset 
"""kinships""" +805 66 model """transe""" +805 66 loss """bceaftersigmoid""" +805 66 regularizer """no""" +805 66 optimizer """adadelta""" +805 66 training_loop """lcwa""" +805 66 evaluator """rankbased""" +805 67 dataset """kinships""" +805 67 model """transe""" +805 67 loss """bceaftersigmoid""" +805 67 regularizer """no""" +805 67 optimizer """adadelta""" +805 67 training_loop """lcwa""" +805 67 evaluator """rankbased""" +805 68 dataset """kinships""" +805 68 model """transe""" +805 68 loss """bceaftersigmoid""" +805 68 regularizer """no""" +805 68 optimizer """adadelta""" +805 68 training_loop """lcwa""" +805 68 evaluator """rankbased""" +805 69 dataset """kinships""" +805 69 model """transe""" +805 69 loss """bceaftersigmoid""" +805 69 regularizer """no""" +805 69 optimizer """adadelta""" +805 69 training_loop """lcwa""" +805 69 evaluator """rankbased""" +805 70 dataset """kinships""" +805 70 model """transe""" +805 70 loss """bceaftersigmoid""" +805 70 regularizer """no""" +805 70 optimizer """adadelta""" +805 70 training_loop """lcwa""" +805 70 evaluator """rankbased""" +805 71 dataset """kinships""" +805 71 model """transe""" +805 71 loss """bceaftersigmoid""" +805 71 regularizer """no""" +805 71 optimizer """adadelta""" +805 71 training_loop """lcwa""" +805 71 evaluator """rankbased""" +805 72 dataset """kinships""" +805 72 model """transe""" +805 72 loss """bceaftersigmoid""" +805 72 regularizer """no""" +805 72 optimizer """adadelta""" +805 72 training_loop """lcwa""" +805 72 evaluator """rankbased""" +805 73 dataset """kinships""" +805 73 model """transe""" +805 73 loss """bceaftersigmoid""" +805 73 regularizer """no""" +805 73 optimizer """adadelta""" +805 73 training_loop """lcwa""" +805 73 evaluator """rankbased""" +805 74 dataset """kinships""" +805 74 model """transe""" +805 74 loss """bceaftersigmoid""" +805 74 regularizer """no""" +805 74 optimizer """adadelta""" +805 74 training_loop """lcwa""" +805 74 evaluator """rankbased""" +805 75 dataset 
"""kinships""" +805 75 model """transe""" +805 75 loss """bceaftersigmoid""" +805 75 regularizer """no""" +805 75 optimizer """adadelta""" +805 75 training_loop """lcwa""" +805 75 evaluator """rankbased""" +805 76 dataset """kinships""" +805 76 model """transe""" +805 76 loss """bceaftersigmoid""" +805 76 regularizer """no""" +805 76 optimizer """adadelta""" +805 76 training_loop """lcwa""" +805 76 evaluator """rankbased""" +805 77 dataset """kinships""" +805 77 model """transe""" +805 77 loss """bceaftersigmoid""" +805 77 regularizer """no""" +805 77 optimizer """adadelta""" +805 77 training_loop """lcwa""" +805 77 evaluator """rankbased""" +805 78 dataset """kinships""" +805 78 model """transe""" +805 78 loss """bceaftersigmoid""" +805 78 regularizer """no""" +805 78 optimizer """adadelta""" +805 78 training_loop """lcwa""" +805 78 evaluator """rankbased""" +805 79 dataset """kinships""" +805 79 model """transe""" +805 79 loss """bceaftersigmoid""" +805 79 regularizer """no""" +805 79 optimizer """adadelta""" +805 79 training_loop """lcwa""" +805 79 evaluator """rankbased""" +805 80 dataset """kinships""" +805 80 model """transe""" +805 80 loss """bceaftersigmoid""" +805 80 regularizer """no""" +805 80 optimizer """adadelta""" +805 80 training_loop """lcwa""" +805 80 evaluator """rankbased""" +805 81 dataset """kinships""" +805 81 model """transe""" +805 81 loss """bceaftersigmoid""" +805 81 regularizer """no""" +805 81 optimizer """adadelta""" +805 81 training_loop """lcwa""" +805 81 evaluator """rankbased""" +805 82 dataset """kinships""" +805 82 model """transe""" +805 82 loss """bceaftersigmoid""" +805 82 regularizer """no""" +805 82 optimizer """adadelta""" +805 82 training_loop """lcwa""" +805 82 evaluator """rankbased""" +805 83 dataset """kinships""" +805 83 model """transe""" +805 83 loss """bceaftersigmoid""" +805 83 regularizer """no""" +805 83 optimizer """adadelta""" +805 83 training_loop """lcwa""" +805 83 evaluator """rankbased""" +805 84 dataset 
"""kinships""" +805 84 model """transe""" +805 84 loss """bceaftersigmoid""" +805 84 regularizer """no""" +805 84 optimizer """adadelta""" +805 84 training_loop """lcwa""" +805 84 evaluator """rankbased""" +805 85 dataset """kinships""" +805 85 model """transe""" +805 85 loss """bceaftersigmoid""" +805 85 regularizer """no""" +805 85 optimizer """adadelta""" +805 85 training_loop """lcwa""" +805 85 evaluator """rankbased""" +805 86 dataset """kinships""" +805 86 model """transe""" +805 86 loss """bceaftersigmoid""" +805 86 regularizer """no""" +805 86 optimizer """adadelta""" +805 86 training_loop """lcwa""" +805 86 evaluator """rankbased""" +805 87 dataset """kinships""" +805 87 model """transe""" +805 87 loss """bceaftersigmoid""" +805 87 regularizer """no""" +805 87 optimizer """adadelta""" +805 87 training_loop """lcwa""" +805 87 evaluator """rankbased""" +805 88 dataset """kinships""" +805 88 model """transe""" +805 88 loss """bceaftersigmoid""" +805 88 regularizer """no""" +805 88 optimizer """adadelta""" +805 88 training_loop """lcwa""" +805 88 evaluator """rankbased""" +805 89 dataset """kinships""" +805 89 model """transe""" +805 89 loss """bceaftersigmoid""" +805 89 regularizer """no""" +805 89 optimizer """adadelta""" +805 89 training_loop """lcwa""" +805 89 evaluator """rankbased""" +805 90 dataset """kinships""" +805 90 model """transe""" +805 90 loss """bceaftersigmoid""" +805 90 regularizer """no""" +805 90 optimizer """adadelta""" +805 90 training_loop """lcwa""" +805 90 evaluator """rankbased""" +805 91 dataset """kinships""" +805 91 model """transe""" +805 91 loss """bceaftersigmoid""" +805 91 regularizer """no""" +805 91 optimizer """adadelta""" +805 91 training_loop """lcwa""" +805 91 evaluator """rankbased""" +805 92 dataset """kinships""" +805 92 model """transe""" +805 92 loss """bceaftersigmoid""" +805 92 regularizer """no""" +805 92 optimizer """adadelta""" +805 92 training_loop """lcwa""" +805 92 evaluator """rankbased""" +805 93 dataset 
"""kinships""" +805 93 model """transe""" +805 93 loss """bceaftersigmoid""" +805 93 regularizer """no""" +805 93 optimizer """adadelta""" +805 93 training_loop """lcwa""" +805 93 evaluator """rankbased""" +805 94 dataset """kinships""" +805 94 model """transe""" +805 94 loss """bceaftersigmoid""" +805 94 regularizer """no""" +805 94 optimizer """adadelta""" +805 94 training_loop """lcwa""" +805 94 evaluator """rankbased""" +805 95 dataset """kinships""" +805 95 model """transe""" +805 95 loss """bceaftersigmoid""" +805 95 regularizer """no""" +805 95 optimizer """adadelta""" +805 95 training_loop """lcwa""" +805 95 evaluator """rankbased""" +805 96 dataset """kinships""" +805 96 model """transe""" +805 96 loss """bceaftersigmoid""" +805 96 regularizer """no""" +805 96 optimizer """adadelta""" +805 96 training_loop """lcwa""" +805 96 evaluator """rankbased""" +805 97 dataset """kinships""" +805 97 model """transe""" +805 97 loss """bceaftersigmoid""" +805 97 regularizer """no""" +805 97 optimizer """adadelta""" +805 97 training_loop """lcwa""" +805 97 evaluator """rankbased""" +805 98 dataset """kinships""" +805 98 model """transe""" +805 98 loss """bceaftersigmoid""" +805 98 regularizer """no""" +805 98 optimizer """adadelta""" +805 98 training_loop """lcwa""" +805 98 evaluator """rankbased""" +805 99 dataset """kinships""" +805 99 model """transe""" +805 99 loss """bceaftersigmoid""" +805 99 regularizer """no""" +805 99 optimizer """adadelta""" +805 99 training_loop """lcwa""" +805 99 evaluator """rankbased""" +805 100 dataset """kinships""" +805 100 model """transe""" +805 100 loss """bceaftersigmoid""" +805 100 regularizer """no""" +805 100 optimizer """adadelta""" +805 100 training_loop """lcwa""" +805 100 evaluator """rankbased""" +806 1 model.embedding_dim 0.0 +806 1 model.scoring_fct_norm 2.0 +806 1 training.batch_size 1.0 +806 1 training.label_smoothing 0.0018250180909233679 +806 2 model.embedding_dim 0.0 +806 2 model.scoring_fct_norm 2.0 +806 2 
training.batch_size 1.0 +806 2 training.label_smoothing 0.05871713972593121 +806 3 model.embedding_dim 0.0 +806 3 model.scoring_fct_norm 1.0 +806 3 training.batch_size 2.0 +806 3 training.label_smoothing 0.0013821284623530708 +806 4 model.embedding_dim 0.0 +806 4 model.scoring_fct_norm 2.0 +806 4 training.batch_size 1.0 +806 4 training.label_smoothing 0.01945400697228718 +806 5 model.embedding_dim 0.0 +806 5 model.scoring_fct_norm 2.0 +806 5 training.batch_size 2.0 +806 5 training.label_smoothing 0.17234525821013766 +806 6 model.embedding_dim 0.0 +806 6 model.scoring_fct_norm 2.0 +806 6 training.batch_size 1.0 +806 6 training.label_smoothing 0.32930513555473284 +806 7 model.embedding_dim 2.0 +806 7 model.scoring_fct_norm 2.0 +806 7 training.batch_size 2.0 +806 7 training.label_smoothing 0.004458122036898628 +806 8 model.embedding_dim 2.0 +806 8 model.scoring_fct_norm 2.0 +806 8 training.batch_size 1.0 +806 8 training.label_smoothing 0.0015783279776739828 +806 9 model.embedding_dim 0.0 +806 9 model.scoring_fct_norm 2.0 +806 9 training.batch_size 2.0 +806 9 training.label_smoothing 0.15667133259808055 +806 10 model.embedding_dim 0.0 +806 10 model.scoring_fct_norm 1.0 +806 10 training.batch_size 2.0 +806 10 training.label_smoothing 0.008220482290682249 +806 11 model.embedding_dim 0.0 +806 11 model.scoring_fct_norm 2.0 +806 11 training.batch_size 0.0 +806 11 training.label_smoothing 0.003223561929592206 +806 12 model.embedding_dim 0.0 +806 12 model.scoring_fct_norm 2.0 +806 12 training.batch_size 0.0 +806 12 training.label_smoothing 0.00806874320407113 +806 13 model.embedding_dim 1.0 +806 13 model.scoring_fct_norm 1.0 +806 13 training.batch_size 2.0 +806 13 training.label_smoothing 0.004836076365295479 +806 14 model.embedding_dim 1.0 +806 14 model.scoring_fct_norm 2.0 +806 14 training.batch_size 2.0 +806 14 training.label_smoothing 0.012353200130558429 +806 15 model.embedding_dim 1.0 +806 15 model.scoring_fct_norm 2.0 +806 15 training.batch_size 0.0 +806 15 
training.label_smoothing 0.2138192909319302 +806 16 model.embedding_dim 0.0 +806 16 model.scoring_fct_norm 2.0 +806 16 training.batch_size 0.0 +806 16 training.label_smoothing 0.015082888010183348 +806 17 model.embedding_dim 1.0 +806 17 model.scoring_fct_norm 2.0 +806 17 training.batch_size 2.0 +806 17 training.label_smoothing 0.003356979827508039 +806 18 model.embedding_dim 1.0 +806 18 model.scoring_fct_norm 2.0 +806 18 training.batch_size 1.0 +806 18 training.label_smoothing 0.4437190484467546 +806 19 model.embedding_dim 0.0 +806 19 model.scoring_fct_norm 2.0 +806 19 training.batch_size 0.0 +806 19 training.label_smoothing 0.018359105912598598 +806 20 model.embedding_dim 2.0 +806 20 model.scoring_fct_norm 2.0 +806 20 training.batch_size 2.0 +806 20 training.label_smoothing 0.008774057344502182 +806 21 model.embedding_dim 1.0 +806 21 model.scoring_fct_norm 1.0 +806 21 training.batch_size 2.0 +806 21 training.label_smoothing 0.324359491289459 +806 22 model.embedding_dim 1.0 +806 22 model.scoring_fct_norm 1.0 +806 22 training.batch_size 0.0 +806 22 training.label_smoothing 0.013861134827266391 +806 23 model.embedding_dim 1.0 +806 23 model.scoring_fct_norm 1.0 +806 23 training.batch_size 0.0 +806 23 training.label_smoothing 0.007952588138910891 +806 24 model.embedding_dim 1.0 +806 24 model.scoring_fct_norm 1.0 +806 24 training.batch_size 1.0 +806 24 training.label_smoothing 0.004476397654876786 +806 25 model.embedding_dim 0.0 +806 25 model.scoring_fct_norm 2.0 +806 25 training.batch_size 2.0 +806 25 training.label_smoothing 0.0037446289976032607 +806 26 model.embedding_dim 2.0 +806 26 model.scoring_fct_norm 1.0 +806 26 training.batch_size 1.0 +806 26 training.label_smoothing 0.2769052335227954 +806 27 model.embedding_dim 0.0 +806 27 model.scoring_fct_norm 1.0 +806 27 training.batch_size 0.0 +806 27 training.label_smoothing 0.004512111134112547 +806 28 model.embedding_dim 0.0 +806 28 model.scoring_fct_norm 2.0 +806 28 training.batch_size 1.0 +806 28 
training.label_smoothing 0.002776793016983226 +806 29 model.embedding_dim 0.0 +806 29 model.scoring_fct_norm 2.0 +806 29 training.batch_size 0.0 +806 29 training.label_smoothing 0.10457393856235761 +806 30 model.embedding_dim 1.0 +806 30 model.scoring_fct_norm 1.0 +806 30 training.batch_size 1.0 +806 30 training.label_smoothing 0.0517909703050418 +806 31 model.embedding_dim 0.0 +806 31 model.scoring_fct_norm 2.0 +806 31 training.batch_size 2.0 +806 31 training.label_smoothing 0.396582477068748 +806 32 model.embedding_dim 0.0 +806 32 model.scoring_fct_norm 1.0 +806 32 training.batch_size 0.0 +806 32 training.label_smoothing 0.0020638816703531 +806 33 model.embedding_dim 1.0 +806 33 model.scoring_fct_norm 2.0 +806 33 training.batch_size 0.0 +806 33 training.label_smoothing 0.0026725113332722603 +806 34 model.embedding_dim 0.0 +806 34 model.scoring_fct_norm 1.0 +806 34 training.batch_size 0.0 +806 34 training.label_smoothing 0.1168969539552908 +806 35 model.embedding_dim 0.0 +806 35 model.scoring_fct_norm 1.0 +806 35 training.batch_size 1.0 +806 35 training.label_smoothing 0.17198245984817426 +806 36 model.embedding_dim 2.0 +806 36 model.scoring_fct_norm 2.0 +806 36 training.batch_size 1.0 +806 36 training.label_smoothing 0.007708974542917733 +806 37 model.embedding_dim 1.0 +806 37 model.scoring_fct_norm 1.0 +806 37 training.batch_size 0.0 +806 37 training.label_smoothing 0.9927311159275507 +806 38 model.embedding_dim 1.0 +806 38 model.scoring_fct_norm 2.0 +806 38 training.batch_size 2.0 +806 38 training.label_smoothing 0.17181529501165718 +806 39 model.embedding_dim 1.0 +806 39 model.scoring_fct_norm 2.0 +806 39 training.batch_size 1.0 +806 39 training.label_smoothing 0.09454311423183398 +806 40 model.embedding_dim 0.0 +806 40 model.scoring_fct_norm 2.0 +806 40 training.batch_size 1.0 +806 40 training.label_smoothing 0.09636917303215758 +806 41 model.embedding_dim 0.0 +806 41 model.scoring_fct_norm 2.0 +806 41 training.batch_size 2.0 +806 41 training.label_smoothing 
0.0012102381174774362 +806 42 model.embedding_dim 2.0 +806 42 model.scoring_fct_norm 1.0 +806 42 training.batch_size 2.0 +806 42 training.label_smoothing 0.49245376596925605 +806 43 model.embedding_dim 1.0 +806 43 model.scoring_fct_norm 2.0 +806 43 training.batch_size 0.0 +806 43 training.label_smoothing 0.26699734070107106 +806 44 model.embedding_dim 0.0 +806 44 model.scoring_fct_norm 1.0 +806 44 training.batch_size 0.0 +806 44 training.label_smoothing 0.00727548897263069 +806 45 model.embedding_dim 2.0 +806 45 model.scoring_fct_norm 2.0 +806 45 training.batch_size 0.0 +806 45 training.label_smoothing 0.002127565657318377 +806 46 model.embedding_dim 2.0 +806 46 model.scoring_fct_norm 2.0 +806 46 training.batch_size 1.0 +806 46 training.label_smoothing 0.022595374882345948 +806 47 model.embedding_dim 0.0 +806 47 model.scoring_fct_norm 1.0 +806 47 training.batch_size 0.0 +806 47 training.label_smoothing 0.022804875859290693 +806 48 model.embedding_dim 0.0 +806 48 model.scoring_fct_norm 2.0 +806 48 training.batch_size 0.0 +806 48 training.label_smoothing 0.07548317778818078 +806 49 model.embedding_dim 0.0 +806 49 model.scoring_fct_norm 1.0 +806 49 training.batch_size 0.0 +806 49 training.label_smoothing 0.0011025171732048618 +806 50 model.embedding_dim 1.0 +806 50 model.scoring_fct_norm 2.0 +806 50 training.batch_size 2.0 +806 50 training.label_smoothing 0.0010841667170571362 +806 51 model.embedding_dim 0.0 +806 51 model.scoring_fct_norm 2.0 +806 51 training.batch_size 1.0 +806 51 training.label_smoothing 0.07676563804824157 +806 52 model.embedding_dim 2.0 +806 52 model.scoring_fct_norm 2.0 +806 52 training.batch_size 0.0 +806 52 training.label_smoothing 0.13096717163333607 +806 53 model.embedding_dim 0.0 +806 53 model.scoring_fct_norm 2.0 +806 53 training.batch_size 1.0 +806 53 training.label_smoothing 0.09599825110733219 +806 54 model.embedding_dim 2.0 +806 54 model.scoring_fct_norm 1.0 +806 54 training.batch_size 0.0 +806 54 training.label_smoothing 
0.4166165668916028 +806 55 model.embedding_dim 2.0 +806 55 model.scoring_fct_norm 1.0 +806 55 training.batch_size 2.0 +806 55 training.label_smoothing 0.34573923112545996 +806 56 model.embedding_dim 0.0 +806 56 model.scoring_fct_norm 2.0 +806 56 training.batch_size 1.0 +806 56 training.label_smoothing 0.044484378764761835 +806 57 model.embedding_dim 1.0 +806 57 model.scoring_fct_norm 2.0 +806 57 training.batch_size 2.0 +806 57 training.label_smoothing 0.003654731394442493 +806 58 model.embedding_dim 0.0 +806 58 model.scoring_fct_norm 2.0 +806 58 training.batch_size 1.0 +806 58 training.label_smoothing 0.02159607107750141 +806 59 model.embedding_dim 1.0 +806 59 model.scoring_fct_norm 2.0 +806 59 training.batch_size 0.0 +806 59 training.label_smoothing 0.005091028104911047 +806 60 model.embedding_dim 2.0 +806 60 model.scoring_fct_norm 1.0 +806 60 training.batch_size 1.0 +806 60 training.label_smoothing 0.01213137400956731 +806 61 model.embedding_dim 2.0 +806 61 model.scoring_fct_norm 1.0 +806 61 training.batch_size 1.0 +806 61 training.label_smoothing 0.009507305747344458 +806 62 model.embedding_dim 2.0 +806 62 model.scoring_fct_norm 1.0 +806 62 training.batch_size 0.0 +806 62 training.label_smoothing 0.006847603429538038 +806 63 model.embedding_dim 0.0 +806 63 model.scoring_fct_norm 1.0 +806 63 training.batch_size 2.0 +806 63 training.label_smoothing 0.008815929018437708 +806 64 model.embedding_dim 2.0 +806 64 model.scoring_fct_norm 2.0 +806 64 training.batch_size 0.0 +806 64 training.label_smoothing 0.060369093369169596 +806 65 model.embedding_dim 1.0 +806 65 model.scoring_fct_norm 2.0 +806 65 training.batch_size 0.0 +806 65 training.label_smoothing 0.478052473971391 +806 66 model.embedding_dim 2.0 +806 66 model.scoring_fct_norm 1.0 +806 66 training.batch_size 0.0 +806 66 training.label_smoothing 0.0017854848461619174 +806 67 model.embedding_dim 1.0 +806 67 model.scoring_fct_norm 2.0 +806 67 training.batch_size 1.0 +806 67 training.label_smoothing 
0.03326706185106413 +806 68 model.embedding_dim 2.0 +806 68 model.scoring_fct_norm 1.0 +806 68 training.batch_size 1.0 +806 68 training.label_smoothing 0.6651114299980371 +806 69 model.embedding_dim 2.0 +806 69 model.scoring_fct_norm 1.0 +806 69 training.batch_size 2.0 +806 69 training.label_smoothing 0.4428891263757167 +806 70 model.embedding_dim 2.0 +806 70 model.scoring_fct_norm 1.0 +806 70 training.batch_size 2.0 +806 70 training.label_smoothing 0.002003807867845526 +806 71 model.embedding_dim 0.0 +806 71 model.scoring_fct_norm 1.0 +806 71 training.batch_size 1.0 +806 71 training.label_smoothing 0.045615372659527 +806 72 model.embedding_dim 2.0 +806 72 model.scoring_fct_norm 2.0 +806 72 training.batch_size 2.0 +806 72 training.label_smoothing 0.0031462782679736088 +806 73 model.embedding_dim 2.0 +806 73 model.scoring_fct_norm 2.0 +806 73 training.batch_size 2.0 +806 73 training.label_smoothing 0.008122218744054414 +806 74 model.embedding_dim 0.0 +806 74 model.scoring_fct_norm 1.0 +806 74 training.batch_size 0.0 +806 74 training.label_smoothing 0.15665432818197877 +806 75 model.embedding_dim 2.0 +806 75 model.scoring_fct_norm 2.0 +806 75 training.batch_size 1.0 +806 75 training.label_smoothing 0.01764734257784793 +806 76 model.embedding_dim 1.0 +806 76 model.scoring_fct_norm 2.0 +806 76 training.batch_size 1.0 +806 76 training.label_smoothing 0.0031339335558240376 +806 77 model.embedding_dim 1.0 +806 77 model.scoring_fct_norm 2.0 +806 77 training.batch_size 0.0 +806 77 training.label_smoothing 0.8282089459349385 +806 78 model.embedding_dim 0.0 +806 78 model.scoring_fct_norm 1.0 +806 78 training.batch_size 0.0 +806 78 training.label_smoothing 0.005425228917136953 +806 79 model.embedding_dim 1.0 +806 79 model.scoring_fct_norm 2.0 +806 79 training.batch_size 0.0 +806 79 training.label_smoothing 0.008712443328523884 +806 80 model.embedding_dim 1.0 +806 80 model.scoring_fct_norm 1.0 +806 80 training.batch_size 1.0 +806 80 training.label_smoothing 0.004014988431572369 
+806 81 model.embedding_dim 2.0 +806 81 model.scoring_fct_norm 2.0 +806 81 training.batch_size 1.0 +806 81 training.label_smoothing 0.3560429296467131 +806 82 model.embedding_dim 0.0 +806 82 model.scoring_fct_norm 1.0 +806 82 training.batch_size 1.0 +806 82 training.label_smoothing 0.6972981152508947 +806 83 model.embedding_dim 0.0 +806 83 model.scoring_fct_norm 2.0 +806 83 training.batch_size 0.0 +806 83 training.label_smoothing 0.019096585639300095 +806 84 model.embedding_dim 1.0 +806 84 model.scoring_fct_norm 2.0 +806 84 training.batch_size 0.0 +806 84 training.label_smoothing 0.061272771273342935 +806 85 model.embedding_dim 1.0 +806 85 model.scoring_fct_norm 2.0 +806 85 training.batch_size 2.0 +806 85 training.label_smoothing 0.01636059342195003 +806 86 model.embedding_dim 2.0 +806 86 model.scoring_fct_norm 1.0 +806 86 training.batch_size 1.0 +806 86 training.label_smoothing 0.029831338785582587 +806 87 model.embedding_dim 1.0 +806 87 model.scoring_fct_norm 2.0 +806 87 training.batch_size 2.0 +806 87 training.label_smoothing 0.051759546253554674 +806 88 model.embedding_dim 2.0 +806 88 model.scoring_fct_norm 1.0 +806 88 training.batch_size 1.0 +806 88 training.label_smoothing 0.39803714341894436 +806 89 model.embedding_dim 1.0 +806 89 model.scoring_fct_norm 1.0 +806 89 training.batch_size 0.0 +806 89 training.label_smoothing 0.06080980167947067 +806 90 model.embedding_dim 2.0 +806 90 model.scoring_fct_norm 2.0 +806 90 training.batch_size 1.0 +806 90 training.label_smoothing 0.0018905594170808594 +806 91 model.embedding_dim 2.0 +806 91 model.scoring_fct_norm 1.0 +806 91 training.batch_size 1.0 +806 91 training.label_smoothing 0.6084733027983489 +806 92 model.embedding_dim 1.0 +806 92 model.scoring_fct_norm 1.0 +806 92 training.batch_size 2.0 +806 92 training.label_smoothing 0.027168455970144674 +806 93 model.embedding_dim 2.0 +806 93 model.scoring_fct_norm 2.0 +806 93 training.batch_size 0.0 +806 93 training.label_smoothing 0.0014343364157731735 +806 94 
model.embedding_dim 0.0 +806 94 model.scoring_fct_norm 1.0 +806 94 training.batch_size 0.0 +806 94 training.label_smoothing 0.007316058523473991 +806 95 model.embedding_dim 2.0 +806 95 model.scoring_fct_norm 2.0 +806 95 training.batch_size 1.0 +806 95 training.label_smoothing 0.004035373925093876 +806 96 model.embedding_dim 2.0 +806 96 model.scoring_fct_norm 1.0 +806 96 training.batch_size 0.0 +806 96 training.label_smoothing 0.8285415862531363 +806 97 model.embedding_dim 2.0 +806 97 model.scoring_fct_norm 2.0 +806 97 training.batch_size 0.0 +806 97 training.label_smoothing 0.07502234764096866 +806 98 model.embedding_dim 2.0 +806 98 model.scoring_fct_norm 2.0 +806 98 training.batch_size 1.0 +806 98 training.label_smoothing 0.013756007670908094 +806 99 model.embedding_dim 2.0 +806 99 model.scoring_fct_norm 2.0 +806 99 training.batch_size 0.0 +806 99 training.label_smoothing 0.0065988491250385676 +806 100 model.embedding_dim 1.0 +806 100 model.scoring_fct_norm 2.0 +806 100 training.batch_size 2.0 +806 100 training.label_smoothing 0.06695233909979913 +806 1 dataset """kinships""" +806 1 model """transe""" +806 1 loss """softplus""" +806 1 regularizer """no""" +806 1 optimizer """adadelta""" +806 1 training_loop """lcwa""" +806 1 evaluator """rankbased""" +806 2 dataset """kinships""" +806 2 model """transe""" +806 2 loss """softplus""" +806 2 regularizer """no""" +806 2 optimizer """adadelta""" +806 2 training_loop """lcwa""" +806 2 evaluator """rankbased""" +806 3 dataset """kinships""" +806 3 model """transe""" +806 3 loss """softplus""" +806 3 regularizer """no""" +806 3 optimizer """adadelta""" +806 3 training_loop """lcwa""" +806 3 evaluator """rankbased""" +806 4 dataset """kinships""" +806 4 model """transe""" +806 4 loss """softplus""" +806 4 regularizer """no""" +806 4 optimizer """adadelta""" +806 4 training_loop """lcwa""" +806 4 evaluator """rankbased""" +806 5 dataset """kinships""" +806 5 model """transe""" +806 5 loss """softplus""" +806 5 regularizer 
"""no""" +806 5 optimizer """adadelta""" +806 5 training_loop """lcwa""" +806 5 evaluator """rankbased""" +806 6 dataset """kinships""" +806 6 model """transe""" +806 6 loss """softplus""" +806 6 regularizer """no""" +806 6 optimizer """adadelta""" +806 6 training_loop """lcwa""" +806 6 evaluator """rankbased""" +806 7 dataset """kinships""" +806 7 model """transe""" +806 7 loss """softplus""" +806 7 regularizer """no""" +806 7 optimizer """adadelta""" +806 7 training_loop """lcwa""" +806 7 evaluator """rankbased""" +806 8 dataset """kinships""" +806 8 model """transe""" +806 8 loss """softplus""" +806 8 regularizer """no""" +806 8 optimizer """adadelta""" +806 8 training_loop """lcwa""" +806 8 evaluator """rankbased""" +806 9 dataset """kinships""" +806 9 model """transe""" +806 9 loss """softplus""" +806 9 regularizer """no""" +806 9 optimizer """adadelta""" +806 9 training_loop """lcwa""" +806 9 evaluator """rankbased""" +806 10 dataset """kinships""" +806 10 model """transe""" +806 10 loss """softplus""" +806 10 regularizer """no""" +806 10 optimizer """adadelta""" +806 10 training_loop """lcwa""" +806 10 evaluator """rankbased""" +806 11 dataset """kinships""" +806 11 model """transe""" +806 11 loss """softplus""" +806 11 regularizer """no""" +806 11 optimizer """adadelta""" +806 11 training_loop """lcwa""" +806 11 evaluator """rankbased""" +806 12 dataset """kinships""" +806 12 model """transe""" +806 12 loss """softplus""" +806 12 regularizer """no""" +806 12 optimizer """adadelta""" +806 12 training_loop """lcwa""" +806 12 evaluator """rankbased""" +806 13 dataset """kinships""" +806 13 model """transe""" +806 13 loss """softplus""" +806 13 regularizer """no""" +806 13 optimizer """adadelta""" +806 13 training_loop """lcwa""" +806 13 evaluator """rankbased""" +806 14 dataset """kinships""" +806 14 model """transe""" +806 14 loss """softplus""" +806 14 regularizer """no""" +806 14 optimizer """adadelta""" +806 14 training_loop """lcwa""" +806 14 evaluator 
"""rankbased""" +806 15 dataset """kinships""" +806 15 model """transe""" +806 15 loss """softplus""" +806 15 regularizer """no""" +806 15 optimizer """adadelta""" +806 15 training_loop """lcwa""" +806 15 evaluator """rankbased""" +806 16 dataset """kinships""" +806 16 model """transe""" +806 16 loss """softplus""" +806 16 regularizer """no""" +806 16 optimizer """adadelta""" +806 16 training_loop """lcwa""" +806 16 evaluator """rankbased""" +806 17 dataset """kinships""" +806 17 model """transe""" +806 17 loss """softplus""" +806 17 regularizer """no""" +806 17 optimizer """adadelta""" +806 17 training_loop """lcwa""" +806 17 evaluator """rankbased""" +806 18 dataset """kinships""" +806 18 model """transe""" +806 18 loss """softplus""" +806 18 regularizer """no""" +806 18 optimizer """adadelta""" +806 18 training_loop """lcwa""" +806 18 evaluator """rankbased""" +806 19 dataset """kinships""" +806 19 model """transe""" +806 19 loss """softplus""" +806 19 regularizer """no""" +806 19 optimizer """adadelta""" +806 19 training_loop """lcwa""" +806 19 evaluator """rankbased""" +806 20 dataset """kinships""" +806 20 model """transe""" +806 20 loss """softplus""" +806 20 regularizer """no""" +806 20 optimizer """adadelta""" +806 20 training_loop """lcwa""" +806 20 evaluator """rankbased""" +806 21 dataset """kinships""" +806 21 model """transe""" +806 21 loss """softplus""" +806 21 regularizer """no""" +806 21 optimizer """adadelta""" +806 21 training_loop """lcwa""" +806 21 evaluator """rankbased""" +806 22 dataset """kinships""" +806 22 model """transe""" +806 22 loss """softplus""" +806 22 regularizer """no""" +806 22 optimizer """adadelta""" +806 22 training_loop """lcwa""" +806 22 evaluator """rankbased""" +806 23 dataset """kinships""" +806 23 model """transe""" +806 23 loss """softplus""" +806 23 regularizer """no""" +806 23 optimizer """adadelta""" +806 23 training_loop """lcwa""" +806 23 evaluator """rankbased""" +806 24 dataset """kinships""" +806 24 model 
"""transe""" +806 24 loss """softplus""" +806 24 regularizer """no""" +806 24 optimizer """adadelta""" +806 24 training_loop """lcwa""" +806 24 evaluator """rankbased""" +806 25 dataset """kinships""" +806 25 model """transe""" +806 25 loss """softplus""" +806 25 regularizer """no""" +806 25 optimizer """adadelta""" +806 25 training_loop """lcwa""" +806 25 evaluator """rankbased""" +806 26 dataset """kinships""" +806 26 model """transe""" +806 26 loss """softplus""" +806 26 regularizer """no""" +806 26 optimizer """adadelta""" +806 26 training_loop """lcwa""" +806 26 evaluator """rankbased""" +806 27 dataset """kinships""" +806 27 model """transe""" +806 27 loss """softplus""" +806 27 regularizer """no""" +806 27 optimizer """adadelta""" +806 27 training_loop """lcwa""" +806 27 evaluator """rankbased""" +806 28 dataset """kinships""" +806 28 model """transe""" +806 28 loss """softplus""" +806 28 regularizer """no""" +806 28 optimizer """adadelta""" +806 28 training_loop """lcwa""" +806 28 evaluator """rankbased""" +806 29 dataset """kinships""" +806 29 model """transe""" +806 29 loss """softplus""" +806 29 regularizer """no""" +806 29 optimizer """adadelta""" +806 29 training_loop """lcwa""" +806 29 evaluator """rankbased""" +806 30 dataset """kinships""" +806 30 model """transe""" +806 30 loss """softplus""" +806 30 regularizer """no""" +806 30 optimizer """adadelta""" +806 30 training_loop """lcwa""" +806 30 evaluator """rankbased""" +806 31 dataset """kinships""" +806 31 model """transe""" +806 31 loss """softplus""" +806 31 regularizer """no""" +806 31 optimizer """adadelta""" +806 31 training_loop """lcwa""" +806 31 evaluator """rankbased""" +806 32 dataset """kinships""" +806 32 model """transe""" +806 32 loss """softplus""" +806 32 regularizer """no""" +806 32 optimizer """adadelta""" +806 32 training_loop """lcwa""" +806 32 evaluator """rankbased""" +806 33 dataset """kinships""" +806 33 model """transe""" +806 33 loss """softplus""" +806 33 regularizer 
"""no""" +806 33 optimizer """adadelta""" +806 33 training_loop """lcwa""" +806 33 evaluator """rankbased""" +806 34 dataset """kinships""" +806 34 model """transe""" +806 34 loss """softplus""" +806 34 regularizer """no""" +806 34 optimizer """adadelta""" +806 34 training_loop """lcwa""" +806 34 evaluator """rankbased""" +806 35 dataset """kinships""" +806 35 model """transe""" +806 35 loss """softplus""" +806 35 regularizer """no""" +806 35 optimizer """adadelta""" +806 35 training_loop """lcwa""" +806 35 evaluator """rankbased""" +806 36 dataset """kinships""" +806 36 model """transe""" +806 36 loss """softplus""" +806 36 regularizer """no""" +806 36 optimizer """adadelta""" +806 36 training_loop """lcwa""" +806 36 evaluator """rankbased""" +806 37 dataset """kinships""" +806 37 model """transe""" +806 37 loss """softplus""" +806 37 regularizer """no""" +806 37 optimizer """adadelta""" +806 37 training_loop """lcwa""" +806 37 evaluator """rankbased""" +806 38 dataset """kinships""" +806 38 model """transe""" +806 38 loss """softplus""" +806 38 regularizer """no""" +806 38 optimizer """adadelta""" +806 38 training_loop """lcwa""" +806 38 evaluator """rankbased""" +806 39 dataset """kinships""" +806 39 model """transe""" +806 39 loss """softplus""" +806 39 regularizer """no""" +806 39 optimizer """adadelta""" +806 39 training_loop """lcwa""" +806 39 evaluator """rankbased""" +806 40 dataset """kinships""" +806 40 model """transe""" +806 40 loss """softplus""" +806 40 regularizer """no""" +806 40 optimizer """adadelta""" +806 40 training_loop """lcwa""" +806 40 evaluator """rankbased""" +806 41 dataset """kinships""" +806 41 model """transe""" +806 41 loss """softplus""" +806 41 regularizer """no""" +806 41 optimizer """adadelta""" +806 41 training_loop """lcwa""" +806 41 evaluator """rankbased""" +806 42 dataset """kinships""" +806 42 model """transe""" +806 42 loss """softplus""" +806 42 regularizer """no""" +806 42 optimizer """adadelta""" +806 42 training_loop 
"""lcwa""" +806 42 evaluator """rankbased""" +806 43 dataset """kinships""" +806 43 model """transe""" +806 43 loss """softplus""" +806 43 regularizer """no""" +806 43 optimizer """adadelta""" +806 43 training_loop """lcwa""" +806 43 evaluator """rankbased""" +806 44 dataset """kinships""" +806 44 model """transe""" +806 44 loss """softplus""" +806 44 regularizer """no""" +806 44 optimizer """adadelta""" +806 44 training_loop """lcwa""" +806 44 evaluator """rankbased""" +806 45 dataset """kinships""" +806 45 model """transe""" +806 45 loss """softplus""" +806 45 regularizer """no""" +806 45 optimizer """adadelta""" +806 45 training_loop """lcwa""" +806 45 evaluator """rankbased""" +806 46 dataset """kinships""" +806 46 model """transe""" +806 46 loss """softplus""" +806 46 regularizer """no""" +806 46 optimizer """adadelta""" +806 46 training_loop """lcwa""" +806 46 evaluator """rankbased""" +806 47 dataset """kinships""" +806 47 model """transe""" +806 47 loss """softplus""" +806 47 regularizer """no""" +806 47 optimizer """adadelta""" +806 47 training_loop """lcwa""" +806 47 evaluator """rankbased""" +806 48 dataset """kinships""" +806 48 model """transe""" +806 48 loss """softplus""" +806 48 regularizer """no""" +806 48 optimizer """adadelta""" +806 48 training_loop """lcwa""" +806 48 evaluator """rankbased""" +806 49 dataset """kinships""" +806 49 model """transe""" +806 49 loss """softplus""" +806 49 regularizer """no""" +806 49 optimizer """adadelta""" +806 49 training_loop """lcwa""" +806 49 evaluator """rankbased""" +806 50 dataset """kinships""" +806 50 model """transe""" +806 50 loss """softplus""" +806 50 regularizer """no""" +806 50 optimizer """adadelta""" +806 50 training_loop """lcwa""" +806 50 evaluator """rankbased""" +806 51 dataset """kinships""" +806 51 model """transe""" +806 51 loss """softplus""" +806 51 regularizer """no""" +806 51 optimizer """adadelta""" +806 51 training_loop """lcwa""" +806 51 evaluator """rankbased""" +806 52 dataset 
"""kinships""" +806 52 model """transe""" +806 52 loss """softplus""" +806 52 regularizer """no""" +806 52 optimizer """adadelta""" +806 52 training_loop """lcwa""" +806 52 evaluator """rankbased""" +806 53 dataset """kinships""" +806 53 model """transe""" +806 53 loss """softplus""" +806 53 regularizer """no""" +806 53 optimizer """adadelta""" +806 53 training_loop """lcwa""" +806 53 evaluator """rankbased""" +806 54 dataset """kinships""" +806 54 model """transe""" +806 54 loss """softplus""" +806 54 regularizer """no""" +806 54 optimizer """adadelta""" +806 54 training_loop """lcwa""" +806 54 evaluator """rankbased""" +806 55 dataset """kinships""" +806 55 model """transe""" +806 55 loss """softplus""" +806 55 regularizer """no""" +806 55 optimizer """adadelta""" +806 55 training_loop """lcwa""" +806 55 evaluator """rankbased""" +806 56 dataset """kinships""" +806 56 model """transe""" +806 56 loss """softplus""" +806 56 regularizer """no""" +806 56 optimizer """adadelta""" +806 56 training_loop """lcwa""" +806 56 evaluator """rankbased""" +806 57 dataset """kinships""" +806 57 model """transe""" +806 57 loss """softplus""" +806 57 regularizer """no""" +806 57 optimizer """adadelta""" +806 57 training_loop """lcwa""" +806 57 evaluator """rankbased""" +806 58 dataset """kinships""" +806 58 model """transe""" +806 58 loss """softplus""" +806 58 regularizer """no""" +806 58 optimizer """adadelta""" +806 58 training_loop """lcwa""" +806 58 evaluator """rankbased""" +806 59 dataset """kinships""" +806 59 model """transe""" +806 59 loss """softplus""" +806 59 regularizer """no""" +806 59 optimizer """adadelta""" +806 59 training_loop """lcwa""" +806 59 evaluator """rankbased""" +806 60 dataset """kinships""" +806 60 model """transe""" +806 60 loss """softplus""" +806 60 regularizer """no""" +806 60 optimizer """adadelta""" +806 60 training_loop """lcwa""" +806 60 evaluator """rankbased""" +806 61 dataset """kinships""" +806 61 model """transe""" +806 61 loss 
"""softplus""" +806 61 regularizer """no""" +806 61 optimizer """adadelta""" +806 61 training_loop """lcwa""" +806 61 evaluator """rankbased""" +806 62 dataset """kinships""" +806 62 model """transe""" +806 62 loss """softplus""" +806 62 regularizer """no""" +806 62 optimizer """adadelta""" +806 62 training_loop """lcwa""" +806 62 evaluator """rankbased""" +806 63 dataset """kinships""" +806 63 model """transe""" +806 63 loss """softplus""" +806 63 regularizer """no""" +806 63 optimizer """adadelta""" +806 63 training_loop """lcwa""" +806 63 evaluator """rankbased""" +806 64 dataset """kinships""" +806 64 model """transe""" +806 64 loss """softplus""" +806 64 regularizer """no""" +806 64 optimizer """adadelta""" +806 64 training_loop """lcwa""" +806 64 evaluator """rankbased""" +806 65 dataset """kinships""" +806 65 model """transe""" +806 65 loss """softplus""" +806 65 regularizer """no""" +806 65 optimizer """adadelta""" +806 65 training_loop """lcwa""" +806 65 evaluator """rankbased""" +806 66 dataset """kinships""" +806 66 model """transe""" +806 66 loss """softplus""" +806 66 regularizer """no""" +806 66 optimizer """adadelta""" +806 66 training_loop """lcwa""" +806 66 evaluator """rankbased""" +806 67 dataset """kinships""" +806 67 model """transe""" +806 67 loss """softplus""" +806 67 regularizer """no""" +806 67 optimizer """adadelta""" +806 67 training_loop """lcwa""" +806 67 evaluator """rankbased""" +806 68 dataset """kinships""" +806 68 model """transe""" +806 68 loss """softplus""" +806 68 regularizer """no""" +806 68 optimizer """adadelta""" +806 68 training_loop """lcwa""" +806 68 evaluator """rankbased""" +806 69 dataset """kinships""" +806 69 model """transe""" +806 69 loss """softplus""" +806 69 regularizer """no""" +806 69 optimizer """adadelta""" +806 69 training_loop """lcwa""" +806 69 evaluator """rankbased""" +806 70 dataset """kinships""" +806 70 model """transe""" +806 70 loss """softplus""" +806 70 regularizer """no""" +806 70 optimizer 
"""adadelta""" +806 70 training_loop """lcwa""" +806 70 evaluator """rankbased""" +806 71 dataset """kinships""" +806 71 model """transe""" +806 71 loss """softplus""" +806 71 regularizer """no""" +806 71 optimizer """adadelta""" +806 71 training_loop """lcwa""" +806 71 evaluator """rankbased""" +806 72 dataset """kinships""" +806 72 model """transe""" +806 72 loss """softplus""" +806 72 regularizer """no""" +806 72 optimizer """adadelta""" +806 72 training_loop """lcwa""" +806 72 evaluator """rankbased""" +806 73 dataset """kinships""" +806 73 model """transe""" +806 73 loss """softplus""" +806 73 regularizer """no""" +806 73 optimizer """adadelta""" +806 73 training_loop """lcwa""" +806 73 evaluator """rankbased""" +806 74 dataset """kinships""" +806 74 model """transe""" +806 74 loss """softplus""" +806 74 regularizer """no""" +806 74 optimizer """adadelta""" +806 74 training_loop """lcwa""" +806 74 evaluator """rankbased""" +806 75 dataset """kinships""" +806 75 model """transe""" +806 75 loss """softplus""" +806 75 regularizer """no""" +806 75 optimizer """adadelta""" +806 75 training_loop """lcwa""" +806 75 evaluator """rankbased""" +806 76 dataset """kinships""" +806 76 model """transe""" +806 76 loss """softplus""" +806 76 regularizer """no""" +806 76 optimizer """adadelta""" +806 76 training_loop """lcwa""" +806 76 evaluator """rankbased""" +806 77 dataset """kinships""" +806 77 model """transe""" +806 77 loss """softplus""" +806 77 regularizer """no""" +806 77 optimizer """adadelta""" +806 77 training_loop """lcwa""" +806 77 evaluator """rankbased""" +806 78 dataset """kinships""" +806 78 model """transe""" +806 78 loss """softplus""" +806 78 regularizer """no""" +806 78 optimizer """adadelta""" +806 78 training_loop """lcwa""" +806 78 evaluator """rankbased""" +806 79 dataset """kinships""" +806 79 model """transe""" +806 79 loss """softplus""" +806 79 regularizer """no""" +806 79 optimizer """adadelta""" +806 79 training_loop """lcwa""" +806 79 
evaluator """rankbased""" +806 80 dataset """kinships""" +806 80 model """transe""" +806 80 loss """softplus""" +806 80 regularizer """no""" +806 80 optimizer """adadelta""" +806 80 training_loop """lcwa""" +806 80 evaluator """rankbased""" +806 81 dataset """kinships""" +806 81 model """transe""" +806 81 loss """softplus""" +806 81 regularizer """no""" +806 81 optimizer """adadelta""" +806 81 training_loop """lcwa""" +806 81 evaluator """rankbased""" +806 82 dataset """kinships""" +806 82 model """transe""" +806 82 loss """softplus""" +806 82 regularizer """no""" +806 82 optimizer """adadelta""" +806 82 training_loop """lcwa""" +806 82 evaluator """rankbased""" +806 83 dataset """kinships""" +806 83 model """transe""" +806 83 loss """softplus""" +806 83 regularizer """no""" +806 83 optimizer """adadelta""" +806 83 training_loop """lcwa""" +806 83 evaluator """rankbased""" +806 84 dataset """kinships""" +806 84 model """transe""" +806 84 loss """softplus""" +806 84 regularizer """no""" +806 84 optimizer """adadelta""" +806 84 training_loop """lcwa""" +806 84 evaluator """rankbased""" +806 85 dataset """kinships""" +806 85 model """transe""" +806 85 loss """softplus""" +806 85 regularizer """no""" +806 85 optimizer """adadelta""" +806 85 training_loop """lcwa""" +806 85 evaluator """rankbased""" +806 86 dataset """kinships""" +806 86 model """transe""" +806 86 loss """softplus""" +806 86 regularizer """no""" +806 86 optimizer """adadelta""" +806 86 training_loop """lcwa""" +806 86 evaluator """rankbased""" +806 87 dataset """kinships""" +806 87 model """transe""" +806 87 loss """softplus""" +806 87 regularizer """no""" +806 87 optimizer """adadelta""" +806 87 training_loop """lcwa""" +806 87 evaluator """rankbased""" +806 88 dataset """kinships""" +806 88 model """transe""" +806 88 loss """softplus""" +806 88 regularizer """no""" +806 88 optimizer """adadelta""" +806 88 training_loop """lcwa""" +806 88 evaluator """rankbased""" +806 89 dataset """kinships""" +806 89 
model """transe""" +806 89 loss """softplus""" +806 89 regularizer """no""" +806 89 optimizer """adadelta""" +806 89 training_loop """lcwa""" +806 89 evaluator """rankbased""" +806 90 dataset """kinships""" +806 90 model """transe""" +806 90 loss """softplus""" +806 90 regularizer """no""" +806 90 optimizer """adadelta""" +806 90 training_loop """lcwa""" +806 90 evaluator """rankbased""" +806 91 dataset """kinships""" +806 91 model """transe""" +806 91 loss """softplus""" +806 91 regularizer """no""" +806 91 optimizer """adadelta""" +806 91 training_loop """lcwa""" +806 91 evaluator """rankbased""" +806 92 dataset """kinships""" +806 92 model """transe""" +806 92 loss """softplus""" +806 92 regularizer """no""" +806 92 optimizer """adadelta""" +806 92 training_loop """lcwa""" +806 92 evaluator """rankbased""" +806 93 dataset """kinships""" +806 93 model """transe""" +806 93 loss """softplus""" +806 93 regularizer """no""" +806 93 optimizer """adadelta""" +806 93 training_loop """lcwa""" +806 93 evaluator """rankbased""" +806 94 dataset """kinships""" +806 94 model """transe""" +806 94 loss """softplus""" +806 94 regularizer """no""" +806 94 optimizer """adadelta""" +806 94 training_loop """lcwa""" +806 94 evaluator """rankbased""" +806 95 dataset """kinships""" +806 95 model """transe""" +806 95 loss """softplus""" +806 95 regularizer """no""" +806 95 optimizer """adadelta""" +806 95 training_loop """lcwa""" +806 95 evaluator """rankbased""" +806 96 dataset """kinships""" +806 96 model """transe""" +806 96 loss """softplus""" +806 96 regularizer """no""" +806 96 optimizer """adadelta""" +806 96 training_loop """lcwa""" +806 96 evaluator """rankbased""" +806 97 dataset """kinships""" +806 97 model """transe""" +806 97 loss """softplus""" +806 97 regularizer """no""" +806 97 optimizer """adadelta""" +806 97 training_loop """lcwa""" +806 97 evaluator """rankbased""" +806 98 dataset """kinships""" +806 98 model """transe""" +806 98 loss """softplus""" +806 98 
regularizer """no""" +806 98 optimizer """adadelta""" +806 98 training_loop """lcwa""" +806 98 evaluator """rankbased""" +806 99 dataset """kinships""" +806 99 model """transe""" +806 99 loss """softplus""" +806 99 regularizer """no""" +806 99 optimizer """adadelta""" +806 99 training_loop """lcwa""" +806 99 evaluator """rankbased""" +806 100 dataset """kinships""" +806 100 model """transe""" +806 100 loss """softplus""" +806 100 regularizer """no""" +806 100 optimizer """adadelta""" +806 100 training_loop """lcwa""" +806 100 evaluator """rankbased""" +807 1 model.embedding_dim 1.0 +807 1 model.scoring_fct_norm 2.0 +807 1 training.batch_size 1.0 +807 1 training.label_smoothing 0.6320494711475472 +807 2 model.embedding_dim 1.0 +807 2 model.scoring_fct_norm 2.0 +807 2 training.batch_size 1.0 +807 2 training.label_smoothing 0.15711111985147055 +807 3 model.embedding_dim 1.0 +807 3 model.scoring_fct_norm 2.0 +807 3 training.batch_size 2.0 +807 3 training.label_smoothing 0.006695326480865827 +807 4 model.embedding_dim 2.0 +807 4 model.scoring_fct_norm 1.0 +807 4 training.batch_size 0.0 +807 4 training.label_smoothing 0.005921186504172924 +807 5 model.embedding_dim 1.0 +807 5 model.scoring_fct_norm 1.0 +807 5 training.batch_size 2.0 +807 5 training.label_smoothing 0.012927514750707391 +807 6 model.embedding_dim 0.0 +807 6 model.scoring_fct_norm 2.0 +807 6 training.batch_size 2.0 +807 6 training.label_smoothing 0.7355314607886962 +807 7 model.embedding_dim 1.0 +807 7 model.scoring_fct_norm 1.0 +807 7 training.batch_size 2.0 +807 7 training.label_smoothing 0.012076534508829244 +807 8 model.embedding_dim 0.0 +807 8 model.scoring_fct_norm 1.0 +807 8 training.batch_size 0.0 +807 8 training.label_smoothing 0.0012598512582406188 +807 9 model.embedding_dim 1.0 +807 9 model.scoring_fct_norm 1.0 +807 9 training.batch_size 1.0 +807 9 training.label_smoothing 0.011704373948591558 +807 10 model.embedding_dim 1.0 +807 10 model.scoring_fct_norm 2.0 +807 10 training.batch_size 1.0 +807 
10 training.label_smoothing 0.55217373038735 +807 11 model.embedding_dim 0.0 +807 11 model.scoring_fct_norm 1.0 +807 11 training.batch_size 0.0 +807 11 training.label_smoothing 0.19862722334947872 +807 12 model.embedding_dim 2.0 +807 12 model.scoring_fct_norm 2.0 +807 12 training.batch_size 2.0 +807 12 training.label_smoothing 0.11606363857991842 +807 13 model.embedding_dim 1.0 +807 13 model.scoring_fct_norm 2.0 +807 13 training.batch_size 1.0 +807 13 training.label_smoothing 0.0027764012697409535 +807 14 model.embedding_dim 2.0 +807 14 model.scoring_fct_norm 1.0 +807 14 training.batch_size 1.0 +807 14 training.label_smoothing 0.003136970984958599 +807 15 model.embedding_dim 2.0 +807 15 model.scoring_fct_norm 2.0 +807 15 training.batch_size 1.0 +807 15 training.label_smoothing 0.07648769901771885 +807 16 model.embedding_dim 2.0 +807 16 model.scoring_fct_norm 2.0 +807 16 training.batch_size 0.0 +807 16 training.label_smoothing 0.007523079498131044 +807 17 model.embedding_dim 1.0 +807 17 model.scoring_fct_norm 2.0 +807 17 training.batch_size 0.0 +807 17 training.label_smoothing 0.0019011682326444117 +807 18 model.embedding_dim 1.0 +807 18 model.scoring_fct_norm 2.0 +807 18 training.batch_size 1.0 +807 18 training.label_smoothing 0.26989061963654953 +807 19 model.embedding_dim 1.0 +807 19 model.scoring_fct_norm 1.0 +807 19 training.batch_size 1.0 +807 19 training.label_smoothing 0.11932935330735552 +807 20 model.embedding_dim 2.0 +807 20 model.scoring_fct_norm 2.0 +807 20 training.batch_size 2.0 +807 20 training.label_smoothing 0.0037679300357592238 +807 21 model.embedding_dim 1.0 +807 21 model.scoring_fct_norm 2.0 +807 21 training.batch_size 0.0 +807 21 training.label_smoothing 0.005970270057542157 +807 22 model.embedding_dim 1.0 +807 22 model.scoring_fct_norm 1.0 +807 22 training.batch_size 2.0 +807 22 training.label_smoothing 0.04657440099452287 +807 23 model.embedding_dim 2.0 +807 23 model.scoring_fct_norm 1.0 +807 23 training.batch_size 0.0 +807 23 
training.label_smoothing 0.8439129709758297 +807 24 model.embedding_dim 2.0 +807 24 model.scoring_fct_norm 1.0 +807 24 training.batch_size 2.0 +807 24 training.label_smoothing 0.1309053945754871 +807 25 model.embedding_dim 0.0 +807 25 model.scoring_fct_norm 2.0 +807 25 training.batch_size 1.0 +807 25 training.label_smoothing 0.0013546751184131552 +807 26 model.embedding_dim 0.0 +807 26 model.scoring_fct_norm 2.0 +807 26 training.batch_size 0.0 +807 26 training.label_smoothing 0.5820021557537206 +807 27 model.embedding_dim 2.0 +807 27 model.scoring_fct_norm 1.0 +807 27 training.batch_size 2.0 +807 27 training.label_smoothing 0.921971702017672 +807 28 model.embedding_dim 1.0 +807 28 model.scoring_fct_norm 2.0 +807 28 training.batch_size 2.0 +807 28 training.label_smoothing 0.06984163565677802 +807 29 model.embedding_dim 1.0 +807 29 model.scoring_fct_norm 2.0 +807 29 training.batch_size 2.0 +807 29 training.label_smoothing 0.0016593909015068008 +807 30 model.embedding_dim 2.0 +807 30 model.scoring_fct_norm 2.0 +807 30 training.batch_size 2.0 +807 30 training.label_smoothing 0.036407366200883 +807 31 model.embedding_dim 0.0 +807 31 model.scoring_fct_norm 1.0 +807 31 training.batch_size 2.0 +807 31 training.label_smoothing 0.011605559723669457 +807 32 model.embedding_dim 0.0 +807 32 model.scoring_fct_norm 1.0 +807 32 training.batch_size 0.0 +807 32 training.label_smoothing 0.0029330937277885407 +807 33 model.embedding_dim 2.0 +807 33 model.scoring_fct_norm 2.0 +807 33 training.batch_size 1.0 +807 33 training.label_smoothing 0.008539394132378603 +807 34 model.embedding_dim 2.0 +807 34 model.scoring_fct_norm 2.0 +807 34 training.batch_size 0.0 +807 34 training.label_smoothing 0.019608507053035965 +807 35 model.embedding_dim 0.0 +807 35 model.scoring_fct_norm 1.0 +807 35 training.batch_size 2.0 +807 35 training.label_smoothing 0.05008856659421142 +807 36 model.embedding_dim 1.0 +807 36 model.scoring_fct_norm 1.0 +807 36 training.batch_size 0.0 +807 36 
training.label_smoothing 0.018056843665512013 +807 37 model.embedding_dim 2.0 +807 37 model.scoring_fct_norm 1.0 +807 37 training.batch_size 0.0 +807 37 training.label_smoothing 0.002930181960180321 +807 38 model.embedding_dim 0.0 +807 38 model.scoring_fct_norm 2.0 +807 38 training.batch_size 2.0 +807 38 training.label_smoothing 0.00872814966432181 +807 39 model.embedding_dim 0.0 +807 39 model.scoring_fct_norm 1.0 +807 39 training.batch_size 2.0 +807 39 training.label_smoothing 0.016169556733968877 +807 40 model.embedding_dim 0.0 +807 40 model.scoring_fct_norm 2.0 +807 40 training.batch_size 1.0 +807 40 training.label_smoothing 0.04236431856229329 +807 41 model.embedding_dim 2.0 +807 41 model.scoring_fct_norm 1.0 +807 41 training.batch_size 0.0 +807 41 training.label_smoothing 0.006319799327367501 +807 42 model.embedding_dim 0.0 +807 42 model.scoring_fct_norm 1.0 +807 42 training.batch_size 2.0 +807 42 training.label_smoothing 0.4390045460057428 +807 43 model.embedding_dim 1.0 +807 43 model.scoring_fct_norm 1.0 +807 43 training.batch_size 0.0 +807 43 training.label_smoothing 0.23741091367233075 +807 44 model.embedding_dim 0.0 +807 44 model.scoring_fct_norm 1.0 +807 44 training.batch_size 2.0 +807 44 training.label_smoothing 0.01737028296660527 +807 45 model.embedding_dim 0.0 +807 45 model.scoring_fct_norm 1.0 +807 45 training.batch_size 2.0 +807 45 training.label_smoothing 0.16149613768291904 +807 46 model.embedding_dim 0.0 +807 46 model.scoring_fct_norm 2.0 +807 46 training.batch_size 2.0 +807 46 training.label_smoothing 0.0012655580721542055 +807 47 model.embedding_dim 0.0 +807 47 model.scoring_fct_norm 2.0 +807 47 training.batch_size 1.0 +807 47 training.label_smoothing 0.8043691274657516 +807 48 model.embedding_dim 1.0 +807 48 model.scoring_fct_norm 2.0 +807 48 training.batch_size 0.0 +807 48 training.label_smoothing 0.732828626678578 +807 49 model.embedding_dim 2.0 +807 49 model.scoring_fct_norm 2.0 +807 49 training.batch_size 2.0 +807 49 
training.label_smoothing 0.34545498049861234 +807 50 model.embedding_dim 2.0 +807 50 model.scoring_fct_norm 2.0 +807 50 training.batch_size 2.0 +807 50 training.label_smoothing 0.0018129538726342825 +807 51 model.embedding_dim 2.0 +807 51 model.scoring_fct_norm 1.0 +807 51 training.batch_size 2.0 +807 51 training.label_smoothing 0.023455352214187403 +807 52 model.embedding_dim 1.0 +807 52 model.scoring_fct_norm 1.0 +807 52 training.batch_size 0.0 +807 52 training.label_smoothing 0.1664218252378843 +807 53 model.embedding_dim 0.0 +807 53 model.scoring_fct_norm 1.0 +807 53 training.batch_size 0.0 +807 53 training.label_smoothing 0.01999062267385742 +807 54 model.embedding_dim 2.0 +807 54 model.scoring_fct_norm 1.0 +807 54 training.batch_size 2.0 +807 54 training.label_smoothing 0.48316267698434406 +807 55 model.embedding_dim 2.0 +807 55 model.scoring_fct_norm 2.0 +807 55 training.batch_size 0.0 +807 55 training.label_smoothing 0.008682764124226654 +807 56 model.embedding_dim 2.0 +807 56 model.scoring_fct_norm 2.0 +807 56 training.batch_size 2.0 +807 56 training.label_smoothing 0.011982478009489534 +807 57 model.embedding_dim 2.0 +807 57 model.scoring_fct_norm 2.0 +807 57 training.batch_size 0.0 +807 57 training.label_smoothing 0.0036783168797804573 +807 58 model.embedding_dim 0.0 +807 58 model.scoring_fct_norm 2.0 +807 58 training.batch_size 0.0 +807 58 training.label_smoothing 0.03126846493399755 +807 59 model.embedding_dim 2.0 +807 59 model.scoring_fct_norm 1.0 +807 59 training.batch_size 2.0 +807 59 training.label_smoothing 0.012093134896743256 +807 60 model.embedding_dim 0.0 +807 60 model.scoring_fct_norm 2.0 +807 60 training.batch_size 2.0 +807 60 training.label_smoothing 0.062240508522016555 +807 61 model.embedding_dim 1.0 +807 61 model.scoring_fct_norm 1.0 +807 61 training.batch_size 1.0 +807 61 training.label_smoothing 0.009547980623958223 +807 62 model.embedding_dim 1.0 +807 62 model.scoring_fct_norm 1.0 +807 62 training.batch_size 0.0 +807 62 
training.label_smoothing 0.10789977309368458 +807 63 model.embedding_dim 0.0 +807 63 model.scoring_fct_norm 1.0 +807 63 training.batch_size 1.0 +807 63 training.label_smoothing 0.00290663977500145 +807 64 model.embedding_dim 0.0 +807 64 model.scoring_fct_norm 2.0 +807 64 training.batch_size 1.0 +807 64 training.label_smoothing 0.00367894336044953 +807 65 model.embedding_dim 1.0 +807 65 model.scoring_fct_norm 1.0 +807 65 training.batch_size 0.0 +807 65 training.label_smoothing 0.005530815489490969 +807 66 model.embedding_dim 2.0 +807 66 model.scoring_fct_norm 1.0 +807 66 training.batch_size 0.0 +807 66 training.label_smoothing 0.16677868304255639 +807 67 model.embedding_dim 0.0 +807 67 model.scoring_fct_norm 2.0 +807 67 training.batch_size 2.0 +807 67 training.label_smoothing 0.22689527089230263 +807 68 model.embedding_dim 2.0 +807 68 model.scoring_fct_norm 1.0 +807 68 training.batch_size 0.0 +807 68 training.label_smoothing 0.0013896401066548082 +807 69 model.embedding_dim 2.0 +807 69 model.scoring_fct_norm 1.0 +807 69 training.batch_size 2.0 +807 69 training.label_smoothing 0.04600883669010244 +807 70 model.embedding_dim 2.0 +807 70 model.scoring_fct_norm 2.0 +807 70 training.batch_size 0.0 +807 70 training.label_smoothing 0.013114319159462987 +807 71 model.embedding_dim 2.0 +807 71 model.scoring_fct_norm 1.0 +807 71 training.batch_size 2.0 +807 71 training.label_smoothing 0.03897042421835867 +807 72 model.embedding_dim 2.0 +807 72 model.scoring_fct_norm 1.0 +807 72 training.batch_size 0.0 +807 72 training.label_smoothing 0.6857463093177821 +807 73 model.embedding_dim 1.0 +807 73 model.scoring_fct_norm 1.0 +807 73 training.batch_size 1.0 +807 73 training.label_smoothing 0.003099155370766093 +807 74 model.embedding_dim 1.0 +807 74 model.scoring_fct_norm 2.0 +807 74 training.batch_size 1.0 +807 74 training.label_smoothing 0.0021939155078572444 +807 75 model.embedding_dim 1.0 +807 75 model.scoring_fct_norm 2.0 +807 75 training.batch_size 1.0 +807 75 
training.label_smoothing 0.0022949115216752297 +807 76 model.embedding_dim 0.0 +807 76 model.scoring_fct_norm 1.0 +807 76 training.batch_size 1.0 +807 76 training.label_smoothing 0.0012333464635637733 +807 77 model.embedding_dim 2.0 +807 77 model.scoring_fct_norm 2.0 +807 77 training.batch_size 2.0 +807 77 training.label_smoothing 0.41142706676423846 +807 78 model.embedding_dim 2.0 +807 78 model.scoring_fct_norm 2.0 +807 78 training.batch_size 2.0 +807 78 training.label_smoothing 0.2676548154342076 +807 79 model.embedding_dim 1.0 +807 79 model.scoring_fct_norm 1.0 +807 79 training.batch_size 1.0 +807 79 training.label_smoothing 0.5498447847367584 +807 80 model.embedding_dim 1.0 +807 80 model.scoring_fct_norm 2.0 +807 80 training.batch_size 2.0 +807 80 training.label_smoothing 0.15428599465485904 +807 81 model.embedding_dim 0.0 +807 81 model.scoring_fct_norm 1.0 +807 81 training.batch_size 0.0 +807 81 training.label_smoothing 0.7403838364422183 +807 82 model.embedding_dim 2.0 +807 82 model.scoring_fct_norm 1.0 +807 82 training.batch_size 1.0 +807 82 training.label_smoothing 0.006289906222369856 +807 83 model.embedding_dim 2.0 +807 83 model.scoring_fct_norm 2.0 +807 83 training.batch_size 0.0 +807 83 training.label_smoothing 0.048143610462252344 +807 84 model.embedding_dim 2.0 +807 84 model.scoring_fct_norm 1.0 +807 84 training.batch_size 0.0 +807 84 training.label_smoothing 0.03657377820279506 +807 85 model.embedding_dim 1.0 +807 85 model.scoring_fct_norm 1.0 +807 85 training.batch_size 2.0 +807 85 training.label_smoothing 0.1505734093812547 +807 86 model.embedding_dim 0.0 +807 86 model.scoring_fct_norm 2.0 +807 86 training.batch_size 2.0 +807 86 training.label_smoothing 0.008320198273876986 +807 87 model.embedding_dim 0.0 +807 87 model.scoring_fct_norm 1.0 +807 87 training.batch_size 0.0 +807 87 training.label_smoothing 0.002187507731846494 +807 88 model.embedding_dim 2.0 +807 88 model.scoring_fct_norm 2.0 +807 88 training.batch_size 2.0 +807 88 
training.label_smoothing 0.006759922973437791 +807 89 model.embedding_dim 0.0 +807 89 model.scoring_fct_norm 2.0 +807 89 training.batch_size 2.0 +807 89 training.label_smoothing 0.019323638012969924 +807 90 model.embedding_dim 0.0 +807 90 model.scoring_fct_norm 1.0 +807 90 training.batch_size 2.0 +807 90 training.label_smoothing 0.13777673639483906 +807 91 model.embedding_dim 0.0 +807 91 model.scoring_fct_norm 2.0 +807 91 training.batch_size 1.0 +807 91 training.label_smoothing 0.05344786555541523 +807 92 model.embedding_dim 1.0 +807 92 model.scoring_fct_norm 2.0 +807 92 training.batch_size 2.0 +807 92 training.label_smoothing 0.006721133747495817 +807 93 model.embedding_dim 0.0 +807 93 model.scoring_fct_norm 1.0 +807 93 training.batch_size 0.0 +807 93 training.label_smoothing 0.008051448639485908 +807 94 model.embedding_dim 0.0 +807 94 model.scoring_fct_norm 2.0 +807 94 training.batch_size 0.0 +807 94 training.label_smoothing 0.01793551206150538 +807 95 model.embedding_dim 0.0 +807 95 model.scoring_fct_norm 1.0 +807 95 training.batch_size 0.0 +807 95 training.label_smoothing 0.0014672971725731322 +807 96 model.embedding_dim 1.0 +807 96 model.scoring_fct_norm 1.0 +807 96 training.batch_size 0.0 +807 96 training.label_smoothing 0.08942513315934561 +807 97 model.embedding_dim 0.0 +807 97 model.scoring_fct_norm 2.0 +807 97 training.batch_size 0.0 +807 97 training.label_smoothing 0.03313044261157538 +807 98 model.embedding_dim 1.0 +807 98 model.scoring_fct_norm 1.0 +807 98 training.batch_size 2.0 +807 98 training.label_smoothing 0.004327508085730953 +807 99 model.embedding_dim 1.0 +807 99 model.scoring_fct_norm 1.0 +807 99 training.batch_size 0.0 +807 99 training.label_smoothing 0.1436138024050957 +807 100 model.embedding_dim 0.0 +807 100 model.scoring_fct_norm 1.0 +807 100 training.batch_size 2.0 +807 100 training.label_smoothing 0.3343927271432008 +807 1 dataset """kinships""" +807 1 model """transe""" +807 1 loss """bceaftersigmoid""" +807 1 regularizer """no""" 
+807 1 optimizer """adadelta""" +807 1 training_loop """lcwa""" +807 1 evaluator """rankbased""" +807 2 dataset """kinships""" +807 2 model """transe""" +807 2 loss """bceaftersigmoid""" +807 2 regularizer """no""" +807 2 optimizer """adadelta""" +807 2 training_loop """lcwa""" +807 2 evaluator """rankbased""" +807 3 dataset """kinships""" +807 3 model """transe""" +807 3 loss """bceaftersigmoid""" +807 3 regularizer """no""" +807 3 optimizer """adadelta""" +807 3 training_loop """lcwa""" +807 3 evaluator """rankbased""" +807 4 dataset """kinships""" +807 4 model """transe""" +807 4 loss """bceaftersigmoid""" +807 4 regularizer """no""" +807 4 optimizer """adadelta""" +807 4 training_loop """lcwa""" +807 4 evaluator """rankbased""" +807 5 dataset """kinships""" +807 5 model """transe""" +807 5 loss """bceaftersigmoid""" +807 5 regularizer """no""" +807 5 optimizer """adadelta""" +807 5 training_loop """lcwa""" +807 5 evaluator """rankbased""" +807 6 dataset """kinships""" +807 6 model """transe""" +807 6 loss """bceaftersigmoid""" +807 6 regularizer """no""" +807 6 optimizer """adadelta""" +807 6 training_loop """lcwa""" +807 6 evaluator """rankbased""" +807 7 dataset """kinships""" +807 7 model """transe""" +807 7 loss """bceaftersigmoid""" +807 7 regularizer """no""" +807 7 optimizer """adadelta""" +807 7 training_loop """lcwa""" +807 7 evaluator """rankbased""" +807 8 dataset """kinships""" +807 8 model """transe""" +807 8 loss """bceaftersigmoid""" +807 8 regularizer """no""" +807 8 optimizer """adadelta""" +807 8 training_loop """lcwa""" +807 8 evaluator """rankbased""" +807 9 dataset """kinships""" +807 9 model """transe""" +807 9 loss """bceaftersigmoid""" +807 9 regularizer """no""" +807 9 optimizer """adadelta""" +807 9 training_loop """lcwa""" +807 9 evaluator """rankbased""" +807 10 dataset """kinships""" +807 10 model """transe""" +807 10 loss """bceaftersigmoid""" +807 10 regularizer """no""" +807 10 optimizer """adadelta""" +807 10 training_loop 
"""lcwa""" +807 10 evaluator """rankbased""" +807 11 dataset """kinships""" +807 11 model """transe""" +807 11 loss """bceaftersigmoid""" +807 11 regularizer """no""" +807 11 optimizer """adadelta""" +807 11 training_loop """lcwa""" +807 11 evaluator """rankbased""" +807 12 dataset """kinships""" +807 12 model """transe""" +807 12 loss """bceaftersigmoid""" +807 12 regularizer """no""" +807 12 optimizer """adadelta""" +807 12 training_loop """lcwa""" +807 12 evaluator """rankbased""" +807 13 dataset """kinships""" +807 13 model """transe""" +807 13 loss """bceaftersigmoid""" +807 13 regularizer """no""" +807 13 optimizer """adadelta""" +807 13 training_loop """lcwa""" +807 13 evaluator """rankbased""" +807 14 dataset """kinships""" +807 14 model """transe""" +807 14 loss """bceaftersigmoid""" +807 14 regularizer """no""" +807 14 optimizer """adadelta""" +807 14 training_loop """lcwa""" +807 14 evaluator """rankbased""" +807 15 dataset """kinships""" +807 15 model """transe""" +807 15 loss """bceaftersigmoid""" +807 15 regularizer """no""" +807 15 optimizer """adadelta""" +807 15 training_loop """lcwa""" +807 15 evaluator """rankbased""" +807 16 dataset """kinships""" +807 16 model """transe""" +807 16 loss """bceaftersigmoid""" +807 16 regularizer """no""" +807 16 optimizer """adadelta""" +807 16 training_loop """lcwa""" +807 16 evaluator """rankbased""" +807 17 dataset """kinships""" +807 17 model """transe""" +807 17 loss """bceaftersigmoid""" +807 17 regularizer """no""" +807 17 optimizer """adadelta""" +807 17 training_loop """lcwa""" +807 17 evaluator """rankbased""" +807 18 dataset """kinships""" +807 18 model """transe""" +807 18 loss """bceaftersigmoid""" +807 18 regularizer """no""" +807 18 optimizer """adadelta""" +807 18 training_loop """lcwa""" +807 18 evaluator """rankbased""" +807 19 dataset """kinships""" +807 19 model """transe""" +807 19 loss """bceaftersigmoid""" +807 19 regularizer """no""" +807 19 optimizer """adadelta""" +807 19 training_loop 
"""lcwa""" +807 19 evaluator """rankbased""" +807 20 dataset """kinships""" +807 20 model """transe""" +807 20 loss """bceaftersigmoid""" +807 20 regularizer """no""" +807 20 optimizer """adadelta""" +807 20 training_loop """lcwa""" +807 20 evaluator """rankbased""" +807 21 dataset """kinships""" +807 21 model """transe""" +807 21 loss """bceaftersigmoid""" +807 21 regularizer """no""" +807 21 optimizer """adadelta""" +807 21 training_loop """lcwa""" +807 21 evaluator """rankbased""" +807 22 dataset """kinships""" +807 22 model """transe""" +807 22 loss """bceaftersigmoid""" +807 22 regularizer """no""" +807 22 optimizer """adadelta""" +807 22 training_loop """lcwa""" +807 22 evaluator """rankbased""" +807 23 dataset """kinships""" +807 23 model """transe""" +807 23 loss """bceaftersigmoid""" +807 23 regularizer """no""" +807 23 optimizer """adadelta""" +807 23 training_loop """lcwa""" +807 23 evaluator """rankbased""" +807 24 dataset """kinships""" +807 24 model """transe""" +807 24 loss """bceaftersigmoid""" +807 24 regularizer """no""" +807 24 optimizer """adadelta""" +807 24 training_loop """lcwa""" +807 24 evaluator """rankbased""" +807 25 dataset """kinships""" +807 25 model """transe""" +807 25 loss """bceaftersigmoid""" +807 25 regularizer """no""" +807 25 optimizer """adadelta""" +807 25 training_loop """lcwa""" +807 25 evaluator """rankbased""" +807 26 dataset """kinships""" +807 26 model """transe""" +807 26 loss """bceaftersigmoid""" +807 26 regularizer """no""" +807 26 optimizer """adadelta""" +807 26 training_loop """lcwa""" +807 26 evaluator """rankbased""" +807 27 dataset """kinships""" +807 27 model """transe""" +807 27 loss """bceaftersigmoid""" +807 27 regularizer """no""" +807 27 optimizer """adadelta""" +807 27 training_loop """lcwa""" +807 27 evaluator """rankbased""" +807 28 dataset """kinships""" +807 28 model """transe""" +807 28 loss """bceaftersigmoid""" +807 28 regularizer """no""" +807 28 optimizer """adadelta""" +807 28 training_loop 
"""lcwa""" +807 28 evaluator """rankbased""" +807 29 dataset """kinships""" +807 29 model """transe""" +807 29 loss """bceaftersigmoid""" +807 29 regularizer """no""" +807 29 optimizer """adadelta""" +807 29 training_loop """lcwa""" +807 29 evaluator """rankbased""" +807 30 dataset """kinships""" +807 30 model """transe""" +807 30 loss """bceaftersigmoid""" +807 30 regularizer """no""" +807 30 optimizer """adadelta""" +807 30 training_loop """lcwa""" +807 30 evaluator """rankbased""" +807 31 dataset """kinships""" +807 31 model """transe""" +807 31 loss """bceaftersigmoid""" +807 31 regularizer """no""" +807 31 optimizer """adadelta""" +807 31 training_loop """lcwa""" +807 31 evaluator """rankbased""" +807 32 dataset """kinships""" +807 32 model """transe""" +807 32 loss """bceaftersigmoid""" +807 32 regularizer """no""" +807 32 optimizer """adadelta""" +807 32 training_loop """lcwa""" +807 32 evaluator """rankbased""" +807 33 dataset """kinships""" +807 33 model """transe""" +807 33 loss """bceaftersigmoid""" +807 33 regularizer """no""" +807 33 optimizer """adadelta""" +807 33 training_loop """lcwa""" +807 33 evaluator """rankbased""" +807 34 dataset """kinships""" +807 34 model """transe""" +807 34 loss """bceaftersigmoid""" +807 34 regularizer """no""" +807 34 optimizer """adadelta""" +807 34 training_loop """lcwa""" +807 34 evaluator """rankbased""" +807 35 dataset """kinships""" +807 35 model """transe""" +807 35 loss """bceaftersigmoid""" +807 35 regularizer """no""" +807 35 optimizer """adadelta""" +807 35 training_loop """lcwa""" +807 35 evaluator """rankbased""" +807 36 dataset """kinships""" +807 36 model """transe""" +807 36 loss """bceaftersigmoid""" +807 36 regularizer """no""" +807 36 optimizer """adadelta""" +807 36 training_loop """lcwa""" +807 36 evaluator """rankbased""" +807 37 dataset """kinships""" +807 37 model """transe""" +807 37 loss """bceaftersigmoid""" +807 37 regularizer """no""" +807 37 optimizer """adadelta""" +807 37 training_loop 
"""lcwa""" +807 37 evaluator """rankbased""" +807 38 dataset """kinships""" +807 38 model """transe""" +807 38 loss """bceaftersigmoid""" +807 38 regularizer """no""" +807 38 optimizer """adadelta""" +807 38 training_loop """lcwa""" +807 38 evaluator """rankbased""" +807 39 dataset """kinships""" +807 39 model """transe""" +807 39 loss """bceaftersigmoid""" +807 39 regularizer """no""" +807 39 optimizer """adadelta""" +807 39 training_loop """lcwa""" +807 39 evaluator """rankbased""" +807 40 dataset """kinships""" +807 40 model """transe""" +807 40 loss """bceaftersigmoid""" +807 40 regularizer """no""" +807 40 optimizer """adadelta""" +807 40 training_loop """lcwa""" +807 40 evaluator """rankbased""" +807 41 dataset """kinships""" +807 41 model """transe""" +807 41 loss """bceaftersigmoid""" +807 41 regularizer """no""" +807 41 optimizer """adadelta""" +807 41 training_loop """lcwa""" +807 41 evaluator """rankbased""" +807 42 dataset """kinships""" +807 42 model """transe""" +807 42 loss """bceaftersigmoid""" +807 42 regularizer """no""" +807 42 optimizer """adadelta""" +807 42 training_loop """lcwa""" +807 42 evaluator """rankbased""" +807 43 dataset """kinships""" +807 43 model """transe""" +807 43 loss """bceaftersigmoid""" +807 43 regularizer """no""" +807 43 optimizer """adadelta""" +807 43 training_loop """lcwa""" +807 43 evaluator """rankbased""" +807 44 dataset """kinships""" +807 44 model """transe""" +807 44 loss """bceaftersigmoid""" +807 44 regularizer """no""" +807 44 optimizer """adadelta""" +807 44 training_loop """lcwa""" +807 44 evaluator """rankbased""" +807 45 dataset """kinships""" +807 45 model """transe""" +807 45 loss """bceaftersigmoid""" +807 45 regularizer """no""" +807 45 optimizer """adadelta""" +807 45 training_loop """lcwa""" +807 45 evaluator """rankbased""" +807 46 dataset """kinships""" +807 46 model """transe""" +807 46 loss """bceaftersigmoid""" +807 46 regularizer """no""" +807 46 optimizer """adadelta""" +807 46 training_loop 
"""lcwa""" +807 46 evaluator """rankbased""" +807 47 dataset """kinships""" +807 47 model """transe""" +807 47 loss """bceaftersigmoid""" +807 47 regularizer """no""" +807 47 optimizer """adadelta""" +807 47 training_loop """lcwa""" +807 47 evaluator """rankbased""" +807 48 dataset """kinships""" +807 48 model """transe""" +807 48 loss """bceaftersigmoid""" +807 48 regularizer """no""" +807 48 optimizer """adadelta""" +807 48 training_loop """lcwa""" +807 48 evaluator """rankbased""" +807 49 dataset """kinships""" +807 49 model """transe""" +807 49 loss """bceaftersigmoid""" +807 49 regularizer """no""" +807 49 optimizer """adadelta""" +807 49 training_loop """lcwa""" +807 49 evaluator """rankbased""" +807 50 dataset """kinships""" +807 50 model """transe""" +807 50 loss """bceaftersigmoid""" +807 50 regularizer """no""" +807 50 optimizer """adadelta""" +807 50 training_loop """lcwa""" +807 50 evaluator """rankbased""" +807 51 dataset """kinships""" +807 51 model """transe""" +807 51 loss """bceaftersigmoid""" +807 51 regularizer """no""" +807 51 optimizer """adadelta""" +807 51 training_loop """lcwa""" +807 51 evaluator """rankbased""" +807 52 dataset """kinships""" +807 52 model """transe""" +807 52 loss """bceaftersigmoid""" +807 52 regularizer """no""" +807 52 optimizer """adadelta""" +807 52 training_loop """lcwa""" +807 52 evaluator """rankbased""" +807 53 dataset """kinships""" +807 53 model """transe""" +807 53 loss """bceaftersigmoid""" +807 53 regularizer """no""" +807 53 optimizer """adadelta""" +807 53 training_loop """lcwa""" +807 53 evaluator """rankbased""" +807 54 dataset """kinships""" +807 54 model """transe""" +807 54 loss """bceaftersigmoid""" +807 54 regularizer """no""" +807 54 optimizer """adadelta""" +807 54 training_loop """lcwa""" +807 54 evaluator """rankbased""" +807 55 dataset """kinships""" +807 55 model """transe""" +807 55 loss """bceaftersigmoid""" +807 55 regularizer """no""" +807 55 optimizer """adadelta""" +807 55 training_loop 
"""lcwa""" +807 55 evaluator """rankbased""" +807 56 dataset """kinships""" +807 56 model """transe""" +807 56 loss """bceaftersigmoid""" +807 56 regularizer """no""" +807 56 optimizer """adadelta""" +807 56 training_loop """lcwa""" +807 56 evaluator """rankbased""" +807 57 dataset """kinships""" +807 57 model """transe""" +807 57 loss """bceaftersigmoid""" +807 57 regularizer """no""" +807 57 optimizer """adadelta""" +807 57 training_loop """lcwa""" +807 57 evaluator """rankbased""" +807 58 dataset """kinships""" +807 58 model """transe""" +807 58 loss """bceaftersigmoid""" +807 58 regularizer """no""" +807 58 optimizer """adadelta""" +807 58 training_loop """lcwa""" +807 58 evaluator """rankbased""" +807 59 dataset """kinships""" +807 59 model """transe""" +807 59 loss """bceaftersigmoid""" +807 59 regularizer """no""" +807 59 optimizer """adadelta""" +807 59 training_loop """lcwa""" +807 59 evaluator """rankbased""" +807 60 dataset """kinships""" +807 60 model """transe""" +807 60 loss """bceaftersigmoid""" +807 60 regularizer """no""" +807 60 optimizer """adadelta""" +807 60 training_loop """lcwa""" +807 60 evaluator """rankbased""" +807 61 dataset """kinships""" +807 61 model """transe""" +807 61 loss """bceaftersigmoid""" +807 61 regularizer """no""" +807 61 optimizer """adadelta""" +807 61 training_loop """lcwa""" +807 61 evaluator """rankbased""" +807 62 dataset """kinships""" +807 62 model """transe""" +807 62 loss """bceaftersigmoid""" +807 62 regularizer """no""" +807 62 optimizer """adadelta""" +807 62 training_loop """lcwa""" +807 62 evaluator """rankbased""" +807 63 dataset """kinships""" +807 63 model """transe""" +807 63 loss """bceaftersigmoid""" +807 63 regularizer """no""" +807 63 optimizer """adadelta""" +807 63 training_loop """lcwa""" +807 63 evaluator """rankbased""" +807 64 dataset """kinships""" +807 64 model """transe""" +807 64 loss """bceaftersigmoid""" +807 64 regularizer """no""" +807 64 optimizer """adadelta""" +807 64 training_loop 
"""lcwa""" +807 64 evaluator """rankbased""" +807 65 dataset """kinships""" +807 65 model """transe""" +807 65 loss """bceaftersigmoid""" +807 65 regularizer """no""" +807 65 optimizer """adadelta""" +807 65 training_loop """lcwa""" +807 65 evaluator """rankbased""" +807 66 dataset """kinships""" +807 66 model """transe""" +807 66 loss """bceaftersigmoid""" +807 66 regularizer """no""" +807 66 optimizer """adadelta""" +807 66 training_loop """lcwa""" +807 66 evaluator """rankbased""" +807 67 dataset """kinships""" +807 67 model """transe""" +807 67 loss """bceaftersigmoid""" +807 67 regularizer """no""" +807 67 optimizer """adadelta""" +807 67 training_loop """lcwa""" +807 67 evaluator """rankbased""" +807 68 dataset """kinships""" +807 68 model """transe""" +807 68 loss """bceaftersigmoid""" +807 68 regularizer """no""" +807 68 optimizer """adadelta""" +807 68 training_loop """lcwa""" +807 68 evaluator """rankbased""" +807 69 dataset """kinships""" +807 69 model """transe""" +807 69 loss """bceaftersigmoid""" +807 69 regularizer """no""" +807 69 optimizer """adadelta""" +807 69 training_loop """lcwa""" +807 69 evaluator """rankbased""" +807 70 dataset """kinships""" +807 70 model """transe""" +807 70 loss """bceaftersigmoid""" +807 70 regularizer """no""" +807 70 optimizer """adadelta""" +807 70 training_loop """lcwa""" +807 70 evaluator """rankbased""" +807 71 dataset """kinships""" +807 71 model """transe""" +807 71 loss """bceaftersigmoid""" +807 71 regularizer """no""" +807 71 optimizer """adadelta""" +807 71 training_loop """lcwa""" +807 71 evaluator """rankbased""" +807 72 dataset """kinships""" +807 72 model """transe""" +807 72 loss """bceaftersigmoid""" +807 72 regularizer """no""" +807 72 optimizer """adadelta""" +807 72 training_loop """lcwa""" +807 72 evaluator """rankbased""" +807 73 dataset """kinships""" +807 73 model """transe""" +807 73 loss """bceaftersigmoid""" +807 73 regularizer """no""" +807 73 optimizer """adadelta""" +807 73 training_loop 
"""lcwa""" +807 73 evaluator """rankbased""" +807 74 dataset """kinships""" +807 74 model """transe""" +807 74 loss """bceaftersigmoid""" +807 74 regularizer """no""" +807 74 optimizer """adadelta""" +807 74 training_loop """lcwa""" +807 74 evaluator """rankbased""" +807 75 dataset """kinships""" +807 75 model """transe""" +807 75 loss """bceaftersigmoid""" +807 75 regularizer """no""" +807 75 optimizer """adadelta""" +807 75 training_loop """lcwa""" +807 75 evaluator """rankbased""" +807 76 dataset """kinships""" +807 76 model """transe""" +807 76 loss """bceaftersigmoid""" +807 76 regularizer """no""" +807 76 optimizer """adadelta""" +807 76 training_loop """lcwa""" +807 76 evaluator """rankbased""" +807 77 dataset """kinships""" +807 77 model """transe""" +807 77 loss """bceaftersigmoid""" +807 77 regularizer """no""" +807 77 optimizer """adadelta""" +807 77 training_loop """lcwa""" +807 77 evaluator """rankbased""" +807 78 dataset """kinships""" +807 78 model """transe""" +807 78 loss """bceaftersigmoid""" +807 78 regularizer """no""" +807 78 optimizer """adadelta""" +807 78 training_loop """lcwa""" +807 78 evaluator """rankbased""" +807 79 dataset """kinships""" +807 79 model """transe""" +807 79 loss """bceaftersigmoid""" +807 79 regularizer """no""" +807 79 optimizer """adadelta""" +807 79 training_loop """lcwa""" +807 79 evaluator """rankbased""" +807 80 dataset """kinships""" +807 80 model """transe""" +807 80 loss """bceaftersigmoid""" +807 80 regularizer """no""" +807 80 optimizer """adadelta""" +807 80 training_loop """lcwa""" +807 80 evaluator """rankbased""" +807 81 dataset """kinships""" +807 81 model """transe""" +807 81 loss """bceaftersigmoid""" +807 81 regularizer """no""" +807 81 optimizer """adadelta""" +807 81 training_loop """lcwa""" +807 81 evaluator """rankbased""" +807 82 dataset """kinships""" +807 82 model """transe""" +807 82 loss """bceaftersigmoid""" +807 82 regularizer """no""" +807 82 optimizer """adadelta""" +807 82 training_loop 
"""lcwa""" +807 82 evaluator """rankbased""" +807 83 dataset """kinships""" +807 83 model """transe""" +807 83 loss """bceaftersigmoid""" +807 83 regularizer """no""" +807 83 optimizer """adadelta""" +807 83 training_loop """lcwa""" +807 83 evaluator """rankbased""" +807 84 dataset """kinships""" +807 84 model """transe""" +807 84 loss """bceaftersigmoid""" +807 84 regularizer """no""" +807 84 optimizer """adadelta""" +807 84 training_loop """lcwa""" +807 84 evaluator """rankbased""" +807 85 dataset """kinships""" +807 85 model """transe""" +807 85 loss """bceaftersigmoid""" +807 85 regularizer """no""" +807 85 optimizer """adadelta""" +807 85 training_loop """lcwa""" +807 85 evaluator """rankbased""" +807 86 dataset """kinships""" +807 86 model """transe""" +807 86 loss """bceaftersigmoid""" +807 86 regularizer """no""" +807 86 optimizer """adadelta""" +807 86 training_loop """lcwa""" +807 86 evaluator """rankbased""" +807 87 dataset """kinships""" +807 87 model """transe""" +807 87 loss """bceaftersigmoid""" +807 87 regularizer """no""" +807 87 optimizer """adadelta""" +807 87 training_loop """lcwa""" +807 87 evaluator """rankbased""" +807 88 dataset """kinships""" +807 88 model """transe""" +807 88 loss """bceaftersigmoid""" +807 88 regularizer """no""" +807 88 optimizer """adadelta""" +807 88 training_loop """lcwa""" +807 88 evaluator """rankbased""" +807 89 dataset """kinships""" +807 89 model """transe""" +807 89 loss """bceaftersigmoid""" +807 89 regularizer """no""" +807 89 optimizer """adadelta""" +807 89 training_loop """lcwa""" +807 89 evaluator """rankbased""" +807 90 dataset """kinships""" +807 90 model """transe""" +807 90 loss """bceaftersigmoid""" +807 90 regularizer """no""" +807 90 optimizer """adadelta""" +807 90 training_loop """lcwa""" +807 90 evaluator """rankbased""" +807 91 dataset """kinships""" +807 91 model """transe""" +807 91 loss """bceaftersigmoid""" +807 91 regularizer """no""" +807 91 optimizer """adadelta""" +807 91 training_loop 
"""lcwa""" +807 91 evaluator """rankbased""" +807 92 dataset """kinships""" +807 92 model """transe""" +807 92 loss """bceaftersigmoid""" +807 92 regularizer """no""" +807 92 optimizer """adadelta""" +807 92 training_loop """lcwa""" +807 92 evaluator """rankbased""" +807 93 dataset """kinships""" +807 93 model """transe""" +807 93 loss """bceaftersigmoid""" +807 93 regularizer """no""" +807 93 optimizer """adadelta""" +807 93 training_loop """lcwa""" +807 93 evaluator """rankbased""" +807 94 dataset """kinships""" +807 94 model """transe""" +807 94 loss """bceaftersigmoid""" +807 94 regularizer """no""" +807 94 optimizer """adadelta""" +807 94 training_loop """lcwa""" +807 94 evaluator """rankbased""" +807 95 dataset """kinships""" +807 95 model """transe""" +807 95 loss """bceaftersigmoid""" +807 95 regularizer """no""" +807 95 optimizer """adadelta""" +807 95 training_loop """lcwa""" +807 95 evaluator """rankbased""" +807 96 dataset """kinships""" +807 96 model """transe""" +807 96 loss """bceaftersigmoid""" +807 96 regularizer """no""" +807 96 optimizer """adadelta""" +807 96 training_loop """lcwa""" +807 96 evaluator """rankbased""" +807 97 dataset """kinships""" +807 97 model """transe""" +807 97 loss """bceaftersigmoid""" +807 97 regularizer """no""" +807 97 optimizer """adadelta""" +807 97 training_loop """lcwa""" +807 97 evaluator """rankbased""" +807 98 dataset """kinships""" +807 98 model """transe""" +807 98 loss """bceaftersigmoid""" +807 98 regularizer """no""" +807 98 optimizer """adadelta""" +807 98 training_loop """lcwa""" +807 98 evaluator """rankbased""" +807 99 dataset """kinships""" +807 99 model """transe""" +807 99 loss """bceaftersigmoid""" +807 99 regularizer """no""" +807 99 optimizer """adadelta""" +807 99 training_loop """lcwa""" +807 99 evaluator """rankbased""" +807 100 dataset """kinships""" +807 100 model """transe""" +807 100 loss """bceaftersigmoid""" +807 100 regularizer """no""" +807 100 optimizer """adadelta""" +807 100 
training_loop """lcwa""" +807 100 evaluator """rankbased""" +808 1 model.embedding_dim 2.0 +808 1 model.scoring_fct_norm 2.0 +808 1 training.batch_size 2.0 +808 1 training.label_smoothing 0.5339659473293633 +808 2 model.embedding_dim 1.0 +808 2 model.scoring_fct_norm 2.0 +808 2 training.batch_size 0.0 +808 2 training.label_smoothing 0.13613418412943526 +808 3 model.embedding_dim 2.0 +808 3 model.scoring_fct_norm 2.0 +808 3 training.batch_size 0.0 +808 3 training.label_smoothing 0.07164189787369961 +808 4 model.embedding_dim 0.0 +808 4 model.scoring_fct_norm 2.0 +808 4 training.batch_size 0.0 +808 4 training.label_smoothing 0.9328531834998408 +808 5 model.embedding_dim 0.0 +808 5 model.scoring_fct_norm 1.0 +808 5 training.batch_size 1.0 +808 5 training.label_smoothing 0.03294907875824646 +808 6 model.embedding_dim 1.0 +808 6 model.scoring_fct_norm 2.0 +808 6 training.batch_size 2.0 +808 6 training.label_smoothing 0.5709147970661809 +808 7 model.embedding_dim 0.0 +808 7 model.scoring_fct_norm 2.0 +808 7 training.batch_size 1.0 +808 7 training.label_smoothing 0.024039540188111826 +808 8 model.embedding_dim 2.0 +808 8 model.scoring_fct_norm 2.0 +808 8 training.batch_size 0.0 +808 8 training.label_smoothing 0.08710181824839437 +808 9 model.embedding_dim 2.0 +808 9 model.scoring_fct_norm 1.0 +808 9 training.batch_size 1.0 +808 9 training.label_smoothing 0.032042138604070396 +808 10 model.embedding_dim 1.0 +808 10 model.scoring_fct_norm 1.0 +808 10 training.batch_size 2.0 +808 10 training.label_smoothing 0.045747617175518285 +808 11 model.embedding_dim 0.0 +808 11 model.scoring_fct_norm 2.0 +808 11 training.batch_size 2.0 +808 11 training.label_smoothing 0.3286493902988386 +808 12 model.embedding_dim 2.0 +808 12 model.scoring_fct_norm 1.0 +808 12 training.batch_size 0.0 +808 12 training.label_smoothing 0.0013808760133790455 +808 13 model.embedding_dim 2.0 +808 13 model.scoring_fct_norm 1.0 +808 13 training.batch_size 2.0 +808 13 training.label_smoothing 
0.04583179817148671 +808 14 model.embedding_dim 0.0 +808 14 model.scoring_fct_norm 2.0 +808 14 training.batch_size 2.0 +808 14 training.label_smoothing 0.005464471676450095 +808 15 model.embedding_dim 2.0 +808 15 model.scoring_fct_norm 1.0 +808 15 training.batch_size 1.0 +808 15 training.label_smoothing 0.8466453237055747 +808 16 model.embedding_dim 2.0 +808 16 model.scoring_fct_norm 2.0 +808 16 training.batch_size 0.0 +808 16 training.label_smoothing 0.034222693896965486 +808 17 model.embedding_dim 2.0 +808 17 model.scoring_fct_norm 2.0 +808 17 training.batch_size 1.0 +808 17 training.label_smoothing 0.22818148857125325 +808 18 model.embedding_dim 1.0 +808 18 model.scoring_fct_norm 2.0 +808 18 training.batch_size 1.0 +808 18 training.label_smoothing 0.12335306514798954 +808 19 model.embedding_dim 0.0 +808 19 model.scoring_fct_norm 2.0 +808 19 training.batch_size 1.0 +808 19 training.label_smoothing 0.16386765393924585 +808 20 model.embedding_dim 2.0 +808 20 model.scoring_fct_norm 2.0 +808 20 training.batch_size 2.0 +808 20 training.label_smoothing 0.0015873479791460269 +808 21 model.embedding_dim 1.0 +808 21 model.scoring_fct_norm 1.0 +808 21 training.batch_size 1.0 +808 21 training.label_smoothing 0.07432086255065876 +808 22 model.embedding_dim 0.0 +808 22 model.scoring_fct_norm 2.0 +808 22 training.batch_size 1.0 +808 22 training.label_smoothing 0.016099799423300264 +808 23 model.embedding_dim 2.0 +808 23 model.scoring_fct_norm 2.0 +808 23 training.batch_size 2.0 +808 23 training.label_smoothing 0.051074317682749996 +808 24 model.embedding_dim 0.0 +808 24 model.scoring_fct_norm 1.0 +808 24 training.batch_size 2.0 +808 24 training.label_smoothing 0.006833566508169464 +808 25 model.embedding_dim 1.0 +808 25 model.scoring_fct_norm 2.0 +808 25 training.batch_size 0.0 +808 25 training.label_smoothing 0.41132825837281695 +808 26 model.embedding_dim 0.0 +808 26 model.scoring_fct_norm 2.0 +808 26 training.batch_size 0.0 +808 26 training.label_smoothing 
0.019130067156348813 +808 27 model.embedding_dim 2.0 +808 27 model.scoring_fct_norm 1.0 +808 27 training.batch_size 0.0 +808 27 training.label_smoothing 0.08209217754665207 +808 28 model.embedding_dim 0.0 +808 28 model.scoring_fct_norm 2.0 +808 28 training.batch_size 2.0 +808 28 training.label_smoothing 0.7506281823089269 +808 29 model.embedding_dim 0.0 +808 29 model.scoring_fct_norm 1.0 +808 29 training.batch_size 1.0 +808 29 training.label_smoothing 0.016020724761914927 +808 30 model.embedding_dim 2.0 +808 30 model.scoring_fct_norm 2.0 +808 30 training.batch_size 0.0 +808 30 training.label_smoothing 0.14628662375692825 +808 31 model.embedding_dim 2.0 +808 31 model.scoring_fct_norm 1.0 +808 31 training.batch_size 1.0 +808 31 training.label_smoothing 0.8345281053338748 +808 32 model.embedding_dim 1.0 +808 32 model.scoring_fct_norm 2.0 +808 32 training.batch_size 2.0 +808 32 training.label_smoothing 0.048237317084603624 +808 33 model.embedding_dim 1.0 +808 33 model.scoring_fct_norm 1.0 +808 33 training.batch_size 1.0 +808 33 training.label_smoothing 0.025183781696652977 +808 34 model.embedding_dim 1.0 +808 34 model.scoring_fct_norm 2.0 +808 34 training.batch_size 0.0 +808 34 training.label_smoothing 0.040940883647495784 +808 35 model.embedding_dim 0.0 +808 35 model.scoring_fct_norm 1.0 +808 35 training.batch_size 2.0 +808 35 training.label_smoothing 0.001369555861712202 +808 36 model.embedding_dim 0.0 +808 36 model.scoring_fct_norm 2.0 +808 36 training.batch_size 0.0 +808 36 training.label_smoothing 0.0028166591010644355 +808 37 model.embedding_dim 1.0 +808 37 model.scoring_fct_norm 1.0 +808 37 training.batch_size 2.0 +808 37 training.label_smoothing 0.2049789367503147 +808 38 model.embedding_dim 2.0 +808 38 model.scoring_fct_norm 1.0 +808 38 training.batch_size 1.0 +808 38 training.label_smoothing 0.0011355970453478146 +808 39 model.embedding_dim 2.0 +808 39 model.scoring_fct_norm 1.0 +808 39 training.batch_size 0.0 +808 39 training.label_smoothing 
0.00193271301575261 +808 40 model.embedding_dim 1.0 +808 40 model.scoring_fct_norm 1.0 +808 40 training.batch_size 1.0 +808 40 training.label_smoothing 0.044447384700029284 +808 41 model.embedding_dim 2.0 +808 41 model.scoring_fct_norm 1.0 +808 41 training.batch_size 2.0 +808 41 training.label_smoothing 0.9521252422169572 +808 42 model.embedding_dim 1.0 +808 42 model.scoring_fct_norm 1.0 +808 42 training.batch_size 1.0 +808 42 training.label_smoothing 0.007017650240090404 +808 43 model.embedding_dim 0.0 +808 43 model.scoring_fct_norm 2.0 +808 43 training.batch_size 1.0 +808 43 training.label_smoothing 0.011170052832887708 +808 44 model.embedding_dim 2.0 +808 44 model.scoring_fct_norm 2.0 +808 44 training.batch_size 2.0 +808 44 training.label_smoothing 0.3537544870535614 +808 45 model.embedding_dim 0.0 +808 45 model.scoring_fct_norm 2.0 +808 45 training.batch_size 0.0 +808 45 training.label_smoothing 0.5839699567021384 +808 46 model.embedding_dim 1.0 +808 46 model.scoring_fct_norm 2.0 +808 46 training.batch_size 2.0 +808 46 training.label_smoothing 0.38546104028102407 +808 47 model.embedding_dim 2.0 +808 47 model.scoring_fct_norm 1.0 +808 47 training.batch_size 2.0 +808 47 training.label_smoothing 0.04263629502709746 +808 48 model.embedding_dim 1.0 +808 48 model.scoring_fct_norm 1.0 +808 48 training.batch_size 0.0 +808 48 training.label_smoothing 0.7855946007924892 +808 49 model.embedding_dim 0.0 +808 49 model.scoring_fct_norm 1.0 +808 49 training.batch_size 2.0 +808 49 training.label_smoothing 0.2166939718105326 +808 50 model.embedding_dim 0.0 +808 50 model.scoring_fct_norm 1.0 +808 50 training.batch_size 1.0 +808 50 training.label_smoothing 0.027763325748749908 +808 51 model.embedding_dim 1.0 +808 51 model.scoring_fct_norm 1.0 +808 51 training.batch_size 2.0 +808 51 training.label_smoothing 0.21754965877676305 +808 52 model.embedding_dim 2.0 +808 52 model.scoring_fct_norm 2.0 +808 52 training.batch_size 1.0 +808 52 training.label_smoothing 0.2715710194216046 +808 
53 model.embedding_dim 0.0 +808 53 model.scoring_fct_norm 1.0 +808 53 training.batch_size 0.0 +808 53 training.label_smoothing 0.05380434649901449 +808 54 model.embedding_dim 0.0 +808 54 model.scoring_fct_norm 2.0 +808 54 training.batch_size 2.0 +808 54 training.label_smoothing 0.14625911605284858 +808 55 model.embedding_dim 0.0 +808 55 model.scoring_fct_norm 2.0 +808 55 training.batch_size 0.0 +808 55 training.label_smoothing 0.4208524273797626 +808 56 model.embedding_dim 1.0 +808 56 model.scoring_fct_norm 1.0 +808 56 training.batch_size 0.0 +808 56 training.label_smoothing 0.0142930577911878 +808 57 model.embedding_dim 0.0 +808 57 model.scoring_fct_norm 2.0 +808 57 training.batch_size 2.0 +808 57 training.label_smoothing 0.3180711097637657 +808 58 model.embedding_dim 2.0 +808 58 model.scoring_fct_norm 2.0 +808 58 training.batch_size 0.0 +808 58 training.label_smoothing 0.650172808458536 +808 59 model.embedding_dim 0.0 +808 59 model.scoring_fct_norm 2.0 +808 59 training.batch_size 2.0 +808 59 training.label_smoothing 0.3376483627713425 +808 60 model.embedding_dim 2.0 +808 60 model.scoring_fct_norm 2.0 +808 60 training.batch_size 0.0 +808 60 training.label_smoothing 0.0018482992114816714 +808 61 model.embedding_dim 0.0 +808 61 model.scoring_fct_norm 2.0 +808 61 training.batch_size 0.0 +808 61 training.label_smoothing 0.031551542307374975 +808 62 model.embedding_dim 2.0 +808 62 model.scoring_fct_norm 1.0 +808 62 training.batch_size 2.0 +808 62 training.label_smoothing 0.001140938722401373 +808 63 model.embedding_dim 1.0 +808 63 model.scoring_fct_norm 1.0 +808 63 training.batch_size 0.0 +808 63 training.label_smoothing 0.03280014256895036 +808 64 model.embedding_dim 1.0 +808 64 model.scoring_fct_norm 1.0 +808 64 training.batch_size 1.0 +808 64 training.label_smoothing 0.21713727879827008 +808 65 model.embedding_dim 0.0 +808 65 model.scoring_fct_norm 2.0 +808 65 training.batch_size 2.0 +808 65 training.label_smoothing 0.2361231624379889 +808 66 model.embedding_dim 0.0 
+808 66 model.scoring_fct_norm 2.0 +808 66 training.batch_size 1.0 +808 66 training.label_smoothing 0.0146832816315818 +808 67 model.embedding_dim 0.0 +808 67 model.scoring_fct_norm 1.0 +808 67 training.batch_size 0.0 +808 67 training.label_smoothing 0.36450947106318393 +808 68 model.embedding_dim 0.0 +808 68 model.scoring_fct_norm 2.0 +808 68 training.batch_size 0.0 +808 68 training.label_smoothing 0.013543059926419774 +808 69 model.embedding_dim 2.0 +808 69 model.scoring_fct_norm 2.0 +808 69 training.batch_size 1.0 +808 69 training.label_smoothing 0.00674970509657661 +808 70 model.embedding_dim 1.0 +808 70 model.scoring_fct_norm 2.0 +808 70 training.batch_size 2.0 +808 70 training.label_smoothing 0.0011901452777572845 +808 71 model.embedding_dim 2.0 +808 71 model.scoring_fct_norm 1.0 +808 71 training.batch_size 0.0 +808 71 training.label_smoothing 0.16335966912875305 +808 72 model.embedding_dim 0.0 +808 72 model.scoring_fct_norm 2.0 +808 72 training.batch_size 1.0 +808 72 training.label_smoothing 0.7868371060772477 +808 73 model.embedding_dim 0.0 +808 73 model.scoring_fct_norm 2.0 +808 73 training.batch_size 0.0 +808 73 training.label_smoothing 0.5498687737716496 +808 74 model.embedding_dim 1.0 +808 74 model.scoring_fct_norm 2.0 +808 74 training.batch_size 2.0 +808 74 training.label_smoothing 0.00707849911868117 +808 75 model.embedding_dim 1.0 +808 75 model.scoring_fct_norm 2.0 +808 75 training.batch_size 0.0 +808 75 training.label_smoothing 0.10831625069446807 +808 76 model.embedding_dim 1.0 +808 76 model.scoring_fct_norm 1.0 +808 76 training.batch_size 0.0 +808 76 training.label_smoothing 0.3756375835152103 +808 77 model.embedding_dim 0.0 +808 77 model.scoring_fct_norm 1.0 +808 77 training.batch_size 2.0 +808 77 training.label_smoothing 0.5622560206508187 +808 78 model.embedding_dim 2.0 +808 78 model.scoring_fct_norm 2.0 +808 78 training.batch_size 2.0 +808 78 training.label_smoothing 0.024348349434995833 +808 79 model.embedding_dim 2.0 +808 79 
model.scoring_fct_norm 2.0 +808 79 training.batch_size 2.0 +808 79 training.label_smoothing 0.004340153258267072 +808 80 model.embedding_dim 2.0 +808 80 model.scoring_fct_norm 1.0 +808 80 training.batch_size 1.0 +808 80 training.label_smoothing 0.12481805148330781 +808 81 model.embedding_dim 1.0 +808 81 model.scoring_fct_norm 1.0 +808 81 training.batch_size 1.0 +808 81 training.label_smoothing 0.027607903434227723 +808 82 model.embedding_dim 0.0 +808 82 model.scoring_fct_norm 1.0 +808 82 training.batch_size 2.0 +808 82 training.label_smoothing 0.0011149992749078788 +808 83 model.embedding_dim 1.0 +808 83 model.scoring_fct_norm 2.0 +808 83 training.batch_size 1.0 +808 83 training.label_smoothing 0.0138384064188617 +808 84 model.embedding_dim 1.0 +808 84 model.scoring_fct_norm 1.0 +808 84 training.batch_size 2.0 +808 84 training.label_smoothing 0.005469956691940256 +808 85 model.embedding_dim 2.0 +808 85 model.scoring_fct_norm 2.0 +808 85 training.batch_size 1.0 +808 85 training.label_smoothing 0.686710050200412 +808 86 model.embedding_dim 0.0 +808 86 model.scoring_fct_norm 2.0 +808 86 training.batch_size 2.0 +808 86 training.label_smoothing 0.6711153073986076 +808 87 model.embedding_dim 1.0 +808 87 model.scoring_fct_norm 2.0 +808 87 training.batch_size 1.0 +808 87 training.label_smoothing 0.004680845875274436 +808 88 model.embedding_dim 2.0 +808 88 model.scoring_fct_norm 1.0 +808 88 training.batch_size 2.0 +808 88 training.label_smoothing 0.031630335838754194 +808 89 model.embedding_dim 0.0 +808 89 model.scoring_fct_norm 2.0 +808 89 training.batch_size 1.0 +808 89 training.label_smoothing 0.0011132850184666953 +808 90 model.embedding_dim 2.0 +808 90 model.scoring_fct_norm 1.0 +808 90 training.batch_size 2.0 +808 90 training.label_smoothing 0.07642611399061867 +808 91 model.embedding_dim 1.0 +808 91 model.scoring_fct_norm 2.0 +808 91 training.batch_size 2.0 +808 91 training.label_smoothing 0.042349476651676196 +808 92 model.embedding_dim 2.0 +808 92 
model.scoring_fct_norm 1.0 +808 92 training.batch_size 2.0 +808 92 training.label_smoothing 0.18744177037373455 +808 93 model.embedding_dim 2.0 +808 93 model.scoring_fct_norm 2.0 +808 93 training.batch_size 0.0 +808 93 training.label_smoothing 0.0017249491854160668 +808 94 model.embedding_dim 2.0 +808 94 model.scoring_fct_norm 1.0 +808 94 training.batch_size 1.0 +808 94 training.label_smoothing 0.21931608657698276 +808 95 model.embedding_dim 2.0 +808 95 model.scoring_fct_norm 1.0 +808 95 training.batch_size 1.0 +808 95 training.label_smoothing 0.7766438956325333 +808 96 model.embedding_dim 0.0 +808 96 model.scoring_fct_norm 1.0 +808 96 training.batch_size 2.0 +808 96 training.label_smoothing 0.026102764577157708 +808 97 model.embedding_dim 0.0 +808 97 model.scoring_fct_norm 1.0 +808 97 training.batch_size 0.0 +808 97 training.label_smoothing 0.004175266865891881 +808 98 model.embedding_dim 0.0 +808 98 model.scoring_fct_norm 1.0 +808 98 training.batch_size 2.0 +808 98 training.label_smoothing 0.9478103887824281 +808 99 model.embedding_dim 2.0 +808 99 model.scoring_fct_norm 2.0 +808 99 training.batch_size 1.0 +808 99 training.label_smoothing 0.10516118651844625 +808 100 model.embedding_dim 2.0 +808 100 model.scoring_fct_norm 2.0 +808 100 training.batch_size 2.0 +808 100 training.label_smoothing 0.07128354135877021 +808 1 dataset """kinships""" +808 1 model """transe""" +808 1 loss """softplus""" +808 1 regularizer """no""" +808 1 optimizer """adadelta""" +808 1 training_loop """lcwa""" +808 1 evaluator """rankbased""" +808 2 dataset """kinships""" +808 2 model """transe""" +808 2 loss """softplus""" +808 2 regularizer """no""" +808 2 optimizer """adadelta""" +808 2 training_loop """lcwa""" +808 2 evaluator """rankbased""" +808 3 dataset """kinships""" +808 3 model """transe""" +808 3 loss """softplus""" +808 3 regularizer """no""" +808 3 optimizer """adadelta""" +808 3 training_loop """lcwa""" +808 3 evaluator """rankbased""" +808 4 dataset """kinships""" +808 4 
model """transe""" +808 4 loss """softplus""" +808 4 regularizer """no""" +808 4 optimizer """adadelta""" +808 4 training_loop """lcwa""" +808 4 evaluator """rankbased""" +808 5 dataset """kinships""" +808 5 model """transe""" +808 5 loss """softplus""" +808 5 regularizer """no""" +808 5 optimizer """adadelta""" +808 5 training_loop """lcwa""" +808 5 evaluator """rankbased""" +808 6 dataset """kinships""" +808 6 model """transe""" +808 6 loss """softplus""" +808 6 regularizer """no""" +808 6 optimizer """adadelta""" +808 6 training_loop """lcwa""" +808 6 evaluator """rankbased""" +808 7 dataset """kinships""" +808 7 model """transe""" +808 7 loss """softplus""" +808 7 regularizer """no""" +808 7 optimizer """adadelta""" +808 7 training_loop """lcwa""" +808 7 evaluator """rankbased""" +808 8 dataset """kinships""" +808 8 model """transe""" +808 8 loss """softplus""" +808 8 regularizer """no""" +808 8 optimizer """adadelta""" +808 8 training_loop """lcwa""" +808 8 evaluator """rankbased""" +808 9 dataset """kinships""" +808 9 model """transe""" +808 9 loss """softplus""" +808 9 regularizer """no""" +808 9 optimizer """adadelta""" +808 9 training_loop """lcwa""" +808 9 evaluator """rankbased""" +808 10 dataset """kinships""" +808 10 model """transe""" +808 10 loss """softplus""" +808 10 regularizer """no""" +808 10 optimizer """adadelta""" +808 10 training_loop """lcwa""" +808 10 evaluator """rankbased""" +808 11 dataset """kinships""" +808 11 model """transe""" +808 11 loss """softplus""" +808 11 regularizer """no""" +808 11 optimizer """adadelta""" +808 11 training_loop """lcwa""" +808 11 evaluator """rankbased""" +808 12 dataset """kinships""" +808 12 model """transe""" +808 12 loss """softplus""" +808 12 regularizer """no""" +808 12 optimizer """adadelta""" +808 12 training_loop """lcwa""" +808 12 evaluator """rankbased""" +808 13 dataset """kinships""" +808 13 model """transe""" +808 13 loss """softplus""" +808 13 regularizer """no""" +808 13 optimizer 
"""adadelta""" +808 13 training_loop """lcwa""" +808 13 evaluator """rankbased""" +808 14 dataset """kinships""" +808 14 model """transe""" +808 14 loss """softplus""" +808 14 regularizer """no""" +808 14 optimizer """adadelta""" +808 14 training_loop """lcwa""" +808 14 evaluator """rankbased""" +808 15 dataset """kinships""" +808 15 model """transe""" +808 15 loss """softplus""" +808 15 regularizer """no""" +808 15 optimizer """adadelta""" +808 15 training_loop """lcwa""" +808 15 evaluator """rankbased""" +808 16 dataset """kinships""" +808 16 model """transe""" +808 16 loss """softplus""" +808 16 regularizer """no""" +808 16 optimizer """adadelta""" +808 16 training_loop """lcwa""" +808 16 evaluator """rankbased""" +808 17 dataset """kinships""" +808 17 model """transe""" +808 17 loss """softplus""" +808 17 regularizer """no""" +808 17 optimizer """adadelta""" +808 17 training_loop """lcwa""" +808 17 evaluator """rankbased""" +808 18 dataset """kinships""" +808 18 model """transe""" +808 18 loss """softplus""" +808 18 regularizer """no""" +808 18 optimizer """adadelta""" +808 18 training_loop """lcwa""" +808 18 evaluator """rankbased""" +808 19 dataset """kinships""" +808 19 model """transe""" +808 19 loss """softplus""" +808 19 regularizer """no""" +808 19 optimizer """adadelta""" +808 19 training_loop """lcwa""" +808 19 evaluator """rankbased""" +808 20 dataset """kinships""" +808 20 model """transe""" +808 20 loss """softplus""" +808 20 regularizer """no""" +808 20 optimizer """adadelta""" +808 20 training_loop """lcwa""" +808 20 evaluator """rankbased""" +808 21 dataset """kinships""" +808 21 model """transe""" +808 21 loss """softplus""" +808 21 regularizer """no""" +808 21 optimizer """adadelta""" +808 21 training_loop """lcwa""" +808 21 evaluator """rankbased""" +808 22 dataset """kinships""" +808 22 model """transe""" +808 22 loss """softplus""" +808 22 regularizer """no""" +808 22 optimizer """adadelta""" +808 22 training_loop """lcwa""" +808 22 
evaluator """rankbased""" +808 23 dataset """kinships""" +808 23 model """transe""" +808 23 loss """softplus""" +808 23 regularizer """no""" +808 23 optimizer """adadelta""" +808 23 training_loop """lcwa""" +808 23 evaluator """rankbased""" +808 24 dataset """kinships""" +808 24 model """transe""" +808 24 loss """softplus""" +808 24 regularizer """no""" +808 24 optimizer """adadelta""" +808 24 training_loop """lcwa""" +808 24 evaluator """rankbased""" +808 25 dataset """kinships""" +808 25 model """transe""" +808 25 loss """softplus""" +808 25 regularizer """no""" +808 25 optimizer """adadelta""" +808 25 training_loop """lcwa""" +808 25 evaluator """rankbased""" +808 26 dataset """kinships""" +808 26 model """transe""" +808 26 loss """softplus""" +808 26 regularizer """no""" +808 26 optimizer """adadelta""" +808 26 training_loop """lcwa""" +808 26 evaluator """rankbased""" +808 27 dataset """kinships""" +808 27 model """transe""" +808 27 loss """softplus""" +808 27 regularizer """no""" +808 27 optimizer """adadelta""" +808 27 training_loop """lcwa""" +808 27 evaluator """rankbased""" +808 28 dataset """kinships""" +808 28 model """transe""" +808 28 loss """softplus""" +808 28 regularizer """no""" +808 28 optimizer """adadelta""" +808 28 training_loop """lcwa""" +808 28 evaluator """rankbased""" +808 29 dataset """kinships""" +808 29 model """transe""" +808 29 loss """softplus""" +808 29 regularizer """no""" +808 29 optimizer """adadelta""" +808 29 training_loop """lcwa""" +808 29 evaluator """rankbased""" +808 30 dataset """kinships""" +808 30 model """transe""" +808 30 loss """softplus""" +808 30 regularizer """no""" +808 30 optimizer """adadelta""" +808 30 training_loop """lcwa""" +808 30 evaluator """rankbased""" +808 31 dataset """kinships""" +808 31 model """transe""" +808 31 loss """softplus""" +808 31 regularizer """no""" +808 31 optimizer """adadelta""" +808 31 training_loop """lcwa""" +808 31 evaluator """rankbased""" +808 32 dataset """kinships""" +808 32 
model """transe""" +808 32 loss """softplus""" +808 32 regularizer """no""" +808 32 optimizer """adadelta""" +808 32 training_loop """lcwa""" +808 32 evaluator """rankbased""" +808 33 dataset """kinships""" +808 33 model """transe""" +808 33 loss """softplus""" +808 33 regularizer """no""" +808 33 optimizer """adadelta""" +808 33 training_loop """lcwa""" +808 33 evaluator """rankbased""" +808 34 dataset """kinships""" +808 34 model """transe""" +808 34 loss """softplus""" +808 34 regularizer """no""" +808 34 optimizer """adadelta""" +808 34 training_loop """lcwa""" +808 34 evaluator """rankbased""" +808 35 dataset """kinships""" +808 35 model """transe""" +808 35 loss """softplus""" +808 35 regularizer """no""" +808 35 optimizer """adadelta""" +808 35 training_loop """lcwa""" +808 35 evaluator """rankbased""" +808 36 dataset """kinships""" +808 36 model """transe""" +808 36 loss """softplus""" +808 36 regularizer """no""" +808 36 optimizer """adadelta""" +808 36 training_loop """lcwa""" +808 36 evaluator """rankbased""" +808 37 dataset """kinships""" +808 37 model """transe""" +808 37 loss """softplus""" +808 37 regularizer """no""" +808 37 optimizer """adadelta""" +808 37 training_loop """lcwa""" +808 37 evaluator """rankbased""" +808 38 dataset """kinships""" +808 38 model """transe""" +808 38 loss """softplus""" +808 38 regularizer """no""" +808 38 optimizer """adadelta""" +808 38 training_loop """lcwa""" +808 38 evaluator """rankbased""" +808 39 dataset """kinships""" +808 39 model """transe""" +808 39 loss """softplus""" +808 39 regularizer """no""" +808 39 optimizer """adadelta""" +808 39 training_loop """lcwa""" +808 39 evaluator """rankbased""" +808 40 dataset """kinships""" +808 40 model """transe""" +808 40 loss """softplus""" +808 40 regularizer """no""" +808 40 optimizer """adadelta""" +808 40 training_loop """lcwa""" +808 40 evaluator """rankbased""" +808 41 dataset """kinships""" +808 41 model """transe""" +808 41 loss """softplus""" +808 41 
regularizer """no""" +808 41 optimizer """adadelta""" +808 41 training_loop """lcwa""" +808 41 evaluator """rankbased""" +808 42 dataset """kinships""" +808 42 model """transe""" +808 42 loss """softplus""" +808 42 regularizer """no""" +808 42 optimizer """adadelta""" +808 42 training_loop """lcwa""" +808 42 evaluator """rankbased""" +808 43 dataset """kinships""" +808 43 model """transe""" +808 43 loss """softplus""" +808 43 regularizer """no""" +808 43 optimizer """adadelta""" +808 43 training_loop """lcwa""" +808 43 evaluator """rankbased""" +808 44 dataset """kinships""" +808 44 model """transe""" +808 44 loss """softplus""" +808 44 regularizer """no""" +808 44 optimizer """adadelta""" +808 44 training_loop """lcwa""" +808 44 evaluator """rankbased""" +808 45 dataset """kinships""" +808 45 model """transe""" +808 45 loss """softplus""" +808 45 regularizer """no""" +808 45 optimizer """adadelta""" +808 45 training_loop """lcwa""" +808 45 evaluator """rankbased""" +808 46 dataset """kinships""" +808 46 model """transe""" +808 46 loss """softplus""" +808 46 regularizer """no""" +808 46 optimizer """adadelta""" +808 46 training_loop """lcwa""" +808 46 evaluator """rankbased""" +808 47 dataset """kinships""" +808 47 model """transe""" +808 47 loss """softplus""" +808 47 regularizer """no""" +808 47 optimizer """adadelta""" +808 47 training_loop """lcwa""" +808 47 evaluator """rankbased""" +808 48 dataset """kinships""" +808 48 model """transe""" +808 48 loss """softplus""" +808 48 regularizer """no""" +808 48 optimizer """adadelta""" +808 48 training_loop """lcwa""" +808 48 evaluator """rankbased""" +808 49 dataset """kinships""" +808 49 model """transe""" +808 49 loss """softplus""" +808 49 regularizer """no""" +808 49 optimizer """adadelta""" +808 49 training_loop """lcwa""" +808 49 evaluator """rankbased""" +808 50 dataset """kinships""" +808 50 model """transe""" +808 50 loss """softplus""" +808 50 regularizer """no""" +808 50 optimizer """adadelta""" +808 50 
training_loop """lcwa""" +808 50 evaluator """rankbased""" +808 51 dataset """kinships""" +808 51 model """transe""" +808 51 loss """softplus""" +808 51 regularizer """no""" +808 51 optimizer """adadelta""" +808 51 training_loop """lcwa""" +808 51 evaluator """rankbased""" +808 52 dataset """kinships""" +808 52 model """transe""" +808 52 loss """softplus""" +808 52 regularizer """no""" +808 52 optimizer """adadelta""" +808 52 training_loop """lcwa""" +808 52 evaluator """rankbased""" +808 53 dataset """kinships""" +808 53 model """transe""" +808 53 loss """softplus""" +808 53 regularizer """no""" +808 53 optimizer """adadelta""" +808 53 training_loop """lcwa""" +808 53 evaluator """rankbased""" +808 54 dataset """kinships""" +808 54 model """transe""" +808 54 loss """softplus""" +808 54 regularizer """no""" +808 54 optimizer """adadelta""" +808 54 training_loop """lcwa""" +808 54 evaluator """rankbased""" +808 55 dataset """kinships""" +808 55 model """transe""" +808 55 loss """softplus""" +808 55 regularizer """no""" +808 55 optimizer """adadelta""" +808 55 training_loop """lcwa""" +808 55 evaluator """rankbased""" +808 56 dataset """kinships""" +808 56 model """transe""" +808 56 loss """softplus""" +808 56 regularizer """no""" +808 56 optimizer """adadelta""" +808 56 training_loop """lcwa""" +808 56 evaluator """rankbased""" +808 57 dataset """kinships""" +808 57 model """transe""" +808 57 loss """softplus""" +808 57 regularizer """no""" +808 57 optimizer """adadelta""" +808 57 training_loop """lcwa""" +808 57 evaluator """rankbased""" +808 58 dataset """kinships""" +808 58 model """transe""" +808 58 loss """softplus""" +808 58 regularizer """no""" +808 58 optimizer """adadelta""" +808 58 training_loop """lcwa""" +808 58 evaluator """rankbased""" +808 59 dataset """kinships""" +808 59 model """transe""" +808 59 loss """softplus""" +808 59 regularizer """no""" +808 59 optimizer """adadelta""" +808 59 training_loop """lcwa""" +808 59 evaluator """rankbased""" +808 
60 dataset """kinships""" +808 60 model """transe""" +808 60 loss """softplus""" +808 60 regularizer """no""" +808 60 optimizer """adadelta""" +808 60 training_loop """lcwa""" +808 60 evaluator """rankbased""" +808 61 dataset """kinships""" +808 61 model """transe""" +808 61 loss """softplus""" +808 61 regularizer """no""" +808 61 optimizer """adadelta""" +808 61 training_loop """lcwa""" +808 61 evaluator """rankbased""" +808 62 dataset """kinships""" +808 62 model """transe""" +808 62 loss """softplus""" +808 62 regularizer """no""" +808 62 optimizer """adadelta""" +808 62 training_loop """lcwa""" +808 62 evaluator """rankbased""" +808 63 dataset """kinships""" +808 63 model """transe""" +808 63 loss """softplus""" +808 63 regularizer """no""" +808 63 optimizer """adadelta""" +808 63 training_loop """lcwa""" +808 63 evaluator """rankbased""" +808 64 dataset """kinships""" +808 64 model """transe""" +808 64 loss """softplus""" +808 64 regularizer """no""" +808 64 optimizer """adadelta""" +808 64 training_loop """lcwa""" +808 64 evaluator """rankbased""" +808 65 dataset """kinships""" +808 65 model """transe""" +808 65 loss """softplus""" +808 65 regularizer """no""" +808 65 optimizer """adadelta""" +808 65 training_loop """lcwa""" +808 65 evaluator """rankbased""" +808 66 dataset """kinships""" +808 66 model """transe""" +808 66 loss """softplus""" +808 66 regularizer """no""" +808 66 optimizer """adadelta""" +808 66 training_loop """lcwa""" +808 66 evaluator """rankbased""" +808 67 dataset """kinships""" +808 67 model """transe""" +808 67 loss """softplus""" +808 67 regularizer """no""" +808 67 optimizer """adadelta""" +808 67 training_loop """lcwa""" +808 67 evaluator """rankbased""" +808 68 dataset """kinships""" +808 68 model """transe""" +808 68 loss """softplus""" +808 68 regularizer """no""" +808 68 optimizer """adadelta""" +808 68 training_loop """lcwa""" +808 68 evaluator """rankbased""" +808 69 dataset """kinships""" +808 69 model """transe""" +808 69 
loss """softplus""" +808 69 regularizer """no""" +808 69 optimizer """adadelta""" +808 69 training_loop """lcwa""" +808 69 evaluator """rankbased""" +808 70 dataset """kinships""" +808 70 model """transe""" +808 70 loss """softplus""" +808 70 regularizer """no""" +808 70 optimizer """adadelta""" +808 70 training_loop """lcwa""" +808 70 evaluator """rankbased""" +808 71 dataset """kinships""" +808 71 model """transe""" +808 71 loss """softplus""" +808 71 regularizer """no""" +808 71 optimizer """adadelta""" +808 71 training_loop """lcwa""" +808 71 evaluator """rankbased""" +808 72 dataset """kinships""" +808 72 model """transe""" +808 72 loss """softplus""" +808 72 regularizer """no""" +808 72 optimizer """adadelta""" +808 72 training_loop """lcwa""" +808 72 evaluator """rankbased""" +808 73 dataset """kinships""" +808 73 model """transe""" +808 73 loss """softplus""" +808 73 regularizer """no""" +808 73 optimizer """adadelta""" +808 73 training_loop """lcwa""" +808 73 evaluator """rankbased""" +808 74 dataset """kinships""" +808 74 model """transe""" +808 74 loss """softplus""" +808 74 regularizer """no""" +808 74 optimizer """adadelta""" +808 74 training_loop """lcwa""" +808 74 evaluator """rankbased""" +808 75 dataset """kinships""" +808 75 model """transe""" +808 75 loss """softplus""" +808 75 regularizer """no""" +808 75 optimizer """adadelta""" +808 75 training_loop """lcwa""" +808 75 evaluator """rankbased""" +808 76 dataset """kinships""" +808 76 model """transe""" +808 76 loss """softplus""" +808 76 regularizer """no""" +808 76 optimizer """adadelta""" +808 76 training_loop """lcwa""" +808 76 evaluator """rankbased""" +808 77 dataset """kinships""" +808 77 model """transe""" +808 77 loss """softplus""" +808 77 regularizer """no""" +808 77 optimizer """adadelta""" +808 77 training_loop """lcwa""" +808 77 evaluator """rankbased""" +808 78 dataset """kinships""" +808 78 model """transe""" +808 78 loss """softplus""" +808 78 regularizer """no""" +808 78 
optimizer """adadelta""" +808 78 training_loop """lcwa""" +808 78 evaluator """rankbased""" +808 79 dataset """kinships""" +808 79 model """transe""" +808 79 loss """softplus""" +808 79 regularizer """no""" +808 79 optimizer """adadelta""" +808 79 training_loop """lcwa""" +808 79 evaluator """rankbased""" +808 80 dataset """kinships""" +808 80 model """transe""" +808 80 loss """softplus""" +808 80 regularizer """no""" +808 80 optimizer """adadelta""" +808 80 training_loop """lcwa""" +808 80 evaluator """rankbased""" +808 81 dataset """kinships""" +808 81 model """transe""" +808 81 loss """softplus""" +808 81 regularizer """no""" +808 81 optimizer """adadelta""" +808 81 training_loop """lcwa""" +808 81 evaluator """rankbased""" +808 82 dataset """kinships""" +808 82 model """transe""" +808 82 loss """softplus""" +808 82 regularizer """no""" +808 82 optimizer """adadelta""" +808 82 training_loop """lcwa""" +808 82 evaluator """rankbased""" +808 83 dataset """kinships""" +808 83 model """transe""" +808 83 loss """softplus""" +808 83 regularizer """no""" +808 83 optimizer """adadelta""" +808 83 training_loop """lcwa""" +808 83 evaluator """rankbased""" +808 84 dataset """kinships""" +808 84 model """transe""" +808 84 loss """softplus""" +808 84 regularizer """no""" +808 84 optimizer """adadelta""" +808 84 training_loop """lcwa""" +808 84 evaluator """rankbased""" +808 85 dataset """kinships""" +808 85 model """transe""" +808 85 loss """softplus""" +808 85 regularizer """no""" +808 85 optimizer """adadelta""" +808 85 training_loop """lcwa""" +808 85 evaluator """rankbased""" +808 86 dataset """kinships""" +808 86 model """transe""" +808 86 loss """softplus""" +808 86 regularizer """no""" +808 86 optimizer """adadelta""" +808 86 training_loop """lcwa""" +808 86 evaluator """rankbased""" +808 87 dataset """kinships""" +808 87 model """transe""" +808 87 loss """softplus""" +808 87 regularizer """no""" +808 87 optimizer """adadelta""" +808 87 training_loop """lcwa""" +808 
87 evaluator """rankbased""" +808 88 dataset """kinships""" +808 88 model """transe""" +808 88 loss """softplus""" +808 88 regularizer """no""" +808 88 optimizer """adadelta""" +808 88 training_loop """lcwa""" +808 88 evaluator """rankbased""" +808 89 dataset """kinships""" +808 89 model """transe""" +808 89 loss """softplus""" +808 89 regularizer """no""" +808 89 optimizer """adadelta""" +808 89 training_loop """lcwa""" +808 89 evaluator """rankbased""" +808 90 dataset """kinships""" +808 90 model """transe""" +808 90 loss """softplus""" +808 90 regularizer """no""" +808 90 optimizer """adadelta""" +808 90 training_loop """lcwa""" +808 90 evaluator """rankbased""" +808 91 dataset """kinships""" +808 91 model """transe""" +808 91 loss """softplus""" +808 91 regularizer """no""" +808 91 optimizer """adadelta""" +808 91 training_loop """lcwa""" +808 91 evaluator """rankbased""" +808 92 dataset """kinships""" +808 92 model """transe""" +808 92 loss """softplus""" +808 92 regularizer """no""" +808 92 optimizer """adadelta""" +808 92 training_loop """lcwa""" +808 92 evaluator """rankbased""" +808 93 dataset """kinships""" +808 93 model """transe""" +808 93 loss """softplus""" +808 93 regularizer """no""" +808 93 optimizer """adadelta""" +808 93 training_loop """lcwa""" +808 93 evaluator """rankbased""" +808 94 dataset """kinships""" +808 94 model """transe""" +808 94 loss """softplus""" +808 94 regularizer """no""" +808 94 optimizer """adadelta""" +808 94 training_loop """lcwa""" +808 94 evaluator """rankbased""" +808 95 dataset """kinships""" +808 95 model """transe""" +808 95 loss """softplus""" +808 95 regularizer """no""" +808 95 optimizer """adadelta""" +808 95 training_loop """lcwa""" +808 95 evaluator """rankbased""" +808 96 dataset """kinships""" +808 96 model """transe""" +808 96 loss """softplus""" +808 96 regularizer """no""" +808 96 optimizer """adadelta""" +808 96 training_loop """lcwa""" +808 96 evaluator """rankbased""" +808 97 dataset """kinships""" +808 
97 model """transe""" +808 97 loss """softplus""" +808 97 regularizer """no""" +808 97 optimizer """adadelta""" +808 97 training_loop """lcwa""" +808 97 evaluator """rankbased""" +808 98 dataset """kinships""" +808 98 model """transe""" +808 98 loss """softplus""" +808 98 regularizer """no""" +808 98 optimizer """adadelta""" +808 98 training_loop """lcwa""" +808 98 evaluator """rankbased""" +808 99 dataset """kinships""" +808 99 model """transe""" +808 99 loss """softplus""" +808 99 regularizer """no""" +808 99 optimizer """adadelta""" +808 99 training_loop """lcwa""" +808 99 evaluator """rankbased""" +808 100 dataset """kinships""" +808 100 model """transe""" +808 100 loss """softplus""" +808 100 regularizer """no""" +808 100 optimizer """adadelta""" +808 100 training_loop """lcwa""" +808 100 evaluator """rankbased""" +809 1 model.embedding_dim 0.0 +809 1 model.scoring_fct_norm 2.0 +809 1 training.batch_size 1.0 +809 1 training.label_smoothing 0.08658278862397681 +809 2 model.embedding_dim 0.0 +809 2 model.scoring_fct_norm 2.0 +809 2 training.batch_size 1.0 +809 2 training.label_smoothing 0.4407556285551389 +809 3 model.embedding_dim 2.0 +809 3 model.scoring_fct_norm 1.0 +809 3 training.batch_size 0.0 +809 3 training.label_smoothing 0.00824364756924427 +809 4 model.embedding_dim 0.0 +809 4 model.scoring_fct_norm 2.0 +809 4 training.batch_size 0.0 +809 4 training.label_smoothing 0.7962934483695586 +809 5 model.embedding_dim 2.0 +809 5 model.scoring_fct_norm 2.0 +809 5 training.batch_size 0.0 +809 5 training.label_smoothing 0.05171138307251873 +809 6 model.embedding_dim 2.0 +809 6 model.scoring_fct_norm 1.0 +809 6 training.batch_size 0.0 +809 6 training.label_smoothing 0.7227308122316711 +809 7 model.embedding_dim 1.0 +809 7 model.scoring_fct_norm 2.0 +809 7 training.batch_size 1.0 +809 7 training.label_smoothing 0.7525004214016613 +809 8 model.embedding_dim 2.0 +809 8 model.scoring_fct_norm 2.0 +809 8 training.batch_size 1.0 +809 8 training.label_smoothing 
0.25718701904315555 +809 9 model.embedding_dim 2.0 +809 9 model.scoring_fct_norm 2.0 +809 9 training.batch_size 1.0 +809 9 training.label_smoothing 0.018011688735990985 +809 10 model.embedding_dim 0.0 +809 10 model.scoring_fct_norm 1.0 +809 10 training.batch_size 0.0 +809 10 training.label_smoothing 0.7297172471433439 +809 11 model.embedding_dim 1.0 +809 11 model.scoring_fct_norm 2.0 +809 11 training.batch_size 1.0 +809 11 training.label_smoothing 0.004492094626875937 +809 12 model.embedding_dim 1.0 +809 12 model.scoring_fct_norm 1.0 +809 12 training.batch_size 1.0 +809 12 training.label_smoothing 0.008506644048052396 +809 13 model.embedding_dim 0.0 +809 13 model.scoring_fct_norm 1.0 +809 13 training.batch_size 1.0 +809 13 training.label_smoothing 0.2534528925476645 +809 14 model.embedding_dim 1.0 +809 14 model.scoring_fct_norm 2.0 +809 14 training.batch_size 2.0 +809 14 training.label_smoothing 0.29790994282509947 +809 15 model.embedding_dim 0.0 +809 15 model.scoring_fct_norm 2.0 +809 15 training.batch_size 2.0 +809 15 training.label_smoothing 0.0013076351199878401 +809 16 model.embedding_dim 2.0 +809 16 model.scoring_fct_norm 2.0 +809 16 training.batch_size 2.0 +809 16 training.label_smoothing 0.14202983423981164 +809 17 model.embedding_dim 2.0 +809 17 model.scoring_fct_norm 1.0 +809 17 training.batch_size 1.0 +809 17 training.label_smoothing 0.3658939559200047 +809 18 model.embedding_dim 0.0 +809 18 model.scoring_fct_norm 1.0 +809 18 training.batch_size 2.0 +809 18 training.label_smoothing 0.172698740032436 +809 19 model.embedding_dim 0.0 +809 19 model.scoring_fct_norm 2.0 +809 19 training.batch_size 1.0 +809 19 training.label_smoothing 0.3935085283567949 +809 20 model.embedding_dim 0.0 +809 20 model.scoring_fct_norm 1.0 +809 20 training.batch_size 1.0 +809 20 training.label_smoothing 0.04888504335865334 +809 21 model.embedding_dim 0.0 +809 21 model.scoring_fct_norm 2.0 +809 21 training.batch_size 1.0 +809 21 training.label_smoothing 0.0111130436432956 +809 22 
model.embedding_dim 2.0 +809 22 model.scoring_fct_norm 1.0 +809 22 training.batch_size 0.0 +809 22 training.label_smoothing 0.3221135618583535 +809 23 model.embedding_dim 0.0 +809 23 model.scoring_fct_norm 1.0 +809 23 training.batch_size 1.0 +809 23 training.label_smoothing 0.050284332734080534 +809 24 model.embedding_dim 1.0 +809 24 model.scoring_fct_norm 1.0 +809 24 training.batch_size 0.0 +809 24 training.label_smoothing 0.03768416581794943 +809 25 model.embedding_dim 2.0 +809 25 model.scoring_fct_norm 1.0 +809 25 training.batch_size 0.0 +809 25 training.label_smoothing 0.03309139033050449 +809 26 model.embedding_dim 2.0 +809 26 model.scoring_fct_norm 1.0 +809 26 training.batch_size 0.0 +809 26 training.label_smoothing 0.7883064028266159 +809 27 model.embedding_dim 1.0 +809 27 model.scoring_fct_norm 1.0 +809 27 training.batch_size 1.0 +809 27 training.label_smoothing 0.05568923943318721 +809 28 model.embedding_dim 2.0 +809 28 model.scoring_fct_norm 1.0 +809 28 training.batch_size 1.0 +809 28 training.label_smoothing 0.009237422075865292 +809 29 model.embedding_dim 2.0 +809 29 model.scoring_fct_norm 2.0 +809 29 training.batch_size 2.0 +809 29 training.label_smoothing 0.04164986252942005 +809 30 model.embedding_dim 1.0 +809 30 model.scoring_fct_norm 2.0 +809 30 training.batch_size 0.0 +809 30 training.label_smoothing 0.21896395286264042 +809 31 model.embedding_dim 1.0 +809 31 model.scoring_fct_norm 1.0 +809 31 training.batch_size 0.0 +809 31 training.label_smoothing 0.8571441807824663 +809 32 model.embedding_dim 0.0 +809 32 model.scoring_fct_norm 2.0 +809 32 training.batch_size 0.0 +809 32 training.label_smoothing 0.18936687094662094 +809 33 model.embedding_dim 0.0 +809 33 model.scoring_fct_norm 1.0 +809 33 training.batch_size 1.0 +809 33 training.label_smoothing 0.00113942817390018 +809 34 model.embedding_dim 1.0 +809 34 model.scoring_fct_norm 1.0 +809 34 training.batch_size 0.0 +809 34 training.label_smoothing 0.010950424699797567 +809 35 model.embedding_dim 2.0 
+809 35 model.scoring_fct_norm 1.0 +809 35 training.batch_size 0.0 +809 35 training.label_smoothing 0.038141529186638386 +809 36 model.embedding_dim 0.0 +809 36 model.scoring_fct_norm 2.0 +809 36 training.batch_size 0.0 +809 36 training.label_smoothing 0.616554962205647 +809 37 model.embedding_dim 0.0 +809 37 model.scoring_fct_norm 1.0 +809 37 training.batch_size 1.0 +809 37 training.label_smoothing 0.002500282171005476 +809 38 model.embedding_dim 2.0 +809 38 model.scoring_fct_norm 1.0 +809 38 training.batch_size 1.0 +809 38 training.label_smoothing 0.047766415065591185 +809 39 model.embedding_dim 1.0 +809 39 model.scoring_fct_norm 2.0 +809 39 training.batch_size 1.0 +809 39 training.label_smoothing 0.3265745787883297 +809 40 model.embedding_dim 1.0 +809 40 model.scoring_fct_norm 1.0 +809 40 training.batch_size 2.0 +809 40 training.label_smoothing 0.09887197904514466 +809 41 model.embedding_dim 1.0 +809 41 model.scoring_fct_norm 1.0 +809 41 training.batch_size 0.0 +809 41 training.label_smoothing 0.12406738450074409 +809 42 model.embedding_dim 1.0 +809 42 model.scoring_fct_norm 2.0 +809 42 training.batch_size 1.0 +809 42 training.label_smoothing 0.09045721488814405 +809 43 model.embedding_dim 2.0 +809 43 model.scoring_fct_norm 1.0 +809 43 training.batch_size 1.0 +809 43 training.label_smoothing 0.00939049101716577 +809 44 model.embedding_dim 0.0 +809 44 model.scoring_fct_norm 2.0 +809 44 training.batch_size 1.0 +809 44 training.label_smoothing 0.014746567423471942 +809 45 model.embedding_dim 0.0 +809 45 model.scoring_fct_norm 2.0 +809 45 training.batch_size 1.0 +809 45 training.label_smoothing 0.012592836444726942 +809 46 model.embedding_dim 2.0 +809 46 model.scoring_fct_norm 2.0 +809 46 training.batch_size 1.0 +809 46 training.label_smoothing 0.02034921894622596 +809 47 model.embedding_dim 2.0 +809 47 model.scoring_fct_norm 2.0 +809 47 training.batch_size 0.0 +809 47 training.label_smoothing 0.40444151312614873 +809 48 model.embedding_dim 1.0 +809 48 
model.scoring_fct_norm 1.0 +809 48 training.batch_size 1.0 +809 48 training.label_smoothing 0.0010029701079975982 +809 49 model.embedding_dim 1.0 +809 49 model.scoring_fct_norm 2.0 +809 49 training.batch_size 2.0 +809 49 training.label_smoothing 0.057605647714264914 +809 50 model.embedding_dim 0.0 +809 50 model.scoring_fct_norm 2.0 +809 50 training.batch_size 2.0 +809 50 training.label_smoothing 0.05799807075989874 +809 51 model.embedding_dim 1.0 +809 51 model.scoring_fct_norm 2.0 +809 51 training.batch_size 2.0 +809 51 training.label_smoothing 0.6987996571161729 +809 52 model.embedding_dim 2.0 +809 52 model.scoring_fct_norm 2.0 +809 52 training.batch_size 1.0 +809 52 training.label_smoothing 0.0056874734126458725 +809 53 model.embedding_dim 1.0 +809 53 model.scoring_fct_norm 2.0 +809 53 training.batch_size 2.0 +809 53 training.label_smoothing 0.1776074712409769 +809 54 model.embedding_dim 0.0 +809 54 model.scoring_fct_norm 1.0 +809 54 training.batch_size 1.0 +809 54 training.label_smoothing 0.08019045635824688 +809 55 model.embedding_dim 1.0 +809 55 model.scoring_fct_norm 2.0 +809 55 training.batch_size 1.0 +809 55 training.label_smoothing 0.13381904463105684 +809 56 model.embedding_dim 2.0 +809 56 model.scoring_fct_norm 1.0 +809 56 training.batch_size 2.0 +809 56 training.label_smoothing 0.05709521124254935 +809 57 model.embedding_dim 2.0 +809 57 model.scoring_fct_norm 2.0 +809 57 training.batch_size 1.0 +809 57 training.label_smoothing 0.9682241785955341 +809 58 model.embedding_dim 2.0 +809 58 model.scoring_fct_norm 1.0 +809 58 training.batch_size 0.0 +809 58 training.label_smoothing 0.01434999790038188 +809 59 model.embedding_dim 1.0 +809 59 model.scoring_fct_norm 1.0 +809 59 training.batch_size 2.0 +809 59 training.label_smoothing 0.10756286695643923 +809 60 model.embedding_dim 2.0 +809 60 model.scoring_fct_norm 2.0 +809 60 training.batch_size 2.0 +809 60 training.label_smoothing 0.9015973443566392 +809 61 model.embedding_dim 1.0 +809 61 model.scoring_fct_norm 
1.0 +809 61 training.batch_size 1.0 +809 61 training.label_smoothing 0.0023129705619900592 +809 62 model.embedding_dim 1.0 +809 62 model.scoring_fct_norm 2.0 +809 62 training.batch_size 2.0 +809 62 training.label_smoothing 0.001650359821706071 +809 63 model.embedding_dim 0.0 +809 63 model.scoring_fct_norm 2.0 +809 63 training.batch_size 1.0 +809 63 training.label_smoothing 0.7992798264107634 +809 64 model.embedding_dim 0.0 +809 64 model.scoring_fct_norm 2.0 +809 64 training.batch_size 2.0 +809 64 training.label_smoothing 0.00808262768016314 +809 65 model.embedding_dim 2.0 +809 65 model.scoring_fct_norm 2.0 +809 65 training.batch_size 1.0 +809 65 training.label_smoothing 0.009288496485000904 +809 66 model.embedding_dim 2.0 +809 66 model.scoring_fct_norm 2.0 +809 66 training.batch_size 1.0 +809 66 training.label_smoothing 0.06870530129591312 +809 67 model.embedding_dim 1.0 +809 67 model.scoring_fct_norm 2.0 +809 67 training.batch_size 2.0 +809 67 training.label_smoothing 0.011438635056776364 +809 68 model.embedding_dim 2.0 +809 68 model.scoring_fct_norm 2.0 +809 68 training.batch_size 2.0 +809 68 training.label_smoothing 0.0038276003395246623 +809 69 model.embedding_dim 1.0 +809 69 model.scoring_fct_norm 2.0 +809 69 training.batch_size 1.0 +809 69 training.label_smoothing 0.3429986150283974 +809 70 model.embedding_dim 1.0 +809 70 model.scoring_fct_norm 2.0 +809 70 training.batch_size 0.0 +809 70 training.label_smoothing 0.2327162795492455 +809 71 model.embedding_dim 1.0 +809 71 model.scoring_fct_norm 1.0 +809 71 training.batch_size 2.0 +809 71 training.label_smoothing 0.003747899511724432 +809 72 model.embedding_dim 0.0 +809 72 model.scoring_fct_norm 2.0 +809 72 training.batch_size 0.0 +809 72 training.label_smoothing 0.02157942063732315 +809 73 model.embedding_dim 2.0 +809 73 model.scoring_fct_norm 1.0 +809 73 training.batch_size 1.0 +809 73 training.label_smoothing 0.9201587911876172 +809 74 model.embedding_dim 1.0 +809 74 model.scoring_fct_norm 1.0 +809 74 
training.batch_size 1.0 +809 74 training.label_smoothing 0.014385423856984304 +809 75 model.embedding_dim 0.0 +809 75 model.scoring_fct_norm 2.0 +809 75 training.batch_size 0.0 +809 75 training.label_smoothing 0.02626005338329638 +809 76 model.embedding_dim 0.0 +809 76 model.scoring_fct_norm 2.0 +809 76 training.batch_size 1.0 +809 76 training.label_smoothing 0.011243536139195624 +809 77 model.embedding_dim 1.0 +809 77 model.scoring_fct_norm 2.0 +809 77 training.batch_size 0.0 +809 77 training.label_smoothing 0.04030151336268814 +809 78 model.embedding_dim 2.0 +809 78 model.scoring_fct_norm 1.0 +809 78 training.batch_size 1.0 +809 78 training.label_smoothing 0.02867358080374974 +809 79 model.embedding_dim 2.0 +809 79 model.scoring_fct_norm 1.0 +809 79 training.batch_size 0.0 +809 79 training.label_smoothing 0.0016364842100459868 +809 80 model.embedding_dim 2.0 +809 80 model.scoring_fct_norm 1.0 +809 80 training.batch_size 0.0 +809 80 training.label_smoothing 0.9313987044579384 +809 81 model.embedding_dim 2.0 +809 81 model.scoring_fct_norm 1.0 +809 81 training.batch_size 0.0 +809 81 training.label_smoothing 0.2516072231915892 +809 82 model.embedding_dim 1.0 +809 82 model.scoring_fct_norm 2.0 +809 82 training.batch_size 1.0 +809 82 training.label_smoothing 0.07268658686826544 +809 83 model.embedding_dim 2.0 +809 83 model.scoring_fct_norm 1.0 +809 83 training.batch_size 2.0 +809 83 training.label_smoothing 0.08477950334147294 +809 84 model.embedding_dim 2.0 +809 84 model.scoring_fct_norm 1.0 +809 84 training.batch_size 2.0 +809 84 training.label_smoothing 0.0024116028694297085 +809 85 model.embedding_dim 2.0 +809 85 model.scoring_fct_norm 2.0 +809 85 training.batch_size 1.0 +809 85 training.label_smoothing 0.0032419872843228686 +809 86 model.embedding_dim 2.0 +809 86 model.scoring_fct_norm 1.0 +809 86 training.batch_size 0.0 +809 86 training.label_smoothing 0.8741708940896777 +809 87 model.embedding_dim 0.0 +809 87 model.scoring_fct_norm 1.0 +809 87 
training.batch_size 0.0 +809 87 training.label_smoothing 0.22794295981517207 +809 88 model.embedding_dim 2.0 +809 88 model.scoring_fct_norm 2.0 +809 88 training.batch_size 1.0 +809 88 training.label_smoothing 0.005956127172697486 +809 89 model.embedding_dim 0.0 +809 89 model.scoring_fct_norm 1.0 +809 89 training.batch_size 0.0 +809 89 training.label_smoothing 0.020899461646409687 +809 90 model.embedding_dim 0.0 +809 90 model.scoring_fct_norm 1.0 +809 90 training.batch_size 2.0 +809 90 training.label_smoothing 0.07025154720740141 +809 91 model.embedding_dim 2.0 +809 91 model.scoring_fct_norm 2.0 +809 91 training.batch_size 0.0 +809 91 training.label_smoothing 0.23224672714696493 +809 92 model.embedding_dim 0.0 +809 92 model.scoring_fct_norm 1.0 +809 92 training.batch_size 1.0 +809 92 training.label_smoothing 0.1804210629682932 +809 93 model.embedding_dim 0.0 +809 93 model.scoring_fct_norm 2.0 +809 93 training.batch_size 2.0 +809 93 training.label_smoothing 0.06927978462210907 +809 94 model.embedding_dim 1.0 +809 94 model.scoring_fct_norm 2.0 +809 94 training.batch_size 0.0 +809 94 training.label_smoothing 0.7777206051141807 +809 95 model.embedding_dim 2.0 +809 95 model.scoring_fct_norm 2.0 +809 95 training.batch_size 1.0 +809 95 training.label_smoothing 0.0033923769630414464 +809 96 model.embedding_dim 0.0 +809 96 model.scoring_fct_norm 2.0 +809 96 training.batch_size 2.0 +809 96 training.label_smoothing 0.10807568919591586 +809 97 model.embedding_dim 1.0 +809 97 model.scoring_fct_norm 1.0 +809 97 training.batch_size 1.0 +809 97 training.label_smoothing 0.24625562268540613 +809 98 model.embedding_dim 0.0 +809 98 model.scoring_fct_norm 1.0 +809 98 training.batch_size 1.0 +809 98 training.label_smoothing 0.7887113062841365 +809 99 model.embedding_dim 0.0 +809 99 model.scoring_fct_norm 2.0 +809 99 training.batch_size 0.0 +809 99 training.label_smoothing 0.003192857325799549 +809 100 model.embedding_dim 1.0 +809 100 model.scoring_fct_norm 2.0 +809 100 
training.batch_size 0.0 +809 100 training.label_smoothing 0.21404805899204268 +809 1 dataset """kinships""" +809 1 model """transe""" +809 1 loss """crossentropy""" +809 1 regularizer """no""" +809 1 optimizer """adadelta""" +809 1 training_loop """lcwa""" +809 1 evaluator """rankbased""" +809 2 dataset """kinships""" +809 2 model """transe""" +809 2 loss """crossentropy""" +809 2 regularizer """no""" +809 2 optimizer """adadelta""" +809 2 training_loop """lcwa""" +809 2 evaluator """rankbased""" +809 3 dataset """kinships""" +809 3 model """transe""" +809 3 loss """crossentropy""" +809 3 regularizer """no""" +809 3 optimizer """adadelta""" +809 3 training_loop """lcwa""" +809 3 evaluator """rankbased""" +809 4 dataset """kinships""" +809 4 model """transe""" +809 4 loss """crossentropy""" +809 4 regularizer """no""" +809 4 optimizer """adadelta""" +809 4 training_loop """lcwa""" +809 4 evaluator """rankbased""" +809 5 dataset """kinships""" +809 5 model """transe""" +809 5 loss """crossentropy""" +809 5 regularizer """no""" +809 5 optimizer """adadelta""" +809 5 training_loop """lcwa""" +809 5 evaluator """rankbased""" +809 6 dataset """kinships""" +809 6 model """transe""" +809 6 loss """crossentropy""" +809 6 regularizer """no""" +809 6 optimizer """adadelta""" +809 6 training_loop """lcwa""" +809 6 evaluator """rankbased""" +809 7 dataset """kinships""" +809 7 model """transe""" +809 7 loss """crossentropy""" +809 7 regularizer """no""" +809 7 optimizer """adadelta""" +809 7 training_loop """lcwa""" +809 7 evaluator """rankbased""" +809 8 dataset """kinships""" +809 8 model """transe""" +809 8 loss """crossentropy""" +809 8 regularizer """no""" +809 8 optimizer """adadelta""" +809 8 training_loop """lcwa""" +809 8 evaluator """rankbased""" +809 9 dataset """kinships""" +809 9 model """transe""" +809 9 loss """crossentropy""" +809 9 regularizer """no""" +809 9 optimizer """adadelta""" +809 9 training_loop """lcwa""" +809 9 evaluator """rankbased""" +809 10 
dataset """kinships""" +809 10 model """transe""" +809 10 loss """crossentropy""" +809 10 regularizer """no""" +809 10 optimizer """adadelta""" +809 10 training_loop """lcwa""" +809 10 evaluator """rankbased""" +809 11 dataset """kinships""" +809 11 model """transe""" +809 11 loss """crossentropy""" +809 11 regularizer """no""" +809 11 optimizer """adadelta""" +809 11 training_loop """lcwa""" +809 11 evaluator """rankbased""" +809 12 dataset """kinships""" +809 12 model """transe""" +809 12 loss """crossentropy""" +809 12 regularizer """no""" +809 12 optimizer """adadelta""" +809 12 training_loop """lcwa""" +809 12 evaluator """rankbased""" +809 13 dataset """kinships""" +809 13 model """transe""" +809 13 loss """crossentropy""" +809 13 regularizer """no""" +809 13 optimizer """adadelta""" +809 13 training_loop """lcwa""" +809 13 evaluator """rankbased""" +809 14 dataset """kinships""" +809 14 model """transe""" +809 14 loss """crossentropy""" +809 14 regularizer """no""" +809 14 optimizer """adadelta""" +809 14 training_loop """lcwa""" +809 14 evaluator """rankbased""" +809 15 dataset """kinships""" +809 15 model """transe""" +809 15 loss """crossentropy""" +809 15 regularizer """no""" +809 15 optimizer """adadelta""" +809 15 training_loop """lcwa""" +809 15 evaluator """rankbased""" +809 16 dataset """kinships""" +809 16 model """transe""" +809 16 loss """crossentropy""" +809 16 regularizer """no""" +809 16 optimizer """adadelta""" +809 16 training_loop """lcwa""" +809 16 evaluator """rankbased""" +809 17 dataset """kinships""" +809 17 model """transe""" +809 17 loss """crossentropy""" +809 17 regularizer """no""" +809 17 optimizer """adadelta""" +809 17 training_loop """lcwa""" +809 17 evaluator """rankbased""" +809 18 dataset """kinships""" +809 18 model """transe""" +809 18 loss """crossentropy""" +809 18 regularizer """no""" +809 18 optimizer """adadelta""" +809 18 training_loop """lcwa""" +809 18 evaluator """rankbased""" +809 19 dataset """kinships""" +809 
19 model """transe""" +809 19 loss """crossentropy""" +809 19 regularizer """no""" +809 19 optimizer """adadelta""" +809 19 training_loop """lcwa""" +809 19 evaluator """rankbased""" +809 20 dataset """kinships""" +809 20 model """transe""" +809 20 loss """crossentropy""" +809 20 regularizer """no""" +809 20 optimizer """adadelta""" +809 20 training_loop """lcwa""" +809 20 evaluator """rankbased""" +809 21 dataset """kinships""" +809 21 model """transe""" +809 21 loss """crossentropy""" +809 21 regularizer """no""" +809 21 optimizer """adadelta""" +809 21 training_loop """lcwa""" +809 21 evaluator """rankbased""" +809 22 dataset """kinships""" +809 22 model """transe""" +809 22 loss """crossentropy""" +809 22 regularizer """no""" +809 22 optimizer """adadelta""" +809 22 training_loop """lcwa""" +809 22 evaluator """rankbased""" +809 23 dataset """kinships""" +809 23 model """transe""" +809 23 loss """crossentropy""" +809 23 regularizer """no""" +809 23 optimizer """adadelta""" +809 23 training_loop """lcwa""" +809 23 evaluator """rankbased""" +809 24 dataset """kinships""" +809 24 model """transe""" +809 24 loss """crossentropy""" +809 24 regularizer """no""" +809 24 optimizer """adadelta""" +809 24 training_loop """lcwa""" +809 24 evaluator """rankbased""" +809 25 dataset """kinships""" +809 25 model """transe""" +809 25 loss """crossentropy""" +809 25 regularizer """no""" +809 25 optimizer """adadelta""" +809 25 training_loop """lcwa""" +809 25 evaluator """rankbased""" +809 26 dataset """kinships""" +809 26 model """transe""" +809 26 loss """crossentropy""" +809 26 regularizer """no""" +809 26 optimizer """adadelta""" +809 26 training_loop """lcwa""" +809 26 evaluator """rankbased""" +809 27 dataset """kinships""" +809 27 model """transe""" +809 27 loss """crossentropy""" +809 27 regularizer """no""" +809 27 optimizer """adadelta""" +809 27 training_loop """lcwa""" +809 27 evaluator """rankbased""" +809 28 dataset """kinships""" +809 28 model """transe""" +809 
28 loss """crossentropy""" +809 28 regularizer """no""" +809 28 optimizer """adadelta""" +809 28 training_loop """lcwa""" +809 28 evaluator """rankbased""" +809 29 dataset """kinships""" +809 29 model """transe""" +809 29 loss """crossentropy""" +809 29 regularizer """no""" +809 29 optimizer """adadelta""" +809 29 training_loop """lcwa""" +809 29 evaluator """rankbased""" +809 30 dataset """kinships""" +809 30 model """transe""" +809 30 loss """crossentropy""" +809 30 regularizer """no""" +809 30 optimizer """adadelta""" +809 30 training_loop """lcwa""" +809 30 evaluator """rankbased""" +809 31 dataset """kinships""" +809 31 model """transe""" +809 31 loss """crossentropy""" +809 31 regularizer """no""" +809 31 optimizer """adadelta""" +809 31 training_loop """lcwa""" +809 31 evaluator """rankbased""" +809 32 dataset """kinships""" +809 32 model """transe""" +809 32 loss """crossentropy""" +809 32 regularizer """no""" +809 32 optimizer """adadelta""" +809 32 training_loop """lcwa""" +809 32 evaluator """rankbased""" +809 33 dataset """kinships""" +809 33 model """transe""" +809 33 loss """crossentropy""" +809 33 regularizer """no""" +809 33 optimizer """adadelta""" +809 33 training_loop """lcwa""" +809 33 evaluator """rankbased""" +809 34 dataset """kinships""" +809 34 model """transe""" +809 34 loss """crossentropy""" +809 34 regularizer """no""" +809 34 optimizer """adadelta""" +809 34 training_loop """lcwa""" +809 34 evaluator """rankbased""" +809 35 dataset """kinships""" +809 35 model """transe""" +809 35 loss """crossentropy""" +809 35 regularizer """no""" +809 35 optimizer """adadelta""" +809 35 training_loop """lcwa""" +809 35 evaluator """rankbased""" +809 36 dataset """kinships""" +809 36 model """transe""" +809 36 loss """crossentropy""" +809 36 regularizer """no""" +809 36 optimizer """adadelta""" +809 36 training_loop """lcwa""" +809 36 evaluator """rankbased""" +809 37 dataset """kinships""" +809 37 model """transe""" +809 37 loss """crossentropy""" 
+809 37 regularizer """no""" +809 37 optimizer """adadelta""" +809 37 training_loop """lcwa""" +809 37 evaluator """rankbased""" +809 38 dataset """kinships""" +809 38 model """transe""" +809 38 loss """crossentropy""" +809 38 regularizer """no""" +809 38 optimizer """adadelta""" +809 38 training_loop """lcwa""" +809 38 evaluator """rankbased""" +809 39 dataset """kinships""" +809 39 model """transe""" +809 39 loss """crossentropy""" +809 39 regularizer """no""" +809 39 optimizer """adadelta""" +809 39 training_loop """lcwa""" +809 39 evaluator """rankbased""" +809 40 dataset """kinships""" +809 40 model """transe""" +809 40 loss """crossentropy""" +809 40 regularizer """no""" +809 40 optimizer """adadelta""" +809 40 training_loop """lcwa""" +809 40 evaluator """rankbased""" +809 41 dataset """kinships""" +809 41 model """transe""" +809 41 loss """crossentropy""" +809 41 regularizer """no""" +809 41 optimizer """adadelta""" +809 41 training_loop """lcwa""" +809 41 evaluator """rankbased""" +809 42 dataset """kinships""" +809 42 model """transe""" +809 42 loss """crossentropy""" +809 42 regularizer """no""" +809 42 optimizer """adadelta""" +809 42 training_loop """lcwa""" +809 42 evaluator """rankbased""" +809 43 dataset """kinships""" +809 43 model """transe""" +809 43 loss """crossentropy""" +809 43 regularizer """no""" +809 43 optimizer """adadelta""" +809 43 training_loop """lcwa""" +809 43 evaluator """rankbased""" +809 44 dataset """kinships""" +809 44 model """transe""" +809 44 loss """crossentropy""" +809 44 regularizer """no""" +809 44 optimizer """adadelta""" +809 44 training_loop """lcwa""" +809 44 evaluator """rankbased""" +809 45 dataset """kinships""" +809 45 model """transe""" +809 45 loss """crossentropy""" +809 45 regularizer """no""" +809 45 optimizer """adadelta""" +809 45 training_loop """lcwa""" +809 45 evaluator """rankbased""" +809 46 dataset """kinships""" +809 46 model """transe""" +809 46 loss """crossentropy""" +809 46 regularizer """no""" 
+809 46 optimizer """adadelta""" +809 46 training_loop """lcwa""" +809 46 evaluator """rankbased""" +809 47 dataset """kinships""" +809 47 model """transe""" +809 47 loss """crossentropy""" +809 47 regularizer """no""" +809 47 optimizer """adadelta""" +809 47 training_loop """lcwa""" +809 47 evaluator """rankbased""" +809 48 dataset """kinships""" +809 48 model """transe""" +809 48 loss """crossentropy""" +809 48 regularizer """no""" +809 48 optimizer """adadelta""" +809 48 training_loop """lcwa""" +809 48 evaluator """rankbased""" +809 49 dataset """kinships""" +809 49 model """transe""" +809 49 loss """crossentropy""" +809 49 regularizer """no""" +809 49 optimizer """adadelta""" +809 49 training_loop """lcwa""" +809 49 evaluator """rankbased""" +809 50 dataset """kinships""" +809 50 model """transe""" +809 50 loss """crossentropy""" +809 50 regularizer """no""" +809 50 optimizer """adadelta""" +809 50 training_loop """lcwa""" +809 50 evaluator """rankbased""" +809 51 dataset """kinships""" +809 51 model """transe""" +809 51 loss """crossentropy""" +809 51 regularizer """no""" +809 51 optimizer """adadelta""" +809 51 training_loop """lcwa""" +809 51 evaluator """rankbased""" +809 52 dataset """kinships""" +809 52 model """transe""" +809 52 loss """crossentropy""" +809 52 regularizer """no""" +809 52 optimizer """adadelta""" +809 52 training_loop """lcwa""" +809 52 evaluator """rankbased""" +809 53 dataset """kinships""" +809 53 model """transe""" +809 53 loss """crossentropy""" +809 53 regularizer """no""" +809 53 optimizer """adadelta""" +809 53 training_loop """lcwa""" +809 53 evaluator """rankbased""" +809 54 dataset """kinships""" +809 54 model """transe""" +809 54 loss """crossentropy""" +809 54 regularizer """no""" +809 54 optimizer """adadelta""" +809 54 training_loop """lcwa""" +809 54 evaluator """rankbased""" +809 55 dataset """kinships""" +809 55 model """transe""" +809 55 loss """crossentropy""" +809 55 regularizer """no""" +809 55 optimizer 
"""adadelta""" +809 55 training_loop """lcwa""" +809 55 evaluator """rankbased""" +809 56 dataset """kinships""" +809 56 model """transe""" +809 56 loss """crossentropy""" +809 56 regularizer """no""" +809 56 optimizer """adadelta""" +809 56 training_loop """lcwa""" +809 56 evaluator """rankbased""" +809 57 dataset """kinships""" +809 57 model """transe""" +809 57 loss """crossentropy""" +809 57 regularizer """no""" +809 57 optimizer """adadelta""" +809 57 training_loop """lcwa""" +809 57 evaluator """rankbased""" +809 58 dataset """kinships""" +809 58 model """transe""" +809 58 loss """crossentropy""" +809 58 regularizer """no""" +809 58 optimizer """adadelta""" +809 58 training_loop """lcwa""" +809 58 evaluator """rankbased""" +809 59 dataset """kinships""" +809 59 model """transe""" +809 59 loss """crossentropy""" +809 59 regularizer """no""" +809 59 optimizer """adadelta""" +809 59 training_loop """lcwa""" +809 59 evaluator """rankbased""" +809 60 dataset """kinships""" +809 60 model """transe""" +809 60 loss """crossentropy""" +809 60 regularizer """no""" +809 60 optimizer """adadelta""" +809 60 training_loop """lcwa""" +809 60 evaluator """rankbased""" +809 61 dataset """kinships""" +809 61 model """transe""" +809 61 loss """crossentropy""" +809 61 regularizer """no""" +809 61 optimizer """adadelta""" +809 61 training_loop """lcwa""" +809 61 evaluator """rankbased""" +809 62 dataset """kinships""" +809 62 model """transe""" +809 62 loss """crossentropy""" +809 62 regularizer """no""" +809 62 optimizer """adadelta""" +809 62 training_loop """lcwa""" +809 62 evaluator """rankbased""" +809 63 dataset """kinships""" +809 63 model """transe""" +809 63 loss """crossentropy""" +809 63 regularizer """no""" +809 63 optimizer """adadelta""" +809 63 training_loop """lcwa""" +809 63 evaluator """rankbased""" +809 64 dataset """kinships""" +809 64 model """transe""" +809 64 loss """crossentropy""" +809 64 regularizer """no""" +809 64 optimizer """adadelta""" +809 64 
training_loop """lcwa""" +809 64 evaluator """rankbased""" +809 65 dataset """kinships""" +809 65 model """transe""" +809 65 loss """crossentropy""" +809 65 regularizer """no""" +809 65 optimizer """adadelta""" +809 65 training_loop """lcwa""" +809 65 evaluator """rankbased""" +809 66 dataset """kinships""" +809 66 model """transe""" +809 66 loss """crossentropy""" +809 66 regularizer """no""" +809 66 optimizer """adadelta""" +809 66 training_loop """lcwa""" +809 66 evaluator """rankbased""" +809 67 dataset """kinships""" +809 67 model """transe""" +809 67 loss """crossentropy""" +809 67 regularizer """no""" +809 67 optimizer """adadelta""" +809 67 training_loop """lcwa""" +809 67 evaluator """rankbased""" +809 68 dataset """kinships""" +809 68 model """transe""" +809 68 loss """crossentropy""" +809 68 regularizer """no""" +809 68 optimizer """adadelta""" +809 68 training_loop """lcwa""" +809 68 evaluator """rankbased""" +809 69 dataset """kinships""" +809 69 model """transe""" +809 69 loss """crossentropy""" +809 69 regularizer """no""" +809 69 optimizer """adadelta""" +809 69 training_loop """lcwa""" +809 69 evaluator """rankbased""" +809 70 dataset """kinships""" +809 70 model """transe""" +809 70 loss """crossentropy""" +809 70 regularizer """no""" +809 70 optimizer """adadelta""" +809 70 training_loop """lcwa""" +809 70 evaluator """rankbased""" +809 71 dataset """kinships""" +809 71 model """transe""" +809 71 loss """crossentropy""" +809 71 regularizer """no""" +809 71 optimizer """adadelta""" +809 71 training_loop """lcwa""" +809 71 evaluator """rankbased""" +809 72 dataset """kinships""" +809 72 model """transe""" +809 72 loss """crossentropy""" +809 72 regularizer """no""" +809 72 optimizer """adadelta""" +809 72 training_loop """lcwa""" +809 72 evaluator """rankbased""" +809 73 dataset """kinships""" +809 73 model """transe""" +809 73 loss """crossentropy""" +809 73 regularizer """no""" +809 73 optimizer """adadelta""" +809 73 training_loop """lcwa""" 
+809 73 evaluator """rankbased""" +809 74 dataset """kinships""" +809 74 model """transe""" +809 74 loss """crossentropy""" +809 74 regularizer """no""" +809 74 optimizer """adadelta""" +809 74 training_loop """lcwa""" +809 74 evaluator """rankbased""" +809 75 dataset """kinships""" +809 75 model """transe""" +809 75 loss """crossentropy""" +809 75 regularizer """no""" +809 75 optimizer """adadelta""" +809 75 training_loop """lcwa""" +809 75 evaluator """rankbased""" +809 76 dataset """kinships""" +809 76 model """transe""" +809 76 loss """crossentropy""" +809 76 regularizer """no""" +809 76 optimizer """adadelta""" +809 76 training_loop """lcwa""" +809 76 evaluator """rankbased""" +809 77 dataset """kinships""" +809 77 model """transe""" +809 77 loss """crossentropy""" +809 77 regularizer """no""" +809 77 optimizer """adadelta""" +809 77 training_loop """lcwa""" +809 77 evaluator """rankbased""" +809 78 dataset """kinships""" +809 78 model """transe""" +809 78 loss """crossentropy""" +809 78 regularizer """no""" +809 78 optimizer """adadelta""" +809 78 training_loop """lcwa""" +809 78 evaluator """rankbased""" +809 79 dataset """kinships""" +809 79 model """transe""" +809 79 loss """crossentropy""" +809 79 regularizer """no""" +809 79 optimizer """adadelta""" +809 79 training_loop """lcwa""" +809 79 evaluator """rankbased""" +809 80 dataset """kinships""" +809 80 model """transe""" +809 80 loss """crossentropy""" +809 80 regularizer """no""" +809 80 optimizer """adadelta""" +809 80 training_loop """lcwa""" +809 80 evaluator """rankbased""" +809 81 dataset """kinships""" +809 81 model """transe""" +809 81 loss """crossentropy""" +809 81 regularizer """no""" +809 81 optimizer """adadelta""" +809 81 training_loop """lcwa""" +809 81 evaluator """rankbased""" +809 82 dataset """kinships""" +809 82 model """transe""" +809 82 loss """crossentropy""" +809 82 regularizer """no""" +809 82 optimizer """adadelta""" +809 82 training_loop """lcwa""" +809 82 evaluator 
"""rankbased""" +809 83 dataset """kinships""" +809 83 model """transe""" +809 83 loss """crossentropy""" +809 83 regularizer """no""" +809 83 optimizer """adadelta""" +809 83 training_loop """lcwa""" +809 83 evaluator """rankbased""" +809 84 dataset """kinships""" +809 84 model """transe""" +809 84 loss """crossentropy""" +809 84 regularizer """no""" +809 84 optimizer """adadelta""" +809 84 training_loop """lcwa""" +809 84 evaluator """rankbased""" +809 85 dataset """kinships""" +809 85 model """transe""" +809 85 loss """crossentropy""" +809 85 regularizer """no""" +809 85 optimizer """adadelta""" +809 85 training_loop """lcwa""" +809 85 evaluator """rankbased""" +809 86 dataset """kinships""" +809 86 model """transe""" +809 86 loss """crossentropy""" +809 86 regularizer """no""" +809 86 optimizer """adadelta""" +809 86 training_loop """lcwa""" +809 86 evaluator """rankbased""" +809 87 dataset """kinships""" +809 87 model """transe""" +809 87 loss """crossentropy""" +809 87 regularizer """no""" +809 87 optimizer """adadelta""" +809 87 training_loop """lcwa""" +809 87 evaluator """rankbased""" +809 88 dataset """kinships""" +809 88 model """transe""" +809 88 loss """crossentropy""" +809 88 regularizer """no""" +809 88 optimizer """adadelta""" +809 88 training_loop """lcwa""" +809 88 evaluator """rankbased""" +809 89 dataset """kinships""" +809 89 model """transe""" +809 89 loss """crossentropy""" +809 89 regularizer """no""" +809 89 optimizer """adadelta""" +809 89 training_loop """lcwa""" +809 89 evaluator """rankbased""" +809 90 dataset """kinships""" +809 90 model """transe""" +809 90 loss """crossentropy""" +809 90 regularizer """no""" +809 90 optimizer """adadelta""" +809 90 training_loop """lcwa""" +809 90 evaluator """rankbased""" +809 91 dataset """kinships""" +809 91 model """transe""" +809 91 loss """crossentropy""" +809 91 regularizer """no""" +809 91 optimizer """adadelta""" +809 91 training_loop """lcwa""" +809 91 evaluator """rankbased""" +809 92 
dataset """kinships""" +809 92 model """transe""" +809 92 loss """crossentropy""" +809 92 regularizer """no""" +809 92 optimizer """adadelta""" +809 92 training_loop """lcwa""" +809 92 evaluator """rankbased""" +809 93 dataset """kinships""" +809 93 model """transe""" +809 93 loss """crossentropy""" +809 93 regularizer """no""" +809 93 optimizer """adadelta""" +809 93 training_loop """lcwa""" +809 93 evaluator """rankbased""" +809 94 dataset """kinships""" +809 94 model """transe""" +809 94 loss """crossentropy""" +809 94 regularizer """no""" +809 94 optimizer """adadelta""" +809 94 training_loop """lcwa""" +809 94 evaluator """rankbased""" +809 95 dataset """kinships""" +809 95 model """transe""" +809 95 loss """crossentropy""" +809 95 regularizer """no""" +809 95 optimizer """adadelta""" +809 95 training_loop """lcwa""" +809 95 evaluator """rankbased""" +809 96 dataset """kinships""" +809 96 model """transe""" +809 96 loss """crossentropy""" +809 96 regularizer """no""" +809 96 optimizer """adadelta""" +809 96 training_loop """lcwa""" +809 96 evaluator """rankbased""" +809 97 dataset """kinships""" +809 97 model """transe""" +809 97 loss """crossentropy""" +809 97 regularizer """no""" +809 97 optimizer """adadelta""" +809 97 training_loop """lcwa""" +809 97 evaluator """rankbased""" +809 98 dataset """kinships""" +809 98 model """transe""" +809 98 loss """crossentropy""" +809 98 regularizer """no""" +809 98 optimizer """adadelta""" +809 98 training_loop """lcwa""" +809 98 evaluator """rankbased""" +809 99 dataset """kinships""" +809 99 model """transe""" +809 99 loss """crossentropy""" +809 99 regularizer """no""" +809 99 optimizer """adadelta""" +809 99 training_loop """lcwa""" +809 99 evaluator """rankbased""" +809 100 dataset """kinships""" +809 100 model """transe""" +809 100 loss """crossentropy""" +809 100 regularizer """no""" +809 100 optimizer """adadelta""" +809 100 training_loop """lcwa""" +809 100 evaluator """rankbased""" +810 1 model.embedding_dim 
2.0 +810 1 model.scoring_fct_norm 1.0 +810 1 training.batch_size 2.0 +810 1 training.label_smoothing 0.06578107689058664 +810 2 model.embedding_dim 2.0 +810 2 model.scoring_fct_norm 2.0 +810 2 training.batch_size 2.0 +810 2 training.label_smoothing 0.0010474550832073562 +810 3 model.embedding_dim 0.0 +810 3 model.scoring_fct_norm 1.0 +810 3 training.batch_size 1.0 +810 3 training.label_smoothing 0.0040577274403717205 +810 4 model.embedding_dim 1.0 +810 4 model.scoring_fct_norm 2.0 +810 4 training.batch_size 1.0 +810 4 training.label_smoothing 0.05495177794538968 +810 5 model.embedding_dim 1.0 +810 5 model.scoring_fct_norm 2.0 +810 5 training.batch_size 0.0 +810 5 training.label_smoothing 0.0015871326516740238 +810 6 model.embedding_dim 2.0 +810 6 model.scoring_fct_norm 1.0 +810 6 training.batch_size 2.0 +810 6 training.label_smoothing 0.022281709058361045 +810 7 model.embedding_dim 2.0 +810 7 model.scoring_fct_norm 1.0 +810 7 training.batch_size 0.0 +810 7 training.label_smoothing 0.35389556343380474 +810 8 model.embedding_dim 2.0 +810 8 model.scoring_fct_norm 1.0 +810 8 training.batch_size 1.0 +810 8 training.label_smoothing 0.28608502746749004 +810 9 model.embedding_dim 0.0 +810 9 model.scoring_fct_norm 1.0 +810 9 training.batch_size 0.0 +810 9 training.label_smoothing 0.020094942414362618 +810 10 model.embedding_dim 0.0 +810 10 model.scoring_fct_norm 2.0 +810 10 training.batch_size 0.0 +810 10 training.label_smoothing 0.030616911791313577 +810 11 model.embedding_dim 1.0 +810 11 model.scoring_fct_norm 2.0 +810 11 training.batch_size 2.0 +810 11 training.label_smoothing 0.15741414073719537 +810 12 model.embedding_dim 1.0 +810 12 model.scoring_fct_norm 1.0 +810 12 training.batch_size 0.0 +810 12 training.label_smoothing 0.041139526349080996 +810 13 model.embedding_dim 1.0 +810 13 model.scoring_fct_norm 1.0 +810 13 training.batch_size 0.0 +810 13 training.label_smoothing 0.09358362955983994 +810 14 model.embedding_dim 1.0 +810 14 model.scoring_fct_norm 2.0 +810 14 
training.batch_size 1.0 +810 14 training.label_smoothing 0.002287837801056894 +810 15 model.embedding_dim 0.0 +810 15 model.scoring_fct_norm 1.0 +810 15 training.batch_size 1.0 +810 15 training.label_smoothing 0.001051312773337014 +810 16 model.embedding_dim 2.0 +810 16 model.scoring_fct_norm 1.0 +810 16 training.batch_size 2.0 +810 16 training.label_smoothing 0.6550348993267611 +810 17 model.embedding_dim 1.0 +810 17 model.scoring_fct_norm 2.0 +810 17 training.batch_size 2.0 +810 17 training.label_smoothing 0.027010670223845522 +810 18 model.embedding_dim 2.0 +810 18 model.scoring_fct_norm 2.0 +810 18 training.batch_size 1.0 +810 18 training.label_smoothing 0.7650986705074384 +810 19 model.embedding_dim 1.0 +810 19 model.scoring_fct_norm 1.0 +810 19 training.batch_size 2.0 +810 19 training.label_smoothing 0.24055888127547598 +810 20 model.embedding_dim 1.0 +810 20 model.scoring_fct_norm 2.0 +810 20 training.batch_size 1.0 +810 20 training.label_smoothing 0.02389729854694859 +810 21 model.embedding_dim 1.0 +810 21 model.scoring_fct_norm 1.0 +810 21 training.batch_size 0.0 +810 21 training.label_smoothing 0.037646971991608304 +810 22 model.embedding_dim 0.0 +810 22 model.scoring_fct_norm 2.0 +810 22 training.batch_size 1.0 +810 22 training.label_smoothing 0.002832162975733045 +810 23 model.embedding_dim 0.0 +810 23 model.scoring_fct_norm 2.0 +810 23 training.batch_size 0.0 +810 23 training.label_smoothing 0.9284841868712073 +810 24 model.embedding_dim 2.0 +810 24 model.scoring_fct_norm 2.0 +810 24 training.batch_size 1.0 +810 24 training.label_smoothing 0.0049947201637785925 +810 25 model.embedding_dim 0.0 +810 25 model.scoring_fct_norm 2.0 +810 25 training.batch_size 2.0 +810 25 training.label_smoothing 0.59559535519522 +810 26 model.embedding_dim 0.0 +810 26 model.scoring_fct_norm 2.0 +810 26 training.batch_size 1.0 +810 26 training.label_smoothing 0.0034939965616015206 +810 27 model.embedding_dim 0.0 +810 27 model.scoring_fct_norm 1.0 +810 27 training.batch_size 
2.0 +810 27 training.label_smoothing 0.0024210805750035152 +810 28 model.embedding_dim 0.0 +810 28 model.scoring_fct_norm 1.0 +810 28 training.batch_size 0.0 +810 28 training.label_smoothing 0.5620995845036858 +810 29 model.embedding_dim 1.0 +810 29 model.scoring_fct_norm 2.0 +810 29 training.batch_size 1.0 +810 29 training.label_smoothing 0.0011225580861397321 +810 30 model.embedding_dim 2.0 +810 30 model.scoring_fct_norm 2.0 +810 30 training.batch_size 1.0 +810 30 training.label_smoothing 0.0020933654069296283 +810 31 model.embedding_dim 1.0 +810 31 model.scoring_fct_norm 1.0 +810 31 training.batch_size 2.0 +810 31 training.label_smoothing 0.8103471884855621 +810 32 model.embedding_dim 1.0 +810 32 model.scoring_fct_norm 1.0 +810 32 training.batch_size 0.0 +810 32 training.label_smoothing 0.0232251608693364 +810 33 model.embedding_dim 2.0 +810 33 model.scoring_fct_norm 1.0 +810 33 training.batch_size 0.0 +810 33 training.label_smoothing 0.0017346505367843003 +810 34 model.embedding_dim 0.0 +810 34 model.scoring_fct_norm 2.0 +810 34 training.batch_size 0.0 +810 34 training.label_smoothing 0.004407011308189737 +810 35 model.embedding_dim 2.0 +810 35 model.scoring_fct_norm 2.0 +810 35 training.batch_size 0.0 +810 35 training.label_smoothing 0.022353737297656048 +810 36 model.embedding_dim 2.0 +810 36 model.scoring_fct_norm 2.0 +810 36 training.batch_size 2.0 +810 36 training.label_smoothing 0.3653468618413409 +810 37 model.embedding_dim 0.0 +810 37 model.scoring_fct_norm 1.0 +810 37 training.batch_size 0.0 +810 37 training.label_smoothing 0.13387404951584875 +810 38 model.embedding_dim 1.0 +810 38 model.scoring_fct_norm 1.0 +810 38 training.batch_size 0.0 +810 38 training.label_smoothing 0.011309842430561864 +810 39 model.embedding_dim 1.0 +810 39 model.scoring_fct_norm 2.0 +810 39 training.batch_size 2.0 +810 39 training.label_smoothing 0.0023890530936602544 +810 40 model.embedding_dim 1.0 +810 40 model.scoring_fct_norm 2.0 +810 40 training.batch_size 0.0 +810 40 
training.label_smoothing 0.010695263633061529 +810 41 model.embedding_dim 1.0 +810 41 model.scoring_fct_norm 1.0 +810 41 training.batch_size 2.0 +810 41 training.label_smoothing 0.06682979796740239 +810 42 model.embedding_dim 2.0 +810 42 model.scoring_fct_norm 1.0 +810 42 training.batch_size 0.0 +810 42 training.label_smoothing 0.647157485051253 +810 43 model.embedding_dim 2.0 +810 43 model.scoring_fct_norm 1.0 +810 43 training.batch_size 2.0 +810 43 training.label_smoothing 0.0018134794373861956 +810 44 model.embedding_dim 1.0 +810 44 model.scoring_fct_norm 2.0 +810 44 training.batch_size 0.0 +810 44 training.label_smoothing 0.4064731237166649 +810 45 model.embedding_dim 0.0 +810 45 model.scoring_fct_norm 2.0 +810 45 training.batch_size 0.0 +810 45 training.label_smoothing 0.0016261307333994373 +810 46 model.embedding_dim 0.0 +810 46 model.scoring_fct_norm 1.0 +810 46 training.batch_size 2.0 +810 46 training.label_smoothing 0.0016492891580247853 +810 47 model.embedding_dim 2.0 +810 47 model.scoring_fct_norm 1.0 +810 47 training.batch_size 1.0 +810 47 training.label_smoothing 0.8991509114589679 +810 48 model.embedding_dim 0.0 +810 48 model.scoring_fct_norm 2.0 +810 48 training.batch_size 2.0 +810 48 training.label_smoothing 0.031043453178603894 +810 49 model.embedding_dim 0.0 +810 49 model.scoring_fct_norm 2.0 +810 49 training.batch_size 2.0 +810 49 training.label_smoothing 0.007553077080048967 +810 50 model.embedding_dim 2.0 +810 50 model.scoring_fct_norm 1.0 +810 50 training.batch_size 2.0 +810 50 training.label_smoothing 0.17774913094456665 +810 51 model.embedding_dim 2.0 +810 51 model.scoring_fct_norm 2.0 +810 51 training.batch_size 1.0 +810 51 training.label_smoothing 0.045440812071193916 +810 52 model.embedding_dim 1.0 +810 52 model.scoring_fct_norm 1.0 +810 52 training.batch_size 1.0 +810 52 training.label_smoothing 0.009384382903910015 +810 53 model.embedding_dim 2.0 +810 53 model.scoring_fct_norm 2.0 +810 53 training.batch_size 2.0 +810 53 
training.label_smoothing 0.7261495027421396 +810 54 model.embedding_dim 2.0 +810 54 model.scoring_fct_norm 1.0 +810 54 training.batch_size 0.0 +810 54 training.label_smoothing 0.003558616401700655 +810 55 model.embedding_dim 2.0 +810 55 model.scoring_fct_norm 1.0 +810 55 training.batch_size 0.0 +810 55 training.label_smoothing 0.0010728374349611687 +810 56 model.embedding_dim 0.0 +810 56 model.scoring_fct_norm 1.0 +810 56 training.batch_size 1.0 +810 56 training.label_smoothing 0.002544764352678364 +810 57 model.embedding_dim 2.0 +810 57 model.scoring_fct_norm 1.0 +810 57 training.batch_size 1.0 +810 57 training.label_smoothing 0.001062613320250046 +810 58 model.embedding_dim 0.0 +810 58 model.scoring_fct_norm 2.0 +810 58 training.batch_size 1.0 +810 58 training.label_smoothing 0.03042445955094599 +810 59 model.embedding_dim 2.0 +810 59 model.scoring_fct_norm 2.0 +810 59 training.batch_size 1.0 +810 59 training.label_smoothing 0.017893841603043588 +810 60 model.embedding_dim 0.0 +810 60 model.scoring_fct_norm 2.0 +810 60 training.batch_size 2.0 +810 60 training.label_smoothing 0.2992308842089571 +810 61 model.embedding_dim 1.0 +810 61 model.scoring_fct_norm 1.0 +810 61 training.batch_size 0.0 +810 61 training.label_smoothing 0.25157875251724515 +810 62 model.embedding_dim 0.0 +810 62 model.scoring_fct_norm 2.0 +810 62 training.batch_size 1.0 +810 62 training.label_smoothing 0.3027279488282285 +810 63 model.embedding_dim 0.0 +810 63 model.scoring_fct_norm 2.0 +810 63 training.batch_size 1.0 +810 63 training.label_smoothing 0.6763767587507014 +810 64 model.embedding_dim 2.0 +810 64 model.scoring_fct_norm 2.0 +810 64 training.batch_size 0.0 +810 64 training.label_smoothing 0.0017534655656606928 +810 65 model.embedding_dim 1.0 +810 65 model.scoring_fct_norm 2.0 +810 65 training.batch_size 0.0 +810 65 training.label_smoothing 0.0010506320990448614 +810 66 model.embedding_dim 2.0 +810 66 model.scoring_fct_norm 1.0 +810 66 training.batch_size 1.0 +810 66 
training.label_smoothing 0.011667477645125377 +810 67 model.embedding_dim 2.0 +810 67 model.scoring_fct_norm 1.0 +810 67 training.batch_size 0.0 +810 67 training.label_smoothing 0.4733217645737161 +810 68 model.embedding_dim 0.0 +810 68 model.scoring_fct_norm 2.0 +810 68 training.batch_size 2.0 +810 68 training.label_smoothing 0.027557913559400408 +810 69 model.embedding_dim 0.0 +810 69 model.scoring_fct_norm 2.0 +810 69 training.batch_size 2.0 +810 69 training.label_smoothing 0.00233110373023458 +810 70 model.embedding_dim 1.0 +810 70 model.scoring_fct_norm 2.0 +810 70 training.batch_size 1.0 +810 70 training.label_smoothing 0.008669640155689579 +810 71 model.embedding_dim 0.0 +810 71 model.scoring_fct_norm 1.0 +810 71 training.batch_size 2.0 +810 71 training.label_smoothing 0.0932755147087406 +810 72 model.embedding_dim 1.0 +810 72 model.scoring_fct_norm 1.0 +810 72 training.batch_size 2.0 +810 72 training.label_smoothing 0.0013616608580610545 +810 73 model.embedding_dim 1.0 +810 73 model.scoring_fct_norm 2.0 +810 73 training.batch_size 2.0 +810 73 training.label_smoothing 0.024898014001134353 +810 74 model.embedding_dim 1.0 +810 74 model.scoring_fct_norm 1.0 +810 74 training.batch_size 2.0 +810 74 training.label_smoothing 0.0013698386535678645 +810 75 model.embedding_dim 1.0 +810 75 model.scoring_fct_norm 1.0 +810 75 training.batch_size 1.0 +810 75 training.label_smoothing 0.44741472350222233 +810 76 model.embedding_dim 2.0 +810 76 model.scoring_fct_norm 2.0 +810 76 training.batch_size 1.0 +810 76 training.label_smoothing 0.0015517187281049328 +810 77 model.embedding_dim 1.0 +810 77 model.scoring_fct_norm 2.0 +810 77 training.batch_size 1.0 +810 77 training.label_smoothing 0.06298928511505968 +810 78 model.embedding_dim 0.0 +810 78 model.scoring_fct_norm 2.0 +810 78 training.batch_size 0.0 +810 78 training.label_smoothing 0.01631922240061769 +810 79 model.embedding_dim 1.0 +810 79 model.scoring_fct_norm 2.0 +810 79 training.batch_size 0.0 +810 79 
training.label_smoothing 0.11486312551276556 +810 80 model.embedding_dim 2.0 +810 80 model.scoring_fct_norm 1.0 +810 80 training.batch_size 1.0 +810 80 training.label_smoothing 0.70532603646708 +810 81 model.embedding_dim 1.0 +810 81 model.scoring_fct_norm 2.0 +810 81 training.batch_size 0.0 +810 81 training.label_smoothing 0.3871143223737669 +810 82 model.embedding_dim 0.0 +810 82 model.scoring_fct_norm 2.0 +810 82 training.batch_size 1.0 +810 82 training.label_smoothing 0.002593167316699681 +810 83 model.embedding_dim 1.0 +810 83 model.scoring_fct_norm 2.0 +810 83 training.batch_size 0.0 +810 83 training.label_smoothing 0.007024602319564408 +810 84 model.embedding_dim 0.0 +810 84 model.scoring_fct_norm 1.0 +810 84 training.batch_size 0.0 +810 84 training.label_smoothing 0.012464214620866077 +810 85 model.embedding_dim 2.0 +810 85 model.scoring_fct_norm 1.0 +810 85 training.batch_size 2.0 +810 85 training.label_smoothing 0.04865077671622222 +810 86 model.embedding_dim 2.0 +810 86 model.scoring_fct_norm 2.0 +810 86 training.batch_size 1.0 +810 86 training.label_smoothing 0.0015073255731751065 +810 87 model.embedding_dim 0.0 +810 87 model.scoring_fct_norm 1.0 +810 87 training.batch_size 1.0 +810 87 training.label_smoothing 0.011613456182943138 +810 88 model.embedding_dim 0.0 +810 88 model.scoring_fct_norm 1.0 +810 88 training.batch_size 0.0 +810 88 training.label_smoothing 0.06446413195519021 +810 89 model.embedding_dim 0.0 +810 89 model.scoring_fct_norm 2.0 +810 89 training.batch_size 0.0 +810 89 training.label_smoothing 0.001176228309093181 +810 90 model.embedding_dim 1.0 +810 90 model.scoring_fct_norm 1.0 +810 90 training.batch_size 1.0 +810 90 training.label_smoothing 0.015600110444460111 +810 91 model.embedding_dim 2.0 +810 91 model.scoring_fct_norm 1.0 +810 91 training.batch_size 2.0 +810 91 training.label_smoothing 0.05478536783734447 +810 92 model.embedding_dim 0.0 +810 92 model.scoring_fct_norm 1.0 +810 92 training.batch_size 0.0 +810 92 
training.label_smoothing 0.002207387004160411 +810 93 model.embedding_dim 2.0 +810 93 model.scoring_fct_norm 2.0 +810 93 training.batch_size 1.0 +810 93 training.label_smoothing 0.12561666288824286 +810 94 model.embedding_dim 0.0 +810 94 model.scoring_fct_norm 1.0 +810 94 training.batch_size 2.0 +810 94 training.label_smoothing 0.4345069896537028 +810 95 model.embedding_dim 0.0 +810 95 model.scoring_fct_norm 1.0 +810 95 training.batch_size 0.0 +810 95 training.label_smoothing 0.09030636513689957 +810 96 model.embedding_dim 2.0 +810 96 model.scoring_fct_norm 1.0 +810 96 training.batch_size 0.0 +810 96 training.label_smoothing 0.14158446324708684 +810 97 model.embedding_dim 2.0 +810 97 model.scoring_fct_norm 1.0 +810 97 training.batch_size 1.0 +810 97 training.label_smoothing 0.0011128580302935196 +810 98 model.embedding_dim 1.0 +810 98 model.scoring_fct_norm 1.0 +810 98 training.batch_size 1.0 +810 98 training.label_smoothing 0.002293500004261367 +810 99 model.embedding_dim 2.0 +810 99 model.scoring_fct_norm 1.0 +810 99 training.batch_size 2.0 +810 99 training.label_smoothing 0.005627987327065776 +810 100 model.embedding_dim 2.0 +810 100 model.scoring_fct_norm 2.0 +810 100 training.batch_size 1.0 +810 100 training.label_smoothing 0.25256518754326 +810 1 dataset """kinships""" +810 1 model """transe""" +810 1 loss """crossentropy""" +810 1 regularizer """no""" +810 1 optimizer """adadelta""" +810 1 training_loop """lcwa""" +810 1 evaluator """rankbased""" +810 2 dataset """kinships""" +810 2 model """transe""" +810 2 loss """crossentropy""" +810 2 regularizer """no""" +810 2 optimizer """adadelta""" +810 2 training_loop """lcwa""" +810 2 evaluator """rankbased""" +810 3 dataset """kinships""" +810 3 model """transe""" +810 3 loss """crossentropy""" +810 3 regularizer """no""" +810 3 optimizer """adadelta""" +810 3 training_loop """lcwa""" +810 3 evaluator """rankbased""" +810 4 dataset """kinships""" +810 4 model """transe""" +810 4 loss """crossentropy""" +810 4 
regularizer """no""" +810 4 optimizer """adadelta""" +810 4 training_loop """lcwa""" +810 4 evaluator """rankbased""" +810 5 dataset """kinships""" +810 5 model """transe""" +810 5 loss """crossentropy""" +810 5 regularizer """no""" +810 5 optimizer """adadelta""" +810 5 training_loop """lcwa""" +810 5 evaluator """rankbased""" +810 6 dataset """kinships""" +810 6 model """transe""" +810 6 loss """crossentropy""" +810 6 regularizer """no""" +810 6 optimizer """adadelta""" +810 6 training_loop """lcwa""" +810 6 evaluator """rankbased""" +810 7 dataset """kinships""" +810 7 model """transe""" +810 7 loss """crossentropy""" +810 7 regularizer """no""" +810 7 optimizer """adadelta""" +810 7 training_loop """lcwa""" +810 7 evaluator """rankbased""" +810 8 dataset """kinships""" +810 8 model """transe""" +810 8 loss """crossentropy""" +810 8 regularizer """no""" +810 8 optimizer """adadelta""" +810 8 training_loop """lcwa""" +810 8 evaluator """rankbased""" +810 9 dataset """kinships""" +810 9 model """transe""" +810 9 loss """crossentropy""" +810 9 regularizer """no""" +810 9 optimizer """adadelta""" +810 9 training_loop """lcwa""" +810 9 evaluator """rankbased""" +810 10 dataset """kinships""" +810 10 model """transe""" +810 10 loss """crossentropy""" +810 10 regularizer """no""" +810 10 optimizer """adadelta""" +810 10 training_loop """lcwa""" +810 10 evaluator """rankbased""" +810 11 dataset """kinships""" +810 11 model """transe""" +810 11 loss """crossentropy""" +810 11 regularizer """no""" +810 11 optimizer """adadelta""" +810 11 training_loop """lcwa""" +810 11 evaluator """rankbased""" +810 12 dataset """kinships""" +810 12 model """transe""" +810 12 loss """crossentropy""" +810 12 regularizer """no""" +810 12 optimizer """adadelta""" +810 12 training_loop """lcwa""" +810 12 evaluator """rankbased""" +810 13 dataset """kinships""" +810 13 model """transe""" +810 13 loss """crossentropy""" +810 13 regularizer """no""" +810 13 optimizer """adadelta""" +810 13 
training_loop """lcwa""" +810 13 evaluator """rankbased""" +810 14 dataset """kinships""" +810 14 model """transe""" +810 14 loss """crossentropy""" +810 14 regularizer """no""" +810 14 optimizer """adadelta""" +810 14 training_loop """lcwa""" +810 14 evaluator """rankbased""" +810 15 dataset """kinships""" +810 15 model """transe""" +810 15 loss """crossentropy""" +810 15 regularizer """no""" +810 15 optimizer """adadelta""" +810 15 training_loop """lcwa""" +810 15 evaluator """rankbased""" +810 16 dataset """kinships""" +810 16 model """transe""" +810 16 loss """crossentropy""" +810 16 regularizer """no""" +810 16 optimizer """adadelta""" +810 16 training_loop """lcwa""" +810 16 evaluator """rankbased""" +810 17 dataset """kinships""" +810 17 model """transe""" +810 17 loss """crossentropy""" +810 17 regularizer """no""" +810 17 optimizer """adadelta""" +810 17 training_loop """lcwa""" +810 17 evaluator """rankbased""" +810 18 dataset """kinships""" +810 18 model """transe""" +810 18 loss """crossentropy""" +810 18 regularizer """no""" +810 18 optimizer """adadelta""" +810 18 training_loop """lcwa""" +810 18 evaluator """rankbased""" +810 19 dataset """kinships""" +810 19 model """transe""" +810 19 loss """crossentropy""" +810 19 regularizer """no""" +810 19 optimizer """adadelta""" +810 19 training_loop """lcwa""" +810 19 evaluator """rankbased""" +810 20 dataset """kinships""" +810 20 model """transe""" +810 20 loss """crossentropy""" +810 20 regularizer """no""" +810 20 optimizer """adadelta""" +810 20 training_loop """lcwa""" +810 20 evaluator """rankbased""" +810 21 dataset """kinships""" +810 21 model """transe""" +810 21 loss """crossentropy""" +810 21 regularizer """no""" +810 21 optimizer """adadelta""" +810 21 training_loop """lcwa""" +810 21 evaluator """rankbased""" +810 22 dataset """kinships""" +810 22 model """transe""" +810 22 loss """crossentropy""" +810 22 regularizer """no""" +810 22 optimizer """adadelta""" +810 22 training_loop """lcwa""" 
+810 22 evaluator """rankbased""" +810 23 dataset """kinships""" +810 23 model """transe""" +810 23 loss """crossentropy""" +810 23 regularizer """no""" +810 23 optimizer """adadelta""" +810 23 training_loop """lcwa""" +810 23 evaluator """rankbased""" +810 24 dataset """kinships""" +810 24 model """transe""" +810 24 loss """crossentropy""" +810 24 regularizer """no""" +810 24 optimizer """adadelta""" +810 24 training_loop """lcwa""" +810 24 evaluator """rankbased""" +810 25 dataset """kinships""" +810 25 model """transe""" +810 25 loss """crossentropy""" +810 25 regularizer """no""" +810 25 optimizer """adadelta""" +810 25 training_loop """lcwa""" +810 25 evaluator """rankbased""" +810 26 dataset """kinships""" +810 26 model """transe""" +810 26 loss """crossentropy""" +810 26 regularizer """no""" +810 26 optimizer """adadelta""" +810 26 training_loop """lcwa""" +810 26 evaluator """rankbased""" +810 27 dataset """kinships""" +810 27 model """transe""" +810 27 loss """crossentropy""" +810 27 regularizer """no""" +810 27 optimizer """adadelta""" +810 27 training_loop """lcwa""" +810 27 evaluator """rankbased""" +810 28 dataset """kinships""" +810 28 model """transe""" +810 28 loss """crossentropy""" +810 28 regularizer """no""" +810 28 optimizer """adadelta""" +810 28 training_loop """lcwa""" +810 28 evaluator """rankbased""" +810 29 dataset """kinships""" +810 29 model """transe""" +810 29 loss """crossentropy""" +810 29 regularizer """no""" +810 29 optimizer """adadelta""" +810 29 training_loop """lcwa""" +810 29 evaluator """rankbased""" +810 30 dataset """kinships""" +810 30 model """transe""" +810 30 loss """crossentropy""" +810 30 regularizer """no""" +810 30 optimizer """adadelta""" +810 30 training_loop """lcwa""" +810 30 evaluator """rankbased""" +810 31 dataset """kinships""" +810 31 model """transe""" +810 31 loss """crossentropy""" +810 31 regularizer """no""" +810 31 optimizer """adadelta""" +810 31 training_loop """lcwa""" +810 31 evaluator 
"""rankbased""" +810 32 dataset """kinships""" +810 32 model """transe""" +810 32 loss """crossentropy""" +810 32 regularizer """no""" +810 32 optimizer """adadelta""" +810 32 training_loop """lcwa""" +810 32 evaluator """rankbased""" +810 33 dataset """kinships""" +810 33 model """transe""" +810 33 loss """crossentropy""" +810 33 regularizer """no""" +810 33 optimizer """adadelta""" +810 33 training_loop """lcwa""" +810 33 evaluator """rankbased""" +810 34 dataset """kinships""" +810 34 model """transe""" +810 34 loss """crossentropy""" +810 34 regularizer """no""" +810 34 optimizer """adadelta""" +810 34 training_loop """lcwa""" +810 34 evaluator """rankbased""" +810 35 dataset """kinships""" +810 35 model """transe""" +810 35 loss """crossentropy""" +810 35 regularizer """no""" +810 35 optimizer """adadelta""" +810 35 training_loop """lcwa""" +810 35 evaluator """rankbased""" +810 36 dataset """kinships""" +810 36 model """transe""" +810 36 loss """crossentropy""" +810 36 regularizer """no""" +810 36 optimizer """adadelta""" +810 36 training_loop """lcwa""" +810 36 evaluator """rankbased""" +810 37 dataset """kinships""" +810 37 model """transe""" +810 37 loss """crossentropy""" +810 37 regularizer """no""" +810 37 optimizer """adadelta""" +810 37 training_loop """lcwa""" +810 37 evaluator """rankbased""" +810 38 dataset """kinships""" +810 38 model """transe""" +810 38 loss """crossentropy""" +810 38 regularizer """no""" +810 38 optimizer """adadelta""" +810 38 training_loop """lcwa""" +810 38 evaluator """rankbased""" +810 39 dataset """kinships""" +810 39 model """transe""" +810 39 loss """crossentropy""" +810 39 regularizer """no""" +810 39 optimizer """adadelta""" +810 39 training_loop """lcwa""" +810 39 evaluator """rankbased""" +810 40 dataset """kinships""" +810 40 model """transe""" +810 40 loss """crossentropy""" +810 40 regularizer """no""" +810 40 optimizer """adadelta""" +810 40 training_loop """lcwa""" +810 40 evaluator """rankbased""" +810 41 
dataset """kinships""" +810 41 model """transe""" +810 41 loss """crossentropy""" +810 41 regularizer """no""" +810 41 optimizer """adadelta""" +810 41 training_loop """lcwa""" +810 41 evaluator """rankbased""" +810 42 dataset """kinships""" +810 42 model """transe""" +810 42 loss """crossentropy""" +810 42 regularizer """no""" +810 42 optimizer """adadelta""" +810 42 training_loop """lcwa""" +810 42 evaluator """rankbased""" +810 43 dataset """kinships""" +810 43 model """transe""" +810 43 loss """crossentropy""" +810 43 regularizer """no""" +810 43 optimizer """adadelta""" +810 43 training_loop """lcwa""" +810 43 evaluator """rankbased""" +810 44 dataset """kinships""" +810 44 model """transe""" +810 44 loss """crossentropy""" +810 44 regularizer """no""" +810 44 optimizer """adadelta""" +810 44 training_loop """lcwa""" +810 44 evaluator """rankbased""" +810 45 dataset """kinships""" +810 45 model """transe""" +810 45 loss """crossentropy""" +810 45 regularizer """no""" +810 45 optimizer """adadelta""" +810 45 training_loop """lcwa""" +810 45 evaluator """rankbased""" +810 46 dataset """kinships""" +810 46 model """transe""" +810 46 loss """crossentropy""" +810 46 regularizer """no""" +810 46 optimizer """adadelta""" +810 46 training_loop """lcwa""" +810 46 evaluator """rankbased""" +810 47 dataset """kinships""" +810 47 model """transe""" +810 47 loss """crossentropy""" +810 47 regularizer """no""" +810 47 optimizer """adadelta""" +810 47 training_loop """lcwa""" +810 47 evaluator """rankbased""" +810 48 dataset """kinships""" +810 48 model """transe""" +810 48 loss """crossentropy""" +810 48 regularizer """no""" +810 48 optimizer """adadelta""" +810 48 training_loop """lcwa""" +810 48 evaluator """rankbased""" +810 49 dataset """kinships""" +810 49 model """transe""" +810 49 loss """crossentropy""" +810 49 regularizer """no""" +810 49 optimizer """adadelta""" +810 49 training_loop """lcwa""" +810 49 evaluator """rankbased""" +810 50 dataset """kinships""" +810 
50 model """transe""" +810 50 loss """crossentropy""" +810 50 regularizer """no""" +810 50 optimizer """adadelta""" +810 50 training_loop """lcwa""" +810 50 evaluator """rankbased""" +810 51 dataset """kinships""" +810 51 model """transe""" +810 51 loss """crossentropy""" +810 51 regularizer """no""" +810 51 optimizer """adadelta""" +810 51 training_loop """lcwa""" +810 51 evaluator """rankbased""" +810 52 dataset """kinships""" +810 52 model """transe""" +810 52 loss """crossentropy""" +810 52 regularizer """no""" +810 52 optimizer """adadelta""" +810 52 training_loop """lcwa""" +810 52 evaluator """rankbased""" +810 53 dataset """kinships""" +810 53 model """transe""" +810 53 loss """crossentropy""" +810 53 regularizer """no""" +810 53 optimizer """adadelta""" +810 53 training_loop """lcwa""" +810 53 evaluator """rankbased""" +810 54 dataset """kinships""" +810 54 model """transe""" +810 54 loss """crossentropy""" +810 54 regularizer """no""" +810 54 optimizer """adadelta""" +810 54 training_loop """lcwa""" +810 54 evaluator """rankbased""" +810 55 dataset """kinships""" +810 55 model """transe""" +810 55 loss """crossentropy""" +810 55 regularizer """no""" +810 55 optimizer """adadelta""" +810 55 training_loop """lcwa""" +810 55 evaluator """rankbased""" +810 56 dataset """kinships""" +810 56 model """transe""" +810 56 loss """crossentropy""" +810 56 regularizer """no""" +810 56 optimizer """adadelta""" +810 56 training_loop """lcwa""" +810 56 evaluator """rankbased""" +810 57 dataset """kinships""" +810 57 model """transe""" +810 57 loss """crossentropy""" +810 57 regularizer """no""" +810 57 optimizer """adadelta""" +810 57 training_loop """lcwa""" +810 57 evaluator """rankbased""" +810 58 dataset """kinships""" +810 58 model """transe""" +810 58 loss """crossentropy""" +810 58 regularizer """no""" +810 58 optimizer """adadelta""" +810 58 training_loop """lcwa""" +810 58 evaluator """rankbased""" +810 59 dataset """kinships""" +810 59 model """transe""" +810 
59 loss """crossentropy""" +810 59 regularizer """no""" +810 59 optimizer """adadelta""" +810 59 training_loop """lcwa""" +810 59 evaluator """rankbased""" +810 60 dataset """kinships""" +810 60 model """transe""" +810 60 loss """crossentropy""" +810 60 regularizer """no""" +810 60 optimizer """adadelta""" +810 60 training_loop """lcwa""" +810 60 evaluator """rankbased""" +810 61 dataset """kinships""" +810 61 model """transe""" +810 61 loss """crossentropy""" +810 61 regularizer """no""" +810 61 optimizer """adadelta""" +810 61 training_loop """lcwa""" +810 61 evaluator """rankbased""" +810 62 dataset """kinships""" +810 62 model """transe""" +810 62 loss """crossentropy""" +810 62 regularizer """no""" +810 62 optimizer """adadelta""" +810 62 training_loop """lcwa""" +810 62 evaluator """rankbased""" +810 63 dataset """kinships""" +810 63 model """transe""" +810 63 loss """crossentropy""" +810 63 regularizer """no""" +810 63 optimizer """adadelta""" +810 63 training_loop """lcwa""" +810 63 evaluator """rankbased""" +810 64 dataset """kinships""" +810 64 model """transe""" +810 64 loss """crossentropy""" +810 64 regularizer """no""" +810 64 optimizer """adadelta""" +810 64 training_loop """lcwa""" +810 64 evaluator """rankbased""" +810 65 dataset """kinships""" +810 65 model """transe""" +810 65 loss """crossentropy""" +810 65 regularizer """no""" +810 65 optimizer """adadelta""" +810 65 training_loop """lcwa""" +810 65 evaluator """rankbased""" +810 66 dataset """kinships""" +810 66 model """transe""" +810 66 loss """crossentropy""" +810 66 regularizer """no""" +810 66 optimizer """adadelta""" +810 66 training_loop """lcwa""" +810 66 evaluator """rankbased""" +810 67 dataset """kinships""" +810 67 model """transe""" +810 67 loss """crossentropy""" +810 67 regularizer """no""" +810 67 optimizer """adadelta""" +810 67 training_loop """lcwa""" +810 67 evaluator """rankbased""" +810 68 dataset """kinships""" +810 68 model """transe""" +810 68 loss """crossentropy""" 
+810 68 regularizer """no""" +810 68 optimizer """adadelta""" +810 68 training_loop """lcwa""" +810 68 evaluator """rankbased""" +810 69 dataset """kinships""" +810 69 model """transe""" +810 69 loss """crossentropy""" +810 69 regularizer """no""" +810 69 optimizer """adadelta""" +810 69 training_loop """lcwa""" +810 69 evaluator """rankbased""" +810 70 dataset """kinships""" +810 70 model """transe""" +810 70 loss """crossentropy""" +810 70 regularizer """no""" +810 70 optimizer """adadelta""" +810 70 training_loop """lcwa""" +810 70 evaluator """rankbased""" +810 71 dataset """kinships""" +810 71 model """transe""" +810 71 loss """crossentropy""" +810 71 regularizer """no""" +810 71 optimizer """adadelta""" +810 71 training_loop """lcwa""" +810 71 evaluator """rankbased""" +810 72 dataset """kinships""" +810 72 model """transe""" +810 72 loss """crossentropy""" +810 72 regularizer """no""" +810 72 optimizer """adadelta""" +810 72 training_loop """lcwa""" +810 72 evaluator """rankbased""" +810 73 dataset """kinships""" +810 73 model """transe""" +810 73 loss """crossentropy""" +810 73 regularizer """no""" +810 73 optimizer """adadelta""" +810 73 training_loop """lcwa""" +810 73 evaluator """rankbased""" +810 74 dataset """kinships""" +810 74 model """transe""" +810 74 loss """crossentropy""" +810 74 regularizer """no""" +810 74 optimizer """adadelta""" +810 74 training_loop """lcwa""" +810 74 evaluator """rankbased""" +810 75 dataset """kinships""" +810 75 model """transe""" +810 75 loss """crossentropy""" +810 75 regularizer """no""" +810 75 optimizer """adadelta""" +810 75 training_loop """lcwa""" +810 75 evaluator """rankbased""" +810 76 dataset """kinships""" +810 76 model """transe""" +810 76 loss """crossentropy""" +810 76 regularizer """no""" +810 76 optimizer """adadelta""" +810 76 training_loop """lcwa""" +810 76 evaluator """rankbased""" +810 77 dataset """kinships""" +810 77 model """transe""" +810 77 loss """crossentropy""" +810 77 regularizer """no""" 
+810 77 optimizer """adadelta""" +810 77 training_loop """lcwa""" +810 77 evaluator """rankbased""" +810 78 dataset """kinships""" +810 78 model """transe""" +810 78 loss """crossentropy""" +810 78 regularizer """no""" +810 78 optimizer """adadelta""" +810 78 training_loop """lcwa""" +810 78 evaluator """rankbased""" +810 79 dataset """kinships""" +810 79 model """transe""" +810 79 loss """crossentropy""" +810 79 regularizer """no""" +810 79 optimizer """adadelta""" +810 79 training_loop """lcwa""" +810 79 evaluator """rankbased""" +810 80 dataset """kinships""" +810 80 model """transe""" +810 80 loss """crossentropy""" +810 80 regularizer """no""" +810 80 optimizer """adadelta""" +810 80 training_loop """lcwa""" +810 80 evaluator """rankbased""" +810 81 dataset """kinships""" +810 81 model """transe""" +810 81 loss """crossentropy""" +810 81 regularizer """no""" +810 81 optimizer """adadelta""" +810 81 training_loop """lcwa""" +810 81 evaluator """rankbased""" +810 82 dataset """kinships""" +810 82 model """transe""" +810 82 loss """crossentropy""" +810 82 regularizer """no""" +810 82 optimizer """adadelta""" +810 82 training_loop """lcwa""" +810 82 evaluator """rankbased""" +810 83 dataset """kinships""" +810 83 model """transe""" +810 83 loss """crossentropy""" +810 83 regularizer """no""" +810 83 optimizer """adadelta""" +810 83 training_loop """lcwa""" +810 83 evaluator """rankbased""" +810 84 dataset """kinships""" +810 84 model """transe""" +810 84 loss """crossentropy""" +810 84 regularizer """no""" +810 84 optimizer """adadelta""" +810 84 training_loop """lcwa""" +810 84 evaluator """rankbased""" +810 85 dataset """kinships""" +810 85 model """transe""" +810 85 loss """crossentropy""" +810 85 regularizer """no""" +810 85 optimizer """adadelta""" +810 85 training_loop """lcwa""" +810 85 evaluator """rankbased""" +810 86 dataset """kinships""" +810 86 model """transe""" +810 86 loss """crossentropy""" +810 86 regularizer """no""" +810 86 optimizer 
"""adadelta""" +810 86 training_loop """lcwa""" +810 86 evaluator """rankbased""" +810 87 dataset """kinships""" +810 87 model """transe""" +810 87 loss """crossentropy""" +810 87 regularizer """no""" +810 87 optimizer """adadelta""" +810 87 training_loop """lcwa""" +810 87 evaluator """rankbased""" +810 88 dataset """kinships""" +810 88 model """transe""" +810 88 loss """crossentropy""" +810 88 regularizer """no""" +810 88 optimizer """adadelta""" +810 88 training_loop """lcwa""" +810 88 evaluator """rankbased""" +810 89 dataset """kinships""" +810 89 model """transe""" +810 89 loss """crossentropy""" +810 89 regularizer """no""" +810 89 optimizer """adadelta""" +810 89 training_loop """lcwa""" +810 89 evaluator """rankbased""" +810 90 dataset """kinships""" +810 90 model """transe""" +810 90 loss """crossentropy""" +810 90 regularizer """no""" +810 90 optimizer """adadelta""" +810 90 training_loop """lcwa""" +810 90 evaluator """rankbased""" +810 91 dataset """kinships""" +810 91 model """transe""" +810 91 loss """crossentropy""" +810 91 regularizer """no""" +810 91 optimizer """adadelta""" +810 91 training_loop """lcwa""" +810 91 evaluator """rankbased""" +810 92 dataset """kinships""" +810 92 model """transe""" +810 92 loss """crossentropy""" +810 92 regularizer """no""" +810 92 optimizer """adadelta""" +810 92 training_loop """lcwa""" +810 92 evaluator """rankbased""" +810 93 dataset """kinships""" +810 93 model """transe""" +810 93 loss """crossentropy""" +810 93 regularizer """no""" +810 93 optimizer """adadelta""" +810 93 training_loop """lcwa""" +810 93 evaluator """rankbased""" +810 94 dataset """kinships""" +810 94 model """transe""" +810 94 loss """crossentropy""" +810 94 regularizer """no""" +810 94 optimizer """adadelta""" +810 94 training_loop """lcwa""" +810 94 evaluator """rankbased""" +810 95 dataset """kinships""" +810 95 model """transe""" +810 95 loss """crossentropy""" +810 95 regularizer """no""" +810 95 optimizer """adadelta""" +810 95 
training_loop """lcwa""" +810 95 evaluator """rankbased""" +810 96 dataset """kinships""" +810 96 model """transe""" +810 96 loss """crossentropy""" +810 96 regularizer """no""" +810 96 optimizer """adadelta""" +810 96 training_loop """lcwa""" +810 96 evaluator """rankbased""" +810 97 dataset """kinships""" +810 97 model """transe""" +810 97 loss """crossentropy""" +810 97 regularizer """no""" +810 97 optimizer """adadelta""" +810 97 training_loop """lcwa""" +810 97 evaluator """rankbased""" +810 98 dataset """kinships""" +810 98 model """transe""" +810 98 loss """crossentropy""" +810 98 regularizer """no""" +810 98 optimizer """adadelta""" +810 98 training_loop """lcwa""" +810 98 evaluator """rankbased""" +810 99 dataset """kinships""" +810 99 model """transe""" +810 99 loss """crossentropy""" +810 99 regularizer """no""" +810 99 optimizer """adadelta""" +810 99 training_loop """lcwa""" +810 99 evaluator """rankbased""" +810 100 dataset """kinships""" +810 100 model """transe""" +810 100 loss """crossentropy""" +810 100 regularizer """no""" +810 100 optimizer """adadelta""" +810 100 training_loop """lcwa""" +810 100 evaluator """rankbased""" +811 1 model.embedding_dim 0.0 +811 1 model.scoring_fct_norm 2.0 +811 1 negative_sampler.num_negs_per_pos 60.0 +811 1 training.batch_size 1.0 +811 2 model.embedding_dim 2.0 +811 2 model.scoring_fct_norm 1.0 +811 2 negative_sampler.num_negs_per_pos 7.0 +811 2 training.batch_size 1.0 +811 3 model.embedding_dim 1.0 +811 3 model.scoring_fct_norm 2.0 +811 3 negative_sampler.num_negs_per_pos 78.0 +811 3 training.batch_size 0.0 +811 4 model.embedding_dim 0.0 +811 4 model.scoring_fct_norm 2.0 +811 4 negative_sampler.num_negs_per_pos 11.0 +811 4 training.batch_size 2.0 +811 5 model.embedding_dim 0.0 +811 5 model.scoring_fct_norm 1.0 +811 5 negative_sampler.num_negs_per_pos 79.0 +811 5 training.batch_size 0.0 +811 6 model.embedding_dim 0.0 +811 6 model.scoring_fct_norm 1.0 +811 6 negative_sampler.num_negs_per_pos 11.0 +811 6 
training.batch_size 2.0 +811 7 model.embedding_dim 2.0 +811 7 model.scoring_fct_norm 2.0 +811 7 negative_sampler.num_negs_per_pos 49.0 +811 7 training.batch_size 1.0 +811 8 model.embedding_dim 2.0 +811 8 model.scoring_fct_norm 1.0 +811 8 negative_sampler.num_negs_per_pos 73.0 +811 8 training.batch_size 1.0 +811 9 model.embedding_dim 0.0 +811 9 model.scoring_fct_norm 1.0 +811 9 negative_sampler.num_negs_per_pos 26.0 +811 9 training.batch_size 0.0 +811 10 model.embedding_dim 2.0 +811 10 model.scoring_fct_norm 1.0 +811 10 negative_sampler.num_negs_per_pos 84.0 +811 10 training.batch_size 1.0 +811 11 model.embedding_dim 0.0 +811 11 model.scoring_fct_norm 1.0 +811 11 negative_sampler.num_negs_per_pos 38.0 +811 11 training.batch_size 1.0 +811 12 model.embedding_dim 0.0 +811 12 model.scoring_fct_norm 2.0 +811 12 negative_sampler.num_negs_per_pos 44.0 +811 12 training.batch_size 1.0 +811 13 model.embedding_dim 0.0 +811 13 model.scoring_fct_norm 2.0 +811 13 negative_sampler.num_negs_per_pos 17.0 +811 13 training.batch_size 1.0 +811 14 model.embedding_dim 1.0 +811 14 model.scoring_fct_norm 2.0 +811 14 negative_sampler.num_negs_per_pos 40.0 +811 14 training.batch_size 1.0 +811 15 model.embedding_dim 2.0 +811 15 model.scoring_fct_norm 1.0 +811 15 negative_sampler.num_negs_per_pos 64.0 +811 15 training.batch_size 0.0 +811 16 model.embedding_dim 0.0 +811 16 model.scoring_fct_norm 2.0 +811 16 negative_sampler.num_negs_per_pos 39.0 +811 16 training.batch_size 1.0 +811 17 model.embedding_dim 2.0 +811 17 model.scoring_fct_norm 1.0 +811 17 negative_sampler.num_negs_per_pos 30.0 +811 17 training.batch_size 0.0 +811 18 model.embedding_dim 0.0 +811 18 model.scoring_fct_norm 1.0 +811 18 negative_sampler.num_negs_per_pos 1.0 +811 18 training.batch_size 0.0 +811 19 model.embedding_dim 2.0 +811 19 model.scoring_fct_norm 2.0 +811 19 negative_sampler.num_negs_per_pos 86.0 +811 19 training.batch_size 1.0 +811 20 model.embedding_dim 1.0 +811 20 model.scoring_fct_norm 1.0 +811 20 
negative_sampler.num_negs_per_pos 25.0 +811 20 training.batch_size 0.0 +811 21 model.embedding_dim 1.0 +811 21 model.scoring_fct_norm 1.0 +811 21 negative_sampler.num_negs_per_pos 88.0 +811 21 training.batch_size 2.0 +811 22 model.embedding_dim 0.0 +811 22 model.scoring_fct_norm 2.0 +811 22 negative_sampler.num_negs_per_pos 30.0 +811 22 training.batch_size 2.0 +811 23 model.embedding_dim 1.0 +811 23 model.scoring_fct_norm 1.0 +811 23 negative_sampler.num_negs_per_pos 82.0 +811 23 training.batch_size 0.0 +811 24 model.embedding_dim 1.0 +811 24 model.scoring_fct_norm 1.0 +811 24 negative_sampler.num_negs_per_pos 63.0 +811 24 training.batch_size 2.0 +811 25 model.embedding_dim 2.0 +811 25 model.scoring_fct_norm 2.0 +811 25 negative_sampler.num_negs_per_pos 97.0 +811 25 training.batch_size 1.0 +811 26 model.embedding_dim 1.0 +811 26 model.scoring_fct_norm 2.0 +811 26 negative_sampler.num_negs_per_pos 75.0 +811 26 training.batch_size 1.0 +811 27 model.embedding_dim 0.0 +811 27 model.scoring_fct_norm 1.0 +811 27 negative_sampler.num_negs_per_pos 40.0 +811 27 training.batch_size 0.0 +811 28 model.embedding_dim 2.0 +811 28 model.scoring_fct_norm 2.0 +811 28 negative_sampler.num_negs_per_pos 62.0 +811 28 training.batch_size 1.0 +811 29 model.embedding_dim 0.0 +811 29 model.scoring_fct_norm 2.0 +811 29 negative_sampler.num_negs_per_pos 64.0 +811 29 training.batch_size 1.0 +811 30 model.embedding_dim 1.0 +811 30 model.scoring_fct_norm 1.0 +811 30 negative_sampler.num_negs_per_pos 66.0 +811 30 training.batch_size 1.0 +811 31 model.embedding_dim 2.0 +811 31 model.scoring_fct_norm 1.0 +811 31 negative_sampler.num_negs_per_pos 29.0 +811 31 training.batch_size 2.0 +811 32 model.embedding_dim 1.0 +811 32 model.scoring_fct_norm 1.0 +811 32 negative_sampler.num_negs_per_pos 21.0 +811 32 training.batch_size 2.0 +811 33 model.embedding_dim 2.0 +811 33 model.scoring_fct_norm 1.0 +811 33 negative_sampler.num_negs_per_pos 63.0 +811 33 training.batch_size 0.0 +811 34 model.embedding_dim 
1.0 +811 34 model.scoring_fct_norm 1.0 +811 34 negative_sampler.num_negs_per_pos 14.0 +811 34 training.batch_size 2.0 +811 35 model.embedding_dim 2.0 +811 35 model.scoring_fct_norm 1.0 +811 35 negative_sampler.num_negs_per_pos 74.0 +811 35 training.batch_size 2.0 +811 36 model.embedding_dim 1.0 +811 36 model.scoring_fct_norm 1.0 +811 36 negative_sampler.num_negs_per_pos 2.0 +811 36 training.batch_size 1.0 +811 37 model.embedding_dim 0.0 +811 37 model.scoring_fct_norm 2.0 +811 37 negative_sampler.num_negs_per_pos 38.0 +811 37 training.batch_size 1.0 +811 38 model.embedding_dim 0.0 +811 38 model.scoring_fct_norm 1.0 +811 38 negative_sampler.num_negs_per_pos 85.0 +811 38 training.batch_size 0.0 +811 39 model.embedding_dim 0.0 +811 39 model.scoring_fct_norm 2.0 +811 39 negative_sampler.num_negs_per_pos 47.0 +811 39 training.batch_size 1.0 +811 40 model.embedding_dim 2.0 +811 40 model.scoring_fct_norm 2.0 +811 40 negative_sampler.num_negs_per_pos 33.0 +811 40 training.batch_size 1.0 +811 41 model.embedding_dim 0.0 +811 41 model.scoring_fct_norm 1.0 +811 41 negative_sampler.num_negs_per_pos 84.0 +811 41 training.batch_size 1.0 +811 42 model.embedding_dim 0.0 +811 42 model.scoring_fct_norm 2.0 +811 42 negative_sampler.num_negs_per_pos 21.0 +811 42 training.batch_size 0.0 +811 43 model.embedding_dim 0.0 +811 43 model.scoring_fct_norm 1.0 +811 43 negative_sampler.num_negs_per_pos 75.0 +811 43 training.batch_size 0.0 +811 44 model.embedding_dim 0.0 +811 44 model.scoring_fct_norm 1.0 +811 44 negative_sampler.num_negs_per_pos 84.0 +811 44 training.batch_size 1.0 +811 45 model.embedding_dim 2.0 +811 45 model.scoring_fct_norm 2.0 +811 45 negative_sampler.num_negs_per_pos 76.0 +811 45 training.batch_size 2.0 +811 46 model.embedding_dim 2.0 +811 46 model.scoring_fct_norm 1.0 +811 46 negative_sampler.num_negs_per_pos 14.0 +811 46 training.batch_size 0.0 +811 47 model.embedding_dim 2.0 +811 47 model.scoring_fct_norm 1.0 +811 47 negative_sampler.num_negs_per_pos 53.0 +811 47 
training.batch_size 2.0 +811 48 model.embedding_dim 0.0 +811 48 model.scoring_fct_norm 2.0 +811 48 negative_sampler.num_negs_per_pos 94.0 +811 48 training.batch_size 1.0 +811 49 model.embedding_dim 2.0 +811 49 model.scoring_fct_norm 1.0 +811 49 negative_sampler.num_negs_per_pos 1.0 +811 49 training.batch_size 2.0 +811 50 model.embedding_dim 1.0 +811 50 model.scoring_fct_norm 2.0 +811 50 negative_sampler.num_negs_per_pos 93.0 +811 50 training.batch_size 0.0 +811 51 model.embedding_dim 0.0 +811 51 model.scoring_fct_norm 1.0 +811 51 negative_sampler.num_negs_per_pos 13.0 +811 51 training.batch_size 1.0 +811 52 model.embedding_dim 0.0 +811 52 model.scoring_fct_norm 1.0 +811 52 negative_sampler.num_negs_per_pos 85.0 +811 52 training.batch_size 2.0 +811 53 model.embedding_dim 2.0 +811 53 model.scoring_fct_norm 2.0 +811 53 negative_sampler.num_negs_per_pos 28.0 +811 53 training.batch_size 0.0 +811 54 model.embedding_dim 0.0 +811 54 model.scoring_fct_norm 1.0 +811 54 negative_sampler.num_negs_per_pos 25.0 +811 54 training.batch_size 1.0 +811 55 model.embedding_dim 2.0 +811 55 model.scoring_fct_norm 1.0 +811 55 negative_sampler.num_negs_per_pos 53.0 +811 55 training.batch_size 2.0 +811 56 model.embedding_dim 0.0 +811 56 model.scoring_fct_norm 1.0 +811 56 negative_sampler.num_negs_per_pos 25.0 +811 56 training.batch_size 0.0 +811 57 model.embedding_dim 0.0 +811 57 model.scoring_fct_norm 1.0 +811 57 negative_sampler.num_negs_per_pos 93.0 +811 57 training.batch_size 0.0 +811 58 model.embedding_dim 2.0 +811 58 model.scoring_fct_norm 2.0 +811 58 negative_sampler.num_negs_per_pos 64.0 +811 58 training.batch_size 2.0 +811 59 model.embedding_dim 0.0 +811 59 model.scoring_fct_norm 2.0 +811 59 negative_sampler.num_negs_per_pos 34.0 +811 59 training.batch_size 2.0 +811 60 model.embedding_dim 0.0 +811 60 model.scoring_fct_norm 1.0 +811 60 negative_sampler.num_negs_per_pos 41.0 +811 60 training.batch_size 2.0 +811 61 model.embedding_dim 1.0 +811 61 model.scoring_fct_norm 2.0 +811 61 
negative_sampler.num_negs_per_pos 52.0 +811 61 training.batch_size 0.0 +811 62 model.embedding_dim 2.0 +811 62 model.scoring_fct_norm 1.0 +811 62 negative_sampler.num_negs_per_pos 68.0 +811 62 training.batch_size 1.0 +811 63 model.embedding_dim 0.0 +811 63 model.scoring_fct_norm 2.0 +811 63 negative_sampler.num_negs_per_pos 88.0 +811 63 training.batch_size 1.0 +811 64 model.embedding_dim 0.0 +811 64 model.scoring_fct_norm 2.0 +811 64 negative_sampler.num_negs_per_pos 37.0 +811 64 training.batch_size 1.0 +811 65 model.embedding_dim 0.0 +811 65 model.scoring_fct_norm 1.0 +811 65 negative_sampler.num_negs_per_pos 17.0 +811 65 training.batch_size 2.0 +811 66 model.embedding_dim 2.0 +811 66 model.scoring_fct_norm 1.0 +811 66 negative_sampler.num_negs_per_pos 90.0 +811 66 training.batch_size 2.0 +811 67 model.embedding_dim 0.0 +811 67 model.scoring_fct_norm 2.0 +811 67 negative_sampler.num_negs_per_pos 93.0 +811 67 training.batch_size 1.0 +811 68 model.embedding_dim 1.0 +811 68 model.scoring_fct_norm 1.0 +811 68 negative_sampler.num_negs_per_pos 28.0 +811 68 training.batch_size 0.0 +811 69 model.embedding_dim 1.0 +811 69 model.scoring_fct_norm 1.0 +811 69 negative_sampler.num_negs_per_pos 16.0 +811 69 training.batch_size 0.0 +811 70 model.embedding_dim 2.0 +811 70 model.scoring_fct_norm 1.0 +811 70 negative_sampler.num_negs_per_pos 13.0 +811 70 training.batch_size 2.0 +811 71 model.embedding_dim 1.0 +811 71 model.scoring_fct_norm 1.0 +811 71 negative_sampler.num_negs_per_pos 29.0 +811 71 training.batch_size 1.0 +811 72 model.embedding_dim 0.0 +811 72 model.scoring_fct_norm 2.0 +811 72 negative_sampler.num_negs_per_pos 96.0 +811 72 training.batch_size 1.0 +811 73 model.embedding_dim 0.0 +811 73 model.scoring_fct_norm 1.0 +811 73 negative_sampler.num_negs_per_pos 74.0 +811 73 training.batch_size 0.0 +811 74 model.embedding_dim 0.0 +811 74 model.scoring_fct_norm 1.0 +811 74 negative_sampler.num_negs_per_pos 68.0 +811 74 training.batch_size 1.0 +811 75 model.embedding_dim 
1.0 +811 75 model.scoring_fct_norm 2.0 +811 75 negative_sampler.num_negs_per_pos 81.0 +811 75 training.batch_size 1.0 +811 76 model.embedding_dim 2.0 +811 76 model.scoring_fct_norm 2.0 +811 76 negative_sampler.num_negs_per_pos 5.0 +811 76 training.batch_size 0.0 +811 77 model.embedding_dim 2.0 +811 77 model.scoring_fct_norm 1.0 +811 77 negative_sampler.num_negs_per_pos 96.0 +811 77 training.batch_size 2.0 +811 78 model.embedding_dim 0.0 +811 78 model.scoring_fct_norm 1.0 +811 78 negative_sampler.num_negs_per_pos 90.0 +811 78 training.batch_size 1.0 +811 79 model.embedding_dim 2.0 +811 79 model.scoring_fct_norm 1.0 +811 79 negative_sampler.num_negs_per_pos 58.0 +811 79 training.batch_size 2.0 +811 80 model.embedding_dim 2.0 +811 80 model.scoring_fct_norm 1.0 +811 80 negative_sampler.num_negs_per_pos 80.0 +811 80 training.batch_size 1.0 +811 81 model.embedding_dim 0.0 +811 81 model.scoring_fct_norm 1.0 +811 81 negative_sampler.num_negs_per_pos 22.0 +811 81 training.batch_size 2.0 +811 82 model.embedding_dim 0.0 +811 82 model.scoring_fct_norm 1.0 +811 82 negative_sampler.num_negs_per_pos 12.0 +811 82 training.batch_size 2.0 +811 83 model.embedding_dim 0.0 +811 83 model.scoring_fct_norm 1.0 +811 83 negative_sampler.num_negs_per_pos 66.0 +811 83 training.batch_size 0.0 +811 84 model.embedding_dim 0.0 +811 84 model.scoring_fct_norm 2.0 +811 84 negative_sampler.num_negs_per_pos 63.0 +811 84 training.batch_size 0.0 +811 85 model.embedding_dim 1.0 +811 85 model.scoring_fct_norm 1.0 +811 85 negative_sampler.num_negs_per_pos 81.0 +811 85 training.batch_size 1.0 +811 86 model.embedding_dim 2.0 +811 86 model.scoring_fct_norm 1.0 +811 86 negative_sampler.num_negs_per_pos 24.0 +811 86 training.batch_size 1.0 +811 87 model.embedding_dim 0.0 +811 87 model.scoring_fct_norm 2.0 +811 87 negative_sampler.num_negs_per_pos 55.0 +811 87 training.batch_size 0.0 +811 88 model.embedding_dim 0.0 +811 88 model.scoring_fct_norm 2.0 +811 88 negative_sampler.num_negs_per_pos 88.0 +811 88 
training.batch_size 2.0 +811 89 model.embedding_dim 1.0 +811 89 model.scoring_fct_norm 1.0 +811 89 negative_sampler.num_negs_per_pos 67.0 +811 89 training.batch_size 2.0 +811 90 model.embedding_dim 0.0 +811 90 model.scoring_fct_norm 2.0 +811 90 negative_sampler.num_negs_per_pos 93.0 +811 90 training.batch_size 0.0 +811 91 model.embedding_dim 2.0 +811 91 model.scoring_fct_norm 1.0 +811 91 negative_sampler.num_negs_per_pos 72.0 +811 91 training.batch_size 1.0 +811 92 model.embedding_dim 0.0 +811 92 model.scoring_fct_norm 2.0 +811 92 negative_sampler.num_negs_per_pos 68.0 +811 92 training.batch_size 2.0 +811 93 model.embedding_dim 0.0 +811 93 model.scoring_fct_norm 2.0 +811 93 negative_sampler.num_negs_per_pos 94.0 +811 93 training.batch_size 0.0 +811 94 model.embedding_dim 2.0 +811 94 model.scoring_fct_norm 2.0 +811 94 negative_sampler.num_negs_per_pos 81.0 +811 94 training.batch_size 2.0 +811 95 model.embedding_dim 1.0 +811 95 model.scoring_fct_norm 1.0 +811 95 negative_sampler.num_negs_per_pos 63.0 +811 95 training.batch_size 1.0 +811 96 model.embedding_dim 0.0 +811 96 model.scoring_fct_norm 1.0 +811 96 negative_sampler.num_negs_per_pos 42.0 +811 96 training.batch_size 1.0 +811 97 model.embedding_dim 0.0 +811 97 model.scoring_fct_norm 2.0 +811 97 negative_sampler.num_negs_per_pos 3.0 +811 97 training.batch_size 0.0 +811 98 model.embedding_dim 0.0 +811 98 model.scoring_fct_norm 2.0 +811 98 negative_sampler.num_negs_per_pos 64.0 +811 98 training.batch_size 1.0 +811 99 model.embedding_dim 2.0 +811 99 model.scoring_fct_norm 2.0 +811 99 negative_sampler.num_negs_per_pos 0.0 +811 99 training.batch_size 0.0 +811 100 model.embedding_dim 0.0 +811 100 model.scoring_fct_norm 1.0 +811 100 negative_sampler.num_negs_per_pos 37.0 +811 100 training.batch_size 1.0 +811 1 dataset """kinships""" +811 1 model """transe""" +811 1 loss """bceaftersigmoid""" +811 1 regularizer """no""" +811 1 optimizer """adadelta""" +811 1 training_loop """owa""" +811 1 negative_sampler """basic""" +811 
1 evaluator """rankbased""" +811 2 dataset """kinships""" +811 2 model """transe""" +811 2 loss """bceaftersigmoid""" +811 2 regularizer """no""" +811 2 optimizer """adadelta""" +811 2 training_loop """owa""" +811 2 negative_sampler """basic""" +811 2 evaluator """rankbased""" +811 3 dataset """kinships""" +811 3 model """transe""" +811 3 loss """bceaftersigmoid""" +811 3 regularizer """no""" +811 3 optimizer """adadelta""" +811 3 training_loop """owa""" +811 3 negative_sampler """basic""" +811 3 evaluator """rankbased""" +811 4 dataset """kinships""" +811 4 model """transe""" +811 4 loss """bceaftersigmoid""" +811 4 regularizer """no""" +811 4 optimizer """adadelta""" +811 4 training_loop """owa""" +811 4 negative_sampler """basic""" +811 4 evaluator """rankbased""" +811 5 dataset """kinships""" +811 5 model """transe""" +811 5 loss """bceaftersigmoid""" +811 5 regularizer """no""" +811 5 optimizer """adadelta""" +811 5 training_loop """owa""" +811 5 negative_sampler """basic""" +811 5 evaluator """rankbased""" +811 6 dataset """kinships""" +811 6 model """transe""" +811 6 loss """bceaftersigmoid""" +811 6 regularizer """no""" +811 6 optimizer """adadelta""" +811 6 training_loop """owa""" +811 6 negative_sampler """basic""" +811 6 evaluator """rankbased""" +811 7 dataset """kinships""" +811 7 model """transe""" +811 7 loss """bceaftersigmoid""" +811 7 regularizer """no""" +811 7 optimizer """adadelta""" +811 7 training_loop """owa""" +811 7 negative_sampler """basic""" +811 7 evaluator """rankbased""" +811 8 dataset """kinships""" +811 8 model """transe""" +811 8 loss """bceaftersigmoid""" +811 8 regularizer """no""" +811 8 optimizer """adadelta""" +811 8 training_loop """owa""" +811 8 negative_sampler """basic""" +811 8 evaluator """rankbased""" +811 9 dataset """kinships""" +811 9 model """transe""" +811 9 loss """bceaftersigmoid""" +811 9 regularizer """no""" +811 9 optimizer """adadelta""" +811 9 training_loop """owa""" +811 9 negative_sampler """basic""" +811 
9 evaluator """rankbased""" +811 10 dataset """kinships""" +811 10 model """transe""" +811 10 loss """bceaftersigmoid""" +811 10 regularizer """no""" +811 10 optimizer """adadelta""" +811 10 training_loop """owa""" +811 10 negative_sampler """basic""" +811 10 evaluator """rankbased""" +811 11 dataset """kinships""" +811 11 model """transe""" +811 11 loss """bceaftersigmoid""" +811 11 regularizer """no""" +811 11 optimizer """adadelta""" +811 11 training_loop """owa""" +811 11 negative_sampler """basic""" +811 11 evaluator """rankbased""" +811 12 dataset """kinships""" +811 12 model """transe""" +811 12 loss """bceaftersigmoid""" +811 12 regularizer """no""" +811 12 optimizer """adadelta""" +811 12 training_loop """owa""" +811 12 negative_sampler """basic""" +811 12 evaluator """rankbased""" +811 13 dataset """kinships""" +811 13 model """transe""" +811 13 loss """bceaftersigmoid""" +811 13 regularizer """no""" +811 13 optimizer """adadelta""" +811 13 training_loop """owa""" +811 13 negative_sampler """basic""" +811 13 evaluator """rankbased""" +811 14 dataset """kinships""" +811 14 model """transe""" +811 14 loss """bceaftersigmoid""" +811 14 regularizer """no""" +811 14 optimizer """adadelta""" +811 14 training_loop """owa""" +811 14 negative_sampler """basic""" +811 14 evaluator """rankbased""" +811 15 dataset """kinships""" +811 15 model """transe""" +811 15 loss """bceaftersigmoid""" +811 15 regularizer """no""" +811 15 optimizer """adadelta""" +811 15 training_loop """owa""" +811 15 negative_sampler """basic""" +811 15 evaluator """rankbased""" +811 16 dataset """kinships""" +811 16 model """transe""" +811 16 loss """bceaftersigmoid""" +811 16 regularizer """no""" +811 16 optimizer """adadelta""" +811 16 training_loop """owa""" +811 16 negative_sampler """basic""" +811 16 evaluator """rankbased""" +811 17 dataset """kinships""" +811 17 model """transe""" +811 17 loss """bceaftersigmoid""" +811 17 regularizer """no""" +811 17 optimizer """adadelta""" +811 17 
training_loop """owa""" +811 17 negative_sampler """basic""" +811 17 evaluator """rankbased""" +811 18 dataset """kinships""" +811 18 model """transe""" +811 18 loss """bceaftersigmoid""" +811 18 regularizer """no""" +811 18 optimizer """adadelta""" +811 18 training_loop """owa""" +811 18 negative_sampler """basic""" +811 18 evaluator """rankbased""" +811 19 dataset """kinships""" +811 19 model """transe""" +811 19 loss """bceaftersigmoid""" +811 19 regularizer """no""" +811 19 optimizer """adadelta""" +811 19 training_loop """owa""" +811 19 negative_sampler """basic""" +811 19 evaluator """rankbased""" +811 20 dataset """kinships""" +811 20 model """transe""" +811 20 loss """bceaftersigmoid""" +811 20 regularizer """no""" +811 20 optimizer """adadelta""" +811 20 training_loop """owa""" +811 20 negative_sampler """basic""" +811 20 evaluator """rankbased""" +811 21 dataset """kinships""" +811 21 model """transe""" +811 21 loss """bceaftersigmoid""" +811 21 regularizer """no""" +811 21 optimizer """adadelta""" +811 21 training_loop """owa""" +811 21 negative_sampler """basic""" +811 21 evaluator """rankbased""" +811 22 dataset """kinships""" +811 22 model """transe""" +811 22 loss """bceaftersigmoid""" +811 22 regularizer """no""" +811 22 optimizer """adadelta""" +811 22 training_loop """owa""" +811 22 negative_sampler """basic""" +811 22 evaluator """rankbased""" +811 23 dataset """kinships""" +811 23 model """transe""" +811 23 loss """bceaftersigmoid""" +811 23 regularizer """no""" +811 23 optimizer """adadelta""" +811 23 training_loop """owa""" +811 23 negative_sampler """basic""" +811 23 evaluator """rankbased""" +811 24 dataset """kinships""" +811 24 model """transe""" +811 24 loss """bceaftersigmoid""" +811 24 regularizer """no""" +811 24 optimizer """adadelta""" +811 24 training_loop """owa""" +811 24 negative_sampler """basic""" +811 24 evaluator """rankbased""" +811 25 dataset """kinships""" +811 25 model """transe""" +811 25 loss """bceaftersigmoid""" +811 
25 regularizer """no""" +811 25 optimizer """adadelta""" +811 25 training_loop """owa""" +811 25 negative_sampler """basic""" +811 25 evaluator """rankbased""" +811 26 dataset """kinships""" +811 26 model """transe""" +811 26 loss """bceaftersigmoid""" +811 26 regularizer """no""" +811 26 optimizer """adadelta""" +811 26 training_loop """owa""" +811 26 negative_sampler """basic""" +811 26 evaluator """rankbased""" +811 27 dataset """kinships""" +811 27 model """transe""" +811 27 loss """bceaftersigmoid""" +811 27 regularizer """no""" +811 27 optimizer """adadelta""" +811 27 training_loop """owa""" +811 27 negative_sampler """basic""" +811 27 evaluator """rankbased""" +811 28 dataset """kinships""" +811 28 model """transe""" +811 28 loss """bceaftersigmoid""" +811 28 regularizer """no""" +811 28 optimizer """adadelta""" +811 28 training_loop """owa""" +811 28 negative_sampler """basic""" +811 28 evaluator """rankbased""" +811 29 dataset """kinships""" +811 29 model """transe""" +811 29 loss """bceaftersigmoid""" +811 29 regularizer """no""" +811 29 optimizer """adadelta""" +811 29 training_loop """owa""" +811 29 negative_sampler """basic""" +811 29 evaluator """rankbased""" +811 30 dataset """kinships""" +811 30 model """transe""" +811 30 loss """bceaftersigmoid""" +811 30 regularizer """no""" +811 30 optimizer """adadelta""" +811 30 training_loop """owa""" +811 30 negative_sampler """basic""" +811 30 evaluator """rankbased""" +811 31 dataset """kinships""" +811 31 model """transe""" +811 31 loss """bceaftersigmoid""" +811 31 regularizer """no""" +811 31 optimizer """adadelta""" +811 31 training_loop """owa""" +811 31 negative_sampler """basic""" +811 31 evaluator """rankbased""" +811 32 dataset """kinships""" +811 32 model """transe""" +811 32 loss """bceaftersigmoid""" +811 32 regularizer """no""" +811 32 optimizer """adadelta""" +811 32 training_loop """owa""" +811 32 negative_sampler """basic""" +811 32 evaluator """rankbased""" +811 33 dataset """kinships""" 
+811 33 model """transe""" +811 33 loss """bceaftersigmoid""" +811 33 regularizer """no""" +811 33 optimizer """adadelta""" +811 33 training_loop """owa""" +811 33 negative_sampler """basic""" +811 33 evaluator """rankbased""" +811 34 dataset """kinships""" +811 34 model """transe""" +811 34 loss """bceaftersigmoid""" +811 34 regularizer """no""" +811 34 optimizer """adadelta""" +811 34 training_loop """owa""" +811 34 negative_sampler """basic""" +811 34 evaluator """rankbased""" +811 35 dataset """kinships""" +811 35 model """transe""" +811 35 loss """bceaftersigmoid""" +811 35 regularizer """no""" +811 35 optimizer """adadelta""" +811 35 training_loop """owa""" +811 35 negative_sampler """basic""" +811 35 evaluator """rankbased""" +811 36 dataset """kinships""" +811 36 model """transe""" +811 36 loss """bceaftersigmoid""" +811 36 regularizer """no""" +811 36 optimizer """adadelta""" +811 36 training_loop """owa""" +811 36 negative_sampler """basic""" +811 36 evaluator """rankbased""" +811 37 dataset """kinships""" +811 37 model """transe""" +811 37 loss """bceaftersigmoid""" +811 37 regularizer """no""" +811 37 optimizer """adadelta""" +811 37 training_loop """owa""" +811 37 negative_sampler """basic""" +811 37 evaluator """rankbased""" +811 38 dataset """kinships""" +811 38 model """transe""" +811 38 loss """bceaftersigmoid""" +811 38 regularizer """no""" +811 38 optimizer """adadelta""" +811 38 training_loop """owa""" +811 38 negative_sampler """basic""" +811 38 evaluator """rankbased""" +811 39 dataset """kinships""" +811 39 model """transe""" +811 39 loss """bceaftersigmoid""" +811 39 regularizer """no""" +811 39 optimizer """adadelta""" +811 39 training_loop """owa""" +811 39 negative_sampler """basic""" +811 39 evaluator """rankbased""" +811 40 dataset """kinships""" +811 40 model """transe""" +811 40 loss """bceaftersigmoid""" +811 40 regularizer """no""" +811 40 optimizer """adadelta""" +811 40 training_loop """owa""" +811 40 negative_sampler """basic""" 
+811 40 evaluator """rankbased""" +811 41 dataset """kinships""" +811 41 model """transe""" +811 41 loss """bceaftersigmoid""" +811 41 regularizer """no""" +811 41 optimizer """adadelta""" +811 41 training_loop """owa""" +811 41 negative_sampler """basic""" +811 41 evaluator """rankbased""" +811 42 dataset """kinships""" +811 42 model """transe""" +811 42 loss """bceaftersigmoid""" +811 42 regularizer """no""" +811 42 optimizer """adadelta""" +811 42 training_loop """owa""" +811 42 negative_sampler """basic""" +811 42 evaluator """rankbased""" +811 43 dataset """kinships""" +811 43 model """transe""" +811 43 loss """bceaftersigmoid""" +811 43 regularizer """no""" +811 43 optimizer """adadelta""" +811 43 training_loop """owa""" +811 43 negative_sampler """basic""" +811 43 evaluator """rankbased""" +811 44 dataset """kinships""" +811 44 model """transe""" +811 44 loss """bceaftersigmoid""" +811 44 regularizer """no""" +811 44 optimizer """adadelta""" +811 44 training_loop """owa""" +811 44 negative_sampler """basic""" +811 44 evaluator """rankbased""" +811 45 dataset """kinships""" +811 45 model """transe""" +811 45 loss """bceaftersigmoid""" +811 45 regularizer """no""" +811 45 optimizer """adadelta""" +811 45 training_loop """owa""" +811 45 negative_sampler """basic""" +811 45 evaluator """rankbased""" +811 46 dataset """kinships""" +811 46 model """transe""" +811 46 loss """bceaftersigmoid""" +811 46 regularizer """no""" +811 46 optimizer """adadelta""" +811 46 training_loop """owa""" +811 46 negative_sampler """basic""" +811 46 evaluator """rankbased""" +811 47 dataset """kinships""" +811 47 model """transe""" +811 47 loss """bceaftersigmoid""" +811 47 regularizer """no""" +811 47 optimizer """adadelta""" +811 47 training_loop """owa""" +811 47 negative_sampler """basic""" +811 47 evaluator """rankbased""" +811 48 dataset """kinships""" +811 48 model """transe""" +811 48 loss """bceaftersigmoid""" +811 48 regularizer """no""" +811 48 optimizer """adadelta""" +811 
48 training_loop """owa""" +811 48 negative_sampler """basic""" +811 48 evaluator """rankbased""" +811 49 dataset """kinships""" +811 49 model """transe""" +811 49 loss """bceaftersigmoid""" +811 49 regularizer """no""" +811 49 optimizer """adadelta""" +811 49 training_loop """owa""" +811 49 negative_sampler """basic""" +811 49 evaluator """rankbased""" +811 50 dataset """kinships""" +811 50 model """transe""" +811 50 loss """bceaftersigmoid""" +811 50 regularizer """no""" +811 50 optimizer """adadelta""" +811 50 training_loop """owa""" +811 50 negative_sampler """basic""" +811 50 evaluator """rankbased""" +811 51 dataset """kinships""" +811 51 model """transe""" +811 51 loss """bceaftersigmoid""" +811 51 regularizer """no""" +811 51 optimizer """adadelta""" +811 51 training_loop """owa""" +811 51 negative_sampler """basic""" +811 51 evaluator """rankbased""" +811 52 dataset """kinships""" +811 52 model """transe""" +811 52 loss """bceaftersigmoid""" +811 52 regularizer """no""" +811 52 optimizer """adadelta""" +811 52 training_loop """owa""" +811 52 negative_sampler """basic""" +811 52 evaluator """rankbased""" +811 53 dataset """kinships""" +811 53 model """transe""" +811 53 loss """bceaftersigmoid""" +811 53 regularizer """no""" +811 53 optimizer """adadelta""" +811 53 training_loop """owa""" +811 53 negative_sampler """basic""" +811 53 evaluator """rankbased""" +811 54 dataset """kinships""" +811 54 model """transe""" +811 54 loss """bceaftersigmoid""" +811 54 regularizer """no""" +811 54 optimizer """adadelta""" +811 54 training_loop """owa""" +811 54 negative_sampler """basic""" +811 54 evaluator """rankbased""" +811 55 dataset """kinships""" +811 55 model """transe""" +811 55 loss """bceaftersigmoid""" +811 55 regularizer """no""" +811 55 optimizer """adadelta""" +811 55 training_loop """owa""" +811 55 negative_sampler """basic""" +811 55 evaluator """rankbased""" +811 56 dataset """kinships""" +811 56 model """transe""" +811 56 loss """bceaftersigmoid""" 
+811 56 regularizer """no""" +811 56 optimizer """adadelta""" +811 56 training_loop """owa""" +811 56 negative_sampler """basic""" +811 56 evaluator """rankbased""" +811 57 dataset """kinships""" +811 57 model """transe""" +811 57 loss """bceaftersigmoid""" +811 57 regularizer """no""" +811 57 optimizer """adadelta""" +811 57 training_loop """owa""" +811 57 negative_sampler """basic""" +811 57 evaluator """rankbased""" +811 58 dataset """kinships""" +811 58 model """transe""" +811 58 loss """bceaftersigmoid""" +811 58 regularizer """no""" +811 58 optimizer """adadelta""" +811 58 training_loop """owa""" +811 58 negative_sampler """basic""" +811 58 evaluator """rankbased""" +811 59 dataset """kinships""" +811 59 model """transe""" +811 59 loss """bceaftersigmoid""" +811 59 regularizer """no""" +811 59 optimizer """adadelta""" +811 59 training_loop """owa""" +811 59 negative_sampler """basic""" +811 59 evaluator """rankbased""" +811 60 dataset """kinships""" +811 60 model """transe""" +811 60 loss """bceaftersigmoid""" +811 60 regularizer """no""" +811 60 optimizer """adadelta""" +811 60 training_loop """owa""" +811 60 negative_sampler """basic""" +811 60 evaluator """rankbased""" +811 61 dataset """kinships""" +811 61 model """transe""" +811 61 loss """bceaftersigmoid""" +811 61 regularizer """no""" +811 61 optimizer """adadelta""" +811 61 training_loop """owa""" +811 61 negative_sampler """basic""" +811 61 evaluator """rankbased""" +811 62 dataset """kinships""" +811 62 model """transe""" +811 62 loss """bceaftersigmoid""" +811 62 regularizer """no""" +811 62 optimizer """adadelta""" +811 62 training_loop """owa""" +811 62 negative_sampler """basic""" +811 62 evaluator """rankbased""" +811 63 dataset """kinships""" +811 63 model """transe""" +811 63 loss """bceaftersigmoid""" +811 63 regularizer """no""" +811 63 optimizer """adadelta""" +811 63 training_loop """owa""" +811 63 negative_sampler """basic""" +811 63 evaluator """rankbased""" +811 64 dataset 
"""kinships""" +811 64 model """transe""" +811 64 loss """bceaftersigmoid""" +811 64 regularizer """no""" +811 64 optimizer """adadelta""" +811 64 training_loop """owa""" +811 64 negative_sampler """basic""" +811 64 evaluator """rankbased""" +811 65 dataset """kinships""" +811 65 model """transe""" +811 65 loss """bceaftersigmoid""" +811 65 regularizer """no""" +811 65 optimizer """adadelta""" +811 65 training_loop """owa""" +811 65 negative_sampler """basic""" +811 65 evaluator """rankbased""" +811 66 dataset """kinships""" +811 66 model """transe""" +811 66 loss """bceaftersigmoid""" +811 66 regularizer """no""" +811 66 optimizer """adadelta""" +811 66 training_loop """owa""" +811 66 negative_sampler """basic""" +811 66 evaluator """rankbased""" +811 67 dataset """kinships""" +811 67 model """transe""" +811 67 loss """bceaftersigmoid""" +811 67 regularizer """no""" +811 67 optimizer """adadelta""" +811 67 training_loop """owa""" +811 67 negative_sampler """basic""" +811 67 evaluator """rankbased""" +811 68 dataset """kinships""" +811 68 model """transe""" +811 68 loss """bceaftersigmoid""" +811 68 regularizer """no""" +811 68 optimizer """adadelta""" +811 68 training_loop """owa""" +811 68 negative_sampler """basic""" +811 68 evaluator """rankbased""" +811 69 dataset """kinships""" +811 69 model """transe""" +811 69 loss """bceaftersigmoid""" +811 69 regularizer """no""" +811 69 optimizer """adadelta""" +811 69 training_loop """owa""" +811 69 negative_sampler """basic""" +811 69 evaluator """rankbased""" +811 70 dataset """kinships""" +811 70 model """transe""" +811 70 loss """bceaftersigmoid""" +811 70 regularizer """no""" +811 70 optimizer """adadelta""" +811 70 training_loop """owa""" +811 70 negative_sampler """basic""" +811 70 evaluator """rankbased""" +811 71 dataset """kinships""" +811 71 model """transe""" +811 71 loss """bceaftersigmoid""" +811 71 regularizer """no""" +811 71 optimizer """adadelta""" +811 71 training_loop """owa""" +811 71 
negative_sampler """basic""" +811 71 evaluator """rankbased""" +811 72 dataset """kinships""" +811 72 model """transe""" +811 72 loss """bceaftersigmoid""" +811 72 regularizer """no""" +811 72 optimizer """adadelta""" +811 72 training_loop """owa""" +811 72 negative_sampler """basic""" +811 72 evaluator """rankbased""" +811 73 dataset """kinships""" +811 73 model """transe""" +811 73 loss """bceaftersigmoid""" +811 73 regularizer """no""" +811 73 optimizer """adadelta""" +811 73 training_loop """owa""" +811 73 negative_sampler """basic""" +811 73 evaluator """rankbased""" +811 74 dataset """kinships""" +811 74 model """transe""" +811 74 loss """bceaftersigmoid""" +811 74 regularizer """no""" +811 74 optimizer """adadelta""" +811 74 training_loop """owa""" +811 74 negative_sampler """basic""" +811 74 evaluator """rankbased""" +811 75 dataset """kinships""" +811 75 model """transe""" +811 75 loss """bceaftersigmoid""" +811 75 regularizer """no""" +811 75 optimizer """adadelta""" +811 75 training_loop """owa""" +811 75 negative_sampler """basic""" +811 75 evaluator """rankbased""" +811 76 dataset """kinships""" +811 76 model """transe""" +811 76 loss """bceaftersigmoid""" +811 76 regularizer """no""" +811 76 optimizer """adadelta""" +811 76 training_loop """owa""" +811 76 negative_sampler """basic""" +811 76 evaluator """rankbased""" +811 77 dataset """kinships""" +811 77 model """transe""" +811 77 loss """bceaftersigmoid""" +811 77 regularizer """no""" +811 77 optimizer """adadelta""" +811 77 training_loop """owa""" +811 77 negative_sampler """basic""" +811 77 evaluator """rankbased""" +811 78 dataset """kinships""" +811 78 model """transe""" +811 78 loss """bceaftersigmoid""" +811 78 regularizer """no""" +811 78 optimizer """adadelta""" +811 78 training_loop """owa""" +811 78 negative_sampler """basic""" +811 78 evaluator """rankbased""" +811 79 dataset """kinships""" +811 79 model """transe""" +811 79 loss """bceaftersigmoid""" +811 79 regularizer """no""" +811 79 
optimizer """adadelta""" +811 79 training_loop """owa""" +811 79 negative_sampler """basic""" +811 79 evaluator """rankbased""" +811 80 dataset """kinships""" +811 80 model """transe""" +811 80 loss """bceaftersigmoid""" +811 80 regularizer """no""" +811 80 optimizer """adadelta""" +811 80 training_loop """owa""" +811 80 negative_sampler """basic""" +811 80 evaluator """rankbased""" +811 81 dataset """kinships""" +811 81 model """transe""" +811 81 loss """bceaftersigmoid""" +811 81 regularizer """no""" +811 81 optimizer """adadelta""" +811 81 training_loop """owa""" +811 81 negative_sampler """basic""" +811 81 evaluator """rankbased""" +811 82 dataset """kinships""" +811 82 model """transe""" +811 82 loss """bceaftersigmoid""" +811 82 regularizer """no""" +811 82 optimizer """adadelta""" +811 82 training_loop """owa""" +811 82 negative_sampler """basic""" +811 82 evaluator """rankbased""" +811 83 dataset """kinships""" +811 83 model """transe""" +811 83 loss """bceaftersigmoid""" +811 83 regularizer """no""" +811 83 optimizer """adadelta""" +811 83 training_loop """owa""" +811 83 negative_sampler """basic""" +811 83 evaluator """rankbased""" +811 84 dataset """kinships""" +811 84 model """transe""" +811 84 loss """bceaftersigmoid""" +811 84 regularizer """no""" +811 84 optimizer """adadelta""" +811 84 training_loop """owa""" +811 84 negative_sampler """basic""" +811 84 evaluator """rankbased""" +811 85 dataset """kinships""" +811 85 model """transe""" +811 85 loss """bceaftersigmoid""" +811 85 regularizer """no""" +811 85 optimizer """adadelta""" +811 85 training_loop """owa""" +811 85 negative_sampler """basic""" +811 85 evaluator """rankbased""" +811 86 dataset """kinships""" +811 86 model """transe""" +811 86 loss """bceaftersigmoid""" +811 86 regularizer """no""" +811 86 optimizer """adadelta""" +811 86 training_loop """owa""" +811 86 negative_sampler """basic""" +811 86 evaluator """rankbased""" +811 87 dataset """kinships""" +811 87 model """transe""" +811 87 
loss """bceaftersigmoid""" +811 87 regularizer """no""" +811 87 optimizer """adadelta""" +811 87 training_loop """owa""" +811 87 negative_sampler """basic""" +811 87 evaluator """rankbased""" +811 88 dataset """kinships""" +811 88 model """transe""" +811 88 loss """bceaftersigmoid""" +811 88 regularizer """no""" +811 88 optimizer """adadelta""" +811 88 training_loop """owa""" +811 88 negative_sampler """basic""" +811 88 evaluator """rankbased""" +811 89 dataset """kinships""" +811 89 model """transe""" +811 89 loss """bceaftersigmoid""" +811 89 regularizer """no""" +811 89 optimizer """adadelta""" +811 89 training_loop """owa""" +811 89 negative_sampler """basic""" +811 89 evaluator """rankbased""" +811 90 dataset """kinships""" +811 90 model """transe""" +811 90 loss """bceaftersigmoid""" +811 90 regularizer """no""" +811 90 optimizer """adadelta""" +811 90 training_loop """owa""" +811 90 negative_sampler """basic""" +811 90 evaluator """rankbased""" +811 91 dataset """kinships""" +811 91 model """transe""" +811 91 loss """bceaftersigmoid""" +811 91 regularizer """no""" +811 91 optimizer """adadelta""" +811 91 training_loop """owa""" +811 91 negative_sampler """basic""" +811 91 evaluator """rankbased""" +811 92 dataset """kinships""" +811 92 model """transe""" +811 92 loss """bceaftersigmoid""" +811 92 regularizer """no""" +811 92 optimizer """adadelta""" +811 92 training_loop """owa""" +811 92 negative_sampler """basic""" +811 92 evaluator """rankbased""" +811 93 dataset """kinships""" +811 93 model """transe""" +811 93 loss """bceaftersigmoid""" +811 93 regularizer """no""" +811 93 optimizer """adadelta""" +811 93 training_loop """owa""" +811 93 negative_sampler """basic""" +811 93 evaluator """rankbased""" +811 94 dataset """kinships""" +811 94 model """transe""" +811 94 loss """bceaftersigmoid""" +811 94 regularizer """no""" +811 94 optimizer """adadelta""" +811 94 training_loop """owa""" +811 94 negative_sampler """basic""" +811 94 evaluator """rankbased""" 
+811 95 dataset """kinships""" +811 95 model """transe""" +811 95 loss """bceaftersigmoid""" +811 95 regularizer """no""" +811 95 optimizer """adadelta""" +811 95 training_loop """owa""" +811 95 negative_sampler """basic""" +811 95 evaluator """rankbased""" +811 96 dataset """kinships""" +811 96 model """transe""" +811 96 loss """bceaftersigmoid""" +811 96 regularizer """no""" +811 96 optimizer """adadelta""" +811 96 training_loop """owa""" +811 96 negative_sampler """basic""" +811 96 evaluator """rankbased""" +811 97 dataset """kinships""" +811 97 model """transe""" +811 97 loss """bceaftersigmoid""" +811 97 regularizer """no""" +811 97 optimizer """adadelta""" +811 97 training_loop """owa""" +811 97 negative_sampler """basic""" +811 97 evaluator """rankbased""" +811 98 dataset """kinships""" +811 98 model """transe""" +811 98 loss """bceaftersigmoid""" +811 98 regularizer """no""" +811 98 optimizer """adadelta""" +811 98 training_loop """owa""" +811 98 negative_sampler """basic""" +811 98 evaluator """rankbased""" +811 99 dataset """kinships""" +811 99 model """transe""" +811 99 loss """bceaftersigmoid""" +811 99 regularizer """no""" +811 99 optimizer """adadelta""" +811 99 training_loop """owa""" +811 99 negative_sampler """basic""" +811 99 evaluator """rankbased""" +811 100 dataset """kinships""" +811 100 model """transe""" +811 100 loss """bceaftersigmoid""" +811 100 regularizer """no""" +811 100 optimizer """adadelta""" +811 100 training_loop """owa""" +811 100 negative_sampler """basic""" +811 100 evaluator """rankbased""" +812 1 model.embedding_dim 0.0 +812 1 model.scoring_fct_norm 1.0 +812 1 negative_sampler.num_negs_per_pos 7.0 +812 1 training.batch_size 0.0 +812 2 model.embedding_dim 1.0 +812 2 model.scoring_fct_norm 1.0 +812 2 negative_sampler.num_negs_per_pos 11.0 +812 2 training.batch_size 0.0 +812 3 model.embedding_dim 1.0 +812 3 model.scoring_fct_norm 2.0 +812 3 negative_sampler.num_negs_per_pos 36.0 +812 3 training.batch_size 2.0 +812 4 
model.embedding_dim 2.0 +812 4 model.scoring_fct_norm 1.0 +812 4 negative_sampler.num_negs_per_pos 60.0 +812 4 training.batch_size 0.0 +812 5 model.embedding_dim 0.0 +812 5 model.scoring_fct_norm 1.0 +812 5 negative_sampler.num_negs_per_pos 26.0 +812 5 training.batch_size 1.0 +812 6 model.embedding_dim 1.0 +812 6 model.scoring_fct_norm 2.0 +812 6 negative_sampler.num_negs_per_pos 91.0 +812 6 training.batch_size 0.0 +812 7 model.embedding_dim 2.0 +812 7 model.scoring_fct_norm 2.0 +812 7 negative_sampler.num_negs_per_pos 86.0 +812 7 training.batch_size 2.0 +812 8 model.embedding_dim 0.0 +812 8 model.scoring_fct_norm 2.0 +812 8 negative_sampler.num_negs_per_pos 75.0 +812 8 training.batch_size 2.0 +812 9 model.embedding_dim 0.0 +812 9 model.scoring_fct_norm 2.0 +812 9 negative_sampler.num_negs_per_pos 70.0 +812 9 training.batch_size 0.0 +812 10 model.embedding_dim 2.0 +812 10 model.scoring_fct_norm 1.0 +812 10 negative_sampler.num_negs_per_pos 79.0 +812 10 training.batch_size 1.0 +812 11 model.embedding_dim 0.0 +812 11 model.scoring_fct_norm 2.0 +812 11 negative_sampler.num_negs_per_pos 39.0 +812 11 training.batch_size 1.0 +812 12 model.embedding_dim 1.0 +812 12 model.scoring_fct_norm 2.0 +812 12 negative_sampler.num_negs_per_pos 44.0 +812 12 training.batch_size 1.0 +812 13 model.embedding_dim 1.0 +812 13 model.scoring_fct_norm 2.0 +812 13 negative_sampler.num_negs_per_pos 48.0 +812 13 training.batch_size 0.0 +812 14 model.embedding_dim 1.0 +812 14 model.scoring_fct_norm 2.0 +812 14 negative_sampler.num_negs_per_pos 46.0 +812 14 training.batch_size 2.0 +812 15 model.embedding_dim 2.0 +812 15 model.scoring_fct_norm 2.0 +812 15 negative_sampler.num_negs_per_pos 7.0 +812 15 training.batch_size 1.0 +812 16 model.embedding_dim 2.0 +812 16 model.scoring_fct_norm 2.0 +812 16 negative_sampler.num_negs_per_pos 60.0 +812 16 training.batch_size 1.0 +812 17 model.embedding_dim 0.0 +812 17 model.scoring_fct_norm 1.0 +812 17 negative_sampler.num_negs_per_pos 25.0 +812 17 
training.batch_size 1.0 +812 18 model.embedding_dim 2.0 +812 18 model.scoring_fct_norm 1.0 +812 18 negative_sampler.num_negs_per_pos 75.0 +812 18 training.batch_size 2.0 +812 19 model.embedding_dim 1.0 +812 19 model.scoring_fct_norm 1.0 +812 19 negative_sampler.num_negs_per_pos 78.0 +812 19 training.batch_size 0.0 +812 20 model.embedding_dim 2.0 +812 20 model.scoring_fct_norm 2.0 +812 20 negative_sampler.num_negs_per_pos 90.0 +812 20 training.batch_size 1.0 +812 21 model.embedding_dim 1.0 +812 21 model.scoring_fct_norm 2.0 +812 21 negative_sampler.num_negs_per_pos 31.0 +812 21 training.batch_size 0.0 +812 22 model.embedding_dim 0.0 +812 22 model.scoring_fct_norm 1.0 +812 22 negative_sampler.num_negs_per_pos 45.0 +812 22 training.batch_size 0.0 +812 23 model.embedding_dim 2.0 +812 23 model.scoring_fct_norm 1.0 +812 23 negative_sampler.num_negs_per_pos 20.0 +812 23 training.batch_size 2.0 +812 24 model.embedding_dim 2.0 +812 24 model.scoring_fct_norm 1.0 +812 24 negative_sampler.num_negs_per_pos 85.0 +812 24 training.batch_size 2.0 +812 25 model.embedding_dim 1.0 +812 25 model.scoring_fct_norm 1.0 +812 25 negative_sampler.num_negs_per_pos 3.0 +812 25 training.batch_size 1.0 +812 26 model.embedding_dim 0.0 +812 26 model.scoring_fct_norm 1.0 +812 26 negative_sampler.num_negs_per_pos 23.0 +812 26 training.batch_size 1.0 +812 27 model.embedding_dim 2.0 +812 27 model.scoring_fct_norm 2.0 +812 27 negative_sampler.num_negs_per_pos 44.0 +812 27 training.batch_size 2.0 +812 28 model.embedding_dim 1.0 +812 28 model.scoring_fct_norm 2.0 +812 28 negative_sampler.num_negs_per_pos 70.0 +812 28 training.batch_size 0.0 +812 29 model.embedding_dim 0.0 +812 29 model.scoring_fct_norm 1.0 +812 29 negative_sampler.num_negs_per_pos 65.0 +812 29 training.batch_size 1.0 +812 30 model.embedding_dim 2.0 +812 30 model.scoring_fct_norm 1.0 +812 30 negative_sampler.num_negs_per_pos 88.0 +812 30 training.batch_size 1.0 +812 31 model.embedding_dim 2.0 +812 31 model.scoring_fct_norm 1.0 +812 31 
negative_sampler.num_negs_per_pos 98.0 +812 31 training.batch_size 1.0 +812 32 model.embedding_dim 2.0 +812 32 model.scoring_fct_norm 1.0 +812 32 negative_sampler.num_negs_per_pos 56.0 +812 32 training.batch_size 1.0 +812 33 model.embedding_dim 2.0 +812 33 model.scoring_fct_norm 1.0 +812 33 negative_sampler.num_negs_per_pos 44.0 +812 33 training.batch_size 0.0 +812 34 model.embedding_dim 2.0 +812 34 model.scoring_fct_norm 1.0 +812 34 negative_sampler.num_negs_per_pos 47.0 +812 34 training.batch_size 1.0 +812 35 model.embedding_dim 0.0 +812 35 model.scoring_fct_norm 2.0 +812 35 negative_sampler.num_negs_per_pos 26.0 +812 35 training.batch_size 2.0 +812 36 model.embedding_dim 2.0 +812 36 model.scoring_fct_norm 1.0 +812 36 negative_sampler.num_negs_per_pos 89.0 +812 36 training.batch_size 0.0 +812 37 model.embedding_dim 1.0 +812 37 model.scoring_fct_norm 1.0 +812 37 negative_sampler.num_negs_per_pos 69.0 +812 37 training.batch_size 0.0 +812 38 model.embedding_dim 1.0 +812 38 model.scoring_fct_norm 2.0 +812 38 negative_sampler.num_negs_per_pos 28.0 +812 38 training.batch_size 2.0 +812 39 model.embedding_dim 2.0 +812 39 model.scoring_fct_norm 1.0 +812 39 negative_sampler.num_negs_per_pos 30.0 +812 39 training.batch_size 2.0 +812 40 model.embedding_dim 0.0 +812 40 model.scoring_fct_norm 2.0 +812 40 negative_sampler.num_negs_per_pos 94.0 +812 40 training.batch_size 1.0 +812 41 model.embedding_dim 0.0 +812 41 model.scoring_fct_norm 1.0 +812 41 negative_sampler.num_negs_per_pos 20.0 +812 41 training.batch_size 1.0 +812 42 model.embedding_dim 1.0 +812 42 model.scoring_fct_norm 1.0 +812 42 negative_sampler.num_negs_per_pos 30.0 +812 42 training.batch_size 1.0 +812 43 model.embedding_dim 1.0 +812 43 model.scoring_fct_norm 2.0 +812 43 negative_sampler.num_negs_per_pos 96.0 +812 43 training.batch_size 1.0 +812 44 model.embedding_dim 0.0 +812 44 model.scoring_fct_norm 1.0 +812 44 negative_sampler.num_negs_per_pos 74.0 +812 44 training.batch_size 2.0 +812 45 model.embedding_dim 
1.0 +812 45 model.scoring_fct_norm 1.0 +812 45 negative_sampler.num_negs_per_pos 34.0 +812 45 training.batch_size 0.0 +812 46 model.embedding_dim 1.0 +812 46 model.scoring_fct_norm 2.0 +812 46 negative_sampler.num_negs_per_pos 55.0 +812 46 training.batch_size 2.0 +812 47 model.embedding_dim 0.0 +812 47 model.scoring_fct_norm 1.0 +812 47 negative_sampler.num_negs_per_pos 6.0 +812 47 training.batch_size 2.0 +812 48 model.embedding_dim 0.0 +812 48 model.scoring_fct_norm 1.0 +812 48 negative_sampler.num_negs_per_pos 38.0 +812 48 training.batch_size 0.0 +812 49 model.embedding_dim 2.0 +812 49 model.scoring_fct_norm 2.0 +812 49 negative_sampler.num_negs_per_pos 6.0 +812 49 training.batch_size 0.0 +812 50 model.embedding_dim 1.0 +812 50 model.scoring_fct_norm 1.0 +812 50 negative_sampler.num_negs_per_pos 48.0 +812 50 training.batch_size 2.0 +812 51 model.embedding_dim 2.0 +812 51 model.scoring_fct_norm 2.0 +812 51 negative_sampler.num_negs_per_pos 7.0 +812 51 training.batch_size 2.0 +812 52 model.embedding_dim 2.0 +812 52 model.scoring_fct_norm 1.0 +812 52 negative_sampler.num_negs_per_pos 97.0 +812 52 training.batch_size 0.0 +812 53 model.embedding_dim 2.0 +812 53 model.scoring_fct_norm 2.0 +812 53 negative_sampler.num_negs_per_pos 9.0 +812 53 training.batch_size 2.0 +812 54 model.embedding_dim 2.0 +812 54 model.scoring_fct_norm 2.0 +812 54 negative_sampler.num_negs_per_pos 54.0 +812 54 training.batch_size 0.0 +812 55 model.embedding_dim 2.0 +812 55 model.scoring_fct_norm 2.0 +812 55 negative_sampler.num_negs_per_pos 99.0 +812 55 training.batch_size 2.0 +812 56 model.embedding_dim 2.0 +812 56 model.scoring_fct_norm 1.0 +812 56 negative_sampler.num_negs_per_pos 97.0 +812 56 training.batch_size 0.0 +812 57 model.embedding_dim 2.0 +812 57 model.scoring_fct_norm 2.0 +812 57 negative_sampler.num_negs_per_pos 59.0 +812 57 training.batch_size 1.0 +812 58 model.embedding_dim 0.0 +812 58 model.scoring_fct_norm 1.0 +812 58 negative_sampler.num_negs_per_pos 68.0 +812 58 
training.batch_size 2.0 +812 59 model.embedding_dim 0.0 +812 59 model.scoring_fct_norm 2.0 +812 59 negative_sampler.num_negs_per_pos 48.0 +812 59 training.batch_size 1.0 +812 60 model.embedding_dim 1.0 +812 60 model.scoring_fct_norm 1.0 +812 60 negative_sampler.num_negs_per_pos 10.0 +812 60 training.batch_size 0.0 +812 61 model.embedding_dim 1.0 +812 61 model.scoring_fct_norm 2.0 +812 61 negative_sampler.num_negs_per_pos 49.0 +812 61 training.batch_size 1.0 +812 62 model.embedding_dim 2.0 +812 62 model.scoring_fct_norm 2.0 +812 62 negative_sampler.num_negs_per_pos 83.0 +812 62 training.batch_size 1.0 +812 63 model.embedding_dim 1.0 +812 63 model.scoring_fct_norm 2.0 +812 63 negative_sampler.num_negs_per_pos 74.0 +812 63 training.batch_size 1.0 +812 64 model.embedding_dim 2.0 +812 64 model.scoring_fct_norm 1.0 +812 64 negative_sampler.num_negs_per_pos 51.0 +812 64 training.batch_size 1.0 +812 65 model.embedding_dim 2.0 +812 65 model.scoring_fct_norm 2.0 +812 65 negative_sampler.num_negs_per_pos 31.0 +812 65 training.batch_size 2.0 +812 66 model.embedding_dim 1.0 +812 66 model.scoring_fct_norm 2.0 +812 66 negative_sampler.num_negs_per_pos 49.0 +812 66 training.batch_size 0.0 +812 67 model.embedding_dim 0.0 +812 67 model.scoring_fct_norm 2.0 +812 67 negative_sampler.num_negs_per_pos 29.0 +812 67 training.batch_size 2.0 +812 68 model.embedding_dim 0.0 +812 68 model.scoring_fct_norm 2.0 +812 68 negative_sampler.num_negs_per_pos 77.0 +812 68 training.batch_size 1.0 +812 69 model.embedding_dim 2.0 +812 69 model.scoring_fct_norm 1.0 +812 69 negative_sampler.num_negs_per_pos 29.0 +812 69 training.batch_size 2.0 +812 70 model.embedding_dim 0.0 +812 70 model.scoring_fct_norm 2.0 +812 70 negative_sampler.num_negs_per_pos 76.0 +812 70 training.batch_size 2.0 +812 71 model.embedding_dim 0.0 +812 71 model.scoring_fct_norm 1.0 +812 71 negative_sampler.num_negs_per_pos 89.0 +812 71 training.batch_size 1.0 +812 72 model.embedding_dim 2.0 +812 72 model.scoring_fct_norm 2.0 +812 72 
negative_sampler.num_negs_per_pos 85.0 +812 72 training.batch_size 0.0 +812 73 model.embedding_dim 1.0 +812 73 model.scoring_fct_norm 1.0 +812 73 negative_sampler.num_negs_per_pos 76.0 +812 73 training.batch_size 1.0 +812 74 model.embedding_dim 0.0 +812 74 model.scoring_fct_norm 1.0 +812 74 negative_sampler.num_negs_per_pos 35.0 +812 74 training.batch_size 2.0 +812 75 model.embedding_dim 0.0 +812 75 model.scoring_fct_norm 1.0 +812 75 negative_sampler.num_negs_per_pos 89.0 +812 75 training.batch_size 1.0 +812 76 model.embedding_dim 2.0 +812 76 model.scoring_fct_norm 2.0 +812 76 negative_sampler.num_negs_per_pos 62.0 +812 76 training.batch_size 2.0 +812 77 model.embedding_dim 1.0 +812 77 model.scoring_fct_norm 1.0 +812 77 negative_sampler.num_negs_per_pos 15.0 +812 77 training.batch_size 0.0 +812 78 model.embedding_dim 1.0 +812 78 model.scoring_fct_norm 2.0 +812 78 negative_sampler.num_negs_per_pos 16.0 +812 78 training.batch_size 1.0 +812 79 model.embedding_dim 1.0 +812 79 model.scoring_fct_norm 1.0 +812 79 negative_sampler.num_negs_per_pos 16.0 +812 79 training.batch_size 0.0 +812 80 model.embedding_dim 2.0 +812 80 model.scoring_fct_norm 1.0 +812 80 negative_sampler.num_negs_per_pos 53.0 +812 80 training.batch_size 1.0 +812 81 model.embedding_dim 1.0 +812 81 model.scoring_fct_norm 2.0 +812 81 negative_sampler.num_negs_per_pos 23.0 +812 81 training.batch_size 1.0 +812 82 model.embedding_dim 1.0 +812 82 model.scoring_fct_norm 2.0 +812 82 negative_sampler.num_negs_per_pos 77.0 +812 82 training.batch_size 2.0 +812 83 model.embedding_dim 1.0 +812 83 model.scoring_fct_norm 2.0 +812 83 negative_sampler.num_negs_per_pos 71.0 +812 83 training.batch_size 1.0 +812 84 model.embedding_dim 0.0 +812 84 model.scoring_fct_norm 2.0 +812 84 negative_sampler.num_negs_per_pos 41.0 +812 84 training.batch_size 0.0 +812 85 model.embedding_dim 2.0 +812 85 model.scoring_fct_norm 1.0 +812 85 negative_sampler.num_negs_per_pos 69.0 +812 85 training.batch_size 2.0 +812 86 model.embedding_dim 
2.0 +812 86 model.scoring_fct_norm 2.0 +812 86 negative_sampler.num_negs_per_pos 88.0 +812 86 training.batch_size 1.0 +812 87 model.embedding_dim 1.0 +812 87 model.scoring_fct_norm 1.0 +812 87 negative_sampler.num_negs_per_pos 48.0 +812 87 training.batch_size 0.0 +812 88 model.embedding_dim 1.0 +812 88 model.scoring_fct_norm 2.0 +812 88 negative_sampler.num_negs_per_pos 40.0 +812 88 training.batch_size 1.0 +812 89 model.embedding_dim 2.0 +812 89 model.scoring_fct_norm 1.0 +812 89 negative_sampler.num_negs_per_pos 89.0 +812 89 training.batch_size 2.0 +812 90 model.embedding_dim 1.0 +812 90 model.scoring_fct_norm 1.0 +812 90 negative_sampler.num_negs_per_pos 99.0 +812 90 training.batch_size 1.0 +812 91 model.embedding_dim 1.0 +812 91 model.scoring_fct_norm 1.0 +812 91 negative_sampler.num_negs_per_pos 98.0 +812 91 training.batch_size 2.0 +812 92 model.embedding_dim 0.0 +812 92 model.scoring_fct_norm 1.0 +812 92 negative_sampler.num_negs_per_pos 2.0 +812 92 training.batch_size 2.0 +812 93 model.embedding_dim 1.0 +812 93 model.scoring_fct_norm 1.0 +812 93 negative_sampler.num_negs_per_pos 21.0 +812 93 training.batch_size 0.0 +812 94 model.embedding_dim 1.0 +812 94 model.scoring_fct_norm 2.0 +812 94 negative_sampler.num_negs_per_pos 86.0 +812 94 training.batch_size 0.0 +812 95 model.embedding_dim 1.0 +812 95 model.scoring_fct_norm 2.0 +812 95 negative_sampler.num_negs_per_pos 9.0 +812 95 training.batch_size 2.0 +812 96 model.embedding_dim 1.0 +812 96 model.scoring_fct_norm 2.0 +812 96 negative_sampler.num_negs_per_pos 41.0 +812 96 training.batch_size 2.0 +812 97 model.embedding_dim 1.0 +812 97 model.scoring_fct_norm 2.0 +812 97 negative_sampler.num_negs_per_pos 16.0 +812 97 training.batch_size 1.0 +812 98 model.embedding_dim 2.0 +812 98 model.scoring_fct_norm 2.0 +812 98 negative_sampler.num_negs_per_pos 47.0 +812 98 training.batch_size 1.0 +812 99 model.embedding_dim 1.0 +812 99 model.scoring_fct_norm 1.0 +812 99 negative_sampler.num_negs_per_pos 42.0 +812 99 
training.batch_size 2.0 +812 100 model.embedding_dim 2.0 +812 100 model.scoring_fct_norm 1.0 +812 100 negative_sampler.num_negs_per_pos 83.0 +812 100 training.batch_size 0.0 +812 1 dataset """kinships""" +812 1 model """transe""" +812 1 loss """softplus""" +812 1 regularizer """no""" +812 1 optimizer """adadelta""" +812 1 training_loop """owa""" +812 1 negative_sampler """basic""" +812 1 evaluator """rankbased""" +812 2 dataset """kinships""" +812 2 model """transe""" +812 2 loss """softplus""" +812 2 regularizer """no""" +812 2 optimizer """adadelta""" +812 2 training_loop """owa""" +812 2 negative_sampler """basic""" +812 2 evaluator """rankbased""" +812 3 dataset """kinships""" +812 3 model """transe""" +812 3 loss """softplus""" +812 3 regularizer """no""" +812 3 optimizer """adadelta""" +812 3 training_loop """owa""" +812 3 negative_sampler """basic""" +812 3 evaluator """rankbased""" +812 4 dataset """kinships""" +812 4 model """transe""" +812 4 loss """softplus""" +812 4 regularizer """no""" +812 4 optimizer """adadelta""" +812 4 training_loop """owa""" +812 4 negative_sampler """basic""" +812 4 evaluator """rankbased""" +812 5 dataset """kinships""" +812 5 model """transe""" +812 5 loss """softplus""" +812 5 regularizer """no""" +812 5 optimizer """adadelta""" +812 5 training_loop """owa""" +812 5 negative_sampler """basic""" +812 5 evaluator """rankbased""" +812 6 dataset """kinships""" +812 6 model """transe""" +812 6 loss """softplus""" +812 6 regularizer """no""" +812 6 optimizer """adadelta""" +812 6 training_loop """owa""" +812 6 negative_sampler """basic""" +812 6 evaluator """rankbased""" +812 7 dataset """kinships""" +812 7 model """transe""" +812 7 loss """softplus""" +812 7 regularizer """no""" +812 7 optimizer """adadelta""" +812 7 training_loop """owa""" +812 7 negative_sampler """basic""" +812 7 evaluator """rankbased""" +812 8 dataset """kinships""" +812 8 model """transe""" +812 8 loss """softplus""" +812 8 regularizer """no""" +812 8 
optimizer """adadelta""" +812 8 training_loop """owa""" +812 8 negative_sampler """basic""" +812 8 evaluator """rankbased""" +812 9 dataset """kinships""" +812 9 model """transe""" +812 9 loss """softplus""" +812 9 regularizer """no""" +812 9 optimizer """adadelta""" +812 9 training_loop """owa""" +812 9 negative_sampler """basic""" +812 9 evaluator """rankbased""" +812 10 dataset """kinships""" +812 10 model """transe""" +812 10 loss """softplus""" +812 10 regularizer """no""" +812 10 optimizer """adadelta""" +812 10 training_loop """owa""" +812 10 negative_sampler """basic""" +812 10 evaluator """rankbased""" +812 11 dataset """kinships""" +812 11 model """transe""" +812 11 loss """softplus""" +812 11 regularizer """no""" +812 11 optimizer """adadelta""" +812 11 training_loop """owa""" +812 11 negative_sampler """basic""" +812 11 evaluator """rankbased""" +812 12 dataset """kinships""" +812 12 model """transe""" +812 12 loss """softplus""" +812 12 regularizer """no""" +812 12 optimizer """adadelta""" +812 12 training_loop """owa""" +812 12 negative_sampler """basic""" +812 12 evaluator """rankbased""" +812 13 dataset """kinships""" +812 13 model """transe""" +812 13 loss """softplus""" +812 13 regularizer """no""" +812 13 optimizer """adadelta""" +812 13 training_loop """owa""" +812 13 negative_sampler """basic""" +812 13 evaluator """rankbased""" +812 14 dataset """kinships""" +812 14 model """transe""" +812 14 loss """softplus""" +812 14 regularizer """no""" +812 14 optimizer """adadelta""" +812 14 training_loop """owa""" +812 14 negative_sampler """basic""" +812 14 evaluator """rankbased""" +812 15 dataset """kinships""" +812 15 model """transe""" +812 15 loss """softplus""" +812 15 regularizer """no""" +812 15 optimizer """adadelta""" +812 15 training_loop """owa""" +812 15 negative_sampler """basic""" +812 15 evaluator """rankbased""" +812 16 dataset """kinships""" +812 16 model """transe""" +812 16 loss """softplus""" +812 16 regularizer """no""" +812 16 
optimizer """adadelta""" +812 16 training_loop """owa""" +812 16 negative_sampler """basic""" +812 16 evaluator """rankbased""" +812 17 dataset """kinships""" +812 17 model """transe""" +812 17 loss """softplus""" +812 17 regularizer """no""" +812 17 optimizer """adadelta""" +812 17 training_loop """owa""" +812 17 negative_sampler """basic""" +812 17 evaluator """rankbased""" +812 18 dataset """kinships""" +812 18 model """transe""" +812 18 loss """softplus""" +812 18 regularizer """no""" +812 18 optimizer """adadelta""" +812 18 training_loop """owa""" +812 18 negative_sampler """basic""" +812 18 evaluator """rankbased""" +812 19 dataset """kinships""" +812 19 model """transe""" +812 19 loss """softplus""" +812 19 regularizer """no""" +812 19 optimizer """adadelta""" +812 19 training_loop """owa""" +812 19 negative_sampler """basic""" +812 19 evaluator """rankbased""" +812 20 dataset """kinships""" +812 20 model """transe""" +812 20 loss """softplus""" +812 20 regularizer """no""" +812 20 optimizer """adadelta""" +812 20 training_loop """owa""" +812 20 negative_sampler """basic""" +812 20 evaluator """rankbased""" +812 21 dataset """kinships""" +812 21 model """transe""" +812 21 loss """softplus""" +812 21 regularizer """no""" +812 21 optimizer """adadelta""" +812 21 training_loop """owa""" +812 21 negative_sampler """basic""" +812 21 evaluator """rankbased""" +812 22 dataset """kinships""" +812 22 model """transe""" +812 22 loss """softplus""" +812 22 regularizer """no""" +812 22 optimizer """adadelta""" +812 22 training_loop """owa""" +812 22 negative_sampler """basic""" +812 22 evaluator """rankbased""" +812 23 dataset """kinships""" +812 23 model """transe""" +812 23 loss """softplus""" +812 23 regularizer """no""" +812 23 optimizer """adadelta""" +812 23 training_loop """owa""" +812 23 negative_sampler """basic""" +812 23 evaluator """rankbased""" +812 24 dataset """kinships""" +812 24 model """transe""" +812 24 loss """softplus""" +812 24 regularizer """no""" 
+812 24 optimizer """adadelta""" +812 24 training_loop """owa""" +812 24 negative_sampler """basic""" +812 24 evaluator """rankbased""" +812 25 dataset """kinships""" +812 25 model """transe""" +812 25 loss """softplus""" +812 25 regularizer """no""" +812 25 optimizer """adadelta""" +812 25 training_loop """owa""" +812 25 negative_sampler """basic""" +812 25 evaluator """rankbased""" +812 26 dataset """kinships""" +812 26 model """transe""" +812 26 loss """softplus""" +812 26 regularizer """no""" +812 26 optimizer """adadelta""" +812 26 training_loop """owa""" +812 26 negative_sampler """basic""" +812 26 evaluator """rankbased""" +812 27 dataset """kinships""" +812 27 model """transe""" +812 27 loss """softplus""" +812 27 regularizer """no""" +812 27 optimizer """adadelta""" +812 27 training_loop """owa""" +812 27 negative_sampler """basic""" +812 27 evaluator """rankbased""" +812 28 dataset """kinships""" +812 28 model """transe""" +812 28 loss """softplus""" +812 28 regularizer """no""" +812 28 optimizer """adadelta""" +812 28 training_loop """owa""" +812 28 negative_sampler """basic""" +812 28 evaluator """rankbased""" +812 29 dataset """kinships""" +812 29 model """transe""" +812 29 loss """softplus""" +812 29 regularizer """no""" +812 29 optimizer """adadelta""" +812 29 training_loop """owa""" +812 29 negative_sampler """basic""" +812 29 evaluator """rankbased""" +812 30 dataset """kinships""" +812 30 model """transe""" +812 30 loss """softplus""" +812 30 regularizer """no""" +812 30 optimizer """adadelta""" +812 30 training_loop """owa""" +812 30 negative_sampler """basic""" +812 30 evaluator """rankbased""" +812 31 dataset """kinships""" +812 31 model """transe""" +812 31 loss """softplus""" +812 31 regularizer """no""" +812 31 optimizer """adadelta""" +812 31 training_loop """owa""" +812 31 negative_sampler """basic""" +812 31 evaluator """rankbased""" +812 32 dataset """kinships""" +812 32 model """transe""" +812 32 loss """softplus""" +812 32 regularizer 
"""no""" +812 32 optimizer """adadelta""" +812 32 training_loop """owa""" +812 32 negative_sampler """basic""" +812 32 evaluator """rankbased""" +812 33 dataset """kinships""" +812 33 model """transe""" +812 33 loss """softplus""" +812 33 regularizer """no""" +812 33 optimizer """adadelta""" +812 33 training_loop """owa""" +812 33 negative_sampler """basic""" +812 33 evaluator """rankbased""" +812 34 dataset """kinships""" +812 34 model """transe""" +812 34 loss """softplus""" +812 34 regularizer """no""" +812 34 optimizer """adadelta""" +812 34 training_loop """owa""" +812 34 negative_sampler """basic""" +812 34 evaluator """rankbased""" +812 35 dataset """kinships""" +812 35 model """transe""" +812 35 loss """softplus""" +812 35 regularizer """no""" +812 35 optimizer """adadelta""" +812 35 training_loop """owa""" +812 35 negative_sampler """basic""" +812 35 evaluator """rankbased""" +812 36 dataset """kinships""" +812 36 model """transe""" +812 36 loss """softplus""" +812 36 regularizer """no""" +812 36 optimizer """adadelta""" +812 36 training_loop """owa""" +812 36 negative_sampler """basic""" +812 36 evaluator """rankbased""" +812 37 dataset """kinships""" +812 37 model """transe""" +812 37 loss """softplus""" +812 37 regularizer """no""" +812 37 optimizer """adadelta""" +812 37 training_loop """owa""" +812 37 negative_sampler """basic""" +812 37 evaluator """rankbased""" +812 38 dataset """kinships""" +812 38 model """transe""" +812 38 loss """softplus""" +812 38 regularizer """no""" +812 38 optimizer """adadelta""" +812 38 training_loop """owa""" +812 38 negative_sampler """basic""" +812 38 evaluator """rankbased""" +812 39 dataset """kinships""" +812 39 model """transe""" +812 39 loss """softplus""" +812 39 regularizer """no""" +812 39 optimizer """adadelta""" +812 39 training_loop """owa""" +812 39 negative_sampler """basic""" +812 39 evaluator """rankbased""" +812 40 dataset """kinships""" +812 40 model """transe""" +812 40 loss """softplus""" +812 40 
regularizer """no""" +812 40 optimizer """adadelta""" +812 40 training_loop """owa""" +812 40 negative_sampler """basic""" +812 40 evaluator """rankbased""" +812 41 dataset """kinships""" +812 41 model """transe""" +812 41 loss """softplus""" +812 41 regularizer """no""" +812 41 optimizer """adadelta""" +812 41 training_loop """owa""" +812 41 negative_sampler """basic""" +812 41 evaluator """rankbased""" +812 42 dataset """kinships""" +812 42 model """transe""" +812 42 loss """softplus""" +812 42 regularizer """no""" +812 42 optimizer """adadelta""" +812 42 training_loop """owa""" +812 42 negative_sampler """basic""" +812 42 evaluator """rankbased""" +812 43 dataset """kinships""" +812 43 model """transe""" +812 43 loss """softplus""" +812 43 regularizer """no""" +812 43 optimizer """adadelta""" +812 43 training_loop """owa""" +812 43 negative_sampler """basic""" +812 43 evaluator """rankbased""" +812 44 dataset """kinships""" +812 44 model """transe""" +812 44 loss """softplus""" +812 44 regularizer """no""" +812 44 optimizer """adadelta""" +812 44 training_loop """owa""" +812 44 negative_sampler """basic""" +812 44 evaluator """rankbased""" +812 45 dataset """kinships""" +812 45 model """transe""" +812 45 loss """softplus""" +812 45 regularizer """no""" +812 45 optimizer """adadelta""" +812 45 training_loop """owa""" +812 45 negative_sampler """basic""" +812 45 evaluator """rankbased""" +812 46 dataset """kinships""" +812 46 model """transe""" +812 46 loss """softplus""" +812 46 regularizer """no""" +812 46 optimizer """adadelta""" +812 46 training_loop """owa""" +812 46 negative_sampler """basic""" +812 46 evaluator """rankbased""" +812 47 dataset """kinships""" +812 47 model """transe""" +812 47 loss """softplus""" +812 47 regularizer """no""" +812 47 optimizer """adadelta""" +812 47 training_loop """owa""" +812 47 negative_sampler """basic""" +812 47 evaluator """rankbased""" +812 48 dataset """kinships""" +812 48 model """transe""" +812 48 loss """softplus""" 
+812 48 regularizer """no""" +812 48 optimizer """adadelta""" +812 48 training_loop """owa""" +812 48 negative_sampler """basic""" +812 48 evaluator """rankbased""" +812 49 dataset """kinships""" +812 49 model """transe""" +812 49 loss """softplus""" +812 49 regularizer """no""" +812 49 optimizer """adadelta""" +812 49 training_loop """owa""" +812 49 negative_sampler """basic""" +812 49 evaluator """rankbased""" +812 50 dataset """kinships""" +812 50 model """transe""" +812 50 loss """softplus""" +812 50 regularizer """no""" +812 50 optimizer """adadelta""" +812 50 training_loop """owa""" +812 50 negative_sampler """basic""" +812 50 evaluator """rankbased""" +812 51 dataset """kinships""" +812 51 model """transe""" +812 51 loss """softplus""" +812 51 regularizer """no""" +812 51 optimizer """adadelta""" +812 51 training_loop """owa""" +812 51 negative_sampler """basic""" +812 51 evaluator """rankbased""" +812 52 dataset """kinships""" +812 52 model """transe""" +812 52 loss """softplus""" +812 52 regularizer """no""" +812 52 optimizer """adadelta""" +812 52 training_loop """owa""" +812 52 negative_sampler """basic""" +812 52 evaluator """rankbased""" +812 53 dataset """kinships""" +812 53 model """transe""" +812 53 loss """softplus""" +812 53 regularizer """no""" +812 53 optimizer """adadelta""" +812 53 training_loop """owa""" +812 53 negative_sampler """basic""" +812 53 evaluator """rankbased""" +812 54 dataset """kinships""" +812 54 model """transe""" +812 54 loss """softplus""" +812 54 regularizer """no""" +812 54 optimizer """adadelta""" +812 54 training_loop """owa""" +812 54 negative_sampler """basic""" +812 54 evaluator """rankbased""" +812 55 dataset """kinships""" +812 55 model """transe""" +812 55 loss """softplus""" +812 55 regularizer """no""" +812 55 optimizer """adadelta""" +812 55 training_loop """owa""" +812 55 negative_sampler """basic""" +812 55 evaluator """rankbased""" +812 56 dataset """kinships""" +812 56 model """transe""" +812 56 loss 
"""softplus""" +812 56 regularizer """no""" +812 56 optimizer """adadelta""" +812 56 training_loop """owa""" +812 56 negative_sampler """basic""" +812 56 evaluator """rankbased""" +812 57 dataset """kinships""" +812 57 model """transe""" +812 57 loss """softplus""" +812 57 regularizer """no""" +812 57 optimizer """adadelta""" +812 57 training_loop """owa""" +812 57 negative_sampler """basic""" +812 57 evaluator """rankbased""" +812 58 dataset """kinships""" +812 58 model """transe""" +812 58 loss """softplus""" +812 58 regularizer """no""" +812 58 optimizer """adadelta""" +812 58 training_loop """owa""" +812 58 negative_sampler """basic""" +812 58 evaluator """rankbased""" +812 59 dataset """kinships""" +812 59 model """transe""" +812 59 loss """softplus""" +812 59 regularizer """no""" +812 59 optimizer """adadelta""" +812 59 training_loop """owa""" +812 59 negative_sampler """basic""" +812 59 evaluator """rankbased""" +812 60 dataset """kinships""" +812 60 model """transe""" +812 60 loss """softplus""" +812 60 regularizer """no""" +812 60 optimizer """adadelta""" +812 60 training_loop """owa""" +812 60 negative_sampler """basic""" +812 60 evaluator """rankbased""" +812 61 dataset """kinships""" +812 61 model """transe""" +812 61 loss """softplus""" +812 61 regularizer """no""" +812 61 optimizer """adadelta""" +812 61 training_loop """owa""" +812 61 negative_sampler """basic""" +812 61 evaluator """rankbased""" +812 62 dataset """kinships""" +812 62 model """transe""" +812 62 loss """softplus""" +812 62 regularizer """no""" +812 62 optimizer """adadelta""" +812 62 training_loop """owa""" +812 62 negative_sampler """basic""" +812 62 evaluator """rankbased""" +812 63 dataset """kinships""" +812 63 model """transe""" +812 63 loss """softplus""" +812 63 regularizer """no""" +812 63 optimizer """adadelta""" +812 63 training_loop """owa""" +812 63 negative_sampler """basic""" +812 63 evaluator """rankbased""" +812 64 dataset """kinships""" +812 64 model """transe""" +812 
64 loss """softplus""" +812 64 regularizer """no""" +812 64 optimizer """adadelta""" +812 64 training_loop """owa""" +812 64 negative_sampler """basic""" +812 64 evaluator """rankbased""" +812 65 dataset """kinships""" +812 65 model """transe""" +812 65 loss """softplus""" +812 65 regularizer """no""" +812 65 optimizer """adadelta""" +812 65 training_loop """owa""" +812 65 negative_sampler """basic""" +812 65 evaluator """rankbased""" +812 66 dataset """kinships""" +812 66 model """transe""" +812 66 loss """softplus""" +812 66 regularizer """no""" +812 66 optimizer """adadelta""" +812 66 training_loop """owa""" +812 66 negative_sampler """basic""" +812 66 evaluator """rankbased""" +812 67 dataset """kinships""" +812 67 model """transe""" +812 67 loss """softplus""" +812 67 regularizer """no""" +812 67 optimizer """adadelta""" +812 67 training_loop """owa""" +812 67 negative_sampler """basic""" +812 67 evaluator """rankbased""" +812 68 dataset """kinships""" +812 68 model """transe""" +812 68 loss """softplus""" +812 68 regularizer """no""" +812 68 optimizer """adadelta""" +812 68 training_loop """owa""" +812 68 negative_sampler """basic""" +812 68 evaluator """rankbased""" +812 69 dataset """kinships""" +812 69 model """transe""" +812 69 loss """softplus""" +812 69 regularizer """no""" +812 69 optimizer """adadelta""" +812 69 training_loop """owa""" +812 69 negative_sampler """basic""" +812 69 evaluator """rankbased""" +812 70 dataset """kinships""" +812 70 model """transe""" +812 70 loss """softplus""" +812 70 regularizer """no""" +812 70 optimizer """adadelta""" +812 70 training_loop """owa""" +812 70 negative_sampler """basic""" +812 70 evaluator """rankbased""" +812 71 dataset """kinships""" +812 71 model """transe""" +812 71 loss """softplus""" +812 71 regularizer """no""" +812 71 optimizer """adadelta""" +812 71 training_loop """owa""" +812 71 negative_sampler """basic""" +812 71 evaluator """rankbased""" +812 72 dataset """kinships""" +812 72 model 
"""transe""" +812 72 loss """softplus""" +812 72 regularizer """no""" +812 72 optimizer """adadelta""" +812 72 training_loop """owa""" +812 72 negative_sampler """basic""" +812 72 evaluator """rankbased""" +812 73 dataset """kinships""" +812 73 model """transe""" +812 73 loss """softplus""" +812 73 regularizer """no""" +812 73 optimizer """adadelta""" +812 73 training_loop """owa""" +812 73 negative_sampler """basic""" +812 73 evaluator """rankbased""" +812 74 dataset """kinships""" +812 74 model """transe""" +812 74 loss """softplus""" +812 74 regularizer """no""" +812 74 optimizer """adadelta""" +812 74 training_loop """owa""" +812 74 negative_sampler """basic""" +812 74 evaluator """rankbased""" +812 75 dataset """kinships""" +812 75 model """transe""" +812 75 loss """softplus""" +812 75 regularizer """no""" +812 75 optimizer """adadelta""" +812 75 training_loop """owa""" +812 75 negative_sampler """basic""" +812 75 evaluator """rankbased""" +812 76 dataset """kinships""" +812 76 model """transe""" +812 76 loss """softplus""" +812 76 regularizer """no""" +812 76 optimizer """adadelta""" +812 76 training_loop """owa""" +812 76 negative_sampler """basic""" +812 76 evaluator """rankbased""" +812 77 dataset """kinships""" +812 77 model """transe""" +812 77 loss """softplus""" +812 77 regularizer """no""" +812 77 optimizer """adadelta""" +812 77 training_loop """owa""" +812 77 negative_sampler """basic""" +812 77 evaluator """rankbased""" +812 78 dataset """kinships""" +812 78 model """transe""" +812 78 loss """softplus""" +812 78 regularizer """no""" +812 78 optimizer """adadelta""" +812 78 training_loop """owa""" +812 78 negative_sampler """basic""" +812 78 evaluator """rankbased""" +812 79 dataset """kinships""" +812 79 model """transe""" +812 79 loss """softplus""" +812 79 regularizer """no""" +812 79 optimizer """adadelta""" +812 79 training_loop """owa""" +812 79 negative_sampler """basic""" +812 79 evaluator """rankbased""" +812 80 dataset """kinships""" +812 
80 model """transe""" +812 80 loss """softplus""" +812 80 regularizer """no""" +812 80 optimizer """adadelta""" +812 80 training_loop """owa""" +812 80 negative_sampler """basic""" +812 80 evaluator """rankbased""" +812 81 dataset """kinships""" +812 81 model """transe""" +812 81 loss """softplus""" +812 81 regularizer """no""" +812 81 optimizer """adadelta""" +812 81 training_loop """owa""" +812 81 negative_sampler """basic""" +812 81 evaluator """rankbased""" +812 82 dataset """kinships""" +812 82 model """transe""" +812 82 loss """softplus""" +812 82 regularizer """no""" +812 82 optimizer """adadelta""" +812 82 training_loop """owa""" +812 82 negative_sampler """basic""" +812 82 evaluator """rankbased""" +812 83 dataset """kinships""" +812 83 model """transe""" +812 83 loss """softplus""" +812 83 regularizer """no""" +812 83 optimizer """adadelta""" +812 83 training_loop """owa""" +812 83 negative_sampler """basic""" +812 83 evaluator """rankbased""" +812 84 dataset """kinships""" +812 84 model """transe""" +812 84 loss """softplus""" +812 84 regularizer """no""" +812 84 optimizer """adadelta""" +812 84 training_loop """owa""" +812 84 negative_sampler """basic""" +812 84 evaluator """rankbased""" +812 85 dataset """kinships""" +812 85 model """transe""" +812 85 loss """softplus""" +812 85 regularizer """no""" +812 85 optimizer """adadelta""" +812 85 training_loop """owa""" +812 85 negative_sampler """basic""" +812 85 evaluator """rankbased""" +812 86 dataset """kinships""" +812 86 model """transe""" +812 86 loss """softplus""" +812 86 regularizer """no""" +812 86 optimizer """adadelta""" +812 86 training_loop """owa""" +812 86 negative_sampler """basic""" +812 86 evaluator """rankbased""" +812 87 dataset """kinships""" +812 87 model """transe""" +812 87 loss """softplus""" +812 87 regularizer """no""" +812 87 optimizer """adadelta""" +812 87 training_loop """owa""" +812 87 negative_sampler """basic""" +812 87 evaluator """rankbased""" +812 88 dataset 
"""kinships""" +812 88 model """transe""" +812 88 loss """softplus""" +812 88 regularizer """no""" +812 88 optimizer """adadelta""" +812 88 training_loop """owa""" +812 88 negative_sampler """basic""" +812 88 evaluator """rankbased""" +812 89 dataset """kinships""" +812 89 model """transe""" +812 89 loss """softplus""" +812 89 regularizer """no""" +812 89 optimizer """adadelta""" +812 89 training_loop """owa""" +812 89 negative_sampler """basic""" +812 89 evaluator """rankbased""" +812 90 dataset """kinships""" +812 90 model """transe""" +812 90 loss """softplus""" +812 90 regularizer """no""" +812 90 optimizer """adadelta""" +812 90 training_loop """owa""" +812 90 negative_sampler """basic""" +812 90 evaluator """rankbased""" +812 91 dataset """kinships""" +812 91 model """transe""" +812 91 loss """softplus""" +812 91 regularizer """no""" +812 91 optimizer """adadelta""" +812 91 training_loop """owa""" +812 91 negative_sampler """basic""" +812 91 evaluator """rankbased""" +812 92 dataset """kinships""" +812 92 model """transe""" +812 92 loss """softplus""" +812 92 regularizer """no""" +812 92 optimizer """adadelta""" +812 92 training_loop """owa""" +812 92 negative_sampler """basic""" +812 92 evaluator """rankbased""" +812 93 dataset """kinships""" +812 93 model """transe""" +812 93 loss """softplus""" +812 93 regularizer """no""" +812 93 optimizer """adadelta""" +812 93 training_loop """owa""" +812 93 negative_sampler """basic""" +812 93 evaluator """rankbased""" +812 94 dataset """kinships""" +812 94 model """transe""" +812 94 loss """softplus""" +812 94 regularizer """no""" +812 94 optimizer """adadelta""" +812 94 training_loop """owa""" +812 94 negative_sampler """basic""" +812 94 evaluator """rankbased""" +812 95 dataset """kinships""" +812 95 model """transe""" +812 95 loss """softplus""" +812 95 regularizer """no""" +812 95 optimizer """adadelta""" +812 95 training_loop """owa""" +812 95 negative_sampler """basic""" +812 95 evaluator """rankbased""" +812 96 
dataset """kinships""" +812 96 model """transe""" +812 96 loss """softplus""" +812 96 regularizer """no""" +812 96 optimizer """adadelta""" +812 96 training_loop """owa""" +812 96 negative_sampler """basic""" +812 96 evaluator """rankbased""" +812 97 dataset """kinships""" +812 97 model """transe""" +812 97 loss """softplus""" +812 97 regularizer """no""" +812 97 optimizer """adadelta""" +812 97 training_loop """owa""" +812 97 negative_sampler """basic""" +812 97 evaluator """rankbased""" +812 98 dataset """kinships""" +812 98 model """transe""" +812 98 loss """softplus""" +812 98 regularizer """no""" +812 98 optimizer """adadelta""" +812 98 training_loop """owa""" +812 98 negative_sampler """basic""" +812 98 evaluator """rankbased""" +812 99 dataset """kinships""" +812 99 model """transe""" +812 99 loss """softplus""" +812 99 regularizer """no""" +812 99 optimizer """adadelta""" +812 99 training_loop """owa""" +812 99 negative_sampler """basic""" +812 99 evaluator """rankbased""" +812 100 dataset """kinships""" +812 100 model """transe""" +812 100 loss """softplus""" +812 100 regularizer """no""" +812 100 optimizer """adadelta""" +812 100 training_loop """owa""" +812 100 negative_sampler """basic""" +812 100 evaluator """rankbased""" +813 1 model.embedding_dim 2.0 +813 1 model.scoring_fct_norm 2.0 +813 1 negative_sampler.num_negs_per_pos 79.0 +813 1 training.batch_size 0.0 +813 2 model.embedding_dim 2.0 +813 2 model.scoring_fct_norm 1.0 +813 2 negative_sampler.num_negs_per_pos 31.0 +813 2 training.batch_size 2.0 +813 3 model.embedding_dim 0.0 +813 3 model.scoring_fct_norm 1.0 +813 3 negative_sampler.num_negs_per_pos 71.0 +813 3 training.batch_size 2.0 +813 4 model.embedding_dim 0.0 +813 4 model.scoring_fct_norm 1.0 +813 4 negative_sampler.num_negs_per_pos 36.0 +813 4 training.batch_size 0.0 +813 5 model.embedding_dim 1.0 +813 5 model.scoring_fct_norm 1.0 +813 5 negative_sampler.num_negs_per_pos 13.0 +813 5 training.batch_size 1.0 +813 6 model.embedding_dim 1.0 
+813 6 model.scoring_fct_norm 1.0 +813 6 negative_sampler.num_negs_per_pos 57.0 +813 6 training.batch_size 2.0 +813 7 model.embedding_dim 2.0 +813 7 model.scoring_fct_norm 1.0 +813 7 negative_sampler.num_negs_per_pos 88.0 +813 7 training.batch_size 2.0 +813 8 model.embedding_dim 2.0 +813 8 model.scoring_fct_norm 1.0 +813 8 negative_sampler.num_negs_per_pos 22.0 +813 8 training.batch_size 2.0 +813 9 model.embedding_dim 2.0 +813 9 model.scoring_fct_norm 1.0 +813 9 negative_sampler.num_negs_per_pos 17.0 +813 9 training.batch_size 0.0 +813 10 model.embedding_dim 2.0 +813 10 model.scoring_fct_norm 2.0 +813 10 negative_sampler.num_negs_per_pos 33.0 +813 10 training.batch_size 1.0 +813 11 model.embedding_dim 1.0 +813 11 model.scoring_fct_norm 1.0 +813 11 negative_sampler.num_negs_per_pos 20.0 +813 11 training.batch_size 0.0 +813 12 model.embedding_dim 0.0 +813 12 model.scoring_fct_norm 1.0 +813 12 negative_sampler.num_negs_per_pos 76.0 +813 12 training.batch_size 0.0 +813 13 model.embedding_dim 0.0 +813 13 model.scoring_fct_norm 1.0 +813 13 negative_sampler.num_negs_per_pos 42.0 +813 13 training.batch_size 1.0 +813 14 model.embedding_dim 0.0 +813 14 model.scoring_fct_norm 1.0 +813 14 negative_sampler.num_negs_per_pos 74.0 +813 14 training.batch_size 1.0 +813 15 model.embedding_dim 1.0 +813 15 model.scoring_fct_norm 1.0 +813 15 negative_sampler.num_negs_per_pos 86.0 +813 15 training.batch_size 0.0 +813 16 model.embedding_dim 2.0 +813 16 model.scoring_fct_norm 2.0 +813 16 negative_sampler.num_negs_per_pos 68.0 +813 16 training.batch_size 1.0 +813 17 model.embedding_dim 0.0 +813 17 model.scoring_fct_norm 1.0 +813 17 negative_sampler.num_negs_per_pos 52.0 +813 17 training.batch_size 1.0 +813 18 model.embedding_dim 2.0 +813 18 model.scoring_fct_norm 2.0 +813 18 negative_sampler.num_negs_per_pos 94.0 +813 18 training.batch_size 1.0 +813 19 model.embedding_dim 2.0 +813 19 model.scoring_fct_norm 1.0 +813 19 negative_sampler.num_negs_per_pos 53.0 +813 19 training.batch_size 2.0 
+813 20 model.embedding_dim 1.0 +813 20 model.scoring_fct_norm 2.0 +813 20 negative_sampler.num_negs_per_pos 91.0 +813 20 training.batch_size 2.0 +813 21 model.embedding_dim 0.0 +813 21 model.scoring_fct_norm 1.0 +813 21 negative_sampler.num_negs_per_pos 49.0 +813 21 training.batch_size 1.0 +813 22 model.embedding_dim 0.0 +813 22 model.scoring_fct_norm 1.0 +813 22 negative_sampler.num_negs_per_pos 75.0 +813 22 training.batch_size 0.0 +813 23 model.embedding_dim 2.0 +813 23 model.scoring_fct_norm 2.0 +813 23 negative_sampler.num_negs_per_pos 79.0 +813 23 training.batch_size 2.0 +813 24 model.embedding_dim 1.0 +813 24 model.scoring_fct_norm 1.0 +813 24 negative_sampler.num_negs_per_pos 92.0 +813 24 training.batch_size 2.0 +813 25 model.embedding_dim 1.0 +813 25 model.scoring_fct_norm 1.0 +813 25 negative_sampler.num_negs_per_pos 61.0 +813 25 training.batch_size 2.0 +813 26 model.embedding_dim 2.0 +813 26 model.scoring_fct_norm 2.0 +813 26 negative_sampler.num_negs_per_pos 89.0 +813 26 training.batch_size 1.0 +813 27 model.embedding_dim 0.0 +813 27 model.scoring_fct_norm 2.0 +813 27 negative_sampler.num_negs_per_pos 77.0 +813 27 training.batch_size 2.0 +813 28 model.embedding_dim 2.0 +813 28 model.scoring_fct_norm 1.0 +813 28 negative_sampler.num_negs_per_pos 85.0 +813 28 training.batch_size 2.0 +813 29 model.embedding_dim 1.0 +813 29 model.scoring_fct_norm 1.0 +813 29 negative_sampler.num_negs_per_pos 80.0 +813 29 training.batch_size 0.0 +813 30 model.embedding_dim 0.0 +813 30 model.scoring_fct_norm 1.0 +813 30 negative_sampler.num_negs_per_pos 10.0 +813 30 training.batch_size 1.0 +813 31 model.embedding_dim 1.0 +813 31 model.scoring_fct_norm 2.0 +813 31 negative_sampler.num_negs_per_pos 14.0 +813 31 training.batch_size 0.0 +813 32 model.embedding_dim 0.0 +813 32 model.scoring_fct_norm 1.0 +813 32 negative_sampler.num_negs_per_pos 77.0 +813 32 training.batch_size 0.0 +813 33 model.embedding_dim 2.0 +813 33 model.scoring_fct_norm 2.0 +813 33 
negative_sampler.num_negs_per_pos 10.0 +813 33 training.batch_size 1.0 +813 34 model.embedding_dim 2.0 +813 34 model.scoring_fct_norm 1.0 +813 34 negative_sampler.num_negs_per_pos 46.0 +813 34 training.batch_size 0.0 +813 35 model.embedding_dim 1.0 +813 35 model.scoring_fct_norm 1.0 +813 35 negative_sampler.num_negs_per_pos 1.0 +813 35 training.batch_size 1.0 +813 36 model.embedding_dim 0.0 +813 36 model.scoring_fct_norm 2.0 +813 36 negative_sampler.num_negs_per_pos 60.0 +813 36 training.batch_size 1.0 +813 37 model.embedding_dim 1.0 +813 37 model.scoring_fct_norm 1.0 +813 37 negative_sampler.num_negs_per_pos 13.0 +813 37 training.batch_size 1.0 +813 38 model.embedding_dim 1.0 +813 38 model.scoring_fct_norm 2.0 +813 38 negative_sampler.num_negs_per_pos 78.0 +813 38 training.batch_size 2.0 +813 39 model.embedding_dim 0.0 +813 39 model.scoring_fct_norm 1.0 +813 39 negative_sampler.num_negs_per_pos 19.0 +813 39 training.batch_size 0.0 +813 40 model.embedding_dim 2.0 +813 40 model.scoring_fct_norm 2.0 +813 40 negative_sampler.num_negs_per_pos 85.0 +813 40 training.batch_size 2.0 +813 41 model.embedding_dim 2.0 +813 41 model.scoring_fct_norm 1.0 +813 41 negative_sampler.num_negs_per_pos 41.0 +813 41 training.batch_size 0.0 +813 42 model.embedding_dim 2.0 +813 42 model.scoring_fct_norm 1.0 +813 42 negative_sampler.num_negs_per_pos 46.0 +813 42 training.batch_size 2.0 +813 43 model.embedding_dim 0.0 +813 43 model.scoring_fct_norm 2.0 +813 43 negative_sampler.num_negs_per_pos 62.0 +813 43 training.batch_size 1.0 +813 44 model.embedding_dim 0.0 +813 44 model.scoring_fct_norm 1.0 +813 44 negative_sampler.num_negs_per_pos 87.0 +813 44 training.batch_size 1.0 +813 45 model.embedding_dim 0.0 +813 45 model.scoring_fct_norm 2.0 +813 45 negative_sampler.num_negs_per_pos 47.0 +813 45 training.batch_size 0.0 +813 46 model.embedding_dim 0.0 +813 46 model.scoring_fct_norm 2.0 +813 46 negative_sampler.num_negs_per_pos 27.0 +813 46 training.batch_size 1.0 +813 47 model.embedding_dim 1.0 
+813 47 model.scoring_fct_norm 1.0 +813 47 negative_sampler.num_negs_per_pos 43.0 +813 47 training.batch_size 2.0 +813 48 model.embedding_dim 0.0 +813 48 model.scoring_fct_norm 1.0 +813 48 negative_sampler.num_negs_per_pos 85.0 +813 48 training.batch_size 1.0 +813 49 model.embedding_dim 1.0 +813 49 model.scoring_fct_norm 1.0 +813 49 negative_sampler.num_negs_per_pos 90.0 +813 49 training.batch_size 0.0 +813 50 model.embedding_dim 1.0 +813 50 model.scoring_fct_norm 1.0 +813 50 negative_sampler.num_negs_per_pos 12.0 +813 50 training.batch_size 2.0 +813 51 model.embedding_dim 0.0 +813 51 model.scoring_fct_norm 2.0 +813 51 negative_sampler.num_negs_per_pos 61.0 +813 51 training.batch_size 1.0 +813 52 model.embedding_dim 2.0 +813 52 model.scoring_fct_norm 1.0 +813 52 negative_sampler.num_negs_per_pos 58.0 +813 52 training.batch_size 1.0 +813 53 model.embedding_dim 2.0 +813 53 model.scoring_fct_norm 1.0 +813 53 negative_sampler.num_negs_per_pos 48.0 +813 53 training.batch_size 0.0 +813 54 model.embedding_dim 2.0 +813 54 model.scoring_fct_norm 2.0 +813 54 negative_sampler.num_negs_per_pos 10.0 +813 54 training.batch_size 1.0 +813 55 model.embedding_dim 1.0 +813 55 model.scoring_fct_norm 2.0 +813 55 negative_sampler.num_negs_per_pos 82.0 +813 55 training.batch_size 0.0 +813 56 model.embedding_dim 1.0 +813 56 model.scoring_fct_norm 2.0 +813 56 negative_sampler.num_negs_per_pos 61.0 +813 56 training.batch_size 0.0 +813 57 model.embedding_dim 1.0 +813 57 model.scoring_fct_norm 1.0 +813 57 negative_sampler.num_negs_per_pos 36.0 +813 57 training.batch_size 1.0 +813 58 model.embedding_dim 0.0 +813 58 model.scoring_fct_norm 1.0 +813 58 negative_sampler.num_negs_per_pos 57.0 +813 58 training.batch_size 1.0 +813 59 model.embedding_dim 0.0 +813 59 model.scoring_fct_norm 1.0 +813 59 negative_sampler.num_negs_per_pos 59.0 +813 59 training.batch_size 1.0 +813 60 model.embedding_dim 0.0 +813 60 model.scoring_fct_norm 2.0 +813 60 negative_sampler.num_negs_per_pos 89.0 +813 60 
training.batch_size 0.0 +813 61 model.embedding_dim 1.0 +813 61 model.scoring_fct_norm 1.0 +813 61 negative_sampler.num_negs_per_pos 26.0 +813 61 training.batch_size 2.0 +813 62 model.embedding_dim 1.0 +813 62 model.scoring_fct_norm 2.0 +813 62 negative_sampler.num_negs_per_pos 80.0 +813 62 training.batch_size 0.0 +813 63 model.embedding_dim 0.0 +813 63 model.scoring_fct_norm 1.0 +813 63 negative_sampler.num_negs_per_pos 79.0 +813 63 training.batch_size 1.0 +813 64 model.embedding_dim 2.0 +813 64 model.scoring_fct_norm 2.0 +813 64 negative_sampler.num_negs_per_pos 20.0 +813 64 training.batch_size 0.0 +813 65 model.embedding_dim 1.0 +813 65 model.scoring_fct_norm 2.0 +813 65 negative_sampler.num_negs_per_pos 10.0 +813 65 training.batch_size 0.0 +813 66 model.embedding_dim 1.0 +813 66 model.scoring_fct_norm 2.0 +813 66 negative_sampler.num_negs_per_pos 97.0 +813 66 training.batch_size 2.0 +813 67 model.embedding_dim 2.0 +813 67 model.scoring_fct_norm 1.0 +813 67 negative_sampler.num_negs_per_pos 54.0 +813 67 training.batch_size 1.0 +813 68 model.embedding_dim 1.0 +813 68 model.scoring_fct_norm 1.0 +813 68 negative_sampler.num_negs_per_pos 7.0 +813 68 training.batch_size 2.0 +813 69 model.embedding_dim 2.0 +813 69 model.scoring_fct_norm 2.0 +813 69 negative_sampler.num_negs_per_pos 38.0 +813 69 training.batch_size 1.0 +813 70 model.embedding_dim 1.0 +813 70 model.scoring_fct_norm 1.0 +813 70 negative_sampler.num_negs_per_pos 67.0 +813 70 training.batch_size 0.0 +813 71 model.embedding_dim 1.0 +813 71 model.scoring_fct_norm 1.0 +813 71 negative_sampler.num_negs_per_pos 78.0 +813 71 training.batch_size 2.0 +813 72 model.embedding_dim 0.0 +813 72 model.scoring_fct_norm 1.0 +813 72 negative_sampler.num_negs_per_pos 21.0 +813 72 training.batch_size 0.0 +813 73 model.embedding_dim 1.0 +813 73 model.scoring_fct_norm 1.0 +813 73 negative_sampler.num_negs_per_pos 88.0 +813 73 training.batch_size 1.0 +813 74 model.embedding_dim 2.0 +813 74 model.scoring_fct_norm 1.0 +813 74 
negative_sampler.num_negs_per_pos 43.0 +813 74 training.batch_size 1.0 +813 75 model.embedding_dim 1.0 +813 75 model.scoring_fct_norm 2.0 +813 75 negative_sampler.num_negs_per_pos 12.0 +813 75 training.batch_size 0.0 +813 76 model.embedding_dim 1.0 +813 76 model.scoring_fct_norm 2.0 +813 76 negative_sampler.num_negs_per_pos 67.0 +813 76 training.batch_size 1.0 +813 77 model.embedding_dim 0.0 +813 77 model.scoring_fct_norm 1.0 +813 77 negative_sampler.num_negs_per_pos 28.0 +813 77 training.batch_size 0.0 +813 78 model.embedding_dim 1.0 +813 78 model.scoring_fct_norm 1.0 +813 78 negative_sampler.num_negs_per_pos 54.0 +813 78 training.batch_size 0.0 +813 79 model.embedding_dim 1.0 +813 79 model.scoring_fct_norm 1.0 +813 79 negative_sampler.num_negs_per_pos 66.0 +813 79 training.batch_size 1.0 +813 80 model.embedding_dim 1.0 +813 80 model.scoring_fct_norm 1.0 +813 80 negative_sampler.num_negs_per_pos 84.0 +813 80 training.batch_size 2.0 +813 81 model.embedding_dim 2.0 +813 81 model.scoring_fct_norm 2.0 +813 81 negative_sampler.num_negs_per_pos 93.0 +813 81 training.batch_size 0.0 +813 82 model.embedding_dim 0.0 +813 82 model.scoring_fct_norm 2.0 +813 82 negative_sampler.num_negs_per_pos 87.0 +813 82 training.batch_size 0.0 +813 83 model.embedding_dim 2.0 +813 83 model.scoring_fct_norm 2.0 +813 83 negative_sampler.num_negs_per_pos 61.0 +813 83 training.batch_size 1.0 +813 84 model.embedding_dim 2.0 +813 84 model.scoring_fct_norm 2.0 +813 84 negative_sampler.num_negs_per_pos 60.0 +813 84 training.batch_size 1.0 +813 85 model.embedding_dim 1.0 +813 85 model.scoring_fct_norm 2.0 +813 85 negative_sampler.num_negs_per_pos 47.0 +813 85 training.batch_size 2.0 +813 86 model.embedding_dim 1.0 +813 86 model.scoring_fct_norm 1.0 +813 86 negative_sampler.num_negs_per_pos 72.0 +813 86 training.batch_size 1.0 +813 87 model.embedding_dim 1.0 +813 87 model.scoring_fct_norm 1.0 +813 87 negative_sampler.num_negs_per_pos 5.0 +813 87 training.batch_size 2.0 +813 88 model.embedding_dim 2.0 
+813 88 model.scoring_fct_norm 1.0 +813 88 negative_sampler.num_negs_per_pos 24.0 +813 88 training.batch_size 1.0 +813 89 model.embedding_dim 1.0 +813 89 model.scoring_fct_norm 2.0 +813 89 negative_sampler.num_negs_per_pos 20.0 +813 89 training.batch_size 1.0 +813 90 model.embedding_dim 2.0 +813 90 model.scoring_fct_norm 2.0 +813 90 negative_sampler.num_negs_per_pos 14.0 +813 90 training.batch_size 2.0 +813 91 model.embedding_dim 2.0 +813 91 model.scoring_fct_norm 2.0 +813 91 negative_sampler.num_negs_per_pos 61.0 +813 91 training.batch_size 2.0 +813 92 model.embedding_dim 2.0 +813 92 model.scoring_fct_norm 1.0 +813 92 negative_sampler.num_negs_per_pos 40.0 +813 92 training.batch_size 0.0 +813 93 model.embedding_dim 1.0 +813 93 model.scoring_fct_norm 1.0 +813 93 negative_sampler.num_negs_per_pos 93.0 +813 93 training.batch_size 0.0 +813 94 model.embedding_dim 1.0 +813 94 model.scoring_fct_norm 2.0 +813 94 negative_sampler.num_negs_per_pos 27.0 +813 94 training.batch_size 2.0 +813 95 model.embedding_dim 1.0 +813 95 model.scoring_fct_norm 1.0 +813 95 negative_sampler.num_negs_per_pos 8.0 +813 95 training.batch_size 1.0 +813 96 model.embedding_dim 2.0 +813 96 model.scoring_fct_norm 2.0 +813 96 negative_sampler.num_negs_per_pos 90.0 +813 96 training.batch_size 2.0 +813 97 model.embedding_dim 2.0 +813 97 model.scoring_fct_norm 1.0 +813 97 negative_sampler.num_negs_per_pos 71.0 +813 97 training.batch_size 2.0 +813 98 model.embedding_dim 0.0 +813 98 model.scoring_fct_norm 1.0 +813 98 negative_sampler.num_negs_per_pos 99.0 +813 98 training.batch_size 2.0 +813 99 model.embedding_dim 2.0 +813 99 model.scoring_fct_norm 2.0 +813 99 negative_sampler.num_negs_per_pos 4.0 +813 99 training.batch_size 1.0 +813 100 model.embedding_dim 2.0 +813 100 model.scoring_fct_norm 1.0 +813 100 negative_sampler.num_negs_per_pos 92.0 +813 100 training.batch_size 2.0 +813 1 dataset """kinships""" +813 1 model """transe""" +813 1 loss """bceaftersigmoid""" +813 1 regularizer """no""" +813 1 
optimizer """adadelta""" +813 1 training_loop """owa""" +813 1 negative_sampler """basic""" +813 1 evaluator """rankbased""" +813 2 dataset """kinships""" +813 2 model """transe""" +813 2 loss """bceaftersigmoid""" +813 2 regularizer """no""" +813 2 optimizer """adadelta""" +813 2 training_loop """owa""" +813 2 negative_sampler """basic""" +813 2 evaluator """rankbased""" +813 3 dataset """kinships""" +813 3 model """transe""" +813 3 loss """bceaftersigmoid""" +813 3 regularizer """no""" +813 3 optimizer """adadelta""" +813 3 training_loop """owa""" +813 3 negative_sampler """basic""" +813 3 evaluator """rankbased""" +813 4 dataset """kinships""" +813 4 model """transe""" +813 4 loss """bceaftersigmoid""" +813 4 regularizer """no""" +813 4 optimizer """adadelta""" +813 4 training_loop """owa""" +813 4 negative_sampler """basic""" +813 4 evaluator """rankbased""" +813 5 dataset """kinships""" +813 5 model """transe""" +813 5 loss """bceaftersigmoid""" +813 5 regularizer """no""" +813 5 optimizer """adadelta""" +813 5 training_loop """owa""" +813 5 negative_sampler """basic""" +813 5 evaluator """rankbased""" +813 6 dataset """kinships""" +813 6 model """transe""" +813 6 loss """bceaftersigmoid""" +813 6 regularizer """no""" +813 6 optimizer """adadelta""" +813 6 training_loop """owa""" +813 6 negative_sampler """basic""" +813 6 evaluator """rankbased""" +813 7 dataset """kinships""" +813 7 model """transe""" +813 7 loss """bceaftersigmoid""" +813 7 regularizer """no""" +813 7 optimizer """adadelta""" +813 7 training_loop """owa""" +813 7 negative_sampler """basic""" +813 7 evaluator """rankbased""" +813 8 dataset """kinships""" +813 8 model """transe""" +813 8 loss """bceaftersigmoid""" +813 8 regularizer """no""" +813 8 optimizer """adadelta""" +813 8 training_loop """owa""" +813 8 negative_sampler """basic""" +813 8 evaluator """rankbased""" +813 9 dataset """kinships""" +813 9 model """transe""" +813 9 loss """bceaftersigmoid""" +813 9 regularizer """no""" +813 9 
optimizer """adadelta""" +813 9 training_loop """owa""" +813 9 negative_sampler """basic""" +813 9 evaluator """rankbased""" +813 10 dataset """kinships""" +813 10 model """transe""" +813 10 loss """bceaftersigmoid""" +813 10 regularizer """no""" +813 10 optimizer """adadelta""" +813 10 training_loop """owa""" +813 10 negative_sampler """basic""" +813 10 evaluator """rankbased""" +813 11 dataset """kinships""" +813 11 model """transe""" +813 11 loss """bceaftersigmoid""" +813 11 regularizer """no""" +813 11 optimizer """adadelta""" +813 11 training_loop """owa""" +813 11 negative_sampler """basic""" +813 11 evaluator """rankbased""" +813 12 dataset """kinships""" +813 12 model """transe""" +813 12 loss """bceaftersigmoid""" +813 12 regularizer """no""" +813 12 optimizer """adadelta""" +813 12 training_loop """owa""" +813 12 negative_sampler """basic""" +813 12 evaluator """rankbased""" +813 13 dataset """kinships""" +813 13 model """transe""" +813 13 loss """bceaftersigmoid""" +813 13 regularizer """no""" +813 13 optimizer """adadelta""" +813 13 training_loop """owa""" +813 13 negative_sampler """basic""" +813 13 evaluator """rankbased""" +813 14 dataset """kinships""" +813 14 model """transe""" +813 14 loss """bceaftersigmoid""" +813 14 regularizer """no""" +813 14 optimizer """adadelta""" +813 14 training_loop """owa""" +813 14 negative_sampler """basic""" +813 14 evaluator """rankbased""" +813 15 dataset """kinships""" +813 15 model """transe""" +813 15 loss """bceaftersigmoid""" +813 15 regularizer """no""" +813 15 optimizer """adadelta""" +813 15 training_loop """owa""" +813 15 negative_sampler """basic""" +813 15 evaluator """rankbased""" +813 16 dataset """kinships""" +813 16 model """transe""" +813 16 loss """bceaftersigmoid""" +813 16 regularizer """no""" +813 16 optimizer """adadelta""" +813 16 training_loop """owa""" +813 16 negative_sampler """basic""" +813 16 evaluator """rankbased""" +813 17 dataset """kinships""" +813 17 model """transe""" +813 17 
loss """bceaftersigmoid""" +813 17 regularizer """no""" +813 17 optimizer """adadelta""" +813 17 training_loop """owa""" +813 17 negative_sampler """basic""" +813 17 evaluator """rankbased""" +813 18 dataset """kinships""" +813 18 model """transe""" +813 18 loss """bceaftersigmoid""" +813 18 regularizer """no""" +813 18 optimizer """adadelta""" +813 18 training_loop """owa""" +813 18 negative_sampler """basic""" +813 18 evaluator """rankbased""" +813 19 dataset """kinships""" +813 19 model """transe""" +813 19 loss """bceaftersigmoid""" +813 19 regularizer """no""" +813 19 optimizer """adadelta""" +813 19 training_loop """owa""" +813 19 negative_sampler """basic""" +813 19 evaluator """rankbased""" +813 20 dataset """kinships""" +813 20 model """transe""" +813 20 loss """bceaftersigmoid""" +813 20 regularizer """no""" +813 20 optimizer """adadelta""" +813 20 training_loop """owa""" +813 20 negative_sampler """basic""" +813 20 evaluator """rankbased""" +813 21 dataset """kinships""" +813 21 model """transe""" +813 21 loss """bceaftersigmoid""" +813 21 regularizer """no""" +813 21 optimizer """adadelta""" +813 21 training_loop """owa""" +813 21 negative_sampler """basic""" +813 21 evaluator """rankbased""" +813 22 dataset """kinships""" +813 22 model """transe""" +813 22 loss """bceaftersigmoid""" +813 22 regularizer """no""" +813 22 optimizer """adadelta""" +813 22 training_loop """owa""" +813 22 negative_sampler """basic""" +813 22 evaluator """rankbased""" +813 23 dataset """kinships""" +813 23 model """transe""" +813 23 loss """bceaftersigmoid""" +813 23 regularizer """no""" +813 23 optimizer """adadelta""" +813 23 training_loop """owa""" +813 23 negative_sampler """basic""" +813 23 evaluator """rankbased""" +813 24 dataset """kinships""" +813 24 model """transe""" +813 24 loss """bceaftersigmoid""" +813 24 regularizer """no""" +813 24 optimizer """adadelta""" +813 24 training_loop """owa""" +813 24 negative_sampler """basic""" +813 24 evaluator """rankbased""" 
+813 25 dataset """kinships""" +813 25 model """transe""" +813 25 loss """bceaftersigmoid""" +813 25 regularizer """no""" +813 25 optimizer """adadelta""" +813 25 training_loop """owa""" +813 25 negative_sampler """basic""" +813 25 evaluator """rankbased""" +813 26 dataset """kinships""" +813 26 model """transe""" +813 26 loss """bceaftersigmoid""" +813 26 regularizer """no""" +813 26 optimizer """adadelta""" +813 26 training_loop """owa""" +813 26 negative_sampler """basic""" +813 26 evaluator """rankbased""" +813 27 dataset """kinships""" +813 27 model """transe""" +813 27 loss """bceaftersigmoid""" +813 27 regularizer """no""" +813 27 optimizer """adadelta""" +813 27 training_loop """owa""" +813 27 negative_sampler """basic""" +813 27 evaluator """rankbased""" +813 28 dataset """kinships""" +813 28 model """transe""" +813 28 loss """bceaftersigmoid""" +813 28 regularizer """no""" +813 28 optimizer """adadelta""" +813 28 training_loop """owa""" +813 28 negative_sampler """basic""" +813 28 evaluator """rankbased""" +813 29 dataset """kinships""" +813 29 model """transe""" +813 29 loss """bceaftersigmoid""" +813 29 regularizer """no""" +813 29 optimizer """adadelta""" +813 29 training_loop """owa""" +813 29 negative_sampler """basic""" +813 29 evaluator """rankbased""" +813 30 dataset """kinships""" +813 30 model """transe""" +813 30 loss """bceaftersigmoid""" +813 30 regularizer """no""" +813 30 optimizer """adadelta""" +813 30 training_loop """owa""" +813 30 negative_sampler """basic""" +813 30 evaluator """rankbased""" +813 31 dataset """kinships""" +813 31 model """transe""" +813 31 loss """bceaftersigmoid""" +813 31 regularizer """no""" +813 31 optimizer """adadelta""" +813 31 training_loop """owa""" +813 31 negative_sampler """basic""" +813 31 evaluator """rankbased""" +813 32 dataset """kinships""" +813 32 model """transe""" +813 32 loss """bceaftersigmoid""" +813 32 regularizer """no""" +813 32 optimizer """adadelta""" +813 32 training_loop """owa""" +813 
32 negative_sampler """basic""" +813 32 evaluator """rankbased""" +813 33 dataset """kinships""" +813 33 model """transe""" +813 33 loss """bceaftersigmoid""" +813 33 regularizer """no""" +813 33 optimizer """adadelta""" +813 33 training_loop """owa""" +813 33 negative_sampler """basic""" +813 33 evaluator """rankbased""" +813 34 dataset """kinships""" +813 34 model """transe""" +813 34 loss """bceaftersigmoid""" +813 34 regularizer """no""" +813 34 optimizer """adadelta""" +813 34 training_loop """owa""" +813 34 negative_sampler """basic""" +813 34 evaluator """rankbased""" +813 35 dataset """kinships""" +813 35 model """transe""" +813 35 loss """bceaftersigmoid""" +813 35 regularizer """no""" +813 35 optimizer """adadelta""" +813 35 training_loop """owa""" +813 35 negative_sampler """basic""" +813 35 evaluator """rankbased""" +813 36 dataset """kinships""" +813 36 model """transe""" +813 36 loss """bceaftersigmoid""" +813 36 regularizer """no""" +813 36 optimizer """adadelta""" +813 36 training_loop """owa""" +813 36 negative_sampler """basic""" +813 36 evaluator """rankbased""" +813 37 dataset """kinships""" +813 37 model """transe""" +813 37 loss """bceaftersigmoid""" +813 37 regularizer """no""" +813 37 optimizer """adadelta""" +813 37 training_loop """owa""" +813 37 negative_sampler """basic""" +813 37 evaluator """rankbased""" +813 38 dataset """kinships""" +813 38 model """transe""" +813 38 loss """bceaftersigmoid""" +813 38 regularizer """no""" +813 38 optimizer """adadelta""" +813 38 training_loop """owa""" +813 38 negative_sampler """basic""" +813 38 evaluator """rankbased""" +813 39 dataset """kinships""" +813 39 model """transe""" +813 39 loss """bceaftersigmoid""" +813 39 regularizer """no""" +813 39 optimizer """adadelta""" +813 39 training_loop """owa""" +813 39 negative_sampler """basic""" +813 39 evaluator """rankbased""" +813 40 dataset """kinships""" +813 40 model """transe""" +813 40 loss """bceaftersigmoid""" +813 40 regularizer """no""" +813 
40 optimizer """adadelta""" +813 40 training_loop """owa""" +813 40 negative_sampler """basic""" +813 40 evaluator """rankbased""" +813 41 dataset """kinships""" +813 41 model """transe""" +813 41 loss """bceaftersigmoid""" +813 41 regularizer """no""" +813 41 optimizer """adadelta""" +813 41 training_loop """owa""" +813 41 negative_sampler """basic""" +813 41 evaluator """rankbased""" +813 42 dataset """kinships""" +813 42 model """transe""" +813 42 loss """bceaftersigmoid""" +813 42 regularizer """no""" +813 42 optimizer """adadelta""" +813 42 training_loop """owa""" +813 42 negative_sampler """basic""" +813 42 evaluator """rankbased""" +813 43 dataset """kinships""" +813 43 model """transe""" +813 43 loss """bceaftersigmoid""" +813 43 regularizer """no""" +813 43 optimizer """adadelta""" +813 43 training_loop """owa""" +813 43 negative_sampler """basic""" +813 43 evaluator """rankbased""" +813 44 dataset """kinships""" +813 44 model """transe""" +813 44 loss """bceaftersigmoid""" +813 44 regularizer """no""" +813 44 optimizer """adadelta""" +813 44 training_loop """owa""" +813 44 negative_sampler """basic""" +813 44 evaluator """rankbased""" +813 45 dataset """kinships""" +813 45 model """transe""" +813 45 loss """bceaftersigmoid""" +813 45 regularizer """no""" +813 45 optimizer """adadelta""" +813 45 training_loop """owa""" +813 45 negative_sampler """basic""" +813 45 evaluator """rankbased""" +813 46 dataset """kinships""" +813 46 model """transe""" +813 46 loss """bceaftersigmoid""" +813 46 regularizer """no""" +813 46 optimizer """adadelta""" +813 46 training_loop """owa""" +813 46 negative_sampler """basic""" +813 46 evaluator """rankbased""" +813 47 dataset """kinships""" +813 47 model """transe""" +813 47 loss """bceaftersigmoid""" +813 47 regularizer """no""" +813 47 optimizer """adadelta""" +813 47 training_loop """owa""" +813 47 negative_sampler """basic""" +813 47 evaluator """rankbased""" +813 48 dataset """kinships""" +813 48 model """transe""" +813 
48 loss """bceaftersigmoid""" +813 48 regularizer """no""" +813 48 optimizer """adadelta""" +813 48 training_loop """owa""" +813 48 negative_sampler """basic""" +813 48 evaluator """rankbased""" +813 49 dataset """kinships""" +813 49 model """transe""" +813 49 loss """bceaftersigmoid""" +813 49 regularizer """no""" +813 49 optimizer """adadelta""" +813 49 training_loop """owa""" +813 49 negative_sampler """basic""" +813 49 evaluator """rankbased""" +813 50 dataset """kinships""" +813 50 model """transe""" +813 50 loss """bceaftersigmoid""" +813 50 regularizer """no""" +813 50 optimizer """adadelta""" +813 50 training_loop """owa""" +813 50 negative_sampler """basic""" +813 50 evaluator """rankbased""" +813 51 dataset """kinships""" +813 51 model """transe""" +813 51 loss """bceaftersigmoid""" +813 51 regularizer """no""" +813 51 optimizer """adadelta""" +813 51 training_loop """owa""" +813 51 negative_sampler """basic""" +813 51 evaluator """rankbased""" +813 52 dataset """kinships""" +813 52 model """transe""" +813 52 loss """bceaftersigmoid""" +813 52 regularizer """no""" +813 52 optimizer """adadelta""" +813 52 training_loop """owa""" +813 52 negative_sampler """basic""" +813 52 evaluator """rankbased""" +813 53 dataset """kinships""" +813 53 model """transe""" +813 53 loss """bceaftersigmoid""" +813 53 regularizer """no""" +813 53 optimizer """adadelta""" +813 53 training_loop """owa""" +813 53 negative_sampler """basic""" +813 53 evaluator """rankbased""" +813 54 dataset """kinships""" +813 54 model """transe""" +813 54 loss """bceaftersigmoid""" +813 54 regularizer """no""" +813 54 optimizer """adadelta""" +813 54 training_loop """owa""" +813 54 negative_sampler """basic""" +813 54 evaluator """rankbased""" +813 55 dataset """kinships""" +813 55 model """transe""" +813 55 loss """bceaftersigmoid""" +813 55 regularizer """no""" +813 55 optimizer """adadelta""" +813 55 training_loop """owa""" +813 55 negative_sampler """basic""" +813 55 evaluator 
"""rankbased""" +813 56 dataset """kinships""" +813 56 model """transe""" +813 56 loss """bceaftersigmoid""" +813 56 regularizer """no""" +813 56 optimizer """adadelta""" +813 56 training_loop """owa""" +813 56 negative_sampler """basic""" +813 56 evaluator """rankbased""" +813 57 dataset """kinships""" +813 57 model """transe""" +813 57 loss """bceaftersigmoid""" +813 57 regularizer """no""" +813 57 optimizer """adadelta""" +813 57 training_loop """owa""" +813 57 negative_sampler """basic""" +813 57 evaluator """rankbased""" +813 58 dataset """kinships""" +813 58 model """transe""" +813 58 loss """bceaftersigmoid""" +813 58 regularizer """no""" +813 58 optimizer """adadelta""" +813 58 training_loop """owa""" +813 58 negative_sampler """basic""" +813 58 evaluator """rankbased""" +813 59 dataset """kinships""" +813 59 model """transe""" +813 59 loss """bceaftersigmoid""" +813 59 regularizer """no""" +813 59 optimizer """adadelta""" +813 59 training_loop """owa""" +813 59 negative_sampler """basic""" +813 59 evaluator """rankbased""" +813 60 dataset """kinships""" +813 60 model """transe""" +813 60 loss """bceaftersigmoid""" +813 60 regularizer """no""" +813 60 optimizer """adadelta""" +813 60 training_loop """owa""" +813 60 negative_sampler """basic""" +813 60 evaluator """rankbased""" +813 61 dataset """kinships""" +813 61 model """transe""" +813 61 loss """bceaftersigmoid""" +813 61 regularizer """no""" +813 61 optimizer """adadelta""" +813 61 training_loop """owa""" +813 61 negative_sampler """basic""" +813 61 evaluator """rankbased""" +813 62 dataset """kinships""" +813 62 model """transe""" +813 62 loss """bceaftersigmoid""" +813 62 regularizer """no""" +813 62 optimizer """adadelta""" +813 62 training_loop """owa""" +813 62 negative_sampler """basic""" +813 62 evaluator """rankbased""" +813 63 dataset """kinships""" +813 63 model """transe""" +813 63 loss """bceaftersigmoid""" +813 63 regularizer """no""" +813 63 optimizer """adadelta""" +813 63 training_loop 
"""owa""" +813 63 negative_sampler """basic""" +813 63 evaluator """rankbased""" +813 64 dataset """kinships""" +813 64 model """transe""" +813 64 loss """bceaftersigmoid""" +813 64 regularizer """no""" +813 64 optimizer """adadelta""" +813 64 training_loop """owa""" +813 64 negative_sampler """basic""" +813 64 evaluator """rankbased""" +813 65 dataset """kinships""" +813 65 model """transe""" +813 65 loss """bceaftersigmoid""" +813 65 regularizer """no""" +813 65 optimizer """adadelta""" +813 65 training_loop """owa""" +813 65 negative_sampler """basic""" +813 65 evaluator """rankbased""" +813 66 dataset """kinships""" +813 66 model """transe""" +813 66 loss """bceaftersigmoid""" +813 66 regularizer """no""" +813 66 optimizer """adadelta""" +813 66 training_loop """owa""" +813 66 negative_sampler """basic""" +813 66 evaluator """rankbased""" +813 67 dataset """kinships""" +813 67 model """transe""" +813 67 loss """bceaftersigmoid""" +813 67 regularizer """no""" +813 67 optimizer """adadelta""" +813 67 training_loop """owa""" +813 67 negative_sampler """basic""" +813 67 evaluator """rankbased""" +813 68 dataset """kinships""" +813 68 model """transe""" +813 68 loss """bceaftersigmoid""" +813 68 regularizer """no""" +813 68 optimizer """adadelta""" +813 68 training_loop """owa""" +813 68 negative_sampler """basic""" +813 68 evaluator """rankbased""" +813 69 dataset """kinships""" +813 69 model """transe""" +813 69 loss """bceaftersigmoid""" +813 69 regularizer """no""" +813 69 optimizer """adadelta""" +813 69 training_loop """owa""" +813 69 negative_sampler """basic""" +813 69 evaluator """rankbased""" +813 70 dataset """kinships""" +813 70 model """transe""" +813 70 loss """bceaftersigmoid""" +813 70 regularizer """no""" +813 70 optimizer """adadelta""" +813 70 training_loop """owa""" +813 70 negative_sampler """basic""" +813 70 evaluator """rankbased""" +813 71 dataset """kinships""" +813 71 model """transe""" +813 71 loss """bceaftersigmoid""" +813 71 regularizer 
"""no""" +813 71 optimizer """adadelta""" +813 71 training_loop """owa""" +813 71 negative_sampler """basic""" +813 71 evaluator """rankbased""" +813 72 dataset """kinships""" +813 72 model """transe""" +813 72 loss """bceaftersigmoid""" +813 72 regularizer """no""" +813 72 optimizer """adadelta""" +813 72 training_loop """owa""" +813 72 negative_sampler """basic""" +813 72 evaluator """rankbased""" +813 73 dataset """kinships""" +813 73 model """transe""" +813 73 loss """bceaftersigmoid""" +813 73 regularizer """no""" +813 73 optimizer """adadelta""" +813 73 training_loop """owa""" +813 73 negative_sampler """basic""" +813 73 evaluator """rankbased""" +813 74 dataset """kinships""" +813 74 model """transe""" +813 74 loss """bceaftersigmoid""" +813 74 regularizer """no""" +813 74 optimizer """adadelta""" +813 74 training_loop """owa""" +813 74 negative_sampler """basic""" +813 74 evaluator """rankbased""" +813 75 dataset """kinships""" +813 75 model """transe""" +813 75 loss """bceaftersigmoid""" +813 75 regularizer """no""" +813 75 optimizer """adadelta""" +813 75 training_loop """owa""" +813 75 negative_sampler """basic""" +813 75 evaluator """rankbased""" +813 76 dataset """kinships""" +813 76 model """transe""" +813 76 loss """bceaftersigmoid""" +813 76 regularizer """no""" +813 76 optimizer """adadelta""" +813 76 training_loop """owa""" +813 76 negative_sampler """basic""" +813 76 evaluator """rankbased""" +813 77 dataset """kinships""" +813 77 model """transe""" +813 77 loss """bceaftersigmoid""" +813 77 regularizer """no""" +813 77 optimizer """adadelta""" +813 77 training_loop """owa""" +813 77 negative_sampler """basic""" +813 77 evaluator """rankbased""" +813 78 dataset """kinships""" +813 78 model """transe""" +813 78 loss """bceaftersigmoid""" +813 78 regularizer """no""" +813 78 optimizer """adadelta""" +813 78 training_loop """owa""" +813 78 negative_sampler """basic""" +813 78 evaluator """rankbased""" +813 79 dataset """kinships""" +813 79 model 
"""transe""" +813 79 loss """bceaftersigmoid""" +813 79 regularizer """no""" +813 79 optimizer """adadelta""" +813 79 training_loop """owa""" +813 79 negative_sampler """basic""" +813 79 evaluator """rankbased""" +813 80 dataset """kinships""" +813 80 model """transe""" +813 80 loss """bceaftersigmoid""" +813 80 regularizer """no""" +813 80 optimizer """adadelta""" +813 80 training_loop """owa""" +813 80 negative_sampler """basic""" +813 80 evaluator """rankbased""" +813 81 dataset """kinships""" +813 81 model """transe""" +813 81 loss """bceaftersigmoid""" +813 81 regularizer """no""" +813 81 optimizer """adadelta""" +813 81 training_loop """owa""" +813 81 negative_sampler """basic""" +813 81 evaluator """rankbased""" +813 82 dataset """kinships""" +813 82 model """transe""" +813 82 loss """bceaftersigmoid""" +813 82 regularizer """no""" +813 82 optimizer """adadelta""" +813 82 training_loop """owa""" +813 82 negative_sampler """basic""" +813 82 evaluator """rankbased""" +813 83 dataset """kinships""" +813 83 model """transe""" +813 83 loss """bceaftersigmoid""" +813 83 regularizer """no""" +813 83 optimizer """adadelta""" +813 83 training_loop """owa""" +813 83 negative_sampler """basic""" +813 83 evaluator """rankbased""" +813 84 dataset """kinships""" +813 84 model """transe""" +813 84 loss """bceaftersigmoid""" +813 84 regularizer """no""" +813 84 optimizer """adadelta""" +813 84 training_loop """owa""" +813 84 negative_sampler """basic""" +813 84 evaluator """rankbased""" +813 85 dataset """kinships""" +813 85 model """transe""" +813 85 loss """bceaftersigmoid""" +813 85 regularizer """no""" +813 85 optimizer """adadelta""" +813 85 training_loop """owa""" +813 85 negative_sampler """basic""" +813 85 evaluator """rankbased""" +813 86 dataset """kinships""" +813 86 model """transe""" +813 86 loss """bceaftersigmoid""" +813 86 regularizer """no""" +813 86 optimizer """adadelta""" +813 86 training_loop """owa""" +813 86 negative_sampler """basic""" +813 86 
evaluator """rankbased""" +813 87 dataset """kinships""" +813 87 model """transe""" +813 87 loss """bceaftersigmoid""" +813 87 regularizer """no""" +813 87 optimizer """adadelta""" +813 87 training_loop """owa""" +813 87 negative_sampler """basic""" +813 87 evaluator """rankbased""" +813 88 dataset """kinships""" +813 88 model """transe""" +813 88 loss """bceaftersigmoid""" +813 88 regularizer """no""" +813 88 optimizer """adadelta""" +813 88 training_loop """owa""" +813 88 negative_sampler """basic""" +813 88 evaluator """rankbased""" +813 89 dataset """kinships""" +813 89 model """transe""" +813 89 loss """bceaftersigmoid""" +813 89 regularizer """no""" +813 89 optimizer """adadelta""" +813 89 training_loop """owa""" +813 89 negative_sampler """basic""" +813 89 evaluator """rankbased""" +813 90 dataset """kinships""" +813 90 model """transe""" +813 90 loss """bceaftersigmoid""" +813 90 regularizer """no""" +813 90 optimizer """adadelta""" +813 90 training_loop """owa""" +813 90 negative_sampler """basic""" +813 90 evaluator """rankbased""" +813 91 dataset """kinships""" +813 91 model """transe""" +813 91 loss """bceaftersigmoid""" +813 91 regularizer """no""" +813 91 optimizer """adadelta""" +813 91 training_loop """owa""" +813 91 negative_sampler """basic""" +813 91 evaluator """rankbased""" +813 92 dataset """kinships""" +813 92 model """transe""" +813 92 loss """bceaftersigmoid""" +813 92 regularizer """no""" +813 92 optimizer """adadelta""" +813 92 training_loop """owa""" +813 92 negative_sampler """basic""" +813 92 evaluator """rankbased""" +813 93 dataset """kinships""" +813 93 model """transe""" +813 93 loss """bceaftersigmoid""" +813 93 regularizer """no""" +813 93 optimizer """adadelta""" +813 93 training_loop """owa""" +813 93 negative_sampler """basic""" +813 93 evaluator """rankbased""" +813 94 dataset """kinships""" +813 94 model """transe""" +813 94 loss """bceaftersigmoid""" +813 94 regularizer """no""" +813 94 optimizer """adadelta""" +813 94 
training_loop """owa""" +813 94 negative_sampler """basic""" +813 94 evaluator """rankbased""" +813 95 dataset """kinships""" +813 95 model """transe""" +813 95 loss """bceaftersigmoid""" +813 95 regularizer """no""" +813 95 optimizer """adadelta""" +813 95 training_loop """owa""" +813 95 negative_sampler """basic""" +813 95 evaluator """rankbased""" +813 96 dataset """kinships""" +813 96 model """transe""" +813 96 loss """bceaftersigmoid""" +813 96 regularizer """no""" +813 96 optimizer """adadelta""" +813 96 training_loop """owa""" +813 96 negative_sampler """basic""" +813 96 evaluator """rankbased""" +813 97 dataset """kinships""" +813 97 model """transe""" +813 97 loss """bceaftersigmoid""" +813 97 regularizer """no""" +813 97 optimizer """adadelta""" +813 97 training_loop """owa""" +813 97 negative_sampler """basic""" +813 97 evaluator """rankbased""" +813 98 dataset """kinships""" +813 98 model """transe""" +813 98 loss """bceaftersigmoid""" +813 98 regularizer """no""" +813 98 optimizer """adadelta""" +813 98 training_loop """owa""" +813 98 negative_sampler """basic""" +813 98 evaluator """rankbased""" +813 99 dataset """kinships""" +813 99 model """transe""" +813 99 loss """bceaftersigmoid""" +813 99 regularizer """no""" +813 99 optimizer """adadelta""" +813 99 training_loop """owa""" +813 99 negative_sampler """basic""" +813 99 evaluator """rankbased""" +813 100 dataset """kinships""" +813 100 model """transe""" +813 100 loss """bceaftersigmoid""" +813 100 regularizer """no""" +813 100 optimizer """adadelta""" +813 100 training_loop """owa""" +813 100 negative_sampler """basic""" +813 100 evaluator """rankbased""" +814 1 model.embedding_dim 2.0 +814 1 model.scoring_fct_norm 2.0 +814 1 negative_sampler.num_negs_per_pos 85.0 +814 1 training.batch_size 2.0 +814 2 model.embedding_dim 0.0 +814 2 model.scoring_fct_norm 1.0 +814 2 negative_sampler.num_negs_per_pos 10.0 +814 2 training.batch_size 1.0 +814 3 model.embedding_dim 2.0 +814 3 model.scoring_fct_norm 1.0 
+814 3 negative_sampler.num_negs_per_pos 9.0 +814 3 training.batch_size 1.0 +814 4 model.embedding_dim 0.0 +814 4 model.scoring_fct_norm 2.0 +814 4 negative_sampler.num_negs_per_pos 5.0 +814 4 training.batch_size 2.0 +814 5 model.embedding_dim 0.0 +814 5 model.scoring_fct_norm 2.0 +814 5 negative_sampler.num_negs_per_pos 23.0 +814 5 training.batch_size 2.0 +814 6 model.embedding_dim 0.0 +814 6 model.scoring_fct_norm 1.0 +814 6 negative_sampler.num_negs_per_pos 52.0 +814 6 training.batch_size 1.0 +814 7 model.embedding_dim 2.0 +814 7 model.scoring_fct_norm 2.0 +814 7 negative_sampler.num_negs_per_pos 35.0 +814 7 training.batch_size 1.0 +814 8 model.embedding_dim 2.0 +814 8 model.scoring_fct_norm 2.0 +814 8 negative_sampler.num_negs_per_pos 79.0 +814 8 training.batch_size 2.0 +814 9 model.embedding_dim 1.0 +814 9 model.scoring_fct_norm 2.0 +814 9 negative_sampler.num_negs_per_pos 59.0 +814 9 training.batch_size 0.0 +814 10 model.embedding_dim 0.0 +814 10 model.scoring_fct_norm 1.0 +814 10 negative_sampler.num_negs_per_pos 99.0 +814 10 training.batch_size 0.0 +814 11 model.embedding_dim 0.0 +814 11 model.scoring_fct_norm 2.0 +814 11 negative_sampler.num_negs_per_pos 55.0 +814 11 training.batch_size 0.0 +814 12 model.embedding_dim 0.0 +814 12 model.scoring_fct_norm 1.0 +814 12 negative_sampler.num_negs_per_pos 24.0 +814 12 training.batch_size 2.0 +814 13 model.embedding_dim 0.0 +814 13 model.scoring_fct_norm 2.0 +814 13 negative_sampler.num_negs_per_pos 17.0 +814 13 training.batch_size 0.0 +814 14 model.embedding_dim 0.0 +814 14 model.scoring_fct_norm 2.0 +814 14 negative_sampler.num_negs_per_pos 6.0 +814 14 training.batch_size 1.0 +814 15 model.embedding_dim 2.0 +814 15 model.scoring_fct_norm 2.0 +814 15 negative_sampler.num_negs_per_pos 37.0 +814 15 training.batch_size 2.0 +814 16 model.embedding_dim 1.0 +814 16 model.scoring_fct_norm 1.0 +814 16 negative_sampler.num_negs_per_pos 17.0 +814 16 training.batch_size 1.0 +814 17 model.embedding_dim 1.0 +814 17 
model.scoring_fct_norm 2.0 +814 17 negative_sampler.num_negs_per_pos 57.0 +814 17 training.batch_size 0.0 +814 18 model.embedding_dim 0.0 +814 18 model.scoring_fct_norm 1.0 +814 18 negative_sampler.num_negs_per_pos 1.0 +814 18 training.batch_size 1.0 +814 19 model.embedding_dim 1.0 +814 19 model.scoring_fct_norm 1.0 +814 19 negative_sampler.num_negs_per_pos 23.0 +814 19 training.batch_size 1.0 +814 20 model.embedding_dim 1.0 +814 20 model.scoring_fct_norm 2.0 +814 20 negative_sampler.num_negs_per_pos 13.0 +814 20 training.batch_size 0.0 +814 21 model.embedding_dim 0.0 +814 21 model.scoring_fct_norm 2.0 +814 21 negative_sampler.num_negs_per_pos 6.0 +814 21 training.batch_size 1.0 +814 22 model.embedding_dim 1.0 +814 22 model.scoring_fct_norm 1.0 +814 22 negative_sampler.num_negs_per_pos 42.0 +814 22 training.batch_size 0.0 +814 23 model.embedding_dim 1.0 +814 23 model.scoring_fct_norm 2.0 +814 23 negative_sampler.num_negs_per_pos 86.0 +814 23 training.batch_size 2.0 +814 24 model.embedding_dim 2.0 +814 24 model.scoring_fct_norm 1.0 +814 24 negative_sampler.num_negs_per_pos 72.0 +814 24 training.batch_size 1.0 +814 25 model.embedding_dim 2.0 +814 25 model.scoring_fct_norm 1.0 +814 25 negative_sampler.num_negs_per_pos 4.0 +814 25 training.batch_size 2.0 +814 26 model.embedding_dim 1.0 +814 26 model.scoring_fct_norm 1.0 +814 26 negative_sampler.num_negs_per_pos 82.0 +814 26 training.batch_size 1.0 +814 27 model.embedding_dim 2.0 +814 27 model.scoring_fct_norm 1.0 +814 27 negative_sampler.num_negs_per_pos 69.0 +814 27 training.batch_size 0.0 +814 28 model.embedding_dim 1.0 +814 28 model.scoring_fct_norm 2.0 +814 28 negative_sampler.num_negs_per_pos 10.0 +814 28 training.batch_size 0.0 +814 29 model.embedding_dim 1.0 +814 29 model.scoring_fct_norm 1.0 +814 29 negative_sampler.num_negs_per_pos 17.0 +814 29 training.batch_size 0.0 +814 30 model.embedding_dim 2.0 +814 30 model.scoring_fct_norm 2.0 +814 30 negative_sampler.num_negs_per_pos 33.0 +814 30 training.batch_size 
2.0 +814 31 model.embedding_dim 1.0 +814 31 model.scoring_fct_norm 1.0 +814 31 negative_sampler.num_negs_per_pos 37.0 +814 31 training.batch_size 1.0 +814 32 model.embedding_dim 2.0 +814 32 model.scoring_fct_norm 1.0 +814 32 negative_sampler.num_negs_per_pos 67.0 +814 32 training.batch_size 0.0 +814 33 model.embedding_dim 0.0 +814 33 model.scoring_fct_norm 1.0 +814 33 negative_sampler.num_negs_per_pos 53.0 +814 33 training.batch_size 2.0 +814 34 model.embedding_dim 2.0 +814 34 model.scoring_fct_norm 2.0 +814 34 negative_sampler.num_negs_per_pos 51.0 +814 34 training.batch_size 2.0 +814 35 model.embedding_dim 1.0 +814 35 model.scoring_fct_norm 2.0 +814 35 negative_sampler.num_negs_per_pos 93.0 +814 35 training.batch_size 1.0 +814 36 model.embedding_dim 0.0 +814 36 model.scoring_fct_norm 1.0 +814 36 negative_sampler.num_negs_per_pos 23.0 +814 36 training.batch_size 0.0 +814 37 model.embedding_dim 0.0 +814 37 model.scoring_fct_norm 1.0 +814 37 negative_sampler.num_negs_per_pos 73.0 +814 37 training.batch_size 2.0 +814 38 model.embedding_dim 1.0 +814 38 model.scoring_fct_norm 1.0 +814 38 negative_sampler.num_negs_per_pos 63.0 +814 38 training.batch_size 1.0 +814 39 model.embedding_dim 0.0 +814 39 model.scoring_fct_norm 1.0 +814 39 negative_sampler.num_negs_per_pos 12.0 +814 39 training.batch_size 0.0 +814 40 model.embedding_dim 0.0 +814 40 model.scoring_fct_norm 1.0 +814 40 negative_sampler.num_negs_per_pos 99.0 +814 40 training.batch_size 1.0 +814 41 model.embedding_dim 0.0 +814 41 model.scoring_fct_norm 1.0 +814 41 negative_sampler.num_negs_per_pos 74.0 +814 41 training.batch_size 1.0 +814 42 model.embedding_dim 1.0 +814 42 model.scoring_fct_norm 1.0 +814 42 negative_sampler.num_negs_per_pos 89.0 +814 42 training.batch_size 2.0 +814 43 model.embedding_dim 0.0 +814 43 model.scoring_fct_norm 1.0 +814 43 negative_sampler.num_negs_per_pos 9.0 +814 43 training.batch_size 2.0 +814 44 model.embedding_dim 2.0 +814 44 model.scoring_fct_norm 1.0 +814 44 
negative_sampler.num_negs_per_pos 59.0 +814 44 training.batch_size 0.0 +814 45 model.embedding_dim 1.0 +814 45 model.scoring_fct_norm 2.0 +814 45 negative_sampler.num_negs_per_pos 84.0 +814 45 training.batch_size 1.0 +814 46 model.embedding_dim 2.0 +814 46 model.scoring_fct_norm 2.0 +814 46 negative_sampler.num_negs_per_pos 9.0 +814 46 training.batch_size 2.0 +814 47 model.embedding_dim 1.0 +814 47 model.scoring_fct_norm 1.0 +814 47 negative_sampler.num_negs_per_pos 11.0 +814 47 training.batch_size 2.0 +814 48 model.embedding_dim 0.0 +814 48 model.scoring_fct_norm 1.0 +814 48 negative_sampler.num_negs_per_pos 36.0 +814 48 training.batch_size 2.0 +814 49 model.embedding_dim 2.0 +814 49 model.scoring_fct_norm 2.0 +814 49 negative_sampler.num_negs_per_pos 30.0 +814 49 training.batch_size 1.0 +814 50 model.embedding_dim 2.0 +814 50 model.scoring_fct_norm 2.0 +814 50 negative_sampler.num_negs_per_pos 67.0 +814 50 training.batch_size 2.0 +814 51 model.embedding_dim 2.0 +814 51 model.scoring_fct_norm 2.0 +814 51 negative_sampler.num_negs_per_pos 16.0 +814 51 training.batch_size 1.0 +814 52 model.embedding_dim 1.0 +814 52 model.scoring_fct_norm 2.0 +814 52 negative_sampler.num_negs_per_pos 16.0 +814 52 training.batch_size 1.0 +814 53 model.embedding_dim 1.0 +814 53 model.scoring_fct_norm 2.0 +814 53 negative_sampler.num_negs_per_pos 26.0 +814 53 training.batch_size 1.0 +814 54 model.embedding_dim 0.0 +814 54 model.scoring_fct_norm 2.0 +814 54 negative_sampler.num_negs_per_pos 59.0 +814 54 training.batch_size 2.0 +814 55 model.embedding_dim 1.0 +814 55 model.scoring_fct_norm 2.0 +814 55 negative_sampler.num_negs_per_pos 91.0 +814 55 training.batch_size 0.0 +814 56 model.embedding_dim 0.0 +814 56 model.scoring_fct_norm 1.0 +814 56 negative_sampler.num_negs_per_pos 89.0 +814 56 training.batch_size 1.0 +814 57 model.embedding_dim 1.0 +814 57 model.scoring_fct_norm 2.0 +814 57 negative_sampler.num_negs_per_pos 76.0 +814 57 training.batch_size 2.0 +814 58 model.embedding_dim 1.0 
+814 58 model.scoring_fct_norm 1.0 +814 58 negative_sampler.num_negs_per_pos 91.0 +814 58 training.batch_size 0.0 +814 59 model.embedding_dim 0.0 +814 59 model.scoring_fct_norm 2.0 +814 59 negative_sampler.num_negs_per_pos 9.0 +814 59 training.batch_size 2.0 +814 60 model.embedding_dim 2.0 +814 60 model.scoring_fct_norm 1.0 +814 60 negative_sampler.num_negs_per_pos 1.0 +814 60 training.batch_size 0.0 +814 61 model.embedding_dim 1.0 +814 61 model.scoring_fct_norm 1.0 +814 61 negative_sampler.num_negs_per_pos 36.0 +814 61 training.batch_size 0.0 +814 62 model.embedding_dim 0.0 +814 62 model.scoring_fct_norm 1.0 +814 62 negative_sampler.num_negs_per_pos 68.0 +814 62 training.batch_size 0.0 +814 63 model.embedding_dim 0.0 +814 63 model.scoring_fct_norm 2.0 +814 63 negative_sampler.num_negs_per_pos 56.0 +814 63 training.batch_size 2.0 +814 64 model.embedding_dim 0.0 +814 64 model.scoring_fct_norm 1.0 +814 64 negative_sampler.num_negs_per_pos 12.0 +814 64 training.batch_size 1.0 +814 65 model.embedding_dim 0.0 +814 65 model.scoring_fct_norm 1.0 +814 65 negative_sampler.num_negs_per_pos 33.0 +814 65 training.batch_size 2.0 +814 66 model.embedding_dim 1.0 +814 66 model.scoring_fct_norm 1.0 +814 66 negative_sampler.num_negs_per_pos 52.0 +814 66 training.batch_size 0.0 +814 67 model.embedding_dim 0.0 +814 67 model.scoring_fct_norm 2.0 +814 67 negative_sampler.num_negs_per_pos 59.0 +814 67 training.batch_size 0.0 +814 68 model.embedding_dim 1.0 +814 68 model.scoring_fct_norm 2.0 +814 68 negative_sampler.num_negs_per_pos 61.0 +814 68 training.batch_size 0.0 +814 69 model.embedding_dim 1.0 +814 69 model.scoring_fct_norm 2.0 +814 69 negative_sampler.num_negs_per_pos 44.0 +814 69 training.batch_size 1.0 +814 70 model.embedding_dim 1.0 +814 70 model.scoring_fct_norm 2.0 +814 70 negative_sampler.num_negs_per_pos 19.0 +814 70 training.batch_size 2.0 +814 71 model.embedding_dim 2.0 +814 71 model.scoring_fct_norm 2.0 +814 71 negative_sampler.num_negs_per_pos 88.0 +814 71 
training.batch_size 0.0 +814 72 model.embedding_dim 1.0 +814 72 model.scoring_fct_norm 1.0 +814 72 negative_sampler.num_negs_per_pos 27.0 +814 72 training.batch_size 1.0 +814 73 model.embedding_dim 0.0 +814 73 model.scoring_fct_norm 2.0 +814 73 negative_sampler.num_negs_per_pos 34.0 +814 73 training.batch_size 2.0 +814 74 model.embedding_dim 1.0 +814 74 model.scoring_fct_norm 1.0 +814 74 negative_sampler.num_negs_per_pos 13.0 +814 74 training.batch_size 0.0 +814 75 model.embedding_dim 1.0 +814 75 model.scoring_fct_norm 1.0 +814 75 negative_sampler.num_negs_per_pos 30.0 +814 75 training.batch_size 2.0 +814 76 model.embedding_dim 2.0 +814 76 model.scoring_fct_norm 1.0 +814 76 negative_sampler.num_negs_per_pos 18.0 +814 76 training.batch_size 1.0 +814 77 model.embedding_dim 1.0 +814 77 model.scoring_fct_norm 1.0 +814 77 negative_sampler.num_negs_per_pos 92.0 +814 77 training.batch_size 1.0 +814 78 model.embedding_dim 1.0 +814 78 model.scoring_fct_norm 1.0 +814 78 negative_sampler.num_negs_per_pos 1.0 +814 78 training.batch_size 1.0 +814 79 model.embedding_dim 0.0 +814 79 model.scoring_fct_norm 2.0 +814 79 negative_sampler.num_negs_per_pos 21.0 +814 79 training.batch_size 2.0 +814 80 model.embedding_dim 1.0 +814 80 model.scoring_fct_norm 2.0 +814 80 negative_sampler.num_negs_per_pos 60.0 +814 80 training.batch_size 2.0 +814 81 model.embedding_dim 1.0 +814 81 model.scoring_fct_norm 2.0 +814 81 negative_sampler.num_negs_per_pos 84.0 +814 81 training.batch_size 1.0 +814 82 model.embedding_dim 2.0 +814 82 model.scoring_fct_norm 1.0 +814 82 negative_sampler.num_negs_per_pos 16.0 +814 82 training.batch_size 2.0 +814 83 model.embedding_dim 2.0 +814 83 model.scoring_fct_norm 2.0 +814 83 negative_sampler.num_negs_per_pos 62.0 +814 83 training.batch_size 0.0 +814 84 model.embedding_dim 1.0 +814 84 model.scoring_fct_norm 2.0 +814 84 negative_sampler.num_negs_per_pos 86.0 +814 84 training.batch_size 2.0 +814 85 model.embedding_dim 1.0 +814 85 model.scoring_fct_norm 2.0 +814 85 
negative_sampler.num_negs_per_pos 89.0 +814 85 training.batch_size 0.0 +814 86 model.embedding_dim 1.0 +814 86 model.scoring_fct_norm 1.0 +814 86 negative_sampler.num_negs_per_pos 79.0 +814 86 training.batch_size 1.0 +814 87 model.embedding_dim 1.0 +814 87 model.scoring_fct_norm 1.0 +814 87 negative_sampler.num_negs_per_pos 9.0 +814 87 training.batch_size 2.0 +814 88 model.embedding_dim 1.0 +814 88 model.scoring_fct_norm 1.0 +814 88 negative_sampler.num_negs_per_pos 15.0 +814 88 training.batch_size 1.0 +814 89 model.embedding_dim 0.0 +814 89 model.scoring_fct_norm 1.0 +814 89 negative_sampler.num_negs_per_pos 90.0 +814 89 training.batch_size 1.0 +814 90 model.embedding_dim 1.0 +814 90 model.scoring_fct_norm 1.0 +814 90 negative_sampler.num_negs_per_pos 94.0 +814 90 training.batch_size 0.0 +814 91 model.embedding_dim 2.0 +814 91 model.scoring_fct_norm 2.0 +814 91 negative_sampler.num_negs_per_pos 63.0 +814 91 training.batch_size 1.0 +814 92 model.embedding_dim 0.0 +814 92 model.scoring_fct_norm 2.0 +814 92 negative_sampler.num_negs_per_pos 83.0 +814 92 training.batch_size 2.0 +814 93 model.embedding_dim 1.0 +814 93 model.scoring_fct_norm 2.0 +814 93 negative_sampler.num_negs_per_pos 96.0 +814 93 training.batch_size 1.0 +814 94 model.embedding_dim 1.0 +814 94 model.scoring_fct_norm 1.0 +814 94 negative_sampler.num_negs_per_pos 79.0 +814 94 training.batch_size 2.0 +814 95 model.embedding_dim 0.0 +814 95 model.scoring_fct_norm 2.0 +814 95 negative_sampler.num_negs_per_pos 66.0 +814 95 training.batch_size 1.0 +814 96 model.embedding_dim 1.0 +814 96 model.scoring_fct_norm 1.0 +814 96 negative_sampler.num_negs_per_pos 60.0 +814 96 training.batch_size 0.0 +814 97 model.embedding_dim 2.0 +814 97 model.scoring_fct_norm 1.0 +814 97 negative_sampler.num_negs_per_pos 11.0 +814 97 training.batch_size 0.0 +814 98 model.embedding_dim 1.0 +814 98 model.scoring_fct_norm 2.0 +814 98 negative_sampler.num_negs_per_pos 14.0 +814 98 training.batch_size 1.0 +814 99 model.embedding_dim 1.0 
+814 99 model.scoring_fct_norm 2.0 +814 99 negative_sampler.num_negs_per_pos 61.0 +814 99 training.batch_size 1.0 +814 100 model.embedding_dim 0.0 +814 100 model.scoring_fct_norm 1.0 +814 100 negative_sampler.num_negs_per_pos 49.0 +814 100 training.batch_size 0.0 +814 1 dataset """kinships""" +814 1 model """transe""" +814 1 loss """softplus""" +814 1 regularizer """no""" +814 1 optimizer """adadelta""" +814 1 training_loop """owa""" +814 1 negative_sampler """basic""" +814 1 evaluator """rankbased""" +814 2 dataset """kinships""" +814 2 model """transe""" +814 2 loss """softplus""" +814 2 regularizer """no""" +814 2 optimizer """adadelta""" +814 2 training_loop """owa""" +814 2 negative_sampler """basic""" +814 2 evaluator """rankbased""" +814 3 dataset """kinships""" +814 3 model """transe""" +814 3 loss """softplus""" +814 3 regularizer """no""" +814 3 optimizer """adadelta""" +814 3 training_loop """owa""" +814 3 negative_sampler """basic""" +814 3 evaluator """rankbased""" +814 4 dataset """kinships""" +814 4 model """transe""" +814 4 loss """softplus""" +814 4 regularizer """no""" +814 4 optimizer """adadelta""" +814 4 training_loop """owa""" +814 4 negative_sampler """basic""" +814 4 evaluator """rankbased""" +814 5 dataset """kinships""" +814 5 model """transe""" +814 5 loss """softplus""" +814 5 regularizer """no""" +814 5 optimizer """adadelta""" +814 5 training_loop """owa""" +814 5 negative_sampler """basic""" +814 5 evaluator """rankbased""" +814 6 dataset """kinships""" +814 6 model """transe""" +814 6 loss """softplus""" +814 6 regularizer """no""" +814 6 optimizer """adadelta""" +814 6 training_loop """owa""" +814 6 negative_sampler """basic""" +814 6 evaluator """rankbased""" +814 7 dataset """kinships""" +814 7 model """transe""" +814 7 loss """softplus""" +814 7 regularizer """no""" +814 7 optimizer """adadelta""" +814 7 training_loop """owa""" +814 7 negative_sampler """basic""" +814 7 evaluator """rankbased""" +814 8 dataset """kinships""" +814 
8 model """transe""" +814 8 loss """softplus""" +814 8 regularizer """no""" +814 8 optimizer """adadelta""" +814 8 training_loop """owa""" +814 8 negative_sampler """basic""" +814 8 evaluator """rankbased""" +814 9 dataset """kinships""" +814 9 model """transe""" +814 9 loss """softplus""" +814 9 regularizer """no""" +814 9 optimizer """adadelta""" +814 9 training_loop """owa""" +814 9 negative_sampler """basic""" +814 9 evaluator """rankbased""" +814 10 dataset """kinships""" +814 10 model """transe""" +814 10 loss """softplus""" +814 10 regularizer """no""" +814 10 optimizer """adadelta""" +814 10 training_loop """owa""" +814 10 negative_sampler """basic""" +814 10 evaluator """rankbased""" +814 11 dataset """kinships""" +814 11 model """transe""" +814 11 loss """softplus""" +814 11 regularizer """no""" +814 11 optimizer """adadelta""" +814 11 training_loop """owa""" +814 11 negative_sampler """basic""" +814 11 evaluator """rankbased""" +814 12 dataset """kinships""" +814 12 model """transe""" +814 12 loss """softplus""" +814 12 regularizer """no""" +814 12 optimizer """adadelta""" +814 12 training_loop """owa""" +814 12 negative_sampler """basic""" +814 12 evaluator """rankbased""" +814 13 dataset """kinships""" +814 13 model """transe""" +814 13 loss """softplus""" +814 13 regularizer """no""" +814 13 optimizer """adadelta""" +814 13 training_loop """owa""" +814 13 negative_sampler """basic""" +814 13 evaluator """rankbased""" +814 14 dataset """kinships""" +814 14 model """transe""" +814 14 loss """softplus""" +814 14 regularizer """no""" +814 14 optimizer """adadelta""" +814 14 training_loop """owa""" +814 14 negative_sampler """basic""" +814 14 evaluator """rankbased""" +814 15 dataset """kinships""" +814 15 model """transe""" +814 15 loss """softplus""" +814 15 regularizer """no""" +814 15 optimizer """adadelta""" +814 15 training_loop """owa""" +814 15 negative_sampler """basic""" +814 15 evaluator """rankbased""" +814 16 dataset """kinships""" +814 16 
model """transe""" +814 16 loss """softplus""" +814 16 regularizer """no""" +814 16 optimizer """adadelta""" +814 16 training_loop """owa""" +814 16 negative_sampler """basic""" +814 16 evaluator """rankbased""" +814 17 dataset """kinships""" +814 17 model """transe""" +814 17 loss """softplus""" +814 17 regularizer """no""" +814 17 optimizer """adadelta""" +814 17 training_loop """owa""" +814 17 negative_sampler """basic""" +814 17 evaluator """rankbased""" +814 18 dataset """kinships""" +814 18 model """transe""" +814 18 loss """softplus""" +814 18 regularizer """no""" +814 18 optimizer """adadelta""" +814 18 training_loop """owa""" +814 18 negative_sampler """basic""" +814 18 evaluator """rankbased""" +814 19 dataset """kinships""" +814 19 model """transe""" +814 19 loss """softplus""" +814 19 regularizer """no""" +814 19 optimizer """adadelta""" +814 19 training_loop """owa""" +814 19 negative_sampler """basic""" +814 19 evaluator """rankbased""" +814 20 dataset """kinships""" +814 20 model """transe""" +814 20 loss """softplus""" +814 20 regularizer """no""" +814 20 optimizer """adadelta""" +814 20 training_loop """owa""" +814 20 negative_sampler """basic""" +814 20 evaluator """rankbased""" +814 21 dataset """kinships""" +814 21 model """transe""" +814 21 loss """softplus""" +814 21 regularizer """no""" +814 21 optimizer """adadelta""" +814 21 training_loop """owa""" +814 21 negative_sampler """basic""" +814 21 evaluator """rankbased""" +814 22 dataset """kinships""" +814 22 model """transe""" +814 22 loss """softplus""" +814 22 regularizer """no""" +814 22 optimizer """adadelta""" +814 22 training_loop """owa""" +814 22 negative_sampler """basic""" +814 22 evaluator """rankbased""" +814 23 dataset """kinships""" +814 23 model """transe""" +814 23 loss """softplus""" +814 23 regularizer """no""" +814 23 optimizer """adadelta""" +814 23 training_loop """owa""" +814 23 negative_sampler """basic""" +814 23 evaluator """rankbased""" +814 24 dataset """kinships""" 
+814 24 model """transe""" +814 24 loss """softplus""" +814 24 regularizer """no""" +814 24 optimizer """adadelta""" +814 24 training_loop """owa""" +814 24 negative_sampler """basic""" +814 24 evaluator """rankbased""" +814 25 dataset """kinships""" +814 25 model """transe""" +814 25 loss """softplus""" +814 25 regularizer """no""" +814 25 optimizer """adadelta""" +814 25 training_loop """owa""" +814 25 negative_sampler """basic""" +814 25 evaluator """rankbased""" +814 26 dataset """kinships""" +814 26 model """transe""" +814 26 loss """softplus""" +814 26 regularizer """no""" +814 26 optimizer """adadelta""" +814 26 training_loop """owa""" +814 26 negative_sampler """basic""" +814 26 evaluator """rankbased""" +814 27 dataset """kinships""" +814 27 model """transe""" +814 27 loss """softplus""" +814 27 regularizer """no""" +814 27 optimizer """adadelta""" +814 27 training_loop """owa""" +814 27 negative_sampler """basic""" +814 27 evaluator """rankbased""" +814 28 dataset """kinships""" +814 28 model """transe""" +814 28 loss """softplus""" +814 28 regularizer """no""" +814 28 optimizer """adadelta""" +814 28 training_loop """owa""" +814 28 negative_sampler """basic""" +814 28 evaluator """rankbased""" +814 29 dataset """kinships""" +814 29 model """transe""" +814 29 loss """softplus""" +814 29 regularizer """no""" +814 29 optimizer """adadelta""" +814 29 training_loop """owa""" +814 29 negative_sampler """basic""" +814 29 evaluator """rankbased""" +814 30 dataset """kinships""" +814 30 model """transe""" +814 30 loss """softplus""" +814 30 regularizer """no""" +814 30 optimizer """adadelta""" +814 30 training_loop """owa""" +814 30 negative_sampler """basic""" +814 30 evaluator """rankbased""" +814 31 dataset """kinships""" +814 31 model """transe""" +814 31 loss """softplus""" +814 31 regularizer """no""" +814 31 optimizer """adadelta""" +814 31 training_loop """owa""" +814 31 negative_sampler """basic""" +814 31 evaluator """rankbased""" +814 32 dataset 
"""kinships""" +814 32 model """transe""" +814 32 loss """softplus""" +814 32 regularizer """no""" +814 32 optimizer """adadelta""" +814 32 training_loop """owa""" +814 32 negative_sampler """basic""" +814 32 evaluator """rankbased""" +814 33 dataset """kinships""" +814 33 model """transe""" +814 33 loss """softplus""" +814 33 regularizer """no""" +814 33 optimizer """adadelta""" +814 33 training_loop """owa""" +814 33 negative_sampler """basic""" +814 33 evaluator """rankbased""" +814 34 dataset """kinships""" +814 34 model """transe""" +814 34 loss """softplus""" +814 34 regularizer """no""" +814 34 optimizer """adadelta""" +814 34 training_loop """owa""" +814 34 negative_sampler """basic""" +814 34 evaluator """rankbased""" +814 35 dataset """kinships""" +814 35 model """transe""" +814 35 loss """softplus""" +814 35 regularizer """no""" +814 35 optimizer """adadelta""" +814 35 training_loop """owa""" +814 35 negative_sampler """basic""" +814 35 evaluator """rankbased""" +814 36 dataset """kinships""" +814 36 model """transe""" +814 36 loss """softplus""" +814 36 regularizer """no""" +814 36 optimizer """adadelta""" +814 36 training_loop """owa""" +814 36 negative_sampler """basic""" +814 36 evaluator """rankbased""" +814 37 dataset """kinships""" +814 37 model """transe""" +814 37 loss """softplus""" +814 37 regularizer """no""" +814 37 optimizer """adadelta""" +814 37 training_loop """owa""" +814 37 negative_sampler """basic""" +814 37 evaluator """rankbased""" +814 38 dataset """kinships""" +814 38 model """transe""" +814 38 loss """softplus""" +814 38 regularizer """no""" +814 38 optimizer """adadelta""" +814 38 training_loop """owa""" +814 38 negative_sampler """basic""" +814 38 evaluator """rankbased""" +814 39 dataset """kinships""" +814 39 model """transe""" +814 39 loss """softplus""" +814 39 regularizer """no""" +814 39 optimizer """adadelta""" +814 39 training_loop """owa""" +814 39 negative_sampler """basic""" +814 39 evaluator """rankbased""" +814 40 
dataset """kinships""" +814 40 model """transe""" +814 40 loss """softplus""" +814 40 regularizer """no""" +814 40 optimizer """adadelta""" +814 40 training_loop """owa""" +814 40 negative_sampler """basic""" +814 40 evaluator """rankbased""" +814 41 dataset """kinships""" +814 41 model """transe""" +814 41 loss """softplus""" +814 41 regularizer """no""" +814 41 optimizer """adadelta""" +814 41 training_loop """owa""" +814 41 negative_sampler """basic""" +814 41 evaluator """rankbased""" +814 42 dataset """kinships""" +814 42 model """transe""" +814 42 loss """softplus""" +814 42 regularizer """no""" +814 42 optimizer """adadelta""" +814 42 training_loop """owa""" +814 42 negative_sampler """basic""" +814 42 evaluator """rankbased""" +814 43 dataset """kinships""" +814 43 model """transe""" +814 43 loss """softplus""" +814 43 regularizer """no""" +814 43 optimizer """adadelta""" +814 43 training_loop """owa""" +814 43 negative_sampler """basic""" +814 43 evaluator """rankbased""" +814 44 dataset """kinships""" +814 44 model """transe""" +814 44 loss """softplus""" +814 44 regularizer """no""" +814 44 optimizer """adadelta""" +814 44 training_loop """owa""" +814 44 negative_sampler """basic""" +814 44 evaluator """rankbased""" +814 45 dataset """kinships""" +814 45 model """transe""" +814 45 loss """softplus""" +814 45 regularizer """no""" +814 45 optimizer """adadelta""" +814 45 training_loop """owa""" +814 45 negative_sampler """basic""" +814 45 evaluator """rankbased""" +814 46 dataset """kinships""" +814 46 model """transe""" +814 46 loss """softplus""" +814 46 regularizer """no""" +814 46 optimizer """adadelta""" +814 46 training_loop """owa""" +814 46 negative_sampler """basic""" +814 46 evaluator """rankbased""" +814 47 dataset """kinships""" +814 47 model """transe""" +814 47 loss """softplus""" +814 47 regularizer """no""" +814 47 optimizer """adadelta""" +814 47 training_loop """owa""" +814 47 negative_sampler """basic""" +814 47 evaluator """rankbased""" 
+814 48 dataset """kinships""" +814 48 model """transe""" +814 48 loss """softplus""" +814 48 regularizer """no""" +814 48 optimizer """adadelta""" +814 48 training_loop """owa""" +814 48 negative_sampler """basic""" +814 48 evaluator """rankbased""" +814 49 dataset """kinships""" +814 49 model """transe""" +814 49 loss """softplus""" +814 49 regularizer """no""" +814 49 optimizer """adadelta""" +814 49 training_loop """owa""" +814 49 negative_sampler """basic""" +814 49 evaluator """rankbased""" +814 50 dataset """kinships""" +814 50 model """transe""" +814 50 loss """softplus""" +814 50 regularizer """no""" +814 50 optimizer """adadelta""" +814 50 training_loop """owa""" +814 50 negative_sampler """basic""" +814 50 evaluator """rankbased""" +814 51 dataset """kinships""" +814 51 model """transe""" +814 51 loss """softplus""" +814 51 regularizer """no""" +814 51 optimizer """adadelta""" +814 51 training_loop """owa""" +814 51 negative_sampler """basic""" +814 51 evaluator """rankbased""" +814 52 dataset """kinships""" +814 52 model """transe""" +814 52 loss """softplus""" +814 52 regularizer """no""" +814 52 optimizer """adadelta""" +814 52 training_loop """owa""" +814 52 negative_sampler """basic""" +814 52 evaluator """rankbased""" +814 53 dataset """kinships""" +814 53 model """transe""" +814 53 loss """softplus""" +814 53 regularizer """no""" +814 53 optimizer """adadelta""" +814 53 training_loop """owa""" +814 53 negative_sampler """basic""" +814 53 evaluator """rankbased""" +814 54 dataset """kinships""" +814 54 model """transe""" +814 54 loss """softplus""" +814 54 regularizer """no""" +814 54 optimizer """adadelta""" +814 54 training_loop """owa""" +814 54 negative_sampler """basic""" +814 54 evaluator """rankbased""" +814 55 dataset """kinships""" +814 55 model """transe""" +814 55 loss """softplus""" +814 55 regularizer """no""" +814 55 optimizer """adadelta""" +814 55 training_loop """owa""" +814 55 negative_sampler """basic""" +814 55 evaluator 
"""rankbased""" +814 56 dataset """kinships""" +814 56 model """transe""" +814 56 loss """softplus""" +814 56 regularizer """no""" +814 56 optimizer """adadelta""" +814 56 training_loop """owa""" +814 56 negative_sampler """basic""" +814 56 evaluator """rankbased""" +814 57 dataset """kinships""" +814 57 model """transe""" +814 57 loss """softplus""" +814 57 regularizer """no""" +814 57 optimizer """adadelta""" +814 57 training_loop """owa""" +814 57 negative_sampler """basic""" +814 57 evaluator """rankbased""" +814 58 dataset """kinships""" +814 58 model """transe""" +814 58 loss """softplus""" +814 58 regularizer """no""" +814 58 optimizer """adadelta""" +814 58 training_loop """owa""" +814 58 negative_sampler """basic""" +814 58 evaluator """rankbased""" +814 59 dataset """kinships""" +814 59 model """transe""" +814 59 loss """softplus""" +814 59 regularizer """no""" +814 59 optimizer """adadelta""" +814 59 training_loop """owa""" +814 59 negative_sampler """basic""" +814 59 evaluator """rankbased""" +814 60 dataset """kinships""" +814 60 model """transe""" +814 60 loss """softplus""" +814 60 regularizer """no""" +814 60 optimizer """adadelta""" +814 60 training_loop """owa""" +814 60 negative_sampler """basic""" +814 60 evaluator """rankbased""" +814 61 dataset """kinships""" +814 61 model """transe""" +814 61 loss """softplus""" +814 61 regularizer """no""" +814 61 optimizer """adadelta""" +814 61 training_loop """owa""" +814 61 negative_sampler """basic""" +814 61 evaluator """rankbased""" +814 62 dataset """kinships""" +814 62 model """transe""" +814 62 loss """softplus""" +814 62 regularizer """no""" +814 62 optimizer """adadelta""" +814 62 training_loop """owa""" +814 62 negative_sampler """basic""" +814 62 evaluator """rankbased""" +814 63 dataset """kinships""" +814 63 model """transe""" +814 63 loss """softplus""" +814 63 regularizer """no""" +814 63 optimizer """adadelta""" +814 63 training_loop """owa""" +814 63 negative_sampler """basic""" +814 63 
evaluator """rankbased""" +814 64 dataset """kinships""" +814 64 model """transe""" +814 64 loss """softplus""" +814 64 regularizer """no""" +814 64 optimizer """adadelta""" +814 64 training_loop """owa""" +814 64 negative_sampler """basic""" +814 64 evaluator """rankbased""" +814 65 dataset """kinships""" +814 65 model """transe""" +814 65 loss """softplus""" +814 65 regularizer """no""" +814 65 optimizer """adadelta""" +814 65 training_loop """owa""" +814 65 negative_sampler """basic""" +814 65 evaluator """rankbased""" +814 66 dataset """kinships""" +814 66 model """transe""" +814 66 loss """softplus""" +814 66 regularizer """no""" +814 66 optimizer """adadelta""" +814 66 training_loop """owa""" +814 66 negative_sampler """basic""" +814 66 evaluator """rankbased""" +814 67 dataset """kinships""" +814 67 model """transe""" +814 67 loss """softplus""" +814 67 regularizer """no""" +814 67 optimizer """adadelta""" +814 67 training_loop """owa""" +814 67 negative_sampler """basic""" +814 67 evaluator """rankbased""" +814 68 dataset """kinships""" +814 68 model """transe""" +814 68 loss """softplus""" +814 68 regularizer """no""" +814 68 optimizer """adadelta""" +814 68 training_loop """owa""" +814 68 negative_sampler """basic""" +814 68 evaluator """rankbased""" +814 69 dataset """kinships""" +814 69 model """transe""" +814 69 loss """softplus""" +814 69 regularizer """no""" +814 69 optimizer """adadelta""" +814 69 training_loop """owa""" +814 69 negative_sampler """basic""" +814 69 evaluator """rankbased""" +814 70 dataset """kinships""" +814 70 model """transe""" +814 70 loss """softplus""" +814 70 regularizer """no""" +814 70 optimizer """adadelta""" +814 70 training_loop """owa""" +814 70 negative_sampler """basic""" +814 70 evaluator """rankbased""" +814 71 dataset """kinships""" +814 71 model """transe""" +814 71 loss """softplus""" +814 71 regularizer """no""" +814 71 optimizer """adadelta""" +814 71 training_loop """owa""" +814 71 negative_sampler """basic""" 
+814 71 evaluator """rankbased""" +814 72 dataset """kinships""" +814 72 model """transe""" +814 72 loss """softplus""" +814 72 regularizer """no""" +814 72 optimizer """adadelta""" +814 72 training_loop """owa""" +814 72 negative_sampler """basic""" +814 72 evaluator """rankbased""" +814 73 dataset """kinships""" +814 73 model """transe""" +814 73 loss """softplus""" +814 73 regularizer """no""" +814 73 optimizer """adadelta""" +814 73 training_loop """owa""" +814 73 negative_sampler """basic""" +814 73 evaluator """rankbased""" +814 74 dataset """kinships""" +814 74 model """transe""" +814 74 loss """softplus""" +814 74 regularizer """no""" +814 74 optimizer """adadelta""" +814 74 training_loop """owa""" +814 74 negative_sampler """basic""" +814 74 evaluator """rankbased""" +814 75 dataset """kinships""" +814 75 model """transe""" +814 75 loss """softplus""" +814 75 regularizer """no""" +814 75 optimizer """adadelta""" +814 75 training_loop """owa""" +814 75 negative_sampler """basic""" +814 75 evaluator """rankbased""" +814 76 dataset """kinships""" +814 76 model """transe""" +814 76 loss """softplus""" +814 76 regularizer """no""" +814 76 optimizer """adadelta""" +814 76 training_loop """owa""" +814 76 negative_sampler """basic""" +814 76 evaluator """rankbased""" +814 77 dataset """kinships""" +814 77 model """transe""" +814 77 loss """softplus""" +814 77 regularizer """no""" +814 77 optimizer """adadelta""" +814 77 training_loop """owa""" +814 77 negative_sampler """basic""" +814 77 evaluator """rankbased""" +814 78 dataset """kinships""" +814 78 model """transe""" +814 78 loss """softplus""" +814 78 regularizer """no""" +814 78 optimizer """adadelta""" +814 78 training_loop """owa""" +814 78 negative_sampler """basic""" +814 78 evaluator """rankbased""" +814 79 dataset """kinships""" +814 79 model """transe""" +814 79 loss """softplus""" +814 79 regularizer """no""" +814 79 optimizer """adadelta""" +814 79 training_loop """owa""" +814 79 negative_sampler 
"""basic""" +814 79 evaluator """rankbased""" +814 80 dataset """kinships""" +814 80 model """transe""" +814 80 loss """softplus""" +814 80 regularizer """no""" +814 80 optimizer """adadelta""" +814 80 training_loop """owa""" +814 80 negative_sampler """basic""" +814 80 evaluator """rankbased""" +814 81 dataset """kinships""" +814 81 model """transe""" +814 81 loss """softplus""" +814 81 regularizer """no""" +814 81 optimizer """adadelta""" +814 81 training_loop """owa""" +814 81 negative_sampler """basic""" +814 81 evaluator """rankbased""" +814 82 dataset """kinships""" +814 82 model """transe""" +814 82 loss """softplus""" +814 82 regularizer """no""" +814 82 optimizer """adadelta""" +814 82 training_loop """owa""" +814 82 negative_sampler """basic""" +814 82 evaluator """rankbased""" +814 83 dataset """kinships""" +814 83 model """transe""" +814 83 loss """softplus""" +814 83 regularizer """no""" +814 83 optimizer """adadelta""" +814 83 training_loop """owa""" +814 83 negative_sampler """basic""" +814 83 evaluator """rankbased""" +814 84 dataset """kinships""" +814 84 model """transe""" +814 84 loss """softplus""" +814 84 regularizer """no""" +814 84 optimizer """adadelta""" +814 84 training_loop """owa""" +814 84 negative_sampler """basic""" +814 84 evaluator """rankbased""" +814 85 dataset """kinships""" +814 85 model """transe""" +814 85 loss """softplus""" +814 85 regularizer """no""" +814 85 optimizer """adadelta""" +814 85 training_loop """owa""" +814 85 negative_sampler """basic""" +814 85 evaluator """rankbased""" +814 86 dataset """kinships""" +814 86 model """transe""" +814 86 loss """softplus""" +814 86 regularizer """no""" +814 86 optimizer """adadelta""" +814 86 training_loop """owa""" +814 86 negative_sampler """basic""" +814 86 evaluator """rankbased""" +814 87 dataset """kinships""" +814 87 model """transe""" +814 87 loss """softplus""" +814 87 regularizer """no""" +814 87 optimizer """adadelta""" +814 87 training_loop """owa""" +814 87 
negative_sampler """basic""" +814 87 evaluator """rankbased""" +814 88 dataset """kinships""" +814 88 model """transe""" +814 88 loss """softplus""" +814 88 regularizer """no""" +814 88 optimizer """adadelta""" +814 88 training_loop """owa""" +814 88 negative_sampler """basic""" +814 88 evaluator """rankbased""" +814 89 dataset """kinships""" +814 89 model """transe""" +814 89 loss """softplus""" +814 89 regularizer """no""" +814 89 optimizer """adadelta""" +814 89 training_loop """owa""" +814 89 negative_sampler """basic""" +814 89 evaluator """rankbased""" +814 90 dataset """kinships""" +814 90 model """transe""" +814 90 loss """softplus""" +814 90 regularizer """no""" +814 90 optimizer """adadelta""" +814 90 training_loop """owa""" +814 90 negative_sampler """basic""" +814 90 evaluator """rankbased""" +814 91 dataset """kinships""" +814 91 model """transe""" +814 91 loss """softplus""" +814 91 regularizer """no""" +814 91 optimizer """adadelta""" +814 91 training_loop """owa""" +814 91 negative_sampler """basic""" +814 91 evaluator """rankbased""" +814 92 dataset """kinships""" +814 92 model """transe""" +814 92 loss """softplus""" +814 92 regularizer """no""" +814 92 optimizer """adadelta""" +814 92 training_loop """owa""" +814 92 negative_sampler """basic""" +814 92 evaluator """rankbased""" +814 93 dataset """kinships""" +814 93 model """transe""" +814 93 loss """softplus""" +814 93 regularizer """no""" +814 93 optimizer """adadelta""" +814 93 training_loop """owa""" +814 93 negative_sampler """basic""" +814 93 evaluator """rankbased""" +814 94 dataset """kinships""" +814 94 model """transe""" +814 94 loss """softplus""" +814 94 regularizer """no""" +814 94 optimizer """adadelta""" +814 94 training_loop """owa""" +814 94 negative_sampler """basic""" +814 94 evaluator """rankbased""" +814 95 dataset """kinships""" +814 95 model """transe""" +814 95 loss """softplus""" +814 95 regularizer """no""" +814 95 optimizer """adadelta""" +814 95 training_loop """owa""" 
+814 95 negative_sampler """basic""" +814 95 evaluator """rankbased""" +814 96 dataset """kinships""" +814 96 model """transe""" +814 96 loss """softplus""" +814 96 regularizer """no""" +814 96 optimizer """adadelta""" +814 96 training_loop """owa""" +814 96 negative_sampler """basic""" +814 96 evaluator """rankbased""" +814 97 dataset """kinships""" +814 97 model """transe""" +814 97 loss """softplus""" +814 97 regularizer """no""" +814 97 optimizer """adadelta""" +814 97 training_loop """owa""" +814 97 negative_sampler """basic""" +814 97 evaluator """rankbased""" +814 98 dataset """kinships""" +814 98 model """transe""" +814 98 loss """softplus""" +814 98 regularizer """no""" +814 98 optimizer """adadelta""" +814 98 training_loop """owa""" +814 98 negative_sampler """basic""" +814 98 evaluator """rankbased""" +814 99 dataset """kinships""" +814 99 model """transe""" +814 99 loss """softplus""" +814 99 regularizer """no""" +814 99 optimizer """adadelta""" +814 99 training_loop """owa""" +814 99 negative_sampler """basic""" +814 99 evaluator """rankbased""" +814 100 dataset """kinships""" +814 100 model """transe""" +814 100 loss """softplus""" +814 100 regularizer """no""" +814 100 optimizer """adadelta""" +814 100 training_loop """owa""" +814 100 negative_sampler """basic""" +814 100 evaluator """rankbased""" +815 1 model.embedding_dim 2.0 +815 1 model.scoring_fct_norm 2.0 +815 1 loss.margin 1.1033423248659286 +815 1 negative_sampler.num_negs_per_pos 37.0 +815 1 training.batch_size 0.0 +815 2 model.embedding_dim 1.0 +815 2 model.scoring_fct_norm 2.0 +815 2 loss.margin 5.1839897456458255 +815 2 negative_sampler.num_negs_per_pos 32.0 +815 2 training.batch_size 0.0 +815 3 model.embedding_dim 1.0 +815 3 model.scoring_fct_norm 2.0 +815 3 loss.margin 5.941268197722379 +815 3 negative_sampler.num_negs_per_pos 12.0 +815 3 training.batch_size 0.0 +815 4 model.embedding_dim 2.0 +815 4 model.scoring_fct_norm 2.0 +815 4 loss.margin 9.232058969475396 +815 4 
negative_sampler.num_negs_per_pos 38.0 +815 4 training.batch_size 1.0 +815 5 model.embedding_dim 2.0 +815 5 model.scoring_fct_norm 1.0 +815 5 loss.margin 8.846192763437088 +815 5 negative_sampler.num_negs_per_pos 29.0 +815 5 training.batch_size 1.0 +815 6 model.embedding_dim 1.0 +815 6 model.scoring_fct_norm 2.0 +815 6 loss.margin 7.75993501319042 +815 6 negative_sampler.num_negs_per_pos 81.0 +815 6 training.batch_size 0.0 +815 7 model.embedding_dim 1.0 +815 7 model.scoring_fct_norm 1.0 +815 7 loss.margin 1.2662861473656097 +815 7 negative_sampler.num_negs_per_pos 42.0 +815 7 training.batch_size 1.0 +815 8 model.embedding_dim 0.0 +815 8 model.scoring_fct_norm 2.0 +815 8 loss.margin 9.075924714401012 +815 8 negative_sampler.num_negs_per_pos 50.0 +815 8 training.batch_size 0.0 +815 9 model.embedding_dim 1.0 +815 9 model.scoring_fct_norm 1.0 +815 9 loss.margin 0.9937080176955351 +815 9 negative_sampler.num_negs_per_pos 35.0 +815 9 training.batch_size 2.0 +815 10 model.embedding_dim 2.0 +815 10 model.scoring_fct_norm 2.0 +815 10 loss.margin 3.2713745081204046 +815 10 negative_sampler.num_negs_per_pos 1.0 +815 10 training.batch_size 2.0 +815 11 model.embedding_dim 2.0 +815 11 model.scoring_fct_norm 1.0 +815 11 loss.margin 6.739168386282395 +815 11 negative_sampler.num_negs_per_pos 44.0 +815 11 training.batch_size 0.0 +815 12 model.embedding_dim 1.0 +815 12 model.scoring_fct_norm 1.0 +815 12 loss.margin 1.304283102556081 +815 12 negative_sampler.num_negs_per_pos 31.0 +815 12 training.batch_size 0.0 +815 13 model.embedding_dim 1.0 +815 13 model.scoring_fct_norm 2.0 +815 13 loss.margin 2.0441691576047143 +815 13 negative_sampler.num_negs_per_pos 86.0 +815 13 training.batch_size 0.0 +815 14 model.embedding_dim 0.0 +815 14 model.scoring_fct_norm 2.0 +815 14 loss.margin 6.662931087921958 +815 14 negative_sampler.num_negs_per_pos 13.0 +815 14 training.batch_size 2.0 +815 15 model.embedding_dim 0.0 +815 15 model.scoring_fct_norm 2.0 +815 15 loss.margin 2.9419380136614115 +815 
15 negative_sampler.num_negs_per_pos 99.0 +815 15 training.batch_size 0.0 +815 16 model.embedding_dim 1.0 +815 16 model.scoring_fct_norm 2.0 +815 16 loss.margin 3.538391179359432 +815 16 negative_sampler.num_negs_per_pos 41.0 +815 16 training.batch_size 1.0 +815 17 model.embedding_dim 1.0 +815 17 model.scoring_fct_norm 2.0 +815 17 loss.margin 5.615145013015927 +815 17 negative_sampler.num_negs_per_pos 49.0 +815 17 training.batch_size 2.0 +815 18 model.embedding_dim 0.0 +815 18 model.scoring_fct_norm 1.0 +815 18 loss.margin 9.442391266717816 +815 18 negative_sampler.num_negs_per_pos 93.0 +815 18 training.batch_size 1.0 +815 19 model.embedding_dim 2.0 +815 19 model.scoring_fct_norm 2.0 +815 19 loss.margin 7.51393676910839 +815 19 negative_sampler.num_negs_per_pos 24.0 +815 19 training.batch_size 1.0 +815 20 model.embedding_dim 0.0 +815 20 model.scoring_fct_norm 2.0 +815 20 loss.margin 6.154968838193384 +815 20 negative_sampler.num_negs_per_pos 50.0 +815 20 training.batch_size 0.0 +815 21 model.embedding_dim 0.0 +815 21 model.scoring_fct_norm 1.0 +815 21 loss.margin 0.7236693509582961 +815 21 negative_sampler.num_negs_per_pos 48.0 +815 21 training.batch_size 2.0 +815 22 model.embedding_dim 2.0 +815 22 model.scoring_fct_norm 2.0 +815 22 loss.margin 8.214606505287176 +815 22 negative_sampler.num_negs_per_pos 46.0 +815 22 training.batch_size 2.0 +815 23 model.embedding_dim 1.0 +815 23 model.scoring_fct_norm 2.0 +815 23 loss.margin 5.200499305420882 +815 23 negative_sampler.num_negs_per_pos 71.0 +815 23 training.batch_size 2.0 +815 24 model.embedding_dim 0.0 +815 24 model.scoring_fct_norm 1.0 +815 24 loss.margin 6.781107774031439 +815 24 negative_sampler.num_negs_per_pos 97.0 +815 24 training.batch_size 2.0 +815 25 model.embedding_dim 2.0 +815 25 model.scoring_fct_norm 2.0 +815 25 loss.margin 2.840268111050739 +815 25 negative_sampler.num_negs_per_pos 56.0 +815 25 training.batch_size 1.0 +815 26 model.embedding_dim 1.0 +815 26 model.scoring_fct_norm 1.0 +815 26 
loss.margin 6.260781381305449 +815 26 negative_sampler.num_negs_per_pos 29.0 +815 26 training.batch_size 2.0 +815 27 model.embedding_dim 1.0 +815 27 model.scoring_fct_norm 2.0 +815 27 loss.margin 5.525759990145362 +815 27 negative_sampler.num_negs_per_pos 17.0 +815 27 training.batch_size 1.0 +815 28 model.embedding_dim 2.0 +815 28 model.scoring_fct_norm 1.0 +815 28 loss.margin 6.870246037253777 +815 28 negative_sampler.num_negs_per_pos 93.0 +815 28 training.batch_size 1.0 +815 29 model.embedding_dim 0.0 +815 29 model.scoring_fct_norm 2.0 +815 29 loss.margin 4.416242858288722 +815 29 negative_sampler.num_negs_per_pos 96.0 +815 29 training.batch_size 0.0 +815 30 model.embedding_dim 1.0 +815 30 model.scoring_fct_norm 2.0 +815 30 loss.margin 5.341570541237926 +815 30 negative_sampler.num_negs_per_pos 57.0 +815 30 training.batch_size 1.0 +815 31 model.embedding_dim 2.0 +815 31 model.scoring_fct_norm 1.0 +815 31 loss.margin 8.853305530590662 +815 31 negative_sampler.num_negs_per_pos 20.0 +815 31 training.batch_size 2.0 +815 32 model.embedding_dim 1.0 +815 32 model.scoring_fct_norm 2.0 +815 32 loss.margin 9.570426992258666 +815 32 negative_sampler.num_negs_per_pos 52.0 +815 32 training.batch_size 0.0 +815 33 model.embedding_dim 2.0 +815 33 model.scoring_fct_norm 1.0 +815 33 loss.margin 3.181266790646192 +815 33 negative_sampler.num_negs_per_pos 95.0 +815 33 training.batch_size 1.0 +815 34 model.embedding_dim 2.0 +815 34 model.scoring_fct_norm 2.0 +815 34 loss.margin 4.6561052360876065 +815 34 negative_sampler.num_negs_per_pos 95.0 +815 34 training.batch_size 1.0 +815 35 model.embedding_dim 2.0 +815 35 model.scoring_fct_norm 2.0 +815 35 loss.margin 4.101009932765474 +815 35 negative_sampler.num_negs_per_pos 51.0 +815 35 training.batch_size 2.0 +815 36 model.embedding_dim 0.0 +815 36 model.scoring_fct_norm 1.0 +815 36 loss.margin 9.317824952708472 +815 36 negative_sampler.num_negs_per_pos 81.0 +815 36 training.batch_size 0.0 +815 37 model.embedding_dim 0.0 +815 37 
model.scoring_fct_norm 1.0 +815 37 loss.margin 2.0590192077201452 +815 37 negative_sampler.num_negs_per_pos 36.0 +815 37 training.batch_size 2.0 +815 38 model.embedding_dim 0.0 +815 38 model.scoring_fct_norm 2.0 +815 38 loss.margin 1.1447897549761008 +815 38 negative_sampler.num_negs_per_pos 19.0 +815 38 training.batch_size 0.0 +815 39 model.embedding_dim 2.0 +815 39 model.scoring_fct_norm 1.0 +815 39 loss.margin 0.5514810044425201 +815 39 negative_sampler.num_negs_per_pos 75.0 +815 39 training.batch_size 2.0 +815 40 model.embedding_dim 0.0 +815 40 model.scoring_fct_norm 1.0 +815 40 loss.margin 4.138963773701533 +815 40 negative_sampler.num_negs_per_pos 11.0 +815 40 training.batch_size 1.0 +815 41 model.embedding_dim 0.0 +815 41 model.scoring_fct_norm 1.0 +815 41 loss.margin 2.2708573948045956 +815 41 negative_sampler.num_negs_per_pos 48.0 +815 41 training.batch_size 0.0 +815 42 model.embedding_dim 1.0 +815 42 model.scoring_fct_norm 2.0 +815 42 loss.margin 3.4359552803176316 +815 42 negative_sampler.num_negs_per_pos 7.0 +815 42 training.batch_size 0.0 +815 43 model.embedding_dim 0.0 +815 43 model.scoring_fct_norm 1.0 +815 43 loss.margin 3.7212783006419157 +815 43 negative_sampler.num_negs_per_pos 12.0 +815 43 training.batch_size 2.0 +815 44 model.embedding_dim 0.0 +815 44 model.scoring_fct_norm 2.0 +815 44 loss.margin 4.013547626404233 +815 44 negative_sampler.num_negs_per_pos 31.0 +815 44 training.batch_size 1.0 +815 45 model.embedding_dim 2.0 +815 45 model.scoring_fct_norm 1.0 +815 45 loss.margin 7.880471542007374 +815 45 negative_sampler.num_negs_per_pos 75.0 +815 45 training.batch_size 1.0 +815 46 model.embedding_dim 2.0 +815 46 model.scoring_fct_norm 2.0 +815 46 loss.margin 4.698531130950735 +815 46 negative_sampler.num_negs_per_pos 8.0 +815 46 training.batch_size 0.0 +815 47 model.embedding_dim 1.0 +815 47 model.scoring_fct_norm 2.0 +815 47 loss.margin 8.247511426884321 +815 47 negative_sampler.num_negs_per_pos 50.0 +815 47 training.batch_size 1.0 +815 48 
model.embedding_dim 2.0 +815 48 model.scoring_fct_norm 1.0 +815 48 loss.margin 4.24317548375757 +815 48 negative_sampler.num_negs_per_pos 97.0 +815 48 training.batch_size 2.0 +815 49 model.embedding_dim 0.0 +815 49 model.scoring_fct_norm 1.0 +815 49 loss.margin 9.548580702629945 +815 49 negative_sampler.num_negs_per_pos 19.0 +815 49 training.batch_size 2.0 +815 50 model.embedding_dim 2.0 +815 50 model.scoring_fct_norm 2.0 +815 50 loss.margin 3.3253523196398773 +815 50 negative_sampler.num_negs_per_pos 57.0 +815 50 training.batch_size 1.0 +815 51 model.embedding_dim 1.0 +815 51 model.scoring_fct_norm 2.0 +815 51 loss.margin 8.308820336440773 +815 51 negative_sampler.num_negs_per_pos 30.0 +815 51 training.batch_size 0.0 +815 52 model.embedding_dim 0.0 +815 52 model.scoring_fct_norm 2.0 +815 52 loss.margin 9.803310596080141 +815 52 negative_sampler.num_negs_per_pos 9.0 +815 52 training.batch_size 2.0 +815 53 model.embedding_dim 0.0 +815 53 model.scoring_fct_norm 1.0 +815 53 loss.margin 7.792787957953765 +815 53 negative_sampler.num_negs_per_pos 35.0 +815 53 training.batch_size 1.0 +815 54 model.embedding_dim 0.0 +815 54 model.scoring_fct_norm 1.0 +815 54 loss.margin 5.178582192554405 +815 54 negative_sampler.num_negs_per_pos 89.0 +815 54 training.batch_size 1.0 +815 55 model.embedding_dim 2.0 +815 55 model.scoring_fct_norm 1.0 +815 55 loss.margin 8.397337321211694 +815 55 negative_sampler.num_negs_per_pos 82.0 +815 55 training.batch_size 0.0 +815 56 model.embedding_dim 1.0 +815 56 model.scoring_fct_norm 1.0 +815 56 loss.margin 1.088075174908497 +815 56 negative_sampler.num_negs_per_pos 47.0 +815 56 training.batch_size 1.0 +815 57 model.embedding_dim 2.0 +815 57 model.scoring_fct_norm 2.0 +815 57 loss.margin 8.439535395026525 +815 57 negative_sampler.num_negs_per_pos 20.0 +815 57 training.batch_size 2.0 +815 58 model.embedding_dim 2.0 +815 58 model.scoring_fct_norm 1.0 +815 58 loss.margin 5.429105490630228 +815 58 negative_sampler.num_negs_per_pos 89.0 +815 58 
training.batch_size 2.0 +815 59 model.embedding_dim 1.0 +815 59 model.scoring_fct_norm 2.0 +815 59 loss.margin 8.013230441577548 +815 59 negative_sampler.num_negs_per_pos 19.0 +815 59 training.batch_size 2.0 +815 60 model.embedding_dim 0.0 +815 60 model.scoring_fct_norm 1.0 +815 60 loss.margin 6.276737078426515 +815 60 negative_sampler.num_negs_per_pos 70.0 +815 60 training.batch_size 0.0 +815 61 model.embedding_dim 2.0 +815 61 model.scoring_fct_norm 2.0 +815 61 loss.margin 5.875832941377799 +815 61 negative_sampler.num_negs_per_pos 12.0 +815 61 training.batch_size 1.0 +815 62 model.embedding_dim 0.0 +815 62 model.scoring_fct_norm 1.0 +815 62 loss.margin 4.056974978374686 +815 62 negative_sampler.num_negs_per_pos 31.0 +815 62 training.batch_size 2.0 +815 63 model.embedding_dim 1.0 +815 63 model.scoring_fct_norm 2.0 +815 63 loss.margin 5.244772188746117 +815 63 negative_sampler.num_negs_per_pos 81.0 +815 63 training.batch_size 0.0 +815 64 model.embedding_dim 2.0 +815 64 model.scoring_fct_norm 1.0 +815 64 loss.margin 3.6612741219517764 +815 64 negative_sampler.num_negs_per_pos 25.0 +815 64 training.batch_size 1.0 +815 65 model.embedding_dim 0.0 +815 65 model.scoring_fct_norm 1.0 +815 65 loss.margin 6.462189778842573 +815 65 negative_sampler.num_negs_per_pos 18.0 +815 65 training.batch_size 0.0 +815 66 model.embedding_dim 1.0 +815 66 model.scoring_fct_norm 2.0 +815 66 loss.margin 2.884542242772966 +815 66 negative_sampler.num_negs_per_pos 69.0 +815 66 training.batch_size 2.0 +815 67 model.embedding_dim 0.0 +815 67 model.scoring_fct_norm 1.0 +815 67 loss.margin 3.7928860702302734 +815 67 negative_sampler.num_negs_per_pos 25.0 +815 67 training.batch_size 1.0 +815 68 model.embedding_dim 0.0 +815 68 model.scoring_fct_norm 2.0 +815 68 loss.margin 2.970557633762489 +815 68 negative_sampler.num_negs_per_pos 92.0 +815 68 training.batch_size 1.0 +815 69 model.embedding_dim 2.0 +815 69 model.scoring_fct_norm 1.0 +815 69 loss.margin 1.9858118969631253 +815 69 
negative_sampler.num_negs_per_pos 36.0 +815 69 training.batch_size 0.0 +815 70 model.embedding_dim 2.0 +815 70 model.scoring_fct_norm 2.0 +815 70 loss.margin 5.8682014026552425 +815 70 negative_sampler.num_negs_per_pos 82.0 +815 70 training.batch_size 2.0 +815 71 model.embedding_dim 2.0 +815 71 model.scoring_fct_norm 1.0 +815 71 loss.margin 2.674016680190352 +815 71 negative_sampler.num_negs_per_pos 36.0 +815 71 training.batch_size 0.0 +815 72 model.embedding_dim 2.0 +815 72 model.scoring_fct_norm 1.0 +815 72 loss.margin 5.609581033592865 +815 72 negative_sampler.num_negs_per_pos 22.0 +815 72 training.batch_size 0.0 +815 73 model.embedding_dim 1.0 +815 73 model.scoring_fct_norm 1.0 +815 73 loss.margin 6.566045803107174 +815 73 negative_sampler.num_negs_per_pos 41.0 +815 73 training.batch_size 0.0 +815 74 model.embedding_dim 0.0 +815 74 model.scoring_fct_norm 2.0 +815 74 loss.margin 7.623561186326278 +815 74 negative_sampler.num_negs_per_pos 97.0 +815 74 training.batch_size 0.0 +815 75 model.embedding_dim 1.0 +815 75 model.scoring_fct_norm 1.0 +815 75 loss.margin 6.949058429720381 +815 75 negative_sampler.num_negs_per_pos 59.0 +815 75 training.batch_size 2.0 +815 76 model.embedding_dim 0.0 +815 76 model.scoring_fct_norm 2.0 +815 76 loss.margin 2.4985455516976756 +815 76 negative_sampler.num_negs_per_pos 72.0 +815 76 training.batch_size 0.0 +815 77 model.embedding_dim 1.0 +815 77 model.scoring_fct_norm 1.0 +815 77 loss.margin 6.343673388921948 +815 77 negative_sampler.num_negs_per_pos 72.0 +815 77 training.batch_size 0.0 +815 78 model.embedding_dim 0.0 +815 78 model.scoring_fct_norm 2.0 +815 78 loss.margin 6.166605966229988 +815 78 negative_sampler.num_negs_per_pos 18.0 +815 78 training.batch_size 2.0 +815 79 model.embedding_dim 2.0 +815 79 model.scoring_fct_norm 2.0 +815 79 loss.margin 4.25840220297199 +815 79 negative_sampler.num_negs_per_pos 2.0 +815 79 training.batch_size 2.0 +815 80 model.embedding_dim 0.0 +815 80 model.scoring_fct_norm 2.0 +815 80 loss.margin 
1.5450487322235418 +815 80 negative_sampler.num_negs_per_pos 26.0 +815 80 training.batch_size 1.0 +815 81 model.embedding_dim 0.0 +815 81 model.scoring_fct_norm 1.0 +815 81 loss.margin 1.7205604220096475 +815 81 negative_sampler.num_negs_per_pos 3.0 +815 81 training.batch_size 2.0 +815 82 model.embedding_dim 2.0 +815 82 model.scoring_fct_norm 2.0 +815 82 loss.margin 5.449424295383843 +815 82 negative_sampler.num_negs_per_pos 95.0 +815 82 training.batch_size 2.0 +815 83 model.embedding_dim 0.0 +815 83 model.scoring_fct_norm 1.0 +815 83 loss.margin 3.231766371879945 +815 83 negative_sampler.num_negs_per_pos 71.0 +815 83 training.batch_size 1.0 +815 84 model.embedding_dim 1.0 +815 84 model.scoring_fct_norm 1.0 +815 84 loss.margin 5.1976939372525415 +815 84 negative_sampler.num_negs_per_pos 73.0 +815 84 training.batch_size 0.0 +815 85 model.embedding_dim 1.0 +815 85 model.scoring_fct_norm 1.0 +815 85 loss.margin 5.604305462458609 +815 85 negative_sampler.num_negs_per_pos 51.0 +815 85 training.batch_size 2.0 +815 86 model.embedding_dim 1.0 +815 86 model.scoring_fct_norm 1.0 +815 86 loss.margin 2.2082455810834385 +815 86 negative_sampler.num_negs_per_pos 41.0 +815 86 training.batch_size 2.0 +815 87 model.embedding_dim 1.0 +815 87 model.scoring_fct_norm 2.0 +815 87 loss.margin 3.739284350194178 +815 87 negative_sampler.num_negs_per_pos 87.0 +815 87 training.batch_size 0.0 +815 88 model.embedding_dim 2.0 +815 88 model.scoring_fct_norm 1.0 +815 88 loss.margin 7.59094104629862 +815 88 negative_sampler.num_negs_per_pos 1.0 +815 88 training.batch_size 0.0 +815 89 model.embedding_dim 1.0 +815 89 model.scoring_fct_norm 2.0 +815 89 loss.margin 5.155414057728245 +815 89 negative_sampler.num_negs_per_pos 12.0 +815 89 training.batch_size 1.0 +815 90 model.embedding_dim 1.0 +815 90 model.scoring_fct_norm 1.0 +815 90 loss.margin 9.723481720635254 +815 90 negative_sampler.num_negs_per_pos 64.0 +815 90 training.batch_size 1.0 +815 91 model.embedding_dim 1.0 +815 91 
model.scoring_fct_norm 2.0 +815 91 loss.margin 4.397388927776793 +815 91 negative_sampler.num_negs_per_pos 45.0 +815 91 training.batch_size 0.0 +815 92 model.embedding_dim 0.0 +815 92 model.scoring_fct_norm 2.0 +815 92 loss.margin 9.282536797280256 +815 92 negative_sampler.num_negs_per_pos 80.0 +815 92 training.batch_size 0.0 +815 93 model.embedding_dim 0.0 +815 93 model.scoring_fct_norm 1.0 +815 93 loss.margin 1.0805455752296325 +815 93 negative_sampler.num_negs_per_pos 43.0 +815 93 training.batch_size 1.0 +815 94 model.embedding_dim 1.0 +815 94 model.scoring_fct_norm 2.0 +815 94 loss.margin 4.579290548509646 +815 94 negative_sampler.num_negs_per_pos 72.0 +815 94 training.batch_size 1.0 +815 95 model.embedding_dim 1.0 +815 95 model.scoring_fct_norm 2.0 +815 95 loss.margin 4.562248703645517 +815 95 negative_sampler.num_negs_per_pos 41.0 +815 95 training.batch_size 2.0 +815 96 model.embedding_dim 1.0 +815 96 model.scoring_fct_norm 1.0 +815 96 loss.margin 6.436204822664971 +815 96 negative_sampler.num_negs_per_pos 18.0 +815 96 training.batch_size 2.0 +815 97 model.embedding_dim 1.0 +815 97 model.scoring_fct_norm 2.0 +815 97 loss.margin 0.8280232401936616 +815 97 negative_sampler.num_negs_per_pos 31.0 +815 97 training.batch_size 1.0 +815 98 model.embedding_dim 2.0 +815 98 model.scoring_fct_norm 1.0 +815 98 loss.margin 4.460296732844933 +815 98 negative_sampler.num_negs_per_pos 88.0 +815 98 training.batch_size 0.0 +815 99 model.embedding_dim 2.0 +815 99 model.scoring_fct_norm 2.0 +815 99 loss.margin 1.5212257967819296 +815 99 negative_sampler.num_negs_per_pos 62.0 +815 99 training.batch_size 0.0 +815 100 model.embedding_dim 1.0 +815 100 model.scoring_fct_norm 2.0 +815 100 loss.margin 1.9731337202344144 +815 100 negative_sampler.num_negs_per_pos 54.0 +815 100 training.batch_size 0.0 +815 1 dataset """kinships""" +815 1 model """transe""" +815 1 loss """marginranking""" +815 1 regularizer """no""" +815 1 optimizer """adadelta""" +815 1 training_loop """owa""" +815 1 
negative_sampler """basic""" +815 1 evaluator """rankbased""" +815 2 dataset """kinships""" +815 2 model """transe""" +815 2 loss """marginranking""" +815 2 regularizer """no""" +815 2 optimizer """adadelta""" +815 2 training_loop """owa""" +815 2 negative_sampler """basic""" +815 2 evaluator """rankbased""" +815 3 dataset """kinships""" +815 3 model """transe""" +815 3 loss """marginranking""" +815 3 regularizer """no""" +815 3 optimizer """adadelta""" +815 3 training_loop """owa""" +815 3 negative_sampler """basic""" +815 3 evaluator """rankbased""" +815 4 dataset """kinships""" +815 4 model """transe""" +815 4 loss """marginranking""" +815 4 regularizer """no""" +815 4 optimizer """adadelta""" +815 4 training_loop """owa""" +815 4 negative_sampler """basic""" +815 4 evaluator """rankbased""" +815 5 dataset """kinships""" +815 5 model """transe""" +815 5 loss """marginranking""" +815 5 regularizer """no""" +815 5 optimizer """adadelta""" +815 5 training_loop """owa""" +815 5 negative_sampler """basic""" +815 5 evaluator """rankbased""" +815 6 dataset """kinships""" +815 6 model """transe""" +815 6 loss """marginranking""" +815 6 regularizer """no""" +815 6 optimizer """adadelta""" +815 6 training_loop """owa""" +815 6 negative_sampler """basic""" +815 6 evaluator """rankbased""" +815 7 dataset """kinships""" +815 7 model """transe""" +815 7 loss """marginranking""" +815 7 regularizer """no""" +815 7 optimizer """adadelta""" +815 7 training_loop """owa""" +815 7 negative_sampler """basic""" +815 7 evaluator """rankbased""" +815 8 dataset """kinships""" +815 8 model """transe""" +815 8 loss """marginranking""" +815 8 regularizer """no""" +815 8 optimizer """adadelta""" +815 8 training_loop """owa""" +815 8 negative_sampler """basic""" +815 8 evaluator """rankbased""" +815 9 dataset """kinships""" +815 9 model """transe""" +815 9 loss """marginranking""" +815 9 regularizer """no""" +815 9 optimizer """adadelta""" +815 9 training_loop """owa""" +815 9 
negative_sampler """basic""" +815 9 evaluator """rankbased""" +815 10 dataset """kinships""" +815 10 model """transe""" +815 10 loss """marginranking""" +815 10 regularizer """no""" +815 10 optimizer """adadelta""" +815 10 training_loop """owa""" +815 10 negative_sampler """basic""" +815 10 evaluator """rankbased""" +815 11 dataset """kinships""" +815 11 model """transe""" +815 11 loss """marginranking""" +815 11 regularizer """no""" +815 11 optimizer """adadelta""" +815 11 training_loop """owa""" +815 11 negative_sampler """basic""" +815 11 evaluator """rankbased""" +815 12 dataset """kinships""" +815 12 model """transe""" +815 12 loss """marginranking""" +815 12 regularizer """no""" +815 12 optimizer """adadelta""" +815 12 training_loop """owa""" +815 12 negative_sampler """basic""" +815 12 evaluator """rankbased""" +815 13 dataset """kinships""" +815 13 model """transe""" +815 13 loss """marginranking""" +815 13 regularizer """no""" +815 13 optimizer """adadelta""" +815 13 training_loop """owa""" +815 13 negative_sampler """basic""" +815 13 evaluator """rankbased""" +815 14 dataset """kinships""" +815 14 model """transe""" +815 14 loss """marginranking""" +815 14 regularizer """no""" +815 14 optimizer """adadelta""" +815 14 training_loop """owa""" +815 14 negative_sampler """basic""" +815 14 evaluator """rankbased""" +815 15 dataset """kinships""" +815 15 model """transe""" +815 15 loss """marginranking""" +815 15 regularizer """no""" +815 15 optimizer """adadelta""" +815 15 training_loop """owa""" +815 15 negative_sampler """basic""" +815 15 evaluator """rankbased""" +815 16 dataset """kinships""" +815 16 model """transe""" +815 16 loss """marginranking""" +815 16 regularizer """no""" +815 16 optimizer """adadelta""" +815 16 training_loop """owa""" +815 16 negative_sampler """basic""" +815 16 evaluator """rankbased""" +815 17 dataset """kinships""" +815 17 model """transe""" +815 17 loss """marginranking""" +815 17 regularizer """no""" +815 17 optimizer 
"""adadelta""" +815 17 training_loop """owa""" +815 17 negative_sampler """basic""" +815 17 evaluator """rankbased""" +815 18 dataset """kinships""" +815 18 model """transe""" +815 18 loss """marginranking""" +815 18 regularizer """no""" +815 18 optimizer """adadelta""" +815 18 training_loop """owa""" +815 18 negative_sampler """basic""" +815 18 evaluator """rankbased""" +815 19 dataset """kinships""" +815 19 model """transe""" +815 19 loss """marginranking""" +815 19 regularizer """no""" +815 19 optimizer """adadelta""" +815 19 training_loop """owa""" +815 19 negative_sampler """basic""" +815 19 evaluator """rankbased""" +815 20 dataset """kinships""" +815 20 model """transe""" +815 20 loss """marginranking""" +815 20 regularizer """no""" +815 20 optimizer """adadelta""" +815 20 training_loop """owa""" +815 20 negative_sampler """basic""" +815 20 evaluator """rankbased""" +815 21 dataset """kinships""" +815 21 model """transe""" +815 21 loss """marginranking""" +815 21 regularizer """no""" +815 21 optimizer """adadelta""" +815 21 training_loop """owa""" +815 21 negative_sampler """basic""" +815 21 evaluator """rankbased""" +815 22 dataset """kinships""" +815 22 model """transe""" +815 22 loss """marginranking""" +815 22 regularizer """no""" +815 22 optimizer """adadelta""" +815 22 training_loop """owa""" +815 22 negative_sampler """basic""" +815 22 evaluator """rankbased""" +815 23 dataset """kinships""" +815 23 model """transe""" +815 23 loss """marginranking""" +815 23 regularizer """no""" +815 23 optimizer """adadelta""" +815 23 training_loop """owa""" +815 23 negative_sampler """basic""" +815 23 evaluator """rankbased""" +815 24 dataset """kinships""" +815 24 model """transe""" +815 24 loss """marginranking""" +815 24 regularizer """no""" +815 24 optimizer """adadelta""" +815 24 training_loop """owa""" +815 24 negative_sampler """basic""" +815 24 evaluator """rankbased""" +815 25 dataset """kinships""" +815 25 model """transe""" +815 25 loss 
"""marginranking""" +815 25 regularizer """no""" +815 25 optimizer """adadelta""" +815 25 training_loop """owa""" +815 25 negative_sampler """basic""" +815 25 evaluator """rankbased""" +815 26 dataset """kinships""" +815 26 model """transe""" +815 26 loss """marginranking""" +815 26 regularizer """no""" +815 26 optimizer """adadelta""" +815 26 training_loop """owa""" +815 26 negative_sampler """basic""" +815 26 evaluator """rankbased""" +815 27 dataset """kinships""" +815 27 model """transe""" +815 27 loss """marginranking""" +815 27 regularizer """no""" +815 27 optimizer """adadelta""" +815 27 training_loop """owa""" +815 27 negative_sampler """basic""" +815 27 evaluator """rankbased""" +815 28 dataset """kinships""" +815 28 model """transe""" +815 28 loss """marginranking""" +815 28 regularizer """no""" +815 28 optimizer """adadelta""" +815 28 training_loop """owa""" +815 28 negative_sampler """basic""" +815 28 evaluator """rankbased""" +815 29 dataset """kinships""" +815 29 model """transe""" +815 29 loss """marginranking""" +815 29 regularizer """no""" +815 29 optimizer """adadelta""" +815 29 training_loop """owa""" +815 29 negative_sampler """basic""" +815 29 evaluator """rankbased""" +815 30 dataset """kinships""" +815 30 model """transe""" +815 30 loss """marginranking""" +815 30 regularizer """no""" +815 30 optimizer """adadelta""" +815 30 training_loop """owa""" +815 30 negative_sampler """basic""" +815 30 evaluator """rankbased""" +815 31 dataset """kinships""" +815 31 model """transe""" +815 31 loss """marginranking""" +815 31 regularizer """no""" +815 31 optimizer """adadelta""" +815 31 training_loop """owa""" +815 31 negative_sampler """basic""" +815 31 evaluator """rankbased""" +815 32 dataset """kinships""" +815 32 model """transe""" +815 32 loss """marginranking""" +815 32 regularizer """no""" +815 32 optimizer """adadelta""" +815 32 training_loop """owa""" +815 32 negative_sampler """basic""" +815 32 evaluator """rankbased""" +815 33 dataset 
"""kinships""" +815 33 model """transe""" +815 33 loss """marginranking""" +815 33 regularizer """no""" +815 33 optimizer """adadelta""" +815 33 training_loop """owa""" +815 33 negative_sampler """basic""" +815 33 evaluator """rankbased""" +815 34 dataset """kinships""" +815 34 model """transe""" +815 34 loss """marginranking""" +815 34 regularizer """no""" +815 34 optimizer """adadelta""" +815 34 training_loop """owa""" +815 34 negative_sampler """basic""" +815 34 evaluator """rankbased""" +815 35 dataset """kinships""" +815 35 model """transe""" +815 35 loss """marginranking""" +815 35 regularizer """no""" +815 35 optimizer """adadelta""" +815 35 training_loop """owa""" +815 35 negative_sampler """basic""" +815 35 evaluator """rankbased""" +815 36 dataset """kinships""" +815 36 model """transe""" +815 36 loss """marginranking""" +815 36 regularizer """no""" +815 36 optimizer """adadelta""" +815 36 training_loop """owa""" +815 36 negative_sampler """basic""" +815 36 evaluator """rankbased""" +815 37 dataset """kinships""" +815 37 model """transe""" +815 37 loss """marginranking""" +815 37 regularizer """no""" +815 37 optimizer """adadelta""" +815 37 training_loop """owa""" +815 37 negative_sampler """basic""" +815 37 evaluator """rankbased""" +815 38 dataset """kinships""" +815 38 model """transe""" +815 38 loss """marginranking""" +815 38 regularizer """no""" +815 38 optimizer """adadelta""" +815 38 training_loop """owa""" +815 38 negative_sampler """basic""" +815 38 evaluator """rankbased""" +815 39 dataset """kinships""" +815 39 model """transe""" +815 39 loss """marginranking""" +815 39 regularizer """no""" +815 39 optimizer """adadelta""" +815 39 training_loop """owa""" +815 39 negative_sampler """basic""" +815 39 evaluator """rankbased""" +815 40 dataset """kinships""" +815 40 model """transe""" +815 40 loss """marginranking""" +815 40 regularizer """no""" +815 40 optimizer """adadelta""" +815 40 training_loop """owa""" +815 40 negative_sampler """basic""" 
+815 40 evaluator """rankbased""" +815 41 dataset """kinships""" +815 41 model """transe""" +815 41 loss """marginranking""" +815 41 regularizer """no""" +815 41 optimizer """adadelta""" +815 41 training_loop """owa""" +815 41 negative_sampler """basic""" +815 41 evaluator """rankbased""" +815 42 dataset """kinships""" +815 42 model """transe""" +815 42 loss """marginranking""" +815 42 regularizer """no""" +815 42 optimizer """adadelta""" +815 42 training_loop """owa""" +815 42 negative_sampler """basic""" +815 42 evaluator """rankbased""" +815 43 dataset """kinships""" +815 43 model """transe""" +815 43 loss """marginranking""" +815 43 regularizer """no""" +815 43 optimizer """adadelta""" +815 43 training_loop """owa""" +815 43 negative_sampler """basic""" +815 43 evaluator """rankbased""" +815 44 dataset """kinships""" +815 44 model """transe""" +815 44 loss """marginranking""" +815 44 regularizer """no""" +815 44 optimizer """adadelta""" +815 44 training_loop """owa""" +815 44 negative_sampler """basic""" +815 44 evaluator """rankbased""" +815 45 dataset """kinships""" +815 45 model """transe""" +815 45 loss """marginranking""" +815 45 regularizer """no""" +815 45 optimizer """adadelta""" +815 45 training_loop """owa""" +815 45 negative_sampler """basic""" +815 45 evaluator """rankbased""" +815 46 dataset """kinships""" +815 46 model """transe""" +815 46 loss """marginranking""" +815 46 regularizer """no""" +815 46 optimizer """adadelta""" +815 46 training_loop """owa""" +815 46 negative_sampler """basic""" +815 46 evaluator """rankbased""" +815 47 dataset """kinships""" +815 47 model """transe""" +815 47 loss """marginranking""" +815 47 regularizer """no""" +815 47 optimizer """adadelta""" +815 47 training_loop """owa""" +815 47 negative_sampler """basic""" +815 47 evaluator """rankbased""" +815 48 dataset """kinships""" +815 48 model """transe""" +815 48 loss """marginranking""" +815 48 regularizer """no""" +815 48 optimizer """adadelta""" +815 48 
training_loop """owa""" +815 48 negative_sampler """basic""" +815 48 evaluator """rankbased""" +815 49 dataset """kinships""" +815 49 model """transe""" +815 49 loss """marginranking""" +815 49 regularizer """no""" +815 49 optimizer """adadelta""" +815 49 training_loop """owa""" +815 49 negative_sampler """basic""" +815 49 evaluator """rankbased""" +815 50 dataset """kinships""" +815 50 model """transe""" +815 50 loss """marginranking""" +815 50 regularizer """no""" +815 50 optimizer """adadelta""" +815 50 training_loop """owa""" +815 50 negative_sampler """basic""" +815 50 evaluator """rankbased""" +815 51 dataset """kinships""" +815 51 model """transe""" +815 51 loss """marginranking""" +815 51 regularizer """no""" +815 51 optimizer """adadelta""" +815 51 training_loop """owa""" +815 51 negative_sampler """basic""" +815 51 evaluator """rankbased""" +815 52 dataset """kinships""" +815 52 model """transe""" +815 52 loss """marginranking""" +815 52 regularizer """no""" +815 52 optimizer """adadelta""" +815 52 training_loop """owa""" +815 52 negative_sampler """basic""" +815 52 evaluator """rankbased""" +815 53 dataset """kinships""" +815 53 model """transe""" +815 53 loss """marginranking""" +815 53 regularizer """no""" +815 53 optimizer """adadelta""" +815 53 training_loop """owa""" +815 53 negative_sampler """basic""" +815 53 evaluator """rankbased""" +815 54 dataset """kinships""" +815 54 model """transe""" +815 54 loss """marginranking""" +815 54 regularizer """no""" +815 54 optimizer """adadelta""" +815 54 training_loop """owa""" +815 54 negative_sampler """basic""" +815 54 evaluator """rankbased""" +815 55 dataset """kinships""" +815 55 model """transe""" +815 55 loss """marginranking""" +815 55 regularizer """no""" +815 55 optimizer """adadelta""" +815 55 training_loop """owa""" +815 55 negative_sampler """basic""" +815 55 evaluator """rankbased""" +815 56 dataset """kinships""" +815 56 model """transe""" +815 56 loss """marginranking""" +815 56 regularizer 
"""no""" +815 56 optimizer """adadelta""" +815 56 training_loop """owa""" +815 56 negative_sampler """basic""" +815 56 evaluator """rankbased""" +815 57 dataset """kinships""" +815 57 model """transe""" +815 57 loss """marginranking""" +815 57 regularizer """no""" +815 57 optimizer """adadelta""" +815 57 training_loop """owa""" +815 57 negative_sampler """basic""" +815 57 evaluator """rankbased""" +815 58 dataset """kinships""" +815 58 model """transe""" +815 58 loss """marginranking""" +815 58 regularizer """no""" +815 58 optimizer """adadelta""" +815 58 training_loop """owa""" +815 58 negative_sampler """basic""" +815 58 evaluator """rankbased""" +815 59 dataset """kinships""" +815 59 model """transe""" +815 59 loss """marginranking""" +815 59 regularizer """no""" +815 59 optimizer """adadelta""" +815 59 training_loop """owa""" +815 59 negative_sampler """basic""" +815 59 evaluator """rankbased""" +815 60 dataset """kinships""" +815 60 model """transe""" +815 60 loss """marginranking""" +815 60 regularizer """no""" +815 60 optimizer """adadelta""" +815 60 training_loop """owa""" +815 60 negative_sampler """basic""" +815 60 evaluator """rankbased""" +815 61 dataset """kinships""" +815 61 model """transe""" +815 61 loss """marginranking""" +815 61 regularizer """no""" +815 61 optimizer """adadelta""" +815 61 training_loop """owa""" +815 61 negative_sampler """basic""" +815 61 evaluator """rankbased""" +815 62 dataset """kinships""" +815 62 model """transe""" +815 62 loss """marginranking""" +815 62 regularizer """no""" +815 62 optimizer """adadelta""" +815 62 training_loop """owa""" +815 62 negative_sampler """basic""" +815 62 evaluator """rankbased""" +815 63 dataset """kinships""" +815 63 model """transe""" +815 63 loss """marginranking""" +815 63 regularizer """no""" +815 63 optimizer """adadelta""" +815 63 training_loop """owa""" +815 63 negative_sampler """basic""" +815 63 evaluator """rankbased""" +815 64 dataset """kinships""" +815 64 model """transe""" +815 
64 loss """marginranking""" +815 64 regularizer """no""" +815 64 optimizer """adadelta""" +815 64 training_loop """owa""" +815 64 negative_sampler """basic""" +815 64 evaluator """rankbased""" +815 65 dataset """kinships""" +815 65 model """transe""" +815 65 loss """marginranking""" +815 65 regularizer """no""" +815 65 optimizer """adadelta""" +815 65 training_loop """owa""" +815 65 negative_sampler """basic""" +815 65 evaluator """rankbased""" +815 66 dataset """kinships""" +815 66 model """transe""" +815 66 loss """marginranking""" +815 66 regularizer """no""" +815 66 optimizer """adadelta""" +815 66 training_loop """owa""" +815 66 negative_sampler """basic""" +815 66 evaluator """rankbased""" +815 67 dataset """kinships""" +815 67 model """transe""" +815 67 loss """marginranking""" +815 67 regularizer """no""" +815 67 optimizer """adadelta""" +815 67 training_loop """owa""" +815 67 negative_sampler """basic""" +815 67 evaluator """rankbased""" +815 68 dataset """kinships""" +815 68 model """transe""" +815 68 loss """marginranking""" +815 68 regularizer """no""" +815 68 optimizer """adadelta""" +815 68 training_loop """owa""" +815 68 negative_sampler """basic""" +815 68 evaluator """rankbased""" +815 69 dataset """kinships""" +815 69 model """transe""" +815 69 loss """marginranking""" +815 69 regularizer """no""" +815 69 optimizer """adadelta""" +815 69 training_loop """owa""" +815 69 negative_sampler """basic""" +815 69 evaluator """rankbased""" +815 70 dataset """kinships""" +815 70 model """transe""" +815 70 loss """marginranking""" +815 70 regularizer """no""" +815 70 optimizer """adadelta""" +815 70 training_loop """owa""" +815 70 negative_sampler """basic""" +815 70 evaluator """rankbased""" +815 71 dataset """kinships""" +815 71 model """transe""" +815 71 loss """marginranking""" +815 71 regularizer """no""" +815 71 optimizer """adadelta""" +815 71 training_loop """owa""" +815 71 negative_sampler """basic""" +815 71 evaluator """rankbased""" +815 72 
dataset """kinships""" +815 72 model """transe""" +815 72 loss """marginranking""" +815 72 regularizer """no""" +815 72 optimizer """adadelta""" +815 72 training_loop """owa""" +815 72 negative_sampler """basic""" +815 72 evaluator """rankbased""" +815 73 dataset """kinships""" +815 73 model """transe""" +815 73 loss """marginranking""" +815 73 regularizer """no""" +815 73 optimizer """adadelta""" +815 73 training_loop """owa""" +815 73 negative_sampler """basic""" +815 73 evaluator """rankbased""" +815 74 dataset """kinships""" +815 74 model """transe""" +815 74 loss """marginranking""" +815 74 regularizer """no""" +815 74 optimizer """adadelta""" +815 74 training_loop """owa""" +815 74 negative_sampler """basic""" +815 74 evaluator """rankbased""" +815 75 dataset """kinships""" +815 75 model """transe""" +815 75 loss """marginranking""" +815 75 regularizer """no""" +815 75 optimizer """adadelta""" +815 75 training_loop """owa""" +815 75 negative_sampler """basic""" +815 75 evaluator """rankbased""" +815 76 dataset """kinships""" +815 76 model """transe""" +815 76 loss """marginranking""" +815 76 regularizer """no""" +815 76 optimizer """adadelta""" +815 76 training_loop """owa""" +815 76 negative_sampler """basic""" +815 76 evaluator """rankbased""" +815 77 dataset """kinships""" +815 77 model """transe""" +815 77 loss """marginranking""" +815 77 regularizer """no""" +815 77 optimizer """adadelta""" +815 77 training_loop """owa""" +815 77 negative_sampler """basic""" +815 77 evaluator """rankbased""" +815 78 dataset """kinships""" +815 78 model """transe""" +815 78 loss """marginranking""" +815 78 regularizer """no""" +815 78 optimizer """adadelta""" +815 78 training_loop """owa""" +815 78 negative_sampler """basic""" +815 78 evaluator """rankbased""" +815 79 dataset """kinships""" +815 79 model """transe""" +815 79 loss """marginranking""" +815 79 regularizer """no""" +815 79 optimizer """adadelta""" +815 79 training_loop """owa""" +815 79 negative_sampler 
"""basic""" +815 79 evaluator """rankbased""" +815 80 dataset """kinships""" +815 80 model """transe""" +815 80 loss """marginranking""" +815 80 regularizer """no""" +815 80 optimizer """adadelta""" +815 80 training_loop """owa""" +815 80 negative_sampler """basic""" +815 80 evaluator """rankbased""" +815 81 dataset """kinships""" +815 81 model """transe""" +815 81 loss """marginranking""" +815 81 regularizer """no""" +815 81 optimizer """adadelta""" +815 81 training_loop """owa""" +815 81 negative_sampler """basic""" +815 81 evaluator """rankbased""" +815 82 dataset """kinships""" +815 82 model """transe""" +815 82 loss """marginranking""" +815 82 regularizer """no""" +815 82 optimizer """adadelta""" +815 82 training_loop """owa""" +815 82 negative_sampler """basic""" +815 82 evaluator """rankbased""" +815 83 dataset """kinships""" +815 83 model """transe""" +815 83 loss """marginranking""" +815 83 regularizer """no""" +815 83 optimizer """adadelta""" +815 83 training_loop """owa""" +815 83 negative_sampler """basic""" +815 83 evaluator """rankbased""" +815 84 dataset """kinships""" +815 84 model """transe""" +815 84 loss """marginranking""" +815 84 regularizer """no""" +815 84 optimizer """adadelta""" +815 84 training_loop """owa""" +815 84 negative_sampler """basic""" +815 84 evaluator """rankbased""" +815 85 dataset """kinships""" +815 85 model """transe""" +815 85 loss """marginranking""" +815 85 regularizer """no""" +815 85 optimizer """adadelta""" +815 85 training_loop """owa""" +815 85 negative_sampler """basic""" +815 85 evaluator """rankbased""" +815 86 dataset """kinships""" +815 86 model """transe""" +815 86 loss """marginranking""" +815 86 regularizer """no""" +815 86 optimizer """adadelta""" +815 86 training_loop """owa""" +815 86 negative_sampler """basic""" +815 86 evaluator """rankbased""" +815 87 dataset """kinships""" +815 87 model """transe""" +815 87 loss """marginranking""" +815 87 regularizer """no""" +815 87 optimizer """adadelta""" +815 87 
training_loop """owa""" +815 87 negative_sampler """basic""" +815 87 evaluator """rankbased""" +815 88 dataset """kinships""" +815 88 model """transe""" +815 88 loss """marginranking""" +815 88 regularizer """no""" +815 88 optimizer """adadelta""" +815 88 training_loop """owa""" +815 88 negative_sampler """basic""" +815 88 evaluator """rankbased""" +815 89 dataset """kinships""" +815 89 model """transe""" +815 89 loss """marginranking""" +815 89 regularizer """no""" +815 89 optimizer """adadelta""" +815 89 training_loop """owa""" +815 89 negative_sampler """basic""" +815 89 evaluator """rankbased""" +815 90 dataset """kinships""" +815 90 model """transe""" +815 90 loss """marginranking""" +815 90 regularizer """no""" +815 90 optimizer """adadelta""" +815 90 training_loop """owa""" +815 90 negative_sampler """basic""" +815 90 evaluator """rankbased""" +815 91 dataset """kinships""" +815 91 model """transe""" +815 91 loss """marginranking""" +815 91 regularizer """no""" +815 91 optimizer """adadelta""" +815 91 training_loop """owa""" +815 91 negative_sampler """basic""" +815 91 evaluator """rankbased""" +815 92 dataset """kinships""" +815 92 model """transe""" +815 92 loss """marginranking""" +815 92 regularizer """no""" +815 92 optimizer """adadelta""" +815 92 training_loop """owa""" +815 92 negative_sampler """basic""" +815 92 evaluator """rankbased""" +815 93 dataset """kinships""" +815 93 model """transe""" +815 93 loss """marginranking""" +815 93 regularizer """no""" +815 93 optimizer """adadelta""" +815 93 training_loop """owa""" +815 93 negative_sampler """basic""" +815 93 evaluator """rankbased""" +815 94 dataset """kinships""" +815 94 model """transe""" +815 94 loss """marginranking""" +815 94 regularizer """no""" +815 94 optimizer """adadelta""" +815 94 training_loop """owa""" +815 94 negative_sampler """basic""" +815 94 evaluator """rankbased""" +815 95 dataset """kinships""" +815 95 model """transe""" +815 95 loss """marginranking""" +815 95 regularizer 
"""no""" +815 95 optimizer """adadelta""" +815 95 training_loop """owa""" +815 95 negative_sampler """basic""" +815 95 evaluator """rankbased""" +815 96 dataset """kinships""" +815 96 model """transe""" +815 96 loss """marginranking""" +815 96 regularizer """no""" +815 96 optimizer """adadelta""" +815 96 training_loop """owa""" +815 96 negative_sampler """basic""" +815 96 evaluator """rankbased""" +815 97 dataset """kinships""" +815 97 model """transe""" +815 97 loss """marginranking""" +815 97 regularizer """no""" +815 97 optimizer """adadelta""" +815 97 training_loop """owa""" +815 97 negative_sampler """basic""" +815 97 evaluator """rankbased""" +815 98 dataset """kinships""" +815 98 model """transe""" +815 98 loss """marginranking""" +815 98 regularizer """no""" +815 98 optimizer """adadelta""" +815 98 training_loop """owa""" +815 98 negative_sampler """basic""" +815 98 evaluator """rankbased""" +815 99 dataset """kinships""" +815 99 model """transe""" +815 99 loss """marginranking""" +815 99 regularizer """no""" +815 99 optimizer """adadelta""" +815 99 training_loop """owa""" +815 99 negative_sampler """basic""" +815 99 evaluator """rankbased""" +815 100 dataset """kinships""" +815 100 model """transe""" +815 100 loss """marginranking""" +815 100 regularizer """no""" +815 100 optimizer """adadelta""" +815 100 training_loop """owa""" +815 100 negative_sampler """basic""" +815 100 evaluator """rankbased""" +816 1 model.embedding_dim 2.0 +816 1 model.scoring_fct_norm 2.0 +816 1 loss.margin 3.1936375714698575 +816 1 negative_sampler.num_negs_per_pos 48.0 +816 1 training.batch_size 0.0 +816 2 model.embedding_dim 1.0 +816 2 model.scoring_fct_norm 1.0 +816 2 loss.margin 3.758172063784195 +816 2 negative_sampler.num_negs_per_pos 74.0 +816 2 training.batch_size 2.0 +816 3 model.embedding_dim 0.0 +816 3 model.scoring_fct_norm 2.0 +816 3 loss.margin 2.1678607377027164 +816 3 negative_sampler.num_negs_per_pos 49.0 +816 3 training.batch_size 2.0 +816 4 model.embedding_dim 
1.0 +816 4 model.scoring_fct_norm 1.0 +816 4 loss.margin 6.059954411781794 +816 4 negative_sampler.num_negs_per_pos 75.0 +816 4 training.batch_size 2.0 +816 5 model.embedding_dim 2.0 +816 5 model.scoring_fct_norm 1.0 +816 5 loss.margin 6.894341465167884 +816 5 negative_sampler.num_negs_per_pos 38.0 +816 5 training.batch_size 0.0 +816 6 model.embedding_dim 2.0 +816 6 model.scoring_fct_norm 2.0 +816 6 loss.margin 7.4259559829830435 +816 6 negative_sampler.num_negs_per_pos 98.0 +816 6 training.batch_size 2.0 +816 7 model.embedding_dim 1.0 +816 7 model.scoring_fct_norm 1.0 +816 7 loss.margin 8.57713049456651 +816 7 negative_sampler.num_negs_per_pos 7.0 +816 7 training.batch_size 2.0 +816 8 model.embedding_dim 1.0 +816 8 model.scoring_fct_norm 1.0 +816 8 loss.margin 2.971211828712894 +816 8 negative_sampler.num_negs_per_pos 17.0 +816 8 training.batch_size 0.0 +816 9 model.embedding_dim 2.0 +816 9 model.scoring_fct_norm 1.0 +816 9 loss.margin 6.88759459955524 +816 9 negative_sampler.num_negs_per_pos 79.0 +816 9 training.batch_size 0.0 +816 10 model.embedding_dim 1.0 +816 10 model.scoring_fct_norm 1.0 +816 10 loss.margin 5.815045670854633 +816 10 negative_sampler.num_negs_per_pos 4.0 +816 10 training.batch_size 2.0 +816 11 model.embedding_dim 1.0 +816 11 model.scoring_fct_norm 1.0 +816 11 loss.margin 1.1528011708939991 +816 11 negative_sampler.num_negs_per_pos 93.0 +816 11 training.batch_size 0.0 +816 12 model.embedding_dim 1.0 +816 12 model.scoring_fct_norm 1.0 +816 12 loss.margin 3.782524035320795 +816 12 negative_sampler.num_negs_per_pos 44.0 +816 12 training.batch_size 2.0 +816 13 model.embedding_dim 1.0 +816 13 model.scoring_fct_norm 2.0 +816 13 loss.margin 6.9900013632988305 +816 13 negative_sampler.num_negs_per_pos 66.0 +816 13 training.batch_size 2.0 +816 14 model.embedding_dim 2.0 +816 14 model.scoring_fct_norm 2.0 +816 14 loss.margin 4.851373282324609 +816 14 negative_sampler.num_negs_per_pos 36.0 +816 14 training.batch_size 0.0 +816 15 model.embedding_dim 0.0 
+816 15 model.scoring_fct_norm 1.0 +816 15 loss.margin 5.574446125826259 +816 15 negative_sampler.num_negs_per_pos 95.0 +816 15 training.batch_size 2.0 +816 16 model.embedding_dim 0.0 +816 16 model.scoring_fct_norm 2.0 +816 16 loss.margin 5.3042236087121895 +816 16 negative_sampler.num_negs_per_pos 90.0 +816 16 training.batch_size 2.0 +816 17 model.embedding_dim 0.0 +816 17 model.scoring_fct_norm 1.0 +816 17 loss.margin 9.04198326977347 +816 17 negative_sampler.num_negs_per_pos 46.0 +816 17 training.batch_size 1.0 +816 18 model.embedding_dim 1.0 +816 18 model.scoring_fct_norm 1.0 +816 18 loss.margin 4.371480979582798 +816 18 negative_sampler.num_negs_per_pos 1.0 +816 18 training.batch_size 0.0 +816 19 model.embedding_dim 1.0 +816 19 model.scoring_fct_norm 1.0 +816 19 loss.margin 1.6281911318336766 +816 19 negative_sampler.num_negs_per_pos 46.0 +816 19 training.batch_size 1.0 +816 20 model.embedding_dim 0.0 +816 20 model.scoring_fct_norm 2.0 +816 20 loss.margin 4.580278945604005 +816 20 negative_sampler.num_negs_per_pos 35.0 +816 20 training.batch_size 1.0 +816 21 model.embedding_dim 1.0 +816 21 model.scoring_fct_norm 1.0 +816 21 loss.margin 6.1532186797105695 +816 21 negative_sampler.num_negs_per_pos 14.0 +816 21 training.batch_size 1.0 +816 22 model.embedding_dim 2.0 +816 22 model.scoring_fct_norm 1.0 +816 22 loss.margin 4.470763028206225 +816 22 negative_sampler.num_negs_per_pos 5.0 +816 22 training.batch_size 1.0 +816 23 model.embedding_dim 2.0 +816 23 model.scoring_fct_norm 1.0 +816 23 loss.margin 7.981465616650764 +816 23 negative_sampler.num_negs_per_pos 45.0 +816 23 training.batch_size 0.0 +816 24 model.embedding_dim 2.0 +816 24 model.scoring_fct_norm 2.0 +816 24 loss.margin 8.964826293162139 +816 24 negative_sampler.num_negs_per_pos 33.0 +816 24 training.batch_size 0.0 +816 25 model.embedding_dim 1.0 +816 25 model.scoring_fct_norm 1.0 +816 25 loss.margin 3.7089403255595936 +816 25 negative_sampler.num_negs_per_pos 32.0 +816 25 training.batch_size 1.0 +816 
26 model.embedding_dim 0.0 +816 26 model.scoring_fct_norm 2.0 +816 26 loss.margin 9.999671633179744 +816 26 negative_sampler.num_negs_per_pos 4.0 +816 26 training.batch_size 1.0 +816 27 model.embedding_dim 1.0 +816 27 model.scoring_fct_norm 1.0 +816 27 loss.margin 6.514118044573513 +816 27 negative_sampler.num_negs_per_pos 49.0 +816 27 training.batch_size 2.0 +816 28 model.embedding_dim 0.0 +816 28 model.scoring_fct_norm 1.0 +816 28 loss.margin 2.8990552067214033 +816 28 negative_sampler.num_negs_per_pos 2.0 +816 28 training.batch_size 1.0 +816 29 model.embedding_dim 1.0 +816 29 model.scoring_fct_norm 1.0 +816 29 loss.margin 1.818770959850474 +816 29 negative_sampler.num_negs_per_pos 60.0 +816 29 training.batch_size 0.0 +816 30 model.embedding_dim 2.0 +816 30 model.scoring_fct_norm 1.0 +816 30 loss.margin 3.213935662714353 +816 30 negative_sampler.num_negs_per_pos 18.0 +816 30 training.batch_size 2.0 +816 31 model.embedding_dim 0.0 +816 31 model.scoring_fct_norm 1.0 +816 31 loss.margin 9.34518176698098 +816 31 negative_sampler.num_negs_per_pos 97.0 +816 31 training.batch_size 0.0 +816 32 model.embedding_dim 1.0 +816 32 model.scoring_fct_norm 2.0 +816 32 loss.margin 7.016143792712247 +816 32 negative_sampler.num_negs_per_pos 12.0 +816 32 training.batch_size 1.0 +816 33 model.embedding_dim 1.0 +816 33 model.scoring_fct_norm 1.0 +816 33 loss.margin 7.609894095770225 +816 33 negative_sampler.num_negs_per_pos 84.0 +816 33 training.batch_size 2.0 +816 34 model.embedding_dim 0.0 +816 34 model.scoring_fct_norm 2.0 +816 34 loss.margin 9.057222729941566 +816 34 negative_sampler.num_negs_per_pos 97.0 +816 34 training.batch_size 1.0 +816 35 model.embedding_dim 2.0 +816 35 model.scoring_fct_norm 2.0 +816 35 loss.margin 4.001992256848268 +816 35 negative_sampler.num_negs_per_pos 30.0 +816 35 training.batch_size 1.0 +816 36 model.embedding_dim 0.0 +816 36 model.scoring_fct_norm 1.0 +816 36 loss.margin 7.898838361676674 +816 36 negative_sampler.num_negs_per_pos 98.0 +816 36 
training.batch_size 2.0 +816 37 model.embedding_dim 0.0 +816 37 model.scoring_fct_norm 1.0 +816 37 loss.margin 1.98135000849547 +816 37 negative_sampler.num_negs_per_pos 84.0 +816 37 training.batch_size 2.0 +816 38 model.embedding_dim 2.0 +816 38 model.scoring_fct_norm 2.0 +816 38 loss.margin 7.945564996741998 +816 38 negative_sampler.num_negs_per_pos 91.0 +816 38 training.batch_size 0.0 +816 39 model.embedding_dim 2.0 +816 39 model.scoring_fct_norm 2.0 +816 39 loss.margin 5.70332190796647 +816 39 negative_sampler.num_negs_per_pos 77.0 +816 39 training.batch_size 1.0 +816 40 model.embedding_dim 2.0 +816 40 model.scoring_fct_norm 2.0 +816 40 loss.margin 7.20315419729792 +816 40 negative_sampler.num_negs_per_pos 14.0 +816 40 training.batch_size 0.0 +816 41 model.embedding_dim 1.0 +816 41 model.scoring_fct_norm 1.0 +816 41 loss.margin 3.8923958287247156 +816 41 negative_sampler.num_negs_per_pos 85.0 +816 41 training.batch_size 2.0 +816 42 model.embedding_dim 1.0 +816 42 model.scoring_fct_norm 1.0 +816 42 loss.margin 9.266080972520012 +816 42 negative_sampler.num_negs_per_pos 86.0 +816 42 training.batch_size 2.0 +816 43 model.embedding_dim 1.0 +816 43 model.scoring_fct_norm 1.0 +816 43 loss.margin 3.913075850106822 +816 43 negative_sampler.num_negs_per_pos 58.0 +816 43 training.batch_size 0.0 +816 44 model.embedding_dim 0.0 +816 44 model.scoring_fct_norm 1.0 +816 44 loss.margin 4.18527785934679 +816 44 negative_sampler.num_negs_per_pos 45.0 +816 44 training.batch_size 1.0 +816 45 model.embedding_dim 0.0 +816 45 model.scoring_fct_norm 2.0 +816 45 loss.margin 2.682413886873217 +816 45 negative_sampler.num_negs_per_pos 41.0 +816 45 training.batch_size 0.0 +816 46 model.embedding_dim 0.0 +816 46 model.scoring_fct_norm 1.0 +816 46 loss.margin 1.7430320331156302 +816 46 negative_sampler.num_negs_per_pos 74.0 +816 46 training.batch_size 0.0 +816 47 model.embedding_dim 0.0 +816 47 model.scoring_fct_norm 1.0 +816 47 loss.margin 4.050972907540141 +816 47 
negative_sampler.num_negs_per_pos 89.0 +816 47 training.batch_size 0.0 +816 48 model.embedding_dim 1.0 +816 48 model.scoring_fct_norm 2.0 +816 48 loss.margin 4.455386004504712 +816 48 negative_sampler.num_negs_per_pos 97.0 +816 48 training.batch_size 2.0 +816 49 model.embedding_dim 2.0 +816 49 model.scoring_fct_norm 2.0 +816 49 loss.margin 5.223268306578886 +816 49 negative_sampler.num_negs_per_pos 25.0 +816 49 training.batch_size 0.0 +816 50 model.embedding_dim 2.0 +816 50 model.scoring_fct_norm 1.0 +816 50 loss.margin 7.817783927756282 +816 50 negative_sampler.num_negs_per_pos 90.0 +816 50 training.batch_size 1.0 +816 51 model.embedding_dim 0.0 +816 51 model.scoring_fct_norm 2.0 +816 51 loss.margin 5.525092361074634 +816 51 negative_sampler.num_negs_per_pos 67.0 +816 51 training.batch_size 2.0 +816 52 model.embedding_dim 0.0 +816 52 model.scoring_fct_norm 1.0 +816 52 loss.margin 4.795404023138281 +816 52 negative_sampler.num_negs_per_pos 5.0 +816 52 training.batch_size 1.0 +816 53 model.embedding_dim 1.0 +816 53 model.scoring_fct_norm 1.0 +816 53 loss.margin 4.051262240353189 +816 53 negative_sampler.num_negs_per_pos 47.0 +816 53 training.batch_size 0.0 +816 54 model.embedding_dim 1.0 +816 54 model.scoring_fct_norm 1.0 +816 54 loss.margin 3.042165275771616 +816 54 negative_sampler.num_negs_per_pos 45.0 +816 54 training.batch_size 1.0 +816 55 model.embedding_dim 0.0 +816 55 model.scoring_fct_norm 1.0 +816 55 loss.margin 3.9194918792614355 +816 55 negative_sampler.num_negs_per_pos 70.0 +816 55 training.batch_size 1.0 +816 56 model.embedding_dim 2.0 +816 56 model.scoring_fct_norm 2.0 +816 56 loss.margin 8.42348160924901 +816 56 negative_sampler.num_negs_per_pos 49.0 +816 56 training.batch_size 1.0 +816 57 model.embedding_dim 1.0 +816 57 model.scoring_fct_norm 1.0 +816 57 loss.margin 0.9368988866020431 +816 57 negative_sampler.num_negs_per_pos 61.0 +816 57 training.batch_size 1.0 +816 58 model.embedding_dim 0.0 +816 58 model.scoring_fct_norm 2.0 +816 58 loss.margin 
3.8848519822002836 +816 58 negative_sampler.num_negs_per_pos 82.0 +816 58 training.batch_size 0.0 +816 59 model.embedding_dim 0.0 +816 59 model.scoring_fct_norm 2.0 +816 59 loss.margin 3.4991079017568576 +816 59 negative_sampler.num_negs_per_pos 83.0 +816 59 training.batch_size 1.0 +816 60 model.embedding_dim 0.0 +816 60 model.scoring_fct_norm 2.0 +816 60 loss.margin 3.4197555033576874 +816 60 negative_sampler.num_negs_per_pos 44.0 +816 60 training.batch_size 0.0 +816 61 model.embedding_dim 0.0 +816 61 model.scoring_fct_norm 2.0 +816 61 loss.margin 9.140766662404188 +816 61 negative_sampler.num_negs_per_pos 10.0 +816 61 training.batch_size 1.0 +816 62 model.embedding_dim 2.0 +816 62 model.scoring_fct_norm 1.0 +816 62 loss.margin 8.329795509197282 +816 62 negative_sampler.num_negs_per_pos 82.0 +816 62 training.batch_size 0.0 +816 63 model.embedding_dim 1.0 +816 63 model.scoring_fct_norm 2.0 +816 63 loss.margin 1.4173723748675746 +816 63 negative_sampler.num_negs_per_pos 94.0 +816 63 training.batch_size 1.0 +816 64 model.embedding_dim 0.0 +816 64 model.scoring_fct_norm 1.0 +816 64 loss.margin 9.316270618322358 +816 64 negative_sampler.num_negs_per_pos 9.0 +816 64 training.batch_size 2.0 +816 65 model.embedding_dim 1.0 +816 65 model.scoring_fct_norm 2.0 +816 65 loss.margin 6.728403012093576 +816 65 negative_sampler.num_negs_per_pos 81.0 +816 65 training.batch_size 1.0 +816 66 model.embedding_dim 2.0 +816 66 model.scoring_fct_norm 1.0 +816 66 loss.margin 1.0810461097667579 +816 66 negative_sampler.num_negs_per_pos 85.0 +816 66 training.batch_size 2.0 +816 67 model.embedding_dim 1.0 +816 67 model.scoring_fct_norm 2.0 +816 67 loss.margin 4.800423066770024 +816 67 negative_sampler.num_negs_per_pos 72.0 +816 67 training.batch_size 1.0 +816 68 model.embedding_dim 2.0 +816 68 model.scoring_fct_norm 1.0 +816 68 loss.margin 2.398589954603401 +816 68 negative_sampler.num_negs_per_pos 18.0 +816 68 training.batch_size 1.0 +816 69 model.embedding_dim 2.0 +816 69 
model.scoring_fct_norm 2.0 +816 69 loss.margin 3.623978005950333 +816 69 negative_sampler.num_negs_per_pos 71.0 +816 69 training.batch_size 0.0 +816 70 model.embedding_dim 0.0 +816 70 model.scoring_fct_norm 2.0 +816 70 loss.margin 4.062641500218214 +816 70 negative_sampler.num_negs_per_pos 85.0 +816 70 training.batch_size 0.0 +816 71 model.embedding_dim 2.0 +816 71 model.scoring_fct_norm 1.0 +816 71 loss.margin 6.155653149821917 +816 71 negative_sampler.num_negs_per_pos 16.0 +816 71 training.batch_size 1.0 +816 72 model.embedding_dim 0.0 +816 72 model.scoring_fct_norm 2.0 +816 72 loss.margin 1.5532347372720534 +816 72 negative_sampler.num_negs_per_pos 65.0 +816 72 training.batch_size 2.0 +816 73 model.embedding_dim 1.0 +816 73 model.scoring_fct_norm 1.0 +816 73 loss.margin 3.84966527706241 +816 73 negative_sampler.num_negs_per_pos 74.0 +816 73 training.batch_size 0.0 +816 74 model.embedding_dim 0.0 +816 74 model.scoring_fct_norm 1.0 +816 74 loss.margin 3.021853164208138 +816 74 negative_sampler.num_negs_per_pos 51.0 +816 74 training.batch_size 1.0 +816 75 model.embedding_dim 1.0 +816 75 model.scoring_fct_norm 2.0 +816 75 loss.margin 1.6788370709714802 +816 75 negative_sampler.num_negs_per_pos 83.0 +816 75 training.batch_size 1.0 +816 76 model.embedding_dim 1.0 +816 76 model.scoring_fct_norm 2.0 +816 76 loss.margin 3.6429304969485052 +816 76 negative_sampler.num_negs_per_pos 35.0 +816 76 training.batch_size 0.0 +816 77 model.embedding_dim 0.0 +816 77 model.scoring_fct_norm 2.0 +816 77 loss.margin 0.8187853485739485 +816 77 negative_sampler.num_negs_per_pos 37.0 +816 77 training.batch_size 1.0 +816 78 model.embedding_dim 2.0 +816 78 model.scoring_fct_norm 2.0 +816 78 loss.margin 1.1753036076535939 +816 78 negative_sampler.num_negs_per_pos 83.0 +816 78 training.batch_size 2.0 +816 79 model.embedding_dim 1.0 +816 79 model.scoring_fct_norm 2.0 +816 79 loss.margin 6.994356326862333 +816 79 negative_sampler.num_negs_per_pos 4.0 +816 79 training.batch_size 1.0 +816 80 
model.embedding_dim 1.0 +816 80 model.scoring_fct_norm 1.0 +816 80 loss.margin 8.786154273772901 +816 80 negative_sampler.num_negs_per_pos 24.0 +816 80 training.batch_size 2.0 +816 81 model.embedding_dim 2.0 +816 81 model.scoring_fct_norm 2.0 +816 81 loss.margin 8.248550094713362 +816 81 negative_sampler.num_negs_per_pos 43.0 +816 81 training.batch_size 1.0 +816 82 model.embedding_dim 1.0 +816 82 model.scoring_fct_norm 1.0 +816 82 loss.margin 6.963660136534247 +816 82 negative_sampler.num_negs_per_pos 23.0 +816 82 training.batch_size 0.0 +816 83 model.embedding_dim 0.0 +816 83 model.scoring_fct_norm 1.0 +816 83 loss.margin 3.888858396988283 +816 83 negative_sampler.num_negs_per_pos 84.0 +816 83 training.batch_size 1.0 +816 84 model.embedding_dim 0.0 +816 84 model.scoring_fct_norm 2.0 +816 84 loss.margin 7.619533772600524 +816 84 negative_sampler.num_negs_per_pos 44.0 +816 84 training.batch_size 1.0 +816 85 model.embedding_dim 1.0 +816 85 model.scoring_fct_norm 2.0 +816 85 loss.margin 4.69907874666912 +816 85 negative_sampler.num_negs_per_pos 74.0 +816 85 training.batch_size 0.0 +816 86 model.embedding_dim 0.0 +816 86 model.scoring_fct_norm 2.0 +816 86 loss.margin 7.225713247697285 +816 86 negative_sampler.num_negs_per_pos 84.0 +816 86 training.batch_size 2.0 +816 87 model.embedding_dim 0.0 +816 87 model.scoring_fct_norm 2.0 +816 87 loss.margin 9.20071262086415 +816 87 negative_sampler.num_negs_per_pos 62.0 +816 87 training.batch_size 2.0 +816 88 model.embedding_dim 0.0 +816 88 model.scoring_fct_norm 1.0 +816 88 loss.margin 8.188667994382087 +816 88 negative_sampler.num_negs_per_pos 24.0 +816 88 training.batch_size 1.0 +816 89 model.embedding_dim 2.0 +816 89 model.scoring_fct_norm 1.0 +816 89 loss.margin 1.21389337394198 +816 89 negative_sampler.num_negs_per_pos 49.0 +816 89 training.batch_size 2.0 +816 90 model.embedding_dim 0.0 +816 90 model.scoring_fct_norm 2.0 +816 90 loss.margin 3.5661458638935457 +816 90 negative_sampler.num_negs_per_pos 75.0 +816 90 
training.batch_size 1.0 +816 91 model.embedding_dim 0.0 +816 91 model.scoring_fct_norm 2.0 +816 91 loss.margin 9.511506803078412 +816 91 negative_sampler.num_negs_per_pos 23.0 +816 91 training.batch_size 1.0 +816 92 model.embedding_dim 0.0 +816 92 model.scoring_fct_norm 2.0 +816 92 loss.margin 8.375059519536602 +816 92 negative_sampler.num_negs_per_pos 93.0 +816 92 training.batch_size 2.0 +816 93 model.embedding_dim 1.0 +816 93 model.scoring_fct_norm 2.0 +816 93 loss.margin 0.5381531750116201 +816 93 negative_sampler.num_negs_per_pos 77.0 +816 93 training.batch_size 2.0 +816 94 model.embedding_dim 0.0 +816 94 model.scoring_fct_norm 2.0 +816 94 loss.margin 7.684530419643091 +816 94 negative_sampler.num_negs_per_pos 64.0 +816 94 training.batch_size 1.0 +816 95 model.embedding_dim 0.0 +816 95 model.scoring_fct_norm 1.0 +816 95 loss.margin 0.6020091310469187 +816 95 negative_sampler.num_negs_per_pos 27.0 +816 95 training.batch_size 2.0 +816 96 model.embedding_dim 0.0 +816 96 model.scoring_fct_norm 1.0 +816 96 loss.margin 6.178200957194545 +816 96 negative_sampler.num_negs_per_pos 55.0 +816 96 training.batch_size 2.0 +816 97 model.embedding_dim 0.0 +816 97 model.scoring_fct_norm 2.0 +816 97 loss.margin 2.1000075447634408 +816 97 negative_sampler.num_negs_per_pos 94.0 +816 97 training.batch_size 0.0 +816 98 model.embedding_dim 1.0 +816 98 model.scoring_fct_norm 2.0 +816 98 loss.margin 8.362669119877177 +816 98 negative_sampler.num_negs_per_pos 91.0 +816 98 training.batch_size 0.0 +816 99 model.embedding_dim 1.0 +816 99 model.scoring_fct_norm 1.0 +816 99 loss.margin 6.6844434950958735 +816 99 negative_sampler.num_negs_per_pos 98.0 +816 99 training.batch_size 0.0 +816 100 model.embedding_dim 1.0 +816 100 model.scoring_fct_norm 2.0 +816 100 loss.margin 6.743593992515262 +816 100 negative_sampler.num_negs_per_pos 4.0 +816 100 training.batch_size 0.0 +816 1 dataset """kinships""" +816 1 model """transe""" +816 1 loss """marginranking""" +816 1 regularizer """no""" +816 1 
optimizer """adadelta""" +816 1 training_loop """owa""" +816 1 negative_sampler """basic""" +816 1 evaluator """rankbased""" +816 2 dataset """kinships""" +816 2 model """transe""" +816 2 loss """marginranking""" +816 2 regularizer """no""" +816 2 optimizer """adadelta""" +816 2 training_loop """owa""" +816 2 negative_sampler """basic""" +816 2 evaluator """rankbased""" +816 3 dataset """kinships""" +816 3 model """transe""" +816 3 loss """marginranking""" +816 3 regularizer """no""" +816 3 optimizer """adadelta""" +816 3 training_loop """owa""" +816 3 negative_sampler """basic""" +816 3 evaluator """rankbased""" +816 4 dataset """kinships""" +816 4 model """transe""" +816 4 loss """marginranking""" +816 4 regularizer """no""" +816 4 optimizer """adadelta""" +816 4 training_loop """owa""" +816 4 negative_sampler """basic""" +816 4 evaluator """rankbased""" +816 5 dataset """kinships""" +816 5 model """transe""" +816 5 loss """marginranking""" +816 5 regularizer """no""" +816 5 optimizer """adadelta""" +816 5 training_loop """owa""" +816 5 negative_sampler """basic""" +816 5 evaluator """rankbased""" +816 6 dataset """kinships""" +816 6 model """transe""" +816 6 loss """marginranking""" +816 6 regularizer """no""" +816 6 optimizer """adadelta""" +816 6 training_loop """owa""" +816 6 negative_sampler """basic""" +816 6 evaluator """rankbased""" +816 7 dataset """kinships""" +816 7 model """transe""" +816 7 loss """marginranking""" +816 7 regularizer """no""" +816 7 optimizer """adadelta""" +816 7 training_loop """owa""" +816 7 negative_sampler """basic""" +816 7 evaluator """rankbased""" +816 8 dataset """kinships""" +816 8 model """transe""" +816 8 loss """marginranking""" +816 8 regularizer """no""" +816 8 optimizer """adadelta""" +816 8 training_loop """owa""" +816 8 negative_sampler """basic""" +816 8 evaluator """rankbased""" +816 9 dataset """kinships""" +816 9 model """transe""" +816 9 loss """marginranking""" +816 9 regularizer """no""" +816 9 optimizer 
"""adadelta""" +816 9 training_loop """owa""" +816 9 negative_sampler """basic""" +816 9 evaluator """rankbased""" +816 10 dataset """kinships""" +816 10 model """transe""" +816 10 loss """marginranking""" +816 10 regularizer """no""" +816 10 optimizer """adadelta""" +816 10 training_loop """owa""" +816 10 negative_sampler """basic""" +816 10 evaluator """rankbased""" +816 11 dataset """kinships""" +816 11 model """transe""" +816 11 loss """marginranking""" +816 11 regularizer """no""" +816 11 optimizer """adadelta""" +816 11 training_loop """owa""" +816 11 negative_sampler """basic""" +816 11 evaluator """rankbased""" +816 12 dataset """kinships""" +816 12 model """transe""" +816 12 loss """marginranking""" +816 12 regularizer """no""" +816 12 optimizer """adadelta""" +816 12 training_loop """owa""" +816 12 negative_sampler """basic""" +816 12 evaluator """rankbased""" +816 13 dataset """kinships""" +816 13 model """transe""" +816 13 loss """marginranking""" +816 13 regularizer """no""" +816 13 optimizer """adadelta""" +816 13 training_loop """owa""" +816 13 negative_sampler """basic""" +816 13 evaluator """rankbased""" +816 14 dataset """kinships""" +816 14 model """transe""" +816 14 loss """marginranking""" +816 14 regularizer """no""" +816 14 optimizer """adadelta""" +816 14 training_loop """owa""" +816 14 negative_sampler """basic""" +816 14 evaluator """rankbased""" +816 15 dataset """kinships""" +816 15 model """transe""" +816 15 loss """marginranking""" +816 15 regularizer """no""" +816 15 optimizer """adadelta""" +816 15 training_loop """owa""" +816 15 negative_sampler """basic""" +816 15 evaluator """rankbased""" +816 16 dataset """kinships""" +816 16 model """transe""" +816 16 loss """marginranking""" +816 16 regularizer """no""" +816 16 optimizer """adadelta""" +816 16 training_loop """owa""" +816 16 negative_sampler """basic""" +816 16 evaluator """rankbased""" +816 17 dataset """kinships""" +816 17 model """transe""" +816 17 loss """marginranking""" 
+816 17 regularizer """no""" +816 17 optimizer """adadelta""" +816 17 training_loop """owa""" +816 17 negative_sampler """basic""" +816 17 evaluator """rankbased""" +816 18 dataset """kinships""" +816 18 model """transe""" +816 18 loss """marginranking""" +816 18 regularizer """no""" +816 18 optimizer """adadelta""" +816 18 training_loop """owa""" +816 18 negative_sampler """basic""" +816 18 evaluator """rankbased""" +816 19 dataset """kinships""" +816 19 model """transe""" +816 19 loss """marginranking""" +816 19 regularizer """no""" +816 19 optimizer """adadelta""" +816 19 training_loop """owa""" +816 19 negative_sampler """basic""" +816 19 evaluator """rankbased""" +816 20 dataset """kinships""" +816 20 model """transe""" +816 20 loss """marginranking""" +816 20 regularizer """no""" +816 20 optimizer """adadelta""" +816 20 training_loop """owa""" +816 20 negative_sampler """basic""" +816 20 evaluator """rankbased""" +816 21 dataset """kinships""" +816 21 model """transe""" +816 21 loss """marginranking""" +816 21 regularizer """no""" +816 21 optimizer """adadelta""" +816 21 training_loop """owa""" +816 21 negative_sampler """basic""" +816 21 evaluator """rankbased""" +816 22 dataset """kinships""" +816 22 model """transe""" +816 22 loss """marginranking""" +816 22 regularizer """no""" +816 22 optimizer """adadelta""" +816 22 training_loop """owa""" +816 22 negative_sampler """basic""" +816 22 evaluator """rankbased""" +816 23 dataset """kinships""" +816 23 model """transe""" +816 23 loss """marginranking""" +816 23 regularizer """no""" +816 23 optimizer """adadelta""" +816 23 training_loop """owa""" +816 23 negative_sampler """basic""" +816 23 evaluator """rankbased""" +816 24 dataset """kinships""" +816 24 model """transe""" +816 24 loss """marginranking""" +816 24 regularizer """no""" +816 24 optimizer """adadelta""" +816 24 training_loop """owa""" +816 24 negative_sampler """basic""" +816 24 evaluator """rankbased""" +816 25 dataset """kinships""" +816 25 
model """transe""" +816 25 loss """marginranking""" +816 25 regularizer """no""" +816 25 optimizer """adadelta""" +816 25 training_loop """owa""" +816 25 negative_sampler """basic""" +816 25 evaluator """rankbased""" +816 26 dataset """kinships""" +816 26 model """transe""" +816 26 loss """marginranking""" +816 26 regularizer """no""" +816 26 optimizer """adadelta""" +816 26 training_loop """owa""" +816 26 negative_sampler """basic""" +816 26 evaluator """rankbased""" +816 27 dataset """kinships""" +816 27 model """transe""" +816 27 loss """marginranking""" +816 27 regularizer """no""" +816 27 optimizer """adadelta""" +816 27 training_loop """owa""" +816 27 negative_sampler """basic""" +816 27 evaluator """rankbased""" +816 28 dataset """kinships""" +816 28 model """transe""" +816 28 loss """marginranking""" +816 28 regularizer """no""" +816 28 optimizer """adadelta""" +816 28 training_loop """owa""" +816 28 negative_sampler """basic""" +816 28 evaluator """rankbased""" +816 29 dataset """kinships""" +816 29 model """transe""" +816 29 loss """marginranking""" +816 29 regularizer """no""" +816 29 optimizer """adadelta""" +816 29 training_loop """owa""" +816 29 negative_sampler """basic""" +816 29 evaluator """rankbased""" +816 30 dataset """kinships""" +816 30 model """transe""" +816 30 loss """marginranking""" +816 30 regularizer """no""" +816 30 optimizer """adadelta""" +816 30 training_loop """owa""" +816 30 negative_sampler """basic""" +816 30 evaluator """rankbased""" +816 31 dataset """kinships""" +816 31 model """transe""" +816 31 loss """marginranking""" +816 31 regularizer """no""" +816 31 optimizer """adadelta""" +816 31 training_loop """owa""" +816 31 negative_sampler """basic""" +816 31 evaluator """rankbased""" +816 32 dataset """kinships""" +816 32 model """transe""" +816 32 loss """marginranking""" +816 32 regularizer """no""" +816 32 optimizer """adadelta""" +816 32 training_loop """owa""" +816 32 negative_sampler """basic""" +816 32 evaluator 
"""rankbased""" +816 33 dataset """kinships""" +816 33 model """transe""" +816 33 loss """marginranking""" +816 33 regularizer """no""" +816 33 optimizer """adadelta""" +816 33 training_loop """owa""" +816 33 negative_sampler """basic""" +816 33 evaluator """rankbased""" +816 34 dataset """kinships""" +816 34 model """transe""" +816 34 loss """marginranking""" +816 34 regularizer """no""" +816 34 optimizer """adadelta""" +816 34 training_loop """owa""" +816 34 negative_sampler """basic""" +816 34 evaluator """rankbased""" +816 35 dataset """kinships""" +816 35 model """transe""" +816 35 loss """marginranking""" +816 35 regularizer """no""" +816 35 optimizer """adadelta""" +816 35 training_loop """owa""" +816 35 negative_sampler """basic""" +816 35 evaluator """rankbased""" +816 36 dataset """kinships""" +816 36 model """transe""" +816 36 loss """marginranking""" +816 36 regularizer """no""" +816 36 optimizer """adadelta""" +816 36 training_loop """owa""" +816 36 negative_sampler """basic""" +816 36 evaluator """rankbased""" +816 37 dataset """kinships""" +816 37 model """transe""" +816 37 loss """marginranking""" +816 37 regularizer """no""" +816 37 optimizer """adadelta""" +816 37 training_loop """owa""" +816 37 negative_sampler """basic""" +816 37 evaluator """rankbased""" +816 38 dataset """kinships""" +816 38 model """transe""" +816 38 loss """marginranking""" +816 38 regularizer """no""" +816 38 optimizer """adadelta""" +816 38 training_loop """owa""" +816 38 negative_sampler """basic""" +816 38 evaluator """rankbased""" +816 39 dataset """kinships""" +816 39 model """transe""" +816 39 loss """marginranking""" +816 39 regularizer """no""" +816 39 optimizer """adadelta""" +816 39 training_loop """owa""" +816 39 negative_sampler """basic""" +816 39 evaluator """rankbased""" +816 40 dataset """kinships""" +816 40 model """transe""" +816 40 loss """marginranking""" +816 40 regularizer """no""" +816 40 optimizer """adadelta""" +816 40 training_loop """owa""" +816 
40 negative_sampler """basic""" +816 40 evaluator """rankbased""" +816 41 dataset """kinships""" +816 41 model """transe""" +816 41 loss """marginranking""" +816 41 regularizer """no""" +816 41 optimizer """adadelta""" +816 41 training_loop """owa""" +816 41 negative_sampler """basic""" +816 41 evaluator """rankbased""" +816 42 dataset """kinships""" +816 42 model """transe""" +816 42 loss """marginranking""" +816 42 regularizer """no""" +816 42 optimizer """adadelta""" +816 42 training_loop """owa""" +816 42 negative_sampler """basic""" +816 42 evaluator """rankbased""" +816 43 dataset """kinships""" +816 43 model """transe""" +816 43 loss """marginranking""" +816 43 regularizer """no""" +816 43 optimizer """adadelta""" +816 43 training_loop """owa""" +816 43 negative_sampler """basic""" +816 43 evaluator """rankbased""" +816 44 dataset """kinships""" +816 44 model """transe""" +816 44 loss """marginranking""" +816 44 regularizer """no""" +816 44 optimizer """adadelta""" +816 44 training_loop """owa""" +816 44 negative_sampler """basic""" +816 44 evaluator """rankbased""" +816 45 dataset """kinships""" +816 45 model """transe""" +816 45 loss """marginranking""" +816 45 regularizer """no""" +816 45 optimizer """adadelta""" +816 45 training_loop """owa""" +816 45 negative_sampler """basic""" +816 45 evaluator """rankbased""" +816 46 dataset """kinships""" +816 46 model """transe""" +816 46 loss """marginranking""" +816 46 regularizer """no""" +816 46 optimizer """adadelta""" +816 46 training_loop """owa""" +816 46 negative_sampler """basic""" +816 46 evaluator """rankbased""" +816 47 dataset """kinships""" +816 47 model """transe""" +816 47 loss """marginranking""" +816 47 regularizer """no""" +816 47 optimizer """adadelta""" +816 47 training_loop """owa""" +816 47 negative_sampler """basic""" +816 47 evaluator """rankbased""" +816 48 dataset """kinships""" +816 48 model """transe""" +816 48 loss """marginranking""" +816 48 regularizer """no""" +816 48 optimizer 
"""adadelta""" +816 48 training_loop """owa""" +816 48 negative_sampler """basic""" +816 48 evaluator """rankbased""" +816 49 dataset """kinships""" +816 49 model """transe""" +816 49 loss """marginranking""" +816 49 regularizer """no""" +816 49 optimizer """adadelta""" +816 49 training_loop """owa""" +816 49 negative_sampler """basic""" +816 49 evaluator """rankbased""" +816 50 dataset """kinships""" +816 50 model """transe""" +816 50 loss """marginranking""" +816 50 regularizer """no""" +816 50 optimizer """adadelta""" +816 50 training_loop """owa""" +816 50 negative_sampler """basic""" +816 50 evaluator """rankbased""" +816 51 dataset """kinships""" +816 51 model """transe""" +816 51 loss """marginranking""" +816 51 regularizer """no""" +816 51 optimizer """adadelta""" +816 51 training_loop """owa""" +816 51 negative_sampler """basic""" +816 51 evaluator """rankbased""" +816 52 dataset """kinships""" +816 52 model """transe""" +816 52 loss """marginranking""" +816 52 regularizer """no""" +816 52 optimizer """adadelta""" +816 52 training_loop """owa""" +816 52 negative_sampler """basic""" +816 52 evaluator """rankbased""" +816 53 dataset """kinships""" +816 53 model """transe""" +816 53 loss """marginranking""" +816 53 regularizer """no""" +816 53 optimizer """adadelta""" +816 53 training_loop """owa""" +816 53 negative_sampler """basic""" +816 53 evaluator """rankbased""" +816 54 dataset """kinships""" +816 54 model """transe""" +816 54 loss """marginranking""" +816 54 regularizer """no""" +816 54 optimizer """adadelta""" +816 54 training_loop """owa""" +816 54 negative_sampler """basic""" +816 54 evaluator """rankbased""" +816 55 dataset """kinships""" +816 55 model """transe""" +816 55 loss """marginranking""" +816 55 regularizer """no""" +816 55 optimizer """adadelta""" +816 55 training_loop """owa""" +816 55 negative_sampler """basic""" +816 55 evaluator """rankbased""" +816 56 dataset """kinships""" +816 56 model """transe""" +816 56 loss 
"""marginranking""" +816 56 regularizer """no""" +816 56 optimizer """adadelta""" +816 56 training_loop """owa""" +816 56 negative_sampler """basic""" +816 56 evaluator """rankbased""" +816 57 dataset """kinships""" +816 57 model """transe""" +816 57 loss """marginranking""" +816 57 regularizer """no""" +816 57 optimizer """adadelta""" +816 57 training_loop """owa""" +816 57 negative_sampler """basic""" +816 57 evaluator """rankbased""" +816 58 dataset """kinships""" +816 58 model """transe""" +816 58 loss """marginranking""" +816 58 regularizer """no""" +816 58 optimizer """adadelta""" +816 58 training_loop """owa""" +816 58 negative_sampler """basic""" +816 58 evaluator """rankbased""" +816 59 dataset """kinships""" +816 59 model """transe""" +816 59 loss """marginranking""" +816 59 regularizer """no""" +816 59 optimizer """adadelta""" +816 59 training_loop """owa""" +816 59 negative_sampler """basic""" +816 59 evaluator """rankbased""" +816 60 dataset """kinships""" +816 60 model """transe""" +816 60 loss """marginranking""" +816 60 regularizer """no""" +816 60 optimizer """adadelta""" +816 60 training_loop """owa""" +816 60 negative_sampler """basic""" +816 60 evaluator """rankbased""" +816 61 dataset """kinships""" +816 61 model """transe""" +816 61 loss """marginranking""" +816 61 regularizer """no""" +816 61 optimizer """adadelta""" +816 61 training_loop """owa""" +816 61 negative_sampler """basic""" +816 61 evaluator """rankbased""" +816 62 dataset """kinships""" +816 62 model """transe""" +816 62 loss """marginranking""" +816 62 regularizer """no""" +816 62 optimizer """adadelta""" +816 62 training_loop """owa""" +816 62 negative_sampler """basic""" +816 62 evaluator """rankbased""" +816 63 dataset """kinships""" +816 63 model """transe""" +816 63 loss """marginranking""" +816 63 regularizer """no""" +816 63 optimizer """adadelta""" +816 63 training_loop """owa""" +816 63 negative_sampler """basic""" +816 63 evaluator """rankbased""" +816 64 dataset 
"""kinships""" +816 64 model """transe""" +816 64 loss """marginranking""" +816 64 regularizer """no""" +816 64 optimizer """adadelta""" +816 64 training_loop """owa""" +816 64 negative_sampler """basic""" +816 64 evaluator """rankbased""" +816 65 dataset """kinships""" +816 65 model """transe""" +816 65 loss """marginranking""" +816 65 regularizer """no""" +816 65 optimizer """adadelta""" +816 65 training_loop """owa""" +816 65 negative_sampler """basic""" +816 65 evaluator """rankbased""" +816 66 dataset """kinships""" +816 66 model """transe""" +816 66 loss """marginranking""" +816 66 regularizer """no""" +816 66 optimizer """adadelta""" +816 66 training_loop """owa""" +816 66 negative_sampler """basic""" +816 66 evaluator """rankbased""" +816 67 dataset """kinships""" +816 67 model """transe""" +816 67 loss """marginranking""" +816 67 regularizer """no""" +816 67 optimizer """adadelta""" +816 67 training_loop """owa""" +816 67 negative_sampler """basic""" +816 67 evaluator """rankbased""" +816 68 dataset """kinships""" +816 68 model """transe""" +816 68 loss """marginranking""" +816 68 regularizer """no""" +816 68 optimizer """adadelta""" +816 68 training_loop """owa""" +816 68 negative_sampler """basic""" +816 68 evaluator """rankbased""" +816 69 dataset """kinships""" +816 69 model """transe""" +816 69 loss """marginranking""" +816 69 regularizer """no""" +816 69 optimizer """adadelta""" +816 69 training_loop """owa""" +816 69 negative_sampler """basic""" +816 69 evaluator """rankbased""" +816 70 dataset """kinships""" +816 70 model """transe""" +816 70 loss """marginranking""" +816 70 regularizer """no""" +816 70 optimizer """adadelta""" +816 70 training_loop """owa""" +816 70 negative_sampler """basic""" +816 70 evaluator """rankbased""" +816 71 dataset """kinships""" +816 71 model """transe""" +816 71 loss """marginranking""" +816 71 regularizer """no""" +816 71 optimizer """adadelta""" +816 71 training_loop """owa""" +816 71 negative_sampler """basic""" 
+816 71 evaluator """rankbased""" +816 72 dataset """kinships""" +816 72 model """transe""" +816 72 loss """marginranking""" +816 72 regularizer """no""" +816 72 optimizer """adadelta""" +816 72 training_loop """owa""" +816 72 negative_sampler """basic""" +816 72 evaluator """rankbased""" +816 73 dataset """kinships""" +816 73 model """transe""" +816 73 loss """marginranking""" +816 73 regularizer """no""" +816 73 optimizer """adadelta""" +816 73 training_loop """owa""" +816 73 negative_sampler """basic""" +816 73 evaluator """rankbased""" +816 74 dataset """kinships""" +816 74 model """transe""" +816 74 loss """marginranking""" +816 74 regularizer """no""" +816 74 optimizer """adadelta""" +816 74 training_loop """owa""" +816 74 negative_sampler """basic""" +816 74 evaluator """rankbased""" +816 75 dataset """kinships""" +816 75 model """transe""" +816 75 loss """marginranking""" +816 75 regularizer """no""" +816 75 optimizer """adadelta""" +816 75 training_loop """owa""" +816 75 negative_sampler """basic""" +816 75 evaluator """rankbased""" +816 76 dataset """kinships""" +816 76 model """transe""" +816 76 loss """marginranking""" +816 76 regularizer """no""" +816 76 optimizer """adadelta""" +816 76 training_loop """owa""" +816 76 negative_sampler """basic""" +816 76 evaluator """rankbased""" +816 77 dataset """kinships""" +816 77 model """transe""" +816 77 loss """marginranking""" +816 77 regularizer """no""" +816 77 optimizer """adadelta""" +816 77 training_loop """owa""" +816 77 negative_sampler """basic""" +816 77 evaluator """rankbased""" +816 78 dataset """kinships""" +816 78 model """transe""" +816 78 loss """marginranking""" +816 78 regularizer """no""" +816 78 optimizer """adadelta""" +816 78 training_loop """owa""" +816 78 negative_sampler """basic""" +816 78 evaluator """rankbased""" +816 79 dataset """kinships""" +816 79 model """transe""" +816 79 loss """marginranking""" +816 79 regularizer """no""" +816 79 optimizer """adadelta""" +816 79 
training_loop """owa""" +816 79 negative_sampler """basic""" +816 79 evaluator """rankbased""" +816 80 dataset """kinships""" +816 80 model """transe""" +816 80 loss """marginranking""" +816 80 regularizer """no""" +816 80 optimizer """adadelta""" +816 80 training_loop """owa""" +816 80 negative_sampler """basic""" +816 80 evaluator """rankbased""" +816 81 dataset """kinships""" +816 81 model """transe""" +816 81 loss """marginranking""" +816 81 regularizer """no""" +816 81 optimizer """adadelta""" +816 81 training_loop """owa""" +816 81 negative_sampler """basic""" +816 81 evaluator """rankbased""" +816 82 dataset """kinships""" +816 82 model """transe""" +816 82 loss """marginranking""" +816 82 regularizer """no""" +816 82 optimizer """adadelta""" +816 82 training_loop """owa""" +816 82 negative_sampler """basic""" +816 82 evaluator """rankbased""" +816 83 dataset """kinships""" +816 83 model """transe""" +816 83 loss """marginranking""" +816 83 regularizer """no""" +816 83 optimizer """adadelta""" +816 83 training_loop """owa""" +816 83 negative_sampler """basic""" +816 83 evaluator """rankbased""" +816 84 dataset """kinships""" +816 84 model """transe""" +816 84 loss """marginranking""" +816 84 regularizer """no""" +816 84 optimizer """adadelta""" +816 84 training_loop """owa""" +816 84 negative_sampler """basic""" +816 84 evaluator """rankbased""" +816 85 dataset """kinships""" +816 85 model """transe""" +816 85 loss """marginranking""" +816 85 regularizer """no""" +816 85 optimizer """adadelta""" +816 85 training_loop """owa""" +816 85 negative_sampler """basic""" +816 85 evaluator """rankbased""" +816 86 dataset """kinships""" +816 86 model """transe""" +816 86 loss """marginranking""" +816 86 regularizer """no""" +816 86 optimizer """adadelta""" +816 86 training_loop """owa""" +816 86 negative_sampler """basic""" +816 86 evaluator """rankbased""" +816 87 dataset """kinships""" +816 87 model """transe""" +816 87 loss """marginranking""" +816 87 regularizer 
"""no""" +816 87 optimizer """adadelta""" +816 87 training_loop """owa""" +816 87 negative_sampler """basic""" +816 87 evaluator """rankbased""" +816 88 dataset """kinships""" +816 88 model """transe""" +816 88 loss """marginranking""" +816 88 regularizer """no""" +816 88 optimizer """adadelta""" +816 88 training_loop """owa""" +816 88 negative_sampler """basic""" +816 88 evaluator """rankbased""" +816 89 dataset """kinships""" +816 89 model """transe""" +816 89 loss """marginranking""" +816 89 regularizer """no""" +816 89 optimizer """adadelta""" +816 89 training_loop """owa""" +816 89 negative_sampler """basic""" +816 89 evaluator """rankbased""" +816 90 dataset """kinships""" +816 90 model """transe""" +816 90 loss """marginranking""" +816 90 regularizer """no""" +816 90 optimizer """adadelta""" +816 90 training_loop """owa""" +816 90 negative_sampler """basic""" +816 90 evaluator """rankbased""" +816 91 dataset """kinships""" +816 91 model """transe""" +816 91 loss """marginranking""" +816 91 regularizer """no""" +816 91 optimizer """adadelta""" +816 91 training_loop """owa""" +816 91 negative_sampler """basic""" +816 91 evaluator """rankbased""" +816 92 dataset """kinships""" +816 92 model """transe""" +816 92 loss """marginranking""" +816 92 regularizer """no""" +816 92 optimizer """adadelta""" +816 92 training_loop """owa""" +816 92 negative_sampler """basic""" +816 92 evaluator """rankbased""" +816 93 dataset """kinships""" +816 93 model """transe""" +816 93 loss """marginranking""" +816 93 regularizer """no""" +816 93 optimizer """adadelta""" +816 93 training_loop """owa""" +816 93 negative_sampler """basic""" +816 93 evaluator """rankbased""" +816 94 dataset """kinships""" +816 94 model """transe""" +816 94 loss """marginranking""" +816 94 regularizer """no""" +816 94 optimizer """adadelta""" +816 94 training_loop """owa""" +816 94 negative_sampler """basic""" +816 94 evaluator """rankbased""" +816 95 dataset """kinships""" +816 95 model """transe""" +816 
95 loss """marginranking""" +816 95 regularizer """no""" +816 95 optimizer """adadelta""" +816 95 training_loop """owa""" +816 95 negative_sampler """basic""" +816 95 evaluator """rankbased""" +816 96 dataset """kinships""" +816 96 model """transe""" +816 96 loss """marginranking""" +816 96 regularizer """no""" +816 96 optimizer """adadelta""" +816 96 training_loop """owa""" +816 96 negative_sampler """basic""" +816 96 evaluator """rankbased""" +816 97 dataset """kinships""" +816 97 model """transe""" +816 97 loss """marginranking""" +816 97 regularizer """no""" +816 97 optimizer """adadelta""" +816 97 training_loop """owa""" +816 97 negative_sampler """basic""" +816 97 evaluator """rankbased""" +816 98 dataset """kinships""" +816 98 model """transe""" +816 98 loss """marginranking""" +816 98 regularizer """no""" +816 98 optimizer """adadelta""" +816 98 training_loop """owa""" +816 98 negative_sampler """basic""" +816 98 evaluator """rankbased""" +816 99 dataset """kinships""" +816 99 model """transe""" +816 99 loss """marginranking""" +816 99 regularizer """no""" +816 99 optimizer """adadelta""" +816 99 training_loop """owa""" +816 99 negative_sampler """basic""" +816 99 evaluator """rankbased""" +816 100 dataset """kinships""" +816 100 model """transe""" +816 100 loss """marginranking""" +816 100 regularizer """no""" +816 100 optimizer """adadelta""" +816 100 training_loop """owa""" +816 100 negative_sampler """basic""" +816 100 evaluator """rankbased""" +817 1 model.embedding_dim 0.0 +817 1 model.scoring_fct_norm 1.0 +817 1 loss.margin 9.144413410964756 +817 1 loss.adversarial_temperature 0.6735810099446442 +817 1 negative_sampler.num_negs_per_pos 68.0 +817 1 training.batch_size 1.0 +817 2 model.embedding_dim 1.0 +817 2 model.scoring_fct_norm 2.0 +817 2 loss.margin 11.714117149634887 +817 2 loss.adversarial_temperature 0.13424766677234568 +817 2 negative_sampler.num_negs_per_pos 22.0 +817 2 training.batch_size 0.0 +817 3 model.embedding_dim 2.0 +817 3 
model.scoring_fct_norm 2.0 +817 3 loss.margin 12.486543936888845 +817 3 loss.adversarial_temperature 0.8620188436546276 +817 3 negative_sampler.num_negs_per_pos 87.0 +817 3 training.batch_size 0.0 +817 4 model.embedding_dim 2.0 +817 4 model.scoring_fct_norm 2.0 +817 4 loss.margin 27.049126239203737 +817 4 loss.adversarial_temperature 0.5900052212600049 +817 4 negative_sampler.num_negs_per_pos 11.0 +817 4 training.batch_size 1.0 +817 5 model.embedding_dim 1.0 +817 5 model.scoring_fct_norm 2.0 +817 5 loss.margin 22.87146541107389 +817 5 loss.adversarial_temperature 0.5900078797529092 +817 5 negative_sampler.num_negs_per_pos 79.0 +817 5 training.batch_size 1.0 +817 6 model.embedding_dim 0.0 +817 6 model.scoring_fct_norm 2.0 +817 6 loss.margin 19.363016769040456 +817 6 loss.adversarial_temperature 0.41866629844055936 +817 6 negative_sampler.num_negs_per_pos 46.0 +817 6 training.batch_size 1.0 +817 7 model.embedding_dim 0.0 +817 7 model.scoring_fct_norm 1.0 +817 7 loss.margin 15.860167619903155 +817 7 loss.adversarial_temperature 0.6201675198977604 +817 7 negative_sampler.num_negs_per_pos 32.0 +817 7 training.batch_size 0.0 +817 8 model.embedding_dim 0.0 +817 8 model.scoring_fct_norm 1.0 +817 8 loss.margin 24.344644206737676 +817 8 loss.adversarial_temperature 0.9159282716350351 +817 8 negative_sampler.num_negs_per_pos 9.0 +817 8 training.batch_size 1.0 +817 9 model.embedding_dim 2.0 +817 9 model.scoring_fct_norm 2.0 +817 9 loss.margin 9.848973543975868 +817 9 loss.adversarial_temperature 0.6294847569139723 +817 9 negative_sampler.num_negs_per_pos 34.0 +817 9 training.batch_size 1.0 +817 10 model.embedding_dim 0.0 +817 10 model.scoring_fct_norm 1.0 +817 10 loss.margin 13.22874808879407 +817 10 loss.adversarial_temperature 0.5588420349140385 +817 10 negative_sampler.num_negs_per_pos 60.0 +817 10 training.batch_size 0.0 +817 11 model.embedding_dim 1.0 +817 11 model.scoring_fct_norm 2.0 +817 11 loss.margin 9.199841374153143 +817 11 loss.adversarial_temperature 
0.6575171721485465 +817 11 negative_sampler.num_negs_per_pos 32.0 +817 11 training.batch_size 0.0 +817 12 model.embedding_dim 0.0 +817 12 model.scoring_fct_norm 1.0 +817 12 loss.margin 19.355047240509926 +817 12 loss.adversarial_temperature 0.4751159639445681 +817 12 negative_sampler.num_negs_per_pos 15.0 +817 12 training.batch_size 0.0 +817 13 model.embedding_dim 1.0 +817 13 model.scoring_fct_norm 1.0 +817 13 loss.margin 16.874639693554986 +817 13 loss.adversarial_temperature 0.7919753018866696 +817 13 negative_sampler.num_negs_per_pos 97.0 +817 13 training.batch_size 1.0 +817 14 model.embedding_dim 2.0 +817 14 model.scoring_fct_norm 1.0 +817 14 loss.margin 5.182210438984759 +817 14 loss.adversarial_temperature 0.6950931525725951 +817 14 negative_sampler.num_negs_per_pos 15.0 +817 14 training.batch_size 0.0 +817 15 model.embedding_dim 2.0 +817 15 model.scoring_fct_norm 1.0 +817 15 loss.margin 7.445758148889772 +817 15 loss.adversarial_temperature 0.5543596144226703 +817 15 negative_sampler.num_negs_per_pos 41.0 +817 15 training.batch_size 1.0 +817 16 model.embedding_dim 0.0 +817 16 model.scoring_fct_norm 2.0 +817 16 loss.margin 13.801305555147847 +817 16 loss.adversarial_temperature 0.6425146837691642 +817 16 negative_sampler.num_negs_per_pos 12.0 +817 16 training.batch_size 0.0 +817 17 model.embedding_dim 1.0 +817 17 model.scoring_fct_norm 2.0 +817 17 loss.margin 16.61119957847664 +817 17 loss.adversarial_temperature 0.8748419912573903 +817 17 negative_sampler.num_negs_per_pos 58.0 +817 17 training.batch_size 0.0 +817 18 model.embedding_dim 0.0 +817 18 model.scoring_fct_norm 1.0 +817 18 loss.margin 10.429175660584956 +817 18 loss.adversarial_temperature 0.4319198394925895 +817 18 negative_sampler.num_negs_per_pos 30.0 +817 18 training.batch_size 2.0 +817 19 model.embedding_dim 1.0 +817 19 model.scoring_fct_norm 1.0 +817 19 loss.margin 14.440590032482772 +817 19 loss.adversarial_temperature 0.731068889639132 +817 19 negative_sampler.num_negs_per_pos 38.0 +817 19 
training.batch_size 1.0 +817 20 model.embedding_dim 1.0 +817 20 model.scoring_fct_norm 2.0 +817 20 loss.margin 2.5708936652290797 +817 20 loss.adversarial_temperature 0.3738625164767656 +817 20 negative_sampler.num_negs_per_pos 8.0 +817 20 training.batch_size 0.0 +817 21 model.embedding_dim 2.0 +817 21 model.scoring_fct_norm 2.0 +817 21 loss.margin 8.96153320337654 +817 21 loss.adversarial_temperature 0.7034545190994856 +817 21 negative_sampler.num_negs_per_pos 19.0 +817 21 training.batch_size 2.0 +817 22 model.embedding_dim 2.0 +817 22 model.scoring_fct_norm 1.0 +817 22 loss.margin 15.631757044292508 +817 22 loss.adversarial_temperature 0.5406934058099753 +817 22 negative_sampler.num_negs_per_pos 45.0 +817 22 training.batch_size 2.0 +817 23 model.embedding_dim 1.0 +817 23 model.scoring_fct_norm 2.0 +817 23 loss.margin 5.300737624337817 +817 23 loss.adversarial_temperature 0.3611079592093125 +817 23 negative_sampler.num_negs_per_pos 15.0 +817 23 training.batch_size 2.0 +817 24 model.embedding_dim 0.0 +817 24 model.scoring_fct_norm 1.0 +817 24 loss.margin 9.158772874647388 +817 24 loss.adversarial_temperature 0.24354276854680443 +817 24 negative_sampler.num_negs_per_pos 39.0 +817 24 training.batch_size 0.0 +817 25 model.embedding_dim 1.0 +817 25 model.scoring_fct_norm 1.0 +817 25 loss.margin 11.985615968612901 +817 25 loss.adversarial_temperature 0.13741214741528568 +817 25 negative_sampler.num_negs_per_pos 22.0 +817 25 training.batch_size 2.0 +817 26 model.embedding_dim 0.0 +817 26 model.scoring_fct_norm 1.0 +817 26 loss.margin 29.577594458464628 +817 26 loss.adversarial_temperature 0.671674560826353 +817 26 negative_sampler.num_negs_per_pos 64.0 +817 26 training.batch_size 2.0 +817 27 model.embedding_dim 2.0 +817 27 model.scoring_fct_norm 1.0 +817 27 loss.margin 1.4120579077453892 +817 27 loss.adversarial_temperature 0.5141671042550272 +817 27 negative_sampler.num_negs_per_pos 75.0 +817 27 training.batch_size 0.0 +817 28 model.embedding_dim 0.0 +817 28 
model.scoring_fct_norm 2.0 +817 28 loss.margin 23.892684444483297 +817 28 loss.adversarial_temperature 0.12459687169328022 +817 28 negative_sampler.num_negs_per_pos 54.0 +817 28 training.batch_size 1.0 +817 29 model.embedding_dim 2.0 +817 29 model.scoring_fct_norm 2.0 +817 29 loss.margin 9.601958295358394 +817 29 loss.adversarial_temperature 0.5592881599742772 +817 29 negative_sampler.num_negs_per_pos 91.0 +817 29 training.batch_size 0.0 +817 30 model.embedding_dim 2.0 +817 30 model.scoring_fct_norm 1.0 +817 30 loss.margin 12.548935541087841 +817 30 loss.adversarial_temperature 0.9244559034967071 +817 30 negative_sampler.num_negs_per_pos 3.0 +817 30 training.batch_size 1.0 +817 31 model.embedding_dim 1.0 +817 31 model.scoring_fct_norm 1.0 +817 31 loss.margin 8.9056369443498 +817 31 loss.adversarial_temperature 0.5052267524971061 +817 31 negative_sampler.num_negs_per_pos 33.0 +817 31 training.batch_size 0.0 +817 32 model.embedding_dim 2.0 +817 32 model.scoring_fct_norm 1.0 +817 32 loss.margin 11.60218461276889 +817 32 loss.adversarial_temperature 0.9265547701959467 +817 32 negative_sampler.num_negs_per_pos 79.0 +817 32 training.batch_size 1.0 +817 33 model.embedding_dim 1.0 +817 33 model.scoring_fct_norm 1.0 +817 33 loss.margin 28.842758809513654 +817 33 loss.adversarial_temperature 0.7090777123275455 +817 33 negative_sampler.num_negs_per_pos 49.0 +817 33 training.batch_size 1.0 +817 34 model.embedding_dim 2.0 +817 34 model.scoring_fct_norm 1.0 +817 34 loss.margin 22.503459398432334 +817 34 loss.adversarial_temperature 0.3738304107130159 +817 34 negative_sampler.num_negs_per_pos 78.0 +817 34 training.batch_size 0.0 +817 35 model.embedding_dim 2.0 +817 35 model.scoring_fct_norm 2.0 +817 35 loss.margin 13.869016744941858 +817 35 loss.adversarial_temperature 0.7927479265763944 +817 35 negative_sampler.num_negs_per_pos 31.0 +817 35 training.batch_size 0.0 +817 36 model.embedding_dim 2.0 +817 36 model.scoring_fct_norm 1.0 +817 36 loss.margin 5.257130111903726 +817 36 
loss.adversarial_temperature 0.5838611245062044 +817 36 negative_sampler.num_negs_per_pos 70.0 +817 36 training.batch_size 0.0 +817 37 model.embedding_dim 1.0 +817 37 model.scoring_fct_norm 2.0 +817 37 loss.margin 24.599592820235355 +817 37 loss.adversarial_temperature 0.4114144563287353 +817 37 negative_sampler.num_negs_per_pos 29.0 +817 37 training.batch_size 0.0 +817 38 model.embedding_dim 1.0 +817 38 model.scoring_fct_norm 2.0 +817 38 loss.margin 20.727499525897308 +817 38 loss.adversarial_temperature 0.8385943933621661 +817 38 negative_sampler.num_negs_per_pos 47.0 +817 38 training.batch_size 1.0 +817 39 model.embedding_dim 1.0 +817 39 model.scoring_fct_norm 1.0 +817 39 loss.margin 7.312583836586544 +817 39 loss.adversarial_temperature 0.764407410035721 +817 39 negative_sampler.num_negs_per_pos 25.0 +817 39 training.batch_size 2.0 +817 40 model.embedding_dim 1.0 +817 40 model.scoring_fct_norm 1.0 +817 40 loss.margin 9.62304072163697 +817 40 loss.adversarial_temperature 0.9262832368446284 +817 40 negative_sampler.num_negs_per_pos 66.0 +817 40 training.batch_size 1.0 +817 41 model.embedding_dim 1.0 +817 41 model.scoring_fct_norm 1.0 +817 41 loss.margin 24.863120593613672 +817 41 loss.adversarial_temperature 0.6247821569880108 +817 41 negative_sampler.num_negs_per_pos 33.0 +817 41 training.batch_size 2.0 +817 42 model.embedding_dim 1.0 +817 42 model.scoring_fct_norm 2.0 +817 42 loss.margin 25.099381130875816 +817 42 loss.adversarial_temperature 0.5679492249655046 +817 42 negative_sampler.num_negs_per_pos 15.0 +817 42 training.batch_size 1.0 +817 43 model.embedding_dim 0.0 +817 43 model.scoring_fct_norm 2.0 +817 43 loss.margin 10.83755422697149 +817 43 loss.adversarial_temperature 0.5031238160205102 +817 43 negative_sampler.num_negs_per_pos 71.0 +817 43 training.batch_size 2.0 +817 44 model.embedding_dim 0.0 +817 44 model.scoring_fct_norm 1.0 +817 44 loss.margin 16.87132153596501 +817 44 loss.adversarial_temperature 0.9316639808396032 +817 44 
negative_sampler.num_negs_per_pos 40.0 +817 44 training.batch_size 1.0 +817 45 model.embedding_dim 2.0 +817 45 model.scoring_fct_norm 2.0 +817 45 loss.margin 19.736835195558648 +817 45 loss.adversarial_temperature 0.912216572414962 +817 45 negative_sampler.num_negs_per_pos 73.0 +817 45 training.batch_size 2.0 +817 46 model.embedding_dim 1.0 +817 46 model.scoring_fct_norm 2.0 +817 46 loss.margin 29.51330663671521 +817 46 loss.adversarial_temperature 0.3812682747589877 +817 46 negative_sampler.num_negs_per_pos 87.0 +817 46 training.batch_size 0.0 +817 47 model.embedding_dim 2.0 +817 47 model.scoring_fct_norm 1.0 +817 47 loss.margin 9.92545331341012 +817 47 loss.adversarial_temperature 0.5588534963890833 +817 47 negative_sampler.num_negs_per_pos 51.0 +817 47 training.batch_size 0.0 +817 48 model.embedding_dim 1.0 +817 48 model.scoring_fct_norm 1.0 +817 48 loss.margin 28.133286203837585 +817 48 loss.adversarial_temperature 0.3637289426346212 +817 48 negative_sampler.num_negs_per_pos 33.0 +817 48 training.batch_size 2.0 +817 49 model.embedding_dim 2.0 +817 49 model.scoring_fct_norm 2.0 +817 49 loss.margin 22.659828257159532 +817 49 loss.adversarial_temperature 0.5987991767107982 +817 49 negative_sampler.num_negs_per_pos 23.0 +817 49 training.batch_size 0.0 +817 50 model.embedding_dim 1.0 +817 50 model.scoring_fct_norm 2.0 +817 50 loss.margin 19.775225029955216 +817 50 loss.adversarial_temperature 0.5091109413723823 +817 50 negative_sampler.num_negs_per_pos 49.0 +817 50 training.batch_size 0.0 +817 51 model.embedding_dim 2.0 +817 51 model.scoring_fct_norm 2.0 +817 51 loss.margin 17.31205071629092 +817 51 loss.adversarial_temperature 0.29536387644242523 +817 51 negative_sampler.num_negs_per_pos 83.0 +817 51 training.batch_size 0.0 +817 52 model.embedding_dim 2.0 +817 52 model.scoring_fct_norm 1.0 +817 52 loss.margin 13.94172267762662 +817 52 loss.adversarial_temperature 0.2972732672518478 +817 52 negative_sampler.num_negs_per_pos 7.0 +817 52 training.batch_size 2.0 +817 
53 model.embedding_dim 1.0 +817 53 model.scoring_fct_norm 2.0 +817 53 loss.margin 17.26592479618622 +817 53 loss.adversarial_temperature 0.8107899614128343 +817 53 negative_sampler.num_negs_per_pos 76.0 +817 53 training.batch_size 1.0 +817 54 model.embedding_dim 0.0 +817 54 model.scoring_fct_norm 2.0 +817 54 loss.margin 23.20767080102363 +817 54 loss.adversarial_temperature 0.6284356158900938 +817 54 negative_sampler.num_negs_per_pos 80.0 +817 54 training.batch_size 2.0 +817 55 model.embedding_dim 1.0 +817 55 model.scoring_fct_norm 2.0 +817 55 loss.margin 23.819778338139407 +817 55 loss.adversarial_temperature 0.4860764600919274 +817 55 negative_sampler.num_negs_per_pos 55.0 +817 55 training.batch_size 0.0 +817 56 model.embedding_dim 2.0 +817 56 model.scoring_fct_norm 2.0 +817 56 loss.margin 12.641388713688233 +817 56 loss.adversarial_temperature 0.5040206635828562 +817 56 negative_sampler.num_negs_per_pos 92.0 +817 56 training.batch_size 1.0 +817 57 model.embedding_dim 1.0 +817 57 model.scoring_fct_norm 1.0 +817 57 loss.margin 4.212448674177246 +817 57 loss.adversarial_temperature 0.5862084617495843 +817 57 negative_sampler.num_negs_per_pos 38.0 +817 57 training.batch_size 2.0 +817 58 model.embedding_dim 0.0 +817 58 model.scoring_fct_norm 2.0 +817 58 loss.margin 12.121944830887493 +817 58 loss.adversarial_temperature 0.6331137467218277 +817 58 negative_sampler.num_negs_per_pos 98.0 +817 58 training.batch_size 1.0 +817 59 model.embedding_dim 0.0 +817 59 model.scoring_fct_norm 2.0 +817 59 loss.margin 9.1446525144503 +817 59 loss.adversarial_temperature 0.3114255170615496 +817 59 negative_sampler.num_negs_per_pos 82.0 +817 59 training.batch_size 0.0 +817 60 model.embedding_dim 2.0 +817 60 model.scoring_fct_norm 2.0 +817 60 loss.margin 8.731684209538702 +817 60 loss.adversarial_temperature 0.965658757767628 +817 60 negative_sampler.num_negs_per_pos 44.0 +817 60 training.batch_size 2.0 +817 61 model.embedding_dim 1.0 +817 61 model.scoring_fct_norm 1.0 +817 61 
loss.margin 4.7088690848307255 +817 61 loss.adversarial_temperature 0.9662882858585502 +817 61 negative_sampler.num_negs_per_pos 49.0 +817 61 training.batch_size 0.0 +817 62 model.embedding_dim 1.0 +817 62 model.scoring_fct_norm 2.0 +817 62 loss.margin 2.6080468807775015 +817 62 loss.adversarial_temperature 0.6547454947900759 +817 62 negative_sampler.num_negs_per_pos 35.0 +817 62 training.batch_size 0.0 +817 63 model.embedding_dim 1.0 +817 63 model.scoring_fct_norm 1.0 +817 63 loss.margin 26.8051316540326 +817 63 loss.adversarial_temperature 0.6360318949846059 +817 63 negative_sampler.num_negs_per_pos 36.0 +817 63 training.batch_size 0.0 +817 64 model.embedding_dim 1.0 +817 64 model.scoring_fct_norm 2.0 +817 64 loss.margin 21.614325314803303 +817 64 loss.adversarial_temperature 0.6441571575702917 +817 64 negative_sampler.num_negs_per_pos 58.0 +817 64 training.batch_size 0.0 +817 65 model.embedding_dim 2.0 +817 65 model.scoring_fct_norm 1.0 +817 65 loss.margin 28.639065812112026 +817 65 loss.adversarial_temperature 0.5774725217619234 +817 65 negative_sampler.num_negs_per_pos 67.0 +817 65 training.batch_size 0.0 +817 66 model.embedding_dim 2.0 +817 66 model.scoring_fct_norm 1.0 +817 66 loss.margin 13.681204230438272 +817 66 loss.adversarial_temperature 0.90987723551864 +817 66 negative_sampler.num_negs_per_pos 76.0 +817 66 training.batch_size 2.0 +817 67 model.embedding_dim 0.0 +817 67 model.scoring_fct_norm 1.0 +817 67 loss.margin 21.569033832894355 +817 67 loss.adversarial_temperature 0.21293437272435684 +817 67 negative_sampler.num_negs_per_pos 33.0 +817 67 training.batch_size 2.0 +817 68 model.embedding_dim 0.0 +817 68 model.scoring_fct_norm 1.0 +817 68 loss.margin 23.651003898674965 +817 68 loss.adversarial_temperature 0.3439169182092252 +817 68 negative_sampler.num_negs_per_pos 10.0 +817 68 training.batch_size 1.0 +817 69 model.embedding_dim 0.0 +817 69 model.scoring_fct_norm 2.0 +817 69 loss.margin 15.535797650444739 +817 69 loss.adversarial_temperature 
0.9211007062765182 +817 69 negative_sampler.num_negs_per_pos 0.0 +817 69 training.batch_size 2.0 +817 70 model.embedding_dim 1.0 +817 70 model.scoring_fct_norm 1.0 +817 70 loss.margin 9.453861853741683 +817 70 loss.adversarial_temperature 0.3086315357208173 +817 70 negative_sampler.num_negs_per_pos 6.0 +817 70 training.batch_size 2.0 +817 71 model.embedding_dim 1.0 +817 71 model.scoring_fct_norm 1.0 +817 71 loss.margin 26.34336510629966 +817 71 loss.adversarial_temperature 0.47348009983526773 +817 71 negative_sampler.num_negs_per_pos 23.0 +817 71 training.batch_size 2.0 +817 72 model.embedding_dim 0.0 +817 72 model.scoring_fct_norm 2.0 +817 72 loss.margin 28.134361807000595 +817 72 loss.adversarial_temperature 0.43166610549867823 +817 72 negative_sampler.num_negs_per_pos 62.0 +817 72 training.batch_size 0.0 +817 73 model.embedding_dim 2.0 +817 73 model.scoring_fct_norm 2.0 +817 73 loss.margin 15.603918752100084 +817 73 loss.adversarial_temperature 0.40873674343694577 +817 73 negative_sampler.num_negs_per_pos 9.0 +817 73 training.batch_size 1.0 +817 74 model.embedding_dim 1.0 +817 74 model.scoring_fct_norm 1.0 +817 74 loss.margin 6.98274139354394 +817 74 loss.adversarial_temperature 0.6891004063337439 +817 74 negative_sampler.num_negs_per_pos 2.0 +817 74 training.batch_size 1.0 +817 75 model.embedding_dim 1.0 +817 75 model.scoring_fct_norm 1.0 +817 75 loss.margin 24.677872531394126 +817 75 loss.adversarial_temperature 0.9308722330836449 +817 75 negative_sampler.num_negs_per_pos 6.0 +817 75 training.batch_size 2.0 +817 76 model.embedding_dim 2.0 +817 76 model.scoring_fct_norm 1.0 +817 76 loss.margin 13.958019271050222 +817 76 loss.adversarial_temperature 0.25221281889058084 +817 76 negative_sampler.num_negs_per_pos 80.0 +817 76 training.batch_size 1.0 +817 77 model.embedding_dim 0.0 +817 77 model.scoring_fct_norm 2.0 +817 77 loss.margin 23.30490049149196 +817 77 loss.adversarial_temperature 0.28303013560229373 +817 77 negative_sampler.num_negs_per_pos 54.0 +817 77 
training.batch_size 0.0 +817 78 model.embedding_dim 1.0 +817 78 model.scoring_fct_norm 1.0 +817 78 loss.margin 19.41368100596219 +817 78 loss.adversarial_temperature 0.7223809693312733 +817 78 negative_sampler.num_negs_per_pos 16.0 +817 78 training.batch_size 1.0 +817 79 model.embedding_dim 1.0 +817 79 model.scoring_fct_norm 2.0 +817 79 loss.margin 29.387263073082288 +817 79 loss.adversarial_temperature 0.6419005913026546 +817 79 negative_sampler.num_negs_per_pos 67.0 +817 79 training.batch_size 1.0 +817 80 model.embedding_dim 2.0 +817 80 model.scoring_fct_norm 2.0 +817 80 loss.margin 8.357989276243515 +817 80 loss.adversarial_temperature 0.14962853181344668 +817 80 negative_sampler.num_negs_per_pos 91.0 +817 80 training.batch_size 0.0 +817 81 model.embedding_dim 1.0 +817 81 model.scoring_fct_norm 2.0 +817 81 loss.margin 17.71603420718864 +817 81 loss.adversarial_temperature 0.2652186755328682 +817 81 negative_sampler.num_negs_per_pos 70.0 +817 81 training.batch_size 1.0 +817 82 model.embedding_dim 2.0 +817 82 model.scoring_fct_norm 1.0 +817 82 loss.margin 20.394027981110654 +817 82 loss.adversarial_temperature 0.7088658437971339 +817 82 negative_sampler.num_negs_per_pos 9.0 +817 82 training.batch_size 1.0 +817 83 model.embedding_dim 0.0 +817 83 model.scoring_fct_norm 1.0 +817 83 loss.margin 10.90001478387482 +817 83 loss.adversarial_temperature 0.3772430175829231 +817 83 negative_sampler.num_negs_per_pos 49.0 +817 83 training.batch_size 2.0 +817 84 model.embedding_dim 2.0 +817 84 model.scoring_fct_norm 2.0 +817 84 loss.margin 15.075651609174333 +817 84 loss.adversarial_temperature 0.2726041013620843 +817 84 negative_sampler.num_negs_per_pos 24.0 +817 84 training.batch_size 0.0 +817 85 model.embedding_dim 0.0 +817 85 model.scoring_fct_norm 2.0 +817 85 loss.margin 9.01899859419145 +817 85 loss.adversarial_temperature 0.136219386842749 +817 85 negative_sampler.num_negs_per_pos 40.0 +817 85 training.batch_size 2.0 +817 86 model.embedding_dim 0.0 +817 86 
model.scoring_fct_norm 2.0 +817 86 loss.margin 18.027872113607103 +817 86 loss.adversarial_temperature 0.7583281986517565 +817 86 negative_sampler.num_negs_per_pos 32.0 +817 86 training.batch_size 1.0 +817 87 model.embedding_dim 2.0 +817 87 model.scoring_fct_norm 1.0 +817 87 loss.margin 12.348951730713136 +817 87 loss.adversarial_temperature 0.5295394478962497 +817 87 negative_sampler.num_negs_per_pos 99.0 +817 87 training.batch_size 1.0 +817 88 model.embedding_dim 0.0 +817 88 model.scoring_fct_norm 2.0 +817 88 loss.margin 20.525591146672152 +817 88 loss.adversarial_temperature 0.9213048760135072 +817 88 negative_sampler.num_negs_per_pos 71.0 +817 88 training.batch_size 2.0 +817 89 model.embedding_dim 2.0 +817 89 model.scoring_fct_norm 2.0 +817 89 loss.margin 7.686234742503825 +817 89 loss.adversarial_temperature 0.7818945118282962 +817 89 negative_sampler.num_negs_per_pos 44.0 +817 89 training.batch_size 2.0 +817 90 model.embedding_dim 1.0 +817 90 model.scoring_fct_norm 1.0 +817 90 loss.margin 29.915181421374477 +817 90 loss.adversarial_temperature 0.2866017704729883 +817 90 negative_sampler.num_negs_per_pos 59.0 +817 90 training.batch_size 2.0 +817 91 model.embedding_dim 0.0 +817 91 model.scoring_fct_norm 2.0 +817 91 loss.margin 16.127709307977437 +817 91 loss.adversarial_temperature 0.19662525965763522 +817 91 negative_sampler.num_negs_per_pos 27.0 +817 91 training.batch_size 1.0 +817 92 model.embedding_dim 1.0 +817 92 model.scoring_fct_norm 2.0 +817 92 loss.margin 15.846935598271754 +817 92 loss.adversarial_temperature 0.9497103698458806 +817 92 negative_sampler.num_negs_per_pos 23.0 +817 92 training.batch_size 0.0 +817 93 model.embedding_dim 0.0 +817 93 model.scoring_fct_norm 2.0 +817 93 loss.margin 22.4519732401439 +817 93 loss.adversarial_temperature 0.9445615499130448 +817 93 negative_sampler.num_negs_per_pos 29.0 +817 93 training.batch_size 2.0 +817 94 model.embedding_dim 0.0 +817 94 model.scoring_fct_norm 2.0 +817 94 loss.margin 23.625731007454153 +817 94 
loss.adversarial_temperature 0.14528165627125703 +817 94 negative_sampler.num_negs_per_pos 31.0 +817 94 training.batch_size 1.0 +817 95 model.embedding_dim 1.0 +817 95 model.scoring_fct_norm 1.0 +817 95 loss.margin 7.559510895835937 +817 95 loss.adversarial_temperature 0.3523641364499379 +817 95 negative_sampler.num_negs_per_pos 72.0 +817 95 training.batch_size 2.0 +817 96 model.embedding_dim 1.0 +817 96 model.scoring_fct_norm 1.0 +817 96 loss.margin 6.168602167145267 +817 96 loss.adversarial_temperature 0.23912526492611683 +817 96 negative_sampler.num_negs_per_pos 79.0 +817 96 training.batch_size 0.0 +817 97 model.embedding_dim 0.0 +817 97 model.scoring_fct_norm 1.0 +817 97 loss.margin 15.794489352121476 +817 97 loss.adversarial_temperature 0.48090211857602616 +817 97 negative_sampler.num_negs_per_pos 35.0 +817 97 training.batch_size 0.0 +817 98 model.embedding_dim 2.0 +817 98 model.scoring_fct_norm 2.0 +817 98 loss.margin 12.49445224384183 +817 98 loss.adversarial_temperature 0.2797311977545699 +817 98 negative_sampler.num_negs_per_pos 25.0 +817 98 training.batch_size 0.0 +817 99 model.embedding_dim 1.0 +817 99 model.scoring_fct_norm 1.0 +817 99 loss.margin 10.792333102098759 +817 99 loss.adversarial_temperature 0.1360319181469617 +817 99 negative_sampler.num_negs_per_pos 55.0 +817 99 training.batch_size 0.0 +817 100 model.embedding_dim 0.0 +817 100 model.scoring_fct_norm 1.0 +817 100 loss.margin 24.070121502085886 +817 100 loss.adversarial_temperature 0.6243896754846912 +817 100 negative_sampler.num_negs_per_pos 76.0 +817 100 training.batch_size 2.0 +817 1 dataset """kinships""" +817 1 model """transe""" +817 1 loss """nssa""" +817 1 regularizer """no""" +817 1 optimizer """adadelta""" +817 1 training_loop """owa""" +817 1 negative_sampler """basic""" +817 1 evaluator """rankbased""" +817 2 dataset """kinships""" +817 2 model """transe""" +817 2 loss """nssa""" +817 2 regularizer """no""" +817 2 optimizer """adadelta""" +817 2 training_loop """owa""" +817 2 
negative_sampler """basic""" +817 2 evaluator """rankbased""" +817 3 dataset """kinships""" +817 3 model """transe""" +817 3 loss """nssa""" +817 3 regularizer """no""" +817 3 optimizer """adadelta""" +817 3 training_loop """owa""" +817 3 negative_sampler """basic""" +817 3 evaluator """rankbased""" +817 4 dataset """kinships""" +817 4 model """transe""" +817 4 loss """nssa""" +817 4 regularizer """no""" +817 4 optimizer """adadelta""" +817 4 training_loop """owa""" +817 4 negative_sampler """basic""" +817 4 evaluator """rankbased""" +817 5 dataset """kinships""" +817 5 model """transe""" +817 5 loss """nssa""" +817 5 regularizer """no""" +817 5 optimizer """adadelta""" +817 5 training_loop """owa""" +817 5 negative_sampler """basic""" +817 5 evaluator """rankbased""" +817 6 dataset """kinships""" +817 6 model """transe""" +817 6 loss """nssa""" +817 6 regularizer """no""" +817 6 optimizer """adadelta""" +817 6 training_loop """owa""" +817 6 negative_sampler """basic""" +817 6 evaluator """rankbased""" +817 7 dataset """kinships""" +817 7 model """transe""" +817 7 loss """nssa""" +817 7 regularizer """no""" +817 7 optimizer """adadelta""" +817 7 training_loop """owa""" +817 7 negative_sampler """basic""" +817 7 evaluator """rankbased""" +817 8 dataset """kinships""" +817 8 model """transe""" +817 8 loss """nssa""" +817 8 regularizer """no""" +817 8 optimizer """adadelta""" +817 8 training_loop """owa""" +817 8 negative_sampler """basic""" +817 8 evaluator """rankbased""" +817 9 dataset """kinships""" +817 9 model """transe""" +817 9 loss """nssa""" +817 9 regularizer """no""" +817 9 optimizer """adadelta""" +817 9 training_loop """owa""" +817 9 negative_sampler """basic""" +817 9 evaluator """rankbased""" +817 10 dataset """kinships""" +817 10 model """transe""" +817 10 loss """nssa""" +817 10 regularizer """no""" +817 10 optimizer """adadelta""" +817 10 training_loop """owa""" +817 10 negative_sampler """basic""" +817 10 evaluator """rankbased""" +817 11 dataset 
"""kinships""" +817 11 model """transe""" +817 11 loss """nssa""" +817 11 regularizer """no""" +817 11 optimizer """adadelta""" +817 11 training_loop """owa""" +817 11 negative_sampler """basic""" +817 11 evaluator """rankbased""" +817 12 dataset """kinships""" +817 12 model """transe""" +817 12 loss """nssa""" +817 12 regularizer """no""" +817 12 optimizer """adadelta""" +817 12 training_loop """owa""" +817 12 negative_sampler """basic""" +817 12 evaluator """rankbased""" +817 13 dataset """kinships""" +817 13 model """transe""" +817 13 loss """nssa""" +817 13 regularizer """no""" +817 13 optimizer """adadelta""" +817 13 training_loop """owa""" +817 13 negative_sampler """basic""" +817 13 evaluator """rankbased""" +817 14 dataset """kinships""" +817 14 model """transe""" +817 14 loss """nssa""" +817 14 regularizer """no""" +817 14 optimizer """adadelta""" +817 14 training_loop """owa""" +817 14 negative_sampler """basic""" +817 14 evaluator """rankbased""" +817 15 dataset """kinships""" +817 15 model """transe""" +817 15 loss """nssa""" +817 15 regularizer """no""" +817 15 optimizer """adadelta""" +817 15 training_loop """owa""" +817 15 negative_sampler """basic""" +817 15 evaluator """rankbased""" +817 16 dataset """kinships""" +817 16 model """transe""" +817 16 loss """nssa""" +817 16 regularizer """no""" +817 16 optimizer """adadelta""" +817 16 training_loop """owa""" +817 16 negative_sampler """basic""" +817 16 evaluator """rankbased""" +817 17 dataset """kinships""" +817 17 model """transe""" +817 17 loss """nssa""" +817 17 regularizer """no""" +817 17 optimizer """adadelta""" +817 17 training_loop """owa""" +817 17 negative_sampler """basic""" +817 17 evaluator """rankbased""" +817 18 dataset """kinships""" +817 18 model """transe""" +817 18 loss """nssa""" +817 18 regularizer """no""" +817 18 optimizer """adadelta""" +817 18 training_loop """owa""" +817 18 negative_sampler """basic""" +817 18 evaluator """rankbased""" +817 19 dataset """kinships""" +817 19 
model """transe""" +817 19 loss """nssa""" +817 19 regularizer """no""" +817 19 optimizer """adadelta""" +817 19 training_loop """owa""" +817 19 negative_sampler """basic""" +817 19 evaluator """rankbased""" +817 20 dataset """kinships""" +817 20 model """transe""" +817 20 loss """nssa""" +817 20 regularizer """no""" +817 20 optimizer """adadelta""" +817 20 training_loop """owa""" +817 20 negative_sampler """basic""" +817 20 evaluator """rankbased""" +817 21 dataset """kinships""" +817 21 model """transe""" +817 21 loss """nssa""" +817 21 regularizer """no""" +817 21 optimizer """adadelta""" +817 21 training_loop """owa""" +817 21 negative_sampler """basic""" +817 21 evaluator """rankbased""" +817 22 dataset """kinships""" +817 22 model """transe""" +817 22 loss """nssa""" +817 22 regularizer """no""" +817 22 optimizer """adadelta""" +817 22 training_loop """owa""" +817 22 negative_sampler """basic""" +817 22 evaluator """rankbased""" +817 23 dataset """kinships""" +817 23 model """transe""" +817 23 loss """nssa""" +817 23 regularizer """no""" +817 23 optimizer """adadelta""" +817 23 training_loop """owa""" +817 23 negative_sampler """basic""" +817 23 evaluator """rankbased""" +817 24 dataset """kinships""" +817 24 model """transe""" +817 24 loss """nssa""" +817 24 regularizer """no""" +817 24 optimizer """adadelta""" +817 24 training_loop """owa""" +817 24 negative_sampler """basic""" +817 24 evaluator """rankbased""" +817 25 dataset """kinships""" +817 25 model """transe""" +817 25 loss """nssa""" +817 25 regularizer """no""" +817 25 optimizer """adadelta""" +817 25 training_loop """owa""" +817 25 negative_sampler """basic""" +817 25 evaluator """rankbased""" +817 26 dataset """kinships""" +817 26 model """transe""" +817 26 loss """nssa""" +817 26 regularizer """no""" +817 26 optimizer """adadelta""" +817 26 training_loop """owa""" +817 26 negative_sampler """basic""" +817 26 evaluator """rankbased""" +817 27 dataset """kinships""" +817 27 model """transe""" +817 
27 loss """nssa""" +817 27 regularizer """no""" +817 27 optimizer """adadelta""" +817 27 training_loop """owa""" +817 27 negative_sampler """basic""" +817 27 evaluator """rankbased""" +817 28 dataset """kinships""" +817 28 model """transe""" +817 28 loss """nssa""" +817 28 regularizer """no""" +817 28 optimizer """adadelta""" +817 28 training_loop """owa""" +817 28 negative_sampler """basic""" +817 28 evaluator """rankbased""" +817 29 dataset """kinships""" +817 29 model """transe""" +817 29 loss """nssa""" +817 29 regularizer """no""" +817 29 optimizer """adadelta""" +817 29 training_loop """owa""" +817 29 negative_sampler """basic""" +817 29 evaluator """rankbased""" +817 30 dataset """kinships""" +817 30 model """transe""" +817 30 loss """nssa""" +817 30 regularizer """no""" +817 30 optimizer """adadelta""" +817 30 training_loop """owa""" +817 30 negative_sampler """basic""" +817 30 evaluator """rankbased""" +817 31 dataset """kinships""" +817 31 model """transe""" +817 31 loss """nssa""" +817 31 regularizer """no""" +817 31 optimizer """adadelta""" +817 31 training_loop """owa""" +817 31 negative_sampler """basic""" +817 31 evaluator """rankbased""" +817 32 dataset """kinships""" +817 32 model """transe""" +817 32 loss """nssa""" +817 32 regularizer """no""" +817 32 optimizer """adadelta""" +817 32 training_loop """owa""" +817 32 negative_sampler """basic""" +817 32 evaluator """rankbased""" +817 33 dataset """kinships""" +817 33 model """transe""" +817 33 loss """nssa""" +817 33 regularizer """no""" +817 33 optimizer """adadelta""" +817 33 training_loop """owa""" +817 33 negative_sampler """basic""" +817 33 evaluator """rankbased""" +817 34 dataset """kinships""" +817 34 model """transe""" +817 34 loss """nssa""" +817 34 regularizer """no""" +817 34 optimizer """adadelta""" +817 34 training_loop """owa""" +817 34 negative_sampler """basic""" +817 34 evaluator """rankbased""" +817 35 dataset """kinships""" +817 35 model """transe""" +817 35 loss """nssa""" +817 
35 regularizer """no""" +817 35 optimizer """adadelta""" +817 35 training_loop """owa""" +817 35 negative_sampler """basic""" +817 35 evaluator """rankbased""" +817 36 dataset """kinships""" +817 36 model """transe""" +817 36 loss """nssa""" +817 36 regularizer """no""" +817 36 optimizer """adadelta""" +817 36 training_loop """owa""" +817 36 negative_sampler """basic""" +817 36 evaluator """rankbased""" +817 37 dataset """kinships""" +817 37 model """transe""" +817 37 loss """nssa""" +817 37 regularizer """no""" +817 37 optimizer """adadelta""" +817 37 training_loop """owa""" +817 37 negative_sampler """basic""" +817 37 evaluator """rankbased""" +817 38 dataset """kinships""" +817 38 model """transe""" +817 38 loss """nssa""" +817 38 regularizer """no""" +817 38 optimizer """adadelta""" +817 38 training_loop """owa""" +817 38 negative_sampler """basic""" +817 38 evaluator """rankbased""" +817 39 dataset """kinships""" +817 39 model """transe""" +817 39 loss """nssa""" +817 39 regularizer """no""" +817 39 optimizer """adadelta""" +817 39 training_loop """owa""" +817 39 negative_sampler """basic""" +817 39 evaluator """rankbased""" +817 40 dataset """kinships""" +817 40 model """transe""" +817 40 loss """nssa""" +817 40 regularizer """no""" +817 40 optimizer """adadelta""" +817 40 training_loop """owa""" +817 40 negative_sampler """basic""" +817 40 evaluator """rankbased""" +817 41 dataset """kinships""" +817 41 model """transe""" +817 41 loss """nssa""" +817 41 regularizer """no""" +817 41 optimizer """adadelta""" +817 41 training_loop """owa""" +817 41 negative_sampler """basic""" +817 41 evaluator """rankbased""" +817 42 dataset """kinships""" +817 42 model """transe""" +817 42 loss """nssa""" +817 42 regularizer """no""" +817 42 optimizer """adadelta""" +817 42 training_loop """owa""" +817 42 negative_sampler """basic""" +817 42 evaluator """rankbased""" +817 43 dataset """kinships""" +817 43 model """transe""" +817 43 loss """nssa""" +817 43 regularizer """no""" 
+817 43 optimizer """adadelta""" +817 43 training_loop """owa""" +817 43 negative_sampler """basic""" +817 43 evaluator """rankbased""" +817 44 dataset """kinships""" +817 44 model """transe""" +817 44 loss """nssa""" +817 44 regularizer """no""" +817 44 optimizer """adadelta""" +817 44 training_loop """owa""" +817 44 negative_sampler """basic""" +817 44 evaluator """rankbased""" +817 45 dataset """kinships""" +817 45 model """transe""" +817 45 loss """nssa""" +817 45 regularizer """no""" +817 45 optimizer """adadelta""" +817 45 training_loop """owa""" +817 45 negative_sampler """basic""" +817 45 evaluator """rankbased""" +817 46 dataset """kinships""" +817 46 model """transe""" +817 46 loss """nssa""" +817 46 regularizer """no""" +817 46 optimizer """adadelta""" +817 46 training_loop """owa""" +817 46 negative_sampler """basic""" +817 46 evaluator """rankbased""" +817 47 dataset """kinships""" +817 47 model """transe""" +817 47 loss """nssa""" +817 47 regularizer """no""" +817 47 optimizer """adadelta""" +817 47 training_loop """owa""" +817 47 negative_sampler """basic""" +817 47 evaluator """rankbased""" +817 48 dataset """kinships""" +817 48 model """transe""" +817 48 loss """nssa""" +817 48 regularizer """no""" +817 48 optimizer """adadelta""" +817 48 training_loop """owa""" +817 48 negative_sampler """basic""" +817 48 evaluator """rankbased""" +817 49 dataset """kinships""" +817 49 model """transe""" +817 49 loss """nssa""" +817 49 regularizer """no""" +817 49 optimizer """adadelta""" +817 49 training_loop """owa""" +817 49 negative_sampler """basic""" +817 49 evaluator """rankbased""" +817 50 dataset """kinships""" +817 50 model """transe""" +817 50 loss """nssa""" +817 50 regularizer """no""" +817 50 optimizer """adadelta""" +817 50 training_loop """owa""" +817 50 negative_sampler """basic""" +817 50 evaluator """rankbased""" +817 51 dataset """kinships""" +817 51 model """transe""" +817 51 loss """nssa""" +817 51 regularizer """no""" +817 51 optimizer 
"""adadelta""" +817 51 training_loop """owa""" +817 51 negative_sampler """basic""" +817 51 evaluator """rankbased""" +817 52 dataset """kinships""" +817 52 model """transe""" +817 52 loss """nssa""" +817 52 regularizer """no""" +817 52 optimizer """adadelta""" +817 52 training_loop """owa""" +817 52 negative_sampler """basic""" +817 52 evaluator """rankbased""" +817 53 dataset """kinships""" +817 53 model """transe""" +817 53 loss """nssa""" +817 53 regularizer """no""" +817 53 optimizer """adadelta""" +817 53 training_loop """owa""" +817 53 negative_sampler """basic""" +817 53 evaluator """rankbased""" +817 54 dataset """kinships""" +817 54 model """transe""" +817 54 loss """nssa""" +817 54 regularizer """no""" +817 54 optimizer """adadelta""" +817 54 training_loop """owa""" +817 54 negative_sampler """basic""" +817 54 evaluator """rankbased""" +817 55 dataset """kinships""" +817 55 model """transe""" +817 55 loss """nssa""" +817 55 regularizer """no""" +817 55 optimizer """adadelta""" +817 55 training_loop """owa""" +817 55 negative_sampler """basic""" +817 55 evaluator """rankbased""" +817 56 dataset """kinships""" +817 56 model """transe""" +817 56 loss """nssa""" +817 56 regularizer """no""" +817 56 optimizer """adadelta""" +817 56 training_loop """owa""" +817 56 negative_sampler """basic""" +817 56 evaluator """rankbased""" +817 57 dataset """kinships""" +817 57 model """transe""" +817 57 loss """nssa""" +817 57 regularizer """no""" +817 57 optimizer """adadelta""" +817 57 training_loop """owa""" +817 57 negative_sampler """basic""" +817 57 evaluator """rankbased""" +817 58 dataset """kinships""" +817 58 model """transe""" +817 58 loss """nssa""" +817 58 regularizer """no""" +817 58 optimizer """adadelta""" +817 58 training_loop """owa""" +817 58 negative_sampler """basic""" +817 58 evaluator """rankbased""" +817 59 dataset """kinships""" +817 59 model """transe""" +817 59 loss """nssa""" +817 59 regularizer """no""" +817 59 optimizer """adadelta""" +817 59 
training_loop """owa""" +817 59 negative_sampler """basic""" +817 59 evaluator """rankbased""" +817 60 dataset """kinships""" +817 60 model """transe""" +817 60 loss """nssa""" +817 60 regularizer """no""" +817 60 optimizer """adadelta""" +817 60 training_loop """owa""" +817 60 negative_sampler """basic""" +817 60 evaluator """rankbased""" +817 61 dataset """kinships""" +817 61 model """transe""" +817 61 loss """nssa""" +817 61 regularizer """no""" +817 61 optimizer """adadelta""" +817 61 training_loop """owa""" +817 61 negative_sampler """basic""" +817 61 evaluator """rankbased""" +817 62 dataset """kinships""" +817 62 model """transe""" +817 62 loss """nssa""" +817 62 regularizer """no""" +817 62 optimizer """adadelta""" +817 62 training_loop """owa""" +817 62 negative_sampler """basic""" +817 62 evaluator """rankbased""" +817 63 dataset """kinships""" +817 63 model """transe""" +817 63 loss """nssa""" +817 63 regularizer """no""" +817 63 optimizer """adadelta""" +817 63 training_loop """owa""" +817 63 negative_sampler """basic""" +817 63 evaluator """rankbased""" +817 64 dataset """kinships""" +817 64 model """transe""" +817 64 loss """nssa""" +817 64 regularizer """no""" +817 64 optimizer """adadelta""" +817 64 training_loop """owa""" +817 64 negative_sampler """basic""" +817 64 evaluator """rankbased""" +817 65 dataset """kinships""" +817 65 model """transe""" +817 65 loss """nssa""" +817 65 regularizer """no""" +817 65 optimizer """adadelta""" +817 65 training_loop """owa""" +817 65 negative_sampler """basic""" +817 65 evaluator """rankbased""" +817 66 dataset """kinships""" +817 66 model """transe""" +817 66 loss """nssa""" +817 66 regularizer """no""" +817 66 optimizer """adadelta""" +817 66 training_loop """owa""" +817 66 negative_sampler """basic""" +817 66 evaluator """rankbased""" +817 67 dataset """kinships""" +817 67 model """transe""" +817 67 loss """nssa""" +817 67 regularizer """no""" +817 67 optimizer """adadelta""" +817 67 training_loop """owa""" 
+817 67 negative_sampler """basic""" +817 67 evaluator """rankbased""" +817 68 dataset """kinships""" +817 68 model """transe""" +817 68 loss """nssa""" +817 68 regularizer """no""" +817 68 optimizer """adadelta""" +817 68 training_loop """owa""" +817 68 negative_sampler """basic""" +817 68 evaluator """rankbased""" +817 69 dataset """kinships""" +817 69 model """transe""" +817 69 loss """nssa""" +817 69 regularizer """no""" +817 69 optimizer """adadelta""" +817 69 training_loop """owa""" +817 69 negative_sampler """basic""" +817 69 evaluator """rankbased""" +817 70 dataset """kinships""" +817 70 model """transe""" +817 70 loss """nssa""" +817 70 regularizer """no""" +817 70 optimizer """adadelta""" +817 70 training_loop """owa""" +817 70 negative_sampler """basic""" +817 70 evaluator """rankbased""" +817 71 dataset """kinships""" +817 71 model """transe""" +817 71 loss """nssa""" +817 71 regularizer """no""" +817 71 optimizer """adadelta""" +817 71 training_loop """owa""" +817 71 negative_sampler """basic""" +817 71 evaluator """rankbased""" +817 72 dataset """kinships""" +817 72 model """transe""" +817 72 loss """nssa""" +817 72 regularizer """no""" +817 72 optimizer """adadelta""" +817 72 training_loop """owa""" +817 72 negative_sampler """basic""" +817 72 evaluator """rankbased""" +817 73 dataset """kinships""" +817 73 model """transe""" +817 73 loss """nssa""" +817 73 regularizer """no""" +817 73 optimizer """adadelta""" +817 73 training_loop """owa""" +817 73 negative_sampler """basic""" +817 73 evaluator """rankbased""" +817 74 dataset """kinships""" +817 74 model """transe""" +817 74 loss """nssa""" +817 74 regularizer """no""" +817 74 optimizer """adadelta""" +817 74 training_loop """owa""" +817 74 negative_sampler """basic""" +817 74 evaluator """rankbased""" +817 75 dataset """kinships""" +817 75 model """transe""" +817 75 loss """nssa""" +817 75 regularizer """no""" +817 75 optimizer """adadelta""" +817 75 training_loop """owa""" +817 75 
negative_sampler """basic""" +817 75 evaluator """rankbased""" +817 76 dataset """kinships""" +817 76 model """transe""" +817 76 loss """nssa""" +817 76 regularizer """no""" +817 76 optimizer """adadelta""" +817 76 training_loop """owa""" +817 76 negative_sampler """basic""" +817 76 evaluator """rankbased""" +817 77 dataset """kinships""" +817 77 model """transe""" +817 77 loss """nssa""" +817 77 regularizer """no""" +817 77 optimizer """adadelta""" +817 77 training_loop """owa""" +817 77 negative_sampler """basic""" +817 77 evaluator """rankbased""" +817 78 dataset """kinships""" +817 78 model """transe""" +817 78 loss """nssa""" +817 78 regularizer """no""" +817 78 optimizer """adadelta""" +817 78 training_loop """owa""" +817 78 negative_sampler """basic""" +817 78 evaluator """rankbased""" +817 79 dataset """kinships""" +817 79 model """transe""" +817 79 loss """nssa""" +817 79 regularizer """no""" +817 79 optimizer """adadelta""" +817 79 training_loop """owa""" +817 79 negative_sampler """basic""" +817 79 evaluator """rankbased""" +817 80 dataset """kinships""" +817 80 model """transe""" +817 80 loss """nssa""" +817 80 regularizer """no""" +817 80 optimizer """adadelta""" +817 80 training_loop """owa""" +817 80 negative_sampler """basic""" +817 80 evaluator """rankbased""" +817 81 dataset """kinships""" +817 81 model """transe""" +817 81 loss """nssa""" +817 81 regularizer """no""" +817 81 optimizer """adadelta""" +817 81 training_loop """owa""" +817 81 negative_sampler """basic""" +817 81 evaluator """rankbased""" +817 82 dataset """kinships""" +817 82 model """transe""" +817 82 loss """nssa""" +817 82 regularizer """no""" +817 82 optimizer """adadelta""" +817 82 training_loop """owa""" +817 82 negative_sampler """basic""" +817 82 evaluator """rankbased""" +817 83 dataset """kinships""" +817 83 model """transe""" +817 83 loss """nssa""" +817 83 regularizer """no""" +817 83 optimizer """adadelta""" +817 83 training_loop """owa""" +817 83 negative_sampler 
"""basic""" +817 83 evaluator """rankbased""" +817 84 dataset """kinships""" +817 84 model """transe""" +817 84 loss """nssa""" +817 84 regularizer """no""" +817 84 optimizer """adadelta""" +817 84 training_loop """owa""" +817 84 negative_sampler """basic""" +817 84 evaluator """rankbased""" +817 85 dataset """kinships""" +817 85 model """transe""" +817 85 loss """nssa""" +817 85 regularizer """no""" +817 85 optimizer """adadelta""" +817 85 training_loop """owa""" +817 85 negative_sampler """basic""" +817 85 evaluator """rankbased""" +817 86 dataset """kinships""" +817 86 model """transe""" +817 86 loss """nssa""" +817 86 regularizer """no""" +817 86 optimizer """adadelta""" +817 86 training_loop """owa""" +817 86 negative_sampler """basic""" +817 86 evaluator """rankbased""" +817 87 dataset """kinships""" +817 87 model """transe""" +817 87 loss """nssa""" +817 87 regularizer """no""" +817 87 optimizer """adadelta""" +817 87 training_loop """owa""" +817 87 negative_sampler """basic""" +817 87 evaluator """rankbased""" +817 88 dataset """kinships""" +817 88 model """transe""" +817 88 loss """nssa""" +817 88 regularizer """no""" +817 88 optimizer """adadelta""" +817 88 training_loop """owa""" +817 88 negative_sampler """basic""" +817 88 evaluator """rankbased""" +817 89 dataset """kinships""" +817 89 model """transe""" +817 89 loss """nssa""" +817 89 regularizer """no""" +817 89 optimizer """adadelta""" +817 89 training_loop """owa""" +817 89 negative_sampler """basic""" +817 89 evaluator """rankbased""" +817 90 dataset """kinships""" +817 90 model """transe""" +817 90 loss """nssa""" +817 90 regularizer """no""" +817 90 optimizer """adadelta""" +817 90 training_loop """owa""" +817 90 negative_sampler """basic""" +817 90 evaluator """rankbased""" +817 91 dataset """kinships""" +817 91 model """transe""" +817 91 loss """nssa""" +817 91 regularizer """no""" +817 91 optimizer """adadelta""" +817 91 training_loop """owa""" +817 91 negative_sampler """basic""" +817 91 
evaluator """rankbased""" +817 92 dataset """kinships""" +817 92 model """transe""" +817 92 loss """nssa""" +817 92 regularizer """no""" +817 92 optimizer """adadelta""" +817 92 training_loop """owa""" +817 92 negative_sampler """basic""" +817 92 evaluator """rankbased""" +817 93 dataset """kinships""" +817 93 model """transe""" +817 93 loss """nssa""" +817 93 regularizer """no""" +817 93 optimizer """adadelta""" +817 93 training_loop """owa""" +817 93 negative_sampler """basic""" +817 93 evaluator """rankbased""" +817 94 dataset """kinships""" +817 94 model """transe""" +817 94 loss """nssa""" +817 94 regularizer """no""" +817 94 optimizer """adadelta""" +817 94 training_loop """owa""" +817 94 negative_sampler """basic""" +817 94 evaluator """rankbased""" +817 95 dataset """kinships""" +817 95 model """transe""" +817 95 loss """nssa""" +817 95 regularizer """no""" +817 95 optimizer """adadelta""" +817 95 training_loop """owa""" +817 95 negative_sampler """basic""" +817 95 evaluator """rankbased""" +817 96 dataset """kinships""" +817 96 model """transe""" +817 96 loss """nssa""" +817 96 regularizer """no""" +817 96 optimizer """adadelta""" +817 96 training_loop """owa""" +817 96 negative_sampler """basic""" +817 96 evaluator """rankbased""" +817 97 dataset """kinships""" +817 97 model """transe""" +817 97 loss """nssa""" +817 97 regularizer """no""" +817 97 optimizer """adadelta""" +817 97 training_loop """owa""" +817 97 negative_sampler """basic""" +817 97 evaluator """rankbased""" +817 98 dataset """kinships""" +817 98 model """transe""" +817 98 loss """nssa""" +817 98 regularizer """no""" +817 98 optimizer """adadelta""" +817 98 training_loop """owa""" +817 98 negative_sampler """basic""" +817 98 evaluator """rankbased""" +817 99 dataset """kinships""" +817 99 model """transe""" +817 99 loss """nssa""" +817 99 regularizer """no""" +817 99 optimizer """adadelta""" +817 99 training_loop """owa""" +817 99 negative_sampler """basic""" +817 99 evaluator 
"""rankbased""" +817 100 dataset """kinships""" +817 100 model """transe""" +817 100 loss """nssa""" +817 100 regularizer """no""" +817 100 optimizer """adadelta""" +817 100 training_loop """owa""" +817 100 negative_sampler """basic""" +817 100 evaluator """rankbased""" +818 1 model.embedding_dim 1.0 +818 1 model.scoring_fct_norm 2.0 +818 1 loss.margin 24.52457683618978 +818 1 loss.adversarial_temperature 0.1454993078049982 +818 1 negative_sampler.num_negs_per_pos 88.0 +818 1 training.batch_size 1.0 +818 2 model.embedding_dim 2.0 +818 2 model.scoring_fct_norm 1.0 +818 2 loss.margin 7.572869706499419 +818 2 loss.adversarial_temperature 0.6990112718565608 +818 2 negative_sampler.num_negs_per_pos 92.0 +818 2 training.batch_size 0.0 +818 3 model.embedding_dim 1.0 +818 3 model.scoring_fct_norm 1.0 +818 3 loss.margin 14.015230646067216 +818 3 loss.adversarial_temperature 0.709571813531826 +818 3 negative_sampler.num_negs_per_pos 76.0 +818 3 training.batch_size 0.0 +818 4 model.embedding_dim 2.0 +818 4 model.scoring_fct_norm 2.0 +818 4 loss.margin 17.786629811428615 +818 4 loss.adversarial_temperature 0.5796534291333054 +818 4 negative_sampler.num_negs_per_pos 29.0 +818 4 training.batch_size 2.0 +818 5 model.embedding_dim 0.0 +818 5 model.scoring_fct_norm 2.0 +818 5 loss.margin 2.6665482232424083 +818 5 loss.adversarial_temperature 0.5644103357668673 +818 5 negative_sampler.num_negs_per_pos 65.0 +818 5 training.batch_size 2.0 +818 6 model.embedding_dim 1.0 +818 6 model.scoring_fct_norm 2.0 +818 6 loss.margin 29.835927876998497 +818 6 loss.adversarial_temperature 0.8555921358529042 +818 6 negative_sampler.num_negs_per_pos 96.0 +818 6 training.batch_size 0.0 +818 7 model.embedding_dim 2.0 +818 7 model.scoring_fct_norm 1.0 +818 7 loss.margin 29.819882958267335 +818 7 loss.adversarial_temperature 0.6045676343948542 +818 7 negative_sampler.num_negs_per_pos 38.0 +818 7 training.batch_size 2.0 +818 8 model.embedding_dim 1.0 +818 8 model.scoring_fct_norm 2.0 +818 8 loss.margin 
13.274472917259528 +818 8 loss.adversarial_temperature 0.8947240688281028 +818 8 negative_sampler.num_negs_per_pos 83.0 +818 8 training.batch_size 2.0 +818 9 model.embedding_dim 2.0 +818 9 model.scoring_fct_norm 1.0 +818 9 loss.margin 29.157327510292923 +818 9 loss.adversarial_temperature 0.9189167660423556 +818 9 negative_sampler.num_negs_per_pos 77.0 +818 9 training.batch_size 1.0 +818 10 model.embedding_dim 0.0 +818 10 model.scoring_fct_norm 2.0 +818 10 loss.margin 8.69105103982703 +818 10 loss.adversarial_temperature 0.7737581870094493 +818 10 negative_sampler.num_negs_per_pos 34.0 +818 10 training.batch_size 0.0 +818 11 model.embedding_dim 2.0 +818 11 model.scoring_fct_norm 1.0 +818 11 loss.margin 13.672618853293251 +818 11 loss.adversarial_temperature 0.7828047086223701 +818 11 negative_sampler.num_negs_per_pos 66.0 +818 11 training.batch_size 1.0 +818 12 model.embedding_dim 0.0 +818 12 model.scoring_fct_norm 2.0 +818 12 loss.margin 1.1987151487553902 +818 12 loss.adversarial_temperature 0.18100118035321205 +818 12 negative_sampler.num_negs_per_pos 19.0 +818 12 training.batch_size 1.0 +818 13 model.embedding_dim 2.0 +818 13 model.scoring_fct_norm 1.0 +818 13 loss.margin 7.405020855823286 +818 13 loss.adversarial_temperature 0.6504613450440802 +818 13 negative_sampler.num_negs_per_pos 87.0 +818 13 training.batch_size 1.0 +818 14 model.embedding_dim 1.0 +818 14 model.scoring_fct_norm 1.0 +818 14 loss.margin 8.986360406934812 +818 14 loss.adversarial_temperature 0.8413928819210753 +818 14 negative_sampler.num_negs_per_pos 80.0 +818 14 training.batch_size 0.0 +818 15 model.embedding_dim 0.0 +818 15 model.scoring_fct_norm 2.0 +818 15 loss.margin 15.965112314094316 +818 15 loss.adversarial_temperature 0.579517792460034 +818 15 negative_sampler.num_negs_per_pos 20.0 +818 15 training.batch_size 1.0 +818 16 model.embedding_dim 2.0 +818 16 model.scoring_fct_norm 2.0 +818 16 loss.margin 5.782665377747005 +818 16 loss.adversarial_temperature 0.27737000935791856 +818 16 
negative_sampler.num_negs_per_pos 64.0 +818 16 training.batch_size 0.0 +818 17 model.embedding_dim 2.0 +818 17 model.scoring_fct_norm 2.0 +818 17 loss.margin 5.740273177726996 +818 17 loss.adversarial_temperature 0.23481106632663898 +818 17 negative_sampler.num_negs_per_pos 40.0 +818 17 training.batch_size 0.0 +818 18 model.embedding_dim 0.0 +818 18 model.scoring_fct_norm 1.0 +818 18 loss.margin 24.156929665150678 +818 18 loss.adversarial_temperature 0.9411361952664099 +818 18 negative_sampler.num_negs_per_pos 16.0 +818 18 training.batch_size 1.0 +818 19 model.embedding_dim 0.0 +818 19 model.scoring_fct_norm 2.0 +818 19 loss.margin 1.3734216944256126 +818 19 loss.adversarial_temperature 0.789786520033798 +818 19 negative_sampler.num_negs_per_pos 86.0 +818 19 training.batch_size 0.0 +818 20 model.embedding_dim 1.0 +818 20 model.scoring_fct_norm 1.0 +818 20 loss.margin 6.050074475480292 +818 20 loss.adversarial_temperature 0.7626453396738058 +818 20 negative_sampler.num_negs_per_pos 46.0 +818 20 training.batch_size 1.0 +818 21 model.embedding_dim 0.0 +818 21 model.scoring_fct_norm 1.0 +818 21 loss.margin 10.91346472839194 +818 21 loss.adversarial_temperature 0.7364414944335234 +818 21 negative_sampler.num_negs_per_pos 92.0 +818 21 training.batch_size 0.0 +818 22 model.embedding_dim 2.0 +818 22 model.scoring_fct_norm 1.0 +818 22 loss.margin 22.30369757920693 +818 22 loss.adversarial_temperature 0.5506295031468342 +818 22 negative_sampler.num_negs_per_pos 7.0 +818 22 training.batch_size 0.0 +818 23 model.embedding_dim 0.0 +818 23 model.scoring_fct_norm 1.0 +818 23 loss.margin 7.851413122042738 +818 23 loss.adversarial_temperature 0.3353394944483835 +818 23 negative_sampler.num_negs_per_pos 54.0 +818 23 training.batch_size 1.0 +818 24 model.embedding_dim 2.0 +818 24 model.scoring_fct_norm 1.0 +818 24 loss.margin 25.48486910937176 +818 24 loss.adversarial_temperature 0.8033622082560369 +818 24 negative_sampler.num_negs_per_pos 15.0 +818 24 training.batch_size 1.0 +818 25 
model.embedding_dim 2.0 +818 25 model.scoring_fct_norm 2.0 +818 25 loss.margin 1.3258326555361462 +818 25 loss.adversarial_temperature 0.9467340298970592 +818 25 negative_sampler.num_negs_per_pos 39.0 +818 25 training.batch_size 2.0 +818 26 model.embedding_dim 1.0 +818 26 model.scoring_fct_norm 2.0 +818 26 loss.margin 10.436996389061518 +818 26 loss.adversarial_temperature 0.8508164397778986 +818 26 negative_sampler.num_negs_per_pos 67.0 +818 26 training.batch_size 1.0 +818 27 model.embedding_dim 2.0 +818 27 model.scoring_fct_norm 2.0 +818 27 loss.margin 5.877892223342531 +818 27 loss.adversarial_temperature 0.8597053520786169 +818 27 negative_sampler.num_negs_per_pos 0.0 +818 27 training.batch_size 0.0 +818 28 model.embedding_dim 0.0 +818 28 model.scoring_fct_norm 2.0 +818 28 loss.margin 26.076075927214525 +818 28 loss.adversarial_temperature 0.31222247452402707 +818 28 negative_sampler.num_negs_per_pos 1.0 +818 28 training.batch_size 1.0 +818 29 model.embedding_dim 1.0 +818 29 model.scoring_fct_norm 1.0 +818 29 loss.margin 9.961839822710285 +818 29 loss.adversarial_temperature 0.4838412431325927 +818 29 negative_sampler.num_negs_per_pos 74.0 +818 29 training.batch_size 1.0 +818 30 model.embedding_dim 1.0 +818 30 model.scoring_fct_norm 1.0 +818 30 loss.margin 29.4104578405614 +818 30 loss.adversarial_temperature 0.2542657364025111 +818 30 negative_sampler.num_negs_per_pos 46.0 +818 30 training.batch_size 1.0 +818 31 model.embedding_dim 1.0 +818 31 model.scoring_fct_norm 1.0 +818 31 loss.margin 12.458479813150586 +818 31 loss.adversarial_temperature 0.49047124008148746 +818 31 negative_sampler.num_negs_per_pos 92.0 +818 31 training.batch_size 1.0 +818 32 model.embedding_dim 0.0 +818 32 model.scoring_fct_norm 1.0 +818 32 loss.margin 12.58702322637144 +818 32 loss.adversarial_temperature 0.3832604848138498 +818 32 negative_sampler.num_negs_per_pos 23.0 +818 32 training.batch_size 0.0 +818 33 model.embedding_dim 1.0 +818 33 model.scoring_fct_norm 2.0 +818 33 
loss.margin 7.8260839232959345 +818 33 loss.adversarial_temperature 0.2342002634036143 +818 33 negative_sampler.num_negs_per_pos 74.0 +818 33 training.batch_size 0.0 +818 34 model.embedding_dim 1.0 +818 34 model.scoring_fct_norm 2.0 +818 34 loss.margin 21.967157331049563 +818 34 loss.adversarial_temperature 0.6403603563939325 +818 34 negative_sampler.num_negs_per_pos 77.0 +818 34 training.batch_size 2.0 +818 35 model.embedding_dim 0.0 +818 35 model.scoring_fct_norm 2.0 +818 35 loss.margin 6.378699550237004 +818 35 loss.adversarial_temperature 0.4757567183422259 +818 35 negative_sampler.num_negs_per_pos 22.0 +818 35 training.batch_size 0.0 +818 36 model.embedding_dim 2.0 +818 36 model.scoring_fct_norm 1.0 +818 36 loss.margin 24.70244755890359 +818 36 loss.adversarial_temperature 0.4643516294483573 +818 36 negative_sampler.num_negs_per_pos 34.0 +818 36 training.batch_size 2.0 +818 37 model.embedding_dim 1.0 +818 37 model.scoring_fct_norm 1.0 +818 37 loss.margin 13.802676506690906 +818 37 loss.adversarial_temperature 0.39365254673048367 +818 37 negative_sampler.num_negs_per_pos 60.0 +818 37 training.batch_size 1.0 +818 38 model.embedding_dim 1.0 +818 38 model.scoring_fct_norm 2.0 +818 38 loss.margin 13.585499799910625 +818 38 loss.adversarial_temperature 0.10759271113788815 +818 38 negative_sampler.num_negs_per_pos 1.0 +818 38 training.batch_size 1.0 +818 39 model.embedding_dim 0.0 +818 39 model.scoring_fct_norm 2.0 +818 39 loss.margin 13.720148375935272 +818 39 loss.adversarial_temperature 0.37971984842190354 +818 39 negative_sampler.num_negs_per_pos 7.0 +818 39 training.batch_size 2.0 +818 40 model.embedding_dim 2.0 +818 40 model.scoring_fct_norm 2.0 +818 40 loss.margin 20.150717293186794 +818 40 loss.adversarial_temperature 0.11675104346750553 +818 40 negative_sampler.num_negs_per_pos 40.0 +818 40 training.batch_size 1.0 +818 41 model.embedding_dim 0.0 +818 41 model.scoring_fct_norm 2.0 +818 41 loss.margin 20.760023231298046 +818 41 loss.adversarial_temperature 
0.7294313849362879 +818 41 negative_sampler.num_negs_per_pos 24.0 +818 41 training.batch_size 1.0 +818 42 model.embedding_dim 1.0 +818 42 model.scoring_fct_norm 2.0 +818 42 loss.margin 12.571452959350465 +818 42 loss.adversarial_temperature 0.8529437614895853 +818 42 negative_sampler.num_negs_per_pos 72.0 +818 42 training.batch_size 0.0 +818 43 model.embedding_dim 2.0 +818 43 model.scoring_fct_norm 2.0 +818 43 loss.margin 4.4913444479801825 +818 43 loss.adversarial_temperature 0.1473229684310622 +818 43 negative_sampler.num_negs_per_pos 6.0 +818 43 training.batch_size 2.0 +818 44 model.embedding_dim 2.0 +818 44 model.scoring_fct_norm 2.0 +818 44 loss.margin 7.347694384547457 +818 44 loss.adversarial_temperature 0.8216025033161081 +818 44 negative_sampler.num_negs_per_pos 30.0 +818 44 training.batch_size 0.0 +818 45 model.embedding_dim 1.0 +818 45 model.scoring_fct_norm 2.0 +818 45 loss.margin 4.958902062251056 +818 45 loss.adversarial_temperature 0.9412137141147566 +818 45 negative_sampler.num_negs_per_pos 41.0 +818 45 training.batch_size 0.0 +818 46 model.embedding_dim 0.0 +818 46 model.scoring_fct_norm 1.0 +818 46 loss.margin 18.67706631331231 +818 46 loss.adversarial_temperature 0.8190736643178275 +818 46 negative_sampler.num_negs_per_pos 22.0 +818 46 training.batch_size 1.0 +818 47 model.embedding_dim 2.0 +818 47 model.scoring_fct_norm 1.0 +818 47 loss.margin 22.312435207104812 +818 47 loss.adversarial_temperature 0.8267062240334109 +818 47 negative_sampler.num_negs_per_pos 26.0 +818 47 training.batch_size 1.0 +818 48 model.embedding_dim 2.0 +818 48 model.scoring_fct_norm 2.0 +818 48 loss.margin 18.800507880376983 +818 48 loss.adversarial_temperature 0.209560293192861 +818 48 negative_sampler.num_negs_per_pos 83.0 +818 48 training.batch_size 0.0 +818 49 model.embedding_dim 0.0 +818 49 model.scoring_fct_norm 1.0 +818 49 loss.margin 5.366168508296329 +818 49 loss.adversarial_temperature 0.5255248491431304 +818 49 negative_sampler.num_negs_per_pos 24.0 +818 49 
training.batch_size 0.0 +818 50 model.embedding_dim 1.0 +818 50 model.scoring_fct_norm 2.0 +818 50 loss.margin 20.755562581005034 +818 50 loss.adversarial_temperature 0.5416126688848598 +818 50 negative_sampler.num_negs_per_pos 3.0 +818 50 training.batch_size 2.0 +818 51 model.embedding_dim 0.0 +818 51 model.scoring_fct_norm 1.0 +818 51 loss.margin 26.720944892543706 +818 51 loss.adversarial_temperature 0.9234670571098537 +818 51 negative_sampler.num_negs_per_pos 57.0 +818 51 training.batch_size 2.0 +818 52 model.embedding_dim 1.0 +818 52 model.scoring_fct_norm 1.0 +818 52 loss.margin 5.8427355956725995 +818 52 loss.adversarial_temperature 0.8211891688295156 +818 52 negative_sampler.num_negs_per_pos 70.0 +818 52 training.batch_size 1.0 +818 53 model.embedding_dim 2.0 +818 53 model.scoring_fct_norm 2.0 +818 53 loss.margin 16.737992974262017 +818 53 loss.adversarial_temperature 0.97052087903133 +818 53 negative_sampler.num_negs_per_pos 72.0 +818 53 training.batch_size 0.0 +818 54 model.embedding_dim 0.0 +818 54 model.scoring_fct_norm 1.0 +818 54 loss.margin 19.011602720597836 +818 54 loss.adversarial_temperature 0.5829936639471678 +818 54 negative_sampler.num_negs_per_pos 3.0 +818 54 training.batch_size 2.0 +818 55 model.embedding_dim 2.0 +818 55 model.scoring_fct_norm 2.0 +818 55 loss.margin 4.1097559419177525 +818 55 loss.adversarial_temperature 0.263507569760554 +818 55 negative_sampler.num_negs_per_pos 93.0 +818 55 training.batch_size 2.0 +818 56 model.embedding_dim 2.0 +818 56 model.scoring_fct_norm 2.0 +818 56 loss.margin 2.4214384004993637 +818 56 loss.adversarial_temperature 0.16918951579806152 +818 56 negative_sampler.num_negs_per_pos 62.0 +818 56 training.batch_size 1.0 +818 57 model.embedding_dim 1.0 +818 57 model.scoring_fct_norm 1.0 +818 57 loss.margin 17.635574679674693 +818 57 loss.adversarial_temperature 0.46428619128256154 +818 57 negative_sampler.num_negs_per_pos 42.0 +818 57 training.batch_size 2.0 +818 58 model.embedding_dim 1.0 +818 58 
model.scoring_fct_norm 2.0 +818 58 loss.margin 9.63122644781252 +818 58 loss.adversarial_temperature 0.2864287413636731 +818 58 negative_sampler.num_negs_per_pos 99.0 +818 58 training.batch_size 2.0 +818 59 model.embedding_dim 2.0 +818 59 model.scoring_fct_norm 1.0 +818 59 loss.margin 6.529342466263527 +818 59 loss.adversarial_temperature 0.44479823836856613 +818 59 negative_sampler.num_negs_per_pos 96.0 +818 59 training.batch_size 2.0 +818 60 model.embedding_dim 1.0 +818 60 model.scoring_fct_norm 1.0 +818 60 loss.margin 19.87240134623539 +818 60 loss.adversarial_temperature 0.3032902543814355 +818 60 negative_sampler.num_negs_per_pos 8.0 +818 60 training.batch_size 0.0 +818 61 model.embedding_dim 1.0 +818 61 model.scoring_fct_norm 2.0 +818 61 loss.margin 14.549123757828246 +818 61 loss.adversarial_temperature 0.724465568710036 +818 61 negative_sampler.num_negs_per_pos 42.0 +818 61 training.batch_size 2.0 +818 62 model.embedding_dim 2.0 +818 62 model.scoring_fct_norm 2.0 +818 62 loss.margin 12.04361167658692 +818 62 loss.adversarial_temperature 0.44850270326143826 +818 62 negative_sampler.num_negs_per_pos 61.0 +818 62 training.batch_size 2.0 +818 63 model.embedding_dim 2.0 +818 63 model.scoring_fct_norm 1.0 +818 63 loss.margin 16.835827117919017 +818 63 loss.adversarial_temperature 0.9586864236682883 +818 63 negative_sampler.num_negs_per_pos 12.0 +818 63 training.batch_size 1.0 +818 64 model.embedding_dim 2.0 +818 64 model.scoring_fct_norm 2.0 +818 64 loss.margin 5.606914003775771 +818 64 loss.adversarial_temperature 0.4873816686855029 +818 64 negative_sampler.num_negs_per_pos 40.0 +818 64 training.batch_size 2.0 +818 65 model.embedding_dim 2.0 +818 65 model.scoring_fct_norm 1.0 +818 65 loss.margin 2.466514298525211 +818 65 loss.adversarial_temperature 0.38543987927704904 +818 65 negative_sampler.num_negs_per_pos 51.0 +818 65 training.batch_size 0.0 +818 66 model.embedding_dim 2.0 +818 66 model.scoring_fct_norm 1.0 +818 66 loss.margin 15.896592027833911 +818 66 
loss.adversarial_temperature 0.5747199134038364 +818 66 negative_sampler.num_negs_per_pos 97.0 +818 66 training.batch_size 2.0 +818 67 model.embedding_dim 2.0 +818 67 model.scoring_fct_norm 2.0 +818 67 loss.margin 22.145581185716573 +818 67 loss.adversarial_temperature 0.36520142413611756 +818 67 negative_sampler.num_negs_per_pos 30.0 +818 67 training.batch_size 1.0 +818 68 model.embedding_dim 1.0 +818 68 model.scoring_fct_norm 1.0 +818 68 loss.margin 19.836064586937244 +818 68 loss.adversarial_temperature 0.6600316307440868 +818 68 negative_sampler.num_negs_per_pos 98.0 +818 68 training.batch_size 2.0 +818 69 model.embedding_dim 2.0 +818 69 model.scoring_fct_norm 1.0 +818 69 loss.margin 16.1154965531128 +818 69 loss.adversarial_temperature 0.32152691428357716 +818 69 negative_sampler.num_negs_per_pos 86.0 +818 69 training.batch_size 2.0 +818 70 model.embedding_dim 2.0 +818 70 model.scoring_fct_norm 1.0 +818 70 loss.margin 6.305622642728566 +818 70 loss.adversarial_temperature 0.2129380558311062 +818 70 negative_sampler.num_negs_per_pos 90.0 +818 70 training.batch_size 2.0 +818 71 model.embedding_dim 2.0 +818 71 model.scoring_fct_norm 2.0 +818 71 loss.margin 19.06781250342112 +818 71 loss.adversarial_temperature 0.28923767917408594 +818 71 negative_sampler.num_negs_per_pos 4.0 +818 71 training.batch_size 2.0 +818 72 model.embedding_dim 1.0 +818 72 model.scoring_fct_norm 2.0 +818 72 loss.margin 24.920367916878575 +818 72 loss.adversarial_temperature 0.999836650057736 +818 72 negative_sampler.num_negs_per_pos 32.0 +818 72 training.batch_size 2.0 +818 73 model.embedding_dim 2.0 +818 73 model.scoring_fct_norm 2.0 +818 73 loss.margin 13.422358255980761 +818 73 loss.adversarial_temperature 0.2610276588369271 +818 73 negative_sampler.num_negs_per_pos 61.0 +818 73 training.batch_size 1.0 +818 74 model.embedding_dim 0.0 +818 74 model.scoring_fct_norm 2.0 +818 74 loss.margin 6.158600744109833 +818 74 loss.adversarial_temperature 0.5920399482727224 +818 74 
negative_sampler.num_negs_per_pos 11.0 +818 74 training.batch_size 0.0 +818 75 model.embedding_dim 1.0 +818 75 model.scoring_fct_norm 1.0 +818 75 loss.margin 27.957209659535533 +818 75 loss.adversarial_temperature 0.3326007117579113 +818 75 negative_sampler.num_negs_per_pos 5.0 +818 75 training.batch_size 2.0 +818 76 model.embedding_dim 0.0 +818 76 model.scoring_fct_norm 2.0 +818 76 loss.margin 1.7756755665716988 +818 76 loss.adversarial_temperature 0.1354854778061616 +818 76 negative_sampler.num_negs_per_pos 82.0 +818 76 training.batch_size 1.0 +818 77 model.embedding_dim 1.0 +818 77 model.scoring_fct_norm 2.0 +818 77 loss.margin 24.73753854189849 +818 77 loss.adversarial_temperature 0.7799589103336751 +818 77 negative_sampler.num_negs_per_pos 54.0 +818 77 training.batch_size 1.0 +818 78 model.embedding_dim 2.0 +818 78 model.scoring_fct_norm 2.0 +818 78 loss.margin 13.904738838626935 +818 78 loss.adversarial_temperature 0.7287897977148983 +818 78 negative_sampler.num_negs_per_pos 59.0 +818 78 training.batch_size 1.0 +818 79 model.embedding_dim 1.0 +818 79 model.scoring_fct_norm 2.0 +818 79 loss.margin 2.8399601691778544 +818 79 loss.adversarial_temperature 0.2523859327210783 +818 79 negative_sampler.num_negs_per_pos 86.0 +818 79 training.batch_size 2.0 +818 80 model.embedding_dim 0.0 +818 80 model.scoring_fct_norm 2.0 +818 80 loss.margin 17.283621804383372 +818 80 loss.adversarial_temperature 0.8760846399695723 +818 80 negative_sampler.num_negs_per_pos 79.0 +818 80 training.batch_size 0.0 +818 81 model.embedding_dim 0.0 +818 81 model.scoring_fct_norm 1.0 +818 81 loss.margin 6.528882656725016 +818 81 loss.adversarial_temperature 0.126185064394395 +818 81 negative_sampler.num_negs_per_pos 59.0 +818 81 training.batch_size 1.0 +818 82 model.embedding_dim 1.0 +818 82 model.scoring_fct_norm 1.0 +818 82 loss.margin 1.4885791168894418 +818 82 loss.adversarial_temperature 0.8211227476868589 +818 82 negative_sampler.num_negs_per_pos 19.0 +818 82 training.batch_size 0.0 +818 
83 model.embedding_dim 2.0 +818 83 model.scoring_fct_norm 1.0 +818 83 loss.margin 15.615920943519303 +818 83 loss.adversarial_temperature 0.15700399169959128 +818 83 negative_sampler.num_negs_per_pos 16.0 +818 83 training.batch_size 1.0 +818 84 model.embedding_dim 2.0 +818 84 model.scoring_fct_norm 1.0 +818 84 loss.margin 10.761389768311542 +818 84 loss.adversarial_temperature 0.2012160439451735 +818 84 negative_sampler.num_negs_per_pos 78.0 +818 84 training.batch_size 2.0 +818 85 model.embedding_dim 2.0 +818 85 model.scoring_fct_norm 2.0 +818 85 loss.margin 21.461186148862513 +818 85 loss.adversarial_temperature 0.8646691002611278 +818 85 negative_sampler.num_negs_per_pos 25.0 +818 85 training.batch_size 0.0 +818 86 model.embedding_dim 2.0 +818 86 model.scoring_fct_norm 1.0 +818 86 loss.margin 17.739814454229133 +818 86 loss.adversarial_temperature 0.8030996708827209 +818 86 negative_sampler.num_negs_per_pos 56.0 +818 86 training.batch_size 1.0 +818 87 model.embedding_dim 1.0 +818 87 model.scoring_fct_norm 1.0 +818 87 loss.margin 3.5672107284353554 +818 87 loss.adversarial_temperature 0.40563904956288677 +818 87 negative_sampler.num_negs_per_pos 46.0 +818 87 training.batch_size 0.0 +818 88 model.embedding_dim 0.0 +818 88 model.scoring_fct_norm 1.0 +818 88 loss.margin 19.267892504250188 +818 88 loss.adversarial_temperature 0.13438577444043134 +818 88 negative_sampler.num_negs_per_pos 58.0 +818 88 training.batch_size 0.0 +818 89 model.embedding_dim 2.0 +818 89 model.scoring_fct_norm 1.0 +818 89 loss.margin 12.94277366415341 +818 89 loss.adversarial_temperature 0.7450653424724085 +818 89 negative_sampler.num_negs_per_pos 2.0 +818 89 training.batch_size 0.0 +818 90 model.embedding_dim 0.0 +818 90 model.scoring_fct_norm 1.0 +818 90 loss.margin 15.136374300427857 +818 90 loss.adversarial_temperature 0.7619207264719512 +818 90 negative_sampler.num_negs_per_pos 9.0 +818 90 training.batch_size 1.0 +818 91 model.embedding_dim 1.0 +818 91 model.scoring_fct_norm 1.0 +818 91 
loss.margin 14.264633165819225 +818 91 loss.adversarial_temperature 0.9969400240903232 +818 91 negative_sampler.num_negs_per_pos 48.0 +818 91 training.batch_size 1.0 +818 92 model.embedding_dim 1.0 +818 92 model.scoring_fct_norm 1.0 +818 92 loss.margin 14.736834649835572 +818 92 loss.adversarial_temperature 0.3948360441919715 +818 92 negative_sampler.num_negs_per_pos 77.0 +818 92 training.batch_size 0.0 +818 93 model.embedding_dim 1.0 +818 93 model.scoring_fct_norm 1.0 +818 93 loss.margin 18.674731192439715 +818 93 loss.adversarial_temperature 0.8330006777281486 +818 93 negative_sampler.num_negs_per_pos 41.0 +818 93 training.batch_size 0.0 +818 94 model.embedding_dim 2.0 +818 94 model.scoring_fct_norm 1.0 +818 94 loss.margin 15.965848906636255 +818 94 loss.adversarial_temperature 0.8347153373256403 +818 94 negative_sampler.num_negs_per_pos 79.0 +818 94 training.batch_size 1.0 +818 95 model.embedding_dim 2.0 +818 95 model.scoring_fct_norm 1.0 +818 95 loss.margin 7.932183119527809 +818 95 loss.adversarial_temperature 0.6766595332792722 +818 95 negative_sampler.num_negs_per_pos 58.0 +818 95 training.batch_size 2.0 +818 96 model.embedding_dim 2.0 +818 96 model.scoring_fct_norm 1.0 +818 96 loss.margin 23.817126299198804 +818 96 loss.adversarial_temperature 0.40256827944039486 +818 96 negative_sampler.num_negs_per_pos 51.0 +818 96 training.batch_size 2.0 +818 97 model.embedding_dim 2.0 +818 97 model.scoring_fct_norm 2.0 +818 97 loss.margin 10.303091212955 +818 97 loss.adversarial_temperature 0.27227587651733187 +818 97 negative_sampler.num_negs_per_pos 66.0 +818 97 training.batch_size 1.0 +818 98 model.embedding_dim 0.0 +818 98 model.scoring_fct_norm 1.0 +818 98 loss.margin 16.472720753969714 +818 98 loss.adversarial_temperature 0.715486815547944 +818 98 negative_sampler.num_negs_per_pos 67.0 +818 98 training.batch_size 2.0 +818 99 model.embedding_dim 1.0 +818 99 model.scoring_fct_norm 1.0 +818 99 loss.margin 13.42237269287565 +818 99 loss.adversarial_temperature 
0.7539939209475551 +818 99 negative_sampler.num_negs_per_pos 61.0 +818 99 training.batch_size 0.0 +818 100 model.embedding_dim 2.0 +818 100 model.scoring_fct_norm 1.0 +818 100 loss.margin 22.42547297816612 +818 100 loss.adversarial_temperature 0.2984058131558808 +818 100 negative_sampler.num_negs_per_pos 50.0 +818 100 training.batch_size 1.0 +818 1 dataset """kinships""" +818 1 model """transe""" +818 1 loss """nssa""" +818 1 regularizer """no""" +818 1 optimizer """adadelta""" +818 1 training_loop """owa""" +818 1 negative_sampler """basic""" +818 1 evaluator """rankbased""" +818 2 dataset """kinships""" +818 2 model """transe""" +818 2 loss """nssa""" +818 2 regularizer """no""" +818 2 optimizer """adadelta""" +818 2 training_loop """owa""" +818 2 negative_sampler """basic""" +818 2 evaluator """rankbased""" +818 3 dataset """kinships""" +818 3 model """transe""" +818 3 loss """nssa""" +818 3 regularizer """no""" +818 3 optimizer """adadelta""" +818 3 training_loop """owa""" +818 3 negative_sampler """basic""" +818 3 evaluator """rankbased""" +818 4 dataset """kinships""" +818 4 model """transe""" +818 4 loss """nssa""" +818 4 regularizer """no""" +818 4 optimizer """adadelta""" +818 4 training_loop """owa""" +818 4 negative_sampler """basic""" +818 4 evaluator """rankbased""" +818 5 dataset """kinships""" +818 5 model """transe""" +818 5 loss """nssa""" +818 5 regularizer """no""" +818 5 optimizer """adadelta""" +818 5 training_loop """owa""" +818 5 negative_sampler """basic""" +818 5 evaluator """rankbased""" +818 6 dataset """kinships""" +818 6 model """transe""" +818 6 loss """nssa""" +818 6 regularizer """no""" +818 6 optimizer """adadelta""" +818 6 training_loop """owa""" +818 6 negative_sampler """basic""" +818 6 evaluator """rankbased""" +818 7 dataset """kinships""" +818 7 model """transe""" +818 7 loss """nssa""" +818 7 regularizer """no""" +818 7 optimizer """adadelta""" +818 7 training_loop """owa""" +818 7 negative_sampler """basic""" +818 7 
evaluator """rankbased""" +818 8 dataset """kinships""" +818 8 model """transe""" +818 8 loss """nssa""" +818 8 regularizer """no""" +818 8 optimizer """adadelta""" +818 8 training_loop """owa""" +818 8 negative_sampler """basic""" +818 8 evaluator """rankbased""" +818 9 dataset """kinships""" +818 9 model """transe""" +818 9 loss """nssa""" +818 9 regularizer """no""" +818 9 optimizer """adadelta""" +818 9 training_loop """owa""" +818 9 negative_sampler """basic""" +818 9 evaluator """rankbased""" +818 10 dataset """kinships""" +818 10 model """transe""" +818 10 loss """nssa""" +818 10 regularizer """no""" +818 10 optimizer """adadelta""" +818 10 training_loop """owa""" +818 10 negative_sampler """basic""" +818 10 evaluator """rankbased""" +818 11 dataset """kinships""" +818 11 model """transe""" +818 11 loss """nssa""" +818 11 regularizer """no""" +818 11 optimizer """adadelta""" +818 11 training_loop """owa""" +818 11 negative_sampler """basic""" +818 11 evaluator """rankbased""" +818 12 dataset """kinships""" +818 12 model """transe""" +818 12 loss """nssa""" +818 12 regularizer """no""" +818 12 optimizer """adadelta""" +818 12 training_loop """owa""" +818 12 negative_sampler """basic""" +818 12 evaluator """rankbased""" +818 13 dataset """kinships""" +818 13 model """transe""" +818 13 loss """nssa""" +818 13 regularizer """no""" +818 13 optimizer """adadelta""" +818 13 training_loop """owa""" +818 13 negative_sampler """basic""" +818 13 evaluator """rankbased""" +818 14 dataset """kinships""" +818 14 model """transe""" +818 14 loss """nssa""" +818 14 regularizer """no""" +818 14 optimizer """adadelta""" +818 14 training_loop """owa""" +818 14 negative_sampler """basic""" +818 14 evaluator """rankbased""" +818 15 dataset """kinships""" +818 15 model """transe""" +818 15 loss """nssa""" +818 15 regularizer """no""" +818 15 optimizer """adadelta""" +818 15 training_loop """owa""" +818 15 negative_sampler """basic""" +818 15 evaluator """rankbased""" +818 16 
dataset """kinships""" +818 16 model """transe""" +818 16 loss """nssa""" +818 16 regularizer """no""" +818 16 optimizer """adadelta""" +818 16 training_loop """owa""" +818 16 negative_sampler """basic""" +818 16 evaluator """rankbased""" +818 17 dataset """kinships""" +818 17 model """transe""" +818 17 loss """nssa""" +818 17 regularizer """no""" +818 17 optimizer """adadelta""" +818 17 training_loop """owa""" +818 17 negative_sampler """basic""" +818 17 evaluator """rankbased""" +818 18 dataset """kinships""" +818 18 model """transe""" +818 18 loss """nssa""" +818 18 regularizer """no""" +818 18 optimizer """adadelta""" +818 18 training_loop """owa""" +818 18 negative_sampler """basic""" +818 18 evaluator """rankbased""" +818 19 dataset """kinships""" +818 19 model """transe""" +818 19 loss """nssa""" +818 19 regularizer """no""" +818 19 optimizer """adadelta""" +818 19 training_loop """owa""" +818 19 negative_sampler """basic""" +818 19 evaluator """rankbased""" +818 20 dataset """kinships""" +818 20 model """transe""" +818 20 loss """nssa""" +818 20 regularizer """no""" +818 20 optimizer """adadelta""" +818 20 training_loop """owa""" +818 20 negative_sampler """basic""" +818 20 evaluator """rankbased""" +818 21 dataset """kinships""" +818 21 model """transe""" +818 21 loss """nssa""" +818 21 regularizer """no""" +818 21 optimizer """adadelta""" +818 21 training_loop """owa""" +818 21 negative_sampler """basic""" +818 21 evaluator """rankbased""" +818 22 dataset """kinships""" +818 22 model """transe""" +818 22 loss """nssa""" +818 22 regularizer """no""" +818 22 optimizer """adadelta""" +818 22 training_loop """owa""" +818 22 negative_sampler """basic""" +818 22 evaluator """rankbased""" +818 23 dataset """kinships""" +818 23 model """transe""" +818 23 loss """nssa""" +818 23 regularizer """no""" +818 23 optimizer """adadelta""" +818 23 training_loop """owa""" +818 23 negative_sampler """basic""" +818 23 evaluator """rankbased""" +818 24 dataset """kinships""" 
+818 24 model """transe""" +818 24 loss """nssa""" +818 24 regularizer """no""" +818 24 optimizer """adadelta""" +818 24 training_loop """owa""" +818 24 negative_sampler """basic""" +818 24 evaluator """rankbased""" +818 25 dataset """kinships""" +818 25 model """transe""" +818 25 loss """nssa""" +818 25 regularizer """no""" +818 25 optimizer """adadelta""" +818 25 training_loop """owa""" +818 25 negative_sampler """basic""" +818 25 evaluator """rankbased""" +818 26 dataset """kinships""" +818 26 model """transe""" +818 26 loss """nssa""" +818 26 regularizer """no""" +818 26 optimizer """adadelta""" +818 26 training_loop """owa""" +818 26 negative_sampler """basic""" +818 26 evaluator """rankbased""" +818 27 dataset """kinships""" +818 27 model """transe""" +818 27 loss """nssa""" +818 27 regularizer """no""" +818 27 optimizer """adadelta""" +818 27 training_loop """owa""" +818 27 negative_sampler """basic""" +818 27 evaluator """rankbased""" +818 28 dataset """kinships""" +818 28 model """transe""" +818 28 loss """nssa""" +818 28 regularizer """no""" +818 28 optimizer """adadelta""" +818 28 training_loop """owa""" +818 28 negative_sampler """basic""" +818 28 evaluator """rankbased""" +818 29 dataset """kinships""" +818 29 model """transe""" +818 29 loss """nssa""" +818 29 regularizer """no""" +818 29 optimizer """adadelta""" +818 29 training_loop """owa""" +818 29 negative_sampler """basic""" +818 29 evaluator """rankbased""" +818 30 dataset """kinships""" +818 30 model """transe""" +818 30 loss """nssa""" +818 30 regularizer """no""" +818 30 optimizer """adadelta""" +818 30 training_loop """owa""" +818 30 negative_sampler """basic""" +818 30 evaluator """rankbased""" +818 31 dataset """kinships""" +818 31 model """transe""" +818 31 loss """nssa""" +818 31 regularizer """no""" +818 31 optimizer """adadelta""" +818 31 training_loop """owa""" +818 31 negative_sampler """basic""" +818 31 evaluator """rankbased""" +818 32 dataset """kinships""" +818 32 model 
"""transe""" +818 32 loss """nssa""" +818 32 regularizer """no""" +818 32 optimizer """adadelta""" +818 32 training_loop """owa""" +818 32 negative_sampler """basic""" +818 32 evaluator """rankbased""" +818 33 dataset """kinships""" +818 33 model """transe""" +818 33 loss """nssa""" +818 33 regularizer """no""" +818 33 optimizer """adadelta""" +818 33 training_loop """owa""" +818 33 negative_sampler """basic""" +818 33 evaluator """rankbased""" +818 34 dataset """kinships""" +818 34 model """transe""" +818 34 loss """nssa""" +818 34 regularizer """no""" +818 34 optimizer """adadelta""" +818 34 training_loop """owa""" +818 34 negative_sampler """basic""" +818 34 evaluator """rankbased""" +818 35 dataset """kinships""" +818 35 model """transe""" +818 35 loss """nssa""" +818 35 regularizer """no""" +818 35 optimizer """adadelta""" +818 35 training_loop """owa""" +818 35 negative_sampler """basic""" +818 35 evaluator """rankbased""" +818 36 dataset """kinships""" +818 36 model """transe""" +818 36 loss """nssa""" +818 36 regularizer """no""" +818 36 optimizer """adadelta""" +818 36 training_loop """owa""" +818 36 negative_sampler """basic""" +818 36 evaluator """rankbased""" +818 37 dataset """kinships""" +818 37 model """transe""" +818 37 loss """nssa""" +818 37 regularizer """no""" +818 37 optimizer """adadelta""" +818 37 training_loop """owa""" +818 37 negative_sampler """basic""" +818 37 evaluator """rankbased""" +818 38 dataset """kinships""" +818 38 model """transe""" +818 38 loss """nssa""" +818 38 regularizer """no""" +818 38 optimizer """adadelta""" +818 38 training_loop """owa""" +818 38 negative_sampler """basic""" +818 38 evaluator """rankbased""" +818 39 dataset """kinships""" +818 39 model """transe""" +818 39 loss """nssa""" +818 39 regularizer """no""" +818 39 optimizer """adadelta""" +818 39 training_loop """owa""" +818 39 negative_sampler """basic""" +818 39 evaluator """rankbased""" +818 40 dataset """kinships""" +818 40 model """transe""" +818 40 
loss """nssa""" +818 40 regularizer """no""" +818 40 optimizer """adadelta""" +818 40 training_loop """owa""" +818 40 negative_sampler """basic""" +818 40 evaluator """rankbased""" +818 41 dataset """kinships""" +818 41 model """transe""" +818 41 loss """nssa""" +818 41 regularizer """no""" +818 41 optimizer """adadelta""" +818 41 training_loop """owa""" +818 41 negative_sampler """basic""" +818 41 evaluator """rankbased""" +818 42 dataset """kinships""" +818 42 model """transe""" +818 42 loss """nssa""" +818 42 regularizer """no""" +818 42 optimizer """adadelta""" +818 42 training_loop """owa""" +818 42 negative_sampler """basic""" +818 42 evaluator """rankbased""" +818 43 dataset """kinships""" +818 43 model """transe""" +818 43 loss """nssa""" +818 43 regularizer """no""" +818 43 optimizer """adadelta""" +818 43 training_loop """owa""" +818 43 negative_sampler """basic""" +818 43 evaluator """rankbased""" +818 44 dataset """kinships""" +818 44 model """transe""" +818 44 loss """nssa""" +818 44 regularizer """no""" +818 44 optimizer """adadelta""" +818 44 training_loop """owa""" +818 44 negative_sampler """basic""" +818 44 evaluator """rankbased""" +818 45 dataset """kinships""" +818 45 model """transe""" +818 45 loss """nssa""" +818 45 regularizer """no""" +818 45 optimizer """adadelta""" +818 45 training_loop """owa""" +818 45 negative_sampler """basic""" +818 45 evaluator """rankbased""" +818 46 dataset """kinships""" +818 46 model """transe""" +818 46 loss """nssa""" +818 46 regularizer """no""" +818 46 optimizer """adadelta""" +818 46 training_loop """owa""" +818 46 negative_sampler """basic""" +818 46 evaluator """rankbased""" +818 47 dataset """kinships""" +818 47 model """transe""" +818 47 loss """nssa""" +818 47 regularizer """no""" +818 47 optimizer """adadelta""" +818 47 training_loop """owa""" +818 47 negative_sampler """basic""" +818 47 evaluator """rankbased""" +818 48 dataset """kinships""" +818 48 model """transe""" +818 48 loss """nssa""" +818 48 
regularizer """no""" +818 48 optimizer """adadelta""" +818 48 training_loop """owa""" +818 48 negative_sampler """basic""" +818 48 evaluator """rankbased""" +818 49 dataset """kinships""" +818 49 model """transe""" +818 49 loss """nssa""" +818 49 regularizer """no""" +818 49 optimizer """adadelta""" +818 49 training_loop """owa""" +818 49 negative_sampler """basic""" +818 49 evaluator """rankbased""" +818 50 dataset """kinships""" +818 50 model """transe""" +818 50 loss """nssa""" +818 50 regularizer """no""" +818 50 optimizer """adadelta""" +818 50 training_loop """owa""" +818 50 negative_sampler """basic""" +818 50 evaluator """rankbased""" +818 51 dataset """kinships""" +818 51 model """transe""" +818 51 loss """nssa""" +818 51 regularizer """no""" +818 51 optimizer """adadelta""" +818 51 training_loop """owa""" +818 51 negative_sampler """basic""" +818 51 evaluator """rankbased""" +818 52 dataset """kinships""" +818 52 model """transe""" +818 52 loss """nssa""" +818 52 regularizer """no""" +818 52 optimizer """adadelta""" +818 52 training_loop """owa""" +818 52 negative_sampler """basic""" +818 52 evaluator """rankbased""" +818 53 dataset """kinships""" +818 53 model """transe""" +818 53 loss """nssa""" +818 53 regularizer """no""" +818 53 optimizer """adadelta""" +818 53 training_loop """owa""" +818 53 negative_sampler """basic""" +818 53 evaluator """rankbased""" +818 54 dataset """kinships""" +818 54 model """transe""" +818 54 loss """nssa""" +818 54 regularizer """no""" +818 54 optimizer """adadelta""" +818 54 training_loop """owa""" +818 54 negative_sampler """basic""" +818 54 evaluator """rankbased""" +818 55 dataset """kinships""" +818 55 model """transe""" +818 55 loss """nssa""" +818 55 regularizer """no""" +818 55 optimizer """adadelta""" +818 55 training_loop """owa""" +818 55 negative_sampler """basic""" +818 55 evaluator """rankbased""" +818 56 dataset """kinships""" +818 56 model """transe""" +818 56 loss """nssa""" +818 56 regularizer """no""" 
+818 56 optimizer """adadelta""" +818 56 training_loop """owa""" +818 56 negative_sampler """basic""" +818 56 evaluator """rankbased""" +818 57 dataset """kinships""" +818 57 model """transe""" +818 57 loss """nssa""" +818 57 regularizer """no""" +818 57 optimizer """adadelta""" +818 57 training_loop """owa""" +818 57 negative_sampler """basic""" +818 57 evaluator """rankbased""" +818 58 dataset """kinships""" +818 58 model """transe""" +818 58 loss """nssa""" +818 58 regularizer """no""" +818 58 optimizer """adadelta""" +818 58 training_loop """owa""" +818 58 negative_sampler """basic""" +818 58 evaluator """rankbased""" +818 59 dataset """kinships""" +818 59 model """transe""" +818 59 loss """nssa""" +818 59 regularizer """no""" +818 59 optimizer """adadelta""" +818 59 training_loop """owa""" +818 59 negative_sampler """basic""" +818 59 evaluator """rankbased""" +818 60 dataset """kinships""" +818 60 model """transe""" +818 60 loss """nssa""" +818 60 regularizer """no""" +818 60 optimizer """adadelta""" +818 60 training_loop """owa""" +818 60 negative_sampler """basic""" +818 60 evaluator """rankbased""" +818 61 dataset """kinships""" +818 61 model """transe""" +818 61 loss """nssa""" +818 61 regularizer """no""" +818 61 optimizer """adadelta""" +818 61 training_loop """owa""" +818 61 negative_sampler """basic""" +818 61 evaluator """rankbased""" +818 62 dataset """kinships""" +818 62 model """transe""" +818 62 loss """nssa""" +818 62 regularizer """no""" +818 62 optimizer """adadelta""" +818 62 training_loop """owa""" +818 62 negative_sampler """basic""" +818 62 evaluator """rankbased""" +818 63 dataset """kinships""" +818 63 model """transe""" +818 63 loss """nssa""" +818 63 regularizer """no""" +818 63 optimizer """adadelta""" +818 63 training_loop """owa""" +818 63 negative_sampler """basic""" +818 63 evaluator """rankbased""" +818 64 dataset """kinships""" +818 64 model """transe""" +818 64 loss """nssa""" +818 64 regularizer """no""" +818 64 optimizer 
"""adadelta""" +818 64 training_loop """owa""" +818 64 negative_sampler """basic""" +818 64 evaluator """rankbased""" +818 65 dataset """kinships""" +818 65 model """transe""" +818 65 loss """nssa""" +818 65 regularizer """no""" +818 65 optimizer """adadelta""" +818 65 training_loop """owa""" +818 65 negative_sampler """basic""" +818 65 evaluator """rankbased""" +818 66 dataset """kinships""" +818 66 model """transe""" +818 66 loss """nssa""" +818 66 regularizer """no""" +818 66 optimizer """adadelta""" +818 66 training_loop """owa""" +818 66 negative_sampler """basic""" +818 66 evaluator """rankbased""" +818 67 dataset """kinships""" +818 67 model """transe""" +818 67 loss """nssa""" +818 67 regularizer """no""" +818 67 optimizer """adadelta""" +818 67 training_loop """owa""" +818 67 negative_sampler """basic""" +818 67 evaluator """rankbased""" +818 68 dataset """kinships""" +818 68 model """transe""" +818 68 loss """nssa""" +818 68 regularizer """no""" +818 68 optimizer """adadelta""" +818 68 training_loop """owa""" +818 68 negative_sampler """basic""" +818 68 evaluator """rankbased""" +818 69 dataset """kinships""" +818 69 model """transe""" +818 69 loss """nssa""" +818 69 regularizer """no""" +818 69 optimizer """adadelta""" +818 69 training_loop """owa""" +818 69 negative_sampler """basic""" +818 69 evaluator """rankbased""" +818 70 dataset """kinships""" +818 70 model """transe""" +818 70 loss """nssa""" +818 70 regularizer """no""" +818 70 optimizer """adadelta""" +818 70 training_loop """owa""" +818 70 negative_sampler """basic""" +818 70 evaluator """rankbased""" +818 71 dataset """kinships""" +818 71 model """transe""" +818 71 loss """nssa""" +818 71 regularizer """no""" +818 71 optimizer """adadelta""" +818 71 training_loop """owa""" +818 71 negative_sampler """basic""" +818 71 evaluator """rankbased""" +818 72 dataset """kinships""" +818 72 model """transe""" +818 72 loss """nssa""" +818 72 regularizer """no""" +818 72 optimizer """adadelta""" +818 72 
training_loop """owa""" +818 72 negative_sampler """basic""" +818 72 evaluator """rankbased""" +818 73 dataset """kinships""" +818 73 model """transe""" +818 73 loss """nssa""" +818 73 regularizer """no""" +818 73 optimizer """adadelta""" +818 73 training_loop """owa""" +818 73 negative_sampler """basic""" +818 73 evaluator """rankbased""" +818 74 dataset """kinships""" +818 74 model """transe""" +818 74 loss """nssa""" +818 74 regularizer """no""" +818 74 optimizer """adadelta""" +818 74 training_loop """owa""" +818 74 negative_sampler """basic""" +818 74 evaluator """rankbased""" +818 75 dataset """kinships""" +818 75 model """transe""" +818 75 loss """nssa""" +818 75 regularizer """no""" +818 75 optimizer """adadelta""" +818 75 training_loop """owa""" +818 75 negative_sampler """basic""" +818 75 evaluator """rankbased""" +818 76 dataset """kinships""" +818 76 model """transe""" +818 76 loss """nssa""" +818 76 regularizer """no""" +818 76 optimizer """adadelta""" +818 76 training_loop """owa""" +818 76 negative_sampler """basic""" +818 76 evaluator """rankbased""" +818 77 dataset """kinships""" +818 77 model """transe""" +818 77 loss """nssa""" +818 77 regularizer """no""" +818 77 optimizer """adadelta""" +818 77 training_loop """owa""" +818 77 negative_sampler """basic""" +818 77 evaluator """rankbased""" +818 78 dataset """kinships""" +818 78 model """transe""" +818 78 loss """nssa""" +818 78 regularizer """no""" +818 78 optimizer """adadelta""" +818 78 training_loop """owa""" +818 78 negative_sampler """basic""" +818 78 evaluator """rankbased""" +818 79 dataset """kinships""" +818 79 model """transe""" +818 79 loss """nssa""" +818 79 regularizer """no""" +818 79 optimizer """adadelta""" +818 79 training_loop """owa""" +818 79 negative_sampler """basic""" +818 79 evaluator """rankbased""" +818 80 dataset """kinships""" +818 80 model """transe""" +818 80 loss """nssa""" +818 80 regularizer """no""" +818 80 optimizer """adadelta""" +818 80 training_loop """owa""" 
+818 80 negative_sampler """basic""" +818 80 evaluator """rankbased""" +818 81 dataset """kinships""" +818 81 model """transe""" +818 81 loss """nssa""" +818 81 regularizer """no""" +818 81 optimizer """adadelta""" +818 81 training_loop """owa""" +818 81 negative_sampler """basic""" +818 81 evaluator """rankbased""" +818 82 dataset """kinships""" +818 82 model """transe""" +818 82 loss """nssa""" +818 82 regularizer """no""" +818 82 optimizer """adadelta""" +818 82 training_loop """owa""" +818 82 negative_sampler """basic""" +818 82 evaluator """rankbased""" +818 83 dataset """kinships""" +818 83 model """transe""" +818 83 loss """nssa""" +818 83 regularizer """no""" +818 83 optimizer """adadelta""" +818 83 training_loop """owa""" +818 83 negative_sampler """basic""" +818 83 evaluator """rankbased""" +818 84 dataset """kinships""" +818 84 model """transe""" +818 84 loss """nssa""" +818 84 regularizer """no""" +818 84 optimizer """adadelta""" +818 84 training_loop """owa""" +818 84 negative_sampler """basic""" +818 84 evaluator """rankbased""" +818 85 dataset """kinships""" +818 85 model """transe""" +818 85 loss """nssa""" +818 85 regularizer """no""" +818 85 optimizer """adadelta""" +818 85 training_loop """owa""" +818 85 negative_sampler """basic""" +818 85 evaluator """rankbased""" +818 86 dataset """kinships""" +818 86 model """transe""" +818 86 loss """nssa""" +818 86 regularizer """no""" +818 86 optimizer """adadelta""" +818 86 training_loop """owa""" +818 86 negative_sampler """basic""" +818 86 evaluator """rankbased""" +818 87 dataset """kinships""" +818 87 model """transe""" +818 87 loss """nssa""" +818 87 regularizer """no""" +818 87 optimizer """adadelta""" +818 87 training_loop """owa""" +818 87 negative_sampler """basic""" +818 87 evaluator """rankbased""" +818 88 dataset """kinships""" +818 88 model """transe""" +818 88 loss """nssa""" +818 88 regularizer """no""" +818 88 optimizer """adadelta""" +818 88 training_loop """owa""" +818 88 
negative_sampler """basic""" +818 88 evaluator """rankbased""" +818 89 dataset """kinships""" +818 89 model """transe""" +818 89 loss """nssa""" +818 89 regularizer """no""" +818 89 optimizer """adadelta""" +818 89 training_loop """owa""" +818 89 negative_sampler """basic""" +818 89 evaluator """rankbased""" +818 90 dataset """kinships""" +818 90 model """transe""" +818 90 loss """nssa""" +818 90 regularizer """no""" +818 90 optimizer """adadelta""" +818 90 training_loop """owa""" +818 90 negative_sampler """basic""" +818 90 evaluator """rankbased""" +818 91 dataset """kinships""" +818 91 model """transe""" +818 91 loss """nssa""" +818 91 regularizer """no""" +818 91 optimizer """adadelta""" +818 91 training_loop """owa""" +818 91 negative_sampler """basic""" +818 91 evaluator """rankbased""" +818 92 dataset """kinships""" +818 92 model """transe""" +818 92 loss """nssa""" +818 92 regularizer """no""" +818 92 optimizer """adadelta""" +818 92 training_loop """owa""" +818 92 negative_sampler """basic""" +818 92 evaluator """rankbased""" +818 93 dataset """kinships""" +818 93 model """transe""" +818 93 loss """nssa""" +818 93 regularizer """no""" +818 93 optimizer """adadelta""" +818 93 training_loop """owa""" +818 93 negative_sampler """basic""" +818 93 evaluator """rankbased""" +818 94 dataset """kinships""" +818 94 model """transe""" +818 94 loss """nssa""" +818 94 regularizer """no""" +818 94 optimizer """adadelta""" +818 94 training_loop """owa""" +818 94 negative_sampler """basic""" +818 94 evaluator """rankbased""" +818 95 dataset """kinships""" +818 95 model """transe""" +818 95 loss """nssa""" +818 95 regularizer """no""" +818 95 optimizer """adadelta""" +818 95 training_loop """owa""" +818 95 negative_sampler """basic""" +818 95 evaluator """rankbased""" +818 96 dataset """kinships""" +818 96 model """transe""" +818 96 loss """nssa""" +818 96 regularizer """no""" +818 96 optimizer """adadelta""" +818 96 training_loop """owa""" +818 96 negative_sampler 
"""basic""" +818 96 evaluator """rankbased""" +818 97 dataset """kinships""" +818 97 model """transe""" +818 97 loss """nssa""" +818 97 regularizer """no""" +818 97 optimizer """adadelta""" +818 97 training_loop """owa""" +818 97 negative_sampler """basic""" +818 97 evaluator """rankbased""" +818 98 dataset """kinships""" +818 98 model """transe""" +818 98 loss """nssa""" +818 98 regularizer """no""" +818 98 optimizer """adadelta""" +818 98 training_loop """owa""" +818 98 negative_sampler """basic""" +818 98 evaluator """rankbased""" +818 99 dataset """kinships""" +818 99 model """transe""" +818 99 loss """nssa""" +818 99 regularizer """no""" +818 99 optimizer """adadelta""" +818 99 training_loop """owa""" +818 99 negative_sampler """basic""" +818 99 evaluator """rankbased""" +818 100 dataset """kinships""" +818 100 model """transe""" +818 100 loss """nssa""" +818 100 regularizer """no""" +818 100 optimizer """adadelta""" +818 100 training_loop """owa""" +818 100 negative_sampler """basic""" +818 100 evaluator """rankbased""" +819 1 model.embedding_dim 1.0 +819 1 model.scoring_fct_norm 2.0 +819 1 optimizer.lr 0.0011688571929113938 +819 1 training.batch_size 2.0 +819 1 training.label_smoothing 0.00752684210672482 +819 2 model.embedding_dim 1.0 +819 2 model.scoring_fct_norm 2.0 +819 2 optimizer.lr 0.0018762174315525855 +819 2 training.batch_size 1.0 +819 2 training.label_smoothing 0.45675487111204394 +819 3 model.embedding_dim 2.0 +819 3 model.scoring_fct_norm 2.0 +819 3 optimizer.lr 0.009555413167255796 +819 3 training.batch_size 2.0 +819 3 training.label_smoothing 0.06436683471965082 +819 4 model.embedding_dim 2.0 +819 4 model.scoring_fct_norm 2.0 +819 4 optimizer.lr 0.0023491722818209243 +819 4 training.batch_size 1.0 +819 4 training.label_smoothing 0.0019457127679666335 +819 5 model.embedding_dim 1.0 +819 5 model.scoring_fct_norm 2.0 +819 5 optimizer.lr 0.014987706094434955 +819 5 training.batch_size 2.0 +819 5 training.label_smoothing 0.015032234433276493 +819 6 
model.embedding_dim 0.0 +819 6 model.scoring_fct_norm 2.0 +819 6 optimizer.lr 0.007379875773690787 +819 6 training.batch_size 2.0 +819 6 training.label_smoothing 0.13746813331674165 +819 7 model.embedding_dim 1.0 +819 7 model.scoring_fct_norm 2.0 +819 7 optimizer.lr 0.006921083409353898 +819 7 training.batch_size 2.0 +819 7 training.label_smoothing 0.015313039542331312 +819 8 model.embedding_dim 2.0 +819 8 model.scoring_fct_norm 2.0 +819 8 optimizer.lr 0.061370423647045466 +819 8 training.batch_size 2.0 +819 8 training.label_smoothing 0.2697504036287913 +819 9 model.embedding_dim 2.0 +819 9 model.scoring_fct_norm 1.0 +819 9 optimizer.lr 0.008261005320004382 +819 9 training.batch_size 1.0 +819 9 training.label_smoothing 0.05645235709118134 +819 10 model.embedding_dim 2.0 +819 10 model.scoring_fct_norm 2.0 +819 10 optimizer.lr 0.011440118553280726 +819 10 training.batch_size 2.0 +819 10 training.label_smoothing 0.3665157757659146 +819 11 model.embedding_dim 0.0 +819 11 model.scoring_fct_norm 1.0 +819 11 optimizer.lr 0.005089627060306415 +819 11 training.batch_size 0.0 +819 11 training.label_smoothing 0.02956039047121732 +819 12 model.embedding_dim 1.0 +819 12 model.scoring_fct_norm 1.0 +819 12 optimizer.lr 0.0069142813578832755 +819 12 training.batch_size 0.0 +819 12 training.label_smoothing 0.11269620061111311 +819 13 model.embedding_dim 2.0 +819 13 model.scoring_fct_norm 1.0 +819 13 optimizer.lr 0.02167837495070326 +819 13 training.batch_size 0.0 +819 13 training.label_smoothing 0.2742256684173835 +819 14 model.embedding_dim 2.0 +819 14 model.scoring_fct_norm 2.0 +819 14 optimizer.lr 0.0128198455023542 +819 14 training.batch_size 1.0 +819 14 training.label_smoothing 0.010730891696007352 +819 15 model.embedding_dim 1.0 +819 15 model.scoring_fct_norm 1.0 +819 15 optimizer.lr 0.02140753382150367 +819 15 training.batch_size 1.0 +819 15 training.label_smoothing 0.0016722388450410922 +819 16 model.embedding_dim 1.0 +819 16 model.scoring_fct_norm 2.0 +819 16 optimizer.lr 
0.01913304260749266 +819 16 training.batch_size 0.0 +819 16 training.label_smoothing 0.00124196667391414 +819 17 model.embedding_dim 2.0 +819 17 model.scoring_fct_norm 1.0 +819 17 optimizer.lr 0.016274752516864138 +819 17 training.batch_size 2.0 +819 17 training.label_smoothing 0.005527876580546724 +819 18 model.embedding_dim 1.0 +819 18 model.scoring_fct_norm 1.0 +819 18 optimizer.lr 0.05335215017625808 +819 18 training.batch_size 1.0 +819 18 training.label_smoothing 0.001233802797144778 +819 19 model.embedding_dim 2.0 +819 19 model.scoring_fct_norm 1.0 +819 19 optimizer.lr 0.0034962218510665066 +819 19 training.batch_size 1.0 +819 19 training.label_smoothing 0.0032798899468975307 +819 20 model.embedding_dim 2.0 +819 20 model.scoring_fct_norm 1.0 +819 20 optimizer.lr 0.08942857147811369 +819 20 training.batch_size 0.0 +819 20 training.label_smoothing 0.43328051006182555 +819 21 model.embedding_dim 2.0 +819 21 model.scoring_fct_norm 2.0 +819 21 optimizer.lr 0.011173693150694 +819 21 training.batch_size 2.0 +819 21 training.label_smoothing 0.005786705533030796 +819 22 model.embedding_dim 1.0 +819 22 model.scoring_fct_norm 1.0 +819 22 optimizer.lr 0.013063317467220153 +819 22 training.batch_size 0.0 +819 22 training.label_smoothing 0.20306211404263128 +819 23 model.embedding_dim 2.0 +819 23 model.scoring_fct_norm 2.0 +819 23 optimizer.lr 0.026864678653047334 +819 23 training.batch_size 1.0 +819 23 training.label_smoothing 0.0013625980312701985 +819 24 model.embedding_dim 0.0 +819 24 model.scoring_fct_norm 1.0 +819 24 optimizer.lr 0.07483892730119207 +819 24 training.batch_size 1.0 +819 24 training.label_smoothing 0.15890431747651038 +819 25 model.embedding_dim 0.0 +819 25 model.scoring_fct_norm 2.0 +819 25 optimizer.lr 0.004593872971576733 +819 25 training.batch_size 1.0 +819 25 training.label_smoothing 0.14390361236995422 +819 26 model.embedding_dim 1.0 +819 26 model.scoring_fct_norm 1.0 +819 26 optimizer.lr 0.0026302053733485583 +819 26 training.batch_size 2.0 +819 
26 training.label_smoothing 0.01331895090375867 +819 27 model.embedding_dim 0.0 +819 27 model.scoring_fct_norm 1.0 +819 27 optimizer.lr 0.032416832540430304 +819 27 training.batch_size 2.0 +819 27 training.label_smoothing 0.01164372641593441 +819 28 model.embedding_dim 2.0 +819 28 model.scoring_fct_norm 2.0 +819 28 optimizer.lr 0.012175108140431878 +819 28 training.batch_size 0.0 +819 28 training.label_smoothing 0.020927999050932535 +819 29 model.embedding_dim 1.0 +819 29 model.scoring_fct_norm 1.0 +819 29 optimizer.lr 0.004521623244637214 +819 29 training.batch_size 1.0 +819 29 training.label_smoothing 0.2144323966862874 +819 30 model.embedding_dim 2.0 +819 30 model.scoring_fct_norm 1.0 +819 30 optimizer.lr 0.04002678318791054 +819 30 training.batch_size 0.0 +819 30 training.label_smoothing 0.08736549031204659 +819 31 model.embedding_dim 2.0 +819 31 model.scoring_fct_norm 1.0 +819 31 optimizer.lr 0.00848757111951625 +819 31 training.batch_size 1.0 +819 31 training.label_smoothing 0.4431803699388669 +819 32 model.embedding_dim 1.0 +819 32 model.scoring_fct_norm 2.0 +819 32 optimizer.lr 0.012981975107148868 +819 32 training.batch_size 1.0 +819 32 training.label_smoothing 0.0010144325059311822 +819 33 model.embedding_dim 1.0 +819 33 model.scoring_fct_norm 1.0 +819 33 optimizer.lr 0.006081135504506888 +819 33 training.batch_size 2.0 +819 33 training.label_smoothing 0.8127598674273687 +819 34 model.embedding_dim 2.0 +819 34 model.scoring_fct_norm 2.0 +819 34 optimizer.lr 0.09509223851960867 +819 34 training.batch_size 1.0 +819 34 training.label_smoothing 0.01471990792632144 +819 35 model.embedding_dim 0.0 +819 35 model.scoring_fct_norm 2.0 +819 35 optimizer.lr 0.036973578933414054 +819 35 training.batch_size 1.0 +819 35 training.label_smoothing 0.008084053222009353 +819 36 model.embedding_dim 2.0 +819 36 model.scoring_fct_norm 1.0 +819 36 optimizer.lr 0.0011300023231109485 +819 36 training.batch_size 2.0 +819 36 training.label_smoothing 0.6035459091030393 +819 37 
model.embedding_dim 2.0 +819 37 model.scoring_fct_norm 1.0 +819 37 optimizer.lr 0.00328753624855312 +819 37 training.batch_size 0.0 +819 37 training.label_smoothing 0.08719260121913372 +819 38 model.embedding_dim 2.0 +819 38 model.scoring_fct_norm 2.0 +819 38 optimizer.lr 0.015174232261743567 +819 38 training.batch_size 0.0 +819 38 training.label_smoothing 0.003928693454328118 +819 39 model.embedding_dim 2.0 +819 39 model.scoring_fct_norm 2.0 +819 39 optimizer.lr 0.002618586676458315 +819 39 training.batch_size 1.0 +819 39 training.label_smoothing 0.06435831763456235 +819 40 model.embedding_dim 0.0 +819 40 model.scoring_fct_norm 2.0 +819 40 optimizer.lr 0.018797353629151255 +819 40 training.batch_size 0.0 +819 40 training.label_smoothing 0.08874809600411342 +819 41 model.embedding_dim 2.0 +819 41 model.scoring_fct_norm 2.0 +819 41 optimizer.lr 0.00803077029512673 +819 41 training.batch_size 1.0 +819 41 training.label_smoothing 0.006102657571123486 +819 42 model.embedding_dim 0.0 +819 42 model.scoring_fct_norm 2.0 +819 42 optimizer.lr 0.01570177504363224 +819 42 training.batch_size 2.0 +819 42 training.label_smoothing 0.0028215407707345053 +819 43 model.embedding_dim 2.0 +819 43 model.scoring_fct_norm 1.0 +819 43 optimizer.lr 0.04589920211868061 +819 43 training.batch_size 0.0 +819 43 training.label_smoothing 0.0035574672542994694 +819 44 model.embedding_dim 1.0 +819 44 model.scoring_fct_norm 1.0 +819 44 optimizer.lr 0.012638844813388436 +819 44 training.batch_size 0.0 +819 44 training.label_smoothing 0.15494055714650248 +819 45 model.embedding_dim 1.0 +819 45 model.scoring_fct_norm 1.0 +819 45 optimizer.lr 0.0019930339168336814 +819 45 training.batch_size 1.0 +819 45 training.label_smoothing 0.0029513974270433336 +819 46 model.embedding_dim 1.0 +819 46 model.scoring_fct_norm 2.0 +819 46 optimizer.lr 0.003510301116592324 +819 46 training.batch_size 0.0 +819 46 training.label_smoothing 0.13719799497559323 +819 47 model.embedding_dim 1.0 +819 47 model.scoring_fct_norm 
1.0 +819 47 optimizer.lr 0.007703123126895796 +819 47 training.batch_size 0.0 +819 47 training.label_smoothing 0.011791797741981745 +819 48 model.embedding_dim 0.0 +819 48 model.scoring_fct_norm 2.0 +819 48 optimizer.lr 0.0472295285410564 +819 48 training.batch_size 2.0 +819 48 training.label_smoothing 0.9812151038667961 +819 49 model.embedding_dim 0.0 +819 49 model.scoring_fct_norm 2.0 +819 49 optimizer.lr 0.0652820241952816 +819 49 training.batch_size 2.0 +819 49 training.label_smoothing 0.0056346537237663905 +819 50 model.embedding_dim 0.0 +819 50 model.scoring_fct_norm 1.0 +819 50 optimizer.lr 0.003698062504583531 +819 50 training.batch_size 2.0 +819 50 training.label_smoothing 0.08400791924248933 +819 51 model.embedding_dim 0.0 +819 51 model.scoring_fct_norm 1.0 +819 51 optimizer.lr 0.004675695888099212 +819 51 training.batch_size 2.0 +819 51 training.label_smoothing 0.28090753576482785 +819 52 model.embedding_dim 1.0 +819 52 model.scoring_fct_norm 1.0 +819 52 optimizer.lr 0.001996684056707696 +819 52 training.batch_size 2.0 +819 52 training.label_smoothing 0.00456793618947155 +819 53 model.embedding_dim 0.0 +819 53 model.scoring_fct_norm 2.0 +819 53 optimizer.lr 0.0028264669776065913 +819 53 training.batch_size 2.0 +819 53 training.label_smoothing 0.1449510356894556 +819 54 model.embedding_dim 0.0 +819 54 model.scoring_fct_norm 1.0 +819 54 optimizer.lr 0.02432177564859244 +819 54 training.batch_size 1.0 +819 54 training.label_smoothing 0.022598907176924272 +819 55 model.embedding_dim 2.0 +819 55 model.scoring_fct_norm 2.0 +819 55 optimizer.lr 0.005870956469357394 +819 55 training.batch_size 0.0 +819 55 training.label_smoothing 0.009163727621147129 +819 56 model.embedding_dim 1.0 +819 56 model.scoring_fct_norm 2.0 +819 56 optimizer.lr 0.07150069281911813 +819 56 training.batch_size 2.0 +819 56 training.label_smoothing 0.021937055788419858 +819 57 model.embedding_dim 0.0 +819 57 model.scoring_fct_norm 1.0 +819 57 optimizer.lr 0.05482519754502187 +819 57 
training.batch_size 2.0 +819 57 training.label_smoothing 0.006009159811036206 +819 58 model.embedding_dim 2.0 +819 58 model.scoring_fct_norm 2.0 +819 58 optimizer.lr 0.001274152610455691 +819 58 training.batch_size 0.0 +819 58 training.label_smoothing 0.013389589623647105 +819 59 model.embedding_dim 2.0 +819 59 model.scoring_fct_norm 1.0 +819 59 optimizer.lr 0.011619688350295993 +819 59 training.batch_size 1.0 +819 59 training.label_smoothing 0.8146349261155682 +819 60 model.embedding_dim 1.0 +819 60 model.scoring_fct_norm 2.0 +819 60 optimizer.lr 0.08790027713261829 +819 60 training.batch_size 1.0 +819 60 training.label_smoothing 0.4822930751790632 +819 61 model.embedding_dim 0.0 +819 61 model.scoring_fct_norm 2.0 +819 61 optimizer.lr 0.002754572255458387 +819 61 training.batch_size 2.0 +819 61 training.label_smoothing 0.2780135251511261 +819 62 model.embedding_dim 2.0 +819 62 model.scoring_fct_norm 2.0 +819 62 optimizer.lr 0.011675041179253928 +819 62 training.batch_size 0.0 +819 62 training.label_smoothing 0.009091261727736753 +819 63 model.embedding_dim 2.0 +819 63 model.scoring_fct_norm 2.0 +819 63 optimizer.lr 0.07203597080525626 +819 63 training.batch_size 0.0 +819 63 training.label_smoothing 0.005842315254570499 +819 64 model.embedding_dim 1.0 +819 64 model.scoring_fct_norm 2.0 +819 64 optimizer.lr 0.07819894549018347 +819 64 training.batch_size 1.0 +819 64 training.label_smoothing 0.0019088746549369773 +819 65 model.embedding_dim 0.0 +819 65 model.scoring_fct_norm 2.0 +819 65 optimizer.lr 0.05500698887079732 +819 65 training.batch_size 2.0 +819 65 training.label_smoothing 0.28346035117700286 +819 66 model.embedding_dim 0.0 +819 66 model.scoring_fct_norm 2.0 +819 66 optimizer.lr 0.06220165276550549 +819 66 training.batch_size 0.0 +819 66 training.label_smoothing 0.01836430760343861 +819 67 model.embedding_dim 0.0 +819 67 model.scoring_fct_norm 1.0 +819 67 optimizer.lr 0.0025971084805349333 +819 67 training.batch_size 1.0 +819 67 training.label_smoothing 
0.015293624569961426 +819 68 model.embedding_dim 1.0 +819 68 model.scoring_fct_norm 1.0 +819 68 optimizer.lr 0.010207926120627727 +819 68 training.batch_size 2.0 +819 68 training.label_smoothing 0.04136270037312598 +819 69 model.embedding_dim 1.0 +819 69 model.scoring_fct_norm 2.0 +819 69 optimizer.lr 0.0022151613937674472 +819 69 training.batch_size 1.0 +819 69 training.label_smoothing 0.025944380779782887 +819 70 model.embedding_dim 0.0 +819 70 model.scoring_fct_norm 2.0 +819 70 optimizer.lr 0.06095071772173325 +819 70 training.batch_size 1.0 +819 70 training.label_smoothing 0.5547566339066097 +819 71 model.embedding_dim 1.0 +819 71 model.scoring_fct_norm 2.0 +819 71 optimizer.lr 0.0022118718734055525 +819 71 training.batch_size 1.0 +819 71 training.label_smoothing 0.7279880099043615 +819 72 model.embedding_dim 0.0 +819 72 model.scoring_fct_norm 2.0 +819 72 optimizer.lr 0.012557570441588164 +819 72 training.batch_size 2.0 +819 72 training.label_smoothing 0.0985887844851156 +819 73 model.embedding_dim 2.0 +819 73 model.scoring_fct_norm 1.0 +819 73 optimizer.lr 0.0019774211806729137 +819 73 training.batch_size 1.0 +819 73 training.label_smoothing 0.0012419454759682866 +819 74 model.embedding_dim 1.0 +819 74 model.scoring_fct_norm 1.0 +819 74 optimizer.lr 0.0019267122446072777 +819 74 training.batch_size 2.0 +819 74 training.label_smoothing 0.522950002620419 +819 75 model.embedding_dim 1.0 +819 75 model.scoring_fct_norm 2.0 +819 75 optimizer.lr 0.013987192596431545 +819 75 training.batch_size 0.0 +819 75 training.label_smoothing 0.004703031603992077 +819 76 model.embedding_dim 0.0 +819 76 model.scoring_fct_norm 2.0 +819 76 optimizer.lr 0.002277578368175809 +819 76 training.batch_size 0.0 +819 76 training.label_smoothing 0.03910882509743253 +819 77 model.embedding_dim 2.0 +819 77 model.scoring_fct_norm 2.0 +819 77 optimizer.lr 0.003405360099058084 +819 77 training.batch_size 1.0 +819 77 training.label_smoothing 0.7099035887123922 +819 78 model.embedding_dim 1.0 +819 
78 model.scoring_fct_norm 2.0 +819 78 optimizer.lr 0.003291982304089906 +819 78 training.batch_size 2.0 +819 78 training.label_smoothing 0.8732962101921259 +819 79 model.embedding_dim 2.0 +819 79 model.scoring_fct_norm 1.0 +819 79 optimizer.lr 0.011330810508281195 +819 79 training.batch_size 1.0 +819 79 training.label_smoothing 0.0012847361879086854 +819 80 model.embedding_dim 1.0 +819 80 model.scoring_fct_norm 2.0 +819 80 optimizer.lr 0.003321775896308767 +819 80 training.batch_size 1.0 +819 80 training.label_smoothing 0.0011416440947914495 +819 81 model.embedding_dim 0.0 +819 81 model.scoring_fct_norm 2.0 +819 81 optimizer.lr 0.08355549790210033 +819 81 training.batch_size 0.0 +819 81 training.label_smoothing 0.0040691963154752 +819 82 model.embedding_dim 0.0 +819 82 model.scoring_fct_norm 2.0 +819 82 optimizer.lr 0.0012255911276912697 +819 82 training.batch_size 0.0 +819 82 training.label_smoothing 0.07149874817106318 +819 83 model.embedding_dim 2.0 +819 83 model.scoring_fct_norm 2.0 +819 83 optimizer.lr 0.03308825595412098 +819 83 training.batch_size 2.0 +819 83 training.label_smoothing 0.015501007449571442 +819 84 model.embedding_dim 0.0 +819 84 model.scoring_fct_norm 2.0 +819 84 optimizer.lr 0.005319595733019315 +819 84 training.batch_size 0.0 +819 84 training.label_smoothing 0.9149713972896995 +819 85 model.embedding_dim 2.0 +819 85 model.scoring_fct_norm 2.0 +819 85 optimizer.lr 0.0024566269428396626 +819 85 training.batch_size 2.0 +819 85 training.label_smoothing 0.07834527519768258 +819 86 model.embedding_dim 2.0 +819 86 model.scoring_fct_norm 2.0 +819 86 optimizer.lr 0.056605389282500126 +819 86 training.batch_size 2.0 +819 86 training.label_smoothing 0.0031158962697315205 +819 87 model.embedding_dim 2.0 +819 87 model.scoring_fct_norm 2.0 +819 87 optimizer.lr 0.036856031482074246 +819 87 training.batch_size 2.0 +819 87 training.label_smoothing 0.00326829023035635 +819 88 model.embedding_dim 1.0 +819 88 model.scoring_fct_norm 1.0 +819 88 optimizer.lr 
0.0016885802545044228 +819 88 training.batch_size 2.0 +819 88 training.label_smoothing 0.06098326590383102 +819 89 model.embedding_dim 0.0 +819 89 model.scoring_fct_norm 2.0 +819 89 optimizer.lr 0.006974389189953807 +819 89 training.batch_size 0.0 +819 89 training.label_smoothing 0.0012358482901922258 +819 90 model.embedding_dim 2.0 +819 90 model.scoring_fct_norm 2.0 +819 90 optimizer.lr 0.001050463395108159 +819 90 training.batch_size 1.0 +819 90 training.label_smoothing 0.005926957362996259 +819 91 model.embedding_dim 0.0 +819 91 model.scoring_fct_norm 2.0 +819 91 optimizer.lr 0.017240995874572777 +819 91 training.batch_size 0.0 +819 91 training.label_smoothing 0.00911110113133029 +819 92 model.embedding_dim 1.0 +819 92 model.scoring_fct_norm 1.0 +819 92 optimizer.lr 0.09245605380001724 +819 92 training.batch_size 1.0 +819 92 training.label_smoothing 0.002740982073690993 +819 93 model.embedding_dim 1.0 +819 93 model.scoring_fct_norm 2.0 +819 93 optimizer.lr 0.0010193861139844137 +819 93 training.batch_size 0.0 +819 93 training.label_smoothing 0.13342549270208554 +819 94 model.embedding_dim 1.0 +819 94 model.scoring_fct_norm 1.0 +819 94 optimizer.lr 0.01568547818505405 +819 94 training.batch_size 1.0 +819 94 training.label_smoothing 0.006956157056623051 +819 95 model.embedding_dim 2.0 +819 95 model.scoring_fct_norm 2.0 +819 95 optimizer.lr 0.046401124354831694 +819 95 training.batch_size 1.0 +819 95 training.label_smoothing 0.012059061590524214 +819 96 model.embedding_dim 2.0 +819 96 model.scoring_fct_norm 2.0 +819 96 optimizer.lr 0.018609722659713723 +819 96 training.batch_size 1.0 +819 96 training.label_smoothing 0.20233085433388168 +819 97 model.embedding_dim 2.0 +819 97 model.scoring_fct_norm 2.0 +819 97 optimizer.lr 0.006106641720295917 +819 97 training.batch_size 0.0 +819 97 training.label_smoothing 0.01719130292903393 +819 98 model.embedding_dim 0.0 +819 98 model.scoring_fct_norm 1.0 +819 98 optimizer.lr 0.041652698976504934 +819 98 training.batch_size 1.0 
+819 98 training.label_smoothing 0.009374482111288615 +819 99 model.embedding_dim 0.0 +819 99 model.scoring_fct_norm 2.0 +819 99 optimizer.lr 0.03220041219470774 +819 99 training.batch_size 1.0 +819 99 training.label_smoothing 0.0015771909974018063 +819 100 model.embedding_dim 0.0 +819 100 model.scoring_fct_norm 2.0 +819 100 optimizer.lr 0.006609342255593892 +819 100 training.batch_size 1.0 +819 100 training.label_smoothing 0.34265186739055614 +819 1 dataset """kinships""" +819 1 model """transe""" +819 1 loss """bceaftersigmoid""" +819 1 regularizer """no""" +819 1 optimizer """adam""" +819 1 training_loop """lcwa""" +819 1 evaluator """rankbased""" +819 2 dataset """kinships""" +819 2 model """transe""" +819 2 loss """bceaftersigmoid""" +819 2 regularizer """no""" +819 2 optimizer """adam""" +819 2 training_loop """lcwa""" +819 2 evaluator """rankbased""" +819 3 dataset """kinships""" +819 3 model """transe""" +819 3 loss """bceaftersigmoid""" +819 3 regularizer """no""" +819 3 optimizer """adam""" +819 3 training_loop """lcwa""" +819 3 evaluator """rankbased""" +819 4 dataset """kinships""" +819 4 model """transe""" +819 4 loss """bceaftersigmoid""" +819 4 regularizer """no""" +819 4 optimizer """adam""" +819 4 training_loop """lcwa""" +819 4 evaluator """rankbased""" +819 5 dataset """kinships""" +819 5 model """transe""" +819 5 loss """bceaftersigmoid""" +819 5 regularizer """no""" +819 5 optimizer """adam""" +819 5 training_loop """lcwa""" +819 5 evaluator """rankbased""" +819 6 dataset """kinships""" +819 6 model """transe""" +819 6 loss """bceaftersigmoid""" +819 6 regularizer """no""" +819 6 optimizer """adam""" +819 6 training_loop """lcwa""" +819 6 evaluator """rankbased""" +819 7 dataset """kinships""" +819 7 model """transe""" +819 7 loss """bceaftersigmoid""" +819 7 regularizer """no""" +819 7 optimizer """adam""" +819 7 training_loop """lcwa""" +819 7 evaluator """rankbased""" +819 8 dataset """kinships""" +819 8 model """transe""" +819 8 loss 
"""bceaftersigmoid""" +819 8 regularizer """no""" +819 8 optimizer """adam""" +819 8 training_loop """lcwa""" +819 8 evaluator """rankbased""" +819 9 dataset """kinships""" +819 9 model """transe""" +819 9 loss """bceaftersigmoid""" +819 9 regularizer """no""" +819 9 optimizer """adam""" +819 9 training_loop """lcwa""" +819 9 evaluator """rankbased""" +819 10 dataset """kinships""" +819 10 model """transe""" +819 10 loss """bceaftersigmoid""" +819 10 regularizer """no""" +819 10 optimizer """adam""" +819 10 training_loop """lcwa""" +819 10 evaluator """rankbased""" +819 11 dataset """kinships""" +819 11 model """transe""" +819 11 loss """bceaftersigmoid""" +819 11 regularizer """no""" +819 11 optimizer """adam""" +819 11 training_loop """lcwa""" +819 11 evaluator """rankbased""" +819 12 dataset """kinships""" +819 12 model """transe""" +819 12 loss """bceaftersigmoid""" +819 12 regularizer """no""" +819 12 optimizer """adam""" +819 12 training_loop """lcwa""" +819 12 evaluator """rankbased""" +819 13 dataset """kinships""" +819 13 model """transe""" +819 13 loss """bceaftersigmoid""" +819 13 regularizer """no""" +819 13 optimizer """adam""" +819 13 training_loop """lcwa""" +819 13 evaluator """rankbased""" +819 14 dataset """kinships""" +819 14 model """transe""" +819 14 loss """bceaftersigmoid""" +819 14 regularizer """no""" +819 14 optimizer """adam""" +819 14 training_loop """lcwa""" +819 14 evaluator """rankbased""" +819 15 dataset """kinships""" +819 15 model """transe""" +819 15 loss """bceaftersigmoid""" +819 15 regularizer """no""" +819 15 optimizer """adam""" +819 15 training_loop """lcwa""" +819 15 evaluator """rankbased""" +819 16 dataset """kinships""" +819 16 model """transe""" +819 16 loss """bceaftersigmoid""" +819 16 regularizer """no""" +819 16 optimizer """adam""" +819 16 training_loop """lcwa""" +819 16 evaluator """rankbased""" +819 17 dataset """kinships""" +819 17 model """transe""" +819 17 loss """bceaftersigmoid""" +819 17 regularizer 
"""no""" +819 17 optimizer """adam""" +819 17 training_loop """lcwa""" +819 17 evaluator """rankbased""" +819 18 dataset """kinships""" +819 18 model """transe""" +819 18 loss """bceaftersigmoid""" +819 18 regularizer """no""" +819 18 optimizer """adam""" +819 18 training_loop """lcwa""" +819 18 evaluator """rankbased""" +819 19 dataset """kinships""" +819 19 model """transe""" +819 19 loss """bceaftersigmoid""" +819 19 regularizer """no""" +819 19 optimizer """adam""" +819 19 training_loop """lcwa""" +819 19 evaluator """rankbased""" +819 20 dataset """kinships""" +819 20 model """transe""" +819 20 loss """bceaftersigmoid""" +819 20 regularizer """no""" +819 20 optimizer """adam""" +819 20 training_loop """lcwa""" +819 20 evaluator """rankbased""" +819 21 dataset """kinships""" +819 21 model """transe""" +819 21 loss """bceaftersigmoid""" +819 21 regularizer """no""" +819 21 optimizer """adam""" +819 21 training_loop """lcwa""" +819 21 evaluator """rankbased""" +819 22 dataset """kinships""" +819 22 model """transe""" +819 22 loss """bceaftersigmoid""" +819 22 regularizer """no""" +819 22 optimizer """adam""" +819 22 training_loop """lcwa""" +819 22 evaluator """rankbased""" +819 23 dataset """kinships""" +819 23 model """transe""" +819 23 loss """bceaftersigmoid""" +819 23 regularizer """no""" +819 23 optimizer """adam""" +819 23 training_loop """lcwa""" +819 23 evaluator """rankbased""" +819 24 dataset """kinships""" +819 24 model """transe""" +819 24 loss """bceaftersigmoid""" +819 24 regularizer """no""" +819 24 optimizer """adam""" +819 24 training_loop """lcwa""" +819 24 evaluator """rankbased""" +819 25 dataset """kinships""" +819 25 model """transe""" +819 25 loss """bceaftersigmoid""" +819 25 regularizer """no""" +819 25 optimizer """adam""" +819 25 training_loop """lcwa""" +819 25 evaluator """rankbased""" +819 26 dataset """kinships""" +819 26 model """transe""" +819 26 loss """bceaftersigmoid""" +819 26 regularizer """no""" +819 26 optimizer """adam""" 
+819 26 training_loop """lcwa""" +819 26 evaluator """rankbased""" +819 27 dataset """kinships""" +819 27 model """transe""" +819 27 loss """bceaftersigmoid""" +819 27 regularizer """no""" +819 27 optimizer """adam""" +819 27 training_loop """lcwa""" +819 27 evaluator """rankbased""" +819 28 dataset """kinships""" +819 28 model """transe""" +819 28 loss """bceaftersigmoid""" +819 28 regularizer """no""" +819 28 optimizer """adam""" +819 28 training_loop """lcwa""" +819 28 evaluator """rankbased""" +819 29 dataset """kinships""" +819 29 model """transe""" +819 29 loss """bceaftersigmoid""" +819 29 regularizer """no""" +819 29 optimizer """adam""" +819 29 training_loop """lcwa""" +819 29 evaluator """rankbased""" +819 30 dataset """kinships""" +819 30 model """transe""" +819 30 loss """bceaftersigmoid""" +819 30 regularizer """no""" +819 30 optimizer """adam""" +819 30 training_loop """lcwa""" +819 30 evaluator """rankbased""" +819 31 dataset """kinships""" +819 31 model """transe""" +819 31 loss """bceaftersigmoid""" +819 31 regularizer """no""" +819 31 optimizer """adam""" +819 31 training_loop """lcwa""" +819 31 evaluator """rankbased""" +819 32 dataset """kinships""" +819 32 model """transe""" +819 32 loss """bceaftersigmoid""" +819 32 regularizer """no""" +819 32 optimizer """adam""" +819 32 training_loop """lcwa""" +819 32 evaluator """rankbased""" +819 33 dataset """kinships""" +819 33 model """transe""" +819 33 loss """bceaftersigmoid""" +819 33 regularizer """no""" +819 33 optimizer """adam""" +819 33 training_loop """lcwa""" +819 33 evaluator """rankbased""" +819 34 dataset """kinships""" +819 34 model """transe""" +819 34 loss """bceaftersigmoid""" +819 34 regularizer """no""" +819 34 optimizer """adam""" +819 34 training_loop """lcwa""" +819 34 evaluator """rankbased""" +819 35 dataset """kinships""" +819 35 model """transe""" +819 35 loss """bceaftersigmoid""" +819 35 regularizer """no""" +819 35 optimizer """adam""" +819 35 training_loop """lcwa""" +819 
35 evaluator """rankbased""" +819 36 dataset """kinships""" +819 36 model """transe""" +819 36 loss """bceaftersigmoid""" +819 36 regularizer """no""" +819 36 optimizer """adam""" +819 36 training_loop """lcwa""" +819 36 evaluator """rankbased""" +819 37 dataset """kinships""" +819 37 model """transe""" +819 37 loss """bceaftersigmoid""" +819 37 regularizer """no""" +819 37 optimizer """adam""" +819 37 training_loop """lcwa""" +819 37 evaluator """rankbased""" +819 38 dataset """kinships""" +819 38 model """transe""" +819 38 loss """bceaftersigmoid""" +819 38 regularizer """no""" +819 38 optimizer """adam""" +819 38 training_loop """lcwa""" +819 38 evaluator """rankbased""" +819 39 dataset """kinships""" +819 39 model """transe""" +819 39 loss """bceaftersigmoid""" +819 39 regularizer """no""" +819 39 optimizer """adam""" +819 39 training_loop """lcwa""" +819 39 evaluator """rankbased""" +819 40 dataset """kinships""" +819 40 model """transe""" +819 40 loss """bceaftersigmoid""" +819 40 regularizer """no""" +819 40 optimizer """adam""" +819 40 training_loop """lcwa""" +819 40 evaluator """rankbased""" +819 41 dataset """kinships""" +819 41 model """transe""" +819 41 loss """bceaftersigmoid""" +819 41 regularizer """no""" +819 41 optimizer """adam""" +819 41 training_loop """lcwa""" +819 41 evaluator """rankbased""" +819 42 dataset """kinships""" +819 42 model """transe""" +819 42 loss """bceaftersigmoid""" +819 42 regularizer """no""" +819 42 optimizer """adam""" +819 42 training_loop """lcwa""" +819 42 evaluator """rankbased""" +819 43 dataset """kinships""" +819 43 model """transe""" +819 43 loss """bceaftersigmoid""" +819 43 regularizer """no""" +819 43 optimizer """adam""" +819 43 training_loop """lcwa""" +819 43 evaluator """rankbased""" +819 44 dataset """kinships""" +819 44 model """transe""" +819 44 loss """bceaftersigmoid""" +819 44 regularizer """no""" +819 44 optimizer """adam""" +819 44 training_loop """lcwa""" +819 44 evaluator """rankbased""" +819 45 
dataset """kinships""" +819 45 model """transe""" +819 45 loss """bceaftersigmoid""" +819 45 regularizer """no""" +819 45 optimizer """adam""" +819 45 training_loop """lcwa""" +819 45 evaluator """rankbased""" +819 46 dataset """kinships""" +819 46 model """transe""" +819 46 loss """bceaftersigmoid""" +819 46 regularizer """no""" +819 46 optimizer """adam""" +819 46 training_loop """lcwa""" +819 46 evaluator """rankbased""" +819 47 dataset """kinships""" +819 47 model """transe""" +819 47 loss """bceaftersigmoid""" +819 47 regularizer """no""" +819 47 optimizer """adam""" +819 47 training_loop """lcwa""" +819 47 evaluator """rankbased""" +819 48 dataset """kinships""" +819 48 model """transe""" +819 48 loss """bceaftersigmoid""" +819 48 regularizer """no""" +819 48 optimizer """adam""" +819 48 training_loop """lcwa""" +819 48 evaluator """rankbased""" +819 49 dataset """kinships""" +819 49 model """transe""" +819 49 loss """bceaftersigmoid""" +819 49 regularizer """no""" +819 49 optimizer """adam""" +819 49 training_loop """lcwa""" +819 49 evaluator """rankbased""" +819 50 dataset """kinships""" +819 50 model """transe""" +819 50 loss """bceaftersigmoid""" +819 50 regularizer """no""" +819 50 optimizer """adam""" +819 50 training_loop """lcwa""" +819 50 evaluator """rankbased""" +819 51 dataset """kinships""" +819 51 model """transe""" +819 51 loss """bceaftersigmoid""" +819 51 regularizer """no""" +819 51 optimizer """adam""" +819 51 training_loop """lcwa""" +819 51 evaluator """rankbased""" +819 52 dataset """kinships""" +819 52 model """transe""" +819 52 loss """bceaftersigmoid""" +819 52 regularizer """no""" +819 52 optimizer """adam""" +819 52 training_loop """lcwa""" +819 52 evaluator """rankbased""" +819 53 dataset """kinships""" +819 53 model """transe""" +819 53 loss """bceaftersigmoid""" +819 53 regularizer """no""" +819 53 optimizer """adam""" +819 53 training_loop """lcwa""" +819 53 evaluator """rankbased""" +819 54 dataset """kinships""" +819 54 model 
"""transe""" +819 54 loss """bceaftersigmoid""" +819 54 regularizer """no""" +819 54 optimizer """adam""" +819 54 training_loop """lcwa""" +819 54 evaluator """rankbased""" +819 55 dataset """kinships""" +819 55 model """transe""" +819 55 loss """bceaftersigmoid""" +819 55 regularizer """no""" +819 55 optimizer """adam""" +819 55 training_loop """lcwa""" +819 55 evaluator """rankbased""" +819 56 dataset """kinships""" +819 56 model """transe""" +819 56 loss """bceaftersigmoid""" +819 56 regularizer """no""" +819 56 optimizer """adam""" +819 56 training_loop """lcwa""" +819 56 evaluator """rankbased""" +819 57 dataset """kinships""" +819 57 model """transe""" +819 57 loss """bceaftersigmoid""" +819 57 regularizer """no""" +819 57 optimizer """adam""" +819 57 training_loop """lcwa""" +819 57 evaluator """rankbased""" +819 58 dataset """kinships""" +819 58 model """transe""" +819 58 loss """bceaftersigmoid""" +819 58 regularizer """no""" +819 58 optimizer """adam""" +819 58 training_loop """lcwa""" +819 58 evaluator """rankbased""" +819 59 dataset """kinships""" +819 59 model """transe""" +819 59 loss """bceaftersigmoid""" +819 59 regularizer """no""" +819 59 optimizer """adam""" +819 59 training_loop """lcwa""" +819 59 evaluator """rankbased""" +819 60 dataset """kinships""" +819 60 model """transe""" +819 60 loss """bceaftersigmoid""" +819 60 regularizer """no""" +819 60 optimizer """adam""" +819 60 training_loop """lcwa""" +819 60 evaluator """rankbased""" +819 61 dataset """kinships""" +819 61 model """transe""" +819 61 loss """bceaftersigmoid""" +819 61 regularizer """no""" +819 61 optimizer """adam""" +819 61 training_loop """lcwa""" +819 61 evaluator """rankbased""" +819 62 dataset """kinships""" +819 62 model """transe""" +819 62 loss """bceaftersigmoid""" +819 62 regularizer """no""" +819 62 optimizer """adam""" +819 62 training_loop """lcwa""" +819 62 evaluator """rankbased""" +819 63 dataset """kinships""" +819 63 model """transe""" +819 63 loss 
"""bceaftersigmoid""" +819 63 regularizer """no""" +819 63 optimizer """adam""" +819 63 training_loop """lcwa""" +819 63 evaluator """rankbased""" +819 64 dataset """kinships""" +819 64 model """transe""" +819 64 loss """bceaftersigmoid""" +819 64 regularizer """no""" +819 64 optimizer """adam""" +819 64 training_loop """lcwa""" +819 64 evaluator """rankbased""" +819 65 dataset """kinships""" +819 65 model """transe""" +819 65 loss """bceaftersigmoid""" +819 65 regularizer """no""" +819 65 optimizer """adam""" +819 65 training_loop """lcwa""" +819 65 evaluator """rankbased""" +819 66 dataset """kinships""" +819 66 model """transe""" +819 66 loss """bceaftersigmoid""" +819 66 regularizer """no""" +819 66 optimizer """adam""" +819 66 training_loop """lcwa""" +819 66 evaluator """rankbased""" +819 67 dataset """kinships""" +819 67 model """transe""" +819 67 loss """bceaftersigmoid""" +819 67 regularizer """no""" +819 67 optimizer """adam""" +819 67 training_loop """lcwa""" +819 67 evaluator """rankbased""" +819 68 dataset """kinships""" +819 68 model """transe""" +819 68 loss """bceaftersigmoid""" +819 68 regularizer """no""" +819 68 optimizer """adam""" +819 68 training_loop """lcwa""" +819 68 evaluator """rankbased""" +819 69 dataset """kinships""" +819 69 model """transe""" +819 69 loss """bceaftersigmoid""" +819 69 regularizer """no""" +819 69 optimizer """adam""" +819 69 training_loop """lcwa""" +819 69 evaluator """rankbased""" +819 70 dataset """kinships""" +819 70 model """transe""" +819 70 loss """bceaftersigmoid""" +819 70 regularizer """no""" +819 70 optimizer """adam""" +819 70 training_loop """lcwa""" +819 70 evaluator """rankbased""" +819 71 dataset """kinships""" +819 71 model """transe""" +819 71 loss """bceaftersigmoid""" +819 71 regularizer """no""" +819 71 optimizer """adam""" +819 71 training_loop """lcwa""" +819 71 evaluator """rankbased""" +819 72 dataset """kinships""" +819 72 model """transe""" +819 72 loss """bceaftersigmoid""" +819 72 
regularizer """no""" +819 72 optimizer """adam""" +819 72 training_loop """lcwa""" +819 72 evaluator """rankbased""" +819 73 dataset """kinships""" +819 73 model """transe""" +819 73 loss """bceaftersigmoid""" +819 73 regularizer """no""" +819 73 optimizer """adam""" +819 73 training_loop """lcwa""" +819 73 evaluator """rankbased""" +819 74 dataset """kinships""" +819 74 model """transe""" +819 74 loss """bceaftersigmoid""" +819 74 regularizer """no""" +819 74 optimizer """adam""" +819 74 training_loop """lcwa""" +819 74 evaluator """rankbased""" +819 75 dataset """kinships""" +819 75 model """transe""" +819 75 loss """bceaftersigmoid""" +819 75 regularizer """no""" +819 75 optimizer """adam""" +819 75 training_loop """lcwa""" +819 75 evaluator """rankbased""" +819 76 dataset """kinships""" +819 76 model """transe""" +819 76 loss """bceaftersigmoid""" +819 76 regularizer """no""" +819 76 optimizer """adam""" +819 76 training_loop """lcwa""" +819 76 evaluator """rankbased""" +819 77 dataset """kinships""" +819 77 model """transe""" +819 77 loss """bceaftersigmoid""" +819 77 regularizer """no""" +819 77 optimizer """adam""" +819 77 training_loop """lcwa""" +819 77 evaluator """rankbased""" +819 78 dataset """kinships""" +819 78 model """transe""" +819 78 loss """bceaftersigmoid""" +819 78 regularizer """no""" +819 78 optimizer """adam""" +819 78 training_loop """lcwa""" +819 78 evaluator """rankbased""" +819 79 dataset """kinships""" +819 79 model """transe""" +819 79 loss """bceaftersigmoid""" +819 79 regularizer """no""" +819 79 optimizer """adam""" +819 79 training_loop """lcwa""" +819 79 evaluator """rankbased""" +819 80 dataset """kinships""" +819 80 model """transe""" +819 80 loss """bceaftersigmoid""" +819 80 regularizer """no""" +819 80 optimizer """adam""" +819 80 training_loop """lcwa""" +819 80 evaluator """rankbased""" +819 81 dataset """kinships""" +819 81 model """transe""" +819 81 loss """bceaftersigmoid""" +819 81 regularizer """no""" +819 81 
optimizer """adam""" +819 81 training_loop """lcwa""" +819 81 evaluator """rankbased""" +819 82 dataset """kinships""" +819 82 model """transe""" +819 82 loss """bceaftersigmoid""" +819 82 regularizer """no""" +819 82 optimizer """adam""" +819 82 training_loop """lcwa""" +819 82 evaluator """rankbased""" +819 83 dataset """kinships""" +819 83 model """transe""" +819 83 loss """bceaftersigmoid""" +819 83 regularizer """no""" +819 83 optimizer """adam""" +819 83 training_loop """lcwa""" +819 83 evaluator """rankbased""" +819 84 dataset """kinships""" +819 84 model """transe""" +819 84 loss """bceaftersigmoid""" +819 84 regularizer """no""" +819 84 optimizer """adam""" +819 84 training_loop """lcwa""" +819 84 evaluator """rankbased""" +819 85 dataset """kinships""" +819 85 model """transe""" +819 85 loss """bceaftersigmoid""" +819 85 regularizer """no""" +819 85 optimizer """adam""" +819 85 training_loop """lcwa""" +819 85 evaluator """rankbased""" +819 86 dataset """kinships""" +819 86 model """transe""" +819 86 loss """bceaftersigmoid""" +819 86 regularizer """no""" +819 86 optimizer """adam""" +819 86 training_loop """lcwa""" +819 86 evaluator """rankbased""" +819 87 dataset """kinships""" +819 87 model """transe""" +819 87 loss """bceaftersigmoid""" +819 87 regularizer """no""" +819 87 optimizer """adam""" +819 87 training_loop """lcwa""" +819 87 evaluator """rankbased""" +819 88 dataset """kinships""" +819 88 model """transe""" +819 88 loss """bceaftersigmoid""" +819 88 regularizer """no""" +819 88 optimizer """adam""" +819 88 training_loop """lcwa""" +819 88 evaluator """rankbased""" +819 89 dataset """kinships""" +819 89 model """transe""" +819 89 loss """bceaftersigmoid""" +819 89 regularizer """no""" +819 89 optimizer """adam""" +819 89 training_loop """lcwa""" +819 89 evaluator """rankbased""" +819 90 dataset """kinships""" +819 90 model """transe""" +819 90 loss """bceaftersigmoid""" +819 90 regularizer """no""" +819 90 optimizer """adam""" +819 90 
training_loop """lcwa""" +819 90 evaluator """rankbased""" +819 91 dataset """kinships""" +819 91 model """transe""" +819 91 loss """bceaftersigmoid""" +819 91 regularizer """no""" +819 91 optimizer """adam""" +819 91 training_loop """lcwa""" +819 91 evaluator """rankbased""" +819 92 dataset """kinships""" +819 92 model """transe""" +819 92 loss """bceaftersigmoid""" +819 92 regularizer """no""" +819 92 optimizer """adam""" +819 92 training_loop """lcwa""" +819 92 evaluator """rankbased""" +819 93 dataset """kinships""" +819 93 model """transe""" +819 93 loss """bceaftersigmoid""" +819 93 regularizer """no""" +819 93 optimizer """adam""" +819 93 training_loop """lcwa""" +819 93 evaluator """rankbased""" +819 94 dataset """kinships""" +819 94 model """transe""" +819 94 loss """bceaftersigmoid""" +819 94 regularizer """no""" +819 94 optimizer """adam""" +819 94 training_loop """lcwa""" +819 94 evaluator """rankbased""" +819 95 dataset """kinships""" +819 95 model """transe""" +819 95 loss """bceaftersigmoid""" +819 95 regularizer """no""" +819 95 optimizer """adam""" +819 95 training_loop """lcwa""" +819 95 evaluator """rankbased""" +819 96 dataset """kinships""" +819 96 model """transe""" +819 96 loss """bceaftersigmoid""" +819 96 regularizer """no""" +819 96 optimizer """adam""" +819 96 training_loop """lcwa""" +819 96 evaluator """rankbased""" +819 97 dataset """kinships""" +819 97 model """transe""" +819 97 loss """bceaftersigmoid""" +819 97 regularizer """no""" +819 97 optimizer """adam""" +819 97 training_loop """lcwa""" +819 97 evaluator """rankbased""" +819 98 dataset """kinships""" +819 98 model """transe""" +819 98 loss """bceaftersigmoid""" +819 98 regularizer """no""" +819 98 optimizer """adam""" +819 98 training_loop """lcwa""" +819 98 evaluator """rankbased""" +819 99 dataset """kinships""" +819 99 model """transe""" +819 99 loss """bceaftersigmoid""" +819 99 regularizer """no""" +819 99 optimizer """adam""" +819 99 training_loop """lcwa""" +819 99 
evaluator """rankbased""" +819 100 dataset """kinships""" +819 100 model """transe""" +819 100 loss """bceaftersigmoid""" +819 100 regularizer """no""" +819 100 optimizer """adam""" +819 100 training_loop """lcwa""" +819 100 evaluator """rankbased""" +820 1 model.embedding_dim 1.0 +820 1 model.scoring_fct_norm 1.0 +820 1 optimizer.lr 0.0016954530152435909 +820 1 training.batch_size 2.0 +820 1 training.label_smoothing 0.00155950337698159 +820 2 model.embedding_dim 0.0 +820 2 model.scoring_fct_norm 2.0 +820 2 optimizer.lr 0.006908490071090819 +820 2 training.batch_size 0.0 +820 2 training.label_smoothing 0.0018759823831025028 +820 3 model.embedding_dim 2.0 +820 3 model.scoring_fct_norm 1.0 +820 3 optimizer.lr 0.05865369260378646 +820 3 training.batch_size 1.0 +820 3 training.label_smoothing 0.009822897033600108 +820 4 model.embedding_dim 0.0 +820 4 model.scoring_fct_norm 1.0 +820 4 optimizer.lr 0.05935689750201863 +820 4 training.batch_size 1.0 +820 4 training.label_smoothing 0.002853718084132506 +820 5 model.embedding_dim 0.0 +820 5 model.scoring_fct_norm 2.0 +820 5 optimizer.lr 0.07581951852170919 +820 5 training.batch_size 1.0 +820 5 training.label_smoothing 0.025258029915515477 +820 6 model.embedding_dim 0.0 +820 6 model.scoring_fct_norm 2.0 +820 6 optimizer.lr 0.0018749698629226535 +820 6 training.batch_size 1.0 +820 6 training.label_smoothing 0.002574577824620218 +820 7 model.embedding_dim 1.0 +820 7 model.scoring_fct_norm 1.0 +820 7 optimizer.lr 0.01622822477031468 +820 7 training.batch_size 1.0 +820 7 training.label_smoothing 0.001454633722141714 +820 8 model.embedding_dim 2.0 +820 8 model.scoring_fct_norm 1.0 +820 8 optimizer.lr 0.010659418511202584 +820 8 training.batch_size 1.0 +820 8 training.label_smoothing 0.0030191719403352205 +820 9 model.embedding_dim 2.0 +820 9 model.scoring_fct_norm 2.0 +820 9 optimizer.lr 0.05903967848500339 +820 9 training.batch_size 0.0 +820 9 training.label_smoothing 0.02146444286422026 +820 10 model.embedding_dim 2.0 +820 10 
model.scoring_fct_norm 1.0 +820 10 optimizer.lr 0.001016747071122966 +820 10 training.batch_size 1.0 +820 10 training.label_smoothing 0.017143120772993375 +820 11 model.embedding_dim 2.0 +820 11 model.scoring_fct_norm 2.0 +820 11 optimizer.lr 0.07879420137758335 +820 11 training.batch_size 1.0 +820 11 training.label_smoothing 0.16263528110381265 +820 12 model.embedding_dim 2.0 +820 12 model.scoring_fct_norm 2.0 +820 12 optimizer.lr 0.008971528002779703 +820 12 training.batch_size 1.0 +820 12 training.label_smoothing 0.19738096924449955 +820 13 model.embedding_dim 2.0 +820 13 model.scoring_fct_norm 2.0 +820 13 optimizer.lr 0.03286737690927783 +820 13 training.batch_size 2.0 +820 13 training.label_smoothing 0.0019015469383730328 +820 14 model.embedding_dim 2.0 +820 14 model.scoring_fct_norm 1.0 +820 14 optimizer.lr 0.015064828183223985 +820 14 training.batch_size 0.0 +820 14 training.label_smoothing 0.09893981999142432 +820 15 model.embedding_dim 1.0 +820 15 model.scoring_fct_norm 2.0 +820 15 optimizer.lr 0.04246657102619257 +820 15 training.batch_size 0.0 +820 15 training.label_smoothing 0.004249357321161695 +820 16 model.embedding_dim 2.0 +820 16 model.scoring_fct_norm 1.0 +820 16 optimizer.lr 0.012428831330934751 +820 16 training.batch_size 2.0 +820 16 training.label_smoothing 0.40031661187046386 +820 17 model.embedding_dim 1.0 +820 17 model.scoring_fct_norm 2.0 +820 17 optimizer.lr 0.05506656191448812 +820 17 training.batch_size 0.0 +820 17 training.label_smoothing 0.04132557793527076 +820 18 model.embedding_dim 2.0 +820 18 model.scoring_fct_norm 2.0 +820 18 optimizer.lr 0.0017294930238275236 +820 18 training.batch_size 2.0 +820 18 training.label_smoothing 0.003910929102389483 +820 19 model.embedding_dim 1.0 +820 19 model.scoring_fct_norm 2.0 +820 19 optimizer.lr 0.001498621900176697 +820 19 training.batch_size 0.0 +820 19 training.label_smoothing 0.05109295442202893 +820 20 model.embedding_dim 0.0 +820 20 model.scoring_fct_norm 1.0 +820 20 optimizer.lr 
0.0028709696224372976 +820 20 training.batch_size 1.0 +820 20 training.label_smoothing 0.030771486809406567 +820 21 model.embedding_dim 2.0 +820 21 model.scoring_fct_norm 1.0 +820 21 optimizer.lr 0.050746752207572764 +820 21 training.batch_size 0.0 +820 21 training.label_smoothing 0.8173917012643728 +820 22 model.embedding_dim 0.0 +820 22 model.scoring_fct_norm 2.0 +820 22 optimizer.lr 0.0010384125011145082 +820 22 training.batch_size 2.0 +820 22 training.label_smoothing 0.007038009135595417 +820 23 model.embedding_dim 2.0 +820 23 model.scoring_fct_norm 1.0 +820 23 optimizer.lr 0.00238677117932457 +820 23 training.batch_size 2.0 +820 23 training.label_smoothing 0.23407722302376727 +820 24 model.embedding_dim 1.0 +820 24 model.scoring_fct_norm 2.0 +820 24 optimizer.lr 0.012639679583366075 +820 24 training.batch_size 1.0 +820 24 training.label_smoothing 0.01277178461981422 +820 25 model.embedding_dim 2.0 +820 25 model.scoring_fct_norm 1.0 +820 25 optimizer.lr 0.004673585356751173 +820 25 training.batch_size 1.0 +820 25 training.label_smoothing 0.006479391710642433 +820 26 model.embedding_dim 2.0 +820 26 model.scoring_fct_norm 2.0 +820 26 optimizer.lr 0.010054370321797929 +820 26 training.batch_size 1.0 +820 26 training.label_smoothing 0.20217124890831825 +820 27 model.embedding_dim 2.0 +820 27 model.scoring_fct_norm 2.0 +820 27 optimizer.lr 0.0365963065528664 +820 27 training.batch_size 2.0 +820 27 training.label_smoothing 0.015422884643806195 +820 28 model.embedding_dim 0.0 +820 28 model.scoring_fct_norm 2.0 +820 28 optimizer.lr 0.039820675190698734 +820 28 training.batch_size 0.0 +820 28 training.label_smoothing 0.7498585331693441 +820 29 model.embedding_dim 1.0 +820 29 model.scoring_fct_norm 2.0 +820 29 optimizer.lr 0.016194910874370718 +820 29 training.batch_size 0.0 +820 29 training.label_smoothing 0.20403825652609034 +820 30 model.embedding_dim 2.0 +820 30 model.scoring_fct_norm 2.0 +820 30 optimizer.lr 0.005505537320321843 +820 30 training.batch_size 1.0 +820 
30 training.label_smoothing 0.0034953036491304863 +820 31 model.embedding_dim 2.0 +820 31 model.scoring_fct_norm 1.0 +820 31 optimizer.lr 0.01586023939988812 +820 31 training.batch_size 1.0 +820 31 training.label_smoothing 0.00358372772748311 +820 32 model.embedding_dim 1.0 +820 32 model.scoring_fct_norm 1.0 +820 32 optimizer.lr 0.0017703057714072911 +820 32 training.batch_size 0.0 +820 32 training.label_smoothing 0.3663475755833827 +820 33 model.embedding_dim 0.0 +820 33 model.scoring_fct_norm 2.0 +820 33 optimizer.lr 0.0015730173664865147 +820 33 training.batch_size 1.0 +820 33 training.label_smoothing 0.012535410407798158 +820 34 model.embedding_dim 0.0 +820 34 model.scoring_fct_norm 1.0 +820 34 optimizer.lr 0.0704264371459246 +820 34 training.batch_size 1.0 +820 34 training.label_smoothing 0.021049828083672343 +820 35 model.embedding_dim 0.0 +820 35 model.scoring_fct_norm 2.0 +820 35 optimizer.lr 0.09010961285553036 +820 35 training.batch_size 0.0 +820 35 training.label_smoothing 0.050689924139679064 +820 36 model.embedding_dim 2.0 +820 36 model.scoring_fct_norm 1.0 +820 36 optimizer.lr 0.05342452981919793 +820 36 training.batch_size 1.0 +820 36 training.label_smoothing 0.7345615040977383 +820 37 model.embedding_dim 2.0 +820 37 model.scoring_fct_norm 2.0 +820 37 optimizer.lr 0.025565519682294913 +820 37 training.batch_size 2.0 +820 37 training.label_smoothing 0.01796391391708443 +820 38 model.embedding_dim 0.0 +820 38 model.scoring_fct_norm 2.0 +820 38 optimizer.lr 0.03549037519993687 +820 38 training.batch_size 2.0 +820 38 training.label_smoothing 0.018217241369806757 +820 39 model.embedding_dim 1.0 +820 39 model.scoring_fct_norm 1.0 +820 39 optimizer.lr 0.014219681261023671 +820 39 training.batch_size 2.0 +820 39 training.label_smoothing 0.025466325125055034 +820 40 model.embedding_dim 2.0 +820 40 model.scoring_fct_norm 1.0 +820 40 optimizer.lr 0.001336143048007082 +820 40 training.batch_size 2.0 +820 40 training.label_smoothing 0.03817174802788032 +820 41 
model.embedding_dim 0.0 +820 41 model.scoring_fct_norm 1.0 +820 41 optimizer.lr 0.008170844206109824 +820 41 training.batch_size 0.0 +820 41 training.label_smoothing 0.19609624501384226 +820 42 model.embedding_dim 2.0 +820 42 model.scoring_fct_norm 2.0 +820 42 optimizer.lr 0.08687544780566933 +820 42 training.batch_size 1.0 +820 42 training.label_smoothing 0.007617762613209234 +820 43 model.embedding_dim 2.0 +820 43 model.scoring_fct_norm 1.0 +820 43 optimizer.lr 0.0019544384430505335 +820 43 training.batch_size 0.0 +820 43 training.label_smoothing 0.00593453443063208 +820 44 model.embedding_dim 2.0 +820 44 model.scoring_fct_norm 1.0 +820 44 optimizer.lr 0.09490861610055104 +820 44 training.batch_size 0.0 +820 44 training.label_smoothing 0.004886936978356513 +820 45 model.embedding_dim 2.0 +820 45 model.scoring_fct_norm 1.0 +820 45 optimizer.lr 0.010587968311122447 +820 45 training.batch_size 0.0 +820 45 training.label_smoothing 0.014189713751690819 +820 46 model.embedding_dim 2.0 +820 46 model.scoring_fct_norm 1.0 +820 46 optimizer.lr 0.07148373792794921 +820 46 training.batch_size 1.0 +820 46 training.label_smoothing 0.014148570456207035 +820 47 model.embedding_dim 2.0 +820 47 model.scoring_fct_norm 2.0 +820 47 optimizer.lr 0.05962008591451893 +820 47 training.batch_size 0.0 +820 47 training.label_smoothing 0.009970880601648968 +820 48 model.embedding_dim 2.0 +820 48 model.scoring_fct_norm 2.0 +820 48 optimizer.lr 0.060044056798494415 +820 48 training.batch_size 1.0 +820 48 training.label_smoothing 0.6039141176003989 +820 49 model.embedding_dim 0.0 +820 49 model.scoring_fct_norm 1.0 +820 49 optimizer.lr 0.0035198833623027426 +820 49 training.batch_size 1.0 +820 49 training.label_smoothing 0.008498052434426865 +820 50 model.embedding_dim 1.0 +820 50 model.scoring_fct_norm 1.0 +820 50 optimizer.lr 0.00474547942245915 +820 50 training.batch_size 1.0 +820 50 training.label_smoothing 0.9103623797666337 +820 51 model.embedding_dim 1.0 +820 51 model.scoring_fct_norm 2.0 
+820 51 optimizer.lr 0.053858283528603224 +820 51 training.batch_size 0.0 +820 51 training.label_smoothing 0.0186784308238184 +820 52 model.embedding_dim 1.0 +820 52 model.scoring_fct_norm 1.0 +820 52 optimizer.lr 0.0033241598536973933 +820 52 training.batch_size 0.0 +820 52 training.label_smoothing 0.2817876349004411 +820 53 model.embedding_dim 2.0 +820 53 model.scoring_fct_norm 1.0 +820 53 optimizer.lr 0.027839306063937728 +820 53 training.batch_size 1.0 +820 53 training.label_smoothing 0.001227173857753866 +820 54 model.embedding_dim 2.0 +820 54 model.scoring_fct_norm 1.0 +820 54 optimizer.lr 0.0011837431381285205 +820 54 training.batch_size 2.0 +820 54 training.label_smoothing 0.005099640031400018 +820 55 model.embedding_dim 0.0 +820 55 model.scoring_fct_norm 2.0 +820 55 optimizer.lr 0.02130058060661652 +820 55 training.batch_size 2.0 +820 55 training.label_smoothing 0.0016909441506473024 +820 56 model.embedding_dim 2.0 +820 56 model.scoring_fct_norm 2.0 +820 56 optimizer.lr 0.05894603315207059 +820 56 training.batch_size 1.0 +820 56 training.label_smoothing 0.002509152880021342 +820 57 model.embedding_dim 2.0 +820 57 model.scoring_fct_norm 2.0 +820 57 optimizer.lr 0.01930725187433785 +820 57 training.batch_size 1.0 +820 57 training.label_smoothing 0.06448354204431558 +820 58 model.embedding_dim 2.0 +820 58 model.scoring_fct_norm 2.0 +820 58 optimizer.lr 0.007608261688125726 +820 58 training.batch_size 1.0 +820 58 training.label_smoothing 0.008936407687298191 +820 59 model.embedding_dim 2.0 +820 59 model.scoring_fct_norm 2.0 +820 59 optimizer.lr 0.0018421282570867178 +820 59 training.batch_size 0.0 +820 59 training.label_smoothing 0.5468818055542403 +820 60 model.embedding_dim 1.0 +820 60 model.scoring_fct_norm 2.0 +820 60 optimizer.lr 0.0015275088920263576 +820 60 training.batch_size 1.0 +820 60 training.label_smoothing 0.11699900350346705 +820 61 model.embedding_dim 2.0 +820 61 model.scoring_fct_norm 1.0 +820 61 optimizer.lr 0.03324552587013107 +820 61 
training.batch_size 0.0 +820 61 training.label_smoothing 0.03997515189238266 +820 62 model.embedding_dim 0.0 +820 62 model.scoring_fct_norm 2.0 +820 62 optimizer.lr 0.053384721139984125 +820 62 training.batch_size 2.0 +820 62 training.label_smoothing 0.027027993017342915 +820 63 model.embedding_dim 1.0 +820 63 model.scoring_fct_norm 1.0 +820 63 optimizer.lr 0.025156120090199553 +820 63 training.batch_size 2.0 +820 63 training.label_smoothing 0.009508644231577856 +820 64 model.embedding_dim 0.0 +820 64 model.scoring_fct_norm 2.0 +820 64 optimizer.lr 0.004194953561409019 +820 64 training.batch_size 2.0 +820 64 training.label_smoothing 0.13901869383068993 +820 65 model.embedding_dim 2.0 +820 65 model.scoring_fct_norm 2.0 +820 65 optimizer.lr 0.0024844137726399034 +820 65 training.batch_size 2.0 +820 65 training.label_smoothing 0.3933137302588238 +820 66 model.embedding_dim 1.0 +820 66 model.scoring_fct_norm 1.0 +820 66 optimizer.lr 0.007038944952098766 +820 66 training.batch_size 1.0 +820 66 training.label_smoothing 0.12356359761227081 +820 67 model.embedding_dim 0.0 +820 67 model.scoring_fct_norm 2.0 +820 67 optimizer.lr 0.032302307683669526 +820 67 training.batch_size 1.0 +820 67 training.label_smoothing 0.002266590868331271 +820 68 model.embedding_dim 2.0 +820 68 model.scoring_fct_norm 2.0 +820 68 optimizer.lr 0.09796484832629734 +820 68 training.batch_size 1.0 +820 68 training.label_smoothing 0.19807195995961638 +820 69 model.embedding_dim 1.0 +820 69 model.scoring_fct_norm 2.0 +820 69 optimizer.lr 0.01813021811433102 +820 69 training.batch_size 2.0 +820 69 training.label_smoothing 0.20754538399341002 +820 70 model.embedding_dim 2.0 +820 70 model.scoring_fct_norm 1.0 +820 70 optimizer.lr 0.015700238453262266 +820 70 training.batch_size 1.0 +820 70 training.label_smoothing 0.6576255894898697 +820 71 model.embedding_dim 0.0 +820 71 model.scoring_fct_norm 2.0 +820 71 optimizer.lr 0.0047472263801330745 +820 71 training.batch_size 0.0 +820 71 training.label_smoothing 
0.01789336938350691 +820 72 model.embedding_dim 1.0 +820 72 model.scoring_fct_norm 2.0 +820 72 optimizer.lr 0.007013724039004368 +820 72 training.batch_size 1.0 +820 72 training.label_smoothing 0.004642965990044701 +820 73 model.embedding_dim 2.0 +820 73 model.scoring_fct_norm 1.0 +820 73 optimizer.lr 0.01801381896382391 +820 73 training.batch_size 0.0 +820 73 training.label_smoothing 0.030409201490086697 +820 74 model.embedding_dim 2.0 +820 74 model.scoring_fct_norm 2.0 +820 74 optimizer.lr 0.0013100747033594172 +820 74 training.batch_size 1.0 +820 74 training.label_smoothing 0.0025137948131392063 +820 75 model.embedding_dim 2.0 +820 75 model.scoring_fct_norm 1.0 +820 75 optimizer.lr 0.08979629862358036 +820 75 training.batch_size 0.0 +820 75 training.label_smoothing 0.11170912165939392 +820 76 model.embedding_dim 0.0 +820 76 model.scoring_fct_norm 1.0 +820 76 optimizer.lr 0.0465529282117653 +820 76 training.batch_size 1.0 +820 76 training.label_smoothing 0.06428753999688938 +820 77 model.embedding_dim 2.0 +820 77 model.scoring_fct_norm 1.0 +820 77 optimizer.lr 0.005652842381822773 +820 77 training.batch_size 0.0 +820 77 training.label_smoothing 0.011001438478800155 +820 78 model.embedding_dim 2.0 +820 78 model.scoring_fct_norm 2.0 +820 78 optimizer.lr 0.0053665854943978335 +820 78 training.batch_size 1.0 +820 78 training.label_smoothing 0.0035981744064753733 +820 79 model.embedding_dim 1.0 +820 79 model.scoring_fct_norm 2.0 +820 79 optimizer.lr 0.0017057440028190264 +820 79 training.batch_size 0.0 +820 79 training.label_smoothing 0.8618138534870243 +820 80 model.embedding_dim 1.0 +820 80 model.scoring_fct_norm 2.0 +820 80 optimizer.lr 0.09488935817605784 +820 80 training.batch_size 0.0 +820 80 training.label_smoothing 0.0011458123464628857 +820 81 model.embedding_dim 2.0 +820 81 model.scoring_fct_norm 2.0 +820 81 optimizer.lr 0.08128158625892538 +820 81 training.batch_size 2.0 +820 81 training.label_smoothing 0.009613369073782142 +820 82 model.embedding_dim 0.0 
+820 82 model.scoring_fct_norm 2.0 +820 82 optimizer.lr 0.013449732528374506 +820 82 training.batch_size 2.0 +820 82 training.label_smoothing 0.010852533308264583 +820 83 model.embedding_dim 0.0 +820 83 model.scoring_fct_norm 2.0 +820 83 optimizer.lr 0.0011354405972930526 +820 83 training.batch_size 0.0 +820 83 training.label_smoothing 0.07532166116355045 +820 84 model.embedding_dim 0.0 +820 84 model.scoring_fct_norm 2.0 +820 84 optimizer.lr 0.03475613498288417 +820 84 training.batch_size 1.0 +820 84 training.label_smoothing 0.0037916446304732394 +820 85 model.embedding_dim 1.0 +820 85 model.scoring_fct_norm 2.0 +820 85 optimizer.lr 0.0018154969308091602 +820 85 training.batch_size 1.0 +820 85 training.label_smoothing 0.0036829812841228846 +820 86 model.embedding_dim 0.0 +820 86 model.scoring_fct_norm 1.0 +820 86 optimizer.lr 0.003918570104087266 +820 86 training.batch_size 2.0 +820 86 training.label_smoothing 0.02488266058885506 +820 87 model.embedding_dim 1.0 +820 87 model.scoring_fct_norm 1.0 +820 87 optimizer.lr 0.05095589060729542 +820 87 training.batch_size 2.0 +820 87 training.label_smoothing 0.01636037076235414 +820 88 model.embedding_dim 1.0 +820 88 model.scoring_fct_norm 2.0 +820 88 optimizer.lr 0.023194515506657812 +820 88 training.batch_size 2.0 +820 88 training.label_smoothing 0.01670195026884498 +820 89 model.embedding_dim 2.0 +820 89 model.scoring_fct_norm 1.0 +820 89 optimizer.lr 0.02550972137873094 +820 89 training.batch_size 1.0 +820 89 training.label_smoothing 0.02894090192831906 +820 90 model.embedding_dim 1.0 +820 90 model.scoring_fct_norm 2.0 +820 90 optimizer.lr 0.004602161285077191 +820 90 training.batch_size 2.0 +820 90 training.label_smoothing 0.28897999781096373 +820 91 model.embedding_dim 2.0 +820 91 model.scoring_fct_norm 2.0 +820 91 optimizer.lr 0.004282594002977974 +820 91 training.batch_size 0.0 +820 91 training.label_smoothing 0.0021161846675501286 +820 92 model.embedding_dim 1.0 +820 92 model.scoring_fct_norm 1.0 +820 92 
optimizer.lr 0.0066471828694430555 +820 92 training.batch_size 1.0 +820 92 training.label_smoothing 0.003941218193410395 +820 93 model.embedding_dim 1.0 +820 93 model.scoring_fct_norm 1.0 +820 93 optimizer.lr 0.009214778855058321 +820 93 training.batch_size 2.0 +820 93 training.label_smoothing 0.9730965448851077 +820 94 model.embedding_dim 2.0 +820 94 model.scoring_fct_norm 1.0 +820 94 optimizer.lr 0.05220999017977394 +820 94 training.batch_size 2.0 +820 94 training.label_smoothing 0.8516797470403379 +820 95 model.embedding_dim 1.0 +820 95 model.scoring_fct_norm 2.0 +820 95 optimizer.lr 0.013184391765390446 +820 95 training.batch_size 1.0 +820 95 training.label_smoothing 0.26924116470887294 +820 96 model.embedding_dim 2.0 +820 96 model.scoring_fct_norm 1.0 +820 96 optimizer.lr 0.0033094305066303366 +820 96 training.batch_size 0.0 +820 96 training.label_smoothing 0.006683969174988245 +820 97 model.embedding_dim 1.0 +820 97 model.scoring_fct_norm 2.0 +820 97 optimizer.lr 0.0015374182770207933 +820 97 training.batch_size 2.0 +820 97 training.label_smoothing 0.5474210808118292 +820 98 model.embedding_dim 2.0 +820 98 model.scoring_fct_norm 1.0 +820 98 optimizer.lr 0.0695653772545016 +820 98 training.batch_size 1.0 +820 98 training.label_smoothing 0.017045826841460696 +820 99 model.embedding_dim 0.0 +820 99 model.scoring_fct_norm 2.0 +820 99 optimizer.lr 0.0852140270411376 +820 99 training.batch_size 2.0 +820 99 training.label_smoothing 0.004371957339408188 +820 100 model.embedding_dim 1.0 +820 100 model.scoring_fct_norm 2.0 +820 100 optimizer.lr 0.01746927395605885 +820 100 training.batch_size 2.0 +820 100 training.label_smoothing 0.13203605273944907 +820 1 dataset """kinships""" +820 1 model """transe""" +820 1 loss """softplus""" +820 1 regularizer """no""" +820 1 optimizer """adam""" +820 1 training_loop """lcwa""" +820 1 evaluator """rankbased""" +820 2 dataset """kinships""" +820 2 model """transe""" +820 2 loss """softplus""" +820 2 regularizer """no""" +820 2 
optimizer """adam""" +820 2 training_loop """lcwa""" +820 2 evaluator """rankbased""" +820 3 dataset """kinships""" +820 3 model """transe""" +820 3 loss """softplus""" +820 3 regularizer """no""" +820 3 optimizer """adam""" +820 3 training_loop """lcwa""" +820 3 evaluator """rankbased""" +820 4 dataset """kinships""" +820 4 model """transe""" +820 4 loss """softplus""" +820 4 regularizer """no""" +820 4 optimizer """adam""" +820 4 training_loop """lcwa""" +820 4 evaluator """rankbased""" +820 5 dataset """kinships""" +820 5 model """transe""" +820 5 loss """softplus""" +820 5 regularizer """no""" +820 5 optimizer """adam""" +820 5 training_loop """lcwa""" +820 5 evaluator """rankbased""" +820 6 dataset """kinships""" +820 6 model """transe""" +820 6 loss """softplus""" +820 6 regularizer """no""" +820 6 optimizer """adam""" +820 6 training_loop """lcwa""" +820 6 evaluator """rankbased""" +820 7 dataset """kinships""" +820 7 model """transe""" +820 7 loss """softplus""" +820 7 regularizer """no""" +820 7 optimizer """adam""" +820 7 training_loop """lcwa""" +820 7 evaluator """rankbased""" +820 8 dataset """kinships""" +820 8 model """transe""" +820 8 loss """softplus""" +820 8 regularizer """no""" +820 8 optimizer """adam""" +820 8 training_loop """lcwa""" +820 8 evaluator """rankbased""" +820 9 dataset """kinships""" +820 9 model """transe""" +820 9 loss """softplus""" +820 9 regularizer """no""" +820 9 optimizer """adam""" +820 9 training_loop """lcwa""" +820 9 evaluator """rankbased""" +820 10 dataset """kinships""" +820 10 model """transe""" +820 10 loss """softplus""" +820 10 regularizer """no""" +820 10 optimizer """adam""" +820 10 training_loop """lcwa""" +820 10 evaluator """rankbased""" +820 11 dataset """kinships""" +820 11 model """transe""" +820 11 loss """softplus""" +820 11 regularizer """no""" +820 11 optimizer """adam""" +820 11 training_loop """lcwa""" +820 11 evaluator """rankbased""" +820 12 dataset """kinships""" +820 12 model """transe""" +820 
12 loss """softplus""" +820 12 regularizer """no""" +820 12 optimizer """adam""" +820 12 training_loop """lcwa""" +820 12 evaluator """rankbased""" +820 13 dataset """kinships""" +820 13 model """transe""" +820 13 loss """softplus""" +820 13 regularizer """no""" +820 13 optimizer """adam""" +820 13 training_loop """lcwa""" +820 13 evaluator """rankbased""" +820 14 dataset """kinships""" +820 14 model """transe""" +820 14 loss """softplus""" +820 14 regularizer """no""" +820 14 optimizer """adam""" +820 14 training_loop """lcwa""" +820 14 evaluator """rankbased""" +820 15 dataset """kinships""" +820 15 model """transe""" +820 15 loss """softplus""" +820 15 regularizer """no""" +820 15 optimizer """adam""" +820 15 training_loop """lcwa""" +820 15 evaluator """rankbased""" +820 16 dataset """kinships""" +820 16 model """transe""" +820 16 loss """softplus""" +820 16 regularizer """no""" +820 16 optimizer """adam""" +820 16 training_loop """lcwa""" +820 16 evaluator """rankbased""" +820 17 dataset """kinships""" +820 17 model """transe""" +820 17 loss """softplus""" +820 17 regularizer """no""" +820 17 optimizer """adam""" +820 17 training_loop """lcwa""" +820 17 evaluator """rankbased""" +820 18 dataset """kinships""" +820 18 model """transe""" +820 18 loss """softplus""" +820 18 regularizer """no""" +820 18 optimizer """adam""" +820 18 training_loop """lcwa""" +820 18 evaluator """rankbased""" +820 19 dataset """kinships""" +820 19 model """transe""" +820 19 loss """softplus""" +820 19 regularizer """no""" +820 19 optimizer """adam""" +820 19 training_loop """lcwa""" +820 19 evaluator """rankbased""" +820 20 dataset """kinships""" +820 20 model """transe""" +820 20 loss """softplus""" +820 20 regularizer """no""" +820 20 optimizer """adam""" +820 20 training_loop """lcwa""" +820 20 evaluator """rankbased""" +820 21 dataset """kinships""" +820 21 model """transe""" +820 21 loss """softplus""" +820 21 regularizer """no""" +820 21 optimizer """adam""" +820 21 
training_loop """lcwa""" +820 21 evaluator """rankbased""" +820 22 dataset """kinships""" +820 22 model """transe""" +820 22 loss """softplus""" +820 22 regularizer """no""" +820 22 optimizer """adam""" +820 22 training_loop """lcwa""" +820 22 evaluator """rankbased""" +820 23 dataset """kinships""" +820 23 model """transe""" +820 23 loss """softplus""" +820 23 regularizer """no""" +820 23 optimizer """adam""" +820 23 training_loop """lcwa""" +820 23 evaluator """rankbased""" +820 24 dataset """kinships""" +820 24 model """transe""" +820 24 loss """softplus""" +820 24 regularizer """no""" +820 24 optimizer """adam""" +820 24 training_loop """lcwa""" +820 24 evaluator """rankbased""" +820 25 dataset """kinships""" +820 25 model """transe""" +820 25 loss """softplus""" +820 25 regularizer """no""" +820 25 optimizer """adam""" +820 25 training_loop """lcwa""" +820 25 evaluator """rankbased""" +820 26 dataset """kinships""" +820 26 model """transe""" +820 26 loss """softplus""" +820 26 regularizer """no""" +820 26 optimizer """adam""" +820 26 training_loop """lcwa""" +820 26 evaluator """rankbased""" +820 27 dataset """kinships""" +820 27 model """transe""" +820 27 loss """softplus""" +820 27 regularizer """no""" +820 27 optimizer """adam""" +820 27 training_loop """lcwa""" +820 27 evaluator """rankbased""" +820 28 dataset """kinships""" +820 28 model """transe""" +820 28 loss """softplus""" +820 28 regularizer """no""" +820 28 optimizer """adam""" +820 28 training_loop """lcwa""" +820 28 evaluator """rankbased""" +820 29 dataset """kinships""" +820 29 model """transe""" +820 29 loss """softplus""" +820 29 regularizer """no""" +820 29 optimizer """adam""" +820 29 training_loop """lcwa""" +820 29 evaluator """rankbased""" +820 30 dataset """kinships""" +820 30 model """transe""" +820 30 loss """softplus""" +820 30 regularizer """no""" +820 30 optimizer """adam""" +820 30 training_loop """lcwa""" +820 30 evaluator """rankbased""" +820 31 dataset """kinships""" +820 31 
model """transe""" +820 31 loss """softplus""" +820 31 regularizer """no""" +820 31 optimizer """adam""" +820 31 training_loop """lcwa""" +820 31 evaluator """rankbased""" +820 32 dataset """kinships""" +820 32 model """transe""" +820 32 loss """softplus""" +820 32 regularizer """no""" +820 32 optimizer """adam""" +820 32 training_loop """lcwa""" +820 32 evaluator """rankbased""" +820 33 dataset """kinships""" +820 33 model """transe""" +820 33 loss """softplus""" +820 33 regularizer """no""" +820 33 optimizer """adam""" +820 33 training_loop """lcwa""" +820 33 evaluator """rankbased""" +820 34 dataset """kinships""" +820 34 model """transe""" +820 34 loss """softplus""" +820 34 regularizer """no""" +820 34 optimizer """adam""" +820 34 training_loop """lcwa""" +820 34 evaluator """rankbased""" +820 35 dataset """kinships""" +820 35 model """transe""" +820 35 loss """softplus""" +820 35 regularizer """no""" +820 35 optimizer """adam""" +820 35 training_loop """lcwa""" +820 35 evaluator """rankbased""" +820 36 dataset """kinships""" +820 36 model """transe""" +820 36 loss """softplus""" +820 36 regularizer """no""" +820 36 optimizer """adam""" +820 36 training_loop """lcwa""" +820 36 evaluator """rankbased""" +820 37 dataset """kinships""" +820 37 model """transe""" +820 37 loss """softplus""" +820 37 regularizer """no""" +820 37 optimizer """adam""" +820 37 training_loop """lcwa""" +820 37 evaluator """rankbased""" +820 38 dataset """kinships""" +820 38 model """transe""" +820 38 loss """softplus""" +820 38 regularizer """no""" +820 38 optimizer """adam""" +820 38 training_loop """lcwa""" +820 38 evaluator """rankbased""" +820 39 dataset """kinships""" +820 39 model """transe""" +820 39 loss """softplus""" +820 39 regularizer """no""" +820 39 optimizer """adam""" +820 39 training_loop """lcwa""" +820 39 evaluator """rankbased""" +820 40 dataset """kinships""" +820 40 model """transe""" +820 40 loss """softplus""" +820 40 regularizer """no""" +820 40 optimizer 
"""adam""" +820 40 training_loop """lcwa""" +820 40 evaluator """rankbased""" +820 41 dataset """kinships""" +820 41 model """transe""" +820 41 loss """softplus""" +820 41 regularizer """no""" +820 41 optimizer """adam""" +820 41 training_loop """lcwa""" +820 41 evaluator """rankbased""" +820 42 dataset """kinships""" +820 42 model """transe""" +820 42 loss """softplus""" +820 42 regularizer """no""" +820 42 optimizer """adam""" +820 42 training_loop """lcwa""" +820 42 evaluator """rankbased""" +820 43 dataset """kinships""" +820 43 model """transe""" +820 43 loss """softplus""" +820 43 regularizer """no""" +820 43 optimizer """adam""" +820 43 training_loop """lcwa""" +820 43 evaluator """rankbased""" +820 44 dataset """kinships""" +820 44 model """transe""" +820 44 loss """softplus""" +820 44 regularizer """no""" +820 44 optimizer """adam""" +820 44 training_loop """lcwa""" +820 44 evaluator """rankbased""" +820 45 dataset """kinships""" +820 45 model """transe""" +820 45 loss """softplus""" +820 45 regularizer """no""" +820 45 optimizer """adam""" +820 45 training_loop """lcwa""" +820 45 evaluator """rankbased""" +820 46 dataset """kinships""" +820 46 model """transe""" +820 46 loss """softplus""" +820 46 regularizer """no""" +820 46 optimizer """adam""" +820 46 training_loop """lcwa""" +820 46 evaluator """rankbased""" +820 47 dataset """kinships""" +820 47 model """transe""" +820 47 loss """softplus""" +820 47 regularizer """no""" +820 47 optimizer """adam""" +820 47 training_loop """lcwa""" +820 47 evaluator """rankbased""" +820 48 dataset """kinships""" +820 48 model """transe""" +820 48 loss """softplus""" +820 48 regularizer """no""" +820 48 optimizer """adam""" +820 48 training_loop """lcwa""" +820 48 evaluator """rankbased""" +820 49 dataset """kinships""" +820 49 model """transe""" +820 49 loss """softplus""" +820 49 regularizer """no""" +820 49 optimizer """adam""" +820 49 training_loop """lcwa""" +820 49 evaluator """rankbased""" +820 50 dataset 
"""kinships""" +820 50 model """transe""" +820 50 loss """softplus""" +820 50 regularizer """no""" +820 50 optimizer """adam""" +820 50 training_loop """lcwa""" +820 50 evaluator """rankbased""" +820 51 dataset """kinships""" +820 51 model """transe""" +820 51 loss """softplus""" +820 51 regularizer """no""" +820 51 optimizer """adam""" +820 51 training_loop """lcwa""" +820 51 evaluator """rankbased""" +820 52 dataset """kinships""" +820 52 model """transe""" +820 52 loss """softplus""" +820 52 regularizer """no""" +820 52 optimizer """adam""" +820 52 training_loop """lcwa""" +820 52 evaluator """rankbased""" +820 53 dataset """kinships""" +820 53 model """transe""" +820 53 loss """softplus""" +820 53 regularizer """no""" +820 53 optimizer """adam""" +820 53 training_loop """lcwa""" +820 53 evaluator """rankbased""" +820 54 dataset """kinships""" +820 54 model """transe""" +820 54 loss """softplus""" +820 54 regularizer """no""" +820 54 optimizer """adam""" +820 54 training_loop """lcwa""" +820 54 evaluator """rankbased""" +820 55 dataset """kinships""" +820 55 model """transe""" +820 55 loss """softplus""" +820 55 regularizer """no""" +820 55 optimizer """adam""" +820 55 training_loop """lcwa""" +820 55 evaluator """rankbased""" +820 56 dataset """kinships""" +820 56 model """transe""" +820 56 loss """softplus""" +820 56 regularizer """no""" +820 56 optimizer """adam""" +820 56 training_loop """lcwa""" +820 56 evaluator """rankbased""" +820 57 dataset """kinships""" +820 57 model """transe""" +820 57 loss """softplus""" +820 57 regularizer """no""" +820 57 optimizer """adam""" +820 57 training_loop """lcwa""" +820 57 evaluator """rankbased""" +820 58 dataset """kinships""" +820 58 model """transe""" +820 58 loss """softplus""" +820 58 regularizer """no""" +820 58 optimizer """adam""" +820 58 training_loop """lcwa""" +820 58 evaluator """rankbased""" +820 59 dataset """kinships""" +820 59 model """transe""" +820 59 loss """softplus""" +820 59 regularizer """no""" 
+820 59 optimizer """adam""" +820 59 training_loop """lcwa""" +820 59 evaluator """rankbased""" +820 60 dataset """kinships""" +820 60 model """transe""" +820 60 loss """softplus""" +820 60 regularizer """no""" +820 60 optimizer """adam""" +820 60 training_loop """lcwa""" +820 60 evaluator """rankbased""" +820 61 dataset """kinships""" +820 61 model """transe""" +820 61 loss """softplus""" +820 61 regularizer """no""" +820 61 optimizer """adam""" +820 61 training_loop """lcwa""" +820 61 evaluator """rankbased""" +820 62 dataset """kinships""" +820 62 model """transe""" +820 62 loss """softplus""" +820 62 regularizer """no""" +820 62 optimizer """adam""" +820 62 training_loop """lcwa""" +820 62 evaluator """rankbased""" +820 63 dataset """kinships""" +820 63 model """transe""" +820 63 loss """softplus""" +820 63 regularizer """no""" +820 63 optimizer """adam""" +820 63 training_loop """lcwa""" +820 63 evaluator """rankbased""" +820 64 dataset """kinships""" +820 64 model """transe""" +820 64 loss """softplus""" +820 64 regularizer """no""" +820 64 optimizer """adam""" +820 64 training_loop """lcwa""" +820 64 evaluator """rankbased""" +820 65 dataset """kinships""" +820 65 model """transe""" +820 65 loss """softplus""" +820 65 regularizer """no""" +820 65 optimizer """adam""" +820 65 training_loop """lcwa""" +820 65 evaluator """rankbased""" +820 66 dataset """kinships""" +820 66 model """transe""" +820 66 loss """softplus""" +820 66 regularizer """no""" +820 66 optimizer """adam""" +820 66 training_loop """lcwa""" +820 66 evaluator """rankbased""" +820 67 dataset """kinships""" +820 67 model """transe""" +820 67 loss """softplus""" +820 67 regularizer """no""" +820 67 optimizer """adam""" +820 67 training_loop """lcwa""" +820 67 evaluator """rankbased""" +820 68 dataset """kinships""" +820 68 model """transe""" +820 68 loss """softplus""" +820 68 regularizer """no""" +820 68 optimizer """adam""" +820 68 training_loop """lcwa""" +820 68 evaluator """rankbased""" +820 
69 dataset """kinships""" +820 69 model """transe""" +820 69 loss """softplus""" +820 69 regularizer """no""" +820 69 optimizer """adam""" +820 69 training_loop """lcwa""" +820 69 evaluator """rankbased""" +820 70 dataset """kinships""" +820 70 model """transe""" +820 70 loss """softplus""" +820 70 regularizer """no""" +820 70 optimizer """adam""" +820 70 training_loop """lcwa""" +820 70 evaluator """rankbased""" +820 71 dataset """kinships""" +820 71 model """transe""" +820 71 loss """softplus""" +820 71 regularizer """no""" +820 71 optimizer """adam""" +820 71 training_loop """lcwa""" +820 71 evaluator """rankbased""" +820 72 dataset """kinships""" +820 72 model """transe""" +820 72 loss """softplus""" +820 72 regularizer """no""" +820 72 optimizer """adam""" +820 72 training_loop """lcwa""" +820 72 evaluator """rankbased""" +820 73 dataset """kinships""" +820 73 model """transe""" +820 73 loss """softplus""" +820 73 regularizer """no""" +820 73 optimizer """adam""" +820 73 training_loop """lcwa""" +820 73 evaluator """rankbased""" +820 74 dataset """kinships""" +820 74 model """transe""" +820 74 loss """softplus""" +820 74 regularizer """no""" +820 74 optimizer """adam""" +820 74 training_loop """lcwa""" +820 74 evaluator """rankbased""" +820 75 dataset """kinships""" +820 75 model """transe""" +820 75 loss """softplus""" +820 75 regularizer """no""" +820 75 optimizer """adam""" +820 75 training_loop """lcwa""" +820 75 evaluator """rankbased""" +820 76 dataset """kinships""" +820 76 model """transe""" +820 76 loss """softplus""" +820 76 regularizer """no""" +820 76 optimizer """adam""" +820 76 training_loop """lcwa""" +820 76 evaluator """rankbased""" +820 77 dataset """kinships""" +820 77 model """transe""" +820 77 loss """softplus""" +820 77 regularizer """no""" +820 77 optimizer """adam""" +820 77 training_loop """lcwa""" +820 77 evaluator """rankbased""" +820 78 dataset """kinships""" +820 78 model """transe""" +820 78 loss """softplus""" +820 78 regularizer 
"""no""" +820 78 optimizer """adam""" +820 78 training_loop """lcwa""" +820 78 evaluator """rankbased""" +820 79 dataset """kinships""" +820 79 model """transe""" +820 79 loss """softplus""" +820 79 regularizer """no""" +820 79 optimizer """adam""" +820 79 training_loop """lcwa""" +820 79 evaluator """rankbased""" +820 80 dataset """kinships""" +820 80 model """transe""" +820 80 loss """softplus""" +820 80 regularizer """no""" +820 80 optimizer """adam""" +820 80 training_loop """lcwa""" +820 80 evaluator """rankbased""" +820 81 dataset """kinships""" +820 81 model """transe""" +820 81 loss """softplus""" +820 81 regularizer """no""" +820 81 optimizer """adam""" +820 81 training_loop """lcwa""" +820 81 evaluator """rankbased""" +820 82 dataset """kinships""" +820 82 model """transe""" +820 82 loss """softplus""" +820 82 regularizer """no""" +820 82 optimizer """adam""" +820 82 training_loop """lcwa""" +820 82 evaluator """rankbased""" +820 83 dataset """kinships""" +820 83 model """transe""" +820 83 loss """softplus""" +820 83 regularizer """no""" +820 83 optimizer """adam""" +820 83 training_loop """lcwa""" +820 83 evaluator """rankbased""" +820 84 dataset """kinships""" +820 84 model """transe""" +820 84 loss """softplus""" +820 84 regularizer """no""" +820 84 optimizer """adam""" +820 84 training_loop """lcwa""" +820 84 evaluator """rankbased""" +820 85 dataset """kinships""" +820 85 model """transe""" +820 85 loss """softplus""" +820 85 regularizer """no""" +820 85 optimizer """adam""" +820 85 training_loop """lcwa""" +820 85 evaluator """rankbased""" +820 86 dataset """kinships""" +820 86 model """transe""" +820 86 loss """softplus""" +820 86 regularizer """no""" +820 86 optimizer """adam""" +820 86 training_loop """lcwa""" +820 86 evaluator """rankbased""" +820 87 dataset """kinships""" +820 87 model """transe""" +820 87 loss """softplus""" +820 87 regularizer """no""" +820 87 optimizer """adam""" +820 87 training_loop """lcwa""" +820 87 evaluator 
"""rankbased""" +820 88 dataset """kinships""" +820 88 model """transe""" +820 88 loss """softplus""" +820 88 regularizer """no""" +820 88 optimizer """adam""" +820 88 training_loop """lcwa""" +820 88 evaluator """rankbased""" +820 89 dataset """kinships""" +820 89 model """transe""" +820 89 loss """softplus""" +820 89 regularizer """no""" +820 89 optimizer """adam""" +820 89 training_loop """lcwa""" +820 89 evaluator """rankbased""" +820 90 dataset """kinships""" +820 90 model """transe""" +820 90 loss """softplus""" +820 90 regularizer """no""" +820 90 optimizer """adam""" +820 90 training_loop """lcwa""" +820 90 evaluator """rankbased""" +820 91 dataset """kinships""" +820 91 model """transe""" +820 91 loss """softplus""" +820 91 regularizer """no""" +820 91 optimizer """adam""" +820 91 training_loop """lcwa""" +820 91 evaluator """rankbased""" +820 92 dataset """kinships""" +820 92 model """transe""" +820 92 loss """softplus""" +820 92 regularizer """no""" +820 92 optimizer """adam""" +820 92 training_loop """lcwa""" +820 92 evaluator """rankbased""" +820 93 dataset """kinships""" +820 93 model """transe""" +820 93 loss """softplus""" +820 93 regularizer """no""" +820 93 optimizer """adam""" +820 93 training_loop """lcwa""" +820 93 evaluator """rankbased""" +820 94 dataset """kinships""" +820 94 model """transe""" +820 94 loss """softplus""" +820 94 regularizer """no""" +820 94 optimizer """adam""" +820 94 training_loop """lcwa""" +820 94 evaluator """rankbased""" +820 95 dataset """kinships""" +820 95 model """transe""" +820 95 loss """softplus""" +820 95 regularizer """no""" +820 95 optimizer """adam""" +820 95 training_loop """lcwa""" +820 95 evaluator """rankbased""" +820 96 dataset """kinships""" +820 96 model """transe""" +820 96 loss """softplus""" +820 96 regularizer """no""" +820 96 optimizer """adam""" +820 96 training_loop """lcwa""" +820 96 evaluator """rankbased""" +820 97 dataset """kinships""" +820 97 model """transe""" +820 97 loss 
"""softplus""" +820 97 regularizer """no""" +820 97 optimizer """adam""" +820 97 training_loop """lcwa""" +820 97 evaluator """rankbased""" +820 98 dataset """kinships""" +820 98 model """transe""" +820 98 loss """softplus""" +820 98 regularizer """no""" +820 98 optimizer """adam""" +820 98 training_loop """lcwa""" +820 98 evaluator """rankbased""" +820 99 dataset """kinships""" +820 99 model """transe""" +820 99 loss """softplus""" +820 99 regularizer """no""" +820 99 optimizer """adam""" +820 99 training_loop """lcwa""" +820 99 evaluator """rankbased""" +820 100 dataset """kinships""" +820 100 model """transe""" +820 100 loss """softplus""" +820 100 regularizer """no""" +820 100 optimizer """adam""" +820 100 training_loop """lcwa""" +820 100 evaluator """rankbased""" +821 1 model.embedding_dim 2.0 +821 1 model.scoring_fct_norm 1.0 +821 1 optimizer.lr 0.008455507215841812 +821 1 training.batch_size 0.0 +821 1 training.label_smoothing 0.1326580229434966 +821 2 model.embedding_dim 0.0 +821 2 model.scoring_fct_norm 1.0 +821 2 optimizer.lr 0.019858589503329666 +821 2 training.batch_size 2.0 +821 2 training.label_smoothing 0.085122550272798 +821 3 model.embedding_dim 1.0 +821 3 model.scoring_fct_norm 1.0 +821 3 optimizer.lr 0.07764758103608224 +821 3 training.batch_size 2.0 +821 3 training.label_smoothing 0.0310235230703172 +821 4 model.embedding_dim 1.0 +821 4 model.scoring_fct_norm 2.0 +821 4 optimizer.lr 0.027882059311358346 +821 4 training.batch_size 2.0 +821 4 training.label_smoothing 0.00306956701199709 +821 5 model.embedding_dim 1.0 +821 5 model.scoring_fct_norm 1.0 +821 5 optimizer.lr 0.0036843012978702455 +821 5 training.batch_size 2.0 +821 5 training.label_smoothing 0.37918207710249985 +821 6 model.embedding_dim 2.0 +821 6 model.scoring_fct_norm 1.0 +821 6 optimizer.lr 0.07029514922229863 +821 6 training.batch_size 2.0 +821 6 training.label_smoothing 0.2146603669519276 +821 7 model.embedding_dim 2.0 +821 7 model.scoring_fct_norm 2.0 +821 7 optimizer.lr 
0.005913673115226626 +821 7 training.batch_size 1.0 +821 7 training.label_smoothing 0.033443011393472576 +821 8 model.embedding_dim 0.0 +821 8 model.scoring_fct_norm 2.0 +821 8 optimizer.lr 0.003055871794080174 +821 8 training.batch_size 2.0 +821 8 training.label_smoothing 0.023081295993315754 +821 9 model.embedding_dim 0.0 +821 9 model.scoring_fct_norm 2.0 +821 9 optimizer.lr 0.0920162902639993 +821 9 training.batch_size 0.0 +821 9 training.label_smoothing 0.36423966826824317 +821 10 model.embedding_dim 1.0 +821 10 model.scoring_fct_norm 1.0 +821 10 optimizer.lr 0.002327393464169768 +821 10 training.batch_size 0.0 +821 10 training.label_smoothing 0.1035545303854224 +821 11 model.embedding_dim 2.0 +821 11 model.scoring_fct_norm 2.0 +821 11 optimizer.lr 0.0037016634830589175 +821 11 training.batch_size 0.0 +821 11 training.label_smoothing 0.13596611354616667 +821 12 model.embedding_dim 2.0 +821 12 model.scoring_fct_norm 2.0 +821 12 optimizer.lr 0.03402086500162671 +821 12 training.batch_size 1.0 +821 12 training.label_smoothing 0.004810677636113524 +821 13 model.embedding_dim 2.0 +821 13 model.scoring_fct_norm 2.0 +821 13 optimizer.lr 0.022712611129106747 +821 13 training.batch_size 0.0 +821 13 training.label_smoothing 0.0011285367663380534 +821 14 model.embedding_dim 2.0 +821 14 model.scoring_fct_norm 1.0 +821 14 optimizer.lr 0.0035538058486420236 +821 14 training.batch_size 0.0 +821 14 training.label_smoothing 0.014620403633675614 +821 15 model.embedding_dim 1.0 +821 15 model.scoring_fct_norm 2.0 +821 15 optimizer.lr 0.003563204882060669 +821 15 training.batch_size 2.0 +821 15 training.label_smoothing 0.020121735888055253 +821 16 model.embedding_dim 2.0 +821 16 model.scoring_fct_norm 1.0 +821 16 optimizer.lr 0.0061059699564804905 +821 16 training.batch_size 1.0 +821 16 training.label_smoothing 0.008624759564587041 +821 17 model.embedding_dim 0.0 +821 17 model.scoring_fct_norm 1.0 +821 17 optimizer.lr 0.018988884674183124 +821 17 training.batch_size 0.0 +821 17 
training.label_smoothing 0.04791801657734682 +821 18 model.embedding_dim 2.0 +821 18 model.scoring_fct_norm 1.0 +821 18 optimizer.lr 0.023787743664113962 +821 18 training.batch_size 2.0 +821 18 training.label_smoothing 0.011512687273467657 +821 19 model.embedding_dim 1.0 +821 19 model.scoring_fct_norm 2.0 +821 19 optimizer.lr 0.04227506418272117 +821 19 training.batch_size 0.0 +821 19 training.label_smoothing 0.08574627826571832 +821 20 model.embedding_dim 0.0 +821 20 model.scoring_fct_norm 1.0 +821 20 optimizer.lr 0.003996139333829943 +821 20 training.batch_size 2.0 +821 20 training.label_smoothing 0.009554688527633306 +821 21 model.embedding_dim 2.0 +821 21 model.scoring_fct_norm 2.0 +821 21 optimizer.lr 0.02066242618192468 +821 21 training.batch_size 2.0 +821 21 training.label_smoothing 0.4028193661305166 +821 22 model.embedding_dim 2.0 +821 22 model.scoring_fct_norm 1.0 +821 22 optimizer.lr 0.014006749819277691 +821 22 training.batch_size 1.0 +821 22 training.label_smoothing 0.03145361450164931 +821 23 model.embedding_dim 1.0 +821 23 model.scoring_fct_norm 1.0 +821 23 optimizer.lr 0.07033558535739916 +821 23 training.batch_size 1.0 +821 23 training.label_smoothing 0.010564595641959081 +821 24 model.embedding_dim 2.0 +821 24 model.scoring_fct_norm 2.0 +821 24 optimizer.lr 0.0014594960333447196 +821 24 training.batch_size 1.0 +821 24 training.label_smoothing 0.03246856599186756 +821 25 model.embedding_dim 2.0 +821 25 model.scoring_fct_norm 1.0 +821 25 optimizer.lr 0.01941202644336483 +821 25 training.batch_size 1.0 +821 25 training.label_smoothing 0.00854305956460872 +821 26 model.embedding_dim 0.0 +821 26 model.scoring_fct_norm 2.0 +821 26 optimizer.lr 0.009839824555829471 +821 26 training.batch_size 0.0 +821 26 training.label_smoothing 0.0015008292136644685 +821 27 model.embedding_dim 0.0 +821 27 model.scoring_fct_norm 2.0 +821 27 optimizer.lr 0.0034321306739310974 +821 27 training.batch_size 0.0 +821 27 training.label_smoothing 0.01132287574679264 +821 28 
model.embedding_dim 2.0 +821 28 model.scoring_fct_norm 1.0 +821 28 optimizer.lr 0.0023454756234394304 +821 28 training.batch_size 0.0 +821 28 training.label_smoothing 0.6250421888207562 +821 29 model.embedding_dim 1.0 +821 29 model.scoring_fct_norm 1.0 +821 29 optimizer.lr 0.0032177855557865274 +821 29 training.batch_size 0.0 +821 29 training.label_smoothing 0.5538400180525308 +821 30 model.embedding_dim 2.0 +821 30 model.scoring_fct_norm 2.0 +821 30 optimizer.lr 0.0013528042815676722 +821 30 training.batch_size 2.0 +821 30 training.label_smoothing 0.9846019005454827 +821 31 model.embedding_dim 2.0 +821 31 model.scoring_fct_norm 1.0 +821 31 optimizer.lr 0.001478796367916955 +821 31 training.batch_size 0.0 +821 31 training.label_smoothing 0.27628803075488173 +821 32 model.embedding_dim 1.0 +821 32 model.scoring_fct_norm 1.0 +821 32 optimizer.lr 0.002770590162728144 +821 32 training.batch_size 1.0 +821 32 training.label_smoothing 0.00563102419207852 +821 33 model.embedding_dim 1.0 +821 33 model.scoring_fct_norm 2.0 +821 33 optimizer.lr 0.0026785792091343166 +821 33 training.batch_size 0.0 +821 33 training.label_smoothing 0.01989814959755341 +821 34 model.embedding_dim 0.0 +821 34 model.scoring_fct_norm 1.0 +821 34 optimizer.lr 0.0013101397960965785 +821 34 training.batch_size 2.0 +821 34 training.label_smoothing 0.0028741385043656066 +821 35 model.embedding_dim 2.0 +821 35 model.scoring_fct_norm 2.0 +821 35 optimizer.lr 0.0028139565369796875 +821 35 training.batch_size 2.0 +821 35 training.label_smoothing 0.0067101948648185025 +821 36 model.embedding_dim 0.0 +821 36 model.scoring_fct_norm 1.0 +821 36 optimizer.lr 0.0015457128174074409 +821 36 training.batch_size 1.0 +821 36 training.label_smoothing 0.020690104959431358 +821 37 model.embedding_dim 1.0 +821 37 model.scoring_fct_norm 1.0 +821 37 optimizer.lr 0.014757771780396937 +821 37 training.batch_size 2.0 +821 37 training.label_smoothing 0.04811499229901277 +821 38 model.embedding_dim 0.0 +821 38 
model.scoring_fct_norm 1.0 +821 38 optimizer.lr 0.0021679071242181247 +821 38 training.batch_size 1.0 +821 38 training.label_smoothing 0.030614590956911224 +821 39 model.embedding_dim 2.0 +821 39 model.scoring_fct_norm 2.0 +821 39 optimizer.lr 0.058754901697372186 +821 39 training.batch_size 2.0 +821 39 training.label_smoothing 0.042454615392428424 +821 40 model.embedding_dim 1.0 +821 40 model.scoring_fct_norm 1.0 +821 40 optimizer.lr 0.004549781860581609 +821 40 training.batch_size 2.0 +821 40 training.label_smoothing 0.0011867766266129218 +821 41 model.embedding_dim 0.0 +821 41 model.scoring_fct_norm 2.0 +821 41 optimizer.lr 0.02307371472372364 +821 41 training.batch_size 2.0 +821 41 training.label_smoothing 0.0033484629461349126 +821 42 model.embedding_dim 2.0 +821 42 model.scoring_fct_norm 1.0 +821 42 optimizer.lr 0.006983964112238859 +821 42 training.batch_size 1.0 +821 42 training.label_smoothing 0.0031353512807869204 +821 43 model.embedding_dim 1.0 +821 43 model.scoring_fct_norm 2.0 +821 43 optimizer.lr 0.012208340544416545 +821 43 training.batch_size 0.0 +821 43 training.label_smoothing 0.6481406668535439 +821 44 model.embedding_dim 1.0 +821 44 model.scoring_fct_norm 2.0 +821 44 optimizer.lr 0.0888999554169153 +821 44 training.batch_size 2.0 +821 44 training.label_smoothing 0.10426220887758812 +821 45 model.embedding_dim 0.0 +821 45 model.scoring_fct_norm 2.0 +821 45 optimizer.lr 0.0214351909261907 +821 45 training.batch_size 2.0 +821 45 training.label_smoothing 0.022958285909911762 +821 46 model.embedding_dim 2.0 +821 46 model.scoring_fct_norm 1.0 +821 46 optimizer.lr 0.001751165357984583 +821 46 training.batch_size 2.0 +821 46 training.label_smoothing 0.020953691976165822 +821 47 model.embedding_dim 0.0 +821 47 model.scoring_fct_norm 2.0 +821 47 optimizer.lr 0.006849423776489155 +821 47 training.batch_size 2.0 +821 47 training.label_smoothing 0.06383364810063366 +821 48 model.embedding_dim 1.0 +821 48 model.scoring_fct_norm 2.0 +821 48 optimizer.lr 
0.007926212409456172 +821 48 training.batch_size 1.0 +821 48 training.label_smoothing 0.16733071032932229 +821 49 model.embedding_dim 0.0 +821 49 model.scoring_fct_norm 2.0 +821 49 optimizer.lr 0.05641083939536525 +821 49 training.batch_size 0.0 +821 49 training.label_smoothing 0.01614252465215457 +821 50 model.embedding_dim 0.0 +821 50 model.scoring_fct_norm 2.0 +821 50 optimizer.lr 0.010665374219856479 +821 50 training.batch_size 0.0 +821 50 training.label_smoothing 0.0037890390733362643 +821 51 model.embedding_dim 0.0 +821 51 model.scoring_fct_norm 1.0 +821 51 optimizer.lr 0.005986007810333423 +821 51 training.batch_size 2.0 +821 51 training.label_smoothing 0.14371830323271595 +821 52 model.embedding_dim 0.0 +821 52 model.scoring_fct_norm 2.0 +821 52 optimizer.lr 0.01602652608463798 +821 52 training.batch_size 2.0 +821 52 training.label_smoothing 0.0029665614627273347 +821 53 model.embedding_dim 0.0 +821 53 model.scoring_fct_norm 2.0 +821 53 optimizer.lr 0.02051747823109389 +821 53 training.batch_size 1.0 +821 53 training.label_smoothing 0.0072049292162055375 +821 54 model.embedding_dim 2.0 +821 54 model.scoring_fct_norm 1.0 +821 54 optimizer.lr 0.021170645382114443 +821 54 training.batch_size 2.0 +821 54 training.label_smoothing 0.0010595014412930273 +821 55 model.embedding_dim 1.0 +821 55 model.scoring_fct_norm 2.0 +821 55 optimizer.lr 0.01783197920686352 +821 55 training.batch_size 1.0 +821 55 training.label_smoothing 0.6947232808668165 +821 56 model.embedding_dim 0.0 +821 56 model.scoring_fct_norm 1.0 +821 56 optimizer.lr 0.0031120724417658613 +821 56 training.batch_size 1.0 +821 56 training.label_smoothing 0.003260379599466102 +821 57 model.embedding_dim 0.0 +821 57 model.scoring_fct_norm 1.0 +821 57 optimizer.lr 0.007405241735371595 +821 57 training.batch_size 0.0 +821 57 training.label_smoothing 0.006469501442298945 +821 58 model.embedding_dim 2.0 +821 58 model.scoring_fct_norm 2.0 +821 58 optimizer.lr 0.0013479649210332012 +821 58 training.batch_size 0.0 
+821 58 training.label_smoothing 0.20660862940338648 +821 59 model.embedding_dim 2.0 +821 59 model.scoring_fct_norm 1.0 +821 59 optimizer.lr 0.014754141711541246 +821 59 training.batch_size 0.0 +821 59 training.label_smoothing 0.0037784931763500196 +821 60 model.embedding_dim 0.0 +821 60 model.scoring_fct_norm 1.0 +821 60 optimizer.lr 0.010569357199163394 +821 60 training.batch_size 2.0 +821 60 training.label_smoothing 0.4559878184897412 +821 61 model.embedding_dim 0.0 +821 61 model.scoring_fct_norm 2.0 +821 61 optimizer.lr 0.00313178216539631 +821 61 training.batch_size 1.0 +821 61 training.label_smoothing 0.0023633736932355564 +821 62 model.embedding_dim 1.0 +821 62 model.scoring_fct_norm 2.0 +821 62 optimizer.lr 0.002932079755626482 +821 62 training.batch_size 1.0 +821 62 training.label_smoothing 0.018425286183385053 +821 63 model.embedding_dim 1.0 +821 63 model.scoring_fct_norm 1.0 +821 63 optimizer.lr 0.002231580665318212 +821 63 training.batch_size 2.0 +821 63 training.label_smoothing 0.11042147132787426 +821 64 model.embedding_dim 2.0 +821 64 model.scoring_fct_norm 2.0 +821 64 optimizer.lr 0.03844707641096993 +821 64 training.batch_size 1.0 +821 64 training.label_smoothing 0.010576547870629576 +821 65 model.embedding_dim 1.0 +821 65 model.scoring_fct_norm 2.0 +821 65 optimizer.lr 0.004621954186757808 +821 65 training.batch_size 2.0 +821 65 training.label_smoothing 0.008627310226559727 +821 66 model.embedding_dim 0.0 +821 66 model.scoring_fct_norm 1.0 +821 66 optimizer.lr 0.010901613427183165 +821 66 training.batch_size 1.0 +821 66 training.label_smoothing 0.009414613117728347 +821 67 model.embedding_dim 2.0 +821 67 model.scoring_fct_norm 2.0 +821 67 optimizer.lr 0.005842402247098315 +821 67 training.batch_size 1.0 +821 67 training.label_smoothing 0.12426054105008003 +821 68 model.embedding_dim 2.0 +821 68 model.scoring_fct_norm 1.0 +821 68 optimizer.lr 0.08653077346453443 +821 68 training.batch_size 2.0 +821 68 training.label_smoothing 0.08322191281478626 
+821 69 model.embedding_dim 2.0 +821 69 model.scoring_fct_norm 1.0 +821 69 optimizer.lr 0.08644695116664154 +821 69 training.batch_size 1.0 +821 69 training.label_smoothing 0.05627831833554762 +821 70 model.embedding_dim 0.0 +821 70 model.scoring_fct_norm 2.0 +821 70 optimizer.lr 0.00848818551261338 +821 70 training.batch_size 1.0 +821 70 training.label_smoothing 0.007476939987025319 +821 71 model.embedding_dim 1.0 +821 71 model.scoring_fct_norm 2.0 +821 71 optimizer.lr 0.004643066560464243 +821 71 training.batch_size 1.0 +821 71 training.label_smoothing 0.9922514472836814 +821 72 model.embedding_dim 1.0 +821 72 model.scoring_fct_norm 2.0 +821 72 optimizer.lr 0.04685913563414697 +821 72 training.batch_size 2.0 +821 72 training.label_smoothing 0.9122289407969214 +821 73 model.embedding_dim 0.0 +821 73 model.scoring_fct_norm 1.0 +821 73 optimizer.lr 0.08165745807759682 +821 73 training.batch_size 0.0 +821 73 training.label_smoothing 0.14049393793427806 +821 74 model.embedding_dim 2.0 +821 74 model.scoring_fct_norm 1.0 +821 74 optimizer.lr 0.002831484184161482 +821 74 training.batch_size 1.0 +821 74 training.label_smoothing 0.6239792868272338 +821 75 model.embedding_dim 0.0 +821 75 model.scoring_fct_norm 1.0 +821 75 optimizer.lr 0.03682278233175172 +821 75 training.batch_size 1.0 +821 75 training.label_smoothing 0.029021737410668564 +821 76 model.embedding_dim 0.0 +821 76 model.scoring_fct_norm 2.0 +821 76 optimizer.lr 0.014576254832757547 +821 76 training.batch_size 0.0 +821 76 training.label_smoothing 0.007249589284435007 +821 77 model.embedding_dim 0.0 +821 77 model.scoring_fct_norm 2.0 +821 77 optimizer.lr 0.0024559636281605386 +821 77 training.batch_size 0.0 +821 77 training.label_smoothing 0.12963461584275457 +821 78 model.embedding_dim 1.0 +821 78 model.scoring_fct_norm 2.0 +821 78 optimizer.lr 0.00444859156838953 +821 78 training.batch_size 2.0 +821 78 training.label_smoothing 0.0010584774931727543 +821 79 model.embedding_dim 2.0 +821 79 model.scoring_fct_norm 
1.0 +821 79 optimizer.lr 0.013026616767375997 +821 79 training.batch_size 2.0 +821 79 training.label_smoothing 0.012310234483624269 +821 80 model.embedding_dim 2.0 +821 80 model.scoring_fct_norm 2.0 +821 80 optimizer.lr 0.009772796133491354 +821 80 training.batch_size 2.0 +821 80 training.label_smoothing 0.001067830876319504 +821 81 model.embedding_dim 2.0 +821 81 model.scoring_fct_norm 1.0 +821 81 optimizer.lr 0.0379955732739335 +821 81 training.batch_size 0.0 +821 81 training.label_smoothing 0.1447355079382325 +821 82 model.embedding_dim 2.0 +821 82 model.scoring_fct_norm 1.0 +821 82 optimizer.lr 0.0069732318153379585 +821 82 training.batch_size 0.0 +821 82 training.label_smoothing 0.029739238528160756 +821 83 model.embedding_dim 0.0 +821 83 model.scoring_fct_norm 2.0 +821 83 optimizer.lr 0.0029046416401287568 +821 83 training.batch_size 2.0 +821 83 training.label_smoothing 0.0012661180340928768 +821 84 model.embedding_dim 0.0 +821 84 model.scoring_fct_norm 1.0 +821 84 optimizer.lr 0.0022074501202450185 +821 84 training.batch_size 1.0 +821 84 training.label_smoothing 0.22055245482446484 +821 85 model.embedding_dim 2.0 +821 85 model.scoring_fct_norm 1.0 +821 85 optimizer.lr 0.0012661416466391545 +821 85 training.batch_size 1.0 +821 85 training.label_smoothing 0.00993827534907081 +821 86 model.embedding_dim 0.0 +821 86 model.scoring_fct_norm 1.0 +821 86 optimizer.lr 0.005861498253285722 +821 86 training.batch_size 2.0 +821 86 training.label_smoothing 0.5071275442889901 +821 87 model.embedding_dim 1.0 +821 87 model.scoring_fct_norm 1.0 +821 87 optimizer.lr 0.07464799697972897 +821 87 training.batch_size 2.0 +821 87 training.label_smoothing 0.005646831171995427 +821 88 model.embedding_dim 2.0 +821 88 model.scoring_fct_norm 1.0 +821 88 optimizer.lr 0.07999809146606664 +821 88 training.batch_size 2.0 +821 88 training.label_smoothing 0.021327092354017602 +821 89 model.embedding_dim 2.0 +821 89 model.scoring_fct_norm 2.0 +821 89 optimizer.lr 0.003739912875156124 +821 89 
training.batch_size 2.0 +821 89 training.label_smoothing 0.9543918152256461 +821 90 model.embedding_dim 0.0 +821 90 model.scoring_fct_norm 1.0 +821 90 optimizer.lr 0.0011936635911109512 +821 90 training.batch_size 2.0 +821 90 training.label_smoothing 0.5504121320198421 +821 91 model.embedding_dim 0.0 +821 91 model.scoring_fct_norm 2.0 +821 91 optimizer.lr 0.0011380728821057367 +821 91 training.batch_size 1.0 +821 91 training.label_smoothing 0.5123024505851788 +821 92 model.embedding_dim 1.0 +821 92 model.scoring_fct_norm 1.0 +821 92 optimizer.lr 0.009195737171541509 +821 92 training.batch_size 2.0 +821 92 training.label_smoothing 0.0029888257655306433 +821 93 model.embedding_dim 2.0 +821 93 model.scoring_fct_norm 1.0 +821 93 optimizer.lr 0.029813310987973688 +821 93 training.batch_size 1.0 +821 93 training.label_smoothing 0.05474165751972059 +821 94 model.embedding_dim 1.0 +821 94 model.scoring_fct_norm 2.0 +821 94 optimizer.lr 0.03968476896437424 +821 94 training.batch_size 2.0 +821 94 training.label_smoothing 0.003691015211046882 +821 95 model.embedding_dim 1.0 +821 95 model.scoring_fct_norm 2.0 +821 95 optimizer.lr 0.0057076646651218355 +821 95 training.batch_size 1.0 +821 95 training.label_smoothing 0.1092793170212815 +821 96 model.embedding_dim 0.0 +821 96 model.scoring_fct_norm 2.0 +821 96 optimizer.lr 0.007065175780973341 +821 96 training.batch_size 2.0 +821 96 training.label_smoothing 0.003006379945146034 +821 97 model.embedding_dim 0.0 +821 97 model.scoring_fct_norm 1.0 +821 97 optimizer.lr 0.017320202959305718 +821 97 training.batch_size 1.0 +821 97 training.label_smoothing 0.004815278775797705 +821 98 model.embedding_dim 1.0 +821 98 model.scoring_fct_norm 2.0 +821 98 optimizer.lr 0.004801720277068967 +821 98 training.batch_size 0.0 +821 98 training.label_smoothing 0.0015543378103858626 +821 99 model.embedding_dim 0.0 +821 99 model.scoring_fct_norm 2.0 +821 99 optimizer.lr 0.07559921015489156 +821 99 training.batch_size 2.0 +821 99 
training.label_smoothing 0.22380946656268297 +821 100 model.embedding_dim 2.0 +821 100 model.scoring_fct_norm 2.0 +821 100 optimizer.lr 0.08956977931904594 +821 100 training.batch_size 2.0 +821 100 training.label_smoothing 0.19097168585024954 +821 1 dataset """kinships""" +821 1 model """transe""" +821 1 loss """bceaftersigmoid""" +821 1 regularizer """no""" +821 1 optimizer """adam""" +821 1 training_loop """lcwa""" +821 1 evaluator """rankbased""" +821 2 dataset """kinships""" +821 2 model """transe""" +821 2 loss """bceaftersigmoid""" +821 2 regularizer """no""" +821 2 optimizer """adam""" +821 2 training_loop """lcwa""" +821 2 evaluator """rankbased""" +821 3 dataset """kinships""" +821 3 model """transe""" +821 3 loss """bceaftersigmoid""" +821 3 regularizer """no""" +821 3 optimizer """adam""" +821 3 training_loop """lcwa""" +821 3 evaluator """rankbased""" +821 4 dataset """kinships""" +821 4 model """transe""" +821 4 loss """bceaftersigmoid""" +821 4 regularizer """no""" +821 4 optimizer """adam""" +821 4 training_loop """lcwa""" +821 4 evaluator """rankbased""" +821 5 dataset """kinships""" +821 5 model """transe""" +821 5 loss """bceaftersigmoid""" +821 5 regularizer """no""" +821 5 optimizer """adam""" +821 5 training_loop """lcwa""" +821 5 evaluator """rankbased""" +821 6 dataset """kinships""" +821 6 model """transe""" +821 6 loss """bceaftersigmoid""" +821 6 regularizer """no""" +821 6 optimizer """adam""" +821 6 training_loop """lcwa""" +821 6 evaluator """rankbased""" +821 7 dataset """kinships""" +821 7 model """transe""" +821 7 loss """bceaftersigmoid""" +821 7 regularizer """no""" +821 7 optimizer """adam""" +821 7 training_loop """lcwa""" +821 7 evaluator """rankbased""" +821 8 dataset """kinships""" +821 8 model """transe""" +821 8 loss """bceaftersigmoid""" +821 8 regularizer """no""" +821 8 optimizer """adam""" +821 8 training_loop """lcwa""" +821 8 evaluator """rankbased""" +821 9 dataset """kinships""" +821 9 model """transe""" +821 9 loss 
"""bceaftersigmoid""" +821 9 regularizer """no""" +821 9 optimizer """adam""" +821 9 training_loop """lcwa""" +821 9 evaluator """rankbased""" +821 10 dataset """kinships""" +821 10 model """transe""" +821 10 loss """bceaftersigmoid""" +821 10 regularizer """no""" +821 10 optimizer """adam""" +821 10 training_loop """lcwa""" +821 10 evaluator """rankbased""" +821 11 dataset """kinships""" +821 11 model """transe""" +821 11 loss """bceaftersigmoid""" +821 11 regularizer """no""" +821 11 optimizer """adam""" +821 11 training_loop """lcwa""" +821 11 evaluator """rankbased""" +821 12 dataset """kinships""" +821 12 model """transe""" +821 12 loss """bceaftersigmoid""" +821 12 regularizer """no""" +821 12 optimizer """adam""" +821 12 training_loop """lcwa""" +821 12 evaluator """rankbased""" +821 13 dataset """kinships""" +821 13 model """transe""" +821 13 loss """bceaftersigmoid""" +821 13 regularizer """no""" +821 13 optimizer """adam""" +821 13 training_loop """lcwa""" +821 13 evaluator """rankbased""" +821 14 dataset """kinships""" +821 14 model """transe""" +821 14 loss """bceaftersigmoid""" +821 14 regularizer """no""" +821 14 optimizer """adam""" +821 14 training_loop """lcwa""" +821 14 evaluator """rankbased""" +821 15 dataset """kinships""" +821 15 model """transe""" +821 15 loss """bceaftersigmoid""" +821 15 regularizer """no""" +821 15 optimizer """adam""" +821 15 training_loop """lcwa""" +821 15 evaluator """rankbased""" +821 16 dataset """kinships""" +821 16 model """transe""" +821 16 loss """bceaftersigmoid""" +821 16 regularizer """no""" +821 16 optimizer """adam""" +821 16 training_loop """lcwa""" +821 16 evaluator """rankbased""" +821 17 dataset """kinships""" +821 17 model """transe""" +821 17 loss """bceaftersigmoid""" +821 17 regularizer """no""" +821 17 optimizer """adam""" +821 17 training_loop """lcwa""" +821 17 evaluator """rankbased""" +821 18 dataset """kinships""" +821 18 model """transe""" +821 18 loss """bceaftersigmoid""" +821 18 regularizer 
"""no""" +821 18 optimizer """adam""" +821 18 training_loop """lcwa""" +821 18 evaluator """rankbased""" +821 19 dataset """kinships""" +821 19 model """transe""" +821 19 loss """bceaftersigmoid""" +821 19 regularizer """no""" +821 19 optimizer """adam""" +821 19 training_loop """lcwa""" +821 19 evaluator """rankbased""" +821 20 dataset """kinships""" +821 20 model """transe""" +821 20 loss """bceaftersigmoid""" +821 20 regularizer """no""" +821 20 optimizer """adam""" +821 20 training_loop """lcwa""" +821 20 evaluator """rankbased""" +821 21 dataset """kinships""" +821 21 model """transe""" +821 21 loss """bceaftersigmoid""" +821 21 regularizer """no""" +821 21 optimizer """adam""" +821 21 training_loop """lcwa""" +821 21 evaluator """rankbased""" +821 22 dataset """kinships""" +821 22 model """transe""" +821 22 loss """bceaftersigmoid""" +821 22 regularizer """no""" +821 22 optimizer """adam""" +821 22 training_loop """lcwa""" +821 22 evaluator """rankbased""" +821 23 dataset """kinships""" +821 23 model """transe""" +821 23 loss """bceaftersigmoid""" +821 23 regularizer """no""" +821 23 optimizer """adam""" +821 23 training_loop """lcwa""" +821 23 evaluator """rankbased""" +821 24 dataset """kinships""" +821 24 model """transe""" +821 24 loss """bceaftersigmoid""" +821 24 regularizer """no""" +821 24 optimizer """adam""" +821 24 training_loop """lcwa""" +821 24 evaluator """rankbased""" +821 25 dataset """kinships""" +821 25 model """transe""" +821 25 loss """bceaftersigmoid""" +821 25 regularizer """no""" +821 25 optimizer """adam""" +821 25 training_loop """lcwa""" +821 25 evaluator """rankbased""" +821 26 dataset """kinships""" +821 26 model """transe""" +821 26 loss """bceaftersigmoid""" +821 26 regularizer """no""" +821 26 optimizer """adam""" +821 26 training_loop """lcwa""" +821 26 evaluator """rankbased""" +821 27 dataset """kinships""" +821 27 model """transe""" +821 27 loss """bceaftersigmoid""" +821 27 regularizer """no""" +821 27 optimizer """adam""" 
+821 27 training_loop """lcwa""" +821 27 evaluator """rankbased""" +821 28 dataset """kinships""" +821 28 model """transe""" +821 28 loss """bceaftersigmoid""" +821 28 regularizer """no""" +821 28 optimizer """adam""" +821 28 training_loop """lcwa""" +821 28 evaluator """rankbased""" +821 29 dataset """kinships""" +821 29 model """transe""" +821 29 loss """bceaftersigmoid""" +821 29 regularizer """no""" +821 29 optimizer """adam""" +821 29 training_loop """lcwa""" +821 29 evaluator """rankbased""" +821 30 dataset """kinships""" +821 30 model """transe""" +821 30 loss """bceaftersigmoid""" +821 30 regularizer """no""" +821 30 optimizer """adam""" +821 30 training_loop """lcwa""" +821 30 evaluator """rankbased""" +821 31 dataset """kinships""" +821 31 model """transe""" +821 31 loss """bceaftersigmoid""" +821 31 regularizer """no""" +821 31 optimizer """adam""" +821 31 training_loop """lcwa""" +821 31 evaluator """rankbased""" +821 32 dataset """kinships""" +821 32 model """transe""" +821 32 loss """bceaftersigmoid""" +821 32 regularizer """no""" +821 32 optimizer """adam""" +821 32 training_loop """lcwa""" +821 32 evaluator """rankbased""" +821 33 dataset """kinships""" +821 33 model """transe""" +821 33 loss """bceaftersigmoid""" +821 33 regularizer """no""" +821 33 optimizer """adam""" +821 33 training_loop """lcwa""" +821 33 evaluator """rankbased""" +821 34 dataset """kinships""" +821 34 model """transe""" +821 34 loss """bceaftersigmoid""" +821 34 regularizer """no""" +821 34 optimizer """adam""" +821 34 training_loop """lcwa""" +821 34 evaluator """rankbased""" +821 35 dataset """kinships""" +821 35 model """transe""" +821 35 loss """bceaftersigmoid""" +821 35 regularizer """no""" +821 35 optimizer """adam""" +821 35 training_loop """lcwa""" +821 35 evaluator """rankbased""" +821 36 dataset """kinships""" +821 36 model """transe""" +821 36 loss """bceaftersigmoid""" +821 36 regularizer """no""" +821 36 optimizer """adam""" +821 36 training_loop """lcwa""" +821 
36 evaluator """rankbased""" +821 37 dataset """kinships""" +821 37 model """transe""" +821 37 loss """bceaftersigmoid""" +821 37 regularizer """no""" +821 37 optimizer """adam""" +821 37 training_loop """lcwa""" +821 37 evaluator """rankbased""" +821 38 dataset """kinships""" +821 38 model """transe""" +821 38 loss """bceaftersigmoid""" +821 38 regularizer """no""" +821 38 optimizer """adam""" +821 38 training_loop """lcwa""" +821 38 evaluator """rankbased""" +821 39 dataset """kinships""" +821 39 model """transe""" +821 39 loss """bceaftersigmoid""" +821 39 regularizer """no""" +821 39 optimizer """adam""" +821 39 training_loop """lcwa""" +821 39 evaluator """rankbased""" +821 40 dataset """kinships""" +821 40 model """transe""" +821 40 loss """bceaftersigmoid""" +821 40 regularizer """no""" +821 40 optimizer """adam""" +821 40 training_loop """lcwa""" +821 40 evaluator """rankbased""" +821 41 dataset """kinships""" +821 41 model """transe""" +821 41 loss """bceaftersigmoid""" +821 41 regularizer """no""" +821 41 optimizer """adam""" +821 41 training_loop """lcwa""" +821 41 evaluator """rankbased""" +821 42 dataset """kinships""" +821 42 model """transe""" +821 42 loss """bceaftersigmoid""" +821 42 regularizer """no""" +821 42 optimizer """adam""" +821 42 training_loop """lcwa""" +821 42 evaluator """rankbased""" +821 43 dataset """kinships""" +821 43 model """transe""" +821 43 loss """bceaftersigmoid""" +821 43 regularizer """no""" +821 43 optimizer """adam""" +821 43 training_loop """lcwa""" +821 43 evaluator """rankbased""" +821 44 dataset """kinships""" +821 44 model """transe""" +821 44 loss """bceaftersigmoid""" +821 44 regularizer """no""" +821 44 optimizer """adam""" +821 44 training_loop """lcwa""" +821 44 evaluator """rankbased""" +821 45 dataset """kinships""" +821 45 model """transe""" +821 45 loss """bceaftersigmoid""" +821 45 regularizer """no""" +821 45 optimizer """adam""" +821 45 training_loop """lcwa""" +821 45 evaluator """rankbased""" +821 46 
dataset """kinships""" +821 46 model """transe""" +821 46 loss """bceaftersigmoid""" +821 46 regularizer """no""" +821 46 optimizer """adam""" +821 46 training_loop """lcwa""" +821 46 evaluator """rankbased""" +821 47 dataset """kinships""" +821 47 model """transe""" +821 47 loss """bceaftersigmoid""" +821 47 regularizer """no""" +821 47 optimizer """adam""" +821 47 training_loop """lcwa""" +821 47 evaluator """rankbased""" +821 48 dataset """kinships""" +821 48 model """transe""" +821 48 loss """bceaftersigmoid""" +821 48 regularizer """no""" +821 48 optimizer """adam""" +821 48 training_loop """lcwa""" +821 48 evaluator """rankbased""" +821 49 dataset """kinships""" +821 49 model """transe""" +821 49 loss """bceaftersigmoid""" +821 49 regularizer """no""" +821 49 optimizer """adam""" +821 49 training_loop """lcwa""" +821 49 evaluator """rankbased""" +821 50 dataset """kinships""" +821 50 model """transe""" +821 50 loss """bceaftersigmoid""" +821 50 regularizer """no""" +821 50 optimizer """adam""" +821 50 training_loop """lcwa""" +821 50 evaluator """rankbased""" +821 51 dataset """kinships""" +821 51 model """transe""" +821 51 loss """bceaftersigmoid""" +821 51 regularizer """no""" +821 51 optimizer """adam""" +821 51 training_loop """lcwa""" +821 51 evaluator """rankbased""" +821 52 dataset """kinships""" +821 52 model """transe""" +821 52 loss """bceaftersigmoid""" +821 52 regularizer """no""" +821 52 optimizer """adam""" +821 52 training_loop """lcwa""" +821 52 evaluator """rankbased""" +821 53 dataset """kinships""" +821 53 model """transe""" +821 53 loss """bceaftersigmoid""" +821 53 regularizer """no""" +821 53 optimizer """adam""" +821 53 training_loop """lcwa""" +821 53 evaluator """rankbased""" +821 54 dataset """kinships""" +821 54 model """transe""" +821 54 loss """bceaftersigmoid""" +821 54 regularizer """no""" +821 54 optimizer """adam""" +821 54 training_loop """lcwa""" +821 54 evaluator """rankbased""" +821 55 dataset """kinships""" +821 55 model 
"""transe""" +821 55 loss """bceaftersigmoid""" +821 55 regularizer """no""" +821 55 optimizer """adam""" +821 55 training_loop """lcwa""" +821 55 evaluator """rankbased""" +821 56 dataset """kinships""" +821 56 model """transe""" +821 56 loss """bceaftersigmoid""" +821 56 regularizer """no""" +821 56 optimizer """adam""" +821 56 training_loop """lcwa""" +821 56 evaluator """rankbased""" +821 57 dataset """kinships""" +821 57 model """transe""" +821 57 loss """bceaftersigmoid""" +821 57 regularizer """no""" +821 57 optimizer """adam""" +821 57 training_loop """lcwa""" +821 57 evaluator """rankbased""" +821 58 dataset """kinships""" +821 58 model """transe""" +821 58 loss """bceaftersigmoid""" +821 58 regularizer """no""" +821 58 optimizer """adam""" +821 58 training_loop """lcwa""" +821 58 evaluator """rankbased""" +821 59 dataset """kinships""" +821 59 model """transe""" +821 59 loss """bceaftersigmoid""" +821 59 regularizer """no""" +821 59 optimizer """adam""" +821 59 training_loop """lcwa""" +821 59 evaluator """rankbased""" +821 60 dataset """kinships""" +821 60 model """transe""" +821 60 loss """bceaftersigmoid""" +821 60 regularizer """no""" +821 60 optimizer """adam""" +821 60 training_loop """lcwa""" +821 60 evaluator """rankbased""" +821 61 dataset """kinships""" +821 61 model """transe""" +821 61 loss """bceaftersigmoid""" +821 61 regularizer """no""" +821 61 optimizer """adam""" +821 61 training_loop """lcwa""" +821 61 evaluator """rankbased""" +821 62 dataset """kinships""" +821 62 model """transe""" +821 62 loss """bceaftersigmoid""" +821 62 regularizer """no""" +821 62 optimizer """adam""" +821 62 training_loop """lcwa""" +821 62 evaluator """rankbased""" +821 63 dataset """kinships""" +821 63 model """transe""" +821 63 loss """bceaftersigmoid""" +821 63 regularizer """no""" +821 63 optimizer """adam""" +821 63 training_loop """lcwa""" +821 63 evaluator """rankbased""" +821 64 dataset """kinships""" +821 64 model """transe""" +821 64 loss 
"""bceaftersigmoid""" +821 64 regularizer """no""" +821 64 optimizer """adam""" +821 64 training_loop """lcwa""" +821 64 evaluator """rankbased""" +821 65 dataset """kinships""" +821 65 model """transe""" +821 65 loss """bceaftersigmoid""" +821 65 regularizer """no""" +821 65 optimizer """adam""" +821 65 training_loop """lcwa""" +821 65 evaluator """rankbased""" +821 66 dataset """kinships""" +821 66 model """transe""" +821 66 loss """bceaftersigmoid""" +821 66 regularizer """no""" +821 66 optimizer """adam""" +821 66 training_loop """lcwa""" +821 66 evaluator """rankbased""" +821 67 dataset """kinships""" +821 67 model """transe""" +821 67 loss """bceaftersigmoid""" +821 67 regularizer """no""" +821 67 optimizer """adam""" +821 67 training_loop """lcwa""" +821 67 evaluator """rankbased""" +821 68 dataset """kinships""" +821 68 model """transe""" +821 68 loss """bceaftersigmoid""" +821 68 regularizer """no""" +821 68 optimizer """adam""" +821 68 training_loop """lcwa""" +821 68 evaluator """rankbased""" +821 69 dataset """kinships""" +821 69 model """transe""" +821 69 loss """bceaftersigmoid""" +821 69 regularizer """no""" +821 69 optimizer """adam""" +821 69 training_loop """lcwa""" +821 69 evaluator """rankbased""" +821 70 dataset """kinships""" +821 70 model """transe""" +821 70 loss """bceaftersigmoid""" +821 70 regularizer """no""" +821 70 optimizer """adam""" +821 70 training_loop """lcwa""" +821 70 evaluator """rankbased""" +821 71 dataset """kinships""" +821 71 model """transe""" +821 71 loss """bceaftersigmoid""" +821 71 regularizer """no""" +821 71 optimizer """adam""" +821 71 training_loop """lcwa""" +821 71 evaluator """rankbased""" +821 72 dataset """kinships""" +821 72 model """transe""" +821 72 loss """bceaftersigmoid""" +821 72 regularizer """no""" +821 72 optimizer """adam""" +821 72 training_loop """lcwa""" +821 72 evaluator """rankbased""" +821 73 dataset """kinships""" +821 73 model """transe""" +821 73 loss """bceaftersigmoid""" +821 73 
regularizer """no""" +821 73 optimizer """adam""" +821 73 training_loop """lcwa""" +821 73 evaluator """rankbased""" +821 74 dataset """kinships""" +821 74 model """transe""" +821 74 loss """bceaftersigmoid""" +821 74 regularizer """no""" +821 74 optimizer """adam""" +821 74 training_loop """lcwa""" +821 74 evaluator """rankbased""" +821 75 dataset """kinships""" +821 75 model """transe""" +821 75 loss """bceaftersigmoid""" +821 75 regularizer """no""" +821 75 optimizer """adam""" +821 75 training_loop """lcwa""" +821 75 evaluator """rankbased""" +821 76 dataset """kinships""" +821 76 model """transe""" +821 76 loss """bceaftersigmoid""" +821 76 regularizer """no""" +821 76 optimizer """adam""" +821 76 training_loop """lcwa""" +821 76 evaluator """rankbased""" +821 77 dataset """kinships""" +821 77 model """transe""" +821 77 loss """bceaftersigmoid""" +821 77 regularizer """no""" +821 77 optimizer """adam""" +821 77 training_loop """lcwa""" +821 77 evaluator """rankbased""" +821 78 dataset """kinships""" +821 78 model """transe""" +821 78 loss """bceaftersigmoid""" +821 78 regularizer """no""" +821 78 optimizer """adam""" +821 78 training_loop """lcwa""" +821 78 evaluator """rankbased""" +821 79 dataset """kinships""" +821 79 model """transe""" +821 79 loss """bceaftersigmoid""" +821 79 regularizer """no""" +821 79 optimizer """adam""" +821 79 training_loop """lcwa""" +821 79 evaluator """rankbased""" +821 80 dataset """kinships""" +821 80 model """transe""" +821 80 loss """bceaftersigmoid""" +821 80 regularizer """no""" +821 80 optimizer """adam""" +821 80 training_loop """lcwa""" +821 80 evaluator """rankbased""" +821 81 dataset """kinships""" +821 81 model """transe""" +821 81 loss """bceaftersigmoid""" +821 81 regularizer """no""" +821 81 optimizer """adam""" +821 81 training_loop """lcwa""" +821 81 evaluator """rankbased""" +821 82 dataset """kinships""" +821 82 model """transe""" +821 82 loss """bceaftersigmoid""" +821 82 regularizer """no""" +821 82 
optimizer """adam""" +821 82 training_loop """lcwa""" +821 82 evaluator """rankbased""" +821 83 dataset """kinships""" +821 83 model """transe""" +821 83 loss """bceaftersigmoid""" +821 83 regularizer """no""" +821 83 optimizer """adam""" +821 83 training_loop """lcwa""" +821 83 evaluator """rankbased""" +821 84 dataset """kinships""" +821 84 model """transe""" +821 84 loss """bceaftersigmoid""" +821 84 regularizer """no""" +821 84 optimizer """adam""" +821 84 training_loop """lcwa""" +821 84 evaluator """rankbased""" +821 85 dataset """kinships""" +821 85 model """transe""" +821 85 loss """bceaftersigmoid""" +821 85 regularizer """no""" +821 85 optimizer """adam""" +821 85 training_loop """lcwa""" +821 85 evaluator """rankbased""" +821 86 dataset """kinships""" +821 86 model """transe""" +821 86 loss """bceaftersigmoid""" +821 86 regularizer """no""" +821 86 optimizer """adam""" +821 86 training_loop """lcwa""" +821 86 evaluator """rankbased""" +821 87 dataset """kinships""" +821 87 model """transe""" +821 87 loss """bceaftersigmoid""" +821 87 regularizer """no""" +821 87 optimizer """adam""" +821 87 training_loop """lcwa""" +821 87 evaluator """rankbased""" +821 88 dataset """kinships""" +821 88 model """transe""" +821 88 loss """bceaftersigmoid""" +821 88 regularizer """no""" +821 88 optimizer """adam""" +821 88 training_loop """lcwa""" +821 88 evaluator """rankbased""" +821 89 dataset """kinships""" +821 89 model """transe""" +821 89 loss """bceaftersigmoid""" +821 89 regularizer """no""" +821 89 optimizer """adam""" +821 89 training_loop """lcwa""" +821 89 evaluator """rankbased""" +821 90 dataset """kinships""" +821 90 model """transe""" +821 90 loss """bceaftersigmoid""" +821 90 regularizer """no""" +821 90 optimizer """adam""" +821 90 training_loop """lcwa""" +821 90 evaluator """rankbased""" +821 91 dataset """kinships""" +821 91 model """transe""" +821 91 loss """bceaftersigmoid""" +821 91 regularizer """no""" +821 91 optimizer """adam""" +821 91 
training_loop """lcwa""" +821 91 evaluator """rankbased""" +821 92 dataset """kinships""" +821 92 model """transe""" +821 92 loss """bceaftersigmoid""" +821 92 regularizer """no""" +821 92 optimizer """adam""" +821 92 training_loop """lcwa""" +821 92 evaluator """rankbased""" +821 93 dataset """kinships""" +821 93 model """transe""" +821 93 loss """bceaftersigmoid""" +821 93 regularizer """no""" +821 93 optimizer """adam""" +821 93 training_loop """lcwa""" +821 93 evaluator """rankbased""" +821 94 dataset """kinships""" +821 94 model """transe""" +821 94 loss """bceaftersigmoid""" +821 94 regularizer """no""" +821 94 optimizer """adam""" +821 94 training_loop """lcwa""" +821 94 evaluator """rankbased""" +821 95 dataset """kinships""" +821 95 model """transe""" +821 95 loss """bceaftersigmoid""" +821 95 regularizer """no""" +821 95 optimizer """adam""" +821 95 training_loop """lcwa""" +821 95 evaluator """rankbased""" +821 96 dataset """kinships""" +821 96 model """transe""" +821 96 loss """bceaftersigmoid""" +821 96 regularizer """no""" +821 96 optimizer """adam""" +821 96 training_loop """lcwa""" +821 96 evaluator """rankbased""" +821 97 dataset """kinships""" +821 97 model """transe""" +821 97 loss """bceaftersigmoid""" +821 97 regularizer """no""" +821 97 optimizer """adam""" +821 97 training_loop """lcwa""" +821 97 evaluator """rankbased""" +821 98 dataset """kinships""" +821 98 model """transe""" +821 98 loss """bceaftersigmoid""" +821 98 regularizer """no""" +821 98 optimizer """adam""" +821 98 training_loop """lcwa""" +821 98 evaluator """rankbased""" +821 99 dataset """kinships""" +821 99 model """transe""" +821 99 loss """bceaftersigmoid""" +821 99 regularizer """no""" +821 99 optimizer """adam""" +821 99 training_loop """lcwa""" +821 99 evaluator """rankbased""" +821 100 dataset """kinships""" +821 100 model """transe""" +821 100 loss """bceaftersigmoid""" +821 100 regularizer """no""" +821 100 optimizer """adam""" +821 100 training_loop """lcwa""" +821 
100 evaluator """rankbased""" +822 1 model.embedding_dim 0.0 +822 1 model.scoring_fct_norm 2.0 +822 1 optimizer.lr 0.009519740112659427 +822 1 training.batch_size 2.0 +822 1 training.label_smoothing 0.004123637851271365 +822 2 model.embedding_dim 2.0 +822 2 model.scoring_fct_norm 2.0 +822 2 optimizer.lr 0.006270595112420692 +822 2 training.batch_size 2.0 +822 2 training.label_smoothing 0.03547890571336227 +822 3 model.embedding_dim 0.0 +822 3 model.scoring_fct_norm 1.0 +822 3 optimizer.lr 0.0036267321988189814 +822 3 training.batch_size 1.0 +822 3 training.label_smoothing 0.0014640056571063797 +822 4 model.embedding_dim 1.0 +822 4 model.scoring_fct_norm 1.0 +822 4 optimizer.lr 0.0034664476618068898 +822 4 training.batch_size 2.0 +822 4 training.label_smoothing 0.011496043681191686 +822 5 model.embedding_dim 2.0 +822 5 model.scoring_fct_norm 2.0 +822 5 optimizer.lr 0.0010237477221721665 +822 5 training.batch_size 0.0 +822 5 training.label_smoothing 0.34698087732345945 +822 6 model.embedding_dim 2.0 +822 6 model.scoring_fct_norm 1.0 +822 6 optimizer.lr 0.0010214019868951124 +822 6 training.batch_size 1.0 +822 6 training.label_smoothing 0.0060771688143995105 +822 7 model.embedding_dim 2.0 +822 7 model.scoring_fct_norm 1.0 +822 7 optimizer.lr 0.025920970924596963 +822 7 training.batch_size 0.0 +822 7 training.label_smoothing 0.9765887347897095 +822 8 model.embedding_dim 1.0 +822 8 model.scoring_fct_norm 1.0 +822 8 optimizer.lr 0.05105163471585991 +822 8 training.batch_size 2.0 +822 8 training.label_smoothing 0.00206539005442051 +822 9 model.embedding_dim 2.0 +822 9 model.scoring_fct_norm 2.0 +822 9 optimizer.lr 0.00967048200703255 +822 9 training.batch_size 0.0 +822 9 training.label_smoothing 0.05650831003464507 +822 10 model.embedding_dim 0.0 +822 10 model.scoring_fct_norm 1.0 +822 10 optimizer.lr 0.00410776403828367 +822 10 training.batch_size 2.0 +822 10 training.label_smoothing 0.017904897001401337 +822 11 model.embedding_dim 2.0 +822 11 model.scoring_fct_norm 2.0 
+822 11 optimizer.lr 0.043282632809559324 +822 11 training.batch_size 1.0 +822 11 training.label_smoothing 0.7123915264661984 +822 12 model.embedding_dim 2.0 +822 12 model.scoring_fct_norm 1.0 +822 12 optimizer.lr 0.0038543024607760436 +822 12 training.batch_size 2.0 +822 12 training.label_smoothing 0.18456192955660933 +822 13 model.embedding_dim 1.0 +822 13 model.scoring_fct_norm 1.0 +822 13 optimizer.lr 0.025577086098522356 +822 13 training.batch_size 1.0 +822 13 training.label_smoothing 0.08333731201389702 +822 14 model.embedding_dim 1.0 +822 14 model.scoring_fct_norm 2.0 +822 14 optimizer.lr 0.08188893892350857 +822 14 training.batch_size 2.0 +822 14 training.label_smoothing 0.0353662737161259 +822 15 model.embedding_dim 0.0 +822 15 model.scoring_fct_norm 2.0 +822 15 optimizer.lr 0.03948401992113235 +822 15 training.batch_size 0.0 +822 15 training.label_smoothing 0.11948524913965991 +822 16 model.embedding_dim 2.0 +822 16 model.scoring_fct_norm 2.0 +822 16 optimizer.lr 0.02184735777571683 +822 16 training.batch_size 2.0 +822 16 training.label_smoothing 0.009272532891647674 +822 17 model.embedding_dim 1.0 +822 17 model.scoring_fct_norm 2.0 +822 17 optimizer.lr 0.0401445560024655 +822 17 training.batch_size 2.0 +822 17 training.label_smoothing 0.033542297831262065 +822 18 model.embedding_dim 0.0 +822 18 model.scoring_fct_norm 2.0 +822 18 optimizer.lr 0.001520580693047768 +822 18 training.batch_size 0.0 +822 18 training.label_smoothing 0.022504687723511534 +822 19 model.embedding_dim 2.0 +822 19 model.scoring_fct_norm 1.0 +822 19 optimizer.lr 0.020369488130331414 +822 19 training.batch_size 2.0 +822 19 training.label_smoothing 0.003233951021476599 +822 20 model.embedding_dim 1.0 +822 20 model.scoring_fct_norm 2.0 +822 20 optimizer.lr 0.0023862362861221055 +822 20 training.batch_size 0.0 +822 20 training.label_smoothing 0.025925238627644176 +822 21 model.embedding_dim 0.0 +822 21 model.scoring_fct_norm 1.0 +822 21 optimizer.lr 0.040609279722232064 +822 21 
training.batch_size 2.0 +822 21 training.label_smoothing 0.010961919446973499 +822 22 model.embedding_dim 0.0 +822 22 model.scoring_fct_norm 1.0 +822 22 optimizer.lr 0.012195858097232845 +822 22 training.batch_size 0.0 +822 22 training.label_smoothing 0.01550636351077412 +822 23 model.embedding_dim 1.0 +822 23 model.scoring_fct_norm 2.0 +822 23 optimizer.lr 0.002728529877597112 +822 23 training.batch_size 2.0 +822 23 training.label_smoothing 0.03181770623066653 +822 24 model.embedding_dim 2.0 +822 24 model.scoring_fct_norm 1.0 +822 24 optimizer.lr 0.0011300918941011592 +822 24 training.batch_size 0.0 +822 24 training.label_smoothing 0.40789117083925136 +822 25 model.embedding_dim 2.0 +822 25 model.scoring_fct_norm 1.0 +822 25 optimizer.lr 0.01556789793243526 +822 25 training.batch_size 2.0 +822 25 training.label_smoothing 0.010860601917434412 +822 26 model.embedding_dim 2.0 +822 26 model.scoring_fct_norm 1.0 +822 26 optimizer.lr 0.011079167249674537 +822 26 training.batch_size 2.0 +822 26 training.label_smoothing 0.7119878975589526 +822 27 model.embedding_dim 2.0 +822 27 model.scoring_fct_norm 1.0 +822 27 optimizer.lr 0.01720099540193381 +822 27 training.batch_size 0.0 +822 27 training.label_smoothing 0.07573200214067391 +822 28 model.embedding_dim 1.0 +822 28 model.scoring_fct_norm 2.0 +822 28 optimizer.lr 0.019996466411977394 +822 28 training.batch_size 1.0 +822 28 training.label_smoothing 0.09936680435093319 +822 29 model.embedding_dim 2.0 +822 29 model.scoring_fct_norm 1.0 +822 29 optimizer.lr 0.07987354144019923 +822 29 training.batch_size 2.0 +822 29 training.label_smoothing 0.0017496406181436845 +822 30 model.embedding_dim 2.0 +822 30 model.scoring_fct_norm 2.0 +822 30 optimizer.lr 0.001997264801159296 +822 30 training.batch_size 1.0 +822 30 training.label_smoothing 0.010717038559537333 +822 31 model.embedding_dim 0.0 +822 31 model.scoring_fct_norm 2.0 +822 31 optimizer.lr 0.09390092572582494 +822 31 training.batch_size 2.0 +822 31 training.label_smoothing 
0.0016590264549635875 +822 32 model.embedding_dim 2.0 +822 32 model.scoring_fct_norm 2.0 +822 32 optimizer.lr 0.04824429834179435 +822 32 training.batch_size 1.0 +822 32 training.label_smoothing 0.24726198731228735 +822 33 model.embedding_dim 1.0 +822 33 model.scoring_fct_norm 1.0 +822 33 optimizer.lr 0.026292922309197164 +822 33 training.batch_size 0.0 +822 33 training.label_smoothing 0.01746912012835988 +822 34 model.embedding_dim 2.0 +822 34 model.scoring_fct_norm 1.0 +822 34 optimizer.lr 0.003855020137655111 +822 34 training.batch_size 1.0 +822 34 training.label_smoothing 0.4833993163984107 +822 35 model.embedding_dim 0.0 +822 35 model.scoring_fct_norm 1.0 +822 35 optimizer.lr 0.0509054240571181 +822 35 training.batch_size 1.0 +822 35 training.label_smoothing 0.012861042341992014 +822 36 model.embedding_dim 2.0 +822 36 model.scoring_fct_norm 2.0 +822 36 optimizer.lr 0.011987471420980686 +822 36 training.batch_size 0.0 +822 36 training.label_smoothing 0.4360988021092578 +822 37 model.embedding_dim 2.0 +822 37 model.scoring_fct_norm 2.0 +822 37 optimizer.lr 0.08045972551782675 +822 37 training.batch_size 0.0 +822 37 training.label_smoothing 0.09694381725128622 +822 38 model.embedding_dim 1.0 +822 38 model.scoring_fct_norm 2.0 +822 38 optimizer.lr 0.017400440768435697 +822 38 training.batch_size 1.0 +822 38 training.label_smoothing 0.28017861442580333 +822 39 model.embedding_dim 0.0 +822 39 model.scoring_fct_norm 2.0 +822 39 optimizer.lr 0.001575283688793392 +822 39 training.batch_size 2.0 +822 39 training.label_smoothing 0.0023217246326363928 +822 40 model.embedding_dim 1.0 +822 40 model.scoring_fct_norm 1.0 +822 40 optimizer.lr 0.0034469892214161655 +822 40 training.batch_size 0.0 +822 40 training.label_smoothing 0.020932022680348657 +822 41 model.embedding_dim 2.0 +822 41 model.scoring_fct_norm 2.0 +822 41 optimizer.lr 0.008082799396090539 +822 41 training.batch_size 2.0 +822 41 training.label_smoothing 0.00842920607830544 +822 42 model.embedding_dim 2.0 +822 
42 model.scoring_fct_norm 2.0 +822 42 optimizer.lr 0.0018498947092746333 +822 42 training.batch_size 2.0 +822 42 training.label_smoothing 0.24611007292635229 +822 43 model.embedding_dim 1.0 +822 43 model.scoring_fct_norm 2.0 +822 43 optimizer.lr 0.01646921495768607 +822 43 training.batch_size 0.0 +822 43 training.label_smoothing 0.30197153425741585 +822 44 model.embedding_dim 1.0 +822 44 model.scoring_fct_norm 1.0 +822 44 optimizer.lr 0.05198297091375915 +822 44 training.batch_size 2.0 +822 44 training.label_smoothing 0.22030639864173981 +822 45 model.embedding_dim 0.0 +822 45 model.scoring_fct_norm 2.0 +822 45 optimizer.lr 0.049603584917154296 +822 45 training.batch_size 1.0 +822 45 training.label_smoothing 0.004997053595406376 +822 46 model.embedding_dim 2.0 +822 46 model.scoring_fct_norm 2.0 +822 46 optimizer.lr 0.0010825625722309376 +822 46 training.batch_size 2.0 +822 46 training.label_smoothing 0.017366561778335586 +822 47 model.embedding_dim 1.0 +822 47 model.scoring_fct_norm 2.0 +822 47 optimizer.lr 0.002183529895608278 +822 47 training.batch_size 0.0 +822 47 training.label_smoothing 0.0346207340595523 +822 48 model.embedding_dim 2.0 +822 48 model.scoring_fct_norm 1.0 +822 48 optimizer.lr 0.007834561630613166 +822 48 training.batch_size 2.0 +822 48 training.label_smoothing 0.005129760110343081 +822 49 model.embedding_dim 2.0 +822 49 model.scoring_fct_norm 2.0 +822 49 optimizer.lr 0.014105829279692625 +822 49 training.batch_size 1.0 +822 49 training.label_smoothing 0.0013433421595454398 +822 50 model.embedding_dim 0.0 +822 50 model.scoring_fct_norm 2.0 +822 50 optimizer.lr 0.011446262275311364 +822 50 training.batch_size 2.0 +822 50 training.label_smoothing 0.09301955726269799 +822 51 model.embedding_dim 2.0 +822 51 model.scoring_fct_norm 2.0 +822 51 optimizer.lr 0.05523268424658106 +822 51 training.batch_size 1.0 +822 51 training.label_smoothing 0.023328558303274228 +822 52 model.embedding_dim 2.0 +822 52 model.scoring_fct_norm 1.0 +822 52 optimizer.lr 
0.0025932351418952084 +822 52 training.batch_size 0.0 +822 52 training.label_smoothing 0.3397864954911403 +822 53 model.embedding_dim 2.0 +822 53 model.scoring_fct_norm 2.0 +822 53 optimizer.lr 0.00800284712971609 +822 53 training.batch_size 0.0 +822 53 training.label_smoothing 0.3380519455516738 +822 54 model.embedding_dim 2.0 +822 54 model.scoring_fct_norm 2.0 +822 54 optimizer.lr 0.002962695499161179 +822 54 training.batch_size 2.0 +822 54 training.label_smoothing 0.8968021309865748 +822 55 model.embedding_dim 1.0 +822 55 model.scoring_fct_norm 2.0 +822 55 optimizer.lr 0.023000523198640096 +822 55 training.batch_size 0.0 +822 55 training.label_smoothing 0.022501972047102265 +822 56 model.embedding_dim 0.0 +822 56 model.scoring_fct_norm 2.0 +822 56 optimizer.lr 0.0026380924168530368 +822 56 training.batch_size 1.0 +822 56 training.label_smoothing 0.0063951337332105295 +822 57 model.embedding_dim 0.0 +822 57 model.scoring_fct_norm 1.0 +822 57 optimizer.lr 0.0024323759778599463 +822 57 training.batch_size 0.0 +822 57 training.label_smoothing 0.006968413444149799 +822 58 model.embedding_dim 1.0 +822 58 model.scoring_fct_norm 2.0 +822 58 optimizer.lr 0.03259619800003365 +822 58 training.batch_size 1.0 +822 58 training.label_smoothing 0.08956421276580298 +822 59 model.embedding_dim 0.0 +822 59 model.scoring_fct_norm 1.0 +822 59 optimizer.lr 0.002256520961378405 +822 59 training.batch_size 2.0 +822 59 training.label_smoothing 0.005497804889847223 +822 60 model.embedding_dim 2.0 +822 60 model.scoring_fct_norm 1.0 +822 60 optimizer.lr 0.004987624683970636 +822 60 training.batch_size 1.0 +822 60 training.label_smoothing 0.01793834118221138 +822 61 model.embedding_dim 1.0 +822 61 model.scoring_fct_norm 2.0 +822 61 optimizer.lr 0.033990707598483064 +822 61 training.batch_size 1.0 +822 61 training.label_smoothing 0.010679369195672058 +822 62 model.embedding_dim 0.0 +822 62 model.scoring_fct_norm 2.0 +822 62 optimizer.lr 0.0013396329051003648 +822 62 training.batch_size 0.0 
+822 62 training.label_smoothing 0.10757746198697875 +822 63 model.embedding_dim 0.0 +822 63 model.scoring_fct_norm 1.0 +822 63 optimizer.lr 0.004427824169568234 +822 63 training.batch_size 2.0 +822 63 training.label_smoothing 0.20624512593564398 +822 64 model.embedding_dim 0.0 +822 64 model.scoring_fct_norm 1.0 +822 64 optimizer.lr 0.013490732298794538 +822 64 training.batch_size 0.0 +822 64 training.label_smoothing 0.05855575824253923 +822 65 model.embedding_dim 2.0 +822 65 model.scoring_fct_norm 2.0 +822 65 optimizer.lr 0.01724571291481434 +822 65 training.batch_size 2.0 +822 65 training.label_smoothing 0.34582922484052236 +822 66 model.embedding_dim 1.0 +822 66 model.scoring_fct_norm 2.0 +822 66 optimizer.lr 0.0026912171395743183 +822 66 training.batch_size 1.0 +822 66 training.label_smoothing 0.4410867007605657 +822 67 model.embedding_dim 2.0 +822 67 model.scoring_fct_norm 1.0 +822 67 optimizer.lr 0.014298378807805753 +822 67 training.batch_size 2.0 +822 67 training.label_smoothing 0.021139445118581453 +822 68 model.embedding_dim 2.0 +822 68 model.scoring_fct_norm 2.0 +822 68 optimizer.lr 0.06970610608737544 +822 68 training.batch_size 0.0 +822 68 training.label_smoothing 0.910935249462113 +822 69 model.embedding_dim 2.0 +822 69 model.scoring_fct_norm 2.0 +822 69 optimizer.lr 0.012984947173433412 +822 69 training.batch_size 0.0 +822 69 training.label_smoothing 0.0033660654599077344 +822 70 model.embedding_dim 2.0 +822 70 model.scoring_fct_norm 2.0 +822 70 optimizer.lr 0.0023457083058358807 +822 70 training.batch_size 2.0 +822 70 training.label_smoothing 0.0040342116985023786 +822 71 model.embedding_dim 0.0 +822 71 model.scoring_fct_norm 1.0 +822 71 optimizer.lr 0.03255139225836503 +822 71 training.batch_size 0.0 +822 71 training.label_smoothing 0.3684822886346871 +822 72 model.embedding_dim 2.0 +822 72 model.scoring_fct_norm 1.0 +822 72 optimizer.lr 0.00486697459637409 +822 72 training.batch_size 1.0 +822 72 training.label_smoothing 0.008571290411578237 +822 
73 model.embedding_dim 0.0 +822 73 model.scoring_fct_norm 2.0 +822 73 optimizer.lr 0.03161353121467198 +822 73 training.batch_size 0.0 +822 73 training.label_smoothing 0.0028017777990248568 +822 74 model.embedding_dim 0.0 +822 74 model.scoring_fct_norm 2.0 +822 74 optimizer.lr 0.04082744015156605 +822 74 training.batch_size 1.0 +822 74 training.label_smoothing 0.03258933621565232 +822 75 model.embedding_dim 2.0 +822 75 model.scoring_fct_norm 2.0 +822 75 optimizer.lr 0.01952391311554671 +822 75 training.batch_size 1.0 +822 75 training.label_smoothing 0.0011873032899303673 +822 76 model.embedding_dim 0.0 +822 76 model.scoring_fct_norm 1.0 +822 76 optimizer.lr 0.055315641664871774 +822 76 training.batch_size 0.0 +822 76 training.label_smoothing 0.0016358983154435766 +822 77 model.embedding_dim 1.0 +822 77 model.scoring_fct_norm 1.0 +822 77 optimizer.lr 0.016370298978225867 +822 77 training.batch_size 1.0 +822 77 training.label_smoothing 0.40147747151436813 +822 78 model.embedding_dim 2.0 +822 78 model.scoring_fct_norm 1.0 +822 78 optimizer.lr 0.0030742373715354258 +822 78 training.batch_size 0.0 +822 78 training.label_smoothing 0.03875722148102535 +822 79 model.embedding_dim 1.0 +822 79 model.scoring_fct_norm 1.0 +822 79 optimizer.lr 0.009625824135767999 +822 79 training.batch_size 0.0 +822 79 training.label_smoothing 0.00276515489269903 +822 80 model.embedding_dim 1.0 +822 80 model.scoring_fct_norm 2.0 +822 80 optimizer.lr 0.004589162541181777 +822 80 training.batch_size 0.0 +822 80 training.label_smoothing 0.4255633125262919 +822 81 model.embedding_dim 0.0 +822 81 model.scoring_fct_norm 1.0 +822 81 optimizer.lr 0.035367546499580314 +822 81 training.batch_size 2.0 +822 81 training.label_smoothing 0.0193128097843057 +822 82 model.embedding_dim 2.0 +822 82 model.scoring_fct_norm 1.0 +822 82 optimizer.lr 0.008508960120121796 +822 82 training.batch_size 1.0 +822 82 training.label_smoothing 0.14320638888374063 +822 83 model.embedding_dim 0.0 +822 83 model.scoring_fct_norm 
2.0 +822 83 optimizer.lr 0.0014192504212052633 +822 83 training.batch_size 1.0 +822 83 training.label_smoothing 0.04150065404837549 +822 84 model.embedding_dim 2.0 +822 84 model.scoring_fct_norm 2.0 +822 84 optimizer.lr 0.0017598933910501948 +822 84 training.batch_size 2.0 +822 84 training.label_smoothing 0.0015210357362866501 +822 85 model.embedding_dim 2.0 +822 85 model.scoring_fct_norm 1.0 +822 85 optimizer.lr 0.0011749976945961383 +822 85 training.batch_size 1.0 +822 85 training.label_smoothing 0.015895547544023596 +822 86 model.embedding_dim 0.0 +822 86 model.scoring_fct_norm 2.0 +822 86 optimizer.lr 0.01253439340356378 +822 86 training.batch_size 2.0 +822 86 training.label_smoothing 0.10734913975533814 +822 87 model.embedding_dim 1.0 +822 87 model.scoring_fct_norm 2.0 +822 87 optimizer.lr 0.07094788942683013 +822 87 training.batch_size 0.0 +822 87 training.label_smoothing 0.10653538233913362 +822 88 model.embedding_dim 1.0 +822 88 model.scoring_fct_norm 1.0 +822 88 optimizer.lr 0.04552744748637299 +822 88 training.batch_size 2.0 +822 88 training.label_smoothing 0.6685415450600052 +822 89 model.embedding_dim 1.0 +822 89 model.scoring_fct_norm 2.0 +822 89 optimizer.lr 0.001622708850047527 +822 89 training.batch_size 2.0 +822 89 training.label_smoothing 0.0016868502387157118 +822 90 model.embedding_dim 0.0 +822 90 model.scoring_fct_norm 1.0 +822 90 optimizer.lr 0.021779903198147176 +822 90 training.batch_size 1.0 +822 90 training.label_smoothing 0.0015450151405559161 +822 91 model.embedding_dim 0.0 +822 91 model.scoring_fct_norm 1.0 +822 91 optimizer.lr 0.08045284930043152 +822 91 training.batch_size 1.0 +822 91 training.label_smoothing 0.004635967672374824 +822 92 model.embedding_dim 2.0 +822 92 model.scoring_fct_norm 2.0 +822 92 optimizer.lr 0.05757239390033995 +822 92 training.batch_size 1.0 +822 92 training.label_smoothing 0.22176948676398142 +822 93 model.embedding_dim 2.0 +822 93 model.scoring_fct_norm 1.0 +822 93 optimizer.lr 0.004097335652465504 +822 93 
training.batch_size 0.0 +822 93 training.label_smoothing 0.10461597945519963 +822 94 model.embedding_dim 0.0 +822 94 model.scoring_fct_norm 1.0 +822 94 optimizer.lr 0.05469469284950186 +822 94 training.batch_size 2.0 +822 94 training.label_smoothing 0.01678148495554284 +822 95 model.embedding_dim 1.0 +822 95 model.scoring_fct_norm 2.0 +822 95 optimizer.lr 0.010893665320754897 +822 95 training.batch_size 1.0 +822 95 training.label_smoothing 0.7961586161330843 +822 96 model.embedding_dim 2.0 +822 96 model.scoring_fct_norm 2.0 +822 96 optimizer.lr 0.04360364883601727 +822 96 training.batch_size 1.0 +822 96 training.label_smoothing 0.0011501711479389993 +822 97 model.embedding_dim 0.0 +822 97 model.scoring_fct_norm 2.0 +822 97 optimizer.lr 0.024781124345560072 +822 97 training.batch_size 2.0 +822 97 training.label_smoothing 0.0020392681205275483 +822 98 model.embedding_dim 2.0 +822 98 model.scoring_fct_norm 2.0 +822 98 optimizer.lr 0.013021960956097473 +822 98 training.batch_size 2.0 +822 98 training.label_smoothing 0.0069788559758993795 +822 99 model.embedding_dim 1.0 +822 99 model.scoring_fct_norm 2.0 +822 99 optimizer.lr 0.07716831853672586 +822 99 training.batch_size 2.0 +822 99 training.label_smoothing 0.018220499435718934 +822 100 model.embedding_dim 0.0 +822 100 model.scoring_fct_norm 1.0 +822 100 optimizer.lr 0.026609455306690045 +822 100 training.batch_size 2.0 +822 100 training.label_smoothing 0.21706731213722533 +822 1 dataset """kinships""" +822 1 model """transe""" +822 1 loss """softplus""" +822 1 regularizer """no""" +822 1 optimizer """adam""" +822 1 training_loop """lcwa""" +822 1 evaluator """rankbased""" +822 2 dataset """kinships""" +822 2 model """transe""" +822 2 loss """softplus""" +822 2 regularizer """no""" +822 2 optimizer """adam""" +822 2 training_loop """lcwa""" +822 2 evaluator """rankbased""" +822 3 dataset """kinships""" +822 3 model """transe""" +822 3 loss """softplus""" +822 3 regularizer """no""" +822 3 optimizer """adam""" +822 3 
training_loop """lcwa""" +822 3 evaluator """rankbased""" +822 4 dataset """kinships""" +822 4 model """transe""" +822 4 loss """softplus""" +822 4 regularizer """no""" +822 4 optimizer """adam""" +822 4 training_loop """lcwa""" +822 4 evaluator """rankbased""" +822 5 dataset """kinships""" +822 5 model """transe""" +822 5 loss """softplus""" +822 5 regularizer """no""" +822 5 optimizer """adam""" +822 5 training_loop """lcwa""" +822 5 evaluator """rankbased""" +822 6 dataset """kinships""" +822 6 model """transe""" +822 6 loss """softplus""" +822 6 regularizer """no""" +822 6 optimizer """adam""" +822 6 training_loop """lcwa""" +822 6 evaluator """rankbased""" +822 7 dataset """kinships""" +822 7 model """transe""" +822 7 loss """softplus""" +822 7 regularizer """no""" +822 7 optimizer """adam""" +822 7 training_loop """lcwa""" +822 7 evaluator """rankbased""" +822 8 dataset """kinships""" +822 8 model """transe""" +822 8 loss """softplus""" +822 8 regularizer """no""" +822 8 optimizer """adam""" +822 8 training_loop """lcwa""" +822 8 evaluator """rankbased""" +822 9 dataset """kinships""" +822 9 model """transe""" +822 9 loss """softplus""" +822 9 regularizer """no""" +822 9 optimizer """adam""" +822 9 training_loop """lcwa""" +822 9 evaluator """rankbased""" +822 10 dataset """kinships""" +822 10 model """transe""" +822 10 loss """softplus""" +822 10 regularizer """no""" +822 10 optimizer """adam""" +822 10 training_loop """lcwa""" +822 10 evaluator """rankbased""" +822 11 dataset """kinships""" +822 11 model """transe""" +822 11 loss """softplus""" +822 11 regularizer """no""" +822 11 optimizer """adam""" +822 11 training_loop """lcwa""" +822 11 evaluator """rankbased""" +822 12 dataset """kinships""" +822 12 model """transe""" +822 12 loss """softplus""" +822 12 regularizer """no""" +822 12 optimizer """adam""" +822 12 training_loop """lcwa""" +822 12 evaluator """rankbased""" +822 13 dataset """kinships""" +822 13 model """transe""" +822 13 loss 
"""softplus""" +822 13 regularizer """no""" +822 13 optimizer """adam""" +822 13 training_loop """lcwa""" +822 13 evaluator """rankbased""" +822 14 dataset """kinships""" +822 14 model """transe""" +822 14 loss """softplus""" +822 14 regularizer """no""" +822 14 optimizer """adam""" +822 14 training_loop """lcwa""" +822 14 evaluator """rankbased""" +822 15 dataset """kinships""" +822 15 model """transe""" +822 15 loss """softplus""" +822 15 regularizer """no""" +822 15 optimizer """adam""" +822 15 training_loop """lcwa""" +822 15 evaluator """rankbased""" +822 16 dataset """kinships""" +822 16 model """transe""" +822 16 loss """softplus""" +822 16 regularizer """no""" +822 16 optimizer """adam""" +822 16 training_loop """lcwa""" +822 16 evaluator """rankbased""" +822 17 dataset """kinships""" +822 17 model """transe""" +822 17 loss """softplus""" +822 17 regularizer """no""" +822 17 optimizer """adam""" +822 17 training_loop """lcwa""" +822 17 evaluator """rankbased""" +822 18 dataset """kinships""" +822 18 model """transe""" +822 18 loss """softplus""" +822 18 regularizer """no""" +822 18 optimizer """adam""" +822 18 training_loop """lcwa""" +822 18 evaluator """rankbased""" +822 19 dataset """kinships""" +822 19 model """transe""" +822 19 loss """softplus""" +822 19 regularizer """no""" +822 19 optimizer """adam""" +822 19 training_loop """lcwa""" +822 19 evaluator """rankbased""" +822 20 dataset """kinships""" +822 20 model """transe""" +822 20 loss """softplus""" +822 20 regularizer """no""" +822 20 optimizer """adam""" +822 20 training_loop """lcwa""" +822 20 evaluator """rankbased""" +822 21 dataset """kinships""" +822 21 model """transe""" +822 21 loss """softplus""" +822 21 regularizer """no""" +822 21 optimizer """adam""" +822 21 training_loop """lcwa""" +822 21 evaluator """rankbased""" +822 22 dataset """kinships""" +822 22 model """transe""" +822 22 loss """softplus""" +822 22 regularizer """no""" +822 22 optimizer """adam""" +822 22 training_loop 
"""lcwa""" +822 22 evaluator """rankbased""" +822 23 dataset """kinships""" +822 23 model """transe""" +822 23 loss """softplus""" +822 23 regularizer """no""" +822 23 optimizer """adam""" +822 23 training_loop """lcwa""" +822 23 evaluator """rankbased""" +822 24 dataset """kinships""" +822 24 model """transe""" +822 24 loss """softplus""" +822 24 regularizer """no""" +822 24 optimizer """adam""" +822 24 training_loop """lcwa""" +822 24 evaluator """rankbased""" +822 25 dataset """kinships""" +822 25 model """transe""" +822 25 loss """softplus""" +822 25 regularizer """no""" +822 25 optimizer """adam""" +822 25 training_loop """lcwa""" +822 25 evaluator """rankbased""" +822 26 dataset """kinships""" +822 26 model """transe""" +822 26 loss """softplus""" +822 26 regularizer """no""" +822 26 optimizer """adam""" +822 26 training_loop """lcwa""" +822 26 evaluator """rankbased""" +822 27 dataset """kinships""" +822 27 model """transe""" +822 27 loss """softplus""" +822 27 regularizer """no""" +822 27 optimizer """adam""" +822 27 training_loop """lcwa""" +822 27 evaluator """rankbased""" +822 28 dataset """kinships""" +822 28 model """transe""" +822 28 loss """softplus""" +822 28 regularizer """no""" +822 28 optimizer """adam""" +822 28 training_loop """lcwa""" +822 28 evaluator """rankbased""" +822 29 dataset """kinships""" +822 29 model """transe""" +822 29 loss """softplus""" +822 29 regularizer """no""" +822 29 optimizer """adam""" +822 29 training_loop """lcwa""" +822 29 evaluator """rankbased""" +822 30 dataset """kinships""" +822 30 model """transe""" +822 30 loss """softplus""" +822 30 regularizer """no""" +822 30 optimizer """adam""" +822 30 training_loop """lcwa""" +822 30 evaluator """rankbased""" +822 31 dataset """kinships""" +822 31 model """transe""" +822 31 loss """softplus""" +822 31 regularizer """no""" +822 31 optimizer """adam""" +822 31 training_loop """lcwa""" +822 31 evaluator """rankbased""" +822 32 dataset """kinships""" +822 32 model 
"""transe""" +822 32 loss """softplus""" +822 32 regularizer """no""" +822 32 optimizer """adam""" +822 32 training_loop """lcwa""" +822 32 evaluator """rankbased""" +822 33 dataset """kinships""" +822 33 model """transe""" +822 33 loss """softplus""" +822 33 regularizer """no""" +822 33 optimizer """adam""" +822 33 training_loop """lcwa""" +822 33 evaluator """rankbased""" +822 34 dataset """kinships""" +822 34 model """transe""" +822 34 loss """softplus""" +822 34 regularizer """no""" +822 34 optimizer """adam""" +822 34 training_loop """lcwa""" +822 34 evaluator """rankbased""" +822 35 dataset """kinships""" +822 35 model """transe""" +822 35 loss """softplus""" +822 35 regularizer """no""" +822 35 optimizer """adam""" +822 35 training_loop """lcwa""" +822 35 evaluator """rankbased""" +822 36 dataset """kinships""" +822 36 model """transe""" +822 36 loss """softplus""" +822 36 regularizer """no""" +822 36 optimizer """adam""" +822 36 training_loop """lcwa""" +822 36 evaluator """rankbased""" +822 37 dataset """kinships""" +822 37 model """transe""" +822 37 loss """softplus""" +822 37 regularizer """no""" +822 37 optimizer """adam""" +822 37 training_loop """lcwa""" +822 37 evaluator """rankbased""" +822 38 dataset """kinships""" +822 38 model """transe""" +822 38 loss """softplus""" +822 38 regularizer """no""" +822 38 optimizer """adam""" +822 38 training_loop """lcwa""" +822 38 evaluator """rankbased""" +822 39 dataset """kinships""" +822 39 model """transe""" +822 39 loss """softplus""" +822 39 regularizer """no""" +822 39 optimizer """adam""" +822 39 training_loop """lcwa""" +822 39 evaluator """rankbased""" +822 40 dataset """kinships""" +822 40 model """transe""" +822 40 loss """softplus""" +822 40 regularizer """no""" +822 40 optimizer """adam""" +822 40 training_loop """lcwa""" +822 40 evaluator """rankbased""" +822 41 dataset """kinships""" +822 41 model """transe""" +822 41 loss """softplus""" +822 41 regularizer """no""" +822 41 optimizer """adam""" 
+822 41 training_loop """lcwa""" +822 41 evaluator """rankbased""" +822 42 dataset """kinships""" +822 42 model """transe""" +822 42 loss """softplus""" +822 42 regularizer """no""" +822 42 optimizer """adam""" +822 42 training_loop """lcwa""" +822 42 evaluator """rankbased""" +822 43 dataset """kinships""" +822 43 model """transe""" +822 43 loss """softplus""" +822 43 regularizer """no""" +822 43 optimizer """adam""" +822 43 training_loop """lcwa""" +822 43 evaluator """rankbased""" +822 44 dataset """kinships""" +822 44 model """transe""" +822 44 loss """softplus""" +822 44 regularizer """no""" +822 44 optimizer """adam""" +822 44 training_loop """lcwa""" +822 44 evaluator """rankbased""" +822 45 dataset """kinships""" +822 45 model """transe""" +822 45 loss """softplus""" +822 45 regularizer """no""" +822 45 optimizer """adam""" +822 45 training_loop """lcwa""" +822 45 evaluator """rankbased""" +822 46 dataset """kinships""" +822 46 model """transe""" +822 46 loss """softplus""" +822 46 regularizer """no""" +822 46 optimizer """adam""" +822 46 training_loop """lcwa""" +822 46 evaluator """rankbased""" +822 47 dataset """kinships""" +822 47 model """transe""" +822 47 loss """softplus""" +822 47 regularizer """no""" +822 47 optimizer """adam""" +822 47 training_loop """lcwa""" +822 47 evaluator """rankbased""" +822 48 dataset """kinships""" +822 48 model """transe""" +822 48 loss """softplus""" +822 48 regularizer """no""" +822 48 optimizer """adam""" +822 48 training_loop """lcwa""" +822 48 evaluator """rankbased""" +822 49 dataset """kinships""" +822 49 model """transe""" +822 49 loss """softplus""" +822 49 regularizer """no""" +822 49 optimizer """adam""" +822 49 training_loop """lcwa""" +822 49 evaluator """rankbased""" +822 50 dataset """kinships""" +822 50 model """transe""" +822 50 loss """softplus""" +822 50 regularizer """no""" +822 50 optimizer """adam""" +822 50 training_loop """lcwa""" +822 50 evaluator """rankbased""" +822 51 dataset """kinships""" 
+822 51 model """transe""" +822 51 loss """softplus""" +822 51 regularizer """no""" +822 51 optimizer """adam""" +822 51 training_loop """lcwa""" +822 51 evaluator """rankbased""" +822 52 dataset """kinships""" +822 52 model """transe""" +822 52 loss """softplus""" +822 52 regularizer """no""" +822 52 optimizer """adam""" +822 52 training_loop """lcwa""" +822 52 evaluator """rankbased""" +822 53 dataset """kinships""" +822 53 model """transe""" +822 53 loss """softplus""" +822 53 regularizer """no""" +822 53 optimizer """adam""" +822 53 training_loop """lcwa""" +822 53 evaluator """rankbased""" +822 54 dataset """kinships""" +822 54 model """transe""" +822 54 loss """softplus""" +822 54 regularizer """no""" +822 54 optimizer """adam""" +822 54 training_loop """lcwa""" +822 54 evaluator """rankbased""" +822 55 dataset """kinships""" +822 55 model """transe""" +822 55 loss """softplus""" +822 55 regularizer """no""" +822 55 optimizer """adam""" +822 55 training_loop """lcwa""" +822 55 evaluator """rankbased""" +822 56 dataset """kinships""" +822 56 model """transe""" +822 56 loss """softplus""" +822 56 regularizer """no""" +822 56 optimizer """adam""" +822 56 training_loop """lcwa""" +822 56 evaluator """rankbased""" +822 57 dataset """kinships""" +822 57 model """transe""" +822 57 loss """softplus""" +822 57 regularizer """no""" +822 57 optimizer """adam""" +822 57 training_loop """lcwa""" +822 57 evaluator """rankbased""" +822 58 dataset """kinships""" +822 58 model """transe""" +822 58 loss """softplus""" +822 58 regularizer """no""" +822 58 optimizer """adam""" +822 58 training_loop """lcwa""" +822 58 evaluator """rankbased""" +822 59 dataset """kinships""" +822 59 model """transe""" +822 59 loss """softplus""" +822 59 regularizer """no""" +822 59 optimizer """adam""" +822 59 training_loop """lcwa""" +822 59 evaluator """rankbased""" +822 60 dataset """kinships""" +822 60 model """transe""" +822 60 loss """softplus""" +822 60 regularizer """no""" +822 60 
optimizer """adam""" +822 60 training_loop """lcwa""" +822 60 evaluator """rankbased""" +822 61 dataset """kinships""" +822 61 model """transe""" +822 61 loss """softplus""" +822 61 regularizer """no""" +822 61 optimizer """adam""" +822 61 training_loop """lcwa""" +822 61 evaluator """rankbased""" +822 62 dataset """kinships""" +822 62 model """transe""" +822 62 loss """softplus""" +822 62 regularizer """no""" +822 62 optimizer """adam""" +822 62 training_loop """lcwa""" +822 62 evaluator """rankbased""" +822 63 dataset """kinships""" +822 63 model """transe""" +822 63 loss """softplus""" +822 63 regularizer """no""" +822 63 optimizer """adam""" +822 63 training_loop """lcwa""" +822 63 evaluator """rankbased""" +822 64 dataset """kinships""" +822 64 model """transe""" +822 64 loss """softplus""" +822 64 regularizer """no""" +822 64 optimizer """adam""" +822 64 training_loop """lcwa""" +822 64 evaluator """rankbased""" +822 65 dataset """kinships""" +822 65 model """transe""" +822 65 loss """softplus""" +822 65 regularizer """no""" +822 65 optimizer """adam""" +822 65 training_loop """lcwa""" +822 65 evaluator """rankbased""" +822 66 dataset """kinships""" +822 66 model """transe""" +822 66 loss """softplus""" +822 66 regularizer """no""" +822 66 optimizer """adam""" +822 66 training_loop """lcwa""" +822 66 evaluator """rankbased""" +822 67 dataset """kinships""" +822 67 model """transe""" +822 67 loss """softplus""" +822 67 regularizer """no""" +822 67 optimizer """adam""" +822 67 training_loop """lcwa""" +822 67 evaluator """rankbased""" +822 68 dataset """kinships""" +822 68 model """transe""" +822 68 loss """softplus""" +822 68 regularizer """no""" +822 68 optimizer """adam""" +822 68 training_loop """lcwa""" +822 68 evaluator """rankbased""" +822 69 dataset """kinships""" +822 69 model """transe""" +822 69 loss """softplus""" +822 69 regularizer """no""" +822 69 optimizer """adam""" +822 69 training_loop """lcwa""" +822 69 evaluator """rankbased""" +822 70 
dataset """kinships""" +822 70 model """transe""" +822 70 loss """softplus""" +822 70 regularizer """no""" +822 70 optimizer """adam""" +822 70 training_loop """lcwa""" +822 70 evaluator """rankbased""" +822 71 dataset """kinships""" +822 71 model """transe""" +822 71 loss """softplus""" +822 71 regularizer """no""" +822 71 optimizer """adam""" +822 71 training_loop """lcwa""" +822 71 evaluator """rankbased""" +822 72 dataset """kinships""" +822 72 model """transe""" +822 72 loss """softplus""" +822 72 regularizer """no""" +822 72 optimizer """adam""" +822 72 training_loop """lcwa""" +822 72 evaluator """rankbased""" +822 73 dataset """kinships""" +822 73 model """transe""" +822 73 loss """softplus""" +822 73 regularizer """no""" +822 73 optimizer """adam""" +822 73 training_loop """lcwa""" +822 73 evaluator """rankbased""" +822 74 dataset """kinships""" +822 74 model """transe""" +822 74 loss """softplus""" +822 74 regularizer """no""" +822 74 optimizer """adam""" +822 74 training_loop """lcwa""" +822 74 evaluator """rankbased""" +822 75 dataset """kinships""" +822 75 model """transe""" +822 75 loss """softplus""" +822 75 regularizer """no""" +822 75 optimizer """adam""" +822 75 training_loop """lcwa""" +822 75 evaluator """rankbased""" +822 76 dataset """kinships""" +822 76 model """transe""" +822 76 loss """softplus""" +822 76 regularizer """no""" +822 76 optimizer """adam""" +822 76 training_loop """lcwa""" +822 76 evaluator """rankbased""" +822 77 dataset """kinships""" +822 77 model """transe""" +822 77 loss """softplus""" +822 77 regularizer """no""" +822 77 optimizer """adam""" +822 77 training_loop """lcwa""" +822 77 evaluator """rankbased""" +822 78 dataset """kinships""" +822 78 model """transe""" +822 78 loss """softplus""" +822 78 regularizer """no""" +822 78 optimizer """adam""" +822 78 training_loop """lcwa""" +822 78 evaluator """rankbased""" +822 79 dataset """kinships""" +822 79 model """transe""" +822 79 loss """softplus""" +822 79 regularizer 
"""no""" +822 79 optimizer """adam""" +822 79 training_loop """lcwa""" +822 79 evaluator """rankbased""" +822 80 dataset """kinships""" +822 80 model """transe""" +822 80 loss """softplus""" +822 80 regularizer """no""" +822 80 optimizer """adam""" +822 80 training_loop """lcwa""" +822 80 evaluator """rankbased""" +822 81 dataset """kinships""" +822 81 model """transe""" +822 81 loss """softplus""" +822 81 regularizer """no""" +822 81 optimizer """adam""" +822 81 training_loop """lcwa""" +822 81 evaluator """rankbased""" +822 82 dataset """kinships""" +822 82 model """transe""" +822 82 loss """softplus""" +822 82 regularizer """no""" +822 82 optimizer """adam""" +822 82 training_loop """lcwa""" +822 82 evaluator """rankbased""" +822 83 dataset """kinships""" +822 83 model """transe""" +822 83 loss """softplus""" +822 83 regularizer """no""" +822 83 optimizer """adam""" +822 83 training_loop """lcwa""" +822 83 evaluator """rankbased""" +822 84 dataset """kinships""" +822 84 model """transe""" +822 84 loss """softplus""" +822 84 regularizer """no""" +822 84 optimizer """adam""" +822 84 training_loop """lcwa""" +822 84 evaluator """rankbased""" +822 85 dataset """kinships""" +822 85 model """transe""" +822 85 loss """softplus""" +822 85 regularizer """no""" +822 85 optimizer """adam""" +822 85 training_loop """lcwa""" +822 85 evaluator """rankbased""" +822 86 dataset """kinships""" +822 86 model """transe""" +822 86 loss """softplus""" +822 86 regularizer """no""" +822 86 optimizer """adam""" +822 86 training_loop """lcwa""" +822 86 evaluator """rankbased""" +822 87 dataset """kinships""" +822 87 model """transe""" +822 87 loss """softplus""" +822 87 regularizer """no""" +822 87 optimizer """adam""" +822 87 training_loop """lcwa""" +822 87 evaluator """rankbased""" +822 88 dataset """kinships""" +822 88 model """transe""" +822 88 loss """softplus""" +822 88 regularizer """no""" +822 88 optimizer """adam""" +822 88 training_loop """lcwa""" +822 88 evaluator 
"""rankbased""" +822 89 dataset """kinships""" +822 89 model """transe""" +822 89 loss """softplus""" +822 89 regularizer """no""" +822 89 optimizer """adam""" +822 89 training_loop """lcwa""" +822 89 evaluator """rankbased""" +822 90 dataset """kinships""" +822 90 model """transe""" +822 90 loss """softplus""" +822 90 regularizer """no""" +822 90 optimizer """adam""" +822 90 training_loop """lcwa""" +822 90 evaluator """rankbased""" +822 91 dataset """kinships""" +822 91 model """transe""" +822 91 loss """softplus""" +822 91 regularizer """no""" +822 91 optimizer """adam""" +822 91 training_loop """lcwa""" +822 91 evaluator """rankbased""" +822 92 dataset """kinships""" +822 92 model """transe""" +822 92 loss """softplus""" +822 92 regularizer """no""" +822 92 optimizer """adam""" +822 92 training_loop """lcwa""" +822 92 evaluator """rankbased""" +822 93 dataset """kinships""" +822 93 model """transe""" +822 93 loss """softplus""" +822 93 regularizer """no""" +822 93 optimizer """adam""" +822 93 training_loop """lcwa""" +822 93 evaluator """rankbased""" +822 94 dataset """kinships""" +822 94 model """transe""" +822 94 loss """softplus""" +822 94 regularizer """no""" +822 94 optimizer """adam""" +822 94 training_loop """lcwa""" +822 94 evaluator """rankbased""" +822 95 dataset """kinships""" +822 95 model """transe""" +822 95 loss """softplus""" +822 95 regularizer """no""" +822 95 optimizer """adam""" +822 95 training_loop """lcwa""" +822 95 evaluator """rankbased""" +822 96 dataset """kinships""" +822 96 model """transe""" +822 96 loss """softplus""" +822 96 regularizer """no""" +822 96 optimizer """adam""" +822 96 training_loop """lcwa""" +822 96 evaluator """rankbased""" +822 97 dataset """kinships""" +822 97 model """transe""" +822 97 loss """softplus""" +822 97 regularizer """no""" +822 97 optimizer """adam""" +822 97 training_loop """lcwa""" +822 97 evaluator """rankbased""" +822 98 dataset """kinships""" +822 98 model """transe""" +822 98 loss 
"""softplus""" +822 98 regularizer """no""" +822 98 optimizer """adam""" +822 98 training_loop """lcwa""" +822 98 evaluator """rankbased""" +822 99 dataset """kinships""" +822 99 model """transe""" +822 99 loss """softplus""" +822 99 regularizer """no""" +822 99 optimizer """adam""" +822 99 training_loop """lcwa""" +822 99 evaluator """rankbased""" +822 100 dataset """kinships""" +822 100 model """transe""" +822 100 loss """softplus""" +822 100 regularizer """no""" +822 100 optimizer """adam""" +822 100 training_loop """lcwa""" +822 100 evaluator """rankbased""" +823 1 model.embedding_dim 0.0 +823 1 model.scoring_fct_norm 2.0 +823 1 optimizer.lr 0.012006881554709456 +823 1 training.batch_size 2.0 +823 1 training.label_smoothing 0.001302278423397398 +823 2 model.embedding_dim 0.0 +823 2 model.scoring_fct_norm 1.0 +823 2 optimizer.lr 0.002887578071082644 +823 2 training.batch_size 1.0 +823 2 training.label_smoothing 0.015439701841479381 +823 3 model.embedding_dim 0.0 +823 3 model.scoring_fct_norm 1.0 +823 3 optimizer.lr 0.08883374276210594 +823 3 training.batch_size 0.0 +823 3 training.label_smoothing 0.06067380127735337 +823 4 model.embedding_dim 2.0 +823 4 model.scoring_fct_norm 2.0 +823 4 optimizer.lr 0.051951620529673434 +823 4 training.batch_size 0.0 +823 4 training.label_smoothing 0.0010514206125969716 +823 5 model.embedding_dim 2.0 +823 5 model.scoring_fct_norm 1.0 +823 5 optimizer.lr 0.012277214544124261 +823 5 training.batch_size 2.0 +823 5 training.label_smoothing 0.1546841763198385 +823 6 model.embedding_dim 0.0 +823 6 model.scoring_fct_norm 1.0 +823 6 optimizer.lr 0.0012632829112708193 +823 6 training.batch_size 1.0 +823 6 training.label_smoothing 0.016878934338820355 +823 7 model.embedding_dim 2.0 +823 7 model.scoring_fct_norm 2.0 +823 7 optimizer.lr 0.010672136775479603 +823 7 training.batch_size 1.0 +823 7 training.label_smoothing 0.0022591915255126193 +823 8 model.embedding_dim 1.0 +823 8 model.scoring_fct_norm 2.0 +823 8 optimizer.lr 
0.05445907271360015 +823 8 training.batch_size 1.0 +823 8 training.label_smoothing 0.002210618632079083 +823 9 model.embedding_dim 0.0 +823 9 model.scoring_fct_norm 1.0 +823 9 optimizer.lr 0.05668258986336883 +823 9 training.batch_size 0.0 +823 9 training.label_smoothing 0.0031083895302625366 +823 10 model.embedding_dim 1.0 +823 10 model.scoring_fct_norm 2.0 +823 10 optimizer.lr 0.01372254398449462 +823 10 training.batch_size 1.0 +823 10 training.label_smoothing 0.15846034194830314 +823 11 model.embedding_dim 0.0 +823 11 model.scoring_fct_norm 2.0 +823 11 optimizer.lr 0.007800092762856949 +823 11 training.batch_size 0.0 +823 11 training.label_smoothing 0.0037343585502272914 +823 12 model.embedding_dim 0.0 +823 12 model.scoring_fct_norm 2.0 +823 12 optimizer.lr 0.0015976527974398246 +823 12 training.batch_size 2.0 +823 12 training.label_smoothing 0.46207077470555813 +823 13 model.embedding_dim 1.0 +823 13 model.scoring_fct_norm 2.0 +823 13 optimizer.lr 0.00859548176309993 +823 13 training.batch_size 0.0 +823 13 training.label_smoothing 0.0032203703385562036 +823 14 model.embedding_dim 1.0 +823 14 model.scoring_fct_norm 2.0 +823 14 optimizer.lr 0.001514656931197232 +823 14 training.batch_size 2.0 +823 14 training.label_smoothing 0.0024641620929108086 +823 15 model.embedding_dim 1.0 +823 15 model.scoring_fct_norm 1.0 +823 15 optimizer.lr 0.0820880203957437 +823 15 training.batch_size 0.0 +823 15 training.label_smoothing 0.02154646472436062 +823 16 model.embedding_dim 0.0 +823 16 model.scoring_fct_norm 1.0 +823 16 optimizer.lr 0.0014199074199332837 +823 16 training.batch_size 2.0 +823 16 training.label_smoothing 0.04790047069966803 +823 17 model.embedding_dim 0.0 +823 17 model.scoring_fct_norm 2.0 +823 17 optimizer.lr 0.009252454598636146 +823 17 training.batch_size 1.0 +823 17 training.label_smoothing 0.012779478541967872 +823 18 model.embedding_dim 0.0 +823 18 model.scoring_fct_norm 2.0 +823 18 optimizer.lr 0.0024528155798511104 +823 18 training.batch_size 0.0 +823 
18 training.label_smoothing 0.001545140637024701 +823 19 model.embedding_dim 2.0 +823 19 model.scoring_fct_norm 2.0 +823 19 optimizer.lr 0.002378630354125332 +823 19 training.batch_size 0.0 +823 19 training.label_smoothing 0.00559747906932293 +823 20 model.embedding_dim 0.0 +823 20 model.scoring_fct_norm 2.0 +823 20 optimizer.lr 0.025306317817513146 +823 20 training.batch_size 0.0 +823 20 training.label_smoothing 0.025892104791746178 +823 21 model.embedding_dim 2.0 +823 21 model.scoring_fct_norm 1.0 +823 21 optimizer.lr 0.005352994485984541 +823 21 training.batch_size 2.0 +823 21 training.label_smoothing 0.0023994446959691084 +823 22 model.embedding_dim 1.0 +823 22 model.scoring_fct_norm 2.0 +823 22 optimizer.lr 0.09748437580314968 +823 22 training.batch_size 2.0 +823 22 training.label_smoothing 0.21084096642805888 +823 23 model.embedding_dim 0.0 +823 23 model.scoring_fct_norm 2.0 +823 23 optimizer.lr 0.06756236592931864 +823 23 training.batch_size 1.0 +823 23 training.label_smoothing 0.002166188287022724 +823 24 model.embedding_dim 1.0 +823 24 model.scoring_fct_norm 2.0 +823 24 optimizer.lr 0.04019150622995421 +823 24 training.batch_size 2.0 +823 24 training.label_smoothing 0.01616431496525326 +823 25 model.embedding_dim 2.0 +823 25 model.scoring_fct_norm 1.0 +823 25 optimizer.lr 0.031890332657344346 +823 25 training.batch_size 0.0 +823 25 training.label_smoothing 0.1151943453680041 +823 26 model.embedding_dim 2.0 +823 26 model.scoring_fct_norm 2.0 +823 26 optimizer.lr 0.009258330103358255 +823 26 training.batch_size 2.0 +823 26 training.label_smoothing 0.04743909430127579 +823 27 model.embedding_dim 2.0 +823 27 model.scoring_fct_norm 2.0 +823 27 optimizer.lr 0.032138420079820665 +823 27 training.batch_size 0.0 +823 27 training.label_smoothing 0.5342353846271868 +823 28 model.embedding_dim 1.0 +823 28 model.scoring_fct_norm 2.0 +823 28 optimizer.lr 0.011893390040605085 +823 28 training.batch_size 2.0 +823 28 training.label_smoothing 0.012787863368046392 +823 29 
model.embedding_dim 1.0 +823 29 model.scoring_fct_norm 2.0 +823 29 optimizer.lr 0.008284242468244617 +823 29 training.batch_size 2.0 +823 29 training.label_smoothing 0.0206974308418077 +823 30 model.embedding_dim 2.0 +823 30 model.scoring_fct_norm 2.0 +823 30 optimizer.lr 0.05943566951152162 +823 30 training.batch_size 0.0 +823 30 training.label_smoothing 0.021819999530724794 +823 31 model.embedding_dim 1.0 +823 31 model.scoring_fct_norm 1.0 +823 31 optimizer.lr 0.002721546772818552 +823 31 training.batch_size 1.0 +823 31 training.label_smoothing 0.0052949436941381895 +823 32 model.embedding_dim 1.0 +823 32 model.scoring_fct_norm 1.0 +823 32 optimizer.lr 0.01309558844391061 +823 32 training.batch_size 1.0 +823 32 training.label_smoothing 0.0010001206953918383 +823 33 model.embedding_dim 1.0 +823 33 model.scoring_fct_norm 2.0 +823 33 optimizer.lr 0.002462640331088718 +823 33 training.batch_size 0.0 +823 33 training.label_smoothing 0.2850153812725634 +823 34 model.embedding_dim 1.0 +823 34 model.scoring_fct_norm 1.0 +823 34 optimizer.lr 0.012721738618947735 +823 34 training.batch_size 0.0 +823 34 training.label_smoothing 0.9023666050441285 +823 35 model.embedding_dim 0.0 +823 35 model.scoring_fct_norm 1.0 +823 35 optimizer.lr 0.05865913830416101 +823 35 training.batch_size 1.0 +823 35 training.label_smoothing 0.0020681436135237285 +823 36 model.embedding_dim 1.0 +823 36 model.scoring_fct_norm 1.0 +823 36 optimizer.lr 0.002262926108404029 +823 36 training.batch_size 0.0 +823 36 training.label_smoothing 0.846025311263613 +823 37 model.embedding_dim 1.0 +823 37 model.scoring_fct_norm 2.0 +823 37 optimizer.lr 0.01165360325676971 +823 37 training.batch_size 2.0 +823 37 training.label_smoothing 0.5253382530177044 +823 38 model.embedding_dim 1.0 +823 38 model.scoring_fct_norm 1.0 +823 38 optimizer.lr 0.0011942263063006251 +823 38 training.batch_size 0.0 +823 38 training.label_smoothing 0.06375016790159879 +823 39 model.embedding_dim 0.0 +823 39 model.scoring_fct_norm 2.0 
+823 39 optimizer.lr 0.0616574428515533 +823 39 training.batch_size 0.0 +823 39 training.label_smoothing 0.46513074182200914 +823 40 model.embedding_dim 0.0 +823 40 model.scoring_fct_norm 2.0 +823 40 optimizer.lr 0.013826709786488953 +823 40 training.batch_size 1.0 +823 40 training.label_smoothing 0.0019396205251991316 +823 41 model.embedding_dim 0.0 +823 41 model.scoring_fct_norm 1.0 +823 41 optimizer.lr 0.03992319892137133 +823 41 training.batch_size 2.0 +823 41 training.label_smoothing 0.2564727889084437 +823 42 model.embedding_dim 0.0 +823 42 model.scoring_fct_norm 2.0 +823 42 optimizer.lr 0.01890887850446446 +823 42 training.batch_size 1.0 +823 42 training.label_smoothing 0.11855989272990705 +823 43 model.embedding_dim 2.0 +823 43 model.scoring_fct_norm 2.0 +823 43 optimizer.lr 0.07987863881500507 +823 43 training.batch_size 0.0 +823 43 training.label_smoothing 0.2605133891936883 +823 44 model.embedding_dim 2.0 +823 44 model.scoring_fct_norm 2.0 +823 44 optimizer.lr 0.006120840849258859 +823 44 training.batch_size 2.0 +823 44 training.label_smoothing 0.07448887032944726 +823 45 model.embedding_dim 2.0 +823 45 model.scoring_fct_norm 1.0 +823 45 optimizer.lr 0.006703979506361534 +823 45 training.batch_size 1.0 +823 45 training.label_smoothing 0.0046975570073066585 +823 46 model.embedding_dim 1.0 +823 46 model.scoring_fct_norm 2.0 +823 46 optimizer.lr 0.04557467452672584 +823 46 training.batch_size 0.0 +823 46 training.label_smoothing 0.020388461576648483 +823 47 model.embedding_dim 0.0 +823 47 model.scoring_fct_norm 1.0 +823 47 optimizer.lr 0.001713875353210494 +823 47 training.batch_size 1.0 +823 47 training.label_smoothing 0.044363574469780384 +823 48 model.embedding_dim 1.0 +823 48 model.scoring_fct_norm 2.0 +823 48 optimizer.lr 0.04635525068869832 +823 48 training.batch_size 2.0 +823 48 training.label_smoothing 0.8956077881814993 +823 49 model.embedding_dim 2.0 +823 49 model.scoring_fct_norm 1.0 +823 49 optimizer.lr 0.013712818260172937 +823 49 
training.batch_size 2.0 +823 49 training.label_smoothing 0.30850056270893134 +823 50 model.embedding_dim 0.0 +823 50 model.scoring_fct_norm 2.0 +823 50 optimizer.lr 0.006834027163066887 +823 50 training.batch_size 2.0 +823 50 training.label_smoothing 0.017185727707051207 +823 51 model.embedding_dim 2.0 +823 51 model.scoring_fct_norm 2.0 +823 51 optimizer.lr 0.009976520570680254 +823 51 training.batch_size 1.0 +823 51 training.label_smoothing 0.0019237733782385168 +823 52 model.embedding_dim 2.0 +823 52 model.scoring_fct_norm 1.0 +823 52 optimizer.lr 0.08826341544124758 +823 52 training.batch_size 0.0 +823 52 training.label_smoothing 0.022500719060803812 +823 53 model.embedding_dim 1.0 +823 53 model.scoring_fct_norm 2.0 +823 53 optimizer.lr 0.08714432503369474 +823 53 training.batch_size 2.0 +823 53 training.label_smoothing 0.18885048354388603 +823 54 model.embedding_dim 1.0 +823 54 model.scoring_fct_norm 2.0 +823 54 optimizer.lr 0.0010418699347712124 +823 54 training.batch_size 2.0 +823 54 training.label_smoothing 0.001228118256571345 +823 55 model.embedding_dim 0.0 +823 55 model.scoring_fct_norm 1.0 +823 55 optimizer.lr 0.0014778079700520543 +823 55 training.batch_size 1.0 +823 55 training.label_smoothing 0.030115185625986016 +823 56 model.embedding_dim 0.0 +823 56 model.scoring_fct_norm 2.0 +823 56 optimizer.lr 0.05602048241057678 +823 56 training.batch_size 2.0 +823 56 training.label_smoothing 0.9231643810267969 +823 57 model.embedding_dim 0.0 +823 57 model.scoring_fct_norm 1.0 +823 57 optimizer.lr 0.041784166465526296 +823 57 training.batch_size 1.0 +823 57 training.label_smoothing 0.158865036547892 +823 58 model.embedding_dim 2.0 +823 58 model.scoring_fct_norm 2.0 +823 58 optimizer.lr 0.0010728668937341086 +823 58 training.batch_size 1.0 +823 58 training.label_smoothing 0.0012248059981765892 +823 59 model.embedding_dim 1.0 +823 59 model.scoring_fct_norm 1.0 +823 59 optimizer.lr 0.001093190608672342 +823 59 training.batch_size 1.0 +823 59 
training.label_smoothing 0.16140335208514792 +823 60 model.embedding_dim 0.0 +823 60 model.scoring_fct_norm 2.0 +823 60 optimizer.lr 0.043117280543526294 +823 60 training.batch_size 2.0 +823 60 training.label_smoothing 0.0012305916713304182 +823 61 model.embedding_dim 1.0 +823 61 model.scoring_fct_norm 2.0 +823 61 optimizer.lr 0.002640035626190802 +823 61 training.batch_size 1.0 +823 61 training.label_smoothing 0.07358872085424077 +823 62 model.embedding_dim 2.0 +823 62 model.scoring_fct_norm 1.0 +823 62 optimizer.lr 0.0020450880628708344 +823 62 training.batch_size 1.0 +823 62 training.label_smoothing 0.9959045596042738 +823 63 model.embedding_dim 1.0 +823 63 model.scoring_fct_norm 2.0 +823 63 optimizer.lr 0.008306385786396248 +823 63 training.batch_size 0.0 +823 63 training.label_smoothing 0.00897028134432331 +823 64 model.embedding_dim 0.0 +823 64 model.scoring_fct_norm 2.0 +823 64 optimizer.lr 0.002015344657845739 +823 64 training.batch_size 2.0 +823 64 training.label_smoothing 0.002986272664105831 +823 65 model.embedding_dim 0.0 +823 65 model.scoring_fct_norm 2.0 +823 65 optimizer.lr 0.003323551315835251 +823 65 training.batch_size 2.0 +823 65 training.label_smoothing 0.005262205252219567 +823 66 model.embedding_dim 0.0 +823 66 model.scoring_fct_norm 2.0 +823 66 optimizer.lr 0.06983159923979229 +823 66 training.batch_size 0.0 +823 66 training.label_smoothing 0.1309570903699699 +823 67 model.embedding_dim 1.0 +823 67 model.scoring_fct_norm 2.0 +823 67 optimizer.lr 0.006385923318556457 +823 67 training.batch_size 1.0 +823 67 training.label_smoothing 0.0023372403576099824 +823 68 model.embedding_dim 2.0 +823 68 model.scoring_fct_norm 1.0 +823 68 optimizer.lr 0.0010162124993480528 +823 68 training.batch_size 2.0 +823 68 training.label_smoothing 0.011798027634376491 +823 69 model.embedding_dim 2.0 +823 69 model.scoring_fct_norm 1.0 +823 69 optimizer.lr 0.0027566270422101856 +823 69 training.batch_size 1.0 +823 69 training.label_smoothing 0.006912627593335393 +823 
70 model.embedding_dim 2.0 +823 70 model.scoring_fct_norm 2.0 +823 70 optimizer.lr 0.0011435527044859931 +823 70 training.batch_size 1.0 +823 70 training.label_smoothing 0.005476695981036726 +823 71 model.embedding_dim 0.0 +823 71 model.scoring_fct_norm 2.0 +823 71 optimizer.lr 0.0031735658995157763 +823 71 training.batch_size 2.0 +823 71 training.label_smoothing 0.002033328451403242 +823 72 model.embedding_dim 1.0 +823 72 model.scoring_fct_norm 1.0 +823 72 optimizer.lr 0.009388021861797083 +823 72 training.batch_size 1.0 +823 72 training.label_smoothing 0.007563318618323407 +823 73 model.embedding_dim 2.0 +823 73 model.scoring_fct_norm 1.0 +823 73 optimizer.lr 0.05416906313130107 +823 73 training.batch_size 2.0 +823 73 training.label_smoothing 0.0014056425767422671 +823 74 model.embedding_dim 1.0 +823 74 model.scoring_fct_norm 1.0 +823 74 optimizer.lr 0.0357835197009402 +823 74 training.batch_size 0.0 +823 74 training.label_smoothing 0.03332192424944555 +823 75 model.embedding_dim 2.0 +823 75 model.scoring_fct_norm 2.0 +823 75 optimizer.lr 0.058543606434132096 +823 75 training.batch_size 0.0 +823 75 training.label_smoothing 0.19772955575003776 +823 76 model.embedding_dim 0.0 +823 76 model.scoring_fct_norm 1.0 +823 76 optimizer.lr 0.0017452121639923493 +823 76 training.batch_size 0.0 +823 76 training.label_smoothing 0.04706216615940698 +823 77 model.embedding_dim 2.0 +823 77 model.scoring_fct_norm 2.0 +823 77 optimizer.lr 0.012436220314750463 +823 77 training.batch_size 0.0 +823 77 training.label_smoothing 0.16543173788488116 +823 78 model.embedding_dim 0.0 +823 78 model.scoring_fct_norm 2.0 +823 78 optimizer.lr 0.002247372882239742 +823 78 training.batch_size 0.0 +823 78 training.label_smoothing 0.1254662338871062 +823 79 model.embedding_dim 0.0 +823 79 model.scoring_fct_norm 1.0 +823 79 optimizer.lr 0.002866335997431064 +823 79 training.batch_size 0.0 +823 79 training.label_smoothing 0.09970053596124374 +823 80 model.embedding_dim 2.0 +823 80 
model.scoring_fct_norm 1.0 +823 80 optimizer.lr 0.06646299992724608 +823 80 training.batch_size 1.0 +823 80 training.label_smoothing 0.08762003323680786 +823 81 model.embedding_dim 2.0 +823 81 model.scoring_fct_norm 1.0 +823 81 optimizer.lr 0.030039892312268543 +823 81 training.batch_size 0.0 +823 81 training.label_smoothing 0.22644849761141359 +823 82 model.embedding_dim 1.0 +823 82 model.scoring_fct_norm 2.0 +823 82 optimizer.lr 0.0014241336604225597 +823 82 training.batch_size 1.0 +823 82 training.label_smoothing 0.01866190302677773 +823 83 model.embedding_dim 2.0 +823 83 model.scoring_fct_norm 2.0 +823 83 optimizer.lr 0.0010276760958215416 +823 83 training.batch_size 0.0 +823 83 training.label_smoothing 0.265860119263243 +823 84 model.embedding_dim 2.0 +823 84 model.scoring_fct_norm 1.0 +823 84 optimizer.lr 0.01715203514925149 +823 84 training.batch_size 1.0 +823 84 training.label_smoothing 0.0026863935429880907 +823 85 model.embedding_dim 1.0 +823 85 model.scoring_fct_norm 1.0 +823 85 optimizer.lr 0.07271535215460598 +823 85 training.batch_size 1.0 +823 85 training.label_smoothing 0.02148697060043256 +823 86 model.embedding_dim 1.0 +823 86 model.scoring_fct_norm 1.0 +823 86 optimizer.lr 0.001085838230096315 +823 86 training.batch_size 0.0 +823 86 training.label_smoothing 0.0013043538312052607 +823 87 model.embedding_dim 1.0 +823 87 model.scoring_fct_norm 2.0 +823 87 optimizer.lr 0.04890378082102112 +823 87 training.batch_size 0.0 +823 87 training.label_smoothing 0.001959534677824475 +823 88 model.embedding_dim 0.0 +823 88 model.scoring_fct_norm 1.0 +823 88 optimizer.lr 0.001995309273150438 +823 88 training.batch_size 2.0 +823 88 training.label_smoothing 0.0015053115495103984 +823 89 model.embedding_dim 2.0 +823 89 model.scoring_fct_norm 1.0 +823 89 optimizer.lr 0.022362739200184546 +823 89 training.batch_size 1.0 +823 89 training.label_smoothing 0.004791208792214424 +823 90 model.embedding_dim 0.0 +823 90 model.scoring_fct_norm 1.0 +823 90 optimizer.lr 
0.012191161210222642 +823 90 training.batch_size 1.0 +823 90 training.label_smoothing 0.6146646165612902 +823 91 model.embedding_dim 2.0 +823 91 model.scoring_fct_norm 1.0 +823 91 optimizer.lr 0.001739456730464184 +823 91 training.batch_size 0.0 +823 91 training.label_smoothing 0.06380452299351529 +823 92 model.embedding_dim 0.0 +823 92 model.scoring_fct_norm 1.0 +823 92 optimizer.lr 0.07990262785666832 +823 92 training.batch_size 0.0 +823 92 training.label_smoothing 0.006411302541217009 +823 93 model.embedding_dim 1.0 +823 93 model.scoring_fct_norm 2.0 +823 93 optimizer.lr 0.01358809574153026 +823 93 training.batch_size 0.0 +823 93 training.label_smoothing 0.7669372073885791 +823 94 model.embedding_dim 0.0 +823 94 model.scoring_fct_norm 1.0 +823 94 optimizer.lr 0.012809019540626276 +823 94 training.batch_size 2.0 +823 94 training.label_smoothing 0.008617502356099573 +823 95 model.embedding_dim 0.0 +823 95 model.scoring_fct_norm 1.0 +823 95 optimizer.lr 0.03852217649986406 +823 95 training.batch_size 1.0 +823 95 training.label_smoothing 0.08339737356837934 +823 96 model.embedding_dim 1.0 +823 96 model.scoring_fct_norm 2.0 +823 96 optimizer.lr 0.05923831024219134 +823 96 training.batch_size 2.0 +823 96 training.label_smoothing 0.05162703929011927 +823 97 model.embedding_dim 1.0 +823 97 model.scoring_fct_norm 2.0 +823 97 optimizer.lr 0.002356866925633757 +823 97 training.batch_size 2.0 +823 97 training.label_smoothing 0.002947915995296517 +823 98 model.embedding_dim 1.0 +823 98 model.scoring_fct_norm 2.0 +823 98 optimizer.lr 0.0566029876907729 +823 98 training.batch_size 1.0 +823 98 training.label_smoothing 0.42717112284346703 +823 99 model.embedding_dim 1.0 +823 99 model.scoring_fct_norm 1.0 +823 99 optimizer.lr 0.002934583974807194 +823 99 training.batch_size 1.0 +823 99 training.label_smoothing 0.003828314723327378 +823 100 model.embedding_dim 1.0 +823 100 model.scoring_fct_norm 2.0 +823 100 optimizer.lr 0.0015000695740057268 +823 100 training.batch_size 1.0 +823 
100 training.label_smoothing 0.030525155563499124 +823 1 dataset """kinships""" +823 1 model """transe""" +823 1 loss """crossentropy""" +823 1 regularizer """no""" +823 1 optimizer """adam""" +823 1 training_loop """lcwa""" +823 1 evaluator """rankbased""" +823 2 dataset """kinships""" +823 2 model """transe""" +823 2 loss """crossentropy""" +823 2 regularizer """no""" +823 2 optimizer """adam""" +823 2 training_loop """lcwa""" +823 2 evaluator """rankbased""" +823 3 dataset """kinships""" +823 3 model """transe""" +823 3 loss """crossentropy""" +823 3 regularizer """no""" +823 3 optimizer """adam""" +823 3 training_loop """lcwa""" +823 3 evaluator """rankbased""" +823 4 dataset """kinships""" +823 4 model """transe""" +823 4 loss """crossentropy""" +823 4 regularizer """no""" +823 4 optimizer """adam""" +823 4 training_loop """lcwa""" +823 4 evaluator """rankbased""" +823 5 dataset """kinships""" +823 5 model """transe""" +823 5 loss """crossentropy""" +823 5 regularizer """no""" +823 5 optimizer """adam""" +823 5 training_loop """lcwa""" +823 5 evaluator """rankbased""" +823 6 dataset """kinships""" +823 6 model """transe""" +823 6 loss """crossentropy""" +823 6 regularizer """no""" +823 6 optimizer """adam""" +823 6 training_loop """lcwa""" +823 6 evaluator """rankbased""" +823 7 dataset """kinships""" +823 7 model """transe""" +823 7 loss """crossentropy""" +823 7 regularizer """no""" +823 7 optimizer """adam""" +823 7 training_loop """lcwa""" +823 7 evaluator """rankbased""" +823 8 dataset """kinships""" +823 8 model """transe""" +823 8 loss """crossentropy""" +823 8 regularizer """no""" +823 8 optimizer """adam""" +823 8 training_loop """lcwa""" +823 8 evaluator """rankbased""" +823 9 dataset """kinships""" +823 9 model """transe""" +823 9 loss """crossentropy""" +823 9 regularizer """no""" +823 9 optimizer """adam""" +823 9 training_loop """lcwa""" +823 9 evaluator """rankbased""" +823 10 dataset """kinships""" +823 10 model """transe""" +823 10 loss 
"""crossentropy""" +823 10 regularizer """no""" +823 10 optimizer """adam""" +823 10 training_loop """lcwa""" +823 10 evaluator """rankbased""" +823 11 dataset """kinships""" +823 11 model """transe""" +823 11 loss """crossentropy""" +823 11 regularizer """no""" +823 11 optimizer """adam""" +823 11 training_loop """lcwa""" +823 11 evaluator """rankbased""" +823 12 dataset """kinships""" +823 12 model """transe""" +823 12 loss """crossentropy""" +823 12 regularizer """no""" +823 12 optimizer """adam""" +823 12 training_loop """lcwa""" +823 12 evaluator """rankbased""" +823 13 dataset """kinships""" +823 13 model """transe""" +823 13 loss """crossentropy""" +823 13 regularizer """no""" +823 13 optimizer """adam""" +823 13 training_loop """lcwa""" +823 13 evaluator """rankbased""" +823 14 dataset """kinships""" +823 14 model """transe""" +823 14 loss """crossentropy""" +823 14 regularizer """no""" +823 14 optimizer """adam""" +823 14 training_loop """lcwa""" +823 14 evaluator """rankbased""" +823 15 dataset """kinships""" +823 15 model """transe""" +823 15 loss """crossentropy""" +823 15 regularizer """no""" +823 15 optimizer """adam""" +823 15 training_loop """lcwa""" +823 15 evaluator """rankbased""" +823 16 dataset """kinships""" +823 16 model """transe""" +823 16 loss """crossentropy""" +823 16 regularizer """no""" +823 16 optimizer """adam""" +823 16 training_loop """lcwa""" +823 16 evaluator """rankbased""" +823 17 dataset """kinships""" +823 17 model """transe""" +823 17 loss """crossentropy""" +823 17 regularizer """no""" +823 17 optimizer """adam""" +823 17 training_loop """lcwa""" +823 17 evaluator """rankbased""" +823 18 dataset """kinships""" +823 18 model """transe""" +823 18 loss """crossentropy""" +823 18 regularizer """no""" +823 18 optimizer """adam""" +823 18 training_loop """lcwa""" +823 18 evaluator """rankbased""" +823 19 dataset """kinships""" +823 19 model """transe""" +823 19 loss """crossentropy""" +823 19 regularizer """no""" +823 19 
optimizer """adam""" +823 19 training_loop """lcwa""" +823 19 evaluator """rankbased""" +823 20 dataset """kinships""" +823 20 model """transe""" +823 20 loss """crossentropy""" +823 20 regularizer """no""" +823 20 optimizer """adam""" +823 20 training_loop """lcwa""" +823 20 evaluator """rankbased""" +823 21 dataset """kinships""" +823 21 model """transe""" +823 21 loss """crossentropy""" +823 21 regularizer """no""" +823 21 optimizer """adam""" +823 21 training_loop """lcwa""" +823 21 evaluator """rankbased""" +823 22 dataset """kinships""" +823 22 model """transe""" +823 22 loss """crossentropy""" +823 22 regularizer """no""" +823 22 optimizer """adam""" +823 22 training_loop """lcwa""" +823 22 evaluator """rankbased""" +823 23 dataset """kinships""" +823 23 model """transe""" +823 23 loss """crossentropy""" +823 23 regularizer """no""" +823 23 optimizer """adam""" +823 23 training_loop """lcwa""" +823 23 evaluator """rankbased""" +823 24 dataset """kinships""" +823 24 model """transe""" +823 24 loss """crossentropy""" +823 24 regularizer """no""" +823 24 optimizer """adam""" +823 24 training_loop """lcwa""" +823 24 evaluator """rankbased""" +823 25 dataset """kinships""" +823 25 model """transe""" +823 25 loss """crossentropy""" +823 25 regularizer """no""" +823 25 optimizer """adam""" +823 25 training_loop """lcwa""" +823 25 evaluator """rankbased""" +823 26 dataset """kinships""" +823 26 model """transe""" +823 26 loss """crossentropy""" +823 26 regularizer """no""" +823 26 optimizer """adam""" +823 26 training_loop """lcwa""" +823 26 evaluator """rankbased""" +823 27 dataset """kinships""" +823 27 model """transe""" +823 27 loss """crossentropy""" +823 27 regularizer """no""" +823 27 optimizer """adam""" +823 27 training_loop """lcwa""" +823 27 evaluator """rankbased""" +823 28 dataset """kinships""" +823 28 model """transe""" +823 28 loss """crossentropy""" +823 28 regularizer """no""" +823 28 optimizer """adam""" +823 28 training_loop """lcwa""" +823 28 
evaluator """rankbased""" +823 29 dataset """kinships""" +823 29 model """transe""" +823 29 loss """crossentropy""" +823 29 regularizer """no""" +823 29 optimizer """adam""" +823 29 training_loop """lcwa""" +823 29 evaluator """rankbased""" +823 30 dataset """kinships""" +823 30 model """transe""" +823 30 loss """crossentropy""" +823 30 regularizer """no""" +823 30 optimizer """adam""" +823 30 training_loop """lcwa""" +823 30 evaluator """rankbased""" +823 31 dataset """kinships""" +823 31 model """transe""" +823 31 loss """crossentropy""" +823 31 regularizer """no""" +823 31 optimizer """adam""" +823 31 training_loop """lcwa""" +823 31 evaluator """rankbased""" +823 32 dataset """kinships""" +823 32 model """transe""" +823 32 loss """crossentropy""" +823 32 regularizer """no""" +823 32 optimizer """adam""" +823 32 training_loop """lcwa""" +823 32 evaluator """rankbased""" +823 33 dataset """kinships""" +823 33 model """transe""" +823 33 loss """crossentropy""" +823 33 regularizer """no""" +823 33 optimizer """adam""" +823 33 training_loop """lcwa""" +823 33 evaluator """rankbased""" +823 34 dataset """kinships""" +823 34 model """transe""" +823 34 loss """crossentropy""" +823 34 regularizer """no""" +823 34 optimizer """adam""" +823 34 training_loop """lcwa""" +823 34 evaluator """rankbased""" +823 35 dataset """kinships""" +823 35 model """transe""" +823 35 loss """crossentropy""" +823 35 regularizer """no""" +823 35 optimizer """adam""" +823 35 training_loop """lcwa""" +823 35 evaluator """rankbased""" +823 36 dataset """kinships""" +823 36 model """transe""" +823 36 loss """crossentropy""" +823 36 regularizer """no""" +823 36 optimizer """adam""" +823 36 training_loop """lcwa""" +823 36 evaluator """rankbased""" +823 37 dataset """kinships""" +823 37 model """transe""" +823 37 loss """crossentropy""" +823 37 regularizer """no""" +823 37 optimizer """adam""" +823 37 training_loop """lcwa""" +823 37 evaluator """rankbased""" +823 38 dataset """kinships""" +823 38 
model """transe""" +823 38 loss """crossentropy""" +823 38 regularizer """no""" +823 38 optimizer """adam""" +823 38 training_loop """lcwa""" +823 38 evaluator """rankbased""" +823 39 dataset """kinships""" +823 39 model """transe""" +823 39 loss """crossentropy""" +823 39 regularizer """no""" +823 39 optimizer """adam""" +823 39 training_loop """lcwa""" +823 39 evaluator """rankbased""" +823 40 dataset """kinships""" +823 40 model """transe""" +823 40 loss """crossentropy""" +823 40 regularizer """no""" +823 40 optimizer """adam""" +823 40 training_loop """lcwa""" +823 40 evaluator """rankbased""" +823 41 dataset """kinships""" +823 41 model """transe""" +823 41 loss """crossentropy""" +823 41 regularizer """no""" +823 41 optimizer """adam""" +823 41 training_loop """lcwa""" +823 41 evaluator """rankbased""" +823 42 dataset """kinships""" +823 42 model """transe""" +823 42 loss """crossentropy""" +823 42 regularizer """no""" +823 42 optimizer """adam""" +823 42 training_loop """lcwa""" +823 42 evaluator """rankbased""" +823 43 dataset """kinships""" +823 43 model """transe""" +823 43 loss """crossentropy""" +823 43 regularizer """no""" +823 43 optimizer """adam""" +823 43 training_loop """lcwa""" +823 43 evaluator """rankbased""" +823 44 dataset """kinships""" +823 44 model """transe""" +823 44 loss """crossentropy""" +823 44 regularizer """no""" +823 44 optimizer """adam""" +823 44 training_loop """lcwa""" +823 44 evaluator """rankbased""" +823 45 dataset """kinships""" +823 45 model """transe""" +823 45 loss """crossentropy""" +823 45 regularizer """no""" +823 45 optimizer """adam""" +823 45 training_loop """lcwa""" +823 45 evaluator """rankbased""" +823 46 dataset """kinships""" +823 46 model """transe""" +823 46 loss """crossentropy""" +823 46 regularizer """no""" +823 46 optimizer """adam""" +823 46 training_loop """lcwa""" +823 46 evaluator """rankbased""" +823 47 dataset """kinships""" +823 47 model """transe""" +823 47 loss """crossentropy""" +823 47 
regularizer """no""" +823 47 optimizer """adam""" +823 47 training_loop """lcwa""" +823 47 evaluator """rankbased""" +823 48 dataset """kinships""" +823 48 model """transe""" +823 48 loss """crossentropy""" +823 48 regularizer """no""" +823 48 optimizer """adam""" +823 48 training_loop """lcwa""" +823 48 evaluator """rankbased""" +823 49 dataset """kinships""" +823 49 model """transe""" +823 49 loss """crossentropy""" +823 49 regularizer """no""" +823 49 optimizer """adam""" +823 49 training_loop """lcwa""" +823 49 evaluator """rankbased""" +823 50 dataset """kinships""" +823 50 model """transe""" +823 50 loss """crossentropy""" +823 50 regularizer """no""" +823 50 optimizer """adam""" +823 50 training_loop """lcwa""" +823 50 evaluator """rankbased""" +823 51 dataset """kinships""" +823 51 model """transe""" +823 51 loss """crossentropy""" +823 51 regularizer """no""" +823 51 optimizer """adam""" +823 51 training_loop """lcwa""" +823 51 evaluator """rankbased""" +823 52 dataset """kinships""" +823 52 model """transe""" +823 52 loss """crossentropy""" +823 52 regularizer """no""" +823 52 optimizer """adam""" +823 52 training_loop """lcwa""" +823 52 evaluator """rankbased""" +823 53 dataset """kinships""" +823 53 model """transe""" +823 53 loss """crossentropy""" +823 53 regularizer """no""" +823 53 optimizer """adam""" +823 53 training_loop """lcwa""" +823 53 evaluator """rankbased""" +823 54 dataset """kinships""" +823 54 model """transe""" +823 54 loss """crossentropy""" +823 54 regularizer """no""" +823 54 optimizer """adam""" +823 54 training_loop """lcwa""" +823 54 evaluator """rankbased""" +823 55 dataset """kinships""" +823 55 model """transe""" +823 55 loss """crossentropy""" +823 55 regularizer """no""" +823 55 optimizer """adam""" +823 55 training_loop """lcwa""" +823 55 evaluator """rankbased""" +823 56 dataset """kinships""" +823 56 model """transe""" +823 56 loss """crossentropy""" +823 56 regularizer """no""" +823 56 optimizer """adam""" +823 56 
training_loop """lcwa""" +823 56 evaluator """rankbased""" +823 57 dataset """kinships""" +823 57 model """transe""" +823 57 loss """crossentropy""" +823 57 regularizer """no""" +823 57 optimizer """adam""" +823 57 training_loop """lcwa""" +823 57 evaluator """rankbased""" +823 58 dataset """kinships""" +823 58 model """transe""" +823 58 loss """crossentropy""" +823 58 regularizer """no""" +823 58 optimizer """adam""" +823 58 training_loop """lcwa""" +823 58 evaluator """rankbased""" +823 59 dataset """kinships""" +823 59 model """transe""" +823 59 loss """crossentropy""" +823 59 regularizer """no""" +823 59 optimizer """adam""" +823 59 training_loop """lcwa""" +823 59 evaluator """rankbased""" +823 60 dataset """kinships""" +823 60 model """transe""" +823 60 loss """crossentropy""" +823 60 regularizer """no""" +823 60 optimizer """adam""" +823 60 training_loop """lcwa""" +823 60 evaluator """rankbased""" +823 61 dataset """kinships""" +823 61 model """transe""" +823 61 loss """crossentropy""" +823 61 regularizer """no""" +823 61 optimizer """adam""" +823 61 training_loop """lcwa""" +823 61 evaluator """rankbased""" +823 62 dataset """kinships""" +823 62 model """transe""" +823 62 loss """crossentropy""" +823 62 regularizer """no""" +823 62 optimizer """adam""" +823 62 training_loop """lcwa""" +823 62 evaluator """rankbased""" +823 63 dataset """kinships""" +823 63 model """transe""" +823 63 loss """crossentropy""" +823 63 regularizer """no""" +823 63 optimizer """adam""" +823 63 training_loop """lcwa""" +823 63 evaluator """rankbased""" +823 64 dataset """kinships""" +823 64 model """transe""" +823 64 loss """crossentropy""" +823 64 regularizer """no""" +823 64 optimizer """adam""" +823 64 training_loop """lcwa""" +823 64 evaluator """rankbased""" +823 65 dataset """kinships""" +823 65 model """transe""" +823 65 loss """crossentropy""" +823 65 regularizer """no""" +823 65 optimizer """adam""" +823 65 training_loop """lcwa""" +823 65 evaluator """rankbased""" +823 
66 dataset """kinships""" +823 66 model """transe""" +823 66 loss """crossentropy""" +823 66 regularizer """no""" +823 66 optimizer """adam""" +823 66 training_loop """lcwa""" +823 66 evaluator """rankbased""" +823 67 dataset """kinships""" +823 67 model """transe""" +823 67 loss """crossentropy""" +823 67 regularizer """no""" +823 67 optimizer """adam""" +823 67 training_loop """lcwa""" +823 67 evaluator """rankbased""" +823 68 dataset """kinships""" +823 68 model """transe""" +823 68 loss """crossentropy""" +823 68 regularizer """no""" +823 68 optimizer """adam""" +823 68 training_loop """lcwa""" +823 68 evaluator """rankbased""" +823 69 dataset """kinships""" +823 69 model """transe""" +823 69 loss """crossentropy""" +823 69 regularizer """no""" +823 69 optimizer """adam""" +823 69 training_loop """lcwa""" +823 69 evaluator """rankbased""" +823 70 dataset """kinships""" +823 70 model """transe""" +823 70 loss """crossentropy""" +823 70 regularizer """no""" +823 70 optimizer """adam""" +823 70 training_loop """lcwa""" +823 70 evaluator """rankbased""" +823 71 dataset """kinships""" +823 71 model """transe""" +823 71 loss """crossentropy""" +823 71 regularizer """no""" +823 71 optimizer """adam""" +823 71 training_loop """lcwa""" +823 71 evaluator """rankbased""" +823 72 dataset """kinships""" +823 72 model """transe""" +823 72 loss """crossentropy""" +823 72 regularizer """no""" +823 72 optimizer """adam""" +823 72 training_loop """lcwa""" +823 72 evaluator """rankbased""" +823 73 dataset """kinships""" +823 73 model """transe""" +823 73 loss """crossentropy""" +823 73 regularizer """no""" +823 73 optimizer """adam""" +823 73 training_loop """lcwa""" +823 73 evaluator """rankbased""" +823 74 dataset """kinships""" +823 74 model """transe""" +823 74 loss """crossentropy""" +823 74 regularizer """no""" +823 74 optimizer """adam""" +823 74 training_loop """lcwa""" +823 74 evaluator """rankbased""" +823 75 dataset """kinships""" +823 75 model """transe""" +823 75 
loss """crossentropy""" +823 75 regularizer """no""" +823 75 optimizer """adam""" +823 75 training_loop """lcwa""" +823 75 evaluator """rankbased""" +823 76 dataset """kinships""" +823 76 model """transe""" +823 76 loss """crossentropy""" +823 76 regularizer """no""" +823 76 optimizer """adam""" +823 76 training_loop """lcwa""" +823 76 evaluator """rankbased""" +823 77 dataset """kinships""" +823 77 model """transe""" +823 77 loss """crossentropy""" +823 77 regularizer """no""" +823 77 optimizer """adam""" +823 77 training_loop """lcwa""" +823 77 evaluator """rankbased""" +823 78 dataset """kinships""" +823 78 model """transe""" +823 78 loss """crossentropy""" +823 78 regularizer """no""" +823 78 optimizer """adam""" +823 78 training_loop """lcwa""" +823 78 evaluator """rankbased""" +823 79 dataset """kinships""" +823 79 model """transe""" +823 79 loss """crossentropy""" +823 79 regularizer """no""" +823 79 optimizer """adam""" +823 79 training_loop """lcwa""" +823 79 evaluator """rankbased""" +823 80 dataset """kinships""" +823 80 model """transe""" +823 80 loss """crossentropy""" +823 80 regularizer """no""" +823 80 optimizer """adam""" +823 80 training_loop """lcwa""" +823 80 evaluator """rankbased""" +823 81 dataset """kinships""" +823 81 model """transe""" +823 81 loss """crossentropy""" +823 81 regularizer """no""" +823 81 optimizer """adam""" +823 81 training_loop """lcwa""" +823 81 evaluator """rankbased""" +823 82 dataset """kinships""" +823 82 model """transe""" +823 82 loss """crossentropy""" +823 82 regularizer """no""" +823 82 optimizer """adam""" +823 82 training_loop """lcwa""" +823 82 evaluator """rankbased""" +823 83 dataset """kinships""" +823 83 model """transe""" +823 83 loss """crossentropy""" +823 83 regularizer """no""" +823 83 optimizer """adam""" +823 83 training_loop """lcwa""" +823 83 evaluator """rankbased""" +823 84 dataset """kinships""" +823 84 model """transe""" +823 84 loss """crossentropy""" +823 84 regularizer """no""" +823 84 
optimizer """adam""" +823 84 training_loop """lcwa""" +823 84 evaluator """rankbased""" +823 85 dataset """kinships""" +823 85 model """transe""" +823 85 loss """crossentropy""" +823 85 regularizer """no""" +823 85 optimizer """adam""" +823 85 training_loop """lcwa""" +823 85 evaluator """rankbased""" +823 86 dataset """kinships""" +823 86 model """transe""" +823 86 loss """crossentropy""" +823 86 regularizer """no""" +823 86 optimizer """adam""" +823 86 training_loop """lcwa""" +823 86 evaluator """rankbased""" +823 87 dataset """kinships""" +823 87 model """transe""" +823 87 loss """crossentropy""" +823 87 regularizer """no""" +823 87 optimizer """adam""" +823 87 training_loop """lcwa""" +823 87 evaluator """rankbased""" +823 88 dataset """kinships""" +823 88 model """transe""" +823 88 loss """crossentropy""" +823 88 regularizer """no""" +823 88 optimizer """adam""" +823 88 training_loop """lcwa""" +823 88 evaluator """rankbased""" +823 89 dataset """kinships""" +823 89 model """transe""" +823 89 loss """crossentropy""" +823 89 regularizer """no""" +823 89 optimizer """adam""" +823 89 training_loop """lcwa""" +823 89 evaluator """rankbased""" +823 90 dataset """kinships""" +823 90 model """transe""" +823 90 loss """crossentropy""" +823 90 regularizer """no""" +823 90 optimizer """adam""" +823 90 training_loop """lcwa""" +823 90 evaluator """rankbased""" +823 91 dataset """kinships""" +823 91 model """transe""" +823 91 loss """crossentropy""" +823 91 regularizer """no""" +823 91 optimizer """adam""" +823 91 training_loop """lcwa""" +823 91 evaluator """rankbased""" +823 92 dataset """kinships""" +823 92 model """transe""" +823 92 loss """crossentropy""" +823 92 regularizer """no""" +823 92 optimizer """adam""" +823 92 training_loop """lcwa""" +823 92 evaluator """rankbased""" +823 93 dataset """kinships""" +823 93 model """transe""" +823 93 loss """crossentropy""" +823 93 regularizer """no""" +823 93 optimizer """adam""" +823 93 training_loop """lcwa""" +823 93 
evaluator """rankbased""" +823 94 dataset """kinships""" +823 94 model """transe""" +823 94 loss """crossentropy""" +823 94 regularizer """no""" +823 94 optimizer """adam""" +823 94 training_loop """lcwa""" +823 94 evaluator """rankbased""" +823 95 dataset """kinships""" +823 95 model """transe""" +823 95 loss """crossentropy""" +823 95 regularizer """no""" +823 95 optimizer """adam""" +823 95 training_loop """lcwa""" +823 95 evaluator """rankbased""" +823 96 dataset """kinships""" +823 96 model """transe""" +823 96 loss """crossentropy""" +823 96 regularizer """no""" +823 96 optimizer """adam""" +823 96 training_loop """lcwa""" +823 96 evaluator """rankbased""" +823 97 dataset """kinships""" +823 97 model """transe""" +823 97 loss """crossentropy""" +823 97 regularizer """no""" +823 97 optimizer """adam""" +823 97 training_loop """lcwa""" +823 97 evaluator """rankbased""" +823 98 dataset """kinships""" +823 98 model """transe""" +823 98 loss """crossentropy""" +823 98 regularizer """no""" +823 98 optimizer """adam""" +823 98 training_loop """lcwa""" +823 98 evaluator """rankbased""" +823 99 dataset """kinships""" +823 99 model """transe""" +823 99 loss """crossentropy""" +823 99 regularizer """no""" +823 99 optimizer """adam""" +823 99 training_loop """lcwa""" +823 99 evaluator """rankbased""" +823 100 dataset """kinships""" +823 100 model """transe""" +823 100 loss """crossentropy""" +823 100 regularizer """no""" +823 100 optimizer """adam""" +823 100 training_loop """lcwa""" +823 100 evaluator """rankbased""" +824 1 model.embedding_dim 1.0 +824 1 model.scoring_fct_norm 1.0 +824 1 optimizer.lr 0.004404078457290776 +824 1 training.batch_size 0.0 +824 1 training.label_smoothing 0.15328222644451073 +824 2 model.embedding_dim 0.0 +824 2 model.scoring_fct_norm 1.0 +824 2 optimizer.lr 0.04899414182778057 +824 2 training.batch_size 1.0 +824 2 training.label_smoothing 0.11322166793834493 +824 3 model.embedding_dim 1.0 +824 3 model.scoring_fct_norm 2.0 +824 3 optimizer.lr 
0.004377131069677085 +824 3 training.batch_size 2.0 +824 3 training.label_smoothing 0.004890321514127404 +824 4 model.embedding_dim 2.0 +824 4 model.scoring_fct_norm 1.0 +824 4 optimizer.lr 0.0027965735931907714 +824 4 training.batch_size 0.0 +824 4 training.label_smoothing 0.006370054030545917 +824 5 model.embedding_dim 1.0 +824 5 model.scoring_fct_norm 2.0 +824 5 optimizer.lr 0.012369798384636097 +824 5 training.batch_size 2.0 +824 5 training.label_smoothing 0.4138079690461433 +824 6 model.embedding_dim 1.0 +824 6 model.scoring_fct_norm 1.0 +824 6 optimizer.lr 0.09127697708424058 +824 6 training.batch_size 0.0 +824 6 training.label_smoothing 0.008826170154437558 +824 7 model.embedding_dim 1.0 +824 7 model.scoring_fct_norm 2.0 +824 7 optimizer.lr 0.00567466293811735 +824 7 training.batch_size 0.0 +824 7 training.label_smoothing 0.0010438671463347002 +824 8 model.embedding_dim 1.0 +824 8 model.scoring_fct_norm 1.0 +824 8 optimizer.lr 0.002981867964140973 +824 8 training.batch_size 0.0 +824 8 training.label_smoothing 0.07525059627621315 +824 9 model.embedding_dim 1.0 +824 9 model.scoring_fct_norm 2.0 +824 9 optimizer.lr 0.0010962715279857725 +824 9 training.batch_size 1.0 +824 9 training.label_smoothing 0.002799388000255874 +824 10 model.embedding_dim 1.0 +824 10 model.scoring_fct_norm 2.0 +824 10 optimizer.lr 0.0012424314762904457 +824 10 training.batch_size 0.0 +824 10 training.label_smoothing 0.7900395117542509 +824 11 model.embedding_dim 0.0 +824 11 model.scoring_fct_norm 1.0 +824 11 optimizer.lr 0.06868824726510848 +824 11 training.batch_size 1.0 +824 11 training.label_smoothing 0.0025601139904320755 +824 12 model.embedding_dim 2.0 +824 12 model.scoring_fct_norm 2.0 +824 12 optimizer.lr 0.0012715779336337923 +824 12 training.batch_size 1.0 +824 12 training.label_smoothing 0.009063600265226238 +824 13 model.embedding_dim 2.0 +824 13 model.scoring_fct_norm 1.0 +824 13 optimizer.lr 0.08086305611522712 +824 13 training.batch_size 1.0 +824 13 
training.label_smoothing 0.736929787970695 +824 14 model.embedding_dim 1.0 +824 14 model.scoring_fct_norm 1.0 +824 14 optimizer.lr 0.02652672313431747 +824 14 training.batch_size 0.0 +824 14 training.label_smoothing 0.019036949875740478 +824 15 model.embedding_dim 1.0 +824 15 model.scoring_fct_norm 2.0 +824 15 optimizer.lr 0.00232880572042099 +824 15 training.batch_size 2.0 +824 15 training.label_smoothing 0.7098438179494974 +824 16 model.embedding_dim 1.0 +824 16 model.scoring_fct_norm 1.0 +824 16 optimizer.lr 0.0011907842725024731 +824 16 training.batch_size 0.0 +824 16 training.label_smoothing 0.07388450997991104 +824 17 model.embedding_dim 0.0 +824 17 model.scoring_fct_norm 1.0 +824 17 optimizer.lr 0.0015788928289540848 +824 17 training.batch_size 0.0 +824 17 training.label_smoothing 0.08161213203648811 +824 18 model.embedding_dim 2.0 +824 18 model.scoring_fct_norm 2.0 +824 18 optimizer.lr 0.06969053383257456 +824 18 training.batch_size 1.0 +824 18 training.label_smoothing 0.03224808108116174 +824 19 model.embedding_dim 0.0 +824 19 model.scoring_fct_norm 2.0 +824 19 optimizer.lr 0.0033653671163136703 +824 19 training.batch_size 0.0 +824 19 training.label_smoothing 0.020349223361082313 +824 20 model.embedding_dim 0.0 +824 20 model.scoring_fct_norm 2.0 +824 20 optimizer.lr 0.007424049463866315 +824 20 training.batch_size 0.0 +824 20 training.label_smoothing 0.005680415010399246 +824 21 model.embedding_dim 2.0 +824 21 model.scoring_fct_norm 2.0 +824 21 optimizer.lr 0.0019456076438222152 +824 21 training.batch_size 2.0 +824 21 training.label_smoothing 0.0702703579183765 +824 22 model.embedding_dim 0.0 +824 22 model.scoring_fct_norm 1.0 +824 22 optimizer.lr 0.015475957851427273 +824 22 training.batch_size 1.0 +824 22 training.label_smoothing 0.01413903318723369 +824 23 model.embedding_dim 0.0 +824 23 model.scoring_fct_norm 1.0 +824 23 optimizer.lr 0.009221003041155562 +824 23 training.batch_size 1.0 +824 23 training.label_smoothing 0.20684138369451588 +824 24 
model.embedding_dim 0.0 +824 24 model.scoring_fct_norm 1.0 +824 24 optimizer.lr 0.030474180801377163 +824 24 training.batch_size 0.0 +824 24 training.label_smoothing 0.03456904590221485 +824 25 model.embedding_dim 1.0 +824 25 model.scoring_fct_norm 2.0 +824 25 optimizer.lr 0.0034472983764447956 +824 25 training.batch_size 1.0 +824 25 training.label_smoothing 0.01718553987941757 +824 26 model.embedding_dim 2.0 +824 26 model.scoring_fct_norm 2.0 +824 26 optimizer.lr 0.001014684452352207 +824 26 training.batch_size 1.0 +824 26 training.label_smoothing 0.005675612063791133 +824 27 model.embedding_dim 0.0 +824 27 model.scoring_fct_norm 2.0 +824 27 optimizer.lr 0.03429484991880408 +824 27 training.batch_size 0.0 +824 27 training.label_smoothing 0.002796021874958871 +824 28 model.embedding_dim 1.0 +824 28 model.scoring_fct_norm 2.0 +824 28 optimizer.lr 0.002724612969568768 +824 28 training.batch_size 1.0 +824 28 training.label_smoothing 0.001182187779535795 +824 29 model.embedding_dim 0.0 +824 29 model.scoring_fct_norm 2.0 +824 29 optimizer.lr 0.002004005390926302 +824 29 training.batch_size 1.0 +824 29 training.label_smoothing 0.5988048615924539 +824 30 model.embedding_dim 0.0 +824 30 model.scoring_fct_norm 1.0 +824 30 optimizer.lr 0.011995962806949576 +824 30 training.batch_size 0.0 +824 30 training.label_smoothing 0.0024884970931716073 +824 31 model.embedding_dim 0.0 +824 31 model.scoring_fct_norm 1.0 +824 31 optimizer.lr 0.0014455273073857354 +824 31 training.batch_size 0.0 +824 31 training.label_smoothing 0.016109655838301547 +824 32 model.embedding_dim 0.0 +824 32 model.scoring_fct_norm 2.0 +824 32 optimizer.lr 0.0031315714440866118 +824 32 training.batch_size 1.0 +824 32 training.label_smoothing 0.01602387226159634 +824 33 model.embedding_dim 0.0 +824 33 model.scoring_fct_norm 1.0 +824 33 optimizer.lr 0.00142614095551486 +824 33 training.batch_size 1.0 +824 33 training.label_smoothing 0.2051078674693077 +824 34 model.embedding_dim 0.0 +824 34 model.scoring_fct_norm 
1.0 +824 34 optimizer.lr 0.07066766016864866 +824 34 training.batch_size 0.0 +824 34 training.label_smoothing 0.22176065814030338 +824 35 model.embedding_dim 2.0 +824 35 model.scoring_fct_norm 2.0 +824 35 optimizer.lr 0.00520464616592163 +824 35 training.batch_size 1.0 +824 35 training.label_smoothing 0.002917484058132861 +824 36 model.embedding_dim 2.0 +824 36 model.scoring_fct_norm 1.0 +824 36 optimizer.lr 0.007825157552013757 +824 36 training.batch_size 0.0 +824 36 training.label_smoothing 0.6248850031800309 +824 37 model.embedding_dim 1.0 +824 37 model.scoring_fct_norm 2.0 +824 37 optimizer.lr 0.029838991877000343 +824 37 training.batch_size 2.0 +824 37 training.label_smoothing 0.059834839211726616 +824 38 model.embedding_dim 1.0 +824 38 model.scoring_fct_norm 1.0 +824 38 optimizer.lr 0.0026857572365114333 +824 38 training.batch_size 1.0 +824 38 training.label_smoothing 0.03000847776296237 +824 39 model.embedding_dim 2.0 +824 39 model.scoring_fct_norm 1.0 +824 39 optimizer.lr 0.024527278974438193 +824 39 training.batch_size 0.0 +824 39 training.label_smoothing 0.020465467079214508 +824 40 model.embedding_dim 1.0 +824 40 model.scoring_fct_norm 2.0 +824 40 optimizer.lr 0.022181476383797895 +824 40 training.batch_size 2.0 +824 40 training.label_smoothing 0.016757378267093028 +824 41 model.embedding_dim 1.0 +824 41 model.scoring_fct_norm 2.0 +824 41 optimizer.lr 0.0017927522260946132 +824 41 training.batch_size 2.0 +824 41 training.label_smoothing 0.002096087675294721 +824 42 model.embedding_dim 2.0 +824 42 model.scoring_fct_norm 1.0 +824 42 optimizer.lr 0.003956287850467597 +824 42 training.batch_size 2.0 +824 42 training.label_smoothing 0.002024691822728049 +824 43 model.embedding_dim 2.0 +824 43 model.scoring_fct_norm 1.0 +824 43 optimizer.lr 0.031221163173796352 +824 43 training.batch_size 1.0 +824 43 training.label_smoothing 0.003928453415153097 +824 44 model.embedding_dim 0.0 +824 44 model.scoring_fct_norm 2.0 +824 44 optimizer.lr 0.0032028069068575677 +824 
44 training.batch_size 0.0 +824 44 training.label_smoothing 0.0051547143380350924 +824 45 model.embedding_dim 2.0 +824 45 model.scoring_fct_norm 2.0 +824 45 optimizer.lr 0.04696262164328492 +824 45 training.batch_size 0.0 +824 45 training.label_smoothing 0.31734035311774417 +824 46 model.embedding_dim 1.0 +824 46 model.scoring_fct_norm 2.0 +824 46 optimizer.lr 0.06194421768484254 +824 46 training.batch_size 2.0 +824 46 training.label_smoothing 0.005784424226544492 +824 47 model.embedding_dim 1.0 +824 47 model.scoring_fct_norm 1.0 +824 47 optimizer.lr 0.002806010497579396 +824 47 training.batch_size 2.0 +824 47 training.label_smoothing 0.06285053800002684 +824 48 model.embedding_dim 1.0 +824 48 model.scoring_fct_norm 1.0 +824 48 optimizer.lr 0.002554518990096077 +824 48 training.batch_size 0.0 +824 48 training.label_smoothing 0.011956790388344131 +824 49 model.embedding_dim 2.0 +824 49 model.scoring_fct_norm 1.0 +824 49 optimizer.lr 0.041038949277079566 +824 49 training.batch_size 0.0 +824 49 training.label_smoothing 0.35364897699416353 +824 50 model.embedding_dim 2.0 +824 50 model.scoring_fct_norm 2.0 +824 50 optimizer.lr 0.0023458115932509013 +824 50 training.batch_size 1.0 +824 50 training.label_smoothing 0.10786830913859814 +824 51 model.embedding_dim 0.0 +824 51 model.scoring_fct_norm 1.0 +824 51 optimizer.lr 0.001572675463162415 +824 51 training.batch_size 2.0 +824 51 training.label_smoothing 0.12362321148728897 +824 52 model.embedding_dim 2.0 +824 52 model.scoring_fct_norm 1.0 +824 52 optimizer.lr 0.0013009969668921779 +824 52 training.batch_size 2.0 +824 52 training.label_smoothing 0.5634983929944147 +824 53 model.embedding_dim 1.0 +824 53 model.scoring_fct_norm 1.0 +824 53 optimizer.lr 0.0024546937557148054 +824 53 training.batch_size 0.0 +824 53 training.label_smoothing 0.03898163102559607 +824 54 model.embedding_dim 2.0 +824 54 model.scoring_fct_norm 2.0 +824 54 optimizer.lr 0.011985085349277043 +824 54 training.batch_size 2.0 +824 54 
training.label_smoothing 0.05457027065738992 +824 55 model.embedding_dim 2.0 +824 55 model.scoring_fct_norm 1.0 +824 55 optimizer.lr 0.08872668558775222 +824 55 training.batch_size 1.0 +824 55 training.label_smoothing 0.692114839707103 +824 56 model.embedding_dim 1.0 +824 56 model.scoring_fct_norm 1.0 +824 56 optimizer.lr 0.07796271183383892 +824 56 training.batch_size 1.0 +824 56 training.label_smoothing 0.053206590114099776 +824 57 model.embedding_dim 1.0 +824 57 model.scoring_fct_norm 2.0 +824 57 optimizer.lr 0.04045723507257885 +824 57 training.batch_size 2.0 +824 57 training.label_smoothing 0.36425482335594833 +824 58 model.embedding_dim 2.0 +824 58 model.scoring_fct_norm 1.0 +824 58 optimizer.lr 0.013398533772780082 +824 58 training.batch_size 0.0 +824 58 training.label_smoothing 0.1076100077290925 +824 59 model.embedding_dim 0.0 +824 59 model.scoring_fct_norm 2.0 +824 59 optimizer.lr 0.007717889788207439 +824 59 training.batch_size 2.0 +824 59 training.label_smoothing 0.901274050439065 +824 60 model.embedding_dim 2.0 +824 60 model.scoring_fct_norm 1.0 +824 60 optimizer.lr 0.009872370959599109 +824 60 training.batch_size 1.0 +824 60 training.label_smoothing 0.0014344186031640524 +824 61 model.embedding_dim 2.0 +824 61 model.scoring_fct_norm 1.0 +824 61 optimizer.lr 0.026289478153767035 +824 61 training.batch_size 0.0 +824 61 training.label_smoothing 0.004763780662288537 +824 62 model.embedding_dim 1.0 +824 62 model.scoring_fct_norm 2.0 +824 62 optimizer.lr 0.006547792635937738 +824 62 training.batch_size 0.0 +824 62 training.label_smoothing 0.0018358121988014378 +824 63 model.embedding_dim 2.0 +824 63 model.scoring_fct_norm 2.0 +824 63 optimizer.lr 0.004644447291307587 +824 63 training.batch_size 0.0 +824 63 training.label_smoothing 0.0012195388725980676 +824 64 model.embedding_dim 2.0 +824 64 model.scoring_fct_norm 2.0 +824 64 optimizer.lr 0.017513672891533237 +824 64 training.batch_size 1.0 +824 64 training.label_smoothing 0.004171430173666189 +824 65 
model.embedding_dim 1.0 +824 65 model.scoring_fct_norm 1.0 +824 65 optimizer.lr 0.004083049713594474 +824 65 training.batch_size 2.0 +824 65 training.label_smoothing 0.02703617557249888 +824 66 model.embedding_dim 1.0 +824 66 model.scoring_fct_norm 1.0 +824 66 optimizer.lr 0.005898135908326271 +824 66 training.batch_size 1.0 +824 66 training.label_smoothing 0.009651409102097892 +824 67 model.embedding_dim 0.0 +824 67 model.scoring_fct_norm 2.0 +824 67 optimizer.lr 0.0014337920344841906 +824 67 training.batch_size 2.0 +824 67 training.label_smoothing 0.09274304756681272 +824 68 model.embedding_dim 2.0 +824 68 model.scoring_fct_norm 1.0 +824 68 optimizer.lr 0.002193543238937375 +824 68 training.batch_size 0.0 +824 68 training.label_smoothing 0.031131173859487063 +824 69 model.embedding_dim 2.0 +824 69 model.scoring_fct_norm 1.0 +824 69 optimizer.lr 0.09285799606239137 +824 69 training.batch_size 0.0 +824 69 training.label_smoothing 0.005701557367733202 +824 70 model.embedding_dim 1.0 +824 70 model.scoring_fct_norm 2.0 +824 70 optimizer.lr 0.011720228011729263 +824 70 training.batch_size 0.0 +824 70 training.label_smoothing 0.005548562599410146 +824 71 model.embedding_dim 2.0 +824 71 model.scoring_fct_norm 1.0 +824 71 optimizer.lr 0.05717237240866361 +824 71 training.batch_size 2.0 +824 71 training.label_smoothing 0.0010900371238920664 +824 72 model.embedding_dim 2.0 +824 72 model.scoring_fct_norm 1.0 +824 72 optimizer.lr 0.0012583966421100762 +824 72 training.batch_size 2.0 +824 72 training.label_smoothing 0.03755141347556581 +824 73 model.embedding_dim 1.0 +824 73 model.scoring_fct_norm 2.0 +824 73 optimizer.lr 0.09102873213105074 +824 73 training.batch_size 0.0 +824 73 training.label_smoothing 0.01191795764859326 +824 74 model.embedding_dim 1.0 +824 74 model.scoring_fct_norm 1.0 +824 74 optimizer.lr 0.0031808107808424086 +824 74 training.batch_size 2.0 +824 74 training.label_smoothing 0.006348793121146895 +824 75 model.embedding_dim 1.0 +824 75 
model.scoring_fct_norm 1.0 +824 75 optimizer.lr 0.033750002795422535 +824 75 training.batch_size 2.0 +824 75 training.label_smoothing 0.010971343082874356 +824 76 model.embedding_dim 1.0 +824 76 model.scoring_fct_norm 1.0 +824 76 optimizer.lr 0.02850389391670778 +824 76 training.batch_size 2.0 +824 76 training.label_smoothing 0.008849154961717225 +824 77 model.embedding_dim 2.0 +824 77 model.scoring_fct_norm 1.0 +824 77 optimizer.lr 0.00536093222503356 +824 77 training.batch_size 0.0 +824 77 training.label_smoothing 0.13895659366994756 +824 78 model.embedding_dim 1.0 +824 78 model.scoring_fct_norm 1.0 +824 78 optimizer.lr 0.017083194144154764 +824 78 training.batch_size 2.0 +824 78 training.label_smoothing 0.03628662056109801 +824 79 model.embedding_dim 1.0 +824 79 model.scoring_fct_norm 2.0 +824 79 optimizer.lr 0.07431053975069433 +824 79 training.batch_size 1.0 +824 79 training.label_smoothing 0.3027473158959745 +824 80 model.embedding_dim 2.0 +824 80 model.scoring_fct_norm 1.0 +824 80 optimizer.lr 0.0014628572876348935 +824 80 training.batch_size 1.0 +824 80 training.label_smoothing 0.0011899599522152628 +824 81 model.embedding_dim 0.0 +824 81 model.scoring_fct_norm 1.0 +824 81 optimizer.lr 0.01692287289400852 +824 81 training.batch_size 1.0 +824 81 training.label_smoothing 0.01600635328323478 +824 82 model.embedding_dim 2.0 +824 82 model.scoring_fct_norm 1.0 +824 82 optimizer.lr 0.0034314708959284485 +824 82 training.batch_size 0.0 +824 82 training.label_smoothing 0.0016032220543283637 +824 83 model.embedding_dim 1.0 +824 83 model.scoring_fct_norm 1.0 +824 83 optimizer.lr 0.0035447839951550345 +824 83 training.batch_size 2.0 +824 83 training.label_smoothing 0.02187237824389227 +824 84 model.embedding_dim 0.0 +824 84 model.scoring_fct_norm 2.0 +824 84 optimizer.lr 0.010102684923551706 +824 84 training.batch_size 0.0 +824 84 training.label_smoothing 0.003880954884009191 +824 85 model.embedding_dim 1.0 +824 85 model.scoring_fct_norm 2.0 +824 85 optimizer.lr 
0.002955168634729322 +824 85 training.batch_size 1.0 +824 85 training.label_smoothing 0.5047067807056593 +824 86 model.embedding_dim 2.0 +824 86 model.scoring_fct_norm 1.0 +824 86 optimizer.lr 0.003813500541194347 +824 86 training.batch_size 0.0 +824 86 training.label_smoothing 0.5209988214300771 +824 87 model.embedding_dim 2.0 +824 87 model.scoring_fct_norm 1.0 +824 87 optimizer.lr 0.042429094733370816 +824 87 training.batch_size 2.0 +824 87 training.label_smoothing 0.0010606234698561337 +824 88 model.embedding_dim 2.0 +824 88 model.scoring_fct_norm 2.0 +824 88 optimizer.lr 0.005733546081785456 +824 88 training.batch_size 1.0 +824 88 training.label_smoothing 0.06944125043860418 +824 89 model.embedding_dim 0.0 +824 89 model.scoring_fct_norm 2.0 +824 89 optimizer.lr 0.001992045887279968 +824 89 training.batch_size 0.0 +824 89 training.label_smoothing 0.014576704703187055 +824 90 model.embedding_dim 2.0 +824 90 model.scoring_fct_norm 2.0 +824 90 optimizer.lr 0.003595239120547382 +824 90 training.batch_size 0.0 +824 90 training.label_smoothing 0.00889445824708266 +824 91 model.embedding_dim 1.0 +824 91 model.scoring_fct_norm 2.0 +824 91 optimizer.lr 0.002863553479216511 +824 91 training.batch_size 2.0 +824 91 training.label_smoothing 0.02921532321360696 +824 92 model.embedding_dim 0.0 +824 92 model.scoring_fct_norm 1.0 +824 92 optimizer.lr 0.013636223419788818 +824 92 training.batch_size 2.0 +824 92 training.label_smoothing 0.00437217231490587 +824 93 model.embedding_dim 2.0 +824 93 model.scoring_fct_norm 1.0 +824 93 optimizer.lr 0.028929642958096077 +824 93 training.batch_size 1.0 +824 93 training.label_smoothing 0.12928907208278756 +824 94 model.embedding_dim 2.0 +824 94 model.scoring_fct_norm 1.0 +824 94 optimizer.lr 0.0884869054922064 +824 94 training.batch_size 2.0 +824 94 training.label_smoothing 0.0051687708148648 +824 95 model.embedding_dim 2.0 +824 95 model.scoring_fct_norm 2.0 +824 95 optimizer.lr 0.005810748156255126 +824 95 training.batch_size 2.0 +824 95 
training.label_smoothing 0.08734489314117642 +824 96 model.embedding_dim 0.0 +824 96 model.scoring_fct_norm 1.0 +824 96 optimizer.lr 0.013058314952495008 +824 96 training.batch_size 0.0 +824 96 training.label_smoothing 0.23168477165693488 +824 97 model.embedding_dim 2.0 +824 97 model.scoring_fct_norm 1.0 +824 97 optimizer.lr 0.053819350879692585 +824 97 training.batch_size 2.0 +824 97 training.label_smoothing 0.001101336915403864 +824 98 model.embedding_dim 2.0 +824 98 model.scoring_fct_norm 1.0 +824 98 optimizer.lr 0.05885032288567446 +824 98 training.batch_size 2.0 +824 98 training.label_smoothing 0.3679516914644072 +824 99 model.embedding_dim 2.0 +824 99 model.scoring_fct_norm 2.0 +824 99 optimizer.lr 0.0022114178889912008 +824 99 training.batch_size 2.0 +824 99 training.label_smoothing 0.3862480709269565 +824 100 model.embedding_dim 0.0 +824 100 model.scoring_fct_norm 2.0 +824 100 optimizer.lr 0.030682398825667725 +824 100 training.batch_size 1.0 +824 100 training.label_smoothing 0.04121971621970546 +824 1 dataset """kinships""" +824 1 model """transe""" +824 1 loss """crossentropy""" +824 1 regularizer """no""" +824 1 optimizer """adam""" +824 1 training_loop """lcwa""" +824 1 evaluator """rankbased""" +824 2 dataset """kinships""" +824 2 model """transe""" +824 2 loss """crossentropy""" +824 2 regularizer """no""" +824 2 optimizer """adam""" +824 2 training_loop """lcwa""" +824 2 evaluator """rankbased""" +824 3 dataset """kinships""" +824 3 model """transe""" +824 3 loss """crossentropy""" +824 3 regularizer """no""" +824 3 optimizer """adam""" +824 3 training_loop """lcwa""" +824 3 evaluator """rankbased""" +824 4 dataset """kinships""" +824 4 model """transe""" +824 4 loss """crossentropy""" +824 4 regularizer """no""" +824 4 optimizer """adam""" +824 4 training_loop """lcwa""" +824 4 evaluator """rankbased""" +824 5 dataset """kinships""" +824 5 model """transe""" +824 5 loss """crossentropy""" +824 5 regularizer """no""" +824 5 optimizer """adam""" +824 
5 training_loop """lcwa""" +824 5 evaluator """rankbased""" +824 6 dataset """kinships""" +824 6 model """transe""" +824 6 loss """crossentropy""" +824 6 regularizer """no""" +824 6 optimizer """adam""" +824 6 training_loop """lcwa""" +824 6 evaluator """rankbased""" +824 7 dataset """kinships""" +824 7 model """transe""" +824 7 loss """crossentropy""" +824 7 regularizer """no""" +824 7 optimizer """adam""" +824 7 training_loop """lcwa""" +824 7 evaluator """rankbased""" +824 8 dataset """kinships""" +824 8 model """transe""" +824 8 loss """crossentropy""" +824 8 regularizer """no""" +824 8 optimizer """adam""" +824 8 training_loop """lcwa""" +824 8 evaluator """rankbased""" +824 9 dataset """kinships""" +824 9 model """transe""" +824 9 loss """crossentropy""" +824 9 regularizer """no""" +824 9 optimizer """adam""" +824 9 training_loop """lcwa""" +824 9 evaluator """rankbased""" +824 10 dataset """kinships""" +824 10 model """transe""" +824 10 loss """crossentropy""" +824 10 regularizer """no""" +824 10 optimizer """adam""" +824 10 training_loop """lcwa""" +824 10 evaluator """rankbased""" +824 11 dataset """kinships""" +824 11 model """transe""" +824 11 loss """crossentropy""" +824 11 regularizer """no""" +824 11 optimizer """adam""" +824 11 training_loop """lcwa""" +824 11 evaluator """rankbased""" +824 12 dataset """kinships""" +824 12 model """transe""" +824 12 loss """crossentropy""" +824 12 regularizer """no""" +824 12 optimizer """adam""" +824 12 training_loop """lcwa""" +824 12 evaluator """rankbased""" +824 13 dataset """kinships""" +824 13 model """transe""" +824 13 loss """crossentropy""" +824 13 regularizer """no""" +824 13 optimizer """adam""" +824 13 training_loop """lcwa""" +824 13 evaluator """rankbased""" +824 14 dataset """kinships""" +824 14 model """transe""" +824 14 loss """crossentropy""" +824 14 regularizer """no""" +824 14 optimizer """adam""" +824 14 training_loop """lcwa""" +824 14 evaluator """rankbased""" +824 15 dataset """kinships""" 
+824 15 model """transe""" +824 15 loss """crossentropy""" +824 15 regularizer """no""" +824 15 optimizer """adam""" +824 15 training_loop """lcwa""" +824 15 evaluator """rankbased""" +824 16 dataset """kinships""" +824 16 model """transe""" +824 16 loss """crossentropy""" +824 16 regularizer """no""" +824 16 optimizer """adam""" +824 16 training_loop """lcwa""" +824 16 evaluator """rankbased""" +824 17 dataset """kinships""" +824 17 model """transe""" +824 17 loss """crossentropy""" +824 17 regularizer """no""" +824 17 optimizer """adam""" +824 17 training_loop """lcwa""" +824 17 evaluator """rankbased""" +824 18 dataset """kinships""" +824 18 model """transe""" +824 18 loss """crossentropy""" +824 18 regularizer """no""" +824 18 optimizer """adam""" +824 18 training_loop """lcwa""" +824 18 evaluator """rankbased""" +824 19 dataset """kinships""" +824 19 model """transe""" +824 19 loss """crossentropy""" +824 19 regularizer """no""" +824 19 optimizer """adam""" +824 19 training_loop """lcwa""" +824 19 evaluator """rankbased""" +824 20 dataset """kinships""" +824 20 model """transe""" +824 20 loss """crossentropy""" +824 20 regularizer """no""" +824 20 optimizer """adam""" +824 20 training_loop """lcwa""" +824 20 evaluator """rankbased""" +824 21 dataset """kinships""" +824 21 model """transe""" +824 21 loss """crossentropy""" +824 21 regularizer """no""" +824 21 optimizer """adam""" +824 21 training_loop """lcwa""" +824 21 evaluator """rankbased""" +824 22 dataset """kinships""" +824 22 model """transe""" +824 22 loss """crossentropy""" +824 22 regularizer """no""" +824 22 optimizer """adam""" +824 22 training_loop """lcwa""" +824 22 evaluator """rankbased""" +824 23 dataset """kinships""" +824 23 model """transe""" +824 23 loss """crossentropy""" +824 23 regularizer """no""" +824 23 optimizer """adam""" +824 23 training_loop """lcwa""" +824 23 evaluator """rankbased""" +824 24 dataset """kinships""" +824 24 model """transe""" +824 24 loss """crossentropy""" +824 
24 regularizer """no""" +824 24 optimizer """adam""" +824 24 training_loop """lcwa""" +824 24 evaluator """rankbased""" +824 25 dataset """kinships""" +824 25 model """transe""" +824 25 loss """crossentropy""" +824 25 regularizer """no""" +824 25 optimizer """adam""" +824 25 training_loop """lcwa""" +824 25 evaluator """rankbased""" +824 26 dataset """kinships""" +824 26 model """transe""" +824 26 loss """crossentropy""" +824 26 regularizer """no""" +824 26 optimizer """adam""" +824 26 training_loop """lcwa""" +824 26 evaluator """rankbased""" +824 27 dataset """kinships""" +824 27 model """transe""" +824 27 loss """crossentropy""" +824 27 regularizer """no""" +824 27 optimizer """adam""" +824 27 training_loop """lcwa""" +824 27 evaluator """rankbased""" +824 28 dataset """kinships""" +824 28 model """transe""" +824 28 loss """crossentropy""" +824 28 regularizer """no""" +824 28 optimizer """adam""" +824 28 training_loop """lcwa""" +824 28 evaluator """rankbased""" +824 29 dataset """kinships""" +824 29 model """transe""" +824 29 loss """crossentropy""" +824 29 regularizer """no""" +824 29 optimizer """adam""" +824 29 training_loop """lcwa""" +824 29 evaluator """rankbased""" +824 30 dataset """kinships""" +824 30 model """transe""" +824 30 loss """crossentropy""" +824 30 regularizer """no""" +824 30 optimizer """adam""" +824 30 training_loop """lcwa""" +824 30 evaluator """rankbased""" +824 31 dataset """kinships""" +824 31 model """transe""" +824 31 loss """crossentropy""" +824 31 regularizer """no""" +824 31 optimizer """adam""" +824 31 training_loop """lcwa""" +824 31 evaluator """rankbased""" +824 32 dataset """kinships""" +824 32 model """transe""" +824 32 loss """crossentropy""" +824 32 regularizer """no""" +824 32 optimizer """adam""" +824 32 training_loop """lcwa""" +824 32 evaluator """rankbased""" +824 33 dataset """kinships""" +824 33 model """transe""" +824 33 loss """crossentropy""" +824 33 regularizer """no""" +824 33 optimizer """adam""" +824 33 
training_loop """lcwa""" +824 33 evaluator """rankbased""" +824 34 dataset """kinships""" +824 34 model """transe""" +824 34 loss """crossentropy""" +824 34 regularizer """no""" +824 34 optimizer """adam""" +824 34 training_loop """lcwa""" +824 34 evaluator """rankbased""" +824 35 dataset """kinships""" +824 35 model """transe""" +824 35 loss """crossentropy""" +824 35 regularizer """no""" +824 35 optimizer """adam""" +824 35 training_loop """lcwa""" +824 35 evaluator """rankbased""" +824 36 dataset """kinships""" +824 36 model """transe""" +824 36 loss """crossentropy""" +824 36 regularizer """no""" +824 36 optimizer """adam""" +824 36 training_loop """lcwa""" +824 36 evaluator """rankbased""" +824 37 dataset """kinships""" +824 37 model """transe""" +824 37 loss """crossentropy""" +824 37 regularizer """no""" +824 37 optimizer """adam""" +824 37 training_loop """lcwa""" +824 37 evaluator """rankbased""" +824 38 dataset """kinships""" +824 38 model """transe""" +824 38 loss """crossentropy""" +824 38 regularizer """no""" +824 38 optimizer """adam""" +824 38 training_loop """lcwa""" +824 38 evaluator """rankbased""" +824 39 dataset """kinships""" +824 39 model """transe""" +824 39 loss """crossentropy""" +824 39 regularizer """no""" +824 39 optimizer """adam""" +824 39 training_loop """lcwa""" +824 39 evaluator """rankbased""" +824 40 dataset """kinships""" +824 40 model """transe""" +824 40 loss """crossentropy""" +824 40 regularizer """no""" +824 40 optimizer """adam""" +824 40 training_loop """lcwa""" +824 40 evaluator """rankbased""" +824 41 dataset """kinships""" +824 41 model """transe""" +824 41 loss """crossentropy""" +824 41 regularizer """no""" +824 41 optimizer """adam""" +824 41 training_loop """lcwa""" +824 41 evaluator """rankbased""" +824 42 dataset """kinships""" +824 42 model """transe""" +824 42 loss """crossentropy""" +824 42 regularizer """no""" +824 42 optimizer """adam""" +824 42 training_loop """lcwa""" +824 42 evaluator """rankbased""" +824 
43 dataset """kinships""" +824 43 model """transe""" +824 43 loss """crossentropy""" +824 43 regularizer """no""" +824 43 optimizer """adam""" +824 43 training_loop """lcwa""" +824 43 evaluator """rankbased""" +824 44 dataset """kinships""" +824 44 model """transe""" +824 44 loss """crossentropy""" +824 44 regularizer """no""" +824 44 optimizer """adam""" +824 44 training_loop """lcwa""" +824 44 evaluator """rankbased""" +824 45 dataset """kinships""" +824 45 model """transe""" +824 45 loss """crossentropy""" +824 45 regularizer """no""" +824 45 optimizer """adam""" +824 45 training_loop """lcwa""" +824 45 evaluator """rankbased""" +824 46 dataset """kinships""" +824 46 model """transe""" +824 46 loss """crossentropy""" +824 46 regularizer """no""" +824 46 optimizer """adam""" +824 46 training_loop """lcwa""" +824 46 evaluator """rankbased""" +824 47 dataset """kinships""" +824 47 model """transe""" +824 47 loss """crossentropy""" +824 47 regularizer """no""" +824 47 optimizer """adam""" +824 47 training_loop """lcwa""" +824 47 evaluator """rankbased""" +824 48 dataset """kinships""" +824 48 model """transe""" +824 48 loss """crossentropy""" +824 48 regularizer """no""" +824 48 optimizer """adam""" +824 48 training_loop """lcwa""" +824 48 evaluator """rankbased""" +824 49 dataset """kinships""" +824 49 model """transe""" +824 49 loss """crossentropy""" +824 49 regularizer """no""" +824 49 optimizer """adam""" +824 49 training_loop """lcwa""" +824 49 evaluator """rankbased""" +824 50 dataset """kinships""" +824 50 model """transe""" +824 50 loss """crossentropy""" +824 50 regularizer """no""" +824 50 optimizer """adam""" +824 50 training_loop """lcwa""" +824 50 evaluator """rankbased""" +824 51 dataset """kinships""" +824 51 model """transe""" +824 51 loss """crossentropy""" +824 51 regularizer """no""" +824 51 optimizer """adam""" +824 51 training_loop """lcwa""" +824 51 evaluator """rankbased""" +824 52 dataset """kinships""" +824 52 model """transe""" +824 52 
loss """crossentropy""" +824 52 regularizer """no""" +824 52 optimizer """adam""" +824 52 training_loop """lcwa""" +824 52 evaluator """rankbased""" +824 53 dataset """kinships""" +824 53 model """transe""" +824 53 loss """crossentropy""" +824 53 regularizer """no""" +824 53 optimizer """adam""" +824 53 training_loop """lcwa""" +824 53 evaluator """rankbased""" +824 54 dataset """kinships""" +824 54 model """transe""" +824 54 loss """crossentropy""" +824 54 regularizer """no""" +824 54 optimizer """adam""" +824 54 training_loop """lcwa""" +824 54 evaluator """rankbased""" +824 55 dataset """kinships""" +824 55 model """transe""" +824 55 loss """crossentropy""" +824 55 regularizer """no""" +824 55 optimizer """adam""" +824 55 training_loop """lcwa""" +824 55 evaluator """rankbased""" +824 56 dataset """kinships""" +824 56 model """transe""" +824 56 loss """crossentropy""" +824 56 regularizer """no""" +824 56 optimizer """adam""" +824 56 training_loop """lcwa""" +824 56 evaluator """rankbased""" +824 57 dataset """kinships""" +824 57 model """transe""" +824 57 loss """crossentropy""" +824 57 regularizer """no""" +824 57 optimizer """adam""" +824 57 training_loop """lcwa""" +824 57 evaluator """rankbased""" +824 58 dataset """kinships""" +824 58 model """transe""" +824 58 loss """crossentropy""" +824 58 regularizer """no""" +824 58 optimizer """adam""" +824 58 training_loop """lcwa""" +824 58 evaluator """rankbased""" +824 59 dataset """kinships""" +824 59 model """transe""" +824 59 loss """crossentropy""" +824 59 regularizer """no""" +824 59 optimizer """adam""" +824 59 training_loop """lcwa""" +824 59 evaluator """rankbased""" +824 60 dataset """kinships""" +824 60 model """transe""" +824 60 loss """crossentropy""" +824 60 regularizer """no""" +824 60 optimizer """adam""" +824 60 training_loop """lcwa""" +824 60 evaluator """rankbased""" +824 61 dataset """kinships""" +824 61 model """transe""" +824 61 loss """crossentropy""" +824 61 regularizer """no""" +824 61 
optimizer """adam""" +824 61 training_loop """lcwa""" +824 61 evaluator """rankbased""" +824 62 dataset """kinships""" +824 62 model """transe""" +824 62 loss """crossentropy""" +824 62 regularizer """no""" +824 62 optimizer """adam""" +824 62 training_loop """lcwa""" +824 62 evaluator """rankbased""" +824 63 dataset """kinships""" +824 63 model """transe""" +824 63 loss """crossentropy""" +824 63 regularizer """no""" +824 63 optimizer """adam""" +824 63 training_loop """lcwa""" +824 63 evaluator """rankbased""" +824 64 dataset """kinships""" +824 64 model """transe""" +824 64 loss """crossentropy""" +824 64 regularizer """no""" +824 64 optimizer """adam""" +824 64 training_loop """lcwa""" +824 64 evaluator """rankbased""" +824 65 dataset """kinships""" +824 65 model """transe""" +824 65 loss """crossentropy""" +824 65 regularizer """no""" +824 65 optimizer """adam""" +824 65 training_loop """lcwa""" +824 65 evaluator """rankbased""" +824 66 dataset """kinships""" +824 66 model """transe""" +824 66 loss """crossentropy""" +824 66 regularizer """no""" +824 66 optimizer """adam""" +824 66 training_loop """lcwa""" +824 66 evaluator """rankbased""" +824 67 dataset """kinships""" +824 67 model """transe""" +824 67 loss """crossentropy""" +824 67 regularizer """no""" +824 67 optimizer """adam""" +824 67 training_loop """lcwa""" +824 67 evaluator """rankbased""" +824 68 dataset """kinships""" +824 68 model """transe""" +824 68 loss """crossentropy""" +824 68 regularizer """no""" +824 68 optimizer """adam""" +824 68 training_loop """lcwa""" +824 68 evaluator """rankbased""" +824 69 dataset """kinships""" +824 69 model """transe""" +824 69 loss """crossentropy""" +824 69 regularizer """no""" +824 69 optimizer """adam""" +824 69 training_loop """lcwa""" +824 69 evaluator """rankbased""" +824 70 dataset """kinships""" +824 70 model """transe""" +824 70 loss """crossentropy""" +824 70 regularizer """no""" +824 70 optimizer """adam""" +824 70 training_loop """lcwa""" +824 70 
evaluator """rankbased""" +824 71 dataset """kinships""" +824 71 model """transe""" +824 71 loss """crossentropy""" +824 71 regularizer """no""" +824 71 optimizer """adam""" +824 71 training_loop """lcwa""" +824 71 evaluator """rankbased""" +824 72 dataset """kinships""" +824 72 model """transe""" +824 72 loss """crossentropy""" +824 72 regularizer """no""" +824 72 optimizer """adam""" +824 72 training_loop """lcwa""" +824 72 evaluator """rankbased""" +824 73 dataset """kinships""" +824 73 model """transe""" +824 73 loss """crossentropy""" +824 73 regularizer """no""" +824 73 optimizer """adam""" +824 73 training_loop """lcwa""" +824 73 evaluator """rankbased""" +824 74 dataset """kinships""" +824 74 model """transe""" +824 74 loss """crossentropy""" +824 74 regularizer """no""" +824 74 optimizer """adam""" +824 74 training_loop """lcwa""" +824 74 evaluator """rankbased""" +824 75 dataset """kinships""" +824 75 model """transe""" +824 75 loss """crossentropy""" +824 75 regularizer """no""" +824 75 optimizer """adam""" +824 75 training_loop """lcwa""" +824 75 evaluator """rankbased""" +824 76 dataset """kinships""" +824 76 model """transe""" +824 76 loss """crossentropy""" +824 76 regularizer """no""" +824 76 optimizer """adam""" +824 76 training_loop """lcwa""" +824 76 evaluator """rankbased""" +824 77 dataset """kinships""" +824 77 model """transe""" +824 77 loss """crossentropy""" +824 77 regularizer """no""" +824 77 optimizer """adam""" +824 77 training_loop """lcwa""" +824 77 evaluator """rankbased""" +824 78 dataset """kinships""" +824 78 model """transe""" +824 78 loss """crossentropy""" +824 78 regularizer """no""" +824 78 optimizer """adam""" +824 78 training_loop """lcwa""" +824 78 evaluator """rankbased""" +824 79 dataset """kinships""" +824 79 model """transe""" +824 79 loss """crossentropy""" +824 79 regularizer """no""" +824 79 optimizer """adam""" +824 79 training_loop """lcwa""" +824 79 evaluator """rankbased""" +824 80 dataset """kinships""" +824 80 
model """transe""" +824 80 loss """crossentropy""" +824 80 regularizer """no""" +824 80 optimizer """adam""" +824 80 training_loop """lcwa""" +824 80 evaluator """rankbased""" +824 81 dataset """kinships""" +824 81 model """transe""" +824 81 loss """crossentropy""" +824 81 regularizer """no""" +824 81 optimizer """adam""" +824 81 training_loop """lcwa""" +824 81 evaluator """rankbased""" +824 82 dataset """kinships""" +824 82 model """transe""" +824 82 loss """crossentropy""" +824 82 regularizer """no""" +824 82 optimizer """adam""" +824 82 training_loop """lcwa""" +824 82 evaluator """rankbased""" +824 83 dataset """kinships""" +824 83 model """transe""" +824 83 loss """crossentropy""" +824 83 regularizer """no""" +824 83 optimizer """adam""" +824 83 training_loop """lcwa""" +824 83 evaluator """rankbased""" +824 84 dataset """kinships""" +824 84 model """transe""" +824 84 loss """crossentropy""" +824 84 regularizer """no""" +824 84 optimizer """adam""" +824 84 training_loop """lcwa""" +824 84 evaluator """rankbased""" +824 85 dataset """kinships""" +824 85 model """transe""" +824 85 loss """crossentropy""" +824 85 regularizer """no""" +824 85 optimizer """adam""" +824 85 training_loop """lcwa""" +824 85 evaluator """rankbased""" +824 86 dataset """kinships""" +824 86 model """transe""" +824 86 loss """crossentropy""" +824 86 regularizer """no""" +824 86 optimizer """adam""" +824 86 training_loop """lcwa""" +824 86 evaluator """rankbased""" +824 87 dataset """kinships""" +824 87 model """transe""" +824 87 loss """crossentropy""" +824 87 regularizer """no""" +824 87 optimizer """adam""" +824 87 training_loop """lcwa""" +824 87 evaluator """rankbased""" +824 88 dataset """kinships""" +824 88 model """transe""" +824 88 loss """crossentropy""" +824 88 regularizer """no""" +824 88 optimizer """adam""" +824 88 training_loop """lcwa""" +824 88 evaluator """rankbased""" +824 89 dataset """kinships""" +824 89 model """transe""" +824 89 loss """crossentropy""" +824 89 
regularizer """no""" +824 89 optimizer """adam""" +824 89 training_loop """lcwa""" +824 89 evaluator """rankbased""" +824 90 dataset """kinships""" +824 90 model """transe""" +824 90 loss """crossentropy""" +824 90 regularizer """no""" +824 90 optimizer """adam""" +824 90 training_loop """lcwa""" +824 90 evaluator """rankbased""" +824 91 dataset """kinships""" +824 91 model """transe""" +824 91 loss """crossentropy""" +824 91 regularizer """no""" +824 91 optimizer """adam""" +824 91 training_loop """lcwa""" +824 91 evaluator """rankbased""" +824 92 dataset """kinships""" +824 92 model """transe""" +824 92 loss """crossentropy""" +824 92 regularizer """no""" +824 92 optimizer """adam""" +824 92 training_loop """lcwa""" +824 92 evaluator """rankbased""" +824 93 dataset """kinships""" +824 93 model """transe""" +824 93 loss """crossentropy""" +824 93 regularizer """no""" +824 93 optimizer """adam""" +824 93 training_loop """lcwa""" +824 93 evaluator """rankbased""" +824 94 dataset """kinships""" +824 94 model """transe""" +824 94 loss """crossentropy""" +824 94 regularizer """no""" +824 94 optimizer """adam""" +824 94 training_loop """lcwa""" +824 94 evaluator """rankbased""" +824 95 dataset """kinships""" +824 95 model """transe""" +824 95 loss """crossentropy""" +824 95 regularizer """no""" +824 95 optimizer """adam""" +824 95 training_loop """lcwa""" +824 95 evaluator """rankbased""" +824 96 dataset """kinships""" +824 96 model """transe""" +824 96 loss """crossentropy""" +824 96 regularizer """no""" +824 96 optimizer """adam""" +824 96 training_loop """lcwa""" +824 96 evaluator """rankbased""" +824 97 dataset """kinships""" +824 97 model """transe""" +824 97 loss """crossentropy""" +824 97 regularizer """no""" +824 97 optimizer """adam""" +824 97 training_loop """lcwa""" +824 97 evaluator """rankbased""" +824 98 dataset """kinships""" +824 98 model """transe""" +824 98 loss """crossentropy""" +824 98 regularizer """no""" +824 98 optimizer """adam""" +824 98 
training_loop """lcwa""" +824 98 evaluator """rankbased""" +824 99 dataset """kinships""" +824 99 model """transe""" +824 99 loss """crossentropy""" +824 99 regularizer """no""" +824 99 optimizer """adam""" +824 99 training_loop """lcwa""" +824 99 evaluator """rankbased""" +824 100 dataset """kinships""" +824 100 model """transe""" +824 100 loss """crossentropy""" +824 100 regularizer """no""" +824 100 optimizer """adam""" +824 100 training_loop """lcwa""" +824 100 evaluator """rankbased""" +825 1 model.embedding_dim 1.0 +825 1 model.scoring_fct_norm 1.0 +825 1 optimizer.lr 0.034368219695628754 +825 1 negative_sampler.num_negs_per_pos 28.0 +825 1 training.batch_size 2.0 +825 2 model.embedding_dim 0.0 +825 2 model.scoring_fct_norm 2.0 +825 2 optimizer.lr 0.0044617365919101155 +825 2 negative_sampler.num_negs_per_pos 87.0 +825 2 training.batch_size 1.0 +825 3 model.embedding_dim 2.0 +825 3 model.scoring_fct_norm 1.0 +825 3 optimizer.lr 0.06278231797962076 +825 3 negative_sampler.num_negs_per_pos 70.0 +825 3 training.batch_size 1.0 +825 4 model.embedding_dim 1.0 +825 4 model.scoring_fct_norm 2.0 +825 4 optimizer.lr 0.05232408765114301 +825 4 negative_sampler.num_negs_per_pos 12.0 +825 4 training.batch_size 0.0 +825 5 model.embedding_dim 1.0 +825 5 model.scoring_fct_norm 2.0 +825 5 optimizer.lr 0.0018540519418155542 +825 5 negative_sampler.num_negs_per_pos 75.0 +825 5 training.batch_size 0.0 +825 6 model.embedding_dim 1.0 +825 6 model.scoring_fct_norm 2.0 +825 6 optimizer.lr 0.05621574539294106 +825 6 negative_sampler.num_negs_per_pos 97.0 +825 6 training.batch_size 0.0 +825 7 model.embedding_dim 2.0 +825 7 model.scoring_fct_norm 1.0 +825 7 optimizer.lr 0.0024875368420534416 +825 7 negative_sampler.num_negs_per_pos 41.0 +825 7 training.batch_size 1.0 +825 8 model.embedding_dim 1.0 +825 8 model.scoring_fct_norm 2.0 +825 8 optimizer.lr 0.006755436923844337 +825 8 negative_sampler.num_negs_per_pos 51.0 +825 8 training.batch_size 0.0 +825 9 model.embedding_dim 1.0 +825 9 
model.scoring_fct_norm 2.0 +825 9 optimizer.lr 0.023313548507261955 +825 9 negative_sampler.num_negs_per_pos 42.0 +825 9 training.batch_size 0.0 +825 10 model.embedding_dim 1.0 +825 10 model.scoring_fct_norm 1.0 +825 10 optimizer.lr 0.007582525312527083 +825 10 negative_sampler.num_negs_per_pos 38.0 +825 10 training.batch_size 1.0 +825 11 model.embedding_dim 2.0 +825 11 model.scoring_fct_norm 1.0 +825 11 optimizer.lr 0.01779458736991571 +825 11 negative_sampler.num_negs_per_pos 60.0 +825 11 training.batch_size 1.0 +825 12 model.embedding_dim 0.0 +825 12 model.scoring_fct_norm 2.0 +825 12 optimizer.lr 0.0835028130447914 +825 12 negative_sampler.num_negs_per_pos 35.0 +825 12 training.batch_size 2.0 +825 13 model.embedding_dim 0.0 +825 13 model.scoring_fct_norm 2.0 +825 13 optimizer.lr 0.0025719969766147283 +825 13 negative_sampler.num_negs_per_pos 57.0 +825 13 training.batch_size 1.0 +825 14 model.embedding_dim 0.0 +825 14 model.scoring_fct_norm 2.0 +825 14 optimizer.lr 0.017006697531585624 +825 14 negative_sampler.num_negs_per_pos 1.0 +825 14 training.batch_size 0.0 +825 15 model.embedding_dim 1.0 +825 15 model.scoring_fct_norm 2.0 +825 15 optimizer.lr 0.005617458871678613 +825 15 negative_sampler.num_negs_per_pos 27.0 +825 15 training.batch_size 0.0 +825 16 model.embedding_dim 2.0 +825 16 model.scoring_fct_norm 2.0 +825 16 optimizer.lr 0.014649722375166502 +825 16 negative_sampler.num_negs_per_pos 36.0 +825 16 training.batch_size 2.0 +825 17 model.embedding_dim 0.0 +825 17 model.scoring_fct_norm 2.0 +825 17 optimizer.lr 0.012297623298516228 +825 17 negative_sampler.num_negs_per_pos 58.0 +825 17 training.batch_size 0.0 +825 18 model.embedding_dim 1.0 +825 18 model.scoring_fct_norm 2.0 +825 18 optimizer.lr 0.008288236817394035 +825 18 negative_sampler.num_negs_per_pos 42.0 +825 18 training.batch_size 2.0 +825 19 model.embedding_dim 1.0 +825 19 model.scoring_fct_norm 2.0 +825 19 optimizer.lr 0.019695132651140923 +825 19 negative_sampler.num_negs_per_pos 18.0 +825 19 
training.batch_size 1.0 +825 20 model.embedding_dim 2.0 +825 20 model.scoring_fct_norm 2.0 +825 20 optimizer.lr 0.011630732700434689 +825 20 negative_sampler.num_negs_per_pos 36.0 +825 20 training.batch_size 2.0 +825 21 model.embedding_dim 0.0 +825 21 model.scoring_fct_norm 1.0 +825 21 optimizer.lr 0.06807862016697709 +825 21 negative_sampler.num_negs_per_pos 2.0 +825 21 training.batch_size 2.0 +825 22 model.embedding_dim 2.0 +825 22 model.scoring_fct_norm 1.0 +825 22 optimizer.lr 0.02547824828945486 +825 22 negative_sampler.num_negs_per_pos 34.0 +825 22 training.batch_size 0.0 +825 23 model.embedding_dim 0.0 +825 23 model.scoring_fct_norm 1.0 +825 23 optimizer.lr 0.0061952907919079715 +825 23 negative_sampler.num_negs_per_pos 65.0 +825 23 training.batch_size 0.0 +825 24 model.embedding_dim 2.0 +825 24 model.scoring_fct_norm 2.0 +825 24 optimizer.lr 0.009049251455707445 +825 24 negative_sampler.num_negs_per_pos 86.0 +825 24 training.batch_size 2.0 +825 25 model.embedding_dim 0.0 +825 25 model.scoring_fct_norm 2.0 +825 25 optimizer.lr 0.05555344423677207 +825 25 negative_sampler.num_negs_per_pos 78.0 +825 25 training.batch_size 2.0 +825 26 model.embedding_dim 1.0 +825 26 model.scoring_fct_norm 2.0 +825 26 optimizer.lr 0.020082567072212072 +825 26 negative_sampler.num_negs_per_pos 29.0 +825 26 training.batch_size 1.0 +825 27 model.embedding_dim 1.0 +825 27 model.scoring_fct_norm 1.0 +825 27 optimizer.lr 0.09850768690017198 +825 27 negative_sampler.num_negs_per_pos 65.0 +825 27 training.batch_size 0.0 +825 28 model.embedding_dim 2.0 +825 28 model.scoring_fct_norm 1.0 +825 28 optimizer.lr 0.055983067162142776 +825 28 negative_sampler.num_negs_per_pos 7.0 +825 28 training.batch_size 0.0 +825 29 model.embedding_dim 2.0 +825 29 model.scoring_fct_norm 2.0 +825 29 optimizer.lr 0.0028403924834372206 +825 29 negative_sampler.num_negs_per_pos 67.0 +825 29 training.batch_size 0.0 +825 30 model.embedding_dim 2.0 +825 30 model.scoring_fct_norm 1.0 +825 30 optimizer.lr 
0.005013656253045319 +825 30 negative_sampler.num_negs_per_pos 68.0 +825 30 training.batch_size 1.0 +825 31 model.embedding_dim 1.0 +825 31 model.scoring_fct_norm 1.0 +825 31 optimizer.lr 0.0013925545687234826 +825 31 negative_sampler.num_negs_per_pos 82.0 +825 31 training.batch_size 1.0 +825 32 model.embedding_dim 0.0 +825 32 model.scoring_fct_norm 2.0 +825 32 optimizer.lr 0.028621186208835615 +825 32 negative_sampler.num_negs_per_pos 87.0 +825 32 training.batch_size 1.0 +825 33 model.embedding_dim 2.0 +825 33 model.scoring_fct_norm 2.0 +825 33 optimizer.lr 0.005707376602096541 +825 33 negative_sampler.num_negs_per_pos 96.0 +825 33 training.batch_size 0.0 +825 34 model.embedding_dim 0.0 +825 34 model.scoring_fct_norm 1.0 +825 34 optimizer.lr 0.05980247136900183 +825 34 negative_sampler.num_negs_per_pos 71.0 +825 34 training.batch_size 2.0 +825 35 model.embedding_dim 2.0 +825 35 model.scoring_fct_norm 1.0 +825 35 optimizer.lr 0.06130673261664726 +825 35 negative_sampler.num_negs_per_pos 22.0 +825 35 training.batch_size 0.0 +825 36 model.embedding_dim 2.0 +825 36 model.scoring_fct_norm 1.0 +825 36 optimizer.lr 0.019616378697510928 +825 36 negative_sampler.num_negs_per_pos 51.0 +825 36 training.batch_size 2.0 +825 37 model.embedding_dim 0.0 +825 37 model.scoring_fct_norm 1.0 +825 37 optimizer.lr 0.01864176023520159 +825 37 negative_sampler.num_negs_per_pos 46.0 +825 37 training.batch_size 2.0 +825 38 model.embedding_dim 1.0 +825 38 model.scoring_fct_norm 2.0 +825 38 optimizer.lr 0.0969832838072056 +825 38 negative_sampler.num_negs_per_pos 33.0 +825 38 training.batch_size 0.0 +825 39 model.embedding_dim 1.0 +825 39 model.scoring_fct_norm 2.0 +825 39 optimizer.lr 0.003353318482772109 +825 39 negative_sampler.num_negs_per_pos 41.0 +825 39 training.batch_size 2.0 +825 40 model.embedding_dim 0.0 +825 40 model.scoring_fct_norm 1.0 +825 40 optimizer.lr 0.022136260832987302 +825 40 negative_sampler.num_negs_per_pos 77.0 +825 40 training.batch_size 2.0 +825 41 
model.embedding_dim 2.0 +825 41 model.scoring_fct_norm 1.0 +825 41 optimizer.lr 0.011506646597463695 +825 41 negative_sampler.num_negs_per_pos 60.0 +825 41 training.batch_size 1.0 +825 42 model.embedding_dim 0.0 +825 42 model.scoring_fct_norm 2.0 +825 42 optimizer.lr 0.0031195152166565536 +825 42 negative_sampler.num_negs_per_pos 99.0 +825 42 training.batch_size 1.0 +825 43 model.embedding_dim 0.0 +825 43 model.scoring_fct_norm 2.0 +825 43 optimizer.lr 0.002372264246239307 +825 43 negative_sampler.num_negs_per_pos 46.0 +825 43 training.batch_size 2.0 +825 44 model.embedding_dim 1.0 +825 44 model.scoring_fct_norm 2.0 +825 44 optimizer.lr 0.0038421169731970243 +825 44 negative_sampler.num_negs_per_pos 3.0 +825 44 training.batch_size 0.0 +825 45 model.embedding_dim 0.0 +825 45 model.scoring_fct_norm 2.0 +825 45 optimizer.lr 0.0011015300365723767 +825 45 negative_sampler.num_negs_per_pos 53.0 +825 45 training.batch_size 0.0 +825 46 model.embedding_dim 0.0 +825 46 model.scoring_fct_norm 1.0 +825 46 optimizer.lr 0.0016085645444165258 +825 46 negative_sampler.num_negs_per_pos 94.0 +825 46 training.batch_size 1.0 +825 47 model.embedding_dim 2.0 +825 47 model.scoring_fct_norm 1.0 +825 47 optimizer.lr 0.002086628207093311 +825 47 negative_sampler.num_negs_per_pos 98.0 +825 47 training.batch_size 0.0 +825 48 model.embedding_dim 1.0 +825 48 model.scoring_fct_norm 1.0 +825 48 optimizer.lr 0.08285037389389922 +825 48 negative_sampler.num_negs_per_pos 78.0 +825 48 training.batch_size 2.0 +825 49 model.embedding_dim 1.0 +825 49 model.scoring_fct_norm 2.0 +825 49 optimizer.lr 0.00285015258357611 +825 49 negative_sampler.num_negs_per_pos 1.0 +825 49 training.batch_size 2.0 +825 50 model.embedding_dim 1.0 +825 50 model.scoring_fct_norm 2.0 +825 50 optimizer.lr 0.0014444147650778025 +825 50 negative_sampler.num_negs_per_pos 91.0 +825 50 training.batch_size 0.0 +825 51 model.embedding_dim 0.0 +825 51 model.scoring_fct_norm 2.0 +825 51 optimizer.lr 0.018671953574009217 +825 51 
negative_sampler.num_negs_per_pos 94.0 +825 51 training.batch_size 2.0 +825 52 model.embedding_dim 1.0 +825 52 model.scoring_fct_norm 1.0 +825 52 optimizer.lr 0.00403068715581234 +825 52 negative_sampler.num_negs_per_pos 57.0 +825 52 training.batch_size 1.0 +825 53 model.embedding_dim 1.0 +825 53 model.scoring_fct_norm 1.0 +825 53 optimizer.lr 0.08318667586183216 +825 53 negative_sampler.num_negs_per_pos 89.0 +825 53 training.batch_size 0.0 +825 54 model.embedding_dim 2.0 +825 54 model.scoring_fct_norm 2.0 +825 54 optimizer.lr 0.0016882537219833906 +825 54 negative_sampler.num_negs_per_pos 77.0 +825 54 training.batch_size 2.0 +825 55 model.embedding_dim 1.0 +825 55 model.scoring_fct_norm 1.0 +825 55 optimizer.lr 0.01760259125212249 +825 55 negative_sampler.num_negs_per_pos 59.0 +825 55 training.batch_size 1.0 +825 56 model.embedding_dim 2.0 +825 56 model.scoring_fct_norm 2.0 +825 56 optimizer.lr 0.0019689772029645356 +825 56 negative_sampler.num_negs_per_pos 76.0 +825 56 training.batch_size 1.0 +825 57 model.embedding_dim 0.0 +825 57 model.scoring_fct_norm 2.0 +825 57 optimizer.lr 0.01753399631461435 +825 57 negative_sampler.num_negs_per_pos 89.0 +825 57 training.batch_size 2.0 +825 58 model.embedding_dim 2.0 +825 58 model.scoring_fct_norm 1.0 +825 58 optimizer.lr 0.026957956086941563 +825 58 negative_sampler.num_negs_per_pos 79.0 +825 58 training.batch_size 1.0 +825 59 model.embedding_dim 0.0 +825 59 model.scoring_fct_norm 1.0 +825 59 optimizer.lr 0.004618612320168148 +825 59 negative_sampler.num_negs_per_pos 41.0 +825 59 training.batch_size 2.0 +825 60 model.embedding_dim 2.0 +825 60 model.scoring_fct_norm 2.0 +825 60 optimizer.lr 0.0033394449940191942 +825 60 negative_sampler.num_negs_per_pos 65.0 +825 60 training.batch_size 1.0 +825 61 model.embedding_dim 0.0 +825 61 model.scoring_fct_norm 1.0 +825 61 optimizer.lr 0.06304158849744812 +825 61 negative_sampler.num_negs_per_pos 48.0 +825 61 training.batch_size 0.0 +825 62 model.embedding_dim 1.0 +825 62 
model.scoring_fct_norm 1.0 +825 62 optimizer.lr 0.017761229657728708 +825 62 negative_sampler.num_negs_per_pos 76.0 +825 62 training.batch_size 1.0 +825 63 model.embedding_dim 0.0 +825 63 model.scoring_fct_norm 2.0 +825 63 optimizer.lr 0.004466752276786559 +825 63 negative_sampler.num_negs_per_pos 27.0 +825 63 training.batch_size 1.0 +825 64 model.embedding_dim 1.0 +825 64 model.scoring_fct_norm 2.0 +825 64 optimizer.lr 0.08766112084194815 +825 64 negative_sampler.num_negs_per_pos 25.0 +825 64 training.batch_size 1.0 +825 65 model.embedding_dim 0.0 +825 65 model.scoring_fct_norm 1.0 +825 65 optimizer.lr 0.017488154142739475 +825 65 negative_sampler.num_negs_per_pos 62.0 +825 65 training.batch_size 1.0 +825 66 model.embedding_dim 0.0 +825 66 model.scoring_fct_norm 2.0 +825 66 optimizer.lr 0.018047858436267328 +825 66 negative_sampler.num_negs_per_pos 12.0 +825 66 training.batch_size 0.0 +825 67 model.embedding_dim 1.0 +825 67 model.scoring_fct_norm 2.0 +825 67 optimizer.lr 0.030406166712505903 +825 67 negative_sampler.num_negs_per_pos 98.0 +825 67 training.batch_size 1.0 +825 68 model.embedding_dim 0.0 +825 68 model.scoring_fct_norm 2.0 +825 68 optimizer.lr 0.08581978065277547 +825 68 negative_sampler.num_negs_per_pos 28.0 +825 68 training.batch_size 0.0 +825 69 model.embedding_dim 1.0 +825 69 model.scoring_fct_norm 2.0 +825 69 optimizer.lr 0.07339221395981423 +825 69 negative_sampler.num_negs_per_pos 51.0 +825 69 training.batch_size 2.0 +825 70 model.embedding_dim 2.0 +825 70 model.scoring_fct_norm 2.0 +825 70 optimizer.lr 0.045964218654691344 +825 70 negative_sampler.num_negs_per_pos 61.0 +825 70 training.batch_size 2.0 +825 71 model.embedding_dim 2.0 +825 71 model.scoring_fct_norm 2.0 +825 71 optimizer.lr 0.07624680809882603 +825 71 negative_sampler.num_negs_per_pos 6.0 +825 71 training.batch_size 2.0 +825 72 model.embedding_dim 2.0 +825 72 model.scoring_fct_norm 2.0 +825 72 optimizer.lr 0.0035686636176202966 +825 72 negative_sampler.num_negs_per_pos 44.0 +825 72 
training.batch_size 0.0 +825 73 model.embedding_dim 2.0 +825 73 model.scoring_fct_norm 1.0 +825 73 optimizer.lr 0.05429349052439746 +825 73 negative_sampler.num_negs_per_pos 39.0 +825 73 training.batch_size 2.0 +825 74 model.embedding_dim 2.0 +825 74 model.scoring_fct_norm 2.0 +825 74 optimizer.lr 0.007011819214926566 +825 74 negative_sampler.num_negs_per_pos 77.0 +825 74 training.batch_size 0.0 +825 75 model.embedding_dim 2.0 +825 75 model.scoring_fct_norm 2.0 +825 75 optimizer.lr 0.020175211324995182 +825 75 negative_sampler.num_negs_per_pos 17.0 +825 75 training.batch_size 1.0 +825 76 model.embedding_dim 2.0 +825 76 model.scoring_fct_norm 1.0 +825 76 optimizer.lr 0.006056365972787921 +825 76 negative_sampler.num_negs_per_pos 92.0 +825 76 training.batch_size 0.0 +825 77 model.embedding_dim 1.0 +825 77 model.scoring_fct_norm 1.0 +825 77 optimizer.lr 0.0016917048564472905 +825 77 negative_sampler.num_negs_per_pos 47.0 +825 77 training.batch_size 1.0 +825 78 model.embedding_dim 0.0 +825 78 model.scoring_fct_norm 2.0 +825 78 optimizer.lr 0.005368936924155753 +825 78 negative_sampler.num_negs_per_pos 57.0 +825 78 training.batch_size 0.0 +825 79 model.embedding_dim 0.0 +825 79 model.scoring_fct_norm 1.0 +825 79 optimizer.lr 0.013065530447150534 +825 79 negative_sampler.num_negs_per_pos 58.0 +825 79 training.batch_size 1.0 +825 80 model.embedding_dim 0.0 +825 80 model.scoring_fct_norm 1.0 +825 80 optimizer.lr 0.001387520381978335 +825 80 negative_sampler.num_negs_per_pos 84.0 +825 80 training.batch_size 2.0 +825 81 model.embedding_dim 2.0 +825 81 model.scoring_fct_norm 2.0 +825 81 optimizer.lr 0.010383804417108367 +825 81 negative_sampler.num_negs_per_pos 56.0 +825 81 training.batch_size 2.0 +825 82 model.embedding_dim 0.0 +825 82 model.scoring_fct_norm 2.0 +825 82 optimizer.lr 0.00365602276319862 +825 82 negative_sampler.num_negs_per_pos 20.0 +825 82 training.batch_size 1.0 +825 83 model.embedding_dim 0.0 +825 83 model.scoring_fct_norm 2.0 +825 83 optimizer.lr 
0.003371252828012305 +825 83 negative_sampler.num_negs_per_pos 7.0 +825 83 training.batch_size 1.0 +825 84 model.embedding_dim 2.0 +825 84 model.scoring_fct_norm 1.0 +825 84 optimizer.lr 0.030995309697040535 +825 84 negative_sampler.num_negs_per_pos 0.0 +825 84 training.batch_size 0.0 +825 85 model.embedding_dim 2.0 +825 85 model.scoring_fct_norm 2.0 +825 85 optimizer.lr 0.0326611298695699 +825 85 negative_sampler.num_negs_per_pos 91.0 +825 85 training.batch_size 2.0 +825 86 model.embedding_dim 1.0 +825 86 model.scoring_fct_norm 2.0 +825 86 optimizer.lr 0.019886348348928978 +825 86 negative_sampler.num_negs_per_pos 55.0 +825 86 training.batch_size 0.0 +825 87 model.embedding_dim 2.0 +825 87 model.scoring_fct_norm 1.0 +825 87 optimizer.lr 0.003276475210679916 +825 87 negative_sampler.num_negs_per_pos 45.0 +825 87 training.batch_size 2.0 +825 88 model.embedding_dim 2.0 +825 88 model.scoring_fct_norm 2.0 +825 88 optimizer.lr 0.029631698591731828 +825 88 negative_sampler.num_negs_per_pos 19.0 +825 88 training.batch_size 2.0 +825 89 model.embedding_dim 0.0 +825 89 model.scoring_fct_norm 2.0 +825 89 optimizer.lr 0.00211580922703582 +825 89 negative_sampler.num_negs_per_pos 44.0 +825 89 training.batch_size 1.0 +825 90 model.embedding_dim 2.0 +825 90 model.scoring_fct_norm 2.0 +825 90 optimizer.lr 0.0010484694569225994 +825 90 negative_sampler.num_negs_per_pos 53.0 +825 90 training.batch_size 1.0 +825 91 model.embedding_dim 2.0 +825 91 model.scoring_fct_norm 2.0 +825 91 optimizer.lr 0.0017215057327798764 +825 91 negative_sampler.num_negs_per_pos 8.0 +825 91 training.batch_size 2.0 +825 92 model.embedding_dim 0.0 +825 92 model.scoring_fct_norm 2.0 +825 92 optimizer.lr 0.006988032981625061 +825 92 negative_sampler.num_negs_per_pos 0.0 +825 92 training.batch_size 2.0 +825 93 model.embedding_dim 1.0 +825 93 model.scoring_fct_norm 2.0 +825 93 optimizer.lr 0.008690852612499234 +825 93 negative_sampler.num_negs_per_pos 92.0 +825 93 training.batch_size 0.0 +825 94 
model.embedding_dim 2.0 +825 94 model.scoring_fct_norm 2.0 +825 94 optimizer.lr 0.004748544606327402 +825 94 negative_sampler.num_negs_per_pos 85.0 +825 94 training.batch_size 1.0 +825 95 model.embedding_dim 1.0 +825 95 model.scoring_fct_norm 1.0 +825 95 optimizer.lr 0.0015209795261962836 +825 95 negative_sampler.num_negs_per_pos 71.0 +825 95 training.batch_size 0.0 +825 96 model.embedding_dim 0.0 +825 96 model.scoring_fct_norm 2.0 +825 96 optimizer.lr 0.02433033977798031 +825 96 negative_sampler.num_negs_per_pos 61.0 +825 96 training.batch_size 0.0 +825 97 model.embedding_dim 0.0 +825 97 model.scoring_fct_norm 2.0 +825 97 optimizer.lr 0.09395080171788786 +825 97 negative_sampler.num_negs_per_pos 81.0 +825 97 training.batch_size 0.0 +825 98 model.embedding_dim 2.0 +825 98 model.scoring_fct_norm 2.0 +825 98 optimizer.lr 0.0819263893597743 +825 98 negative_sampler.num_negs_per_pos 86.0 +825 98 training.batch_size 1.0 +825 99 model.embedding_dim 1.0 +825 99 model.scoring_fct_norm 2.0 +825 99 optimizer.lr 0.0025947996575463707 +825 99 negative_sampler.num_negs_per_pos 19.0 +825 99 training.batch_size 0.0 +825 100 model.embedding_dim 1.0 +825 100 model.scoring_fct_norm 2.0 +825 100 optimizer.lr 0.002232967097266807 +825 100 negative_sampler.num_negs_per_pos 99.0 +825 100 training.batch_size 0.0 +825 1 dataset """kinships""" +825 1 model """transe""" +825 1 loss """bceaftersigmoid""" +825 1 regularizer """no""" +825 1 optimizer """adam""" +825 1 training_loop """owa""" +825 1 negative_sampler """basic""" +825 1 evaluator """rankbased""" +825 2 dataset """kinships""" +825 2 model """transe""" +825 2 loss """bceaftersigmoid""" +825 2 regularizer """no""" +825 2 optimizer """adam""" +825 2 training_loop """owa""" +825 2 negative_sampler """basic""" +825 2 evaluator """rankbased""" +825 3 dataset """kinships""" +825 3 model """transe""" +825 3 loss """bceaftersigmoid""" +825 3 regularizer """no""" +825 3 optimizer """adam""" +825 3 training_loop """owa""" +825 3 
negative_sampler """basic""" +825 3 evaluator """rankbased""" +825 4 dataset """kinships""" +825 4 model """transe""" +825 4 loss """bceaftersigmoid""" +825 4 regularizer """no""" +825 4 optimizer """adam""" +825 4 training_loop """owa""" +825 4 negative_sampler """basic""" +825 4 evaluator """rankbased""" +825 5 dataset """kinships""" +825 5 model """transe""" +825 5 loss """bceaftersigmoid""" +825 5 regularizer """no""" +825 5 optimizer """adam""" +825 5 training_loop """owa""" +825 5 negative_sampler """basic""" +825 5 evaluator """rankbased""" +825 6 dataset """kinships""" +825 6 model """transe""" +825 6 loss """bceaftersigmoid""" +825 6 regularizer """no""" +825 6 optimizer """adam""" +825 6 training_loop """owa""" +825 6 negative_sampler """basic""" +825 6 evaluator """rankbased""" +825 7 dataset """kinships""" +825 7 model """transe""" +825 7 loss """bceaftersigmoid""" +825 7 regularizer """no""" +825 7 optimizer """adam""" +825 7 training_loop """owa""" +825 7 negative_sampler """basic""" +825 7 evaluator """rankbased""" +825 8 dataset """kinships""" +825 8 model """transe""" +825 8 loss """bceaftersigmoid""" +825 8 regularizer """no""" +825 8 optimizer """adam""" +825 8 training_loop """owa""" +825 8 negative_sampler """basic""" +825 8 evaluator """rankbased""" +825 9 dataset """kinships""" +825 9 model """transe""" +825 9 loss """bceaftersigmoid""" +825 9 regularizer """no""" +825 9 optimizer """adam""" +825 9 training_loop """owa""" +825 9 negative_sampler """basic""" +825 9 evaluator """rankbased""" +825 10 dataset """kinships""" +825 10 model """transe""" +825 10 loss """bceaftersigmoid""" +825 10 regularizer """no""" +825 10 optimizer """adam""" +825 10 training_loop """owa""" +825 10 negative_sampler """basic""" +825 10 evaluator """rankbased""" +825 11 dataset """kinships""" +825 11 model """transe""" +825 11 loss """bceaftersigmoid""" +825 11 regularizer """no""" +825 11 optimizer """adam""" +825 11 training_loop """owa""" +825 11 negative_sampler 
"""basic""" +825 11 evaluator """rankbased""" +825 12 dataset """kinships""" +825 12 model """transe""" +825 12 loss """bceaftersigmoid""" +825 12 regularizer """no""" +825 12 optimizer """adam""" +825 12 training_loop """owa""" +825 12 negative_sampler """basic""" +825 12 evaluator """rankbased""" +825 13 dataset """kinships""" +825 13 model """transe""" +825 13 loss """bceaftersigmoid""" +825 13 regularizer """no""" +825 13 optimizer """adam""" +825 13 training_loop """owa""" +825 13 negative_sampler """basic""" +825 13 evaluator """rankbased""" +825 14 dataset """kinships""" +825 14 model """transe""" +825 14 loss """bceaftersigmoid""" +825 14 regularizer """no""" +825 14 optimizer """adam""" +825 14 training_loop """owa""" +825 14 negative_sampler """basic""" +825 14 evaluator """rankbased""" +825 15 dataset """kinships""" +825 15 model """transe""" +825 15 loss """bceaftersigmoid""" +825 15 regularizer """no""" +825 15 optimizer """adam""" +825 15 training_loop """owa""" +825 15 negative_sampler """basic""" +825 15 evaluator """rankbased""" +825 16 dataset """kinships""" +825 16 model """transe""" +825 16 loss """bceaftersigmoid""" +825 16 regularizer """no""" +825 16 optimizer """adam""" +825 16 training_loop """owa""" +825 16 negative_sampler """basic""" +825 16 evaluator """rankbased""" +825 17 dataset """kinships""" +825 17 model """transe""" +825 17 loss """bceaftersigmoid""" +825 17 regularizer """no""" +825 17 optimizer """adam""" +825 17 training_loop """owa""" +825 17 negative_sampler """basic""" +825 17 evaluator """rankbased""" +825 18 dataset """kinships""" +825 18 model """transe""" +825 18 loss """bceaftersigmoid""" +825 18 regularizer """no""" +825 18 optimizer """adam""" +825 18 training_loop """owa""" +825 18 negative_sampler """basic""" +825 18 evaluator """rankbased""" +825 19 dataset """kinships""" +825 19 model """transe""" +825 19 loss """bceaftersigmoid""" +825 19 regularizer """no""" +825 19 optimizer """adam""" +825 19 training_loop 
"""owa""" +825 19 negative_sampler """basic""" +825 19 evaluator """rankbased""" +825 20 dataset """kinships""" +825 20 model """transe""" +825 20 loss """bceaftersigmoid""" +825 20 regularizer """no""" +825 20 optimizer """adam""" +825 20 training_loop """owa""" +825 20 negative_sampler """basic""" +825 20 evaluator """rankbased""" +825 21 dataset """kinships""" +825 21 model """transe""" +825 21 loss """bceaftersigmoid""" +825 21 regularizer """no""" +825 21 optimizer """adam""" +825 21 training_loop """owa""" +825 21 negative_sampler """basic""" +825 21 evaluator """rankbased""" +825 22 dataset """kinships""" +825 22 model """transe""" +825 22 loss """bceaftersigmoid""" +825 22 regularizer """no""" +825 22 optimizer """adam""" +825 22 training_loop """owa""" +825 22 negative_sampler """basic""" +825 22 evaluator """rankbased""" +825 23 dataset """kinships""" +825 23 model """transe""" +825 23 loss """bceaftersigmoid""" +825 23 regularizer """no""" +825 23 optimizer """adam""" +825 23 training_loop """owa""" +825 23 negative_sampler """basic""" +825 23 evaluator """rankbased""" +825 24 dataset """kinships""" +825 24 model """transe""" +825 24 loss """bceaftersigmoid""" +825 24 regularizer """no""" +825 24 optimizer """adam""" +825 24 training_loop """owa""" +825 24 negative_sampler """basic""" +825 24 evaluator """rankbased""" +825 25 dataset """kinships""" +825 25 model """transe""" +825 25 loss """bceaftersigmoid""" +825 25 regularizer """no""" +825 25 optimizer """adam""" +825 25 training_loop """owa""" +825 25 negative_sampler """basic""" +825 25 evaluator """rankbased""" +825 26 dataset """kinships""" +825 26 model """transe""" +825 26 loss """bceaftersigmoid""" +825 26 regularizer """no""" +825 26 optimizer """adam""" +825 26 training_loop """owa""" +825 26 negative_sampler """basic""" +825 26 evaluator """rankbased""" +825 27 dataset """kinships""" +825 27 model """transe""" +825 27 loss """bceaftersigmoid""" +825 27 regularizer """no""" +825 27 optimizer 
"""adam""" +825 27 training_loop """owa""" +825 27 negative_sampler """basic""" +825 27 evaluator """rankbased""" +825 28 dataset """kinships""" +825 28 model """transe""" +825 28 loss """bceaftersigmoid""" +825 28 regularizer """no""" +825 28 optimizer """adam""" +825 28 training_loop """owa""" +825 28 negative_sampler """basic""" +825 28 evaluator """rankbased""" +825 29 dataset """kinships""" +825 29 model """transe""" +825 29 loss """bceaftersigmoid""" +825 29 regularizer """no""" +825 29 optimizer """adam""" +825 29 training_loop """owa""" +825 29 negative_sampler """basic""" +825 29 evaluator """rankbased""" +825 30 dataset """kinships""" +825 30 model """transe""" +825 30 loss """bceaftersigmoid""" +825 30 regularizer """no""" +825 30 optimizer """adam""" +825 30 training_loop """owa""" +825 30 negative_sampler """basic""" +825 30 evaluator """rankbased""" +825 31 dataset """kinships""" +825 31 model """transe""" +825 31 loss """bceaftersigmoid""" +825 31 regularizer """no""" +825 31 optimizer """adam""" +825 31 training_loop """owa""" +825 31 negative_sampler """basic""" +825 31 evaluator """rankbased""" +825 32 dataset """kinships""" +825 32 model """transe""" +825 32 loss """bceaftersigmoid""" +825 32 regularizer """no""" +825 32 optimizer """adam""" +825 32 training_loop """owa""" +825 32 negative_sampler """basic""" +825 32 evaluator """rankbased""" +825 33 dataset """kinships""" +825 33 model """transe""" +825 33 loss """bceaftersigmoid""" +825 33 regularizer """no""" +825 33 optimizer """adam""" +825 33 training_loop """owa""" +825 33 negative_sampler """basic""" +825 33 evaluator """rankbased""" +825 34 dataset """kinships""" +825 34 model """transe""" +825 34 loss """bceaftersigmoid""" +825 34 regularizer """no""" +825 34 optimizer """adam""" +825 34 training_loop """owa""" +825 34 negative_sampler """basic""" +825 34 evaluator """rankbased""" +825 35 dataset """kinships""" +825 35 model """transe""" +825 35 loss """bceaftersigmoid""" +825 35 
regularizer """no""" +825 35 optimizer """adam""" +825 35 training_loop """owa""" +825 35 negative_sampler """basic""" +825 35 evaluator """rankbased""" +825 36 dataset """kinships""" +825 36 model """transe""" +825 36 loss """bceaftersigmoid""" +825 36 regularizer """no""" +825 36 optimizer """adam""" +825 36 training_loop """owa""" +825 36 negative_sampler """basic""" +825 36 evaluator """rankbased""" +825 37 dataset """kinships""" +825 37 model """transe""" +825 37 loss """bceaftersigmoid""" +825 37 regularizer """no""" +825 37 optimizer """adam""" +825 37 training_loop """owa""" +825 37 negative_sampler """basic""" +825 37 evaluator """rankbased""" +825 38 dataset """kinships""" +825 38 model """transe""" +825 38 loss """bceaftersigmoid""" +825 38 regularizer """no""" +825 38 optimizer """adam""" +825 38 training_loop """owa""" +825 38 negative_sampler """basic""" +825 38 evaluator """rankbased""" +825 39 dataset """kinships""" +825 39 model """transe""" +825 39 loss """bceaftersigmoid""" +825 39 regularizer """no""" +825 39 optimizer """adam""" +825 39 training_loop """owa""" +825 39 negative_sampler """basic""" +825 39 evaluator """rankbased""" +825 40 dataset """kinships""" +825 40 model """transe""" +825 40 loss """bceaftersigmoid""" +825 40 regularizer """no""" +825 40 optimizer """adam""" +825 40 training_loop """owa""" +825 40 negative_sampler """basic""" +825 40 evaluator """rankbased""" +825 41 dataset """kinships""" +825 41 model """transe""" +825 41 loss """bceaftersigmoid""" +825 41 regularizer """no""" +825 41 optimizer """adam""" +825 41 training_loop """owa""" +825 41 negative_sampler """basic""" +825 41 evaluator """rankbased""" +825 42 dataset """kinships""" +825 42 model """transe""" +825 42 loss """bceaftersigmoid""" +825 42 regularizer """no""" +825 42 optimizer """adam""" +825 42 training_loop """owa""" +825 42 negative_sampler """basic""" +825 42 evaluator """rankbased""" +825 43 dataset """kinships""" +825 43 model """transe""" +825 43 
loss """bceaftersigmoid""" +825 43 regularizer """no""" +825 43 optimizer """adam""" +825 43 training_loop """owa""" +825 43 negative_sampler """basic""" +825 43 evaluator """rankbased""" +825 44 dataset """kinships""" +825 44 model """transe""" +825 44 loss """bceaftersigmoid""" +825 44 regularizer """no""" +825 44 optimizer """adam""" +825 44 training_loop """owa""" +825 44 negative_sampler """basic""" +825 44 evaluator """rankbased""" +825 45 dataset """kinships""" +825 45 model """transe""" +825 45 loss """bceaftersigmoid""" +825 45 regularizer """no""" +825 45 optimizer """adam""" +825 45 training_loop """owa""" +825 45 negative_sampler """basic""" +825 45 evaluator """rankbased""" +825 46 dataset """kinships""" +825 46 model """transe""" +825 46 loss """bceaftersigmoid""" +825 46 regularizer """no""" +825 46 optimizer """adam""" +825 46 training_loop """owa""" +825 46 negative_sampler """basic""" +825 46 evaluator """rankbased""" +825 47 dataset """kinships""" +825 47 model """transe""" +825 47 loss """bceaftersigmoid""" +825 47 regularizer """no""" +825 47 optimizer """adam""" +825 47 training_loop """owa""" +825 47 negative_sampler """basic""" +825 47 evaluator """rankbased""" +825 48 dataset """kinships""" +825 48 model """transe""" +825 48 loss """bceaftersigmoid""" +825 48 regularizer """no""" +825 48 optimizer """adam""" +825 48 training_loop """owa""" +825 48 negative_sampler """basic""" +825 48 evaluator """rankbased""" +825 49 dataset """kinships""" +825 49 model """transe""" +825 49 loss """bceaftersigmoid""" +825 49 regularizer """no""" +825 49 optimizer """adam""" +825 49 training_loop """owa""" +825 49 negative_sampler """basic""" +825 49 evaluator """rankbased""" +825 50 dataset """kinships""" +825 50 model """transe""" +825 50 loss """bceaftersigmoid""" +825 50 regularizer """no""" +825 50 optimizer """adam""" +825 50 training_loop """owa""" +825 50 negative_sampler """basic""" +825 50 evaluator """rankbased""" +825 51 dataset """kinships""" 
+825 51 model """transe""" +825 51 loss """bceaftersigmoid""" +825 51 regularizer """no""" +825 51 optimizer """adam""" +825 51 training_loop """owa""" +825 51 negative_sampler """basic""" +825 51 evaluator """rankbased""" +825 52 dataset """kinships""" +825 52 model """transe""" +825 52 loss """bceaftersigmoid""" +825 52 regularizer """no""" +825 52 optimizer """adam""" +825 52 training_loop """owa""" +825 52 negative_sampler """basic""" +825 52 evaluator """rankbased""" +825 53 dataset """kinships""" +825 53 model """transe""" +825 53 loss """bceaftersigmoid""" +825 53 regularizer """no""" +825 53 optimizer """adam""" +825 53 training_loop """owa""" +825 53 negative_sampler """basic""" +825 53 evaluator """rankbased""" +825 54 dataset """kinships""" +825 54 model """transe""" +825 54 loss """bceaftersigmoid""" +825 54 regularizer """no""" +825 54 optimizer """adam""" +825 54 training_loop """owa""" +825 54 negative_sampler """basic""" +825 54 evaluator """rankbased""" +825 55 dataset """kinships""" +825 55 model """transe""" +825 55 loss """bceaftersigmoid""" +825 55 regularizer """no""" +825 55 optimizer """adam""" +825 55 training_loop """owa""" +825 55 negative_sampler """basic""" +825 55 evaluator """rankbased""" +825 56 dataset """kinships""" +825 56 model """transe""" +825 56 loss """bceaftersigmoid""" +825 56 regularizer """no""" +825 56 optimizer """adam""" +825 56 training_loop """owa""" +825 56 negative_sampler """basic""" +825 56 evaluator """rankbased""" +825 57 dataset """kinships""" +825 57 model """transe""" +825 57 loss """bceaftersigmoid""" +825 57 regularizer """no""" +825 57 optimizer """adam""" +825 57 training_loop """owa""" +825 57 negative_sampler """basic""" +825 57 evaluator """rankbased""" +825 58 dataset """kinships""" +825 58 model """transe""" +825 58 loss """bceaftersigmoid""" +825 58 regularizer """no""" +825 58 optimizer """adam""" +825 58 training_loop """owa""" +825 58 negative_sampler """basic""" +825 58 evaluator 
"""rankbased""" +825 59 dataset """kinships""" +825 59 model """transe""" +825 59 loss """bceaftersigmoid""" +825 59 regularizer """no""" +825 59 optimizer """adam""" +825 59 training_loop """owa""" +825 59 negative_sampler """basic""" +825 59 evaluator """rankbased""" +825 60 dataset """kinships""" +825 60 model """transe""" +825 60 loss """bceaftersigmoid""" +825 60 regularizer """no""" +825 60 optimizer """adam""" +825 60 training_loop """owa""" +825 60 negative_sampler """basic""" +825 60 evaluator """rankbased""" +825 61 dataset """kinships""" +825 61 model """transe""" +825 61 loss """bceaftersigmoid""" +825 61 regularizer """no""" +825 61 optimizer """adam""" +825 61 training_loop """owa""" +825 61 negative_sampler """basic""" +825 61 evaluator """rankbased""" +825 62 dataset """kinships""" +825 62 model """transe""" +825 62 loss """bceaftersigmoid""" +825 62 regularizer """no""" +825 62 optimizer """adam""" +825 62 training_loop """owa""" +825 62 negative_sampler """basic""" +825 62 evaluator """rankbased""" +825 63 dataset """kinships""" +825 63 model """transe""" +825 63 loss """bceaftersigmoid""" +825 63 regularizer """no""" +825 63 optimizer """adam""" +825 63 training_loop """owa""" +825 63 negative_sampler """basic""" +825 63 evaluator """rankbased""" +825 64 dataset """kinships""" +825 64 model """transe""" +825 64 loss """bceaftersigmoid""" +825 64 regularizer """no""" +825 64 optimizer """adam""" +825 64 training_loop """owa""" +825 64 negative_sampler """basic""" +825 64 evaluator """rankbased""" +825 65 dataset """kinships""" +825 65 model """transe""" +825 65 loss """bceaftersigmoid""" +825 65 regularizer """no""" +825 65 optimizer """adam""" +825 65 training_loop """owa""" +825 65 negative_sampler """basic""" +825 65 evaluator """rankbased""" +825 66 dataset """kinships""" +825 66 model """transe""" +825 66 loss """bceaftersigmoid""" +825 66 regularizer """no""" +825 66 optimizer """adam""" +825 66 training_loop """owa""" +825 66 
negative_sampler """basic""" +825 66 evaluator """rankbased""" +825 67 dataset """kinships""" +825 67 model """transe""" +825 67 loss """bceaftersigmoid""" +825 67 regularizer """no""" +825 67 optimizer """adam""" +825 67 training_loop """owa""" +825 67 negative_sampler """basic""" +825 67 evaluator """rankbased""" +825 68 dataset """kinships""" +825 68 model """transe""" +825 68 loss """bceaftersigmoid""" +825 68 regularizer """no""" +825 68 optimizer """adam""" +825 68 training_loop """owa""" +825 68 negative_sampler """basic""" +825 68 evaluator """rankbased""" +825 69 dataset """kinships""" +825 69 model """transe""" +825 69 loss """bceaftersigmoid""" +825 69 regularizer """no""" +825 69 optimizer """adam""" +825 69 training_loop """owa""" +825 69 negative_sampler """basic""" +825 69 evaluator """rankbased""" +825 70 dataset """kinships""" +825 70 model """transe""" +825 70 loss """bceaftersigmoid""" +825 70 regularizer """no""" +825 70 optimizer """adam""" +825 70 training_loop """owa""" +825 70 negative_sampler """basic""" +825 70 evaluator """rankbased""" +825 71 dataset """kinships""" +825 71 model """transe""" +825 71 loss """bceaftersigmoid""" +825 71 regularizer """no""" +825 71 optimizer """adam""" +825 71 training_loop """owa""" +825 71 negative_sampler """basic""" +825 71 evaluator """rankbased""" +825 72 dataset """kinships""" +825 72 model """transe""" +825 72 loss """bceaftersigmoid""" +825 72 regularizer """no""" +825 72 optimizer """adam""" +825 72 training_loop """owa""" +825 72 negative_sampler """basic""" +825 72 evaluator """rankbased""" +825 73 dataset """kinships""" +825 73 model """transe""" +825 73 loss """bceaftersigmoid""" +825 73 regularizer """no""" +825 73 optimizer """adam""" +825 73 training_loop """owa""" +825 73 negative_sampler """basic""" +825 73 evaluator """rankbased""" +825 74 dataset """kinships""" +825 74 model """transe""" +825 74 loss """bceaftersigmoid""" +825 74 regularizer """no""" +825 74 optimizer """adam""" +825 74 
training_loop """owa""" +825 74 negative_sampler """basic""" +825 74 evaluator """rankbased""" +825 75 dataset """kinships""" +825 75 model """transe""" +825 75 loss """bceaftersigmoid""" +825 75 regularizer """no""" +825 75 optimizer """adam""" +825 75 training_loop """owa""" +825 75 negative_sampler """basic""" +825 75 evaluator """rankbased""" +825 76 dataset """kinships""" +825 76 model """transe""" +825 76 loss """bceaftersigmoid""" +825 76 regularizer """no""" +825 76 optimizer """adam""" +825 76 training_loop """owa""" +825 76 negative_sampler """basic""" +825 76 evaluator """rankbased""" +825 77 dataset """kinships""" +825 77 model """transe""" +825 77 loss """bceaftersigmoid""" +825 77 regularizer """no""" +825 77 optimizer """adam""" +825 77 training_loop """owa""" +825 77 negative_sampler """basic""" +825 77 evaluator """rankbased""" +825 78 dataset """kinships""" +825 78 model """transe""" +825 78 loss """bceaftersigmoid""" +825 78 regularizer """no""" +825 78 optimizer """adam""" +825 78 training_loop """owa""" +825 78 negative_sampler """basic""" +825 78 evaluator """rankbased""" +825 79 dataset """kinships""" +825 79 model """transe""" +825 79 loss """bceaftersigmoid""" +825 79 regularizer """no""" +825 79 optimizer """adam""" +825 79 training_loop """owa""" +825 79 negative_sampler """basic""" +825 79 evaluator """rankbased""" +825 80 dataset """kinships""" +825 80 model """transe""" +825 80 loss """bceaftersigmoid""" +825 80 regularizer """no""" +825 80 optimizer """adam""" +825 80 training_loop """owa""" +825 80 negative_sampler """basic""" +825 80 evaluator """rankbased""" +825 81 dataset """kinships""" +825 81 model """transe""" +825 81 loss """bceaftersigmoid""" +825 81 regularizer """no""" +825 81 optimizer """adam""" +825 81 training_loop """owa""" +825 81 negative_sampler """basic""" +825 81 evaluator """rankbased""" +825 82 dataset """kinships""" +825 82 model """transe""" +825 82 loss """bceaftersigmoid""" +825 82 regularizer """no""" +825 
82 optimizer """adam""" +825 82 training_loop """owa""" +825 82 negative_sampler """basic""" +825 82 evaluator """rankbased""" +825 83 dataset """kinships""" +825 83 model """transe""" +825 83 loss """bceaftersigmoid""" +825 83 regularizer """no""" +825 83 optimizer """adam""" +825 83 training_loop """owa""" +825 83 negative_sampler """basic""" +825 83 evaluator """rankbased""" +825 84 dataset """kinships""" +825 84 model """transe""" +825 84 loss """bceaftersigmoid""" +825 84 regularizer """no""" +825 84 optimizer """adam""" +825 84 training_loop """owa""" +825 84 negative_sampler """basic""" +825 84 evaluator """rankbased""" +825 85 dataset """kinships""" +825 85 model """transe""" +825 85 loss """bceaftersigmoid""" +825 85 regularizer """no""" +825 85 optimizer """adam""" +825 85 training_loop """owa""" +825 85 negative_sampler """basic""" +825 85 evaluator """rankbased""" +825 86 dataset """kinships""" +825 86 model """transe""" +825 86 loss """bceaftersigmoid""" +825 86 regularizer """no""" +825 86 optimizer """adam""" +825 86 training_loop """owa""" +825 86 negative_sampler """basic""" +825 86 evaluator """rankbased""" +825 87 dataset """kinships""" +825 87 model """transe""" +825 87 loss """bceaftersigmoid""" +825 87 regularizer """no""" +825 87 optimizer """adam""" +825 87 training_loop """owa""" +825 87 negative_sampler """basic""" +825 87 evaluator """rankbased""" +825 88 dataset """kinships""" +825 88 model """transe""" +825 88 loss """bceaftersigmoid""" +825 88 regularizer """no""" +825 88 optimizer """adam""" +825 88 training_loop """owa""" +825 88 negative_sampler """basic""" +825 88 evaluator """rankbased""" +825 89 dataset """kinships""" +825 89 model """transe""" +825 89 loss """bceaftersigmoid""" +825 89 regularizer """no""" +825 89 optimizer """adam""" +825 89 training_loop """owa""" +825 89 negative_sampler """basic""" +825 89 evaluator """rankbased""" +825 90 dataset """kinships""" +825 90 model """transe""" +825 90 loss """bceaftersigmoid""" 
+825 90 regularizer """no""" +825 90 optimizer """adam""" +825 90 training_loop """owa""" +825 90 negative_sampler """basic""" +825 90 evaluator """rankbased""" +825 91 dataset """kinships""" +825 91 model """transe""" +825 91 loss """bceaftersigmoid""" +825 91 regularizer """no""" +825 91 optimizer """adam""" +825 91 training_loop """owa""" +825 91 negative_sampler """basic""" +825 91 evaluator """rankbased""" +825 92 dataset """kinships""" +825 92 model """transe""" +825 92 loss """bceaftersigmoid""" +825 92 regularizer """no""" +825 92 optimizer """adam""" +825 92 training_loop """owa""" +825 92 negative_sampler """basic""" +825 92 evaluator """rankbased""" +825 93 dataset """kinships""" +825 93 model """transe""" +825 93 loss """bceaftersigmoid""" +825 93 regularizer """no""" +825 93 optimizer """adam""" +825 93 training_loop """owa""" +825 93 negative_sampler """basic""" +825 93 evaluator """rankbased""" +825 94 dataset """kinships""" +825 94 model """transe""" +825 94 loss """bceaftersigmoid""" +825 94 regularizer """no""" +825 94 optimizer """adam""" +825 94 training_loop """owa""" +825 94 negative_sampler """basic""" +825 94 evaluator """rankbased""" +825 95 dataset """kinships""" +825 95 model """transe""" +825 95 loss """bceaftersigmoid""" +825 95 regularizer """no""" +825 95 optimizer """adam""" +825 95 training_loop """owa""" +825 95 negative_sampler """basic""" +825 95 evaluator """rankbased""" +825 96 dataset """kinships""" +825 96 model """transe""" +825 96 loss """bceaftersigmoid""" +825 96 regularizer """no""" +825 96 optimizer """adam""" +825 96 training_loop """owa""" +825 96 negative_sampler """basic""" +825 96 evaluator """rankbased""" +825 97 dataset """kinships""" +825 97 model """transe""" +825 97 loss """bceaftersigmoid""" +825 97 regularizer """no""" +825 97 optimizer """adam""" +825 97 training_loop """owa""" +825 97 negative_sampler """basic""" +825 97 evaluator """rankbased""" +825 98 dataset """kinships""" +825 98 model """transe""" 
+825 98 loss """bceaftersigmoid""" +825 98 regularizer """no""" +825 98 optimizer """adam""" +825 98 training_loop """owa""" +825 98 negative_sampler """basic""" +825 98 evaluator """rankbased""" +825 99 dataset """kinships""" +825 99 model """transe""" +825 99 loss """bceaftersigmoid""" +825 99 regularizer """no""" +825 99 optimizer """adam""" +825 99 training_loop """owa""" +825 99 negative_sampler """basic""" +825 99 evaluator """rankbased""" +825 100 dataset """kinships""" +825 100 model """transe""" +825 100 loss """bceaftersigmoid""" +825 100 regularizer """no""" +825 100 optimizer """adam""" +825 100 training_loop """owa""" +825 100 negative_sampler """basic""" +825 100 evaluator """rankbased""" +826 1 model.embedding_dim 0.0 +826 1 model.scoring_fct_norm 1.0 +826 1 optimizer.lr 0.011936459297110983 +826 1 negative_sampler.num_negs_per_pos 11.0 +826 1 training.batch_size 0.0 +826 2 model.embedding_dim 2.0 +826 2 model.scoring_fct_norm 1.0 +826 2 optimizer.lr 0.017755974384287377 +826 2 negative_sampler.num_negs_per_pos 99.0 +826 2 training.batch_size 0.0 +826 3 model.embedding_dim 1.0 +826 3 model.scoring_fct_norm 2.0 +826 3 optimizer.lr 0.006390117614456057 +826 3 negative_sampler.num_negs_per_pos 1.0 +826 3 training.batch_size 2.0 +826 4 model.embedding_dim 2.0 +826 4 model.scoring_fct_norm 2.0 +826 4 optimizer.lr 0.006293067401100207 +826 4 negative_sampler.num_negs_per_pos 28.0 +826 4 training.batch_size 0.0 +826 5 model.embedding_dim 2.0 +826 5 model.scoring_fct_norm 2.0 +826 5 optimizer.lr 0.00873384245090744 +826 5 negative_sampler.num_negs_per_pos 72.0 +826 5 training.batch_size 0.0 +826 6 model.embedding_dim 2.0 +826 6 model.scoring_fct_norm 1.0 +826 6 optimizer.lr 0.0024868644247440955 +826 6 negative_sampler.num_negs_per_pos 44.0 +826 6 training.batch_size 2.0 +826 7 model.embedding_dim 0.0 +826 7 model.scoring_fct_norm 2.0 +826 7 optimizer.lr 0.002796308022056548 +826 7 negative_sampler.num_negs_per_pos 72.0 +826 7 training.batch_size 2.0 +826 8 
model.embedding_dim 2.0 +826 8 model.scoring_fct_norm 1.0 +826 8 optimizer.lr 0.02758227798750029 +826 8 negative_sampler.num_negs_per_pos 77.0 +826 8 training.batch_size 0.0 +826 9 model.embedding_dim 1.0 +826 9 model.scoring_fct_norm 1.0 +826 9 optimizer.lr 0.001582644373765805 +826 9 negative_sampler.num_negs_per_pos 88.0 +826 9 training.batch_size 1.0 +826 10 model.embedding_dim 0.0 +826 10 model.scoring_fct_norm 2.0 +826 10 optimizer.lr 0.003448775881851992 +826 10 negative_sampler.num_negs_per_pos 18.0 +826 10 training.batch_size 0.0 +826 11 model.embedding_dim 2.0 +826 11 model.scoring_fct_norm 2.0 +826 11 optimizer.lr 0.002543158693434339 +826 11 negative_sampler.num_negs_per_pos 19.0 +826 11 training.batch_size 0.0 +826 12 model.embedding_dim 0.0 +826 12 model.scoring_fct_norm 2.0 +826 12 optimizer.lr 0.0011702312551473226 +826 12 negative_sampler.num_negs_per_pos 78.0 +826 12 training.batch_size 1.0 +826 13 model.embedding_dim 1.0 +826 13 model.scoring_fct_norm 1.0 +826 13 optimizer.lr 0.001206943670062782 +826 13 negative_sampler.num_negs_per_pos 78.0 +826 13 training.batch_size 1.0 +826 14 model.embedding_dim 1.0 +826 14 model.scoring_fct_norm 2.0 +826 14 optimizer.lr 0.03878314506240321 +826 14 negative_sampler.num_negs_per_pos 74.0 +826 14 training.batch_size 1.0 +826 15 model.embedding_dim 2.0 +826 15 model.scoring_fct_norm 2.0 +826 15 optimizer.lr 0.009006794609493024 +826 15 negative_sampler.num_negs_per_pos 20.0 +826 15 training.batch_size 1.0 +826 16 model.embedding_dim 0.0 +826 16 model.scoring_fct_norm 2.0 +826 16 optimizer.lr 0.018511239622601396 +826 16 negative_sampler.num_negs_per_pos 94.0 +826 16 training.batch_size 2.0 +826 17 model.embedding_dim 1.0 +826 17 model.scoring_fct_norm 1.0 +826 17 optimizer.lr 0.005727948115400679 +826 17 negative_sampler.num_negs_per_pos 76.0 +826 17 training.batch_size 0.0 +826 18 model.embedding_dim 0.0 +826 18 model.scoring_fct_norm 1.0 +826 18 optimizer.lr 0.08471794480177587 +826 18 
negative_sampler.num_negs_per_pos 74.0 +826 18 training.batch_size 1.0 +826 19 model.embedding_dim 0.0 +826 19 model.scoring_fct_norm 2.0 +826 19 optimizer.lr 0.0020370415553240305 +826 19 negative_sampler.num_negs_per_pos 27.0 +826 19 training.batch_size 0.0 +826 20 model.embedding_dim 1.0 +826 20 model.scoring_fct_norm 1.0 +826 20 optimizer.lr 0.0014685612092306221 +826 20 negative_sampler.num_negs_per_pos 37.0 +826 20 training.batch_size 2.0 +826 21 model.embedding_dim 2.0 +826 21 model.scoring_fct_norm 1.0 +826 21 optimizer.lr 0.00545090107936843 +826 21 negative_sampler.num_negs_per_pos 96.0 +826 21 training.batch_size 0.0 +826 22 model.embedding_dim 0.0 +826 22 model.scoring_fct_norm 1.0 +826 22 optimizer.lr 0.04302974651638087 +826 22 negative_sampler.num_negs_per_pos 98.0 +826 22 training.batch_size 0.0 +826 23 model.embedding_dim 1.0 +826 23 model.scoring_fct_norm 2.0 +826 23 optimizer.lr 0.004609751296062899 +826 23 negative_sampler.num_negs_per_pos 7.0 +826 23 training.batch_size 0.0 +826 24 model.embedding_dim 2.0 +826 24 model.scoring_fct_norm 1.0 +826 24 optimizer.lr 0.007896753161883416 +826 24 negative_sampler.num_negs_per_pos 60.0 +826 24 training.batch_size 2.0 +826 25 model.embedding_dim 2.0 +826 25 model.scoring_fct_norm 2.0 +826 25 optimizer.lr 0.007015790069177247 +826 25 negative_sampler.num_negs_per_pos 65.0 +826 25 training.batch_size 2.0 +826 26 model.embedding_dim 0.0 +826 26 model.scoring_fct_norm 1.0 +826 26 optimizer.lr 0.004314354828484696 +826 26 negative_sampler.num_negs_per_pos 97.0 +826 26 training.batch_size 1.0 +826 27 model.embedding_dim 0.0 +826 27 model.scoring_fct_norm 1.0 +826 27 optimizer.lr 0.05337632932180749 +826 27 negative_sampler.num_negs_per_pos 76.0 +826 27 training.batch_size 1.0 +826 28 model.embedding_dim 2.0 +826 28 model.scoring_fct_norm 1.0 +826 28 optimizer.lr 0.08638946543640386 +826 28 negative_sampler.num_negs_per_pos 85.0 +826 28 training.batch_size 0.0 +826 29 model.embedding_dim 1.0 +826 29 
model.scoring_fct_norm 1.0 +826 29 optimizer.lr 0.002542329576530123 +826 29 negative_sampler.num_negs_per_pos 58.0 +826 29 training.batch_size 1.0 +826 30 model.embedding_dim 2.0 +826 30 model.scoring_fct_norm 1.0 +826 30 optimizer.lr 0.04999963381144113 +826 30 negative_sampler.num_negs_per_pos 17.0 +826 30 training.batch_size 0.0 +826 31 model.embedding_dim 0.0 +826 31 model.scoring_fct_norm 1.0 +826 31 optimizer.lr 0.07461187933608537 +826 31 negative_sampler.num_negs_per_pos 13.0 +826 31 training.batch_size 0.0 +826 32 model.embedding_dim 0.0 +826 32 model.scoring_fct_norm 2.0 +826 32 optimizer.lr 0.08148887633916127 +826 32 negative_sampler.num_negs_per_pos 13.0 +826 32 training.batch_size 1.0 +826 33 model.embedding_dim 2.0 +826 33 model.scoring_fct_norm 2.0 +826 33 optimizer.lr 0.024430773139859637 +826 33 negative_sampler.num_negs_per_pos 81.0 +826 33 training.batch_size 0.0 +826 34 model.embedding_dim 2.0 +826 34 model.scoring_fct_norm 1.0 +826 34 optimizer.lr 0.013812436859689818 +826 34 negative_sampler.num_negs_per_pos 8.0 +826 34 training.batch_size 1.0 +826 35 model.embedding_dim 1.0 +826 35 model.scoring_fct_norm 2.0 +826 35 optimizer.lr 0.009496953741391973 +826 35 negative_sampler.num_negs_per_pos 4.0 +826 35 training.batch_size 1.0 +826 36 model.embedding_dim 2.0 +826 36 model.scoring_fct_norm 2.0 +826 36 optimizer.lr 0.033320948288619666 +826 36 negative_sampler.num_negs_per_pos 42.0 +826 36 training.batch_size 2.0 +826 37 model.embedding_dim 0.0 +826 37 model.scoring_fct_norm 1.0 +826 37 optimizer.lr 0.0034274256843653036 +826 37 negative_sampler.num_negs_per_pos 45.0 +826 37 training.batch_size 2.0 +826 38 model.embedding_dim 0.0 +826 38 model.scoring_fct_norm 2.0 +826 38 optimizer.lr 0.001025206227547188 +826 38 negative_sampler.num_negs_per_pos 75.0 +826 38 training.batch_size 0.0 +826 39 model.embedding_dim 2.0 +826 39 model.scoring_fct_norm 1.0 +826 39 optimizer.lr 0.014507468891723219 +826 39 negative_sampler.num_negs_per_pos 19.0 +826 39 
training.batch_size 2.0 +826 40 model.embedding_dim 2.0 +826 40 model.scoring_fct_norm 1.0 +826 40 optimizer.lr 0.04556239067544406 +826 40 negative_sampler.num_negs_per_pos 58.0 +826 40 training.batch_size 1.0 +826 41 model.embedding_dim 1.0 +826 41 model.scoring_fct_norm 2.0 +826 41 optimizer.lr 0.0017968500019722112 +826 41 negative_sampler.num_negs_per_pos 84.0 +826 41 training.batch_size 2.0 +826 42 model.embedding_dim 0.0 +826 42 model.scoring_fct_norm 2.0 +826 42 optimizer.lr 0.0031135744596455747 +826 42 negative_sampler.num_negs_per_pos 23.0 +826 42 training.batch_size 1.0 +826 43 model.embedding_dim 0.0 +826 43 model.scoring_fct_norm 1.0 +826 43 optimizer.lr 0.010762596271284708 +826 43 negative_sampler.num_negs_per_pos 80.0 +826 43 training.batch_size 0.0 +826 44 model.embedding_dim 1.0 +826 44 model.scoring_fct_norm 1.0 +826 44 optimizer.lr 0.02738880889975217 +826 44 negative_sampler.num_negs_per_pos 46.0 +826 44 training.batch_size 0.0 +826 45 model.embedding_dim 0.0 +826 45 model.scoring_fct_norm 2.0 +826 45 optimizer.lr 0.053110059184054786 +826 45 negative_sampler.num_negs_per_pos 32.0 +826 45 training.batch_size 1.0 +826 46 model.embedding_dim 2.0 +826 46 model.scoring_fct_norm 1.0 +826 46 optimizer.lr 0.005853466240077264 +826 46 negative_sampler.num_negs_per_pos 15.0 +826 46 training.batch_size 0.0 +826 47 model.embedding_dim 2.0 +826 47 model.scoring_fct_norm 2.0 +826 47 optimizer.lr 0.0024996403230492154 +826 47 negative_sampler.num_negs_per_pos 73.0 +826 47 training.batch_size 1.0 +826 48 model.embedding_dim 2.0 +826 48 model.scoring_fct_norm 2.0 +826 48 optimizer.lr 0.018886825301140303 +826 48 negative_sampler.num_negs_per_pos 72.0 +826 48 training.batch_size 0.0 +826 49 model.embedding_dim 0.0 +826 49 model.scoring_fct_norm 1.0 +826 49 optimizer.lr 0.02219084166187353 +826 49 negative_sampler.num_negs_per_pos 96.0 +826 49 training.batch_size 1.0 +826 50 model.embedding_dim 0.0 +826 50 model.scoring_fct_norm 2.0 +826 50 optimizer.lr 
0.0017732672735060473 +826 50 negative_sampler.num_negs_per_pos 50.0 +826 50 training.batch_size 1.0 +826 51 model.embedding_dim 2.0 +826 51 model.scoring_fct_norm 1.0 +826 51 optimizer.lr 0.001132073177722618 +826 51 negative_sampler.num_negs_per_pos 40.0 +826 51 training.batch_size 2.0 +826 52 model.embedding_dim 0.0 +826 52 model.scoring_fct_norm 1.0 +826 52 optimizer.lr 0.005707431904237138 +826 52 negative_sampler.num_negs_per_pos 45.0 +826 52 training.batch_size 1.0 +826 53 model.embedding_dim 2.0 +826 53 model.scoring_fct_norm 1.0 +826 53 optimizer.lr 0.0014044689523026186 +826 53 negative_sampler.num_negs_per_pos 17.0 +826 53 training.batch_size 1.0 +826 54 model.embedding_dim 2.0 +826 54 model.scoring_fct_norm 2.0 +826 54 optimizer.lr 0.0023807146544208677 +826 54 negative_sampler.num_negs_per_pos 97.0 +826 54 training.batch_size 2.0 +826 55 model.embedding_dim 2.0 +826 55 model.scoring_fct_norm 2.0 +826 55 optimizer.lr 0.007510100029651691 +826 55 negative_sampler.num_negs_per_pos 15.0 +826 55 training.batch_size 1.0 +826 56 model.embedding_dim 1.0 +826 56 model.scoring_fct_norm 2.0 +826 56 optimizer.lr 0.015152731802029919 +826 56 negative_sampler.num_negs_per_pos 70.0 +826 56 training.batch_size 0.0 +826 57 model.embedding_dim 1.0 +826 57 model.scoring_fct_norm 1.0 +826 57 optimizer.lr 0.02020730262481424 +826 57 negative_sampler.num_negs_per_pos 16.0 +826 57 training.batch_size 1.0 +826 58 model.embedding_dim 1.0 +826 58 model.scoring_fct_norm 2.0 +826 58 optimizer.lr 0.006319709765570588 +826 58 negative_sampler.num_negs_per_pos 85.0 +826 58 training.batch_size 2.0 +826 59 model.embedding_dim 2.0 +826 59 model.scoring_fct_norm 2.0 +826 59 optimizer.lr 0.0037046478884404776 +826 59 negative_sampler.num_negs_per_pos 75.0 +826 59 training.batch_size 0.0 +826 60 model.embedding_dim 0.0 +826 60 model.scoring_fct_norm 2.0 +826 60 optimizer.lr 0.01598598503773844 +826 60 negative_sampler.num_negs_per_pos 51.0 +826 60 training.batch_size 1.0 +826 61 
model.embedding_dim 1.0 +826 61 model.scoring_fct_norm 1.0 +826 61 optimizer.lr 0.0034455094235302033 +826 61 negative_sampler.num_negs_per_pos 65.0 +826 61 training.batch_size 2.0 +826 62 model.embedding_dim 0.0 +826 62 model.scoring_fct_norm 1.0 +826 62 optimizer.lr 0.05068719475646361 +826 62 negative_sampler.num_negs_per_pos 54.0 +826 62 training.batch_size 1.0 +826 63 model.embedding_dim 1.0 +826 63 model.scoring_fct_norm 2.0 +826 63 optimizer.lr 0.013321916516673247 +826 63 negative_sampler.num_negs_per_pos 17.0 +826 63 training.batch_size 2.0 +826 64 model.embedding_dim 1.0 +826 64 model.scoring_fct_norm 2.0 +826 64 optimizer.lr 0.01591283260139595 +826 64 negative_sampler.num_negs_per_pos 79.0 +826 64 training.batch_size 2.0 +826 65 model.embedding_dim 0.0 +826 65 model.scoring_fct_norm 1.0 +826 65 optimizer.lr 0.09118082043739432 +826 65 negative_sampler.num_negs_per_pos 17.0 +826 65 training.batch_size 1.0 +826 66 model.embedding_dim 2.0 +826 66 model.scoring_fct_norm 1.0 +826 66 optimizer.lr 0.08667908466808581 +826 66 negative_sampler.num_negs_per_pos 11.0 +826 66 training.batch_size 1.0 +826 67 model.embedding_dim 1.0 +826 67 model.scoring_fct_norm 1.0 +826 67 optimizer.lr 0.05378913886243408 +826 67 negative_sampler.num_negs_per_pos 58.0 +826 67 training.batch_size 2.0 +826 68 model.embedding_dim 1.0 +826 68 model.scoring_fct_norm 1.0 +826 68 optimizer.lr 0.01131110270516611 +826 68 negative_sampler.num_negs_per_pos 73.0 +826 68 training.batch_size 1.0 +826 69 model.embedding_dim 2.0 +826 69 model.scoring_fct_norm 1.0 +826 69 optimizer.lr 0.013404375534099474 +826 69 negative_sampler.num_negs_per_pos 59.0 +826 69 training.batch_size 2.0 +826 70 model.embedding_dim 2.0 +826 70 model.scoring_fct_norm 2.0 +826 70 optimizer.lr 0.0017572705583627207 +826 70 negative_sampler.num_negs_per_pos 34.0 +826 70 training.batch_size 1.0 +826 71 model.embedding_dim 0.0 +826 71 model.scoring_fct_norm 2.0 +826 71 optimizer.lr 0.001489926677080422 +826 71 
negative_sampler.num_negs_per_pos 5.0 +826 71 training.batch_size 2.0 +826 72 model.embedding_dim 2.0 +826 72 model.scoring_fct_norm 2.0 +826 72 optimizer.lr 0.0010172020550756739 +826 72 negative_sampler.num_negs_per_pos 29.0 +826 72 training.batch_size 2.0 +826 73 model.embedding_dim 0.0 +826 73 model.scoring_fct_norm 1.0 +826 73 optimizer.lr 0.0011373055088540634 +826 73 negative_sampler.num_negs_per_pos 68.0 +826 73 training.batch_size 1.0 +826 74 model.embedding_dim 1.0 +826 74 model.scoring_fct_norm 1.0 +826 74 optimizer.lr 0.0038651062124922296 +826 74 negative_sampler.num_negs_per_pos 36.0 +826 74 training.batch_size 2.0 +826 75 model.embedding_dim 2.0 +826 75 model.scoring_fct_norm 2.0 +826 75 optimizer.lr 0.0011029924034111173 +826 75 negative_sampler.num_negs_per_pos 70.0 +826 75 training.batch_size 1.0 +826 76 model.embedding_dim 0.0 +826 76 model.scoring_fct_norm 1.0 +826 76 optimizer.lr 0.0011929092695841163 +826 76 negative_sampler.num_negs_per_pos 40.0 +826 76 training.batch_size 2.0 +826 77 model.embedding_dim 0.0 +826 77 model.scoring_fct_norm 1.0 +826 77 optimizer.lr 0.0641774765801337 +826 77 negative_sampler.num_negs_per_pos 95.0 +826 77 training.batch_size 2.0 +826 78 model.embedding_dim 2.0 +826 78 model.scoring_fct_norm 1.0 +826 78 optimizer.lr 0.0026898833909253894 +826 78 negative_sampler.num_negs_per_pos 31.0 +826 78 training.batch_size 2.0 +826 79 model.embedding_dim 1.0 +826 79 model.scoring_fct_norm 1.0 +826 79 optimizer.lr 0.00289426757762576 +826 79 negative_sampler.num_negs_per_pos 76.0 +826 79 training.batch_size 2.0 +826 80 model.embedding_dim 2.0 +826 80 model.scoring_fct_norm 1.0 +826 80 optimizer.lr 0.0014912526258842203 +826 80 negative_sampler.num_negs_per_pos 38.0 +826 80 training.batch_size 2.0 +826 81 model.embedding_dim 0.0 +826 81 model.scoring_fct_norm 2.0 +826 81 optimizer.lr 0.014391370108867842 +826 81 negative_sampler.num_negs_per_pos 43.0 +826 81 training.batch_size 1.0 +826 82 model.embedding_dim 2.0 +826 82 
model.scoring_fct_norm 1.0 +826 82 optimizer.lr 0.01010036727959525 +826 82 negative_sampler.num_negs_per_pos 58.0 +826 82 training.batch_size 2.0 +826 83 model.embedding_dim 0.0 +826 83 model.scoring_fct_norm 2.0 +826 83 optimizer.lr 0.0035326661439185475 +826 83 negative_sampler.num_negs_per_pos 11.0 +826 83 training.batch_size 2.0 +826 84 model.embedding_dim 2.0 +826 84 model.scoring_fct_norm 1.0 +826 84 optimizer.lr 0.013673154681410218 +826 84 negative_sampler.num_negs_per_pos 44.0 +826 84 training.batch_size 0.0 +826 85 model.embedding_dim 1.0 +826 85 model.scoring_fct_norm 2.0 +826 85 optimizer.lr 0.0049691095434396294 +826 85 negative_sampler.num_negs_per_pos 81.0 +826 85 training.batch_size 2.0 +826 86 model.embedding_dim 0.0 +826 86 model.scoring_fct_norm 1.0 +826 86 optimizer.lr 0.01587577173328781 +826 86 negative_sampler.num_negs_per_pos 74.0 +826 86 training.batch_size 1.0 +826 87 model.embedding_dim 2.0 +826 87 model.scoring_fct_norm 2.0 +826 87 optimizer.lr 0.005054633715444111 +826 87 negative_sampler.num_negs_per_pos 36.0 +826 87 training.batch_size 0.0 +826 88 model.embedding_dim 2.0 +826 88 model.scoring_fct_norm 1.0 +826 88 optimizer.lr 0.07955170380836431 +826 88 negative_sampler.num_negs_per_pos 34.0 +826 88 training.batch_size 1.0 +826 89 model.embedding_dim 2.0 +826 89 model.scoring_fct_norm 1.0 +826 89 optimizer.lr 0.012924632705172757 +826 89 negative_sampler.num_negs_per_pos 99.0 +826 89 training.batch_size 0.0 +826 90 model.embedding_dim 1.0 +826 90 model.scoring_fct_norm 1.0 +826 90 optimizer.lr 0.0021933569832589526 +826 90 negative_sampler.num_negs_per_pos 75.0 +826 90 training.batch_size 0.0 +826 91 model.embedding_dim 1.0 +826 91 model.scoring_fct_norm 2.0 +826 91 optimizer.lr 0.038128166319896066 +826 91 negative_sampler.num_negs_per_pos 30.0 +826 91 training.batch_size 0.0 +826 92 model.embedding_dim 0.0 +826 92 model.scoring_fct_norm 2.0 +826 92 optimizer.lr 0.005218253609456182 +826 92 negative_sampler.num_negs_per_pos 65.0 
+826 92 training.batch_size 0.0 +826 93 model.embedding_dim 2.0 +826 93 model.scoring_fct_norm 1.0 +826 93 optimizer.lr 0.004997212535709821 +826 93 negative_sampler.num_negs_per_pos 92.0 +826 93 training.batch_size 1.0 +826 94 model.embedding_dim 1.0 +826 94 model.scoring_fct_norm 1.0 +826 94 optimizer.lr 0.0030665971493871267 +826 94 negative_sampler.num_negs_per_pos 18.0 +826 94 training.batch_size 0.0 +826 95 model.embedding_dim 0.0 +826 95 model.scoring_fct_norm 1.0 +826 95 optimizer.lr 0.003466564140728919 +826 95 negative_sampler.num_negs_per_pos 23.0 +826 95 training.batch_size 2.0 +826 96 model.embedding_dim 1.0 +826 96 model.scoring_fct_norm 1.0 +826 96 optimizer.lr 0.0019102119326947812 +826 96 negative_sampler.num_negs_per_pos 87.0 +826 96 training.batch_size 1.0 +826 97 model.embedding_dim 1.0 +826 97 model.scoring_fct_norm 2.0 +826 97 optimizer.lr 0.011830015804353488 +826 97 negative_sampler.num_negs_per_pos 23.0 +826 97 training.batch_size 0.0 +826 98 model.embedding_dim 1.0 +826 98 model.scoring_fct_norm 1.0 +826 98 optimizer.lr 0.007218088818285375 +826 98 negative_sampler.num_negs_per_pos 19.0 +826 98 training.batch_size 0.0 +826 99 model.embedding_dim 1.0 +826 99 model.scoring_fct_norm 2.0 +826 99 optimizer.lr 0.06725117374343552 +826 99 negative_sampler.num_negs_per_pos 32.0 +826 99 training.batch_size 1.0 +826 100 model.embedding_dim 1.0 +826 100 model.scoring_fct_norm 2.0 +826 100 optimizer.lr 0.0029497249888830413 +826 100 negative_sampler.num_negs_per_pos 86.0 +826 100 training.batch_size 1.0 +826 1 dataset """kinships""" +826 1 model """transe""" +826 1 loss """softplus""" +826 1 regularizer """no""" +826 1 optimizer """adam""" +826 1 training_loop """owa""" +826 1 negative_sampler """basic""" +826 1 evaluator """rankbased""" +826 2 dataset """kinships""" +826 2 model """transe""" +826 2 loss """softplus""" +826 2 regularizer """no""" +826 2 optimizer """adam""" +826 2 training_loop """owa""" +826 2 negative_sampler """basic""" +826 2 
evaluator """rankbased""" +826 3 dataset """kinships""" +826 3 model """transe""" +826 3 loss """softplus""" +826 3 regularizer """no""" +826 3 optimizer """adam""" +826 3 training_loop """owa""" +826 3 negative_sampler """basic""" +826 3 evaluator """rankbased""" +826 4 dataset """kinships""" +826 4 model """transe""" +826 4 loss """softplus""" +826 4 regularizer """no""" +826 4 optimizer """adam""" +826 4 training_loop """owa""" +826 4 negative_sampler """basic""" +826 4 evaluator """rankbased""" +826 5 dataset """kinships""" +826 5 model """transe""" +826 5 loss """softplus""" +826 5 regularizer """no""" +826 5 optimizer """adam""" +826 5 training_loop """owa""" +826 5 negative_sampler """basic""" +826 5 evaluator """rankbased""" +826 6 dataset """kinships""" +826 6 model """transe""" +826 6 loss """softplus""" +826 6 regularizer """no""" +826 6 optimizer """adam""" +826 6 training_loop """owa""" +826 6 negative_sampler """basic""" +826 6 evaluator """rankbased""" +826 7 dataset """kinships""" +826 7 model """transe""" +826 7 loss """softplus""" +826 7 regularizer """no""" +826 7 optimizer """adam""" +826 7 training_loop """owa""" +826 7 negative_sampler """basic""" +826 7 evaluator """rankbased""" +826 8 dataset """kinships""" +826 8 model """transe""" +826 8 loss """softplus""" +826 8 regularizer """no""" +826 8 optimizer """adam""" +826 8 training_loop """owa""" +826 8 negative_sampler """basic""" +826 8 evaluator """rankbased""" +826 9 dataset """kinships""" +826 9 model """transe""" +826 9 loss """softplus""" +826 9 regularizer """no""" +826 9 optimizer """adam""" +826 9 training_loop """owa""" +826 9 negative_sampler """basic""" +826 9 evaluator """rankbased""" +826 10 dataset """kinships""" +826 10 model """transe""" +826 10 loss """softplus""" +826 10 regularizer """no""" +826 10 optimizer """adam""" +826 10 training_loop """owa""" +826 10 negative_sampler """basic""" +826 10 evaluator """rankbased""" +826 11 dataset """kinships""" +826 11 model 
"""transe""" +826 11 loss """softplus""" +826 11 regularizer """no""" +826 11 optimizer """adam""" +826 11 training_loop """owa""" +826 11 negative_sampler """basic""" +826 11 evaluator """rankbased""" +826 12 dataset """kinships""" +826 12 model """transe""" +826 12 loss """softplus""" +826 12 regularizer """no""" +826 12 optimizer """adam""" +826 12 training_loop """owa""" +826 12 negative_sampler """basic""" +826 12 evaluator """rankbased""" +826 13 dataset """kinships""" +826 13 model """transe""" +826 13 loss """softplus""" +826 13 regularizer """no""" +826 13 optimizer """adam""" +826 13 training_loop """owa""" +826 13 negative_sampler """basic""" +826 13 evaluator """rankbased""" +826 14 dataset """kinships""" +826 14 model """transe""" +826 14 loss """softplus""" +826 14 regularizer """no""" +826 14 optimizer """adam""" +826 14 training_loop """owa""" +826 14 negative_sampler """basic""" +826 14 evaluator """rankbased""" +826 15 dataset """kinships""" +826 15 model """transe""" +826 15 loss """softplus""" +826 15 regularizer """no""" +826 15 optimizer """adam""" +826 15 training_loop """owa""" +826 15 negative_sampler """basic""" +826 15 evaluator """rankbased""" +826 16 dataset """kinships""" +826 16 model """transe""" +826 16 loss """softplus""" +826 16 regularizer """no""" +826 16 optimizer """adam""" +826 16 training_loop """owa""" +826 16 negative_sampler """basic""" +826 16 evaluator """rankbased""" +826 17 dataset """kinships""" +826 17 model """transe""" +826 17 loss """softplus""" +826 17 regularizer """no""" +826 17 optimizer """adam""" +826 17 training_loop """owa""" +826 17 negative_sampler """basic""" +826 17 evaluator """rankbased""" +826 18 dataset """kinships""" +826 18 model """transe""" +826 18 loss """softplus""" +826 18 regularizer """no""" +826 18 optimizer """adam""" +826 18 training_loop """owa""" +826 18 negative_sampler """basic""" +826 18 evaluator """rankbased""" +826 19 dataset """kinships""" +826 19 model """transe""" +826 19 
loss """softplus""" +826 19 regularizer """no""" +826 19 optimizer """adam""" +826 19 training_loop """owa""" +826 19 negative_sampler """basic""" +826 19 evaluator """rankbased""" +826 20 dataset """kinships""" +826 20 model """transe""" +826 20 loss """softplus""" +826 20 regularizer """no""" +826 20 optimizer """adam""" +826 20 training_loop """owa""" +826 20 negative_sampler """basic""" +826 20 evaluator """rankbased""" +826 21 dataset """kinships""" +826 21 model """transe""" +826 21 loss """softplus""" +826 21 regularizer """no""" +826 21 optimizer """adam""" +826 21 training_loop """owa""" +826 21 negative_sampler """basic""" +826 21 evaluator """rankbased""" +826 22 dataset """kinships""" +826 22 model """transe""" +826 22 loss """softplus""" +826 22 regularizer """no""" +826 22 optimizer """adam""" +826 22 training_loop """owa""" +826 22 negative_sampler """basic""" +826 22 evaluator """rankbased""" +826 23 dataset """kinships""" +826 23 model """transe""" +826 23 loss """softplus""" +826 23 regularizer """no""" +826 23 optimizer """adam""" +826 23 training_loop """owa""" +826 23 negative_sampler """basic""" +826 23 evaluator """rankbased""" +826 24 dataset """kinships""" +826 24 model """transe""" +826 24 loss """softplus""" +826 24 regularizer """no""" +826 24 optimizer """adam""" +826 24 training_loop """owa""" +826 24 negative_sampler """basic""" +826 24 evaluator """rankbased""" +826 25 dataset """kinships""" +826 25 model """transe""" +826 25 loss """softplus""" +826 25 regularizer """no""" +826 25 optimizer """adam""" +826 25 training_loop """owa""" +826 25 negative_sampler """basic""" +826 25 evaluator """rankbased""" +826 26 dataset """kinships""" +826 26 model """transe""" +826 26 loss """softplus""" +826 26 regularizer """no""" +826 26 optimizer """adam""" +826 26 training_loop """owa""" +826 26 negative_sampler """basic""" +826 26 evaluator """rankbased""" +826 27 dataset """kinships""" +826 27 model """transe""" +826 27 loss """softplus""" 
+826 27 regularizer """no""" +826 27 optimizer """adam""" +826 27 training_loop """owa""" +826 27 negative_sampler """basic""" +826 27 evaluator """rankbased""" +826 28 dataset """kinships""" +826 28 model """transe""" +826 28 loss """softplus""" +826 28 regularizer """no""" +826 28 optimizer """adam""" +826 28 training_loop """owa""" +826 28 negative_sampler """basic""" +826 28 evaluator """rankbased""" +826 29 dataset """kinships""" +826 29 model """transe""" +826 29 loss """softplus""" +826 29 regularizer """no""" +826 29 optimizer """adam""" +826 29 training_loop """owa""" +826 29 negative_sampler """basic""" +826 29 evaluator """rankbased""" +826 30 dataset """kinships""" +826 30 model """transe""" +826 30 loss """softplus""" +826 30 regularizer """no""" +826 30 optimizer """adam""" +826 30 training_loop """owa""" +826 30 negative_sampler """basic""" +826 30 evaluator """rankbased""" +826 31 dataset """kinships""" +826 31 model """transe""" +826 31 loss """softplus""" +826 31 regularizer """no""" +826 31 optimizer """adam""" +826 31 training_loop """owa""" +826 31 negative_sampler """basic""" +826 31 evaluator """rankbased""" +826 32 dataset """kinships""" +826 32 model """transe""" +826 32 loss """softplus""" +826 32 regularizer """no""" +826 32 optimizer """adam""" +826 32 training_loop """owa""" +826 32 negative_sampler """basic""" +826 32 evaluator """rankbased""" +826 33 dataset """kinships""" +826 33 model """transe""" +826 33 loss """softplus""" +826 33 regularizer """no""" +826 33 optimizer """adam""" +826 33 training_loop """owa""" +826 33 negative_sampler """basic""" +826 33 evaluator """rankbased""" +826 34 dataset """kinships""" +826 34 model """transe""" +826 34 loss """softplus""" +826 34 regularizer """no""" +826 34 optimizer """adam""" +826 34 training_loop """owa""" +826 34 negative_sampler """basic""" +826 34 evaluator """rankbased""" +826 35 dataset """kinships""" +826 35 model """transe""" +826 35 loss """softplus""" +826 35 regularizer 
"""no""" +826 35 optimizer """adam""" +826 35 training_loop """owa""" +826 35 negative_sampler """basic""" +826 35 evaluator """rankbased""" +826 36 dataset """kinships""" +826 36 model """transe""" +826 36 loss """softplus""" +826 36 regularizer """no""" +826 36 optimizer """adam""" +826 36 training_loop """owa""" +826 36 negative_sampler """basic""" +826 36 evaluator """rankbased""" +826 37 dataset """kinships""" +826 37 model """transe""" +826 37 loss """softplus""" +826 37 regularizer """no""" +826 37 optimizer """adam""" +826 37 training_loop """owa""" +826 37 negative_sampler """basic""" +826 37 evaluator """rankbased""" +826 38 dataset """kinships""" +826 38 model """transe""" +826 38 loss """softplus""" +826 38 regularizer """no""" +826 38 optimizer """adam""" +826 38 training_loop """owa""" +826 38 negative_sampler """basic""" +826 38 evaluator """rankbased""" +826 39 dataset """kinships""" +826 39 model """transe""" +826 39 loss """softplus""" +826 39 regularizer """no""" +826 39 optimizer """adam""" +826 39 training_loop """owa""" +826 39 negative_sampler """basic""" +826 39 evaluator """rankbased""" +826 40 dataset """kinships""" +826 40 model """transe""" +826 40 loss """softplus""" +826 40 regularizer """no""" +826 40 optimizer """adam""" +826 40 training_loop """owa""" +826 40 negative_sampler """basic""" +826 40 evaluator """rankbased""" +826 41 dataset """kinships""" +826 41 model """transe""" +826 41 loss """softplus""" +826 41 regularizer """no""" +826 41 optimizer """adam""" +826 41 training_loop """owa""" +826 41 negative_sampler """basic""" +826 41 evaluator """rankbased""" +826 42 dataset """kinships""" +826 42 model """transe""" +826 42 loss """softplus""" +826 42 regularizer """no""" +826 42 optimizer """adam""" +826 42 training_loop """owa""" +826 42 negative_sampler """basic""" +826 42 evaluator """rankbased""" +826 43 dataset """kinships""" +826 43 model """transe""" +826 43 loss """softplus""" +826 43 regularizer """no""" +826 43 
optimizer """adam""" +826 43 training_loop """owa""" +826 43 negative_sampler """basic""" +826 43 evaluator """rankbased""" +826 44 dataset """kinships""" +826 44 model """transe""" +826 44 loss """softplus""" +826 44 regularizer """no""" +826 44 optimizer """adam""" +826 44 training_loop """owa""" +826 44 negative_sampler """basic""" +826 44 evaluator """rankbased""" +826 45 dataset """kinships""" +826 45 model """transe""" +826 45 loss """softplus""" +826 45 regularizer """no""" +826 45 optimizer """adam""" +826 45 training_loop """owa""" +826 45 negative_sampler """basic""" +826 45 evaluator """rankbased""" +826 46 dataset """kinships""" +826 46 model """transe""" +826 46 loss """softplus""" +826 46 regularizer """no""" +826 46 optimizer """adam""" +826 46 training_loop """owa""" +826 46 negative_sampler """basic""" +826 46 evaluator """rankbased""" +826 47 dataset """kinships""" +826 47 model """transe""" +826 47 loss """softplus""" +826 47 regularizer """no""" +826 47 optimizer """adam""" +826 47 training_loop """owa""" +826 47 negative_sampler """basic""" +826 47 evaluator """rankbased""" +826 48 dataset """kinships""" +826 48 model """transe""" +826 48 loss """softplus""" +826 48 regularizer """no""" +826 48 optimizer """adam""" +826 48 training_loop """owa""" +826 48 negative_sampler """basic""" +826 48 evaluator """rankbased""" +826 49 dataset """kinships""" +826 49 model """transe""" +826 49 loss """softplus""" +826 49 regularizer """no""" +826 49 optimizer """adam""" +826 49 training_loop """owa""" +826 49 negative_sampler """basic""" +826 49 evaluator """rankbased""" +826 50 dataset """kinships""" +826 50 model """transe""" +826 50 loss """softplus""" +826 50 regularizer """no""" +826 50 optimizer """adam""" +826 50 training_loop """owa""" +826 50 negative_sampler """basic""" +826 50 evaluator """rankbased""" +826 51 dataset """kinships""" +826 51 model """transe""" +826 51 loss """softplus""" +826 51 regularizer """no""" +826 51 optimizer """adam""" 
+826 51 training_loop """owa""" +826 51 negative_sampler """basic""" +826 51 evaluator """rankbased""" +826 52 dataset """kinships""" +826 52 model """transe""" +826 52 loss """softplus""" +826 52 regularizer """no""" +826 52 optimizer """adam""" +826 52 training_loop """owa""" +826 52 negative_sampler """basic""" +826 52 evaluator """rankbased""" +826 53 dataset """kinships""" +826 53 model """transe""" +826 53 loss """softplus""" +826 53 regularizer """no""" +826 53 optimizer """adam""" +826 53 training_loop """owa""" +826 53 negative_sampler """basic""" +826 53 evaluator """rankbased""" +826 54 dataset """kinships""" +826 54 model """transe""" +826 54 loss """softplus""" +826 54 regularizer """no""" +826 54 optimizer """adam""" +826 54 training_loop """owa""" +826 54 negative_sampler """basic""" +826 54 evaluator """rankbased""" +826 55 dataset """kinships""" +826 55 model """transe""" +826 55 loss """softplus""" +826 55 regularizer """no""" +826 55 optimizer """adam""" +826 55 training_loop """owa""" +826 55 negative_sampler """basic""" +826 55 evaluator """rankbased""" +826 56 dataset """kinships""" +826 56 model """transe""" +826 56 loss """softplus""" +826 56 regularizer """no""" +826 56 optimizer """adam""" +826 56 training_loop """owa""" +826 56 negative_sampler """basic""" +826 56 evaluator """rankbased""" +826 57 dataset """kinships""" +826 57 model """transe""" +826 57 loss """softplus""" +826 57 regularizer """no""" +826 57 optimizer """adam""" +826 57 training_loop """owa""" +826 57 negative_sampler """basic""" +826 57 evaluator """rankbased""" +826 58 dataset """kinships""" +826 58 model """transe""" +826 58 loss """softplus""" +826 58 regularizer """no""" +826 58 optimizer """adam""" +826 58 training_loop """owa""" +826 58 negative_sampler """basic""" +826 58 evaluator """rankbased""" +826 59 dataset """kinships""" +826 59 model """transe""" +826 59 loss """softplus""" +826 59 regularizer """no""" +826 59 optimizer """adam""" +826 59 training_loop 
"""owa""" +826 59 negative_sampler """basic""" +826 59 evaluator """rankbased""" +826 60 dataset """kinships""" +826 60 model """transe""" +826 60 loss """softplus""" +826 60 regularizer """no""" +826 60 optimizer """adam""" +826 60 training_loop """owa""" +826 60 negative_sampler """basic""" +826 60 evaluator """rankbased""" +826 61 dataset """kinships""" +826 61 model """transe""" +826 61 loss """softplus""" +826 61 regularizer """no""" +826 61 optimizer """adam""" +826 61 training_loop """owa""" +826 61 negative_sampler """basic""" +826 61 evaluator """rankbased""" +826 62 dataset """kinships""" +826 62 model """transe""" +826 62 loss """softplus""" +826 62 regularizer """no""" +826 62 optimizer """adam""" +826 62 training_loop """owa""" +826 62 negative_sampler """basic""" +826 62 evaluator """rankbased""" +826 63 dataset """kinships""" +826 63 model """transe""" +826 63 loss """softplus""" +826 63 regularizer """no""" +826 63 optimizer """adam""" +826 63 training_loop """owa""" +826 63 negative_sampler """basic""" +826 63 evaluator """rankbased""" +826 64 dataset """kinships""" +826 64 model """transe""" +826 64 loss """softplus""" +826 64 regularizer """no""" +826 64 optimizer """adam""" +826 64 training_loop """owa""" +826 64 negative_sampler """basic""" +826 64 evaluator """rankbased""" +826 65 dataset """kinships""" +826 65 model """transe""" +826 65 loss """softplus""" +826 65 regularizer """no""" +826 65 optimizer """adam""" +826 65 training_loop """owa""" +826 65 negative_sampler """basic""" +826 65 evaluator """rankbased""" +826 66 dataset """kinships""" +826 66 model """transe""" +826 66 loss """softplus""" +826 66 regularizer """no""" +826 66 optimizer """adam""" +826 66 training_loop """owa""" +826 66 negative_sampler """basic""" +826 66 evaluator """rankbased""" +826 67 dataset """kinships""" +826 67 model """transe""" +826 67 loss """softplus""" +826 67 regularizer """no""" +826 67 optimizer """adam""" +826 67 training_loop """owa""" +826 67 
negative_sampler """basic""" +826 67 evaluator """rankbased""" +826 68 dataset """kinships""" +826 68 model """transe""" +826 68 loss """softplus""" +826 68 regularizer """no""" +826 68 optimizer """adam""" +826 68 training_loop """owa""" +826 68 negative_sampler """basic""" +826 68 evaluator """rankbased""" +826 69 dataset """kinships""" +826 69 model """transe""" +826 69 loss """softplus""" +826 69 regularizer """no""" +826 69 optimizer """adam""" +826 69 training_loop """owa""" +826 69 negative_sampler """basic""" +826 69 evaluator """rankbased""" +826 70 dataset """kinships""" +826 70 model """transe""" +826 70 loss """softplus""" +826 70 regularizer """no""" +826 70 optimizer """adam""" +826 70 training_loop """owa""" +826 70 negative_sampler """basic""" +826 70 evaluator """rankbased""" +826 71 dataset """kinships""" +826 71 model """transe""" +826 71 loss """softplus""" +826 71 regularizer """no""" +826 71 optimizer """adam""" +826 71 training_loop """owa""" +826 71 negative_sampler """basic""" +826 71 evaluator """rankbased""" +826 72 dataset """kinships""" +826 72 model """transe""" +826 72 loss """softplus""" +826 72 regularizer """no""" +826 72 optimizer """adam""" +826 72 training_loop """owa""" +826 72 negative_sampler """basic""" +826 72 evaluator """rankbased""" +826 73 dataset """kinships""" +826 73 model """transe""" +826 73 loss """softplus""" +826 73 regularizer """no""" +826 73 optimizer """adam""" +826 73 training_loop """owa""" +826 73 negative_sampler """basic""" +826 73 evaluator """rankbased""" +826 74 dataset """kinships""" +826 74 model """transe""" +826 74 loss """softplus""" +826 74 regularizer """no""" +826 74 optimizer """adam""" +826 74 training_loop """owa""" +826 74 negative_sampler """basic""" +826 74 evaluator """rankbased""" +826 75 dataset """kinships""" +826 75 model """transe""" +826 75 loss """softplus""" +826 75 regularizer """no""" +826 75 optimizer """adam""" +826 75 training_loop """owa""" +826 75 negative_sampler 
"""basic""" +826 75 evaluator """rankbased""" +826 76 dataset """kinships""" +826 76 model """transe""" +826 76 loss """softplus""" +826 76 regularizer """no""" +826 76 optimizer """adam""" +826 76 training_loop """owa""" +826 76 negative_sampler """basic""" +826 76 evaluator """rankbased""" +826 77 dataset """kinships""" +826 77 model """transe""" +826 77 loss """softplus""" +826 77 regularizer """no""" +826 77 optimizer """adam""" +826 77 training_loop """owa""" +826 77 negative_sampler """basic""" +826 77 evaluator """rankbased""" +826 78 dataset """kinships""" +826 78 model """transe""" +826 78 loss """softplus""" +826 78 regularizer """no""" +826 78 optimizer """adam""" +826 78 training_loop """owa""" +826 78 negative_sampler """basic""" +826 78 evaluator """rankbased""" +826 79 dataset """kinships""" +826 79 model """transe""" +826 79 loss """softplus""" +826 79 regularizer """no""" +826 79 optimizer """adam""" +826 79 training_loop """owa""" +826 79 negative_sampler """basic""" +826 79 evaluator """rankbased""" +826 80 dataset """kinships""" +826 80 model """transe""" +826 80 loss """softplus""" +826 80 regularizer """no""" +826 80 optimizer """adam""" +826 80 training_loop """owa""" +826 80 negative_sampler """basic""" +826 80 evaluator """rankbased""" +826 81 dataset """kinships""" +826 81 model """transe""" +826 81 loss """softplus""" +826 81 regularizer """no""" +826 81 optimizer """adam""" +826 81 training_loop """owa""" +826 81 negative_sampler """basic""" +826 81 evaluator """rankbased""" +826 82 dataset """kinships""" +826 82 model """transe""" +826 82 loss """softplus""" +826 82 regularizer """no""" +826 82 optimizer """adam""" +826 82 training_loop """owa""" +826 82 negative_sampler """basic""" +826 82 evaluator """rankbased""" +826 83 dataset """kinships""" +826 83 model """transe""" +826 83 loss """softplus""" +826 83 regularizer """no""" +826 83 optimizer """adam""" +826 83 training_loop """owa""" +826 83 negative_sampler """basic""" +826 83 
evaluator """rankbased""" +826 84 dataset """kinships""" +826 84 model """transe""" +826 84 loss """softplus""" +826 84 regularizer """no""" +826 84 optimizer """adam""" +826 84 training_loop """owa""" +826 84 negative_sampler """basic""" +826 84 evaluator """rankbased""" +826 85 dataset """kinships""" +826 85 model """transe""" +826 85 loss """softplus""" +826 85 regularizer """no""" +826 85 optimizer """adam""" +826 85 training_loop """owa""" +826 85 negative_sampler """basic""" +826 85 evaluator """rankbased""" +826 86 dataset """kinships""" +826 86 model """transe""" +826 86 loss """softplus""" +826 86 regularizer """no""" +826 86 optimizer """adam""" +826 86 training_loop """owa""" +826 86 negative_sampler """basic""" +826 86 evaluator """rankbased""" +826 87 dataset """kinships""" +826 87 model """transe""" +826 87 loss """softplus""" +826 87 regularizer """no""" +826 87 optimizer """adam""" +826 87 training_loop """owa""" +826 87 negative_sampler """basic""" +826 87 evaluator """rankbased""" +826 88 dataset """kinships""" +826 88 model """transe""" +826 88 loss """softplus""" +826 88 regularizer """no""" +826 88 optimizer """adam""" +826 88 training_loop """owa""" +826 88 negative_sampler """basic""" +826 88 evaluator """rankbased""" +826 89 dataset """kinships""" +826 89 model """transe""" +826 89 loss """softplus""" +826 89 regularizer """no""" +826 89 optimizer """adam""" +826 89 training_loop """owa""" +826 89 negative_sampler """basic""" +826 89 evaluator """rankbased""" +826 90 dataset """kinships""" +826 90 model """transe""" +826 90 loss """softplus""" +826 90 regularizer """no""" +826 90 optimizer """adam""" +826 90 training_loop """owa""" +826 90 negative_sampler """basic""" +826 90 evaluator """rankbased""" +826 91 dataset """kinships""" +826 91 model """transe""" +826 91 loss """softplus""" +826 91 regularizer """no""" +826 91 optimizer """adam""" +826 91 training_loop """owa""" +826 91 negative_sampler """basic""" +826 91 evaluator 
"""rankbased""" +826 92 dataset """kinships""" +826 92 model """transe""" +826 92 loss """softplus""" +826 92 regularizer """no""" +826 92 optimizer """adam""" +826 92 training_loop """owa""" +826 92 negative_sampler """basic""" +826 92 evaluator """rankbased""" +826 93 dataset """kinships""" +826 93 model """transe""" +826 93 loss """softplus""" +826 93 regularizer """no""" +826 93 optimizer """adam""" +826 93 training_loop """owa""" +826 93 negative_sampler """basic""" +826 93 evaluator """rankbased""" +826 94 dataset """kinships""" +826 94 model """transe""" +826 94 loss """softplus""" +826 94 regularizer """no""" +826 94 optimizer """adam""" +826 94 training_loop """owa""" +826 94 negative_sampler """basic""" +826 94 evaluator """rankbased""" +826 95 dataset """kinships""" +826 95 model """transe""" +826 95 loss """softplus""" +826 95 regularizer """no""" +826 95 optimizer """adam""" +826 95 training_loop """owa""" +826 95 negative_sampler """basic""" +826 95 evaluator """rankbased""" +826 96 dataset """kinships""" +826 96 model """transe""" +826 96 loss """softplus""" +826 96 regularizer """no""" +826 96 optimizer """adam""" +826 96 training_loop """owa""" +826 96 negative_sampler """basic""" +826 96 evaluator """rankbased""" +826 97 dataset """kinships""" +826 97 model """transe""" +826 97 loss """softplus""" +826 97 regularizer """no""" +826 97 optimizer """adam""" +826 97 training_loop """owa""" +826 97 negative_sampler """basic""" +826 97 evaluator """rankbased""" +826 98 dataset """kinships""" +826 98 model """transe""" +826 98 loss """softplus""" +826 98 regularizer """no""" +826 98 optimizer """adam""" +826 98 training_loop """owa""" +826 98 negative_sampler """basic""" +826 98 evaluator """rankbased""" +826 99 dataset """kinships""" +826 99 model """transe""" +826 99 loss """softplus""" +826 99 regularizer """no""" +826 99 optimizer """adam""" +826 99 training_loop """owa""" +826 99 negative_sampler """basic""" +826 99 evaluator """rankbased""" +826 
100 dataset """kinships""" +826 100 model """transe""" +826 100 loss """softplus""" +826 100 regularizer """no""" +826 100 optimizer """adam""" +826 100 training_loop """owa""" +826 100 negative_sampler """basic""" +826 100 evaluator """rankbased""" +827 1 model.embedding_dim 0.0 +827 1 model.scoring_fct_norm 2.0 +827 1 optimizer.lr 0.011226322493558899 +827 1 negative_sampler.num_negs_per_pos 38.0 +827 1 training.batch_size 1.0 +827 2 model.embedding_dim 1.0 +827 2 model.scoring_fct_norm 2.0 +827 2 optimizer.lr 0.006429442063821957 +827 2 negative_sampler.num_negs_per_pos 6.0 +827 2 training.batch_size 1.0 +827 3 model.embedding_dim 0.0 +827 3 model.scoring_fct_norm 1.0 +827 3 optimizer.lr 0.01070142877624793 +827 3 negative_sampler.num_negs_per_pos 58.0 +827 3 training.batch_size 0.0 +827 4 model.embedding_dim 2.0 +827 4 model.scoring_fct_norm 2.0 +827 4 optimizer.lr 0.07624545078411675 +827 4 negative_sampler.num_negs_per_pos 26.0 +827 4 training.batch_size 1.0 +827 5 model.embedding_dim 1.0 +827 5 model.scoring_fct_norm 1.0 +827 5 optimizer.lr 0.002634474877326161 +827 5 negative_sampler.num_negs_per_pos 34.0 +827 5 training.batch_size 0.0 +827 6 model.embedding_dim 0.0 +827 6 model.scoring_fct_norm 2.0 +827 6 optimizer.lr 0.005512925493411606 +827 6 negative_sampler.num_negs_per_pos 12.0 +827 6 training.batch_size 0.0 +827 7 model.embedding_dim 2.0 +827 7 model.scoring_fct_norm 1.0 +827 7 optimizer.lr 0.035425476073299426 +827 7 negative_sampler.num_negs_per_pos 27.0 +827 7 training.batch_size 1.0 +827 8 model.embedding_dim 1.0 +827 8 model.scoring_fct_norm 2.0 +827 8 optimizer.lr 0.0022441134298293607 +827 8 negative_sampler.num_negs_per_pos 92.0 +827 8 training.batch_size 1.0 +827 9 model.embedding_dim 1.0 +827 9 model.scoring_fct_norm 2.0 +827 9 optimizer.lr 0.005552535584266334 +827 9 negative_sampler.num_negs_per_pos 34.0 +827 9 training.batch_size 1.0 +827 10 model.embedding_dim 1.0 +827 10 model.scoring_fct_norm 2.0 +827 10 optimizer.lr 
0.06086516951683281 +827 10 negative_sampler.num_negs_per_pos 43.0 +827 10 training.batch_size 0.0 +827 11 model.embedding_dim 2.0 +827 11 model.scoring_fct_norm 2.0 +827 11 optimizer.lr 0.06159857990457771 +827 11 negative_sampler.num_negs_per_pos 98.0 +827 11 training.batch_size 2.0 +827 12 model.embedding_dim 2.0 +827 12 model.scoring_fct_norm 2.0 +827 12 optimizer.lr 0.02133711436295758 +827 12 negative_sampler.num_negs_per_pos 2.0 +827 12 training.batch_size 2.0 +827 13 model.embedding_dim 1.0 +827 13 model.scoring_fct_norm 2.0 +827 13 optimizer.lr 0.0041687549513745046 +827 13 negative_sampler.num_negs_per_pos 54.0 +827 13 training.batch_size 2.0 +827 14 model.embedding_dim 0.0 +827 14 model.scoring_fct_norm 2.0 +827 14 optimizer.lr 0.001951112637232799 +827 14 negative_sampler.num_negs_per_pos 9.0 +827 14 training.batch_size 1.0 +827 15 model.embedding_dim 2.0 +827 15 model.scoring_fct_norm 2.0 +827 15 optimizer.lr 0.013826025489698662 +827 15 negative_sampler.num_negs_per_pos 88.0 +827 15 training.batch_size 0.0 +827 16 model.embedding_dim 2.0 +827 16 model.scoring_fct_norm 1.0 +827 16 optimizer.lr 0.015993274510103194 +827 16 negative_sampler.num_negs_per_pos 30.0 +827 16 training.batch_size 0.0 +827 17 model.embedding_dim 2.0 +827 17 model.scoring_fct_norm 1.0 +827 17 optimizer.lr 0.0063021036559886105 +827 17 negative_sampler.num_negs_per_pos 38.0 +827 17 training.batch_size 1.0 +827 18 model.embedding_dim 0.0 +827 18 model.scoring_fct_norm 2.0 +827 18 optimizer.lr 0.0029754742306195617 +827 18 negative_sampler.num_negs_per_pos 55.0 +827 18 training.batch_size 1.0 +827 19 model.embedding_dim 2.0 +827 19 model.scoring_fct_norm 2.0 +827 19 optimizer.lr 0.00841188831198661 +827 19 negative_sampler.num_negs_per_pos 44.0 +827 19 training.batch_size 0.0 +827 20 model.embedding_dim 0.0 +827 20 model.scoring_fct_norm 2.0 +827 20 optimizer.lr 0.021570167171721027 +827 20 negative_sampler.num_negs_per_pos 40.0 +827 20 training.batch_size 0.0 +827 21 
model.embedding_dim 0.0 +827 21 model.scoring_fct_norm 1.0 +827 21 optimizer.lr 0.0015611956628719 +827 21 negative_sampler.num_negs_per_pos 12.0 +827 21 training.batch_size 0.0 +827 22 model.embedding_dim 2.0 +827 22 model.scoring_fct_norm 2.0 +827 22 optimizer.lr 0.03890967241601521 +827 22 negative_sampler.num_negs_per_pos 61.0 +827 22 training.batch_size 2.0 +827 23 model.embedding_dim 1.0 +827 23 model.scoring_fct_norm 2.0 +827 23 optimizer.lr 0.00983135377059712 +827 23 negative_sampler.num_negs_per_pos 90.0 +827 23 training.batch_size 1.0 +827 24 model.embedding_dim 0.0 +827 24 model.scoring_fct_norm 1.0 +827 24 optimizer.lr 0.06596165780856116 +827 24 negative_sampler.num_negs_per_pos 42.0 +827 24 training.batch_size 1.0 +827 25 model.embedding_dim 2.0 +827 25 model.scoring_fct_norm 1.0 +827 25 optimizer.lr 0.0017219997076159317 +827 25 negative_sampler.num_negs_per_pos 30.0 +827 25 training.batch_size 0.0 +827 26 model.embedding_dim 2.0 +827 26 model.scoring_fct_norm 1.0 +827 26 optimizer.lr 0.03924637780603424 +827 26 negative_sampler.num_negs_per_pos 7.0 +827 26 training.batch_size 1.0 +827 27 model.embedding_dim 0.0 +827 27 model.scoring_fct_norm 2.0 +827 27 optimizer.lr 0.003805329487793719 +827 27 negative_sampler.num_negs_per_pos 60.0 +827 27 training.batch_size 1.0 +827 28 model.embedding_dim 0.0 +827 28 model.scoring_fct_norm 2.0 +827 28 optimizer.lr 0.044253424240134474 +827 28 negative_sampler.num_negs_per_pos 22.0 +827 28 training.batch_size 1.0 +827 29 model.embedding_dim 2.0 +827 29 model.scoring_fct_norm 2.0 +827 29 optimizer.lr 0.0017951776240374907 +827 29 negative_sampler.num_negs_per_pos 21.0 +827 29 training.batch_size 2.0 +827 30 model.embedding_dim 1.0 +827 30 model.scoring_fct_norm 1.0 +827 30 optimizer.lr 0.08699242799843379 +827 30 negative_sampler.num_negs_per_pos 95.0 +827 30 training.batch_size 1.0 +827 31 model.embedding_dim 0.0 +827 31 model.scoring_fct_norm 1.0 +827 31 optimizer.lr 0.020413844448974757 +827 31 
negative_sampler.num_negs_per_pos 21.0 +827 31 training.batch_size 0.0 +827 32 model.embedding_dim 1.0 +827 32 model.scoring_fct_norm 2.0 +827 32 optimizer.lr 0.03826474790103826 +827 32 negative_sampler.num_negs_per_pos 64.0 +827 32 training.batch_size 0.0 +827 33 model.embedding_dim 2.0 +827 33 model.scoring_fct_norm 2.0 +827 33 optimizer.lr 0.07756357213439165 +827 33 negative_sampler.num_negs_per_pos 92.0 +827 33 training.batch_size 0.0 +827 34 model.embedding_dim 0.0 +827 34 model.scoring_fct_norm 1.0 +827 34 optimizer.lr 0.0014492656048133324 +827 34 negative_sampler.num_negs_per_pos 15.0 +827 34 training.batch_size 1.0 +827 35 model.embedding_dim 2.0 +827 35 model.scoring_fct_norm 1.0 +827 35 optimizer.lr 0.09429554324950686 +827 35 negative_sampler.num_negs_per_pos 96.0 +827 35 training.batch_size 2.0 +827 36 model.embedding_dim 0.0 +827 36 model.scoring_fct_norm 2.0 +827 36 optimizer.lr 0.017289880917697807 +827 36 negative_sampler.num_negs_per_pos 39.0 +827 36 training.batch_size 1.0 +827 37 model.embedding_dim 1.0 +827 37 model.scoring_fct_norm 2.0 +827 37 optimizer.lr 0.00115513125700299 +827 37 negative_sampler.num_negs_per_pos 68.0 +827 37 training.batch_size 0.0 +827 38 model.embedding_dim 1.0 +827 38 model.scoring_fct_norm 2.0 +827 38 optimizer.lr 0.011614247659654239 +827 38 negative_sampler.num_negs_per_pos 90.0 +827 38 training.batch_size 0.0 +827 39 model.embedding_dim 1.0 +827 39 model.scoring_fct_norm 2.0 +827 39 optimizer.lr 0.011952181833843934 +827 39 negative_sampler.num_negs_per_pos 66.0 +827 39 training.batch_size 2.0 +827 40 model.embedding_dim 2.0 +827 40 model.scoring_fct_norm 2.0 +827 40 optimizer.lr 0.0010052715690160076 +827 40 negative_sampler.num_negs_per_pos 58.0 +827 40 training.batch_size 1.0 +827 41 model.embedding_dim 1.0 +827 41 model.scoring_fct_norm 2.0 +827 41 optimizer.lr 0.00820565767522035 +827 41 negative_sampler.num_negs_per_pos 74.0 +827 41 training.batch_size 1.0 +827 42 model.embedding_dim 0.0 +827 42 
model.scoring_fct_norm 1.0 +827 42 optimizer.lr 0.015610937166338747 +827 42 negative_sampler.num_negs_per_pos 83.0 +827 42 training.batch_size 1.0 +827 43 model.embedding_dim 2.0 +827 43 model.scoring_fct_norm 1.0 +827 43 optimizer.lr 0.012633571203754579 +827 43 negative_sampler.num_negs_per_pos 71.0 +827 43 training.batch_size 2.0 +827 44 model.embedding_dim 1.0 +827 44 model.scoring_fct_norm 2.0 +827 44 optimizer.lr 0.0010299802912884226 +827 44 negative_sampler.num_negs_per_pos 71.0 +827 44 training.batch_size 1.0 +827 45 model.embedding_dim 2.0 +827 45 model.scoring_fct_norm 1.0 +827 45 optimizer.lr 0.002048867316125168 +827 45 negative_sampler.num_negs_per_pos 18.0 +827 45 training.batch_size 2.0 +827 46 model.embedding_dim 0.0 +827 46 model.scoring_fct_norm 1.0 +827 46 optimizer.lr 0.0030694459859843793 +827 46 negative_sampler.num_negs_per_pos 8.0 +827 46 training.batch_size 0.0 +827 47 model.embedding_dim 2.0 +827 47 model.scoring_fct_norm 2.0 +827 47 optimizer.lr 0.012582379865565563 +827 47 negative_sampler.num_negs_per_pos 26.0 +827 47 training.batch_size 1.0 +827 48 model.embedding_dim 0.0 +827 48 model.scoring_fct_norm 1.0 +827 48 optimizer.lr 0.006232337875384365 +827 48 negative_sampler.num_negs_per_pos 5.0 +827 48 training.batch_size 0.0 +827 49 model.embedding_dim 1.0 +827 49 model.scoring_fct_norm 2.0 +827 49 optimizer.lr 0.0019942011580652576 +827 49 negative_sampler.num_negs_per_pos 45.0 +827 49 training.batch_size 0.0 +827 50 model.embedding_dim 1.0 +827 50 model.scoring_fct_norm 1.0 +827 50 optimizer.lr 0.002072797961735251 +827 50 negative_sampler.num_negs_per_pos 75.0 +827 50 training.batch_size 1.0 +827 51 model.embedding_dim 0.0 +827 51 model.scoring_fct_norm 1.0 +827 51 optimizer.lr 0.001170963138350197 +827 51 negative_sampler.num_negs_per_pos 95.0 +827 51 training.batch_size 0.0 +827 52 model.embedding_dim 0.0 +827 52 model.scoring_fct_norm 2.0 +827 52 optimizer.lr 0.004891729564056235 +827 52 negative_sampler.num_negs_per_pos 46.0 
+827 52 training.batch_size 1.0 +827 53 model.embedding_dim 2.0 +827 53 model.scoring_fct_norm 2.0 +827 53 optimizer.lr 0.0018364049332528821 +827 53 negative_sampler.num_negs_per_pos 84.0 +827 53 training.batch_size 1.0 +827 54 model.embedding_dim 0.0 +827 54 model.scoring_fct_norm 2.0 +827 54 optimizer.lr 0.024138701166857224 +827 54 negative_sampler.num_negs_per_pos 62.0 +827 54 training.batch_size 2.0 +827 55 model.embedding_dim 1.0 +827 55 model.scoring_fct_norm 2.0 +827 55 optimizer.lr 0.023996731454609507 +827 55 negative_sampler.num_negs_per_pos 79.0 +827 55 training.batch_size 1.0 +827 56 model.embedding_dim 2.0 +827 56 model.scoring_fct_norm 2.0 +827 56 optimizer.lr 0.005565639224163402 +827 56 negative_sampler.num_negs_per_pos 84.0 +827 56 training.batch_size 0.0 +827 57 model.embedding_dim 1.0 +827 57 model.scoring_fct_norm 2.0 +827 57 optimizer.lr 0.030698145145490244 +827 57 negative_sampler.num_negs_per_pos 76.0 +827 57 training.batch_size 2.0 +827 58 model.embedding_dim 0.0 +827 58 model.scoring_fct_norm 2.0 +827 58 optimizer.lr 0.003989025596217569 +827 58 negative_sampler.num_negs_per_pos 48.0 +827 58 training.batch_size 0.0 +827 59 model.embedding_dim 0.0 +827 59 model.scoring_fct_norm 1.0 +827 59 optimizer.lr 0.0027883339066281884 +827 59 negative_sampler.num_negs_per_pos 54.0 +827 59 training.batch_size 0.0 +827 60 model.embedding_dim 1.0 +827 60 model.scoring_fct_norm 2.0 +827 60 optimizer.lr 0.027079954925008726 +827 60 negative_sampler.num_negs_per_pos 26.0 +827 60 training.batch_size 0.0 +827 61 model.embedding_dim 1.0 +827 61 model.scoring_fct_norm 2.0 +827 61 optimizer.lr 0.0010710837005853946 +827 61 negative_sampler.num_negs_per_pos 10.0 +827 61 training.batch_size 2.0 +827 62 model.embedding_dim 0.0 +827 62 model.scoring_fct_norm 1.0 +827 62 optimizer.lr 0.0037508446785531214 +827 62 negative_sampler.num_negs_per_pos 12.0 +827 62 training.batch_size 2.0 +827 63 model.embedding_dim 1.0 +827 63 model.scoring_fct_norm 2.0 +827 63 
optimizer.lr 0.008964274848910636 +827 63 negative_sampler.num_negs_per_pos 45.0 +827 63 training.batch_size 1.0 +827 64 model.embedding_dim 2.0 +827 64 model.scoring_fct_norm 2.0 +827 64 optimizer.lr 0.0011592427024664342 +827 64 negative_sampler.num_negs_per_pos 78.0 +827 64 training.batch_size 2.0 +827 65 model.embedding_dim 1.0 +827 65 model.scoring_fct_norm 2.0 +827 65 optimizer.lr 0.01927249114241892 +827 65 negative_sampler.num_negs_per_pos 79.0 +827 65 training.batch_size 2.0 +827 66 model.embedding_dim 2.0 +827 66 model.scoring_fct_norm 1.0 +827 66 optimizer.lr 0.06073102913241449 +827 66 negative_sampler.num_negs_per_pos 32.0 +827 66 training.batch_size 1.0 +827 67 model.embedding_dim 0.0 +827 67 model.scoring_fct_norm 1.0 +827 67 optimizer.lr 0.018373605320598743 +827 67 negative_sampler.num_negs_per_pos 90.0 +827 67 training.batch_size 2.0 +827 68 model.embedding_dim 1.0 +827 68 model.scoring_fct_norm 2.0 +827 68 optimizer.lr 0.0018612900543535545 +827 68 negative_sampler.num_negs_per_pos 58.0 +827 68 training.batch_size 2.0 +827 69 model.embedding_dim 1.0 +827 69 model.scoring_fct_norm 2.0 +827 69 optimizer.lr 0.0015243850281095493 +827 69 negative_sampler.num_negs_per_pos 26.0 +827 69 training.batch_size 2.0 +827 70 model.embedding_dim 1.0 +827 70 model.scoring_fct_norm 2.0 +827 70 optimizer.lr 0.023306738103810044 +827 70 negative_sampler.num_negs_per_pos 94.0 +827 70 training.batch_size 1.0 +827 71 model.embedding_dim 2.0 +827 71 model.scoring_fct_norm 2.0 +827 71 optimizer.lr 0.0048430173167475585 +827 71 negative_sampler.num_negs_per_pos 60.0 +827 71 training.batch_size 0.0 +827 72 model.embedding_dim 2.0 +827 72 model.scoring_fct_norm 1.0 +827 72 optimizer.lr 0.034689104320551147 +827 72 negative_sampler.num_negs_per_pos 64.0 +827 72 training.batch_size 1.0 +827 73 model.embedding_dim 0.0 +827 73 model.scoring_fct_norm 2.0 +827 73 optimizer.lr 0.009759127784484461 +827 73 negative_sampler.num_negs_per_pos 35.0 +827 73 training.batch_size 0.0 +827 
74 model.embedding_dim 0.0 +827 74 model.scoring_fct_norm 2.0 +827 74 optimizer.lr 0.0017440850653355486 +827 74 negative_sampler.num_negs_per_pos 1.0 +827 74 training.batch_size 2.0 +827 75 model.embedding_dim 1.0 +827 75 model.scoring_fct_norm 1.0 +827 75 optimizer.lr 0.007774730060179404 +827 75 negative_sampler.num_negs_per_pos 68.0 +827 75 training.batch_size 0.0 +827 76 model.embedding_dim 0.0 +827 76 model.scoring_fct_norm 2.0 +827 76 optimizer.lr 0.026786807414200482 +827 76 negative_sampler.num_negs_per_pos 80.0 +827 76 training.batch_size 2.0 +827 77 model.embedding_dim 0.0 +827 77 model.scoring_fct_norm 1.0 +827 77 optimizer.lr 0.04519332522697136 +827 77 negative_sampler.num_negs_per_pos 42.0 +827 77 training.batch_size 0.0 +827 78 model.embedding_dim 1.0 +827 78 model.scoring_fct_norm 2.0 +827 78 optimizer.lr 0.006331631274147901 +827 78 negative_sampler.num_negs_per_pos 26.0 +827 78 training.batch_size 1.0 +827 79 model.embedding_dim 1.0 +827 79 model.scoring_fct_norm 2.0 +827 79 optimizer.lr 0.019338327322266438 +827 79 negative_sampler.num_negs_per_pos 7.0 +827 79 training.batch_size 0.0 +827 80 model.embedding_dim 1.0 +827 80 model.scoring_fct_norm 1.0 +827 80 optimizer.lr 0.004037094366578158 +827 80 negative_sampler.num_negs_per_pos 94.0 +827 80 training.batch_size 2.0 +827 81 model.embedding_dim 1.0 +827 81 model.scoring_fct_norm 1.0 +827 81 optimizer.lr 0.0015706941857496707 +827 81 negative_sampler.num_negs_per_pos 98.0 +827 81 training.batch_size 0.0 +827 82 model.embedding_dim 1.0 +827 82 model.scoring_fct_norm 1.0 +827 82 optimizer.lr 0.02306865366000639 +827 82 negative_sampler.num_negs_per_pos 96.0 +827 82 training.batch_size 0.0 +827 83 model.embedding_dim 0.0 +827 83 model.scoring_fct_norm 2.0 +827 83 optimizer.lr 0.04204010372451795 +827 83 negative_sampler.num_negs_per_pos 85.0 +827 83 training.batch_size 1.0 +827 84 model.embedding_dim 2.0 +827 84 model.scoring_fct_norm 2.0 +827 84 optimizer.lr 0.004571381814259138 +827 84 
negative_sampler.num_negs_per_pos 44.0 +827 84 training.batch_size 1.0 +827 85 model.embedding_dim 1.0 +827 85 model.scoring_fct_norm 1.0 +827 85 optimizer.lr 0.0014186393422370212 +827 85 negative_sampler.num_negs_per_pos 97.0 +827 85 training.batch_size 1.0 +827 86 model.embedding_dim 1.0 +827 86 model.scoring_fct_norm 1.0 +827 86 optimizer.lr 0.010588107610505427 +827 86 negative_sampler.num_negs_per_pos 71.0 +827 86 training.batch_size 2.0 +827 87 model.embedding_dim 2.0 +827 87 model.scoring_fct_norm 2.0 +827 87 optimizer.lr 0.0013932458125902102 +827 87 negative_sampler.num_negs_per_pos 55.0 +827 87 training.batch_size 0.0 +827 88 model.embedding_dim 1.0 +827 88 model.scoring_fct_norm 1.0 +827 88 optimizer.lr 0.003933221148006932 +827 88 negative_sampler.num_negs_per_pos 36.0 +827 88 training.batch_size 0.0 +827 89 model.embedding_dim 0.0 +827 89 model.scoring_fct_norm 2.0 +827 89 optimizer.lr 0.008095524441213125 +827 89 negative_sampler.num_negs_per_pos 30.0 +827 89 training.batch_size 0.0 +827 90 model.embedding_dim 0.0 +827 90 model.scoring_fct_norm 2.0 +827 90 optimizer.lr 0.0027638440690423522 +827 90 negative_sampler.num_negs_per_pos 47.0 +827 90 training.batch_size 2.0 +827 91 model.embedding_dim 1.0 +827 91 model.scoring_fct_norm 1.0 +827 91 optimizer.lr 0.05567555064211709 +827 91 negative_sampler.num_negs_per_pos 39.0 +827 91 training.batch_size 1.0 +827 92 model.embedding_dim 0.0 +827 92 model.scoring_fct_norm 1.0 +827 92 optimizer.lr 0.006808710203249357 +827 92 negative_sampler.num_negs_per_pos 59.0 +827 92 training.batch_size 1.0 +827 93 model.embedding_dim 2.0 +827 93 model.scoring_fct_norm 1.0 +827 93 optimizer.lr 0.0011459909623510469 +827 93 negative_sampler.num_negs_per_pos 98.0 +827 93 training.batch_size 2.0 +827 94 model.embedding_dim 0.0 +827 94 model.scoring_fct_norm 2.0 +827 94 optimizer.lr 0.013817771952083658 +827 94 negative_sampler.num_negs_per_pos 58.0 +827 94 training.batch_size 2.0 +827 95 model.embedding_dim 0.0 +827 95 
model.scoring_fct_norm 2.0 +827 95 optimizer.lr 0.062492579349290756 +827 95 negative_sampler.num_negs_per_pos 95.0 +827 95 training.batch_size 1.0 +827 96 model.embedding_dim 1.0 +827 96 model.scoring_fct_norm 2.0 +827 96 optimizer.lr 0.004096622767382113 +827 96 negative_sampler.num_negs_per_pos 46.0 +827 96 training.batch_size 2.0 +827 97 model.embedding_dim 1.0 +827 97 model.scoring_fct_norm 1.0 +827 97 optimizer.lr 0.008962618056572218 +827 97 negative_sampler.num_negs_per_pos 4.0 +827 97 training.batch_size 2.0 +827 98 model.embedding_dim 0.0 +827 98 model.scoring_fct_norm 2.0 +827 98 optimizer.lr 0.00632357101847797 +827 98 negative_sampler.num_negs_per_pos 5.0 +827 98 training.batch_size 2.0 +827 99 model.embedding_dim 2.0 +827 99 model.scoring_fct_norm 1.0 +827 99 optimizer.lr 0.02114794140334938 +827 99 negative_sampler.num_negs_per_pos 96.0 +827 99 training.batch_size 0.0 +827 100 model.embedding_dim 2.0 +827 100 model.scoring_fct_norm 2.0 +827 100 optimizer.lr 0.007219880148925671 +827 100 negative_sampler.num_negs_per_pos 41.0 +827 100 training.batch_size 1.0 +827 1 dataset """kinships""" +827 1 model """transe""" +827 1 loss """bceaftersigmoid""" +827 1 regularizer """no""" +827 1 optimizer """adam""" +827 1 training_loop """owa""" +827 1 negative_sampler """basic""" +827 1 evaluator """rankbased""" +827 2 dataset """kinships""" +827 2 model """transe""" +827 2 loss """bceaftersigmoid""" +827 2 regularizer """no""" +827 2 optimizer """adam""" +827 2 training_loop """owa""" +827 2 negative_sampler """basic""" +827 2 evaluator """rankbased""" +827 3 dataset """kinships""" +827 3 model """transe""" +827 3 loss """bceaftersigmoid""" +827 3 regularizer """no""" +827 3 optimizer """adam""" +827 3 training_loop """owa""" +827 3 negative_sampler """basic""" +827 3 evaluator """rankbased""" +827 4 dataset """kinships""" +827 4 model """transe""" +827 4 loss """bceaftersigmoid""" +827 4 regularizer """no""" +827 4 optimizer """adam""" +827 4 training_loop 
"""owa""" +827 4 negative_sampler """basic""" +827 4 evaluator """rankbased""" +827 5 dataset """kinships""" +827 5 model """transe""" +827 5 loss """bceaftersigmoid""" +827 5 regularizer """no""" +827 5 optimizer """adam""" +827 5 training_loop """owa""" +827 5 negative_sampler """basic""" +827 5 evaluator """rankbased""" +827 6 dataset """kinships""" +827 6 model """transe""" +827 6 loss """bceaftersigmoid""" +827 6 regularizer """no""" +827 6 optimizer """adam""" +827 6 training_loop """owa""" +827 6 negative_sampler """basic""" +827 6 evaluator """rankbased""" +827 7 dataset """kinships""" +827 7 model """transe""" +827 7 loss """bceaftersigmoid""" +827 7 regularizer """no""" +827 7 optimizer """adam""" +827 7 training_loop """owa""" +827 7 negative_sampler """basic""" +827 7 evaluator """rankbased""" +827 8 dataset """kinships""" +827 8 model """transe""" +827 8 loss """bceaftersigmoid""" +827 8 regularizer """no""" +827 8 optimizer """adam""" +827 8 training_loop """owa""" +827 8 negative_sampler """basic""" +827 8 evaluator """rankbased""" +827 9 dataset """kinships""" +827 9 model """transe""" +827 9 loss """bceaftersigmoid""" +827 9 regularizer """no""" +827 9 optimizer """adam""" +827 9 training_loop """owa""" +827 9 negative_sampler """basic""" +827 9 evaluator """rankbased""" +827 10 dataset """kinships""" +827 10 model """transe""" +827 10 loss """bceaftersigmoid""" +827 10 regularizer """no""" +827 10 optimizer """adam""" +827 10 training_loop """owa""" +827 10 negative_sampler """basic""" +827 10 evaluator """rankbased""" +827 11 dataset """kinships""" +827 11 model """transe""" +827 11 loss """bceaftersigmoid""" +827 11 regularizer """no""" +827 11 optimizer """adam""" +827 11 training_loop """owa""" +827 11 negative_sampler """basic""" +827 11 evaluator """rankbased""" +827 12 dataset """kinships""" +827 12 model """transe""" +827 12 loss """bceaftersigmoid""" +827 12 regularizer """no""" +827 12 optimizer """adam""" +827 12 training_loop """owa""" 
+827 12 negative_sampler """basic""" +827 12 evaluator """rankbased""" +827 13 dataset """kinships""" +827 13 model """transe""" +827 13 loss """bceaftersigmoid""" +827 13 regularizer """no""" +827 13 optimizer """adam""" +827 13 training_loop """owa""" +827 13 negative_sampler """basic""" +827 13 evaluator """rankbased""" +827 14 dataset """kinships""" +827 14 model """transe""" +827 14 loss """bceaftersigmoid""" +827 14 regularizer """no""" +827 14 optimizer """adam""" +827 14 training_loop """owa""" +827 14 negative_sampler """basic""" +827 14 evaluator """rankbased""" +827 15 dataset """kinships""" +827 15 model """transe""" +827 15 loss """bceaftersigmoid""" +827 15 regularizer """no""" +827 15 optimizer """adam""" +827 15 training_loop """owa""" +827 15 negative_sampler """basic""" +827 15 evaluator """rankbased""" +827 16 dataset """kinships""" +827 16 model """transe""" +827 16 loss """bceaftersigmoid""" +827 16 regularizer """no""" +827 16 optimizer """adam""" +827 16 training_loop """owa""" +827 16 negative_sampler """basic""" +827 16 evaluator """rankbased""" +827 17 dataset """kinships""" +827 17 model """transe""" +827 17 loss """bceaftersigmoid""" +827 17 regularizer """no""" +827 17 optimizer """adam""" +827 17 training_loop """owa""" +827 17 negative_sampler """basic""" +827 17 evaluator """rankbased""" +827 18 dataset """kinships""" +827 18 model """transe""" +827 18 loss """bceaftersigmoid""" +827 18 regularizer """no""" +827 18 optimizer """adam""" +827 18 training_loop """owa""" +827 18 negative_sampler """basic""" +827 18 evaluator """rankbased""" +827 19 dataset """kinships""" +827 19 model """transe""" +827 19 loss """bceaftersigmoid""" +827 19 regularizer """no""" +827 19 optimizer """adam""" +827 19 training_loop """owa""" +827 19 negative_sampler """basic""" +827 19 evaluator """rankbased""" +827 20 dataset """kinships""" +827 20 model """transe""" +827 20 loss """bceaftersigmoid""" +827 20 regularizer """no""" +827 20 optimizer """adam""" 
+827 20 training_loop """owa""" +827 20 negative_sampler """basic""" +827 20 evaluator """rankbased""" +827 21 dataset """kinships""" +827 21 model """transe""" +827 21 loss """bceaftersigmoid""" +827 21 regularizer """no""" +827 21 optimizer """adam""" +827 21 training_loop """owa""" +827 21 negative_sampler """basic""" +827 21 evaluator """rankbased""" +827 22 dataset """kinships""" +827 22 model """transe""" +827 22 loss """bceaftersigmoid""" +827 22 regularizer """no""" +827 22 optimizer """adam""" +827 22 training_loop """owa""" +827 22 negative_sampler """basic""" +827 22 evaluator """rankbased""" +827 23 dataset """kinships""" +827 23 model """transe""" +827 23 loss """bceaftersigmoid""" +827 23 regularizer """no""" +827 23 optimizer """adam""" +827 23 training_loop """owa""" +827 23 negative_sampler """basic""" +827 23 evaluator """rankbased""" +827 24 dataset """kinships""" +827 24 model """transe""" +827 24 loss """bceaftersigmoid""" +827 24 regularizer """no""" +827 24 optimizer """adam""" +827 24 training_loop """owa""" +827 24 negative_sampler """basic""" +827 24 evaluator """rankbased""" +827 25 dataset """kinships""" +827 25 model """transe""" +827 25 loss """bceaftersigmoid""" +827 25 regularizer """no""" +827 25 optimizer """adam""" +827 25 training_loop """owa""" +827 25 negative_sampler """basic""" +827 25 evaluator """rankbased""" +827 26 dataset """kinships""" +827 26 model """transe""" +827 26 loss """bceaftersigmoid""" +827 26 regularizer """no""" +827 26 optimizer """adam""" +827 26 training_loop """owa""" +827 26 negative_sampler """basic""" +827 26 evaluator """rankbased""" +827 27 dataset """kinships""" +827 27 model """transe""" +827 27 loss """bceaftersigmoid""" +827 27 regularizer """no""" +827 27 optimizer """adam""" +827 27 training_loop """owa""" +827 27 negative_sampler """basic""" +827 27 evaluator """rankbased""" +827 28 dataset """kinships""" +827 28 model """transe""" +827 28 loss """bceaftersigmoid""" +827 28 regularizer 
"""no""" +827 28 optimizer """adam""" +827 28 training_loop """owa""" +827 28 negative_sampler """basic""" +827 28 evaluator """rankbased""" +827 29 dataset """kinships""" +827 29 model """transe""" +827 29 loss """bceaftersigmoid""" +827 29 regularizer """no""" +827 29 optimizer """adam""" +827 29 training_loop """owa""" +827 29 negative_sampler """basic""" +827 29 evaluator """rankbased""" +827 30 dataset """kinships""" +827 30 model """transe""" +827 30 loss """bceaftersigmoid""" +827 30 regularizer """no""" +827 30 optimizer """adam""" +827 30 training_loop """owa""" +827 30 negative_sampler """basic""" +827 30 evaluator """rankbased""" +827 31 dataset """kinships""" +827 31 model """transe""" +827 31 loss """bceaftersigmoid""" +827 31 regularizer """no""" +827 31 optimizer """adam""" +827 31 training_loop """owa""" +827 31 negative_sampler """basic""" +827 31 evaluator """rankbased""" +827 32 dataset """kinships""" +827 32 model """transe""" +827 32 loss """bceaftersigmoid""" +827 32 regularizer """no""" +827 32 optimizer """adam""" +827 32 training_loop """owa""" +827 32 negative_sampler """basic""" +827 32 evaluator """rankbased""" +827 33 dataset """kinships""" +827 33 model """transe""" +827 33 loss """bceaftersigmoid""" +827 33 regularizer """no""" +827 33 optimizer """adam""" +827 33 training_loop """owa""" +827 33 negative_sampler """basic""" +827 33 evaluator """rankbased""" +827 34 dataset """kinships""" +827 34 model """transe""" +827 34 loss """bceaftersigmoid""" +827 34 regularizer """no""" +827 34 optimizer """adam""" +827 34 training_loop """owa""" +827 34 negative_sampler """basic""" +827 34 evaluator """rankbased""" +827 35 dataset """kinships""" +827 35 model """transe""" +827 35 loss """bceaftersigmoid""" +827 35 regularizer """no""" +827 35 optimizer """adam""" +827 35 training_loop """owa""" +827 35 negative_sampler """basic""" +827 35 evaluator """rankbased""" +827 36 dataset """kinships""" +827 36 model """transe""" +827 36 loss 
"""bceaftersigmoid""" +827 36 regularizer """no""" +827 36 optimizer """adam""" +827 36 training_loop """owa""" +827 36 negative_sampler """basic""" +827 36 evaluator """rankbased""" +827 37 dataset """kinships""" +827 37 model """transe""" +827 37 loss """bceaftersigmoid""" +827 37 regularizer """no""" +827 37 optimizer """adam""" +827 37 training_loop """owa""" +827 37 negative_sampler """basic""" +827 37 evaluator """rankbased""" +827 38 dataset """kinships""" +827 38 model """transe""" +827 38 loss """bceaftersigmoid""" +827 38 regularizer """no""" +827 38 optimizer """adam""" +827 38 training_loop """owa""" +827 38 negative_sampler """basic""" +827 38 evaluator """rankbased""" +827 39 dataset """kinships""" +827 39 model """transe""" +827 39 loss """bceaftersigmoid""" +827 39 regularizer """no""" +827 39 optimizer """adam""" +827 39 training_loop """owa""" +827 39 negative_sampler """basic""" +827 39 evaluator """rankbased""" +827 40 dataset """kinships""" +827 40 model """transe""" +827 40 loss """bceaftersigmoid""" +827 40 regularizer """no""" +827 40 optimizer """adam""" +827 40 training_loop """owa""" +827 40 negative_sampler """basic""" +827 40 evaluator """rankbased""" +827 41 dataset """kinships""" +827 41 model """transe""" +827 41 loss """bceaftersigmoid""" +827 41 regularizer """no""" +827 41 optimizer """adam""" +827 41 training_loop """owa""" +827 41 negative_sampler """basic""" +827 41 evaluator """rankbased""" +827 42 dataset """kinships""" +827 42 model """transe""" +827 42 loss """bceaftersigmoid""" +827 42 regularizer """no""" +827 42 optimizer """adam""" +827 42 training_loop """owa""" +827 42 negative_sampler """basic""" +827 42 evaluator """rankbased""" +827 43 dataset """kinships""" +827 43 model """transe""" +827 43 loss """bceaftersigmoid""" +827 43 regularizer """no""" +827 43 optimizer """adam""" +827 43 training_loop """owa""" +827 43 negative_sampler """basic""" +827 43 evaluator """rankbased""" +827 44 dataset """kinships""" +827 44 
model """transe""" +827 44 loss """bceaftersigmoid""" +827 44 regularizer """no""" +827 44 optimizer """adam""" +827 44 training_loop """owa""" +827 44 negative_sampler """basic""" +827 44 evaluator """rankbased""" +827 45 dataset """kinships""" +827 45 model """transe""" +827 45 loss """bceaftersigmoid""" +827 45 regularizer """no""" +827 45 optimizer """adam""" +827 45 training_loop """owa""" +827 45 negative_sampler """basic""" +827 45 evaluator """rankbased""" +827 46 dataset """kinships""" +827 46 model """transe""" +827 46 loss """bceaftersigmoid""" +827 46 regularizer """no""" +827 46 optimizer """adam""" +827 46 training_loop """owa""" +827 46 negative_sampler """basic""" +827 46 evaluator """rankbased""" +827 47 dataset """kinships""" +827 47 model """transe""" +827 47 loss """bceaftersigmoid""" +827 47 regularizer """no""" +827 47 optimizer """adam""" +827 47 training_loop """owa""" +827 47 negative_sampler """basic""" +827 47 evaluator """rankbased""" +827 48 dataset """kinships""" +827 48 model """transe""" +827 48 loss """bceaftersigmoid""" +827 48 regularizer """no""" +827 48 optimizer """adam""" +827 48 training_loop """owa""" +827 48 negative_sampler """basic""" +827 48 evaluator """rankbased""" +827 49 dataset """kinships""" +827 49 model """transe""" +827 49 loss """bceaftersigmoid""" +827 49 regularizer """no""" +827 49 optimizer """adam""" +827 49 training_loop """owa""" +827 49 negative_sampler """basic""" +827 49 evaluator """rankbased""" +827 50 dataset """kinships""" +827 50 model """transe""" +827 50 loss """bceaftersigmoid""" +827 50 regularizer """no""" +827 50 optimizer """adam""" +827 50 training_loop """owa""" +827 50 negative_sampler """basic""" +827 50 evaluator """rankbased""" +827 51 dataset """kinships""" +827 51 model """transe""" +827 51 loss """bceaftersigmoid""" +827 51 regularizer """no""" +827 51 optimizer """adam""" +827 51 training_loop """owa""" +827 51 negative_sampler """basic""" +827 51 evaluator """rankbased""" +827 
52 dataset """kinships""" +827 52 model """transe""" +827 52 loss """bceaftersigmoid""" +827 52 regularizer """no""" +827 52 optimizer """adam""" +827 52 training_loop """owa""" +827 52 negative_sampler """basic""" +827 52 evaluator """rankbased""" +827 53 dataset """kinships""" +827 53 model """transe""" +827 53 loss """bceaftersigmoid""" +827 53 regularizer """no""" +827 53 optimizer """adam""" +827 53 training_loop """owa""" +827 53 negative_sampler """basic""" +827 53 evaluator """rankbased""" +827 54 dataset """kinships""" +827 54 model """transe""" +827 54 loss """bceaftersigmoid""" +827 54 regularizer """no""" +827 54 optimizer """adam""" +827 54 training_loop """owa""" +827 54 negative_sampler """basic""" +827 54 evaluator """rankbased""" +827 55 dataset """kinships""" +827 55 model """transe""" +827 55 loss """bceaftersigmoid""" +827 55 regularizer """no""" +827 55 optimizer """adam""" +827 55 training_loop """owa""" +827 55 negative_sampler """basic""" +827 55 evaluator """rankbased""" +827 56 dataset """kinships""" +827 56 model """transe""" +827 56 loss """bceaftersigmoid""" +827 56 regularizer """no""" +827 56 optimizer """adam""" +827 56 training_loop """owa""" +827 56 negative_sampler """basic""" +827 56 evaluator """rankbased""" +827 57 dataset """kinships""" +827 57 model """transe""" +827 57 loss """bceaftersigmoid""" +827 57 regularizer """no""" +827 57 optimizer """adam""" +827 57 training_loop """owa""" +827 57 negative_sampler """basic""" +827 57 evaluator """rankbased""" +827 58 dataset """kinships""" +827 58 model """transe""" +827 58 loss """bceaftersigmoid""" +827 58 regularizer """no""" +827 58 optimizer """adam""" +827 58 training_loop """owa""" +827 58 negative_sampler """basic""" +827 58 evaluator """rankbased""" +827 59 dataset """kinships""" +827 59 model """transe""" +827 59 loss """bceaftersigmoid""" +827 59 regularizer """no""" +827 59 optimizer """adam""" +827 59 training_loop """owa""" +827 59 negative_sampler """basic""" +827 
59 evaluator """rankbased""" +827 60 dataset """kinships""" +827 60 model """transe""" +827 60 loss """bceaftersigmoid""" +827 60 regularizer """no""" +827 60 optimizer """adam""" +827 60 training_loop """owa""" +827 60 negative_sampler """basic""" +827 60 evaluator """rankbased""" +827 61 dataset """kinships""" +827 61 model """transe""" +827 61 loss """bceaftersigmoid""" +827 61 regularizer """no""" +827 61 optimizer """adam""" +827 61 training_loop """owa""" +827 61 negative_sampler """basic""" +827 61 evaluator """rankbased""" +827 62 dataset """kinships""" +827 62 model """transe""" +827 62 loss """bceaftersigmoid""" +827 62 regularizer """no""" +827 62 optimizer """adam""" +827 62 training_loop """owa""" +827 62 negative_sampler """basic""" +827 62 evaluator """rankbased""" +827 63 dataset """kinships""" +827 63 model """transe""" +827 63 loss """bceaftersigmoid""" +827 63 regularizer """no""" +827 63 optimizer """adam""" +827 63 training_loop """owa""" +827 63 negative_sampler """basic""" +827 63 evaluator """rankbased""" +827 64 dataset """kinships""" +827 64 model """transe""" +827 64 loss """bceaftersigmoid""" +827 64 regularizer """no""" +827 64 optimizer """adam""" +827 64 training_loop """owa""" +827 64 negative_sampler """basic""" +827 64 evaluator """rankbased""" +827 65 dataset """kinships""" +827 65 model """transe""" +827 65 loss """bceaftersigmoid""" +827 65 regularizer """no""" +827 65 optimizer """adam""" +827 65 training_loop """owa""" +827 65 negative_sampler """basic""" +827 65 evaluator """rankbased""" +827 66 dataset """kinships""" +827 66 model """transe""" +827 66 loss """bceaftersigmoid""" +827 66 regularizer """no""" +827 66 optimizer """adam""" +827 66 training_loop """owa""" +827 66 negative_sampler """basic""" +827 66 evaluator """rankbased""" +827 67 dataset """kinships""" +827 67 model """transe""" +827 67 loss """bceaftersigmoid""" +827 67 regularizer """no""" +827 67 optimizer """adam""" +827 67 training_loop """owa""" +827 67 
negative_sampler """basic""" +827 67 evaluator """rankbased""" +827 68 dataset """kinships""" +827 68 model """transe""" +827 68 loss """bceaftersigmoid""" +827 68 regularizer """no""" +827 68 optimizer """adam""" +827 68 training_loop """owa""" +827 68 negative_sampler """basic""" +827 68 evaluator """rankbased""" +827 69 dataset """kinships""" +827 69 model """transe""" +827 69 loss """bceaftersigmoid""" +827 69 regularizer """no""" +827 69 optimizer """adam""" +827 69 training_loop """owa""" +827 69 negative_sampler """basic""" +827 69 evaluator """rankbased""" +827 70 dataset """kinships""" +827 70 model """transe""" +827 70 loss """bceaftersigmoid""" +827 70 regularizer """no""" +827 70 optimizer """adam""" +827 70 training_loop """owa""" +827 70 negative_sampler """basic""" +827 70 evaluator """rankbased""" +827 71 dataset """kinships""" +827 71 model """transe""" +827 71 loss """bceaftersigmoid""" +827 71 regularizer """no""" +827 71 optimizer """adam""" +827 71 training_loop """owa""" +827 71 negative_sampler """basic""" +827 71 evaluator """rankbased""" +827 72 dataset """kinships""" +827 72 model """transe""" +827 72 loss """bceaftersigmoid""" +827 72 regularizer """no""" +827 72 optimizer """adam""" +827 72 training_loop """owa""" +827 72 negative_sampler """basic""" +827 72 evaluator """rankbased""" +827 73 dataset """kinships""" +827 73 model """transe""" +827 73 loss """bceaftersigmoid""" +827 73 regularizer """no""" +827 73 optimizer """adam""" +827 73 training_loop """owa""" +827 73 negative_sampler """basic""" +827 73 evaluator """rankbased""" +827 74 dataset """kinships""" +827 74 model """transe""" +827 74 loss """bceaftersigmoid""" +827 74 regularizer """no""" +827 74 optimizer """adam""" +827 74 training_loop """owa""" +827 74 negative_sampler """basic""" +827 74 evaluator """rankbased""" +827 75 dataset """kinships""" +827 75 model """transe""" +827 75 loss """bceaftersigmoid""" +827 75 regularizer """no""" +827 75 optimizer """adam""" +827 75 
training_loop """owa""" +827 75 negative_sampler """basic""" +827 75 evaluator """rankbased""" +827 76 dataset """kinships""" +827 76 model """transe""" +827 76 loss """bceaftersigmoid""" +827 76 regularizer """no""" +827 76 optimizer """adam""" +827 76 training_loop """owa""" +827 76 negative_sampler """basic""" +827 76 evaluator """rankbased""" +827 77 dataset """kinships""" +827 77 model """transe""" +827 77 loss """bceaftersigmoid""" +827 77 regularizer """no""" +827 77 optimizer """adam""" +827 77 training_loop """owa""" +827 77 negative_sampler """basic""" +827 77 evaluator """rankbased""" +827 78 dataset """kinships""" +827 78 model """transe""" +827 78 loss """bceaftersigmoid""" +827 78 regularizer """no""" +827 78 optimizer """adam""" +827 78 training_loop """owa""" +827 78 negative_sampler """basic""" +827 78 evaluator """rankbased""" +827 79 dataset """kinships""" +827 79 model """transe""" +827 79 loss """bceaftersigmoid""" +827 79 regularizer """no""" +827 79 optimizer """adam""" +827 79 training_loop """owa""" +827 79 negative_sampler """basic""" +827 79 evaluator """rankbased""" +827 80 dataset """kinships""" +827 80 model """transe""" +827 80 loss """bceaftersigmoid""" +827 80 regularizer """no""" +827 80 optimizer """adam""" +827 80 training_loop """owa""" +827 80 negative_sampler """basic""" +827 80 evaluator """rankbased""" +827 81 dataset """kinships""" +827 81 model """transe""" +827 81 loss """bceaftersigmoid""" +827 81 regularizer """no""" +827 81 optimizer """adam""" +827 81 training_loop """owa""" +827 81 negative_sampler """basic""" +827 81 evaluator """rankbased""" +827 82 dataset """kinships""" +827 82 model """transe""" +827 82 loss """bceaftersigmoid""" +827 82 regularizer """no""" +827 82 optimizer """adam""" +827 82 training_loop """owa""" +827 82 negative_sampler """basic""" +827 82 evaluator """rankbased""" +827 83 dataset """kinships""" +827 83 model """transe""" +827 83 loss """bceaftersigmoid""" +827 83 regularizer """no""" +827 
83 optimizer """adam""" +827 83 training_loop """owa""" +827 83 negative_sampler """basic""" +827 83 evaluator """rankbased""" +827 84 dataset """kinships""" +827 84 model """transe""" +827 84 loss """bceaftersigmoid""" +827 84 regularizer """no""" +827 84 optimizer """adam""" +827 84 training_loop """owa""" +827 84 negative_sampler """basic""" +827 84 evaluator """rankbased""" +827 85 dataset """kinships""" +827 85 model """transe""" +827 85 loss """bceaftersigmoid""" +827 85 regularizer """no""" +827 85 optimizer """adam""" +827 85 training_loop """owa""" +827 85 negative_sampler """basic""" +827 85 evaluator """rankbased""" +827 86 dataset """kinships""" +827 86 model """transe""" +827 86 loss """bceaftersigmoid""" +827 86 regularizer """no""" +827 86 optimizer """adam""" +827 86 training_loop """owa""" +827 86 negative_sampler """basic""" +827 86 evaluator """rankbased""" +827 87 dataset """kinships""" +827 87 model """transe""" +827 87 loss """bceaftersigmoid""" +827 87 regularizer """no""" +827 87 optimizer """adam""" +827 87 training_loop """owa""" +827 87 negative_sampler """basic""" +827 87 evaluator """rankbased""" +827 88 dataset """kinships""" +827 88 model """transe""" +827 88 loss """bceaftersigmoid""" +827 88 regularizer """no""" +827 88 optimizer """adam""" +827 88 training_loop """owa""" +827 88 negative_sampler """basic""" +827 88 evaluator """rankbased""" +827 89 dataset """kinships""" +827 89 model """transe""" +827 89 loss """bceaftersigmoid""" +827 89 regularizer """no""" +827 89 optimizer """adam""" +827 89 training_loop """owa""" +827 89 negative_sampler """basic""" +827 89 evaluator """rankbased""" +827 90 dataset """kinships""" +827 90 model """transe""" +827 90 loss """bceaftersigmoid""" +827 90 regularizer """no""" +827 90 optimizer """adam""" +827 90 training_loop """owa""" +827 90 negative_sampler """basic""" +827 90 evaluator """rankbased""" +827 91 dataset """kinships""" +827 91 model """transe""" +827 91 loss """bceaftersigmoid""" 
+827 91 regularizer """no""" +827 91 optimizer """adam""" +827 91 training_loop """owa""" +827 91 negative_sampler """basic""" +827 91 evaluator """rankbased""" +827 92 dataset """kinships""" +827 92 model """transe""" +827 92 loss """bceaftersigmoid""" +827 92 regularizer """no""" +827 92 optimizer """adam""" +827 92 training_loop """owa""" +827 92 negative_sampler """basic""" +827 92 evaluator """rankbased""" +827 93 dataset """kinships""" +827 93 model """transe""" +827 93 loss """bceaftersigmoid""" +827 93 regularizer """no""" +827 93 optimizer """adam""" +827 93 training_loop """owa""" +827 93 negative_sampler """basic""" +827 93 evaluator """rankbased""" +827 94 dataset """kinships""" +827 94 model """transe""" +827 94 loss """bceaftersigmoid""" +827 94 regularizer """no""" +827 94 optimizer """adam""" +827 94 training_loop """owa""" +827 94 negative_sampler """basic""" +827 94 evaluator """rankbased""" +827 95 dataset """kinships""" +827 95 model """transe""" +827 95 loss """bceaftersigmoid""" +827 95 regularizer """no""" +827 95 optimizer """adam""" +827 95 training_loop """owa""" +827 95 negative_sampler """basic""" +827 95 evaluator """rankbased""" +827 96 dataset """kinships""" +827 96 model """transe""" +827 96 loss """bceaftersigmoid""" +827 96 regularizer """no""" +827 96 optimizer """adam""" +827 96 training_loop """owa""" +827 96 negative_sampler """basic""" +827 96 evaluator """rankbased""" +827 97 dataset """kinships""" +827 97 model """transe""" +827 97 loss """bceaftersigmoid""" +827 97 regularizer """no""" +827 97 optimizer """adam""" +827 97 training_loop """owa""" +827 97 negative_sampler """basic""" +827 97 evaluator """rankbased""" +827 98 dataset """kinships""" +827 98 model """transe""" +827 98 loss """bceaftersigmoid""" +827 98 regularizer """no""" +827 98 optimizer """adam""" +827 98 training_loop """owa""" +827 98 negative_sampler """basic""" +827 98 evaluator """rankbased""" +827 99 dataset """kinships""" +827 99 model """transe""" 
+827 99 loss """bceaftersigmoid""" +827 99 regularizer """no""" +827 99 optimizer """adam""" +827 99 training_loop """owa""" +827 99 negative_sampler """basic""" +827 99 evaluator """rankbased""" +827 100 dataset """kinships""" +827 100 model """transe""" +827 100 loss """bceaftersigmoid""" +827 100 regularizer """no""" +827 100 optimizer """adam""" +827 100 training_loop """owa""" +827 100 negative_sampler """basic""" +827 100 evaluator """rankbased""" +828 1 model.embedding_dim 0.0 +828 1 model.scoring_fct_norm 1.0 +828 1 optimizer.lr 0.0015447309232601305 +828 1 negative_sampler.num_negs_per_pos 98.0 +828 1 training.batch_size 0.0 +828 2 model.embedding_dim 0.0 +828 2 model.scoring_fct_norm 1.0 +828 2 optimizer.lr 0.09408540299046227 +828 2 negative_sampler.num_negs_per_pos 0.0 +828 2 training.batch_size 0.0 +828 3 model.embedding_dim 2.0 +828 3 model.scoring_fct_norm 1.0 +828 3 optimizer.lr 0.01041130561606895 +828 3 negative_sampler.num_negs_per_pos 74.0 +828 3 training.batch_size 1.0 +828 4 model.embedding_dim 2.0 +828 4 model.scoring_fct_norm 1.0 +828 4 optimizer.lr 0.001312736288642099 +828 4 negative_sampler.num_negs_per_pos 88.0 +828 4 training.batch_size 2.0 +828 5 model.embedding_dim 1.0 +828 5 model.scoring_fct_norm 2.0 +828 5 optimizer.lr 0.0012616898232109136 +828 5 negative_sampler.num_negs_per_pos 6.0 +828 5 training.batch_size 0.0 +828 6 model.embedding_dim 0.0 +828 6 model.scoring_fct_norm 1.0 +828 6 optimizer.lr 0.01301896788880423 +828 6 negative_sampler.num_negs_per_pos 63.0 +828 6 training.batch_size 2.0 +828 7 model.embedding_dim 2.0 +828 7 model.scoring_fct_norm 2.0 +828 7 optimizer.lr 0.008613117673044263 +828 7 negative_sampler.num_negs_per_pos 34.0 +828 7 training.batch_size 0.0 +828 8 model.embedding_dim 0.0 +828 8 model.scoring_fct_norm 2.0 +828 8 optimizer.lr 0.009335580562050735 +828 8 negative_sampler.num_negs_per_pos 27.0 +828 8 training.batch_size 2.0 +828 9 model.embedding_dim 1.0 +828 9 model.scoring_fct_norm 2.0 +828 9 
optimizer.lr 0.04495901911309132 +828 9 negative_sampler.num_negs_per_pos 62.0 +828 9 training.batch_size 2.0 +828 10 model.embedding_dim 1.0 +828 10 model.scoring_fct_norm 1.0 +828 10 optimizer.lr 0.0033798432943227983 +828 10 negative_sampler.num_negs_per_pos 90.0 +828 10 training.batch_size 2.0 +828 11 model.embedding_dim 0.0 +828 11 model.scoring_fct_norm 2.0 +828 11 optimizer.lr 0.0012046208281750127 +828 11 negative_sampler.num_negs_per_pos 28.0 +828 11 training.batch_size 1.0 +828 12 model.embedding_dim 0.0 +828 12 model.scoring_fct_norm 2.0 +828 12 optimizer.lr 0.002854054679860408 +828 12 negative_sampler.num_negs_per_pos 11.0 +828 12 training.batch_size 1.0 +828 13 model.embedding_dim 2.0 +828 13 model.scoring_fct_norm 2.0 +828 13 optimizer.lr 0.011175449320459226 +828 13 negative_sampler.num_negs_per_pos 13.0 +828 13 training.batch_size 1.0 +828 14 model.embedding_dim 2.0 +828 14 model.scoring_fct_norm 1.0 +828 14 optimizer.lr 0.018184083298740838 +828 14 negative_sampler.num_negs_per_pos 30.0 +828 14 training.batch_size 0.0 +828 15 model.embedding_dim 0.0 +828 15 model.scoring_fct_norm 1.0 +828 15 optimizer.lr 0.04844779713558996 +828 15 negative_sampler.num_negs_per_pos 84.0 +828 15 training.batch_size 2.0 +828 16 model.embedding_dim 2.0 +828 16 model.scoring_fct_norm 1.0 +828 16 optimizer.lr 0.019981502704939817 +828 16 negative_sampler.num_negs_per_pos 60.0 +828 16 training.batch_size 0.0 +828 17 model.embedding_dim 1.0 +828 17 model.scoring_fct_norm 1.0 +828 17 optimizer.lr 0.087134270030909 +828 17 negative_sampler.num_negs_per_pos 41.0 +828 17 training.batch_size 1.0 +828 18 model.embedding_dim 1.0 +828 18 model.scoring_fct_norm 1.0 +828 18 optimizer.lr 0.08206843990673239 +828 18 negative_sampler.num_negs_per_pos 37.0 +828 18 training.batch_size 1.0 +828 19 model.embedding_dim 1.0 +828 19 model.scoring_fct_norm 2.0 +828 19 optimizer.lr 0.045154226186505254 +828 19 negative_sampler.num_negs_per_pos 7.0 +828 19 training.batch_size 2.0 +828 20 
model.embedding_dim 1.0 +828 20 model.scoring_fct_norm 2.0 +828 20 optimizer.lr 0.03508550424943274 +828 20 negative_sampler.num_negs_per_pos 18.0 +828 20 training.batch_size 1.0 +828 21 model.embedding_dim 2.0 +828 21 model.scoring_fct_norm 1.0 +828 21 optimizer.lr 0.0011870093840842142 +828 21 negative_sampler.num_negs_per_pos 78.0 +828 21 training.batch_size 1.0 +828 22 model.embedding_dim 1.0 +828 22 model.scoring_fct_norm 1.0 +828 22 optimizer.lr 0.010178267105933666 +828 22 negative_sampler.num_negs_per_pos 64.0 +828 22 training.batch_size 0.0 +828 23 model.embedding_dim 2.0 +828 23 model.scoring_fct_norm 2.0 +828 23 optimizer.lr 0.009643036370278904 +828 23 negative_sampler.num_negs_per_pos 85.0 +828 23 training.batch_size 2.0 +828 24 model.embedding_dim 1.0 +828 24 model.scoring_fct_norm 1.0 +828 24 optimizer.lr 0.0062127420348983015 +828 24 negative_sampler.num_negs_per_pos 90.0 +828 24 training.batch_size 1.0 +828 25 model.embedding_dim 0.0 +828 25 model.scoring_fct_norm 2.0 +828 25 optimizer.lr 0.09658728844855917 +828 25 negative_sampler.num_negs_per_pos 54.0 +828 25 training.batch_size 0.0 +828 26 model.embedding_dim 0.0 +828 26 model.scoring_fct_norm 1.0 +828 26 optimizer.lr 0.01706202665411027 +828 26 negative_sampler.num_negs_per_pos 76.0 +828 26 training.batch_size 2.0 +828 27 model.embedding_dim 1.0 +828 27 model.scoring_fct_norm 2.0 +828 27 optimizer.lr 0.0656852598650496 +828 27 negative_sampler.num_negs_per_pos 77.0 +828 27 training.batch_size 2.0 +828 28 model.embedding_dim 2.0 +828 28 model.scoring_fct_norm 2.0 +828 28 optimizer.lr 0.02918693949895773 +828 28 negative_sampler.num_negs_per_pos 36.0 +828 28 training.batch_size 0.0 +828 29 model.embedding_dim 2.0 +828 29 model.scoring_fct_norm 1.0 +828 29 optimizer.lr 0.0015249902830766974 +828 29 negative_sampler.num_negs_per_pos 89.0 +828 29 training.batch_size 1.0 +828 30 model.embedding_dim 0.0 +828 30 model.scoring_fct_norm 1.0 +828 30 optimizer.lr 0.06454849439289818 +828 30 
negative_sampler.num_negs_per_pos 66.0 +828 30 training.batch_size 2.0 +828 31 model.embedding_dim 0.0 +828 31 model.scoring_fct_norm 2.0 +828 31 optimizer.lr 0.024693239600405637 +828 31 negative_sampler.num_negs_per_pos 47.0 +828 31 training.batch_size 1.0 +828 32 model.embedding_dim 2.0 +828 32 model.scoring_fct_norm 1.0 +828 32 optimizer.lr 0.07571149620232354 +828 32 negative_sampler.num_negs_per_pos 73.0 +828 32 training.batch_size 2.0 +828 33 model.embedding_dim 0.0 +828 33 model.scoring_fct_norm 1.0 +828 33 optimizer.lr 0.0017437457234010263 +828 33 negative_sampler.num_negs_per_pos 8.0 +828 33 training.batch_size 2.0 +828 34 model.embedding_dim 1.0 +828 34 model.scoring_fct_norm 2.0 +828 34 optimizer.lr 0.0038731121993402 +828 34 negative_sampler.num_negs_per_pos 3.0 +828 34 training.batch_size 2.0 +828 35 model.embedding_dim 2.0 +828 35 model.scoring_fct_norm 1.0 +828 35 optimizer.lr 0.0010103868406218005 +828 35 negative_sampler.num_negs_per_pos 32.0 +828 35 training.batch_size 1.0 +828 36 model.embedding_dim 2.0 +828 36 model.scoring_fct_norm 2.0 +828 36 optimizer.lr 0.009392886320826877 +828 36 negative_sampler.num_negs_per_pos 11.0 +828 36 training.batch_size 1.0 +828 37 model.embedding_dim 1.0 +828 37 model.scoring_fct_norm 2.0 +828 37 optimizer.lr 0.006569516076190077 +828 37 negative_sampler.num_negs_per_pos 41.0 +828 37 training.batch_size 0.0 +828 38 model.embedding_dim 1.0 +828 38 model.scoring_fct_norm 2.0 +828 38 optimizer.lr 0.0034212759888751216 +828 38 negative_sampler.num_negs_per_pos 75.0 +828 38 training.batch_size 2.0 +828 39 model.embedding_dim 1.0 +828 39 model.scoring_fct_norm 1.0 +828 39 optimizer.lr 0.001561368438848846 +828 39 negative_sampler.num_negs_per_pos 36.0 +828 39 training.batch_size 2.0 +828 40 model.embedding_dim 2.0 +828 40 model.scoring_fct_norm 2.0 +828 40 optimizer.lr 0.02189397103883658 +828 40 negative_sampler.num_negs_per_pos 19.0 +828 40 training.batch_size 0.0 +828 41 model.embedding_dim 2.0 +828 41 
model.scoring_fct_norm 1.0 +828 41 optimizer.lr 0.003857969877135711 +828 41 negative_sampler.num_negs_per_pos 35.0 +828 41 training.batch_size 1.0 +828 42 model.embedding_dim 1.0 +828 42 model.scoring_fct_norm 2.0 +828 42 optimizer.lr 0.024676045756542087 +828 42 negative_sampler.num_negs_per_pos 71.0 +828 42 training.batch_size 0.0 +828 43 model.embedding_dim 0.0 +828 43 model.scoring_fct_norm 2.0 +828 43 optimizer.lr 0.005481678293234962 +828 43 negative_sampler.num_negs_per_pos 21.0 +828 43 training.batch_size 0.0 +828 44 model.embedding_dim 1.0 +828 44 model.scoring_fct_norm 1.0 +828 44 optimizer.lr 0.06485353957970048 +828 44 negative_sampler.num_negs_per_pos 36.0 +828 44 training.batch_size 0.0 +828 45 model.embedding_dim 2.0 +828 45 model.scoring_fct_norm 2.0 +828 45 optimizer.lr 0.09095536555526317 +828 45 negative_sampler.num_negs_per_pos 45.0 +828 45 training.batch_size 2.0 +828 46 model.embedding_dim 0.0 +828 46 model.scoring_fct_norm 1.0 +828 46 optimizer.lr 0.023321960484844424 +828 46 negative_sampler.num_negs_per_pos 79.0 +828 46 training.batch_size 1.0 +828 47 model.embedding_dim 2.0 +828 47 model.scoring_fct_norm 1.0 +828 47 optimizer.lr 0.09441718360890507 +828 47 negative_sampler.num_negs_per_pos 52.0 +828 47 training.batch_size 2.0 +828 48 model.embedding_dim 2.0 +828 48 model.scoring_fct_norm 2.0 +828 48 optimizer.lr 0.013455917397649255 +828 48 negative_sampler.num_negs_per_pos 6.0 +828 48 training.batch_size 0.0 +828 49 model.embedding_dim 1.0 +828 49 model.scoring_fct_norm 1.0 +828 49 optimizer.lr 0.008038576448681686 +828 49 negative_sampler.num_negs_per_pos 7.0 +828 49 training.batch_size 0.0 +828 50 model.embedding_dim 2.0 +828 50 model.scoring_fct_norm 2.0 +828 50 optimizer.lr 0.019814056534464188 +828 50 negative_sampler.num_negs_per_pos 12.0 +828 50 training.batch_size 2.0 +828 51 model.embedding_dim 2.0 +828 51 model.scoring_fct_norm 1.0 +828 51 optimizer.lr 0.028049117239384136 +828 51 negative_sampler.num_negs_per_pos 29.0 +828 51 
training.batch_size 2.0 +828 52 model.embedding_dim 1.0 +828 52 model.scoring_fct_norm 2.0 +828 52 optimizer.lr 0.0053021178331644665 +828 52 negative_sampler.num_negs_per_pos 70.0 +828 52 training.batch_size 2.0 +828 53 model.embedding_dim 1.0 +828 53 model.scoring_fct_norm 2.0 +828 53 optimizer.lr 0.0663714485553116 +828 53 negative_sampler.num_negs_per_pos 73.0 +828 53 training.batch_size 2.0 +828 54 model.embedding_dim 0.0 +828 54 model.scoring_fct_norm 2.0 +828 54 optimizer.lr 0.08109131149254879 +828 54 negative_sampler.num_negs_per_pos 14.0 +828 54 training.batch_size 2.0 +828 55 model.embedding_dim 1.0 +828 55 model.scoring_fct_norm 1.0 +828 55 optimizer.lr 0.00615031334863365 +828 55 negative_sampler.num_negs_per_pos 67.0 +828 55 training.batch_size 2.0 +828 56 model.embedding_dim 0.0 +828 56 model.scoring_fct_norm 1.0 +828 56 optimizer.lr 0.00690636959469469 +828 56 negative_sampler.num_negs_per_pos 76.0 +828 56 training.batch_size 0.0 +828 57 model.embedding_dim 2.0 +828 57 model.scoring_fct_norm 1.0 +828 57 optimizer.lr 0.013417310358117248 +828 57 negative_sampler.num_negs_per_pos 19.0 +828 57 training.batch_size 0.0 +828 58 model.embedding_dim 0.0 +828 58 model.scoring_fct_norm 1.0 +828 58 optimizer.lr 0.005037917057189753 +828 58 negative_sampler.num_negs_per_pos 7.0 +828 58 training.batch_size 1.0 +828 59 model.embedding_dim 1.0 +828 59 model.scoring_fct_norm 2.0 +828 59 optimizer.lr 0.011483834861199253 +828 59 negative_sampler.num_negs_per_pos 78.0 +828 59 training.batch_size 2.0 +828 60 model.embedding_dim 2.0 +828 60 model.scoring_fct_norm 2.0 +828 60 optimizer.lr 0.003933144955624362 +828 60 negative_sampler.num_negs_per_pos 33.0 +828 60 training.batch_size 2.0 +828 61 model.embedding_dim 2.0 +828 61 model.scoring_fct_norm 2.0 +828 61 optimizer.lr 0.034758447987092406 +828 61 negative_sampler.num_negs_per_pos 82.0 +828 61 training.batch_size 0.0 +828 62 model.embedding_dim 1.0 +828 62 model.scoring_fct_norm 2.0 +828 62 optimizer.lr 
0.005087099391871292 +828 62 negative_sampler.num_negs_per_pos 74.0 +828 62 training.batch_size 1.0 +828 63 model.embedding_dim 2.0 +828 63 model.scoring_fct_norm 2.0 +828 63 optimizer.lr 0.04493048685715493 +828 63 negative_sampler.num_negs_per_pos 36.0 +828 63 training.batch_size 1.0 +828 64 model.embedding_dim 0.0 +828 64 model.scoring_fct_norm 2.0 +828 64 optimizer.lr 0.05675351887761409 +828 64 negative_sampler.num_negs_per_pos 63.0 +828 64 training.batch_size 0.0 +828 65 model.embedding_dim 1.0 +828 65 model.scoring_fct_norm 2.0 +828 65 optimizer.lr 0.030440196112303464 +828 65 negative_sampler.num_negs_per_pos 37.0 +828 65 training.batch_size 2.0 +828 66 model.embedding_dim 0.0 +828 66 model.scoring_fct_norm 2.0 +828 66 optimizer.lr 0.0014838138730251151 +828 66 negative_sampler.num_negs_per_pos 45.0 +828 66 training.batch_size 1.0 +828 67 model.embedding_dim 0.0 +828 67 model.scoring_fct_norm 2.0 +828 67 optimizer.lr 0.005948086261778498 +828 67 negative_sampler.num_negs_per_pos 12.0 +828 67 training.batch_size 1.0 +828 68 model.embedding_dim 2.0 +828 68 model.scoring_fct_norm 1.0 +828 68 optimizer.lr 0.039891060434540326 +828 68 negative_sampler.num_negs_per_pos 98.0 +828 68 training.batch_size 2.0 +828 69 model.embedding_dim 2.0 +828 69 model.scoring_fct_norm 1.0 +828 69 optimizer.lr 0.0024590293579001606 +828 69 negative_sampler.num_negs_per_pos 32.0 +828 69 training.batch_size 2.0 +828 70 model.embedding_dim 0.0 +828 70 model.scoring_fct_norm 1.0 +828 70 optimizer.lr 0.055506762765767145 +828 70 negative_sampler.num_negs_per_pos 84.0 +828 70 training.batch_size 1.0 +828 71 model.embedding_dim 0.0 +828 71 model.scoring_fct_norm 1.0 +828 71 optimizer.lr 0.0018983835437291396 +828 71 negative_sampler.num_negs_per_pos 61.0 +828 71 training.batch_size 0.0 +828 72 model.embedding_dim 2.0 +828 72 model.scoring_fct_norm 1.0 +828 72 optimizer.lr 0.005124689509739755 +828 72 negative_sampler.num_negs_per_pos 25.0 +828 72 training.batch_size 2.0 +828 73 
model.embedding_dim 2.0 +828 73 model.scoring_fct_norm 1.0 +828 73 optimizer.lr 0.05607783595025267 +828 73 negative_sampler.num_negs_per_pos 63.0 +828 73 training.batch_size 2.0 +828 74 model.embedding_dim 0.0 +828 74 model.scoring_fct_norm 2.0 +828 74 optimizer.lr 0.07154772334260537 +828 74 negative_sampler.num_negs_per_pos 0.0 +828 74 training.batch_size 1.0 +828 75 model.embedding_dim 0.0 +828 75 model.scoring_fct_norm 2.0 +828 75 optimizer.lr 0.056643445287156056 +828 75 negative_sampler.num_negs_per_pos 81.0 +828 75 training.batch_size 1.0 +828 76 model.embedding_dim 2.0 +828 76 model.scoring_fct_norm 1.0 +828 76 optimizer.lr 0.0476058252613905 +828 76 negative_sampler.num_negs_per_pos 88.0 +828 76 training.batch_size 1.0 +828 77 model.embedding_dim 1.0 +828 77 model.scoring_fct_norm 2.0 +828 77 optimizer.lr 0.003160127616493401 +828 77 negative_sampler.num_negs_per_pos 18.0 +828 77 training.batch_size 1.0 +828 78 model.embedding_dim 1.0 +828 78 model.scoring_fct_norm 1.0 +828 78 optimizer.lr 0.0010371571511503816 +828 78 negative_sampler.num_negs_per_pos 37.0 +828 78 training.batch_size 2.0 +828 79 model.embedding_dim 0.0 +828 79 model.scoring_fct_norm 1.0 +828 79 optimizer.lr 0.02270739318846613 +828 79 negative_sampler.num_negs_per_pos 38.0 +828 79 training.batch_size 1.0 +828 80 model.embedding_dim 1.0 +828 80 model.scoring_fct_norm 1.0 +828 80 optimizer.lr 0.02540020487416692 +828 80 negative_sampler.num_negs_per_pos 9.0 +828 80 training.batch_size 2.0 +828 81 model.embedding_dim 0.0 +828 81 model.scoring_fct_norm 2.0 +828 81 optimizer.lr 0.0020109585144787016 +828 81 negative_sampler.num_negs_per_pos 63.0 +828 81 training.batch_size 2.0 +828 82 model.embedding_dim 1.0 +828 82 model.scoring_fct_norm 2.0 +828 82 optimizer.lr 0.001308688998181998 +828 82 negative_sampler.num_negs_per_pos 74.0 +828 82 training.batch_size 1.0 +828 83 model.embedding_dim 0.0 +828 83 model.scoring_fct_norm 2.0 +828 83 optimizer.lr 0.009400508134552876 +828 83 
negative_sampler.num_negs_per_pos 43.0 +828 83 training.batch_size 2.0 +828 84 model.embedding_dim 1.0 +828 84 model.scoring_fct_norm 2.0 +828 84 optimizer.lr 0.003399878377483052 +828 84 negative_sampler.num_negs_per_pos 11.0 +828 84 training.batch_size 2.0 +828 85 model.embedding_dim 1.0 +828 85 model.scoring_fct_norm 1.0 +828 85 optimizer.lr 0.0015710901387475742 +828 85 negative_sampler.num_negs_per_pos 91.0 +828 85 training.batch_size 0.0 +828 86 model.embedding_dim 0.0 +828 86 model.scoring_fct_norm 1.0 +828 86 optimizer.lr 0.021830248450932038 +828 86 negative_sampler.num_negs_per_pos 72.0 +828 86 training.batch_size 1.0 +828 87 model.embedding_dim 0.0 +828 87 model.scoring_fct_norm 2.0 +828 87 optimizer.lr 0.004530600599167192 +828 87 negative_sampler.num_negs_per_pos 71.0 +828 87 training.batch_size 0.0 +828 88 model.embedding_dim 2.0 +828 88 model.scoring_fct_norm 1.0 +828 88 optimizer.lr 0.0017673788183336916 +828 88 negative_sampler.num_negs_per_pos 3.0 +828 88 training.batch_size 1.0 +828 89 model.embedding_dim 2.0 +828 89 model.scoring_fct_norm 1.0 +828 89 optimizer.lr 0.02396859182988477 +828 89 negative_sampler.num_negs_per_pos 28.0 +828 89 training.batch_size 1.0 +828 90 model.embedding_dim 2.0 +828 90 model.scoring_fct_norm 1.0 +828 90 optimizer.lr 0.014115919209779561 +828 90 negative_sampler.num_negs_per_pos 40.0 +828 90 training.batch_size 2.0 +828 91 model.embedding_dim 2.0 +828 91 model.scoring_fct_norm 1.0 +828 91 optimizer.lr 0.017583847673000284 +828 91 negative_sampler.num_negs_per_pos 53.0 +828 91 training.batch_size 2.0 +828 92 model.embedding_dim 2.0 +828 92 model.scoring_fct_norm 2.0 +828 92 optimizer.lr 0.08372142549235627 +828 92 negative_sampler.num_negs_per_pos 97.0 +828 92 training.batch_size 2.0 +828 93 model.embedding_dim 2.0 +828 93 model.scoring_fct_norm 1.0 +828 93 optimizer.lr 0.03752650453451514 +828 93 negative_sampler.num_negs_per_pos 30.0 +828 93 training.batch_size 1.0 +828 94 model.embedding_dim 1.0 +828 94 
model.scoring_fct_norm 1.0 +828 94 optimizer.lr 0.009231092712382368 +828 94 negative_sampler.num_negs_per_pos 46.0 +828 94 training.batch_size 2.0 +828 95 model.embedding_dim 1.0 +828 95 model.scoring_fct_norm 2.0 +828 95 optimizer.lr 0.09602974579455258 +828 95 negative_sampler.num_negs_per_pos 73.0 +828 95 training.batch_size 0.0 +828 96 model.embedding_dim 2.0 +828 96 model.scoring_fct_norm 1.0 +828 96 optimizer.lr 0.03752146754073472 +828 96 negative_sampler.num_negs_per_pos 80.0 +828 96 training.batch_size 1.0 +828 97 model.embedding_dim 1.0 +828 97 model.scoring_fct_norm 1.0 +828 97 optimizer.lr 0.002006004721946709 +828 97 negative_sampler.num_negs_per_pos 29.0 +828 97 training.batch_size 0.0 +828 98 model.embedding_dim 2.0 +828 98 model.scoring_fct_norm 2.0 +828 98 optimizer.lr 0.051342864405454436 +828 98 negative_sampler.num_negs_per_pos 72.0 +828 98 training.batch_size 2.0 +828 99 model.embedding_dim 2.0 +828 99 model.scoring_fct_norm 1.0 +828 99 optimizer.lr 0.01705769024771915 +828 99 negative_sampler.num_negs_per_pos 19.0 +828 99 training.batch_size 0.0 +828 100 model.embedding_dim 1.0 +828 100 model.scoring_fct_norm 1.0 +828 100 optimizer.lr 0.003877285133017557 +828 100 negative_sampler.num_negs_per_pos 58.0 +828 100 training.batch_size 2.0 +828 1 dataset """kinships""" +828 1 model """transe""" +828 1 loss """softplus""" +828 1 regularizer """no""" +828 1 optimizer """adam""" +828 1 training_loop """owa""" +828 1 negative_sampler """basic""" +828 1 evaluator """rankbased""" +828 2 dataset """kinships""" +828 2 model """transe""" +828 2 loss """softplus""" +828 2 regularizer """no""" +828 2 optimizer """adam""" +828 2 training_loop """owa""" +828 2 negative_sampler """basic""" +828 2 evaluator """rankbased""" +828 3 dataset """kinships""" +828 3 model """transe""" +828 3 loss """softplus""" +828 3 regularizer """no""" +828 3 optimizer """adam""" +828 3 training_loop """owa""" +828 3 negative_sampler """basic""" +828 3 evaluator """rankbased""" +828 
4 dataset """kinships""" +828 4 model """transe""" +828 4 loss """softplus""" +828 4 regularizer """no""" +828 4 optimizer """adam""" +828 4 training_loop """owa""" +828 4 negative_sampler """basic""" +828 4 evaluator """rankbased""" +828 5 dataset """kinships""" +828 5 model """transe""" +828 5 loss """softplus""" +828 5 regularizer """no""" +828 5 optimizer """adam""" +828 5 training_loop """owa""" +828 5 negative_sampler """basic""" +828 5 evaluator """rankbased""" +828 6 dataset """kinships""" +828 6 model """transe""" +828 6 loss """softplus""" +828 6 regularizer """no""" +828 6 optimizer """adam""" +828 6 training_loop """owa""" +828 6 negative_sampler """basic""" +828 6 evaluator """rankbased""" +828 7 dataset """kinships""" +828 7 model """transe""" +828 7 loss """softplus""" +828 7 regularizer """no""" +828 7 optimizer """adam""" +828 7 training_loop """owa""" +828 7 negative_sampler """basic""" +828 7 evaluator """rankbased""" +828 8 dataset """kinships""" +828 8 model """transe""" +828 8 loss """softplus""" +828 8 regularizer """no""" +828 8 optimizer """adam""" +828 8 training_loop """owa""" +828 8 negative_sampler """basic""" +828 8 evaluator """rankbased""" +828 9 dataset """kinships""" +828 9 model """transe""" +828 9 loss """softplus""" +828 9 regularizer """no""" +828 9 optimizer """adam""" +828 9 training_loop """owa""" +828 9 negative_sampler """basic""" +828 9 evaluator """rankbased""" +828 10 dataset """kinships""" +828 10 model """transe""" +828 10 loss """softplus""" +828 10 regularizer """no""" +828 10 optimizer """adam""" +828 10 training_loop """owa""" +828 10 negative_sampler """basic""" +828 10 evaluator """rankbased""" +828 11 dataset """kinships""" +828 11 model """transe""" +828 11 loss """softplus""" +828 11 regularizer """no""" +828 11 optimizer """adam""" +828 11 training_loop """owa""" +828 11 negative_sampler """basic""" +828 11 evaluator """rankbased""" +828 12 dataset """kinships""" +828 12 model """transe""" +828 12 loss 
"""softplus""" +828 12 regularizer """no""" +828 12 optimizer """adam""" +828 12 training_loop """owa""" +828 12 negative_sampler """basic""" +828 12 evaluator """rankbased""" +828 13 dataset """kinships""" +828 13 model """transe""" +828 13 loss """softplus""" +828 13 regularizer """no""" +828 13 optimizer """adam""" +828 13 training_loop """owa""" +828 13 negative_sampler """basic""" +828 13 evaluator """rankbased""" +828 14 dataset """kinships""" +828 14 model """transe""" +828 14 loss """softplus""" +828 14 regularizer """no""" +828 14 optimizer """adam""" +828 14 training_loop """owa""" +828 14 negative_sampler """basic""" +828 14 evaluator """rankbased""" +828 15 dataset """kinships""" +828 15 model """transe""" +828 15 loss """softplus""" +828 15 regularizer """no""" +828 15 optimizer """adam""" +828 15 training_loop """owa""" +828 15 negative_sampler """basic""" +828 15 evaluator """rankbased""" +828 16 dataset """kinships""" +828 16 model """transe""" +828 16 loss """softplus""" +828 16 regularizer """no""" +828 16 optimizer """adam""" +828 16 training_loop """owa""" +828 16 negative_sampler """basic""" +828 16 evaluator """rankbased""" +828 17 dataset """kinships""" +828 17 model """transe""" +828 17 loss """softplus""" +828 17 regularizer """no""" +828 17 optimizer """adam""" +828 17 training_loop """owa""" +828 17 negative_sampler """basic""" +828 17 evaluator """rankbased""" +828 18 dataset """kinships""" +828 18 model """transe""" +828 18 loss """softplus""" +828 18 regularizer """no""" +828 18 optimizer """adam""" +828 18 training_loop """owa""" +828 18 negative_sampler """basic""" +828 18 evaluator """rankbased""" +828 19 dataset """kinships""" +828 19 model """transe""" +828 19 loss """softplus""" +828 19 regularizer """no""" +828 19 optimizer """adam""" +828 19 training_loop """owa""" +828 19 negative_sampler """basic""" +828 19 evaluator """rankbased""" +828 20 dataset """kinships""" +828 20 model """transe""" +828 20 loss """softplus""" +828 20 
regularizer """no""" +828 20 optimizer """adam""" +828 20 training_loop """owa""" +828 20 negative_sampler """basic""" +828 20 evaluator """rankbased""" +828 21 dataset """kinships""" +828 21 model """transe""" +828 21 loss """softplus""" +828 21 regularizer """no""" +828 21 optimizer """adam""" +828 21 training_loop """owa""" +828 21 negative_sampler """basic""" +828 21 evaluator """rankbased""" +828 22 dataset """kinships""" +828 22 model """transe""" +828 22 loss """softplus""" +828 22 regularizer """no""" +828 22 optimizer """adam""" +828 22 training_loop """owa""" +828 22 negative_sampler """basic""" +828 22 evaluator """rankbased""" +828 23 dataset """kinships""" +828 23 model """transe""" +828 23 loss """softplus""" +828 23 regularizer """no""" +828 23 optimizer """adam""" +828 23 training_loop """owa""" +828 23 negative_sampler """basic""" +828 23 evaluator """rankbased""" +828 24 dataset """kinships""" +828 24 model """transe""" +828 24 loss """softplus""" +828 24 regularizer """no""" +828 24 optimizer """adam""" +828 24 training_loop """owa""" +828 24 negative_sampler """basic""" +828 24 evaluator """rankbased""" +828 25 dataset """kinships""" +828 25 model """transe""" +828 25 loss """softplus""" +828 25 regularizer """no""" +828 25 optimizer """adam""" +828 25 training_loop """owa""" +828 25 negative_sampler """basic""" +828 25 evaluator """rankbased""" +828 26 dataset """kinships""" +828 26 model """transe""" +828 26 loss """softplus""" +828 26 regularizer """no""" +828 26 optimizer """adam""" +828 26 training_loop """owa""" +828 26 negative_sampler """basic""" +828 26 evaluator """rankbased""" +828 27 dataset """kinships""" +828 27 model """transe""" +828 27 loss """softplus""" +828 27 regularizer """no""" +828 27 optimizer """adam""" +828 27 training_loop """owa""" +828 27 negative_sampler """basic""" +828 27 evaluator """rankbased""" +828 28 dataset """kinships""" +828 28 model """transe""" +828 28 loss """softplus""" +828 28 regularizer """no""" 
+828 28 optimizer """adam""" +828 28 training_loop """owa""" +828 28 negative_sampler """basic""" +828 28 evaluator """rankbased""" +828 29 dataset """kinships""" +828 29 model """transe""" +828 29 loss """softplus""" +828 29 regularizer """no""" +828 29 optimizer """adam""" +828 29 training_loop """owa""" +828 29 negative_sampler """basic""" +828 29 evaluator """rankbased""" +828 30 dataset """kinships""" +828 30 model """transe""" +828 30 loss """softplus""" +828 30 regularizer """no""" +828 30 optimizer """adam""" +828 30 training_loop """owa""" +828 30 negative_sampler """basic""" +828 30 evaluator """rankbased""" +828 31 dataset """kinships""" +828 31 model """transe""" +828 31 loss """softplus""" +828 31 regularizer """no""" +828 31 optimizer """adam""" +828 31 training_loop """owa""" +828 31 negative_sampler """basic""" +828 31 evaluator """rankbased""" +828 32 dataset """kinships""" +828 32 model """transe""" +828 32 loss """softplus""" +828 32 regularizer """no""" +828 32 optimizer """adam""" +828 32 training_loop """owa""" +828 32 negative_sampler """basic""" +828 32 evaluator """rankbased""" +828 33 dataset """kinships""" +828 33 model """transe""" +828 33 loss """softplus""" +828 33 regularizer """no""" +828 33 optimizer """adam""" +828 33 training_loop """owa""" +828 33 negative_sampler """basic""" +828 33 evaluator """rankbased""" +828 34 dataset """kinships""" +828 34 model """transe""" +828 34 loss """softplus""" +828 34 regularizer """no""" +828 34 optimizer """adam""" +828 34 training_loop """owa""" +828 34 negative_sampler """basic""" +828 34 evaluator """rankbased""" +828 35 dataset """kinships""" +828 35 model """transe""" +828 35 loss """softplus""" +828 35 regularizer """no""" +828 35 optimizer """adam""" +828 35 training_loop """owa""" +828 35 negative_sampler """basic""" +828 35 evaluator """rankbased""" +828 36 dataset """kinships""" +828 36 model """transe""" +828 36 loss """softplus""" +828 36 regularizer """no""" +828 36 optimizer 
"""adam""" +828 36 training_loop """owa""" +828 36 negative_sampler """basic""" +828 36 evaluator """rankbased""" +828 37 dataset """kinships""" +828 37 model """transe""" +828 37 loss """softplus""" +828 37 regularizer """no""" +828 37 optimizer """adam""" +828 37 training_loop """owa""" +828 37 negative_sampler """basic""" +828 37 evaluator """rankbased""" +828 38 dataset """kinships""" +828 38 model """transe""" +828 38 loss """softplus""" +828 38 regularizer """no""" +828 38 optimizer """adam""" +828 38 training_loop """owa""" +828 38 negative_sampler """basic""" +828 38 evaluator """rankbased""" +828 39 dataset """kinships""" +828 39 model """transe""" +828 39 loss """softplus""" +828 39 regularizer """no""" +828 39 optimizer """adam""" +828 39 training_loop """owa""" +828 39 negative_sampler """basic""" +828 39 evaluator """rankbased""" +828 40 dataset """kinships""" +828 40 model """transe""" +828 40 loss """softplus""" +828 40 regularizer """no""" +828 40 optimizer """adam""" +828 40 training_loop """owa""" +828 40 negative_sampler """basic""" +828 40 evaluator """rankbased""" +828 41 dataset """kinships""" +828 41 model """transe""" +828 41 loss """softplus""" +828 41 regularizer """no""" +828 41 optimizer """adam""" +828 41 training_loop """owa""" +828 41 negative_sampler """basic""" +828 41 evaluator """rankbased""" +828 42 dataset """kinships""" +828 42 model """transe""" +828 42 loss """softplus""" +828 42 regularizer """no""" +828 42 optimizer """adam""" +828 42 training_loop """owa""" +828 42 negative_sampler """basic""" +828 42 evaluator """rankbased""" +828 43 dataset """kinships""" +828 43 model """transe""" +828 43 loss """softplus""" +828 43 regularizer """no""" +828 43 optimizer """adam""" +828 43 training_loop """owa""" +828 43 negative_sampler """basic""" +828 43 evaluator """rankbased""" +828 44 dataset """kinships""" +828 44 model """transe""" +828 44 loss """softplus""" +828 44 regularizer """no""" +828 44 optimizer """adam""" +828 44 
training_loop """owa""" +828 44 negative_sampler """basic""" +828 44 evaluator """rankbased""" +828 45 dataset """kinships""" +828 45 model """transe""" +828 45 loss """softplus""" +828 45 regularizer """no""" +828 45 optimizer """adam""" +828 45 training_loop """owa""" +828 45 negative_sampler """basic""" +828 45 evaluator """rankbased""" +828 46 dataset """kinships""" +828 46 model """transe""" +828 46 loss """softplus""" +828 46 regularizer """no""" +828 46 optimizer """adam""" +828 46 training_loop """owa""" +828 46 negative_sampler """basic""" +828 46 evaluator """rankbased""" +828 47 dataset """kinships""" +828 47 model """transe""" +828 47 loss """softplus""" +828 47 regularizer """no""" +828 47 optimizer """adam""" +828 47 training_loop """owa""" +828 47 negative_sampler """basic""" +828 47 evaluator """rankbased""" +828 48 dataset """kinships""" +828 48 model """transe""" +828 48 loss """softplus""" +828 48 regularizer """no""" +828 48 optimizer """adam""" +828 48 training_loop """owa""" +828 48 negative_sampler """basic""" +828 48 evaluator """rankbased""" +828 49 dataset """kinships""" +828 49 model """transe""" +828 49 loss """softplus""" +828 49 regularizer """no""" +828 49 optimizer """adam""" +828 49 training_loop """owa""" +828 49 negative_sampler """basic""" +828 49 evaluator """rankbased""" +828 50 dataset """kinships""" +828 50 model """transe""" +828 50 loss """softplus""" +828 50 regularizer """no""" +828 50 optimizer """adam""" +828 50 training_loop """owa""" +828 50 negative_sampler """basic""" +828 50 evaluator """rankbased""" +828 51 dataset """kinships""" +828 51 model """transe""" +828 51 loss """softplus""" +828 51 regularizer """no""" +828 51 optimizer """adam""" +828 51 training_loop """owa""" +828 51 negative_sampler """basic""" +828 51 evaluator """rankbased""" +828 52 dataset """kinships""" +828 52 model """transe""" +828 52 loss """softplus""" +828 52 regularizer """no""" +828 52 optimizer """adam""" +828 52 training_loop """owa""" 
+828 52 negative_sampler """basic""" +828 52 evaluator """rankbased""" +828 53 dataset """kinships""" +828 53 model """transe""" +828 53 loss """softplus""" +828 53 regularizer """no""" +828 53 optimizer """adam""" +828 53 training_loop """owa""" +828 53 negative_sampler """basic""" +828 53 evaluator """rankbased""" +828 54 dataset """kinships""" +828 54 model """transe""" +828 54 loss """softplus""" +828 54 regularizer """no""" +828 54 optimizer """adam""" +828 54 training_loop """owa""" +828 54 negative_sampler """basic""" +828 54 evaluator """rankbased""" +828 55 dataset """kinships""" +828 55 model """transe""" +828 55 loss """softplus""" +828 55 regularizer """no""" +828 55 optimizer """adam""" +828 55 training_loop """owa""" +828 55 negative_sampler """basic""" +828 55 evaluator """rankbased""" +828 56 dataset """kinships""" +828 56 model """transe""" +828 56 loss """softplus""" +828 56 regularizer """no""" +828 56 optimizer """adam""" +828 56 training_loop """owa""" +828 56 negative_sampler """basic""" +828 56 evaluator """rankbased""" +828 57 dataset """kinships""" +828 57 model """transe""" +828 57 loss """softplus""" +828 57 regularizer """no""" +828 57 optimizer """adam""" +828 57 training_loop """owa""" +828 57 negative_sampler """basic""" +828 57 evaluator """rankbased""" +828 58 dataset """kinships""" +828 58 model """transe""" +828 58 loss """softplus""" +828 58 regularizer """no""" +828 58 optimizer """adam""" +828 58 training_loop """owa""" +828 58 negative_sampler """basic""" +828 58 evaluator """rankbased""" +828 59 dataset """kinships""" +828 59 model """transe""" +828 59 loss """softplus""" +828 59 regularizer """no""" +828 59 optimizer """adam""" +828 59 training_loop """owa""" +828 59 negative_sampler """basic""" +828 59 evaluator """rankbased""" +828 60 dataset """kinships""" +828 60 model """transe""" +828 60 loss """softplus""" +828 60 regularizer """no""" +828 60 optimizer """adam""" +828 60 training_loop """owa""" +828 60 
negative_sampler """basic""" +828 60 evaluator """rankbased""" +828 61 dataset """kinships""" +828 61 model """transe""" +828 61 loss """softplus""" +828 61 regularizer """no""" +828 61 optimizer """adam""" +828 61 training_loop """owa""" +828 61 negative_sampler """basic""" +828 61 evaluator """rankbased""" +828 62 dataset """kinships""" +828 62 model """transe""" +828 62 loss """softplus""" +828 62 regularizer """no""" +828 62 optimizer """adam""" +828 62 training_loop """owa""" +828 62 negative_sampler """basic""" +828 62 evaluator """rankbased""" +828 63 dataset """kinships""" +828 63 model """transe""" +828 63 loss """softplus""" +828 63 regularizer """no""" +828 63 optimizer """adam""" +828 63 training_loop """owa""" +828 63 negative_sampler """basic""" +828 63 evaluator """rankbased""" +828 64 dataset """kinships""" +828 64 model """transe""" +828 64 loss """softplus""" +828 64 regularizer """no""" +828 64 optimizer """adam""" +828 64 training_loop """owa""" +828 64 negative_sampler """basic""" +828 64 evaluator """rankbased""" +828 65 dataset """kinships""" +828 65 model """transe""" +828 65 loss """softplus""" +828 65 regularizer """no""" +828 65 optimizer """adam""" +828 65 training_loop """owa""" +828 65 negative_sampler """basic""" +828 65 evaluator """rankbased""" +828 66 dataset """kinships""" +828 66 model """transe""" +828 66 loss """softplus""" +828 66 regularizer """no""" +828 66 optimizer """adam""" +828 66 training_loop """owa""" +828 66 negative_sampler """basic""" +828 66 evaluator """rankbased""" +828 67 dataset """kinships""" +828 67 model """transe""" +828 67 loss """softplus""" +828 67 regularizer """no""" +828 67 optimizer """adam""" +828 67 training_loop """owa""" +828 67 negative_sampler """basic""" +828 67 evaluator """rankbased""" +828 68 dataset """kinships""" +828 68 model """transe""" +828 68 loss """softplus""" +828 68 regularizer """no""" +828 68 optimizer """adam""" +828 68 training_loop """owa""" +828 68 negative_sampler 
"""basic""" +828 68 evaluator """rankbased""" +828 69 dataset """kinships""" +828 69 model """transe""" +828 69 loss """softplus""" +828 69 regularizer """no""" +828 69 optimizer """adam""" +828 69 training_loop """owa""" +828 69 negative_sampler """basic""" +828 69 evaluator """rankbased""" +828 70 dataset """kinships""" +828 70 model """transe""" +828 70 loss """softplus""" +828 70 regularizer """no""" +828 70 optimizer """adam""" +828 70 training_loop """owa""" +828 70 negative_sampler """basic""" +828 70 evaluator """rankbased""" +828 71 dataset """kinships""" +828 71 model """transe""" +828 71 loss """softplus""" +828 71 regularizer """no""" +828 71 optimizer """adam""" +828 71 training_loop """owa""" +828 71 negative_sampler """basic""" +828 71 evaluator """rankbased""" +828 72 dataset """kinships""" +828 72 model """transe""" +828 72 loss """softplus""" +828 72 regularizer """no""" +828 72 optimizer """adam""" +828 72 training_loop """owa""" +828 72 negative_sampler """basic""" +828 72 evaluator """rankbased""" +828 73 dataset """kinships""" +828 73 model """transe""" +828 73 loss """softplus""" +828 73 regularizer """no""" +828 73 optimizer """adam""" +828 73 training_loop """owa""" +828 73 negative_sampler """basic""" +828 73 evaluator """rankbased""" +828 74 dataset """kinships""" +828 74 model """transe""" +828 74 loss """softplus""" +828 74 regularizer """no""" +828 74 optimizer """adam""" +828 74 training_loop """owa""" +828 74 negative_sampler """basic""" +828 74 evaluator """rankbased""" +828 75 dataset """kinships""" +828 75 model """transe""" +828 75 loss """softplus""" +828 75 regularizer """no""" +828 75 optimizer """adam""" +828 75 training_loop """owa""" +828 75 negative_sampler """basic""" +828 75 evaluator """rankbased""" +828 76 dataset """kinships""" +828 76 model """transe""" +828 76 loss """softplus""" +828 76 regularizer """no""" +828 76 optimizer """adam""" +828 76 training_loop """owa""" +828 76 negative_sampler """basic""" +828 76 
evaluator """rankbased""" +828 77 dataset """kinships""" +828 77 model """transe""" +828 77 loss """softplus""" +828 77 regularizer """no""" +828 77 optimizer """adam""" +828 77 training_loop """owa""" +828 77 negative_sampler """basic""" +828 77 evaluator """rankbased""" +828 78 dataset """kinships""" +828 78 model """transe""" +828 78 loss """softplus""" +828 78 regularizer """no""" +828 78 optimizer """adam""" +828 78 training_loop """owa""" +828 78 negative_sampler """basic""" +828 78 evaluator """rankbased""" +828 79 dataset """kinships""" +828 79 model """transe""" +828 79 loss """softplus""" +828 79 regularizer """no""" +828 79 optimizer """adam""" +828 79 training_loop """owa""" +828 79 negative_sampler """basic""" +828 79 evaluator """rankbased""" +828 80 dataset """kinships""" +828 80 model """transe""" +828 80 loss """softplus""" +828 80 regularizer """no""" +828 80 optimizer """adam""" +828 80 training_loop """owa""" +828 80 negative_sampler """basic""" +828 80 evaluator """rankbased""" +828 81 dataset """kinships""" +828 81 model """transe""" +828 81 loss """softplus""" +828 81 regularizer """no""" +828 81 optimizer """adam""" +828 81 training_loop """owa""" +828 81 negative_sampler """basic""" +828 81 evaluator """rankbased""" +828 82 dataset """kinships""" +828 82 model """transe""" +828 82 loss """softplus""" +828 82 regularizer """no""" +828 82 optimizer """adam""" +828 82 training_loop """owa""" +828 82 negative_sampler """basic""" +828 82 evaluator """rankbased""" +828 83 dataset """kinships""" +828 83 model """transe""" +828 83 loss """softplus""" +828 83 regularizer """no""" +828 83 optimizer """adam""" +828 83 training_loop """owa""" +828 83 negative_sampler """basic""" +828 83 evaluator """rankbased""" +828 84 dataset """kinships""" +828 84 model """transe""" +828 84 loss """softplus""" +828 84 regularizer """no""" +828 84 optimizer """adam""" +828 84 training_loop """owa""" +828 84 negative_sampler """basic""" +828 84 evaluator 
"""rankbased""" +828 85 dataset """kinships""" +828 85 model """transe""" +828 85 loss """softplus""" +828 85 regularizer """no""" +828 85 optimizer """adam""" +828 85 training_loop """owa""" +828 85 negative_sampler """basic""" +828 85 evaluator """rankbased""" +828 86 dataset """kinships""" +828 86 model """transe""" +828 86 loss """softplus""" +828 86 regularizer """no""" +828 86 optimizer """adam""" +828 86 training_loop """owa""" +828 86 negative_sampler """basic""" +828 86 evaluator """rankbased""" +828 87 dataset """kinships""" +828 87 model """transe""" +828 87 loss """softplus""" +828 87 regularizer """no""" +828 87 optimizer """adam""" +828 87 training_loop """owa""" +828 87 negative_sampler """basic""" +828 87 evaluator """rankbased""" +828 88 dataset """kinships""" +828 88 model """transe""" +828 88 loss """softplus""" +828 88 regularizer """no""" +828 88 optimizer """adam""" +828 88 training_loop """owa""" +828 88 negative_sampler """basic""" +828 88 evaluator """rankbased""" +828 89 dataset """kinships""" +828 89 model """transe""" +828 89 loss """softplus""" +828 89 regularizer """no""" +828 89 optimizer """adam""" +828 89 training_loop """owa""" +828 89 negative_sampler """basic""" +828 89 evaluator """rankbased""" +828 90 dataset """kinships""" +828 90 model """transe""" +828 90 loss """softplus""" +828 90 regularizer """no""" +828 90 optimizer """adam""" +828 90 training_loop """owa""" +828 90 negative_sampler """basic""" +828 90 evaluator """rankbased""" +828 91 dataset """kinships""" +828 91 model """transe""" +828 91 loss """softplus""" +828 91 regularizer """no""" +828 91 optimizer """adam""" +828 91 training_loop """owa""" +828 91 negative_sampler """basic""" +828 91 evaluator """rankbased""" +828 92 dataset """kinships""" +828 92 model """transe""" +828 92 loss """softplus""" +828 92 regularizer """no""" +828 92 optimizer """adam""" +828 92 training_loop """owa""" +828 92 negative_sampler """basic""" +828 92 evaluator """rankbased""" +828 93 
dataset """kinships""" +828 93 model """transe""" +828 93 loss """softplus""" +828 93 regularizer """no""" +828 93 optimizer """adam""" +828 93 training_loop """owa""" +828 93 negative_sampler """basic""" +828 93 evaluator """rankbased""" +828 94 dataset """kinships""" +828 94 model """transe""" +828 94 loss """softplus""" +828 94 regularizer """no""" +828 94 optimizer """adam""" +828 94 training_loop """owa""" +828 94 negative_sampler """basic""" +828 94 evaluator """rankbased""" +828 95 dataset """kinships""" +828 95 model """transe""" +828 95 loss """softplus""" +828 95 regularizer """no""" +828 95 optimizer """adam""" +828 95 training_loop """owa""" +828 95 negative_sampler """basic""" +828 95 evaluator """rankbased""" +828 96 dataset """kinships""" +828 96 model """transe""" +828 96 loss """softplus""" +828 96 regularizer """no""" +828 96 optimizer """adam""" +828 96 training_loop """owa""" +828 96 negative_sampler """basic""" +828 96 evaluator """rankbased""" +828 97 dataset """kinships""" +828 97 model """transe""" +828 97 loss """softplus""" +828 97 regularizer """no""" +828 97 optimizer """adam""" +828 97 training_loop """owa""" +828 97 negative_sampler """basic""" +828 97 evaluator """rankbased""" +828 98 dataset """kinships""" +828 98 model """transe""" +828 98 loss """softplus""" +828 98 regularizer """no""" +828 98 optimizer """adam""" +828 98 training_loop """owa""" +828 98 negative_sampler """basic""" +828 98 evaluator """rankbased""" +828 99 dataset """kinships""" +828 99 model """transe""" +828 99 loss """softplus""" +828 99 regularizer """no""" +828 99 optimizer """adam""" +828 99 training_loop """owa""" +828 99 negative_sampler """basic""" +828 99 evaluator """rankbased""" +828 100 dataset """kinships""" +828 100 model """transe""" +828 100 loss """softplus""" +828 100 regularizer """no""" +828 100 optimizer """adam""" +828 100 training_loop """owa""" +828 100 negative_sampler """basic""" +828 100 evaluator """rankbased""" +829 1 
model.embedding_dim 1.0 +829 1 model.scoring_fct_norm 1.0 +829 1 loss.margin 4.165302021947178 +829 1 optimizer.lr 0.01427099017127122 +829 1 negative_sampler.num_negs_per_pos 53.0 +829 1 training.batch_size 1.0 +829 2 model.embedding_dim 0.0 +829 2 model.scoring_fct_norm 2.0 +829 2 loss.margin 0.6667422464847259 +829 2 optimizer.lr 0.007236034256784992 +829 2 negative_sampler.num_negs_per_pos 87.0 +829 2 training.batch_size 1.0 +829 3 model.embedding_dim 0.0 +829 3 model.scoring_fct_norm 1.0 +829 3 loss.margin 6.135974010848902 +829 3 optimizer.lr 0.003359526641790458 +829 3 negative_sampler.num_negs_per_pos 66.0 +829 3 training.batch_size 0.0 +829 4 model.embedding_dim 0.0 +829 4 model.scoring_fct_norm 2.0 +829 4 loss.margin 6.787655949933009 +829 4 optimizer.lr 0.010165742090528928 +829 4 negative_sampler.num_negs_per_pos 93.0 +829 4 training.batch_size 2.0 +829 5 model.embedding_dim 0.0 +829 5 model.scoring_fct_norm 2.0 +829 5 loss.margin 9.521557651693449 +829 5 optimizer.lr 0.010965501134233254 +829 5 negative_sampler.num_negs_per_pos 39.0 +829 5 training.batch_size 0.0 +829 6 model.embedding_dim 2.0 +829 6 model.scoring_fct_norm 2.0 +829 6 loss.margin 2.4728121971928303 +829 6 optimizer.lr 0.005114381481075441 +829 6 negative_sampler.num_negs_per_pos 83.0 +829 6 training.batch_size 1.0 +829 7 model.embedding_dim 1.0 +829 7 model.scoring_fct_norm 2.0 +829 7 loss.margin 1.020868744032029 +829 7 optimizer.lr 0.04322655644782575 +829 7 negative_sampler.num_negs_per_pos 7.0 +829 7 training.batch_size 2.0 +829 8 model.embedding_dim 2.0 +829 8 model.scoring_fct_norm 1.0 +829 8 loss.margin 3.723599173061029 +829 8 optimizer.lr 0.005843111917873205 +829 8 negative_sampler.num_negs_per_pos 43.0 +829 8 training.batch_size 1.0 +829 9 model.embedding_dim 0.0 +829 9 model.scoring_fct_norm 1.0 +829 9 loss.margin 5.115955251415495 +829 9 optimizer.lr 0.05303904675756396 +829 9 negative_sampler.num_negs_per_pos 31.0 +829 9 training.batch_size 1.0 +829 10 model.embedding_dim 
0.0 +829 10 model.scoring_fct_norm 1.0 +829 10 loss.margin 7.856036148575973 +829 10 optimizer.lr 0.006094407626468287 +829 10 negative_sampler.num_negs_per_pos 24.0 +829 10 training.batch_size 1.0 +829 11 model.embedding_dim 2.0 +829 11 model.scoring_fct_norm 2.0 +829 11 loss.margin 1.0965705620717356 +829 11 optimizer.lr 0.005051208274741596 +829 11 negative_sampler.num_negs_per_pos 20.0 +829 11 training.batch_size 1.0 +829 12 model.embedding_dim 0.0 +829 12 model.scoring_fct_norm 2.0 +829 12 loss.margin 4.809585147713043 +829 12 optimizer.lr 0.011657376938953357 +829 12 negative_sampler.num_negs_per_pos 1.0 +829 12 training.batch_size 0.0 +829 13 model.embedding_dim 0.0 +829 13 model.scoring_fct_norm 2.0 +829 13 loss.margin 3.2963123328967674 +829 13 optimizer.lr 0.013678463783948706 +829 13 negative_sampler.num_negs_per_pos 46.0 +829 13 training.batch_size 1.0 +829 14 model.embedding_dim 1.0 +829 14 model.scoring_fct_norm 2.0 +829 14 loss.margin 7.002410022534476 +829 14 optimizer.lr 0.013833315979951352 +829 14 negative_sampler.num_negs_per_pos 70.0 +829 14 training.batch_size 0.0 +829 15 model.embedding_dim 0.0 +829 15 model.scoring_fct_norm 1.0 +829 15 loss.margin 7.821507306872557 +829 15 optimizer.lr 0.005311645083267994 +829 15 negative_sampler.num_negs_per_pos 98.0 +829 15 training.batch_size 1.0 +829 16 model.embedding_dim 0.0 +829 16 model.scoring_fct_norm 1.0 +829 16 loss.margin 6.515557878843151 +829 16 optimizer.lr 0.06546986048792049 +829 16 negative_sampler.num_negs_per_pos 12.0 +829 16 training.batch_size 1.0 +829 17 model.embedding_dim 0.0 +829 17 model.scoring_fct_norm 1.0 +829 17 loss.margin 7.577847521799255 +829 17 optimizer.lr 0.0017251275746192499 +829 17 negative_sampler.num_negs_per_pos 29.0 +829 17 training.batch_size 0.0 +829 18 model.embedding_dim 0.0 +829 18 model.scoring_fct_norm 2.0 +829 18 loss.margin 1.733505761871838 +829 18 optimizer.lr 0.09658359024995541 +829 18 negative_sampler.num_negs_per_pos 13.0 +829 18 
training.batch_size 1.0 +829 19 model.embedding_dim 0.0 +829 19 model.scoring_fct_norm 2.0 +829 19 loss.margin 5.596718623721689 +829 19 optimizer.lr 0.008977865891743077 +829 19 negative_sampler.num_negs_per_pos 72.0 +829 19 training.batch_size 2.0 +829 20 model.embedding_dim 0.0 +829 20 model.scoring_fct_norm 2.0 +829 20 loss.margin 3.095300810444408 +829 20 optimizer.lr 0.0018601759546865056 +829 20 negative_sampler.num_negs_per_pos 20.0 +829 20 training.batch_size 0.0 +829 21 model.embedding_dim 0.0 +829 21 model.scoring_fct_norm 1.0 +829 21 loss.margin 2.1331424910448122 +829 21 optimizer.lr 0.0433297634943254 +829 21 negative_sampler.num_negs_per_pos 68.0 +829 21 training.batch_size 2.0 +829 22 model.embedding_dim 1.0 +829 22 model.scoring_fct_norm 1.0 +829 22 loss.margin 6.674609414179716 +829 22 optimizer.lr 0.006676093452647507 +829 22 negative_sampler.num_negs_per_pos 5.0 +829 22 training.batch_size 2.0 +829 23 model.embedding_dim 2.0 +829 23 model.scoring_fct_norm 1.0 +829 23 loss.margin 8.151954776161976 +829 23 optimizer.lr 0.0010168317919071516 +829 23 negative_sampler.num_negs_per_pos 68.0 +829 23 training.batch_size 0.0 +829 24 model.embedding_dim 0.0 +829 24 model.scoring_fct_norm 1.0 +829 24 loss.margin 0.5765754420708333 +829 24 optimizer.lr 0.00925022528888032 +829 24 negative_sampler.num_negs_per_pos 28.0 +829 24 training.batch_size 2.0 +829 25 model.embedding_dim 2.0 +829 25 model.scoring_fct_norm 1.0 +829 25 loss.margin 3.1678491097175168 +829 25 optimizer.lr 0.029332062330307038 +829 25 negative_sampler.num_negs_per_pos 85.0 +829 25 training.batch_size 0.0 +829 26 model.embedding_dim 0.0 +829 26 model.scoring_fct_norm 1.0 +829 26 loss.margin 6.840840403100006 +829 26 optimizer.lr 0.0018631521749530203 +829 26 negative_sampler.num_negs_per_pos 33.0 +829 26 training.batch_size 0.0 +829 27 model.embedding_dim 1.0 +829 27 model.scoring_fct_norm 2.0 +829 27 loss.margin 8.21844889360299 +829 27 optimizer.lr 0.04876833412493203 +829 27 
negative_sampler.num_negs_per_pos 4.0 +829 27 training.batch_size 1.0 +829 28 model.embedding_dim 2.0 +829 28 model.scoring_fct_norm 1.0 +829 28 loss.margin 0.7150051561753179 +829 28 optimizer.lr 0.010575788411668879 +829 28 negative_sampler.num_negs_per_pos 20.0 +829 28 training.batch_size 2.0 +829 29 model.embedding_dim 0.0 +829 29 model.scoring_fct_norm 2.0 +829 29 loss.margin 1.3409953886510595 +829 29 optimizer.lr 0.014735556196651322 +829 29 negative_sampler.num_negs_per_pos 97.0 +829 29 training.batch_size 0.0 +829 30 model.embedding_dim 1.0 +829 30 model.scoring_fct_norm 1.0 +829 30 loss.margin 5.683246441560367 +829 30 optimizer.lr 0.001207456087843289 +829 30 negative_sampler.num_negs_per_pos 67.0 +829 30 training.batch_size 1.0 +829 31 model.embedding_dim 2.0 +829 31 model.scoring_fct_norm 2.0 +829 31 loss.margin 3.702247104279714 +829 31 optimizer.lr 0.013864473094139629 +829 31 negative_sampler.num_negs_per_pos 82.0 +829 31 training.batch_size 2.0 +829 32 model.embedding_dim 1.0 +829 32 model.scoring_fct_norm 2.0 +829 32 loss.margin 4.3044254322897535 +829 32 optimizer.lr 0.08559727292959633 +829 32 negative_sampler.num_negs_per_pos 28.0 +829 32 training.batch_size 1.0 +829 33 model.embedding_dim 0.0 +829 33 model.scoring_fct_norm 1.0 +829 33 loss.margin 5.244900875975824 +829 33 optimizer.lr 0.0045812031237123935 +829 33 negative_sampler.num_negs_per_pos 91.0 +829 33 training.batch_size 2.0 +829 34 model.embedding_dim 2.0 +829 34 model.scoring_fct_norm 1.0 +829 34 loss.margin 2.667631149348476 +829 34 optimizer.lr 0.002141916076229899 +829 34 negative_sampler.num_negs_per_pos 26.0 +829 34 training.batch_size 0.0 +829 35 model.embedding_dim 1.0 +829 35 model.scoring_fct_norm 2.0 +829 35 loss.margin 4.55558314811381 +829 35 optimizer.lr 0.0011906898435492905 +829 35 negative_sampler.num_negs_per_pos 91.0 +829 35 training.batch_size 2.0 +829 36 model.embedding_dim 1.0 +829 36 model.scoring_fct_norm 1.0 +829 36 loss.margin 4.749452351372352 +829 36 
optimizer.lr 0.016421585009412893 +829 36 negative_sampler.num_negs_per_pos 66.0 +829 36 training.batch_size 2.0 +829 37 model.embedding_dim 0.0 +829 37 model.scoring_fct_norm 1.0 +829 37 loss.margin 4.1648254572728565 +829 37 optimizer.lr 0.06124623594797807 +829 37 negative_sampler.num_negs_per_pos 43.0 +829 37 training.batch_size 1.0 +829 38 model.embedding_dim 0.0 +829 38 model.scoring_fct_norm 2.0 +829 38 loss.margin 5.28381000228724 +829 38 optimizer.lr 0.0014073130125897813 +829 38 negative_sampler.num_negs_per_pos 2.0 +829 38 training.batch_size 1.0 +829 39 model.embedding_dim 1.0 +829 39 model.scoring_fct_norm 2.0 +829 39 loss.margin 4.5114171962880745 +829 39 optimizer.lr 0.06781179635546745 +829 39 negative_sampler.num_negs_per_pos 20.0 +829 39 training.batch_size 1.0 +829 40 model.embedding_dim 2.0 +829 40 model.scoring_fct_norm 2.0 +829 40 loss.margin 8.705153199442176 +829 40 optimizer.lr 0.02832862107580102 +829 40 negative_sampler.num_negs_per_pos 90.0 +829 40 training.batch_size 0.0 +829 41 model.embedding_dim 1.0 +829 41 model.scoring_fct_norm 1.0 +829 41 loss.margin 8.1208049403958 +829 41 optimizer.lr 0.0081733950050786 +829 41 negative_sampler.num_negs_per_pos 11.0 +829 41 training.batch_size 0.0 +829 42 model.embedding_dim 1.0 +829 42 model.scoring_fct_norm 1.0 +829 42 loss.margin 4.275773038463429 +829 42 optimizer.lr 0.007888649925221772 +829 42 negative_sampler.num_negs_per_pos 99.0 +829 42 training.batch_size 2.0 +829 43 model.embedding_dim 1.0 +829 43 model.scoring_fct_norm 2.0 +829 43 loss.margin 5.183122197319431 +829 43 optimizer.lr 0.0032292396131409925 +829 43 negative_sampler.num_negs_per_pos 52.0 +829 43 training.batch_size 0.0 +829 44 model.embedding_dim 0.0 +829 44 model.scoring_fct_norm 2.0 +829 44 loss.margin 3.4416273395574066 +829 44 optimizer.lr 0.026866524295970855 +829 44 negative_sampler.num_negs_per_pos 46.0 +829 44 training.batch_size 0.0 +829 45 model.embedding_dim 0.0 +829 45 model.scoring_fct_norm 1.0 +829 45 
loss.margin 2.2677721917818854 +829 45 optimizer.lr 0.04584022370500082 +829 45 negative_sampler.num_negs_per_pos 85.0 +829 45 training.batch_size 0.0 +829 46 model.embedding_dim 0.0 +829 46 model.scoring_fct_norm 2.0 +829 46 loss.margin 4.732183736074721 +829 46 optimizer.lr 0.006538904267157867 +829 46 negative_sampler.num_negs_per_pos 41.0 +829 46 training.batch_size 0.0 +829 47 model.embedding_dim 1.0 +829 47 model.scoring_fct_norm 1.0 +829 47 loss.margin 5.214719754077172 +829 47 optimizer.lr 0.02232705968043854 +829 47 negative_sampler.num_negs_per_pos 53.0 +829 47 training.batch_size 2.0 +829 48 model.embedding_dim 2.0 +829 48 model.scoring_fct_norm 1.0 +829 48 loss.margin 4.993736557892079 +829 48 optimizer.lr 0.0015029549152042131 +829 48 negative_sampler.num_negs_per_pos 50.0 +829 48 training.batch_size 0.0 +829 49 model.embedding_dim 0.0 +829 49 model.scoring_fct_norm 1.0 +829 49 loss.margin 4.1313753167574845 +829 49 optimizer.lr 0.008572648404113927 +829 49 negative_sampler.num_negs_per_pos 80.0 +829 49 training.batch_size 1.0 +829 50 model.embedding_dim 2.0 +829 50 model.scoring_fct_norm 1.0 +829 50 loss.margin 3.740941026389604 +829 50 optimizer.lr 0.023995394839721185 +829 50 negative_sampler.num_negs_per_pos 3.0 +829 50 training.batch_size 2.0 +829 51 model.embedding_dim 2.0 +829 51 model.scoring_fct_norm 2.0 +829 51 loss.margin 1.8520294857157809 +829 51 optimizer.lr 0.0036454595164115418 +829 51 negative_sampler.num_negs_per_pos 40.0 +829 51 training.batch_size 2.0 +829 52 model.embedding_dim 0.0 +829 52 model.scoring_fct_norm 1.0 +829 52 loss.margin 7.886512286507863 +829 52 optimizer.lr 0.03658660078397943 +829 52 negative_sampler.num_negs_per_pos 78.0 +829 52 training.batch_size 2.0 +829 53 model.embedding_dim 2.0 +829 53 model.scoring_fct_norm 2.0 +829 53 loss.margin 9.82418667600638 +829 53 optimizer.lr 0.0011802409090105209 +829 53 negative_sampler.num_negs_per_pos 67.0 +829 53 training.batch_size 2.0 +829 54 model.embedding_dim 1.0 +829 54 
model.scoring_fct_norm 1.0 +829 54 loss.margin 5.16058299729431 +829 54 optimizer.lr 0.019425056384421966 +829 54 negative_sampler.num_negs_per_pos 47.0 +829 54 training.batch_size 2.0 +829 55 model.embedding_dim 2.0 +829 55 model.scoring_fct_norm 2.0 +829 55 loss.margin 2.4959945908402 +829 55 optimizer.lr 0.01955647057317857 +829 55 negative_sampler.num_negs_per_pos 42.0 +829 55 training.batch_size 2.0 +829 56 model.embedding_dim 1.0 +829 56 model.scoring_fct_norm 2.0 +829 56 loss.margin 0.744966194338937 +829 56 optimizer.lr 0.02597242636636648 +829 56 negative_sampler.num_negs_per_pos 65.0 +829 56 training.batch_size 1.0 +829 57 model.embedding_dim 2.0 +829 57 model.scoring_fct_norm 2.0 +829 57 loss.margin 2.604703889385248 +829 57 optimizer.lr 0.011746956288714347 +829 57 negative_sampler.num_negs_per_pos 98.0 +829 57 training.batch_size 1.0 +829 58 model.embedding_dim 0.0 +829 58 model.scoring_fct_norm 2.0 +829 58 loss.margin 5.686587161587788 +829 58 optimizer.lr 0.06231026700274882 +829 58 negative_sampler.num_negs_per_pos 91.0 +829 58 training.batch_size 1.0 +829 59 model.embedding_dim 2.0 +829 59 model.scoring_fct_norm 1.0 +829 59 loss.margin 1.6240102208180343 +829 59 optimizer.lr 0.022450665981964154 +829 59 negative_sampler.num_negs_per_pos 98.0 +829 59 training.batch_size 0.0 +829 60 model.embedding_dim 0.0 +829 60 model.scoring_fct_norm 1.0 +829 60 loss.margin 8.01158915138809 +829 60 optimizer.lr 0.0032671387243347065 +829 60 negative_sampler.num_negs_per_pos 87.0 +829 60 training.batch_size 0.0 +829 61 model.embedding_dim 2.0 +829 61 model.scoring_fct_norm 1.0 +829 61 loss.margin 3.55674663505756 +829 61 optimizer.lr 0.016225376717946143 +829 61 negative_sampler.num_negs_per_pos 17.0 +829 61 training.batch_size 1.0 +829 62 model.embedding_dim 1.0 +829 62 model.scoring_fct_norm 1.0 +829 62 loss.margin 9.090704779283351 +829 62 optimizer.lr 0.06668644599661148 +829 62 negative_sampler.num_negs_per_pos 4.0 +829 62 training.batch_size 0.0 +829 63 
model.embedding_dim 0.0 +829 63 model.scoring_fct_norm 1.0 +829 63 loss.margin 6.811156642808686 +829 63 optimizer.lr 0.09171841291508559 +829 63 negative_sampler.num_negs_per_pos 28.0 +829 63 training.batch_size 0.0 +829 64 model.embedding_dim 1.0 +829 64 model.scoring_fct_norm 2.0 +829 64 loss.margin 2.292050602482064 +829 64 optimizer.lr 0.02566944965823218 +829 64 negative_sampler.num_negs_per_pos 23.0 +829 64 training.batch_size 1.0 +829 65 model.embedding_dim 2.0 +829 65 model.scoring_fct_norm 1.0 +829 65 loss.margin 3.301536601749748 +829 65 optimizer.lr 0.0050154956425156735 +829 65 negative_sampler.num_negs_per_pos 10.0 +829 65 training.batch_size 0.0 +829 66 model.embedding_dim 1.0 +829 66 model.scoring_fct_norm 1.0 +829 66 loss.margin 8.034028495450723 +829 66 optimizer.lr 0.008819773741237784 +829 66 negative_sampler.num_negs_per_pos 32.0 +829 66 training.batch_size 2.0 +829 67 model.embedding_dim 2.0 +829 67 model.scoring_fct_norm 2.0 +829 67 loss.margin 3.134908384497968 +829 67 optimizer.lr 0.00109302693406603 +829 67 negative_sampler.num_negs_per_pos 13.0 +829 67 training.batch_size 2.0 +829 68 model.embedding_dim 0.0 +829 68 model.scoring_fct_norm 1.0 +829 68 loss.margin 6.870896202711166 +829 68 optimizer.lr 0.0016248845380257176 +829 68 negative_sampler.num_negs_per_pos 54.0 +829 68 training.batch_size 2.0 +829 69 model.embedding_dim 1.0 +829 69 model.scoring_fct_norm 2.0 +829 69 loss.margin 8.416527421083188 +829 69 optimizer.lr 0.030804922181508838 +829 69 negative_sampler.num_negs_per_pos 74.0 +829 69 training.batch_size 1.0 +829 70 model.embedding_dim 1.0 +829 70 model.scoring_fct_norm 2.0 +829 70 loss.margin 8.848957342127159 +829 70 optimizer.lr 0.05344200409602719 +829 70 negative_sampler.num_negs_per_pos 39.0 +829 70 training.batch_size 0.0 +829 71 model.embedding_dim 0.0 +829 71 model.scoring_fct_norm 1.0 +829 71 loss.margin 5.395633340738636 +829 71 optimizer.lr 0.007542048776046142 +829 71 negative_sampler.num_negs_per_pos 76.0 +829 71 
training.batch_size 2.0 +829 72 model.embedding_dim 1.0 +829 72 model.scoring_fct_norm 2.0 +829 72 loss.margin 6.165303433319106 +829 72 optimizer.lr 0.0030572365282354793 +829 72 negative_sampler.num_negs_per_pos 60.0 +829 72 training.batch_size 0.0 +829 73 model.embedding_dim 0.0 +829 73 model.scoring_fct_norm 2.0 +829 73 loss.margin 1.7103131734082337 +829 73 optimizer.lr 0.02824270912184355 +829 73 negative_sampler.num_negs_per_pos 38.0 +829 73 training.batch_size 1.0 +829 74 model.embedding_dim 1.0 +829 74 model.scoring_fct_norm 1.0 +829 74 loss.margin 6.401066069634155 +829 74 optimizer.lr 0.0053723871752929045 +829 74 negative_sampler.num_negs_per_pos 96.0 +829 74 training.batch_size 0.0 +829 75 model.embedding_dim 2.0 +829 75 model.scoring_fct_norm 1.0 +829 75 loss.margin 2.74779041679559 +829 75 optimizer.lr 0.0013042519927277737 +829 75 negative_sampler.num_negs_per_pos 83.0 +829 75 training.batch_size 1.0 +829 76 model.embedding_dim 2.0 +829 76 model.scoring_fct_norm 1.0 +829 76 loss.margin 5.158463345306008 +829 76 optimizer.lr 0.0011436914406879391 +829 76 negative_sampler.num_negs_per_pos 3.0 +829 76 training.batch_size 1.0 +829 77 model.embedding_dim 1.0 +829 77 model.scoring_fct_norm 1.0 +829 77 loss.margin 4.084653402089774 +829 77 optimizer.lr 0.007724453912763147 +829 77 negative_sampler.num_negs_per_pos 4.0 +829 77 training.batch_size 1.0 +829 78 model.embedding_dim 2.0 +829 78 model.scoring_fct_norm 1.0 +829 78 loss.margin 9.165030168073539 +829 78 optimizer.lr 0.08012732247618176 +829 78 negative_sampler.num_negs_per_pos 85.0 +829 78 training.batch_size 1.0 +829 79 model.embedding_dim 0.0 +829 79 model.scoring_fct_norm 2.0 +829 79 loss.margin 6.459489400061667 +829 79 optimizer.lr 0.004481710664222397 +829 79 negative_sampler.num_negs_per_pos 36.0 +829 79 training.batch_size 1.0 +829 80 model.embedding_dim 2.0 +829 80 model.scoring_fct_norm 1.0 +829 80 loss.margin 6.479591473112771 +829 80 optimizer.lr 0.0030863328120586874 +829 80 
negative_sampler.num_negs_per_pos 91.0 +829 80 training.batch_size 0.0 +829 81 model.embedding_dim 1.0 +829 81 model.scoring_fct_norm 1.0 +829 81 loss.margin 9.147515480404024 +829 81 optimizer.lr 0.01292708228962038 +829 81 negative_sampler.num_negs_per_pos 47.0 +829 81 training.batch_size 0.0 +829 82 model.embedding_dim 0.0 +829 82 model.scoring_fct_norm 1.0 +829 82 loss.margin 6.168273538054476 +829 82 optimizer.lr 0.04051023260461576 +829 82 negative_sampler.num_negs_per_pos 59.0 +829 82 training.batch_size 2.0 +829 83 model.embedding_dim 2.0 +829 83 model.scoring_fct_norm 2.0 +829 83 loss.margin 6.1085753753685585 +829 83 optimizer.lr 0.016700211499792617 +829 83 negative_sampler.num_negs_per_pos 42.0 +829 83 training.batch_size 0.0 +829 84 model.embedding_dim 0.0 +829 84 model.scoring_fct_norm 2.0 +829 84 loss.margin 1.2623448446620835 +829 84 optimizer.lr 0.001607361033886142 +829 84 negative_sampler.num_negs_per_pos 82.0 +829 84 training.batch_size 0.0 +829 85 model.embedding_dim 1.0 +829 85 model.scoring_fct_norm 2.0 +829 85 loss.margin 9.76745029109577 +829 85 optimizer.lr 0.04610542260760911 +829 85 negative_sampler.num_negs_per_pos 52.0 +829 85 training.batch_size 0.0 +829 86 model.embedding_dim 0.0 +829 86 model.scoring_fct_norm 1.0 +829 86 loss.margin 9.069396037194506 +829 86 optimizer.lr 0.06605875634875195 +829 86 negative_sampler.num_negs_per_pos 12.0 +829 86 training.batch_size 0.0 +829 87 model.embedding_dim 1.0 +829 87 model.scoring_fct_norm 2.0 +829 87 loss.margin 2.8200065422376404 +829 87 optimizer.lr 0.009540961229095046 +829 87 negative_sampler.num_negs_per_pos 41.0 +829 87 training.batch_size 1.0 +829 88 model.embedding_dim 2.0 +829 88 model.scoring_fct_norm 2.0 +829 88 loss.margin 8.819213888391097 +829 88 optimizer.lr 0.03697002299841218 +829 88 negative_sampler.num_negs_per_pos 15.0 +829 88 training.batch_size 0.0 +829 89 model.embedding_dim 1.0 +829 89 model.scoring_fct_norm 1.0 +829 89 loss.margin 5.955863783910904 +829 89 
optimizer.lr 0.004420050519995433 +829 89 negative_sampler.num_negs_per_pos 33.0 +829 89 training.batch_size 1.0 +829 90 model.embedding_dim 1.0 +829 90 model.scoring_fct_norm 1.0 +829 90 loss.margin 7.852981779173158 +829 90 optimizer.lr 0.004078524143825172 +829 90 negative_sampler.num_negs_per_pos 36.0 +829 90 training.batch_size 0.0 +829 91 model.embedding_dim 2.0 +829 91 model.scoring_fct_norm 2.0 +829 91 loss.margin 8.834909234899449 +829 91 optimizer.lr 0.003002181092275726 +829 91 negative_sampler.num_negs_per_pos 53.0 +829 91 training.batch_size 0.0 +829 92 model.embedding_dim 2.0 +829 92 model.scoring_fct_norm 1.0 +829 92 loss.margin 3.900990758186385 +829 92 optimizer.lr 0.0744957559096537 +829 92 negative_sampler.num_negs_per_pos 21.0 +829 92 training.batch_size 1.0 +829 93 model.embedding_dim 1.0 +829 93 model.scoring_fct_norm 1.0 +829 93 loss.margin 5.87524725678538 +829 93 optimizer.lr 0.029600689740230533 +829 93 negative_sampler.num_negs_per_pos 79.0 +829 93 training.batch_size 2.0 +829 94 model.embedding_dim 0.0 +829 94 model.scoring_fct_norm 2.0 +829 94 loss.margin 5.939656935400771 +829 94 optimizer.lr 0.02564968151838385 +829 94 negative_sampler.num_negs_per_pos 9.0 +829 94 training.batch_size 1.0 +829 95 model.embedding_dim 0.0 +829 95 model.scoring_fct_norm 1.0 +829 95 loss.margin 5.028525730735226 +829 95 optimizer.lr 0.004023113412459095 +829 95 negative_sampler.num_negs_per_pos 48.0 +829 95 training.batch_size 2.0 +829 96 model.embedding_dim 1.0 +829 96 model.scoring_fct_norm 1.0 +829 96 loss.margin 4.354427338586487 +829 96 optimizer.lr 0.003272354187994116 +829 96 negative_sampler.num_negs_per_pos 89.0 +829 96 training.batch_size 2.0 +829 97 model.embedding_dim 1.0 +829 97 model.scoring_fct_norm 1.0 +829 97 loss.margin 7.679076461719385 +829 97 optimizer.lr 0.06936138640681774 +829 97 negative_sampler.num_negs_per_pos 35.0 +829 97 training.batch_size 2.0 +829 98 model.embedding_dim 2.0 +829 98 model.scoring_fct_norm 2.0 +829 98 
loss.margin 2.7656233960268883 +829 98 optimizer.lr 0.09439103853175344 +829 98 negative_sampler.num_negs_per_pos 89.0 +829 98 training.batch_size 1.0 +829 99 model.embedding_dim 0.0 +829 99 model.scoring_fct_norm 1.0 +829 99 loss.margin 2.339239703903159 +829 99 optimizer.lr 0.07869014580465788 +829 99 negative_sampler.num_negs_per_pos 95.0 +829 99 training.batch_size 0.0 +829 100 model.embedding_dim 2.0 +829 100 model.scoring_fct_norm 1.0 +829 100 loss.margin 0.8321927768698211 +829 100 optimizer.lr 0.0012145536823026347 +829 100 negative_sampler.num_negs_per_pos 38.0 +829 100 training.batch_size 1.0 +829 1 dataset """kinships""" +829 1 model """transe""" +829 1 loss """marginranking""" +829 1 regularizer """no""" +829 1 optimizer """adam""" +829 1 training_loop """owa""" +829 1 negative_sampler """basic""" +829 1 evaluator """rankbased""" +829 2 dataset """kinships""" +829 2 model """transe""" +829 2 loss """marginranking""" +829 2 regularizer """no""" +829 2 optimizer """adam""" +829 2 training_loop """owa""" +829 2 negative_sampler """basic""" +829 2 evaluator """rankbased""" +829 3 dataset """kinships""" +829 3 model """transe""" +829 3 loss """marginranking""" +829 3 regularizer """no""" +829 3 optimizer """adam""" +829 3 training_loop """owa""" +829 3 negative_sampler """basic""" +829 3 evaluator """rankbased""" +829 4 dataset """kinships""" +829 4 model """transe""" +829 4 loss """marginranking""" +829 4 regularizer """no""" +829 4 optimizer """adam""" +829 4 training_loop """owa""" +829 4 negative_sampler """basic""" +829 4 evaluator """rankbased""" +829 5 dataset """kinships""" +829 5 model """transe""" +829 5 loss """marginranking""" +829 5 regularizer """no""" +829 5 optimizer """adam""" +829 5 training_loop """owa""" +829 5 negative_sampler """basic""" +829 5 evaluator """rankbased""" +829 6 dataset """kinships""" +829 6 model """transe""" +829 6 loss """marginranking""" +829 6 regularizer """no""" +829 6 optimizer """adam""" +829 6 training_loop 
"""owa""" +829 6 negative_sampler """basic""" +829 6 evaluator """rankbased""" +829 7 dataset """kinships""" +829 7 model """transe""" +829 7 loss """marginranking""" +829 7 regularizer """no""" +829 7 optimizer """adam""" +829 7 training_loop """owa""" +829 7 negative_sampler """basic""" +829 7 evaluator """rankbased""" +829 8 dataset """kinships""" +829 8 model """transe""" +829 8 loss """marginranking""" +829 8 regularizer """no""" +829 8 optimizer """adam""" +829 8 training_loop """owa""" +829 8 negative_sampler """basic""" +829 8 evaluator """rankbased""" +829 9 dataset """kinships""" +829 9 model """transe""" +829 9 loss """marginranking""" +829 9 regularizer """no""" +829 9 optimizer """adam""" +829 9 training_loop """owa""" +829 9 negative_sampler """basic""" +829 9 evaluator """rankbased""" +829 10 dataset """kinships""" +829 10 model """transe""" +829 10 loss """marginranking""" +829 10 regularizer """no""" +829 10 optimizer """adam""" +829 10 training_loop """owa""" +829 10 negative_sampler """basic""" +829 10 evaluator """rankbased""" +829 11 dataset """kinships""" +829 11 model """transe""" +829 11 loss """marginranking""" +829 11 regularizer """no""" +829 11 optimizer """adam""" +829 11 training_loop """owa""" +829 11 negative_sampler """basic""" +829 11 evaluator """rankbased""" +829 12 dataset """kinships""" +829 12 model """transe""" +829 12 loss """marginranking""" +829 12 regularizer """no""" +829 12 optimizer """adam""" +829 12 training_loop """owa""" +829 12 negative_sampler """basic""" +829 12 evaluator """rankbased""" +829 13 dataset """kinships""" +829 13 model """transe""" +829 13 loss """marginranking""" +829 13 regularizer """no""" +829 13 optimizer """adam""" +829 13 training_loop """owa""" +829 13 negative_sampler """basic""" +829 13 evaluator """rankbased""" +829 14 dataset """kinships""" +829 14 model """transe""" +829 14 loss """marginranking""" +829 14 regularizer """no""" +829 14 optimizer """adam""" +829 14 training_loop """owa""" 
+829 14 negative_sampler """basic""" +829 14 evaluator """rankbased""" +829 15 dataset """kinships""" +829 15 model """transe""" +829 15 loss """marginranking""" +829 15 regularizer """no""" +829 15 optimizer """adam""" +829 15 training_loop """owa""" +829 15 negative_sampler """basic""" +829 15 evaluator """rankbased""" +829 16 dataset """kinships""" +829 16 model """transe""" +829 16 loss """marginranking""" +829 16 regularizer """no""" +829 16 optimizer """adam""" +829 16 training_loop """owa""" +829 16 negative_sampler """basic""" +829 16 evaluator """rankbased""" +829 17 dataset """kinships""" +829 17 model """transe""" +829 17 loss """marginranking""" +829 17 regularizer """no""" +829 17 optimizer """adam""" +829 17 training_loop """owa""" +829 17 negative_sampler """basic""" +829 17 evaluator """rankbased""" +829 18 dataset """kinships""" +829 18 model """transe""" +829 18 loss """marginranking""" +829 18 regularizer """no""" +829 18 optimizer """adam""" +829 18 training_loop """owa""" +829 18 negative_sampler """basic""" +829 18 evaluator """rankbased""" +829 19 dataset """kinships""" +829 19 model """transe""" +829 19 loss """marginranking""" +829 19 regularizer """no""" +829 19 optimizer """adam""" +829 19 training_loop """owa""" +829 19 negative_sampler """basic""" +829 19 evaluator """rankbased""" +829 20 dataset """kinships""" +829 20 model """transe""" +829 20 loss """marginranking""" +829 20 regularizer """no""" +829 20 optimizer """adam""" +829 20 training_loop """owa""" +829 20 negative_sampler """basic""" +829 20 evaluator """rankbased""" +829 21 dataset """kinships""" +829 21 model """transe""" +829 21 loss """marginranking""" +829 21 regularizer """no""" +829 21 optimizer """adam""" +829 21 training_loop """owa""" +829 21 negative_sampler """basic""" +829 21 evaluator """rankbased""" +829 22 dataset """kinships""" +829 22 model """transe""" +829 22 loss """marginranking""" +829 22 regularizer """no""" +829 22 optimizer """adam""" +829 22 
training_loop """owa""" +829 22 negative_sampler """basic""" +829 22 evaluator """rankbased""" +829 23 dataset """kinships""" +829 23 model """transe""" +829 23 loss """marginranking""" +829 23 regularizer """no""" +829 23 optimizer """adam""" +829 23 training_loop """owa""" +829 23 negative_sampler """basic""" +829 23 evaluator """rankbased""" +829 24 dataset """kinships""" +829 24 model """transe""" +829 24 loss """marginranking""" +829 24 regularizer """no""" +829 24 optimizer """adam""" +829 24 training_loop """owa""" +829 24 negative_sampler """basic""" +829 24 evaluator """rankbased""" +829 25 dataset """kinships""" +829 25 model """transe""" +829 25 loss """marginranking""" +829 25 regularizer """no""" +829 25 optimizer """adam""" +829 25 training_loop """owa""" +829 25 negative_sampler """basic""" +829 25 evaluator """rankbased""" +829 26 dataset """kinships""" +829 26 model """transe""" +829 26 loss """marginranking""" +829 26 regularizer """no""" +829 26 optimizer """adam""" +829 26 training_loop """owa""" +829 26 negative_sampler """basic""" +829 26 evaluator """rankbased""" +829 27 dataset """kinships""" +829 27 model """transe""" +829 27 loss """marginranking""" +829 27 regularizer """no""" +829 27 optimizer """adam""" +829 27 training_loop """owa""" +829 27 negative_sampler """basic""" +829 27 evaluator """rankbased""" +829 28 dataset """kinships""" +829 28 model """transe""" +829 28 loss """marginranking""" +829 28 regularizer """no""" +829 28 optimizer """adam""" +829 28 training_loop """owa""" +829 28 negative_sampler """basic""" +829 28 evaluator """rankbased""" +829 29 dataset """kinships""" +829 29 model """transe""" +829 29 loss """marginranking""" +829 29 regularizer """no""" +829 29 optimizer """adam""" +829 29 training_loop """owa""" +829 29 negative_sampler """basic""" +829 29 evaluator """rankbased""" +829 30 dataset """kinships""" +829 30 model """transe""" +829 30 loss """marginranking""" +829 30 regularizer """no""" +829 30 optimizer 
"""adam""" +829 30 training_loop """owa""" +829 30 negative_sampler """basic""" +829 30 evaluator """rankbased""" +829 31 dataset """kinships""" +829 31 model """transe""" +829 31 loss """marginranking""" +829 31 regularizer """no""" +829 31 optimizer """adam""" +829 31 training_loop """owa""" +829 31 negative_sampler """basic""" +829 31 evaluator """rankbased""" +829 32 dataset """kinships""" +829 32 model """transe""" +829 32 loss """marginranking""" +829 32 regularizer """no""" +829 32 optimizer """adam""" +829 32 training_loop """owa""" +829 32 negative_sampler """basic""" +829 32 evaluator """rankbased""" +829 33 dataset """kinships""" +829 33 model """transe""" +829 33 loss """marginranking""" +829 33 regularizer """no""" +829 33 optimizer """adam""" +829 33 training_loop """owa""" +829 33 negative_sampler """basic""" +829 33 evaluator """rankbased""" +829 34 dataset """kinships""" +829 34 model """transe""" +829 34 loss """marginranking""" +829 34 regularizer """no""" +829 34 optimizer """adam""" +829 34 training_loop """owa""" +829 34 negative_sampler """basic""" +829 34 evaluator """rankbased""" +829 35 dataset """kinships""" +829 35 model """transe""" +829 35 loss """marginranking""" +829 35 regularizer """no""" +829 35 optimizer """adam""" +829 35 training_loop """owa""" +829 35 negative_sampler """basic""" +829 35 evaluator """rankbased""" +829 36 dataset """kinships""" +829 36 model """transe""" +829 36 loss """marginranking""" +829 36 regularizer """no""" +829 36 optimizer """adam""" +829 36 training_loop """owa""" +829 36 negative_sampler """basic""" +829 36 evaluator """rankbased""" +829 37 dataset """kinships""" +829 37 model """transe""" +829 37 loss """marginranking""" +829 37 regularizer """no""" +829 37 optimizer """adam""" +829 37 training_loop """owa""" +829 37 negative_sampler """basic""" +829 37 evaluator """rankbased""" +829 38 dataset """kinships""" +829 38 model """transe""" +829 38 loss """marginranking""" +829 38 regularizer """no""" 
+829 38 optimizer """adam""" +829 38 training_loop """owa""" +829 38 negative_sampler """basic""" +829 38 evaluator """rankbased""" +829 39 dataset """kinships""" +829 39 model """transe""" +829 39 loss """marginranking""" +829 39 regularizer """no""" +829 39 optimizer """adam""" +829 39 training_loop """owa""" +829 39 negative_sampler """basic""" +829 39 evaluator """rankbased""" +829 40 dataset """kinships""" +829 40 model """transe""" +829 40 loss """marginranking""" +829 40 regularizer """no""" +829 40 optimizer """adam""" +829 40 training_loop """owa""" +829 40 negative_sampler """basic""" +829 40 evaluator """rankbased""" +829 41 dataset """kinships""" +829 41 model """transe""" +829 41 loss """marginranking""" +829 41 regularizer """no""" +829 41 optimizer """adam""" +829 41 training_loop """owa""" +829 41 negative_sampler """basic""" +829 41 evaluator """rankbased""" +829 42 dataset """kinships""" +829 42 model """transe""" +829 42 loss """marginranking""" +829 42 regularizer """no""" +829 42 optimizer """adam""" +829 42 training_loop """owa""" +829 42 negative_sampler """basic""" +829 42 evaluator """rankbased""" +829 43 dataset """kinships""" +829 43 model """transe""" +829 43 loss """marginranking""" +829 43 regularizer """no""" +829 43 optimizer """adam""" +829 43 training_loop """owa""" +829 43 negative_sampler """basic""" +829 43 evaluator """rankbased""" +829 44 dataset """kinships""" +829 44 model """transe""" +829 44 loss """marginranking""" +829 44 regularizer """no""" +829 44 optimizer """adam""" +829 44 training_loop """owa""" +829 44 negative_sampler """basic""" +829 44 evaluator """rankbased""" +829 45 dataset """kinships""" +829 45 model """transe""" +829 45 loss """marginranking""" +829 45 regularizer """no""" +829 45 optimizer """adam""" +829 45 training_loop """owa""" +829 45 negative_sampler """basic""" +829 45 evaluator """rankbased""" +829 46 dataset """kinships""" +829 46 model """transe""" +829 46 loss """marginranking""" +829 46 
regularizer """no""" +829 46 optimizer """adam""" +829 46 training_loop """owa""" +829 46 negative_sampler """basic""" +829 46 evaluator """rankbased""" +829 47 dataset """kinships""" +829 47 model """transe""" +829 47 loss """marginranking""" +829 47 regularizer """no""" +829 47 optimizer """adam""" +829 47 training_loop """owa""" +829 47 negative_sampler """basic""" +829 47 evaluator """rankbased""" +829 48 dataset """kinships""" +829 48 model """transe""" +829 48 loss """marginranking""" +829 48 regularizer """no""" +829 48 optimizer """adam""" +829 48 training_loop """owa""" +829 48 negative_sampler """basic""" +829 48 evaluator """rankbased""" +829 49 dataset """kinships""" +829 49 model """transe""" +829 49 loss """marginranking""" +829 49 regularizer """no""" +829 49 optimizer """adam""" +829 49 training_loop """owa""" +829 49 negative_sampler """basic""" +829 49 evaluator """rankbased""" +829 50 dataset """kinships""" +829 50 model """transe""" +829 50 loss """marginranking""" +829 50 regularizer """no""" +829 50 optimizer """adam""" +829 50 training_loop """owa""" +829 50 negative_sampler """basic""" +829 50 evaluator """rankbased""" +829 51 dataset """kinships""" +829 51 model """transe""" +829 51 loss """marginranking""" +829 51 regularizer """no""" +829 51 optimizer """adam""" +829 51 training_loop """owa""" +829 51 negative_sampler """basic""" +829 51 evaluator """rankbased""" +829 52 dataset """kinships""" +829 52 model """transe""" +829 52 loss """marginranking""" +829 52 regularizer """no""" +829 52 optimizer """adam""" +829 52 training_loop """owa""" +829 52 negative_sampler """basic""" +829 52 evaluator """rankbased""" +829 53 dataset """kinships""" +829 53 model """transe""" +829 53 loss """marginranking""" +829 53 regularizer """no""" +829 53 optimizer """adam""" +829 53 training_loop """owa""" +829 53 negative_sampler """basic""" +829 53 evaluator """rankbased""" +829 54 dataset """kinships""" +829 54 model """transe""" +829 54 loss 
"""marginranking""" +829 54 regularizer """no""" +829 54 optimizer """adam""" +829 54 training_loop """owa""" +829 54 negative_sampler """basic""" +829 54 evaluator """rankbased""" +829 55 dataset """kinships""" +829 55 model """transe""" +829 55 loss """marginranking""" +829 55 regularizer """no""" +829 55 optimizer """adam""" +829 55 training_loop """owa""" +829 55 negative_sampler """basic""" +829 55 evaluator """rankbased""" +829 56 dataset """kinships""" +829 56 model """transe""" +829 56 loss """marginranking""" +829 56 regularizer """no""" +829 56 optimizer """adam""" +829 56 training_loop """owa""" +829 56 negative_sampler """basic""" +829 56 evaluator """rankbased""" +829 57 dataset """kinships""" +829 57 model """transe""" +829 57 loss """marginranking""" +829 57 regularizer """no""" +829 57 optimizer """adam""" +829 57 training_loop """owa""" +829 57 negative_sampler """basic""" +829 57 evaluator """rankbased""" +829 58 dataset """kinships""" +829 58 model """transe""" +829 58 loss """marginranking""" +829 58 regularizer """no""" +829 58 optimizer """adam""" +829 58 training_loop """owa""" +829 58 negative_sampler """basic""" +829 58 evaluator """rankbased""" +829 59 dataset """kinships""" +829 59 model """transe""" +829 59 loss """marginranking""" +829 59 regularizer """no""" +829 59 optimizer """adam""" +829 59 training_loop """owa""" +829 59 negative_sampler """basic""" +829 59 evaluator """rankbased""" +829 60 dataset """kinships""" +829 60 model """transe""" +829 60 loss """marginranking""" +829 60 regularizer """no""" +829 60 optimizer """adam""" +829 60 training_loop """owa""" +829 60 negative_sampler """basic""" +829 60 evaluator """rankbased""" +829 61 dataset """kinships""" +829 61 model """transe""" +829 61 loss """marginranking""" +829 61 regularizer """no""" +829 61 optimizer """adam""" +829 61 training_loop """owa""" +829 61 negative_sampler """basic""" +829 61 evaluator """rankbased""" +829 62 dataset """kinships""" +829 62 model 
"""transe""" +829 62 loss """marginranking""" +829 62 regularizer """no""" +829 62 optimizer """adam""" +829 62 training_loop """owa""" +829 62 negative_sampler """basic""" +829 62 evaluator """rankbased""" +829 63 dataset """kinships""" +829 63 model """transe""" +829 63 loss """marginranking""" +829 63 regularizer """no""" +829 63 optimizer """adam""" +829 63 training_loop """owa""" +829 63 negative_sampler """basic""" +829 63 evaluator """rankbased""" +829 64 dataset """kinships""" +829 64 model """transe""" +829 64 loss """marginranking""" +829 64 regularizer """no""" +829 64 optimizer """adam""" +829 64 training_loop """owa""" +829 64 negative_sampler """basic""" +829 64 evaluator """rankbased""" +829 65 dataset """kinships""" +829 65 model """transe""" +829 65 loss """marginranking""" +829 65 regularizer """no""" +829 65 optimizer """adam""" +829 65 training_loop """owa""" +829 65 negative_sampler """basic""" +829 65 evaluator """rankbased""" +829 66 dataset """kinships""" +829 66 model """transe""" +829 66 loss """marginranking""" +829 66 regularizer """no""" +829 66 optimizer """adam""" +829 66 training_loop """owa""" +829 66 negative_sampler """basic""" +829 66 evaluator """rankbased""" +829 67 dataset """kinships""" +829 67 model """transe""" +829 67 loss """marginranking""" +829 67 regularizer """no""" +829 67 optimizer """adam""" +829 67 training_loop """owa""" +829 67 negative_sampler """basic""" +829 67 evaluator """rankbased""" +829 68 dataset """kinships""" +829 68 model """transe""" +829 68 loss """marginranking""" +829 68 regularizer """no""" +829 68 optimizer """adam""" +829 68 training_loop """owa""" +829 68 negative_sampler """basic""" +829 68 evaluator """rankbased""" +829 69 dataset """kinships""" +829 69 model """transe""" +829 69 loss """marginranking""" +829 69 regularizer """no""" +829 69 optimizer """adam""" +829 69 training_loop """owa""" +829 69 negative_sampler """basic""" +829 69 evaluator """rankbased""" +829 70 dataset 
"""kinships""" +829 70 model """transe""" +829 70 loss """marginranking""" +829 70 regularizer """no""" +829 70 optimizer """adam""" +829 70 training_loop """owa""" +829 70 negative_sampler """basic""" +829 70 evaluator """rankbased""" +829 71 dataset """kinships""" +829 71 model """transe""" +829 71 loss """marginranking""" +829 71 regularizer """no""" +829 71 optimizer """adam""" +829 71 training_loop """owa""" +829 71 negative_sampler """basic""" +829 71 evaluator """rankbased""" +829 72 dataset """kinships""" +829 72 model """transe""" +829 72 loss """marginranking""" +829 72 regularizer """no""" +829 72 optimizer """adam""" +829 72 training_loop """owa""" +829 72 negative_sampler """basic""" +829 72 evaluator """rankbased""" +829 73 dataset """kinships""" +829 73 model """transe""" +829 73 loss """marginranking""" +829 73 regularizer """no""" +829 73 optimizer """adam""" +829 73 training_loop """owa""" +829 73 negative_sampler """basic""" +829 73 evaluator """rankbased""" +829 74 dataset """kinships""" +829 74 model """transe""" +829 74 loss """marginranking""" +829 74 regularizer """no""" +829 74 optimizer """adam""" +829 74 training_loop """owa""" +829 74 negative_sampler """basic""" +829 74 evaluator """rankbased""" +829 75 dataset """kinships""" +829 75 model """transe""" +829 75 loss """marginranking""" +829 75 regularizer """no""" +829 75 optimizer """adam""" +829 75 training_loop """owa""" +829 75 negative_sampler """basic""" +829 75 evaluator """rankbased""" +829 76 dataset """kinships""" +829 76 model """transe""" +829 76 loss """marginranking""" +829 76 regularizer """no""" +829 76 optimizer """adam""" +829 76 training_loop """owa""" +829 76 negative_sampler """basic""" +829 76 evaluator """rankbased""" +829 77 dataset """kinships""" +829 77 model """transe""" +829 77 loss """marginranking""" +829 77 regularizer """no""" +829 77 optimizer """adam""" +829 77 training_loop """owa""" +829 77 negative_sampler """basic""" +829 77 evaluator """rankbased""" 
+829 78 dataset """kinships""" +829 78 model """transe""" +829 78 loss """marginranking""" +829 78 regularizer """no""" +829 78 optimizer """adam""" +829 78 training_loop """owa""" +829 78 negative_sampler """basic""" +829 78 evaluator """rankbased""" +829 79 dataset """kinships""" +829 79 model """transe""" +829 79 loss """marginranking""" +829 79 regularizer """no""" +829 79 optimizer """adam""" +829 79 training_loop """owa""" +829 79 negative_sampler """basic""" +829 79 evaluator """rankbased""" +829 80 dataset """kinships""" +829 80 model """transe""" +829 80 loss """marginranking""" +829 80 regularizer """no""" +829 80 optimizer """adam""" +829 80 training_loop """owa""" +829 80 negative_sampler """basic""" +829 80 evaluator """rankbased""" +829 81 dataset """kinships""" +829 81 model """transe""" +829 81 loss """marginranking""" +829 81 regularizer """no""" +829 81 optimizer """adam""" +829 81 training_loop """owa""" +829 81 negative_sampler """basic""" +829 81 evaluator """rankbased""" +829 82 dataset """kinships""" +829 82 model """transe""" +829 82 loss """marginranking""" +829 82 regularizer """no""" +829 82 optimizer """adam""" +829 82 training_loop """owa""" +829 82 negative_sampler """basic""" +829 82 evaluator """rankbased""" +829 83 dataset """kinships""" +829 83 model """transe""" +829 83 loss """marginranking""" +829 83 regularizer """no""" +829 83 optimizer """adam""" +829 83 training_loop """owa""" +829 83 negative_sampler """basic""" +829 83 evaluator """rankbased""" +829 84 dataset """kinships""" +829 84 model """transe""" +829 84 loss """marginranking""" +829 84 regularizer """no""" +829 84 optimizer """adam""" +829 84 training_loop """owa""" +829 84 negative_sampler """basic""" +829 84 evaluator """rankbased""" +829 85 dataset """kinships""" +829 85 model """transe""" +829 85 loss """marginranking""" +829 85 regularizer """no""" +829 85 optimizer """adam""" +829 85 training_loop """owa""" +829 85 negative_sampler """basic""" +829 85 evaluator 
"""rankbased""" +829 86 dataset """kinships""" +829 86 model """transe""" +829 86 loss """marginranking""" +829 86 regularizer """no""" +829 86 optimizer """adam""" +829 86 training_loop """owa""" +829 86 negative_sampler """basic""" +829 86 evaluator """rankbased""" +829 87 dataset """kinships""" +829 87 model """transe""" +829 87 loss """marginranking""" +829 87 regularizer """no""" +829 87 optimizer """adam""" +829 87 training_loop """owa""" +829 87 negative_sampler """basic""" +829 87 evaluator """rankbased""" +829 88 dataset """kinships""" +829 88 model """transe""" +829 88 loss """marginranking""" +829 88 regularizer """no""" +829 88 optimizer """adam""" +829 88 training_loop """owa""" +829 88 negative_sampler """basic""" +829 88 evaluator """rankbased""" +829 89 dataset """kinships""" +829 89 model """transe""" +829 89 loss """marginranking""" +829 89 regularizer """no""" +829 89 optimizer """adam""" +829 89 training_loop """owa""" +829 89 negative_sampler """basic""" +829 89 evaluator """rankbased""" +829 90 dataset """kinships""" +829 90 model """transe""" +829 90 loss """marginranking""" +829 90 regularizer """no""" +829 90 optimizer """adam""" +829 90 training_loop """owa""" +829 90 negative_sampler """basic""" +829 90 evaluator """rankbased""" +829 91 dataset """kinships""" +829 91 model """transe""" +829 91 loss """marginranking""" +829 91 regularizer """no""" +829 91 optimizer """adam""" +829 91 training_loop """owa""" +829 91 negative_sampler """basic""" +829 91 evaluator """rankbased""" +829 92 dataset """kinships""" +829 92 model """transe""" +829 92 loss """marginranking""" +829 92 regularizer """no""" +829 92 optimizer """adam""" +829 92 training_loop """owa""" +829 92 negative_sampler """basic""" +829 92 evaluator """rankbased""" +829 93 dataset """kinships""" +829 93 model """transe""" +829 93 loss """marginranking""" +829 93 regularizer """no""" +829 93 optimizer """adam""" +829 93 training_loop """owa""" +829 93 negative_sampler """basic""" 
+829 93 evaluator """rankbased""" +829 94 dataset """kinships""" +829 94 model """transe""" +829 94 loss """marginranking""" +829 94 regularizer """no""" +829 94 optimizer """adam""" +829 94 training_loop """owa""" +829 94 negative_sampler """basic""" +829 94 evaluator """rankbased""" +829 95 dataset """kinships""" +829 95 model """transe""" +829 95 loss """marginranking""" +829 95 regularizer """no""" +829 95 optimizer """adam""" +829 95 training_loop """owa""" +829 95 negative_sampler """basic""" +829 95 evaluator """rankbased""" +829 96 dataset """kinships""" +829 96 model """transe""" +829 96 loss """marginranking""" +829 96 regularizer """no""" +829 96 optimizer """adam""" +829 96 training_loop """owa""" +829 96 negative_sampler """basic""" +829 96 evaluator """rankbased""" +829 97 dataset """kinships""" +829 97 model """transe""" +829 97 loss """marginranking""" +829 97 regularizer """no""" +829 97 optimizer """adam""" +829 97 training_loop """owa""" +829 97 negative_sampler """basic""" +829 97 evaluator """rankbased""" +829 98 dataset """kinships""" +829 98 model """transe""" +829 98 loss """marginranking""" +829 98 regularizer """no""" +829 98 optimizer """adam""" +829 98 training_loop """owa""" +829 98 negative_sampler """basic""" +829 98 evaluator """rankbased""" +829 99 dataset """kinships""" +829 99 model """transe""" +829 99 loss """marginranking""" +829 99 regularizer """no""" +829 99 optimizer """adam""" +829 99 training_loop """owa""" +829 99 negative_sampler """basic""" +829 99 evaluator """rankbased""" +829 100 dataset """kinships""" +829 100 model """transe""" +829 100 loss """marginranking""" +829 100 regularizer """no""" +829 100 optimizer """adam""" +829 100 training_loop """owa""" +829 100 negative_sampler """basic""" +829 100 evaluator """rankbased""" +830 1 model.embedding_dim 2.0 +830 1 model.scoring_fct_norm 1.0 +830 1 loss.margin 6.395642185092232 +830 1 optimizer.lr 0.07622302617957735 +830 1 negative_sampler.num_negs_per_pos 79.0 +830 
1 training.batch_size 2.0 +830 2 model.embedding_dim 2.0 +830 2 model.scoring_fct_norm 1.0 +830 2 loss.margin 6.5069615614727745 +830 2 optimizer.lr 0.013642424852186445 +830 2 negative_sampler.num_negs_per_pos 65.0 +830 2 training.batch_size 0.0 +830 3 model.embedding_dim 2.0 +830 3 model.scoring_fct_norm 2.0 +830 3 loss.margin 2.9884031924537373 +830 3 optimizer.lr 0.01676667605586528 +830 3 negative_sampler.num_negs_per_pos 54.0 +830 3 training.batch_size 2.0 +830 4 model.embedding_dim 1.0 +830 4 model.scoring_fct_norm 2.0 +830 4 loss.margin 6.115409184792303 +830 4 optimizer.lr 0.05058547076210956 +830 4 negative_sampler.num_negs_per_pos 76.0 +830 4 training.batch_size 0.0 +830 5 model.embedding_dim 2.0 +830 5 model.scoring_fct_norm 2.0 +830 5 loss.margin 5.3135956282681835 +830 5 optimizer.lr 0.01170853870448269 +830 5 negative_sampler.num_negs_per_pos 78.0 +830 5 training.batch_size 1.0 +830 6 model.embedding_dim 2.0 +830 6 model.scoring_fct_norm 2.0 +830 6 loss.margin 7.2026002535334515 +830 6 optimizer.lr 0.01367651057469913 +830 6 negative_sampler.num_negs_per_pos 77.0 +830 6 training.batch_size 2.0 +830 7 model.embedding_dim 0.0 +830 7 model.scoring_fct_norm 2.0 +830 7 loss.margin 6.10200165359001 +830 7 optimizer.lr 0.004500775328267285 +830 7 negative_sampler.num_negs_per_pos 83.0 +830 7 training.batch_size 0.0 +830 8 model.embedding_dim 0.0 +830 8 model.scoring_fct_norm 2.0 +830 8 loss.margin 3.5972952658210553 +830 8 optimizer.lr 0.0641696728461312 +830 8 negative_sampler.num_negs_per_pos 77.0 +830 8 training.batch_size 1.0 +830 9 model.embedding_dim 1.0 +830 9 model.scoring_fct_norm 1.0 +830 9 loss.margin 5.55963897085843 +830 9 optimizer.lr 0.002358760617834533 +830 9 negative_sampler.num_negs_per_pos 72.0 +830 9 training.batch_size 2.0 +830 10 model.embedding_dim 0.0 +830 10 model.scoring_fct_norm 1.0 +830 10 loss.margin 5.313658297751877 +830 10 optimizer.lr 0.036224951576345775 +830 10 negative_sampler.num_negs_per_pos 6.0 +830 10 
training.batch_size 0.0 +830 11 model.embedding_dim 0.0 +830 11 model.scoring_fct_norm 2.0 +830 11 loss.margin 1.7361050145943457 +830 11 optimizer.lr 0.004739014919601436 +830 11 negative_sampler.num_negs_per_pos 76.0 +830 11 training.batch_size 0.0 +830 12 model.embedding_dim 0.0 +830 12 model.scoring_fct_norm 1.0 +830 12 loss.margin 1.5774731669986024 +830 12 optimizer.lr 0.009603482962256172 +830 12 negative_sampler.num_negs_per_pos 75.0 +830 12 training.batch_size 1.0 +830 13 model.embedding_dim 2.0 +830 13 model.scoring_fct_norm 1.0 +830 13 loss.margin 7.3591577801986405 +830 13 optimizer.lr 0.001173765174107686 +830 13 negative_sampler.num_negs_per_pos 19.0 +830 13 training.batch_size 0.0 +830 14 model.embedding_dim 2.0 +830 14 model.scoring_fct_norm 1.0 +830 14 loss.margin 5.288932818488113 +830 14 optimizer.lr 0.0026407466109745915 +830 14 negative_sampler.num_negs_per_pos 1.0 +830 14 training.batch_size 1.0 +830 15 model.embedding_dim 2.0 +830 15 model.scoring_fct_norm 2.0 +830 15 loss.margin 1.8605882099278461 +830 15 optimizer.lr 0.0027888291165709053 +830 15 negative_sampler.num_negs_per_pos 32.0 +830 15 training.batch_size 2.0 +830 16 model.embedding_dim 1.0 +830 16 model.scoring_fct_norm 2.0 +830 16 loss.margin 1.2193600846457402 +830 16 optimizer.lr 0.026519402377940628 +830 16 negative_sampler.num_negs_per_pos 70.0 +830 16 training.batch_size 1.0 +830 17 model.embedding_dim 1.0 +830 17 model.scoring_fct_norm 2.0 +830 17 loss.margin 7.501092572684972 +830 17 optimizer.lr 0.027585430637736483 +830 17 negative_sampler.num_negs_per_pos 90.0 +830 17 training.batch_size 0.0 +830 18 model.embedding_dim 2.0 +830 18 model.scoring_fct_norm 1.0 +830 18 loss.margin 4.780242555207966 +830 18 optimizer.lr 0.006242017127980254 +830 18 negative_sampler.num_negs_per_pos 93.0 +830 18 training.batch_size 1.0 +830 19 model.embedding_dim 2.0 +830 19 model.scoring_fct_norm 1.0 +830 19 loss.margin 2.3376715408239246 +830 19 optimizer.lr 0.028773413533682873 +830 19 
negative_sampler.num_negs_per_pos 72.0 +830 19 training.batch_size 2.0 +830 20 model.embedding_dim 2.0 +830 20 model.scoring_fct_norm 1.0 +830 20 loss.margin 3.676078908909769 +830 20 optimizer.lr 0.02529974323263608 +830 20 negative_sampler.num_negs_per_pos 59.0 +830 20 training.batch_size 2.0 +830 21 model.embedding_dim 1.0 +830 21 model.scoring_fct_norm 2.0 +830 21 loss.margin 2.4499301058594716 +830 21 optimizer.lr 0.05608913783377609 +830 21 negative_sampler.num_negs_per_pos 8.0 +830 21 training.batch_size 2.0 +830 22 model.embedding_dim 2.0 +830 22 model.scoring_fct_norm 1.0 +830 22 loss.margin 4.198211059962595 +830 22 optimizer.lr 0.003181089277607571 +830 22 negative_sampler.num_negs_per_pos 80.0 +830 22 training.batch_size 2.0 +830 23 model.embedding_dim 2.0 +830 23 model.scoring_fct_norm 1.0 +830 23 loss.margin 7.701728583655559 +830 23 optimizer.lr 0.027530882275381578 +830 23 negative_sampler.num_negs_per_pos 65.0 +830 23 training.batch_size 2.0 +830 24 model.embedding_dim 1.0 +830 24 model.scoring_fct_norm 2.0 +830 24 loss.margin 5.5604492415769435 +830 24 optimizer.lr 0.06566236074510327 +830 24 negative_sampler.num_negs_per_pos 8.0 +830 24 training.batch_size 0.0 +830 25 model.embedding_dim 2.0 +830 25 model.scoring_fct_norm 2.0 +830 25 loss.margin 3.8587792587425653 +830 25 optimizer.lr 0.022842111386259176 +830 25 negative_sampler.num_negs_per_pos 62.0 +830 25 training.batch_size 0.0 +830 26 model.embedding_dim 0.0 +830 26 model.scoring_fct_norm 1.0 +830 26 loss.margin 8.228202314587024 +830 26 optimizer.lr 0.007328259330597796 +830 26 negative_sampler.num_negs_per_pos 59.0 +830 26 training.batch_size 2.0 +830 27 model.embedding_dim 0.0 +830 27 model.scoring_fct_norm 1.0 +830 27 loss.margin 5.094194723322548 +830 27 optimizer.lr 0.010593609508629462 +830 27 negative_sampler.num_negs_per_pos 98.0 +830 27 training.batch_size 0.0 +830 28 model.embedding_dim 0.0 +830 28 model.scoring_fct_norm 1.0 +830 28 loss.margin 4.402153830834693 +830 28 
optimizer.lr 0.034902206422063535 +830 28 negative_sampler.num_negs_per_pos 15.0 +830 28 training.batch_size 1.0 +830 29 model.embedding_dim 0.0 +830 29 model.scoring_fct_norm 2.0 +830 29 loss.margin 8.195902181044756 +830 29 optimizer.lr 0.014465491097224866 +830 29 negative_sampler.num_negs_per_pos 42.0 +830 29 training.batch_size 1.0 +830 30 model.embedding_dim 0.0 +830 30 model.scoring_fct_norm 2.0 +830 30 loss.margin 7.262786975116741 +830 30 optimizer.lr 0.001381364124336382 +830 30 negative_sampler.num_negs_per_pos 62.0 +830 30 training.batch_size 1.0 +830 31 model.embedding_dim 0.0 +830 31 model.scoring_fct_norm 2.0 +830 31 loss.margin 6.733495699124051 +830 31 optimizer.lr 0.026700859263677705 +830 31 negative_sampler.num_negs_per_pos 28.0 +830 31 training.batch_size 0.0 +830 32 model.embedding_dim 2.0 +830 32 model.scoring_fct_norm 1.0 +830 32 loss.margin 2.7957555613788987 +830 32 optimizer.lr 0.012080368529630622 +830 32 negative_sampler.num_negs_per_pos 76.0 +830 32 training.batch_size 0.0 +830 33 model.embedding_dim 2.0 +830 33 model.scoring_fct_norm 1.0 +830 33 loss.margin 5.127972597338475 +830 33 optimizer.lr 0.0013465410247315683 +830 33 negative_sampler.num_negs_per_pos 84.0 +830 33 training.batch_size 2.0 +830 34 model.embedding_dim 1.0 +830 34 model.scoring_fct_norm 2.0 +830 34 loss.margin 0.9205163581253231 +830 34 optimizer.lr 0.0331868078133166 +830 34 negative_sampler.num_negs_per_pos 71.0 +830 34 training.batch_size 0.0 +830 35 model.embedding_dim 1.0 +830 35 model.scoring_fct_norm 1.0 +830 35 loss.margin 5.212974695422722 +830 35 optimizer.lr 0.03268047747922103 +830 35 negative_sampler.num_negs_per_pos 13.0 +830 35 training.batch_size 0.0 +830 36 model.embedding_dim 0.0 +830 36 model.scoring_fct_norm 2.0 +830 36 loss.margin 2.0398910308684415 +830 36 optimizer.lr 0.001784890949159303 +830 36 negative_sampler.num_negs_per_pos 36.0 +830 36 training.batch_size 2.0 +830 37 model.embedding_dim 2.0 +830 37 model.scoring_fct_norm 2.0 +830 37 
loss.margin 7.991639837978672 +830 37 optimizer.lr 0.03105781644844227 +830 37 negative_sampler.num_negs_per_pos 27.0 +830 37 training.batch_size 1.0 +830 38 model.embedding_dim 2.0 +830 38 model.scoring_fct_norm 1.0 +830 38 loss.margin 7.973442287786026 +830 38 optimizer.lr 0.001567036698639675 +830 38 negative_sampler.num_negs_per_pos 60.0 +830 38 training.batch_size 2.0 +830 39 model.embedding_dim 1.0 +830 39 model.scoring_fct_norm 1.0 +830 39 loss.margin 8.291526022064303 +830 39 optimizer.lr 0.06197915931118978 +830 39 negative_sampler.num_negs_per_pos 47.0 +830 39 training.batch_size 1.0 +830 40 model.embedding_dim 2.0 +830 40 model.scoring_fct_norm 2.0 +830 40 loss.margin 2.1133476762719523 +830 40 optimizer.lr 0.007130242039077334 +830 40 negative_sampler.num_negs_per_pos 76.0 +830 40 training.batch_size 0.0 +830 41 model.embedding_dim 1.0 +830 41 model.scoring_fct_norm 2.0 +830 41 loss.margin 8.881356137184428 +830 41 optimizer.lr 0.002309324225508368 +830 41 negative_sampler.num_negs_per_pos 26.0 +830 41 training.batch_size 1.0 +830 42 model.embedding_dim 2.0 +830 42 model.scoring_fct_norm 2.0 +830 42 loss.margin 2.4919400404188705 +830 42 optimizer.lr 0.004324084196893927 +830 42 negative_sampler.num_negs_per_pos 66.0 +830 42 training.batch_size 2.0 +830 43 model.embedding_dim 1.0 +830 43 model.scoring_fct_norm 1.0 +830 43 loss.margin 2.8522601845473874 +830 43 optimizer.lr 0.0018845774749974848 +830 43 negative_sampler.num_negs_per_pos 15.0 +830 43 training.batch_size 1.0 +830 44 model.embedding_dim 0.0 +830 44 model.scoring_fct_norm 1.0 +830 44 loss.margin 6.083812082718037 +830 44 optimizer.lr 0.002527152891919363 +830 44 negative_sampler.num_negs_per_pos 10.0 +830 44 training.batch_size 1.0 +830 45 model.embedding_dim 2.0 +830 45 model.scoring_fct_norm 2.0 +830 45 loss.margin 5.64213031140511 +830 45 optimizer.lr 0.04657528732702216 +830 45 negative_sampler.num_negs_per_pos 29.0 +830 45 training.batch_size 0.0 +830 46 model.embedding_dim 1.0 +830 46 
model.scoring_fct_norm 2.0 +830 46 loss.margin 3.5956525811600892 +830 46 optimizer.lr 0.004499697684886679 +830 46 negative_sampler.num_negs_per_pos 57.0 +830 46 training.batch_size 0.0 +830 47 model.embedding_dim 1.0 +830 47 model.scoring_fct_norm 1.0 +830 47 loss.margin 1.4527379260474698 +830 47 optimizer.lr 0.03580099582221065 +830 47 negative_sampler.num_negs_per_pos 34.0 +830 47 training.batch_size 1.0 +830 48 model.embedding_dim 0.0 +830 48 model.scoring_fct_norm 2.0 +830 48 loss.margin 4.530951844814016 +830 48 optimizer.lr 0.0025635859468547783 +830 48 negative_sampler.num_negs_per_pos 82.0 +830 48 training.batch_size 2.0 +830 49 model.embedding_dim 1.0 +830 49 model.scoring_fct_norm 1.0 +830 49 loss.margin 9.300038420931966 +830 49 optimizer.lr 0.005506958995105724 +830 49 negative_sampler.num_negs_per_pos 30.0 +830 49 training.batch_size 0.0 +830 50 model.embedding_dim 0.0 +830 50 model.scoring_fct_norm 1.0 +830 50 loss.margin 1.154773225824424 +830 50 optimizer.lr 0.001288147776455199 +830 50 negative_sampler.num_negs_per_pos 12.0 +830 50 training.batch_size 1.0 +830 51 model.embedding_dim 2.0 +830 51 model.scoring_fct_norm 1.0 +830 51 loss.margin 3.063812820681623 +830 51 optimizer.lr 0.06430597081274696 +830 51 negative_sampler.num_negs_per_pos 39.0 +830 51 training.batch_size 2.0 +830 52 model.embedding_dim 1.0 +830 52 model.scoring_fct_norm 2.0 +830 52 loss.margin 2.3401668644354 +830 52 optimizer.lr 0.0014946084990392012 +830 52 negative_sampler.num_negs_per_pos 30.0 +830 52 training.batch_size 0.0 +830 53 model.embedding_dim 2.0 +830 53 model.scoring_fct_norm 2.0 +830 53 loss.margin 3.368865419034192 +830 53 optimizer.lr 0.0036556981542354454 +830 53 negative_sampler.num_negs_per_pos 79.0 +830 53 training.batch_size 0.0 +830 54 model.embedding_dim 0.0 +830 54 model.scoring_fct_norm 2.0 +830 54 loss.margin 7.23610194279138 +830 54 optimizer.lr 0.015213325129968496 +830 54 negative_sampler.num_negs_per_pos 45.0 +830 54 training.batch_size 2.0 +830 
55 model.embedding_dim 1.0 +830 55 model.scoring_fct_norm 1.0 +830 55 loss.margin 8.073468156639603 +830 55 optimizer.lr 0.0010591367682893065 +830 55 negative_sampler.num_negs_per_pos 64.0 +830 55 training.batch_size 1.0 +830 56 model.embedding_dim 2.0 +830 56 model.scoring_fct_norm 1.0 +830 56 loss.margin 7.233996199609331 +830 56 optimizer.lr 0.027672284126263123 +830 56 negative_sampler.num_negs_per_pos 50.0 +830 56 training.batch_size 2.0 +830 57 model.embedding_dim 0.0 +830 57 model.scoring_fct_norm 1.0 +830 57 loss.margin 8.102429756260296 +830 57 optimizer.lr 0.00130631432659883 +830 57 negative_sampler.num_negs_per_pos 76.0 +830 57 training.batch_size 1.0 +830 58 model.embedding_dim 0.0 +830 58 model.scoring_fct_norm 1.0 +830 58 loss.margin 9.955718534685888 +830 58 optimizer.lr 0.004079120409204514 +830 58 negative_sampler.num_negs_per_pos 76.0 +830 58 training.batch_size 2.0 +830 59 model.embedding_dim 1.0 +830 59 model.scoring_fct_norm 2.0 +830 59 loss.margin 1.11081276817483 +830 59 optimizer.lr 0.0026089917134409263 +830 59 negative_sampler.num_negs_per_pos 80.0 +830 59 training.batch_size 2.0 +830 60 model.embedding_dim 0.0 +830 60 model.scoring_fct_norm 1.0 +830 60 loss.margin 2.883101651473466 +830 60 optimizer.lr 0.010238525009817874 +830 60 negative_sampler.num_negs_per_pos 87.0 +830 60 training.batch_size 2.0 +830 61 model.embedding_dim 2.0 +830 61 model.scoring_fct_norm 1.0 +830 61 loss.margin 9.142886641873154 +830 61 optimizer.lr 0.008027050424821415 +830 61 negative_sampler.num_negs_per_pos 83.0 +830 61 training.batch_size 1.0 +830 62 model.embedding_dim 1.0 +830 62 model.scoring_fct_norm 2.0 +830 62 loss.margin 7.029390210140846 +830 62 optimizer.lr 0.05352025124719094 +830 62 negative_sampler.num_negs_per_pos 66.0 +830 62 training.batch_size 2.0 +830 63 model.embedding_dim 0.0 +830 63 model.scoring_fct_norm 1.0 +830 63 loss.margin 5.2525086668353635 +830 63 optimizer.lr 0.004411936934543668 +830 63 negative_sampler.num_negs_per_pos 59.0 
+830 63 training.batch_size 0.0 +830 64 model.embedding_dim 0.0 +830 64 model.scoring_fct_norm 1.0 +830 64 loss.margin 2.1845330338789113 +830 64 optimizer.lr 0.0033662248807218615 +830 64 negative_sampler.num_negs_per_pos 29.0 +830 64 training.batch_size 0.0 +830 65 model.embedding_dim 0.0 +830 65 model.scoring_fct_norm 2.0 +830 65 loss.margin 3.2381812060925745 +830 65 optimizer.lr 0.05520405033064464 +830 65 negative_sampler.num_negs_per_pos 51.0 +830 65 training.batch_size 0.0 +830 66 model.embedding_dim 2.0 +830 66 model.scoring_fct_norm 2.0 +830 66 loss.margin 4.524485632093656 +830 66 optimizer.lr 0.06037916356982663 +830 66 negative_sampler.num_negs_per_pos 56.0 +830 66 training.batch_size 0.0 +830 67 model.embedding_dim 0.0 +830 67 model.scoring_fct_norm 2.0 +830 67 loss.margin 5.246802830557765 +830 67 optimizer.lr 0.07371753136633735 +830 67 negative_sampler.num_negs_per_pos 65.0 +830 67 training.batch_size 0.0 +830 68 model.embedding_dim 1.0 +830 68 model.scoring_fct_norm 2.0 +830 68 loss.margin 7.024457805159909 +830 68 optimizer.lr 0.003973811857327383 +830 68 negative_sampler.num_negs_per_pos 61.0 +830 68 training.batch_size 1.0 +830 69 model.embedding_dim 2.0 +830 69 model.scoring_fct_norm 1.0 +830 69 loss.margin 4.228669120745911 +830 69 optimizer.lr 0.005980372938481395 +830 69 negative_sampler.num_negs_per_pos 51.0 +830 69 training.batch_size 0.0 +830 70 model.embedding_dim 0.0 +830 70 model.scoring_fct_norm 1.0 +830 70 loss.margin 5.6570612024252345 +830 70 optimizer.lr 0.0014300825866023815 +830 70 negative_sampler.num_negs_per_pos 93.0 +830 70 training.batch_size 0.0 +830 71 model.embedding_dim 1.0 +830 71 model.scoring_fct_norm 1.0 +830 71 loss.margin 5.910401886494077 +830 71 optimizer.lr 0.005618072312644587 +830 71 negative_sampler.num_negs_per_pos 84.0 +830 71 training.batch_size 2.0 +830 72 model.embedding_dim 1.0 +830 72 model.scoring_fct_norm 2.0 +830 72 loss.margin 4.667296061959828 +830 72 optimizer.lr 0.01008963132128882 +830 72 
negative_sampler.num_negs_per_pos 2.0 +830 72 training.batch_size 1.0 +830 73 model.embedding_dim 0.0 +830 73 model.scoring_fct_norm 2.0 +830 73 loss.margin 0.9507158359631367 +830 73 optimizer.lr 0.0016634558211622878 +830 73 negative_sampler.num_negs_per_pos 47.0 +830 73 training.batch_size 0.0 +830 74 model.embedding_dim 0.0 +830 74 model.scoring_fct_norm 2.0 +830 74 loss.margin 8.087257142106562 +830 74 optimizer.lr 0.002527463292789085 +830 74 negative_sampler.num_negs_per_pos 79.0 +830 74 training.batch_size 1.0 +830 75 model.embedding_dim 1.0 +830 75 model.scoring_fct_norm 1.0 +830 75 loss.margin 8.813482926424205 +830 75 optimizer.lr 0.006372867688370566 +830 75 negative_sampler.num_negs_per_pos 19.0 +830 75 training.batch_size 1.0 +830 76 model.embedding_dim 2.0 +830 76 model.scoring_fct_norm 2.0 +830 76 loss.margin 7.078534478077973 +830 76 optimizer.lr 0.00640314991744282 +830 76 negative_sampler.num_negs_per_pos 30.0 +830 76 training.batch_size 2.0 +830 77 model.embedding_dim 1.0 +830 77 model.scoring_fct_norm 2.0 +830 77 loss.margin 3.3270509660387964 +830 77 optimizer.lr 0.07921932970832396 +830 77 negative_sampler.num_negs_per_pos 80.0 +830 77 training.batch_size 1.0 +830 78 model.embedding_dim 0.0 +830 78 model.scoring_fct_norm 1.0 +830 78 loss.margin 9.83751490576691 +830 78 optimizer.lr 0.007826694515640283 +830 78 negative_sampler.num_negs_per_pos 12.0 +830 78 training.batch_size 2.0 +830 79 model.embedding_dim 1.0 +830 79 model.scoring_fct_norm 2.0 +830 79 loss.margin 5.175993786618715 +830 79 optimizer.lr 0.0035061581773368693 +830 79 negative_sampler.num_negs_per_pos 22.0 +830 79 training.batch_size 1.0 +830 80 model.embedding_dim 1.0 +830 80 model.scoring_fct_norm 2.0 +830 80 loss.margin 9.903722835822546 +830 80 optimizer.lr 0.028927091949743987 +830 80 negative_sampler.num_negs_per_pos 13.0 +830 80 training.batch_size 0.0 +830 81 model.embedding_dim 2.0 +830 81 model.scoring_fct_norm 2.0 +830 81 loss.margin 6.9927076397865084 +830 81 
optimizer.lr 0.09748359450276352 +830 81 negative_sampler.num_negs_per_pos 48.0 +830 81 training.batch_size 1.0 +830 82 model.embedding_dim 0.0 +830 82 model.scoring_fct_norm 1.0 +830 82 loss.margin 9.699626516316723 +830 82 optimizer.lr 0.0038548205913004853 +830 82 negative_sampler.num_negs_per_pos 93.0 +830 82 training.batch_size 2.0 +830 83 model.embedding_dim 1.0 +830 83 model.scoring_fct_norm 1.0 +830 83 loss.margin 1.023450974057655 +830 83 optimizer.lr 0.06600716234633677 +830 83 negative_sampler.num_negs_per_pos 86.0 +830 83 training.batch_size 0.0 +830 84 model.embedding_dim 0.0 +830 84 model.scoring_fct_norm 2.0 +830 84 loss.margin 2.826070475391741 +830 84 optimizer.lr 0.002333438245943114 +830 84 negative_sampler.num_negs_per_pos 56.0 +830 84 training.batch_size 2.0 +830 85 model.embedding_dim 1.0 +830 85 model.scoring_fct_norm 2.0 +830 85 loss.margin 3.839602389927467 +830 85 optimizer.lr 0.004269409495279852 +830 85 negative_sampler.num_negs_per_pos 44.0 +830 85 training.batch_size 1.0 +830 86 model.embedding_dim 2.0 +830 86 model.scoring_fct_norm 2.0 +830 86 loss.margin 8.599511794332411 +830 86 optimizer.lr 0.022205522859605344 +830 86 negative_sampler.num_negs_per_pos 88.0 +830 86 training.batch_size 2.0 +830 87 model.embedding_dim 0.0 +830 87 model.scoring_fct_norm 1.0 +830 87 loss.margin 3.9642105045599623 +830 87 optimizer.lr 0.002773823763818141 +830 87 negative_sampler.num_negs_per_pos 69.0 +830 87 training.batch_size 0.0 +830 88 model.embedding_dim 0.0 +830 88 model.scoring_fct_norm 1.0 +830 88 loss.margin 1.554007378540962 +830 88 optimizer.lr 0.0157105067117099 +830 88 negative_sampler.num_negs_per_pos 19.0 +830 88 training.batch_size 2.0 +830 89 model.embedding_dim 1.0 +830 89 model.scoring_fct_norm 1.0 +830 89 loss.margin 9.193179337575666 +830 89 optimizer.lr 0.0018894926731928402 +830 89 negative_sampler.num_negs_per_pos 13.0 +830 89 training.batch_size 1.0 +830 90 model.embedding_dim 2.0 +830 90 model.scoring_fct_norm 2.0 +830 90 
loss.margin 3.610551864630117 +830 90 optimizer.lr 0.08195536466012109 +830 90 negative_sampler.num_negs_per_pos 95.0 +830 90 training.batch_size 0.0 +830 91 model.embedding_dim 0.0 +830 91 model.scoring_fct_norm 1.0 +830 91 loss.margin 6.311934909506781 +830 91 optimizer.lr 0.09899567757535274 +830 91 negative_sampler.num_negs_per_pos 74.0 +830 91 training.batch_size 0.0 +830 92 model.embedding_dim 1.0 +830 92 model.scoring_fct_norm 2.0 +830 92 loss.margin 5.312168566827449 +830 92 optimizer.lr 0.0024709117626614467 +830 92 negative_sampler.num_negs_per_pos 40.0 +830 92 training.batch_size 2.0 +830 93 model.embedding_dim 0.0 +830 93 model.scoring_fct_norm 1.0 +830 93 loss.margin 4.410794319146064 +830 93 optimizer.lr 0.028040512432559117 +830 93 negative_sampler.num_negs_per_pos 39.0 +830 93 training.batch_size 0.0 +830 94 model.embedding_dim 2.0 +830 94 model.scoring_fct_norm 1.0 +830 94 loss.margin 6.546209401194823 +830 94 optimizer.lr 0.0014001690373764539 +830 94 negative_sampler.num_negs_per_pos 24.0 +830 94 training.batch_size 0.0 +830 95 model.embedding_dim 0.0 +830 95 model.scoring_fct_norm 2.0 +830 95 loss.margin 8.221356956184357 +830 95 optimizer.lr 0.006801373734157025 +830 95 negative_sampler.num_negs_per_pos 69.0 +830 95 training.batch_size 1.0 +830 96 model.embedding_dim 0.0 +830 96 model.scoring_fct_norm 2.0 +830 96 loss.margin 6.431801787330079 +830 96 optimizer.lr 0.02159246355854701 +830 96 negative_sampler.num_negs_per_pos 42.0 +830 96 training.batch_size 2.0 +830 97 model.embedding_dim 2.0 +830 97 model.scoring_fct_norm 2.0 +830 97 loss.margin 5.3970785165366175 +830 97 optimizer.lr 0.0027348206439765273 +830 97 negative_sampler.num_negs_per_pos 54.0 +830 97 training.batch_size 2.0 +830 98 model.embedding_dim 0.0 +830 98 model.scoring_fct_norm 1.0 +830 98 loss.margin 2.6067410342650548 +830 98 optimizer.lr 0.01279921319097932 +830 98 negative_sampler.num_negs_per_pos 60.0 +830 98 training.batch_size 0.0 +830 99 model.embedding_dim 1.0 +830 99 
model.scoring_fct_norm 1.0 +830 99 loss.margin 1.9876546254445315 +830 99 optimizer.lr 0.004652313426709168 +830 99 negative_sampler.num_negs_per_pos 7.0 +830 99 training.batch_size 2.0 +830 100 model.embedding_dim 0.0 +830 100 model.scoring_fct_norm 2.0 +830 100 loss.margin 8.206249180602915 +830 100 optimizer.lr 0.03233787930066992 +830 100 negative_sampler.num_negs_per_pos 51.0 +830 100 training.batch_size 0.0 +830 1 dataset """kinships""" +830 1 model """transe""" +830 1 loss """marginranking""" +830 1 regularizer """no""" +830 1 optimizer """adam""" +830 1 training_loop """owa""" +830 1 negative_sampler """basic""" +830 1 evaluator """rankbased""" +830 2 dataset """kinships""" +830 2 model """transe""" +830 2 loss """marginranking""" +830 2 regularizer """no""" +830 2 optimizer """adam""" +830 2 training_loop """owa""" +830 2 negative_sampler """basic""" +830 2 evaluator """rankbased""" +830 3 dataset """kinships""" +830 3 model """transe""" +830 3 loss """marginranking""" +830 3 regularizer """no""" +830 3 optimizer """adam""" +830 3 training_loop """owa""" +830 3 negative_sampler """basic""" +830 3 evaluator """rankbased""" +830 4 dataset """kinships""" +830 4 model """transe""" +830 4 loss """marginranking""" +830 4 regularizer """no""" +830 4 optimizer """adam""" +830 4 training_loop """owa""" +830 4 negative_sampler """basic""" +830 4 evaluator """rankbased""" +830 5 dataset """kinships""" +830 5 model """transe""" +830 5 loss """marginranking""" +830 5 regularizer """no""" +830 5 optimizer """adam""" +830 5 training_loop """owa""" +830 5 negative_sampler """basic""" +830 5 evaluator """rankbased""" +830 6 dataset """kinships""" +830 6 model """transe""" +830 6 loss """marginranking""" +830 6 regularizer """no""" +830 6 optimizer """adam""" +830 6 training_loop """owa""" +830 6 negative_sampler """basic""" +830 6 evaluator """rankbased""" +830 7 dataset """kinships""" +830 7 model """transe""" +830 7 loss """marginranking""" +830 7 regularizer """no""" 
+830 7 optimizer """adam""" +830 7 training_loop """owa""" +830 7 negative_sampler """basic""" +830 7 evaluator """rankbased""" +830 8 dataset """kinships""" +830 8 model """transe""" +830 8 loss """marginranking""" +830 8 regularizer """no""" +830 8 optimizer """adam""" +830 8 training_loop """owa""" +830 8 negative_sampler """basic""" +830 8 evaluator """rankbased""" +830 9 dataset """kinships""" +830 9 model """transe""" +830 9 loss """marginranking""" +830 9 regularizer """no""" +830 9 optimizer """adam""" +830 9 training_loop """owa""" +830 9 negative_sampler """basic""" +830 9 evaluator """rankbased""" +830 10 dataset """kinships""" +830 10 model """transe""" +830 10 loss """marginranking""" +830 10 regularizer """no""" +830 10 optimizer """adam""" +830 10 training_loop """owa""" +830 10 negative_sampler """basic""" +830 10 evaluator """rankbased""" +830 11 dataset """kinships""" +830 11 model """transe""" +830 11 loss """marginranking""" +830 11 regularizer """no""" +830 11 optimizer """adam""" +830 11 training_loop """owa""" +830 11 negative_sampler """basic""" +830 11 evaluator """rankbased""" +830 12 dataset """kinships""" +830 12 model """transe""" +830 12 loss """marginranking""" +830 12 regularizer """no""" +830 12 optimizer """adam""" +830 12 training_loop """owa""" +830 12 negative_sampler """basic""" +830 12 evaluator """rankbased""" +830 13 dataset """kinships""" +830 13 model """transe""" +830 13 loss """marginranking""" +830 13 regularizer """no""" +830 13 optimizer """adam""" +830 13 training_loop """owa""" +830 13 negative_sampler """basic""" +830 13 evaluator """rankbased""" +830 14 dataset """kinships""" +830 14 model """transe""" +830 14 loss """marginranking""" +830 14 regularizer """no""" +830 14 optimizer """adam""" +830 14 training_loop """owa""" +830 14 negative_sampler """basic""" +830 14 evaluator """rankbased""" +830 15 dataset """kinships""" +830 15 model """transe""" +830 15 loss """marginranking""" +830 15 regularizer """no""" 
+830 15 optimizer """adam""" +830 15 training_loop """owa""" +830 15 negative_sampler """basic""" +830 15 evaluator """rankbased""" +830 16 dataset """kinships""" +830 16 model """transe""" +830 16 loss """marginranking""" +830 16 regularizer """no""" +830 16 optimizer """adam""" +830 16 training_loop """owa""" +830 16 negative_sampler """basic""" +830 16 evaluator """rankbased""" +830 17 dataset """kinships""" +830 17 model """transe""" +830 17 loss """marginranking""" +830 17 regularizer """no""" +830 17 optimizer """adam""" +830 17 training_loop """owa""" +830 17 negative_sampler """basic""" +830 17 evaluator """rankbased""" +830 18 dataset """kinships""" +830 18 model """transe""" +830 18 loss """marginranking""" +830 18 regularizer """no""" +830 18 optimizer """adam""" +830 18 training_loop """owa""" +830 18 negative_sampler """basic""" +830 18 evaluator """rankbased""" +830 19 dataset """kinships""" +830 19 model """transe""" +830 19 loss """marginranking""" +830 19 regularizer """no""" +830 19 optimizer """adam""" +830 19 training_loop """owa""" +830 19 negative_sampler """basic""" +830 19 evaluator """rankbased""" +830 20 dataset """kinships""" +830 20 model """transe""" +830 20 loss """marginranking""" +830 20 regularizer """no""" +830 20 optimizer """adam""" +830 20 training_loop """owa""" +830 20 negative_sampler """basic""" +830 20 evaluator """rankbased""" +830 21 dataset """kinships""" +830 21 model """transe""" +830 21 loss """marginranking""" +830 21 regularizer """no""" +830 21 optimizer """adam""" +830 21 training_loop """owa""" +830 21 negative_sampler """basic""" +830 21 evaluator """rankbased""" +830 22 dataset """kinships""" +830 22 model """transe""" +830 22 loss """marginranking""" +830 22 regularizer """no""" +830 22 optimizer """adam""" +830 22 training_loop """owa""" +830 22 negative_sampler """basic""" +830 22 evaluator """rankbased""" +830 23 dataset """kinships""" +830 23 model """transe""" +830 23 loss """marginranking""" +830 23 
regularizer """no""" +830 23 optimizer """adam""" +830 23 training_loop """owa""" +830 23 negative_sampler """basic""" +830 23 evaluator """rankbased""" +830 24 dataset """kinships""" +830 24 model """transe""" +830 24 loss """marginranking""" +830 24 regularizer """no""" +830 24 optimizer """adam""" +830 24 training_loop """owa""" +830 24 negative_sampler """basic""" +830 24 evaluator """rankbased""" +830 25 dataset """kinships""" +830 25 model """transe""" +830 25 loss """marginranking""" +830 25 regularizer """no""" +830 25 optimizer """adam""" +830 25 training_loop """owa""" +830 25 negative_sampler """basic""" +830 25 evaluator """rankbased""" +830 26 dataset """kinships""" +830 26 model """transe""" +830 26 loss """marginranking""" +830 26 regularizer """no""" +830 26 optimizer """adam""" +830 26 training_loop """owa""" +830 26 negative_sampler """basic""" +830 26 evaluator """rankbased""" +830 27 dataset """kinships""" +830 27 model """transe""" +830 27 loss """marginranking""" +830 27 regularizer """no""" +830 27 optimizer """adam""" +830 27 training_loop """owa""" +830 27 negative_sampler """basic""" +830 27 evaluator """rankbased""" +830 28 dataset """kinships""" +830 28 model """transe""" +830 28 loss """marginranking""" +830 28 regularizer """no""" +830 28 optimizer """adam""" +830 28 training_loop """owa""" +830 28 negative_sampler """basic""" +830 28 evaluator """rankbased""" +830 29 dataset """kinships""" +830 29 model """transe""" +830 29 loss """marginranking""" +830 29 regularizer """no""" +830 29 optimizer """adam""" +830 29 training_loop """owa""" +830 29 negative_sampler """basic""" +830 29 evaluator """rankbased""" +830 30 dataset """kinships""" +830 30 model """transe""" +830 30 loss """marginranking""" +830 30 regularizer """no""" +830 30 optimizer """adam""" +830 30 training_loop """owa""" +830 30 negative_sampler """basic""" +830 30 evaluator """rankbased""" +830 31 dataset """kinships""" +830 31 model """transe""" +830 31 loss 
"""marginranking""" +830 31 regularizer """no""" +830 31 optimizer """adam""" +830 31 training_loop """owa""" +830 31 negative_sampler """basic""" +830 31 evaluator """rankbased""" +830 32 dataset """kinships""" +830 32 model """transe""" +830 32 loss """marginranking""" +830 32 regularizer """no""" +830 32 optimizer """adam""" +830 32 training_loop """owa""" +830 32 negative_sampler """basic""" +830 32 evaluator """rankbased""" +830 33 dataset """kinships""" +830 33 model """transe""" +830 33 loss """marginranking""" +830 33 regularizer """no""" +830 33 optimizer """adam""" +830 33 training_loop """owa""" +830 33 negative_sampler """basic""" +830 33 evaluator """rankbased""" +830 34 dataset """kinships""" +830 34 model """transe""" +830 34 loss """marginranking""" +830 34 regularizer """no""" +830 34 optimizer """adam""" +830 34 training_loop """owa""" +830 34 negative_sampler """basic""" +830 34 evaluator """rankbased""" +830 35 dataset """kinships""" +830 35 model """transe""" +830 35 loss """marginranking""" +830 35 regularizer """no""" +830 35 optimizer """adam""" +830 35 training_loop """owa""" +830 35 negative_sampler """basic""" +830 35 evaluator """rankbased""" +830 36 dataset """kinships""" +830 36 model """transe""" +830 36 loss """marginranking""" +830 36 regularizer """no""" +830 36 optimizer """adam""" +830 36 training_loop """owa""" +830 36 negative_sampler """basic""" +830 36 evaluator """rankbased""" +830 37 dataset """kinships""" +830 37 model """transe""" +830 37 loss """marginranking""" +830 37 regularizer """no""" +830 37 optimizer """adam""" +830 37 training_loop """owa""" +830 37 negative_sampler """basic""" +830 37 evaluator """rankbased""" +830 38 dataset """kinships""" +830 38 model """transe""" +830 38 loss """marginranking""" +830 38 regularizer """no""" +830 38 optimizer """adam""" +830 38 training_loop """owa""" +830 38 negative_sampler """basic""" +830 38 evaluator """rankbased""" +830 39 dataset """kinships""" +830 39 model 
"""transe""" +830 39 loss """marginranking""" +830 39 regularizer """no""" +830 39 optimizer """adam""" +830 39 training_loop """owa""" +830 39 negative_sampler """basic""" +830 39 evaluator """rankbased""" +830 40 dataset """kinships""" +830 40 model """transe""" +830 40 loss """marginranking""" +830 40 regularizer """no""" +830 40 optimizer """adam""" +830 40 training_loop """owa""" +830 40 negative_sampler """basic""" +830 40 evaluator """rankbased""" +830 41 dataset """kinships""" +830 41 model """transe""" +830 41 loss """marginranking""" +830 41 regularizer """no""" +830 41 optimizer """adam""" +830 41 training_loop """owa""" +830 41 negative_sampler """basic""" +830 41 evaluator """rankbased""" +830 42 dataset """kinships""" +830 42 model """transe""" +830 42 loss """marginranking""" +830 42 regularizer """no""" +830 42 optimizer """adam""" +830 42 training_loop """owa""" +830 42 negative_sampler """basic""" +830 42 evaluator """rankbased""" +830 43 dataset """kinships""" +830 43 model """transe""" +830 43 loss """marginranking""" +830 43 regularizer """no""" +830 43 optimizer """adam""" +830 43 training_loop """owa""" +830 43 negative_sampler """basic""" +830 43 evaluator """rankbased""" +830 44 dataset """kinships""" +830 44 model """transe""" +830 44 loss """marginranking""" +830 44 regularizer """no""" +830 44 optimizer """adam""" +830 44 training_loop """owa""" +830 44 negative_sampler """basic""" +830 44 evaluator """rankbased""" +830 45 dataset """kinships""" +830 45 model """transe""" +830 45 loss """marginranking""" +830 45 regularizer """no""" +830 45 optimizer """adam""" +830 45 training_loop """owa""" +830 45 negative_sampler """basic""" +830 45 evaluator """rankbased""" +830 46 dataset """kinships""" +830 46 model """transe""" +830 46 loss """marginranking""" +830 46 regularizer """no""" +830 46 optimizer """adam""" +830 46 training_loop """owa""" +830 46 negative_sampler """basic""" +830 46 evaluator """rankbased""" +830 47 dataset 
"""kinships""" +830 47 model """transe""" +830 47 loss """marginranking""" +830 47 regularizer """no""" +830 47 optimizer """adam""" +830 47 training_loop """owa""" +830 47 negative_sampler """basic""" +830 47 evaluator """rankbased""" +830 48 dataset """kinships""" +830 48 model """transe""" +830 48 loss """marginranking""" +830 48 regularizer """no""" +830 48 optimizer """adam""" +830 48 training_loop """owa""" +830 48 negative_sampler """basic""" +830 48 evaluator """rankbased""" +830 49 dataset """kinships""" +830 49 model """transe""" +830 49 loss """marginranking""" +830 49 regularizer """no""" +830 49 optimizer """adam""" +830 49 training_loop """owa""" +830 49 negative_sampler """basic""" +830 49 evaluator """rankbased""" +830 50 dataset """kinships""" +830 50 model """transe""" +830 50 loss """marginranking""" +830 50 regularizer """no""" +830 50 optimizer """adam""" +830 50 training_loop """owa""" +830 50 negative_sampler """basic""" +830 50 evaluator """rankbased""" +830 51 dataset """kinships""" +830 51 model """transe""" +830 51 loss """marginranking""" +830 51 regularizer """no""" +830 51 optimizer """adam""" +830 51 training_loop """owa""" +830 51 negative_sampler """basic""" +830 51 evaluator """rankbased""" +830 52 dataset """kinships""" +830 52 model """transe""" +830 52 loss """marginranking""" +830 52 regularizer """no""" +830 52 optimizer """adam""" +830 52 training_loop """owa""" +830 52 negative_sampler """basic""" +830 52 evaluator """rankbased""" +830 53 dataset """kinships""" +830 53 model """transe""" +830 53 loss """marginranking""" +830 53 regularizer """no""" +830 53 optimizer """adam""" +830 53 training_loop """owa""" +830 53 negative_sampler """basic""" +830 53 evaluator """rankbased""" +830 54 dataset """kinships""" +830 54 model """transe""" +830 54 loss """marginranking""" +830 54 regularizer """no""" +830 54 optimizer """adam""" +830 54 training_loop """owa""" +830 54 negative_sampler """basic""" +830 54 evaluator """rankbased""" 
+830 55 dataset """kinships""" +830 55 model """transe""" +830 55 loss """marginranking""" +830 55 regularizer """no""" +830 55 optimizer """adam""" +830 55 training_loop """owa""" +830 55 negative_sampler """basic""" +830 55 evaluator """rankbased""" +830 56 dataset """kinships""" +830 56 model """transe""" +830 56 loss """marginranking""" +830 56 regularizer """no""" +830 56 optimizer """adam""" +830 56 training_loop """owa""" +830 56 negative_sampler """basic""" +830 56 evaluator """rankbased""" +830 57 dataset """kinships""" +830 57 model """transe""" +830 57 loss """marginranking""" +830 57 regularizer """no""" +830 57 optimizer """adam""" +830 57 training_loop """owa""" +830 57 negative_sampler """basic""" +830 57 evaluator """rankbased""" +830 58 dataset """kinships""" +830 58 model """transe""" +830 58 loss """marginranking""" +830 58 regularizer """no""" +830 58 optimizer """adam""" +830 58 training_loop """owa""" +830 58 negative_sampler """basic""" +830 58 evaluator """rankbased""" +830 59 dataset """kinships""" +830 59 model """transe""" +830 59 loss """marginranking""" +830 59 regularizer """no""" +830 59 optimizer """adam""" +830 59 training_loop """owa""" +830 59 negative_sampler """basic""" +830 59 evaluator """rankbased""" +830 60 dataset """kinships""" +830 60 model """transe""" +830 60 loss """marginranking""" +830 60 regularizer """no""" +830 60 optimizer """adam""" +830 60 training_loop """owa""" +830 60 negative_sampler """basic""" +830 60 evaluator """rankbased""" +830 61 dataset """kinships""" +830 61 model """transe""" +830 61 loss """marginranking""" +830 61 regularizer """no""" +830 61 optimizer """adam""" +830 61 training_loop """owa""" +830 61 negative_sampler """basic""" +830 61 evaluator """rankbased""" +830 62 dataset """kinships""" +830 62 model """transe""" +830 62 loss """marginranking""" +830 62 regularizer """no""" +830 62 optimizer """adam""" +830 62 training_loop """owa""" +830 62 negative_sampler """basic""" +830 62 evaluator 
"""rankbased""" +830 63 dataset """kinships""" +830 63 model """transe""" +830 63 loss """marginranking""" +830 63 regularizer """no""" +830 63 optimizer """adam""" +830 63 training_loop """owa""" +830 63 negative_sampler """basic""" +830 63 evaluator """rankbased""" +830 64 dataset """kinships""" +830 64 model """transe""" +830 64 loss """marginranking""" +830 64 regularizer """no""" +830 64 optimizer """adam""" +830 64 training_loop """owa""" +830 64 negative_sampler """basic""" +830 64 evaluator """rankbased""" +830 65 dataset """kinships""" +830 65 model """transe""" +830 65 loss """marginranking""" +830 65 regularizer """no""" +830 65 optimizer """adam""" +830 65 training_loop """owa""" +830 65 negative_sampler """basic""" +830 65 evaluator """rankbased""" +830 66 dataset """kinships""" +830 66 model """transe""" +830 66 loss """marginranking""" +830 66 regularizer """no""" +830 66 optimizer """adam""" +830 66 training_loop """owa""" +830 66 negative_sampler """basic""" +830 66 evaluator """rankbased""" +830 67 dataset """kinships""" +830 67 model """transe""" +830 67 loss """marginranking""" +830 67 regularizer """no""" +830 67 optimizer """adam""" +830 67 training_loop """owa""" +830 67 negative_sampler """basic""" +830 67 evaluator """rankbased""" +830 68 dataset """kinships""" +830 68 model """transe""" +830 68 loss """marginranking""" +830 68 regularizer """no""" +830 68 optimizer """adam""" +830 68 training_loop """owa""" +830 68 negative_sampler """basic""" +830 68 evaluator """rankbased""" +830 69 dataset """kinships""" +830 69 model """transe""" +830 69 loss """marginranking""" +830 69 regularizer """no""" +830 69 optimizer """adam""" +830 69 training_loop """owa""" +830 69 negative_sampler """basic""" +830 69 evaluator """rankbased""" +830 70 dataset """kinships""" +830 70 model """transe""" +830 70 loss """marginranking""" +830 70 regularizer """no""" +830 70 optimizer """adam""" +830 70 training_loop """owa""" +830 70 negative_sampler """basic""" 
+830 70 evaluator """rankbased""" +830 71 dataset """kinships""" +830 71 model """transe""" +830 71 loss """marginranking""" +830 71 regularizer """no""" +830 71 optimizer """adam""" +830 71 training_loop """owa""" +830 71 negative_sampler """basic""" +830 71 evaluator """rankbased""" +830 72 dataset """kinships""" +830 72 model """transe""" +830 72 loss """marginranking""" +830 72 regularizer """no""" +830 72 optimizer """adam""" +830 72 training_loop """owa""" +830 72 negative_sampler """basic""" +830 72 evaluator """rankbased""" +830 73 dataset """kinships""" +830 73 model """transe""" +830 73 loss """marginranking""" +830 73 regularizer """no""" +830 73 optimizer """adam""" +830 73 training_loop """owa""" +830 73 negative_sampler """basic""" +830 73 evaluator """rankbased""" +830 74 dataset """kinships""" +830 74 model """transe""" +830 74 loss """marginranking""" +830 74 regularizer """no""" +830 74 optimizer """adam""" +830 74 training_loop """owa""" +830 74 negative_sampler """basic""" +830 74 evaluator """rankbased""" +830 75 dataset """kinships""" +830 75 model """transe""" +830 75 loss """marginranking""" +830 75 regularizer """no""" +830 75 optimizer """adam""" +830 75 training_loop """owa""" +830 75 negative_sampler """basic""" +830 75 evaluator """rankbased""" +830 76 dataset """kinships""" +830 76 model """transe""" +830 76 loss """marginranking""" +830 76 regularizer """no""" +830 76 optimizer """adam""" +830 76 training_loop """owa""" +830 76 negative_sampler """basic""" +830 76 evaluator """rankbased""" +830 77 dataset """kinships""" +830 77 model """transe""" +830 77 loss """marginranking""" +830 77 regularizer """no""" +830 77 optimizer """adam""" +830 77 training_loop """owa""" +830 77 negative_sampler """basic""" +830 77 evaluator """rankbased""" +830 78 dataset """kinships""" +830 78 model """transe""" +830 78 loss """marginranking""" +830 78 regularizer """no""" +830 78 optimizer """adam""" +830 78 training_loop """owa""" +830 78 
negative_sampler """basic""" +830 78 evaluator """rankbased""" +830 79 dataset """kinships""" +830 79 model """transe""" +830 79 loss """marginranking""" +830 79 regularizer """no""" +830 79 optimizer """adam""" +830 79 training_loop """owa""" +830 79 negative_sampler """basic""" +830 79 evaluator """rankbased""" +830 80 dataset """kinships""" +830 80 model """transe""" +830 80 loss """marginranking""" +830 80 regularizer """no""" +830 80 optimizer """adam""" +830 80 training_loop """owa""" +830 80 negative_sampler """basic""" +830 80 evaluator """rankbased""" +830 81 dataset """kinships""" +830 81 model """transe""" +830 81 loss """marginranking""" +830 81 regularizer """no""" +830 81 optimizer """adam""" +830 81 training_loop """owa""" +830 81 negative_sampler """basic""" +830 81 evaluator """rankbased""" +830 82 dataset """kinships""" +830 82 model """transe""" +830 82 loss """marginranking""" +830 82 regularizer """no""" +830 82 optimizer """adam""" +830 82 training_loop """owa""" +830 82 negative_sampler """basic""" +830 82 evaluator """rankbased""" +830 83 dataset """kinships""" +830 83 model """transe""" +830 83 loss """marginranking""" +830 83 regularizer """no""" +830 83 optimizer """adam""" +830 83 training_loop """owa""" +830 83 negative_sampler """basic""" +830 83 evaluator """rankbased""" +830 84 dataset """kinships""" +830 84 model """transe""" +830 84 loss """marginranking""" +830 84 regularizer """no""" +830 84 optimizer """adam""" +830 84 training_loop """owa""" +830 84 negative_sampler """basic""" +830 84 evaluator """rankbased""" +830 85 dataset """kinships""" +830 85 model """transe""" +830 85 loss """marginranking""" +830 85 regularizer """no""" +830 85 optimizer """adam""" +830 85 training_loop """owa""" +830 85 negative_sampler """basic""" +830 85 evaluator """rankbased""" +830 86 dataset """kinships""" +830 86 model """transe""" +830 86 loss """marginranking""" +830 86 regularizer """no""" +830 86 optimizer """adam""" +830 86 training_loop 
"""owa""" +830 86 negative_sampler """basic""" +830 86 evaluator """rankbased""" +830 87 dataset """kinships""" +830 87 model """transe""" +830 87 loss """marginranking""" +830 87 regularizer """no""" +830 87 optimizer """adam""" +830 87 training_loop """owa""" +830 87 negative_sampler """basic""" +830 87 evaluator """rankbased""" +830 88 dataset """kinships""" +830 88 model """transe""" +830 88 loss """marginranking""" +830 88 regularizer """no""" +830 88 optimizer """adam""" +830 88 training_loop """owa""" +830 88 negative_sampler """basic""" +830 88 evaluator """rankbased""" +830 89 dataset """kinships""" +830 89 model """transe""" +830 89 loss """marginranking""" +830 89 regularizer """no""" +830 89 optimizer """adam""" +830 89 training_loop """owa""" +830 89 negative_sampler """basic""" +830 89 evaluator """rankbased""" +830 90 dataset """kinships""" +830 90 model """transe""" +830 90 loss """marginranking""" +830 90 regularizer """no""" +830 90 optimizer """adam""" +830 90 training_loop """owa""" +830 90 negative_sampler """basic""" +830 90 evaluator """rankbased""" +830 91 dataset """kinships""" +830 91 model """transe""" +830 91 loss """marginranking""" +830 91 regularizer """no""" +830 91 optimizer """adam""" +830 91 training_loop """owa""" +830 91 negative_sampler """basic""" +830 91 evaluator """rankbased""" +830 92 dataset """kinships""" +830 92 model """transe""" +830 92 loss """marginranking""" +830 92 regularizer """no""" +830 92 optimizer """adam""" +830 92 training_loop """owa""" +830 92 negative_sampler """basic""" +830 92 evaluator """rankbased""" +830 93 dataset """kinships""" +830 93 model """transe""" +830 93 loss """marginranking""" +830 93 regularizer """no""" +830 93 optimizer """adam""" +830 93 training_loop """owa""" +830 93 negative_sampler """basic""" +830 93 evaluator """rankbased""" +830 94 dataset """kinships""" +830 94 model """transe""" +830 94 loss """marginranking""" +830 94 regularizer """no""" +830 94 optimizer """adam""" +830 
94 training_loop """owa""" +830 94 negative_sampler """basic""" +830 94 evaluator """rankbased""" +830 95 dataset """kinships""" +830 95 model """transe""" +830 95 loss """marginranking""" +830 95 regularizer """no""" +830 95 optimizer """adam""" +830 95 training_loop """owa""" +830 95 negative_sampler """basic""" +830 95 evaluator """rankbased""" +830 96 dataset """kinships""" +830 96 model """transe""" +830 96 loss """marginranking""" +830 96 regularizer """no""" +830 96 optimizer """adam""" +830 96 training_loop """owa""" +830 96 negative_sampler """basic""" +830 96 evaluator """rankbased""" +830 97 dataset """kinships""" +830 97 model """transe""" +830 97 loss """marginranking""" +830 97 regularizer """no""" +830 97 optimizer """adam""" +830 97 training_loop """owa""" +830 97 negative_sampler """basic""" +830 97 evaluator """rankbased""" +830 98 dataset """kinships""" +830 98 model """transe""" +830 98 loss """marginranking""" +830 98 regularizer """no""" +830 98 optimizer """adam""" +830 98 training_loop """owa""" +830 98 negative_sampler """basic""" +830 98 evaluator """rankbased""" +830 99 dataset """kinships""" +830 99 model """transe""" +830 99 loss """marginranking""" +830 99 regularizer """no""" +830 99 optimizer """adam""" +830 99 training_loop """owa""" +830 99 negative_sampler """basic""" +830 99 evaluator """rankbased""" +830 100 dataset """kinships""" +830 100 model """transe""" +830 100 loss """marginranking""" +830 100 regularizer """no""" +830 100 optimizer """adam""" +830 100 training_loop """owa""" +830 100 negative_sampler """basic""" +830 100 evaluator """rankbased""" +831 1 model.embedding_dim 0.0 +831 1 model.scoring_fct_norm 1.0 +831 1 loss.margin 8.075016396807364 +831 1 loss.adversarial_temperature 0.5339950114347437 +831 1 optimizer.lr 0.01708209393136911 +831 1 negative_sampler.num_negs_per_pos 16.0 +831 1 training.batch_size 2.0 +831 2 model.embedding_dim 1.0 +831 2 model.scoring_fct_norm 2.0 +831 2 loss.margin 2.5885415911790823 +831 
2 loss.adversarial_temperature 0.1515759654439907 +831 2 optimizer.lr 0.0020448246510183887 +831 2 negative_sampler.num_negs_per_pos 84.0 +831 2 training.batch_size 2.0 +831 3 model.embedding_dim 0.0 +831 3 model.scoring_fct_norm 1.0 +831 3 loss.margin 17.210610656347935 +831 3 loss.adversarial_temperature 0.2566375812101581 +831 3 optimizer.lr 0.04664155395327694 +831 3 negative_sampler.num_negs_per_pos 29.0 +831 3 training.batch_size 2.0 +831 4 model.embedding_dim 2.0 +831 4 model.scoring_fct_norm 1.0 +831 4 loss.margin 13.546614430368331 +831 4 loss.adversarial_temperature 0.658271177858034 +831 4 optimizer.lr 0.001776054627596573 +831 4 negative_sampler.num_negs_per_pos 83.0 +831 4 training.batch_size 1.0 +831 5 model.embedding_dim 1.0 +831 5 model.scoring_fct_norm 1.0 +831 5 loss.margin 22.39178034210992 +831 5 loss.adversarial_temperature 0.7737514213085999 +831 5 optimizer.lr 0.002468890688683381 +831 5 negative_sampler.num_negs_per_pos 35.0 +831 5 training.batch_size 1.0 +831 6 model.embedding_dim 2.0 +831 6 model.scoring_fct_norm 2.0 +831 6 loss.margin 25.111432737492013 +831 6 loss.adversarial_temperature 0.8300821325445664 +831 6 optimizer.lr 0.0011084236458425323 +831 6 negative_sampler.num_negs_per_pos 56.0 +831 6 training.batch_size 2.0 +831 7 model.embedding_dim 1.0 +831 7 model.scoring_fct_norm 2.0 +831 7 loss.margin 18.813836011581497 +831 7 loss.adversarial_temperature 0.6193787706564954 +831 7 optimizer.lr 0.005114118237734158 +831 7 negative_sampler.num_negs_per_pos 29.0 +831 7 training.batch_size 0.0 +831 8 model.embedding_dim 0.0 +831 8 model.scoring_fct_norm 1.0 +831 8 loss.margin 22.016833364327038 +831 8 loss.adversarial_temperature 0.9115319893131573 +831 8 optimizer.lr 0.042259272028066217 +831 8 negative_sampler.num_negs_per_pos 92.0 +831 8 training.batch_size 2.0 +831 9 model.embedding_dim 2.0 +831 9 model.scoring_fct_norm 2.0 +831 9 loss.margin 11.62667810188355 +831 9 loss.adversarial_temperature 0.8443356622038017 +831 9 optimizer.lr 
0.004367597486035122 +831 9 negative_sampler.num_negs_per_pos 16.0 +831 9 training.batch_size 0.0 +831 10 model.embedding_dim 2.0 +831 10 model.scoring_fct_norm 2.0 +831 10 loss.margin 28.01083761237071 +831 10 loss.adversarial_temperature 0.887639375155789 +831 10 optimizer.lr 0.037019954902678454 +831 10 negative_sampler.num_negs_per_pos 4.0 +831 10 training.batch_size 0.0 +831 11 model.embedding_dim 1.0 +831 11 model.scoring_fct_norm 1.0 +831 11 loss.margin 13.355405999018195 +831 11 loss.adversarial_temperature 0.21460619617415122 +831 11 optimizer.lr 0.012776186655805452 +831 11 negative_sampler.num_negs_per_pos 64.0 +831 11 training.batch_size 1.0 +831 12 model.embedding_dim 2.0 +831 12 model.scoring_fct_norm 1.0 +831 12 loss.margin 21.712551874872887 +831 12 loss.adversarial_temperature 0.15072159123811293 +831 12 optimizer.lr 0.0022498846876752993 +831 12 negative_sampler.num_negs_per_pos 5.0 +831 12 training.batch_size 0.0 +831 13 model.embedding_dim 1.0 +831 13 model.scoring_fct_norm 2.0 +831 13 loss.margin 25.466818778907488 +831 13 loss.adversarial_temperature 0.48512715083645097 +831 13 optimizer.lr 0.0036587908402952634 +831 13 negative_sampler.num_negs_per_pos 93.0 +831 13 training.batch_size 1.0 +831 14 model.embedding_dim 2.0 +831 14 model.scoring_fct_norm 1.0 +831 14 loss.margin 2.7599535240781607 +831 14 loss.adversarial_temperature 0.7239662079316533 +831 14 optimizer.lr 0.0027646927691274744 +831 14 negative_sampler.num_negs_per_pos 46.0 +831 14 training.batch_size 1.0 +831 15 model.embedding_dim 0.0 +831 15 model.scoring_fct_norm 1.0 +831 15 loss.margin 7.812534199028037 +831 15 loss.adversarial_temperature 0.5378167812647768 +831 15 optimizer.lr 0.06970665391612171 +831 15 negative_sampler.num_negs_per_pos 20.0 +831 15 training.batch_size 1.0 +831 16 model.embedding_dim 1.0 +831 16 model.scoring_fct_norm 2.0 +831 16 loss.margin 7.114431394371503 +831 16 loss.adversarial_temperature 0.21941897818670858 +831 16 optimizer.lr 
0.0015130823165698162 +831 16 negative_sampler.num_negs_per_pos 86.0 +831 16 training.batch_size 2.0 +831 17 model.embedding_dim 1.0 +831 17 model.scoring_fct_norm 2.0 +831 17 loss.margin 28.84381161156663 +831 17 loss.adversarial_temperature 0.9967759942013538 +831 17 optimizer.lr 0.002564866048920371 +831 17 negative_sampler.num_negs_per_pos 7.0 +831 17 training.batch_size 0.0 +831 18 model.embedding_dim 2.0 +831 18 model.scoring_fct_norm 1.0 +831 18 loss.margin 9.861822448444567 +831 18 loss.adversarial_temperature 0.8691646678130628 +831 18 optimizer.lr 0.030320472555986324 +831 18 negative_sampler.num_negs_per_pos 21.0 +831 18 training.batch_size 0.0 +831 19 model.embedding_dim 2.0 +831 19 model.scoring_fct_norm 2.0 +831 19 loss.margin 12.680065301755162 +831 19 loss.adversarial_temperature 0.4889935376551652 +831 19 optimizer.lr 0.05709895273679339 +831 19 negative_sampler.num_negs_per_pos 50.0 +831 19 training.batch_size 0.0 +831 20 model.embedding_dim 0.0 +831 20 model.scoring_fct_norm 1.0 +831 20 loss.margin 4.315237083706155 +831 20 loss.adversarial_temperature 0.8946967748975254 +831 20 optimizer.lr 0.021153321566616874 +831 20 negative_sampler.num_negs_per_pos 53.0 +831 20 training.batch_size 2.0 +831 21 model.embedding_dim 1.0 +831 21 model.scoring_fct_norm 1.0 +831 21 loss.margin 21.39477197668208 +831 21 loss.adversarial_temperature 0.36715230790113595 +831 21 optimizer.lr 0.008105345733778321 +831 21 negative_sampler.num_negs_per_pos 57.0 +831 21 training.batch_size 0.0 +831 22 model.embedding_dim 0.0 +831 22 model.scoring_fct_norm 2.0 +831 22 loss.margin 17.596492390589557 +831 22 loss.adversarial_temperature 0.556696514826528 +831 22 optimizer.lr 0.001948630335895928 +831 22 negative_sampler.num_negs_per_pos 43.0 +831 22 training.batch_size 2.0 +831 23 model.embedding_dim 1.0 +831 23 model.scoring_fct_norm 1.0 +831 23 loss.margin 19.784285473360438 +831 23 loss.adversarial_temperature 0.8441519871195177 +831 23 optimizer.lr 0.006421481353696507 
+831 23 negative_sampler.num_negs_per_pos 25.0 +831 23 training.batch_size 0.0 +831 24 model.embedding_dim 0.0 +831 24 model.scoring_fct_norm 1.0 +831 24 loss.margin 25.469207523917515 +831 24 loss.adversarial_temperature 0.3701452977781342 +831 24 optimizer.lr 0.02558415164926246 +831 24 negative_sampler.num_negs_per_pos 69.0 +831 24 training.batch_size 1.0 +831 25 model.embedding_dim 2.0 +831 25 model.scoring_fct_norm 2.0 +831 25 loss.margin 20.781124722603195 +831 25 loss.adversarial_temperature 0.29285855633824787 +831 25 optimizer.lr 0.04886625814031199 +831 25 negative_sampler.num_negs_per_pos 15.0 +831 25 training.batch_size 1.0 +831 26 model.embedding_dim 2.0 +831 26 model.scoring_fct_norm 1.0 +831 26 loss.margin 2.193595231587114 +831 26 loss.adversarial_temperature 0.23875213484744964 +831 26 optimizer.lr 0.002264993256761853 +831 26 negative_sampler.num_negs_per_pos 5.0 +831 26 training.batch_size 0.0 +831 27 model.embedding_dim 1.0 +831 27 model.scoring_fct_norm 2.0 +831 27 loss.margin 18.207286332257333 +831 27 loss.adversarial_temperature 0.8906629408544864 +831 27 optimizer.lr 0.02555841837915587 +831 27 negative_sampler.num_negs_per_pos 19.0 +831 27 training.batch_size 1.0 +831 28 model.embedding_dim 1.0 +831 28 model.scoring_fct_norm 2.0 +831 28 loss.margin 3.5654147154564324 +831 28 loss.adversarial_temperature 0.8832091369474433 +831 28 optimizer.lr 0.09958623314505093 +831 28 negative_sampler.num_negs_per_pos 3.0 +831 28 training.batch_size 1.0 +831 29 model.embedding_dim 2.0 +831 29 model.scoring_fct_norm 2.0 +831 29 loss.margin 8.02238568066247 +831 29 loss.adversarial_temperature 0.6125197805214666 +831 29 optimizer.lr 0.052823647617203325 +831 29 negative_sampler.num_negs_per_pos 59.0 +831 29 training.batch_size 1.0 +831 30 model.embedding_dim 1.0 +831 30 model.scoring_fct_norm 2.0 +831 30 loss.margin 18.280687714879008 +831 30 loss.adversarial_temperature 0.19237355397875122 +831 30 optimizer.lr 0.0015724859606665864 +831 30 
negative_sampler.num_negs_per_pos 69.0 +831 30 training.batch_size 0.0 +831 31 model.embedding_dim 1.0 +831 31 model.scoring_fct_norm 2.0 +831 31 loss.margin 13.613111907047948 +831 31 loss.adversarial_temperature 0.9523552499375663 +831 31 optimizer.lr 0.032209095878228296 +831 31 negative_sampler.num_negs_per_pos 86.0 +831 31 training.batch_size 2.0 +831 32 model.embedding_dim 1.0 +831 32 model.scoring_fct_norm 1.0 +831 32 loss.margin 16.72381584285781 +831 32 loss.adversarial_temperature 0.3625648507596052 +831 32 optimizer.lr 0.0018614513156923476 +831 32 negative_sampler.num_negs_per_pos 83.0 +831 32 training.batch_size 2.0 +831 33 model.embedding_dim 0.0 +831 33 model.scoring_fct_norm 2.0 +831 33 loss.margin 4.090962965300729 +831 33 loss.adversarial_temperature 0.5413600174319383 +831 33 optimizer.lr 0.018137710699735413 +831 33 negative_sampler.num_negs_per_pos 91.0 +831 33 training.batch_size 1.0 +831 34 model.embedding_dim 0.0 +831 34 model.scoring_fct_norm 1.0 +831 34 loss.margin 9.5241487091474 +831 34 loss.adversarial_temperature 0.5393254096542857 +831 34 optimizer.lr 0.00122758130973237 +831 34 negative_sampler.num_negs_per_pos 21.0 +831 34 training.batch_size 0.0 +831 35 model.embedding_dim 1.0 +831 35 model.scoring_fct_norm 1.0 +831 35 loss.margin 2.15948518327331 +831 35 loss.adversarial_temperature 0.1909035054476026 +831 35 optimizer.lr 0.0079939496880091 +831 35 negative_sampler.num_negs_per_pos 40.0 +831 35 training.batch_size 2.0 +831 36 model.embedding_dim 0.0 +831 36 model.scoring_fct_norm 2.0 +831 36 loss.margin 16.44160878776068 +831 36 loss.adversarial_temperature 0.3334500492063819 +831 36 optimizer.lr 0.0023273343078833585 +831 36 negative_sampler.num_negs_per_pos 2.0 +831 36 training.batch_size 0.0 +831 37 model.embedding_dim 2.0 +831 37 model.scoring_fct_norm 2.0 +831 37 loss.margin 23.007457421772305 +831 37 loss.adversarial_temperature 0.8909807742182771 +831 37 optimizer.lr 0.05750239425419085 +831 37 
negative_sampler.num_negs_per_pos 90.0 +831 37 training.batch_size 2.0 +831 38 model.embedding_dim 2.0 +831 38 model.scoring_fct_norm 1.0 +831 38 loss.margin 4.940097583425128 +831 38 loss.adversarial_temperature 0.5613130716576648 +831 38 optimizer.lr 0.06370631385829079 +831 38 negative_sampler.num_negs_per_pos 15.0 +831 38 training.batch_size 2.0 +831 39 model.embedding_dim 1.0 +831 39 model.scoring_fct_norm 1.0 +831 39 loss.margin 12.2032491078361 +831 39 loss.adversarial_temperature 0.36226042464301633 +831 39 optimizer.lr 0.0013432435678699837 +831 39 negative_sampler.num_negs_per_pos 97.0 +831 39 training.batch_size 1.0 +831 40 model.embedding_dim 2.0 +831 40 model.scoring_fct_norm 1.0 +831 40 loss.margin 28.72694884734248 +831 40 loss.adversarial_temperature 0.6462356109196586 +831 40 optimizer.lr 0.01389697673084459 +831 40 negative_sampler.num_negs_per_pos 35.0 +831 40 training.batch_size 0.0 +831 41 model.embedding_dim 1.0 +831 41 model.scoring_fct_norm 1.0 +831 41 loss.margin 19.392869267921032 +831 41 loss.adversarial_temperature 0.48256696607378047 +831 41 optimizer.lr 0.003371335826464714 +831 41 negative_sampler.num_negs_per_pos 3.0 +831 41 training.batch_size 0.0 +831 42 model.embedding_dim 1.0 +831 42 model.scoring_fct_norm 2.0 +831 42 loss.margin 4.573822773778593 +831 42 loss.adversarial_temperature 0.8334177121188429 +831 42 optimizer.lr 0.0844112408823141 +831 42 negative_sampler.num_negs_per_pos 3.0 +831 42 training.batch_size 1.0 +831 43 model.embedding_dim 1.0 +831 43 model.scoring_fct_norm 2.0 +831 43 loss.margin 16.735539431176107 +831 43 loss.adversarial_temperature 0.8699826406479635 +831 43 optimizer.lr 0.085211259094091 +831 43 negative_sampler.num_negs_per_pos 73.0 +831 43 training.batch_size 2.0 +831 44 model.embedding_dim 0.0 +831 44 model.scoring_fct_norm 2.0 +831 44 loss.margin 21.065335282250917 +831 44 loss.adversarial_temperature 0.4591581730695602 +831 44 optimizer.lr 0.00556562095902568 +831 44 
negative_sampler.num_negs_per_pos 43.0 +831 44 training.batch_size 0.0 +831 45 model.embedding_dim 0.0 +831 45 model.scoring_fct_norm 1.0 +831 45 loss.margin 11.804704791717226 +831 45 loss.adversarial_temperature 0.7888421499988867 +831 45 optimizer.lr 0.017891915984423315 +831 45 negative_sampler.num_negs_per_pos 26.0 +831 45 training.batch_size 2.0 +831 46 model.embedding_dim 0.0 +831 46 model.scoring_fct_norm 2.0 +831 46 loss.margin 15.882031281791155 +831 46 loss.adversarial_temperature 0.5269565861800071 +831 46 optimizer.lr 0.0029830446552689158 +831 46 negative_sampler.num_negs_per_pos 47.0 +831 46 training.batch_size 2.0 +831 47 model.embedding_dim 0.0 +831 47 model.scoring_fct_norm 1.0 +831 47 loss.margin 3.3727187055522814 +831 47 loss.adversarial_temperature 0.7616689042598153 +831 47 optimizer.lr 0.0019978777073233313 +831 47 negative_sampler.num_negs_per_pos 54.0 +831 47 training.batch_size 2.0 +831 48 model.embedding_dim 1.0 +831 48 model.scoring_fct_norm 1.0 +831 48 loss.margin 23.5712393726268 +831 48 loss.adversarial_temperature 0.9112621800756806 +831 48 optimizer.lr 0.0010012847686070928 +831 48 negative_sampler.num_negs_per_pos 59.0 +831 48 training.batch_size 2.0 +831 49 model.embedding_dim 0.0 +831 49 model.scoring_fct_norm 2.0 +831 49 loss.margin 12.641157099305602 +831 49 loss.adversarial_temperature 0.778226069413715 +831 49 optimizer.lr 0.012018480658512867 +831 49 negative_sampler.num_negs_per_pos 58.0 +831 49 training.batch_size 0.0 +831 50 model.embedding_dim 1.0 +831 50 model.scoring_fct_norm 1.0 +831 50 loss.margin 24.636129484916157 +831 50 loss.adversarial_temperature 0.7799143433491919 +831 50 optimizer.lr 0.08168248684549381 +831 50 negative_sampler.num_negs_per_pos 41.0 +831 50 training.batch_size 2.0 +831 51 model.embedding_dim 1.0 +831 51 model.scoring_fct_norm 1.0 +831 51 loss.margin 5.8555085945578425 +831 51 loss.adversarial_temperature 0.48258154029454314 +831 51 optimizer.lr 0.007227765706013804 +831 51 
negative_sampler.num_negs_per_pos 72.0 +831 51 training.batch_size 1.0 +831 52 model.embedding_dim 1.0 +831 52 model.scoring_fct_norm 2.0 +831 52 loss.margin 28.860631513441156 +831 52 loss.adversarial_temperature 0.15660964593185947 +831 52 optimizer.lr 0.0016952423178517883 +831 52 negative_sampler.num_negs_per_pos 16.0 +831 52 training.batch_size 2.0 +831 53 model.embedding_dim 2.0 +831 53 model.scoring_fct_norm 2.0 +831 53 loss.margin 8.399246957286218 +831 53 loss.adversarial_temperature 0.6800374098440688 +831 53 optimizer.lr 0.0021439636834326686 +831 53 negative_sampler.num_negs_per_pos 69.0 +831 53 training.batch_size 1.0 +831 54 model.embedding_dim 2.0 +831 54 model.scoring_fct_norm 2.0 +831 54 loss.margin 7.496791766037641 +831 54 loss.adversarial_temperature 0.24201246594408832 +831 54 optimizer.lr 0.018698107487030758 +831 54 negative_sampler.num_negs_per_pos 59.0 +831 54 training.batch_size 1.0 +831 55 model.embedding_dim 1.0 +831 55 model.scoring_fct_norm 2.0 +831 55 loss.margin 4.13992156591387 +831 55 loss.adversarial_temperature 0.6694344427313007 +831 55 optimizer.lr 0.07175729098662867 +831 55 negative_sampler.num_negs_per_pos 90.0 +831 55 training.batch_size 2.0 +831 56 model.embedding_dim 0.0 +831 56 model.scoring_fct_norm 2.0 +831 56 loss.margin 6.06071740812889 +831 56 loss.adversarial_temperature 0.7382746731984516 +831 56 optimizer.lr 0.07087124947529105 +831 56 negative_sampler.num_negs_per_pos 13.0 +831 56 training.batch_size 1.0 +831 57 model.embedding_dim 0.0 +831 57 model.scoring_fct_norm 1.0 +831 57 loss.margin 28.944141910726774 +831 57 loss.adversarial_temperature 0.7854046372335617 +831 57 optimizer.lr 0.04443633978985068 +831 57 negative_sampler.num_negs_per_pos 72.0 +831 57 training.batch_size 0.0 +831 58 model.embedding_dim 1.0 +831 58 model.scoring_fct_norm 2.0 +831 58 loss.margin 27.088412056807666 +831 58 loss.adversarial_temperature 0.2531016886232097 +831 58 optimizer.lr 0.0034676192459325095 +831 58 
negative_sampler.num_negs_per_pos 71.0 +831 58 training.batch_size 0.0 +831 59 model.embedding_dim 0.0 +831 59 model.scoring_fct_norm 1.0 +831 59 loss.margin 13.50783503306811 +831 59 loss.adversarial_temperature 0.8231003018645962 +831 59 optimizer.lr 0.03954736809647553 +831 59 negative_sampler.num_negs_per_pos 76.0 +831 59 training.batch_size 2.0 +831 60 model.embedding_dim 2.0 +831 60 model.scoring_fct_norm 2.0 +831 60 loss.margin 27.17540469930317 +831 60 loss.adversarial_temperature 0.7252025102049217 +831 60 optimizer.lr 0.02239007518431862 +831 60 negative_sampler.num_negs_per_pos 71.0 +831 60 training.batch_size 2.0 +831 61 model.embedding_dim 2.0 +831 61 model.scoring_fct_norm 2.0 +831 61 loss.margin 20.822898537949698 +831 61 loss.adversarial_temperature 0.959170175847072 +831 61 optimizer.lr 0.05355522040942646 +831 61 negative_sampler.num_negs_per_pos 72.0 +831 61 training.batch_size 0.0 +831 62 model.embedding_dim 2.0 +831 62 model.scoring_fct_norm 2.0 +831 62 loss.margin 27.400509678335425 +831 62 loss.adversarial_temperature 0.791070496667287 +831 62 optimizer.lr 0.003101448085335446 +831 62 negative_sampler.num_negs_per_pos 0.0 +831 62 training.batch_size 1.0 +831 63 model.embedding_dim 0.0 +831 63 model.scoring_fct_norm 2.0 +831 63 loss.margin 17.654203854563395 +831 63 loss.adversarial_temperature 0.40576314989730666 +831 63 optimizer.lr 0.002938798238331588 +831 63 negative_sampler.num_negs_per_pos 33.0 +831 63 training.batch_size 0.0 +831 64 model.embedding_dim 0.0 +831 64 model.scoring_fct_norm 2.0 +831 64 loss.margin 23.054873194604173 +831 64 loss.adversarial_temperature 0.31498984790149576 +831 64 optimizer.lr 0.0806919438725058 +831 64 negative_sampler.num_negs_per_pos 30.0 +831 64 training.batch_size 0.0 +831 65 model.embedding_dim 0.0 +831 65 model.scoring_fct_norm 1.0 +831 65 loss.margin 5.774194856600879 +831 65 loss.adversarial_temperature 0.15808034018545172 +831 65 optimizer.lr 0.014883827199793255 +831 65 
negative_sampler.num_negs_per_pos 34.0 +831 65 training.batch_size 0.0 +831 66 model.embedding_dim 2.0 +831 66 model.scoring_fct_norm 2.0 +831 66 loss.margin 28.679169647854202 +831 66 loss.adversarial_temperature 0.5545889370383881 +831 66 optimizer.lr 0.052410107840351355 +831 66 negative_sampler.num_negs_per_pos 45.0 +831 66 training.batch_size 1.0 +831 67 model.embedding_dim 2.0 +831 67 model.scoring_fct_norm 1.0 +831 67 loss.margin 8.009451785976154 +831 67 loss.adversarial_temperature 0.9550981818745825 +831 67 optimizer.lr 0.03122130195834055 +831 67 negative_sampler.num_negs_per_pos 18.0 +831 67 training.batch_size 0.0 +831 68 model.embedding_dim 2.0 +831 68 model.scoring_fct_norm 2.0 +831 68 loss.margin 2.377881169826384 +831 68 loss.adversarial_temperature 0.8758622785410303 +831 68 optimizer.lr 0.0026763120893498918 +831 68 negative_sampler.num_negs_per_pos 33.0 +831 68 training.batch_size 1.0 +831 69 model.embedding_dim 1.0 +831 69 model.scoring_fct_norm 2.0 +831 69 loss.margin 12.409023188935947 +831 69 loss.adversarial_temperature 0.40817619544427286 +831 69 optimizer.lr 0.09108167021610276 +831 69 negative_sampler.num_negs_per_pos 61.0 +831 69 training.batch_size 1.0 +831 70 model.embedding_dim 2.0 +831 70 model.scoring_fct_norm 1.0 +831 70 loss.margin 10.066904090948992 +831 70 loss.adversarial_temperature 0.9276015390315304 +831 70 optimizer.lr 0.020547264430921842 +831 70 negative_sampler.num_negs_per_pos 95.0 +831 70 training.batch_size 2.0 +831 71 model.embedding_dim 2.0 +831 71 model.scoring_fct_norm 1.0 +831 71 loss.margin 22.564510928512547 +831 71 loss.adversarial_temperature 0.7702651381202005 +831 71 optimizer.lr 0.0060998685257164864 +831 71 negative_sampler.num_negs_per_pos 52.0 +831 71 training.batch_size 1.0 +831 72 model.embedding_dim 1.0 +831 72 model.scoring_fct_norm 1.0 +831 72 loss.margin 11.87923002136724 +831 72 loss.adversarial_temperature 0.24844781588055231 +831 72 optimizer.lr 0.04258309784296253 +831 72 
negative_sampler.num_negs_per_pos 94.0 +831 72 training.batch_size 0.0 +831 73 model.embedding_dim 2.0 +831 73 model.scoring_fct_norm 1.0 +831 73 loss.margin 24.416666668185048 +831 73 loss.adversarial_temperature 0.5895357443396819 +831 73 optimizer.lr 0.009787028710362264 +831 73 negative_sampler.num_negs_per_pos 1.0 +831 73 training.batch_size 2.0 +831 74 model.embedding_dim 0.0 +831 74 model.scoring_fct_norm 1.0 +831 74 loss.margin 27.57573661900519 +831 74 loss.adversarial_temperature 0.553456335025766 +831 74 optimizer.lr 0.022795732949686277 +831 74 negative_sampler.num_negs_per_pos 45.0 +831 74 training.batch_size 2.0 +831 75 model.embedding_dim 0.0 +831 75 model.scoring_fct_norm 2.0 +831 75 loss.margin 5.1681706212233305 +831 75 loss.adversarial_temperature 0.789611244806702 +831 75 optimizer.lr 0.008722966971861533 +831 75 negative_sampler.num_negs_per_pos 83.0 +831 75 training.batch_size 0.0 +831 76 model.embedding_dim 1.0 +831 76 model.scoring_fct_norm 1.0 +831 76 loss.margin 9.464689350258812 +831 76 loss.adversarial_temperature 0.5573220778523506 +831 76 optimizer.lr 0.026338768445501187 +831 76 negative_sampler.num_negs_per_pos 89.0 +831 76 training.batch_size 1.0 +831 77 model.embedding_dim 0.0 +831 77 model.scoring_fct_norm 1.0 +831 77 loss.margin 22.336436367531217 +831 77 loss.adversarial_temperature 0.1981452035416552 +831 77 optimizer.lr 0.0012077890570289442 +831 77 negative_sampler.num_negs_per_pos 14.0 +831 77 training.batch_size 1.0 +831 78 model.embedding_dim 0.0 +831 78 model.scoring_fct_norm 2.0 +831 78 loss.margin 19.385593185770176 +831 78 loss.adversarial_temperature 0.5173770474044816 +831 78 optimizer.lr 0.013172497951082248 +831 78 negative_sampler.num_negs_per_pos 55.0 +831 78 training.batch_size 0.0 +831 79 model.embedding_dim 1.0 +831 79 model.scoring_fct_norm 2.0 +831 79 loss.margin 23.821130593686913 +831 79 loss.adversarial_temperature 0.984531690311771 +831 79 optimizer.lr 0.003814809583257697 +831 79 
negative_sampler.num_negs_per_pos 64.0 +831 79 training.batch_size 0.0 +831 80 model.embedding_dim 1.0 +831 80 model.scoring_fct_norm 1.0 +831 80 loss.margin 1.4958509271743066 +831 80 loss.adversarial_temperature 0.9849257394961721 +831 80 optimizer.lr 0.004187422172365523 +831 80 negative_sampler.num_negs_per_pos 35.0 +831 80 training.batch_size 0.0 +831 81 model.embedding_dim 2.0 +831 81 model.scoring_fct_norm 2.0 +831 81 loss.margin 23.424282221606962 +831 81 loss.adversarial_temperature 0.40628084959174227 +831 81 optimizer.lr 0.002458350248581157 +831 81 negative_sampler.num_negs_per_pos 89.0 +831 81 training.batch_size 1.0 +831 82 model.embedding_dim 2.0 +831 82 model.scoring_fct_norm 1.0 +831 82 loss.margin 27.621370496270597 +831 82 loss.adversarial_temperature 0.44880269520280924 +831 82 optimizer.lr 0.009666889945042303 +831 82 negative_sampler.num_negs_per_pos 96.0 +831 82 training.batch_size 0.0 +831 83 model.embedding_dim 2.0 +831 83 model.scoring_fct_norm 2.0 +831 83 loss.margin 19.417850671673477 +831 83 loss.adversarial_temperature 0.190030506568938 +831 83 optimizer.lr 0.003303608813967554 +831 83 negative_sampler.num_negs_per_pos 47.0 +831 83 training.batch_size 2.0 +831 84 model.embedding_dim 0.0 +831 84 model.scoring_fct_norm 2.0 +831 84 loss.margin 23.98796736720149 +831 84 loss.adversarial_temperature 0.9965928530198838 +831 84 optimizer.lr 0.03518876503260509 +831 84 negative_sampler.num_negs_per_pos 16.0 +831 84 training.batch_size 1.0 +831 85 model.embedding_dim 0.0 +831 85 model.scoring_fct_norm 1.0 +831 85 loss.margin 12.002343469808839 +831 85 loss.adversarial_temperature 0.7386545092442127 +831 85 optimizer.lr 0.08908004614153316 +831 85 negative_sampler.num_negs_per_pos 85.0 +831 85 training.batch_size 2.0 +831 86 model.embedding_dim 2.0 +831 86 model.scoring_fct_norm 1.0 +831 86 loss.margin 10.980034217133298 +831 86 loss.adversarial_temperature 0.8919894290753417 +831 86 optimizer.lr 0.001704775043746884 +831 86 
negative_sampler.num_negs_per_pos 65.0 +831 86 training.batch_size 1.0 +831 87 model.embedding_dim 1.0 +831 87 model.scoring_fct_norm 2.0 +831 87 loss.margin 27.57769377853883 +831 87 loss.adversarial_temperature 0.8128928625445181 +831 87 optimizer.lr 0.0012822566356593611 +831 87 negative_sampler.num_negs_per_pos 70.0 +831 87 training.batch_size 0.0 +831 88 model.embedding_dim 1.0 +831 88 model.scoring_fct_norm 1.0 +831 88 loss.margin 4.302384233926594 +831 88 loss.adversarial_temperature 0.30041526345334607 +831 88 optimizer.lr 0.02902615311542626 +831 88 negative_sampler.num_negs_per_pos 48.0 +831 88 training.batch_size 0.0 +831 89 model.embedding_dim 0.0 +831 89 model.scoring_fct_norm 1.0 +831 89 loss.margin 23.677873726600815 +831 89 loss.adversarial_temperature 0.1833445694882587 +831 89 optimizer.lr 0.003388852889685122 +831 89 negative_sampler.num_negs_per_pos 41.0 +831 89 training.batch_size 1.0 +831 90 model.embedding_dim 2.0 +831 90 model.scoring_fct_norm 1.0 +831 90 loss.margin 18.12073525940763 +831 90 loss.adversarial_temperature 0.5371096368074725 +831 90 optimizer.lr 0.033009343847203776 +831 90 negative_sampler.num_negs_per_pos 1.0 +831 90 training.batch_size 2.0 +831 91 model.embedding_dim 1.0 +831 91 model.scoring_fct_norm 2.0 +831 91 loss.margin 13.37512014749133 +831 91 loss.adversarial_temperature 0.6239983279319801 +831 91 optimizer.lr 0.029592482488451154 +831 91 negative_sampler.num_negs_per_pos 33.0 +831 91 training.batch_size 2.0 +831 92 model.embedding_dim 0.0 +831 92 model.scoring_fct_norm 1.0 +831 92 loss.margin 29.546569129619577 +831 92 loss.adversarial_temperature 0.37321876416945854 +831 92 optimizer.lr 0.02342037609079821 +831 92 negative_sampler.num_negs_per_pos 53.0 +831 92 training.batch_size 2.0 +831 93 model.embedding_dim 1.0 +831 93 model.scoring_fct_norm 2.0 +831 93 loss.margin 8.862145026990195 +831 93 loss.adversarial_temperature 0.7858823740524098 +831 93 optimizer.lr 0.010484598491285862 +831 93 
negative_sampler.num_negs_per_pos 4.0 +831 93 training.batch_size 0.0 +831 94 model.embedding_dim 1.0 +831 94 model.scoring_fct_norm 2.0 +831 94 loss.margin 15.537935488608248 +831 94 loss.adversarial_temperature 0.35939030054357435 +831 94 optimizer.lr 0.09828801326030724 +831 94 negative_sampler.num_negs_per_pos 59.0 +831 94 training.batch_size 0.0 +831 95 model.embedding_dim 1.0 +831 95 model.scoring_fct_norm 2.0 +831 95 loss.margin 25.36300647597139 +831 95 loss.adversarial_temperature 0.3447556859110328 +831 95 optimizer.lr 0.004248727145073171 +831 95 negative_sampler.num_negs_per_pos 99.0 +831 95 training.batch_size 0.0 +831 96 model.embedding_dim 2.0 +831 96 model.scoring_fct_norm 2.0 +831 96 loss.margin 7.779762209849874 +831 96 loss.adversarial_temperature 0.5602808598999403 +831 96 optimizer.lr 0.04472423366141797 +831 96 negative_sampler.num_negs_per_pos 19.0 +831 96 training.batch_size 1.0 +831 97 model.embedding_dim 2.0 +831 97 model.scoring_fct_norm 1.0 +831 97 loss.margin 9.16791120609507 +831 97 loss.adversarial_temperature 0.20665207540455877 +831 97 optimizer.lr 0.0012845762881348963 +831 97 negative_sampler.num_negs_per_pos 62.0 +831 97 training.batch_size 1.0 +831 98 model.embedding_dim 1.0 +831 98 model.scoring_fct_norm 2.0 +831 98 loss.margin 24.308226259573264 +831 98 loss.adversarial_temperature 0.4053442730815061 +831 98 optimizer.lr 0.03519941070560271 +831 98 negative_sampler.num_negs_per_pos 27.0 +831 98 training.batch_size 0.0 +831 99 model.embedding_dim 0.0 +831 99 model.scoring_fct_norm 1.0 +831 99 loss.margin 9.33009563432622 +831 99 loss.adversarial_temperature 0.6356405362497571 +831 99 optimizer.lr 0.005733021637694639 +831 99 negative_sampler.num_negs_per_pos 41.0 +831 99 training.batch_size 1.0 +831 100 model.embedding_dim 1.0 +831 100 model.scoring_fct_norm 2.0 +831 100 loss.margin 10.356417378345249 +831 100 loss.adversarial_temperature 0.3527148569643078 +831 100 optimizer.lr 0.023169412187746326 +831 100 
negative_sampler.num_negs_per_pos 4.0 +831 100 training.batch_size 2.0 +831 1 dataset """kinships""" +831 1 model """transe""" +831 1 loss """nssa""" +831 1 regularizer """no""" +831 1 optimizer """adam""" +831 1 training_loop """owa""" +831 1 negative_sampler """basic""" +831 1 evaluator """rankbased""" +831 2 dataset """kinships""" +831 2 model """transe""" +831 2 loss """nssa""" +831 2 regularizer """no""" +831 2 optimizer """adam""" +831 2 training_loop """owa""" +831 2 negative_sampler """basic""" +831 2 evaluator """rankbased""" +831 3 dataset """kinships""" +831 3 model """transe""" +831 3 loss """nssa""" +831 3 regularizer """no""" +831 3 optimizer """adam""" +831 3 training_loop """owa""" +831 3 negative_sampler """basic""" +831 3 evaluator """rankbased""" +831 4 dataset """kinships""" +831 4 model """transe""" +831 4 loss """nssa""" +831 4 regularizer """no""" +831 4 optimizer """adam""" +831 4 training_loop """owa""" +831 4 negative_sampler """basic""" +831 4 evaluator """rankbased""" +831 5 dataset """kinships""" +831 5 model """transe""" +831 5 loss """nssa""" +831 5 regularizer """no""" +831 5 optimizer """adam""" +831 5 training_loop """owa""" +831 5 negative_sampler """basic""" +831 5 evaluator """rankbased""" +831 6 dataset """kinships""" +831 6 model """transe""" +831 6 loss """nssa""" +831 6 regularizer """no""" +831 6 optimizer """adam""" +831 6 training_loop """owa""" +831 6 negative_sampler """basic""" +831 6 evaluator """rankbased""" +831 7 dataset """kinships""" +831 7 model """transe""" +831 7 loss """nssa""" +831 7 regularizer """no""" +831 7 optimizer """adam""" +831 7 training_loop """owa""" +831 7 negative_sampler """basic""" +831 7 evaluator """rankbased""" +831 8 dataset """kinships""" +831 8 model """transe""" +831 8 loss """nssa""" +831 8 regularizer """no""" +831 8 optimizer """adam""" +831 8 training_loop """owa""" +831 8 negative_sampler """basic""" +831 8 evaluator """rankbased""" +831 9 dataset """kinships""" +831 9 model 
"""transe""" +831 9 loss """nssa""" +831 9 regularizer """no""" +831 9 optimizer """adam""" +831 9 training_loop """owa""" +831 9 negative_sampler """basic""" +831 9 evaluator """rankbased""" +831 10 dataset """kinships""" +831 10 model """transe""" +831 10 loss """nssa""" +831 10 regularizer """no""" +831 10 optimizer """adam""" +831 10 training_loop """owa""" +831 10 negative_sampler """basic""" +831 10 evaluator """rankbased""" +831 11 dataset """kinships""" +831 11 model """transe""" +831 11 loss """nssa""" +831 11 regularizer """no""" +831 11 optimizer """adam""" +831 11 training_loop """owa""" +831 11 negative_sampler """basic""" +831 11 evaluator """rankbased""" +831 12 dataset """kinships""" +831 12 model """transe""" +831 12 loss """nssa""" +831 12 regularizer """no""" +831 12 optimizer """adam""" +831 12 training_loop """owa""" +831 12 negative_sampler """basic""" +831 12 evaluator """rankbased""" +831 13 dataset """kinships""" +831 13 model """transe""" +831 13 loss """nssa""" +831 13 regularizer """no""" +831 13 optimizer """adam""" +831 13 training_loop """owa""" +831 13 negative_sampler """basic""" +831 13 evaluator """rankbased""" +831 14 dataset """kinships""" +831 14 model """transe""" +831 14 loss """nssa""" +831 14 regularizer """no""" +831 14 optimizer """adam""" +831 14 training_loop """owa""" +831 14 negative_sampler """basic""" +831 14 evaluator """rankbased""" +831 15 dataset """kinships""" +831 15 model """transe""" +831 15 loss """nssa""" +831 15 regularizer """no""" +831 15 optimizer """adam""" +831 15 training_loop """owa""" +831 15 negative_sampler """basic""" +831 15 evaluator """rankbased""" +831 16 dataset """kinships""" +831 16 model """transe""" +831 16 loss """nssa""" +831 16 regularizer """no""" +831 16 optimizer """adam""" +831 16 training_loop """owa""" +831 16 negative_sampler """basic""" +831 16 evaluator """rankbased""" +831 17 dataset """kinships""" +831 17 model """transe""" +831 17 loss """nssa""" +831 17 regularizer 
"""no""" +831 17 optimizer """adam""" +831 17 training_loop """owa""" +831 17 negative_sampler """basic""" +831 17 evaluator """rankbased""" +831 18 dataset """kinships""" +831 18 model """transe""" +831 18 loss """nssa""" +831 18 regularizer """no""" +831 18 optimizer """adam""" +831 18 training_loop """owa""" +831 18 negative_sampler """basic""" +831 18 evaluator """rankbased""" +831 19 dataset """kinships""" +831 19 model """transe""" +831 19 loss """nssa""" +831 19 regularizer """no""" +831 19 optimizer """adam""" +831 19 training_loop """owa""" +831 19 negative_sampler """basic""" +831 19 evaluator """rankbased""" +831 20 dataset """kinships""" +831 20 model """transe""" +831 20 loss """nssa""" +831 20 regularizer """no""" +831 20 optimizer """adam""" +831 20 training_loop """owa""" +831 20 negative_sampler """basic""" +831 20 evaluator """rankbased""" +831 21 dataset """kinships""" +831 21 model """transe""" +831 21 loss """nssa""" +831 21 regularizer """no""" +831 21 optimizer """adam""" +831 21 training_loop """owa""" +831 21 negative_sampler """basic""" +831 21 evaluator """rankbased""" +831 22 dataset """kinships""" +831 22 model """transe""" +831 22 loss """nssa""" +831 22 regularizer """no""" +831 22 optimizer """adam""" +831 22 training_loop """owa""" +831 22 negative_sampler """basic""" +831 22 evaluator """rankbased""" +831 23 dataset """kinships""" +831 23 model """transe""" +831 23 loss """nssa""" +831 23 regularizer """no""" +831 23 optimizer """adam""" +831 23 training_loop """owa""" +831 23 negative_sampler """basic""" +831 23 evaluator """rankbased""" +831 24 dataset """kinships""" +831 24 model """transe""" +831 24 loss """nssa""" +831 24 regularizer """no""" +831 24 optimizer """adam""" +831 24 training_loop """owa""" +831 24 negative_sampler """basic""" +831 24 evaluator """rankbased""" +831 25 dataset """kinships""" +831 25 model """transe""" +831 25 loss """nssa""" +831 25 regularizer """no""" +831 25 optimizer """adam""" +831 25 
training_loop """owa""" +831 25 negative_sampler """basic""" +831 25 evaluator """rankbased""" +831 26 dataset """kinships""" +831 26 model """transe""" +831 26 loss """nssa""" +831 26 regularizer """no""" +831 26 optimizer """adam""" +831 26 training_loop """owa""" +831 26 negative_sampler """basic""" +831 26 evaluator """rankbased""" +831 27 dataset """kinships""" +831 27 model """transe""" +831 27 loss """nssa""" +831 27 regularizer """no""" +831 27 optimizer """adam""" +831 27 training_loop """owa""" +831 27 negative_sampler """basic""" +831 27 evaluator """rankbased""" +831 28 dataset """kinships""" +831 28 model """transe""" +831 28 loss """nssa""" +831 28 regularizer """no""" +831 28 optimizer """adam""" +831 28 training_loop """owa""" +831 28 negative_sampler """basic""" +831 28 evaluator """rankbased""" +831 29 dataset """kinships""" +831 29 model """transe""" +831 29 loss """nssa""" +831 29 regularizer """no""" +831 29 optimizer """adam""" +831 29 training_loop """owa""" +831 29 negative_sampler """basic""" +831 29 evaluator """rankbased""" +831 30 dataset """kinships""" +831 30 model """transe""" +831 30 loss """nssa""" +831 30 regularizer """no""" +831 30 optimizer """adam""" +831 30 training_loop """owa""" +831 30 negative_sampler """basic""" +831 30 evaluator """rankbased""" +831 31 dataset """kinships""" +831 31 model """transe""" +831 31 loss """nssa""" +831 31 regularizer """no""" +831 31 optimizer """adam""" +831 31 training_loop """owa""" +831 31 negative_sampler """basic""" +831 31 evaluator """rankbased""" +831 32 dataset """kinships""" +831 32 model """transe""" +831 32 loss """nssa""" +831 32 regularizer """no""" +831 32 optimizer """adam""" +831 32 training_loop """owa""" +831 32 negative_sampler """basic""" +831 32 evaluator """rankbased""" +831 33 dataset """kinships""" +831 33 model """transe""" +831 33 loss """nssa""" +831 33 regularizer """no""" +831 33 optimizer """adam""" +831 33 training_loop """owa""" +831 33 negative_sampler 
"""basic""" +831 33 evaluator """rankbased""" +831 34 dataset """kinships""" +831 34 model """transe""" +831 34 loss """nssa""" +831 34 regularizer """no""" +831 34 optimizer """adam""" +831 34 training_loop """owa""" +831 34 negative_sampler """basic""" +831 34 evaluator """rankbased""" +831 35 dataset """kinships""" +831 35 model """transe""" +831 35 loss """nssa""" +831 35 regularizer """no""" +831 35 optimizer """adam""" +831 35 training_loop """owa""" +831 35 negative_sampler """basic""" +831 35 evaluator """rankbased""" +831 36 dataset """kinships""" +831 36 model """transe""" +831 36 loss """nssa""" +831 36 regularizer """no""" +831 36 optimizer """adam""" +831 36 training_loop """owa""" +831 36 negative_sampler """basic""" +831 36 evaluator """rankbased""" +831 37 dataset """kinships""" +831 37 model """transe""" +831 37 loss """nssa""" +831 37 regularizer """no""" +831 37 optimizer """adam""" +831 37 training_loop """owa""" +831 37 negative_sampler """basic""" +831 37 evaluator """rankbased""" +831 38 dataset """kinships""" +831 38 model """transe""" +831 38 loss """nssa""" +831 38 regularizer """no""" +831 38 optimizer """adam""" +831 38 training_loop """owa""" +831 38 negative_sampler """basic""" +831 38 evaluator """rankbased""" +831 39 dataset """kinships""" +831 39 model """transe""" +831 39 loss """nssa""" +831 39 regularizer """no""" +831 39 optimizer """adam""" +831 39 training_loop """owa""" +831 39 negative_sampler """basic""" +831 39 evaluator """rankbased""" +831 40 dataset """kinships""" +831 40 model """transe""" +831 40 loss """nssa""" +831 40 regularizer """no""" +831 40 optimizer """adam""" +831 40 training_loop """owa""" +831 40 negative_sampler """basic""" +831 40 evaluator """rankbased""" +831 41 dataset """kinships""" +831 41 model """transe""" +831 41 loss """nssa""" +831 41 regularizer """no""" +831 41 optimizer """adam""" +831 41 training_loop """owa""" +831 41 negative_sampler """basic""" +831 41 evaluator """rankbased""" +831 42 
dataset """kinships""" +831 42 model """transe""" +831 42 loss """nssa""" +831 42 regularizer """no""" +831 42 optimizer """adam""" +831 42 training_loop """owa""" +831 42 negative_sampler """basic""" +831 42 evaluator """rankbased""" +831 43 dataset """kinships""" +831 43 model """transe""" +831 43 loss """nssa""" +831 43 regularizer """no""" +831 43 optimizer """adam""" +831 43 training_loop """owa""" +831 43 negative_sampler """basic""" +831 43 evaluator """rankbased""" +831 44 dataset """kinships""" +831 44 model """transe""" +831 44 loss """nssa""" +831 44 regularizer """no""" +831 44 optimizer """adam""" +831 44 training_loop """owa""" +831 44 negative_sampler """basic""" +831 44 evaluator """rankbased""" +831 45 dataset """kinships""" +831 45 model """transe""" +831 45 loss """nssa""" +831 45 regularizer """no""" +831 45 optimizer """adam""" +831 45 training_loop """owa""" +831 45 negative_sampler """basic""" +831 45 evaluator """rankbased""" +831 46 dataset """kinships""" +831 46 model """transe""" +831 46 loss """nssa""" +831 46 regularizer """no""" +831 46 optimizer """adam""" +831 46 training_loop """owa""" +831 46 negative_sampler """basic""" +831 46 evaluator """rankbased""" +831 47 dataset """kinships""" +831 47 model """transe""" +831 47 loss """nssa""" +831 47 regularizer """no""" +831 47 optimizer """adam""" +831 47 training_loop """owa""" +831 47 negative_sampler """basic""" +831 47 evaluator """rankbased""" +831 48 dataset """kinships""" +831 48 model """transe""" +831 48 loss """nssa""" +831 48 regularizer """no""" +831 48 optimizer """adam""" +831 48 training_loop """owa""" +831 48 negative_sampler """basic""" +831 48 evaluator """rankbased""" +831 49 dataset """kinships""" +831 49 model """transe""" +831 49 loss """nssa""" +831 49 regularizer """no""" +831 49 optimizer """adam""" +831 49 training_loop """owa""" +831 49 negative_sampler """basic""" +831 49 evaluator """rankbased""" +831 50 dataset """kinships""" +831 50 model """transe""" +831 
50 loss """nssa""" +831 50 regularizer """no""" +831 50 optimizer """adam""" +831 50 training_loop """owa""" +831 50 negative_sampler """basic""" +831 50 evaluator """rankbased""" +831 51 dataset """kinships""" +831 51 model """transe""" +831 51 loss """nssa""" +831 51 regularizer """no""" +831 51 optimizer """adam""" +831 51 training_loop """owa""" +831 51 negative_sampler """basic""" +831 51 evaluator """rankbased""" +831 52 dataset """kinships""" +831 52 model """transe""" +831 52 loss """nssa""" +831 52 regularizer """no""" +831 52 optimizer """adam""" +831 52 training_loop """owa""" +831 52 negative_sampler """basic""" +831 52 evaluator """rankbased""" +831 53 dataset """kinships""" +831 53 model """transe""" +831 53 loss """nssa""" +831 53 regularizer """no""" +831 53 optimizer """adam""" +831 53 training_loop """owa""" +831 53 negative_sampler """basic""" +831 53 evaluator """rankbased""" +831 54 dataset """kinships""" +831 54 model """transe""" +831 54 loss """nssa""" +831 54 regularizer """no""" +831 54 optimizer """adam""" +831 54 training_loop """owa""" +831 54 negative_sampler """basic""" +831 54 evaluator """rankbased""" +831 55 dataset """kinships""" +831 55 model """transe""" +831 55 loss """nssa""" +831 55 regularizer """no""" +831 55 optimizer """adam""" +831 55 training_loop """owa""" +831 55 negative_sampler """basic""" +831 55 evaluator """rankbased""" +831 56 dataset """kinships""" +831 56 model """transe""" +831 56 loss """nssa""" +831 56 regularizer """no""" +831 56 optimizer """adam""" +831 56 training_loop """owa""" +831 56 negative_sampler """basic""" +831 56 evaluator """rankbased""" +831 57 dataset """kinships""" +831 57 model """transe""" +831 57 loss """nssa""" +831 57 regularizer """no""" +831 57 optimizer """adam""" +831 57 training_loop """owa""" +831 57 negative_sampler """basic""" +831 57 evaluator """rankbased""" +831 58 dataset """kinships""" +831 58 model """transe""" +831 58 loss """nssa""" +831 58 regularizer """no""" +831 58 
optimizer """adam""" +831 58 training_loop """owa""" +831 58 negative_sampler """basic""" +831 58 evaluator """rankbased""" +831 59 dataset """kinships""" +831 59 model """transe""" +831 59 loss """nssa""" +831 59 regularizer """no""" +831 59 optimizer """adam""" +831 59 training_loop """owa""" +831 59 negative_sampler """basic""" +831 59 evaluator """rankbased""" +831 60 dataset """kinships""" +831 60 model """transe""" +831 60 loss """nssa""" +831 60 regularizer """no""" +831 60 optimizer """adam""" +831 60 training_loop """owa""" +831 60 negative_sampler """basic""" +831 60 evaluator """rankbased""" +831 61 dataset """kinships""" +831 61 model """transe""" +831 61 loss """nssa""" +831 61 regularizer """no""" +831 61 optimizer """adam""" +831 61 training_loop """owa""" +831 61 negative_sampler """basic""" +831 61 evaluator """rankbased""" +831 62 dataset """kinships""" +831 62 model """transe""" +831 62 loss """nssa""" +831 62 regularizer """no""" +831 62 optimizer """adam""" +831 62 training_loop """owa""" +831 62 negative_sampler """basic""" +831 62 evaluator """rankbased""" +831 63 dataset """kinships""" +831 63 model """transe""" +831 63 loss """nssa""" +831 63 regularizer """no""" +831 63 optimizer """adam""" +831 63 training_loop """owa""" +831 63 negative_sampler """basic""" +831 63 evaluator """rankbased""" +831 64 dataset """kinships""" +831 64 model """transe""" +831 64 loss """nssa""" +831 64 regularizer """no""" +831 64 optimizer """adam""" +831 64 training_loop """owa""" +831 64 negative_sampler """basic""" +831 64 evaluator """rankbased""" +831 65 dataset """kinships""" +831 65 model """transe""" +831 65 loss """nssa""" +831 65 regularizer """no""" +831 65 optimizer """adam""" +831 65 training_loop """owa""" +831 65 negative_sampler """basic""" +831 65 evaluator """rankbased""" +831 66 dataset """kinships""" +831 66 model """transe""" +831 66 loss """nssa""" +831 66 regularizer """no""" +831 66 optimizer """adam""" +831 66 training_loop """owa""" 
+831 66 negative_sampler """basic""" +831 66 evaluator """rankbased""" +831 67 dataset """kinships""" +831 67 model """transe""" +831 67 loss """nssa""" +831 67 regularizer """no""" +831 67 optimizer """adam""" +831 67 training_loop """owa""" +831 67 negative_sampler """basic""" +831 67 evaluator """rankbased""" +831 68 dataset """kinships""" +831 68 model """transe""" +831 68 loss """nssa""" +831 68 regularizer """no""" +831 68 optimizer """adam""" +831 68 training_loop """owa""" +831 68 negative_sampler """basic""" +831 68 evaluator """rankbased""" +831 69 dataset """kinships""" +831 69 model """transe""" +831 69 loss """nssa""" +831 69 regularizer """no""" +831 69 optimizer """adam""" +831 69 training_loop """owa""" +831 69 negative_sampler """basic""" +831 69 evaluator """rankbased""" +831 70 dataset """kinships""" +831 70 model """transe""" +831 70 loss """nssa""" +831 70 regularizer """no""" +831 70 optimizer """adam""" +831 70 training_loop """owa""" +831 70 negative_sampler """basic""" +831 70 evaluator """rankbased""" +831 71 dataset """kinships""" +831 71 model """transe""" +831 71 loss """nssa""" +831 71 regularizer """no""" +831 71 optimizer """adam""" +831 71 training_loop """owa""" +831 71 negative_sampler """basic""" +831 71 evaluator """rankbased""" +831 72 dataset """kinships""" +831 72 model """transe""" +831 72 loss """nssa""" +831 72 regularizer """no""" +831 72 optimizer """adam""" +831 72 training_loop """owa""" +831 72 negative_sampler """basic""" +831 72 evaluator """rankbased""" +831 73 dataset """kinships""" +831 73 model """transe""" +831 73 loss """nssa""" +831 73 regularizer """no""" +831 73 optimizer """adam""" +831 73 training_loop """owa""" +831 73 negative_sampler """basic""" +831 73 evaluator """rankbased""" +831 74 dataset """kinships""" +831 74 model """transe""" +831 74 loss """nssa""" +831 74 regularizer """no""" +831 74 optimizer """adam""" +831 74 training_loop """owa""" +831 74 negative_sampler """basic""" +831 74 evaluator 
"""rankbased""" +831 75 dataset """kinships""" +831 75 model """transe""" +831 75 loss """nssa""" +831 75 regularizer """no""" +831 75 optimizer """adam""" +831 75 training_loop """owa""" +831 75 negative_sampler """basic""" +831 75 evaluator """rankbased""" +831 76 dataset """kinships""" +831 76 model """transe""" +831 76 loss """nssa""" +831 76 regularizer """no""" +831 76 optimizer """adam""" +831 76 training_loop """owa""" +831 76 negative_sampler """basic""" +831 76 evaluator """rankbased""" +831 77 dataset """kinships""" +831 77 model """transe""" +831 77 loss """nssa""" +831 77 regularizer """no""" +831 77 optimizer """adam""" +831 77 training_loop """owa""" +831 77 negative_sampler """basic""" +831 77 evaluator """rankbased""" +831 78 dataset """kinships""" +831 78 model """transe""" +831 78 loss """nssa""" +831 78 regularizer """no""" +831 78 optimizer """adam""" +831 78 training_loop """owa""" +831 78 negative_sampler """basic""" +831 78 evaluator """rankbased""" +831 79 dataset """kinships""" +831 79 model """transe""" +831 79 loss """nssa""" +831 79 regularizer """no""" +831 79 optimizer """adam""" +831 79 training_loop """owa""" +831 79 negative_sampler """basic""" +831 79 evaluator """rankbased""" +831 80 dataset """kinships""" +831 80 model """transe""" +831 80 loss """nssa""" +831 80 regularizer """no""" +831 80 optimizer """adam""" +831 80 training_loop """owa""" +831 80 negative_sampler """basic""" +831 80 evaluator """rankbased""" +831 81 dataset """kinships""" +831 81 model """transe""" +831 81 loss """nssa""" +831 81 regularizer """no""" +831 81 optimizer """adam""" +831 81 training_loop """owa""" +831 81 negative_sampler """basic""" +831 81 evaluator """rankbased""" +831 82 dataset """kinships""" +831 82 model """transe""" +831 82 loss """nssa""" +831 82 regularizer """no""" +831 82 optimizer """adam""" +831 82 training_loop """owa""" +831 82 negative_sampler """basic""" +831 82 evaluator """rankbased""" +831 83 dataset """kinships""" +831 83 
model """transe""" +831 83 loss """nssa""" +831 83 regularizer """no""" +831 83 optimizer """adam""" +831 83 training_loop """owa""" +831 83 negative_sampler """basic""" +831 83 evaluator """rankbased""" +831 84 dataset """kinships""" +831 84 model """transe""" +831 84 loss """nssa""" +831 84 regularizer """no""" +831 84 optimizer """adam""" +831 84 training_loop """owa""" +831 84 negative_sampler """basic""" +831 84 evaluator """rankbased""" +831 85 dataset """kinships""" +831 85 model """transe""" +831 85 loss """nssa""" +831 85 regularizer """no""" +831 85 optimizer """adam""" +831 85 training_loop """owa""" +831 85 negative_sampler """basic""" +831 85 evaluator """rankbased""" +831 86 dataset """kinships""" +831 86 model """transe""" +831 86 loss """nssa""" +831 86 regularizer """no""" +831 86 optimizer """adam""" +831 86 training_loop """owa""" +831 86 negative_sampler """basic""" +831 86 evaluator """rankbased""" +831 87 dataset """kinships""" +831 87 model """transe""" +831 87 loss """nssa""" +831 87 regularizer """no""" +831 87 optimizer """adam""" +831 87 training_loop """owa""" +831 87 negative_sampler """basic""" +831 87 evaluator """rankbased""" +831 88 dataset """kinships""" +831 88 model """transe""" +831 88 loss """nssa""" +831 88 regularizer """no""" +831 88 optimizer """adam""" +831 88 training_loop """owa""" +831 88 negative_sampler """basic""" +831 88 evaluator """rankbased""" +831 89 dataset """kinships""" +831 89 model """transe""" +831 89 loss """nssa""" +831 89 regularizer """no""" +831 89 optimizer """adam""" +831 89 training_loop """owa""" +831 89 negative_sampler """basic""" +831 89 evaluator """rankbased""" +831 90 dataset """kinships""" +831 90 model """transe""" +831 90 loss """nssa""" +831 90 regularizer """no""" +831 90 optimizer """adam""" +831 90 training_loop """owa""" +831 90 negative_sampler """basic""" +831 90 evaluator """rankbased""" +831 91 dataset """kinships""" +831 91 model """transe""" +831 91 loss """nssa""" +831 91 
regularizer """no""" +831 91 optimizer """adam""" +831 91 training_loop """owa""" +831 91 negative_sampler """basic""" +831 91 evaluator """rankbased""" +831 92 dataset """kinships""" +831 92 model """transe""" +831 92 loss """nssa""" +831 92 regularizer """no""" +831 92 optimizer """adam""" +831 92 training_loop """owa""" +831 92 negative_sampler """basic""" +831 92 evaluator """rankbased""" +831 93 dataset """kinships""" +831 93 model """transe""" +831 93 loss """nssa""" +831 93 regularizer """no""" +831 93 optimizer """adam""" +831 93 training_loop """owa""" +831 93 negative_sampler """basic""" +831 93 evaluator """rankbased""" +831 94 dataset """kinships""" +831 94 model """transe""" +831 94 loss """nssa""" +831 94 regularizer """no""" +831 94 optimizer """adam""" +831 94 training_loop """owa""" +831 94 negative_sampler """basic""" +831 94 evaluator """rankbased""" +831 95 dataset """kinships""" +831 95 model """transe""" +831 95 loss """nssa""" +831 95 regularizer """no""" +831 95 optimizer """adam""" +831 95 training_loop """owa""" +831 95 negative_sampler """basic""" +831 95 evaluator """rankbased""" +831 96 dataset """kinships""" +831 96 model """transe""" +831 96 loss """nssa""" +831 96 regularizer """no""" +831 96 optimizer """adam""" +831 96 training_loop """owa""" +831 96 negative_sampler """basic""" +831 96 evaluator """rankbased""" +831 97 dataset """kinships""" +831 97 model """transe""" +831 97 loss """nssa""" +831 97 regularizer """no""" +831 97 optimizer """adam""" +831 97 training_loop """owa""" +831 97 negative_sampler """basic""" +831 97 evaluator """rankbased""" +831 98 dataset """kinships""" +831 98 model """transe""" +831 98 loss """nssa""" +831 98 regularizer """no""" +831 98 optimizer """adam""" +831 98 training_loop """owa""" +831 98 negative_sampler """basic""" +831 98 evaluator """rankbased""" +831 99 dataset """kinships""" +831 99 model """transe""" +831 99 loss """nssa""" +831 99 regularizer """no""" +831 99 optimizer """adam""" +831 
99 training_loop """owa""" +831 99 negative_sampler """basic""" +831 99 evaluator """rankbased""" +831 100 dataset """kinships""" +831 100 model """transe""" +831 100 loss """nssa""" +831 100 regularizer """no""" +831 100 optimizer """adam""" +831 100 training_loop """owa""" +831 100 negative_sampler """basic""" +831 100 evaluator """rankbased""" +832 1 model.embedding_dim 2.0 +832 1 model.scoring_fct_norm 1.0 +832 1 loss.margin 28.535252462066296 +832 1 loss.adversarial_temperature 0.17091254888891017 +832 1 optimizer.lr 0.0010736277024517172 +832 1 negative_sampler.num_negs_per_pos 37.0 +832 1 training.batch_size 2.0 +832 2 model.embedding_dim 1.0 +832 2 model.scoring_fct_norm 1.0 +832 2 loss.margin 28.752438318464943 +832 2 loss.adversarial_temperature 0.4329088972782916 +832 2 optimizer.lr 0.0019307452569216931 +832 2 negative_sampler.num_negs_per_pos 69.0 +832 2 training.batch_size 0.0 +832 3 model.embedding_dim 2.0 +832 3 model.scoring_fct_norm 2.0 +832 3 loss.margin 5.05217023107847 +832 3 loss.adversarial_temperature 0.942317875077956 +832 3 optimizer.lr 0.049911428968388225 +832 3 negative_sampler.num_negs_per_pos 87.0 +832 3 training.batch_size 0.0 +832 4 model.embedding_dim 0.0 +832 4 model.scoring_fct_norm 2.0 +832 4 loss.margin 26.042742806972047 +832 4 loss.adversarial_temperature 0.692174078256522 +832 4 optimizer.lr 0.07136773545970801 +832 4 negative_sampler.num_negs_per_pos 87.0 +832 4 training.batch_size 2.0 +832 5 model.embedding_dim 0.0 +832 5 model.scoring_fct_norm 1.0 +832 5 loss.margin 19.6115720954219 +832 5 loss.adversarial_temperature 0.26264163212468195 +832 5 optimizer.lr 0.019295963043630775 +832 5 negative_sampler.num_negs_per_pos 36.0 +832 5 training.batch_size 0.0 +832 6 model.embedding_dim 1.0 +832 6 model.scoring_fct_norm 2.0 +832 6 loss.margin 23.929631324004827 +832 6 loss.adversarial_temperature 0.19039666666602478 +832 6 optimizer.lr 0.012069331210635602 +832 6 negative_sampler.num_negs_per_pos 48.0 +832 6 training.batch_size 
1.0 +832 7 model.embedding_dim 1.0 +832 7 model.scoring_fct_norm 1.0 +832 7 loss.margin 14.563625209531976 +832 7 loss.adversarial_temperature 0.6947057203576975 +832 7 optimizer.lr 0.0029190748591512625 +832 7 negative_sampler.num_negs_per_pos 71.0 +832 7 training.batch_size 0.0 +832 8 model.embedding_dim 0.0 +832 8 model.scoring_fct_norm 2.0 +832 8 loss.margin 25.5122283614475 +832 8 loss.adversarial_temperature 0.21652163031103805 +832 8 optimizer.lr 0.09505477233670717 +832 8 negative_sampler.num_negs_per_pos 85.0 +832 8 training.batch_size 0.0 +832 9 model.embedding_dim 0.0 +832 9 model.scoring_fct_norm 1.0 +832 9 loss.margin 1.6467681130788816 +832 9 loss.adversarial_temperature 0.5729949399760615 +832 9 optimizer.lr 0.0760800688368944 +832 9 negative_sampler.num_negs_per_pos 63.0 +832 9 training.batch_size 2.0 +832 10 model.embedding_dim 2.0 +832 10 model.scoring_fct_norm 2.0 +832 10 loss.margin 24.522600843546346 +832 10 loss.adversarial_temperature 0.29300129874305414 +832 10 optimizer.lr 0.026413009743895387 +832 10 negative_sampler.num_negs_per_pos 62.0 +832 10 training.batch_size 0.0 +832 11 model.embedding_dim 2.0 +832 11 model.scoring_fct_norm 2.0 +832 11 loss.margin 26.287852538418296 +832 11 loss.adversarial_temperature 0.9653906001684347 +832 11 optimizer.lr 0.0013591895959793126 +832 11 negative_sampler.num_negs_per_pos 25.0 +832 11 training.batch_size 0.0 +832 12 model.embedding_dim 1.0 +832 12 model.scoring_fct_norm 2.0 +832 12 loss.margin 27.80990874233821 +832 12 loss.adversarial_temperature 0.494179088221962 +832 12 optimizer.lr 0.003463805582390032 +832 12 negative_sampler.num_negs_per_pos 85.0 +832 12 training.batch_size 1.0 +832 13 model.embedding_dim 2.0 +832 13 model.scoring_fct_norm 1.0 +832 13 loss.margin 6.733475725071954 +832 13 loss.adversarial_temperature 0.299815224590587 +832 13 optimizer.lr 0.09785286803768199 +832 13 negative_sampler.num_negs_per_pos 15.0 +832 13 training.batch_size 2.0 +832 14 model.embedding_dim 2.0 +832 14 
model.scoring_fct_norm 2.0 +832 14 loss.margin 16.072611724309834 +832 14 loss.adversarial_temperature 0.5371566234148452 +832 14 optimizer.lr 0.005535294267951137 +832 14 negative_sampler.num_negs_per_pos 58.0 +832 14 training.batch_size 0.0 +832 15 model.embedding_dim 1.0 +832 15 model.scoring_fct_norm 2.0 +832 15 loss.margin 25.082799510944998 +832 15 loss.adversarial_temperature 0.5193053333060952 +832 15 optimizer.lr 0.013029338473989622 +832 15 negative_sampler.num_negs_per_pos 68.0 +832 15 training.batch_size 0.0 +832 16 model.embedding_dim 1.0 +832 16 model.scoring_fct_norm 1.0 +832 16 loss.margin 5.070944987465197 +832 16 loss.adversarial_temperature 0.4242944145201355 +832 16 optimizer.lr 0.05011599529518004 +832 16 negative_sampler.num_negs_per_pos 94.0 +832 16 training.batch_size 0.0 +832 17 model.embedding_dim 2.0 +832 17 model.scoring_fct_norm 1.0 +832 17 loss.margin 1.5898937322587003 +832 17 loss.adversarial_temperature 0.47012743805377744 +832 17 optimizer.lr 0.00400441455770533 +832 17 negative_sampler.num_negs_per_pos 51.0 +832 17 training.batch_size 2.0 +832 18 model.embedding_dim 0.0 +832 18 model.scoring_fct_norm 2.0 +832 18 loss.margin 22.28051171333588 +832 18 loss.adversarial_temperature 0.3900668607677321 +832 18 optimizer.lr 0.0572569286585687 +832 18 negative_sampler.num_negs_per_pos 78.0 +832 18 training.batch_size 2.0 +832 19 model.embedding_dim 1.0 +832 19 model.scoring_fct_norm 2.0 +832 19 loss.margin 21.623032159733008 +832 19 loss.adversarial_temperature 0.10504645766904305 +832 19 optimizer.lr 0.0015631071950966918 +832 19 negative_sampler.num_negs_per_pos 0.0 +832 19 training.batch_size 0.0 +832 20 model.embedding_dim 1.0 +832 20 model.scoring_fct_norm 2.0 +832 20 loss.margin 12.579436465698961 +832 20 loss.adversarial_temperature 0.6462847928914864 +832 20 optimizer.lr 0.002110011645645097 +832 20 negative_sampler.num_negs_per_pos 93.0 +832 20 training.batch_size 2.0 +832 21 model.embedding_dim 0.0 +832 21 model.scoring_fct_norm 
2.0 +832 21 loss.margin 1.3990809469762304 +832 21 loss.adversarial_temperature 0.27507183155327797 +832 21 optimizer.lr 0.04675291239229982 +832 21 negative_sampler.num_negs_per_pos 76.0 +832 21 training.batch_size 1.0 +832 22 model.embedding_dim 2.0 +832 22 model.scoring_fct_norm 2.0 +832 22 loss.margin 4.690550904605038 +832 22 loss.adversarial_temperature 0.8472350527383564 +832 22 optimizer.lr 0.0020274019630714345 +832 22 negative_sampler.num_negs_per_pos 41.0 +832 22 training.batch_size 2.0 +832 23 model.embedding_dim 2.0 +832 23 model.scoring_fct_norm 1.0 +832 23 loss.margin 22.705335961987803 +832 23 loss.adversarial_temperature 0.46798166906984795 +832 23 optimizer.lr 0.0011364704502901776 +832 23 negative_sampler.num_negs_per_pos 64.0 +832 23 training.batch_size 0.0 +832 24 model.embedding_dim 2.0 +832 24 model.scoring_fct_norm 2.0 +832 24 loss.margin 28.827023229401778 +832 24 loss.adversarial_temperature 0.18076366855843454 +832 24 optimizer.lr 0.018353355909959103 +832 24 negative_sampler.num_negs_per_pos 48.0 +832 24 training.batch_size 2.0 +832 25 model.embedding_dim 2.0 +832 25 model.scoring_fct_norm 1.0 +832 25 loss.margin 21.126596002591405 +832 25 loss.adversarial_temperature 0.5121700879587903 +832 25 optimizer.lr 0.023266552859640216 +832 25 negative_sampler.num_negs_per_pos 14.0 +832 25 training.batch_size 1.0 +832 26 model.embedding_dim 0.0 +832 26 model.scoring_fct_norm 2.0 +832 26 loss.margin 9.020844298551804 +832 26 loss.adversarial_temperature 0.42785917036510424 +832 26 optimizer.lr 0.01199468941585161 +832 26 negative_sampler.num_negs_per_pos 99.0 +832 26 training.batch_size 1.0 +832 27 model.embedding_dim 0.0 +832 27 model.scoring_fct_norm 2.0 +832 27 loss.margin 7.657332505164565 +832 27 loss.adversarial_temperature 0.27915115597037254 +832 27 optimizer.lr 0.025015239187416684 +832 27 negative_sampler.num_negs_per_pos 32.0 +832 27 training.batch_size 1.0 +832 28 model.embedding_dim 1.0 +832 28 model.scoring_fct_norm 2.0 +832 28 
loss.margin 8.504761791226475 +832 28 loss.adversarial_temperature 0.6865261416793307 +832 28 optimizer.lr 0.007742362568691736 +832 28 negative_sampler.num_negs_per_pos 3.0 +832 28 training.batch_size 0.0 +832 29 model.embedding_dim 2.0 +832 29 model.scoring_fct_norm 1.0 +832 29 loss.margin 26.35399293414276 +832 29 loss.adversarial_temperature 0.7558807696485933 +832 29 optimizer.lr 0.0013462440250477288 +832 29 negative_sampler.num_negs_per_pos 13.0 +832 29 training.batch_size 2.0 +832 30 model.embedding_dim 0.0 +832 30 model.scoring_fct_norm 1.0 +832 30 loss.margin 18.524430023968996 +832 30 loss.adversarial_temperature 0.2662942043560493 +832 30 optimizer.lr 0.0024651433215822104 +832 30 negative_sampler.num_negs_per_pos 85.0 +832 30 training.batch_size 0.0 +832 31 model.embedding_dim 2.0 +832 31 model.scoring_fct_norm 2.0 +832 31 loss.margin 5.799782700108782 +832 31 loss.adversarial_temperature 0.636190111374272 +832 31 optimizer.lr 0.016046022385551497 +832 31 negative_sampler.num_negs_per_pos 31.0 +832 31 training.batch_size 2.0 +832 32 model.embedding_dim 0.0 +832 32 model.scoring_fct_norm 2.0 +832 32 loss.margin 23.62999524968293 +832 32 loss.adversarial_temperature 0.3312933328224281 +832 32 optimizer.lr 0.08427637969483695 +832 32 negative_sampler.num_negs_per_pos 57.0 +832 32 training.batch_size 1.0 +832 33 model.embedding_dim 0.0 +832 33 model.scoring_fct_norm 2.0 +832 33 loss.margin 11.011724963117597 +832 33 loss.adversarial_temperature 0.9340732401560954 +832 33 optimizer.lr 0.0016668700999253737 +832 33 negative_sampler.num_negs_per_pos 29.0 +832 33 training.batch_size 2.0 +832 34 model.embedding_dim 1.0 +832 34 model.scoring_fct_norm 1.0 +832 34 loss.margin 20.801539703612658 +832 34 loss.adversarial_temperature 0.18042389934164643 +832 34 optimizer.lr 0.0045960909488376505 +832 34 negative_sampler.num_negs_per_pos 70.0 +832 34 training.batch_size 2.0 +832 35 model.embedding_dim 0.0 +832 35 model.scoring_fct_norm 1.0 +832 35 loss.margin 
10.325306132897511 +832 35 loss.adversarial_temperature 0.1819748705694501 +832 35 optimizer.lr 0.03682675674678365 +832 35 negative_sampler.num_negs_per_pos 43.0 +832 35 training.batch_size 1.0 +832 36 model.embedding_dim 0.0 +832 36 model.scoring_fct_norm 1.0 +832 36 loss.margin 2.0598455299612137 +832 36 loss.adversarial_temperature 0.10899212396542239 +832 36 optimizer.lr 0.09516839451313724 +832 36 negative_sampler.num_negs_per_pos 19.0 +832 36 training.batch_size 1.0 +832 37 model.embedding_dim 2.0 +832 37 model.scoring_fct_norm 1.0 +832 37 loss.margin 25.85776180085021 +832 37 loss.adversarial_temperature 0.7114734784905854 +832 37 optimizer.lr 0.013643802629374724 +832 37 negative_sampler.num_negs_per_pos 23.0 +832 37 training.batch_size 0.0 +832 38 model.embedding_dim 2.0 +832 38 model.scoring_fct_norm 1.0 +832 38 loss.margin 8.367245748373847 +832 38 loss.adversarial_temperature 0.8820971212315404 +832 38 optimizer.lr 0.014141009346345683 +832 38 negative_sampler.num_negs_per_pos 51.0 +832 38 training.batch_size 2.0 +832 39 model.embedding_dim 0.0 +832 39 model.scoring_fct_norm 1.0 +832 39 loss.margin 8.657684254491054 +832 39 loss.adversarial_temperature 0.6463899468487975 +832 39 optimizer.lr 0.013110820016959082 +832 39 negative_sampler.num_negs_per_pos 52.0 +832 39 training.batch_size 0.0 +832 40 model.embedding_dim 2.0 +832 40 model.scoring_fct_norm 2.0 +832 40 loss.margin 18.303819956081334 +832 40 loss.adversarial_temperature 0.8152467699350303 +832 40 optimizer.lr 0.001419277540527742 +832 40 negative_sampler.num_negs_per_pos 33.0 +832 40 training.batch_size 1.0 +832 41 model.embedding_dim 0.0 +832 41 model.scoring_fct_norm 1.0 +832 41 loss.margin 14.732833337815062 +832 41 loss.adversarial_temperature 0.3721176177511182 +832 41 optimizer.lr 0.001618743076393665 +832 41 negative_sampler.num_negs_per_pos 40.0 +832 41 training.batch_size 1.0 +832 42 model.embedding_dim 2.0 +832 42 model.scoring_fct_norm 2.0 +832 42 loss.margin 22.327082547473424 
+832 42 loss.adversarial_temperature 0.8224066528123519 +832 42 optimizer.lr 0.006533437667736947 +832 42 negative_sampler.num_negs_per_pos 32.0 +832 42 training.batch_size 2.0 +832 43 model.embedding_dim 0.0 +832 43 model.scoring_fct_norm 2.0 +832 43 loss.margin 11.055272172613417 +832 43 loss.adversarial_temperature 0.4284454952785244 +832 43 optimizer.lr 0.016589507224218012 +832 43 negative_sampler.num_negs_per_pos 44.0 +832 43 training.batch_size 1.0 +832 44 model.embedding_dim 2.0 +832 44 model.scoring_fct_norm 1.0 +832 44 loss.margin 8.612111115725252 +832 44 loss.adversarial_temperature 0.9545378070091552 +832 44 optimizer.lr 0.002571522835386482 +832 44 negative_sampler.num_negs_per_pos 49.0 +832 44 training.batch_size 0.0 +832 45 model.embedding_dim 2.0 +832 45 model.scoring_fct_norm 1.0 +832 45 loss.margin 23.51940733327264 +832 45 loss.adversarial_temperature 0.682346472243315 +832 45 optimizer.lr 0.09817241921403731 +832 45 negative_sampler.num_negs_per_pos 15.0 +832 45 training.batch_size 1.0 +832 46 model.embedding_dim 0.0 +832 46 model.scoring_fct_norm 2.0 +832 46 loss.margin 22.481883738111083 +832 46 loss.adversarial_temperature 0.7057813750707418 +832 46 optimizer.lr 0.041223714209497817 +832 46 negative_sampler.num_negs_per_pos 10.0 +832 46 training.batch_size 1.0 +832 47 model.embedding_dim 2.0 +832 47 model.scoring_fct_norm 1.0 +832 47 loss.margin 11.023425334606713 +832 47 loss.adversarial_temperature 0.9983108582603808 +832 47 optimizer.lr 0.011767626433393484 +832 47 negative_sampler.num_negs_per_pos 32.0 +832 47 training.batch_size 0.0 +832 48 model.embedding_dim 1.0 +832 48 model.scoring_fct_norm 1.0 +832 48 loss.margin 11.143264644035849 +832 48 loss.adversarial_temperature 0.7680973018384968 +832 48 optimizer.lr 0.04782873482585229 +832 48 negative_sampler.num_negs_per_pos 54.0 +832 48 training.batch_size 1.0 +832 49 model.embedding_dim 0.0 +832 49 model.scoring_fct_norm 2.0 +832 49 loss.margin 28.83529583008327 +832 49 
loss.adversarial_temperature 0.7543226637530607 +832 49 optimizer.lr 0.01581368834455655 +832 49 negative_sampler.num_negs_per_pos 19.0 +832 49 training.batch_size 0.0 +832 50 model.embedding_dim 1.0 +832 50 model.scoring_fct_norm 1.0 +832 50 loss.margin 8.603838340678669 +832 50 loss.adversarial_temperature 0.45839829411681643 +832 50 optimizer.lr 0.007494849013714379 +832 50 negative_sampler.num_negs_per_pos 58.0 +832 50 training.batch_size 2.0 +832 51 model.embedding_dim 1.0 +832 51 model.scoring_fct_norm 1.0 +832 51 loss.margin 15.668487021243909 +832 51 loss.adversarial_temperature 0.5758458826486113 +832 51 optimizer.lr 0.031415846664920276 +832 51 negative_sampler.num_negs_per_pos 18.0 +832 51 training.batch_size 0.0 +832 52 model.embedding_dim 0.0 +832 52 model.scoring_fct_norm 2.0 +832 52 loss.margin 25.237092183799987 +832 52 loss.adversarial_temperature 0.27970336295349607 +832 52 optimizer.lr 0.03549727836387244 +832 52 negative_sampler.num_negs_per_pos 20.0 +832 52 training.batch_size 0.0 +832 53 model.embedding_dim 1.0 +832 53 model.scoring_fct_norm 1.0 +832 53 loss.margin 10.533033475112164 +832 53 loss.adversarial_temperature 0.4206500004943544 +832 53 optimizer.lr 0.0014541766709202921 +832 53 negative_sampler.num_negs_per_pos 76.0 +832 53 training.batch_size 0.0 +832 54 model.embedding_dim 0.0 +832 54 model.scoring_fct_norm 1.0 +832 54 loss.margin 24.9927486447547 +832 54 loss.adversarial_temperature 0.25290575096510953 +832 54 optimizer.lr 0.041967756898893836 +832 54 negative_sampler.num_negs_per_pos 80.0 +832 54 training.batch_size 0.0 +832 55 model.embedding_dim 0.0 +832 55 model.scoring_fct_norm 2.0 +832 55 loss.margin 24.145971184890612 +832 55 loss.adversarial_temperature 0.6344691097352353 +832 55 optimizer.lr 0.0060923329427835515 +832 55 negative_sampler.num_negs_per_pos 51.0 +832 55 training.batch_size 2.0 +832 56 model.embedding_dim 0.0 +832 56 model.scoring_fct_norm 1.0 +832 56 loss.margin 20.895981483999858 +832 56 
loss.adversarial_temperature 0.792695327040062 +832 56 optimizer.lr 0.020486154435328287 +832 56 negative_sampler.num_negs_per_pos 87.0 +832 56 training.batch_size 2.0 +832 57 model.embedding_dim 0.0 +832 57 model.scoring_fct_norm 1.0 +832 57 loss.margin 14.799685466932058 +832 57 loss.adversarial_temperature 0.17675710219296936 +832 57 optimizer.lr 0.0013341340462416234 +832 57 negative_sampler.num_negs_per_pos 72.0 +832 57 training.batch_size 2.0 +832 58 model.embedding_dim 0.0 +832 58 model.scoring_fct_norm 1.0 +832 58 loss.margin 8.80420770404005 +832 58 loss.adversarial_temperature 0.6733611046990358 +832 58 optimizer.lr 0.0047497694840319266 +832 58 negative_sampler.num_negs_per_pos 39.0 +832 58 training.batch_size 1.0 +832 59 model.embedding_dim 1.0 +832 59 model.scoring_fct_norm 2.0 +832 59 loss.margin 12.686455238114105 +832 59 loss.adversarial_temperature 0.7869220539814611 +832 59 optimizer.lr 0.0880198125621997 +832 59 negative_sampler.num_negs_per_pos 84.0 +832 59 training.batch_size 0.0 +832 60 model.embedding_dim 2.0 +832 60 model.scoring_fct_norm 1.0 +832 60 loss.margin 27.776609357049374 +832 60 loss.adversarial_temperature 0.6661509718307936 +832 60 optimizer.lr 0.0029366541011158443 +832 60 negative_sampler.num_negs_per_pos 16.0 +832 60 training.batch_size 1.0 +832 61 model.embedding_dim 2.0 +832 61 model.scoring_fct_norm 1.0 +832 61 loss.margin 27.878427862751995 +832 61 loss.adversarial_temperature 0.4446771705536495 +832 61 optimizer.lr 0.002472236954286563 +832 61 negative_sampler.num_negs_per_pos 81.0 +832 61 training.batch_size 0.0 +832 62 model.embedding_dim 0.0 +832 62 model.scoring_fct_norm 2.0 +832 62 loss.margin 6.677037298047572 +832 62 loss.adversarial_temperature 0.7827224402481054 +832 62 optimizer.lr 0.04098554526965183 +832 62 negative_sampler.num_negs_per_pos 93.0 +832 62 training.batch_size 1.0 +832 63 model.embedding_dim 0.0 +832 63 model.scoring_fct_norm 2.0 +832 63 loss.margin 8.523190981647065 +832 63 
loss.adversarial_temperature 0.7338347480626469 +832 63 optimizer.lr 0.0026014395396209726 +832 63 negative_sampler.num_negs_per_pos 22.0 +832 63 training.batch_size 1.0 +832 64 model.embedding_dim 2.0 +832 64 model.scoring_fct_norm 1.0 +832 64 loss.margin 13.950843788527402 +832 64 loss.adversarial_temperature 0.11362125663516613 +832 64 optimizer.lr 0.0010210448926963578 +832 64 negative_sampler.num_negs_per_pos 74.0 +832 64 training.batch_size 2.0 +832 65 model.embedding_dim 1.0 +832 65 model.scoring_fct_norm 2.0 +832 65 loss.margin 3.598297141209713 +832 65 loss.adversarial_temperature 0.5248990312622047 +832 65 optimizer.lr 0.0024379614590109484 +832 65 negative_sampler.num_negs_per_pos 67.0 +832 65 training.batch_size 2.0 +832 66 model.embedding_dim 2.0 +832 66 model.scoring_fct_norm 1.0 +832 66 loss.margin 21.2214274679282 +832 66 loss.adversarial_temperature 0.24937352702731397 +832 66 optimizer.lr 0.006507974001272185 +832 66 negative_sampler.num_negs_per_pos 41.0 +832 66 training.batch_size 0.0 +832 67 model.embedding_dim 1.0 +832 67 model.scoring_fct_norm 2.0 +832 67 loss.margin 28.180119937993684 +832 67 loss.adversarial_temperature 0.9944555882269706 +832 67 optimizer.lr 0.055889963774569354 +832 67 negative_sampler.num_negs_per_pos 43.0 +832 67 training.batch_size 1.0 +832 68 model.embedding_dim 1.0 +832 68 model.scoring_fct_norm 1.0 +832 68 loss.margin 2.7168874480716605 +832 68 loss.adversarial_temperature 0.9993024546326131 +832 68 optimizer.lr 0.039490894599221775 +832 68 negative_sampler.num_negs_per_pos 98.0 +832 68 training.batch_size 2.0 +832 69 model.embedding_dim 2.0 +832 69 model.scoring_fct_norm 1.0 +832 69 loss.margin 20.15918963271528 +832 69 loss.adversarial_temperature 0.18010645146685533 +832 69 optimizer.lr 0.016957225014728988 +832 69 negative_sampler.num_negs_per_pos 81.0 +832 69 training.batch_size 0.0 +832 70 model.embedding_dim 1.0 +832 70 model.scoring_fct_norm 2.0 +832 70 loss.margin 29.284248661110073 +832 70 
loss.adversarial_temperature 0.5623972356211752 +832 70 optimizer.lr 0.006953296028022311 +832 70 negative_sampler.num_negs_per_pos 60.0 +832 70 training.batch_size 1.0 +832 71 model.embedding_dim 0.0 +832 71 model.scoring_fct_norm 2.0 +832 71 loss.margin 27.923112975704576 +832 71 loss.adversarial_temperature 0.9380024813309557 +832 71 optimizer.lr 0.004195907332788941 +832 71 negative_sampler.num_negs_per_pos 79.0 +832 71 training.batch_size 0.0 +832 72 model.embedding_dim 2.0 +832 72 model.scoring_fct_norm 2.0 +832 72 loss.margin 17.01773611325159 +832 72 loss.adversarial_temperature 0.7326477971759564 +832 72 optimizer.lr 0.012293025477368397 +832 72 negative_sampler.num_negs_per_pos 89.0 +832 72 training.batch_size 2.0 +832 73 model.embedding_dim 2.0 +832 73 model.scoring_fct_norm 1.0 +832 73 loss.margin 14.592903000542508 +832 73 loss.adversarial_temperature 0.4827464747756999 +832 73 optimizer.lr 0.03657663598217394 +832 73 negative_sampler.num_negs_per_pos 58.0 +832 73 training.batch_size 0.0 +832 74 model.embedding_dim 2.0 +832 74 model.scoring_fct_norm 2.0 +832 74 loss.margin 6.756083150313735 +832 74 loss.adversarial_temperature 0.1989958911395632 +832 74 optimizer.lr 0.0018240993503739603 +832 74 negative_sampler.num_negs_per_pos 95.0 +832 74 training.batch_size 0.0 +832 75 model.embedding_dim 0.0 +832 75 model.scoring_fct_norm 2.0 +832 75 loss.margin 6.365996458790282 +832 75 loss.adversarial_temperature 0.3647716005144871 +832 75 optimizer.lr 0.001556385986499466 +832 75 negative_sampler.num_negs_per_pos 93.0 +832 75 training.batch_size 2.0 +832 76 model.embedding_dim 1.0 +832 76 model.scoring_fct_norm 1.0 +832 76 loss.margin 19.966136699100502 +832 76 loss.adversarial_temperature 0.5509619235077777 +832 76 optimizer.lr 0.006494467213806818 +832 76 negative_sampler.num_negs_per_pos 95.0 +832 76 training.batch_size 1.0 +832 77 model.embedding_dim 2.0 +832 77 model.scoring_fct_norm 1.0 +832 77 loss.margin 18.050650187998357 +832 77 
loss.adversarial_temperature 0.9762576599624335 +832 77 optimizer.lr 0.041211279062881655 +832 77 negative_sampler.num_negs_per_pos 97.0 +832 77 training.batch_size 2.0 +832 78 model.embedding_dim 1.0 +832 78 model.scoring_fct_norm 2.0 +832 78 loss.margin 2.9985124037853508 +832 78 loss.adversarial_temperature 0.7673933305937505 +832 78 optimizer.lr 0.008988597098850254 +832 78 negative_sampler.num_negs_per_pos 25.0 +832 78 training.batch_size 0.0 +832 79 model.embedding_dim 1.0 +832 79 model.scoring_fct_norm 2.0 +832 79 loss.margin 16.74781154906219 +832 79 loss.adversarial_temperature 0.39437016855073526 +832 79 optimizer.lr 0.010762159138856461 +832 79 negative_sampler.num_negs_per_pos 33.0 +832 79 training.batch_size 1.0 +832 80 model.embedding_dim 2.0 +832 80 model.scoring_fct_norm 1.0 +832 80 loss.margin 4.268021633350218 +832 80 loss.adversarial_temperature 0.12068970319980514 +832 80 optimizer.lr 0.0010924142442865414 +832 80 negative_sampler.num_negs_per_pos 90.0 +832 80 training.batch_size 0.0 +832 81 model.embedding_dim 0.0 +832 81 model.scoring_fct_norm 1.0 +832 81 loss.margin 8.86143242277033 +832 81 loss.adversarial_temperature 0.430243212548883 +832 81 optimizer.lr 0.0018695041364048678 +832 81 negative_sampler.num_negs_per_pos 64.0 +832 81 training.batch_size 1.0 +832 82 model.embedding_dim 0.0 +832 82 model.scoring_fct_norm 1.0 +832 82 loss.margin 14.799452700500549 +832 82 loss.adversarial_temperature 0.1381607073700774 +832 82 optimizer.lr 0.002712961754837437 +832 82 negative_sampler.num_negs_per_pos 63.0 +832 82 training.batch_size 1.0 +832 83 model.embedding_dim 0.0 +832 83 model.scoring_fct_norm 1.0 +832 83 loss.margin 14.60441248115711 +832 83 loss.adversarial_temperature 0.8418381940789267 +832 83 optimizer.lr 0.07639264241538185 +832 83 negative_sampler.num_negs_per_pos 58.0 +832 83 training.batch_size 1.0 +832 84 model.embedding_dim 1.0 +832 84 model.scoring_fct_norm 2.0 +832 84 loss.margin 21.230848532043193 +832 84 
loss.adversarial_temperature 0.7201304129385793 +832 84 optimizer.lr 0.0032308120850052446 +832 84 negative_sampler.num_negs_per_pos 59.0 +832 84 training.batch_size 0.0 +832 85 model.embedding_dim 1.0 +832 85 model.scoring_fct_norm 1.0 +832 85 loss.margin 4.121883902882841 +832 85 loss.adversarial_temperature 0.5269174846839375 +832 85 optimizer.lr 0.052072896032125025 +832 85 negative_sampler.num_negs_per_pos 21.0 +832 85 training.batch_size 2.0 +832 86 model.embedding_dim 2.0 +832 86 model.scoring_fct_norm 1.0 +832 86 loss.margin 19.17359127925003 +832 86 loss.adversarial_temperature 0.46297475075706995 +832 86 optimizer.lr 0.010492688040862019 +832 86 negative_sampler.num_negs_per_pos 83.0 +832 86 training.batch_size 1.0 +832 87 model.embedding_dim 2.0 +832 87 model.scoring_fct_norm 1.0 +832 87 loss.margin 20.71321667353 +832 87 loss.adversarial_temperature 0.12379802014393465 +832 87 optimizer.lr 0.00845699154177684 +832 87 negative_sampler.num_negs_per_pos 21.0 +832 87 training.batch_size 0.0 +832 88 model.embedding_dim 1.0 +832 88 model.scoring_fct_norm 1.0 +832 88 loss.margin 21.102972162335963 +832 88 loss.adversarial_temperature 0.5085871799973364 +832 88 optimizer.lr 0.010484961652538144 +832 88 negative_sampler.num_negs_per_pos 80.0 +832 88 training.batch_size 0.0 +832 89 model.embedding_dim 2.0 +832 89 model.scoring_fct_norm 2.0 +832 89 loss.margin 23.31115590077564 +832 89 loss.adversarial_temperature 0.3196799650796374 +832 89 optimizer.lr 0.008864936405804017 +832 89 negative_sampler.num_negs_per_pos 18.0 +832 89 training.batch_size 2.0 +832 90 model.embedding_dim 1.0 +832 90 model.scoring_fct_norm 2.0 +832 90 loss.margin 17.31635967024144 +832 90 loss.adversarial_temperature 0.9401712072655555 +832 90 optimizer.lr 0.001097156063041038 +832 90 negative_sampler.num_negs_per_pos 83.0 +832 90 training.batch_size 0.0 +832 91 model.embedding_dim 0.0 +832 91 model.scoring_fct_norm 2.0 +832 91 loss.margin 5.845919496972863 +832 91 
loss.adversarial_temperature 0.6073582815925187 +832 91 optimizer.lr 0.03145162323288456 +832 91 negative_sampler.num_negs_per_pos 28.0 +832 91 training.batch_size 1.0 +832 92 model.embedding_dim 1.0 +832 92 model.scoring_fct_norm 1.0 +832 92 loss.margin 29.34825864573367 +832 92 loss.adversarial_temperature 0.8912991530926312 +832 92 optimizer.lr 0.014716323780554801 +832 92 negative_sampler.num_negs_per_pos 28.0 +832 92 training.batch_size 0.0 +832 93 model.embedding_dim 0.0 +832 93 model.scoring_fct_norm 2.0 +832 93 loss.margin 29.22137045727108 +832 93 loss.adversarial_temperature 0.45304313494791115 +832 93 optimizer.lr 0.012719244735052563 +832 93 negative_sampler.num_negs_per_pos 83.0 +832 93 training.batch_size 2.0 +832 94 model.embedding_dim 0.0 +832 94 model.scoring_fct_norm 1.0 +832 94 loss.margin 4.896892101126557 +832 94 loss.adversarial_temperature 0.6499529019308478 +832 94 optimizer.lr 0.013084667379487637 +832 94 negative_sampler.num_negs_per_pos 1.0 +832 94 training.batch_size 2.0 +832 95 model.embedding_dim 0.0 +832 95 model.scoring_fct_norm 2.0 +832 95 loss.margin 3.094084294279139 +832 95 loss.adversarial_temperature 0.10384466461040771 +832 95 optimizer.lr 0.034736463643079495 +832 95 negative_sampler.num_negs_per_pos 17.0 +832 95 training.batch_size 2.0 +832 96 model.embedding_dim 2.0 +832 96 model.scoring_fct_norm 1.0 +832 96 loss.margin 17.567319459109747 +832 96 loss.adversarial_temperature 0.11295628311208303 +832 96 optimizer.lr 0.017836877646622614 +832 96 negative_sampler.num_negs_per_pos 64.0 +832 96 training.batch_size 1.0 +832 97 model.embedding_dim 0.0 +832 97 model.scoring_fct_norm 2.0 +832 97 loss.margin 12.450443029467522 +832 97 loss.adversarial_temperature 0.6283206452484583 +832 97 optimizer.lr 0.021630420628872996 +832 97 negative_sampler.num_negs_per_pos 51.0 +832 97 training.batch_size 0.0 +832 98 model.embedding_dim 0.0 +832 98 model.scoring_fct_norm 2.0 +832 98 loss.margin 20.782689306607065 +832 98 
loss.adversarial_temperature 0.4881848463734393 +832 98 optimizer.lr 0.05068699636781114 +832 98 negative_sampler.num_negs_per_pos 8.0 +832 98 training.batch_size 0.0 +832 99 model.embedding_dim 1.0 +832 99 model.scoring_fct_norm 2.0 +832 99 loss.margin 27.523667215555417 +832 99 loss.adversarial_temperature 0.8516910191799643 +832 99 optimizer.lr 0.023621748259294764 +832 99 negative_sampler.num_negs_per_pos 60.0 +832 99 training.batch_size 0.0 +832 100 model.embedding_dim 2.0 +832 100 model.scoring_fct_norm 1.0 +832 100 loss.margin 2.4133448966447126 +832 100 loss.adversarial_temperature 0.194180882241062 +832 100 optimizer.lr 0.004680960369113946 +832 100 negative_sampler.num_negs_per_pos 44.0 +832 100 training.batch_size 1.0 +832 1 dataset """kinships""" +832 1 model """transe""" +832 1 loss """nssa""" +832 1 regularizer """no""" +832 1 optimizer """adam""" +832 1 training_loop """owa""" +832 1 negative_sampler """basic""" +832 1 evaluator """rankbased""" +832 2 dataset """kinships""" +832 2 model """transe""" +832 2 loss """nssa""" +832 2 regularizer """no""" +832 2 optimizer """adam""" +832 2 training_loop """owa""" +832 2 negative_sampler """basic""" +832 2 evaluator """rankbased""" +832 3 dataset """kinships""" +832 3 model """transe""" +832 3 loss """nssa""" +832 3 regularizer """no""" +832 3 optimizer """adam""" +832 3 training_loop """owa""" +832 3 negative_sampler """basic""" +832 3 evaluator """rankbased""" +832 4 dataset """kinships""" +832 4 model """transe""" +832 4 loss """nssa""" +832 4 regularizer """no""" +832 4 optimizer """adam""" +832 4 training_loop """owa""" +832 4 negative_sampler """basic""" +832 4 evaluator """rankbased""" +832 5 dataset """kinships""" +832 5 model """transe""" +832 5 loss """nssa""" +832 5 regularizer """no""" +832 5 optimizer """adam""" +832 5 training_loop """owa""" +832 5 negative_sampler """basic""" +832 5 evaluator """rankbased""" +832 6 dataset """kinships""" +832 6 model """transe""" +832 6 loss """nssa""" +832 6 
regularizer """no""" +832 6 optimizer """adam""" +832 6 training_loop """owa""" +832 6 negative_sampler """basic""" +832 6 evaluator """rankbased""" +832 7 dataset """kinships""" +832 7 model """transe""" +832 7 loss """nssa""" +832 7 regularizer """no""" +832 7 optimizer """adam""" +832 7 training_loop """owa""" +832 7 negative_sampler """basic""" +832 7 evaluator """rankbased""" +832 8 dataset """kinships""" +832 8 model """transe""" +832 8 loss """nssa""" +832 8 regularizer """no""" +832 8 optimizer """adam""" +832 8 training_loop """owa""" +832 8 negative_sampler """basic""" +832 8 evaluator """rankbased""" +832 9 dataset """kinships""" +832 9 model """transe""" +832 9 loss """nssa""" +832 9 regularizer """no""" +832 9 optimizer """adam""" +832 9 training_loop """owa""" +832 9 negative_sampler """basic""" +832 9 evaluator """rankbased""" +832 10 dataset """kinships""" +832 10 model """transe""" +832 10 loss """nssa""" +832 10 regularizer """no""" +832 10 optimizer """adam""" +832 10 training_loop """owa""" +832 10 negative_sampler """basic""" +832 10 evaluator """rankbased""" +832 11 dataset """kinships""" +832 11 model """transe""" +832 11 loss """nssa""" +832 11 regularizer """no""" +832 11 optimizer """adam""" +832 11 training_loop """owa""" +832 11 negative_sampler """basic""" +832 11 evaluator """rankbased""" +832 12 dataset """kinships""" +832 12 model """transe""" +832 12 loss """nssa""" +832 12 regularizer """no""" +832 12 optimizer """adam""" +832 12 training_loop """owa""" +832 12 negative_sampler """basic""" +832 12 evaluator """rankbased""" +832 13 dataset """kinships""" +832 13 model """transe""" +832 13 loss """nssa""" +832 13 regularizer """no""" +832 13 optimizer """adam""" +832 13 training_loop """owa""" +832 13 negative_sampler """basic""" +832 13 evaluator """rankbased""" +832 14 dataset """kinships""" +832 14 model """transe""" +832 14 loss """nssa""" +832 14 regularizer """no""" +832 14 optimizer """adam""" +832 14 training_loop """owa""" 
+832 14 negative_sampler """basic""" +832 14 evaluator """rankbased""" +832 15 dataset """kinships""" +832 15 model """transe""" +832 15 loss """nssa""" +832 15 regularizer """no""" +832 15 optimizer """adam""" +832 15 training_loop """owa""" +832 15 negative_sampler """basic""" +832 15 evaluator """rankbased""" +832 16 dataset """kinships""" +832 16 model """transe""" +832 16 loss """nssa""" +832 16 regularizer """no""" +832 16 optimizer """adam""" +832 16 training_loop """owa""" +832 16 negative_sampler """basic""" +832 16 evaluator """rankbased""" +832 17 dataset """kinships""" +832 17 model """transe""" +832 17 loss """nssa""" +832 17 regularizer """no""" +832 17 optimizer """adam""" +832 17 training_loop """owa""" +832 17 negative_sampler """basic""" +832 17 evaluator """rankbased""" +832 18 dataset """kinships""" +832 18 model """transe""" +832 18 loss """nssa""" +832 18 regularizer """no""" +832 18 optimizer """adam""" +832 18 training_loop """owa""" +832 18 negative_sampler """basic""" +832 18 evaluator """rankbased""" +832 19 dataset """kinships""" +832 19 model """transe""" +832 19 loss """nssa""" +832 19 regularizer """no""" +832 19 optimizer """adam""" +832 19 training_loop """owa""" +832 19 negative_sampler """basic""" +832 19 evaluator """rankbased""" +832 20 dataset """kinships""" +832 20 model """transe""" +832 20 loss """nssa""" +832 20 regularizer """no""" +832 20 optimizer """adam""" +832 20 training_loop """owa""" +832 20 negative_sampler """basic""" +832 20 evaluator """rankbased""" +832 21 dataset """kinships""" +832 21 model """transe""" +832 21 loss """nssa""" +832 21 regularizer """no""" +832 21 optimizer """adam""" +832 21 training_loop """owa""" +832 21 negative_sampler """basic""" +832 21 evaluator """rankbased""" +832 22 dataset """kinships""" +832 22 model """transe""" +832 22 loss """nssa""" +832 22 regularizer """no""" +832 22 optimizer """adam""" +832 22 training_loop """owa""" +832 22 negative_sampler """basic""" +832 22 evaluator 
"""rankbased""" +832 23 dataset """kinships""" +832 23 model """transe""" +832 23 loss """nssa""" +832 23 regularizer """no""" +832 23 optimizer """adam""" +832 23 training_loop """owa""" +832 23 negative_sampler """basic""" +832 23 evaluator """rankbased""" +832 24 dataset """kinships""" +832 24 model """transe""" +832 24 loss """nssa""" +832 24 regularizer """no""" +832 24 optimizer """adam""" +832 24 training_loop """owa""" +832 24 negative_sampler """basic""" +832 24 evaluator """rankbased""" +832 25 dataset """kinships""" +832 25 model """transe""" +832 25 loss """nssa""" +832 25 regularizer """no""" +832 25 optimizer """adam""" +832 25 training_loop """owa""" +832 25 negative_sampler """basic""" +832 25 evaluator """rankbased""" +832 26 dataset """kinships""" +832 26 model """transe""" +832 26 loss """nssa""" +832 26 regularizer """no""" +832 26 optimizer """adam""" +832 26 training_loop """owa""" +832 26 negative_sampler """basic""" +832 26 evaluator """rankbased""" +832 27 dataset """kinships""" +832 27 model """transe""" +832 27 loss """nssa""" +832 27 regularizer """no""" +832 27 optimizer """adam""" +832 27 training_loop """owa""" +832 27 negative_sampler """basic""" +832 27 evaluator """rankbased""" +832 28 dataset """kinships""" +832 28 model """transe""" +832 28 loss """nssa""" +832 28 regularizer """no""" +832 28 optimizer """adam""" +832 28 training_loop """owa""" +832 28 negative_sampler """basic""" +832 28 evaluator """rankbased""" +832 29 dataset """kinships""" +832 29 model """transe""" +832 29 loss """nssa""" +832 29 regularizer """no""" +832 29 optimizer """adam""" +832 29 training_loop """owa""" +832 29 negative_sampler """basic""" +832 29 evaluator """rankbased""" +832 30 dataset """kinships""" +832 30 model """transe""" +832 30 loss """nssa""" +832 30 regularizer """no""" +832 30 optimizer """adam""" +832 30 training_loop """owa""" +832 30 negative_sampler """basic""" +832 30 evaluator """rankbased""" +832 31 dataset """kinships""" +832 31 
model """transe""" +832 31 loss """nssa""" +832 31 regularizer """no""" +832 31 optimizer """adam""" +832 31 training_loop """owa""" +832 31 negative_sampler """basic""" +832 31 evaluator """rankbased""" +832 32 dataset """kinships""" +832 32 model """transe""" +832 32 loss """nssa""" +832 32 regularizer """no""" +832 32 optimizer """adam""" +832 32 training_loop """owa""" +832 32 negative_sampler """basic""" +832 32 evaluator """rankbased""" +832 33 dataset """kinships""" +832 33 model """transe""" +832 33 loss """nssa""" +832 33 regularizer """no""" +832 33 optimizer """adam""" +832 33 training_loop """owa""" +832 33 negative_sampler """basic""" +832 33 evaluator """rankbased""" +832 34 dataset """kinships""" +832 34 model """transe""" +832 34 loss """nssa""" +832 34 regularizer """no""" +832 34 optimizer """adam""" +832 34 training_loop """owa""" +832 34 negative_sampler """basic""" +832 34 evaluator """rankbased""" +832 35 dataset """kinships""" +832 35 model """transe""" +832 35 loss """nssa""" +832 35 regularizer """no""" +832 35 optimizer """adam""" +832 35 training_loop """owa""" +832 35 negative_sampler """basic""" +832 35 evaluator """rankbased""" +832 36 dataset """kinships""" +832 36 model """transe""" +832 36 loss """nssa""" +832 36 regularizer """no""" +832 36 optimizer """adam""" +832 36 training_loop """owa""" +832 36 negative_sampler """basic""" +832 36 evaluator """rankbased""" +832 37 dataset """kinships""" +832 37 model """transe""" +832 37 loss """nssa""" +832 37 regularizer """no""" +832 37 optimizer """adam""" +832 37 training_loop """owa""" +832 37 negative_sampler """basic""" +832 37 evaluator """rankbased""" +832 38 dataset """kinships""" +832 38 model """transe""" +832 38 loss """nssa""" +832 38 regularizer """no""" +832 38 optimizer """adam""" +832 38 training_loop """owa""" +832 38 negative_sampler """basic""" +832 38 evaluator """rankbased""" +832 39 dataset """kinships""" +832 39 model """transe""" +832 39 loss """nssa""" +832 39 
regularizer """no""" +832 39 optimizer """adam""" +832 39 training_loop """owa""" +832 39 negative_sampler """basic""" +832 39 evaluator """rankbased""" +832 40 dataset """kinships""" +832 40 model """transe""" +832 40 loss """nssa""" +832 40 regularizer """no""" +832 40 optimizer """adam""" +832 40 training_loop """owa""" +832 40 negative_sampler """basic""" +832 40 evaluator """rankbased""" +832 41 dataset """kinships""" +832 41 model """transe""" +832 41 loss """nssa""" +832 41 regularizer """no""" +832 41 optimizer """adam""" +832 41 training_loop """owa""" +832 41 negative_sampler """basic""" +832 41 evaluator """rankbased""" +832 42 dataset """kinships""" +832 42 model """transe""" +832 42 loss """nssa""" +832 42 regularizer """no""" +832 42 optimizer """adam""" +832 42 training_loop """owa""" +832 42 negative_sampler """basic""" +832 42 evaluator """rankbased""" +832 43 dataset """kinships""" +832 43 model """transe""" +832 43 loss """nssa""" +832 43 regularizer """no""" +832 43 optimizer """adam""" +832 43 training_loop """owa""" +832 43 negative_sampler """basic""" +832 43 evaluator """rankbased""" +832 44 dataset """kinships""" +832 44 model """transe""" +832 44 loss """nssa""" +832 44 regularizer """no""" +832 44 optimizer """adam""" +832 44 training_loop """owa""" +832 44 negative_sampler """basic""" +832 44 evaluator """rankbased""" +832 45 dataset """kinships""" +832 45 model """transe""" +832 45 loss """nssa""" +832 45 regularizer """no""" +832 45 optimizer """adam""" +832 45 training_loop """owa""" +832 45 negative_sampler """basic""" +832 45 evaluator """rankbased""" +832 46 dataset """kinships""" +832 46 model """transe""" +832 46 loss """nssa""" +832 46 regularizer """no""" +832 46 optimizer """adam""" +832 46 training_loop """owa""" +832 46 negative_sampler """basic""" +832 46 evaluator """rankbased""" +832 47 dataset """kinships""" +832 47 model """transe""" +832 47 loss """nssa""" +832 47 regularizer """no""" +832 47 optimizer """adam""" +832 
47 training_loop """owa""" +832 47 negative_sampler """basic""" +832 47 evaluator """rankbased""" +832 48 dataset """kinships""" +832 48 model """transe""" +832 48 loss """nssa""" +832 48 regularizer """no""" +832 48 optimizer """adam""" +832 48 training_loop """owa""" +832 48 negative_sampler """basic""" +832 48 evaluator """rankbased""" +832 49 dataset """kinships""" +832 49 model """transe""" +832 49 loss """nssa""" +832 49 regularizer """no""" +832 49 optimizer """adam""" +832 49 training_loop """owa""" +832 49 negative_sampler """basic""" +832 49 evaluator """rankbased""" +832 50 dataset """kinships""" +832 50 model """transe""" +832 50 loss """nssa""" +832 50 regularizer """no""" +832 50 optimizer """adam""" +832 50 training_loop """owa""" +832 50 negative_sampler """basic""" +832 50 evaluator """rankbased""" +832 51 dataset """kinships""" +832 51 model """transe""" +832 51 loss """nssa""" +832 51 regularizer """no""" +832 51 optimizer """adam""" +832 51 training_loop """owa""" +832 51 negative_sampler """basic""" +832 51 evaluator """rankbased""" +832 52 dataset """kinships""" +832 52 model """transe""" +832 52 loss """nssa""" +832 52 regularizer """no""" +832 52 optimizer """adam""" +832 52 training_loop """owa""" +832 52 negative_sampler """basic""" +832 52 evaluator """rankbased""" +832 53 dataset """kinships""" +832 53 model """transe""" +832 53 loss """nssa""" +832 53 regularizer """no""" +832 53 optimizer """adam""" +832 53 training_loop """owa""" +832 53 negative_sampler """basic""" +832 53 evaluator """rankbased""" +832 54 dataset """kinships""" +832 54 model """transe""" +832 54 loss """nssa""" +832 54 regularizer """no""" +832 54 optimizer """adam""" +832 54 training_loop """owa""" +832 54 negative_sampler """basic""" +832 54 evaluator """rankbased""" +832 55 dataset """kinships""" +832 55 model """transe""" +832 55 loss """nssa""" +832 55 regularizer """no""" +832 55 optimizer """adam""" +832 55 training_loop """owa""" +832 55 negative_sampler 
"""basic""" +832 55 evaluator """rankbased""" +832 56 dataset """kinships""" +832 56 model """transe""" +832 56 loss """nssa""" +832 56 regularizer """no""" +832 56 optimizer """adam""" +832 56 training_loop """owa""" +832 56 negative_sampler """basic""" +832 56 evaluator """rankbased""" +832 57 dataset """kinships""" +832 57 model """transe""" +832 57 loss """nssa""" +832 57 regularizer """no""" +832 57 optimizer """adam""" +832 57 training_loop """owa""" +832 57 negative_sampler """basic""" +832 57 evaluator """rankbased""" +832 58 dataset """kinships""" +832 58 model """transe""" +832 58 loss """nssa""" +832 58 regularizer """no""" +832 58 optimizer """adam""" +832 58 training_loop """owa""" +832 58 negative_sampler """basic""" +832 58 evaluator """rankbased""" +832 59 dataset """kinships""" +832 59 model """transe""" +832 59 loss """nssa""" +832 59 regularizer """no""" +832 59 optimizer """adam""" +832 59 training_loop """owa""" +832 59 negative_sampler """basic""" +832 59 evaluator """rankbased""" +832 60 dataset """kinships""" +832 60 model """transe""" +832 60 loss """nssa""" +832 60 regularizer """no""" +832 60 optimizer """adam""" +832 60 training_loop """owa""" +832 60 negative_sampler """basic""" +832 60 evaluator """rankbased""" +832 61 dataset """kinships""" +832 61 model """transe""" +832 61 loss """nssa""" +832 61 regularizer """no""" +832 61 optimizer """adam""" +832 61 training_loop """owa""" +832 61 negative_sampler """basic""" +832 61 evaluator """rankbased""" +832 62 dataset """kinships""" +832 62 model """transe""" +832 62 loss """nssa""" +832 62 regularizer """no""" +832 62 optimizer """adam""" +832 62 training_loop """owa""" +832 62 negative_sampler """basic""" +832 62 evaluator """rankbased""" +832 63 dataset """kinships""" +832 63 model """transe""" +832 63 loss """nssa""" +832 63 regularizer """no""" +832 63 optimizer """adam""" +832 63 training_loop """owa""" +832 63 negative_sampler """basic""" +832 63 evaluator """rankbased""" +832 64 
dataset """kinships""" +832 64 model """transe""" +832 64 loss """nssa""" +832 64 regularizer """no""" +832 64 optimizer """adam""" +832 64 training_loop """owa""" +832 64 negative_sampler """basic""" +832 64 evaluator """rankbased""" +832 65 dataset """kinships""" +832 65 model """transe""" +832 65 loss """nssa""" +832 65 regularizer """no""" +832 65 optimizer """adam""" +832 65 training_loop """owa""" +832 65 negative_sampler """basic""" +832 65 evaluator """rankbased""" +832 66 dataset """kinships""" +832 66 model """transe""" +832 66 loss """nssa""" +832 66 regularizer """no""" +832 66 optimizer """adam""" +832 66 training_loop """owa""" +832 66 negative_sampler """basic""" +832 66 evaluator """rankbased""" +832 67 dataset """kinships""" +832 67 model """transe""" +832 67 loss """nssa""" +832 67 regularizer """no""" +832 67 optimizer """adam""" +832 67 training_loop """owa""" +832 67 negative_sampler """basic""" +832 67 evaluator """rankbased""" +832 68 dataset """kinships""" +832 68 model """transe""" +832 68 loss """nssa""" +832 68 regularizer """no""" +832 68 optimizer """adam""" +832 68 training_loop """owa""" +832 68 negative_sampler """basic""" +832 68 evaluator """rankbased""" +832 69 dataset """kinships""" +832 69 model """transe""" +832 69 loss """nssa""" +832 69 regularizer """no""" +832 69 optimizer """adam""" +832 69 training_loop """owa""" +832 69 negative_sampler """basic""" +832 69 evaluator """rankbased""" +832 70 dataset """kinships""" +832 70 model """transe""" +832 70 loss """nssa""" +832 70 regularizer """no""" +832 70 optimizer """adam""" +832 70 training_loop """owa""" +832 70 negative_sampler """basic""" +832 70 evaluator """rankbased""" +832 71 dataset """kinships""" +832 71 model """transe""" +832 71 loss """nssa""" +832 71 regularizer """no""" +832 71 optimizer """adam""" +832 71 training_loop """owa""" +832 71 negative_sampler """basic""" +832 71 evaluator """rankbased""" +832 72 dataset """kinships""" +832 72 model """transe""" +832 
72 loss """nssa""" +832 72 regularizer """no""" +832 72 optimizer """adam""" +832 72 training_loop """owa""" +832 72 negative_sampler """basic""" +832 72 evaluator """rankbased""" +832 73 dataset """kinships""" +832 73 model """transe""" +832 73 loss """nssa""" +832 73 regularizer """no""" +832 73 optimizer """adam""" +832 73 training_loop """owa""" +832 73 negative_sampler """basic""" +832 73 evaluator """rankbased""" +832 74 dataset """kinships""" +832 74 model """transe""" +832 74 loss """nssa""" +832 74 regularizer """no""" +832 74 optimizer """adam""" +832 74 training_loop """owa""" +832 74 negative_sampler """basic""" +832 74 evaluator """rankbased""" +832 75 dataset """kinships""" +832 75 model """transe""" +832 75 loss """nssa""" +832 75 regularizer """no""" +832 75 optimizer """adam""" +832 75 training_loop """owa""" +832 75 negative_sampler """basic""" +832 75 evaluator """rankbased""" +832 76 dataset """kinships""" +832 76 model """transe""" +832 76 loss """nssa""" +832 76 regularizer """no""" +832 76 optimizer """adam""" +832 76 training_loop """owa""" +832 76 negative_sampler """basic""" +832 76 evaluator """rankbased""" +832 77 dataset """kinships""" +832 77 model """transe""" +832 77 loss """nssa""" +832 77 regularizer """no""" +832 77 optimizer """adam""" +832 77 training_loop """owa""" +832 77 negative_sampler """basic""" +832 77 evaluator """rankbased""" +832 78 dataset """kinships""" +832 78 model """transe""" +832 78 loss """nssa""" +832 78 regularizer """no""" +832 78 optimizer """adam""" +832 78 training_loop """owa""" +832 78 negative_sampler """basic""" +832 78 evaluator """rankbased""" +832 79 dataset """kinships""" +832 79 model """transe""" +832 79 loss """nssa""" +832 79 regularizer """no""" +832 79 optimizer """adam""" +832 79 training_loop """owa""" +832 79 negative_sampler """basic""" +832 79 evaluator """rankbased""" +832 80 dataset """kinships""" +832 80 model """transe""" +832 80 loss """nssa""" +832 80 regularizer """no""" +832 80 
optimizer """adam""" +832 80 training_loop """owa""" +832 80 negative_sampler """basic""" +832 80 evaluator """rankbased""" +832 81 dataset """kinships""" +832 81 model """transe""" +832 81 loss """nssa""" +832 81 regularizer """no""" +832 81 optimizer """adam""" +832 81 training_loop """owa""" +832 81 negative_sampler """basic""" +832 81 evaluator """rankbased""" +832 82 dataset """kinships""" +832 82 model """transe""" +832 82 loss """nssa""" +832 82 regularizer """no""" +832 82 optimizer """adam""" +832 82 training_loop """owa""" +832 82 negative_sampler """basic""" +832 82 evaluator """rankbased""" +832 83 dataset """kinships""" +832 83 model """transe""" +832 83 loss """nssa""" +832 83 regularizer """no""" +832 83 optimizer """adam""" +832 83 training_loop """owa""" +832 83 negative_sampler """basic""" +832 83 evaluator """rankbased""" +832 84 dataset """kinships""" +832 84 model """transe""" +832 84 loss """nssa""" +832 84 regularizer """no""" +832 84 optimizer """adam""" +832 84 training_loop """owa""" +832 84 negative_sampler """basic""" +832 84 evaluator """rankbased""" +832 85 dataset """kinships""" +832 85 model """transe""" +832 85 loss """nssa""" +832 85 regularizer """no""" +832 85 optimizer """adam""" +832 85 training_loop """owa""" +832 85 negative_sampler """basic""" +832 85 evaluator """rankbased""" +832 86 dataset """kinships""" +832 86 model """transe""" +832 86 loss """nssa""" +832 86 regularizer """no""" +832 86 optimizer """adam""" +832 86 training_loop """owa""" +832 86 negative_sampler """basic""" +832 86 evaluator """rankbased""" +832 87 dataset """kinships""" +832 87 model """transe""" +832 87 loss """nssa""" +832 87 regularizer """no""" +832 87 optimizer """adam""" +832 87 training_loop """owa""" +832 87 negative_sampler """basic""" +832 87 evaluator """rankbased""" +832 88 dataset """kinships""" +832 88 model """transe""" +832 88 loss """nssa""" +832 88 regularizer """no""" +832 88 optimizer """adam""" +832 88 training_loop """owa""" 
+832 88 negative_sampler """basic""" +832 88 evaluator """rankbased""" +832 89 dataset """kinships""" +832 89 model """transe""" +832 89 loss """nssa""" +832 89 regularizer """no""" +832 89 optimizer """adam""" +832 89 training_loop """owa""" +832 89 negative_sampler """basic""" +832 89 evaluator """rankbased""" +832 90 dataset """kinships""" +832 90 model """transe""" +832 90 loss """nssa""" +832 90 regularizer """no""" +832 90 optimizer """adam""" +832 90 training_loop """owa""" +832 90 negative_sampler """basic""" +832 90 evaluator """rankbased""" +832 91 dataset """kinships""" +832 91 model """transe""" +832 91 loss """nssa""" +832 91 regularizer """no""" +832 91 optimizer """adam""" +832 91 training_loop """owa""" +832 91 negative_sampler """basic""" +832 91 evaluator """rankbased""" +832 92 dataset """kinships""" +832 92 model """transe""" +832 92 loss """nssa""" +832 92 regularizer """no""" +832 92 optimizer """adam""" +832 92 training_loop """owa""" +832 92 negative_sampler """basic""" +832 92 evaluator """rankbased""" +832 93 dataset """kinships""" +832 93 model """transe""" +832 93 loss """nssa""" +832 93 regularizer """no""" +832 93 optimizer """adam""" +832 93 training_loop """owa""" +832 93 negative_sampler """basic""" +832 93 evaluator """rankbased""" +832 94 dataset """kinships""" +832 94 model """transe""" +832 94 loss """nssa""" +832 94 regularizer """no""" +832 94 optimizer """adam""" +832 94 training_loop """owa""" +832 94 negative_sampler """basic""" +832 94 evaluator """rankbased""" +832 95 dataset """kinships""" +832 95 model """transe""" +832 95 loss """nssa""" +832 95 regularizer """no""" +832 95 optimizer """adam""" +832 95 training_loop """owa""" +832 95 negative_sampler """basic""" +832 95 evaluator """rankbased""" +832 96 dataset """kinships""" +832 96 model """transe""" +832 96 loss """nssa""" +832 96 regularizer """no""" +832 96 optimizer """adam""" +832 96 training_loop """owa""" +832 96 negative_sampler """basic""" +832 96 evaluator 
"""rankbased""" +832 97 dataset """kinships""" +832 97 model """transe""" +832 97 loss """nssa""" +832 97 regularizer """no""" +832 97 optimizer """adam""" +832 97 training_loop """owa""" +832 97 negative_sampler """basic""" +832 97 evaluator """rankbased""" +832 98 dataset """kinships""" +832 98 model """transe""" +832 98 loss """nssa""" +832 98 regularizer """no""" +832 98 optimizer """adam""" +832 98 training_loop """owa""" +832 98 negative_sampler """basic""" +832 98 evaluator """rankbased""" +832 99 dataset """kinships""" +832 99 model """transe""" +832 99 loss """nssa""" +832 99 regularizer """no""" +832 99 optimizer """adam""" +832 99 training_loop """owa""" +832 99 negative_sampler """basic""" +832 99 evaluator """rankbased""" +832 100 dataset """kinships""" +832 100 model """transe""" +832 100 loss """nssa""" +832 100 regularizer """no""" +832 100 optimizer """adam""" +832 100 training_loop """owa""" +832 100 negative_sampler """basic""" +832 100 evaluator """rankbased""" +833 1 model.embedding_dim 2.0 +833 1 model.scoring_fct_norm 2.0 +833 1 optimizer.lr 0.006939736010094786 +833 1 negative_sampler.num_negs_per_pos 60.0 +833 1 training.batch_size 2.0 +833 2 model.embedding_dim 2.0 +833 2 model.scoring_fct_norm 1.0 +833 2 optimizer.lr 0.0019811457879049693 +833 2 negative_sampler.num_negs_per_pos 56.0 +833 2 training.batch_size 2.0 +833 3 model.embedding_dim 1.0 +833 3 model.scoring_fct_norm 1.0 +833 3 optimizer.lr 0.06371871912047862 +833 3 negative_sampler.num_negs_per_pos 38.0 +833 3 training.batch_size 2.0 +833 4 model.embedding_dim 1.0 +833 4 model.scoring_fct_norm 2.0 +833 4 optimizer.lr 0.05548609723910135 +833 4 negative_sampler.num_negs_per_pos 39.0 +833 4 training.batch_size 2.0 +833 5 model.embedding_dim 0.0 +833 5 model.scoring_fct_norm 1.0 +833 5 optimizer.lr 0.015802163807730193 +833 5 negative_sampler.num_negs_per_pos 51.0 +833 5 training.batch_size 0.0 +833 6 model.embedding_dim 0.0 +833 6 model.scoring_fct_norm 2.0 +833 6 optimizer.lr 
0.0037489498076749986 +833 6 negative_sampler.num_negs_per_pos 49.0 +833 6 training.batch_size 0.0 +833 7 model.embedding_dim 2.0 +833 7 model.scoring_fct_norm 1.0 +833 7 optimizer.lr 0.0020596766661521372 +833 7 negative_sampler.num_negs_per_pos 38.0 +833 7 training.batch_size 2.0 +833 8 model.embedding_dim 1.0 +833 8 model.scoring_fct_norm 1.0 +833 8 optimizer.lr 0.005568611861290296 +833 8 negative_sampler.num_negs_per_pos 61.0 +833 8 training.batch_size 1.0 +833 9 model.embedding_dim 0.0 +833 9 model.scoring_fct_norm 2.0 +833 9 optimizer.lr 0.0015069957546204275 +833 9 negative_sampler.num_negs_per_pos 92.0 +833 9 training.batch_size 0.0 +833 10 model.embedding_dim 1.0 +833 10 model.scoring_fct_norm 1.0 +833 10 optimizer.lr 0.010675269890634494 +833 10 negative_sampler.num_negs_per_pos 2.0 +833 10 training.batch_size 1.0 +833 11 model.embedding_dim 1.0 +833 11 model.scoring_fct_norm 2.0 +833 11 optimizer.lr 0.07742373434651766 +833 11 negative_sampler.num_negs_per_pos 68.0 +833 11 training.batch_size 0.0 +833 12 model.embedding_dim 1.0 +833 12 model.scoring_fct_norm 2.0 +833 12 optimizer.lr 0.0030030155805662135 +833 12 negative_sampler.num_negs_per_pos 13.0 +833 12 training.batch_size 1.0 +833 13 model.embedding_dim 1.0 +833 13 model.scoring_fct_norm 1.0 +833 13 optimizer.lr 0.00456815540895223 +833 13 negative_sampler.num_negs_per_pos 8.0 +833 13 training.batch_size 1.0 +833 14 model.embedding_dim 2.0 +833 14 model.scoring_fct_norm 1.0 +833 14 optimizer.lr 0.004775809547158745 +833 14 negative_sampler.num_negs_per_pos 31.0 +833 14 training.batch_size 0.0 +833 15 model.embedding_dim 0.0 +833 15 model.scoring_fct_norm 2.0 +833 15 optimizer.lr 0.06925685686052972 +833 15 negative_sampler.num_negs_per_pos 0.0 +833 15 training.batch_size 0.0 +833 16 model.embedding_dim 2.0 +833 16 model.scoring_fct_norm 2.0 +833 16 optimizer.lr 0.0036581645863959645 +833 16 negative_sampler.num_negs_per_pos 82.0 +833 16 training.batch_size 2.0 +833 17 model.embedding_dim 1.0 +833 
17 model.scoring_fct_norm 2.0 +833 17 optimizer.lr 0.007089464478952175 +833 17 negative_sampler.num_negs_per_pos 50.0 +833 17 training.batch_size 0.0 +833 18 model.embedding_dim 1.0 +833 18 model.scoring_fct_norm 2.0 +833 18 optimizer.lr 0.058800320649546683 +833 18 negative_sampler.num_negs_per_pos 42.0 +833 18 training.batch_size 2.0 +833 19 model.embedding_dim 2.0 +833 19 model.scoring_fct_norm 1.0 +833 19 optimizer.lr 0.010429531527283566 +833 19 negative_sampler.num_negs_per_pos 45.0 +833 19 training.batch_size 2.0 +833 20 model.embedding_dim 2.0 +833 20 model.scoring_fct_norm 2.0 +833 20 optimizer.lr 0.012122531966507607 +833 20 negative_sampler.num_negs_per_pos 65.0 +833 20 training.batch_size 1.0 +833 21 model.embedding_dim 0.0 +833 21 model.scoring_fct_norm 2.0 +833 21 optimizer.lr 0.020535161179808217 +833 21 negative_sampler.num_negs_per_pos 32.0 +833 21 training.batch_size 1.0 +833 22 model.embedding_dim 2.0 +833 22 model.scoring_fct_norm 2.0 +833 22 optimizer.lr 0.007145195109005861 +833 22 negative_sampler.num_negs_per_pos 17.0 +833 22 training.batch_size 2.0 +833 23 model.embedding_dim 1.0 +833 23 model.scoring_fct_norm 2.0 +833 23 optimizer.lr 0.0029982002612797412 +833 23 negative_sampler.num_negs_per_pos 30.0 +833 23 training.batch_size 2.0 +833 24 model.embedding_dim 1.0 +833 24 model.scoring_fct_norm 1.0 +833 24 optimizer.lr 0.016552572881909196 +833 24 negative_sampler.num_negs_per_pos 82.0 +833 24 training.batch_size 2.0 +833 25 model.embedding_dim 0.0 +833 25 model.scoring_fct_norm 1.0 +833 25 optimizer.lr 0.0040043888255758345 +833 25 negative_sampler.num_negs_per_pos 45.0 +833 25 training.batch_size 0.0 +833 26 model.embedding_dim 0.0 +833 26 model.scoring_fct_norm 1.0 +833 26 optimizer.lr 0.001014335160903628 +833 26 negative_sampler.num_negs_per_pos 43.0 +833 26 training.batch_size 0.0 +833 27 model.embedding_dim 0.0 +833 27 model.scoring_fct_norm 2.0 +833 27 optimizer.lr 0.021386632891212187 +833 27 negative_sampler.num_negs_per_pos 
50.0 +833 27 training.batch_size 0.0 +833 28 model.embedding_dim 0.0 +833 28 model.scoring_fct_norm 1.0 +833 28 optimizer.lr 0.030715041460368898 +833 28 negative_sampler.num_negs_per_pos 63.0 +833 28 training.batch_size 2.0 +833 29 model.embedding_dim 2.0 +833 29 model.scoring_fct_norm 1.0 +833 29 optimizer.lr 0.0031169768105452724 +833 29 negative_sampler.num_negs_per_pos 26.0 +833 29 training.batch_size 1.0 +833 30 model.embedding_dim 0.0 +833 30 model.scoring_fct_norm 1.0 +833 30 optimizer.lr 0.04927826843403709 +833 30 negative_sampler.num_negs_per_pos 94.0 +833 30 training.batch_size 1.0 +833 31 model.embedding_dim 2.0 +833 31 model.scoring_fct_norm 2.0 +833 31 optimizer.lr 0.08276581451065151 +833 31 negative_sampler.num_negs_per_pos 71.0 +833 31 training.batch_size 0.0 +833 32 model.embedding_dim 0.0 +833 32 model.scoring_fct_norm 2.0 +833 32 optimizer.lr 0.009729667057575941 +833 32 negative_sampler.num_negs_per_pos 13.0 +833 32 training.batch_size 1.0 +833 33 model.embedding_dim 1.0 +833 33 model.scoring_fct_norm 2.0 +833 33 optimizer.lr 0.003848348981839395 +833 33 negative_sampler.num_negs_per_pos 30.0 +833 33 training.batch_size 0.0 +833 34 model.embedding_dim 1.0 +833 34 model.scoring_fct_norm 2.0 +833 34 optimizer.lr 0.049219617669934844 +833 34 negative_sampler.num_negs_per_pos 42.0 +833 34 training.batch_size 2.0 +833 35 model.embedding_dim 1.0 +833 35 model.scoring_fct_norm 2.0 +833 35 optimizer.lr 0.03013507243184717 +833 35 negative_sampler.num_negs_per_pos 78.0 +833 35 training.batch_size 0.0 +833 36 model.embedding_dim 0.0 +833 36 model.scoring_fct_norm 1.0 +833 36 optimizer.lr 0.005242776153262435 +833 36 negative_sampler.num_negs_per_pos 12.0 +833 36 training.batch_size 1.0 +833 37 model.embedding_dim 0.0 +833 37 model.scoring_fct_norm 2.0 +833 37 optimizer.lr 0.023267565251932014 +833 37 negative_sampler.num_negs_per_pos 94.0 +833 37 training.batch_size 2.0 +833 38 model.embedding_dim 1.0 +833 38 model.scoring_fct_norm 2.0 +833 38 
optimizer.lr 0.023421558824898256 +833 38 negative_sampler.num_negs_per_pos 0.0 +833 38 training.batch_size 1.0 +833 39 model.embedding_dim 2.0 +833 39 model.scoring_fct_norm 1.0 +833 39 optimizer.lr 0.06234754713586816 +833 39 negative_sampler.num_negs_per_pos 39.0 +833 39 training.batch_size 1.0 +833 40 model.embedding_dim 1.0 +833 40 model.scoring_fct_norm 1.0 +833 40 optimizer.lr 0.001074379902911968 +833 40 negative_sampler.num_negs_per_pos 47.0 +833 40 training.batch_size 0.0 +833 41 model.embedding_dim 2.0 +833 41 model.scoring_fct_norm 1.0 +833 41 optimizer.lr 0.004735049776701982 +833 41 negative_sampler.num_negs_per_pos 99.0 +833 41 training.batch_size 1.0 +833 42 model.embedding_dim 0.0 +833 42 model.scoring_fct_norm 2.0 +833 42 optimizer.lr 0.05927608691314445 +833 42 negative_sampler.num_negs_per_pos 9.0 +833 42 training.batch_size 1.0 +833 43 model.embedding_dim 1.0 +833 43 model.scoring_fct_norm 1.0 +833 43 optimizer.lr 0.004406970345949427 +833 43 negative_sampler.num_negs_per_pos 86.0 +833 43 training.batch_size 0.0 +833 44 model.embedding_dim 1.0 +833 44 model.scoring_fct_norm 2.0 +833 44 optimizer.lr 0.04069438415266948 +833 44 negative_sampler.num_negs_per_pos 84.0 +833 44 training.batch_size 0.0 +833 45 model.embedding_dim 1.0 +833 45 model.scoring_fct_norm 1.0 +833 45 optimizer.lr 0.006535450671993448 +833 45 negative_sampler.num_negs_per_pos 89.0 +833 45 training.batch_size 1.0 +833 46 model.embedding_dim 0.0 +833 46 model.scoring_fct_norm 1.0 +833 46 optimizer.lr 0.001185485508971506 +833 46 negative_sampler.num_negs_per_pos 79.0 +833 46 training.batch_size 2.0 +833 47 model.embedding_dim 2.0 +833 47 model.scoring_fct_norm 1.0 +833 47 optimizer.lr 0.008824841335857848 +833 47 negative_sampler.num_negs_per_pos 19.0 +833 47 training.batch_size 1.0 +833 48 model.embedding_dim 2.0 +833 48 model.scoring_fct_norm 1.0 +833 48 optimizer.lr 0.006432397468731842 +833 48 negative_sampler.num_negs_per_pos 47.0 +833 48 training.batch_size 0.0 +833 49 
model.embedding_dim 1.0 +833 49 model.scoring_fct_norm 2.0 +833 49 optimizer.lr 0.0028826024881894344 +833 49 negative_sampler.num_negs_per_pos 78.0 +833 49 training.batch_size 0.0 +833 50 model.embedding_dim 2.0 +833 50 model.scoring_fct_norm 1.0 +833 50 optimizer.lr 0.006541459852131581 +833 50 negative_sampler.num_negs_per_pos 55.0 +833 50 training.batch_size 1.0 +833 51 model.embedding_dim 1.0 +833 51 model.scoring_fct_norm 2.0 +833 51 optimizer.lr 0.0020760274722308476 +833 51 negative_sampler.num_negs_per_pos 68.0 +833 51 training.batch_size 1.0 +833 52 model.embedding_dim 0.0 +833 52 model.scoring_fct_norm 1.0 +833 52 optimizer.lr 0.01702126503617635 +833 52 negative_sampler.num_negs_per_pos 0.0 +833 52 training.batch_size 2.0 +833 53 model.embedding_dim 2.0 +833 53 model.scoring_fct_norm 1.0 +833 53 optimizer.lr 0.05118863491519065 +833 53 negative_sampler.num_negs_per_pos 91.0 +833 53 training.batch_size 1.0 +833 54 model.embedding_dim 0.0 +833 54 model.scoring_fct_norm 1.0 +833 54 optimizer.lr 0.002468832818803753 +833 54 negative_sampler.num_negs_per_pos 98.0 +833 54 training.batch_size 1.0 +833 55 model.embedding_dim 1.0 +833 55 model.scoring_fct_norm 2.0 +833 55 optimizer.lr 0.018069491705478342 +833 55 negative_sampler.num_negs_per_pos 44.0 +833 55 training.batch_size 2.0 +833 56 model.embedding_dim 2.0 +833 56 model.scoring_fct_norm 2.0 +833 56 optimizer.lr 0.007447103888321239 +833 56 negative_sampler.num_negs_per_pos 92.0 +833 56 training.batch_size 1.0 +833 57 model.embedding_dim 1.0 +833 57 model.scoring_fct_norm 2.0 +833 57 optimizer.lr 0.03754078885625775 +833 57 negative_sampler.num_negs_per_pos 93.0 +833 57 training.batch_size 1.0 +833 58 model.embedding_dim 1.0 +833 58 model.scoring_fct_norm 1.0 +833 58 optimizer.lr 0.002202700772654552 +833 58 negative_sampler.num_negs_per_pos 46.0 +833 58 training.batch_size 2.0 +833 59 model.embedding_dim 0.0 +833 59 model.scoring_fct_norm 1.0 +833 59 optimizer.lr 0.08872082665484392 +833 59 
negative_sampler.num_negs_per_pos 34.0 +833 59 training.batch_size 0.0 +833 60 model.embedding_dim 0.0 +833 60 model.scoring_fct_norm 2.0 +833 60 optimizer.lr 0.0037789004007077534 +833 60 negative_sampler.num_negs_per_pos 56.0 +833 60 training.batch_size 1.0 +833 61 model.embedding_dim 1.0 +833 61 model.scoring_fct_norm 2.0 +833 61 optimizer.lr 0.09105857975078152 +833 61 negative_sampler.num_negs_per_pos 68.0 +833 61 training.batch_size 2.0 +833 62 model.embedding_dim 0.0 +833 62 model.scoring_fct_norm 1.0 +833 62 optimizer.lr 0.010527395506621368 +833 62 negative_sampler.num_negs_per_pos 12.0 +833 62 training.batch_size 2.0 +833 63 model.embedding_dim 2.0 +833 63 model.scoring_fct_norm 1.0 +833 63 optimizer.lr 0.07196933616081196 +833 63 negative_sampler.num_negs_per_pos 87.0 +833 63 training.batch_size 1.0 +833 64 model.embedding_dim 0.0 +833 64 model.scoring_fct_norm 1.0 +833 64 optimizer.lr 0.00242373946259953 +833 64 negative_sampler.num_negs_per_pos 72.0 +833 64 training.batch_size 1.0 +833 65 model.embedding_dim 2.0 +833 65 model.scoring_fct_norm 1.0 +833 65 optimizer.lr 0.017953594945073533 +833 65 negative_sampler.num_negs_per_pos 78.0 +833 65 training.batch_size 1.0 +833 66 model.embedding_dim 2.0 +833 66 model.scoring_fct_norm 2.0 +833 66 optimizer.lr 0.0016176724050674704 +833 66 negative_sampler.num_negs_per_pos 85.0 +833 66 training.batch_size 0.0 +833 67 model.embedding_dim 0.0 +833 67 model.scoring_fct_norm 1.0 +833 67 optimizer.lr 0.018716397228540443 +833 67 negative_sampler.num_negs_per_pos 54.0 +833 67 training.batch_size 1.0 +833 68 model.embedding_dim 0.0 +833 68 model.scoring_fct_norm 1.0 +833 68 optimizer.lr 0.0015755607397488236 +833 68 negative_sampler.num_negs_per_pos 44.0 +833 68 training.batch_size 2.0 +833 69 model.embedding_dim 1.0 +833 69 model.scoring_fct_norm 1.0 +833 69 optimizer.lr 0.07709747954908647 +833 69 negative_sampler.num_negs_per_pos 3.0 +833 69 training.batch_size 2.0 +833 70 model.embedding_dim 1.0 +833 70 
model.scoring_fct_norm 2.0 +833 70 optimizer.lr 0.020934113895308075 +833 70 negative_sampler.num_negs_per_pos 40.0 +833 70 training.batch_size 0.0 +833 71 model.embedding_dim 2.0 +833 71 model.scoring_fct_norm 1.0 +833 71 optimizer.lr 0.002667988656312351 +833 71 negative_sampler.num_negs_per_pos 10.0 +833 71 training.batch_size 2.0 +833 72 model.embedding_dim 1.0 +833 72 model.scoring_fct_norm 2.0 +833 72 optimizer.lr 0.023354524467261405 +833 72 negative_sampler.num_negs_per_pos 54.0 +833 72 training.batch_size 2.0 +833 73 model.embedding_dim 1.0 +833 73 model.scoring_fct_norm 1.0 +833 73 optimizer.lr 0.0029781616818328875 +833 73 negative_sampler.num_negs_per_pos 95.0 +833 73 training.batch_size 2.0 +833 74 model.embedding_dim 2.0 +833 74 model.scoring_fct_norm 1.0 +833 74 optimizer.lr 0.02550994504349104 +833 74 negative_sampler.num_negs_per_pos 63.0 +833 74 training.batch_size 1.0 +833 75 model.embedding_dim 0.0 +833 75 model.scoring_fct_norm 2.0 +833 75 optimizer.lr 0.006652893959816875 +833 75 negative_sampler.num_negs_per_pos 85.0 +833 75 training.batch_size 0.0 +833 76 model.embedding_dim 0.0 +833 76 model.scoring_fct_norm 1.0 +833 76 optimizer.lr 0.011730961162870916 +833 76 negative_sampler.num_negs_per_pos 77.0 +833 76 training.batch_size 1.0 +833 77 model.embedding_dim 0.0 +833 77 model.scoring_fct_norm 1.0 +833 77 optimizer.lr 0.0034825978839066398 +833 77 negative_sampler.num_negs_per_pos 68.0 +833 77 training.batch_size 2.0 +833 78 model.embedding_dim 2.0 +833 78 model.scoring_fct_norm 2.0 +833 78 optimizer.lr 0.0651760007103319 +833 78 negative_sampler.num_negs_per_pos 79.0 +833 78 training.batch_size 2.0 +833 79 model.embedding_dim 2.0 +833 79 model.scoring_fct_norm 2.0 +833 79 optimizer.lr 0.050236723282748294 +833 79 negative_sampler.num_negs_per_pos 27.0 +833 79 training.batch_size 1.0 +833 80 model.embedding_dim 0.0 +833 80 model.scoring_fct_norm 1.0 +833 80 optimizer.lr 0.0024134643863662516 +833 80 negative_sampler.num_negs_per_pos 38.0 
+833 80 training.batch_size 2.0 +833 81 model.embedding_dim 1.0 +833 81 model.scoring_fct_norm 2.0 +833 81 optimizer.lr 0.0014645670549674832 +833 81 negative_sampler.num_negs_per_pos 2.0 +833 81 training.batch_size 0.0 +833 82 model.embedding_dim 0.0 +833 82 model.scoring_fct_norm 2.0 +833 82 optimizer.lr 0.005082287217522858 +833 82 negative_sampler.num_negs_per_pos 18.0 +833 82 training.batch_size 2.0 +833 83 model.embedding_dim 2.0 +833 83 model.scoring_fct_norm 2.0 +833 83 optimizer.lr 0.0086089272900576 +833 83 negative_sampler.num_negs_per_pos 32.0 +833 83 training.batch_size 0.0 +833 84 model.embedding_dim 2.0 +833 84 model.scoring_fct_norm 1.0 +833 84 optimizer.lr 0.060762703957310094 +833 84 negative_sampler.num_negs_per_pos 89.0 +833 84 training.batch_size 1.0 +833 85 model.embedding_dim 1.0 +833 85 model.scoring_fct_norm 1.0 +833 85 optimizer.lr 0.03713346348776227 +833 85 negative_sampler.num_negs_per_pos 35.0 +833 85 training.batch_size 1.0 +833 86 model.embedding_dim 0.0 +833 86 model.scoring_fct_norm 1.0 +833 86 optimizer.lr 0.036898541088748266 +833 86 negative_sampler.num_negs_per_pos 82.0 +833 86 training.batch_size 1.0 +833 87 model.embedding_dim 1.0 +833 87 model.scoring_fct_norm 2.0 +833 87 optimizer.lr 0.004605748834750782 +833 87 negative_sampler.num_negs_per_pos 41.0 +833 87 training.batch_size 1.0 +833 88 model.embedding_dim 2.0 +833 88 model.scoring_fct_norm 2.0 +833 88 optimizer.lr 0.05884725089477046 +833 88 negative_sampler.num_negs_per_pos 39.0 +833 88 training.batch_size 2.0 +833 89 model.embedding_dim 2.0 +833 89 model.scoring_fct_norm 2.0 +833 89 optimizer.lr 0.033852292654044505 +833 89 negative_sampler.num_negs_per_pos 74.0 +833 89 training.batch_size 2.0 +833 90 model.embedding_dim 0.0 +833 90 model.scoring_fct_norm 1.0 +833 90 optimizer.lr 0.010183089564187672 +833 90 negative_sampler.num_negs_per_pos 68.0 +833 90 training.batch_size 2.0 +833 91 model.embedding_dim 2.0 +833 91 model.scoring_fct_norm 1.0 +833 91 optimizer.lr 
0.09468971708921636 +833 91 negative_sampler.num_negs_per_pos 23.0 +833 91 training.batch_size 0.0 +833 92 model.embedding_dim 2.0 +833 92 model.scoring_fct_norm 1.0 +833 92 optimizer.lr 0.005836833294717172 +833 92 negative_sampler.num_negs_per_pos 66.0 +833 92 training.batch_size 2.0 +833 93 model.embedding_dim 0.0 +833 93 model.scoring_fct_norm 1.0 +833 93 optimizer.lr 0.015739523075467672 +833 93 negative_sampler.num_negs_per_pos 64.0 +833 93 training.batch_size 0.0 +833 94 model.embedding_dim 1.0 +833 94 model.scoring_fct_norm 2.0 +833 94 optimizer.lr 0.018981524317383584 +833 94 negative_sampler.num_negs_per_pos 82.0 +833 94 training.batch_size 2.0 +833 95 model.embedding_dim 2.0 +833 95 model.scoring_fct_norm 2.0 +833 95 optimizer.lr 0.013363476109445636 +833 95 negative_sampler.num_negs_per_pos 60.0 +833 95 training.batch_size 0.0 +833 96 model.embedding_dim 2.0 +833 96 model.scoring_fct_norm 1.0 +833 96 optimizer.lr 0.0169987969397407 +833 96 negative_sampler.num_negs_per_pos 50.0 +833 96 training.batch_size 1.0 +833 97 model.embedding_dim 0.0 +833 97 model.scoring_fct_norm 1.0 +833 97 optimizer.lr 0.0036997490921207446 +833 97 negative_sampler.num_negs_per_pos 95.0 +833 97 training.batch_size 0.0 +833 98 model.embedding_dim 0.0 +833 98 model.scoring_fct_norm 2.0 +833 98 optimizer.lr 0.0037389921088292074 +833 98 negative_sampler.num_negs_per_pos 82.0 +833 98 training.batch_size 0.0 +833 99 model.embedding_dim 1.0 +833 99 model.scoring_fct_norm 2.0 +833 99 optimizer.lr 0.01296970305214536 +833 99 negative_sampler.num_negs_per_pos 12.0 +833 99 training.batch_size 2.0 +833 100 model.embedding_dim 2.0 +833 100 model.scoring_fct_norm 1.0 +833 100 optimizer.lr 0.09508654779462512 +833 100 negative_sampler.num_negs_per_pos 95.0 +833 100 training.batch_size 1.0 +833 1 dataset """wn18rr""" +833 1 model """transe""" +833 1 loss """bceaftersigmoid""" +833 1 regularizer """no""" +833 1 optimizer """adam""" +833 1 training_loop """owa""" +833 1 negative_sampler 
"""basic""" +833 1 evaluator """rankbased""" +833 2 dataset """wn18rr""" +833 2 model """transe""" +833 2 loss """bceaftersigmoid""" +833 2 regularizer """no""" +833 2 optimizer """adam""" +833 2 training_loop """owa""" +833 2 negative_sampler """basic""" +833 2 evaluator """rankbased""" +833 3 dataset """wn18rr""" +833 3 model """transe""" +833 3 loss """bceaftersigmoid""" +833 3 regularizer """no""" +833 3 optimizer """adam""" +833 3 training_loop """owa""" +833 3 negative_sampler """basic""" +833 3 evaluator """rankbased""" +833 4 dataset """wn18rr""" +833 4 model """transe""" +833 4 loss """bceaftersigmoid""" +833 4 regularizer """no""" +833 4 optimizer """adam""" +833 4 training_loop """owa""" +833 4 negative_sampler """basic""" +833 4 evaluator """rankbased""" +833 5 dataset """wn18rr""" +833 5 model """transe""" +833 5 loss """bceaftersigmoid""" +833 5 regularizer """no""" +833 5 optimizer """adam""" +833 5 training_loop """owa""" +833 5 negative_sampler """basic""" +833 5 evaluator """rankbased""" +833 6 dataset """wn18rr""" +833 6 model """transe""" +833 6 loss """bceaftersigmoid""" +833 6 regularizer """no""" +833 6 optimizer """adam""" +833 6 training_loop """owa""" +833 6 negative_sampler """basic""" +833 6 evaluator """rankbased""" +833 7 dataset """wn18rr""" +833 7 model """transe""" +833 7 loss """bceaftersigmoid""" +833 7 regularizer """no""" +833 7 optimizer """adam""" +833 7 training_loop """owa""" +833 7 negative_sampler """basic""" +833 7 evaluator """rankbased""" +833 8 dataset """wn18rr""" +833 8 model """transe""" +833 8 loss """bceaftersigmoid""" +833 8 regularizer """no""" +833 8 optimizer """adam""" +833 8 training_loop """owa""" +833 8 negative_sampler """basic""" +833 8 evaluator """rankbased""" +833 9 dataset """wn18rr""" +833 9 model """transe""" +833 9 loss """bceaftersigmoid""" +833 9 regularizer """no""" +833 9 optimizer """adam""" +833 9 training_loop """owa""" +833 9 negative_sampler """basic""" +833 9 evaluator """rankbased""" 
+833 10 dataset """wn18rr""" +833 10 model """transe""" +833 10 loss """bceaftersigmoid""" +833 10 regularizer """no""" +833 10 optimizer """adam""" +833 10 training_loop """owa""" +833 10 negative_sampler """basic""" +833 10 evaluator """rankbased""" +833 11 dataset """wn18rr""" +833 11 model """transe""" +833 11 loss """bceaftersigmoid""" +833 11 regularizer """no""" +833 11 optimizer """adam""" +833 11 training_loop """owa""" +833 11 negative_sampler """basic""" +833 11 evaluator """rankbased""" +833 12 dataset """wn18rr""" +833 12 model """transe""" +833 12 loss """bceaftersigmoid""" +833 12 regularizer """no""" +833 12 optimizer """adam""" +833 12 training_loop """owa""" +833 12 negative_sampler """basic""" +833 12 evaluator """rankbased""" +833 13 dataset """wn18rr""" +833 13 model """transe""" +833 13 loss """bceaftersigmoid""" +833 13 regularizer """no""" +833 13 optimizer """adam""" +833 13 training_loop """owa""" +833 13 negative_sampler """basic""" +833 13 evaluator """rankbased""" +833 14 dataset """wn18rr""" +833 14 model """transe""" +833 14 loss """bceaftersigmoid""" +833 14 regularizer """no""" +833 14 optimizer """adam""" +833 14 training_loop """owa""" +833 14 negative_sampler """basic""" +833 14 evaluator """rankbased""" +833 15 dataset """wn18rr""" +833 15 model """transe""" +833 15 loss """bceaftersigmoid""" +833 15 regularizer """no""" +833 15 optimizer """adam""" +833 15 training_loop """owa""" +833 15 negative_sampler """basic""" +833 15 evaluator """rankbased""" +833 16 dataset """wn18rr""" +833 16 model """transe""" +833 16 loss """bceaftersigmoid""" +833 16 regularizer """no""" +833 16 optimizer """adam""" +833 16 training_loop """owa""" +833 16 negative_sampler """basic""" +833 16 evaluator """rankbased""" +833 17 dataset """wn18rr""" +833 17 model """transe""" +833 17 loss """bceaftersigmoid""" +833 17 regularizer """no""" +833 17 optimizer """adam""" +833 17 training_loop """owa""" +833 17 negative_sampler """basic""" +833 17 evaluator 
"""rankbased""" +833 18 dataset """wn18rr""" +833 18 model """transe""" +833 18 loss """bceaftersigmoid""" +833 18 regularizer """no""" +833 18 optimizer """adam""" +833 18 training_loop """owa""" +833 18 negative_sampler """basic""" +833 18 evaluator """rankbased""" +833 19 dataset """wn18rr""" +833 19 model """transe""" +833 19 loss """bceaftersigmoid""" +833 19 regularizer """no""" +833 19 optimizer """adam""" +833 19 training_loop """owa""" +833 19 negative_sampler """basic""" +833 19 evaluator """rankbased""" +833 20 dataset """wn18rr""" +833 20 model """transe""" +833 20 loss """bceaftersigmoid""" +833 20 regularizer """no""" +833 20 optimizer """adam""" +833 20 training_loop """owa""" +833 20 negative_sampler """basic""" +833 20 evaluator """rankbased""" +833 21 dataset """wn18rr""" +833 21 model """transe""" +833 21 loss """bceaftersigmoid""" +833 21 regularizer """no""" +833 21 optimizer """adam""" +833 21 training_loop """owa""" +833 21 negative_sampler """basic""" +833 21 evaluator """rankbased""" +833 22 dataset """wn18rr""" +833 22 model """transe""" +833 22 loss """bceaftersigmoid""" +833 22 regularizer """no""" +833 22 optimizer """adam""" +833 22 training_loop """owa""" +833 22 negative_sampler """basic""" +833 22 evaluator """rankbased""" +833 23 dataset """wn18rr""" +833 23 model """transe""" +833 23 loss """bceaftersigmoid""" +833 23 regularizer """no""" +833 23 optimizer """adam""" +833 23 training_loop """owa""" +833 23 negative_sampler """basic""" +833 23 evaluator """rankbased""" +833 24 dataset """wn18rr""" +833 24 model """transe""" +833 24 loss """bceaftersigmoid""" +833 24 regularizer """no""" +833 24 optimizer """adam""" +833 24 training_loop """owa""" +833 24 negative_sampler """basic""" +833 24 evaluator """rankbased""" +833 25 dataset """wn18rr""" +833 25 model """transe""" +833 25 loss """bceaftersigmoid""" +833 25 regularizer """no""" +833 25 optimizer """adam""" +833 25 training_loop """owa""" +833 25 negative_sampler """basic""" 
+833 25 evaluator """rankbased""" +833 26 dataset """wn18rr""" +833 26 model """transe""" +833 26 loss """bceaftersigmoid""" +833 26 regularizer """no""" +833 26 optimizer """adam""" +833 26 training_loop """owa""" +833 26 negative_sampler """basic""" +833 26 evaluator """rankbased""" +833 27 dataset """wn18rr""" +833 27 model """transe""" +833 27 loss """bceaftersigmoid""" +833 27 regularizer """no""" +833 27 optimizer """adam""" +833 27 training_loop """owa""" +833 27 negative_sampler """basic""" +833 27 evaluator """rankbased""" +833 28 dataset """wn18rr""" +833 28 model """transe""" +833 28 loss """bceaftersigmoid""" +833 28 regularizer """no""" +833 28 optimizer """adam""" +833 28 training_loop """owa""" +833 28 negative_sampler """basic""" +833 28 evaluator """rankbased""" +833 29 dataset """wn18rr""" +833 29 model """transe""" +833 29 loss """bceaftersigmoid""" +833 29 regularizer """no""" +833 29 optimizer """adam""" +833 29 training_loop """owa""" +833 29 negative_sampler """basic""" +833 29 evaluator """rankbased""" +833 30 dataset """wn18rr""" +833 30 model """transe""" +833 30 loss """bceaftersigmoid""" +833 30 regularizer """no""" +833 30 optimizer """adam""" +833 30 training_loop """owa""" +833 30 negative_sampler """basic""" +833 30 evaluator """rankbased""" +833 31 dataset """wn18rr""" +833 31 model """transe""" +833 31 loss """bceaftersigmoid""" +833 31 regularizer """no""" +833 31 optimizer """adam""" +833 31 training_loop """owa""" +833 31 negative_sampler """basic""" +833 31 evaluator """rankbased""" +833 32 dataset """wn18rr""" +833 32 model """transe""" +833 32 loss """bceaftersigmoid""" +833 32 regularizer """no""" +833 32 optimizer """adam""" +833 32 training_loop """owa""" +833 32 negative_sampler """basic""" +833 32 evaluator """rankbased""" +833 33 dataset """wn18rr""" +833 33 model """transe""" +833 33 loss """bceaftersigmoid""" +833 33 regularizer """no""" +833 33 optimizer """adam""" +833 33 training_loop """owa""" +833 33 
negative_sampler """basic""" +833 33 evaluator """rankbased""" +833 34 dataset """wn18rr""" +833 34 model """transe""" +833 34 loss """bceaftersigmoid""" +833 34 regularizer """no""" +833 34 optimizer """adam""" +833 34 training_loop """owa""" +833 34 negative_sampler """basic""" +833 34 evaluator """rankbased""" +833 35 dataset """wn18rr""" +833 35 model """transe""" +833 35 loss """bceaftersigmoid""" +833 35 regularizer """no""" +833 35 optimizer """adam""" +833 35 training_loop """owa""" +833 35 negative_sampler """basic""" +833 35 evaluator """rankbased""" +833 36 dataset """wn18rr""" +833 36 model """transe""" +833 36 loss """bceaftersigmoid""" +833 36 regularizer """no""" +833 36 optimizer """adam""" +833 36 training_loop """owa""" +833 36 negative_sampler """basic""" +833 36 evaluator """rankbased""" +833 37 dataset """wn18rr""" +833 37 model """transe""" +833 37 loss """bceaftersigmoid""" +833 37 regularizer """no""" +833 37 optimizer """adam""" +833 37 training_loop """owa""" +833 37 negative_sampler """basic""" +833 37 evaluator """rankbased""" +833 38 dataset """wn18rr""" +833 38 model """transe""" +833 38 loss """bceaftersigmoid""" +833 38 regularizer """no""" +833 38 optimizer """adam""" +833 38 training_loop """owa""" +833 38 negative_sampler """basic""" +833 38 evaluator """rankbased""" +833 39 dataset """wn18rr""" +833 39 model """transe""" +833 39 loss """bceaftersigmoid""" +833 39 regularizer """no""" +833 39 optimizer """adam""" +833 39 training_loop """owa""" +833 39 negative_sampler """basic""" +833 39 evaluator """rankbased""" +833 40 dataset """wn18rr""" +833 40 model """transe""" +833 40 loss """bceaftersigmoid""" +833 40 regularizer """no""" +833 40 optimizer """adam""" +833 40 training_loop """owa""" +833 40 negative_sampler """basic""" +833 40 evaluator """rankbased""" +833 41 dataset """wn18rr""" +833 41 model """transe""" +833 41 loss """bceaftersigmoid""" +833 41 regularizer """no""" +833 41 optimizer """adam""" +833 41 training_loop 
"""owa""" +833 41 negative_sampler """basic""" +833 41 evaluator """rankbased""" +833 42 dataset """wn18rr""" +833 42 model """transe""" +833 42 loss """bceaftersigmoid""" +833 42 regularizer """no""" +833 42 optimizer """adam""" +833 42 training_loop """owa""" +833 42 negative_sampler """basic""" +833 42 evaluator """rankbased""" +833 43 dataset """wn18rr""" +833 43 model """transe""" +833 43 loss """bceaftersigmoid""" +833 43 regularizer """no""" +833 43 optimizer """adam""" +833 43 training_loop """owa""" +833 43 negative_sampler """basic""" +833 43 evaluator """rankbased""" +833 44 dataset """wn18rr""" +833 44 model """transe""" +833 44 loss """bceaftersigmoid""" +833 44 regularizer """no""" +833 44 optimizer """adam""" +833 44 training_loop """owa""" +833 44 negative_sampler """basic""" +833 44 evaluator """rankbased""" +833 45 dataset """wn18rr""" +833 45 model """transe""" +833 45 loss """bceaftersigmoid""" +833 45 regularizer """no""" +833 45 optimizer """adam""" +833 45 training_loop """owa""" +833 45 negative_sampler """basic""" +833 45 evaluator """rankbased""" +833 46 dataset """wn18rr""" +833 46 model """transe""" +833 46 loss """bceaftersigmoid""" +833 46 regularizer """no""" +833 46 optimizer """adam""" +833 46 training_loop """owa""" +833 46 negative_sampler """basic""" +833 46 evaluator """rankbased""" +833 47 dataset """wn18rr""" +833 47 model """transe""" +833 47 loss """bceaftersigmoid""" +833 47 regularizer """no""" +833 47 optimizer """adam""" +833 47 training_loop """owa""" +833 47 negative_sampler """basic""" +833 47 evaluator """rankbased""" +833 48 dataset """wn18rr""" +833 48 model """transe""" +833 48 loss """bceaftersigmoid""" +833 48 regularizer """no""" +833 48 optimizer """adam""" +833 48 training_loop """owa""" +833 48 negative_sampler """basic""" +833 48 evaluator """rankbased""" +833 49 dataset """wn18rr""" +833 49 model """transe""" +833 49 loss """bceaftersigmoid""" +833 49 regularizer """no""" +833 49 optimizer """adam""" +833 
49 training_loop """owa""" +833 49 negative_sampler """basic""" +833 49 evaluator """rankbased""" +833 50 dataset """wn18rr""" +833 50 model """transe""" +833 50 loss """bceaftersigmoid""" +833 50 regularizer """no""" +833 50 optimizer """adam""" +833 50 training_loop """owa""" +833 50 negative_sampler """basic""" +833 50 evaluator """rankbased""" +833 51 dataset """wn18rr""" +833 51 model """transe""" +833 51 loss """bceaftersigmoid""" +833 51 regularizer """no""" +833 51 optimizer """adam""" +833 51 training_loop """owa""" +833 51 negative_sampler """basic""" +833 51 evaluator """rankbased""" +833 52 dataset """wn18rr""" +833 52 model """transe""" +833 52 loss """bceaftersigmoid""" +833 52 regularizer """no""" +833 52 optimizer """adam""" +833 52 training_loop """owa""" +833 52 negative_sampler """basic""" +833 52 evaluator """rankbased""" +833 53 dataset """wn18rr""" +833 53 model """transe""" +833 53 loss """bceaftersigmoid""" +833 53 regularizer """no""" +833 53 optimizer """adam""" +833 53 training_loop """owa""" +833 53 negative_sampler """basic""" +833 53 evaluator """rankbased""" +833 54 dataset """wn18rr""" +833 54 model """transe""" +833 54 loss """bceaftersigmoid""" +833 54 regularizer """no""" +833 54 optimizer """adam""" +833 54 training_loop """owa""" +833 54 negative_sampler """basic""" +833 54 evaluator """rankbased""" +833 55 dataset """wn18rr""" +833 55 model """transe""" +833 55 loss """bceaftersigmoid""" +833 55 regularizer """no""" +833 55 optimizer """adam""" +833 55 training_loop """owa""" +833 55 negative_sampler """basic""" +833 55 evaluator """rankbased""" +833 56 dataset """wn18rr""" +833 56 model """transe""" +833 56 loss """bceaftersigmoid""" +833 56 regularizer """no""" +833 56 optimizer """adam""" +833 56 training_loop """owa""" +833 56 negative_sampler """basic""" +833 56 evaluator """rankbased""" +833 57 dataset """wn18rr""" +833 57 model """transe""" +833 57 loss """bceaftersigmoid""" +833 57 regularizer """no""" +833 57 optimizer 
"""adam""" +833 57 training_loop """owa""" +833 57 negative_sampler """basic""" +833 57 evaluator """rankbased""" +833 58 dataset """wn18rr""" +833 58 model """transe""" +833 58 loss """bceaftersigmoid""" +833 58 regularizer """no""" +833 58 optimizer """adam""" +833 58 training_loop """owa""" +833 58 negative_sampler """basic""" +833 58 evaluator """rankbased""" +833 59 dataset """wn18rr""" +833 59 model """transe""" +833 59 loss """bceaftersigmoid""" +833 59 regularizer """no""" +833 59 optimizer """adam""" +833 59 training_loop """owa""" +833 59 negative_sampler """basic""" +833 59 evaluator """rankbased""" +833 60 dataset """wn18rr""" +833 60 model """transe""" +833 60 loss """bceaftersigmoid""" +833 60 regularizer """no""" +833 60 optimizer """adam""" +833 60 training_loop """owa""" +833 60 negative_sampler """basic""" +833 60 evaluator """rankbased""" +833 61 dataset """wn18rr""" +833 61 model """transe""" +833 61 loss """bceaftersigmoid""" +833 61 regularizer """no""" +833 61 optimizer """adam""" +833 61 training_loop """owa""" +833 61 negative_sampler """basic""" +833 61 evaluator """rankbased""" +833 62 dataset """wn18rr""" +833 62 model """transe""" +833 62 loss """bceaftersigmoid""" +833 62 regularizer """no""" +833 62 optimizer """adam""" +833 62 training_loop """owa""" +833 62 negative_sampler """basic""" +833 62 evaluator """rankbased""" +833 63 dataset """wn18rr""" +833 63 model """transe""" +833 63 loss """bceaftersigmoid""" +833 63 regularizer """no""" +833 63 optimizer """adam""" +833 63 training_loop """owa""" +833 63 negative_sampler """basic""" +833 63 evaluator """rankbased""" +833 64 dataset """wn18rr""" +833 64 model """transe""" +833 64 loss """bceaftersigmoid""" +833 64 regularizer """no""" +833 64 optimizer """adam""" +833 64 training_loop """owa""" +833 64 negative_sampler """basic""" +833 64 evaluator """rankbased""" +833 65 dataset """wn18rr""" +833 65 model """transe""" +833 65 loss """bceaftersigmoid""" +833 65 regularizer """no""" 
+833 65 optimizer """adam""" +833 65 training_loop """owa""" +833 65 negative_sampler """basic""" +833 65 evaluator """rankbased""" +833 66 dataset """wn18rr""" +833 66 model """transe""" +833 66 loss """bceaftersigmoid""" +833 66 regularizer """no""" +833 66 optimizer """adam""" +833 66 training_loop """owa""" +833 66 negative_sampler """basic""" +833 66 evaluator """rankbased""" +833 67 dataset """wn18rr""" +833 67 model """transe""" +833 67 loss """bceaftersigmoid""" +833 67 regularizer """no""" +833 67 optimizer """adam""" +833 67 training_loop """owa""" +833 67 negative_sampler """basic""" +833 67 evaluator """rankbased""" +833 68 dataset """wn18rr""" +833 68 model """transe""" +833 68 loss """bceaftersigmoid""" +833 68 regularizer """no""" +833 68 optimizer """adam""" +833 68 training_loop """owa""" +833 68 negative_sampler """basic""" +833 68 evaluator """rankbased""" +833 69 dataset """wn18rr""" +833 69 model """transe""" +833 69 loss """bceaftersigmoid""" +833 69 regularizer """no""" +833 69 optimizer """adam""" +833 69 training_loop """owa""" +833 69 negative_sampler """basic""" +833 69 evaluator """rankbased""" +833 70 dataset """wn18rr""" +833 70 model """transe""" +833 70 loss """bceaftersigmoid""" +833 70 regularizer """no""" +833 70 optimizer """adam""" +833 70 training_loop """owa""" +833 70 negative_sampler """basic""" +833 70 evaluator """rankbased""" +833 71 dataset """wn18rr""" +833 71 model """transe""" +833 71 loss """bceaftersigmoid""" +833 71 regularizer """no""" +833 71 optimizer """adam""" +833 71 training_loop """owa""" +833 71 negative_sampler """basic""" +833 71 evaluator """rankbased""" +833 72 dataset """wn18rr""" +833 72 model """transe""" +833 72 loss """bceaftersigmoid""" +833 72 regularizer """no""" +833 72 optimizer """adam""" +833 72 training_loop """owa""" +833 72 negative_sampler """basic""" +833 72 evaluator """rankbased""" +833 73 dataset """wn18rr""" +833 73 model """transe""" +833 73 loss """bceaftersigmoid""" +833 73 
regularizer """no""" +833 73 optimizer """adam""" +833 73 training_loop """owa""" +833 73 negative_sampler """basic""" +833 73 evaluator """rankbased""" +833 74 dataset """wn18rr""" +833 74 model """transe""" +833 74 loss """bceaftersigmoid""" +833 74 regularizer """no""" +833 74 optimizer """adam""" +833 74 training_loop """owa""" +833 74 negative_sampler """basic""" +833 74 evaluator """rankbased""" +833 75 dataset """wn18rr""" +833 75 model """transe""" +833 75 loss """bceaftersigmoid""" +833 75 regularizer """no""" +833 75 optimizer """adam""" +833 75 training_loop """owa""" +833 75 negative_sampler """basic""" +833 75 evaluator """rankbased""" +833 76 dataset """wn18rr""" +833 76 model """transe""" +833 76 loss """bceaftersigmoid""" +833 76 regularizer """no""" +833 76 optimizer """adam""" +833 76 training_loop """owa""" +833 76 negative_sampler """basic""" +833 76 evaluator """rankbased""" +833 77 dataset """wn18rr""" +833 77 model """transe""" +833 77 loss """bceaftersigmoid""" +833 77 regularizer """no""" +833 77 optimizer """adam""" +833 77 training_loop """owa""" +833 77 negative_sampler """basic""" +833 77 evaluator """rankbased""" +833 78 dataset """wn18rr""" +833 78 model """transe""" +833 78 loss """bceaftersigmoid""" +833 78 regularizer """no""" +833 78 optimizer """adam""" +833 78 training_loop """owa""" +833 78 negative_sampler """basic""" +833 78 evaluator """rankbased""" +833 79 dataset """wn18rr""" +833 79 model """transe""" +833 79 loss """bceaftersigmoid""" +833 79 regularizer """no""" +833 79 optimizer """adam""" +833 79 training_loop """owa""" +833 79 negative_sampler """basic""" +833 79 evaluator """rankbased""" +833 80 dataset """wn18rr""" +833 80 model """transe""" +833 80 loss """bceaftersigmoid""" +833 80 regularizer """no""" +833 80 optimizer """adam""" +833 80 training_loop """owa""" +833 80 negative_sampler """basic""" +833 80 evaluator """rankbased""" +833 81 dataset """wn18rr""" +833 81 model """transe""" +833 81 loss 
"""bceaftersigmoid""" +833 81 regularizer """no""" +833 81 optimizer """adam""" +833 81 training_loop """owa""" +833 81 negative_sampler """basic""" +833 81 evaluator """rankbased""" +833 82 dataset """wn18rr""" +833 82 model """transe""" +833 82 loss """bceaftersigmoid""" +833 82 regularizer """no""" +833 82 optimizer """adam""" +833 82 training_loop """owa""" +833 82 negative_sampler """basic""" +833 82 evaluator """rankbased""" +833 83 dataset """wn18rr""" +833 83 model """transe""" +833 83 loss """bceaftersigmoid""" +833 83 regularizer """no""" +833 83 optimizer """adam""" +833 83 training_loop """owa""" +833 83 negative_sampler """basic""" +833 83 evaluator """rankbased""" +833 84 dataset """wn18rr""" +833 84 model """transe""" +833 84 loss """bceaftersigmoid""" +833 84 regularizer """no""" +833 84 optimizer """adam""" +833 84 training_loop """owa""" +833 84 negative_sampler """basic""" +833 84 evaluator """rankbased""" +833 85 dataset """wn18rr""" +833 85 model """transe""" +833 85 loss """bceaftersigmoid""" +833 85 regularizer """no""" +833 85 optimizer """adam""" +833 85 training_loop """owa""" +833 85 negative_sampler """basic""" +833 85 evaluator """rankbased""" +833 86 dataset """wn18rr""" +833 86 model """transe""" +833 86 loss """bceaftersigmoid""" +833 86 regularizer """no""" +833 86 optimizer """adam""" +833 86 training_loop """owa""" +833 86 negative_sampler """basic""" +833 86 evaluator """rankbased""" +833 87 dataset """wn18rr""" +833 87 model """transe""" +833 87 loss """bceaftersigmoid""" +833 87 regularizer """no""" +833 87 optimizer """adam""" +833 87 training_loop """owa""" +833 87 negative_sampler """basic""" +833 87 evaluator """rankbased""" +833 88 dataset """wn18rr""" +833 88 model """transe""" +833 88 loss """bceaftersigmoid""" +833 88 regularizer """no""" +833 88 optimizer """adam""" +833 88 training_loop """owa""" +833 88 negative_sampler """basic""" +833 88 evaluator """rankbased""" +833 89 dataset """wn18rr""" +833 89 model 
"""transe""" +833 89 loss """bceaftersigmoid""" +833 89 regularizer """no""" +833 89 optimizer """adam""" +833 89 training_loop """owa""" +833 89 negative_sampler """basic""" +833 89 evaluator """rankbased""" +833 90 dataset """wn18rr""" +833 90 model """transe""" +833 90 loss """bceaftersigmoid""" +833 90 regularizer """no""" +833 90 optimizer """adam""" +833 90 training_loop """owa""" +833 90 negative_sampler """basic""" +833 90 evaluator """rankbased""" +833 91 dataset """wn18rr""" +833 91 model """transe""" +833 91 loss """bceaftersigmoid""" +833 91 regularizer """no""" +833 91 optimizer """adam""" +833 91 training_loop """owa""" +833 91 negative_sampler """basic""" +833 91 evaluator """rankbased""" +833 92 dataset """wn18rr""" +833 92 model """transe""" +833 92 loss """bceaftersigmoid""" +833 92 regularizer """no""" +833 92 optimizer """adam""" +833 92 training_loop """owa""" +833 92 negative_sampler """basic""" +833 92 evaluator """rankbased""" +833 93 dataset """wn18rr""" +833 93 model """transe""" +833 93 loss """bceaftersigmoid""" +833 93 regularizer """no""" +833 93 optimizer """adam""" +833 93 training_loop """owa""" +833 93 negative_sampler """basic""" +833 93 evaluator """rankbased""" +833 94 dataset """wn18rr""" +833 94 model """transe""" +833 94 loss """bceaftersigmoid""" +833 94 regularizer """no""" +833 94 optimizer """adam""" +833 94 training_loop """owa""" +833 94 negative_sampler """basic""" +833 94 evaluator """rankbased""" +833 95 dataset """wn18rr""" +833 95 model """transe""" +833 95 loss """bceaftersigmoid""" +833 95 regularizer """no""" +833 95 optimizer """adam""" +833 95 training_loop """owa""" +833 95 negative_sampler """basic""" +833 95 evaluator """rankbased""" +833 96 dataset """wn18rr""" +833 96 model """transe""" +833 96 loss """bceaftersigmoid""" +833 96 regularizer """no""" +833 96 optimizer """adam""" +833 96 training_loop """owa""" +833 96 negative_sampler """basic""" +833 96 evaluator """rankbased""" +833 97 dataset 
"""wn18rr""" +833 97 model """transe""" +833 97 loss """bceaftersigmoid""" +833 97 regularizer """no""" +833 97 optimizer """adam""" +833 97 training_loop """owa""" +833 97 negative_sampler """basic""" +833 97 evaluator """rankbased""" +833 98 dataset """wn18rr""" +833 98 model """transe""" +833 98 loss """bceaftersigmoid""" +833 98 regularizer """no""" +833 98 optimizer """adam""" +833 98 training_loop """owa""" +833 98 negative_sampler """basic""" +833 98 evaluator """rankbased""" +833 99 dataset """wn18rr""" +833 99 model """transe""" +833 99 loss """bceaftersigmoid""" +833 99 regularizer """no""" +833 99 optimizer """adam""" +833 99 training_loop """owa""" +833 99 negative_sampler """basic""" +833 99 evaluator """rankbased""" +833 100 dataset """wn18rr""" +833 100 model """transe""" +833 100 loss """bceaftersigmoid""" +833 100 regularizer """no""" +833 100 optimizer """adam""" +833 100 training_loop """owa""" +833 100 negative_sampler """basic""" +833 100 evaluator """rankbased""" +834 1 model.embedding_dim 1.0 +834 1 model.scoring_fct_norm 2.0 +834 1 optimizer.lr 0.002052287606573366 +834 1 negative_sampler.num_negs_per_pos 6.0 +834 1 training.batch_size 2.0 +834 2 model.embedding_dim 2.0 +834 2 model.scoring_fct_norm 1.0 +834 2 optimizer.lr 0.001650306062247619 +834 2 negative_sampler.num_negs_per_pos 76.0 +834 2 training.batch_size 0.0 +834 3 model.embedding_dim 1.0 +834 3 model.scoring_fct_norm 2.0 +834 3 optimizer.lr 0.006117574335462465 +834 3 negative_sampler.num_negs_per_pos 63.0 +834 3 training.batch_size 0.0 +834 4 model.embedding_dim 2.0 +834 4 model.scoring_fct_norm 2.0 +834 4 optimizer.lr 0.017170384189316854 +834 4 negative_sampler.num_negs_per_pos 88.0 +834 4 training.batch_size 0.0 +834 5 model.embedding_dim 1.0 +834 5 model.scoring_fct_norm 2.0 +834 5 optimizer.lr 0.0014548326895113322 +834 5 negative_sampler.num_negs_per_pos 13.0 +834 5 training.batch_size 2.0 +834 6 model.embedding_dim 2.0 +834 6 model.scoring_fct_norm 1.0 +834 6 optimizer.lr 
0.038597541941908606 +834 6 negative_sampler.num_negs_per_pos 40.0 +834 6 training.batch_size 1.0 +834 7 model.embedding_dim 2.0 +834 7 model.scoring_fct_norm 2.0 +834 7 optimizer.lr 0.02373135089632692 +834 7 negative_sampler.num_negs_per_pos 98.0 +834 7 training.batch_size 1.0 +834 8 model.embedding_dim 0.0 +834 8 model.scoring_fct_norm 1.0 +834 8 optimizer.lr 0.08821795196818409 +834 8 negative_sampler.num_negs_per_pos 64.0 +834 8 training.batch_size 2.0 +834 9 model.embedding_dim 2.0 +834 9 model.scoring_fct_norm 2.0 +834 9 optimizer.lr 0.003653972398612708 +834 9 negative_sampler.num_negs_per_pos 71.0 +834 9 training.batch_size 2.0 +834 10 model.embedding_dim 2.0 +834 10 model.scoring_fct_norm 2.0 +834 10 optimizer.lr 0.0024965026037028185 +834 10 negative_sampler.num_negs_per_pos 64.0 +834 10 training.batch_size 0.0 +834 11 model.embedding_dim 1.0 +834 11 model.scoring_fct_norm 2.0 +834 11 optimizer.lr 0.0028845825547240906 +834 11 negative_sampler.num_negs_per_pos 44.0 +834 11 training.batch_size 2.0 +834 12 model.embedding_dim 0.0 +834 12 model.scoring_fct_norm 1.0 +834 12 optimizer.lr 0.003960291683974064 +834 12 negative_sampler.num_negs_per_pos 61.0 +834 12 training.batch_size 1.0 +834 13 model.embedding_dim 2.0 +834 13 model.scoring_fct_norm 2.0 +834 13 optimizer.lr 0.002746286642895087 +834 13 negative_sampler.num_negs_per_pos 88.0 +834 13 training.batch_size 1.0 +834 14 model.embedding_dim 1.0 +834 14 model.scoring_fct_norm 2.0 +834 14 optimizer.lr 0.00434611370974155 +834 14 negative_sampler.num_negs_per_pos 42.0 +834 14 training.batch_size 1.0 +834 15 model.embedding_dim 0.0 +834 15 model.scoring_fct_norm 1.0 +834 15 optimizer.lr 0.01681863833952114 +834 15 negative_sampler.num_negs_per_pos 4.0 +834 15 training.batch_size 1.0 +834 16 model.embedding_dim 2.0 +834 16 model.scoring_fct_norm 2.0 +834 16 optimizer.lr 0.03830042243519807 +834 16 negative_sampler.num_negs_per_pos 63.0 +834 16 training.batch_size 1.0 +834 17 model.embedding_dim 2.0 +834 17 
model.scoring_fct_norm 2.0 +834 17 optimizer.lr 0.0049359426557850205 +834 17 negative_sampler.num_negs_per_pos 86.0 +834 17 training.batch_size 2.0 +834 18 model.embedding_dim 0.0 +834 18 model.scoring_fct_norm 2.0 +834 18 optimizer.lr 0.02852405112163874 +834 18 negative_sampler.num_negs_per_pos 20.0 +834 18 training.batch_size 2.0 +834 19 model.embedding_dim 0.0 +834 19 model.scoring_fct_norm 1.0 +834 19 optimizer.lr 0.007627244752168985 +834 19 negative_sampler.num_negs_per_pos 31.0 +834 19 training.batch_size 0.0 +834 20 model.embedding_dim 0.0 +834 20 model.scoring_fct_norm 1.0 +834 20 optimizer.lr 0.04684869692662639 +834 20 negative_sampler.num_negs_per_pos 37.0 +834 20 training.batch_size 0.0 +834 21 model.embedding_dim 2.0 +834 21 model.scoring_fct_norm 2.0 +834 21 optimizer.lr 0.012996127665011821 +834 21 negative_sampler.num_negs_per_pos 94.0 +834 21 training.batch_size 1.0 +834 22 model.embedding_dim 1.0 +834 22 model.scoring_fct_norm 2.0 +834 22 optimizer.lr 0.00266913192220045 +834 22 negative_sampler.num_negs_per_pos 91.0 +834 22 training.batch_size 0.0 +834 23 model.embedding_dim 0.0 +834 23 model.scoring_fct_norm 1.0 +834 23 optimizer.lr 0.041709408529828455 +834 23 negative_sampler.num_negs_per_pos 68.0 +834 23 training.batch_size 1.0 +834 24 model.embedding_dim 0.0 +834 24 model.scoring_fct_norm 2.0 +834 24 optimizer.lr 0.005217191243933769 +834 24 negative_sampler.num_negs_per_pos 42.0 +834 24 training.batch_size 1.0 +834 25 model.embedding_dim 0.0 +834 25 model.scoring_fct_norm 1.0 +834 25 optimizer.lr 0.0015831482547716714 +834 25 negative_sampler.num_negs_per_pos 51.0 +834 25 training.batch_size 0.0 +834 26 model.embedding_dim 0.0 +834 26 model.scoring_fct_norm 2.0 +834 26 optimizer.lr 0.038638549403577395 +834 26 negative_sampler.num_negs_per_pos 21.0 +834 26 training.batch_size 1.0 +834 27 model.embedding_dim 1.0 +834 27 model.scoring_fct_norm 1.0 +834 27 optimizer.lr 0.03784542852954585 +834 27 negative_sampler.num_negs_per_pos 95.0 +834 
27 training.batch_size 2.0 +834 28 model.embedding_dim 1.0 +834 28 model.scoring_fct_norm 1.0 +834 28 optimizer.lr 0.05346100028241425 +834 28 negative_sampler.num_negs_per_pos 86.0 +834 28 training.batch_size 2.0 +834 29 model.embedding_dim 2.0 +834 29 model.scoring_fct_norm 2.0 +834 29 optimizer.lr 0.012904948952600949 +834 29 negative_sampler.num_negs_per_pos 2.0 +834 29 training.batch_size 0.0 +834 30 model.embedding_dim 2.0 +834 30 model.scoring_fct_norm 2.0 +834 30 optimizer.lr 0.005512960157711752 +834 30 negative_sampler.num_negs_per_pos 53.0 +834 30 training.batch_size 2.0 +834 31 model.embedding_dim 1.0 +834 31 model.scoring_fct_norm 1.0 +834 31 optimizer.lr 0.020438372059581437 +834 31 negative_sampler.num_negs_per_pos 85.0 +834 31 training.batch_size 1.0 +834 32 model.embedding_dim 1.0 +834 32 model.scoring_fct_norm 1.0 +834 32 optimizer.lr 0.01332004387560149 +834 32 negative_sampler.num_negs_per_pos 43.0 +834 32 training.batch_size 0.0 +834 33 model.embedding_dim 1.0 +834 33 model.scoring_fct_norm 2.0 +834 33 optimizer.lr 0.0032575523597883353 +834 33 negative_sampler.num_negs_per_pos 16.0 +834 33 training.batch_size 0.0 +834 34 model.embedding_dim 0.0 +834 34 model.scoring_fct_norm 1.0 +834 34 optimizer.lr 0.006846068544125081 +834 34 negative_sampler.num_negs_per_pos 66.0 +834 34 training.batch_size 0.0 +834 35 model.embedding_dim 2.0 +834 35 model.scoring_fct_norm 2.0 +834 35 optimizer.lr 0.049265612874196216 +834 35 negative_sampler.num_negs_per_pos 96.0 +834 35 training.batch_size 0.0 +834 36 model.embedding_dim 0.0 +834 36 model.scoring_fct_norm 2.0 +834 36 optimizer.lr 0.0030731637090883543 +834 36 negative_sampler.num_negs_per_pos 95.0 +834 36 training.batch_size 2.0 +834 37 model.embedding_dim 1.0 +834 37 model.scoring_fct_norm 1.0 +834 37 optimizer.lr 0.0410738687260787 +834 37 negative_sampler.num_negs_per_pos 30.0 +834 37 training.batch_size 1.0 +834 38 model.embedding_dim 2.0 +834 38 model.scoring_fct_norm 2.0 +834 38 optimizer.lr 
0.0027556093116446584 +834 38 negative_sampler.num_negs_per_pos 91.0 +834 38 training.batch_size 0.0 +834 39 model.embedding_dim 1.0 +834 39 model.scoring_fct_norm 2.0 +834 39 optimizer.lr 0.0010547658980941048 +834 39 negative_sampler.num_negs_per_pos 56.0 +834 39 training.batch_size 2.0 +834 40 model.embedding_dim 0.0 +834 40 model.scoring_fct_norm 2.0 +834 40 optimizer.lr 0.03362256153099666 +834 40 negative_sampler.num_negs_per_pos 53.0 +834 40 training.batch_size 2.0 +834 41 model.embedding_dim 1.0 +834 41 model.scoring_fct_norm 2.0 +834 41 optimizer.lr 0.002118880383149986 +834 41 negative_sampler.num_negs_per_pos 52.0 +834 41 training.batch_size 2.0 +834 42 model.embedding_dim 1.0 +834 42 model.scoring_fct_norm 2.0 +834 42 optimizer.lr 0.03506874504428929 +834 42 negative_sampler.num_negs_per_pos 84.0 +834 42 training.batch_size 0.0 +834 43 model.embedding_dim 1.0 +834 43 model.scoring_fct_norm 1.0 +834 43 optimizer.lr 0.021935254506532022 +834 43 negative_sampler.num_negs_per_pos 14.0 +834 43 training.batch_size 1.0 +834 44 model.embedding_dim 0.0 +834 44 model.scoring_fct_norm 2.0 +834 44 optimizer.lr 0.008549998056996145 +834 44 negative_sampler.num_negs_per_pos 74.0 +834 44 training.batch_size 1.0 +834 45 model.embedding_dim 2.0 +834 45 model.scoring_fct_norm 1.0 +834 45 optimizer.lr 0.006546485113649329 +834 45 negative_sampler.num_negs_per_pos 43.0 +834 45 training.batch_size 2.0 +834 46 model.embedding_dim 1.0 +834 46 model.scoring_fct_norm 2.0 +834 46 optimizer.lr 0.06107475763100761 +834 46 negative_sampler.num_negs_per_pos 91.0 +834 46 training.batch_size 1.0 +834 47 model.embedding_dim 0.0 +834 47 model.scoring_fct_norm 2.0 +834 47 optimizer.lr 0.0048185489200547075 +834 47 negative_sampler.num_negs_per_pos 38.0 +834 47 training.batch_size 1.0 +834 48 model.embedding_dim 2.0 +834 48 model.scoring_fct_norm 2.0 +834 48 optimizer.lr 0.012359641730672733 +834 48 negative_sampler.num_negs_per_pos 29.0 +834 48 training.batch_size 0.0 +834 49 
model.embedding_dim 0.0 +834 49 model.scoring_fct_norm 1.0 +834 49 optimizer.lr 0.001887490756730521 +834 49 negative_sampler.num_negs_per_pos 46.0 +834 49 training.batch_size 1.0 +834 50 model.embedding_dim 2.0 +834 50 model.scoring_fct_norm 1.0 +834 50 optimizer.lr 0.026119807581814692 +834 50 negative_sampler.num_negs_per_pos 18.0 +834 50 training.batch_size 1.0 +834 51 model.embedding_dim 2.0 +834 51 model.scoring_fct_norm 2.0 +834 51 optimizer.lr 0.02033381005045013 +834 51 negative_sampler.num_negs_per_pos 62.0 +834 51 training.batch_size 2.0 +834 52 model.embedding_dim 2.0 +834 52 model.scoring_fct_norm 1.0 +834 52 optimizer.lr 0.015304088548797903 +834 52 negative_sampler.num_negs_per_pos 33.0 +834 52 training.batch_size 1.0 +834 53 model.embedding_dim 1.0 +834 53 model.scoring_fct_norm 2.0 +834 53 optimizer.lr 0.04063381214642987 +834 53 negative_sampler.num_negs_per_pos 36.0 +834 53 training.batch_size 2.0 +834 54 model.embedding_dim 0.0 +834 54 model.scoring_fct_norm 2.0 +834 54 optimizer.lr 0.00902953147135146 +834 54 negative_sampler.num_negs_per_pos 53.0 +834 54 training.batch_size 2.0 +834 55 model.embedding_dim 0.0 +834 55 model.scoring_fct_norm 2.0 +834 55 optimizer.lr 0.0015364610663254397 +834 55 negative_sampler.num_negs_per_pos 94.0 +834 55 training.batch_size 0.0 +834 56 model.embedding_dim 0.0 +834 56 model.scoring_fct_norm 1.0 +834 56 optimizer.lr 0.03356939831131237 +834 56 negative_sampler.num_negs_per_pos 5.0 +834 56 training.batch_size 2.0 +834 57 model.embedding_dim 2.0 +834 57 model.scoring_fct_norm 1.0 +834 57 optimizer.lr 0.0027972182381007606 +834 57 negative_sampler.num_negs_per_pos 56.0 +834 57 training.batch_size 0.0 +834 58 model.embedding_dim 0.0 +834 58 model.scoring_fct_norm 1.0 +834 58 optimizer.lr 0.02135881549534859 +834 58 negative_sampler.num_negs_per_pos 27.0 +834 58 training.batch_size 2.0 +834 59 model.embedding_dim 2.0 +834 59 model.scoring_fct_norm 2.0 +834 59 optimizer.lr 0.028529301849138006 +834 59 
negative_sampler.num_negs_per_pos 53.0 +834 59 training.batch_size 1.0 +834 60 model.embedding_dim 0.0 +834 60 model.scoring_fct_norm 2.0 +834 60 optimizer.lr 0.0020856669615208633 +834 60 negative_sampler.num_negs_per_pos 59.0 +834 60 training.batch_size 2.0 +834 61 model.embedding_dim 1.0 +834 61 model.scoring_fct_norm 1.0 +834 61 optimizer.lr 0.0011930544210061612 +834 61 negative_sampler.num_negs_per_pos 14.0 +834 61 training.batch_size 2.0 +834 62 model.embedding_dim 0.0 +834 62 model.scoring_fct_norm 1.0 +834 62 optimizer.lr 0.004579785040234961 +834 62 negative_sampler.num_negs_per_pos 87.0 +834 62 training.batch_size 2.0 +834 63 model.embedding_dim 0.0 +834 63 model.scoring_fct_norm 2.0 +834 63 optimizer.lr 0.006202203300652156 +834 63 negative_sampler.num_negs_per_pos 38.0 +834 63 training.batch_size 2.0 +834 64 model.embedding_dim 2.0 +834 64 model.scoring_fct_norm 1.0 +834 64 optimizer.lr 0.05514672567440521 +834 64 negative_sampler.num_negs_per_pos 55.0 +834 64 training.batch_size 2.0 +834 65 model.embedding_dim 1.0 +834 65 model.scoring_fct_norm 1.0 +834 65 optimizer.lr 0.009579536458782314 +834 65 negative_sampler.num_negs_per_pos 83.0 +834 65 training.batch_size 0.0 +834 66 model.embedding_dim 0.0 +834 66 model.scoring_fct_norm 1.0 +834 66 optimizer.lr 0.0033388208323593322 +834 66 negative_sampler.num_negs_per_pos 47.0 +834 66 training.batch_size 0.0 +834 67 model.embedding_dim 0.0 +834 67 model.scoring_fct_norm 1.0 +834 67 optimizer.lr 0.0014839348864931678 +834 67 negative_sampler.num_negs_per_pos 52.0 +834 67 training.batch_size 0.0 +834 68 model.embedding_dim 2.0 +834 68 model.scoring_fct_norm 1.0 +834 68 optimizer.lr 0.025986315000466536 +834 68 negative_sampler.num_negs_per_pos 39.0 +834 68 training.batch_size 0.0 +834 69 model.embedding_dim 0.0 +834 69 model.scoring_fct_norm 1.0 +834 69 optimizer.lr 0.05936303596370288 +834 69 negative_sampler.num_negs_per_pos 31.0 +834 69 training.batch_size 0.0 +834 70 model.embedding_dim 1.0 +834 70 
model.scoring_fct_norm 1.0 +834 70 optimizer.lr 0.08327197900946068 +834 70 negative_sampler.num_negs_per_pos 70.0 +834 70 training.batch_size 2.0 +834 71 model.embedding_dim 2.0 +834 71 model.scoring_fct_norm 2.0 +834 71 optimizer.lr 0.01584823910623076 +834 71 negative_sampler.num_negs_per_pos 22.0 +834 71 training.batch_size 2.0 +834 72 model.embedding_dim 1.0 +834 72 model.scoring_fct_norm 2.0 +834 72 optimizer.lr 0.01047875714597048 +834 72 negative_sampler.num_negs_per_pos 71.0 +834 72 training.batch_size 2.0 +834 73 model.embedding_dim 2.0 +834 73 model.scoring_fct_norm 1.0 +834 73 optimizer.lr 0.0032278467673582814 +834 73 negative_sampler.num_negs_per_pos 70.0 +834 73 training.batch_size 1.0 +834 74 model.embedding_dim 0.0 +834 74 model.scoring_fct_norm 1.0 +834 74 optimizer.lr 0.0010223813010131427 +834 74 negative_sampler.num_negs_per_pos 4.0 +834 74 training.batch_size 0.0 +834 75 model.embedding_dim 0.0 +834 75 model.scoring_fct_norm 1.0 +834 75 optimizer.lr 0.06938954422075276 +834 75 negative_sampler.num_negs_per_pos 91.0 +834 75 training.batch_size 1.0 +834 76 model.embedding_dim 2.0 +834 76 model.scoring_fct_norm 1.0 +834 76 optimizer.lr 0.009500581739976525 +834 76 negative_sampler.num_negs_per_pos 13.0 +834 76 training.batch_size 1.0 +834 77 model.embedding_dim 0.0 +834 77 model.scoring_fct_norm 1.0 +834 77 optimizer.lr 0.002228287365213901 +834 77 negative_sampler.num_negs_per_pos 14.0 +834 77 training.batch_size 0.0 +834 78 model.embedding_dim 1.0 +834 78 model.scoring_fct_norm 2.0 +834 78 optimizer.lr 0.03432687758453493 +834 78 negative_sampler.num_negs_per_pos 61.0 +834 78 training.batch_size 0.0 +834 79 model.embedding_dim 2.0 +834 79 model.scoring_fct_norm 1.0 +834 79 optimizer.lr 0.004869535898566374 +834 79 negative_sampler.num_negs_per_pos 47.0 +834 79 training.batch_size 2.0 +834 80 model.embedding_dim 1.0 +834 80 model.scoring_fct_norm 1.0 +834 80 optimizer.lr 0.007390368617480061 +834 80 negative_sampler.num_negs_per_pos 39.0 +834 80 
training.batch_size 2.0 +834 81 model.embedding_dim 1.0 +834 81 model.scoring_fct_norm 2.0 +834 81 optimizer.lr 0.004887870905640503 +834 81 negative_sampler.num_negs_per_pos 38.0 +834 81 training.batch_size 0.0 +834 82 model.embedding_dim 0.0 +834 82 model.scoring_fct_norm 2.0 +834 82 optimizer.lr 0.007081137670939656 +834 82 negative_sampler.num_negs_per_pos 56.0 +834 82 training.batch_size 0.0 +834 83 model.embedding_dim 0.0 +834 83 model.scoring_fct_norm 2.0 +834 83 optimizer.lr 0.038089523866344045 +834 83 negative_sampler.num_negs_per_pos 86.0 +834 83 training.batch_size 0.0 +834 84 model.embedding_dim 2.0 +834 84 model.scoring_fct_norm 2.0 +834 84 optimizer.lr 0.006236223996496997 +834 84 negative_sampler.num_negs_per_pos 29.0 +834 84 training.batch_size 2.0 +834 85 model.embedding_dim 2.0 +834 85 model.scoring_fct_norm 2.0 +834 85 optimizer.lr 0.010873114176819748 +834 85 negative_sampler.num_negs_per_pos 2.0 +834 85 training.batch_size 2.0 +834 86 model.embedding_dim 0.0 +834 86 model.scoring_fct_norm 2.0 +834 86 optimizer.lr 0.004218591977366282 +834 86 negative_sampler.num_negs_per_pos 91.0 +834 86 training.batch_size 1.0 +834 87 model.embedding_dim 1.0 +834 87 model.scoring_fct_norm 2.0 +834 87 optimizer.lr 0.0026532842735964767 +834 87 negative_sampler.num_negs_per_pos 81.0 +834 87 training.batch_size 2.0 +834 88 model.embedding_dim 1.0 +834 88 model.scoring_fct_norm 2.0 +834 88 optimizer.lr 0.05853702903544951 +834 88 negative_sampler.num_negs_per_pos 76.0 +834 88 training.batch_size 0.0 +834 89 model.embedding_dim 1.0 +834 89 model.scoring_fct_norm 1.0 +834 89 optimizer.lr 0.013102418511420924 +834 89 negative_sampler.num_negs_per_pos 98.0 +834 89 training.batch_size 1.0 +834 90 model.embedding_dim 1.0 +834 90 model.scoring_fct_norm 2.0 +834 90 optimizer.lr 0.07191978079270334 +834 90 negative_sampler.num_negs_per_pos 17.0 +834 90 training.batch_size 0.0 +834 91 model.embedding_dim 2.0 +834 91 model.scoring_fct_norm 2.0 +834 91 optimizer.lr 
0.007418434247043035 +834 91 negative_sampler.num_negs_per_pos 84.0 +834 91 training.batch_size 2.0 +834 92 model.embedding_dim 2.0 +834 92 model.scoring_fct_norm 2.0 +834 92 optimizer.lr 0.006740124488826986 +834 92 negative_sampler.num_negs_per_pos 25.0 +834 92 training.batch_size 2.0 +834 93 model.embedding_dim 1.0 +834 93 model.scoring_fct_norm 2.0 +834 93 optimizer.lr 0.0021775561948731547 +834 93 negative_sampler.num_negs_per_pos 30.0 +834 93 training.batch_size 2.0 +834 94 model.embedding_dim 1.0 +834 94 model.scoring_fct_norm 2.0 +834 94 optimizer.lr 0.008168445614925987 +834 94 negative_sampler.num_negs_per_pos 56.0 +834 94 training.batch_size 0.0 +834 95 model.embedding_dim 2.0 +834 95 model.scoring_fct_norm 2.0 +834 95 optimizer.lr 0.016747910893668675 +834 95 negative_sampler.num_negs_per_pos 84.0 +834 95 training.batch_size 1.0 +834 96 model.embedding_dim 1.0 +834 96 model.scoring_fct_norm 2.0 +834 96 optimizer.lr 0.004463077532235679 +834 96 negative_sampler.num_negs_per_pos 72.0 +834 96 training.batch_size 2.0 +834 97 model.embedding_dim 2.0 +834 97 model.scoring_fct_norm 2.0 +834 97 optimizer.lr 0.0010784495453405615 +834 97 negative_sampler.num_negs_per_pos 68.0 +834 97 training.batch_size 2.0 +834 98 model.embedding_dim 2.0 +834 98 model.scoring_fct_norm 1.0 +834 98 optimizer.lr 0.010367604808003065 +834 98 negative_sampler.num_negs_per_pos 82.0 +834 98 training.batch_size 2.0 +834 99 model.embedding_dim 1.0 +834 99 model.scoring_fct_norm 1.0 +834 99 optimizer.lr 0.0015069268976117339 +834 99 negative_sampler.num_negs_per_pos 94.0 +834 99 training.batch_size 1.0 +834 100 model.embedding_dim 1.0 +834 100 model.scoring_fct_norm 1.0 +834 100 optimizer.lr 0.05972111232103893 +834 100 negative_sampler.num_negs_per_pos 32.0 +834 100 training.batch_size 2.0 +834 1 dataset """wn18rr""" +834 1 model """transe""" +834 1 loss """softplus""" +834 1 regularizer """no""" +834 1 optimizer """adam""" +834 1 training_loop """owa""" +834 1 negative_sampler 
"""basic""" +834 1 evaluator """rankbased""" +834 2 dataset """wn18rr""" +834 2 model """transe""" +834 2 loss """softplus""" +834 2 regularizer """no""" +834 2 optimizer """adam""" +834 2 training_loop """owa""" +834 2 negative_sampler """basic""" +834 2 evaluator """rankbased""" +834 3 dataset """wn18rr""" +834 3 model """transe""" +834 3 loss """softplus""" +834 3 regularizer """no""" +834 3 optimizer """adam""" +834 3 training_loop """owa""" +834 3 negative_sampler """basic""" +834 3 evaluator """rankbased""" +834 4 dataset """wn18rr""" +834 4 model """transe""" +834 4 loss """softplus""" +834 4 regularizer """no""" +834 4 optimizer """adam""" +834 4 training_loop """owa""" +834 4 negative_sampler """basic""" +834 4 evaluator """rankbased""" +834 5 dataset """wn18rr""" +834 5 model """transe""" +834 5 loss """softplus""" +834 5 regularizer """no""" +834 5 optimizer """adam""" +834 5 training_loop """owa""" +834 5 negative_sampler """basic""" +834 5 evaluator """rankbased""" +834 6 dataset """wn18rr""" +834 6 model """transe""" +834 6 loss """softplus""" +834 6 regularizer """no""" +834 6 optimizer """adam""" +834 6 training_loop """owa""" +834 6 negative_sampler """basic""" +834 6 evaluator """rankbased""" +834 7 dataset """wn18rr""" +834 7 model """transe""" +834 7 loss """softplus""" +834 7 regularizer """no""" +834 7 optimizer """adam""" +834 7 training_loop """owa""" +834 7 negative_sampler """basic""" +834 7 evaluator """rankbased""" +834 8 dataset """wn18rr""" +834 8 model """transe""" +834 8 loss """softplus""" +834 8 regularizer """no""" +834 8 optimizer """adam""" +834 8 training_loop """owa""" +834 8 negative_sampler """basic""" +834 8 evaluator """rankbased""" +834 9 dataset """wn18rr""" +834 9 model """transe""" +834 9 loss """softplus""" +834 9 regularizer """no""" +834 9 optimizer """adam""" +834 9 training_loop """owa""" +834 9 negative_sampler """basic""" +834 9 evaluator """rankbased""" +834 10 dataset """wn18rr""" +834 10 model """transe""" 
+834 10 loss """softplus""" +834 10 regularizer """no""" +834 10 optimizer """adam""" +834 10 training_loop """owa""" +834 10 negative_sampler """basic""" +834 10 evaluator """rankbased""" +834 11 dataset """wn18rr""" +834 11 model """transe""" +834 11 loss """softplus""" +834 11 regularizer """no""" +834 11 optimizer """adam""" +834 11 training_loop """owa""" +834 11 negative_sampler """basic""" +834 11 evaluator """rankbased""" +834 12 dataset """wn18rr""" +834 12 model """transe""" +834 12 loss """softplus""" +834 12 regularizer """no""" +834 12 optimizer """adam""" +834 12 training_loop """owa""" +834 12 negative_sampler """basic""" +834 12 evaluator """rankbased""" +834 13 dataset """wn18rr""" +834 13 model """transe""" +834 13 loss """softplus""" +834 13 regularizer """no""" +834 13 optimizer """adam""" +834 13 training_loop """owa""" +834 13 negative_sampler """basic""" +834 13 evaluator """rankbased""" +834 14 dataset """wn18rr""" +834 14 model """transe""" +834 14 loss """softplus""" +834 14 regularizer """no""" +834 14 optimizer """adam""" +834 14 training_loop """owa""" +834 14 negative_sampler """basic""" +834 14 evaluator """rankbased""" +834 15 dataset """wn18rr""" +834 15 model """transe""" +834 15 loss """softplus""" +834 15 regularizer """no""" +834 15 optimizer """adam""" +834 15 training_loop """owa""" +834 15 negative_sampler """basic""" +834 15 evaluator """rankbased""" +834 16 dataset """wn18rr""" +834 16 model """transe""" +834 16 loss """softplus""" +834 16 regularizer """no""" +834 16 optimizer """adam""" +834 16 training_loop """owa""" +834 16 negative_sampler """basic""" +834 16 evaluator """rankbased""" +834 17 dataset """wn18rr""" +834 17 model """transe""" +834 17 loss """softplus""" +834 17 regularizer """no""" +834 17 optimizer """adam""" +834 17 training_loop """owa""" +834 17 negative_sampler """basic""" +834 17 evaluator """rankbased""" +834 18 dataset """wn18rr""" +834 18 model """transe""" +834 18 loss """softplus""" +834 18 
regularizer """no""" +834 18 optimizer """adam""" +834 18 training_loop """owa""" +834 18 negative_sampler """basic""" +834 18 evaluator """rankbased""" +834 19 dataset """wn18rr""" +834 19 model """transe""" +834 19 loss """softplus""" +834 19 regularizer """no""" +834 19 optimizer """adam""" +834 19 training_loop """owa""" +834 19 negative_sampler """basic""" +834 19 evaluator """rankbased""" +834 20 dataset """wn18rr""" +834 20 model """transe""" +834 20 loss """softplus""" +834 20 regularizer """no""" +834 20 optimizer """adam""" +834 20 training_loop """owa""" +834 20 negative_sampler """basic""" +834 20 evaluator """rankbased""" +834 21 dataset """wn18rr""" +834 21 model """transe""" +834 21 loss """softplus""" +834 21 regularizer """no""" +834 21 optimizer """adam""" +834 21 training_loop """owa""" +834 21 negative_sampler """basic""" +834 21 evaluator """rankbased""" +834 22 dataset """wn18rr""" +834 22 model """transe""" +834 22 loss """softplus""" +834 22 regularizer """no""" +834 22 optimizer """adam""" +834 22 training_loop """owa""" +834 22 negative_sampler """basic""" +834 22 evaluator """rankbased""" +834 23 dataset """wn18rr""" +834 23 model """transe""" +834 23 loss """softplus""" +834 23 regularizer """no""" +834 23 optimizer """adam""" +834 23 training_loop """owa""" +834 23 negative_sampler """basic""" +834 23 evaluator """rankbased""" +834 24 dataset """wn18rr""" +834 24 model """transe""" +834 24 loss """softplus""" +834 24 regularizer """no""" +834 24 optimizer """adam""" +834 24 training_loop """owa""" +834 24 negative_sampler """basic""" +834 24 evaluator """rankbased""" +834 25 dataset """wn18rr""" +834 25 model """transe""" +834 25 loss """softplus""" +834 25 regularizer """no""" +834 25 optimizer """adam""" +834 25 training_loop """owa""" +834 25 negative_sampler """basic""" +834 25 evaluator """rankbased""" +834 26 dataset """wn18rr""" +834 26 model """transe""" +834 26 loss """softplus""" +834 26 regularizer """no""" +834 26 optimizer 
"""adam""" +834 26 training_loop """owa""" +834 26 negative_sampler """basic""" +834 26 evaluator """rankbased""" +834 27 dataset """wn18rr""" +834 27 model """transe""" +834 27 loss """softplus""" +834 27 regularizer """no""" +834 27 optimizer """adam""" +834 27 training_loop """owa""" +834 27 negative_sampler """basic""" +834 27 evaluator """rankbased""" +834 28 dataset """wn18rr""" +834 28 model """transe""" +834 28 loss """softplus""" +834 28 regularizer """no""" +834 28 optimizer """adam""" +834 28 training_loop """owa""" +834 28 negative_sampler """basic""" +834 28 evaluator """rankbased""" +834 29 dataset """wn18rr""" +834 29 model """transe""" +834 29 loss """softplus""" +834 29 regularizer """no""" +834 29 optimizer """adam""" +834 29 training_loop """owa""" +834 29 negative_sampler """basic""" +834 29 evaluator """rankbased""" +834 30 dataset """wn18rr""" +834 30 model """transe""" +834 30 loss """softplus""" +834 30 regularizer """no""" +834 30 optimizer """adam""" +834 30 training_loop """owa""" +834 30 negative_sampler """basic""" +834 30 evaluator """rankbased""" +834 31 dataset """wn18rr""" +834 31 model """transe""" +834 31 loss """softplus""" +834 31 regularizer """no""" +834 31 optimizer """adam""" +834 31 training_loop """owa""" +834 31 negative_sampler """basic""" +834 31 evaluator """rankbased""" +834 32 dataset """wn18rr""" +834 32 model """transe""" +834 32 loss """softplus""" +834 32 regularizer """no""" +834 32 optimizer """adam""" +834 32 training_loop """owa""" +834 32 negative_sampler """basic""" +834 32 evaluator """rankbased""" +834 33 dataset """wn18rr""" +834 33 model """transe""" +834 33 loss """softplus""" +834 33 regularizer """no""" +834 33 optimizer """adam""" +834 33 training_loop """owa""" +834 33 negative_sampler """basic""" +834 33 evaluator """rankbased""" +834 34 dataset """wn18rr""" +834 34 model """transe""" +834 34 loss """softplus""" +834 34 regularizer """no""" +834 34 optimizer """adam""" +834 34 training_loop 
"""owa""" +834 34 negative_sampler """basic""" +834 34 evaluator """rankbased""" +834 35 dataset """wn18rr""" +834 35 model """transe""" +834 35 loss """softplus""" +834 35 regularizer """no""" +834 35 optimizer """adam""" +834 35 training_loop """owa""" +834 35 negative_sampler """basic""" +834 35 evaluator """rankbased""" +834 36 dataset """wn18rr""" +834 36 model """transe""" +834 36 loss """softplus""" +834 36 regularizer """no""" +834 36 optimizer """adam""" +834 36 training_loop """owa""" +834 36 negative_sampler """basic""" +834 36 evaluator """rankbased""" +834 37 dataset """wn18rr""" +834 37 model """transe""" +834 37 loss """softplus""" +834 37 regularizer """no""" +834 37 optimizer """adam""" +834 37 training_loop """owa""" +834 37 negative_sampler """basic""" +834 37 evaluator """rankbased""" +834 38 dataset """wn18rr""" +834 38 model """transe""" +834 38 loss """softplus""" +834 38 regularizer """no""" +834 38 optimizer """adam""" +834 38 training_loop """owa""" +834 38 negative_sampler """basic""" +834 38 evaluator """rankbased""" +834 39 dataset """wn18rr""" +834 39 model """transe""" +834 39 loss """softplus""" +834 39 regularizer """no""" +834 39 optimizer """adam""" +834 39 training_loop """owa""" +834 39 negative_sampler """basic""" +834 39 evaluator """rankbased""" +834 40 dataset """wn18rr""" +834 40 model """transe""" +834 40 loss """softplus""" +834 40 regularizer """no""" +834 40 optimizer """adam""" +834 40 training_loop """owa""" +834 40 negative_sampler """basic""" +834 40 evaluator """rankbased""" +834 41 dataset """wn18rr""" +834 41 model """transe""" +834 41 loss """softplus""" +834 41 regularizer """no""" +834 41 optimizer """adam""" +834 41 training_loop """owa""" +834 41 negative_sampler """basic""" +834 41 evaluator """rankbased""" +834 42 dataset """wn18rr""" +834 42 model """transe""" +834 42 loss """softplus""" +834 42 regularizer """no""" +834 42 optimizer """adam""" +834 42 training_loop """owa""" +834 42 negative_sampler 
"""basic""" +834 42 evaluator """rankbased""" +834 43 dataset """wn18rr""" +834 43 model """transe""" +834 43 loss """softplus""" +834 43 regularizer """no""" +834 43 optimizer """adam""" +834 43 training_loop """owa""" +834 43 negative_sampler """basic""" +834 43 evaluator """rankbased""" +834 44 dataset """wn18rr""" +834 44 model """transe""" +834 44 loss """softplus""" +834 44 regularizer """no""" +834 44 optimizer """adam""" +834 44 training_loop """owa""" +834 44 negative_sampler """basic""" +834 44 evaluator """rankbased""" +834 45 dataset """wn18rr""" +834 45 model """transe""" +834 45 loss """softplus""" +834 45 regularizer """no""" +834 45 optimizer """adam""" +834 45 training_loop """owa""" +834 45 negative_sampler """basic""" +834 45 evaluator """rankbased""" +834 46 dataset """wn18rr""" +834 46 model """transe""" +834 46 loss """softplus""" +834 46 regularizer """no""" +834 46 optimizer """adam""" +834 46 training_loop """owa""" +834 46 negative_sampler """basic""" +834 46 evaluator """rankbased""" +834 47 dataset """wn18rr""" +834 47 model """transe""" +834 47 loss """softplus""" +834 47 regularizer """no""" +834 47 optimizer """adam""" +834 47 training_loop """owa""" +834 47 negative_sampler """basic""" +834 47 evaluator """rankbased""" +834 48 dataset """wn18rr""" +834 48 model """transe""" +834 48 loss """softplus""" +834 48 regularizer """no""" +834 48 optimizer """adam""" +834 48 training_loop """owa""" +834 48 negative_sampler """basic""" +834 48 evaluator """rankbased""" +834 49 dataset """wn18rr""" +834 49 model """transe""" +834 49 loss """softplus""" +834 49 regularizer """no""" +834 49 optimizer """adam""" +834 49 training_loop """owa""" +834 49 negative_sampler """basic""" +834 49 evaluator """rankbased""" +834 50 dataset """wn18rr""" +834 50 model """transe""" +834 50 loss """softplus""" +834 50 regularizer """no""" +834 50 optimizer """adam""" +834 50 training_loop """owa""" +834 50 negative_sampler """basic""" +834 50 evaluator 
"""rankbased""" +834 51 dataset """wn18rr""" +834 51 model """transe""" +834 51 loss """softplus""" +834 51 regularizer """no""" +834 51 optimizer """adam""" +834 51 training_loop """owa""" +834 51 negative_sampler """basic""" +834 51 evaluator """rankbased""" +834 52 dataset """wn18rr""" +834 52 model """transe""" +834 52 loss """softplus""" +834 52 regularizer """no""" +834 52 optimizer """adam""" +834 52 training_loop """owa""" +834 52 negative_sampler """basic""" +834 52 evaluator """rankbased""" +834 53 dataset """wn18rr""" +834 53 model """transe""" +834 53 loss """softplus""" +834 53 regularizer """no""" +834 53 optimizer """adam""" +834 53 training_loop """owa""" +834 53 negative_sampler """basic""" +834 53 evaluator """rankbased""" +834 54 dataset """wn18rr""" +834 54 model """transe""" +834 54 loss """softplus""" +834 54 regularizer """no""" +834 54 optimizer """adam""" +834 54 training_loop """owa""" +834 54 negative_sampler """basic""" +834 54 evaluator """rankbased""" +834 55 dataset """wn18rr""" +834 55 model """transe""" +834 55 loss """softplus""" +834 55 regularizer """no""" +834 55 optimizer """adam""" +834 55 training_loop """owa""" +834 55 negative_sampler """basic""" +834 55 evaluator """rankbased""" +834 56 dataset """wn18rr""" +834 56 model """transe""" +834 56 loss """softplus""" +834 56 regularizer """no""" +834 56 optimizer """adam""" +834 56 training_loop """owa""" +834 56 negative_sampler """basic""" +834 56 evaluator """rankbased""" +834 57 dataset """wn18rr""" +834 57 model """transe""" +834 57 loss """softplus""" +834 57 regularizer """no""" +834 57 optimizer """adam""" +834 57 training_loop """owa""" +834 57 negative_sampler """basic""" +834 57 evaluator """rankbased""" +834 58 dataset """wn18rr""" +834 58 model """transe""" +834 58 loss """softplus""" +834 58 regularizer """no""" +834 58 optimizer """adam""" +834 58 training_loop """owa""" +834 58 negative_sampler """basic""" +834 58 evaluator """rankbased""" +834 59 dataset 
"""wn18rr""" +834 59 model """transe""" +834 59 loss """softplus""" +834 59 regularizer """no""" +834 59 optimizer """adam""" +834 59 training_loop """owa""" +834 59 negative_sampler """basic""" +834 59 evaluator """rankbased""" +834 60 dataset """wn18rr""" +834 60 model """transe""" +834 60 loss """softplus""" +834 60 regularizer """no""" +834 60 optimizer """adam""" +834 60 training_loop """owa""" +834 60 negative_sampler """basic""" +834 60 evaluator """rankbased""" +834 61 dataset """wn18rr""" +834 61 model """transe""" +834 61 loss """softplus""" +834 61 regularizer """no""" +834 61 optimizer """adam""" +834 61 training_loop """owa""" +834 61 negative_sampler """basic""" +834 61 evaluator """rankbased""" +834 62 dataset """wn18rr""" +834 62 model """transe""" +834 62 loss """softplus""" +834 62 regularizer """no""" +834 62 optimizer """adam""" +834 62 training_loop """owa""" +834 62 negative_sampler """basic""" +834 62 evaluator """rankbased""" +834 63 dataset """wn18rr""" +834 63 model """transe""" +834 63 loss """softplus""" +834 63 regularizer """no""" +834 63 optimizer """adam""" +834 63 training_loop """owa""" +834 63 negative_sampler """basic""" +834 63 evaluator """rankbased""" +834 64 dataset """wn18rr""" +834 64 model """transe""" +834 64 loss """softplus""" +834 64 regularizer """no""" +834 64 optimizer """adam""" +834 64 training_loop """owa""" +834 64 negative_sampler """basic""" +834 64 evaluator """rankbased""" +834 65 dataset """wn18rr""" +834 65 model """transe""" +834 65 loss """softplus""" +834 65 regularizer """no""" +834 65 optimizer """adam""" +834 65 training_loop """owa""" +834 65 negative_sampler """basic""" +834 65 evaluator """rankbased""" +834 66 dataset """wn18rr""" +834 66 model """transe""" +834 66 loss """softplus""" +834 66 regularizer """no""" +834 66 optimizer """adam""" +834 66 training_loop """owa""" +834 66 negative_sampler """basic""" +834 66 evaluator """rankbased""" +834 67 dataset """wn18rr""" +834 67 model """transe""" 
+834 67 loss """softplus""" +834 67 regularizer """no""" +834 67 optimizer """adam""" +834 67 training_loop """owa""" +834 67 negative_sampler """basic""" +834 67 evaluator """rankbased""" +834 68 dataset """wn18rr""" +834 68 model """transe""" +834 68 loss """softplus""" +834 68 regularizer """no""" +834 68 optimizer """adam""" +834 68 training_loop """owa""" +834 68 negative_sampler """basic""" +834 68 evaluator """rankbased""" +834 69 dataset """wn18rr""" +834 69 model """transe""" +834 69 loss """softplus""" +834 69 regularizer """no""" +834 69 optimizer """adam""" +834 69 training_loop """owa""" +834 69 negative_sampler """basic""" +834 69 evaluator """rankbased""" +834 70 dataset """wn18rr""" +834 70 model """transe""" +834 70 loss """softplus""" +834 70 regularizer """no""" +834 70 optimizer """adam""" +834 70 training_loop """owa""" +834 70 negative_sampler """basic""" +834 70 evaluator """rankbased""" +834 71 dataset """wn18rr""" +834 71 model """transe""" +834 71 loss """softplus""" +834 71 regularizer """no""" +834 71 optimizer """adam""" +834 71 training_loop """owa""" +834 71 negative_sampler """basic""" +834 71 evaluator """rankbased""" +834 72 dataset """wn18rr""" +834 72 model """transe""" +834 72 loss """softplus""" +834 72 regularizer """no""" +834 72 optimizer """adam""" +834 72 training_loop """owa""" +834 72 negative_sampler """basic""" +834 72 evaluator """rankbased""" +834 73 dataset """wn18rr""" +834 73 model """transe""" +834 73 loss """softplus""" +834 73 regularizer """no""" +834 73 optimizer """adam""" +834 73 training_loop """owa""" +834 73 negative_sampler """basic""" +834 73 evaluator """rankbased""" +834 74 dataset """wn18rr""" +834 74 model """transe""" +834 74 loss """softplus""" +834 74 regularizer """no""" +834 74 optimizer """adam""" +834 74 training_loop """owa""" +834 74 negative_sampler """basic""" +834 74 evaluator """rankbased""" +834 75 dataset """wn18rr""" +834 75 model """transe""" +834 75 loss """softplus""" +834 75 
regularizer """no""" +834 75 optimizer """adam""" +834 75 training_loop """owa""" +834 75 negative_sampler """basic""" +834 75 evaluator """rankbased""" +834 76 dataset """wn18rr""" +834 76 model """transe""" +834 76 loss """softplus""" +834 76 regularizer """no""" +834 76 optimizer """adam""" +834 76 training_loop """owa""" +834 76 negative_sampler """basic""" +834 76 evaluator """rankbased""" +834 77 dataset """wn18rr""" +834 77 model """transe""" +834 77 loss """softplus""" +834 77 regularizer """no""" +834 77 optimizer """adam""" +834 77 training_loop """owa""" +834 77 negative_sampler """basic""" +834 77 evaluator """rankbased""" +834 78 dataset """wn18rr""" +834 78 model """transe""" +834 78 loss """softplus""" +834 78 regularizer """no""" +834 78 optimizer """adam""" +834 78 training_loop """owa""" +834 78 negative_sampler """basic""" +834 78 evaluator """rankbased""" +834 79 dataset """wn18rr""" +834 79 model """transe""" +834 79 loss """softplus""" +834 79 regularizer """no""" +834 79 optimizer """adam""" +834 79 training_loop """owa""" +834 79 negative_sampler """basic""" +834 79 evaluator """rankbased""" +834 80 dataset """wn18rr""" +834 80 model """transe""" +834 80 loss """softplus""" +834 80 regularizer """no""" +834 80 optimizer """adam""" +834 80 training_loop """owa""" +834 80 negative_sampler """basic""" +834 80 evaluator """rankbased""" +834 81 dataset """wn18rr""" +834 81 model """transe""" +834 81 loss """softplus""" +834 81 regularizer """no""" +834 81 optimizer """adam""" +834 81 training_loop """owa""" +834 81 negative_sampler """basic""" +834 81 evaluator """rankbased""" +834 82 dataset """wn18rr""" +834 82 model """transe""" +834 82 loss """softplus""" +834 82 regularizer """no""" +834 82 optimizer """adam""" +834 82 training_loop """owa""" +834 82 negative_sampler """basic""" +834 82 evaluator """rankbased""" +834 83 dataset """wn18rr""" +834 83 model """transe""" +834 83 loss """softplus""" +834 83 regularizer """no""" +834 83 optimizer 
"""adam""" +834 83 training_loop """owa""" +834 83 negative_sampler """basic""" +834 83 evaluator """rankbased""" +834 84 dataset """wn18rr""" +834 84 model """transe""" +834 84 loss """softplus""" +834 84 regularizer """no""" +834 84 optimizer """adam""" +834 84 training_loop """owa""" +834 84 negative_sampler """basic""" +834 84 evaluator """rankbased""" +834 85 dataset """wn18rr""" +834 85 model """transe""" +834 85 loss """softplus""" +834 85 regularizer """no""" +834 85 optimizer """adam""" +834 85 training_loop """owa""" +834 85 negative_sampler """basic""" +834 85 evaluator """rankbased""" +834 86 dataset """wn18rr""" +834 86 model """transe""" +834 86 loss """softplus""" +834 86 regularizer """no""" +834 86 optimizer """adam""" +834 86 training_loop """owa""" +834 86 negative_sampler """basic""" +834 86 evaluator """rankbased""" +834 87 dataset """wn18rr""" +834 87 model """transe""" +834 87 loss """softplus""" +834 87 regularizer """no""" +834 87 optimizer """adam""" +834 87 training_loop """owa""" +834 87 negative_sampler """basic""" +834 87 evaluator """rankbased""" +834 88 dataset """wn18rr""" +834 88 model """transe""" +834 88 loss """softplus""" +834 88 regularizer """no""" +834 88 optimizer """adam""" +834 88 training_loop """owa""" +834 88 negative_sampler """basic""" +834 88 evaluator """rankbased""" +834 89 dataset """wn18rr""" +834 89 model """transe""" +834 89 loss """softplus""" +834 89 regularizer """no""" +834 89 optimizer """adam""" +834 89 training_loop """owa""" +834 89 negative_sampler """basic""" +834 89 evaluator """rankbased""" +834 90 dataset """wn18rr""" +834 90 model """transe""" +834 90 loss """softplus""" +834 90 regularizer """no""" +834 90 optimizer """adam""" +834 90 training_loop """owa""" +834 90 negative_sampler """basic""" +834 90 evaluator """rankbased""" +834 91 dataset """wn18rr""" +834 91 model """transe""" +834 91 loss """softplus""" +834 91 regularizer """no""" +834 91 optimizer """adam""" +834 91 training_loop 
"""owa""" +834 91 negative_sampler """basic""" +834 91 evaluator """rankbased""" +834 92 dataset """wn18rr""" +834 92 model """transe""" +834 92 loss """softplus""" +834 92 regularizer """no""" +834 92 optimizer """adam""" +834 92 training_loop """owa""" +834 92 negative_sampler """basic""" +834 92 evaluator """rankbased""" +834 93 dataset """wn18rr""" +834 93 model """transe""" +834 93 loss """softplus""" +834 93 regularizer """no""" +834 93 optimizer """adam""" +834 93 training_loop """owa""" +834 93 negative_sampler """basic""" +834 93 evaluator """rankbased""" +834 94 dataset """wn18rr""" +834 94 model """transe""" +834 94 loss """softplus""" +834 94 regularizer """no""" +834 94 optimizer """adam""" +834 94 training_loop """owa""" +834 94 negative_sampler """basic""" +834 94 evaluator """rankbased""" +834 95 dataset """wn18rr""" +834 95 model """transe""" +834 95 loss """softplus""" +834 95 regularizer """no""" +834 95 optimizer """adam""" +834 95 training_loop """owa""" +834 95 negative_sampler """basic""" +834 95 evaluator """rankbased""" +834 96 dataset """wn18rr""" +834 96 model """transe""" +834 96 loss """softplus""" +834 96 regularizer """no""" +834 96 optimizer """adam""" +834 96 training_loop """owa""" +834 96 negative_sampler """basic""" +834 96 evaluator """rankbased""" +834 97 dataset """wn18rr""" +834 97 model """transe""" +834 97 loss """softplus""" +834 97 regularizer """no""" +834 97 optimizer """adam""" +834 97 training_loop """owa""" +834 97 negative_sampler """basic""" +834 97 evaluator """rankbased""" +834 98 dataset """wn18rr""" +834 98 model """transe""" +834 98 loss """softplus""" +834 98 regularizer """no""" +834 98 optimizer """adam""" +834 98 training_loop """owa""" +834 98 negative_sampler """basic""" +834 98 evaluator """rankbased""" +834 99 dataset """wn18rr""" +834 99 model """transe""" +834 99 loss """softplus""" +834 99 regularizer """no""" +834 99 optimizer """adam""" +834 99 training_loop """owa""" +834 99 negative_sampler 
"""basic""" +834 99 evaluator """rankbased""" +834 100 dataset """wn18rr""" +834 100 model """transe""" +834 100 loss """softplus""" +834 100 regularizer """no""" +834 100 optimizer """adam""" +834 100 training_loop """owa""" +834 100 negative_sampler """basic""" +834 100 evaluator """rankbased""" +835 1 model.embedding_dim 2.0 +835 1 model.scoring_fct_norm 1.0 +835 1 optimizer.lr 0.003726610874612352 +835 1 negative_sampler.num_negs_per_pos 16.0 +835 1 training.batch_size 2.0 +835 2 model.embedding_dim 1.0 +835 2 model.scoring_fct_norm 2.0 +835 2 optimizer.lr 0.008057869533854877 +835 2 negative_sampler.num_negs_per_pos 54.0 +835 2 training.batch_size 1.0 +835 3 model.embedding_dim 1.0 +835 3 model.scoring_fct_norm 2.0 +835 3 optimizer.lr 0.00732906061078658 +835 3 negative_sampler.num_negs_per_pos 2.0 +835 3 training.batch_size 0.0 +835 4 model.embedding_dim 2.0 +835 4 model.scoring_fct_norm 1.0 +835 4 optimizer.lr 0.007936958288416635 +835 4 negative_sampler.num_negs_per_pos 78.0 +835 4 training.batch_size 1.0 +835 5 model.embedding_dim 0.0 +835 5 model.scoring_fct_norm 2.0 +835 5 optimizer.lr 0.0016671797219984753 +835 5 negative_sampler.num_negs_per_pos 24.0 +835 5 training.batch_size 2.0 +835 6 model.embedding_dim 0.0 +835 6 model.scoring_fct_norm 1.0 +835 6 optimizer.lr 0.08047417693362265 +835 6 negative_sampler.num_negs_per_pos 49.0 +835 6 training.batch_size 2.0 +835 7 model.embedding_dim 0.0 +835 7 model.scoring_fct_norm 2.0 +835 7 optimizer.lr 0.017669728971219385 +835 7 negative_sampler.num_negs_per_pos 4.0 +835 7 training.batch_size 2.0 +835 8 model.embedding_dim 1.0 +835 8 model.scoring_fct_norm 1.0 +835 8 optimizer.lr 0.0046272449330806474 +835 8 negative_sampler.num_negs_per_pos 64.0 +835 8 training.batch_size 2.0 +835 9 model.embedding_dim 1.0 +835 9 model.scoring_fct_norm 2.0 +835 9 optimizer.lr 0.0071902539270745506 +835 9 negative_sampler.num_negs_per_pos 83.0 +835 9 training.batch_size 0.0 +835 10 model.embedding_dim 2.0 +835 10 
model.scoring_fct_norm 1.0 +835 10 optimizer.lr 0.009625821521244636 +835 10 negative_sampler.num_negs_per_pos 7.0 +835 10 training.batch_size 2.0 +835 11 model.embedding_dim 2.0 +835 11 model.scoring_fct_norm 2.0 +835 11 optimizer.lr 0.035147573238609786 +835 11 negative_sampler.num_negs_per_pos 49.0 +835 11 training.batch_size 0.0 +835 12 model.embedding_dim 2.0 +835 12 model.scoring_fct_norm 1.0 +835 12 optimizer.lr 0.04368576781491843 +835 12 negative_sampler.num_negs_per_pos 77.0 +835 12 training.batch_size 2.0 +835 13 model.embedding_dim 0.0 +835 13 model.scoring_fct_norm 1.0 +835 13 optimizer.lr 0.0016120410149175575 +835 13 negative_sampler.num_negs_per_pos 65.0 +835 13 training.batch_size 0.0 +835 14 model.embedding_dim 2.0 +835 14 model.scoring_fct_norm 1.0 +835 14 optimizer.lr 0.006069759439615575 +835 14 negative_sampler.num_negs_per_pos 72.0 +835 14 training.batch_size 0.0 +835 15 model.embedding_dim 0.0 +835 15 model.scoring_fct_norm 2.0 +835 15 optimizer.lr 0.004978622219579226 +835 15 negative_sampler.num_negs_per_pos 30.0 +835 15 training.batch_size 0.0 +835 16 model.embedding_dim 0.0 +835 16 model.scoring_fct_norm 1.0 +835 16 optimizer.lr 0.0018707753206595012 +835 16 negative_sampler.num_negs_per_pos 7.0 +835 16 training.batch_size 0.0 +835 17 model.embedding_dim 1.0 +835 17 model.scoring_fct_norm 2.0 +835 17 optimizer.lr 0.0810153641433793 +835 17 negative_sampler.num_negs_per_pos 18.0 +835 17 training.batch_size 0.0 +835 18 model.embedding_dim 2.0 +835 18 model.scoring_fct_norm 2.0 +835 18 optimizer.lr 0.001449122157169809 +835 18 negative_sampler.num_negs_per_pos 91.0 +835 18 training.batch_size 0.0 +835 19 model.embedding_dim 1.0 +835 19 model.scoring_fct_norm 2.0 +835 19 optimizer.lr 0.073356019500395 +835 19 negative_sampler.num_negs_per_pos 14.0 +835 19 training.batch_size 2.0 +835 20 model.embedding_dim 2.0 +835 20 model.scoring_fct_norm 2.0 +835 20 optimizer.lr 0.04591961722490112 +835 20 negative_sampler.num_negs_per_pos 8.0 +835 20 
training.batch_size 2.0 +835 21 model.embedding_dim 1.0 +835 21 model.scoring_fct_norm 2.0 +835 21 optimizer.lr 0.07236245562321261 +835 21 negative_sampler.num_negs_per_pos 69.0 +835 21 training.batch_size 2.0 +835 22 model.embedding_dim 0.0 +835 22 model.scoring_fct_norm 2.0 +835 22 optimizer.lr 0.0017127578095640522 +835 22 negative_sampler.num_negs_per_pos 52.0 +835 22 training.batch_size 2.0 +835 23 model.embedding_dim 2.0 +835 23 model.scoring_fct_norm 1.0 +835 23 optimizer.lr 0.005060677640602451 +835 23 negative_sampler.num_negs_per_pos 5.0 +835 23 training.batch_size 1.0 +835 24 model.embedding_dim 0.0 +835 24 model.scoring_fct_norm 1.0 +835 24 optimizer.lr 0.0819743737171311 +835 24 negative_sampler.num_negs_per_pos 90.0 +835 24 training.batch_size 2.0 +835 25 model.embedding_dim 0.0 +835 25 model.scoring_fct_norm 2.0 +835 25 optimizer.lr 0.007418565587278102 +835 25 negative_sampler.num_negs_per_pos 4.0 +835 25 training.batch_size 2.0 +835 26 model.embedding_dim 2.0 +835 26 model.scoring_fct_norm 2.0 +835 26 optimizer.lr 0.01137135599923218 +835 26 negative_sampler.num_negs_per_pos 13.0 +835 26 training.batch_size 1.0 +835 27 model.embedding_dim 1.0 +835 27 model.scoring_fct_norm 1.0 +835 27 optimizer.lr 0.07669362060121927 +835 27 negative_sampler.num_negs_per_pos 75.0 +835 27 training.batch_size 0.0 +835 28 model.embedding_dim 2.0 +835 28 model.scoring_fct_norm 1.0 +835 28 optimizer.lr 0.04563402352155886 +835 28 negative_sampler.num_negs_per_pos 4.0 +835 28 training.batch_size 0.0 +835 29 model.embedding_dim 2.0 +835 29 model.scoring_fct_norm 1.0 +835 29 optimizer.lr 0.009501869155528869 +835 29 negative_sampler.num_negs_per_pos 35.0 +835 29 training.batch_size 2.0 +835 30 model.embedding_dim 0.0 +835 30 model.scoring_fct_norm 2.0 +835 30 optimizer.lr 0.012923030027803156 +835 30 negative_sampler.num_negs_per_pos 52.0 +835 30 training.batch_size 1.0 +835 31 model.embedding_dim 0.0 +835 31 model.scoring_fct_norm 1.0 +835 31 optimizer.lr 
0.046674285234718636 +835 31 negative_sampler.num_negs_per_pos 67.0 +835 31 training.batch_size 0.0 +835 32 model.embedding_dim 0.0 +835 32 model.scoring_fct_norm 2.0 +835 32 optimizer.lr 0.004584871921672176 +835 32 negative_sampler.num_negs_per_pos 14.0 +835 32 training.batch_size 1.0 +835 33 model.embedding_dim 0.0 +835 33 model.scoring_fct_norm 1.0 +835 33 optimizer.lr 0.09314916380052968 +835 33 negative_sampler.num_negs_per_pos 99.0 +835 33 training.batch_size 2.0 +835 34 model.embedding_dim 2.0 +835 34 model.scoring_fct_norm 2.0 +835 34 optimizer.lr 0.07591962874849296 +835 34 negative_sampler.num_negs_per_pos 9.0 +835 34 training.batch_size 1.0 +835 35 model.embedding_dim 0.0 +835 35 model.scoring_fct_norm 2.0 +835 35 optimizer.lr 0.05588208786750603 +835 35 negative_sampler.num_negs_per_pos 3.0 +835 35 training.batch_size 1.0 +835 36 model.embedding_dim 2.0 +835 36 model.scoring_fct_norm 2.0 +835 36 optimizer.lr 0.023269769882417222 +835 36 negative_sampler.num_negs_per_pos 45.0 +835 36 training.batch_size 1.0 +835 37 model.embedding_dim 2.0 +835 37 model.scoring_fct_norm 1.0 +835 37 optimizer.lr 0.014588777542477793 +835 37 negative_sampler.num_negs_per_pos 75.0 +835 37 training.batch_size 1.0 +835 38 model.embedding_dim 2.0 +835 38 model.scoring_fct_norm 1.0 +835 38 optimizer.lr 0.006983610067158873 +835 38 negative_sampler.num_negs_per_pos 65.0 +835 38 training.batch_size 2.0 +835 39 model.embedding_dim 2.0 +835 39 model.scoring_fct_norm 1.0 +835 39 optimizer.lr 0.0030691862997510606 +835 39 negative_sampler.num_negs_per_pos 64.0 +835 39 training.batch_size 1.0 +835 40 model.embedding_dim 1.0 +835 40 model.scoring_fct_norm 2.0 +835 40 optimizer.lr 0.011514505282856394 +835 40 negative_sampler.num_negs_per_pos 2.0 +835 40 training.batch_size 2.0 +835 41 model.embedding_dim 1.0 +835 41 model.scoring_fct_norm 2.0 +835 41 optimizer.lr 0.006918578382060841 +835 41 negative_sampler.num_negs_per_pos 93.0 +835 41 training.batch_size 0.0 +835 42 
model.embedding_dim 0.0 +835 42 model.scoring_fct_norm 2.0 +835 42 optimizer.lr 0.03588075489723511 +835 42 negative_sampler.num_negs_per_pos 94.0 +835 42 training.batch_size 2.0 +835 43 model.embedding_dim 0.0 +835 43 model.scoring_fct_norm 1.0 +835 43 optimizer.lr 0.023825219890512908 +835 43 negative_sampler.num_negs_per_pos 85.0 +835 43 training.batch_size 1.0 +835 44 model.embedding_dim 0.0 +835 44 model.scoring_fct_norm 2.0 +835 44 optimizer.lr 0.021995440843237086 +835 44 negative_sampler.num_negs_per_pos 17.0 +835 44 training.batch_size 0.0 +835 45 model.embedding_dim 0.0 +835 45 model.scoring_fct_norm 1.0 +835 45 optimizer.lr 0.02851405684080987 +835 45 negative_sampler.num_negs_per_pos 56.0 +835 45 training.batch_size 1.0 +835 46 model.embedding_dim 2.0 +835 46 model.scoring_fct_norm 2.0 +835 46 optimizer.lr 0.0647639753024276 +835 46 negative_sampler.num_negs_per_pos 41.0 +835 46 training.batch_size 2.0 +835 47 model.embedding_dim 2.0 +835 47 model.scoring_fct_norm 1.0 +835 47 optimizer.lr 0.016528425778381442 +835 47 negative_sampler.num_negs_per_pos 65.0 +835 47 training.batch_size 1.0 +835 48 model.embedding_dim 0.0 +835 48 model.scoring_fct_norm 2.0 +835 48 optimizer.lr 0.014579909994642542 +835 48 negative_sampler.num_negs_per_pos 32.0 +835 48 training.batch_size 0.0 +835 49 model.embedding_dim 0.0 +835 49 model.scoring_fct_norm 2.0 +835 49 optimizer.lr 0.0010463717807125854 +835 49 negative_sampler.num_negs_per_pos 41.0 +835 49 training.batch_size 0.0 +835 50 model.embedding_dim 0.0 +835 50 model.scoring_fct_norm 1.0 +835 50 optimizer.lr 0.0030840685860345426 +835 50 negative_sampler.num_negs_per_pos 2.0 +835 50 training.batch_size 1.0 +835 51 model.embedding_dim 0.0 +835 51 model.scoring_fct_norm 2.0 +835 51 optimizer.lr 0.031038196426169645 +835 51 negative_sampler.num_negs_per_pos 90.0 +835 51 training.batch_size 2.0 +835 52 model.embedding_dim 0.0 +835 52 model.scoring_fct_norm 2.0 +835 52 optimizer.lr 0.0026867048940489983 +835 52 
negative_sampler.num_negs_per_pos 57.0 +835 52 training.batch_size 2.0 +835 53 model.embedding_dim 0.0 +835 53 model.scoring_fct_norm 2.0 +835 53 optimizer.lr 0.021294073943917056 +835 53 negative_sampler.num_negs_per_pos 0.0 +835 53 training.batch_size 0.0 +835 54 model.embedding_dim 2.0 +835 54 model.scoring_fct_norm 2.0 +835 54 optimizer.lr 0.00880298882416389 +835 54 negative_sampler.num_negs_per_pos 45.0 +835 54 training.batch_size 1.0 +835 55 model.embedding_dim 2.0 +835 55 model.scoring_fct_norm 1.0 +835 55 optimizer.lr 0.0012073626238445119 +835 55 negative_sampler.num_negs_per_pos 13.0 +835 55 training.batch_size 1.0 +835 56 model.embedding_dim 0.0 +835 56 model.scoring_fct_norm 1.0 +835 56 optimizer.lr 0.03591987373763859 +835 56 negative_sampler.num_negs_per_pos 53.0 +835 56 training.batch_size 0.0 +835 57 model.embedding_dim 1.0 +835 57 model.scoring_fct_norm 2.0 +835 57 optimizer.lr 0.019026297745546646 +835 57 negative_sampler.num_negs_per_pos 8.0 +835 57 training.batch_size 2.0 +835 58 model.embedding_dim 2.0 +835 58 model.scoring_fct_norm 2.0 +835 58 optimizer.lr 0.008141408168268912 +835 58 negative_sampler.num_negs_per_pos 37.0 +835 58 training.batch_size 2.0 +835 59 model.embedding_dim 2.0 +835 59 model.scoring_fct_norm 2.0 +835 59 optimizer.lr 0.004521346669337802 +835 59 negative_sampler.num_negs_per_pos 52.0 +835 59 training.batch_size 2.0 +835 60 model.embedding_dim 0.0 +835 60 model.scoring_fct_norm 2.0 +835 60 optimizer.lr 0.001269146222920105 +835 60 negative_sampler.num_negs_per_pos 96.0 +835 60 training.batch_size 1.0 +835 61 model.embedding_dim 2.0 +835 61 model.scoring_fct_norm 2.0 +835 61 optimizer.lr 0.003844363884636037 +835 61 negative_sampler.num_negs_per_pos 68.0 +835 61 training.batch_size 2.0 +835 62 model.embedding_dim 0.0 +835 62 model.scoring_fct_norm 1.0 +835 62 optimizer.lr 0.0018713919089133873 +835 62 negative_sampler.num_negs_per_pos 13.0 +835 62 training.batch_size 0.0 +835 63 model.embedding_dim 1.0 +835 63 
model.scoring_fct_norm 2.0 +835 63 optimizer.lr 0.002385158869324707 +835 63 negative_sampler.num_negs_per_pos 64.0 +835 63 training.batch_size 2.0 +835 64 model.embedding_dim 0.0 +835 64 model.scoring_fct_norm 2.0 +835 64 optimizer.lr 0.0032913630823968435 +835 64 negative_sampler.num_negs_per_pos 93.0 +835 64 training.batch_size 1.0 +835 65 model.embedding_dim 0.0 +835 65 model.scoring_fct_norm 1.0 +835 65 optimizer.lr 0.05262254725998149 +835 65 negative_sampler.num_negs_per_pos 15.0 +835 65 training.batch_size 0.0 +835 66 model.embedding_dim 1.0 +835 66 model.scoring_fct_norm 1.0 +835 66 optimizer.lr 0.012471461714399558 +835 66 negative_sampler.num_negs_per_pos 99.0 +835 66 training.batch_size 0.0 +835 67 model.embedding_dim 0.0 +835 67 model.scoring_fct_norm 2.0 +835 67 optimizer.lr 0.023701133319512423 +835 67 negative_sampler.num_negs_per_pos 99.0 +835 67 training.batch_size 2.0 +835 68 model.embedding_dim 1.0 +835 68 model.scoring_fct_norm 1.0 +835 68 optimizer.lr 0.0231939644910636 +835 68 negative_sampler.num_negs_per_pos 33.0 +835 68 training.batch_size 0.0 +835 69 model.embedding_dim 2.0 +835 69 model.scoring_fct_norm 2.0 +835 69 optimizer.lr 0.03665780301909116 +835 69 negative_sampler.num_negs_per_pos 46.0 +835 69 training.batch_size 0.0 +835 70 model.embedding_dim 1.0 +835 70 model.scoring_fct_norm 1.0 +835 70 optimizer.lr 0.012523404385659503 +835 70 negative_sampler.num_negs_per_pos 29.0 +835 70 training.batch_size 1.0 +835 71 model.embedding_dim 0.0 +835 71 model.scoring_fct_norm 2.0 +835 71 optimizer.lr 0.0013089628603591477 +835 71 negative_sampler.num_negs_per_pos 73.0 +835 71 training.batch_size 1.0 +835 72 model.embedding_dim 2.0 +835 72 model.scoring_fct_norm 1.0 +835 72 optimizer.lr 0.002467235647608254 +835 72 negative_sampler.num_negs_per_pos 50.0 +835 72 training.batch_size 0.0 +835 73 model.embedding_dim 2.0 +835 73 model.scoring_fct_norm 1.0 +835 73 optimizer.lr 0.06856040824304244 +835 73 negative_sampler.num_negs_per_pos 38.0 +835 
73 training.batch_size 1.0 +835 74 model.embedding_dim 0.0 +835 74 model.scoring_fct_norm 2.0 +835 74 optimizer.lr 0.003551884096291525 +835 74 negative_sampler.num_negs_per_pos 59.0 +835 74 training.batch_size 0.0 +835 75 model.embedding_dim 0.0 +835 75 model.scoring_fct_norm 1.0 +835 75 optimizer.lr 0.023810262398174634 +835 75 negative_sampler.num_negs_per_pos 19.0 +835 75 training.batch_size 0.0 +835 76 model.embedding_dim 0.0 +835 76 model.scoring_fct_norm 2.0 +835 76 optimizer.lr 0.0024431069400428313 +835 76 negative_sampler.num_negs_per_pos 74.0 +835 76 training.batch_size 1.0 +835 77 model.embedding_dim 0.0 +835 77 model.scoring_fct_norm 1.0 +835 77 optimizer.lr 0.03067779723877519 +835 77 negative_sampler.num_negs_per_pos 1.0 +835 77 training.batch_size 0.0 +835 78 model.embedding_dim 2.0 +835 78 model.scoring_fct_norm 1.0 +835 78 optimizer.lr 0.006254087548196104 +835 78 negative_sampler.num_negs_per_pos 55.0 +835 78 training.batch_size 0.0 +835 79 model.embedding_dim 1.0 +835 79 model.scoring_fct_norm 1.0 +835 79 optimizer.lr 0.022984023903453603 +835 79 negative_sampler.num_negs_per_pos 20.0 +835 79 training.batch_size 0.0 +835 80 model.embedding_dim 2.0 +835 80 model.scoring_fct_norm 1.0 +835 80 optimizer.lr 0.09507180265140805 +835 80 negative_sampler.num_negs_per_pos 26.0 +835 80 training.batch_size 0.0 +835 81 model.embedding_dim 1.0 +835 81 model.scoring_fct_norm 2.0 +835 81 optimizer.lr 0.00445272111788555 +835 81 negative_sampler.num_negs_per_pos 39.0 +835 81 training.batch_size 0.0 +835 82 model.embedding_dim 0.0 +835 82 model.scoring_fct_norm 2.0 +835 82 optimizer.lr 0.005175102374128655 +835 82 negative_sampler.num_negs_per_pos 78.0 +835 82 training.batch_size 0.0 +835 83 model.embedding_dim 2.0 +835 83 model.scoring_fct_norm 2.0 +835 83 optimizer.lr 0.053006084631282535 +835 83 negative_sampler.num_negs_per_pos 65.0 +835 83 training.batch_size 0.0 +835 84 model.embedding_dim 0.0 +835 84 model.scoring_fct_norm 2.0 +835 84 optimizer.lr 
0.025558239395740756 +835 84 negative_sampler.num_negs_per_pos 48.0 +835 84 training.batch_size 2.0 +835 85 model.embedding_dim 2.0 +835 85 model.scoring_fct_norm 1.0 +835 85 optimizer.lr 0.0017647908474412 +835 85 negative_sampler.num_negs_per_pos 75.0 +835 85 training.batch_size 1.0 +835 86 model.embedding_dim 0.0 +835 86 model.scoring_fct_norm 2.0 +835 86 optimizer.lr 0.03129481314006157 +835 86 negative_sampler.num_negs_per_pos 53.0 +835 86 training.batch_size 1.0 +835 87 model.embedding_dim 0.0 +835 87 model.scoring_fct_norm 1.0 +835 87 optimizer.lr 0.004924868106355196 +835 87 negative_sampler.num_negs_per_pos 44.0 +835 87 training.batch_size 1.0 +835 88 model.embedding_dim 0.0 +835 88 model.scoring_fct_norm 2.0 +835 88 optimizer.lr 0.007517960396719217 +835 88 negative_sampler.num_negs_per_pos 48.0 +835 88 training.batch_size 2.0 +835 89 model.embedding_dim 0.0 +835 89 model.scoring_fct_norm 2.0 +835 89 optimizer.lr 0.020748990679209687 +835 89 negative_sampler.num_negs_per_pos 3.0 +835 89 training.batch_size 0.0 +835 90 model.embedding_dim 1.0 +835 90 model.scoring_fct_norm 1.0 +835 90 optimizer.lr 0.0026179013158354036 +835 90 negative_sampler.num_negs_per_pos 14.0 +835 90 training.batch_size 2.0 +835 91 model.embedding_dim 2.0 +835 91 model.scoring_fct_norm 1.0 +835 91 optimizer.lr 0.023997273723925853 +835 91 negative_sampler.num_negs_per_pos 80.0 +835 91 training.batch_size 1.0 +835 92 model.embedding_dim 0.0 +835 92 model.scoring_fct_norm 1.0 +835 92 optimizer.lr 0.03069401342680147 +835 92 negative_sampler.num_negs_per_pos 71.0 +835 92 training.batch_size 2.0 +835 93 model.embedding_dim 2.0 +835 93 model.scoring_fct_norm 2.0 +835 93 optimizer.lr 0.006131422341046658 +835 93 negative_sampler.num_negs_per_pos 31.0 +835 93 training.batch_size 0.0 +835 94 model.embedding_dim 1.0 +835 94 model.scoring_fct_norm 1.0 +835 94 optimizer.lr 0.053665096320100275 +835 94 negative_sampler.num_negs_per_pos 43.0 +835 94 training.batch_size 0.0 +835 95 
model.embedding_dim 1.0 +835 95 model.scoring_fct_norm 2.0 +835 95 optimizer.lr 0.014181631450611114 +835 95 negative_sampler.num_negs_per_pos 61.0 +835 95 training.batch_size 1.0 +835 96 model.embedding_dim 1.0 +835 96 model.scoring_fct_norm 1.0 +835 96 optimizer.lr 0.06717110569107287 +835 96 negative_sampler.num_negs_per_pos 13.0 +835 96 training.batch_size 2.0 +835 97 model.embedding_dim 0.0 +835 97 model.scoring_fct_norm 2.0 +835 97 optimizer.lr 0.0024734299533634356 +835 97 negative_sampler.num_negs_per_pos 1.0 +835 97 training.batch_size 0.0 +835 98 model.embedding_dim 0.0 +835 98 model.scoring_fct_norm 1.0 +835 98 optimizer.lr 0.0013150528892859376 +835 98 negative_sampler.num_negs_per_pos 45.0 +835 98 training.batch_size 2.0 +835 99 model.embedding_dim 2.0 +835 99 model.scoring_fct_norm 2.0 +835 99 optimizer.lr 0.0010489534469287278 +835 99 negative_sampler.num_negs_per_pos 83.0 +835 99 training.batch_size 1.0 +835 100 model.embedding_dim 2.0 +835 100 model.scoring_fct_norm 1.0 +835 100 optimizer.lr 0.0012586259713406043 +835 100 negative_sampler.num_negs_per_pos 41.0 +835 100 training.batch_size 0.0 +835 1 dataset """wn18rr""" +835 1 model """transe""" +835 1 loss """bceaftersigmoid""" +835 1 regularizer """no""" +835 1 optimizer """adam""" +835 1 training_loop """owa""" +835 1 negative_sampler """basic""" +835 1 evaluator """rankbased""" +835 2 dataset """wn18rr""" +835 2 model """transe""" +835 2 loss """bceaftersigmoid""" +835 2 regularizer """no""" +835 2 optimizer """adam""" +835 2 training_loop """owa""" +835 2 negative_sampler """basic""" +835 2 evaluator """rankbased""" +835 3 dataset """wn18rr""" +835 3 model """transe""" +835 3 loss """bceaftersigmoid""" +835 3 regularizer """no""" +835 3 optimizer """adam""" +835 3 training_loop """owa""" +835 3 negative_sampler """basic""" +835 3 evaluator """rankbased""" +835 4 dataset """wn18rr""" +835 4 model """transe""" +835 4 loss """bceaftersigmoid""" +835 4 regularizer """no""" +835 4 optimizer 
"""adam""" +835 4 training_loop """owa""" +835 4 negative_sampler """basic""" +835 4 evaluator """rankbased""" +835 5 dataset """wn18rr""" +835 5 model """transe""" +835 5 loss """bceaftersigmoid""" +835 5 regularizer """no""" +835 5 optimizer """adam""" +835 5 training_loop """owa""" +835 5 negative_sampler """basic""" +835 5 evaluator """rankbased""" +835 6 dataset """wn18rr""" +835 6 model """transe""" +835 6 loss """bceaftersigmoid""" +835 6 regularizer """no""" +835 6 optimizer """adam""" +835 6 training_loop """owa""" +835 6 negative_sampler """basic""" +835 6 evaluator """rankbased""" +835 7 dataset """wn18rr""" +835 7 model """transe""" +835 7 loss """bceaftersigmoid""" +835 7 regularizer """no""" +835 7 optimizer """adam""" +835 7 training_loop """owa""" +835 7 negative_sampler """basic""" +835 7 evaluator """rankbased""" +835 8 dataset """wn18rr""" +835 8 model """transe""" +835 8 loss """bceaftersigmoid""" +835 8 regularizer """no""" +835 8 optimizer """adam""" +835 8 training_loop """owa""" +835 8 negative_sampler """basic""" +835 8 evaluator """rankbased""" +835 9 dataset """wn18rr""" +835 9 model """transe""" +835 9 loss """bceaftersigmoid""" +835 9 regularizer """no""" +835 9 optimizer """adam""" +835 9 training_loop """owa""" +835 9 negative_sampler """basic""" +835 9 evaluator """rankbased""" +835 10 dataset """wn18rr""" +835 10 model """transe""" +835 10 loss """bceaftersigmoid""" +835 10 regularizer """no""" +835 10 optimizer """adam""" +835 10 training_loop """owa""" +835 10 negative_sampler """basic""" +835 10 evaluator """rankbased""" +835 11 dataset """wn18rr""" +835 11 model """transe""" +835 11 loss """bceaftersigmoid""" +835 11 regularizer """no""" +835 11 optimizer """adam""" +835 11 training_loop """owa""" +835 11 negative_sampler """basic""" +835 11 evaluator """rankbased""" +835 12 dataset """wn18rr""" +835 12 model """transe""" +835 12 loss """bceaftersigmoid""" +835 12 regularizer """no""" +835 12 optimizer """adam""" +835 12 
training_loop """owa""" +835 12 negative_sampler """basic""" +835 12 evaluator """rankbased""" +835 13 dataset """wn18rr""" +835 13 model """transe""" +835 13 loss """bceaftersigmoid""" +835 13 regularizer """no""" +835 13 optimizer """adam""" +835 13 training_loop """owa""" +835 13 negative_sampler """basic""" +835 13 evaluator """rankbased""" +835 14 dataset """wn18rr""" +835 14 model """transe""" +835 14 loss """bceaftersigmoid""" +835 14 regularizer """no""" +835 14 optimizer """adam""" +835 14 training_loop """owa""" +835 14 negative_sampler """basic""" +835 14 evaluator """rankbased""" +835 15 dataset """wn18rr""" +835 15 model """transe""" +835 15 loss """bceaftersigmoid""" +835 15 regularizer """no""" +835 15 optimizer """adam""" +835 15 training_loop """owa""" +835 15 negative_sampler """basic""" +835 15 evaluator """rankbased""" +835 16 dataset """wn18rr""" +835 16 model """transe""" +835 16 loss """bceaftersigmoid""" +835 16 regularizer """no""" +835 16 optimizer """adam""" +835 16 training_loop """owa""" +835 16 negative_sampler """basic""" +835 16 evaluator """rankbased""" +835 17 dataset """wn18rr""" +835 17 model """transe""" +835 17 loss """bceaftersigmoid""" +835 17 regularizer """no""" +835 17 optimizer """adam""" +835 17 training_loop """owa""" +835 17 negative_sampler """basic""" +835 17 evaluator """rankbased""" +835 18 dataset """wn18rr""" +835 18 model """transe""" +835 18 loss """bceaftersigmoid""" +835 18 regularizer """no""" +835 18 optimizer """adam""" +835 18 training_loop """owa""" +835 18 negative_sampler """basic""" +835 18 evaluator """rankbased""" +835 19 dataset """wn18rr""" +835 19 model """transe""" +835 19 loss """bceaftersigmoid""" +835 19 regularizer """no""" +835 19 optimizer """adam""" +835 19 training_loop """owa""" +835 19 negative_sampler """basic""" +835 19 evaluator """rankbased""" +835 20 dataset """wn18rr""" +835 20 model """transe""" +835 20 loss """bceaftersigmoid""" +835 20 regularizer """no""" +835 20 optimizer 
"""adam""" +835 20 training_loop """owa""" +835 20 negative_sampler """basic""" +835 20 evaluator """rankbased""" +835 21 dataset """wn18rr""" +835 21 model """transe""" +835 21 loss """bceaftersigmoid""" +835 21 regularizer """no""" +835 21 optimizer """adam""" +835 21 training_loop """owa""" +835 21 negative_sampler """basic""" +835 21 evaluator """rankbased""" +835 22 dataset """wn18rr""" +835 22 model """transe""" +835 22 loss """bceaftersigmoid""" +835 22 regularizer """no""" +835 22 optimizer """adam""" +835 22 training_loop """owa""" +835 22 negative_sampler """basic""" +835 22 evaluator """rankbased""" +835 23 dataset """wn18rr""" +835 23 model """transe""" +835 23 loss """bceaftersigmoid""" +835 23 regularizer """no""" +835 23 optimizer """adam""" +835 23 training_loop """owa""" +835 23 negative_sampler """basic""" +835 23 evaluator """rankbased""" +835 24 dataset """wn18rr""" +835 24 model """transe""" +835 24 loss """bceaftersigmoid""" +835 24 regularizer """no""" +835 24 optimizer """adam""" +835 24 training_loop """owa""" +835 24 negative_sampler """basic""" +835 24 evaluator """rankbased""" +835 25 dataset """wn18rr""" +835 25 model """transe""" +835 25 loss """bceaftersigmoid""" +835 25 regularizer """no""" +835 25 optimizer """adam""" +835 25 training_loop """owa""" +835 25 negative_sampler """basic""" +835 25 evaluator """rankbased""" +835 26 dataset """wn18rr""" +835 26 model """transe""" +835 26 loss """bceaftersigmoid""" +835 26 regularizer """no""" +835 26 optimizer """adam""" +835 26 training_loop """owa""" +835 26 negative_sampler """basic""" +835 26 evaluator """rankbased""" +835 27 dataset """wn18rr""" +835 27 model """transe""" +835 27 loss """bceaftersigmoid""" +835 27 regularizer """no""" +835 27 optimizer """adam""" +835 27 training_loop """owa""" +835 27 negative_sampler """basic""" +835 27 evaluator """rankbased""" +835 28 dataset """wn18rr""" +835 28 model """transe""" +835 28 loss """bceaftersigmoid""" +835 28 regularizer """no""" 
+835 28 optimizer """adam""" +835 28 training_loop """owa""" +835 28 negative_sampler """basic""" +835 28 evaluator """rankbased""" +835 29 dataset """wn18rr""" +835 29 model """transe""" +835 29 loss """bceaftersigmoid""" +835 29 regularizer """no""" +835 29 optimizer """adam""" +835 29 training_loop """owa""" +835 29 negative_sampler """basic""" +835 29 evaluator """rankbased""" +835 30 dataset """wn18rr""" +835 30 model """transe""" +835 30 loss """bceaftersigmoid""" +835 30 regularizer """no""" +835 30 optimizer """adam""" +835 30 training_loop """owa""" +835 30 negative_sampler """basic""" +835 30 evaluator """rankbased""" +835 31 dataset """wn18rr""" +835 31 model """transe""" +835 31 loss """bceaftersigmoid""" +835 31 regularizer """no""" +835 31 optimizer """adam""" +835 31 training_loop """owa""" +835 31 negative_sampler """basic""" +835 31 evaluator """rankbased""" +835 32 dataset """wn18rr""" +835 32 model """transe""" +835 32 loss """bceaftersigmoid""" +835 32 regularizer """no""" +835 32 optimizer """adam""" +835 32 training_loop """owa""" +835 32 negative_sampler """basic""" +835 32 evaluator """rankbased""" +835 33 dataset """wn18rr""" +835 33 model """transe""" +835 33 loss """bceaftersigmoid""" +835 33 regularizer """no""" +835 33 optimizer """adam""" +835 33 training_loop """owa""" +835 33 negative_sampler """basic""" +835 33 evaluator """rankbased""" +835 34 dataset """wn18rr""" +835 34 model """transe""" +835 34 loss """bceaftersigmoid""" +835 34 regularizer """no""" +835 34 optimizer """adam""" +835 34 training_loop """owa""" +835 34 negative_sampler """basic""" +835 34 evaluator """rankbased""" +835 35 dataset """wn18rr""" +835 35 model """transe""" +835 35 loss """bceaftersigmoid""" +835 35 regularizer """no""" +835 35 optimizer """adam""" +835 35 training_loop """owa""" +835 35 negative_sampler """basic""" +835 35 evaluator """rankbased""" +835 36 dataset """wn18rr""" +835 36 model """transe""" +835 36 loss """bceaftersigmoid""" +835 36 
regularizer """no""" +835 36 optimizer """adam""" +835 36 training_loop """owa""" +835 36 negative_sampler """basic""" +835 36 evaluator """rankbased""" +835 37 dataset """wn18rr""" +835 37 model """transe""" +835 37 loss """bceaftersigmoid""" +835 37 regularizer """no""" +835 37 optimizer """adam""" +835 37 training_loop """owa""" +835 37 negative_sampler """basic""" +835 37 evaluator """rankbased""" +835 38 dataset """wn18rr""" +835 38 model """transe""" +835 38 loss """bceaftersigmoid""" +835 38 regularizer """no""" +835 38 optimizer """adam""" +835 38 training_loop """owa""" +835 38 negative_sampler """basic""" +835 38 evaluator """rankbased""" +835 39 dataset """wn18rr""" +835 39 model """transe""" +835 39 loss """bceaftersigmoid""" +835 39 regularizer """no""" +835 39 optimizer """adam""" +835 39 training_loop """owa""" +835 39 negative_sampler """basic""" +835 39 evaluator """rankbased""" +835 40 dataset """wn18rr""" +835 40 model """transe""" +835 40 loss """bceaftersigmoid""" +835 40 regularizer """no""" +835 40 optimizer """adam""" +835 40 training_loop """owa""" +835 40 negative_sampler """basic""" +835 40 evaluator """rankbased""" +835 41 dataset """wn18rr""" +835 41 model """transe""" +835 41 loss """bceaftersigmoid""" +835 41 regularizer """no""" +835 41 optimizer """adam""" +835 41 training_loop """owa""" +835 41 negative_sampler """basic""" +835 41 evaluator """rankbased""" +835 42 dataset """wn18rr""" +835 42 model """transe""" +835 42 loss """bceaftersigmoid""" +835 42 regularizer """no""" +835 42 optimizer """adam""" +835 42 training_loop """owa""" +835 42 negative_sampler """basic""" +835 42 evaluator """rankbased""" +835 43 dataset """wn18rr""" +835 43 model """transe""" +835 43 loss """bceaftersigmoid""" +835 43 regularizer """no""" +835 43 optimizer """adam""" +835 43 training_loop """owa""" +835 43 negative_sampler """basic""" +835 43 evaluator """rankbased""" +835 44 dataset """wn18rr""" +835 44 model """transe""" +835 44 loss 
"""bceaftersigmoid""" +835 44 regularizer """no""" +835 44 optimizer """adam""" +835 44 training_loop """owa""" +835 44 negative_sampler """basic""" +835 44 evaluator """rankbased""" +835 45 dataset """wn18rr""" +835 45 model """transe""" +835 45 loss """bceaftersigmoid""" +835 45 regularizer """no""" +835 45 optimizer """adam""" +835 45 training_loop """owa""" +835 45 negative_sampler """basic""" +835 45 evaluator """rankbased""" +835 46 dataset """wn18rr""" +835 46 model """transe""" +835 46 loss """bceaftersigmoid""" +835 46 regularizer """no""" +835 46 optimizer """adam""" +835 46 training_loop """owa""" +835 46 negative_sampler """basic""" +835 46 evaluator """rankbased""" +835 47 dataset """wn18rr""" +835 47 model """transe""" +835 47 loss """bceaftersigmoid""" +835 47 regularizer """no""" +835 47 optimizer """adam""" +835 47 training_loop """owa""" +835 47 negative_sampler """basic""" +835 47 evaluator """rankbased""" +835 48 dataset """wn18rr""" +835 48 model """transe""" +835 48 loss """bceaftersigmoid""" +835 48 regularizer """no""" +835 48 optimizer """adam""" +835 48 training_loop """owa""" +835 48 negative_sampler """basic""" +835 48 evaluator """rankbased""" +835 49 dataset """wn18rr""" +835 49 model """transe""" +835 49 loss """bceaftersigmoid""" +835 49 regularizer """no""" +835 49 optimizer """adam""" +835 49 training_loop """owa""" +835 49 negative_sampler """basic""" +835 49 evaluator """rankbased""" +835 50 dataset """wn18rr""" +835 50 model """transe""" +835 50 loss """bceaftersigmoid""" +835 50 regularizer """no""" +835 50 optimizer """adam""" +835 50 training_loop """owa""" +835 50 negative_sampler """basic""" +835 50 evaluator """rankbased""" +835 51 dataset """wn18rr""" +835 51 model """transe""" +835 51 loss """bceaftersigmoid""" +835 51 regularizer """no""" +835 51 optimizer """adam""" +835 51 training_loop """owa""" +835 51 negative_sampler """basic""" +835 51 evaluator """rankbased""" +835 52 dataset """wn18rr""" +835 52 model 
"""transe""" +835 52 loss """bceaftersigmoid""" +835 52 regularizer """no""" +835 52 optimizer """adam""" +835 52 training_loop """owa""" +835 52 negative_sampler """basic""" +835 52 evaluator """rankbased""" +835 53 dataset """wn18rr""" +835 53 model """transe""" +835 53 loss """bceaftersigmoid""" +835 53 regularizer """no""" +835 53 optimizer """adam""" +835 53 training_loop """owa""" +835 53 negative_sampler """basic""" +835 53 evaluator """rankbased""" +835 54 dataset """wn18rr""" +835 54 model """transe""" +835 54 loss """bceaftersigmoid""" +835 54 regularizer """no""" +835 54 optimizer """adam""" +835 54 training_loop """owa""" +835 54 negative_sampler """basic""" +835 54 evaluator """rankbased""" +835 55 dataset """wn18rr""" +835 55 model """transe""" +835 55 loss """bceaftersigmoid""" +835 55 regularizer """no""" +835 55 optimizer """adam""" +835 55 training_loop """owa""" +835 55 negative_sampler """basic""" +835 55 evaluator """rankbased""" +835 56 dataset """wn18rr""" +835 56 model """transe""" +835 56 loss """bceaftersigmoid""" +835 56 regularizer """no""" +835 56 optimizer """adam""" +835 56 training_loop """owa""" +835 56 negative_sampler """basic""" +835 56 evaluator """rankbased""" +835 57 dataset """wn18rr""" +835 57 model """transe""" +835 57 loss """bceaftersigmoid""" +835 57 regularizer """no""" +835 57 optimizer """adam""" +835 57 training_loop """owa""" +835 57 negative_sampler """basic""" +835 57 evaluator """rankbased""" +835 58 dataset """wn18rr""" +835 58 model """transe""" +835 58 loss """bceaftersigmoid""" +835 58 regularizer """no""" +835 58 optimizer """adam""" +835 58 training_loop """owa""" +835 58 negative_sampler """basic""" +835 58 evaluator """rankbased""" +835 59 dataset """wn18rr""" +835 59 model """transe""" +835 59 loss """bceaftersigmoid""" +835 59 regularizer """no""" +835 59 optimizer """adam""" +835 59 training_loop """owa""" +835 59 negative_sampler """basic""" +835 59 evaluator """rankbased""" +835 60 dataset 
"""wn18rr""" +835 60 model """transe""" +835 60 loss """bceaftersigmoid""" +835 60 regularizer """no""" +835 60 optimizer """adam""" +835 60 training_loop """owa""" +835 60 negative_sampler """basic""" +835 60 evaluator """rankbased""" +835 61 dataset """wn18rr""" +835 61 model """transe""" +835 61 loss """bceaftersigmoid""" +835 61 regularizer """no""" +835 61 optimizer """adam""" +835 61 training_loop """owa""" +835 61 negative_sampler """basic""" +835 61 evaluator """rankbased""" +835 62 dataset """wn18rr""" +835 62 model """transe""" +835 62 loss """bceaftersigmoid""" +835 62 regularizer """no""" +835 62 optimizer """adam""" +835 62 training_loop """owa""" +835 62 negative_sampler """basic""" +835 62 evaluator """rankbased""" +835 63 dataset """wn18rr""" +835 63 model """transe""" +835 63 loss """bceaftersigmoid""" +835 63 regularizer """no""" +835 63 optimizer """adam""" +835 63 training_loop """owa""" +835 63 negative_sampler """basic""" +835 63 evaluator """rankbased""" +835 64 dataset """wn18rr""" +835 64 model """transe""" +835 64 loss """bceaftersigmoid""" +835 64 regularizer """no""" +835 64 optimizer """adam""" +835 64 training_loop """owa""" +835 64 negative_sampler """basic""" +835 64 evaluator """rankbased""" +835 65 dataset """wn18rr""" +835 65 model """transe""" +835 65 loss """bceaftersigmoid""" +835 65 regularizer """no""" +835 65 optimizer """adam""" +835 65 training_loop """owa""" +835 65 negative_sampler """basic""" +835 65 evaluator """rankbased""" +835 66 dataset """wn18rr""" +835 66 model """transe""" +835 66 loss """bceaftersigmoid""" +835 66 regularizer """no""" +835 66 optimizer """adam""" +835 66 training_loop """owa""" +835 66 negative_sampler """basic""" +835 66 evaluator """rankbased""" +835 67 dataset """wn18rr""" +835 67 model """transe""" +835 67 loss """bceaftersigmoid""" +835 67 regularizer """no""" +835 67 optimizer """adam""" +835 67 training_loop """owa""" +835 67 negative_sampler """basic""" +835 67 evaluator """rankbased""" 
+835 68 dataset """wn18rr""" +835 68 model """transe""" +835 68 loss """bceaftersigmoid""" +835 68 regularizer """no""" +835 68 optimizer """adam""" +835 68 training_loop """owa""" +835 68 negative_sampler """basic""" +835 68 evaluator """rankbased""" +835 69 dataset """wn18rr""" +835 69 model """transe""" +835 69 loss """bceaftersigmoid""" +835 69 regularizer """no""" +835 69 optimizer """adam""" +835 69 training_loop """owa""" +835 69 negative_sampler """basic""" +835 69 evaluator """rankbased""" +835 70 dataset """wn18rr""" +835 70 model """transe""" +835 70 loss """bceaftersigmoid""" +835 70 regularizer """no""" +835 70 optimizer """adam""" +835 70 training_loop """owa""" +835 70 negative_sampler """basic""" +835 70 evaluator """rankbased""" +835 71 dataset """wn18rr""" +835 71 model """transe""" +835 71 loss """bceaftersigmoid""" +835 71 regularizer """no""" +835 71 optimizer """adam""" +835 71 training_loop """owa""" +835 71 negative_sampler """basic""" +835 71 evaluator """rankbased""" +835 72 dataset """wn18rr""" +835 72 model """transe""" +835 72 loss """bceaftersigmoid""" +835 72 regularizer """no""" +835 72 optimizer """adam""" +835 72 training_loop """owa""" +835 72 negative_sampler """basic""" +835 72 evaluator """rankbased""" +835 73 dataset """wn18rr""" +835 73 model """transe""" +835 73 loss """bceaftersigmoid""" +835 73 regularizer """no""" +835 73 optimizer """adam""" +835 73 training_loop """owa""" +835 73 negative_sampler """basic""" +835 73 evaluator """rankbased""" +835 74 dataset """wn18rr""" +835 74 model """transe""" +835 74 loss """bceaftersigmoid""" +835 74 regularizer """no""" +835 74 optimizer """adam""" +835 74 training_loop """owa""" +835 74 negative_sampler """basic""" +835 74 evaluator """rankbased""" +835 75 dataset """wn18rr""" +835 75 model """transe""" +835 75 loss """bceaftersigmoid""" +835 75 regularizer """no""" +835 75 optimizer """adam""" +835 75 training_loop """owa""" +835 75 negative_sampler """basic""" +835 75 evaluator 
"""rankbased""" +835 76 dataset """wn18rr""" +835 76 model """transe""" +835 76 loss """bceaftersigmoid""" +835 76 regularizer """no""" +835 76 optimizer """adam""" +835 76 training_loop """owa""" +835 76 negative_sampler """basic""" +835 76 evaluator """rankbased""" +835 77 dataset """wn18rr""" +835 77 model """transe""" +835 77 loss """bceaftersigmoid""" +835 77 regularizer """no""" +835 77 optimizer """adam""" +835 77 training_loop """owa""" +835 77 negative_sampler """basic""" +835 77 evaluator """rankbased""" +835 78 dataset """wn18rr""" +835 78 model """transe""" +835 78 loss """bceaftersigmoid""" +835 78 regularizer """no""" +835 78 optimizer """adam""" +835 78 training_loop """owa""" +835 78 negative_sampler """basic""" +835 78 evaluator """rankbased""" +835 79 dataset """wn18rr""" +835 79 model """transe""" +835 79 loss """bceaftersigmoid""" +835 79 regularizer """no""" +835 79 optimizer """adam""" +835 79 training_loop """owa""" +835 79 negative_sampler """basic""" +835 79 evaluator """rankbased""" +835 80 dataset """wn18rr""" +835 80 model """transe""" +835 80 loss """bceaftersigmoid""" +835 80 regularizer """no""" +835 80 optimizer """adam""" +835 80 training_loop """owa""" +835 80 negative_sampler """basic""" +835 80 evaluator """rankbased""" +835 81 dataset """wn18rr""" +835 81 model """transe""" +835 81 loss """bceaftersigmoid""" +835 81 regularizer """no""" +835 81 optimizer """adam""" +835 81 training_loop """owa""" +835 81 negative_sampler """basic""" +835 81 evaluator """rankbased""" +835 82 dataset """wn18rr""" +835 82 model """transe""" +835 82 loss """bceaftersigmoid""" +835 82 regularizer """no""" +835 82 optimizer """adam""" +835 82 training_loop """owa""" +835 82 negative_sampler """basic""" +835 82 evaluator """rankbased""" +835 83 dataset """wn18rr""" +835 83 model """transe""" +835 83 loss """bceaftersigmoid""" +835 83 regularizer """no""" +835 83 optimizer """adam""" +835 83 training_loop """owa""" +835 83 negative_sampler """basic""" 
+835 83 evaluator """rankbased""" +835 84 dataset """wn18rr""" +835 84 model """transe""" +835 84 loss """bceaftersigmoid""" +835 84 regularizer """no""" +835 84 optimizer """adam""" +835 84 training_loop """owa""" +835 84 negative_sampler """basic""" +835 84 evaluator """rankbased""" +835 85 dataset """wn18rr""" +835 85 model """transe""" +835 85 loss """bceaftersigmoid""" +835 85 regularizer """no""" +835 85 optimizer """adam""" +835 85 training_loop """owa""" +835 85 negative_sampler """basic""" +835 85 evaluator """rankbased""" +835 86 dataset """wn18rr""" +835 86 model """transe""" +835 86 loss """bceaftersigmoid""" +835 86 regularizer """no""" +835 86 optimizer """adam""" +835 86 training_loop """owa""" +835 86 negative_sampler """basic""" +835 86 evaluator """rankbased""" +835 87 dataset """wn18rr""" +835 87 model """transe""" +835 87 loss """bceaftersigmoid""" +835 87 regularizer """no""" +835 87 optimizer """adam""" +835 87 training_loop """owa""" +835 87 negative_sampler """basic""" +835 87 evaluator """rankbased""" +835 88 dataset """wn18rr""" +835 88 model """transe""" +835 88 loss """bceaftersigmoid""" +835 88 regularizer """no""" +835 88 optimizer """adam""" +835 88 training_loop """owa""" +835 88 negative_sampler """basic""" +835 88 evaluator """rankbased""" +835 89 dataset """wn18rr""" +835 89 model """transe""" +835 89 loss """bceaftersigmoid""" +835 89 regularizer """no""" +835 89 optimizer """adam""" +835 89 training_loop """owa""" +835 89 negative_sampler """basic""" +835 89 evaluator """rankbased""" +835 90 dataset """wn18rr""" +835 90 model """transe""" +835 90 loss """bceaftersigmoid""" +835 90 regularizer """no""" +835 90 optimizer """adam""" +835 90 training_loop """owa""" +835 90 negative_sampler """basic""" +835 90 evaluator """rankbased""" +835 91 dataset """wn18rr""" +835 91 model """transe""" +835 91 loss """bceaftersigmoid""" +835 91 regularizer """no""" +835 91 optimizer """adam""" +835 91 training_loop """owa""" +835 91 
negative_sampler """basic""" +835 91 evaluator """rankbased""" +835 92 dataset """wn18rr""" +835 92 model """transe""" +835 92 loss """bceaftersigmoid""" +835 92 regularizer """no""" +835 92 optimizer """adam""" +835 92 training_loop """owa""" +835 92 negative_sampler """basic""" +835 92 evaluator """rankbased""" +835 93 dataset """wn18rr""" +835 93 model """transe""" +835 93 loss """bceaftersigmoid""" +835 93 regularizer """no""" +835 93 optimizer """adam""" +835 93 training_loop """owa""" +835 93 negative_sampler """basic""" +835 93 evaluator """rankbased""" +835 94 dataset """wn18rr""" +835 94 model """transe""" +835 94 loss """bceaftersigmoid""" +835 94 regularizer """no""" +835 94 optimizer """adam""" +835 94 training_loop """owa""" +835 94 negative_sampler """basic""" +835 94 evaluator """rankbased""" +835 95 dataset """wn18rr""" +835 95 model """transe""" +835 95 loss """bceaftersigmoid""" +835 95 regularizer """no""" +835 95 optimizer """adam""" +835 95 training_loop """owa""" +835 95 negative_sampler """basic""" +835 95 evaluator """rankbased""" +835 96 dataset """wn18rr""" +835 96 model """transe""" +835 96 loss """bceaftersigmoid""" +835 96 regularizer """no""" +835 96 optimizer """adam""" +835 96 training_loop """owa""" +835 96 negative_sampler """basic""" +835 96 evaluator """rankbased""" +835 97 dataset """wn18rr""" +835 97 model """transe""" +835 97 loss """bceaftersigmoid""" +835 97 regularizer """no""" +835 97 optimizer """adam""" +835 97 training_loop """owa""" +835 97 negative_sampler """basic""" +835 97 evaluator """rankbased""" +835 98 dataset """wn18rr""" +835 98 model """transe""" +835 98 loss """bceaftersigmoid""" +835 98 regularizer """no""" +835 98 optimizer """adam""" +835 98 training_loop """owa""" +835 98 negative_sampler """basic""" +835 98 evaluator """rankbased""" +835 99 dataset """wn18rr""" +835 99 model """transe""" +835 99 loss """bceaftersigmoid""" +835 99 regularizer """no""" +835 99 optimizer """adam""" +835 99 training_loop 
"""owa""" +835 99 negative_sampler """basic""" +835 99 evaluator """rankbased""" +835 100 dataset """wn18rr""" +835 100 model """transe""" +835 100 loss """bceaftersigmoid""" +835 100 regularizer """no""" +835 100 optimizer """adam""" +835 100 training_loop """owa""" +835 100 negative_sampler """basic""" +835 100 evaluator """rankbased""" +836 1 model.embedding_dim 2.0 +836 1 model.scoring_fct_norm 1.0 +836 1 optimizer.lr 0.00570099970587152 +836 1 negative_sampler.num_negs_per_pos 57.0 +836 1 training.batch_size 0.0 +836 2 model.embedding_dim 2.0 +836 2 model.scoring_fct_norm 2.0 +836 2 optimizer.lr 0.09496318129476326 +836 2 negative_sampler.num_negs_per_pos 12.0 +836 2 training.batch_size 2.0 +836 3 model.embedding_dim 1.0 +836 3 model.scoring_fct_norm 1.0 +836 3 optimizer.lr 0.019583970208227323 +836 3 negative_sampler.num_negs_per_pos 19.0 +836 3 training.batch_size 0.0 +836 4 model.embedding_dim 0.0 +836 4 model.scoring_fct_norm 2.0 +836 4 optimizer.lr 0.07073098110829266 +836 4 negative_sampler.num_negs_per_pos 59.0 +836 4 training.batch_size 0.0 +836 5 model.embedding_dim 0.0 +836 5 model.scoring_fct_norm 2.0 +836 5 optimizer.lr 0.027374479612433913 +836 5 negative_sampler.num_negs_per_pos 0.0 +836 5 training.batch_size 2.0 +836 6 model.embedding_dim 1.0 +836 6 model.scoring_fct_norm 2.0 +836 6 optimizer.lr 0.009383169744119405 +836 6 negative_sampler.num_negs_per_pos 8.0 +836 6 training.batch_size 2.0 +836 7 model.embedding_dim 2.0 +836 7 model.scoring_fct_norm 2.0 +836 7 optimizer.lr 0.03706304613372344 +836 7 negative_sampler.num_negs_per_pos 50.0 +836 7 training.batch_size 2.0 +836 8 model.embedding_dim 1.0 +836 8 model.scoring_fct_norm 2.0 +836 8 optimizer.lr 0.001232348297290593 +836 8 negative_sampler.num_negs_per_pos 23.0 +836 8 training.batch_size 1.0 +836 9 model.embedding_dim 1.0 +836 9 model.scoring_fct_norm 1.0 +836 9 optimizer.lr 0.09359814003923152 +836 9 negative_sampler.num_negs_per_pos 41.0 +836 9 training.batch_size 2.0 +836 10 
model.embedding_dim 1.0 +836 10 model.scoring_fct_norm 2.0 +836 10 optimizer.lr 0.012065178359501811 +836 10 negative_sampler.num_negs_per_pos 50.0 +836 10 training.batch_size 0.0 +836 11 model.embedding_dim 2.0 +836 11 model.scoring_fct_norm 2.0 +836 11 optimizer.lr 0.025115737888539047 +836 11 negative_sampler.num_negs_per_pos 47.0 +836 11 training.batch_size 2.0 +836 12 model.embedding_dim 0.0 +836 12 model.scoring_fct_norm 1.0 +836 12 optimizer.lr 0.005691503630417457 +836 12 negative_sampler.num_negs_per_pos 14.0 +836 12 training.batch_size 0.0 +836 13 model.embedding_dim 0.0 +836 13 model.scoring_fct_norm 2.0 +836 13 optimizer.lr 0.0029784259723580914 +836 13 negative_sampler.num_negs_per_pos 31.0 +836 13 training.batch_size 1.0 +836 14 model.embedding_dim 1.0 +836 14 model.scoring_fct_norm 1.0 +836 14 optimizer.lr 0.008447935555222812 +836 14 negative_sampler.num_negs_per_pos 72.0 +836 14 training.batch_size 2.0 +836 15 model.embedding_dim 1.0 +836 15 model.scoring_fct_norm 1.0 +836 15 optimizer.lr 0.0022098507399225128 +836 15 negative_sampler.num_negs_per_pos 77.0 +836 15 training.batch_size 0.0 +836 16 model.embedding_dim 1.0 +836 16 model.scoring_fct_norm 2.0 +836 16 optimizer.lr 0.007791380120364341 +836 16 negative_sampler.num_negs_per_pos 76.0 +836 16 training.batch_size 1.0 +836 17 model.embedding_dim 1.0 +836 17 model.scoring_fct_norm 1.0 +836 17 optimizer.lr 0.0023243858678054846 +836 17 negative_sampler.num_negs_per_pos 79.0 +836 17 training.batch_size 0.0 +836 18 model.embedding_dim 0.0 +836 18 model.scoring_fct_norm 1.0 +836 18 optimizer.lr 0.0021426911644689085 +836 18 negative_sampler.num_negs_per_pos 24.0 +836 18 training.batch_size 1.0 +836 19 model.embedding_dim 0.0 +836 19 model.scoring_fct_norm 2.0 +836 19 optimizer.lr 0.003178972778746851 +836 19 negative_sampler.num_negs_per_pos 62.0 +836 19 training.batch_size 2.0 +836 20 model.embedding_dim 2.0 +836 20 model.scoring_fct_norm 1.0 +836 20 optimizer.lr 0.06899950430981304 +836 20 
negative_sampler.num_negs_per_pos 70.0 +836 20 training.batch_size 0.0 +836 21 model.embedding_dim 0.0 +836 21 model.scoring_fct_norm 2.0 +836 21 optimizer.lr 0.00396541863273106 +836 21 negative_sampler.num_negs_per_pos 52.0 +836 21 training.batch_size 2.0 +836 22 model.embedding_dim 0.0 +836 22 model.scoring_fct_norm 1.0 +836 22 optimizer.lr 0.0145023551696663 +836 22 negative_sampler.num_negs_per_pos 30.0 +836 22 training.batch_size 0.0 +836 23 model.embedding_dim 0.0 +836 23 model.scoring_fct_norm 1.0 +836 23 optimizer.lr 0.04191328064691194 +836 23 negative_sampler.num_negs_per_pos 10.0 +836 23 training.batch_size 0.0 +836 24 model.embedding_dim 0.0 +836 24 model.scoring_fct_norm 2.0 +836 24 optimizer.lr 0.0025452169114220553 +836 24 negative_sampler.num_negs_per_pos 46.0 +836 24 training.batch_size 2.0 +836 25 model.embedding_dim 0.0 +836 25 model.scoring_fct_norm 2.0 +836 25 optimizer.lr 0.017126798026042094 +836 25 negative_sampler.num_negs_per_pos 33.0 +836 25 training.batch_size 0.0 +836 26 model.embedding_dim 1.0 +836 26 model.scoring_fct_norm 1.0 +836 26 optimizer.lr 0.07016407737057286 +836 26 negative_sampler.num_negs_per_pos 97.0 +836 26 training.batch_size 1.0 +836 27 model.embedding_dim 0.0 +836 27 model.scoring_fct_norm 1.0 +836 27 optimizer.lr 0.039733681433226226 +836 27 negative_sampler.num_negs_per_pos 38.0 +836 27 training.batch_size 0.0 +836 28 model.embedding_dim 1.0 +836 28 model.scoring_fct_norm 1.0 +836 28 optimizer.lr 0.03358187552680235 +836 28 negative_sampler.num_negs_per_pos 73.0 +836 28 training.batch_size 0.0 +836 29 model.embedding_dim 2.0 +836 29 model.scoring_fct_norm 1.0 +836 29 optimizer.lr 0.0029008450583003355 +836 29 negative_sampler.num_negs_per_pos 6.0 +836 29 training.batch_size 2.0 +836 30 model.embedding_dim 2.0 +836 30 model.scoring_fct_norm 2.0 +836 30 optimizer.lr 0.01816725418337419 +836 30 negative_sampler.num_negs_per_pos 68.0 +836 30 training.batch_size 1.0 +836 31 model.embedding_dim 1.0 +836 31 
model.scoring_fct_norm 2.0 +836 31 optimizer.lr 0.0024135724449253033 +836 31 negative_sampler.num_negs_per_pos 53.0 +836 31 training.batch_size 1.0 +836 32 model.embedding_dim 2.0 +836 32 model.scoring_fct_norm 2.0 +836 32 optimizer.lr 0.0072121004485654035 +836 32 negative_sampler.num_negs_per_pos 79.0 +836 32 training.batch_size 0.0 +836 33 model.embedding_dim 1.0 +836 33 model.scoring_fct_norm 1.0 +836 33 optimizer.lr 0.0017895074065740406 +836 33 negative_sampler.num_negs_per_pos 90.0 +836 33 training.batch_size 0.0 +836 34 model.embedding_dim 0.0 +836 34 model.scoring_fct_norm 2.0 +836 34 optimizer.lr 0.006463644172853715 +836 34 negative_sampler.num_negs_per_pos 49.0 +836 34 training.batch_size 2.0 +836 35 model.embedding_dim 2.0 +836 35 model.scoring_fct_norm 2.0 +836 35 optimizer.lr 0.019607705737343106 +836 35 negative_sampler.num_negs_per_pos 95.0 +836 35 training.batch_size 2.0 +836 36 model.embedding_dim 0.0 +836 36 model.scoring_fct_norm 2.0 +836 36 optimizer.lr 0.002635728399442041 +836 36 negative_sampler.num_negs_per_pos 18.0 +836 36 training.batch_size 2.0 +836 37 model.embedding_dim 0.0 +836 37 model.scoring_fct_norm 1.0 +836 37 optimizer.lr 0.0028739329958815665 +836 37 negative_sampler.num_negs_per_pos 74.0 +836 37 training.batch_size 0.0 +836 38 model.embedding_dim 0.0 +836 38 model.scoring_fct_norm 2.0 +836 38 optimizer.lr 0.012912334329049294 +836 38 negative_sampler.num_negs_per_pos 81.0 +836 38 training.batch_size 2.0 +836 39 model.embedding_dim 2.0 +836 39 model.scoring_fct_norm 2.0 +836 39 optimizer.lr 0.0024502660378522252 +836 39 negative_sampler.num_negs_per_pos 88.0 +836 39 training.batch_size 1.0 +836 40 model.embedding_dim 1.0 +836 40 model.scoring_fct_norm 2.0 +836 40 optimizer.lr 0.009404177925373458 +836 40 negative_sampler.num_negs_per_pos 59.0 +836 40 training.batch_size 2.0 +836 41 model.embedding_dim 2.0 +836 41 model.scoring_fct_norm 2.0 +836 41 optimizer.lr 0.0034551672955617087 +836 41 negative_sampler.num_negs_per_pos 
11.0 +836 41 training.batch_size 0.0 +836 42 model.embedding_dim 0.0 +836 42 model.scoring_fct_norm 2.0 +836 42 optimizer.lr 0.03314654340953004 +836 42 negative_sampler.num_negs_per_pos 28.0 +836 42 training.batch_size 1.0 +836 43 model.embedding_dim 2.0 +836 43 model.scoring_fct_norm 1.0 +836 43 optimizer.lr 0.031458440641296166 +836 43 negative_sampler.num_negs_per_pos 59.0 +836 43 training.batch_size 0.0 +836 44 model.embedding_dim 2.0 +836 44 model.scoring_fct_norm 1.0 +836 44 optimizer.lr 0.005486945819063019 +836 44 negative_sampler.num_negs_per_pos 45.0 +836 44 training.batch_size 2.0 +836 45 model.embedding_dim 1.0 +836 45 model.scoring_fct_norm 2.0 +836 45 optimizer.lr 0.0073474217975190315 +836 45 negative_sampler.num_negs_per_pos 96.0 +836 45 training.batch_size 1.0 +836 46 model.embedding_dim 1.0 +836 46 model.scoring_fct_norm 1.0 +836 46 optimizer.lr 0.0024047874148289635 +836 46 negative_sampler.num_negs_per_pos 87.0 +836 46 training.batch_size 1.0 +836 47 model.embedding_dim 0.0 +836 47 model.scoring_fct_norm 1.0 +836 47 optimizer.lr 0.0971344055541764 +836 47 negative_sampler.num_negs_per_pos 52.0 +836 47 training.batch_size 2.0 +836 48 model.embedding_dim 0.0 +836 48 model.scoring_fct_norm 1.0 +836 48 optimizer.lr 0.015731278111407968 +836 48 negative_sampler.num_negs_per_pos 36.0 +836 48 training.batch_size 0.0 +836 49 model.embedding_dim 1.0 +836 49 model.scoring_fct_norm 2.0 +836 49 optimizer.lr 0.0015761990686913477 +836 49 negative_sampler.num_negs_per_pos 11.0 +836 49 training.batch_size 2.0 +836 50 model.embedding_dim 0.0 +836 50 model.scoring_fct_norm 1.0 +836 50 optimizer.lr 0.0012317973384446844 +836 50 negative_sampler.num_negs_per_pos 49.0 +836 50 training.batch_size 1.0 +836 51 model.embedding_dim 1.0 +836 51 model.scoring_fct_norm 1.0 +836 51 optimizer.lr 0.0900101557621851 +836 51 negative_sampler.num_negs_per_pos 15.0 +836 51 training.batch_size 2.0 +836 52 model.embedding_dim 0.0 +836 52 model.scoring_fct_norm 2.0 +836 52 
optimizer.lr 0.07493079740503958 +836 52 negative_sampler.num_negs_per_pos 65.0 +836 52 training.batch_size 2.0 +836 53 model.embedding_dim 0.0 +836 53 model.scoring_fct_norm 1.0 +836 53 optimizer.lr 0.00824201660783897 +836 53 negative_sampler.num_negs_per_pos 48.0 +836 53 training.batch_size 0.0 +836 54 model.embedding_dim 0.0 +836 54 model.scoring_fct_norm 1.0 +836 54 optimizer.lr 0.03324718028331867 +836 54 negative_sampler.num_negs_per_pos 85.0 +836 54 training.batch_size 1.0 +836 55 model.embedding_dim 1.0 +836 55 model.scoring_fct_norm 1.0 +836 55 optimizer.lr 0.0013840305770757158 +836 55 negative_sampler.num_negs_per_pos 20.0 +836 55 training.batch_size 1.0 +836 56 model.embedding_dim 2.0 +836 56 model.scoring_fct_norm 2.0 +836 56 optimizer.lr 0.005436436276855133 +836 56 negative_sampler.num_negs_per_pos 22.0 +836 56 training.batch_size 1.0 +836 57 model.embedding_dim 1.0 +836 57 model.scoring_fct_norm 2.0 +836 57 optimizer.lr 0.07516078462971933 +836 57 negative_sampler.num_negs_per_pos 74.0 +836 57 training.batch_size 1.0 +836 58 model.embedding_dim 2.0 +836 58 model.scoring_fct_norm 1.0 +836 58 optimizer.lr 0.01724332646890785 +836 58 negative_sampler.num_negs_per_pos 62.0 +836 58 training.batch_size 2.0 +836 59 model.embedding_dim 1.0 +836 59 model.scoring_fct_norm 2.0 +836 59 optimizer.lr 0.08578797152226382 +836 59 negative_sampler.num_negs_per_pos 53.0 +836 59 training.batch_size 0.0 +836 60 model.embedding_dim 2.0 +836 60 model.scoring_fct_norm 1.0 +836 60 optimizer.lr 0.0041389212914164026 +836 60 negative_sampler.num_negs_per_pos 23.0 +836 60 training.batch_size 2.0 +836 61 model.embedding_dim 0.0 +836 61 model.scoring_fct_norm 2.0 +836 61 optimizer.lr 0.005248495567875628 +836 61 negative_sampler.num_negs_per_pos 16.0 +836 61 training.batch_size 0.0 +836 62 model.embedding_dim 1.0 +836 62 model.scoring_fct_norm 1.0 +836 62 optimizer.lr 0.00806708733794425 +836 62 negative_sampler.num_negs_per_pos 23.0 +836 62 training.batch_size 2.0 +836 63 
model.embedding_dim 0.0 +836 63 model.scoring_fct_norm 2.0 +836 63 optimizer.lr 0.042626591633357705 +836 63 negative_sampler.num_negs_per_pos 28.0 +836 63 training.batch_size 0.0 +836 64 model.embedding_dim 0.0 +836 64 model.scoring_fct_norm 1.0 +836 64 optimizer.lr 0.06886171395490509 +836 64 negative_sampler.num_negs_per_pos 50.0 +836 64 training.batch_size 2.0 +836 65 model.embedding_dim 2.0 +836 65 model.scoring_fct_norm 2.0 +836 65 optimizer.lr 0.08196683131107518 +836 65 negative_sampler.num_negs_per_pos 83.0 +836 65 training.batch_size 1.0 +836 66 model.embedding_dim 1.0 +836 66 model.scoring_fct_norm 2.0 +836 66 optimizer.lr 0.0010691094500756243 +836 66 negative_sampler.num_negs_per_pos 44.0 +836 66 training.batch_size 0.0 +836 67 model.embedding_dim 1.0 +836 67 model.scoring_fct_norm 2.0 +836 67 optimizer.lr 0.002396413948582704 +836 67 negative_sampler.num_negs_per_pos 54.0 +836 67 training.batch_size 1.0 +836 68 model.embedding_dim 0.0 +836 68 model.scoring_fct_norm 1.0 +836 68 optimizer.lr 0.0025053951532197772 +836 68 negative_sampler.num_negs_per_pos 36.0 +836 68 training.batch_size 1.0 +836 69 model.embedding_dim 1.0 +836 69 model.scoring_fct_norm 2.0 +836 69 optimizer.lr 0.003724487838867815 +836 69 negative_sampler.num_negs_per_pos 34.0 +836 69 training.batch_size 0.0 +836 70 model.embedding_dim 2.0 +836 70 model.scoring_fct_norm 2.0 +836 70 optimizer.lr 0.0035758672098093218 +836 70 negative_sampler.num_negs_per_pos 87.0 +836 70 training.batch_size 2.0 +836 71 model.embedding_dim 0.0 +836 71 model.scoring_fct_norm 2.0 +836 71 optimizer.lr 0.02755709510832631 +836 71 negative_sampler.num_negs_per_pos 29.0 +836 71 training.batch_size 2.0 +836 72 model.embedding_dim 2.0 +836 72 model.scoring_fct_norm 1.0 +836 72 optimizer.lr 0.010509369778244309 +836 72 negative_sampler.num_negs_per_pos 73.0 +836 72 training.batch_size 0.0 +836 73 model.embedding_dim 1.0 +836 73 model.scoring_fct_norm 1.0 +836 73 optimizer.lr 0.013342249330332494 +836 73 
negative_sampler.num_negs_per_pos 66.0 +836 73 training.batch_size 1.0 +836 74 model.embedding_dim 2.0 +836 74 model.scoring_fct_norm 2.0 +836 74 optimizer.lr 0.006240961529321368 +836 74 negative_sampler.num_negs_per_pos 34.0 +836 74 training.batch_size 2.0 +836 75 model.embedding_dim 2.0 +836 75 model.scoring_fct_norm 1.0 +836 75 optimizer.lr 0.09762749460262121 +836 75 negative_sampler.num_negs_per_pos 27.0 +836 75 training.batch_size 0.0 +836 76 model.embedding_dim 2.0 +836 76 model.scoring_fct_norm 2.0 +836 76 optimizer.lr 0.0015054008706084454 +836 76 negative_sampler.num_negs_per_pos 65.0 +836 76 training.batch_size 1.0 +836 77 model.embedding_dim 2.0 +836 77 model.scoring_fct_norm 2.0 +836 77 optimizer.lr 0.02438853634767435 +836 77 negative_sampler.num_negs_per_pos 54.0 +836 77 training.batch_size 0.0 +836 78 model.embedding_dim 0.0 +836 78 model.scoring_fct_norm 2.0 +836 78 optimizer.lr 0.003346416433516319 +836 78 negative_sampler.num_negs_per_pos 17.0 +836 78 training.batch_size 2.0 +836 79 model.embedding_dim 2.0 +836 79 model.scoring_fct_norm 2.0 +836 79 optimizer.lr 0.07754960006223006 +836 79 negative_sampler.num_negs_per_pos 0.0 +836 79 training.batch_size 2.0 +836 80 model.embedding_dim 1.0 +836 80 model.scoring_fct_norm 1.0 +836 80 optimizer.lr 0.07049537223211018 +836 80 negative_sampler.num_negs_per_pos 57.0 +836 80 training.batch_size 0.0 +836 81 model.embedding_dim 1.0 +836 81 model.scoring_fct_norm 1.0 +836 81 optimizer.lr 0.053361472023730885 +836 81 negative_sampler.num_negs_per_pos 51.0 +836 81 training.batch_size 2.0 +836 82 model.embedding_dim 0.0 +836 82 model.scoring_fct_norm 1.0 +836 82 optimizer.lr 0.004214667870918433 +836 82 negative_sampler.num_negs_per_pos 19.0 +836 82 training.batch_size 1.0 +836 83 model.embedding_dim 1.0 +836 83 model.scoring_fct_norm 2.0 +836 83 optimizer.lr 0.038782123921161316 +836 83 negative_sampler.num_negs_per_pos 6.0 +836 83 training.batch_size 0.0 +836 84 model.embedding_dim 2.0 +836 84 
model.scoring_fct_norm 1.0 +836 84 optimizer.lr 0.004433621880308426 +836 84 negative_sampler.num_negs_per_pos 57.0 +836 84 training.batch_size 1.0 +836 85 model.embedding_dim 0.0 +836 85 model.scoring_fct_norm 2.0 +836 85 optimizer.lr 0.050233306906512884 +836 85 negative_sampler.num_negs_per_pos 43.0 +836 85 training.batch_size 1.0 +836 86 model.embedding_dim 1.0 +836 86 model.scoring_fct_norm 2.0 +836 86 optimizer.lr 0.019241875263938703 +836 86 negative_sampler.num_negs_per_pos 55.0 +836 86 training.batch_size 1.0 +836 87 model.embedding_dim 0.0 +836 87 model.scoring_fct_norm 2.0 +836 87 optimizer.lr 0.01095002480249958 +836 87 negative_sampler.num_negs_per_pos 47.0 +836 87 training.batch_size 0.0 +836 88 model.embedding_dim 2.0 +836 88 model.scoring_fct_norm 1.0 +836 88 optimizer.lr 0.00924057465450801 +836 88 negative_sampler.num_negs_per_pos 0.0 +836 88 training.batch_size 0.0 +836 89 model.embedding_dim 0.0 +836 89 model.scoring_fct_norm 1.0 +836 89 optimizer.lr 0.0023303353133558077 +836 89 negative_sampler.num_negs_per_pos 91.0 +836 89 training.batch_size 1.0 +836 90 model.embedding_dim 1.0 +836 90 model.scoring_fct_norm 1.0 +836 90 optimizer.lr 0.005131590883584475 +836 90 negative_sampler.num_negs_per_pos 59.0 +836 90 training.batch_size 1.0 +836 91 model.embedding_dim 0.0 +836 91 model.scoring_fct_norm 2.0 +836 91 optimizer.lr 0.03977742218888142 +836 91 negative_sampler.num_negs_per_pos 81.0 +836 91 training.batch_size 1.0 +836 92 model.embedding_dim 0.0 +836 92 model.scoring_fct_norm 1.0 +836 92 optimizer.lr 0.009548200086220957 +836 92 negative_sampler.num_negs_per_pos 17.0 +836 92 training.batch_size 1.0 +836 93 model.embedding_dim 1.0 +836 93 model.scoring_fct_norm 1.0 +836 93 optimizer.lr 0.028084003039707334 +836 93 negative_sampler.num_negs_per_pos 74.0 +836 93 training.batch_size 1.0 +836 94 model.embedding_dim 0.0 +836 94 model.scoring_fct_norm 1.0 +836 94 optimizer.lr 0.0016386704661780476 +836 94 negative_sampler.num_negs_per_pos 85.0 +836 
94 training.batch_size 0.0 +836 95 model.embedding_dim 2.0 +836 95 model.scoring_fct_norm 2.0 +836 95 optimizer.lr 0.002087872599173185 +836 95 negative_sampler.num_negs_per_pos 25.0 +836 95 training.batch_size 2.0 +836 96 model.embedding_dim 1.0 +836 96 model.scoring_fct_norm 2.0 +836 96 optimizer.lr 0.0023704251991579885 +836 96 negative_sampler.num_negs_per_pos 43.0 +836 96 training.batch_size 2.0 +836 97 model.embedding_dim 1.0 +836 97 model.scoring_fct_norm 1.0 +836 97 optimizer.lr 0.025167154349318977 +836 97 negative_sampler.num_negs_per_pos 24.0 +836 97 training.batch_size 0.0 +836 98 model.embedding_dim 1.0 +836 98 model.scoring_fct_norm 2.0 +836 98 optimizer.lr 0.007041113938813328 +836 98 negative_sampler.num_negs_per_pos 76.0 +836 98 training.batch_size 0.0 +836 99 model.embedding_dim 1.0 +836 99 model.scoring_fct_norm 2.0 +836 99 optimizer.lr 0.0011050280920157935 +836 99 negative_sampler.num_negs_per_pos 74.0 +836 99 training.batch_size 0.0 +836 100 model.embedding_dim 0.0 +836 100 model.scoring_fct_norm 2.0 +836 100 optimizer.lr 0.01259642867474371 +836 100 negative_sampler.num_negs_per_pos 28.0 +836 100 training.batch_size 2.0 +836 1 dataset """wn18rr""" +836 1 model """transe""" +836 1 loss """softplus""" +836 1 regularizer """no""" +836 1 optimizer """adam""" +836 1 training_loop """owa""" +836 1 negative_sampler """basic""" +836 1 evaluator """rankbased""" +836 2 dataset """wn18rr""" +836 2 model """transe""" +836 2 loss """softplus""" +836 2 regularizer """no""" +836 2 optimizer """adam""" +836 2 training_loop """owa""" +836 2 negative_sampler """basic""" +836 2 evaluator """rankbased""" +836 3 dataset """wn18rr""" +836 3 model """transe""" +836 3 loss """softplus""" +836 3 regularizer """no""" +836 3 optimizer """adam""" +836 3 training_loop """owa""" +836 3 negative_sampler """basic""" +836 3 evaluator """rankbased""" +836 4 dataset """wn18rr""" +836 4 model """transe""" +836 4 loss """softplus""" +836 4 regularizer """no""" +836 4 optimizer 
"""adam""" +836 4 training_loop """owa""" +836 4 negative_sampler """basic""" +836 4 evaluator """rankbased""" +836 5 dataset """wn18rr""" +836 5 model """transe""" +836 5 loss """softplus""" +836 5 regularizer """no""" +836 5 optimizer """adam""" +836 5 training_loop """owa""" +836 5 negative_sampler """basic""" +836 5 evaluator """rankbased""" +836 6 dataset """wn18rr""" +836 6 model """transe""" +836 6 loss """softplus""" +836 6 regularizer """no""" +836 6 optimizer """adam""" +836 6 training_loop """owa""" +836 6 negative_sampler """basic""" +836 6 evaluator """rankbased""" +836 7 dataset """wn18rr""" +836 7 model """transe""" +836 7 loss """softplus""" +836 7 regularizer """no""" +836 7 optimizer """adam""" +836 7 training_loop """owa""" +836 7 negative_sampler """basic""" +836 7 evaluator """rankbased""" +836 8 dataset """wn18rr""" +836 8 model """transe""" +836 8 loss """softplus""" +836 8 regularizer """no""" +836 8 optimizer """adam""" +836 8 training_loop """owa""" +836 8 negative_sampler """basic""" +836 8 evaluator """rankbased""" +836 9 dataset """wn18rr""" +836 9 model """transe""" +836 9 loss """softplus""" +836 9 regularizer """no""" +836 9 optimizer """adam""" +836 9 training_loop """owa""" +836 9 negative_sampler """basic""" +836 9 evaluator """rankbased""" +836 10 dataset """wn18rr""" +836 10 model """transe""" +836 10 loss """softplus""" +836 10 regularizer """no""" +836 10 optimizer """adam""" +836 10 training_loop """owa""" +836 10 negative_sampler """basic""" +836 10 evaluator """rankbased""" +836 11 dataset """wn18rr""" +836 11 model """transe""" +836 11 loss """softplus""" +836 11 regularizer """no""" +836 11 optimizer """adam""" +836 11 training_loop """owa""" +836 11 negative_sampler """basic""" +836 11 evaluator """rankbased""" +836 12 dataset """wn18rr""" +836 12 model """transe""" +836 12 loss """softplus""" +836 12 regularizer """no""" +836 12 optimizer """adam""" +836 12 training_loop """owa""" +836 12 negative_sampler """basic""" 
+836 12 evaluator """rankbased""" +836 13 dataset """wn18rr""" +836 13 model """transe""" +836 13 loss """softplus""" +836 13 regularizer """no""" +836 13 optimizer """adam""" +836 13 training_loop """owa""" +836 13 negative_sampler """basic""" +836 13 evaluator """rankbased""" +836 14 dataset """wn18rr""" +836 14 model """transe""" +836 14 loss """softplus""" +836 14 regularizer """no""" +836 14 optimizer """adam""" +836 14 training_loop """owa""" +836 14 negative_sampler """basic""" +836 14 evaluator """rankbased""" +836 15 dataset """wn18rr""" +836 15 model """transe""" +836 15 loss """softplus""" +836 15 regularizer """no""" +836 15 optimizer """adam""" +836 15 training_loop """owa""" +836 15 negative_sampler """basic""" +836 15 evaluator """rankbased""" +836 16 dataset """wn18rr""" +836 16 model """transe""" +836 16 loss """softplus""" +836 16 regularizer """no""" +836 16 optimizer """adam""" +836 16 training_loop """owa""" +836 16 negative_sampler """basic""" +836 16 evaluator """rankbased""" +836 17 dataset """wn18rr""" +836 17 model """transe""" +836 17 loss """softplus""" +836 17 regularizer """no""" +836 17 optimizer """adam""" +836 17 training_loop """owa""" +836 17 negative_sampler """basic""" +836 17 evaluator """rankbased""" +836 18 dataset """wn18rr""" +836 18 model """transe""" +836 18 loss """softplus""" +836 18 regularizer """no""" +836 18 optimizer """adam""" +836 18 training_loop """owa""" +836 18 negative_sampler """basic""" +836 18 evaluator """rankbased""" +836 19 dataset """wn18rr""" +836 19 model """transe""" +836 19 loss """softplus""" +836 19 regularizer """no""" +836 19 optimizer """adam""" +836 19 training_loop """owa""" +836 19 negative_sampler """basic""" +836 19 evaluator """rankbased""" +836 20 dataset """wn18rr""" +836 20 model """transe""" +836 20 loss """softplus""" +836 20 regularizer """no""" +836 20 optimizer """adam""" +836 20 training_loop """owa""" +836 20 negative_sampler """basic""" +836 20 evaluator """rankbased""" +836 
21 dataset """wn18rr""" +836 21 model """transe""" +836 21 loss """softplus""" +836 21 regularizer """no""" +836 21 optimizer """adam""" +836 21 training_loop """owa""" +836 21 negative_sampler """basic""" +836 21 evaluator """rankbased""" +836 22 dataset """wn18rr""" +836 22 model """transe""" +836 22 loss """softplus""" +836 22 regularizer """no""" +836 22 optimizer """adam""" +836 22 training_loop """owa""" +836 22 negative_sampler """basic""" +836 22 evaluator """rankbased""" +836 23 dataset """wn18rr""" +836 23 model """transe""" +836 23 loss """softplus""" +836 23 regularizer """no""" +836 23 optimizer """adam""" +836 23 training_loop """owa""" +836 23 negative_sampler """basic""" +836 23 evaluator """rankbased""" +836 24 dataset """wn18rr""" +836 24 model """transe""" +836 24 loss """softplus""" +836 24 regularizer """no""" +836 24 optimizer """adam""" +836 24 training_loop """owa""" +836 24 negative_sampler """basic""" +836 24 evaluator """rankbased""" +836 25 dataset """wn18rr""" +836 25 model """transe""" +836 25 loss """softplus""" +836 25 regularizer """no""" +836 25 optimizer """adam""" +836 25 training_loop """owa""" +836 25 negative_sampler """basic""" +836 25 evaluator """rankbased""" +836 26 dataset """wn18rr""" +836 26 model """transe""" +836 26 loss """softplus""" +836 26 regularizer """no""" +836 26 optimizer """adam""" +836 26 training_loop """owa""" +836 26 negative_sampler """basic""" +836 26 evaluator """rankbased""" +836 27 dataset """wn18rr""" +836 27 model """transe""" +836 27 loss """softplus""" +836 27 regularizer """no""" +836 27 optimizer """adam""" +836 27 training_loop """owa""" +836 27 negative_sampler """basic""" +836 27 evaluator """rankbased""" +836 28 dataset """wn18rr""" +836 28 model """transe""" +836 28 loss """softplus""" +836 28 regularizer """no""" +836 28 optimizer """adam""" +836 28 training_loop """owa""" +836 28 negative_sampler """basic""" +836 28 evaluator """rankbased""" +836 29 dataset """wn18rr""" +836 29 model 
"""transe""" +836 29 loss """softplus""" +836 29 regularizer """no""" +836 29 optimizer """adam""" +836 29 training_loop """owa""" +836 29 negative_sampler """basic""" +836 29 evaluator """rankbased""" +836 30 dataset """wn18rr""" +836 30 model """transe""" +836 30 loss """softplus""" +836 30 regularizer """no""" +836 30 optimizer """adam""" +836 30 training_loop """owa""" +836 30 negative_sampler """basic""" +836 30 evaluator """rankbased""" +836 31 dataset """wn18rr""" +836 31 model """transe""" +836 31 loss """softplus""" +836 31 regularizer """no""" +836 31 optimizer """adam""" +836 31 training_loop """owa""" +836 31 negative_sampler """basic""" +836 31 evaluator """rankbased""" +836 32 dataset """wn18rr""" +836 32 model """transe""" +836 32 loss """softplus""" +836 32 regularizer """no""" +836 32 optimizer """adam""" +836 32 training_loop """owa""" +836 32 negative_sampler """basic""" +836 32 evaluator """rankbased""" +836 33 dataset """wn18rr""" +836 33 model """transe""" +836 33 loss """softplus""" +836 33 regularizer """no""" +836 33 optimizer """adam""" +836 33 training_loop """owa""" +836 33 negative_sampler """basic""" +836 33 evaluator """rankbased""" +836 34 dataset """wn18rr""" +836 34 model """transe""" +836 34 loss """softplus""" +836 34 regularizer """no""" +836 34 optimizer """adam""" +836 34 training_loop """owa""" +836 34 negative_sampler """basic""" +836 34 evaluator """rankbased""" +836 35 dataset """wn18rr""" +836 35 model """transe""" +836 35 loss """softplus""" +836 35 regularizer """no""" +836 35 optimizer """adam""" +836 35 training_loop """owa""" +836 35 negative_sampler """basic""" +836 35 evaluator """rankbased""" +836 36 dataset """wn18rr""" +836 36 model """transe""" +836 36 loss """softplus""" +836 36 regularizer """no""" +836 36 optimizer """adam""" +836 36 training_loop """owa""" +836 36 negative_sampler """basic""" +836 36 evaluator """rankbased""" +836 37 dataset """wn18rr""" +836 37 model """transe""" +836 37 loss 
"""softplus""" +836 37 regularizer """no""" +836 37 optimizer """adam""" +836 37 training_loop """owa""" +836 37 negative_sampler """basic""" +836 37 evaluator """rankbased""" +836 38 dataset """wn18rr""" +836 38 model """transe""" +836 38 loss """softplus""" +836 38 regularizer """no""" +836 38 optimizer """adam""" +836 38 training_loop """owa""" +836 38 negative_sampler """basic""" +836 38 evaluator """rankbased""" +836 39 dataset """wn18rr""" +836 39 model """transe""" +836 39 loss """softplus""" +836 39 regularizer """no""" +836 39 optimizer """adam""" +836 39 training_loop """owa""" +836 39 negative_sampler """basic""" +836 39 evaluator """rankbased""" +836 40 dataset """wn18rr""" +836 40 model """transe""" +836 40 loss """softplus""" +836 40 regularizer """no""" +836 40 optimizer """adam""" +836 40 training_loop """owa""" +836 40 negative_sampler """basic""" +836 40 evaluator """rankbased""" +836 41 dataset """wn18rr""" +836 41 model """transe""" +836 41 loss """softplus""" +836 41 regularizer """no""" +836 41 optimizer """adam""" +836 41 training_loop """owa""" +836 41 negative_sampler """basic""" +836 41 evaluator """rankbased""" +836 42 dataset """wn18rr""" +836 42 model """transe""" +836 42 loss """softplus""" +836 42 regularizer """no""" +836 42 optimizer """adam""" +836 42 training_loop """owa""" +836 42 negative_sampler """basic""" +836 42 evaluator """rankbased""" +836 43 dataset """wn18rr""" +836 43 model """transe""" +836 43 loss """softplus""" +836 43 regularizer """no""" +836 43 optimizer """adam""" +836 43 training_loop """owa""" +836 43 negative_sampler """basic""" +836 43 evaluator """rankbased""" +836 44 dataset """wn18rr""" +836 44 model """transe""" +836 44 loss """softplus""" +836 44 regularizer """no""" +836 44 optimizer """adam""" +836 44 training_loop """owa""" +836 44 negative_sampler """basic""" +836 44 evaluator """rankbased""" +836 45 dataset """wn18rr""" +836 45 model """transe""" +836 45 loss """softplus""" +836 45 regularizer 
"""no""" +836 45 optimizer """adam""" +836 45 training_loop """owa""" +836 45 negative_sampler """basic""" +836 45 evaluator """rankbased""" +836 46 dataset """wn18rr""" +836 46 model """transe""" +836 46 loss """softplus""" +836 46 regularizer """no""" +836 46 optimizer """adam""" +836 46 training_loop """owa""" +836 46 negative_sampler """basic""" +836 46 evaluator """rankbased""" +836 47 dataset """wn18rr""" +836 47 model """transe""" +836 47 loss """softplus""" +836 47 regularizer """no""" +836 47 optimizer """adam""" +836 47 training_loop """owa""" +836 47 negative_sampler """basic""" +836 47 evaluator """rankbased""" +836 48 dataset """wn18rr""" +836 48 model """transe""" +836 48 loss """softplus""" +836 48 regularizer """no""" +836 48 optimizer """adam""" +836 48 training_loop """owa""" +836 48 negative_sampler """basic""" +836 48 evaluator """rankbased""" +836 49 dataset """wn18rr""" +836 49 model """transe""" +836 49 loss """softplus""" +836 49 regularizer """no""" +836 49 optimizer """adam""" +836 49 training_loop """owa""" +836 49 negative_sampler """basic""" +836 49 evaluator """rankbased""" +836 50 dataset """wn18rr""" +836 50 model """transe""" +836 50 loss """softplus""" +836 50 regularizer """no""" +836 50 optimizer """adam""" +836 50 training_loop """owa""" +836 50 negative_sampler """basic""" +836 50 evaluator """rankbased""" +836 51 dataset """wn18rr""" +836 51 model """transe""" +836 51 loss """softplus""" +836 51 regularizer """no""" +836 51 optimizer """adam""" +836 51 training_loop """owa""" +836 51 negative_sampler """basic""" +836 51 evaluator """rankbased""" +836 52 dataset """wn18rr""" +836 52 model """transe""" +836 52 loss """softplus""" +836 52 regularizer """no""" +836 52 optimizer """adam""" +836 52 training_loop """owa""" +836 52 negative_sampler """basic""" +836 52 evaluator """rankbased""" +836 53 dataset """wn18rr""" +836 53 model """transe""" +836 53 loss """softplus""" +836 53 regularizer """no""" +836 53 optimizer """adam""" 
+836 53 training_loop """owa""" +836 53 negative_sampler """basic""" +836 53 evaluator """rankbased""" +836 54 dataset """wn18rr""" +836 54 model """transe""" +836 54 loss """softplus""" +836 54 regularizer """no""" +836 54 optimizer """adam""" +836 54 training_loop """owa""" +836 54 negative_sampler """basic""" +836 54 evaluator """rankbased""" +836 55 dataset """wn18rr""" +836 55 model """transe""" +836 55 loss """softplus""" +836 55 regularizer """no""" +836 55 optimizer """adam""" +836 55 training_loop """owa""" +836 55 negative_sampler """basic""" +836 55 evaluator """rankbased""" +836 56 dataset """wn18rr""" +836 56 model """transe""" +836 56 loss """softplus""" +836 56 regularizer """no""" +836 56 optimizer """adam""" +836 56 training_loop """owa""" +836 56 negative_sampler """basic""" +836 56 evaluator """rankbased""" +836 57 dataset """wn18rr""" +836 57 model """transe""" +836 57 loss """softplus""" +836 57 regularizer """no""" +836 57 optimizer """adam""" +836 57 training_loop """owa""" +836 57 negative_sampler """basic""" +836 57 evaluator """rankbased""" +836 58 dataset """wn18rr""" +836 58 model """transe""" +836 58 loss """softplus""" +836 58 regularizer """no""" +836 58 optimizer """adam""" +836 58 training_loop """owa""" +836 58 negative_sampler """basic""" +836 58 evaluator """rankbased""" +836 59 dataset """wn18rr""" +836 59 model """transe""" +836 59 loss """softplus""" +836 59 regularizer """no""" +836 59 optimizer """adam""" +836 59 training_loop """owa""" +836 59 negative_sampler """basic""" +836 59 evaluator """rankbased""" +836 60 dataset """wn18rr""" +836 60 model """transe""" +836 60 loss """softplus""" +836 60 regularizer """no""" +836 60 optimizer """adam""" +836 60 training_loop """owa""" +836 60 negative_sampler """basic""" +836 60 evaluator """rankbased""" +836 61 dataset """wn18rr""" +836 61 model """transe""" +836 61 loss """softplus""" +836 61 regularizer """no""" +836 61 optimizer """adam""" +836 61 training_loop """owa""" +836 61 
negative_sampler """basic""" +836 61 evaluator """rankbased""" +836 62 dataset """wn18rr""" +836 62 model """transe""" +836 62 loss """softplus""" +836 62 regularizer """no""" +836 62 optimizer """adam""" +836 62 training_loop """owa""" +836 62 negative_sampler """basic""" +836 62 evaluator """rankbased""" +836 63 dataset """wn18rr""" +836 63 model """transe""" +836 63 loss """softplus""" +836 63 regularizer """no""" +836 63 optimizer """adam""" +836 63 training_loop """owa""" +836 63 negative_sampler """basic""" +836 63 evaluator """rankbased""" +836 64 dataset """wn18rr""" +836 64 model """transe""" +836 64 loss """softplus""" +836 64 regularizer """no""" +836 64 optimizer """adam""" +836 64 training_loop """owa""" +836 64 negative_sampler """basic""" +836 64 evaluator """rankbased""" +836 65 dataset """wn18rr""" +836 65 model """transe""" +836 65 loss """softplus""" +836 65 regularizer """no""" +836 65 optimizer """adam""" +836 65 training_loop """owa""" +836 65 negative_sampler """basic""" +836 65 evaluator """rankbased""" +836 66 dataset """wn18rr""" +836 66 model """transe""" +836 66 loss """softplus""" +836 66 regularizer """no""" +836 66 optimizer """adam""" +836 66 training_loop """owa""" +836 66 negative_sampler """basic""" +836 66 evaluator """rankbased""" +836 67 dataset """wn18rr""" +836 67 model """transe""" +836 67 loss """softplus""" +836 67 regularizer """no""" +836 67 optimizer """adam""" +836 67 training_loop """owa""" +836 67 negative_sampler """basic""" +836 67 evaluator """rankbased""" +836 68 dataset """wn18rr""" +836 68 model """transe""" +836 68 loss """softplus""" +836 68 regularizer """no""" +836 68 optimizer """adam""" +836 68 training_loop """owa""" +836 68 negative_sampler """basic""" +836 68 evaluator """rankbased""" +836 69 dataset """wn18rr""" +836 69 model """transe""" +836 69 loss """softplus""" +836 69 regularizer """no""" +836 69 optimizer """adam""" +836 69 training_loop """owa""" +836 69 negative_sampler """basic""" +836 69 
evaluator """rankbased""" +836 70 dataset """wn18rr""" +836 70 model """transe""" +836 70 loss """softplus""" +836 70 regularizer """no""" +836 70 optimizer """adam""" +836 70 training_loop """owa""" +836 70 negative_sampler """basic""" +836 70 evaluator """rankbased""" +836 71 dataset """wn18rr""" +836 71 model """transe""" +836 71 loss """softplus""" +836 71 regularizer """no""" +836 71 optimizer """adam""" +836 71 training_loop """owa""" +836 71 negative_sampler """basic""" +836 71 evaluator """rankbased""" +836 72 dataset """wn18rr""" +836 72 model """transe""" +836 72 loss """softplus""" +836 72 regularizer """no""" +836 72 optimizer """adam""" +836 72 training_loop """owa""" +836 72 negative_sampler """basic""" +836 72 evaluator """rankbased""" +836 73 dataset """wn18rr""" +836 73 model """transe""" +836 73 loss """softplus""" +836 73 regularizer """no""" +836 73 optimizer """adam""" +836 73 training_loop """owa""" +836 73 negative_sampler """basic""" +836 73 evaluator """rankbased""" +836 74 dataset """wn18rr""" +836 74 model """transe""" +836 74 loss """softplus""" +836 74 regularizer """no""" +836 74 optimizer """adam""" +836 74 training_loop """owa""" +836 74 negative_sampler """basic""" +836 74 evaluator """rankbased""" +836 75 dataset """wn18rr""" +836 75 model """transe""" +836 75 loss """softplus""" +836 75 regularizer """no""" +836 75 optimizer """adam""" +836 75 training_loop """owa""" +836 75 negative_sampler """basic""" +836 75 evaluator """rankbased""" +836 76 dataset """wn18rr""" +836 76 model """transe""" +836 76 loss """softplus""" +836 76 regularizer """no""" +836 76 optimizer """adam""" +836 76 training_loop """owa""" +836 76 negative_sampler """basic""" +836 76 evaluator """rankbased""" +836 77 dataset """wn18rr""" +836 77 model """transe""" +836 77 loss """softplus""" +836 77 regularizer """no""" +836 77 optimizer """adam""" +836 77 training_loop """owa""" +836 77 negative_sampler """basic""" +836 77 evaluator """rankbased""" +836 78 
dataset """wn18rr""" +836 78 model """transe""" +836 78 loss """softplus""" +836 78 regularizer """no""" +836 78 optimizer """adam""" +836 78 training_loop """owa""" +836 78 negative_sampler """basic""" +836 78 evaluator """rankbased""" +836 79 dataset """wn18rr""" +836 79 model """transe""" +836 79 loss """softplus""" +836 79 regularizer """no""" +836 79 optimizer """adam""" +836 79 training_loop """owa""" +836 79 negative_sampler """basic""" +836 79 evaluator """rankbased""" +836 80 dataset """wn18rr""" +836 80 model """transe""" +836 80 loss """softplus""" +836 80 regularizer """no""" +836 80 optimizer """adam""" +836 80 training_loop """owa""" +836 80 negative_sampler """basic""" +836 80 evaluator """rankbased""" +836 81 dataset """wn18rr""" +836 81 model """transe""" +836 81 loss """softplus""" +836 81 regularizer """no""" +836 81 optimizer """adam""" +836 81 training_loop """owa""" +836 81 negative_sampler """basic""" +836 81 evaluator """rankbased""" +836 82 dataset """wn18rr""" +836 82 model """transe""" +836 82 loss """softplus""" +836 82 regularizer """no""" +836 82 optimizer """adam""" +836 82 training_loop """owa""" +836 82 negative_sampler """basic""" +836 82 evaluator """rankbased""" +836 83 dataset """wn18rr""" +836 83 model """transe""" +836 83 loss """softplus""" +836 83 regularizer """no""" +836 83 optimizer """adam""" +836 83 training_loop """owa""" +836 83 negative_sampler """basic""" +836 83 evaluator """rankbased""" +836 84 dataset """wn18rr""" +836 84 model """transe""" +836 84 loss """softplus""" +836 84 regularizer """no""" +836 84 optimizer """adam""" +836 84 training_loop """owa""" +836 84 negative_sampler """basic""" +836 84 evaluator """rankbased""" +836 85 dataset """wn18rr""" +836 85 model """transe""" +836 85 loss """softplus""" +836 85 regularizer """no""" +836 85 optimizer """adam""" +836 85 training_loop """owa""" +836 85 negative_sampler """basic""" +836 85 evaluator """rankbased""" +836 86 dataset """wn18rr""" +836 86 model 
"""transe""" +836 86 loss """softplus""" +836 86 regularizer """no""" +836 86 optimizer """adam""" +836 86 training_loop """owa""" +836 86 negative_sampler """basic""" +836 86 evaluator """rankbased""" +836 87 dataset """wn18rr""" +836 87 model """transe""" +836 87 loss """softplus""" +836 87 regularizer """no""" +836 87 optimizer """adam""" +836 87 training_loop """owa""" +836 87 negative_sampler """basic""" +836 87 evaluator """rankbased""" +836 88 dataset """wn18rr""" +836 88 model """transe""" +836 88 loss """softplus""" +836 88 regularizer """no""" +836 88 optimizer """adam""" +836 88 training_loop """owa""" +836 88 negative_sampler """basic""" +836 88 evaluator """rankbased""" +836 89 dataset """wn18rr""" +836 89 model """transe""" +836 89 loss """softplus""" +836 89 regularizer """no""" +836 89 optimizer """adam""" +836 89 training_loop """owa""" +836 89 negative_sampler """basic""" +836 89 evaluator """rankbased""" +836 90 dataset """wn18rr""" +836 90 model """transe""" +836 90 loss """softplus""" +836 90 regularizer """no""" +836 90 optimizer """adam""" +836 90 training_loop """owa""" +836 90 negative_sampler """basic""" +836 90 evaluator """rankbased""" +836 91 dataset """wn18rr""" +836 91 model """transe""" +836 91 loss """softplus""" +836 91 regularizer """no""" +836 91 optimizer """adam""" +836 91 training_loop """owa""" +836 91 negative_sampler """basic""" +836 91 evaluator """rankbased""" +836 92 dataset """wn18rr""" +836 92 model """transe""" +836 92 loss """softplus""" +836 92 regularizer """no""" +836 92 optimizer """adam""" +836 92 training_loop """owa""" +836 92 negative_sampler """basic""" +836 92 evaluator """rankbased""" +836 93 dataset """wn18rr""" +836 93 model """transe""" +836 93 loss """softplus""" +836 93 regularizer """no""" +836 93 optimizer """adam""" +836 93 training_loop """owa""" +836 93 negative_sampler """basic""" +836 93 evaluator """rankbased""" +836 94 dataset """wn18rr""" +836 94 model """transe""" +836 94 loss 
"""softplus""" +836 94 regularizer """no""" +836 94 optimizer """adam""" +836 94 training_loop """owa""" +836 94 negative_sampler """basic""" +836 94 evaluator """rankbased""" +836 95 dataset """wn18rr""" +836 95 model """transe""" +836 95 loss """softplus""" +836 95 regularizer """no""" +836 95 optimizer """adam""" +836 95 training_loop """owa""" +836 95 negative_sampler """basic""" +836 95 evaluator """rankbased""" +836 96 dataset """wn18rr""" +836 96 model """transe""" +836 96 loss """softplus""" +836 96 regularizer """no""" +836 96 optimizer """adam""" +836 96 training_loop """owa""" +836 96 negative_sampler """basic""" +836 96 evaluator """rankbased""" +836 97 dataset """wn18rr""" +836 97 model """transe""" +836 97 loss """softplus""" +836 97 regularizer """no""" +836 97 optimizer """adam""" +836 97 training_loop """owa""" +836 97 negative_sampler """basic""" +836 97 evaluator """rankbased""" +836 98 dataset """wn18rr""" +836 98 model """transe""" +836 98 loss """softplus""" +836 98 regularizer """no""" +836 98 optimizer """adam""" +836 98 training_loop """owa""" +836 98 negative_sampler """basic""" +836 98 evaluator """rankbased""" +836 99 dataset """wn18rr""" +836 99 model """transe""" +836 99 loss """softplus""" +836 99 regularizer """no""" +836 99 optimizer """adam""" +836 99 training_loop """owa""" +836 99 negative_sampler """basic""" +836 99 evaluator """rankbased""" +836 100 dataset """wn18rr""" +836 100 model """transe""" +836 100 loss """softplus""" +836 100 regularizer """no""" +836 100 optimizer """adam""" +836 100 training_loop """owa""" +836 100 negative_sampler """basic""" +836 100 evaluator """rankbased""" +837 1 model.embedding_dim 1.0 +837 1 model.scoring_fct_norm 2.0 +837 1 loss.margin 3.5274266568647854 +837 1 optimizer.lr 0.0017712383445401788 +837 1 negative_sampler.num_negs_per_pos 5.0 +837 1 training.batch_size 2.0 +837 2 model.embedding_dim 2.0 +837 2 model.scoring_fct_norm 2.0 +837 2 loss.margin 9.441352877640641 +837 2 optimizer.lr 
0.02443793918828204 +837 2 negative_sampler.num_negs_per_pos 2.0 +837 2 training.batch_size 2.0 +837 3 model.embedding_dim 2.0 +837 3 model.scoring_fct_norm 2.0 +837 3 loss.margin 6.071301863333844 +837 3 optimizer.lr 0.0820779799622402 +837 3 negative_sampler.num_negs_per_pos 88.0 +837 3 training.batch_size 0.0 +837 4 model.embedding_dim 0.0 +837 4 model.scoring_fct_norm 2.0 +837 4 loss.margin 9.314151305745812 +837 4 optimizer.lr 0.05945724170559306 +837 4 negative_sampler.num_negs_per_pos 64.0 +837 4 training.batch_size 0.0 +837 5 model.embedding_dim 2.0 +837 5 model.scoring_fct_norm 2.0 +837 5 loss.margin 3.905452728815993 +837 5 optimizer.lr 0.022941145699217926 +837 5 negative_sampler.num_negs_per_pos 32.0 +837 5 training.batch_size 2.0 +837 6 model.embedding_dim 2.0 +837 6 model.scoring_fct_norm 2.0 +837 6 loss.margin 1.8773274957931958 +837 6 optimizer.lr 0.013532915511440065 +837 6 negative_sampler.num_negs_per_pos 58.0 +837 6 training.batch_size 2.0 +837 7 model.embedding_dim 1.0 +837 7 model.scoring_fct_norm 1.0 +837 7 loss.margin 8.30425642330013 +837 7 optimizer.lr 0.006253945811729595 +837 7 negative_sampler.num_negs_per_pos 86.0 +837 7 training.batch_size 1.0 +837 8 model.embedding_dim 0.0 +837 8 model.scoring_fct_norm 1.0 +837 8 loss.margin 4.861293142350702 +837 8 optimizer.lr 0.01359989462250817 +837 8 negative_sampler.num_negs_per_pos 2.0 +837 8 training.batch_size 1.0 +837 9 model.embedding_dim 1.0 +837 9 model.scoring_fct_norm 2.0 +837 9 loss.margin 4.422282478509686 +837 9 optimizer.lr 0.00988967538282225 +837 9 negative_sampler.num_negs_per_pos 25.0 +837 9 training.batch_size 1.0 +837 10 model.embedding_dim 2.0 +837 10 model.scoring_fct_norm 2.0 +837 10 loss.margin 2.426244834826671 +837 10 optimizer.lr 0.009198852332852892 +837 10 negative_sampler.num_negs_per_pos 95.0 +837 10 training.batch_size 0.0 +837 11 model.embedding_dim 2.0 +837 11 model.scoring_fct_norm 2.0 +837 11 loss.margin 4.143028650824767 +837 11 optimizer.lr 
0.01126206129006782 +837 11 negative_sampler.num_negs_per_pos 37.0 +837 11 training.batch_size 1.0 +837 12 model.embedding_dim 2.0 +837 12 model.scoring_fct_norm 1.0 +837 12 loss.margin 9.222971568195591 +837 12 optimizer.lr 0.0024202429567005963 +837 12 negative_sampler.num_negs_per_pos 79.0 +837 12 training.batch_size 0.0 +837 13 model.embedding_dim 1.0 +837 13 model.scoring_fct_norm 1.0 +837 13 loss.margin 4.955942277200082 +837 13 optimizer.lr 0.001738989532658534 +837 13 negative_sampler.num_negs_per_pos 75.0 +837 13 training.batch_size 2.0 +837 14 model.embedding_dim 1.0 +837 14 model.scoring_fct_norm 2.0 +837 14 loss.margin 4.105716448134512 +837 14 optimizer.lr 0.016470451872624723 +837 14 negative_sampler.num_negs_per_pos 37.0 +837 14 training.batch_size 0.0 +837 15 model.embedding_dim 1.0 +837 15 model.scoring_fct_norm 1.0 +837 15 loss.margin 7.546504206669793 +837 15 optimizer.lr 0.0012810720614620833 +837 15 negative_sampler.num_negs_per_pos 91.0 +837 15 training.batch_size 0.0 +837 16 model.embedding_dim 2.0 +837 16 model.scoring_fct_norm 2.0 +837 16 loss.margin 1.4619604577193506 +837 16 optimizer.lr 0.0010164807602446562 +837 16 negative_sampler.num_negs_per_pos 69.0 +837 16 training.batch_size 0.0 +837 17 model.embedding_dim 2.0 +837 17 model.scoring_fct_norm 1.0 +837 17 loss.margin 1.3715016537995552 +837 17 optimizer.lr 0.008328736052248438 +837 17 negative_sampler.num_negs_per_pos 18.0 +837 17 training.batch_size 2.0 +837 18 model.embedding_dim 2.0 +837 18 model.scoring_fct_norm 2.0 +837 18 loss.margin 8.971218263636027 +837 18 optimizer.lr 0.03994474858972353 +837 18 negative_sampler.num_negs_per_pos 36.0 +837 18 training.batch_size 1.0 +837 19 model.embedding_dim 0.0 +837 19 model.scoring_fct_norm 1.0 +837 19 loss.margin 7.262516303277976 +837 19 optimizer.lr 0.02560926007286083 +837 19 negative_sampler.num_negs_per_pos 84.0 +837 19 training.batch_size 1.0 +837 20 model.embedding_dim 2.0 +837 20 model.scoring_fct_norm 1.0 +837 20 loss.margin 
4.540732366896505 +837 20 optimizer.lr 0.0036830472691540554 +837 20 negative_sampler.num_negs_per_pos 83.0 +837 20 training.batch_size 1.0 +837 21 model.embedding_dim 2.0 +837 21 model.scoring_fct_norm 2.0 +837 21 loss.margin 9.450971543514019 +837 21 optimizer.lr 0.06994017342488014 +837 21 negative_sampler.num_negs_per_pos 36.0 +837 21 training.batch_size 2.0 +837 22 model.embedding_dim 2.0 +837 22 model.scoring_fct_norm 1.0 +837 22 loss.margin 3.6017811180550394 +837 22 optimizer.lr 0.0011304296298630552 +837 22 negative_sampler.num_negs_per_pos 61.0 +837 22 training.batch_size 2.0 +837 23 model.embedding_dim 0.0 +837 23 model.scoring_fct_norm 2.0 +837 23 loss.margin 5.4796430193803145 +837 23 optimizer.lr 0.002753364657597133 +837 23 negative_sampler.num_negs_per_pos 70.0 +837 23 training.batch_size 0.0 +837 24 model.embedding_dim 1.0 +837 24 model.scoring_fct_norm 2.0 +837 24 loss.margin 7.398885331426775 +837 24 optimizer.lr 0.04911961955458914 +837 24 negative_sampler.num_negs_per_pos 23.0 +837 24 training.batch_size 0.0 +837 25 model.embedding_dim 0.0 +837 25 model.scoring_fct_norm 1.0 +837 25 loss.margin 6.748494248521857 +837 25 optimizer.lr 0.012799027440040434 +837 25 negative_sampler.num_negs_per_pos 84.0 +837 25 training.batch_size 1.0 +837 26 model.embedding_dim 1.0 +837 26 model.scoring_fct_norm 1.0 +837 26 loss.margin 6.6094637716832905 +837 26 optimizer.lr 0.017878891737841518 +837 26 negative_sampler.num_negs_per_pos 35.0 +837 26 training.batch_size 1.0 +837 27 model.embedding_dim 2.0 +837 27 model.scoring_fct_norm 1.0 +837 27 loss.margin 0.8880202120485936 +837 27 optimizer.lr 0.001580341899122223 +837 27 negative_sampler.num_negs_per_pos 80.0 +837 27 training.batch_size 1.0 +837 28 model.embedding_dim 1.0 +837 28 model.scoring_fct_norm 1.0 +837 28 loss.margin 0.9925819489120511 +837 28 optimizer.lr 0.02304751446477333 +837 28 negative_sampler.num_negs_per_pos 51.0 +837 28 training.batch_size 0.0 +837 29 model.embedding_dim 2.0 +837 29 
model.scoring_fct_norm 1.0 +837 29 loss.margin 4.296967817449794 +837 29 optimizer.lr 0.008158032395278335 +837 29 negative_sampler.num_negs_per_pos 68.0 +837 29 training.batch_size 1.0 +837 30 model.embedding_dim 2.0 +837 30 model.scoring_fct_norm 1.0 +837 30 loss.margin 3.9509926018107056 +837 30 optimizer.lr 0.033003719203478304 +837 30 negative_sampler.num_negs_per_pos 8.0 +837 30 training.batch_size 2.0 +837 31 model.embedding_dim 2.0 +837 31 model.scoring_fct_norm 2.0 +837 31 loss.margin 0.8221619566017422 +837 31 optimizer.lr 0.0013858241922571559 +837 31 negative_sampler.num_negs_per_pos 20.0 +837 31 training.batch_size 1.0 +837 32 model.embedding_dim 0.0 +837 32 model.scoring_fct_norm 2.0 +837 32 loss.margin 9.564808001154079 +837 32 optimizer.lr 0.002620490899040497 +837 32 negative_sampler.num_negs_per_pos 59.0 +837 32 training.batch_size 2.0 +837 33 model.embedding_dim 2.0 +837 33 model.scoring_fct_norm 1.0 +837 33 loss.margin 9.880056376482363 +837 33 optimizer.lr 0.013042195155688257 +837 33 negative_sampler.num_negs_per_pos 45.0 +837 33 training.batch_size 0.0 +837 34 model.embedding_dim 1.0 +837 34 model.scoring_fct_norm 2.0 +837 34 loss.margin 1.3983279661715522 +837 34 optimizer.lr 0.01908058590067817 +837 34 negative_sampler.num_negs_per_pos 67.0 +837 34 training.batch_size 0.0 +837 35 model.embedding_dim 2.0 +837 35 model.scoring_fct_norm 1.0 +837 35 loss.margin 7.745904147670799 +837 35 optimizer.lr 0.001540905094320666 +837 35 negative_sampler.num_negs_per_pos 82.0 +837 35 training.batch_size 2.0 +837 36 model.embedding_dim 1.0 +837 36 model.scoring_fct_norm 2.0 +837 36 loss.margin 7.410672614381673 +837 36 optimizer.lr 0.008019395741077393 +837 36 negative_sampler.num_negs_per_pos 45.0 +837 36 training.batch_size 1.0 +837 37 model.embedding_dim 2.0 +837 37 model.scoring_fct_norm 2.0 +837 37 loss.margin 8.682682107785686 +837 37 optimizer.lr 0.00911628921598707 +837 37 negative_sampler.num_negs_per_pos 35.0 +837 37 training.batch_size 0.0 +837 
38 model.embedding_dim 0.0 +837 38 model.scoring_fct_norm 1.0 +837 38 loss.margin 3.3999429793587126 +837 38 optimizer.lr 0.039560140267609345 +837 38 negative_sampler.num_negs_per_pos 56.0 +837 38 training.batch_size 1.0 +837 39 model.embedding_dim 1.0 +837 39 model.scoring_fct_norm 2.0 +837 39 loss.margin 8.911123185690043 +837 39 optimizer.lr 0.057985360371951324 +837 39 negative_sampler.num_negs_per_pos 33.0 +837 39 training.batch_size 0.0 +837 40 model.embedding_dim 1.0 +837 40 model.scoring_fct_norm 1.0 +837 40 loss.margin 2.2689422748034542 +837 40 optimizer.lr 0.0015940402730827217 +837 40 negative_sampler.num_negs_per_pos 77.0 +837 40 training.batch_size 0.0 +837 41 model.embedding_dim 0.0 +837 41 model.scoring_fct_norm 2.0 +837 41 loss.margin 0.952516781497022 +837 41 optimizer.lr 0.003657753418572859 +837 41 negative_sampler.num_negs_per_pos 13.0 +837 41 training.batch_size 0.0 +837 42 model.embedding_dim 2.0 +837 42 model.scoring_fct_norm 1.0 +837 42 loss.margin 4.748589161103947 +837 42 optimizer.lr 0.0011024190581058822 +837 42 negative_sampler.num_negs_per_pos 23.0 +837 42 training.batch_size 1.0 +837 43 model.embedding_dim 0.0 +837 43 model.scoring_fct_norm 2.0 +837 43 loss.margin 9.862929237953004 +837 43 optimizer.lr 0.018292699765141442 +837 43 negative_sampler.num_negs_per_pos 17.0 +837 43 training.batch_size 2.0 +837 44 model.embedding_dim 0.0 +837 44 model.scoring_fct_norm 1.0 +837 44 loss.margin 9.104018750081698 +837 44 optimizer.lr 0.009341380680509999 +837 44 negative_sampler.num_negs_per_pos 20.0 +837 44 training.batch_size 2.0 +837 45 model.embedding_dim 0.0 +837 45 model.scoring_fct_norm 1.0 +837 45 loss.margin 7.808936001968477 +837 45 optimizer.lr 0.003210221482411599 +837 45 negative_sampler.num_negs_per_pos 64.0 +837 45 training.batch_size 1.0 +837 46 model.embedding_dim 2.0 +837 46 model.scoring_fct_norm 1.0 +837 46 loss.margin 5.440628473976114 +837 46 optimizer.lr 0.032406809301026455 +837 46 negative_sampler.num_negs_per_pos 
95.0 +837 46 training.batch_size 2.0 +837 47 model.embedding_dim 0.0 +837 47 model.scoring_fct_norm 1.0 +837 47 loss.margin 7.502857221647002 +837 47 optimizer.lr 0.06616253540904572 +837 47 negative_sampler.num_negs_per_pos 55.0 +837 47 training.batch_size 2.0 +837 48 model.embedding_dim 0.0 +837 48 model.scoring_fct_norm 2.0 +837 48 loss.margin 1.9924210944373761 +837 48 optimizer.lr 0.006480645253838281 +837 48 negative_sampler.num_negs_per_pos 38.0 +837 48 training.batch_size 0.0 +837 49 model.embedding_dim 2.0 +837 49 model.scoring_fct_norm 2.0 +837 49 loss.margin 1.6535051264085603 +837 49 optimizer.lr 0.034639673248329624 +837 49 negative_sampler.num_negs_per_pos 79.0 +837 49 training.batch_size 2.0 +837 50 model.embedding_dim 0.0 +837 50 model.scoring_fct_norm 1.0 +837 50 loss.margin 3.3922894972911357 +837 50 optimizer.lr 0.0015284305912132512 +837 50 negative_sampler.num_negs_per_pos 75.0 +837 50 training.batch_size 0.0 +837 51 model.embedding_dim 0.0 +837 51 model.scoring_fct_norm 2.0 +837 51 loss.margin 3.0780402976341117 +837 51 optimizer.lr 0.001314957197480183 +837 51 negative_sampler.num_negs_per_pos 62.0 +837 51 training.batch_size 0.0 +837 52 model.embedding_dim 1.0 +837 52 model.scoring_fct_norm 2.0 +837 52 loss.margin 7.149383918508667 +837 52 optimizer.lr 0.026798600056078467 +837 52 negative_sampler.num_negs_per_pos 1.0 +837 52 training.batch_size 0.0 +837 53 model.embedding_dim 0.0 +837 53 model.scoring_fct_norm 1.0 +837 53 loss.margin 7.9575438742678966 +837 53 optimizer.lr 0.0014638641388748153 +837 53 negative_sampler.num_negs_per_pos 65.0 +837 53 training.batch_size 0.0 +837 54 model.embedding_dim 0.0 +837 54 model.scoring_fct_norm 2.0 +837 54 loss.margin 5.225594636783351 +837 54 optimizer.lr 0.004861041481845595 +837 54 negative_sampler.num_negs_per_pos 50.0 +837 54 training.batch_size 2.0 +837 55 model.embedding_dim 0.0 +837 55 model.scoring_fct_norm 1.0 +837 55 loss.margin 5.422212637776955 +837 55 optimizer.lr 0.03074835554786664 
+837 55 negative_sampler.num_negs_per_pos 78.0 +837 55 training.batch_size 1.0 +837 56 model.embedding_dim 0.0 +837 56 model.scoring_fct_norm 1.0 +837 56 loss.margin 5.22550777469593 +837 56 optimizer.lr 0.0018195017273182471 +837 56 negative_sampler.num_negs_per_pos 14.0 +837 56 training.batch_size 0.0 +837 57 model.embedding_dim 0.0 +837 57 model.scoring_fct_norm 1.0 +837 57 loss.margin 8.73163375232328 +837 57 optimizer.lr 0.07131072965872637 +837 57 negative_sampler.num_negs_per_pos 42.0 +837 57 training.batch_size 1.0 +837 58 model.embedding_dim 0.0 +837 58 model.scoring_fct_norm 2.0 +837 58 loss.margin 9.081810994404046 +837 58 optimizer.lr 0.0022948185083767063 +837 58 negative_sampler.num_negs_per_pos 2.0 +837 58 training.batch_size 2.0 +837 59 model.embedding_dim 1.0 +837 59 model.scoring_fct_norm 1.0 +837 59 loss.margin 7.926462979504997 +837 59 optimizer.lr 0.004524650280264719 +837 59 negative_sampler.num_negs_per_pos 84.0 +837 59 training.batch_size 0.0 +837 60 model.embedding_dim 1.0 +837 60 model.scoring_fct_norm 2.0 +837 60 loss.margin 4.624244004094508 +837 60 optimizer.lr 0.005336012246788266 +837 60 negative_sampler.num_negs_per_pos 26.0 +837 60 training.batch_size 2.0 +837 61 model.embedding_dim 1.0 +837 61 model.scoring_fct_norm 1.0 +837 61 loss.margin 5.303102440503464 +837 61 optimizer.lr 0.012409300852328648 +837 61 negative_sampler.num_negs_per_pos 76.0 +837 61 training.batch_size 2.0 +837 62 model.embedding_dim 1.0 +837 62 model.scoring_fct_norm 1.0 +837 62 loss.margin 9.414390203471624 +837 62 optimizer.lr 0.02982941487622054 +837 62 negative_sampler.num_negs_per_pos 27.0 +837 62 training.batch_size 0.0 +837 63 model.embedding_dim 1.0 +837 63 model.scoring_fct_norm 1.0 +837 63 loss.margin 7.334874347607216 +837 63 optimizer.lr 0.0010393236939507916 +837 63 negative_sampler.num_negs_per_pos 62.0 +837 63 training.batch_size 2.0 +837 64 model.embedding_dim 0.0 +837 64 model.scoring_fct_norm 2.0 +837 64 loss.margin 6.744387834561586 +837 64 
optimizer.lr 0.0012238795689058066 +837 64 negative_sampler.num_negs_per_pos 88.0 +837 64 training.batch_size 0.0 +837 65 model.embedding_dim 0.0 +837 65 model.scoring_fct_norm 2.0 +837 65 loss.margin 2.4428729385839896 +837 65 optimizer.lr 0.003941270859847199 +837 65 negative_sampler.num_negs_per_pos 93.0 +837 65 training.batch_size 1.0 +837 66 model.embedding_dim 1.0 +837 66 model.scoring_fct_norm 1.0 +837 66 loss.margin 3.788005261760035 +837 66 optimizer.lr 0.003151309552489092 +837 66 negative_sampler.num_negs_per_pos 82.0 +837 66 training.batch_size 0.0 +837 67 model.embedding_dim 1.0 +837 67 model.scoring_fct_norm 2.0 +837 67 loss.margin 0.714764826675917 +837 67 optimizer.lr 0.07832910447055778 +837 67 negative_sampler.num_negs_per_pos 68.0 +837 67 training.batch_size 2.0 +837 68 model.embedding_dim 2.0 +837 68 model.scoring_fct_norm 1.0 +837 68 loss.margin 6.533173518771353 +837 68 optimizer.lr 0.03587282766730837 +837 68 negative_sampler.num_negs_per_pos 36.0 +837 68 training.batch_size 1.0 +837 69 model.embedding_dim 2.0 +837 69 model.scoring_fct_norm 2.0 +837 69 loss.margin 4.5529718522620914 +837 69 optimizer.lr 0.085083857948375 +837 69 negative_sampler.num_negs_per_pos 20.0 +837 69 training.batch_size 2.0 +837 70 model.embedding_dim 1.0 +837 70 model.scoring_fct_norm 2.0 +837 70 loss.margin 4.51231945070049 +837 70 optimizer.lr 0.0017266023479424956 +837 70 negative_sampler.num_negs_per_pos 42.0 +837 70 training.batch_size 2.0 +837 71 model.embedding_dim 0.0 +837 71 model.scoring_fct_norm 1.0 +837 71 loss.margin 8.528755499099441 +837 71 optimizer.lr 0.08702835108535954 +837 71 negative_sampler.num_negs_per_pos 94.0 +837 71 training.batch_size 0.0 +837 72 model.embedding_dim 0.0 +837 72 model.scoring_fct_norm 1.0 +837 72 loss.margin 9.888215753316253 +837 72 optimizer.lr 0.025396452179427587 +837 72 negative_sampler.num_negs_per_pos 36.0 +837 72 training.batch_size 1.0 +837 73 model.embedding_dim 1.0 +837 73 model.scoring_fct_norm 2.0 +837 73 
loss.margin 1.8952519215480237 +837 73 optimizer.lr 0.002890893388941221 +837 73 negative_sampler.num_negs_per_pos 66.0 +837 73 training.batch_size 0.0 +837 74 model.embedding_dim 0.0 +837 74 model.scoring_fct_norm 1.0 +837 74 loss.margin 4.177020633337573 +837 74 optimizer.lr 0.0036122511295247676 +837 74 negative_sampler.num_negs_per_pos 64.0 +837 74 training.batch_size 1.0 +837 75 model.embedding_dim 2.0 +837 75 model.scoring_fct_norm 1.0 +837 75 loss.margin 6.894694281324742 +837 75 optimizer.lr 0.004222532285237787 +837 75 negative_sampler.num_negs_per_pos 27.0 +837 75 training.batch_size 0.0 +837 76 model.embedding_dim 1.0 +837 76 model.scoring_fct_norm 1.0 +837 76 loss.margin 1.5398702163325653 +837 76 optimizer.lr 0.0018493208183572064 +837 76 negative_sampler.num_negs_per_pos 28.0 +837 76 training.batch_size 0.0 +837 77 model.embedding_dim 1.0 +837 77 model.scoring_fct_norm 1.0 +837 77 loss.margin 6.557778781278281 +837 77 optimizer.lr 0.0027317532122047667 +837 77 negative_sampler.num_negs_per_pos 12.0 +837 77 training.batch_size 0.0 +837 78 model.embedding_dim 1.0 +837 78 model.scoring_fct_norm 2.0 +837 78 loss.margin 2.6116938294732455 +837 78 optimizer.lr 0.006408464430877335 +837 78 negative_sampler.num_negs_per_pos 43.0 +837 78 training.batch_size 2.0 +837 79 model.embedding_dim 1.0 +837 79 model.scoring_fct_norm 1.0 +837 79 loss.margin 3.2858348529472807 +837 79 optimizer.lr 0.023120705088427583 +837 79 negative_sampler.num_negs_per_pos 52.0 +837 79 training.batch_size 2.0 +837 80 model.embedding_dim 2.0 +837 80 model.scoring_fct_norm 2.0 +837 80 loss.margin 6.122962312021192 +837 80 optimizer.lr 0.002507356214728765 +837 80 negative_sampler.num_negs_per_pos 81.0 +837 80 training.batch_size 1.0 +837 81 model.embedding_dim 0.0 +837 81 model.scoring_fct_norm 1.0 +837 81 loss.margin 6.913900780408354 +837 81 optimizer.lr 0.0018136562655496275 +837 81 negative_sampler.num_negs_per_pos 9.0 +837 81 training.batch_size 2.0 +837 82 model.embedding_dim 0.0 
+837 82 model.scoring_fct_norm 2.0 +837 82 loss.margin 9.735351141815062 +837 82 optimizer.lr 0.0019297320373081312 +837 82 negative_sampler.num_negs_per_pos 30.0 +837 82 training.batch_size 2.0 +837 83 model.embedding_dim 1.0 +837 83 model.scoring_fct_norm 1.0 +837 83 loss.margin 8.842321855420073 +837 83 optimizer.lr 0.00451308540036601 +837 83 negative_sampler.num_negs_per_pos 63.0 +837 83 training.batch_size 0.0 +837 84 model.embedding_dim 2.0 +837 84 model.scoring_fct_norm 1.0 +837 84 loss.margin 4.027767026390003 +837 84 optimizer.lr 0.0015704568530285335 +837 84 negative_sampler.num_negs_per_pos 3.0 +837 84 training.batch_size 0.0 +837 85 model.embedding_dim 2.0 +837 85 model.scoring_fct_norm 2.0 +837 85 loss.margin 3.8485570628273997 +837 85 optimizer.lr 0.0016370770693218664 +837 85 negative_sampler.num_negs_per_pos 36.0 +837 85 training.batch_size 2.0 +837 86 model.embedding_dim 2.0 +837 86 model.scoring_fct_norm 2.0 +837 86 loss.margin 4.561449535272989 +837 86 optimizer.lr 0.026141903663917193 +837 86 negative_sampler.num_negs_per_pos 84.0 +837 86 training.batch_size 1.0 +837 87 model.embedding_dim 1.0 +837 87 model.scoring_fct_norm 1.0 +837 87 loss.margin 7.190848830917842 +837 87 optimizer.lr 0.007916690635554204 +837 87 negative_sampler.num_negs_per_pos 20.0 +837 87 training.batch_size 0.0 +837 88 model.embedding_dim 1.0 +837 88 model.scoring_fct_norm 2.0 +837 88 loss.margin 5.9380038138214015 +837 88 optimizer.lr 0.014714165330376306 +837 88 negative_sampler.num_negs_per_pos 16.0 +837 88 training.batch_size 0.0 +837 89 model.embedding_dim 1.0 +837 89 model.scoring_fct_norm 1.0 +837 89 loss.margin 8.312379560794133 +837 89 optimizer.lr 0.009326410033606077 +837 89 negative_sampler.num_negs_per_pos 86.0 +837 89 training.batch_size 1.0 +837 90 model.embedding_dim 2.0 +837 90 model.scoring_fct_norm 1.0 +837 90 loss.margin 0.6391413119066907 +837 90 optimizer.lr 0.04504099074997804 +837 90 negative_sampler.num_negs_per_pos 79.0 +837 90 
training.batch_size 0.0 +837 91 model.embedding_dim 1.0 +837 91 model.scoring_fct_norm 1.0 +837 91 loss.margin 1.2354627351941603 +837 91 optimizer.lr 0.0041833796524516536 +837 91 negative_sampler.num_negs_per_pos 40.0 +837 91 training.batch_size 2.0 +837 92 model.embedding_dim 2.0 +837 92 model.scoring_fct_norm 1.0 +837 92 loss.margin 2.3786006114055023 +837 92 optimizer.lr 0.030584068861322287 +837 92 negative_sampler.num_negs_per_pos 99.0 +837 92 training.batch_size 0.0 +837 1 dataset """wn18rr""" +837 1 model """transe""" +837 1 loss """marginranking""" +837 1 regularizer """no""" +837 1 optimizer """adam""" +837 1 training_loop """owa""" +837 1 negative_sampler """basic""" +837 1 evaluator """rankbased""" +837 2 dataset """wn18rr""" +837 2 model """transe""" +837 2 loss """marginranking""" +837 2 regularizer """no""" +837 2 optimizer """adam""" +837 2 training_loop """owa""" +837 2 negative_sampler """basic""" +837 2 evaluator """rankbased""" +837 3 dataset """wn18rr""" +837 3 model """transe""" +837 3 loss """marginranking""" +837 3 regularizer """no""" +837 3 optimizer """adam""" +837 3 training_loop """owa""" +837 3 negative_sampler """basic""" +837 3 evaluator """rankbased""" +837 4 dataset """wn18rr""" +837 4 model """transe""" +837 4 loss """marginranking""" +837 4 regularizer """no""" +837 4 optimizer """adam""" +837 4 training_loop """owa""" +837 4 negative_sampler """basic""" +837 4 evaluator """rankbased""" +837 5 dataset """wn18rr""" +837 5 model """transe""" +837 5 loss """marginranking""" +837 5 regularizer """no""" +837 5 optimizer """adam""" +837 5 training_loop """owa""" +837 5 negative_sampler """basic""" +837 5 evaluator """rankbased""" +837 6 dataset """wn18rr""" +837 6 model """transe""" +837 6 loss """marginranking""" +837 6 regularizer """no""" +837 6 optimizer """adam""" +837 6 training_loop """owa""" +837 6 negative_sampler """basic""" +837 6 evaluator """rankbased""" +837 7 dataset """wn18rr""" +837 7 model """transe""" +837 7 loss 
"""marginranking""" +837 7 regularizer """no""" +837 7 optimizer """adam""" +837 7 training_loop """owa""" +837 7 negative_sampler """basic""" +837 7 evaluator """rankbased""" +837 8 dataset """wn18rr""" +837 8 model """transe""" +837 8 loss """marginranking""" +837 8 regularizer """no""" +837 8 optimizer """adam""" +837 8 training_loop """owa""" +837 8 negative_sampler """basic""" +837 8 evaluator """rankbased""" +837 9 dataset """wn18rr""" +837 9 model """transe""" +837 9 loss """marginranking""" +837 9 regularizer """no""" +837 9 optimizer """adam""" +837 9 training_loop """owa""" +837 9 negative_sampler """basic""" +837 9 evaluator """rankbased""" +837 10 dataset """wn18rr""" +837 10 model """transe""" +837 10 loss """marginranking""" +837 10 regularizer """no""" +837 10 optimizer """adam""" +837 10 training_loop """owa""" +837 10 negative_sampler """basic""" +837 10 evaluator """rankbased""" +837 11 dataset """wn18rr""" +837 11 model """transe""" +837 11 loss """marginranking""" +837 11 regularizer """no""" +837 11 optimizer """adam""" +837 11 training_loop """owa""" +837 11 negative_sampler """basic""" +837 11 evaluator """rankbased""" +837 12 dataset """wn18rr""" +837 12 model """transe""" +837 12 loss """marginranking""" +837 12 regularizer """no""" +837 12 optimizer """adam""" +837 12 training_loop """owa""" +837 12 negative_sampler """basic""" +837 12 evaluator """rankbased""" +837 13 dataset """wn18rr""" +837 13 model """transe""" +837 13 loss """marginranking""" +837 13 regularizer """no""" +837 13 optimizer """adam""" +837 13 training_loop """owa""" +837 13 negative_sampler """basic""" +837 13 evaluator """rankbased""" +837 14 dataset """wn18rr""" +837 14 model """transe""" +837 14 loss """marginranking""" +837 14 regularizer """no""" +837 14 optimizer """adam""" +837 14 training_loop """owa""" +837 14 negative_sampler """basic""" +837 14 evaluator """rankbased""" +837 15 dataset """wn18rr""" +837 15 model """transe""" +837 15 loss """marginranking""" 
+837 15 regularizer """no""" +837 15 optimizer """adam""" +837 15 training_loop """owa""" +837 15 negative_sampler """basic""" +837 15 evaluator """rankbased""" +837 16 dataset """wn18rr""" +837 16 model """transe""" +837 16 loss """marginranking""" +837 16 regularizer """no""" +837 16 optimizer """adam""" +837 16 training_loop """owa""" +837 16 negative_sampler """basic""" +837 16 evaluator """rankbased""" +837 17 dataset """wn18rr""" +837 17 model """transe""" +837 17 loss """marginranking""" +837 17 regularizer """no""" +837 17 optimizer """adam""" +837 17 training_loop """owa""" +837 17 negative_sampler """basic""" +837 17 evaluator """rankbased""" +837 18 dataset """wn18rr""" +837 18 model """transe""" +837 18 loss """marginranking""" +837 18 regularizer """no""" +837 18 optimizer """adam""" +837 18 training_loop """owa""" +837 18 negative_sampler """basic""" +837 18 evaluator """rankbased""" +837 19 dataset """wn18rr""" +837 19 model """transe""" +837 19 loss """marginranking""" +837 19 regularizer """no""" +837 19 optimizer """adam""" +837 19 training_loop """owa""" +837 19 negative_sampler """basic""" +837 19 evaluator """rankbased""" +837 20 dataset """wn18rr""" +837 20 model """transe""" +837 20 loss """marginranking""" +837 20 regularizer """no""" +837 20 optimizer """adam""" +837 20 training_loop """owa""" +837 20 negative_sampler """basic""" +837 20 evaluator """rankbased""" +837 21 dataset """wn18rr""" +837 21 model """transe""" +837 21 loss """marginranking""" +837 21 regularizer """no""" +837 21 optimizer """adam""" +837 21 training_loop """owa""" +837 21 negative_sampler """basic""" +837 21 evaluator """rankbased""" +837 22 dataset """wn18rr""" +837 22 model """transe""" +837 22 loss """marginranking""" +837 22 regularizer """no""" +837 22 optimizer """adam""" +837 22 training_loop """owa""" +837 22 negative_sampler """basic""" +837 22 evaluator """rankbased""" +837 23 dataset """wn18rr""" +837 23 model """transe""" +837 23 loss """marginranking""" 
+837 23 regularizer """no""" +837 23 optimizer """adam""" +837 23 training_loop """owa""" +837 23 negative_sampler """basic""" +837 23 evaluator """rankbased""" +837 24 dataset """wn18rr""" +837 24 model """transe""" +837 24 loss """marginranking""" +837 24 regularizer """no""" +837 24 optimizer """adam""" +837 24 training_loop """owa""" +837 24 negative_sampler """basic""" +837 24 evaluator """rankbased""" +837 25 dataset """wn18rr""" +837 25 model """transe""" +837 25 loss """marginranking""" +837 25 regularizer """no""" +837 25 optimizer """adam""" +837 25 training_loop """owa""" +837 25 negative_sampler """basic""" +837 25 evaluator """rankbased""" +837 26 dataset """wn18rr""" +837 26 model """transe""" +837 26 loss """marginranking""" +837 26 regularizer """no""" +837 26 optimizer """adam""" +837 26 training_loop """owa""" +837 26 negative_sampler """basic""" +837 26 evaluator """rankbased""" +837 27 dataset """wn18rr""" +837 27 model """transe""" +837 27 loss """marginranking""" +837 27 regularizer """no""" +837 27 optimizer """adam""" +837 27 training_loop """owa""" +837 27 negative_sampler """basic""" +837 27 evaluator """rankbased""" +837 28 dataset """wn18rr""" +837 28 model """transe""" +837 28 loss """marginranking""" +837 28 regularizer """no""" +837 28 optimizer """adam""" +837 28 training_loop """owa""" +837 28 negative_sampler """basic""" +837 28 evaluator """rankbased""" +837 29 dataset """wn18rr""" +837 29 model """transe""" +837 29 loss """marginranking""" +837 29 regularizer """no""" +837 29 optimizer """adam""" +837 29 training_loop """owa""" +837 29 negative_sampler """basic""" +837 29 evaluator """rankbased""" +837 30 dataset """wn18rr""" +837 30 model """transe""" +837 30 loss """marginranking""" +837 30 regularizer """no""" +837 30 optimizer """adam""" +837 30 training_loop """owa""" +837 30 negative_sampler """basic""" +837 30 evaluator """rankbased""" +837 31 dataset """wn18rr""" +837 31 model """transe""" +837 31 loss """marginranking""" 
+837 31 regularizer """no""" +837 31 optimizer """adam""" +837 31 training_loop """owa""" +837 31 negative_sampler """basic""" +837 31 evaluator """rankbased""" +837 32 dataset """wn18rr""" +837 32 model """transe""" +837 32 loss """marginranking""" +837 32 regularizer """no""" +837 32 optimizer """adam""" +837 32 training_loop """owa""" +837 32 negative_sampler """basic""" +837 32 evaluator """rankbased""" +837 33 dataset """wn18rr""" +837 33 model """transe""" +837 33 loss """marginranking""" +837 33 regularizer """no""" +837 33 optimizer """adam""" +837 33 training_loop """owa""" +837 33 negative_sampler """basic""" +837 33 evaluator """rankbased""" +837 34 dataset """wn18rr""" +837 34 model """transe""" +837 34 loss """marginranking""" +837 34 regularizer """no""" +837 34 optimizer """adam""" +837 34 training_loop """owa""" +837 34 negative_sampler """basic""" +837 34 evaluator """rankbased""" +837 35 dataset """wn18rr""" +837 35 model """transe""" +837 35 loss """marginranking""" +837 35 regularizer """no""" +837 35 optimizer """adam""" +837 35 training_loop """owa""" +837 35 negative_sampler """basic""" +837 35 evaluator """rankbased""" +837 36 dataset """wn18rr""" +837 36 model """transe""" +837 36 loss """marginranking""" +837 36 regularizer """no""" +837 36 optimizer """adam""" +837 36 training_loop """owa""" +837 36 negative_sampler """basic""" +837 36 evaluator """rankbased""" +837 37 dataset """wn18rr""" +837 37 model """transe""" +837 37 loss """marginranking""" +837 37 regularizer """no""" +837 37 optimizer """adam""" +837 37 training_loop """owa""" +837 37 negative_sampler """basic""" +837 37 evaluator """rankbased""" +837 38 dataset """wn18rr""" +837 38 model """transe""" +837 38 loss """marginranking""" +837 38 regularizer """no""" +837 38 optimizer """adam""" +837 38 training_loop """owa""" +837 38 negative_sampler """basic""" +837 38 evaluator """rankbased""" +837 39 dataset """wn18rr""" +837 39 model """transe""" +837 39 loss """marginranking""" 
+837 39 regularizer """no""" +837 39 optimizer """adam""" +837 39 training_loop """owa""" +837 39 negative_sampler """basic""" +837 39 evaluator """rankbased""" +837 40 dataset """wn18rr""" +837 40 model """transe""" +837 40 loss """marginranking""" +837 40 regularizer """no""" +837 40 optimizer """adam""" +837 40 training_loop """owa""" +837 40 negative_sampler """basic""" +837 40 evaluator """rankbased""" +837 41 dataset """wn18rr""" +837 41 model """transe""" +837 41 loss """marginranking""" +837 41 regularizer """no""" +837 41 optimizer """adam""" +837 41 training_loop """owa""" +837 41 negative_sampler """basic""" +837 41 evaluator """rankbased""" +837 42 dataset """wn18rr""" +837 42 model """transe""" +837 42 loss """marginranking""" +837 42 regularizer """no""" +837 42 optimizer """adam""" +837 42 training_loop """owa""" +837 42 negative_sampler """basic""" +837 42 evaluator """rankbased""" +837 43 dataset """wn18rr""" +837 43 model """transe""" +837 43 loss """marginranking""" +837 43 regularizer """no""" +837 43 optimizer """adam""" +837 43 training_loop """owa""" +837 43 negative_sampler """basic""" +837 43 evaluator """rankbased""" +837 44 dataset """wn18rr""" +837 44 model """transe""" +837 44 loss """marginranking""" +837 44 regularizer """no""" +837 44 optimizer """adam""" +837 44 training_loop """owa""" +837 44 negative_sampler """basic""" +837 44 evaluator """rankbased""" +837 45 dataset """wn18rr""" +837 45 model """transe""" +837 45 loss """marginranking""" +837 45 regularizer """no""" +837 45 optimizer """adam""" +837 45 training_loop """owa""" +837 45 negative_sampler """basic""" +837 45 evaluator """rankbased""" +837 46 dataset """wn18rr""" +837 46 model """transe""" +837 46 loss """marginranking""" +837 46 regularizer """no""" +837 46 optimizer """adam""" +837 46 training_loop """owa""" +837 46 negative_sampler """basic""" +837 46 evaluator """rankbased""" +837 47 dataset """wn18rr""" +837 47 model """transe""" +837 47 loss """marginranking""" 
+837 47 regularizer """no""" +837 47 optimizer """adam""" +837 47 training_loop """owa""" +837 47 negative_sampler """basic""" +837 47 evaluator """rankbased""" +837 48 dataset """wn18rr""" +837 48 model """transe""" +837 48 loss """marginranking""" +837 48 regularizer """no""" +837 48 optimizer """adam""" +837 48 training_loop """owa""" +837 48 negative_sampler """basic""" +837 48 evaluator """rankbased""" +837 49 dataset """wn18rr""" +837 49 model """transe""" +837 49 loss """marginranking""" +837 49 regularizer """no""" +837 49 optimizer """adam""" +837 49 training_loop """owa""" +837 49 negative_sampler """basic""" +837 49 evaluator """rankbased""" +837 50 dataset """wn18rr""" +837 50 model """transe""" +837 50 loss """marginranking""" +837 50 regularizer """no""" +837 50 optimizer """adam""" +837 50 training_loop """owa""" +837 50 negative_sampler """basic""" +837 50 evaluator """rankbased""" +837 51 dataset """wn18rr""" +837 51 model """transe""" +837 51 loss """marginranking""" +837 51 regularizer """no""" +837 51 optimizer """adam""" +837 51 training_loop """owa""" +837 51 negative_sampler """basic""" +837 51 evaluator """rankbased""" +837 52 dataset """wn18rr""" +837 52 model """transe""" +837 52 loss """marginranking""" +837 52 regularizer """no""" +837 52 optimizer """adam""" +837 52 training_loop """owa""" +837 52 negative_sampler """basic""" +837 52 evaluator """rankbased""" +837 53 dataset """wn18rr""" +837 53 model """transe""" +837 53 loss """marginranking""" +837 53 regularizer """no""" +837 53 optimizer """adam""" +837 53 training_loop """owa""" +837 53 negative_sampler """basic""" +837 53 evaluator """rankbased""" +837 54 dataset """wn18rr""" +837 54 model """transe""" +837 54 loss """marginranking""" +837 54 regularizer """no""" +837 54 optimizer """adam""" +837 54 training_loop """owa""" +837 54 negative_sampler """basic""" +837 54 evaluator """rankbased""" +837 55 dataset """wn18rr""" +837 55 model """transe""" +837 55 loss """marginranking""" 
+837 55 regularizer """no""" +837 55 optimizer """adam""" +837 55 training_loop """owa""" +837 55 negative_sampler """basic""" +837 55 evaluator """rankbased""" +837 56 dataset """wn18rr""" +837 56 model """transe""" +837 56 loss """marginranking""" +837 56 regularizer """no""" +837 56 optimizer """adam""" +837 56 training_loop """owa""" +837 56 negative_sampler """basic""" +837 56 evaluator """rankbased""" +837 57 dataset """wn18rr""" +837 57 model """transe""" +837 57 loss """marginranking""" +837 57 regularizer """no""" +837 57 optimizer """adam""" +837 57 training_loop """owa""" +837 57 negative_sampler """basic""" +837 57 evaluator """rankbased""" +837 58 dataset """wn18rr""" +837 58 model """transe""" +837 58 loss """marginranking""" +837 58 regularizer """no""" +837 58 optimizer """adam""" +837 58 training_loop """owa""" +837 58 negative_sampler """basic""" +837 58 evaluator """rankbased""" +837 59 dataset """wn18rr""" +837 59 model """transe""" +837 59 loss """marginranking""" +837 59 regularizer """no""" +837 59 optimizer """adam""" +837 59 training_loop """owa""" +837 59 negative_sampler """basic""" +837 59 evaluator """rankbased""" +837 60 dataset """wn18rr""" +837 60 model """transe""" +837 60 loss """marginranking""" +837 60 regularizer """no""" +837 60 optimizer """adam""" +837 60 training_loop """owa""" +837 60 negative_sampler """basic""" +837 60 evaluator """rankbased""" +837 61 dataset """wn18rr""" +837 61 model """transe""" +837 61 loss """marginranking""" +837 61 regularizer """no""" +837 61 optimizer """adam""" +837 61 training_loop """owa""" +837 61 negative_sampler """basic""" +837 61 evaluator """rankbased""" +837 62 dataset """wn18rr""" +837 62 model """transe""" +837 62 loss """marginranking""" +837 62 regularizer """no""" +837 62 optimizer """adam""" +837 62 training_loop """owa""" +837 62 negative_sampler """basic""" +837 62 evaluator """rankbased""" +837 63 dataset """wn18rr""" +837 63 model """transe""" +837 63 loss """marginranking""" 
+837 63 regularizer """no""" +837 63 optimizer """adam""" +837 63 training_loop """owa""" +837 63 negative_sampler """basic""" +837 63 evaluator """rankbased""" +837 64 dataset """wn18rr""" +837 64 model """transe""" +837 64 loss """marginranking""" +837 64 regularizer """no""" +837 64 optimizer """adam""" +837 64 training_loop """owa""" +837 64 negative_sampler """basic""" +837 64 evaluator """rankbased""" +837 65 dataset """wn18rr""" +837 65 model """transe""" +837 65 loss """marginranking""" +837 65 regularizer """no""" +837 65 optimizer """adam""" +837 65 training_loop """owa""" +837 65 negative_sampler """basic""" +837 65 evaluator """rankbased""" +837 66 dataset """wn18rr""" +837 66 model """transe""" +837 66 loss """marginranking""" +837 66 regularizer """no""" +837 66 optimizer """adam""" +837 66 training_loop """owa""" +837 66 negative_sampler """basic""" +837 66 evaluator """rankbased""" +837 67 dataset """wn18rr""" +837 67 model """transe""" +837 67 loss """marginranking""" +837 67 regularizer """no""" +837 67 optimizer """adam""" +837 67 training_loop """owa""" +837 67 negative_sampler """basic""" +837 67 evaluator """rankbased""" +837 68 dataset """wn18rr""" +837 68 model """transe""" +837 68 loss """marginranking""" +837 68 regularizer """no""" +837 68 optimizer """adam""" +837 68 training_loop """owa""" +837 68 negative_sampler """basic""" +837 68 evaluator """rankbased""" +837 69 dataset """wn18rr""" +837 69 model """transe""" +837 69 loss """marginranking""" +837 69 regularizer """no""" +837 69 optimizer """adam""" +837 69 training_loop """owa""" +837 69 negative_sampler """basic""" +837 69 evaluator """rankbased""" +837 70 dataset """wn18rr""" +837 70 model """transe""" +837 70 loss """marginranking""" +837 70 regularizer """no""" +837 70 optimizer """adam""" +837 70 training_loop """owa""" +837 70 negative_sampler """basic""" +837 70 evaluator """rankbased""" +837 71 dataset """wn18rr""" +837 71 model """transe""" +837 71 loss """marginranking""" 
+837 71 regularizer """no""" +837 71 optimizer """adam""" +837 71 training_loop """owa""" +837 71 negative_sampler """basic""" +837 71 evaluator """rankbased""" +837 72 dataset """wn18rr""" +837 72 model """transe""" +837 72 loss """marginranking""" +837 72 regularizer """no""" +837 72 optimizer """adam""" +837 72 training_loop """owa""" +837 72 negative_sampler """basic""" +837 72 evaluator """rankbased""" +837 73 dataset """wn18rr""" +837 73 model """transe""" +837 73 loss """marginranking""" +837 73 regularizer """no""" +837 73 optimizer """adam""" +837 73 training_loop """owa""" +837 73 negative_sampler """basic""" +837 73 evaluator """rankbased""" +837 74 dataset """wn18rr""" +837 74 model """transe""" +837 74 loss """marginranking""" +837 74 regularizer """no""" +837 74 optimizer """adam""" +837 74 training_loop """owa""" +837 74 negative_sampler """basic""" +837 74 evaluator """rankbased""" +837 75 dataset """wn18rr""" +837 75 model """transe""" +837 75 loss """marginranking""" +837 75 regularizer """no""" +837 75 optimizer """adam""" +837 75 training_loop """owa""" +837 75 negative_sampler """basic""" +837 75 evaluator """rankbased""" +837 76 dataset """wn18rr""" +837 76 model """transe""" +837 76 loss """marginranking""" +837 76 regularizer """no""" +837 76 optimizer """adam""" +837 76 training_loop """owa""" +837 76 negative_sampler """basic""" +837 76 evaluator """rankbased""" +837 77 dataset """wn18rr""" +837 77 model """transe""" +837 77 loss """marginranking""" +837 77 regularizer """no""" +837 77 optimizer """adam""" +837 77 training_loop """owa""" +837 77 negative_sampler """basic""" +837 77 evaluator """rankbased""" +837 78 dataset """wn18rr""" +837 78 model """transe""" +837 78 loss """marginranking""" +837 78 regularizer """no""" +837 78 optimizer """adam""" +837 78 training_loop """owa""" +837 78 negative_sampler """basic""" +837 78 evaluator """rankbased""" +837 79 dataset """wn18rr""" +837 79 model """transe""" +837 79 loss """marginranking""" 
+837 79 regularizer """no""" +837 79 optimizer """adam""" +837 79 training_loop """owa""" +837 79 negative_sampler """basic""" +837 79 evaluator """rankbased""" +837 80 dataset """wn18rr""" +837 80 model """transe""" +837 80 loss """marginranking""" +837 80 regularizer """no""" +837 80 optimizer """adam""" +837 80 training_loop """owa""" +837 80 negative_sampler """basic""" +837 80 evaluator """rankbased""" +837 81 dataset """wn18rr""" +837 81 model """transe""" +837 81 loss """marginranking""" +837 81 regularizer """no""" +837 81 optimizer """adam""" +837 81 training_loop """owa""" +837 81 negative_sampler """basic""" +837 81 evaluator """rankbased""" +837 82 dataset """wn18rr""" +837 82 model """transe""" +837 82 loss """marginranking""" +837 82 regularizer """no""" +837 82 optimizer """adam""" +837 82 training_loop """owa""" +837 82 negative_sampler """basic""" +837 82 evaluator """rankbased""" +837 83 dataset """wn18rr""" +837 83 model """transe""" +837 83 loss """marginranking""" +837 83 regularizer """no""" +837 83 optimizer """adam""" +837 83 training_loop """owa""" +837 83 negative_sampler """basic""" +837 83 evaluator """rankbased""" +837 84 dataset """wn18rr""" +837 84 model """transe""" +837 84 loss """marginranking""" +837 84 regularizer """no""" +837 84 optimizer """adam""" +837 84 training_loop """owa""" +837 84 negative_sampler """basic""" +837 84 evaluator """rankbased""" +837 85 dataset """wn18rr""" +837 85 model """transe""" +837 85 loss """marginranking""" +837 85 regularizer """no""" +837 85 optimizer """adam""" +837 85 training_loop """owa""" +837 85 negative_sampler """basic""" +837 85 evaluator """rankbased""" +837 86 dataset """wn18rr""" +837 86 model """transe""" +837 86 loss """marginranking""" +837 86 regularizer """no""" +837 86 optimizer """adam""" +837 86 training_loop """owa""" +837 86 negative_sampler """basic""" +837 86 evaluator """rankbased""" +837 87 dataset """wn18rr""" +837 87 model """transe""" +837 87 loss """marginranking""" 
+837 87 regularizer """no""" +837 87 optimizer """adam""" +837 87 training_loop """owa""" +837 87 negative_sampler """basic""" +837 87 evaluator """rankbased""" +837 88 dataset """wn18rr""" +837 88 model """transe""" +837 88 loss """marginranking""" +837 88 regularizer """no""" +837 88 optimizer """adam""" +837 88 training_loop """owa""" +837 88 negative_sampler """basic""" +837 88 evaluator """rankbased""" +837 89 dataset """wn18rr""" +837 89 model """transe""" +837 89 loss """marginranking""" +837 89 regularizer """no""" +837 89 optimizer """adam""" +837 89 training_loop """owa""" +837 89 negative_sampler """basic""" +837 89 evaluator """rankbased""" +837 90 dataset """wn18rr""" +837 90 model """transe""" +837 90 loss """marginranking""" +837 90 regularizer """no""" +837 90 optimizer """adam""" +837 90 training_loop """owa""" +837 90 negative_sampler """basic""" +837 90 evaluator """rankbased""" +837 91 dataset """wn18rr""" +837 91 model """transe""" +837 91 loss """marginranking""" +837 91 regularizer """no""" +837 91 optimizer """adam""" +837 91 training_loop """owa""" +837 91 negative_sampler """basic""" +837 91 evaluator """rankbased""" +837 92 dataset """wn18rr""" +837 92 model """transe""" +837 92 loss """marginranking""" +837 92 regularizer """no""" +837 92 optimizer """adam""" +837 92 training_loop """owa""" +837 92 negative_sampler """basic""" +837 92 evaluator """rankbased""" +838 1 model.embedding_dim 2.0 +838 1 model.scoring_fct_norm 1.0 +838 1 loss.margin 6.680680325141207 +838 1 optimizer.lr 0.0029596841814311992 +838 1 negative_sampler.num_negs_per_pos 2.0 +838 1 training.batch_size 0.0 +838 2 model.embedding_dim 2.0 +838 2 model.scoring_fct_norm 2.0 +838 2 loss.margin 2.1783301595956033 +838 2 optimizer.lr 0.010047793513053727 +838 2 negative_sampler.num_negs_per_pos 20.0 +838 2 training.batch_size 1.0 +838 3 model.embedding_dim 0.0 +838 3 model.scoring_fct_norm 1.0 +838 3 loss.margin 9.6215241014337 +838 3 optimizer.lr 0.0679100700072068 +838 3 
negative_sampler.num_negs_per_pos 25.0 +838 3 training.batch_size 2.0 +838 4 model.embedding_dim 0.0 +838 4 model.scoring_fct_norm 2.0 +838 4 loss.margin 9.837348731620791 +838 4 optimizer.lr 0.002085683901273244 +838 4 negative_sampler.num_negs_per_pos 99.0 +838 4 training.batch_size 0.0 +838 5 model.embedding_dim 0.0 +838 5 model.scoring_fct_norm 2.0 +838 5 loss.margin 2.1920133503263264 +838 5 optimizer.lr 0.05945889014826674 +838 5 negative_sampler.num_negs_per_pos 27.0 +838 5 training.batch_size 1.0 +838 6 model.embedding_dim 0.0 +838 6 model.scoring_fct_norm 1.0 +838 6 loss.margin 8.853442936322546 +838 6 optimizer.lr 0.006695510440413558 +838 6 negative_sampler.num_negs_per_pos 79.0 +838 6 training.batch_size 0.0 +838 7 model.embedding_dim 2.0 +838 7 model.scoring_fct_norm 2.0 +838 7 loss.margin 9.15139201142425 +838 7 optimizer.lr 0.02720105804408736 +838 7 negative_sampler.num_negs_per_pos 25.0 +838 7 training.batch_size 2.0 +838 8 model.embedding_dim 0.0 +838 8 model.scoring_fct_norm 1.0 +838 8 loss.margin 6.195295407395179 +838 8 optimizer.lr 0.061019464058227314 +838 8 negative_sampler.num_negs_per_pos 89.0 +838 8 training.batch_size 2.0 +838 9 model.embedding_dim 1.0 +838 9 model.scoring_fct_norm 2.0 +838 9 loss.margin 8.960551688592572 +838 9 optimizer.lr 0.002490419872787004 +838 9 negative_sampler.num_negs_per_pos 8.0 +838 9 training.batch_size 2.0 +838 10 model.embedding_dim 2.0 +838 10 model.scoring_fct_norm 1.0 +838 10 loss.margin 8.960015306197215 +838 10 optimizer.lr 0.004703829960931034 +838 10 negative_sampler.num_negs_per_pos 34.0 +838 10 training.batch_size 1.0 +838 11 model.embedding_dim 1.0 +838 11 model.scoring_fct_norm 2.0 +838 11 loss.margin 3.2975208667145037 +838 11 optimizer.lr 0.0671724526773737 +838 11 negative_sampler.num_negs_per_pos 72.0 +838 11 training.batch_size 2.0 +838 12 model.embedding_dim 1.0 +838 12 model.scoring_fct_norm 2.0 +838 12 loss.margin 5.383117919511541 +838 12 optimizer.lr 0.006370013365781765 +838 12 
negative_sampler.num_negs_per_pos 13.0 +838 12 training.batch_size 0.0 +838 13 model.embedding_dim 1.0 +838 13 model.scoring_fct_norm 2.0 +838 13 loss.margin 1.4489511924944807 +838 13 optimizer.lr 0.003942068876938197 +838 13 negative_sampler.num_negs_per_pos 71.0 +838 13 training.batch_size 2.0 +838 14 model.embedding_dim 2.0 +838 14 model.scoring_fct_norm 1.0 +838 14 loss.margin 9.443672861875017 +838 14 optimizer.lr 0.0012137644363626595 +838 14 negative_sampler.num_negs_per_pos 44.0 +838 14 training.batch_size 1.0 +838 15 model.embedding_dim 1.0 +838 15 model.scoring_fct_norm 1.0 +838 15 loss.margin 2.0323319324100035 +838 15 optimizer.lr 0.007181400773336337 +838 15 negative_sampler.num_negs_per_pos 0.0 +838 15 training.batch_size 1.0 +838 16 model.embedding_dim 0.0 +838 16 model.scoring_fct_norm 2.0 +838 16 loss.margin 1.1350442425092475 +838 16 optimizer.lr 0.029283663671314106 +838 16 negative_sampler.num_negs_per_pos 91.0 +838 16 training.batch_size 0.0 +838 17 model.embedding_dim 0.0 +838 17 model.scoring_fct_norm 1.0 +838 17 loss.margin 8.086939996400528 +838 17 optimizer.lr 0.007321493933158179 +838 17 negative_sampler.num_negs_per_pos 7.0 +838 17 training.batch_size 2.0 +838 18 model.embedding_dim 1.0 +838 18 model.scoring_fct_norm 2.0 +838 18 loss.margin 3.537682117941529 +838 18 optimizer.lr 0.0015607609193506445 +838 18 negative_sampler.num_negs_per_pos 43.0 +838 18 training.batch_size 2.0 +838 19 model.embedding_dim 1.0 +838 19 model.scoring_fct_norm 1.0 +838 19 loss.margin 3.700289349505074 +838 19 optimizer.lr 0.01679907418849362 +838 19 negative_sampler.num_negs_per_pos 70.0 +838 19 training.batch_size 0.0 +838 20 model.embedding_dim 1.0 +838 20 model.scoring_fct_norm 1.0 +838 20 loss.margin 8.05005402062042 +838 20 optimizer.lr 0.0902369303939156 +838 20 negative_sampler.num_negs_per_pos 10.0 +838 20 training.batch_size 0.0 +838 21 model.embedding_dim 0.0 +838 21 model.scoring_fct_norm 2.0 +838 21 loss.margin 6.458235982855478 +838 21 
optimizer.lr 0.021513097579351683 +838 21 negative_sampler.num_negs_per_pos 70.0 +838 21 training.batch_size 0.0 +838 22 model.embedding_dim 1.0 +838 22 model.scoring_fct_norm 2.0 +838 22 loss.margin 3.7850205656340656 +838 22 optimizer.lr 0.08183417262816885 +838 22 negative_sampler.num_negs_per_pos 36.0 +838 22 training.batch_size 2.0 +838 23 model.embedding_dim 1.0 +838 23 model.scoring_fct_norm 1.0 +838 23 loss.margin 5.4778169602348346 +838 23 optimizer.lr 0.05929517320903997 +838 23 negative_sampler.num_negs_per_pos 34.0 +838 23 training.batch_size 1.0 +838 24 model.embedding_dim 0.0 +838 24 model.scoring_fct_norm 1.0 +838 24 loss.margin 4.206706617263805 +838 24 optimizer.lr 0.030847810666393825 +838 24 negative_sampler.num_negs_per_pos 65.0 +838 24 training.batch_size 2.0 +838 25 model.embedding_dim 0.0 +838 25 model.scoring_fct_norm 1.0 +838 25 loss.margin 4.7138201893042275 +838 25 optimizer.lr 0.0036012077476776086 +838 25 negative_sampler.num_negs_per_pos 77.0 +838 25 training.batch_size 1.0 +838 26 model.embedding_dim 0.0 +838 26 model.scoring_fct_norm 2.0 +838 26 loss.margin 8.822563842408899 +838 26 optimizer.lr 0.004156260532601282 +838 26 negative_sampler.num_negs_per_pos 91.0 +838 26 training.batch_size 2.0 +838 27 model.embedding_dim 1.0 +838 27 model.scoring_fct_norm 2.0 +838 27 loss.margin 4.188199767746166 +838 27 optimizer.lr 0.08168412595635115 +838 27 negative_sampler.num_negs_per_pos 64.0 +838 27 training.batch_size 0.0 +838 28 model.embedding_dim 2.0 +838 28 model.scoring_fct_norm 1.0 +838 28 loss.margin 0.9058052304788711 +838 28 optimizer.lr 0.0019693370026924277 +838 28 negative_sampler.num_negs_per_pos 15.0 +838 28 training.batch_size 1.0 +838 29 model.embedding_dim 2.0 +838 29 model.scoring_fct_norm 1.0 +838 29 loss.margin 4.7096088017215765 +838 29 optimizer.lr 0.03718807123692707 +838 29 negative_sampler.num_negs_per_pos 86.0 +838 29 training.batch_size 2.0 +838 30 model.embedding_dim 2.0 +838 30 model.scoring_fct_norm 1.0 +838 30 
loss.margin 6.67479198554142 +838 30 optimizer.lr 0.01567298934676551 +838 30 negative_sampler.num_negs_per_pos 84.0 +838 30 training.batch_size 0.0 +838 31 model.embedding_dim 2.0 +838 31 model.scoring_fct_norm 1.0 +838 31 loss.margin 8.761891867049183 +838 31 optimizer.lr 0.01682043096538256 +838 31 negative_sampler.num_negs_per_pos 3.0 +838 31 training.batch_size 2.0 +838 32 model.embedding_dim 1.0 +838 32 model.scoring_fct_norm 1.0 +838 32 loss.margin 5.640661245303586 +838 32 optimizer.lr 0.0020800959540136802 +838 32 negative_sampler.num_negs_per_pos 17.0 +838 32 training.batch_size 0.0 +838 33 model.embedding_dim 1.0 +838 33 model.scoring_fct_norm 2.0 +838 33 loss.margin 3.791615874801644 +838 33 optimizer.lr 0.06703317192240575 +838 33 negative_sampler.num_negs_per_pos 49.0 +838 33 training.batch_size 1.0 +838 34 model.embedding_dim 2.0 +838 34 model.scoring_fct_norm 1.0 +838 34 loss.margin 3.951588742516878 +838 34 optimizer.lr 0.04784300862375404 +838 34 negative_sampler.num_negs_per_pos 88.0 +838 34 training.batch_size 0.0 +838 35 model.embedding_dim 1.0 +838 35 model.scoring_fct_norm 2.0 +838 35 loss.margin 4.522098938801904 +838 35 optimizer.lr 0.011796302100869809 +838 35 negative_sampler.num_negs_per_pos 78.0 +838 35 training.batch_size 0.0 +838 36 model.embedding_dim 2.0 +838 36 model.scoring_fct_norm 2.0 +838 36 loss.margin 8.23102240457892 +838 36 optimizer.lr 0.006903285201842201 +838 36 negative_sampler.num_negs_per_pos 90.0 +838 36 training.batch_size 1.0 +838 37 model.embedding_dim 1.0 +838 37 model.scoring_fct_norm 2.0 +838 37 loss.margin 5.570436982283329 +838 37 optimizer.lr 0.01975905292258796 +838 37 negative_sampler.num_negs_per_pos 98.0 +838 37 training.batch_size 1.0 +838 38 model.embedding_dim 0.0 +838 38 model.scoring_fct_norm 1.0 +838 38 loss.margin 5.302018133206694 +838 38 optimizer.lr 0.0030658846357464417 +838 38 negative_sampler.num_negs_per_pos 47.0 +838 38 training.batch_size 1.0 +838 39 model.embedding_dim 2.0 +838 39 
model.scoring_fct_norm 2.0 +838 39 loss.margin 8.094792619356568 +838 39 optimizer.lr 0.005658097797012378 +838 39 negative_sampler.num_negs_per_pos 98.0 +838 39 training.batch_size 2.0 +838 40 model.embedding_dim 2.0 +838 40 model.scoring_fct_norm 2.0 +838 40 loss.margin 2.0089339999675015 +838 40 optimizer.lr 0.08030532422932571 +838 40 negative_sampler.num_negs_per_pos 55.0 +838 40 training.batch_size 2.0 +838 41 model.embedding_dim 0.0 +838 41 model.scoring_fct_norm 2.0 +838 41 loss.margin 7.346886839309641 +838 41 optimizer.lr 0.011467476527252428 +838 41 negative_sampler.num_negs_per_pos 33.0 +838 41 training.batch_size 1.0 +838 42 model.embedding_dim 1.0 +838 42 model.scoring_fct_norm 1.0 +838 42 loss.margin 4.906688971111484 +838 42 optimizer.lr 0.0019684917474478967 +838 42 negative_sampler.num_negs_per_pos 51.0 +838 42 training.batch_size 2.0 +838 43 model.embedding_dim 0.0 +838 43 model.scoring_fct_norm 1.0 +838 43 loss.margin 5.1626374722646045 +838 43 optimizer.lr 0.014986294869188756 +838 43 negative_sampler.num_negs_per_pos 90.0 +838 43 training.batch_size 1.0 +838 44 model.embedding_dim 2.0 +838 44 model.scoring_fct_norm 2.0 +838 44 loss.margin 9.028940483304988 +838 44 optimizer.lr 0.09891257499293292 +838 44 negative_sampler.num_negs_per_pos 43.0 +838 44 training.batch_size 2.0 +838 45 model.embedding_dim 1.0 +838 45 model.scoring_fct_norm 1.0 +838 45 loss.margin 7.321455854520662 +838 45 optimizer.lr 0.02232997776851808 +838 45 negative_sampler.num_negs_per_pos 45.0 +838 45 training.batch_size 2.0 +838 46 model.embedding_dim 0.0 +838 46 model.scoring_fct_norm 1.0 +838 46 loss.margin 6.447949054383731 +838 46 optimizer.lr 0.002782987264569471 +838 46 negative_sampler.num_negs_per_pos 65.0 +838 46 training.batch_size 2.0 +838 47 model.embedding_dim 0.0 +838 47 model.scoring_fct_norm 2.0 +838 47 loss.margin 0.790196530508644 +838 47 optimizer.lr 0.0032150896064713284 +838 47 negative_sampler.num_negs_per_pos 13.0 +838 47 training.batch_size 1.0 +838 
48 model.embedding_dim 1.0 +838 48 model.scoring_fct_norm 1.0 +838 48 loss.margin 8.049408821626285 +838 48 optimizer.lr 0.05922896836908576 +838 48 negative_sampler.num_negs_per_pos 36.0 +838 48 training.batch_size 2.0 +838 49 model.embedding_dim 1.0 +838 49 model.scoring_fct_norm 1.0 +838 49 loss.margin 4.065772650713137 +838 49 optimizer.lr 0.038842293579895056 +838 49 negative_sampler.num_negs_per_pos 91.0 +838 49 training.batch_size 2.0 +838 50 model.embedding_dim 2.0 +838 50 model.scoring_fct_norm 1.0 +838 50 loss.margin 4.261752228476276 +838 50 optimizer.lr 0.010895099011984748 +838 50 negative_sampler.num_negs_per_pos 96.0 +838 50 training.batch_size 0.0 +838 51 model.embedding_dim 1.0 +838 51 model.scoring_fct_norm 1.0 +838 51 loss.margin 1.8522345004048344 +838 51 optimizer.lr 0.0065535316954403134 +838 51 negative_sampler.num_negs_per_pos 78.0 +838 51 training.batch_size 2.0 +838 52 model.embedding_dim 2.0 +838 52 model.scoring_fct_norm 2.0 +838 52 loss.margin 6.141239905305039 +838 52 optimizer.lr 0.0018989768678045068 +838 52 negative_sampler.num_negs_per_pos 0.0 +838 52 training.batch_size 0.0 +838 53 model.embedding_dim 2.0 +838 53 model.scoring_fct_norm 1.0 +838 53 loss.margin 8.827618946859852 +838 53 optimizer.lr 0.004640859169286359 +838 53 negative_sampler.num_negs_per_pos 25.0 +838 53 training.batch_size 2.0 +838 54 model.embedding_dim 0.0 +838 54 model.scoring_fct_norm 1.0 +838 54 loss.margin 4.287289147024489 +838 54 optimizer.lr 0.025194276629167952 +838 54 negative_sampler.num_negs_per_pos 57.0 +838 54 training.batch_size 1.0 +838 55 model.embedding_dim 1.0 +838 55 model.scoring_fct_norm 2.0 +838 55 loss.margin 7.088118775906201 +838 55 optimizer.lr 0.01950229087607703 +838 55 negative_sampler.num_negs_per_pos 36.0 +838 55 training.batch_size 2.0 +838 56 model.embedding_dim 2.0 +838 56 model.scoring_fct_norm 2.0 +838 56 loss.margin 0.706440632777722 +838 56 optimizer.lr 0.0038061551023571044 +838 56 negative_sampler.num_negs_per_pos 69.0 
+838 56 training.batch_size 0.0 +838 57 model.embedding_dim 2.0 +838 57 model.scoring_fct_norm 1.0 +838 57 loss.margin 0.8928462792451726 +838 57 optimizer.lr 0.006766190780657224 +838 57 negative_sampler.num_negs_per_pos 20.0 +838 57 training.batch_size 1.0 +838 58 model.embedding_dim 0.0 +838 58 model.scoring_fct_norm 1.0 +838 58 loss.margin 1.8427840721195048 +838 58 optimizer.lr 0.08830068285194861 +838 58 negative_sampler.num_negs_per_pos 96.0 +838 58 training.batch_size 2.0 +838 59 model.embedding_dim 1.0 +838 59 model.scoring_fct_norm 1.0 +838 59 loss.margin 2.848599641803223 +838 59 optimizer.lr 0.01559876372903923 +838 59 negative_sampler.num_negs_per_pos 90.0 +838 59 training.batch_size 2.0 +838 60 model.embedding_dim 0.0 +838 60 model.scoring_fct_norm 2.0 +838 60 loss.margin 1.7755057106830556 +838 60 optimizer.lr 0.008599978078450456 +838 60 negative_sampler.num_negs_per_pos 11.0 +838 60 training.batch_size 0.0 +838 61 model.embedding_dim 1.0 +838 61 model.scoring_fct_norm 2.0 +838 61 loss.margin 7.597118362385363 +838 61 optimizer.lr 0.002266742601546356 +838 61 negative_sampler.num_negs_per_pos 76.0 +838 61 training.batch_size 1.0 +838 62 model.embedding_dim 1.0 +838 62 model.scoring_fct_norm 2.0 +838 62 loss.margin 2.2501123143129536 +838 62 optimizer.lr 0.003219958158972328 +838 62 negative_sampler.num_negs_per_pos 61.0 +838 62 training.batch_size 0.0 +838 63 model.embedding_dim 1.0 +838 63 model.scoring_fct_norm 2.0 +838 63 loss.margin 3.9682867204836487 +838 63 optimizer.lr 0.057945707557765284 +838 63 negative_sampler.num_negs_per_pos 48.0 +838 63 training.batch_size 2.0 +838 64 model.embedding_dim 2.0 +838 64 model.scoring_fct_norm 2.0 +838 64 loss.margin 2.455635519458398 +838 64 optimizer.lr 0.07350874204505053 +838 64 negative_sampler.num_negs_per_pos 20.0 +838 64 training.batch_size 0.0 +838 65 model.embedding_dim 0.0 +838 65 model.scoring_fct_norm 2.0 +838 65 loss.margin 3.4920794012502103 +838 65 optimizer.lr 0.003214860885702306 +838 65 
negative_sampler.num_negs_per_pos 93.0 +838 65 training.batch_size 0.0 +838 66 model.embedding_dim 2.0 +838 66 model.scoring_fct_norm 2.0 +838 66 loss.margin 3.1365737271982446 +838 66 optimizer.lr 0.033749987576769405 +838 66 negative_sampler.num_negs_per_pos 6.0 +838 66 training.batch_size 2.0 +838 67 model.embedding_dim 2.0 +838 67 model.scoring_fct_norm 1.0 +838 67 loss.margin 1.9248790716810449 +838 67 optimizer.lr 0.05536623257492037 +838 67 negative_sampler.num_negs_per_pos 80.0 +838 67 training.batch_size 0.0 +838 68 model.embedding_dim 0.0 +838 68 model.scoring_fct_norm 1.0 +838 68 loss.margin 5.828248926416274 +838 68 optimizer.lr 0.03361673009248055 +838 68 negative_sampler.num_negs_per_pos 30.0 +838 68 training.batch_size 0.0 +838 69 model.embedding_dim 0.0 +838 69 model.scoring_fct_norm 1.0 +838 69 loss.margin 9.045875913100458 +838 69 optimizer.lr 0.041409271804585895 +838 69 negative_sampler.num_negs_per_pos 94.0 +838 69 training.batch_size 2.0 +838 70 model.embedding_dim 1.0 +838 70 model.scoring_fct_norm 2.0 +838 70 loss.margin 5.938555560392027 +838 70 optimizer.lr 0.004268183538734724 +838 70 negative_sampler.num_negs_per_pos 30.0 +838 70 training.batch_size 2.0 +838 71 model.embedding_dim 0.0 +838 71 model.scoring_fct_norm 1.0 +838 71 loss.margin 8.007204998503479 +838 71 optimizer.lr 0.004247779791767365 +838 71 negative_sampler.num_negs_per_pos 28.0 +838 71 training.batch_size 0.0 +838 72 model.embedding_dim 0.0 +838 72 model.scoring_fct_norm 2.0 +838 72 loss.margin 3.0601810028633976 +838 72 optimizer.lr 0.033346946708958644 +838 72 negative_sampler.num_negs_per_pos 28.0 +838 72 training.batch_size 0.0 +838 73 model.embedding_dim 1.0 +838 73 model.scoring_fct_norm 1.0 +838 73 loss.margin 3.453090817935507 +838 73 optimizer.lr 0.0021397706878214405 +838 73 negative_sampler.num_negs_per_pos 18.0 +838 73 training.batch_size 1.0 +838 74 model.embedding_dim 0.0 +838 74 model.scoring_fct_norm 1.0 +838 74 loss.margin 8.769117657929582 +838 74 
optimizer.lr 0.0011528071823257015 +838 74 negative_sampler.num_negs_per_pos 27.0 +838 74 training.batch_size 2.0 +838 75 model.embedding_dim 1.0 +838 75 model.scoring_fct_norm 2.0 +838 75 loss.margin 6.468695898685477 +838 75 optimizer.lr 0.0014317201475298252 +838 75 negative_sampler.num_negs_per_pos 10.0 +838 75 training.batch_size 0.0 +838 76 model.embedding_dim 1.0 +838 76 model.scoring_fct_norm 1.0 +838 76 loss.margin 0.8141122973570505 +838 76 optimizer.lr 0.09782670035737853 +838 76 negative_sampler.num_negs_per_pos 72.0 +838 76 training.batch_size 1.0 +838 77 model.embedding_dim 2.0 +838 77 model.scoring_fct_norm 1.0 +838 77 loss.margin 4.690163097001788 +838 77 optimizer.lr 0.020701222321456787 +838 77 negative_sampler.num_negs_per_pos 82.0 +838 77 training.batch_size 0.0 +838 78 model.embedding_dim 1.0 +838 78 model.scoring_fct_norm 2.0 +838 78 loss.margin 1.3261822448962586 +838 78 optimizer.lr 0.01949730905592839 +838 78 negative_sampler.num_negs_per_pos 81.0 +838 78 training.batch_size 0.0 +838 79 model.embedding_dim 0.0 +838 79 model.scoring_fct_norm 1.0 +838 79 loss.margin 2.115913151924414 +838 79 optimizer.lr 0.016675210735516987 +838 79 negative_sampler.num_negs_per_pos 5.0 +838 79 training.batch_size 0.0 +838 80 model.embedding_dim 2.0 +838 80 model.scoring_fct_norm 1.0 +838 80 loss.margin 8.018530624594797 +838 80 optimizer.lr 0.0013216432021463844 +838 80 negative_sampler.num_negs_per_pos 28.0 +838 80 training.batch_size 2.0 +838 81 model.embedding_dim 2.0 +838 81 model.scoring_fct_norm 1.0 +838 81 loss.margin 7.540294668101763 +838 81 optimizer.lr 0.012541585020967552 +838 81 negative_sampler.num_negs_per_pos 79.0 +838 81 training.batch_size 2.0 +838 82 model.embedding_dim 2.0 +838 82 model.scoring_fct_norm 1.0 +838 82 loss.margin 1.8794130084365153 +838 82 optimizer.lr 0.005511826775340641 +838 82 negative_sampler.num_negs_per_pos 8.0 +838 82 training.batch_size 0.0 +838 83 model.embedding_dim 0.0 +838 83 model.scoring_fct_norm 2.0 +838 83 
loss.margin 0.8202760039732879 +838 83 optimizer.lr 0.04716686288653284 +838 83 negative_sampler.num_negs_per_pos 88.0 +838 83 training.batch_size 1.0 +838 84 model.embedding_dim 1.0 +838 84 model.scoring_fct_norm 1.0 +838 84 loss.margin 2.2370512738747568 +838 84 optimizer.lr 0.005232881208348838 +838 84 negative_sampler.num_negs_per_pos 55.0 +838 84 training.batch_size 0.0 +838 85 model.embedding_dim 2.0 +838 85 model.scoring_fct_norm 2.0 +838 85 loss.margin 6.743335410049049 +838 85 optimizer.lr 0.007306854657051777 +838 85 negative_sampler.num_negs_per_pos 61.0 +838 85 training.batch_size 0.0 +838 86 model.embedding_dim 1.0 +838 86 model.scoring_fct_norm 2.0 +838 86 loss.margin 2.808455785729642 +838 86 optimizer.lr 0.017031807757724372 +838 86 negative_sampler.num_negs_per_pos 41.0 +838 86 training.batch_size 1.0 +838 87 model.embedding_dim 1.0 +838 87 model.scoring_fct_norm 2.0 +838 87 loss.margin 4.126022145310055 +838 87 optimizer.lr 0.06267279212424488 +838 87 negative_sampler.num_negs_per_pos 7.0 +838 87 training.batch_size 1.0 +838 88 model.embedding_dim 2.0 +838 88 model.scoring_fct_norm 1.0 +838 88 loss.margin 1.1009340220214991 +838 88 optimizer.lr 0.002991078699022419 +838 88 negative_sampler.num_negs_per_pos 86.0 +838 88 training.batch_size 0.0 +838 89 model.embedding_dim 1.0 +838 89 model.scoring_fct_norm 2.0 +838 89 loss.margin 0.7873484912463171 +838 89 optimizer.lr 0.0014989959243809007 +838 89 negative_sampler.num_negs_per_pos 45.0 +838 89 training.batch_size 1.0 +838 90 model.embedding_dim 1.0 +838 90 model.scoring_fct_norm 2.0 +838 90 loss.margin 1.4464159887618524 +838 90 optimizer.lr 0.0017421994023226078 +838 90 negative_sampler.num_negs_per_pos 97.0 +838 90 training.batch_size 0.0 +838 91 model.embedding_dim 2.0 +838 91 model.scoring_fct_norm 1.0 +838 91 loss.margin 5.579662433758887 +838 91 optimizer.lr 0.07371370674176636 +838 91 negative_sampler.num_negs_per_pos 14.0 +838 91 training.batch_size 0.0 +838 92 model.embedding_dim 1.0 +838 
92 model.scoring_fct_norm 2.0 +838 92 loss.margin 1.7256875063373625 +838 92 optimizer.lr 0.003988726833931709 +838 92 negative_sampler.num_negs_per_pos 61.0 +838 92 training.batch_size 2.0 +838 93 model.embedding_dim 2.0 +838 93 model.scoring_fct_norm 2.0 +838 93 loss.margin 1.007967315537716 +838 93 optimizer.lr 0.006110728848935247 +838 93 negative_sampler.num_negs_per_pos 55.0 +838 93 training.batch_size 1.0 +838 94 model.embedding_dim 1.0 +838 94 model.scoring_fct_norm 2.0 +838 94 loss.margin 6.925265665614746 +838 94 optimizer.lr 0.007500749454546958 +838 94 negative_sampler.num_negs_per_pos 88.0 +838 94 training.batch_size 0.0 +838 95 model.embedding_dim 1.0 +838 95 model.scoring_fct_norm 1.0 +838 95 loss.margin 9.39017129070837 +838 95 optimizer.lr 0.045296903759717885 +838 95 negative_sampler.num_negs_per_pos 76.0 +838 95 training.batch_size 0.0 +838 96 model.embedding_dim 2.0 +838 96 model.scoring_fct_norm 2.0 +838 96 loss.margin 9.756185005645033 +838 96 optimizer.lr 0.016162060441004673 +838 96 negative_sampler.num_negs_per_pos 47.0 +838 96 training.batch_size 2.0 +838 97 model.embedding_dim 0.0 +838 97 model.scoring_fct_norm 1.0 +838 97 loss.margin 9.174714887600222 +838 97 optimizer.lr 0.07832235743910256 +838 97 negative_sampler.num_negs_per_pos 16.0 +838 97 training.batch_size 1.0 +838 98 model.embedding_dim 1.0 +838 98 model.scoring_fct_norm 2.0 +838 98 loss.margin 1.7704052404619675 +838 98 optimizer.lr 0.0036247408403661334 +838 98 negative_sampler.num_negs_per_pos 10.0 +838 98 training.batch_size 1.0 +838 99 model.embedding_dim 2.0 +838 99 model.scoring_fct_norm 2.0 +838 99 loss.margin 4.897372386678289 +838 99 optimizer.lr 0.005417679072753141 +838 99 negative_sampler.num_negs_per_pos 96.0 +838 99 training.batch_size 0.0 +838 100 model.embedding_dim 0.0 +838 100 model.scoring_fct_norm 2.0 +838 100 loss.margin 9.50580040024163 +838 100 optimizer.lr 0.03888689406463955 +838 100 negative_sampler.num_negs_per_pos 28.0 +838 100 training.batch_size 
1.0 +838 1 dataset """wn18rr""" +838 1 model """transe""" +838 1 loss """marginranking""" +838 1 regularizer """no""" +838 1 optimizer """adam""" +838 1 training_loop """owa""" +838 1 negative_sampler """basic""" +838 1 evaluator """rankbased""" +838 2 dataset """wn18rr""" +838 2 model """transe""" +838 2 loss """marginranking""" +838 2 regularizer """no""" +838 2 optimizer """adam""" +838 2 training_loop """owa""" +838 2 negative_sampler """basic""" +838 2 evaluator """rankbased""" +838 3 dataset """wn18rr""" +838 3 model """transe""" +838 3 loss """marginranking""" +838 3 regularizer """no""" +838 3 optimizer """adam""" +838 3 training_loop """owa""" +838 3 negative_sampler """basic""" +838 3 evaluator """rankbased""" +838 4 dataset """wn18rr""" +838 4 model """transe""" +838 4 loss """marginranking""" +838 4 regularizer """no""" +838 4 optimizer """adam""" +838 4 training_loop """owa""" +838 4 negative_sampler """basic""" +838 4 evaluator """rankbased""" +838 5 dataset """wn18rr""" +838 5 model """transe""" +838 5 loss """marginranking""" +838 5 regularizer """no""" +838 5 optimizer """adam""" +838 5 training_loop """owa""" +838 5 negative_sampler """basic""" +838 5 evaluator """rankbased""" +838 6 dataset """wn18rr""" +838 6 model """transe""" +838 6 loss """marginranking""" +838 6 regularizer """no""" +838 6 optimizer """adam""" +838 6 training_loop """owa""" +838 6 negative_sampler """basic""" +838 6 evaluator """rankbased""" +838 7 dataset """wn18rr""" +838 7 model """transe""" +838 7 loss """marginranking""" +838 7 regularizer """no""" +838 7 optimizer """adam""" +838 7 training_loop """owa""" +838 7 negative_sampler """basic""" +838 7 evaluator """rankbased""" +838 8 dataset """wn18rr""" +838 8 model """transe""" +838 8 loss """marginranking""" +838 8 regularizer """no""" +838 8 optimizer """adam""" +838 8 training_loop """owa""" +838 8 negative_sampler """basic""" +838 8 evaluator """rankbased""" +838 9 dataset """wn18rr""" +838 9 model """transe""" +838 
9 loss """marginranking""" +838 9 regularizer """no""" +838 9 optimizer """adam""" +838 9 training_loop """owa""" +838 9 negative_sampler """basic""" +838 9 evaluator """rankbased""" +838 10 dataset """wn18rr""" +838 10 model """transe""" +838 10 loss """marginranking""" +838 10 regularizer """no""" +838 10 optimizer """adam""" +838 10 training_loop """owa""" +838 10 negative_sampler """basic""" +838 10 evaluator """rankbased""" +838 11 dataset """wn18rr""" +838 11 model """transe""" +838 11 loss """marginranking""" +838 11 regularizer """no""" +838 11 optimizer """adam""" +838 11 training_loop """owa""" +838 11 negative_sampler """basic""" +838 11 evaluator """rankbased""" +838 12 dataset """wn18rr""" +838 12 model """transe""" +838 12 loss """marginranking""" +838 12 regularizer """no""" +838 12 optimizer """adam""" +838 12 training_loop """owa""" +838 12 negative_sampler """basic""" +838 12 evaluator """rankbased""" +838 13 dataset """wn18rr""" +838 13 model """transe""" +838 13 loss """marginranking""" +838 13 regularizer """no""" +838 13 optimizer """adam""" +838 13 training_loop """owa""" +838 13 negative_sampler """basic""" +838 13 evaluator """rankbased""" +838 14 dataset """wn18rr""" +838 14 model """transe""" +838 14 loss """marginranking""" +838 14 regularizer """no""" +838 14 optimizer """adam""" +838 14 training_loop """owa""" +838 14 negative_sampler """basic""" +838 14 evaluator """rankbased""" +838 15 dataset """wn18rr""" +838 15 model """transe""" +838 15 loss """marginranking""" +838 15 regularizer """no""" +838 15 optimizer """adam""" +838 15 training_loop """owa""" +838 15 negative_sampler """basic""" +838 15 evaluator """rankbased""" +838 16 dataset """wn18rr""" +838 16 model """transe""" +838 16 loss """marginranking""" +838 16 regularizer """no""" +838 16 optimizer """adam""" +838 16 training_loop """owa""" +838 16 negative_sampler """basic""" +838 16 evaluator """rankbased""" +838 17 dataset """wn18rr""" +838 17 model """transe""" +838 17 
loss """marginranking""" +838 17 regularizer """no""" +838 17 optimizer """adam""" +838 17 training_loop """owa""" +838 17 negative_sampler """basic""" +838 17 evaluator """rankbased""" +838 18 dataset """wn18rr""" +838 18 model """transe""" +838 18 loss """marginranking""" +838 18 regularizer """no""" +838 18 optimizer """adam""" +838 18 training_loop """owa""" +838 18 negative_sampler """basic""" +838 18 evaluator """rankbased""" +838 19 dataset """wn18rr""" +838 19 model """transe""" +838 19 loss """marginranking""" +838 19 regularizer """no""" +838 19 optimizer """adam""" +838 19 training_loop """owa""" +838 19 negative_sampler """basic""" +838 19 evaluator """rankbased""" +838 20 dataset """wn18rr""" +838 20 model """transe""" +838 20 loss """marginranking""" +838 20 regularizer """no""" +838 20 optimizer """adam""" +838 20 training_loop """owa""" +838 20 negative_sampler """basic""" +838 20 evaluator """rankbased""" +838 21 dataset """wn18rr""" +838 21 model """transe""" +838 21 loss """marginranking""" +838 21 regularizer """no""" +838 21 optimizer """adam""" +838 21 training_loop """owa""" +838 21 negative_sampler """basic""" +838 21 evaluator """rankbased""" +838 22 dataset """wn18rr""" +838 22 model """transe""" +838 22 loss """marginranking""" +838 22 regularizer """no""" +838 22 optimizer """adam""" +838 22 training_loop """owa""" +838 22 negative_sampler """basic""" +838 22 evaluator """rankbased""" +838 23 dataset """wn18rr""" +838 23 model """transe""" +838 23 loss """marginranking""" +838 23 regularizer """no""" +838 23 optimizer """adam""" +838 23 training_loop """owa""" +838 23 negative_sampler """basic""" +838 23 evaluator """rankbased""" +838 24 dataset """wn18rr""" +838 24 model """transe""" +838 24 loss """marginranking""" +838 24 regularizer """no""" +838 24 optimizer """adam""" +838 24 training_loop """owa""" +838 24 negative_sampler """basic""" +838 24 evaluator """rankbased""" +838 25 dataset """wn18rr""" +838 25 model """transe""" +838 25 
loss """marginranking""" +838 25 regularizer """no""" +838 25 optimizer """adam""" +838 25 training_loop """owa""" +838 25 negative_sampler """basic""" +838 25 evaluator """rankbased""" +838 26 dataset """wn18rr""" +838 26 model """transe""" +838 26 loss """marginranking""" +838 26 regularizer """no""" +838 26 optimizer """adam""" +838 26 training_loop """owa""" +838 26 negative_sampler """basic""" +838 26 evaluator """rankbased""" +838 27 dataset """wn18rr""" +838 27 model """transe""" +838 27 loss """marginranking""" +838 27 regularizer """no""" +838 27 optimizer """adam""" +838 27 training_loop """owa""" +838 27 negative_sampler """basic""" +838 27 evaluator """rankbased""" +838 28 dataset """wn18rr""" +838 28 model """transe""" +838 28 loss """marginranking""" +838 28 regularizer """no""" +838 28 optimizer """adam""" +838 28 training_loop """owa""" +838 28 negative_sampler """basic""" +838 28 evaluator """rankbased""" +838 29 dataset """wn18rr""" +838 29 model """transe""" +838 29 loss """marginranking""" +838 29 regularizer """no""" +838 29 optimizer """adam""" +838 29 training_loop """owa""" +838 29 negative_sampler """basic""" +838 29 evaluator """rankbased""" +838 30 dataset """wn18rr""" +838 30 model """transe""" +838 30 loss """marginranking""" +838 30 regularizer """no""" +838 30 optimizer """adam""" +838 30 training_loop """owa""" +838 30 negative_sampler """basic""" +838 30 evaluator """rankbased""" +838 31 dataset """wn18rr""" +838 31 model """transe""" +838 31 loss """marginranking""" +838 31 regularizer """no""" +838 31 optimizer """adam""" +838 31 training_loop """owa""" +838 31 negative_sampler """basic""" +838 31 evaluator """rankbased""" +838 32 dataset """wn18rr""" +838 32 model """transe""" +838 32 loss """marginranking""" +838 32 regularizer """no""" +838 32 optimizer """adam""" +838 32 training_loop """owa""" +838 32 negative_sampler """basic""" +838 32 evaluator """rankbased""" +838 33 dataset """wn18rr""" +838 33 model """transe""" +838 33 
loss """marginranking""" +838 33 regularizer """no""" +838 33 optimizer """adam""" +838 33 training_loop """owa""" +838 33 negative_sampler """basic""" +838 33 evaluator """rankbased""" +838 34 dataset """wn18rr""" +838 34 model """transe""" +838 34 loss """marginranking""" +838 34 regularizer """no""" +838 34 optimizer """adam""" +838 34 training_loop """owa""" +838 34 negative_sampler """basic""" +838 34 evaluator """rankbased""" +838 35 dataset """wn18rr""" +838 35 model """transe""" +838 35 loss """marginranking""" +838 35 regularizer """no""" +838 35 optimizer """adam""" +838 35 training_loop """owa""" +838 35 negative_sampler """basic""" +838 35 evaluator """rankbased""" +838 36 dataset """wn18rr""" +838 36 model """transe""" +838 36 loss """marginranking""" +838 36 regularizer """no""" +838 36 optimizer """adam""" +838 36 training_loop """owa""" +838 36 negative_sampler """basic""" +838 36 evaluator """rankbased""" +838 37 dataset """wn18rr""" +838 37 model """transe""" +838 37 loss """marginranking""" +838 37 regularizer """no""" +838 37 optimizer """adam""" +838 37 training_loop """owa""" +838 37 negative_sampler """basic""" +838 37 evaluator """rankbased""" +838 38 dataset """wn18rr""" +838 38 model """transe""" +838 38 loss """marginranking""" +838 38 regularizer """no""" +838 38 optimizer """adam""" +838 38 training_loop """owa""" +838 38 negative_sampler """basic""" +838 38 evaluator """rankbased""" +838 39 dataset """wn18rr""" +838 39 model """transe""" +838 39 loss """marginranking""" +838 39 regularizer """no""" +838 39 optimizer """adam""" +838 39 training_loop """owa""" +838 39 negative_sampler """basic""" +838 39 evaluator """rankbased""" +838 40 dataset """wn18rr""" +838 40 model """transe""" +838 40 loss """marginranking""" +838 40 regularizer """no""" +838 40 optimizer """adam""" +838 40 training_loop """owa""" +838 40 negative_sampler """basic""" +838 40 evaluator """rankbased""" +838 41 dataset """wn18rr""" +838 41 model """transe""" +838 41 
loss """marginranking""" +838 41 regularizer """no""" +838 41 optimizer """adam""" +838 41 training_loop """owa""" +838 41 negative_sampler """basic""" +838 41 evaluator """rankbased""" +838 42 dataset """wn18rr""" +838 42 model """transe""" +838 42 loss """marginranking""" +838 42 regularizer """no""" +838 42 optimizer """adam""" +838 42 training_loop """owa""" +838 42 negative_sampler """basic""" +838 42 evaluator """rankbased""" +838 43 dataset """wn18rr""" +838 43 model """transe""" +838 43 loss """marginranking""" +838 43 regularizer """no""" +838 43 optimizer """adam""" +838 43 training_loop """owa""" +838 43 negative_sampler """basic""" +838 43 evaluator """rankbased""" +838 44 dataset """wn18rr""" +838 44 model """transe""" +838 44 loss """marginranking""" +838 44 regularizer """no""" +838 44 optimizer """adam""" +838 44 training_loop """owa""" +838 44 negative_sampler """basic""" +838 44 evaluator """rankbased""" +838 45 dataset """wn18rr""" +838 45 model """transe""" +838 45 loss """marginranking""" +838 45 regularizer """no""" +838 45 optimizer """adam""" +838 45 training_loop """owa""" +838 45 negative_sampler """basic""" +838 45 evaluator """rankbased""" +838 46 dataset """wn18rr""" +838 46 model """transe""" +838 46 loss """marginranking""" +838 46 regularizer """no""" +838 46 optimizer """adam""" +838 46 training_loop """owa""" +838 46 negative_sampler """basic""" +838 46 evaluator """rankbased""" +838 47 dataset """wn18rr""" +838 47 model """transe""" +838 47 loss """marginranking""" +838 47 regularizer """no""" +838 47 optimizer """adam""" +838 47 training_loop """owa""" +838 47 negative_sampler """basic""" +838 47 evaluator """rankbased""" +838 48 dataset """wn18rr""" +838 48 model """transe""" +838 48 loss """marginranking""" +838 48 regularizer """no""" +838 48 optimizer """adam""" +838 48 training_loop """owa""" +838 48 negative_sampler """basic""" +838 48 evaluator """rankbased""" +838 49 dataset """wn18rr""" +838 49 model """transe""" +838 49 
loss """marginranking""" +838 49 regularizer """no""" +838 49 optimizer """adam""" +838 49 training_loop """owa""" +838 49 negative_sampler """basic""" +838 49 evaluator """rankbased""" +838 50 dataset """wn18rr""" +838 50 model """transe""" +838 50 loss """marginranking""" +838 50 regularizer """no""" +838 50 optimizer """adam""" +838 50 training_loop """owa""" +838 50 negative_sampler """basic""" +838 50 evaluator """rankbased""" +838 51 dataset """wn18rr""" +838 51 model """transe""" +838 51 loss """marginranking""" +838 51 regularizer """no""" +838 51 optimizer """adam""" +838 51 training_loop """owa""" +838 51 negative_sampler """basic""" +838 51 evaluator """rankbased""" +838 52 dataset """wn18rr""" +838 52 model """transe""" +838 52 loss """marginranking""" +838 52 regularizer """no""" +838 52 optimizer """adam""" +838 52 training_loop """owa""" +838 52 negative_sampler """basic""" +838 52 evaluator """rankbased""" +838 53 dataset """wn18rr""" +838 53 model """transe""" +838 53 loss """marginranking""" +838 53 regularizer """no""" +838 53 optimizer """adam""" +838 53 training_loop """owa""" +838 53 negative_sampler """basic""" +838 53 evaluator """rankbased""" +838 54 dataset """wn18rr""" +838 54 model """transe""" +838 54 loss """marginranking""" +838 54 regularizer """no""" +838 54 optimizer """adam""" +838 54 training_loop """owa""" +838 54 negative_sampler """basic""" +838 54 evaluator """rankbased""" +838 55 dataset """wn18rr""" +838 55 model """transe""" +838 55 loss """marginranking""" +838 55 regularizer """no""" +838 55 optimizer """adam""" +838 55 training_loop """owa""" +838 55 negative_sampler """basic""" +838 55 evaluator """rankbased""" +838 56 dataset """wn18rr""" +838 56 model """transe""" +838 56 loss """marginranking""" +838 56 regularizer """no""" +838 56 optimizer """adam""" +838 56 training_loop """owa""" +838 56 negative_sampler """basic""" +838 56 evaluator """rankbased""" +838 57 dataset """wn18rr""" +838 57 model """transe""" +838 57 
loss """marginranking""" +838 57 regularizer """no""" +838 57 optimizer """adam""" +838 57 training_loop """owa""" +838 57 negative_sampler """basic""" +838 57 evaluator """rankbased""" +838 58 dataset """wn18rr""" +838 58 model """transe""" +838 58 loss """marginranking""" +838 58 regularizer """no""" +838 58 optimizer """adam""" +838 58 training_loop """owa""" +838 58 negative_sampler """basic""" +838 58 evaluator """rankbased""" +838 59 dataset """wn18rr""" +838 59 model """transe""" +838 59 loss """marginranking""" +838 59 regularizer """no""" +838 59 optimizer """adam""" +838 59 training_loop """owa""" +838 59 negative_sampler """basic""" +838 59 evaluator """rankbased""" +838 60 dataset """wn18rr""" +838 60 model """transe""" +838 60 loss """marginranking""" +838 60 regularizer """no""" +838 60 optimizer """adam""" +838 60 training_loop """owa""" +838 60 negative_sampler """basic""" +838 60 evaluator """rankbased""" +838 61 dataset """wn18rr""" +838 61 model """transe""" +838 61 loss """marginranking""" +838 61 regularizer """no""" +838 61 optimizer """adam""" +838 61 training_loop """owa""" +838 61 negative_sampler """basic""" +838 61 evaluator """rankbased""" +838 62 dataset """wn18rr""" +838 62 model """transe""" +838 62 loss """marginranking""" +838 62 regularizer """no""" +838 62 optimizer """adam""" +838 62 training_loop """owa""" +838 62 negative_sampler """basic""" +838 62 evaluator """rankbased""" +838 63 dataset """wn18rr""" +838 63 model """transe""" +838 63 loss """marginranking""" +838 63 regularizer """no""" +838 63 optimizer """adam""" +838 63 training_loop """owa""" +838 63 negative_sampler """basic""" +838 63 evaluator """rankbased""" +838 64 dataset """wn18rr""" +838 64 model """transe""" +838 64 loss """marginranking""" +838 64 regularizer """no""" +838 64 optimizer """adam""" +838 64 training_loop """owa""" +838 64 negative_sampler """basic""" +838 64 evaluator """rankbased""" +838 65 dataset """wn18rr""" +838 65 model """transe""" +838 65 
loss """marginranking""" +838 65 regularizer """no""" +838 65 optimizer """adam""" +838 65 training_loop """owa""" +838 65 negative_sampler """basic""" +838 65 evaluator """rankbased""" +838 66 dataset """wn18rr""" +838 66 model """transe""" +838 66 loss """marginranking""" +838 66 regularizer """no""" +838 66 optimizer """adam""" +838 66 training_loop """owa""" +838 66 negative_sampler """basic""" +838 66 evaluator """rankbased""" +838 67 dataset """wn18rr""" +838 67 model """transe""" +838 67 loss """marginranking""" +838 67 regularizer """no""" +838 67 optimizer """adam""" +838 67 training_loop """owa""" +838 67 negative_sampler """basic""" +838 67 evaluator """rankbased""" +838 68 dataset """wn18rr""" +838 68 model """transe""" +838 68 loss """marginranking""" +838 68 regularizer """no""" +838 68 optimizer """adam""" +838 68 training_loop """owa""" +838 68 negative_sampler """basic""" +838 68 evaluator """rankbased""" +838 69 dataset """wn18rr""" +838 69 model """transe""" +838 69 loss """marginranking""" +838 69 regularizer """no""" +838 69 optimizer """adam""" +838 69 training_loop """owa""" +838 69 negative_sampler """basic""" +838 69 evaluator """rankbased""" +838 70 dataset """wn18rr""" +838 70 model """transe""" +838 70 loss """marginranking""" +838 70 regularizer """no""" +838 70 optimizer """adam""" +838 70 training_loop """owa""" +838 70 negative_sampler """basic""" +838 70 evaluator """rankbased""" +838 71 dataset """wn18rr""" +838 71 model """transe""" +838 71 loss """marginranking""" +838 71 regularizer """no""" +838 71 optimizer """adam""" +838 71 training_loop """owa""" +838 71 negative_sampler """basic""" +838 71 evaluator """rankbased""" +838 72 dataset """wn18rr""" +838 72 model """transe""" +838 72 loss """marginranking""" +838 72 regularizer """no""" +838 72 optimizer """adam""" +838 72 training_loop """owa""" +838 72 negative_sampler """basic""" +838 72 evaluator """rankbased""" +838 73 dataset """wn18rr""" +838 73 model """transe""" +838 73 
loss """marginranking""" +838 73 regularizer """no""" +838 73 optimizer """adam""" +838 73 training_loop """owa""" +838 73 negative_sampler """basic""" +838 73 evaluator """rankbased""" +838 74 dataset """wn18rr""" +838 74 model """transe""" +838 74 loss """marginranking""" +838 74 regularizer """no""" +838 74 optimizer """adam""" +838 74 training_loop """owa""" +838 74 negative_sampler """basic""" +838 74 evaluator """rankbased""" +838 75 dataset """wn18rr""" +838 75 model """transe""" +838 75 loss """marginranking""" +838 75 regularizer """no""" +838 75 optimizer """adam""" +838 75 training_loop """owa""" +838 75 negative_sampler """basic""" +838 75 evaluator """rankbased""" +838 76 dataset """wn18rr""" +838 76 model """transe""" +838 76 loss """marginranking""" +838 76 regularizer """no""" +838 76 optimizer """adam""" +838 76 training_loop """owa""" +838 76 negative_sampler """basic""" +838 76 evaluator """rankbased""" +838 77 dataset """wn18rr""" +838 77 model """transe""" +838 77 loss """marginranking""" +838 77 regularizer """no""" +838 77 optimizer """adam""" +838 77 training_loop """owa""" +838 77 negative_sampler """basic""" +838 77 evaluator """rankbased""" +838 78 dataset """wn18rr""" +838 78 model """transe""" +838 78 loss """marginranking""" +838 78 regularizer """no""" +838 78 optimizer """adam""" +838 78 training_loop """owa""" +838 78 negative_sampler """basic""" +838 78 evaluator """rankbased""" +838 79 dataset """wn18rr""" +838 79 model """transe""" +838 79 loss """marginranking""" +838 79 regularizer """no""" +838 79 optimizer """adam""" +838 79 training_loop """owa""" +838 79 negative_sampler """basic""" +838 79 evaluator """rankbased""" +838 80 dataset """wn18rr""" +838 80 model """transe""" +838 80 loss """marginranking""" +838 80 regularizer """no""" +838 80 optimizer """adam""" +838 80 training_loop """owa""" +838 80 negative_sampler """basic""" +838 80 evaluator """rankbased""" +838 81 dataset """wn18rr""" +838 81 model """transe""" +838 81 
loss """marginranking""" +838 81 regularizer """no""" +838 81 optimizer """adam""" +838 81 training_loop """owa""" +838 81 negative_sampler """basic""" +838 81 evaluator """rankbased""" +838 82 dataset """wn18rr""" +838 82 model """transe""" +838 82 loss """marginranking""" +838 82 regularizer """no""" +838 82 optimizer """adam""" +838 82 training_loop """owa""" +838 82 negative_sampler """basic""" +838 82 evaluator """rankbased""" +838 83 dataset """wn18rr""" +838 83 model """transe""" +838 83 loss """marginranking""" +838 83 regularizer """no""" +838 83 optimizer """adam""" +838 83 training_loop """owa""" +838 83 negative_sampler """basic""" +838 83 evaluator """rankbased""" +838 84 dataset """wn18rr""" +838 84 model """transe""" +838 84 loss """marginranking""" +838 84 regularizer """no""" +838 84 optimizer """adam""" +838 84 training_loop """owa""" +838 84 negative_sampler """basic""" +838 84 evaluator """rankbased""" +838 85 dataset """wn18rr""" +838 85 model """transe""" +838 85 loss """marginranking""" +838 85 regularizer """no""" +838 85 optimizer """adam""" +838 85 training_loop """owa""" +838 85 negative_sampler """basic""" +838 85 evaluator """rankbased""" +838 86 dataset """wn18rr""" +838 86 model """transe""" +838 86 loss """marginranking""" +838 86 regularizer """no""" +838 86 optimizer """adam""" +838 86 training_loop """owa""" +838 86 negative_sampler """basic""" +838 86 evaluator """rankbased""" +838 87 dataset """wn18rr""" +838 87 model """transe""" +838 87 loss """marginranking""" +838 87 regularizer """no""" +838 87 optimizer """adam""" +838 87 training_loop """owa""" +838 87 negative_sampler """basic""" +838 87 evaluator """rankbased""" +838 88 dataset """wn18rr""" +838 88 model """transe""" +838 88 loss """marginranking""" +838 88 regularizer """no""" +838 88 optimizer """adam""" +838 88 training_loop """owa""" +838 88 negative_sampler """basic""" +838 88 evaluator """rankbased""" +838 89 dataset """wn18rr""" +838 89 model """transe""" +838 89 
loss """marginranking""" +838 89 regularizer """no""" +838 89 optimizer """adam""" +838 89 training_loop """owa""" +838 89 negative_sampler """basic""" +838 89 evaluator """rankbased""" +838 90 dataset """wn18rr""" +838 90 model """transe""" +838 90 loss """marginranking""" +838 90 regularizer """no""" +838 90 optimizer """adam""" +838 90 training_loop """owa""" +838 90 negative_sampler """basic""" +838 90 evaluator """rankbased""" +838 91 dataset """wn18rr""" +838 91 model """transe""" +838 91 loss """marginranking""" +838 91 regularizer """no""" +838 91 optimizer """adam""" +838 91 training_loop """owa""" +838 91 negative_sampler """basic""" +838 91 evaluator """rankbased""" +838 92 dataset """wn18rr""" +838 92 model """transe""" +838 92 loss """marginranking""" +838 92 regularizer """no""" +838 92 optimizer """adam""" +838 92 training_loop """owa""" +838 92 negative_sampler """basic""" +838 92 evaluator """rankbased""" +838 93 dataset """wn18rr""" +838 93 model """transe""" +838 93 loss """marginranking""" +838 93 regularizer """no""" +838 93 optimizer """adam""" +838 93 training_loop """owa""" +838 93 negative_sampler """basic""" +838 93 evaluator """rankbased""" +838 94 dataset """wn18rr""" +838 94 model """transe""" +838 94 loss """marginranking""" +838 94 regularizer """no""" +838 94 optimizer """adam""" +838 94 training_loop """owa""" +838 94 negative_sampler """basic""" +838 94 evaluator """rankbased""" +838 95 dataset """wn18rr""" +838 95 model """transe""" +838 95 loss """marginranking""" +838 95 regularizer """no""" +838 95 optimizer """adam""" +838 95 training_loop """owa""" +838 95 negative_sampler """basic""" +838 95 evaluator """rankbased""" +838 96 dataset """wn18rr""" +838 96 model """transe""" +838 96 loss """marginranking""" +838 96 regularizer """no""" +838 96 optimizer """adam""" +838 96 training_loop """owa""" +838 96 negative_sampler """basic""" +838 96 evaluator """rankbased""" +838 97 dataset """wn18rr""" +838 97 model """transe""" +838 97 
loss """marginranking""" +838 97 regularizer """no""" +838 97 optimizer """adam""" +838 97 training_loop """owa""" +838 97 negative_sampler """basic""" +838 97 evaluator """rankbased""" +838 98 dataset """wn18rr""" +838 98 model """transe""" +838 98 loss """marginranking""" +838 98 regularizer """no""" +838 98 optimizer """adam""" +838 98 training_loop """owa""" +838 98 negative_sampler """basic""" +838 98 evaluator """rankbased""" +838 99 dataset """wn18rr""" +838 99 model """transe""" +838 99 loss """marginranking""" +838 99 regularizer """no""" +838 99 optimizer """adam""" +838 99 training_loop """owa""" +838 99 negative_sampler """basic""" +838 99 evaluator """rankbased""" +838 100 dataset """wn18rr""" +838 100 model """transe""" +838 100 loss """marginranking""" +838 100 regularizer """no""" +838 100 optimizer """adam""" +838 100 training_loop """owa""" +838 100 negative_sampler """basic""" +838 100 evaluator """rankbased""" +839 1 model.embedding_dim 2.0 +839 1 model.scoring_fct_norm 1.0 +839 1 loss.margin 24.67697094500776 +839 1 loss.adversarial_temperature 0.7178612177545425 +839 1 optimizer.lr 0.0012690472182337502 +839 1 negative_sampler.num_negs_per_pos 73.0 +839 1 training.batch_size 0.0 +839 2 model.embedding_dim 0.0 +839 2 model.scoring_fct_norm 1.0 +839 2 loss.margin 23.462104909024404 +839 2 loss.adversarial_temperature 0.9986842196099048 +839 2 optimizer.lr 0.002352286508911314 +839 2 negative_sampler.num_negs_per_pos 67.0 +839 2 training.batch_size 1.0 +839 3 model.embedding_dim 2.0 +839 3 model.scoring_fct_norm 2.0 +839 3 loss.margin 24.27135725755136 +839 3 loss.adversarial_temperature 0.8167214510995536 +839 3 optimizer.lr 0.01471187842540798 +839 3 negative_sampler.num_negs_per_pos 64.0 +839 3 training.batch_size 1.0 +839 4 model.embedding_dim 0.0 +839 4 model.scoring_fct_norm 2.0 +839 4 loss.margin 13.461448560028675 +839 4 loss.adversarial_temperature 0.7717067148267899 +839 4 optimizer.lr 0.001994461604480288 +839 4 
negative_sampler.num_negs_per_pos 63.0 +839 4 training.batch_size 1.0 +839 5 model.embedding_dim 2.0 +839 5 model.scoring_fct_norm 1.0 +839 5 loss.margin 3.759014986715186 +839 5 loss.adversarial_temperature 0.90014509978331 +839 5 optimizer.lr 0.027805637050133485 +839 5 negative_sampler.num_negs_per_pos 9.0 +839 5 training.batch_size 1.0 +839 6 model.embedding_dim 2.0 +839 6 model.scoring_fct_norm 2.0 +839 6 loss.margin 25.76494964408774 +839 6 loss.adversarial_temperature 0.7521311827220127 +839 6 optimizer.lr 0.005843219306863862 +839 6 negative_sampler.num_negs_per_pos 92.0 +839 6 training.batch_size 0.0 +839 7 model.embedding_dim 0.0 +839 7 model.scoring_fct_norm 2.0 +839 7 loss.margin 20.332447481857432 +839 7 loss.adversarial_temperature 0.6360286515150443 +839 7 optimizer.lr 0.05292235534914316 +839 7 negative_sampler.num_negs_per_pos 70.0 +839 7 training.batch_size 0.0 +839 8 model.embedding_dim 1.0 +839 8 model.scoring_fct_norm 2.0 +839 8 loss.margin 15.764745532802372 +839 8 loss.adversarial_temperature 0.1424212616963734 +839 8 optimizer.lr 0.0013718514142428992 +839 8 negative_sampler.num_negs_per_pos 25.0 +839 8 training.batch_size 0.0 +839 9 model.embedding_dim 0.0 +839 9 model.scoring_fct_norm 2.0 +839 9 loss.margin 13.587350769076874 +839 9 loss.adversarial_temperature 0.8053825952545273 +839 9 optimizer.lr 0.012198334784568499 +839 9 negative_sampler.num_negs_per_pos 77.0 +839 9 training.batch_size 2.0 +839 10 model.embedding_dim 0.0 +839 10 model.scoring_fct_norm 1.0 +839 10 loss.margin 7.782342910187904 +839 10 loss.adversarial_temperature 0.6374652528341845 +839 10 optimizer.lr 0.04427973593690996 +839 10 negative_sampler.num_negs_per_pos 74.0 +839 10 training.batch_size 1.0 +839 11 model.embedding_dim 1.0 +839 11 model.scoring_fct_norm 1.0 +839 11 loss.margin 14.088348288745346 +839 11 loss.adversarial_temperature 0.37151925392475493 +839 11 optimizer.lr 0.0019481589960222388 +839 11 negative_sampler.num_negs_per_pos 31.0 +839 11 
training.batch_size 2.0 +839 12 model.embedding_dim 1.0 +839 12 model.scoring_fct_norm 2.0 +839 12 loss.margin 17.215587551238617 +839 12 loss.adversarial_temperature 0.1727401765322799 +839 12 optimizer.lr 0.0010979362285657467 +839 12 negative_sampler.num_negs_per_pos 76.0 +839 12 training.batch_size 1.0 +839 13 model.embedding_dim 1.0 +839 13 model.scoring_fct_norm 1.0 +839 13 loss.margin 1.3738361699894972 +839 13 loss.adversarial_temperature 0.10429337389736681 +839 13 optimizer.lr 0.08720734259578306 +839 13 negative_sampler.num_negs_per_pos 56.0 +839 13 training.batch_size 0.0 +839 14 model.embedding_dim 1.0 +839 14 model.scoring_fct_norm 1.0 +839 14 loss.margin 5.303449302559093 +839 14 loss.adversarial_temperature 0.4779036028701872 +839 14 optimizer.lr 0.002113497572773753 +839 14 negative_sampler.num_negs_per_pos 42.0 +839 14 training.batch_size 0.0 +839 15 model.embedding_dim 2.0 +839 15 model.scoring_fct_norm 1.0 +839 15 loss.margin 21.16357802787734 +839 15 loss.adversarial_temperature 0.8121292817743617 +839 15 optimizer.lr 0.03583712044766279 +839 15 negative_sampler.num_negs_per_pos 63.0 +839 15 training.batch_size 1.0 +839 16 model.embedding_dim 2.0 +839 16 model.scoring_fct_norm 2.0 +839 16 loss.margin 15.805605995095993 +839 16 loss.adversarial_temperature 0.20033802534028033 +839 16 optimizer.lr 0.010918396719528909 +839 16 negative_sampler.num_negs_per_pos 27.0 +839 16 training.batch_size 0.0 +839 17 model.embedding_dim 1.0 +839 17 model.scoring_fct_norm 2.0 +839 17 loss.margin 15.289064327929188 +839 17 loss.adversarial_temperature 0.857595478520776 +839 17 optimizer.lr 0.002534244505917073 +839 17 negative_sampler.num_negs_per_pos 36.0 +839 17 training.batch_size 2.0 +839 18 model.embedding_dim 1.0 +839 18 model.scoring_fct_norm 2.0 +839 18 loss.margin 17.927039650784376 +839 18 loss.adversarial_temperature 0.9516244947494792 +839 18 optimizer.lr 0.006078698714282542 +839 18 negative_sampler.num_negs_per_pos 34.0 +839 18 training.batch_size 
1.0 +839 19 model.embedding_dim 1.0 +839 19 model.scoring_fct_norm 2.0 +839 19 loss.margin 26.63484260772072 +839 19 loss.adversarial_temperature 0.32333311885084065 +839 19 optimizer.lr 0.09241483897013021 +839 19 negative_sampler.num_negs_per_pos 68.0 +839 19 training.batch_size 1.0 +839 20 model.embedding_dim 2.0 +839 20 model.scoring_fct_norm 2.0 +839 20 loss.margin 13.36923996302946 +839 20 loss.adversarial_temperature 0.7364958769527994 +839 20 optimizer.lr 0.004241734974997204 +839 20 negative_sampler.num_negs_per_pos 12.0 +839 20 training.batch_size 1.0 +839 21 model.embedding_dim 2.0 +839 21 model.scoring_fct_norm 2.0 +839 21 loss.margin 22.242282365543083 +839 21 loss.adversarial_temperature 0.31379943237267344 +839 21 optimizer.lr 0.07471902801113096 +839 21 negative_sampler.num_negs_per_pos 40.0 +839 21 training.batch_size 0.0 +839 22 model.embedding_dim 0.0 +839 22 model.scoring_fct_norm 2.0 +839 22 loss.margin 23.499492150573126 +839 22 loss.adversarial_temperature 0.442018782246622 +839 22 optimizer.lr 0.0019099323332075857 +839 22 negative_sampler.num_negs_per_pos 49.0 +839 22 training.batch_size 0.0 +839 23 model.embedding_dim 1.0 +839 23 model.scoring_fct_norm 2.0 +839 23 loss.margin 25.274168417599018 +839 23 loss.adversarial_temperature 0.9357666992625412 +839 23 optimizer.lr 0.007943510258818797 +839 23 negative_sampler.num_negs_per_pos 42.0 +839 23 training.batch_size 0.0 +839 24 model.embedding_dim 0.0 +839 24 model.scoring_fct_norm 2.0 +839 24 loss.margin 27.974797423649214 +839 24 loss.adversarial_temperature 0.664716570439038 +839 24 optimizer.lr 0.03807839328716045 +839 24 negative_sampler.num_negs_per_pos 75.0 +839 24 training.batch_size 0.0 +839 25 model.embedding_dim 2.0 +839 25 model.scoring_fct_norm 2.0 +839 25 loss.margin 6.427307187790926 +839 25 loss.adversarial_temperature 0.6333167492678586 +839 25 optimizer.lr 0.012832936084106813 +839 25 negative_sampler.num_negs_per_pos 57.0 +839 25 training.batch_size 1.0 +839 26 
model.embedding_dim 1.0 +839 26 model.scoring_fct_norm 1.0 +839 26 loss.margin 17.05600839424677 +839 26 loss.adversarial_temperature 0.8445525471338409 +839 26 optimizer.lr 0.017929399181656355 +839 26 negative_sampler.num_negs_per_pos 87.0 +839 26 training.batch_size 2.0 +839 27 model.embedding_dim 2.0 +839 27 model.scoring_fct_norm 2.0 +839 27 loss.margin 16.201453077522874 +839 27 loss.adversarial_temperature 0.5975659260090306 +839 27 optimizer.lr 0.007828232941323045 +839 27 negative_sampler.num_negs_per_pos 75.0 +839 27 training.batch_size 1.0 +839 28 model.embedding_dim 0.0 +839 28 model.scoring_fct_norm 2.0 +839 28 loss.margin 22.400081215517112 +839 28 loss.adversarial_temperature 0.24469732731762112 +839 28 optimizer.lr 0.0024173758758223307 +839 28 negative_sampler.num_negs_per_pos 94.0 +839 28 training.batch_size 0.0 +839 29 model.embedding_dim 0.0 +839 29 model.scoring_fct_norm 1.0 +839 29 loss.margin 15.037309071897786 +839 29 loss.adversarial_temperature 0.9304108927147698 +839 29 optimizer.lr 0.03477136019852783 +839 29 negative_sampler.num_negs_per_pos 50.0 +839 29 training.batch_size 0.0 +839 30 model.embedding_dim 0.0 +839 30 model.scoring_fct_norm 1.0 +839 30 loss.margin 8.478492459911624 +839 30 loss.adversarial_temperature 0.17276464290909405 +839 30 optimizer.lr 0.005503027275044978 +839 30 negative_sampler.num_negs_per_pos 2.0 +839 30 training.batch_size 0.0 +839 31 model.embedding_dim 2.0 +839 31 model.scoring_fct_norm 2.0 +839 31 loss.margin 7.226970523231203 +839 31 loss.adversarial_temperature 0.8186926550162485 +839 31 optimizer.lr 0.02114916404775056 +839 31 negative_sampler.num_negs_per_pos 73.0 +839 31 training.batch_size 0.0 +839 32 model.embedding_dim 1.0 +839 32 model.scoring_fct_norm 2.0 +839 32 loss.margin 11.17542611286737 +839 32 loss.adversarial_temperature 0.18038365727564815 +839 32 optimizer.lr 0.01580400433860255 +839 32 negative_sampler.num_negs_per_pos 28.0 +839 32 training.batch_size 1.0 +839 33 model.embedding_dim 
1.0 +839 33 model.scoring_fct_norm 2.0 +839 33 loss.margin 6.341913161673025 +839 33 loss.adversarial_temperature 0.970175511318416 +839 33 optimizer.lr 0.022905494825703872 +839 33 negative_sampler.num_negs_per_pos 23.0 +839 33 training.batch_size 2.0 +839 34 model.embedding_dim 0.0 +839 34 model.scoring_fct_norm 1.0 +839 34 loss.margin 18.770813933083907 +839 34 loss.adversarial_temperature 0.9962824814717215 +839 34 optimizer.lr 0.0023253516613441472 +839 34 negative_sampler.num_negs_per_pos 17.0 +839 34 training.batch_size 1.0 +839 35 model.embedding_dim 1.0 +839 35 model.scoring_fct_norm 1.0 +839 35 loss.margin 25.943684366319516 +839 35 loss.adversarial_temperature 0.142656863092055 +839 35 optimizer.lr 0.0017683166893737833 +839 35 negative_sampler.num_negs_per_pos 21.0 +839 35 training.batch_size 1.0 +839 36 model.embedding_dim 0.0 +839 36 model.scoring_fct_norm 2.0 +839 36 loss.margin 12.958016350412409 +839 36 loss.adversarial_temperature 0.8695853575075138 +839 36 optimizer.lr 0.016327102797080606 +839 36 negative_sampler.num_negs_per_pos 76.0 +839 36 training.batch_size 0.0 +839 37 model.embedding_dim 0.0 +839 37 model.scoring_fct_norm 2.0 +839 37 loss.margin 16.20421179327101 +839 37 loss.adversarial_temperature 0.8578008894382219 +839 37 optimizer.lr 0.00568003438139598 +839 37 negative_sampler.num_negs_per_pos 25.0 +839 37 training.batch_size 2.0 +839 38 model.embedding_dim 1.0 +839 38 model.scoring_fct_norm 1.0 +839 38 loss.margin 14.787013595802765 +839 38 loss.adversarial_temperature 0.6393868586763599 +839 38 optimizer.lr 0.0016581427431971024 +839 38 negative_sampler.num_negs_per_pos 2.0 +839 38 training.batch_size 0.0 +839 39 model.embedding_dim 0.0 +839 39 model.scoring_fct_norm 2.0 +839 39 loss.margin 7.387069273853271 +839 39 loss.adversarial_temperature 0.33688017344350657 +839 39 optimizer.lr 0.008375678981375563 +839 39 negative_sampler.num_negs_per_pos 30.0 +839 39 training.batch_size 2.0 +839 40 model.embedding_dim 1.0 +839 40 
model.scoring_fct_norm 2.0 +839 40 loss.margin 17.676866432157535 +839 40 loss.adversarial_temperature 0.762082919959476 +839 40 optimizer.lr 0.019071293742536088 +839 40 negative_sampler.num_negs_per_pos 41.0 +839 40 training.batch_size 1.0 +839 41 model.embedding_dim 2.0 +839 41 model.scoring_fct_norm 2.0 +839 41 loss.margin 6.177611733283406 +839 41 loss.adversarial_temperature 0.14552066814451486 +839 41 optimizer.lr 0.047673633797873455 +839 41 negative_sampler.num_negs_per_pos 85.0 +839 41 training.batch_size 1.0 +839 42 model.embedding_dim 2.0 +839 42 model.scoring_fct_norm 1.0 +839 42 loss.margin 25.444247217095544 +839 42 loss.adversarial_temperature 0.6928650459394415 +839 42 optimizer.lr 0.004676388143186361 +839 42 negative_sampler.num_negs_per_pos 52.0 +839 42 training.batch_size 2.0 +839 43 model.embedding_dim 0.0 +839 43 model.scoring_fct_norm 1.0 +839 43 loss.margin 27.876374258523825 +839 43 loss.adversarial_temperature 0.9148636166247578 +839 43 optimizer.lr 0.03835784485645181 +839 43 negative_sampler.num_negs_per_pos 89.0 +839 43 training.batch_size 0.0 +839 44 model.embedding_dim 2.0 +839 44 model.scoring_fct_norm 2.0 +839 44 loss.margin 7.19368977443558 +839 44 loss.adversarial_temperature 0.870613979729992 +839 44 optimizer.lr 0.0016489180215498785 +839 44 negative_sampler.num_negs_per_pos 81.0 +839 44 training.batch_size 0.0 +839 45 model.embedding_dim 1.0 +839 45 model.scoring_fct_norm 2.0 +839 45 loss.margin 18.346206685778128 +839 45 loss.adversarial_temperature 0.27212079619574214 +839 45 optimizer.lr 0.001762449212090928 +839 45 negative_sampler.num_negs_per_pos 9.0 +839 45 training.batch_size 0.0 +839 46 model.embedding_dim 1.0 +839 46 model.scoring_fct_norm 1.0 +839 46 loss.margin 14.138616247904366 +839 46 loss.adversarial_temperature 0.1511698879642781 +839 46 optimizer.lr 0.01941520736614365 +839 46 negative_sampler.num_negs_per_pos 57.0 +839 46 training.batch_size 0.0 +839 47 model.embedding_dim 1.0 +839 47 model.scoring_fct_norm 
2.0 +839 47 loss.margin 8.308535332180442 +839 47 loss.adversarial_temperature 0.6549543696081417 +839 47 optimizer.lr 0.004771018049096476 +839 47 negative_sampler.num_negs_per_pos 10.0 +839 47 training.batch_size 2.0 +839 48 model.embedding_dim 0.0 +839 48 model.scoring_fct_norm 1.0 +839 48 loss.margin 1.2444549161244336 +839 48 loss.adversarial_temperature 0.2998854243818686 +839 48 optimizer.lr 0.008461578438567112 +839 48 negative_sampler.num_negs_per_pos 74.0 +839 48 training.batch_size 0.0 +839 49 model.embedding_dim 2.0 +839 49 model.scoring_fct_norm 2.0 +839 49 loss.margin 3.8597171381195836 +839 49 loss.adversarial_temperature 0.9801367612916724 +839 49 optimizer.lr 0.00446140977096691 +839 49 negative_sampler.num_negs_per_pos 34.0 +839 49 training.batch_size 1.0 +839 50 model.embedding_dim 1.0 +839 50 model.scoring_fct_norm 2.0 +839 50 loss.margin 20.35700292039203 +839 50 loss.adversarial_temperature 0.9094228785400086 +839 50 optimizer.lr 0.0035339889669551007 +839 50 negative_sampler.num_negs_per_pos 14.0 +839 50 training.batch_size 0.0 +839 51 model.embedding_dim 0.0 +839 51 model.scoring_fct_norm 2.0 +839 51 loss.margin 9.086854256815046 +839 51 loss.adversarial_temperature 0.10490721108446938 +839 51 optimizer.lr 0.04453786041595186 +839 51 negative_sampler.num_negs_per_pos 43.0 +839 51 training.batch_size 1.0 +839 52 model.embedding_dim 2.0 +839 52 model.scoring_fct_norm 1.0 +839 52 loss.margin 23.098657472335397 +839 52 loss.adversarial_temperature 0.4457309254661309 +839 52 optimizer.lr 0.00982423186757845 +839 52 negative_sampler.num_negs_per_pos 37.0 +839 52 training.batch_size 1.0 +839 53 model.embedding_dim 0.0 +839 53 model.scoring_fct_norm 1.0 +839 53 loss.margin 12.632574023887843 +839 53 loss.adversarial_temperature 0.9261074656734745 +839 53 optimizer.lr 0.0024106935962761116 +839 53 negative_sampler.num_negs_per_pos 79.0 +839 53 training.batch_size 2.0 +839 54 model.embedding_dim 2.0 +839 54 model.scoring_fct_norm 1.0 +839 54 
loss.margin 11.001470456666578 +839 54 loss.adversarial_temperature 0.9248140662667291 +839 54 optimizer.lr 0.04012864490738501 +839 54 negative_sampler.num_negs_per_pos 12.0 +839 54 training.batch_size 2.0 +839 55 model.embedding_dim 2.0 +839 55 model.scoring_fct_norm 2.0 +839 55 loss.margin 3.4519387496966862 +839 55 loss.adversarial_temperature 0.21678870929058158 +839 55 optimizer.lr 0.0020159594647358578 +839 55 negative_sampler.num_negs_per_pos 47.0 +839 55 training.batch_size 2.0 +839 56 model.embedding_dim 2.0 +839 56 model.scoring_fct_norm 1.0 +839 56 loss.margin 28.66482916264977 +839 56 loss.adversarial_temperature 0.9991233633371509 +839 56 optimizer.lr 0.053931648209769985 +839 56 negative_sampler.num_negs_per_pos 41.0 +839 56 training.batch_size 1.0 +839 57 model.embedding_dim 2.0 +839 57 model.scoring_fct_norm 1.0 +839 57 loss.margin 20.412748330813617 +839 57 loss.adversarial_temperature 0.8729150152667459 +839 57 optimizer.lr 0.00247041111166068 +839 57 negative_sampler.num_negs_per_pos 51.0 +839 57 training.batch_size 0.0 +839 58 model.embedding_dim 0.0 +839 58 model.scoring_fct_norm 1.0 +839 58 loss.margin 23.637187686086612 +839 58 loss.adversarial_temperature 0.6078803949685112 +839 58 optimizer.lr 0.005306956263948923 +839 58 negative_sampler.num_negs_per_pos 34.0 +839 58 training.batch_size 2.0 +839 59 model.embedding_dim 1.0 +839 59 model.scoring_fct_norm 2.0 +839 59 loss.margin 12.335653275742771 +839 59 loss.adversarial_temperature 0.8693235183659211 +839 59 optimizer.lr 0.003108174888021655 +839 59 negative_sampler.num_negs_per_pos 61.0 +839 59 training.batch_size 0.0 +839 60 model.embedding_dim 0.0 +839 60 model.scoring_fct_norm 1.0 +839 60 loss.margin 13.527113483121553 +839 60 loss.adversarial_temperature 0.6673555054728488 +839 60 optimizer.lr 0.004411153124053455 +839 60 negative_sampler.num_negs_per_pos 29.0 +839 60 training.batch_size 0.0 +839 61 model.embedding_dim 0.0 +839 61 model.scoring_fct_norm 2.0 +839 61 loss.margin 
10.567456308676666 +839 61 loss.adversarial_temperature 0.4201215153038047 +839 61 optimizer.lr 0.03357046137250594 +839 61 negative_sampler.num_negs_per_pos 23.0 +839 61 training.batch_size 0.0 +839 62 model.embedding_dim 2.0 +839 62 model.scoring_fct_norm 2.0 +839 62 loss.margin 24.179295271463175 +839 62 loss.adversarial_temperature 0.5806908601386983 +839 62 optimizer.lr 0.07438660858061415 +839 62 negative_sampler.num_negs_per_pos 36.0 +839 62 training.batch_size 2.0 +839 63 model.embedding_dim 0.0 +839 63 model.scoring_fct_norm 1.0 +839 63 loss.margin 3.6051502952344796 +839 63 loss.adversarial_temperature 0.6514197995035217 +839 63 optimizer.lr 0.025889051737387637 +839 63 negative_sampler.num_negs_per_pos 68.0 +839 63 training.batch_size 2.0 +839 64 model.embedding_dim 0.0 +839 64 model.scoring_fct_norm 2.0 +839 64 loss.margin 11.01619597756341 +839 64 loss.adversarial_temperature 0.4951160851483053 +839 64 optimizer.lr 0.001797929619878902 +839 64 negative_sampler.num_negs_per_pos 62.0 +839 64 training.batch_size 2.0 +839 65 model.embedding_dim 1.0 +839 65 model.scoring_fct_norm 1.0 +839 65 loss.margin 19.17267876148556 +839 65 loss.adversarial_temperature 0.5695200199368764 +839 65 optimizer.lr 0.03248774668580669 +839 65 negative_sampler.num_negs_per_pos 44.0 +839 65 training.batch_size 0.0 +839 66 model.embedding_dim 0.0 +839 66 model.scoring_fct_norm 2.0 +839 66 loss.margin 13.868693313641812 +839 66 loss.adversarial_temperature 0.86130202337124 +839 66 optimizer.lr 0.02370617370166206 +839 66 negative_sampler.num_negs_per_pos 89.0 +839 66 training.batch_size 1.0 +839 67 model.embedding_dim 0.0 +839 67 model.scoring_fct_norm 2.0 +839 67 loss.margin 6.677200965866981 +839 67 loss.adversarial_temperature 0.5884669388283623 +839 67 optimizer.lr 0.0035181811435634172 +839 67 negative_sampler.num_negs_per_pos 7.0 +839 67 training.batch_size 1.0 +839 68 model.embedding_dim 2.0 +839 68 model.scoring_fct_norm 2.0 +839 68 loss.margin 17.670128802414087 +839 68 
loss.adversarial_temperature 0.9256487412577257 +839 68 optimizer.lr 0.0016340233268533916 +839 68 negative_sampler.num_negs_per_pos 43.0 +839 68 training.batch_size 0.0 +839 69 model.embedding_dim 1.0 +839 69 model.scoring_fct_norm 1.0 +839 69 loss.margin 11.688877430571848 +839 69 loss.adversarial_temperature 0.9147565411734823 +839 69 optimizer.lr 0.009202892593097084 +839 69 negative_sampler.num_negs_per_pos 99.0 +839 69 training.batch_size 0.0 +839 70 model.embedding_dim 1.0 +839 70 model.scoring_fct_norm 2.0 +839 70 loss.margin 5.123399672107035 +839 70 loss.adversarial_temperature 0.6893675003365437 +839 70 optimizer.lr 0.0010113046857280375 +839 70 negative_sampler.num_negs_per_pos 57.0 +839 70 training.batch_size 0.0 +839 71 model.embedding_dim 2.0 +839 71 model.scoring_fct_norm 1.0 +839 71 loss.margin 11.258233294031516 +839 71 loss.adversarial_temperature 0.669560656550837 +839 71 optimizer.lr 0.008219710896152402 +839 71 negative_sampler.num_negs_per_pos 75.0 +839 71 training.batch_size 2.0 +839 72 model.embedding_dim 0.0 +839 72 model.scoring_fct_norm 1.0 +839 72 loss.margin 25.842937294331573 +839 72 loss.adversarial_temperature 0.14188591520651558 +839 72 optimizer.lr 0.052469097701597134 +839 72 negative_sampler.num_negs_per_pos 93.0 +839 72 training.batch_size 1.0 +839 73 model.embedding_dim 0.0 +839 73 model.scoring_fct_norm 2.0 +839 73 loss.margin 10.833353319371241 +839 73 loss.adversarial_temperature 0.3504838810470136 +839 73 optimizer.lr 0.021969862033371958 +839 73 negative_sampler.num_negs_per_pos 1.0 +839 73 training.batch_size 1.0 +839 74 model.embedding_dim 0.0 +839 74 model.scoring_fct_norm 2.0 +839 74 loss.margin 16.439348733718003 +839 74 loss.adversarial_temperature 0.4832491575840888 +839 74 optimizer.lr 0.001586353097880546 +839 74 negative_sampler.num_negs_per_pos 53.0 +839 74 training.batch_size 2.0 +839 75 model.embedding_dim 2.0 +839 75 model.scoring_fct_norm 1.0 +839 75 loss.margin 27.254272784352693 +839 75 
loss.adversarial_temperature 0.6120878273077737 +839 75 optimizer.lr 0.04735174270167436 +839 75 negative_sampler.num_negs_per_pos 63.0 +839 75 training.batch_size 2.0 +839 76 model.embedding_dim 0.0 +839 76 model.scoring_fct_norm 2.0 +839 76 loss.margin 11.410684430787047 +839 76 loss.adversarial_temperature 0.5797305519042654 +839 76 optimizer.lr 0.013800524515274746 +839 76 negative_sampler.num_negs_per_pos 55.0 +839 76 training.batch_size 0.0 +839 77 model.embedding_dim 1.0 +839 77 model.scoring_fct_norm 2.0 +839 77 loss.margin 25.35727143317863 +839 77 loss.adversarial_temperature 0.5309996193467641 +839 77 optimizer.lr 0.0666270838273655 +839 77 negative_sampler.num_negs_per_pos 60.0 +839 77 training.batch_size 1.0 +839 78 model.embedding_dim 0.0 +839 78 model.scoring_fct_norm 1.0 +839 78 loss.margin 18.754963554973095 +839 78 loss.adversarial_temperature 0.3222257756141366 +839 78 optimizer.lr 0.020251846063964095 +839 78 negative_sampler.num_negs_per_pos 67.0 +839 78 training.batch_size 2.0 +839 79 model.embedding_dim 1.0 +839 79 model.scoring_fct_norm 2.0 +839 79 loss.margin 27.929856245509697 +839 79 loss.adversarial_temperature 0.8072160333324762 +839 79 optimizer.lr 0.026534349624625576 +839 79 negative_sampler.num_negs_per_pos 81.0 +839 79 training.batch_size 2.0 +839 80 model.embedding_dim 2.0 +839 80 model.scoring_fct_norm 2.0 +839 80 loss.margin 1.952740289126247 +839 80 loss.adversarial_temperature 0.21875292564134358 +839 80 optimizer.lr 0.0016832010441086213 +839 80 negative_sampler.num_negs_per_pos 40.0 +839 80 training.batch_size 2.0 +839 81 model.embedding_dim 2.0 +839 81 model.scoring_fct_norm 2.0 +839 81 loss.margin 6.2144261504510805 +839 81 loss.adversarial_temperature 0.3351492993849452 +839 81 optimizer.lr 0.023279811923817246 +839 81 negative_sampler.num_negs_per_pos 12.0 +839 81 training.batch_size 2.0 +839 82 model.embedding_dim 1.0 +839 82 model.scoring_fct_norm 2.0 +839 82 loss.margin 26.23349902467671 +839 82 
loss.adversarial_temperature 0.6380410229529766 +839 82 optimizer.lr 0.006578920084856439 +839 82 negative_sampler.num_negs_per_pos 26.0 +839 82 training.batch_size 1.0 +839 83 model.embedding_dim 1.0 +839 83 model.scoring_fct_norm 1.0 +839 83 loss.margin 17.12811100662507 +839 83 loss.adversarial_temperature 0.9070547642122007 +839 83 optimizer.lr 0.005483125745632391 +839 83 negative_sampler.num_negs_per_pos 53.0 +839 83 training.batch_size 0.0 +839 84 model.embedding_dim 0.0 +839 84 model.scoring_fct_norm 1.0 +839 84 loss.margin 2.4791028054454616 +839 84 loss.adversarial_temperature 0.45201929369897714 +839 84 optimizer.lr 0.002209619797942499 +839 84 negative_sampler.num_negs_per_pos 68.0 +839 84 training.batch_size 0.0 +839 85 model.embedding_dim 0.0 +839 85 model.scoring_fct_norm 2.0 +839 85 loss.margin 26.14860451734861 +839 85 loss.adversarial_temperature 0.39438139270468486 +839 85 optimizer.lr 0.012850214443476594 +839 85 negative_sampler.num_negs_per_pos 36.0 +839 85 training.batch_size 1.0 +839 86 model.embedding_dim 0.0 +839 86 model.scoring_fct_norm 2.0 +839 86 loss.margin 21.798191043492157 +839 86 loss.adversarial_temperature 0.9886340774587778 +839 86 optimizer.lr 0.07666514811064655 +839 86 negative_sampler.num_negs_per_pos 74.0 +839 86 training.batch_size 0.0 +839 87 model.embedding_dim 0.0 +839 87 model.scoring_fct_norm 2.0 +839 87 loss.margin 19.203681165997402 +839 87 loss.adversarial_temperature 0.16605522730017228 +839 87 optimizer.lr 0.0026317486421154547 +839 87 negative_sampler.num_negs_per_pos 85.0 +839 87 training.batch_size 0.0 +839 1 dataset """wn18rr""" +839 1 model """transe""" +839 1 loss """nssa""" +839 1 regularizer """no""" +839 1 optimizer """adam""" +839 1 training_loop """owa""" +839 1 negative_sampler """basic""" +839 1 evaluator """rankbased""" +839 2 dataset """wn18rr""" +839 2 model """transe""" +839 2 loss """nssa""" +839 2 regularizer """no""" +839 2 optimizer """adam""" +839 2 training_loop """owa""" +839 2 
negative_sampler """basic""" +839 2 evaluator """rankbased""" +839 3 dataset """wn18rr""" +839 3 model """transe""" +839 3 loss """nssa""" +839 3 regularizer """no""" +839 3 optimizer """adam""" +839 3 training_loop """owa""" +839 3 negative_sampler """basic""" +839 3 evaluator """rankbased""" +839 4 dataset """wn18rr""" +839 4 model """transe""" +839 4 loss """nssa""" +839 4 regularizer """no""" +839 4 optimizer """adam""" +839 4 training_loop """owa""" +839 4 negative_sampler """basic""" +839 4 evaluator """rankbased""" +839 5 dataset """wn18rr""" +839 5 model """transe""" +839 5 loss """nssa""" +839 5 regularizer """no""" +839 5 optimizer """adam""" +839 5 training_loop """owa""" +839 5 negative_sampler """basic""" +839 5 evaluator """rankbased""" +839 6 dataset """wn18rr""" +839 6 model """transe""" +839 6 loss """nssa""" +839 6 regularizer """no""" +839 6 optimizer """adam""" +839 6 training_loop """owa""" +839 6 negative_sampler """basic""" +839 6 evaluator """rankbased""" +839 7 dataset """wn18rr""" +839 7 model """transe""" +839 7 loss """nssa""" +839 7 regularizer """no""" +839 7 optimizer """adam""" +839 7 training_loop """owa""" +839 7 negative_sampler """basic""" +839 7 evaluator """rankbased""" +839 8 dataset """wn18rr""" +839 8 model """transe""" +839 8 loss """nssa""" +839 8 regularizer """no""" +839 8 optimizer """adam""" +839 8 training_loop """owa""" +839 8 negative_sampler """basic""" +839 8 evaluator """rankbased""" +839 9 dataset """wn18rr""" +839 9 model """transe""" +839 9 loss """nssa""" +839 9 regularizer """no""" +839 9 optimizer """adam""" +839 9 training_loop """owa""" +839 9 negative_sampler """basic""" +839 9 evaluator """rankbased""" +839 10 dataset """wn18rr""" +839 10 model """transe""" +839 10 loss """nssa""" +839 10 regularizer """no""" +839 10 optimizer """adam""" +839 10 training_loop """owa""" +839 10 negative_sampler """basic""" +839 10 evaluator """rankbased""" +839 11 dataset """wn18rr""" +839 11 model """transe""" +839 11 
loss """nssa""" +839 11 regularizer """no""" +839 11 optimizer """adam""" +839 11 training_loop """owa""" +839 11 negative_sampler """basic""" +839 11 evaluator """rankbased""" +839 12 dataset """wn18rr""" +839 12 model """transe""" +839 12 loss """nssa""" +839 12 regularizer """no""" +839 12 optimizer """adam""" +839 12 training_loop """owa""" +839 12 negative_sampler """basic""" +839 12 evaluator """rankbased""" +839 13 dataset """wn18rr""" +839 13 model """transe""" +839 13 loss """nssa""" +839 13 regularizer """no""" +839 13 optimizer """adam""" +839 13 training_loop """owa""" +839 13 negative_sampler """basic""" +839 13 evaluator """rankbased""" +839 14 dataset """wn18rr""" +839 14 model """transe""" +839 14 loss """nssa""" +839 14 regularizer """no""" +839 14 optimizer """adam""" +839 14 training_loop """owa""" +839 14 negative_sampler """basic""" +839 14 evaluator """rankbased""" +839 15 dataset """wn18rr""" +839 15 model """transe""" +839 15 loss """nssa""" +839 15 regularizer """no""" +839 15 optimizer """adam""" +839 15 training_loop """owa""" +839 15 negative_sampler """basic""" +839 15 evaluator """rankbased""" +839 16 dataset """wn18rr""" +839 16 model """transe""" +839 16 loss """nssa""" +839 16 regularizer """no""" +839 16 optimizer """adam""" +839 16 training_loop """owa""" +839 16 negative_sampler """basic""" +839 16 evaluator """rankbased""" +839 17 dataset """wn18rr""" +839 17 model """transe""" +839 17 loss """nssa""" +839 17 regularizer """no""" +839 17 optimizer """adam""" +839 17 training_loop """owa""" +839 17 negative_sampler """basic""" +839 17 evaluator """rankbased""" +839 18 dataset """wn18rr""" +839 18 model """transe""" +839 18 loss """nssa""" +839 18 regularizer """no""" +839 18 optimizer """adam""" +839 18 training_loop """owa""" +839 18 negative_sampler """basic""" +839 18 evaluator """rankbased""" +839 19 dataset """wn18rr""" +839 19 model """transe""" +839 19 loss """nssa""" +839 19 regularizer """no""" +839 19 optimizer 
"""adam""" +839 19 training_loop """owa""" +839 19 negative_sampler """basic""" +839 19 evaluator """rankbased""" +839 20 dataset """wn18rr""" +839 20 model """transe""" +839 20 loss """nssa""" +839 20 regularizer """no""" +839 20 optimizer """adam""" +839 20 training_loop """owa""" +839 20 negative_sampler """basic""" +839 20 evaluator """rankbased""" +839 21 dataset """wn18rr""" +839 21 model """transe""" +839 21 loss """nssa""" +839 21 regularizer """no""" +839 21 optimizer """adam""" +839 21 training_loop """owa""" +839 21 negative_sampler """basic""" +839 21 evaluator """rankbased""" +839 22 dataset """wn18rr""" +839 22 model """transe""" +839 22 loss """nssa""" +839 22 regularizer """no""" +839 22 optimizer """adam""" +839 22 training_loop """owa""" +839 22 negative_sampler """basic""" +839 22 evaluator """rankbased""" +839 23 dataset """wn18rr""" +839 23 model """transe""" +839 23 loss """nssa""" +839 23 regularizer """no""" +839 23 optimizer """adam""" +839 23 training_loop """owa""" +839 23 negative_sampler """basic""" +839 23 evaluator """rankbased""" +839 24 dataset """wn18rr""" +839 24 model """transe""" +839 24 loss """nssa""" +839 24 regularizer """no""" +839 24 optimizer """adam""" +839 24 training_loop """owa""" +839 24 negative_sampler """basic""" +839 24 evaluator """rankbased""" +839 25 dataset """wn18rr""" +839 25 model """transe""" +839 25 loss """nssa""" +839 25 regularizer """no""" +839 25 optimizer """adam""" +839 25 training_loop """owa""" +839 25 negative_sampler """basic""" +839 25 evaluator """rankbased""" +839 26 dataset """wn18rr""" +839 26 model """transe""" +839 26 loss """nssa""" +839 26 regularizer """no""" +839 26 optimizer """adam""" +839 26 training_loop """owa""" +839 26 negative_sampler """basic""" +839 26 evaluator """rankbased""" +839 27 dataset """wn18rr""" +839 27 model """transe""" +839 27 loss """nssa""" +839 27 regularizer """no""" +839 27 optimizer """adam""" +839 27 training_loop """owa""" +839 27 negative_sampler 
"""basic""" +839 27 evaluator """rankbased""" +839 28 dataset """wn18rr""" +839 28 model """transe""" +839 28 loss """nssa""" +839 28 regularizer """no""" +839 28 optimizer """adam""" +839 28 training_loop """owa""" +839 28 negative_sampler """basic""" +839 28 evaluator """rankbased""" +839 29 dataset """wn18rr""" +839 29 model """transe""" +839 29 loss """nssa""" +839 29 regularizer """no""" +839 29 optimizer """adam""" +839 29 training_loop """owa""" +839 29 negative_sampler """basic""" +839 29 evaluator """rankbased""" +839 30 dataset """wn18rr""" +839 30 model """transe""" +839 30 loss """nssa""" +839 30 regularizer """no""" +839 30 optimizer """adam""" +839 30 training_loop """owa""" +839 30 negative_sampler """basic""" +839 30 evaluator """rankbased""" +839 31 dataset """wn18rr""" +839 31 model """transe""" +839 31 loss """nssa""" +839 31 regularizer """no""" +839 31 optimizer """adam""" +839 31 training_loop """owa""" +839 31 negative_sampler """basic""" +839 31 evaluator """rankbased""" +839 32 dataset """wn18rr""" +839 32 model """transe""" +839 32 loss """nssa""" +839 32 regularizer """no""" +839 32 optimizer """adam""" +839 32 training_loop """owa""" +839 32 negative_sampler """basic""" +839 32 evaluator """rankbased""" +839 33 dataset """wn18rr""" +839 33 model """transe""" +839 33 loss """nssa""" +839 33 regularizer """no""" +839 33 optimizer """adam""" +839 33 training_loop """owa""" +839 33 negative_sampler """basic""" +839 33 evaluator """rankbased""" +839 34 dataset """wn18rr""" +839 34 model """transe""" +839 34 loss """nssa""" +839 34 regularizer """no""" +839 34 optimizer """adam""" +839 34 training_loop """owa""" +839 34 negative_sampler """basic""" +839 34 evaluator """rankbased""" +839 35 dataset """wn18rr""" +839 35 model """transe""" +839 35 loss """nssa""" +839 35 regularizer """no""" +839 35 optimizer """adam""" +839 35 training_loop """owa""" +839 35 negative_sampler """basic""" +839 35 evaluator """rankbased""" +839 36 dataset 
"""wn18rr""" +839 36 model """transe""" +839 36 loss """nssa""" +839 36 regularizer """no""" +839 36 optimizer """adam""" +839 36 training_loop """owa""" +839 36 negative_sampler """basic""" +839 36 evaluator """rankbased""" +839 37 dataset """wn18rr""" +839 37 model """transe""" +839 37 loss """nssa""" +839 37 regularizer """no""" +839 37 optimizer """adam""" +839 37 training_loop """owa""" +839 37 negative_sampler """basic""" +839 37 evaluator """rankbased""" +839 38 dataset """wn18rr""" +839 38 model """transe""" +839 38 loss """nssa""" +839 38 regularizer """no""" +839 38 optimizer """adam""" +839 38 training_loop """owa""" +839 38 negative_sampler """basic""" +839 38 evaluator """rankbased""" +839 39 dataset """wn18rr""" +839 39 model """transe""" +839 39 loss """nssa""" +839 39 regularizer """no""" +839 39 optimizer """adam""" +839 39 training_loop """owa""" +839 39 negative_sampler """basic""" +839 39 evaluator """rankbased""" +839 40 dataset """wn18rr""" +839 40 model """transe""" +839 40 loss """nssa""" +839 40 regularizer """no""" +839 40 optimizer """adam""" +839 40 training_loop """owa""" +839 40 negative_sampler """basic""" +839 40 evaluator """rankbased""" +839 41 dataset """wn18rr""" +839 41 model """transe""" +839 41 loss """nssa""" +839 41 regularizer """no""" +839 41 optimizer """adam""" +839 41 training_loop """owa""" +839 41 negative_sampler """basic""" +839 41 evaluator """rankbased""" +839 42 dataset """wn18rr""" +839 42 model """transe""" +839 42 loss """nssa""" +839 42 regularizer """no""" +839 42 optimizer """adam""" +839 42 training_loop """owa""" +839 42 negative_sampler """basic""" +839 42 evaluator """rankbased""" +839 43 dataset """wn18rr""" +839 43 model """transe""" +839 43 loss """nssa""" +839 43 regularizer """no""" +839 43 optimizer """adam""" +839 43 training_loop """owa""" +839 43 negative_sampler """basic""" +839 43 evaluator """rankbased""" +839 44 dataset """wn18rr""" +839 44 model """transe""" +839 44 loss """nssa""" +839 44 
regularizer """no""" +839 44 optimizer """adam""" +839 44 training_loop """owa""" +839 44 negative_sampler """basic""" +839 44 evaluator """rankbased""" +839 45 dataset """wn18rr""" +839 45 model """transe""" +839 45 loss """nssa""" +839 45 regularizer """no""" +839 45 optimizer """adam""" +839 45 training_loop """owa""" +839 45 negative_sampler """basic""" +839 45 evaluator """rankbased""" +839 46 dataset """wn18rr""" +839 46 model """transe""" +839 46 loss """nssa""" +839 46 regularizer """no""" +839 46 optimizer """adam""" +839 46 training_loop """owa""" +839 46 negative_sampler """basic""" +839 46 evaluator """rankbased""" +839 47 dataset """wn18rr""" +839 47 model """transe""" +839 47 loss """nssa""" +839 47 regularizer """no""" +839 47 optimizer """adam""" +839 47 training_loop """owa""" +839 47 negative_sampler """basic""" +839 47 evaluator """rankbased""" +839 48 dataset """wn18rr""" +839 48 model """transe""" +839 48 loss """nssa""" +839 48 regularizer """no""" +839 48 optimizer """adam""" +839 48 training_loop """owa""" +839 48 negative_sampler """basic""" +839 48 evaluator """rankbased""" +839 49 dataset """wn18rr""" +839 49 model """transe""" +839 49 loss """nssa""" +839 49 regularizer """no""" +839 49 optimizer """adam""" +839 49 training_loop """owa""" +839 49 negative_sampler """basic""" +839 49 evaluator """rankbased""" +839 50 dataset """wn18rr""" +839 50 model """transe""" +839 50 loss """nssa""" +839 50 regularizer """no""" +839 50 optimizer """adam""" +839 50 training_loop """owa""" +839 50 negative_sampler """basic""" +839 50 evaluator """rankbased""" +839 51 dataset """wn18rr""" +839 51 model """transe""" +839 51 loss """nssa""" +839 51 regularizer """no""" +839 51 optimizer """adam""" +839 51 training_loop """owa""" +839 51 negative_sampler """basic""" +839 51 evaluator """rankbased""" +839 52 dataset """wn18rr""" +839 52 model """transe""" +839 52 loss """nssa""" +839 52 regularizer """no""" +839 52 optimizer """adam""" +839 52 training_loop 
"""owa""" +839 52 negative_sampler """basic""" +839 52 evaluator """rankbased""" +839 53 dataset """wn18rr""" +839 53 model """transe""" +839 53 loss """nssa""" +839 53 regularizer """no""" +839 53 optimizer """adam""" +839 53 training_loop """owa""" +839 53 negative_sampler """basic""" +839 53 evaluator """rankbased""" +839 54 dataset """wn18rr""" +839 54 model """transe""" +839 54 loss """nssa""" +839 54 regularizer """no""" +839 54 optimizer """adam""" +839 54 training_loop """owa""" +839 54 negative_sampler """basic""" +839 54 evaluator """rankbased""" +839 55 dataset """wn18rr""" +839 55 model """transe""" +839 55 loss """nssa""" +839 55 regularizer """no""" +839 55 optimizer """adam""" +839 55 training_loop """owa""" +839 55 negative_sampler """basic""" +839 55 evaluator """rankbased""" +839 56 dataset """wn18rr""" +839 56 model """transe""" +839 56 loss """nssa""" +839 56 regularizer """no""" +839 56 optimizer """adam""" +839 56 training_loop """owa""" +839 56 negative_sampler """basic""" +839 56 evaluator """rankbased""" +839 57 dataset """wn18rr""" +839 57 model """transe""" +839 57 loss """nssa""" +839 57 regularizer """no""" +839 57 optimizer """adam""" +839 57 training_loop """owa""" +839 57 negative_sampler """basic""" +839 57 evaluator """rankbased""" +839 58 dataset """wn18rr""" +839 58 model """transe""" +839 58 loss """nssa""" +839 58 regularizer """no""" +839 58 optimizer """adam""" +839 58 training_loop """owa""" +839 58 negative_sampler """basic""" +839 58 evaluator """rankbased""" +839 59 dataset """wn18rr""" +839 59 model """transe""" +839 59 loss """nssa""" +839 59 regularizer """no""" +839 59 optimizer """adam""" +839 59 training_loop """owa""" +839 59 negative_sampler """basic""" +839 59 evaluator """rankbased""" +839 60 dataset """wn18rr""" +839 60 model """transe""" +839 60 loss """nssa""" +839 60 regularizer """no""" +839 60 optimizer """adam""" +839 60 training_loop """owa""" +839 60 negative_sampler """basic""" +839 60 evaluator 
"""rankbased""" +839 61 dataset """wn18rr""" +839 61 model """transe""" +839 61 loss """nssa""" +839 61 regularizer """no""" +839 61 optimizer """adam""" +839 61 training_loop """owa""" +839 61 negative_sampler """basic""" +839 61 evaluator """rankbased""" +839 62 dataset """wn18rr""" +839 62 model """transe""" +839 62 loss """nssa""" +839 62 regularizer """no""" +839 62 optimizer """adam""" +839 62 training_loop """owa""" +839 62 negative_sampler """basic""" +839 62 evaluator """rankbased""" +839 63 dataset """wn18rr""" +839 63 model """transe""" +839 63 loss """nssa""" +839 63 regularizer """no""" +839 63 optimizer """adam""" +839 63 training_loop """owa""" +839 63 negative_sampler """basic""" +839 63 evaluator """rankbased""" +839 64 dataset """wn18rr""" +839 64 model """transe""" +839 64 loss """nssa""" +839 64 regularizer """no""" +839 64 optimizer """adam""" +839 64 training_loop """owa""" +839 64 negative_sampler """basic""" +839 64 evaluator """rankbased""" +839 65 dataset """wn18rr""" +839 65 model """transe""" +839 65 loss """nssa""" +839 65 regularizer """no""" +839 65 optimizer """adam""" +839 65 training_loop """owa""" +839 65 negative_sampler """basic""" +839 65 evaluator """rankbased""" +839 66 dataset """wn18rr""" +839 66 model """transe""" +839 66 loss """nssa""" +839 66 regularizer """no""" +839 66 optimizer """adam""" +839 66 training_loop """owa""" +839 66 negative_sampler """basic""" +839 66 evaluator """rankbased""" +839 67 dataset """wn18rr""" +839 67 model """transe""" +839 67 loss """nssa""" +839 67 regularizer """no""" +839 67 optimizer """adam""" +839 67 training_loop """owa""" +839 67 negative_sampler """basic""" +839 67 evaluator """rankbased""" +839 68 dataset """wn18rr""" +839 68 model """transe""" +839 68 loss """nssa""" +839 68 regularizer """no""" +839 68 optimizer """adam""" +839 68 training_loop """owa""" +839 68 negative_sampler """basic""" +839 68 evaluator """rankbased""" +839 69 dataset """wn18rr""" +839 69 model """transe""" 
+839 69 loss """nssa""" +839 69 regularizer """no""" +839 69 optimizer """adam""" +839 69 training_loop """owa""" +839 69 negative_sampler """basic""" +839 69 evaluator """rankbased""" +839 70 dataset """wn18rr""" +839 70 model """transe""" +839 70 loss """nssa""" +839 70 regularizer """no""" +839 70 optimizer """adam""" +839 70 training_loop """owa""" +839 70 negative_sampler """basic""" +839 70 evaluator """rankbased""" +839 71 dataset """wn18rr""" +839 71 model """transe""" +839 71 loss """nssa""" +839 71 regularizer """no""" +839 71 optimizer """adam""" +839 71 training_loop """owa""" +839 71 negative_sampler """basic""" +839 71 evaluator """rankbased""" +839 72 dataset """wn18rr""" +839 72 model """transe""" +839 72 loss """nssa""" +839 72 regularizer """no""" +839 72 optimizer """adam""" +839 72 training_loop """owa""" +839 72 negative_sampler """basic""" +839 72 evaluator """rankbased""" +839 73 dataset """wn18rr""" +839 73 model """transe""" +839 73 loss """nssa""" +839 73 regularizer """no""" +839 73 optimizer """adam""" +839 73 training_loop """owa""" +839 73 negative_sampler """basic""" +839 73 evaluator """rankbased""" +839 74 dataset """wn18rr""" +839 74 model """transe""" +839 74 loss """nssa""" +839 74 regularizer """no""" +839 74 optimizer """adam""" +839 74 training_loop """owa""" +839 74 negative_sampler """basic""" +839 74 evaluator """rankbased""" +839 75 dataset """wn18rr""" +839 75 model """transe""" +839 75 loss """nssa""" +839 75 regularizer """no""" +839 75 optimizer """adam""" +839 75 training_loop """owa""" +839 75 negative_sampler """basic""" +839 75 evaluator """rankbased""" +839 76 dataset """wn18rr""" +839 76 model """transe""" +839 76 loss """nssa""" +839 76 regularizer """no""" +839 76 optimizer """adam""" +839 76 training_loop """owa""" +839 76 negative_sampler """basic""" +839 76 evaluator """rankbased""" +839 77 dataset """wn18rr""" +839 77 model """transe""" +839 77 loss """nssa""" +839 77 regularizer """no""" +839 77 optimizer 
"""adam""" +839 77 training_loop """owa""" +839 77 negative_sampler """basic""" +839 77 evaluator """rankbased""" +839 78 dataset """wn18rr""" +839 78 model """transe""" +839 78 loss """nssa""" +839 78 regularizer """no""" +839 78 optimizer """adam""" +839 78 training_loop """owa""" +839 78 negative_sampler """basic""" +839 78 evaluator """rankbased""" +839 79 dataset """wn18rr""" +839 79 model """transe""" +839 79 loss """nssa""" +839 79 regularizer """no""" +839 79 optimizer """adam""" +839 79 training_loop """owa""" +839 79 negative_sampler """basic""" +839 79 evaluator """rankbased""" +839 80 dataset """wn18rr""" +839 80 model """transe""" +839 80 loss """nssa""" +839 80 regularizer """no""" +839 80 optimizer """adam""" +839 80 training_loop """owa""" +839 80 negative_sampler """basic""" +839 80 evaluator """rankbased""" +839 81 dataset """wn18rr""" +839 81 model """transe""" +839 81 loss """nssa""" +839 81 regularizer """no""" +839 81 optimizer """adam""" +839 81 training_loop """owa""" +839 81 negative_sampler """basic""" +839 81 evaluator """rankbased""" +839 82 dataset """wn18rr""" +839 82 model """transe""" +839 82 loss """nssa""" +839 82 regularizer """no""" +839 82 optimizer """adam""" +839 82 training_loop """owa""" +839 82 negative_sampler """basic""" +839 82 evaluator """rankbased""" +839 83 dataset """wn18rr""" +839 83 model """transe""" +839 83 loss """nssa""" +839 83 regularizer """no""" +839 83 optimizer """adam""" +839 83 training_loop """owa""" +839 83 negative_sampler """basic""" +839 83 evaluator """rankbased""" +839 84 dataset """wn18rr""" +839 84 model """transe""" +839 84 loss """nssa""" +839 84 regularizer """no""" +839 84 optimizer """adam""" +839 84 training_loop """owa""" +839 84 negative_sampler """basic""" +839 84 evaluator """rankbased""" +839 85 dataset """wn18rr""" +839 85 model """transe""" +839 85 loss """nssa""" +839 85 regularizer """no""" +839 85 optimizer """adam""" +839 85 training_loop """owa""" +839 85 negative_sampler 
"""basic""" +839 85 evaluator """rankbased""" +839 86 dataset """wn18rr""" +839 86 model """transe""" +839 86 loss """nssa""" +839 86 regularizer """no""" +839 86 optimizer """adam""" +839 86 training_loop """owa""" +839 86 negative_sampler """basic""" +839 86 evaluator """rankbased""" +839 87 dataset """wn18rr""" +839 87 model """transe""" +839 87 loss """nssa""" +839 87 regularizer """no""" +839 87 optimizer """adam""" +839 87 training_loop """owa""" +839 87 negative_sampler """basic""" +839 87 evaluator """rankbased""" +840 1 model.embedding_dim 1.0 +840 1 model.scoring_fct_norm 2.0 +840 1 loss.margin 3.5092416489148377 +840 1 loss.adversarial_temperature 0.2441560165839144 +840 1 optimizer.lr 0.046577240247352644 +840 1 negative_sampler.num_negs_per_pos 95.0 +840 1 training.batch_size 0.0 +840 2 model.embedding_dim 2.0 +840 2 model.scoring_fct_norm 1.0 +840 2 loss.margin 1.9943069663197954 +840 2 loss.adversarial_temperature 0.9135303814183208 +840 2 optimizer.lr 0.013516447611999625 +840 2 negative_sampler.num_negs_per_pos 99.0 +840 2 training.batch_size 1.0 +840 3 model.embedding_dim 2.0 +840 3 model.scoring_fct_norm 2.0 +840 3 loss.margin 27.88682649003906 +840 3 loss.adversarial_temperature 0.4621519857235889 +840 3 optimizer.lr 0.01850627961059186 +840 3 negative_sampler.num_negs_per_pos 64.0 +840 3 training.batch_size 2.0 +840 4 model.embedding_dim 2.0 +840 4 model.scoring_fct_norm 1.0 +840 4 loss.margin 3.1725670688453387 +840 4 loss.adversarial_temperature 0.4315552419377059 +840 4 optimizer.lr 0.0027947409102605037 +840 4 negative_sampler.num_negs_per_pos 63.0 +840 4 training.batch_size 0.0 +840 5 model.embedding_dim 1.0 +840 5 model.scoring_fct_norm 1.0 +840 5 loss.margin 19.298758661259836 +840 5 loss.adversarial_temperature 0.33384349562850535 +840 5 optimizer.lr 0.0025147814234781247 +840 5 negative_sampler.num_negs_per_pos 8.0 +840 5 training.batch_size 0.0 +840 6 model.embedding_dim 1.0 +840 6 model.scoring_fct_norm 2.0 +840 6 loss.margin 
13.109855078941782 +840 6 loss.adversarial_temperature 0.3940302047588384 +840 6 optimizer.lr 0.008771693671565384 +840 6 negative_sampler.num_negs_per_pos 63.0 +840 6 training.batch_size 2.0 +840 7 model.embedding_dim 0.0 +840 7 model.scoring_fct_norm 1.0 +840 7 loss.margin 2.9191787735356285 +840 7 loss.adversarial_temperature 0.5495498932185959 +840 7 optimizer.lr 0.001020646076684107 +840 7 negative_sampler.num_negs_per_pos 93.0 +840 7 training.batch_size 0.0 +840 8 model.embedding_dim 1.0 +840 8 model.scoring_fct_norm 1.0 +840 8 loss.margin 20.940874690911564 +840 8 loss.adversarial_temperature 0.9632996191449549 +840 8 optimizer.lr 0.005668678683436182 +840 8 negative_sampler.num_negs_per_pos 89.0 +840 8 training.batch_size 0.0 +840 9 model.embedding_dim 1.0 +840 9 model.scoring_fct_norm 2.0 +840 9 loss.margin 22.380687624190177 +840 9 loss.adversarial_temperature 0.9377756088597108 +840 9 optimizer.lr 0.005583632602548096 +840 9 negative_sampler.num_negs_per_pos 94.0 +840 9 training.batch_size 1.0 +840 10 model.embedding_dim 0.0 +840 10 model.scoring_fct_norm 1.0 +840 10 loss.margin 17.51955164410637 +840 10 loss.adversarial_temperature 0.7601817907459354 +840 10 optimizer.lr 0.03091666954690977 +840 10 negative_sampler.num_negs_per_pos 2.0 +840 10 training.batch_size 1.0 +840 11 model.embedding_dim 0.0 +840 11 model.scoring_fct_norm 2.0 +840 11 loss.margin 23.07235598491796 +840 11 loss.adversarial_temperature 0.6787147823693993 +840 11 optimizer.lr 0.0048888923924974015 +840 11 negative_sampler.num_negs_per_pos 89.0 +840 11 training.batch_size 1.0 +840 12 model.embedding_dim 2.0 +840 12 model.scoring_fct_norm 1.0 +840 12 loss.margin 20.71471592765105 +840 12 loss.adversarial_temperature 0.843566921297578 +840 12 optimizer.lr 0.005077225585287502 +840 12 negative_sampler.num_negs_per_pos 24.0 +840 12 training.batch_size 0.0 +840 13 model.embedding_dim 0.0 +840 13 model.scoring_fct_norm 2.0 +840 13 loss.margin 16.50719858240739 +840 13 
loss.adversarial_temperature 0.9557521644567388 +840 13 optimizer.lr 0.002157817081156238 +840 13 negative_sampler.num_negs_per_pos 65.0 +840 13 training.batch_size 2.0 +840 14 model.embedding_dim 0.0 +840 14 model.scoring_fct_norm 2.0 +840 14 loss.margin 16.250061296601256 +840 14 loss.adversarial_temperature 0.9283964138545899 +840 14 optimizer.lr 0.003365429278341808 +840 14 negative_sampler.num_negs_per_pos 40.0 +840 14 training.batch_size 2.0 +840 15 model.embedding_dim 2.0 +840 15 model.scoring_fct_norm 2.0 +840 15 loss.margin 20.035703498621412 +840 15 loss.adversarial_temperature 0.66114782023693 +840 15 optimizer.lr 0.06659479451400631 +840 15 negative_sampler.num_negs_per_pos 90.0 +840 15 training.batch_size 0.0 +840 16 model.embedding_dim 2.0 +840 16 model.scoring_fct_norm 2.0 +840 16 loss.margin 3.7885773156111564 +840 16 loss.adversarial_temperature 0.24308498799025144 +840 16 optimizer.lr 0.0041854380713308575 +840 16 negative_sampler.num_negs_per_pos 47.0 +840 16 training.batch_size 0.0 +840 17 model.embedding_dim 1.0 +840 17 model.scoring_fct_norm 2.0 +840 17 loss.margin 15.864823406025563 +840 17 loss.adversarial_temperature 0.8763683267045216 +840 17 optimizer.lr 0.0421345474118972 +840 17 negative_sampler.num_negs_per_pos 51.0 +840 17 training.batch_size 0.0 +840 18 model.embedding_dim 0.0 +840 18 model.scoring_fct_norm 2.0 +840 18 loss.margin 4.82097878696801 +840 18 loss.adversarial_temperature 0.5408749288307785 +840 18 optimizer.lr 0.003030573342451176 +840 18 negative_sampler.num_negs_per_pos 84.0 +840 18 training.batch_size 1.0 +840 19 model.embedding_dim 2.0 +840 19 model.scoring_fct_norm 1.0 +840 19 loss.margin 23.285506312974317 +840 19 loss.adversarial_temperature 0.9405520682160453 +840 19 optimizer.lr 0.0012067996661439245 +840 19 negative_sampler.num_negs_per_pos 33.0 +840 19 training.batch_size 2.0 +840 20 model.embedding_dim 1.0 +840 20 model.scoring_fct_norm 2.0 +840 20 loss.margin 24.540625616849965 +840 20 
loss.adversarial_temperature 0.9787045791486184 +840 20 optimizer.lr 0.0015643025639644453 +840 20 negative_sampler.num_negs_per_pos 21.0 +840 20 training.batch_size 1.0 +840 21 model.embedding_dim 2.0 +840 21 model.scoring_fct_norm 2.0 +840 21 loss.margin 7.852669778109698 +840 21 loss.adversarial_temperature 0.48370828242088726 +840 21 optimizer.lr 0.004746218093531714 +840 21 negative_sampler.num_negs_per_pos 72.0 +840 21 training.batch_size 0.0 +840 22 model.embedding_dim 1.0 +840 22 model.scoring_fct_norm 1.0 +840 22 loss.margin 1.2498272700209128 +840 22 loss.adversarial_temperature 0.810656924140509 +840 22 optimizer.lr 0.009260605319441199 +840 22 negative_sampler.num_negs_per_pos 19.0 +840 22 training.batch_size 0.0 +840 23 model.embedding_dim 1.0 +840 23 model.scoring_fct_norm 1.0 +840 23 loss.margin 26.27683252230686 +840 23 loss.adversarial_temperature 0.6941580739046236 +840 23 optimizer.lr 0.0023256860529223397 +840 23 negative_sampler.num_negs_per_pos 8.0 +840 23 training.batch_size 1.0 +840 24 model.embedding_dim 0.0 +840 24 model.scoring_fct_norm 1.0 +840 24 loss.margin 9.34338289492295 +840 24 loss.adversarial_temperature 0.5026687536131135 +840 24 optimizer.lr 0.013064199517933944 +840 24 negative_sampler.num_negs_per_pos 23.0 +840 24 training.batch_size 0.0 +840 25 model.embedding_dim 2.0 +840 25 model.scoring_fct_norm 2.0 +840 25 loss.margin 24.60693334224084 +840 25 loss.adversarial_temperature 0.84791379248672 +840 25 optimizer.lr 0.023531531059021733 +840 25 negative_sampler.num_negs_per_pos 72.0 +840 25 training.batch_size 0.0 +840 26 model.embedding_dim 0.0 +840 26 model.scoring_fct_norm 1.0 +840 26 loss.margin 12.195463972993723 +840 26 loss.adversarial_temperature 0.5564893927593044 +840 26 optimizer.lr 0.01640123967839562 +840 26 negative_sampler.num_negs_per_pos 84.0 +840 26 training.batch_size 2.0 +840 27 model.embedding_dim 1.0 +840 27 model.scoring_fct_norm 1.0 +840 27 loss.margin 24.526217706642374 +840 27 
loss.adversarial_temperature 0.38542960923651864 +840 27 optimizer.lr 0.008533345004922737 +840 27 negative_sampler.num_negs_per_pos 29.0 +840 27 training.batch_size 2.0 +840 28 model.embedding_dim 2.0 +840 28 model.scoring_fct_norm 2.0 +840 28 loss.margin 5.375276943939173 +840 28 loss.adversarial_temperature 0.38921864574371295 +840 28 optimizer.lr 0.06305431317912681 +840 28 negative_sampler.num_negs_per_pos 66.0 +840 28 training.batch_size 2.0 +840 29 model.embedding_dim 0.0 +840 29 model.scoring_fct_norm 2.0 +840 29 loss.margin 24.388921849935095 +840 29 loss.adversarial_temperature 0.22197887862883045 +840 29 optimizer.lr 0.07951433860459987 +840 29 negative_sampler.num_negs_per_pos 32.0 +840 29 training.batch_size 0.0 +840 30 model.embedding_dim 1.0 +840 30 model.scoring_fct_norm 1.0 +840 30 loss.margin 26.244025080051816 +840 30 loss.adversarial_temperature 0.8460760979519746 +840 30 optimizer.lr 0.07775043067301378 +840 30 negative_sampler.num_negs_per_pos 0.0 +840 30 training.batch_size 2.0 +840 31 model.embedding_dim 2.0 +840 31 model.scoring_fct_norm 2.0 +840 31 loss.margin 17.97200080843387 +840 31 loss.adversarial_temperature 0.232230055290288 +840 31 optimizer.lr 0.08267164004641687 +840 31 negative_sampler.num_negs_per_pos 38.0 +840 31 training.batch_size 0.0 +840 32 model.embedding_dim 1.0 +840 32 model.scoring_fct_norm 1.0 +840 32 loss.margin 9.327535104354038 +840 32 loss.adversarial_temperature 0.5683676781676014 +840 32 optimizer.lr 0.003713611592607898 +840 32 negative_sampler.num_negs_per_pos 0.0 +840 32 training.batch_size 1.0 +840 33 model.embedding_dim 2.0 +840 33 model.scoring_fct_norm 1.0 +840 33 loss.margin 24.284692765015524 +840 33 loss.adversarial_temperature 0.3021443285590764 +840 33 optimizer.lr 0.004447847925186811 +840 33 negative_sampler.num_negs_per_pos 66.0 +840 33 training.batch_size 2.0 +840 34 model.embedding_dim 1.0 +840 34 model.scoring_fct_norm 2.0 +840 34 loss.margin 11.430977165012031 +840 34 
loss.adversarial_temperature 0.8199387244975987 +840 34 optimizer.lr 0.03387582381782403 +840 34 negative_sampler.num_negs_per_pos 25.0 +840 34 training.batch_size 1.0 +840 35 model.embedding_dim 0.0 +840 35 model.scoring_fct_norm 1.0 +840 35 loss.margin 10.86525610565772 +840 35 loss.adversarial_temperature 0.6541245674500706 +840 35 optimizer.lr 0.01885053766681276 +840 35 negative_sampler.num_negs_per_pos 93.0 +840 35 training.batch_size 2.0 +840 36 model.embedding_dim 0.0 +840 36 model.scoring_fct_norm 2.0 +840 36 loss.margin 27.193436667811017 +840 36 loss.adversarial_temperature 0.8458222618808887 +840 36 optimizer.lr 0.024948068743718427 +840 36 negative_sampler.num_negs_per_pos 34.0 +840 36 training.batch_size 1.0 +840 37 model.embedding_dim 2.0 +840 37 model.scoring_fct_norm 1.0 +840 37 loss.margin 1.6089599998062485 +840 37 loss.adversarial_temperature 0.5344044058263024 +840 37 optimizer.lr 0.014671245077875136 +840 37 negative_sampler.num_negs_per_pos 97.0 +840 37 training.batch_size 1.0 +840 38 model.embedding_dim 1.0 +840 38 model.scoring_fct_norm 2.0 +840 38 loss.margin 22.799548343238992 +840 38 loss.adversarial_temperature 0.9948533713877126 +840 38 optimizer.lr 0.0022340713133093674 +840 38 negative_sampler.num_negs_per_pos 31.0 +840 38 training.batch_size 2.0 +840 39 model.embedding_dim 2.0 +840 39 model.scoring_fct_norm 1.0 +840 39 loss.margin 13.7664429929624 +840 39 loss.adversarial_temperature 0.15193208607622935 +840 39 optimizer.lr 0.08256463166106019 +840 39 negative_sampler.num_negs_per_pos 6.0 +840 39 training.batch_size 1.0 +840 40 model.embedding_dim 2.0 +840 40 model.scoring_fct_norm 1.0 +840 40 loss.margin 25.097805954024032 +840 40 loss.adversarial_temperature 0.6423167304049342 +840 40 optimizer.lr 0.0034520352330896587 +840 40 negative_sampler.num_negs_per_pos 27.0 +840 40 training.batch_size 1.0 +840 41 model.embedding_dim 1.0 +840 41 model.scoring_fct_norm 2.0 +840 41 loss.margin 16.048195330335822 +840 41 
loss.adversarial_temperature 0.949290315911005 +840 41 optimizer.lr 0.0054490238769251504 +840 41 negative_sampler.num_negs_per_pos 64.0 +840 41 training.batch_size 0.0 +840 42 model.embedding_dim 2.0 +840 42 model.scoring_fct_norm 1.0 +840 42 loss.margin 17.456973356939713 +840 42 loss.adversarial_temperature 0.6290745520615594 +840 42 optimizer.lr 0.02516341930451683 +840 42 negative_sampler.num_negs_per_pos 83.0 +840 42 training.batch_size 1.0 +840 43 model.embedding_dim 0.0 +840 43 model.scoring_fct_norm 1.0 +840 43 loss.margin 9.233049995898469 +840 43 loss.adversarial_temperature 0.9218629442106879 +840 43 optimizer.lr 0.0024681141666670924 +840 43 negative_sampler.num_negs_per_pos 19.0 +840 43 training.batch_size 0.0 +840 44 model.embedding_dim 2.0 +840 44 model.scoring_fct_norm 1.0 +840 44 loss.margin 27.59492292691766 +840 44 loss.adversarial_temperature 0.25385873658193214 +840 44 optimizer.lr 0.07989369695988219 +840 44 negative_sampler.num_negs_per_pos 45.0 +840 44 training.batch_size 1.0 +840 45 model.embedding_dim 0.0 +840 45 model.scoring_fct_norm 2.0 +840 45 loss.margin 7.674307116714655 +840 45 loss.adversarial_temperature 0.31979357151803944 +840 45 optimizer.lr 0.002671422730461796 +840 45 negative_sampler.num_negs_per_pos 10.0 +840 45 training.batch_size 1.0 +840 46 model.embedding_dim 0.0 +840 46 model.scoring_fct_norm 1.0 +840 46 loss.margin 12.737470736390055 +840 46 loss.adversarial_temperature 0.24492427921442209 +840 46 optimizer.lr 0.06471759580992296 +840 46 negative_sampler.num_negs_per_pos 46.0 +840 46 training.batch_size 2.0 +840 47 model.embedding_dim 1.0 +840 47 model.scoring_fct_norm 2.0 +840 47 loss.margin 21.244131294669888 +840 47 loss.adversarial_temperature 0.9805905675363252 +840 47 optimizer.lr 0.048041368888048615 +840 47 negative_sampler.num_negs_per_pos 17.0 +840 47 training.batch_size 2.0 +840 48 model.embedding_dim 1.0 +840 48 model.scoring_fct_norm 1.0 +840 48 loss.margin 15.22737534713643 +840 48 
loss.adversarial_temperature 0.9194412742270864 +840 48 optimizer.lr 0.032159914859441446 +840 48 negative_sampler.num_negs_per_pos 22.0 +840 48 training.batch_size 1.0 +840 49 model.embedding_dim 2.0 +840 49 model.scoring_fct_norm 1.0 +840 49 loss.margin 21.38299871440809 +840 49 loss.adversarial_temperature 0.5036371981824374 +840 49 optimizer.lr 0.01699253081689182 +840 49 negative_sampler.num_negs_per_pos 20.0 +840 49 training.batch_size 1.0 +840 50 model.embedding_dim 2.0 +840 50 model.scoring_fct_norm 2.0 +840 50 loss.margin 7.722065367877971 +840 50 loss.adversarial_temperature 0.5511923272679248 +840 50 optimizer.lr 0.06068728967579736 +840 50 negative_sampler.num_negs_per_pos 94.0 +840 50 training.batch_size 1.0 +840 51 model.embedding_dim 0.0 +840 51 model.scoring_fct_norm 1.0 +840 51 loss.margin 15.190911443590313 +840 51 loss.adversarial_temperature 0.22612749896632173 +840 51 optimizer.lr 0.0011337851221250438 +840 51 negative_sampler.num_negs_per_pos 76.0 +840 51 training.batch_size 2.0 +840 52 model.embedding_dim 1.0 +840 52 model.scoring_fct_norm 1.0 +840 52 loss.margin 3.4264582689240783 +840 52 loss.adversarial_temperature 0.2809627972917491 +840 52 optimizer.lr 0.05207164172667355 +840 52 negative_sampler.num_negs_per_pos 37.0 +840 52 training.batch_size 2.0 +840 53 model.embedding_dim 2.0 +840 53 model.scoring_fct_norm 1.0 +840 53 loss.margin 27.803373638353108 +840 53 loss.adversarial_temperature 0.9762425728181142 +840 53 optimizer.lr 0.007563375914195201 +840 53 negative_sampler.num_negs_per_pos 5.0 +840 53 training.batch_size 1.0 +840 54 model.embedding_dim 0.0 +840 54 model.scoring_fct_norm 1.0 +840 54 loss.margin 25.043182712809266 +840 54 loss.adversarial_temperature 0.6008940907562081 +840 54 optimizer.lr 0.042615310068998345 +840 54 negative_sampler.num_negs_per_pos 63.0 +840 54 training.batch_size 1.0 +840 55 model.embedding_dim 1.0 +840 55 model.scoring_fct_norm 2.0 +840 55 loss.margin 10.075935178924658 +840 55 
loss.adversarial_temperature 0.4866065392307946 +840 55 optimizer.lr 0.0020204494001069264 +840 55 negative_sampler.num_negs_per_pos 6.0 +840 55 training.batch_size 1.0 +840 56 model.embedding_dim 0.0 +840 56 model.scoring_fct_norm 1.0 +840 56 loss.margin 11.800814733842618 +840 56 loss.adversarial_temperature 0.647292573266044 +840 56 optimizer.lr 0.0013521658507982671 +840 56 negative_sampler.num_negs_per_pos 50.0 +840 56 training.batch_size 1.0 +840 57 model.embedding_dim 0.0 +840 57 model.scoring_fct_norm 1.0 +840 57 loss.margin 16.886724781100146 +840 57 loss.adversarial_temperature 0.6519261941800015 +840 57 optimizer.lr 0.0011554323702787429 +840 57 negative_sampler.num_negs_per_pos 57.0 +840 57 training.batch_size 2.0 +840 58 model.embedding_dim 2.0 +840 58 model.scoring_fct_norm 2.0 +840 58 loss.margin 26.31932112104492 +840 58 loss.adversarial_temperature 0.6772357479334916 +840 58 optimizer.lr 0.0014197963510543677 +840 58 negative_sampler.num_negs_per_pos 62.0 +840 58 training.batch_size 1.0 +840 59 model.embedding_dim 0.0 +840 59 model.scoring_fct_norm 2.0 +840 59 loss.margin 10.783964994203279 +840 59 loss.adversarial_temperature 0.7893825473082573 +840 59 optimizer.lr 0.029693427846694306 +840 59 negative_sampler.num_negs_per_pos 30.0 +840 59 training.batch_size 0.0 +840 60 model.embedding_dim 2.0 +840 60 model.scoring_fct_norm 1.0 +840 60 loss.margin 7.495859525591016 +840 60 loss.adversarial_temperature 0.8062684485112 +840 60 optimizer.lr 0.004025702635999013 +840 60 negative_sampler.num_negs_per_pos 92.0 +840 60 training.batch_size 1.0 +840 61 model.embedding_dim 2.0 +840 61 model.scoring_fct_norm 2.0 +840 61 loss.margin 9.101397952641452 +840 61 loss.adversarial_temperature 0.7849083547982162 +840 61 optimizer.lr 0.005213383780616326 +840 61 negative_sampler.num_negs_per_pos 36.0 +840 61 training.batch_size 1.0 +840 62 model.embedding_dim 2.0 +840 62 model.scoring_fct_norm 1.0 +840 62 loss.margin 14.008819881461038 +840 62 
loss.adversarial_temperature 0.5817201939987882 +840 62 optimizer.lr 0.009534521300522434 +840 62 negative_sampler.num_negs_per_pos 57.0 +840 62 training.batch_size 1.0 +840 63 model.embedding_dim 2.0 +840 63 model.scoring_fct_norm 1.0 +840 63 loss.margin 12.421553797872383 +840 63 loss.adversarial_temperature 0.6358469340360906 +840 63 optimizer.lr 0.026061686596884494 +840 63 negative_sampler.num_negs_per_pos 45.0 +840 63 training.batch_size 1.0 +840 64 model.embedding_dim 0.0 +840 64 model.scoring_fct_norm 1.0 +840 64 loss.margin 26.91201612107197 +840 64 loss.adversarial_temperature 0.12539907079805115 +840 64 optimizer.lr 0.009044779359394031 +840 64 negative_sampler.num_negs_per_pos 18.0 +840 64 training.batch_size 0.0 +840 65 model.embedding_dim 1.0 +840 65 model.scoring_fct_norm 1.0 +840 65 loss.margin 26.356934418009786 +840 65 loss.adversarial_temperature 0.9868152520573495 +840 65 optimizer.lr 0.005566630776237018 +840 65 negative_sampler.num_negs_per_pos 74.0 +840 65 training.batch_size 2.0 +840 66 model.embedding_dim 0.0 +840 66 model.scoring_fct_norm 1.0 +840 66 loss.margin 6.083142502017703 +840 66 loss.adversarial_temperature 0.8920399736448424 +840 66 optimizer.lr 0.015274330729339375 +840 66 negative_sampler.num_negs_per_pos 51.0 +840 66 training.batch_size 2.0 +840 67 model.embedding_dim 0.0 +840 67 model.scoring_fct_norm 1.0 +840 67 loss.margin 29.005024384944953 +840 67 loss.adversarial_temperature 0.18213094078340816 +840 67 optimizer.lr 0.027918300153467853 +840 67 negative_sampler.num_negs_per_pos 1.0 +840 67 training.batch_size 0.0 +840 68 model.embedding_dim 2.0 +840 68 model.scoring_fct_norm 2.0 +840 68 loss.margin 24.277634577419924 +840 68 loss.adversarial_temperature 0.16866086792915003 +840 68 optimizer.lr 0.09315948052209179 +840 68 negative_sampler.num_negs_per_pos 41.0 +840 68 training.batch_size 2.0 +840 69 model.embedding_dim 1.0 +840 69 model.scoring_fct_norm 2.0 +840 69 loss.margin 19.48064912325507 +840 69 
loss.adversarial_temperature 0.30965783129185526 +840 69 optimizer.lr 0.036055460896129723 +840 69 negative_sampler.num_negs_per_pos 83.0 +840 69 training.batch_size 2.0 +840 70 model.embedding_dim 0.0 +840 70 model.scoring_fct_norm 2.0 +840 70 loss.margin 24.334589648436943 +840 70 loss.adversarial_temperature 0.4332191307540801 +840 70 optimizer.lr 0.0016611584497300189 +840 70 negative_sampler.num_negs_per_pos 31.0 +840 70 training.batch_size 1.0 +840 71 model.embedding_dim 0.0 +840 71 model.scoring_fct_norm 2.0 +840 71 loss.margin 11.84150544994413 +840 71 loss.adversarial_temperature 0.16283724506043704 +840 71 optimizer.lr 0.02141199216630532 +840 71 negative_sampler.num_negs_per_pos 46.0 +840 71 training.batch_size 2.0 +840 72 model.embedding_dim 0.0 +840 72 model.scoring_fct_norm 2.0 +840 72 loss.margin 23.47333558216402 +840 72 loss.adversarial_temperature 0.569024459248502 +840 72 optimizer.lr 0.010415635666533161 +840 72 negative_sampler.num_negs_per_pos 63.0 +840 72 training.batch_size 0.0 +840 73 model.embedding_dim 0.0 +840 73 model.scoring_fct_norm 2.0 +840 73 loss.margin 19.52417110495326 +840 73 loss.adversarial_temperature 0.8822564385631454 +840 73 optimizer.lr 0.009437331317324967 +840 73 negative_sampler.num_negs_per_pos 59.0 +840 73 training.batch_size 0.0 +840 74 model.embedding_dim 1.0 +840 74 model.scoring_fct_norm 2.0 +840 74 loss.margin 26.106432823196943 +840 74 loss.adversarial_temperature 0.1078191251196582 +840 74 optimizer.lr 0.01522650795660544 +840 74 negative_sampler.num_negs_per_pos 23.0 +840 74 training.batch_size 0.0 +840 75 model.embedding_dim 0.0 +840 75 model.scoring_fct_norm 2.0 +840 75 loss.margin 4.528796102657333 +840 75 loss.adversarial_temperature 0.8624263954191446 +840 75 optimizer.lr 0.006209853403298 +840 75 negative_sampler.num_negs_per_pos 97.0 +840 75 training.batch_size 1.0 +840 76 model.embedding_dim 2.0 +840 76 model.scoring_fct_norm 2.0 +840 76 loss.margin 2.954272735472389 +840 76 
loss.adversarial_temperature 0.9001089011775096 +840 76 optimizer.lr 0.006994532417881381 +840 76 negative_sampler.num_negs_per_pos 89.0 +840 76 training.batch_size 0.0 +840 77 model.embedding_dim 1.0 +840 77 model.scoring_fct_norm 1.0 +840 77 loss.margin 9.6382086598214 +840 77 loss.adversarial_temperature 0.7447470029028185 +840 77 optimizer.lr 0.040883215859673826 +840 77 negative_sampler.num_negs_per_pos 77.0 +840 77 training.batch_size 1.0 +840 78 model.embedding_dim 1.0 +840 78 model.scoring_fct_norm 1.0 +840 78 loss.margin 28.296707766285785 +840 78 loss.adversarial_temperature 0.8811921974824104 +840 78 optimizer.lr 0.00294381684738956 +840 78 negative_sampler.num_negs_per_pos 61.0 +840 78 training.batch_size 2.0 +840 79 model.embedding_dim 0.0 +840 79 model.scoring_fct_norm 1.0 +840 79 loss.margin 28.819158735620476 +840 79 loss.adversarial_temperature 0.4520108565856169 +840 79 optimizer.lr 0.0015126633044210795 +840 79 negative_sampler.num_negs_per_pos 86.0 +840 79 training.batch_size 1.0 +840 80 model.embedding_dim 2.0 +840 80 model.scoring_fct_norm 2.0 +840 80 loss.margin 3.1654888421411593 +840 80 loss.adversarial_temperature 0.9346291819210559 +840 80 optimizer.lr 0.001131019188215663 +840 80 negative_sampler.num_negs_per_pos 19.0 +840 80 training.batch_size 0.0 +840 81 model.embedding_dim 1.0 +840 81 model.scoring_fct_norm 1.0 +840 81 loss.margin 2.432081527645884 +840 81 loss.adversarial_temperature 0.5824513453276494 +840 81 optimizer.lr 0.0047618690074762585 +840 81 negative_sampler.num_negs_per_pos 30.0 +840 81 training.batch_size 0.0 +840 82 model.embedding_dim 2.0 +840 82 model.scoring_fct_norm 2.0 +840 82 loss.margin 13.969220942556388 +840 82 loss.adversarial_temperature 0.4765627526471393 +840 82 optimizer.lr 0.07369826489559218 +840 82 negative_sampler.num_negs_per_pos 6.0 +840 82 training.batch_size 1.0 +840 83 model.embedding_dim 1.0 +840 83 model.scoring_fct_norm 1.0 +840 83 loss.margin 2.0045843331465782 +840 83 
loss.adversarial_temperature 0.38141726443454915 +840 83 optimizer.lr 0.010374559283442232 +840 83 negative_sampler.num_negs_per_pos 67.0 +840 83 training.batch_size 0.0 +840 84 model.embedding_dim 2.0 +840 84 model.scoring_fct_norm 1.0 +840 84 loss.margin 24.176894870497296 +840 84 loss.adversarial_temperature 0.38086862717250486 +840 84 optimizer.lr 0.0016608689106633835 +840 84 negative_sampler.num_negs_per_pos 94.0 +840 84 training.batch_size 2.0 +840 85 model.embedding_dim 1.0 +840 85 model.scoring_fct_norm 2.0 +840 85 loss.margin 16.333887596360704 +840 85 loss.adversarial_temperature 0.4881354194722906 +840 85 optimizer.lr 0.04073836592127983 +840 85 negative_sampler.num_negs_per_pos 90.0 +840 85 training.batch_size 0.0 +840 86 model.embedding_dim 2.0 +840 86 model.scoring_fct_norm 2.0 +840 86 loss.margin 4.921700125143105 +840 86 loss.adversarial_temperature 0.6054672181820011 +840 86 optimizer.lr 0.0027790089508656223 +840 86 negative_sampler.num_negs_per_pos 25.0 +840 86 training.batch_size 0.0 +840 87 model.embedding_dim 1.0 +840 87 model.scoring_fct_norm 1.0 +840 87 loss.margin 10.111750348180054 +840 87 loss.adversarial_temperature 0.5283270763700595 +840 87 optimizer.lr 0.014910161664580942 +840 87 negative_sampler.num_negs_per_pos 58.0 +840 87 training.batch_size 1.0 +840 88 model.embedding_dim 0.0 +840 88 model.scoring_fct_norm 2.0 +840 88 loss.margin 24.706728167254834 +840 88 loss.adversarial_temperature 0.8888418575642788 +840 88 optimizer.lr 0.021566056988678858 +840 88 negative_sampler.num_negs_per_pos 46.0 +840 88 training.batch_size 2.0 +840 89 model.embedding_dim 0.0 +840 89 model.scoring_fct_norm 1.0 +840 89 loss.margin 28.255615012159282 +840 89 loss.adversarial_temperature 0.2625759383442148 +840 89 optimizer.lr 0.005193699587361986 +840 89 negative_sampler.num_negs_per_pos 86.0 +840 89 training.batch_size 1.0 +840 90 model.embedding_dim 0.0 +840 90 model.scoring_fct_norm 2.0 +840 90 loss.margin 10.575542767438742 +840 90 
loss.adversarial_temperature 0.8007508836539547 +840 90 optimizer.lr 0.06846824408658919 +840 90 negative_sampler.num_negs_per_pos 49.0 +840 90 training.batch_size 2.0 +840 91 model.embedding_dim 2.0 +840 91 model.scoring_fct_norm 1.0 +840 91 loss.margin 8.669792144105667 +840 91 loss.adversarial_temperature 0.9610490616695674 +840 91 optimizer.lr 0.0018453560290270404 +840 91 negative_sampler.num_negs_per_pos 76.0 +840 91 training.batch_size 2.0 +840 92 model.embedding_dim 2.0 +840 92 model.scoring_fct_norm 2.0 +840 92 loss.margin 17.045290084169903 +840 92 loss.adversarial_temperature 0.8614575486366851 +840 92 optimizer.lr 0.0013494500089841633 +840 92 negative_sampler.num_negs_per_pos 9.0 +840 92 training.batch_size 0.0 +840 93 model.embedding_dim 0.0 +840 93 model.scoring_fct_norm 2.0 +840 93 loss.margin 13.143961375936925 +840 93 loss.adversarial_temperature 0.498461173607499 +840 93 optimizer.lr 0.00860592607374031 +840 93 negative_sampler.num_negs_per_pos 68.0 +840 93 training.batch_size 2.0 +840 94 model.embedding_dim 0.0 +840 94 model.scoring_fct_norm 1.0 +840 94 loss.margin 27.606346487876827 +840 94 loss.adversarial_temperature 0.9139633055311439 +840 94 optimizer.lr 0.09373009836859758 +840 94 negative_sampler.num_negs_per_pos 10.0 +840 94 training.batch_size 2.0 +840 95 model.embedding_dim 1.0 +840 95 model.scoring_fct_norm 1.0 +840 95 loss.margin 22.351827426699966 +840 95 loss.adversarial_temperature 0.9744275582994214 +840 95 optimizer.lr 0.001972451992407908 +840 95 negative_sampler.num_negs_per_pos 71.0 +840 95 training.batch_size 2.0 +840 96 model.embedding_dim 1.0 +840 96 model.scoring_fct_norm 2.0 +840 96 loss.margin 13.749441436406135 +840 96 loss.adversarial_temperature 0.585493061688775 +840 96 optimizer.lr 0.060844048686457666 +840 96 negative_sampler.num_negs_per_pos 1.0 +840 96 training.batch_size 1.0 +840 97 model.embedding_dim 2.0 +840 97 model.scoring_fct_norm 1.0 +840 97 loss.margin 2.376100315424195 +840 97 
loss.adversarial_temperature 0.7909514603504928 +840 97 optimizer.lr 0.004089579703197091 +840 97 negative_sampler.num_negs_per_pos 16.0 +840 97 training.batch_size 0.0 +840 98 model.embedding_dim 0.0 +840 98 model.scoring_fct_norm 1.0 +840 98 loss.margin 18.713198695661927 +840 98 loss.adversarial_temperature 0.6332237121427721 +840 98 optimizer.lr 0.05499877752652506 +840 98 negative_sampler.num_negs_per_pos 25.0 +840 98 training.batch_size 1.0 +840 99 model.embedding_dim 1.0 +840 99 model.scoring_fct_norm 1.0 +840 99 loss.margin 9.31227036355027 +840 99 loss.adversarial_temperature 0.582352070206859 +840 99 optimizer.lr 0.021506676095702455 +840 99 negative_sampler.num_negs_per_pos 3.0 +840 99 training.batch_size 1.0 +840 100 model.embedding_dim 1.0 +840 100 model.scoring_fct_norm 2.0 +840 100 loss.margin 12.24997349828937 +840 100 loss.adversarial_temperature 0.8637584137383499 +840 100 optimizer.lr 0.001135634774882495 +840 100 negative_sampler.num_negs_per_pos 13.0 +840 100 training.batch_size 1.0 +840 1 dataset """wn18rr""" +840 1 model """transe""" +840 1 loss """nssa""" +840 1 regularizer """no""" +840 1 optimizer """adam""" +840 1 training_loop """owa""" +840 1 negative_sampler """basic""" +840 1 evaluator """rankbased""" +840 2 dataset """wn18rr""" +840 2 model """transe""" +840 2 loss """nssa""" +840 2 regularizer """no""" +840 2 optimizer """adam""" +840 2 training_loop """owa""" +840 2 negative_sampler """basic""" +840 2 evaluator """rankbased""" +840 3 dataset """wn18rr""" +840 3 model """transe""" +840 3 loss """nssa""" +840 3 regularizer """no""" +840 3 optimizer """adam""" +840 3 training_loop """owa""" +840 3 negative_sampler """basic""" +840 3 evaluator """rankbased""" +840 4 dataset """wn18rr""" +840 4 model """transe""" +840 4 loss """nssa""" +840 4 regularizer """no""" +840 4 optimizer """adam""" +840 4 training_loop """owa""" +840 4 negative_sampler """basic""" +840 4 evaluator """rankbased""" +840 5 dataset """wn18rr""" +840 5 model 
"""transe""" +840 5 loss """nssa""" +840 5 regularizer """no""" +840 5 optimizer """adam""" +840 5 training_loop """owa""" +840 5 negative_sampler """basic""" +840 5 evaluator """rankbased""" +840 6 dataset """wn18rr""" +840 6 model """transe""" +840 6 loss """nssa""" +840 6 regularizer """no""" +840 6 optimizer """adam""" +840 6 training_loop """owa""" +840 6 negative_sampler """basic""" +840 6 evaluator """rankbased""" +840 7 dataset """wn18rr""" +840 7 model """transe""" +840 7 loss """nssa""" +840 7 regularizer """no""" +840 7 optimizer """adam""" +840 7 training_loop """owa""" +840 7 negative_sampler """basic""" +840 7 evaluator """rankbased""" +840 8 dataset """wn18rr""" +840 8 model """transe""" +840 8 loss """nssa""" +840 8 regularizer """no""" +840 8 optimizer """adam""" +840 8 training_loop """owa""" +840 8 negative_sampler """basic""" +840 8 evaluator """rankbased""" +840 9 dataset """wn18rr""" +840 9 model """transe""" +840 9 loss """nssa""" +840 9 regularizer """no""" +840 9 optimizer """adam""" +840 9 training_loop """owa""" +840 9 negative_sampler """basic""" +840 9 evaluator """rankbased""" +840 10 dataset """wn18rr""" +840 10 model """transe""" +840 10 loss """nssa""" +840 10 regularizer """no""" +840 10 optimizer """adam""" +840 10 training_loop """owa""" +840 10 negative_sampler """basic""" +840 10 evaluator """rankbased""" +840 11 dataset """wn18rr""" +840 11 model """transe""" +840 11 loss """nssa""" +840 11 regularizer """no""" +840 11 optimizer """adam""" +840 11 training_loop """owa""" +840 11 negative_sampler """basic""" +840 11 evaluator """rankbased""" +840 12 dataset """wn18rr""" +840 12 model """transe""" +840 12 loss """nssa""" +840 12 regularizer """no""" +840 12 optimizer """adam""" +840 12 training_loop """owa""" +840 12 negative_sampler """basic""" +840 12 evaluator """rankbased""" +840 13 dataset """wn18rr""" +840 13 model """transe""" +840 13 loss """nssa""" +840 13 regularizer """no""" +840 13 optimizer """adam""" +840 13 
training_loop """owa""" +840 13 negative_sampler """basic""" +840 13 evaluator """rankbased""" +840 14 dataset """wn18rr""" +840 14 model """transe""" +840 14 loss """nssa""" +840 14 regularizer """no""" +840 14 optimizer """adam""" +840 14 training_loop """owa""" +840 14 negative_sampler """basic""" +840 14 evaluator """rankbased""" +840 15 dataset """wn18rr""" +840 15 model """transe""" +840 15 loss """nssa""" +840 15 regularizer """no""" +840 15 optimizer """adam""" +840 15 training_loop """owa""" +840 15 negative_sampler """basic""" +840 15 evaluator """rankbased""" +840 16 dataset """wn18rr""" +840 16 model """transe""" +840 16 loss """nssa""" +840 16 regularizer """no""" +840 16 optimizer """adam""" +840 16 training_loop """owa""" +840 16 negative_sampler """basic""" +840 16 evaluator """rankbased""" +840 17 dataset """wn18rr""" +840 17 model """transe""" +840 17 loss """nssa""" +840 17 regularizer """no""" +840 17 optimizer """adam""" +840 17 training_loop """owa""" +840 17 negative_sampler """basic""" +840 17 evaluator """rankbased""" +840 18 dataset """wn18rr""" +840 18 model """transe""" +840 18 loss """nssa""" +840 18 regularizer """no""" +840 18 optimizer """adam""" +840 18 training_loop """owa""" +840 18 negative_sampler """basic""" +840 18 evaluator """rankbased""" +840 19 dataset """wn18rr""" +840 19 model """transe""" +840 19 loss """nssa""" +840 19 regularizer """no""" +840 19 optimizer """adam""" +840 19 training_loop """owa""" +840 19 negative_sampler """basic""" +840 19 evaluator """rankbased""" +840 20 dataset """wn18rr""" +840 20 model """transe""" +840 20 loss """nssa""" +840 20 regularizer """no""" +840 20 optimizer """adam""" +840 20 training_loop """owa""" +840 20 negative_sampler """basic""" +840 20 evaluator """rankbased""" +840 21 dataset """wn18rr""" +840 21 model """transe""" +840 21 loss """nssa""" +840 21 regularizer """no""" +840 21 optimizer """adam""" +840 21 training_loop """owa""" +840 21 negative_sampler """basic""" +840 21 
evaluator """rankbased""" +840 22 dataset """wn18rr""" +840 22 model """transe""" +840 22 loss """nssa""" +840 22 regularizer """no""" +840 22 optimizer """adam""" +840 22 training_loop """owa""" +840 22 negative_sampler """basic""" +840 22 evaluator """rankbased""" +840 23 dataset """wn18rr""" +840 23 model """transe""" +840 23 loss """nssa""" +840 23 regularizer """no""" +840 23 optimizer """adam""" +840 23 training_loop """owa""" +840 23 negative_sampler """basic""" +840 23 evaluator """rankbased""" +840 24 dataset """wn18rr""" +840 24 model """transe""" +840 24 loss """nssa""" +840 24 regularizer """no""" +840 24 optimizer """adam""" +840 24 training_loop """owa""" +840 24 negative_sampler """basic""" +840 24 evaluator """rankbased""" +840 25 dataset """wn18rr""" +840 25 model """transe""" +840 25 loss """nssa""" +840 25 regularizer """no""" +840 25 optimizer """adam""" +840 25 training_loop """owa""" +840 25 negative_sampler """basic""" +840 25 evaluator """rankbased""" +840 26 dataset """wn18rr""" +840 26 model """transe""" +840 26 loss """nssa""" +840 26 regularizer """no""" +840 26 optimizer """adam""" +840 26 training_loop """owa""" +840 26 negative_sampler """basic""" +840 26 evaluator """rankbased""" +840 27 dataset """wn18rr""" +840 27 model """transe""" +840 27 loss """nssa""" +840 27 regularizer """no""" +840 27 optimizer """adam""" +840 27 training_loop """owa""" +840 27 negative_sampler """basic""" +840 27 evaluator """rankbased""" +840 28 dataset """wn18rr""" +840 28 model """transe""" +840 28 loss """nssa""" +840 28 regularizer """no""" +840 28 optimizer """adam""" +840 28 training_loop """owa""" +840 28 negative_sampler """basic""" +840 28 evaluator """rankbased""" +840 29 dataset """wn18rr""" +840 29 model """transe""" +840 29 loss """nssa""" +840 29 regularizer """no""" +840 29 optimizer """adam""" +840 29 training_loop """owa""" +840 29 negative_sampler """basic""" +840 29 evaluator """rankbased""" +840 30 dataset """wn18rr""" +840 30 model 
"""transe""" +840 30 loss """nssa""" +840 30 regularizer """no""" +840 30 optimizer """adam""" +840 30 training_loop """owa""" +840 30 negative_sampler """basic""" +840 30 evaluator """rankbased""" +840 31 dataset """wn18rr""" +840 31 model """transe""" +840 31 loss """nssa""" +840 31 regularizer """no""" +840 31 optimizer """adam""" +840 31 training_loop """owa""" +840 31 negative_sampler """basic""" +840 31 evaluator """rankbased""" +840 32 dataset """wn18rr""" +840 32 model """transe""" +840 32 loss """nssa""" +840 32 regularizer """no""" +840 32 optimizer """adam""" +840 32 training_loop """owa""" +840 32 negative_sampler """basic""" +840 32 evaluator """rankbased""" +840 33 dataset """wn18rr""" +840 33 model """transe""" +840 33 loss """nssa""" +840 33 regularizer """no""" +840 33 optimizer """adam""" +840 33 training_loop """owa""" +840 33 negative_sampler """basic""" +840 33 evaluator """rankbased""" +840 34 dataset """wn18rr""" +840 34 model """transe""" +840 34 loss """nssa""" +840 34 regularizer """no""" +840 34 optimizer """adam""" +840 34 training_loop """owa""" +840 34 negative_sampler """basic""" +840 34 evaluator """rankbased""" +840 35 dataset """wn18rr""" +840 35 model """transe""" +840 35 loss """nssa""" +840 35 regularizer """no""" +840 35 optimizer """adam""" +840 35 training_loop """owa""" +840 35 negative_sampler """basic""" +840 35 evaluator """rankbased""" +840 36 dataset """wn18rr""" +840 36 model """transe""" +840 36 loss """nssa""" +840 36 regularizer """no""" +840 36 optimizer """adam""" +840 36 training_loop """owa""" +840 36 negative_sampler """basic""" +840 36 evaluator """rankbased""" +840 37 dataset """wn18rr""" +840 37 model """transe""" +840 37 loss """nssa""" +840 37 regularizer """no""" +840 37 optimizer """adam""" +840 37 training_loop """owa""" +840 37 negative_sampler """basic""" +840 37 evaluator """rankbased""" +840 38 dataset """wn18rr""" +840 38 model """transe""" +840 38 loss """nssa""" +840 38 regularizer """no""" +840 
38 optimizer """adam""" +840 38 training_loop """owa""" +840 38 negative_sampler """basic""" +840 38 evaluator """rankbased""" +840 39 dataset """wn18rr""" +840 39 model """transe""" +840 39 loss """nssa""" +840 39 regularizer """no""" +840 39 optimizer """adam""" +840 39 training_loop """owa""" +840 39 negative_sampler """basic""" +840 39 evaluator """rankbased""" +840 40 dataset """wn18rr""" +840 40 model """transe""" +840 40 loss """nssa""" +840 40 regularizer """no""" +840 40 optimizer """adam""" +840 40 training_loop """owa""" +840 40 negative_sampler """basic""" +840 40 evaluator """rankbased""" +840 41 dataset """wn18rr""" +840 41 model """transe""" +840 41 loss """nssa""" +840 41 regularizer """no""" +840 41 optimizer """adam""" +840 41 training_loop """owa""" +840 41 negative_sampler """basic""" +840 41 evaluator """rankbased""" +840 42 dataset """wn18rr""" +840 42 model """transe""" +840 42 loss """nssa""" +840 42 regularizer """no""" +840 42 optimizer """adam""" +840 42 training_loop """owa""" +840 42 negative_sampler """basic""" +840 42 evaluator """rankbased""" +840 43 dataset """wn18rr""" +840 43 model """transe""" +840 43 loss """nssa""" +840 43 regularizer """no""" +840 43 optimizer """adam""" +840 43 training_loop """owa""" +840 43 negative_sampler """basic""" +840 43 evaluator """rankbased""" +840 44 dataset """wn18rr""" +840 44 model """transe""" +840 44 loss """nssa""" +840 44 regularizer """no""" +840 44 optimizer """adam""" +840 44 training_loop """owa""" +840 44 negative_sampler """basic""" +840 44 evaluator """rankbased""" +840 45 dataset """wn18rr""" +840 45 model """transe""" +840 45 loss """nssa""" +840 45 regularizer """no""" +840 45 optimizer """adam""" +840 45 training_loop """owa""" +840 45 negative_sampler """basic""" +840 45 evaluator """rankbased""" +840 46 dataset """wn18rr""" +840 46 model """transe""" +840 46 loss """nssa""" +840 46 regularizer """no""" +840 46 optimizer """adam""" +840 46 training_loop """owa""" +840 46 
negative_sampler """basic""" +840 46 evaluator """rankbased""" +840 47 dataset """wn18rr""" +840 47 model """transe""" +840 47 loss """nssa""" +840 47 regularizer """no""" +840 47 optimizer """adam""" +840 47 training_loop """owa""" +840 47 negative_sampler """basic""" +840 47 evaluator """rankbased""" +840 48 dataset """wn18rr""" +840 48 model """transe""" +840 48 loss """nssa""" +840 48 regularizer """no""" +840 48 optimizer """adam""" +840 48 training_loop """owa""" +840 48 negative_sampler """basic""" +840 48 evaluator """rankbased""" +840 49 dataset """wn18rr""" +840 49 model """transe""" +840 49 loss """nssa""" +840 49 regularizer """no""" +840 49 optimizer """adam""" +840 49 training_loop """owa""" +840 49 negative_sampler """basic""" +840 49 evaluator """rankbased""" +840 50 dataset """wn18rr""" +840 50 model """transe""" +840 50 loss """nssa""" +840 50 regularizer """no""" +840 50 optimizer """adam""" +840 50 training_loop """owa""" +840 50 negative_sampler """basic""" +840 50 evaluator """rankbased""" +840 51 dataset """wn18rr""" +840 51 model """transe""" +840 51 loss """nssa""" +840 51 regularizer """no""" +840 51 optimizer """adam""" +840 51 training_loop """owa""" +840 51 negative_sampler """basic""" +840 51 evaluator """rankbased""" +840 52 dataset """wn18rr""" +840 52 model """transe""" +840 52 loss """nssa""" +840 52 regularizer """no""" +840 52 optimizer """adam""" +840 52 training_loop """owa""" +840 52 negative_sampler """basic""" +840 52 evaluator """rankbased""" +840 53 dataset """wn18rr""" +840 53 model """transe""" +840 53 loss """nssa""" +840 53 regularizer """no""" +840 53 optimizer """adam""" +840 53 training_loop """owa""" +840 53 negative_sampler """basic""" +840 53 evaluator """rankbased""" +840 54 dataset """wn18rr""" +840 54 model """transe""" +840 54 loss """nssa""" +840 54 regularizer """no""" +840 54 optimizer """adam""" +840 54 training_loop """owa""" +840 54 negative_sampler """basic""" +840 54 evaluator """rankbased""" +840 55 
dataset """wn18rr""" +840 55 model """transe""" +840 55 loss """nssa""" +840 55 regularizer """no""" +840 55 optimizer """adam""" +840 55 training_loop """owa""" +840 55 negative_sampler """basic""" +840 55 evaluator """rankbased""" +840 56 dataset """wn18rr""" +840 56 model """transe""" +840 56 loss """nssa""" +840 56 regularizer """no""" +840 56 optimizer """adam""" +840 56 training_loop """owa""" +840 56 negative_sampler """basic""" +840 56 evaluator """rankbased""" +840 57 dataset """wn18rr""" +840 57 model """transe""" +840 57 loss """nssa""" +840 57 regularizer """no""" +840 57 optimizer """adam""" +840 57 training_loop """owa""" +840 57 negative_sampler """basic""" +840 57 evaluator """rankbased""" +840 58 dataset """wn18rr""" +840 58 model """transe""" +840 58 loss """nssa""" +840 58 regularizer """no""" +840 58 optimizer """adam""" +840 58 training_loop """owa""" +840 58 negative_sampler """basic""" +840 58 evaluator """rankbased""" +840 59 dataset """wn18rr""" +840 59 model """transe""" +840 59 loss """nssa""" +840 59 regularizer """no""" +840 59 optimizer """adam""" +840 59 training_loop """owa""" +840 59 negative_sampler """basic""" +840 59 evaluator """rankbased""" +840 60 dataset """wn18rr""" +840 60 model """transe""" +840 60 loss """nssa""" +840 60 regularizer """no""" +840 60 optimizer """adam""" +840 60 training_loop """owa""" +840 60 negative_sampler """basic""" +840 60 evaluator """rankbased""" +840 61 dataset """wn18rr""" +840 61 model """transe""" +840 61 loss """nssa""" +840 61 regularizer """no""" +840 61 optimizer """adam""" +840 61 training_loop """owa""" +840 61 negative_sampler """basic""" +840 61 evaluator """rankbased""" +840 62 dataset """wn18rr""" +840 62 model """transe""" +840 62 loss """nssa""" +840 62 regularizer """no""" +840 62 optimizer """adam""" +840 62 training_loop """owa""" +840 62 negative_sampler """basic""" +840 62 evaluator """rankbased""" +840 63 dataset """wn18rr""" +840 63 model """transe""" +840 63 loss """nssa""" 
+840 63 regularizer """no""" +840 63 optimizer """adam""" +840 63 training_loop """owa""" +840 63 negative_sampler """basic""" +840 63 evaluator """rankbased""" +840 64 dataset """wn18rr""" +840 64 model """transe""" +840 64 loss """nssa""" +840 64 regularizer """no""" +840 64 optimizer """adam""" +840 64 training_loop """owa""" +840 64 negative_sampler """basic""" +840 64 evaluator """rankbased""" +840 65 dataset """wn18rr""" +840 65 model """transe""" +840 65 loss """nssa""" +840 65 regularizer """no""" +840 65 optimizer """adam""" +840 65 training_loop """owa""" +840 65 negative_sampler """basic""" +840 65 evaluator """rankbased""" +840 66 dataset """wn18rr""" +840 66 model """transe""" +840 66 loss """nssa""" +840 66 regularizer """no""" +840 66 optimizer """adam""" +840 66 training_loop """owa""" +840 66 negative_sampler """basic""" +840 66 evaluator """rankbased""" +840 67 dataset """wn18rr""" +840 67 model """transe""" +840 67 loss """nssa""" +840 67 regularizer """no""" +840 67 optimizer """adam""" +840 67 training_loop """owa""" +840 67 negative_sampler """basic""" +840 67 evaluator """rankbased""" +840 68 dataset """wn18rr""" +840 68 model """transe""" +840 68 loss """nssa""" +840 68 regularizer """no""" +840 68 optimizer """adam""" +840 68 training_loop """owa""" +840 68 negative_sampler """basic""" +840 68 evaluator """rankbased""" +840 69 dataset """wn18rr""" +840 69 model """transe""" +840 69 loss """nssa""" +840 69 regularizer """no""" +840 69 optimizer """adam""" +840 69 training_loop """owa""" +840 69 negative_sampler """basic""" +840 69 evaluator """rankbased""" +840 70 dataset """wn18rr""" +840 70 model """transe""" +840 70 loss """nssa""" +840 70 regularizer """no""" +840 70 optimizer """adam""" +840 70 training_loop """owa""" +840 70 negative_sampler """basic""" +840 70 evaluator """rankbased""" +840 71 dataset """wn18rr""" +840 71 model """transe""" +840 71 loss """nssa""" +840 71 regularizer """no""" +840 71 optimizer """adam""" +840 71 
training_loop """owa""" +840 71 negative_sampler """basic""" +840 71 evaluator """rankbased""" +840 72 dataset """wn18rr""" +840 72 model """transe""" +840 72 loss """nssa""" +840 72 regularizer """no""" +840 72 optimizer """adam""" +840 72 training_loop """owa""" +840 72 negative_sampler """basic""" +840 72 evaluator """rankbased""" +840 73 dataset """wn18rr""" +840 73 model """transe""" +840 73 loss """nssa""" +840 73 regularizer """no""" +840 73 optimizer """adam""" +840 73 training_loop """owa""" +840 73 negative_sampler """basic""" +840 73 evaluator """rankbased""" +840 74 dataset """wn18rr""" +840 74 model """transe""" +840 74 loss """nssa""" +840 74 regularizer """no""" +840 74 optimizer """adam""" +840 74 training_loop """owa""" +840 74 negative_sampler """basic""" +840 74 evaluator """rankbased""" +840 75 dataset """wn18rr""" +840 75 model """transe""" +840 75 loss """nssa""" +840 75 regularizer """no""" +840 75 optimizer """adam""" +840 75 training_loop """owa""" +840 75 negative_sampler """basic""" +840 75 evaluator """rankbased""" +840 76 dataset """wn18rr""" +840 76 model """transe""" +840 76 loss """nssa""" +840 76 regularizer """no""" +840 76 optimizer """adam""" +840 76 training_loop """owa""" +840 76 negative_sampler """basic""" +840 76 evaluator """rankbased""" +840 77 dataset """wn18rr""" +840 77 model """transe""" +840 77 loss """nssa""" +840 77 regularizer """no""" +840 77 optimizer """adam""" +840 77 training_loop """owa""" +840 77 negative_sampler """basic""" +840 77 evaluator """rankbased""" +840 78 dataset """wn18rr""" +840 78 model """transe""" +840 78 loss """nssa""" +840 78 regularizer """no""" +840 78 optimizer """adam""" +840 78 training_loop """owa""" +840 78 negative_sampler """basic""" +840 78 evaluator """rankbased""" +840 79 dataset """wn18rr""" +840 79 model """transe""" +840 79 loss """nssa""" +840 79 regularizer """no""" +840 79 optimizer """adam""" +840 79 training_loop """owa""" +840 79 negative_sampler """basic""" +840 79 
evaluator """rankbased""" +840 80 dataset """wn18rr""" +840 80 model """transe""" +840 80 loss """nssa""" +840 80 regularizer """no""" +840 80 optimizer """adam""" +840 80 training_loop """owa""" +840 80 negative_sampler """basic""" +840 80 evaluator """rankbased""" +840 81 dataset """wn18rr""" +840 81 model """transe""" +840 81 loss """nssa""" +840 81 regularizer """no""" +840 81 optimizer """adam""" +840 81 training_loop """owa""" +840 81 negative_sampler """basic""" +840 81 evaluator """rankbased""" +840 82 dataset """wn18rr""" +840 82 model """transe""" +840 82 loss """nssa""" +840 82 regularizer """no""" +840 82 optimizer """adam""" +840 82 training_loop """owa""" +840 82 negative_sampler """basic""" +840 82 evaluator """rankbased""" +840 83 dataset """wn18rr""" +840 83 model """transe""" +840 83 loss """nssa""" +840 83 regularizer """no""" +840 83 optimizer """adam""" +840 83 training_loop """owa""" +840 83 negative_sampler """basic""" +840 83 evaluator """rankbased""" +840 84 dataset """wn18rr""" +840 84 model """transe""" +840 84 loss """nssa""" +840 84 regularizer """no""" +840 84 optimizer """adam""" +840 84 training_loop """owa""" +840 84 negative_sampler """basic""" +840 84 evaluator """rankbased""" +840 85 dataset """wn18rr""" +840 85 model """transe""" +840 85 loss """nssa""" +840 85 regularizer """no""" +840 85 optimizer """adam""" +840 85 training_loop """owa""" +840 85 negative_sampler """basic""" +840 85 evaluator """rankbased""" +840 86 dataset """wn18rr""" +840 86 model """transe""" +840 86 loss """nssa""" +840 86 regularizer """no""" +840 86 optimizer """adam""" +840 86 training_loop """owa""" +840 86 negative_sampler """basic""" +840 86 evaluator """rankbased""" +840 87 dataset """wn18rr""" +840 87 model """transe""" +840 87 loss """nssa""" +840 87 regularizer """no""" +840 87 optimizer """adam""" +840 87 training_loop """owa""" +840 87 negative_sampler """basic""" +840 87 evaluator """rankbased""" +840 88 dataset """wn18rr""" +840 88 model 
"""transe""" +840 88 loss """nssa""" +840 88 regularizer """no""" +840 88 optimizer """adam""" +840 88 training_loop """owa""" +840 88 negative_sampler """basic""" +840 88 evaluator """rankbased""" +840 89 dataset """wn18rr""" +840 89 model """transe""" +840 89 loss """nssa""" +840 89 regularizer """no""" +840 89 optimizer """adam""" +840 89 training_loop """owa""" +840 89 negative_sampler """basic""" +840 89 evaluator """rankbased""" +840 90 dataset """wn18rr""" +840 90 model """transe""" +840 90 loss """nssa""" +840 90 regularizer """no""" +840 90 optimizer """adam""" +840 90 training_loop """owa""" +840 90 negative_sampler """basic""" +840 90 evaluator """rankbased""" +840 91 dataset """wn18rr""" +840 91 model """transe""" +840 91 loss """nssa""" +840 91 regularizer """no""" +840 91 optimizer """adam""" +840 91 training_loop """owa""" +840 91 negative_sampler """basic""" +840 91 evaluator """rankbased""" +840 92 dataset """wn18rr""" +840 92 model """transe""" +840 92 loss """nssa""" +840 92 regularizer """no""" +840 92 optimizer """adam""" +840 92 training_loop """owa""" +840 92 negative_sampler """basic""" +840 92 evaluator """rankbased""" +840 93 dataset """wn18rr""" +840 93 model """transe""" +840 93 loss """nssa""" +840 93 regularizer """no""" +840 93 optimizer """adam""" +840 93 training_loop """owa""" +840 93 negative_sampler """basic""" +840 93 evaluator """rankbased""" +840 94 dataset """wn18rr""" +840 94 model """transe""" +840 94 loss """nssa""" +840 94 regularizer """no""" +840 94 optimizer """adam""" +840 94 training_loop """owa""" +840 94 negative_sampler """basic""" +840 94 evaluator """rankbased""" +840 95 dataset """wn18rr""" +840 95 model """transe""" +840 95 loss """nssa""" +840 95 regularizer """no""" +840 95 optimizer """adam""" +840 95 training_loop """owa""" +840 95 negative_sampler """basic""" +840 95 evaluator """rankbased""" +840 96 dataset """wn18rr""" +840 96 model """transe""" +840 96 loss """nssa""" +840 96 regularizer """no""" +840 
96 optimizer """adam""" +840 96 training_loop """owa""" +840 96 negative_sampler """basic""" +840 96 evaluator """rankbased""" +840 97 dataset """wn18rr""" +840 97 model """transe""" +840 97 loss """nssa""" +840 97 regularizer """no""" +840 97 optimizer """adam""" +840 97 training_loop """owa""" +840 97 negative_sampler """basic""" +840 97 evaluator """rankbased""" +840 98 dataset """wn18rr""" +840 98 model """transe""" +840 98 loss """nssa""" +840 98 regularizer """no""" +840 98 optimizer """adam""" +840 98 training_loop """owa""" +840 98 negative_sampler """basic""" +840 98 evaluator """rankbased""" +840 99 dataset """wn18rr""" +840 99 model """transe""" +840 99 loss """nssa""" +840 99 regularizer """no""" +840 99 optimizer """adam""" +840 99 training_loop """owa""" +840 99 negative_sampler """basic""" +840 99 evaluator """rankbased""" +840 100 dataset """wn18rr""" +840 100 model """transe""" +840 100 loss """nssa""" +840 100 regularizer """no""" +840 100 optimizer """adam""" +840 100 training_loop """owa""" +840 100 negative_sampler """basic""" +840 100 evaluator """rankbased""" +841 1 model.embedding_dim 1.0 +841 1 model.scoring_fct_norm 2.0 +841 1 optimizer.lr 0.011152450074926278 +841 1 training.batch_size 0.0 +841 1 training.label_smoothing 0.015250965987359305 +841 2 model.embedding_dim 1.0 +841 2 model.scoring_fct_norm 2.0 +841 2 optimizer.lr 0.004757127700174576 +841 2 training.batch_size 0.0 +841 2 training.label_smoothing 0.01765894006586721 +841 3 model.embedding_dim 1.0 +841 3 model.scoring_fct_norm 1.0 +841 3 optimizer.lr 0.010957276896577252 +841 3 training.batch_size 1.0 +841 3 training.label_smoothing 0.03356902410651174 +841 4 model.embedding_dim 2.0 +841 4 model.scoring_fct_norm 1.0 +841 4 optimizer.lr 0.006265068484302193 +841 4 training.batch_size 1.0 +841 4 training.label_smoothing 0.00831339181887436 +841 5 model.embedding_dim 1.0 +841 5 model.scoring_fct_norm 1.0 +841 5 optimizer.lr 0.004502284802514863 +841 5 training.batch_size 1.0 +841 5 
training.label_smoothing 0.05709876998225372 +841 6 model.embedding_dim 2.0 +841 6 model.scoring_fct_norm 1.0 +841 6 optimizer.lr 0.08064434214465155 +841 6 training.batch_size 0.0 +841 6 training.label_smoothing 0.1862511289544603 +841 7 model.embedding_dim 2.0 +841 7 model.scoring_fct_norm 1.0 +841 7 optimizer.lr 0.007291213333625623 +841 7 training.batch_size 1.0 +841 7 training.label_smoothing 0.24182354428293115 +841 1 dataset """wn18rr""" +841 1 model """transe""" +841 1 loss """crossentropy""" +841 1 regularizer """no""" +841 1 optimizer """adam""" +841 1 training_loop """lcwa""" +841 1 evaluator """rankbased""" +841 2 dataset """wn18rr""" +841 2 model """transe""" +841 2 loss """crossentropy""" +841 2 regularizer """no""" +841 2 optimizer """adam""" +841 2 training_loop """lcwa""" +841 2 evaluator """rankbased""" +841 3 dataset """wn18rr""" +841 3 model """transe""" +841 3 loss """crossentropy""" +841 3 regularizer """no""" +841 3 optimizer """adam""" +841 3 training_loop """lcwa""" +841 3 evaluator """rankbased""" +841 4 dataset """wn18rr""" +841 4 model """transe""" +841 4 loss """crossentropy""" +841 4 regularizer """no""" +841 4 optimizer """adam""" +841 4 training_loop """lcwa""" +841 4 evaluator """rankbased""" +841 5 dataset """wn18rr""" +841 5 model """transe""" +841 5 loss """crossentropy""" +841 5 regularizer """no""" +841 5 optimizer """adam""" +841 5 training_loop """lcwa""" +841 5 evaluator """rankbased""" +841 6 dataset """wn18rr""" +841 6 model """transe""" +841 6 loss """crossentropy""" +841 6 regularizer """no""" +841 6 optimizer """adam""" +841 6 training_loop """lcwa""" +841 6 evaluator """rankbased""" +841 7 dataset """wn18rr""" +841 7 model """transe""" +841 7 loss """crossentropy""" +841 7 regularizer """no""" +841 7 optimizer """adam""" +841 7 training_loop """lcwa""" +841 7 evaluator """rankbased""" +842 1 model.embedding_dim 2.0 +842 1 model.scoring_fct_norm 1.0 +842 1 optimizer.lr 0.06577547124853932 +842 1 training.batch_size 1.0 
+842 1 training.label_smoothing 0.001975804249310287 +842 2 model.embedding_dim 0.0 +842 2 model.scoring_fct_norm 2.0 +842 2 optimizer.lr 0.0020528457563362486 +842 2 training.batch_size 1.0 +842 2 training.label_smoothing 0.4609204621798411 +842 3 model.embedding_dim 0.0 +842 3 model.scoring_fct_norm 2.0 +842 3 optimizer.lr 0.0718094180607809 +842 3 training.batch_size 2.0 +842 3 training.label_smoothing 0.6262720299132691 +842 4 model.embedding_dim 0.0 +842 4 model.scoring_fct_norm 1.0 +842 4 optimizer.lr 0.012620851842399855 +842 4 training.batch_size 2.0 +842 4 training.label_smoothing 0.3049301273602833 +842 5 model.embedding_dim 0.0 +842 5 model.scoring_fct_norm 1.0 +842 5 optimizer.lr 0.08764208549317774 +842 5 training.batch_size 2.0 +842 5 training.label_smoothing 0.01585442336560557 +842 6 model.embedding_dim 1.0 +842 6 model.scoring_fct_norm 2.0 +842 6 optimizer.lr 0.006842142411759795 +842 6 training.batch_size 0.0 +842 6 training.label_smoothing 0.38257824724276546 +842 7 model.embedding_dim 1.0 +842 7 model.scoring_fct_norm 2.0 +842 7 optimizer.lr 0.02032769312173236 +842 7 training.batch_size 1.0 +842 7 training.label_smoothing 0.05066399538019208 +842 8 model.embedding_dim 1.0 +842 8 model.scoring_fct_norm 2.0 +842 8 optimizer.lr 0.001347538125567758 +842 8 training.batch_size 2.0 +842 8 training.label_smoothing 0.0900248335005741 +842 9 model.embedding_dim 0.0 +842 9 model.scoring_fct_norm 1.0 +842 9 optimizer.lr 0.07319713544536033 +842 9 training.batch_size 2.0 +842 9 training.label_smoothing 0.027796701406318857 +842 10 model.embedding_dim 0.0 +842 10 model.scoring_fct_norm 2.0 +842 10 optimizer.lr 0.0029883350600210705 +842 10 training.batch_size 1.0 +842 10 training.label_smoothing 0.4737068845723712 +842 11 model.embedding_dim 0.0 +842 11 model.scoring_fct_norm 2.0 +842 11 optimizer.lr 0.0740018554501505 +842 11 training.batch_size 2.0 +842 11 training.label_smoothing 0.42751925123905044 +842 12 model.embedding_dim 0.0 +842 12 
model.scoring_fct_norm 2.0 +842 12 optimizer.lr 0.011195972284628257 +842 12 training.batch_size 0.0 +842 12 training.label_smoothing 0.4941455008722081 +842 13 model.embedding_dim 1.0 +842 13 model.scoring_fct_norm 2.0 +842 13 optimizer.lr 0.0021012291044500953 +842 13 training.batch_size 0.0 +842 13 training.label_smoothing 0.0017099093801784466 +842 14 model.embedding_dim 0.0 +842 14 model.scoring_fct_norm 2.0 +842 14 optimizer.lr 0.001716318434693229 +842 14 training.batch_size 0.0 +842 14 training.label_smoothing 0.008089928416938542 +842 15 model.embedding_dim 1.0 +842 15 model.scoring_fct_norm 1.0 +842 15 optimizer.lr 0.07156408311370784 +842 15 training.batch_size 1.0 +842 15 training.label_smoothing 0.005644336714324451 +842 16 model.embedding_dim 0.0 +842 16 model.scoring_fct_norm 2.0 +842 16 optimizer.lr 0.02211530634985553 +842 16 training.batch_size 0.0 +842 16 training.label_smoothing 0.027583441271267394 +842 17 model.embedding_dim 1.0 +842 17 model.scoring_fct_norm 2.0 +842 17 optimizer.lr 0.027113409268672284 +842 17 training.batch_size 0.0 +842 17 training.label_smoothing 0.013208515793312352 +842 1 dataset """wn18rr""" +842 1 model """transe""" +842 1 loss """crossentropy""" +842 1 regularizer """no""" +842 1 optimizer """adam""" +842 1 training_loop """lcwa""" +842 1 evaluator """rankbased""" +842 2 dataset """wn18rr""" +842 2 model """transe""" +842 2 loss """crossentropy""" +842 2 regularizer """no""" +842 2 optimizer """adam""" +842 2 training_loop """lcwa""" +842 2 evaluator """rankbased""" +842 3 dataset """wn18rr""" +842 3 model """transe""" +842 3 loss """crossentropy""" +842 3 regularizer """no""" +842 3 optimizer """adam""" +842 3 training_loop """lcwa""" +842 3 evaluator """rankbased""" +842 4 dataset """wn18rr""" +842 4 model """transe""" +842 4 loss """crossentropy""" +842 4 regularizer """no""" +842 4 optimizer """adam""" +842 4 training_loop """lcwa""" +842 4 evaluator """rankbased""" +842 5 dataset """wn18rr""" +842 5 model 
"""transe""" +842 5 loss """crossentropy""" +842 5 regularizer """no""" +842 5 optimizer """adam""" +842 5 training_loop """lcwa""" +842 5 evaluator """rankbased""" +842 6 dataset """wn18rr""" +842 6 model """transe""" +842 6 loss """crossentropy""" +842 6 regularizer """no""" +842 6 optimizer """adam""" +842 6 training_loop """lcwa""" +842 6 evaluator """rankbased""" +842 7 dataset """wn18rr""" +842 7 model """transe""" +842 7 loss """crossentropy""" +842 7 regularizer """no""" +842 7 optimizer """adam""" +842 7 training_loop """lcwa""" +842 7 evaluator """rankbased""" +842 8 dataset """wn18rr""" +842 8 model """transe""" +842 8 loss """crossentropy""" +842 8 regularizer """no""" +842 8 optimizer """adam""" +842 8 training_loop """lcwa""" +842 8 evaluator """rankbased""" +842 9 dataset """wn18rr""" +842 9 model """transe""" +842 9 loss """crossentropy""" +842 9 regularizer """no""" +842 9 optimizer """adam""" +842 9 training_loop """lcwa""" +842 9 evaluator """rankbased""" +842 10 dataset """wn18rr""" +842 10 model """transe""" +842 10 loss """crossentropy""" +842 10 regularizer """no""" +842 10 optimizer """adam""" +842 10 training_loop """lcwa""" +842 10 evaluator """rankbased""" +842 11 dataset """wn18rr""" +842 11 model """transe""" +842 11 loss """crossentropy""" +842 11 regularizer """no""" +842 11 optimizer """adam""" +842 11 training_loop """lcwa""" +842 11 evaluator """rankbased""" +842 12 dataset """wn18rr""" +842 12 model """transe""" +842 12 loss """crossentropy""" +842 12 regularizer """no""" +842 12 optimizer """adam""" +842 12 training_loop """lcwa""" +842 12 evaluator """rankbased""" +842 13 dataset """wn18rr""" +842 13 model """transe""" +842 13 loss """crossentropy""" +842 13 regularizer """no""" +842 13 optimizer """adam""" +842 13 training_loop """lcwa""" +842 13 evaluator """rankbased""" +842 14 dataset """wn18rr""" +842 14 model """transe""" +842 14 loss """crossentropy""" +842 14 regularizer """no""" +842 14 optimizer """adam""" +842 14 
training_loop """lcwa""" +842 14 evaluator """rankbased""" +842 15 dataset """wn18rr""" +842 15 model """transe""" +842 15 loss """crossentropy""" +842 15 regularizer """no""" +842 15 optimizer """adam""" +842 15 training_loop """lcwa""" +842 15 evaluator """rankbased""" +842 16 dataset """wn18rr""" +842 16 model """transe""" +842 16 loss """crossentropy""" +842 16 regularizer """no""" +842 16 optimizer """adam""" +842 16 training_loop """lcwa""" +842 16 evaluator """rankbased""" +842 17 dataset """wn18rr""" +842 17 model """transe""" +842 17 loss """crossentropy""" +842 17 regularizer """no""" +842 17 optimizer """adam""" +842 17 training_loop """lcwa""" +842 17 evaluator """rankbased""" +843 1 model.embedding_dim 1.0 +843 1 model.scoring_fct_norm 2.0 +843 1 optimizer.lr 0.02128435184686903 +843 1 training.batch_size 2.0 +843 1 training.label_smoothing 0.8448924713967877 +843 2 model.embedding_dim 2.0 +843 2 model.scoring_fct_norm 1.0 +843 2 optimizer.lr 0.001009964111974803 +843 2 training.batch_size 1.0 +843 2 training.label_smoothing 0.5214877947143315 +843 3 model.embedding_dim 1.0 +843 3 model.scoring_fct_norm 2.0 +843 3 optimizer.lr 0.003993875371271986 +843 3 training.batch_size 2.0 +843 3 training.label_smoothing 0.10069969341047096 +843 1 dataset """wn18rr""" +843 1 model """transe""" +843 1 loss """bceaftersigmoid""" +843 1 regularizer """no""" +843 1 optimizer """adam""" +843 1 training_loop """lcwa""" +843 1 evaluator """rankbased""" +843 2 dataset """wn18rr""" +843 2 model """transe""" +843 2 loss """bceaftersigmoid""" +843 2 regularizer """no""" +843 2 optimizer """adam""" +843 2 training_loop """lcwa""" +843 2 evaluator """rankbased""" +843 3 dataset """wn18rr""" +843 3 model """transe""" +843 3 loss """bceaftersigmoid""" +843 3 regularizer """no""" +843 3 optimizer """adam""" +843 3 training_loop """lcwa""" +843 3 evaluator """rankbased""" +844 1 model.embedding_dim 1.0 +844 1 model.scoring_fct_norm 1.0 +844 1 optimizer.lr 0.038157148546586285 +844 
1 training.batch_size 0.0 +844 1 training.label_smoothing 0.008108600625366077 +844 2 model.embedding_dim 1.0 +844 2 model.scoring_fct_norm 1.0 +844 2 optimizer.lr 0.009191257784915313 +844 2 training.batch_size 0.0 +844 2 training.label_smoothing 0.0010376480345036811 +844 3 model.embedding_dim 2.0 +844 3 model.scoring_fct_norm 1.0 +844 3 optimizer.lr 0.03062768225223052 +844 3 training.batch_size 1.0 +844 3 training.label_smoothing 0.024148072877258217 +844 4 model.embedding_dim 2.0 +844 4 model.scoring_fct_norm 1.0 +844 4 optimizer.lr 0.02417666714848924 +844 4 training.batch_size 1.0 +844 4 training.label_smoothing 0.05405902338812602 +844 5 model.embedding_dim 1.0 +844 5 model.scoring_fct_norm 2.0 +844 5 optimizer.lr 0.017100886550842225 +844 5 training.batch_size 0.0 +844 5 training.label_smoothing 0.0016634943241294203 +844 6 model.embedding_dim 2.0 +844 6 model.scoring_fct_norm 1.0 +844 6 optimizer.lr 0.06104220952809516 +844 6 training.batch_size 2.0 +844 6 training.label_smoothing 0.011257887646762589 +844 1 dataset """wn18rr""" +844 1 model """transe""" +844 1 loss """softplus""" +844 1 regularizer """no""" +844 1 optimizer """adam""" +844 1 training_loop """lcwa""" +844 1 evaluator """rankbased""" +844 2 dataset """wn18rr""" +844 2 model """transe""" +844 2 loss """softplus""" +844 2 regularizer """no""" +844 2 optimizer """adam""" +844 2 training_loop """lcwa""" +844 2 evaluator """rankbased""" +844 3 dataset """wn18rr""" +844 3 model """transe""" +844 3 loss """softplus""" +844 3 regularizer """no""" +844 3 optimizer """adam""" +844 3 training_loop """lcwa""" +844 3 evaluator """rankbased""" +844 4 dataset """wn18rr""" +844 4 model """transe""" +844 4 loss """softplus""" +844 4 regularizer """no""" +844 4 optimizer """adam""" +844 4 training_loop """lcwa""" +844 4 evaluator """rankbased""" +844 5 dataset """wn18rr""" +844 5 model """transe""" +844 5 loss """softplus""" +844 5 regularizer """no""" +844 5 optimizer """adam""" +844 5 training_loop 
"""lcwa""" +844 5 evaluator """rankbased""" +844 6 dataset """wn18rr""" +844 6 model """transe""" +844 6 loss """softplus""" +844 6 regularizer """no""" +844 6 optimizer """adam""" +844 6 training_loop """lcwa""" +844 6 evaluator """rankbased""" +845 1 model.embedding_dim 2.0 +845 1 model.scoring_fct_norm 2.0 +845 1 optimizer.lr 0.0021757168074703326 +845 1 training.batch_size 0.0 +845 1 training.label_smoothing 0.01289923272966402 +845 2 model.embedding_dim 1.0 +845 2 model.scoring_fct_norm 2.0 +845 2 optimizer.lr 0.0059109284926087274 +845 2 training.batch_size 0.0 +845 2 training.label_smoothing 0.5923689671624571 +845 3 model.embedding_dim 2.0 +845 3 model.scoring_fct_norm 2.0 +845 3 optimizer.lr 0.009101493797192618 +845 3 training.batch_size 1.0 +845 3 training.label_smoothing 0.3265871584586197 +845 4 model.embedding_dim 2.0 +845 4 model.scoring_fct_norm 2.0 +845 4 optimizer.lr 0.008478211425922701 +845 4 training.batch_size 0.0 +845 4 training.label_smoothing 0.025427991376945548 +845 5 model.embedding_dim 1.0 +845 5 model.scoring_fct_norm 2.0 +845 5 optimizer.lr 0.0017489121968184388 +845 5 training.batch_size 0.0 +845 5 training.label_smoothing 0.030118920559070454 +845 6 model.embedding_dim 2.0 +845 6 model.scoring_fct_norm 2.0 +845 6 optimizer.lr 0.09417394306620365 +845 6 training.batch_size 1.0 +845 6 training.label_smoothing 0.729522932157706 +845 1 dataset """wn18rr""" +845 1 model """transe""" +845 1 loss """bceaftersigmoid""" +845 1 regularizer """no""" +845 1 optimizer """adam""" +845 1 training_loop """lcwa""" +845 1 evaluator """rankbased""" +845 2 dataset """wn18rr""" +845 2 model """transe""" +845 2 loss """bceaftersigmoid""" +845 2 regularizer """no""" +845 2 optimizer """adam""" +845 2 training_loop """lcwa""" +845 2 evaluator """rankbased""" +845 3 dataset """wn18rr""" +845 3 model """transe""" +845 3 loss """bceaftersigmoid""" +845 3 regularizer """no""" +845 3 optimizer """adam""" +845 3 training_loop """lcwa""" +845 3 evaluator 
"""rankbased""" +845 4 dataset """wn18rr""" +845 4 model """transe""" +845 4 loss """bceaftersigmoid""" +845 4 regularizer """no""" +845 4 optimizer """adam""" +845 4 training_loop """lcwa""" +845 4 evaluator """rankbased""" +845 5 dataset """wn18rr""" +845 5 model """transe""" +845 5 loss """bceaftersigmoid""" +845 5 regularizer """no""" +845 5 optimizer """adam""" +845 5 training_loop """lcwa""" +845 5 evaluator """rankbased""" +845 6 dataset """wn18rr""" +845 6 model """transe""" +845 6 loss """bceaftersigmoid""" +845 6 regularizer """no""" +845 6 optimizer """adam""" +845 6 training_loop """lcwa""" +845 6 evaluator """rankbased""" +846 1 model.embedding_dim 2.0 +846 1 model.scoring_fct_norm 1.0 +846 1 optimizer.lr 0.0011049153751436596 +846 1 training.batch_size 2.0 +846 1 training.label_smoothing 0.00200051768009458 +846 2 model.embedding_dim 0.0 +846 2 model.scoring_fct_norm 2.0 +846 2 optimizer.lr 0.025084135279279416 +846 2 training.batch_size 1.0 +846 2 training.label_smoothing 0.8222212334721111 +846 3 model.embedding_dim 1.0 +846 3 model.scoring_fct_norm 2.0 +846 3 optimizer.lr 0.006748169082058384 +846 3 training.batch_size 2.0 +846 3 training.label_smoothing 0.765991286528522 +846 4 model.embedding_dim 1.0 +846 4 model.scoring_fct_norm 1.0 +846 4 optimizer.lr 0.044764678441006636 +846 4 training.batch_size 2.0 +846 4 training.label_smoothing 0.0010108305011915497 +846 5 model.embedding_dim 1.0 +846 5 model.scoring_fct_norm 1.0 +846 5 optimizer.lr 0.03610538936562912 +846 5 training.batch_size 1.0 +846 5 training.label_smoothing 0.08395373134425405 +846 6 model.embedding_dim 2.0 +846 6 model.scoring_fct_norm 1.0 +846 6 optimizer.lr 0.01492394487730924 +846 6 training.batch_size 1.0 +846 6 training.label_smoothing 0.02041079284656488 +846 1 dataset """wn18rr""" +846 1 model """transe""" +846 1 loss """softplus""" +846 1 regularizer """no""" +846 1 optimizer """adam""" +846 1 training_loop """lcwa""" +846 1 evaluator """rankbased""" +846 2 dataset 
"""wn18rr""" +846 2 model """transe""" +846 2 loss """softplus""" +846 2 regularizer """no""" +846 2 optimizer """adam""" +846 2 training_loop """lcwa""" +846 2 evaluator """rankbased""" +846 3 dataset """wn18rr""" +846 3 model """transe""" +846 3 loss """softplus""" +846 3 regularizer """no""" +846 3 optimizer """adam""" +846 3 training_loop """lcwa""" +846 3 evaluator """rankbased""" +846 4 dataset """wn18rr""" +846 4 model """transe""" +846 4 loss """softplus""" +846 4 regularizer """no""" +846 4 optimizer """adam""" +846 4 training_loop """lcwa""" +846 4 evaluator """rankbased""" +846 5 dataset """wn18rr""" +846 5 model """transe""" +846 5 loss """softplus""" +846 5 regularizer """no""" +846 5 optimizer """adam""" +846 5 training_loop """lcwa""" +846 5 evaluator """rankbased""" +846 6 dataset """wn18rr""" +846 6 model """transe""" +846 6 loss """softplus""" +846 6 regularizer """no""" +846 6 optimizer """adam""" +846 6 training_loop """lcwa""" +846 6 evaluator """rankbased""" +847 1 model.embedding_dim 1.0 +847 1 model.scoring_fct_norm 2.0 +847 1 loss.margin 24.52865122864364 +847 1 loss.adversarial_temperature 0.18290070958407384 +847 1 optimizer.lr 0.06696010652175527 +847 1 negative_sampler.num_negs_per_pos 37.0 +847 1 training.batch_size 3.0 +847 2 model.embedding_dim 0.0 +847 2 model.scoring_fct_norm 2.0 +847 2 loss.margin 22.634736565217114 +847 2 loss.adversarial_temperature 0.13313074510883147 +847 2 optimizer.lr 0.08373045484207539 +847 2 negative_sampler.num_negs_per_pos 16.0 +847 2 training.batch_size 0.0 +847 3 model.embedding_dim 2.0 +847 3 model.scoring_fct_norm 2.0 +847 3 loss.margin 17.446279957849434 +847 3 loss.adversarial_temperature 0.6295335420419511 +847 3 optimizer.lr 0.0016014515179016981 +847 3 negative_sampler.num_negs_per_pos 39.0 +847 3 training.batch_size 2.0 +847 4 model.embedding_dim 1.0 +847 4 model.scoring_fct_norm 1.0 +847 4 loss.margin 5.2380717598636 +847 4 loss.adversarial_temperature 0.4935640436313996 +847 4 optimizer.lr 
0.0011910729561192658 +847 4 negative_sampler.num_negs_per_pos 21.0 +847 4 training.batch_size 2.0 +847 5 model.embedding_dim 1.0 +847 5 model.scoring_fct_norm 1.0 +847 5 loss.margin 12.102012157821836 +847 5 loss.adversarial_temperature 0.7943374407088291 +847 5 optimizer.lr 0.003386905578191566 +847 5 negative_sampler.num_negs_per_pos 45.0 +847 5 training.batch_size 2.0 +847 6 model.embedding_dim 2.0 +847 6 model.scoring_fct_norm 2.0 +847 6 loss.margin 24.25332002514181 +847 6 loss.adversarial_temperature 0.8589724007744037 +847 6 optimizer.lr 0.010555439060287842 +847 6 negative_sampler.num_negs_per_pos 2.0 +847 6 training.batch_size 1.0 +847 7 model.embedding_dim 1.0 +847 7 model.scoring_fct_norm 2.0 +847 7 loss.margin 7.231498159697658 +847 7 loss.adversarial_temperature 0.28160561978449594 +847 7 optimizer.lr 0.0020430794818359025 +847 7 negative_sampler.num_negs_per_pos 4.0 +847 7 training.batch_size 2.0 +847 8 model.embedding_dim 2.0 +847 8 model.scoring_fct_norm 2.0 +847 8 loss.margin 18.78679657173491 +847 8 loss.adversarial_temperature 0.7749968389996528 +847 8 optimizer.lr 0.01795863289698528 +847 8 negative_sampler.num_negs_per_pos 22.0 +847 8 training.batch_size 2.0 +847 9 model.embedding_dim 1.0 +847 9 model.scoring_fct_norm 2.0 +847 9 loss.margin 13.967421917933983 +847 9 loss.adversarial_temperature 0.9399622651261592 +847 9 optimizer.lr 0.001303471266305819 +847 9 negative_sampler.num_negs_per_pos 35.0 +847 9 training.batch_size 2.0 +847 10 model.embedding_dim 2.0 +847 10 model.scoring_fct_norm 2.0 +847 10 loss.margin 11.469559450296998 +847 10 loss.adversarial_temperature 0.9294737733895425 +847 10 optimizer.lr 0.04960553539278743 +847 10 negative_sampler.num_negs_per_pos 1.0 +847 10 training.batch_size 1.0 +847 11 model.embedding_dim 0.0 +847 11 model.scoring_fct_norm 1.0 +847 11 loss.margin 10.511376119941978 +847 11 loss.adversarial_temperature 0.3303455403426504 +847 11 optimizer.lr 0.0046335258051757935 +847 11 
negative_sampler.num_negs_per_pos 25.0 +847 11 training.batch_size 1.0 +847 12 model.embedding_dim 0.0 +847 12 model.scoring_fct_norm 2.0 +847 12 loss.margin 16.538251656202213 +847 12 loss.adversarial_temperature 0.41794917105714147 +847 12 optimizer.lr 0.00408584919651442 +847 12 negative_sampler.num_negs_per_pos 44.0 +847 12 training.batch_size 1.0 +847 13 model.embedding_dim 0.0 +847 13 model.scoring_fct_norm 1.0 +847 13 loss.margin 8.65987581746387 +847 13 loss.adversarial_temperature 0.20704821934175552 +847 13 optimizer.lr 0.00976603832388368 +847 13 negative_sampler.num_negs_per_pos 41.0 +847 13 training.batch_size 2.0 +847 14 model.embedding_dim 2.0 +847 14 model.scoring_fct_norm 2.0 +847 14 loss.margin 16.538143017601733 +847 14 loss.adversarial_temperature 0.7305809247608424 +847 14 optimizer.lr 0.0924758019002496 +847 14 negative_sampler.num_negs_per_pos 42.0 +847 14 training.batch_size 3.0 +847 15 model.embedding_dim 1.0 +847 15 model.scoring_fct_norm 2.0 +847 15 loss.margin 11.231160796296466 +847 15 loss.adversarial_temperature 0.7999592744260289 +847 15 optimizer.lr 0.07586025474453187 +847 15 negative_sampler.num_negs_per_pos 2.0 +847 15 training.batch_size 0.0 +847 16 model.embedding_dim 2.0 +847 16 model.scoring_fct_norm 1.0 +847 16 loss.margin 23.847999922225537 +847 16 loss.adversarial_temperature 0.8136394029940052 +847 16 optimizer.lr 0.014117587470271102 +847 16 negative_sampler.num_negs_per_pos 29.0 +847 16 training.batch_size 3.0 +847 17 model.embedding_dim 2.0 +847 17 model.scoring_fct_norm 2.0 +847 17 loss.margin 2.0656166703376604 +847 17 loss.adversarial_temperature 0.2607319464813075 +847 17 optimizer.lr 0.004919701689156907 +847 17 negative_sampler.num_negs_per_pos 34.0 +847 17 training.batch_size 3.0 +847 18 model.embedding_dim 0.0 +847 18 model.scoring_fct_norm 2.0 +847 18 loss.margin 11.690284024139007 +847 18 loss.adversarial_temperature 0.7560491475310507 +847 18 optimizer.lr 0.0014806400920104035 +847 18 
negative_sampler.num_negs_per_pos 44.0 +847 18 training.batch_size 1.0 +847 19 model.embedding_dim 2.0 +847 19 model.scoring_fct_norm 1.0 +847 19 loss.margin 18.119355813382207 +847 19 loss.adversarial_temperature 0.368277209667739 +847 19 optimizer.lr 0.0016285134086791284 +847 19 negative_sampler.num_negs_per_pos 10.0 +847 19 training.batch_size 1.0 +847 20 model.embedding_dim 0.0 +847 20 model.scoring_fct_norm 1.0 +847 20 loss.margin 18.633742286818816 +847 20 loss.adversarial_temperature 0.1827790222723439 +847 20 optimizer.lr 0.004515537679666708 +847 20 negative_sampler.num_negs_per_pos 13.0 +847 20 training.batch_size 1.0 +847 21 model.embedding_dim 1.0 +847 21 model.scoring_fct_norm 1.0 +847 21 loss.margin 12.929716622783971 +847 21 loss.adversarial_temperature 0.7024064049099087 +847 21 optimizer.lr 0.04330412067775219 +847 21 negative_sampler.num_negs_per_pos 19.0 +847 21 training.batch_size 1.0 +847 22 model.embedding_dim 0.0 +847 22 model.scoring_fct_norm 2.0 +847 22 loss.margin 14.479715392076876 +847 22 loss.adversarial_temperature 0.5899771442539623 +847 22 optimizer.lr 0.08711020432342383 +847 22 negative_sampler.num_negs_per_pos 34.0 +847 22 training.batch_size 2.0 +847 23 model.embedding_dim 2.0 +847 23 model.scoring_fct_norm 1.0 +847 23 loss.margin 23.631960702643365 +847 23 loss.adversarial_temperature 0.25169932531125283 +847 23 optimizer.lr 0.002754288791061264 +847 23 negative_sampler.num_negs_per_pos 24.0 +847 23 training.batch_size 0.0 +847 24 model.embedding_dim 0.0 +847 24 model.scoring_fct_norm 2.0 +847 24 loss.margin 17.58092167953912 +847 24 loss.adversarial_temperature 0.7281440959994975 +847 24 optimizer.lr 0.011156585467237135 +847 24 negative_sampler.num_negs_per_pos 24.0 +847 24 training.batch_size 0.0 +847 25 model.embedding_dim 1.0 +847 25 model.scoring_fct_norm 2.0 +847 25 loss.margin 10.593298050465679 +847 25 loss.adversarial_temperature 0.5473146985570375 +847 25 optimizer.lr 0.09567828702987209 +847 25 
negative_sampler.num_negs_per_pos 10.0 +847 25 training.batch_size 2.0 +847 26 model.embedding_dim 0.0 +847 26 model.scoring_fct_norm 2.0 +847 26 loss.margin 7.977246968391842 +847 26 loss.adversarial_temperature 0.47373855981100077 +847 26 optimizer.lr 0.053188701325863556 +847 26 negative_sampler.num_negs_per_pos 5.0 +847 26 training.batch_size 1.0 +847 27 model.embedding_dim 0.0 +847 27 model.scoring_fct_norm 1.0 +847 27 loss.margin 22.99831435626088 +847 27 loss.adversarial_temperature 0.8887782850783887 +847 27 optimizer.lr 0.019309835856767085 +847 27 negative_sampler.num_negs_per_pos 13.0 +847 27 training.batch_size 2.0 +847 28 model.embedding_dim 2.0 +847 28 model.scoring_fct_norm 1.0 +847 28 loss.margin 15.183592168737652 +847 28 loss.adversarial_temperature 0.41897714878157366 +847 28 optimizer.lr 0.05425213252415542 +847 28 negative_sampler.num_negs_per_pos 29.0 +847 28 training.batch_size 3.0 +847 29 model.embedding_dim 1.0 +847 29 model.scoring_fct_norm 1.0 +847 29 loss.margin 20.69982572354754 +847 29 loss.adversarial_temperature 0.5669220678768356 +847 29 optimizer.lr 0.04129815038149376 +847 29 negative_sampler.num_negs_per_pos 34.0 +847 29 training.batch_size 1.0 +847 30 model.embedding_dim 2.0 +847 30 model.scoring_fct_norm 1.0 +847 30 loss.margin 14.66575934113755 +847 30 loss.adversarial_temperature 0.17821029340276218 +847 30 optimizer.lr 0.025681530960058683 +847 30 negative_sampler.num_negs_per_pos 30.0 +847 30 training.batch_size 1.0 +847 31 model.embedding_dim 0.0 +847 31 model.scoring_fct_norm 2.0 +847 31 loss.margin 15.55318280498088 +847 31 loss.adversarial_temperature 0.2191788775852057 +847 31 optimizer.lr 0.011020264467681186 +847 31 negative_sampler.num_negs_per_pos 45.0 +847 31 training.batch_size 3.0 +847 32 model.embedding_dim 1.0 +847 32 model.scoring_fct_norm 2.0 +847 32 loss.margin 16.11198608180259 +847 32 loss.adversarial_temperature 0.927083503529485 +847 32 optimizer.lr 0.04691465441209528 +847 32 
negative_sampler.num_negs_per_pos 33.0 +847 32 training.batch_size 3.0 +847 33 model.embedding_dim 1.0 +847 33 model.scoring_fct_norm 2.0 +847 33 loss.margin 16.589974674182123 +847 33 loss.adversarial_temperature 0.2401916125403677 +847 33 optimizer.lr 0.0022338391178573645 +847 33 negative_sampler.num_negs_per_pos 8.0 +847 33 training.batch_size 3.0 +847 34 model.embedding_dim 2.0 +847 34 model.scoring_fct_norm 2.0 +847 34 loss.margin 22.160808411551137 +847 34 loss.adversarial_temperature 0.2967136999593849 +847 34 optimizer.lr 0.0010503724108696661 +847 34 negative_sampler.num_negs_per_pos 49.0 +847 34 training.batch_size 1.0 +847 35 model.embedding_dim 2.0 +847 35 model.scoring_fct_norm 2.0 +847 35 loss.margin 4.685860895886179 +847 35 loss.adversarial_temperature 0.11558851057114627 +847 35 optimizer.lr 0.018491170781218532 +847 35 negative_sampler.num_negs_per_pos 19.0 +847 35 training.batch_size 3.0 +847 36 model.embedding_dim 2.0 +847 36 model.scoring_fct_norm 1.0 +847 36 loss.margin 8.486211345187542 +847 36 loss.adversarial_temperature 0.8497202847928012 +847 36 optimizer.lr 0.0015636262927247032 +847 36 negative_sampler.num_negs_per_pos 41.0 +847 36 training.batch_size 2.0 +847 37 model.embedding_dim 0.0 +847 37 model.scoring_fct_norm 1.0 +847 37 loss.margin 27.410770792631748 +847 37 loss.adversarial_temperature 0.9137088775893524 +847 37 optimizer.lr 0.044842842044037434 +847 37 negative_sampler.num_negs_per_pos 5.0 +847 37 training.batch_size 3.0 +847 38 model.embedding_dim 0.0 +847 38 model.scoring_fct_norm 2.0 +847 38 loss.margin 19.356057479695053 +847 38 loss.adversarial_temperature 0.31585813981624444 +847 38 optimizer.lr 0.011463707640891684 +847 38 negative_sampler.num_negs_per_pos 29.0 +847 38 training.batch_size 3.0 +847 39 model.embedding_dim 1.0 +847 39 model.scoring_fct_norm 2.0 +847 39 loss.margin 17.78305004827311 +847 39 loss.adversarial_temperature 0.9440301741427236 +847 39 optimizer.lr 0.021469321593773034 +847 39 
negative_sampler.num_negs_per_pos 46.0 +847 39 training.batch_size 1.0 +847 40 model.embedding_dim 1.0 +847 40 model.scoring_fct_norm 2.0 +847 40 loss.margin 9.997666565540882 +847 40 loss.adversarial_temperature 0.325264036081913 +847 40 optimizer.lr 0.0014712038354549783 +847 40 negative_sampler.num_negs_per_pos 47.0 +847 40 training.batch_size 1.0 +847 41 model.embedding_dim 1.0 +847 41 model.scoring_fct_norm 1.0 +847 41 loss.margin 16.43886832826692 +847 41 loss.adversarial_temperature 0.6384726320688418 +847 41 optimizer.lr 0.04024033745471832 +847 41 negative_sampler.num_negs_per_pos 32.0 +847 41 training.batch_size 3.0 +847 42 model.embedding_dim 1.0 +847 42 model.scoring_fct_norm 1.0 +847 42 loss.margin 10.093194378838476 +847 42 loss.adversarial_temperature 0.45046462094125284 +847 42 optimizer.lr 0.0015091371770896713 +847 42 negative_sampler.num_negs_per_pos 1.0 +847 42 training.batch_size 2.0 +847 43 model.embedding_dim 2.0 +847 43 model.scoring_fct_norm 1.0 +847 43 loss.margin 22.56669588887138 +847 43 loss.adversarial_temperature 0.3491045233761165 +847 43 optimizer.lr 0.01822638875648764 +847 43 negative_sampler.num_negs_per_pos 14.0 +847 43 training.batch_size 2.0 +847 44 model.embedding_dim 2.0 +847 44 model.scoring_fct_norm 1.0 +847 44 loss.margin 12.373638283615524 +847 44 loss.adversarial_temperature 0.4196640915481784 +847 44 optimizer.lr 0.0055997877761546164 +847 44 negative_sampler.num_negs_per_pos 30.0 +847 44 training.batch_size 2.0 +847 45 model.embedding_dim 1.0 +847 45 model.scoring_fct_norm 2.0 +847 45 loss.margin 12.106772663717084 +847 45 loss.adversarial_temperature 0.12755494107717905 +847 45 optimizer.lr 0.026157018233560266 +847 45 negative_sampler.num_negs_per_pos 21.0 +847 45 training.batch_size 0.0 +847 46 model.embedding_dim 2.0 +847 46 model.scoring_fct_norm 2.0 +847 46 loss.margin 3.465793423689601 +847 46 loss.adversarial_temperature 0.19328435527751014 +847 46 optimizer.lr 0.0025102119640703106 +847 46 
negative_sampler.num_negs_per_pos 38.0 +847 46 training.batch_size 2.0 +847 47 model.embedding_dim 1.0 +847 47 model.scoring_fct_norm 1.0 +847 47 loss.margin 22.2742556875037 +847 47 loss.adversarial_temperature 0.5112349615377839 +847 47 optimizer.lr 0.01328996728224359 +847 47 negative_sampler.num_negs_per_pos 7.0 +847 47 training.batch_size 0.0 +847 1 dataset """yago310""" +847 1 model """transe""" +847 1 loss """nssa""" +847 1 regularizer """no""" +847 1 optimizer """adam""" +847 1 training_loop """owa""" +847 1 negative_sampler """basic""" +847 1 evaluator """rankbased""" +847 2 dataset """yago310""" +847 2 model """transe""" +847 2 loss """nssa""" +847 2 regularizer """no""" +847 2 optimizer """adam""" +847 2 training_loop """owa""" +847 2 negative_sampler """basic""" +847 2 evaluator """rankbased""" +847 3 dataset """yago310""" +847 3 model """transe""" +847 3 loss """nssa""" +847 3 regularizer """no""" +847 3 optimizer """adam""" +847 3 training_loop """owa""" +847 3 negative_sampler """basic""" +847 3 evaluator """rankbased""" +847 4 dataset """yago310""" +847 4 model """transe""" +847 4 loss """nssa""" +847 4 regularizer """no""" +847 4 optimizer """adam""" +847 4 training_loop """owa""" +847 4 negative_sampler """basic""" +847 4 evaluator """rankbased""" +847 5 dataset """yago310""" +847 5 model """transe""" +847 5 loss """nssa""" +847 5 regularizer """no""" +847 5 optimizer """adam""" +847 5 training_loop """owa""" +847 5 negative_sampler """basic""" +847 5 evaluator """rankbased""" +847 6 dataset """yago310""" +847 6 model """transe""" +847 6 loss """nssa""" +847 6 regularizer """no""" +847 6 optimizer """adam""" +847 6 training_loop """owa""" +847 6 negative_sampler """basic""" +847 6 evaluator """rankbased""" +847 7 dataset """yago310""" +847 7 model """transe""" +847 7 loss """nssa""" +847 7 regularizer """no""" +847 7 optimizer """adam""" +847 7 training_loop """owa""" +847 7 negative_sampler """basic""" +847 7 evaluator """rankbased""" +847 8 
dataset """yago310""" +847 8 model """transe""" +847 8 loss """nssa""" +847 8 regularizer """no""" +847 8 optimizer """adam""" +847 8 training_loop """owa""" +847 8 negative_sampler """basic""" +847 8 evaluator """rankbased""" +847 9 dataset """yago310""" +847 9 model """transe""" +847 9 loss """nssa""" +847 9 regularizer """no""" +847 9 optimizer """adam""" +847 9 training_loop """owa""" +847 9 negative_sampler """basic""" +847 9 evaluator """rankbased""" +847 10 dataset """yago310""" +847 10 model """transe""" +847 10 loss """nssa""" +847 10 regularizer """no""" +847 10 optimizer """adam""" +847 10 training_loop """owa""" +847 10 negative_sampler """basic""" +847 10 evaluator """rankbased""" +847 11 dataset """yago310""" +847 11 model """transe""" +847 11 loss """nssa""" +847 11 regularizer """no""" +847 11 optimizer """adam""" +847 11 training_loop """owa""" +847 11 negative_sampler """basic""" +847 11 evaluator """rankbased""" +847 12 dataset """yago310""" +847 12 model """transe""" +847 12 loss """nssa""" +847 12 regularizer """no""" +847 12 optimizer """adam""" +847 12 training_loop """owa""" +847 12 negative_sampler """basic""" +847 12 evaluator """rankbased""" +847 13 dataset """yago310""" +847 13 model """transe""" +847 13 loss """nssa""" +847 13 regularizer """no""" +847 13 optimizer """adam""" +847 13 training_loop """owa""" +847 13 negative_sampler """basic""" +847 13 evaluator """rankbased""" +847 14 dataset """yago310""" +847 14 model """transe""" +847 14 loss """nssa""" +847 14 regularizer """no""" +847 14 optimizer """adam""" +847 14 training_loop """owa""" +847 14 negative_sampler """basic""" +847 14 evaluator """rankbased""" +847 15 dataset """yago310""" +847 15 model """transe""" +847 15 loss """nssa""" +847 15 regularizer """no""" +847 15 optimizer """adam""" +847 15 training_loop """owa""" +847 15 negative_sampler """basic""" +847 15 evaluator """rankbased""" +847 16 dataset """yago310""" +847 16 model """transe""" +847 16 loss """nssa""" +847 
16 regularizer """no""" +847 16 optimizer """adam""" +847 16 training_loop """owa""" +847 16 negative_sampler """basic""" +847 16 evaluator """rankbased""" +847 17 dataset """yago310""" +847 17 model """transe""" +847 17 loss """nssa""" +847 17 regularizer """no""" +847 17 optimizer """adam""" +847 17 training_loop """owa""" +847 17 negative_sampler """basic""" +847 17 evaluator """rankbased""" +847 18 dataset """yago310""" +847 18 model """transe""" +847 18 loss """nssa""" +847 18 regularizer """no""" +847 18 optimizer """adam""" +847 18 training_loop """owa""" +847 18 negative_sampler """basic""" +847 18 evaluator """rankbased""" +847 19 dataset """yago310""" +847 19 model """transe""" +847 19 loss """nssa""" +847 19 regularizer """no""" +847 19 optimizer """adam""" +847 19 training_loop """owa""" +847 19 negative_sampler """basic""" +847 19 evaluator """rankbased""" +847 20 dataset """yago310""" +847 20 model """transe""" +847 20 loss """nssa""" +847 20 regularizer """no""" +847 20 optimizer """adam""" +847 20 training_loop """owa""" +847 20 negative_sampler """basic""" +847 20 evaluator """rankbased""" +847 21 dataset """yago310""" +847 21 model """transe""" +847 21 loss """nssa""" +847 21 regularizer """no""" +847 21 optimizer """adam""" +847 21 training_loop """owa""" +847 21 negative_sampler """basic""" +847 21 evaluator """rankbased""" +847 22 dataset """yago310""" +847 22 model """transe""" +847 22 loss """nssa""" +847 22 regularizer """no""" +847 22 optimizer """adam""" +847 22 training_loop """owa""" +847 22 negative_sampler """basic""" +847 22 evaluator """rankbased""" +847 23 dataset """yago310""" +847 23 model """transe""" +847 23 loss """nssa""" +847 23 regularizer """no""" +847 23 optimizer """adam""" +847 23 training_loop """owa""" +847 23 negative_sampler """basic""" +847 23 evaluator """rankbased""" +847 24 dataset """yago310""" +847 24 model """transe""" +847 24 loss """nssa""" +847 24 regularizer """no""" +847 24 optimizer """adam""" +847 24 
training_loop """owa""" +847 24 negative_sampler """basic""" +847 24 evaluator """rankbased""" +847 25 dataset """yago310""" +847 25 model """transe""" +847 25 loss """nssa""" +847 25 regularizer """no""" +847 25 optimizer """adam""" +847 25 training_loop """owa""" +847 25 negative_sampler """basic""" +847 25 evaluator """rankbased""" +847 26 dataset """yago310""" +847 26 model """transe""" +847 26 loss """nssa""" +847 26 regularizer """no""" +847 26 optimizer """adam""" +847 26 training_loop """owa""" +847 26 negative_sampler """basic""" +847 26 evaluator """rankbased""" +847 27 dataset """yago310""" +847 27 model """transe""" +847 27 loss """nssa""" +847 27 regularizer """no""" +847 27 optimizer """adam""" +847 27 training_loop """owa""" +847 27 negative_sampler """basic""" +847 27 evaluator """rankbased""" +847 28 dataset """yago310""" +847 28 model """transe""" +847 28 loss """nssa""" +847 28 regularizer """no""" +847 28 optimizer """adam""" +847 28 training_loop """owa""" +847 28 negative_sampler """basic""" +847 28 evaluator """rankbased""" +847 29 dataset """yago310""" +847 29 model """transe""" +847 29 loss """nssa""" +847 29 regularizer """no""" +847 29 optimizer """adam""" +847 29 training_loop """owa""" +847 29 negative_sampler """basic""" +847 29 evaluator """rankbased""" +847 30 dataset """yago310""" +847 30 model """transe""" +847 30 loss """nssa""" +847 30 regularizer """no""" +847 30 optimizer """adam""" +847 30 training_loop """owa""" +847 30 negative_sampler """basic""" +847 30 evaluator """rankbased""" +847 31 dataset """yago310""" +847 31 model """transe""" +847 31 loss """nssa""" +847 31 regularizer """no""" +847 31 optimizer """adam""" +847 31 training_loop """owa""" +847 31 negative_sampler """basic""" +847 31 evaluator """rankbased""" +847 32 dataset """yago310""" +847 32 model """transe""" +847 32 loss """nssa""" +847 32 regularizer """no""" +847 32 optimizer """adam""" +847 32 training_loop """owa""" +847 32 negative_sampler """basic""" 
+847 32 evaluator """rankbased""" +847 33 dataset """yago310""" +847 33 model """transe""" +847 33 loss """nssa""" +847 33 regularizer """no""" +847 33 optimizer """adam""" +847 33 training_loop """owa""" +847 33 negative_sampler """basic""" +847 33 evaluator """rankbased""" +847 34 dataset """yago310""" +847 34 model """transe""" +847 34 loss """nssa""" +847 34 regularizer """no""" +847 34 optimizer """adam""" +847 34 training_loop """owa""" +847 34 negative_sampler """basic""" +847 34 evaluator """rankbased""" +847 35 dataset """yago310""" +847 35 model """transe""" +847 35 loss """nssa""" +847 35 regularizer """no""" +847 35 optimizer """adam""" +847 35 training_loop """owa""" +847 35 negative_sampler """basic""" +847 35 evaluator """rankbased""" +847 36 dataset """yago310""" +847 36 model """transe""" +847 36 loss """nssa""" +847 36 regularizer """no""" +847 36 optimizer """adam""" +847 36 training_loop """owa""" +847 36 negative_sampler """basic""" +847 36 evaluator """rankbased""" +847 37 dataset """yago310""" +847 37 model """transe""" +847 37 loss """nssa""" +847 37 regularizer """no""" +847 37 optimizer """adam""" +847 37 training_loop """owa""" +847 37 negative_sampler """basic""" +847 37 evaluator """rankbased""" +847 38 dataset """yago310""" +847 38 model """transe""" +847 38 loss """nssa""" +847 38 regularizer """no""" +847 38 optimizer """adam""" +847 38 training_loop """owa""" +847 38 negative_sampler """basic""" +847 38 evaluator """rankbased""" +847 39 dataset """yago310""" +847 39 model """transe""" +847 39 loss """nssa""" +847 39 regularizer """no""" +847 39 optimizer """adam""" +847 39 training_loop """owa""" +847 39 negative_sampler """basic""" +847 39 evaluator """rankbased""" +847 40 dataset """yago310""" +847 40 model """transe""" +847 40 loss """nssa""" +847 40 regularizer """no""" +847 40 optimizer """adam""" +847 40 training_loop """owa""" +847 40 negative_sampler """basic""" +847 40 evaluator """rankbased""" +847 41 dataset """yago310""" 
+847 41 model """transe""" +847 41 loss """nssa""" +847 41 regularizer """no""" +847 41 optimizer """adam""" +847 41 training_loop """owa""" +847 41 negative_sampler """basic""" +847 41 evaluator """rankbased""" +847 42 dataset """yago310""" +847 42 model """transe""" +847 42 loss """nssa""" +847 42 regularizer """no""" +847 42 optimizer """adam""" +847 42 training_loop """owa""" +847 42 negative_sampler """basic""" +847 42 evaluator """rankbased""" +847 43 dataset """yago310""" +847 43 model """transe""" +847 43 loss """nssa""" +847 43 regularizer """no""" +847 43 optimizer """adam""" +847 43 training_loop """owa""" +847 43 negative_sampler """basic""" +847 43 evaluator """rankbased""" +847 44 dataset """yago310""" +847 44 model """transe""" +847 44 loss """nssa""" +847 44 regularizer """no""" +847 44 optimizer """adam""" +847 44 training_loop """owa""" +847 44 negative_sampler """basic""" +847 44 evaluator """rankbased""" +847 45 dataset """yago310""" +847 45 model """transe""" +847 45 loss """nssa""" +847 45 regularizer """no""" +847 45 optimizer """adam""" +847 45 training_loop """owa""" +847 45 negative_sampler """basic""" +847 45 evaluator """rankbased""" +847 46 dataset """yago310""" +847 46 model """transe""" +847 46 loss """nssa""" +847 46 regularizer """no""" +847 46 optimizer """adam""" +847 46 training_loop """owa""" +847 46 negative_sampler """basic""" +847 46 evaluator """rankbased""" +847 47 dataset """yago310""" +847 47 model """transe""" +847 47 loss """nssa""" +847 47 regularizer """no""" +847 47 optimizer """adam""" +847 47 training_loop """owa""" +847 47 negative_sampler """basic""" +847 47 evaluator """rankbased""" +848 1 model.embedding_dim 0.0 +848 1 model.scoring_fct_norm 2.0 +848 1 loss.margin 22.08417384681835 +848 1 loss.adversarial_temperature 0.4884683001471124 +848 1 optimizer.lr 0.0019786444161504304 +848 1 negative_sampler.num_negs_per_pos 47.0 +848 1 training.batch_size 3.0 +848 2 model.embedding_dim 0.0 +848 2 
model.scoring_fct_norm 2.0 +848 2 loss.margin 2.518620333627705 +848 2 loss.adversarial_temperature 0.11856221839881415 +848 2 optimizer.lr 0.002526939882267509 +848 2 negative_sampler.num_negs_per_pos 25.0 +848 2 training.batch_size 3.0 +848 3 model.embedding_dim 2.0 +848 3 model.scoring_fct_norm 2.0 +848 3 loss.margin 29.22900622065814 +848 3 loss.adversarial_temperature 0.866241169373752 +848 3 optimizer.lr 0.09899483672074103 +848 3 negative_sampler.num_negs_per_pos 38.0 +848 3 training.batch_size 3.0 +848 4 model.embedding_dim 1.0 +848 4 model.scoring_fct_norm 2.0 +848 4 loss.margin 29.826025503198313 +848 4 loss.adversarial_temperature 0.25452692371487606 +848 4 optimizer.lr 0.030081074582223463 +848 4 negative_sampler.num_negs_per_pos 21.0 +848 4 training.batch_size 3.0 +848 5 model.embedding_dim 1.0 +848 5 model.scoring_fct_norm 1.0 +848 5 loss.margin 12.130696312286736 +848 5 loss.adversarial_temperature 0.16673631631237634 +848 5 optimizer.lr 0.02446251603671612 +848 5 negative_sampler.num_negs_per_pos 42.0 +848 5 training.batch_size 1.0 +848 6 model.embedding_dim 2.0 +848 6 model.scoring_fct_norm 1.0 +848 6 loss.margin 3.541312686140174 +848 6 loss.adversarial_temperature 0.16087430082300574 +848 6 optimizer.lr 0.002496802148750733 +848 6 negative_sampler.num_negs_per_pos 23.0 +848 6 training.batch_size 2.0 +848 7 model.embedding_dim 1.0 +848 7 model.scoring_fct_norm 2.0 +848 7 loss.margin 17.285461959342385 +848 7 loss.adversarial_temperature 0.10341585891947463 +848 7 optimizer.lr 0.0018569774582037804 +848 7 negative_sampler.num_negs_per_pos 24.0 +848 7 training.batch_size 3.0 +848 8 model.embedding_dim 0.0 +848 8 model.scoring_fct_norm 2.0 +848 8 loss.margin 24.93487486893314 +848 8 loss.adversarial_temperature 0.5230112650353798 +848 8 optimizer.lr 0.01518495577762888 +848 8 negative_sampler.num_negs_per_pos 17.0 +848 8 training.batch_size 1.0 +848 9 model.embedding_dim 2.0 +848 9 model.scoring_fct_norm 1.0 +848 9 loss.margin 21.253189508975343 +848 
9 loss.adversarial_temperature 0.8461069892205466 +848 9 optimizer.lr 0.014345200445884927 +848 9 negative_sampler.num_negs_per_pos 28.0 +848 9 training.batch_size 0.0 +848 10 model.embedding_dim 0.0 +848 10 model.scoring_fct_norm 1.0 +848 10 loss.margin 2.7787552733914707 +848 10 loss.adversarial_temperature 0.8804015181680555 +848 10 optimizer.lr 0.001240155995735905 +848 10 negative_sampler.num_negs_per_pos 23.0 +848 10 training.batch_size 2.0 +848 11 model.embedding_dim 2.0 +848 11 model.scoring_fct_norm 2.0 +848 11 loss.margin 13.297758603775163 +848 11 loss.adversarial_temperature 0.244832698335879 +848 11 optimizer.lr 0.0019808462643317684 +848 11 negative_sampler.num_negs_per_pos 9.0 +848 11 training.batch_size 0.0 +848 12 model.embedding_dim 0.0 +848 12 model.scoring_fct_norm 1.0 +848 12 loss.margin 7.95156989197471 +848 12 loss.adversarial_temperature 0.13293866827878897 +848 12 optimizer.lr 0.013446846956486928 +848 12 negative_sampler.num_negs_per_pos 21.0 +848 12 training.batch_size 3.0 +848 13 model.embedding_dim 2.0 +848 13 model.scoring_fct_norm 2.0 +848 13 loss.margin 11.321398249543016 +848 13 loss.adversarial_temperature 0.7286564889084249 +848 13 optimizer.lr 0.001128771443930853 +848 13 negative_sampler.num_negs_per_pos 46.0 +848 13 training.batch_size 0.0 +848 14 model.embedding_dim 0.0 +848 14 model.scoring_fct_norm 1.0 +848 14 loss.margin 6.664385343528773 +848 14 loss.adversarial_temperature 0.8379486152970113 +848 14 optimizer.lr 0.005000850729272613 +848 14 negative_sampler.num_negs_per_pos 2.0 +848 14 training.batch_size 1.0 +848 15 model.embedding_dim 1.0 +848 15 model.scoring_fct_norm 2.0 +848 15 loss.margin 22.713707561655553 +848 15 loss.adversarial_temperature 0.4923164623108601 +848 15 optimizer.lr 0.001563123792233284 +848 15 negative_sampler.num_negs_per_pos 5.0 +848 15 training.batch_size 2.0 +848 16 model.embedding_dim 1.0 +848 16 model.scoring_fct_norm 1.0 +848 16 loss.margin 6.1003474048108375 +848 16 
loss.adversarial_temperature 0.4790897266405709 +848 16 optimizer.lr 0.0010348603968536735 +848 16 negative_sampler.num_negs_per_pos 3.0 +848 16 training.batch_size 3.0 +848 17 model.embedding_dim 0.0 +848 17 model.scoring_fct_norm 1.0 +848 17 loss.margin 28.661731100807465 +848 17 loss.adversarial_temperature 0.3869463391755581 +848 17 optimizer.lr 0.0011426482278346804 +848 17 negative_sampler.num_negs_per_pos 21.0 +848 17 training.batch_size 3.0 +848 18 model.embedding_dim 0.0 +848 18 model.scoring_fct_norm 1.0 +848 18 loss.margin 6.761834115986923 +848 18 loss.adversarial_temperature 0.7479042890961753 +848 18 optimizer.lr 0.023452808824809 +848 18 negative_sampler.num_negs_per_pos 12.0 +848 18 training.batch_size 3.0 +848 19 model.embedding_dim 0.0 +848 19 model.scoring_fct_norm 1.0 +848 19 loss.margin 2.6851602087434236 +848 19 loss.adversarial_temperature 0.11029898837427216 +848 19 optimizer.lr 0.012367959071431869 +848 19 negative_sampler.num_negs_per_pos 48.0 +848 19 training.batch_size 2.0 +848 20 model.embedding_dim 2.0 +848 20 model.scoring_fct_norm 1.0 +848 20 loss.margin 18.321485636266388 +848 20 loss.adversarial_temperature 0.3029138850387191 +848 20 optimizer.lr 0.02924500107996482 +848 20 negative_sampler.num_negs_per_pos 46.0 +848 20 training.batch_size 1.0 +848 21 model.embedding_dim 1.0 +848 21 model.scoring_fct_norm 2.0 +848 21 loss.margin 10.88998241756226 +848 21 loss.adversarial_temperature 0.6935233885075796 +848 21 optimizer.lr 0.039339097180842435 +848 21 negative_sampler.num_negs_per_pos 1.0 +848 21 training.batch_size 2.0 +848 22 model.embedding_dim 0.0 +848 22 model.scoring_fct_norm 2.0 +848 22 loss.margin 23.644460699291447 +848 22 loss.adversarial_temperature 0.5789777117517896 +848 22 optimizer.lr 0.02028210570251817 +848 22 negative_sampler.num_negs_per_pos 44.0 +848 22 training.batch_size 0.0 +848 23 model.embedding_dim 0.0 +848 23 model.scoring_fct_norm 1.0 +848 23 loss.margin 25.69375447835783 +848 23 
loss.adversarial_temperature 0.5243063939962563 +848 23 optimizer.lr 0.025773018563362556 +848 23 negative_sampler.num_negs_per_pos 31.0 +848 23 training.batch_size 2.0 +848 24 model.embedding_dim 0.0 +848 24 model.scoring_fct_norm 2.0 +848 24 loss.margin 5.366190097092311 +848 24 loss.adversarial_temperature 0.41695858113910467 +848 24 optimizer.lr 0.0014485046768894021 +848 24 negative_sampler.num_negs_per_pos 44.0 +848 24 training.batch_size 1.0 +848 25 model.embedding_dim 1.0 +848 25 model.scoring_fct_norm 1.0 +848 25 loss.margin 2.16470118171263 +848 25 loss.adversarial_temperature 0.30816654891892103 +848 25 optimizer.lr 0.04817721895122817 +848 25 negative_sampler.num_negs_per_pos 9.0 +848 25 training.batch_size 2.0 +848 26 model.embedding_dim 2.0 +848 26 model.scoring_fct_norm 1.0 +848 26 loss.margin 13.520762784036863 +848 26 loss.adversarial_temperature 0.7988905227144505 +848 26 optimizer.lr 0.01088525510651169 +848 26 negative_sampler.num_negs_per_pos 41.0 +848 26 training.batch_size 2.0 +848 27 model.embedding_dim 1.0 +848 27 model.scoring_fct_norm 2.0 +848 27 loss.margin 23.867673563534293 +848 27 loss.adversarial_temperature 0.867308955375089 +848 27 optimizer.lr 0.08555053051184945 +848 27 negative_sampler.num_negs_per_pos 9.0 +848 27 training.batch_size 2.0 +848 28 model.embedding_dim 0.0 +848 28 model.scoring_fct_norm 1.0 +848 28 loss.margin 24.444840324885238 +848 28 loss.adversarial_temperature 0.5917362011252524 +848 28 optimizer.lr 0.002194040327573707 +848 28 negative_sampler.num_negs_per_pos 27.0 +848 28 training.batch_size 0.0 +848 29 model.embedding_dim 2.0 +848 29 model.scoring_fct_norm 1.0 +848 29 loss.margin 4.755737060717866 +848 29 loss.adversarial_temperature 0.6792186023728788 +848 29 optimizer.lr 0.006496872587684057 +848 29 negative_sampler.num_negs_per_pos 9.0 +848 29 training.batch_size 2.0 +848 30 model.embedding_dim 2.0 +848 30 model.scoring_fct_norm 1.0 +848 30 loss.margin 13.983038732117091 +848 30 
loss.adversarial_temperature 0.6326689623199573 +848 30 optimizer.lr 0.008115755359880806 +848 30 negative_sampler.num_negs_per_pos 48.0 +848 30 training.batch_size 3.0 +848 31 model.embedding_dim 2.0 +848 31 model.scoring_fct_norm 1.0 +848 31 loss.margin 5.416577612430791 +848 31 loss.adversarial_temperature 0.8644056094119998 +848 31 optimizer.lr 0.009943603590569677 +848 31 negative_sampler.num_negs_per_pos 12.0 +848 31 training.batch_size 2.0 +848 32 model.embedding_dim 0.0 +848 32 model.scoring_fct_norm 2.0 +848 32 loss.margin 10.116337718485516 +848 32 loss.adversarial_temperature 0.7854669786112942 +848 32 optimizer.lr 0.010305863087127439 +848 32 negative_sampler.num_negs_per_pos 35.0 +848 32 training.batch_size 1.0 +848 33 model.embedding_dim 1.0 +848 33 model.scoring_fct_norm 2.0 +848 33 loss.margin 27.337830332415493 +848 33 loss.adversarial_temperature 0.2147584693768477 +848 33 optimizer.lr 0.003219299355491445 +848 33 negative_sampler.num_negs_per_pos 34.0 +848 33 training.batch_size 2.0 +848 34 model.embedding_dim 1.0 +848 34 model.scoring_fct_norm 2.0 +848 34 loss.margin 7.093600033735932 +848 34 loss.adversarial_temperature 0.5869434080647459 +848 34 optimizer.lr 0.0076659741895909305 +848 34 negative_sampler.num_negs_per_pos 0.0 +848 34 training.batch_size 2.0 +848 35 model.embedding_dim 0.0 +848 35 model.scoring_fct_norm 2.0 +848 35 loss.margin 10.1261159440982 +848 35 loss.adversarial_temperature 0.49018220829907566 +848 35 optimizer.lr 0.0030704494353853934 +848 35 negative_sampler.num_negs_per_pos 24.0 +848 35 training.batch_size 3.0 +848 36 model.embedding_dim 2.0 +848 36 model.scoring_fct_norm 2.0 +848 36 loss.margin 18.082781942169166 +848 36 loss.adversarial_temperature 0.9094841017349042 +848 36 optimizer.lr 0.02006082560684743 +848 36 negative_sampler.num_negs_per_pos 40.0 +848 36 training.batch_size 1.0 +848 37 model.embedding_dim 2.0 +848 37 model.scoring_fct_norm 2.0 +848 37 loss.margin 15.79020352958809 +848 37 
loss.adversarial_temperature 0.4073241286048668 +848 37 optimizer.lr 0.026453258060012256 +848 37 negative_sampler.num_negs_per_pos 41.0 +848 37 training.batch_size 3.0 +848 38 model.embedding_dim 2.0 +848 38 model.scoring_fct_norm 1.0 +848 38 loss.margin 12.975601897265049 +848 38 loss.adversarial_temperature 0.609773837416561 +848 38 optimizer.lr 0.05570166876641638 +848 38 negative_sampler.num_negs_per_pos 43.0 +848 38 training.batch_size 1.0 +848 39 model.embedding_dim 2.0 +848 39 model.scoring_fct_norm 1.0 +848 39 loss.margin 23.37772819975459 +848 39 loss.adversarial_temperature 0.11319272578084243 +848 39 optimizer.lr 0.003420422548521496 +848 39 negative_sampler.num_negs_per_pos 29.0 +848 39 training.batch_size 0.0 +848 40 model.embedding_dim 1.0 +848 40 model.scoring_fct_norm 2.0 +848 40 loss.margin 2.584299626643962 +848 40 loss.adversarial_temperature 0.6888701346926395 +848 40 optimizer.lr 0.05049548581140349 +848 40 negative_sampler.num_negs_per_pos 33.0 +848 40 training.batch_size 1.0 +848 41 model.embedding_dim 2.0 +848 41 model.scoring_fct_norm 1.0 +848 41 loss.margin 9.62320184679427 +848 41 loss.adversarial_temperature 0.9376049306136007 +848 41 optimizer.lr 0.0010395731692791794 +848 41 negative_sampler.num_negs_per_pos 41.0 +848 41 training.batch_size 0.0 +848 42 model.embedding_dim 0.0 +848 42 model.scoring_fct_norm 2.0 +848 42 loss.margin 5.748030129651419 +848 42 loss.adversarial_temperature 0.704673079747032 +848 42 optimizer.lr 0.018474519459938898 +848 42 negative_sampler.num_negs_per_pos 34.0 +848 42 training.batch_size 3.0 +848 43 model.embedding_dim 2.0 +848 43 model.scoring_fct_norm 2.0 +848 43 loss.margin 3.523824864183207 +848 43 loss.adversarial_temperature 0.9829373610364348 +848 43 optimizer.lr 0.008260066050482108 +848 43 negative_sampler.num_negs_per_pos 39.0 +848 43 training.batch_size 2.0 +848 44 model.embedding_dim 0.0 +848 44 model.scoring_fct_norm 1.0 +848 44 loss.margin 29.096246537955615 +848 44 
loss.adversarial_temperature 0.7918485839671656 +848 44 optimizer.lr 0.022922115065145562 +848 44 negative_sampler.num_negs_per_pos 32.0 +848 44 training.batch_size 0.0 +848 45 model.embedding_dim 1.0 +848 45 model.scoring_fct_norm 1.0 +848 45 loss.margin 9.870104058396805 +848 45 loss.adversarial_temperature 0.9600521814160036 +848 45 optimizer.lr 0.03547072280856838 +848 45 negative_sampler.num_negs_per_pos 48.0 +848 45 training.batch_size 1.0 +848 46 model.embedding_dim 2.0 +848 46 model.scoring_fct_norm 1.0 +848 46 loss.margin 28.62122303939792 +848 46 loss.adversarial_temperature 0.40066103612580306 +848 46 optimizer.lr 0.017908235639510897 +848 46 negative_sampler.num_negs_per_pos 13.0 +848 46 training.batch_size 1.0 +848 47 model.embedding_dim 0.0 +848 47 model.scoring_fct_norm 2.0 +848 47 loss.margin 16.472123736020492 +848 47 loss.adversarial_temperature 0.11810823486765626 +848 47 optimizer.lr 0.02305765543789608 +848 47 negative_sampler.num_negs_per_pos 13.0 +848 47 training.batch_size 2.0 +848 48 model.embedding_dim 1.0 +848 48 model.scoring_fct_norm 1.0 +848 48 loss.margin 25.025400994724297 +848 48 loss.adversarial_temperature 0.6838035607704056 +848 48 optimizer.lr 0.043337498624142254 +848 48 negative_sampler.num_negs_per_pos 21.0 +848 48 training.batch_size 0.0 +848 49 model.embedding_dim 2.0 +848 49 model.scoring_fct_norm 1.0 +848 49 loss.margin 13.249331972582228 +848 49 loss.adversarial_temperature 0.28626728468270807 +848 49 optimizer.lr 0.05102564535112728 +848 49 negative_sampler.num_negs_per_pos 26.0 +848 49 training.batch_size 0.0 +848 50 model.embedding_dim 1.0 +848 50 model.scoring_fct_norm 1.0 +848 50 loss.margin 22.30794218466 +848 50 loss.adversarial_temperature 0.1894683634149687 +848 50 optimizer.lr 0.002272942214713171 +848 50 negative_sampler.num_negs_per_pos 13.0 +848 50 training.batch_size 1.0 +848 51 model.embedding_dim 0.0 +848 51 model.scoring_fct_norm 1.0 +848 51 loss.margin 21.977785957242617 +848 51 
loss.adversarial_temperature 0.5966778218215487 +848 51 optimizer.lr 0.07267804520674072 +848 51 negative_sampler.num_negs_per_pos 47.0 +848 51 training.batch_size 3.0 +848 52 model.embedding_dim 2.0 +848 52 model.scoring_fct_norm 1.0 +848 52 loss.margin 6.945622320186126 +848 52 loss.adversarial_temperature 0.37109862854216036 +848 52 optimizer.lr 0.004565642218353508 +848 52 negative_sampler.num_negs_per_pos 14.0 +848 52 training.batch_size 3.0 +848 53 model.embedding_dim 1.0 +848 53 model.scoring_fct_norm 1.0 +848 53 loss.margin 1.2250793585458744 +848 53 loss.adversarial_temperature 0.16231222743648022 +848 53 optimizer.lr 0.009141431152676302 +848 53 negative_sampler.num_negs_per_pos 47.0 +848 53 training.batch_size 3.0 +848 54 model.embedding_dim 1.0 +848 54 model.scoring_fct_norm 1.0 +848 54 loss.margin 15.08376554896251 +848 54 loss.adversarial_temperature 0.518009261571851 +848 54 optimizer.lr 0.08310995526685623 +848 54 negative_sampler.num_negs_per_pos 20.0 +848 54 training.batch_size 0.0 +848 55 model.embedding_dim 2.0 +848 55 model.scoring_fct_norm 1.0 +848 55 loss.margin 14.599301130117885 +848 55 loss.adversarial_temperature 0.7340461813018826 +848 55 optimizer.lr 0.001877922076319364 +848 55 negative_sampler.num_negs_per_pos 15.0 +848 55 training.batch_size 2.0 +848 56 model.embedding_dim 0.0 +848 56 model.scoring_fct_norm 1.0 +848 56 loss.margin 5.742363349743135 +848 56 loss.adversarial_temperature 0.6305293991417817 +848 56 optimizer.lr 0.008245663037899685 +848 56 negative_sampler.num_negs_per_pos 23.0 +848 56 training.batch_size 0.0 +848 57 model.embedding_dim 0.0 +848 57 model.scoring_fct_norm 1.0 +848 57 loss.margin 27.02764133406525 +848 57 loss.adversarial_temperature 0.3543578627962126 +848 57 optimizer.lr 0.010383329969651968 +848 57 negative_sampler.num_negs_per_pos 12.0 +848 57 training.batch_size 2.0 +848 58 model.embedding_dim 2.0 +848 58 model.scoring_fct_norm 2.0 +848 58 loss.margin 12.427842025809543 +848 58 
loss.adversarial_temperature 0.24864431825047806 +848 58 optimizer.lr 0.04869733094697649 +848 58 negative_sampler.num_negs_per_pos 18.0 +848 58 training.batch_size 1.0 +848 59 model.embedding_dim 0.0 +848 59 model.scoring_fct_norm 1.0 +848 59 loss.margin 28.166493378315593 +848 59 loss.adversarial_temperature 0.34545626782844663 +848 59 optimizer.lr 0.04134285255877016 +848 59 negative_sampler.num_negs_per_pos 34.0 +848 59 training.batch_size 1.0 +848 60 model.embedding_dim 2.0 +848 60 model.scoring_fct_norm 1.0 +848 60 loss.margin 12.168577572113403 +848 60 loss.adversarial_temperature 0.8050093103149805 +848 60 optimizer.lr 0.0016264648506998714 +848 60 negative_sampler.num_negs_per_pos 15.0 +848 60 training.batch_size 2.0 +848 61 model.embedding_dim 2.0 +848 61 model.scoring_fct_norm 2.0 +848 61 loss.margin 7.284161098803681 +848 61 loss.adversarial_temperature 0.6649835429106045 +848 61 optimizer.lr 0.011332486120973075 +848 61 negative_sampler.num_negs_per_pos 35.0 +848 61 training.batch_size 2.0 +848 62 model.embedding_dim 2.0 +848 62 model.scoring_fct_norm 1.0 +848 62 loss.margin 21.77174020119001 +848 62 loss.adversarial_temperature 0.26231491442858546 +848 62 optimizer.lr 0.0058436032658075905 +848 62 negative_sampler.num_negs_per_pos 25.0 +848 62 training.batch_size 1.0 +848 63 model.embedding_dim 2.0 +848 63 model.scoring_fct_norm 2.0 +848 63 loss.margin 22.32785273116452 +848 63 loss.adversarial_temperature 0.9707175220101159 +848 63 optimizer.lr 0.004142843319094996 +848 63 negative_sampler.num_negs_per_pos 34.0 +848 63 training.batch_size 1.0 +848 64 model.embedding_dim 0.0 +848 64 model.scoring_fct_norm 2.0 +848 64 loss.margin 24.336830531044015 +848 64 loss.adversarial_temperature 0.8401596392825889 +848 64 optimizer.lr 0.09995016897120025 +848 64 negative_sampler.num_negs_per_pos 48.0 +848 64 training.batch_size 0.0 +848 65 model.embedding_dim 0.0 +848 65 model.scoring_fct_norm 2.0 +848 65 loss.margin 9.88651880006755 +848 65 
loss.adversarial_temperature 0.2019261124464748 +848 65 optimizer.lr 0.007950455393464766 +848 65 negative_sampler.num_negs_per_pos 29.0 +848 65 training.batch_size 0.0 +848 66 model.embedding_dim 1.0 +848 66 model.scoring_fct_norm 1.0 +848 66 loss.margin 10.950569321281117 +848 66 loss.adversarial_temperature 0.9147899804097352 +848 66 optimizer.lr 0.05348925924452557 +848 66 negative_sampler.num_negs_per_pos 45.0 +848 66 training.batch_size 2.0 +848 67 model.embedding_dim 0.0 +848 67 model.scoring_fct_norm 2.0 +848 67 loss.margin 4.28145856530944 +848 67 loss.adversarial_temperature 0.13138391616982958 +848 67 optimizer.lr 0.0036730893617227004 +848 67 negative_sampler.num_negs_per_pos 28.0 +848 67 training.batch_size 2.0 +848 68 model.embedding_dim 1.0 +848 68 model.scoring_fct_norm 2.0 +848 68 loss.margin 20.24815888740113 +848 68 loss.adversarial_temperature 0.6939627065284732 +848 68 optimizer.lr 0.08676071698151318 +848 68 negative_sampler.num_negs_per_pos 7.0 +848 68 training.batch_size 2.0 +848 69 model.embedding_dim 0.0 +848 69 model.scoring_fct_norm 1.0 +848 69 loss.margin 6.746471833220704 +848 69 loss.adversarial_temperature 0.6673990829279565 +848 69 optimizer.lr 0.01802416440693935 +848 69 negative_sampler.num_negs_per_pos 26.0 +848 69 training.batch_size 3.0 +848 70 model.embedding_dim 2.0 +848 70 model.scoring_fct_norm 2.0 +848 70 loss.margin 4.128593621368742 +848 70 loss.adversarial_temperature 0.5501038196081227 +848 70 optimizer.lr 0.024712021234981583 +848 70 negative_sampler.num_negs_per_pos 39.0 +848 70 training.batch_size 3.0 +848 71 model.embedding_dim 0.0 +848 71 model.scoring_fct_norm 2.0 +848 71 loss.margin 29.60510272436485 +848 71 loss.adversarial_temperature 0.8796643735552326 +848 71 optimizer.lr 0.014879750439934048 +848 71 negative_sampler.num_negs_per_pos 30.0 +848 71 training.batch_size 0.0 +848 72 model.embedding_dim 1.0 +848 72 model.scoring_fct_norm 1.0 +848 72 loss.margin 10.298809939192093 +848 72 
loss.adversarial_temperature 0.33886349079944755 +848 72 optimizer.lr 0.00748788770183728 +848 72 negative_sampler.num_negs_per_pos 40.0 +848 72 training.batch_size 0.0 +848 73 model.embedding_dim 2.0 +848 73 model.scoring_fct_norm 2.0 +848 73 loss.margin 14.569170266806305 +848 73 loss.adversarial_temperature 0.9355055350076134 +848 73 optimizer.lr 0.016740868651853476 +848 73 negative_sampler.num_negs_per_pos 43.0 +848 73 training.batch_size 0.0 +848 74 model.embedding_dim 0.0 +848 74 model.scoring_fct_norm 2.0 +848 74 loss.margin 20.126135820058074 +848 74 loss.adversarial_temperature 0.9562049587606627 +848 74 optimizer.lr 0.07242782106115972 +848 74 negative_sampler.num_negs_per_pos 37.0 +848 74 training.batch_size 0.0 +848 1 dataset """yago310""" +848 1 model """transe""" +848 1 loss """nssa""" +848 1 regularizer """no""" +848 1 optimizer """adam""" +848 1 training_loop """owa""" +848 1 negative_sampler """basic""" +848 1 evaluator """rankbased""" +848 2 dataset """yago310""" +848 2 model """transe""" +848 2 loss """nssa""" +848 2 regularizer """no""" +848 2 optimizer """adam""" +848 2 training_loop """owa""" +848 2 negative_sampler """basic""" +848 2 evaluator """rankbased""" +848 3 dataset """yago310""" +848 3 model """transe""" +848 3 loss """nssa""" +848 3 regularizer """no""" +848 3 optimizer """adam""" +848 3 training_loop """owa""" +848 3 negative_sampler """basic""" +848 3 evaluator """rankbased""" +848 4 dataset """yago310""" +848 4 model """transe""" +848 4 loss """nssa""" +848 4 regularizer """no""" +848 4 optimizer """adam""" +848 4 training_loop """owa""" +848 4 negative_sampler """basic""" +848 4 evaluator """rankbased""" +848 5 dataset """yago310""" +848 5 model """transe""" +848 5 loss """nssa""" +848 5 regularizer """no""" +848 5 optimizer """adam""" +848 5 training_loop """owa""" +848 5 negative_sampler """basic""" +848 5 evaluator """rankbased""" +848 6 dataset """yago310""" +848 6 model """transe""" +848 6 loss """nssa""" +848 6 
regularizer """no""" +848 6 optimizer """adam""" +848 6 training_loop """owa""" +848 6 negative_sampler """basic""" +848 6 evaluator """rankbased""" +848 7 dataset """yago310""" +848 7 model """transe""" +848 7 loss """nssa""" +848 7 regularizer """no""" +848 7 optimizer """adam""" +848 7 training_loop """owa""" +848 7 negative_sampler """basic""" +848 7 evaluator """rankbased""" +848 8 dataset """yago310""" +848 8 model """transe""" +848 8 loss """nssa""" +848 8 regularizer """no""" +848 8 optimizer """adam""" +848 8 training_loop """owa""" +848 8 negative_sampler """basic""" +848 8 evaluator """rankbased""" +848 9 dataset """yago310""" +848 9 model """transe""" +848 9 loss """nssa""" +848 9 regularizer """no""" +848 9 optimizer """adam""" +848 9 training_loop """owa""" +848 9 negative_sampler """basic""" +848 9 evaluator """rankbased""" +848 10 dataset """yago310""" +848 10 model """transe""" +848 10 loss """nssa""" +848 10 regularizer """no""" +848 10 optimizer """adam""" +848 10 training_loop """owa""" +848 10 negative_sampler """basic""" +848 10 evaluator """rankbased""" +848 11 dataset """yago310""" +848 11 model """transe""" +848 11 loss """nssa""" +848 11 regularizer """no""" +848 11 optimizer """adam""" +848 11 training_loop """owa""" +848 11 negative_sampler """basic""" +848 11 evaluator """rankbased""" +848 12 dataset """yago310""" +848 12 model """transe""" +848 12 loss """nssa""" +848 12 regularizer """no""" +848 12 optimizer """adam""" +848 12 training_loop """owa""" +848 12 negative_sampler """basic""" +848 12 evaluator """rankbased""" +848 13 dataset """yago310""" +848 13 model """transe""" +848 13 loss """nssa""" +848 13 regularizer """no""" +848 13 optimizer """adam""" +848 13 training_loop """owa""" +848 13 negative_sampler """basic""" +848 13 evaluator """rankbased""" +848 14 dataset """yago310""" +848 14 model """transe""" +848 14 loss """nssa""" +848 14 regularizer """no""" +848 14 optimizer """adam""" +848 14 training_loop """owa""" +848 14 
negative_sampler """basic""" +848 14 evaluator """rankbased""" +848 15 dataset """yago310""" +848 15 model """transe""" +848 15 loss """nssa""" +848 15 regularizer """no""" +848 15 optimizer """adam""" +848 15 training_loop """owa""" +848 15 negative_sampler """basic""" +848 15 evaluator """rankbased""" +848 16 dataset """yago310""" +848 16 model """transe""" +848 16 loss """nssa""" +848 16 regularizer """no""" +848 16 optimizer """adam""" +848 16 training_loop """owa""" +848 16 negative_sampler """basic""" +848 16 evaluator """rankbased""" +848 17 dataset """yago310""" +848 17 model """transe""" +848 17 loss """nssa""" +848 17 regularizer """no""" +848 17 optimizer """adam""" +848 17 training_loop """owa""" +848 17 negative_sampler """basic""" +848 17 evaluator """rankbased""" +848 18 dataset """yago310""" +848 18 model """transe""" +848 18 loss """nssa""" +848 18 regularizer """no""" +848 18 optimizer """adam""" +848 18 training_loop """owa""" +848 18 negative_sampler """basic""" +848 18 evaluator """rankbased""" +848 19 dataset """yago310""" +848 19 model """transe""" +848 19 loss """nssa""" +848 19 regularizer """no""" +848 19 optimizer """adam""" +848 19 training_loop """owa""" +848 19 negative_sampler """basic""" +848 19 evaluator """rankbased""" +848 20 dataset """yago310""" +848 20 model """transe""" +848 20 loss """nssa""" +848 20 regularizer """no""" +848 20 optimizer """adam""" +848 20 training_loop """owa""" +848 20 negative_sampler """basic""" +848 20 evaluator """rankbased""" +848 21 dataset """yago310""" +848 21 model """transe""" +848 21 loss """nssa""" +848 21 regularizer """no""" +848 21 optimizer """adam""" +848 21 training_loop """owa""" +848 21 negative_sampler """basic""" +848 21 evaluator """rankbased""" +848 22 dataset """yago310""" +848 22 model """transe""" +848 22 loss """nssa""" +848 22 regularizer """no""" +848 22 optimizer """adam""" +848 22 training_loop """owa""" +848 22 negative_sampler """basic""" +848 22 evaluator """rankbased""" 
+848 23 dataset """yago310""" +848 23 model """transe""" +848 23 loss """nssa""" +848 23 regularizer """no""" +848 23 optimizer """adam""" +848 23 training_loop """owa""" +848 23 negative_sampler """basic""" +848 23 evaluator """rankbased""" +848 24 dataset """yago310""" +848 24 model """transe""" +848 24 loss """nssa""" +848 24 regularizer """no""" +848 24 optimizer """adam""" +848 24 training_loop """owa""" +848 24 negative_sampler """basic""" +848 24 evaluator """rankbased""" +848 25 dataset """yago310""" +848 25 model """transe""" +848 25 loss """nssa""" +848 25 regularizer """no""" +848 25 optimizer """adam""" +848 25 training_loop """owa""" +848 25 negative_sampler """basic""" +848 25 evaluator """rankbased""" +848 26 dataset """yago310""" +848 26 model """transe""" +848 26 loss """nssa""" +848 26 regularizer """no""" +848 26 optimizer """adam""" +848 26 training_loop """owa""" +848 26 negative_sampler """basic""" +848 26 evaluator """rankbased""" +848 27 dataset """yago310""" +848 27 model """transe""" +848 27 loss """nssa""" +848 27 regularizer """no""" +848 27 optimizer """adam""" +848 27 training_loop """owa""" +848 27 negative_sampler """basic""" +848 27 evaluator """rankbased""" +848 28 dataset """yago310""" +848 28 model """transe""" +848 28 loss """nssa""" +848 28 regularizer """no""" +848 28 optimizer """adam""" +848 28 training_loop """owa""" +848 28 negative_sampler """basic""" +848 28 evaluator """rankbased""" +848 29 dataset """yago310""" +848 29 model """transe""" +848 29 loss """nssa""" +848 29 regularizer """no""" +848 29 optimizer """adam""" +848 29 training_loop """owa""" +848 29 negative_sampler """basic""" +848 29 evaluator """rankbased""" +848 30 dataset """yago310""" +848 30 model """transe""" +848 30 loss """nssa""" +848 30 regularizer """no""" +848 30 optimizer """adam""" +848 30 training_loop """owa""" +848 30 negative_sampler """basic""" +848 30 evaluator """rankbased""" +848 31 dataset """yago310""" +848 31 model """transe""" +848 
31 loss """nssa""" +848 31 regularizer """no""" +848 31 optimizer """adam""" +848 31 training_loop """owa""" +848 31 negative_sampler """basic""" +848 31 evaluator """rankbased""" +848 32 dataset """yago310""" +848 32 model """transe""" +848 32 loss """nssa""" +848 32 regularizer """no""" +848 32 optimizer """adam""" +848 32 training_loop """owa""" +848 32 negative_sampler """basic""" +848 32 evaluator """rankbased""" +848 33 dataset """yago310""" +848 33 model """transe""" +848 33 loss """nssa""" +848 33 regularizer """no""" +848 33 optimizer """adam""" +848 33 training_loop """owa""" +848 33 negative_sampler """basic""" +848 33 evaluator """rankbased""" +848 34 dataset """yago310""" +848 34 model """transe""" +848 34 loss """nssa""" +848 34 regularizer """no""" +848 34 optimizer """adam""" +848 34 training_loop """owa""" +848 34 negative_sampler """basic""" +848 34 evaluator """rankbased""" +848 35 dataset """yago310""" +848 35 model """transe""" +848 35 loss """nssa""" +848 35 regularizer """no""" +848 35 optimizer """adam""" +848 35 training_loop """owa""" +848 35 negative_sampler """basic""" +848 35 evaluator """rankbased""" +848 36 dataset """yago310""" +848 36 model """transe""" +848 36 loss """nssa""" +848 36 regularizer """no""" +848 36 optimizer """adam""" +848 36 training_loop """owa""" +848 36 negative_sampler """basic""" +848 36 evaluator """rankbased""" +848 37 dataset """yago310""" +848 37 model """transe""" +848 37 loss """nssa""" +848 37 regularizer """no""" +848 37 optimizer """adam""" +848 37 training_loop """owa""" +848 37 negative_sampler """basic""" +848 37 evaluator """rankbased""" +848 38 dataset """yago310""" +848 38 model """transe""" +848 38 loss """nssa""" +848 38 regularizer """no""" +848 38 optimizer """adam""" +848 38 training_loop """owa""" +848 38 negative_sampler """basic""" +848 38 evaluator """rankbased""" +848 39 dataset """yago310""" +848 39 model """transe""" +848 39 loss """nssa""" +848 39 regularizer """no""" +848 39 
optimizer """adam""" +848 39 training_loop """owa""" +848 39 negative_sampler """basic""" +848 39 evaluator """rankbased""" +848 40 dataset """yago310""" +848 40 model """transe""" +848 40 loss """nssa""" +848 40 regularizer """no""" +848 40 optimizer """adam""" +848 40 training_loop """owa""" +848 40 negative_sampler """basic""" +848 40 evaluator """rankbased""" +848 41 dataset """yago310""" +848 41 model """transe""" +848 41 loss """nssa""" +848 41 regularizer """no""" +848 41 optimizer """adam""" +848 41 training_loop """owa""" +848 41 negative_sampler """basic""" +848 41 evaluator """rankbased""" +848 42 dataset """yago310""" +848 42 model """transe""" +848 42 loss """nssa""" +848 42 regularizer """no""" +848 42 optimizer """adam""" +848 42 training_loop """owa""" +848 42 negative_sampler """basic""" +848 42 evaluator """rankbased""" +848 43 dataset """yago310""" +848 43 model """transe""" +848 43 loss """nssa""" +848 43 regularizer """no""" +848 43 optimizer """adam""" +848 43 training_loop """owa""" +848 43 negative_sampler """basic""" +848 43 evaluator """rankbased""" +848 44 dataset """yago310""" +848 44 model """transe""" +848 44 loss """nssa""" +848 44 regularizer """no""" +848 44 optimizer """adam""" +848 44 training_loop """owa""" +848 44 negative_sampler """basic""" +848 44 evaluator """rankbased""" +848 45 dataset """yago310""" +848 45 model """transe""" +848 45 loss """nssa""" +848 45 regularizer """no""" +848 45 optimizer """adam""" +848 45 training_loop """owa""" +848 45 negative_sampler """basic""" +848 45 evaluator """rankbased""" +848 46 dataset """yago310""" +848 46 model """transe""" +848 46 loss """nssa""" +848 46 regularizer """no""" +848 46 optimizer """adam""" +848 46 training_loop """owa""" +848 46 negative_sampler """basic""" +848 46 evaluator """rankbased""" +848 47 dataset """yago310""" +848 47 model """transe""" +848 47 loss """nssa""" +848 47 regularizer """no""" +848 47 optimizer """adam""" +848 47 training_loop """owa""" +848 47 
negative_sampler """basic""" +848 47 evaluator """rankbased""" +848 48 dataset """yago310""" +848 48 model """transe""" +848 48 loss """nssa""" +848 48 regularizer """no""" +848 48 optimizer """adam""" +848 48 training_loop """owa""" +848 48 negative_sampler """basic""" +848 48 evaluator """rankbased""" +848 49 dataset """yago310""" +848 49 model """transe""" +848 49 loss """nssa""" +848 49 regularizer """no""" +848 49 optimizer """adam""" +848 49 training_loop """owa""" +848 49 negative_sampler """basic""" +848 49 evaluator """rankbased""" +848 50 dataset """yago310""" +848 50 model """transe""" +848 50 loss """nssa""" +848 50 regularizer """no""" +848 50 optimizer """adam""" +848 50 training_loop """owa""" +848 50 negative_sampler """basic""" +848 50 evaluator """rankbased""" +848 51 dataset """yago310""" +848 51 model """transe""" +848 51 loss """nssa""" +848 51 regularizer """no""" +848 51 optimizer """adam""" +848 51 training_loop """owa""" +848 51 negative_sampler """basic""" +848 51 evaluator """rankbased""" +848 52 dataset """yago310""" +848 52 model """transe""" +848 52 loss """nssa""" +848 52 regularizer """no""" +848 52 optimizer """adam""" +848 52 training_loop """owa""" +848 52 negative_sampler """basic""" +848 52 evaluator """rankbased""" +848 53 dataset """yago310""" +848 53 model """transe""" +848 53 loss """nssa""" +848 53 regularizer """no""" +848 53 optimizer """adam""" +848 53 training_loop """owa""" +848 53 negative_sampler """basic""" +848 53 evaluator """rankbased""" +848 54 dataset """yago310""" +848 54 model """transe""" +848 54 loss """nssa""" +848 54 regularizer """no""" +848 54 optimizer """adam""" +848 54 training_loop """owa""" +848 54 negative_sampler """basic""" +848 54 evaluator """rankbased""" +848 55 dataset """yago310""" +848 55 model """transe""" +848 55 loss """nssa""" +848 55 regularizer """no""" +848 55 optimizer """adam""" +848 55 training_loop """owa""" +848 55 negative_sampler """basic""" +848 55 evaluator """rankbased""" 
+848 56 dataset """yago310""" +848 56 model """transe""" +848 56 loss """nssa""" +848 56 regularizer """no""" +848 56 optimizer """adam""" +848 56 training_loop """owa""" +848 56 negative_sampler """basic""" +848 56 evaluator """rankbased""" +848 57 dataset """yago310""" +848 57 model """transe""" +848 57 loss """nssa""" +848 57 regularizer """no""" +848 57 optimizer """adam""" +848 57 training_loop """owa""" +848 57 negative_sampler """basic""" +848 57 evaluator """rankbased""" +848 58 dataset """yago310""" +848 58 model """transe""" +848 58 loss """nssa""" +848 58 regularizer """no""" +848 58 optimizer """adam""" +848 58 training_loop """owa""" +848 58 negative_sampler """basic""" +848 58 evaluator """rankbased""" +848 59 dataset """yago310""" +848 59 model """transe""" +848 59 loss """nssa""" +848 59 regularizer """no""" +848 59 optimizer """adam""" +848 59 training_loop """owa""" +848 59 negative_sampler """basic""" +848 59 evaluator """rankbased""" +848 60 dataset """yago310""" +848 60 model """transe""" +848 60 loss """nssa""" +848 60 regularizer """no""" +848 60 optimizer """adam""" +848 60 training_loop """owa""" +848 60 negative_sampler """basic""" +848 60 evaluator """rankbased""" +848 61 dataset """yago310""" +848 61 model """transe""" +848 61 loss """nssa""" +848 61 regularizer """no""" +848 61 optimizer """adam""" +848 61 training_loop """owa""" +848 61 negative_sampler """basic""" +848 61 evaluator """rankbased""" +848 62 dataset """yago310""" +848 62 model """transe""" +848 62 loss """nssa""" +848 62 regularizer """no""" +848 62 optimizer """adam""" +848 62 training_loop """owa""" +848 62 negative_sampler """basic""" +848 62 evaluator """rankbased""" +848 63 dataset """yago310""" +848 63 model """transe""" +848 63 loss """nssa""" +848 63 regularizer """no""" +848 63 optimizer """adam""" +848 63 training_loop """owa""" +848 63 negative_sampler """basic""" +848 63 evaluator """rankbased""" +848 64 dataset """yago310""" +848 64 model """transe""" +848 
64 loss """nssa""" +848 64 regularizer """no""" +848 64 optimizer """adam""" +848 64 training_loop """owa""" +848 64 negative_sampler """basic""" +848 64 evaluator """rankbased""" +848 65 dataset """yago310""" +848 65 model """transe""" +848 65 loss """nssa""" +848 65 regularizer """no""" +848 65 optimizer """adam""" +848 65 training_loop """owa""" +848 65 negative_sampler """basic""" +848 65 evaluator """rankbased""" +848 66 dataset """yago310""" +848 66 model """transe""" +848 66 loss """nssa""" +848 66 regularizer """no""" +848 66 optimizer """adam""" +848 66 training_loop """owa""" +848 66 negative_sampler """basic""" +848 66 evaluator """rankbased""" +848 67 dataset """yago310""" +848 67 model """transe""" +848 67 loss """nssa""" +848 67 regularizer """no""" +848 67 optimizer """adam""" +848 67 training_loop """owa""" +848 67 negative_sampler """basic""" +848 67 evaluator """rankbased""" +848 68 dataset """yago310""" +848 68 model """transe""" +848 68 loss """nssa""" +848 68 regularizer """no""" +848 68 optimizer """adam""" +848 68 training_loop """owa""" +848 68 negative_sampler """basic""" +848 68 evaluator """rankbased""" +848 69 dataset """yago310""" +848 69 model """transe""" +848 69 loss """nssa""" +848 69 regularizer """no""" +848 69 optimizer """adam""" +848 69 training_loop """owa""" +848 69 negative_sampler """basic""" +848 69 evaluator """rankbased""" +848 70 dataset """yago310""" +848 70 model """transe""" +848 70 loss """nssa""" +848 70 regularizer """no""" +848 70 optimizer """adam""" +848 70 training_loop """owa""" +848 70 negative_sampler """basic""" +848 70 evaluator """rankbased""" +848 71 dataset """yago310""" +848 71 model """transe""" +848 71 loss """nssa""" +848 71 regularizer """no""" +848 71 optimizer """adam""" +848 71 training_loop """owa""" +848 71 negative_sampler """basic""" +848 71 evaluator """rankbased""" +848 72 dataset """yago310""" +848 72 model """transe""" +848 72 loss """nssa""" +848 72 regularizer """no""" +848 72 
optimizer """adam""" +848 72 training_loop """owa""" +848 72 negative_sampler """basic""" +848 72 evaluator """rankbased""" +848 73 dataset """yago310""" +848 73 model """transe""" +848 73 loss """nssa""" +848 73 regularizer """no""" +848 73 optimizer """adam""" +848 73 training_loop """owa""" +848 73 negative_sampler """basic""" +848 73 evaluator """rankbased""" +848 74 dataset """yago310""" +848 74 model """transe""" +848 74 loss """nssa""" +848 74 regularizer """no""" +848 74 optimizer """adam""" +848 74 training_loop """owa""" +848 74 negative_sampler """basic""" +848 74 evaluator """rankbased""" +849 1 model.embedding_dim 2.0 +849 1 model.scoring_fct_norm 2.0 +849 1 loss.margin 2.657409556132747 +849 1 optimizer.lr 0.00601463642142232 +849 1 negative_sampler.num_negs_per_pos 39.0 +849 1 training.batch_size 2.0 +849 2 model.embedding_dim 0.0 +849 2 model.scoring_fct_norm 2.0 +849 2 loss.margin 8.984667535851415 +849 2 optimizer.lr 0.07432342186838704 +849 2 negative_sampler.num_negs_per_pos 40.0 +849 2 training.batch_size 1.0 +849 3 model.embedding_dim 0.0 +849 3 model.scoring_fct_norm 1.0 +849 3 loss.margin 6.463854846181602 +849 3 optimizer.lr 0.03735744706883088 +849 3 negative_sampler.num_negs_per_pos 38.0 +849 3 training.batch_size 0.0 +849 4 model.embedding_dim 1.0 +849 4 model.scoring_fct_norm 1.0 +849 4 loss.margin 2.6752081303324973 +849 4 optimizer.lr 0.027053554676045248 +849 4 negative_sampler.num_negs_per_pos 29.0 +849 4 training.batch_size 3.0 +849 5 model.embedding_dim 1.0 +849 5 model.scoring_fct_norm 2.0 +849 5 loss.margin 4.866400897801323 +849 5 optimizer.lr 0.0011541530677187776 +849 5 negative_sampler.num_negs_per_pos 2.0 +849 5 training.batch_size 0.0 +849 6 model.embedding_dim 0.0 +849 6 model.scoring_fct_norm 2.0 +849 6 loss.margin 0.7628622972007879 +849 6 optimizer.lr 0.0022041797547352254 +849 6 negative_sampler.num_negs_per_pos 28.0 +849 6 training.batch_size 2.0 +849 7 model.embedding_dim 0.0 +849 7 model.scoring_fct_norm 2.0 +849 7 
loss.margin 8.054485818600021 +849 7 optimizer.lr 0.0021618879953821993 +849 7 negative_sampler.num_negs_per_pos 0.0 +849 7 training.batch_size 1.0 +849 8 model.embedding_dim 1.0 +849 8 model.scoring_fct_norm 1.0 +849 8 loss.margin 1.8759240333246245 +849 8 optimizer.lr 0.0013735776405160347 +849 8 negative_sampler.num_negs_per_pos 31.0 +849 8 training.batch_size 1.0 +849 9 model.embedding_dim 1.0 +849 9 model.scoring_fct_norm 2.0 +849 9 loss.margin 8.19980224789878 +849 9 optimizer.lr 0.011199772187239884 +849 9 negative_sampler.num_negs_per_pos 16.0 +849 9 training.batch_size 0.0 +849 10 model.embedding_dim 0.0 +849 10 model.scoring_fct_norm 2.0 +849 10 loss.margin 3.432188182915101 +849 10 optimizer.lr 0.07758178074068828 +849 10 negative_sampler.num_negs_per_pos 45.0 +849 10 training.batch_size 1.0 +849 11 model.embedding_dim 1.0 +849 11 model.scoring_fct_norm 1.0 +849 11 loss.margin 5.636409549522655 +849 11 optimizer.lr 0.002573768302514827 +849 11 negative_sampler.num_negs_per_pos 40.0 +849 11 training.batch_size 3.0 +849 12 model.embedding_dim 1.0 +849 12 model.scoring_fct_norm 2.0 +849 12 loss.margin 8.799310074214011 +849 12 optimizer.lr 0.0021030669987587023 +849 12 negative_sampler.num_negs_per_pos 36.0 +849 12 training.batch_size 2.0 +849 13 model.embedding_dim 1.0 +849 13 model.scoring_fct_norm 2.0 +849 13 loss.margin 9.761793491533062 +849 13 optimizer.lr 0.05575634714913751 +849 13 negative_sampler.num_negs_per_pos 25.0 +849 13 training.batch_size 3.0 +849 14 model.embedding_dim 2.0 +849 14 model.scoring_fct_norm 2.0 +849 14 loss.margin 8.642546195792756 +849 14 optimizer.lr 0.005177670728283681 +849 14 negative_sampler.num_negs_per_pos 1.0 +849 14 training.batch_size 1.0 +849 15 model.embedding_dim 1.0 +849 15 model.scoring_fct_norm 1.0 +849 15 loss.margin 0.9896833595141088 +849 15 optimizer.lr 0.03735849152479736 +849 15 negative_sampler.num_negs_per_pos 23.0 +849 15 training.batch_size 2.0 +849 16 model.embedding_dim 2.0 +849 16 
model.scoring_fct_norm 2.0 +849 16 loss.margin 7.211456920163325 +849 16 optimizer.lr 0.0044588589023952276 +849 16 negative_sampler.num_negs_per_pos 20.0 +849 16 training.batch_size 2.0 +849 17 model.embedding_dim 1.0 +849 17 model.scoring_fct_norm 2.0 +849 17 loss.margin 5.226484289471025 +849 17 optimizer.lr 0.06412824331051231 +849 17 negative_sampler.num_negs_per_pos 6.0 +849 17 training.batch_size 2.0 +849 18 model.embedding_dim 0.0 +849 18 model.scoring_fct_norm 2.0 +849 18 loss.margin 9.27773972912267 +849 18 optimizer.lr 0.001321947174202 +849 18 negative_sampler.num_negs_per_pos 18.0 +849 18 training.batch_size 2.0 +849 19 model.embedding_dim 2.0 +849 19 model.scoring_fct_norm 1.0 +849 19 loss.margin 2.457003341824106 +849 19 optimizer.lr 0.0016921097920524055 +849 19 negative_sampler.num_negs_per_pos 29.0 +849 19 training.batch_size 0.0 +849 20 model.embedding_dim 2.0 +849 20 model.scoring_fct_norm 1.0 +849 20 loss.margin 8.92658138340966 +849 20 optimizer.lr 0.0013236665356882455 +849 20 negative_sampler.num_negs_per_pos 36.0 +849 20 training.batch_size 1.0 +849 21 model.embedding_dim 2.0 +849 21 model.scoring_fct_norm 2.0 +849 21 loss.margin 1.0874583837673701 +849 21 optimizer.lr 0.009453350157097121 +849 21 negative_sampler.num_negs_per_pos 48.0 +849 21 training.batch_size 3.0 +849 22 model.embedding_dim 0.0 +849 22 model.scoring_fct_norm 1.0 +849 22 loss.margin 2.637592012795342 +849 22 optimizer.lr 0.001025063196244436 +849 22 negative_sampler.num_negs_per_pos 4.0 +849 22 training.batch_size 2.0 +849 23 model.embedding_dim 1.0 +849 23 model.scoring_fct_norm 1.0 +849 23 loss.margin 8.480436284458257 +849 23 optimizer.lr 0.012144010273545452 +849 23 negative_sampler.num_negs_per_pos 25.0 +849 23 training.batch_size 1.0 +849 24 model.embedding_dim 0.0 +849 24 model.scoring_fct_norm 2.0 +849 24 loss.margin 7.133959725425864 +849 24 optimizer.lr 0.041167634499220826 +849 24 negative_sampler.num_negs_per_pos 2.0 +849 24 training.batch_size 3.0 +849 25 
model.embedding_dim 0.0 +849 25 model.scoring_fct_norm 2.0 +849 25 loss.margin 5.375475886363463 +849 25 optimizer.lr 0.047052057470533246 +849 25 negative_sampler.num_negs_per_pos 40.0 +849 25 training.batch_size 3.0 +849 26 model.embedding_dim 0.0 +849 26 model.scoring_fct_norm 1.0 +849 26 loss.margin 3.3019232611100415 +849 26 optimizer.lr 0.0023476578735296535 +849 26 negative_sampler.num_negs_per_pos 19.0 +849 26 training.batch_size 2.0 +849 27 model.embedding_dim 1.0 +849 27 model.scoring_fct_norm 2.0 +849 27 loss.margin 8.063279346114637 +849 27 optimizer.lr 0.006201147344584732 +849 27 negative_sampler.num_negs_per_pos 9.0 +849 27 training.batch_size 2.0 +849 28 model.embedding_dim 2.0 +849 28 model.scoring_fct_norm 2.0 +849 28 loss.margin 8.361477735226813 +849 28 optimizer.lr 0.011743707398389704 +849 28 negative_sampler.num_negs_per_pos 26.0 +849 28 training.batch_size 1.0 +849 29 model.embedding_dim 2.0 +849 29 model.scoring_fct_norm 2.0 +849 29 loss.margin 5.702341762063136 +849 29 optimizer.lr 0.005890422799557918 +849 29 negative_sampler.num_negs_per_pos 7.0 +849 29 training.batch_size 2.0 +849 30 model.embedding_dim 2.0 +849 30 model.scoring_fct_norm 1.0 +849 30 loss.margin 5.367567202154639 +849 30 optimizer.lr 0.0217179267695009 +849 30 negative_sampler.num_negs_per_pos 31.0 +849 30 training.batch_size 3.0 +849 31 model.embedding_dim 2.0 +849 31 model.scoring_fct_norm 2.0 +849 31 loss.margin 0.9022044984105541 +849 31 optimizer.lr 0.0028413942272775554 +849 31 negative_sampler.num_negs_per_pos 25.0 +849 31 training.batch_size 0.0 +849 32 model.embedding_dim 1.0 +849 32 model.scoring_fct_norm 2.0 +849 32 loss.margin 4.870518874404683 +849 32 optimizer.lr 0.001839594339136914 +849 32 negative_sampler.num_negs_per_pos 9.0 +849 32 training.batch_size 1.0 +849 33 model.embedding_dim 0.0 +849 33 model.scoring_fct_norm 1.0 +849 33 loss.margin 1.1468859171221262 +849 33 optimizer.lr 0.0020532799862991854 +849 33 negative_sampler.num_negs_per_pos 34.0 +849 
33 training.batch_size 1.0 +849 34 model.embedding_dim 1.0 +849 34 model.scoring_fct_norm 1.0 +849 34 loss.margin 9.420926851946895 +849 34 optimizer.lr 0.0015429020092462018 +849 34 negative_sampler.num_negs_per_pos 18.0 +849 34 training.batch_size 3.0 +849 35 model.embedding_dim 2.0 +849 35 model.scoring_fct_norm 1.0 +849 35 loss.margin 5.137528745107155 +849 35 optimizer.lr 0.0013180029595046313 +849 35 negative_sampler.num_negs_per_pos 40.0 +849 35 training.batch_size 2.0 +849 36 model.embedding_dim 0.0 +849 36 model.scoring_fct_norm 1.0 +849 36 loss.margin 1.0589322842332474 +849 36 optimizer.lr 0.0013354785553971904 +849 36 negative_sampler.num_negs_per_pos 47.0 +849 36 training.batch_size 0.0 +849 1 dataset """yago310""" +849 1 model """transe""" +849 1 loss """marginranking""" +849 1 regularizer """no""" +849 1 optimizer """adam""" +849 1 training_loop """owa""" +849 1 negative_sampler """basic""" +849 1 evaluator """rankbased""" +849 2 dataset """yago310""" +849 2 model """transe""" +849 2 loss """marginranking""" +849 2 regularizer """no""" +849 2 optimizer """adam""" +849 2 training_loop """owa""" +849 2 negative_sampler """basic""" +849 2 evaluator """rankbased""" +849 3 dataset """yago310""" +849 3 model """transe""" +849 3 loss """marginranking""" +849 3 regularizer """no""" +849 3 optimizer """adam""" +849 3 training_loop """owa""" +849 3 negative_sampler """basic""" +849 3 evaluator """rankbased""" +849 4 dataset """yago310""" +849 4 model """transe""" +849 4 loss """marginranking""" +849 4 regularizer """no""" +849 4 optimizer """adam""" +849 4 training_loop """owa""" +849 4 negative_sampler """basic""" +849 4 evaluator """rankbased""" +849 5 dataset """yago310""" +849 5 model """transe""" +849 5 loss """marginranking""" +849 5 regularizer """no""" +849 5 optimizer """adam""" +849 5 training_loop """owa""" +849 5 negative_sampler """basic""" +849 5 evaluator """rankbased""" +849 6 dataset """yago310""" +849 6 model """transe""" +849 6 loss 
"""marginranking""" +849 6 regularizer """no""" +849 6 optimizer """adam""" +849 6 training_loop """owa""" +849 6 negative_sampler """basic""" +849 6 evaluator """rankbased""" +849 7 dataset """yago310""" +849 7 model """transe""" +849 7 loss """marginranking""" +849 7 regularizer """no""" +849 7 optimizer """adam""" +849 7 training_loop """owa""" +849 7 negative_sampler """basic""" +849 7 evaluator """rankbased""" +849 8 dataset """yago310""" +849 8 model """transe""" +849 8 loss """marginranking""" +849 8 regularizer """no""" +849 8 optimizer """adam""" +849 8 training_loop """owa""" +849 8 negative_sampler """basic""" +849 8 evaluator """rankbased""" +849 9 dataset """yago310""" +849 9 model """transe""" +849 9 loss """marginranking""" +849 9 regularizer """no""" +849 9 optimizer """adam""" +849 9 training_loop """owa""" +849 9 negative_sampler """basic""" +849 9 evaluator """rankbased""" +849 10 dataset """yago310""" +849 10 model """transe""" +849 10 loss """marginranking""" +849 10 regularizer """no""" +849 10 optimizer """adam""" +849 10 training_loop """owa""" +849 10 negative_sampler """basic""" +849 10 evaluator """rankbased""" +849 11 dataset """yago310""" +849 11 model """transe""" +849 11 loss """marginranking""" +849 11 regularizer """no""" +849 11 optimizer """adam""" +849 11 training_loop """owa""" +849 11 negative_sampler """basic""" +849 11 evaluator """rankbased""" +849 12 dataset """yago310""" +849 12 model """transe""" +849 12 loss """marginranking""" +849 12 regularizer """no""" +849 12 optimizer """adam""" +849 12 training_loop """owa""" +849 12 negative_sampler """basic""" +849 12 evaluator """rankbased""" +849 13 dataset """yago310""" +849 13 model """transe""" +849 13 loss """marginranking""" +849 13 regularizer """no""" +849 13 optimizer """adam""" +849 13 training_loop """owa""" +849 13 negative_sampler """basic""" +849 13 evaluator """rankbased""" +849 14 dataset """yago310""" +849 14 model """transe""" +849 14 loss """marginranking""" 
+849 14 regularizer """no""" +849 14 optimizer """adam""" +849 14 training_loop """owa""" +849 14 negative_sampler """basic""" +849 14 evaluator """rankbased""" +849 15 dataset """yago310""" +849 15 model """transe""" +849 15 loss """marginranking""" +849 15 regularizer """no""" +849 15 optimizer """adam""" +849 15 training_loop """owa""" +849 15 negative_sampler """basic""" +849 15 evaluator """rankbased""" +849 16 dataset """yago310""" +849 16 model """transe""" +849 16 loss """marginranking""" +849 16 regularizer """no""" +849 16 optimizer """adam""" +849 16 training_loop """owa""" +849 16 negative_sampler """basic""" +849 16 evaluator """rankbased""" +849 17 dataset """yago310""" +849 17 model """transe""" +849 17 loss """marginranking""" +849 17 regularizer """no""" +849 17 optimizer """adam""" +849 17 training_loop """owa""" +849 17 negative_sampler """basic""" +849 17 evaluator """rankbased""" +849 18 dataset """yago310""" +849 18 model """transe""" +849 18 loss """marginranking""" +849 18 regularizer """no""" +849 18 optimizer """adam""" +849 18 training_loop """owa""" +849 18 negative_sampler """basic""" +849 18 evaluator """rankbased""" +849 19 dataset """yago310""" +849 19 model """transe""" +849 19 loss """marginranking""" +849 19 regularizer """no""" +849 19 optimizer """adam""" +849 19 training_loop """owa""" +849 19 negative_sampler """basic""" +849 19 evaluator """rankbased""" +849 20 dataset """yago310""" +849 20 model """transe""" +849 20 loss """marginranking""" +849 20 regularizer """no""" +849 20 optimizer """adam""" +849 20 training_loop """owa""" +849 20 negative_sampler """basic""" +849 20 evaluator """rankbased""" +849 21 dataset """yago310""" +849 21 model """transe""" +849 21 loss """marginranking""" +849 21 regularizer """no""" +849 21 optimizer """adam""" +849 21 training_loop """owa""" +849 21 negative_sampler """basic""" +849 21 evaluator """rankbased""" +849 22 dataset """yago310""" +849 22 model """transe""" +849 22 loss 
"""marginranking""" +849 22 regularizer """no""" +849 22 optimizer """adam""" +849 22 training_loop """owa""" +849 22 negative_sampler """basic""" +849 22 evaluator """rankbased""" +849 23 dataset """yago310""" +849 23 model """transe""" +849 23 loss """marginranking""" +849 23 regularizer """no""" +849 23 optimizer """adam""" +849 23 training_loop """owa""" +849 23 negative_sampler """basic""" +849 23 evaluator """rankbased""" +849 24 dataset """yago310""" +849 24 model """transe""" +849 24 loss """marginranking""" +849 24 regularizer """no""" +849 24 optimizer """adam""" +849 24 training_loop """owa""" +849 24 negative_sampler """basic""" +849 24 evaluator """rankbased""" +849 25 dataset """yago310""" +849 25 model """transe""" +849 25 loss """marginranking""" +849 25 regularizer """no""" +849 25 optimizer """adam""" +849 25 training_loop """owa""" +849 25 negative_sampler """basic""" +849 25 evaluator """rankbased""" +849 26 dataset """yago310""" +849 26 model """transe""" +849 26 loss """marginranking""" +849 26 regularizer """no""" +849 26 optimizer """adam""" +849 26 training_loop """owa""" +849 26 negative_sampler """basic""" +849 26 evaluator """rankbased""" +849 27 dataset """yago310""" +849 27 model """transe""" +849 27 loss """marginranking""" +849 27 regularizer """no""" +849 27 optimizer """adam""" +849 27 training_loop """owa""" +849 27 negative_sampler """basic""" +849 27 evaluator """rankbased""" +849 28 dataset """yago310""" +849 28 model """transe""" +849 28 loss """marginranking""" +849 28 regularizer """no""" +849 28 optimizer """adam""" +849 28 training_loop """owa""" +849 28 negative_sampler """basic""" +849 28 evaluator """rankbased""" +849 29 dataset """yago310""" +849 29 model """transe""" +849 29 loss """marginranking""" +849 29 regularizer """no""" +849 29 optimizer """adam""" +849 29 training_loop """owa""" +849 29 negative_sampler """basic""" +849 29 evaluator """rankbased""" +849 30 dataset """yago310""" +849 30 model """transe""" +849 
30 loss """marginranking""" +849 30 regularizer """no""" +849 30 optimizer """adam""" +849 30 training_loop """owa""" +849 30 negative_sampler """basic""" +849 30 evaluator """rankbased""" +849 31 dataset """yago310""" +849 31 model """transe""" +849 31 loss """marginranking""" +849 31 regularizer """no""" +849 31 optimizer """adam""" +849 31 training_loop """owa""" +849 31 negative_sampler """basic""" +849 31 evaluator """rankbased""" +849 32 dataset """yago310""" +849 32 model """transe""" +849 32 loss """marginranking""" +849 32 regularizer """no""" +849 32 optimizer """adam""" +849 32 training_loop """owa""" +849 32 negative_sampler """basic""" +849 32 evaluator """rankbased""" +849 33 dataset """yago310""" +849 33 model """transe""" +849 33 loss """marginranking""" +849 33 regularizer """no""" +849 33 optimizer """adam""" +849 33 training_loop """owa""" +849 33 negative_sampler """basic""" +849 33 evaluator """rankbased""" +849 34 dataset """yago310""" +849 34 model """transe""" +849 34 loss """marginranking""" +849 34 regularizer """no""" +849 34 optimizer """adam""" +849 34 training_loop """owa""" +849 34 negative_sampler """basic""" +849 34 evaluator """rankbased""" +849 35 dataset """yago310""" +849 35 model """transe""" +849 35 loss """marginranking""" +849 35 regularizer """no""" +849 35 optimizer """adam""" +849 35 training_loop """owa""" +849 35 negative_sampler """basic""" +849 35 evaluator """rankbased""" +849 36 dataset """yago310""" +849 36 model """transe""" +849 36 loss """marginranking""" +849 36 regularizer """no""" +849 36 optimizer """adam""" +849 36 training_loop """owa""" +849 36 negative_sampler """basic""" +849 36 evaluator """rankbased""" +850 1 model.embedding_dim 2.0 +850 1 model.scoring_fct_norm 2.0 +850 1 loss.margin 4.743993532598544 +850 1 optimizer.lr 0.05547631440640861 +850 1 negative_sampler.num_negs_per_pos 37.0 +850 1 training.batch_size 3.0 +850 2 model.embedding_dim 2.0 +850 2 model.scoring_fct_norm 1.0 +850 2 loss.margin 
5.964618990743501 +850 2 optimizer.lr 0.004867147091736996 +850 2 negative_sampler.num_negs_per_pos 43.0 +850 2 training.batch_size 3.0 +850 3 model.embedding_dim 0.0 +850 3 model.scoring_fct_norm 2.0 +850 3 loss.margin 2.788511228266535 +850 3 optimizer.lr 0.0067733605598269955 +850 3 negative_sampler.num_negs_per_pos 49.0 +850 3 training.batch_size 0.0 +850 4 model.embedding_dim 0.0 +850 4 model.scoring_fct_norm 1.0 +850 4 loss.margin 9.26198586534415 +850 4 optimizer.lr 0.002199381388674365 +850 4 negative_sampler.num_negs_per_pos 10.0 +850 4 training.batch_size 1.0 +850 5 model.embedding_dim 1.0 +850 5 model.scoring_fct_norm 2.0 +850 5 loss.margin 1.1890187657597913 +850 5 optimizer.lr 0.0020708710254994144 +850 5 negative_sampler.num_negs_per_pos 32.0 +850 5 training.batch_size 1.0 +850 6 model.embedding_dim 0.0 +850 6 model.scoring_fct_norm 2.0 +850 6 loss.margin 9.133849163927717 +850 6 optimizer.lr 0.0026484407207448893 +850 6 negative_sampler.num_negs_per_pos 10.0 +850 6 training.batch_size 2.0 +850 7 model.embedding_dim 1.0 +850 7 model.scoring_fct_norm 2.0 +850 7 loss.margin 1.4732450359927256 +850 7 optimizer.lr 0.0392519750359342 +850 7 negative_sampler.num_negs_per_pos 13.0 +850 7 training.batch_size 1.0 +850 8 model.embedding_dim 0.0 +850 8 model.scoring_fct_norm 1.0 +850 8 loss.margin 6.371087684561577 +850 8 optimizer.lr 0.02553415057635149 +850 8 negative_sampler.num_negs_per_pos 46.0 +850 8 training.batch_size 0.0 +850 9 model.embedding_dim 2.0 +850 9 model.scoring_fct_norm 2.0 +850 9 loss.margin 1.254027985545334 +850 9 optimizer.lr 0.0031399070933718328 +850 9 negative_sampler.num_negs_per_pos 19.0 +850 9 training.batch_size 3.0 +850 10 model.embedding_dim 2.0 +850 10 model.scoring_fct_norm 2.0 +850 10 loss.margin 5.811207960889409 +850 10 optimizer.lr 0.01428606555466465 +850 10 negative_sampler.num_negs_per_pos 25.0 +850 10 training.batch_size 2.0 +850 11 model.embedding_dim 0.0 +850 11 model.scoring_fct_norm 2.0 +850 11 loss.margin 
6.053151093035583 +850 11 optimizer.lr 0.08095217643972538 +850 11 negative_sampler.num_negs_per_pos 39.0 +850 11 training.batch_size 3.0 +850 12 model.embedding_dim 0.0 +850 12 model.scoring_fct_norm 2.0 +850 12 loss.margin 1.8121817720714029 +850 12 optimizer.lr 0.001924340364241195 +850 12 negative_sampler.num_negs_per_pos 14.0 +850 12 training.batch_size 2.0 +850 13 model.embedding_dim 2.0 +850 13 model.scoring_fct_norm 2.0 +850 13 loss.margin 0.8089913694352658 +850 13 optimizer.lr 0.06973919553684454 +850 13 negative_sampler.num_negs_per_pos 37.0 +850 13 training.batch_size 1.0 +850 14 model.embedding_dim 1.0 +850 14 model.scoring_fct_norm 2.0 +850 14 loss.margin 0.645275314749764 +850 14 optimizer.lr 0.005770502959843387 +850 14 negative_sampler.num_negs_per_pos 41.0 +850 14 training.batch_size 3.0 +850 15 model.embedding_dim 2.0 +850 15 model.scoring_fct_norm 1.0 +850 15 loss.margin 2.003722909442347 +850 15 optimizer.lr 0.006609848268060631 +850 15 negative_sampler.num_negs_per_pos 13.0 +850 15 training.batch_size 1.0 +850 16 model.embedding_dim 2.0 +850 16 model.scoring_fct_norm 2.0 +850 16 loss.margin 9.86567216305461 +850 16 optimizer.lr 0.0011757151258664498 +850 16 negative_sampler.num_negs_per_pos 25.0 +850 16 training.batch_size 3.0 +850 17 model.embedding_dim 0.0 +850 17 model.scoring_fct_norm 1.0 +850 17 loss.margin 6.323438828846497 +850 17 optimizer.lr 0.0027936197748740935 +850 17 negative_sampler.num_negs_per_pos 3.0 +850 17 training.batch_size 1.0 +850 18 model.embedding_dim 2.0 +850 18 model.scoring_fct_norm 1.0 +850 18 loss.margin 2.4960767642299824 +850 18 optimizer.lr 0.0026650384421859777 +850 18 negative_sampler.num_negs_per_pos 4.0 +850 18 training.batch_size 1.0 +850 19 model.embedding_dim 2.0 +850 19 model.scoring_fct_norm 1.0 +850 19 loss.margin 9.867690958177553 +850 19 optimizer.lr 0.0012362485170614931 +850 19 negative_sampler.num_negs_per_pos 31.0 +850 19 training.batch_size 3.0 +850 20 model.embedding_dim 0.0 +850 20 
model.scoring_fct_norm 2.0 +850 20 loss.margin 1.5411723548054137 +850 20 optimizer.lr 0.0013691566280725708 +850 20 negative_sampler.num_negs_per_pos 37.0 +850 20 training.batch_size 3.0 +850 21 model.embedding_dim 1.0 +850 21 model.scoring_fct_norm 1.0 +850 21 loss.margin 6.429771547247825 +850 21 optimizer.lr 0.005595280412754447 +850 21 negative_sampler.num_negs_per_pos 36.0 +850 21 training.batch_size 3.0 +850 22 model.embedding_dim 0.0 +850 22 model.scoring_fct_norm 2.0 +850 22 loss.margin 1.6160169807583913 +850 22 optimizer.lr 0.0671163574447868 +850 22 negative_sampler.num_negs_per_pos 12.0 +850 22 training.batch_size 3.0 +850 23 model.embedding_dim 1.0 +850 23 model.scoring_fct_norm 1.0 +850 23 loss.margin 1.237977394912861 +850 23 optimizer.lr 0.029624132496805948 +850 23 negative_sampler.num_negs_per_pos 34.0 +850 23 training.batch_size 0.0 +850 24 model.embedding_dim 2.0 +850 24 model.scoring_fct_norm 2.0 +850 24 loss.margin 5.624043409833944 +850 24 optimizer.lr 0.0099965335330807 +850 24 negative_sampler.num_negs_per_pos 32.0 +850 24 training.batch_size 3.0 +850 25 model.embedding_dim 1.0 +850 25 model.scoring_fct_norm 1.0 +850 25 loss.margin 5.0982735961515955 +850 25 optimizer.lr 0.003861150486464212 +850 25 negative_sampler.num_negs_per_pos 49.0 +850 25 training.batch_size 0.0 +850 26 model.embedding_dim 0.0 +850 26 model.scoring_fct_norm 1.0 +850 26 loss.margin 6.6437696964496045 +850 26 optimizer.lr 0.031268703321481864 +850 26 negative_sampler.num_negs_per_pos 30.0 +850 26 training.batch_size 0.0 +850 27 model.embedding_dim 0.0 +850 27 model.scoring_fct_norm 2.0 +850 27 loss.margin 7.905708879968246 +850 27 optimizer.lr 0.008040710261633064 +850 27 negative_sampler.num_negs_per_pos 35.0 +850 27 training.batch_size 0.0 +850 28 model.embedding_dim 2.0 +850 28 model.scoring_fct_norm 1.0 +850 28 loss.margin 2.232707735759999 +850 28 optimizer.lr 0.06999716043562977 +850 28 negative_sampler.num_negs_per_pos 31.0 +850 28 training.batch_size 3.0 +850 
29 model.embedding_dim 2.0 +850 29 model.scoring_fct_norm 1.0 +850 29 loss.margin 2.5793752683131377 +850 29 optimizer.lr 0.001955130354076292 +850 29 negative_sampler.num_negs_per_pos 20.0 +850 29 training.batch_size 2.0 +850 30 model.embedding_dim 2.0 +850 30 model.scoring_fct_norm 1.0 +850 30 loss.margin 1.061329083541257 +850 30 optimizer.lr 0.04591537344198143 +850 30 negative_sampler.num_negs_per_pos 0.0 +850 30 training.batch_size 2.0 +850 31 model.embedding_dim 2.0 +850 31 model.scoring_fct_norm 1.0 +850 31 loss.margin 8.168663008334436 +850 31 optimizer.lr 0.06239651670357271 +850 31 negative_sampler.num_negs_per_pos 40.0 +850 31 training.batch_size 0.0 +850 32 model.embedding_dim 0.0 +850 32 model.scoring_fct_norm 2.0 +850 32 loss.margin 9.701788966629996 +850 32 optimizer.lr 0.015725808967355204 +850 32 negative_sampler.num_negs_per_pos 23.0 +850 32 training.batch_size 3.0 +850 33 model.embedding_dim 2.0 +850 33 model.scoring_fct_norm 1.0 +850 33 loss.margin 9.553001166956879 +850 33 optimizer.lr 0.003126799287733558 +850 33 negative_sampler.num_negs_per_pos 17.0 +850 33 training.batch_size 3.0 +850 34 model.embedding_dim 1.0 +850 34 model.scoring_fct_norm 1.0 +850 34 loss.margin 4.1211224249151694 +850 34 optimizer.lr 0.004162639734863767 +850 34 negative_sampler.num_negs_per_pos 46.0 +850 34 training.batch_size 0.0 +850 35 model.embedding_dim 0.0 +850 35 model.scoring_fct_norm 2.0 +850 35 loss.margin 3.054338123841906 +850 35 optimizer.lr 0.029835629054083213 +850 35 negative_sampler.num_negs_per_pos 3.0 +850 35 training.batch_size 0.0 +850 36 model.embedding_dim 0.0 +850 36 model.scoring_fct_norm 2.0 +850 36 loss.margin 6.434330437549923 +850 36 optimizer.lr 0.048645537420310955 +850 36 negative_sampler.num_negs_per_pos 11.0 +850 36 training.batch_size 1.0 +850 37 model.embedding_dim 1.0 +850 37 model.scoring_fct_norm 2.0 +850 37 loss.margin 4.87602964842792 +850 37 optimizer.lr 0.003074570059382606 +850 37 negative_sampler.num_negs_per_pos 15.0 +850 
37 training.batch_size 0.0 +850 38 model.embedding_dim 0.0 +850 38 model.scoring_fct_norm 1.0 +850 38 loss.margin 3.512564696167769 +850 38 optimizer.lr 0.00956636350547086 +850 38 negative_sampler.num_negs_per_pos 29.0 +850 38 training.batch_size 1.0 +850 39 model.embedding_dim 0.0 +850 39 model.scoring_fct_norm 1.0 +850 39 loss.margin 9.213133317174778 +850 39 optimizer.lr 0.034455251305004195 +850 39 negative_sampler.num_negs_per_pos 36.0 +850 39 training.batch_size 1.0 +850 40 model.embedding_dim 2.0 +850 40 model.scoring_fct_norm 1.0 +850 40 loss.margin 2.4542688416615546 +850 40 optimizer.lr 0.01914607137331589 +850 40 negative_sampler.num_negs_per_pos 24.0 +850 40 training.batch_size 0.0 +850 41 model.embedding_dim 0.0 +850 41 model.scoring_fct_norm 1.0 +850 41 loss.margin 4.9308630759978485 +850 41 optimizer.lr 0.0025533600717228093 +850 41 negative_sampler.num_negs_per_pos 16.0 +850 41 training.batch_size 2.0 +850 42 model.embedding_dim 2.0 +850 42 model.scoring_fct_norm 2.0 +850 42 loss.margin 3.50048840794453 +850 42 optimizer.lr 0.005428476774163456 +850 42 negative_sampler.num_negs_per_pos 9.0 +850 42 training.batch_size 1.0 +850 43 model.embedding_dim 0.0 +850 43 model.scoring_fct_norm 2.0 +850 43 loss.margin 3.8798518171343765 +850 43 optimizer.lr 0.021901200504419915 +850 43 negative_sampler.num_negs_per_pos 5.0 +850 43 training.batch_size 0.0 +850 44 model.embedding_dim 0.0 +850 44 model.scoring_fct_norm 1.0 +850 44 loss.margin 0.7723065477216139 +850 44 optimizer.lr 0.039651054527357704 +850 44 negative_sampler.num_negs_per_pos 32.0 +850 44 training.batch_size 0.0 +850 45 model.embedding_dim 1.0 +850 45 model.scoring_fct_norm 2.0 +850 45 loss.margin 5.184664979065973 +850 45 optimizer.lr 0.002063423179114938 +850 45 negative_sampler.num_negs_per_pos 39.0 +850 45 training.batch_size 3.0 +850 46 model.embedding_dim 0.0 +850 46 model.scoring_fct_norm 1.0 +850 46 loss.margin 3.7399033441323417 +850 46 optimizer.lr 0.012673072296539327 +850 46 
negative_sampler.num_negs_per_pos 30.0 +850 46 training.batch_size 0.0 +850 47 model.embedding_dim 0.0 +850 47 model.scoring_fct_norm 1.0 +850 47 loss.margin 7.136042668703851 +850 47 optimizer.lr 0.009261595747225437 +850 47 negative_sampler.num_negs_per_pos 12.0 +850 47 training.batch_size 2.0 +850 48 model.embedding_dim 2.0 +850 48 model.scoring_fct_norm 1.0 +850 48 loss.margin 6.297943836415446 +850 48 optimizer.lr 0.005140646412395006 +850 48 negative_sampler.num_negs_per_pos 29.0 +850 48 training.batch_size 0.0 +850 49 model.embedding_dim 0.0 +850 49 model.scoring_fct_norm 1.0 +850 49 loss.margin 7.114658293019653 +850 49 optimizer.lr 0.04432938416876318 +850 49 negative_sampler.num_negs_per_pos 44.0 +850 49 training.batch_size 3.0 +850 50 model.embedding_dim 1.0 +850 50 model.scoring_fct_norm 2.0 +850 50 loss.margin 2.2617420192845588 +850 50 optimizer.lr 0.0038968777328562167 +850 50 negative_sampler.num_negs_per_pos 30.0 +850 50 training.batch_size 3.0 +850 51 model.embedding_dim 0.0 +850 51 model.scoring_fct_norm 1.0 +850 51 loss.margin 9.988451469497459 +850 51 optimizer.lr 0.02860063122225744 +850 51 negative_sampler.num_negs_per_pos 6.0 +850 51 training.batch_size 0.0 +850 52 model.embedding_dim 0.0 +850 52 model.scoring_fct_norm 2.0 +850 52 loss.margin 6.217621076371108 +850 52 optimizer.lr 0.00112869218165766 +850 52 negative_sampler.num_negs_per_pos 41.0 +850 52 training.batch_size 0.0 +850 53 model.embedding_dim 0.0 +850 53 model.scoring_fct_norm 1.0 +850 53 loss.margin 6.423601185329048 +850 53 optimizer.lr 0.00438907385770874 +850 53 negative_sampler.num_negs_per_pos 5.0 +850 53 training.batch_size 3.0 +850 54 model.embedding_dim 0.0 +850 54 model.scoring_fct_norm 2.0 +850 54 loss.margin 2.8275209709034144 +850 54 optimizer.lr 0.08202974467092372 +850 54 negative_sampler.num_negs_per_pos 14.0 +850 54 training.batch_size 2.0 +850 55 model.embedding_dim 1.0 +850 55 model.scoring_fct_norm 1.0 +850 55 loss.margin 9.651281720913245 +850 55 
optimizer.lr 0.0017592608331291214 +850 55 negative_sampler.num_negs_per_pos 6.0 +850 55 training.batch_size 3.0 +850 56 model.embedding_dim 0.0 +850 56 model.scoring_fct_norm 2.0 +850 56 loss.margin 7.368605798216084 +850 56 optimizer.lr 0.012486859466700667 +850 56 negative_sampler.num_negs_per_pos 41.0 +850 56 training.batch_size 2.0 +850 57 model.embedding_dim 0.0 +850 57 model.scoring_fct_norm 2.0 +850 57 loss.margin 3.14918769435472 +850 57 optimizer.lr 0.03742974107763145 +850 57 negative_sampler.num_negs_per_pos 16.0 +850 57 training.batch_size 2.0 +850 58 model.embedding_dim 2.0 +850 58 model.scoring_fct_norm 2.0 +850 58 loss.margin 4.968946740860971 +850 58 optimizer.lr 0.002952001163168956 +850 58 negative_sampler.num_negs_per_pos 31.0 +850 58 training.batch_size 1.0 +850 59 model.embedding_dim 2.0 +850 59 model.scoring_fct_norm 2.0 +850 59 loss.margin 0.9063405019204558 +850 59 optimizer.lr 0.06530559490304848 +850 59 negative_sampler.num_negs_per_pos 28.0 +850 59 training.batch_size 2.0 +850 60 model.embedding_dim 2.0 +850 60 model.scoring_fct_norm 1.0 +850 60 loss.margin 9.497007483309513 +850 60 optimizer.lr 0.0011112595395108703 +850 60 negative_sampler.num_negs_per_pos 43.0 +850 60 training.batch_size 3.0 +850 61 model.embedding_dim 2.0 +850 61 model.scoring_fct_norm 2.0 +850 61 loss.margin 3.1876652927541724 +850 61 optimizer.lr 0.013413549721566609 +850 61 negative_sampler.num_negs_per_pos 27.0 +850 61 training.batch_size 2.0 +850 62 model.embedding_dim 0.0 +850 62 model.scoring_fct_norm 2.0 +850 62 loss.margin 8.01128408856651 +850 62 optimizer.lr 0.015650231425316814 +850 62 negative_sampler.num_negs_per_pos 30.0 +850 62 training.batch_size 0.0 +850 63 model.embedding_dim 1.0 +850 63 model.scoring_fct_norm 2.0 +850 63 loss.margin 1.3029019378046895 +850 63 optimizer.lr 0.018596128205169888 +850 63 negative_sampler.num_negs_per_pos 33.0 +850 63 training.batch_size 1.0 +850 64 model.embedding_dim 1.0 +850 64 model.scoring_fct_norm 2.0 +850 64 
loss.margin 8.767684179366801 +850 64 optimizer.lr 0.0013679806214555886 +850 64 negative_sampler.num_negs_per_pos 47.0 +850 64 training.batch_size 2.0 +850 65 model.embedding_dim 1.0 +850 65 model.scoring_fct_norm 1.0 +850 65 loss.margin 8.969505256290413 +850 65 optimizer.lr 0.007184398228870086 +850 65 negative_sampler.num_negs_per_pos 20.0 +850 65 training.batch_size 3.0 +850 66 model.embedding_dim 2.0 +850 66 model.scoring_fct_norm 1.0 +850 66 loss.margin 7.72240428650573 +850 66 optimizer.lr 0.0025490177933132274 +850 66 negative_sampler.num_negs_per_pos 16.0 +850 66 training.batch_size 2.0 +850 67 model.embedding_dim 2.0 +850 67 model.scoring_fct_norm 1.0 +850 67 loss.margin 4.450220471539315 +850 67 optimizer.lr 0.001923608445834174 +850 67 negative_sampler.num_negs_per_pos 3.0 +850 67 training.batch_size 3.0 +850 68 model.embedding_dim 2.0 +850 68 model.scoring_fct_norm 2.0 +850 68 loss.margin 5.081127304811217 +850 68 optimizer.lr 0.010803183573429995 +850 68 negative_sampler.num_negs_per_pos 1.0 +850 68 training.batch_size 2.0 +850 69 model.embedding_dim 1.0 +850 69 model.scoring_fct_norm 1.0 +850 69 loss.margin 4.9694652037914855 +850 69 optimizer.lr 0.011971732516131922 +850 69 negative_sampler.num_negs_per_pos 2.0 +850 69 training.batch_size 3.0 +850 70 model.embedding_dim 1.0 +850 70 model.scoring_fct_norm 1.0 +850 70 loss.margin 6.462478438960961 +850 70 optimizer.lr 0.0012777546390478436 +850 70 negative_sampler.num_negs_per_pos 9.0 +850 70 training.batch_size 0.0 +850 71 model.embedding_dim 0.0 +850 71 model.scoring_fct_norm 1.0 +850 71 loss.margin 1.9999796752759742 +850 71 optimizer.lr 0.024817093832856383 +850 71 negative_sampler.num_negs_per_pos 47.0 +850 71 training.batch_size 1.0 +850 72 model.embedding_dim 0.0 +850 72 model.scoring_fct_norm 2.0 +850 72 loss.margin 8.605848760130318 +850 72 optimizer.lr 0.008445909461849624 +850 72 negative_sampler.num_negs_per_pos 48.0 +850 72 training.batch_size 1.0 +850 73 model.embedding_dim 2.0 +850 73 
model.scoring_fct_norm 1.0 +850 73 loss.margin 3.923908104934284 +850 73 optimizer.lr 0.02148556981777471 +850 73 negative_sampler.num_negs_per_pos 1.0 +850 73 training.batch_size 1.0 +850 1 dataset """yago310""" +850 1 model """transe""" +850 1 loss """marginranking""" +850 1 regularizer """no""" +850 1 optimizer """adam""" +850 1 training_loop """owa""" +850 1 negative_sampler """basic""" +850 1 evaluator """rankbased""" +850 2 dataset """yago310""" +850 2 model """transe""" +850 2 loss """marginranking""" +850 2 regularizer """no""" +850 2 optimizer """adam""" +850 2 training_loop """owa""" +850 2 negative_sampler """basic""" +850 2 evaluator """rankbased""" +850 3 dataset """yago310""" +850 3 model """transe""" +850 3 loss """marginranking""" +850 3 regularizer """no""" +850 3 optimizer """adam""" +850 3 training_loop """owa""" +850 3 negative_sampler """basic""" +850 3 evaluator """rankbased""" +850 4 dataset """yago310""" +850 4 model """transe""" +850 4 loss """marginranking""" +850 4 regularizer """no""" +850 4 optimizer """adam""" +850 4 training_loop """owa""" +850 4 negative_sampler """basic""" +850 4 evaluator """rankbased""" +850 5 dataset """yago310""" +850 5 model """transe""" +850 5 loss """marginranking""" +850 5 regularizer """no""" +850 5 optimizer """adam""" +850 5 training_loop """owa""" +850 5 negative_sampler """basic""" +850 5 evaluator """rankbased""" +850 6 dataset """yago310""" +850 6 model """transe""" +850 6 loss """marginranking""" +850 6 regularizer """no""" +850 6 optimizer """adam""" +850 6 training_loop """owa""" +850 6 negative_sampler """basic""" +850 6 evaluator """rankbased""" +850 7 dataset """yago310""" +850 7 model """transe""" +850 7 loss """marginranking""" +850 7 regularizer """no""" +850 7 optimizer """adam""" +850 7 training_loop """owa""" +850 7 negative_sampler """basic""" +850 7 evaluator """rankbased""" +850 8 dataset """yago310""" +850 8 model """transe""" +850 8 loss """marginranking""" +850 8 regularizer """no""" 
+850 8 optimizer """adam""" +850 8 training_loop """owa""" +850 8 negative_sampler """basic""" +850 8 evaluator """rankbased""" +850 9 dataset """yago310""" +850 9 model """transe""" +850 9 loss """marginranking""" +850 9 regularizer """no""" +850 9 optimizer """adam""" +850 9 training_loop """owa""" +850 9 negative_sampler """basic""" +850 9 evaluator """rankbased""" +850 10 dataset """yago310""" +850 10 model """transe""" +850 10 loss """marginranking""" +850 10 regularizer """no""" +850 10 optimizer """adam""" +850 10 training_loop """owa""" +850 10 negative_sampler """basic""" +850 10 evaluator """rankbased""" +850 11 dataset """yago310""" +850 11 model """transe""" +850 11 loss """marginranking""" +850 11 regularizer """no""" +850 11 optimizer """adam""" +850 11 training_loop """owa""" +850 11 negative_sampler """basic""" +850 11 evaluator """rankbased""" +850 12 dataset """yago310""" +850 12 model """transe""" +850 12 loss """marginranking""" +850 12 regularizer """no""" +850 12 optimizer """adam""" +850 12 training_loop """owa""" +850 12 negative_sampler """basic""" +850 12 evaluator """rankbased""" +850 13 dataset """yago310""" +850 13 model """transe""" +850 13 loss """marginranking""" +850 13 regularizer """no""" +850 13 optimizer """adam""" +850 13 training_loop """owa""" +850 13 negative_sampler """basic""" +850 13 evaluator """rankbased""" +850 14 dataset """yago310""" +850 14 model """transe""" +850 14 loss """marginranking""" +850 14 regularizer """no""" +850 14 optimizer """adam""" +850 14 training_loop """owa""" +850 14 negative_sampler """basic""" +850 14 evaluator """rankbased""" +850 15 dataset """yago310""" +850 15 model """transe""" +850 15 loss """marginranking""" +850 15 regularizer """no""" +850 15 optimizer """adam""" +850 15 training_loop """owa""" +850 15 negative_sampler """basic""" +850 15 evaluator """rankbased""" +850 16 dataset """yago310""" +850 16 model """transe""" +850 16 loss """marginranking""" +850 16 regularizer """no""" 
+850 16 optimizer """adam""" +850 16 training_loop """owa""" +850 16 negative_sampler """basic""" +850 16 evaluator """rankbased""" +850 17 dataset """yago310""" +850 17 model """transe""" +850 17 loss """marginranking""" +850 17 regularizer """no""" +850 17 optimizer """adam""" +850 17 training_loop """owa""" +850 17 negative_sampler """basic""" +850 17 evaluator """rankbased""" +850 18 dataset """yago310""" +850 18 model """transe""" +850 18 loss """marginranking""" +850 18 regularizer """no""" +850 18 optimizer """adam""" +850 18 training_loop """owa""" +850 18 negative_sampler """basic""" +850 18 evaluator """rankbased""" +850 19 dataset """yago310""" +850 19 model """transe""" +850 19 loss """marginranking""" +850 19 regularizer """no""" +850 19 optimizer """adam""" +850 19 training_loop """owa""" +850 19 negative_sampler """basic""" +850 19 evaluator """rankbased""" +850 20 dataset """yago310""" +850 20 model """transe""" +850 20 loss """marginranking""" +850 20 regularizer """no""" +850 20 optimizer """adam""" +850 20 training_loop """owa""" +850 20 negative_sampler """basic""" +850 20 evaluator """rankbased""" +850 21 dataset """yago310""" +850 21 model """transe""" +850 21 loss """marginranking""" +850 21 regularizer """no""" +850 21 optimizer """adam""" +850 21 training_loop """owa""" +850 21 negative_sampler """basic""" +850 21 evaluator """rankbased""" +850 22 dataset """yago310""" +850 22 model """transe""" +850 22 loss """marginranking""" +850 22 regularizer """no""" +850 22 optimizer """adam""" +850 22 training_loop """owa""" +850 22 negative_sampler """basic""" +850 22 evaluator """rankbased""" +850 23 dataset """yago310""" +850 23 model """transe""" +850 23 loss """marginranking""" +850 23 regularizer """no""" +850 23 optimizer """adam""" +850 23 training_loop """owa""" +850 23 negative_sampler """basic""" +850 23 evaluator """rankbased""" +850 24 dataset """yago310""" +850 24 model """transe""" +850 24 loss """marginranking""" +850 24 regularizer 
"""no""" +850 24 optimizer """adam""" +850 24 training_loop """owa""" +850 24 negative_sampler """basic""" +850 24 evaluator """rankbased""" +850 25 dataset """yago310""" +850 25 model """transe""" +850 25 loss """marginranking""" +850 25 regularizer """no""" +850 25 optimizer """adam""" +850 25 training_loop """owa""" +850 25 negative_sampler """basic""" +850 25 evaluator """rankbased""" +850 26 dataset """yago310""" +850 26 model """transe""" +850 26 loss """marginranking""" +850 26 regularizer """no""" +850 26 optimizer """adam""" +850 26 training_loop """owa""" +850 26 negative_sampler """basic""" +850 26 evaluator """rankbased""" +850 27 dataset """yago310""" +850 27 model """transe""" +850 27 loss """marginranking""" +850 27 regularizer """no""" +850 27 optimizer """adam""" +850 27 training_loop """owa""" +850 27 negative_sampler """basic""" +850 27 evaluator """rankbased""" +850 28 dataset """yago310""" +850 28 model """transe""" +850 28 loss """marginranking""" +850 28 regularizer """no""" +850 28 optimizer """adam""" +850 28 training_loop """owa""" +850 28 negative_sampler """basic""" +850 28 evaluator """rankbased""" +850 29 dataset """yago310""" +850 29 model """transe""" +850 29 loss """marginranking""" +850 29 regularizer """no""" +850 29 optimizer """adam""" +850 29 training_loop """owa""" +850 29 negative_sampler """basic""" +850 29 evaluator """rankbased""" +850 30 dataset """yago310""" +850 30 model """transe""" +850 30 loss """marginranking""" +850 30 regularizer """no""" +850 30 optimizer """adam""" +850 30 training_loop """owa""" +850 30 negative_sampler """basic""" +850 30 evaluator """rankbased""" +850 31 dataset """yago310""" +850 31 model """transe""" +850 31 loss """marginranking""" +850 31 regularizer """no""" +850 31 optimizer """adam""" +850 31 training_loop """owa""" +850 31 negative_sampler """basic""" +850 31 evaluator """rankbased""" +850 32 dataset """yago310""" +850 32 model """transe""" +850 32 loss """marginranking""" +850 32 
regularizer """no""" +850 32 optimizer """adam""" +850 32 training_loop """owa""" +850 32 negative_sampler """basic""" +850 32 evaluator """rankbased""" +850 33 dataset """yago310""" +850 33 model """transe""" +850 33 loss """marginranking""" +850 33 regularizer """no""" +850 33 optimizer """adam""" +850 33 training_loop """owa""" +850 33 negative_sampler """basic""" +850 33 evaluator """rankbased""" +850 34 dataset """yago310""" +850 34 model """transe""" +850 34 loss """marginranking""" +850 34 regularizer """no""" +850 34 optimizer """adam""" +850 34 training_loop """owa""" +850 34 negative_sampler """basic""" +850 34 evaluator """rankbased""" +850 35 dataset """yago310""" +850 35 model """transe""" +850 35 loss """marginranking""" +850 35 regularizer """no""" +850 35 optimizer """adam""" +850 35 training_loop """owa""" +850 35 negative_sampler """basic""" +850 35 evaluator """rankbased""" +850 36 dataset """yago310""" +850 36 model """transe""" +850 36 loss """marginranking""" +850 36 regularizer """no""" +850 36 optimizer """adam""" +850 36 training_loop """owa""" +850 36 negative_sampler """basic""" +850 36 evaluator """rankbased""" +850 37 dataset """yago310""" +850 37 model """transe""" +850 37 loss """marginranking""" +850 37 regularizer """no""" +850 37 optimizer """adam""" +850 37 training_loop """owa""" +850 37 negative_sampler """basic""" +850 37 evaluator """rankbased""" +850 38 dataset """yago310""" +850 38 model """transe""" +850 38 loss """marginranking""" +850 38 regularizer """no""" +850 38 optimizer """adam""" +850 38 training_loop """owa""" +850 38 negative_sampler """basic""" +850 38 evaluator """rankbased""" +850 39 dataset """yago310""" +850 39 model """transe""" +850 39 loss """marginranking""" +850 39 regularizer """no""" +850 39 optimizer """adam""" +850 39 training_loop """owa""" +850 39 negative_sampler """basic""" +850 39 evaluator """rankbased""" +850 40 dataset """yago310""" +850 40 model """transe""" +850 40 loss """marginranking""" 
+850 40 regularizer """no""" +850 40 optimizer """adam""" +850 40 training_loop """owa""" +850 40 negative_sampler """basic""" +850 40 evaluator """rankbased""" +850 41 dataset """yago310""" +850 41 model """transe""" +850 41 loss """marginranking""" +850 41 regularizer """no""" +850 41 optimizer """adam""" +850 41 training_loop """owa""" +850 41 negative_sampler """basic""" +850 41 evaluator """rankbased""" +850 42 dataset """yago310""" +850 42 model """transe""" +850 42 loss """marginranking""" +850 42 regularizer """no""" +850 42 optimizer """adam""" +850 42 training_loop """owa""" +850 42 negative_sampler """basic""" +850 42 evaluator """rankbased""" +850 43 dataset """yago310""" +850 43 model """transe""" +850 43 loss """marginranking""" +850 43 regularizer """no""" +850 43 optimizer """adam""" +850 43 training_loop """owa""" +850 43 negative_sampler """basic""" +850 43 evaluator """rankbased""" +850 44 dataset """yago310""" +850 44 model """transe""" +850 44 loss """marginranking""" +850 44 regularizer """no""" +850 44 optimizer """adam""" +850 44 training_loop """owa""" +850 44 negative_sampler """basic""" +850 44 evaluator """rankbased""" +850 45 dataset """yago310""" +850 45 model """transe""" +850 45 loss """marginranking""" +850 45 regularizer """no""" +850 45 optimizer """adam""" +850 45 training_loop """owa""" +850 45 negative_sampler """basic""" +850 45 evaluator """rankbased""" +850 46 dataset """yago310""" +850 46 model """transe""" +850 46 loss """marginranking""" +850 46 regularizer """no""" +850 46 optimizer """adam""" +850 46 training_loop """owa""" +850 46 negative_sampler """basic""" +850 46 evaluator """rankbased""" +850 47 dataset """yago310""" +850 47 model """transe""" +850 47 loss """marginranking""" +850 47 regularizer """no""" +850 47 optimizer """adam""" +850 47 training_loop """owa""" +850 47 negative_sampler """basic""" +850 47 evaluator """rankbased""" +850 48 dataset """yago310""" +850 48 model """transe""" +850 48 loss 
"""marginranking""" +850 48 regularizer """no""" +850 48 optimizer """adam""" +850 48 training_loop """owa""" +850 48 negative_sampler """basic""" +850 48 evaluator """rankbased""" +850 49 dataset """yago310""" +850 49 model """transe""" +850 49 loss """marginranking""" +850 49 regularizer """no""" +850 49 optimizer """adam""" +850 49 training_loop """owa""" +850 49 negative_sampler """basic""" +850 49 evaluator """rankbased""" +850 50 dataset """yago310""" +850 50 model """transe""" +850 50 loss """marginranking""" +850 50 regularizer """no""" +850 50 optimizer """adam""" +850 50 training_loop """owa""" +850 50 negative_sampler """basic""" +850 50 evaluator """rankbased""" +850 51 dataset """yago310""" +850 51 model """transe""" +850 51 loss """marginranking""" +850 51 regularizer """no""" +850 51 optimizer """adam""" +850 51 training_loop """owa""" +850 51 negative_sampler """basic""" +850 51 evaluator """rankbased""" +850 52 dataset """yago310""" +850 52 model """transe""" +850 52 loss """marginranking""" +850 52 regularizer """no""" +850 52 optimizer """adam""" +850 52 training_loop """owa""" +850 52 negative_sampler """basic""" +850 52 evaluator """rankbased""" +850 53 dataset """yago310""" +850 53 model """transe""" +850 53 loss """marginranking""" +850 53 regularizer """no""" +850 53 optimizer """adam""" +850 53 training_loop """owa""" +850 53 negative_sampler """basic""" +850 53 evaluator """rankbased""" +850 54 dataset """yago310""" +850 54 model """transe""" +850 54 loss """marginranking""" +850 54 regularizer """no""" +850 54 optimizer """adam""" +850 54 training_loop """owa""" +850 54 negative_sampler """basic""" +850 54 evaluator """rankbased""" +850 55 dataset """yago310""" +850 55 model """transe""" +850 55 loss """marginranking""" +850 55 regularizer """no""" +850 55 optimizer """adam""" +850 55 training_loop """owa""" +850 55 negative_sampler """basic""" +850 55 evaluator """rankbased""" +850 56 dataset """yago310""" +850 56 model """transe""" +850 
56 loss """marginranking""" +850 56 regularizer """no""" +850 56 optimizer """adam""" +850 56 training_loop """owa""" +850 56 negative_sampler """basic""" +850 56 evaluator """rankbased""" +850 57 dataset """yago310""" +850 57 model """transe""" +850 57 loss """marginranking""" +850 57 regularizer """no""" +850 57 optimizer """adam""" +850 57 training_loop """owa""" +850 57 negative_sampler """basic""" +850 57 evaluator """rankbased""" +850 58 dataset """yago310""" +850 58 model """transe""" +850 58 loss """marginranking""" +850 58 regularizer """no""" +850 58 optimizer """adam""" +850 58 training_loop """owa""" +850 58 negative_sampler """basic""" +850 58 evaluator """rankbased""" +850 59 dataset """yago310""" +850 59 model """transe""" +850 59 loss """marginranking""" +850 59 regularizer """no""" +850 59 optimizer """adam""" +850 59 training_loop """owa""" +850 59 negative_sampler """basic""" +850 59 evaluator """rankbased""" +850 60 dataset """yago310""" +850 60 model """transe""" +850 60 loss """marginranking""" +850 60 regularizer """no""" +850 60 optimizer """adam""" +850 60 training_loop """owa""" +850 60 negative_sampler """basic""" +850 60 evaluator """rankbased""" +850 61 dataset """yago310""" +850 61 model """transe""" +850 61 loss """marginranking""" +850 61 regularizer """no""" +850 61 optimizer """adam""" +850 61 training_loop """owa""" +850 61 negative_sampler """basic""" +850 61 evaluator """rankbased""" +850 62 dataset """yago310""" +850 62 model """transe""" +850 62 loss """marginranking""" +850 62 regularizer """no""" +850 62 optimizer """adam""" +850 62 training_loop """owa""" +850 62 negative_sampler """basic""" +850 62 evaluator """rankbased""" +850 63 dataset """yago310""" +850 63 model """transe""" +850 63 loss """marginranking""" +850 63 regularizer """no""" +850 63 optimizer """adam""" +850 63 training_loop """owa""" +850 63 negative_sampler """basic""" +850 63 evaluator """rankbased""" +850 64 dataset """yago310""" +850 64 model 
"""transe""" +850 64 loss """marginranking""" +850 64 regularizer """no""" +850 64 optimizer """adam""" +850 64 training_loop """owa""" +850 64 negative_sampler """basic""" +850 64 evaluator """rankbased""" +850 65 dataset """yago310""" +850 65 model """transe""" +850 65 loss """marginranking""" +850 65 regularizer """no""" +850 65 optimizer """adam""" +850 65 training_loop """owa""" +850 65 negative_sampler """basic""" +850 65 evaluator """rankbased""" +850 66 dataset """yago310""" +850 66 model """transe""" +850 66 loss """marginranking""" +850 66 regularizer """no""" +850 66 optimizer """adam""" +850 66 training_loop """owa""" +850 66 negative_sampler """basic""" +850 66 evaluator """rankbased""" +850 67 dataset """yago310""" +850 67 model """transe""" +850 67 loss """marginranking""" +850 67 regularizer """no""" +850 67 optimizer """adam""" +850 67 training_loop """owa""" +850 67 negative_sampler """basic""" +850 67 evaluator """rankbased""" +850 68 dataset """yago310""" +850 68 model """transe""" +850 68 loss """marginranking""" +850 68 regularizer """no""" +850 68 optimizer """adam""" +850 68 training_loop """owa""" +850 68 negative_sampler """basic""" +850 68 evaluator """rankbased""" +850 69 dataset """yago310""" +850 69 model """transe""" +850 69 loss """marginranking""" +850 69 regularizer """no""" +850 69 optimizer """adam""" +850 69 training_loop """owa""" +850 69 negative_sampler """basic""" +850 69 evaluator """rankbased""" +850 70 dataset """yago310""" +850 70 model """transe""" +850 70 loss """marginranking""" +850 70 regularizer """no""" +850 70 optimizer """adam""" +850 70 training_loop """owa""" +850 70 negative_sampler """basic""" +850 70 evaluator """rankbased""" +850 71 dataset """yago310""" +850 71 model """transe""" +850 71 loss """marginranking""" +850 71 regularizer """no""" +850 71 optimizer """adam""" +850 71 training_loop """owa""" +850 71 negative_sampler """basic""" +850 71 evaluator """rankbased""" +850 72 dataset """yago310""" +850 
72 model """transe""" +850 72 loss """marginranking""" +850 72 regularizer """no""" +850 72 optimizer """adam""" +850 72 training_loop """owa""" +850 72 negative_sampler """basic""" +850 72 evaluator """rankbased""" +850 73 dataset """yago310""" +850 73 model """transe""" +850 73 loss """marginranking""" +850 73 regularizer """no""" +850 73 optimizer """adam""" +850 73 training_loop """owa""" +850 73 negative_sampler """basic""" +850 73 evaluator """rankbased""" +851 1 model.embedding_dim 1.0 +851 1 model.scoring_fct_norm 1.0 +851 1 optimizer.lr 0.08099287361515743 +851 1 negative_sampler.num_negs_per_pos 5.0 +851 1 training.batch_size 0.0 +851 2 model.embedding_dim 0.0 +851 2 model.scoring_fct_norm 2.0 +851 2 optimizer.lr 0.040602866197171676 +851 2 negative_sampler.num_negs_per_pos 3.0 +851 2 training.batch_size 3.0 +851 3 model.embedding_dim 1.0 +851 3 model.scoring_fct_norm 1.0 +851 3 optimizer.lr 0.020487294656607553 +851 3 negative_sampler.num_negs_per_pos 3.0 +851 3 training.batch_size 2.0 +851 4 model.embedding_dim 1.0 +851 4 model.scoring_fct_norm 2.0 +851 4 optimizer.lr 0.07072477834526968 +851 4 negative_sampler.num_negs_per_pos 32.0 +851 4 training.batch_size 3.0 +851 5 model.embedding_dim 2.0 +851 5 model.scoring_fct_norm 1.0 +851 5 optimizer.lr 0.0027951366352542586 +851 5 negative_sampler.num_negs_per_pos 16.0 +851 5 training.batch_size 2.0 +851 6 model.embedding_dim 0.0 +851 6 model.scoring_fct_norm 1.0 +851 6 optimizer.lr 0.006910709645506556 +851 6 negative_sampler.num_negs_per_pos 32.0 +851 6 training.batch_size 2.0 +851 7 model.embedding_dim 2.0 +851 7 model.scoring_fct_norm 1.0 +851 7 optimizer.lr 0.001819306984794943 +851 7 negative_sampler.num_negs_per_pos 14.0 +851 7 training.batch_size 0.0 +851 8 model.embedding_dim 1.0 +851 8 model.scoring_fct_norm 1.0 +851 8 optimizer.lr 0.04872203299018867 +851 8 negative_sampler.num_negs_per_pos 15.0 +851 8 training.batch_size 2.0 +851 9 model.embedding_dim 1.0 +851 9 model.scoring_fct_norm 1.0 +851 9 
optimizer.lr 0.008113230974387575 +851 9 negative_sampler.num_negs_per_pos 29.0 +851 9 training.batch_size 2.0 +851 10 model.embedding_dim 2.0 +851 10 model.scoring_fct_norm 1.0 +851 10 optimizer.lr 0.0011956516405490169 +851 10 negative_sampler.num_negs_per_pos 8.0 +851 10 training.batch_size 2.0 +851 11 model.embedding_dim 0.0 +851 11 model.scoring_fct_norm 2.0 +851 11 optimizer.lr 0.02168400834656942 +851 11 negative_sampler.num_negs_per_pos 21.0 +851 11 training.batch_size 2.0 +851 12 model.embedding_dim 0.0 +851 12 model.scoring_fct_norm 1.0 +851 12 optimizer.lr 0.07349140811701677 +851 12 negative_sampler.num_negs_per_pos 4.0 +851 12 training.batch_size 0.0 +851 13 model.embedding_dim 2.0 +851 13 model.scoring_fct_norm 2.0 +851 13 optimizer.lr 0.03946079610277034 +851 13 negative_sampler.num_negs_per_pos 46.0 +851 13 training.batch_size 3.0 +851 14 model.embedding_dim 0.0 +851 14 model.scoring_fct_norm 2.0 +851 14 optimizer.lr 0.06341254733516294 +851 14 negative_sampler.num_negs_per_pos 8.0 +851 14 training.batch_size 2.0 +851 15 model.embedding_dim 0.0 +851 15 model.scoring_fct_norm 1.0 +851 15 optimizer.lr 0.03424717810975029 +851 15 negative_sampler.num_negs_per_pos 48.0 +851 15 training.batch_size 0.0 +851 16 model.embedding_dim 2.0 +851 16 model.scoring_fct_norm 1.0 +851 16 optimizer.lr 0.0027826114674703788 +851 16 negative_sampler.num_negs_per_pos 44.0 +851 16 training.batch_size 0.0 +851 17 model.embedding_dim 0.0 +851 17 model.scoring_fct_norm 1.0 +851 17 optimizer.lr 0.03009028736594327 +851 17 negative_sampler.num_negs_per_pos 13.0 +851 17 training.batch_size 1.0 +851 18 model.embedding_dim 1.0 +851 18 model.scoring_fct_norm 1.0 +851 18 optimizer.lr 0.006953449828106799 +851 18 negative_sampler.num_negs_per_pos 15.0 +851 18 training.batch_size 1.0 +851 19 model.embedding_dim 2.0 +851 19 model.scoring_fct_norm 1.0 +851 19 optimizer.lr 0.0023264234189003946 +851 19 negative_sampler.num_negs_per_pos 9.0 +851 19 training.batch_size 1.0 +851 20 
model.embedding_dim 2.0 +851 20 model.scoring_fct_norm 1.0 +851 20 optimizer.lr 0.002506281996744387 +851 20 negative_sampler.num_negs_per_pos 36.0 +851 20 training.batch_size 1.0 +851 21 model.embedding_dim 0.0 +851 21 model.scoring_fct_norm 1.0 +851 21 optimizer.lr 0.003024293802256065 +851 21 negative_sampler.num_negs_per_pos 17.0 +851 21 training.batch_size 2.0 +851 22 model.embedding_dim 2.0 +851 22 model.scoring_fct_norm 2.0 +851 22 optimizer.lr 0.016861521005012092 +851 22 negative_sampler.num_negs_per_pos 0.0 +851 22 training.batch_size 1.0 +851 23 model.embedding_dim 1.0 +851 23 model.scoring_fct_norm 1.0 +851 23 optimizer.lr 0.008095610783947925 +851 23 negative_sampler.num_negs_per_pos 44.0 +851 23 training.batch_size 0.0 +851 24 model.embedding_dim 2.0 +851 24 model.scoring_fct_norm 1.0 +851 24 optimizer.lr 0.003597976721438521 +851 24 negative_sampler.num_negs_per_pos 0.0 +851 24 training.batch_size 1.0 +851 25 model.embedding_dim 1.0 +851 25 model.scoring_fct_norm 1.0 +851 25 optimizer.lr 0.005365534600699123 +851 25 negative_sampler.num_negs_per_pos 15.0 +851 25 training.batch_size 0.0 +851 26 model.embedding_dim 0.0 +851 26 model.scoring_fct_norm 2.0 +851 26 optimizer.lr 0.0014991435882950545 +851 26 negative_sampler.num_negs_per_pos 35.0 +851 26 training.batch_size 1.0 +851 27 model.embedding_dim 1.0 +851 27 model.scoring_fct_norm 2.0 +851 27 optimizer.lr 0.045362457713673865 +851 27 negative_sampler.num_negs_per_pos 41.0 +851 27 training.batch_size 1.0 +851 28 model.embedding_dim 0.0 +851 28 model.scoring_fct_norm 2.0 +851 28 optimizer.lr 0.005840344893539028 +851 28 negative_sampler.num_negs_per_pos 36.0 +851 28 training.batch_size 0.0 +851 29 model.embedding_dim 1.0 +851 29 model.scoring_fct_norm 1.0 +851 29 optimizer.lr 0.03902224971449768 +851 29 negative_sampler.num_negs_per_pos 39.0 +851 29 training.batch_size 3.0 +851 30 model.embedding_dim 1.0 +851 30 model.scoring_fct_norm 1.0 +851 30 optimizer.lr 0.003410989167274406 +851 30 
negative_sampler.num_negs_per_pos 16.0 +851 30 training.batch_size 3.0 +851 31 model.embedding_dim 1.0 +851 31 model.scoring_fct_norm 1.0 +851 31 optimizer.lr 0.03789774464426936 +851 31 negative_sampler.num_negs_per_pos 49.0 +851 31 training.batch_size 1.0 +851 32 model.embedding_dim 1.0 +851 32 model.scoring_fct_norm 1.0 +851 32 optimizer.lr 0.009745136940822631 +851 32 negative_sampler.num_negs_per_pos 4.0 +851 32 training.batch_size 1.0 +851 33 model.embedding_dim 0.0 +851 33 model.scoring_fct_norm 1.0 +851 33 optimizer.lr 0.0014018635230635137 +851 33 negative_sampler.num_negs_per_pos 34.0 +851 33 training.batch_size 0.0 +851 34 model.embedding_dim 2.0 +851 34 model.scoring_fct_norm 2.0 +851 34 optimizer.lr 0.00204514361243921 +851 34 negative_sampler.num_negs_per_pos 41.0 +851 34 training.batch_size 1.0 +851 35 model.embedding_dim 2.0 +851 35 model.scoring_fct_norm 1.0 +851 35 optimizer.lr 0.002110184314381085 +851 35 negative_sampler.num_negs_per_pos 44.0 +851 35 training.batch_size 2.0 +851 36 model.embedding_dim 1.0 +851 36 model.scoring_fct_norm 1.0 +851 36 optimizer.lr 0.022600717128188928 +851 36 negative_sampler.num_negs_per_pos 3.0 +851 36 training.batch_size 1.0 +851 37 model.embedding_dim 1.0 +851 37 model.scoring_fct_norm 2.0 +851 37 optimizer.lr 0.05942369635215413 +851 37 negative_sampler.num_negs_per_pos 48.0 +851 37 training.batch_size 1.0 +851 38 model.embedding_dim 0.0 +851 38 model.scoring_fct_norm 1.0 +851 38 optimizer.lr 0.028975267868645363 +851 38 negative_sampler.num_negs_per_pos 37.0 +851 38 training.batch_size 3.0 +851 39 model.embedding_dim 2.0 +851 39 model.scoring_fct_norm 1.0 +851 39 optimizer.lr 0.008013509906898066 +851 39 negative_sampler.num_negs_per_pos 11.0 +851 39 training.batch_size 3.0 +851 40 model.embedding_dim 0.0 +851 40 model.scoring_fct_norm 1.0 +851 40 optimizer.lr 0.0010974257717206645 +851 40 negative_sampler.num_negs_per_pos 43.0 +851 40 training.batch_size 3.0 +851 41 model.embedding_dim 1.0 +851 41 
model.scoring_fct_norm 1.0 +851 41 optimizer.lr 0.0011675498121926772 +851 41 negative_sampler.num_negs_per_pos 3.0 +851 41 training.batch_size 0.0 +851 42 model.embedding_dim 0.0 +851 42 model.scoring_fct_norm 1.0 +851 42 optimizer.lr 0.003659529889272173 +851 42 negative_sampler.num_negs_per_pos 9.0 +851 42 training.batch_size 1.0 +851 43 model.embedding_dim 1.0 +851 43 model.scoring_fct_norm 2.0 +851 43 optimizer.lr 0.014775403231772812 +851 43 negative_sampler.num_negs_per_pos 9.0 +851 43 training.batch_size 3.0 +851 44 model.embedding_dim 2.0 +851 44 model.scoring_fct_norm 2.0 +851 44 optimizer.lr 0.00412001214180137 +851 44 negative_sampler.num_negs_per_pos 6.0 +851 44 training.batch_size 0.0 +851 1 dataset """yago310""" +851 1 model """transe""" +851 1 loss """softplus""" +851 1 regularizer """no""" +851 1 optimizer """adam""" +851 1 training_loop """owa""" +851 1 negative_sampler """basic""" +851 1 evaluator """rankbased""" +851 2 dataset """yago310""" +851 2 model """transe""" +851 2 loss """softplus""" +851 2 regularizer """no""" +851 2 optimizer """adam""" +851 2 training_loop """owa""" +851 2 negative_sampler """basic""" +851 2 evaluator """rankbased""" +851 3 dataset """yago310""" +851 3 model """transe""" +851 3 loss """softplus""" +851 3 regularizer """no""" +851 3 optimizer """adam""" +851 3 training_loop """owa""" +851 3 negative_sampler """basic""" +851 3 evaluator """rankbased""" +851 4 dataset """yago310""" +851 4 model """transe""" +851 4 loss """softplus""" +851 4 regularizer """no""" +851 4 optimizer """adam""" +851 4 training_loop """owa""" +851 4 negative_sampler """basic""" +851 4 evaluator """rankbased""" +851 5 dataset """yago310""" +851 5 model """transe""" +851 5 loss """softplus""" +851 5 regularizer """no""" +851 5 optimizer """adam""" +851 5 training_loop """owa""" +851 5 negative_sampler """basic""" +851 5 evaluator """rankbased""" +851 6 dataset """yago310""" +851 6 model """transe""" +851 6 loss """softplus""" +851 6 regularizer 
"""no""" +851 6 optimizer """adam""" +851 6 training_loop """owa""" +851 6 negative_sampler """basic""" +851 6 evaluator """rankbased""" +851 7 dataset """yago310""" +851 7 model """transe""" +851 7 loss """softplus""" +851 7 regularizer """no""" +851 7 optimizer """adam""" +851 7 training_loop """owa""" +851 7 negative_sampler """basic""" +851 7 evaluator """rankbased""" +851 8 dataset """yago310""" +851 8 model """transe""" +851 8 loss """softplus""" +851 8 regularizer """no""" +851 8 optimizer """adam""" +851 8 training_loop """owa""" +851 8 negative_sampler """basic""" +851 8 evaluator """rankbased""" +851 9 dataset """yago310""" +851 9 model """transe""" +851 9 loss """softplus""" +851 9 regularizer """no""" +851 9 optimizer """adam""" +851 9 training_loop """owa""" +851 9 negative_sampler """basic""" +851 9 evaluator """rankbased""" +851 10 dataset """yago310""" +851 10 model """transe""" +851 10 loss """softplus""" +851 10 regularizer """no""" +851 10 optimizer """adam""" +851 10 training_loop """owa""" +851 10 negative_sampler """basic""" +851 10 evaluator """rankbased""" +851 11 dataset """yago310""" +851 11 model """transe""" +851 11 loss """softplus""" +851 11 regularizer """no""" +851 11 optimizer """adam""" +851 11 training_loop """owa""" +851 11 negative_sampler """basic""" +851 11 evaluator """rankbased""" +851 12 dataset """yago310""" +851 12 model """transe""" +851 12 loss """softplus""" +851 12 regularizer """no""" +851 12 optimizer """adam""" +851 12 training_loop """owa""" +851 12 negative_sampler """basic""" +851 12 evaluator """rankbased""" +851 13 dataset """yago310""" +851 13 model """transe""" +851 13 loss """softplus""" +851 13 regularizer """no""" +851 13 optimizer """adam""" +851 13 training_loop """owa""" +851 13 negative_sampler """basic""" +851 13 evaluator """rankbased""" +851 14 dataset """yago310""" +851 14 model """transe""" +851 14 loss """softplus""" +851 14 regularizer """no""" +851 14 optimizer """adam""" +851 14 training_loop 
"""owa""" +851 14 negative_sampler """basic""" +851 14 evaluator """rankbased""" +851 15 dataset """yago310""" +851 15 model """transe""" +851 15 loss """softplus""" +851 15 regularizer """no""" +851 15 optimizer """adam""" +851 15 training_loop """owa""" +851 15 negative_sampler """basic""" +851 15 evaluator """rankbased""" +851 16 dataset """yago310""" +851 16 model """transe""" +851 16 loss """softplus""" +851 16 regularizer """no""" +851 16 optimizer """adam""" +851 16 training_loop """owa""" +851 16 negative_sampler """basic""" +851 16 evaluator """rankbased""" +851 17 dataset """yago310""" +851 17 model """transe""" +851 17 loss """softplus""" +851 17 regularizer """no""" +851 17 optimizer """adam""" +851 17 training_loop """owa""" +851 17 negative_sampler """basic""" +851 17 evaluator """rankbased""" +851 18 dataset """yago310""" +851 18 model """transe""" +851 18 loss """softplus""" +851 18 regularizer """no""" +851 18 optimizer """adam""" +851 18 training_loop """owa""" +851 18 negative_sampler """basic""" +851 18 evaluator """rankbased""" +851 19 dataset """yago310""" +851 19 model """transe""" +851 19 loss """softplus""" +851 19 regularizer """no""" +851 19 optimizer """adam""" +851 19 training_loop """owa""" +851 19 negative_sampler """basic""" +851 19 evaluator """rankbased""" +851 20 dataset """yago310""" +851 20 model """transe""" +851 20 loss """softplus""" +851 20 regularizer """no""" +851 20 optimizer """adam""" +851 20 training_loop """owa""" +851 20 negative_sampler """basic""" +851 20 evaluator """rankbased""" +851 21 dataset """yago310""" +851 21 model """transe""" +851 21 loss """softplus""" +851 21 regularizer """no""" +851 21 optimizer """adam""" +851 21 training_loop """owa""" +851 21 negative_sampler """basic""" +851 21 evaluator """rankbased""" +851 22 dataset """yago310""" +851 22 model """transe""" +851 22 loss """softplus""" +851 22 regularizer """no""" +851 22 optimizer """adam""" +851 22 training_loop """owa""" +851 22 
negative_sampler """basic""" +851 22 evaluator """rankbased""" +851 23 dataset """yago310""" +851 23 model """transe""" +851 23 loss """softplus""" +851 23 regularizer """no""" +851 23 optimizer """adam""" +851 23 training_loop """owa""" +851 23 negative_sampler """basic""" +851 23 evaluator """rankbased""" +851 24 dataset """yago310""" +851 24 model """transe""" +851 24 loss """softplus""" +851 24 regularizer """no""" +851 24 optimizer """adam""" +851 24 training_loop """owa""" +851 24 negative_sampler """basic""" +851 24 evaluator """rankbased""" +851 25 dataset """yago310""" +851 25 model """transe""" +851 25 loss """softplus""" +851 25 regularizer """no""" +851 25 optimizer """adam""" +851 25 training_loop """owa""" +851 25 negative_sampler """basic""" +851 25 evaluator """rankbased""" +851 26 dataset """yago310""" +851 26 model """transe""" +851 26 loss """softplus""" +851 26 regularizer """no""" +851 26 optimizer """adam""" +851 26 training_loop """owa""" +851 26 negative_sampler """basic""" +851 26 evaluator """rankbased""" +851 27 dataset """yago310""" +851 27 model """transe""" +851 27 loss """softplus""" +851 27 regularizer """no""" +851 27 optimizer """adam""" +851 27 training_loop """owa""" +851 27 negative_sampler """basic""" +851 27 evaluator """rankbased""" +851 28 dataset """yago310""" +851 28 model """transe""" +851 28 loss """softplus""" +851 28 regularizer """no""" +851 28 optimizer """adam""" +851 28 training_loop """owa""" +851 28 negative_sampler """basic""" +851 28 evaluator """rankbased""" +851 29 dataset """yago310""" +851 29 model """transe""" +851 29 loss """softplus""" +851 29 regularizer """no""" +851 29 optimizer """adam""" +851 29 training_loop """owa""" +851 29 negative_sampler """basic""" +851 29 evaluator """rankbased""" +851 30 dataset """yago310""" +851 30 model """transe""" +851 30 loss """softplus""" +851 30 regularizer """no""" +851 30 optimizer """adam""" +851 30 training_loop """owa""" +851 30 negative_sampler """basic""" 
+851 30 evaluator """rankbased""" +851 31 dataset """yago310""" +851 31 model """transe""" +851 31 loss """softplus""" +851 31 regularizer """no""" +851 31 optimizer """adam""" +851 31 training_loop """owa""" +851 31 negative_sampler """basic""" +851 31 evaluator """rankbased""" +851 32 dataset """yago310""" +851 32 model """transe""" +851 32 loss """softplus""" +851 32 regularizer """no""" +851 32 optimizer """adam""" +851 32 training_loop """owa""" +851 32 negative_sampler """basic""" +851 32 evaluator """rankbased""" +851 33 dataset """yago310""" +851 33 model """transe""" +851 33 loss """softplus""" +851 33 regularizer """no""" +851 33 optimizer """adam""" +851 33 training_loop """owa""" +851 33 negative_sampler """basic""" +851 33 evaluator """rankbased""" +851 34 dataset """yago310""" +851 34 model """transe""" +851 34 loss """softplus""" +851 34 regularizer """no""" +851 34 optimizer """adam""" +851 34 training_loop """owa""" +851 34 negative_sampler """basic""" +851 34 evaluator """rankbased""" +851 35 dataset """yago310""" +851 35 model """transe""" +851 35 loss """softplus""" +851 35 regularizer """no""" +851 35 optimizer """adam""" +851 35 training_loop """owa""" +851 35 negative_sampler """basic""" +851 35 evaluator """rankbased""" +851 36 dataset """yago310""" +851 36 model """transe""" +851 36 loss """softplus""" +851 36 regularizer """no""" +851 36 optimizer """adam""" +851 36 training_loop """owa""" +851 36 negative_sampler """basic""" +851 36 evaluator """rankbased""" +851 37 dataset """yago310""" +851 37 model """transe""" +851 37 loss """softplus""" +851 37 regularizer """no""" +851 37 optimizer """adam""" +851 37 training_loop """owa""" +851 37 negative_sampler """basic""" +851 37 evaluator """rankbased""" +851 38 dataset """yago310""" +851 38 model """transe""" +851 38 loss """softplus""" +851 38 regularizer """no""" +851 38 optimizer """adam""" +851 38 training_loop """owa""" +851 38 negative_sampler """basic""" +851 38 evaluator 
"""rankbased""" +851 39 dataset """yago310""" +851 39 model """transe""" +851 39 loss """softplus""" +851 39 regularizer """no""" +851 39 optimizer """adam""" +851 39 training_loop """owa""" +851 39 negative_sampler """basic""" +851 39 evaluator """rankbased""" +851 40 dataset """yago310""" +851 40 model """transe""" +851 40 loss """softplus""" +851 40 regularizer """no""" +851 40 optimizer """adam""" +851 40 training_loop """owa""" +851 40 negative_sampler """basic""" +851 40 evaluator """rankbased""" +851 41 dataset """yago310""" +851 41 model """transe""" +851 41 loss """softplus""" +851 41 regularizer """no""" +851 41 optimizer """adam""" +851 41 training_loop """owa""" +851 41 negative_sampler """basic""" +851 41 evaluator """rankbased""" +851 42 dataset """yago310""" +851 42 model """transe""" +851 42 loss """softplus""" +851 42 regularizer """no""" +851 42 optimizer """adam""" +851 42 training_loop """owa""" +851 42 negative_sampler """basic""" +851 42 evaluator """rankbased""" +851 43 dataset """yago310""" +851 43 model """transe""" +851 43 loss """softplus""" +851 43 regularizer """no""" +851 43 optimizer """adam""" +851 43 training_loop """owa""" +851 43 negative_sampler """basic""" +851 43 evaluator """rankbased""" +851 44 dataset """yago310""" +851 44 model """transe""" +851 44 loss """softplus""" +851 44 regularizer """no""" +851 44 optimizer """adam""" +851 44 training_loop """owa""" +851 44 negative_sampler """basic""" +851 44 evaluator """rankbased""" +852 1 model.embedding_dim 2.0 +852 1 model.scoring_fct_norm 2.0 +852 1 optimizer.lr 0.0696106249013627 +852 1 negative_sampler.num_negs_per_pos 19.0 +852 1 training.batch_size 2.0 +852 2 model.embedding_dim 2.0 +852 2 model.scoring_fct_norm 1.0 +852 2 optimizer.lr 0.06321917836368791 +852 2 negative_sampler.num_negs_per_pos 17.0 +852 2 training.batch_size 3.0 +852 3 model.embedding_dim 1.0 +852 3 model.scoring_fct_norm 2.0 +852 3 optimizer.lr 0.01681138013814932 +852 3 
negative_sampler.num_negs_per_pos 43.0 +852 3 training.batch_size 3.0 +852 4 model.embedding_dim 0.0 +852 4 model.scoring_fct_norm 1.0 +852 4 optimizer.lr 0.001334332340911008 +852 4 negative_sampler.num_negs_per_pos 10.0 +852 4 training.batch_size 2.0 +852 5 model.embedding_dim 0.0 +852 5 model.scoring_fct_norm 1.0 +852 5 optimizer.lr 0.006700322631051071 +852 5 negative_sampler.num_negs_per_pos 10.0 +852 5 training.batch_size 3.0 +852 6 model.embedding_dim 2.0 +852 6 model.scoring_fct_norm 2.0 +852 6 optimizer.lr 0.002762604730973316 +852 6 negative_sampler.num_negs_per_pos 37.0 +852 6 training.batch_size 0.0 +852 7 model.embedding_dim 0.0 +852 7 model.scoring_fct_norm 1.0 +852 7 optimizer.lr 0.010822114818757087 +852 7 negative_sampler.num_negs_per_pos 39.0 +852 7 training.batch_size 2.0 +852 8 model.embedding_dim 1.0 +852 8 model.scoring_fct_norm 1.0 +852 8 optimizer.lr 0.04953476425794088 +852 8 negative_sampler.num_negs_per_pos 41.0 +852 8 training.batch_size 2.0 +852 9 model.embedding_dim 0.0 +852 9 model.scoring_fct_norm 1.0 +852 9 optimizer.lr 0.0018153135738424458 +852 9 negative_sampler.num_negs_per_pos 14.0 +852 9 training.batch_size 1.0 +852 10 model.embedding_dim 1.0 +852 10 model.scoring_fct_norm 2.0 +852 10 optimizer.lr 0.0032831716617816122 +852 10 negative_sampler.num_negs_per_pos 44.0 +852 10 training.batch_size 2.0 +852 11 model.embedding_dim 1.0 +852 11 model.scoring_fct_norm 1.0 +852 11 optimizer.lr 0.020294177981123984 +852 11 negative_sampler.num_negs_per_pos 1.0 +852 11 training.batch_size 0.0 +852 12 model.embedding_dim 1.0 +852 12 model.scoring_fct_norm 1.0 +852 12 optimizer.lr 0.00128258965570708 +852 12 negative_sampler.num_negs_per_pos 36.0 +852 12 training.batch_size 2.0 +852 13 model.embedding_dim 1.0 +852 13 model.scoring_fct_norm 2.0 +852 13 optimizer.lr 0.0021454305091149236 +852 13 negative_sampler.num_negs_per_pos 42.0 +852 13 training.batch_size 2.0 +852 14 model.embedding_dim 1.0 +852 14 model.scoring_fct_norm 2.0 +852 14 
optimizer.lr 0.0052724537348656534 +852 14 negative_sampler.num_negs_per_pos 43.0 +852 14 training.batch_size 0.0 +852 15 model.embedding_dim 2.0 +852 15 model.scoring_fct_norm 2.0 +852 15 optimizer.lr 0.007683063126459105 +852 15 negative_sampler.num_negs_per_pos 40.0 +852 15 training.batch_size 3.0 +852 16 model.embedding_dim 2.0 +852 16 model.scoring_fct_norm 1.0 +852 16 optimizer.lr 0.0012026688829982193 +852 16 negative_sampler.num_negs_per_pos 9.0 +852 16 training.batch_size 3.0 +852 17 model.embedding_dim 2.0 +852 17 model.scoring_fct_norm 1.0 +852 17 optimizer.lr 0.03588777850536966 +852 17 negative_sampler.num_negs_per_pos 26.0 +852 17 training.batch_size 1.0 +852 18 model.embedding_dim 2.0 +852 18 model.scoring_fct_norm 2.0 +852 18 optimizer.lr 0.022249717670837242 +852 18 negative_sampler.num_negs_per_pos 22.0 +852 18 training.batch_size 1.0 +852 19 model.embedding_dim 1.0 +852 19 model.scoring_fct_norm 1.0 +852 19 optimizer.lr 0.0063687570114458235 +852 19 negative_sampler.num_negs_per_pos 25.0 +852 19 training.batch_size 2.0 +852 20 model.embedding_dim 1.0 +852 20 model.scoring_fct_norm 2.0 +852 20 optimizer.lr 0.08029165278930404 +852 20 negative_sampler.num_negs_per_pos 47.0 +852 20 training.batch_size 2.0 +852 21 model.embedding_dim 0.0 +852 21 model.scoring_fct_norm 2.0 +852 21 optimizer.lr 0.007653071161909662 +852 21 negative_sampler.num_negs_per_pos 27.0 +852 21 training.batch_size 0.0 +852 22 model.embedding_dim 2.0 +852 22 model.scoring_fct_norm 2.0 +852 22 optimizer.lr 0.006413647100734336 +852 22 negative_sampler.num_negs_per_pos 20.0 +852 22 training.batch_size 3.0 +852 23 model.embedding_dim 2.0 +852 23 model.scoring_fct_norm 2.0 +852 23 optimizer.lr 0.0011113191445040636 +852 23 negative_sampler.num_negs_per_pos 36.0 +852 23 training.batch_size 0.0 +852 24 model.embedding_dim 2.0 +852 24 model.scoring_fct_norm 1.0 +852 24 optimizer.lr 0.05032588625769807 +852 24 negative_sampler.num_negs_per_pos 30.0 +852 24 training.batch_size 1.0 +852 
25 model.embedding_dim 1.0 +852 25 model.scoring_fct_norm 1.0 +852 25 optimizer.lr 0.01999011271899504 +852 25 negative_sampler.num_negs_per_pos 43.0 +852 25 training.batch_size 1.0 +852 26 model.embedding_dim 2.0 +852 26 model.scoring_fct_norm 2.0 +852 26 optimizer.lr 0.0037685645443006955 +852 26 negative_sampler.num_negs_per_pos 11.0 +852 26 training.batch_size 0.0 +852 27 model.embedding_dim 0.0 +852 27 model.scoring_fct_norm 2.0 +852 27 optimizer.lr 0.003382590654476876 +852 27 negative_sampler.num_negs_per_pos 34.0 +852 27 training.batch_size 1.0 +852 28 model.embedding_dim 0.0 +852 28 model.scoring_fct_norm 1.0 +852 28 optimizer.lr 0.0061249905708249735 +852 28 negative_sampler.num_negs_per_pos 18.0 +852 28 training.batch_size 0.0 +852 29 model.embedding_dim 0.0 +852 29 model.scoring_fct_norm 2.0 +852 29 optimizer.lr 0.00314088941537914 +852 29 negative_sampler.num_negs_per_pos 45.0 +852 29 training.batch_size 3.0 +852 30 model.embedding_dim 1.0 +852 30 model.scoring_fct_norm 1.0 +852 30 optimizer.lr 0.002287238599475833 +852 30 negative_sampler.num_negs_per_pos 9.0 +852 30 training.batch_size 0.0 +852 31 model.embedding_dim 0.0 +852 31 model.scoring_fct_norm 2.0 +852 31 optimizer.lr 0.024287366147552756 +852 31 negative_sampler.num_negs_per_pos 14.0 +852 31 training.batch_size 1.0 +852 32 model.embedding_dim 0.0 +852 32 model.scoring_fct_norm 1.0 +852 32 optimizer.lr 0.033160534878346686 +852 32 negative_sampler.num_negs_per_pos 11.0 +852 32 training.batch_size 1.0 +852 33 model.embedding_dim 0.0 +852 33 model.scoring_fct_norm 2.0 +852 33 optimizer.lr 0.0037345797843115736 +852 33 negative_sampler.num_negs_per_pos 30.0 +852 33 training.batch_size 2.0 +852 34 model.embedding_dim 0.0 +852 34 model.scoring_fct_norm 1.0 +852 34 optimizer.lr 0.0012596092869065242 +852 34 negative_sampler.num_negs_per_pos 13.0 +852 34 training.batch_size 2.0 +852 35 model.embedding_dim 1.0 +852 35 model.scoring_fct_norm 1.0 +852 35 optimizer.lr 0.0037709868643152947 +852 35 
negative_sampler.num_negs_per_pos 3.0 +852 35 training.batch_size 1.0 +852 36 model.embedding_dim 2.0 +852 36 model.scoring_fct_norm 2.0 +852 36 optimizer.lr 0.003556711735502009 +852 36 negative_sampler.num_negs_per_pos 3.0 +852 36 training.batch_size 0.0 +852 37 model.embedding_dim 2.0 +852 37 model.scoring_fct_norm 1.0 +852 37 optimizer.lr 0.0070487567669726145 +852 37 negative_sampler.num_negs_per_pos 23.0 +852 37 training.batch_size 2.0 +852 38 model.embedding_dim 0.0 +852 38 model.scoring_fct_norm 1.0 +852 38 optimizer.lr 0.002281949862524715 +852 38 negative_sampler.num_negs_per_pos 25.0 +852 38 training.batch_size 2.0 +852 39 model.embedding_dim 1.0 +852 39 model.scoring_fct_norm 1.0 +852 39 optimizer.lr 0.005540351887920672 +852 39 negative_sampler.num_negs_per_pos 18.0 +852 39 training.batch_size 2.0 +852 40 model.embedding_dim 1.0 +852 40 model.scoring_fct_norm 1.0 +852 40 optimizer.lr 0.004877381893410401 +852 40 negative_sampler.num_negs_per_pos 9.0 +852 40 training.batch_size 0.0 +852 41 model.embedding_dim 0.0 +852 41 model.scoring_fct_norm 1.0 +852 41 optimizer.lr 0.0705232398668152 +852 41 negative_sampler.num_negs_per_pos 44.0 +852 41 training.batch_size 2.0 +852 42 model.embedding_dim 0.0 +852 42 model.scoring_fct_norm 1.0 +852 42 optimizer.lr 0.0017192057665000493 +852 42 negative_sampler.num_negs_per_pos 14.0 +852 42 training.batch_size 2.0 +852 43 model.embedding_dim 0.0 +852 43 model.scoring_fct_norm 1.0 +852 43 optimizer.lr 0.010605365474625513 +852 43 negative_sampler.num_negs_per_pos 10.0 +852 43 training.batch_size 3.0 +852 44 model.embedding_dim 0.0 +852 44 model.scoring_fct_norm 1.0 +852 44 optimizer.lr 0.0012820608627636327 +852 44 negative_sampler.num_negs_per_pos 31.0 +852 44 training.batch_size 1.0 +852 45 model.embedding_dim 0.0 +852 45 model.scoring_fct_norm 1.0 +852 45 optimizer.lr 0.012252844548505764 +852 45 negative_sampler.num_negs_per_pos 37.0 +852 45 training.batch_size 2.0 +852 46 model.embedding_dim 2.0 +852 46 
model.scoring_fct_norm 2.0 +852 46 optimizer.lr 0.001245873907965525 +852 46 negative_sampler.num_negs_per_pos 19.0 +852 46 training.batch_size 1.0 +852 47 model.embedding_dim 1.0 +852 47 model.scoring_fct_norm 2.0 +852 47 optimizer.lr 0.012168750503359292 +852 47 negative_sampler.num_negs_per_pos 38.0 +852 47 training.batch_size 3.0 +852 48 model.embedding_dim 1.0 +852 48 model.scoring_fct_norm 1.0 +852 48 optimizer.lr 0.032651316771112604 +852 48 negative_sampler.num_negs_per_pos 46.0 +852 48 training.batch_size 0.0 +852 49 model.embedding_dim 2.0 +852 49 model.scoring_fct_norm 1.0 +852 49 optimizer.lr 0.006080039028136177 +852 49 negative_sampler.num_negs_per_pos 22.0 +852 49 training.batch_size 0.0 +852 50 model.embedding_dim 0.0 +852 50 model.scoring_fct_norm 2.0 +852 50 optimizer.lr 0.025786201460891556 +852 50 negative_sampler.num_negs_per_pos 36.0 +852 50 training.batch_size 0.0 +852 51 model.embedding_dim 0.0 +852 51 model.scoring_fct_norm 2.0 +852 51 optimizer.lr 0.007626135976013346 +852 51 negative_sampler.num_negs_per_pos 48.0 +852 51 training.batch_size 0.0 +852 52 model.embedding_dim 1.0 +852 52 model.scoring_fct_norm 2.0 +852 52 optimizer.lr 0.06697494243030756 +852 52 negative_sampler.num_negs_per_pos 42.0 +852 52 training.batch_size 0.0 +852 53 model.embedding_dim 2.0 +852 53 model.scoring_fct_norm 1.0 +852 53 optimizer.lr 0.004793553743063959 +852 53 negative_sampler.num_negs_per_pos 5.0 +852 53 training.batch_size 2.0 +852 54 model.embedding_dim 0.0 +852 54 model.scoring_fct_norm 2.0 +852 54 optimizer.lr 0.006884097357443137 +852 54 negative_sampler.num_negs_per_pos 2.0 +852 54 training.batch_size 3.0 +852 55 model.embedding_dim 2.0 +852 55 model.scoring_fct_norm 1.0 +852 55 optimizer.lr 0.0021251483623847007 +852 55 negative_sampler.num_negs_per_pos 37.0 +852 55 training.batch_size 3.0 +852 56 model.embedding_dim 0.0 +852 56 model.scoring_fct_norm 2.0 +852 56 optimizer.lr 0.020352983778307236 +852 56 negative_sampler.num_negs_per_pos 14.0 +852 
56 training.batch_size 2.0 +852 57 model.embedding_dim 0.0 +852 57 model.scoring_fct_norm 2.0 +852 57 optimizer.lr 0.044016295524610116 +852 57 negative_sampler.num_negs_per_pos 18.0 +852 57 training.batch_size 0.0 +852 58 model.embedding_dim 0.0 +852 58 model.scoring_fct_norm 1.0 +852 58 optimizer.lr 0.007745418104873031 +852 58 negative_sampler.num_negs_per_pos 39.0 +852 58 training.batch_size 0.0 +852 59 model.embedding_dim 2.0 +852 59 model.scoring_fct_norm 2.0 +852 59 optimizer.lr 0.0410118378412114 +852 59 negative_sampler.num_negs_per_pos 46.0 +852 59 training.batch_size 2.0 +852 60 model.embedding_dim 2.0 +852 60 model.scoring_fct_norm 2.0 +852 60 optimizer.lr 0.01079941450823993 +852 60 negative_sampler.num_negs_per_pos 42.0 +852 60 training.batch_size 0.0 +852 61 model.embedding_dim 2.0 +852 61 model.scoring_fct_norm 1.0 +852 61 optimizer.lr 0.0014255594661470143 +852 61 negative_sampler.num_negs_per_pos 24.0 +852 61 training.batch_size 1.0 +852 62 model.embedding_dim 2.0 +852 62 model.scoring_fct_norm 2.0 +852 62 optimizer.lr 0.028496188769150787 +852 62 negative_sampler.num_negs_per_pos 32.0 +852 62 training.batch_size 0.0 +852 63 model.embedding_dim 1.0 +852 63 model.scoring_fct_norm 2.0 +852 63 optimizer.lr 0.02136195753775336 +852 63 negative_sampler.num_negs_per_pos 40.0 +852 63 training.batch_size 1.0 +852 64 model.embedding_dim 0.0 +852 64 model.scoring_fct_norm 2.0 +852 64 optimizer.lr 0.0028210208549791335 +852 64 negative_sampler.num_negs_per_pos 26.0 +852 64 training.batch_size 1.0 +852 65 model.embedding_dim 2.0 +852 65 model.scoring_fct_norm 1.0 +852 65 optimizer.lr 0.020191331123432115 +852 65 negative_sampler.num_negs_per_pos 4.0 +852 65 training.batch_size 0.0 +852 66 model.embedding_dim 2.0 +852 66 model.scoring_fct_norm 2.0 +852 66 optimizer.lr 0.005684828653412793 +852 66 negative_sampler.num_negs_per_pos 2.0 +852 66 training.batch_size 3.0 +852 67 model.embedding_dim 0.0 +852 67 model.scoring_fct_norm 2.0 +852 67 optimizer.lr 
0.09826849190700156 +852 67 negative_sampler.num_negs_per_pos 19.0 +852 67 training.batch_size 3.0 +852 68 model.embedding_dim 2.0 +852 68 model.scoring_fct_norm 2.0 +852 68 optimizer.lr 0.0013928667784270082 +852 68 negative_sampler.num_negs_per_pos 20.0 +852 68 training.batch_size 2.0 +852 69 model.embedding_dim 0.0 +852 69 model.scoring_fct_norm 1.0 +852 69 optimizer.lr 0.09190674016003318 +852 69 negative_sampler.num_negs_per_pos 7.0 +852 69 training.batch_size 2.0 +852 70 model.embedding_dim 2.0 +852 70 model.scoring_fct_norm 1.0 +852 70 optimizer.lr 0.0012498964700149871 +852 70 negative_sampler.num_negs_per_pos 42.0 +852 70 training.batch_size 1.0 +852 71 model.embedding_dim 2.0 +852 71 model.scoring_fct_norm 2.0 +852 71 optimizer.lr 0.0023915437791031243 +852 71 negative_sampler.num_negs_per_pos 5.0 +852 71 training.batch_size 2.0 +852 72 model.embedding_dim 2.0 +852 72 model.scoring_fct_norm 2.0 +852 72 optimizer.lr 0.047020210479856626 +852 72 negative_sampler.num_negs_per_pos 45.0 +852 72 training.batch_size 1.0 +852 73 model.embedding_dim 1.0 +852 73 model.scoring_fct_norm 2.0 +852 73 optimizer.lr 0.06612958541909364 +852 73 negative_sampler.num_negs_per_pos 18.0 +852 73 training.batch_size 0.0 +852 74 model.embedding_dim 2.0 +852 74 model.scoring_fct_norm 2.0 +852 74 optimizer.lr 0.005927703933759572 +852 74 negative_sampler.num_negs_per_pos 31.0 +852 74 training.batch_size 1.0 +852 75 model.embedding_dim 2.0 +852 75 model.scoring_fct_norm 2.0 +852 75 optimizer.lr 0.00473596811425717 +852 75 negative_sampler.num_negs_per_pos 23.0 +852 75 training.batch_size 1.0 +852 1 dataset """yago310""" +852 1 model """transe""" +852 1 loss """softplus""" +852 1 regularizer """no""" +852 1 optimizer """adam""" +852 1 training_loop """owa""" +852 1 negative_sampler """basic""" +852 1 evaluator """rankbased""" +852 2 dataset """yago310""" +852 2 model """transe""" +852 2 loss """softplus""" +852 2 regularizer """no""" +852 2 optimizer """adam""" +852 2 training_loop 
"""owa""" +852 2 negative_sampler """basic""" +852 2 evaluator """rankbased""" +852 3 dataset """yago310""" +852 3 model """transe""" +852 3 loss """softplus""" +852 3 regularizer """no""" +852 3 optimizer """adam""" +852 3 training_loop """owa""" +852 3 negative_sampler """basic""" +852 3 evaluator """rankbased""" +852 4 dataset """yago310""" +852 4 model """transe""" +852 4 loss """softplus""" +852 4 regularizer """no""" +852 4 optimizer """adam""" +852 4 training_loop """owa""" +852 4 negative_sampler """basic""" +852 4 evaluator """rankbased""" +852 5 dataset """yago310""" +852 5 model """transe""" +852 5 loss """softplus""" +852 5 regularizer """no""" +852 5 optimizer """adam""" +852 5 training_loop """owa""" +852 5 negative_sampler """basic""" +852 5 evaluator """rankbased""" +852 6 dataset """yago310""" +852 6 model """transe""" +852 6 loss """softplus""" +852 6 regularizer """no""" +852 6 optimizer """adam""" +852 6 training_loop """owa""" +852 6 negative_sampler """basic""" +852 6 evaluator """rankbased""" +852 7 dataset """yago310""" +852 7 model """transe""" +852 7 loss """softplus""" +852 7 regularizer """no""" +852 7 optimizer """adam""" +852 7 training_loop """owa""" +852 7 negative_sampler """basic""" +852 7 evaluator """rankbased""" +852 8 dataset """yago310""" +852 8 model """transe""" +852 8 loss """softplus""" +852 8 regularizer """no""" +852 8 optimizer """adam""" +852 8 training_loop """owa""" +852 8 negative_sampler """basic""" +852 8 evaluator """rankbased""" +852 9 dataset """yago310""" +852 9 model """transe""" +852 9 loss """softplus""" +852 9 regularizer """no""" +852 9 optimizer """adam""" +852 9 training_loop """owa""" +852 9 negative_sampler """basic""" +852 9 evaluator """rankbased""" +852 10 dataset """yago310""" +852 10 model """transe""" +852 10 loss """softplus""" +852 10 regularizer """no""" +852 10 optimizer """adam""" +852 10 training_loop """owa""" +852 10 negative_sampler """basic""" +852 10 evaluator """rankbased""" +852 11 
dataset """yago310""" +852 11 model """transe""" +852 11 loss """softplus""" +852 11 regularizer """no""" +852 11 optimizer """adam""" +852 11 training_loop """owa""" +852 11 negative_sampler """basic""" +852 11 evaluator """rankbased""" +852 12 dataset """yago310""" +852 12 model """transe""" +852 12 loss """softplus""" +852 12 regularizer """no""" +852 12 optimizer """adam""" +852 12 training_loop """owa""" +852 12 negative_sampler """basic""" +852 12 evaluator """rankbased""" +852 13 dataset """yago310""" +852 13 model """transe""" +852 13 loss """softplus""" +852 13 regularizer """no""" +852 13 optimizer """adam""" +852 13 training_loop """owa""" +852 13 negative_sampler """basic""" +852 13 evaluator """rankbased""" +852 14 dataset """yago310""" +852 14 model """transe""" +852 14 loss """softplus""" +852 14 regularizer """no""" +852 14 optimizer """adam""" +852 14 training_loop """owa""" +852 14 negative_sampler """basic""" +852 14 evaluator """rankbased""" +852 15 dataset """yago310""" +852 15 model """transe""" +852 15 loss """softplus""" +852 15 regularizer """no""" +852 15 optimizer """adam""" +852 15 training_loop """owa""" +852 15 negative_sampler """basic""" +852 15 evaluator """rankbased""" +852 16 dataset """yago310""" +852 16 model """transe""" +852 16 loss """softplus""" +852 16 regularizer """no""" +852 16 optimizer """adam""" +852 16 training_loop """owa""" +852 16 negative_sampler """basic""" +852 16 evaluator """rankbased""" +852 17 dataset """yago310""" +852 17 model """transe""" +852 17 loss """softplus""" +852 17 regularizer """no""" +852 17 optimizer """adam""" +852 17 training_loop """owa""" +852 17 negative_sampler """basic""" +852 17 evaluator """rankbased""" +852 18 dataset """yago310""" +852 18 model """transe""" +852 18 loss """softplus""" +852 18 regularizer """no""" +852 18 optimizer """adam""" +852 18 training_loop """owa""" +852 18 negative_sampler """basic""" +852 18 evaluator """rankbased""" +852 19 dataset """yago310""" +852 19 
model """transe""" +852 19 loss """softplus""" +852 19 regularizer """no""" +852 19 optimizer """adam""" +852 19 training_loop """owa""" +852 19 negative_sampler """basic""" +852 19 evaluator """rankbased""" +852 20 dataset """yago310""" +852 20 model """transe""" +852 20 loss """softplus""" +852 20 regularizer """no""" +852 20 optimizer """adam""" +852 20 training_loop """owa""" +852 20 negative_sampler """basic""" +852 20 evaluator """rankbased""" +852 21 dataset """yago310""" +852 21 model """transe""" +852 21 loss """softplus""" +852 21 regularizer """no""" +852 21 optimizer """adam""" +852 21 training_loop """owa""" +852 21 negative_sampler """basic""" +852 21 evaluator """rankbased""" +852 22 dataset """yago310""" +852 22 model """transe""" +852 22 loss """softplus""" +852 22 regularizer """no""" +852 22 optimizer """adam""" +852 22 training_loop """owa""" +852 22 negative_sampler """basic""" +852 22 evaluator """rankbased""" +852 23 dataset """yago310""" +852 23 model """transe""" +852 23 loss """softplus""" +852 23 regularizer """no""" +852 23 optimizer """adam""" +852 23 training_loop """owa""" +852 23 negative_sampler """basic""" +852 23 evaluator """rankbased""" +852 24 dataset """yago310""" +852 24 model """transe""" +852 24 loss """softplus""" +852 24 regularizer """no""" +852 24 optimizer """adam""" +852 24 training_loop """owa""" +852 24 negative_sampler """basic""" +852 24 evaluator """rankbased""" +852 25 dataset """yago310""" +852 25 model """transe""" +852 25 loss """softplus""" +852 25 regularizer """no""" +852 25 optimizer """adam""" +852 25 training_loop """owa""" +852 25 negative_sampler """basic""" +852 25 evaluator """rankbased""" +852 26 dataset """yago310""" +852 26 model """transe""" +852 26 loss """softplus""" +852 26 regularizer """no""" +852 26 optimizer """adam""" +852 26 training_loop """owa""" +852 26 negative_sampler """basic""" +852 26 evaluator """rankbased""" +852 27 dataset """yago310""" +852 27 model """transe""" +852 27 loss 
"""softplus""" +852 27 regularizer """no""" +852 27 optimizer """adam""" +852 27 training_loop """owa""" +852 27 negative_sampler """basic""" +852 27 evaluator """rankbased""" +852 28 dataset """yago310""" +852 28 model """transe""" +852 28 loss """softplus""" +852 28 regularizer """no""" +852 28 optimizer """adam""" +852 28 training_loop """owa""" +852 28 negative_sampler """basic""" +852 28 evaluator """rankbased""" +852 29 dataset """yago310""" +852 29 model """transe""" +852 29 loss """softplus""" +852 29 regularizer """no""" +852 29 optimizer """adam""" +852 29 training_loop """owa""" +852 29 negative_sampler """basic""" +852 29 evaluator """rankbased""" +852 30 dataset """yago310""" +852 30 model """transe""" +852 30 loss """softplus""" +852 30 regularizer """no""" +852 30 optimizer """adam""" +852 30 training_loop """owa""" +852 30 negative_sampler """basic""" +852 30 evaluator """rankbased""" +852 31 dataset """yago310""" +852 31 model """transe""" +852 31 loss """softplus""" +852 31 regularizer """no""" +852 31 optimizer """adam""" +852 31 training_loop """owa""" +852 31 negative_sampler """basic""" +852 31 evaluator """rankbased""" +852 32 dataset """yago310""" +852 32 model """transe""" +852 32 loss """softplus""" +852 32 regularizer """no""" +852 32 optimizer """adam""" +852 32 training_loop """owa""" +852 32 negative_sampler """basic""" +852 32 evaluator """rankbased""" +852 33 dataset """yago310""" +852 33 model """transe""" +852 33 loss """softplus""" +852 33 regularizer """no""" +852 33 optimizer """adam""" +852 33 training_loop """owa""" +852 33 negative_sampler """basic""" +852 33 evaluator """rankbased""" +852 34 dataset """yago310""" +852 34 model """transe""" +852 34 loss """softplus""" +852 34 regularizer """no""" +852 34 optimizer """adam""" +852 34 training_loop """owa""" +852 34 negative_sampler """basic""" +852 34 evaluator """rankbased""" +852 35 dataset """yago310""" +852 35 model """transe""" +852 35 loss """softplus""" +852 35 
regularizer """no""" +852 35 optimizer """adam""" +852 35 training_loop """owa""" +852 35 negative_sampler """basic""" +852 35 evaluator """rankbased""" +852 36 dataset """yago310""" +852 36 model """transe""" +852 36 loss """softplus""" +852 36 regularizer """no""" +852 36 optimizer """adam""" +852 36 training_loop """owa""" +852 36 negative_sampler """basic""" +852 36 evaluator """rankbased""" +852 37 dataset """yago310""" +852 37 model """transe""" +852 37 loss """softplus""" +852 37 regularizer """no""" +852 37 optimizer """adam""" +852 37 training_loop """owa""" +852 37 negative_sampler """basic""" +852 37 evaluator """rankbased""" +852 38 dataset """yago310""" +852 38 model """transe""" +852 38 loss """softplus""" +852 38 regularizer """no""" +852 38 optimizer """adam""" +852 38 training_loop """owa""" +852 38 negative_sampler """basic""" +852 38 evaluator """rankbased""" +852 39 dataset """yago310""" +852 39 model """transe""" +852 39 loss """softplus""" +852 39 regularizer """no""" +852 39 optimizer """adam""" +852 39 training_loop """owa""" +852 39 negative_sampler """basic""" +852 39 evaluator """rankbased""" +852 40 dataset """yago310""" +852 40 model """transe""" +852 40 loss """softplus""" +852 40 regularizer """no""" +852 40 optimizer """adam""" +852 40 training_loop """owa""" +852 40 negative_sampler """basic""" +852 40 evaluator """rankbased""" +852 41 dataset """yago310""" +852 41 model """transe""" +852 41 loss """softplus""" +852 41 regularizer """no""" +852 41 optimizer """adam""" +852 41 training_loop """owa""" +852 41 negative_sampler """basic""" +852 41 evaluator """rankbased""" +852 42 dataset """yago310""" +852 42 model """transe""" +852 42 loss """softplus""" +852 42 regularizer """no""" +852 42 optimizer """adam""" +852 42 training_loop """owa""" +852 42 negative_sampler """basic""" +852 42 evaluator """rankbased""" +852 43 dataset """yago310""" +852 43 model """transe""" +852 43 loss """softplus""" +852 43 regularizer """no""" +852 43 
optimizer """adam""" +852 43 training_loop """owa""" +852 43 negative_sampler """basic""" +852 43 evaluator """rankbased""" +852 44 dataset """yago310""" +852 44 model """transe""" +852 44 loss """softplus""" +852 44 regularizer """no""" +852 44 optimizer """adam""" +852 44 training_loop """owa""" +852 44 negative_sampler """basic""" +852 44 evaluator """rankbased""" +852 45 dataset """yago310""" +852 45 model """transe""" +852 45 loss """softplus""" +852 45 regularizer """no""" +852 45 optimizer """adam""" +852 45 training_loop """owa""" +852 45 negative_sampler """basic""" +852 45 evaluator """rankbased""" +852 46 dataset """yago310""" +852 46 model """transe""" +852 46 loss """softplus""" +852 46 regularizer """no""" +852 46 optimizer """adam""" +852 46 training_loop """owa""" +852 46 negative_sampler """basic""" +852 46 evaluator """rankbased""" +852 47 dataset """yago310""" +852 47 model """transe""" +852 47 loss """softplus""" +852 47 regularizer """no""" +852 47 optimizer """adam""" +852 47 training_loop """owa""" +852 47 negative_sampler """basic""" +852 47 evaluator """rankbased""" +852 48 dataset """yago310""" +852 48 model """transe""" +852 48 loss """softplus""" +852 48 regularizer """no""" +852 48 optimizer """adam""" +852 48 training_loop """owa""" +852 48 negative_sampler """basic""" +852 48 evaluator """rankbased""" +852 49 dataset """yago310""" +852 49 model """transe""" +852 49 loss """softplus""" +852 49 regularizer """no""" +852 49 optimizer """adam""" +852 49 training_loop """owa""" +852 49 negative_sampler """basic""" +852 49 evaluator """rankbased""" +852 50 dataset """yago310""" +852 50 model """transe""" +852 50 loss """softplus""" +852 50 regularizer """no""" +852 50 optimizer """adam""" +852 50 training_loop """owa""" +852 50 negative_sampler """basic""" +852 50 evaluator """rankbased""" +852 51 dataset """yago310""" +852 51 model """transe""" +852 51 loss """softplus""" +852 51 regularizer """no""" +852 51 optimizer """adam""" +852 51 
training_loop """owa""" +852 51 negative_sampler """basic""" +852 51 evaluator """rankbased""" +852 52 dataset """yago310""" +852 52 model """transe""" +852 52 loss """softplus""" +852 52 regularizer """no""" +852 52 optimizer """adam""" +852 52 training_loop """owa""" +852 52 negative_sampler """basic""" +852 52 evaluator """rankbased""" +852 53 dataset """yago310""" +852 53 model """transe""" +852 53 loss """softplus""" +852 53 regularizer """no""" +852 53 optimizer """adam""" +852 53 training_loop """owa""" +852 53 negative_sampler """basic""" +852 53 evaluator """rankbased""" +852 54 dataset """yago310""" +852 54 model """transe""" +852 54 loss """softplus""" +852 54 regularizer """no""" +852 54 optimizer """adam""" +852 54 training_loop """owa""" +852 54 negative_sampler """basic""" +852 54 evaluator """rankbased""" +852 55 dataset """yago310""" +852 55 model """transe""" +852 55 loss """softplus""" +852 55 regularizer """no""" +852 55 optimizer """adam""" +852 55 training_loop """owa""" +852 55 negative_sampler """basic""" +852 55 evaluator """rankbased""" +852 56 dataset """yago310""" +852 56 model """transe""" +852 56 loss """softplus""" +852 56 regularizer """no""" +852 56 optimizer """adam""" +852 56 training_loop """owa""" +852 56 negative_sampler """basic""" +852 56 evaluator """rankbased""" +852 57 dataset """yago310""" +852 57 model """transe""" +852 57 loss """softplus""" +852 57 regularizer """no""" +852 57 optimizer """adam""" +852 57 training_loop """owa""" +852 57 negative_sampler """basic""" +852 57 evaluator """rankbased""" +852 58 dataset """yago310""" +852 58 model """transe""" +852 58 loss """softplus""" +852 58 regularizer """no""" +852 58 optimizer """adam""" +852 58 training_loop """owa""" +852 58 negative_sampler """basic""" +852 58 evaluator """rankbased""" +852 59 dataset """yago310""" +852 59 model """transe""" +852 59 loss """softplus""" +852 59 regularizer """no""" +852 59 optimizer """adam""" +852 59 training_loop """owa""" +852 59 
negative_sampler """basic""" +852 59 evaluator """rankbased""" +852 60 dataset """yago310""" +852 60 model """transe""" +852 60 loss """softplus""" +852 60 regularizer """no""" +852 60 optimizer """adam""" +852 60 training_loop """owa""" +852 60 negative_sampler """basic""" +852 60 evaluator """rankbased""" +852 61 dataset """yago310""" +852 61 model """transe""" +852 61 loss """softplus""" +852 61 regularizer """no""" +852 61 optimizer """adam""" +852 61 training_loop """owa""" +852 61 negative_sampler """basic""" +852 61 evaluator """rankbased""" +852 62 dataset """yago310""" +852 62 model """transe""" +852 62 loss """softplus""" +852 62 regularizer """no""" +852 62 optimizer """adam""" +852 62 training_loop """owa""" +852 62 negative_sampler """basic""" +852 62 evaluator """rankbased""" +852 63 dataset """yago310""" +852 63 model """transe""" +852 63 loss """softplus""" +852 63 regularizer """no""" +852 63 optimizer """adam""" +852 63 training_loop """owa""" +852 63 negative_sampler """basic""" +852 63 evaluator """rankbased""" +852 64 dataset """yago310""" +852 64 model """transe""" +852 64 loss """softplus""" +852 64 regularizer """no""" +852 64 optimizer """adam""" +852 64 training_loop """owa""" +852 64 negative_sampler """basic""" +852 64 evaluator """rankbased""" +852 65 dataset """yago310""" +852 65 model """transe""" +852 65 loss """softplus""" +852 65 regularizer """no""" +852 65 optimizer """adam""" +852 65 training_loop """owa""" +852 65 negative_sampler """basic""" +852 65 evaluator """rankbased""" +852 66 dataset """yago310""" +852 66 model """transe""" +852 66 loss """softplus""" +852 66 regularizer """no""" +852 66 optimizer """adam""" +852 66 training_loop """owa""" +852 66 negative_sampler """basic""" +852 66 evaluator """rankbased""" +852 67 dataset """yago310""" +852 67 model """transe""" +852 67 loss """softplus""" +852 67 regularizer """no""" +852 67 optimizer """adam""" +852 67 training_loop """owa""" +852 67 negative_sampler """basic""" 
+852 67 evaluator """rankbased""" +852 68 dataset """yago310""" +852 68 model """transe""" +852 68 loss """softplus""" +852 68 regularizer """no""" +852 68 optimizer """adam""" +852 68 training_loop """owa""" +852 68 negative_sampler """basic""" +852 68 evaluator """rankbased""" +852 69 dataset """yago310""" +852 69 model """transe""" +852 69 loss """softplus""" +852 69 regularizer """no""" +852 69 optimizer """adam""" +852 69 training_loop """owa""" +852 69 negative_sampler """basic""" +852 69 evaluator """rankbased""" +852 70 dataset """yago310""" +852 70 model """transe""" +852 70 loss """softplus""" +852 70 regularizer """no""" +852 70 optimizer """adam""" +852 70 training_loop """owa""" +852 70 negative_sampler """basic""" +852 70 evaluator """rankbased""" +852 71 dataset """yago310""" +852 71 model """transe""" +852 71 loss """softplus""" +852 71 regularizer """no""" +852 71 optimizer """adam""" +852 71 training_loop """owa""" +852 71 negative_sampler """basic""" +852 71 evaluator """rankbased""" +852 72 dataset """yago310""" +852 72 model """transe""" +852 72 loss """softplus""" +852 72 regularizer """no""" +852 72 optimizer """adam""" +852 72 training_loop """owa""" +852 72 negative_sampler """basic""" +852 72 evaluator """rankbased""" +852 73 dataset """yago310""" +852 73 model """transe""" +852 73 loss """softplus""" +852 73 regularizer """no""" +852 73 optimizer """adam""" +852 73 training_loop """owa""" +852 73 negative_sampler """basic""" +852 73 evaluator """rankbased""" +852 74 dataset """yago310""" +852 74 model """transe""" +852 74 loss """softplus""" +852 74 regularizer """no""" +852 74 optimizer """adam""" +852 74 training_loop """owa""" +852 74 negative_sampler """basic""" +852 74 evaluator """rankbased""" +852 75 dataset """yago310""" +852 75 model """transe""" +852 75 loss """softplus""" +852 75 regularizer """no""" +852 75 optimizer """adam""" +852 75 training_loop """owa""" +852 75 negative_sampler """basic""" +852 75 evaluator 
"""rankbased""" +853 1 model.embedding_dim 2.0 +853 1 model.scoring_fct_norm 2.0 +853 1 optimizer.lr 0.0035763361692312877 +853 1 negative_sampler.num_negs_per_pos 23.0 +853 1 training.batch_size 2.0 +853 2 model.embedding_dim 0.0 +853 2 model.scoring_fct_norm 1.0 +853 2 optimizer.lr 0.0029553324363198914 +853 2 negative_sampler.num_negs_per_pos 5.0 +853 2 training.batch_size 3.0 +853 3 model.embedding_dim 1.0 +853 3 model.scoring_fct_norm 2.0 +853 3 optimizer.lr 0.026191193219773822 +853 3 negative_sampler.num_negs_per_pos 32.0 +853 3 training.batch_size 2.0 +853 4 model.embedding_dim 0.0 +853 4 model.scoring_fct_norm 2.0 +853 4 optimizer.lr 0.005848135707286356 +853 4 negative_sampler.num_negs_per_pos 39.0 +853 4 training.batch_size 1.0 +853 5 model.embedding_dim 1.0 +853 5 model.scoring_fct_norm 2.0 +853 5 optimizer.lr 0.005372549128713282 +853 5 negative_sampler.num_negs_per_pos 6.0 +853 5 training.batch_size 3.0 +853 6 model.embedding_dim 0.0 +853 6 model.scoring_fct_norm 2.0 +853 6 optimizer.lr 0.045981742522213535 +853 6 negative_sampler.num_negs_per_pos 37.0 +853 6 training.batch_size 3.0 +853 7 model.embedding_dim 0.0 +853 7 model.scoring_fct_norm 2.0 +853 7 optimizer.lr 0.06641361790233813 +853 7 negative_sampler.num_negs_per_pos 10.0 +853 7 training.batch_size 0.0 +853 8 model.embedding_dim 2.0 +853 8 model.scoring_fct_norm 1.0 +853 8 optimizer.lr 0.006070552208375476 +853 8 negative_sampler.num_negs_per_pos 6.0 +853 8 training.batch_size 1.0 +853 9 model.embedding_dim 0.0 +853 9 model.scoring_fct_norm 2.0 +853 9 optimizer.lr 0.0010544255745273277 +853 9 negative_sampler.num_negs_per_pos 0.0 +853 9 training.batch_size 3.0 +853 10 model.embedding_dim 1.0 +853 10 model.scoring_fct_norm 1.0 +853 10 optimizer.lr 0.04707393083403744 +853 10 negative_sampler.num_negs_per_pos 49.0 +853 10 training.batch_size 3.0 +853 11 model.embedding_dim 0.0 +853 11 model.scoring_fct_norm 1.0 +853 11 optimizer.lr 0.026579061143446204 +853 11 negative_sampler.num_negs_per_pos 
29.0 +853 11 training.batch_size 2.0 +853 12 model.embedding_dim 1.0 +853 12 model.scoring_fct_norm 2.0 +853 12 optimizer.lr 0.001386400829926998 +853 12 negative_sampler.num_negs_per_pos 25.0 +853 12 training.batch_size 3.0 +853 13 model.embedding_dim 0.0 +853 13 model.scoring_fct_norm 2.0 +853 13 optimizer.lr 0.05499207862715293 +853 13 negative_sampler.num_negs_per_pos 11.0 +853 13 training.batch_size 1.0 +853 14 model.embedding_dim 1.0 +853 14 model.scoring_fct_norm 2.0 +853 14 optimizer.lr 0.0028831881586503932 +853 14 negative_sampler.num_negs_per_pos 36.0 +853 14 training.batch_size 3.0 +853 15 model.embedding_dim 1.0 +853 15 model.scoring_fct_norm 2.0 +853 15 optimizer.lr 0.011040243281661411 +853 15 negative_sampler.num_negs_per_pos 49.0 +853 15 training.batch_size 0.0 +853 16 model.embedding_dim 1.0 +853 16 model.scoring_fct_norm 1.0 +853 16 optimizer.lr 0.021547064976007092 +853 16 negative_sampler.num_negs_per_pos 15.0 +853 16 training.batch_size 1.0 +853 17 model.embedding_dim 2.0 +853 17 model.scoring_fct_norm 2.0 +853 17 optimizer.lr 0.0019420911186595223 +853 17 negative_sampler.num_negs_per_pos 31.0 +853 17 training.batch_size 2.0 +853 18 model.embedding_dim 0.0 +853 18 model.scoring_fct_norm 1.0 +853 18 optimizer.lr 0.0060985171713389946 +853 18 negative_sampler.num_negs_per_pos 48.0 +853 18 training.batch_size 0.0 +853 19 model.embedding_dim 1.0 +853 19 model.scoring_fct_norm 1.0 +853 19 optimizer.lr 0.001412154844848352 +853 19 negative_sampler.num_negs_per_pos 6.0 +853 19 training.batch_size 2.0 +853 20 model.embedding_dim 0.0 +853 20 model.scoring_fct_norm 2.0 +853 20 optimizer.lr 0.029997827934652416 +853 20 negative_sampler.num_negs_per_pos 26.0 +853 20 training.batch_size 3.0 +853 21 model.embedding_dim 1.0 +853 21 model.scoring_fct_norm 2.0 +853 21 optimizer.lr 0.04466880264697546 +853 21 negative_sampler.num_negs_per_pos 22.0 +853 21 training.batch_size 1.0 +853 22 model.embedding_dim 0.0 +853 22 model.scoring_fct_norm 1.0 +853 22 
optimizer.lr 0.0010966301537030747 +853 22 negative_sampler.num_negs_per_pos 24.0 +853 22 training.batch_size 0.0 +853 23 model.embedding_dim 2.0 +853 23 model.scoring_fct_norm 1.0 +853 23 optimizer.lr 0.004712914099218399 +853 23 negative_sampler.num_negs_per_pos 17.0 +853 23 training.batch_size 0.0 +853 24 model.embedding_dim 0.0 +853 24 model.scoring_fct_norm 2.0 +853 24 optimizer.lr 0.0014536899586660868 +853 24 negative_sampler.num_negs_per_pos 41.0 +853 24 training.batch_size 3.0 +853 25 model.embedding_dim 1.0 +853 25 model.scoring_fct_norm 2.0 +853 25 optimizer.lr 0.016157429423466044 +853 25 negative_sampler.num_negs_per_pos 18.0 +853 25 training.batch_size 3.0 +853 26 model.embedding_dim 0.0 +853 26 model.scoring_fct_norm 1.0 +853 26 optimizer.lr 0.0019442283595748559 +853 26 negative_sampler.num_negs_per_pos 19.0 +853 26 training.batch_size 3.0 +853 27 model.embedding_dim 1.0 +853 27 model.scoring_fct_norm 2.0 +853 27 optimizer.lr 0.0037197080094242157 +853 27 negative_sampler.num_negs_per_pos 14.0 +853 27 training.batch_size 3.0 +853 28 model.embedding_dim 2.0 +853 28 model.scoring_fct_norm 1.0 +853 28 optimizer.lr 0.01866603288734708 +853 28 negative_sampler.num_negs_per_pos 22.0 +853 28 training.batch_size 3.0 +853 29 model.embedding_dim 1.0 +853 29 model.scoring_fct_norm 1.0 +853 29 optimizer.lr 0.004426947226609597 +853 29 negative_sampler.num_negs_per_pos 43.0 +853 29 training.batch_size 2.0 +853 30 model.embedding_dim 1.0 +853 30 model.scoring_fct_norm 1.0 +853 30 optimizer.lr 0.004906931992294971 +853 30 negative_sampler.num_negs_per_pos 13.0 +853 30 training.batch_size 0.0 +853 31 model.embedding_dim 1.0 +853 31 model.scoring_fct_norm 1.0 +853 31 optimizer.lr 0.003080579610418227 +853 31 negative_sampler.num_negs_per_pos 47.0 +853 31 training.batch_size 2.0 +853 32 model.embedding_dim 2.0 +853 32 model.scoring_fct_norm 2.0 +853 32 optimizer.lr 0.006176514910491302 +853 32 negative_sampler.num_negs_per_pos 41.0 +853 32 training.batch_size 1.0 
+853 33 model.embedding_dim 0.0 +853 33 model.scoring_fct_norm 2.0 +853 33 optimizer.lr 0.0016893397616208733 +853 33 negative_sampler.num_negs_per_pos 11.0 +853 33 training.batch_size 1.0 +853 34 model.embedding_dim 1.0 +853 34 model.scoring_fct_norm 1.0 +853 34 optimizer.lr 0.001043600105573911 +853 34 negative_sampler.num_negs_per_pos 22.0 +853 34 training.batch_size 2.0 +853 35 model.embedding_dim 1.0 +853 35 model.scoring_fct_norm 2.0 +853 35 optimizer.lr 0.044255106203473386 +853 35 negative_sampler.num_negs_per_pos 7.0 +853 35 training.batch_size 0.0 +853 36 model.embedding_dim 0.0 +853 36 model.scoring_fct_norm 2.0 +853 36 optimizer.lr 0.0601599021100722 +853 36 negative_sampler.num_negs_per_pos 19.0 +853 36 training.batch_size 2.0 +853 37 model.embedding_dim 2.0 +853 37 model.scoring_fct_norm 2.0 +853 37 optimizer.lr 0.057811122241056404 +853 37 negative_sampler.num_negs_per_pos 21.0 +853 37 training.batch_size 1.0 +853 38 model.embedding_dim 0.0 +853 38 model.scoring_fct_norm 2.0 +853 38 optimizer.lr 0.0017216450381166803 +853 38 negative_sampler.num_negs_per_pos 46.0 +853 38 training.batch_size 2.0 +853 39 model.embedding_dim 2.0 +853 39 model.scoring_fct_norm 1.0 +853 39 optimizer.lr 0.0016418660060698457 +853 39 negative_sampler.num_negs_per_pos 13.0 +853 39 training.batch_size 1.0 +853 40 model.embedding_dim 0.0 +853 40 model.scoring_fct_norm 2.0 +853 40 optimizer.lr 0.004414267968862394 +853 40 negative_sampler.num_negs_per_pos 15.0 +853 40 training.batch_size 2.0 +853 41 model.embedding_dim 1.0 +853 41 model.scoring_fct_norm 2.0 +853 41 optimizer.lr 0.002376134513836805 +853 41 negative_sampler.num_negs_per_pos 38.0 +853 41 training.batch_size 3.0 +853 42 model.embedding_dim 0.0 +853 42 model.scoring_fct_norm 2.0 +853 42 optimizer.lr 0.008368196327717393 +853 42 negative_sampler.num_negs_per_pos 24.0 +853 42 training.batch_size 2.0 +853 43 model.embedding_dim 1.0 +853 43 model.scoring_fct_norm 1.0 +853 43 optimizer.lr 0.005233582051894998 +853 43 
negative_sampler.num_negs_per_pos 3.0 +853 43 training.batch_size 0.0 +853 44 model.embedding_dim 0.0 +853 44 model.scoring_fct_norm 2.0 +853 44 optimizer.lr 0.01911486578774442 +853 44 negative_sampler.num_negs_per_pos 29.0 +853 44 training.batch_size 1.0 +853 45 model.embedding_dim 1.0 +853 45 model.scoring_fct_norm 1.0 +853 45 optimizer.lr 0.0022078100402243305 +853 45 negative_sampler.num_negs_per_pos 14.0 +853 45 training.batch_size 1.0 +853 46 model.embedding_dim 0.0 +853 46 model.scoring_fct_norm 2.0 +853 46 optimizer.lr 0.02006629721732934 +853 46 negative_sampler.num_negs_per_pos 27.0 +853 46 training.batch_size 1.0 +853 47 model.embedding_dim 2.0 +853 47 model.scoring_fct_norm 2.0 +853 47 optimizer.lr 0.004095516348899365 +853 47 negative_sampler.num_negs_per_pos 8.0 +853 47 training.batch_size 2.0 +853 48 model.embedding_dim 0.0 +853 48 model.scoring_fct_norm 1.0 +853 48 optimizer.lr 0.024934282783489588 +853 48 negative_sampler.num_negs_per_pos 5.0 +853 48 training.batch_size 1.0 +853 49 model.embedding_dim 0.0 +853 49 model.scoring_fct_norm 2.0 +853 49 optimizer.lr 0.0014621452633507956 +853 49 negative_sampler.num_negs_per_pos 14.0 +853 49 training.batch_size 2.0 +853 50 model.embedding_dim 1.0 +853 50 model.scoring_fct_norm 1.0 +853 50 optimizer.lr 0.05178272092501965 +853 50 negative_sampler.num_negs_per_pos 2.0 +853 50 training.batch_size 3.0 +853 51 model.embedding_dim 2.0 +853 51 model.scoring_fct_norm 2.0 +853 51 optimizer.lr 0.013094742363987388 +853 51 negative_sampler.num_negs_per_pos 46.0 +853 51 training.batch_size 0.0 +853 1 dataset """yago310""" +853 1 model """transe""" +853 1 loss """bceaftersigmoid""" +853 1 regularizer """no""" +853 1 optimizer """adam""" +853 1 training_loop """owa""" +853 1 negative_sampler """basic""" +853 1 evaluator """rankbased""" +853 2 dataset """yago310""" +853 2 model """transe""" +853 2 loss """bceaftersigmoid""" +853 2 regularizer """no""" +853 2 optimizer """adam""" +853 2 training_loop """owa""" +853 2 
negative_sampler """basic""" +853 2 evaluator """rankbased""" +853 3 dataset """yago310""" +853 3 model """transe""" +853 3 loss """bceaftersigmoid""" +853 3 regularizer """no""" +853 3 optimizer """adam""" +853 3 training_loop """owa""" +853 3 negative_sampler """basic""" +853 3 evaluator """rankbased""" +853 4 dataset """yago310""" +853 4 model """transe""" +853 4 loss """bceaftersigmoid""" +853 4 regularizer """no""" +853 4 optimizer """adam""" +853 4 training_loop """owa""" +853 4 negative_sampler """basic""" +853 4 evaluator """rankbased""" +853 5 dataset """yago310""" +853 5 model """transe""" +853 5 loss """bceaftersigmoid""" +853 5 regularizer """no""" +853 5 optimizer """adam""" +853 5 training_loop """owa""" +853 5 negative_sampler """basic""" +853 5 evaluator """rankbased""" +853 6 dataset """yago310""" +853 6 model """transe""" +853 6 loss """bceaftersigmoid""" +853 6 regularizer """no""" +853 6 optimizer """adam""" +853 6 training_loop """owa""" +853 6 negative_sampler """basic""" +853 6 evaluator """rankbased""" +853 7 dataset """yago310""" +853 7 model """transe""" +853 7 loss """bceaftersigmoid""" +853 7 regularizer """no""" +853 7 optimizer """adam""" +853 7 training_loop """owa""" +853 7 negative_sampler """basic""" +853 7 evaluator """rankbased""" +853 8 dataset """yago310""" +853 8 model """transe""" +853 8 loss """bceaftersigmoid""" +853 8 regularizer """no""" +853 8 optimizer """adam""" +853 8 training_loop """owa""" +853 8 negative_sampler """basic""" +853 8 evaluator """rankbased""" +853 9 dataset """yago310""" +853 9 model """transe""" +853 9 loss """bceaftersigmoid""" +853 9 regularizer """no""" +853 9 optimizer """adam""" +853 9 training_loop """owa""" +853 9 negative_sampler """basic""" +853 9 evaluator """rankbased""" +853 10 dataset """yago310""" +853 10 model """transe""" +853 10 loss """bceaftersigmoid""" +853 10 regularizer """no""" +853 10 optimizer """adam""" +853 10 training_loop """owa""" +853 10 negative_sampler """basic""" 
+853 10 evaluator """rankbased""" +853 11 dataset """yago310""" +853 11 model """transe""" +853 11 loss """bceaftersigmoid""" +853 11 regularizer """no""" +853 11 optimizer """adam""" +853 11 training_loop """owa""" +853 11 negative_sampler """basic""" +853 11 evaluator """rankbased""" +853 12 dataset """yago310""" +853 12 model """transe""" +853 12 loss """bceaftersigmoid""" +853 12 regularizer """no""" +853 12 optimizer """adam""" +853 12 training_loop """owa""" +853 12 negative_sampler """basic""" +853 12 evaluator """rankbased""" +853 13 dataset """yago310""" +853 13 model """transe""" +853 13 loss """bceaftersigmoid""" +853 13 regularizer """no""" +853 13 optimizer """adam""" +853 13 training_loop """owa""" +853 13 negative_sampler """basic""" +853 13 evaluator """rankbased""" +853 14 dataset """yago310""" +853 14 model """transe""" +853 14 loss """bceaftersigmoid""" +853 14 regularizer """no""" +853 14 optimizer """adam""" +853 14 training_loop """owa""" +853 14 negative_sampler """basic""" +853 14 evaluator """rankbased""" +853 15 dataset """yago310""" +853 15 model """transe""" +853 15 loss """bceaftersigmoid""" +853 15 regularizer """no""" +853 15 optimizer """adam""" +853 15 training_loop """owa""" +853 15 negative_sampler """basic""" +853 15 evaluator """rankbased""" +853 16 dataset """yago310""" +853 16 model """transe""" +853 16 loss """bceaftersigmoid""" +853 16 regularizer """no""" +853 16 optimizer """adam""" +853 16 training_loop """owa""" +853 16 negative_sampler """basic""" +853 16 evaluator """rankbased""" +853 17 dataset """yago310""" +853 17 model """transe""" +853 17 loss """bceaftersigmoid""" +853 17 regularizer """no""" +853 17 optimizer """adam""" +853 17 training_loop """owa""" +853 17 negative_sampler """basic""" +853 17 evaluator """rankbased""" +853 18 dataset """yago310""" +853 18 model """transe""" +853 18 loss """bceaftersigmoid""" +853 18 regularizer """no""" +853 18 optimizer """adam""" +853 18 training_loop """owa""" +853 18 
negative_sampler """basic""" +853 18 evaluator """rankbased""" +853 19 dataset """yago310""" +853 19 model """transe""" +853 19 loss """bceaftersigmoid""" +853 19 regularizer """no""" +853 19 optimizer """adam""" +853 19 training_loop """owa""" +853 19 negative_sampler """basic""" +853 19 evaluator """rankbased""" +853 20 dataset """yago310""" +853 20 model """transe""" +853 20 loss """bceaftersigmoid""" +853 20 regularizer """no""" +853 20 optimizer """adam""" +853 20 training_loop """owa""" +853 20 negative_sampler """basic""" +853 20 evaluator """rankbased""" +853 21 dataset """yago310""" +853 21 model """transe""" +853 21 loss """bceaftersigmoid""" +853 21 regularizer """no""" +853 21 optimizer """adam""" +853 21 training_loop """owa""" +853 21 negative_sampler """basic""" +853 21 evaluator """rankbased""" +853 22 dataset """yago310""" +853 22 model """transe""" +853 22 loss """bceaftersigmoid""" +853 22 regularizer """no""" +853 22 optimizer """adam""" +853 22 training_loop """owa""" +853 22 negative_sampler """basic""" +853 22 evaluator """rankbased""" +853 23 dataset """yago310""" +853 23 model """transe""" +853 23 loss """bceaftersigmoid""" +853 23 regularizer """no""" +853 23 optimizer """adam""" +853 23 training_loop """owa""" +853 23 negative_sampler """basic""" +853 23 evaluator """rankbased""" +853 24 dataset """yago310""" +853 24 model """transe""" +853 24 loss """bceaftersigmoid""" +853 24 regularizer """no""" +853 24 optimizer """adam""" +853 24 training_loop """owa""" +853 24 negative_sampler """basic""" +853 24 evaluator """rankbased""" +853 25 dataset """yago310""" +853 25 model """transe""" +853 25 loss """bceaftersigmoid""" +853 25 regularizer """no""" +853 25 optimizer """adam""" +853 25 training_loop """owa""" +853 25 negative_sampler """basic""" +853 25 evaluator """rankbased""" +853 26 dataset """yago310""" +853 26 model """transe""" +853 26 loss """bceaftersigmoid""" +853 26 regularizer """no""" +853 26 optimizer """adam""" +853 26 
training_loop """owa""" +853 26 negative_sampler """basic""" +853 26 evaluator """rankbased""" +853 27 dataset """yago310""" +853 27 model """transe""" +853 27 loss """bceaftersigmoid""" +853 27 regularizer """no""" +853 27 optimizer """adam""" +853 27 training_loop """owa""" +853 27 negative_sampler """basic""" +853 27 evaluator """rankbased""" +853 28 dataset """yago310""" +853 28 model """transe""" +853 28 loss """bceaftersigmoid""" +853 28 regularizer """no""" +853 28 optimizer """adam""" +853 28 training_loop """owa""" +853 28 negative_sampler """basic""" +853 28 evaluator """rankbased""" +853 29 dataset """yago310""" +853 29 model """transe""" +853 29 loss """bceaftersigmoid""" +853 29 regularizer """no""" +853 29 optimizer """adam""" +853 29 training_loop """owa""" +853 29 negative_sampler """basic""" +853 29 evaluator """rankbased""" +853 30 dataset """yago310""" +853 30 model """transe""" +853 30 loss """bceaftersigmoid""" +853 30 regularizer """no""" +853 30 optimizer """adam""" +853 30 training_loop """owa""" +853 30 negative_sampler """basic""" +853 30 evaluator """rankbased""" +853 31 dataset """yago310""" +853 31 model """transe""" +853 31 loss """bceaftersigmoid""" +853 31 regularizer """no""" +853 31 optimizer """adam""" +853 31 training_loop """owa""" +853 31 negative_sampler """basic""" +853 31 evaluator """rankbased""" +853 32 dataset """yago310""" +853 32 model """transe""" +853 32 loss """bceaftersigmoid""" +853 32 regularizer """no""" +853 32 optimizer """adam""" +853 32 training_loop """owa""" +853 32 negative_sampler """basic""" +853 32 evaluator """rankbased""" +853 33 dataset """yago310""" +853 33 model """transe""" +853 33 loss """bceaftersigmoid""" +853 33 regularizer """no""" +853 33 optimizer """adam""" +853 33 training_loop """owa""" +853 33 negative_sampler """basic""" +853 33 evaluator """rankbased""" +853 34 dataset """yago310""" +853 34 model """transe""" +853 34 loss """bceaftersigmoid""" +853 34 regularizer """no""" +853 34 
optimizer """adam""" +853 34 training_loop """owa""" +853 34 negative_sampler """basic""" +853 34 evaluator """rankbased""" +853 35 dataset """yago310""" +853 35 model """transe""" +853 35 loss """bceaftersigmoid""" +853 35 regularizer """no""" +853 35 optimizer """adam""" +853 35 training_loop """owa""" +853 35 negative_sampler """basic""" +853 35 evaluator """rankbased""" +853 36 dataset """yago310""" +853 36 model """transe""" +853 36 loss """bceaftersigmoid""" +853 36 regularizer """no""" +853 36 optimizer """adam""" +853 36 training_loop """owa""" +853 36 negative_sampler """basic""" +853 36 evaluator """rankbased""" +853 37 dataset """yago310""" +853 37 model """transe""" +853 37 loss """bceaftersigmoid""" +853 37 regularizer """no""" +853 37 optimizer """adam""" +853 37 training_loop """owa""" +853 37 negative_sampler """basic""" +853 37 evaluator """rankbased""" +853 38 dataset """yago310""" +853 38 model """transe""" +853 38 loss """bceaftersigmoid""" +853 38 regularizer """no""" +853 38 optimizer """adam""" +853 38 training_loop """owa""" +853 38 negative_sampler """basic""" +853 38 evaluator """rankbased""" +853 39 dataset """yago310""" +853 39 model """transe""" +853 39 loss """bceaftersigmoid""" +853 39 regularizer """no""" +853 39 optimizer """adam""" +853 39 training_loop """owa""" +853 39 negative_sampler """basic""" +853 39 evaluator """rankbased""" +853 40 dataset """yago310""" +853 40 model """transe""" +853 40 loss """bceaftersigmoid""" +853 40 regularizer """no""" +853 40 optimizer """adam""" +853 40 training_loop """owa""" +853 40 negative_sampler """basic""" +853 40 evaluator """rankbased""" +853 41 dataset """yago310""" +853 41 model """transe""" +853 41 loss """bceaftersigmoid""" +853 41 regularizer """no""" +853 41 optimizer """adam""" +853 41 training_loop """owa""" +853 41 negative_sampler """basic""" +853 41 evaluator """rankbased""" +853 42 dataset """yago310""" +853 42 model """transe""" +853 42 loss """bceaftersigmoid""" +853 42 
regularizer """no""" +853 42 optimizer """adam""" +853 42 training_loop """owa""" +853 42 negative_sampler """basic""" +853 42 evaluator """rankbased""" +853 43 dataset """yago310""" +853 43 model """transe""" +853 43 loss """bceaftersigmoid""" +853 43 regularizer """no""" +853 43 optimizer """adam""" +853 43 training_loop """owa""" +853 43 negative_sampler """basic""" +853 43 evaluator """rankbased""" +853 44 dataset """yago310""" +853 44 model """transe""" +853 44 loss """bceaftersigmoid""" +853 44 regularizer """no""" +853 44 optimizer """adam""" +853 44 training_loop """owa""" +853 44 negative_sampler """basic""" +853 44 evaluator """rankbased""" +853 45 dataset """yago310""" +853 45 model """transe""" +853 45 loss """bceaftersigmoid""" +853 45 regularizer """no""" +853 45 optimizer """adam""" +853 45 training_loop """owa""" +853 45 negative_sampler """basic""" +853 45 evaluator """rankbased""" +853 46 dataset """yago310""" +853 46 model """transe""" +853 46 loss """bceaftersigmoid""" +853 46 regularizer """no""" +853 46 optimizer """adam""" +853 46 training_loop """owa""" +853 46 negative_sampler """basic""" +853 46 evaluator """rankbased""" +853 47 dataset """yago310""" +853 47 model """transe""" +853 47 loss """bceaftersigmoid""" +853 47 regularizer """no""" +853 47 optimizer """adam""" +853 47 training_loop """owa""" +853 47 negative_sampler """basic""" +853 47 evaluator """rankbased""" +853 48 dataset """yago310""" +853 48 model """transe""" +853 48 loss """bceaftersigmoid""" +853 48 regularizer """no""" +853 48 optimizer """adam""" +853 48 training_loop """owa""" +853 48 negative_sampler """basic""" +853 48 evaluator """rankbased""" +853 49 dataset """yago310""" +853 49 model """transe""" +853 49 loss """bceaftersigmoid""" +853 49 regularizer """no""" +853 49 optimizer """adam""" +853 49 training_loop """owa""" +853 49 negative_sampler """basic""" +853 49 evaluator """rankbased""" +853 50 dataset """yago310""" +853 50 model """transe""" +853 50 loss 
"""bceaftersigmoid""" +853 50 regularizer """no""" +853 50 optimizer """adam""" +853 50 training_loop """owa""" +853 50 negative_sampler """basic""" +853 50 evaluator """rankbased""" +853 51 dataset """yago310""" +853 51 model """transe""" +853 51 loss """bceaftersigmoid""" +853 51 regularizer """no""" +853 51 optimizer """adam""" +853 51 training_loop """owa""" +853 51 negative_sampler """basic""" +853 51 evaluator """rankbased""" +854 1 model.embedding_dim 0.0 +854 1 model.scoring_fct_norm 1.0 +854 1 optimizer.lr 0.009191676395736655 +854 1 negative_sampler.num_negs_per_pos 21.0 +854 1 training.batch_size 0.0 +854 2 model.embedding_dim 2.0 +854 2 model.scoring_fct_norm 2.0 +854 2 optimizer.lr 0.03423846213088298 +854 2 negative_sampler.num_negs_per_pos 35.0 +854 2 training.batch_size 2.0 +854 3 model.embedding_dim 2.0 +854 3 model.scoring_fct_norm 1.0 +854 3 optimizer.lr 0.006232444960406748 +854 3 negative_sampler.num_negs_per_pos 38.0 +854 3 training.batch_size 0.0 +854 4 model.embedding_dim 0.0 +854 4 model.scoring_fct_norm 1.0 +854 4 optimizer.lr 0.0015907341128370593 +854 4 negative_sampler.num_negs_per_pos 3.0 +854 4 training.batch_size 3.0 +854 5 model.embedding_dim 1.0 +854 5 model.scoring_fct_norm 2.0 +854 5 optimizer.lr 0.02431813482686974 +854 5 negative_sampler.num_negs_per_pos 49.0 +854 5 training.batch_size 1.0 +854 6 model.embedding_dim 2.0 +854 6 model.scoring_fct_norm 1.0 +854 6 optimizer.lr 0.019780168759091735 +854 6 negative_sampler.num_negs_per_pos 4.0 +854 6 training.batch_size 0.0 +854 7 model.embedding_dim 2.0 +854 7 model.scoring_fct_norm 1.0 +854 7 optimizer.lr 0.009666829047058198 +854 7 negative_sampler.num_negs_per_pos 32.0 +854 7 training.batch_size 0.0 +854 8 model.embedding_dim 1.0 +854 8 model.scoring_fct_norm 2.0 +854 8 optimizer.lr 0.02302862813367195 +854 8 negative_sampler.num_negs_per_pos 23.0 +854 8 training.batch_size 1.0 +854 9 model.embedding_dim 2.0 +854 9 model.scoring_fct_norm 1.0 +854 9 optimizer.lr 
0.002683099837481165 +854 9 negative_sampler.num_negs_per_pos 38.0 +854 9 training.batch_size 0.0 +854 10 model.embedding_dim 2.0 +854 10 model.scoring_fct_norm 1.0 +854 10 optimizer.lr 0.05548816638512798 +854 10 negative_sampler.num_negs_per_pos 0.0 +854 10 training.batch_size 1.0 +854 11 model.embedding_dim 1.0 +854 11 model.scoring_fct_norm 2.0 +854 11 optimizer.lr 0.005603213208187267 +854 11 negative_sampler.num_negs_per_pos 37.0 +854 11 training.batch_size 3.0 +854 12 model.embedding_dim 2.0 +854 12 model.scoring_fct_norm 1.0 +854 12 optimizer.lr 0.004451844889910041 +854 12 negative_sampler.num_negs_per_pos 36.0 +854 12 training.batch_size 2.0 +854 13 model.embedding_dim 1.0 +854 13 model.scoring_fct_norm 2.0 +854 13 optimizer.lr 0.03581048742858742 +854 13 negative_sampler.num_negs_per_pos 23.0 +854 13 training.batch_size 3.0 +854 14 model.embedding_dim 2.0 +854 14 model.scoring_fct_norm 1.0 +854 14 optimizer.lr 0.04858754208338453 +854 14 negative_sampler.num_negs_per_pos 12.0 +854 14 training.batch_size 3.0 +854 15 model.embedding_dim 2.0 +854 15 model.scoring_fct_norm 2.0 +854 15 optimizer.lr 0.0012834888854845702 +854 15 negative_sampler.num_negs_per_pos 32.0 +854 15 training.batch_size 3.0 +854 16 model.embedding_dim 1.0 +854 16 model.scoring_fct_norm 1.0 +854 16 optimizer.lr 0.0017429613228299608 +854 16 negative_sampler.num_negs_per_pos 40.0 +854 16 training.batch_size 0.0 +854 17 model.embedding_dim 0.0 +854 17 model.scoring_fct_norm 1.0 +854 17 optimizer.lr 0.01667738210830024 +854 17 negative_sampler.num_negs_per_pos 41.0 +854 17 training.batch_size 0.0 +854 18 model.embedding_dim 2.0 +854 18 model.scoring_fct_norm 1.0 +854 18 optimizer.lr 0.001945390701655499 +854 18 negative_sampler.num_negs_per_pos 48.0 +854 18 training.batch_size 0.0 +854 19 model.embedding_dim 2.0 +854 19 model.scoring_fct_norm 1.0 +854 19 optimizer.lr 0.0023556431714156314 +854 19 negative_sampler.num_negs_per_pos 48.0 +854 19 training.batch_size 1.0 +854 20 
model.embedding_dim 2.0 +854 20 model.scoring_fct_norm 1.0 +854 20 optimizer.lr 0.012434593164659552 +854 20 negative_sampler.num_negs_per_pos 42.0 +854 20 training.batch_size 3.0 +854 21 model.embedding_dim 2.0 +854 21 model.scoring_fct_norm 2.0 +854 21 optimizer.lr 0.01107043771945726 +854 21 negative_sampler.num_negs_per_pos 21.0 +854 21 training.batch_size 2.0 +854 22 model.embedding_dim 1.0 +854 22 model.scoring_fct_norm 1.0 +854 22 optimizer.lr 0.05788521713327689 +854 22 negative_sampler.num_negs_per_pos 16.0 +854 22 training.batch_size 3.0 +854 23 model.embedding_dim 2.0 +854 23 model.scoring_fct_norm 2.0 +854 23 optimizer.lr 0.018987642467549776 +854 23 negative_sampler.num_negs_per_pos 26.0 +854 23 training.batch_size 1.0 +854 24 model.embedding_dim 1.0 +854 24 model.scoring_fct_norm 2.0 +854 24 optimizer.lr 0.021482348902508306 +854 24 negative_sampler.num_negs_per_pos 5.0 +854 24 training.batch_size 0.0 +854 25 model.embedding_dim 2.0 +854 25 model.scoring_fct_norm 2.0 +854 25 optimizer.lr 0.0013132085466224575 +854 25 negative_sampler.num_negs_per_pos 23.0 +854 25 training.batch_size 0.0 +854 26 model.embedding_dim 0.0 +854 26 model.scoring_fct_norm 1.0 +854 26 optimizer.lr 0.0012727441259275912 +854 26 negative_sampler.num_negs_per_pos 9.0 +854 26 training.batch_size 1.0 +854 27 model.embedding_dim 2.0 +854 27 model.scoring_fct_norm 2.0 +854 27 optimizer.lr 0.09437688391097852 +854 27 negative_sampler.num_negs_per_pos 15.0 +854 27 training.batch_size 3.0 +854 28 model.embedding_dim 1.0 +854 28 model.scoring_fct_norm 2.0 +854 28 optimizer.lr 0.07260121651873616 +854 28 negative_sampler.num_negs_per_pos 35.0 +854 28 training.batch_size 0.0 +854 29 model.embedding_dim 2.0 +854 29 model.scoring_fct_norm 2.0 +854 29 optimizer.lr 0.0026087388670192344 +854 29 negative_sampler.num_negs_per_pos 34.0 +854 29 training.batch_size 1.0 +854 30 model.embedding_dim 2.0 +854 30 model.scoring_fct_norm 1.0 +854 30 optimizer.lr 0.008731292519338688 +854 30 
negative_sampler.num_negs_per_pos 32.0 +854 30 training.batch_size 0.0 +854 31 model.embedding_dim 0.0 +854 31 model.scoring_fct_norm 1.0 +854 31 optimizer.lr 0.006538903217668805 +854 31 negative_sampler.num_negs_per_pos 26.0 +854 31 training.batch_size 0.0 +854 32 model.embedding_dim 1.0 +854 32 model.scoring_fct_norm 1.0 +854 32 optimizer.lr 0.0034767818992958656 +854 32 negative_sampler.num_negs_per_pos 45.0 +854 32 training.batch_size 0.0 +854 33 model.embedding_dim 0.0 +854 33 model.scoring_fct_norm 2.0 +854 33 optimizer.lr 0.029669650852289918 +854 33 negative_sampler.num_negs_per_pos 38.0 +854 33 training.batch_size 1.0 +854 34 model.embedding_dim 2.0 +854 34 model.scoring_fct_norm 1.0 +854 34 optimizer.lr 0.02405317257071634 +854 34 negative_sampler.num_negs_per_pos 19.0 +854 34 training.batch_size 2.0 +854 35 model.embedding_dim 1.0 +854 35 model.scoring_fct_norm 2.0 +854 35 optimizer.lr 0.002908057201872914 +854 35 negative_sampler.num_negs_per_pos 43.0 +854 35 training.batch_size 3.0 +854 36 model.embedding_dim 1.0 +854 36 model.scoring_fct_norm 2.0 +854 36 optimizer.lr 0.025669801110427996 +854 36 negative_sampler.num_negs_per_pos 0.0 +854 36 training.batch_size 1.0 +854 37 model.embedding_dim 1.0 +854 37 model.scoring_fct_norm 1.0 +854 37 optimizer.lr 0.034956493247753 +854 37 negative_sampler.num_negs_per_pos 5.0 +854 37 training.batch_size 1.0 +854 38 model.embedding_dim 1.0 +854 38 model.scoring_fct_norm 1.0 +854 38 optimizer.lr 0.08397889902244637 +854 38 negative_sampler.num_negs_per_pos 10.0 +854 38 training.batch_size 1.0 +854 39 model.embedding_dim 0.0 +854 39 model.scoring_fct_norm 1.0 +854 39 optimizer.lr 0.09649922497046948 +854 39 negative_sampler.num_negs_per_pos 41.0 +854 39 training.batch_size 0.0 +854 40 model.embedding_dim 0.0 +854 40 model.scoring_fct_norm 1.0 +854 40 optimizer.lr 0.07536920068816111 +854 40 negative_sampler.num_negs_per_pos 22.0 +854 40 training.batch_size 1.0 +854 41 model.embedding_dim 0.0 +854 41 
model.scoring_fct_norm 1.0 +854 41 optimizer.lr 0.002216935720827088 +854 41 negative_sampler.num_negs_per_pos 20.0 +854 41 training.batch_size 2.0 +854 42 model.embedding_dim 1.0 +854 42 model.scoring_fct_norm 1.0 +854 42 optimizer.lr 0.002182847078775276 +854 42 negative_sampler.num_negs_per_pos 40.0 +854 42 training.batch_size 0.0 +854 43 model.embedding_dim 1.0 +854 43 model.scoring_fct_norm 1.0 +854 43 optimizer.lr 0.015385014343720303 +854 43 negative_sampler.num_negs_per_pos 1.0 +854 43 training.batch_size 2.0 +854 44 model.embedding_dim 2.0 +854 44 model.scoring_fct_norm 1.0 +854 44 optimizer.lr 0.0013763371034567551 +854 44 negative_sampler.num_negs_per_pos 22.0 +854 44 training.batch_size 0.0 +854 45 model.embedding_dim 1.0 +854 45 model.scoring_fct_norm 1.0 +854 45 optimizer.lr 0.009771544776867234 +854 45 negative_sampler.num_negs_per_pos 31.0 +854 45 training.batch_size 2.0 +854 46 model.embedding_dim 2.0 +854 46 model.scoring_fct_norm 1.0 +854 46 optimizer.lr 0.004472751889101309 +854 46 negative_sampler.num_negs_per_pos 11.0 +854 46 training.batch_size 1.0 +854 47 model.embedding_dim 2.0 +854 47 model.scoring_fct_norm 2.0 +854 47 optimizer.lr 0.0011406025157011097 +854 47 negative_sampler.num_negs_per_pos 44.0 +854 47 training.batch_size 1.0 +854 48 model.embedding_dim 2.0 +854 48 model.scoring_fct_norm 1.0 +854 48 optimizer.lr 0.004601626907993296 +854 48 negative_sampler.num_negs_per_pos 34.0 +854 48 training.batch_size 2.0 +854 49 model.embedding_dim 1.0 +854 49 model.scoring_fct_norm 2.0 +854 49 optimizer.lr 0.007417061837981143 +854 49 negative_sampler.num_negs_per_pos 9.0 +854 49 training.batch_size 1.0 +854 50 model.embedding_dim 1.0 +854 50 model.scoring_fct_norm 1.0 +854 50 optimizer.lr 0.026856183960075553 +854 50 negative_sampler.num_negs_per_pos 47.0 +854 50 training.batch_size 2.0 +854 51 model.embedding_dim 2.0 +854 51 model.scoring_fct_norm 2.0 +854 51 optimizer.lr 0.004220074780610927 +854 51 negative_sampler.num_negs_per_pos 15.0 
+854 51 training.batch_size 2.0 +854 52 model.embedding_dim 0.0 +854 52 model.scoring_fct_norm 2.0 +854 52 optimizer.lr 0.006137168977756311 +854 52 negative_sampler.num_negs_per_pos 25.0 +854 52 training.batch_size 3.0 +854 53 model.embedding_dim 0.0 +854 53 model.scoring_fct_norm 2.0 +854 53 optimizer.lr 0.0309277539447034 +854 53 negative_sampler.num_negs_per_pos 4.0 +854 53 training.batch_size 3.0 +854 54 model.embedding_dim 0.0 +854 54 model.scoring_fct_norm 1.0 +854 54 optimizer.lr 0.006774688418111782 +854 54 negative_sampler.num_negs_per_pos 1.0 +854 54 training.batch_size 3.0 +854 55 model.embedding_dim 0.0 +854 55 model.scoring_fct_norm 1.0 +854 55 optimizer.lr 0.017236312256247964 +854 55 negative_sampler.num_negs_per_pos 32.0 +854 55 training.batch_size 2.0 +854 56 model.embedding_dim 0.0 +854 56 model.scoring_fct_norm 1.0 +854 56 optimizer.lr 0.0023033907696780737 +854 56 negative_sampler.num_negs_per_pos 14.0 +854 56 training.batch_size 2.0 +854 57 model.embedding_dim 1.0 +854 57 model.scoring_fct_norm 2.0 +854 57 optimizer.lr 0.00526545502537305 +854 57 negative_sampler.num_negs_per_pos 12.0 +854 57 training.batch_size 2.0 +854 58 model.embedding_dim 2.0 +854 58 model.scoring_fct_norm 2.0 +854 58 optimizer.lr 0.011201516037268735 +854 58 negative_sampler.num_negs_per_pos 32.0 +854 58 training.batch_size 0.0 +854 59 model.embedding_dim 1.0 +854 59 model.scoring_fct_norm 2.0 +854 59 optimizer.lr 0.001225095995118682 +854 59 negative_sampler.num_negs_per_pos 20.0 +854 59 training.batch_size 2.0 +854 60 model.embedding_dim 0.0 +854 60 model.scoring_fct_norm 1.0 +854 60 optimizer.lr 0.02144609152793345 +854 60 negative_sampler.num_negs_per_pos 20.0 +854 60 training.batch_size 3.0 +854 61 model.embedding_dim 1.0 +854 61 model.scoring_fct_norm 1.0 +854 61 optimizer.lr 0.00536569784263242 +854 61 negative_sampler.num_negs_per_pos 46.0 +854 61 training.batch_size 1.0 +854 62 model.embedding_dim 2.0 +854 62 model.scoring_fct_norm 1.0 +854 62 optimizer.lr 
0.022605188795627916 +854 62 negative_sampler.num_negs_per_pos 40.0 +854 62 training.batch_size 1.0 +854 63 model.embedding_dim 2.0 +854 63 model.scoring_fct_norm 2.0 +854 63 optimizer.lr 0.02239040257966339 +854 63 negative_sampler.num_negs_per_pos 46.0 +854 63 training.batch_size 3.0 +854 64 model.embedding_dim 0.0 +854 64 model.scoring_fct_norm 2.0 +854 64 optimizer.lr 0.014889802216089577 +854 64 negative_sampler.num_negs_per_pos 16.0 +854 64 training.batch_size 2.0 +854 65 model.embedding_dim 2.0 +854 65 model.scoring_fct_norm 2.0 +854 65 optimizer.lr 0.043592062482552214 +854 65 negative_sampler.num_negs_per_pos 42.0 +854 65 training.batch_size 3.0 +854 66 model.embedding_dim 1.0 +854 66 model.scoring_fct_norm 2.0 +854 66 optimizer.lr 0.001054340997678809 +854 66 negative_sampler.num_negs_per_pos 4.0 +854 66 training.batch_size 0.0 +854 67 model.embedding_dim 2.0 +854 67 model.scoring_fct_norm 1.0 +854 67 optimizer.lr 0.028947420787038734 +854 67 negative_sampler.num_negs_per_pos 24.0 +854 67 training.batch_size 2.0 +854 68 model.embedding_dim 0.0 +854 68 model.scoring_fct_norm 2.0 +854 68 optimizer.lr 0.013250016925213451 +854 68 negative_sampler.num_negs_per_pos 7.0 +854 68 training.batch_size 0.0 +854 69 model.embedding_dim 0.0 +854 69 model.scoring_fct_norm 1.0 +854 69 optimizer.lr 0.024880264797222638 +854 69 negative_sampler.num_negs_per_pos 18.0 +854 69 training.batch_size 0.0 +854 70 model.embedding_dim 1.0 +854 70 model.scoring_fct_norm 2.0 +854 70 optimizer.lr 0.027899272519049334 +854 70 negative_sampler.num_negs_per_pos 27.0 +854 70 training.batch_size 2.0 +854 71 model.embedding_dim 0.0 +854 71 model.scoring_fct_norm 1.0 +854 71 optimizer.lr 0.005370855987710215 +854 71 negative_sampler.num_negs_per_pos 37.0 +854 71 training.batch_size 2.0 +854 72 model.embedding_dim 1.0 +854 72 model.scoring_fct_norm 1.0 +854 72 optimizer.lr 0.03866861583304049 +854 72 negative_sampler.num_negs_per_pos 40.0 +854 72 training.batch_size 3.0 +854 73 
model.embedding_dim 0.0 +854 73 model.scoring_fct_norm 2.0 +854 73 optimizer.lr 0.008997848804503277 +854 73 negative_sampler.num_negs_per_pos 12.0 +854 73 training.batch_size 0.0 +854 74 model.embedding_dim 0.0 +854 74 model.scoring_fct_norm 1.0 +854 74 optimizer.lr 0.008544136945360497 +854 74 negative_sampler.num_negs_per_pos 21.0 +854 74 training.batch_size 2.0 +854 75 model.embedding_dim 1.0 +854 75 model.scoring_fct_norm 1.0 +854 75 optimizer.lr 0.003823780240680582 +854 75 negative_sampler.num_negs_per_pos 8.0 +854 75 training.batch_size 0.0 +854 76 model.embedding_dim 2.0 +854 76 model.scoring_fct_norm 1.0 +854 76 optimizer.lr 0.004727760663494246 +854 76 negative_sampler.num_negs_per_pos 21.0 +854 76 training.batch_size 2.0 +854 1 dataset """yago310""" +854 1 model """transe""" +854 1 loss """bceaftersigmoid""" +854 1 regularizer """no""" +854 1 optimizer """adam""" +854 1 training_loop """owa""" +854 1 negative_sampler """basic""" +854 1 evaluator """rankbased""" +854 2 dataset """yago310""" +854 2 model """transe""" +854 2 loss """bceaftersigmoid""" +854 2 regularizer """no""" +854 2 optimizer """adam""" +854 2 training_loop """owa""" +854 2 negative_sampler """basic""" +854 2 evaluator """rankbased""" +854 3 dataset """yago310""" +854 3 model """transe""" +854 3 loss """bceaftersigmoid""" +854 3 regularizer """no""" +854 3 optimizer """adam""" +854 3 training_loop """owa""" +854 3 negative_sampler """basic""" +854 3 evaluator """rankbased""" +854 4 dataset """yago310""" +854 4 model """transe""" +854 4 loss """bceaftersigmoid""" +854 4 regularizer """no""" +854 4 optimizer """adam""" +854 4 training_loop """owa""" +854 4 negative_sampler """basic""" +854 4 evaluator """rankbased""" +854 5 dataset """yago310""" +854 5 model """transe""" +854 5 loss """bceaftersigmoid""" +854 5 regularizer """no""" +854 5 optimizer """adam""" +854 5 training_loop """owa""" +854 5 negative_sampler """basic""" +854 5 evaluator """rankbased""" +854 6 dataset """yago310""" 
+854 6 model """transe""" +854 6 loss """bceaftersigmoid""" +854 6 regularizer """no""" +854 6 optimizer """adam""" +854 6 training_loop """owa""" +854 6 negative_sampler """basic""" +854 6 evaluator """rankbased""" +854 7 dataset """yago310""" +854 7 model """transe""" +854 7 loss """bceaftersigmoid""" +854 7 regularizer """no""" +854 7 optimizer """adam""" +854 7 training_loop """owa""" +854 7 negative_sampler """basic""" +854 7 evaluator """rankbased""" +854 8 dataset """yago310""" +854 8 model """transe""" +854 8 loss """bceaftersigmoid""" +854 8 regularizer """no""" +854 8 optimizer """adam""" +854 8 training_loop """owa""" +854 8 negative_sampler """basic""" +854 8 evaluator """rankbased""" +854 9 dataset """yago310""" +854 9 model """transe""" +854 9 loss """bceaftersigmoid""" +854 9 regularizer """no""" +854 9 optimizer """adam""" +854 9 training_loop """owa""" +854 9 negative_sampler """basic""" +854 9 evaluator """rankbased""" +854 10 dataset """yago310""" +854 10 model """transe""" +854 10 loss """bceaftersigmoid""" +854 10 regularizer """no""" +854 10 optimizer """adam""" +854 10 training_loop """owa""" +854 10 negative_sampler """basic""" +854 10 evaluator """rankbased""" +854 11 dataset """yago310""" +854 11 model """transe""" +854 11 loss """bceaftersigmoid""" +854 11 regularizer """no""" +854 11 optimizer """adam""" +854 11 training_loop """owa""" +854 11 negative_sampler """basic""" +854 11 evaluator """rankbased""" +854 12 dataset """yago310""" +854 12 model """transe""" +854 12 loss """bceaftersigmoid""" +854 12 regularizer """no""" +854 12 optimizer """adam""" +854 12 training_loop """owa""" +854 12 negative_sampler """basic""" +854 12 evaluator """rankbased""" +854 13 dataset """yago310""" +854 13 model """transe""" +854 13 loss """bceaftersigmoid""" +854 13 regularizer """no""" +854 13 optimizer """adam""" +854 13 training_loop """owa""" +854 13 negative_sampler """basic""" +854 13 evaluator """rankbased""" +854 14 dataset """yago310""" +854 
14 model """transe""" +854 14 loss """bceaftersigmoid""" +854 14 regularizer """no""" +854 14 optimizer """adam""" +854 14 training_loop """owa""" +854 14 negative_sampler """basic""" +854 14 evaluator """rankbased""" +854 15 dataset """yago310""" +854 15 model """transe""" +854 15 loss """bceaftersigmoid""" +854 15 regularizer """no""" +854 15 optimizer """adam""" +854 15 training_loop """owa""" +854 15 negative_sampler """basic""" +854 15 evaluator """rankbased""" +854 16 dataset """yago310""" +854 16 model """transe""" +854 16 loss """bceaftersigmoid""" +854 16 regularizer """no""" +854 16 optimizer """adam""" +854 16 training_loop """owa""" +854 16 negative_sampler """basic""" +854 16 evaluator """rankbased""" +854 17 dataset """yago310""" +854 17 model """transe""" +854 17 loss """bceaftersigmoid""" +854 17 regularizer """no""" +854 17 optimizer """adam""" +854 17 training_loop """owa""" +854 17 negative_sampler """basic""" +854 17 evaluator """rankbased""" +854 18 dataset """yago310""" +854 18 model """transe""" +854 18 loss """bceaftersigmoid""" +854 18 regularizer """no""" +854 18 optimizer """adam""" +854 18 training_loop """owa""" +854 18 negative_sampler """basic""" +854 18 evaluator """rankbased""" +854 19 dataset """yago310""" +854 19 model """transe""" +854 19 loss """bceaftersigmoid""" +854 19 regularizer """no""" +854 19 optimizer """adam""" +854 19 training_loop """owa""" +854 19 negative_sampler """basic""" +854 19 evaluator """rankbased""" +854 20 dataset """yago310""" +854 20 model """transe""" +854 20 loss """bceaftersigmoid""" +854 20 regularizer """no""" +854 20 optimizer """adam""" +854 20 training_loop """owa""" +854 20 negative_sampler """basic""" +854 20 evaluator """rankbased""" +854 21 dataset """yago310""" +854 21 model """transe""" +854 21 loss """bceaftersigmoid""" +854 21 regularizer """no""" +854 21 optimizer """adam""" +854 21 training_loop """owa""" +854 21 negative_sampler """basic""" +854 21 evaluator """rankbased""" +854 22 
dataset """yago310""" +854 22 model """transe""" +854 22 loss """bceaftersigmoid""" +854 22 regularizer """no""" +854 22 optimizer """adam""" +854 22 training_loop """owa""" +854 22 negative_sampler """basic""" +854 22 evaluator """rankbased""" +854 23 dataset """yago310""" +854 23 model """transe""" +854 23 loss """bceaftersigmoid""" +854 23 regularizer """no""" +854 23 optimizer """adam""" +854 23 training_loop """owa""" +854 23 negative_sampler """basic""" +854 23 evaluator """rankbased""" +854 24 dataset """yago310""" +854 24 model """transe""" +854 24 loss """bceaftersigmoid""" +854 24 regularizer """no""" +854 24 optimizer """adam""" +854 24 training_loop """owa""" +854 24 negative_sampler """basic""" +854 24 evaluator """rankbased""" +854 25 dataset """yago310""" +854 25 model """transe""" +854 25 loss """bceaftersigmoid""" +854 25 regularizer """no""" +854 25 optimizer """adam""" +854 25 training_loop """owa""" +854 25 negative_sampler """basic""" +854 25 evaluator """rankbased""" +854 26 dataset """yago310""" +854 26 model """transe""" +854 26 loss """bceaftersigmoid""" +854 26 regularizer """no""" +854 26 optimizer """adam""" +854 26 training_loop """owa""" +854 26 negative_sampler """basic""" +854 26 evaluator """rankbased""" +854 27 dataset """yago310""" +854 27 model """transe""" +854 27 loss """bceaftersigmoid""" +854 27 regularizer """no""" +854 27 optimizer """adam""" +854 27 training_loop """owa""" +854 27 negative_sampler """basic""" +854 27 evaluator """rankbased""" +854 28 dataset """yago310""" +854 28 model """transe""" +854 28 loss """bceaftersigmoid""" +854 28 regularizer """no""" +854 28 optimizer """adam""" +854 28 training_loop """owa""" +854 28 negative_sampler """basic""" +854 28 evaluator """rankbased""" +854 29 dataset """yago310""" +854 29 model """transe""" +854 29 loss """bceaftersigmoid""" +854 29 regularizer """no""" +854 29 optimizer """adam""" +854 29 training_loop """owa""" +854 29 negative_sampler """basic""" +854 29 evaluator 
"""rankbased""" +854 30 dataset """yago310""" +854 30 model """transe""" +854 30 loss """bceaftersigmoid""" +854 30 regularizer """no""" +854 30 optimizer """adam""" +854 30 training_loop """owa""" +854 30 negative_sampler """basic""" +854 30 evaluator """rankbased""" +854 31 dataset """yago310""" +854 31 model """transe""" +854 31 loss """bceaftersigmoid""" +854 31 regularizer """no""" +854 31 optimizer """adam""" +854 31 training_loop """owa""" +854 31 negative_sampler """basic""" +854 31 evaluator """rankbased""" +854 32 dataset """yago310""" +854 32 model """transe""" +854 32 loss """bceaftersigmoid""" +854 32 regularizer """no""" +854 32 optimizer """adam""" +854 32 training_loop """owa""" +854 32 negative_sampler """basic""" +854 32 evaluator """rankbased""" +854 33 dataset """yago310""" +854 33 model """transe""" +854 33 loss """bceaftersigmoid""" +854 33 regularizer """no""" +854 33 optimizer """adam""" +854 33 training_loop """owa""" +854 33 negative_sampler """basic""" +854 33 evaluator """rankbased""" +854 34 dataset """yago310""" +854 34 model """transe""" +854 34 loss """bceaftersigmoid""" +854 34 regularizer """no""" +854 34 optimizer """adam""" +854 34 training_loop """owa""" +854 34 negative_sampler """basic""" +854 34 evaluator """rankbased""" +854 35 dataset """yago310""" +854 35 model """transe""" +854 35 loss """bceaftersigmoid""" +854 35 regularizer """no""" +854 35 optimizer """adam""" +854 35 training_loop """owa""" +854 35 negative_sampler """basic""" +854 35 evaluator """rankbased""" +854 36 dataset """yago310""" +854 36 model """transe""" +854 36 loss """bceaftersigmoid""" +854 36 regularizer """no""" +854 36 optimizer """adam""" +854 36 training_loop """owa""" +854 36 negative_sampler """basic""" +854 36 evaluator """rankbased""" +854 37 dataset """yago310""" +854 37 model """transe""" +854 37 loss """bceaftersigmoid""" +854 37 regularizer """no""" +854 37 optimizer """adam""" +854 37 training_loop """owa""" +854 37 negative_sampler 
"""basic""" +854 37 evaluator """rankbased""" +854 38 dataset """yago310""" +854 38 model """transe""" +854 38 loss """bceaftersigmoid""" +854 38 regularizer """no""" +854 38 optimizer """adam""" +854 38 training_loop """owa""" +854 38 negative_sampler """basic""" +854 38 evaluator """rankbased""" +854 39 dataset """yago310""" +854 39 model """transe""" +854 39 loss """bceaftersigmoid""" +854 39 regularizer """no""" +854 39 optimizer """adam""" +854 39 training_loop """owa""" +854 39 negative_sampler """basic""" +854 39 evaluator """rankbased""" +854 40 dataset """yago310""" +854 40 model """transe""" +854 40 loss """bceaftersigmoid""" +854 40 regularizer """no""" +854 40 optimizer """adam""" +854 40 training_loop """owa""" +854 40 negative_sampler """basic""" +854 40 evaluator """rankbased""" +854 41 dataset """yago310""" +854 41 model """transe""" +854 41 loss """bceaftersigmoid""" +854 41 regularizer """no""" +854 41 optimizer """adam""" +854 41 training_loop """owa""" +854 41 negative_sampler """basic""" +854 41 evaluator """rankbased""" +854 42 dataset """yago310""" +854 42 model """transe""" +854 42 loss """bceaftersigmoid""" +854 42 regularizer """no""" +854 42 optimizer """adam""" +854 42 training_loop """owa""" +854 42 negative_sampler """basic""" +854 42 evaluator """rankbased""" +854 43 dataset """yago310""" +854 43 model """transe""" +854 43 loss """bceaftersigmoid""" +854 43 regularizer """no""" +854 43 optimizer """adam""" +854 43 training_loop """owa""" +854 43 negative_sampler """basic""" +854 43 evaluator """rankbased""" +854 44 dataset """yago310""" +854 44 model """transe""" +854 44 loss """bceaftersigmoid""" +854 44 regularizer """no""" +854 44 optimizer """adam""" +854 44 training_loop """owa""" +854 44 negative_sampler """basic""" +854 44 evaluator """rankbased""" +854 45 dataset """yago310""" +854 45 model """transe""" +854 45 loss """bceaftersigmoid""" +854 45 regularizer """no""" +854 45 optimizer """adam""" +854 45 training_loop """owa""" 
+854 45 negative_sampler """basic""" +854 45 evaluator """rankbased""" +854 46 dataset """yago310""" +854 46 model """transe""" +854 46 loss """bceaftersigmoid""" +854 46 regularizer """no""" +854 46 optimizer """adam""" +854 46 training_loop """owa""" +854 46 negative_sampler """basic""" +854 46 evaluator """rankbased""" +854 47 dataset """yago310""" +854 47 model """transe""" +854 47 loss """bceaftersigmoid""" +854 47 regularizer """no""" +854 47 optimizer """adam""" +854 47 training_loop """owa""" +854 47 negative_sampler """basic""" +854 47 evaluator """rankbased""" +854 48 dataset """yago310""" +854 48 model """transe""" +854 48 loss """bceaftersigmoid""" +854 48 regularizer """no""" +854 48 optimizer """adam""" +854 48 training_loop """owa""" +854 48 negative_sampler """basic""" +854 48 evaluator """rankbased""" +854 49 dataset """yago310""" +854 49 model """transe""" +854 49 loss """bceaftersigmoid""" +854 49 regularizer """no""" +854 49 optimizer """adam""" +854 49 training_loop """owa""" +854 49 negative_sampler """basic""" +854 49 evaluator """rankbased""" +854 50 dataset """yago310""" +854 50 model """transe""" +854 50 loss """bceaftersigmoid""" +854 50 regularizer """no""" +854 50 optimizer """adam""" +854 50 training_loop """owa""" +854 50 negative_sampler """basic""" +854 50 evaluator """rankbased""" +854 51 dataset """yago310""" +854 51 model """transe""" +854 51 loss """bceaftersigmoid""" +854 51 regularizer """no""" +854 51 optimizer """adam""" +854 51 training_loop """owa""" +854 51 negative_sampler """basic""" +854 51 evaluator """rankbased""" +854 52 dataset """yago310""" +854 52 model """transe""" +854 52 loss """bceaftersigmoid""" +854 52 regularizer """no""" +854 52 optimizer """adam""" +854 52 training_loop """owa""" +854 52 negative_sampler """basic""" +854 52 evaluator """rankbased""" +854 53 dataset """yago310""" +854 53 model """transe""" +854 53 loss """bceaftersigmoid""" +854 53 regularizer """no""" +854 53 optimizer """adam""" +854 53 
training_loop """owa""" +854 53 negative_sampler """basic""" +854 53 evaluator """rankbased""" +854 54 dataset """yago310""" +854 54 model """transe""" +854 54 loss """bceaftersigmoid""" +854 54 regularizer """no""" +854 54 optimizer """adam""" +854 54 training_loop """owa""" +854 54 negative_sampler """basic""" +854 54 evaluator """rankbased""" +854 55 dataset """yago310""" +854 55 model """transe""" +854 55 loss """bceaftersigmoid""" +854 55 regularizer """no""" +854 55 optimizer """adam""" +854 55 training_loop """owa""" +854 55 negative_sampler """basic""" +854 55 evaluator """rankbased""" +854 56 dataset """yago310""" +854 56 model """transe""" +854 56 loss """bceaftersigmoid""" +854 56 regularizer """no""" +854 56 optimizer """adam""" +854 56 training_loop """owa""" +854 56 negative_sampler """basic""" +854 56 evaluator """rankbased""" +854 57 dataset """yago310""" +854 57 model """transe""" +854 57 loss """bceaftersigmoid""" +854 57 regularizer """no""" +854 57 optimizer """adam""" +854 57 training_loop """owa""" +854 57 negative_sampler """basic""" +854 57 evaluator """rankbased""" +854 58 dataset """yago310""" +854 58 model """transe""" +854 58 loss """bceaftersigmoid""" +854 58 regularizer """no""" +854 58 optimizer """adam""" +854 58 training_loop """owa""" +854 58 negative_sampler """basic""" +854 58 evaluator """rankbased""" +854 59 dataset """yago310""" +854 59 model """transe""" +854 59 loss """bceaftersigmoid""" +854 59 regularizer """no""" +854 59 optimizer """adam""" +854 59 training_loop """owa""" +854 59 negative_sampler """basic""" +854 59 evaluator """rankbased""" +854 60 dataset """yago310""" +854 60 model """transe""" +854 60 loss """bceaftersigmoid""" +854 60 regularizer """no""" +854 60 optimizer """adam""" +854 60 training_loop """owa""" +854 60 negative_sampler """basic""" +854 60 evaluator """rankbased""" +854 61 dataset """yago310""" +854 61 model """transe""" +854 61 loss """bceaftersigmoid""" +854 61 regularizer """no""" +854 61 
optimizer """adam""" +854 61 training_loop """owa""" +854 61 negative_sampler """basic""" +854 61 evaluator """rankbased""" +854 62 dataset """yago310""" +854 62 model """transe""" +854 62 loss """bceaftersigmoid""" +854 62 regularizer """no""" +854 62 optimizer """adam""" +854 62 training_loop """owa""" +854 62 negative_sampler """basic""" +854 62 evaluator """rankbased""" +854 63 dataset """yago310""" +854 63 model """transe""" +854 63 loss """bceaftersigmoid""" +854 63 regularizer """no""" +854 63 optimizer """adam""" +854 63 training_loop """owa""" +854 63 negative_sampler """basic""" +854 63 evaluator """rankbased""" +854 64 dataset """yago310""" +854 64 model """transe""" +854 64 loss """bceaftersigmoid""" +854 64 regularizer """no""" +854 64 optimizer """adam""" +854 64 training_loop """owa""" +854 64 negative_sampler """basic""" +854 64 evaluator """rankbased""" +854 65 dataset """yago310""" +854 65 model """transe""" +854 65 loss """bceaftersigmoid""" +854 65 regularizer """no""" +854 65 optimizer """adam""" +854 65 training_loop """owa""" +854 65 negative_sampler """basic""" +854 65 evaluator """rankbased""" +854 66 dataset """yago310""" +854 66 model """transe""" +854 66 loss """bceaftersigmoid""" +854 66 regularizer """no""" +854 66 optimizer """adam""" +854 66 training_loop """owa""" +854 66 negative_sampler """basic""" +854 66 evaluator """rankbased""" +854 67 dataset """yago310""" +854 67 model """transe""" +854 67 loss """bceaftersigmoid""" +854 67 regularizer """no""" +854 67 optimizer """adam""" +854 67 training_loop """owa""" +854 67 negative_sampler """basic""" +854 67 evaluator """rankbased""" +854 68 dataset """yago310""" +854 68 model """transe""" +854 68 loss """bceaftersigmoid""" +854 68 regularizer """no""" +854 68 optimizer """adam""" +854 68 training_loop """owa""" +854 68 negative_sampler """basic""" +854 68 evaluator """rankbased""" +854 69 dataset """yago310""" +854 69 model """transe""" +854 69 loss """bceaftersigmoid""" +854 69 
regularizer """no""" +854 69 optimizer """adam""" +854 69 training_loop """owa""" +854 69 negative_sampler """basic""" +854 69 evaluator """rankbased""" +854 70 dataset """yago310""" +854 70 model """transe""" +854 70 loss """bceaftersigmoid""" +854 70 regularizer """no""" +854 70 optimizer """adam""" +854 70 training_loop """owa""" +854 70 negative_sampler """basic""" +854 70 evaluator """rankbased""" +854 71 dataset """yago310""" +854 71 model """transe""" +854 71 loss """bceaftersigmoid""" +854 71 regularizer """no""" +854 71 optimizer """adam""" +854 71 training_loop """owa""" +854 71 negative_sampler """basic""" +854 71 evaluator """rankbased""" +854 72 dataset """yago310""" +854 72 model """transe""" +854 72 loss """bceaftersigmoid""" +854 72 regularizer """no""" +854 72 optimizer """adam""" +854 72 training_loop """owa""" +854 72 negative_sampler """basic""" +854 72 evaluator """rankbased""" +854 73 dataset """yago310""" +854 73 model """transe""" +854 73 loss """bceaftersigmoid""" +854 73 regularizer """no""" +854 73 optimizer """adam""" +854 73 training_loop """owa""" +854 73 negative_sampler """basic""" +854 73 evaluator """rankbased""" +854 74 dataset """yago310""" +854 74 model """transe""" +854 74 loss """bceaftersigmoid""" +854 74 regularizer """no""" +854 74 optimizer """adam""" +854 74 training_loop """owa""" +854 74 negative_sampler """basic""" +854 74 evaluator """rankbased""" +854 75 dataset """yago310""" +854 75 model """transe""" +854 75 loss """bceaftersigmoid""" +854 75 regularizer """no""" +854 75 optimizer """adam""" +854 75 training_loop """owa""" +854 75 negative_sampler """basic""" +854 75 evaluator """rankbased""" +854 76 dataset """yago310""" +854 76 model """transe""" +854 76 loss """bceaftersigmoid""" +854 76 regularizer """no""" +854 76 optimizer """adam""" +854 76 training_loop """owa""" +854 76 negative_sampler """basic""" +854 76 evaluator """rankbased""" +855 1 model.embedding_dim 0.0 +855 1 model.scoring_fct_norm 1.0 +855 1 
regularizer.weight 0.2608238116948514 +855 1 optimizer.lr 0.052231514063987614 +855 1 negative_sampler.num_negs_per_pos 32.0 +855 1 training.batch_size 0.0 +855 2 model.embedding_dim 2.0 +855 2 model.scoring_fct_norm 1.0 +855 2 regularizer.weight 0.04249605442687954 +855 2 optimizer.lr 0.020677442892248685 +855 2 negative_sampler.num_negs_per_pos 76.0 +855 2 training.batch_size 1.0 +855 3 model.embedding_dim 1.0 +855 3 model.scoring_fct_norm 2.0 +855 3 regularizer.weight 0.150732263483491 +855 3 optimizer.lr 0.07035405013070037 +855 3 negative_sampler.num_negs_per_pos 85.0 +855 3 training.batch_size 0.0 +855 4 model.embedding_dim 2.0 +855 4 model.scoring_fct_norm 2.0 +855 4 regularizer.weight 0.18379303531360228 +855 4 optimizer.lr 0.0013373857278150894 +855 4 negative_sampler.num_negs_per_pos 84.0 +855 4 training.batch_size 2.0 +855 5 model.embedding_dim 0.0 +855 5 model.scoring_fct_norm 2.0 +855 5 regularizer.weight 0.26690221280339904 +855 5 optimizer.lr 0.0025045847122644746 +855 5 negative_sampler.num_negs_per_pos 73.0 +855 5 training.batch_size 0.0 +855 6 model.embedding_dim 0.0 +855 6 model.scoring_fct_norm 1.0 +855 6 regularizer.weight 0.027792983902025618 +855 6 optimizer.lr 0.008268497250768019 +855 6 negative_sampler.num_negs_per_pos 36.0 +855 6 training.batch_size 1.0 +855 7 model.embedding_dim 0.0 +855 7 model.scoring_fct_norm 2.0 +855 7 regularizer.weight 0.028736896287998123 +855 7 optimizer.lr 0.019473884270830022 +855 7 negative_sampler.num_negs_per_pos 27.0 +855 7 training.batch_size 2.0 +855 8 model.embedding_dim 2.0 +855 8 model.scoring_fct_norm 1.0 +855 8 regularizer.weight 0.2591181967774633 +855 8 optimizer.lr 0.0017243096808687518 +855 8 negative_sampler.num_negs_per_pos 27.0 +855 8 training.batch_size 1.0 +855 9 model.embedding_dim 2.0 +855 9 model.scoring_fct_norm 1.0 +855 9 regularizer.weight 0.132136364419816 +855 9 optimizer.lr 0.005116492535365195 +855 9 negative_sampler.num_negs_per_pos 63.0 +855 9 training.batch_size 1.0 +855 10 
model.embedding_dim 1.0 +855 10 model.scoring_fct_norm 2.0 +855 10 regularizer.weight 0.07759577284700782 +855 10 optimizer.lr 0.005027377124639394 +855 10 negative_sampler.num_negs_per_pos 66.0 +855 10 training.batch_size 2.0 +855 11 model.embedding_dim 2.0 +855 11 model.scoring_fct_norm 1.0 +855 11 regularizer.weight 0.08407889061665663 +855 11 optimizer.lr 0.009987191263188167 +855 11 negative_sampler.num_negs_per_pos 15.0 +855 11 training.batch_size 1.0 +855 12 model.embedding_dim 1.0 +855 12 model.scoring_fct_norm 1.0 +855 12 regularizer.weight 0.09265651791015218 +855 12 optimizer.lr 0.0037190018355203187 +855 12 negative_sampler.num_negs_per_pos 38.0 +855 12 training.batch_size 1.0 +855 13 model.embedding_dim 1.0 +855 13 model.scoring_fct_norm 1.0 +855 13 regularizer.weight 0.09466831881350511 +855 13 optimizer.lr 0.0014367186394609306 +855 13 negative_sampler.num_negs_per_pos 36.0 +855 13 training.batch_size 0.0 +855 14 model.embedding_dim 2.0 +855 14 model.scoring_fct_norm 2.0 +855 14 regularizer.weight 0.040791480726522146 +855 14 optimizer.lr 0.042361198274604404 +855 14 negative_sampler.num_negs_per_pos 27.0 +855 14 training.batch_size 0.0 +855 15 model.embedding_dim 0.0 +855 15 model.scoring_fct_norm 2.0 +855 15 regularizer.weight 0.014154023209703825 +855 15 optimizer.lr 0.001808184716572063 +855 15 negative_sampler.num_negs_per_pos 72.0 +855 15 training.batch_size 2.0 +855 16 model.embedding_dim 0.0 +855 16 model.scoring_fct_norm 2.0 +855 16 regularizer.weight 0.04443733479596835 +855 16 optimizer.lr 0.0025252459372671838 +855 16 negative_sampler.num_negs_per_pos 74.0 +855 16 training.batch_size 1.0 +855 17 model.embedding_dim 1.0 +855 17 model.scoring_fct_norm 2.0 +855 17 regularizer.weight 0.032048901113032435 +855 17 optimizer.lr 0.0621916975912121 +855 17 negative_sampler.num_negs_per_pos 96.0 +855 17 training.batch_size 1.0 +855 18 model.embedding_dim 1.0 +855 18 model.scoring_fct_norm 2.0 +855 18 regularizer.weight 0.08436192467339562 +855 18 
optimizer.lr 0.0771720382951961 +855 18 negative_sampler.num_negs_per_pos 56.0 +855 18 training.batch_size 1.0 +855 19 model.embedding_dim 0.0 +855 19 model.scoring_fct_norm 2.0 +855 19 regularizer.weight 0.010986727535359392 +855 19 optimizer.lr 0.06179563946644618 +855 19 negative_sampler.num_negs_per_pos 57.0 +855 19 training.batch_size 1.0 +855 20 model.embedding_dim 0.0 +855 20 model.scoring_fct_norm 2.0 +855 20 regularizer.weight 0.08638787508459851 +855 20 optimizer.lr 0.01944255345629183 +855 20 negative_sampler.num_negs_per_pos 70.0 +855 20 training.batch_size 0.0 +855 21 model.embedding_dim 2.0 +855 21 model.scoring_fct_norm 1.0 +855 21 regularizer.weight 0.12671355999536232 +855 21 optimizer.lr 0.051745456924752596 +855 21 negative_sampler.num_negs_per_pos 1.0 +855 21 training.batch_size 0.0 +855 22 model.embedding_dim 0.0 +855 22 model.scoring_fct_norm 2.0 +855 22 regularizer.weight 0.2446642718479369 +855 22 optimizer.lr 0.009314904325946135 +855 22 negative_sampler.num_negs_per_pos 91.0 +855 22 training.batch_size 2.0 +855 1 dataset """fb15k237""" +855 1 model """transh""" +855 1 loss """bceaftersigmoid""" +855 1 regularizer """transh""" +855 1 optimizer """adam""" +855 1 training_loop """owa""" +855 1 negative_sampler """basic""" +855 1 evaluator """rankbased""" +855 2 dataset """fb15k237""" +855 2 model """transh""" +855 2 loss """bceaftersigmoid""" +855 2 regularizer """transh""" +855 2 optimizer """adam""" +855 2 training_loop """owa""" +855 2 negative_sampler """basic""" +855 2 evaluator """rankbased""" +855 3 dataset """fb15k237""" +855 3 model """transh""" +855 3 loss """bceaftersigmoid""" +855 3 regularizer """transh""" +855 3 optimizer """adam""" +855 3 training_loop """owa""" +855 3 negative_sampler """basic""" +855 3 evaluator """rankbased""" +855 4 dataset """fb15k237""" +855 4 model """transh""" +855 4 loss """bceaftersigmoid""" +855 4 regularizer """transh""" +855 4 optimizer """adam""" +855 4 training_loop """owa""" +855 4 
negative_sampler """basic""" +855 4 evaluator """rankbased""" +855 5 dataset """fb15k237""" +855 5 model """transh""" +855 5 loss """bceaftersigmoid""" +855 5 regularizer """transh""" +855 5 optimizer """adam""" +855 5 training_loop """owa""" +855 5 negative_sampler """basic""" +855 5 evaluator """rankbased""" +855 6 dataset """fb15k237""" +855 6 model """transh""" +855 6 loss """bceaftersigmoid""" +855 6 regularizer """transh""" +855 6 optimizer """adam""" +855 6 training_loop """owa""" +855 6 negative_sampler """basic""" +855 6 evaluator """rankbased""" +855 7 dataset """fb15k237""" +855 7 model """transh""" +855 7 loss """bceaftersigmoid""" +855 7 regularizer """transh""" +855 7 optimizer """adam""" +855 7 training_loop """owa""" +855 7 negative_sampler """basic""" +855 7 evaluator """rankbased""" +855 8 dataset """fb15k237""" +855 8 model """transh""" +855 8 loss """bceaftersigmoid""" +855 8 regularizer """transh""" +855 8 optimizer """adam""" +855 8 training_loop """owa""" +855 8 negative_sampler """basic""" +855 8 evaluator """rankbased""" +855 9 dataset """fb15k237""" +855 9 model """transh""" +855 9 loss """bceaftersigmoid""" +855 9 regularizer """transh""" +855 9 optimizer """adam""" +855 9 training_loop """owa""" +855 9 negative_sampler """basic""" +855 9 evaluator """rankbased""" +855 10 dataset """fb15k237""" +855 10 model """transh""" +855 10 loss """bceaftersigmoid""" +855 10 regularizer """transh""" +855 10 optimizer """adam""" +855 10 training_loop """owa""" +855 10 negative_sampler """basic""" +855 10 evaluator """rankbased""" +855 11 dataset """fb15k237""" +855 11 model """transh""" +855 11 loss """bceaftersigmoid""" +855 11 regularizer """transh""" +855 11 optimizer """adam""" +855 11 training_loop """owa""" +855 11 negative_sampler """basic""" +855 11 evaluator """rankbased""" +855 12 dataset """fb15k237""" +855 12 model """transh""" +855 12 loss """bceaftersigmoid""" +855 12 regularizer """transh""" +855 12 optimizer """adam""" +855 12 
training_loop """owa""" +855 12 negative_sampler """basic""" +855 12 evaluator """rankbased""" +855 13 dataset """fb15k237""" +855 13 model """transh""" +855 13 loss """bceaftersigmoid""" +855 13 regularizer """transh""" +855 13 optimizer """adam""" +855 13 training_loop """owa""" +855 13 negative_sampler """basic""" +855 13 evaluator """rankbased""" +855 14 dataset """fb15k237""" +855 14 model """transh""" +855 14 loss """bceaftersigmoid""" +855 14 regularizer """transh""" +855 14 optimizer """adam""" +855 14 training_loop """owa""" +855 14 negative_sampler """basic""" +855 14 evaluator """rankbased""" +855 15 dataset """fb15k237""" +855 15 model """transh""" +855 15 loss """bceaftersigmoid""" +855 15 regularizer """transh""" +855 15 optimizer """adam""" +855 15 training_loop """owa""" +855 15 negative_sampler """basic""" +855 15 evaluator """rankbased""" +855 16 dataset """fb15k237""" +855 16 model """transh""" +855 16 loss """bceaftersigmoid""" +855 16 regularizer """transh""" +855 16 optimizer """adam""" +855 16 training_loop """owa""" +855 16 negative_sampler """basic""" +855 16 evaluator """rankbased""" +855 17 dataset """fb15k237""" +855 17 model """transh""" +855 17 loss """bceaftersigmoid""" +855 17 regularizer """transh""" +855 17 optimizer """adam""" +855 17 training_loop """owa""" +855 17 negative_sampler """basic""" +855 17 evaluator """rankbased""" +855 18 dataset """fb15k237""" +855 18 model """transh""" +855 18 loss """bceaftersigmoid""" +855 18 regularizer """transh""" +855 18 optimizer """adam""" +855 18 training_loop """owa""" +855 18 negative_sampler """basic""" +855 18 evaluator """rankbased""" +855 19 dataset """fb15k237""" +855 19 model """transh""" +855 19 loss """bceaftersigmoid""" +855 19 regularizer """transh""" +855 19 optimizer """adam""" +855 19 training_loop """owa""" +855 19 negative_sampler """basic""" +855 19 evaluator """rankbased""" +855 20 dataset """fb15k237""" +855 20 model """transh""" +855 20 loss """bceaftersigmoid""" +855 
20 regularizer """transh""" +855 20 optimizer """adam""" +855 20 training_loop """owa""" +855 20 negative_sampler """basic""" +855 20 evaluator """rankbased""" +855 21 dataset """fb15k237""" +855 21 model """transh""" +855 21 loss """bceaftersigmoid""" +855 21 regularizer """transh""" +855 21 optimizer """adam""" +855 21 training_loop """owa""" +855 21 negative_sampler """basic""" +855 21 evaluator """rankbased""" +855 22 dataset """fb15k237""" +855 22 model """transh""" +855 22 loss """bceaftersigmoid""" +855 22 regularizer """transh""" +855 22 optimizer """adam""" +855 22 training_loop """owa""" +855 22 negative_sampler """basic""" +855 22 evaluator """rankbased""" +856 1 model.embedding_dim 0.0 +856 1 model.scoring_fct_norm 1.0 +856 1 regularizer.weight 0.05928505068324066 +856 1 optimizer.lr 0.01283275912666378 +856 1 negative_sampler.num_negs_per_pos 65.0 +856 1 training.batch_size 1.0 +856 2 model.embedding_dim 2.0 +856 2 model.scoring_fct_norm 2.0 +856 2 regularizer.weight 0.23826467678244553 +856 2 optimizer.lr 0.005083980142167141 +856 2 negative_sampler.num_negs_per_pos 1.0 +856 2 training.batch_size 2.0 +856 3 model.embedding_dim 1.0 +856 3 model.scoring_fct_norm 1.0 +856 3 regularizer.weight 0.042892455387008796 +856 3 optimizer.lr 0.07026011719072926 +856 3 negative_sampler.num_negs_per_pos 88.0 +856 3 training.batch_size 2.0 +856 4 model.embedding_dim 2.0 +856 4 model.scoring_fct_norm 1.0 +856 4 regularizer.weight 0.22652490167479353 +856 4 optimizer.lr 0.003943291162363489 +856 4 negative_sampler.num_negs_per_pos 8.0 +856 4 training.batch_size 0.0 +856 5 model.embedding_dim 2.0 +856 5 model.scoring_fct_norm 1.0 +856 5 regularizer.weight 0.017070561362657728 +856 5 optimizer.lr 0.0033185270696844934 +856 5 negative_sampler.num_negs_per_pos 67.0 +856 5 training.batch_size 2.0 +856 6 model.embedding_dim 0.0 +856 6 model.scoring_fct_norm 2.0 +856 6 regularizer.weight 0.12622142089285002 +856 6 optimizer.lr 0.030981753413388703 +856 6 
negative_sampler.num_negs_per_pos 54.0 +856 6 training.batch_size 0.0 +856 7 model.embedding_dim 1.0 +856 7 model.scoring_fct_norm 1.0 +856 7 regularizer.weight 0.018382485930285526 +856 7 optimizer.lr 0.004057198690340791 +856 7 negative_sampler.num_negs_per_pos 44.0 +856 7 training.batch_size 2.0 +856 8 model.embedding_dim 1.0 +856 8 model.scoring_fct_norm 2.0 +856 8 regularizer.weight 0.014169793500762903 +856 8 optimizer.lr 0.00203942140481272 +856 8 negative_sampler.num_negs_per_pos 45.0 +856 8 training.batch_size 0.0 +856 9 model.embedding_dim 0.0 +856 9 model.scoring_fct_norm 1.0 +856 9 regularizer.weight 0.026690263569934453 +856 9 optimizer.lr 0.03942636482069758 +856 9 negative_sampler.num_negs_per_pos 71.0 +856 9 training.batch_size 1.0 +856 10 model.embedding_dim 0.0 +856 10 model.scoring_fct_norm 1.0 +856 10 regularizer.weight 0.14444372504076042 +856 10 optimizer.lr 0.0011926799857284115 +856 10 negative_sampler.num_negs_per_pos 74.0 +856 10 training.batch_size 1.0 +856 11 model.embedding_dim 0.0 +856 11 model.scoring_fct_norm 2.0 +856 11 regularizer.weight 0.0611003882366734 +856 11 optimizer.lr 0.003042320287978393 +856 11 negative_sampler.num_negs_per_pos 86.0 +856 11 training.batch_size 0.0 +856 12 model.embedding_dim 0.0 +856 12 model.scoring_fct_norm 2.0 +856 12 regularizer.weight 0.09437831721159874 +856 12 optimizer.lr 0.013685763324289467 +856 12 negative_sampler.num_negs_per_pos 76.0 +856 12 training.batch_size 1.0 +856 13 model.embedding_dim 0.0 +856 13 model.scoring_fct_norm 1.0 +856 13 regularizer.weight 0.0543041940673981 +856 13 optimizer.lr 0.004545464602321348 +856 13 negative_sampler.num_negs_per_pos 89.0 +856 13 training.batch_size 0.0 +856 14 model.embedding_dim 1.0 +856 14 model.scoring_fct_norm 1.0 +856 14 regularizer.weight 0.042890869066667725 +856 14 optimizer.lr 0.012764338597324466 +856 14 negative_sampler.num_negs_per_pos 55.0 +856 14 training.batch_size 2.0 +856 15 model.embedding_dim 2.0 +856 15 model.scoring_fct_norm 1.0 
+856 15 regularizer.weight 0.03582670298418601 +856 15 optimizer.lr 0.03627283156056637 +856 15 negative_sampler.num_negs_per_pos 3.0 +856 15 training.batch_size 2.0 +856 16 model.embedding_dim 0.0 +856 16 model.scoring_fct_norm 2.0 +856 16 regularizer.weight 0.06275050313824715 +856 16 optimizer.lr 0.013576991801817163 +856 16 negative_sampler.num_negs_per_pos 28.0 +856 16 training.batch_size 0.0 +856 17 model.embedding_dim 1.0 +856 17 model.scoring_fct_norm 1.0 +856 17 regularizer.weight 0.04594290487405925 +856 17 optimizer.lr 0.0022624539897084356 +856 17 negative_sampler.num_negs_per_pos 11.0 +856 17 training.batch_size 2.0 +856 18 model.embedding_dim 1.0 +856 18 model.scoring_fct_norm 1.0 +856 18 regularizer.weight 0.03235994997351674 +856 18 optimizer.lr 0.02863230650825224 +856 18 negative_sampler.num_negs_per_pos 12.0 +856 18 training.batch_size 0.0 +856 1 dataset """fb15k237""" +856 1 model """transh""" +856 1 loss """softplus""" +856 1 regularizer """transh""" +856 1 optimizer """adam""" +856 1 training_loop """owa""" +856 1 negative_sampler """basic""" +856 1 evaluator """rankbased""" +856 2 dataset """fb15k237""" +856 2 model """transh""" +856 2 loss """softplus""" +856 2 regularizer """transh""" +856 2 optimizer """adam""" +856 2 training_loop """owa""" +856 2 negative_sampler """basic""" +856 2 evaluator """rankbased""" +856 3 dataset """fb15k237""" +856 3 model """transh""" +856 3 loss """softplus""" +856 3 regularizer """transh""" +856 3 optimizer """adam""" +856 3 training_loop """owa""" +856 3 negative_sampler """basic""" +856 3 evaluator """rankbased""" +856 4 dataset """fb15k237""" +856 4 model """transh""" +856 4 loss """softplus""" +856 4 regularizer """transh""" +856 4 optimizer """adam""" +856 4 training_loop """owa""" +856 4 negative_sampler """basic""" +856 4 evaluator """rankbased""" +856 5 dataset """fb15k237""" +856 5 model """transh""" +856 5 loss """softplus""" +856 5 regularizer """transh""" +856 5 optimizer """adam""" +856 5 
training_loop """owa""" +856 5 negative_sampler """basic""" +856 5 evaluator """rankbased""" +856 6 dataset """fb15k237""" +856 6 model """transh""" +856 6 loss """softplus""" +856 6 regularizer """transh""" +856 6 optimizer """adam""" +856 6 training_loop """owa""" +856 6 negative_sampler """basic""" +856 6 evaluator """rankbased""" +856 7 dataset """fb15k237""" +856 7 model """transh""" +856 7 loss """softplus""" +856 7 regularizer """transh""" +856 7 optimizer """adam""" +856 7 training_loop """owa""" +856 7 negative_sampler """basic""" +856 7 evaluator """rankbased""" +856 8 dataset """fb15k237""" +856 8 model """transh""" +856 8 loss """softplus""" +856 8 regularizer """transh""" +856 8 optimizer """adam""" +856 8 training_loop """owa""" +856 8 negative_sampler """basic""" +856 8 evaluator """rankbased""" +856 9 dataset """fb15k237""" +856 9 model """transh""" +856 9 loss """softplus""" +856 9 regularizer """transh""" +856 9 optimizer """adam""" +856 9 training_loop """owa""" +856 9 negative_sampler """basic""" +856 9 evaluator """rankbased""" +856 10 dataset """fb15k237""" +856 10 model """transh""" +856 10 loss """softplus""" +856 10 regularizer """transh""" +856 10 optimizer """adam""" +856 10 training_loop """owa""" +856 10 negative_sampler """basic""" +856 10 evaluator """rankbased""" +856 11 dataset """fb15k237""" +856 11 model """transh""" +856 11 loss """softplus""" +856 11 regularizer """transh""" +856 11 optimizer """adam""" +856 11 training_loop """owa""" +856 11 negative_sampler """basic""" +856 11 evaluator """rankbased""" +856 12 dataset """fb15k237""" +856 12 model """transh""" +856 12 loss """softplus""" +856 12 regularizer """transh""" +856 12 optimizer """adam""" +856 12 training_loop """owa""" +856 12 negative_sampler """basic""" +856 12 evaluator """rankbased""" +856 13 dataset """fb15k237""" +856 13 model """transh""" +856 13 loss """softplus""" +856 13 regularizer """transh""" +856 13 optimizer """adam""" +856 13 training_loop """owa""" 
+856 13 negative_sampler """basic""" +856 13 evaluator """rankbased""" +856 14 dataset """fb15k237""" +856 14 model """transh""" +856 14 loss """softplus""" +856 14 regularizer """transh""" +856 14 optimizer """adam""" +856 14 training_loop """owa""" +856 14 negative_sampler """basic""" +856 14 evaluator """rankbased""" +856 15 dataset """fb15k237""" +856 15 model """transh""" +856 15 loss """softplus""" +856 15 regularizer """transh""" +856 15 optimizer """adam""" +856 15 training_loop """owa""" +856 15 negative_sampler """basic""" +856 15 evaluator """rankbased""" +856 16 dataset """fb15k237""" +856 16 model """transh""" +856 16 loss """softplus""" +856 16 regularizer """transh""" +856 16 optimizer """adam""" +856 16 training_loop """owa""" +856 16 negative_sampler """basic""" +856 16 evaluator """rankbased""" +856 17 dataset """fb15k237""" +856 17 model """transh""" +856 17 loss """softplus""" +856 17 regularizer """transh""" +856 17 optimizer """adam""" +856 17 training_loop """owa""" +856 17 negative_sampler """basic""" +856 17 evaluator """rankbased""" +856 18 dataset """fb15k237""" +856 18 model """transh""" +856 18 loss """softplus""" +856 18 regularizer """transh""" +856 18 optimizer """adam""" +856 18 training_loop """owa""" +856 18 negative_sampler """basic""" +856 18 evaluator """rankbased""" +857 1 model.embedding_dim 2.0 +857 1 model.scoring_fct_norm 2.0 +857 1 regularizer.weight 0.20764434791980396 +857 1 optimizer.lr 0.02836879833726131 +857 1 negative_sampler.num_negs_per_pos 3.0 +857 1 training.batch_size 1.0 +857 2 model.embedding_dim 1.0 +857 2 model.scoring_fct_norm 2.0 +857 2 regularizer.weight 0.07474421162505787 +857 2 optimizer.lr 0.0015165469907173247 +857 2 negative_sampler.num_negs_per_pos 8.0 +857 2 training.batch_size 0.0 +857 3 model.embedding_dim 1.0 +857 3 model.scoring_fct_norm 1.0 +857 3 regularizer.weight 0.030453965754599986 +857 3 optimizer.lr 0.03161812571794381 +857 3 negative_sampler.num_negs_per_pos 0.0 +857 3 
training.batch_size 2.0 +857 4 model.embedding_dim 0.0 +857 4 model.scoring_fct_norm 2.0 +857 4 regularizer.weight 0.0175060946843903 +857 4 optimizer.lr 0.05551988282736177 +857 4 negative_sampler.num_negs_per_pos 6.0 +857 4 training.batch_size 1.0 +857 5 model.embedding_dim 1.0 +857 5 model.scoring_fct_norm 2.0 +857 5 regularizer.weight 0.2666965843279513 +857 5 optimizer.lr 0.0032321764939001294 +857 5 negative_sampler.num_negs_per_pos 54.0 +857 5 training.batch_size 1.0 +857 6 model.embedding_dim 0.0 +857 6 model.scoring_fct_norm 1.0 +857 6 regularizer.weight 0.06626135318975687 +857 6 optimizer.lr 0.04313160927865822 +857 6 negative_sampler.num_negs_per_pos 68.0 +857 6 training.batch_size 0.0 +857 7 model.embedding_dim 1.0 +857 7 model.scoring_fct_norm 1.0 +857 7 regularizer.weight 0.014905706770463965 +857 7 optimizer.lr 0.019182586722456273 +857 7 negative_sampler.num_negs_per_pos 62.0 +857 7 training.batch_size 0.0 +857 8 model.embedding_dim 2.0 +857 8 model.scoring_fct_norm 1.0 +857 8 regularizer.weight 0.1475898501246488 +857 8 optimizer.lr 0.030370518139009747 +857 8 negative_sampler.num_negs_per_pos 16.0 +857 8 training.batch_size 1.0 +857 9 model.embedding_dim 1.0 +857 9 model.scoring_fct_norm 1.0 +857 9 regularizer.weight 0.01891134404104328 +857 9 optimizer.lr 0.0603718542222925 +857 9 negative_sampler.num_negs_per_pos 30.0 +857 9 training.batch_size 1.0 +857 10 model.embedding_dim 1.0 +857 10 model.scoring_fct_norm 1.0 +857 10 regularizer.weight 0.12873181013088736 +857 10 optimizer.lr 0.004712338502080373 +857 10 negative_sampler.num_negs_per_pos 47.0 +857 10 training.batch_size 0.0 +857 11 model.embedding_dim 2.0 +857 11 model.scoring_fct_norm 1.0 +857 11 regularizer.weight 0.08475403214537175 +857 11 optimizer.lr 0.0036857510809762115 +857 11 negative_sampler.num_negs_per_pos 5.0 +857 11 training.batch_size 2.0 +857 12 model.embedding_dim 1.0 +857 12 model.scoring_fct_norm 2.0 +857 12 regularizer.weight 0.0157724247747697 +857 12 optimizer.lr 
0.010274588295410777 +857 12 negative_sampler.num_negs_per_pos 7.0 +857 12 training.batch_size 2.0 +857 13 model.embedding_dim 2.0 +857 13 model.scoring_fct_norm 2.0 +857 13 regularizer.weight 0.22760933909776498 +857 13 optimizer.lr 0.08557769978195516 +857 13 negative_sampler.num_negs_per_pos 69.0 +857 13 training.batch_size 1.0 +857 14 model.embedding_dim 2.0 +857 14 model.scoring_fct_norm 1.0 +857 14 regularizer.weight 0.04162065797523759 +857 14 optimizer.lr 0.07635552575019666 +857 14 negative_sampler.num_negs_per_pos 79.0 +857 14 training.batch_size 2.0 +857 15 model.embedding_dim 1.0 +857 15 model.scoring_fct_norm 2.0 +857 15 regularizer.weight 0.055684412646752265 +857 15 optimizer.lr 0.002840303040637302 +857 15 negative_sampler.num_negs_per_pos 95.0 +857 15 training.batch_size 2.0 +857 16 model.embedding_dim 0.0 +857 16 model.scoring_fct_norm 1.0 +857 16 regularizer.weight 0.03762892203763677 +857 16 optimizer.lr 0.061228956089842086 +857 16 negative_sampler.num_negs_per_pos 2.0 +857 16 training.batch_size 1.0 +857 17 model.embedding_dim 2.0 +857 17 model.scoring_fct_norm 2.0 +857 17 regularizer.weight 0.031009280271972577 +857 17 optimizer.lr 0.014332716517108924 +857 17 negative_sampler.num_negs_per_pos 4.0 +857 17 training.batch_size 2.0 +857 18 model.embedding_dim 2.0 +857 18 model.scoring_fct_norm 1.0 +857 18 regularizer.weight 0.05412289827488722 +857 18 optimizer.lr 0.005357316687227987 +857 18 negative_sampler.num_negs_per_pos 35.0 +857 18 training.batch_size 1.0 +857 19 model.embedding_dim 1.0 +857 19 model.scoring_fct_norm 2.0 +857 19 regularizer.weight 0.21987796982527508 +857 19 optimizer.lr 0.005750513675040335 +857 19 negative_sampler.num_negs_per_pos 71.0 +857 19 training.batch_size 0.0 +857 20 model.embedding_dim 0.0 +857 20 model.scoring_fct_norm 1.0 +857 20 regularizer.weight 0.0718512515287968 +857 20 optimizer.lr 0.07540493899911145 +857 20 negative_sampler.num_negs_per_pos 55.0 +857 20 training.batch_size 2.0 +857 21 
model.embedding_dim 1.0 +857 21 model.scoring_fct_norm 2.0 +857 21 regularizer.weight 0.12644931661152814 +857 21 optimizer.lr 0.003187652180592328 +857 21 negative_sampler.num_negs_per_pos 72.0 +857 21 training.batch_size 2.0 +857 22 model.embedding_dim 2.0 +857 22 model.scoring_fct_norm 1.0 +857 22 regularizer.weight 0.029880598906568006 +857 22 optimizer.lr 0.032348155594876735 +857 22 negative_sampler.num_negs_per_pos 99.0 +857 22 training.batch_size 1.0 +857 23 model.embedding_dim 1.0 +857 23 model.scoring_fct_norm 1.0 +857 23 regularizer.weight 0.1918274580034656 +857 23 optimizer.lr 0.003203513942590703 +857 23 negative_sampler.num_negs_per_pos 33.0 +857 23 training.batch_size 0.0 +857 24 model.embedding_dim 2.0 +857 24 model.scoring_fct_norm 2.0 +857 24 regularizer.weight 0.0650135465530251 +857 24 optimizer.lr 0.01572710661544537 +857 24 negative_sampler.num_negs_per_pos 18.0 +857 24 training.batch_size 2.0 +857 25 model.embedding_dim 0.0 +857 25 model.scoring_fct_norm 1.0 +857 25 regularizer.weight 0.011016048321601518 +857 25 optimizer.lr 0.048827366041233194 +857 25 negative_sampler.num_negs_per_pos 88.0 +857 25 training.batch_size 1.0 +857 26 model.embedding_dim 1.0 +857 26 model.scoring_fct_norm 2.0 +857 26 regularizer.weight 0.012608924068641588 +857 26 optimizer.lr 0.02549865260957559 +857 26 negative_sampler.num_negs_per_pos 98.0 +857 26 training.batch_size 1.0 +857 27 model.embedding_dim 2.0 +857 27 model.scoring_fct_norm 1.0 +857 27 regularizer.weight 0.18370687180987993 +857 27 optimizer.lr 0.0070849426005423115 +857 27 negative_sampler.num_negs_per_pos 58.0 +857 27 training.batch_size 1.0 +857 28 model.embedding_dim 1.0 +857 28 model.scoring_fct_norm 1.0 +857 28 regularizer.weight 0.04418342083053212 +857 28 optimizer.lr 0.047454661375330304 +857 28 negative_sampler.num_negs_per_pos 1.0 +857 28 training.batch_size 1.0 +857 29 model.embedding_dim 2.0 +857 29 model.scoring_fct_norm 1.0 +857 29 regularizer.weight 0.07414525941747495 +857 29 
optimizer.lr 0.016065492916608592 +857 29 negative_sampler.num_negs_per_pos 28.0 +857 29 training.batch_size 1.0 +857 30 model.embedding_dim 1.0 +857 30 model.scoring_fct_norm 1.0 +857 30 regularizer.weight 0.17026541287648203 +857 30 optimizer.lr 0.01937879875096042 +857 30 negative_sampler.num_negs_per_pos 30.0 +857 30 training.batch_size 2.0 +857 31 model.embedding_dim 1.0 +857 31 model.scoring_fct_norm 2.0 +857 31 regularizer.weight 0.02374301130816411 +857 31 optimizer.lr 0.004750141692447035 +857 31 negative_sampler.num_negs_per_pos 50.0 +857 31 training.batch_size 1.0 +857 32 model.embedding_dim 1.0 +857 32 model.scoring_fct_norm 2.0 +857 32 regularizer.weight 0.2621049502201242 +857 32 optimizer.lr 0.0015797470649744033 +857 32 negative_sampler.num_negs_per_pos 97.0 +857 32 training.batch_size 2.0 +857 33 model.embedding_dim 2.0 +857 33 model.scoring_fct_norm 2.0 +857 33 regularizer.weight 0.033213825204013725 +857 33 optimizer.lr 0.009280519056983055 +857 33 negative_sampler.num_negs_per_pos 17.0 +857 33 training.batch_size 1.0 +857 34 model.embedding_dim 1.0 +857 34 model.scoring_fct_norm 1.0 +857 34 regularizer.weight 0.013681321443275265 +857 34 optimizer.lr 0.014475916476420938 +857 34 negative_sampler.num_negs_per_pos 55.0 +857 34 training.batch_size 0.0 +857 35 model.embedding_dim 1.0 +857 35 model.scoring_fct_norm 2.0 +857 35 regularizer.weight 0.04969489305000205 +857 35 optimizer.lr 0.0041145853188133425 +857 35 negative_sampler.num_negs_per_pos 73.0 +857 35 training.batch_size 1.0 +857 36 model.embedding_dim 0.0 +857 36 model.scoring_fct_norm 2.0 +857 36 regularizer.weight 0.25640944931333154 +857 36 optimizer.lr 0.002942245343326105 +857 36 negative_sampler.num_negs_per_pos 30.0 +857 36 training.batch_size 1.0 +857 37 model.embedding_dim 2.0 +857 37 model.scoring_fct_norm 1.0 +857 37 regularizer.weight 0.10958486281324 +857 37 optimizer.lr 0.033703958783210274 +857 37 negative_sampler.num_negs_per_pos 70.0 +857 37 training.batch_size 1.0 +857 38 
model.embedding_dim 1.0 +857 38 model.scoring_fct_norm 2.0 +857 38 regularizer.weight 0.01357977588125885 +857 38 optimizer.lr 0.0012315981861401903 +857 38 negative_sampler.num_negs_per_pos 10.0 +857 38 training.batch_size 0.0 +857 39 model.embedding_dim 1.0 +857 39 model.scoring_fct_norm 2.0 +857 39 regularizer.weight 0.11262656310696978 +857 39 optimizer.lr 0.040549730992401195 +857 39 negative_sampler.num_negs_per_pos 5.0 +857 39 training.batch_size 0.0 +857 40 model.embedding_dim 2.0 +857 40 model.scoring_fct_norm 1.0 +857 40 regularizer.weight 0.18231000067244277 +857 40 optimizer.lr 0.0028576723787208406 +857 40 negative_sampler.num_negs_per_pos 3.0 +857 40 training.batch_size 2.0 +857 41 model.embedding_dim 1.0 +857 41 model.scoring_fct_norm 1.0 +857 41 regularizer.weight 0.013052078148232723 +857 41 optimizer.lr 0.027323077960386268 +857 41 negative_sampler.num_negs_per_pos 29.0 +857 41 training.batch_size 2.0 +857 42 model.embedding_dim 0.0 +857 42 model.scoring_fct_norm 2.0 +857 42 regularizer.weight 0.020111647532462425 +857 42 optimizer.lr 0.023221647344577503 +857 42 negative_sampler.num_negs_per_pos 36.0 +857 42 training.batch_size 0.0 +857 43 model.embedding_dim 0.0 +857 43 model.scoring_fct_norm 1.0 +857 43 regularizer.weight 0.027199000290693352 +857 43 optimizer.lr 0.00329510315275366 +857 43 negative_sampler.num_negs_per_pos 30.0 +857 43 training.batch_size 0.0 +857 1 dataset """fb15k237""" +857 1 model """transh""" +857 1 loss """bceaftersigmoid""" +857 1 regularizer """transh""" +857 1 optimizer """adam""" +857 1 training_loop """owa""" +857 1 negative_sampler """basic""" +857 1 evaluator """rankbased""" +857 2 dataset """fb15k237""" +857 2 model """transh""" +857 2 loss """bceaftersigmoid""" +857 2 regularizer """transh""" +857 2 optimizer """adam""" +857 2 training_loop """owa""" +857 2 negative_sampler """basic""" +857 2 evaluator """rankbased""" +857 3 dataset """fb15k237""" +857 3 model """transh""" +857 3 loss """bceaftersigmoid""" +857 
3 regularizer """transh""" +857 3 optimizer """adam""" +857 3 training_loop """owa""" +857 3 negative_sampler """basic""" +857 3 evaluator """rankbased""" +857 4 dataset """fb15k237""" +857 4 model """transh""" +857 4 loss """bceaftersigmoid""" +857 4 regularizer """transh""" +857 4 optimizer """adam""" +857 4 training_loop """owa""" +857 4 negative_sampler """basic""" +857 4 evaluator """rankbased""" +857 5 dataset """fb15k237""" +857 5 model """transh""" +857 5 loss """bceaftersigmoid""" +857 5 regularizer """transh""" +857 5 optimizer """adam""" +857 5 training_loop """owa""" +857 5 negative_sampler """basic""" +857 5 evaluator """rankbased""" +857 6 dataset """fb15k237""" +857 6 model """transh""" +857 6 loss """bceaftersigmoid""" +857 6 regularizer """transh""" +857 6 optimizer """adam""" +857 6 training_loop """owa""" +857 6 negative_sampler """basic""" +857 6 evaluator """rankbased""" +857 7 dataset """fb15k237""" +857 7 model """transh""" +857 7 loss """bceaftersigmoid""" +857 7 regularizer """transh""" +857 7 optimizer """adam""" +857 7 training_loop """owa""" +857 7 negative_sampler """basic""" +857 7 evaluator """rankbased""" +857 8 dataset """fb15k237""" +857 8 model """transh""" +857 8 loss """bceaftersigmoid""" +857 8 regularizer """transh""" +857 8 optimizer """adam""" +857 8 training_loop """owa""" +857 8 negative_sampler """basic""" +857 8 evaluator """rankbased""" +857 9 dataset """fb15k237""" +857 9 model """transh""" +857 9 loss """bceaftersigmoid""" +857 9 regularizer """transh""" +857 9 optimizer """adam""" +857 9 training_loop """owa""" +857 9 negative_sampler """basic""" +857 9 evaluator """rankbased""" +857 10 dataset """fb15k237""" +857 10 model """transh""" +857 10 loss """bceaftersigmoid""" +857 10 regularizer """transh""" +857 10 optimizer """adam""" +857 10 training_loop """owa""" +857 10 negative_sampler """basic""" +857 10 evaluator """rankbased""" +857 11 dataset """fb15k237""" +857 11 model """transh""" +857 11 loss 
"""bceaftersigmoid""" +857 11 regularizer """transh""" +857 11 optimizer """adam""" +857 11 training_loop """owa""" +857 11 negative_sampler """basic""" +857 11 evaluator """rankbased""" +857 12 dataset """fb15k237""" +857 12 model """transh""" +857 12 loss """bceaftersigmoid""" +857 12 regularizer """transh""" +857 12 optimizer """adam""" +857 12 training_loop """owa""" +857 12 negative_sampler """basic""" +857 12 evaluator """rankbased""" +857 13 dataset """fb15k237""" +857 13 model """transh""" +857 13 loss """bceaftersigmoid""" +857 13 regularizer """transh""" +857 13 optimizer """adam""" +857 13 training_loop """owa""" +857 13 negative_sampler """basic""" +857 13 evaluator """rankbased""" +857 14 dataset """fb15k237""" +857 14 model """transh""" +857 14 loss """bceaftersigmoid""" +857 14 regularizer """transh""" +857 14 optimizer """adam""" +857 14 training_loop """owa""" +857 14 negative_sampler """basic""" +857 14 evaluator """rankbased""" +857 15 dataset """fb15k237""" +857 15 model """transh""" +857 15 loss """bceaftersigmoid""" +857 15 regularizer """transh""" +857 15 optimizer """adam""" +857 15 training_loop """owa""" +857 15 negative_sampler """basic""" +857 15 evaluator """rankbased""" +857 16 dataset """fb15k237""" +857 16 model """transh""" +857 16 loss """bceaftersigmoid""" +857 16 regularizer """transh""" +857 16 optimizer """adam""" +857 16 training_loop """owa""" +857 16 negative_sampler """basic""" +857 16 evaluator """rankbased""" +857 17 dataset """fb15k237""" +857 17 model """transh""" +857 17 loss """bceaftersigmoid""" +857 17 regularizer """transh""" +857 17 optimizer """adam""" +857 17 training_loop """owa""" +857 17 negative_sampler """basic""" +857 17 evaluator """rankbased""" +857 18 dataset """fb15k237""" +857 18 model """transh""" +857 18 loss """bceaftersigmoid""" +857 18 regularizer """transh""" +857 18 optimizer """adam""" +857 18 training_loop """owa""" +857 18 negative_sampler """basic""" +857 18 evaluator """rankbased""" +857 
19 dataset """fb15k237""" +857 19 model """transh""" +857 19 loss """bceaftersigmoid""" +857 19 regularizer """transh""" +857 19 optimizer """adam""" +857 19 training_loop """owa""" +857 19 negative_sampler """basic""" +857 19 evaluator """rankbased""" +857 20 dataset """fb15k237""" +857 20 model """transh""" +857 20 loss """bceaftersigmoid""" +857 20 regularizer """transh""" +857 20 optimizer """adam""" +857 20 training_loop """owa""" +857 20 negative_sampler """basic""" +857 20 evaluator """rankbased""" +857 21 dataset """fb15k237""" +857 21 model """transh""" +857 21 loss """bceaftersigmoid""" +857 21 regularizer """transh""" +857 21 optimizer """adam""" +857 21 training_loop """owa""" +857 21 negative_sampler """basic""" +857 21 evaluator """rankbased""" +857 22 dataset """fb15k237""" +857 22 model """transh""" +857 22 loss """bceaftersigmoid""" +857 22 regularizer """transh""" +857 22 optimizer """adam""" +857 22 training_loop """owa""" +857 22 negative_sampler """basic""" +857 22 evaluator """rankbased""" +857 23 dataset """fb15k237""" +857 23 model """transh""" +857 23 loss """bceaftersigmoid""" +857 23 regularizer """transh""" +857 23 optimizer """adam""" +857 23 training_loop """owa""" +857 23 negative_sampler """basic""" +857 23 evaluator """rankbased""" +857 24 dataset """fb15k237""" +857 24 model """transh""" +857 24 loss """bceaftersigmoid""" +857 24 regularizer """transh""" +857 24 optimizer """adam""" +857 24 training_loop """owa""" +857 24 negative_sampler """basic""" +857 24 evaluator """rankbased""" +857 25 dataset """fb15k237""" +857 25 model """transh""" +857 25 loss """bceaftersigmoid""" +857 25 regularizer """transh""" +857 25 optimizer """adam""" +857 25 training_loop """owa""" +857 25 negative_sampler """basic""" +857 25 evaluator """rankbased""" +857 26 dataset """fb15k237""" +857 26 model """transh""" +857 26 loss """bceaftersigmoid""" +857 26 regularizer """transh""" +857 26 optimizer """adam""" +857 26 training_loop """owa""" +857 26 
negative_sampler """basic""" +857 26 evaluator """rankbased""" +857 27 dataset """fb15k237""" +857 27 model """transh""" +857 27 loss """bceaftersigmoid""" +857 27 regularizer """transh""" +857 27 optimizer """adam""" +857 27 training_loop """owa""" +857 27 negative_sampler """basic""" +857 27 evaluator """rankbased""" +857 28 dataset """fb15k237""" +857 28 model """transh""" +857 28 loss """bceaftersigmoid""" +857 28 regularizer """transh""" +857 28 optimizer """adam""" +857 28 training_loop """owa""" +857 28 negative_sampler """basic""" +857 28 evaluator """rankbased""" +857 29 dataset """fb15k237""" +857 29 model """transh""" +857 29 loss """bceaftersigmoid""" +857 29 regularizer """transh""" +857 29 optimizer """adam""" +857 29 training_loop """owa""" +857 29 negative_sampler """basic""" +857 29 evaluator """rankbased""" +857 30 dataset """fb15k237""" +857 30 model """transh""" +857 30 loss """bceaftersigmoid""" +857 30 regularizer """transh""" +857 30 optimizer """adam""" +857 30 training_loop """owa""" +857 30 negative_sampler """basic""" +857 30 evaluator """rankbased""" +857 31 dataset """fb15k237""" +857 31 model """transh""" +857 31 loss """bceaftersigmoid""" +857 31 regularizer """transh""" +857 31 optimizer """adam""" +857 31 training_loop """owa""" +857 31 negative_sampler """basic""" +857 31 evaluator """rankbased""" +857 32 dataset """fb15k237""" +857 32 model """transh""" +857 32 loss """bceaftersigmoid""" +857 32 regularizer """transh""" +857 32 optimizer """adam""" +857 32 training_loop """owa""" +857 32 negative_sampler """basic""" +857 32 evaluator """rankbased""" +857 33 dataset """fb15k237""" +857 33 model """transh""" +857 33 loss """bceaftersigmoid""" +857 33 regularizer """transh""" +857 33 optimizer """adam""" +857 33 training_loop """owa""" +857 33 negative_sampler """basic""" +857 33 evaluator """rankbased""" +857 34 dataset """fb15k237""" +857 34 model """transh""" +857 34 loss """bceaftersigmoid""" +857 34 regularizer """transh""" +857 
34 optimizer """adam""" +857 34 training_loop """owa""" +857 34 negative_sampler """basic""" +857 34 evaluator """rankbased""" +857 35 dataset """fb15k237""" +857 35 model """transh""" +857 35 loss """bceaftersigmoid""" +857 35 regularizer """transh""" +857 35 optimizer """adam""" +857 35 training_loop """owa""" +857 35 negative_sampler """basic""" +857 35 evaluator """rankbased""" +857 36 dataset """fb15k237""" +857 36 model """transh""" +857 36 loss """bceaftersigmoid""" +857 36 regularizer """transh""" +857 36 optimizer """adam""" +857 36 training_loop """owa""" +857 36 negative_sampler """basic""" +857 36 evaluator """rankbased""" +857 37 dataset """fb15k237""" +857 37 model """transh""" +857 37 loss """bceaftersigmoid""" +857 37 regularizer """transh""" +857 37 optimizer """adam""" +857 37 training_loop """owa""" +857 37 negative_sampler """basic""" +857 37 evaluator """rankbased""" +857 38 dataset """fb15k237""" +857 38 model """transh""" +857 38 loss """bceaftersigmoid""" +857 38 regularizer """transh""" +857 38 optimizer """adam""" +857 38 training_loop """owa""" +857 38 negative_sampler """basic""" +857 38 evaluator """rankbased""" +857 39 dataset """fb15k237""" +857 39 model """transh""" +857 39 loss """bceaftersigmoid""" +857 39 regularizer """transh""" +857 39 optimizer """adam""" +857 39 training_loop """owa""" +857 39 negative_sampler """basic""" +857 39 evaluator """rankbased""" +857 40 dataset """fb15k237""" +857 40 model """transh""" +857 40 loss """bceaftersigmoid""" +857 40 regularizer """transh""" +857 40 optimizer """adam""" +857 40 training_loop """owa""" +857 40 negative_sampler """basic""" +857 40 evaluator """rankbased""" +857 41 dataset """fb15k237""" +857 41 model """transh""" +857 41 loss """bceaftersigmoid""" +857 41 regularizer """transh""" +857 41 optimizer """adam""" +857 41 training_loop """owa""" +857 41 negative_sampler """basic""" +857 41 evaluator """rankbased""" +857 42 dataset """fb15k237""" +857 42 model """transh""" +857 42 
loss """bceaftersigmoid""" +857 42 regularizer """transh""" +857 42 optimizer """adam""" +857 42 training_loop """owa""" +857 42 negative_sampler """basic""" +857 42 evaluator """rankbased""" +857 43 dataset """fb15k237""" +857 43 model """transh""" +857 43 loss """bceaftersigmoid""" +857 43 regularizer """transh""" +857 43 optimizer """adam""" +857 43 training_loop """owa""" +857 43 negative_sampler """basic""" +857 43 evaluator """rankbased""" +858 1 model.embedding_dim 0.0 +858 1 model.scoring_fct_norm 2.0 +858 1 regularizer.weight 0.07870167371172077 +858 1 optimizer.lr 0.09098883188908946 +858 1 negative_sampler.num_negs_per_pos 18.0 +858 1 training.batch_size 2.0 +858 2 model.embedding_dim 2.0 +858 2 model.scoring_fct_norm 1.0 +858 2 regularizer.weight 0.010293686752035335 +858 2 optimizer.lr 0.032651031432606406 +858 2 negative_sampler.num_negs_per_pos 41.0 +858 2 training.batch_size 0.0 +858 3 model.embedding_dim 0.0 +858 3 model.scoring_fct_norm 1.0 +858 3 regularizer.weight 0.0630955263060986 +858 3 optimizer.lr 0.0952545419829976 +858 3 negative_sampler.num_negs_per_pos 79.0 +858 3 training.batch_size 1.0 +858 4 model.embedding_dim 2.0 +858 4 model.scoring_fct_norm 1.0 +858 4 regularizer.weight 0.07768593553094975 +858 4 optimizer.lr 0.012983423169949581 +858 4 negative_sampler.num_negs_per_pos 50.0 +858 4 training.batch_size 2.0 +858 5 model.embedding_dim 1.0 +858 5 model.scoring_fct_norm 2.0 +858 5 regularizer.weight 0.17651150722842127 +858 5 optimizer.lr 0.014158146339352895 +858 5 negative_sampler.num_negs_per_pos 2.0 +858 5 training.batch_size 1.0 +858 6 model.embedding_dim 0.0 +858 6 model.scoring_fct_norm 1.0 +858 6 regularizer.weight 0.025671640537197643 +858 6 optimizer.lr 0.002336802919269598 +858 6 negative_sampler.num_negs_per_pos 21.0 +858 6 training.batch_size 1.0 +858 7 model.embedding_dim 2.0 +858 7 model.scoring_fct_norm 1.0 +858 7 regularizer.weight 0.030881716937868717 +858 7 optimizer.lr 0.044101174421346895 +858 7 
negative_sampler.num_negs_per_pos 4.0 +858 7 training.batch_size 0.0 +858 8 model.embedding_dim 0.0 +858 8 model.scoring_fct_norm 2.0 +858 8 regularizer.weight 0.025854869410949857 +858 8 optimizer.lr 0.03270764088969278 +858 8 negative_sampler.num_negs_per_pos 17.0 +858 8 training.batch_size 1.0 +858 9 model.embedding_dim 2.0 +858 9 model.scoring_fct_norm 1.0 +858 9 regularizer.weight 0.2627349751297553 +858 9 optimizer.lr 0.02673064860354775 +858 9 negative_sampler.num_negs_per_pos 46.0 +858 9 training.batch_size 1.0 +858 10 model.embedding_dim 1.0 +858 10 model.scoring_fct_norm 2.0 +858 10 regularizer.weight 0.02041392042745345 +858 10 optimizer.lr 0.014748849937931094 +858 10 negative_sampler.num_negs_per_pos 59.0 +858 10 training.batch_size 2.0 +858 11 model.embedding_dim 2.0 +858 11 model.scoring_fct_norm 2.0 +858 11 regularizer.weight 0.02152921290818454 +858 11 optimizer.lr 0.012334415164123758 +858 11 negative_sampler.num_negs_per_pos 17.0 +858 11 training.batch_size 1.0 +858 12 model.embedding_dim 2.0 +858 12 model.scoring_fct_norm 1.0 +858 12 regularizer.weight 0.06070443086002276 +858 12 optimizer.lr 0.09279086300570054 +858 12 negative_sampler.num_negs_per_pos 92.0 +858 12 training.batch_size 0.0 +858 13 model.embedding_dim 0.0 +858 13 model.scoring_fct_norm 2.0 +858 13 regularizer.weight 0.012974092340413804 +858 13 optimizer.lr 0.021495262088862675 +858 13 negative_sampler.num_negs_per_pos 32.0 +858 13 training.batch_size 2.0 +858 14 model.embedding_dim 2.0 +858 14 model.scoring_fct_norm 1.0 +858 14 regularizer.weight 0.018694664934742696 +858 14 optimizer.lr 0.015618400934182273 +858 14 negative_sampler.num_negs_per_pos 95.0 +858 14 training.batch_size 2.0 +858 15 model.embedding_dim 2.0 +858 15 model.scoring_fct_norm 2.0 +858 15 regularizer.weight 0.10617768916781768 +858 15 optimizer.lr 0.03446546188026409 +858 15 negative_sampler.num_negs_per_pos 43.0 +858 15 training.batch_size 1.0 +858 16 model.embedding_dim 0.0 +858 16 model.scoring_fct_norm 
2.0 +858 16 regularizer.weight 0.09288764997987214 +858 16 optimizer.lr 0.021983325854722287 +858 16 negative_sampler.num_negs_per_pos 68.0 +858 16 training.batch_size 0.0 +858 17 model.embedding_dim 2.0 +858 17 model.scoring_fct_norm 2.0 +858 17 regularizer.weight 0.04023057861595018 +858 17 optimizer.lr 0.02046168612879229 +858 17 negative_sampler.num_negs_per_pos 19.0 +858 17 training.batch_size 0.0 +858 18 model.embedding_dim 1.0 +858 18 model.scoring_fct_norm 2.0 +858 18 regularizer.weight 0.02766047360623137 +858 18 optimizer.lr 0.015687483820719463 +858 18 negative_sampler.num_negs_per_pos 51.0 +858 18 training.batch_size 0.0 +858 19 model.embedding_dim 0.0 +858 19 model.scoring_fct_norm 1.0 +858 19 regularizer.weight 0.01149200300394434 +858 19 optimizer.lr 0.009147289515687568 +858 19 negative_sampler.num_negs_per_pos 12.0 +858 19 training.batch_size 2.0 +858 20 model.embedding_dim 1.0 +858 20 model.scoring_fct_norm 1.0 +858 20 regularizer.weight 0.018514852570656395 +858 20 optimizer.lr 0.004402472104793406 +858 20 negative_sampler.num_negs_per_pos 29.0 +858 20 training.batch_size 2.0 +858 21 model.embedding_dim 1.0 +858 21 model.scoring_fct_norm 1.0 +858 21 regularizer.weight 0.09608020834695317 +858 21 optimizer.lr 0.011499391473725246 +858 21 negative_sampler.num_negs_per_pos 40.0 +858 21 training.batch_size 0.0 +858 22 model.embedding_dim 0.0 +858 22 model.scoring_fct_norm 1.0 +858 22 regularizer.weight 0.16125887088949828 +858 22 optimizer.lr 0.008588869419154434 +858 22 negative_sampler.num_negs_per_pos 40.0 +858 22 training.batch_size 2.0 +858 23 model.embedding_dim 2.0 +858 23 model.scoring_fct_norm 2.0 +858 23 regularizer.weight 0.02309880775613411 +858 23 optimizer.lr 0.0017063463136699649 +858 23 negative_sampler.num_negs_per_pos 68.0 +858 23 training.batch_size 1.0 +858 24 model.embedding_dim 2.0 +858 24 model.scoring_fct_norm 1.0 +858 24 regularizer.weight 0.04501853791330566 +858 24 optimizer.lr 0.0011799526095283702 +858 24 
negative_sampler.num_negs_per_pos 0.0 +858 24 training.batch_size 2.0 +858 25 model.embedding_dim 2.0 +858 25 model.scoring_fct_norm 1.0 +858 25 regularizer.weight 0.02967098327104339 +858 25 optimizer.lr 0.01101189822223932 +858 25 negative_sampler.num_negs_per_pos 69.0 +858 25 training.batch_size 1.0 +858 26 model.embedding_dim 1.0 +858 26 model.scoring_fct_norm 1.0 +858 26 regularizer.weight 0.028387720216405125 +858 26 optimizer.lr 0.013932393622428645 +858 26 negative_sampler.num_negs_per_pos 76.0 +858 26 training.batch_size 2.0 +858 27 model.embedding_dim 1.0 +858 27 model.scoring_fct_norm 1.0 +858 27 regularizer.weight 0.013695209435142899 +858 27 optimizer.lr 0.01542054490020867 +858 27 negative_sampler.num_negs_per_pos 26.0 +858 27 training.batch_size 0.0 +858 28 model.embedding_dim 0.0 +858 28 model.scoring_fct_norm 1.0 +858 28 regularizer.weight 0.0689800706299213 +858 28 optimizer.lr 0.014763014229337047 +858 28 negative_sampler.num_negs_per_pos 9.0 +858 28 training.batch_size 2.0 +858 29 model.embedding_dim 1.0 +858 29 model.scoring_fct_norm 2.0 +858 29 regularizer.weight 0.13540655179823333 +858 29 optimizer.lr 0.00130917393541557 +858 29 negative_sampler.num_negs_per_pos 68.0 +858 29 training.batch_size 2.0 +858 30 model.embedding_dim 0.0 +858 30 model.scoring_fct_norm 2.0 +858 30 regularizer.weight 0.05588801265045612 +858 30 optimizer.lr 0.004045366354660909 +858 30 negative_sampler.num_negs_per_pos 0.0 +858 30 training.batch_size 1.0 +858 31 model.embedding_dim 2.0 +858 31 model.scoring_fct_norm 2.0 +858 31 regularizer.weight 0.03699661597570067 +858 31 optimizer.lr 0.0022350441286672837 +858 31 negative_sampler.num_negs_per_pos 53.0 +858 31 training.batch_size 1.0 +858 32 model.embedding_dim 2.0 +858 32 model.scoring_fct_norm 1.0 +858 32 regularizer.weight 0.06157722739915019 +858 32 optimizer.lr 0.06744421367657998 +858 32 negative_sampler.num_negs_per_pos 73.0 +858 32 training.batch_size 2.0 +858 33 model.embedding_dim 0.0 +858 33 
model.scoring_fct_norm 1.0 +858 33 regularizer.weight 0.026773890086446383 +858 33 optimizer.lr 0.001313539966502172 +858 33 negative_sampler.num_negs_per_pos 9.0 +858 33 training.batch_size 2.0 +858 34 model.embedding_dim 2.0 +858 34 model.scoring_fct_norm 1.0 +858 34 regularizer.weight 0.04331777157076179 +858 34 optimizer.lr 0.06436735183602384 +858 34 negative_sampler.num_negs_per_pos 18.0 +858 34 training.batch_size 0.0 +858 35 model.embedding_dim 1.0 +858 35 model.scoring_fct_norm 1.0 +858 35 regularizer.weight 0.02852375053353182 +858 35 optimizer.lr 0.012076252358301724 +858 35 negative_sampler.num_negs_per_pos 79.0 +858 35 training.batch_size 1.0 +858 36 model.embedding_dim 2.0 +858 36 model.scoring_fct_norm 2.0 +858 36 regularizer.weight 0.02388562008606427 +858 36 optimizer.lr 0.013023516042411542 +858 36 negative_sampler.num_negs_per_pos 19.0 +858 36 training.batch_size 2.0 +858 37 model.embedding_dim 0.0 +858 37 model.scoring_fct_norm 2.0 +858 37 regularizer.weight 0.019222201654778604 +858 37 optimizer.lr 0.05545338706933109 +858 37 negative_sampler.num_negs_per_pos 19.0 +858 37 training.batch_size 1.0 +858 38 model.embedding_dim 2.0 +858 38 model.scoring_fct_norm 2.0 +858 38 regularizer.weight 0.03233941713303484 +858 38 optimizer.lr 0.0017654274620056538 +858 38 negative_sampler.num_negs_per_pos 66.0 +858 38 training.batch_size 1.0 +858 39 model.embedding_dim 1.0 +858 39 model.scoring_fct_norm 2.0 +858 39 regularizer.weight 0.03619590418928721 +858 39 optimizer.lr 0.049624418850201184 +858 39 negative_sampler.num_negs_per_pos 95.0 +858 39 training.batch_size 1.0 +858 40 model.embedding_dim 1.0 +858 40 model.scoring_fct_norm 2.0 +858 40 regularizer.weight 0.012517082559227616 +858 40 optimizer.lr 0.012719407286286137 +858 40 negative_sampler.num_negs_per_pos 92.0 +858 40 training.batch_size 1.0 +858 41 model.embedding_dim 0.0 +858 41 model.scoring_fct_norm 1.0 +858 41 regularizer.weight 0.08519217817608055 +858 41 optimizer.lr 0.07543599322036924 
+858 41 negative_sampler.num_negs_per_pos 73.0 +858 41 training.batch_size 1.0 +858 42 model.embedding_dim 2.0 +858 42 model.scoring_fct_norm 2.0 +858 42 regularizer.weight 0.013786570060422535 +858 42 optimizer.lr 0.027090283203624888 +858 42 negative_sampler.num_negs_per_pos 16.0 +858 42 training.batch_size 0.0 +858 43 model.embedding_dim 0.0 +858 43 model.scoring_fct_norm 1.0 +858 43 regularizer.weight 0.06326017001887854 +858 43 optimizer.lr 0.001014339622672012 +858 43 negative_sampler.num_negs_per_pos 9.0 +858 43 training.batch_size 1.0 +858 1 dataset """fb15k237""" +858 1 model """transh""" +858 1 loss """softplus""" +858 1 regularizer """transh""" +858 1 optimizer """adam""" +858 1 training_loop """owa""" +858 1 negative_sampler """basic""" +858 1 evaluator """rankbased""" +858 2 dataset """fb15k237""" +858 2 model """transh""" +858 2 loss """softplus""" +858 2 regularizer """transh""" +858 2 optimizer """adam""" +858 2 training_loop """owa""" +858 2 negative_sampler """basic""" +858 2 evaluator """rankbased""" +858 3 dataset """fb15k237""" +858 3 model """transh""" +858 3 loss """softplus""" +858 3 regularizer """transh""" +858 3 optimizer """adam""" +858 3 training_loop """owa""" +858 3 negative_sampler """basic""" +858 3 evaluator """rankbased""" +858 4 dataset """fb15k237""" +858 4 model """transh""" +858 4 loss """softplus""" +858 4 regularizer """transh""" +858 4 optimizer """adam""" +858 4 training_loop """owa""" +858 4 negative_sampler """basic""" +858 4 evaluator """rankbased""" +858 5 dataset """fb15k237""" +858 5 model """transh""" +858 5 loss """softplus""" +858 5 regularizer """transh""" +858 5 optimizer """adam""" +858 5 training_loop """owa""" +858 5 negative_sampler """basic""" +858 5 evaluator """rankbased""" +858 6 dataset """fb15k237""" +858 6 model """transh""" +858 6 loss """softplus""" +858 6 regularizer """transh""" +858 6 optimizer """adam""" +858 6 training_loop """owa""" +858 6 negative_sampler """basic""" +858 6 evaluator 
"""rankbased""" +858 7 dataset """fb15k237""" +858 7 model """transh""" +858 7 loss """softplus""" +858 7 regularizer """transh""" +858 7 optimizer """adam""" +858 7 training_loop """owa""" +858 7 negative_sampler """basic""" +858 7 evaluator """rankbased""" +858 8 dataset """fb15k237""" +858 8 model """transh""" +858 8 loss """softplus""" +858 8 regularizer """transh""" +858 8 optimizer """adam""" +858 8 training_loop """owa""" +858 8 negative_sampler """basic""" +858 8 evaluator """rankbased""" +858 9 dataset """fb15k237""" +858 9 model """transh""" +858 9 loss """softplus""" +858 9 regularizer """transh""" +858 9 optimizer """adam""" +858 9 training_loop """owa""" +858 9 negative_sampler """basic""" +858 9 evaluator """rankbased""" +858 10 dataset """fb15k237""" +858 10 model """transh""" +858 10 loss """softplus""" +858 10 regularizer """transh""" +858 10 optimizer """adam""" +858 10 training_loop """owa""" +858 10 negative_sampler """basic""" +858 10 evaluator """rankbased""" +858 11 dataset """fb15k237""" +858 11 model """transh""" +858 11 loss """softplus""" +858 11 regularizer """transh""" +858 11 optimizer """adam""" +858 11 training_loop """owa""" +858 11 negative_sampler """basic""" +858 11 evaluator """rankbased""" +858 12 dataset """fb15k237""" +858 12 model """transh""" +858 12 loss """softplus""" +858 12 regularizer """transh""" +858 12 optimizer """adam""" +858 12 training_loop """owa""" +858 12 negative_sampler """basic""" +858 12 evaluator """rankbased""" +858 13 dataset """fb15k237""" +858 13 model """transh""" +858 13 loss """softplus""" +858 13 regularizer """transh""" +858 13 optimizer """adam""" +858 13 training_loop """owa""" +858 13 negative_sampler """basic""" +858 13 evaluator """rankbased""" +858 14 dataset """fb15k237""" +858 14 model """transh""" +858 14 loss """softplus""" +858 14 regularizer """transh""" +858 14 optimizer """adam""" +858 14 training_loop """owa""" +858 14 negative_sampler """basic""" +858 14 evaluator """rankbased""" 
+858 15 dataset """fb15k237""" +858 15 model """transh""" +858 15 loss """softplus""" +858 15 regularizer """transh""" +858 15 optimizer """adam""" +858 15 training_loop """owa""" +858 15 negative_sampler """basic""" +858 15 evaluator """rankbased""" +858 16 dataset """fb15k237""" +858 16 model """transh""" +858 16 loss """softplus""" +858 16 regularizer """transh""" +858 16 optimizer """adam""" +858 16 training_loop """owa""" +858 16 negative_sampler """basic""" +858 16 evaluator """rankbased""" +858 17 dataset """fb15k237""" +858 17 model """transh""" +858 17 loss """softplus""" +858 17 regularizer """transh""" +858 17 optimizer """adam""" +858 17 training_loop """owa""" +858 17 negative_sampler """basic""" +858 17 evaluator """rankbased""" +858 18 dataset """fb15k237""" +858 18 model """transh""" +858 18 loss """softplus""" +858 18 regularizer """transh""" +858 18 optimizer """adam""" +858 18 training_loop """owa""" +858 18 negative_sampler """basic""" +858 18 evaluator """rankbased""" +858 19 dataset """fb15k237""" +858 19 model """transh""" +858 19 loss """softplus""" +858 19 regularizer """transh""" +858 19 optimizer """adam""" +858 19 training_loop """owa""" +858 19 negative_sampler """basic""" +858 19 evaluator """rankbased""" +858 20 dataset """fb15k237""" +858 20 model """transh""" +858 20 loss """softplus""" +858 20 regularizer """transh""" +858 20 optimizer """adam""" +858 20 training_loop """owa""" +858 20 negative_sampler """basic""" +858 20 evaluator """rankbased""" +858 21 dataset """fb15k237""" +858 21 model """transh""" +858 21 loss """softplus""" +858 21 regularizer """transh""" +858 21 optimizer """adam""" +858 21 training_loop """owa""" +858 21 negative_sampler """basic""" +858 21 evaluator """rankbased""" +858 22 dataset """fb15k237""" +858 22 model """transh""" +858 22 loss """softplus""" +858 22 regularizer """transh""" +858 22 optimizer """adam""" +858 22 training_loop """owa""" +858 22 negative_sampler """basic""" +858 22 evaluator 
"""rankbased""" +858 23 dataset """fb15k237""" +858 23 model """transh""" +858 23 loss """softplus""" +858 23 regularizer """transh""" +858 23 optimizer """adam""" +858 23 training_loop """owa""" +858 23 negative_sampler """basic""" +858 23 evaluator """rankbased""" +858 24 dataset """fb15k237""" +858 24 model """transh""" +858 24 loss """softplus""" +858 24 regularizer """transh""" +858 24 optimizer """adam""" +858 24 training_loop """owa""" +858 24 negative_sampler """basic""" +858 24 evaluator """rankbased""" +858 25 dataset """fb15k237""" +858 25 model """transh""" +858 25 loss """softplus""" +858 25 regularizer """transh""" +858 25 optimizer """adam""" +858 25 training_loop """owa""" +858 25 negative_sampler """basic""" +858 25 evaluator """rankbased""" +858 26 dataset """fb15k237""" +858 26 model """transh""" +858 26 loss """softplus""" +858 26 regularizer """transh""" +858 26 optimizer """adam""" +858 26 training_loop """owa""" +858 26 negative_sampler """basic""" +858 26 evaluator """rankbased""" +858 27 dataset """fb15k237""" +858 27 model """transh""" +858 27 loss """softplus""" +858 27 regularizer """transh""" +858 27 optimizer """adam""" +858 27 training_loop """owa""" +858 27 negative_sampler """basic""" +858 27 evaluator """rankbased""" +858 28 dataset """fb15k237""" +858 28 model """transh""" +858 28 loss """softplus""" +858 28 regularizer """transh""" +858 28 optimizer """adam""" +858 28 training_loop """owa""" +858 28 negative_sampler """basic""" +858 28 evaluator """rankbased""" +858 29 dataset """fb15k237""" +858 29 model """transh""" +858 29 loss """softplus""" +858 29 regularizer """transh""" +858 29 optimizer """adam""" +858 29 training_loop """owa""" +858 29 negative_sampler """basic""" +858 29 evaluator """rankbased""" +858 30 dataset """fb15k237""" +858 30 model """transh""" +858 30 loss """softplus""" +858 30 regularizer """transh""" +858 30 optimizer """adam""" +858 30 training_loop """owa""" +858 30 negative_sampler """basic""" +858 30 
evaluator """rankbased""" +858 31 dataset """fb15k237""" +858 31 model """transh""" +858 31 loss """softplus""" +858 31 regularizer """transh""" +858 31 optimizer """adam""" +858 31 training_loop """owa""" +858 31 negative_sampler """basic""" +858 31 evaluator """rankbased""" +858 32 dataset """fb15k237""" +858 32 model """transh""" +858 32 loss """softplus""" +858 32 regularizer """transh""" +858 32 optimizer """adam""" +858 32 training_loop """owa""" +858 32 negative_sampler """basic""" +858 32 evaluator """rankbased""" +858 33 dataset """fb15k237""" +858 33 model """transh""" +858 33 loss """softplus""" +858 33 regularizer """transh""" +858 33 optimizer """adam""" +858 33 training_loop """owa""" +858 33 negative_sampler """basic""" +858 33 evaluator """rankbased""" +858 34 dataset """fb15k237""" +858 34 model """transh""" +858 34 loss """softplus""" +858 34 regularizer """transh""" +858 34 optimizer """adam""" +858 34 training_loop """owa""" +858 34 negative_sampler """basic""" +858 34 evaluator """rankbased""" +858 35 dataset """fb15k237""" +858 35 model """transh""" +858 35 loss """softplus""" +858 35 regularizer """transh""" +858 35 optimizer """adam""" +858 35 training_loop """owa""" +858 35 negative_sampler """basic""" +858 35 evaluator """rankbased""" +858 36 dataset """fb15k237""" +858 36 model """transh""" +858 36 loss """softplus""" +858 36 regularizer """transh""" +858 36 optimizer """adam""" +858 36 training_loop """owa""" +858 36 negative_sampler """basic""" +858 36 evaluator """rankbased""" +858 37 dataset """fb15k237""" +858 37 model """transh""" +858 37 loss """softplus""" +858 37 regularizer """transh""" +858 37 optimizer """adam""" +858 37 training_loop """owa""" +858 37 negative_sampler """basic""" +858 37 evaluator """rankbased""" +858 38 dataset """fb15k237""" +858 38 model """transh""" +858 38 loss """softplus""" +858 38 regularizer """transh""" +858 38 optimizer """adam""" +858 38 training_loop """owa""" +858 38 negative_sampler """basic""" 
+858 38 evaluator """rankbased""" +858 39 dataset """fb15k237""" +858 39 model """transh""" +858 39 loss """softplus""" +858 39 regularizer """transh""" +858 39 optimizer """adam""" +858 39 training_loop """owa""" +858 39 negative_sampler """basic""" +858 39 evaluator """rankbased""" +858 40 dataset """fb15k237""" +858 40 model """transh""" +858 40 loss """softplus""" +858 40 regularizer """transh""" +858 40 optimizer """adam""" +858 40 training_loop """owa""" +858 40 negative_sampler """basic""" +858 40 evaluator """rankbased""" +858 41 dataset """fb15k237""" +858 41 model """transh""" +858 41 loss """softplus""" +858 41 regularizer """transh""" +858 41 optimizer """adam""" +858 41 training_loop """owa""" +858 41 negative_sampler """basic""" +858 41 evaluator """rankbased""" +858 42 dataset """fb15k237""" +858 42 model """transh""" +858 42 loss """softplus""" +858 42 regularizer """transh""" +858 42 optimizer """adam""" +858 42 training_loop """owa""" +858 42 negative_sampler """basic""" +858 42 evaluator """rankbased""" +858 43 dataset """fb15k237""" +858 43 model """transh""" +858 43 loss """softplus""" +858 43 regularizer """transh""" +858 43 optimizer """adam""" +858 43 training_loop """owa""" +858 43 negative_sampler """basic""" +858 43 evaluator """rankbased""" +859 1 model.embedding_dim 1.0 +859 1 model.scoring_fct_norm 2.0 +859 1 loss.margin 21.291547855331288 +859 1 loss.adversarial_temperature 0.5438748673222613 +859 1 regularizer.weight 0.2422446864562169 +859 1 optimizer.lr 0.03221850503080363 +859 1 negative_sampler.num_negs_per_pos 53.0 +859 1 training.batch_size 1.0 +859 2 model.embedding_dim 1.0 +859 2 model.scoring_fct_norm 2.0 +859 2 loss.margin 4.5008238025137555 +859 2 loss.adversarial_temperature 0.5799736525106683 +859 2 regularizer.weight 0.15335074802188325 +859 2 optimizer.lr 0.008481626650738688 +859 2 negative_sampler.num_negs_per_pos 83.0 +859 2 training.batch_size 1.0 +859 3 model.embedding_dim 1.0 +859 3 model.scoring_fct_norm 2.0 
+859 3 loss.margin 22.215707719250755 +859 3 loss.adversarial_temperature 0.6036091232746517 +859 3 regularizer.weight 0.11078080889774382 +859 3 optimizer.lr 0.0016457635637294275 +859 3 negative_sampler.num_negs_per_pos 50.0 +859 3 training.batch_size 0.0 +859 4 model.embedding_dim 1.0 +859 4 model.scoring_fct_norm 2.0 +859 4 loss.margin 22.85848234308813 +859 4 loss.adversarial_temperature 0.8860447604829874 +859 4 regularizer.weight 0.019694490684513895 +859 4 optimizer.lr 0.013410300924778141 +859 4 negative_sampler.num_negs_per_pos 7.0 +859 4 training.batch_size 2.0 +859 5 model.embedding_dim 0.0 +859 5 model.scoring_fct_norm 2.0 +859 5 loss.margin 24.66575899308172 +859 5 loss.adversarial_temperature 0.4079444751581771 +859 5 regularizer.weight 0.030651239460292905 +859 5 optimizer.lr 0.0014750588651984305 +859 5 negative_sampler.num_negs_per_pos 17.0 +859 5 training.batch_size 1.0 +859 6 model.embedding_dim 0.0 +859 6 model.scoring_fct_norm 2.0 +859 6 loss.margin 20.498047311243244 +859 6 loss.adversarial_temperature 0.7661521818181272 +859 6 regularizer.weight 0.02074807961505718 +859 6 optimizer.lr 0.015476709389986364 +859 6 negative_sampler.num_negs_per_pos 29.0 +859 6 training.batch_size 2.0 +859 7 model.embedding_dim 2.0 +859 7 model.scoring_fct_norm 1.0 +859 7 loss.margin 23.721917515293473 +859 7 loss.adversarial_temperature 0.5227886501633228 +859 7 regularizer.weight 0.010902862676418806 +859 7 optimizer.lr 0.0025626519744842637 +859 7 negative_sampler.num_negs_per_pos 75.0 +859 7 training.batch_size 1.0 +859 8 model.embedding_dim 2.0 +859 8 model.scoring_fct_norm 2.0 +859 8 loss.margin 6.796663035946504 +859 8 loss.adversarial_temperature 0.7940255947956792 +859 8 regularizer.weight 0.17393919360723042 +859 8 optimizer.lr 0.07576704728622269 +859 8 negative_sampler.num_negs_per_pos 32.0 +859 8 training.batch_size 2.0 +859 9 model.embedding_dim 2.0 +859 9 model.scoring_fct_norm 1.0 +859 9 loss.margin 27.65953688811578 +859 9 
loss.adversarial_temperature 0.8506719213811939 +859 9 regularizer.weight 0.040031630301230865 +859 9 optimizer.lr 0.001972993357779963 +859 9 negative_sampler.num_negs_per_pos 17.0 +859 9 training.batch_size 2.0 +859 10 model.embedding_dim 1.0 +859 10 model.scoring_fct_norm 1.0 +859 10 loss.margin 7.365961227161025 +859 10 loss.adversarial_temperature 0.19132458242141526 +859 10 regularizer.weight 0.017218906038714284 +859 10 optimizer.lr 0.0045977509720395784 +859 10 negative_sampler.num_negs_per_pos 66.0 +859 10 training.batch_size 1.0 +859 11 model.embedding_dim 0.0 +859 11 model.scoring_fct_norm 2.0 +859 11 loss.margin 1.4270944853983876 +859 11 loss.adversarial_temperature 0.16248014273054892 +859 11 regularizer.weight 0.21486351538555157 +859 11 optimizer.lr 0.011374587933647205 +859 11 negative_sampler.num_negs_per_pos 20.0 +859 11 training.batch_size 0.0 +859 12 model.embedding_dim 1.0 +859 12 model.scoring_fct_norm 1.0 +859 12 loss.margin 12.186574281211493 +859 12 loss.adversarial_temperature 0.8775985460624336 +859 12 regularizer.weight 0.01848271230402668 +859 12 optimizer.lr 0.009769062293213856 +859 12 negative_sampler.num_negs_per_pos 51.0 +859 12 training.batch_size 1.0 +859 13 model.embedding_dim 2.0 +859 13 model.scoring_fct_norm 2.0 +859 13 loss.margin 17.37617021469754 +859 13 loss.adversarial_temperature 0.7711615718981876 +859 13 regularizer.weight 0.029055498498971582 +859 13 optimizer.lr 0.09037658387838421 +859 13 negative_sampler.num_negs_per_pos 94.0 +859 13 training.batch_size 2.0 +859 14 model.embedding_dim 0.0 +859 14 model.scoring_fct_norm 1.0 +859 14 loss.margin 24.750415719604547 +859 14 loss.adversarial_temperature 0.2021557423865644 +859 14 regularizer.weight 0.01133099406086696 +859 14 optimizer.lr 0.01688612546895342 +859 14 negative_sampler.num_negs_per_pos 52.0 +859 14 training.batch_size 0.0 +859 15 model.embedding_dim 1.0 +859 15 model.scoring_fct_norm 1.0 +859 15 loss.margin 20.892393247471915 +859 15 
loss.adversarial_temperature 0.9473892187747693 +859 15 regularizer.weight 0.1147535412837989 +859 15 optimizer.lr 0.06799565153813462 +859 15 negative_sampler.num_negs_per_pos 56.0 +859 15 training.batch_size 2.0 +859 16 model.embedding_dim 0.0 +859 16 model.scoring_fct_norm 1.0 +859 16 loss.margin 6.963136312416948 +859 16 loss.adversarial_temperature 0.751128527636088 +859 16 regularizer.weight 0.04610048498812076 +859 16 optimizer.lr 0.041273050701789014 +859 16 negative_sampler.num_negs_per_pos 17.0 +859 16 training.batch_size 2.0 +859 17 model.embedding_dim 0.0 +859 17 model.scoring_fct_norm 2.0 +859 17 loss.margin 10.541265885752345 +859 17 loss.adversarial_temperature 0.6415189557235117 +859 17 regularizer.weight 0.014584006344591545 +859 17 optimizer.lr 0.013026752127970139 +859 17 negative_sampler.num_negs_per_pos 67.0 +859 17 training.batch_size 1.0 +859 18 model.embedding_dim 1.0 +859 18 model.scoring_fct_norm 2.0 +859 18 loss.margin 25.43933055539604 +859 18 loss.adversarial_temperature 0.6790614005740836 +859 18 regularizer.weight 0.010344354562467866 +859 18 optimizer.lr 0.04809836907359111 +859 18 negative_sampler.num_negs_per_pos 21.0 +859 18 training.batch_size 2.0 +859 19 model.embedding_dim 2.0 +859 19 model.scoring_fct_norm 1.0 +859 19 loss.margin 8.425672787500998 +859 19 loss.adversarial_temperature 0.29686248398244003 +859 19 regularizer.weight 0.023992148385290932 +859 19 optimizer.lr 0.018045654142788546 +859 19 negative_sampler.num_negs_per_pos 53.0 +859 19 training.batch_size 0.0 +859 20 model.embedding_dim 2.0 +859 20 model.scoring_fct_norm 1.0 +859 20 loss.margin 8.710394749684658 +859 20 loss.adversarial_temperature 0.4865514601883873 +859 20 regularizer.weight 0.02328914759013405 +859 20 optimizer.lr 0.0013482315649690833 +859 20 negative_sampler.num_negs_per_pos 24.0 +859 20 training.batch_size 1.0 +859 21 model.embedding_dim 2.0 +859 21 model.scoring_fct_norm 2.0 +859 21 loss.margin 24.37269946509304 +859 21 
loss.adversarial_temperature 0.4654963284285011 +859 21 regularizer.weight 0.027655999248232345 +859 21 optimizer.lr 0.03108953439985511 +859 21 negative_sampler.num_negs_per_pos 22.0 +859 21 training.batch_size 0.0 +859 22 model.embedding_dim 1.0 +859 22 model.scoring_fct_norm 2.0 +859 22 loss.margin 11.010397573798496 +859 22 loss.adversarial_temperature 0.2506269887848224 +859 22 regularizer.weight 0.0439137304073344 +859 22 optimizer.lr 0.010576744065724253 +859 22 negative_sampler.num_negs_per_pos 30.0 +859 22 training.batch_size 2.0 +859 23 model.embedding_dim 1.0 +859 23 model.scoring_fct_norm 2.0 +859 23 loss.margin 5.744829682430148 +859 23 loss.adversarial_temperature 0.4350922564155891 +859 23 regularizer.weight 0.025815657834717308 +859 23 optimizer.lr 0.010556896709382571 +859 23 negative_sampler.num_negs_per_pos 61.0 +859 23 training.batch_size 2.0 +859 24 model.embedding_dim 1.0 +859 24 model.scoring_fct_norm 1.0 +859 24 loss.margin 3.0714259513927926 +859 24 loss.adversarial_temperature 0.5137319158721343 +859 24 regularizer.weight 0.09120924816337465 +859 24 optimizer.lr 0.045607162786960385 +859 24 negative_sampler.num_negs_per_pos 12.0 +859 24 training.batch_size 1.0 +859 25 model.embedding_dim 0.0 +859 25 model.scoring_fct_norm 2.0 +859 25 loss.margin 3.173524661386798 +859 25 loss.adversarial_temperature 0.6338895028746183 +859 25 regularizer.weight 0.10184304149092305 +859 25 optimizer.lr 0.005624708031577927 +859 25 negative_sampler.num_negs_per_pos 35.0 +859 25 training.batch_size 1.0 +859 1 dataset """fb15k237""" +859 1 model """transh""" +859 1 loss """nssa""" +859 1 regularizer """transh""" +859 1 optimizer """adam""" +859 1 training_loop """owa""" +859 1 negative_sampler """basic""" +859 1 evaluator """rankbased""" +859 2 dataset """fb15k237""" +859 2 model """transh""" +859 2 loss """nssa""" +859 2 regularizer """transh""" +859 2 optimizer """adam""" +859 2 training_loop """owa""" +859 2 negative_sampler """basic""" +859 2 evaluator 
"""rankbased""" +859 3 dataset """fb15k237""" +859 3 model """transh""" +859 3 loss """nssa""" +859 3 regularizer """transh""" +859 3 optimizer """adam""" +859 3 training_loop """owa""" +859 3 negative_sampler """basic""" +859 3 evaluator """rankbased""" +859 4 dataset """fb15k237""" +859 4 model """transh""" +859 4 loss """nssa""" +859 4 regularizer """transh""" +859 4 optimizer """adam""" +859 4 training_loop """owa""" +859 4 negative_sampler """basic""" +859 4 evaluator """rankbased""" +859 5 dataset """fb15k237""" +859 5 model """transh""" +859 5 loss """nssa""" +859 5 regularizer """transh""" +859 5 optimizer """adam""" +859 5 training_loop """owa""" +859 5 negative_sampler """basic""" +859 5 evaluator """rankbased""" +859 6 dataset """fb15k237""" +859 6 model """transh""" +859 6 loss """nssa""" +859 6 regularizer """transh""" +859 6 optimizer """adam""" +859 6 training_loop """owa""" +859 6 negative_sampler """basic""" +859 6 evaluator """rankbased""" +859 7 dataset """fb15k237""" +859 7 model """transh""" +859 7 loss """nssa""" +859 7 regularizer """transh""" +859 7 optimizer """adam""" +859 7 training_loop """owa""" +859 7 negative_sampler """basic""" +859 7 evaluator """rankbased""" +859 8 dataset """fb15k237""" +859 8 model """transh""" +859 8 loss """nssa""" +859 8 regularizer """transh""" +859 8 optimizer """adam""" +859 8 training_loop """owa""" +859 8 negative_sampler """basic""" +859 8 evaluator """rankbased""" +859 9 dataset """fb15k237""" +859 9 model """transh""" +859 9 loss """nssa""" +859 9 regularizer """transh""" +859 9 optimizer """adam""" +859 9 training_loop """owa""" +859 9 negative_sampler """basic""" +859 9 evaluator """rankbased""" +859 10 dataset """fb15k237""" +859 10 model """transh""" +859 10 loss """nssa""" +859 10 regularizer """transh""" +859 10 optimizer """adam""" +859 10 training_loop """owa""" +859 10 negative_sampler """basic""" +859 10 evaluator """rankbased""" +859 11 dataset """fb15k237""" +859 11 model """transh""" +859 
11 loss """nssa""" +859 11 regularizer """transh""" +859 11 optimizer """adam""" +859 11 training_loop """owa""" +859 11 negative_sampler """basic""" +859 11 evaluator """rankbased""" +859 12 dataset """fb15k237""" +859 12 model """transh""" +859 12 loss """nssa""" +859 12 regularizer """transh""" +859 12 optimizer """adam""" +859 12 training_loop """owa""" +859 12 negative_sampler """basic""" +859 12 evaluator """rankbased""" +859 13 dataset """fb15k237""" +859 13 model """transh""" +859 13 loss """nssa""" +859 13 regularizer """transh""" +859 13 optimizer """adam""" +859 13 training_loop """owa""" +859 13 negative_sampler """basic""" +859 13 evaluator """rankbased""" +859 14 dataset """fb15k237""" +859 14 model """transh""" +859 14 loss """nssa""" +859 14 regularizer """transh""" +859 14 optimizer """adam""" +859 14 training_loop """owa""" +859 14 negative_sampler """basic""" +859 14 evaluator """rankbased""" +859 15 dataset """fb15k237""" +859 15 model """transh""" +859 15 loss """nssa""" +859 15 regularizer """transh""" +859 15 optimizer """adam""" +859 15 training_loop """owa""" +859 15 negative_sampler """basic""" +859 15 evaluator """rankbased""" +859 16 dataset """fb15k237""" +859 16 model """transh""" +859 16 loss """nssa""" +859 16 regularizer """transh""" +859 16 optimizer """adam""" +859 16 training_loop """owa""" +859 16 negative_sampler """basic""" +859 16 evaluator """rankbased""" +859 17 dataset """fb15k237""" +859 17 model """transh""" +859 17 loss """nssa""" +859 17 regularizer """transh""" +859 17 optimizer """adam""" +859 17 training_loop """owa""" +859 17 negative_sampler """basic""" +859 17 evaluator """rankbased""" +859 18 dataset """fb15k237""" +859 18 model """transh""" +859 18 loss """nssa""" +859 18 regularizer """transh""" +859 18 optimizer """adam""" +859 18 training_loop """owa""" +859 18 negative_sampler """basic""" +859 18 evaluator """rankbased""" +859 19 dataset """fb15k237""" +859 19 model """transh""" +859 19 loss """nssa""" +859 
19 regularizer """transh""" +859 19 optimizer """adam""" +859 19 training_loop """owa""" +859 19 negative_sampler """basic""" +859 19 evaluator """rankbased""" +859 20 dataset """fb15k237""" +859 20 model """transh""" +859 20 loss """nssa""" +859 20 regularizer """transh""" +859 20 optimizer """adam""" +859 20 training_loop """owa""" +859 20 negative_sampler """basic""" +859 20 evaluator """rankbased""" +859 21 dataset """fb15k237""" +859 21 model """transh""" +859 21 loss """nssa""" +859 21 regularizer """transh""" +859 21 optimizer """adam""" +859 21 training_loop """owa""" +859 21 negative_sampler """basic""" +859 21 evaluator """rankbased""" +859 22 dataset """fb15k237""" +859 22 model """transh""" +859 22 loss """nssa""" +859 22 regularizer """transh""" +859 22 optimizer """adam""" +859 22 training_loop """owa""" +859 22 negative_sampler """basic""" +859 22 evaluator """rankbased""" +859 23 dataset """fb15k237""" +859 23 model """transh""" +859 23 loss """nssa""" +859 23 regularizer """transh""" +859 23 optimizer """adam""" +859 23 training_loop """owa""" +859 23 negative_sampler """basic""" +859 23 evaluator """rankbased""" +859 24 dataset """fb15k237""" +859 24 model """transh""" +859 24 loss """nssa""" +859 24 regularizer """transh""" +859 24 optimizer """adam""" +859 24 training_loop """owa""" +859 24 negative_sampler """basic""" +859 24 evaluator """rankbased""" +859 25 dataset """fb15k237""" +859 25 model """transh""" +859 25 loss """nssa""" +859 25 regularizer """transh""" +859 25 optimizer """adam""" +859 25 training_loop """owa""" +859 25 negative_sampler """basic""" +859 25 evaluator """rankbased""" +860 1 model.embedding_dim 0.0 +860 1 model.scoring_fct_norm 2.0 +860 1 loss.margin 13.040732971529566 +860 1 loss.adversarial_temperature 0.12923167841962094 +860 1 regularizer.weight 0.013504229710804243 +860 1 optimizer.lr 0.024129285860305195 +860 1 negative_sampler.num_negs_per_pos 18.0 +860 1 training.batch_size 1.0 +860 2 model.embedding_dim 1.0 
+860 2 model.scoring_fct_norm 2.0 +860 2 loss.margin 29.606322344307607 +860 2 loss.adversarial_temperature 0.6045853157290908 +860 2 regularizer.weight 0.015631399706695408 +860 2 optimizer.lr 0.021770735417334378 +860 2 negative_sampler.num_negs_per_pos 82.0 +860 2 training.batch_size 2.0 +860 3 model.embedding_dim 1.0 +860 3 model.scoring_fct_norm 1.0 +860 3 loss.margin 15.06414557799568 +860 3 loss.adversarial_temperature 0.22977632461396588 +860 3 regularizer.weight 0.06762288087332796 +860 3 optimizer.lr 0.020068179649662253 +860 3 negative_sampler.num_negs_per_pos 39.0 +860 3 training.batch_size 1.0 +860 4 model.embedding_dim 1.0 +860 4 model.scoring_fct_norm 2.0 +860 4 loss.margin 19.594303003577867 +860 4 loss.adversarial_temperature 0.6803630514877718 +860 4 regularizer.weight 0.03001477657972761 +860 4 optimizer.lr 0.005225675623250757 +860 4 negative_sampler.num_negs_per_pos 97.0 +860 4 training.batch_size 1.0 +860 5 model.embedding_dim 0.0 +860 5 model.scoring_fct_norm 1.0 +860 5 loss.margin 25.206286026103765 +860 5 loss.adversarial_temperature 0.1421403094380114 +860 5 regularizer.weight 0.09801770475422054 +860 5 optimizer.lr 0.011952567097089658 +860 5 negative_sampler.num_negs_per_pos 2.0 +860 5 training.batch_size 1.0 +860 6 model.embedding_dim 2.0 +860 6 model.scoring_fct_norm 2.0 +860 6 loss.margin 10.904315178777122 +860 6 loss.adversarial_temperature 0.7850631068555266 +860 6 regularizer.weight 0.021694083874673847 +860 6 optimizer.lr 0.061219229399459875 +860 6 negative_sampler.num_negs_per_pos 29.0 +860 6 training.batch_size 1.0 +860 7 model.embedding_dim 1.0 +860 7 model.scoring_fct_norm 1.0 +860 7 loss.margin 5.733909168534785 +860 7 loss.adversarial_temperature 0.41777883204619465 +860 7 regularizer.weight 0.23672620397520977 +860 7 optimizer.lr 0.0783731104170443 +860 7 negative_sampler.num_negs_per_pos 39.0 +860 7 training.batch_size 2.0 +860 8 model.embedding_dim 0.0 +860 8 model.scoring_fct_norm 2.0 +860 8 loss.margin 
13.350451039754827 +860 8 loss.adversarial_temperature 0.27557573313906053 +860 8 regularizer.weight 0.02640383771602691 +860 8 optimizer.lr 0.05409052879776967 +860 8 negative_sampler.num_negs_per_pos 58.0 +860 8 training.batch_size 1.0 +860 9 model.embedding_dim 0.0 +860 9 model.scoring_fct_norm 2.0 +860 9 loss.margin 25.399407409930845 +860 9 loss.adversarial_temperature 0.7343134286536311 +860 9 regularizer.weight 0.19204421323779317 +860 9 optimizer.lr 0.017480869797595496 +860 9 negative_sampler.num_negs_per_pos 6.0 +860 9 training.batch_size 1.0 +860 10 model.embedding_dim 2.0 +860 10 model.scoring_fct_norm 2.0 +860 10 loss.margin 21.27501398978264 +860 10 loss.adversarial_temperature 0.4749264704003216 +860 10 regularizer.weight 0.14831032288590562 +860 10 optimizer.lr 0.05151081314780098 +860 10 negative_sampler.num_negs_per_pos 97.0 +860 10 training.batch_size 0.0 +860 11 model.embedding_dim 2.0 +860 11 model.scoring_fct_norm 1.0 +860 11 loss.margin 10.881492655986163 +860 11 loss.adversarial_temperature 0.4610865185101088 +860 11 regularizer.weight 0.09877192400418573 +860 11 optimizer.lr 0.035194054892675145 +860 11 negative_sampler.num_negs_per_pos 4.0 +860 11 training.batch_size 0.0 +860 12 model.embedding_dim 2.0 +860 12 model.scoring_fct_norm 2.0 +860 12 loss.margin 11.355690799220884 +860 12 loss.adversarial_temperature 0.981148762472126 +860 12 regularizer.weight 0.28026635891119855 +860 12 optimizer.lr 0.0021446117398185196 +860 12 negative_sampler.num_negs_per_pos 49.0 +860 12 training.batch_size 0.0 +860 13 model.embedding_dim 0.0 +860 13 model.scoring_fct_norm 2.0 +860 13 loss.margin 17.484592821741327 +860 13 loss.adversarial_temperature 0.6279994072582726 +860 13 regularizer.weight 0.10183239022641177 +860 13 optimizer.lr 0.01791379253688843 +860 13 negative_sampler.num_negs_per_pos 72.0 +860 13 training.batch_size 1.0 +860 14 model.embedding_dim 1.0 +860 14 model.scoring_fct_norm 1.0 +860 14 loss.margin 7.217692165713224 +860 14 
loss.adversarial_temperature 0.9766049997219557 +860 14 regularizer.weight 0.14240137079239468 +860 14 optimizer.lr 0.06179934324555984 +860 14 negative_sampler.num_negs_per_pos 74.0 +860 14 training.batch_size 2.0 +860 15 model.embedding_dim 1.0 +860 15 model.scoring_fct_norm 2.0 +860 15 loss.margin 17.01930826013895 +860 15 loss.adversarial_temperature 0.17089173244413824 +860 15 regularizer.weight 0.11221262753652146 +860 15 optimizer.lr 0.013527540167062018 +860 15 negative_sampler.num_negs_per_pos 42.0 +860 15 training.batch_size 0.0 +860 16 model.embedding_dim 1.0 +860 16 model.scoring_fct_norm 2.0 +860 16 loss.margin 6.491139811285484 +860 16 loss.adversarial_temperature 0.609987343569769 +860 16 regularizer.weight 0.042525798076880894 +860 16 optimizer.lr 0.021233178207464405 +860 16 negative_sampler.num_negs_per_pos 96.0 +860 16 training.batch_size 1.0 +860 17 model.embedding_dim 1.0 +860 17 model.scoring_fct_norm 1.0 +860 17 loss.margin 26.63017159085043 +860 17 loss.adversarial_temperature 0.4495372247583578 +860 17 regularizer.weight 0.07093095049939692 +860 17 optimizer.lr 0.0010394140443045034 +860 17 negative_sampler.num_negs_per_pos 36.0 +860 17 training.batch_size 2.0 +860 18 model.embedding_dim 0.0 +860 18 model.scoring_fct_norm 1.0 +860 18 loss.margin 10.283643828316114 +860 18 loss.adversarial_temperature 0.7118649590578809 +860 18 regularizer.weight 0.06518152047622507 +860 18 optimizer.lr 0.0024627727357675743 +860 18 negative_sampler.num_negs_per_pos 99.0 +860 18 training.batch_size 0.0 +860 19 model.embedding_dim 0.0 +860 19 model.scoring_fct_norm 1.0 +860 19 loss.margin 11.005571994342192 +860 19 loss.adversarial_temperature 0.37206540003696775 +860 19 regularizer.weight 0.11145294691839767 +860 19 optimizer.lr 0.048762137136269486 +860 19 negative_sampler.num_negs_per_pos 0.0 +860 19 training.batch_size 2.0 +860 20 model.embedding_dim 1.0 +860 20 model.scoring_fct_norm 2.0 +860 20 loss.margin 24.369384186172073 +860 20 
loss.adversarial_temperature 0.2343813490214558 +860 20 regularizer.weight 0.11733117907607678 +860 20 optimizer.lr 0.0025882320414552787 +860 20 negative_sampler.num_negs_per_pos 46.0 +860 20 training.batch_size 2.0 +860 21 model.embedding_dim 1.0 +860 21 model.scoring_fct_norm 2.0 +860 21 loss.margin 17.991677314427992 +860 21 loss.adversarial_temperature 0.501780711018745 +860 21 regularizer.weight 0.10842482512497975 +860 21 optimizer.lr 0.0025185262474502826 +860 21 negative_sampler.num_negs_per_pos 14.0 +860 21 training.batch_size 1.0 +860 22 model.embedding_dim 2.0 +860 22 model.scoring_fct_norm 2.0 +860 22 loss.margin 22.320515944203727 +860 22 loss.adversarial_temperature 0.5930307173608754 +860 22 regularizer.weight 0.04728621838745721 +860 22 optimizer.lr 0.03922265965789778 +860 22 negative_sampler.num_negs_per_pos 87.0 +860 22 training.batch_size 0.0 +860 23 model.embedding_dim 0.0 +860 23 model.scoring_fct_norm 1.0 +860 23 loss.margin 19.226058190660158 +860 23 loss.adversarial_temperature 0.35173276297239287 +860 23 regularizer.weight 0.08322659981780366 +860 23 optimizer.lr 0.008721274420144975 +860 23 negative_sampler.num_negs_per_pos 15.0 +860 23 training.batch_size 0.0 +860 24 model.embedding_dim 2.0 +860 24 model.scoring_fct_norm 1.0 +860 24 loss.margin 22.39015394738934 +860 24 loss.adversarial_temperature 0.6570999528146656 +860 24 regularizer.weight 0.21777823170383928 +860 24 optimizer.lr 0.02600266773080179 +860 24 negative_sampler.num_negs_per_pos 57.0 +860 24 training.batch_size 0.0 +860 25 model.embedding_dim 1.0 +860 25 model.scoring_fct_norm 1.0 +860 25 loss.margin 8.723225786786577 +860 25 loss.adversarial_temperature 0.24870033464677016 +860 25 regularizer.weight 0.019928213622976244 +860 25 optimizer.lr 0.0012725554191821212 +860 25 negative_sampler.num_negs_per_pos 2.0 +860 25 training.batch_size 2.0 +860 26 model.embedding_dim 1.0 +860 26 model.scoring_fct_norm 2.0 +860 26 loss.margin 22.417737046322234 +860 26 
loss.adversarial_temperature 0.7602840787751318 +860 26 regularizer.weight 0.018102434411168117 +860 26 optimizer.lr 0.012496866199773025 +860 26 negative_sampler.num_negs_per_pos 24.0 +860 26 training.batch_size 1.0 +860 27 model.embedding_dim 1.0 +860 27 model.scoring_fct_norm 2.0 +860 27 loss.margin 2.492975420625818 +860 27 loss.adversarial_temperature 0.1936883944711499 +860 27 regularizer.weight 0.04475514029600445 +860 27 optimizer.lr 0.057565313436652416 +860 27 negative_sampler.num_negs_per_pos 35.0 +860 27 training.batch_size 1.0 +860 28 model.embedding_dim 0.0 +860 28 model.scoring_fct_norm 2.0 +860 28 loss.margin 10.828715895784903 +860 28 loss.adversarial_temperature 0.9255562907667373 +860 28 regularizer.weight 0.040163528850553194 +860 28 optimizer.lr 0.02561910317187168 +860 28 negative_sampler.num_negs_per_pos 70.0 +860 28 training.batch_size 0.0 +860 29 model.embedding_dim 1.0 +860 29 model.scoring_fct_norm 2.0 +860 29 loss.margin 12.07671587938641 +860 29 loss.adversarial_temperature 0.6801335953145782 +860 29 regularizer.weight 0.030917394976487822 +860 29 optimizer.lr 0.0071972065078186286 +860 29 negative_sampler.num_negs_per_pos 77.0 +860 29 training.batch_size 0.0 +860 30 model.embedding_dim 2.0 +860 30 model.scoring_fct_norm 2.0 +860 30 loss.margin 11.466896396870332 +860 30 loss.adversarial_temperature 0.3854838023701376 +860 30 regularizer.weight 0.07998656178115701 +860 30 optimizer.lr 0.05221927742495435 +860 30 negative_sampler.num_negs_per_pos 97.0 +860 30 training.batch_size 0.0 +860 31 model.embedding_dim 1.0 +860 31 model.scoring_fct_norm 1.0 +860 31 loss.margin 12.21799185765573 +860 31 loss.adversarial_temperature 0.1700569302036802 +860 31 regularizer.weight 0.13730438437521392 +860 31 optimizer.lr 0.03748000171333295 +860 31 negative_sampler.num_negs_per_pos 72.0 +860 31 training.batch_size 2.0 +860 32 model.embedding_dim 0.0 +860 32 model.scoring_fct_norm 2.0 +860 32 loss.margin 1.930942849824412 +860 32 
loss.adversarial_temperature 0.8059087194613596 +860 32 regularizer.weight 0.031218973009081656 +860 32 optimizer.lr 0.0018883148030298246 +860 32 negative_sampler.num_negs_per_pos 51.0 +860 32 training.batch_size 2.0 +860 33 model.embedding_dim 0.0 +860 33 model.scoring_fct_norm 1.0 +860 33 loss.margin 3.4774348092117275 +860 33 loss.adversarial_temperature 0.12109843214574942 +860 33 regularizer.weight 0.07333057858039024 +860 33 optimizer.lr 0.002523461729843962 +860 33 negative_sampler.num_negs_per_pos 63.0 +860 33 training.batch_size 0.0 +860 34 model.embedding_dim 1.0 +860 34 model.scoring_fct_norm 2.0 +860 34 loss.margin 15.385392587285422 +860 34 loss.adversarial_temperature 0.532679080371079 +860 34 regularizer.weight 0.06168110392932998 +860 34 optimizer.lr 0.003087609406886367 +860 34 negative_sampler.num_negs_per_pos 83.0 +860 34 training.batch_size 1.0 +860 35 model.embedding_dim 1.0 +860 35 model.scoring_fct_norm 1.0 +860 35 loss.margin 19.861821086090295 +860 35 loss.adversarial_temperature 0.2188035675065261 +860 35 regularizer.weight 0.09717277279664557 +860 35 optimizer.lr 0.010956804534318697 +860 35 negative_sampler.num_negs_per_pos 76.0 +860 35 training.batch_size 2.0 +860 36 model.embedding_dim 2.0 +860 36 model.scoring_fct_norm 1.0 +860 36 loss.margin 8.163417299271401 +860 36 loss.adversarial_temperature 0.2893398735052758 +860 36 regularizer.weight 0.035778359599287476 +860 36 optimizer.lr 0.001152420860453127 +860 36 negative_sampler.num_negs_per_pos 85.0 +860 36 training.batch_size 1.0 +860 37 model.embedding_dim 0.0 +860 37 model.scoring_fct_norm 2.0 +860 37 loss.margin 9.344589644145763 +860 37 loss.adversarial_temperature 0.7939629100108359 +860 37 regularizer.weight 0.016225144282255306 +860 37 optimizer.lr 0.006289180083904393 +860 37 negative_sampler.num_negs_per_pos 6.0 +860 37 training.batch_size 0.0 +860 38 model.embedding_dim 2.0 +860 38 model.scoring_fct_norm 2.0 +860 38 loss.margin 2.166348947563044 +860 38 
loss.adversarial_temperature 0.10499227180570868 +860 38 regularizer.weight 0.024330296579223525 +860 38 optimizer.lr 0.0029914038902768532 +860 38 negative_sampler.num_negs_per_pos 20.0 +860 38 training.batch_size 1.0 +860 39 model.embedding_dim 0.0 +860 39 model.scoring_fct_norm 1.0 +860 39 loss.margin 17.75970603875751 +860 39 loss.adversarial_temperature 0.8456593731473221 +860 39 regularizer.weight 0.1484826187567285 +860 39 optimizer.lr 0.044571757950187474 +860 39 negative_sampler.num_negs_per_pos 39.0 +860 39 training.batch_size 0.0 +860 40 model.embedding_dim 0.0 +860 40 model.scoring_fct_norm 2.0 +860 40 loss.margin 21.176783876744977 +860 40 loss.adversarial_temperature 0.32590944766008006 +860 40 regularizer.weight 0.05642177397747507 +860 40 optimizer.lr 0.03051335019907423 +860 40 negative_sampler.num_negs_per_pos 8.0 +860 40 training.batch_size 0.0 +860 41 model.embedding_dim 2.0 +860 41 model.scoring_fct_norm 1.0 +860 41 loss.margin 20.398877220319203 +860 41 loss.adversarial_temperature 0.7938987862335178 +860 41 regularizer.weight 0.04086278155745553 +860 41 optimizer.lr 0.032028697354620335 +860 41 negative_sampler.num_negs_per_pos 94.0 +860 41 training.batch_size 1.0 +860 42 model.embedding_dim 1.0 +860 42 model.scoring_fct_norm 2.0 +860 42 loss.margin 25.57915225066304 +860 42 loss.adversarial_temperature 0.7526455608784046 +860 42 regularizer.weight 0.08818494535000292 +860 42 optimizer.lr 0.012769579379441044 +860 42 negative_sampler.num_negs_per_pos 54.0 +860 42 training.batch_size 2.0 +860 1 dataset """fb15k237""" +860 1 model """transh""" +860 1 loss """nssa""" +860 1 regularizer """transh""" +860 1 optimizer """adam""" +860 1 training_loop """owa""" +860 1 negative_sampler """basic""" +860 1 evaluator """rankbased""" +860 2 dataset """fb15k237""" +860 2 model """transh""" +860 2 loss """nssa""" +860 2 regularizer """transh""" +860 2 optimizer """adam""" +860 2 training_loop """owa""" +860 2 negative_sampler """basic""" +860 2 evaluator 
"""rankbased""" +860 3 dataset """fb15k237""" +860 3 model """transh""" +860 3 loss """nssa""" +860 3 regularizer """transh""" +860 3 optimizer """adam""" +860 3 training_loop """owa""" +860 3 negative_sampler """basic""" +860 3 evaluator """rankbased""" +860 4 dataset """fb15k237""" +860 4 model """transh""" +860 4 loss """nssa""" +860 4 regularizer """transh""" +860 4 optimizer """adam""" +860 4 training_loop """owa""" +860 4 negative_sampler """basic""" +860 4 evaluator """rankbased""" +860 5 dataset """fb15k237""" +860 5 model """transh""" +860 5 loss """nssa""" +860 5 regularizer """transh""" +860 5 optimizer """adam""" +860 5 training_loop """owa""" +860 5 negative_sampler """basic""" +860 5 evaluator """rankbased""" +860 6 dataset """fb15k237""" +860 6 model """transh""" +860 6 loss """nssa""" +860 6 regularizer """transh""" +860 6 optimizer """adam""" +860 6 training_loop """owa""" +860 6 negative_sampler """basic""" +860 6 evaluator """rankbased""" +860 7 dataset """fb15k237""" +860 7 model """transh""" +860 7 loss """nssa""" +860 7 regularizer """transh""" +860 7 optimizer """adam""" +860 7 training_loop """owa""" +860 7 negative_sampler """basic""" +860 7 evaluator """rankbased""" +860 8 dataset """fb15k237""" +860 8 model """transh""" +860 8 loss """nssa""" +860 8 regularizer """transh""" +860 8 optimizer """adam""" +860 8 training_loop """owa""" +860 8 negative_sampler """basic""" +860 8 evaluator """rankbased""" +860 9 dataset """fb15k237""" +860 9 model """transh""" +860 9 loss """nssa""" +860 9 regularizer """transh""" +860 9 optimizer """adam""" +860 9 training_loop """owa""" +860 9 negative_sampler """basic""" +860 9 evaluator """rankbased""" +860 10 dataset """fb15k237""" +860 10 model """transh""" +860 10 loss """nssa""" +860 10 regularizer """transh""" +860 10 optimizer """adam""" +860 10 training_loop """owa""" +860 10 negative_sampler """basic""" +860 10 evaluator """rankbased""" +860 11 dataset """fb15k237""" +860 11 model """transh""" +860 
11 loss """nssa""" +860 11 regularizer """transh""" +860 11 optimizer """adam""" +860 11 training_loop """owa""" +860 11 negative_sampler """basic""" +860 11 evaluator """rankbased""" +860 12 dataset """fb15k237""" +860 12 model """transh""" +860 12 loss """nssa""" +860 12 regularizer """transh""" +860 12 optimizer """adam""" +860 12 training_loop """owa""" +860 12 negative_sampler """basic""" +860 12 evaluator """rankbased""" +860 13 dataset """fb15k237""" +860 13 model """transh""" +860 13 loss """nssa""" +860 13 regularizer """transh""" +860 13 optimizer """adam""" +860 13 training_loop """owa""" +860 13 negative_sampler """basic""" +860 13 evaluator """rankbased""" +860 14 dataset """fb15k237""" +860 14 model """transh""" +860 14 loss """nssa""" +860 14 regularizer """transh""" +860 14 optimizer """adam""" +860 14 training_loop """owa""" +860 14 negative_sampler """basic""" +860 14 evaluator """rankbased""" +860 15 dataset """fb15k237""" +860 15 model """transh""" +860 15 loss """nssa""" +860 15 regularizer """transh""" +860 15 optimizer """adam""" +860 15 training_loop """owa""" +860 15 negative_sampler """basic""" +860 15 evaluator """rankbased""" +860 16 dataset """fb15k237""" +860 16 model """transh""" +860 16 loss """nssa""" +860 16 regularizer """transh""" +860 16 optimizer """adam""" +860 16 training_loop """owa""" +860 16 negative_sampler """basic""" +860 16 evaluator """rankbased""" +860 17 dataset """fb15k237""" +860 17 model """transh""" +860 17 loss """nssa""" +860 17 regularizer """transh""" +860 17 optimizer """adam""" +860 17 training_loop """owa""" +860 17 negative_sampler """basic""" +860 17 evaluator """rankbased""" +860 18 dataset """fb15k237""" +860 18 model """transh""" +860 18 loss """nssa""" +860 18 regularizer """transh""" +860 18 optimizer """adam""" +860 18 training_loop """owa""" +860 18 negative_sampler """basic""" +860 18 evaluator """rankbased""" +860 19 dataset """fb15k237""" +860 19 model """transh""" +860 19 loss """nssa""" +860 
19 regularizer """transh""" +860 19 optimizer """adam""" +860 19 training_loop """owa""" +860 19 negative_sampler """basic""" +860 19 evaluator """rankbased""" +860 20 dataset """fb15k237""" +860 20 model """transh""" +860 20 loss """nssa""" +860 20 regularizer """transh""" +860 20 optimizer """adam""" +860 20 training_loop """owa""" +860 20 negative_sampler """basic""" +860 20 evaluator """rankbased""" +860 21 dataset """fb15k237""" +860 21 model """transh""" +860 21 loss """nssa""" +860 21 regularizer """transh""" +860 21 optimizer """adam""" +860 21 training_loop """owa""" +860 21 negative_sampler """basic""" +860 21 evaluator """rankbased""" +860 22 dataset """fb15k237""" +860 22 model """transh""" +860 22 loss """nssa""" +860 22 regularizer """transh""" +860 22 optimizer """adam""" +860 22 training_loop """owa""" +860 22 negative_sampler """basic""" +860 22 evaluator """rankbased""" +860 23 dataset """fb15k237""" +860 23 model """transh""" +860 23 loss """nssa""" +860 23 regularizer """transh""" +860 23 optimizer """adam""" +860 23 training_loop """owa""" +860 23 negative_sampler """basic""" +860 23 evaluator """rankbased""" +860 24 dataset """fb15k237""" +860 24 model """transh""" +860 24 loss """nssa""" +860 24 regularizer """transh""" +860 24 optimizer """adam""" +860 24 training_loop """owa""" +860 24 negative_sampler """basic""" +860 24 evaluator """rankbased""" +860 25 dataset """fb15k237""" +860 25 model """transh""" +860 25 loss """nssa""" +860 25 regularizer """transh""" +860 25 optimizer """adam""" +860 25 training_loop """owa""" +860 25 negative_sampler """basic""" +860 25 evaluator """rankbased""" +860 26 dataset """fb15k237""" +860 26 model """transh""" +860 26 loss """nssa""" +860 26 regularizer """transh""" +860 26 optimizer """adam""" +860 26 training_loop """owa""" +860 26 negative_sampler """basic""" +860 26 evaluator """rankbased""" +860 27 dataset """fb15k237""" +860 27 model """transh""" +860 27 loss """nssa""" +860 27 regularizer 
"""transh""" +860 27 optimizer """adam""" +860 27 training_loop """owa""" +860 27 negative_sampler """basic""" +860 27 evaluator """rankbased""" +860 28 dataset """fb15k237""" +860 28 model """transh""" +860 28 loss """nssa""" +860 28 regularizer """transh""" +860 28 optimizer """adam""" +860 28 training_loop """owa""" +860 28 negative_sampler """basic""" +860 28 evaluator """rankbased""" +860 29 dataset """fb15k237""" +860 29 model """transh""" +860 29 loss """nssa""" +860 29 regularizer """transh""" +860 29 optimizer """adam""" +860 29 training_loop """owa""" +860 29 negative_sampler """basic""" +860 29 evaluator """rankbased""" +860 30 dataset """fb15k237""" +860 30 model """transh""" +860 30 loss """nssa""" +860 30 regularizer """transh""" +860 30 optimizer """adam""" +860 30 training_loop """owa""" +860 30 negative_sampler """basic""" +860 30 evaluator """rankbased""" +860 31 dataset """fb15k237""" +860 31 model """transh""" +860 31 loss """nssa""" +860 31 regularizer """transh""" +860 31 optimizer """adam""" +860 31 training_loop """owa""" +860 31 negative_sampler """basic""" +860 31 evaluator """rankbased""" +860 32 dataset """fb15k237""" +860 32 model """transh""" +860 32 loss """nssa""" +860 32 regularizer """transh""" +860 32 optimizer """adam""" +860 32 training_loop """owa""" +860 32 negative_sampler """basic""" +860 32 evaluator """rankbased""" +860 33 dataset """fb15k237""" +860 33 model """transh""" +860 33 loss """nssa""" +860 33 regularizer """transh""" +860 33 optimizer """adam""" +860 33 training_loop """owa""" +860 33 negative_sampler """basic""" +860 33 evaluator """rankbased""" +860 34 dataset """fb15k237""" +860 34 model """transh""" +860 34 loss """nssa""" +860 34 regularizer """transh""" +860 34 optimizer """adam""" +860 34 training_loop """owa""" +860 34 negative_sampler """basic""" +860 34 evaluator """rankbased""" +860 35 dataset """fb15k237""" +860 35 model """transh""" +860 35 loss """nssa""" +860 35 regularizer """transh""" +860 35 
optimizer """adam""" +860 35 training_loop """owa""" +860 35 negative_sampler """basic""" +860 35 evaluator """rankbased""" +860 36 dataset """fb15k237""" +860 36 model """transh""" +860 36 loss """nssa""" +860 36 regularizer """transh""" +860 36 optimizer """adam""" +860 36 training_loop """owa""" +860 36 negative_sampler """basic""" +860 36 evaluator """rankbased""" +860 37 dataset """fb15k237""" +860 37 model """transh""" +860 37 loss """nssa""" +860 37 regularizer """transh""" +860 37 optimizer """adam""" +860 37 training_loop """owa""" +860 37 negative_sampler """basic""" +860 37 evaluator """rankbased""" +860 38 dataset """fb15k237""" +860 38 model """transh""" +860 38 loss """nssa""" +860 38 regularizer """transh""" +860 38 optimizer """adam""" +860 38 training_loop """owa""" +860 38 negative_sampler """basic""" +860 38 evaluator """rankbased""" +860 39 dataset """fb15k237""" +860 39 model """transh""" +860 39 loss """nssa""" +860 39 regularizer """transh""" +860 39 optimizer """adam""" +860 39 training_loop """owa""" +860 39 negative_sampler """basic""" +860 39 evaluator """rankbased""" +860 40 dataset """fb15k237""" +860 40 model """transh""" +860 40 loss """nssa""" +860 40 regularizer """transh""" +860 40 optimizer """adam""" +860 40 training_loop """owa""" +860 40 negative_sampler """basic""" +860 40 evaluator """rankbased""" +860 41 dataset """fb15k237""" +860 41 model """transh""" +860 41 loss """nssa""" +860 41 regularizer """transh""" +860 41 optimizer """adam""" +860 41 training_loop """owa""" +860 41 negative_sampler """basic""" +860 41 evaluator """rankbased""" +860 42 dataset """fb15k237""" +860 42 model """transh""" +860 42 loss """nssa""" +860 42 regularizer """transh""" +860 42 optimizer """adam""" +860 42 training_loop """owa""" +860 42 negative_sampler """basic""" +860 42 evaluator """rankbased""" +861 1 model.embedding_dim 2.0 +861 1 model.scoring_fct_norm 1.0 +861 1 loss.margin 0.7386634416207515 +861 1 regularizer.weight 
0.07580855055091286 +861 1 optimizer.lr 0.00455688233999766 +861 1 negative_sampler.num_negs_per_pos 31.0 +861 1 training.batch_size 0.0 +861 2 model.embedding_dim 1.0 +861 2 model.scoring_fct_norm 2.0 +861 2 loss.margin 8.806712705788975 +861 2 regularizer.weight 0.0422665035918709 +861 2 optimizer.lr 0.008281475043071198 +861 2 negative_sampler.num_negs_per_pos 71.0 +861 2 training.batch_size 1.0 +861 3 model.embedding_dim 2.0 +861 3 model.scoring_fct_norm 1.0 +861 3 loss.margin 0.9878816691028842 +861 3 regularizer.weight 0.08135057823834621 +861 3 optimizer.lr 0.058903633280105804 +861 3 negative_sampler.num_negs_per_pos 17.0 +861 3 training.batch_size 2.0 +861 4 model.embedding_dim 0.0 +861 4 model.scoring_fct_norm 2.0 +861 4 loss.margin 8.836898383913528 +861 4 regularizer.weight 0.03082670528295891 +861 4 optimizer.lr 0.05937720393710451 +861 4 negative_sampler.num_negs_per_pos 31.0 +861 4 training.batch_size 1.0 +861 5 model.embedding_dim 0.0 +861 5 model.scoring_fct_norm 2.0 +861 5 loss.margin 8.260179318391662 +861 5 regularizer.weight 0.19329745521404204 +861 5 optimizer.lr 0.0096125494426022 +861 5 negative_sampler.num_negs_per_pos 43.0 +861 5 training.batch_size 2.0 +861 6 model.embedding_dim 2.0 +861 6 model.scoring_fct_norm 2.0 +861 6 loss.margin 7.229473021793508 +861 6 regularizer.weight 0.027463070631734636 +861 6 optimizer.lr 0.00834556823131543 +861 6 negative_sampler.num_negs_per_pos 25.0 +861 6 training.batch_size 2.0 +861 7 model.embedding_dim 1.0 +861 7 model.scoring_fct_norm 2.0 +861 7 loss.margin 9.560388310296881 +861 7 regularizer.weight 0.029407054217958098 +861 7 optimizer.lr 0.06761609891097198 +861 7 negative_sampler.num_negs_per_pos 34.0 +861 7 training.batch_size 2.0 +861 8 model.embedding_dim 0.0 +861 8 model.scoring_fct_norm 2.0 +861 8 loss.margin 1.0910993569278553 +861 8 regularizer.weight 0.0169513545681259 +861 8 optimizer.lr 0.022626789592388005 +861 8 negative_sampler.num_negs_per_pos 97.0 +861 8 training.batch_size 2.0 
+861 9 model.embedding_dim 2.0 +861 9 model.scoring_fct_norm 2.0 +861 9 loss.margin 9.843303558128508 +861 9 regularizer.weight 0.018162177418819172 +861 9 optimizer.lr 0.001054926774573159 +861 9 negative_sampler.num_negs_per_pos 64.0 +861 9 training.batch_size 2.0 +861 10 model.embedding_dim 0.0 +861 10 model.scoring_fct_norm 1.0 +861 10 loss.margin 2.626461460526691 +861 10 regularizer.weight 0.06232170768106916 +861 10 optimizer.lr 0.022720035186009967 +861 10 negative_sampler.num_negs_per_pos 19.0 +861 10 training.batch_size 0.0 +861 11 model.embedding_dim 0.0 +861 11 model.scoring_fct_norm 1.0 +861 11 loss.margin 6.237696022077071 +861 11 regularizer.weight 0.015057033555075148 +861 11 optimizer.lr 0.007769854975074169 +861 11 negative_sampler.num_negs_per_pos 57.0 +861 11 training.batch_size 0.0 +861 12 model.embedding_dim 2.0 +861 12 model.scoring_fct_norm 1.0 +861 12 loss.margin 3.512700422575379 +861 12 regularizer.weight 0.031196699990775945 +861 12 optimizer.lr 0.008106266510176005 +861 12 negative_sampler.num_negs_per_pos 14.0 +861 12 training.batch_size 2.0 +861 13 model.embedding_dim 1.0 +861 13 model.scoring_fct_norm 1.0 +861 13 loss.margin 8.272749650635513 +861 13 regularizer.weight 0.08799314365970291 +861 13 optimizer.lr 0.0013806094282087014 +861 13 negative_sampler.num_negs_per_pos 74.0 +861 13 training.batch_size 0.0 +861 14 model.embedding_dim 0.0 +861 14 model.scoring_fct_norm 1.0 +861 14 loss.margin 1.6729858121234236 +861 14 regularizer.weight 0.2608139741223714 +861 14 optimizer.lr 0.0011999538577912082 +861 14 negative_sampler.num_negs_per_pos 94.0 +861 14 training.batch_size 0.0 +861 15 model.embedding_dim 2.0 +861 15 model.scoring_fct_norm 1.0 +861 15 loss.margin 4.765295835354095 +861 15 regularizer.weight 0.20603713877815244 +861 15 optimizer.lr 0.005934777447625122 +861 15 negative_sampler.num_negs_per_pos 30.0 +861 15 training.batch_size 0.0 +861 16 model.embedding_dim 0.0 +861 16 model.scoring_fct_norm 2.0 +861 16 loss.margin 
9.650874578226674 +861 16 regularizer.weight 0.033393978709577825 +861 16 optimizer.lr 0.06086102633697322 +861 16 negative_sampler.num_negs_per_pos 4.0 +861 16 training.batch_size 1.0 +861 17 model.embedding_dim 0.0 +861 17 model.scoring_fct_norm 2.0 +861 17 loss.margin 7.566084468851617 +861 17 regularizer.weight 0.06401273988476573 +861 17 optimizer.lr 0.004924087199078247 +861 17 negative_sampler.num_negs_per_pos 71.0 +861 17 training.batch_size 2.0 +861 18 model.embedding_dim 1.0 +861 18 model.scoring_fct_norm 2.0 +861 18 loss.margin 8.843513378054382 +861 18 regularizer.weight 0.06993619736093841 +861 18 optimizer.lr 0.019248005629733292 +861 18 negative_sampler.num_negs_per_pos 73.0 +861 18 training.batch_size 2.0 +861 19 model.embedding_dim 1.0 +861 19 model.scoring_fct_norm 1.0 +861 19 loss.margin 7.815688946832501 +861 19 regularizer.weight 0.024958145687232085 +861 19 optimizer.lr 0.001259500555232499 +861 19 negative_sampler.num_negs_per_pos 9.0 +861 19 training.batch_size 2.0 +861 1 dataset """fb15k237""" +861 1 model """transh""" +861 1 loss """marginranking""" +861 1 regularizer """transh""" +861 1 optimizer """adam""" +861 1 training_loop """owa""" +861 1 negative_sampler """basic""" +861 1 evaluator """rankbased""" +861 2 dataset """fb15k237""" +861 2 model """transh""" +861 2 loss """marginranking""" +861 2 regularizer """transh""" +861 2 optimizer """adam""" +861 2 training_loop """owa""" +861 2 negative_sampler """basic""" +861 2 evaluator """rankbased""" +861 3 dataset """fb15k237""" +861 3 model """transh""" +861 3 loss """marginranking""" +861 3 regularizer """transh""" +861 3 optimizer """adam""" +861 3 training_loop """owa""" +861 3 negative_sampler """basic""" +861 3 evaluator """rankbased""" +861 4 dataset """fb15k237""" +861 4 model """transh""" +861 4 loss """marginranking""" +861 4 regularizer """transh""" +861 4 optimizer """adam""" +861 4 training_loop """owa""" +861 4 negative_sampler """basic""" +861 4 evaluator """rankbased""" 
+861 5 dataset """fb15k237""" +861 5 model """transh""" +861 5 loss """marginranking""" +861 5 regularizer """transh""" +861 5 optimizer """adam""" +861 5 training_loop """owa""" +861 5 negative_sampler """basic""" +861 5 evaluator """rankbased""" +861 6 dataset """fb15k237""" +861 6 model """transh""" +861 6 loss """marginranking""" +861 6 regularizer """transh""" +861 6 optimizer """adam""" +861 6 training_loop """owa""" +861 6 negative_sampler """basic""" +861 6 evaluator """rankbased""" +861 7 dataset """fb15k237""" +861 7 model """transh""" +861 7 loss """marginranking""" +861 7 regularizer """transh""" +861 7 optimizer """adam""" +861 7 training_loop """owa""" +861 7 negative_sampler """basic""" +861 7 evaluator """rankbased""" +861 8 dataset """fb15k237""" +861 8 model """transh""" +861 8 loss """marginranking""" +861 8 regularizer """transh""" +861 8 optimizer """adam""" +861 8 training_loop """owa""" +861 8 negative_sampler """basic""" +861 8 evaluator """rankbased""" +861 9 dataset """fb15k237""" +861 9 model """transh""" +861 9 loss """marginranking""" +861 9 regularizer """transh""" +861 9 optimizer """adam""" +861 9 training_loop """owa""" +861 9 negative_sampler """basic""" +861 9 evaluator """rankbased""" +861 10 dataset """fb15k237""" +861 10 model """transh""" +861 10 loss """marginranking""" +861 10 regularizer """transh""" +861 10 optimizer """adam""" +861 10 training_loop """owa""" +861 10 negative_sampler """basic""" +861 10 evaluator """rankbased""" +861 11 dataset """fb15k237""" +861 11 model """transh""" +861 11 loss """marginranking""" +861 11 regularizer """transh""" +861 11 optimizer """adam""" +861 11 training_loop """owa""" +861 11 negative_sampler """basic""" +861 11 evaluator """rankbased""" +861 12 dataset """fb15k237""" +861 12 model """transh""" +861 12 loss """marginranking""" +861 12 regularizer """transh""" +861 12 optimizer """adam""" +861 12 training_loop """owa""" +861 12 negative_sampler """basic""" +861 12 evaluator 
"""rankbased""" +861 13 dataset """fb15k237""" +861 13 model """transh""" +861 13 loss """marginranking""" +861 13 regularizer """transh""" +861 13 optimizer """adam""" +861 13 training_loop """owa""" +861 13 negative_sampler """basic""" +861 13 evaluator """rankbased""" +861 14 dataset """fb15k237""" +861 14 model """transh""" +861 14 loss """marginranking""" +861 14 regularizer """transh""" +861 14 optimizer """adam""" +861 14 training_loop """owa""" +861 14 negative_sampler """basic""" +861 14 evaluator """rankbased""" +861 15 dataset """fb15k237""" +861 15 model """transh""" +861 15 loss """marginranking""" +861 15 regularizer """transh""" +861 15 optimizer """adam""" +861 15 training_loop """owa""" +861 15 negative_sampler """basic""" +861 15 evaluator """rankbased""" +861 16 dataset """fb15k237""" +861 16 model """transh""" +861 16 loss """marginranking""" +861 16 regularizer """transh""" +861 16 optimizer """adam""" +861 16 training_loop """owa""" +861 16 negative_sampler """basic""" +861 16 evaluator """rankbased""" +861 17 dataset """fb15k237""" +861 17 model """transh""" +861 17 loss """marginranking""" +861 17 regularizer """transh""" +861 17 optimizer """adam""" +861 17 training_loop """owa""" +861 17 negative_sampler """basic""" +861 17 evaluator """rankbased""" +861 18 dataset """fb15k237""" +861 18 model """transh""" +861 18 loss """marginranking""" +861 18 regularizer """transh""" +861 18 optimizer """adam""" +861 18 training_loop """owa""" +861 18 negative_sampler """basic""" +861 18 evaluator """rankbased""" +861 19 dataset """fb15k237""" +861 19 model """transh""" +861 19 loss """marginranking""" +861 19 regularizer """transh""" +861 19 optimizer """adam""" +861 19 training_loop """owa""" +861 19 negative_sampler """basic""" +861 19 evaluator """rankbased""" +862 1 model.embedding_dim 2.0 +862 1 model.scoring_fct_norm 2.0 +862 1 loss.margin 5.520527108037526 +862 1 regularizer.weight 0.04481470905871123 +862 1 optimizer.lr 0.007341902771192309 
+862 1 negative_sampler.num_negs_per_pos 34.0 +862 1 training.batch_size 1.0 +862 2 model.embedding_dim 2.0 +862 2 model.scoring_fct_norm 1.0 +862 2 loss.margin 4.215194397937191 +862 2 regularizer.weight 0.052246466818310876 +862 2 optimizer.lr 0.003395291260557415 +862 2 negative_sampler.num_negs_per_pos 10.0 +862 2 training.batch_size 0.0 +862 3 model.embedding_dim 2.0 +862 3 model.scoring_fct_norm 1.0 +862 3 loss.margin 2.1130167353142144 +862 3 regularizer.weight 0.014060710574496378 +862 3 optimizer.lr 0.026137051532802037 +862 3 negative_sampler.num_negs_per_pos 76.0 +862 3 training.batch_size 0.0 +862 4 model.embedding_dim 1.0 +862 4 model.scoring_fct_norm 2.0 +862 4 loss.margin 9.59485371912475 +862 4 regularizer.weight 0.22845267382550202 +862 4 optimizer.lr 0.0011550350003775266 +862 4 negative_sampler.num_negs_per_pos 77.0 +862 4 training.batch_size 1.0 +862 5 model.embedding_dim 2.0 +862 5 model.scoring_fct_norm 2.0 +862 5 loss.margin 1.3926401713549759 +862 5 regularizer.weight 0.19112173733851562 +862 5 optimizer.lr 0.07588721677516354 +862 5 negative_sampler.num_negs_per_pos 45.0 +862 5 training.batch_size 2.0 +862 6 model.embedding_dim 2.0 +862 6 model.scoring_fct_norm 1.0 +862 6 loss.margin 3.260649879880417 +862 6 regularizer.weight 0.012047456944288595 +862 6 optimizer.lr 0.0012271254676864544 +862 6 negative_sampler.num_negs_per_pos 43.0 +862 6 training.batch_size 2.0 +862 7 model.embedding_dim 0.0 +862 7 model.scoring_fct_norm 2.0 +862 7 loss.margin 3.017237364440712 +862 7 regularizer.weight 0.1155754080675015 +862 7 optimizer.lr 0.0016897974347427033 +862 7 negative_sampler.num_negs_per_pos 35.0 +862 7 training.batch_size 2.0 +862 8 model.embedding_dim 1.0 +862 8 model.scoring_fct_norm 1.0 +862 8 loss.margin 1.8243876485955095 +862 8 regularizer.weight 0.01733689276872596 +862 8 optimizer.lr 0.09870142273600578 +862 8 negative_sampler.num_negs_per_pos 14.0 +862 8 training.batch_size 0.0 +862 9 model.embedding_dim 2.0 +862 9 
model.scoring_fct_norm 2.0 +862 9 loss.margin 6.869286319232965 +862 9 regularizer.weight 0.17613416759751965 +862 9 optimizer.lr 0.0038529461106917356 +862 9 negative_sampler.num_negs_per_pos 43.0 +862 9 training.batch_size 0.0 +862 10 model.embedding_dim 2.0 +862 10 model.scoring_fct_norm 2.0 +862 10 loss.margin 7.044081431683228 +862 10 regularizer.weight 0.022901798233491158 +862 10 optimizer.lr 0.021120365046468623 +862 10 negative_sampler.num_negs_per_pos 80.0 +862 10 training.batch_size 2.0 +862 11 model.embedding_dim 0.0 +862 11 model.scoring_fct_norm 1.0 +862 11 loss.margin 1.6783819995871654 +862 11 regularizer.weight 0.024974123744584225 +862 11 optimizer.lr 0.02951434417758716 +862 11 negative_sampler.num_negs_per_pos 26.0 +862 11 training.batch_size 1.0 +862 12 model.embedding_dim 0.0 +862 12 model.scoring_fct_norm 1.0 +862 12 loss.margin 0.7812086489067074 +862 12 regularizer.weight 0.04113303160741427 +862 12 optimizer.lr 0.0021246235702523544 +862 12 negative_sampler.num_negs_per_pos 30.0 +862 12 training.batch_size 2.0 +862 13 model.embedding_dim 2.0 +862 13 model.scoring_fct_norm 1.0 +862 13 loss.margin 2.7350022987010973 +862 13 regularizer.weight 0.04015412566490791 +862 13 optimizer.lr 0.0905083108404431 +862 13 negative_sampler.num_negs_per_pos 21.0 +862 13 training.batch_size 0.0 +862 14 model.embedding_dim 0.0 +862 14 model.scoring_fct_norm 2.0 +862 14 loss.margin 3.1232520118737903 +862 14 regularizer.weight 0.21534838457765346 +862 14 optimizer.lr 0.01948039368483468 +862 14 negative_sampler.num_negs_per_pos 32.0 +862 14 training.batch_size 1.0 +862 15 model.embedding_dim 0.0 +862 15 model.scoring_fct_norm 2.0 +862 15 loss.margin 1.1200629870294183 +862 15 regularizer.weight 0.2766135650788387 +862 15 optimizer.lr 0.01824428514064685 +862 15 negative_sampler.num_negs_per_pos 73.0 +862 15 training.batch_size 1.0 +862 16 model.embedding_dim 1.0 +862 16 model.scoring_fct_norm 1.0 +862 16 loss.margin 7.35801991298584 +862 16 regularizer.weight 
0.19628823242225144 +862 16 optimizer.lr 0.0032766692027750366 +862 16 negative_sampler.num_negs_per_pos 76.0 +862 16 training.batch_size 1.0 +862 17 model.embedding_dim 0.0 +862 17 model.scoring_fct_norm 2.0 +862 17 loss.margin 8.663896061107945 +862 17 regularizer.weight 0.012503436402398524 +862 17 optimizer.lr 0.0570448551553351 +862 17 negative_sampler.num_negs_per_pos 63.0 +862 17 training.batch_size 1.0 +862 18 model.embedding_dim 1.0 +862 18 model.scoring_fct_norm 2.0 +862 18 loss.margin 2.425619463020426 +862 18 regularizer.weight 0.015112188467836709 +862 18 optimizer.lr 0.0962308682425223 +862 18 negative_sampler.num_negs_per_pos 40.0 +862 18 training.batch_size 2.0 +862 19 model.embedding_dim 1.0 +862 19 model.scoring_fct_norm 1.0 +862 19 loss.margin 1.1862124497605446 +862 19 regularizer.weight 0.01172030496838681 +862 19 optimizer.lr 0.02889501384403275 +862 19 negative_sampler.num_negs_per_pos 63.0 +862 19 training.batch_size 1.0 +862 20 model.embedding_dim 2.0 +862 20 model.scoring_fct_norm 1.0 +862 20 loss.margin 8.20979897447194 +862 20 regularizer.weight 0.014015899344070442 +862 20 optimizer.lr 0.003851793418810387 +862 20 negative_sampler.num_negs_per_pos 16.0 +862 20 training.batch_size 1.0 +862 21 model.embedding_dim 0.0 +862 21 model.scoring_fct_norm 2.0 +862 21 loss.margin 0.5836651907973263 +862 21 regularizer.weight 0.062259675233214726 +862 21 optimizer.lr 0.010734475897845104 +862 21 negative_sampler.num_negs_per_pos 97.0 +862 21 training.batch_size 1.0 +862 22 model.embedding_dim 0.0 +862 22 model.scoring_fct_norm 1.0 +862 22 loss.margin 9.663812521456084 +862 22 regularizer.weight 0.012488156594407412 +862 22 optimizer.lr 0.0032370380009191665 +862 22 negative_sampler.num_negs_per_pos 18.0 +862 22 training.batch_size 0.0 +862 23 model.embedding_dim 0.0 +862 23 model.scoring_fct_norm 2.0 +862 23 loss.margin 8.836462878260786 +862 23 regularizer.weight 0.011666034737629427 +862 23 optimizer.lr 0.0018445348510204515 +862 23 
negative_sampler.num_negs_per_pos 29.0 +862 23 training.batch_size 1.0 +862 24 model.embedding_dim 1.0 +862 24 model.scoring_fct_norm 2.0 +862 24 loss.margin 1.2392677892899135 +862 24 regularizer.weight 0.06989480022368322 +862 24 optimizer.lr 0.011851001739181813 +862 24 negative_sampler.num_negs_per_pos 67.0 +862 24 training.batch_size 0.0 +862 25 model.embedding_dim 0.0 +862 25 model.scoring_fct_norm 2.0 +862 25 loss.margin 3.6633173068647293 +862 25 regularizer.weight 0.025800553466173444 +862 25 optimizer.lr 0.009097660694478078 +862 25 negative_sampler.num_negs_per_pos 10.0 +862 25 training.batch_size 2.0 +862 26 model.embedding_dim 0.0 +862 26 model.scoring_fct_norm 1.0 +862 26 loss.margin 2.3908242807515094 +862 26 regularizer.weight 0.21309982363903507 +862 26 optimizer.lr 0.005726096655521002 +862 26 negative_sampler.num_negs_per_pos 49.0 +862 26 training.batch_size 2.0 +862 27 model.embedding_dim 0.0 +862 27 model.scoring_fct_norm 1.0 +862 27 loss.margin 9.843255403547834 +862 27 regularizer.weight 0.01736182920456609 +862 27 optimizer.lr 0.0011619304905310939 +862 27 negative_sampler.num_negs_per_pos 62.0 +862 27 training.batch_size 2.0 +862 28 model.embedding_dim 0.0 +862 28 model.scoring_fct_norm 1.0 +862 28 loss.margin 8.484182872669736 +862 28 regularizer.weight 0.021937030276106644 +862 28 optimizer.lr 0.05339780998310452 +862 28 negative_sampler.num_negs_per_pos 3.0 +862 28 training.batch_size 1.0 +862 29 model.embedding_dim 0.0 +862 29 model.scoring_fct_norm 2.0 +862 29 loss.margin 4.535688015680615 +862 29 regularizer.weight 0.07444331085031514 +862 29 optimizer.lr 0.00245205966355763 +862 29 negative_sampler.num_negs_per_pos 33.0 +862 29 training.batch_size 0.0 +862 30 model.embedding_dim 2.0 +862 30 model.scoring_fct_norm 2.0 +862 30 loss.margin 3.933992266872173 +862 30 regularizer.weight 0.08831680423530645 +862 30 optimizer.lr 0.013532559759470998 +862 30 negative_sampler.num_negs_per_pos 5.0 +862 30 training.batch_size 2.0 +862 31 
model.embedding_dim 2.0 +862 31 model.scoring_fct_norm 2.0 +862 31 loss.margin 5.194857748935098 +862 31 regularizer.weight 0.03775921957017001 +862 31 optimizer.lr 0.0016262007500738643 +862 31 negative_sampler.num_negs_per_pos 90.0 +862 31 training.batch_size 0.0 +862 32 model.embedding_dim 2.0 +862 32 model.scoring_fct_norm 2.0 +862 32 loss.margin 6.8023623489949 +862 32 regularizer.weight 0.03275308125956578 +862 32 optimizer.lr 0.0021354768348701087 +862 32 negative_sampler.num_negs_per_pos 32.0 +862 32 training.batch_size 0.0 +862 33 model.embedding_dim 2.0 +862 33 model.scoring_fct_norm 2.0 +862 33 loss.margin 7.624980626049246 +862 33 regularizer.weight 0.040082231764553576 +862 33 optimizer.lr 0.011589763453975374 +862 33 negative_sampler.num_negs_per_pos 48.0 +862 33 training.batch_size 2.0 +862 34 model.embedding_dim 0.0 +862 34 model.scoring_fct_norm 1.0 +862 34 loss.margin 9.6951785331089 +862 34 regularizer.weight 0.06419384839935602 +862 34 optimizer.lr 0.0703613968625751 +862 34 negative_sampler.num_negs_per_pos 98.0 +862 34 training.batch_size 2.0 +862 35 model.embedding_dim 0.0 +862 35 model.scoring_fct_norm 2.0 +862 35 loss.margin 4.537166172415712 +862 35 regularizer.weight 0.09596758625546534 +862 35 optimizer.lr 0.009011203086245085 +862 35 negative_sampler.num_negs_per_pos 97.0 +862 35 training.batch_size 1.0 +862 36 model.embedding_dim 0.0 +862 36 model.scoring_fct_norm 1.0 +862 36 loss.margin 1.4189275983314682 +862 36 regularizer.weight 0.010831689402380397 +862 36 optimizer.lr 0.09451567251599167 +862 36 negative_sampler.num_negs_per_pos 3.0 +862 36 training.batch_size 2.0 +862 37 model.embedding_dim 2.0 +862 37 model.scoring_fct_norm 2.0 +862 37 loss.margin 7.8440840722053045 +862 37 regularizer.weight 0.1875014192506352 +862 37 optimizer.lr 0.03770115333893653 +862 37 negative_sampler.num_negs_per_pos 48.0 +862 37 training.batch_size 0.0 +862 38 model.embedding_dim 1.0 +862 38 model.scoring_fct_norm 2.0 +862 38 loss.margin 
6.829122724299948 +862 38 regularizer.weight 0.10719959484589218 +862 38 optimizer.lr 0.0014486742076016545 +862 38 negative_sampler.num_negs_per_pos 15.0 +862 38 training.batch_size 0.0 +862 1 dataset """fb15k237""" +862 1 model """transh""" +862 1 loss """marginranking""" +862 1 regularizer """transh""" +862 1 optimizer """adam""" +862 1 training_loop """owa""" +862 1 negative_sampler """basic""" +862 1 evaluator """rankbased""" +862 2 dataset """fb15k237""" +862 2 model """transh""" +862 2 loss """marginranking""" +862 2 regularizer """transh""" +862 2 optimizer """adam""" +862 2 training_loop """owa""" +862 2 negative_sampler """basic""" +862 2 evaluator """rankbased""" +862 3 dataset """fb15k237""" +862 3 model """transh""" +862 3 loss """marginranking""" +862 3 regularizer """transh""" +862 3 optimizer """adam""" +862 3 training_loop """owa""" +862 3 negative_sampler """basic""" +862 3 evaluator """rankbased""" +862 4 dataset """fb15k237""" +862 4 model """transh""" +862 4 loss """marginranking""" +862 4 regularizer """transh""" +862 4 optimizer """adam""" +862 4 training_loop """owa""" +862 4 negative_sampler """basic""" +862 4 evaluator """rankbased""" +862 5 dataset """fb15k237""" +862 5 model """transh""" +862 5 loss """marginranking""" +862 5 regularizer """transh""" +862 5 optimizer """adam""" +862 5 training_loop """owa""" +862 5 negative_sampler """basic""" +862 5 evaluator """rankbased""" +862 6 dataset """fb15k237""" +862 6 model """transh""" +862 6 loss """marginranking""" +862 6 regularizer """transh""" +862 6 optimizer """adam""" +862 6 training_loop """owa""" +862 6 negative_sampler """basic""" +862 6 evaluator """rankbased""" +862 7 dataset """fb15k237""" +862 7 model """transh""" +862 7 loss """marginranking""" +862 7 regularizer """transh""" +862 7 optimizer """adam""" +862 7 training_loop """owa""" +862 7 negative_sampler """basic""" +862 7 evaluator """rankbased""" +862 8 dataset """fb15k237""" +862 8 model """transh""" +862 8 loss 
"""marginranking""" +862 8 regularizer """transh""" +862 8 optimizer """adam""" +862 8 training_loop """owa""" +862 8 negative_sampler """basic""" +862 8 evaluator """rankbased""" +862 9 dataset """fb15k237""" +862 9 model """transh""" +862 9 loss """marginranking""" +862 9 regularizer """transh""" +862 9 optimizer """adam""" +862 9 training_loop """owa""" +862 9 negative_sampler """basic""" +862 9 evaluator """rankbased""" +862 10 dataset """fb15k237""" +862 10 model """transh""" +862 10 loss """marginranking""" +862 10 regularizer """transh""" +862 10 optimizer """adam""" +862 10 training_loop """owa""" +862 10 negative_sampler """basic""" +862 10 evaluator """rankbased""" +862 11 dataset """fb15k237""" +862 11 model """transh""" +862 11 loss """marginranking""" +862 11 regularizer """transh""" +862 11 optimizer """adam""" +862 11 training_loop """owa""" +862 11 negative_sampler """basic""" +862 11 evaluator """rankbased""" +862 12 dataset """fb15k237""" +862 12 model """transh""" +862 12 loss """marginranking""" +862 12 regularizer """transh""" +862 12 optimizer """adam""" +862 12 training_loop """owa""" +862 12 negative_sampler """basic""" +862 12 evaluator """rankbased""" +862 13 dataset """fb15k237""" +862 13 model """transh""" +862 13 loss """marginranking""" +862 13 regularizer """transh""" +862 13 optimizer """adam""" +862 13 training_loop """owa""" +862 13 negative_sampler """basic""" +862 13 evaluator """rankbased""" +862 14 dataset """fb15k237""" +862 14 model """transh""" +862 14 loss """marginranking""" +862 14 regularizer """transh""" +862 14 optimizer """adam""" +862 14 training_loop """owa""" +862 14 negative_sampler """basic""" +862 14 evaluator """rankbased""" +862 15 dataset """fb15k237""" +862 15 model """transh""" +862 15 loss """marginranking""" +862 15 regularizer """transh""" +862 15 optimizer """adam""" +862 15 training_loop """owa""" +862 15 negative_sampler """basic""" +862 15 evaluator """rankbased""" +862 16 dataset """fb15k237""" +862 
16 model """transh""" +862 16 loss """marginranking""" +862 16 regularizer """transh""" +862 16 optimizer """adam""" +862 16 training_loop """owa""" +862 16 negative_sampler """basic""" +862 16 evaluator """rankbased""" +862 17 dataset """fb15k237""" +862 17 model """transh""" +862 17 loss """marginranking""" +862 17 regularizer """transh""" +862 17 optimizer """adam""" +862 17 training_loop """owa""" +862 17 negative_sampler """basic""" +862 17 evaluator """rankbased""" +862 18 dataset """fb15k237""" +862 18 model """transh""" +862 18 loss """marginranking""" +862 18 regularizer """transh""" +862 18 optimizer """adam""" +862 18 training_loop """owa""" +862 18 negative_sampler """basic""" +862 18 evaluator """rankbased""" +862 19 dataset """fb15k237""" +862 19 model """transh""" +862 19 loss """marginranking""" +862 19 regularizer """transh""" +862 19 optimizer """adam""" +862 19 training_loop """owa""" +862 19 negative_sampler """basic""" +862 19 evaluator """rankbased""" +862 20 dataset """fb15k237""" +862 20 model """transh""" +862 20 loss """marginranking""" +862 20 regularizer """transh""" +862 20 optimizer """adam""" +862 20 training_loop """owa""" +862 20 negative_sampler """basic""" +862 20 evaluator """rankbased""" +862 21 dataset """fb15k237""" +862 21 model """transh""" +862 21 loss """marginranking""" +862 21 regularizer """transh""" +862 21 optimizer """adam""" +862 21 training_loop """owa""" +862 21 negative_sampler """basic""" +862 21 evaluator """rankbased""" +862 22 dataset """fb15k237""" +862 22 model """transh""" +862 22 loss """marginranking""" +862 22 regularizer """transh""" +862 22 optimizer """adam""" +862 22 training_loop """owa""" +862 22 negative_sampler """basic""" +862 22 evaluator """rankbased""" +862 23 dataset """fb15k237""" +862 23 model """transh""" +862 23 loss """marginranking""" +862 23 regularizer """transh""" +862 23 optimizer """adam""" +862 23 training_loop """owa""" +862 23 negative_sampler """basic""" +862 23 evaluator 
"""rankbased""" +862 24 dataset """fb15k237""" +862 24 model """transh""" +862 24 loss """marginranking""" +862 24 regularizer """transh""" +862 24 optimizer """adam""" +862 24 training_loop """owa""" +862 24 negative_sampler """basic""" +862 24 evaluator """rankbased""" +862 25 dataset """fb15k237""" +862 25 model """transh""" +862 25 loss """marginranking""" +862 25 regularizer """transh""" +862 25 optimizer """adam""" +862 25 training_loop """owa""" +862 25 negative_sampler """basic""" +862 25 evaluator """rankbased""" +862 26 dataset """fb15k237""" +862 26 model """transh""" +862 26 loss """marginranking""" +862 26 regularizer """transh""" +862 26 optimizer """adam""" +862 26 training_loop """owa""" +862 26 negative_sampler """basic""" +862 26 evaluator """rankbased""" +862 27 dataset """fb15k237""" +862 27 model """transh""" +862 27 loss """marginranking""" +862 27 regularizer """transh""" +862 27 optimizer """adam""" +862 27 training_loop """owa""" +862 27 negative_sampler """basic""" +862 27 evaluator """rankbased""" +862 28 dataset """fb15k237""" +862 28 model """transh""" +862 28 loss """marginranking""" +862 28 regularizer """transh""" +862 28 optimizer """adam""" +862 28 training_loop """owa""" +862 28 negative_sampler """basic""" +862 28 evaluator """rankbased""" +862 29 dataset """fb15k237""" +862 29 model """transh""" +862 29 loss """marginranking""" +862 29 regularizer """transh""" +862 29 optimizer """adam""" +862 29 training_loop """owa""" +862 29 negative_sampler """basic""" +862 29 evaluator """rankbased""" +862 30 dataset """fb15k237""" +862 30 model """transh""" +862 30 loss """marginranking""" +862 30 regularizer """transh""" +862 30 optimizer """adam""" +862 30 training_loop """owa""" +862 30 negative_sampler """basic""" +862 30 evaluator """rankbased""" +862 31 dataset """fb15k237""" +862 31 model """transh""" +862 31 loss """marginranking""" +862 31 regularizer """transh""" +862 31 optimizer """adam""" +862 31 training_loop """owa""" +862 
31 negative_sampler """basic""" +862 31 evaluator """rankbased""" +862 32 dataset """fb15k237""" +862 32 model """transh""" +862 32 loss """marginranking""" +862 32 regularizer """transh""" +862 32 optimizer """adam""" +862 32 training_loop """owa""" +862 32 negative_sampler """basic""" +862 32 evaluator """rankbased""" +862 33 dataset """fb15k237""" +862 33 model """transh""" +862 33 loss """marginranking""" +862 33 regularizer """transh""" +862 33 optimizer """adam""" +862 33 training_loop """owa""" +862 33 negative_sampler """basic""" +862 33 evaluator """rankbased""" +862 34 dataset """fb15k237""" +862 34 model """transh""" +862 34 loss """marginranking""" +862 34 regularizer """transh""" +862 34 optimizer """adam""" +862 34 training_loop """owa""" +862 34 negative_sampler """basic""" +862 34 evaluator """rankbased""" +862 35 dataset """fb15k237""" +862 35 model """transh""" +862 35 loss """marginranking""" +862 35 regularizer """transh""" +862 35 optimizer """adam""" +862 35 training_loop """owa""" +862 35 negative_sampler """basic""" +862 35 evaluator """rankbased""" +862 36 dataset """fb15k237""" +862 36 model """transh""" +862 36 loss """marginranking""" +862 36 regularizer """transh""" +862 36 optimizer """adam""" +862 36 training_loop """owa""" +862 36 negative_sampler """basic""" +862 36 evaluator """rankbased""" +862 37 dataset """fb15k237""" +862 37 model """transh""" +862 37 loss """marginranking""" +862 37 regularizer """transh""" +862 37 optimizer """adam""" +862 37 training_loop """owa""" +862 37 negative_sampler """basic""" +862 37 evaluator """rankbased""" +862 38 dataset """fb15k237""" +862 38 model """transh""" +862 38 loss """marginranking""" +862 38 regularizer """transh""" +862 38 optimizer """adam""" +862 38 training_loop """owa""" +862 38 negative_sampler """basic""" +862 38 evaluator """rankbased""" +863 1 model.embedding_dim 2.0 +863 1 model.scoring_fct_norm 2.0 +863 1 regularizer.weight 0.012142775571704043 +863 1 optimizer.lr 
0.06641655607847438 +863 1 training.batch_size 0.0 +863 1 training.label_smoothing 0.006643944032754066 +863 2 model.embedding_dim 0.0 +863 2 model.scoring_fct_norm 2.0 +863 2 regularizer.weight 0.0957107859897087 +863 2 optimizer.lr 0.017223614253782847 +863 2 training.batch_size 0.0 +863 2 training.label_smoothing 0.0017283682827328533 +863 3 model.embedding_dim 2.0 +863 3 model.scoring_fct_norm 1.0 +863 3 regularizer.weight 0.07369325091484737 +863 3 optimizer.lr 0.0010934222176387765 +863 3 training.batch_size 0.0 +863 3 training.label_smoothing 0.001113446496476352 +863 4 model.embedding_dim 0.0 +863 4 model.scoring_fct_norm 2.0 +863 4 regularizer.weight 0.09432683392130085 +863 4 optimizer.lr 0.0011017289448078573 +863 4 training.batch_size 1.0 +863 4 training.label_smoothing 0.013119920862526883 +863 5 model.embedding_dim 0.0 +863 5 model.scoring_fct_norm 1.0 +863 5 regularizer.weight 0.2557212780597152 +863 5 optimizer.lr 0.001367063787885191 +863 5 training.batch_size 0.0 +863 5 training.label_smoothing 0.07558449871570555 +863 6 model.embedding_dim 1.0 +863 6 model.scoring_fct_norm 1.0 +863 6 regularizer.weight 0.012680425050760126 +863 6 optimizer.lr 0.005155735179744638 +863 6 training.batch_size 0.0 +863 6 training.label_smoothing 0.27668031341584837 +863 7 model.embedding_dim 2.0 +863 7 model.scoring_fct_norm 2.0 +863 7 regularizer.weight 0.20368258345023577 +863 7 optimizer.lr 0.0018351702402927086 +863 7 training.batch_size 2.0 +863 7 training.label_smoothing 0.40154950917974325 +863 1 dataset """fb15k237""" +863 1 model """transh""" +863 1 loss """bceaftersigmoid""" +863 1 regularizer """transh""" +863 1 optimizer """adam""" +863 1 training_loop """lcwa""" +863 1 evaluator """rankbased""" +863 2 dataset """fb15k237""" +863 2 model """transh""" +863 2 loss """bceaftersigmoid""" +863 2 regularizer """transh""" +863 2 optimizer """adam""" +863 2 training_loop """lcwa""" +863 2 evaluator """rankbased""" +863 3 dataset """fb15k237""" +863 3 model 
"""transh""" +863 3 loss """bceaftersigmoid""" +863 3 regularizer """transh""" +863 3 optimizer """adam""" +863 3 training_loop """lcwa""" +863 3 evaluator """rankbased""" +863 4 dataset """fb15k237""" +863 4 model """transh""" +863 4 loss """bceaftersigmoid""" +863 4 regularizer """transh""" +863 4 optimizer """adam""" +863 4 training_loop """lcwa""" +863 4 evaluator """rankbased""" +863 5 dataset """fb15k237""" +863 5 model """transh""" +863 5 loss """bceaftersigmoid""" +863 5 regularizer """transh""" +863 5 optimizer """adam""" +863 5 training_loop """lcwa""" +863 5 evaluator """rankbased""" +863 6 dataset """fb15k237""" +863 6 model """transh""" +863 6 loss """bceaftersigmoid""" +863 6 regularizer """transh""" +863 6 optimizer """adam""" +863 6 training_loop """lcwa""" +863 6 evaluator """rankbased""" +863 7 dataset """fb15k237""" +863 7 model """transh""" +863 7 loss """bceaftersigmoid""" +863 7 regularizer """transh""" +863 7 optimizer """adam""" +863 7 training_loop """lcwa""" +863 7 evaluator """rankbased""" +864 1 model.embedding_dim 0.0 +864 1 model.scoring_fct_norm 2.0 +864 1 regularizer.weight 0.03364088720235358 +864 1 optimizer.lr 0.08728767155294781 +864 1 training.batch_size 1.0 +864 1 training.label_smoothing 0.11626731587910245 +864 2 model.embedding_dim 1.0 +864 2 model.scoring_fct_norm 2.0 +864 2 regularizer.weight 0.13055708589702328 +864 2 optimizer.lr 0.002408364004064257 +864 2 training.batch_size 2.0 +864 2 training.label_smoothing 0.008467970049030356 +864 3 model.embedding_dim 0.0 +864 3 model.scoring_fct_norm 2.0 +864 3 regularizer.weight 0.22265335506055375 +864 3 optimizer.lr 0.051087258196912774 +864 3 training.batch_size 2.0 +864 3 training.label_smoothing 0.01668770020301983 +864 4 model.embedding_dim 0.0 +864 4 model.scoring_fct_norm 2.0 +864 4 regularizer.weight 0.026093059898486363 +864 4 optimizer.lr 0.003669989146105706 +864 4 training.batch_size 1.0 +864 4 training.label_smoothing 0.0022144473446322707 +864 5 
model.embedding_dim 1.0 +864 5 model.scoring_fct_norm 1.0 +864 5 regularizer.weight 0.04330467769723105 +864 5 optimizer.lr 0.013878973541153911 +864 5 training.batch_size 0.0 +864 5 training.label_smoothing 0.11820726554776373 +864 6 model.embedding_dim 2.0 +864 6 model.scoring_fct_norm 2.0 +864 6 regularizer.weight 0.01834384194372959 +864 6 optimizer.lr 0.0011179276106256688 +864 6 training.batch_size 0.0 +864 6 training.label_smoothing 0.09694926889347924 +864 7 model.embedding_dim 1.0 +864 7 model.scoring_fct_norm 2.0 +864 7 regularizer.weight 0.017253565821361647 +864 7 optimizer.lr 0.005546978107035926 +864 7 training.batch_size 0.0 +864 7 training.label_smoothing 0.8968198725944724 +864 8 model.embedding_dim 1.0 +864 8 model.scoring_fct_norm 2.0 +864 8 regularizer.weight 0.041262733926045354 +864 8 optimizer.lr 0.0012781450099691298 +864 8 training.batch_size 0.0 +864 8 training.label_smoothing 0.09534042082253721 +864 9 model.embedding_dim 0.0 +864 9 model.scoring_fct_norm 2.0 +864 9 regularizer.weight 0.23005380186372523 +864 9 optimizer.lr 0.0012294395835150214 +864 9 training.batch_size 2.0 +864 9 training.label_smoothing 0.0013468561704625957 +864 10 model.embedding_dim 0.0 +864 10 model.scoring_fct_norm 1.0 +864 10 regularizer.weight 0.08456147930704543 +864 10 optimizer.lr 0.005203319299591233 +864 10 training.batch_size 2.0 +864 10 training.label_smoothing 0.07555396233183287 +864 1 dataset """fb15k237""" +864 1 model """transh""" +864 1 loss """softplus""" +864 1 regularizer """transh""" +864 1 optimizer """adam""" +864 1 training_loop """lcwa""" +864 1 evaluator """rankbased""" +864 2 dataset """fb15k237""" +864 2 model """transh""" +864 2 loss """softplus""" +864 2 regularizer """transh""" +864 2 optimizer """adam""" +864 2 training_loop """lcwa""" +864 2 evaluator """rankbased""" +864 3 dataset """fb15k237""" +864 3 model """transh""" +864 3 loss """softplus""" +864 3 regularizer """transh""" +864 3 optimizer """adam""" +864 3 training_loop 
"""lcwa""" +864 3 evaluator """rankbased""" +864 4 dataset """fb15k237""" +864 4 model """transh""" +864 4 loss """softplus""" +864 4 regularizer """transh""" +864 4 optimizer """adam""" +864 4 training_loop """lcwa""" +864 4 evaluator """rankbased""" +864 5 dataset """fb15k237""" +864 5 model """transh""" +864 5 loss """softplus""" +864 5 regularizer """transh""" +864 5 optimizer """adam""" +864 5 training_loop """lcwa""" +864 5 evaluator """rankbased""" +864 6 dataset """fb15k237""" +864 6 model """transh""" +864 6 loss """softplus""" +864 6 regularizer """transh""" +864 6 optimizer """adam""" +864 6 training_loop """lcwa""" +864 6 evaluator """rankbased""" +864 7 dataset """fb15k237""" +864 7 model """transh""" +864 7 loss """softplus""" +864 7 regularizer """transh""" +864 7 optimizer """adam""" +864 7 training_loop """lcwa""" +864 7 evaluator """rankbased""" +864 8 dataset """fb15k237""" +864 8 model """transh""" +864 8 loss """softplus""" +864 8 regularizer """transh""" +864 8 optimizer """adam""" +864 8 training_loop """lcwa""" +864 8 evaluator """rankbased""" +864 9 dataset """fb15k237""" +864 9 model """transh""" +864 9 loss """softplus""" +864 9 regularizer """transh""" +864 9 optimizer """adam""" +864 9 training_loop """lcwa""" +864 9 evaluator """rankbased""" +864 10 dataset """fb15k237""" +864 10 model """transh""" +864 10 loss """softplus""" +864 10 regularizer """transh""" +864 10 optimizer """adam""" +864 10 training_loop """lcwa""" +864 10 evaluator """rankbased""" +865 1 model.embedding_dim 1.0 +865 1 model.scoring_fct_norm 2.0 +865 1 regularizer.weight 0.038245679037805624 +865 1 optimizer.lr 0.032721172160427615 +865 1 training.batch_size 2.0 +865 1 training.label_smoothing 0.07353546574426192 +865 2 model.embedding_dim 1.0 +865 2 model.scoring_fct_norm 2.0 +865 2 regularizer.weight 0.017147470196432712 +865 2 optimizer.lr 0.029055204209728923 +865 2 training.batch_size 0.0 +865 2 training.label_smoothing 0.004007592502472593 +865 3 
model.embedding_dim 2.0 +865 3 model.scoring_fct_norm 2.0 +865 3 regularizer.weight 0.01641161469067293 +865 3 optimizer.lr 0.015885308012757598 +865 3 training.batch_size 0.0 +865 3 training.label_smoothing 0.39897236448452095 +865 4 model.embedding_dim 1.0 +865 4 model.scoring_fct_norm 1.0 +865 4 regularizer.weight 0.15138887660438838 +865 4 optimizer.lr 0.00613917323902178 +865 4 training.batch_size 0.0 +865 4 training.label_smoothing 0.0015163085906154865 +865 5 model.embedding_dim 1.0 +865 5 model.scoring_fct_norm 2.0 +865 5 regularizer.weight 0.29200130654110845 +865 5 optimizer.lr 0.07702852984093284 +865 5 training.batch_size 2.0 +865 5 training.label_smoothing 0.03213653836113594 +865 6 model.embedding_dim 0.0 +865 6 model.scoring_fct_norm 1.0 +865 6 regularizer.weight 0.08015518453209348 +865 6 optimizer.lr 0.01152838867955822 +865 6 training.batch_size 2.0 +865 6 training.label_smoothing 0.002115308389540573 +865 7 model.embedding_dim 2.0 +865 7 model.scoring_fct_norm 2.0 +865 7 regularizer.weight 0.15763402363919704 +865 7 optimizer.lr 0.038744314688828316 +865 7 training.batch_size 1.0 +865 7 training.label_smoothing 0.004920988077780607 +865 8 model.embedding_dim 2.0 +865 8 model.scoring_fct_norm 2.0 +865 8 regularizer.weight 0.03513278170839718 +865 8 optimizer.lr 0.005881651546141031 +865 8 training.batch_size 2.0 +865 8 training.label_smoothing 0.0011238915401450265 +865 9 model.embedding_dim 1.0 +865 9 model.scoring_fct_norm 1.0 +865 9 regularizer.weight 0.2817808989450712 +865 9 optimizer.lr 0.008352549574239242 +865 9 training.batch_size 2.0 +865 9 training.label_smoothing 0.0171720793432155 +865 1 dataset """fb15k237""" +865 1 model """transh""" +865 1 loss """bceaftersigmoid""" +865 1 regularizer """transh""" +865 1 optimizer """adam""" +865 1 training_loop """lcwa""" +865 1 evaluator """rankbased""" +865 2 dataset """fb15k237""" +865 2 model """transh""" +865 2 loss """bceaftersigmoid""" +865 2 regularizer """transh""" +865 2 optimizer 
"""adam""" +865 2 training_loop """lcwa""" +865 2 evaluator """rankbased""" +865 3 dataset """fb15k237""" +865 3 model """transh""" +865 3 loss """bceaftersigmoid""" +865 3 regularizer """transh""" +865 3 optimizer """adam""" +865 3 training_loop """lcwa""" +865 3 evaluator """rankbased""" +865 4 dataset """fb15k237""" +865 4 model """transh""" +865 4 loss """bceaftersigmoid""" +865 4 regularizer """transh""" +865 4 optimizer """adam""" +865 4 training_loop """lcwa""" +865 4 evaluator """rankbased""" +865 5 dataset """fb15k237""" +865 5 model """transh""" +865 5 loss """bceaftersigmoid""" +865 5 regularizer """transh""" +865 5 optimizer """adam""" +865 5 training_loop """lcwa""" +865 5 evaluator """rankbased""" +865 6 dataset """fb15k237""" +865 6 model """transh""" +865 6 loss """bceaftersigmoid""" +865 6 regularizer """transh""" +865 6 optimizer """adam""" +865 6 training_loop """lcwa""" +865 6 evaluator """rankbased""" +865 7 dataset """fb15k237""" +865 7 model """transh""" +865 7 loss """bceaftersigmoid""" +865 7 regularizer """transh""" +865 7 optimizer """adam""" +865 7 training_loop """lcwa""" +865 7 evaluator """rankbased""" +865 8 dataset """fb15k237""" +865 8 model """transh""" +865 8 loss """bceaftersigmoid""" +865 8 regularizer """transh""" +865 8 optimizer """adam""" +865 8 training_loop """lcwa""" +865 8 evaluator """rankbased""" +865 9 dataset """fb15k237""" +865 9 model """transh""" +865 9 loss """bceaftersigmoid""" +865 9 regularizer """transh""" +865 9 optimizer """adam""" +865 9 training_loop """lcwa""" +865 9 evaluator """rankbased""" +866 1 model.embedding_dim 0.0 +866 1 model.scoring_fct_norm 2.0 +866 1 regularizer.weight 0.01600965343646364 +866 1 optimizer.lr 0.009864183127730564 +866 1 training.batch_size 0.0 +866 1 training.label_smoothing 0.5362561882696739 +866 2 model.embedding_dim 1.0 +866 2 model.scoring_fct_norm 1.0 +866 2 regularizer.weight 0.03823243266780428 +866 2 optimizer.lr 0.0034857050271885834 +866 2 training.batch_size 0.0 
+866 2 training.label_smoothing 0.02650822259885458 +866 3 model.embedding_dim 0.0 +866 3 model.scoring_fct_norm 1.0 +866 3 regularizer.weight 0.020756250074591028 +866 3 optimizer.lr 0.0030156306705235716 +866 3 training.batch_size 1.0 +866 3 training.label_smoothing 0.04917345899419515 +866 4 model.embedding_dim 2.0 +866 4 model.scoring_fct_norm 1.0 +866 4 regularizer.weight 0.08454693179029733 +866 4 optimizer.lr 0.0819707095448887 +866 4 training.batch_size 0.0 +866 4 training.label_smoothing 0.22285432090303908 +866 5 model.embedding_dim 0.0 +866 5 model.scoring_fct_norm 2.0 +866 5 regularizer.weight 0.09700007563107683 +866 5 optimizer.lr 0.010091927879774214 +866 5 training.batch_size 2.0 +866 5 training.label_smoothing 0.15005123717707908 +866 6 model.embedding_dim 0.0 +866 6 model.scoring_fct_norm 2.0 +866 6 regularizer.weight 0.13185450383734967 +866 6 optimizer.lr 0.0031046208134082203 +866 6 training.batch_size 2.0 +866 6 training.label_smoothing 0.7361388904187689 +866 7 model.embedding_dim 2.0 +866 7 model.scoring_fct_norm 2.0 +866 7 regularizer.weight 0.10556456970593811 +866 7 optimizer.lr 0.001003201482580494 +866 7 training.batch_size 0.0 +866 7 training.label_smoothing 0.35345194006555314 +866 8 model.embedding_dim 1.0 +866 8 model.scoring_fct_norm 2.0 +866 8 regularizer.weight 0.1923188355062819 +866 8 optimizer.lr 0.013672219445812374 +866 8 training.batch_size 1.0 +866 8 training.label_smoothing 0.062380872949704895 +866 9 model.embedding_dim 0.0 +866 9 model.scoring_fct_norm 2.0 +866 9 regularizer.weight 0.07452494988230658 +866 9 optimizer.lr 0.0014062383671173163 +866 9 training.batch_size 0.0 +866 9 training.label_smoothing 0.002566931283432715 +866 10 model.embedding_dim 1.0 +866 10 model.scoring_fct_norm 1.0 +866 10 regularizer.weight 0.2862658585146639 +866 10 optimizer.lr 0.03939810622937287 +866 10 training.batch_size 2.0 +866 10 training.label_smoothing 0.403086254386729 +866 11 model.embedding_dim 1.0 +866 11 model.scoring_fct_norm 
2.0 +866 11 regularizer.weight 0.020669071257353535 +866 11 optimizer.lr 0.0016956084919431177 +866 11 training.batch_size 1.0 +866 11 training.label_smoothing 0.004120152193078891 +866 12 model.embedding_dim 2.0 +866 12 model.scoring_fct_norm 1.0 +866 12 regularizer.weight 0.0512195676269518 +866 12 optimizer.lr 0.02353947349538614 +866 12 training.batch_size 0.0 +866 12 training.label_smoothing 0.02669317959729815 +866 13 model.embedding_dim 2.0 +866 13 model.scoring_fct_norm 2.0 +866 13 regularizer.weight 0.019302994890073453 +866 13 optimizer.lr 0.06507345902103608 +866 13 training.batch_size 2.0 +866 13 training.label_smoothing 0.034250521519527816 +866 1 dataset """fb15k237""" +866 1 model """transh""" +866 1 loss """softplus""" +866 1 regularizer """transh""" +866 1 optimizer """adam""" +866 1 training_loop """lcwa""" +866 1 evaluator """rankbased""" +866 2 dataset """fb15k237""" +866 2 model """transh""" +866 2 loss """softplus""" +866 2 regularizer """transh""" +866 2 optimizer """adam""" +866 2 training_loop """lcwa""" +866 2 evaluator """rankbased""" +866 3 dataset """fb15k237""" +866 3 model """transh""" +866 3 loss """softplus""" +866 3 regularizer """transh""" +866 3 optimizer """adam""" +866 3 training_loop """lcwa""" +866 3 evaluator """rankbased""" +866 4 dataset """fb15k237""" +866 4 model """transh""" +866 4 loss """softplus""" +866 4 regularizer """transh""" +866 4 optimizer """adam""" +866 4 training_loop """lcwa""" +866 4 evaluator """rankbased""" +866 5 dataset """fb15k237""" +866 5 model """transh""" +866 5 loss """softplus""" +866 5 regularizer """transh""" +866 5 optimizer """adam""" +866 5 training_loop """lcwa""" +866 5 evaluator """rankbased""" +866 6 dataset """fb15k237""" +866 6 model """transh""" +866 6 loss """softplus""" +866 6 regularizer """transh""" +866 6 optimizer """adam""" +866 6 training_loop """lcwa""" +866 6 evaluator """rankbased""" +866 7 dataset """fb15k237""" +866 7 model """transh""" +866 7 loss """softplus""" +866 7 
regularizer """transh""" +866 7 optimizer """adam""" +866 7 training_loop """lcwa""" +866 7 evaluator """rankbased""" +866 8 dataset """fb15k237""" +866 8 model """transh""" +866 8 loss """softplus""" +866 8 regularizer """transh""" +866 8 optimizer """adam""" +866 8 training_loop """lcwa""" +866 8 evaluator """rankbased""" +866 9 dataset """fb15k237""" +866 9 model """transh""" +866 9 loss """softplus""" +866 9 regularizer """transh""" +866 9 optimizer """adam""" +866 9 training_loop """lcwa""" +866 9 evaluator """rankbased""" +866 10 dataset """fb15k237""" +866 10 model """transh""" +866 10 loss """softplus""" +866 10 regularizer """transh""" +866 10 optimizer """adam""" +866 10 training_loop """lcwa""" +866 10 evaluator """rankbased""" +866 11 dataset """fb15k237""" +866 11 model """transh""" +866 11 loss """softplus""" +866 11 regularizer """transh""" +866 11 optimizer """adam""" +866 11 training_loop """lcwa""" +866 11 evaluator """rankbased""" +866 12 dataset """fb15k237""" +866 12 model """transh""" +866 12 loss """softplus""" +866 12 regularizer """transh""" +866 12 optimizer """adam""" +866 12 training_loop """lcwa""" +866 12 evaluator """rankbased""" +866 13 dataset """fb15k237""" +866 13 model """transh""" +866 13 loss """softplus""" +866 13 regularizer """transh""" +866 13 optimizer """adam""" +866 13 training_loop """lcwa""" +866 13 evaluator """rankbased""" +867 1 model.embedding_dim 2.0 +867 1 model.scoring_fct_norm 1.0 +867 1 regularizer.weight 0.011302704839941318 +867 1 optimizer.lr 0.005308469471709657 +867 1 training.batch_size 2.0 +867 1 training.label_smoothing 0.19216588649865154 +867 2 model.embedding_dim 0.0 +867 2 model.scoring_fct_norm 1.0 +867 2 regularizer.weight 0.010486191203987232 +867 2 optimizer.lr 0.04110391971421373 +867 2 training.batch_size 2.0 +867 2 training.label_smoothing 0.5334646815179358 +867 3 model.embedding_dim 2.0 +867 3 model.scoring_fct_norm 2.0 +867 3 regularizer.weight 0.09670279905835186 +867 3 optimizer.lr 
0.09797272252501057 +867 3 training.batch_size 0.0 +867 3 training.label_smoothing 0.015611314157773771 +867 4 model.embedding_dim 1.0 +867 4 model.scoring_fct_norm 1.0 +867 4 regularizer.weight 0.025001706269866718 +867 4 optimizer.lr 0.09576221458225105 +867 4 training.batch_size 2.0 +867 4 training.label_smoothing 0.0359926150715092 +867 5 model.embedding_dim 2.0 +867 5 model.scoring_fct_norm 2.0 +867 5 regularizer.weight 0.099399158698948 +867 5 optimizer.lr 0.06502328710356148 +867 5 training.batch_size 1.0 +867 5 training.label_smoothing 0.0014260157931593122 +867 6 model.embedding_dim 2.0 +867 6 model.scoring_fct_norm 1.0 +867 6 regularizer.weight 0.0376451466792992 +867 6 optimizer.lr 0.00714591903639208 +867 6 training.batch_size 1.0 +867 6 training.label_smoothing 0.0031246017785490944 +867 7 model.embedding_dim 0.0 +867 7 model.scoring_fct_norm 1.0 +867 7 regularizer.weight 0.14831898307177366 +867 7 optimizer.lr 0.0038581013317132247 +867 7 training.batch_size 1.0 +867 7 training.label_smoothing 0.007718052087962576 +867 1 dataset """fb15k237""" +867 1 model """transh""" +867 1 loss """crossentropy""" +867 1 regularizer """transh""" +867 1 optimizer """adam""" +867 1 training_loop """lcwa""" +867 1 evaluator """rankbased""" +867 2 dataset """fb15k237""" +867 2 model """transh""" +867 2 loss """crossentropy""" +867 2 regularizer """transh""" +867 2 optimizer """adam""" +867 2 training_loop """lcwa""" +867 2 evaluator """rankbased""" +867 3 dataset """fb15k237""" +867 3 model """transh""" +867 3 loss """crossentropy""" +867 3 regularizer """transh""" +867 3 optimizer """adam""" +867 3 training_loop """lcwa""" +867 3 evaluator """rankbased""" +867 4 dataset """fb15k237""" +867 4 model """transh""" +867 4 loss """crossentropy""" +867 4 regularizer """transh""" +867 4 optimizer """adam""" +867 4 training_loop """lcwa""" +867 4 evaluator """rankbased""" +867 5 dataset """fb15k237""" +867 5 model """transh""" +867 5 loss """crossentropy""" +867 5 regularizer 
"""transh""" +867 5 optimizer """adam""" +867 5 training_loop """lcwa""" +867 5 evaluator """rankbased""" +867 6 dataset """fb15k237""" +867 6 model """transh""" +867 6 loss """crossentropy""" +867 6 regularizer """transh""" +867 6 optimizer """adam""" +867 6 training_loop """lcwa""" +867 6 evaluator """rankbased""" +867 7 dataset """fb15k237""" +867 7 model """transh""" +867 7 loss """crossentropy""" +867 7 regularizer """transh""" +867 7 optimizer """adam""" +867 7 training_loop """lcwa""" +867 7 evaluator """rankbased""" +868 1 model.embedding_dim 0.0 +868 1 model.scoring_fct_norm 1.0 +868 1 regularizer.weight 0.013366554277090013 +868 1 optimizer.lr 0.009967346862271206 +868 1 training.batch_size 2.0 +868 1 training.label_smoothing 0.004079423287500728 +868 2 model.embedding_dim 0.0 +868 2 model.scoring_fct_norm 1.0 +868 2 regularizer.weight 0.01853558456778702 +868 2 optimizer.lr 0.028286213134686034 +868 2 training.batch_size 1.0 +868 2 training.label_smoothing 0.007658379004773596 +868 3 model.embedding_dim 0.0 +868 3 model.scoring_fct_norm 1.0 +868 3 regularizer.weight 0.014894186335794328 +868 3 optimizer.lr 0.05590182265128406 +868 3 training.batch_size 2.0 +868 3 training.label_smoothing 0.03382682854542728 +868 4 model.embedding_dim 2.0 +868 4 model.scoring_fct_norm 2.0 +868 4 regularizer.weight 0.012703708400608218 +868 4 optimizer.lr 0.01725896188279544 +868 4 training.batch_size 1.0 +868 4 training.label_smoothing 0.13127755924820986 +868 5 model.embedding_dim 2.0 +868 5 model.scoring_fct_norm 2.0 +868 5 regularizer.weight 0.03380115757210262 +868 5 optimizer.lr 0.0010868976867248086 +868 5 training.batch_size 1.0 +868 5 training.label_smoothing 0.006666051340918973 +868 6 model.embedding_dim 0.0 +868 6 model.scoring_fct_norm 2.0 +868 6 regularizer.weight 0.08344156417237449 +868 6 optimizer.lr 0.008291666376687448 +868 6 training.batch_size 1.0 +868 6 training.label_smoothing 0.019472293242034183 +868 7 model.embedding_dim 2.0 +868 7 
model.scoring_fct_norm 2.0 +868 7 regularizer.weight 0.07855589826097052 +868 7 optimizer.lr 0.0012913612515769073 +868 7 training.batch_size 0.0 +868 7 training.label_smoothing 0.008649160492356856 +868 8 model.embedding_dim 1.0 +868 8 model.scoring_fct_norm 1.0 +868 8 regularizer.weight 0.02609305236629288 +868 8 optimizer.lr 0.004730862850036544 +868 8 training.batch_size 2.0 +868 8 training.label_smoothing 0.07020278757866955 +868 9 model.embedding_dim 1.0 +868 9 model.scoring_fct_norm 1.0 +868 9 regularizer.weight 0.02713203531413095 +868 9 optimizer.lr 0.015753702079056107 +868 9 training.batch_size 0.0 +868 9 training.label_smoothing 0.0023386190836582887 +868 10 model.embedding_dim 2.0 +868 10 model.scoring_fct_norm 2.0 +868 10 regularizer.weight 0.11504844088719329 +868 10 optimizer.lr 0.02277368587989424 +868 10 training.batch_size 1.0 +868 10 training.label_smoothing 0.05286811200981724 +868 11 model.embedding_dim 2.0 +868 11 model.scoring_fct_norm 2.0 +868 11 regularizer.weight 0.1516728782676801 +868 11 optimizer.lr 0.006720079331902569 +868 11 training.batch_size 0.0 +868 11 training.label_smoothing 0.001540627729927172 +868 1 dataset """fb15k237""" +868 1 model """transh""" +868 1 loss """crossentropy""" +868 1 regularizer """transh""" +868 1 optimizer """adam""" +868 1 training_loop """lcwa""" +868 1 evaluator """rankbased""" +868 2 dataset """fb15k237""" +868 2 model """transh""" +868 2 loss """crossentropy""" +868 2 regularizer """transh""" +868 2 optimizer """adam""" +868 2 training_loop """lcwa""" +868 2 evaluator """rankbased""" +868 3 dataset """fb15k237""" +868 3 model """transh""" +868 3 loss """crossentropy""" +868 3 regularizer """transh""" +868 3 optimizer """adam""" +868 3 training_loop """lcwa""" +868 3 evaluator """rankbased""" +868 4 dataset """fb15k237""" +868 4 model """transh""" +868 4 loss """crossentropy""" +868 4 regularizer """transh""" +868 4 optimizer """adam""" +868 4 training_loop """lcwa""" +868 4 evaluator """rankbased""" 
+868 5 dataset """fb15k237""" +868 5 model """transh""" +868 5 loss """crossentropy""" +868 5 regularizer """transh""" +868 5 optimizer """adam""" +868 5 training_loop """lcwa""" +868 5 evaluator """rankbased""" +868 6 dataset """fb15k237""" +868 6 model """transh""" +868 6 loss """crossentropy""" +868 6 regularizer """transh""" +868 6 optimizer """adam""" +868 6 training_loop """lcwa""" +868 6 evaluator """rankbased""" +868 7 dataset """fb15k237""" +868 7 model """transh""" +868 7 loss """crossentropy""" +868 7 regularizer """transh""" +868 7 optimizer """adam""" +868 7 training_loop """lcwa""" +868 7 evaluator """rankbased""" +868 8 dataset """fb15k237""" +868 8 model """transh""" +868 8 loss """crossentropy""" +868 8 regularizer """transh""" +868 8 optimizer """adam""" +868 8 training_loop """lcwa""" +868 8 evaluator """rankbased""" +868 9 dataset """fb15k237""" +868 9 model """transh""" +868 9 loss """crossentropy""" +868 9 regularizer """transh""" +868 9 optimizer """adam""" +868 9 training_loop """lcwa""" +868 9 evaluator """rankbased""" +868 10 dataset """fb15k237""" +868 10 model """transh""" +868 10 loss """crossentropy""" +868 10 regularizer """transh""" +868 10 optimizer """adam""" +868 10 training_loop """lcwa""" +868 10 evaluator """rankbased""" +868 11 dataset """fb15k237""" +868 11 model """transh""" +868 11 loss """crossentropy""" +868 11 regularizer """transh""" +868 11 optimizer """adam""" +868 11 training_loop """lcwa""" +868 11 evaluator """rankbased""" +869 1 model.embedding_dim 2.0 +869 1 model.scoring_fct_norm 1.0 +869 1 regularizer.weight 0.13293810915619697 +869 1 optimizer.lr 0.004203394317206855 +869 1 training.batch_size 0.0 +869 1 training.label_smoothing 0.0683134408231186 +869 2 model.embedding_dim 2.0 +869 2 model.scoring_fct_norm 2.0 +869 2 regularizer.weight 0.0506078935701347 +869 2 optimizer.lr 0.0015461957228067812 +869 2 training.batch_size 2.0 +869 2 training.label_smoothing 0.015338182930256511 +869 3 model.embedding_dim 0.0 
+869 3 model.scoring_fct_norm 1.0 +869 3 regularizer.weight 0.12398359191554076 +869 3 optimizer.lr 0.05601680677805387 +869 3 training.batch_size 2.0 +869 3 training.label_smoothing 0.001722228230419127 +869 4 model.embedding_dim 0.0 +869 4 model.scoring_fct_norm 2.0 +869 4 regularizer.weight 0.01050446381085867 +869 4 optimizer.lr 0.002262878094847487 +869 4 training.batch_size 0.0 +869 4 training.label_smoothing 0.05191079573285809 +869 5 model.embedding_dim 1.0 +869 5 model.scoring_fct_norm 2.0 +869 5 regularizer.weight 0.0350924414660073 +869 5 optimizer.lr 0.0014570827373583102 +869 5 training.batch_size 1.0 +869 5 training.label_smoothing 0.00708846121182605 +869 6 model.embedding_dim 2.0 +869 6 model.scoring_fct_norm 2.0 +869 6 regularizer.weight 0.17547637940091454 +869 6 optimizer.lr 0.03625830714626229 +869 6 training.batch_size 1.0 +869 6 training.label_smoothing 0.0010551256903049515 +869 7 model.embedding_dim 0.0 +869 7 model.scoring_fct_norm 1.0 +869 7 regularizer.weight 0.04696023450079867 +869 7 optimizer.lr 0.07447997244407428 +869 7 training.batch_size 1.0 +869 7 training.label_smoothing 0.003596138604056153 +869 8 model.embedding_dim 1.0 +869 8 model.scoring_fct_norm 1.0 +869 8 regularizer.weight 0.05015074841243354 +869 8 optimizer.lr 0.08542182422808084 +869 8 training.batch_size 2.0 +869 8 training.label_smoothing 0.11947077499575431 +869 9 model.embedding_dim 0.0 +869 9 model.scoring_fct_norm 1.0 +869 9 regularizer.weight 0.07961431502274197 +869 9 optimizer.lr 0.05856915089481067 +869 9 training.batch_size 1.0 +869 9 training.label_smoothing 0.09196089781180769 +869 10 model.embedding_dim 0.0 +869 10 model.scoring_fct_norm 2.0 +869 10 regularizer.weight 0.10969991553215012 +869 10 optimizer.lr 0.0021881667918942117 +869 10 training.batch_size 1.0 +869 10 training.label_smoothing 0.009706618349726689 +869 11 model.embedding_dim 2.0 +869 11 model.scoring_fct_norm 2.0 +869 11 regularizer.weight 0.16694805495273551 +869 11 optimizer.lr 
0.004243805135333805 +869 11 training.batch_size 0.0 +869 11 training.label_smoothing 0.7341881736455815 +869 12 model.embedding_dim 1.0 +869 12 model.scoring_fct_norm 1.0 +869 12 regularizer.weight 0.04796682954320452 +869 12 optimizer.lr 0.013664412113099482 +869 12 training.batch_size 2.0 +869 12 training.label_smoothing 0.012219319144529714 +869 13 model.embedding_dim 2.0 +869 13 model.scoring_fct_norm 2.0 +869 13 regularizer.weight 0.013856756238437996 +869 13 optimizer.lr 0.0015163479691494438 +869 13 training.batch_size 1.0 +869 13 training.label_smoothing 0.009449173850938733 +869 14 model.embedding_dim 0.0 +869 14 model.scoring_fct_norm 1.0 +869 14 regularizer.weight 0.23008604779408 +869 14 optimizer.lr 0.009992284758215085 +869 14 training.batch_size 2.0 +869 14 training.label_smoothing 0.001165675879470223 +869 15 model.embedding_dim 2.0 +869 15 model.scoring_fct_norm 1.0 +869 15 regularizer.weight 0.11681995426679709 +869 15 optimizer.lr 0.001544602628965098 +869 15 training.batch_size 0.0 +869 15 training.label_smoothing 0.19773060386910649 +869 16 model.embedding_dim 0.0 +869 16 model.scoring_fct_norm 2.0 +869 16 regularizer.weight 0.07942745960390345 +869 16 optimizer.lr 0.007129629172096219 +869 16 training.batch_size 0.0 +869 16 training.label_smoothing 0.010496075623319834 +869 17 model.embedding_dim 0.0 +869 17 model.scoring_fct_norm 2.0 +869 17 regularizer.weight 0.27446567318322607 +869 17 optimizer.lr 0.001100070873201311 +869 17 training.batch_size 2.0 +869 17 training.label_smoothing 0.001311829877632083 +869 18 model.embedding_dim 0.0 +869 18 model.scoring_fct_norm 1.0 +869 18 regularizer.weight 0.06005326801902641 +869 18 optimizer.lr 0.0268965003950342 +869 18 training.batch_size 1.0 +869 18 training.label_smoothing 0.9138207504983428 +869 19 model.embedding_dim 1.0 +869 19 model.scoring_fct_norm 2.0 +869 19 regularizer.weight 0.06991265880953913 +869 19 optimizer.lr 0.056906443088756926 +869 19 training.batch_size 0.0 +869 19 
training.label_smoothing 0.013155491834717481 +869 20 model.embedding_dim 1.0 +869 20 model.scoring_fct_norm 2.0 +869 20 regularizer.weight 0.010171258412513458 +869 20 optimizer.lr 0.005371982553516335 +869 20 training.batch_size 1.0 +869 20 training.label_smoothing 0.19428795818058067 +869 21 model.embedding_dim 1.0 +869 21 model.scoring_fct_norm 2.0 +869 21 regularizer.weight 0.016363278733399995 +869 21 optimizer.lr 0.001974795530209231 +869 21 training.batch_size 1.0 +869 21 training.label_smoothing 0.9147048399878309 +869 22 model.embedding_dim 0.0 +869 22 model.scoring_fct_norm 1.0 +869 22 regularizer.weight 0.2318110604535097 +869 22 optimizer.lr 0.03687103623066637 +869 22 training.batch_size 2.0 +869 22 training.label_smoothing 0.3739464760704351 +869 23 model.embedding_dim 0.0 +869 23 model.scoring_fct_norm 1.0 +869 23 regularizer.weight 0.11956925486487793 +869 23 optimizer.lr 0.003419820047176131 +869 23 training.batch_size 1.0 +869 23 training.label_smoothing 0.1021444724613386 +869 24 model.embedding_dim 1.0 +869 24 model.scoring_fct_norm 2.0 +869 24 regularizer.weight 0.07442118078061702 +869 24 optimizer.lr 0.02128773963682486 +869 24 training.batch_size 1.0 +869 24 training.label_smoothing 0.001204262523363697 +869 25 model.embedding_dim 0.0 +869 25 model.scoring_fct_norm 1.0 +869 25 regularizer.weight 0.1203988253162805 +869 25 optimizer.lr 0.001194273484395714 +869 25 training.batch_size 1.0 +869 25 training.label_smoothing 0.007875257175809959 +869 26 model.embedding_dim 1.0 +869 26 model.scoring_fct_norm 1.0 +869 26 regularizer.weight 0.0690075881087414 +869 26 optimizer.lr 0.001198350884574666 +869 26 training.batch_size 1.0 +869 26 training.label_smoothing 0.02420521872069016 +869 27 model.embedding_dim 2.0 +869 27 model.scoring_fct_norm 2.0 +869 27 regularizer.weight 0.013226292650096215 +869 27 optimizer.lr 0.03269243123899455 +869 27 training.batch_size 0.0 +869 27 training.label_smoothing 0.19772748183695352 +869 28 model.embedding_dim 
0.0 +869 28 model.scoring_fct_norm 2.0 +869 28 regularizer.weight 0.03245224366597689 +869 28 optimizer.lr 0.04875085692677176 +869 28 training.batch_size 0.0 +869 28 training.label_smoothing 0.4244204620474965 +869 29 model.embedding_dim 0.0 +869 29 model.scoring_fct_norm 1.0 +869 29 regularizer.weight 0.051393515221195604 +869 29 optimizer.lr 0.0018305525263699123 +869 29 training.batch_size 0.0 +869 29 training.label_smoothing 0.0040401183591095 +869 30 model.embedding_dim 0.0 +869 30 model.scoring_fct_norm 2.0 +869 30 regularizer.weight 0.1650757183376805 +869 30 optimizer.lr 0.003941770887150888 +869 30 training.batch_size 2.0 +869 30 training.label_smoothing 0.0027107530067723346 +869 31 model.embedding_dim 1.0 +869 31 model.scoring_fct_norm 1.0 +869 31 regularizer.weight 0.16621965312132542 +869 31 optimizer.lr 0.0028879709474547775 +869 31 training.batch_size 1.0 +869 31 training.label_smoothing 0.0629704696869199 +869 32 model.embedding_dim 1.0 +869 32 model.scoring_fct_norm 1.0 +869 32 regularizer.weight 0.06542567968287397 +869 32 optimizer.lr 0.05653032440745022 +869 32 training.batch_size 2.0 +869 32 training.label_smoothing 0.018682873116815466 +869 33 model.embedding_dim 2.0 +869 33 model.scoring_fct_norm 1.0 +869 33 regularizer.weight 0.013292524697167168 +869 33 optimizer.lr 0.039570858720571325 +869 33 training.batch_size 1.0 +869 33 training.label_smoothing 0.0039348151597384876 +869 34 model.embedding_dim 0.0 +869 34 model.scoring_fct_norm 2.0 +869 34 regularizer.weight 0.023523004431597914 +869 34 optimizer.lr 0.0028828343246448634 +869 34 training.batch_size 2.0 +869 34 training.label_smoothing 0.20103330872981073 +869 35 model.embedding_dim 2.0 +869 35 model.scoring_fct_norm 2.0 +869 35 regularizer.weight 0.05671067218288957 +869 35 optimizer.lr 0.06265408644686618 +869 35 training.batch_size 1.0 +869 35 training.label_smoothing 0.0010577187435073102 +869 36 model.embedding_dim 1.0 +869 36 model.scoring_fct_norm 1.0 +869 36 regularizer.weight 
0.07634615024013273 +869 36 optimizer.lr 0.015498077793525581 +869 36 training.batch_size 0.0 +869 36 training.label_smoothing 0.0031336330444816296 +869 37 model.embedding_dim 1.0 +869 37 model.scoring_fct_norm 2.0 +869 37 regularizer.weight 0.03962161907832374 +869 37 optimizer.lr 0.009431253604105377 +869 37 training.batch_size 2.0 +869 37 training.label_smoothing 0.39201109243969884 +869 38 model.embedding_dim 0.0 +869 38 model.scoring_fct_norm 2.0 +869 38 regularizer.weight 0.26123147820297055 +869 38 optimizer.lr 0.01597119545553963 +869 38 training.batch_size 2.0 +869 38 training.label_smoothing 0.591099967507938 +869 39 model.embedding_dim 2.0 +869 39 model.scoring_fct_norm 1.0 +869 39 regularizer.weight 0.0957145923536363 +869 39 optimizer.lr 0.038356840029931116 +869 39 training.batch_size 2.0 +869 39 training.label_smoothing 0.41409237106386104 +869 40 model.embedding_dim 0.0 +869 40 model.scoring_fct_norm 1.0 +869 40 regularizer.weight 0.09383124258493669 +869 40 optimizer.lr 0.03815227061611138 +869 40 training.batch_size 0.0 +869 40 training.label_smoothing 0.038336550175772054 +869 41 model.embedding_dim 2.0 +869 41 model.scoring_fct_norm 1.0 +869 41 regularizer.weight 0.05118695213941476 +869 41 optimizer.lr 0.00283858348688028 +869 41 training.batch_size 0.0 +869 41 training.label_smoothing 0.2218911758480457 +869 42 model.embedding_dim 1.0 +869 42 model.scoring_fct_norm 2.0 +869 42 regularizer.weight 0.019779900526975133 +869 42 optimizer.lr 0.001194196716871608 +869 42 training.batch_size 0.0 +869 42 training.label_smoothing 0.009730573593844936 +869 43 model.embedding_dim 1.0 +869 43 model.scoring_fct_norm 2.0 +869 43 regularizer.weight 0.2742819678240949 +869 43 optimizer.lr 0.0012760159782943265 +869 43 training.batch_size 2.0 +869 43 training.label_smoothing 0.012406794223876142 +869 44 model.embedding_dim 1.0 +869 44 model.scoring_fct_norm 2.0 +869 44 regularizer.weight 0.0971678172448755 +869 44 optimizer.lr 0.0021557581031401638 +869 44 
training.batch_size 0.0 +869 44 training.label_smoothing 0.9818878636662807 +869 45 model.embedding_dim 0.0 +869 45 model.scoring_fct_norm 1.0 +869 45 regularizer.weight 0.11536206923108583 +869 45 optimizer.lr 0.0025525983196401218 +869 45 training.batch_size 0.0 +869 45 training.label_smoothing 0.001256191543712792 +869 46 model.embedding_dim 2.0 +869 46 model.scoring_fct_norm 1.0 +869 46 regularizer.weight 0.012473773122049768 +869 46 optimizer.lr 0.01147811674025827 +869 46 training.batch_size 1.0 +869 46 training.label_smoothing 0.17074926060105555 +869 47 model.embedding_dim 2.0 +869 47 model.scoring_fct_norm 2.0 +869 47 regularizer.weight 0.017061178423045276 +869 47 optimizer.lr 0.00508561840470022 +869 47 training.batch_size 0.0 +869 47 training.label_smoothing 0.3701447823981324 +869 48 model.embedding_dim 1.0 +869 48 model.scoring_fct_norm 2.0 +869 48 regularizer.weight 0.0847946560500996 +869 48 optimizer.lr 0.0011118472262970787 +869 48 training.batch_size 2.0 +869 48 training.label_smoothing 0.0022426703309787496 +869 49 model.embedding_dim 1.0 +869 49 model.scoring_fct_norm 2.0 +869 49 regularizer.weight 0.17368254367468025 +869 49 optimizer.lr 0.038988603900735434 +869 49 training.batch_size 2.0 +869 49 training.label_smoothing 0.23938678006029568 +869 50 model.embedding_dim 2.0 +869 50 model.scoring_fct_norm 2.0 +869 50 regularizer.weight 0.06794164930604063 +869 50 optimizer.lr 0.0034472964471478147 +869 50 training.batch_size 2.0 +869 50 training.label_smoothing 0.001009493653010739 +869 51 model.embedding_dim 1.0 +869 51 model.scoring_fct_norm 2.0 +869 51 regularizer.weight 0.025798209547459743 +869 51 optimizer.lr 0.023426585329381334 +869 51 training.batch_size 1.0 +869 51 training.label_smoothing 0.01839954820197516 +869 52 model.embedding_dim 0.0 +869 52 model.scoring_fct_norm 1.0 +869 52 regularizer.weight 0.21232938630816672 +869 52 optimizer.lr 0.002688225439088235 +869 52 training.batch_size 2.0 +869 52 training.label_smoothing 
0.01150378571847462 +869 53 model.embedding_dim 1.0 +869 53 model.scoring_fct_norm 2.0 +869 53 regularizer.weight 0.056782556911636424 +869 53 optimizer.lr 0.0019810865160341915 +869 53 training.batch_size 2.0 +869 53 training.label_smoothing 0.040628361019401114 +869 54 model.embedding_dim 2.0 +869 54 model.scoring_fct_norm 1.0 +869 54 regularizer.weight 0.031358783157322295 +869 54 optimizer.lr 0.0025256107247439413 +869 54 training.batch_size 1.0 +869 54 training.label_smoothing 0.13070113210623632 +869 55 model.embedding_dim 2.0 +869 55 model.scoring_fct_norm 2.0 +869 55 regularizer.weight 0.060722908691600574 +869 55 optimizer.lr 0.004739060913692826 +869 55 training.batch_size 2.0 +869 55 training.label_smoothing 0.5954113405841852 +869 56 model.embedding_dim 1.0 +869 56 model.scoring_fct_norm 1.0 +869 56 regularizer.weight 0.01594351732644244 +869 56 optimizer.lr 0.032317721925584186 +869 56 training.batch_size 2.0 +869 56 training.label_smoothing 0.5757201133236444 +869 57 model.embedding_dim 1.0 +869 57 model.scoring_fct_norm 2.0 +869 57 regularizer.weight 0.1328941709953109 +869 57 optimizer.lr 0.01732482875028124 +869 57 training.batch_size 1.0 +869 57 training.label_smoothing 0.5484991444009631 +869 58 model.embedding_dim 0.0 +869 58 model.scoring_fct_norm 2.0 +869 58 regularizer.weight 0.03466596456590222 +869 58 optimizer.lr 0.010651429038004092 +869 58 training.batch_size 2.0 +869 58 training.label_smoothing 0.16821159228044538 +869 59 model.embedding_dim 1.0 +869 59 model.scoring_fct_norm 2.0 +869 59 regularizer.weight 0.0898059143266694 +869 59 optimizer.lr 0.007589635241993115 +869 59 training.batch_size 1.0 +869 59 training.label_smoothing 0.06589378139093145 +869 60 model.embedding_dim 1.0 +869 60 model.scoring_fct_norm 2.0 +869 60 regularizer.weight 0.0109264456517282 +869 60 optimizer.lr 0.005967775546252907 +869 60 training.batch_size 2.0 +869 60 training.label_smoothing 0.015210352041673508 +869 61 model.embedding_dim 2.0 +869 61 
model.scoring_fct_norm 2.0 +869 61 regularizer.weight 0.02700596735196016 +869 61 optimizer.lr 0.007374727122060844 +869 61 training.batch_size 0.0 +869 61 training.label_smoothing 0.009476581983645172 +869 62 model.embedding_dim 0.0 +869 62 model.scoring_fct_norm 2.0 +869 62 regularizer.weight 0.15053169306796785 +869 62 optimizer.lr 0.030467913278421015 +869 62 training.batch_size 0.0 +869 62 training.label_smoothing 0.018133502859883077 +869 63 model.embedding_dim 1.0 +869 63 model.scoring_fct_norm 1.0 +869 63 regularizer.weight 0.029899063859142386 +869 63 optimizer.lr 0.006896229847496817 +869 63 training.batch_size 0.0 +869 63 training.label_smoothing 0.09132818734770912 +869 64 model.embedding_dim 0.0 +869 64 model.scoring_fct_norm 1.0 +869 64 regularizer.weight 0.17621871375702144 +869 64 optimizer.lr 0.002101355310915914 +869 64 training.batch_size 0.0 +869 64 training.label_smoothing 0.04623884979534262 +869 65 model.embedding_dim 0.0 +869 65 model.scoring_fct_norm 2.0 +869 65 regularizer.weight 0.10200451662378976 +869 65 optimizer.lr 0.004570580337779312 +869 65 training.batch_size 0.0 +869 65 training.label_smoothing 0.0012708825758923237 +869 66 model.embedding_dim 1.0 +869 66 model.scoring_fct_norm 1.0 +869 66 regularizer.weight 0.28738193166521586 +869 66 optimizer.lr 0.01570301652704991 +869 66 training.batch_size 0.0 +869 66 training.label_smoothing 0.10703075914404238 +869 67 model.embedding_dim 2.0 +869 67 model.scoring_fct_norm 1.0 +869 67 regularizer.weight 0.013769708224103982 +869 67 optimizer.lr 0.022186526385051388 +869 67 training.batch_size 2.0 +869 67 training.label_smoothing 0.0023380215356560547 +869 68 model.embedding_dim 0.0 +869 68 model.scoring_fct_norm 1.0 +869 68 regularizer.weight 0.11154601072201006 +869 68 optimizer.lr 0.0019031902221927092 +869 68 training.batch_size 0.0 +869 68 training.label_smoothing 0.10794555525719357 +869 69 model.embedding_dim 2.0 +869 69 model.scoring_fct_norm 2.0 +869 69 regularizer.weight 
0.08689065678582303 +869 69 optimizer.lr 0.002921142026912591 +869 69 training.batch_size 1.0 +869 69 training.label_smoothing 0.3821889990519443 +869 70 model.embedding_dim 2.0 +869 70 model.scoring_fct_norm 1.0 +869 70 regularizer.weight 0.01189026213735413 +869 70 optimizer.lr 0.0026133337575102583 +869 70 training.batch_size 2.0 +869 70 training.label_smoothing 0.13185610425057298 +869 71 model.embedding_dim 1.0 +869 71 model.scoring_fct_norm 1.0 +869 71 regularizer.weight 0.04103031912968442 +869 71 optimizer.lr 0.008133941567848458 +869 71 training.batch_size 0.0 +869 71 training.label_smoothing 0.015263075852632276 +869 72 model.embedding_dim 0.0 +869 72 model.scoring_fct_norm 1.0 +869 72 regularizer.weight 0.020958335616224295 +869 72 optimizer.lr 0.005028191981518428 +869 72 training.batch_size 1.0 +869 72 training.label_smoothing 0.179653342396004 +869 73 model.embedding_dim 1.0 +869 73 model.scoring_fct_norm 2.0 +869 73 regularizer.weight 0.03761148240563482 +869 73 optimizer.lr 0.005829434252373451 +869 73 training.batch_size 1.0 +869 73 training.label_smoothing 0.8849692567072318 +869 74 model.embedding_dim 2.0 +869 74 model.scoring_fct_norm 2.0 +869 74 regularizer.weight 0.15142086611876804 +869 74 optimizer.lr 0.09874684878224259 +869 74 training.batch_size 1.0 +869 74 training.label_smoothing 0.031860099166111204 +869 75 model.embedding_dim 0.0 +869 75 model.scoring_fct_norm 1.0 +869 75 regularizer.weight 0.011132656000931154 +869 75 optimizer.lr 0.004448978042625191 +869 75 training.batch_size 2.0 +869 75 training.label_smoothing 0.05059351326254748 +869 76 model.embedding_dim 0.0 +869 76 model.scoring_fct_norm 1.0 +869 76 regularizer.weight 0.18930311032483535 +869 76 optimizer.lr 0.004035885035324527 +869 76 training.batch_size 2.0 +869 76 training.label_smoothing 0.12320386415694377 +869 77 model.embedding_dim 2.0 +869 77 model.scoring_fct_norm 2.0 +869 77 regularizer.weight 0.018703882448567532 +869 77 optimizer.lr 0.0010416359245162495 +869 77 
training.batch_size 1.0 +869 77 training.label_smoothing 0.043710474751595424 +869 78 model.embedding_dim 1.0 +869 78 model.scoring_fct_norm 1.0 +869 78 regularizer.weight 0.03964292193788624 +869 78 optimizer.lr 0.006880566709224961 +869 78 training.batch_size 2.0 +869 78 training.label_smoothing 0.00768546531052078 +869 79 model.embedding_dim 1.0 +869 79 model.scoring_fct_norm 1.0 +869 79 regularizer.weight 0.055494214700993805 +869 79 optimizer.lr 0.02043031735454555 +869 79 training.batch_size 2.0 +869 79 training.label_smoothing 0.2742860147208762 +869 80 model.embedding_dim 1.0 +869 80 model.scoring_fct_norm 1.0 +869 80 regularizer.weight 0.25503228972555864 +869 80 optimizer.lr 0.00484028472536669 +869 80 training.batch_size 0.0 +869 80 training.label_smoothing 0.07753557026422891 +869 81 model.embedding_dim 1.0 +869 81 model.scoring_fct_norm 2.0 +869 81 regularizer.weight 0.18829357289696377 +869 81 optimizer.lr 0.016902144881889694 +869 81 training.batch_size 1.0 +869 81 training.label_smoothing 0.003958149767420599 +869 82 model.embedding_dim 2.0 +869 82 model.scoring_fct_norm 1.0 +869 82 regularizer.weight 0.0396408985981106 +869 82 optimizer.lr 0.0020210180953919746 +869 82 training.batch_size 0.0 +869 82 training.label_smoothing 0.0010110223308909662 +869 83 model.embedding_dim 1.0 +869 83 model.scoring_fct_norm 1.0 +869 83 regularizer.weight 0.04212191015180297 +869 83 optimizer.lr 0.002367111008820368 +869 83 training.batch_size 2.0 +869 83 training.label_smoothing 0.2327027068349107 +869 84 model.embedding_dim 2.0 +869 84 model.scoring_fct_norm 2.0 +869 84 regularizer.weight 0.060428740506170156 +869 84 optimizer.lr 0.0012647552601966797 +869 84 training.batch_size 1.0 +869 84 training.label_smoothing 0.7674293946855621 +869 85 model.embedding_dim 0.0 +869 85 model.scoring_fct_norm 1.0 +869 85 regularizer.weight 0.012169680410400193 +869 85 optimizer.lr 0.0026248626440965404 +869 85 training.batch_size 1.0 +869 85 training.label_smoothing 
0.14925685005745512 +869 86 model.embedding_dim 0.0 +869 86 model.scoring_fct_norm 1.0 +869 86 regularizer.weight 0.13511365264928843 +869 86 optimizer.lr 0.02269589003542742 +869 86 training.batch_size 2.0 +869 86 training.label_smoothing 0.5815748466066889 +869 87 model.embedding_dim 0.0 +869 87 model.scoring_fct_norm 2.0 +869 87 regularizer.weight 0.09751646612142847 +869 87 optimizer.lr 0.020529730696059473 +869 87 training.batch_size 1.0 +869 87 training.label_smoothing 0.0010791967388645962 +869 88 model.embedding_dim 2.0 +869 88 model.scoring_fct_norm 2.0 +869 88 regularizer.weight 0.03847763409595597 +869 88 optimizer.lr 0.0013668691394460666 +869 88 training.batch_size 2.0 +869 88 training.label_smoothing 0.002075475641855504 +869 89 model.embedding_dim 1.0 +869 89 model.scoring_fct_norm 2.0 +869 89 regularizer.weight 0.07135835537852565 +869 89 optimizer.lr 0.05727226883566642 +869 89 training.batch_size 2.0 +869 89 training.label_smoothing 0.029744130453678824 +869 90 model.embedding_dim 1.0 +869 90 model.scoring_fct_norm 2.0 +869 90 regularizer.weight 0.020184064157415194 +869 90 optimizer.lr 0.08126610873388644 +869 90 training.batch_size 0.0 +869 90 training.label_smoothing 0.0013266299583217776 +869 91 model.embedding_dim 0.0 +869 91 model.scoring_fct_norm 1.0 +869 91 regularizer.weight 0.02913843781050946 +869 91 optimizer.lr 0.0029180393103523886 +869 91 training.batch_size 1.0 +869 91 training.label_smoothing 0.5162471408882043 +869 92 model.embedding_dim 0.0 +869 92 model.scoring_fct_norm 2.0 +869 92 regularizer.weight 0.030932443233710756 +869 92 optimizer.lr 0.08757274945565673 +869 92 training.batch_size 1.0 +869 92 training.label_smoothing 0.27548032473377315 +869 93 model.embedding_dim 0.0 +869 93 model.scoring_fct_norm 2.0 +869 93 regularizer.weight 0.0824703551899864 +869 93 optimizer.lr 0.034979529923790845 +869 93 training.batch_size 0.0 +869 93 training.label_smoothing 0.09929333181653666 +869 94 model.embedding_dim 1.0 +869 94 
model.scoring_fct_norm 2.0 +869 94 regularizer.weight 0.08825519783609452 +869 94 optimizer.lr 0.001047719360692159 +869 94 training.batch_size 0.0 +869 94 training.label_smoothing 0.8095999607254781 +869 95 model.embedding_dim 1.0 +869 95 model.scoring_fct_norm 2.0 +869 95 regularizer.weight 0.19527676565801852 +869 95 optimizer.lr 0.041059955704893805 +869 95 training.batch_size 2.0 +869 95 training.label_smoothing 0.10270562324545737 +869 96 model.embedding_dim 0.0 +869 96 model.scoring_fct_norm 2.0 +869 96 regularizer.weight 0.02413281547276785 +869 96 optimizer.lr 0.006280258603858857 +869 96 training.batch_size 2.0 +869 96 training.label_smoothing 0.0036279852310751484 +869 97 model.embedding_dim 0.0 +869 97 model.scoring_fct_norm 2.0 +869 97 regularizer.weight 0.20146145173956032 +869 97 optimizer.lr 0.0034212332473984 +869 97 training.batch_size 1.0 +869 97 training.label_smoothing 0.002338722523290249 +869 98 model.embedding_dim 0.0 +869 98 model.scoring_fct_norm 2.0 +869 98 regularizer.weight 0.2835669610574928 +869 98 optimizer.lr 0.029027211821601256 +869 98 training.batch_size 1.0 +869 98 training.label_smoothing 0.00218897114057833 +869 99 model.embedding_dim 2.0 +869 99 model.scoring_fct_norm 2.0 +869 99 regularizer.weight 0.04021910605637457 +869 99 optimizer.lr 0.012151686406761472 +869 99 training.batch_size 1.0 +869 99 training.label_smoothing 0.017241181061942803 +869 100 model.embedding_dim 2.0 +869 100 model.scoring_fct_norm 1.0 +869 100 regularizer.weight 0.06660556827621145 +869 100 optimizer.lr 0.07885020504306546 +869 100 training.batch_size 2.0 +869 100 training.label_smoothing 0.007182007489895421 +869 1 dataset """kinships""" +869 1 model """transh""" +869 1 loss """bceaftersigmoid""" +869 1 regularizer """transh""" +869 1 optimizer """adam""" +869 1 training_loop """lcwa""" +869 1 evaluator """rankbased""" +869 2 dataset """kinships""" +869 2 model """transh""" +869 2 loss """bceaftersigmoid""" +869 2 regularizer """transh""" +869 2 
optimizer """adam""" +869 2 training_loop """lcwa""" +869 2 evaluator """rankbased""" +869 3 dataset """kinships""" +869 3 model """transh""" +869 3 loss """bceaftersigmoid""" +869 3 regularizer """transh""" +869 3 optimizer """adam""" +869 3 training_loop """lcwa""" +869 3 evaluator """rankbased""" +869 4 dataset """kinships""" +869 4 model """transh""" +869 4 loss """bceaftersigmoid""" +869 4 regularizer """transh""" +869 4 optimizer """adam""" +869 4 training_loop """lcwa""" +869 4 evaluator """rankbased""" +869 5 dataset """kinships""" +869 5 model """transh""" +869 5 loss """bceaftersigmoid""" +869 5 regularizer """transh""" +869 5 optimizer """adam""" +869 5 training_loop """lcwa""" +869 5 evaluator """rankbased""" +869 6 dataset """kinships""" +869 6 model """transh""" +869 6 loss """bceaftersigmoid""" +869 6 regularizer """transh""" +869 6 optimizer """adam""" +869 6 training_loop """lcwa""" +869 6 evaluator """rankbased""" +869 7 dataset """kinships""" +869 7 model """transh""" +869 7 loss """bceaftersigmoid""" +869 7 regularizer """transh""" +869 7 optimizer """adam""" +869 7 training_loop """lcwa""" +869 7 evaluator """rankbased""" +869 8 dataset """kinships""" +869 8 model """transh""" +869 8 loss """bceaftersigmoid""" +869 8 regularizer """transh""" +869 8 optimizer """adam""" +869 8 training_loop """lcwa""" +869 8 evaluator """rankbased""" +869 9 dataset """kinships""" +869 9 model """transh""" +869 9 loss """bceaftersigmoid""" +869 9 regularizer """transh""" +869 9 optimizer """adam""" +869 9 training_loop """lcwa""" +869 9 evaluator """rankbased""" +869 10 dataset """kinships""" +869 10 model """transh""" +869 10 loss """bceaftersigmoid""" +869 10 regularizer """transh""" +869 10 optimizer """adam""" +869 10 training_loop """lcwa""" +869 10 evaluator """rankbased""" +869 11 dataset """kinships""" +869 11 model """transh""" +869 11 loss """bceaftersigmoid""" +869 11 regularizer """transh""" +869 11 optimizer """adam""" +869 11 training_loop 
"""lcwa""" +869 11 evaluator """rankbased""" +869 12 dataset """kinships""" +869 12 model """transh""" +869 12 loss """bceaftersigmoid""" +869 12 regularizer """transh""" +869 12 optimizer """adam""" +869 12 training_loop """lcwa""" +869 12 evaluator """rankbased""" +869 13 dataset """kinships""" +869 13 model """transh""" +869 13 loss """bceaftersigmoid""" +869 13 regularizer """transh""" +869 13 optimizer """adam""" +869 13 training_loop """lcwa""" +869 13 evaluator """rankbased""" +869 14 dataset """kinships""" +869 14 model """transh""" +869 14 loss """bceaftersigmoid""" +869 14 regularizer """transh""" +869 14 optimizer """adam""" +869 14 training_loop """lcwa""" +869 14 evaluator """rankbased""" +869 15 dataset """kinships""" +869 15 model """transh""" +869 15 loss """bceaftersigmoid""" +869 15 regularizer """transh""" +869 15 optimizer """adam""" +869 15 training_loop """lcwa""" +869 15 evaluator """rankbased""" +869 16 dataset """kinships""" +869 16 model """transh""" +869 16 loss """bceaftersigmoid""" +869 16 regularizer """transh""" +869 16 optimizer """adam""" +869 16 training_loop """lcwa""" +869 16 evaluator """rankbased""" +869 17 dataset """kinships""" +869 17 model """transh""" +869 17 loss """bceaftersigmoid""" +869 17 regularizer """transh""" +869 17 optimizer """adam""" +869 17 training_loop """lcwa""" +869 17 evaluator """rankbased""" +869 18 dataset """kinships""" +869 18 model """transh""" +869 18 loss """bceaftersigmoid""" +869 18 regularizer """transh""" +869 18 optimizer """adam""" +869 18 training_loop """lcwa""" +869 18 evaluator """rankbased""" +869 19 dataset """kinships""" +869 19 model """transh""" +869 19 loss """bceaftersigmoid""" +869 19 regularizer """transh""" +869 19 optimizer """adam""" +869 19 training_loop """lcwa""" +869 19 evaluator """rankbased""" +869 20 dataset """kinships""" +869 20 model """transh""" +869 20 loss """bceaftersigmoid""" +869 20 regularizer """transh""" +869 20 optimizer """adam""" +869 20 training_loop 
"""lcwa""" +869 20 evaluator """rankbased""" +869 21 dataset """kinships""" +869 21 model """transh""" +869 21 loss """bceaftersigmoid""" +869 21 regularizer """transh""" +869 21 optimizer """adam""" +869 21 training_loop """lcwa""" +869 21 evaluator """rankbased""" +869 22 dataset """kinships""" +869 22 model """transh""" +869 22 loss """bceaftersigmoid""" +869 22 regularizer """transh""" +869 22 optimizer """adam""" +869 22 training_loop """lcwa""" +869 22 evaluator """rankbased""" +869 23 dataset """kinships""" +869 23 model """transh""" +869 23 loss """bceaftersigmoid""" +869 23 regularizer """transh""" +869 23 optimizer """adam""" +869 23 training_loop """lcwa""" +869 23 evaluator """rankbased""" +869 24 dataset """kinships""" +869 24 model """transh""" +869 24 loss """bceaftersigmoid""" +869 24 regularizer """transh""" +869 24 optimizer """adam""" +869 24 training_loop """lcwa""" +869 24 evaluator """rankbased""" +869 25 dataset """kinships""" +869 25 model """transh""" +869 25 loss """bceaftersigmoid""" +869 25 regularizer """transh""" +869 25 optimizer """adam""" +869 25 training_loop """lcwa""" +869 25 evaluator """rankbased""" +869 26 dataset """kinships""" +869 26 model """transh""" +869 26 loss """bceaftersigmoid""" +869 26 regularizer """transh""" +869 26 optimizer """adam""" +869 26 training_loop """lcwa""" +869 26 evaluator """rankbased""" +869 27 dataset """kinships""" +869 27 model """transh""" +869 27 loss """bceaftersigmoid""" +869 27 regularizer """transh""" +869 27 optimizer """adam""" +869 27 training_loop """lcwa""" +869 27 evaluator """rankbased""" +869 28 dataset """kinships""" +869 28 model """transh""" +869 28 loss """bceaftersigmoid""" +869 28 regularizer """transh""" +869 28 optimizer """adam""" +869 28 training_loop """lcwa""" +869 28 evaluator """rankbased""" +869 29 dataset """kinships""" +869 29 model """transh""" +869 29 loss """bceaftersigmoid""" +869 29 regularizer """transh""" +869 29 optimizer """adam""" +869 29 training_loop 
"""lcwa""" +869 29 evaluator """rankbased""" +869 30 dataset """kinships""" +869 30 model """transh""" +869 30 loss """bceaftersigmoid""" +869 30 regularizer """transh""" +869 30 optimizer """adam""" +869 30 training_loop """lcwa""" +869 30 evaluator """rankbased""" +869 31 dataset """kinships""" +869 31 model """transh""" +869 31 loss """bceaftersigmoid""" +869 31 regularizer """transh""" +869 31 optimizer """adam""" +869 31 training_loop """lcwa""" +869 31 evaluator """rankbased""" +869 32 dataset """kinships""" +869 32 model """transh""" +869 32 loss """bceaftersigmoid""" +869 32 regularizer """transh""" +869 32 optimizer """adam""" +869 32 training_loop """lcwa""" +869 32 evaluator """rankbased""" +869 33 dataset """kinships""" +869 33 model """transh""" +869 33 loss """bceaftersigmoid""" +869 33 regularizer """transh""" +869 33 optimizer """adam""" +869 33 training_loop """lcwa""" +869 33 evaluator """rankbased""" +869 34 dataset """kinships""" +869 34 model """transh""" +869 34 loss """bceaftersigmoid""" +869 34 regularizer """transh""" +869 34 optimizer """adam""" +869 34 training_loop """lcwa""" +869 34 evaluator """rankbased""" +869 35 dataset """kinships""" +869 35 model """transh""" +869 35 loss """bceaftersigmoid""" +869 35 regularizer """transh""" +869 35 optimizer """adam""" +869 35 training_loop """lcwa""" +869 35 evaluator """rankbased""" +869 36 dataset """kinships""" +869 36 model """transh""" +869 36 loss """bceaftersigmoid""" +869 36 regularizer """transh""" +869 36 optimizer """adam""" +869 36 training_loop """lcwa""" +869 36 evaluator """rankbased""" +869 37 dataset """kinships""" +869 37 model """transh""" +869 37 loss """bceaftersigmoid""" +869 37 regularizer """transh""" +869 37 optimizer """adam""" +869 37 training_loop """lcwa""" +869 37 evaluator """rankbased""" +869 38 dataset """kinships""" +869 38 model """transh""" +869 38 loss """bceaftersigmoid""" +869 38 regularizer """transh""" +869 38 optimizer """adam""" +869 38 training_loop 
"""lcwa""" +869 38 evaluator """rankbased""" +869 39 dataset """kinships""" +869 39 model """transh""" +869 39 loss """bceaftersigmoid""" +869 39 regularizer """transh""" +869 39 optimizer """adam""" +869 39 training_loop """lcwa""" +869 39 evaluator """rankbased""" +869 40 dataset """kinships""" +869 40 model """transh""" +869 40 loss """bceaftersigmoid""" +869 40 regularizer """transh""" +869 40 optimizer """adam""" +869 40 training_loop """lcwa""" +869 40 evaluator """rankbased""" +869 41 dataset """kinships""" +869 41 model """transh""" +869 41 loss """bceaftersigmoid""" +869 41 regularizer """transh""" +869 41 optimizer """adam""" +869 41 training_loop """lcwa""" +869 41 evaluator """rankbased""" +869 42 dataset """kinships""" +869 42 model """transh""" +869 42 loss """bceaftersigmoid""" +869 42 regularizer """transh""" +869 42 optimizer """adam""" +869 42 training_loop """lcwa""" +869 42 evaluator """rankbased""" +869 43 dataset """kinships""" +869 43 model """transh""" +869 43 loss """bceaftersigmoid""" +869 43 regularizer """transh""" +869 43 optimizer """adam""" +869 43 training_loop """lcwa""" +869 43 evaluator """rankbased""" +869 44 dataset """kinships""" +869 44 model """transh""" +869 44 loss """bceaftersigmoid""" +869 44 regularizer """transh""" +869 44 optimizer """adam""" +869 44 training_loop """lcwa""" +869 44 evaluator """rankbased""" +869 45 dataset """kinships""" +869 45 model """transh""" +869 45 loss """bceaftersigmoid""" +869 45 regularizer """transh""" +869 45 optimizer """adam""" +869 45 training_loop """lcwa""" +869 45 evaluator """rankbased""" +869 46 dataset """kinships""" +869 46 model """transh""" +869 46 loss """bceaftersigmoid""" +869 46 regularizer """transh""" +869 46 optimizer """adam""" +869 46 training_loop """lcwa""" +869 46 evaluator """rankbased""" +869 47 dataset """kinships""" +869 47 model """transh""" +869 47 loss """bceaftersigmoid""" +869 47 regularizer """transh""" +869 47 optimizer """adam""" +869 47 training_loop 
"""lcwa""" +869 47 evaluator """rankbased""" +869 48 dataset """kinships""" +869 48 model """transh""" +869 48 loss """bceaftersigmoid""" +869 48 regularizer """transh""" +869 48 optimizer """adam""" +869 48 training_loop """lcwa""" +869 48 evaluator """rankbased""" +869 49 dataset """kinships""" +869 49 model """transh""" +869 49 loss """bceaftersigmoid""" +869 49 regularizer """transh""" +869 49 optimizer """adam""" +869 49 training_loop """lcwa""" +869 49 evaluator """rankbased""" +869 50 dataset """kinships""" +869 50 model """transh""" +869 50 loss """bceaftersigmoid""" +869 50 regularizer """transh""" +869 50 optimizer """adam""" +869 50 training_loop """lcwa""" +869 50 evaluator """rankbased""" +869 51 dataset """kinships""" +869 51 model """transh""" +869 51 loss """bceaftersigmoid""" +869 51 regularizer """transh""" +869 51 optimizer """adam""" +869 51 training_loop """lcwa""" +869 51 evaluator """rankbased""" +869 52 dataset """kinships""" +869 52 model """transh""" +869 52 loss """bceaftersigmoid""" +869 52 regularizer """transh""" +869 52 optimizer """adam""" +869 52 training_loop """lcwa""" +869 52 evaluator """rankbased""" +869 53 dataset """kinships""" +869 53 model """transh""" +869 53 loss """bceaftersigmoid""" +869 53 regularizer """transh""" +869 53 optimizer """adam""" +869 53 training_loop """lcwa""" +869 53 evaluator """rankbased""" +869 54 dataset """kinships""" +869 54 model """transh""" +869 54 loss """bceaftersigmoid""" +869 54 regularizer """transh""" +869 54 optimizer """adam""" +869 54 training_loop """lcwa""" +869 54 evaluator """rankbased""" +869 55 dataset """kinships""" +869 55 model """transh""" +869 55 loss """bceaftersigmoid""" +869 55 regularizer """transh""" +869 55 optimizer """adam""" +869 55 training_loop """lcwa""" +869 55 evaluator """rankbased""" +869 56 dataset """kinships""" +869 56 model """transh""" +869 56 loss """bceaftersigmoid""" +869 56 regularizer """transh""" +869 56 optimizer """adam""" +869 56 training_loop 
"""lcwa""" +869 56 evaluator """rankbased""" +869 57 dataset """kinships""" +869 57 model """transh""" +869 57 loss """bceaftersigmoid""" +869 57 regularizer """transh""" +869 57 optimizer """adam""" +869 57 training_loop """lcwa""" +869 57 evaluator """rankbased""" +869 58 dataset """kinships""" +869 58 model """transh""" +869 58 loss """bceaftersigmoid""" +869 58 regularizer """transh""" +869 58 optimizer """adam""" +869 58 training_loop """lcwa""" +869 58 evaluator """rankbased""" +869 59 dataset """kinships""" +869 59 model """transh""" +869 59 loss """bceaftersigmoid""" +869 59 regularizer """transh""" +869 59 optimizer """adam""" +869 59 training_loop """lcwa""" +869 59 evaluator """rankbased""" +869 60 dataset """kinships""" +869 60 model """transh""" +869 60 loss """bceaftersigmoid""" +869 60 regularizer """transh""" +869 60 optimizer """adam""" +869 60 training_loop """lcwa""" +869 60 evaluator """rankbased""" +869 61 dataset """kinships""" +869 61 model """transh""" +869 61 loss """bceaftersigmoid""" +869 61 regularizer """transh""" +869 61 optimizer """adam""" +869 61 training_loop """lcwa""" +869 61 evaluator """rankbased""" +869 62 dataset """kinships""" +869 62 model """transh""" +869 62 loss """bceaftersigmoid""" +869 62 regularizer """transh""" +869 62 optimizer """adam""" +869 62 training_loop """lcwa""" +869 62 evaluator """rankbased""" +869 63 dataset """kinships""" +869 63 model """transh""" +869 63 loss """bceaftersigmoid""" +869 63 regularizer """transh""" +869 63 optimizer """adam""" +869 63 training_loop """lcwa""" +869 63 evaluator """rankbased""" +869 64 dataset """kinships""" +869 64 model """transh""" +869 64 loss """bceaftersigmoid""" +869 64 regularizer """transh""" +869 64 optimizer """adam""" +869 64 training_loop """lcwa""" +869 64 evaluator """rankbased""" +869 65 dataset """kinships""" +869 65 model """transh""" +869 65 loss """bceaftersigmoid""" +869 65 regularizer """transh""" +869 65 optimizer """adam""" +869 65 training_loop 
"""lcwa""" +869 65 evaluator """rankbased""" +869 66 dataset """kinships""" +869 66 model """transh""" +869 66 loss """bceaftersigmoid""" +869 66 regularizer """transh""" +869 66 optimizer """adam""" +869 66 training_loop """lcwa""" +869 66 evaluator """rankbased""" +869 67 dataset """kinships""" +869 67 model """transh""" +869 67 loss """bceaftersigmoid""" +869 67 regularizer """transh""" +869 67 optimizer """adam""" +869 67 training_loop """lcwa""" +869 67 evaluator """rankbased""" +869 68 dataset """kinships""" +869 68 model """transh""" +869 68 loss """bceaftersigmoid""" +869 68 regularizer """transh""" +869 68 optimizer """adam""" +869 68 training_loop """lcwa""" +869 68 evaluator """rankbased""" +869 69 dataset """kinships""" +869 69 model """transh""" +869 69 loss """bceaftersigmoid""" +869 69 regularizer """transh""" +869 69 optimizer """adam""" +869 69 training_loop """lcwa""" +869 69 evaluator """rankbased""" +869 70 dataset """kinships""" +869 70 model """transh""" +869 70 loss """bceaftersigmoid""" +869 70 regularizer """transh""" +869 70 optimizer """adam""" +869 70 training_loop """lcwa""" +869 70 evaluator """rankbased""" +869 71 dataset """kinships""" +869 71 model """transh""" +869 71 loss """bceaftersigmoid""" +869 71 regularizer """transh""" +869 71 optimizer """adam""" +869 71 training_loop """lcwa""" +869 71 evaluator """rankbased""" +869 72 dataset """kinships""" +869 72 model """transh""" +869 72 loss """bceaftersigmoid""" +869 72 regularizer """transh""" +869 72 optimizer """adam""" +869 72 training_loop """lcwa""" +869 72 evaluator """rankbased""" +869 73 dataset """kinships""" +869 73 model """transh""" +869 73 loss """bceaftersigmoid""" +869 73 regularizer """transh""" +869 73 optimizer """adam""" +869 73 training_loop """lcwa""" +869 73 evaluator """rankbased""" +869 74 dataset """kinships""" +869 74 model """transh""" +869 74 loss """bceaftersigmoid""" +869 74 regularizer """transh""" +869 74 optimizer """adam""" +869 74 training_loop 
"""lcwa""" +869 74 evaluator """rankbased""" +869 75 dataset """kinships""" +869 75 model """transh""" +869 75 loss """bceaftersigmoid""" +869 75 regularizer """transh""" +869 75 optimizer """adam""" +869 75 training_loop """lcwa""" +869 75 evaluator """rankbased""" +869 76 dataset """kinships""" +869 76 model """transh""" +869 76 loss """bceaftersigmoid""" +869 76 regularizer """transh""" +869 76 optimizer """adam""" +869 76 training_loop """lcwa""" +869 76 evaluator """rankbased""" +869 77 dataset """kinships""" +869 77 model """transh""" +869 77 loss """bceaftersigmoid""" +869 77 regularizer """transh""" +869 77 optimizer """adam""" +869 77 training_loop """lcwa""" +869 77 evaluator """rankbased""" +869 78 dataset """kinships""" +869 78 model """transh""" +869 78 loss """bceaftersigmoid""" +869 78 regularizer """transh""" +869 78 optimizer """adam""" +869 78 training_loop """lcwa""" +869 78 evaluator """rankbased""" +869 79 dataset """kinships""" +869 79 model """transh""" +869 79 loss """bceaftersigmoid""" +869 79 regularizer """transh""" +869 79 optimizer """adam""" +869 79 training_loop """lcwa""" +869 79 evaluator """rankbased""" +869 80 dataset """kinships""" +869 80 model """transh""" +869 80 loss """bceaftersigmoid""" +869 80 regularizer """transh""" +869 80 optimizer """adam""" +869 80 training_loop """lcwa""" +869 80 evaluator """rankbased""" +869 81 dataset """kinships""" +869 81 model """transh""" +869 81 loss """bceaftersigmoid""" +869 81 regularizer """transh""" +869 81 optimizer """adam""" +869 81 training_loop """lcwa""" +869 81 evaluator """rankbased""" +869 82 dataset """kinships""" +869 82 model """transh""" +869 82 loss """bceaftersigmoid""" +869 82 regularizer """transh""" +869 82 optimizer """adam""" +869 82 training_loop """lcwa""" +869 82 evaluator """rankbased""" +869 83 dataset """kinships""" +869 83 model """transh""" +869 83 loss """bceaftersigmoid""" +869 83 regularizer """transh""" +869 83 optimizer """adam""" +869 83 training_loop 
"""lcwa""" +869 83 evaluator """rankbased""" +869 84 dataset """kinships""" +869 84 model """transh""" +869 84 loss """bceaftersigmoid""" +869 84 regularizer """transh""" +869 84 optimizer """adam""" +869 84 training_loop """lcwa""" +869 84 evaluator """rankbased""" +869 85 dataset """kinships""" +869 85 model """transh""" +869 85 loss """bceaftersigmoid""" +869 85 regularizer """transh""" +869 85 optimizer """adam""" +869 85 training_loop """lcwa""" +869 85 evaluator """rankbased""" +869 86 dataset """kinships""" +869 86 model """transh""" +869 86 loss """bceaftersigmoid""" +869 86 regularizer """transh""" +869 86 optimizer """adam""" +869 86 training_loop """lcwa""" +869 86 evaluator """rankbased""" +869 87 dataset """kinships""" +869 87 model """transh""" +869 87 loss """bceaftersigmoid""" +869 87 regularizer """transh""" +869 87 optimizer """adam""" +869 87 training_loop """lcwa""" +869 87 evaluator """rankbased""" +869 88 dataset """kinships""" +869 88 model """transh""" +869 88 loss """bceaftersigmoid""" +869 88 regularizer """transh""" +869 88 optimizer """adam""" +869 88 training_loop """lcwa""" +869 88 evaluator """rankbased""" +869 89 dataset """kinships""" +869 89 model """transh""" +869 89 loss """bceaftersigmoid""" +869 89 regularizer """transh""" +869 89 optimizer """adam""" +869 89 training_loop """lcwa""" +869 89 evaluator """rankbased""" +869 90 dataset """kinships""" +869 90 model """transh""" +869 90 loss """bceaftersigmoid""" +869 90 regularizer """transh""" +869 90 optimizer """adam""" +869 90 training_loop """lcwa""" +869 90 evaluator """rankbased""" +869 91 dataset """kinships""" +869 91 model """transh""" +869 91 loss """bceaftersigmoid""" +869 91 regularizer """transh""" +869 91 optimizer """adam""" +869 91 training_loop """lcwa""" +869 91 evaluator """rankbased""" +869 92 dataset """kinships""" +869 92 model """transh""" +869 92 loss """bceaftersigmoid""" +869 92 regularizer """transh""" +869 92 optimizer """adam""" +869 92 training_loop 
"""lcwa""" +869 92 evaluator """rankbased""" +869 93 dataset """kinships""" +869 93 model """transh""" +869 93 loss """bceaftersigmoid""" +869 93 regularizer """transh""" +869 93 optimizer """adam""" +869 93 training_loop """lcwa""" +869 93 evaluator """rankbased""" +869 94 dataset """kinships""" +869 94 model """transh""" +869 94 loss """bceaftersigmoid""" +869 94 regularizer """transh""" +869 94 optimizer """adam""" +869 94 training_loop """lcwa""" +869 94 evaluator """rankbased""" +869 95 dataset """kinships""" +869 95 model """transh""" +869 95 loss """bceaftersigmoid""" +869 95 regularizer """transh""" +869 95 optimizer """adam""" +869 95 training_loop """lcwa""" +869 95 evaluator """rankbased""" +869 96 dataset """kinships""" +869 96 model """transh""" +869 96 loss """bceaftersigmoid""" +869 96 regularizer """transh""" +869 96 optimizer """adam""" +869 96 training_loop """lcwa""" +869 96 evaluator """rankbased""" +869 97 dataset """kinships""" +869 97 model """transh""" +869 97 loss """bceaftersigmoid""" +869 97 regularizer """transh""" +869 97 optimizer """adam""" +869 97 training_loop """lcwa""" +869 97 evaluator """rankbased""" +869 98 dataset """kinships""" +869 98 model """transh""" +869 98 loss """bceaftersigmoid""" +869 98 regularizer """transh""" +869 98 optimizer """adam""" +869 98 training_loop """lcwa""" +869 98 evaluator """rankbased""" +869 99 dataset """kinships""" +869 99 model """transh""" +869 99 loss """bceaftersigmoid""" +869 99 regularizer """transh""" +869 99 optimizer """adam""" +869 99 training_loop """lcwa""" +869 99 evaluator """rankbased""" +869 100 dataset """kinships""" +869 100 model """transh""" +869 100 loss """bceaftersigmoid""" +869 100 regularizer """transh""" +869 100 optimizer """adam""" +869 100 training_loop """lcwa""" +869 100 evaluator """rankbased""" +870 1 model.embedding_dim 1.0 +870 1 model.scoring_fct_norm 2.0 +870 1 regularizer.weight 0.05776024571531923 +870 1 optimizer.lr 0.0116016748681091 +870 1 
training.batch_size 0.0 +870 1 training.label_smoothing 0.09723171815091941 +870 2 model.embedding_dim 2.0 +870 2 model.scoring_fct_norm 1.0 +870 2 regularizer.weight 0.18601946208928705 +870 2 optimizer.lr 0.055157373768440605 +870 2 training.batch_size 2.0 +870 2 training.label_smoothing 0.012833350385359942 +870 3 model.embedding_dim 2.0 +870 3 model.scoring_fct_norm 1.0 +870 3 regularizer.weight 0.08170246325799996 +870 3 optimizer.lr 0.003872824530544975 +870 3 training.batch_size 2.0 +870 3 training.label_smoothing 0.0036622138761477074 +870 4 model.embedding_dim 2.0 +870 4 model.scoring_fct_norm 1.0 +870 4 regularizer.weight 0.031488613879415865 +870 4 optimizer.lr 0.010201616767873043 +870 4 training.batch_size 0.0 +870 4 training.label_smoothing 0.5681626221614596 +870 5 model.embedding_dim 1.0 +870 5 model.scoring_fct_norm 2.0 +870 5 regularizer.weight 0.049488268269143695 +870 5 optimizer.lr 0.05056622242323514 +870 5 training.batch_size 1.0 +870 5 training.label_smoothing 0.08220949100061523 +870 6 model.embedding_dim 0.0 +870 6 model.scoring_fct_norm 1.0 +870 6 regularizer.weight 0.14828492384020225 +870 6 optimizer.lr 0.033765390287060354 +870 6 training.batch_size 1.0 +870 6 training.label_smoothing 0.001145779757838288 +870 7 model.embedding_dim 0.0 +870 7 model.scoring_fct_norm 2.0 +870 7 regularizer.weight 0.077039517288836 +870 7 optimizer.lr 0.004825628549851357 +870 7 training.batch_size 2.0 +870 7 training.label_smoothing 0.8486012993113461 +870 8 model.embedding_dim 1.0 +870 8 model.scoring_fct_norm 2.0 +870 8 regularizer.weight 0.2586886524616011 +870 8 optimizer.lr 0.011470973470863259 +870 8 training.batch_size 0.0 +870 8 training.label_smoothing 0.18406821692629521 +870 9 model.embedding_dim 0.0 +870 9 model.scoring_fct_norm 2.0 +870 9 regularizer.weight 0.10618139195837141 +870 9 optimizer.lr 0.0018443750758227165 +870 9 training.batch_size 1.0 +870 9 training.label_smoothing 0.0027579314425322213 +870 10 model.embedding_dim 0.0 +870 10 
model.scoring_fct_norm 2.0 +870 10 regularizer.weight 0.07932616862619568 +870 10 optimizer.lr 0.022519963956894696 +870 10 training.batch_size 0.0 +870 10 training.label_smoothing 0.007517801648211512 +870 11 model.embedding_dim 1.0 +870 11 model.scoring_fct_norm 1.0 +870 11 regularizer.weight 0.035022346950245704 +870 11 optimizer.lr 0.006211632813048 +870 11 training.batch_size 1.0 +870 11 training.label_smoothing 0.006722119549929908 +870 12 model.embedding_dim 2.0 +870 12 model.scoring_fct_norm 1.0 +870 12 regularizer.weight 0.04122201181752476 +870 12 optimizer.lr 0.007653539322683017 +870 12 training.batch_size 0.0 +870 12 training.label_smoothing 0.013891212973938355 +870 13 model.embedding_dim 2.0 +870 13 model.scoring_fct_norm 2.0 +870 13 regularizer.weight 0.013289055124574458 +870 13 optimizer.lr 0.022478722536329012 +870 13 training.batch_size 1.0 +870 13 training.label_smoothing 0.4994740311790532 +870 14 model.embedding_dim 0.0 +870 14 model.scoring_fct_norm 1.0 +870 14 regularizer.weight 0.2147777499835971 +870 14 optimizer.lr 0.017281421756865593 +870 14 training.batch_size 2.0 +870 14 training.label_smoothing 0.03737728810464742 +870 15 model.embedding_dim 1.0 +870 15 model.scoring_fct_norm 1.0 +870 15 regularizer.weight 0.12800745563661628 +870 15 optimizer.lr 0.016119256170912016 +870 15 training.batch_size 2.0 +870 15 training.label_smoothing 0.020275111231809043 +870 16 model.embedding_dim 0.0 +870 16 model.scoring_fct_norm 1.0 +870 16 regularizer.weight 0.10953217039820216 +870 16 optimizer.lr 0.006410861145980501 +870 16 training.batch_size 2.0 +870 16 training.label_smoothing 0.012371298434590762 +870 17 model.embedding_dim 2.0 +870 17 model.scoring_fct_norm 1.0 +870 17 regularizer.weight 0.07158124391907171 +870 17 optimizer.lr 0.056539382570986674 +870 17 training.batch_size 1.0 +870 17 training.label_smoothing 0.001860639302466041 +870 18 model.embedding_dim 0.0 +870 18 model.scoring_fct_norm 1.0 +870 18 regularizer.weight 
0.29620794236502945 +870 18 optimizer.lr 0.008347279878582858 +870 18 training.batch_size 1.0 +870 18 training.label_smoothing 0.009824950361280234 +870 19 model.embedding_dim 1.0 +870 19 model.scoring_fct_norm 2.0 +870 19 regularizer.weight 0.011657749270708135 +870 19 optimizer.lr 0.01909609408221272 +870 19 training.batch_size 1.0 +870 19 training.label_smoothing 0.37410662961426133 +870 20 model.embedding_dim 1.0 +870 20 model.scoring_fct_norm 2.0 +870 20 regularizer.weight 0.134864819124688 +870 20 optimizer.lr 0.008951846381652698 +870 20 training.batch_size 2.0 +870 20 training.label_smoothing 0.011562844316280577 +870 21 model.embedding_dim 0.0 +870 21 model.scoring_fct_norm 2.0 +870 21 regularizer.weight 0.10284508470395128 +870 21 optimizer.lr 0.003979467706552332 +870 21 training.batch_size 0.0 +870 21 training.label_smoothing 0.05065923316796261 +870 22 model.embedding_dim 1.0 +870 22 model.scoring_fct_norm 1.0 +870 22 regularizer.weight 0.012052105482716366 +870 22 optimizer.lr 0.009634385289941955 +870 22 training.batch_size 0.0 +870 22 training.label_smoothing 0.1709392820296856 +870 23 model.embedding_dim 2.0 +870 23 model.scoring_fct_norm 2.0 +870 23 regularizer.weight 0.14401187599342719 +870 23 optimizer.lr 0.012993302686551793 +870 23 training.batch_size 0.0 +870 23 training.label_smoothing 0.0029311354552275415 +870 24 model.embedding_dim 0.0 +870 24 model.scoring_fct_norm 2.0 +870 24 regularizer.weight 0.23540505449756796 +870 24 optimizer.lr 0.021586304320621644 +870 24 training.batch_size 0.0 +870 24 training.label_smoothing 0.0020782585921068726 +870 25 model.embedding_dim 1.0 +870 25 model.scoring_fct_norm 2.0 +870 25 regularizer.weight 0.03686932620000836 +870 25 optimizer.lr 0.002667109011411907 +870 25 training.batch_size 1.0 +870 25 training.label_smoothing 0.0030547366465194722 +870 26 model.embedding_dim 0.0 +870 26 model.scoring_fct_norm 2.0 +870 26 regularizer.weight 0.21466408204750437 +870 26 optimizer.lr 0.06877084814135595 +870 
26 training.batch_size 0.0 +870 26 training.label_smoothing 0.005934492984431996 +870 27 model.embedding_dim 2.0 +870 27 model.scoring_fct_norm 1.0 +870 27 regularizer.weight 0.014913841516436013 +870 27 optimizer.lr 0.008624019517775552 +870 27 training.batch_size 0.0 +870 27 training.label_smoothing 0.019452600883760746 +870 28 model.embedding_dim 0.0 +870 28 model.scoring_fct_norm 2.0 +870 28 regularizer.weight 0.0422409933610137 +870 28 optimizer.lr 0.0010102926998239138 +870 28 training.batch_size 2.0 +870 28 training.label_smoothing 0.2767042039926026 +870 29 model.embedding_dim 2.0 +870 29 model.scoring_fct_norm 1.0 +870 29 regularizer.weight 0.20986887979029048 +870 29 optimizer.lr 0.05278989201821863 +870 29 training.batch_size 1.0 +870 29 training.label_smoothing 0.0026320829802215884 +870 30 model.embedding_dim 1.0 +870 30 model.scoring_fct_norm 1.0 +870 30 regularizer.weight 0.04581750370239397 +870 30 optimizer.lr 0.0013915138206595625 +870 30 training.batch_size 1.0 +870 30 training.label_smoothing 0.022293482306100995 +870 31 model.embedding_dim 1.0 +870 31 model.scoring_fct_norm 1.0 +870 31 regularizer.weight 0.1477081613618958 +870 31 optimizer.lr 0.0014454520495830253 +870 31 training.batch_size 0.0 +870 31 training.label_smoothing 0.2693210661457458 +870 32 model.embedding_dim 1.0 +870 32 model.scoring_fct_norm 1.0 +870 32 regularizer.weight 0.08626569030434907 +870 32 optimizer.lr 0.002835186119458101 +870 32 training.batch_size 2.0 +870 32 training.label_smoothing 0.03535419054623542 +870 33 model.embedding_dim 1.0 +870 33 model.scoring_fct_norm 2.0 +870 33 regularizer.weight 0.13448434837413464 +870 33 optimizer.lr 0.0011463447195408471 +870 33 training.batch_size 0.0 +870 33 training.label_smoothing 0.05194278713224721 +870 34 model.embedding_dim 0.0 +870 34 model.scoring_fct_norm 2.0 +870 34 regularizer.weight 0.015397022629474063 +870 34 optimizer.lr 0.001267958174473071 +870 34 training.batch_size 0.0 +870 34 training.label_smoothing 
0.015014039155350638 +870 35 model.embedding_dim 1.0 +870 35 model.scoring_fct_norm 2.0 +870 35 regularizer.weight 0.18697725600830226 +870 35 optimizer.lr 0.008607564868012571 +870 35 training.batch_size 2.0 +870 35 training.label_smoothing 0.7955665747532675 +870 36 model.embedding_dim 0.0 +870 36 model.scoring_fct_norm 1.0 +870 36 regularizer.weight 0.017612414991158086 +870 36 optimizer.lr 0.0028480083176802046 +870 36 training.batch_size 0.0 +870 36 training.label_smoothing 0.033302170993846546 +870 37 model.embedding_dim 2.0 +870 37 model.scoring_fct_norm 1.0 +870 37 regularizer.weight 0.29433637556910175 +870 37 optimizer.lr 0.020899518205728246 +870 37 training.batch_size 0.0 +870 37 training.label_smoothing 0.30563765385634434 +870 38 model.embedding_dim 0.0 +870 38 model.scoring_fct_norm 1.0 +870 38 regularizer.weight 0.022650172602387927 +870 38 optimizer.lr 0.015627567702950956 +870 38 training.batch_size 0.0 +870 38 training.label_smoothing 0.1790350260121234 +870 39 model.embedding_dim 2.0 +870 39 model.scoring_fct_norm 2.0 +870 39 regularizer.weight 0.034690469127055375 +870 39 optimizer.lr 0.00898248831344492 +870 39 training.batch_size 1.0 +870 39 training.label_smoothing 0.03438980337786791 +870 40 model.embedding_dim 2.0 +870 40 model.scoring_fct_norm 1.0 +870 40 regularizer.weight 0.0331693156653709 +870 40 optimizer.lr 0.027705656340942957 +870 40 training.batch_size 1.0 +870 40 training.label_smoothing 0.0022468167881986014 +870 41 model.embedding_dim 1.0 +870 41 model.scoring_fct_norm 2.0 +870 41 regularizer.weight 0.08469044347861054 +870 41 optimizer.lr 0.05672119224366417 +870 41 training.batch_size 1.0 +870 41 training.label_smoothing 0.0034066379855117726 +870 42 model.embedding_dim 2.0 +870 42 model.scoring_fct_norm 2.0 +870 42 regularizer.weight 0.015683892608034787 +870 42 optimizer.lr 0.04015218182842482 +870 42 training.batch_size 2.0 +870 42 training.label_smoothing 0.021068983954254314 +870 43 model.embedding_dim 2.0 +870 43 
model.scoring_fct_norm 2.0 +870 43 regularizer.weight 0.01015466316773271 +870 43 optimizer.lr 0.026786741431078474 +870 43 training.batch_size 1.0 +870 43 training.label_smoothing 0.8691138227828915 +870 44 model.embedding_dim 0.0 +870 44 model.scoring_fct_norm 1.0 +870 44 regularizer.weight 0.2386810397371081 +870 44 optimizer.lr 0.014772052521789704 +870 44 training.batch_size 1.0 +870 44 training.label_smoothing 0.5629001833737984 +870 45 model.embedding_dim 1.0 +870 45 model.scoring_fct_norm 1.0 +870 45 regularizer.weight 0.018059451293416563 +870 45 optimizer.lr 0.004061780930172766 +870 45 training.batch_size 0.0 +870 45 training.label_smoothing 0.002554893266186541 +870 46 model.embedding_dim 0.0 +870 46 model.scoring_fct_norm 2.0 +870 46 regularizer.weight 0.11399999805814873 +870 46 optimizer.lr 0.003054254326773253 +870 46 training.batch_size 1.0 +870 46 training.label_smoothing 0.0032151786746055125 +870 47 model.embedding_dim 2.0 +870 47 model.scoring_fct_norm 2.0 +870 47 regularizer.weight 0.02556812337340723 +870 47 optimizer.lr 0.013622520040731613 +870 47 training.batch_size 1.0 +870 47 training.label_smoothing 0.0017425235301880549 +870 48 model.embedding_dim 0.0 +870 48 model.scoring_fct_norm 2.0 +870 48 regularizer.weight 0.01098197624612428 +870 48 optimizer.lr 0.005926859327916296 +870 48 training.batch_size 2.0 +870 48 training.label_smoothing 0.03566879111934071 +870 49 model.embedding_dim 1.0 +870 49 model.scoring_fct_norm 2.0 +870 49 regularizer.weight 0.014448934773569573 +870 49 optimizer.lr 0.02815321893461827 +870 49 training.batch_size 1.0 +870 49 training.label_smoothing 0.09247942189995478 +870 50 model.embedding_dim 0.0 +870 50 model.scoring_fct_norm 1.0 +870 50 regularizer.weight 0.08610134582469123 +870 50 optimizer.lr 0.024863458462840732 +870 50 training.batch_size 2.0 +870 50 training.label_smoothing 0.04255515229804417 +870 51 model.embedding_dim 0.0 +870 51 model.scoring_fct_norm 2.0 +870 51 regularizer.weight 
0.289737229245163 +870 51 optimizer.lr 0.007516738613509853 +870 51 training.batch_size 2.0 +870 51 training.label_smoothing 0.07006317468824855 +870 52 model.embedding_dim 1.0 +870 52 model.scoring_fct_norm 2.0 +870 52 regularizer.weight 0.18670183598549966 +870 52 optimizer.lr 0.01706445244551904 +870 52 training.batch_size 1.0 +870 52 training.label_smoothing 0.025622037860385135 +870 53 model.embedding_dim 2.0 +870 53 model.scoring_fct_norm 2.0 +870 53 regularizer.weight 0.2093859338761838 +870 53 optimizer.lr 0.059942902742187845 +870 53 training.batch_size 1.0 +870 53 training.label_smoothing 0.29634433828776074 +870 54 model.embedding_dim 2.0 +870 54 model.scoring_fct_norm 1.0 +870 54 regularizer.weight 0.2962471822644133 +870 54 optimizer.lr 0.006784464584805825 +870 54 training.batch_size 1.0 +870 54 training.label_smoothing 0.09257496390385138 +870 55 model.embedding_dim 1.0 +870 55 model.scoring_fct_norm 2.0 +870 55 regularizer.weight 0.2763210415604853 +870 55 optimizer.lr 0.011276116940307822 +870 55 training.batch_size 2.0 +870 55 training.label_smoothing 0.0632296607183567 +870 56 model.embedding_dim 1.0 +870 56 model.scoring_fct_norm 2.0 +870 56 regularizer.weight 0.10412039223045562 +870 56 optimizer.lr 0.008408154403425174 +870 56 training.batch_size 2.0 +870 56 training.label_smoothing 0.2682181901312064 +870 57 model.embedding_dim 1.0 +870 57 model.scoring_fct_norm 1.0 +870 57 regularizer.weight 0.026232881420352402 +870 57 optimizer.lr 0.001948831245961641 +870 57 training.batch_size 2.0 +870 57 training.label_smoothing 0.013436529303387332 +870 58 model.embedding_dim 1.0 +870 58 model.scoring_fct_norm 1.0 +870 58 regularizer.weight 0.17971940141973022 +870 58 optimizer.lr 0.013206140829676657 +870 58 training.batch_size 1.0 +870 58 training.label_smoothing 0.003707811302366383 +870 59 model.embedding_dim 0.0 +870 59 model.scoring_fct_norm 1.0 +870 59 regularizer.weight 0.12224791493227832 +870 59 optimizer.lr 0.0029035020794562576 +870 59 
training.batch_size 2.0 +870 59 training.label_smoothing 0.22125664102252163 +870 60 model.embedding_dim 0.0 +870 60 model.scoring_fct_norm 2.0 +870 60 regularizer.weight 0.21934231774432708 +870 60 optimizer.lr 0.014632141341761062 +870 60 training.batch_size 2.0 +870 60 training.label_smoothing 0.9892925845787316 +870 61 model.embedding_dim 2.0 +870 61 model.scoring_fct_norm 1.0 +870 61 regularizer.weight 0.013978813881392915 +870 61 optimizer.lr 0.004512723738397374 +870 61 training.batch_size 2.0 +870 61 training.label_smoothing 0.0023836196685694034 +870 62 model.embedding_dim 0.0 +870 62 model.scoring_fct_norm 2.0 +870 62 regularizer.weight 0.09574405155044388 +870 62 optimizer.lr 0.0014925689256261017 +870 62 training.batch_size 1.0 +870 62 training.label_smoothing 0.023989022903029417 +870 63 model.embedding_dim 2.0 +870 63 model.scoring_fct_norm 1.0 +870 63 regularizer.weight 0.12707295899837479 +870 63 optimizer.lr 0.09228364268511315 +870 63 training.batch_size 0.0 +870 63 training.label_smoothing 0.7487007695632601 +870 64 model.embedding_dim 2.0 +870 64 model.scoring_fct_norm 1.0 +870 64 regularizer.weight 0.04250786169208991 +870 64 optimizer.lr 0.00462105943307677 +870 64 training.batch_size 1.0 +870 64 training.label_smoothing 0.20859776500887728 +870 65 model.embedding_dim 0.0 +870 65 model.scoring_fct_norm 2.0 +870 65 regularizer.weight 0.016186319468890875 +870 65 optimizer.lr 0.02262289128341087 +870 65 training.batch_size 0.0 +870 65 training.label_smoothing 0.014374165142757335 +870 66 model.embedding_dim 1.0 +870 66 model.scoring_fct_norm 2.0 +870 66 regularizer.weight 0.0231455798400843 +870 66 optimizer.lr 0.013207303187764993 +870 66 training.batch_size 2.0 +870 66 training.label_smoothing 0.3133589127669772 +870 67 model.embedding_dim 1.0 +870 67 model.scoring_fct_norm 1.0 +870 67 regularizer.weight 0.027427190556942215 +870 67 optimizer.lr 0.015263350022207365 +870 67 training.batch_size 1.0 +870 67 training.label_smoothing 
0.002375019889255229 +870 68 model.embedding_dim 1.0 +870 68 model.scoring_fct_norm 2.0 +870 68 regularizer.weight 0.0808551849309846 +870 68 optimizer.lr 0.004375746709246675 +870 68 training.batch_size 1.0 +870 68 training.label_smoothing 0.006648570803824885 +870 69 model.embedding_dim 2.0 +870 69 model.scoring_fct_norm 2.0 +870 69 regularizer.weight 0.09581805816356474 +870 69 optimizer.lr 0.007981865837654674 +870 69 training.batch_size 0.0 +870 69 training.label_smoothing 0.15310435611417406 +870 70 model.embedding_dim 0.0 +870 70 model.scoring_fct_norm 1.0 +870 70 regularizer.weight 0.013295989629695654 +870 70 optimizer.lr 0.026711708147207903 +870 70 training.batch_size 0.0 +870 70 training.label_smoothing 0.9701193836712341 +870 71 model.embedding_dim 0.0 +870 71 model.scoring_fct_norm 2.0 +870 71 regularizer.weight 0.1462222920578022 +870 71 optimizer.lr 0.04670619858917314 +870 71 training.batch_size 2.0 +870 71 training.label_smoothing 0.7725065850567455 +870 72 model.embedding_dim 1.0 +870 72 model.scoring_fct_norm 1.0 +870 72 regularizer.weight 0.02348609711800086 +870 72 optimizer.lr 0.0043802656951123925 +870 72 training.batch_size 0.0 +870 72 training.label_smoothing 0.021346108695693578 +870 73 model.embedding_dim 1.0 +870 73 model.scoring_fct_norm 2.0 +870 73 regularizer.weight 0.05535297374946715 +870 73 optimizer.lr 0.0013673864496472957 +870 73 training.batch_size 1.0 +870 73 training.label_smoothing 0.22771057466196964 +870 74 model.embedding_dim 1.0 +870 74 model.scoring_fct_norm 2.0 +870 74 regularizer.weight 0.16288921827223085 +870 74 optimizer.lr 0.0180299314163422 +870 74 training.batch_size 0.0 +870 74 training.label_smoothing 0.006044436824479539 +870 75 model.embedding_dim 0.0 +870 75 model.scoring_fct_norm 2.0 +870 75 regularizer.weight 0.02115444910348725 +870 75 optimizer.lr 0.0029922161983146427 +870 75 training.batch_size 1.0 +870 75 training.label_smoothing 0.23744993628481476 +870 76 model.embedding_dim 1.0 +870 76 
model.scoring_fct_norm 2.0 +870 76 regularizer.weight 0.04156101100376721 +870 76 optimizer.lr 0.04440336375058405 +870 76 training.batch_size 1.0 +870 76 training.label_smoothing 0.28499064620695275 +870 77 model.embedding_dim 1.0 +870 77 model.scoring_fct_norm 2.0 +870 77 regularizer.weight 0.012039267683676668 +870 77 optimizer.lr 0.01580206428839737 +870 77 training.batch_size 2.0 +870 77 training.label_smoothing 0.4463474720850712 +870 78 model.embedding_dim 0.0 +870 78 model.scoring_fct_norm 2.0 +870 78 regularizer.weight 0.1565597539177371 +870 78 optimizer.lr 0.07862414894912818 +870 78 training.batch_size 2.0 +870 78 training.label_smoothing 0.010106209336025091 +870 79 model.embedding_dim 2.0 +870 79 model.scoring_fct_norm 2.0 +870 79 regularizer.weight 0.026978105344581154 +870 79 optimizer.lr 0.02532688314994727 +870 79 training.batch_size 0.0 +870 79 training.label_smoothing 0.0640756119933128 +870 80 model.embedding_dim 2.0 +870 80 model.scoring_fct_norm 2.0 +870 80 regularizer.weight 0.0461204561474983 +870 80 optimizer.lr 0.0013039342322414041 +870 80 training.batch_size 1.0 +870 80 training.label_smoothing 0.05216604597092909 +870 81 model.embedding_dim 2.0 +870 81 model.scoring_fct_norm 1.0 +870 81 regularizer.weight 0.11201597180014292 +870 81 optimizer.lr 0.012289962056139574 +870 81 training.batch_size 2.0 +870 81 training.label_smoothing 0.19544203670632937 +870 82 model.embedding_dim 0.0 +870 82 model.scoring_fct_norm 2.0 +870 82 regularizer.weight 0.02890838719592968 +870 82 optimizer.lr 0.0012070454766274611 +870 82 training.batch_size 0.0 +870 82 training.label_smoothing 0.29611980890087175 +870 83 model.embedding_dim 2.0 +870 83 model.scoring_fct_norm 1.0 +870 83 regularizer.weight 0.027387372290318117 +870 83 optimizer.lr 0.00791629642934775 +870 83 training.batch_size 2.0 +870 83 training.label_smoothing 0.0034417443941235104 +870 84 model.embedding_dim 0.0 +870 84 model.scoring_fct_norm 1.0 +870 84 regularizer.weight 
0.015442598021239583 +870 84 optimizer.lr 0.00536181270878674 +870 84 training.batch_size 2.0 +870 84 training.label_smoothing 0.017137837763056097 +870 85 model.embedding_dim 2.0 +870 85 model.scoring_fct_norm 1.0 +870 85 regularizer.weight 0.044635191089464454 +870 85 optimizer.lr 0.011046384659058177 +870 85 training.batch_size 2.0 +870 85 training.label_smoothing 0.13852779293619616 +870 86 model.embedding_dim 2.0 +870 86 model.scoring_fct_norm 2.0 +870 86 regularizer.weight 0.29673878029289136 +870 86 optimizer.lr 0.0016343733069625844 +870 86 training.batch_size 1.0 +870 86 training.label_smoothing 0.005986687144113247 +870 87 model.embedding_dim 2.0 +870 87 model.scoring_fct_norm 2.0 +870 87 regularizer.weight 0.18210683621017149 +870 87 optimizer.lr 0.0755159782230528 +870 87 training.batch_size 1.0 +870 87 training.label_smoothing 0.0930464987636538 +870 88 model.embedding_dim 2.0 +870 88 model.scoring_fct_norm 1.0 +870 88 regularizer.weight 0.020322395658256442 +870 88 optimizer.lr 0.009223383891450946 +870 88 training.batch_size 1.0 +870 88 training.label_smoothing 0.28824317868432175 +870 89 model.embedding_dim 2.0 +870 89 model.scoring_fct_norm 2.0 +870 89 regularizer.weight 0.2527311967750676 +870 89 optimizer.lr 0.010186348548463113 +870 89 training.batch_size 1.0 +870 89 training.label_smoothing 0.00762359678047422 +870 90 model.embedding_dim 1.0 +870 90 model.scoring_fct_norm 2.0 +870 90 regularizer.weight 0.16205067611128696 +870 90 optimizer.lr 0.0026364433448803305 +870 90 training.batch_size 1.0 +870 90 training.label_smoothing 0.0036073450692185156 +870 91 model.embedding_dim 1.0 +870 91 model.scoring_fct_norm 1.0 +870 91 regularizer.weight 0.27638240209237874 +870 91 optimizer.lr 0.044523183468459064 +870 91 training.batch_size 1.0 +870 91 training.label_smoothing 0.01920828035860692 +870 92 model.embedding_dim 1.0 +870 92 model.scoring_fct_norm 2.0 +870 92 regularizer.weight 0.010871488791608718 +870 92 optimizer.lr 0.08774822756107793 +870 
92 training.batch_size 2.0 +870 92 training.label_smoothing 0.19151257002503463 +870 93 model.embedding_dim 0.0 +870 93 model.scoring_fct_norm 1.0 +870 93 regularizer.weight 0.013119193655998866 +870 93 optimizer.lr 0.0011515363399072528 +870 93 training.batch_size 1.0 +870 93 training.label_smoothing 0.0013363720589554618 +870 94 model.embedding_dim 2.0 +870 94 model.scoring_fct_norm 2.0 +870 94 regularizer.weight 0.011705578134829376 +870 94 optimizer.lr 0.02099530129870855 +870 94 training.batch_size 2.0 +870 94 training.label_smoothing 0.026638983695825853 +870 95 model.embedding_dim 2.0 +870 95 model.scoring_fct_norm 1.0 +870 95 regularizer.weight 0.14706306263927227 +870 95 optimizer.lr 0.002414282307715348 +870 95 training.batch_size 2.0 +870 95 training.label_smoothing 0.6195909763313502 +870 96 model.embedding_dim 0.0 +870 96 model.scoring_fct_norm 1.0 +870 96 regularizer.weight 0.0910806792125442 +870 96 optimizer.lr 0.045423421618751625 +870 96 training.batch_size 0.0 +870 96 training.label_smoothing 0.0014631328061473093 +870 97 model.embedding_dim 1.0 +870 97 model.scoring_fct_norm 2.0 +870 97 regularizer.weight 0.05065361249786991 +870 97 optimizer.lr 0.0251984760787153 +870 97 training.batch_size 0.0 +870 97 training.label_smoothing 0.5538165919603136 +870 98 model.embedding_dim 1.0 +870 98 model.scoring_fct_norm 1.0 +870 98 regularizer.weight 0.19695754483417582 +870 98 optimizer.lr 0.002765884146129443 +870 98 training.batch_size 2.0 +870 98 training.label_smoothing 0.3677465992513597 +870 99 model.embedding_dim 1.0 +870 99 model.scoring_fct_norm 2.0 +870 99 regularizer.weight 0.0883705666624189 +870 99 optimizer.lr 0.009221709750037499 +870 99 training.batch_size 0.0 +870 99 training.label_smoothing 0.5003437618461561 +870 100 model.embedding_dim 0.0 +870 100 model.scoring_fct_norm 2.0 +870 100 regularizer.weight 0.20783118425660915 +870 100 optimizer.lr 0.026156664136332018 +870 100 training.batch_size 0.0 +870 100 training.label_smoothing 
0.002614192905736802 +870 1 dataset """kinships""" +870 1 model """transh""" +870 1 loss """softplus""" +870 1 regularizer """transh""" +870 1 optimizer """adam""" +870 1 training_loop """lcwa""" +870 1 evaluator """rankbased""" +870 2 dataset """kinships""" +870 2 model """transh""" +870 2 loss """softplus""" +870 2 regularizer """transh""" +870 2 optimizer """adam""" +870 2 training_loop """lcwa""" +870 2 evaluator """rankbased""" +870 3 dataset """kinships""" +870 3 model """transh""" +870 3 loss """softplus""" +870 3 regularizer """transh""" +870 3 optimizer """adam""" +870 3 training_loop """lcwa""" +870 3 evaluator """rankbased""" +870 4 dataset """kinships""" +870 4 model """transh""" +870 4 loss """softplus""" +870 4 regularizer """transh""" +870 4 optimizer """adam""" +870 4 training_loop """lcwa""" +870 4 evaluator """rankbased""" +870 5 dataset """kinships""" +870 5 model """transh""" +870 5 loss """softplus""" +870 5 regularizer """transh""" +870 5 optimizer """adam""" +870 5 training_loop """lcwa""" +870 5 evaluator """rankbased""" +870 6 dataset """kinships""" +870 6 model """transh""" +870 6 loss """softplus""" +870 6 regularizer """transh""" +870 6 optimizer """adam""" +870 6 training_loop """lcwa""" +870 6 evaluator """rankbased""" +870 7 dataset """kinships""" +870 7 model """transh""" +870 7 loss """softplus""" +870 7 regularizer """transh""" +870 7 optimizer """adam""" +870 7 training_loop """lcwa""" +870 7 evaluator """rankbased""" +870 8 dataset """kinships""" +870 8 model """transh""" +870 8 loss """softplus""" +870 8 regularizer """transh""" +870 8 optimizer """adam""" +870 8 training_loop """lcwa""" +870 8 evaluator """rankbased""" +870 9 dataset """kinships""" +870 9 model """transh""" +870 9 loss """softplus""" +870 9 regularizer """transh""" +870 9 optimizer """adam""" +870 9 training_loop """lcwa""" +870 9 evaluator """rankbased""" +870 10 dataset """kinships""" +870 10 model """transh""" +870 10 loss """softplus""" +870 10 regularizer 
"""transh""" +870 10 optimizer """adam""" +870 10 training_loop """lcwa""" +870 10 evaluator """rankbased""" +870 11 dataset """kinships""" +870 11 model """transh""" +870 11 loss """softplus""" +870 11 regularizer """transh""" +870 11 optimizer """adam""" +870 11 training_loop """lcwa""" +870 11 evaluator """rankbased""" +870 12 dataset """kinships""" +870 12 model """transh""" +870 12 loss """softplus""" +870 12 regularizer """transh""" +870 12 optimizer """adam""" +870 12 training_loop """lcwa""" +870 12 evaluator """rankbased""" +870 13 dataset """kinships""" +870 13 model """transh""" +870 13 loss """softplus""" +870 13 regularizer """transh""" +870 13 optimizer """adam""" +870 13 training_loop """lcwa""" +870 13 evaluator """rankbased""" +870 14 dataset """kinships""" +870 14 model """transh""" +870 14 loss """softplus""" +870 14 regularizer """transh""" +870 14 optimizer """adam""" +870 14 training_loop """lcwa""" +870 14 evaluator """rankbased""" +870 15 dataset """kinships""" +870 15 model """transh""" +870 15 loss """softplus""" +870 15 regularizer """transh""" +870 15 optimizer """adam""" +870 15 training_loop """lcwa""" +870 15 evaluator """rankbased""" +870 16 dataset """kinships""" +870 16 model """transh""" +870 16 loss """softplus""" +870 16 regularizer """transh""" +870 16 optimizer """adam""" +870 16 training_loop """lcwa""" +870 16 evaluator """rankbased""" +870 17 dataset """kinships""" +870 17 model """transh""" +870 17 loss """softplus""" +870 17 regularizer """transh""" +870 17 optimizer """adam""" +870 17 training_loop """lcwa""" +870 17 evaluator """rankbased""" +870 18 dataset """kinships""" +870 18 model """transh""" +870 18 loss """softplus""" +870 18 regularizer """transh""" +870 18 optimizer """adam""" +870 18 training_loop """lcwa""" +870 18 evaluator """rankbased""" +870 19 dataset """kinships""" +870 19 model """transh""" +870 19 loss """softplus""" +870 19 regularizer """transh""" +870 19 optimizer """adam""" +870 19 training_loop 
"""lcwa""" +870 19 evaluator """rankbased""" +870 20 dataset """kinships""" +870 20 model """transh""" +870 20 loss """softplus""" +870 20 regularizer """transh""" +870 20 optimizer """adam""" +870 20 training_loop """lcwa""" +870 20 evaluator """rankbased""" +870 21 dataset """kinships""" +870 21 model """transh""" +870 21 loss """softplus""" +870 21 regularizer """transh""" +870 21 optimizer """adam""" +870 21 training_loop """lcwa""" +870 21 evaluator """rankbased""" +870 22 dataset """kinships""" +870 22 model """transh""" +870 22 loss """softplus""" +870 22 regularizer """transh""" +870 22 optimizer """adam""" +870 22 training_loop """lcwa""" +870 22 evaluator """rankbased""" +870 23 dataset """kinships""" +870 23 model """transh""" +870 23 loss """softplus""" +870 23 regularizer """transh""" +870 23 optimizer """adam""" +870 23 training_loop """lcwa""" +870 23 evaluator """rankbased""" +870 24 dataset """kinships""" +870 24 model """transh""" +870 24 loss """softplus""" +870 24 regularizer """transh""" +870 24 optimizer """adam""" +870 24 training_loop """lcwa""" +870 24 evaluator """rankbased""" +870 25 dataset """kinships""" +870 25 model """transh""" +870 25 loss """softplus""" +870 25 regularizer """transh""" +870 25 optimizer """adam""" +870 25 training_loop """lcwa""" +870 25 evaluator """rankbased""" +870 26 dataset """kinships""" +870 26 model """transh""" +870 26 loss """softplus""" +870 26 regularizer """transh""" +870 26 optimizer """adam""" +870 26 training_loop """lcwa""" +870 26 evaluator """rankbased""" +870 27 dataset """kinships""" +870 27 model """transh""" +870 27 loss """softplus""" +870 27 regularizer """transh""" +870 27 optimizer """adam""" +870 27 training_loop """lcwa""" +870 27 evaluator """rankbased""" +870 28 dataset """kinships""" +870 28 model """transh""" +870 28 loss """softplus""" +870 28 regularizer """transh""" +870 28 optimizer """adam""" +870 28 training_loop """lcwa""" +870 28 evaluator """rankbased""" +870 29 dataset 
"""kinships""" +870 29 model """transh""" +870 29 loss """softplus""" +870 29 regularizer """transh""" +870 29 optimizer """adam""" +870 29 training_loop """lcwa""" +870 29 evaluator """rankbased""" +870 30 dataset """kinships""" +870 30 model """transh""" +870 30 loss """softplus""" +870 30 regularizer """transh""" +870 30 optimizer """adam""" +870 30 training_loop """lcwa""" +870 30 evaluator """rankbased""" +870 31 dataset """kinships""" +870 31 model """transh""" +870 31 loss """softplus""" +870 31 regularizer """transh""" +870 31 optimizer """adam""" +870 31 training_loop """lcwa""" +870 31 evaluator """rankbased""" +870 32 dataset """kinships""" +870 32 model """transh""" +870 32 loss """softplus""" +870 32 regularizer """transh""" +870 32 optimizer """adam""" +870 32 training_loop """lcwa""" +870 32 evaluator """rankbased""" +870 33 dataset """kinships""" +870 33 model """transh""" +870 33 loss """softplus""" +870 33 regularizer """transh""" +870 33 optimizer """adam""" +870 33 training_loop """lcwa""" +870 33 evaluator """rankbased""" +870 34 dataset """kinships""" +870 34 model """transh""" +870 34 loss """softplus""" +870 34 regularizer """transh""" +870 34 optimizer """adam""" +870 34 training_loop """lcwa""" +870 34 evaluator """rankbased""" +870 35 dataset """kinships""" +870 35 model """transh""" +870 35 loss """softplus""" +870 35 regularizer """transh""" +870 35 optimizer """adam""" +870 35 training_loop """lcwa""" +870 35 evaluator """rankbased""" +870 36 dataset """kinships""" +870 36 model """transh""" +870 36 loss """softplus""" +870 36 regularizer """transh""" +870 36 optimizer """adam""" +870 36 training_loop """lcwa""" +870 36 evaluator """rankbased""" +870 37 dataset """kinships""" +870 37 model """transh""" +870 37 loss """softplus""" +870 37 regularizer """transh""" +870 37 optimizer """adam""" +870 37 training_loop """lcwa""" +870 37 evaluator """rankbased""" +870 38 dataset """kinships""" +870 38 model """transh""" +870 38 loss 
"""softplus""" +870 38 regularizer """transh""" +870 38 optimizer """adam""" +870 38 training_loop """lcwa""" +870 38 evaluator """rankbased""" +870 39 dataset """kinships""" +870 39 model """transh""" +870 39 loss """softplus""" +870 39 regularizer """transh""" +870 39 optimizer """adam""" +870 39 training_loop """lcwa""" +870 39 evaluator """rankbased""" +870 40 dataset """kinships""" +870 40 model """transh""" +870 40 loss """softplus""" +870 40 regularizer """transh""" +870 40 optimizer """adam""" +870 40 training_loop """lcwa""" +870 40 evaluator """rankbased""" +870 41 dataset """kinships""" +870 41 model """transh""" +870 41 loss """softplus""" +870 41 regularizer """transh""" +870 41 optimizer """adam""" +870 41 training_loop """lcwa""" +870 41 evaluator """rankbased""" +870 42 dataset """kinships""" +870 42 model """transh""" +870 42 loss """softplus""" +870 42 regularizer """transh""" +870 42 optimizer """adam""" +870 42 training_loop """lcwa""" +870 42 evaluator """rankbased""" +870 43 dataset """kinships""" +870 43 model """transh""" +870 43 loss """softplus""" +870 43 regularizer """transh""" +870 43 optimizer """adam""" +870 43 training_loop """lcwa""" +870 43 evaluator """rankbased""" +870 44 dataset """kinships""" +870 44 model """transh""" +870 44 loss """softplus""" +870 44 regularizer """transh""" +870 44 optimizer """adam""" +870 44 training_loop """lcwa""" +870 44 evaluator """rankbased""" +870 45 dataset """kinships""" +870 45 model """transh""" +870 45 loss """softplus""" +870 45 regularizer """transh""" +870 45 optimizer """adam""" +870 45 training_loop """lcwa""" +870 45 evaluator """rankbased""" +870 46 dataset """kinships""" +870 46 model """transh""" +870 46 loss """softplus""" +870 46 regularizer """transh""" +870 46 optimizer """adam""" +870 46 training_loop """lcwa""" +870 46 evaluator """rankbased""" +870 47 dataset """kinships""" +870 47 model """transh""" +870 47 loss """softplus""" +870 47 regularizer """transh""" +870 47 
optimizer """adam""" +870 47 training_loop """lcwa""" +870 47 evaluator """rankbased""" +870 48 dataset """kinships""" +870 48 model """transh""" +870 48 loss """softplus""" +870 48 regularizer """transh""" +870 48 optimizer """adam""" +870 48 training_loop """lcwa""" +870 48 evaluator """rankbased""" +870 49 dataset """kinships""" +870 49 model """transh""" +870 49 loss """softplus""" +870 49 regularizer """transh""" +870 49 optimizer """adam""" +870 49 training_loop """lcwa""" +870 49 evaluator """rankbased""" +870 50 dataset """kinships""" +870 50 model """transh""" +870 50 loss """softplus""" +870 50 regularizer """transh""" +870 50 optimizer """adam""" +870 50 training_loop """lcwa""" +870 50 evaluator """rankbased""" +870 51 dataset """kinships""" +870 51 model """transh""" +870 51 loss """softplus""" +870 51 regularizer """transh""" +870 51 optimizer """adam""" +870 51 training_loop """lcwa""" +870 51 evaluator """rankbased""" +870 52 dataset """kinships""" +870 52 model """transh""" +870 52 loss """softplus""" +870 52 regularizer """transh""" +870 52 optimizer """adam""" +870 52 training_loop """lcwa""" +870 52 evaluator """rankbased""" +870 53 dataset """kinships""" +870 53 model """transh""" +870 53 loss """softplus""" +870 53 regularizer """transh""" +870 53 optimizer """adam""" +870 53 training_loop """lcwa""" +870 53 evaluator """rankbased""" +870 54 dataset """kinships""" +870 54 model """transh""" +870 54 loss """softplus""" +870 54 regularizer """transh""" +870 54 optimizer """adam""" +870 54 training_loop """lcwa""" +870 54 evaluator """rankbased""" +870 55 dataset """kinships""" +870 55 model """transh""" +870 55 loss """softplus""" +870 55 regularizer """transh""" +870 55 optimizer """adam""" +870 55 training_loop """lcwa""" +870 55 evaluator """rankbased""" +870 56 dataset """kinships""" +870 56 model """transh""" +870 56 loss """softplus""" +870 56 regularizer """transh""" +870 56 optimizer """adam""" +870 56 training_loop """lcwa""" +870 56 
evaluator """rankbased""" +870 57 dataset """kinships""" +870 57 model """transh""" +870 57 loss """softplus""" +870 57 regularizer """transh""" +870 57 optimizer """adam""" +870 57 training_loop """lcwa""" +870 57 evaluator """rankbased""" +870 58 dataset """kinships""" +870 58 model """transh""" +870 58 loss """softplus""" +870 58 regularizer """transh""" +870 58 optimizer """adam""" +870 58 training_loop """lcwa""" +870 58 evaluator """rankbased""" +870 59 dataset """kinships""" +870 59 model """transh""" +870 59 loss """softplus""" +870 59 regularizer """transh""" +870 59 optimizer """adam""" +870 59 training_loop """lcwa""" +870 59 evaluator """rankbased""" +870 60 dataset """kinships""" +870 60 model """transh""" +870 60 loss """softplus""" +870 60 regularizer """transh""" +870 60 optimizer """adam""" +870 60 training_loop """lcwa""" +870 60 evaluator """rankbased""" +870 61 dataset """kinships""" +870 61 model """transh""" +870 61 loss """softplus""" +870 61 regularizer """transh""" +870 61 optimizer """adam""" +870 61 training_loop """lcwa""" +870 61 evaluator """rankbased""" +870 62 dataset """kinships""" +870 62 model """transh""" +870 62 loss """softplus""" +870 62 regularizer """transh""" +870 62 optimizer """adam""" +870 62 training_loop """lcwa""" +870 62 evaluator """rankbased""" +870 63 dataset """kinships""" +870 63 model """transh""" +870 63 loss """softplus""" +870 63 regularizer """transh""" +870 63 optimizer """adam""" +870 63 training_loop """lcwa""" +870 63 evaluator """rankbased""" +870 64 dataset """kinships""" +870 64 model """transh""" +870 64 loss """softplus""" +870 64 regularizer """transh""" +870 64 optimizer """adam""" +870 64 training_loop """lcwa""" +870 64 evaluator """rankbased""" +870 65 dataset """kinships""" +870 65 model """transh""" +870 65 loss """softplus""" +870 65 regularizer """transh""" +870 65 optimizer """adam""" +870 65 training_loop """lcwa""" +870 65 evaluator """rankbased""" +870 66 dataset """kinships""" +870 66 
model """transh""" +870 66 loss """softplus""" +870 66 regularizer """transh""" +870 66 optimizer """adam""" +870 66 training_loop """lcwa""" +870 66 evaluator """rankbased""" +870 67 dataset """kinships""" +870 67 model """transh""" +870 67 loss """softplus""" +870 67 regularizer """transh""" +870 67 optimizer """adam""" +870 67 training_loop """lcwa""" +870 67 evaluator """rankbased""" +870 68 dataset """kinships""" +870 68 model """transh""" +870 68 loss """softplus""" +870 68 regularizer """transh""" +870 68 optimizer """adam""" +870 68 training_loop """lcwa""" +870 68 evaluator """rankbased""" +870 69 dataset """kinships""" +870 69 model """transh""" +870 69 loss """softplus""" +870 69 regularizer """transh""" +870 69 optimizer """adam""" +870 69 training_loop """lcwa""" +870 69 evaluator """rankbased""" +870 70 dataset """kinships""" +870 70 model """transh""" +870 70 loss """softplus""" +870 70 regularizer """transh""" +870 70 optimizer """adam""" +870 70 training_loop """lcwa""" +870 70 evaluator """rankbased""" +870 71 dataset """kinships""" +870 71 model """transh""" +870 71 loss """softplus""" +870 71 regularizer """transh""" +870 71 optimizer """adam""" +870 71 training_loop """lcwa""" +870 71 evaluator """rankbased""" +870 72 dataset """kinships""" +870 72 model """transh""" +870 72 loss """softplus""" +870 72 regularizer """transh""" +870 72 optimizer """adam""" +870 72 training_loop """lcwa""" +870 72 evaluator """rankbased""" +870 73 dataset """kinships""" +870 73 model """transh""" +870 73 loss """softplus""" +870 73 regularizer """transh""" +870 73 optimizer """adam""" +870 73 training_loop """lcwa""" +870 73 evaluator """rankbased""" +870 74 dataset """kinships""" +870 74 model """transh""" +870 74 loss """softplus""" +870 74 regularizer """transh""" +870 74 optimizer """adam""" +870 74 training_loop """lcwa""" +870 74 evaluator """rankbased""" +870 75 dataset """kinships""" +870 75 model """transh""" +870 75 loss """softplus""" +870 75 
regularizer """transh""" +870 75 optimizer """adam""" +870 75 training_loop """lcwa""" +870 75 evaluator """rankbased""" +870 76 dataset """kinships""" +870 76 model """transh""" +870 76 loss """softplus""" +870 76 regularizer """transh""" +870 76 optimizer """adam""" +870 76 training_loop """lcwa""" +870 76 evaluator """rankbased""" +870 77 dataset """kinships""" +870 77 model """transh""" +870 77 loss """softplus""" +870 77 regularizer """transh""" +870 77 optimizer """adam""" +870 77 training_loop """lcwa""" +870 77 evaluator """rankbased""" +870 78 dataset """kinships""" +870 78 model """transh""" +870 78 loss """softplus""" +870 78 regularizer """transh""" +870 78 optimizer """adam""" +870 78 training_loop """lcwa""" +870 78 evaluator """rankbased""" +870 79 dataset """kinships""" +870 79 model """transh""" +870 79 loss """softplus""" +870 79 regularizer """transh""" +870 79 optimizer """adam""" +870 79 training_loop """lcwa""" +870 79 evaluator """rankbased""" +870 80 dataset """kinships""" +870 80 model """transh""" +870 80 loss """softplus""" +870 80 regularizer """transh""" +870 80 optimizer """adam""" +870 80 training_loop """lcwa""" +870 80 evaluator """rankbased""" +870 81 dataset """kinships""" +870 81 model """transh""" +870 81 loss """softplus""" +870 81 regularizer """transh""" +870 81 optimizer """adam""" +870 81 training_loop """lcwa""" +870 81 evaluator """rankbased""" +870 82 dataset """kinships""" +870 82 model """transh""" +870 82 loss """softplus""" +870 82 regularizer """transh""" +870 82 optimizer """adam""" +870 82 training_loop """lcwa""" +870 82 evaluator """rankbased""" +870 83 dataset """kinships""" +870 83 model """transh""" +870 83 loss """softplus""" +870 83 regularizer """transh""" +870 83 optimizer """adam""" +870 83 training_loop """lcwa""" +870 83 evaluator """rankbased""" +870 84 dataset """kinships""" +870 84 model """transh""" +870 84 loss """softplus""" +870 84 regularizer """transh""" +870 84 optimizer """adam""" +870 84 
training_loop """lcwa""" +870 84 evaluator """rankbased""" +870 85 dataset """kinships""" +870 85 model """transh""" +870 85 loss """softplus""" +870 85 regularizer """transh""" +870 85 optimizer """adam""" +870 85 training_loop """lcwa""" +870 85 evaluator """rankbased""" +870 86 dataset """kinships""" +870 86 model """transh""" +870 86 loss """softplus""" +870 86 regularizer """transh""" +870 86 optimizer """adam""" +870 86 training_loop """lcwa""" +870 86 evaluator """rankbased""" +870 87 dataset """kinships""" +870 87 model """transh""" +870 87 loss """softplus""" +870 87 regularizer """transh""" +870 87 optimizer """adam""" +870 87 training_loop """lcwa""" +870 87 evaluator """rankbased""" +870 88 dataset """kinships""" +870 88 model """transh""" +870 88 loss """softplus""" +870 88 regularizer """transh""" +870 88 optimizer """adam""" +870 88 training_loop """lcwa""" +870 88 evaluator """rankbased""" +870 89 dataset """kinships""" +870 89 model """transh""" +870 89 loss """softplus""" +870 89 regularizer """transh""" +870 89 optimizer """adam""" +870 89 training_loop """lcwa""" +870 89 evaluator """rankbased""" +870 90 dataset """kinships""" +870 90 model """transh""" +870 90 loss """softplus""" +870 90 regularizer """transh""" +870 90 optimizer """adam""" +870 90 training_loop """lcwa""" +870 90 evaluator """rankbased""" +870 91 dataset """kinships""" +870 91 model """transh""" +870 91 loss """softplus""" +870 91 regularizer """transh""" +870 91 optimizer """adam""" +870 91 training_loop """lcwa""" +870 91 evaluator """rankbased""" +870 92 dataset """kinships""" +870 92 model """transh""" +870 92 loss """softplus""" +870 92 regularizer """transh""" +870 92 optimizer """adam""" +870 92 training_loop """lcwa""" +870 92 evaluator """rankbased""" +870 93 dataset """kinships""" +870 93 model """transh""" +870 93 loss """softplus""" +870 93 regularizer """transh""" +870 93 optimizer """adam""" +870 93 training_loop """lcwa""" +870 93 evaluator """rankbased""" +870 
94 dataset """kinships""" +870 94 model """transh""" +870 94 loss """softplus""" +870 94 regularizer """transh""" +870 94 optimizer """adam""" +870 94 training_loop """lcwa""" +870 94 evaluator """rankbased""" +870 95 dataset """kinships""" +870 95 model """transh""" +870 95 loss """softplus""" +870 95 regularizer """transh""" +870 95 optimizer """adam""" +870 95 training_loop """lcwa""" +870 95 evaluator """rankbased""" +870 96 dataset """kinships""" +870 96 model """transh""" +870 96 loss """softplus""" +870 96 regularizer """transh""" +870 96 optimizer """adam""" +870 96 training_loop """lcwa""" +870 96 evaluator """rankbased""" +870 97 dataset """kinships""" +870 97 model """transh""" +870 97 loss """softplus""" +870 97 regularizer """transh""" +870 97 optimizer """adam""" +870 97 training_loop """lcwa""" +870 97 evaluator """rankbased""" +870 98 dataset """kinships""" +870 98 model """transh""" +870 98 loss """softplus""" +870 98 regularizer """transh""" +870 98 optimizer """adam""" +870 98 training_loop """lcwa""" +870 98 evaluator """rankbased""" +870 99 dataset """kinships""" +870 99 model """transh""" +870 99 loss """softplus""" +870 99 regularizer """transh""" +870 99 optimizer """adam""" +870 99 training_loop """lcwa""" +870 99 evaluator """rankbased""" +870 100 dataset """kinships""" +870 100 model """transh""" +870 100 loss """softplus""" +870 100 regularizer """transh""" +870 100 optimizer """adam""" +870 100 training_loop """lcwa""" +870 100 evaluator """rankbased""" +871 1 model.embedding_dim 2.0 +871 1 model.scoring_fct_norm 2.0 +871 1 regularizer.weight 0.11618491088671236 +871 1 optimizer.lr 0.010921349172400131 +871 1 training.batch_size 1.0 +871 1 training.label_smoothing 0.0015647654364563343 +871 2 model.embedding_dim 0.0 +871 2 model.scoring_fct_norm 2.0 +871 2 regularizer.weight 0.017349168977085768 +871 2 optimizer.lr 0.014247127724154601 +871 2 training.batch_size 0.0 +871 2 training.label_smoothing 0.10208704514134191 +871 3 
model.embedding_dim 0.0 +871 3 model.scoring_fct_norm 2.0 +871 3 regularizer.weight 0.227143632303875 +871 3 optimizer.lr 0.03390392809209024 +871 3 training.batch_size 0.0 +871 3 training.label_smoothing 0.06632332812110349 +871 4 model.embedding_dim 0.0 +871 4 model.scoring_fct_norm 2.0 +871 4 regularizer.weight 0.02176953157525366 +871 4 optimizer.lr 0.054925070910988034 +871 4 training.batch_size 2.0 +871 4 training.label_smoothing 0.004168688594067592 +871 5 model.embedding_dim 2.0 +871 5 model.scoring_fct_norm 2.0 +871 5 regularizer.weight 0.11657989605775099 +871 5 optimizer.lr 0.03316366425922839 +871 5 training.batch_size 2.0 +871 5 training.label_smoothing 0.00448786480527042 +871 6 model.embedding_dim 0.0 +871 6 model.scoring_fct_norm 2.0 +871 6 regularizer.weight 0.10753490687224161 +871 6 optimizer.lr 0.023503527055614478 +871 6 training.batch_size 2.0 +871 6 training.label_smoothing 0.24269202749639734 +871 7 model.embedding_dim 1.0 +871 7 model.scoring_fct_norm 2.0 +871 7 regularizer.weight 0.014967934215985397 +871 7 optimizer.lr 0.04653687237145146 +871 7 training.batch_size 1.0 +871 7 training.label_smoothing 0.023253538283411212 +871 8 model.embedding_dim 0.0 +871 8 model.scoring_fct_norm 1.0 +871 8 regularizer.weight 0.018511619616162635 +871 8 optimizer.lr 0.07866612702379586 +871 8 training.batch_size 0.0 +871 8 training.label_smoothing 0.027235278170256696 +871 9 model.embedding_dim 1.0 +871 9 model.scoring_fct_norm 1.0 +871 9 regularizer.weight 0.018772783907163054 +871 9 optimizer.lr 0.08311228540629018 +871 9 training.batch_size 2.0 +871 9 training.label_smoothing 0.02005490986391176 +871 10 model.embedding_dim 2.0 +871 10 model.scoring_fct_norm 1.0 +871 10 regularizer.weight 0.06775321569438283 +871 10 optimizer.lr 0.031449410391420615 +871 10 training.batch_size 2.0 +871 10 training.label_smoothing 0.022165453296065292 +871 11 model.embedding_dim 0.0 +871 11 model.scoring_fct_norm 1.0 +871 11 regularizer.weight 0.019868770953698205 +871 
11 optimizer.lr 0.03521577781362974 +871 11 training.batch_size 2.0 +871 11 training.label_smoothing 0.01916787575373556 +871 12 model.embedding_dim 0.0 +871 12 model.scoring_fct_norm 2.0 +871 12 regularizer.weight 0.03282403595988164 +871 12 optimizer.lr 0.018680778613379675 +871 12 training.batch_size 1.0 +871 12 training.label_smoothing 0.0014405226538312078 +871 13 model.embedding_dim 1.0 +871 13 model.scoring_fct_norm 1.0 +871 13 regularizer.weight 0.11220078719612225 +871 13 optimizer.lr 0.010172670232570109 +871 13 training.batch_size 2.0 +871 13 training.label_smoothing 0.05087770513430713 +871 14 model.embedding_dim 2.0 +871 14 model.scoring_fct_norm 2.0 +871 14 regularizer.weight 0.07736946952105471 +871 14 optimizer.lr 0.00716053810501424 +871 14 training.batch_size 0.0 +871 14 training.label_smoothing 0.05534211895029257 +871 15 model.embedding_dim 2.0 +871 15 model.scoring_fct_norm 2.0 +871 15 regularizer.weight 0.02286177287876155 +871 15 optimizer.lr 0.04942536571932234 +871 15 training.batch_size 0.0 +871 15 training.label_smoothing 0.227284139002677 +871 16 model.embedding_dim 0.0 +871 16 model.scoring_fct_norm 2.0 +871 16 regularizer.weight 0.053787918945117745 +871 16 optimizer.lr 0.030375111887798855 +871 16 training.batch_size 2.0 +871 16 training.label_smoothing 0.0038626858002606987 +871 17 model.embedding_dim 2.0 +871 17 model.scoring_fct_norm 1.0 +871 17 regularizer.weight 0.04594810856429139 +871 17 optimizer.lr 0.016144791104495834 +871 17 training.batch_size 2.0 +871 17 training.label_smoothing 0.030745062552026442 +871 18 model.embedding_dim 1.0 +871 18 model.scoring_fct_norm 2.0 +871 18 regularizer.weight 0.011211737021129249 +871 18 optimizer.lr 0.09517920623558089 +871 18 training.batch_size 2.0 +871 18 training.label_smoothing 0.0031094637444564308 +871 19 model.embedding_dim 2.0 +871 19 model.scoring_fct_norm 2.0 +871 19 regularizer.weight 0.13283554976335904 +871 19 optimizer.lr 0.005925451532279938 +871 19 training.batch_size 0.0 
+871 19 training.label_smoothing 0.0018647327351830445 +871 20 model.embedding_dim 2.0 +871 20 model.scoring_fct_norm 1.0 +871 20 regularizer.weight 0.048967494928996096 +871 20 optimizer.lr 0.08076796067506065 +871 20 training.batch_size 2.0 +871 20 training.label_smoothing 0.1653505047893449 +871 21 model.embedding_dim 2.0 +871 21 model.scoring_fct_norm 2.0 +871 21 regularizer.weight 0.010001676541895228 +871 21 optimizer.lr 0.022000661394838857 +871 21 training.batch_size 0.0 +871 21 training.label_smoothing 0.0029750416029970003 +871 22 model.embedding_dim 2.0 +871 22 model.scoring_fct_norm 2.0 +871 22 regularizer.weight 0.032132881356351256 +871 22 optimizer.lr 0.011738445706886787 +871 22 training.batch_size 0.0 +871 22 training.label_smoothing 0.6445448445143228 +871 23 model.embedding_dim 2.0 +871 23 model.scoring_fct_norm 2.0 +871 23 regularizer.weight 0.02588950490831764 +871 23 optimizer.lr 0.03145021249539975 +871 23 training.batch_size 2.0 +871 23 training.label_smoothing 0.005503174024189179 +871 24 model.embedding_dim 1.0 +871 24 model.scoring_fct_norm 2.0 +871 24 regularizer.weight 0.05529814130798912 +871 24 optimizer.lr 0.005739311654744952 +871 24 training.batch_size 2.0 +871 24 training.label_smoothing 0.17860745546908433 +871 25 model.embedding_dim 2.0 +871 25 model.scoring_fct_norm 2.0 +871 25 regularizer.weight 0.1187370024602321 +871 25 optimizer.lr 0.0496681310653942 +871 25 training.batch_size 2.0 +871 25 training.label_smoothing 0.4232068735201204 +871 26 model.embedding_dim 2.0 +871 26 model.scoring_fct_norm 1.0 +871 26 regularizer.weight 0.21588163570206545 +871 26 optimizer.lr 0.0033550266519023686 +871 26 training.batch_size 1.0 +871 26 training.label_smoothing 0.016010771214554412 +871 27 model.embedding_dim 1.0 +871 27 model.scoring_fct_norm 2.0 +871 27 regularizer.weight 0.06523435001985581 +871 27 optimizer.lr 0.09682013215943198 +871 27 training.batch_size 2.0 +871 27 training.label_smoothing 0.002615815961045555 +871 28 
model.embedding_dim 1.0 +871 28 model.scoring_fct_norm 1.0 +871 28 regularizer.weight 0.06961766958272983 +871 28 optimizer.lr 0.02377470821202362 +871 28 training.batch_size 1.0 +871 28 training.label_smoothing 0.00328516545161492 +871 29 model.embedding_dim 0.0 +871 29 model.scoring_fct_norm 1.0 +871 29 regularizer.weight 0.017456593842989285 +871 29 optimizer.lr 0.043822001948753506 +871 29 training.batch_size 0.0 +871 29 training.label_smoothing 0.0027822492641280767 +871 30 model.embedding_dim 0.0 +871 30 model.scoring_fct_norm 1.0 +871 30 regularizer.weight 0.021384976226012993 +871 30 optimizer.lr 0.002036828429589575 +871 30 training.batch_size 2.0 +871 30 training.label_smoothing 0.30448971162221583 +871 31 model.embedding_dim 0.0 +871 31 model.scoring_fct_norm 1.0 +871 31 regularizer.weight 0.2578974929660719 +871 31 optimizer.lr 0.008177099040787192 +871 31 training.batch_size 0.0 +871 31 training.label_smoothing 0.0010227679707959475 +871 32 model.embedding_dim 0.0 +871 32 model.scoring_fct_norm 1.0 +871 32 regularizer.weight 0.03858751605910067 +871 32 optimizer.lr 0.004668366069450363 +871 32 training.batch_size 2.0 +871 32 training.label_smoothing 0.2091223109156126 +871 33 model.embedding_dim 0.0 +871 33 model.scoring_fct_norm 1.0 +871 33 regularizer.weight 0.07858007523811991 +871 33 optimizer.lr 0.005081332314031216 +871 33 training.batch_size 1.0 +871 33 training.label_smoothing 0.002997153683929245 +871 34 model.embedding_dim 1.0 +871 34 model.scoring_fct_norm 1.0 +871 34 regularizer.weight 0.0491348988075975 +871 34 optimizer.lr 0.003132187010168385 +871 34 training.batch_size 0.0 +871 34 training.label_smoothing 0.6016490126029522 +871 35 model.embedding_dim 0.0 +871 35 model.scoring_fct_norm 1.0 +871 35 regularizer.weight 0.02885153039773366 +871 35 optimizer.lr 0.004307122181289759 +871 35 training.batch_size 0.0 +871 35 training.label_smoothing 0.002986958916619672 +871 36 model.embedding_dim 2.0 +871 36 model.scoring_fct_norm 1.0 +871 36 
regularizer.weight 0.10542338773528667 +871 36 optimizer.lr 0.03138444143596324 +871 36 training.batch_size 2.0 +871 36 training.label_smoothing 0.5741436325508243 +871 37 model.embedding_dim 2.0 +871 37 model.scoring_fct_norm 2.0 +871 37 regularizer.weight 0.0454994622658734 +871 37 optimizer.lr 0.0010564682439196155 +871 37 training.batch_size 0.0 +871 37 training.label_smoothing 0.10782370367597864 +871 38 model.embedding_dim 0.0 +871 38 model.scoring_fct_norm 2.0 +871 38 regularizer.weight 0.05343066946610864 +871 38 optimizer.lr 0.002801945154923803 +871 38 training.batch_size 1.0 +871 38 training.label_smoothing 0.05834207098273956 +871 39 model.embedding_dim 1.0 +871 39 model.scoring_fct_norm 1.0 +871 39 regularizer.weight 0.24618823676488502 +871 39 optimizer.lr 0.06537814724769514 +871 39 training.batch_size 1.0 +871 39 training.label_smoothing 0.008164312323171512 +871 40 model.embedding_dim 1.0 +871 40 model.scoring_fct_norm 2.0 +871 40 regularizer.weight 0.1899232986896966 +871 40 optimizer.lr 0.023391213315372913 +871 40 training.batch_size 2.0 +871 40 training.label_smoothing 0.038146197735876436 +871 41 model.embedding_dim 0.0 +871 41 model.scoring_fct_norm 2.0 +871 41 regularizer.weight 0.022001524372738945 +871 41 optimizer.lr 0.020536585435862807 +871 41 training.batch_size 2.0 +871 41 training.label_smoothing 0.3938553651584133 +871 42 model.embedding_dim 0.0 +871 42 model.scoring_fct_norm 2.0 +871 42 regularizer.weight 0.24749135766825467 +871 42 optimizer.lr 0.004864647435190595 +871 42 training.batch_size 0.0 +871 42 training.label_smoothing 0.15742381611580444 +871 43 model.embedding_dim 0.0 +871 43 model.scoring_fct_norm 1.0 +871 43 regularizer.weight 0.08601075485675613 +871 43 optimizer.lr 0.06997521715211177 +871 43 training.batch_size 1.0 +871 43 training.label_smoothing 0.360477862746031 +871 44 model.embedding_dim 1.0 +871 44 model.scoring_fct_norm 2.0 +871 44 regularizer.weight 0.012939264648668005 +871 44 optimizer.lr 
0.002916932317091595 +871 44 training.batch_size 2.0 +871 44 training.label_smoothing 0.011252441510520385 +871 45 model.embedding_dim 0.0 +871 45 model.scoring_fct_norm 2.0 +871 45 regularizer.weight 0.020018623606135138 +871 45 optimizer.lr 0.018497708744004103 +871 45 training.batch_size 1.0 +871 45 training.label_smoothing 0.0065666864501296 +871 46 model.embedding_dim 1.0 +871 46 model.scoring_fct_norm 1.0 +871 46 regularizer.weight 0.0664795374655563 +871 46 optimizer.lr 0.0016729993560664258 +871 46 training.batch_size 2.0 +871 46 training.label_smoothing 0.00985522478999908 +871 47 model.embedding_dim 0.0 +871 47 model.scoring_fct_norm 2.0 +871 47 regularizer.weight 0.03412282260598565 +871 47 optimizer.lr 0.04200112693665501 +871 47 training.batch_size 0.0 +871 47 training.label_smoothing 0.20037431733598132 +871 48 model.embedding_dim 2.0 +871 48 model.scoring_fct_norm 1.0 +871 48 regularizer.weight 0.18302038594686298 +871 48 optimizer.lr 0.0011301063658007903 +871 48 training.batch_size 2.0 +871 48 training.label_smoothing 0.0037628473097150827 +871 49 model.embedding_dim 1.0 +871 49 model.scoring_fct_norm 1.0 +871 49 regularizer.weight 0.04732397902444892 +871 49 optimizer.lr 0.002072802306451403 +871 49 training.batch_size 1.0 +871 49 training.label_smoothing 0.001321053845335483 +871 50 model.embedding_dim 1.0 +871 50 model.scoring_fct_norm 1.0 +871 50 regularizer.weight 0.03680434403307605 +871 50 optimizer.lr 0.003818318831584078 +871 50 training.batch_size 0.0 +871 50 training.label_smoothing 0.0028866108954413872 +871 51 model.embedding_dim 1.0 +871 51 model.scoring_fct_norm 2.0 +871 51 regularizer.weight 0.04748581324109312 +871 51 optimizer.lr 0.029667412124451106 +871 51 training.batch_size 1.0 +871 51 training.label_smoothing 0.24390346812692354 +871 52 model.embedding_dim 0.0 +871 52 model.scoring_fct_norm 2.0 +871 52 regularizer.weight 0.05539886386660201 +871 52 optimizer.lr 0.010090627280559865 +871 52 training.batch_size 2.0 +871 52 
training.label_smoothing 0.0270337865360298 +871 53 model.embedding_dim 2.0 +871 53 model.scoring_fct_norm 2.0 +871 53 regularizer.weight 0.20674119367236699 +871 53 optimizer.lr 0.003425550222480051 +871 53 training.batch_size 2.0 +871 53 training.label_smoothing 0.20989630644755503 +871 54 model.embedding_dim 0.0 +871 54 model.scoring_fct_norm 2.0 +871 54 regularizer.weight 0.08494497513444974 +871 54 optimizer.lr 0.004691566900221602 +871 54 training.batch_size 2.0 +871 54 training.label_smoothing 0.004076900564026085 +871 55 model.embedding_dim 1.0 +871 55 model.scoring_fct_norm 2.0 +871 55 regularizer.weight 0.1269842321135874 +871 55 optimizer.lr 0.008191489183924846 +871 55 training.batch_size 0.0 +871 55 training.label_smoothing 0.48338969334936494 +871 56 model.embedding_dim 1.0 +871 56 model.scoring_fct_norm 1.0 +871 56 regularizer.weight 0.014325939942661828 +871 56 optimizer.lr 0.001265690029899228 +871 56 training.batch_size 0.0 +871 56 training.label_smoothing 0.016389709262842108 +871 57 model.embedding_dim 2.0 +871 57 model.scoring_fct_norm 1.0 +871 57 regularizer.weight 0.026817701249197808 +871 57 optimizer.lr 0.025081910679400073 +871 57 training.batch_size 1.0 +871 57 training.label_smoothing 0.04120709383796675 +871 58 model.embedding_dim 0.0 +871 58 model.scoring_fct_norm 2.0 +871 58 regularizer.weight 0.1508248809940164 +871 58 optimizer.lr 0.002689683171725654 +871 58 training.batch_size 0.0 +871 58 training.label_smoothing 0.010693810044272552 +871 59 model.embedding_dim 2.0 +871 59 model.scoring_fct_norm 2.0 +871 59 regularizer.weight 0.011188182015307553 +871 59 optimizer.lr 0.008074815817878242 +871 59 training.batch_size 1.0 +871 59 training.label_smoothing 0.06456917684407547 +871 60 model.embedding_dim 1.0 +871 60 model.scoring_fct_norm 1.0 +871 60 regularizer.weight 0.05678742699525588 +871 60 optimizer.lr 0.016990581693272958 +871 60 training.batch_size 2.0 +871 60 training.label_smoothing 0.05241827060300141 +871 61 
model.embedding_dim 1.0 +871 61 model.scoring_fct_norm 2.0 +871 61 regularizer.weight 0.25824621368720196 +871 61 optimizer.lr 0.03693956190610797 +871 61 training.batch_size 2.0 +871 61 training.label_smoothing 0.16749254806653338 +871 62 model.embedding_dim 0.0 +871 62 model.scoring_fct_norm 1.0 +871 62 regularizer.weight 0.053156467463094825 +871 62 optimizer.lr 0.018163719538426028 +871 62 training.batch_size 2.0 +871 62 training.label_smoothing 0.010957753108798606 +871 63 model.embedding_dim 0.0 +871 63 model.scoring_fct_norm 2.0 +871 63 regularizer.weight 0.03793995806637723 +871 63 optimizer.lr 0.002879290315686639 +871 63 training.batch_size 0.0 +871 63 training.label_smoothing 0.004648917804330368 +871 64 model.embedding_dim 2.0 +871 64 model.scoring_fct_norm 2.0 +871 64 regularizer.weight 0.20167580898913914 +871 64 optimizer.lr 0.06499429879224744 +871 64 training.batch_size 0.0 +871 64 training.label_smoothing 0.006338240955896793 +871 65 model.embedding_dim 2.0 +871 65 model.scoring_fct_norm 1.0 +871 65 regularizer.weight 0.045395931561294935 +871 65 optimizer.lr 0.02603989377481765 +871 65 training.batch_size 2.0 +871 65 training.label_smoothing 0.10856503297302336 +871 66 model.embedding_dim 2.0 +871 66 model.scoring_fct_norm 2.0 +871 66 regularizer.weight 0.02419303874384231 +871 66 optimizer.lr 0.003024207274162799 +871 66 training.batch_size 1.0 +871 66 training.label_smoothing 0.22434479747517141 +871 67 model.embedding_dim 1.0 +871 67 model.scoring_fct_norm 1.0 +871 67 regularizer.weight 0.16846301388351512 +871 67 optimizer.lr 0.08777001554002128 +871 67 training.batch_size 1.0 +871 67 training.label_smoothing 0.0057733542529337925 +871 68 model.embedding_dim 2.0 +871 68 model.scoring_fct_norm 2.0 +871 68 regularizer.weight 0.07167422331832804 +871 68 optimizer.lr 0.002110349382815136 +871 68 training.batch_size 0.0 +871 68 training.label_smoothing 0.006257446710703379 +871 69 model.embedding_dim 1.0 +871 69 model.scoring_fct_norm 2.0 +871 69 
regularizer.weight 0.018679847821866224 +871 69 optimizer.lr 0.09702314469665832 +871 69 training.batch_size 2.0 +871 69 training.label_smoothing 0.018901520608979573 +871 70 model.embedding_dim 1.0 +871 70 model.scoring_fct_norm 1.0 +871 70 regularizer.weight 0.14096335684013853 +871 70 optimizer.lr 0.0016836681026517158 +871 70 training.batch_size 1.0 +871 70 training.label_smoothing 0.21351199984536082 +871 71 model.embedding_dim 0.0 +871 71 model.scoring_fct_norm 2.0 +871 71 regularizer.weight 0.0263721472426519 +871 71 optimizer.lr 0.03344329227406964 +871 71 training.batch_size 0.0 +871 71 training.label_smoothing 0.05010185275925931 +871 72 model.embedding_dim 2.0 +871 72 model.scoring_fct_norm 2.0 +871 72 regularizer.weight 0.03389119721354115 +871 72 optimizer.lr 0.002263522262887221 +871 72 training.batch_size 2.0 +871 72 training.label_smoothing 0.016565931240872078 +871 73 model.embedding_dim 1.0 +871 73 model.scoring_fct_norm 2.0 +871 73 regularizer.weight 0.014245434272336096 +871 73 optimizer.lr 0.016397030785010074 +871 73 training.batch_size 2.0 +871 73 training.label_smoothing 0.009109040105446064 +871 74 model.embedding_dim 2.0 +871 74 model.scoring_fct_norm 1.0 +871 74 regularizer.weight 0.24562393719224607 +871 74 optimizer.lr 0.001271492865141331 +871 74 training.batch_size 0.0 +871 74 training.label_smoothing 0.005385893654879166 +871 75 model.embedding_dim 1.0 +871 75 model.scoring_fct_norm 2.0 +871 75 regularizer.weight 0.15880263169695713 +871 75 optimizer.lr 0.001973161137032792 +871 75 training.batch_size 1.0 +871 75 training.label_smoothing 0.6394062092486382 +871 76 model.embedding_dim 2.0 +871 76 model.scoring_fct_norm 1.0 +871 76 regularizer.weight 0.09686050074195356 +871 76 optimizer.lr 0.006896650911695592 +871 76 training.batch_size 2.0 +871 76 training.label_smoothing 0.23416603389391794 +871 77 model.embedding_dim 1.0 +871 77 model.scoring_fct_norm 1.0 +871 77 regularizer.weight 0.0531665407518376 +871 77 optimizer.lr 
0.004754077764281731 +871 77 training.batch_size 2.0 +871 77 training.label_smoothing 0.001160735473087953 +871 78 model.embedding_dim 2.0 +871 78 model.scoring_fct_norm 2.0 +871 78 regularizer.weight 0.04235910358944383 +871 78 optimizer.lr 0.07508482502473447 +871 78 training.batch_size 2.0 +871 78 training.label_smoothing 0.07120404220869939 +871 79 model.embedding_dim 1.0 +871 79 model.scoring_fct_norm 1.0 +871 79 regularizer.weight 0.011917639103127048 +871 79 optimizer.lr 0.008429742208746233 +871 79 training.batch_size 2.0 +871 79 training.label_smoothing 0.17178812525841664 +871 80 model.embedding_dim 0.0 +871 80 model.scoring_fct_norm 1.0 +871 80 regularizer.weight 0.13713180404914913 +871 80 optimizer.lr 0.02048888702565154 +871 80 training.batch_size 0.0 +871 80 training.label_smoothing 0.005347487599028891 +871 81 model.embedding_dim 1.0 +871 81 model.scoring_fct_norm 2.0 +871 81 regularizer.weight 0.03531545071575782 +871 81 optimizer.lr 0.0014446617977956328 +871 81 training.batch_size 0.0 +871 81 training.label_smoothing 0.3211559868648083 +871 82 model.embedding_dim 0.0 +871 82 model.scoring_fct_norm 2.0 +871 82 regularizer.weight 0.09165677213909787 +871 82 optimizer.lr 0.05622781974013419 +871 82 training.batch_size 0.0 +871 82 training.label_smoothing 0.004320328519483717 +871 83 model.embedding_dim 0.0 +871 83 model.scoring_fct_norm 1.0 +871 83 regularizer.weight 0.22860908486975098 +871 83 optimizer.lr 0.039279513930471736 +871 83 training.batch_size 2.0 +871 83 training.label_smoothing 0.003893373566672639 +871 84 model.embedding_dim 1.0 +871 84 model.scoring_fct_norm 1.0 +871 84 regularizer.weight 0.018386476746697838 +871 84 optimizer.lr 0.004864892326742659 +871 84 training.batch_size 1.0 +871 84 training.label_smoothing 0.006850520941446358 +871 85 model.embedding_dim 2.0 +871 85 model.scoring_fct_norm 2.0 +871 85 regularizer.weight 0.18875049239260902 +871 85 optimizer.lr 0.00528741033516821 +871 85 training.batch_size 2.0 +871 85 
training.label_smoothing 0.04178297753216786 +871 86 model.embedding_dim 1.0 +871 86 model.scoring_fct_norm 2.0 +871 86 regularizer.weight 0.1703364545687023 +871 86 optimizer.lr 0.005131572366261128 +871 86 training.batch_size 1.0 +871 86 training.label_smoothing 0.2150003472097389 +871 87 model.embedding_dim 0.0 +871 87 model.scoring_fct_norm 1.0 +871 87 regularizer.weight 0.11583000561140895 +871 87 optimizer.lr 0.04991809754369758 +871 87 training.batch_size 2.0 +871 87 training.label_smoothing 0.012107652927760582 +871 88 model.embedding_dim 2.0 +871 88 model.scoring_fct_norm 1.0 +871 88 regularizer.weight 0.10577226282811306 +871 88 optimizer.lr 0.011154938217334811 +871 88 training.batch_size 2.0 +871 88 training.label_smoothing 0.01787690424468646 +871 89 model.embedding_dim 1.0 +871 89 model.scoring_fct_norm 1.0 +871 89 regularizer.weight 0.011920561921629297 +871 89 optimizer.lr 0.002319419247363701 +871 89 training.batch_size 1.0 +871 89 training.label_smoothing 0.0010593959882925239 +871 90 model.embedding_dim 0.0 +871 90 model.scoring_fct_norm 2.0 +871 90 regularizer.weight 0.2470522486462534 +871 90 optimizer.lr 0.020880097667126982 +871 90 training.batch_size 2.0 +871 90 training.label_smoothing 0.0067598972788249735 +871 91 model.embedding_dim 1.0 +871 91 model.scoring_fct_norm 2.0 +871 91 regularizer.weight 0.0662922517126615 +871 91 optimizer.lr 0.0011369605623478083 +871 91 training.batch_size 2.0 +871 91 training.label_smoothing 0.398289049639222 +871 92 model.embedding_dim 0.0 +871 92 model.scoring_fct_norm 1.0 +871 92 regularizer.weight 0.0668086338491683 +871 92 optimizer.lr 0.01328929831178315 +871 92 training.batch_size 0.0 +871 92 training.label_smoothing 0.0970107872180863 +871 93 model.embedding_dim 1.0 +871 93 model.scoring_fct_norm 1.0 +871 93 regularizer.weight 0.0636980884789565 +871 93 optimizer.lr 0.008767828023743566 +871 93 training.batch_size 2.0 +871 93 training.label_smoothing 0.06454342776885731 +871 94 model.embedding_dim 
2.0 +871 94 model.scoring_fct_norm 1.0 +871 94 regularizer.weight 0.02251539826135197 +871 94 optimizer.lr 0.0384582109596325 +871 94 training.batch_size 1.0 +871 94 training.label_smoothing 0.01812113503688827 +871 95 model.embedding_dim 2.0 +871 95 model.scoring_fct_norm 2.0 +871 95 regularizer.weight 0.014208679916636949 +871 95 optimizer.lr 0.0013549805684367087 +871 95 training.batch_size 0.0 +871 95 training.label_smoothing 0.039480159802982896 +871 96 model.embedding_dim 2.0 +871 96 model.scoring_fct_norm 1.0 +871 96 regularizer.weight 0.08659319602699522 +871 96 optimizer.lr 0.03390514008014335 +871 96 training.batch_size 1.0 +871 96 training.label_smoothing 0.7019692194173582 +871 97 model.embedding_dim 2.0 +871 97 model.scoring_fct_norm 2.0 +871 97 regularizer.weight 0.08110330725008923 +871 97 optimizer.lr 0.05307067124703797 +871 97 training.batch_size 0.0 +871 97 training.label_smoothing 0.004424072521890557 +871 98 model.embedding_dim 1.0 +871 98 model.scoring_fct_norm 2.0 +871 98 regularizer.weight 0.020300560434176735 +871 98 optimizer.lr 0.003766447791905452 +871 98 training.batch_size 0.0 +871 98 training.label_smoothing 0.003928318301001086 +871 99 model.embedding_dim 0.0 +871 99 model.scoring_fct_norm 1.0 +871 99 regularizer.weight 0.010759526448411069 +871 99 optimizer.lr 0.011431145808014999 +871 99 training.batch_size 0.0 +871 99 training.label_smoothing 0.0035607146328926983 +871 100 model.embedding_dim 1.0 +871 100 model.scoring_fct_norm 1.0 +871 100 regularizer.weight 0.014259592996060302 +871 100 optimizer.lr 0.02191348335088289 +871 100 training.batch_size 2.0 +871 100 training.label_smoothing 0.007037517673438835 +871 1 dataset """kinships""" +871 1 model """transh""" +871 1 loss """bceaftersigmoid""" +871 1 regularizer """transh""" +871 1 optimizer """adam""" +871 1 training_loop """lcwa""" +871 1 evaluator """rankbased""" +871 2 dataset """kinships""" +871 2 model """transh""" +871 2 loss """bceaftersigmoid""" +871 2 regularizer 
"""transh""" +871 2 optimizer """adam""" +871 2 training_loop """lcwa""" +871 2 evaluator """rankbased""" +871 3 dataset """kinships""" +871 3 model """transh""" +871 3 loss """bceaftersigmoid""" +871 3 regularizer """transh""" +871 3 optimizer """adam""" +871 3 training_loop """lcwa""" +871 3 evaluator """rankbased""" +871 4 dataset """kinships""" +871 4 model """transh""" +871 4 loss """bceaftersigmoid""" +871 4 regularizer """transh""" +871 4 optimizer """adam""" +871 4 training_loop """lcwa""" +871 4 evaluator """rankbased""" +871 5 dataset """kinships""" +871 5 model """transh""" +871 5 loss """bceaftersigmoid""" +871 5 regularizer """transh""" +871 5 optimizer """adam""" +871 5 training_loop """lcwa""" +871 5 evaluator """rankbased""" +871 6 dataset """kinships""" +871 6 model """transh""" +871 6 loss """bceaftersigmoid""" +871 6 regularizer """transh""" +871 6 optimizer """adam""" +871 6 training_loop """lcwa""" +871 6 evaluator """rankbased""" +871 7 dataset """kinships""" +871 7 model """transh""" +871 7 loss """bceaftersigmoid""" +871 7 regularizer """transh""" +871 7 optimizer """adam""" +871 7 training_loop """lcwa""" +871 7 evaluator """rankbased""" +871 8 dataset """kinships""" +871 8 model """transh""" +871 8 loss """bceaftersigmoid""" +871 8 regularizer """transh""" +871 8 optimizer """adam""" +871 8 training_loop """lcwa""" +871 8 evaluator """rankbased""" +871 9 dataset """kinships""" +871 9 model """transh""" +871 9 loss """bceaftersigmoid""" +871 9 regularizer """transh""" +871 9 optimizer """adam""" +871 9 training_loop """lcwa""" +871 9 evaluator """rankbased""" +871 10 dataset """kinships""" +871 10 model """transh""" +871 10 loss """bceaftersigmoid""" +871 10 regularizer """transh""" +871 10 optimizer """adam""" +871 10 training_loop """lcwa""" +871 10 evaluator """rankbased""" +871 11 dataset """kinships""" +871 11 model """transh""" +871 11 loss """bceaftersigmoid""" +871 11 regularizer """transh""" +871 11 optimizer """adam""" +871 11 
training_loop """lcwa""" +871 11 evaluator """rankbased""" +871 12 dataset """kinships""" +871 12 model """transh""" +871 12 loss """bceaftersigmoid""" +871 12 regularizer """transh""" +871 12 optimizer """adam""" +871 12 training_loop """lcwa""" +871 12 evaluator """rankbased""" +871 13 dataset """kinships""" +871 13 model """transh""" +871 13 loss """bceaftersigmoid""" +871 13 regularizer """transh""" +871 13 optimizer """adam""" +871 13 training_loop """lcwa""" +871 13 evaluator """rankbased""" +871 14 dataset """kinships""" +871 14 model """transh""" +871 14 loss """bceaftersigmoid""" +871 14 regularizer """transh""" +871 14 optimizer """adam""" +871 14 training_loop """lcwa""" +871 14 evaluator """rankbased""" +871 15 dataset """kinships""" +871 15 model """transh""" +871 15 loss """bceaftersigmoid""" +871 15 regularizer """transh""" +871 15 optimizer """adam""" +871 15 training_loop """lcwa""" +871 15 evaluator """rankbased""" +871 16 dataset """kinships""" +871 16 model """transh""" +871 16 loss """bceaftersigmoid""" +871 16 regularizer """transh""" +871 16 optimizer """adam""" +871 16 training_loop """lcwa""" +871 16 evaluator """rankbased""" +871 17 dataset """kinships""" +871 17 model """transh""" +871 17 loss """bceaftersigmoid""" +871 17 regularizer """transh""" +871 17 optimizer """adam""" +871 17 training_loop """lcwa""" +871 17 evaluator """rankbased""" +871 18 dataset """kinships""" +871 18 model """transh""" +871 18 loss """bceaftersigmoid""" +871 18 regularizer """transh""" +871 18 optimizer """adam""" +871 18 training_loop """lcwa""" +871 18 evaluator """rankbased""" +871 19 dataset """kinships""" +871 19 model """transh""" +871 19 loss """bceaftersigmoid""" +871 19 regularizer """transh""" +871 19 optimizer """adam""" +871 19 training_loop """lcwa""" +871 19 evaluator """rankbased""" +871 20 dataset """kinships""" +871 20 model """transh""" +871 20 loss """bceaftersigmoid""" +871 20 regularizer """transh""" +871 20 optimizer """adam""" +871 20 
training_loop """lcwa""" +871 20 evaluator """rankbased""" +871 21 dataset """kinships""" +871 21 model """transh""" +871 21 loss """bceaftersigmoid""" +871 21 regularizer """transh""" +871 21 optimizer """adam""" +871 21 training_loop """lcwa""" +871 21 evaluator """rankbased""" +871 22 dataset """kinships""" +871 22 model """transh""" +871 22 loss """bceaftersigmoid""" +871 22 regularizer """transh""" +871 22 optimizer """adam""" +871 22 training_loop """lcwa""" +871 22 evaluator """rankbased""" +871 23 dataset """kinships""" +871 23 model """transh""" +871 23 loss """bceaftersigmoid""" +871 23 regularizer """transh""" +871 23 optimizer """adam""" +871 23 training_loop """lcwa""" +871 23 evaluator """rankbased""" +871 24 dataset """kinships""" +871 24 model """transh""" +871 24 loss """bceaftersigmoid""" +871 24 regularizer """transh""" +871 24 optimizer """adam""" +871 24 training_loop """lcwa""" +871 24 evaluator """rankbased""" +871 25 dataset """kinships""" +871 25 model """transh""" +871 25 loss """bceaftersigmoid""" +871 25 regularizer """transh""" +871 25 optimizer """adam""" +871 25 training_loop """lcwa""" +871 25 evaluator """rankbased""" +871 26 dataset """kinships""" +871 26 model """transh""" +871 26 loss """bceaftersigmoid""" +871 26 regularizer """transh""" +871 26 optimizer """adam""" +871 26 training_loop """lcwa""" +871 26 evaluator """rankbased""" +871 27 dataset """kinships""" +871 27 model """transh""" +871 27 loss """bceaftersigmoid""" +871 27 regularizer """transh""" +871 27 optimizer """adam""" +871 27 training_loop """lcwa""" +871 27 evaluator """rankbased""" +871 28 dataset """kinships""" +871 28 model """transh""" +871 28 loss """bceaftersigmoid""" +871 28 regularizer """transh""" +871 28 optimizer """adam""" +871 28 training_loop """lcwa""" +871 28 evaluator """rankbased""" +871 29 dataset """kinships""" +871 29 model """transh""" +871 29 loss """bceaftersigmoid""" +871 29 regularizer """transh""" +871 29 optimizer """adam""" +871 29 
training_loop """lcwa""" +871 29 evaluator """rankbased""" +871 30 dataset """kinships""" +871 30 model """transh""" +871 30 loss """bceaftersigmoid""" +871 30 regularizer """transh""" +871 30 optimizer """adam""" +871 30 training_loop """lcwa""" +871 30 evaluator """rankbased""" +871 31 dataset """kinships""" +871 31 model """transh""" +871 31 loss """bceaftersigmoid""" +871 31 regularizer """transh""" +871 31 optimizer """adam""" +871 31 training_loop """lcwa""" +871 31 evaluator """rankbased""" +871 32 dataset """kinships""" +871 32 model """transh""" +871 32 loss """bceaftersigmoid""" +871 32 regularizer """transh""" +871 32 optimizer """adam""" +871 32 training_loop """lcwa""" +871 32 evaluator """rankbased""" +871 33 dataset """kinships""" +871 33 model """transh""" +871 33 loss """bceaftersigmoid""" +871 33 regularizer """transh""" +871 33 optimizer """adam""" +871 33 training_loop """lcwa""" +871 33 evaluator """rankbased""" +871 34 dataset """kinships""" +871 34 model """transh""" +871 34 loss """bceaftersigmoid""" +871 34 regularizer """transh""" +871 34 optimizer """adam""" +871 34 training_loop """lcwa""" +871 34 evaluator """rankbased""" +871 35 dataset """kinships""" +871 35 model """transh""" +871 35 loss """bceaftersigmoid""" +871 35 regularizer """transh""" +871 35 optimizer """adam""" +871 35 training_loop """lcwa""" +871 35 evaluator """rankbased""" +871 36 dataset """kinships""" +871 36 model """transh""" +871 36 loss """bceaftersigmoid""" +871 36 regularizer """transh""" +871 36 optimizer """adam""" +871 36 training_loop """lcwa""" +871 36 evaluator """rankbased""" +871 37 dataset """kinships""" +871 37 model """transh""" +871 37 loss """bceaftersigmoid""" +871 37 regularizer """transh""" +871 37 optimizer """adam""" +871 37 training_loop """lcwa""" +871 37 evaluator """rankbased""" +871 38 dataset """kinships""" +871 38 model """transh""" +871 38 loss """bceaftersigmoid""" +871 38 regularizer """transh""" +871 38 optimizer """adam""" +871 38 
training_loop """lcwa""" +871 38 evaluator """rankbased""" +871 39 dataset """kinships""" +871 39 model """transh""" +871 39 loss """bceaftersigmoid""" +871 39 regularizer """transh""" +871 39 optimizer """adam""" +871 39 training_loop """lcwa""" +871 39 evaluator """rankbased""" +871 40 dataset """kinships""" +871 40 model """transh""" +871 40 loss """bceaftersigmoid""" +871 40 regularizer """transh""" +871 40 optimizer """adam""" +871 40 training_loop """lcwa""" +871 40 evaluator """rankbased""" +871 41 dataset """kinships""" +871 41 model """transh""" +871 41 loss """bceaftersigmoid""" +871 41 regularizer """transh""" +871 41 optimizer """adam""" +871 41 training_loop """lcwa""" +871 41 evaluator """rankbased""" +871 42 dataset """kinships""" +871 42 model """transh""" +871 42 loss """bceaftersigmoid""" +871 42 regularizer """transh""" +871 42 optimizer """adam""" +871 42 training_loop """lcwa""" +871 42 evaluator """rankbased""" +871 43 dataset """kinships""" +871 43 model """transh""" +871 43 loss """bceaftersigmoid""" +871 43 regularizer """transh""" +871 43 optimizer """adam""" +871 43 training_loop """lcwa""" +871 43 evaluator """rankbased""" +871 44 dataset """kinships""" +871 44 model """transh""" +871 44 loss """bceaftersigmoid""" +871 44 regularizer """transh""" +871 44 optimizer """adam""" +871 44 training_loop """lcwa""" +871 44 evaluator """rankbased""" +871 45 dataset """kinships""" +871 45 model """transh""" +871 45 loss """bceaftersigmoid""" +871 45 regularizer """transh""" +871 45 optimizer """adam""" +871 45 training_loop """lcwa""" +871 45 evaluator """rankbased""" +871 46 dataset """kinships""" +871 46 model """transh""" +871 46 loss """bceaftersigmoid""" +871 46 regularizer """transh""" +871 46 optimizer """adam""" +871 46 training_loop """lcwa""" +871 46 evaluator """rankbased""" +871 47 dataset """kinships""" +871 47 model """transh""" +871 47 loss """bceaftersigmoid""" +871 47 regularizer """transh""" +871 47 optimizer """adam""" +871 47 
training_loop """lcwa""" +871 47 evaluator """rankbased""" +871 48 dataset """kinships""" +871 48 model """transh""" +871 48 loss """bceaftersigmoid""" +871 48 regularizer """transh""" +871 48 optimizer """adam""" +871 48 training_loop """lcwa""" +871 48 evaluator """rankbased""" +871 49 dataset """kinships""" +871 49 model """transh""" +871 49 loss """bceaftersigmoid""" +871 49 regularizer """transh""" +871 49 optimizer """adam""" +871 49 training_loop """lcwa""" +871 49 evaluator """rankbased""" +871 50 dataset """kinships""" +871 50 model """transh""" +871 50 loss """bceaftersigmoid""" +871 50 regularizer """transh""" +871 50 optimizer """adam""" +871 50 training_loop """lcwa""" +871 50 evaluator """rankbased""" +871 51 dataset """kinships""" +871 51 model """transh""" +871 51 loss """bceaftersigmoid""" +871 51 regularizer """transh""" +871 51 optimizer """adam""" +871 51 training_loop """lcwa""" +871 51 evaluator """rankbased""" +871 52 dataset """kinships""" +871 52 model """transh""" +871 52 loss """bceaftersigmoid""" +871 52 regularizer """transh""" +871 52 optimizer """adam""" +871 52 training_loop """lcwa""" +871 52 evaluator """rankbased""" +871 53 dataset """kinships""" +871 53 model """transh""" +871 53 loss """bceaftersigmoid""" +871 53 regularizer """transh""" +871 53 optimizer """adam""" +871 53 training_loop """lcwa""" +871 53 evaluator """rankbased""" +871 54 dataset """kinships""" +871 54 model """transh""" +871 54 loss """bceaftersigmoid""" +871 54 regularizer """transh""" +871 54 optimizer """adam""" +871 54 training_loop """lcwa""" +871 54 evaluator """rankbased""" +871 55 dataset """kinships""" +871 55 model """transh""" +871 55 loss """bceaftersigmoid""" +871 55 regularizer """transh""" +871 55 optimizer """adam""" +871 55 training_loop """lcwa""" +871 55 evaluator """rankbased""" +871 56 dataset """kinships""" +871 56 model """transh""" +871 56 loss """bceaftersigmoid""" +871 56 regularizer """transh""" +871 56 optimizer """adam""" +871 56 
training_loop """lcwa""" +871 56 evaluator """rankbased""" +871 57 dataset """kinships""" +871 57 model """transh""" +871 57 loss """bceaftersigmoid""" +871 57 regularizer """transh""" +871 57 optimizer """adam""" +871 57 training_loop """lcwa""" +871 57 evaluator """rankbased""" +871 58 dataset """kinships""" +871 58 model """transh""" +871 58 loss """bceaftersigmoid""" +871 58 regularizer """transh""" +871 58 optimizer """adam""" +871 58 training_loop """lcwa""" +871 58 evaluator """rankbased""" +871 59 dataset """kinships""" +871 59 model """transh""" +871 59 loss """bceaftersigmoid""" +871 59 regularizer """transh""" +871 59 optimizer """adam""" +871 59 training_loop """lcwa""" +871 59 evaluator """rankbased""" +871 60 dataset """kinships""" +871 60 model """transh""" +871 60 loss """bceaftersigmoid""" +871 60 regularizer """transh""" +871 60 optimizer """adam""" +871 60 training_loop """lcwa""" +871 60 evaluator """rankbased""" +871 61 dataset """kinships""" +871 61 model """transh""" +871 61 loss """bceaftersigmoid""" +871 61 regularizer """transh""" +871 61 optimizer """adam""" +871 61 training_loop """lcwa""" +871 61 evaluator """rankbased""" +871 62 dataset """kinships""" +871 62 model """transh""" +871 62 loss """bceaftersigmoid""" +871 62 regularizer """transh""" +871 62 optimizer """adam""" +871 62 training_loop """lcwa""" +871 62 evaluator """rankbased""" +871 63 dataset """kinships""" +871 63 model """transh""" +871 63 loss """bceaftersigmoid""" +871 63 regularizer """transh""" +871 63 optimizer """adam""" +871 63 training_loop """lcwa""" +871 63 evaluator """rankbased""" +871 64 dataset """kinships""" +871 64 model """transh""" +871 64 loss """bceaftersigmoid""" +871 64 regularizer """transh""" +871 64 optimizer """adam""" +871 64 training_loop """lcwa""" +871 64 evaluator """rankbased""" +871 65 dataset """kinships""" +871 65 model """transh""" +871 65 loss """bceaftersigmoid""" +871 65 regularizer """transh""" +871 65 optimizer """adam""" +871 65 
training_loop """lcwa""" +871 65 evaluator """rankbased""" +871 66 dataset """kinships""" +871 66 model """transh""" +871 66 loss """bceaftersigmoid""" +871 66 regularizer """transh""" +871 66 optimizer """adam""" +871 66 training_loop """lcwa""" +871 66 evaluator """rankbased""" +871 67 dataset """kinships""" +871 67 model """transh""" +871 67 loss """bceaftersigmoid""" +871 67 regularizer """transh""" +871 67 optimizer """adam""" +871 67 training_loop """lcwa""" +871 67 evaluator """rankbased""" +871 68 dataset """kinships""" +871 68 model """transh""" +871 68 loss """bceaftersigmoid""" +871 68 regularizer """transh""" +871 68 optimizer """adam""" +871 68 training_loop """lcwa""" +871 68 evaluator """rankbased""" +871 69 dataset """kinships""" +871 69 model """transh""" +871 69 loss """bceaftersigmoid""" +871 69 regularizer """transh""" +871 69 optimizer """adam""" +871 69 training_loop """lcwa""" +871 69 evaluator """rankbased""" +871 70 dataset """kinships""" +871 70 model """transh""" +871 70 loss """bceaftersigmoid""" +871 70 regularizer """transh""" +871 70 optimizer """adam""" +871 70 training_loop """lcwa""" +871 70 evaluator """rankbased""" +871 71 dataset """kinships""" +871 71 model """transh""" +871 71 loss """bceaftersigmoid""" +871 71 regularizer """transh""" +871 71 optimizer """adam""" +871 71 training_loop """lcwa""" +871 71 evaluator """rankbased""" +871 72 dataset """kinships""" +871 72 model """transh""" +871 72 loss """bceaftersigmoid""" +871 72 regularizer """transh""" +871 72 optimizer """adam""" +871 72 training_loop """lcwa""" +871 72 evaluator """rankbased""" +871 73 dataset """kinships""" +871 73 model """transh""" +871 73 loss """bceaftersigmoid""" +871 73 regularizer """transh""" +871 73 optimizer """adam""" +871 73 training_loop """lcwa""" +871 73 evaluator """rankbased""" +871 74 dataset """kinships""" +871 74 model """transh""" +871 74 loss """bceaftersigmoid""" +871 74 regularizer """transh""" +871 74 optimizer """adam""" +871 74 
training_loop """lcwa""" +871 74 evaluator """rankbased""" +871 75 dataset """kinships""" +871 75 model """transh""" +871 75 loss """bceaftersigmoid""" +871 75 regularizer """transh""" +871 75 optimizer """adam""" +871 75 training_loop """lcwa""" +871 75 evaluator """rankbased""" +871 76 dataset """kinships""" +871 76 model """transh""" +871 76 loss """bceaftersigmoid""" +871 76 regularizer """transh""" +871 76 optimizer """adam""" +871 76 training_loop """lcwa""" +871 76 evaluator """rankbased""" +871 77 dataset """kinships""" +871 77 model """transh""" +871 77 loss """bceaftersigmoid""" +871 77 regularizer """transh""" +871 77 optimizer """adam""" +871 77 training_loop """lcwa""" +871 77 evaluator """rankbased""" +871 78 dataset """kinships""" +871 78 model """transh""" +871 78 loss """bceaftersigmoid""" +871 78 regularizer """transh""" +871 78 optimizer """adam""" +871 78 training_loop """lcwa""" +871 78 evaluator """rankbased""" +871 79 dataset """kinships""" +871 79 model """transh""" +871 79 loss """bceaftersigmoid""" +871 79 regularizer """transh""" +871 79 optimizer """adam""" +871 79 training_loop """lcwa""" +871 79 evaluator """rankbased""" +871 80 dataset """kinships""" +871 80 model """transh""" +871 80 loss """bceaftersigmoid""" +871 80 regularizer """transh""" +871 80 optimizer """adam""" +871 80 training_loop """lcwa""" +871 80 evaluator """rankbased""" +871 81 dataset """kinships""" +871 81 model """transh""" +871 81 loss """bceaftersigmoid""" +871 81 regularizer """transh""" +871 81 optimizer """adam""" +871 81 training_loop """lcwa""" +871 81 evaluator """rankbased""" +871 82 dataset """kinships""" +871 82 model """transh""" +871 82 loss """bceaftersigmoid""" +871 82 regularizer """transh""" +871 82 optimizer """adam""" +871 82 training_loop """lcwa""" +871 82 evaluator """rankbased""" +871 83 dataset """kinships""" +871 83 model """transh""" +871 83 loss """bceaftersigmoid""" +871 83 regularizer """transh""" +871 83 optimizer """adam""" +871 83 
training_loop """lcwa""" +871 83 evaluator """rankbased""" +871 84 dataset """kinships""" +871 84 model """transh""" +871 84 loss """bceaftersigmoid""" +871 84 regularizer """transh""" +871 84 optimizer """adam""" +871 84 training_loop """lcwa""" +871 84 evaluator """rankbased""" +871 85 dataset """kinships""" +871 85 model """transh""" +871 85 loss """bceaftersigmoid""" +871 85 regularizer """transh""" +871 85 optimizer """adam""" +871 85 training_loop """lcwa""" +871 85 evaluator """rankbased""" +871 86 dataset """kinships""" +871 86 model """transh""" +871 86 loss """bceaftersigmoid""" +871 86 regularizer """transh""" +871 86 optimizer """adam""" +871 86 training_loop """lcwa""" +871 86 evaluator """rankbased""" +871 87 dataset """kinships""" +871 87 model """transh""" +871 87 loss """bceaftersigmoid""" +871 87 regularizer """transh""" +871 87 optimizer """adam""" +871 87 training_loop """lcwa""" +871 87 evaluator """rankbased""" +871 88 dataset """kinships""" +871 88 model """transh""" +871 88 loss """bceaftersigmoid""" +871 88 regularizer """transh""" +871 88 optimizer """adam""" +871 88 training_loop """lcwa""" +871 88 evaluator """rankbased""" +871 89 dataset """kinships""" +871 89 model """transh""" +871 89 loss """bceaftersigmoid""" +871 89 regularizer """transh""" +871 89 optimizer """adam""" +871 89 training_loop """lcwa""" +871 89 evaluator """rankbased""" +871 90 dataset """kinships""" +871 90 model """transh""" +871 90 loss """bceaftersigmoid""" +871 90 regularizer """transh""" +871 90 optimizer """adam""" +871 90 training_loop """lcwa""" +871 90 evaluator """rankbased""" +871 91 dataset """kinships""" +871 91 model """transh""" +871 91 loss """bceaftersigmoid""" +871 91 regularizer """transh""" +871 91 optimizer """adam""" +871 91 training_loop """lcwa""" +871 91 evaluator """rankbased""" +871 92 dataset """kinships""" +871 92 model """transh""" +871 92 loss """bceaftersigmoid""" +871 92 regularizer """transh""" +871 92 optimizer """adam""" +871 92 
training_loop """lcwa""" +871 92 evaluator """rankbased""" +871 93 dataset """kinships""" +871 93 model """transh""" +871 93 loss """bceaftersigmoid""" +871 93 regularizer """transh""" +871 93 optimizer """adam""" +871 93 training_loop """lcwa""" +871 93 evaluator """rankbased""" +871 94 dataset """kinships""" +871 94 model """transh""" +871 94 loss """bceaftersigmoid""" +871 94 regularizer """transh""" +871 94 optimizer """adam""" +871 94 training_loop """lcwa""" +871 94 evaluator """rankbased""" +871 95 dataset """kinships""" +871 95 model """transh""" +871 95 loss """bceaftersigmoid""" +871 95 regularizer """transh""" +871 95 optimizer """adam""" +871 95 training_loop """lcwa""" +871 95 evaluator """rankbased""" +871 96 dataset """kinships""" +871 96 model """transh""" +871 96 loss """bceaftersigmoid""" +871 96 regularizer """transh""" +871 96 optimizer """adam""" +871 96 training_loop """lcwa""" +871 96 evaluator """rankbased""" +871 97 dataset """kinships""" +871 97 model """transh""" +871 97 loss """bceaftersigmoid""" +871 97 regularizer """transh""" +871 97 optimizer """adam""" +871 97 training_loop """lcwa""" +871 97 evaluator """rankbased""" +871 98 dataset """kinships""" +871 98 model """transh""" +871 98 loss """bceaftersigmoid""" +871 98 regularizer """transh""" +871 98 optimizer """adam""" +871 98 training_loop """lcwa""" +871 98 evaluator """rankbased""" +871 99 dataset """kinships""" +871 99 model """transh""" +871 99 loss """bceaftersigmoid""" +871 99 regularizer """transh""" +871 99 optimizer """adam""" +871 99 training_loop """lcwa""" +871 99 evaluator """rankbased""" +871 100 dataset """kinships""" +871 100 model """transh""" +871 100 loss """bceaftersigmoid""" +871 100 regularizer """transh""" +871 100 optimizer """adam""" +871 100 training_loop """lcwa""" +871 100 evaluator """rankbased""" +872 1 model.embedding_dim 2.0 +872 1 model.scoring_fct_norm 2.0 +872 1 regularizer.weight 0.08344857184591224 +872 1 optimizer.lr 0.05974274321879497 +872 1 
training.batch_size 1.0 +872 1 training.label_smoothing 0.02762834806784398 +872 2 model.embedding_dim 0.0 +872 2 model.scoring_fct_norm 1.0 +872 2 regularizer.weight 0.26751729078085834 +872 2 optimizer.lr 0.002121242782509806 +872 2 training.batch_size 0.0 +872 2 training.label_smoothing 0.018352725563873074 +872 3 model.embedding_dim 2.0 +872 3 model.scoring_fct_norm 1.0 +872 3 regularizer.weight 0.0152443907218205 +872 3 optimizer.lr 0.0043252689360768525 +872 3 training.batch_size 2.0 +872 3 training.label_smoothing 0.05357045556155822 +872 4 model.embedding_dim 1.0 +872 4 model.scoring_fct_norm 2.0 +872 4 regularizer.weight 0.02067658284947952 +872 4 optimizer.lr 0.025390565711448893 +872 4 training.batch_size 0.0 +872 4 training.label_smoothing 0.0015824795644972338 +872 5 model.embedding_dim 2.0 +872 5 model.scoring_fct_norm 2.0 +872 5 regularizer.weight 0.1719193790093002 +872 5 optimizer.lr 0.02311157316542063 +872 5 training.batch_size 0.0 +872 5 training.label_smoothing 0.007443492318532696 +872 6 model.embedding_dim 2.0 +872 6 model.scoring_fct_norm 1.0 +872 6 regularizer.weight 0.04516738014943769 +872 6 optimizer.lr 0.01069661490841419 +872 6 training.batch_size 0.0 +872 6 training.label_smoothing 0.7271693391131013 +872 7 model.embedding_dim 0.0 +872 7 model.scoring_fct_norm 2.0 +872 7 regularizer.weight 0.03632369216575881 +872 7 optimizer.lr 0.0017590498373301435 +872 7 training.batch_size 1.0 +872 7 training.label_smoothing 0.00354856276252909 +872 8 model.embedding_dim 0.0 +872 8 model.scoring_fct_norm 2.0 +872 8 regularizer.weight 0.024347717341023416 +872 8 optimizer.lr 0.0015849530993104059 +872 8 training.batch_size 1.0 +872 8 training.label_smoothing 0.3225360540388397 +872 9 model.embedding_dim 1.0 +872 9 model.scoring_fct_norm 2.0 +872 9 regularizer.weight 0.019608934959327206 +872 9 optimizer.lr 0.0203051220349168 +872 9 training.batch_size 0.0 +872 9 training.label_smoothing 0.001036252511455953 +872 10 model.embedding_dim 1.0 +872 10 
model.scoring_fct_norm 2.0 +872 10 regularizer.weight 0.019584812171319004 +872 10 optimizer.lr 0.025551981555068472 +872 10 training.batch_size 0.0 +872 10 training.label_smoothing 0.10321768791758043 +872 11 model.embedding_dim 1.0 +872 11 model.scoring_fct_norm 2.0 +872 11 regularizer.weight 0.03180332521926308 +872 11 optimizer.lr 0.02778768202925843 +872 11 training.batch_size 0.0 +872 11 training.label_smoothing 0.0036457329561450295 +872 12 model.embedding_dim 0.0 +872 12 model.scoring_fct_norm 1.0 +872 12 regularizer.weight 0.20800106173331473 +872 12 optimizer.lr 0.03580009632197542 +872 12 training.batch_size 1.0 +872 12 training.label_smoothing 0.07866927394298745 +872 13 model.embedding_dim 2.0 +872 13 model.scoring_fct_norm 1.0 +872 13 regularizer.weight 0.050657640380254804 +872 13 optimizer.lr 0.00296968638138223 +872 13 training.batch_size 0.0 +872 13 training.label_smoothing 0.22690952921277366 +872 14 model.embedding_dim 1.0 +872 14 model.scoring_fct_norm 2.0 +872 14 regularizer.weight 0.19051508822677235 +872 14 optimizer.lr 0.0012772262857092106 +872 14 training.batch_size 0.0 +872 14 training.label_smoothing 0.6857377563737999 +872 15 model.embedding_dim 1.0 +872 15 model.scoring_fct_norm 1.0 +872 15 regularizer.weight 0.09778418425781486 +872 15 optimizer.lr 0.05457310076937663 +872 15 training.batch_size 2.0 +872 15 training.label_smoothing 0.06733237250554079 +872 16 model.embedding_dim 0.0 +872 16 model.scoring_fct_norm 1.0 +872 16 regularizer.weight 0.029665440994255173 +872 16 optimizer.lr 0.014731827529788744 +872 16 training.batch_size 2.0 +872 16 training.label_smoothing 0.003738016965794302 +872 17 model.embedding_dim 0.0 +872 17 model.scoring_fct_norm 2.0 +872 17 regularizer.weight 0.049347678392785595 +872 17 optimizer.lr 0.0016802495073725242 +872 17 training.batch_size 1.0 +872 17 training.label_smoothing 0.01684028185424749 +872 18 model.embedding_dim 1.0 +872 18 model.scoring_fct_norm 2.0 +872 18 regularizer.weight 
0.02299953446039714 +872 18 optimizer.lr 0.011801295672370577 +872 18 training.batch_size 1.0 +872 18 training.label_smoothing 0.004387855189720944 +872 19 model.embedding_dim 2.0 +872 19 model.scoring_fct_norm 1.0 +872 19 regularizer.weight 0.200424194671676 +872 19 optimizer.lr 0.019198355704167367 +872 19 training.batch_size 0.0 +872 19 training.label_smoothing 0.025858622549616815 +872 20 model.embedding_dim 1.0 +872 20 model.scoring_fct_norm 2.0 +872 20 regularizer.weight 0.03878407969674955 +872 20 optimizer.lr 0.015784209348328092 +872 20 training.batch_size 0.0 +872 20 training.label_smoothing 0.010599184592006828 +872 21 model.embedding_dim 0.0 +872 21 model.scoring_fct_norm 2.0 +872 21 regularizer.weight 0.14621831740202942 +872 21 optimizer.lr 0.005771795550318506 +872 21 training.batch_size 1.0 +872 21 training.label_smoothing 0.8205693038273125 +872 22 model.embedding_dim 2.0 +872 22 model.scoring_fct_norm 1.0 +872 22 regularizer.weight 0.020271990185347477 +872 22 optimizer.lr 0.003443827396017417 +872 22 training.batch_size 2.0 +872 22 training.label_smoothing 0.018899378742573514 +872 23 model.embedding_dim 2.0 +872 23 model.scoring_fct_norm 2.0 +872 23 regularizer.weight 0.02671393615883798 +872 23 optimizer.lr 0.038229807731095355 +872 23 training.batch_size 2.0 +872 23 training.label_smoothing 0.014918604720331468 +872 24 model.embedding_dim 2.0 +872 24 model.scoring_fct_norm 2.0 +872 24 regularizer.weight 0.010048407048831939 +872 24 optimizer.lr 0.09000710393307547 +872 24 training.batch_size 0.0 +872 24 training.label_smoothing 0.004339360528585232 +872 25 model.embedding_dim 0.0 +872 25 model.scoring_fct_norm 1.0 +872 25 regularizer.weight 0.024949674197779173 +872 25 optimizer.lr 0.00758409565092215 +872 25 training.batch_size 1.0 +872 25 training.label_smoothing 0.0030983636777516576 +872 26 model.embedding_dim 0.0 +872 26 model.scoring_fct_norm 2.0 +872 26 regularizer.weight 0.09719593977138674 +872 26 optimizer.lr 0.005052234128318494 
+872 26 training.batch_size 1.0 +872 26 training.label_smoothing 0.28000313381142505 +872 27 model.embedding_dim 2.0 +872 27 model.scoring_fct_norm 2.0 +872 27 regularizer.weight 0.20817552287558075 +872 27 optimizer.lr 0.07365971871694613 +872 27 training.batch_size 0.0 +872 27 training.label_smoothing 0.06587270540726066 +872 28 model.embedding_dim 2.0 +872 28 model.scoring_fct_norm 1.0 +872 28 regularizer.weight 0.1413613075075203 +872 28 optimizer.lr 0.004079676115071601 +872 28 training.batch_size 0.0 +872 28 training.label_smoothing 0.09235393895495872 +872 29 model.embedding_dim 2.0 +872 29 model.scoring_fct_norm 1.0 +872 29 regularizer.weight 0.07046393318742572 +872 29 optimizer.lr 0.004094005286765502 +872 29 training.batch_size 2.0 +872 29 training.label_smoothing 0.012385095059876823 +872 30 model.embedding_dim 1.0 +872 30 model.scoring_fct_norm 2.0 +872 30 regularizer.weight 0.055459537612132424 +872 30 optimizer.lr 0.012947784485933918 +872 30 training.batch_size 2.0 +872 30 training.label_smoothing 0.2616700532021776 +872 31 model.embedding_dim 1.0 +872 31 model.scoring_fct_norm 1.0 +872 31 regularizer.weight 0.05583311980966801 +872 31 optimizer.lr 0.0482600537143391 +872 31 training.batch_size 1.0 +872 31 training.label_smoothing 0.004080713716657149 +872 32 model.embedding_dim 2.0 +872 32 model.scoring_fct_norm 2.0 +872 32 regularizer.weight 0.29971242159111255 +872 32 optimizer.lr 0.002079896602298195 +872 32 training.batch_size 2.0 +872 32 training.label_smoothing 0.008517462201189461 +872 33 model.embedding_dim 2.0 +872 33 model.scoring_fct_norm 1.0 +872 33 regularizer.weight 0.05915195396256172 +872 33 optimizer.lr 0.008779733751528797 +872 33 training.batch_size 1.0 +872 33 training.label_smoothing 0.0778209670494637 +872 34 model.embedding_dim 2.0 +872 34 model.scoring_fct_norm 2.0 +872 34 regularizer.weight 0.20613834350651164 +872 34 optimizer.lr 0.0014922881126664782 +872 34 training.batch_size 2.0 +872 34 training.label_smoothing 
0.07413392563538534 +872 35 model.embedding_dim 1.0 +872 35 model.scoring_fct_norm 2.0 +872 35 regularizer.weight 0.16754623362354013 +872 35 optimizer.lr 0.053701568911928066 +872 35 training.batch_size 1.0 +872 35 training.label_smoothing 0.0010371661475475124 +872 36 model.embedding_dim 0.0 +872 36 model.scoring_fct_norm 1.0 +872 36 regularizer.weight 0.018080848268204577 +872 36 optimizer.lr 0.0016419545320497558 +872 36 training.batch_size 0.0 +872 36 training.label_smoothing 0.05574171356872344 +872 37 model.embedding_dim 1.0 +872 37 model.scoring_fct_norm 2.0 +872 37 regularizer.weight 0.018388874451353173 +872 37 optimizer.lr 0.011543900922849999 +872 37 training.batch_size 0.0 +872 37 training.label_smoothing 0.9943752281743296 +872 38 model.embedding_dim 0.0 +872 38 model.scoring_fct_norm 1.0 +872 38 regularizer.weight 0.03920514399773353 +872 38 optimizer.lr 0.005711245498586353 +872 38 training.batch_size 1.0 +872 38 training.label_smoothing 0.2088026851434016 +872 39 model.embedding_dim 2.0 +872 39 model.scoring_fct_norm 2.0 +872 39 regularizer.weight 0.04884323013918633 +872 39 optimizer.lr 0.022351423168631913 +872 39 training.batch_size 1.0 +872 39 training.label_smoothing 0.6429289689634301 +872 40 model.embedding_dim 0.0 +872 40 model.scoring_fct_norm 1.0 +872 40 regularizer.weight 0.06535260160169744 +872 40 optimizer.lr 0.0024386529618291913 +872 40 training.batch_size 2.0 +872 40 training.label_smoothing 0.02649095338353653 +872 41 model.embedding_dim 2.0 +872 41 model.scoring_fct_norm 1.0 +872 41 regularizer.weight 0.23387799310834878 +872 41 optimizer.lr 0.0028746001505516043 +872 41 training.batch_size 1.0 +872 41 training.label_smoothing 0.7176193532403424 +872 42 model.embedding_dim 2.0 +872 42 model.scoring_fct_norm 1.0 +872 42 regularizer.weight 0.17058739918580249 +872 42 optimizer.lr 0.00863661868175371 +872 42 training.batch_size 0.0 +872 42 training.label_smoothing 0.0015388332075132142 +872 43 model.embedding_dim 1.0 +872 43 
model.scoring_fct_norm 1.0 +872 43 regularizer.weight 0.18031800687699992 +872 43 optimizer.lr 0.006703823778341941 +872 43 training.batch_size 0.0 +872 43 training.label_smoothing 0.0029507057462964275 +872 44 model.embedding_dim 2.0 +872 44 model.scoring_fct_norm 2.0 +872 44 regularizer.weight 0.02801060461009573 +872 44 optimizer.lr 0.0571773023676632 +872 44 training.batch_size 2.0 +872 44 training.label_smoothing 0.6225015959777583 +872 45 model.embedding_dim 1.0 +872 45 model.scoring_fct_norm 2.0 +872 45 regularizer.weight 0.06146693720956608 +872 45 optimizer.lr 0.08283225460854418 +872 45 training.batch_size 2.0 +872 45 training.label_smoothing 0.49312909799225335 +872 46 model.embedding_dim 0.0 +872 46 model.scoring_fct_norm 2.0 +872 46 regularizer.weight 0.012055014079536668 +872 46 optimizer.lr 0.0021158461194981013 +872 46 training.batch_size 1.0 +872 46 training.label_smoothing 0.9617945412950072 +872 47 model.embedding_dim 0.0 +872 47 model.scoring_fct_norm 2.0 +872 47 regularizer.weight 0.010112415459495582 +872 47 optimizer.lr 0.0020690539366059134 +872 47 training.batch_size 1.0 +872 47 training.label_smoothing 0.0109901350189291 +872 48 model.embedding_dim 0.0 +872 48 model.scoring_fct_norm 2.0 +872 48 regularizer.weight 0.036823534560530585 +872 48 optimizer.lr 0.030660155483400366 +872 48 training.batch_size 0.0 +872 48 training.label_smoothing 0.042779913189750315 +872 49 model.embedding_dim 1.0 +872 49 model.scoring_fct_norm 2.0 +872 49 regularizer.weight 0.27383322995263676 +872 49 optimizer.lr 0.0046687462068959636 +872 49 training.batch_size 2.0 +872 49 training.label_smoothing 0.05258402558024721 +872 50 model.embedding_dim 1.0 +872 50 model.scoring_fct_norm 1.0 +872 50 regularizer.weight 0.02909918014587603 +872 50 optimizer.lr 0.06444753388893416 +872 50 training.batch_size 1.0 +872 50 training.label_smoothing 0.04886694139837473 +872 51 model.embedding_dim 0.0 +872 51 model.scoring_fct_norm 1.0 +872 51 regularizer.weight 
0.08058248132009704 +872 51 optimizer.lr 0.003004128740591837 +872 51 training.batch_size 2.0 +872 51 training.label_smoothing 0.007395260388861201 +872 52 model.embedding_dim 0.0 +872 52 model.scoring_fct_norm 2.0 +872 52 regularizer.weight 0.01881454542902263 +872 52 optimizer.lr 0.003685737692477775 +872 52 training.batch_size 0.0 +872 52 training.label_smoothing 0.0364644449759177 +872 53 model.embedding_dim 1.0 +872 53 model.scoring_fct_norm 2.0 +872 53 regularizer.weight 0.11929616073603588 +872 53 optimizer.lr 0.0011094174956735776 +872 53 training.batch_size 1.0 +872 53 training.label_smoothing 0.6953666865479243 +872 54 model.embedding_dim 1.0 +872 54 model.scoring_fct_norm 1.0 +872 54 regularizer.weight 0.030265930215864833 +872 54 optimizer.lr 0.006887728985265586 +872 54 training.batch_size 1.0 +872 54 training.label_smoothing 0.16938111168886594 +872 55 model.embedding_dim 2.0 +872 55 model.scoring_fct_norm 2.0 +872 55 regularizer.weight 0.09771684632528385 +872 55 optimizer.lr 0.0028779646139858086 +872 55 training.batch_size 1.0 +872 55 training.label_smoothing 0.043322006378976735 +872 56 model.embedding_dim 2.0 +872 56 model.scoring_fct_norm 1.0 +872 56 regularizer.weight 0.040745264266925915 +872 56 optimizer.lr 0.044811056975351586 +872 56 training.batch_size 1.0 +872 56 training.label_smoothing 0.004111036259933263 +872 57 model.embedding_dim 2.0 +872 57 model.scoring_fct_norm 2.0 +872 57 regularizer.weight 0.10367800556821322 +872 57 optimizer.lr 0.039426867249543064 +872 57 training.batch_size 1.0 +872 57 training.label_smoothing 0.0023749640156353487 +872 58 model.embedding_dim 2.0 +872 58 model.scoring_fct_norm 1.0 +872 58 regularizer.weight 0.2170269008230818 +872 58 optimizer.lr 0.08581160842161813 +872 58 training.batch_size 1.0 +872 58 training.label_smoothing 0.09328346369296148 +872 59 model.embedding_dim 2.0 +872 59 model.scoring_fct_norm 1.0 +872 59 regularizer.weight 0.06844100482471956 +872 59 optimizer.lr 0.004371457106259329 +872 
59 training.batch_size 0.0 +872 59 training.label_smoothing 0.2387853276165412 +872 60 model.embedding_dim 2.0 +872 60 model.scoring_fct_norm 1.0 +872 60 regularizer.weight 0.01780770405273981 +872 60 optimizer.lr 0.03538080462623122 +872 60 training.batch_size 0.0 +872 60 training.label_smoothing 0.0016612691475413454 +872 61 model.embedding_dim 0.0 +872 61 model.scoring_fct_norm 2.0 +872 61 regularizer.weight 0.038550127005515994 +872 61 optimizer.lr 0.00197281125993391 +872 61 training.batch_size 1.0 +872 61 training.label_smoothing 0.35498100619614936 +872 62 model.embedding_dim 0.0 +872 62 model.scoring_fct_norm 1.0 +872 62 regularizer.weight 0.03306842535905058 +872 62 optimizer.lr 0.023151740223269133 +872 62 training.batch_size 2.0 +872 62 training.label_smoothing 0.0018685766315625135 +872 63 model.embedding_dim 2.0 +872 63 model.scoring_fct_norm 1.0 +872 63 regularizer.weight 0.04653695379453822 +872 63 optimizer.lr 0.07261216872016682 +872 63 training.batch_size 0.0 +872 63 training.label_smoothing 0.03203926040190839 +872 64 model.embedding_dim 2.0 +872 64 model.scoring_fct_norm 1.0 +872 64 regularizer.weight 0.02059391021782406 +872 64 optimizer.lr 0.007710215140159525 +872 64 training.batch_size 1.0 +872 64 training.label_smoothing 0.008003682794716898 +872 65 model.embedding_dim 1.0 +872 65 model.scoring_fct_norm 2.0 +872 65 regularizer.weight 0.09601082950783005 +872 65 optimizer.lr 0.0563175664509872 +872 65 training.batch_size 1.0 +872 65 training.label_smoothing 0.013019271478885697 +872 66 model.embedding_dim 2.0 +872 66 model.scoring_fct_norm 2.0 +872 66 regularizer.weight 0.1252393159204968 +872 66 optimizer.lr 0.0010995891234068542 +872 66 training.batch_size 1.0 +872 66 training.label_smoothing 0.014835911074032863 +872 67 model.embedding_dim 2.0 +872 67 model.scoring_fct_norm 2.0 +872 67 regularizer.weight 0.06115189593329877 +872 67 optimizer.lr 0.0014610621729712829 +872 67 training.batch_size 0.0 +872 67 training.label_smoothing 
0.0791044294001675 +872 68 model.embedding_dim 0.0 +872 68 model.scoring_fct_norm 1.0 +872 68 regularizer.weight 0.08034640810212358 +872 68 optimizer.lr 0.0025344037023561135 +872 68 training.batch_size 2.0 +872 68 training.label_smoothing 0.003349895578798949 +872 69 model.embedding_dim 2.0 +872 69 model.scoring_fct_norm 2.0 +872 69 regularizer.weight 0.07964776024046501 +872 69 optimizer.lr 0.047185029108920244 +872 69 training.batch_size 0.0 +872 69 training.label_smoothing 0.031716938785320774 +872 70 model.embedding_dim 1.0 +872 70 model.scoring_fct_norm 2.0 +872 70 regularizer.weight 0.0743545388328212 +872 70 optimizer.lr 0.0018360853299938958 +872 70 training.batch_size 1.0 +872 70 training.label_smoothing 0.001982547656665804 +872 71 model.embedding_dim 0.0 +872 71 model.scoring_fct_norm 1.0 +872 71 regularizer.weight 0.14119335132363953 +872 71 optimizer.lr 0.001695022660238918 +872 71 training.batch_size 2.0 +872 71 training.label_smoothing 0.05136436474944023 +872 72 model.embedding_dim 2.0 +872 72 model.scoring_fct_norm 2.0 +872 72 regularizer.weight 0.011363595851987715 +872 72 optimizer.lr 0.02934457687228421 +872 72 training.batch_size 0.0 +872 72 training.label_smoothing 0.00571620479532905 +872 73 model.embedding_dim 2.0 +872 73 model.scoring_fct_norm 2.0 +872 73 regularizer.weight 0.14339907217672804 +872 73 optimizer.lr 0.03640858184461216 +872 73 training.batch_size 0.0 +872 73 training.label_smoothing 0.05576379977401635 +872 74 model.embedding_dim 2.0 +872 74 model.scoring_fct_norm 1.0 +872 74 regularizer.weight 0.028664107128263366 +872 74 optimizer.lr 0.002212153296940075 +872 74 training.batch_size 1.0 +872 74 training.label_smoothing 0.001715203808035516 +872 75 model.embedding_dim 2.0 +872 75 model.scoring_fct_norm 2.0 +872 75 regularizer.weight 0.054639846266069186 +872 75 optimizer.lr 0.003956922201199749 +872 75 training.batch_size 2.0 +872 75 training.label_smoothing 0.8648192993260337 +872 76 model.embedding_dim 0.0 +872 76 
model.scoring_fct_norm 2.0 +872 76 regularizer.weight 0.019673428153920234 +872 76 optimizer.lr 0.019516517399007748 +872 76 training.batch_size 1.0 +872 76 training.label_smoothing 0.020365431411881532 +872 77 model.embedding_dim 1.0 +872 77 model.scoring_fct_norm 1.0 +872 77 regularizer.weight 0.010690302248910493 +872 77 optimizer.lr 0.0022728301755907537 +872 77 training.batch_size 0.0 +872 77 training.label_smoothing 0.0017898676169732196 +872 78 model.embedding_dim 0.0 +872 78 model.scoring_fct_norm 2.0 +872 78 regularizer.weight 0.027411850002271754 +872 78 optimizer.lr 0.006633742840813592 +872 78 training.batch_size 0.0 +872 78 training.label_smoothing 0.0950243869611134 +872 79 model.embedding_dim 0.0 +872 79 model.scoring_fct_norm 1.0 +872 79 regularizer.weight 0.1409375816970108 +872 79 optimizer.lr 0.0015022168737352844 +872 79 training.batch_size 2.0 +872 79 training.label_smoothing 0.0013525911237974022 +872 80 model.embedding_dim 1.0 +872 80 model.scoring_fct_norm 2.0 +872 80 regularizer.weight 0.022208337742958505 +872 80 optimizer.lr 0.010066209028233854 +872 80 training.batch_size 2.0 +872 80 training.label_smoothing 0.09100276938154976 +872 81 model.embedding_dim 2.0 +872 81 model.scoring_fct_norm 1.0 +872 81 regularizer.weight 0.22086343081623094 +872 81 optimizer.lr 0.08283591611002897 +872 81 training.batch_size 0.0 +872 81 training.label_smoothing 0.005924207622503369 +872 82 model.embedding_dim 0.0 +872 82 model.scoring_fct_norm 1.0 +872 82 regularizer.weight 0.02952667571221245 +872 82 optimizer.lr 0.005253793283091006 +872 82 training.batch_size 0.0 +872 82 training.label_smoothing 0.06086008870953377 +872 83 model.embedding_dim 2.0 +872 83 model.scoring_fct_norm 2.0 +872 83 regularizer.weight 0.08880360285869676 +872 83 optimizer.lr 0.0010941358044688648 +872 83 training.batch_size 2.0 +872 83 training.label_smoothing 0.016121769331651454 +872 84 model.embedding_dim 0.0 +872 84 model.scoring_fct_norm 1.0 +872 84 regularizer.weight 
0.1482413199637748 +872 84 optimizer.lr 0.012345028488087632 +872 84 training.batch_size 2.0 +872 84 training.label_smoothing 0.011450542310284112 +872 85 model.embedding_dim 2.0 +872 85 model.scoring_fct_norm 1.0 +872 85 regularizer.weight 0.01055527422502668 +872 85 optimizer.lr 0.006950704756464497 +872 85 training.batch_size 1.0 +872 85 training.label_smoothing 0.06839340888825449 +872 86 model.embedding_dim 2.0 +872 86 model.scoring_fct_norm 2.0 +872 86 regularizer.weight 0.016463970816880242 +872 86 optimizer.lr 0.0360057050997241 +872 86 training.batch_size 2.0 +872 86 training.label_smoothing 0.0032473849381545006 +872 87 model.embedding_dim 0.0 +872 87 model.scoring_fct_norm 2.0 +872 87 regularizer.weight 0.2825012519071709 +872 87 optimizer.lr 0.001741609830035017 +872 87 training.batch_size 2.0 +872 87 training.label_smoothing 0.003659148241284794 +872 88 model.embedding_dim 1.0 +872 88 model.scoring_fct_norm 2.0 +872 88 regularizer.weight 0.08202680480689153 +872 88 optimizer.lr 0.0027006656756386545 +872 88 training.batch_size 2.0 +872 88 training.label_smoothing 0.02879171166746671 +872 89 model.embedding_dim 1.0 +872 89 model.scoring_fct_norm 1.0 +872 89 regularizer.weight 0.20940836614731934 +872 89 optimizer.lr 0.003151386983216718 +872 89 training.batch_size 2.0 +872 89 training.label_smoothing 0.1647712411766111 +872 90 model.embedding_dim 2.0 +872 90 model.scoring_fct_norm 1.0 +872 90 regularizer.weight 0.011895154590737991 +872 90 optimizer.lr 0.0032301728074915327 +872 90 training.batch_size 2.0 +872 90 training.label_smoothing 0.29868496981998555 +872 91 model.embedding_dim 0.0 +872 91 model.scoring_fct_norm 2.0 +872 91 regularizer.weight 0.01871893975096581 +872 91 optimizer.lr 0.021465273204187278 +872 91 training.batch_size 1.0 +872 91 training.label_smoothing 0.047134271605681746 +872 92 model.embedding_dim 2.0 +872 92 model.scoring_fct_norm 1.0 +872 92 regularizer.weight 0.16342983916672374 +872 92 optimizer.lr 0.006477148429529334 +872 
92 training.batch_size 2.0 +872 92 training.label_smoothing 0.5822986348402646 +872 93 model.embedding_dim 2.0 +872 93 model.scoring_fct_norm 2.0 +872 93 regularizer.weight 0.01634738043291649 +872 93 optimizer.lr 0.014437209497086054 +872 93 training.batch_size 1.0 +872 93 training.label_smoothing 0.528120572825349 +872 94 model.embedding_dim 0.0 +872 94 model.scoring_fct_norm 2.0 +872 94 regularizer.weight 0.020130735852397802 +872 94 optimizer.lr 0.05506754326460725 +872 94 training.batch_size 0.0 +872 94 training.label_smoothing 0.009379739603355523 +872 95 model.embedding_dim 0.0 +872 95 model.scoring_fct_norm 1.0 +872 95 regularizer.weight 0.22446746476507315 +872 95 optimizer.lr 0.01714274162763157 +872 95 training.batch_size 0.0 +872 95 training.label_smoothing 0.00230631463357901 +872 96 model.embedding_dim 1.0 +872 96 model.scoring_fct_norm 2.0 +872 96 regularizer.weight 0.013053855768652676 +872 96 optimizer.lr 0.009105422128892898 +872 96 training.batch_size 1.0 +872 96 training.label_smoothing 0.008121569438449147 +872 97 model.embedding_dim 1.0 +872 97 model.scoring_fct_norm 2.0 +872 97 regularizer.weight 0.1659796611219487 +872 97 optimizer.lr 0.03806864973127981 +872 97 training.batch_size 1.0 +872 97 training.label_smoothing 0.6508486620848063 +872 98 model.embedding_dim 2.0 +872 98 model.scoring_fct_norm 2.0 +872 98 regularizer.weight 0.024927049542674492 +872 98 optimizer.lr 0.01231151231740608 +872 98 training.batch_size 1.0 +872 98 training.label_smoothing 0.42643805464267664 +872 99 model.embedding_dim 2.0 +872 99 model.scoring_fct_norm 2.0 +872 99 regularizer.weight 0.08489040628603634 +872 99 optimizer.lr 0.011119975829296268 +872 99 training.batch_size 0.0 +872 99 training.label_smoothing 0.35137374588651954 +872 100 model.embedding_dim 0.0 +872 100 model.scoring_fct_norm 1.0 +872 100 regularizer.weight 0.03389768701728846 +872 100 optimizer.lr 0.007014067817097573 +872 100 training.batch_size 2.0 +872 100 training.label_smoothing 
0.0024992054743637857 +872 1 dataset """kinships""" +872 1 model """transh""" +872 1 loss """softplus""" +872 1 regularizer """transh""" +872 1 optimizer """adam""" +872 1 training_loop """lcwa""" +872 1 evaluator """rankbased""" +872 2 dataset """kinships""" +872 2 model """transh""" +872 2 loss """softplus""" +872 2 regularizer """transh""" +872 2 optimizer """adam""" +872 2 training_loop """lcwa""" +872 2 evaluator """rankbased""" +872 3 dataset """kinships""" +872 3 model """transh""" +872 3 loss """softplus""" +872 3 regularizer """transh""" +872 3 optimizer """adam""" +872 3 training_loop """lcwa""" +872 3 evaluator """rankbased""" +872 4 dataset """kinships""" +872 4 model """transh""" +872 4 loss """softplus""" +872 4 regularizer """transh""" +872 4 optimizer """adam""" +872 4 training_loop """lcwa""" +872 4 evaluator """rankbased""" +872 5 dataset """kinships""" +872 5 model """transh""" +872 5 loss """softplus""" +872 5 regularizer """transh""" +872 5 optimizer """adam""" +872 5 training_loop """lcwa""" +872 5 evaluator """rankbased""" +872 6 dataset """kinships""" +872 6 model """transh""" +872 6 loss """softplus""" +872 6 regularizer """transh""" +872 6 optimizer """adam""" +872 6 training_loop """lcwa""" +872 6 evaluator """rankbased""" +872 7 dataset """kinships""" +872 7 model """transh""" +872 7 loss """softplus""" +872 7 regularizer """transh""" +872 7 optimizer """adam""" +872 7 training_loop """lcwa""" +872 7 evaluator """rankbased""" +872 8 dataset """kinships""" +872 8 model """transh""" +872 8 loss """softplus""" +872 8 regularizer """transh""" +872 8 optimizer """adam""" +872 8 training_loop """lcwa""" +872 8 evaluator """rankbased""" +872 9 dataset """kinships""" +872 9 model """transh""" +872 9 loss """softplus""" +872 9 regularizer """transh""" +872 9 optimizer """adam""" +872 9 training_loop """lcwa""" +872 9 evaluator """rankbased""" +872 10 dataset """kinships""" +872 10 model """transh""" +872 10 loss """softplus""" +872 10 regularizer 
"""transh""" +872 10 optimizer """adam""" +872 10 training_loop """lcwa""" +872 10 evaluator """rankbased""" +872 11 dataset """kinships""" +872 11 model """transh""" +872 11 loss """softplus""" +872 11 regularizer """transh""" +872 11 optimizer """adam""" +872 11 training_loop """lcwa""" +872 11 evaluator """rankbased""" +872 12 dataset """kinships""" +872 12 model """transh""" +872 12 loss """softplus""" +872 12 regularizer """transh""" +872 12 optimizer """adam""" +872 12 training_loop """lcwa""" +872 12 evaluator """rankbased""" +872 13 dataset """kinships""" +872 13 model """transh""" +872 13 loss """softplus""" +872 13 regularizer """transh""" +872 13 optimizer """adam""" +872 13 training_loop """lcwa""" +872 13 evaluator """rankbased""" +872 14 dataset """kinships""" +872 14 model """transh""" +872 14 loss """softplus""" +872 14 regularizer """transh""" +872 14 optimizer """adam""" +872 14 training_loop """lcwa""" +872 14 evaluator """rankbased""" +872 15 dataset """kinships""" +872 15 model """transh""" +872 15 loss """softplus""" +872 15 regularizer """transh""" +872 15 optimizer """adam""" +872 15 training_loop """lcwa""" +872 15 evaluator """rankbased""" +872 16 dataset """kinships""" +872 16 model """transh""" +872 16 loss """softplus""" +872 16 regularizer """transh""" +872 16 optimizer """adam""" +872 16 training_loop """lcwa""" +872 16 evaluator """rankbased""" +872 17 dataset """kinships""" +872 17 model """transh""" +872 17 loss """softplus""" +872 17 regularizer """transh""" +872 17 optimizer """adam""" +872 17 training_loop """lcwa""" +872 17 evaluator """rankbased""" +872 18 dataset """kinships""" +872 18 model """transh""" +872 18 loss """softplus""" +872 18 regularizer """transh""" +872 18 optimizer """adam""" +872 18 training_loop """lcwa""" +872 18 evaluator """rankbased""" +872 19 dataset """kinships""" +872 19 model """transh""" +872 19 loss """softplus""" +872 19 regularizer """transh""" +872 19 optimizer """adam""" +872 19 training_loop 
"""lcwa""" +872 19 evaluator """rankbased""" +872 20 dataset """kinships""" +872 20 model """transh""" +872 20 loss """softplus""" +872 20 regularizer """transh""" +872 20 optimizer """adam""" +872 20 training_loop """lcwa""" +872 20 evaluator """rankbased""" +872 21 dataset """kinships""" +872 21 model """transh""" +872 21 loss """softplus""" +872 21 regularizer """transh""" +872 21 optimizer """adam""" +872 21 training_loop """lcwa""" +872 21 evaluator """rankbased""" +872 22 dataset """kinships""" +872 22 model """transh""" +872 22 loss """softplus""" +872 22 regularizer """transh""" +872 22 optimizer """adam""" +872 22 training_loop """lcwa""" +872 22 evaluator """rankbased""" +872 23 dataset """kinships""" +872 23 model """transh""" +872 23 loss """softplus""" +872 23 regularizer """transh""" +872 23 optimizer """adam""" +872 23 training_loop """lcwa""" +872 23 evaluator """rankbased""" +872 24 dataset """kinships""" +872 24 model """transh""" +872 24 loss """softplus""" +872 24 regularizer """transh""" +872 24 optimizer """adam""" +872 24 training_loop """lcwa""" +872 24 evaluator """rankbased""" +872 25 dataset """kinships""" +872 25 model """transh""" +872 25 loss """softplus""" +872 25 regularizer """transh""" +872 25 optimizer """adam""" +872 25 training_loop """lcwa""" +872 25 evaluator """rankbased""" +872 26 dataset """kinships""" +872 26 model """transh""" +872 26 loss """softplus""" +872 26 regularizer """transh""" +872 26 optimizer """adam""" +872 26 training_loop """lcwa""" +872 26 evaluator """rankbased""" +872 27 dataset """kinships""" +872 27 model """transh""" +872 27 loss """softplus""" +872 27 regularizer """transh""" +872 27 optimizer """adam""" +872 27 training_loop """lcwa""" +872 27 evaluator """rankbased""" +872 28 dataset """kinships""" +872 28 model """transh""" +872 28 loss """softplus""" +872 28 regularizer """transh""" +872 28 optimizer """adam""" +872 28 training_loop """lcwa""" +872 28 evaluator """rankbased""" +872 29 dataset 
"""kinships""" +872 29 model """transh""" +872 29 loss """softplus""" +872 29 regularizer """transh""" +872 29 optimizer """adam""" +872 29 training_loop """lcwa""" +872 29 evaluator """rankbased""" +872 30 dataset """kinships""" +872 30 model """transh""" +872 30 loss """softplus""" +872 30 regularizer """transh""" +872 30 optimizer """adam""" +872 30 training_loop """lcwa""" +872 30 evaluator """rankbased""" +872 31 dataset """kinships""" +872 31 model """transh""" +872 31 loss """softplus""" +872 31 regularizer """transh""" +872 31 optimizer """adam""" +872 31 training_loop """lcwa""" +872 31 evaluator """rankbased""" +872 32 dataset """kinships""" +872 32 model """transh""" +872 32 loss """softplus""" +872 32 regularizer """transh""" +872 32 optimizer """adam""" +872 32 training_loop """lcwa""" +872 32 evaluator """rankbased""" +872 33 dataset """kinships""" +872 33 model """transh""" +872 33 loss """softplus""" +872 33 regularizer """transh""" +872 33 optimizer """adam""" +872 33 training_loop """lcwa""" +872 33 evaluator """rankbased""" +872 34 dataset """kinships""" +872 34 model """transh""" +872 34 loss """softplus""" +872 34 regularizer """transh""" +872 34 optimizer """adam""" +872 34 training_loop """lcwa""" +872 34 evaluator """rankbased""" +872 35 dataset """kinships""" +872 35 model """transh""" +872 35 loss """softplus""" +872 35 regularizer """transh""" +872 35 optimizer """adam""" +872 35 training_loop """lcwa""" +872 35 evaluator """rankbased""" +872 36 dataset """kinships""" +872 36 model """transh""" +872 36 loss """softplus""" +872 36 regularizer """transh""" +872 36 optimizer """adam""" +872 36 training_loop """lcwa""" +872 36 evaluator """rankbased""" +872 37 dataset """kinships""" +872 37 model """transh""" +872 37 loss """softplus""" +872 37 regularizer """transh""" +872 37 optimizer """adam""" +872 37 training_loop """lcwa""" +872 37 evaluator """rankbased""" +872 38 dataset """kinships""" +872 38 model """transh""" +872 38 loss 
"""softplus""" +872 38 regularizer """transh""" +872 38 optimizer """adam""" +872 38 training_loop """lcwa""" +872 38 evaluator """rankbased""" +872 39 dataset """kinships""" +872 39 model """transh""" +872 39 loss """softplus""" +872 39 regularizer """transh""" +872 39 optimizer """adam""" +872 39 training_loop """lcwa""" +872 39 evaluator """rankbased""" +872 40 dataset """kinships""" +872 40 model """transh""" +872 40 loss """softplus""" +872 40 regularizer """transh""" +872 40 optimizer """adam""" +872 40 training_loop """lcwa""" +872 40 evaluator """rankbased""" +872 41 dataset """kinships""" +872 41 model """transh""" +872 41 loss """softplus""" +872 41 regularizer """transh""" +872 41 optimizer """adam""" +872 41 training_loop """lcwa""" +872 41 evaluator """rankbased""" +872 42 dataset """kinships""" +872 42 model """transh""" +872 42 loss """softplus""" +872 42 regularizer """transh""" +872 42 optimizer """adam""" +872 42 training_loop """lcwa""" +872 42 evaluator """rankbased""" +872 43 dataset """kinships""" +872 43 model """transh""" +872 43 loss """softplus""" +872 43 regularizer """transh""" +872 43 optimizer """adam""" +872 43 training_loop """lcwa""" +872 43 evaluator """rankbased""" +872 44 dataset """kinships""" +872 44 model """transh""" +872 44 loss """softplus""" +872 44 regularizer """transh""" +872 44 optimizer """adam""" +872 44 training_loop """lcwa""" +872 44 evaluator """rankbased""" +872 45 dataset """kinships""" +872 45 model """transh""" +872 45 loss """softplus""" +872 45 regularizer """transh""" +872 45 optimizer """adam""" +872 45 training_loop """lcwa""" +872 45 evaluator """rankbased""" +872 46 dataset """kinships""" +872 46 model """transh""" +872 46 loss """softplus""" +872 46 regularizer """transh""" +872 46 optimizer """adam""" +872 46 training_loop """lcwa""" +872 46 evaluator """rankbased""" +872 47 dataset """kinships""" +872 47 model """transh""" +872 47 loss """softplus""" +872 47 regularizer """transh""" +872 47 
optimizer """adam""" +872 47 training_loop """lcwa""" +872 47 evaluator """rankbased""" +872 48 dataset """kinships""" +872 48 model """transh""" +872 48 loss """softplus""" +872 48 regularizer """transh""" +872 48 optimizer """adam""" +872 48 training_loop """lcwa""" +872 48 evaluator """rankbased""" +872 49 dataset """kinships""" +872 49 model """transh""" +872 49 loss """softplus""" +872 49 regularizer """transh""" +872 49 optimizer """adam""" +872 49 training_loop """lcwa""" +872 49 evaluator """rankbased""" +872 50 dataset """kinships""" +872 50 model """transh""" +872 50 loss """softplus""" +872 50 regularizer """transh""" +872 50 optimizer """adam""" +872 50 training_loop """lcwa""" +872 50 evaluator """rankbased""" +872 51 dataset """kinships""" +872 51 model """transh""" +872 51 loss """softplus""" +872 51 regularizer """transh""" +872 51 optimizer """adam""" +872 51 training_loop """lcwa""" +872 51 evaluator """rankbased""" +872 52 dataset """kinships""" +872 52 model """transh""" +872 52 loss """softplus""" +872 52 regularizer """transh""" +872 52 optimizer """adam""" +872 52 training_loop """lcwa""" +872 52 evaluator """rankbased""" +872 53 dataset """kinships""" +872 53 model """transh""" +872 53 loss """softplus""" +872 53 regularizer """transh""" +872 53 optimizer """adam""" +872 53 training_loop """lcwa""" +872 53 evaluator """rankbased""" +872 54 dataset """kinships""" +872 54 model """transh""" +872 54 loss """softplus""" +872 54 regularizer """transh""" +872 54 optimizer """adam""" +872 54 training_loop """lcwa""" +872 54 evaluator """rankbased""" +872 55 dataset """kinships""" +872 55 model """transh""" +872 55 loss """softplus""" +872 55 regularizer """transh""" +872 55 optimizer """adam""" +872 55 training_loop """lcwa""" +872 55 evaluator """rankbased""" +872 56 dataset """kinships""" +872 56 model """transh""" +872 56 loss """softplus""" +872 56 regularizer """transh""" +872 56 optimizer """adam""" +872 56 training_loop """lcwa""" +872 56 
evaluator """rankbased""" +872 57 dataset """kinships""" +872 57 model """transh""" +872 57 loss """softplus""" +872 57 regularizer """transh""" +872 57 optimizer """adam""" +872 57 training_loop """lcwa""" +872 57 evaluator """rankbased""" +872 58 dataset """kinships""" +872 58 model """transh""" +872 58 loss """softplus""" +872 58 regularizer """transh""" +872 58 optimizer """adam""" +872 58 training_loop """lcwa""" +872 58 evaluator """rankbased""" +872 59 dataset """kinships""" +872 59 model """transh""" +872 59 loss """softplus""" +872 59 regularizer """transh""" +872 59 optimizer """adam""" +872 59 training_loop """lcwa""" +872 59 evaluator """rankbased""" +872 60 dataset """kinships""" +872 60 model """transh""" +872 60 loss """softplus""" +872 60 regularizer """transh""" +872 60 optimizer """adam""" +872 60 training_loop """lcwa""" +872 60 evaluator """rankbased""" +872 61 dataset """kinships""" +872 61 model """transh""" +872 61 loss """softplus""" +872 61 regularizer """transh""" +872 61 optimizer """adam""" +872 61 training_loop """lcwa""" +872 61 evaluator """rankbased""" +872 62 dataset """kinships""" +872 62 model """transh""" +872 62 loss """softplus""" +872 62 regularizer """transh""" +872 62 optimizer """adam""" +872 62 training_loop """lcwa""" +872 62 evaluator """rankbased""" +872 63 dataset """kinships""" +872 63 model """transh""" +872 63 loss """softplus""" +872 63 regularizer """transh""" +872 63 optimizer """adam""" +872 63 training_loop """lcwa""" +872 63 evaluator """rankbased""" +872 64 dataset """kinships""" +872 64 model """transh""" +872 64 loss """softplus""" +872 64 regularizer """transh""" +872 64 optimizer """adam""" +872 64 training_loop """lcwa""" +872 64 evaluator """rankbased""" +872 65 dataset """kinships""" +872 65 model """transh""" +872 65 loss """softplus""" +872 65 regularizer """transh""" +872 65 optimizer """adam""" +872 65 training_loop """lcwa""" +872 65 evaluator """rankbased""" +872 66 dataset """kinships""" +872 66 
model """transh""" +872 66 loss """softplus""" +872 66 regularizer """transh""" +872 66 optimizer """adam""" +872 66 training_loop """lcwa""" +872 66 evaluator """rankbased""" +872 67 dataset """kinships""" +872 67 model """transh""" +872 67 loss """softplus""" +872 67 regularizer """transh""" +872 67 optimizer """adam""" +872 67 training_loop """lcwa""" +872 67 evaluator """rankbased""" +872 68 dataset """kinships""" +872 68 model """transh""" +872 68 loss """softplus""" +872 68 regularizer """transh""" +872 68 optimizer """adam""" +872 68 training_loop """lcwa""" +872 68 evaluator """rankbased""" +872 69 dataset """kinships""" +872 69 model """transh""" +872 69 loss """softplus""" +872 69 regularizer """transh""" +872 69 optimizer """adam""" +872 69 training_loop """lcwa""" +872 69 evaluator """rankbased""" +872 70 dataset """kinships""" +872 70 model """transh""" +872 70 loss """softplus""" +872 70 regularizer """transh""" +872 70 optimizer """adam""" +872 70 training_loop """lcwa""" +872 70 evaluator """rankbased""" +872 71 dataset """kinships""" +872 71 model """transh""" +872 71 loss """softplus""" +872 71 regularizer """transh""" +872 71 optimizer """adam""" +872 71 training_loop """lcwa""" +872 71 evaluator """rankbased""" +872 72 dataset """kinships""" +872 72 model """transh""" +872 72 loss """softplus""" +872 72 regularizer """transh""" +872 72 optimizer """adam""" +872 72 training_loop """lcwa""" +872 72 evaluator """rankbased""" +872 73 dataset """kinships""" +872 73 model """transh""" +872 73 loss """softplus""" +872 73 regularizer """transh""" +872 73 optimizer """adam""" +872 73 training_loop """lcwa""" +872 73 evaluator """rankbased""" +872 74 dataset """kinships""" +872 74 model """transh""" +872 74 loss """softplus""" +872 74 regularizer """transh""" +872 74 optimizer """adam""" +872 74 training_loop """lcwa""" +872 74 evaluator """rankbased""" +872 75 dataset """kinships""" +872 75 model """transh""" +872 75 loss """softplus""" +872 75 
regularizer """transh""" +872 75 optimizer """adam""" +872 75 training_loop """lcwa""" +872 75 evaluator """rankbased""" +872 76 dataset """kinships""" +872 76 model """transh""" +872 76 loss """softplus""" +872 76 regularizer """transh""" +872 76 optimizer """adam""" +872 76 training_loop """lcwa""" +872 76 evaluator """rankbased""" +872 77 dataset """kinships""" +872 77 model """transh""" +872 77 loss """softplus""" +872 77 regularizer """transh""" +872 77 optimizer """adam""" +872 77 training_loop """lcwa""" +872 77 evaluator """rankbased""" +872 78 dataset """kinships""" +872 78 model """transh""" +872 78 loss """softplus""" +872 78 regularizer """transh""" +872 78 optimizer """adam""" +872 78 training_loop """lcwa""" +872 78 evaluator """rankbased""" +872 79 dataset """kinships""" +872 79 model """transh""" +872 79 loss """softplus""" +872 79 regularizer """transh""" +872 79 optimizer """adam""" +872 79 training_loop """lcwa""" +872 79 evaluator """rankbased""" +872 80 dataset """kinships""" +872 80 model """transh""" +872 80 loss """softplus""" +872 80 regularizer """transh""" +872 80 optimizer """adam""" +872 80 training_loop """lcwa""" +872 80 evaluator """rankbased""" +872 81 dataset """kinships""" +872 81 model """transh""" +872 81 loss """softplus""" +872 81 regularizer """transh""" +872 81 optimizer """adam""" +872 81 training_loop """lcwa""" +872 81 evaluator """rankbased""" +872 82 dataset """kinships""" +872 82 model """transh""" +872 82 loss """softplus""" +872 82 regularizer """transh""" +872 82 optimizer """adam""" +872 82 training_loop """lcwa""" +872 82 evaluator """rankbased""" +872 83 dataset """kinships""" +872 83 model """transh""" +872 83 loss """softplus""" +872 83 regularizer """transh""" +872 83 optimizer """adam""" +872 83 training_loop """lcwa""" +872 83 evaluator """rankbased""" +872 84 dataset """kinships""" +872 84 model """transh""" +872 84 loss """softplus""" +872 84 regularizer """transh""" +872 84 optimizer """adam""" +872 84 
training_loop """lcwa""" +872 84 evaluator """rankbased""" +872 85 dataset """kinships""" +872 85 model """transh""" +872 85 loss """softplus""" +872 85 regularizer """transh""" +872 85 optimizer """adam""" +872 85 training_loop """lcwa""" +872 85 evaluator """rankbased""" +872 86 dataset """kinships""" +872 86 model """transh""" +872 86 loss """softplus""" +872 86 regularizer """transh""" +872 86 optimizer """adam""" +872 86 training_loop """lcwa""" +872 86 evaluator """rankbased""" +872 87 dataset """kinships""" +872 87 model """transh""" +872 87 loss """softplus""" +872 87 regularizer """transh""" +872 87 optimizer """adam""" +872 87 training_loop """lcwa""" +872 87 evaluator """rankbased""" +872 88 dataset """kinships""" +872 88 model """transh""" +872 88 loss """softplus""" +872 88 regularizer """transh""" +872 88 optimizer """adam""" +872 88 training_loop """lcwa""" +872 88 evaluator """rankbased""" +872 89 dataset """kinships""" +872 89 model """transh""" +872 89 loss """softplus""" +872 89 regularizer """transh""" +872 89 optimizer """adam""" +872 89 training_loop """lcwa""" +872 89 evaluator """rankbased""" +872 90 dataset """kinships""" +872 90 model """transh""" +872 90 loss """softplus""" +872 90 regularizer """transh""" +872 90 optimizer """adam""" +872 90 training_loop """lcwa""" +872 90 evaluator """rankbased""" +872 91 dataset """kinships""" +872 91 model """transh""" +872 91 loss """softplus""" +872 91 regularizer """transh""" +872 91 optimizer """adam""" +872 91 training_loop """lcwa""" +872 91 evaluator """rankbased""" +872 92 dataset """kinships""" +872 92 model """transh""" +872 92 loss """softplus""" +872 92 regularizer """transh""" +872 92 optimizer """adam""" +872 92 training_loop """lcwa""" +872 92 evaluator """rankbased""" +872 93 dataset """kinships""" +872 93 model """transh""" +872 93 loss """softplus""" +872 93 regularizer """transh""" +872 93 optimizer """adam""" +872 93 training_loop """lcwa""" +872 93 evaluator """rankbased""" +872 
94 dataset """kinships""" +872 94 model """transh""" +872 94 loss """softplus""" +872 94 regularizer """transh""" +872 94 optimizer """adam""" +872 94 training_loop """lcwa""" +872 94 evaluator """rankbased""" +872 95 dataset """kinships""" +872 95 model """transh""" +872 95 loss """softplus""" +872 95 regularizer """transh""" +872 95 optimizer """adam""" +872 95 training_loop """lcwa""" +872 95 evaluator """rankbased""" +872 96 dataset """kinships""" +872 96 model """transh""" +872 96 loss """softplus""" +872 96 regularizer """transh""" +872 96 optimizer """adam""" +872 96 training_loop """lcwa""" +872 96 evaluator """rankbased""" +872 97 dataset """kinships""" +872 97 model """transh""" +872 97 loss """softplus""" +872 97 regularizer """transh""" +872 97 optimizer """adam""" +872 97 training_loop """lcwa""" +872 97 evaluator """rankbased""" +872 98 dataset """kinships""" +872 98 model """transh""" +872 98 loss """softplus""" +872 98 regularizer """transh""" +872 98 optimizer """adam""" +872 98 training_loop """lcwa""" +872 98 evaluator """rankbased""" +872 99 dataset """kinships""" +872 99 model """transh""" +872 99 loss """softplus""" +872 99 regularizer """transh""" +872 99 optimizer """adam""" +872 99 training_loop """lcwa""" +872 99 evaluator """rankbased""" +872 100 dataset """kinships""" +872 100 model """transh""" +872 100 loss """softplus""" +872 100 regularizer """transh""" +872 100 optimizer """adam""" +872 100 training_loop """lcwa""" +872 100 evaluator """rankbased""" +873 1 model.embedding_dim 1.0 +873 1 model.scoring_fct_norm 2.0 +873 1 regularizer.weight 0.23044889924519488 +873 1 optimizer.lr 0.007092214817349854 +873 1 training.batch_size 2.0 +873 1 training.label_smoothing 0.029752400102481084 +873 2 model.embedding_dim 0.0 +873 2 model.scoring_fct_norm 2.0 +873 2 regularizer.weight 0.03188261872937505 +873 2 optimizer.lr 0.0015660456119686043 +873 2 training.batch_size 0.0 +873 2 training.label_smoothing 0.18123847976397783 +873 3 
model.embedding_dim 2.0 +873 3 model.scoring_fct_norm 1.0 +873 3 regularizer.weight 0.04557123004252539 +873 3 optimizer.lr 0.01893205087967451 +873 3 training.batch_size 2.0 +873 3 training.label_smoothing 0.018849566911937784 +873 4 model.embedding_dim 2.0 +873 4 model.scoring_fct_norm 1.0 +873 4 regularizer.weight 0.051655815298425715 +873 4 optimizer.lr 0.06794891269565963 +873 4 training.batch_size 1.0 +873 4 training.label_smoothing 0.005163479340360659 +873 5 model.embedding_dim 2.0 +873 5 model.scoring_fct_norm 1.0 +873 5 regularizer.weight 0.07196160035537444 +873 5 optimizer.lr 0.0011202505154867602 +873 5 training.batch_size 1.0 +873 5 training.label_smoothing 0.5450282234645896 +873 6 model.embedding_dim 0.0 +873 6 model.scoring_fct_norm 1.0 +873 6 regularizer.weight 0.015290504187225405 +873 6 optimizer.lr 0.0025052226283553414 +873 6 training.batch_size 0.0 +873 6 training.label_smoothing 0.019987159275601348 +873 7 model.embedding_dim 0.0 +873 7 model.scoring_fct_norm 1.0 +873 7 regularizer.weight 0.24468056529154306 +873 7 optimizer.lr 0.016840532280469736 +873 7 training.batch_size 2.0 +873 7 training.label_smoothing 0.0011687619824393863 +873 8 model.embedding_dim 0.0 +873 8 model.scoring_fct_norm 1.0 +873 8 regularizer.weight 0.14006098425716776 +873 8 optimizer.lr 0.0013019081480068677 +873 8 training.batch_size 0.0 +873 8 training.label_smoothing 0.05087910371030116 +873 9 model.embedding_dim 2.0 +873 9 model.scoring_fct_norm 1.0 +873 9 regularizer.weight 0.05397585800315885 +873 9 optimizer.lr 0.031068613428175813 +873 9 training.batch_size 2.0 +873 9 training.label_smoothing 0.14687558647773669 +873 10 model.embedding_dim 2.0 +873 10 model.scoring_fct_norm 2.0 +873 10 regularizer.weight 0.08068472464858417 +873 10 optimizer.lr 0.007338768770264426 +873 10 training.batch_size 0.0 +873 10 training.label_smoothing 0.03428991232234829 +873 11 model.embedding_dim 0.0 +873 11 model.scoring_fct_norm 2.0 +873 11 regularizer.weight 
0.017933936918868194 +873 11 optimizer.lr 0.006653001497649165 +873 11 training.batch_size 0.0 +873 11 training.label_smoothing 0.6035159891269262 +873 12 model.embedding_dim 1.0 +873 12 model.scoring_fct_norm 1.0 +873 12 regularizer.weight 0.044512855719424704 +873 12 optimizer.lr 0.011772902460311508 +873 12 training.batch_size 1.0 +873 12 training.label_smoothing 0.14006287942189996 +873 13 model.embedding_dim 1.0 +873 13 model.scoring_fct_norm 1.0 +873 13 regularizer.weight 0.17474882423311824 +873 13 optimizer.lr 0.0026988163987532727 +873 13 training.batch_size 2.0 +873 13 training.label_smoothing 0.0018255238906459462 +873 14 model.embedding_dim 1.0 +873 14 model.scoring_fct_norm 2.0 +873 14 regularizer.weight 0.05237374501851125 +873 14 optimizer.lr 0.0014121568950991989 +873 14 training.batch_size 1.0 +873 14 training.label_smoothing 0.03774127149758084 +873 15 model.embedding_dim 0.0 +873 15 model.scoring_fct_norm 2.0 +873 15 regularizer.weight 0.03521734261005059 +873 15 optimizer.lr 0.03329371911054278 +873 15 training.batch_size 0.0 +873 15 training.label_smoothing 0.0034806629107631795 +873 16 model.embedding_dim 0.0 +873 16 model.scoring_fct_norm 1.0 +873 16 regularizer.weight 0.01005232603623322 +873 16 optimizer.lr 0.0014562140714957014 +873 16 training.batch_size 0.0 +873 16 training.label_smoothing 0.08075718157527588 +873 17 model.embedding_dim 1.0 +873 17 model.scoring_fct_norm 2.0 +873 17 regularizer.weight 0.012540168646074816 +873 17 optimizer.lr 0.009884557022901216 +873 17 training.batch_size 0.0 +873 17 training.label_smoothing 0.1241208121189641 +873 18 model.embedding_dim 1.0 +873 18 model.scoring_fct_norm 1.0 +873 18 regularizer.weight 0.014760030284968745 +873 18 optimizer.lr 0.07254103028040582 +873 18 training.batch_size 2.0 +873 18 training.label_smoothing 0.003242414828896295 +873 19 model.embedding_dim 1.0 +873 19 model.scoring_fct_norm 1.0 +873 19 regularizer.weight 0.01201092379850857 +873 19 optimizer.lr 0.00268380371828537 
+873 19 training.batch_size 1.0 +873 19 training.label_smoothing 0.0035716215920747166 +873 20 model.embedding_dim 0.0 +873 20 model.scoring_fct_norm 1.0 +873 20 regularizer.weight 0.16147519488388007 +873 20 optimizer.lr 0.06664460037780198 +873 20 training.batch_size 0.0 +873 20 training.label_smoothing 0.0012443388840310404 +873 21 model.embedding_dim 0.0 +873 21 model.scoring_fct_norm 2.0 +873 21 regularizer.weight 0.017236473598094305 +873 21 optimizer.lr 0.06337058723703028 +873 21 training.batch_size 1.0 +873 21 training.label_smoothing 0.06224928677709579 +873 22 model.embedding_dim 2.0 +873 22 model.scoring_fct_norm 2.0 +873 22 regularizer.weight 0.013561201074108152 +873 22 optimizer.lr 0.0021630895192672928 +873 22 training.batch_size 2.0 +873 22 training.label_smoothing 0.001224606557507923 +873 23 model.embedding_dim 0.0 +873 23 model.scoring_fct_norm 1.0 +873 23 regularizer.weight 0.020155377824628003 +873 23 optimizer.lr 0.026953665179392645 +873 23 training.batch_size 0.0 +873 23 training.label_smoothing 0.0011672544614178164 +873 24 model.embedding_dim 2.0 +873 24 model.scoring_fct_norm 1.0 +873 24 regularizer.weight 0.025430460780266322 +873 24 optimizer.lr 0.0011293000967086161 +873 24 training.batch_size 2.0 +873 24 training.label_smoothing 0.029665197151435754 +873 25 model.embedding_dim 2.0 +873 25 model.scoring_fct_norm 1.0 +873 25 regularizer.weight 0.22260313063643805 +873 25 optimizer.lr 0.005870914092869887 +873 25 training.batch_size 2.0 +873 25 training.label_smoothing 0.06562912469019513 +873 26 model.embedding_dim 1.0 +873 26 model.scoring_fct_norm 1.0 +873 26 regularizer.weight 0.10426461348842929 +873 26 optimizer.lr 0.04084416212023987 +873 26 training.batch_size 2.0 +873 26 training.label_smoothing 0.01843222755492393 +873 27 model.embedding_dim 2.0 +873 27 model.scoring_fct_norm 2.0 +873 27 regularizer.weight 0.10183529705172445 +873 27 optimizer.lr 0.02075622747599402 +873 27 training.batch_size 0.0 +873 27 
training.label_smoothing 0.13123013373988096 +873 28 model.embedding_dim 0.0 +873 28 model.scoring_fct_norm 2.0 +873 28 regularizer.weight 0.02809450781074596 +873 28 optimizer.lr 0.0314093120066119 +873 28 training.batch_size 1.0 +873 28 training.label_smoothing 0.0169015690074634 +873 29 model.embedding_dim 2.0 +873 29 model.scoring_fct_norm 1.0 +873 29 regularizer.weight 0.18420681536895894 +873 29 optimizer.lr 0.06061610489437271 +873 29 training.batch_size 0.0 +873 29 training.label_smoothing 0.10890551822290356 +873 30 model.embedding_dim 2.0 +873 30 model.scoring_fct_norm 1.0 +873 30 regularizer.weight 0.018297229157099415 +873 30 optimizer.lr 0.07896649866468458 +873 30 training.batch_size 2.0 +873 30 training.label_smoothing 0.024311295661773973 +873 31 model.embedding_dim 2.0 +873 31 model.scoring_fct_norm 2.0 +873 31 regularizer.weight 0.0395277527959104 +873 31 optimizer.lr 0.0016054515293255115 +873 31 training.batch_size 2.0 +873 31 training.label_smoothing 0.012467696714162639 +873 32 model.embedding_dim 0.0 +873 32 model.scoring_fct_norm 1.0 +873 32 regularizer.weight 0.11435417802891547 +873 32 optimizer.lr 0.03046326947859889 +873 32 training.batch_size 1.0 +873 32 training.label_smoothing 0.04898373400701164 +873 33 model.embedding_dim 2.0 +873 33 model.scoring_fct_norm 2.0 +873 33 regularizer.weight 0.11954271609578834 +873 33 optimizer.lr 0.0041803775255596035 +873 33 training.batch_size 0.0 +873 33 training.label_smoothing 0.01242259263176773 +873 34 model.embedding_dim 0.0 +873 34 model.scoring_fct_norm 2.0 +873 34 regularizer.weight 0.03854275977048363 +873 34 optimizer.lr 0.04994232373687879 +873 34 training.batch_size 2.0 +873 34 training.label_smoothing 0.007842532224435962 +873 35 model.embedding_dim 1.0 +873 35 model.scoring_fct_norm 1.0 +873 35 regularizer.weight 0.016699533562205094 +873 35 optimizer.lr 0.0074983075117182095 +873 35 training.batch_size 0.0 +873 35 training.label_smoothing 0.0017577943707765278 +873 36 
model.embedding_dim 2.0 +873 36 model.scoring_fct_norm 2.0 +873 36 regularizer.weight 0.016855306303173632 +873 36 optimizer.lr 0.0037990186295860603 +873 36 training.batch_size 1.0 +873 36 training.label_smoothing 0.029420206091316903 +873 37 model.embedding_dim 2.0 +873 37 model.scoring_fct_norm 1.0 +873 37 regularizer.weight 0.11887707714259821 +873 37 optimizer.lr 0.04684635835576798 +873 37 training.batch_size 0.0 +873 37 training.label_smoothing 0.1178938613835086 +873 38 model.embedding_dim 0.0 +873 38 model.scoring_fct_norm 2.0 +873 38 regularizer.weight 0.05250801976577236 +873 38 optimizer.lr 0.03872147307427729 +873 38 training.batch_size 1.0 +873 38 training.label_smoothing 0.004966093309600809 +873 39 model.embedding_dim 0.0 +873 39 model.scoring_fct_norm 2.0 +873 39 regularizer.weight 0.24122987979851054 +873 39 optimizer.lr 0.001009296639741536 +873 39 training.batch_size 2.0 +873 39 training.label_smoothing 0.004185486919475742 +873 40 model.embedding_dim 2.0 +873 40 model.scoring_fct_norm 1.0 +873 40 regularizer.weight 0.010809873158322257 +873 40 optimizer.lr 0.03449345830597032 +873 40 training.batch_size 2.0 +873 40 training.label_smoothing 0.00340711441400153 +873 41 model.embedding_dim 2.0 +873 41 model.scoring_fct_norm 2.0 +873 41 regularizer.weight 0.17530152891718762 +873 41 optimizer.lr 0.0037481155601146753 +873 41 training.batch_size 2.0 +873 41 training.label_smoothing 0.0014204529644050844 +873 42 model.embedding_dim 2.0 +873 42 model.scoring_fct_norm 2.0 +873 42 regularizer.weight 0.07923045274025833 +873 42 optimizer.lr 0.004685789494302557 +873 42 training.batch_size 0.0 +873 42 training.label_smoothing 0.09607750023331123 +873 43 model.embedding_dim 1.0 +873 43 model.scoring_fct_norm 1.0 +873 43 regularizer.weight 0.023087728963096247 +873 43 optimizer.lr 0.09706499019185662 +873 43 training.batch_size 0.0 +873 43 training.label_smoothing 0.004269859192207925 +873 44 model.embedding_dim 0.0 +873 44 model.scoring_fct_norm 2.0 +873 
44 regularizer.weight 0.22439101195667757 +873 44 optimizer.lr 0.001010425069328521 +873 44 training.batch_size 1.0 +873 44 training.label_smoothing 0.909992509848004 +873 45 model.embedding_dim 1.0 +873 45 model.scoring_fct_norm 2.0 +873 45 regularizer.weight 0.014638988485653382 +873 45 optimizer.lr 0.0019071658924421175 +873 45 training.batch_size 1.0 +873 45 training.label_smoothing 0.01393891459467757 +873 46 model.embedding_dim 1.0 +873 46 model.scoring_fct_norm 1.0 +873 46 regularizer.weight 0.04795281880471189 +873 46 optimizer.lr 0.01213881250176471 +873 46 training.batch_size 0.0 +873 46 training.label_smoothing 0.004354636626782298 +873 47 model.embedding_dim 0.0 +873 47 model.scoring_fct_norm 1.0 +873 47 regularizer.weight 0.022808243351642297 +873 47 optimizer.lr 0.0067770545927228094 +873 47 training.batch_size 2.0 +873 47 training.label_smoothing 0.0013166237332276358 +873 48 model.embedding_dim 1.0 +873 48 model.scoring_fct_norm 2.0 +873 48 regularizer.weight 0.011968001682291573 +873 48 optimizer.lr 0.0010413807763676377 +873 48 training.batch_size 1.0 +873 48 training.label_smoothing 0.09771770897003446 +873 49 model.embedding_dim 1.0 +873 49 model.scoring_fct_norm 1.0 +873 49 regularizer.weight 0.10875662787611685 +873 49 optimizer.lr 0.005906987123831196 +873 49 training.batch_size 2.0 +873 49 training.label_smoothing 0.004382763776844235 +873 50 model.embedding_dim 2.0 +873 50 model.scoring_fct_norm 1.0 +873 50 regularizer.weight 0.027805314844929086 +873 50 optimizer.lr 0.001816875987628347 +873 50 training.batch_size 0.0 +873 50 training.label_smoothing 0.3649593254972873 +873 51 model.embedding_dim 2.0 +873 51 model.scoring_fct_norm 2.0 +873 51 regularizer.weight 0.01833111765193997 +873 51 optimizer.lr 0.03007909850300961 +873 51 training.batch_size 1.0 +873 51 training.label_smoothing 0.03264545236407226 +873 52 model.embedding_dim 1.0 +873 52 model.scoring_fct_norm 1.0 +873 52 regularizer.weight 0.15697717045700108 +873 52 optimizer.lr 
0.008051505163084584 +873 52 training.batch_size 1.0 +873 52 training.label_smoothing 0.027532789292004645 +873 53 model.embedding_dim 2.0 +873 53 model.scoring_fct_norm 1.0 +873 53 regularizer.weight 0.038043782855477194 +873 53 optimizer.lr 0.05584883229242888 +873 53 training.batch_size 1.0 +873 53 training.label_smoothing 0.8635605350083619 +873 54 model.embedding_dim 2.0 +873 54 model.scoring_fct_norm 1.0 +873 54 regularizer.weight 0.015620987098159721 +873 54 optimizer.lr 0.0015630853049198837 +873 54 training.batch_size 0.0 +873 54 training.label_smoothing 0.0010432431698243658 +873 55 model.embedding_dim 1.0 +873 55 model.scoring_fct_norm 2.0 +873 55 regularizer.weight 0.02298767249353236 +873 55 optimizer.lr 0.009113895328042498 +873 55 training.batch_size 2.0 +873 55 training.label_smoothing 0.13387845096448442 +873 56 model.embedding_dim 1.0 +873 56 model.scoring_fct_norm 1.0 +873 56 regularizer.weight 0.010285625332260802 +873 56 optimizer.lr 0.022031389455108817 +873 56 training.batch_size 2.0 +873 56 training.label_smoothing 0.0015604936735802066 +873 57 model.embedding_dim 2.0 +873 57 model.scoring_fct_norm 1.0 +873 57 regularizer.weight 0.183948384068285 +873 57 optimizer.lr 0.0014717159878735195 +873 57 training.batch_size 0.0 +873 57 training.label_smoothing 0.0026301948770881874 +873 58 model.embedding_dim 2.0 +873 58 model.scoring_fct_norm 1.0 +873 58 regularizer.weight 0.17123264138449903 +873 58 optimizer.lr 0.09431935506355958 +873 58 training.batch_size 1.0 +873 58 training.label_smoothing 0.01327078437944893 +873 59 model.embedding_dim 0.0 +873 59 model.scoring_fct_norm 2.0 +873 59 regularizer.weight 0.29023829387259786 +873 59 optimizer.lr 0.02570368451806833 +873 59 training.batch_size 0.0 +873 59 training.label_smoothing 0.0014090733264268507 +873 60 model.embedding_dim 2.0 +873 60 model.scoring_fct_norm 2.0 +873 60 regularizer.weight 0.23837047360375505 +873 60 optimizer.lr 0.060145305627710234 +873 60 training.batch_size 0.0 +873 60 
training.label_smoothing 0.0012500789997814856 +873 61 model.embedding_dim 0.0 +873 61 model.scoring_fct_norm 2.0 +873 61 regularizer.weight 0.2621961299194847 +873 61 optimizer.lr 0.005225756725013761 +873 61 training.batch_size 2.0 +873 61 training.label_smoothing 0.01530608318764202 +873 62 model.embedding_dim 0.0 +873 62 model.scoring_fct_norm 1.0 +873 62 regularizer.weight 0.01862679085279124 +873 62 optimizer.lr 0.07827944994790119 +873 62 training.batch_size 2.0 +873 62 training.label_smoothing 0.6611317783741151 +873 63 model.embedding_dim 0.0 +873 63 model.scoring_fct_norm 2.0 +873 63 regularizer.weight 0.01355734783286348 +873 63 optimizer.lr 0.0037655732421914 +873 63 training.batch_size 2.0 +873 63 training.label_smoothing 0.0028271069288155273 +873 64 model.embedding_dim 0.0 +873 64 model.scoring_fct_norm 2.0 +873 64 regularizer.weight 0.16383395800369624 +873 64 optimizer.lr 0.0010238873217198405 +873 64 training.batch_size 2.0 +873 64 training.label_smoothing 0.7284098865040922 +873 65 model.embedding_dim 0.0 +873 65 model.scoring_fct_norm 2.0 +873 65 regularizer.weight 0.17849852771070826 +873 65 optimizer.lr 0.08371724382085009 +873 65 training.batch_size 1.0 +873 65 training.label_smoothing 0.45334131751751633 +873 66 model.embedding_dim 0.0 +873 66 model.scoring_fct_norm 1.0 +873 66 regularizer.weight 0.13876958183332475 +873 66 optimizer.lr 0.08792409303943367 +873 66 training.batch_size 2.0 +873 66 training.label_smoothing 0.7124091705746615 +873 67 model.embedding_dim 2.0 +873 67 model.scoring_fct_norm 2.0 +873 67 regularizer.weight 0.2819495615552468 +873 67 optimizer.lr 0.001970569432497491 +873 67 training.batch_size 2.0 +873 67 training.label_smoothing 0.16528026279639105 +873 68 model.embedding_dim 0.0 +873 68 model.scoring_fct_norm 2.0 +873 68 regularizer.weight 0.039240059916281385 +873 68 optimizer.lr 0.004270006492141958 +873 68 training.batch_size 0.0 +873 68 training.label_smoothing 0.08457225943254153 +873 69 model.embedding_dim 
1.0 +873 69 model.scoring_fct_norm 1.0 +873 69 regularizer.weight 0.15422049868185594 +873 69 optimizer.lr 0.0062494767886456006 +873 69 training.batch_size 0.0 +873 69 training.label_smoothing 0.017559476115445977 +873 70 model.embedding_dim 1.0 +873 70 model.scoring_fct_norm 1.0 +873 70 regularizer.weight 0.21024121102211152 +873 70 optimizer.lr 0.04005977489353337 +873 70 training.batch_size 0.0 +873 70 training.label_smoothing 0.2346212513724188 +873 71 model.embedding_dim 2.0 +873 71 model.scoring_fct_norm 1.0 +873 71 regularizer.weight 0.06775491484858673 +873 71 optimizer.lr 0.009410425319693393 +873 71 training.batch_size 1.0 +873 71 training.label_smoothing 0.03294036665131663 +873 72 model.embedding_dim 2.0 +873 72 model.scoring_fct_norm 2.0 +873 72 regularizer.weight 0.07156300454559308 +873 72 optimizer.lr 0.008990582297301174 +873 72 training.batch_size 1.0 +873 72 training.label_smoothing 0.0027930982884913744 +873 73 model.embedding_dim 2.0 +873 73 model.scoring_fct_norm 1.0 +873 73 regularizer.weight 0.1899571305141063 +873 73 optimizer.lr 0.004770650660228512 +873 73 training.batch_size 1.0 +873 73 training.label_smoothing 0.008696426464073367 +873 74 model.embedding_dim 0.0 +873 74 model.scoring_fct_norm 1.0 +873 74 regularizer.weight 0.02779127156047977 +873 74 optimizer.lr 0.0016194548670270354 +873 74 training.batch_size 1.0 +873 74 training.label_smoothing 0.003512023750100407 +873 75 model.embedding_dim 0.0 +873 75 model.scoring_fct_norm 1.0 +873 75 regularizer.weight 0.062393534961419535 +873 75 optimizer.lr 0.004366932826371505 +873 75 training.batch_size 0.0 +873 75 training.label_smoothing 0.3064823076815208 +873 76 model.embedding_dim 0.0 +873 76 model.scoring_fct_norm 1.0 +873 76 regularizer.weight 0.18074087050762538 +873 76 optimizer.lr 0.0045162215894347674 +873 76 training.batch_size 1.0 +873 76 training.label_smoothing 0.008017239805118053 +873 77 model.embedding_dim 2.0 +873 77 model.scoring_fct_norm 1.0 +873 77 regularizer.weight 
0.014148576269028107 +873 77 optimizer.lr 0.007063386272602309 +873 77 training.batch_size 1.0 +873 77 training.label_smoothing 0.026980129588865292 +873 78 model.embedding_dim 2.0 +873 78 model.scoring_fct_norm 2.0 +873 78 regularizer.weight 0.04924310796497632 +873 78 optimizer.lr 0.01640867705037998 +873 78 training.batch_size 2.0 +873 78 training.label_smoothing 0.004896431303305031 +873 79 model.embedding_dim 2.0 +873 79 model.scoring_fct_norm 2.0 +873 79 regularizer.weight 0.026114819806939544 +873 79 optimizer.lr 0.03612394484643982 +873 79 training.batch_size 0.0 +873 79 training.label_smoothing 0.3743192470742438 +873 80 model.embedding_dim 2.0 +873 80 model.scoring_fct_norm 1.0 +873 80 regularizer.weight 0.012632380034905622 +873 80 optimizer.lr 0.007170972324984584 +873 80 training.batch_size 0.0 +873 80 training.label_smoothing 0.0049440363220377365 +873 81 model.embedding_dim 1.0 +873 81 model.scoring_fct_norm 2.0 +873 81 regularizer.weight 0.04116734647638476 +873 81 optimizer.lr 0.017714706614767716 +873 81 training.batch_size 0.0 +873 81 training.label_smoothing 0.3848707687880959 +873 82 model.embedding_dim 2.0 +873 82 model.scoring_fct_norm 1.0 +873 82 regularizer.weight 0.02753269668592268 +873 82 optimizer.lr 0.010399805093101925 +873 82 training.batch_size 2.0 +873 82 training.label_smoothing 0.24212067342837523 +873 83 model.embedding_dim 0.0 +873 83 model.scoring_fct_norm 2.0 +873 83 regularizer.weight 0.02447480475621276 +873 83 optimizer.lr 0.005611503539922855 +873 83 training.batch_size 0.0 +873 83 training.label_smoothing 0.0018578537599570975 +873 84 model.embedding_dim 2.0 +873 84 model.scoring_fct_norm 2.0 +873 84 regularizer.weight 0.021891675297651642 +873 84 optimizer.lr 0.00197713608376194 +873 84 training.batch_size 1.0 +873 84 training.label_smoothing 0.8566463138586324 +873 85 model.embedding_dim 0.0 +873 85 model.scoring_fct_norm 2.0 +873 85 regularizer.weight 0.06798682196128761 +873 85 optimizer.lr 0.003004466909441178 +873 
85 training.batch_size 2.0 +873 85 training.label_smoothing 0.0018292497542492993 +873 86 model.embedding_dim 2.0 +873 86 model.scoring_fct_norm 1.0 +873 86 regularizer.weight 0.12995761195848785 +873 86 optimizer.lr 0.004053355854895077 +873 86 training.batch_size 1.0 +873 86 training.label_smoothing 0.08232668643909741 +873 87 model.embedding_dim 1.0 +873 87 model.scoring_fct_norm 2.0 +873 87 regularizer.weight 0.06544796687265218 +873 87 optimizer.lr 0.040286619645914463 +873 87 training.batch_size 1.0 +873 87 training.label_smoothing 0.9283756818266886 +873 88 model.embedding_dim 0.0 +873 88 model.scoring_fct_norm 2.0 +873 88 regularizer.weight 0.03304361894788547 +873 88 optimizer.lr 0.0081109374614182 +873 88 training.batch_size 0.0 +873 88 training.label_smoothing 0.009288448542731727 +873 89 model.embedding_dim 2.0 +873 89 model.scoring_fct_norm 1.0 +873 89 regularizer.weight 0.06109036466751107 +873 89 optimizer.lr 0.024528176905083243 +873 89 training.batch_size 0.0 +873 89 training.label_smoothing 0.024069886101994387 +873 90 model.embedding_dim 1.0 +873 90 model.scoring_fct_norm 1.0 +873 90 regularizer.weight 0.10612402279722169 +873 90 optimizer.lr 0.002475167502747867 +873 90 training.batch_size 2.0 +873 90 training.label_smoothing 0.8393663343678484 +873 91 model.embedding_dim 1.0 +873 91 model.scoring_fct_norm 2.0 +873 91 regularizer.weight 0.058695738331108485 +873 91 optimizer.lr 0.0035091709380207906 +873 91 training.batch_size 0.0 +873 91 training.label_smoothing 0.9458746707434217 +873 92 model.embedding_dim 2.0 +873 92 model.scoring_fct_norm 2.0 +873 92 regularizer.weight 0.2987911365820848 +873 92 optimizer.lr 0.0017441737041782983 +873 92 training.batch_size 1.0 +873 92 training.label_smoothing 0.00447842530722087 +873 93 model.embedding_dim 0.0 +873 93 model.scoring_fct_norm 2.0 +873 93 regularizer.weight 0.08561130201291277 +873 93 optimizer.lr 0.0030416228876056156 +873 93 training.batch_size 0.0 +873 93 training.label_smoothing 
0.3296153306014725 +873 94 model.embedding_dim 0.0 +873 94 model.scoring_fct_norm 1.0 +873 94 regularizer.weight 0.05805854239331536 +873 94 optimizer.lr 0.006950083589031519 +873 94 training.batch_size 0.0 +873 94 training.label_smoothing 0.021440309185595738 +873 95 model.embedding_dim 2.0 +873 95 model.scoring_fct_norm 1.0 +873 95 regularizer.weight 0.16981832571717664 +873 95 optimizer.lr 0.0024643566405316967 +873 95 training.batch_size 1.0 +873 95 training.label_smoothing 0.22212488353590049 +873 96 model.embedding_dim 1.0 +873 96 model.scoring_fct_norm 2.0 +873 96 regularizer.weight 0.10873133669398659 +873 96 optimizer.lr 0.004253765701767579 +873 96 training.batch_size 2.0 +873 96 training.label_smoothing 0.007317721747692376 +873 97 model.embedding_dim 0.0 +873 97 model.scoring_fct_norm 1.0 +873 97 regularizer.weight 0.061876232418594954 +873 97 optimizer.lr 0.003569264987193957 +873 97 training.batch_size 0.0 +873 97 training.label_smoothing 0.006010502127136315 +873 98 model.embedding_dim 2.0 +873 98 model.scoring_fct_norm 2.0 +873 98 regularizer.weight 0.13064705403114812 +873 98 optimizer.lr 0.010142429980364248 +873 98 training.batch_size 0.0 +873 98 training.label_smoothing 0.00812105023142894 +873 99 model.embedding_dim 0.0 +873 99 model.scoring_fct_norm 1.0 +873 99 regularizer.weight 0.1713347377925937 +873 99 optimizer.lr 0.0015097705073877736 +873 99 training.batch_size 2.0 +873 99 training.label_smoothing 0.002273348751573085 +873 100 model.embedding_dim 1.0 +873 100 model.scoring_fct_norm 2.0 +873 100 regularizer.weight 0.024179402930242008 +873 100 optimizer.lr 0.02843625923693987 +873 100 training.batch_size 2.0 +873 100 training.label_smoothing 0.025191293742277656 +873 1 dataset """kinships""" +873 1 model """transh""" +873 1 loss """crossentropy""" +873 1 regularizer """transh""" +873 1 optimizer """adam""" +873 1 training_loop """lcwa""" +873 1 evaluator """rankbased""" +873 2 dataset """kinships""" +873 2 model """transh""" +873 2 loss 
"""crossentropy""" +873 2 regularizer """transh""" +873 2 optimizer """adam""" +873 2 training_loop """lcwa""" +873 2 evaluator """rankbased""" +873 3 dataset """kinships""" +873 3 model """transh""" +873 3 loss """crossentropy""" +873 3 regularizer """transh""" +873 3 optimizer """adam""" +873 3 training_loop """lcwa""" +873 3 evaluator """rankbased""" +873 4 dataset """kinships""" +873 4 model """transh""" +873 4 loss """crossentropy""" +873 4 regularizer """transh""" +873 4 optimizer """adam""" +873 4 training_loop """lcwa""" +873 4 evaluator """rankbased""" +873 5 dataset """kinships""" +873 5 model """transh""" +873 5 loss """crossentropy""" +873 5 regularizer """transh""" +873 5 optimizer """adam""" +873 5 training_loop """lcwa""" +873 5 evaluator """rankbased""" +873 6 dataset """kinships""" +873 6 model """transh""" +873 6 loss """crossentropy""" +873 6 regularizer """transh""" +873 6 optimizer """adam""" +873 6 training_loop """lcwa""" +873 6 evaluator """rankbased""" +873 7 dataset """kinships""" +873 7 model """transh""" +873 7 loss """crossentropy""" +873 7 regularizer """transh""" +873 7 optimizer """adam""" +873 7 training_loop """lcwa""" +873 7 evaluator """rankbased""" +873 8 dataset """kinships""" +873 8 model """transh""" +873 8 loss """crossentropy""" +873 8 regularizer """transh""" +873 8 optimizer """adam""" +873 8 training_loop """lcwa""" +873 8 evaluator """rankbased""" +873 9 dataset """kinships""" +873 9 model """transh""" +873 9 loss """crossentropy""" +873 9 regularizer """transh""" +873 9 optimizer """adam""" +873 9 training_loop """lcwa""" +873 9 evaluator """rankbased""" +873 10 dataset """kinships""" +873 10 model """transh""" +873 10 loss """crossentropy""" +873 10 regularizer """transh""" +873 10 optimizer """adam""" +873 10 training_loop """lcwa""" +873 10 evaluator """rankbased""" +873 11 dataset """kinships""" +873 11 model """transh""" +873 11 loss """crossentropy""" +873 11 regularizer """transh""" +873 11 optimizer """adam""" 
+873 11 training_loop """lcwa""" +873 11 evaluator """rankbased""" +873 12 dataset """kinships""" +873 12 model """transh""" +873 12 loss """crossentropy""" +873 12 regularizer """transh""" +873 12 optimizer """adam""" +873 12 training_loop """lcwa""" +873 12 evaluator """rankbased""" +873 13 dataset """kinships""" +873 13 model """transh""" +873 13 loss """crossentropy""" +873 13 regularizer """transh""" +873 13 optimizer """adam""" +873 13 training_loop """lcwa""" +873 13 evaluator """rankbased""" +873 14 dataset """kinships""" +873 14 model """transh""" +873 14 loss """crossentropy""" +873 14 regularizer """transh""" +873 14 optimizer """adam""" +873 14 training_loop """lcwa""" +873 14 evaluator """rankbased""" +873 15 dataset """kinships""" +873 15 model """transh""" +873 15 loss """crossentropy""" +873 15 regularizer """transh""" +873 15 optimizer """adam""" +873 15 training_loop """lcwa""" +873 15 evaluator """rankbased""" +873 16 dataset """kinships""" +873 16 model """transh""" +873 16 loss """crossentropy""" +873 16 regularizer """transh""" +873 16 optimizer """adam""" +873 16 training_loop """lcwa""" +873 16 evaluator """rankbased""" +873 17 dataset """kinships""" +873 17 model """transh""" +873 17 loss """crossentropy""" +873 17 regularizer """transh""" +873 17 optimizer """adam""" +873 17 training_loop """lcwa""" +873 17 evaluator """rankbased""" +873 18 dataset """kinships""" +873 18 model """transh""" +873 18 loss """crossentropy""" +873 18 regularizer """transh""" +873 18 optimizer """adam""" +873 18 training_loop """lcwa""" +873 18 evaluator """rankbased""" +873 19 dataset """kinships""" +873 19 model """transh""" +873 19 loss """crossentropy""" +873 19 regularizer """transh""" +873 19 optimizer """adam""" +873 19 training_loop """lcwa""" +873 19 evaluator """rankbased""" +873 20 dataset """kinships""" +873 20 model """transh""" +873 20 loss """crossentropy""" +873 20 regularizer """transh""" +873 20 optimizer """adam""" +873 20 training_loop 
"""lcwa""" +873 20 evaluator """rankbased""" +873 21 dataset """kinships""" +873 21 model """transh""" +873 21 loss """crossentropy""" +873 21 regularizer """transh""" +873 21 optimizer """adam""" +873 21 training_loop """lcwa""" +873 21 evaluator """rankbased""" +873 22 dataset """kinships""" +873 22 model """transh""" +873 22 loss """crossentropy""" +873 22 regularizer """transh""" +873 22 optimizer """adam""" +873 22 training_loop """lcwa""" +873 22 evaluator """rankbased""" +873 23 dataset """kinships""" +873 23 model """transh""" +873 23 loss """crossentropy""" +873 23 regularizer """transh""" +873 23 optimizer """adam""" +873 23 training_loop """lcwa""" +873 23 evaluator """rankbased""" +873 24 dataset """kinships""" +873 24 model """transh""" +873 24 loss """crossentropy""" +873 24 regularizer """transh""" +873 24 optimizer """adam""" +873 24 training_loop """lcwa""" +873 24 evaluator """rankbased""" +873 25 dataset """kinships""" +873 25 model """transh""" +873 25 loss """crossentropy""" +873 25 regularizer """transh""" +873 25 optimizer """adam""" +873 25 training_loop """lcwa""" +873 25 evaluator """rankbased""" +873 26 dataset """kinships""" +873 26 model """transh""" +873 26 loss """crossentropy""" +873 26 regularizer """transh""" +873 26 optimizer """adam""" +873 26 training_loop """lcwa""" +873 26 evaluator """rankbased""" +873 27 dataset """kinships""" +873 27 model """transh""" +873 27 loss """crossentropy""" +873 27 regularizer """transh""" +873 27 optimizer """adam""" +873 27 training_loop """lcwa""" +873 27 evaluator """rankbased""" +873 28 dataset """kinships""" +873 28 model """transh""" +873 28 loss """crossentropy""" +873 28 regularizer """transh""" +873 28 optimizer """adam""" +873 28 training_loop """lcwa""" +873 28 evaluator """rankbased""" +873 29 dataset """kinships""" +873 29 model """transh""" +873 29 loss """crossentropy""" +873 29 regularizer """transh""" +873 29 optimizer """adam""" +873 29 training_loop """lcwa""" +873 29 evaluator 
"""rankbased""" +873 30 dataset """kinships""" +873 30 model """transh""" +873 30 loss """crossentropy""" +873 30 regularizer """transh""" +873 30 optimizer """adam""" +873 30 training_loop """lcwa""" +873 30 evaluator """rankbased""" +873 31 dataset """kinships""" +873 31 model """transh""" +873 31 loss """crossentropy""" +873 31 regularizer """transh""" +873 31 optimizer """adam""" +873 31 training_loop """lcwa""" +873 31 evaluator """rankbased""" +873 32 dataset """kinships""" +873 32 model """transh""" +873 32 loss """crossentropy""" +873 32 regularizer """transh""" +873 32 optimizer """adam""" +873 32 training_loop """lcwa""" +873 32 evaluator """rankbased""" +873 33 dataset """kinships""" +873 33 model """transh""" +873 33 loss """crossentropy""" +873 33 regularizer """transh""" +873 33 optimizer """adam""" +873 33 training_loop """lcwa""" +873 33 evaluator """rankbased""" +873 34 dataset """kinships""" +873 34 model """transh""" +873 34 loss """crossentropy""" +873 34 regularizer """transh""" +873 34 optimizer """adam""" +873 34 training_loop """lcwa""" +873 34 evaluator """rankbased""" +873 35 dataset """kinships""" +873 35 model """transh""" +873 35 loss """crossentropy""" +873 35 regularizer """transh""" +873 35 optimizer """adam""" +873 35 training_loop """lcwa""" +873 35 evaluator """rankbased""" +873 36 dataset """kinships""" +873 36 model """transh""" +873 36 loss """crossentropy""" +873 36 regularizer """transh""" +873 36 optimizer """adam""" +873 36 training_loop """lcwa""" +873 36 evaluator """rankbased""" +873 37 dataset """kinships""" +873 37 model """transh""" +873 37 loss """crossentropy""" +873 37 regularizer """transh""" +873 37 optimizer """adam""" +873 37 training_loop """lcwa""" +873 37 evaluator """rankbased""" +873 38 dataset """kinships""" +873 38 model """transh""" +873 38 loss """crossentropy""" +873 38 regularizer """transh""" +873 38 optimizer """adam""" +873 38 training_loop """lcwa""" +873 38 evaluator """rankbased""" +873 39 
dataset """kinships""" +873 39 model """transh""" +873 39 loss """crossentropy""" +873 39 regularizer """transh""" +873 39 optimizer """adam""" +873 39 training_loop """lcwa""" +873 39 evaluator """rankbased""" +873 40 dataset """kinships""" +873 40 model """transh""" +873 40 loss """crossentropy""" +873 40 regularizer """transh""" +873 40 optimizer """adam""" +873 40 training_loop """lcwa""" +873 40 evaluator """rankbased""" +873 41 dataset """kinships""" +873 41 model """transh""" +873 41 loss """crossentropy""" +873 41 regularizer """transh""" +873 41 optimizer """adam""" +873 41 training_loop """lcwa""" +873 41 evaluator """rankbased""" +873 42 dataset """kinships""" +873 42 model """transh""" +873 42 loss """crossentropy""" +873 42 regularizer """transh""" +873 42 optimizer """adam""" +873 42 training_loop """lcwa""" +873 42 evaluator """rankbased""" +873 43 dataset """kinships""" +873 43 model """transh""" +873 43 loss """crossentropy""" +873 43 regularizer """transh""" +873 43 optimizer """adam""" +873 43 training_loop """lcwa""" +873 43 evaluator """rankbased""" +873 44 dataset """kinships""" +873 44 model """transh""" +873 44 loss """crossentropy""" +873 44 regularizer """transh""" +873 44 optimizer """adam""" +873 44 training_loop """lcwa""" +873 44 evaluator """rankbased""" +873 45 dataset """kinships""" +873 45 model """transh""" +873 45 loss """crossentropy""" +873 45 regularizer """transh""" +873 45 optimizer """adam""" +873 45 training_loop """lcwa""" +873 45 evaluator """rankbased""" +873 46 dataset """kinships""" +873 46 model """transh""" +873 46 loss """crossentropy""" +873 46 regularizer """transh""" +873 46 optimizer """adam""" +873 46 training_loop """lcwa""" +873 46 evaluator """rankbased""" +873 47 dataset """kinships""" +873 47 model """transh""" +873 47 loss """crossentropy""" +873 47 regularizer """transh""" +873 47 optimizer """adam""" +873 47 training_loop """lcwa""" +873 47 evaluator """rankbased""" +873 48 dataset """kinships""" +873 
48 model """transh""" +873 48 loss """crossentropy""" +873 48 regularizer """transh""" +873 48 optimizer """adam""" +873 48 training_loop """lcwa""" +873 48 evaluator """rankbased""" +873 49 dataset """kinships""" +873 49 model """transh""" +873 49 loss """crossentropy""" +873 49 regularizer """transh""" +873 49 optimizer """adam""" +873 49 training_loop """lcwa""" +873 49 evaluator """rankbased""" +873 50 dataset """kinships""" +873 50 model """transh""" +873 50 loss """crossentropy""" +873 50 regularizer """transh""" +873 50 optimizer """adam""" +873 50 training_loop """lcwa""" +873 50 evaluator """rankbased""" +873 51 dataset """kinships""" +873 51 model """transh""" +873 51 loss """crossentropy""" +873 51 regularizer """transh""" +873 51 optimizer """adam""" +873 51 training_loop """lcwa""" +873 51 evaluator """rankbased""" +873 52 dataset """kinships""" +873 52 model """transh""" +873 52 loss """crossentropy""" +873 52 regularizer """transh""" +873 52 optimizer """adam""" +873 52 training_loop """lcwa""" +873 52 evaluator """rankbased""" +873 53 dataset """kinships""" +873 53 model """transh""" +873 53 loss """crossentropy""" +873 53 regularizer """transh""" +873 53 optimizer """adam""" +873 53 training_loop """lcwa""" +873 53 evaluator """rankbased""" +873 54 dataset """kinships""" +873 54 model """transh""" +873 54 loss """crossentropy""" +873 54 regularizer """transh""" +873 54 optimizer """adam""" +873 54 training_loop """lcwa""" +873 54 evaluator """rankbased""" +873 55 dataset """kinships""" +873 55 model """transh""" +873 55 loss """crossentropy""" +873 55 regularizer """transh""" +873 55 optimizer """adam""" +873 55 training_loop """lcwa""" +873 55 evaluator """rankbased""" +873 56 dataset """kinships""" +873 56 model """transh""" +873 56 loss """crossentropy""" +873 56 regularizer """transh""" +873 56 optimizer """adam""" +873 56 training_loop """lcwa""" +873 56 evaluator """rankbased""" +873 57 dataset """kinships""" +873 57 model """transh""" +873 
57 loss """crossentropy""" +873 57 regularizer """transh""" +873 57 optimizer """adam""" +873 57 training_loop """lcwa""" +873 57 evaluator """rankbased""" +873 58 dataset """kinships""" +873 58 model """transh""" +873 58 loss """crossentropy""" +873 58 regularizer """transh""" +873 58 optimizer """adam""" +873 58 training_loop """lcwa""" +873 58 evaluator """rankbased""" +873 59 dataset """kinships""" +873 59 model """transh""" +873 59 loss """crossentropy""" +873 59 regularizer """transh""" +873 59 optimizer """adam""" +873 59 training_loop """lcwa""" +873 59 evaluator """rankbased""" +873 60 dataset """kinships""" +873 60 model """transh""" +873 60 loss """crossentropy""" +873 60 regularizer """transh""" +873 60 optimizer """adam""" +873 60 training_loop """lcwa""" +873 60 evaluator """rankbased""" +873 61 dataset """kinships""" +873 61 model """transh""" +873 61 loss """crossentropy""" +873 61 regularizer """transh""" +873 61 optimizer """adam""" +873 61 training_loop """lcwa""" +873 61 evaluator """rankbased""" +873 62 dataset """kinships""" +873 62 model """transh""" +873 62 loss """crossentropy""" +873 62 regularizer """transh""" +873 62 optimizer """adam""" +873 62 training_loop """lcwa""" +873 62 evaluator """rankbased""" +873 63 dataset """kinships""" +873 63 model """transh""" +873 63 loss """crossentropy""" +873 63 regularizer """transh""" +873 63 optimizer """adam""" +873 63 training_loop """lcwa""" +873 63 evaluator """rankbased""" +873 64 dataset """kinships""" +873 64 model """transh""" +873 64 loss """crossentropy""" +873 64 regularizer """transh""" +873 64 optimizer """adam""" +873 64 training_loop """lcwa""" +873 64 evaluator """rankbased""" +873 65 dataset """kinships""" +873 65 model """transh""" +873 65 loss """crossentropy""" +873 65 regularizer """transh""" +873 65 optimizer """adam""" +873 65 training_loop """lcwa""" +873 65 evaluator """rankbased""" +873 66 dataset """kinships""" +873 66 model """transh""" +873 66 loss """crossentropy""" 
+873 66 regularizer """transh""" +873 66 optimizer """adam""" +873 66 training_loop """lcwa""" +873 66 evaluator """rankbased""" +873 67 dataset """kinships""" +873 67 model """transh""" +873 67 loss """crossentropy""" +873 67 regularizer """transh""" +873 67 optimizer """adam""" +873 67 training_loop """lcwa""" +873 67 evaluator """rankbased""" +873 68 dataset """kinships""" +873 68 model """transh""" +873 68 loss """crossentropy""" +873 68 regularizer """transh""" +873 68 optimizer """adam""" +873 68 training_loop """lcwa""" +873 68 evaluator """rankbased""" +873 69 dataset """kinships""" +873 69 model """transh""" +873 69 loss """crossentropy""" +873 69 regularizer """transh""" +873 69 optimizer """adam""" +873 69 training_loop """lcwa""" +873 69 evaluator """rankbased""" +873 70 dataset """kinships""" +873 70 model """transh""" +873 70 loss """crossentropy""" +873 70 regularizer """transh""" +873 70 optimizer """adam""" +873 70 training_loop """lcwa""" +873 70 evaluator """rankbased""" +873 71 dataset """kinships""" +873 71 model """transh""" +873 71 loss """crossentropy""" +873 71 regularizer """transh""" +873 71 optimizer """adam""" +873 71 training_loop """lcwa""" +873 71 evaluator """rankbased""" +873 72 dataset """kinships""" +873 72 model """transh""" +873 72 loss """crossentropy""" +873 72 regularizer """transh""" +873 72 optimizer """adam""" +873 72 training_loop """lcwa""" +873 72 evaluator """rankbased""" +873 73 dataset """kinships""" +873 73 model """transh""" +873 73 loss """crossentropy""" +873 73 regularizer """transh""" +873 73 optimizer """adam""" +873 73 training_loop """lcwa""" +873 73 evaluator """rankbased""" +873 74 dataset """kinships""" +873 74 model """transh""" +873 74 loss """crossentropy""" +873 74 regularizer """transh""" +873 74 optimizer """adam""" +873 74 training_loop """lcwa""" +873 74 evaluator """rankbased""" +873 75 dataset """kinships""" +873 75 model """transh""" +873 75 loss """crossentropy""" +873 75 regularizer 
"""transh""" +873 75 optimizer """adam""" +873 75 training_loop """lcwa""" +873 75 evaluator """rankbased""" +873 76 dataset """kinships""" +873 76 model """transh""" +873 76 loss """crossentropy""" +873 76 regularizer """transh""" +873 76 optimizer """adam""" +873 76 training_loop """lcwa""" +873 76 evaluator """rankbased""" +873 77 dataset """kinships""" +873 77 model """transh""" +873 77 loss """crossentropy""" +873 77 regularizer """transh""" +873 77 optimizer """adam""" +873 77 training_loop """lcwa""" +873 77 evaluator """rankbased""" +873 78 dataset """kinships""" +873 78 model """transh""" +873 78 loss """crossentropy""" +873 78 regularizer """transh""" +873 78 optimizer """adam""" +873 78 training_loop """lcwa""" +873 78 evaluator """rankbased""" +873 79 dataset """kinships""" +873 79 model """transh""" +873 79 loss """crossentropy""" +873 79 regularizer """transh""" +873 79 optimizer """adam""" +873 79 training_loop """lcwa""" +873 79 evaluator """rankbased""" +873 80 dataset """kinships""" +873 80 model """transh""" +873 80 loss """crossentropy""" +873 80 regularizer """transh""" +873 80 optimizer """adam""" +873 80 training_loop """lcwa""" +873 80 evaluator """rankbased""" +873 81 dataset """kinships""" +873 81 model """transh""" +873 81 loss """crossentropy""" +873 81 regularizer """transh""" +873 81 optimizer """adam""" +873 81 training_loop """lcwa""" +873 81 evaluator """rankbased""" +873 82 dataset """kinships""" +873 82 model """transh""" +873 82 loss """crossentropy""" +873 82 regularizer """transh""" +873 82 optimizer """adam""" +873 82 training_loop """lcwa""" +873 82 evaluator """rankbased""" +873 83 dataset """kinships""" +873 83 model """transh""" +873 83 loss """crossentropy""" +873 83 regularizer """transh""" +873 83 optimizer """adam""" +873 83 training_loop """lcwa""" +873 83 evaluator """rankbased""" +873 84 dataset """kinships""" +873 84 model """transh""" +873 84 loss """crossentropy""" +873 84 regularizer """transh""" +873 84 
optimizer """adam""" +873 84 training_loop """lcwa""" +873 84 evaluator """rankbased""" +873 85 dataset """kinships""" +873 85 model """transh""" +873 85 loss """crossentropy""" +873 85 regularizer """transh""" +873 85 optimizer """adam""" +873 85 training_loop """lcwa""" +873 85 evaluator """rankbased""" +873 86 dataset """kinships""" +873 86 model """transh""" +873 86 loss """crossentropy""" +873 86 regularizer """transh""" +873 86 optimizer """adam""" +873 86 training_loop """lcwa""" +873 86 evaluator """rankbased""" +873 87 dataset """kinships""" +873 87 model """transh""" +873 87 loss """crossentropy""" +873 87 regularizer """transh""" +873 87 optimizer """adam""" +873 87 training_loop """lcwa""" +873 87 evaluator """rankbased""" +873 88 dataset """kinships""" +873 88 model """transh""" +873 88 loss """crossentropy""" +873 88 regularizer """transh""" +873 88 optimizer """adam""" +873 88 training_loop """lcwa""" +873 88 evaluator """rankbased""" +873 89 dataset """kinships""" +873 89 model """transh""" +873 89 loss """crossentropy""" +873 89 regularizer """transh""" +873 89 optimizer """adam""" +873 89 training_loop """lcwa""" +873 89 evaluator """rankbased""" +873 90 dataset """kinships""" +873 90 model """transh""" +873 90 loss """crossentropy""" +873 90 regularizer """transh""" +873 90 optimizer """adam""" +873 90 training_loop """lcwa""" +873 90 evaluator """rankbased""" +873 91 dataset """kinships""" +873 91 model """transh""" +873 91 loss """crossentropy""" +873 91 regularizer """transh""" +873 91 optimizer """adam""" +873 91 training_loop """lcwa""" +873 91 evaluator """rankbased""" +873 92 dataset """kinships""" +873 92 model """transh""" +873 92 loss """crossentropy""" +873 92 regularizer """transh""" +873 92 optimizer """adam""" +873 92 training_loop """lcwa""" +873 92 evaluator """rankbased""" +873 93 dataset """kinships""" +873 93 model """transh""" +873 93 loss """crossentropy""" +873 93 regularizer """transh""" +873 93 optimizer """adam""" +873 93 
training_loop """lcwa""" +873 93 evaluator """rankbased""" +873 94 dataset """kinships""" +873 94 model """transh""" +873 94 loss """crossentropy""" +873 94 regularizer """transh""" +873 94 optimizer """adam""" +873 94 training_loop """lcwa""" +873 94 evaluator """rankbased""" +873 95 dataset """kinships""" +873 95 model """transh""" +873 95 loss """crossentropy""" +873 95 regularizer """transh""" +873 95 optimizer """adam""" +873 95 training_loop """lcwa""" +873 95 evaluator """rankbased""" +873 96 dataset """kinships""" +873 96 model """transh""" +873 96 loss """crossentropy""" +873 96 regularizer """transh""" +873 96 optimizer """adam""" +873 96 training_loop """lcwa""" +873 96 evaluator """rankbased""" +873 97 dataset """kinships""" +873 97 model """transh""" +873 97 loss """crossentropy""" +873 97 regularizer """transh""" +873 97 optimizer """adam""" +873 97 training_loop """lcwa""" +873 97 evaluator """rankbased""" +873 98 dataset """kinships""" +873 98 model """transh""" +873 98 loss """crossentropy""" +873 98 regularizer """transh""" +873 98 optimizer """adam""" +873 98 training_loop """lcwa""" +873 98 evaluator """rankbased""" +873 99 dataset """kinships""" +873 99 model """transh""" +873 99 loss """crossentropy""" +873 99 regularizer """transh""" +873 99 optimizer """adam""" +873 99 training_loop """lcwa""" +873 99 evaluator """rankbased""" +873 100 dataset """kinships""" +873 100 model """transh""" +873 100 loss """crossentropy""" +873 100 regularizer """transh""" +873 100 optimizer """adam""" +873 100 training_loop """lcwa""" +873 100 evaluator """rankbased""" +874 1 model.embedding_dim 1.0 +874 1 model.scoring_fct_norm 2.0 +874 1 regularizer.weight 0.03864664807057361 +874 1 optimizer.lr 0.0018506751307987713 +874 1 training.batch_size 2.0 +874 1 training.label_smoothing 0.05253337633765147 +874 2 model.embedding_dim 2.0 +874 2 model.scoring_fct_norm 1.0 +874 2 regularizer.weight 0.04116479177071508 +874 2 optimizer.lr 0.0016105516950231824 +874 2 
training.batch_size 0.0 +874 2 training.label_smoothing 0.03856917713921187 +874 3 model.embedding_dim 1.0 +874 3 model.scoring_fct_norm 1.0 +874 3 regularizer.weight 0.01075827852890302 +874 3 optimizer.lr 0.0029980080858204687 +874 3 training.batch_size 1.0 +874 3 training.label_smoothing 0.011129140596820925 +874 4 model.embedding_dim 0.0 +874 4 model.scoring_fct_norm 1.0 +874 4 regularizer.weight 0.08282502285701394 +874 4 optimizer.lr 0.007169778983787707 +874 4 training.batch_size 1.0 +874 4 training.label_smoothing 0.0012268652166989124 +874 5 model.embedding_dim 0.0 +874 5 model.scoring_fct_norm 1.0 +874 5 regularizer.weight 0.03043581704434002 +874 5 optimizer.lr 0.0016635189990291438 +874 5 training.batch_size 0.0 +874 5 training.label_smoothing 0.002524615989528562 +874 6 model.embedding_dim 0.0 +874 6 model.scoring_fct_norm 2.0 +874 6 regularizer.weight 0.22320692483135293 +874 6 optimizer.lr 0.060079231625654664 +874 6 training.batch_size 1.0 +874 6 training.label_smoothing 0.0011726575755339182 +874 7 model.embedding_dim 2.0 +874 7 model.scoring_fct_norm 1.0 +874 7 regularizer.weight 0.19643769860584442 +874 7 optimizer.lr 0.009954241300089123 +874 7 training.batch_size 1.0 +874 7 training.label_smoothing 0.005202708611576289 +874 8 model.embedding_dim 1.0 +874 8 model.scoring_fct_norm 2.0 +874 8 regularizer.weight 0.013467542218844537 +874 8 optimizer.lr 0.0022842615675598204 +874 8 training.batch_size 2.0 +874 8 training.label_smoothing 0.0015119863870505705 +874 9 model.embedding_dim 1.0 +874 9 model.scoring_fct_norm 1.0 +874 9 regularizer.weight 0.06819355010091344 +874 9 optimizer.lr 0.06193178546395608 +874 9 training.batch_size 0.0 +874 9 training.label_smoothing 0.041180013420197885 +874 10 model.embedding_dim 2.0 +874 10 model.scoring_fct_norm 1.0 +874 10 regularizer.weight 0.035117576459556515 +874 10 optimizer.lr 0.0012092347543852706 +874 10 training.batch_size 1.0 +874 10 training.label_smoothing 0.005892689816970684 +874 11 
model.embedding_dim 0.0 +874 11 model.scoring_fct_norm 1.0 +874 11 regularizer.weight 0.1385221476573736 +874 11 optimizer.lr 0.07834740249543083 +874 11 training.batch_size 0.0 +874 11 training.label_smoothing 0.042163925319633394 +874 12 model.embedding_dim 2.0 +874 12 model.scoring_fct_norm 1.0 +874 12 regularizer.weight 0.011131680891893259 +874 12 optimizer.lr 0.04500160590432381 +874 12 training.batch_size 1.0 +874 12 training.label_smoothing 0.0010813736235619493 +874 13 model.embedding_dim 1.0 +874 13 model.scoring_fct_norm 2.0 +874 13 regularizer.weight 0.0523738430148541 +874 13 optimizer.lr 0.029300253736445053 +874 13 training.batch_size 2.0 +874 13 training.label_smoothing 0.06018694272174792 +874 14 model.embedding_dim 0.0 +874 14 model.scoring_fct_norm 1.0 +874 14 regularizer.weight 0.04085001230654592 +874 14 optimizer.lr 0.002079131641372481 +874 14 training.batch_size 2.0 +874 14 training.label_smoothing 0.23212256312236873 +874 15 model.embedding_dim 1.0 +874 15 model.scoring_fct_norm 2.0 +874 15 regularizer.weight 0.040093347528051354 +874 15 optimizer.lr 0.009032677650921766 +874 15 training.batch_size 1.0 +874 15 training.label_smoothing 0.014673746315855241 +874 16 model.embedding_dim 1.0 +874 16 model.scoring_fct_norm 2.0 +874 16 regularizer.weight 0.044820115476416796 +874 16 optimizer.lr 0.006547119547479234 +874 16 training.batch_size 2.0 +874 16 training.label_smoothing 0.10687765420940257 +874 17 model.embedding_dim 1.0 +874 17 model.scoring_fct_norm 1.0 +874 17 regularizer.weight 0.24939118639186375 +874 17 optimizer.lr 0.019491578659550427 +874 17 training.batch_size 1.0 +874 17 training.label_smoothing 0.0032838067810786182 +874 18 model.embedding_dim 2.0 +874 18 model.scoring_fct_norm 1.0 +874 18 regularizer.weight 0.017037033155688063 +874 18 optimizer.lr 0.005377560168843179 +874 18 training.batch_size 2.0 +874 18 training.label_smoothing 0.04119603301122083 +874 19 model.embedding_dim 0.0 +874 19 model.scoring_fct_norm 2.0 +874 
19 regularizer.weight 0.05821100847832866 +874 19 optimizer.lr 0.07295275476597998 +874 19 training.batch_size 1.0 +874 19 training.label_smoothing 0.0013933891457806812 +874 20 model.embedding_dim 1.0 +874 20 model.scoring_fct_norm 1.0 +874 20 regularizer.weight 0.2679527338224795 +874 20 optimizer.lr 0.0025727746088639396 +874 20 training.batch_size 1.0 +874 20 training.label_smoothing 0.010594396194355312 +874 21 model.embedding_dim 1.0 +874 21 model.scoring_fct_norm 1.0 +874 21 regularizer.weight 0.011329691964001309 +874 21 optimizer.lr 0.02352166370243492 +874 21 training.batch_size 2.0 +874 21 training.label_smoothing 0.08005540343917439 +874 22 model.embedding_dim 0.0 +874 22 model.scoring_fct_norm 2.0 +874 22 regularizer.weight 0.024533101621823115 +874 22 optimizer.lr 0.011204905705996904 +874 22 training.batch_size 0.0 +874 22 training.label_smoothing 0.00210194101123406 +874 23 model.embedding_dim 1.0 +874 23 model.scoring_fct_norm 1.0 +874 23 regularizer.weight 0.030772016109447797 +874 23 optimizer.lr 0.0032410034757958616 +874 23 training.batch_size 2.0 +874 23 training.label_smoothing 0.0022493995361646904 +874 24 model.embedding_dim 1.0 +874 24 model.scoring_fct_norm 1.0 +874 24 regularizer.weight 0.012607613073603856 +874 24 optimizer.lr 0.03208881748209162 +874 24 training.batch_size 1.0 +874 24 training.label_smoothing 0.051692065099026116 +874 25 model.embedding_dim 2.0 +874 25 model.scoring_fct_norm 1.0 +874 25 regularizer.weight 0.09165640212661313 +874 25 optimizer.lr 0.005171505786125521 +874 25 training.batch_size 1.0 +874 25 training.label_smoothing 0.09827819696544207 +874 26 model.embedding_dim 0.0 +874 26 model.scoring_fct_norm 2.0 +874 26 regularizer.weight 0.05464276104961863 +874 26 optimizer.lr 0.006029392210046956 +874 26 training.batch_size 2.0 +874 26 training.label_smoothing 0.21604307424132674 +874 27 model.embedding_dim 0.0 +874 27 model.scoring_fct_norm 1.0 +874 27 regularizer.weight 0.09026831301141662 +874 27 optimizer.lr 
0.017933023771765817 +874 27 training.batch_size 0.0 +874 27 training.label_smoothing 0.012649495015455403 +874 28 model.embedding_dim 2.0 +874 28 model.scoring_fct_norm 2.0 +874 28 regularizer.weight 0.010555175999532623 +874 28 optimizer.lr 0.0075941180222119375 +874 28 training.batch_size 0.0 +874 28 training.label_smoothing 0.04014296104822662 +874 29 model.embedding_dim 2.0 +874 29 model.scoring_fct_norm 1.0 +874 29 regularizer.weight 0.010711974228168303 +874 29 optimizer.lr 0.049019455863652694 +874 29 training.batch_size 1.0 +874 29 training.label_smoothing 0.0035693721096025886 +874 30 model.embedding_dim 1.0 +874 30 model.scoring_fct_norm 1.0 +874 30 regularizer.weight 0.016896418273455227 +874 30 optimizer.lr 0.0012552250384094915 +874 30 training.batch_size 0.0 +874 30 training.label_smoothing 0.005310446703198696 +874 31 model.embedding_dim 0.0 +874 31 model.scoring_fct_norm 1.0 +874 31 regularizer.weight 0.017236393179659554 +874 31 optimizer.lr 0.0023005435947107225 +874 31 training.batch_size 1.0 +874 31 training.label_smoothing 0.014744553911768661 +874 32 model.embedding_dim 0.0 +874 32 model.scoring_fct_norm 2.0 +874 32 regularizer.weight 0.03180710108090128 +874 32 optimizer.lr 0.004743535638931299 +874 32 training.batch_size 0.0 +874 32 training.label_smoothing 0.0026250803418505484 +874 33 model.embedding_dim 2.0 +874 33 model.scoring_fct_norm 1.0 +874 33 regularizer.weight 0.10993990509623214 +874 33 optimizer.lr 0.0014552686794382463 +874 33 training.batch_size 1.0 +874 33 training.label_smoothing 0.002360207383701794 +874 34 model.embedding_dim 1.0 +874 34 model.scoring_fct_norm 2.0 +874 34 regularizer.weight 0.014364145578722403 +874 34 optimizer.lr 0.02846284587600906 +874 34 training.batch_size 0.0 +874 34 training.label_smoothing 0.15597544027447593 +874 35 model.embedding_dim 1.0 +874 35 model.scoring_fct_norm 1.0 +874 35 regularizer.weight 0.04153184365879201 +874 35 optimizer.lr 0.006022094619842153 +874 35 training.batch_size 2.0 
+874 35 training.label_smoothing 0.015401302618370588 +874 36 model.embedding_dim 1.0 +874 36 model.scoring_fct_norm 2.0 +874 36 regularizer.weight 0.012626198317012323 +874 36 optimizer.lr 0.017104698617839094 +874 36 training.batch_size 2.0 +874 36 training.label_smoothing 0.46685044016823696 +874 37 model.embedding_dim 2.0 +874 37 model.scoring_fct_norm 2.0 +874 37 regularizer.weight 0.02597450387863345 +874 37 optimizer.lr 0.018715529225319313 +874 37 training.batch_size 1.0 +874 37 training.label_smoothing 0.02745531985116591 +874 38 model.embedding_dim 1.0 +874 38 model.scoring_fct_norm 2.0 +874 38 regularizer.weight 0.030154888295157042 +874 38 optimizer.lr 0.01831549373475105 +874 38 training.batch_size 0.0 +874 38 training.label_smoothing 0.003353689543990481 +874 39 model.embedding_dim 1.0 +874 39 model.scoring_fct_norm 1.0 +874 39 regularizer.weight 0.061562034993070024 +874 39 optimizer.lr 0.01594277168872941 +874 39 training.batch_size 0.0 +874 39 training.label_smoothing 0.0013728097265382714 +874 40 model.embedding_dim 1.0 +874 40 model.scoring_fct_norm 1.0 +874 40 regularizer.weight 0.1026562394886907 +874 40 optimizer.lr 0.0067025202371733174 +874 40 training.batch_size 1.0 +874 40 training.label_smoothing 0.08367190570889439 +874 41 model.embedding_dim 1.0 +874 41 model.scoring_fct_norm 1.0 +874 41 regularizer.weight 0.07855774226151778 +874 41 optimizer.lr 0.05358131662079392 +874 41 training.batch_size 2.0 +874 41 training.label_smoothing 0.073850715716627 +874 42 model.embedding_dim 2.0 +874 42 model.scoring_fct_norm 1.0 +874 42 regularizer.weight 0.0175509376937004 +874 42 optimizer.lr 0.013631405246933933 +874 42 training.batch_size 2.0 +874 42 training.label_smoothing 0.0011663943672886885 +874 43 model.embedding_dim 2.0 +874 43 model.scoring_fct_norm 1.0 +874 43 regularizer.weight 0.023953231077590554 +874 43 optimizer.lr 0.017077270601755606 +874 43 training.batch_size 0.0 +874 43 training.label_smoothing 0.009718725612999744 +874 44 
model.embedding_dim 1.0 +874 44 model.scoring_fct_norm 1.0 +874 44 regularizer.weight 0.11881109676925572 +874 44 optimizer.lr 0.012035723402275347 +874 44 training.batch_size 0.0 +874 44 training.label_smoothing 0.015696166812563625 +874 45 model.embedding_dim 2.0 +874 45 model.scoring_fct_norm 1.0 +874 45 regularizer.weight 0.05932526340434218 +874 45 optimizer.lr 0.011907707715298371 +874 45 training.batch_size 2.0 +874 45 training.label_smoothing 0.1659071539098804 +874 46 model.embedding_dim 1.0 +874 46 model.scoring_fct_norm 2.0 +874 46 regularizer.weight 0.01143857422667591 +874 46 optimizer.lr 0.006720601350211613 +874 46 training.batch_size 2.0 +874 46 training.label_smoothing 0.0022065550559148875 +874 47 model.embedding_dim 2.0 +874 47 model.scoring_fct_norm 1.0 +874 47 regularizer.weight 0.14908794790533614 +874 47 optimizer.lr 0.008624556568362042 +874 47 training.batch_size 1.0 +874 47 training.label_smoothing 0.057768190002704764 +874 48 model.embedding_dim 2.0 +874 48 model.scoring_fct_norm 1.0 +874 48 regularizer.weight 0.013136979118169998 +874 48 optimizer.lr 0.0467360151876648 +874 48 training.batch_size 0.0 +874 48 training.label_smoothing 0.001721123401836221 +874 49 model.embedding_dim 1.0 +874 49 model.scoring_fct_norm 1.0 +874 49 regularizer.weight 0.24560260032200182 +874 49 optimizer.lr 0.008263514847435417 +874 49 training.batch_size 0.0 +874 49 training.label_smoothing 0.285148646771924 +874 50 model.embedding_dim 2.0 +874 50 model.scoring_fct_norm 1.0 +874 50 regularizer.weight 0.16481228991428346 +874 50 optimizer.lr 0.017341964735305897 +874 50 training.batch_size 0.0 +874 50 training.label_smoothing 0.39224325333484744 +874 51 model.embedding_dim 0.0 +874 51 model.scoring_fct_norm 2.0 +874 51 regularizer.weight 0.02325869857103664 +874 51 optimizer.lr 0.04912289787573454 +874 51 training.batch_size 2.0 +874 51 training.label_smoothing 0.0030668799237259857 +874 52 model.embedding_dim 2.0 +874 52 model.scoring_fct_norm 2.0 +874 52 
regularizer.weight 0.25319838377242704 +874 52 optimizer.lr 0.003653482889798019 +874 52 training.batch_size 2.0 +874 52 training.label_smoothing 0.002318433666656295 +874 53 model.embedding_dim 1.0 +874 53 model.scoring_fct_norm 1.0 +874 53 regularizer.weight 0.18874686424187942 +874 53 optimizer.lr 0.0027790487579467836 +874 53 training.batch_size 0.0 +874 53 training.label_smoothing 0.16939470020250502 +874 54 model.embedding_dim 2.0 +874 54 model.scoring_fct_norm 1.0 +874 54 regularizer.weight 0.02885135300465367 +874 54 optimizer.lr 0.0016825588557331675 +874 54 training.batch_size 0.0 +874 54 training.label_smoothing 0.0753049950591942 +874 55 model.embedding_dim 0.0 +874 55 model.scoring_fct_norm 1.0 +874 55 regularizer.weight 0.09451335224093066 +874 55 optimizer.lr 0.012437348893920846 +874 55 training.batch_size 2.0 +874 55 training.label_smoothing 0.7353005687529819 +874 56 model.embedding_dim 1.0 +874 56 model.scoring_fct_norm 1.0 +874 56 regularizer.weight 0.074857733089773 +874 56 optimizer.lr 0.008387888854242135 +874 56 training.batch_size 0.0 +874 56 training.label_smoothing 0.0017509738260290137 +874 57 model.embedding_dim 2.0 +874 57 model.scoring_fct_norm 1.0 +874 57 regularizer.weight 0.29861959334277605 +874 57 optimizer.lr 0.05252266873423328 +874 57 training.batch_size 0.0 +874 57 training.label_smoothing 0.5273904627829168 +874 58 model.embedding_dim 2.0 +874 58 model.scoring_fct_norm 2.0 +874 58 regularizer.weight 0.10499411133106072 +874 58 optimizer.lr 0.02683313870204466 +874 58 training.batch_size 1.0 +874 58 training.label_smoothing 0.0030505898727243175 +874 59 model.embedding_dim 0.0 +874 59 model.scoring_fct_norm 2.0 +874 59 regularizer.weight 0.019731073695958445 +874 59 optimizer.lr 0.010071200244147474 +874 59 training.batch_size 0.0 +874 59 training.label_smoothing 0.19543867704584086 +874 60 model.embedding_dim 2.0 +874 60 model.scoring_fct_norm 1.0 +874 60 regularizer.weight 0.10522418411227247 +874 60 optimizer.lr 
0.0038646959187555916 +874 60 training.batch_size 0.0 +874 60 training.label_smoothing 0.0012643870108869132 +874 61 model.embedding_dim 2.0 +874 61 model.scoring_fct_norm 2.0 +874 61 regularizer.weight 0.08222868666571316 +874 61 optimizer.lr 0.0024588016093843265 +874 61 training.batch_size 1.0 +874 61 training.label_smoothing 0.15444583878434778 +874 62 model.embedding_dim 1.0 +874 62 model.scoring_fct_norm 2.0 +874 62 regularizer.weight 0.03264123557916235 +874 62 optimizer.lr 0.011634703128257921 +874 62 training.batch_size 1.0 +874 62 training.label_smoothing 0.0019511211377133044 +874 63 model.embedding_dim 1.0 +874 63 model.scoring_fct_norm 2.0 +874 63 regularizer.weight 0.054103430125391566 +874 63 optimizer.lr 0.0020236632719658277 +874 63 training.batch_size 2.0 +874 63 training.label_smoothing 0.0014699677226266343 +874 64 model.embedding_dim 1.0 +874 64 model.scoring_fct_norm 1.0 +874 64 regularizer.weight 0.0104213696251317 +874 64 optimizer.lr 0.008654204502390822 +874 64 training.batch_size 0.0 +874 64 training.label_smoothing 0.10208532690303043 +874 65 model.embedding_dim 1.0 +874 65 model.scoring_fct_norm 1.0 +874 65 regularizer.weight 0.011907016466057536 +874 65 optimizer.lr 0.0025244764662848288 +874 65 training.batch_size 2.0 +874 65 training.label_smoothing 0.07417846748164018 +874 66 model.embedding_dim 1.0 +874 66 model.scoring_fct_norm 2.0 +874 66 regularizer.weight 0.058641493333578425 +874 66 optimizer.lr 0.014934808860629662 +874 66 training.batch_size 2.0 +874 66 training.label_smoothing 0.018816488205872253 +874 67 model.embedding_dim 2.0 +874 67 model.scoring_fct_norm 2.0 +874 67 regularizer.weight 0.020010139842363397 +874 67 optimizer.lr 0.015571712076127281 +874 67 training.batch_size 2.0 +874 67 training.label_smoothing 0.5169064433436507 +874 68 model.embedding_dim 0.0 +874 68 model.scoring_fct_norm 2.0 +874 68 regularizer.weight 0.2857926693840416 +874 68 optimizer.lr 0.08481298325690069 +874 68 training.batch_size 1.0 +874 68 
training.label_smoothing 0.08818641714828722 +874 69 model.embedding_dim 0.0 +874 69 model.scoring_fct_norm 2.0 +874 69 regularizer.weight 0.08471150196101226 +874 69 optimizer.lr 0.02585871751595875 +874 69 training.batch_size 0.0 +874 69 training.label_smoothing 0.002180107988656052 +874 70 model.embedding_dim 2.0 +874 70 model.scoring_fct_norm 1.0 +874 70 regularizer.weight 0.2077075315467993 +874 70 optimizer.lr 0.002520749010163025 +874 70 training.batch_size 2.0 +874 70 training.label_smoothing 0.00241204195979601 +874 71 model.embedding_dim 1.0 +874 71 model.scoring_fct_norm 1.0 +874 71 regularizer.weight 0.23833807771199728 +874 71 optimizer.lr 0.003647618236300831 +874 71 training.batch_size 0.0 +874 71 training.label_smoothing 0.002498271464332581 +874 72 model.embedding_dim 2.0 +874 72 model.scoring_fct_norm 1.0 +874 72 regularizer.weight 0.012932282114839886 +874 72 optimizer.lr 0.05337766008600103 +874 72 training.batch_size 1.0 +874 72 training.label_smoothing 0.07104299555947949 +874 73 model.embedding_dim 1.0 +874 73 model.scoring_fct_norm 1.0 +874 73 regularizer.weight 0.022868710969590762 +874 73 optimizer.lr 0.039990232998654944 +874 73 training.batch_size 2.0 +874 73 training.label_smoothing 0.0234314210219293 +874 74 model.embedding_dim 1.0 +874 74 model.scoring_fct_norm 2.0 +874 74 regularizer.weight 0.031530110495217825 +874 74 optimizer.lr 0.0021118815660012345 +874 74 training.batch_size 2.0 +874 74 training.label_smoothing 0.049772430484907586 +874 75 model.embedding_dim 0.0 +874 75 model.scoring_fct_norm 2.0 +874 75 regularizer.weight 0.13245065672584658 +874 75 optimizer.lr 0.0010532102835965331 +874 75 training.batch_size 1.0 +874 75 training.label_smoothing 0.00229733784256452 +874 76 model.embedding_dim 2.0 +874 76 model.scoring_fct_norm 2.0 +874 76 regularizer.weight 0.09988718449971629 +874 76 optimizer.lr 0.0010122619884761533 +874 76 training.batch_size 0.0 +874 76 training.label_smoothing 0.8061278287870828 +874 77 
model.embedding_dim 2.0 +874 77 model.scoring_fct_norm 2.0 +874 77 regularizer.weight 0.03254073927129694 +874 77 optimizer.lr 0.0011593991797749059 +874 77 training.batch_size 2.0 +874 77 training.label_smoothing 0.0050405015262997375 +874 78 model.embedding_dim 2.0 +874 78 model.scoring_fct_norm 1.0 +874 78 regularizer.weight 0.18838751578018878 +874 78 optimizer.lr 0.039370911787365605 +874 78 training.batch_size 0.0 +874 78 training.label_smoothing 0.010897156065973683 +874 79 model.embedding_dim 2.0 +874 79 model.scoring_fct_norm 2.0 +874 79 regularizer.weight 0.026743536622656584 +874 79 optimizer.lr 0.034295139477858005 +874 79 training.batch_size 0.0 +874 79 training.label_smoothing 0.002309795072267428 +874 80 model.embedding_dim 2.0 +874 80 model.scoring_fct_norm 1.0 +874 80 regularizer.weight 0.23835827716302432 +874 80 optimizer.lr 0.007395495620289574 +874 80 training.batch_size 1.0 +874 80 training.label_smoothing 0.04347575307315651 +874 81 model.embedding_dim 1.0 +874 81 model.scoring_fct_norm 2.0 +874 81 regularizer.weight 0.05049554251217496 +874 81 optimizer.lr 0.008930446096005566 +874 81 training.batch_size 2.0 +874 81 training.label_smoothing 0.02795583221280405 +874 82 model.embedding_dim 0.0 +874 82 model.scoring_fct_norm 2.0 +874 82 regularizer.weight 0.17840561276084632 +874 82 optimizer.lr 0.08659129335383582 +874 82 training.batch_size 0.0 +874 82 training.label_smoothing 0.05794882135169081 +874 83 model.embedding_dim 1.0 +874 83 model.scoring_fct_norm 2.0 +874 83 regularizer.weight 0.011312227013741601 +874 83 optimizer.lr 0.08564593298019646 +874 83 training.batch_size 1.0 +874 83 training.label_smoothing 0.019696829789813502 +874 84 model.embedding_dim 2.0 +874 84 model.scoring_fct_norm 2.0 +874 84 regularizer.weight 0.15318668081343484 +874 84 optimizer.lr 0.0692336089119584 +874 84 training.batch_size 1.0 +874 84 training.label_smoothing 0.3554669000914089 +874 85 model.embedding_dim 0.0 +874 85 model.scoring_fct_norm 1.0 +874 85 
regularizer.weight 0.011629061988233698 +874 85 optimizer.lr 0.003744154435753403 +874 85 training.batch_size 1.0 +874 85 training.label_smoothing 0.001846596337152768 +874 86 model.embedding_dim 1.0 +874 86 model.scoring_fct_norm 2.0 +874 86 regularizer.weight 0.01779264811161118 +874 86 optimizer.lr 0.05127723830529243 +874 86 training.batch_size 1.0 +874 86 training.label_smoothing 0.00435842235083022 +874 87 model.embedding_dim 0.0 +874 87 model.scoring_fct_norm 1.0 +874 87 regularizer.weight 0.1138923754093661 +874 87 optimizer.lr 0.019508595638170135 +874 87 training.batch_size 2.0 +874 87 training.label_smoothing 0.018276954855013676 +874 88 model.embedding_dim 0.0 +874 88 model.scoring_fct_norm 2.0 +874 88 regularizer.weight 0.04365991615313636 +874 88 optimizer.lr 0.001519528411462405 +874 88 training.batch_size 1.0 +874 88 training.label_smoothing 0.5161153111149884 +874 89 model.embedding_dim 0.0 +874 89 model.scoring_fct_norm 1.0 +874 89 regularizer.weight 0.019671882359691573 +874 89 optimizer.lr 0.01679906671242533 +874 89 training.batch_size 0.0 +874 89 training.label_smoothing 0.003560058154901972 +874 90 model.embedding_dim 1.0 +874 90 model.scoring_fct_norm 2.0 +874 90 regularizer.weight 0.025155808524939066 +874 90 optimizer.lr 0.013651897985577517 +874 90 training.batch_size 2.0 +874 90 training.label_smoothing 0.007205388341789074 +874 91 model.embedding_dim 1.0 +874 91 model.scoring_fct_norm 2.0 +874 91 regularizer.weight 0.04733408280995062 +874 91 optimizer.lr 0.013924367302061855 +874 91 training.batch_size 2.0 +874 91 training.label_smoothing 0.9814459072647972 +874 92 model.embedding_dim 1.0 +874 92 model.scoring_fct_norm 2.0 +874 92 regularizer.weight 0.014489873287210065 +874 92 optimizer.lr 0.035705259600323225 +874 92 training.batch_size 2.0 +874 92 training.label_smoothing 0.24708287607609983 +874 93 model.embedding_dim 2.0 +874 93 model.scoring_fct_norm 2.0 +874 93 regularizer.weight 0.20567993689032738 +874 93 optimizer.lr 
0.08475137433683924 +874 93 training.batch_size 0.0 +874 93 training.label_smoothing 0.008660975399778118 +874 94 model.embedding_dim 2.0 +874 94 model.scoring_fct_norm 2.0 +874 94 regularizer.weight 0.08350581085865075 +874 94 optimizer.lr 0.005395609422159657 +874 94 training.batch_size 0.0 +874 94 training.label_smoothing 0.19903273596039314 +874 95 model.embedding_dim 1.0 +874 95 model.scoring_fct_norm 1.0 +874 95 regularizer.weight 0.04425616119494998 +874 95 optimizer.lr 0.021499632791662004 +874 95 training.batch_size 1.0 +874 95 training.label_smoothing 0.007924889403171022 +874 96 model.embedding_dim 1.0 +874 96 model.scoring_fct_norm 1.0 +874 96 regularizer.weight 0.2472764331033864 +874 96 optimizer.lr 0.010214425886751893 +874 96 training.batch_size 2.0 +874 96 training.label_smoothing 0.0452081445736535 +874 97 model.embedding_dim 0.0 +874 97 model.scoring_fct_norm 2.0 +874 97 regularizer.weight 0.17451119279445837 +874 97 optimizer.lr 0.0011182373146350778 +874 97 training.batch_size 1.0 +874 97 training.label_smoothing 0.0025960542191236 +874 98 model.embedding_dim 1.0 +874 98 model.scoring_fct_norm 1.0 +874 98 regularizer.weight 0.1839416166830385 +874 98 optimizer.lr 0.0012859850211007571 +874 98 training.batch_size 1.0 +874 98 training.label_smoothing 0.15990023991040442 +874 99 model.embedding_dim 1.0 +874 99 model.scoring_fct_norm 1.0 +874 99 regularizer.weight 0.01700304733699199 +874 99 optimizer.lr 0.00409758231530938 +874 99 training.batch_size 2.0 +874 99 training.label_smoothing 0.6794842360847335 +874 100 model.embedding_dim 1.0 +874 100 model.scoring_fct_norm 2.0 +874 100 regularizer.weight 0.010877955543769517 +874 100 optimizer.lr 0.020663251277793362 +874 100 training.batch_size 1.0 +874 100 training.label_smoothing 0.010752509874166228 +874 1 dataset """kinships""" +874 1 model """transh""" +874 1 loss """crossentropy""" +874 1 regularizer """transh""" +874 1 optimizer """adam""" +874 1 training_loop """lcwa""" +874 1 evaluator 
"""rankbased""" +874 2 dataset """kinships""" +874 2 model """transh""" +874 2 loss """crossentropy""" +874 2 regularizer """transh""" +874 2 optimizer """adam""" +874 2 training_loop """lcwa""" +874 2 evaluator """rankbased""" +874 3 dataset """kinships""" +874 3 model """transh""" +874 3 loss """crossentropy""" +874 3 regularizer """transh""" +874 3 optimizer """adam""" +874 3 training_loop """lcwa""" +874 3 evaluator """rankbased""" +874 4 dataset """kinships""" +874 4 model """transh""" +874 4 loss """crossentropy""" +874 4 regularizer """transh""" +874 4 optimizer """adam""" +874 4 training_loop """lcwa""" +874 4 evaluator """rankbased""" +874 5 dataset """kinships""" +874 5 model """transh""" +874 5 loss """crossentropy""" +874 5 regularizer """transh""" +874 5 optimizer """adam""" +874 5 training_loop """lcwa""" +874 5 evaluator """rankbased""" +874 6 dataset """kinships""" +874 6 model """transh""" +874 6 loss """crossentropy""" +874 6 regularizer """transh""" +874 6 optimizer """adam""" +874 6 training_loop """lcwa""" +874 6 evaluator """rankbased""" +874 7 dataset """kinships""" +874 7 model """transh""" +874 7 loss """crossentropy""" +874 7 regularizer """transh""" +874 7 optimizer """adam""" +874 7 training_loop """lcwa""" +874 7 evaluator """rankbased""" +874 8 dataset """kinships""" +874 8 model """transh""" +874 8 loss """crossentropy""" +874 8 regularizer """transh""" +874 8 optimizer """adam""" +874 8 training_loop """lcwa""" +874 8 evaluator """rankbased""" +874 9 dataset """kinships""" +874 9 model """transh""" +874 9 loss """crossentropy""" +874 9 regularizer """transh""" +874 9 optimizer """adam""" +874 9 training_loop """lcwa""" +874 9 evaluator """rankbased""" +874 10 dataset """kinships""" +874 10 model """transh""" +874 10 loss """crossentropy""" +874 10 regularizer """transh""" +874 10 optimizer """adam""" +874 10 training_loop """lcwa""" +874 10 evaluator """rankbased""" +874 11 dataset """kinships""" +874 11 model """transh""" +874 11 
loss """crossentropy""" +874 11 regularizer """transh""" +874 11 optimizer """adam""" +874 11 training_loop """lcwa""" +874 11 evaluator """rankbased""" +874 12 dataset """kinships""" +874 12 model """transh""" +874 12 loss """crossentropy""" +874 12 regularizer """transh""" +874 12 optimizer """adam""" +874 12 training_loop """lcwa""" +874 12 evaluator """rankbased""" +874 13 dataset """kinships""" +874 13 model """transh""" +874 13 loss """crossentropy""" +874 13 regularizer """transh""" +874 13 optimizer """adam""" +874 13 training_loop """lcwa""" +874 13 evaluator """rankbased""" +874 14 dataset """kinships""" +874 14 model """transh""" +874 14 loss """crossentropy""" +874 14 regularizer """transh""" +874 14 optimizer """adam""" +874 14 training_loop """lcwa""" +874 14 evaluator """rankbased""" +874 15 dataset """kinships""" +874 15 model """transh""" +874 15 loss """crossentropy""" +874 15 regularizer """transh""" +874 15 optimizer """adam""" +874 15 training_loop """lcwa""" +874 15 evaluator """rankbased""" +874 16 dataset """kinships""" +874 16 model """transh""" +874 16 loss """crossentropy""" +874 16 regularizer """transh""" +874 16 optimizer """adam""" +874 16 training_loop """lcwa""" +874 16 evaluator """rankbased""" +874 17 dataset """kinships""" +874 17 model """transh""" +874 17 loss """crossentropy""" +874 17 regularizer """transh""" +874 17 optimizer """adam""" +874 17 training_loop """lcwa""" +874 17 evaluator """rankbased""" +874 18 dataset """kinships""" +874 18 model """transh""" +874 18 loss """crossentropy""" +874 18 regularizer """transh""" +874 18 optimizer """adam""" +874 18 training_loop """lcwa""" +874 18 evaluator """rankbased""" +874 19 dataset """kinships""" +874 19 model """transh""" +874 19 loss """crossentropy""" +874 19 regularizer """transh""" +874 19 optimizer """adam""" +874 19 training_loop """lcwa""" +874 19 evaluator """rankbased""" +874 20 dataset """kinships""" +874 20 model """transh""" +874 20 loss """crossentropy""" +874 
20 regularizer """transh""" +874 20 optimizer """adam""" +874 20 training_loop """lcwa""" +874 20 evaluator """rankbased""" +874 21 dataset """kinships""" +874 21 model """transh""" +874 21 loss """crossentropy""" +874 21 regularizer """transh""" +874 21 optimizer """adam""" +874 21 training_loop """lcwa""" +874 21 evaluator """rankbased""" +874 22 dataset """kinships""" +874 22 model """transh""" +874 22 loss """crossentropy""" +874 22 regularizer """transh""" +874 22 optimizer """adam""" +874 22 training_loop """lcwa""" +874 22 evaluator """rankbased""" +874 23 dataset """kinships""" +874 23 model """transh""" +874 23 loss """crossentropy""" +874 23 regularizer """transh""" +874 23 optimizer """adam""" +874 23 training_loop """lcwa""" +874 23 evaluator """rankbased""" +874 24 dataset """kinships""" +874 24 model """transh""" +874 24 loss """crossentropy""" +874 24 regularizer """transh""" +874 24 optimizer """adam""" +874 24 training_loop """lcwa""" +874 24 evaluator """rankbased""" +874 25 dataset """kinships""" +874 25 model """transh""" +874 25 loss """crossentropy""" +874 25 regularizer """transh""" +874 25 optimizer """adam""" +874 25 training_loop """lcwa""" +874 25 evaluator """rankbased""" +874 26 dataset """kinships""" +874 26 model """transh""" +874 26 loss """crossentropy""" +874 26 regularizer """transh""" +874 26 optimizer """adam""" +874 26 training_loop """lcwa""" +874 26 evaluator """rankbased""" +874 27 dataset """kinships""" +874 27 model """transh""" +874 27 loss """crossentropy""" +874 27 regularizer """transh""" +874 27 optimizer """adam""" +874 27 training_loop """lcwa""" +874 27 evaluator """rankbased""" +874 28 dataset """kinships""" +874 28 model """transh""" +874 28 loss """crossentropy""" +874 28 regularizer """transh""" +874 28 optimizer """adam""" +874 28 training_loop """lcwa""" +874 28 evaluator """rankbased""" +874 29 dataset """kinships""" +874 29 model """transh""" +874 29 loss """crossentropy""" +874 29 regularizer """transh""" 
+874 29 optimizer """adam""" +874 29 training_loop """lcwa""" +874 29 evaluator """rankbased""" +874 30 dataset """kinships""" +874 30 model """transh""" +874 30 loss """crossentropy""" +874 30 regularizer """transh""" +874 30 optimizer """adam""" +874 30 training_loop """lcwa""" +874 30 evaluator """rankbased""" +874 31 dataset """kinships""" +874 31 model """transh""" +874 31 loss """crossentropy""" +874 31 regularizer """transh""" +874 31 optimizer """adam""" +874 31 training_loop """lcwa""" +874 31 evaluator """rankbased""" +874 32 dataset """kinships""" +874 32 model """transh""" +874 32 loss """crossentropy""" +874 32 regularizer """transh""" +874 32 optimizer """adam""" +874 32 training_loop """lcwa""" +874 32 evaluator """rankbased""" +874 33 dataset """kinships""" +874 33 model """transh""" +874 33 loss """crossentropy""" +874 33 regularizer """transh""" +874 33 optimizer """adam""" +874 33 training_loop """lcwa""" +874 33 evaluator """rankbased""" +874 34 dataset """kinships""" +874 34 model """transh""" +874 34 loss """crossentropy""" +874 34 regularizer """transh""" +874 34 optimizer """adam""" +874 34 training_loop """lcwa""" +874 34 evaluator """rankbased""" +874 35 dataset """kinships""" +874 35 model """transh""" +874 35 loss """crossentropy""" +874 35 regularizer """transh""" +874 35 optimizer """adam""" +874 35 training_loop """lcwa""" +874 35 evaluator """rankbased""" +874 36 dataset """kinships""" +874 36 model """transh""" +874 36 loss """crossentropy""" +874 36 regularizer """transh""" +874 36 optimizer """adam""" +874 36 training_loop """lcwa""" +874 36 evaluator """rankbased""" +874 37 dataset """kinships""" +874 37 model """transh""" +874 37 loss """crossentropy""" +874 37 regularizer """transh""" +874 37 optimizer """adam""" +874 37 training_loop """lcwa""" +874 37 evaluator """rankbased""" +874 38 dataset """kinships""" +874 38 model """transh""" +874 38 loss """crossentropy""" +874 38 regularizer """transh""" +874 38 optimizer """adam""" 
+874 38 training_loop """lcwa""" +874 38 evaluator """rankbased""" +874 39 dataset """kinships""" +874 39 model """transh""" +874 39 loss """crossentropy""" +874 39 regularizer """transh""" +874 39 optimizer """adam""" +874 39 training_loop """lcwa""" +874 39 evaluator """rankbased""" +874 40 dataset """kinships""" +874 40 model """transh""" +874 40 loss """crossentropy""" +874 40 regularizer """transh""" +874 40 optimizer """adam""" +874 40 training_loop """lcwa""" +874 40 evaluator """rankbased""" +874 41 dataset """kinships""" +874 41 model """transh""" +874 41 loss """crossentropy""" +874 41 regularizer """transh""" +874 41 optimizer """adam""" +874 41 training_loop """lcwa""" +874 41 evaluator """rankbased""" +874 42 dataset """kinships""" +874 42 model """transh""" +874 42 loss """crossentropy""" +874 42 regularizer """transh""" +874 42 optimizer """adam""" +874 42 training_loop """lcwa""" +874 42 evaluator """rankbased""" +874 43 dataset """kinships""" +874 43 model """transh""" +874 43 loss """crossentropy""" +874 43 regularizer """transh""" +874 43 optimizer """adam""" +874 43 training_loop """lcwa""" +874 43 evaluator """rankbased""" +874 44 dataset """kinships""" +874 44 model """transh""" +874 44 loss """crossentropy""" +874 44 regularizer """transh""" +874 44 optimizer """adam""" +874 44 training_loop """lcwa""" +874 44 evaluator """rankbased""" +874 45 dataset """kinships""" +874 45 model """transh""" +874 45 loss """crossentropy""" +874 45 regularizer """transh""" +874 45 optimizer """adam""" +874 45 training_loop """lcwa""" +874 45 evaluator """rankbased""" +874 46 dataset """kinships""" +874 46 model """transh""" +874 46 loss """crossentropy""" +874 46 regularizer """transh""" +874 46 optimizer """adam""" +874 46 training_loop """lcwa""" +874 46 evaluator """rankbased""" +874 47 dataset """kinships""" +874 47 model """transh""" +874 47 loss """crossentropy""" +874 47 regularizer """transh""" +874 47 optimizer """adam""" +874 47 training_loop 
"""lcwa""" +874 47 evaluator """rankbased""" +874 48 dataset """kinships""" +874 48 model """transh""" +874 48 loss """crossentropy""" +874 48 regularizer """transh""" +874 48 optimizer """adam""" +874 48 training_loop """lcwa""" +874 48 evaluator """rankbased""" +874 49 dataset """kinships""" +874 49 model """transh""" +874 49 loss """crossentropy""" +874 49 regularizer """transh""" +874 49 optimizer """adam""" +874 49 training_loop """lcwa""" +874 49 evaluator """rankbased""" +874 50 dataset """kinships""" +874 50 model """transh""" +874 50 loss """crossentropy""" +874 50 regularizer """transh""" +874 50 optimizer """adam""" +874 50 training_loop """lcwa""" +874 50 evaluator """rankbased""" +874 51 dataset """kinships""" +874 51 model """transh""" +874 51 loss """crossentropy""" +874 51 regularizer """transh""" +874 51 optimizer """adam""" +874 51 training_loop """lcwa""" +874 51 evaluator """rankbased""" +874 52 dataset """kinships""" +874 52 model """transh""" +874 52 loss """crossentropy""" +874 52 regularizer """transh""" +874 52 optimizer """adam""" +874 52 training_loop """lcwa""" +874 52 evaluator """rankbased""" +874 53 dataset """kinships""" +874 53 model """transh""" +874 53 loss """crossentropy""" +874 53 regularizer """transh""" +874 53 optimizer """adam""" +874 53 training_loop """lcwa""" +874 53 evaluator """rankbased""" +874 54 dataset """kinships""" +874 54 model """transh""" +874 54 loss """crossentropy""" +874 54 regularizer """transh""" +874 54 optimizer """adam""" +874 54 training_loop """lcwa""" +874 54 evaluator """rankbased""" +874 55 dataset """kinships""" +874 55 model """transh""" +874 55 loss """crossentropy""" +874 55 regularizer """transh""" +874 55 optimizer """adam""" +874 55 training_loop """lcwa""" +874 55 evaluator """rankbased""" +874 56 dataset """kinships""" +874 56 model """transh""" +874 56 loss """crossentropy""" +874 56 regularizer """transh""" +874 56 optimizer """adam""" +874 56 training_loop """lcwa""" +874 56 evaluator 
"""rankbased""" +874 57 dataset """kinships""" +874 57 model """transh""" +874 57 loss """crossentropy""" +874 57 regularizer """transh""" +874 57 optimizer """adam""" +874 57 training_loop """lcwa""" +874 57 evaluator """rankbased""" +874 58 dataset """kinships""" +874 58 model """transh""" +874 58 loss """crossentropy""" +874 58 regularizer """transh""" +874 58 optimizer """adam""" +874 58 training_loop """lcwa""" +874 58 evaluator """rankbased""" +874 59 dataset """kinships""" +874 59 model """transh""" +874 59 loss """crossentropy""" +874 59 regularizer """transh""" +874 59 optimizer """adam""" +874 59 training_loop """lcwa""" +874 59 evaluator """rankbased""" +874 60 dataset """kinships""" +874 60 model """transh""" +874 60 loss """crossentropy""" +874 60 regularizer """transh""" +874 60 optimizer """adam""" +874 60 training_loop """lcwa""" +874 60 evaluator """rankbased""" +874 61 dataset """kinships""" +874 61 model """transh""" +874 61 loss """crossentropy""" +874 61 regularizer """transh""" +874 61 optimizer """adam""" +874 61 training_loop """lcwa""" +874 61 evaluator """rankbased""" +874 62 dataset """kinships""" +874 62 model """transh""" +874 62 loss """crossentropy""" +874 62 regularizer """transh""" +874 62 optimizer """adam""" +874 62 training_loop """lcwa""" +874 62 evaluator """rankbased""" +874 63 dataset """kinships""" +874 63 model """transh""" +874 63 loss """crossentropy""" +874 63 regularizer """transh""" +874 63 optimizer """adam""" +874 63 training_loop """lcwa""" +874 63 evaluator """rankbased""" +874 64 dataset """kinships""" +874 64 model """transh""" +874 64 loss """crossentropy""" +874 64 regularizer """transh""" +874 64 optimizer """adam""" +874 64 training_loop """lcwa""" +874 64 evaluator """rankbased""" +874 65 dataset """kinships""" +874 65 model """transh""" +874 65 loss """crossentropy""" +874 65 regularizer """transh""" +874 65 optimizer """adam""" +874 65 training_loop """lcwa""" +874 65 evaluator """rankbased""" +874 66 
dataset """kinships""" +874 66 model """transh""" +874 66 loss """crossentropy""" +874 66 regularizer """transh""" +874 66 optimizer """adam""" +874 66 training_loop """lcwa""" +874 66 evaluator """rankbased""" +874 67 dataset """kinships""" +874 67 model """transh""" +874 67 loss """crossentropy""" +874 67 regularizer """transh""" +874 67 optimizer """adam""" +874 67 training_loop """lcwa""" +874 67 evaluator """rankbased""" +874 68 dataset """kinships""" +874 68 model """transh""" +874 68 loss """crossentropy""" +874 68 regularizer """transh""" +874 68 optimizer """adam""" +874 68 training_loop """lcwa""" +874 68 evaluator """rankbased""" +874 69 dataset """kinships""" +874 69 model """transh""" +874 69 loss """crossentropy""" +874 69 regularizer """transh""" +874 69 optimizer """adam""" +874 69 training_loop """lcwa""" +874 69 evaluator """rankbased""" +874 70 dataset """kinships""" +874 70 model """transh""" +874 70 loss """crossentropy""" +874 70 regularizer """transh""" +874 70 optimizer """adam""" +874 70 training_loop """lcwa""" +874 70 evaluator """rankbased""" +874 71 dataset """kinships""" +874 71 model """transh""" +874 71 loss """crossentropy""" +874 71 regularizer """transh""" +874 71 optimizer """adam""" +874 71 training_loop """lcwa""" +874 71 evaluator """rankbased""" +874 72 dataset """kinships""" +874 72 model """transh""" +874 72 loss """crossentropy""" +874 72 regularizer """transh""" +874 72 optimizer """adam""" +874 72 training_loop """lcwa""" +874 72 evaluator """rankbased""" +874 73 dataset """kinships""" +874 73 model """transh""" +874 73 loss """crossentropy""" +874 73 regularizer """transh""" +874 73 optimizer """adam""" +874 73 training_loop """lcwa""" +874 73 evaluator """rankbased""" +874 74 dataset """kinships""" +874 74 model """transh""" +874 74 loss """crossentropy""" +874 74 regularizer """transh""" +874 74 optimizer """adam""" +874 74 training_loop """lcwa""" +874 74 evaluator """rankbased""" +874 75 dataset """kinships""" +874 
75 model """transh""" +874 75 loss """crossentropy""" +874 75 regularizer """transh""" +874 75 optimizer """adam""" +874 75 training_loop """lcwa""" +874 75 evaluator """rankbased""" +874 76 dataset """kinships""" +874 76 model """transh""" +874 76 loss """crossentropy""" +874 76 regularizer """transh""" +874 76 optimizer """adam""" +874 76 training_loop """lcwa""" +874 76 evaluator """rankbased""" +874 77 dataset """kinships""" +874 77 model """transh""" +874 77 loss """crossentropy""" +874 77 regularizer """transh""" +874 77 optimizer """adam""" +874 77 training_loop """lcwa""" +874 77 evaluator """rankbased""" +874 78 dataset """kinships""" +874 78 model """transh""" +874 78 loss """crossentropy""" +874 78 regularizer """transh""" +874 78 optimizer """adam""" +874 78 training_loop """lcwa""" +874 78 evaluator """rankbased""" +874 79 dataset """kinships""" +874 79 model """transh""" +874 79 loss """crossentropy""" +874 79 regularizer """transh""" +874 79 optimizer """adam""" +874 79 training_loop """lcwa""" +874 79 evaluator """rankbased""" +874 80 dataset """kinships""" +874 80 model """transh""" +874 80 loss """crossentropy""" +874 80 regularizer """transh""" +874 80 optimizer """adam""" +874 80 training_loop """lcwa""" +874 80 evaluator """rankbased""" +874 81 dataset """kinships""" +874 81 model """transh""" +874 81 loss """crossentropy""" +874 81 regularizer """transh""" +874 81 optimizer """adam""" +874 81 training_loop """lcwa""" +874 81 evaluator """rankbased""" +874 82 dataset """kinships""" +874 82 model """transh""" +874 82 loss """crossentropy""" +874 82 regularizer """transh""" +874 82 optimizer """adam""" +874 82 training_loop """lcwa""" +874 82 evaluator """rankbased""" +874 83 dataset """kinships""" +874 83 model """transh""" +874 83 loss """crossentropy""" +874 83 regularizer """transh""" +874 83 optimizer """adam""" +874 83 training_loop """lcwa""" +874 83 evaluator """rankbased""" +874 84 dataset """kinships""" +874 84 model """transh""" +874 
84 loss """crossentropy""" +874 84 regularizer """transh""" +874 84 optimizer """adam""" +874 84 training_loop """lcwa""" +874 84 evaluator """rankbased""" +874 85 dataset """kinships""" +874 85 model """transh""" +874 85 loss """crossentropy""" +874 85 regularizer """transh""" +874 85 optimizer """adam""" +874 85 training_loop """lcwa""" +874 85 evaluator """rankbased""" +874 86 dataset """kinships""" +874 86 model """transh""" +874 86 loss """crossentropy""" +874 86 regularizer """transh""" +874 86 optimizer """adam""" +874 86 training_loop """lcwa""" +874 86 evaluator """rankbased""" +874 87 dataset """kinships""" +874 87 model """transh""" +874 87 loss """crossentropy""" +874 87 regularizer """transh""" +874 87 optimizer """adam""" +874 87 training_loop """lcwa""" +874 87 evaluator """rankbased""" +874 88 dataset """kinships""" +874 88 model """transh""" +874 88 loss """crossentropy""" +874 88 regularizer """transh""" +874 88 optimizer """adam""" +874 88 training_loop """lcwa""" +874 88 evaluator """rankbased""" +874 89 dataset """kinships""" +874 89 model """transh""" +874 89 loss """crossentropy""" +874 89 regularizer """transh""" +874 89 optimizer """adam""" +874 89 training_loop """lcwa""" +874 89 evaluator """rankbased""" +874 90 dataset """kinships""" +874 90 model """transh""" +874 90 loss """crossentropy""" +874 90 regularizer """transh""" +874 90 optimizer """adam""" +874 90 training_loop """lcwa""" +874 90 evaluator """rankbased""" +874 91 dataset """kinships""" +874 91 model """transh""" +874 91 loss """crossentropy""" +874 91 regularizer """transh""" +874 91 optimizer """adam""" +874 91 training_loop """lcwa""" +874 91 evaluator """rankbased""" +874 92 dataset """kinships""" +874 92 model """transh""" +874 92 loss """crossentropy""" +874 92 regularizer """transh""" +874 92 optimizer """adam""" +874 92 training_loop """lcwa""" +874 92 evaluator """rankbased""" +874 93 dataset """kinships""" +874 93 model """transh""" +874 93 loss """crossentropy""" 
+874 93 regularizer """transh""" +874 93 optimizer """adam""" +874 93 training_loop """lcwa""" +874 93 evaluator """rankbased""" +874 94 dataset """kinships""" +874 94 model """transh""" +874 94 loss """crossentropy""" +874 94 regularizer """transh""" +874 94 optimizer """adam""" +874 94 training_loop """lcwa""" +874 94 evaluator """rankbased""" +874 95 dataset """kinships""" +874 95 model """transh""" +874 95 loss """crossentropy""" +874 95 regularizer """transh""" +874 95 optimizer """adam""" +874 95 training_loop """lcwa""" +874 95 evaluator """rankbased""" +874 96 dataset """kinships""" +874 96 model """transh""" +874 96 loss """crossentropy""" +874 96 regularizer """transh""" +874 96 optimizer """adam""" +874 96 training_loop """lcwa""" +874 96 evaluator """rankbased""" +874 97 dataset """kinships""" +874 97 model """transh""" +874 97 loss """crossentropy""" +874 97 regularizer """transh""" +874 97 optimizer """adam""" +874 97 training_loop """lcwa""" +874 97 evaluator """rankbased""" +874 98 dataset """kinships""" +874 98 model """transh""" +874 98 loss """crossentropy""" +874 98 regularizer """transh""" +874 98 optimizer """adam""" +874 98 training_loop """lcwa""" +874 98 evaluator """rankbased""" +874 99 dataset """kinships""" +874 99 model """transh""" +874 99 loss """crossentropy""" +874 99 regularizer """transh""" +874 99 optimizer """adam""" +874 99 training_loop """lcwa""" +874 99 evaluator """rankbased""" +874 100 dataset """kinships""" +874 100 model """transh""" +874 100 loss """crossentropy""" +874 100 regularizer """transh""" +874 100 optimizer """adam""" +874 100 training_loop """lcwa""" +874 100 evaluator """rankbased""" +875 1 model.embedding_dim 0.0 +875 1 model.scoring_fct_norm 1.0 +875 1 regularizer.weight 0.09204253048655055 +875 1 optimizer.lr 0.021010559286201394 +875 1 negative_sampler.num_negs_per_pos 5.0 +875 1 training.batch_size 2.0 +875 2 model.embedding_dim 1.0 +875 2 model.scoring_fct_norm 2.0 +875 2 regularizer.weight 
0.146496483469773 +875 2 optimizer.lr 0.03016389472015162 +875 2 negative_sampler.num_negs_per_pos 17.0 +875 2 training.batch_size 0.0 +875 3 model.embedding_dim 1.0 +875 3 model.scoring_fct_norm 2.0 +875 3 regularizer.weight 0.08953148335048774 +875 3 optimizer.lr 0.07172658537280883 +875 3 negative_sampler.num_negs_per_pos 44.0 +875 3 training.batch_size 2.0 +875 4 model.embedding_dim 1.0 +875 4 model.scoring_fct_norm 2.0 +875 4 regularizer.weight 0.08072339090534936 +875 4 optimizer.lr 0.006579320914649528 +875 4 negative_sampler.num_negs_per_pos 75.0 +875 4 training.batch_size 0.0 +875 5 model.embedding_dim 2.0 +875 5 model.scoring_fct_norm 1.0 +875 5 regularizer.weight 0.11590236810250978 +875 5 optimizer.lr 0.03059072883558053 +875 5 negative_sampler.num_negs_per_pos 79.0 +875 5 training.batch_size 0.0 +875 6 model.embedding_dim 0.0 +875 6 model.scoring_fct_norm 2.0 +875 6 regularizer.weight 0.025589988427376077 +875 6 optimizer.lr 0.013800415818739033 +875 6 negative_sampler.num_negs_per_pos 16.0 +875 6 training.batch_size 0.0 +875 7 model.embedding_dim 2.0 +875 7 model.scoring_fct_norm 2.0 +875 7 regularizer.weight 0.04968795282658956 +875 7 optimizer.lr 0.0015220757556661901 +875 7 negative_sampler.num_negs_per_pos 91.0 +875 7 training.batch_size 2.0 +875 8 model.embedding_dim 2.0 +875 8 model.scoring_fct_norm 1.0 +875 8 regularizer.weight 0.016066947606970357 +875 8 optimizer.lr 0.03920346820095254 +875 8 negative_sampler.num_negs_per_pos 28.0 +875 8 training.batch_size 1.0 +875 9 model.embedding_dim 2.0 +875 9 model.scoring_fct_norm 2.0 +875 9 regularizer.weight 0.08975164600326635 +875 9 optimizer.lr 0.015379650868055735 +875 9 negative_sampler.num_negs_per_pos 62.0 +875 9 training.batch_size 1.0 +875 10 model.embedding_dim 1.0 +875 10 model.scoring_fct_norm 2.0 +875 10 regularizer.weight 0.019690024185911377 +875 10 optimizer.lr 0.01674136184861204 +875 10 negative_sampler.num_negs_per_pos 56.0 +875 10 training.batch_size 2.0 +875 11 
model.embedding_dim 2.0 +875 11 model.scoring_fct_norm 1.0 +875 11 regularizer.weight 0.29962101505363337 +875 11 optimizer.lr 0.00206450773469975 +875 11 negative_sampler.num_negs_per_pos 18.0 +875 11 training.batch_size 2.0 +875 12 model.embedding_dim 0.0 +875 12 model.scoring_fct_norm 2.0 +875 12 regularizer.weight 0.11186932007278352 +875 12 optimizer.lr 0.041106786499966796 +875 12 negative_sampler.num_negs_per_pos 52.0 +875 12 training.batch_size 0.0 +875 13 model.embedding_dim 2.0 +875 13 model.scoring_fct_norm 2.0 +875 13 regularizer.weight 0.032479059013682504 +875 13 optimizer.lr 0.0017939759963607309 +875 13 negative_sampler.num_negs_per_pos 53.0 +875 13 training.batch_size 2.0 +875 14 model.embedding_dim 1.0 +875 14 model.scoring_fct_norm 2.0 +875 14 regularizer.weight 0.028927125800745225 +875 14 optimizer.lr 0.006603610349806739 +875 14 negative_sampler.num_negs_per_pos 51.0 +875 14 training.batch_size 0.0 +875 15 model.embedding_dim 0.0 +875 15 model.scoring_fct_norm 1.0 +875 15 regularizer.weight 0.034993141593367354 +875 15 optimizer.lr 0.001568320038696941 +875 15 negative_sampler.num_negs_per_pos 80.0 +875 15 training.batch_size 1.0 +875 16 model.embedding_dim 0.0 +875 16 model.scoring_fct_norm 1.0 +875 16 regularizer.weight 0.21402474114135187 +875 16 optimizer.lr 0.0752020839071154 +875 16 negative_sampler.num_negs_per_pos 88.0 +875 16 training.batch_size 1.0 +875 17 model.embedding_dim 2.0 +875 17 model.scoring_fct_norm 1.0 +875 17 regularizer.weight 0.0328431172274496 +875 17 optimizer.lr 0.014461396346395884 +875 17 negative_sampler.num_negs_per_pos 71.0 +875 17 training.batch_size 2.0 +875 18 model.embedding_dim 0.0 +875 18 model.scoring_fct_norm 2.0 +875 18 regularizer.weight 0.011988834196124332 +875 18 optimizer.lr 0.01382440537200701 +875 18 negative_sampler.num_negs_per_pos 62.0 +875 18 training.batch_size 0.0 +875 19 model.embedding_dim 1.0 +875 19 model.scoring_fct_norm 1.0 +875 19 regularizer.weight 0.19090869304386301 +875 19 
optimizer.lr 0.016625978822566563 +875 19 negative_sampler.num_negs_per_pos 84.0 +875 19 training.batch_size 2.0 +875 20 model.embedding_dim 0.0 +875 20 model.scoring_fct_norm 1.0 +875 20 regularizer.weight 0.06342016775849003 +875 20 optimizer.lr 0.002073838067280791 +875 20 negative_sampler.num_negs_per_pos 63.0 +875 20 training.batch_size 0.0 +875 21 model.embedding_dim 0.0 +875 21 model.scoring_fct_norm 2.0 +875 21 regularizer.weight 0.028401822420788828 +875 21 optimizer.lr 0.04188509918026196 +875 21 negative_sampler.num_negs_per_pos 22.0 +875 21 training.batch_size 2.0 +875 22 model.embedding_dim 0.0 +875 22 model.scoring_fct_norm 2.0 +875 22 regularizer.weight 0.015808036893265556 +875 22 optimizer.lr 0.09129921400722815 +875 22 negative_sampler.num_negs_per_pos 66.0 +875 22 training.batch_size 1.0 +875 23 model.embedding_dim 2.0 +875 23 model.scoring_fct_norm 1.0 +875 23 regularizer.weight 0.022990353433422622 +875 23 optimizer.lr 0.0023637063232434035 +875 23 negative_sampler.num_negs_per_pos 53.0 +875 23 training.batch_size 2.0 +875 24 model.embedding_dim 2.0 +875 24 model.scoring_fct_norm 1.0 +875 24 regularizer.weight 0.07130854628877255 +875 24 optimizer.lr 0.048471180721392955 +875 24 negative_sampler.num_negs_per_pos 40.0 +875 24 training.batch_size 2.0 +875 25 model.embedding_dim 0.0 +875 25 model.scoring_fct_norm 1.0 +875 25 regularizer.weight 0.067797922333081 +875 25 optimizer.lr 0.0034348316979979534 +875 25 negative_sampler.num_negs_per_pos 34.0 +875 25 training.batch_size 2.0 +875 26 model.embedding_dim 1.0 +875 26 model.scoring_fct_norm 1.0 +875 26 regularizer.weight 0.06259193385276282 +875 26 optimizer.lr 0.04254787581765033 +875 26 negative_sampler.num_negs_per_pos 7.0 +875 26 training.batch_size 0.0 +875 27 model.embedding_dim 0.0 +875 27 model.scoring_fct_norm 1.0 +875 27 regularizer.weight 0.16797862238809808 +875 27 optimizer.lr 0.01401744119336292 +875 27 negative_sampler.num_negs_per_pos 48.0 +875 27 training.batch_size 0.0 +875 28 
model.embedding_dim 0.0 +875 28 model.scoring_fct_norm 2.0 +875 28 regularizer.weight 0.04468565318457293 +875 28 optimizer.lr 0.00239732961085829 +875 28 negative_sampler.num_negs_per_pos 31.0 +875 28 training.batch_size 2.0 +875 29 model.embedding_dim 2.0 +875 29 model.scoring_fct_norm 1.0 +875 29 regularizer.weight 0.02836448351756994 +875 29 optimizer.lr 0.0029063221369917793 +875 29 negative_sampler.num_negs_per_pos 95.0 +875 29 training.batch_size 1.0 +875 30 model.embedding_dim 1.0 +875 30 model.scoring_fct_norm 2.0 +875 30 regularizer.weight 0.05348787285077272 +875 30 optimizer.lr 0.0917951770700546 +875 30 negative_sampler.num_negs_per_pos 92.0 +875 30 training.batch_size 1.0 +875 31 model.embedding_dim 1.0 +875 31 model.scoring_fct_norm 1.0 +875 31 regularizer.weight 0.05038294090965051 +875 31 optimizer.lr 0.0038840689382967338 +875 31 negative_sampler.num_negs_per_pos 57.0 +875 31 training.batch_size 2.0 +875 32 model.embedding_dim 0.0 +875 32 model.scoring_fct_norm 1.0 +875 32 regularizer.weight 0.018456682526570647 +875 32 optimizer.lr 0.00593029329077163 +875 32 negative_sampler.num_negs_per_pos 15.0 +875 32 training.batch_size 2.0 +875 33 model.embedding_dim 1.0 +875 33 model.scoring_fct_norm 1.0 +875 33 regularizer.weight 0.22369593639301233 +875 33 optimizer.lr 0.002731982669322097 +875 33 negative_sampler.num_negs_per_pos 61.0 +875 33 training.batch_size 0.0 +875 34 model.embedding_dim 1.0 +875 34 model.scoring_fct_norm 1.0 +875 34 regularizer.weight 0.02295352167200271 +875 34 optimizer.lr 0.017232361742444965 +875 34 negative_sampler.num_negs_per_pos 39.0 +875 34 training.batch_size 2.0 +875 35 model.embedding_dim 2.0 +875 35 model.scoring_fct_norm 1.0 +875 35 regularizer.weight 0.11755458502414773 +875 35 optimizer.lr 0.011965236806544434 +875 35 negative_sampler.num_negs_per_pos 13.0 +875 35 training.batch_size 1.0 +875 36 model.embedding_dim 1.0 +875 36 model.scoring_fct_norm 1.0 +875 36 regularizer.weight 0.05257574345509951 +875 36 
optimizer.lr 0.029498259246776837 +875 36 negative_sampler.num_negs_per_pos 32.0 +875 36 training.batch_size 1.0 +875 37 model.embedding_dim 0.0 +875 37 model.scoring_fct_norm 1.0 +875 37 regularizer.weight 0.01911334873537711 +875 37 optimizer.lr 0.0035273655882419315 +875 37 negative_sampler.num_negs_per_pos 49.0 +875 37 training.batch_size 0.0 +875 38 model.embedding_dim 0.0 +875 38 model.scoring_fct_norm 2.0 +875 38 regularizer.weight 0.2158459606548639 +875 38 optimizer.lr 0.0027594235047961138 +875 38 negative_sampler.num_negs_per_pos 83.0 +875 38 training.batch_size 0.0 +875 39 model.embedding_dim 0.0 +875 39 model.scoring_fct_norm 2.0 +875 39 regularizer.weight 0.15830818202862584 +875 39 optimizer.lr 0.004201509823554333 +875 39 negative_sampler.num_negs_per_pos 11.0 +875 39 training.batch_size 0.0 +875 40 model.embedding_dim 0.0 +875 40 model.scoring_fct_norm 2.0 +875 40 regularizer.weight 0.03730382908890528 +875 40 optimizer.lr 0.06172016982037394 +875 40 negative_sampler.num_negs_per_pos 65.0 +875 40 training.batch_size 2.0 +875 41 model.embedding_dim 1.0 +875 41 model.scoring_fct_norm 2.0 +875 41 regularizer.weight 0.08554568757687431 +875 41 optimizer.lr 0.025182678355830015 +875 41 negative_sampler.num_negs_per_pos 11.0 +875 41 training.batch_size 1.0 +875 42 model.embedding_dim 0.0 +875 42 model.scoring_fct_norm 1.0 +875 42 regularizer.weight 0.026167678788578546 +875 42 optimizer.lr 0.0218875001961188 +875 42 negative_sampler.num_negs_per_pos 41.0 +875 42 training.batch_size 0.0 +875 43 model.embedding_dim 2.0 +875 43 model.scoring_fct_norm 1.0 +875 43 regularizer.weight 0.011183081368041426 +875 43 optimizer.lr 0.014394670420363603 +875 43 negative_sampler.num_negs_per_pos 65.0 +875 43 training.batch_size 1.0 +875 44 model.embedding_dim 2.0 +875 44 model.scoring_fct_norm 2.0 +875 44 regularizer.weight 0.11211148628706319 +875 44 optimizer.lr 0.005941121945639177 +875 44 negative_sampler.num_negs_per_pos 58.0 +875 44 training.batch_size 2.0 +875 
45 model.embedding_dim 1.0 +875 45 model.scoring_fct_norm 1.0 +875 45 regularizer.weight 0.014307101647853196 +875 45 optimizer.lr 0.001082955956976517 +875 45 negative_sampler.num_negs_per_pos 23.0 +875 45 training.batch_size 1.0 +875 46 model.embedding_dim 1.0 +875 46 model.scoring_fct_norm 2.0 +875 46 regularizer.weight 0.012342966591934392 +875 46 optimizer.lr 0.012062433752517955 +875 46 negative_sampler.num_negs_per_pos 34.0 +875 46 training.batch_size 1.0 +875 47 model.embedding_dim 2.0 +875 47 model.scoring_fct_norm 1.0 +875 47 regularizer.weight 0.17948942245617514 +875 47 optimizer.lr 0.03464317244299699 +875 47 negative_sampler.num_negs_per_pos 35.0 +875 47 training.batch_size 0.0 +875 48 model.embedding_dim 0.0 +875 48 model.scoring_fct_norm 1.0 +875 48 regularizer.weight 0.03167409771707488 +875 48 optimizer.lr 0.0067231796552880965 +875 48 negative_sampler.num_negs_per_pos 12.0 +875 48 training.batch_size 1.0 +875 49 model.embedding_dim 1.0 +875 49 model.scoring_fct_norm 2.0 +875 49 regularizer.weight 0.24763751892761085 +875 49 optimizer.lr 0.006842506428598471 +875 49 negative_sampler.num_negs_per_pos 67.0 +875 49 training.batch_size 0.0 +875 50 model.embedding_dim 0.0 +875 50 model.scoring_fct_norm 2.0 +875 50 regularizer.weight 0.015332199624961254 +875 50 optimizer.lr 0.024433239475017146 +875 50 negative_sampler.num_negs_per_pos 30.0 +875 50 training.batch_size 2.0 +875 51 model.embedding_dim 2.0 +875 51 model.scoring_fct_norm 2.0 +875 51 regularizer.weight 0.013869128697356296 +875 51 optimizer.lr 0.00868616178103401 +875 51 negative_sampler.num_negs_per_pos 7.0 +875 51 training.batch_size 0.0 +875 52 model.embedding_dim 2.0 +875 52 model.scoring_fct_norm 1.0 +875 52 regularizer.weight 0.017749057716827832 +875 52 optimizer.lr 0.041294619449204636 +875 52 negative_sampler.num_negs_per_pos 16.0 +875 52 training.batch_size 0.0 +875 53 model.embedding_dim 1.0 +875 53 model.scoring_fct_norm 2.0 +875 53 regularizer.weight 0.035368801219744374 +875 
53 optimizer.lr 0.03281888924499518 +875 53 negative_sampler.num_negs_per_pos 59.0 +875 53 training.batch_size 2.0 +875 54 model.embedding_dim 2.0 +875 54 model.scoring_fct_norm 1.0 +875 54 regularizer.weight 0.01765585075122238 +875 54 optimizer.lr 0.0057832521043867085 +875 54 negative_sampler.num_negs_per_pos 88.0 +875 54 training.batch_size 0.0 +875 55 model.embedding_dim 1.0 +875 55 model.scoring_fct_norm 2.0 +875 55 regularizer.weight 0.06334851853264023 +875 55 optimizer.lr 0.01753140760562828 +875 55 negative_sampler.num_negs_per_pos 16.0 +875 55 training.batch_size 0.0 +875 56 model.embedding_dim 2.0 +875 56 model.scoring_fct_norm 1.0 +875 56 regularizer.weight 0.19862239745336357 +875 56 optimizer.lr 0.012433525118971623 +875 56 negative_sampler.num_negs_per_pos 68.0 +875 56 training.batch_size 2.0 +875 57 model.embedding_dim 1.0 +875 57 model.scoring_fct_norm 1.0 +875 57 regularizer.weight 0.03262621013716405 +875 57 optimizer.lr 0.08596351856072959 +875 57 negative_sampler.num_negs_per_pos 27.0 +875 57 training.batch_size 0.0 +875 58 model.embedding_dim 2.0 +875 58 model.scoring_fct_norm 2.0 +875 58 regularizer.weight 0.012340285197690419 +875 58 optimizer.lr 0.015629003275272984 +875 58 negative_sampler.num_negs_per_pos 43.0 +875 58 training.batch_size 1.0 +875 59 model.embedding_dim 2.0 +875 59 model.scoring_fct_norm 2.0 +875 59 regularizer.weight 0.030945433400743034 +875 59 optimizer.lr 0.007270659623797713 +875 59 negative_sampler.num_negs_per_pos 74.0 +875 59 training.batch_size 1.0 +875 60 model.embedding_dim 0.0 +875 60 model.scoring_fct_norm 2.0 +875 60 regularizer.weight 0.06434798086815684 +875 60 optimizer.lr 0.056405660616119825 +875 60 negative_sampler.num_negs_per_pos 34.0 +875 60 training.batch_size 2.0 +875 61 model.embedding_dim 0.0 +875 61 model.scoring_fct_norm 1.0 +875 61 regularizer.weight 0.018119133066169617 +875 61 optimizer.lr 0.05305261974173357 +875 61 negative_sampler.num_negs_per_pos 48.0 +875 61 training.batch_size 2.0 
+875 62 model.embedding_dim 0.0 +875 62 model.scoring_fct_norm 1.0 +875 62 regularizer.weight 0.059328518272863805 +875 62 optimizer.lr 0.01042701907333774 +875 62 negative_sampler.num_negs_per_pos 0.0 +875 62 training.batch_size 1.0 +875 63 model.embedding_dim 1.0 +875 63 model.scoring_fct_norm 1.0 +875 63 regularizer.weight 0.1860377347937463 +875 63 optimizer.lr 0.004189466935003437 +875 63 negative_sampler.num_negs_per_pos 76.0 +875 63 training.batch_size 2.0 +875 64 model.embedding_dim 1.0 +875 64 model.scoring_fct_norm 2.0 +875 64 regularizer.weight 0.01193045867724571 +875 64 optimizer.lr 0.0019752319723158264 +875 64 negative_sampler.num_negs_per_pos 25.0 +875 64 training.batch_size 0.0 +875 65 model.embedding_dim 0.0 +875 65 model.scoring_fct_norm 2.0 +875 65 regularizer.weight 0.01410705795590817 +875 65 optimizer.lr 0.06381621087242874 +875 65 negative_sampler.num_negs_per_pos 43.0 +875 65 training.batch_size 0.0 +875 66 model.embedding_dim 0.0 +875 66 model.scoring_fct_norm 2.0 +875 66 regularizer.weight 0.02366017138426575 +875 66 optimizer.lr 0.0052438354584271474 +875 66 negative_sampler.num_negs_per_pos 20.0 +875 66 training.batch_size 1.0 +875 67 model.embedding_dim 1.0 +875 67 model.scoring_fct_norm 2.0 +875 67 regularizer.weight 0.055734109482371036 +875 67 optimizer.lr 0.006449441984185117 +875 67 negative_sampler.num_negs_per_pos 65.0 +875 67 training.batch_size 2.0 +875 68 model.embedding_dim 0.0 +875 68 model.scoring_fct_norm 1.0 +875 68 regularizer.weight 0.18192646012347488 +875 68 optimizer.lr 0.0455808372525212 +875 68 negative_sampler.num_negs_per_pos 7.0 +875 68 training.batch_size 1.0 +875 69 model.embedding_dim 0.0 +875 69 model.scoring_fct_norm 1.0 +875 69 regularizer.weight 0.23916112154902447 +875 69 optimizer.lr 0.030450802737594682 +875 69 negative_sampler.num_negs_per_pos 62.0 +875 69 training.batch_size 1.0 +875 70 model.embedding_dim 2.0 +875 70 model.scoring_fct_norm 1.0 +875 70 regularizer.weight 0.10823149245984001 +875 70 
optimizer.lr 0.006902495311361445 +875 70 negative_sampler.num_negs_per_pos 85.0 +875 70 training.batch_size 1.0 +875 71 model.embedding_dim 0.0 +875 71 model.scoring_fct_norm 1.0 +875 71 regularizer.weight 0.2585145365025404 +875 71 optimizer.lr 0.010545334727328833 +875 71 negative_sampler.num_negs_per_pos 46.0 +875 71 training.batch_size 1.0 +875 72 model.embedding_dim 0.0 +875 72 model.scoring_fct_norm 2.0 +875 72 regularizer.weight 0.037003986838432486 +875 72 optimizer.lr 0.001445081547410265 +875 72 negative_sampler.num_negs_per_pos 34.0 +875 72 training.batch_size 0.0 +875 73 model.embedding_dim 0.0 +875 73 model.scoring_fct_norm 2.0 +875 73 regularizer.weight 0.017879107009417094 +875 73 optimizer.lr 0.06383673045933569 +875 73 negative_sampler.num_negs_per_pos 77.0 +875 73 training.batch_size 0.0 +875 74 model.embedding_dim 1.0 +875 74 model.scoring_fct_norm 1.0 +875 74 regularizer.weight 0.02469890967232057 +875 74 optimizer.lr 0.04503746891164607 +875 74 negative_sampler.num_negs_per_pos 21.0 +875 74 training.batch_size 0.0 +875 75 model.embedding_dim 0.0 +875 75 model.scoring_fct_norm 2.0 +875 75 regularizer.weight 0.09207927915663604 +875 75 optimizer.lr 0.06286151725657609 +875 75 negative_sampler.num_negs_per_pos 23.0 +875 75 training.batch_size 2.0 +875 76 model.embedding_dim 0.0 +875 76 model.scoring_fct_norm 2.0 +875 76 regularizer.weight 0.15618907014358185 +875 76 optimizer.lr 0.009593268740126571 +875 76 negative_sampler.num_negs_per_pos 92.0 +875 76 training.batch_size 0.0 +875 77 model.embedding_dim 2.0 +875 77 model.scoring_fct_norm 1.0 +875 77 regularizer.weight 0.28588929539488467 +875 77 optimizer.lr 0.0033498547130276293 +875 77 negative_sampler.num_negs_per_pos 20.0 +875 77 training.batch_size 0.0 +875 78 model.embedding_dim 2.0 +875 78 model.scoring_fct_norm 1.0 +875 78 regularizer.weight 0.09717599745651806 +875 78 optimizer.lr 0.0015434352047741617 +875 78 negative_sampler.num_negs_per_pos 52.0 +875 78 training.batch_size 1.0 +875 
79 model.embedding_dim 0.0 +875 79 model.scoring_fct_norm 2.0 +875 79 regularizer.weight 0.19970518614840463 +875 79 optimizer.lr 0.00145803030027377 +875 79 negative_sampler.num_negs_per_pos 47.0 +875 79 training.batch_size 2.0 +875 80 model.embedding_dim 1.0 +875 80 model.scoring_fct_norm 1.0 +875 80 regularizer.weight 0.017211541292537054 +875 80 optimizer.lr 0.018022836675324005 +875 80 negative_sampler.num_negs_per_pos 95.0 +875 80 training.batch_size 2.0 +875 81 model.embedding_dim 0.0 +875 81 model.scoring_fct_norm 2.0 +875 81 regularizer.weight 0.11057152589150201 +875 81 optimizer.lr 0.004669822841166888 +875 81 negative_sampler.num_negs_per_pos 97.0 +875 81 training.batch_size 2.0 +875 82 model.embedding_dim 1.0 +875 82 model.scoring_fct_norm 2.0 +875 82 regularizer.weight 0.022052408329002376 +875 82 optimizer.lr 0.002698898264939606 +875 82 negative_sampler.num_negs_per_pos 7.0 +875 82 training.batch_size 2.0 +875 83 model.embedding_dim 0.0 +875 83 model.scoring_fct_norm 2.0 +875 83 regularizer.weight 0.07846424869170274 +875 83 optimizer.lr 0.0011434875140209758 +875 83 negative_sampler.num_negs_per_pos 20.0 +875 83 training.batch_size 2.0 +875 84 model.embedding_dim 0.0 +875 84 model.scoring_fct_norm 2.0 +875 84 regularizer.weight 0.2574624586527535 +875 84 optimizer.lr 0.006318904851394533 +875 84 negative_sampler.num_negs_per_pos 46.0 +875 84 training.batch_size 1.0 +875 85 model.embedding_dim 2.0 +875 85 model.scoring_fct_norm 2.0 +875 85 regularizer.weight 0.08818811970555293 +875 85 optimizer.lr 0.0493358526190866 +875 85 negative_sampler.num_negs_per_pos 50.0 +875 85 training.batch_size 0.0 +875 86 model.embedding_dim 1.0 +875 86 model.scoring_fct_norm 1.0 +875 86 regularizer.weight 0.03289674522928011 +875 86 optimizer.lr 0.05321192694211095 +875 86 negative_sampler.num_negs_per_pos 71.0 +875 86 training.batch_size 0.0 +875 87 model.embedding_dim 1.0 +875 87 model.scoring_fct_norm 2.0 +875 87 regularizer.weight 0.17703413772217486 +875 87 
optimizer.lr 0.0022132410358875433 +875 87 negative_sampler.num_negs_per_pos 10.0 +875 87 training.batch_size 2.0 +875 88 model.embedding_dim 1.0 +875 88 model.scoring_fct_norm 2.0 +875 88 regularizer.weight 0.03254020383752851 +875 88 optimizer.lr 0.0010057982629078101 +875 88 negative_sampler.num_negs_per_pos 28.0 +875 88 training.batch_size 2.0 +875 89 model.embedding_dim 1.0 +875 89 model.scoring_fct_norm 1.0 +875 89 regularizer.weight 0.2085294974081333 +875 89 optimizer.lr 0.005434716197090534 +875 89 negative_sampler.num_negs_per_pos 49.0 +875 89 training.batch_size 2.0 +875 90 model.embedding_dim 1.0 +875 90 model.scoring_fct_norm 2.0 +875 90 regularizer.weight 0.1249362868519484 +875 90 optimizer.lr 0.04831224692776164 +875 90 negative_sampler.num_negs_per_pos 49.0 +875 90 training.batch_size 0.0 +875 91 model.embedding_dim 1.0 +875 91 model.scoring_fct_norm 1.0 +875 91 regularizer.weight 0.010250366366863632 +875 91 optimizer.lr 0.05773668370405791 +875 91 negative_sampler.num_negs_per_pos 7.0 +875 91 training.batch_size 0.0 +875 92 model.embedding_dim 2.0 +875 92 model.scoring_fct_norm 1.0 +875 92 regularizer.weight 0.011332077241888013 +875 92 optimizer.lr 0.06611793600652636 +875 92 negative_sampler.num_negs_per_pos 52.0 +875 92 training.batch_size 1.0 +875 93 model.embedding_dim 2.0 +875 93 model.scoring_fct_norm 1.0 +875 93 regularizer.weight 0.2171514008932229 +875 93 optimizer.lr 0.0032944742005340225 +875 93 negative_sampler.num_negs_per_pos 92.0 +875 93 training.batch_size 1.0 +875 94 model.embedding_dim 2.0 +875 94 model.scoring_fct_norm 2.0 +875 94 regularizer.weight 0.16465684649532147 +875 94 optimizer.lr 0.002701435722242259 +875 94 negative_sampler.num_negs_per_pos 71.0 +875 94 training.batch_size 1.0 +875 95 model.embedding_dim 1.0 +875 95 model.scoring_fct_norm 2.0 +875 95 regularizer.weight 0.03287897577590308 +875 95 optimizer.lr 0.026141302992644926 +875 95 negative_sampler.num_negs_per_pos 7.0 +875 95 training.batch_size 2.0 +875 96 
model.embedding_dim 2.0 +875 96 model.scoring_fct_norm 1.0 +875 96 regularizer.weight 0.018430262104968086 +875 96 optimizer.lr 0.0011964249087613565 +875 96 negative_sampler.num_negs_per_pos 58.0 +875 96 training.batch_size 0.0 +875 97 model.embedding_dim 0.0 +875 97 model.scoring_fct_norm 1.0 +875 97 regularizer.weight 0.1921514635609034 +875 97 optimizer.lr 0.01645882555274718 +875 97 negative_sampler.num_negs_per_pos 75.0 +875 97 training.batch_size 2.0 +875 98 model.embedding_dim 2.0 +875 98 model.scoring_fct_norm 2.0 +875 98 regularizer.weight 0.09092488718293147 +875 98 optimizer.lr 0.008712969785135458 +875 98 negative_sampler.num_negs_per_pos 54.0 +875 98 training.batch_size 0.0 +875 99 model.embedding_dim 0.0 +875 99 model.scoring_fct_norm 1.0 +875 99 regularizer.weight 0.025817756015070314 +875 99 optimizer.lr 0.0031364370398518334 +875 99 negative_sampler.num_negs_per_pos 34.0 +875 99 training.batch_size 2.0 +875 100 model.embedding_dim 2.0 +875 100 model.scoring_fct_norm 2.0 +875 100 regularizer.weight 0.01622702915426192 +875 100 optimizer.lr 0.02437947199173658 +875 100 negative_sampler.num_negs_per_pos 23.0 +875 100 training.batch_size 1.0 +875 1 dataset """kinships""" +875 1 model """transh""" +875 1 loss """bceaftersigmoid""" +875 1 regularizer """transh""" +875 1 optimizer """adam""" +875 1 training_loop """owa""" +875 1 negative_sampler """basic""" +875 1 evaluator """rankbased""" +875 2 dataset """kinships""" +875 2 model """transh""" +875 2 loss """bceaftersigmoid""" +875 2 regularizer """transh""" +875 2 optimizer """adam""" +875 2 training_loop """owa""" +875 2 negative_sampler """basic""" +875 2 evaluator """rankbased""" +875 3 dataset """kinships""" +875 3 model """transh""" +875 3 loss """bceaftersigmoid""" +875 3 regularizer """transh""" +875 3 optimizer """adam""" +875 3 training_loop """owa""" +875 3 negative_sampler """basic""" +875 3 evaluator """rankbased""" +875 4 dataset """kinships""" +875 4 model """transh""" +875 4 loss 
"""bceaftersigmoid""" +875 4 regularizer """transh""" +875 4 optimizer """adam""" +875 4 training_loop """owa""" +875 4 negative_sampler """basic""" +875 4 evaluator """rankbased""" +875 5 dataset """kinships""" +875 5 model """transh""" +875 5 loss """bceaftersigmoid""" +875 5 regularizer """transh""" +875 5 optimizer """adam""" +875 5 training_loop """owa""" +875 5 negative_sampler """basic""" +875 5 evaluator """rankbased""" +875 6 dataset """kinships""" +875 6 model """transh""" +875 6 loss """bceaftersigmoid""" +875 6 regularizer """transh""" +875 6 optimizer """adam""" +875 6 training_loop """owa""" +875 6 negative_sampler """basic""" +875 6 evaluator """rankbased""" +875 7 dataset """kinships""" +875 7 model """transh""" +875 7 loss """bceaftersigmoid""" +875 7 regularizer """transh""" +875 7 optimizer """adam""" +875 7 training_loop """owa""" +875 7 negative_sampler """basic""" +875 7 evaluator """rankbased""" +875 8 dataset """kinships""" +875 8 model """transh""" +875 8 loss """bceaftersigmoid""" +875 8 regularizer """transh""" +875 8 optimizer """adam""" +875 8 training_loop """owa""" +875 8 negative_sampler """basic""" +875 8 evaluator """rankbased""" +875 9 dataset """kinships""" +875 9 model """transh""" +875 9 loss """bceaftersigmoid""" +875 9 regularizer """transh""" +875 9 optimizer """adam""" +875 9 training_loop """owa""" +875 9 negative_sampler """basic""" +875 9 evaluator """rankbased""" +875 10 dataset """kinships""" +875 10 model """transh""" +875 10 loss """bceaftersigmoid""" +875 10 regularizer """transh""" +875 10 optimizer """adam""" +875 10 training_loop """owa""" +875 10 negative_sampler """basic""" +875 10 evaluator """rankbased""" +875 11 dataset """kinships""" +875 11 model """transh""" +875 11 loss """bceaftersigmoid""" +875 11 regularizer """transh""" +875 11 optimizer """adam""" +875 11 training_loop """owa""" +875 11 negative_sampler """basic""" +875 11 evaluator """rankbased""" +875 12 dataset """kinships""" +875 12 model 
"""transh""" +875 12 loss """bceaftersigmoid""" +875 12 regularizer """transh""" +875 12 optimizer """adam""" +875 12 training_loop """owa""" +875 12 negative_sampler """basic""" +875 12 evaluator """rankbased""" +875 13 dataset """kinships""" +875 13 model """transh""" +875 13 loss """bceaftersigmoid""" +875 13 regularizer """transh""" +875 13 optimizer """adam""" +875 13 training_loop """owa""" +875 13 negative_sampler """basic""" +875 13 evaluator """rankbased""" +875 14 dataset """kinships""" +875 14 model """transh""" +875 14 loss """bceaftersigmoid""" +875 14 regularizer """transh""" +875 14 optimizer """adam""" +875 14 training_loop """owa""" +875 14 negative_sampler """basic""" +875 14 evaluator """rankbased""" +875 15 dataset """kinships""" +875 15 model """transh""" +875 15 loss """bceaftersigmoid""" +875 15 regularizer """transh""" +875 15 optimizer """adam""" +875 15 training_loop """owa""" +875 15 negative_sampler """basic""" +875 15 evaluator """rankbased""" +875 16 dataset """kinships""" +875 16 model """transh""" +875 16 loss """bceaftersigmoid""" +875 16 regularizer """transh""" +875 16 optimizer """adam""" +875 16 training_loop """owa""" +875 16 negative_sampler """basic""" +875 16 evaluator """rankbased""" +875 17 dataset """kinships""" +875 17 model """transh""" +875 17 loss """bceaftersigmoid""" +875 17 regularizer """transh""" +875 17 optimizer """adam""" +875 17 training_loop """owa""" +875 17 negative_sampler """basic""" +875 17 evaluator """rankbased""" +875 18 dataset """kinships""" +875 18 model """transh""" +875 18 loss """bceaftersigmoid""" +875 18 regularizer """transh""" +875 18 optimizer """adam""" +875 18 training_loop """owa""" +875 18 negative_sampler """basic""" +875 18 evaluator """rankbased""" +875 19 dataset """kinships""" +875 19 model """transh""" +875 19 loss """bceaftersigmoid""" +875 19 regularizer """transh""" +875 19 optimizer """adam""" +875 19 training_loop """owa""" +875 19 negative_sampler """basic""" +875 19 
evaluator """rankbased""" +875 20 dataset """kinships""" +875 20 model """transh""" +875 20 loss """bceaftersigmoid""" +875 20 regularizer """transh""" +875 20 optimizer """adam""" +875 20 training_loop """owa""" +875 20 negative_sampler """basic""" +875 20 evaluator """rankbased""" +875 21 dataset """kinships""" +875 21 model """transh""" +875 21 loss """bceaftersigmoid""" +875 21 regularizer """transh""" +875 21 optimizer """adam""" +875 21 training_loop """owa""" +875 21 negative_sampler """basic""" +875 21 evaluator """rankbased""" +875 22 dataset """kinships""" +875 22 model """transh""" +875 22 loss """bceaftersigmoid""" +875 22 regularizer """transh""" +875 22 optimizer """adam""" +875 22 training_loop """owa""" +875 22 negative_sampler """basic""" +875 22 evaluator """rankbased""" +875 23 dataset """kinships""" +875 23 model """transh""" +875 23 loss """bceaftersigmoid""" +875 23 regularizer """transh""" +875 23 optimizer """adam""" +875 23 training_loop """owa""" +875 23 negative_sampler """basic""" +875 23 evaluator """rankbased""" +875 24 dataset """kinships""" +875 24 model """transh""" +875 24 loss """bceaftersigmoid""" +875 24 regularizer """transh""" +875 24 optimizer """adam""" +875 24 training_loop """owa""" +875 24 negative_sampler """basic""" +875 24 evaluator """rankbased""" +875 25 dataset """kinships""" +875 25 model """transh""" +875 25 loss """bceaftersigmoid""" +875 25 regularizer """transh""" +875 25 optimizer """adam""" +875 25 training_loop """owa""" +875 25 negative_sampler """basic""" +875 25 evaluator """rankbased""" +875 26 dataset """kinships""" +875 26 model """transh""" +875 26 loss """bceaftersigmoid""" +875 26 regularizer """transh""" +875 26 optimizer """adam""" +875 26 training_loop """owa""" +875 26 negative_sampler """basic""" +875 26 evaluator """rankbased""" +875 27 dataset """kinships""" +875 27 model """transh""" +875 27 loss """bceaftersigmoid""" +875 27 regularizer """transh""" +875 27 optimizer """adam""" +875 27 
training_loop """owa""" +875 27 negative_sampler """basic""" +875 27 evaluator """rankbased""" +875 28 dataset """kinships""" +875 28 model """transh""" +875 28 loss """bceaftersigmoid""" +875 28 regularizer """transh""" +875 28 optimizer """adam""" +875 28 training_loop """owa""" +875 28 negative_sampler """basic""" +875 28 evaluator """rankbased""" +875 29 dataset """kinships""" +875 29 model """transh""" +875 29 loss """bceaftersigmoid""" +875 29 regularizer """transh""" +875 29 optimizer """adam""" +875 29 training_loop """owa""" +875 29 negative_sampler """basic""" +875 29 evaluator """rankbased""" +875 30 dataset """kinships""" +875 30 model """transh""" +875 30 loss """bceaftersigmoid""" +875 30 regularizer """transh""" +875 30 optimizer """adam""" +875 30 training_loop """owa""" +875 30 negative_sampler """basic""" +875 30 evaluator """rankbased""" +875 31 dataset """kinships""" +875 31 model """transh""" +875 31 loss """bceaftersigmoid""" +875 31 regularizer """transh""" +875 31 optimizer """adam""" +875 31 training_loop """owa""" +875 31 negative_sampler """basic""" +875 31 evaluator """rankbased""" +875 32 dataset """kinships""" +875 32 model """transh""" +875 32 loss """bceaftersigmoid""" +875 32 regularizer """transh""" +875 32 optimizer """adam""" +875 32 training_loop """owa""" +875 32 negative_sampler """basic""" +875 32 evaluator """rankbased""" +875 33 dataset """kinships""" +875 33 model """transh""" +875 33 loss """bceaftersigmoid""" +875 33 regularizer """transh""" +875 33 optimizer """adam""" +875 33 training_loop """owa""" +875 33 negative_sampler """basic""" +875 33 evaluator """rankbased""" +875 34 dataset """kinships""" +875 34 model """transh""" +875 34 loss """bceaftersigmoid""" +875 34 regularizer """transh""" +875 34 optimizer """adam""" +875 34 training_loop """owa""" +875 34 negative_sampler """basic""" +875 34 evaluator """rankbased""" +875 35 dataset """kinships""" +875 35 model """transh""" +875 35 loss """bceaftersigmoid""" +875 
35 regularizer """transh""" +875 35 optimizer """adam""" +875 35 training_loop """owa""" +875 35 negative_sampler """basic""" +875 35 evaluator """rankbased""" +875 36 dataset """kinships""" +875 36 model """transh""" +875 36 loss """bceaftersigmoid""" +875 36 regularizer """transh""" +875 36 optimizer """adam""" +875 36 training_loop """owa""" +875 36 negative_sampler """basic""" +875 36 evaluator """rankbased""" +875 37 dataset """kinships""" +875 37 model """transh""" +875 37 loss """bceaftersigmoid""" +875 37 regularizer """transh""" +875 37 optimizer """adam""" +875 37 training_loop """owa""" +875 37 negative_sampler """basic""" +875 37 evaluator """rankbased""" +875 38 dataset """kinships""" +875 38 model """transh""" +875 38 loss """bceaftersigmoid""" +875 38 regularizer """transh""" +875 38 optimizer """adam""" +875 38 training_loop """owa""" +875 38 negative_sampler """basic""" +875 38 evaluator """rankbased""" +875 39 dataset """kinships""" +875 39 model """transh""" +875 39 loss """bceaftersigmoid""" +875 39 regularizer """transh""" +875 39 optimizer """adam""" +875 39 training_loop """owa""" +875 39 negative_sampler """basic""" +875 39 evaluator """rankbased""" +875 40 dataset """kinships""" +875 40 model """transh""" +875 40 loss """bceaftersigmoid""" +875 40 regularizer """transh""" +875 40 optimizer """adam""" +875 40 training_loop """owa""" +875 40 negative_sampler """basic""" +875 40 evaluator """rankbased""" +875 41 dataset """kinships""" +875 41 model """transh""" +875 41 loss """bceaftersigmoid""" +875 41 regularizer """transh""" +875 41 optimizer """adam""" +875 41 training_loop """owa""" +875 41 negative_sampler """basic""" +875 41 evaluator """rankbased""" +875 42 dataset """kinships""" +875 42 model """transh""" +875 42 loss """bceaftersigmoid""" +875 42 regularizer """transh""" +875 42 optimizer """adam""" +875 42 training_loop """owa""" +875 42 negative_sampler """basic""" +875 42 evaluator """rankbased""" +875 43 dataset """kinships""" 
+875 43 model """transh""" +875 43 loss """bceaftersigmoid""" +875 43 regularizer """transh""" +875 43 optimizer """adam""" +875 43 training_loop """owa""" +875 43 negative_sampler """basic""" +875 43 evaluator """rankbased""" +875 44 dataset """kinships""" +875 44 model """transh""" +875 44 loss """bceaftersigmoid""" +875 44 regularizer """transh""" +875 44 optimizer """adam""" +875 44 training_loop """owa""" +875 44 negative_sampler """basic""" +875 44 evaluator """rankbased""" +875 45 dataset """kinships""" +875 45 model """transh""" +875 45 loss """bceaftersigmoid""" +875 45 regularizer """transh""" +875 45 optimizer """adam""" +875 45 training_loop """owa""" +875 45 negative_sampler """basic""" +875 45 evaluator """rankbased""" +875 46 dataset """kinships""" +875 46 model """transh""" +875 46 loss """bceaftersigmoid""" +875 46 regularizer """transh""" +875 46 optimizer """adam""" +875 46 training_loop """owa""" +875 46 negative_sampler """basic""" +875 46 evaluator """rankbased""" +875 47 dataset """kinships""" +875 47 model """transh""" +875 47 loss """bceaftersigmoid""" +875 47 regularizer """transh""" +875 47 optimizer """adam""" +875 47 training_loop """owa""" +875 47 negative_sampler """basic""" +875 47 evaluator """rankbased""" +875 48 dataset """kinships""" +875 48 model """transh""" +875 48 loss """bceaftersigmoid""" +875 48 regularizer """transh""" +875 48 optimizer """adam""" +875 48 training_loop """owa""" +875 48 negative_sampler """basic""" +875 48 evaluator """rankbased""" +875 49 dataset """kinships""" +875 49 model """transh""" +875 49 loss """bceaftersigmoid""" +875 49 regularizer """transh""" +875 49 optimizer """adam""" +875 49 training_loop """owa""" +875 49 negative_sampler """basic""" +875 49 evaluator """rankbased""" +875 50 dataset """kinships""" +875 50 model """transh""" +875 50 loss """bceaftersigmoid""" +875 50 regularizer """transh""" +875 50 optimizer """adam""" +875 50 training_loop """owa""" +875 50 negative_sampler """basic""" 
+875 50 evaluator """rankbased""" +875 51 dataset """kinships""" +875 51 model """transh""" +875 51 loss """bceaftersigmoid""" +875 51 regularizer """transh""" +875 51 optimizer """adam""" +875 51 training_loop """owa""" +875 51 negative_sampler """basic""" +875 51 evaluator """rankbased""" +875 52 dataset """kinships""" +875 52 model """transh""" +875 52 loss """bceaftersigmoid""" +875 52 regularizer """transh""" +875 52 optimizer """adam""" +875 52 training_loop """owa""" +875 52 negative_sampler """basic""" +875 52 evaluator """rankbased""" +875 53 dataset """kinships""" +875 53 model """transh""" +875 53 loss """bceaftersigmoid""" +875 53 regularizer """transh""" +875 53 optimizer """adam""" +875 53 training_loop """owa""" +875 53 negative_sampler """basic""" +875 53 evaluator """rankbased""" +875 54 dataset """kinships""" +875 54 model """transh""" +875 54 loss """bceaftersigmoid""" +875 54 regularizer """transh""" +875 54 optimizer """adam""" +875 54 training_loop """owa""" +875 54 negative_sampler """basic""" +875 54 evaluator """rankbased""" +875 55 dataset """kinships""" +875 55 model """transh""" +875 55 loss """bceaftersigmoid""" +875 55 regularizer """transh""" +875 55 optimizer """adam""" +875 55 training_loop """owa""" +875 55 negative_sampler """basic""" +875 55 evaluator """rankbased""" +875 56 dataset """kinships""" +875 56 model """transh""" +875 56 loss """bceaftersigmoid""" +875 56 regularizer """transh""" +875 56 optimizer """adam""" +875 56 training_loop """owa""" +875 56 negative_sampler """basic""" +875 56 evaluator """rankbased""" +875 57 dataset """kinships""" +875 57 model """transh""" +875 57 loss """bceaftersigmoid""" +875 57 regularizer """transh""" +875 57 optimizer """adam""" +875 57 training_loop """owa""" +875 57 negative_sampler """basic""" +875 57 evaluator """rankbased""" +875 58 dataset """kinships""" +875 58 model """transh""" +875 58 loss """bceaftersigmoid""" +875 58 regularizer """transh""" +875 58 optimizer """adam""" +875 
58 training_loop """owa""" +875 58 negative_sampler """basic""" +875 58 evaluator """rankbased""" +875 59 dataset """kinships""" +875 59 model """transh""" +875 59 loss """bceaftersigmoid""" +875 59 regularizer """transh""" +875 59 optimizer """adam""" +875 59 training_loop """owa""" +875 59 negative_sampler """basic""" +875 59 evaluator """rankbased""" +875 60 dataset """kinships""" +875 60 model """transh""" +875 60 loss """bceaftersigmoid""" +875 60 regularizer """transh""" +875 60 optimizer """adam""" +875 60 training_loop """owa""" +875 60 negative_sampler """basic""" +875 60 evaluator """rankbased""" +875 61 dataset """kinships""" +875 61 model """transh""" +875 61 loss """bceaftersigmoid""" +875 61 regularizer """transh""" +875 61 optimizer """adam""" +875 61 training_loop """owa""" +875 61 negative_sampler """basic""" +875 61 evaluator """rankbased""" +875 62 dataset """kinships""" +875 62 model """transh""" +875 62 loss """bceaftersigmoid""" +875 62 regularizer """transh""" +875 62 optimizer """adam""" +875 62 training_loop """owa""" +875 62 negative_sampler """basic""" +875 62 evaluator """rankbased""" +875 63 dataset """kinships""" +875 63 model """transh""" +875 63 loss """bceaftersigmoid""" +875 63 regularizer """transh""" +875 63 optimizer """adam""" +875 63 training_loop """owa""" +875 63 negative_sampler """basic""" +875 63 evaluator """rankbased""" +875 64 dataset """kinships""" +875 64 model """transh""" +875 64 loss """bceaftersigmoid""" +875 64 regularizer """transh""" +875 64 optimizer """adam""" +875 64 training_loop """owa""" +875 64 negative_sampler """basic""" +875 64 evaluator """rankbased""" +875 65 dataset """kinships""" +875 65 model """transh""" +875 65 loss """bceaftersigmoid""" +875 65 regularizer """transh""" +875 65 optimizer """adam""" +875 65 training_loop """owa""" +875 65 negative_sampler """basic""" +875 65 evaluator """rankbased""" +875 66 dataset """kinships""" +875 66 model """transh""" +875 66 loss """bceaftersigmoid""" 
+875 66 regularizer """transh""" +875 66 optimizer """adam""" +875 66 training_loop """owa""" +875 66 negative_sampler """basic""" +875 66 evaluator """rankbased""" +875 67 dataset """kinships""" +875 67 model """transh""" +875 67 loss """bceaftersigmoid""" +875 67 regularizer """transh""" +875 67 optimizer """adam""" +875 67 training_loop """owa""" +875 67 negative_sampler """basic""" +875 67 evaluator """rankbased""" +875 68 dataset """kinships""" +875 68 model """transh""" +875 68 loss """bceaftersigmoid""" +875 68 regularizer """transh""" +875 68 optimizer """adam""" +875 68 training_loop """owa""" +875 68 negative_sampler """basic""" +875 68 evaluator """rankbased""" +875 69 dataset """kinships""" +875 69 model """transh""" +875 69 loss """bceaftersigmoid""" +875 69 regularizer """transh""" +875 69 optimizer """adam""" +875 69 training_loop """owa""" +875 69 negative_sampler """basic""" +875 69 evaluator """rankbased""" +875 70 dataset """kinships""" +875 70 model """transh""" +875 70 loss """bceaftersigmoid""" +875 70 regularizer """transh""" +875 70 optimizer """adam""" +875 70 training_loop """owa""" +875 70 negative_sampler """basic""" +875 70 evaluator """rankbased""" +875 71 dataset """kinships""" +875 71 model """transh""" +875 71 loss """bceaftersigmoid""" +875 71 regularizer """transh""" +875 71 optimizer """adam""" +875 71 training_loop """owa""" +875 71 negative_sampler """basic""" +875 71 evaluator """rankbased""" +875 72 dataset """kinships""" +875 72 model """transh""" +875 72 loss """bceaftersigmoid""" +875 72 regularizer """transh""" +875 72 optimizer """adam""" +875 72 training_loop """owa""" +875 72 negative_sampler """basic""" +875 72 evaluator """rankbased""" +875 73 dataset """kinships""" +875 73 model """transh""" +875 73 loss """bceaftersigmoid""" +875 73 regularizer """transh""" +875 73 optimizer """adam""" +875 73 training_loop """owa""" +875 73 negative_sampler """basic""" +875 73 evaluator """rankbased""" +875 74 dataset 
"""kinships""" +875 74 model """transh""" +875 74 loss """bceaftersigmoid""" +875 74 regularizer """transh""" +875 74 optimizer """adam""" +875 74 training_loop """owa""" +875 74 negative_sampler """basic""" +875 74 evaluator """rankbased""" +875 75 dataset """kinships""" +875 75 model """transh""" +875 75 loss """bceaftersigmoid""" +875 75 regularizer """transh""" +875 75 optimizer """adam""" +875 75 training_loop """owa""" +875 75 negative_sampler """basic""" +875 75 evaluator """rankbased""" +875 76 dataset """kinships""" +875 76 model """transh""" +875 76 loss """bceaftersigmoid""" +875 76 regularizer """transh""" +875 76 optimizer """adam""" +875 76 training_loop """owa""" +875 76 negative_sampler """basic""" +875 76 evaluator """rankbased""" +875 77 dataset """kinships""" +875 77 model """transh""" +875 77 loss """bceaftersigmoid""" +875 77 regularizer """transh""" +875 77 optimizer """adam""" +875 77 training_loop """owa""" +875 77 negative_sampler """basic""" +875 77 evaluator """rankbased""" +875 78 dataset """kinships""" +875 78 model """transh""" +875 78 loss """bceaftersigmoid""" +875 78 regularizer """transh""" +875 78 optimizer """adam""" +875 78 training_loop """owa""" +875 78 negative_sampler """basic""" +875 78 evaluator """rankbased""" +875 79 dataset """kinships""" +875 79 model """transh""" +875 79 loss """bceaftersigmoid""" +875 79 regularizer """transh""" +875 79 optimizer """adam""" +875 79 training_loop """owa""" +875 79 negative_sampler """basic""" +875 79 evaluator """rankbased""" +875 80 dataset """kinships""" +875 80 model """transh""" +875 80 loss """bceaftersigmoid""" +875 80 regularizer """transh""" +875 80 optimizer """adam""" +875 80 training_loop """owa""" +875 80 negative_sampler """basic""" +875 80 evaluator """rankbased""" +875 81 dataset """kinships""" +875 81 model """transh""" +875 81 loss """bceaftersigmoid""" +875 81 regularizer """transh""" +875 81 optimizer """adam""" +875 81 training_loop """owa""" +875 81 
negative_sampler """basic""" +875 81 evaluator """rankbased""" +875 82 dataset """kinships""" +875 82 model """transh""" +875 82 loss """bceaftersigmoid""" +875 82 regularizer """transh""" +875 82 optimizer """adam""" +875 82 training_loop """owa""" +875 82 negative_sampler """basic""" +875 82 evaluator """rankbased""" +875 83 dataset """kinships""" +875 83 model """transh""" +875 83 loss """bceaftersigmoid""" +875 83 regularizer """transh""" +875 83 optimizer """adam""" +875 83 training_loop """owa""" +875 83 negative_sampler """basic""" +875 83 evaluator """rankbased""" +875 84 dataset """kinships""" +875 84 model """transh""" +875 84 loss """bceaftersigmoid""" +875 84 regularizer """transh""" +875 84 optimizer """adam""" +875 84 training_loop """owa""" +875 84 negative_sampler """basic""" +875 84 evaluator """rankbased""" +875 85 dataset """kinships""" +875 85 model """transh""" +875 85 loss """bceaftersigmoid""" +875 85 regularizer """transh""" +875 85 optimizer """adam""" +875 85 training_loop """owa""" +875 85 negative_sampler """basic""" +875 85 evaluator """rankbased""" +875 86 dataset """kinships""" +875 86 model """transh""" +875 86 loss """bceaftersigmoid""" +875 86 regularizer """transh""" +875 86 optimizer """adam""" +875 86 training_loop """owa""" +875 86 negative_sampler """basic""" +875 86 evaluator """rankbased""" +875 87 dataset """kinships""" +875 87 model """transh""" +875 87 loss """bceaftersigmoid""" +875 87 regularizer """transh""" +875 87 optimizer """adam""" +875 87 training_loop """owa""" +875 87 negative_sampler """basic""" +875 87 evaluator """rankbased""" +875 88 dataset """kinships""" +875 88 model """transh""" +875 88 loss """bceaftersigmoid""" +875 88 regularizer """transh""" +875 88 optimizer """adam""" +875 88 training_loop """owa""" +875 88 negative_sampler """basic""" +875 88 evaluator """rankbased""" +875 89 dataset """kinships""" +875 89 model """transh""" +875 89 loss """bceaftersigmoid""" +875 89 regularizer """transh""" +875 
89 optimizer """adam""" +875 89 training_loop """owa""" +875 89 negative_sampler """basic""" +875 89 evaluator """rankbased""" +875 90 dataset """kinships""" +875 90 model """transh""" +875 90 loss """bceaftersigmoid""" +875 90 regularizer """transh""" +875 90 optimizer """adam""" +875 90 training_loop """owa""" +875 90 negative_sampler """basic""" +875 90 evaluator """rankbased""" +875 91 dataset """kinships""" +875 91 model """transh""" +875 91 loss """bceaftersigmoid""" +875 91 regularizer """transh""" +875 91 optimizer """adam""" +875 91 training_loop """owa""" +875 91 negative_sampler """basic""" +875 91 evaluator """rankbased""" +875 92 dataset """kinships""" +875 92 model """transh""" +875 92 loss """bceaftersigmoid""" +875 92 regularizer """transh""" +875 92 optimizer """adam""" +875 92 training_loop """owa""" +875 92 negative_sampler """basic""" +875 92 evaluator """rankbased""" +875 93 dataset """kinships""" +875 93 model """transh""" +875 93 loss """bceaftersigmoid""" +875 93 regularizer """transh""" +875 93 optimizer """adam""" +875 93 training_loop """owa""" +875 93 negative_sampler """basic""" +875 93 evaluator """rankbased""" +875 94 dataset """kinships""" +875 94 model """transh""" +875 94 loss """bceaftersigmoid""" +875 94 regularizer """transh""" +875 94 optimizer """adam""" +875 94 training_loop """owa""" +875 94 negative_sampler """basic""" +875 94 evaluator """rankbased""" +875 95 dataset """kinships""" +875 95 model """transh""" +875 95 loss """bceaftersigmoid""" +875 95 regularizer """transh""" +875 95 optimizer """adam""" +875 95 training_loop """owa""" +875 95 negative_sampler """basic""" +875 95 evaluator """rankbased""" +875 96 dataset """kinships""" +875 96 model """transh""" +875 96 loss """bceaftersigmoid""" +875 96 regularizer """transh""" +875 96 optimizer """adam""" +875 96 training_loop """owa""" +875 96 negative_sampler """basic""" +875 96 evaluator """rankbased""" +875 97 dataset """kinships""" +875 97 model """transh""" +875 97 
loss """bceaftersigmoid""" +875 97 regularizer """transh""" +875 97 optimizer """adam""" +875 97 training_loop """owa""" +875 97 negative_sampler """basic""" +875 97 evaluator """rankbased""" +875 98 dataset """kinships""" +875 98 model """transh""" +875 98 loss """bceaftersigmoid""" +875 98 regularizer """transh""" +875 98 optimizer """adam""" +875 98 training_loop """owa""" +875 98 negative_sampler """basic""" +875 98 evaluator """rankbased""" +875 99 dataset """kinships""" +875 99 model """transh""" +875 99 loss """bceaftersigmoid""" +875 99 regularizer """transh""" +875 99 optimizer """adam""" +875 99 training_loop """owa""" +875 99 negative_sampler """basic""" +875 99 evaluator """rankbased""" +875 100 dataset """kinships""" +875 100 model """transh""" +875 100 loss """bceaftersigmoid""" +875 100 regularizer """transh""" +875 100 optimizer """adam""" +875 100 training_loop """owa""" +875 100 negative_sampler """basic""" +875 100 evaluator """rankbased""" +876 1 model.embedding_dim 2.0 +876 1 model.scoring_fct_norm 1.0 +876 1 regularizer.weight 0.07050160293257546 +876 1 optimizer.lr 0.006151622553448177 +876 1 negative_sampler.num_negs_per_pos 5.0 +876 1 training.batch_size 0.0 +876 2 model.embedding_dim 0.0 +876 2 model.scoring_fct_norm 1.0 +876 2 regularizer.weight 0.0433753650905696 +876 2 optimizer.lr 0.0039158490947956825 +876 2 negative_sampler.num_negs_per_pos 71.0 +876 2 training.batch_size 2.0 +876 3 model.embedding_dim 2.0 +876 3 model.scoring_fct_norm 2.0 +876 3 regularizer.weight 0.25041643225289184 +876 3 optimizer.lr 0.007410150022084148 +876 3 negative_sampler.num_negs_per_pos 28.0 +876 3 training.batch_size 1.0 +876 4 model.embedding_dim 2.0 +876 4 model.scoring_fct_norm 2.0 +876 4 regularizer.weight 0.08755073731187057 +876 4 optimizer.lr 0.029108925532835138 +876 4 negative_sampler.num_negs_per_pos 8.0 +876 4 training.batch_size 1.0 +876 5 model.embedding_dim 0.0 +876 5 model.scoring_fct_norm 2.0 +876 5 regularizer.weight 0.013942840946193066 
+876 5 optimizer.lr 0.001183172286175073 +876 5 negative_sampler.num_negs_per_pos 60.0 +876 5 training.batch_size 0.0 +876 6 model.embedding_dim 0.0 +876 6 model.scoring_fct_norm 2.0 +876 6 regularizer.weight 0.15753321763505398 +876 6 optimizer.lr 0.011626548121374251 +876 6 negative_sampler.num_negs_per_pos 78.0 +876 6 training.batch_size 0.0 +876 7 model.embedding_dim 1.0 +876 7 model.scoring_fct_norm 2.0 +876 7 regularizer.weight 0.25373503209600845 +876 7 optimizer.lr 0.03277142327164303 +876 7 negative_sampler.num_negs_per_pos 43.0 +876 7 training.batch_size 0.0 +876 8 model.embedding_dim 2.0 +876 8 model.scoring_fct_norm 2.0 +876 8 regularizer.weight 0.2517333051389491 +876 8 optimizer.lr 0.061470124877801985 +876 8 negative_sampler.num_negs_per_pos 96.0 +876 8 training.batch_size 1.0 +876 9 model.embedding_dim 2.0 +876 9 model.scoring_fct_norm 2.0 +876 9 regularizer.weight 0.2652461988466675 +876 9 optimizer.lr 0.001257963476370204 +876 9 negative_sampler.num_negs_per_pos 53.0 +876 9 training.batch_size 2.0 +876 10 model.embedding_dim 2.0 +876 10 model.scoring_fct_norm 1.0 +876 10 regularizer.weight 0.1953417073673541 +876 10 optimizer.lr 0.019918940029715414 +876 10 negative_sampler.num_negs_per_pos 76.0 +876 10 training.batch_size 1.0 +876 11 model.embedding_dim 1.0 +876 11 model.scoring_fct_norm 1.0 +876 11 regularizer.weight 0.1668950316229834 +876 11 optimizer.lr 0.0023198679168975824 +876 11 negative_sampler.num_negs_per_pos 21.0 +876 11 training.batch_size 2.0 +876 12 model.embedding_dim 2.0 +876 12 model.scoring_fct_norm 1.0 +876 12 regularizer.weight 0.20581010207256412 +876 12 optimizer.lr 0.001146498349822713 +876 12 negative_sampler.num_negs_per_pos 36.0 +876 12 training.batch_size 0.0 +876 13 model.embedding_dim 2.0 +876 13 model.scoring_fct_norm 2.0 +876 13 regularizer.weight 0.032027034593466605 +876 13 optimizer.lr 0.0013482804799229329 +876 13 negative_sampler.num_negs_per_pos 73.0 +876 13 training.batch_size 1.0 +876 14 model.embedding_dim 
0.0 +876 14 model.scoring_fct_norm 2.0 +876 14 regularizer.weight 0.06121551316605308 +876 14 optimizer.lr 0.008997171862397825 +876 14 negative_sampler.num_negs_per_pos 97.0 +876 14 training.batch_size 0.0 +876 15 model.embedding_dim 2.0 +876 15 model.scoring_fct_norm 2.0 +876 15 regularizer.weight 0.22923857962238212 +876 15 optimizer.lr 0.03782278573583503 +876 15 negative_sampler.num_negs_per_pos 67.0 +876 15 training.batch_size 0.0 +876 16 model.embedding_dim 2.0 +876 16 model.scoring_fct_norm 2.0 +876 16 regularizer.weight 0.2459537896613087 +876 16 optimizer.lr 0.0688129590816012 +876 16 negative_sampler.num_negs_per_pos 91.0 +876 16 training.batch_size 1.0 +876 17 model.embedding_dim 0.0 +876 17 model.scoring_fct_norm 1.0 +876 17 regularizer.weight 0.13783276256687813 +876 17 optimizer.lr 0.0015352943515975527 +876 17 negative_sampler.num_negs_per_pos 95.0 +876 17 training.batch_size 0.0 +876 18 model.embedding_dim 0.0 +876 18 model.scoring_fct_norm 1.0 +876 18 regularizer.weight 0.06543171382515199 +876 18 optimizer.lr 0.012426308925663883 +876 18 negative_sampler.num_negs_per_pos 86.0 +876 18 training.batch_size 1.0 +876 19 model.embedding_dim 0.0 +876 19 model.scoring_fct_norm 1.0 +876 19 regularizer.weight 0.13392330388530427 +876 19 optimizer.lr 0.006172007134511864 +876 19 negative_sampler.num_negs_per_pos 68.0 +876 19 training.batch_size 2.0 +876 20 model.embedding_dim 2.0 +876 20 model.scoring_fct_norm 2.0 +876 20 regularizer.weight 0.09473757237819094 +876 20 optimizer.lr 0.09503411672844267 +876 20 negative_sampler.num_negs_per_pos 22.0 +876 20 training.batch_size 0.0 +876 21 model.embedding_dim 0.0 +876 21 model.scoring_fct_norm 2.0 +876 21 regularizer.weight 0.2946735732870897 +876 21 optimizer.lr 0.034890212667134164 +876 21 negative_sampler.num_negs_per_pos 42.0 +876 21 training.batch_size 1.0 +876 22 model.embedding_dim 0.0 +876 22 model.scoring_fct_norm 2.0 +876 22 regularizer.weight 0.023675140453437465 +876 22 optimizer.lr 
0.08361301076626776 +876 22 negative_sampler.num_negs_per_pos 57.0 +876 22 training.batch_size 2.0 +876 23 model.embedding_dim 0.0 +876 23 model.scoring_fct_norm 1.0 +876 23 regularizer.weight 0.0958612031986447 +876 23 optimizer.lr 0.07031122789525181 +876 23 negative_sampler.num_negs_per_pos 26.0 +876 23 training.batch_size 0.0 +876 24 model.embedding_dim 0.0 +876 24 model.scoring_fct_norm 1.0 +876 24 regularizer.weight 0.0807289975912859 +876 24 optimizer.lr 0.015336557193883611 +876 24 negative_sampler.num_negs_per_pos 92.0 +876 24 training.batch_size 2.0 +876 25 model.embedding_dim 2.0 +876 25 model.scoring_fct_norm 1.0 +876 25 regularizer.weight 0.06018122944315362 +876 25 optimizer.lr 0.0025481865567523318 +876 25 negative_sampler.num_negs_per_pos 8.0 +876 25 training.batch_size 0.0 +876 26 model.embedding_dim 0.0 +876 26 model.scoring_fct_norm 1.0 +876 26 regularizer.weight 0.092867400755015 +876 26 optimizer.lr 0.013939393078156111 +876 26 negative_sampler.num_negs_per_pos 72.0 +876 26 training.batch_size 2.0 +876 27 model.embedding_dim 2.0 +876 27 model.scoring_fct_norm 2.0 +876 27 regularizer.weight 0.16757922409624995 +876 27 optimizer.lr 0.008937342803952397 +876 27 negative_sampler.num_negs_per_pos 75.0 +876 27 training.batch_size 2.0 +876 28 model.embedding_dim 2.0 +876 28 model.scoring_fct_norm 1.0 +876 28 regularizer.weight 0.05057147980741553 +876 28 optimizer.lr 0.0016097223423513194 +876 28 negative_sampler.num_negs_per_pos 12.0 +876 28 training.batch_size 1.0 +876 29 model.embedding_dim 1.0 +876 29 model.scoring_fct_norm 1.0 +876 29 regularizer.weight 0.012174530083326961 +876 29 optimizer.lr 0.03860466593304021 +876 29 negative_sampler.num_negs_per_pos 9.0 +876 29 training.batch_size 2.0 +876 30 model.embedding_dim 2.0 +876 30 model.scoring_fct_norm 2.0 +876 30 regularizer.weight 0.026709112656680353 +876 30 optimizer.lr 0.01219557185042761 +876 30 negative_sampler.num_negs_per_pos 58.0 +876 30 training.batch_size 1.0 +876 31 
model.embedding_dim 2.0 +876 31 model.scoring_fct_norm 2.0 +876 31 regularizer.weight 0.08635338897872592 +876 31 optimizer.lr 0.001289534023513436 +876 31 negative_sampler.num_negs_per_pos 88.0 +876 31 training.batch_size 2.0 +876 32 model.embedding_dim 1.0 +876 32 model.scoring_fct_norm 2.0 +876 32 regularizer.weight 0.04544984480971057 +876 32 optimizer.lr 0.0035027551228581024 +876 32 negative_sampler.num_negs_per_pos 25.0 +876 32 training.batch_size 2.0 +876 33 model.embedding_dim 2.0 +876 33 model.scoring_fct_norm 1.0 +876 33 regularizer.weight 0.11738146603947418 +876 33 optimizer.lr 0.004601568044482285 +876 33 negative_sampler.num_negs_per_pos 10.0 +876 33 training.batch_size 2.0 +876 34 model.embedding_dim 1.0 +876 34 model.scoring_fct_norm 1.0 +876 34 regularizer.weight 0.025245787267377064 +876 34 optimizer.lr 0.00969951714250023 +876 34 negative_sampler.num_negs_per_pos 58.0 +876 34 training.batch_size 1.0 +876 35 model.embedding_dim 1.0 +876 35 model.scoring_fct_norm 1.0 +876 35 regularizer.weight 0.05354921948954719 +876 35 optimizer.lr 0.023723608287656236 +876 35 negative_sampler.num_negs_per_pos 40.0 +876 35 training.batch_size 1.0 +876 36 model.embedding_dim 1.0 +876 36 model.scoring_fct_norm 2.0 +876 36 regularizer.weight 0.016277143716861113 +876 36 optimizer.lr 0.0426887963463481 +876 36 negative_sampler.num_negs_per_pos 19.0 +876 36 training.batch_size 0.0 +876 37 model.embedding_dim 1.0 +876 37 model.scoring_fct_norm 2.0 +876 37 regularizer.weight 0.2535341611790433 +876 37 optimizer.lr 0.003903858277579731 +876 37 negative_sampler.num_negs_per_pos 94.0 +876 37 training.batch_size 2.0 +876 38 model.embedding_dim 0.0 +876 38 model.scoring_fct_norm 1.0 +876 38 regularizer.weight 0.050567046763679346 +876 38 optimizer.lr 0.020180531312956263 +876 38 negative_sampler.num_negs_per_pos 74.0 +876 38 training.batch_size 0.0 +876 39 model.embedding_dim 0.0 +876 39 model.scoring_fct_norm 1.0 +876 39 regularizer.weight 0.057506478257681175 +876 39 
optimizer.lr 0.011965268551419717 +876 39 negative_sampler.num_negs_per_pos 36.0 +876 39 training.batch_size 2.0 +876 40 model.embedding_dim 2.0 +876 40 model.scoring_fct_norm 2.0 +876 40 regularizer.weight 0.03383064748115078 +876 40 optimizer.lr 0.022950646965200237 +876 40 negative_sampler.num_negs_per_pos 11.0 +876 40 training.batch_size 0.0 +876 41 model.embedding_dim 2.0 +876 41 model.scoring_fct_norm 1.0 +876 41 regularizer.weight 0.02784544384170822 +876 41 optimizer.lr 0.006713819678083076 +876 41 negative_sampler.num_negs_per_pos 50.0 +876 41 training.batch_size 1.0 +876 42 model.embedding_dim 2.0 +876 42 model.scoring_fct_norm 1.0 +876 42 regularizer.weight 0.01901653758431615 +876 42 optimizer.lr 0.017055334727084055 +876 42 negative_sampler.num_negs_per_pos 13.0 +876 42 training.batch_size 0.0 +876 43 model.embedding_dim 1.0 +876 43 model.scoring_fct_norm 1.0 +876 43 regularizer.weight 0.02015857820091796 +876 43 optimizer.lr 0.00895906474950095 +876 43 negative_sampler.num_negs_per_pos 76.0 +876 43 training.batch_size 2.0 +876 44 model.embedding_dim 1.0 +876 44 model.scoring_fct_norm 1.0 +876 44 regularizer.weight 0.23417608450687896 +876 44 optimizer.lr 0.0012720192002798935 +876 44 negative_sampler.num_negs_per_pos 78.0 +876 44 training.batch_size 1.0 +876 45 model.embedding_dim 0.0 +876 45 model.scoring_fct_norm 2.0 +876 45 regularizer.weight 0.0788773348303645 +876 45 optimizer.lr 0.0058263000867649255 +876 45 negative_sampler.num_negs_per_pos 58.0 +876 45 training.batch_size 0.0 +876 46 model.embedding_dim 1.0 +876 46 model.scoring_fct_norm 1.0 +876 46 regularizer.weight 0.011436636165230007 +876 46 optimizer.lr 0.001081705557595416 +876 46 negative_sampler.num_negs_per_pos 27.0 +876 46 training.batch_size 2.0 +876 47 model.embedding_dim 2.0 +876 47 model.scoring_fct_norm 2.0 +876 47 regularizer.weight 0.08673430782480333 +876 47 optimizer.lr 0.042010357439564314 +876 47 negative_sampler.num_negs_per_pos 65.0 +876 47 training.batch_size 1.0 +876 
48 model.embedding_dim 1.0 +876 48 model.scoring_fct_norm 1.0 +876 48 regularizer.weight 0.01114946781674348 +876 48 optimizer.lr 0.017908796101898 +876 48 negative_sampler.num_negs_per_pos 63.0 +876 48 training.batch_size 0.0 +876 49 model.embedding_dim 0.0 +876 49 model.scoring_fct_norm 2.0 +876 49 regularizer.weight 0.023193728712567523 +876 49 optimizer.lr 0.007113400102852351 +876 49 negative_sampler.num_negs_per_pos 87.0 +876 49 training.batch_size 2.0 +876 50 model.embedding_dim 2.0 +876 50 model.scoring_fct_norm 2.0 +876 50 regularizer.weight 0.022902148596904405 +876 50 optimizer.lr 0.041092940077487136 +876 50 negative_sampler.num_negs_per_pos 6.0 +876 50 training.batch_size 0.0 +876 51 model.embedding_dim 2.0 +876 51 model.scoring_fct_norm 1.0 +876 51 regularizer.weight 0.299883592006735 +876 51 optimizer.lr 0.0014419499842446345 +876 51 negative_sampler.num_negs_per_pos 61.0 +876 51 training.batch_size 1.0 +876 52 model.embedding_dim 1.0 +876 52 model.scoring_fct_norm 2.0 +876 52 regularizer.weight 0.08456463944115061 +876 52 optimizer.lr 0.015078116768636872 +876 52 negative_sampler.num_negs_per_pos 12.0 +876 52 training.batch_size 2.0 +876 53 model.embedding_dim 0.0 +876 53 model.scoring_fct_norm 1.0 +876 53 regularizer.weight 0.01571110669124819 +876 53 optimizer.lr 0.09942948468211599 +876 53 negative_sampler.num_negs_per_pos 62.0 +876 53 training.batch_size 0.0 +876 54 model.embedding_dim 0.0 +876 54 model.scoring_fct_norm 1.0 +876 54 regularizer.weight 0.03867638252268512 +876 54 optimizer.lr 0.07595241536856524 +876 54 negative_sampler.num_negs_per_pos 60.0 +876 54 training.batch_size 1.0 +876 55 model.embedding_dim 2.0 +876 55 model.scoring_fct_norm 1.0 +876 55 regularizer.weight 0.09108015355406719 +876 55 optimizer.lr 0.01137299147136055 +876 55 negative_sampler.num_negs_per_pos 50.0 +876 55 training.batch_size 1.0 +876 56 model.embedding_dim 1.0 +876 56 model.scoring_fct_norm 1.0 +876 56 regularizer.weight 0.014862364161760276 +876 56 
optimizer.lr 0.009010431864398502 +876 56 negative_sampler.num_negs_per_pos 83.0 +876 56 training.batch_size 2.0 +876 57 model.embedding_dim 1.0 +876 57 model.scoring_fct_norm 1.0 +876 57 regularizer.weight 0.28852696444137044 +876 57 optimizer.lr 0.03751256596664326 +876 57 negative_sampler.num_negs_per_pos 73.0 +876 57 training.batch_size 2.0 +876 58 model.embedding_dim 1.0 +876 58 model.scoring_fct_norm 2.0 +876 58 regularizer.weight 0.12105579179369746 +876 58 optimizer.lr 0.018614680083778395 +876 58 negative_sampler.num_negs_per_pos 14.0 +876 58 training.batch_size 1.0 +876 59 model.embedding_dim 0.0 +876 59 model.scoring_fct_norm 1.0 +876 59 regularizer.weight 0.011539167302331158 +876 59 optimizer.lr 0.004927990827654355 +876 59 negative_sampler.num_negs_per_pos 19.0 +876 59 training.batch_size 1.0 +876 60 model.embedding_dim 0.0 +876 60 model.scoring_fct_norm 1.0 +876 60 regularizer.weight 0.1684461982431321 +876 60 optimizer.lr 0.0022302088140015194 +876 60 negative_sampler.num_negs_per_pos 95.0 +876 60 training.batch_size 2.0 +876 61 model.embedding_dim 0.0 +876 61 model.scoring_fct_norm 2.0 +876 61 regularizer.weight 0.01954562627721831 +876 61 optimizer.lr 0.034136959640610344 +876 61 negative_sampler.num_negs_per_pos 73.0 +876 61 training.batch_size 0.0 +876 62 model.embedding_dim 0.0 +876 62 model.scoring_fct_norm 2.0 +876 62 regularizer.weight 0.013155824781405595 +876 62 optimizer.lr 0.0876350827828059 +876 62 negative_sampler.num_negs_per_pos 79.0 +876 62 training.batch_size 0.0 +876 63 model.embedding_dim 2.0 +876 63 model.scoring_fct_norm 1.0 +876 63 regularizer.weight 0.11954507633001893 +876 63 optimizer.lr 0.0011559489286579697 +876 63 negative_sampler.num_negs_per_pos 50.0 +876 63 training.batch_size 1.0 +876 64 model.embedding_dim 2.0 +876 64 model.scoring_fct_norm 1.0 +876 64 regularizer.weight 0.12752932528408736 +876 64 optimizer.lr 0.0015638113284794756 +876 64 negative_sampler.num_negs_per_pos 14.0 +876 64 training.batch_size 2.0 +876 
65 model.embedding_dim 2.0 +876 65 model.scoring_fct_norm 1.0 +876 65 regularizer.weight 0.054653945022767506 +876 65 optimizer.lr 0.002415401234484744 +876 65 negative_sampler.num_negs_per_pos 95.0 +876 65 training.batch_size 0.0 +876 66 model.embedding_dim 0.0 +876 66 model.scoring_fct_norm 2.0 +876 66 regularizer.weight 0.08518875613099648 +876 66 optimizer.lr 0.006126110279699844 +876 66 negative_sampler.num_negs_per_pos 12.0 +876 66 training.batch_size 1.0 +876 67 model.embedding_dim 2.0 +876 67 model.scoring_fct_norm 2.0 +876 67 regularizer.weight 0.13144028139001743 +876 67 optimizer.lr 0.003496212668962834 +876 67 negative_sampler.num_negs_per_pos 58.0 +876 67 training.batch_size 1.0 +876 68 model.embedding_dim 2.0 +876 68 model.scoring_fct_norm 1.0 +876 68 regularizer.weight 0.028908739987267 +876 68 optimizer.lr 0.006529753167548479 +876 68 negative_sampler.num_negs_per_pos 90.0 +876 68 training.batch_size 0.0 +876 69 model.embedding_dim 0.0 +876 69 model.scoring_fct_norm 2.0 +876 69 regularizer.weight 0.14421573099093343 +876 69 optimizer.lr 0.0010207513267361094 +876 69 negative_sampler.num_negs_per_pos 73.0 +876 69 training.batch_size 2.0 +876 70 model.embedding_dim 2.0 +876 70 model.scoring_fct_norm 2.0 +876 70 regularizer.weight 0.059399132015280444 +876 70 optimizer.lr 0.001815006644923871 +876 70 negative_sampler.num_negs_per_pos 16.0 +876 70 training.batch_size 1.0 +876 71 model.embedding_dim 2.0 +876 71 model.scoring_fct_norm 2.0 +876 71 regularizer.weight 0.01796801892310308 +876 71 optimizer.lr 0.04452442534893148 +876 71 negative_sampler.num_negs_per_pos 27.0 +876 71 training.batch_size 0.0 +876 72 model.embedding_dim 2.0 +876 72 model.scoring_fct_norm 1.0 +876 72 regularizer.weight 0.011750461030747754 +876 72 optimizer.lr 0.008257177519230954 +876 72 negative_sampler.num_negs_per_pos 65.0 +876 72 training.batch_size 2.0 +876 73 model.embedding_dim 0.0 +876 73 model.scoring_fct_norm 1.0 +876 73 regularizer.weight 0.02030659594550752 +876 73 
optimizer.lr 0.046123223949667544 +876 73 negative_sampler.num_negs_per_pos 67.0 +876 73 training.batch_size 1.0 +876 74 model.embedding_dim 2.0 +876 74 model.scoring_fct_norm 1.0 +876 74 regularizer.weight 0.021826806927827215 +876 74 optimizer.lr 0.018303292181373592 +876 74 negative_sampler.num_negs_per_pos 84.0 +876 74 training.batch_size 2.0 +876 75 model.embedding_dim 1.0 +876 75 model.scoring_fct_norm 1.0 +876 75 regularizer.weight 0.1009415326738064 +876 75 optimizer.lr 0.008050721048490717 +876 75 negative_sampler.num_negs_per_pos 5.0 +876 75 training.batch_size 1.0 +876 76 model.embedding_dim 0.0 +876 76 model.scoring_fct_norm 2.0 +876 76 regularizer.weight 0.017404193974624504 +876 76 optimizer.lr 0.03234269421869209 +876 76 negative_sampler.num_negs_per_pos 95.0 +876 76 training.batch_size 0.0 +876 77 model.embedding_dim 2.0 +876 77 model.scoring_fct_norm 1.0 +876 77 regularizer.weight 0.12767580754510421 +876 77 optimizer.lr 0.005132478099357371 +876 77 negative_sampler.num_negs_per_pos 96.0 +876 77 training.batch_size 0.0 +876 78 model.embedding_dim 0.0 +876 78 model.scoring_fct_norm 2.0 +876 78 regularizer.weight 0.041345077905613425 +876 78 optimizer.lr 0.05502353197286819 +876 78 negative_sampler.num_negs_per_pos 13.0 +876 78 training.batch_size 1.0 +876 79 model.embedding_dim 2.0 +876 79 model.scoring_fct_norm 2.0 +876 79 regularizer.weight 0.048962854684756245 +876 79 optimizer.lr 0.05419152036231665 +876 79 negative_sampler.num_negs_per_pos 58.0 +876 79 training.batch_size 1.0 +876 80 model.embedding_dim 1.0 +876 80 model.scoring_fct_norm 2.0 +876 80 regularizer.weight 0.15887599117845486 +876 80 optimizer.lr 0.053457518476773734 +876 80 negative_sampler.num_negs_per_pos 88.0 +876 80 training.batch_size 2.0 +876 81 model.embedding_dim 0.0 +876 81 model.scoring_fct_norm 1.0 +876 81 regularizer.weight 0.16740347214407234 +876 81 optimizer.lr 0.07708359016681053 +876 81 negative_sampler.num_negs_per_pos 69.0 +876 81 training.batch_size 2.0 +876 82 
model.embedding_dim 2.0 +876 82 model.scoring_fct_norm 2.0 +876 82 regularizer.weight 0.05349789482347047 +876 82 optimizer.lr 0.018163259414426293 +876 82 negative_sampler.num_negs_per_pos 18.0 +876 82 training.batch_size 1.0 +876 83 model.embedding_dim 0.0 +876 83 model.scoring_fct_norm 2.0 +876 83 regularizer.weight 0.1998165209615335 +876 83 optimizer.lr 0.0025568010209936405 +876 83 negative_sampler.num_negs_per_pos 63.0 +876 83 training.batch_size 1.0 +876 84 model.embedding_dim 0.0 +876 84 model.scoring_fct_norm 1.0 +876 84 regularizer.weight 0.25227555538909885 +876 84 optimizer.lr 0.018783109191461297 +876 84 negative_sampler.num_negs_per_pos 75.0 +876 84 training.batch_size 2.0 +876 85 model.embedding_dim 0.0 +876 85 model.scoring_fct_norm 1.0 +876 85 regularizer.weight 0.0898162419230749 +876 85 optimizer.lr 0.020321284425631775 +876 85 negative_sampler.num_negs_per_pos 36.0 +876 85 training.batch_size 0.0 +876 86 model.embedding_dim 2.0 +876 86 model.scoring_fct_norm 1.0 +876 86 regularizer.weight 0.1059876455592182 +876 86 optimizer.lr 0.06235456498409773 +876 86 negative_sampler.num_negs_per_pos 96.0 +876 86 training.batch_size 0.0 +876 87 model.embedding_dim 1.0 +876 87 model.scoring_fct_norm 2.0 +876 87 regularizer.weight 0.15153694013995392 +876 87 optimizer.lr 0.001267120613506394 +876 87 negative_sampler.num_negs_per_pos 51.0 +876 87 training.batch_size 2.0 +876 88 model.embedding_dim 2.0 +876 88 model.scoring_fct_norm 2.0 +876 88 regularizer.weight 0.10823496783113896 +876 88 optimizer.lr 0.07223819245438444 +876 88 negative_sampler.num_negs_per_pos 42.0 +876 88 training.batch_size 2.0 +876 89 model.embedding_dim 0.0 +876 89 model.scoring_fct_norm 2.0 +876 89 regularizer.weight 0.09546573021490551 +876 89 optimizer.lr 0.007401874193740502 +876 89 negative_sampler.num_negs_per_pos 12.0 +876 89 training.batch_size 2.0 +876 90 model.embedding_dim 2.0 +876 90 model.scoring_fct_norm 2.0 +876 90 regularizer.weight 0.01299915074047827 +876 90 
optimizer.lr 0.016212721878437024 +876 90 negative_sampler.num_negs_per_pos 26.0 +876 90 training.batch_size 2.0 +876 91 model.embedding_dim 1.0 +876 91 model.scoring_fct_norm 2.0 +876 91 regularizer.weight 0.14335855360674674 +876 91 optimizer.lr 0.04732866208950675 +876 91 negative_sampler.num_negs_per_pos 42.0 +876 91 training.batch_size 0.0 +876 92 model.embedding_dim 0.0 +876 92 model.scoring_fct_norm 2.0 +876 92 regularizer.weight 0.020866403972334652 +876 92 optimizer.lr 0.0023588270899140896 +876 92 negative_sampler.num_negs_per_pos 32.0 +876 92 training.batch_size 0.0 +876 93 model.embedding_dim 0.0 +876 93 model.scoring_fct_norm 2.0 +876 93 regularizer.weight 0.02355883386275341 +876 93 optimizer.lr 0.0031733294615340357 +876 93 negative_sampler.num_negs_per_pos 40.0 +876 93 training.batch_size 1.0 +876 94 model.embedding_dim 0.0 +876 94 model.scoring_fct_norm 2.0 +876 94 regularizer.weight 0.05827197604398654 +876 94 optimizer.lr 0.025894622996807018 +876 94 negative_sampler.num_negs_per_pos 31.0 +876 94 training.batch_size 2.0 +876 95 model.embedding_dim 1.0 +876 95 model.scoring_fct_norm 1.0 +876 95 regularizer.weight 0.011073844739798394 +876 95 optimizer.lr 0.08036144414752316 +876 95 negative_sampler.num_negs_per_pos 29.0 +876 95 training.batch_size 2.0 +876 96 model.embedding_dim 1.0 +876 96 model.scoring_fct_norm 1.0 +876 96 regularizer.weight 0.02155513665506762 +876 96 optimizer.lr 0.010085561783996517 +876 96 negative_sampler.num_negs_per_pos 94.0 +876 96 training.batch_size 1.0 +876 97 model.embedding_dim 0.0 +876 97 model.scoring_fct_norm 1.0 +876 97 regularizer.weight 0.0517533694064891 +876 97 optimizer.lr 0.018526574555778823 +876 97 negative_sampler.num_negs_per_pos 90.0 +876 97 training.batch_size 0.0 +876 98 model.embedding_dim 2.0 +876 98 model.scoring_fct_norm 1.0 +876 98 regularizer.weight 0.2366638981598233 +876 98 optimizer.lr 0.021941594349086033 +876 98 negative_sampler.num_negs_per_pos 22.0 +876 98 training.batch_size 2.0 +876 
99 model.embedding_dim 2.0 +876 99 model.scoring_fct_norm 1.0 +876 99 regularizer.weight 0.055641034277258944 +876 99 optimizer.lr 0.008629510231338226 +876 99 negative_sampler.num_negs_per_pos 3.0 +876 99 training.batch_size 2.0 +876 100 model.embedding_dim 1.0 +876 100 model.scoring_fct_norm 2.0 +876 100 regularizer.weight 0.08803281771934446 +876 100 optimizer.lr 0.0015715568383701708 +876 100 negative_sampler.num_negs_per_pos 76.0 +876 100 training.batch_size 0.0 +876 1 dataset """kinships""" +876 1 model """transh""" +876 1 loss """softplus""" +876 1 regularizer """transh""" +876 1 optimizer """adam""" +876 1 training_loop """owa""" +876 1 negative_sampler """basic""" +876 1 evaluator """rankbased""" +876 2 dataset """kinships""" +876 2 model """transh""" +876 2 loss """softplus""" +876 2 regularizer """transh""" +876 2 optimizer """adam""" +876 2 training_loop """owa""" +876 2 negative_sampler """basic""" +876 2 evaluator """rankbased""" +876 3 dataset """kinships""" +876 3 model """transh""" +876 3 loss """softplus""" +876 3 regularizer """transh""" +876 3 optimizer """adam""" +876 3 training_loop """owa""" +876 3 negative_sampler """basic""" +876 3 evaluator """rankbased""" +876 4 dataset """kinships""" +876 4 model """transh""" +876 4 loss """softplus""" +876 4 regularizer """transh""" +876 4 optimizer """adam""" +876 4 training_loop """owa""" +876 4 negative_sampler """basic""" +876 4 evaluator """rankbased""" +876 5 dataset """kinships""" +876 5 model """transh""" +876 5 loss """softplus""" +876 5 regularizer """transh""" +876 5 optimizer """adam""" +876 5 training_loop """owa""" +876 5 negative_sampler """basic""" +876 5 evaluator """rankbased""" +876 6 dataset """kinships""" +876 6 model """transh""" +876 6 loss """softplus""" +876 6 regularizer """transh""" +876 6 optimizer """adam""" +876 6 training_loop """owa""" +876 6 negative_sampler """basic""" +876 6 evaluator """rankbased""" +876 7 dataset """kinships""" +876 7 model """transh""" +876 7 loss 
"""softplus""" +876 7 regularizer """transh""" +876 7 optimizer """adam""" +876 7 training_loop """owa""" +876 7 negative_sampler """basic""" +876 7 evaluator """rankbased""" +876 8 dataset """kinships""" +876 8 model """transh""" +876 8 loss """softplus""" +876 8 regularizer """transh""" +876 8 optimizer """adam""" +876 8 training_loop """owa""" +876 8 negative_sampler """basic""" +876 8 evaluator """rankbased""" +876 9 dataset """kinships""" +876 9 model """transh""" +876 9 loss """softplus""" +876 9 regularizer """transh""" +876 9 optimizer """adam""" +876 9 training_loop """owa""" +876 9 negative_sampler """basic""" +876 9 evaluator """rankbased""" +876 10 dataset """kinships""" +876 10 model """transh""" +876 10 loss """softplus""" +876 10 regularizer """transh""" +876 10 optimizer """adam""" +876 10 training_loop """owa""" +876 10 negative_sampler """basic""" +876 10 evaluator """rankbased""" +876 11 dataset """kinships""" +876 11 model """transh""" +876 11 loss """softplus""" +876 11 regularizer """transh""" +876 11 optimizer """adam""" +876 11 training_loop """owa""" +876 11 negative_sampler """basic""" +876 11 evaluator """rankbased""" +876 12 dataset """kinships""" +876 12 model """transh""" +876 12 loss """softplus""" +876 12 regularizer """transh""" +876 12 optimizer """adam""" +876 12 training_loop """owa""" +876 12 negative_sampler """basic""" +876 12 evaluator """rankbased""" +876 13 dataset """kinships""" +876 13 model """transh""" +876 13 loss """softplus""" +876 13 regularizer """transh""" +876 13 optimizer """adam""" +876 13 training_loop """owa""" +876 13 negative_sampler """basic""" +876 13 evaluator """rankbased""" +876 14 dataset """kinships""" +876 14 model """transh""" +876 14 loss """softplus""" +876 14 regularizer """transh""" +876 14 optimizer """adam""" +876 14 training_loop """owa""" +876 14 negative_sampler """basic""" +876 14 evaluator """rankbased""" +876 15 dataset """kinships""" +876 15 model """transh""" +876 15 loss 
"""softplus""" +876 15 regularizer """transh""" +876 15 optimizer """adam""" +876 15 training_loop """owa""" +876 15 negative_sampler """basic""" +876 15 evaluator """rankbased""" +876 16 dataset """kinships""" +876 16 model """transh""" +876 16 loss """softplus""" +876 16 regularizer """transh""" +876 16 optimizer """adam""" +876 16 training_loop """owa""" +876 16 negative_sampler """basic""" +876 16 evaluator """rankbased""" +876 17 dataset """kinships""" +876 17 model """transh""" +876 17 loss """softplus""" +876 17 regularizer """transh""" +876 17 optimizer """adam""" +876 17 training_loop """owa""" +876 17 negative_sampler """basic""" +876 17 evaluator """rankbased""" +876 18 dataset """kinships""" +876 18 model """transh""" +876 18 loss """softplus""" +876 18 regularizer """transh""" +876 18 optimizer """adam""" +876 18 training_loop """owa""" +876 18 negative_sampler """basic""" +876 18 evaluator """rankbased""" +876 19 dataset """kinships""" +876 19 model """transh""" +876 19 loss """softplus""" +876 19 regularizer """transh""" +876 19 optimizer """adam""" +876 19 training_loop """owa""" +876 19 negative_sampler """basic""" +876 19 evaluator """rankbased""" +876 20 dataset """kinships""" +876 20 model """transh""" +876 20 loss """softplus""" +876 20 regularizer """transh""" +876 20 optimizer """adam""" +876 20 training_loop """owa""" +876 20 negative_sampler """basic""" +876 20 evaluator """rankbased""" +876 21 dataset """kinships""" +876 21 model """transh""" +876 21 loss """softplus""" +876 21 regularizer """transh""" +876 21 optimizer """adam""" +876 21 training_loop """owa""" +876 21 negative_sampler """basic""" +876 21 evaluator """rankbased""" +876 22 dataset """kinships""" +876 22 model """transh""" +876 22 loss """softplus""" +876 22 regularizer """transh""" +876 22 optimizer """adam""" +876 22 training_loop """owa""" +876 22 negative_sampler """basic""" +876 22 evaluator """rankbased""" +876 23 dataset """kinships""" +876 23 model """transh""" +876 
23 loss """softplus""" +876 23 regularizer """transh""" +876 23 optimizer """adam""" +876 23 training_loop """owa""" +876 23 negative_sampler """basic""" +876 23 evaluator """rankbased""" +876 24 dataset """kinships""" +876 24 model """transh""" +876 24 loss """softplus""" +876 24 regularizer """transh""" +876 24 optimizer """adam""" +876 24 training_loop """owa""" +876 24 negative_sampler """basic""" +876 24 evaluator """rankbased""" +876 25 dataset """kinships""" +876 25 model """transh""" +876 25 loss """softplus""" +876 25 regularizer """transh""" +876 25 optimizer """adam""" +876 25 training_loop """owa""" +876 25 negative_sampler """basic""" +876 25 evaluator """rankbased""" +876 26 dataset """kinships""" +876 26 model """transh""" +876 26 loss """softplus""" +876 26 regularizer """transh""" +876 26 optimizer """adam""" +876 26 training_loop """owa""" +876 26 negative_sampler """basic""" +876 26 evaluator """rankbased""" +876 27 dataset """kinships""" +876 27 model """transh""" +876 27 loss """softplus""" +876 27 regularizer """transh""" +876 27 optimizer """adam""" +876 27 training_loop """owa""" +876 27 negative_sampler """basic""" +876 27 evaluator """rankbased""" +876 28 dataset """kinships""" +876 28 model """transh""" +876 28 loss """softplus""" +876 28 regularizer """transh""" +876 28 optimizer """adam""" +876 28 training_loop """owa""" +876 28 negative_sampler """basic""" +876 28 evaluator """rankbased""" +876 29 dataset """kinships""" +876 29 model """transh""" +876 29 loss """softplus""" +876 29 regularizer """transh""" +876 29 optimizer """adam""" +876 29 training_loop """owa""" +876 29 negative_sampler """basic""" +876 29 evaluator """rankbased""" +876 30 dataset """kinships""" +876 30 model """transh""" +876 30 loss """softplus""" +876 30 regularizer """transh""" +876 30 optimizer """adam""" +876 30 training_loop """owa""" +876 30 negative_sampler """basic""" +876 30 evaluator """rankbased""" +876 31 dataset """kinships""" +876 31 model 
"""transh""" +876 31 loss """softplus""" +876 31 regularizer """transh""" +876 31 optimizer """adam""" +876 31 training_loop """owa""" +876 31 negative_sampler """basic""" +876 31 evaluator """rankbased""" +876 32 dataset """kinships""" +876 32 model """transh""" +876 32 loss """softplus""" +876 32 regularizer """transh""" +876 32 optimizer """adam""" +876 32 training_loop """owa""" +876 32 negative_sampler """basic""" +876 32 evaluator """rankbased""" +876 33 dataset """kinships""" +876 33 model """transh""" +876 33 loss """softplus""" +876 33 regularizer """transh""" +876 33 optimizer """adam""" +876 33 training_loop """owa""" +876 33 negative_sampler """basic""" +876 33 evaluator """rankbased""" +876 34 dataset """kinships""" +876 34 model """transh""" +876 34 loss """softplus""" +876 34 regularizer """transh""" +876 34 optimizer """adam""" +876 34 training_loop """owa""" +876 34 negative_sampler """basic""" +876 34 evaluator """rankbased""" +876 35 dataset """kinships""" +876 35 model """transh""" +876 35 loss """softplus""" +876 35 regularizer """transh""" +876 35 optimizer """adam""" +876 35 training_loop """owa""" +876 35 negative_sampler """basic""" +876 35 evaluator """rankbased""" +876 36 dataset """kinships""" +876 36 model """transh""" +876 36 loss """softplus""" +876 36 regularizer """transh""" +876 36 optimizer """adam""" +876 36 training_loop """owa""" +876 36 negative_sampler """basic""" +876 36 evaluator """rankbased""" +876 37 dataset """kinships""" +876 37 model """transh""" +876 37 loss """softplus""" +876 37 regularizer """transh""" +876 37 optimizer """adam""" +876 37 training_loop """owa""" +876 37 negative_sampler """basic""" +876 37 evaluator """rankbased""" +876 38 dataset """kinships""" +876 38 model """transh""" +876 38 loss """softplus""" +876 38 regularizer """transh""" +876 38 optimizer """adam""" +876 38 training_loop """owa""" +876 38 negative_sampler """basic""" +876 38 evaluator """rankbased""" +876 39 dataset """kinships""" +876 
39 model """transh""" +876 39 loss """softplus""" +876 39 regularizer """transh""" +876 39 optimizer """adam""" +876 39 training_loop """owa""" +876 39 negative_sampler """basic""" +876 39 evaluator """rankbased""" +876 40 dataset """kinships""" +876 40 model """transh""" +876 40 loss """softplus""" +876 40 regularizer """transh""" +876 40 optimizer """adam""" +876 40 training_loop """owa""" +876 40 negative_sampler """basic""" +876 40 evaluator """rankbased""" +876 41 dataset """kinships""" +876 41 model """transh""" +876 41 loss """softplus""" +876 41 regularizer """transh""" +876 41 optimizer """adam""" +876 41 training_loop """owa""" +876 41 negative_sampler """basic""" +876 41 evaluator """rankbased""" +876 42 dataset """kinships""" +876 42 model """transh""" +876 42 loss """softplus""" +876 42 regularizer """transh""" +876 42 optimizer """adam""" +876 42 training_loop """owa""" +876 42 negative_sampler """basic""" +876 42 evaluator """rankbased""" +876 43 dataset """kinships""" +876 43 model """transh""" +876 43 loss """softplus""" +876 43 regularizer """transh""" +876 43 optimizer """adam""" +876 43 training_loop """owa""" +876 43 negative_sampler """basic""" +876 43 evaluator """rankbased""" +876 44 dataset """kinships""" +876 44 model """transh""" +876 44 loss """softplus""" +876 44 regularizer """transh""" +876 44 optimizer """adam""" +876 44 training_loop """owa""" +876 44 negative_sampler """basic""" +876 44 evaluator """rankbased""" +876 45 dataset """kinships""" +876 45 model """transh""" +876 45 loss """softplus""" +876 45 regularizer """transh""" +876 45 optimizer """adam""" +876 45 training_loop """owa""" +876 45 negative_sampler """basic""" +876 45 evaluator """rankbased""" +876 46 dataset """kinships""" +876 46 model """transh""" +876 46 loss """softplus""" +876 46 regularizer """transh""" +876 46 optimizer """adam""" +876 46 training_loop """owa""" +876 46 negative_sampler """basic""" +876 46 evaluator """rankbased""" +876 47 dataset 
"""kinships""" +876 47 model """transh""" +876 47 loss """softplus""" +876 47 regularizer """transh""" +876 47 optimizer """adam""" +876 47 training_loop """owa""" +876 47 negative_sampler """basic""" +876 47 evaluator """rankbased""" +876 48 dataset """kinships""" +876 48 model """transh""" +876 48 loss """softplus""" +876 48 regularizer """transh""" +876 48 optimizer """adam""" +876 48 training_loop """owa""" +876 48 negative_sampler """basic""" +876 48 evaluator """rankbased""" +876 49 dataset """kinships""" +876 49 model """transh""" +876 49 loss """softplus""" +876 49 regularizer """transh""" +876 49 optimizer """adam""" +876 49 training_loop """owa""" +876 49 negative_sampler """basic""" +876 49 evaluator """rankbased""" +876 50 dataset """kinships""" +876 50 model """transh""" +876 50 loss """softplus""" +876 50 regularizer """transh""" +876 50 optimizer """adam""" +876 50 training_loop """owa""" +876 50 negative_sampler """basic""" +876 50 evaluator """rankbased""" +876 51 dataset """kinships""" +876 51 model """transh""" +876 51 loss """softplus""" +876 51 regularizer """transh""" +876 51 optimizer """adam""" +876 51 training_loop """owa""" +876 51 negative_sampler """basic""" +876 51 evaluator """rankbased""" +876 52 dataset """kinships""" +876 52 model """transh""" +876 52 loss """softplus""" +876 52 regularizer """transh""" +876 52 optimizer """adam""" +876 52 training_loop """owa""" +876 52 negative_sampler """basic""" +876 52 evaluator """rankbased""" +876 53 dataset """kinships""" +876 53 model """transh""" +876 53 loss """softplus""" +876 53 regularizer """transh""" +876 53 optimizer """adam""" +876 53 training_loop """owa""" +876 53 negative_sampler """basic""" +876 53 evaluator """rankbased""" +876 54 dataset """kinships""" +876 54 model """transh""" +876 54 loss """softplus""" +876 54 regularizer """transh""" +876 54 optimizer """adam""" +876 54 training_loop """owa""" +876 54 negative_sampler """basic""" +876 54 evaluator """rankbased""" +876 55 
dataset """kinships""" +876 55 model """transh""" +876 55 loss """softplus""" +876 55 regularizer """transh""" +876 55 optimizer """adam""" +876 55 training_loop """owa""" +876 55 negative_sampler """basic""" +876 55 evaluator """rankbased""" +876 56 dataset """kinships""" +876 56 model """transh""" +876 56 loss """softplus""" +876 56 regularizer """transh""" +876 56 optimizer """adam""" +876 56 training_loop """owa""" +876 56 negative_sampler """basic""" +876 56 evaluator """rankbased""" +876 57 dataset """kinships""" +876 57 model """transh""" +876 57 loss """softplus""" +876 57 regularizer """transh""" +876 57 optimizer """adam""" +876 57 training_loop """owa""" +876 57 negative_sampler """basic""" +876 57 evaluator """rankbased""" +876 58 dataset """kinships""" +876 58 model """transh""" +876 58 loss """softplus""" +876 58 regularizer """transh""" +876 58 optimizer """adam""" +876 58 training_loop """owa""" +876 58 negative_sampler """basic""" +876 58 evaluator """rankbased""" +876 59 dataset """kinships""" +876 59 model """transh""" +876 59 loss """softplus""" +876 59 regularizer """transh""" +876 59 optimizer """adam""" +876 59 training_loop """owa""" +876 59 negative_sampler """basic""" +876 59 evaluator """rankbased""" +876 60 dataset """kinships""" +876 60 model """transh""" +876 60 loss """softplus""" +876 60 regularizer """transh""" +876 60 optimizer """adam""" +876 60 training_loop """owa""" +876 60 negative_sampler """basic""" +876 60 evaluator """rankbased""" +876 61 dataset """kinships""" +876 61 model """transh""" +876 61 loss """softplus""" +876 61 regularizer """transh""" +876 61 optimizer """adam""" +876 61 training_loop """owa""" +876 61 negative_sampler """basic""" +876 61 evaluator """rankbased""" +876 62 dataset """kinships""" +876 62 model """transh""" +876 62 loss """softplus""" +876 62 regularizer """transh""" +876 62 optimizer """adam""" +876 62 training_loop """owa""" +876 62 negative_sampler """basic""" +876 62 evaluator """rankbased""" 
+876 63 dataset """kinships""" +876 63 model """transh""" +876 63 loss """softplus""" +876 63 regularizer """transh""" +876 63 optimizer """adam""" +876 63 training_loop """owa""" +876 63 negative_sampler """basic""" +876 63 evaluator """rankbased""" +876 64 dataset """kinships""" +876 64 model """transh""" +876 64 loss """softplus""" +876 64 regularizer """transh""" +876 64 optimizer """adam""" +876 64 training_loop """owa""" +876 64 negative_sampler """basic""" +876 64 evaluator """rankbased""" +876 65 dataset """kinships""" +876 65 model """transh""" +876 65 loss """softplus""" +876 65 regularizer """transh""" +876 65 optimizer """adam""" +876 65 training_loop """owa""" +876 65 negative_sampler """basic""" +876 65 evaluator """rankbased""" +876 66 dataset """kinships""" +876 66 model """transh""" +876 66 loss """softplus""" +876 66 regularizer """transh""" +876 66 optimizer """adam""" +876 66 training_loop """owa""" +876 66 negative_sampler """basic""" +876 66 evaluator """rankbased""" +876 67 dataset """kinships""" +876 67 model """transh""" +876 67 loss """softplus""" +876 67 regularizer """transh""" +876 67 optimizer """adam""" +876 67 training_loop """owa""" +876 67 negative_sampler """basic""" +876 67 evaluator """rankbased""" +876 68 dataset """kinships""" +876 68 model """transh""" +876 68 loss """softplus""" +876 68 regularizer """transh""" +876 68 optimizer """adam""" +876 68 training_loop """owa""" +876 68 negative_sampler """basic""" +876 68 evaluator """rankbased""" +876 69 dataset """kinships""" +876 69 model """transh""" +876 69 loss """softplus""" +876 69 regularizer """transh""" +876 69 optimizer """adam""" +876 69 training_loop """owa""" +876 69 negative_sampler """basic""" +876 69 evaluator """rankbased""" +876 70 dataset """kinships""" +876 70 model """transh""" +876 70 loss """softplus""" +876 70 regularizer """transh""" +876 70 optimizer """adam""" +876 70 training_loop """owa""" +876 70 negative_sampler """basic""" +876 70 evaluator 
"""rankbased""" +876 71 dataset """kinships""" +876 71 model """transh""" +876 71 loss """softplus""" +876 71 regularizer """transh""" +876 71 optimizer """adam""" +876 71 training_loop """owa""" +876 71 negative_sampler """basic""" +876 71 evaluator """rankbased""" +876 72 dataset """kinships""" +876 72 model """transh""" +876 72 loss """softplus""" +876 72 regularizer """transh""" +876 72 optimizer """adam""" +876 72 training_loop """owa""" +876 72 negative_sampler """basic""" +876 72 evaluator """rankbased""" +876 73 dataset """kinships""" +876 73 model """transh""" +876 73 loss """softplus""" +876 73 regularizer """transh""" +876 73 optimizer """adam""" +876 73 training_loop """owa""" +876 73 negative_sampler """basic""" +876 73 evaluator """rankbased""" +876 74 dataset """kinships""" +876 74 model """transh""" +876 74 loss """softplus""" +876 74 regularizer """transh""" +876 74 optimizer """adam""" +876 74 training_loop """owa""" +876 74 negative_sampler """basic""" +876 74 evaluator """rankbased""" +876 75 dataset """kinships""" +876 75 model """transh""" +876 75 loss """softplus""" +876 75 regularizer """transh""" +876 75 optimizer """adam""" +876 75 training_loop """owa""" +876 75 negative_sampler """basic""" +876 75 evaluator """rankbased""" +876 76 dataset """kinships""" +876 76 model """transh""" +876 76 loss """softplus""" +876 76 regularizer """transh""" +876 76 optimizer """adam""" +876 76 training_loop """owa""" +876 76 negative_sampler """basic""" +876 76 evaluator """rankbased""" +876 77 dataset """kinships""" +876 77 model """transh""" +876 77 loss """softplus""" +876 77 regularizer """transh""" +876 77 optimizer """adam""" +876 77 training_loop """owa""" +876 77 negative_sampler """basic""" +876 77 evaluator """rankbased""" +876 78 dataset """kinships""" +876 78 model """transh""" +876 78 loss """softplus""" +876 78 regularizer """transh""" +876 78 optimizer """adam""" +876 78 training_loop """owa""" +876 78 negative_sampler """basic""" +876 78 
evaluator """rankbased""" +876 79 dataset """kinships""" +876 79 model """transh""" +876 79 loss """softplus""" +876 79 regularizer """transh""" +876 79 optimizer """adam""" +876 79 training_loop """owa""" +876 79 negative_sampler """basic""" +876 79 evaluator """rankbased""" +876 80 dataset """kinships""" +876 80 model """transh""" +876 80 loss """softplus""" +876 80 regularizer """transh""" +876 80 optimizer """adam""" +876 80 training_loop """owa""" +876 80 negative_sampler """basic""" +876 80 evaluator """rankbased""" +876 81 dataset """kinships""" +876 81 model """transh""" +876 81 loss """softplus""" +876 81 regularizer """transh""" +876 81 optimizer """adam""" +876 81 training_loop """owa""" +876 81 negative_sampler """basic""" +876 81 evaluator """rankbased""" +876 82 dataset """kinships""" +876 82 model """transh""" +876 82 loss """softplus""" +876 82 regularizer """transh""" +876 82 optimizer """adam""" +876 82 training_loop """owa""" +876 82 negative_sampler """basic""" +876 82 evaluator """rankbased""" +876 83 dataset """kinships""" +876 83 model """transh""" +876 83 loss """softplus""" +876 83 regularizer """transh""" +876 83 optimizer """adam""" +876 83 training_loop """owa""" +876 83 negative_sampler """basic""" +876 83 evaluator """rankbased""" +876 84 dataset """kinships""" +876 84 model """transh""" +876 84 loss """softplus""" +876 84 regularizer """transh""" +876 84 optimizer """adam""" +876 84 training_loop """owa""" +876 84 negative_sampler """basic""" +876 84 evaluator """rankbased""" +876 85 dataset """kinships""" +876 85 model """transh""" +876 85 loss """softplus""" +876 85 regularizer """transh""" +876 85 optimizer """adam""" +876 85 training_loop """owa""" +876 85 negative_sampler """basic""" +876 85 evaluator """rankbased""" +876 86 dataset """kinships""" +876 86 model """transh""" +876 86 loss """softplus""" +876 86 regularizer """transh""" +876 86 optimizer """adam""" +876 86 training_loop """owa""" +876 86 negative_sampler """basic""" 
+876 86 evaluator """rankbased""" +876 87 dataset """kinships""" +876 87 model """transh""" +876 87 loss """softplus""" +876 87 regularizer """transh""" +876 87 optimizer """adam""" +876 87 training_loop """owa""" +876 87 negative_sampler """basic""" +876 87 evaluator """rankbased""" +876 88 dataset """kinships""" +876 88 model """transh""" +876 88 loss """softplus""" +876 88 regularizer """transh""" +876 88 optimizer """adam""" +876 88 training_loop """owa""" +876 88 negative_sampler """basic""" +876 88 evaluator """rankbased""" +876 89 dataset """kinships""" +876 89 model """transh""" +876 89 loss """softplus""" +876 89 regularizer """transh""" +876 89 optimizer """adam""" +876 89 training_loop """owa""" +876 89 negative_sampler """basic""" +876 89 evaluator """rankbased""" +876 90 dataset """kinships""" +876 90 model """transh""" +876 90 loss """softplus""" +876 90 regularizer """transh""" +876 90 optimizer """adam""" +876 90 training_loop """owa""" +876 90 negative_sampler """basic""" +876 90 evaluator """rankbased""" +876 91 dataset """kinships""" +876 91 model """transh""" +876 91 loss """softplus""" +876 91 regularizer """transh""" +876 91 optimizer """adam""" +876 91 training_loop """owa""" +876 91 negative_sampler """basic""" +876 91 evaluator """rankbased""" +876 92 dataset """kinships""" +876 92 model """transh""" +876 92 loss """softplus""" +876 92 regularizer """transh""" +876 92 optimizer """adam""" +876 92 training_loop """owa""" +876 92 negative_sampler """basic""" +876 92 evaluator """rankbased""" +876 93 dataset """kinships""" +876 93 model """transh""" +876 93 loss """softplus""" +876 93 regularizer """transh""" +876 93 optimizer """adam""" +876 93 training_loop """owa""" +876 93 negative_sampler """basic""" +876 93 evaluator """rankbased""" +876 94 dataset """kinships""" +876 94 model """transh""" +876 94 loss """softplus""" +876 94 regularizer """transh""" +876 94 optimizer """adam""" +876 94 training_loop """owa""" +876 94 negative_sampler 
"""basic""" +876 94 evaluator """rankbased""" +876 95 dataset """kinships""" +876 95 model """transh""" +876 95 loss """softplus""" +876 95 regularizer """transh""" +876 95 optimizer """adam""" +876 95 training_loop """owa""" +876 95 negative_sampler """basic""" +876 95 evaluator """rankbased""" +876 96 dataset """kinships""" +876 96 model """transh""" +876 96 loss """softplus""" +876 96 regularizer """transh""" +876 96 optimizer """adam""" +876 96 training_loop """owa""" +876 96 negative_sampler """basic""" +876 96 evaluator """rankbased""" +876 97 dataset """kinships""" +876 97 model """transh""" +876 97 loss """softplus""" +876 97 regularizer """transh""" +876 97 optimizer """adam""" +876 97 training_loop """owa""" +876 97 negative_sampler """basic""" +876 97 evaluator """rankbased""" +876 98 dataset """kinships""" +876 98 model """transh""" +876 98 loss """softplus""" +876 98 regularizer """transh""" +876 98 optimizer """adam""" +876 98 training_loop """owa""" +876 98 negative_sampler """basic""" +876 98 evaluator """rankbased""" +876 99 dataset """kinships""" +876 99 model """transh""" +876 99 loss """softplus""" +876 99 regularizer """transh""" +876 99 optimizer """adam""" +876 99 training_loop """owa""" +876 99 negative_sampler """basic""" +876 99 evaluator """rankbased""" +876 100 dataset """kinships""" +876 100 model """transh""" +876 100 loss """softplus""" +876 100 regularizer """transh""" +876 100 optimizer """adam""" +876 100 training_loop """owa""" +876 100 negative_sampler """basic""" +876 100 evaluator """rankbased""" +877 1 model.embedding_dim 1.0 +877 1 model.scoring_fct_norm 2.0 +877 1 regularizer.weight 0.09792505632969935 +877 1 optimizer.lr 0.025129837224492337 +877 1 negative_sampler.num_negs_per_pos 66.0 +877 1 training.batch_size 0.0 +877 2 model.embedding_dim 2.0 +877 2 model.scoring_fct_norm 1.0 +877 2 regularizer.weight 0.18348741569263263 +877 2 optimizer.lr 0.01771867589258248 +877 2 negative_sampler.num_negs_per_pos 10.0 +877 2 
training.batch_size 0.0 +877 3 model.embedding_dim 2.0 +877 3 model.scoring_fct_norm 1.0 +877 3 regularizer.weight 0.04452299672548227 +877 3 optimizer.lr 0.003649387016626492 +877 3 negative_sampler.num_negs_per_pos 18.0 +877 3 training.batch_size 0.0 +877 4 model.embedding_dim 2.0 +877 4 model.scoring_fct_norm 2.0 +877 4 regularizer.weight 0.044548820506653326 +877 4 optimizer.lr 0.008572319854798987 +877 4 negative_sampler.num_negs_per_pos 52.0 +877 4 training.batch_size 2.0 +877 5 model.embedding_dim 1.0 +877 5 model.scoring_fct_norm 1.0 +877 5 regularizer.weight 0.022459932281365336 +877 5 optimizer.lr 0.007025924660686532 +877 5 negative_sampler.num_negs_per_pos 22.0 +877 5 training.batch_size 1.0 +877 6 model.embedding_dim 0.0 +877 6 model.scoring_fct_norm 2.0 +877 6 regularizer.weight 0.014707580149021817 +877 6 optimizer.lr 0.010471314325275542 +877 6 negative_sampler.num_negs_per_pos 11.0 +877 6 training.batch_size 1.0 +877 7 model.embedding_dim 2.0 +877 7 model.scoring_fct_norm 2.0 +877 7 regularizer.weight 0.1653261786355288 +877 7 optimizer.lr 0.003969513311422467 +877 7 negative_sampler.num_negs_per_pos 62.0 +877 7 training.batch_size 2.0 +877 8 model.embedding_dim 1.0 +877 8 model.scoring_fct_norm 1.0 +877 8 regularizer.weight 0.014396536711159492 +877 8 optimizer.lr 0.03127019133906906 +877 8 negative_sampler.num_negs_per_pos 66.0 +877 8 training.batch_size 1.0 +877 9 model.embedding_dim 1.0 +877 9 model.scoring_fct_norm 2.0 +877 9 regularizer.weight 0.01224301697972053 +877 9 optimizer.lr 0.034523903290419766 +877 9 negative_sampler.num_negs_per_pos 31.0 +877 9 training.batch_size 1.0 +877 10 model.embedding_dim 0.0 +877 10 model.scoring_fct_norm 2.0 +877 10 regularizer.weight 0.09732205853724149 +877 10 optimizer.lr 0.0017083967464255234 +877 10 negative_sampler.num_negs_per_pos 79.0 +877 10 training.batch_size 0.0 +877 11 model.embedding_dim 1.0 +877 11 model.scoring_fct_norm 2.0 +877 11 regularizer.weight 0.11761966824681543 +877 11 optimizer.lr 
0.007641316411588606 +877 11 negative_sampler.num_negs_per_pos 34.0 +877 11 training.batch_size 1.0 +877 12 model.embedding_dim 0.0 +877 12 model.scoring_fct_norm 1.0 +877 12 regularizer.weight 0.05206091200402174 +877 12 optimizer.lr 0.028597297939207234 +877 12 negative_sampler.num_negs_per_pos 51.0 +877 12 training.batch_size 1.0 +877 13 model.embedding_dim 0.0 +877 13 model.scoring_fct_norm 1.0 +877 13 regularizer.weight 0.055320629593701605 +877 13 optimizer.lr 0.010157369866065006 +877 13 negative_sampler.num_negs_per_pos 34.0 +877 13 training.batch_size 2.0 +877 14 model.embedding_dim 0.0 +877 14 model.scoring_fct_norm 2.0 +877 14 regularizer.weight 0.04642600503991182 +877 14 optimizer.lr 0.004007207956242819 +877 14 negative_sampler.num_negs_per_pos 39.0 +877 14 training.batch_size 2.0 +877 15 model.embedding_dim 1.0 +877 15 model.scoring_fct_norm 2.0 +877 15 regularizer.weight 0.09524028965172789 +877 15 optimizer.lr 0.0011128281787769157 +877 15 negative_sampler.num_negs_per_pos 65.0 +877 15 training.batch_size 0.0 +877 16 model.embedding_dim 1.0 +877 16 model.scoring_fct_norm 2.0 +877 16 regularizer.weight 0.1256931342658095 +877 16 optimizer.lr 0.020867474985249063 +877 16 negative_sampler.num_negs_per_pos 15.0 +877 16 training.batch_size 1.0 +877 17 model.embedding_dim 2.0 +877 17 model.scoring_fct_norm 2.0 +877 17 regularizer.weight 0.034672845319557215 +877 17 optimizer.lr 0.06790266433103656 +877 17 negative_sampler.num_negs_per_pos 69.0 +877 17 training.batch_size 1.0 +877 18 model.embedding_dim 1.0 +877 18 model.scoring_fct_norm 1.0 +877 18 regularizer.weight 0.235084442836976 +877 18 optimizer.lr 0.017500422734316318 +877 18 negative_sampler.num_negs_per_pos 34.0 +877 18 training.batch_size 1.0 +877 19 model.embedding_dim 0.0 +877 19 model.scoring_fct_norm 1.0 +877 19 regularizer.weight 0.04714664310890081 +877 19 optimizer.lr 0.006803325381992418 +877 19 negative_sampler.num_negs_per_pos 51.0 +877 19 training.batch_size 0.0 +877 20 
model.embedding_dim 0.0 +877 20 model.scoring_fct_norm 1.0 +877 20 regularizer.weight 0.020993292464258972 +877 20 optimizer.lr 0.001993408598820611 +877 20 negative_sampler.num_negs_per_pos 1.0 +877 20 training.batch_size 0.0 +877 21 model.embedding_dim 0.0 +877 21 model.scoring_fct_norm 2.0 +877 21 regularizer.weight 0.10177102838691651 +877 21 optimizer.lr 0.012062252109960016 +877 21 negative_sampler.num_negs_per_pos 51.0 +877 21 training.batch_size 2.0 +877 22 model.embedding_dim 1.0 +877 22 model.scoring_fct_norm 1.0 +877 22 regularizer.weight 0.014784429147889959 +877 22 optimizer.lr 0.058989557435894624 +877 22 negative_sampler.num_negs_per_pos 9.0 +877 22 training.batch_size 0.0 +877 23 model.embedding_dim 2.0 +877 23 model.scoring_fct_norm 2.0 +877 23 regularizer.weight 0.1605362177991917 +877 23 optimizer.lr 0.04289336633846469 +877 23 negative_sampler.num_negs_per_pos 82.0 +877 23 training.batch_size 2.0 +877 24 model.embedding_dim 2.0 +877 24 model.scoring_fct_norm 2.0 +877 24 regularizer.weight 0.02953013474415467 +877 24 optimizer.lr 0.09674591723471296 +877 24 negative_sampler.num_negs_per_pos 70.0 +877 24 training.batch_size 0.0 +877 25 model.embedding_dim 2.0 +877 25 model.scoring_fct_norm 1.0 +877 25 regularizer.weight 0.11918402485382311 +877 25 optimizer.lr 0.007936798210018997 +877 25 negative_sampler.num_negs_per_pos 46.0 +877 25 training.batch_size 2.0 +877 26 model.embedding_dim 1.0 +877 26 model.scoring_fct_norm 2.0 +877 26 regularizer.weight 0.08167639566540234 +877 26 optimizer.lr 0.01963461181248964 +877 26 negative_sampler.num_negs_per_pos 35.0 +877 26 training.batch_size 1.0 +877 27 model.embedding_dim 1.0 +877 27 model.scoring_fct_norm 2.0 +877 27 regularizer.weight 0.0696179017994167 +877 27 optimizer.lr 0.00501397630367922 +877 27 negative_sampler.num_negs_per_pos 0.0 +877 27 training.batch_size 1.0 +877 28 model.embedding_dim 1.0 +877 28 model.scoring_fct_norm 1.0 +877 28 regularizer.weight 0.025718080804283033 +877 28 
optimizer.lr 0.006661600463603119 +877 28 negative_sampler.num_negs_per_pos 61.0 +877 28 training.batch_size 2.0 +877 29 model.embedding_dim 1.0 +877 29 model.scoring_fct_norm 2.0 +877 29 regularizer.weight 0.02367308809014172 +877 29 optimizer.lr 0.004634399133098687 +877 29 negative_sampler.num_negs_per_pos 22.0 +877 29 training.batch_size 1.0 +877 30 model.embedding_dim 1.0 +877 30 model.scoring_fct_norm 1.0 +877 30 regularizer.weight 0.16541692764663798 +877 30 optimizer.lr 0.020270480981472667 +877 30 negative_sampler.num_negs_per_pos 60.0 +877 30 training.batch_size 1.0 +877 31 model.embedding_dim 1.0 +877 31 model.scoring_fct_norm 2.0 +877 31 regularizer.weight 0.09902943847557442 +877 31 optimizer.lr 0.0660189989111587 +877 31 negative_sampler.num_negs_per_pos 94.0 +877 31 training.batch_size 1.0 +877 32 model.embedding_dim 2.0 +877 32 model.scoring_fct_norm 2.0 +877 32 regularizer.weight 0.039636156148241224 +877 32 optimizer.lr 0.0011814081746847158 +877 32 negative_sampler.num_negs_per_pos 12.0 +877 32 training.batch_size 1.0 +877 33 model.embedding_dim 0.0 +877 33 model.scoring_fct_norm 1.0 +877 33 regularizer.weight 0.260124017050927 +877 33 optimizer.lr 0.002684801848435292 +877 33 negative_sampler.num_negs_per_pos 85.0 +877 33 training.batch_size 0.0 +877 34 model.embedding_dim 0.0 +877 34 model.scoring_fct_norm 2.0 +877 34 regularizer.weight 0.013196824360066096 +877 34 optimizer.lr 0.0030426422482855127 +877 34 negative_sampler.num_negs_per_pos 78.0 +877 34 training.batch_size 1.0 +877 35 model.embedding_dim 2.0 +877 35 model.scoring_fct_norm 2.0 +877 35 regularizer.weight 0.041460919831964545 +877 35 optimizer.lr 0.02365148650095878 +877 35 negative_sampler.num_negs_per_pos 8.0 +877 35 training.batch_size 0.0 +877 36 model.embedding_dim 1.0 +877 36 model.scoring_fct_norm 1.0 +877 36 regularizer.weight 0.07740508488906134 +877 36 optimizer.lr 0.007917230073411679 +877 36 negative_sampler.num_negs_per_pos 42.0 +877 36 training.batch_size 2.0 +877 37 
model.embedding_dim 0.0 +877 37 model.scoring_fct_norm 1.0 +877 37 regularizer.weight 0.040532836314992035 +877 37 optimizer.lr 0.0362297999151278 +877 37 negative_sampler.num_negs_per_pos 28.0 +877 37 training.batch_size 0.0 +877 38 model.embedding_dim 1.0 +877 38 model.scoring_fct_norm 1.0 +877 38 regularizer.weight 0.031177648189771538 +877 38 optimizer.lr 0.004829323160405805 +877 38 negative_sampler.num_negs_per_pos 9.0 +877 38 training.batch_size 1.0 +877 39 model.embedding_dim 2.0 +877 39 model.scoring_fct_norm 1.0 +877 39 regularizer.weight 0.022497167699357716 +877 39 optimizer.lr 0.013489217344305028 +877 39 negative_sampler.num_negs_per_pos 54.0 +877 39 training.batch_size 0.0 +877 40 model.embedding_dim 0.0 +877 40 model.scoring_fct_norm 2.0 +877 40 regularizer.weight 0.011166703758320356 +877 40 optimizer.lr 0.019245934304134685 +877 40 negative_sampler.num_negs_per_pos 9.0 +877 40 training.batch_size 0.0 +877 41 model.embedding_dim 2.0 +877 41 model.scoring_fct_norm 2.0 +877 41 regularizer.weight 0.11348578310999674 +877 41 optimizer.lr 0.0016183029663463355 +877 41 negative_sampler.num_negs_per_pos 59.0 +877 41 training.batch_size 1.0 +877 42 model.embedding_dim 1.0 +877 42 model.scoring_fct_norm 2.0 +877 42 regularizer.weight 0.018603595137514247 +877 42 optimizer.lr 0.001507057988389841 +877 42 negative_sampler.num_negs_per_pos 21.0 +877 42 training.batch_size 1.0 +877 43 model.embedding_dim 1.0 +877 43 model.scoring_fct_norm 1.0 +877 43 regularizer.weight 0.010263953466041462 +877 43 optimizer.lr 0.02017148656169448 +877 43 negative_sampler.num_negs_per_pos 48.0 +877 43 training.batch_size 0.0 +877 44 model.embedding_dim 2.0 +877 44 model.scoring_fct_norm 1.0 +877 44 regularizer.weight 0.0501091461034752 +877 44 optimizer.lr 0.0013297801425244153 +877 44 negative_sampler.num_negs_per_pos 7.0 +877 44 training.batch_size 1.0 +877 45 model.embedding_dim 1.0 +877 45 model.scoring_fct_norm 1.0 +877 45 regularizer.weight 0.038875027719619724 +877 45 
optimizer.lr 0.0010393551730351236 +877 45 negative_sampler.num_negs_per_pos 23.0 +877 45 training.batch_size 2.0 +877 46 model.embedding_dim 2.0 +877 46 model.scoring_fct_norm 1.0 +877 46 regularizer.weight 0.2754053947471808 +877 46 optimizer.lr 0.0021506406132885767 +877 46 negative_sampler.num_negs_per_pos 48.0 +877 46 training.batch_size 0.0 +877 47 model.embedding_dim 2.0 +877 47 model.scoring_fct_norm 1.0 +877 47 regularizer.weight 0.14420352568110612 +877 47 optimizer.lr 0.004408205323626579 +877 47 negative_sampler.num_negs_per_pos 28.0 +877 47 training.batch_size 2.0 +877 48 model.embedding_dim 2.0 +877 48 model.scoring_fct_norm 2.0 +877 48 regularizer.weight 0.26877055132955474 +877 48 optimizer.lr 0.0027469418087599905 +877 48 negative_sampler.num_negs_per_pos 60.0 +877 48 training.batch_size 2.0 +877 49 model.embedding_dim 1.0 +877 49 model.scoring_fct_norm 1.0 +877 49 regularizer.weight 0.05954972106099557 +877 49 optimizer.lr 0.025079713742967958 +877 49 negative_sampler.num_negs_per_pos 34.0 +877 49 training.batch_size 1.0 +877 50 model.embedding_dim 2.0 +877 50 model.scoring_fct_norm 1.0 +877 50 regularizer.weight 0.17166502956018742 +877 50 optimizer.lr 0.0035329478344906465 +877 50 negative_sampler.num_negs_per_pos 69.0 +877 50 training.batch_size 0.0 +877 51 model.embedding_dim 0.0 +877 51 model.scoring_fct_norm 1.0 +877 51 regularizer.weight 0.015157401412748607 +877 51 optimizer.lr 0.0010097419907931517 +877 51 negative_sampler.num_negs_per_pos 9.0 +877 51 training.batch_size 0.0 +877 52 model.embedding_dim 1.0 +877 52 model.scoring_fct_norm 2.0 +877 52 regularizer.weight 0.03492004927665619 +877 52 optimizer.lr 0.044160108509528936 +877 52 negative_sampler.num_negs_per_pos 15.0 +877 52 training.batch_size 2.0 +877 53 model.embedding_dim 1.0 +877 53 model.scoring_fct_norm 2.0 +877 53 regularizer.weight 0.06281734435295266 +877 53 optimizer.lr 0.03178191983453581 +877 53 negative_sampler.num_negs_per_pos 25.0 +877 53 training.batch_size 1.0 
+877 54 model.embedding_dim 1.0 +877 54 model.scoring_fct_norm 2.0 +877 54 regularizer.weight 0.02148765784820187 +877 54 optimizer.lr 0.0013237877467273133 +877 54 negative_sampler.num_negs_per_pos 96.0 +877 54 training.batch_size 0.0 +877 55 model.embedding_dim 2.0 +877 55 model.scoring_fct_norm 1.0 +877 55 regularizer.weight 0.011828232589112924 +877 55 optimizer.lr 0.0029374169216175128 +877 55 negative_sampler.num_negs_per_pos 92.0 +877 55 training.batch_size 2.0 +877 56 model.embedding_dim 2.0 +877 56 model.scoring_fct_norm 2.0 +877 56 regularizer.weight 0.05499980734863331 +877 56 optimizer.lr 0.0015403169740326756 +877 56 negative_sampler.num_negs_per_pos 33.0 +877 56 training.batch_size 2.0 +877 57 model.embedding_dim 0.0 +877 57 model.scoring_fct_norm 1.0 +877 57 regularizer.weight 0.04592654090638271 +877 57 optimizer.lr 0.0018487031760655189 +877 57 negative_sampler.num_negs_per_pos 81.0 +877 57 training.batch_size 0.0 +877 58 model.embedding_dim 2.0 +877 58 model.scoring_fct_norm 2.0 +877 58 regularizer.weight 0.0877065286371169 +877 58 optimizer.lr 0.0021885733555437187 +877 58 negative_sampler.num_negs_per_pos 69.0 +877 58 training.batch_size 1.0 +877 59 model.embedding_dim 1.0 +877 59 model.scoring_fct_norm 2.0 +877 59 regularizer.weight 0.0160519154931156 +877 59 optimizer.lr 0.08326875651409174 +877 59 negative_sampler.num_negs_per_pos 95.0 +877 59 training.batch_size 0.0 +877 60 model.embedding_dim 1.0 +877 60 model.scoring_fct_norm 2.0 +877 60 regularizer.weight 0.1985160547278244 +877 60 optimizer.lr 0.0021171689494116166 +877 60 negative_sampler.num_negs_per_pos 61.0 +877 60 training.batch_size 2.0 +877 61 model.embedding_dim 2.0 +877 61 model.scoring_fct_norm 1.0 +877 61 regularizer.weight 0.06261079118979443 +877 61 optimizer.lr 0.05560253783093041 +877 61 negative_sampler.num_negs_per_pos 69.0 +877 61 training.batch_size 1.0 +877 62 model.embedding_dim 2.0 +877 62 model.scoring_fct_norm 1.0 +877 62 regularizer.weight 0.02433501647992541 
+877 62 optimizer.lr 0.04067703464726324 +877 62 negative_sampler.num_negs_per_pos 43.0 +877 62 training.batch_size 2.0 +877 63 model.embedding_dim 2.0 +877 63 model.scoring_fct_norm 2.0 +877 63 regularizer.weight 0.03682311293544939 +877 63 optimizer.lr 0.07479961400934385 +877 63 negative_sampler.num_negs_per_pos 23.0 +877 63 training.batch_size 2.0 +877 64 model.embedding_dim 1.0 +877 64 model.scoring_fct_norm 2.0 +877 64 regularizer.weight 0.1975762858051792 +877 64 optimizer.lr 0.018215677843782602 +877 64 negative_sampler.num_negs_per_pos 8.0 +877 64 training.batch_size 0.0 +877 65 model.embedding_dim 1.0 +877 65 model.scoring_fct_norm 2.0 +877 65 regularizer.weight 0.013896112077502869 +877 65 optimizer.lr 0.07477007321317149 +877 65 negative_sampler.num_negs_per_pos 23.0 +877 65 training.batch_size 1.0 +877 66 model.embedding_dim 0.0 +877 66 model.scoring_fct_norm 1.0 +877 66 regularizer.weight 0.029344626535751813 +877 66 optimizer.lr 0.011712319694159744 +877 66 negative_sampler.num_negs_per_pos 26.0 +877 66 training.batch_size 0.0 +877 67 model.embedding_dim 2.0 +877 67 model.scoring_fct_norm 2.0 +877 67 regularizer.weight 0.21880895386352855 +877 67 optimizer.lr 0.006585648941475342 +877 67 negative_sampler.num_negs_per_pos 56.0 +877 67 training.batch_size 1.0 +877 68 model.embedding_dim 0.0 +877 68 model.scoring_fct_norm 1.0 +877 68 regularizer.weight 0.10004807760817208 +877 68 optimizer.lr 0.002924795885446615 +877 68 negative_sampler.num_negs_per_pos 27.0 +877 68 training.batch_size 1.0 +877 69 model.embedding_dim 0.0 +877 69 model.scoring_fct_norm 1.0 +877 69 regularizer.weight 0.012881190605092014 +877 69 optimizer.lr 0.012339486728849046 +877 69 negative_sampler.num_negs_per_pos 93.0 +877 69 training.batch_size 1.0 +877 70 model.embedding_dim 0.0 +877 70 model.scoring_fct_norm 2.0 +877 70 regularizer.weight 0.01569658504545182 +877 70 optimizer.lr 0.003541593129958657 +877 70 negative_sampler.num_negs_per_pos 96.0 +877 70 training.batch_size 1.0 
+877 71 model.embedding_dim 2.0 +877 71 model.scoring_fct_norm 1.0 +877 71 regularizer.weight 0.018691483552749036 +877 71 optimizer.lr 0.06995855891230424 +877 71 negative_sampler.num_negs_per_pos 37.0 +877 71 training.batch_size 1.0 +877 72 model.embedding_dim 2.0 +877 72 model.scoring_fct_norm 1.0 +877 72 regularizer.weight 0.20595257118390362 +877 72 optimizer.lr 0.0014901425609067953 +877 72 negative_sampler.num_negs_per_pos 18.0 +877 72 training.batch_size 2.0 +877 73 model.embedding_dim 1.0 +877 73 model.scoring_fct_norm 2.0 +877 73 regularizer.weight 0.05906658539976398 +877 73 optimizer.lr 0.006946194116335009 +877 73 negative_sampler.num_negs_per_pos 34.0 +877 73 training.batch_size 0.0 +877 74 model.embedding_dim 1.0 +877 74 model.scoring_fct_norm 1.0 +877 74 regularizer.weight 0.2807470558983135 +877 74 optimizer.lr 0.008897435369540673 +877 74 negative_sampler.num_negs_per_pos 4.0 +877 74 training.batch_size 2.0 +877 75 model.embedding_dim 1.0 +877 75 model.scoring_fct_norm 1.0 +877 75 regularizer.weight 0.0457917856129774 +877 75 optimizer.lr 0.003108333722375054 +877 75 negative_sampler.num_negs_per_pos 62.0 +877 75 training.batch_size 1.0 +877 76 model.embedding_dim 2.0 +877 76 model.scoring_fct_norm 1.0 +877 76 regularizer.weight 0.06949253196288444 +877 76 optimizer.lr 0.04439827549786196 +877 76 negative_sampler.num_negs_per_pos 6.0 +877 76 training.batch_size 1.0 +877 77 model.embedding_dim 2.0 +877 77 model.scoring_fct_norm 1.0 +877 77 regularizer.weight 0.02016783545835199 +877 77 optimizer.lr 0.030224929486400733 +877 77 negative_sampler.num_negs_per_pos 7.0 +877 77 training.batch_size 2.0 +877 78 model.embedding_dim 0.0 +877 78 model.scoring_fct_norm 1.0 +877 78 regularizer.weight 0.02124747637199801 +877 78 optimizer.lr 0.002544944503670913 +877 78 negative_sampler.num_negs_per_pos 96.0 +877 78 training.batch_size 2.0 +877 79 model.embedding_dim 1.0 +877 79 model.scoring_fct_norm 1.0 +877 79 regularizer.weight 0.05818314665094811 +877 79 
optimizer.lr 0.03927583319819086 +877 79 negative_sampler.num_negs_per_pos 74.0 +877 79 training.batch_size 1.0 +877 80 model.embedding_dim 0.0 +877 80 model.scoring_fct_norm 2.0 +877 80 regularizer.weight 0.11950335611193366 +877 80 optimizer.lr 0.002586977908376031 +877 80 negative_sampler.num_negs_per_pos 50.0 +877 80 training.batch_size 0.0 +877 81 model.embedding_dim 1.0 +877 81 model.scoring_fct_norm 2.0 +877 81 regularizer.weight 0.05801694184644579 +877 81 optimizer.lr 0.013241067038311691 +877 81 negative_sampler.num_negs_per_pos 99.0 +877 81 training.batch_size 0.0 +877 82 model.embedding_dim 0.0 +877 82 model.scoring_fct_norm 1.0 +877 82 regularizer.weight 0.07951510464205594 +877 82 optimizer.lr 0.002525208531296315 +877 82 negative_sampler.num_negs_per_pos 25.0 +877 82 training.batch_size 2.0 +877 83 model.embedding_dim 1.0 +877 83 model.scoring_fct_norm 2.0 +877 83 regularizer.weight 0.010156849207371157 +877 83 optimizer.lr 0.0212796014720212 +877 83 negative_sampler.num_negs_per_pos 8.0 +877 83 training.batch_size 1.0 +877 84 model.embedding_dim 2.0 +877 84 model.scoring_fct_norm 2.0 +877 84 regularizer.weight 0.1973643101294393 +877 84 optimizer.lr 0.027228636811593633 +877 84 negative_sampler.num_negs_per_pos 2.0 +877 84 training.batch_size 0.0 +877 85 model.embedding_dim 0.0 +877 85 model.scoring_fct_norm 2.0 +877 85 regularizer.weight 0.02305153632087527 +877 85 optimizer.lr 0.004324791344705258 +877 85 negative_sampler.num_negs_per_pos 4.0 +877 85 training.batch_size 2.0 +877 86 model.embedding_dim 0.0 +877 86 model.scoring_fct_norm 2.0 +877 86 regularizer.weight 0.03997380116771187 +877 86 optimizer.lr 0.07255148667397021 +877 86 negative_sampler.num_negs_per_pos 77.0 +877 86 training.batch_size 0.0 +877 87 model.embedding_dim 1.0 +877 87 model.scoring_fct_norm 1.0 +877 87 regularizer.weight 0.050233616732897 +877 87 optimizer.lr 0.0026083307931608476 +877 87 negative_sampler.num_negs_per_pos 66.0 +877 87 training.batch_size 0.0 +877 88 
model.embedding_dim 0.0 +877 88 model.scoring_fct_norm 2.0 +877 88 regularizer.weight 0.058530679990828366 +877 88 optimizer.lr 0.04814091932389391 +877 88 negative_sampler.num_negs_per_pos 99.0 +877 88 training.batch_size 1.0 +877 89 model.embedding_dim 1.0 +877 89 model.scoring_fct_norm 1.0 +877 89 regularizer.weight 0.01135869593593815 +877 89 optimizer.lr 0.002758773215245794 +877 89 negative_sampler.num_negs_per_pos 44.0 +877 89 training.batch_size 1.0 +877 90 model.embedding_dim 2.0 +877 90 model.scoring_fct_norm 2.0 +877 90 regularizer.weight 0.12958850936765307 +877 90 optimizer.lr 0.010675401351015358 +877 90 negative_sampler.num_negs_per_pos 55.0 +877 90 training.batch_size 2.0 +877 91 model.embedding_dim 1.0 +877 91 model.scoring_fct_norm 1.0 +877 91 regularizer.weight 0.10385346616118529 +877 91 optimizer.lr 0.0656938078298695 +877 91 negative_sampler.num_negs_per_pos 93.0 +877 91 training.batch_size 0.0 +877 92 model.embedding_dim 0.0 +877 92 model.scoring_fct_norm 1.0 +877 92 regularizer.weight 0.031995430200810575 +877 92 optimizer.lr 0.003075868710606531 +877 92 negative_sampler.num_negs_per_pos 82.0 +877 92 training.batch_size 1.0 +877 93 model.embedding_dim 2.0 +877 93 model.scoring_fct_norm 2.0 +877 93 regularizer.weight 0.1933115352874353 +877 93 optimizer.lr 0.0016820207199538153 +877 93 negative_sampler.num_negs_per_pos 9.0 +877 93 training.batch_size 2.0 +877 94 model.embedding_dim 0.0 +877 94 model.scoring_fct_norm 1.0 +877 94 regularizer.weight 0.09103978839439218 +877 94 optimizer.lr 0.00507871578961555 +877 94 negative_sampler.num_negs_per_pos 95.0 +877 94 training.batch_size 1.0 +877 95 model.embedding_dim 0.0 +877 95 model.scoring_fct_norm 1.0 +877 95 regularizer.weight 0.1374467461133879 +877 95 optimizer.lr 0.0023839969087542973 +877 95 negative_sampler.num_negs_per_pos 40.0 +877 95 training.batch_size 1.0 +877 96 model.embedding_dim 2.0 +877 96 model.scoring_fct_norm 1.0 +877 96 regularizer.weight 0.03281294780918738 +877 96 
optimizer.lr 0.03850783207647418 +877 96 negative_sampler.num_negs_per_pos 11.0 +877 96 training.batch_size 1.0 +877 97 model.embedding_dim 0.0 +877 97 model.scoring_fct_norm 2.0 +877 97 regularizer.weight 0.08473046614523685 +877 97 optimizer.lr 0.00748470719987558 +877 97 negative_sampler.num_negs_per_pos 10.0 +877 97 training.batch_size 0.0 +877 98 model.embedding_dim 1.0 +877 98 model.scoring_fct_norm 2.0 +877 98 regularizer.weight 0.019714538622122708 +877 98 optimizer.lr 0.003934256423809585 +877 98 negative_sampler.num_negs_per_pos 96.0 +877 98 training.batch_size 2.0 +877 99 model.embedding_dim 2.0 +877 99 model.scoring_fct_norm 1.0 +877 99 regularizer.weight 0.032224756682134634 +877 99 optimizer.lr 0.003406870550574458 +877 99 negative_sampler.num_negs_per_pos 33.0 +877 99 training.batch_size 1.0 +877 100 model.embedding_dim 2.0 +877 100 model.scoring_fct_norm 1.0 +877 100 regularizer.weight 0.12712803339634365 +877 100 optimizer.lr 0.014336654978869132 +877 100 negative_sampler.num_negs_per_pos 67.0 +877 100 training.batch_size 2.0 +877 1 dataset """kinships""" +877 1 model """transh""" +877 1 loss """bceaftersigmoid""" +877 1 regularizer """transh""" +877 1 optimizer """adam""" +877 1 training_loop """owa""" +877 1 negative_sampler """basic""" +877 1 evaluator """rankbased""" +877 2 dataset """kinships""" +877 2 model """transh""" +877 2 loss """bceaftersigmoid""" +877 2 regularizer """transh""" +877 2 optimizer """adam""" +877 2 training_loop """owa""" +877 2 negative_sampler """basic""" +877 2 evaluator """rankbased""" +877 3 dataset """kinships""" +877 3 model """transh""" +877 3 loss """bceaftersigmoid""" +877 3 regularizer """transh""" +877 3 optimizer """adam""" +877 3 training_loop """owa""" +877 3 negative_sampler """basic""" +877 3 evaluator """rankbased""" +877 4 dataset """kinships""" +877 4 model """transh""" +877 4 loss """bceaftersigmoid""" +877 4 regularizer """transh""" +877 4 optimizer """adam""" +877 4 training_loop """owa""" +877 4 
negative_sampler """basic""" +877 4 evaluator """rankbased""" +877 5 dataset """kinships""" +877 5 model """transh""" +877 5 loss """bceaftersigmoid""" +877 5 regularizer """transh""" +877 5 optimizer """adam""" +877 5 training_loop """owa""" +877 5 negative_sampler """basic""" +877 5 evaluator """rankbased""" +877 6 dataset """kinships""" +877 6 model """transh""" +877 6 loss """bceaftersigmoid""" +877 6 regularizer """transh""" +877 6 optimizer """adam""" +877 6 training_loop """owa""" +877 6 negative_sampler """basic""" +877 6 evaluator """rankbased""" +877 7 dataset """kinships""" +877 7 model """transh""" +877 7 loss """bceaftersigmoid""" +877 7 regularizer """transh""" +877 7 optimizer """adam""" +877 7 training_loop """owa""" +877 7 negative_sampler """basic""" +877 7 evaluator """rankbased""" +877 8 dataset """kinships""" +877 8 model """transh""" +877 8 loss """bceaftersigmoid""" +877 8 regularizer """transh""" +877 8 optimizer """adam""" +877 8 training_loop """owa""" +877 8 negative_sampler """basic""" +877 8 evaluator """rankbased""" +877 9 dataset """kinships""" +877 9 model """transh""" +877 9 loss """bceaftersigmoid""" +877 9 regularizer """transh""" +877 9 optimizer """adam""" +877 9 training_loop """owa""" +877 9 negative_sampler """basic""" +877 9 evaluator """rankbased""" +877 10 dataset """kinships""" +877 10 model """transh""" +877 10 loss """bceaftersigmoid""" +877 10 regularizer """transh""" +877 10 optimizer """adam""" +877 10 training_loop """owa""" +877 10 negative_sampler """basic""" +877 10 evaluator """rankbased""" +877 11 dataset """kinships""" +877 11 model """transh""" +877 11 loss """bceaftersigmoid""" +877 11 regularizer """transh""" +877 11 optimizer """adam""" +877 11 training_loop """owa""" +877 11 negative_sampler """basic""" +877 11 evaluator """rankbased""" +877 12 dataset """kinships""" +877 12 model """transh""" +877 12 loss """bceaftersigmoid""" +877 12 regularizer """transh""" +877 12 optimizer """adam""" +877 12 
training_loop """owa""" +877 12 negative_sampler """basic""" +877 12 evaluator """rankbased""" +877 13 dataset """kinships""" +877 13 model """transh""" +877 13 loss """bceaftersigmoid""" +877 13 regularizer """transh""" +877 13 optimizer """adam""" +877 13 training_loop """owa""" +877 13 negative_sampler """basic""" +877 13 evaluator """rankbased""" +877 14 dataset """kinships""" +877 14 model """transh""" +877 14 loss """bceaftersigmoid""" +877 14 regularizer """transh""" +877 14 optimizer """adam""" +877 14 training_loop """owa""" +877 14 negative_sampler """basic""" +877 14 evaluator """rankbased""" +877 15 dataset """kinships""" +877 15 model """transh""" +877 15 loss """bceaftersigmoid""" +877 15 regularizer """transh""" +877 15 optimizer """adam""" +877 15 training_loop """owa""" +877 15 negative_sampler """basic""" +877 15 evaluator """rankbased""" +877 16 dataset """kinships""" +877 16 model """transh""" +877 16 loss """bceaftersigmoid""" +877 16 regularizer """transh""" +877 16 optimizer """adam""" +877 16 training_loop """owa""" +877 16 negative_sampler """basic""" +877 16 evaluator """rankbased""" +877 17 dataset """kinships""" +877 17 model """transh""" +877 17 loss """bceaftersigmoid""" +877 17 regularizer """transh""" +877 17 optimizer """adam""" +877 17 training_loop """owa""" +877 17 negative_sampler """basic""" +877 17 evaluator """rankbased""" +877 18 dataset """kinships""" +877 18 model """transh""" +877 18 loss """bceaftersigmoid""" +877 18 regularizer """transh""" +877 18 optimizer """adam""" +877 18 training_loop """owa""" +877 18 negative_sampler """basic""" +877 18 evaluator """rankbased""" +877 19 dataset """kinships""" +877 19 model """transh""" +877 19 loss """bceaftersigmoid""" +877 19 regularizer """transh""" +877 19 optimizer """adam""" +877 19 training_loop """owa""" +877 19 negative_sampler """basic""" +877 19 evaluator """rankbased""" +877 20 dataset """kinships""" +877 20 model """transh""" +877 20 loss """bceaftersigmoid""" +877 
20 regularizer """transh""" +877 20 optimizer """adam""" +877 20 training_loop """owa""" +877 20 negative_sampler """basic""" +877 20 evaluator """rankbased""" +877 21 dataset """kinships""" +877 21 model """transh""" +877 21 loss """bceaftersigmoid""" +877 21 regularizer """transh""" +877 21 optimizer """adam""" +877 21 training_loop """owa""" +877 21 negative_sampler """basic""" +877 21 evaluator """rankbased""" +877 22 dataset """kinships""" +877 22 model """transh""" +877 22 loss """bceaftersigmoid""" +877 22 regularizer """transh""" +877 22 optimizer """adam""" +877 22 training_loop """owa""" +877 22 negative_sampler """basic""" +877 22 evaluator """rankbased""" +877 23 dataset """kinships""" +877 23 model """transh""" +877 23 loss """bceaftersigmoid""" +877 23 regularizer """transh""" +877 23 optimizer """adam""" +877 23 training_loop """owa""" +877 23 negative_sampler """basic""" +877 23 evaluator """rankbased""" +877 24 dataset """kinships""" +877 24 model """transh""" +877 24 loss """bceaftersigmoid""" +877 24 regularizer """transh""" +877 24 optimizer """adam""" +877 24 training_loop """owa""" +877 24 negative_sampler """basic""" +877 24 evaluator """rankbased""" +877 25 dataset """kinships""" +877 25 model """transh""" +877 25 loss """bceaftersigmoid""" +877 25 regularizer """transh""" +877 25 optimizer """adam""" +877 25 training_loop """owa""" +877 25 negative_sampler """basic""" +877 25 evaluator """rankbased""" +877 26 dataset """kinships""" +877 26 model """transh""" +877 26 loss """bceaftersigmoid""" +877 26 regularizer """transh""" +877 26 optimizer """adam""" +877 26 training_loop """owa""" +877 26 negative_sampler """basic""" +877 26 evaluator """rankbased""" +877 27 dataset """kinships""" +877 27 model """transh""" +877 27 loss """bceaftersigmoid""" +877 27 regularizer """transh""" +877 27 optimizer """adam""" +877 27 training_loop """owa""" +877 27 negative_sampler """basic""" +877 27 evaluator """rankbased""" +877 28 dataset """kinships""" 
+877 28 model """transh""" +877 28 loss """bceaftersigmoid""" +877 28 regularizer """transh""" +877 28 optimizer """adam""" +877 28 training_loop """owa""" +877 28 negative_sampler """basic""" +877 28 evaluator """rankbased""" +877 29 dataset """kinships""" +877 29 model """transh""" +877 29 loss """bceaftersigmoid""" +877 29 regularizer """transh""" +877 29 optimizer """adam""" +877 29 training_loop """owa""" +877 29 negative_sampler """basic""" +877 29 evaluator """rankbased""" +877 30 dataset """kinships""" +877 30 model """transh""" +877 30 loss """bceaftersigmoid""" +877 30 regularizer """transh""" +877 30 optimizer """adam""" +877 30 training_loop """owa""" +877 30 negative_sampler """basic""" +877 30 evaluator """rankbased""" +877 31 dataset """kinships""" +877 31 model """transh""" +877 31 loss """bceaftersigmoid""" +877 31 regularizer """transh""" +877 31 optimizer """adam""" +877 31 training_loop """owa""" +877 31 negative_sampler """basic""" +877 31 evaluator """rankbased""" +877 32 dataset """kinships""" +877 32 model """transh""" +877 32 loss """bceaftersigmoid""" +877 32 regularizer """transh""" +877 32 optimizer """adam""" +877 32 training_loop """owa""" +877 32 negative_sampler """basic""" +877 32 evaluator """rankbased""" +877 33 dataset """kinships""" +877 33 model """transh""" +877 33 loss """bceaftersigmoid""" +877 33 regularizer """transh""" +877 33 optimizer """adam""" +877 33 training_loop """owa""" +877 33 negative_sampler """basic""" +877 33 evaluator """rankbased""" +877 34 dataset """kinships""" +877 34 model """transh""" +877 34 loss """bceaftersigmoid""" +877 34 regularizer """transh""" +877 34 optimizer """adam""" +877 34 training_loop """owa""" +877 34 negative_sampler """basic""" +877 34 evaluator """rankbased""" +877 35 dataset """kinships""" +877 35 model """transh""" +877 35 loss """bceaftersigmoid""" +877 35 regularizer """transh""" +877 35 optimizer """adam""" +877 35 training_loop """owa""" +877 35 negative_sampler """basic""" 
+877 35 evaluator """rankbased""" +877 36 dataset """kinships""" +877 36 model """transh""" +877 36 loss """bceaftersigmoid""" +877 36 regularizer """transh""" +877 36 optimizer """adam""" +877 36 training_loop """owa""" +877 36 negative_sampler """basic""" +877 36 evaluator """rankbased""" +877 37 dataset """kinships""" +877 37 model """transh""" +877 37 loss """bceaftersigmoid""" +877 37 regularizer """transh""" +877 37 optimizer """adam""" +877 37 training_loop """owa""" +877 37 negative_sampler """basic""" +877 37 evaluator """rankbased""" +877 38 dataset """kinships""" +877 38 model """transh""" +877 38 loss """bceaftersigmoid""" +877 38 regularizer """transh""" +877 38 optimizer """adam""" +877 38 training_loop """owa""" +877 38 negative_sampler """basic""" +877 38 evaluator """rankbased""" +877 39 dataset """kinships""" +877 39 model """transh""" +877 39 loss """bceaftersigmoid""" +877 39 regularizer """transh""" +877 39 optimizer """adam""" +877 39 training_loop """owa""" +877 39 negative_sampler """basic""" +877 39 evaluator """rankbased""" +877 40 dataset """kinships""" +877 40 model """transh""" +877 40 loss """bceaftersigmoid""" +877 40 regularizer """transh""" +877 40 optimizer """adam""" +877 40 training_loop """owa""" +877 40 negative_sampler """basic""" +877 40 evaluator """rankbased""" +877 41 dataset """kinships""" +877 41 model """transh""" +877 41 loss """bceaftersigmoid""" +877 41 regularizer """transh""" +877 41 optimizer """adam""" +877 41 training_loop """owa""" +877 41 negative_sampler """basic""" +877 41 evaluator """rankbased""" +877 42 dataset """kinships""" +877 42 model """transh""" +877 42 loss """bceaftersigmoid""" +877 42 regularizer """transh""" +877 42 optimizer """adam""" +877 42 training_loop """owa""" +877 42 negative_sampler """basic""" +877 42 evaluator """rankbased""" +877 43 dataset """kinships""" +877 43 model """transh""" +877 43 loss """bceaftersigmoid""" +877 43 regularizer """transh""" +877 43 optimizer """adam""" +877 
43 training_loop """owa""" +877 43 negative_sampler """basic""" +877 43 evaluator """rankbased""" +877 44 dataset """kinships""" +877 44 model """transh""" +877 44 loss """bceaftersigmoid""" +877 44 regularizer """transh""" +877 44 optimizer """adam""" +877 44 training_loop """owa""" +877 44 negative_sampler """basic""" +877 44 evaluator """rankbased""" +877 45 dataset """kinships""" +877 45 model """transh""" +877 45 loss """bceaftersigmoid""" +877 45 regularizer """transh""" +877 45 optimizer """adam""" +877 45 training_loop """owa""" +877 45 negative_sampler """basic""" +877 45 evaluator """rankbased""" +877 46 dataset """kinships""" +877 46 model """transh""" +877 46 loss """bceaftersigmoid""" +877 46 regularizer """transh""" +877 46 optimizer """adam""" +877 46 training_loop """owa""" +877 46 negative_sampler """basic""" +877 46 evaluator """rankbased""" +877 47 dataset """kinships""" +877 47 model """transh""" +877 47 loss """bceaftersigmoid""" +877 47 regularizer """transh""" +877 47 optimizer """adam""" +877 47 training_loop """owa""" +877 47 negative_sampler """basic""" +877 47 evaluator """rankbased""" +877 48 dataset """kinships""" +877 48 model """transh""" +877 48 loss """bceaftersigmoid""" +877 48 regularizer """transh""" +877 48 optimizer """adam""" +877 48 training_loop """owa""" +877 48 negative_sampler """basic""" +877 48 evaluator """rankbased""" +877 49 dataset """kinships""" +877 49 model """transh""" +877 49 loss """bceaftersigmoid""" +877 49 regularizer """transh""" +877 49 optimizer """adam""" +877 49 training_loop """owa""" +877 49 negative_sampler """basic""" +877 49 evaluator """rankbased""" +877 50 dataset """kinships""" +877 50 model """transh""" +877 50 loss """bceaftersigmoid""" +877 50 regularizer """transh""" +877 50 optimizer """adam""" +877 50 training_loop """owa""" +877 50 negative_sampler """basic""" +877 50 evaluator """rankbased""" +877 51 dataset """kinships""" +877 51 model """transh""" +877 51 loss """bceaftersigmoid""" 
+877 51 regularizer """transh""" +877 51 optimizer """adam""" +877 51 training_loop """owa""" +877 51 negative_sampler """basic""" +877 51 evaluator """rankbased""" +877 52 dataset """kinships""" +877 52 model """transh""" +877 52 loss """bceaftersigmoid""" +877 52 regularizer """transh""" +877 52 optimizer """adam""" +877 52 training_loop """owa""" +877 52 negative_sampler """basic""" +877 52 evaluator """rankbased""" +877 53 dataset """kinships""" +877 53 model """transh""" +877 53 loss """bceaftersigmoid""" +877 53 regularizer """transh""" +877 53 optimizer """adam""" +877 53 training_loop """owa""" +877 53 negative_sampler """basic""" +877 53 evaluator """rankbased""" +877 54 dataset """kinships""" +877 54 model """transh""" +877 54 loss """bceaftersigmoid""" +877 54 regularizer """transh""" +877 54 optimizer """adam""" +877 54 training_loop """owa""" +877 54 negative_sampler """basic""" +877 54 evaluator """rankbased""" +877 55 dataset """kinships""" +877 55 model """transh""" +877 55 loss """bceaftersigmoid""" +877 55 regularizer """transh""" +877 55 optimizer """adam""" +877 55 training_loop """owa""" +877 55 negative_sampler """basic""" +877 55 evaluator """rankbased""" +877 56 dataset """kinships""" +877 56 model """transh""" +877 56 loss """bceaftersigmoid""" +877 56 regularizer """transh""" +877 56 optimizer """adam""" +877 56 training_loop """owa""" +877 56 negative_sampler """basic""" +877 56 evaluator """rankbased""" +877 57 dataset """kinships""" +877 57 model """transh""" +877 57 loss """bceaftersigmoid""" +877 57 regularizer """transh""" +877 57 optimizer """adam""" +877 57 training_loop """owa""" +877 57 negative_sampler """basic""" +877 57 evaluator """rankbased""" +877 58 dataset """kinships""" +877 58 model """transh""" +877 58 loss """bceaftersigmoid""" +877 58 regularizer """transh""" +877 58 optimizer """adam""" +877 58 training_loop """owa""" +877 58 negative_sampler """basic""" +877 58 evaluator """rankbased""" +877 59 dataset 
"""kinships""" +877 59 model """transh""" +877 59 loss """bceaftersigmoid""" +877 59 regularizer """transh""" +877 59 optimizer """adam""" +877 59 training_loop """owa""" +877 59 negative_sampler """basic""" +877 59 evaluator """rankbased""" +877 60 dataset """kinships""" +877 60 model """transh""" +877 60 loss """bceaftersigmoid""" +877 60 regularizer """transh""" +877 60 optimizer """adam""" +877 60 training_loop """owa""" +877 60 negative_sampler """basic""" +877 60 evaluator """rankbased""" +877 61 dataset """kinships""" +877 61 model """transh""" +877 61 loss """bceaftersigmoid""" +877 61 regularizer """transh""" +877 61 optimizer """adam""" +877 61 training_loop """owa""" +877 61 negative_sampler """basic""" +877 61 evaluator """rankbased""" +877 62 dataset """kinships""" +877 62 model """transh""" +877 62 loss """bceaftersigmoid""" +877 62 regularizer """transh""" +877 62 optimizer """adam""" +877 62 training_loop """owa""" +877 62 negative_sampler """basic""" +877 62 evaluator """rankbased""" +877 63 dataset """kinships""" +877 63 model """transh""" +877 63 loss """bceaftersigmoid""" +877 63 regularizer """transh""" +877 63 optimizer """adam""" +877 63 training_loop """owa""" +877 63 negative_sampler """basic""" +877 63 evaluator """rankbased""" +877 64 dataset """kinships""" +877 64 model """transh""" +877 64 loss """bceaftersigmoid""" +877 64 regularizer """transh""" +877 64 optimizer """adam""" +877 64 training_loop """owa""" +877 64 negative_sampler """basic""" +877 64 evaluator """rankbased""" +877 65 dataset """kinships""" +877 65 model """transh""" +877 65 loss """bceaftersigmoid""" +877 65 regularizer """transh""" +877 65 optimizer """adam""" +877 65 training_loop """owa""" +877 65 negative_sampler """basic""" +877 65 evaluator """rankbased""" +877 66 dataset """kinships""" +877 66 model """transh""" +877 66 loss """bceaftersigmoid""" +877 66 regularizer """transh""" +877 66 optimizer """adam""" +877 66 training_loop """owa""" +877 66 
negative_sampler """basic""" +877 66 evaluator """rankbased""" +877 67 dataset """kinships""" +877 67 model """transh""" +877 67 loss """bceaftersigmoid""" +877 67 regularizer """transh""" +877 67 optimizer """adam""" +877 67 training_loop """owa""" +877 67 negative_sampler """basic""" +877 67 evaluator """rankbased""" +877 68 dataset """kinships""" +877 68 model """transh""" +877 68 loss """bceaftersigmoid""" +877 68 regularizer """transh""" +877 68 optimizer """adam""" +877 68 training_loop """owa""" +877 68 negative_sampler """basic""" +877 68 evaluator """rankbased""" +877 69 dataset """kinships""" +877 69 model """transh""" +877 69 loss """bceaftersigmoid""" +877 69 regularizer """transh""" +877 69 optimizer """adam""" +877 69 training_loop """owa""" +877 69 negative_sampler """basic""" +877 69 evaluator """rankbased""" +877 70 dataset """kinships""" +877 70 model """transh""" +877 70 loss """bceaftersigmoid""" +877 70 regularizer """transh""" +877 70 optimizer """adam""" +877 70 training_loop """owa""" +877 70 negative_sampler """basic""" +877 70 evaluator """rankbased""" +877 71 dataset """kinships""" +877 71 model """transh""" +877 71 loss """bceaftersigmoid""" +877 71 regularizer """transh""" +877 71 optimizer """adam""" +877 71 training_loop """owa""" +877 71 negative_sampler """basic""" +877 71 evaluator """rankbased""" +877 72 dataset """kinships""" +877 72 model """transh""" +877 72 loss """bceaftersigmoid""" +877 72 regularizer """transh""" +877 72 optimizer """adam""" +877 72 training_loop """owa""" +877 72 negative_sampler """basic""" +877 72 evaluator """rankbased""" +877 73 dataset """kinships""" +877 73 model """transh""" +877 73 loss """bceaftersigmoid""" +877 73 regularizer """transh""" +877 73 optimizer """adam""" +877 73 training_loop """owa""" +877 73 negative_sampler """basic""" +877 73 evaluator """rankbased""" +877 74 dataset """kinships""" +877 74 model """transh""" +877 74 loss """bceaftersigmoid""" +877 74 regularizer """transh""" +877 
74 optimizer """adam""" +877 74 training_loop """owa""" +877 74 negative_sampler """basic""" +877 74 evaluator """rankbased""" +877 75 dataset """kinships""" +877 75 model """transh""" +877 75 loss """bceaftersigmoid""" +877 75 regularizer """transh""" +877 75 optimizer """adam""" +877 75 training_loop """owa""" +877 75 negative_sampler """basic""" +877 75 evaluator """rankbased""" +877 76 dataset """kinships""" +877 76 model """transh""" +877 76 loss """bceaftersigmoid""" +877 76 regularizer """transh""" +877 76 optimizer """adam""" +877 76 training_loop """owa""" +877 76 negative_sampler """basic""" +877 76 evaluator """rankbased""" +877 77 dataset """kinships""" +877 77 model """transh""" +877 77 loss """bceaftersigmoid""" +877 77 regularizer """transh""" +877 77 optimizer """adam""" +877 77 training_loop """owa""" +877 77 negative_sampler """basic""" +877 77 evaluator """rankbased""" +877 78 dataset """kinships""" +877 78 model """transh""" +877 78 loss """bceaftersigmoid""" +877 78 regularizer """transh""" +877 78 optimizer """adam""" +877 78 training_loop """owa""" +877 78 negative_sampler """basic""" +877 78 evaluator """rankbased""" +877 79 dataset """kinships""" +877 79 model """transh""" +877 79 loss """bceaftersigmoid""" +877 79 regularizer """transh""" +877 79 optimizer """adam""" +877 79 training_loop """owa""" +877 79 negative_sampler """basic""" +877 79 evaluator """rankbased""" +877 80 dataset """kinships""" +877 80 model """transh""" +877 80 loss """bceaftersigmoid""" +877 80 regularizer """transh""" +877 80 optimizer """adam""" +877 80 training_loop """owa""" +877 80 negative_sampler """basic""" +877 80 evaluator """rankbased""" +877 81 dataset """kinships""" +877 81 model """transh""" +877 81 loss """bceaftersigmoid""" +877 81 regularizer """transh""" +877 81 optimizer """adam""" +877 81 training_loop """owa""" +877 81 negative_sampler """basic""" +877 81 evaluator """rankbased""" +877 82 dataset """kinships""" +877 82 model """transh""" +877 82 
loss """bceaftersigmoid""" +877 82 regularizer """transh""" +877 82 optimizer """adam""" +877 82 training_loop """owa""" +877 82 negative_sampler """basic""" +877 82 evaluator """rankbased""" +877 83 dataset """kinships""" +877 83 model """transh""" +877 83 loss """bceaftersigmoid""" +877 83 regularizer """transh""" +877 83 optimizer """adam""" +877 83 training_loop """owa""" +877 83 negative_sampler """basic""" +877 83 evaluator """rankbased""" +877 84 dataset """kinships""" +877 84 model """transh""" +877 84 loss """bceaftersigmoid""" +877 84 regularizer """transh""" +877 84 optimizer """adam""" +877 84 training_loop """owa""" +877 84 negative_sampler """basic""" +877 84 evaluator """rankbased""" +877 85 dataset """kinships""" +877 85 model """transh""" +877 85 loss """bceaftersigmoid""" +877 85 regularizer """transh""" +877 85 optimizer """adam""" +877 85 training_loop """owa""" +877 85 negative_sampler """basic""" +877 85 evaluator """rankbased""" +877 86 dataset """kinships""" +877 86 model """transh""" +877 86 loss """bceaftersigmoid""" +877 86 regularizer """transh""" +877 86 optimizer """adam""" +877 86 training_loop """owa""" +877 86 negative_sampler """basic""" +877 86 evaluator """rankbased""" +877 87 dataset """kinships""" +877 87 model """transh""" +877 87 loss """bceaftersigmoid""" +877 87 regularizer """transh""" +877 87 optimizer """adam""" +877 87 training_loop """owa""" +877 87 negative_sampler """basic""" +877 87 evaluator """rankbased""" +877 88 dataset """kinships""" +877 88 model """transh""" +877 88 loss """bceaftersigmoid""" +877 88 regularizer """transh""" +877 88 optimizer """adam""" +877 88 training_loop """owa""" +877 88 negative_sampler """basic""" +877 88 evaluator """rankbased""" +877 89 dataset """kinships""" +877 89 model """transh""" +877 89 loss """bceaftersigmoid""" +877 89 regularizer """transh""" +877 89 optimizer """adam""" +877 89 training_loop """owa""" +877 89 negative_sampler """basic""" +877 89 evaluator """rankbased""" 
+877 90 dataset """kinships""" +877 90 model """transh""" +877 90 loss """bceaftersigmoid""" +877 90 regularizer """transh""" +877 90 optimizer """adam""" +877 90 training_loop """owa""" +877 90 negative_sampler """basic""" +877 90 evaluator """rankbased""" +877 91 dataset """kinships""" +877 91 model """transh""" +877 91 loss """bceaftersigmoid""" +877 91 regularizer """transh""" +877 91 optimizer """adam""" +877 91 training_loop """owa""" +877 91 negative_sampler """basic""" +877 91 evaluator """rankbased""" +877 92 dataset """kinships""" +877 92 model """transh""" +877 92 loss """bceaftersigmoid""" +877 92 regularizer """transh""" +877 92 optimizer """adam""" +877 92 training_loop """owa""" +877 92 negative_sampler """basic""" +877 92 evaluator """rankbased""" +877 93 dataset """kinships""" +877 93 model """transh""" +877 93 loss """bceaftersigmoid""" +877 93 regularizer """transh""" +877 93 optimizer """adam""" +877 93 training_loop """owa""" +877 93 negative_sampler """basic""" +877 93 evaluator """rankbased""" +877 94 dataset """kinships""" +877 94 model """transh""" +877 94 loss """bceaftersigmoid""" +877 94 regularizer """transh""" +877 94 optimizer """adam""" +877 94 training_loop """owa""" +877 94 negative_sampler """basic""" +877 94 evaluator """rankbased""" +877 95 dataset """kinships""" +877 95 model """transh""" +877 95 loss """bceaftersigmoid""" +877 95 regularizer """transh""" +877 95 optimizer """adam""" +877 95 training_loop """owa""" +877 95 negative_sampler """basic""" +877 95 evaluator """rankbased""" +877 96 dataset """kinships""" +877 96 model """transh""" +877 96 loss """bceaftersigmoid""" +877 96 regularizer """transh""" +877 96 optimizer """adam""" +877 96 training_loop """owa""" +877 96 negative_sampler """basic""" +877 96 evaluator """rankbased""" +877 97 dataset """kinships""" +877 97 model """transh""" +877 97 loss """bceaftersigmoid""" +877 97 regularizer """transh""" +877 97 optimizer """adam""" +877 97 training_loop """owa""" +877 
97 negative_sampler """basic""" +877 97 evaluator """rankbased""" +877 98 dataset """kinships""" +877 98 model """transh""" +877 98 loss """bceaftersigmoid""" +877 98 regularizer """transh""" +877 98 optimizer """adam""" +877 98 training_loop """owa""" +877 98 negative_sampler """basic""" +877 98 evaluator """rankbased""" +877 99 dataset """kinships""" +877 99 model """transh""" +877 99 loss """bceaftersigmoid""" +877 99 regularizer """transh""" +877 99 optimizer """adam""" +877 99 training_loop """owa""" +877 99 negative_sampler """basic""" +877 99 evaluator """rankbased""" +877 100 dataset """kinships""" +877 100 model """transh""" +877 100 loss """bceaftersigmoid""" +877 100 regularizer """transh""" +877 100 optimizer """adam""" +877 100 training_loop """owa""" +877 100 negative_sampler """basic""" +877 100 evaluator """rankbased""" +878 1 model.embedding_dim 0.0 +878 1 model.scoring_fct_norm 1.0 +878 1 regularizer.weight 0.24557356809300504 +878 1 optimizer.lr 0.05763919060951607 +878 1 negative_sampler.num_negs_per_pos 88.0 +878 1 training.batch_size 1.0 +878 2 model.embedding_dim 2.0 +878 2 model.scoring_fct_norm 1.0 +878 2 regularizer.weight 0.03689207634467754 +878 2 optimizer.lr 0.010350488803763967 +878 2 negative_sampler.num_negs_per_pos 63.0 +878 2 training.batch_size 2.0 +878 3 model.embedding_dim 2.0 +878 3 model.scoring_fct_norm 1.0 +878 3 regularizer.weight 0.016479130589110873 +878 3 optimizer.lr 0.005656650507420802 +878 3 negative_sampler.num_negs_per_pos 16.0 +878 3 training.batch_size 2.0 +878 4 model.embedding_dim 2.0 +878 4 model.scoring_fct_norm 2.0 +878 4 regularizer.weight 0.1473096529460781 +878 4 optimizer.lr 0.0013024970049148245 +878 4 negative_sampler.num_negs_per_pos 60.0 +878 4 training.batch_size 2.0 +878 5 model.embedding_dim 2.0 +878 5 model.scoring_fct_norm 1.0 +878 5 regularizer.weight 0.05215387845703181 +878 5 optimizer.lr 0.022669214883793986 +878 5 negative_sampler.num_negs_per_pos 13.0 +878 5 training.batch_size 2.0 +878 6 
model.embedding_dim 1.0 +878 6 model.scoring_fct_norm 2.0 +878 6 regularizer.weight 0.029393340691815678 +878 6 optimizer.lr 0.015592590300697517 +878 6 negative_sampler.num_negs_per_pos 86.0 +878 6 training.batch_size 1.0 +878 7 model.embedding_dim 1.0 +878 7 model.scoring_fct_norm 1.0 +878 7 regularizer.weight 0.196567817051091 +878 7 optimizer.lr 0.0010923915215068469 +878 7 negative_sampler.num_negs_per_pos 82.0 +878 7 training.batch_size 0.0 +878 8 model.embedding_dim 1.0 +878 8 model.scoring_fct_norm 1.0 +878 8 regularizer.weight 0.06650116836710096 +878 8 optimizer.lr 0.032012956562484665 +878 8 negative_sampler.num_negs_per_pos 11.0 +878 8 training.batch_size 1.0 +878 9 model.embedding_dim 2.0 +878 9 model.scoring_fct_norm 1.0 +878 9 regularizer.weight 0.014988287417013435 +878 9 optimizer.lr 0.008281841395662458 +878 9 negative_sampler.num_negs_per_pos 78.0 +878 9 training.batch_size 0.0 +878 10 model.embedding_dim 2.0 +878 10 model.scoring_fct_norm 1.0 +878 10 regularizer.weight 0.05059680174153562 +878 10 optimizer.lr 0.011859533856406704 +878 10 negative_sampler.num_negs_per_pos 66.0 +878 10 training.batch_size 0.0 +878 11 model.embedding_dim 2.0 +878 11 model.scoring_fct_norm 1.0 +878 11 regularizer.weight 0.01435042801639008 +878 11 optimizer.lr 0.0349172325985223 +878 11 negative_sampler.num_negs_per_pos 69.0 +878 11 training.batch_size 0.0 +878 12 model.embedding_dim 2.0 +878 12 model.scoring_fct_norm 2.0 +878 12 regularizer.weight 0.2831891417131453 +878 12 optimizer.lr 0.04122145823758564 +878 12 negative_sampler.num_negs_per_pos 17.0 +878 12 training.batch_size 1.0 +878 13 model.embedding_dim 0.0 +878 13 model.scoring_fct_norm 2.0 +878 13 regularizer.weight 0.10286471858807063 +878 13 optimizer.lr 0.004406833906763203 +878 13 negative_sampler.num_negs_per_pos 75.0 +878 13 training.batch_size 0.0 +878 14 model.embedding_dim 0.0 +878 14 model.scoring_fct_norm 1.0 +878 14 regularizer.weight 0.01946193887174146 +878 14 optimizer.lr 
0.021096914607965377 +878 14 negative_sampler.num_negs_per_pos 22.0 +878 14 training.batch_size 1.0 +878 15 model.embedding_dim 1.0 +878 15 model.scoring_fct_norm 1.0 +878 15 regularizer.weight 0.03666208058330958 +878 15 optimizer.lr 0.06292903055525428 +878 15 negative_sampler.num_negs_per_pos 86.0 +878 15 training.batch_size 2.0 +878 16 model.embedding_dim 1.0 +878 16 model.scoring_fct_norm 2.0 +878 16 regularizer.weight 0.0619358168429839 +878 16 optimizer.lr 0.01939096516027597 +878 16 negative_sampler.num_negs_per_pos 50.0 +878 16 training.batch_size 0.0 +878 17 model.embedding_dim 0.0 +878 17 model.scoring_fct_norm 2.0 +878 17 regularizer.weight 0.07594725996037789 +878 17 optimizer.lr 0.0670755021020601 +878 17 negative_sampler.num_negs_per_pos 48.0 +878 17 training.batch_size 0.0 +878 18 model.embedding_dim 1.0 +878 18 model.scoring_fct_norm 1.0 +878 18 regularizer.weight 0.0766349347918957 +878 18 optimizer.lr 0.02584599386504154 +878 18 negative_sampler.num_negs_per_pos 38.0 +878 18 training.batch_size 2.0 +878 19 model.embedding_dim 0.0 +878 19 model.scoring_fct_norm 1.0 +878 19 regularizer.weight 0.1513147570279817 +878 19 optimizer.lr 0.06199876859941083 +878 19 negative_sampler.num_negs_per_pos 94.0 +878 19 training.batch_size 0.0 +878 20 model.embedding_dim 2.0 +878 20 model.scoring_fct_norm 1.0 +878 20 regularizer.weight 0.17703799814153567 +878 20 optimizer.lr 0.0012208273288542278 +878 20 negative_sampler.num_negs_per_pos 89.0 +878 20 training.batch_size 1.0 +878 21 model.embedding_dim 0.0 +878 21 model.scoring_fct_norm 2.0 +878 21 regularizer.weight 0.15343795407156727 +878 21 optimizer.lr 0.04630598247613 +878 21 negative_sampler.num_negs_per_pos 29.0 +878 21 training.batch_size 2.0 +878 22 model.embedding_dim 2.0 +878 22 model.scoring_fct_norm 1.0 +878 22 regularizer.weight 0.04405287905511756 +878 22 optimizer.lr 0.004793717483104221 +878 22 negative_sampler.num_negs_per_pos 84.0 +878 22 training.batch_size 2.0 +878 23 model.embedding_dim 0.0 
+878 23 model.scoring_fct_norm 1.0 +878 23 regularizer.weight 0.16264407342308776 +878 23 optimizer.lr 0.0024439260554992444 +878 23 negative_sampler.num_negs_per_pos 24.0 +878 23 training.batch_size 0.0 +878 24 model.embedding_dim 0.0 +878 24 model.scoring_fct_norm 2.0 +878 24 regularizer.weight 0.15681043848184922 +878 24 optimizer.lr 0.005950517054376471 +878 24 negative_sampler.num_negs_per_pos 23.0 +878 24 training.batch_size 1.0 +878 25 model.embedding_dim 0.0 +878 25 model.scoring_fct_norm 2.0 +878 25 regularizer.weight 0.03551316741199454 +878 25 optimizer.lr 0.003552371032074142 +878 25 negative_sampler.num_negs_per_pos 0.0 +878 25 training.batch_size 1.0 +878 26 model.embedding_dim 1.0 +878 26 model.scoring_fct_norm 2.0 +878 26 regularizer.weight 0.12386233927553059 +878 26 optimizer.lr 0.0028507260187925518 +878 26 negative_sampler.num_negs_per_pos 74.0 +878 26 training.batch_size 2.0 +878 27 model.embedding_dim 2.0 +878 27 model.scoring_fct_norm 1.0 +878 27 regularizer.weight 0.07903624955243554 +878 27 optimizer.lr 0.0017301673548568734 +878 27 negative_sampler.num_negs_per_pos 20.0 +878 27 training.batch_size 1.0 +878 28 model.embedding_dim 1.0 +878 28 model.scoring_fct_norm 1.0 +878 28 regularizer.weight 0.12642345817836442 +878 28 optimizer.lr 0.006790139980933376 +878 28 negative_sampler.num_negs_per_pos 9.0 +878 28 training.batch_size 2.0 +878 29 model.embedding_dim 2.0 +878 29 model.scoring_fct_norm 1.0 +878 29 regularizer.weight 0.27393484534643026 +878 29 optimizer.lr 0.005645519224245916 +878 29 negative_sampler.num_negs_per_pos 67.0 +878 29 training.batch_size 2.0 +878 30 model.embedding_dim 2.0 +878 30 model.scoring_fct_norm 2.0 +878 30 regularizer.weight 0.0793036136568132 +878 30 optimizer.lr 0.0034448225342991676 +878 30 negative_sampler.num_negs_per_pos 91.0 +878 30 training.batch_size 1.0 +878 31 model.embedding_dim 2.0 +878 31 model.scoring_fct_norm 1.0 +878 31 regularizer.weight 0.07362130483021019 +878 31 optimizer.lr 
0.00878444906453187 +878 31 negative_sampler.num_negs_per_pos 86.0 +878 31 training.batch_size 0.0 +878 32 model.embedding_dim 0.0 +878 32 model.scoring_fct_norm 2.0 +878 32 regularizer.weight 0.02793571205135456 +878 32 optimizer.lr 0.005089102995642828 +878 32 negative_sampler.num_negs_per_pos 20.0 +878 32 training.batch_size 2.0 +878 33 model.embedding_dim 1.0 +878 33 model.scoring_fct_norm 2.0 +878 33 regularizer.weight 0.053963810858473536 +878 33 optimizer.lr 0.03697308633513331 +878 33 negative_sampler.num_negs_per_pos 29.0 +878 33 training.batch_size 0.0 +878 34 model.embedding_dim 2.0 +878 34 model.scoring_fct_norm 1.0 +878 34 regularizer.weight 0.026112205553119168 +878 34 optimizer.lr 0.0014147636321244783 +878 34 negative_sampler.num_negs_per_pos 38.0 +878 34 training.batch_size 1.0 +878 35 model.embedding_dim 0.0 +878 35 model.scoring_fct_norm 2.0 +878 35 regularizer.weight 0.046703223718676266 +878 35 optimizer.lr 0.0021672504937613394 +878 35 negative_sampler.num_negs_per_pos 51.0 +878 35 training.batch_size 1.0 +878 36 model.embedding_dim 2.0 +878 36 model.scoring_fct_norm 2.0 +878 36 regularizer.weight 0.14134965676094788 +878 36 optimizer.lr 0.028542541107371425 +878 36 negative_sampler.num_negs_per_pos 16.0 +878 36 training.batch_size 1.0 +878 37 model.embedding_dim 1.0 +878 37 model.scoring_fct_norm 2.0 +878 37 regularizer.weight 0.024169363879142855 +878 37 optimizer.lr 0.02408971520511181 +878 37 negative_sampler.num_negs_per_pos 83.0 +878 37 training.batch_size 2.0 +878 38 model.embedding_dim 0.0 +878 38 model.scoring_fct_norm 1.0 +878 38 regularizer.weight 0.013783974873474951 +878 38 optimizer.lr 0.014566027737550852 +878 38 negative_sampler.num_negs_per_pos 60.0 +878 38 training.batch_size 1.0 +878 39 model.embedding_dim 2.0 +878 39 model.scoring_fct_norm 2.0 +878 39 regularizer.weight 0.024235276974622855 +878 39 optimizer.lr 0.001347799352095259 +878 39 negative_sampler.num_negs_per_pos 20.0 +878 39 training.batch_size 0.0 +878 40 
model.embedding_dim 0.0 +878 40 model.scoring_fct_norm 1.0 +878 40 regularizer.weight 0.06222478770014963 +878 40 optimizer.lr 0.06972646466731178 +878 40 negative_sampler.num_negs_per_pos 61.0 +878 40 training.batch_size 1.0 +878 41 model.embedding_dim 0.0 +878 41 model.scoring_fct_norm 1.0 +878 41 regularizer.weight 0.0266203415832466 +878 41 optimizer.lr 0.0010291487888832677 +878 41 negative_sampler.num_negs_per_pos 34.0 +878 41 training.batch_size 1.0 +878 42 model.embedding_dim 2.0 +878 42 model.scoring_fct_norm 1.0 +878 42 regularizer.weight 0.12209190892142202 +878 42 optimizer.lr 0.09955474248954538 +878 42 negative_sampler.num_negs_per_pos 97.0 +878 42 training.batch_size 0.0 +878 43 model.embedding_dim 0.0 +878 43 model.scoring_fct_norm 1.0 +878 43 regularizer.weight 0.05753067489586402 +878 43 optimizer.lr 0.0010938664992651951 +878 43 negative_sampler.num_negs_per_pos 33.0 +878 43 training.batch_size 0.0 +878 44 model.embedding_dim 0.0 +878 44 model.scoring_fct_norm 1.0 +878 44 regularizer.weight 0.08899849851960333 +878 44 optimizer.lr 0.012108515282117075 +878 44 negative_sampler.num_negs_per_pos 94.0 +878 44 training.batch_size 2.0 +878 45 model.embedding_dim 1.0 +878 45 model.scoring_fct_norm 1.0 +878 45 regularizer.weight 0.010813072602360784 +878 45 optimizer.lr 0.014067385655280852 +878 45 negative_sampler.num_negs_per_pos 24.0 +878 45 training.batch_size 0.0 +878 46 model.embedding_dim 1.0 +878 46 model.scoring_fct_norm 2.0 +878 46 regularizer.weight 0.14583264871772844 +878 46 optimizer.lr 0.015302298197029869 +878 46 negative_sampler.num_negs_per_pos 91.0 +878 46 training.batch_size 2.0 +878 47 model.embedding_dim 1.0 +878 47 model.scoring_fct_norm 1.0 +878 47 regularizer.weight 0.24642761323635226 +878 47 optimizer.lr 0.02645796183757504 +878 47 negative_sampler.num_negs_per_pos 75.0 +878 47 training.batch_size 2.0 +878 48 model.embedding_dim 1.0 +878 48 model.scoring_fct_norm 1.0 +878 48 regularizer.weight 0.04298262509167951 +878 48 
optimizer.lr 0.028303752488764 +878 48 negative_sampler.num_negs_per_pos 82.0 +878 48 training.batch_size 2.0 +878 49 model.embedding_dim 1.0 +878 49 model.scoring_fct_norm 2.0 +878 49 regularizer.weight 0.11870987512888703 +878 49 optimizer.lr 0.08987814765754432 +878 49 negative_sampler.num_negs_per_pos 38.0 +878 49 training.batch_size 1.0 +878 50 model.embedding_dim 2.0 +878 50 model.scoring_fct_norm 2.0 +878 50 regularizer.weight 0.11943113006898201 +878 50 optimizer.lr 0.03383091020195139 +878 50 negative_sampler.num_negs_per_pos 95.0 +878 50 training.batch_size 1.0 +878 51 model.embedding_dim 1.0 +878 51 model.scoring_fct_norm 1.0 +878 51 regularizer.weight 0.03252119597200099 +878 51 optimizer.lr 0.07013861901908611 +878 51 negative_sampler.num_negs_per_pos 24.0 +878 51 training.batch_size 1.0 +878 52 model.embedding_dim 2.0 +878 52 model.scoring_fct_norm 1.0 +878 52 regularizer.weight 0.029328140231676035 +878 52 optimizer.lr 0.006045893278773873 +878 52 negative_sampler.num_negs_per_pos 91.0 +878 52 training.batch_size 1.0 +878 53 model.embedding_dim 2.0 +878 53 model.scoring_fct_norm 1.0 +878 53 regularizer.weight 0.13334602930332093 +878 53 optimizer.lr 0.0038455549134000693 +878 53 negative_sampler.num_negs_per_pos 45.0 +878 53 training.batch_size 2.0 +878 54 model.embedding_dim 1.0 +878 54 model.scoring_fct_norm 2.0 +878 54 regularizer.weight 0.16259758181118777 +878 54 optimizer.lr 0.043496128505726865 +878 54 negative_sampler.num_negs_per_pos 14.0 +878 54 training.batch_size 2.0 +878 55 model.embedding_dim 2.0 +878 55 model.scoring_fct_norm 1.0 +878 55 regularizer.weight 0.052314930946893094 +878 55 optimizer.lr 0.003302639237103943 +878 55 negative_sampler.num_negs_per_pos 26.0 +878 55 training.batch_size 1.0 +878 56 model.embedding_dim 1.0 +878 56 model.scoring_fct_norm 2.0 +878 56 regularizer.weight 0.23279510371875275 +878 56 optimizer.lr 0.06092929109576229 +878 56 negative_sampler.num_negs_per_pos 33.0 +878 56 training.batch_size 1.0 +878 57 
model.embedding_dim 1.0 +878 57 model.scoring_fct_norm 2.0 +878 57 regularizer.weight 0.032347214392239275 +878 57 optimizer.lr 0.06280809340889966 +878 57 negative_sampler.num_negs_per_pos 92.0 +878 57 training.batch_size 0.0 +878 58 model.embedding_dim 2.0 +878 58 model.scoring_fct_norm 1.0 +878 58 regularizer.weight 0.1869207923282755 +878 58 optimizer.lr 0.06994434410452285 +878 58 negative_sampler.num_negs_per_pos 73.0 +878 58 training.batch_size 2.0 +878 59 model.embedding_dim 0.0 +878 59 model.scoring_fct_norm 2.0 +878 59 regularizer.weight 0.02243354799278246 +878 59 optimizer.lr 0.09741629136992182 +878 59 negative_sampler.num_negs_per_pos 77.0 +878 59 training.batch_size 1.0 +878 60 model.embedding_dim 1.0 +878 60 model.scoring_fct_norm 1.0 +878 60 regularizer.weight 0.021317246194564737 +878 60 optimizer.lr 0.0025386900217945225 +878 60 negative_sampler.num_negs_per_pos 35.0 +878 60 training.batch_size 2.0 +878 61 model.embedding_dim 1.0 +878 61 model.scoring_fct_norm 2.0 +878 61 regularizer.weight 0.23239372465693725 +878 61 optimizer.lr 0.01954254923227688 +878 61 negative_sampler.num_negs_per_pos 91.0 +878 61 training.batch_size 1.0 +878 62 model.embedding_dim 2.0 +878 62 model.scoring_fct_norm 1.0 +878 62 regularizer.weight 0.04272438560683292 +878 62 optimizer.lr 0.015397838902757914 +878 62 negative_sampler.num_negs_per_pos 6.0 +878 62 training.batch_size 1.0 +878 63 model.embedding_dim 0.0 +878 63 model.scoring_fct_norm 1.0 +878 63 regularizer.weight 0.06513016527903695 +878 63 optimizer.lr 0.002294933161008375 +878 63 negative_sampler.num_negs_per_pos 24.0 +878 63 training.batch_size 2.0 +878 64 model.embedding_dim 2.0 +878 64 model.scoring_fct_norm 2.0 +878 64 regularizer.weight 0.16565506323604626 +878 64 optimizer.lr 0.001740834836081125 +878 64 negative_sampler.num_negs_per_pos 96.0 +878 64 training.batch_size 2.0 +878 65 model.embedding_dim 2.0 +878 65 model.scoring_fct_norm 2.0 +878 65 regularizer.weight 0.0856629002765863 +878 65 
optimizer.lr 0.0012208522998576662 +878 65 negative_sampler.num_negs_per_pos 53.0 +878 65 training.batch_size 0.0 +878 66 model.embedding_dim 1.0 +878 66 model.scoring_fct_norm 2.0 +878 66 regularizer.weight 0.02975905703639381 +878 66 optimizer.lr 0.0027030579393554867 +878 66 negative_sampler.num_negs_per_pos 78.0 +878 66 training.batch_size 2.0 +878 67 model.embedding_dim 0.0 +878 67 model.scoring_fct_norm 1.0 +878 67 regularizer.weight 0.17487730360942394 +878 67 optimizer.lr 0.0025393494262229864 +878 67 negative_sampler.num_negs_per_pos 65.0 +878 67 training.batch_size 2.0 +878 68 model.embedding_dim 1.0 +878 68 model.scoring_fct_norm 1.0 +878 68 regularizer.weight 0.01406861306673558 +878 68 optimizer.lr 0.0555638796150746 +878 68 negative_sampler.num_negs_per_pos 69.0 +878 68 training.batch_size 2.0 +878 69 model.embedding_dim 0.0 +878 69 model.scoring_fct_norm 1.0 +878 69 regularizer.weight 0.055186414655935295 +878 69 optimizer.lr 0.018323711882996026 +878 69 negative_sampler.num_negs_per_pos 18.0 +878 69 training.batch_size 0.0 +878 70 model.embedding_dim 2.0 +878 70 model.scoring_fct_norm 2.0 +878 70 regularizer.weight 0.29759088075690804 +878 70 optimizer.lr 0.0011998963143212819 +878 70 negative_sampler.num_negs_per_pos 44.0 +878 70 training.batch_size 2.0 +878 71 model.embedding_dim 2.0 +878 71 model.scoring_fct_norm 2.0 +878 71 regularizer.weight 0.014656734702578417 +878 71 optimizer.lr 0.0010291442908638953 +878 71 negative_sampler.num_negs_per_pos 61.0 +878 71 training.batch_size 1.0 +878 72 model.embedding_dim 1.0 +878 72 model.scoring_fct_norm 2.0 +878 72 regularizer.weight 0.010814568359607693 +878 72 optimizer.lr 0.028033034570720233 +878 72 negative_sampler.num_negs_per_pos 90.0 +878 72 training.batch_size 1.0 +878 73 model.embedding_dim 2.0 +878 73 model.scoring_fct_norm 1.0 +878 73 regularizer.weight 0.0494441804197282 +878 73 optimizer.lr 0.03172795088408137 +878 73 negative_sampler.num_negs_per_pos 30.0 +878 73 training.batch_size 0.0 
+878 74 model.embedding_dim 2.0 +878 74 model.scoring_fct_norm 2.0 +878 74 regularizer.weight 0.04558843863998434 +878 74 optimizer.lr 0.04155388222835131 +878 74 negative_sampler.num_negs_per_pos 4.0 +878 74 training.batch_size 0.0 +878 75 model.embedding_dim 0.0 +878 75 model.scoring_fct_norm 2.0 +878 75 regularizer.weight 0.014002791354124666 +878 75 optimizer.lr 0.07850990702525931 +878 75 negative_sampler.num_negs_per_pos 12.0 +878 75 training.batch_size 2.0 +878 76 model.embedding_dim 2.0 +878 76 model.scoring_fct_norm 2.0 +878 76 regularizer.weight 0.012138559227187587 +878 76 optimizer.lr 0.0016103443930860814 +878 76 negative_sampler.num_negs_per_pos 4.0 +878 76 training.batch_size 2.0 +878 77 model.embedding_dim 2.0 +878 77 model.scoring_fct_norm 2.0 +878 77 regularizer.weight 0.05044829915799134 +878 77 optimizer.lr 0.0035758237377698362 +878 77 negative_sampler.num_negs_per_pos 35.0 +878 77 training.batch_size 2.0 +878 78 model.embedding_dim 1.0 +878 78 model.scoring_fct_norm 1.0 +878 78 regularizer.weight 0.14741467924093848 +878 78 optimizer.lr 0.0039844453559639385 +878 78 negative_sampler.num_negs_per_pos 25.0 +878 78 training.batch_size 1.0 +878 79 model.embedding_dim 2.0 +878 79 model.scoring_fct_norm 1.0 +878 79 regularizer.weight 0.014664259024663069 +878 79 optimizer.lr 0.06235347281137393 +878 79 negative_sampler.num_negs_per_pos 17.0 +878 79 training.batch_size 0.0 +878 80 model.embedding_dim 0.0 +878 80 model.scoring_fct_norm 2.0 +878 80 regularizer.weight 0.0408412085524768 +878 80 optimizer.lr 0.004840166273899737 +878 80 negative_sampler.num_negs_per_pos 69.0 +878 80 training.batch_size 1.0 +878 81 model.embedding_dim 1.0 +878 81 model.scoring_fct_norm 2.0 +878 81 regularizer.weight 0.02926984682433089 +878 81 optimizer.lr 0.040573089703939115 +878 81 negative_sampler.num_negs_per_pos 34.0 +878 81 training.batch_size 1.0 +878 82 model.embedding_dim 0.0 +878 82 model.scoring_fct_norm 1.0 +878 82 regularizer.weight 0.014920219896874667 +878 
82 optimizer.lr 0.009005662908251527 +878 82 negative_sampler.num_negs_per_pos 8.0 +878 82 training.batch_size 1.0 +878 83 model.embedding_dim 0.0 +878 83 model.scoring_fct_norm 1.0 +878 83 regularizer.weight 0.10319263513501419 +878 83 optimizer.lr 0.01272399455103235 +878 83 negative_sampler.num_negs_per_pos 16.0 +878 83 training.batch_size 2.0 +878 84 model.embedding_dim 2.0 +878 84 model.scoring_fct_norm 2.0 +878 84 regularizer.weight 0.025538772162846075 +878 84 optimizer.lr 0.002399372412880842 +878 84 negative_sampler.num_negs_per_pos 37.0 +878 84 training.batch_size 0.0 +878 85 model.embedding_dim 2.0 +878 85 model.scoring_fct_norm 2.0 +878 85 regularizer.weight 0.015206301334263862 +878 85 optimizer.lr 0.09644143977422574 +878 85 negative_sampler.num_negs_per_pos 99.0 +878 85 training.batch_size 0.0 +878 86 model.embedding_dim 1.0 +878 86 model.scoring_fct_norm 1.0 +878 86 regularizer.weight 0.2870184741037749 +878 86 optimizer.lr 0.0847195123066239 +878 86 negative_sampler.num_negs_per_pos 68.0 +878 86 training.batch_size 1.0 +878 87 model.embedding_dim 2.0 +878 87 model.scoring_fct_norm 2.0 +878 87 regularizer.weight 0.13098544432739956 +878 87 optimizer.lr 0.006990534097541677 +878 87 negative_sampler.num_negs_per_pos 10.0 +878 87 training.batch_size 0.0 +878 88 model.embedding_dim 0.0 +878 88 model.scoring_fct_norm 2.0 +878 88 regularizer.weight 0.06199886055331584 +878 88 optimizer.lr 0.0013364520369563227 +878 88 negative_sampler.num_negs_per_pos 75.0 +878 88 training.batch_size 0.0 +878 89 model.embedding_dim 2.0 +878 89 model.scoring_fct_norm 2.0 +878 89 regularizer.weight 0.2699187176742102 +878 89 optimizer.lr 0.00114000750611511 +878 89 negative_sampler.num_negs_per_pos 15.0 +878 89 training.batch_size 2.0 +878 90 model.embedding_dim 0.0 +878 90 model.scoring_fct_norm 1.0 +878 90 regularizer.weight 0.17948369397729003 +878 90 optimizer.lr 0.03726110364894623 +878 90 negative_sampler.num_negs_per_pos 47.0 +878 90 training.batch_size 1.0 +878 91 
model.embedding_dim 0.0 +878 91 model.scoring_fct_norm 2.0 +878 91 regularizer.weight 0.010426511072056567 +878 91 optimizer.lr 0.04370976588456389 +878 91 negative_sampler.num_negs_per_pos 37.0 +878 91 training.batch_size 2.0 +878 92 model.embedding_dim 1.0 +878 92 model.scoring_fct_norm 1.0 +878 92 regularizer.weight 0.18710042497010945 +878 92 optimizer.lr 0.032238701148441996 +878 92 negative_sampler.num_negs_per_pos 2.0 +878 92 training.batch_size 0.0 +878 93 model.embedding_dim 2.0 +878 93 model.scoring_fct_norm 1.0 +878 93 regularizer.weight 0.033380550772916434 +878 93 optimizer.lr 0.001471755135309602 +878 93 negative_sampler.num_negs_per_pos 99.0 +878 93 training.batch_size 1.0 +878 94 model.embedding_dim 1.0 +878 94 model.scoring_fct_norm 2.0 +878 94 regularizer.weight 0.052663988858419 +878 94 optimizer.lr 0.0021050557435649715 +878 94 negative_sampler.num_negs_per_pos 7.0 +878 94 training.batch_size 0.0 +878 95 model.embedding_dim 0.0 +878 95 model.scoring_fct_norm 2.0 +878 95 regularizer.weight 0.20437658109456477 +878 95 optimizer.lr 0.01871289837425086 +878 95 negative_sampler.num_negs_per_pos 50.0 +878 95 training.batch_size 2.0 +878 96 model.embedding_dim 0.0 +878 96 model.scoring_fct_norm 2.0 +878 96 regularizer.weight 0.019547411038683172 +878 96 optimizer.lr 0.001342458377300999 +878 96 negative_sampler.num_negs_per_pos 88.0 +878 96 training.batch_size 2.0 +878 97 model.embedding_dim 0.0 +878 97 model.scoring_fct_norm 1.0 +878 97 regularizer.weight 0.058071268988942715 +878 97 optimizer.lr 0.0019129746092901127 +878 97 negative_sampler.num_negs_per_pos 0.0 +878 97 training.batch_size 1.0 +878 98 model.embedding_dim 0.0 +878 98 model.scoring_fct_norm 1.0 +878 98 regularizer.weight 0.21704595387818554 +878 98 optimizer.lr 0.05595177440977425 +878 98 negative_sampler.num_negs_per_pos 65.0 +878 98 training.batch_size 0.0 +878 99 model.embedding_dim 0.0 +878 99 model.scoring_fct_norm 1.0 +878 99 regularizer.weight 0.04674853843756812 +878 99 
optimizer.lr 0.016333755489083585 +878 99 negative_sampler.num_negs_per_pos 17.0 +878 99 training.batch_size 1.0 +878 100 model.embedding_dim 1.0 +878 100 model.scoring_fct_norm 2.0 +878 100 regularizer.weight 0.03167992066268923 +878 100 optimizer.lr 0.009724558257696855 +878 100 negative_sampler.num_negs_per_pos 64.0 +878 100 training.batch_size 2.0 +878 1 dataset """kinships""" +878 1 model """transh""" +878 1 loss """softplus""" +878 1 regularizer """transh""" +878 1 optimizer """adam""" +878 1 training_loop """owa""" +878 1 negative_sampler """basic""" +878 1 evaluator """rankbased""" +878 2 dataset """kinships""" +878 2 model """transh""" +878 2 loss """softplus""" +878 2 regularizer """transh""" +878 2 optimizer """adam""" +878 2 training_loop """owa""" +878 2 negative_sampler """basic""" +878 2 evaluator """rankbased""" +878 3 dataset """kinships""" +878 3 model """transh""" +878 3 loss """softplus""" +878 3 regularizer """transh""" +878 3 optimizer """adam""" +878 3 training_loop """owa""" +878 3 negative_sampler """basic""" +878 3 evaluator """rankbased""" +878 4 dataset """kinships""" +878 4 model """transh""" +878 4 loss """softplus""" +878 4 regularizer """transh""" +878 4 optimizer """adam""" +878 4 training_loop """owa""" +878 4 negative_sampler """basic""" +878 4 evaluator """rankbased""" +878 5 dataset """kinships""" +878 5 model """transh""" +878 5 loss """softplus""" +878 5 regularizer """transh""" +878 5 optimizer """adam""" +878 5 training_loop """owa""" +878 5 negative_sampler """basic""" +878 5 evaluator """rankbased""" +878 6 dataset """kinships""" +878 6 model """transh""" +878 6 loss """softplus""" +878 6 regularizer """transh""" +878 6 optimizer """adam""" +878 6 training_loop """owa""" +878 6 negative_sampler """basic""" +878 6 evaluator """rankbased""" +878 7 dataset """kinships""" +878 7 model """transh""" +878 7 loss """softplus""" +878 7 regularizer """transh""" +878 7 optimizer """adam""" +878 7 training_loop """owa""" +878 7 
negative_sampler """basic""" +878 7 evaluator """rankbased""" +878 8 dataset """kinships""" +878 8 model """transh""" +878 8 loss """softplus""" +878 8 regularizer """transh""" +878 8 optimizer """adam""" +878 8 training_loop """owa""" +878 8 negative_sampler """basic""" +878 8 evaluator """rankbased""" +878 9 dataset """kinships""" +878 9 model """transh""" +878 9 loss """softplus""" +878 9 regularizer """transh""" +878 9 optimizer """adam""" +878 9 training_loop """owa""" +878 9 negative_sampler """basic""" +878 9 evaluator """rankbased""" +878 10 dataset """kinships""" +878 10 model """transh""" +878 10 loss """softplus""" +878 10 regularizer """transh""" +878 10 optimizer """adam""" +878 10 training_loop """owa""" +878 10 negative_sampler """basic""" +878 10 evaluator """rankbased""" +878 11 dataset """kinships""" +878 11 model """transh""" +878 11 loss """softplus""" +878 11 regularizer """transh""" +878 11 optimizer """adam""" +878 11 training_loop """owa""" +878 11 negative_sampler """basic""" +878 11 evaluator """rankbased""" +878 12 dataset """kinships""" +878 12 model """transh""" +878 12 loss """softplus""" +878 12 regularizer """transh""" +878 12 optimizer """adam""" +878 12 training_loop """owa""" +878 12 negative_sampler """basic""" +878 12 evaluator """rankbased""" +878 13 dataset """kinships""" +878 13 model """transh""" +878 13 loss """softplus""" +878 13 regularizer """transh""" +878 13 optimizer """adam""" +878 13 training_loop """owa""" +878 13 negative_sampler """basic""" +878 13 evaluator """rankbased""" +878 14 dataset """kinships""" +878 14 model """transh""" +878 14 loss """softplus""" +878 14 regularizer """transh""" +878 14 optimizer """adam""" +878 14 training_loop """owa""" +878 14 negative_sampler """basic""" +878 14 evaluator """rankbased""" +878 15 dataset """kinships""" +878 15 model """transh""" +878 15 loss """softplus""" +878 15 regularizer """transh""" +878 15 optimizer """adam""" +878 15 training_loop """owa""" +878 15 
negative_sampler """basic""" +878 15 evaluator """rankbased""" +878 16 dataset """kinships""" +878 16 model """transh""" +878 16 loss """softplus""" +878 16 regularizer """transh""" +878 16 optimizer """adam""" +878 16 training_loop """owa""" +878 16 negative_sampler """basic""" +878 16 evaluator """rankbased""" +878 17 dataset """kinships""" +878 17 model """transh""" +878 17 loss """softplus""" +878 17 regularizer """transh""" +878 17 optimizer """adam""" +878 17 training_loop """owa""" +878 17 negative_sampler """basic""" +878 17 evaluator """rankbased""" +878 18 dataset """kinships""" +878 18 model """transh""" +878 18 loss """softplus""" +878 18 regularizer """transh""" +878 18 optimizer """adam""" +878 18 training_loop """owa""" +878 18 negative_sampler """basic""" +878 18 evaluator """rankbased""" +878 19 dataset """kinships""" +878 19 model """transh""" +878 19 loss """softplus""" +878 19 regularizer """transh""" +878 19 optimizer """adam""" +878 19 training_loop """owa""" +878 19 negative_sampler """basic""" +878 19 evaluator """rankbased""" +878 20 dataset """kinships""" +878 20 model """transh""" +878 20 loss """softplus""" +878 20 regularizer """transh""" +878 20 optimizer """adam""" +878 20 training_loop """owa""" +878 20 negative_sampler """basic""" +878 20 evaluator """rankbased""" +878 21 dataset """kinships""" +878 21 model """transh""" +878 21 loss """softplus""" +878 21 regularizer """transh""" +878 21 optimizer """adam""" +878 21 training_loop """owa""" +878 21 negative_sampler """basic""" +878 21 evaluator """rankbased""" +878 22 dataset """kinships""" +878 22 model """transh""" +878 22 loss """softplus""" +878 22 regularizer """transh""" +878 22 optimizer """adam""" +878 22 training_loop """owa""" +878 22 negative_sampler """basic""" +878 22 evaluator """rankbased""" +878 23 dataset """kinships""" +878 23 model """transh""" +878 23 loss """softplus""" +878 23 regularizer """transh""" +878 23 optimizer """adam""" +878 23 training_loop """owa""" 
+878 23 negative_sampler """basic""" +878 23 evaluator """rankbased""" +878 24 dataset """kinships""" +878 24 model """transh""" +878 24 loss """softplus""" +878 24 regularizer """transh""" +878 24 optimizer """adam""" +878 24 training_loop """owa""" +878 24 negative_sampler """basic""" +878 24 evaluator """rankbased""" +878 25 dataset """kinships""" +878 25 model """transh""" +878 25 loss """softplus""" +878 25 regularizer """transh""" +878 25 optimizer """adam""" +878 25 training_loop """owa""" +878 25 negative_sampler """basic""" +878 25 evaluator """rankbased""" +878 26 dataset """kinships""" +878 26 model """transh""" +878 26 loss """softplus""" +878 26 regularizer """transh""" +878 26 optimizer """adam""" +878 26 training_loop """owa""" +878 26 negative_sampler """basic""" +878 26 evaluator """rankbased""" +878 27 dataset """kinships""" +878 27 model """transh""" +878 27 loss """softplus""" +878 27 regularizer """transh""" +878 27 optimizer """adam""" +878 27 training_loop """owa""" +878 27 negative_sampler """basic""" +878 27 evaluator """rankbased""" +878 28 dataset """kinships""" +878 28 model """transh""" +878 28 loss """softplus""" +878 28 regularizer """transh""" +878 28 optimizer """adam""" +878 28 training_loop """owa""" +878 28 negative_sampler """basic""" +878 28 evaluator """rankbased""" +878 29 dataset """kinships""" +878 29 model """transh""" +878 29 loss """softplus""" +878 29 regularizer """transh""" +878 29 optimizer """adam""" +878 29 training_loop """owa""" +878 29 negative_sampler """basic""" +878 29 evaluator """rankbased""" +878 30 dataset """kinships""" +878 30 model """transh""" +878 30 loss """softplus""" +878 30 regularizer """transh""" +878 30 optimizer """adam""" +878 30 training_loop """owa""" +878 30 negative_sampler """basic""" +878 30 evaluator """rankbased""" +878 31 dataset """kinships""" +878 31 model """transh""" +878 31 loss """softplus""" +878 31 regularizer """transh""" +878 31 optimizer """adam""" +878 31 training_loop 
"""owa""" +878 31 negative_sampler """basic""" +878 31 evaluator """rankbased""" +878 32 dataset """kinships""" +878 32 model """transh""" +878 32 loss """softplus""" +878 32 regularizer """transh""" +878 32 optimizer """adam""" +878 32 training_loop """owa""" +878 32 negative_sampler """basic""" +878 32 evaluator """rankbased""" +878 33 dataset """kinships""" +878 33 model """transh""" +878 33 loss """softplus""" +878 33 regularizer """transh""" +878 33 optimizer """adam""" +878 33 training_loop """owa""" +878 33 negative_sampler """basic""" +878 33 evaluator """rankbased""" +878 34 dataset """kinships""" +878 34 model """transh""" +878 34 loss """softplus""" +878 34 regularizer """transh""" +878 34 optimizer """adam""" +878 34 training_loop """owa""" +878 34 negative_sampler """basic""" +878 34 evaluator """rankbased""" +878 35 dataset """kinships""" +878 35 model """transh""" +878 35 loss """softplus""" +878 35 regularizer """transh""" +878 35 optimizer """adam""" +878 35 training_loop """owa""" +878 35 negative_sampler """basic""" +878 35 evaluator """rankbased""" +878 36 dataset """kinships""" +878 36 model """transh""" +878 36 loss """softplus""" +878 36 regularizer """transh""" +878 36 optimizer """adam""" +878 36 training_loop """owa""" +878 36 negative_sampler """basic""" +878 36 evaluator """rankbased""" +878 37 dataset """kinships""" +878 37 model """transh""" +878 37 loss """softplus""" +878 37 regularizer """transh""" +878 37 optimizer """adam""" +878 37 training_loop """owa""" +878 37 negative_sampler """basic""" +878 37 evaluator """rankbased""" +878 38 dataset """kinships""" +878 38 model """transh""" +878 38 loss """softplus""" +878 38 regularizer """transh""" +878 38 optimizer """adam""" +878 38 training_loop """owa""" +878 38 negative_sampler """basic""" +878 38 evaluator """rankbased""" +878 39 dataset """kinships""" +878 39 model """transh""" +878 39 loss """softplus""" +878 39 regularizer """transh""" +878 39 optimizer """adam""" +878 39 
training_loop """owa""" +878 39 negative_sampler """basic""" +878 39 evaluator """rankbased""" +878 40 dataset """kinships""" +878 40 model """transh""" +878 40 loss """softplus""" +878 40 regularizer """transh""" +878 40 optimizer """adam""" +878 40 training_loop """owa""" +878 40 negative_sampler """basic""" +878 40 evaluator """rankbased""" +878 41 dataset """kinships""" +878 41 model """transh""" +878 41 loss """softplus""" +878 41 regularizer """transh""" +878 41 optimizer """adam""" +878 41 training_loop """owa""" +878 41 negative_sampler """basic""" +878 41 evaluator """rankbased""" +878 42 dataset """kinships""" +878 42 model """transh""" +878 42 loss """softplus""" +878 42 regularizer """transh""" +878 42 optimizer """adam""" +878 42 training_loop """owa""" +878 42 negative_sampler """basic""" +878 42 evaluator """rankbased""" +878 43 dataset """kinships""" +878 43 model """transh""" +878 43 loss """softplus""" +878 43 regularizer """transh""" +878 43 optimizer """adam""" +878 43 training_loop """owa""" +878 43 negative_sampler """basic""" +878 43 evaluator """rankbased""" +878 44 dataset """kinships""" +878 44 model """transh""" +878 44 loss """softplus""" +878 44 regularizer """transh""" +878 44 optimizer """adam""" +878 44 training_loop """owa""" +878 44 negative_sampler """basic""" +878 44 evaluator """rankbased""" +878 45 dataset """kinships""" +878 45 model """transh""" +878 45 loss """softplus""" +878 45 regularizer """transh""" +878 45 optimizer """adam""" +878 45 training_loop """owa""" +878 45 negative_sampler """basic""" +878 45 evaluator """rankbased""" +878 46 dataset """kinships""" +878 46 model """transh""" +878 46 loss """softplus""" +878 46 regularizer """transh""" +878 46 optimizer """adam""" +878 46 training_loop """owa""" +878 46 negative_sampler """basic""" +878 46 evaluator """rankbased""" +878 47 dataset """kinships""" +878 47 model """transh""" +878 47 loss """softplus""" +878 47 regularizer """transh""" +878 47 optimizer """adam""" 
+878 47 training_loop """owa""" +878 47 negative_sampler """basic""" +878 47 evaluator """rankbased""" +878 48 dataset """kinships""" +878 48 model """transh""" +878 48 loss """softplus""" +878 48 regularizer """transh""" +878 48 optimizer """adam""" +878 48 training_loop """owa""" +878 48 negative_sampler """basic""" +878 48 evaluator """rankbased""" +878 49 dataset """kinships""" +878 49 model """transh""" +878 49 loss """softplus""" +878 49 regularizer """transh""" +878 49 optimizer """adam""" +878 49 training_loop """owa""" +878 49 negative_sampler """basic""" +878 49 evaluator """rankbased""" +878 50 dataset """kinships""" +878 50 model """transh""" +878 50 loss """softplus""" +878 50 regularizer """transh""" +878 50 optimizer """adam""" +878 50 training_loop """owa""" +878 50 negative_sampler """basic""" +878 50 evaluator """rankbased""" +878 51 dataset """kinships""" +878 51 model """transh""" +878 51 loss """softplus""" +878 51 regularizer """transh""" +878 51 optimizer """adam""" +878 51 training_loop """owa""" +878 51 negative_sampler """basic""" +878 51 evaluator """rankbased""" +878 52 dataset """kinships""" +878 52 model """transh""" +878 52 loss """softplus""" +878 52 regularizer """transh""" +878 52 optimizer """adam""" +878 52 training_loop """owa""" +878 52 negative_sampler """basic""" +878 52 evaluator """rankbased""" +878 53 dataset """kinships""" +878 53 model """transh""" +878 53 loss """softplus""" +878 53 regularizer """transh""" +878 53 optimizer """adam""" +878 53 training_loop """owa""" +878 53 negative_sampler """basic""" +878 53 evaluator """rankbased""" +878 54 dataset """kinships""" +878 54 model """transh""" +878 54 loss """softplus""" +878 54 regularizer """transh""" +878 54 optimizer """adam""" +878 54 training_loop """owa""" +878 54 negative_sampler """basic""" +878 54 evaluator """rankbased""" +878 55 dataset """kinships""" +878 55 model """transh""" +878 55 loss """softplus""" +878 55 regularizer """transh""" +878 55 optimizer 
"""adam""" +878 55 training_loop """owa""" +878 55 negative_sampler """basic""" +878 55 evaluator """rankbased""" +878 56 dataset """kinships""" +878 56 model """transh""" +878 56 loss """softplus""" +878 56 regularizer """transh""" +878 56 optimizer """adam""" +878 56 training_loop """owa""" +878 56 negative_sampler """basic""" +878 56 evaluator """rankbased""" +878 57 dataset """kinships""" +878 57 model """transh""" +878 57 loss """softplus""" +878 57 regularizer """transh""" +878 57 optimizer """adam""" +878 57 training_loop """owa""" +878 57 negative_sampler """basic""" +878 57 evaluator """rankbased""" +878 58 dataset """kinships""" +878 58 model """transh""" +878 58 loss """softplus""" +878 58 regularizer """transh""" +878 58 optimizer """adam""" +878 58 training_loop """owa""" +878 58 negative_sampler """basic""" +878 58 evaluator """rankbased""" +878 59 dataset """kinships""" +878 59 model """transh""" +878 59 loss """softplus""" +878 59 regularizer """transh""" +878 59 optimizer """adam""" +878 59 training_loop """owa""" +878 59 negative_sampler """basic""" +878 59 evaluator """rankbased""" +878 60 dataset """kinships""" +878 60 model """transh""" +878 60 loss """softplus""" +878 60 regularizer """transh""" +878 60 optimizer """adam""" +878 60 training_loop """owa""" +878 60 negative_sampler """basic""" +878 60 evaluator """rankbased""" +878 61 dataset """kinships""" +878 61 model """transh""" +878 61 loss """softplus""" +878 61 regularizer """transh""" +878 61 optimizer """adam""" +878 61 training_loop """owa""" +878 61 negative_sampler """basic""" +878 61 evaluator """rankbased""" +878 62 dataset """kinships""" +878 62 model """transh""" +878 62 loss """softplus""" +878 62 regularizer """transh""" +878 62 optimizer """adam""" +878 62 training_loop """owa""" +878 62 negative_sampler """basic""" +878 62 evaluator """rankbased""" +878 63 dataset """kinships""" +878 63 model """transh""" +878 63 loss """softplus""" +878 63 regularizer """transh""" +878 63 
optimizer """adam""" +878 63 training_loop """owa""" +878 63 negative_sampler """basic""" +878 63 evaluator """rankbased""" +878 64 dataset """kinships""" +878 64 model """transh""" +878 64 loss """softplus""" +878 64 regularizer """transh""" +878 64 optimizer """adam""" +878 64 training_loop """owa""" +878 64 negative_sampler """basic""" +878 64 evaluator """rankbased""" +878 65 dataset """kinships""" +878 65 model """transh""" +878 65 loss """softplus""" +878 65 regularizer """transh""" +878 65 optimizer """adam""" +878 65 training_loop """owa""" +878 65 negative_sampler """basic""" +878 65 evaluator """rankbased""" +878 66 dataset """kinships""" +878 66 model """transh""" +878 66 loss """softplus""" +878 66 regularizer """transh""" +878 66 optimizer """adam""" +878 66 training_loop """owa""" +878 66 negative_sampler """basic""" +878 66 evaluator """rankbased""" +878 67 dataset """kinships""" +878 67 model """transh""" +878 67 loss """softplus""" +878 67 regularizer """transh""" +878 67 optimizer """adam""" +878 67 training_loop """owa""" +878 67 negative_sampler """basic""" +878 67 evaluator """rankbased""" +878 68 dataset """kinships""" +878 68 model """transh""" +878 68 loss """softplus""" +878 68 regularizer """transh""" +878 68 optimizer """adam""" +878 68 training_loop """owa""" +878 68 negative_sampler """basic""" +878 68 evaluator """rankbased""" +878 69 dataset """kinships""" +878 69 model """transh""" +878 69 loss """softplus""" +878 69 regularizer """transh""" +878 69 optimizer """adam""" +878 69 training_loop """owa""" +878 69 negative_sampler """basic""" +878 69 evaluator """rankbased""" +878 70 dataset """kinships""" +878 70 model """transh""" +878 70 loss """softplus""" +878 70 regularizer """transh""" +878 70 optimizer """adam""" +878 70 training_loop """owa""" +878 70 negative_sampler """basic""" +878 70 evaluator """rankbased""" +878 71 dataset """kinships""" +878 71 model """transh""" +878 71 loss """softplus""" +878 71 regularizer """transh""" 
+878 71 optimizer """adam""" +878 71 training_loop """owa""" +878 71 negative_sampler """basic""" +878 71 evaluator """rankbased""" +878 72 dataset """kinships""" +878 72 model """transh""" +878 72 loss """softplus""" +878 72 regularizer """transh""" +878 72 optimizer """adam""" +878 72 training_loop """owa""" +878 72 negative_sampler """basic""" +878 72 evaluator """rankbased""" +878 73 dataset """kinships""" +878 73 model """transh""" +878 73 loss """softplus""" +878 73 regularizer """transh""" +878 73 optimizer """adam""" +878 73 training_loop """owa""" +878 73 negative_sampler """basic""" +878 73 evaluator """rankbased""" +878 74 dataset """kinships""" +878 74 model """transh""" +878 74 loss """softplus""" +878 74 regularizer """transh""" +878 74 optimizer """adam""" +878 74 training_loop """owa""" +878 74 negative_sampler """basic""" +878 74 evaluator """rankbased""" +878 75 dataset """kinships""" +878 75 model """transh""" +878 75 loss """softplus""" +878 75 regularizer """transh""" +878 75 optimizer """adam""" +878 75 training_loop """owa""" +878 75 negative_sampler """basic""" +878 75 evaluator """rankbased""" +878 76 dataset """kinships""" +878 76 model """transh""" +878 76 loss """softplus""" +878 76 regularizer """transh""" +878 76 optimizer """adam""" +878 76 training_loop """owa""" +878 76 negative_sampler """basic""" +878 76 evaluator """rankbased""" +878 77 dataset """kinships""" +878 77 model """transh""" +878 77 loss """softplus""" +878 77 regularizer """transh""" +878 77 optimizer """adam""" +878 77 training_loop """owa""" +878 77 negative_sampler """basic""" +878 77 evaluator """rankbased""" +878 78 dataset """kinships""" +878 78 model """transh""" +878 78 loss """softplus""" +878 78 regularizer """transh""" +878 78 optimizer """adam""" +878 78 training_loop """owa""" +878 78 negative_sampler """basic""" +878 78 evaluator """rankbased""" +878 79 dataset """kinships""" +878 79 model """transh""" +878 79 loss """softplus""" +878 79 regularizer 
"""transh""" +878 79 optimizer """adam""" +878 79 training_loop """owa""" +878 79 negative_sampler """basic""" +878 79 evaluator """rankbased""" +878 80 dataset """kinships""" +878 80 model """transh""" +878 80 loss """softplus""" +878 80 regularizer """transh""" +878 80 optimizer """adam""" +878 80 training_loop """owa""" +878 80 negative_sampler """basic""" +878 80 evaluator """rankbased""" +878 81 dataset """kinships""" +878 81 model """transh""" +878 81 loss """softplus""" +878 81 regularizer """transh""" +878 81 optimizer """adam""" +878 81 training_loop """owa""" +878 81 negative_sampler """basic""" +878 81 evaluator """rankbased""" +878 82 dataset """kinships""" +878 82 model """transh""" +878 82 loss """softplus""" +878 82 regularizer """transh""" +878 82 optimizer """adam""" +878 82 training_loop """owa""" +878 82 negative_sampler """basic""" +878 82 evaluator """rankbased""" +878 83 dataset """kinships""" +878 83 model """transh""" +878 83 loss """softplus""" +878 83 regularizer """transh""" +878 83 optimizer """adam""" +878 83 training_loop """owa""" +878 83 negative_sampler """basic""" +878 83 evaluator """rankbased""" +878 84 dataset """kinships""" +878 84 model """transh""" +878 84 loss """softplus""" +878 84 regularizer """transh""" +878 84 optimizer """adam""" +878 84 training_loop """owa""" +878 84 negative_sampler """basic""" +878 84 evaluator """rankbased""" +878 85 dataset """kinships""" +878 85 model """transh""" +878 85 loss """softplus""" +878 85 regularizer """transh""" +878 85 optimizer """adam""" +878 85 training_loop """owa""" +878 85 negative_sampler """basic""" +878 85 evaluator """rankbased""" +878 86 dataset """kinships""" +878 86 model """transh""" +878 86 loss """softplus""" +878 86 regularizer """transh""" +878 86 optimizer """adam""" +878 86 training_loop """owa""" +878 86 negative_sampler """basic""" +878 86 evaluator """rankbased""" +878 87 dataset """kinships""" +878 87 model """transh""" +878 87 loss """softplus""" +878 87 
regularizer """transh""" +878 87 optimizer """adam""" +878 87 training_loop """owa""" +878 87 negative_sampler """basic""" +878 87 evaluator """rankbased""" +878 88 dataset """kinships""" +878 88 model """transh""" +878 88 loss """softplus""" +878 88 regularizer """transh""" +878 88 optimizer """adam""" +878 88 training_loop """owa""" +878 88 negative_sampler """basic""" +878 88 evaluator """rankbased""" +878 89 dataset """kinships""" +878 89 model """transh""" +878 89 loss """softplus""" +878 89 regularizer """transh""" +878 89 optimizer """adam""" +878 89 training_loop """owa""" +878 89 negative_sampler """basic""" +878 89 evaluator """rankbased""" +878 90 dataset """kinships""" +878 90 model """transh""" +878 90 loss """softplus""" +878 90 regularizer """transh""" +878 90 optimizer """adam""" +878 90 training_loop """owa""" +878 90 negative_sampler """basic""" +878 90 evaluator """rankbased""" +878 91 dataset """kinships""" +878 91 model """transh""" +878 91 loss """softplus""" +878 91 regularizer """transh""" +878 91 optimizer """adam""" +878 91 training_loop """owa""" +878 91 negative_sampler """basic""" +878 91 evaluator """rankbased""" +878 92 dataset """kinships""" +878 92 model """transh""" +878 92 loss """softplus""" +878 92 regularizer """transh""" +878 92 optimizer """adam""" +878 92 training_loop """owa""" +878 92 negative_sampler """basic""" +878 92 evaluator """rankbased""" +878 93 dataset """kinships""" +878 93 model """transh""" +878 93 loss """softplus""" +878 93 regularizer """transh""" +878 93 optimizer """adam""" +878 93 training_loop """owa""" +878 93 negative_sampler """basic""" +878 93 evaluator """rankbased""" +878 94 dataset """kinships""" +878 94 model """transh""" +878 94 loss """softplus""" +878 94 regularizer """transh""" +878 94 optimizer """adam""" +878 94 training_loop """owa""" +878 94 negative_sampler """basic""" +878 94 evaluator """rankbased""" +878 95 dataset """kinships""" +878 95 model """transh""" +878 95 loss """softplus""" 
+878 95 regularizer """transh""" +878 95 optimizer """adam""" +878 95 training_loop """owa""" +878 95 negative_sampler """basic""" +878 95 evaluator """rankbased""" +878 96 dataset """kinships""" +878 96 model """transh""" +878 96 loss """softplus""" +878 96 regularizer """transh""" +878 96 optimizer """adam""" +878 96 training_loop """owa""" +878 96 negative_sampler """basic""" +878 96 evaluator """rankbased""" +878 97 dataset """kinships""" +878 97 model """transh""" +878 97 loss """softplus""" +878 97 regularizer """transh""" +878 97 optimizer """adam""" +878 97 training_loop """owa""" +878 97 negative_sampler """basic""" +878 97 evaluator """rankbased""" +878 98 dataset """kinships""" +878 98 model """transh""" +878 98 loss """softplus""" +878 98 regularizer """transh""" +878 98 optimizer """adam""" +878 98 training_loop """owa""" +878 98 negative_sampler """basic""" +878 98 evaluator """rankbased""" +878 99 dataset """kinships""" +878 99 model """transh""" +878 99 loss """softplus""" +878 99 regularizer """transh""" +878 99 optimizer """adam""" +878 99 training_loop """owa""" +878 99 negative_sampler """basic""" +878 99 evaluator """rankbased""" +878 100 dataset """kinships""" +878 100 model """transh""" +878 100 loss """softplus""" +878 100 regularizer """transh""" +878 100 optimizer """adam""" +878 100 training_loop """owa""" +878 100 negative_sampler """basic""" +878 100 evaluator """rankbased""" +879 1 model.embedding_dim 0.0 +879 1 model.scoring_fct_norm 1.0 +879 1 loss.margin 5.40544821152139 +879 1 regularizer.weight 0.016834431650298468 +879 1 optimizer.lr 0.0013529457096515024 +879 1 negative_sampler.num_negs_per_pos 71.0 +879 1 training.batch_size 1.0 +879 2 model.embedding_dim 2.0 +879 2 model.scoring_fct_norm 2.0 +879 2 loss.margin 7.948372686445888 +879 2 regularizer.weight 0.0462668960435148 +879 2 optimizer.lr 0.0019777265972597464 +879 2 negative_sampler.num_negs_per_pos 39.0 +879 2 training.batch_size 1.0 +879 3 model.embedding_dim 2.0 +879 3 
model.scoring_fct_norm 2.0 +879 3 loss.margin 9.680039889440993 +879 3 regularizer.weight 0.04678851531325834 +879 3 optimizer.lr 0.021469230840178617 +879 3 negative_sampler.num_negs_per_pos 36.0 +879 3 training.batch_size 0.0 +879 4 model.embedding_dim 1.0 +879 4 model.scoring_fct_norm 1.0 +879 4 loss.margin 4.01126519510563 +879 4 regularizer.weight 0.0936764238739268 +879 4 optimizer.lr 0.0064542158973641866 +879 4 negative_sampler.num_negs_per_pos 71.0 +879 4 training.batch_size 0.0 +879 5 model.embedding_dim 0.0 +879 5 model.scoring_fct_norm 1.0 +879 5 loss.margin 9.221776402580389 +879 5 regularizer.weight 0.018275559834666034 +879 5 optimizer.lr 0.039979399624319345 +879 5 negative_sampler.num_negs_per_pos 37.0 +879 5 training.batch_size 0.0 +879 6 model.embedding_dim 2.0 +879 6 model.scoring_fct_norm 1.0 +879 6 loss.margin 5.182823884694198 +879 6 regularizer.weight 0.2557169894160501 +879 6 optimizer.lr 0.0918144152930222 +879 6 negative_sampler.num_negs_per_pos 33.0 +879 6 training.batch_size 2.0 +879 7 model.embedding_dim 0.0 +879 7 model.scoring_fct_norm 2.0 +879 7 loss.margin 2.7438877020148023 +879 7 regularizer.weight 0.0324650234421398 +879 7 optimizer.lr 0.09110612130801597 +879 7 negative_sampler.num_negs_per_pos 26.0 +879 7 training.batch_size 2.0 +879 8 model.embedding_dim 0.0 +879 8 model.scoring_fct_norm 2.0 +879 8 loss.margin 5.593365593216679 +879 8 regularizer.weight 0.019829146966049222 +879 8 optimizer.lr 0.009679100462513347 +879 8 negative_sampler.num_negs_per_pos 31.0 +879 8 training.batch_size 1.0 +879 9 model.embedding_dim 2.0 +879 9 model.scoring_fct_norm 1.0 +879 9 loss.margin 6.66802263460475 +879 9 regularizer.weight 0.08282401664998036 +879 9 optimizer.lr 0.022640792513661048 +879 9 negative_sampler.num_negs_per_pos 16.0 +879 9 training.batch_size 1.0 +879 10 model.embedding_dim 1.0 +879 10 model.scoring_fct_norm 2.0 +879 10 loss.margin 7.87554277597615 +879 10 regularizer.weight 0.05025385712749979 +879 10 optimizer.lr 
0.04136963444483716 +879 10 negative_sampler.num_negs_per_pos 20.0 +879 10 training.batch_size 0.0 +879 11 model.embedding_dim 2.0 +879 11 model.scoring_fct_norm 2.0 +879 11 loss.margin 2.5549045701978383 +879 11 regularizer.weight 0.01041771785672023 +879 11 optimizer.lr 0.0016938986462719707 +879 11 negative_sampler.num_negs_per_pos 67.0 +879 11 training.batch_size 0.0 +879 12 model.embedding_dim 1.0 +879 12 model.scoring_fct_norm 2.0 +879 12 loss.margin 2.9846158339415574 +879 12 regularizer.weight 0.0319089864245907 +879 12 optimizer.lr 0.09433663210535453 +879 12 negative_sampler.num_negs_per_pos 59.0 +879 12 training.batch_size 1.0 +879 13 model.embedding_dim 0.0 +879 13 model.scoring_fct_norm 2.0 +879 13 loss.margin 1.3857572057743655 +879 13 regularizer.weight 0.06063389922807343 +879 13 optimizer.lr 0.017882013060224196 +879 13 negative_sampler.num_negs_per_pos 78.0 +879 13 training.batch_size 2.0 +879 14 model.embedding_dim 2.0 +879 14 model.scoring_fct_norm 2.0 +879 14 loss.margin 3.535075559833931 +879 14 regularizer.weight 0.010052300817451147 +879 14 optimizer.lr 0.018612512001823494 +879 14 negative_sampler.num_negs_per_pos 99.0 +879 14 training.batch_size 2.0 +879 15 model.embedding_dim 0.0 +879 15 model.scoring_fct_norm 1.0 +879 15 loss.margin 1.8210568981381305 +879 15 regularizer.weight 0.04228395946286067 +879 15 optimizer.lr 0.0014016001671762143 +879 15 negative_sampler.num_negs_per_pos 80.0 +879 15 training.batch_size 0.0 +879 16 model.embedding_dim 1.0 +879 16 model.scoring_fct_norm 2.0 +879 16 loss.margin 6.6123674534179715 +879 16 regularizer.weight 0.03573304678375519 +879 16 optimizer.lr 0.005764614531001431 +879 16 negative_sampler.num_negs_per_pos 78.0 +879 16 training.batch_size 2.0 +879 17 model.embedding_dim 0.0 +879 17 model.scoring_fct_norm 2.0 +879 17 loss.margin 2.0845238679205833 +879 17 regularizer.weight 0.19859609744075962 +879 17 optimizer.lr 0.001076811117961487 +879 17 negative_sampler.num_negs_per_pos 3.0 +879 17 
training.batch_size 0.0 +879 18 model.embedding_dim 0.0 +879 18 model.scoring_fct_norm 2.0 +879 18 loss.margin 4.523288644587808 +879 18 regularizer.weight 0.032033049205691146 +879 18 optimizer.lr 0.01867923851299494 +879 18 negative_sampler.num_negs_per_pos 92.0 +879 18 training.batch_size 0.0 +879 19 model.embedding_dim 0.0 +879 19 model.scoring_fct_norm 1.0 +879 19 loss.margin 6.882529792328084 +879 19 regularizer.weight 0.27218769763940276 +879 19 optimizer.lr 0.08289650068688144 +879 19 negative_sampler.num_negs_per_pos 60.0 +879 19 training.batch_size 0.0 +879 20 model.embedding_dim 2.0 +879 20 model.scoring_fct_norm 1.0 +879 20 loss.margin 6.587616648976007 +879 20 regularizer.weight 0.042831249740615605 +879 20 optimizer.lr 0.045650336884084715 +879 20 negative_sampler.num_negs_per_pos 41.0 +879 20 training.batch_size 0.0 +879 21 model.embedding_dim 1.0 +879 21 model.scoring_fct_norm 2.0 +879 21 loss.margin 6.935417670229696 +879 21 regularizer.weight 0.0866411582790336 +879 21 optimizer.lr 0.055207104539936394 +879 21 negative_sampler.num_negs_per_pos 35.0 +879 21 training.batch_size 0.0 +879 22 model.embedding_dim 2.0 +879 22 model.scoring_fct_norm 2.0 +879 22 loss.margin 8.404241951329732 +879 22 regularizer.weight 0.10616543133920543 +879 22 optimizer.lr 0.0025313322746415255 +879 22 negative_sampler.num_negs_per_pos 95.0 +879 22 training.batch_size 1.0 +879 23 model.embedding_dim 1.0 +879 23 model.scoring_fct_norm 1.0 +879 23 loss.margin 7.805527250518839 +879 23 regularizer.weight 0.01151572235176969 +879 23 optimizer.lr 0.01242662667865177 +879 23 negative_sampler.num_negs_per_pos 19.0 +879 23 training.batch_size 0.0 +879 24 model.embedding_dim 2.0 +879 24 model.scoring_fct_norm 1.0 +879 24 loss.margin 8.533795637597997 +879 24 regularizer.weight 0.0401132588407678 +879 24 optimizer.lr 0.002303966794682743 +879 24 negative_sampler.num_negs_per_pos 7.0 +879 24 training.batch_size 0.0 +879 25 model.embedding_dim 1.0 +879 25 model.scoring_fct_norm 1.0 
+879 25 loss.margin 4.081792305243292 +879 25 regularizer.weight 0.016720585637384624 +879 25 optimizer.lr 0.010114933701420224 +879 25 negative_sampler.num_negs_per_pos 70.0 +879 25 training.batch_size 0.0 +879 26 model.embedding_dim 1.0 +879 26 model.scoring_fct_norm 1.0 +879 26 loss.margin 7.618261741805634 +879 26 regularizer.weight 0.07546683982045309 +879 26 optimizer.lr 0.011309163295694327 +879 26 negative_sampler.num_negs_per_pos 29.0 +879 26 training.batch_size 1.0 +879 27 model.embedding_dim 2.0 +879 27 model.scoring_fct_norm 2.0 +879 27 loss.margin 4.479677014471487 +879 27 regularizer.weight 0.030031765726230217 +879 27 optimizer.lr 0.02823056679479876 +879 27 negative_sampler.num_negs_per_pos 53.0 +879 27 training.batch_size 2.0 +879 28 model.embedding_dim 0.0 +879 28 model.scoring_fct_norm 2.0 +879 28 loss.margin 4.522090947959001 +879 28 regularizer.weight 0.11668713698677667 +879 28 optimizer.lr 0.0028248914494153135 +879 28 negative_sampler.num_negs_per_pos 76.0 +879 28 training.batch_size 1.0 +879 29 model.embedding_dim 0.0 +879 29 model.scoring_fct_norm 1.0 +879 29 loss.margin 9.638830244079465 +879 29 regularizer.weight 0.013134572164345201 +879 29 optimizer.lr 0.06063569374675717 +879 29 negative_sampler.num_negs_per_pos 91.0 +879 29 training.batch_size 2.0 +879 30 model.embedding_dim 0.0 +879 30 model.scoring_fct_norm 1.0 +879 30 loss.margin 5.109710900871875 +879 30 regularizer.weight 0.026117927589650553 +879 30 optimizer.lr 0.040597815103024223 +879 30 negative_sampler.num_negs_per_pos 24.0 +879 30 training.batch_size 0.0 +879 31 model.embedding_dim 1.0 +879 31 model.scoring_fct_norm 1.0 +879 31 loss.margin 3.611250671016732 +879 31 regularizer.weight 0.18290950033117348 +879 31 optimizer.lr 0.03010586245382717 +879 31 negative_sampler.num_negs_per_pos 37.0 +879 31 training.batch_size 0.0 +879 32 model.embedding_dim 1.0 +879 32 model.scoring_fct_norm 2.0 +879 32 loss.margin 7.538406728828078 +879 32 regularizer.weight 0.02785856942160479 
+879 32 optimizer.lr 0.0032643220917382343 +879 32 negative_sampler.num_negs_per_pos 66.0 +879 32 training.batch_size 1.0 +879 33 model.embedding_dim 0.0 +879 33 model.scoring_fct_norm 1.0 +879 33 loss.margin 2.7602295631228815 +879 33 regularizer.weight 0.07679558953089438 +879 33 optimizer.lr 0.003970769515315673 +879 33 negative_sampler.num_negs_per_pos 44.0 +879 33 training.batch_size 0.0 +879 34 model.embedding_dim 2.0 +879 34 model.scoring_fct_norm 2.0 +879 34 loss.margin 1.7770201444711171 +879 34 regularizer.weight 0.22251671541146423 +879 34 optimizer.lr 0.03186189238512829 +879 34 negative_sampler.num_negs_per_pos 39.0 +879 34 training.batch_size 2.0 +879 35 model.embedding_dim 0.0 +879 35 model.scoring_fct_norm 2.0 +879 35 loss.margin 3.4575657365104657 +879 35 regularizer.weight 0.18829678086522195 +879 35 optimizer.lr 0.0012943567137699147 +879 35 negative_sampler.num_negs_per_pos 95.0 +879 35 training.batch_size 0.0 +879 36 model.embedding_dim 0.0 +879 36 model.scoring_fct_norm 1.0 +879 36 loss.margin 2.444414345744491 +879 36 regularizer.weight 0.11536878737872741 +879 36 optimizer.lr 0.0018712355343076857 +879 36 negative_sampler.num_negs_per_pos 36.0 +879 36 training.batch_size 0.0 +879 37 model.embedding_dim 0.0 +879 37 model.scoring_fct_norm 2.0 +879 37 loss.margin 7.753420504033965 +879 37 regularizer.weight 0.05411185145274949 +879 37 optimizer.lr 0.05073924810160479 +879 37 negative_sampler.num_negs_per_pos 99.0 +879 37 training.batch_size 1.0 +879 38 model.embedding_dim 1.0 +879 38 model.scoring_fct_norm 1.0 +879 38 loss.margin 5.106589561407486 +879 38 regularizer.weight 0.25331559577186324 +879 38 optimizer.lr 0.002273089674192461 +879 38 negative_sampler.num_negs_per_pos 13.0 +879 38 training.batch_size 2.0 +879 39 model.embedding_dim 2.0 +879 39 model.scoring_fct_norm 2.0 +879 39 loss.margin 2.354726036342574 +879 39 regularizer.weight 0.016766340515826133 +879 39 optimizer.lr 0.07405641652004342 +879 39 negative_sampler.num_negs_per_pos 
6.0 +879 39 training.batch_size 0.0 +879 40 model.embedding_dim 1.0 +879 40 model.scoring_fct_norm 2.0 +879 40 loss.margin 5.237650360262972 +879 40 regularizer.weight 0.1093742535328976 +879 40 optimizer.lr 0.06986293604226611 +879 40 negative_sampler.num_negs_per_pos 24.0 +879 40 training.batch_size 1.0 +879 41 model.embedding_dim 1.0 +879 41 model.scoring_fct_norm 2.0 +879 41 loss.margin 1.3732785107443102 +879 41 regularizer.weight 0.010802592563584297 +879 41 optimizer.lr 0.005465744143382696 +879 41 negative_sampler.num_negs_per_pos 57.0 +879 41 training.batch_size 0.0 +879 42 model.embedding_dim 2.0 +879 42 model.scoring_fct_norm 1.0 +879 42 loss.margin 8.37512185265516 +879 42 regularizer.weight 0.026253260583353435 +879 42 optimizer.lr 0.09572751074601714 +879 42 negative_sampler.num_negs_per_pos 24.0 +879 42 training.batch_size 2.0 +879 43 model.embedding_dim 0.0 +879 43 model.scoring_fct_norm 1.0 +879 43 loss.margin 0.7583941441547262 +879 43 regularizer.weight 0.17161561923034133 +879 43 optimizer.lr 0.0015813073178303574 +879 43 negative_sampler.num_negs_per_pos 32.0 +879 43 training.batch_size 0.0 +879 44 model.embedding_dim 1.0 +879 44 model.scoring_fct_norm 1.0 +879 44 loss.margin 9.934675944197638 +879 44 regularizer.weight 0.05423411678237705 +879 44 optimizer.lr 0.019930935223208945 +879 44 negative_sampler.num_negs_per_pos 82.0 +879 44 training.batch_size 0.0 +879 45 model.embedding_dim 0.0 +879 45 model.scoring_fct_norm 1.0 +879 45 loss.margin 6.285467696096609 +879 45 regularizer.weight 0.013640246665699676 +879 45 optimizer.lr 0.02190256719698448 +879 45 negative_sampler.num_negs_per_pos 41.0 +879 45 training.batch_size 2.0 +879 46 model.embedding_dim 2.0 +879 46 model.scoring_fct_norm 2.0 +879 46 loss.margin 3.2968530101880122 +879 46 regularizer.weight 0.29950511359526694 +879 46 optimizer.lr 0.004916872927890935 +879 46 negative_sampler.num_negs_per_pos 92.0 +879 46 training.batch_size 1.0 +879 47 model.embedding_dim 1.0 +879 47 
model.scoring_fct_norm 2.0 +879 47 loss.margin 2.8873174830431094 +879 47 regularizer.weight 0.1065879373005874 +879 47 optimizer.lr 0.002773419899975546 +879 47 negative_sampler.num_negs_per_pos 71.0 +879 47 training.batch_size 2.0 +879 48 model.embedding_dim 2.0 +879 48 model.scoring_fct_norm 2.0 +879 48 loss.margin 1.9865490483041883 +879 48 regularizer.weight 0.09652695196646377 +879 48 optimizer.lr 0.0052547260526267245 +879 48 negative_sampler.num_negs_per_pos 28.0 +879 48 training.batch_size 2.0 +879 49 model.embedding_dim 0.0 +879 49 model.scoring_fct_norm 2.0 +879 49 loss.margin 1.3188593516781442 +879 49 regularizer.weight 0.03231502567502326 +879 49 optimizer.lr 0.003915317108589442 +879 49 negative_sampler.num_negs_per_pos 42.0 +879 49 training.batch_size 0.0 +879 50 model.embedding_dim 1.0 +879 50 model.scoring_fct_norm 2.0 +879 50 loss.margin 7.47655092645065 +879 50 regularizer.weight 0.016959430809039666 +879 50 optimizer.lr 0.0027910739198496646 +879 50 negative_sampler.num_negs_per_pos 50.0 +879 50 training.batch_size 1.0 +879 51 model.embedding_dim 1.0 +879 51 model.scoring_fct_norm 1.0 +879 51 loss.margin 4.104699891683877 +879 51 regularizer.weight 0.08011686495700687 +879 51 optimizer.lr 0.02078329277009562 +879 51 negative_sampler.num_negs_per_pos 72.0 +879 51 training.batch_size 2.0 +879 52 model.embedding_dim 2.0 +879 52 model.scoring_fct_norm 1.0 +879 52 loss.margin 5.238231192278232 +879 52 regularizer.weight 0.04966745083297975 +879 52 optimizer.lr 0.06456857226506253 +879 52 negative_sampler.num_negs_per_pos 86.0 +879 52 training.batch_size 0.0 +879 53 model.embedding_dim 1.0 +879 53 model.scoring_fct_norm 1.0 +879 53 loss.margin 5.98988243083614 +879 53 regularizer.weight 0.0480503756974645 +879 53 optimizer.lr 0.07399218641466757 +879 53 negative_sampler.num_negs_per_pos 19.0 +879 53 training.batch_size 2.0 +879 54 model.embedding_dim 2.0 +879 54 model.scoring_fct_norm 2.0 +879 54 loss.margin 3.993761443995404 +879 54 
regularizer.weight 0.024537661842436182 +879 54 optimizer.lr 0.057096784983715225 +879 54 negative_sampler.num_negs_per_pos 19.0 +879 54 training.batch_size 0.0 +879 55 model.embedding_dim 1.0 +879 55 model.scoring_fct_norm 2.0 +879 55 loss.margin 0.6899779922293077 +879 55 regularizer.weight 0.021845449434995088 +879 55 optimizer.lr 0.0012555006304620568 +879 55 negative_sampler.num_negs_per_pos 72.0 +879 55 training.batch_size 2.0 +879 56 model.embedding_dim 2.0 +879 56 model.scoring_fct_norm 1.0 +879 56 loss.margin 0.7394219571781944 +879 56 regularizer.weight 0.03886921515600431 +879 56 optimizer.lr 0.010194018157497601 +879 56 negative_sampler.num_negs_per_pos 80.0 +879 56 training.batch_size 0.0 +879 57 model.embedding_dim 2.0 +879 57 model.scoring_fct_norm 2.0 +879 57 loss.margin 6.73619852433104 +879 57 regularizer.weight 0.1274394019829497 +879 57 optimizer.lr 0.015277503487201756 +879 57 negative_sampler.num_negs_per_pos 99.0 +879 57 training.batch_size 1.0 +879 58 model.embedding_dim 1.0 +879 58 model.scoring_fct_norm 2.0 +879 58 loss.margin 5.791770393768333 +879 58 regularizer.weight 0.010652038133422519 +879 58 optimizer.lr 0.008960509220798286 +879 58 negative_sampler.num_negs_per_pos 35.0 +879 58 training.batch_size 2.0 +879 59 model.embedding_dim 2.0 +879 59 model.scoring_fct_norm 2.0 +879 59 loss.margin 3.304626269410203 +879 59 regularizer.weight 0.013759125744036739 +879 59 optimizer.lr 0.04771449781900035 +879 59 negative_sampler.num_negs_per_pos 81.0 +879 59 training.batch_size 1.0 +879 60 model.embedding_dim 0.0 +879 60 model.scoring_fct_norm 2.0 +879 60 loss.margin 9.802689996072578 +879 60 regularizer.weight 0.012164856612957351 +879 60 optimizer.lr 0.02328609151668137 +879 60 negative_sampler.num_negs_per_pos 13.0 +879 60 training.batch_size 0.0 +879 61 model.embedding_dim 0.0 +879 61 model.scoring_fct_norm 2.0 +879 61 loss.margin 5.600606763238331 +879 61 regularizer.weight 0.05424078624856784 +879 61 optimizer.lr 0.007907901849830737 
+879 61 negative_sampler.num_negs_per_pos 66.0 +879 61 training.batch_size 0.0 +879 62 model.embedding_dim 2.0 +879 62 model.scoring_fct_norm 2.0 +879 62 loss.margin 8.399679495457125 +879 62 regularizer.weight 0.024728405695085623 +879 62 optimizer.lr 0.026939870711758407 +879 62 negative_sampler.num_negs_per_pos 75.0 +879 62 training.batch_size 2.0 +879 63 model.embedding_dim 0.0 +879 63 model.scoring_fct_norm 1.0 +879 63 loss.margin 0.6322121966708352 +879 63 regularizer.weight 0.04635701190565113 +879 63 optimizer.lr 0.07746055764375091 +879 63 negative_sampler.num_negs_per_pos 64.0 +879 63 training.batch_size 1.0 +879 64 model.embedding_dim 0.0 +879 64 model.scoring_fct_norm 1.0 +879 64 loss.margin 1.6986074922313352 +879 64 regularizer.weight 0.022834157572950067 +879 64 optimizer.lr 0.006685408703412912 +879 64 negative_sampler.num_negs_per_pos 12.0 +879 64 training.batch_size 2.0 +879 65 model.embedding_dim 0.0 +879 65 model.scoring_fct_norm 1.0 +879 65 loss.margin 5.818997340631 +879 65 regularizer.weight 0.026160512927771155 +879 65 optimizer.lr 0.017344320791782424 +879 65 negative_sampler.num_negs_per_pos 43.0 +879 65 training.batch_size 2.0 +879 66 model.embedding_dim 1.0 +879 66 model.scoring_fct_norm 2.0 +879 66 loss.margin 5.676139617814067 +879 66 regularizer.weight 0.20645107538666146 +879 66 optimizer.lr 0.0015640232389944443 +879 66 negative_sampler.num_negs_per_pos 1.0 +879 66 training.batch_size 0.0 +879 67 model.embedding_dim 2.0 +879 67 model.scoring_fct_norm 1.0 +879 67 loss.margin 6.487490595621318 +879 67 regularizer.weight 0.019778482908556032 +879 67 optimizer.lr 0.007578630045401408 +879 67 negative_sampler.num_negs_per_pos 35.0 +879 67 training.batch_size 1.0 +879 68 model.embedding_dim 2.0 +879 68 model.scoring_fct_norm 1.0 +879 68 loss.margin 5.3805099729629156 +879 68 regularizer.weight 0.012216781135518221 +879 68 optimizer.lr 0.06057323137523528 +879 68 negative_sampler.num_negs_per_pos 76.0 +879 68 training.batch_size 2.0 +879 
69 model.embedding_dim 1.0 +879 69 model.scoring_fct_norm 2.0 +879 69 loss.margin 1.368716146722818 +879 69 regularizer.weight 0.013507305692544921 +879 69 optimizer.lr 0.0014430805997449354 +879 69 negative_sampler.num_negs_per_pos 11.0 +879 69 training.batch_size 0.0 +879 70 model.embedding_dim 2.0 +879 70 model.scoring_fct_norm 2.0 +879 70 loss.margin 4.069400082858316 +879 70 regularizer.weight 0.019417964502292596 +879 70 optimizer.lr 0.00795683303133406 +879 70 negative_sampler.num_negs_per_pos 58.0 +879 70 training.batch_size 2.0 +879 71 model.embedding_dim 1.0 +879 71 model.scoring_fct_norm 2.0 +879 71 loss.margin 7.984881212097174 +879 71 regularizer.weight 0.11089919835794143 +879 71 optimizer.lr 0.01846705440532861 +879 71 negative_sampler.num_negs_per_pos 79.0 +879 71 training.batch_size 1.0 +879 72 model.embedding_dim 0.0 +879 72 model.scoring_fct_norm 1.0 +879 72 loss.margin 6.31962123907617 +879 72 regularizer.weight 0.10403811370089618 +879 72 optimizer.lr 0.0045153569389895145 +879 72 negative_sampler.num_negs_per_pos 75.0 +879 72 training.batch_size 0.0 +879 73 model.embedding_dim 0.0 +879 73 model.scoring_fct_norm 2.0 +879 73 loss.margin 8.95752916816157 +879 73 regularizer.weight 0.06075257183324657 +879 73 optimizer.lr 0.0010903799588716013 +879 73 negative_sampler.num_negs_per_pos 64.0 +879 73 training.batch_size 2.0 +879 74 model.embedding_dim 1.0 +879 74 model.scoring_fct_norm 2.0 +879 74 loss.margin 5.005285630338992 +879 74 regularizer.weight 0.27776095831295994 +879 74 optimizer.lr 0.09904366354410245 +879 74 negative_sampler.num_negs_per_pos 0.0 +879 74 training.batch_size 1.0 +879 75 model.embedding_dim 1.0 +879 75 model.scoring_fct_norm 2.0 +879 75 loss.margin 4.546440616308325 +879 75 regularizer.weight 0.012206537122819236 +879 75 optimizer.lr 0.054409991434128 +879 75 negative_sampler.num_negs_per_pos 45.0 +879 75 training.batch_size 2.0 +879 76 model.embedding_dim 2.0 +879 76 model.scoring_fct_norm 1.0 +879 76 loss.margin 
9.995617126866993 +879 76 regularizer.weight 0.0559209902181084 +879 76 optimizer.lr 0.06993741207707889 +879 76 negative_sampler.num_negs_per_pos 54.0 +879 76 training.batch_size 0.0 +879 77 model.embedding_dim 0.0 +879 77 model.scoring_fct_norm 2.0 +879 77 loss.margin 9.272378542451175 +879 77 regularizer.weight 0.11357734622585086 +879 77 optimizer.lr 0.001025134200100244 +879 77 negative_sampler.num_negs_per_pos 80.0 +879 77 training.batch_size 1.0 +879 78 model.embedding_dim 1.0 +879 78 model.scoring_fct_norm 1.0 +879 78 loss.margin 9.374722925759553 +879 78 regularizer.weight 0.10708035271128034 +879 78 optimizer.lr 0.0016335738210701716 +879 78 negative_sampler.num_negs_per_pos 55.0 +879 78 training.batch_size 1.0 +879 79 model.embedding_dim 0.0 +879 79 model.scoring_fct_norm 1.0 +879 79 loss.margin 8.212662470527981 +879 79 regularizer.weight 0.1993758089806403 +879 79 optimizer.lr 0.029470737208570463 +879 79 negative_sampler.num_negs_per_pos 5.0 +879 79 training.batch_size 2.0 +879 80 model.embedding_dim 2.0 +879 80 model.scoring_fct_norm 1.0 +879 80 loss.margin 9.674200906938832 +879 80 regularizer.weight 0.02521217669343075 +879 80 optimizer.lr 0.005530352798243248 +879 80 negative_sampler.num_negs_per_pos 23.0 +879 80 training.batch_size 0.0 +879 81 model.embedding_dim 2.0 +879 81 model.scoring_fct_norm 1.0 +879 81 loss.margin 2.4641856133604647 +879 81 regularizer.weight 0.015244411958990543 +879 81 optimizer.lr 0.017890638610802593 +879 81 negative_sampler.num_negs_per_pos 60.0 +879 81 training.batch_size 1.0 +879 82 model.embedding_dim 0.0 +879 82 model.scoring_fct_norm 1.0 +879 82 loss.margin 8.346330308537523 +879 82 regularizer.weight 0.2573418601090644 +879 82 optimizer.lr 0.05516143189383208 +879 82 negative_sampler.num_negs_per_pos 78.0 +879 82 training.batch_size 2.0 +879 83 model.embedding_dim 2.0 +879 83 model.scoring_fct_norm 2.0 +879 83 loss.margin 2.1784909869226414 +879 83 regularizer.weight 0.171633446914582 +879 83 optimizer.lr 
0.007099283440305385 +879 83 negative_sampler.num_negs_per_pos 75.0 +879 83 training.batch_size 2.0 +879 84 model.embedding_dim 1.0 +879 84 model.scoring_fct_norm 2.0 +879 84 loss.margin 5.198537033604739 +879 84 regularizer.weight 0.013867419018050626 +879 84 optimizer.lr 0.0037779565845428484 +879 84 negative_sampler.num_negs_per_pos 32.0 +879 84 training.batch_size 1.0 +879 85 model.embedding_dim 1.0 +879 85 model.scoring_fct_norm 1.0 +879 85 loss.margin 1.4766371459199323 +879 85 regularizer.weight 0.07484398310225447 +879 85 optimizer.lr 0.003005314959980049 +879 85 negative_sampler.num_negs_per_pos 8.0 +879 85 training.batch_size 1.0 +879 86 model.embedding_dim 2.0 +879 86 model.scoring_fct_norm 2.0 +879 86 loss.margin 1.9860125941252065 +879 86 regularizer.weight 0.12581950822644278 +879 86 optimizer.lr 0.015614484115566828 +879 86 negative_sampler.num_negs_per_pos 44.0 +879 86 training.batch_size 2.0 +879 87 model.embedding_dim 0.0 +879 87 model.scoring_fct_norm 1.0 +879 87 loss.margin 6.705656552839348 +879 87 regularizer.weight 0.0177356170246702 +879 87 optimizer.lr 0.028646813140352823 +879 87 negative_sampler.num_negs_per_pos 55.0 +879 87 training.batch_size 0.0 +879 88 model.embedding_dim 0.0 +879 88 model.scoring_fct_norm 2.0 +879 88 loss.margin 8.066236679876475 +879 88 regularizer.weight 0.010715797220209041 +879 88 optimizer.lr 0.03076507248876108 +879 88 negative_sampler.num_negs_per_pos 66.0 +879 88 training.batch_size 2.0 +879 89 model.embedding_dim 2.0 +879 89 model.scoring_fct_norm 1.0 +879 89 loss.margin 8.819413203454836 +879 89 regularizer.weight 0.06309650617978443 +879 89 optimizer.lr 0.0010054622722435185 +879 89 negative_sampler.num_negs_per_pos 89.0 +879 89 training.batch_size 0.0 +879 90 model.embedding_dim 1.0 +879 90 model.scoring_fct_norm 2.0 +879 90 loss.margin 9.481382434360711 +879 90 regularizer.weight 0.0753633476863958 +879 90 optimizer.lr 0.01306885994776097 +879 90 negative_sampler.num_negs_per_pos 85.0 +879 90 
training.batch_size 2.0 +879 91 model.embedding_dim 0.0 +879 91 model.scoring_fct_norm 1.0 +879 91 loss.margin 3.6099489148692 +879 91 regularizer.weight 0.05307916106134207 +879 91 optimizer.lr 0.001053127883602554 +879 91 negative_sampler.num_negs_per_pos 24.0 +879 91 training.batch_size 0.0 +879 92 model.embedding_dim 2.0 +879 92 model.scoring_fct_norm 2.0 +879 92 loss.margin 6.832008820021178 +879 92 regularizer.weight 0.04103326431542706 +879 92 optimizer.lr 0.011860249329085975 +879 92 negative_sampler.num_negs_per_pos 78.0 +879 92 training.batch_size 2.0 +879 93 model.embedding_dim 2.0 +879 93 model.scoring_fct_norm 2.0 +879 93 loss.margin 2.599385253881106 +879 93 regularizer.weight 0.13852238262552802 +879 93 optimizer.lr 0.007893791408465998 +879 93 negative_sampler.num_negs_per_pos 38.0 +879 93 training.batch_size 2.0 +879 94 model.embedding_dim 1.0 +879 94 model.scoring_fct_norm 2.0 +879 94 loss.margin 4.239274437343756 +879 94 regularizer.weight 0.20360892999707852 +879 94 optimizer.lr 0.0011569819375253996 +879 94 negative_sampler.num_negs_per_pos 93.0 +879 94 training.batch_size 0.0 +879 95 model.embedding_dim 2.0 +879 95 model.scoring_fct_norm 2.0 +879 95 loss.margin 7.413631612822707 +879 95 regularizer.weight 0.28891874752683105 +879 95 optimizer.lr 0.006676932255884212 +879 95 negative_sampler.num_negs_per_pos 63.0 +879 95 training.batch_size 1.0 +879 96 model.embedding_dim 2.0 +879 96 model.scoring_fct_norm 1.0 +879 96 loss.margin 2.346994942782238 +879 96 regularizer.weight 0.02131368196523172 +879 96 optimizer.lr 0.02972152365260555 +879 96 negative_sampler.num_negs_per_pos 64.0 +879 96 training.batch_size 2.0 +879 97 model.embedding_dim 2.0 +879 97 model.scoring_fct_norm 2.0 +879 97 loss.margin 6.877114948546631 +879 97 regularizer.weight 0.05809606087513377 +879 97 optimizer.lr 0.060935728035469754 +879 97 negative_sampler.num_negs_per_pos 44.0 +879 97 training.batch_size 2.0 +879 98 model.embedding_dim 2.0 +879 98 model.scoring_fct_norm 1.0 
+879 98 loss.margin 8.4212999266283 +879 98 regularizer.weight 0.047024320334427154 +879 98 optimizer.lr 0.0017304577021515072 +879 98 negative_sampler.num_negs_per_pos 30.0 +879 98 training.batch_size 1.0 +879 99 model.embedding_dim 1.0 +879 99 model.scoring_fct_norm 2.0 +879 99 loss.margin 6.72427440474751 +879 99 regularizer.weight 0.20914890613154496 +879 99 optimizer.lr 0.04518933220557812 +879 99 negative_sampler.num_negs_per_pos 64.0 +879 99 training.batch_size 1.0 +879 100 model.embedding_dim 0.0 +879 100 model.scoring_fct_norm 1.0 +879 100 loss.margin 3.456178657297721 +879 100 regularizer.weight 0.029932045305627864 +879 100 optimizer.lr 0.007167717070988785 +879 100 negative_sampler.num_negs_per_pos 43.0 +879 100 training.batch_size 1.0 +879 1 dataset """kinships""" +879 1 model """transh""" +879 1 loss """marginranking""" +879 1 regularizer """transh""" +879 1 optimizer """adam""" +879 1 training_loop """owa""" +879 1 negative_sampler """basic""" +879 1 evaluator """rankbased""" +879 2 dataset """kinships""" +879 2 model """transh""" +879 2 loss """marginranking""" +879 2 regularizer """transh""" +879 2 optimizer """adam""" +879 2 training_loop """owa""" +879 2 negative_sampler """basic""" +879 2 evaluator """rankbased""" +879 3 dataset """kinships""" +879 3 model """transh""" +879 3 loss """marginranking""" +879 3 regularizer """transh""" +879 3 optimizer """adam""" +879 3 training_loop """owa""" +879 3 negative_sampler """basic""" +879 3 evaluator """rankbased""" +879 4 dataset """kinships""" +879 4 model """transh""" +879 4 loss """marginranking""" +879 4 regularizer """transh""" +879 4 optimizer """adam""" +879 4 training_loop """owa""" +879 4 negative_sampler """basic""" +879 4 evaluator """rankbased""" +879 5 dataset """kinships""" +879 5 model """transh""" +879 5 loss """marginranking""" +879 5 regularizer """transh""" +879 5 optimizer """adam""" +879 5 training_loop """owa""" +879 5 negative_sampler """basic""" +879 5 evaluator """rankbased""" 
+879 6 dataset """kinships""" +879 6 model """transh""" +879 6 loss """marginranking""" +879 6 regularizer """transh""" +879 6 optimizer """adam""" +879 6 training_loop """owa""" +879 6 negative_sampler """basic""" +879 6 evaluator """rankbased""" +879 7 dataset """kinships""" +879 7 model """transh""" +879 7 loss """marginranking""" +879 7 regularizer """transh""" +879 7 optimizer """adam""" +879 7 training_loop """owa""" +879 7 negative_sampler """basic""" +879 7 evaluator """rankbased""" +879 8 dataset """kinships""" +879 8 model """transh""" +879 8 loss """marginranking""" +879 8 regularizer """transh""" +879 8 optimizer """adam""" +879 8 training_loop """owa""" +879 8 negative_sampler """basic""" +879 8 evaluator """rankbased""" +879 9 dataset """kinships""" +879 9 model """transh""" +879 9 loss """marginranking""" +879 9 regularizer """transh""" +879 9 optimizer """adam""" +879 9 training_loop """owa""" +879 9 negative_sampler """basic""" +879 9 evaluator """rankbased""" +879 10 dataset """kinships""" +879 10 model """transh""" +879 10 loss """marginranking""" +879 10 regularizer """transh""" +879 10 optimizer """adam""" +879 10 training_loop """owa""" +879 10 negative_sampler """basic""" +879 10 evaluator """rankbased""" +879 11 dataset """kinships""" +879 11 model """transh""" +879 11 loss """marginranking""" +879 11 regularizer """transh""" +879 11 optimizer """adam""" +879 11 training_loop """owa""" +879 11 negative_sampler """basic""" +879 11 evaluator """rankbased""" +879 12 dataset """kinships""" +879 12 model """transh""" +879 12 loss """marginranking""" +879 12 regularizer """transh""" +879 12 optimizer """adam""" +879 12 training_loop """owa""" +879 12 negative_sampler """basic""" +879 12 evaluator """rankbased""" +879 13 dataset """kinships""" +879 13 model """transh""" +879 13 loss """marginranking""" +879 13 regularizer """transh""" +879 13 optimizer """adam""" +879 13 training_loop """owa""" +879 13 negative_sampler """basic""" +879 13 evaluator 
"""rankbased""" +879 14 dataset """kinships""" +879 14 model """transh""" +879 14 loss """marginranking""" +879 14 regularizer """transh""" +879 14 optimizer """adam""" +879 14 training_loop """owa""" +879 14 negative_sampler """basic""" +879 14 evaluator """rankbased""" +879 15 dataset """kinships""" +879 15 model """transh""" +879 15 loss """marginranking""" +879 15 regularizer """transh""" +879 15 optimizer """adam""" +879 15 training_loop """owa""" +879 15 negative_sampler """basic""" +879 15 evaluator """rankbased""" +879 16 dataset """kinships""" +879 16 model """transh""" +879 16 loss """marginranking""" +879 16 regularizer """transh""" +879 16 optimizer """adam""" +879 16 training_loop """owa""" +879 16 negative_sampler """basic""" +879 16 evaluator """rankbased""" +879 17 dataset """kinships""" +879 17 model """transh""" +879 17 loss """marginranking""" +879 17 regularizer """transh""" +879 17 optimizer """adam""" +879 17 training_loop """owa""" +879 17 negative_sampler """basic""" +879 17 evaluator """rankbased""" +879 18 dataset """kinships""" +879 18 model """transh""" +879 18 loss """marginranking""" +879 18 regularizer """transh""" +879 18 optimizer """adam""" +879 18 training_loop """owa""" +879 18 negative_sampler """basic""" +879 18 evaluator """rankbased""" +879 19 dataset """kinships""" +879 19 model """transh""" +879 19 loss """marginranking""" +879 19 regularizer """transh""" +879 19 optimizer """adam""" +879 19 training_loop """owa""" +879 19 negative_sampler """basic""" +879 19 evaluator """rankbased""" +879 20 dataset """kinships""" +879 20 model """transh""" +879 20 loss """marginranking""" +879 20 regularizer """transh""" +879 20 optimizer """adam""" +879 20 training_loop """owa""" +879 20 negative_sampler """basic""" +879 20 evaluator """rankbased""" +879 21 dataset """kinships""" +879 21 model """transh""" +879 21 loss """marginranking""" +879 21 regularizer """transh""" +879 21 optimizer """adam""" +879 21 training_loop """owa""" +879 
21 negative_sampler """basic""" +879 21 evaluator """rankbased""" +879 22 dataset """kinships""" +879 22 model """transh""" +879 22 loss """marginranking""" +879 22 regularizer """transh""" +879 22 optimizer """adam""" +879 22 training_loop """owa""" +879 22 negative_sampler """basic""" +879 22 evaluator """rankbased""" +879 23 dataset """kinships""" +879 23 model """transh""" +879 23 loss """marginranking""" +879 23 regularizer """transh""" +879 23 optimizer """adam""" +879 23 training_loop """owa""" +879 23 negative_sampler """basic""" +879 23 evaluator """rankbased""" +879 24 dataset """kinships""" +879 24 model """transh""" +879 24 loss """marginranking""" +879 24 regularizer """transh""" +879 24 optimizer """adam""" +879 24 training_loop """owa""" +879 24 negative_sampler """basic""" +879 24 evaluator """rankbased""" +879 25 dataset """kinships""" +879 25 model """transh""" +879 25 loss """marginranking""" +879 25 regularizer """transh""" +879 25 optimizer """adam""" +879 25 training_loop """owa""" +879 25 negative_sampler """basic""" +879 25 evaluator """rankbased""" +879 26 dataset """kinships""" +879 26 model """transh""" +879 26 loss """marginranking""" +879 26 regularizer """transh""" +879 26 optimizer """adam""" +879 26 training_loop """owa""" +879 26 negative_sampler """basic""" +879 26 evaluator """rankbased""" +879 27 dataset """kinships""" +879 27 model """transh""" +879 27 loss """marginranking""" +879 27 regularizer """transh""" +879 27 optimizer """adam""" +879 27 training_loop """owa""" +879 27 negative_sampler """basic""" +879 27 evaluator """rankbased""" +879 28 dataset """kinships""" +879 28 model """transh""" +879 28 loss """marginranking""" +879 28 regularizer """transh""" +879 28 optimizer """adam""" +879 28 training_loop """owa""" +879 28 negative_sampler """basic""" +879 28 evaluator """rankbased""" +879 29 dataset """kinships""" +879 29 model """transh""" +879 29 loss """marginranking""" +879 29 regularizer """transh""" +879 29 optimizer 
"""adam""" +879 29 training_loop """owa""" +879 29 negative_sampler """basic""" +879 29 evaluator """rankbased""" +879 30 dataset """kinships""" +879 30 model """transh""" +879 30 loss """marginranking""" +879 30 regularizer """transh""" +879 30 optimizer """adam""" +879 30 training_loop """owa""" +879 30 negative_sampler """basic""" +879 30 evaluator """rankbased""" +879 31 dataset """kinships""" +879 31 model """transh""" +879 31 loss """marginranking""" +879 31 regularizer """transh""" +879 31 optimizer """adam""" +879 31 training_loop """owa""" +879 31 negative_sampler """basic""" +879 31 evaluator """rankbased""" +879 32 dataset """kinships""" +879 32 model """transh""" +879 32 loss """marginranking""" +879 32 regularizer """transh""" +879 32 optimizer """adam""" +879 32 training_loop """owa""" +879 32 negative_sampler """basic""" +879 32 evaluator """rankbased""" +879 33 dataset """kinships""" +879 33 model """transh""" +879 33 loss """marginranking""" +879 33 regularizer """transh""" +879 33 optimizer """adam""" +879 33 training_loop """owa""" +879 33 negative_sampler """basic""" +879 33 evaluator """rankbased""" +879 34 dataset """kinships""" +879 34 model """transh""" +879 34 loss """marginranking""" +879 34 regularizer """transh""" +879 34 optimizer """adam""" +879 34 training_loop """owa""" +879 34 negative_sampler """basic""" +879 34 evaluator """rankbased""" +879 35 dataset """kinships""" +879 35 model """transh""" +879 35 loss """marginranking""" +879 35 regularizer """transh""" +879 35 optimizer """adam""" +879 35 training_loop """owa""" +879 35 negative_sampler """basic""" +879 35 evaluator """rankbased""" +879 36 dataset """kinships""" +879 36 model """transh""" +879 36 loss """marginranking""" +879 36 regularizer """transh""" +879 36 optimizer """adam""" +879 36 training_loop """owa""" +879 36 negative_sampler """basic""" +879 36 evaluator """rankbased""" +879 37 dataset """kinships""" +879 37 model """transh""" +879 37 loss """marginranking""" 
+879 37 regularizer """transh""" +879 37 optimizer """adam""" +879 37 training_loop """owa""" +879 37 negative_sampler """basic""" +879 37 evaluator """rankbased""" +879 38 dataset """kinships""" +879 38 model """transh""" +879 38 loss """marginranking""" +879 38 regularizer """transh""" +879 38 optimizer """adam""" +879 38 training_loop """owa""" +879 38 negative_sampler """basic""" +879 38 evaluator """rankbased""" +879 39 dataset """kinships""" +879 39 model """transh""" +879 39 loss """marginranking""" +879 39 regularizer """transh""" +879 39 optimizer """adam""" +879 39 training_loop """owa""" +879 39 negative_sampler """basic""" +879 39 evaluator """rankbased""" +879 40 dataset """kinships""" +879 40 model """transh""" +879 40 loss """marginranking""" +879 40 regularizer """transh""" +879 40 optimizer """adam""" +879 40 training_loop """owa""" +879 40 negative_sampler """basic""" +879 40 evaluator """rankbased""" +879 41 dataset """kinships""" +879 41 model """transh""" +879 41 loss """marginranking""" +879 41 regularizer """transh""" +879 41 optimizer """adam""" +879 41 training_loop """owa""" +879 41 negative_sampler """basic""" +879 41 evaluator """rankbased""" +879 42 dataset """kinships""" +879 42 model """transh""" +879 42 loss """marginranking""" +879 42 regularizer """transh""" +879 42 optimizer """adam""" +879 42 training_loop """owa""" +879 42 negative_sampler """basic""" +879 42 evaluator """rankbased""" +879 43 dataset """kinships""" +879 43 model """transh""" +879 43 loss """marginranking""" +879 43 regularizer """transh""" +879 43 optimizer """adam""" +879 43 training_loop """owa""" +879 43 negative_sampler """basic""" +879 43 evaluator """rankbased""" +879 44 dataset """kinships""" +879 44 model """transh""" +879 44 loss """marginranking""" +879 44 regularizer """transh""" +879 44 optimizer """adam""" +879 44 training_loop """owa""" +879 44 negative_sampler """basic""" +879 44 evaluator """rankbased""" +879 45 dataset """kinships""" +879 45 
model """transh""" +879 45 loss """marginranking""" +879 45 regularizer """transh""" +879 45 optimizer """adam""" +879 45 training_loop """owa""" +879 45 negative_sampler """basic""" +879 45 evaluator """rankbased""" +879 46 dataset """kinships""" +879 46 model """transh""" +879 46 loss """marginranking""" +879 46 regularizer """transh""" +879 46 optimizer """adam""" +879 46 training_loop """owa""" +879 46 negative_sampler """basic""" +879 46 evaluator """rankbased""" +879 47 dataset """kinships""" +879 47 model """transh""" +879 47 loss """marginranking""" +879 47 regularizer """transh""" +879 47 optimizer """adam""" +879 47 training_loop """owa""" +879 47 negative_sampler """basic""" +879 47 evaluator """rankbased""" +879 48 dataset """kinships""" +879 48 model """transh""" +879 48 loss """marginranking""" +879 48 regularizer """transh""" +879 48 optimizer """adam""" +879 48 training_loop """owa""" +879 48 negative_sampler """basic""" +879 48 evaluator """rankbased""" +879 49 dataset """kinships""" +879 49 model """transh""" +879 49 loss """marginranking""" +879 49 regularizer """transh""" +879 49 optimizer """adam""" +879 49 training_loop """owa""" +879 49 negative_sampler """basic""" +879 49 evaluator """rankbased""" +879 50 dataset """kinships""" +879 50 model """transh""" +879 50 loss """marginranking""" +879 50 regularizer """transh""" +879 50 optimizer """adam""" +879 50 training_loop """owa""" +879 50 negative_sampler """basic""" +879 50 evaluator """rankbased""" +879 51 dataset """kinships""" +879 51 model """transh""" +879 51 loss """marginranking""" +879 51 regularizer """transh""" +879 51 optimizer """adam""" +879 51 training_loop """owa""" +879 51 negative_sampler """basic""" +879 51 evaluator """rankbased""" +879 52 dataset """kinships""" +879 52 model """transh""" +879 52 loss """marginranking""" +879 52 regularizer """transh""" +879 52 optimizer """adam""" +879 52 training_loop """owa""" +879 52 negative_sampler """basic""" +879 52 evaluator 
"""rankbased""" +879 53 dataset """kinships""" +879 53 model """transh""" +879 53 loss """marginranking""" +879 53 regularizer """transh""" +879 53 optimizer """adam""" +879 53 training_loop """owa""" +879 53 negative_sampler """basic""" +879 53 evaluator """rankbased""" +879 54 dataset """kinships""" +879 54 model """transh""" +879 54 loss """marginranking""" +879 54 regularizer """transh""" +879 54 optimizer """adam""" +879 54 training_loop """owa""" +879 54 negative_sampler """basic""" +879 54 evaluator """rankbased""" +879 55 dataset """kinships""" +879 55 model """transh""" +879 55 loss """marginranking""" +879 55 regularizer """transh""" +879 55 optimizer """adam""" +879 55 training_loop """owa""" +879 55 negative_sampler """basic""" +879 55 evaluator """rankbased""" +879 56 dataset """kinships""" +879 56 model """transh""" +879 56 loss """marginranking""" +879 56 regularizer """transh""" +879 56 optimizer """adam""" +879 56 training_loop """owa""" +879 56 negative_sampler """basic""" +879 56 evaluator """rankbased""" +879 57 dataset """kinships""" +879 57 model """transh""" +879 57 loss """marginranking""" +879 57 regularizer """transh""" +879 57 optimizer """adam""" +879 57 training_loop """owa""" +879 57 negative_sampler """basic""" +879 57 evaluator """rankbased""" +879 58 dataset """kinships""" +879 58 model """transh""" +879 58 loss """marginranking""" +879 58 regularizer """transh""" +879 58 optimizer """adam""" +879 58 training_loop """owa""" +879 58 negative_sampler """basic""" +879 58 evaluator """rankbased""" +879 59 dataset """kinships""" +879 59 model """transh""" +879 59 loss """marginranking""" +879 59 regularizer """transh""" +879 59 optimizer """adam""" +879 59 training_loop """owa""" +879 59 negative_sampler """basic""" +879 59 evaluator """rankbased""" +879 60 dataset """kinships""" +879 60 model """transh""" +879 60 loss """marginranking""" +879 60 regularizer """transh""" +879 60 optimizer """adam""" +879 60 training_loop """owa""" +879 
60 negative_sampler """basic""" +879 60 evaluator """rankbased""" +879 61 dataset """kinships""" +879 61 model """transh""" +879 61 loss """marginranking""" +879 61 regularizer """transh""" +879 61 optimizer """adam""" +879 61 training_loop """owa""" +879 61 negative_sampler """basic""" +879 61 evaluator """rankbased""" +879 62 dataset """kinships""" +879 62 model """transh""" +879 62 loss """marginranking""" +879 62 regularizer """transh""" +879 62 optimizer """adam""" +879 62 training_loop """owa""" +879 62 negative_sampler """basic""" +879 62 evaluator """rankbased""" +879 63 dataset """kinships""" +879 63 model """transh""" +879 63 loss """marginranking""" +879 63 regularizer """transh""" +879 63 optimizer """adam""" +879 63 training_loop """owa""" +879 63 negative_sampler """basic""" +879 63 evaluator """rankbased""" +879 64 dataset """kinships""" +879 64 model """transh""" +879 64 loss """marginranking""" +879 64 regularizer """transh""" +879 64 optimizer """adam""" +879 64 training_loop """owa""" +879 64 negative_sampler """basic""" +879 64 evaluator """rankbased""" +879 65 dataset """kinships""" +879 65 model """transh""" +879 65 loss """marginranking""" +879 65 regularizer """transh""" +879 65 optimizer """adam""" +879 65 training_loop """owa""" +879 65 negative_sampler """basic""" +879 65 evaluator """rankbased""" +879 66 dataset """kinships""" +879 66 model """transh""" +879 66 loss """marginranking""" +879 66 regularizer """transh""" +879 66 optimizer """adam""" +879 66 training_loop """owa""" +879 66 negative_sampler """basic""" +879 66 evaluator """rankbased""" +879 67 dataset """kinships""" +879 67 model """transh""" +879 67 loss """marginranking""" +879 67 regularizer """transh""" +879 67 optimizer """adam""" +879 67 training_loop """owa""" +879 67 negative_sampler """basic""" +879 67 evaluator """rankbased""" +879 68 dataset """kinships""" +879 68 model """transh""" +879 68 loss """marginranking""" +879 68 regularizer """transh""" +879 68 optimizer 
"""adam""" +879 68 training_loop """owa""" +879 68 negative_sampler """basic""" +879 68 evaluator """rankbased""" +879 69 dataset """kinships""" +879 69 model """transh""" +879 69 loss """marginranking""" +879 69 regularizer """transh""" +879 69 optimizer """adam""" +879 69 training_loop """owa""" +879 69 negative_sampler """basic""" +879 69 evaluator """rankbased""" +879 70 dataset """kinships""" +879 70 model """transh""" +879 70 loss """marginranking""" +879 70 regularizer """transh""" +879 70 optimizer """adam""" +879 70 training_loop """owa""" +879 70 negative_sampler """basic""" +879 70 evaluator """rankbased""" +879 71 dataset """kinships""" +879 71 model """transh""" +879 71 loss """marginranking""" +879 71 regularizer """transh""" +879 71 optimizer """adam""" +879 71 training_loop """owa""" +879 71 negative_sampler """basic""" +879 71 evaluator """rankbased""" +879 72 dataset """kinships""" +879 72 model """transh""" +879 72 loss """marginranking""" +879 72 regularizer """transh""" +879 72 optimizer """adam""" +879 72 training_loop """owa""" +879 72 negative_sampler """basic""" +879 72 evaluator """rankbased""" +879 73 dataset """kinships""" +879 73 model """transh""" +879 73 loss """marginranking""" +879 73 regularizer """transh""" +879 73 optimizer """adam""" +879 73 training_loop """owa""" +879 73 negative_sampler """basic""" +879 73 evaluator """rankbased""" +879 74 dataset """kinships""" +879 74 model """transh""" +879 74 loss """marginranking""" +879 74 regularizer """transh""" +879 74 optimizer """adam""" +879 74 training_loop """owa""" +879 74 negative_sampler """basic""" +879 74 evaluator """rankbased""" +879 75 dataset """kinships""" +879 75 model """transh""" +879 75 loss """marginranking""" +879 75 regularizer """transh""" +879 75 optimizer """adam""" +879 75 training_loop """owa""" +879 75 negative_sampler """basic""" +879 75 evaluator """rankbased""" +879 76 dataset """kinships""" +879 76 model """transh""" +879 76 loss """marginranking""" 
+879 76 regularizer """transh""" +879 76 optimizer """adam""" +879 76 training_loop """owa""" +879 76 negative_sampler """basic""" +879 76 evaluator """rankbased""" +879 77 dataset """kinships""" +879 77 model """transh""" +879 77 loss """marginranking""" +879 77 regularizer """transh""" +879 77 optimizer """adam""" +879 77 training_loop """owa""" +879 77 negative_sampler """basic""" +879 77 evaluator """rankbased""" +879 78 dataset """kinships""" +879 78 model """transh""" +879 78 loss """marginranking""" +879 78 regularizer """transh""" +879 78 optimizer """adam""" +879 78 training_loop """owa""" +879 78 negative_sampler """basic""" +879 78 evaluator """rankbased""" +879 79 dataset """kinships""" +879 79 model """transh""" +879 79 loss """marginranking""" +879 79 regularizer """transh""" +879 79 optimizer """adam""" +879 79 training_loop """owa""" +879 79 negative_sampler """basic""" +879 79 evaluator """rankbased""" +879 80 dataset """kinships""" +879 80 model """transh""" +879 80 loss """marginranking""" +879 80 regularizer """transh""" +879 80 optimizer """adam""" +879 80 training_loop """owa""" +879 80 negative_sampler """basic""" +879 80 evaluator """rankbased""" +879 81 dataset """kinships""" +879 81 model """transh""" +879 81 loss """marginranking""" +879 81 regularizer """transh""" +879 81 optimizer """adam""" +879 81 training_loop """owa""" +879 81 negative_sampler """basic""" +879 81 evaluator """rankbased""" +879 82 dataset """kinships""" +879 82 model """transh""" +879 82 loss """marginranking""" +879 82 regularizer """transh""" +879 82 optimizer """adam""" +879 82 training_loop """owa""" +879 82 negative_sampler """basic""" +879 82 evaluator """rankbased""" +879 83 dataset """kinships""" +879 83 model """transh""" +879 83 loss """marginranking""" +879 83 regularizer """transh""" +879 83 optimizer """adam""" +879 83 training_loop """owa""" +879 83 negative_sampler """basic""" +879 83 evaluator """rankbased""" +879 84 dataset """kinships""" +879 84 
model """transh""" +879 84 loss """marginranking""" +879 84 regularizer """transh""" +879 84 optimizer """adam""" +879 84 training_loop """owa""" +879 84 negative_sampler """basic""" +879 84 evaluator """rankbased""" +879 85 dataset """kinships""" +879 85 model """transh""" +879 85 loss """marginranking""" +879 85 regularizer """transh""" +879 85 optimizer """adam""" +879 85 training_loop """owa""" +879 85 negative_sampler """basic""" +879 85 evaluator """rankbased""" +879 86 dataset """kinships""" +879 86 model """transh""" +879 86 loss """marginranking""" +879 86 regularizer """transh""" +879 86 optimizer """adam""" +879 86 training_loop """owa""" +879 86 negative_sampler """basic""" +879 86 evaluator """rankbased""" +879 87 dataset """kinships""" +879 87 model """transh""" +879 87 loss """marginranking""" +879 87 regularizer """transh""" +879 87 optimizer """adam""" +879 87 training_loop """owa""" +879 87 negative_sampler """basic""" +879 87 evaluator """rankbased""" +879 88 dataset """kinships""" +879 88 model """transh""" +879 88 loss """marginranking""" +879 88 regularizer """transh""" +879 88 optimizer """adam""" +879 88 training_loop """owa""" +879 88 negative_sampler """basic""" +879 88 evaluator """rankbased""" +879 89 dataset """kinships""" +879 89 model """transh""" +879 89 loss """marginranking""" +879 89 regularizer """transh""" +879 89 optimizer """adam""" +879 89 training_loop """owa""" +879 89 negative_sampler """basic""" +879 89 evaluator """rankbased""" +879 90 dataset """kinships""" +879 90 model """transh""" +879 90 loss """marginranking""" +879 90 regularizer """transh""" +879 90 optimizer """adam""" +879 90 training_loop """owa""" +879 90 negative_sampler """basic""" +879 90 evaluator """rankbased""" +879 91 dataset """kinships""" +879 91 model """transh""" +879 91 loss """marginranking""" +879 91 regularizer """transh""" +879 91 optimizer """adam""" +879 91 training_loop """owa""" +879 91 negative_sampler """basic""" +879 91 evaluator 
"""rankbased""" +879 92 dataset """kinships""" +879 92 model """transh""" +879 92 loss """marginranking""" +879 92 regularizer """transh""" +879 92 optimizer """adam""" +879 92 training_loop """owa""" +879 92 negative_sampler """basic""" +879 92 evaluator """rankbased""" +879 93 dataset """kinships""" +879 93 model """transh""" +879 93 loss """marginranking""" +879 93 regularizer """transh""" +879 93 optimizer """adam""" +879 93 training_loop """owa""" +879 93 negative_sampler """basic""" +879 93 evaluator """rankbased""" +879 94 dataset """kinships""" +879 94 model """transh""" +879 94 loss """marginranking""" +879 94 regularizer """transh""" +879 94 optimizer """adam""" +879 94 training_loop """owa""" +879 94 negative_sampler """basic""" +879 94 evaluator """rankbased""" +879 95 dataset """kinships""" +879 95 model """transh""" +879 95 loss """marginranking""" +879 95 regularizer """transh""" +879 95 optimizer """adam""" +879 95 training_loop """owa""" +879 95 negative_sampler """basic""" +879 95 evaluator """rankbased""" +879 96 dataset """kinships""" +879 96 model """transh""" +879 96 loss """marginranking""" +879 96 regularizer """transh""" +879 96 optimizer """adam""" +879 96 training_loop """owa""" +879 96 negative_sampler """basic""" +879 96 evaluator """rankbased""" +879 97 dataset """kinships""" +879 97 model """transh""" +879 97 loss """marginranking""" +879 97 regularizer """transh""" +879 97 optimizer """adam""" +879 97 training_loop """owa""" +879 97 negative_sampler """basic""" +879 97 evaluator """rankbased""" +879 98 dataset """kinships""" +879 98 model """transh""" +879 98 loss """marginranking""" +879 98 regularizer """transh""" +879 98 optimizer """adam""" +879 98 training_loop """owa""" +879 98 negative_sampler """basic""" +879 98 evaluator """rankbased""" +879 99 dataset """kinships""" +879 99 model """transh""" +879 99 loss """marginranking""" +879 99 regularizer """transh""" +879 99 optimizer """adam""" +879 99 training_loop """owa""" +879 
99 negative_sampler """basic""" +879 99 evaluator """rankbased""" +879 100 dataset """kinships""" +879 100 model """transh""" +879 100 loss """marginranking""" +879 100 regularizer """transh""" +879 100 optimizer """adam""" +879 100 training_loop """owa""" +879 100 negative_sampler """basic""" +879 100 evaluator """rankbased""" +880 1 model.embedding_dim 1.0 +880 1 model.scoring_fct_norm 2.0 +880 1 loss.margin 3.98636086353993 +880 1 regularizer.weight 0.03454169434606282 +880 1 optimizer.lr 0.036328740017850795 +880 1 negative_sampler.num_negs_per_pos 37.0 +880 1 training.batch_size 2.0 +880 2 model.embedding_dim 1.0 +880 2 model.scoring_fct_norm 2.0 +880 2 loss.margin 0.8959362918537572 +880 2 regularizer.weight 0.0653881350787918 +880 2 optimizer.lr 0.04701793378842604 +880 2 negative_sampler.num_negs_per_pos 53.0 +880 2 training.batch_size 0.0 +880 3 model.embedding_dim 1.0 +880 3 model.scoring_fct_norm 1.0 +880 3 loss.margin 0.5795985283835937 +880 3 regularizer.weight 0.09102332176337434 +880 3 optimizer.lr 0.022332754343281032 +880 3 negative_sampler.num_negs_per_pos 75.0 +880 3 training.batch_size 0.0 +880 4 model.embedding_dim 2.0 +880 4 model.scoring_fct_norm 1.0 +880 4 loss.margin 6.37421296007001 +880 4 regularizer.weight 0.025681319243322447 +880 4 optimizer.lr 0.05930560919098182 +880 4 negative_sampler.num_negs_per_pos 69.0 +880 4 training.batch_size 1.0 +880 5 model.embedding_dim 1.0 +880 5 model.scoring_fct_norm 2.0 +880 5 loss.margin 2.067141294951332 +880 5 regularizer.weight 0.03835938315842266 +880 5 optimizer.lr 0.08664086922217942 +880 5 negative_sampler.num_negs_per_pos 61.0 +880 5 training.batch_size 2.0 +880 6 model.embedding_dim 0.0 +880 6 model.scoring_fct_norm 2.0 +880 6 loss.margin 9.261557501788152 +880 6 regularizer.weight 0.12296957388012515 +880 6 optimizer.lr 0.008008515241686833 +880 6 negative_sampler.num_negs_per_pos 93.0 +880 6 training.batch_size 0.0 +880 7 model.embedding_dim 2.0 +880 7 model.scoring_fct_norm 2.0 +880 7 
loss.margin 2.2591392187046555 +880 7 regularizer.weight 0.12499433025148361 +880 7 optimizer.lr 0.0010018918563450387 +880 7 negative_sampler.num_negs_per_pos 96.0 +880 7 training.batch_size 0.0 +880 8 model.embedding_dim 0.0 +880 8 model.scoring_fct_norm 1.0 +880 8 loss.margin 1.1667791426145944 +880 8 regularizer.weight 0.014423587203935802 +880 8 optimizer.lr 0.03691921395947873 +880 8 negative_sampler.num_negs_per_pos 17.0 +880 8 training.batch_size 0.0 +880 9 model.embedding_dim 1.0 +880 9 model.scoring_fct_norm 1.0 +880 9 loss.margin 0.8901224234259905 +880 9 regularizer.weight 0.010544257660779195 +880 9 optimizer.lr 0.0014559078984247647 +880 9 negative_sampler.num_negs_per_pos 43.0 +880 9 training.batch_size 0.0 +880 10 model.embedding_dim 1.0 +880 10 model.scoring_fct_norm 2.0 +880 10 loss.margin 4.657499645273836 +880 10 regularizer.weight 0.026583765556229898 +880 10 optimizer.lr 0.001025852414576348 +880 10 negative_sampler.num_negs_per_pos 27.0 +880 10 training.batch_size 0.0 +880 11 model.embedding_dim 2.0 +880 11 model.scoring_fct_norm 1.0 +880 11 loss.margin 8.066405993856929 +880 11 regularizer.weight 0.01248326379424315 +880 11 optimizer.lr 0.012061312579699064 +880 11 negative_sampler.num_negs_per_pos 33.0 +880 11 training.batch_size 0.0 +880 12 model.embedding_dim 0.0 +880 12 model.scoring_fct_norm 1.0 +880 12 loss.margin 8.144533315117364 +880 12 regularizer.weight 0.0396282838224135 +880 12 optimizer.lr 0.03860989912756172 +880 12 negative_sampler.num_negs_per_pos 90.0 +880 12 training.batch_size 1.0 +880 13 model.embedding_dim 0.0 +880 13 model.scoring_fct_norm 2.0 +880 13 loss.margin 0.8951931176744675 +880 13 regularizer.weight 0.010634408011100174 +880 13 optimizer.lr 0.014028671245441049 +880 13 negative_sampler.num_negs_per_pos 18.0 +880 13 training.batch_size 1.0 +880 14 model.embedding_dim 0.0 +880 14 model.scoring_fct_norm 2.0 +880 14 loss.margin 8.091820832006935 +880 14 regularizer.weight 0.2669288224766638 +880 14 optimizer.lr 
0.09037735820962893 +880 14 negative_sampler.num_negs_per_pos 42.0 +880 14 training.batch_size 0.0 +880 15 model.embedding_dim 2.0 +880 15 model.scoring_fct_norm 1.0 +880 15 loss.margin 3.216497294187781 +880 15 regularizer.weight 0.017686481252221664 +880 15 optimizer.lr 0.0016469111449399338 +880 15 negative_sampler.num_negs_per_pos 39.0 +880 15 training.batch_size 1.0 +880 16 model.embedding_dim 1.0 +880 16 model.scoring_fct_norm 2.0 +880 16 loss.margin 2.8257029209079993 +880 16 regularizer.weight 0.10453275592322048 +880 16 optimizer.lr 0.004779476703149978 +880 16 negative_sampler.num_negs_per_pos 12.0 +880 16 training.batch_size 0.0 +880 17 model.embedding_dim 2.0 +880 17 model.scoring_fct_norm 1.0 +880 17 loss.margin 2.0670550470820976 +880 17 regularizer.weight 0.2999225220161268 +880 17 optimizer.lr 0.0935178854490219 +880 17 negative_sampler.num_negs_per_pos 58.0 +880 17 training.batch_size 2.0 +880 18 model.embedding_dim 2.0 +880 18 model.scoring_fct_norm 1.0 +880 18 loss.margin 4.7590077554860875 +880 18 regularizer.weight 0.06670038819042713 +880 18 optimizer.lr 0.007765633948910711 +880 18 negative_sampler.num_negs_per_pos 52.0 +880 18 training.batch_size 0.0 +880 19 model.embedding_dim 1.0 +880 19 model.scoring_fct_norm 2.0 +880 19 loss.margin 9.477858284826661 +880 19 regularizer.weight 0.05763858260044634 +880 19 optimizer.lr 0.001315666606437538 +880 19 negative_sampler.num_negs_per_pos 38.0 +880 19 training.batch_size 1.0 +880 20 model.embedding_dim 0.0 +880 20 model.scoring_fct_norm 2.0 +880 20 loss.margin 0.7165711475179821 +880 20 regularizer.weight 0.05063536463871621 +880 20 optimizer.lr 0.01796019587283867 +880 20 negative_sampler.num_negs_per_pos 36.0 +880 20 training.batch_size 1.0 +880 21 model.embedding_dim 2.0 +880 21 model.scoring_fct_norm 2.0 +880 21 loss.margin 8.598227083166032 +880 21 regularizer.weight 0.08492764948720544 +880 21 optimizer.lr 0.06236771147583868 +880 21 negative_sampler.num_negs_per_pos 85.0 +880 21 
training.batch_size 2.0 +880 22 model.embedding_dim 0.0 +880 22 model.scoring_fct_norm 1.0 +880 22 loss.margin 2.173185640104841 +880 22 regularizer.weight 0.22494856975960545 +880 22 optimizer.lr 0.00254001070078208 +880 22 negative_sampler.num_negs_per_pos 23.0 +880 22 training.batch_size 0.0 +880 23 model.embedding_dim 1.0 +880 23 model.scoring_fct_norm 2.0 +880 23 loss.margin 7.007104949325435 +880 23 regularizer.weight 0.07601820693123261 +880 23 optimizer.lr 0.04418821448638871 +880 23 negative_sampler.num_negs_per_pos 78.0 +880 23 training.batch_size 1.0 +880 24 model.embedding_dim 2.0 +880 24 model.scoring_fct_norm 1.0 +880 24 loss.margin 6.163212353992105 +880 24 regularizer.weight 0.018778711936766633 +880 24 optimizer.lr 0.008665319262021883 +880 24 negative_sampler.num_negs_per_pos 79.0 +880 24 training.batch_size 2.0 +880 25 model.embedding_dim 2.0 +880 25 model.scoring_fct_norm 1.0 +880 25 loss.margin 6.112309493344667 +880 25 regularizer.weight 0.01418395769829999 +880 25 optimizer.lr 0.0012957728006700382 +880 25 negative_sampler.num_negs_per_pos 6.0 +880 25 training.batch_size 0.0 +880 26 model.embedding_dim 2.0 +880 26 model.scoring_fct_norm 2.0 +880 26 loss.margin 3.2887084311907575 +880 26 regularizer.weight 0.2504380099970761 +880 26 optimizer.lr 0.00878178619534707 +880 26 negative_sampler.num_negs_per_pos 39.0 +880 26 training.batch_size 0.0 +880 27 model.embedding_dim 0.0 +880 27 model.scoring_fct_norm 1.0 +880 27 loss.margin 7.426441445416413 +880 27 regularizer.weight 0.23689096146529853 +880 27 optimizer.lr 0.007840322823944049 +880 27 negative_sampler.num_negs_per_pos 72.0 +880 27 training.batch_size 0.0 +880 28 model.embedding_dim 2.0 +880 28 model.scoring_fct_norm 2.0 +880 28 loss.margin 5.387054598143074 +880 28 regularizer.weight 0.11119508276349228 +880 28 optimizer.lr 0.007544173755744897 +880 28 negative_sampler.num_negs_per_pos 85.0 +880 28 training.batch_size 1.0 +880 29 model.embedding_dim 2.0 +880 29 model.scoring_fct_norm 1.0 
+880 29 loss.margin 8.371760938831574 +880 29 regularizer.weight 0.12802846219329783 +880 29 optimizer.lr 0.0036462122861901387 +880 29 negative_sampler.num_negs_per_pos 96.0 +880 29 training.batch_size 2.0 +880 30 model.embedding_dim 2.0 +880 30 model.scoring_fct_norm 1.0 +880 30 loss.margin 4.953651693506554 +880 30 regularizer.weight 0.04483844261974027 +880 30 optimizer.lr 0.021388207307073366 +880 30 negative_sampler.num_negs_per_pos 34.0 +880 30 training.batch_size 2.0 +880 31 model.embedding_dim 0.0 +880 31 model.scoring_fct_norm 1.0 +880 31 loss.margin 3.7774086205860233 +880 31 regularizer.weight 0.15176660880466405 +880 31 optimizer.lr 0.00925913554218567 +880 31 negative_sampler.num_negs_per_pos 90.0 +880 31 training.batch_size 0.0 +880 32 model.embedding_dim 0.0 +880 32 model.scoring_fct_norm 1.0 +880 32 loss.margin 7.134902994135387 +880 32 regularizer.weight 0.06434616304330563 +880 32 optimizer.lr 0.0644620935259966 +880 32 negative_sampler.num_negs_per_pos 6.0 +880 32 training.batch_size 1.0 +880 33 model.embedding_dim 1.0 +880 33 model.scoring_fct_norm 2.0 +880 33 loss.margin 5.239124236042095 +880 33 regularizer.weight 0.01721127487308674 +880 33 optimizer.lr 0.006390486325471995 +880 33 negative_sampler.num_negs_per_pos 53.0 +880 33 training.batch_size 1.0 +880 34 model.embedding_dim 2.0 +880 34 model.scoring_fct_norm 1.0 +880 34 loss.margin 3.1949433924822386 +880 34 regularizer.weight 0.2793868995194258 +880 34 optimizer.lr 0.009779110447702327 +880 34 negative_sampler.num_negs_per_pos 87.0 +880 34 training.batch_size 2.0 +880 35 model.embedding_dim 2.0 +880 35 model.scoring_fct_norm 1.0 +880 35 loss.margin 3.3842521250145694 +880 35 regularizer.weight 0.05335632668201963 +880 35 optimizer.lr 0.018035928093091635 +880 35 negative_sampler.num_negs_per_pos 71.0 +880 35 training.batch_size 1.0 +880 36 model.embedding_dim 2.0 +880 36 model.scoring_fct_norm 1.0 +880 36 loss.margin 3.6451598143299977 +880 36 regularizer.weight 0.014307848075571065 
+880 36 optimizer.lr 0.0038914439031165313 +880 36 negative_sampler.num_negs_per_pos 93.0 +880 36 training.batch_size 2.0 +880 37 model.embedding_dim 1.0 +880 37 model.scoring_fct_norm 1.0 +880 37 loss.margin 4.38210717275693 +880 37 regularizer.weight 0.1860969575651075 +880 37 optimizer.lr 0.03845340027721391 +880 37 negative_sampler.num_negs_per_pos 82.0 +880 37 training.batch_size 2.0 +880 38 model.embedding_dim 1.0 +880 38 model.scoring_fct_norm 1.0 +880 38 loss.margin 6.242081169184889 +880 38 regularizer.weight 0.02561506788566748 +880 38 optimizer.lr 0.008822025841399603 +880 38 negative_sampler.num_negs_per_pos 22.0 +880 38 training.batch_size 0.0 +880 39 model.embedding_dim 1.0 +880 39 model.scoring_fct_norm 1.0 +880 39 loss.margin 1.7910173590257352 +880 39 regularizer.weight 0.02912074644623383 +880 39 optimizer.lr 0.022154141837940858 +880 39 negative_sampler.num_negs_per_pos 13.0 +880 39 training.batch_size 1.0 +880 40 model.embedding_dim 0.0 +880 40 model.scoring_fct_norm 2.0 +880 40 loss.margin 6.258105576492288 +880 40 regularizer.weight 0.09447278254903042 +880 40 optimizer.lr 0.02875069228600252 +880 40 negative_sampler.num_negs_per_pos 75.0 +880 40 training.batch_size 0.0 +880 41 model.embedding_dim 0.0 +880 41 model.scoring_fct_norm 2.0 +880 41 loss.margin 9.354202835748847 +880 41 regularizer.weight 0.1313677051146723 +880 41 optimizer.lr 0.003026460599643624 +880 41 negative_sampler.num_negs_per_pos 8.0 +880 41 training.batch_size 1.0 +880 42 model.embedding_dim 0.0 +880 42 model.scoring_fct_norm 1.0 +880 42 loss.margin 1.5963975818644984 +880 42 regularizer.weight 0.01742574794470912 +880 42 optimizer.lr 0.0022257847710569142 +880 42 negative_sampler.num_negs_per_pos 1.0 +880 42 training.batch_size 1.0 +880 43 model.embedding_dim 0.0 +880 43 model.scoring_fct_norm 1.0 +880 43 loss.margin 7.23952403968079 +880 43 regularizer.weight 0.0688964502430422 +880 43 optimizer.lr 0.0024634806321543745 +880 43 negative_sampler.num_negs_per_pos 87.0 
+880 43 training.batch_size 2.0 +880 44 model.embedding_dim 1.0 +880 44 model.scoring_fct_norm 2.0 +880 44 loss.margin 5.729190521438451 +880 44 regularizer.weight 0.07725621360825788 +880 44 optimizer.lr 0.006378842087154945 +880 44 negative_sampler.num_negs_per_pos 86.0 +880 44 training.batch_size 0.0 +880 45 model.embedding_dim 0.0 +880 45 model.scoring_fct_norm 1.0 +880 45 loss.margin 5.777090634194322 +880 45 regularizer.weight 0.01075965991124499 +880 45 optimizer.lr 0.08534118273283729 +880 45 negative_sampler.num_negs_per_pos 87.0 +880 45 training.batch_size 1.0 +880 46 model.embedding_dim 1.0 +880 46 model.scoring_fct_norm 2.0 +880 46 loss.margin 7.207305744918605 +880 46 regularizer.weight 0.035063893124045356 +880 46 optimizer.lr 0.02924361190283367 +880 46 negative_sampler.num_negs_per_pos 79.0 +880 46 training.batch_size 2.0 +880 47 model.embedding_dim 0.0 +880 47 model.scoring_fct_norm 2.0 +880 47 loss.margin 2.198493388037977 +880 47 regularizer.weight 0.03150524196925818 +880 47 optimizer.lr 0.001824187532891053 +880 47 negative_sampler.num_negs_per_pos 48.0 +880 47 training.batch_size 0.0 +880 48 model.embedding_dim 2.0 +880 48 model.scoring_fct_norm 2.0 +880 48 loss.margin 9.786681156020466 +880 48 regularizer.weight 0.11324931643713719 +880 48 optimizer.lr 0.002959666651238174 +880 48 negative_sampler.num_negs_per_pos 73.0 +880 48 training.batch_size 0.0 +880 49 model.embedding_dim 2.0 +880 49 model.scoring_fct_norm 2.0 +880 49 loss.margin 4.201407035894116 +880 49 regularizer.weight 0.01945743512676459 +880 49 optimizer.lr 0.06200309511489792 +880 49 negative_sampler.num_negs_per_pos 21.0 +880 49 training.batch_size 2.0 +880 50 model.embedding_dim 0.0 +880 50 model.scoring_fct_norm 1.0 +880 50 loss.margin 3.534651729747497 +880 50 regularizer.weight 0.03071821438042805 +880 50 optimizer.lr 0.0012488251220356943 +880 50 negative_sampler.num_negs_per_pos 63.0 +880 50 training.batch_size 2.0 +880 51 model.embedding_dim 0.0 +880 51 
model.scoring_fct_norm 2.0 +880 51 loss.margin 4.368425253280543 +880 51 regularizer.weight 0.048044025870881364 +880 51 optimizer.lr 0.008163149707147668 +880 51 negative_sampler.num_negs_per_pos 86.0 +880 51 training.batch_size 2.0 +880 52 model.embedding_dim 2.0 +880 52 model.scoring_fct_norm 2.0 +880 52 loss.margin 3.756891436608009 +880 52 regularizer.weight 0.030898705229126865 +880 52 optimizer.lr 0.007933898288135106 +880 52 negative_sampler.num_negs_per_pos 50.0 +880 52 training.batch_size 0.0 +880 53 model.embedding_dim 1.0 +880 53 model.scoring_fct_norm 2.0 +880 53 loss.margin 2.4157557927626407 +880 53 regularizer.weight 0.13249708148247033 +880 53 optimizer.lr 0.004743703450708412 +880 53 negative_sampler.num_negs_per_pos 15.0 +880 53 training.batch_size 1.0 +880 54 model.embedding_dim 1.0 +880 54 model.scoring_fct_norm 2.0 +880 54 loss.margin 9.204878549585318 +880 54 regularizer.weight 0.11238170581642347 +880 54 optimizer.lr 0.0023033079746806735 +880 54 negative_sampler.num_negs_per_pos 96.0 +880 54 training.batch_size 0.0 +880 55 model.embedding_dim 1.0 +880 55 model.scoring_fct_norm 2.0 +880 55 loss.margin 6.576292018562386 +880 55 regularizer.weight 0.033809069736591486 +880 55 optimizer.lr 0.009565309568694998 +880 55 negative_sampler.num_negs_per_pos 60.0 +880 55 training.batch_size 2.0 +880 56 model.embedding_dim 2.0 +880 56 model.scoring_fct_norm 2.0 +880 56 loss.margin 3.6570017709264104 +880 56 regularizer.weight 0.06421583823296562 +880 56 optimizer.lr 0.007344394627860787 +880 56 negative_sampler.num_negs_per_pos 12.0 +880 56 training.batch_size 2.0 +880 57 model.embedding_dim 0.0 +880 57 model.scoring_fct_norm 1.0 +880 57 loss.margin 0.865769063860022 +880 57 regularizer.weight 0.2506711372294468 +880 57 optimizer.lr 0.008251871099350806 +880 57 negative_sampler.num_negs_per_pos 0.0 +880 57 training.batch_size 2.0 +880 58 model.embedding_dim 1.0 +880 58 model.scoring_fct_norm 2.0 +880 58 loss.margin 7.41436285357685 +880 58 
regularizer.weight 0.04891126914210384 +880 58 optimizer.lr 0.008454194855003677 +880 58 negative_sampler.num_negs_per_pos 91.0 +880 58 training.batch_size 1.0 +880 59 model.embedding_dim 0.0 +880 59 model.scoring_fct_norm 1.0 +880 59 loss.margin 8.422310426092523 +880 59 regularizer.weight 0.035008097526241386 +880 59 optimizer.lr 0.031098827459145616 +880 59 negative_sampler.num_negs_per_pos 24.0 +880 59 training.batch_size 2.0 +880 60 model.embedding_dim 0.0 +880 60 model.scoring_fct_norm 1.0 +880 60 loss.margin 8.552074600198175 +880 60 regularizer.weight 0.011944003391625461 +880 60 optimizer.lr 0.012280686540300087 +880 60 negative_sampler.num_negs_per_pos 9.0 +880 60 training.batch_size 2.0 +880 61 model.embedding_dim 1.0 +880 61 model.scoring_fct_norm 2.0 +880 61 loss.margin 1.5604097157157963 +880 61 regularizer.weight 0.1417088746555475 +880 61 optimizer.lr 0.006358715649779976 +880 61 negative_sampler.num_negs_per_pos 42.0 +880 61 training.batch_size 1.0 +880 62 model.embedding_dim 0.0 +880 62 model.scoring_fct_norm 2.0 +880 62 loss.margin 6.545244340068279 +880 62 regularizer.weight 0.2430281686159534 +880 62 optimizer.lr 0.0036101894185268098 +880 62 negative_sampler.num_negs_per_pos 64.0 +880 62 training.batch_size 0.0 +880 63 model.embedding_dim 2.0 +880 63 model.scoring_fct_norm 2.0 +880 63 loss.margin 2.8025852810569885 +880 63 regularizer.weight 0.012287477128696194 +880 63 optimizer.lr 0.0011042379446945898 +880 63 negative_sampler.num_negs_per_pos 11.0 +880 63 training.batch_size 1.0 +880 64 model.embedding_dim 2.0 +880 64 model.scoring_fct_norm 2.0 +880 64 loss.margin 5.710807903089246 +880 64 regularizer.weight 0.08847050670187272 +880 64 optimizer.lr 0.01069538939379383 +880 64 negative_sampler.num_negs_per_pos 69.0 +880 64 training.batch_size 1.0 +880 65 model.embedding_dim 2.0 +880 65 model.scoring_fct_norm 1.0 +880 65 loss.margin 5.994915572528738 +880 65 regularizer.weight 0.27207093127928433 +880 65 optimizer.lr 0.00760255084535348 +880 
65 negative_sampler.num_negs_per_pos 67.0 +880 65 training.batch_size 1.0 +880 66 model.embedding_dim 2.0 +880 66 model.scoring_fct_norm 1.0 +880 66 loss.margin 8.402083357227355 +880 66 regularizer.weight 0.030241726669006336 +880 66 optimizer.lr 0.005424041584757277 +880 66 negative_sampler.num_negs_per_pos 57.0 +880 66 training.batch_size 1.0 +880 67 model.embedding_dim 2.0 +880 67 model.scoring_fct_norm 2.0 +880 67 loss.margin 1.2893455062714345 +880 67 regularizer.weight 0.014727322568981644 +880 67 optimizer.lr 0.007837900509083912 +880 67 negative_sampler.num_negs_per_pos 84.0 +880 67 training.batch_size 0.0 +880 68 model.embedding_dim 1.0 +880 68 model.scoring_fct_norm 1.0 +880 68 loss.margin 5.806426519806616 +880 68 regularizer.weight 0.17201604444963128 +880 68 optimizer.lr 0.009232315177716013 +880 68 negative_sampler.num_negs_per_pos 95.0 +880 68 training.batch_size 2.0 +880 69 model.embedding_dim 0.0 +880 69 model.scoring_fct_norm 1.0 +880 69 loss.margin 9.757783934773995 +880 69 regularizer.weight 0.018197585658984883 +880 69 optimizer.lr 0.00808550892537389 +880 69 negative_sampler.num_negs_per_pos 97.0 +880 69 training.batch_size 0.0 +880 70 model.embedding_dim 2.0 +880 70 model.scoring_fct_norm 2.0 +880 70 loss.margin 6.423908173244998 +880 70 regularizer.weight 0.03532587926389606 +880 70 optimizer.lr 0.009139106096179787 +880 70 negative_sampler.num_negs_per_pos 16.0 +880 70 training.batch_size 1.0 +880 71 model.embedding_dim 0.0 +880 71 model.scoring_fct_norm 1.0 +880 71 loss.margin 9.69833103217856 +880 71 regularizer.weight 0.01135750047865786 +880 71 optimizer.lr 0.001147776072360605 +880 71 negative_sampler.num_negs_per_pos 12.0 +880 71 training.batch_size 1.0 +880 72 model.embedding_dim 2.0 +880 72 model.scoring_fct_norm 2.0 +880 72 loss.margin 6.953446150498982 +880 72 regularizer.weight 0.06200659094720928 +880 72 optimizer.lr 0.005636060836841207 +880 72 negative_sampler.num_negs_per_pos 57.0 +880 72 training.batch_size 2.0 +880 73 
model.embedding_dim 2.0 +880 73 model.scoring_fct_norm 2.0 +880 73 loss.margin 7.077553044968818 +880 73 regularizer.weight 0.07720935490507624 +880 73 optimizer.lr 0.001228610496779774 +880 73 negative_sampler.num_negs_per_pos 82.0 +880 73 training.batch_size 0.0 +880 74 model.embedding_dim 2.0 +880 74 model.scoring_fct_norm 1.0 +880 74 loss.margin 4.969394587905088 +880 74 regularizer.weight 0.06892327164271847 +880 74 optimizer.lr 0.005187430691863184 +880 74 negative_sampler.num_negs_per_pos 79.0 +880 74 training.batch_size 1.0 +880 75 model.embedding_dim 1.0 +880 75 model.scoring_fct_norm 2.0 +880 75 loss.margin 3.5487662517859517 +880 75 regularizer.weight 0.01626453068426107 +880 75 optimizer.lr 0.01844734415146988 +880 75 negative_sampler.num_negs_per_pos 4.0 +880 75 training.batch_size 0.0 +880 76 model.embedding_dim 2.0 +880 76 model.scoring_fct_norm 2.0 +880 76 loss.margin 1.89328448819377 +880 76 regularizer.weight 0.13741390911470075 +880 76 optimizer.lr 0.059193059068688235 +880 76 negative_sampler.num_negs_per_pos 48.0 +880 76 training.batch_size 2.0 +880 77 model.embedding_dim 1.0 +880 77 model.scoring_fct_norm 1.0 +880 77 loss.margin 7.959649367984334 +880 77 regularizer.weight 0.03887597162193555 +880 77 optimizer.lr 0.0659437672253936 +880 77 negative_sampler.num_negs_per_pos 8.0 +880 77 training.batch_size 0.0 +880 78 model.embedding_dim 2.0 +880 78 model.scoring_fct_norm 1.0 +880 78 loss.margin 2.4419001157999753 +880 78 regularizer.weight 0.05033816525695218 +880 78 optimizer.lr 0.001617531825072601 +880 78 negative_sampler.num_negs_per_pos 10.0 +880 78 training.batch_size 1.0 +880 79 model.embedding_dim 2.0 +880 79 model.scoring_fct_norm 2.0 +880 79 loss.margin 7.58966873264573 +880 79 regularizer.weight 0.12091588906541714 +880 79 optimizer.lr 0.0016084519461661974 +880 79 negative_sampler.num_negs_per_pos 97.0 +880 79 training.batch_size 2.0 +880 80 model.embedding_dim 1.0 +880 80 model.scoring_fct_norm 1.0 +880 80 loss.margin 
2.0166362427750926 +880 80 regularizer.weight 0.027657778512321766 +880 80 optimizer.lr 0.05763621022673382 +880 80 negative_sampler.num_negs_per_pos 11.0 +880 80 training.batch_size 2.0 +880 81 model.embedding_dim 0.0 +880 81 model.scoring_fct_norm 1.0 +880 81 loss.margin 7.0007608278805415 +880 81 regularizer.weight 0.10851757155298984 +880 81 optimizer.lr 0.010988756364683674 +880 81 negative_sampler.num_negs_per_pos 8.0 +880 81 training.batch_size 2.0 +880 82 model.embedding_dim 1.0 +880 82 model.scoring_fct_norm 1.0 +880 82 loss.margin 2.889470393932642 +880 82 regularizer.weight 0.0803838917981977 +880 82 optimizer.lr 0.023003130912229754 +880 82 negative_sampler.num_negs_per_pos 76.0 +880 82 training.batch_size 1.0 +880 83 model.embedding_dim 2.0 +880 83 model.scoring_fct_norm 2.0 +880 83 loss.margin 9.231857618513105 +880 83 regularizer.weight 0.11229147184051153 +880 83 optimizer.lr 0.027260511893077485 +880 83 negative_sampler.num_negs_per_pos 37.0 +880 83 training.batch_size 1.0 +880 84 model.embedding_dim 2.0 +880 84 model.scoring_fct_norm 2.0 +880 84 loss.margin 7.823092945135602 +880 84 regularizer.weight 0.0322977560018813 +880 84 optimizer.lr 0.0021704960993536937 +880 84 negative_sampler.num_negs_per_pos 38.0 +880 84 training.batch_size 0.0 +880 85 model.embedding_dim 1.0 +880 85 model.scoring_fct_norm 2.0 +880 85 loss.margin 2.0632828118966007 +880 85 regularizer.weight 0.15835183766301855 +880 85 optimizer.lr 0.004097795396718496 +880 85 negative_sampler.num_negs_per_pos 29.0 +880 85 training.batch_size 1.0 +880 86 model.embedding_dim 1.0 +880 86 model.scoring_fct_norm 1.0 +880 86 loss.margin 3.8814505707999993 +880 86 regularizer.weight 0.09123954180520241 +880 86 optimizer.lr 0.08728697682206997 +880 86 negative_sampler.num_negs_per_pos 37.0 +880 86 training.batch_size 1.0 +880 87 model.embedding_dim 0.0 +880 87 model.scoring_fct_norm 2.0 +880 87 loss.margin 6.462068565064502 +880 87 regularizer.weight 0.014556594453287289 +880 87 optimizer.lr 
0.009205427396259994 +880 87 negative_sampler.num_negs_per_pos 42.0 +880 87 training.batch_size 1.0 +880 88 model.embedding_dim 2.0 +880 88 model.scoring_fct_norm 1.0 +880 88 loss.margin 1.1109668354659543 +880 88 regularizer.weight 0.08620981004099695 +880 88 optimizer.lr 0.05444903141575664 +880 88 negative_sampler.num_negs_per_pos 5.0 +880 88 training.batch_size 1.0 +880 89 model.embedding_dim 2.0 +880 89 model.scoring_fct_norm 1.0 +880 89 loss.margin 5.4990084997712305 +880 89 regularizer.weight 0.020707270015852163 +880 89 optimizer.lr 0.004023143340802626 +880 89 negative_sampler.num_negs_per_pos 94.0 +880 89 training.batch_size 2.0 +880 90 model.embedding_dim 2.0 +880 90 model.scoring_fct_norm 1.0 +880 90 loss.margin 3.2422803383821903 +880 90 regularizer.weight 0.2820667982076495 +880 90 optimizer.lr 0.010171367432350573 +880 90 negative_sampler.num_negs_per_pos 12.0 +880 90 training.batch_size 0.0 +880 91 model.embedding_dim 1.0 +880 91 model.scoring_fct_norm 1.0 +880 91 loss.margin 6.911012332053128 +880 91 regularizer.weight 0.02355941731571102 +880 91 optimizer.lr 0.05584974208245735 +880 91 negative_sampler.num_negs_per_pos 9.0 +880 91 training.batch_size 2.0 +880 92 model.embedding_dim 1.0 +880 92 model.scoring_fct_norm 1.0 +880 92 loss.margin 7.1457927479337915 +880 92 regularizer.weight 0.018851724230871905 +880 92 optimizer.lr 0.039733719302196026 +880 92 negative_sampler.num_negs_per_pos 47.0 +880 92 training.batch_size 0.0 +880 93 model.embedding_dim 0.0 +880 93 model.scoring_fct_norm 1.0 +880 93 loss.margin 5.837626871795338 +880 93 regularizer.weight 0.270922724793717 +880 93 optimizer.lr 0.06707490953194903 +880 93 negative_sampler.num_negs_per_pos 6.0 +880 93 training.batch_size 2.0 +880 94 model.embedding_dim 0.0 +880 94 model.scoring_fct_norm 2.0 +880 94 loss.margin 4.962281370160615 +880 94 regularizer.weight 0.016104492806012116 +880 94 optimizer.lr 0.030375044054492713 +880 94 negative_sampler.num_negs_per_pos 95.0 +880 94 
training.batch_size 1.0 +880 95 model.embedding_dim 0.0 +880 95 model.scoring_fct_norm 2.0 +880 95 loss.margin 6.097935931190487 +880 95 regularizer.weight 0.22026934051340502 +880 95 optimizer.lr 0.007629372589777687 +880 95 negative_sampler.num_negs_per_pos 77.0 +880 95 training.batch_size 1.0 +880 96 model.embedding_dim 0.0 +880 96 model.scoring_fct_norm 1.0 +880 96 loss.margin 8.50156067939389 +880 96 regularizer.weight 0.08734744068201873 +880 96 optimizer.lr 0.019476346959161225 +880 96 negative_sampler.num_negs_per_pos 93.0 +880 96 training.batch_size 2.0 +880 97 model.embedding_dim 0.0 +880 97 model.scoring_fct_norm 2.0 +880 97 loss.margin 6.836517132475643 +880 97 regularizer.weight 0.010420378086001703 +880 97 optimizer.lr 0.001504518007713409 +880 97 negative_sampler.num_negs_per_pos 53.0 +880 97 training.batch_size 0.0 +880 98 model.embedding_dim 0.0 +880 98 model.scoring_fct_norm 1.0 +880 98 loss.margin 0.5795092960679822 +880 98 regularizer.weight 0.011460179156221731 +880 98 optimizer.lr 0.004673836655098741 +880 98 negative_sampler.num_negs_per_pos 98.0 +880 98 training.batch_size 2.0 +880 99 model.embedding_dim 1.0 +880 99 model.scoring_fct_norm 2.0 +880 99 loss.margin 7.0270932338800485 +880 99 regularizer.weight 0.1634982771680798 +880 99 optimizer.lr 0.06077388298181602 +880 99 negative_sampler.num_negs_per_pos 12.0 +880 99 training.batch_size 0.0 +880 100 model.embedding_dim 0.0 +880 100 model.scoring_fct_norm 2.0 +880 100 loss.margin 9.134501729228193 +880 100 regularizer.weight 0.14312734644307984 +880 100 optimizer.lr 0.04381767835974897 +880 100 negative_sampler.num_negs_per_pos 58.0 +880 100 training.batch_size 1.0 +880 1 dataset """kinships""" +880 1 model """transh""" +880 1 loss """marginranking""" +880 1 regularizer """transh""" +880 1 optimizer """adam""" +880 1 training_loop """owa""" +880 1 negative_sampler """basic""" +880 1 evaluator """rankbased""" +880 2 dataset """kinships""" +880 2 model """transh""" +880 2 loss 
"""marginranking""" +880 2 regularizer """transh""" +880 2 optimizer """adam""" +880 2 training_loop """owa""" +880 2 negative_sampler """basic""" +880 2 evaluator """rankbased""" +880 3 dataset """kinships""" +880 3 model """transh""" +880 3 loss """marginranking""" +880 3 regularizer """transh""" +880 3 optimizer """adam""" +880 3 training_loop """owa""" +880 3 negative_sampler """basic""" +880 3 evaluator """rankbased""" +880 4 dataset """kinships""" +880 4 model """transh""" +880 4 loss """marginranking""" +880 4 regularizer """transh""" +880 4 optimizer """adam""" +880 4 training_loop """owa""" +880 4 negative_sampler """basic""" +880 4 evaluator """rankbased""" +880 5 dataset """kinships""" +880 5 model """transh""" +880 5 loss """marginranking""" +880 5 regularizer """transh""" +880 5 optimizer """adam""" +880 5 training_loop """owa""" +880 5 negative_sampler """basic""" +880 5 evaluator """rankbased""" +880 6 dataset """kinships""" +880 6 model """transh""" +880 6 loss """marginranking""" +880 6 regularizer """transh""" +880 6 optimizer """adam""" +880 6 training_loop """owa""" +880 6 negative_sampler """basic""" +880 6 evaluator """rankbased""" +880 7 dataset """kinships""" +880 7 model """transh""" +880 7 loss """marginranking""" +880 7 regularizer """transh""" +880 7 optimizer """adam""" +880 7 training_loop """owa""" +880 7 negative_sampler """basic""" +880 7 evaluator """rankbased""" +880 8 dataset """kinships""" +880 8 model """transh""" +880 8 loss """marginranking""" +880 8 regularizer """transh""" +880 8 optimizer """adam""" +880 8 training_loop """owa""" +880 8 negative_sampler """basic""" +880 8 evaluator """rankbased""" +880 9 dataset """kinships""" +880 9 model """transh""" +880 9 loss """marginranking""" +880 9 regularizer """transh""" +880 9 optimizer """adam""" +880 9 training_loop """owa""" +880 9 negative_sampler """basic""" +880 9 evaluator """rankbased""" +880 10 dataset """kinships""" +880 10 model """transh""" +880 10 loss 
"""marginranking""" +880 10 regularizer """transh""" +880 10 optimizer """adam""" +880 10 training_loop """owa""" +880 10 negative_sampler """basic""" +880 10 evaluator """rankbased""" +880 11 dataset """kinships""" +880 11 model """transh""" +880 11 loss """marginranking""" +880 11 regularizer """transh""" +880 11 optimizer """adam""" +880 11 training_loop """owa""" +880 11 negative_sampler """basic""" +880 11 evaluator """rankbased""" +880 12 dataset """kinships""" +880 12 model """transh""" +880 12 loss """marginranking""" +880 12 regularizer """transh""" +880 12 optimizer """adam""" +880 12 training_loop """owa""" +880 12 negative_sampler """basic""" +880 12 evaluator """rankbased""" +880 13 dataset """kinships""" +880 13 model """transh""" +880 13 loss """marginranking""" +880 13 regularizer """transh""" +880 13 optimizer """adam""" +880 13 training_loop """owa""" +880 13 negative_sampler """basic""" +880 13 evaluator """rankbased""" +880 14 dataset """kinships""" +880 14 model """transh""" +880 14 loss """marginranking""" +880 14 regularizer """transh""" +880 14 optimizer """adam""" +880 14 training_loop """owa""" +880 14 negative_sampler """basic""" +880 14 evaluator """rankbased""" +880 15 dataset """kinships""" +880 15 model """transh""" +880 15 loss """marginranking""" +880 15 regularizer """transh""" +880 15 optimizer """adam""" +880 15 training_loop """owa""" +880 15 negative_sampler """basic""" +880 15 evaluator """rankbased""" +880 16 dataset """kinships""" +880 16 model """transh""" +880 16 loss """marginranking""" +880 16 regularizer """transh""" +880 16 optimizer """adam""" +880 16 training_loop """owa""" +880 16 negative_sampler """basic""" +880 16 evaluator """rankbased""" +880 17 dataset """kinships""" +880 17 model """transh""" +880 17 loss """marginranking""" +880 17 regularizer """transh""" +880 17 optimizer """adam""" +880 17 training_loop """owa""" +880 17 negative_sampler """basic""" +880 17 evaluator """rankbased""" +880 18 dataset 
"""kinships""" +880 18 model """transh""" +880 18 loss """marginranking""" +880 18 regularizer """transh""" +880 18 optimizer """adam""" +880 18 training_loop """owa""" +880 18 negative_sampler """basic""" +880 18 evaluator """rankbased""" +880 19 dataset """kinships""" +880 19 model """transh""" +880 19 loss """marginranking""" +880 19 regularizer """transh""" +880 19 optimizer """adam""" +880 19 training_loop """owa""" +880 19 negative_sampler """basic""" +880 19 evaluator """rankbased""" +880 20 dataset """kinships""" +880 20 model """transh""" +880 20 loss """marginranking""" +880 20 regularizer """transh""" +880 20 optimizer """adam""" +880 20 training_loop """owa""" +880 20 negative_sampler """basic""" +880 20 evaluator """rankbased""" +880 21 dataset """kinships""" +880 21 model """transh""" +880 21 loss """marginranking""" +880 21 regularizer """transh""" +880 21 optimizer """adam""" +880 21 training_loop """owa""" +880 21 negative_sampler """basic""" +880 21 evaluator """rankbased""" +880 22 dataset """kinships""" +880 22 model """transh""" +880 22 loss """marginranking""" +880 22 regularizer """transh""" +880 22 optimizer """adam""" +880 22 training_loop """owa""" +880 22 negative_sampler """basic""" +880 22 evaluator """rankbased""" +880 23 dataset """kinships""" +880 23 model """transh""" +880 23 loss """marginranking""" +880 23 regularizer """transh""" +880 23 optimizer """adam""" +880 23 training_loop """owa""" +880 23 negative_sampler """basic""" +880 23 evaluator """rankbased""" +880 24 dataset """kinships""" +880 24 model """transh""" +880 24 loss """marginranking""" +880 24 regularizer """transh""" +880 24 optimizer """adam""" +880 24 training_loop """owa""" +880 24 negative_sampler """basic""" +880 24 evaluator """rankbased""" +880 25 dataset """kinships""" +880 25 model """transh""" +880 25 loss """marginranking""" +880 25 regularizer """transh""" +880 25 optimizer """adam""" +880 25 training_loop """owa""" +880 25 negative_sampler """basic""" 
+880 25 evaluator """rankbased""" +880 26 dataset """kinships""" +880 26 model """transh""" +880 26 loss """marginranking""" +880 26 regularizer """transh""" +880 26 optimizer """adam""" +880 26 training_loop """owa""" +880 26 negative_sampler """basic""" +880 26 evaluator """rankbased""" +880 27 dataset """kinships""" +880 27 model """transh""" +880 27 loss """marginranking""" +880 27 regularizer """transh""" +880 27 optimizer """adam""" +880 27 training_loop """owa""" +880 27 negative_sampler """basic""" +880 27 evaluator """rankbased""" +880 28 dataset """kinships""" +880 28 model """transh""" +880 28 loss """marginranking""" +880 28 regularizer """transh""" +880 28 optimizer """adam""" +880 28 training_loop """owa""" +880 28 negative_sampler """basic""" +880 28 evaluator """rankbased""" +880 29 dataset """kinships""" +880 29 model """transh""" +880 29 loss """marginranking""" +880 29 regularizer """transh""" +880 29 optimizer """adam""" +880 29 training_loop """owa""" +880 29 negative_sampler """basic""" +880 29 evaluator """rankbased""" +880 30 dataset """kinships""" +880 30 model """transh""" +880 30 loss """marginranking""" +880 30 regularizer """transh""" +880 30 optimizer """adam""" +880 30 training_loop """owa""" +880 30 negative_sampler """basic""" +880 30 evaluator """rankbased""" +880 31 dataset """kinships""" +880 31 model """transh""" +880 31 loss """marginranking""" +880 31 regularizer """transh""" +880 31 optimizer """adam""" +880 31 training_loop """owa""" +880 31 negative_sampler """basic""" +880 31 evaluator """rankbased""" +880 32 dataset """kinships""" +880 32 model """transh""" +880 32 loss """marginranking""" +880 32 regularizer """transh""" +880 32 optimizer """adam""" +880 32 training_loop """owa""" +880 32 negative_sampler """basic""" +880 32 evaluator """rankbased""" +880 33 dataset """kinships""" +880 33 model """transh""" +880 33 loss """marginranking""" +880 33 regularizer """transh""" +880 33 optimizer """adam""" +880 33 
training_loop """owa""" +880 33 negative_sampler """basic""" +880 33 evaluator """rankbased""" +880 34 dataset """kinships""" +880 34 model """transh""" +880 34 loss """marginranking""" +880 34 regularizer """transh""" +880 34 optimizer """adam""" +880 34 training_loop """owa""" +880 34 negative_sampler """basic""" +880 34 evaluator """rankbased""" +880 35 dataset """kinships""" +880 35 model """transh""" +880 35 loss """marginranking""" +880 35 regularizer """transh""" +880 35 optimizer """adam""" +880 35 training_loop """owa""" +880 35 negative_sampler """basic""" +880 35 evaluator """rankbased""" +880 36 dataset """kinships""" +880 36 model """transh""" +880 36 loss """marginranking""" +880 36 regularizer """transh""" +880 36 optimizer """adam""" +880 36 training_loop """owa""" +880 36 negative_sampler """basic""" +880 36 evaluator """rankbased""" +880 37 dataset """kinships""" +880 37 model """transh""" +880 37 loss """marginranking""" +880 37 regularizer """transh""" +880 37 optimizer """adam""" +880 37 training_loop """owa""" +880 37 negative_sampler """basic""" +880 37 evaluator """rankbased""" +880 38 dataset """kinships""" +880 38 model """transh""" +880 38 loss """marginranking""" +880 38 regularizer """transh""" +880 38 optimizer """adam""" +880 38 training_loop """owa""" +880 38 negative_sampler """basic""" +880 38 evaluator """rankbased""" +880 39 dataset """kinships""" +880 39 model """transh""" +880 39 loss """marginranking""" +880 39 regularizer """transh""" +880 39 optimizer """adam""" +880 39 training_loop """owa""" +880 39 negative_sampler """basic""" +880 39 evaluator """rankbased""" +880 40 dataset """kinships""" +880 40 model """transh""" +880 40 loss """marginranking""" +880 40 regularizer """transh""" +880 40 optimizer """adam""" +880 40 training_loop """owa""" +880 40 negative_sampler """basic""" +880 40 evaluator """rankbased""" +880 41 dataset """kinships""" +880 41 model """transh""" +880 41 loss """marginranking""" +880 41 regularizer 
"""transh""" +880 41 optimizer """adam""" +880 41 training_loop """owa""" +880 41 negative_sampler """basic""" +880 41 evaluator """rankbased""" +880 42 dataset """kinships""" +880 42 model """transh""" +880 42 loss """marginranking""" +880 42 regularizer """transh""" +880 42 optimizer """adam""" +880 42 training_loop """owa""" +880 42 negative_sampler """basic""" +880 42 evaluator """rankbased""" +880 43 dataset """kinships""" +880 43 model """transh""" +880 43 loss """marginranking""" +880 43 regularizer """transh""" +880 43 optimizer """adam""" +880 43 training_loop """owa""" +880 43 negative_sampler """basic""" +880 43 evaluator """rankbased""" +880 44 dataset """kinships""" +880 44 model """transh""" +880 44 loss """marginranking""" +880 44 regularizer """transh""" +880 44 optimizer """adam""" +880 44 training_loop """owa""" +880 44 negative_sampler """basic""" +880 44 evaluator """rankbased""" +880 45 dataset """kinships""" +880 45 model """transh""" +880 45 loss """marginranking""" +880 45 regularizer """transh""" +880 45 optimizer """adam""" +880 45 training_loop """owa""" +880 45 negative_sampler """basic""" +880 45 evaluator """rankbased""" +880 46 dataset """kinships""" +880 46 model """transh""" +880 46 loss """marginranking""" +880 46 regularizer """transh""" +880 46 optimizer """adam""" +880 46 training_loop """owa""" +880 46 negative_sampler """basic""" +880 46 evaluator """rankbased""" +880 47 dataset """kinships""" +880 47 model """transh""" +880 47 loss """marginranking""" +880 47 regularizer """transh""" +880 47 optimizer """adam""" +880 47 training_loop """owa""" +880 47 negative_sampler """basic""" +880 47 evaluator """rankbased""" +880 48 dataset """kinships""" +880 48 model """transh""" +880 48 loss """marginranking""" +880 48 regularizer """transh""" +880 48 optimizer """adam""" +880 48 training_loop """owa""" +880 48 negative_sampler """basic""" +880 48 evaluator """rankbased""" +880 49 dataset """kinships""" +880 49 model """transh""" +880 
49 loss """marginranking""" +880 49 regularizer """transh""" +880 49 optimizer """adam""" +880 49 training_loop """owa""" +880 49 negative_sampler """basic""" +880 49 evaluator """rankbased""" +880 50 dataset """kinships""" +880 50 model """transh""" +880 50 loss """marginranking""" +880 50 regularizer """transh""" +880 50 optimizer """adam""" +880 50 training_loop """owa""" +880 50 negative_sampler """basic""" +880 50 evaluator """rankbased""" +880 51 dataset """kinships""" +880 51 model """transh""" +880 51 loss """marginranking""" +880 51 regularizer """transh""" +880 51 optimizer """adam""" +880 51 training_loop """owa""" +880 51 negative_sampler """basic""" +880 51 evaluator """rankbased""" +880 52 dataset """kinships""" +880 52 model """transh""" +880 52 loss """marginranking""" +880 52 regularizer """transh""" +880 52 optimizer """adam""" +880 52 training_loop """owa""" +880 52 negative_sampler """basic""" +880 52 evaluator """rankbased""" +880 53 dataset """kinships""" +880 53 model """transh""" +880 53 loss """marginranking""" +880 53 regularizer """transh""" +880 53 optimizer """adam""" +880 53 training_loop """owa""" +880 53 negative_sampler """basic""" +880 53 evaluator """rankbased""" +880 54 dataset """kinships""" +880 54 model """transh""" +880 54 loss """marginranking""" +880 54 regularizer """transh""" +880 54 optimizer """adam""" +880 54 training_loop """owa""" +880 54 negative_sampler """basic""" +880 54 evaluator """rankbased""" +880 55 dataset """kinships""" +880 55 model """transh""" +880 55 loss """marginranking""" +880 55 regularizer """transh""" +880 55 optimizer """adam""" +880 55 training_loop """owa""" +880 55 negative_sampler """basic""" +880 55 evaluator """rankbased""" +880 56 dataset """kinships""" +880 56 model """transh""" +880 56 loss """marginranking""" +880 56 regularizer """transh""" +880 56 optimizer """adam""" +880 56 training_loop """owa""" +880 56 negative_sampler """basic""" +880 56 evaluator """rankbased""" +880 57 
dataset """kinships""" +880 57 model """transh""" +880 57 loss """marginranking""" +880 57 regularizer """transh""" +880 57 optimizer """adam""" +880 57 training_loop """owa""" +880 57 negative_sampler """basic""" +880 57 evaluator """rankbased""" +880 58 dataset """kinships""" +880 58 model """transh""" +880 58 loss """marginranking""" +880 58 regularizer """transh""" +880 58 optimizer """adam""" +880 58 training_loop """owa""" +880 58 negative_sampler """basic""" +880 58 evaluator """rankbased""" +880 59 dataset """kinships""" +880 59 model """transh""" +880 59 loss """marginranking""" +880 59 regularizer """transh""" +880 59 optimizer """adam""" +880 59 training_loop """owa""" +880 59 negative_sampler """basic""" +880 59 evaluator """rankbased""" +880 60 dataset """kinships""" +880 60 model """transh""" +880 60 loss """marginranking""" +880 60 regularizer """transh""" +880 60 optimizer """adam""" +880 60 training_loop """owa""" +880 60 negative_sampler """basic""" +880 60 evaluator """rankbased""" +880 61 dataset """kinships""" +880 61 model """transh""" +880 61 loss """marginranking""" +880 61 regularizer """transh""" +880 61 optimizer """adam""" +880 61 training_loop """owa""" +880 61 negative_sampler """basic""" +880 61 evaluator """rankbased""" +880 62 dataset """kinships""" +880 62 model """transh""" +880 62 loss """marginranking""" +880 62 regularizer """transh""" +880 62 optimizer """adam""" +880 62 training_loop """owa""" +880 62 negative_sampler """basic""" +880 62 evaluator """rankbased""" +880 63 dataset """kinships""" +880 63 model """transh""" +880 63 loss """marginranking""" +880 63 regularizer """transh""" +880 63 optimizer """adam""" +880 63 training_loop """owa""" +880 63 negative_sampler """basic""" +880 63 evaluator """rankbased""" +880 64 dataset """kinships""" +880 64 model """transh""" +880 64 loss """marginranking""" +880 64 regularizer """transh""" +880 64 optimizer """adam""" +880 64 training_loop """owa""" +880 64 negative_sampler 
"""basic""" +880 64 evaluator """rankbased""" +880 65 dataset """kinships""" +880 65 model """transh""" +880 65 loss """marginranking""" +880 65 regularizer """transh""" +880 65 optimizer """adam""" +880 65 training_loop """owa""" +880 65 negative_sampler """basic""" +880 65 evaluator """rankbased""" +880 66 dataset """kinships""" +880 66 model """transh""" +880 66 loss """marginranking""" +880 66 regularizer """transh""" +880 66 optimizer """adam""" +880 66 training_loop """owa""" +880 66 negative_sampler """basic""" +880 66 evaluator """rankbased""" +880 67 dataset """kinships""" +880 67 model """transh""" +880 67 loss """marginranking""" +880 67 regularizer """transh""" +880 67 optimizer """adam""" +880 67 training_loop """owa""" +880 67 negative_sampler """basic""" +880 67 evaluator """rankbased""" +880 68 dataset """kinships""" +880 68 model """transh""" +880 68 loss """marginranking""" +880 68 regularizer """transh""" +880 68 optimizer """adam""" +880 68 training_loop """owa""" +880 68 negative_sampler """basic""" +880 68 evaluator """rankbased""" +880 69 dataset """kinships""" +880 69 model """transh""" +880 69 loss """marginranking""" +880 69 regularizer """transh""" +880 69 optimizer """adam""" +880 69 training_loop """owa""" +880 69 negative_sampler """basic""" +880 69 evaluator """rankbased""" +880 70 dataset """kinships""" +880 70 model """transh""" +880 70 loss """marginranking""" +880 70 regularizer """transh""" +880 70 optimizer """adam""" +880 70 training_loop """owa""" +880 70 negative_sampler """basic""" +880 70 evaluator """rankbased""" +880 71 dataset """kinships""" +880 71 model """transh""" +880 71 loss """marginranking""" +880 71 regularizer """transh""" +880 71 optimizer """adam""" +880 71 training_loop """owa""" +880 71 negative_sampler """basic""" +880 71 evaluator """rankbased""" +880 72 dataset """kinships""" +880 72 model """transh""" +880 72 loss """marginranking""" +880 72 regularizer """transh""" +880 72 optimizer """adam""" +880 72 
training_loop """owa""" +880 72 negative_sampler """basic""" +880 72 evaluator """rankbased""" +880 73 dataset """kinships""" +880 73 model """transh""" +880 73 loss """marginranking""" +880 73 regularizer """transh""" +880 73 optimizer """adam""" +880 73 training_loop """owa""" +880 73 negative_sampler """basic""" +880 73 evaluator """rankbased""" +880 74 dataset """kinships""" +880 74 model """transh""" +880 74 loss """marginranking""" +880 74 regularizer """transh""" +880 74 optimizer """adam""" +880 74 training_loop """owa""" +880 74 negative_sampler """basic""" +880 74 evaluator """rankbased""" +880 75 dataset """kinships""" +880 75 model """transh""" +880 75 loss """marginranking""" +880 75 regularizer """transh""" +880 75 optimizer """adam""" +880 75 training_loop """owa""" +880 75 negative_sampler """basic""" +880 75 evaluator """rankbased""" +880 76 dataset """kinships""" +880 76 model """transh""" +880 76 loss """marginranking""" +880 76 regularizer """transh""" +880 76 optimizer """adam""" +880 76 training_loop """owa""" +880 76 negative_sampler """basic""" +880 76 evaluator """rankbased""" +880 77 dataset """kinships""" +880 77 model """transh""" +880 77 loss """marginranking""" +880 77 regularizer """transh""" +880 77 optimizer """adam""" +880 77 training_loop """owa""" +880 77 negative_sampler """basic""" +880 77 evaluator """rankbased""" +880 78 dataset """kinships""" +880 78 model """transh""" +880 78 loss """marginranking""" +880 78 regularizer """transh""" +880 78 optimizer """adam""" +880 78 training_loop """owa""" +880 78 negative_sampler """basic""" +880 78 evaluator """rankbased""" +880 79 dataset """kinships""" +880 79 model """transh""" +880 79 loss """marginranking""" +880 79 regularizer """transh""" +880 79 optimizer """adam""" +880 79 training_loop """owa""" +880 79 negative_sampler """basic""" +880 79 evaluator """rankbased""" +880 80 dataset """kinships""" +880 80 model """transh""" +880 80 loss """marginranking""" +880 80 regularizer 
"""transh""" +880 80 optimizer """adam""" +880 80 training_loop """owa""" +880 80 negative_sampler """basic""" +880 80 evaluator """rankbased""" +880 81 dataset """kinships""" +880 81 model """transh""" +880 81 loss """marginranking""" +880 81 regularizer """transh""" +880 81 optimizer """adam""" +880 81 training_loop """owa""" +880 81 negative_sampler """basic""" +880 81 evaluator """rankbased""" +880 82 dataset """kinships""" +880 82 model """transh""" +880 82 loss """marginranking""" +880 82 regularizer """transh""" +880 82 optimizer """adam""" +880 82 training_loop """owa""" +880 82 negative_sampler """basic""" +880 82 evaluator """rankbased""" +880 83 dataset """kinships""" +880 83 model """transh""" +880 83 loss """marginranking""" +880 83 regularizer """transh""" +880 83 optimizer """adam""" +880 83 training_loop """owa""" +880 83 negative_sampler """basic""" +880 83 evaluator """rankbased""" +880 84 dataset """kinships""" +880 84 model """transh""" +880 84 loss """marginranking""" +880 84 regularizer """transh""" +880 84 optimizer """adam""" +880 84 training_loop """owa""" +880 84 negative_sampler """basic""" +880 84 evaluator """rankbased""" +880 85 dataset """kinships""" +880 85 model """transh""" +880 85 loss """marginranking""" +880 85 regularizer """transh""" +880 85 optimizer """adam""" +880 85 training_loop """owa""" +880 85 negative_sampler """basic""" +880 85 evaluator """rankbased""" +880 86 dataset """kinships""" +880 86 model """transh""" +880 86 loss """marginranking""" +880 86 regularizer """transh""" +880 86 optimizer """adam""" +880 86 training_loop """owa""" +880 86 negative_sampler """basic""" +880 86 evaluator """rankbased""" +880 87 dataset """kinships""" +880 87 model """transh""" +880 87 loss """marginranking""" +880 87 regularizer """transh""" +880 87 optimizer """adam""" +880 87 training_loop """owa""" +880 87 negative_sampler """basic""" +880 87 evaluator """rankbased""" +880 88 dataset """kinships""" +880 88 model """transh""" +880 
88 loss """marginranking""" +880 88 regularizer """transh""" +880 88 optimizer """adam""" +880 88 training_loop """owa""" +880 88 negative_sampler """basic""" +880 88 evaluator """rankbased""" +880 89 dataset """kinships""" +880 89 model """transh""" +880 89 loss """marginranking""" +880 89 regularizer """transh""" +880 89 optimizer """adam""" +880 89 training_loop """owa""" +880 89 negative_sampler """basic""" +880 89 evaluator """rankbased""" +880 90 dataset """kinships""" +880 90 model """transh""" +880 90 loss """marginranking""" +880 90 regularizer """transh""" +880 90 optimizer """adam""" +880 90 training_loop """owa""" +880 90 negative_sampler """basic""" +880 90 evaluator """rankbased""" +880 91 dataset """kinships""" +880 91 model """transh""" +880 91 loss """marginranking""" +880 91 regularizer """transh""" +880 91 optimizer """adam""" +880 91 training_loop """owa""" +880 91 negative_sampler """basic""" +880 91 evaluator """rankbased""" +880 92 dataset """kinships""" +880 92 model """transh""" +880 92 loss """marginranking""" +880 92 regularizer """transh""" +880 92 optimizer """adam""" +880 92 training_loop """owa""" +880 92 negative_sampler """basic""" +880 92 evaluator """rankbased""" +880 93 dataset """kinships""" +880 93 model """transh""" +880 93 loss """marginranking""" +880 93 regularizer """transh""" +880 93 optimizer """adam""" +880 93 training_loop """owa""" +880 93 negative_sampler """basic""" +880 93 evaluator """rankbased""" +880 94 dataset """kinships""" +880 94 model """transh""" +880 94 loss """marginranking""" +880 94 regularizer """transh""" +880 94 optimizer """adam""" +880 94 training_loop """owa""" +880 94 negative_sampler """basic""" +880 94 evaluator """rankbased""" +880 95 dataset """kinships""" +880 95 model """transh""" +880 95 loss """marginranking""" +880 95 regularizer """transh""" +880 95 optimizer """adam""" +880 95 training_loop """owa""" +880 95 negative_sampler """basic""" +880 95 evaluator """rankbased""" +880 96 
dataset """kinships""" +880 96 model """transh""" +880 96 loss """marginranking""" +880 96 regularizer """transh""" +880 96 optimizer """adam""" +880 96 training_loop """owa""" +880 96 negative_sampler """basic""" +880 96 evaluator """rankbased""" +880 97 dataset """kinships""" +880 97 model """transh""" +880 97 loss """marginranking""" +880 97 regularizer """transh""" +880 97 optimizer """adam""" +880 97 training_loop """owa""" +880 97 negative_sampler """basic""" +880 97 evaluator """rankbased""" +880 98 dataset """kinships""" +880 98 model """transh""" +880 98 loss """marginranking""" +880 98 regularizer """transh""" +880 98 optimizer """adam""" +880 98 training_loop """owa""" +880 98 negative_sampler """basic""" +880 98 evaluator """rankbased""" +880 99 dataset """kinships""" +880 99 model """transh""" +880 99 loss """marginranking""" +880 99 regularizer """transh""" +880 99 optimizer """adam""" +880 99 training_loop """owa""" +880 99 negative_sampler """basic""" +880 99 evaluator """rankbased""" +880 100 dataset """kinships""" +880 100 model """transh""" +880 100 loss """marginranking""" +880 100 regularizer """transh""" +880 100 optimizer """adam""" +880 100 training_loop """owa""" +880 100 negative_sampler """basic""" +880 100 evaluator """rankbased""" +881 1 model.embedding_dim 0.0 +881 1 model.scoring_fct_norm 2.0 +881 1 loss.margin 3.381170373816725 +881 1 loss.adversarial_temperature 0.9343960364079188 +881 1 regularizer.weight 0.19791996469256734 +881 1 optimizer.lr 0.0066901981437457745 +881 1 negative_sampler.num_negs_per_pos 46.0 +881 1 training.batch_size 0.0 +881 2 model.embedding_dim 1.0 +881 2 model.scoring_fct_norm 2.0 +881 2 loss.margin 25.779739820188915 +881 2 loss.adversarial_temperature 0.6030774750950155 +881 2 regularizer.weight 0.1062312518483436 +881 2 optimizer.lr 0.04397076333030622 +881 2 negative_sampler.num_negs_per_pos 73.0 +881 2 training.batch_size 0.0 +881 3 model.embedding_dim 2.0 +881 3 model.scoring_fct_norm 2.0 +881 3 
loss.margin 26.071451378569446 +881 3 loss.adversarial_temperature 0.5754903319473487 +881 3 regularizer.weight 0.023836429969028666 +881 3 optimizer.lr 0.001553761580456491 +881 3 negative_sampler.num_negs_per_pos 29.0 +881 3 training.batch_size 1.0 +881 4 model.embedding_dim 2.0 +881 4 model.scoring_fct_norm 2.0 +881 4 loss.margin 22.397825091627393 +881 4 loss.adversarial_temperature 0.70379193543418 +881 4 regularizer.weight 0.014163805244702165 +881 4 optimizer.lr 0.005639279680482049 +881 4 negative_sampler.num_negs_per_pos 67.0 +881 4 training.batch_size 1.0 +881 5 model.embedding_dim 2.0 +881 5 model.scoring_fct_norm 1.0 +881 5 loss.margin 25.951871249031132 +881 5 loss.adversarial_temperature 0.8629932933181526 +881 5 regularizer.weight 0.2759955194711078 +881 5 optimizer.lr 0.035273983069407246 +881 5 negative_sampler.num_negs_per_pos 22.0 +881 5 training.batch_size 1.0 +881 6 model.embedding_dim 1.0 +881 6 model.scoring_fct_norm 1.0 +881 6 loss.margin 12.55666405990032 +881 6 loss.adversarial_temperature 0.29625946176197326 +881 6 regularizer.weight 0.014101742344073832 +881 6 optimizer.lr 0.004867820335110317 +881 6 negative_sampler.num_negs_per_pos 8.0 +881 6 training.batch_size 1.0 +881 7 model.embedding_dim 0.0 +881 7 model.scoring_fct_norm 2.0 +881 7 loss.margin 10.36740625861343 +881 7 loss.adversarial_temperature 0.67398474811683 +881 7 regularizer.weight 0.143287220982583 +881 7 optimizer.lr 0.002975381844758178 +881 7 negative_sampler.num_negs_per_pos 69.0 +881 7 training.batch_size 1.0 +881 8 model.embedding_dim 1.0 +881 8 model.scoring_fct_norm 2.0 +881 8 loss.margin 9.246490598899342 +881 8 loss.adversarial_temperature 0.8895326065172365 +881 8 regularizer.weight 0.030699895146469633 +881 8 optimizer.lr 0.015738332251888632 +881 8 negative_sampler.num_negs_per_pos 14.0 +881 8 training.batch_size 2.0 +881 9 model.embedding_dim 2.0 +881 9 model.scoring_fct_norm 2.0 +881 9 loss.margin 10.095044930174987 +881 9 loss.adversarial_temperature 
0.7623301757871318 +881 9 regularizer.weight 0.025760972504268922 +881 9 optimizer.lr 0.06875921015861235 +881 9 negative_sampler.num_negs_per_pos 12.0 +881 9 training.batch_size 2.0 +881 10 model.embedding_dim 2.0 +881 10 model.scoring_fct_norm 1.0 +881 10 loss.margin 28.39947752777343 +881 10 loss.adversarial_temperature 0.5416451240356349 +881 10 regularizer.weight 0.044966785809444236 +881 10 optimizer.lr 0.00157086741770472 +881 10 negative_sampler.num_negs_per_pos 54.0 +881 10 training.batch_size 1.0 +881 11 model.embedding_dim 0.0 +881 11 model.scoring_fct_norm 2.0 +881 11 loss.margin 29.052597481280415 +881 11 loss.adversarial_temperature 0.15182169713278817 +881 11 regularizer.weight 0.11853274445561146 +881 11 optimizer.lr 0.010187309262042622 +881 11 negative_sampler.num_negs_per_pos 91.0 +881 11 training.batch_size 1.0 +881 12 model.embedding_dim 0.0 +881 12 model.scoring_fct_norm 1.0 +881 12 loss.margin 2.819264201248397 +881 12 loss.adversarial_temperature 0.3645612884612601 +881 12 regularizer.weight 0.13534961369494647 +881 12 optimizer.lr 0.047863263915425114 +881 12 negative_sampler.num_negs_per_pos 48.0 +881 12 training.batch_size 1.0 +881 13 model.embedding_dim 0.0 +881 13 model.scoring_fct_norm 1.0 +881 13 loss.margin 12.371644619682366 +881 13 loss.adversarial_temperature 0.4296357196856089 +881 13 regularizer.weight 0.059865417897300266 +881 13 optimizer.lr 0.08868106199310558 +881 13 negative_sampler.num_negs_per_pos 18.0 +881 13 training.batch_size 2.0 +881 14 model.embedding_dim 1.0 +881 14 model.scoring_fct_norm 2.0 +881 14 loss.margin 28.881515479385737 +881 14 loss.adversarial_temperature 0.39561466693333347 +881 14 regularizer.weight 0.15219475305040098 +881 14 optimizer.lr 0.03937747075621072 +881 14 negative_sampler.num_negs_per_pos 51.0 +881 14 training.batch_size 1.0 +881 15 model.embedding_dim 0.0 +881 15 model.scoring_fct_norm 1.0 +881 15 loss.margin 7.949567381827796 +881 15 loss.adversarial_temperature 0.3546626386959222 +881 
15 regularizer.weight 0.11268293797588737 +881 15 optimizer.lr 0.024270214002036942 +881 15 negative_sampler.num_negs_per_pos 99.0 +881 15 training.batch_size 1.0 +881 16 model.embedding_dim 0.0 +881 16 model.scoring_fct_norm 1.0 +881 16 loss.margin 15.669395282873793 +881 16 loss.adversarial_temperature 0.3345458863470112 +881 16 regularizer.weight 0.24882694304416694 +881 16 optimizer.lr 0.007913541657931686 +881 16 negative_sampler.num_negs_per_pos 69.0 +881 16 training.batch_size 2.0 +881 17 model.embedding_dim 2.0 +881 17 model.scoring_fct_norm 1.0 +881 17 loss.margin 7.640778184152637 +881 17 loss.adversarial_temperature 0.5988364869132585 +881 17 regularizer.weight 0.21033516529587526 +881 17 optimizer.lr 0.0026295808977315495 +881 17 negative_sampler.num_negs_per_pos 88.0 +881 17 training.batch_size 1.0 +881 18 model.embedding_dim 1.0 +881 18 model.scoring_fct_norm 2.0 +881 18 loss.margin 17.54799920278907 +881 18 loss.adversarial_temperature 0.7452405920108196 +881 18 regularizer.weight 0.07948549128487191 +881 18 optimizer.lr 0.0016934475553789827 +881 18 negative_sampler.num_negs_per_pos 55.0 +881 18 training.batch_size 2.0 +881 19 model.embedding_dim 2.0 +881 19 model.scoring_fct_norm 2.0 +881 19 loss.margin 1.7647504821285946 +881 19 loss.adversarial_temperature 0.9122913241922046 +881 19 regularizer.weight 0.09454276249271974 +881 19 optimizer.lr 0.002327655422642151 +881 19 negative_sampler.num_negs_per_pos 58.0 +881 19 training.batch_size 2.0 +881 20 model.embedding_dim 0.0 +881 20 model.scoring_fct_norm 2.0 +881 20 loss.margin 2.540021264890358 +881 20 loss.adversarial_temperature 0.39648879407677984 +881 20 regularizer.weight 0.05102094890524316 +881 20 optimizer.lr 0.05346076612930928 +881 20 negative_sampler.num_negs_per_pos 58.0 +881 20 training.batch_size 0.0 +881 21 model.embedding_dim 1.0 +881 21 model.scoring_fct_norm 1.0 +881 21 loss.margin 14.149500248293862 +881 21 loss.adversarial_temperature 0.6112866645285938 +881 21 
regularizer.weight 0.16611238509050702 +881 21 optimizer.lr 0.0063717530375615086 +881 21 negative_sampler.num_negs_per_pos 7.0 +881 21 training.batch_size 0.0 +881 22 model.embedding_dim 0.0 +881 22 model.scoring_fct_norm 1.0 +881 22 loss.margin 21.05525394353753 +881 22 loss.adversarial_temperature 0.9813715193309084 +881 22 regularizer.weight 0.04930672081314452 +881 22 optimizer.lr 0.021639562197649873 +881 22 negative_sampler.num_negs_per_pos 89.0 +881 22 training.batch_size 1.0 +881 23 model.embedding_dim 2.0 +881 23 model.scoring_fct_norm 2.0 +881 23 loss.margin 17.557810506960898 +881 23 loss.adversarial_temperature 0.667331118793117 +881 23 regularizer.weight 0.049514628940292287 +881 23 optimizer.lr 0.020238007388136563 +881 23 negative_sampler.num_negs_per_pos 59.0 +881 23 training.batch_size 1.0 +881 24 model.embedding_dim 1.0 +881 24 model.scoring_fct_norm 1.0 +881 24 loss.margin 21.2539016970308 +881 24 loss.adversarial_temperature 0.3551403308560037 +881 24 regularizer.weight 0.1509858781898655 +881 24 optimizer.lr 0.09806140216128502 +881 24 negative_sampler.num_negs_per_pos 18.0 +881 24 training.batch_size 2.0 +881 25 model.embedding_dim 2.0 +881 25 model.scoring_fct_norm 2.0 +881 25 loss.margin 20.12792584549692 +881 25 loss.adversarial_temperature 0.38601609353326816 +881 25 regularizer.weight 0.1874540190534776 +881 25 optimizer.lr 0.012912647851550343 +881 25 negative_sampler.num_negs_per_pos 22.0 +881 25 training.batch_size 0.0 +881 26 model.embedding_dim 1.0 +881 26 model.scoring_fct_norm 2.0 +881 26 loss.margin 14.89738988911841 +881 26 loss.adversarial_temperature 0.41797746053965457 +881 26 regularizer.weight 0.017468402774503695 +881 26 optimizer.lr 0.023182981915599473 +881 26 negative_sampler.num_negs_per_pos 6.0 +881 26 training.batch_size 1.0 +881 27 model.embedding_dim 2.0 +881 27 model.scoring_fct_norm 2.0 +881 27 loss.margin 14.197055980341668 +881 27 loss.adversarial_temperature 0.9112971213549749 +881 27 regularizer.weight 
0.25542181674075287 +881 27 optimizer.lr 0.02040453042086116 +881 27 negative_sampler.num_negs_per_pos 6.0 +881 27 training.batch_size 1.0 +881 28 model.embedding_dim 1.0 +881 28 model.scoring_fct_norm 1.0 +881 28 loss.margin 12.60095005833325 +881 28 loss.adversarial_temperature 0.27481719748741074 +881 28 regularizer.weight 0.2224074947470495 +881 28 optimizer.lr 0.07140610953540177 +881 28 negative_sampler.num_negs_per_pos 67.0 +881 28 training.batch_size 2.0 +881 29 model.embedding_dim 1.0 +881 29 model.scoring_fct_norm 2.0 +881 29 loss.margin 29.729905578517858 +881 29 loss.adversarial_temperature 0.13834573794771934 +881 29 regularizer.weight 0.09983818167084856 +881 29 optimizer.lr 0.06317351895050345 +881 29 negative_sampler.num_negs_per_pos 94.0 +881 29 training.batch_size 2.0 +881 30 model.embedding_dim 2.0 +881 30 model.scoring_fct_norm 2.0 +881 30 loss.margin 16.743077844940025 +881 30 loss.adversarial_temperature 0.23877904801982622 +881 30 regularizer.weight 0.13940570552435805 +881 30 optimizer.lr 0.0016267922913722006 +881 30 negative_sampler.num_negs_per_pos 55.0 +881 30 training.batch_size 0.0 +881 31 model.embedding_dim 2.0 +881 31 model.scoring_fct_norm 1.0 +881 31 loss.margin 4.363795793913501 +881 31 loss.adversarial_temperature 0.12515215634878094 +881 31 regularizer.weight 0.11128107412462537 +881 31 optimizer.lr 0.0034465947165794064 +881 31 negative_sampler.num_negs_per_pos 9.0 +881 31 training.batch_size 1.0 +881 32 model.embedding_dim 2.0 +881 32 model.scoring_fct_norm 2.0 +881 32 loss.margin 9.927156409441858 +881 32 loss.adversarial_temperature 0.8580639316245344 +881 32 regularizer.weight 0.16412276159137718 +881 32 optimizer.lr 0.04386559704648929 +881 32 negative_sampler.num_negs_per_pos 20.0 +881 32 training.batch_size 0.0 +881 33 model.embedding_dim 1.0 +881 33 model.scoring_fct_norm 2.0 +881 33 loss.margin 12.535363672648149 +881 33 loss.adversarial_temperature 0.9741727268330268 +881 33 regularizer.weight 0.03342414877262251 
+881 33 optimizer.lr 0.011198641851433499 +881 33 negative_sampler.num_negs_per_pos 85.0 +881 33 training.batch_size 2.0 +881 34 model.embedding_dim 1.0 +881 34 model.scoring_fct_norm 1.0 +881 34 loss.margin 9.721323889643822 +881 34 loss.adversarial_temperature 0.8157911186788659 +881 34 regularizer.weight 0.2771559075876248 +881 34 optimizer.lr 0.09197301788056618 +881 34 negative_sampler.num_negs_per_pos 57.0 +881 34 training.batch_size 2.0 +881 35 model.embedding_dim 1.0 +881 35 model.scoring_fct_norm 1.0 +881 35 loss.margin 11.880566103927194 +881 35 loss.adversarial_temperature 0.23812634664409446 +881 35 regularizer.weight 0.1668713327380278 +881 35 optimizer.lr 0.001537469445748736 +881 35 negative_sampler.num_negs_per_pos 11.0 +881 35 training.batch_size 1.0 +881 36 model.embedding_dim 2.0 +881 36 model.scoring_fct_norm 1.0 +881 36 loss.margin 16.933531231018783 +881 36 loss.adversarial_temperature 0.4960379309516054 +881 36 regularizer.weight 0.06901590615318366 +881 36 optimizer.lr 0.08061230111180011 +881 36 negative_sampler.num_negs_per_pos 23.0 +881 36 training.batch_size 2.0 +881 37 model.embedding_dim 1.0 +881 37 model.scoring_fct_norm 2.0 +881 37 loss.margin 15.025131901863494 +881 37 loss.adversarial_temperature 0.253709520258743 +881 37 regularizer.weight 0.03198400444511386 +881 37 optimizer.lr 0.001452886594885868 +881 37 negative_sampler.num_negs_per_pos 51.0 +881 37 training.batch_size 0.0 +881 38 model.embedding_dim 2.0 +881 38 model.scoring_fct_norm 1.0 +881 38 loss.margin 26.180524370174258 +881 38 loss.adversarial_temperature 0.7969173081687646 +881 38 regularizer.weight 0.014810969029318232 +881 38 optimizer.lr 0.003252838308590691 +881 38 negative_sampler.num_negs_per_pos 70.0 +881 38 training.batch_size 0.0 +881 39 model.embedding_dim 2.0 +881 39 model.scoring_fct_norm 2.0 +881 39 loss.margin 2.167478988342346 +881 39 loss.adversarial_temperature 0.426837540857815 +881 39 regularizer.weight 0.019214037048371137 +881 39 optimizer.lr 
0.0030878598512452298 +881 39 negative_sampler.num_negs_per_pos 85.0 +881 39 training.batch_size 0.0 +881 40 model.embedding_dim 2.0 +881 40 model.scoring_fct_norm 2.0 +881 40 loss.margin 20.98971729300185 +881 40 loss.adversarial_temperature 0.18106349217547207 +881 40 regularizer.weight 0.0851884688823182 +881 40 optimizer.lr 0.054603467252162766 +881 40 negative_sampler.num_negs_per_pos 23.0 +881 40 training.batch_size 1.0 +881 41 model.embedding_dim 0.0 +881 41 model.scoring_fct_norm 2.0 +881 41 loss.margin 6.6325920379021985 +881 41 loss.adversarial_temperature 0.1334659036840718 +881 41 regularizer.weight 0.27303009069475903 +881 41 optimizer.lr 0.006097590779750653 +881 41 negative_sampler.num_negs_per_pos 73.0 +881 41 training.batch_size 0.0 +881 42 model.embedding_dim 1.0 +881 42 model.scoring_fct_norm 2.0 +881 42 loss.margin 3.098181553662787 +881 42 loss.adversarial_temperature 0.17356583874072762 +881 42 regularizer.weight 0.04484306815433547 +881 42 optimizer.lr 0.02660530046406066 +881 42 negative_sampler.num_negs_per_pos 40.0 +881 42 training.batch_size 1.0 +881 43 model.embedding_dim 2.0 +881 43 model.scoring_fct_norm 1.0 +881 43 loss.margin 10.03264034669952 +881 43 loss.adversarial_temperature 0.8362834500031592 +881 43 regularizer.weight 0.04068149536317837 +881 43 optimizer.lr 0.0013656777526443514 +881 43 negative_sampler.num_negs_per_pos 76.0 +881 43 training.batch_size 2.0 +881 44 model.embedding_dim 1.0 +881 44 model.scoring_fct_norm 1.0 +881 44 loss.margin 24.07667666373523 +881 44 loss.adversarial_temperature 0.1388750505260926 +881 44 regularizer.weight 0.07249843812475752 +881 44 optimizer.lr 0.004053327797691804 +881 44 negative_sampler.num_negs_per_pos 11.0 +881 44 training.batch_size 1.0 +881 45 model.embedding_dim 2.0 +881 45 model.scoring_fct_norm 1.0 +881 45 loss.margin 15.428171818982152 +881 45 loss.adversarial_temperature 0.9380760762251202 +881 45 regularizer.weight 0.025030048952322474 +881 45 optimizer.lr 
0.0013859726113191846 +881 45 negative_sampler.num_negs_per_pos 22.0 +881 45 training.batch_size 2.0 +881 46 model.embedding_dim 0.0 +881 46 model.scoring_fct_norm 1.0 +881 46 loss.margin 22.273417510893577 +881 46 loss.adversarial_temperature 0.19079106919644945 +881 46 regularizer.weight 0.2718677429740139 +881 46 optimizer.lr 0.0029365621267110416 +881 46 negative_sampler.num_negs_per_pos 4.0 +881 46 training.batch_size 1.0 +881 47 model.embedding_dim 0.0 +881 47 model.scoring_fct_norm 2.0 +881 47 loss.margin 13.6754303748227 +881 47 loss.adversarial_temperature 0.6661115589300219 +881 47 regularizer.weight 0.023462884740040463 +881 47 optimizer.lr 0.0012512286692893824 +881 47 negative_sampler.num_negs_per_pos 6.0 +881 47 training.batch_size 1.0 +881 48 model.embedding_dim 2.0 +881 48 model.scoring_fct_norm 2.0 +881 48 loss.margin 24.85733117459632 +881 48 loss.adversarial_temperature 0.9237958287169804 +881 48 regularizer.weight 0.011257523966092234 +881 48 optimizer.lr 0.037554273151956356 +881 48 negative_sampler.num_negs_per_pos 0.0 +881 48 training.batch_size 0.0 +881 49 model.embedding_dim 2.0 +881 49 model.scoring_fct_norm 1.0 +881 49 loss.margin 25.67989165538 +881 49 loss.adversarial_temperature 0.9328166484857614 +881 49 regularizer.weight 0.013490436793315538 +881 49 optimizer.lr 0.003418789705129511 +881 49 negative_sampler.num_negs_per_pos 7.0 +881 49 training.batch_size 1.0 +881 50 model.embedding_dim 2.0 +881 50 model.scoring_fct_norm 1.0 +881 50 loss.margin 27.088140299621134 +881 50 loss.adversarial_temperature 0.9649848940081014 +881 50 regularizer.weight 0.07978033046148178 +881 50 optimizer.lr 0.006814048936717262 +881 50 negative_sampler.num_negs_per_pos 62.0 +881 50 training.batch_size 0.0 +881 51 model.embedding_dim 2.0 +881 51 model.scoring_fct_norm 1.0 +881 51 loss.margin 29.774444747543313 +881 51 loss.adversarial_temperature 0.8827253232324801 +881 51 regularizer.weight 0.03054211696707975 +881 51 optimizer.lr 0.005858412008333764 
+881 51 negative_sampler.num_negs_per_pos 84.0 +881 51 training.batch_size 0.0 +881 52 model.embedding_dim 1.0 +881 52 model.scoring_fct_norm 1.0 +881 52 loss.margin 8.404500353053033 +881 52 loss.adversarial_temperature 0.6834417646165746 +881 52 regularizer.weight 0.04892646082735611 +881 52 optimizer.lr 0.001417617857695902 +881 52 negative_sampler.num_negs_per_pos 27.0 +881 52 training.batch_size 0.0 +881 53 model.embedding_dim 1.0 +881 53 model.scoring_fct_norm 1.0 +881 53 loss.margin 19.8496048448605 +881 53 loss.adversarial_temperature 0.7044688694777209 +881 53 regularizer.weight 0.01931404107052335 +881 53 optimizer.lr 0.031918083003938294 +881 53 negative_sampler.num_negs_per_pos 59.0 +881 53 training.batch_size 1.0 +881 54 model.embedding_dim 0.0 +881 54 model.scoring_fct_norm 2.0 +881 54 loss.margin 9.894385347198698 +881 54 loss.adversarial_temperature 0.5881270448436802 +881 54 regularizer.weight 0.010391636839172148 +881 54 optimizer.lr 0.0033610085778330136 +881 54 negative_sampler.num_negs_per_pos 79.0 +881 54 training.batch_size 2.0 +881 55 model.embedding_dim 0.0 +881 55 model.scoring_fct_norm 1.0 +881 55 loss.margin 4.945181483444334 +881 55 loss.adversarial_temperature 0.5182865943939503 +881 55 regularizer.weight 0.15929345051844077 +881 55 optimizer.lr 0.024840408862577063 +881 55 negative_sampler.num_negs_per_pos 25.0 +881 55 training.batch_size 2.0 +881 56 model.embedding_dim 0.0 +881 56 model.scoring_fct_norm 1.0 +881 56 loss.margin 23.91402594627205 +881 56 loss.adversarial_temperature 0.6919544813465122 +881 56 regularizer.weight 0.27372741813092977 +881 56 optimizer.lr 0.052727966893416585 +881 56 negative_sampler.num_negs_per_pos 4.0 +881 56 training.batch_size 0.0 +881 57 model.embedding_dim 2.0 +881 57 model.scoring_fct_norm 2.0 +881 57 loss.margin 9.419645502512788 +881 57 loss.adversarial_temperature 0.9232225468566414 +881 57 regularizer.weight 0.05465216820492862 +881 57 optimizer.lr 0.004028882866530746 +881 57 
negative_sampler.num_negs_per_pos 89.0 +881 57 training.batch_size 0.0 +881 58 model.embedding_dim 1.0 +881 58 model.scoring_fct_norm 2.0 +881 58 loss.margin 11.563470343931773 +881 58 loss.adversarial_temperature 0.9084063795157709 +881 58 regularizer.weight 0.02568675363400919 +881 58 optimizer.lr 0.014443403625979024 +881 58 negative_sampler.num_negs_per_pos 14.0 +881 58 training.batch_size 2.0 +881 59 model.embedding_dim 0.0 +881 59 model.scoring_fct_norm 1.0 +881 59 loss.margin 3.570734497095934 +881 59 loss.adversarial_temperature 0.8860898589490916 +881 59 regularizer.weight 0.013633501455160492 +881 59 optimizer.lr 0.0020975134714643446 +881 59 negative_sampler.num_negs_per_pos 97.0 +881 59 training.batch_size 2.0 +881 60 model.embedding_dim 1.0 +881 60 model.scoring_fct_norm 1.0 +881 60 loss.margin 29.635267801791453 +881 60 loss.adversarial_temperature 0.6262529294392388 +881 60 regularizer.weight 0.011766207931350674 +881 60 optimizer.lr 0.02243138594400953 +881 60 negative_sampler.num_negs_per_pos 23.0 +881 60 training.batch_size 2.0 +881 61 model.embedding_dim 2.0 +881 61 model.scoring_fct_norm 1.0 +881 61 loss.margin 24.883742733600467 +881 61 loss.adversarial_temperature 0.16878039303183745 +881 61 regularizer.weight 0.15789096011565734 +881 61 optimizer.lr 0.02183375039734511 +881 61 negative_sampler.num_negs_per_pos 13.0 +881 61 training.batch_size 2.0 +881 62 model.embedding_dim 2.0 +881 62 model.scoring_fct_norm 1.0 +881 62 loss.margin 8.35994111641767 +881 62 loss.adversarial_temperature 0.8460065880828376 +881 62 regularizer.weight 0.29760125634046214 +881 62 optimizer.lr 0.0239386091627642 +881 62 negative_sampler.num_negs_per_pos 0.0 +881 62 training.batch_size 1.0 +881 63 model.embedding_dim 1.0 +881 63 model.scoring_fct_norm 2.0 +881 63 loss.margin 18.448015880221902 +881 63 loss.adversarial_temperature 0.7210837152401153 +881 63 regularizer.weight 0.1416746194293011 +881 63 optimizer.lr 0.004137861882648104 +881 63 
negative_sampler.num_negs_per_pos 54.0 +881 63 training.batch_size 2.0 +881 64 model.embedding_dim 1.0 +881 64 model.scoring_fct_norm 1.0 +881 64 loss.margin 25.834508907331035 +881 64 loss.adversarial_temperature 0.5718144676689785 +881 64 regularizer.weight 0.050401580875579756 +881 64 optimizer.lr 0.007401829700000144 +881 64 negative_sampler.num_negs_per_pos 25.0 +881 64 training.batch_size 2.0 +881 65 model.embedding_dim 1.0 +881 65 model.scoring_fct_norm 1.0 +881 65 loss.margin 6.251473941797305 +881 65 loss.adversarial_temperature 0.29174749919067355 +881 65 regularizer.weight 0.09605779057816191 +881 65 optimizer.lr 0.002368860593113308 +881 65 negative_sampler.num_negs_per_pos 84.0 +881 65 training.batch_size 1.0 +881 66 model.embedding_dim 0.0 +881 66 model.scoring_fct_norm 1.0 +881 66 loss.margin 23.246738323125673 +881 66 loss.adversarial_temperature 0.3197460727902055 +881 66 regularizer.weight 0.042638175414881505 +881 66 optimizer.lr 0.0012331477253798419 +881 66 negative_sampler.num_negs_per_pos 26.0 +881 66 training.batch_size 0.0 +881 67 model.embedding_dim 0.0 +881 67 model.scoring_fct_norm 1.0 +881 67 loss.margin 24.563459280194756 +881 67 loss.adversarial_temperature 0.9338282305073632 +881 67 regularizer.weight 0.04819928935714561 +881 67 optimizer.lr 0.08989975943697082 +881 67 negative_sampler.num_negs_per_pos 68.0 +881 67 training.batch_size 0.0 +881 68 model.embedding_dim 0.0 +881 68 model.scoring_fct_norm 2.0 +881 68 loss.margin 1.295418954775008 +881 68 loss.adversarial_temperature 0.8866218337652683 +881 68 regularizer.weight 0.05924959780549289 +881 68 optimizer.lr 0.08880423067218511 +881 68 negative_sampler.num_negs_per_pos 26.0 +881 68 training.batch_size 1.0 +881 69 model.embedding_dim 1.0 +881 69 model.scoring_fct_norm 2.0 +881 69 loss.margin 9.75729800673576 +881 69 loss.adversarial_temperature 0.8708087155353798 +881 69 regularizer.weight 0.2402932463720819 +881 69 optimizer.lr 0.033334893936009656 +881 69 
negative_sampler.num_negs_per_pos 80.0 +881 69 training.batch_size 2.0 +881 70 model.embedding_dim 2.0 +881 70 model.scoring_fct_norm 1.0 +881 70 loss.margin 10.390646326513009 +881 70 loss.adversarial_temperature 0.3047035802812945 +881 70 regularizer.weight 0.057132027374393254 +881 70 optimizer.lr 0.04294763643133327 +881 70 negative_sampler.num_negs_per_pos 85.0 +881 70 training.batch_size 2.0 +881 71 model.embedding_dim 2.0 +881 71 model.scoring_fct_norm 2.0 +881 71 loss.margin 21.886790067542925 +881 71 loss.adversarial_temperature 0.20084960920776623 +881 71 regularizer.weight 0.010514172586299112 +881 71 optimizer.lr 0.0221899406633951 +881 71 negative_sampler.num_negs_per_pos 93.0 +881 71 training.batch_size 1.0 +881 72 model.embedding_dim 2.0 +881 72 model.scoring_fct_norm 1.0 +881 72 loss.margin 16.159288152626285 +881 72 loss.adversarial_temperature 0.9340812028660215 +881 72 regularizer.weight 0.10606392830650818 +881 72 optimizer.lr 0.018742799710866766 +881 72 negative_sampler.num_negs_per_pos 2.0 +881 72 training.batch_size 1.0 +881 73 model.embedding_dim 2.0 +881 73 model.scoring_fct_norm 1.0 +881 73 loss.margin 23.51840328302126 +881 73 loss.adversarial_temperature 0.23577951135775138 +881 73 regularizer.weight 0.17487484012516216 +881 73 optimizer.lr 0.014684511946094403 +881 73 negative_sampler.num_negs_per_pos 53.0 +881 73 training.batch_size 2.0 +881 74 model.embedding_dim 0.0 +881 74 model.scoring_fct_norm 1.0 +881 74 loss.margin 1.5795796241635396 +881 74 loss.adversarial_temperature 0.31576862550392715 +881 74 regularizer.weight 0.07056007928179203 +881 74 optimizer.lr 0.004544252971738795 +881 74 negative_sampler.num_negs_per_pos 10.0 +881 74 training.batch_size 0.0 +881 75 model.embedding_dim 1.0 +881 75 model.scoring_fct_norm 1.0 +881 75 loss.margin 6.283926304290587 +881 75 loss.adversarial_temperature 0.5695794176079889 +881 75 regularizer.weight 0.015521048925039204 +881 75 optimizer.lr 0.016887412616814798 +881 75 
negative_sampler.num_negs_per_pos 23.0 +881 75 training.batch_size 1.0 +881 76 model.embedding_dim 0.0 +881 76 model.scoring_fct_norm 1.0 +881 76 loss.margin 16.278414759857096 +881 76 loss.adversarial_temperature 0.39985023538891706 +881 76 regularizer.weight 0.14689483324128377 +881 76 optimizer.lr 0.0750625212284805 +881 76 negative_sampler.num_negs_per_pos 79.0 +881 76 training.batch_size 2.0 +881 77 model.embedding_dim 0.0 +881 77 model.scoring_fct_norm 1.0 +881 77 loss.margin 2.2177690267982357 +881 77 loss.adversarial_temperature 0.5268396424486899 +881 77 regularizer.weight 0.039624794275949886 +881 77 optimizer.lr 0.009836921431945518 +881 77 negative_sampler.num_negs_per_pos 61.0 +881 77 training.batch_size 1.0 +881 78 model.embedding_dim 1.0 +881 78 model.scoring_fct_norm 1.0 +881 78 loss.margin 4.82364983308263 +881 78 loss.adversarial_temperature 0.27386606402357716 +881 78 regularizer.weight 0.27026579609005447 +881 78 optimizer.lr 0.0012680069063211966 +881 78 negative_sampler.num_negs_per_pos 10.0 +881 78 training.batch_size 0.0 +881 79 model.embedding_dim 0.0 +881 79 model.scoring_fct_norm 2.0 +881 79 loss.margin 12.185078245826798 +881 79 loss.adversarial_temperature 0.4380793053591031 +881 79 regularizer.weight 0.05931094815241381 +881 79 optimizer.lr 0.0051693366063506494 +881 79 negative_sampler.num_negs_per_pos 54.0 +881 79 training.batch_size 2.0 +881 80 model.embedding_dim 0.0 +881 80 model.scoring_fct_norm 2.0 +881 80 loss.margin 20.12583849911409 +881 80 loss.adversarial_temperature 0.4876083541040539 +881 80 regularizer.weight 0.1999512212173858 +881 80 optimizer.lr 0.0015447274894735213 +881 80 negative_sampler.num_negs_per_pos 90.0 +881 80 training.batch_size 2.0 +881 81 model.embedding_dim 1.0 +881 81 model.scoring_fct_norm 1.0 +881 81 loss.margin 15.91296537139784 +881 81 loss.adversarial_temperature 0.9659720124908983 +881 81 regularizer.weight 0.07329071260079308 +881 81 optimizer.lr 0.0024050658737523644 +881 81 
negative_sampler.num_negs_per_pos 17.0 +881 81 training.batch_size 1.0 +881 82 model.embedding_dim 1.0 +881 82 model.scoring_fct_norm 2.0 +881 82 loss.margin 19.463787739271464 +881 82 loss.adversarial_temperature 0.12884504912160552 +881 82 regularizer.weight 0.22364415105819838 +881 82 optimizer.lr 0.0022028418329981627 +881 82 negative_sampler.num_negs_per_pos 70.0 +881 82 training.batch_size 2.0 +881 83 model.embedding_dim 1.0 +881 83 model.scoring_fct_norm 2.0 +881 83 loss.margin 14.3371292225322 +881 83 loss.adversarial_temperature 0.9243203244652446 +881 83 regularizer.weight 0.09149179614244561 +881 83 optimizer.lr 0.009945613651558317 +881 83 negative_sampler.num_negs_per_pos 70.0 +881 83 training.batch_size 1.0 +881 84 model.embedding_dim 0.0 +881 84 model.scoring_fct_norm 1.0 +881 84 loss.margin 21.038259737606225 +881 84 loss.adversarial_temperature 0.9023984062419522 +881 84 regularizer.weight 0.010158491144301044 +881 84 optimizer.lr 0.0019671223832500606 +881 84 negative_sampler.num_negs_per_pos 5.0 +881 84 training.batch_size 0.0 +881 85 model.embedding_dim 2.0 +881 85 model.scoring_fct_norm 1.0 +881 85 loss.margin 28.880394254182495 +881 85 loss.adversarial_temperature 0.6570748570509488 +881 85 regularizer.weight 0.10593207382158652 +881 85 optimizer.lr 0.0032920584947347957 +881 85 negative_sampler.num_negs_per_pos 95.0 +881 85 training.batch_size 1.0 +881 86 model.embedding_dim 0.0 +881 86 model.scoring_fct_norm 1.0 +881 86 loss.margin 29.827515880638874 +881 86 loss.adversarial_temperature 0.8122647080105999 +881 86 regularizer.weight 0.059534105527382926 +881 86 optimizer.lr 0.09490673852537745 +881 86 negative_sampler.num_negs_per_pos 15.0 +881 86 training.batch_size 0.0 +881 87 model.embedding_dim 2.0 +881 87 model.scoring_fct_norm 2.0 +881 87 loss.margin 11.428316800844044 +881 87 loss.adversarial_temperature 0.1674554412906346 +881 87 regularizer.weight 0.0137226764513265 +881 87 optimizer.lr 0.005074095180886594 +881 87 
negative_sampler.num_negs_per_pos 63.0 +881 87 training.batch_size 1.0 +881 88 model.embedding_dim 1.0 +881 88 model.scoring_fct_norm 1.0 +881 88 loss.margin 14.907800321962688 +881 88 loss.adversarial_temperature 0.2416751116888144 +881 88 regularizer.weight 0.2895705566070631 +881 88 optimizer.lr 0.008256499654222397 +881 88 negative_sampler.num_negs_per_pos 68.0 +881 88 training.batch_size 1.0 +881 89 model.embedding_dim 0.0 +881 89 model.scoring_fct_norm 2.0 +881 89 loss.margin 21.547084066109118 +881 89 loss.adversarial_temperature 0.37035493663193 +881 89 regularizer.weight 0.10777121571431568 +881 89 optimizer.lr 0.0013618712863078788 +881 89 negative_sampler.num_negs_per_pos 61.0 +881 89 training.batch_size 0.0 +881 90 model.embedding_dim 1.0 +881 90 model.scoring_fct_norm 1.0 +881 90 loss.margin 12.270652132550385 +881 90 loss.adversarial_temperature 0.402096987961473 +881 90 regularizer.weight 0.1317477640603045 +881 90 optimizer.lr 0.002057708378513603 +881 90 negative_sampler.num_negs_per_pos 68.0 +881 90 training.batch_size 0.0 +881 91 model.embedding_dim 0.0 +881 91 model.scoring_fct_norm 2.0 +881 91 loss.margin 20.540734590268904 +881 91 loss.adversarial_temperature 0.6543362645637316 +881 91 regularizer.weight 0.13116988764720316 +881 91 optimizer.lr 0.04207199837119501 +881 91 negative_sampler.num_negs_per_pos 29.0 +881 91 training.batch_size 2.0 +881 92 model.embedding_dim 2.0 +881 92 model.scoring_fct_norm 2.0 +881 92 loss.margin 19.647870086716377 +881 92 loss.adversarial_temperature 0.11722540712505142 +881 92 regularizer.weight 0.08293540704779828 +881 92 optimizer.lr 0.030216629238544804 +881 92 negative_sampler.num_negs_per_pos 60.0 +881 92 training.batch_size 1.0 +881 93 model.embedding_dim 0.0 +881 93 model.scoring_fct_norm 1.0 +881 93 loss.margin 18.98761059985181 +881 93 loss.adversarial_temperature 0.7632799675487543 +881 93 regularizer.weight 0.025839653580495083 +881 93 optimizer.lr 0.02726793899861425 +881 93 
negative_sampler.num_negs_per_pos 23.0 +881 93 training.batch_size 0.0 +881 94 model.embedding_dim 2.0 +881 94 model.scoring_fct_norm 1.0 +881 94 loss.margin 5.480403596788691 +881 94 loss.adversarial_temperature 0.21522125089173594 +881 94 regularizer.weight 0.022300089243665753 +881 94 optimizer.lr 0.018968100893165068 +881 94 negative_sampler.num_negs_per_pos 0.0 +881 94 training.batch_size 2.0 +881 95 model.embedding_dim 0.0 +881 95 model.scoring_fct_norm 1.0 +881 95 loss.margin 8.24350652387859 +881 95 loss.adversarial_temperature 0.2842352785081691 +881 95 regularizer.weight 0.1781062603775124 +881 95 optimizer.lr 0.015309024650174422 +881 95 negative_sampler.num_negs_per_pos 47.0 +881 95 training.batch_size 1.0 +881 96 model.embedding_dim 1.0 +881 96 model.scoring_fct_norm 2.0 +881 96 loss.margin 7.234192647988963 +881 96 loss.adversarial_temperature 0.6861847206741893 +881 96 regularizer.weight 0.01679417849160086 +881 96 optimizer.lr 0.00408693746514975 +881 96 negative_sampler.num_negs_per_pos 68.0 +881 96 training.batch_size 2.0 +881 97 model.embedding_dim 2.0 +881 97 model.scoring_fct_norm 1.0 +881 97 loss.margin 18.905886796896738 +881 97 loss.adversarial_temperature 0.6859941448041028 +881 97 regularizer.weight 0.10422374106410665 +881 97 optimizer.lr 0.01690310034479082 +881 97 negative_sampler.num_negs_per_pos 72.0 +881 97 training.batch_size 1.0 +881 98 model.embedding_dim 2.0 +881 98 model.scoring_fct_norm 2.0 +881 98 loss.margin 15.054837174594901 +881 98 loss.adversarial_temperature 0.6798003025470735 +881 98 regularizer.weight 0.08221625342039907 +881 98 optimizer.lr 0.0032474994235766213 +881 98 negative_sampler.num_negs_per_pos 37.0 +881 98 training.batch_size 2.0 +881 99 model.embedding_dim 0.0 +881 99 model.scoring_fct_norm 1.0 +881 99 loss.margin 18.435922652413183 +881 99 loss.adversarial_temperature 0.3957699990066148 +881 99 regularizer.weight 0.020292229018596415 +881 99 optimizer.lr 0.0025221704691377315 +881 99 
negative_sampler.num_negs_per_pos 20.0 +881 99 training.batch_size 1.0 +881 100 model.embedding_dim 2.0 +881 100 model.scoring_fct_norm 1.0 +881 100 loss.margin 17.076450414407283 +881 100 loss.adversarial_temperature 0.8881203711623182 +881 100 regularizer.weight 0.07527857854212007 +881 100 optimizer.lr 0.08908471612025218 +881 100 negative_sampler.num_negs_per_pos 66.0 +881 100 training.batch_size 0.0 +881 1 dataset """kinships""" +881 1 model """transh""" +881 1 loss """nssa""" +881 1 regularizer """transh""" +881 1 optimizer """adam""" +881 1 training_loop """owa""" +881 1 negative_sampler """basic""" +881 1 evaluator """rankbased""" +881 2 dataset """kinships""" +881 2 model """transh""" +881 2 loss """nssa""" +881 2 regularizer """transh""" +881 2 optimizer """adam""" +881 2 training_loop """owa""" +881 2 negative_sampler """basic""" +881 2 evaluator """rankbased""" +881 3 dataset """kinships""" +881 3 model """transh""" +881 3 loss """nssa""" +881 3 regularizer """transh""" +881 3 optimizer """adam""" +881 3 training_loop """owa""" +881 3 negative_sampler """basic""" +881 3 evaluator """rankbased""" +881 4 dataset """kinships""" +881 4 model """transh""" +881 4 loss """nssa""" +881 4 regularizer """transh""" +881 4 optimizer """adam""" +881 4 training_loop """owa""" +881 4 negative_sampler """basic""" +881 4 evaluator """rankbased""" +881 5 dataset """kinships""" +881 5 model """transh""" +881 5 loss """nssa""" +881 5 regularizer """transh""" +881 5 optimizer """adam""" +881 5 training_loop """owa""" +881 5 negative_sampler """basic""" +881 5 evaluator """rankbased""" +881 6 dataset """kinships""" +881 6 model """transh""" +881 6 loss """nssa""" +881 6 regularizer """transh""" +881 6 optimizer """adam""" +881 6 training_loop """owa""" +881 6 negative_sampler """basic""" +881 6 evaluator """rankbased""" +881 7 dataset """kinships""" +881 7 model """transh""" +881 7 loss """nssa""" +881 7 regularizer """transh""" +881 7 optimizer """adam""" +881 7 
training_loop """owa""" +881 7 negative_sampler """basic""" +881 7 evaluator """rankbased""" +881 8 dataset """kinships""" +881 8 model """transh""" +881 8 loss """nssa""" +881 8 regularizer """transh""" +881 8 optimizer """adam""" +881 8 training_loop """owa""" +881 8 negative_sampler """basic""" +881 8 evaluator """rankbased""" +881 9 dataset """kinships""" +881 9 model """transh""" +881 9 loss """nssa""" +881 9 regularizer """transh""" +881 9 optimizer """adam""" +881 9 training_loop """owa""" +881 9 negative_sampler """basic""" +881 9 evaluator """rankbased""" +881 10 dataset """kinships""" +881 10 model """transh""" +881 10 loss """nssa""" +881 10 regularizer """transh""" +881 10 optimizer """adam""" +881 10 training_loop """owa""" +881 10 negative_sampler """basic""" +881 10 evaluator """rankbased""" +881 11 dataset """kinships""" +881 11 model """transh""" +881 11 loss """nssa""" +881 11 regularizer """transh""" +881 11 optimizer """adam""" +881 11 training_loop """owa""" +881 11 negative_sampler """basic""" +881 11 evaluator """rankbased""" +881 12 dataset """kinships""" +881 12 model """transh""" +881 12 loss """nssa""" +881 12 regularizer """transh""" +881 12 optimizer """adam""" +881 12 training_loop """owa""" +881 12 negative_sampler """basic""" +881 12 evaluator """rankbased""" +881 13 dataset """kinships""" +881 13 model """transh""" +881 13 loss """nssa""" +881 13 regularizer """transh""" +881 13 optimizer """adam""" +881 13 training_loop """owa""" +881 13 negative_sampler """basic""" +881 13 evaluator """rankbased""" +881 14 dataset """kinships""" +881 14 model """transh""" +881 14 loss """nssa""" +881 14 regularizer """transh""" +881 14 optimizer """adam""" +881 14 training_loop """owa""" +881 14 negative_sampler """basic""" +881 14 evaluator """rankbased""" +881 15 dataset """kinships""" +881 15 model """transh""" +881 15 loss """nssa""" +881 15 regularizer """transh""" +881 15 optimizer """adam""" +881 15 training_loop """owa""" +881 15 
negative_sampler """basic""" +881 15 evaluator """rankbased""" +881 16 dataset """kinships""" +881 16 model """transh""" +881 16 loss """nssa""" +881 16 regularizer """transh""" +881 16 optimizer """adam""" +881 16 training_loop """owa""" +881 16 negative_sampler """basic""" +881 16 evaluator """rankbased""" +881 17 dataset """kinships""" +881 17 model """transh""" +881 17 loss """nssa""" +881 17 regularizer """transh""" +881 17 optimizer """adam""" +881 17 training_loop """owa""" +881 17 negative_sampler """basic""" +881 17 evaluator """rankbased""" +881 18 dataset """kinships""" +881 18 model """transh""" +881 18 loss """nssa""" +881 18 regularizer """transh""" +881 18 optimizer """adam""" +881 18 training_loop """owa""" +881 18 negative_sampler """basic""" +881 18 evaluator """rankbased""" +881 19 dataset """kinships""" +881 19 model """transh""" +881 19 loss """nssa""" +881 19 regularizer """transh""" +881 19 optimizer """adam""" +881 19 training_loop """owa""" +881 19 negative_sampler """basic""" +881 19 evaluator """rankbased""" +881 20 dataset """kinships""" +881 20 model """transh""" +881 20 loss """nssa""" +881 20 regularizer """transh""" +881 20 optimizer """adam""" +881 20 training_loop """owa""" +881 20 negative_sampler """basic""" +881 20 evaluator """rankbased""" +881 21 dataset """kinships""" +881 21 model """transh""" +881 21 loss """nssa""" +881 21 regularizer """transh""" +881 21 optimizer """adam""" +881 21 training_loop """owa""" +881 21 negative_sampler """basic""" +881 21 evaluator """rankbased""" +881 22 dataset """kinships""" +881 22 model """transh""" +881 22 loss """nssa""" +881 22 regularizer """transh""" +881 22 optimizer """adam""" +881 22 training_loop """owa""" +881 22 negative_sampler """basic""" +881 22 evaluator """rankbased""" +881 23 dataset """kinships""" +881 23 model """transh""" +881 23 loss """nssa""" +881 23 regularizer """transh""" +881 23 optimizer """adam""" +881 23 training_loop """owa""" +881 23 negative_sampler 
"""basic""" +881 23 evaluator """rankbased""" +881 24 dataset """kinships""" +881 24 model """transh""" +881 24 loss """nssa""" +881 24 regularizer """transh""" +881 24 optimizer """adam""" +881 24 training_loop """owa""" +881 24 negative_sampler """basic""" +881 24 evaluator """rankbased""" +881 25 dataset """kinships""" +881 25 model """transh""" +881 25 loss """nssa""" +881 25 regularizer """transh""" +881 25 optimizer """adam""" +881 25 training_loop """owa""" +881 25 negative_sampler """basic""" +881 25 evaluator """rankbased""" +881 26 dataset """kinships""" +881 26 model """transh""" +881 26 loss """nssa""" +881 26 regularizer """transh""" +881 26 optimizer """adam""" +881 26 training_loop """owa""" +881 26 negative_sampler """basic""" +881 26 evaluator """rankbased""" +881 27 dataset """kinships""" +881 27 model """transh""" +881 27 loss """nssa""" +881 27 regularizer """transh""" +881 27 optimizer """adam""" +881 27 training_loop """owa""" +881 27 negative_sampler """basic""" +881 27 evaluator """rankbased""" +881 28 dataset """kinships""" +881 28 model """transh""" +881 28 loss """nssa""" +881 28 regularizer """transh""" +881 28 optimizer """adam""" +881 28 training_loop """owa""" +881 28 negative_sampler """basic""" +881 28 evaluator """rankbased""" +881 29 dataset """kinships""" +881 29 model """transh""" +881 29 loss """nssa""" +881 29 regularizer """transh""" +881 29 optimizer """adam""" +881 29 training_loop """owa""" +881 29 negative_sampler """basic""" +881 29 evaluator """rankbased""" +881 30 dataset """kinships""" +881 30 model """transh""" +881 30 loss """nssa""" +881 30 regularizer """transh""" +881 30 optimizer """adam""" +881 30 training_loop """owa""" +881 30 negative_sampler """basic""" +881 30 evaluator """rankbased""" +881 31 dataset """kinships""" +881 31 model """transh""" +881 31 loss """nssa""" +881 31 regularizer """transh""" +881 31 optimizer """adam""" +881 31 training_loop """owa""" +881 31 negative_sampler """basic""" +881 31 
evaluator """rankbased""" +881 32 dataset """kinships""" +881 32 model """transh""" +881 32 loss """nssa""" +881 32 regularizer """transh""" +881 32 optimizer """adam""" +881 32 training_loop """owa""" +881 32 negative_sampler """basic""" +881 32 evaluator """rankbased""" +881 33 dataset """kinships""" +881 33 model """transh""" +881 33 loss """nssa""" +881 33 regularizer """transh""" +881 33 optimizer """adam""" +881 33 training_loop """owa""" +881 33 negative_sampler """basic""" +881 33 evaluator """rankbased""" +881 34 dataset """kinships""" +881 34 model """transh""" +881 34 loss """nssa""" +881 34 regularizer """transh""" +881 34 optimizer """adam""" +881 34 training_loop """owa""" +881 34 negative_sampler """basic""" +881 34 evaluator """rankbased""" +881 35 dataset """kinships""" +881 35 model """transh""" +881 35 loss """nssa""" +881 35 regularizer """transh""" +881 35 optimizer """adam""" +881 35 training_loop """owa""" +881 35 negative_sampler """basic""" +881 35 evaluator """rankbased""" +881 36 dataset """kinships""" +881 36 model """transh""" +881 36 loss """nssa""" +881 36 regularizer """transh""" +881 36 optimizer """adam""" +881 36 training_loop """owa""" +881 36 negative_sampler """basic""" +881 36 evaluator """rankbased""" +881 37 dataset """kinships""" +881 37 model """transh""" +881 37 loss """nssa""" +881 37 regularizer """transh""" +881 37 optimizer """adam""" +881 37 training_loop """owa""" +881 37 negative_sampler """basic""" +881 37 evaluator """rankbased""" +881 38 dataset """kinships""" +881 38 model """transh""" +881 38 loss """nssa""" +881 38 regularizer """transh""" +881 38 optimizer """adam""" +881 38 training_loop """owa""" +881 38 negative_sampler """basic""" +881 38 evaluator """rankbased""" +881 39 dataset """kinships""" +881 39 model """transh""" +881 39 loss """nssa""" +881 39 regularizer """transh""" +881 39 optimizer """adam""" +881 39 training_loop """owa""" +881 39 negative_sampler """basic""" +881 39 evaluator 
"""rankbased""" +881 40 dataset """kinships""" +881 40 model """transh""" +881 40 loss """nssa""" +881 40 regularizer """transh""" +881 40 optimizer """adam""" +881 40 training_loop """owa""" +881 40 negative_sampler """basic""" +881 40 evaluator """rankbased""" +881 41 dataset """kinships""" +881 41 model """transh""" +881 41 loss """nssa""" +881 41 regularizer """transh""" +881 41 optimizer """adam""" +881 41 training_loop """owa""" +881 41 negative_sampler """basic""" +881 41 evaluator """rankbased""" +881 42 dataset """kinships""" +881 42 model """transh""" +881 42 loss """nssa""" +881 42 regularizer """transh""" +881 42 optimizer """adam""" +881 42 training_loop """owa""" +881 42 negative_sampler """basic""" +881 42 evaluator """rankbased""" +881 43 dataset """kinships""" +881 43 model """transh""" +881 43 loss """nssa""" +881 43 regularizer """transh""" +881 43 optimizer """adam""" +881 43 training_loop """owa""" +881 43 negative_sampler """basic""" +881 43 evaluator """rankbased""" +881 44 dataset """kinships""" +881 44 model """transh""" +881 44 loss """nssa""" +881 44 regularizer """transh""" +881 44 optimizer """adam""" +881 44 training_loop """owa""" +881 44 negative_sampler """basic""" +881 44 evaluator """rankbased""" +881 45 dataset """kinships""" +881 45 model """transh""" +881 45 loss """nssa""" +881 45 regularizer """transh""" +881 45 optimizer """adam""" +881 45 training_loop """owa""" +881 45 negative_sampler """basic""" +881 45 evaluator """rankbased""" +881 46 dataset """kinships""" +881 46 model """transh""" +881 46 loss """nssa""" +881 46 regularizer """transh""" +881 46 optimizer """adam""" +881 46 training_loop """owa""" +881 46 negative_sampler """basic""" +881 46 evaluator """rankbased""" +881 47 dataset """kinships""" +881 47 model """transh""" +881 47 loss """nssa""" +881 47 regularizer """transh""" +881 47 optimizer """adam""" +881 47 training_loop """owa""" +881 47 negative_sampler """basic""" +881 47 evaluator """rankbased""" +881 48 
dataset """kinships""" +881 48 model """transh""" +881 48 loss """nssa""" +881 48 regularizer """transh""" +881 48 optimizer """adam""" +881 48 training_loop """owa""" +881 48 negative_sampler """basic""" +881 48 evaluator """rankbased""" +881 49 dataset """kinships""" +881 49 model """transh""" +881 49 loss """nssa""" +881 49 regularizer """transh""" +881 49 optimizer """adam""" +881 49 training_loop """owa""" +881 49 negative_sampler """basic""" +881 49 evaluator """rankbased""" +881 50 dataset """kinships""" +881 50 model """transh""" +881 50 loss """nssa""" +881 50 regularizer """transh""" +881 50 optimizer """adam""" +881 50 training_loop """owa""" +881 50 negative_sampler """basic""" +881 50 evaluator """rankbased""" +881 51 dataset """kinships""" +881 51 model """transh""" +881 51 loss """nssa""" +881 51 regularizer """transh""" +881 51 optimizer """adam""" +881 51 training_loop """owa""" +881 51 negative_sampler """basic""" +881 51 evaluator """rankbased""" +881 52 dataset """kinships""" +881 52 model """transh""" +881 52 loss """nssa""" +881 52 regularizer """transh""" +881 52 optimizer """adam""" +881 52 training_loop """owa""" +881 52 negative_sampler """basic""" +881 52 evaluator """rankbased""" +881 53 dataset """kinships""" +881 53 model """transh""" +881 53 loss """nssa""" +881 53 regularizer """transh""" +881 53 optimizer """adam""" +881 53 training_loop """owa""" +881 53 negative_sampler """basic""" +881 53 evaluator """rankbased""" +881 54 dataset """kinships""" +881 54 model """transh""" +881 54 loss """nssa""" +881 54 regularizer """transh""" +881 54 optimizer """adam""" +881 54 training_loop """owa""" +881 54 negative_sampler """basic""" +881 54 evaluator """rankbased""" +881 55 dataset """kinships""" +881 55 model """transh""" +881 55 loss """nssa""" +881 55 regularizer """transh""" +881 55 optimizer """adam""" +881 55 training_loop """owa""" +881 55 negative_sampler """basic""" +881 55 evaluator """rankbased""" +881 56 dataset """kinships""" 
+881 56 model """transh""" +881 56 loss """nssa""" +881 56 regularizer """transh""" +881 56 optimizer """adam""" +881 56 training_loop """owa""" +881 56 negative_sampler """basic""" +881 56 evaluator """rankbased""" +881 57 dataset """kinships""" +881 57 model """transh""" +881 57 loss """nssa""" +881 57 regularizer """transh""" +881 57 optimizer """adam""" +881 57 training_loop """owa""" +881 57 negative_sampler """basic""" +881 57 evaluator """rankbased""" +881 58 dataset """kinships""" +881 58 model """transh""" +881 58 loss """nssa""" +881 58 regularizer """transh""" +881 58 optimizer """adam""" +881 58 training_loop """owa""" +881 58 negative_sampler """basic""" +881 58 evaluator """rankbased""" +881 59 dataset """kinships""" +881 59 model """transh""" +881 59 loss """nssa""" +881 59 regularizer """transh""" +881 59 optimizer """adam""" +881 59 training_loop """owa""" +881 59 negative_sampler """basic""" +881 59 evaluator """rankbased""" +881 60 dataset """kinships""" +881 60 model """transh""" +881 60 loss """nssa""" +881 60 regularizer """transh""" +881 60 optimizer """adam""" +881 60 training_loop """owa""" +881 60 negative_sampler """basic""" +881 60 evaluator """rankbased""" +881 61 dataset """kinships""" +881 61 model """transh""" +881 61 loss """nssa""" +881 61 regularizer """transh""" +881 61 optimizer """adam""" +881 61 training_loop """owa""" +881 61 negative_sampler """basic""" +881 61 evaluator """rankbased""" +881 62 dataset """kinships""" +881 62 model """transh""" +881 62 loss """nssa""" +881 62 regularizer """transh""" +881 62 optimizer """adam""" +881 62 training_loop """owa""" +881 62 negative_sampler """basic""" +881 62 evaluator """rankbased""" +881 63 dataset """kinships""" +881 63 model """transh""" +881 63 loss """nssa""" +881 63 regularizer """transh""" +881 63 optimizer """adam""" +881 63 training_loop """owa""" +881 63 negative_sampler """basic""" +881 63 evaluator """rankbased""" +881 64 dataset """kinships""" +881 64 model 
"""transh""" +881 64 loss """nssa""" +881 64 regularizer """transh""" +881 64 optimizer """adam""" +881 64 training_loop """owa""" +881 64 negative_sampler """basic""" +881 64 evaluator """rankbased""" +881 65 dataset """kinships""" +881 65 model """transh""" +881 65 loss """nssa""" +881 65 regularizer """transh""" +881 65 optimizer """adam""" +881 65 training_loop """owa""" +881 65 negative_sampler """basic""" +881 65 evaluator """rankbased""" +881 66 dataset """kinships""" +881 66 model """transh""" +881 66 loss """nssa""" +881 66 regularizer """transh""" +881 66 optimizer """adam""" +881 66 training_loop """owa""" +881 66 negative_sampler """basic""" +881 66 evaluator """rankbased""" +881 67 dataset """kinships""" +881 67 model """transh""" +881 67 loss """nssa""" +881 67 regularizer """transh""" +881 67 optimizer """adam""" +881 67 training_loop """owa""" +881 67 negative_sampler """basic""" +881 67 evaluator """rankbased""" +881 68 dataset """kinships""" +881 68 model """transh""" +881 68 loss """nssa""" +881 68 regularizer """transh""" +881 68 optimizer """adam""" +881 68 training_loop """owa""" +881 68 negative_sampler """basic""" +881 68 evaluator """rankbased""" +881 69 dataset """kinships""" +881 69 model """transh""" +881 69 loss """nssa""" +881 69 regularizer """transh""" +881 69 optimizer """adam""" +881 69 training_loop """owa""" +881 69 negative_sampler """basic""" +881 69 evaluator """rankbased""" +881 70 dataset """kinships""" +881 70 model """transh""" +881 70 loss """nssa""" +881 70 regularizer """transh""" +881 70 optimizer """adam""" +881 70 training_loop """owa""" +881 70 negative_sampler """basic""" +881 70 evaluator """rankbased""" +881 71 dataset """kinships""" +881 71 model """transh""" +881 71 loss """nssa""" +881 71 regularizer """transh""" +881 71 optimizer """adam""" +881 71 training_loop """owa""" +881 71 negative_sampler """basic""" +881 71 evaluator """rankbased""" +881 72 dataset """kinships""" +881 72 model """transh""" +881 72 
loss """nssa""" +881 72 regularizer """transh""" +881 72 optimizer """adam""" +881 72 training_loop """owa""" +881 72 negative_sampler """basic""" +881 72 evaluator """rankbased""" +881 73 dataset """kinships""" +881 73 model """transh""" +881 73 loss """nssa""" +881 73 regularizer """transh""" +881 73 optimizer """adam""" +881 73 training_loop """owa""" +881 73 negative_sampler """basic""" +881 73 evaluator """rankbased""" +881 74 dataset """kinships""" +881 74 model """transh""" +881 74 loss """nssa""" +881 74 regularizer """transh""" +881 74 optimizer """adam""" +881 74 training_loop """owa""" +881 74 negative_sampler """basic""" +881 74 evaluator """rankbased""" +881 75 dataset """kinships""" +881 75 model """transh""" +881 75 loss """nssa""" +881 75 regularizer """transh""" +881 75 optimizer """adam""" +881 75 training_loop """owa""" +881 75 negative_sampler """basic""" +881 75 evaluator """rankbased""" +881 76 dataset """kinships""" +881 76 model """transh""" +881 76 loss """nssa""" +881 76 regularizer """transh""" +881 76 optimizer """adam""" +881 76 training_loop """owa""" +881 76 negative_sampler """basic""" +881 76 evaluator """rankbased""" +881 77 dataset """kinships""" +881 77 model """transh""" +881 77 loss """nssa""" +881 77 regularizer """transh""" +881 77 optimizer """adam""" +881 77 training_loop """owa""" +881 77 negative_sampler """basic""" +881 77 evaluator """rankbased""" +881 78 dataset """kinships""" +881 78 model """transh""" +881 78 loss """nssa""" +881 78 regularizer """transh""" +881 78 optimizer """adam""" +881 78 training_loop """owa""" +881 78 negative_sampler """basic""" +881 78 evaluator """rankbased""" +881 79 dataset """kinships""" +881 79 model """transh""" +881 79 loss """nssa""" +881 79 regularizer """transh""" +881 79 optimizer """adam""" +881 79 training_loop """owa""" +881 79 negative_sampler """basic""" +881 79 evaluator """rankbased""" +881 80 dataset """kinships""" +881 80 model """transh""" +881 80 loss """nssa""" +881 80 
regularizer """transh""" +881 80 optimizer """adam""" +881 80 training_loop """owa""" +881 80 negative_sampler """basic""" +881 80 evaluator """rankbased""" +881 81 dataset """kinships""" +881 81 model """transh""" +881 81 loss """nssa""" +881 81 regularizer """transh""" +881 81 optimizer """adam""" +881 81 training_loop """owa""" +881 81 negative_sampler """basic""" +881 81 evaluator """rankbased""" +881 82 dataset """kinships""" +881 82 model """transh""" +881 82 loss """nssa""" +881 82 regularizer """transh""" +881 82 optimizer """adam""" +881 82 training_loop """owa""" +881 82 negative_sampler """basic""" +881 82 evaluator """rankbased""" +881 83 dataset """kinships""" +881 83 model """transh""" +881 83 loss """nssa""" +881 83 regularizer """transh""" +881 83 optimizer """adam""" +881 83 training_loop """owa""" +881 83 negative_sampler """basic""" +881 83 evaluator """rankbased""" +881 84 dataset """kinships""" +881 84 model """transh""" +881 84 loss """nssa""" +881 84 regularizer """transh""" +881 84 optimizer """adam""" +881 84 training_loop """owa""" +881 84 negative_sampler """basic""" +881 84 evaluator """rankbased""" +881 85 dataset """kinships""" +881 85 model """transh""" +881 85 loss """nssa""" +881 85 regularizer """transh""" +881 85 optimizer """adam""" +881 85 training_loop """owa""" +881 85 negative_sampler """basic""" +881 85 evaluator """rankbased""" +881 86 dataset """kinships""" +881 86 model """transh""" +881 86 loss """nssa""" +881 86 regularizer """transh""" +881 86 optimizer """adam""" +881 86 training_loop """owa""" +881 86 negative_sampler """basic""" +881 86 evaluator """rankbased""" +881 87 dataset """kinships""" +881 87 model """transh""" +881 87 loss """nssa""" +881 87 regularizer """transh""" +881 87 optimizer """adam""" +881 87 training_loop """owa""" +881 87 negative_sampler """basic""" +881 87 evaluator """rankbased""" +881 88 dataset """kinships""" +881 88 model """transh""" +881 88 loss """nssa""" +881 88 regularizer 
"""transh""" +881 88 optimizer """adam""" +881 88 training_loop """owa""" +881 88 negative_sampler """basic""" +881 88 evaluator """rankbased""" +881 89 dataset """kinships""" +881 89 model """transh""" +881 89 loss """nssa""" +881 89 regularizer """transh""" +881 89 optimizer """adam""" +881 89 training_loop """owa""" +881 89 negative_sampler """basic""" +881 89 evaluator """rankbased""" +881 90 dataset """kinships""" +881 90 model """transh""" +881 90 loss """nssa""" +881 90 regularizer """transh""" +881 90 optimizer """adam""" +881 90 training_loop """owa""" +881 90 negative_sampler """basic""" +881 90 evaluator """rankbased""" +881 91 dataset """kinships""" +881 91 model """transh""" +881 91 loss """nssa""" +881 91 regularizer """transh""" +881 91 optimizer """adam""" +881 91 training_loop """owa""" +881 91 negative_sampler """basic""" +881 91 evaluator """rankbased""" +881 92 dataset """kinships""" +881 92 model """transh""" +881 92 loss """nssa""" +881 92 regularizer """transh""" +881 92 optimizer """adam""" +881 92 training_loop """owa""" +881 92 negative_sampler """basic""" +881 92 evaluator """rankbased""" +881 93 dataset """kinships""" +881 93 model """transh""" +881 93 loss """nssa""" +881 93 regularizer """transh""" +881 93 optimizer """adam""" +881 93 training_loop """owa""" +881 93 negative_sampler """basic""" +881 93 evaluator """rankbased""" +881 94 dataset """kinships""" +881 94 model """transh""" +881 94 loss """nssa""" +881 94 regularizer """transh""" +881 94 optimizer """adam""" +881 94 training_loop """owa""" +881 94 negative_sampler """basic""" +881 94 evaluator """rankbased""" +881 95 dataset """kinships""" +881 95 model """transh""" +881 95 loss """nssa""" +881 95 regularizer """transh""" +881 95 optimizer """adam""" +881 95 training_loop """owa""" +881 95 negative_sampler """basic""" +881 95 evaluator """rankbased""" +881 96 dataset """kinships""" +881 96 model """transh""" +881 96 loss """nssa""" +881 96 regularizer """transh""" +881 96 
optimizer """adam""" +881 96 training_loop """owa""" +881 96 negative_sampler """basic""" +881 96 evaluator """rankbased""" +881 97 dataset """kinships""" +881 97 model """transh""" +881 97 loss """nssa""" +881 97 regularizer """transh""" +881 97 optimizer """adam""" +881 97 training_loop """owa""" +881 97 negative_sampler """basic""" +881 97 evaluator """rankbased""" +881 98 dataset """kinships""" +881 98 model """transh""" +881 98 loss """nssa""" +881 98 regularizer """transh""" +881 98 optimizer """adam""" +881 98 training_loop """owa""" +881 98 negative_sampler """basic""" +881 98 evaluator """rankbased""" +881 99 dataset """kinships""" +881 99 model """transh""" +881 99 loss """nssa""" +881 99 regularizer """transh""" +881 99 optimizer """adam""" +881 99 training_loop """owa""" +881 99 negative_sampler """basic""" +881 99 evaluator """rankbased""" +881 100 dataset """kinships""" +881 100 model """transh""" +881 100 loss """nssa""" +881 100 regularizer """transh""" +881 100 optimizer """adam""" +881 100 training_loop """owa""" +881 100 negative_sampler """basic""" +881 100 evaluator """rankbased""" +882 1 model.embedding_dim 1.0 +882 1 model.scoring_fct_norm 2.0 +882 1 loss.margin 11.938644677606675 +882 1 loss.adversarial_temperature 0.3716155732616431 +882 1 regularizer.weight 0.15063309100401923 +882 1 optimizer.lr 0.0013471870532269729 +882 1 negative_sampler.num_negs_per_pos 26.0 +882 1 training.batch_size 0.0 +882 2 model.embedding_dim 2.0 +882 2 model.scoring_fct_norm 1.0 +882 2 loss.margin 16.24028561773048 +882 2 loss.adversarial_temperature 0.15270301159532745 +882 2 regularizer.weight 0.0113664106617766 +882 2 optimizer.lr 0.04471914809916191 +882 2 negative_sampler.num_negs_per_pos 30.0 +882 2 training.batch_size 2.0 +882 3 model.embedding_dim 1.0 +882 3 model.scoring_fct_norm 1.0 +882 3 loss.margin 17.5817195579213 +882 3 loss.adversarial_temperature 0.33416826157716506 +882 3 regularizer.weight 0.0229939907093345 +882 3 optimizer.lr 
0.03088523570459835 +882 3 negative_sampler.num_negs_per_pos 20.0 +882 3 training.batch_size 0.0 +882 4 model.embedding_dim 0.0 +882 4 model.scoring_fct_norm 1.0 +882 4 loss.margin 23.403784383189972 +882 4 loss.adversarial_temperature 0.5231008838331245 +882 4 regularizer.weight 0.027105325432551224 +882 4 optimizer.lr 0.0026459794404474876 +882 4 negative_sampler.num_negs_per_pos 40.0 +882 4 training.batch_size 0.0 +882 5 model.embedding_dim 0.0 +882 5 model.scoring_fct_norm 2.0 +882 5 loss.margin 17.996125811091996 +882 5 loss.adversarial_temperature 0.61789827658053 +882 5 regularizer.weight 0.032825775230386225 +882 5 optimizer.lr 0.01656371286051358 +882 5 negative_sampler.num_negs_per_pos 41.0 +882 5 training.batch_size 0.0 +882 6 model.embedding_dim 2.0 +882 6 model.scoring_fct_norm 2.0 +882 6 loss.margin 29.73572424104859 +882 6 loss.adversarial_temperature 0.2213393291429071 +882 6 regularizer.weight 0.10302483778322306 +882 6 optimizer.lr 0.08723173987996137 +882 6 negative_sampler.num_negs_per_pos 69.0 +882 6 training.batch_size 0.0 +882 7 model.embedding_dim 2.0 +882 7 model.scoring_fct_norm 1.0 +882 7 loss.margin 10.179777201807887 +882 7 loss.adversarial_temperature 0.8513835347213542 +882 7 regularizer.weight 0.06966827914126336 +882 7 optimizer.lr 0.0014348979957088028 +882 7 negative_sampler.num_negs_per_pos 15.0 +882 7 training.batch_size 2.0 +882 8 model.embedding_dim 2.0 +882 8 model.scoring_fct_norm 2.0 +882 8 loss.margin 29.76201841699904 +882 8 loss.adversarial_temperature 0.9365563554256605 +882 8 regularizer.weight 0.014279046842894754 +882 8 optimizer.lr 0.0020333524167217352 +882 8 negative_sampler.num_negs_per_pos 4.0 +882 8 training.batch_size 0.0 +882 9 model.embedding_dim 2.0 +882 9 model.scoring_fct_norm 1.0 +882 9 loss.margin 7.101089108286967 +882 9 loss.adversarial_temperature 0.6320252640098372 +882 9 regularizer.weight 0.07196920973708337 +882 9 optimizer.lr 0.009364625376618928 +882 9 negative_sampler.num_negs_per_pos 49.0 
+882 9 training.batch_size 1.0 +882 10 model.embedding_dim 1.0 +882 10 model.scoring_fct_norm 1.0 +882 10 loss.margin 29.462002328011945 +882 10 loss.adversarial_temperature 0.1741668139763362 +882 10 regularizer.weight 0.018837051923926942 +882 10 optimizer.lr 0.05108453308790152 +882 10 negative_sampler.num_negs_per_pos 68.0 +882 10 training.batch_size 0.0 +882 11 model.embedding_dim 1.0 +882 11 model.scoring_fct_norm 1.0 +882 11 loss.margin 20.022340692857313 +882 11 loss.adversarial_temperature 0.9260543340634068 +882 11 regularizer.weight 0.02814828053723568 +882 11 optimizer.lr 0.03588898329063567 +882 11 negative_sampler.num_negs_per_pos 86.0 +882 11 training.batch_size 0.0 +882 12 model.embedding_dim 0.0 +882 12 model.scoring_fct_norm 2.0 +882 12 loss.margin 7.1410748720526325 +882 12 loss.adversarial_temperature 0.12421772514283766 +882 12 regularizer.weight 0.21047187887714822 +882 12 optimizer.lr 0.002498273096108022 +882 12 negative_sampler.num_negs_per_pos 85.0 +882 12 training.batch_size 0.0 +882 13 model.embedding_dim 0.0 +882 13 model.scoring_fct_norm 2.0 +882 13 loss.margin 20.017782959798073 +882 13 loss.adversarial_temperature 0.5289374977681756 +882 13 regularizer.weight 0.013574076379655752 +882 13 optimizer.lr 0.010073751711721491 +882 13 negative_sampler.num_negs_per_pos 96.0 +882 13 training.batch_size 2.0 +882 14 model.embedding_dim 0.0 +882 14 model.scoring_fct_norm 2.0 +882 14 loss.margin 4.964656623032969 +882 14 loss.adversarial_temperature 0.9317111850348825 +882 14 regularizer.weight 0.03596057748664488 +882 14 optimizer.lr 0.010686599146092341 +882 14 negative_sampler.num_negs_per_pos 70.0 +882 14 training.batch_size 1.0 +882 15 model.embedding_dim 2.0 +882 15 model.scoring_fct_norm 2.0 +882 15 loss.margin 15.879077465680243 +882 15 loss.adversarial_temperature 0.6740638877215579 +882 15 regularizer.weight 0.02061433885507659 +882 15 optimizer.lr 0.03253890887776218 +882 15 negative_sampler.num_negs_per_pos 68.0 +882 15 
training.batch_size 0.0 +882 16 model.embedding_dim 1.0 +882 16 model.scoring_fct_norm 2.0 +882 16 loss.margin 29.10417465625714 +882 16 loss.adversarial_temperature 0.6701030678576458 +882 16 regularizer.weight 0.030074107233963317 +882 16 optimizer.lr 0.002378339587299524 +882 16 negative_sampler.num_negs_per_pos 58.0 +882 16 training.batch_size 0.0 +882 17 model.embedding_dim 0.0 +882 17 model.scoring_fct_norm 2.0 +882 17 loss.margin 23.76195566471633 +882 17 loss.adversarial_temperature 0.814899860079266 +882 17 regularizer.weight 0.09749586793669744 +882 17 optimizer.lr 0.001362520292302337 +882 17 negative_sampler.num_negs_per_pos 95.0 +882 17 training.batch_size 2.0 +882 18 model.embedding_dim 0.0 +882 18 model.scoring_fct_norm 2.0 +882 18 loss.margin 22.930515199162002 +882 18 loss.adversarial_temperature 0.13773779704167888 +882 18 regularizer.weight 0.04055752587490976 +882 18 optimizer.lr 0.0011839489440652305 +882 18 negative_sampler.num_negs_per_pos 0.0 +882 18 training.batch_size 0.0 +882 19 model.embedding_dim 0.0 +882 19 model.scoring_fct_norm 1.0 +882 19 loss.margin 6.730044566529358 +882 19 loss.adversarial_temperature 0.12874574385956541 +882 19 regularizer.weight 0.12549666658912575 +882 19 optimizer.lr 0.007855196609604079 +882 19 negative_sampler.num_negs_per_pos 6.0 +882 19 training.batch_size 2.0 +882 20 model.embedding_dim 2.0 +882 20 model.scoring_fct_norm 1.0 +882 20 loss.margin 10.148536489842197 +882 20 loss.adversarial_temperature 0.8669993270077261 +882 20 regularizer.weight 0.045221932999924636 +882 20 optimizer.lr 0.00539386190943072 +882 20 negative_sampler.num_negs_per_pos 64.0 +882 20 training.batch_size 2.0 +882 21 model.embedding_dim 1.0 +882 21 model.scoring_fct_norm 2.0 +882 21 loss.margin 3.7650883092734766 +882 21 loss.adversarial_temperature 0.9456838164754796 +882 21 regularizer.weight 0.017729410540203716 +882 21 optimizer.lr 0.0022831239170665175 +882 21 negative_sampler.num_negs_per_pos 68.0 +882 21 training.batch_size 
1.0 +882 22 model.embedding_dim 0.0 +882 22 model.scoring_fct_norm 1.0 +882 22 loss.margin 29.40182484424107 +882 22 loss.adversarial_temperature 0.21644656826856318 +882 22 regularizer.weight 0.021898674282493456 +882 22 optimizer.lr 0.033447336758230556 +882 22 negative_sampler.num_negs_per_pos 61.0 +882 22 training.batch_size 1.0 +882 23 model.embedding_dim 0.0 +882 23 model.scoring_fct_norm 1.0 +882 23 loss.margin 19.872733686058968 +882 23 loss.adversarial_temperature 0.680611390473247 +882 23 regularizer.weight 0.01791484856131736 +882 23 optimizer.lr 0.001364578672424319 +882 23 negative_sampler.num_negs_per_pos 8.0 +882 23 training.batch_size 0.0 +882 24 model.embedding_dim 2.0 +882 24 model.scoring_fct_norm 1.0 +882 24 loss.margin 8.102472022170701 +882 24 loss.adversarial_temperature 0.5911855183888423 +882 24 regularizer.weight 0.17650926753937562 +882 24 optimizer.lr 0.006836267275876053 +882 24 negative_sampler.num_negs_per_pos 78.0 +882 24 training.batch_size 0.0 +882 25 model.embedding_dim 0.0 +882 25 model.scoring_fct_norm 2.0 +882 25 loss.margin 1.6133664842524924 +882 25 loss.adversarial_temperature 0.20853515227043995 +882 25 regularizer.weight 0.012783046154449071 +882 25 optimizer.lr 0.008464254969534953 +882 25 negative_sampler.num_negs_per_pos 54.0 +882 25 training.batch_size 0.0 +882 26 model.embedding_dim 1.0 +882 26 model.scoring_fct_norm 1.0 +882 26 loss.margin 12.922091020064384 +882 26 loss.adversarial_temperature 0.17660579559893708 +882 26 regularizer.weight 0.02853381779298401 +882 26 optimizer.lr 0.002141059969559149 +882 26 negative_sampler.num_negs_per_pos 65.0 +882 26 training.batch_size 0.0 +882 27 model.embedding_dim 1.0 +882 27 model.scoring_fct_norm 2.0 +882 27 loss.margin 6.3374357032000885 +882 27 loss.adversarial_temperature 0.9668017147406771 +882 27 regularizer.weight 0.10772969167292766 +882 27 optimizer.lr 0.0030120843103600857 +882 27 negative_sampler.num_negs_per_pos 25.0 +882 27 training.batch_size 2.0 +882 28 
model.embedding_dim 1.0 +882 28 model.scoring_fct_norm 2.0 +882 28 loss.margin 6.826699848234785 +882 28 loss.adversarial_temperature 0.5565786396229491 +882 28 regularizer.weight 0.02709310286778101 +882 28 optimizer.lr 0.008528293599524443 +882 28 negative_sampler.num_negs_per_pos 29.0 +882 28 training.batch_size 1.0 +882 29 model.embedding_dim 2.0 +882 29 model.scoring_fct_norm 1.0 +882 29 loss.margin 14.703043921634578 +882 29 loss.adversarial_temperature 0.24907287210973691 +882 29 regularizer.weight 0.034492644836942345 +882 29 optimizer.lr 0.0024831136437182353 +882 29 negative_sampler.num_negs_per_pos 62.0 +882 29 training.batch_size 1.0 +882 30 model.embedding_dim 2.0 +882 30 model.scoring_fct_norm 1.0 +882 30 loss.margin 4.541277331231166 +882 30 loss.adversarial_temperature 0.24542948140404025 +882 30 regularizer.weight 0.056392464404420424 +882 30 optimizer.lr 0.045011265767267924 +882 30 negative_sampler.num_negs_per_pos 0.0 +882 30 training.batch_size 2.0 +882 31 model.embedding_dim 2.0 +882 31 model.scoring_fct_norm 2.0 +882 31 loss.margin 25.69299589381542 +882 31 loss.adversarial_temperature 0.3703137192337683 +882 31 regularizer.weight 0.23162563834201844 +882 31 optimizer.lr 0.0011500908450585783 +882 31 negative_sampler.num_negs_per_pos 18.0 +882 31 training.batch_size 0.0 +882 32 model.embedding_dim 1.0 +882 32 model.scoring_fct_norm 2.0 +882 32 loss.margin 10.530882852996827 +882 32 loss.adversarial_temperature 0.931995968491365 +882 32 regularizer.weight 0.01672308666542724 +882 32 optimizer.lr 0.08803885892751102 +882 32 negative_sampler.num_negs_per_pos 0.0 +882 32 training.batch_size 1.0 +882 33 model.embedding_dim 2.0 +882 33 model.scoring_fct_norm 2.0 +882 33 loss.margin 20.55897782085674 +882 33 loss.adversarial_temperature 0.7585201288915853 +882 33 regularizer.weight 0.021199093860948467 +882 33 optimizer.lr 0.017833300151540398 +882 33 negative_sampler.num_negs_per_pos 90.0 +882 33 training.batch_size 2.0 +882 34 model.embedding_dim 
2.0 +882 34 model.scoring_fct_norm 2.0 +882 34 loss.margin 12.410721916586592 +882 34 loss.adversarial_temperature 0.7786142062274984 +882 34 regularizer.weight 0.020426782357259882 +882 34 optimizer.lr 0.0032792014149176994 +882 34 negative_sampler.num_negs_per_pos 74.0 +882 34 training.batch_size 1.0 +882 35 model.embedding_dim 0.0 +882 35 model.scoring_fct_norm 1.0 +882 35 loss.margin 28.600910723206532 +882 35 loss.adversarial_temperature 0.6667135453664301 +882 35 regularizer.weight 0.23707853844965573 +882 35 optimizer.lr 0.07524199974751478 +882 35 negative_sampler.num_negs_per_pos 59.0 +882 35 training.batch_size 0.0 +882 36 model.embedding_dim 2.0 +882 36 model.scoring_fct_norm 2.0 +882 36 loss.margin 8.164538131199894 +882 36 loss.adversarial_temperature 0.6039730709380431 +882 36 regularizer.weight 0.0823212254315391 +882 36 optimizer.lr 0.09097499222515017 +882 36 negative_sampler.num_negs_per_pos 85.0 +882 36 training.batch_size 2.0 +882 37 model.embedding_dim 1.0 +882 37 model.scoring_fct_norm 1.0 +882 37 loss.margin 16.50708695556932 +882 37 loss.adversarial_temperature 0.9652562169242759 +882 37 regularizer.weight 0.0867058550203261 +882 37 optimizer.lr 0.020753699991376457 +882 37 negative_sampler.num_negs_per_pos 95.0 +882 37 training.batch_size 2.0 +882 38 model.embedding_dim 1.0 +882 38 model.scoring_fct_norm 2.0 +882 38 loss.margin 4.463625028572624 +882 38 loss.adversarial_temperature 0.966299128351274 +882 38 regularizer.weight 0.010823930052470523 +882 38 optimizer.lr 0.0011882551128773614 +882 38 negative_sampler.num_negs_per_pos 7.0 +882 38 training.batch_size 2.0 +882 39 model.embedding_dim 0.0 +882 39 model.scoring_fct_norm 1.0 +882 39 loss.margin 16.006923013637575 +882 39 loss.adversarial_temperature 0.9721025091949465 +882 39 regularizer.weight 0.019893359995802957 +882 39 optimizer.lr 0.05507665015801305 +882 39 negative_sampler.num_negs_per_pos 4.0 +882 39 training.batch_size 1.0 +882 40 model.embedding_dim 0.0 +882 40 
model.scoring_fct_norm 1.0 +882 40 loss.margin 9.117354394879522 +882 40 loss.adversarial_temperature 0.8692076073290304 +882 40 regularizer.weight 0.012171746688271135 +882 40 optimizer.lr 0.005363108580144186 +882 40 negative_sampler.num_negs_per_pos 33.0 +882 40 training.batch_size 1.0 +882 41 model.embedding_dim 1.0 +882 41 model.scoring_fct_norm 1.0 +882 41 loss.margin 6.180592074526407 +882 41 loss.adversarial_temperature 0.5593855592190499 +882 41 regularizer.weight 0.03481560538114764 +882 41 optimizer.lr 0.0010096565552741815 +882 41 negative_sampler.num_negs_per_pos 81.0 +882 41 training.batch_size 0.0 +882 42 model.embedding_dim 1.0 +882 42 model.scoring_fct_norm 2.0 +882 42 loss.margin 19.51629784955086 +882 42 loss.adversarial_temperature 0.4112691831035987 +882 42 regularizer.weight 0.04152079687897227 +882 42 optimizer.lr 0.011719050231689418 +882 42 negative_sampler.num_negs_per_pos 34.0 +882 42 training.batch_size 0.0 +882 43 model.embedding_dim 0.0 +882 43 model.scoring_fct_norm 2.0 +882 43 loss.margin 15.935752989921845 +882 43 loss.adversarial_temperature 0.27832675080679536 +882 43 regularizer.weight 0.10029571068600844 +882 43 optimizer.lr 0.011475729718427361 +882 43 negative_sampler.num_negs_per_pos 30.0 +882 43 training.batch_size 1.0 +882 44 model.embedding_dim 2.0 +882 44 model.scoring_fct_norm 1.0 +882 44 loss.margin 26.545904874056294 +882 44 loss.adversarial_temperature 0.5379767833504571 +882 44 regularizer.weight 0.03307915826712608 +882 44 optimizer.lr 0.05051498717097444 +882 44 negative_sampler.num_negs_per_pos 2.0 +882 44 training.batch_size 2.0 +882 45 model.embedding_dim 2.0 +882 45 model.scoring_fct_norm 1.0 +882 45 loss.margin 19.883723789742113 +882 45 loss.adversarial_temperature 0.9290956561942858 +882 45 regularizer.weight 0.29337056157278824 +882 45 optimizer.lr 0.012391987401841782 +882 45 negative_sampler.num_negs_per_pos 41.0 +882 45 training.batch_size 0.0 +882 46 model.embedding_dim 1.0 +882 46 
model.scoring_fct_norm 1.0 +882 46 loss.margin 3.8422800393082936 +882 46 loss.adversarial_temperature 0.34732591739074214 +882 46 regularizer.weight 0.16577050608179972 +882 46 optimizer.lr 0.014243684066571094 +882 46 negative_sampler.num_negs_per_pos 89.0 +882 46 training.batch_size 1.0 +882 47 model.embedding_dim 1.0 +882 47 model.scoring_fct_norm 2.0 +882 47 loss.margin 17.473024077866675 +882 47 loss.adversarial_temperature 0.5907568368452167 +882 47 regularizer.weight 0.21434252507405313 +882 47 optimizer.lr 0.010967489209155825 +882 47 negative_sampler.num_negs_per_pos 50.0 +882 47 training.batch_size 1.0 +882 48 model.embedding_dim 1.0 +882 48 model.scoring_fct_norm 1.0 +882 48 loss.margin 3.0914657119268223 +882 48 loss.adversarial_temperature 0.1775168926609218 +882 48 regularizer.weight 0.0735179405117633 +882 48 optimizer.lr 0.0023021734009481976 +882 48 negative_sampler.num_negs_per_pos 62.0 +882 48 training.batch_size 1.0 +882 49 model.embedding_dim 1.0 +882 49 model.scoring_fct_norm 1.0 +882 49 loss.margin 5.505433221523593 +882 49 loss.adversarial_temperature 0.6083061073774392 +882 49 regularizer.weight 0.06838869925115142 +882 49 optimizer.lr 0.012399448289973972 +882 49 negative_sampler.num_negs_per_pos 74.0 +882 49 training.batch_size 2.0 +882 50 model.embedding_dim 1.0 +882 50 model.scoring_fct_norm 1.0 +882 50 loss.margin 14.309386846587447 +882 50 loss.adversarial_temperature 0.7616850607021814 +882 50 regularizer.weight 0.04896014135229339 +882 50 optimizer.lr 0.009435326347234365 +882 50 negative_sampler.num_negs_per_pos 64.0 +882 50 training.batch_size 2.0 +882 51 model.embedding_dim 2.0 +882 51 model.scoring_fct_norm 2.0 +882 51 loss.margin 26.638960329007915 +882 51 loss.adversarial_temperature 0.948140941479433 +882 51 regularizer.weight 0.08834514977721292 +882 51 optimizer.lr 0.01946675088015944 +882 51 negative_sampler.num_negs_per_pos 38.0 +882 51 training.batch_size 2.0 +882 52 model.embedding_dim 2.0 +882 52 
model.scoring_fct_norm 2.0 +882 52 loss.margin 17.276444808732816 +882 52 loss.adversarial_temperature 0.7855047472712444 +882 52 regularizer.weight 0.031612693618756134 +882 52 optimizer.lr 0.05832908102641968 +882 52 negative_sampler.num_negs_per_pos 30.0 +882 52 training.batch_size 0.0 +882 53 model.embedding_dim 0.0 +882 53 model.scoring_fct_norm 1.0 +882 53 loss.margin 15.441129214209754 +882 53 loss.adversarial_temperature 0.5766094764558142 +882 53 regularizer.weight 0.025363451009112484 +882 53 optimizer.lr 0.004130693429395788 +882 53 negative_sampler.num_negs_per_pos 58.0 +882 53 training.batch_size 2.0 +882 54 model.embedding_dim 1.0 +882 54 model.scoring_fct_norm 2.0 +882 54 loss.margin 4.33236397842912 +882 54 loss.adversarial_temperature 0.8907531264188442 +882 54 regularizer.weight 0.15074877886251545 +882 54 optimizer.lr 0.04946439853471789 +882 54 negative_sampler.num_negs_per_pos 32.0 +882 54 training.batch_size 1.0 +882 55 model.embedding_dim 2.0 +882 55 model.scoring_fct_norm 2.0 +882 55 loss.margin 19.160865162247344 +882 55 loss.adversarial_temperature 0.18880071044609148 +882 55 regularizer.weight 0.04128650399354523 +882 55 optimizer.lr 0.005495465264819957 +882 55 negative_sampler.num_negs_per_pos 74.0 +882 55 training.batch_size 0.0 +882 56 model.embedding_dim 2.0 +882 56 model.scoring_fct_norm 1.0 +882 56 loss.margin 7.356777502303937 +882 56 loss.adversarial_temperature 0.8529547628204396 +882 56 regularizer.weight 0.0925991952860135 +882 56 optimizer.lr 0.08240996689804748 +882 56 negative_sampler.num_negs_per_pos 12.0 +882 56 training.batch_size 2.0 +882 57 model.embedding_dim 1.0 +882 57 model.scoring_fct_norm 2.0 +882 57 loss.margin 18.58479601917995 +882 57 loss.adversarial_temperature 0.37507820934686564 +882 57 regularizer.weight 0.012697534228867643 +882 57 optimizer.lr 0.0014180111370448083 +882 57 negative_sampler.num_negs_per_pos 57.0 +882 57 training.batch_size 0.0 +882 58 model.embedding_dim 0.0 +882 58 
model.scoring_fct_norm 1.0 +882 58 loss.margin 27.076608508289546 +882 58 loss.adversarial_temperature 0.33399780181892175 +882 58 regularizer.weight 0.20645293309637772 +882 58 optimizer.lr 0.007290261634277331 +882 58 negative_sampler.num_negs_per_pos 90.0 +882 58 training.batch_size 0.0 +882 59 model.embedding_dim 2.0 +882 59 model.scoring_fct_norm 2.0 +882 59 loss.margin 8.005135197488647 +882 59 loss.adversarial_temperature 0.7571427887953388 +882 59 regularizer.weight 0.012086573942489123 +882 59 optimizer.lr 0.002677144858832694 +882 59 negative_sampler.num_negs_per_pos 0.0 +882 59 training.batch_size 1.0 +882 60 model.embedding_dim 0.0 +882 60 model.scoring_fct_norm 2.0 +882 60 loss.margin 18.19263920370929 +882 60 loss.adversarial_temperature 0.6054181221731316 +882 60 regularizer.weight 0.1024831349700286 +882 60 optimizer.lr 0.0035453224633437983 +882 60 negative_sampler.num_negs_per_pos 26.0 +882 60 training.batch_size 0.0 +882 61 model.embedding_dim 1.0 +882 61 model.scoring_fct_norm 2.0 +882 61 loss.margin 13.850372312700284 +882 61 loss.adversarial_temperature 0.5638486571795989 +882 61 regularizer.weight 0.18081729641137065 +882 61 optimizer.lr 0.03200943578021978 +882 61 negative_sampler.num_negs_per_pos 80.0 +882 61 training.batch_size 1.0 +882 62 model.embedding_dim 2.0 +882 62 model.scoring_fct_norm 2.0 +882 62 loss.margin 10.53481782059362 +882 62 loss.adversarial_temperature 0.2140397385988328 +882 62 regularizer.weight 0.013010398427904217 +882 62 optimizer.lr 0.005229308774792516 +882 62 negative_sampler.num_negs_per_pos 19.0 +882 62 training.batch_size 0.0 +882 63 model.embedding_dim 2.0 +882 63 model.scoring_fct_norm 1.0 +882 63 loss.margin 29.378866249532948 +882 63 loss.adversarial_temperature 0.44576697807770527 +882 63 regularizer.weight 0.015887834108801936 +882 63 optimizer.lr 0.04803361474408774 +882 63 negative_sampler.num_negs_per_pos 20.0 +882 63 training.batch_size 2.0 +882 64 model.embedding_dim 1.0 +882 64 
model.scoring_fct_norm 2.0 +882 64 loss.margin 17.137557084086634 +882 64 loss.adversarial_temperature 0.3817595315455271 +882 64 regularizer.weight 0.17144701287149955 +882 64 optimizer.lr 0.016569393293274226 +882 64 negative_sampler.num_negs_per_pos 75.0 +882 64 training.batch_size 1.0 +882 65 model.embedding_dim 2.0 +882 65 model.scoring_fct_norm 2.0 +882 65 loss.margin 2.5398928024106304 +882 65 loss.adversarial_temperature 0.5184148507096512 +882 65 regularizer.weight 0.20827630748930936 +882 65 optimizer.lr 0.0027199827909993946 +882 65 negative_sampler.num_negs_per_pos 19.0 +882 65 training.batch_size 2.0 +882 66 model.embedding_dim 1.0 +882 66 model.scoring_fct_norm 2.0 +882 66 loss.margin 5.158163415460062 +882 66 loss.adversarial_temperature 0.6991368404842385 +882 66 regularizer.weight 0.03929017786670562 +882 66 optimizer.lr 0.03302657839641834 +882 66 negative_sampler.num_negs_per_pos 94.0 +882 66 training.batch_size 2.0 +882 67 model.embedding_dim 2.0 +882 67 model.scoring_fct_norm 2.0 +882 67 loss.margin 9.094900905799175 +882 67 loss.adversarial_temperature 0.25439961038126413 +882 67 regularizer.weight 0.0642593640434103 +882 67 optimizer.lr 0.009479046705325714 +882 67 negative_sampler.num_negs_per_pos 86.0 +882 67 training.batch_size 1.0 +882 68 model.embedding_dim 2.0 +882 68 model.scoring_fct_norm 2.0 +882 68 loss.margin 7.652520105078982 +882 68 loss.adversarial_temperature 0.19715329805529103 +882 68 regularizer.weight 0.014645494340160773 +882 68 optimizer.lr 0.0028997849813249413 +882 68 negative_sampler.num_negs_per_pos 99.0 +882 68 training.batch_size 0.0 +882 69 model.embedding_dim 2.0 +882 69 model.scoring_fct_norm 1.0 +882 69 loss.margin 21.22024624575269 +882 69 loss.adversarial_temperature 0.20317934120436004 +882 69 regularizer.weight 0.025791610951743857 +882 69 optimizer.lr 0.024683652406834095 +882 69 negative_sampler.num_negs_per_pos 70.0 +882 69 training.batch_size 2.0 +882 70 model.embedding_dim 1.0 +882 70 
model.scoring_fct_norm 2.0 +882 70 loss.margin 19.628752103615 +882 70 loss.adversarial_temperature 0.8348117870848627 +882 70 regularizer.weight 0.07687000167008694 +882 70 optimizer.lr 0.0012721481270354551 +882 70 negative_sampler.num_negs_per_pos 10.0 +882 70 training.batch_size 0.0 +882 71 model.embedding_dim 2.0 +882 71 model.scoring_fct_norm 2.0 +882 71 loss.margin 26.784223681880245 +882 71 loss.adversarial_temperature 0.6667364328280934 +882 71 regularizer.weight 0.014011329913879711 +882 71 optimizer.lr 0.006547253950534114 +882 71 negative_sampler.num_negs_per_pos 61.0 +882 71 training.batch_size 2.0 +882 72 model.embedding_dim 2.0 +882 72 model.scoring_fct_norm 2.0 +882 72 loss.margin 5.629268510136637 +882 72 loss.adversarial_temperature 0.9738298628081992 +882 72 regularizer.weight 0.08274515955018011 +882 72 optimizer.lr 0.0032784134223010444 +882 72 negative_sampler.num_negs_per_pos 11.0 +882 72 training.batch_size 1.0 +882 73 model.embedding_dim 0.0 +882 73 model.scoring_fct_norm 2.0 +882 73 loss.margin 6.1818100038309876 +882 73 loss.adversarial_temperature 0.6096417755177586 +882 73 regularizer.weight 0.025116671825819725 +882 73 optimizer.lr 0.0045824139973737864 +882 73 negative_sampler.num_negs_per_pos 41.0 +882 73 training.batch_size 1.0 +882 74 model.embedding_dim 2.0 +882 74 model.scoring_fct_norm 2.0 +882 74 loss.margin 16.660399482728348 +882 74 loss.adversarial_temperature 0.3165945580689825 +882 74 regularizer.weight 0.05009014710344538 +882 74 optimizer.lr 0.0011146154280825846 +882 74 negative_sampler.num_negs_per_pos 71.0 +882 74 training.batch_size 0.0 +882 75 model.embedding_dim 1.0 +882 75 model.scoring_fct_norm 1.0 +882 75 loss.margin 6.42486262876123 +882 75 loss.adversarial_temperature 0.40507416213201186 +882 75 regularizer.weight 0.017360497431949545 +882 75 optimizer.lr 0.08693012895796227 +882 75 negative_sampler.num_negs_per_pos 36.0 +882 75 training.batch_size 2.0 +882 76 model.embedding_dim 1.0 +882 76 
model.scoring_fct_norm 2.0 +882 76 loss.margin 17.570085842291405 +882 76 loss.adversarial_temperature 0.29413128260724203 +882 76 regularizer.weight 0.1500170039267493 +882 76 optimizer.lr 0.041394223056410884 +882 76 negative_sampler.num_negs_per_pos 9.0 +882 76 training.batch_size 2.0 +882 77 model.embedding_dim 0.0 +882 77 model.scoring_fct_norm 1.0 +882 77 loss.margin 11.020630963724479 +882 77 loss.adversarial_temperature 0.5273349850500847 +882 77 regularizer.weight 0.09404789122429641 +882 77 optimizer.lr 0.02612232668707993 +882 77 negative_sampler.num_negs_per_pos 40.0 +882 77 training.batch_size 1.0 +882 78 model.embedding_dim 1.0 +882 78 model.scoring_fct_norm 1.0 +882 78 loss.margin 1.8680442817799747 +882 78 loss.adversarial_temperature 0.554366475983839 +882 78 regularizer.weight 0.1303585024493409 +882 78 optimizer.lr 0.014686097201309957 +882 78 negative_sampler.num_negs_per_pos 32.0 +882 78 training.batch_size 0.0 +882 79 model.embedding_dim 1.0 +882 79 model.scoring_fct_norm 2.0 +882 79 loss.margin 4.640874353036556 +882 79 loss.adversarial_temperature 0.5941055517709881 +882 79 regularizer.weight 0.012703347428230637 +882 79 optimizer.lr 0.018286095461309382 +882 79 negative_sampler.num_negs_per_pos 12.0 +882 79 training.batch_size 1.0 +882 80 model.embedding_dim 1.0 +882 80 model.scoring_fct_norm 2.0 +882 80 loss.margin 17.646502312600465 +882 80 loss.adversarial_temperature 0.11694260133345122 +882 80 regularizer.weight 0.021890046603561145 +882 80 optimizer.lr 0.07553114972132904 +882 80 negative_sampler.num_negs_per_pos 48.0 +882 80 training.batch_size 0.0 +882 81 model.embedding_dim 2.0 +882 81 model.scoring_fct_norm 1.0 +882 81 loss.margin 29.73207241663892 +882 81 loss.adversarial_temperature 0.12197557268085932 +882 81 regularizer.weight 0.12103472752149295 +882 81 optimizer.lr 0.010551388993259195 +882 81 negative_sampler.num_negs_per_pos 18.0 +882 81 training.batch_size 2.0 +882 82 model.embedding_dim 2.0 +882 82 model.scoring_fct_norm 
2.0 +882 82 loss.margin 13.734571732437713 +882 82 loss.adversarial_temperature 0.5861908550239102 +882 82 regularizer.weight 0.015141538479730187 +882 82 optimizer.lr 0.0051243256998801225 +882 82 negative_sampler.num_negs_per_pos 57.0 +882 82 training.batch_size 1.0 +882 83 model.embedding_dim 1.0 +882 83 model.scoring_fct_norm 2.0 +882 83 loss.margin 10.793123248607188 +882 83 loss.adversarial_temperature 0.9256916256775927 +882 83 regularizer.weight 0.15194761154735031 +882 83 optimizer.lr 0.003789857025329718 +882 83 negative_sampler.num_negs_per_pos 63.0 +882 83 training.batch_size 1.0 +882 84 model.embedding_dim 1.0 +882 84 model.scoring_fct_norm 1.0 +882 84 loss.margin 10.270910281768654 +882 84 loss.adversarial_temperature 0.33022993657661975 +882 84 regularizer.weight 0.020774636260225292 +882 84 optimizer.lr 0.03233564986835656 +882 84 negative_sampler.num_negs_per_pos 31.0 +882 84 training.batch_size 2.0 +882 85 model.embedding_dim 2.0 +882 85 model.scoring_fct_norm 2.0 +882 85 loss.margin 9.107388315891733 +882 85 loss.adversarial_temperature 0.3154213285715788 +882 85 regularizer.weight 0.07278047324791821 +882 85 optimizer.lr 0.003964918015409163 +882 85 negative_sampler.num_negs_per_pos 82.0 +882 85 training.batch_size 1.0 +882 86 model.embedding_dim 1.0 +882 86 model.scoring_fct_norm 2.0 +882 86 loss.margin 15.311135850562614 +882 86 loss.adversarial_temperature 0.18917668698131573 +882 86 regularizer.weight 0.2515998229416368 +882 86 optimizer.lr 0.006703704357281508 +882 86 negative_sampler.num_negs_per_pos 38.0 +882 86 training.batch_size 1.0 +882 87 model.embedding_dim 0.0 +882 87 model.scoring_fct_norm 2.0 +882 87 loss.margin 27.646590418178832 +882 87 loss.adversarial_temperature 0.6520417169489461 +882 87 regularizer.weight 0.21806164633560554 +882 87 optimizer.lr 0.011837743287467745 +882 87 negative_sampler.num_negs_per_pos 3.0 +882 87 training.batch_size 2.0 +882 88 model.embedding_dim 1.0 +882 88 model.scoring_fct_norm 1.0 +882 88 
loss.margin 6.20069679204623 +882 88 loss.adversarial_temperature 0.526436567668126 +882 88 regularizer.weight 0.04021184522590329 +882 88 optimizer.lr 0.0013863334549147491 +882 88 negative_sampler.num_negs_per_pos 2.0 +882 88 training.batch_size 2.0 +882 89 model.embedding_dim 1.0 +882 89 model.scoring_fct_norm 1.0 +882 89 loss.margin 10.76801661608451 +882 89 loss.adversarial_temperature 0.4449936801850033 +882 89 regularizer.weight 0.09191715416710897 +882 89 optimizer.lr 0.004420299888600428 +882 89 negative_sampler.num_negs_per_pos 74.0 +882 89 training.batch_size 0.0 +882 90 model.embedding_dim 0.0 +882 90 model.scoring_fct_norm 2.0 +882 90 loss.margin 27.56863683038852 +882 90 loss.adversarial_temperature 0.6046131885750196 +882 90 regularizer.weight 0.04043023380350327 +882 90 optimizer.lr 0.00685846689592413 +882 90 negative_sampler.num_negs_per_pos 27.0 +882 90 training.batch_size 1.0 +882 91 model.embedding_dim 2.0 +882 91 model.scoring_fct_norm 1.0 +882 91 loss.margin 23.83999719343506 +882 91 loss.adversarial_temperature 0.20945347716151524 +882 91 regularizer.weight 0.07547672447719304 +882 91 optimizer.lr 0.002534584789976713 +882 91 negative_sampler.num_negs_per_pos 63.0 +882 91 training.batch_size 1.0 +882 92 model.embedding_dim 0.0 +882 92 model.scoring_fct_norm 1.0 +882 92 loss.margin 27.191148268612828 +882 92 loss.adversarial_temperature 0.4893904377917595 +882 92 regularizer.weight 0.12898892157314248 +882 92 optimizer.lr 0.026388169848164673 +882 92 negative_sampler.num_negs_per_pos 57.0 +882 92 training.batch_size 0.0 +882 93 model.embedding_dim 2.0 +882 93 model.scoring_fct_norm 2.0 +882 93 loss.margin 20.274928637218167 +882 93 loss.adversarial_temperature 0.8666197651472873 +882 93 regularizer.weight 0.1259601598831654 +882 93 optimizer.lr 0.0076125490809551615 +882 93 negative_sampler.num_negs_per_pos 86.0 +882 93 training.batch_size 0.0 +882 94 model.embedding_dim 0.0 +882 94 model.scoring_fct_norm 2.0 +882 94 loss.margin 
8.215066087939809 +882 94 loss.adversarial_temperature 0.14794680883547706 +882 94 regularizer.weight 0.07969579269155458 +882 94 optimizer.lr 0.03857626257315041 +882 94 negative_sampler.num_negs_per_pos 89.0 +882 94 training.batch_size 2.0 +882 95 model.embedding_dim 0.0 +882 95 model.scoring_fct_norm 2.0 +882 95 loss.margin 1.235583384843332 +882 95 loss.adversarial_temperature 0.21637950074989848 +882 95 regularizer.weight 0.01873885111159162 +882 95 optimizer.lr 0.008760774693384535 +882 95 negative_sampler.num_negs_per_pos 20.0 +882 95 training.batch_size 1.0 +882 96 model.embedding_dim 1.0 +882 96 model.scoring_fct_norm 1.0 +882 96 loss.margin 25.012216353535848 +882 96 loss.adversarial_temperature 0.8963306073300754 +882 96 regularizer.weight 0.0999508246645951 +882 96 optimizer.lr 0.006546821247675138 +882 96 negative_sampler.num_negs_per_pos 43.0 +882 96 training.batch_size 2.0 +882 97 model.embedding_dim 0.0 +882 97 model.scoring_fct_norm 2.0 +882 97 loss.margin 20.694969986867214 +882 97 loss.adversarial_temperature 0.26457495742006837 +882 97 regularizer.weight 0.07765576934867872 +882 97 optimizer.lr 0.008926174354458248 +882 97 negative_sampler.num_negs_per_pos 95.0 +882 97 training.batch_size 2.0 +882 98 model.embedding_dim 0.0 +882 98 model.scoring_fct_norm 1.0 +882 98 loss.margin 28.90829959884376 +882 98 loss.adversarial_temperature 0.8911427087480747 +882 98 regularizer.weight 0.011277896682030681 +882 98 optimizer.lr 0.03918162946362746 +882 98 negative_sampler.num_negs_per_pos 92.0 +882 98 training.batch_size 1.0 +882 99 model.embedding_dim 0.0 +882 99 model.scoring_fct_norm 1.0 +882 99 loss.margin 22.845291237300575 +882 99 loss.adversarial_temperature 0.5511524191923823 +882 99 regularizer.weight 0.09367749811195694 +882 99 optimizer.lr 0.013772858963866277 +882 99 negative_sampler.num_negs_per_pos 45.0 +882 99 training.batch_size 2.0 +882 100 model.embedding_dim 1.0 +882 100 model.scoring_fct_norm 2.0 +882 100 loss.margin 21.03640701665088 
+882 100 loss.adversarial_temperature 0.1640524528008184 +882 100 regularizer.weight 0.14563906869281257 +882 100 optimizer.lr 0.0018596417019071938 +882 100 negative_sampler.num_negs_per_pos 26.0 +882 100 training.batch_size 0.0 +882 1 dataset """kinships""" +882 1 model """transh""" +882 1 loss """nssa""" +882 1 regularizer """transh""" +882 1 optimizer """adam""" +882 1 training_loop """owa""" +882 1 negative_sampler """basic""" +882 1 evaluator """rankbased""" +882 2 dataset """kinships""" +882 2 model """transh""" +882 2 loss """nssa""" +882 2 regularizer """transh""" +882 2 optimizer """adam""" +882 2 training_loop """owa""" +882 2 negative_sampler """basic""" +882 2 evaluator """rankbased""" +882 3 dataset """kinships""" +882 3 model """transh""" +882 3 loss """nssa""" +882 3 regularizer """transh""" +882 3 optimizer """adam""" +882 3 training_loop """owa""" +882 3 negative_sampler """basic""" +882 3 evaluator """rankbased""" +882 4 dataset """kinships""" +882 4 model """transh""" +882 4 loss """nssa""" +882 4 regularizer """transh""" +882 4 optimizer """adam""" +882 4 training_loop """owa""" +882 4 negative_sampler """basic""" +882 4 evaluator """rankbased""" +882 5 dataset """kinships""" +882 5 model """transh""" +882 5 loss """nssa""" +882 5 regularizer """transh""" +882 5 optimizer """adam""" +882 5 training_loop """owa""" +882 5 negative_sampler """basic""" +882 5 evaluator """rankbased""" +882 6 dataset """kinships""" +882 6 model """transh""" +882 6 loss """nssa""" +882 6 regularizer """transh""" +882 6 optimizer """adam""" +882 6 training_loop """owa""" +882 6 negative_sampler """basic""" +882 6 evaluator """rankbased""" +882 7 dataset """kinships""" +882 7 model """transh""" +882 7 loss """nssa""" +882 7 regularizer """transh""" +882 7 optimizer """adam""" +882 7 training_loop """owa""" +882 7 negative_sampler """basic""" +882 7 evaluator """rankbased""" +882 8 dataset """kinships""" +882 8 model """transh""" +882 8 loss """nssa""" +882 8 
regularizer """transh""" +882 8 optimizer """adam""" +882 8 training_loop """owa""" +882 8 negative_sampler """basic""" +882 8 evaluator """rankbased""" +882 9 dataset """kinships""" +882 9 model """transh""" +882 9 loss """nssa""" +882 9 regularizer """transh""" +882 9 optimizer """adam""" +882 9 training_loop """owa""" +882 9 negative_sampler """basic""" +882 9 evaluator """rankbased""" +882 10 dataset """kinships""" +882 10 model """transh""" +882 10 loss """nssa""" +882 10 regularizer """transh""" +882 10 optimizer """adam""" +882 10 training_loop """owa""" +882 10 negative_sampler """basic""" +882 10 evaluator """rankbased""" +882 11 dataset """kinships""" +882 11 model """transh""" +882 11 loss """nssa""" +882 11 regularizer """transh""" +882 11 optimizer """adam""" +882 11 training_loop """owa""" +882 11 negative_sampler """basic""" +882 11 evaluator """rankbased""" +882 12 dataset """kinships""" +882 12 model """transh""" +882 12 loss """nssa""" +882 12 regularizer """transh""" +882 12 optimizer """adam""" +882 12 training_loop """owa""" +882 12 negative_sampler """basic""" +882 12 evaluator """rankbased""" +882 13 dataset """kinships""" +882 13 model """transh""" +882 13 loss """nssa""" +882 13 regularizer """transh""" +882 13 optimizer """adam""" +882 13 training_loop """owa""" +882 13 negative_sampler """basic""" +882 13 evaluator """rankbased""" +882 14 dataset """kinships""" +882 14 model """transh""" +882 14 loss """nssa""" +882 14 regularizer """transh""" +882 14 optimizer """adam""" +882 14 training_loop """owa""" +882 14 negative_sampler """basic""" +882 14 evaluator """rankbased""" +882 15 dataset """kinships""" +882 15 model """transh""" +882 15 loss """nssa""" +882 15 regularizer """transh""" +882 15 optimizer """adam""" +882 15 training_loop """owa""" +882 15 negative_sampler """basic""" +882 15 evaluator """rankbased""" +882 16 dataset """kinships""" +882 16 model """transh""" +882 16 loss """nssa""" +882 16 regularizer """transh""" +882 16 
optimizer """adam""" +882 16 training_loop """owa""" +882 16 negative_sampler """basic""" +882 16 evaluator """rankbased""" +882 17 dataset """kinships""" +882 17 model """transh""" +882 17 loss """nssa""" +882 17 regularizer """transh""" +882 17 optimizer """adam""" +882 17 training_loop """owa""" +882 17 negative_sampler """basic""" +882 17 evaluator """rankbased""" +882 18 dataset """kinships""" +882 18 model """transh""" +882 18 loss """nssa""" +882 18 regularizer """transh""" +882 18 optimizer """adam""" +882 18 training_loop """owa""" +882 18 negative_sampler """basic""" +882 18 evaluator """rankbased""" +882 19 dataset """kinships""" +882 19 model """transh""" +882 19 loss """nssa""" +882 19 regularizer """transh""" +882 19 optimizer """adam""" +882 19 training_loop """owa""" +882 19 negative_sampler """basic""" +882 19 evaluator """rankbased""" +882 20 dataset """kinships""" +882 20 model """transh""" +882 20 loss """nssa""" +882 20 regularizer """transh""" +882 20 optimizer """adam""" +882 20 training_loop """owa""" +882 20 negative_sampler """basic""" +882 20 evaluator """rankbased""" +882 21 dataset """kinships""" +882 21 model """transh""" +882 21 loss """nssa""" +882 21 regularizer """transh""" +882 21 optimizer """adam""" +882 21 training_loop """owa""" +882 21 negative_sampler """basic""" +882 21 evaluator """rankbased""" +882 22 dataset """kinships""" +882 22 model """transh""" +882 22 loss """nssa""" +882 22 regularizer """transh""" +882 22 optimizer """adam""" +882 22 training_loop """owa""" +882 22 negative_sampler """basic""" +882 22 evaluator """rankbased""" +882 23 dataset """kinships""" +882 23 model """transh""" +882 23 loss """nssa""" +882 23 regularizer """transh""" +882 23 optimizer """adam""" +882 23 training_loop """owa""" +882 23 negative_sampler """basic""" +882 23 evaluator """rankbased""" +882 24 dataset """kinships""" +882 24 model """transh""" +882 24 loss """nssa""" +882 24 regularizer """transh""" +882 24 optimizer """adam""" 
+882 24 training_loop """owa""" +882 24 negative_sampler """basic""" +882 24 evaluator """rankbased""" +882 25 dataset """kinships""" +882 25 model """transh""" +882 25 loss """nssa""" +882 25 regularizer """transh""" +882 25 optimizer """adam""" +882 25 training_loop """owa""" +882 25 negative_sampler """basic""" +882 25 evaluator """rankbased""" +882 26 dataset """kinships""" +882 26 model """transh""" +882 26 loss """nssa""" +882 26 regularizer """transh""" +882 26 optimizer """adam""" +882 26 training_loop """owa""" +882 26 negative_sampler """basic""" +882 26 evaluator """rankbased""" +882 27 dataset """kinships""" +882 27 model """transh""" +882 27 loss """nssa""" +882 27 regularizer """transh""" +882 27 optimizer """adam""" +882 27 training_loop """owa""" +882 27 negative_sampler """basic""" +882 27 evaluator """rankbased""" +882 28 dataset """kinships""" +882 28 model """transh""" +882 28 loss """nssa""" +882 28 regularizer """transh""" +882 28 optimizer """adam""" +882 28 training_loop """owa""" +882 28 negative_sampler """basic""" +882 28 evaluator """rankbased""" +882 29 dataset """kinships""" +882 29 model """transh""" +882 29 loss """nssa""" +882 29 regularizer """transh""" +882 29 optimizer """adam""" +882 29 training_loop """owa""" +882 29 negative_sampler """basic""" +882 29 evaluator """rankbased""" +882 30 dataset """kinships""" +882 30 model """transh""" +882 30 loss """nssa""" +882 30 regularizer """transh""" +882 30 optimizer """adam""" +882 30 training_loop """owa""" +882 30 negative_sampler """basic""" +882 30 evaluator """rankbased""" +882 31 dataset """kinships""" +882 31 model """transh""" +882 31 loss """nssa""" +882 31 regularizer """transh""" +882 31 optimizer """adam""" +882 31 training_loop """owa""" +882 31 negative_sampler """basic""" +882 31 evaluator """rankbased""" +882 32 dataset """kinships""" +882 32 model """transh""" +882 32 loss """nssa""" +882 32 regularizer """transh""" +882 32 optimizer """adam""" +882 32 training_loop 
"""owa""" +882 32 negative_sampler """basic""" +882 32 evaluator """rankbased""" +882 33 dataset """kinships""" +882 33 model """transh""" +882 33 loss """nssa""" +882 33 regularizer """transh""" +882 33 optimizer """adam""" +882 33 training_loop """owa""" +882 33 negative_sampler """basic""" +882 33 evaluator """rankbased""" +882 34 dataset """kinships""" +882 34 model """transh""" +882 34 loss """nssa""" +882 34 regularizer """transh""" +882 34 optimizer """adam""" +882 34 training_loop """owa""" +882 34 negative_sampler """basic""" +882 34 evaluator """rankbased""" +882 35 dataset """kinships""" +882 35 model """transh""" +882 35 loss """nssa""" +882 35 regularizer """transh""" +882 35 optimizer """adam""" +882 35 training_loop """owa""" +882 35 negative_sampler """basic""" +882 35 evaluator """rankbased""" +882 36 dataset """kinships""" +882 36 model """transh""" +882 36 loss """nssa""" +882 36 regularizer """transh""" +882 36 optimizer """adam""" +882 36 training_loop """owa""" +882 36 negative_sampler """basic""" +882 36 evaluator """rankbased""" +882 37 dataset """kinships""" +882 37 model """transh""" +882 37 loss """nssa""" +882 37 regularizer """transh""" +882 37 optimizer """adam""" +882 37 training_loop """owa""" +882 37 negative_sampler """basic""" +882 37 evaluator """rankbased""" +882 38 dataset """kinships""" +882 38 model """transh""" +882 38 loss """nssa""" +882 38 regularizer """transh""" +882 38 optimizer """adam""" +882 38 training_loop """owa""" +882 38 negative_sampler """basic""" +882 38 evaluator """rankbased""" +882 39 dataset """kinships""" +882 39 model """transh""" +882 39 loss """nssa""" +882 39 regularizer """transh""" +882 39 optimizer """adam""" +882 39 training_loop """owa""" +882 39 negative_sampler """basic""" +882 39 evaluator """rankbased""" +882 40 dataset """kinships""" +882 40 model """transh""" +882 40 loss """nssa""" +882 40 regularizer """transh""" +882 40 optimizer """adam""" +882 40 training_loop """owa""" +882 40 
negative_sampler """basic""" +882 40 evaluator """rankbased""" +882 41 dataset """kinships""" +882 41 model """transh""" +882 41 loss """nssa""" +882 41 regularizer """transh""" +882 41 optimizer """adam""" +882 41 training_loop """owa""" +882 41 negative_sampler """basic""" +882 41 evaluator """rankbased""" +882 42 dataset """kinships""" +882 42 model """transh""" +882 42 loss """nssa""" +882 42 regularizer """transh""" +882 42 optimizer """adam""" +882 42 training_loop """owa""" +882 42 negative_sampler """basic""" +882 42 evaluator """rankbased""" +882 43 dataset """kinships""" +882 43 model """transh""" +882 43 loss """nssa""" +882 43 regularizer """transh""" +882 43 optimizer """adam""" +882 43 training_loop """owa""" +882 43 negative_sampler """basic""" +882 43 evaluator """rankbased""" +882 44 dataset """kinships""" +882 44 model """transh""" +882 44 loss """nssa""" +882 44 regularizer """transh""" +882 44 optimizer """adam""" +882 44 training_loop """owa""" +882 44 negative_sampler """basic""" +882 44 evaluator """rankbased""" +882 45 dataset """kinships""" +882 45 model """transh""" +882 45 loss """nssa""" +882 45 regularizer """transh""" +882 45 optimizer """adam""" +882 45 training_loop """owa""" +882 45 negative_sampler """basic""" +882 45 evaluator """rankbased""" +882 46 dataset """kinships""" +882 46 model """transh""" +882 46 loss """nssa""" +882 46 regularizer """transh""" +882 46 optimizer """adam""" +882 46 training_loop """owa""" +882 46 negative_sampler """basic""" +882 46 evaluator """rankbased""" +882 47 dataset """kinships""" +882 47 model """transh""" +882 47 loss """nssa""" +882 47 regularizer """transh""" +882 47 optimizer """adam""" +882 47 training_loop """owa""" +882 47 negative_sampler """basic""" +882 47 evaluator """rankbased""" +882 48 dataset """kinships""" +882 48 model """transh""" +882 48 loss """nssa""" +882 48 regularizer """transh""" +882 48 optimizer """adam""" +882 48 training_loop """owa""" +882 48 negative_sampler 
"""basic""" +882 48 evaluator """rankbased""" +882 49 dataset """kinships""" +882 49 model """transh""" +882 49 loss """nssa""" +882 49 regularizer """transh""" +882 49 optimizer """adam""" +882 49 training_loop """owa""" +882 49 negative_sampler """basic""" +882 49 evaluator """rankbased""" +882 50 dataset """kinships""" +882 50 model """transh""" +882 50 loss """nssa""" +882 50 regularizer """transh""" +882 50 optimizer """adam""" +882 50 training_loop """owa""" +882 50 negative_sampler """basic""" +882 50 evaluator """rankbased""" +882 51 dataset """kinships""" +882 51 model """transh""" +882 51 loss """nssa""" +882 51 regularizer """transh""" +882 51 optimizer """adam""" +882 51 training_loop """owa""" +882 51 negative_sampler """basic""" +882 51 evaluator """rankbased""" +882 52 dataset """kinships""" +882 52 model """transh""" +882 52 loss """nssa""" +882 52 regularizer """transh""" +882 52 optimizer """adam""" +882 52 training_loop """owa""" +882 52 negative_sampler """basic""" +882 52 evaluator """rankbased""" +882 53 dataset """kinships""" +882 53 model """transh""" +882 53 loss """nssa""" +882 53 regularizer """transh""" +882 53 optimizer """adam""" +882 53 training_loop """owa""" +882 53 negative_sampler """basic""" +882 53 evaluator """rankbased""" +882 54 dataset """kinships""" +882 54 model """transh""" +882 54 loss """nssa""" +882 54 regularizer """transh""" +882 54 optimizer """adam""" +882 54 training_loop """owa""" +882 54 negative_sampler """basic""" +882 54 evaluator """rankbased""" +882 55 dataset """kinships""" +882 55 model """transh""" +882 55 loss """nssa""" +882 55 regularizer """transh""" +882 55 optimizer """adam""" +882 55 training_loop """owa""" +882 55 negative_sampler """basic""" +882 55 evaluator """rankbased""" +882 56 dataset """kinships""" +882 56 model """transh""" +882 56 loss """nssa""" +882 56 regularizer """transh""" +882 56 optimizer """adam""" +882 56 training_loop """owa""" +882 56 negative_sampler """basic""" +882 56 
evaluator """rankbased""" +882 57 dataset """kinships""" +882 57 model """transh""" +882 57 loss """nssa""" +882 57 regularizer """transh""" +882 57 optimizer """adam""" +882 57 training_loop """owa""" +882 57 negative_sampler """basic""" +882 57 evaluator """rankbased""" +882 58 dataset """kinships""" +882 58 model """transh""" +882 58 loss """nssa""" +882 58 regularizer """transh""" +882 58 optimizer """adam""" +882 58 training_loop """owa""" +882 58 negative_sampler """basic""" +882 58 evaluator """rankbased""" +882 59 dataset """kinships""" +882 59 model """transh""" +882 59 loss """nssa""" +882 59 regularizer """transh""" +882 59 optimizer """adam""" +882 59 training_loop """owa""" +882 59 negative_sampler """basic""" +882 59 evaluator """rankbased""" +882 60 dataset """kinships""" +882 60 model """transh""" +882 60 loss """nssa""" +882 60 regularizer """transh""" +882 60 optimizer """adam""" +882 60 training_loop """owa""" +882 60 negative_sampler """basic""" +882 60 evaluator """rankbased""" +882 61 dataset """kinships""" +882 61 model """transh""" +882 61 loss """nssa""" +882 61 regularizer """transh""" +882 61 optimizer """adam""" +882 61 training_loop """owa""" +882 61 negative_sampler """basic""" +882 61 evaluator """rankbased""" +882 62 dataset """kinships""" +882 62 model """transh""" +882 62 loss """nssa""" +882 62 regularizer """transh""" +882 62 optimizer """adam""" +882 62 training_loop """owa""" +882 62 negative_sampler """basic""" +882 62 evaluator """rankbased""" +882 63 dataset """kinships""" +882 63 model """transh""" +882 63 loss """nssa""" +882 63 regularizer """transh""" +882 63 optimizer """adam""" +882 63 training_loop """owa""" +882 63 negative_sampler """basic""" +882 63 evaluator """rankbased""" +882 64 dataset """kinships""" +882 64 model """transh""" +882 64 loss """nssa""" +882 64 regularizer """transh""" +882 64 optimizer """adam""" +882 64 training_loop """owa""" +882 64 negative_sampler """basic""" +882 64 evaluator 
"""rankbased""" +882 65 dataset """kinships""" +882 65 model """transh""" +882 65 loss """nssa""" +882 65 regularizer """transh""" +882 65 optimizer """adam""" +882 65 training_loop """owa""" +882 65 negative_sampler """basic""" +882 65 evaluator """rankbased""" +882 66 dataset """kinships""" +882 66 model """transh""" +882 66 loss """nssa""" +882 66 regularizer """transh""" +882 66 optimizer """adam""" +882 66 training_loop """owa""" +882 66 negative_sampler """basic""" +882 66 evaluator """rankbased""" +882 67 dataset """kinships""" +882 67 model """transh""" +882 67 loss """nssa""" +882 67 regularizer """transh""" +882 67 optimizer """adam""" +882 67 training_loop """owa""" +882 67 negative_sampler """basic""" +882 67 evaluator """rankbased""" +882 68 dataset """kinships""" +882 68 model """transh""" +882 68 loss """nssa""" +882 68 regularizer """transh""" +882 68 optimizer """adam""" +882 68 training_loop """owa""" +882 68 negative_sampler """basic""" +882 68 evaluator """rankbased""" +882 69 dataset """kinships""" +882 69 model """transh""" +882 69 loss """nssa""" +882 69 regularizer """transh""" +882 69 optimizer """adam""" +882 69 training_loop """owa""" +882 69 negative_sampler """basic""" +882 69 evaluator """rankbased""" +882 70 dataset """kinships""" +882 70 model """transh""" +882 70 loss """nssa""" +882 70 regularizer """transh""" +882 70 optimizer """adam""" +882 70 training_loop """owa""" +882 70 negative_sampler """basic""" +882 70 evaluator """rankbased""" +882 71 dataset """kinships""" +882 71 model """transh""" +882 71 loss """nssa""" +882 71 regularizer """transh""" +882 71 optimizer """adam""" +882 71 training_loop """owa""" +882 71 negative_sampler """basic""" +882 71 evaluator """rankbased""" +882 72 dataset """kinships""" +882 72 model """transh""" +882 72 loss """nssa""" +882 72 regularizer """transh""" +882 72 optimizer """adam""" +882 72 training_loop """owa""" +882 72 negative_sampler """basic""" +882 72 evaluator """rankbased""" +882 73 
dataset """kinships""" +882 73 model """transh""" +882 73 loss """nssa""" +882 73 regularizer """transh""" +882 73 optimizer """adam""" +882 73 training_loop """owa""" +882 73 negative_sampler """basic""" +882 73 evaluator """rankbased""" +882 74 dataset """kinships""" +882 74 model """transh""" +882 74 loss """nssa""" +882 74 regularizer """transh""" +882 74 optimizer """adam""" +882 74 training_loop """owa""" +882 74 negative_sampler """basic""" +882 74 evaluator """rankbased""" +882 75 dataset """kinships""" +882 75 model """transh""" +882 75 loss """nssa""" +882 75 regularizer """transh""" +882 75 optimizer """adam""" +882 75 training_loop """owa""" +882 75 negative_sampler """basic""" +882 75 evaluator """rankbased""" +882 76 dataset """kinships""" +882 76 model """transh""" +882 76 loss """nssa""" +882 76 regularizer """transh""" +882 76 optimizer """adam""" +882 76 training_loop """owa""" +882 76 negative_sampler """basic""" +882 76 evaluator """rankbased""" +882 77 dataset """kinships""" +882 77 model """transh""" +882 77 loss """nssa""" +882 77 regularizer """transh""" +882 77 optimizer """adam""" +882 77 training_loop """owa""" +882 77 negative_sampler """basic""" +882 77 evaluator """rankbased""" +882 78 dataset """kinships""" +882 78 model """transh""" +882 78 loss """nssa""" +882 78 regularizer """transh""" +882 78 optimizer """adam""" +882 78 training_loop """owa""" +882 78 negative_sampler """basic""" +882 78 evaluator """rankbased""" +882 79 dataset """kinships""" +882 79 model """transh""" +882 79 loss """nssa""" +882 79 regularizer """transh""" +882 79 optimizer """adam""" +882 79 training_loop """owa""" +882 79 negative_sampler """basic""" +882 79 evaluator """rankbased""" +882 80 dataset """kinships""" +882 80 model """transh""" +882 80 loss """nssa""" +882 80 regularizer """transh""" +882 80 optimizer """adam""" +882 80 training_loop """owa""" +882 80 negative_sampler """basic""" +882 80 evaluator """rankbased""" +882 81 dataset """kinships""" 
+882 81 model """transh""" +882 81 loss """nssa""" +882 81 regularizer """transh""" +882 81 optimizer """adam""" +882 81 training_loop """owa""" +882 81 negative_sampler """basic""" +882 81 evaluator """rankbased""" +882 82 dataset """kinships""" +882 82 model """transh""" +882 82 loss """nssa""" +882 82 regularizer """transh""" +882 82 optimizer """adam""" +882 82 training_loop """owa""" +882 82 negative_sampler """basic""" +882 82 evaluator """rankbased""" +882 83 dataset """kinships""" +882 83 model """transh""" +882 83 loss """nssa""" +882 83 regularizer """transh""" +882 83 optimizer """adam""" +882 83 training_loop """owa""" +882 83 negative_sampler """basic""" +882 83 evaluator """rankbased""" +882 84 dataset """kinships""" +882 84 model """transh""" +882 84 loss """nssa""" +882 84 regularizer """transh""" +882 84 optimizer """adam""" +882 84 training_loop """owa""" +882 84 negative_sampler """basic""" +882 84 evaluator """rankbased""" +882 85 dataset """kinships""" +882 85 model """transh""" +882 85 loss """nssa""" +882 85 regularizer """transh""" +882 85 optimizer """adam""" +882 85 training_loop """owa""" +882 85 negative_sampler """basic""" +882 85 evaluator """rankbased""" +882 86 dataset """kinships""" +882 86 model """transh""" +882 86 loss """nssa""" +882 86 regularizer """transh""" +882 86 optimizer """adam""" +882 86 training_loop """owa""" +882 86 negative_sampler """basic""" +882 86 evaluator """rankbased""" +882 87 dataset """kinships""" +882 87 model """transh""" +882 87 loss """nssa""" +882 87 regularizer """transh""" +882 87 optimizer """adam""" +882 87 training_loop """owa""" +882 87 negative_sampler """basic""" +882 87 evaluator """rankbased""" +882 88 dataset """kinships""" +882 88 model """transh""" +882 88 loss """nssa""" +882 88 regularizer """transh""" +882 88 optimizer """adam""" +882 88 training_loop """owa""" +882 88 negative_sampler """basic""" +882 88 evaluator """rankbased""" +882 89 dataset """kinships""" +882 89 model 
"""transh""" +882 89 loss """nssa""" +882 89 regularizer """transh""" +882 89 optimizer """adam""" +882 89 training_loop """owa""" +882 89 negative_sampler """basic""" +882 89 evaluator """rankbased""" +882 90 dataset """kinships""" +882 90 model """transh""" +882 90 loss """nssa""" +882 90 regularizer """transh""" +882 90 optimizer """adam""" +882 90 training_loop """owa""" +882 90 negative_sampler """basic""" +882 90 evaluator """rankbased""" +882 91 dataset """kinships""" +882 91 model """transh""" +882 91 loss """nssa""" +882 91 regularizer """transh""" +882 91 optimizer """adam""" +882 91 training_loop """owa""" +882 91 negative_sampler """basic""" +882 91 evaluator """rankbased""" +882 92 dataset """kinships""" +882 92 model """transh""" +882 92 loss """nssa""" +882 92 regularizer """transh""" +882 92 optimizer """adam""" +882 92 training_loop """owa""" +882 92 negative_sampler """basic""" +882 92 evaluator """rankbased""" +882 93 dataset """kinships""" +882 93 model """transh""" +882 93 loss """nssa""" +882 93 regularizer """transh""" +882 93 optimizer """adam""" +882 93 training_loop """owa""" +882 93 negative_sampler """basic""" +882 93 evaluator """rankbased""" +882 94 dataset """kinships""" +882 94 model """transh""" +882 94 loss """nssa""" +882 94 regularizer """transh""" +882 94 optimizer """adam""" +882 94 training_loop """owa""" +882 94 negative_sampler """basic""" +882 94 evaluator """rankbased""" +882 95 dataset """kinships""" +882 95 model """transh""" +882 95 loss """nssa""" +882 95 regularizer """transh""" +882 95 optimizer """adam""" +882 95 training_loop """owa""" +882 95 negative_sampler """basic""" +882 95 evaluator """rankbased""" +882 96 dataset """kinships""" +882 96 model """transh""" +882 96 loss """nssa""" +882 96 regularizer """transh""" +882 96 optimizer """adam""" +882 96 training_loop """owa""" +882 96 negative_sampler """basic""" +882 96 evaluator """rankbased""" +882 97 dataset """kinships""" +882 97 model """transh""" +882 97 
loss """nssa""" +882 97 regularizer """transh""" +882 97 optimizer """adam""" +882 97 training_loop """owa""" +882 97 negative_sampler """basic""" +882 97 evaluator """rankbased""" +882 98 dataset """kinships""" +882 98 model """transh""" +882 98 loss """nssa""" +882 98 regularizer """transh""" +882 98 optimizer """adam""" +882 98 training_loop """owa""" +882 98 negative_sampler """basic""" +882 98 evaluator """rankbased""" +882 99 dataset """kinships""" +882 99 model """transh""" +882 99 loss """nssa""" +882 99 regularizer """transh""" +882 99 optimizer """adam""" +882 99 training_loop """owa""" +882 99 negative_sampler """basic""" +882 99 evaluator """rankbased""" +882 100 dataset """kinships""" +882 100 model """transh""" +882 100 loss """nssa""" +882 100 regularizer """transh""" +882 100 optimizer """adam""" +882 100 training_loop """owa""" +882 100 negative_sampler """basic""" +882 100 evaluator """rankbased""" +883 1 model.embedding_dim 0.0 +883 1 model.scoring_fct_norm 1.0 +883 1 loss.margin 1.4877430992770266 +883 1 loss.adversarial_temperature 0.40588867039927656 +883 1 regularizer.weight 0.1295987672641778 +883 1 optimizer.lr 0.01715849633358601 +883 1 negative_sampler.num_negs_per_pos 57.0 +883 1 training.batch_size 2.0 +883 2 model.embedding_dim 1.0 +883 2 model.scoring_fct_norm 1.0 +883 2 loss.margin 19.26745032443448 +883 2 loss.adversarial_temperature 0.7199707848091522 +883 2 regularizer.weight 0.2760389566550658 +883 2 optimizer.lr 0.0017000215062757673 +883 2 negative_sampler.num_negs_per_pos 10.0 +883 2 training.batch_size 0.0 +883 3 model.embedding_dim 1.0 +883 3 model.scoring_fct_norm 1.0 +883 3 loss.margin 2.579998936866074 +883 3 loss.adversarial_temperature 0.7160801232365819 +883 3 regularizer.weight 0.0965348369771332 +883 3 optimizer.lr 0.04069260983873924 +883 3 negative_sampler.num_negs_per_pos 38.0 +883 3 training.batch_size 2.0 +883 4 model.embedding_dim 2.0 +883 4 model.scoring_fct_norm 1.0 +883 4 loss.margin 7.64642503811552 +883 4 
loss.adversarial_temperature 0.3812419893557206 +883 4 regularizer.weight 0.24786081343702324 +883 4 optimizer.lr 0.008700882042893135 +883 4 negative_sampler.num_negs_per_pos 37.0 +883 4 training.batch_size 1.0 +883 5 model.embedding_dim 2.0 +883 5 model.scoring_fct_norm 2.0 +883 5 loss.margin 19.973042705456965 +883 5 loss.adversarial_temperature 0.6814325680077584 +883 5 regularizer.weight 0.11633923840381571 +883 5 optimizer.lr 0.05224989812006157 +883 5 negative_sampler.num_negs_per_pos 25.0 +883 5 training.batch_size 0.0 +883 6 model.embedding_dim 1.0 +883 6 model.scoring_fct_norm 2.0 +883 6 loss.margin 25.627748855841226 +883 6 loss.adversarial_temperature 0.5935912275638388 +883 6 regularizer.weight 0.04904828090302575 +883 6 optimizer.lr 0.001056179394302068 +883 6 negative_sampler.num_negs_per_pos 51.0 +883 6 training.batch_size 0.0 +883 7 model.embedding_dim 0.0 +883 7 model.scoring_fct_norm 1.0 +883 7 loss.margin 14.371231551646055 +883 7 loss.adversarial_temperature 0.5597593177191441 +883 7 regularizer.weight 0.029528781760260366 +883 7 optimizer.lr 0.007205114510958703 +883 7 negative_sampler.num_negs_per_pos 29.0 +883 7 training.batch_size 2.0 +883 8 model.embedding_dim 1.0 +883 8 model.scoring_fct_norm 2.0 +883 8 loss.margin 19.879804430829658 +883 8 loss.adversarial_temperature 0.39126401100691255 +883 8 regularizer.weight 0.024403519762100846 +883 8 optimizer.lr 0.0013852035764212384 +883 8 negative_sampler.num_negs_per_pos 64.0 +883 8 training.batch_size 2.0 +883 9 model.embedding_dim 0.0 +883 9 model.scoring_fct_norm 2.0 +883 9 loss.margin 29.41357747674021 +883 9 loss.adversarial_temperature 0.4305731013172398 +883 9 regularizer.weight 0.01533889565698169 +883 9 optimizer.lr 0.0010690755231421184 +883 9 negative_sampler.num_negs_per_pos 12.0 +883 9 training.batch_size 0.0 +883 10 model.embedding_dim 1.0 +883 10 model.scoring_fct_norm 2.0 +883 10 loss.margin 23.173994762084497 +883 10 loss.adversarial_temperature 0.2564657216589645 +883 10 
regularizer.weight 0.016767431719009587 +883 10 optimizer.lr 0.0015811985041028008 +883 10 negative_sampler.num_negs_per_pos 1.0 +883 10 training.batch_size 0.0 +883 11 model.embedding_dim 1.0 +883 11 model.scoring_fct_norm 2.0 +883 11 loss.margin 23.85584793996272 +883 11 loss.adversarial_temperature 0.16103969321790979 +883 11 regularizer.weight 0.060278881524752824 +883 11 optimizer.lr 0.004461041170625894 +883 11 negative_sampler.num_negs_per_pos 96.0 +883 11 training.batch_size 0.0 +883 12 model.embedding_dim 2.0 +883 12 model.scoring_fct_norm 1.0 +883 12 loss.margin 2.101284200899239 +883 12 loss.adversarial_temperature 0.5717151936128024 +883 12 regularizer.weight 0.06352840629820102 +883 12 optimizer.lr 0.09040152793539759 +883 12 negative_sampler.num_negs_per_pos 47.0 +883 12 training.batch_size 2.0 +883 13 model.embedding_dim 2.0 +883 13 model.scoring_fct_norm 2.0 +883 13 loss.margin 27.405778949350275 +883 13 loss.adversarial_temperature 0.6148594426015231 +883 13 regularizer.weight 0.25872447718156155 +883 13 optimizer.lr 0.042691408787012906 +883 13 negative_sampler.num_negs_per_pos 81.0 +883 13 training.batch_size 0.0 +883 14 model.embedding_dim 2.0 +883 14 model.scoring_fct_norm 1.0 +883 14 loss.margin 28.535613958308893 +883 14 loss.adversarial_temperature 0.5039106622131564 +883 14 regularizer.weight 0.017642642200663597 +883 14 optimizer.lr 0.00352484184892864 +883 14 negative_sampler.num_negs_per_pos 82.0 +883 14 training.batch_size 1.0 +883 15 model.embedding_dim 0.0 +883 15 model.scoring_fct_norm 1.0 +883 15 loss.margin 25.709648918953636 +883 15 loss.adversarial_temperature 0.10029285945841815 +883 15 regularizer.weight 0.01280022765946593 +883 15 optimizer.lr 0.0013514244376673097 +883 15 negative_sampler.num_negs_per_pos 12.0 +883 15 training.batch_size 2.0 +883 16 model.embedding_dim 0.0 +883 16 model.scoring_fct_norm 1.0 +883 16 loss.margin 5.893197767095236 +883 16 loss.adversarial_temperature 0.9462943392683143 +883 16 regularizer.weight 
0.14409862214800348 +883 16 optimizer.lr 0.035123716908553704 +883 16 negative_sampler.num_negs_per_pos 0.0 +883 16 training.batch_size 2.0 +883 17 model.embedding_dim 1.0 +883 17 model.scoring_fct_norm 1.0 +883 17 loss.margin 9.299904664920456 +883 17 loss.adversarial_temperature 0.17378562025986444 +883 17 regularizer.weight 0.09180698316637899 +883 17 optimizer.lr 0.0025199048829386487 +883 17 negative_sampler.num_negs_per_pos 5.0 +883 17 training.batch_size 0.0 +883 18 model.embedding_dim 1.0 +883 18 model.scoring_fct_norm 1.0 +883 18 loss.margin 9.742074191934138 +883 18 loss.adversarial_temperature 0.5218063285323118 +883 18 regularizer.weight 0.02107242259394743 +883 18 optimizer.lr 0.017568137906944482 +883 18 negative_sampler.num_negs_per_pos 40.0 +883 18 training.batch_size 2.0 +883 19 model.embedding_dim 0.0 +883 19 model.scoring_fct_norm 2.0 +883 19 loss.margin 19.40054181812724 +883 19 loss.adversarial_temperature 0.2227662180678684 +883 19 regularizer.weight 0.03331400474882599 +883 19 optimizer.lr 0.02029023361454984 +883 19 negative_sampler.num_negs_per_pos 59.0 +883 19 training.batch_size 0.0 +883 20 model.embedding_dim 1.0 +883 20 model.scoring_fct_norm 2.0 +883 20 loss.margin 10.31381135371325 +883 20 loss.adversarial_temperature 0.19994271228053823 +883 20 regularizer.weight 0.18690853285778417 +883 20 optimizer.lr 0.007673133309770336 +883 20 negative_sampler.num_negs_per_pos 53.0 +883 20 training.batch_size 1.0 +883 21 model.embedding_dim 2.0 +883 21 model.scoring_fct_norm 2.0 +883 21 loss.margin 6.331372959817811 +883 21 loss.adversarial_temperature 0.9978336163424291 +883 21 regularizer.weight 0.015979500829528995 +883 21 optimizer.lr 0.0023378622634533156 +883 21 negative_sampler.num_negs_per_pos 27.0 +883 21 training.batch_size 2.0 +883 22 model.embedding_dim 0.0 +883 22 model.scoring_fct_norm 2.0 +883 22 loss.margin 10.332103950558128 +883 22 loss.adversarial_temperature 0.19482638384485704 +883 22 regularizer.weight 0.02630030740234972 
+883 22 optimizer.lr 0.010613166707625106 +883 22 negative_sampler.num_negs_per_pos 64.0 +883 22 training.batch_size 1.0 +883 23 model.embedding_dim 1.0 +883 23 model.scoring_fct_norm 2.0 +883 23 loss.margin 10.359733811791884 +883 23 loss.adversarial_temperature 0.5254537641442358 +883 23 regularizer.weight 0.05640899092405736 +883 23 optimizer.lr 0.0011898631375596505 +883 23 negative_sampler.num_negs_per_pos 65.0 +883 23 training.batch_size 0.0 +883 24 model.embedding_dim 0.0 +883 24 model.scoring_fct_norm 2.0 +883 24 loss.margin 11.30901536539662 +883 24 loss.adversarial_temperature 0.29779087300202806 +883 24 regularizer.weight 0.032671029031282814 +883 24 optimizer.lr 0.0014465865906625732 +883 24 negative_sampler.num_negs_per_pos 27.0 +883 24 training.batch_size 0.0 +883 25 model.embedding_dim 1.0 +883 25 model.scoring_fct_norm 2.0 +883 25 loss.margin 16.107978978261322 +883 25 loss.adversarial_temperature 0.183411673344535 +883 25 regularizer.weight 0.0116488503580956 +883 25 optimizer.lr 0.08331635692989661 +883 25 negative_sampler.num_negs_per_pos 18.0 +883 25 training.batch_size 1.0 +883 26 model.embedding_dim 2.0 +883 26 model.scoring_fct_norm 2.0 +883 26 loss.margin 1.430355416451063 +883 26 loss.adversarial_temperature 0.45731824096901696 +883 26 regularizer.weight 0.09336025763260605 +883 26 optimizer.lr 0.08038095030055331 +883 26 negative_sampler.num_negs_per_pos 53.0 +883 26 training.batch_size 0.0 +883 27 model.embedding_dim 0.0 +883 27 model.scoring_fct_norm 2.0 +883 27 loss.margin 15.088022343270637 +883 27 loss.adversarial_temperature 0.502598407918345 +883 27 regularizer.weight 0.226856314481346 +883 27 optimizer.lr 0.04069373550063246 +883 27 negative_sampler.num_negs_per_pos 16.0 +883 27 training.batch_size 1.0 +883 28 model.embedding_dim 2.0 +883 28 model.scoring_fct_norm 1.0 +883 28 loss.margin 20.928137902704435 +883 28 loss.adversarial_temperature 0.23094643124524536 +883 28 regularizer.weight 0.022895082023726355 +883 28 optimizer.lr 
0.002028953404007479 +883 28 negative_sampler.num_negs_per_pos 15.0 +883 28 training.batch_size 1.0 +883 29 model.embedding_dim 1.0 +883 29 model.scoring_fct_norm 2.0 +883 29 loss.margin 1.030559448204214 +883 29 loss.adversarial_temperature 0.2740155550209705 +883 29 regularizer.weight 0.05214482056629121 +883 29 optimizer.lr 0.030354696369307044 +883 29 negative_sampler.num_negs_per_pos 3.0 +883 29 training.batch_size 2.0 +883 30 model.embedding_dim 2.0 +883 30 model.scoring_fct_norm 2.0 +883 30 loss.margin 13.266243181204857 +883 30 loss.adversarial_temperature 0.6194391634082965 +883 30 regularizer.weight 0.04705370992255392 +883 30 optimizer.lr 0.08535082160810646 +883 30 negative_sampler.num_negs_per_pos 93.0 +883 30 training.batch_size 1.0 +883 31 model.embedding_dim 1.0 +883 31 model.scoring_fct_norm 2.0 +883 31 loss.margin 16.531273948239487 +883 31 loss.adversarial_temperature 0.3330074159666751 +883 31 regularizer.weight 0.03674926301907734 +883 31 optimizer.lr 0.05459754089620555 +883 31 negative_sampler.num_negs_per_pos 37.0 +883 31 training.batch_size 1.0 +883 32 model.embedding_dim 0.0 +883 32 model.scoring_fct_norm 1.0 +883 32 loss.margin 17.68921879570285 +883 32 loss.adversarial_temperature 0.48565524989876674 +883 32 regularizer.weight 0.03504583579781143 +883 32 optimizer.lr 0.009680053007717754 +883 32 negative_sampler.num_negs_per_pos 30.0 +883 32 training.batch_size 2.0 +883 33 model.embedding_dim 1.0 +883 33 model.scoring_fct_norm 2.0 +883 33 loss.margin 16.512710510855186 +883 33 loss.adversarial_temperature 0.2625060225583409 +883 33 regularizer.weight 0.015816404144651634 +883 33 optimizer.lr 0.06778112181417668 +883 33 negative_sampler.num_negs_per_pos 0.0 +883 33 training.batch_size 0.0 +883 34 model.embedding_dim 0.0 +883 34 model.scoring_fct_norm 2.0 +883 34 loss.margin 12.260431566345588 +883 34 loss.adversarial_temperature 0.5882809997198226 +883 34 regularizer.weight 0.26703718684364713 +883 34 optimizer.lr 0.02466411162792342 +883 
34 negative_sampler.num_negs_per_pos 43.0 +883 34 training.batch_size 0.0 +883 35 model.embedding_dim 1.0 +883 35 model.scoring_fct_norm 2.0 +883 35 loss.margin 8.255075684733258 +883 35 loss.adversarial_temperature 0.8616695531261694 +883 35 regularizer.weight 0.04306467915729177 +883 35 optimizer.lr 0.006683652346863806 +883 35 negative_sampler.num_negs_per_pos 38.0 +883 35 training.batch_size 0.0 +883 36 model.embedding_dim 2.0 +883 36 model.scoring_fct_norm 2.0 +883 36 loss.margin 13.621708207113507 +883 36 loss.adversarial_temperature 0.5395313747884117 +883 36 regularizer.weight 0.011889560555921335 +883 36 optimizer.lr 0.002370109146086079 +883 36 negative_sampler.num_negs_per_pos 92.0 +883 36 training.batch_size 2.0 +883 37 model.embedding_dim 2.0 +883 37 model.scoring_fct_norm 2.0 +883 37 loss.margin 26.00522746827981 +883 37 loss.adversarial_temperature 0.17361769331082563 +883 37 regularizer.weight 0.011149992742087559 +883 37 optimizer.lr 0.009094478891293367 +883 37 negative_sampler.num_negs_per_pos 64.0 +883 37 training.batch_size 2.0 +883 38 model.embedding_dim 2.0 +883 38 model.scoring_fct_norm 1.0 +883 38 loss.margin 14.50399381650808 +883 38 loss.adversarial_temperature 0.42148003287082664 +883 38 regularizer.weight 0.03678116436857142 +883 38 optimizer.lr 0.004148810531290328 +883 38 negative_sampler.num_negs_per_pos 14.0 +883 38 training.batch_size 2.0 +883 39 model.embedding_dim 2.0 +883 39 model.scoring_fct_norm 1.0 +883 39 loss.margin 5.283283498853594 +883 39 loss.adversarial_temperature 0.5715780325713103 +883 39 regularizer.weight 0.015811550015192903 +883 39 optimizer.lr 0.022737289481485918 +883 39 negative_sampler.num_negs_per_pos 39.0 +883 39 training.batch_size 0.0 +883 40 model.embedding_dim 2.0 +883 40 model.scoring_fct_norm 1.0 +883 40 loss.margin 6.094020408379824 +883 40 loss.adversarial_temperature 0.19665930874221463 +883 40 regularizer.weight 0.19835640658617712 +883 40 optimizer.lr 0.052270940814304896 +883 40 
negative_sampler.num_negs_per_pos 96.0 +883 40 training.batch_size 2.0 +883 41 model.embedding_dim 2.0 +883 41 model.scoring_fct_norm 1.0 +883 41 loss.margin 25.159502585661727 +883 41 loss.adversarial_temperature 0.2664781257081124 +883 41 regularizer.weight 0.01979075930881202 +883 41 optimizer.lr 0.08851544126283399 +883 41 negative_sampler.num_negs_per_pos 68.0 +883 41 training.batch_size 2.0 +883 42 model.embedding_dim 2.0 +883 42 model.scoring_fct_norm 1.0 +883 42 loss.margin 12.457571724761396 +883 42 loss.adversarial_temperature 0.5254645477034146 +883 42 regularizer.weight 0.039318180931997904 +883 42 optimizer.lr 0.0026486328402710014 +883 42 negative_sampler.num_negs_per_pos 81.0 +883 42 training.batch_size 0.0 +883 43 model.embedding_dim 0.0 +883 43 model.scoring_fct_norm 2.0 +883 43 loss.margin 17.257818453747756 +883 43 loss.adversarial_temperature 0.8802294282722439 +883 43 regularizer.weight 0.024944898743624502 +883 43 optimizer.lr 0.043055175686125026 +883 43 negative_sampler.num_negs_per_pos 52.0 +883 43 training.batch_size 1.0 +883 44 model.embedding_dim 0.0 +883 44 model.scoring_fct_norm 2.0 +883 44 loss.margin 20.874470471651303 +883 44 loss.adversarial_temperature 0.9091784918959912 +883 44 regularizer.weight 0.013012112063013033 +883 44 optimizer.lr 0.005294199857064936 +883 44 negative_sampler.num_negs_per_pos 45.0 +883 44 training.batch_size 0.0 +883 45 model.embedding_dim 2.0 +883 45 model.scoring_fct_norm 1.0 +883 45 loss.margin 6.583768220724876 +883 45 loss.adversarial_temperature 0.9869796325145374 +883 45 regularizer.weight 0.030015925406169302 +883 45 optimizer.lr 0.01085434649325894 +883 45 negative_sampler.num_negs_per_pos 62.0 +883 45 training.batch_size 0.0 +883 46 model.embedding_dim 0.0 +883 46 model.scoring_fct_norm 1.0 +883 46 loss.margin 23.114005763098557 +883 46 loss.adversarial_temperature 0.9773992043243102 +883 46 regularizer.weight 0.05644989838613672 +883 46 optimizer.lr 0.060890310289679825 +883 46 
negative_sampler.num_negs_per_pos 25.0 +883 46 training.batch_size 1.0 +883 47 model.embedding_dim 1.0 +883 47 model.scoring_fct_norm 1.0 +883 47 loss.margin 26.858108799056847 +883 47 loss.adversarial_temperature 0.2393928658247819 +883 47 regularizer.weight 0.07832703128422983 +883 47 optimizer.lr 0.08759362865900538 +883 47 negative_sampler.num_negs_per_pos 97.0 +883 47 training.batch_size 0.0 +883 48 model.embedding_dim 1.0 +883 48 model.scoring_fct_norm 2.0 +883 48 loss.margin 10.228184958248624 +883 48 loss.adversarial_temperature 0.20014660506694607 +883 48 regularizer.weight 0.04927428505598886 +883 48 optimizer.lr 0.010897658764168552 +883 48 negative_sampler.num_negs_per_pos 35.0 +883 48 training.batch_size 2.0 +883 49 model.embedding_dim 0.0 +883 49 model.scoring_fct_norm 2.0 +883 49 loss.margin 5.494105026626483 +883 49 loss.adversarial_temperature 0.3255514162752293 +883 49 regularizer.weight 0.1582048784240394 +883 49 optimizer.lr 0.0031813355369852427 +883 49 negative_sampler.num_negs_per_pos 16.0 +883 49 training.batch_size 0.0 +883 50 model.embedding_dim 1.0 +883 50 model.scoring_fct_norm 1.0 +883 50 loss.margin 3.8244341067138232 +883 50 loss.adversarial_temperature 0.7205433746514405 +883 50 regularizer.weight 0.03050355880339909 +883 50 optimizer.lr 0.016112439860355942 +883 50 negative_sampler.num_negs_per_pos 59.0 +883 50 training.batch_size 1.0 +883 51 model.embedding_dim 1.0 +883 51 model.scoring_fct_norm 2.0 +883 51 loss.margin 24.410620964835797 +883 51 loss.adversarial_temperature 0.40542797734851277 +883 51 regularizer.weight 0.012606246608041787 +883 51 optimizer.lr 0.004432328635669041 +883 51 negative_sampler.num_negs_per_pos 22.0 +883 51 training.batch_size 1.0 +883 52 model.embedding_dim 0.0 +883 52 model.scoring_fct_norm 2.0 +883 52 loss.margin 17.68851182781168 +883 52 loss.adversarial_temperature 0.694215248901229 +883 52 regularizer.weight 0.08507124627029922 +883 52 optimizer.lr 0.023449976050416824 +883 52 
negative_sampler.num_negs_per_pos 35.0 +883 52 training.batch_size 2.0 +883 53 model.embedding_dim 1.0 +883 53 model.scoring_fct_norm 1.0 +883 53 loss.margin 2.3346107103631533 +883 53 loss.adversarial_temperature 0.2792048813325083 +883 53 regularizer.weight 0.036040272116708155 +883 53 optimizer.lr 0.0063900096644992745 +883 53 negative_sampler.num_negs_per_pos 25.0 +883 53 training.batch_size 2.0 +883 54 model.embedding_dim 2.0 +883 54 model.scoring_fct_norm 1.0 +883 54 loss.margin 10.499158905353136 +883 54 loss.adversarial_temperature 0.9933680827930137 +883 54 regularizer.weight 0.18046017332258535 +883 54 optimizer.lr 0.008730870626339704 +883 54 negative_sampler.num_negs_per_pos 64.0 +883 54 training.batch_size 2.0 +883 55 model.embedding_dim 0.0 +883 55 model.scoring_fct_norm 2.0 +883 55 loss.margin 17.669519907943688 +883 55 loss.adversarial_temperature 0.2809321918860974 +883 55 regularizer.weight 0.03665876745400247 +883 55 optimizer.lr 0.014235312713759454 +883 55 negative_sampler.num_negs_per_pos 73.0 +883 55 training.batch_size 2.0 +883 56 model.embedding_dim 2.0 +883 56 model.scoring_fct_norm 1.0 +883 56 loss.margin 5.929362361271656 +883 56 loss.adversarial_temperature 0.14420736896278616 +883 56 regularizer.weight 0.05401052397726682 +883 56 optimizer.lr 0.0023099826493348675 +883 56 negative_sampler.num_negs_per_pos 84.0 +883 56 training.batch_size 2.0 +883 57 model.embedding_dim 0.0 +883 57 model.scoring_fct_norm 2.0 +883 57 loss.margin 2.4285656126295745 +883 57 loss.adversarial_temperature 0.2707718913591861 +883 57 regularizer.weight 0.019108475343620725 +883 57 optimizer.lr 0.011291363460256009 +883 57 negative_sampler.num_negs_per_pos 0.0 +883 57 training.batch_size 0.0 +883 58 model.embedding_dim 2.0 +883 58 model.scoring_fct_norm 1.0 +883 58 loss.margin 8.403096883715106 +883 58 loss.adversarial_temperature 0.16708460173167022 +883 58 regularizer.weight 0.045308182788421275 +883 58 optimizer.lr 0.008990007342926825 +883 58 
negative_sampler.num_negs_per_pos 51.0 +883 58 training.batch_size 0.0 +883 59 model.embedding_dim 1.0 +883 59 model.scoring_fct_norm 1.0 +883 59 loss.margin 21.769404430796406 +883 59 loss.adversarial_temperature 0.8323431927640145 +883 59 regularizer.weight 0.026592493063903194 +883 59 optimizer.lr 0.0013047318507563256 +883 59 negative_sampler.num_negs_per_pos 81.0 +883 59 training.batch_size 1.0 +883 60 model.embedding_dim 0.0 +883 60 model.scoring_fct_norm 1.0 +883 60 loss.margin 14.718619778032835 +883 60 loss.adversarial_temperature 0.9332398728264372 +883 60 regularizer.weight 0.2884622680738276 +883 60 optimizer.lr 0.002536307499088226 +883 60 negative_sampler.num_negs_per_pos 27.0 +883 60 training.batch_size 1.0 +883 61 model.embedding_dim 2.0 +883 61 model.scoring_fct_norm 1.0 +883 61 loss.margin 29.80998402477578 +883 61 loss.adversarial_temperature 0.336190074199767 +883 61 regularizer.weight 0.14749640226940045 +883 61 optimizer.lr 0.0591025988829184 +883 61 negative_sampler.num_negs_per_pos 99.0 +883 61 training.batch_size 0.0 +883 62 model.embedding_dim 0.0 +883 62 model.scoring_fct_norm 1.0 +883 62 loss.margin 11.917537254334164 +883 62 loss.adversarial_temperature 0.40494002161807585 +883 62 regularizer.weight 0.031695275746548776 +883 62 optimizer.lr 0.018491788657789436 +883 62 negative_sampler.num_negs_per_pos 60.0 +883 62 training.batch_size 1.0 +883 63 model.embedding_dim 1.0 +883 63 model.scoring_fct_norm 1.0 +883 63 loss.margin 18.347498571469664 +883 63 loss.adversarial_temperature 0.42440356712833466 +883 63 regularizer.weight 0.013160356640978274 +883 63 optimizer.lr 0.03380229203753809 +883 63 negative_sampler.num_negs_per_pos 76.0 +883 63 training.batch_size 0.0 +883 64 model.embedding_dim 2.0 +883 64 model.scoring_fct_norm 2.0 +883 64 loss.margin 25.447634585962753 +883 64 loss.adversarial_temperature 0.8347684008379676 +883 64 regularizer.weight 0.036254164983060576 +883 64 optimizer.lr 0.017360565514835894 +883 64 
negative_sampler.num_negs_per_pos 72.0 +883 64 training.batch_size 2.0 +883 65 model.embedding_dim 2.0 +883 65 model.scoring_fct_norm 2.0 +883 65 loss.margin 16.08682537097543 +883 65 loss.adversarial_temperature 0.52591046939023 +883 65 regularizer.weight 0.03452633546630055 +883 65 optimizer.lr 0.024358406248533974 +883 65 negative_sampler.num_negs_per_pos 60.0 +883 65 training.batch_size 0.0 +883 66 model.embedding_dim 2.0 +883 66 model.scoring_fct_norm 1.0 +883 66 loss.margin 4.915331759154881 +883 66 loss.adversarial_temperature 0.5073837222662015 +883 66 regularizer.weight 0.013425908506289861 +883 66 optimizer.lr 0.02268364865412252 +883 66 negative_sampler.num_negs_per_pos 65.0 +883 66 training.batch_size 0.0 +883 1 dataset """wn18rr""" +883 1 model """transh""" +883 1 loss """nssa""" +883 1 regularizer """transh""" +883 1 optimizer """adam""" +883 1 training_loop """owa""" +883 1 negative_sampler """basic""" +883 1 evaluator """rankbased""" +883 2 dataset """wn18rr""" +883 2 model """transh""" +883 2 loss """nssa""" +883 2 regularizer """transh""" +883 2 optimizer """adam""" +883 2 training_loop """owa""" +883 2 negative_sampler """basic""" +883 2 evaluator """rankbased""" +883 3 dataset """wn18rr""" +883 3 model """transh""" +883 3 loss """nssa""" +883 3 regularizer """transh""" +883 3 optimizer """adam""" +883 3 training_loop """owa""" +883 3 negative_sampler """basic""" +883 3 evaluator """rankbased""" +883 4 dataset """wn18rr""" +883 4 model """transh""" +883 4 loss """nssa""" +883 4 regularizer """transh""" +883 4 optimizer """adam""" +883 4 training_loop """owa""" +883 4 negative_sampler """basic""" +883 4 evaluator """rankbased""" +883 5 dataset """wn18rr""" +883 5 model """transh""" +883 5 loss """nssa""" +883 5 regularizer """transh""" +883 5 optimizer """adam""" +883 5 training_loop """owa""" +883 5 negative_sampler """basic""" +883 5 evaluator """rankbased""" +883 6 dataset """wn18rr""" +883 6 model """transh""" +883 6 loss """nssa""" +883 6 
regularizer """transh""" +883 6 optimizer """adam""" +883 6 training_loop """owa""" +883 6 negative_sampler """basic""" +883 6 evaluator """rankbased""" +883 7 dataset """wn18rr""" +883 7 model """transh""" +883 7 loss """nssa""" +883 7 regularizer """transh""" +883 7 optimizer """adam""" +883 7 training_loop """owa""" +883 7 negative_sampler """basic""" +883 7 evaluator """rankbased""" +883 8 dataset """wn18rr""" +883 8 model """transh""" +883 8 loss """nssa""" +883 8 regularizer """transh""" +883 8 optimizer """adam""" +883 8 training_loop """owa""" +883 8 negative_sampler """basic""" +883 8 evaluator """rankbased""" +883 9 dataset """wn18rr""" +883 9 model """transh""" +883 9 loss """nssa""" +883 9 regularizer """transh""" +883 9 optimizer """adam""" +883 9 training_loop """owa""" +883 9 negative_sampler """basic""" +883 9 evaluator """rankbased""" +883 10 dataset """wn18rr""" +883 10 model """transh""" +883 10 loss """nssa""" +883 10 regularizer """transh""" +883 10 optimizer """adam""" +883 10 training_loop """owa""" +883 10 negative_sampler """basic""" +883 10 evaluator """rankbased""" +883 11 dataset """wn18rr""" +883 11 model """transh""" +883 11 loss """nssa""" +883 11 regularizer """transh""" +883 11 optimizer """adam""" +883 11 training_loop """owa""" +883 11 negative_sampler """basic""" +883 11 evaluator """rankbased""" +883 12 dataset """wn18rr""" +883 12 model """transh""" +883 12 loss """nssa""" +883 12 regularizer """transh""" +883 12 optimizer """adam""" +883 12 training_loop """owa""" +883 12 negative_sampler """basic""" +883 12 evaluator """rankbased""" +883 13 dataset """wn18rr""" +883 13 model """transh""" +883 13 loss """nssa""" +883 13 regularizer """transh""" +883 13 optimizer """adam""" +883 13 training_loop """owa""" +883 13 negative_sampler """basic""" +883 13 evaluator """rankbased""" +883 14 dataset """wn18rr""" +883 14 model """transh""" +883 14 loss """nssa""" +883 14 regularizer """transh""" +883 14 optimizer """adam""" +883 14 
training_loop """owa""" +883 14 negative_sampler """basic""" +883 14 evaluator """rankbased""" +883 15 dataset """wn18rr""" +883 15 model """transh""" +883 15 loss """nssa""" +883 15 regularizer """transh""" +883 15 optimizer """adam""" +883 15 training_loop """owa""" +883 15 negative_sampler """basic""" +883 15 evaluator """rankbased""" +883 16 dataset """wn18rr""" +883 16 model """transh""" +883 16 loss """nssa""" +883 16 regularizer """transh""" +883 16 optimizer """adam""" +883 16 training_loop """owa""" +883 16 negative_sampler """basic""" +883 16 evaluator """rankbased""" +883 17 dataset """wn18rr""" +883 17 model """transh""" +883 17 loss """nssa""" +883 17 regularizer """transh""" +883 17 optimizer """adam""" +883 17 training_loop """owa""" +883 17 negative_sampler """basic""" +883 17 evaluator """rankbased""" +883 18 dataset """wn18rr""" +883 18 model """transh""" +883 18 loss """nssa""" +883 18 regularizer """transh""" +883 18 optimizer """adam""" +883 18 training_loop """owa""" +883 18 negative_sampler """basic""" +883 18 evaluator """rankbased""" +883 19 dataset """wn18rr""" +883 19 model """transh""" +883 19 loss """nssa""" +883 19 regularizer """transh""" +883 19 optimizer """adam""" +883 19 training_loop """owa""" +883 19 negative_sampler """basic""" +883 19 evaluator """rankbased""" +883 20 dataset """wn18rr""" +883 20 model """transh""" +883 20 loss """nssa""" +883 20 regularizer """transh""" +883 20 optimizer """adam""" +883 20 training_loop """owa""" +883 20 negative_sampler """basic""" +883 20 evaluator """rankbased""" +883 21 dataset """wn18rr""" +883 21 model """transh""" +883 21 loss """nssa""" +883 21 regularizer """transh""" +883 21 optimizer """adam""" +883 21 training_loop """owa""" +883 21 negative_sampler """basic""" +883 21 evaluator """rankbased""" +883 22 dataset """wn18rr""" +883 22 model """transh""" +883 22 loss """nssa""" +883 22 regularizer """transh""" +883 22 optimizer """adam""" +883 22 training_loop """owa""" +883 22 
negative_sampler """basic""" +883 22 evaluator """rankbased""" +883 23 dataset """wn18rr""" +883 23 model """transh""" +883 23 loss """nssa""" +883 23 regularizer """transh""" +883 23 optimizer """adam""" +883 23 training_loop """owa""" +883 23 negative_sampler """basic""" +883 23 evaluator """rankbased""" +883 24 dataset """wn18rr""" +883 24 model """transh""" +883 24 loss """nssa""" +883 24 regularizer """transh""" +883 24 optimizer """adam""" +883 24 training_loop """owa""" +883 24 negative_sampler """basic""" +883 24 evaluator """rankbased""" +883 25 dataset """wn18rr""" +883 25 model """transh""" +883 25 loss """nssa""" +883 25 regularizer """transh""" +883 25 optimizer """adam""" +883 25 training_loop """owa""" +883 25 negative_sampler """basic""" +883 25 evaluator """rankbased""" +883 26 dataset """wn18rr""" +883 26 model """transh""" +883 26 loss """nssa""" +883 26 regularizer """transh""" +883 26 optimizer """adam""" +883 26 training_loop """owa""" +883 26 negative_sampler """basic""" +883 26 evaluator """rankbased""" +883 27 dataset """wn18rr""" +883 27 model """transh""" +883 27 loss """nssa""" +883 27 regularizer """transh""" +883 27 optimizer """adam""" +883 27 training_loop """owa""" +883 27 negative_sampler """basic""" +883 27 evaluator """rankbased""" +883 28 dataset """wn18rr""" +883 28 model """transh""" +883 28 loss """nssa""" +883 28 regularizer """transh""" +883 28 optimizer """adam""" +883 28 training_loop """owa""" +883 28 negative_sampler """basic""" +883 28 evaluator """rankbased""" +883 29 dataset """wn18rr""" +883 29 model """transh""" +883 29 loss """nssa""" +883 29 regularizer """transh""" +883 29 optimizer """adam""" +883 29 training_loop """owa""" +883 29 negative_sampler """basic""" +883 29 evaluator """rankbased""" +883 30 dataset """wn18rr""" +883 30 model """transh""" +883 30 loss """nssa""" +883 30 regularizer """transh""" +883 30 optimizer """adam""" +883 30 training_loop """owa""" +883 30 negative_sampler """basic""" +883 30 
evaluator """rankbased""" +883 31 dataset """wn18rr""" +883 31 model """transh""" +883 31 loss """nssa""" +883 31 regularizer """transh""" +883 31 optimizer """adam""" +883 31 training_loop """owa""" +883 31 negative_sampler """basic""" +883 31 evaluator """rankbased""" +883 32 dataset """wn18rr""" +883 32 model """transh""" +883 32 loss """nssa""" +883 32 regularizer """transh""" +883 32 optimizer """adam""" +883 32 training_loop """owa""" +883 32 negative_sampler """basic""" +883 32 evaluator """rankbased""" +883 33 dataset """wn18rr""" +883 33 model """transh""" +883 33 loss """nssa""" +883 33 regularizer """transh""" +883 33 optimizer """adam""" +883 33 training_loop """owa""" +883 33 negative_sampler """basic""" +883 33 evaluator """rankbased""" +883 34 dataset """wn18rr""" +883 34 model """transh""" +883 34 loss """nssa""" +883 34 regularizer """transh""" +883 34 optimizer """adam""" +883 34 training_loop """owa""" +883 34 negative_sampler """basic""" +883 34 evaluator """rankbased""" +883 35 dataset """wn18rr""" +883 35 model """transh""" +883 35 loss """nssa""" +883 35 regularizer """transh""" +883 35 optimizer """adam""" +883 35 training_loop """owa""" +883 35 negative_sampler """basic""" +883 35 evaluator """rankbased""" +883 36 dataset """wn18rr""" +883 36 model """transh""" +883 36 loss """nssa""" +883 36 regularizer """transh""" +883 36 optimizer """adam""" +883 36 training_loop """owa""" +883 36 negative_sampler """basic""" +883 36 evaluator """rankbased""" +883 37 dataset """wn18rr""" +883 37 model """transh""" +883 37 loss """nssa""" +883 37 regularizer """transh""" +883 37 optimizer """adam""" +883 37 training_loop """owa""" +883 37 negative_sampler """basic""" +883 37 evaluator """rankbased""" +883 38 dataset """wn18rr""" +883 38 model """transh""" +883 38 loss """nssa""" +883 38 regularizer """transh""" +883 38 optimizer """adam""" +883 38 training_loop """owa""" +883 38 negative_sampler """basic""" +883 38 evaluator """rankbased""" +883 39 
dataset """wn18rr""" +883 39 model """transh""" +883 39 loss """nssa""" +883 39 regularizer """transh""" +883 39 optimizer """adam""" +883 39 training_loop """owa""" +883 39 negative_sampler """basic""" +883 39 evaluator """rankbased""" +883 40 dataset """wn18rr""" +883 40 model """transh""" +883 40 loss """nssa""" +883 40 regularizer """transh""" +883 40 optimizer """adam""" +883 40 training_loop """owa""" +883 40 negative_sampler """basic""" +883 40 evaluator """rankbased""" +883 41 dataset """wn18rr""" +883 41 model """transh""" +883 41 loss """nssa""" +883 41 regularizer """transh""" +883 41 optimizer """adam""" +883 41 training_loop """owa""" +883 41 negative_sampler """basic""" +883 41 evaluator """rankbased""" +883 42 dataset """wn18rr""" +883 42 model """transh""" +883 42 loss """nssa""" +883 42 regularizer """transh""" +883 42 optimizer """adam""" +883 42 training_loop """owa""" +883 42 negative_sampler """basic""" +883 42 evaluator """rankbased""" +883 43 dataset """wn18rr""" +883 43 model """transh""" +883 43 loss """nssa""" +883 43 regularizer """transh""" +883 43 optimizer """adam""" +883 43 training_loop """owa""" +883 43 negative_sampler """basic""" +883 43 evaluator """rankbased""" +883 44 dataset """wn18rr""" +883 44 model """transh""" +883 44 loss """nssa""" +883 44 regularizer """transh""" +883 44 optimizer """adam""" +883 44 training_loop """owa""" +883 44 negative_sampler """basic""" +883 44 evaluator """rankbased""" +883 45 dataset """wn18rr""" +883 45 model """transh""" +883 45 loss """nssa""" +883 45 regularizer """transh""" +883 45 optimizer """adam""" +883 45 training_loop """owa""" +883 45 negative_sampler """basic""" +883 45 evaluator """rankbased""" +883 46 dataset """wn18rr""" +883 46 model """transh""" +883 46 loss """nssa""" +883 46 regularizer """transh""" +883 46 optimizer """adam""" +883 46 training_loop """owa""" +883 46 negative_sampler """basic""" +883 46 evaluator """rankbased""" +883 47 dataset """wn18rr""" +883 47 model 
"""transh""" +883 47 loss """nssa""" +883 47 regularizer """transh""" +883 47 optimizer """adam""" +883 47 training_loop """owa""" +883 47 negative_sampler """basic""" +883 47 evaluator """rankbased""" +883 48 dataset """wn18rr""" +883 48 model """transh""" +883 48 loss """nssa""" +883 48 regularizer """transh""" +883 48 optimizer """adam""" +883 48 training_loop """owa""" +883 48 negative_sampler """basic""" +883 48 evaluator """rankbased""" +883 49 dataset """wn18rr""" +883 49 model """transh""" +883 49 loss """nssa""" +883 49 regularizer """transh""" +883 49 optimizer """adam""" +883 49 training_loop """owa""" +883 49 negative_sampler """basic""" +883 49 evaluator """rankbased""" +883 50 dataset """wn18rr""" +883 50 model """transh""" +883 50 loss """nssa""" +883 50 regularizer """transh""" +883 50 optimizer """adam""" +883 50 training_loop """owa""" +883 50 negative_sampler """basic""" +883 50 evaluator """rankbased""" +883 51 dataset """wn18rr""" +883 51 model """transh""" +883 51 loss """nssa""" +883 51 regularizer """transh""" +883 51 optimizer """adam""" +883 51 training_loop """owa""" +883 51 negative_sampler """basic""" +883 51 evaluator """rankbased""" +883 52 dataset """wn18rr""" +883 52 model """transh""" +883 52 loss """nssa""" +883 52 regularizer """transh""" +883 52 optimizer """adam""" +883 52 training_loop """owa""" +883 52 negative_sampler """basic""" +883 52 evaluator """rankbased""" +883 53 dataset """wn18rr""" +883 53 model """transh""" +883 53 loss """nssa""" +883 53 regularizer """transh""" +883 53 optimizer """adam""" +883 53 training_loop """owa""" +883 53 negative_sampler """basic""" +883 53 evaluator """rankbased""" +883 54 dataset """wn18rr""" +883 54 model """transh""" +883 54 loss """nssa""" +883 54 regularizer """transh""" +883 54 optimizer """adam""" +883 54 training_loop """owa""" +883 54 negative_sampler """basic""" +883 54 evaluator """rankbased""" +883 55 dataset """wn18rr""" +883 55 model """transh""" +883 55 loss """nssa""" 
+883 55 regularizer """transh""" +883 55 optimizer """adam""" +883 55 training_loop """owa""" +883 55 negative_sampler """basic""" +883 55 evaluator """rankbased""" +883 56 dataset """wn18rr""" +883 56 model """transh""" +883 56 loss """nssa""" +883 56 regularizer """transh""" +883 56 optimizer """adam""" +883 56 training_loop """owa""" +883 56 negative_sampler """basic""" +883 56 evaluator """rankbased""" +883 57 dataset """wn18rr""" +883 57 model """transh""" +883 57 loss """nssa""" +883 57 regularizer """transh""" +883 57 optimizer """adam""" +883 57 training_loop """owa""" +883 57 negative_sampler """basic""" +883 57 evaluator """rankbased""" +883 58 dataset """wn18rr""" +883 58 model """transh""" +883 58 loss """nssa""" +883 58 regularizer """transh""" +883 58 optimizer """adam""" +883 58 training_loop """owa""" +883 58 negative_sampler """basic""" +883 58 evaluator """rankbased""" +883 59 dataset """wn18rr""" +883 59 model """transh""" +883 59 loss """nssa""" +883 59 regularizer """transh""" +883 59 optimizer """adam""" +883 59 training_loop """owa""" +883 59 negative_sampler """basic""" +883 59 evaluator """rankbased""" +883 60 dataset """wn18rr""" +883 60 model """transh""" +883 60 loss """nssa""" +883 60 regularizer """transh""" +883 60 optimizer """adam""" +883 60 training_loop """owa""" +883 60 negative_sampler """basic""" +883 60 evaluator """rankbased""" +883 61 dataset """wn18rr""" +883 61 model """transh""" +883 61 loss """nssa""" +883 61 regularizer """transh""" +883 61 optimizer """adam""" +883 61 training_loop """owa""" +883 61 negative_sampler """basic""" +883 61 evaluator """rankbased""" +883 62 dataset """wn18rr""" +883 62 model """transh""" +883 62 loss """nssa""" +883 62 regularizer """transh""" +883 62 optimizer """adam""" +883 62 training_loop """owa""" +883 62 negative_sampler """basic""" +883 62 evaluator """rankbased""" +883 63 dataset """wn18rr""" +883 63 model """transh""" +883 63 loss """nssa""" +883 63 regularizer """transh""" +883 
63 optimizer """adam""" +883 63 training_loop """owa""" +883 63 negative_sampler """basic""" +883 63 evaluator """rankbased""" +883 64 dataset """wn18rr""" +883 64 model """transh""" +883 64 loss """nssa""" +883 64 regularizer """transh""" +883 64 optimizer """adam""" +883 64 training_loop """owa""" +883 64 negative_sampler """basic""" +883 64 evaluator """rankbased""" +883 65 dataset """wn18rr""" +883 65 model """transh""" +883 65 loss """nssa""" +883 65 regularizer """transh""" +883 65 optimizer """adam""" +883 65 training_loop """owa""" +883 65 negative_sampler """basic""" +883 65 evaluator """rankbased""" +883 66 dataset """wn18rr""" +883 66 model """transh""" +883 66 loss """nssa""" +883 66 regularizer """transh""" +883 66 optimizer """adam""" +883 66 training_loop """owa""" +883 66 negative_sampler """basic""" +883 66 evaluator """rankbased""" +884 1 model.embedding_dim 1.0 +884 1 model.scoring_fct_norm 2.0 +884 1 loss.margin 24.04791210434506 +884 1 loss.adversarial_temperature 0.4001231068949612 +884 1 regularizer.weight 0.2635887233159223 +884 1 optimizer.lr 0.004647605099999779 +884 1 negative_sampler.num_negs_per_pos 36.0 +884 1 training.batch_size 0.0 +884 2 model.embedding_dim 1.0 +884 2 model.scoring_fct_norm 2.0 +884 2 loss.margin 8.5104033995426 +884 2 loss.adversarial_temperature 0.6818176980977031 +884 2 regularizer.weight 0.02307327518517174 +884 2 optimizer.lr 0.010273030076217366 +884 2 negative_sampler.num_negs_per_pos 99.0 +884 2 training.batch_size 2.0 +884 3 model.embedding_dim 0.0 +884 3 model.scoring_fct_norm 2.0 +884 3 loss.margin 23.514721103251077 +884 3 loss.adversarial_temperature 0.778814770385485 +884 3 regularizer.weight 0.04505816116495883 +884 3 optimizer.lr 0.03186224005578611 +884 3 negative_sampler.num_negs_per_pos 56.0 +884 3 training.batch_size 0.0 +884 4 model.embedding_dim 2.0 +884 4 model.scoring_fct_norm 2.0 +884 4 loss.margin 1.1575000996750673 +884 4 loss.adversarial_temperature 0.6675487087949015 +884 4 
regularizer.weight 0.026845679845586497 +884 4 optimizer.lr 0.023664164504805203 +884 4 negative_sampler.num_negs_per_pos 61.0 +884 4 training.batch_size 1.0 +884 5 model.embedding_dim 0.0 +884 5 model.scoring_fct_norm 2.0 +884 5 loss.margin 27.146253147449006 +884 5 loss.adversarial_temperature 0.5471613961411324 +884 5 regularizer.weight 0.028631511718619395 +884 5 optimizer.lr 0.0018245042092224814 +884 5 negative_sampler.num_negs_per_pos 1.0 +884 5 training.batch_size 2.0 +884 6 model.embedding_dim 0.0 +884 6 model.scoring_fct_norm 1.0 +884 6 loss.margin 21.491296165588672 +884 6 loss.adversarial_temperature 0.9793019823740536 +884 6 regularizer.weight 0.22045926748324896 +884 6 optimizer.lr 0.06039318125194543 +884 6 negative_sampler.num_negs_per_pos 88.0 +884 6 training.batch_size 0.0 +884 7 model.embedding_dim 1.0 +884 7 model.scoring_fct_norm 1.0 +884 7 loss.margin 4.0132315793818005 +884 7 loss.adversarial_temperature 0.1151018283738774 +884 7 regularizer.weight 0.2806491495819814 +884 7 optimizer.lr 0.0012744253747952992 +884 7 negative_sampler.num_negs_per_pos 83.0 +884 7 training.batch_size 1.0 +884 8 model.embedding_dim 2.0 +884 8 model.scoring_fct_norm 2.0 +884 8 loss.margin 13.15401352124969 +884 8 loss.adversarial_temperature 0.8834022957199309 +884 8 regularizer.weight 0.011188049298376896 +884 8 optimizer.lr 0.008005951919425552 +884 8 negative_sampler.num_negs_per_pos 69.0 +884 8 training.batch_size 2.0 +884 9 model.embedding_dim 1.0 +884 9 model.scoring_fct_norm 1.0 +884 9 loss.margin 19.184607031401132 +884 9 loss.adversarial_temperature 0.7496525431040485 +884 9 regularizer.weight 0.030313311407660436 +884 9 optimizer.lr 0.009877247781943226 +884 9 negative_sampler.num_negs_per_pos 30.0 +884 9 training.batch_size 0.0 +884 10 model.embedding_dim 0.0 +884 10 model.scoring_fct_norm 1.0 +884 10 loss.margin 25.27196113061092 +884 10 loss.adversarial_temperature 0.6819884203537842 +884 10 regularizer.weight 0.22992752214733486 +884 10 optimizer.lr 
0.0011171536990504695 +884 10 negative_sampler.num_negs_per_pos 58.0 +884 10 training.batch_size 0.0 +884 11 model.embedding_dim 2.0 +884 11 model.scoring_fct_norm 1.0 +884 11 loss.margin 14.669953419540597 +884 11 loss.adversarial_temperature 0.15157829575025847 +884 11 regularizer.weight 0.18407148964298942 +884 11 optimizer.lr 0.003712508790421765 +884 11 negative_sampler.num_negs_per_pos 70.0 +884 11 training.batch_size 2.0 +884 12 model.embedding_dim 2.0 +884 12 model.scoring_fct_norm 2.0 +884 12 loss.margin 9.08276253510757 +884 12 loss.adversarial_temperature 0.8418910176326733 +884 12 regularizer.weight 0.020144315603218572 +884 12 optimizer.lr 0.023855174160173187 +884 12 negative_sampler.num_negs_per_pos 12.0 +884 12 training.batch_size 1.0 +884 13 model.embedding_dim 2.0 +884 13 model.scoring_fct_norm 1.0 +884 13 loss.margin 10.93379969950826 +884 13 loss.adversarial_temperature 0.4992952087462704 +884 13 regularizer.weight 0.2455336977725269 +884 13 optimizer.lr 0.04084810616349172 +884 13 negative_sampler.num_negs_per_pos 57.0 +884 13 training.batch_size 2.0 +884 14 model.embedding_dim 0.0 +884 14 model.scoring_fct_norm 2.0 +884 14 loss.margin 28.889613922865003 +884 14 loss.adversarial_temperature 0.9031117010607626 +884 14 regularizer.weight 0.06889211552048874 +884 14 optimizer.lr 0.01040517877448912 +884 14 negative_sampler.num_negs_per_pos 57.0 +884 14 training.batch_size 0.0 +884 15 model.embedding_dim 1.0 +884 15 model.scoring_fct_norm 1.0 +884 15 loss.margin 29.449386706446766 +884 15 loss.adversarial_temperature 0.38340026625926116 +884 15 regularizer.weight 0.12022971823216008 +884 15 optimizer.lr 0.006646472512643829 +884 15 negative_sampler.num_negs_per_pos 34.0 +884 15 training.batch_size 0.0 +884 16 model.embedding_dim 1.0 +884 16 model.scoring_fct_norm 1.0 +884 16 loss.margin 21.957520540088296 +884 16 loss.adversarial_temperature 0.4380305964187161 +884 16 regularizer.weight 0.08252912413509339 +884 16 optimizer.lr 0.0023310505363193404 
+884 16 negative_sampler.num_negs_per_pos 30.0 +884 16 training.batch_size 2.0 +884 17 model.embedding_dim 0.0 +884 17 model.scoring_fct_norm 1.0 +884 17 loss.margin 2.8959477458644485 +884 17 loss.adversarial_temperature 0.4596322007664574 +884 17 regularizer.weight 0.07414641987072348 +884 17 optimizer.lr 0.002084340268735361 +884 17 negative_sampler.num_negs_per_pos 72.0 +884 17 training.batch_size 2.0 +884 18 model.embedding_dim 1.0 +884 18 model.scoring_fct_norm 2.0 +884 18 loss.margin 16.884421775767446 +884 18 loss.adversarial_temperature 0.6108104825167935 +884 18 regularizer.weight 0.05330937021474769 +884 18 optimizer.lr 0.005621272819693336 +884 18 negative_sampler.num_negs_per_pos 41.0 +884 18 training.batch_size 0.0 +884 19 model.embedding_dim 2.0 +884 19 model.scoring_fct_norm 2.0 +884 19 loss.margin 18.58553727810464 +884 19 loss.adversarial_temperature 0.1631459830456539 +884 19 regularizer.weight 0.010504053078008162 +884 19 optimizer.lr 0.0065603137146814285 +884 19 negative_sampler.num_negs_per_pos 9.0 +884 19 training.batch_size 1.0 +884 20 model.embedding_dim 1.0 +884 20 model.scoring_fct_norm 1.0 +884 20 loss.margin 27.93272456753427 +884 20 loss.adversarial_temperature 0.8389207018714466 +884 20 regularizer.weight 0.020215771397392487 +884 20 optimizer.lr 0.0011149796980381196 +884 20 negative_sampler.num_negs_per_pos 91.0 +884 20 training.batch_size 0.0 +884 21 model.embedding_dim 1.0 +884 21 model.scoring_fct_norm 1.0 +884 21 loss.margin 2.9798456523400545 +884 21 loss.adversarial_temperature 0.12926351562713187 +884 21 regularizer.weight 0.1350312928384126 +884 21 optimizer.lr 0.015753634243172384 +884 21 negative_sampler.num_negs_per_pos 49.0 +884 21 training.batch_size 1.0 +884 22 model.embedding_dim 1.0 +884 22 model.scoring_fct_norm 1.0 +884 22 loss.margin 3.054966884131629 +884 22 loss.adversarial_temperature 0.28375504152159226 +884 22 regularizer.weight 0.04849749835311239 +884 22 optimizer.lr 0.0013232940837293017 +884 22 
negative_sampler.num_negs_per_pos 53.0 +884 22 training.batch_size 2.0 +884 23 model.embedding_dim 1.0 +884 23 model.scoring_fct_norm 2.0 +884 23 loss.margin 27.09649580729978 +884 23 loss.adversarial_temperature 0.2485998708604759 +884 23 regularizer.weight 0.04410385870778051 +884 23 optimizer.lr 0.005932004010140301 +884 23 negative_sampler.num_negs_per_pos 62.0 +884 23 training.batch_size 1.0 +884 24 model.embedding_dim 0.0 +884 24 model.scoring_fct_norm 2.0 +884 24 loss.margin 1.9407577385811272 +884 24 loss.adversarial_temperature 0.8865752753360904 +884 24 regularizer.weight 0.012186948858799897 +884 24 optimizer.lr 0.00852305716749741 +884 24 negative_sampler.num_negs_per_pos 20.0 +884 24 training.batch_size 0.0 +884 25 model.embedding_dim 0.0 +884 25 model.scoring_fct_norm 2.0 +884 25 loss.margin 17.281425140663988 +884 25 loss.adversarial_temperature 0.5654376527964668 +884 25 regularizer.weight 0.021915595679722934 +884 25 optimizer.lr 0.006660168621781543 +884 25 negative_sampler.num_negs_per_pos 49.0 +884 25 training.batch_size 2.0 +884 26 model.embedding_dim 1.0 +884 26 model.scoring_fct_norm 1.0 +884 26 loss.margin 4.850744053762693 +884 26 loss.adversarial_temperature 0.8581813756537359 +884 26 regularizer.weight 0.04315541250413146 +884 26 optimizer.lr 0.07521728764842067 +884 26 negative_sampler.num_negs_per_pos 24.0 +884 26 training.batch_size 1.0 +884 27 model.embedding_dim 0.0 +884 27 model.scoring_fct_norm 1.0 +884 27 loss.margin 8.758342204455694 +884 27 loss.adversarial_temperature 0.5745347464047174 +884 27 regularizer.weight 0.05848235045247314 +884 27 optimizer.lr 0.015218770107406538 +884 27 negative_sampler.num_negs_per_pos 81.0 +884 27 training.batch_size 1.0 +884 28 model.embedding_dim 2.0 +884 28 model.scoring_fct_norm 1.0 +884 28 loss.margin 24.056547612410423 +884 28 loss.adversarial_temperature 0.5696939660021884 +884 28 regularizer.weight 0.018375979319823592 +884 28 optimizer.lr 0.0023021708670014347 +884 28 
negative_sampler.num_negs_per_pos 92.0 +884 28 training.batch_size 1.0 +884 29 model.embedding_dim 1.0 +884 29 model.scoring_fct_norm 1.0 +884 29 loss.margin 16.641749597829374 +884 29 loss.adversarial_temperature 0.884843889453264 +884 29 regularizer.weight 0.011939550038002166 +884 29 optimizer.lr 0.011232036672818945 +884 29 negative_sampler.num_negs_per_pos 52.0 +884 29 training.batch_size 1.0 +884 30 model.embedding_dim 1.0 +884 30 model.scoring_fct_norm 2.0 +884 30 loss.margin 9.526160955410528 +884 30 loss.adversarial_temperature 0.8719320824749186 +884 30 regularizer.weight 0.015022400460217376 +884 30 optimizer.lr 0.01390096725843263 +884 30 negative_sampler.num_negs_per_pos 27.0 +884 30 training.batch_size 0.0 +884 31 model.embedding_dim 1.0 +884 31 model.scoring_fct_norm 2.0 +884 31 loss.margin 9.705415540031336 +884 31 loss.adversarial_temperature 0.39870094770673203 +884 31 regularizer.weight 0.16071727068143013 +884 31 optimizer.lr 0.0020205467330126747 +884 31 negative_sampler.num_negs_per_pos 66.0 +884 31 training.batch_size 0.0 +884 32 model.embedding_dim 0.0 +884 32 model.scoring_fct_norm 1.0 +884 32 loss.margin 20.657229753712507 +884 32 loss.adversarial_temperature 0.6655910525102677 +884 32 regularizer.weight 0.2953564282947453 +884 32 optimizer.lr 0.0011105320606287264 +884 32 negative_sampler.num_negs_per_pos 82.0 +884 32 training.batch_size 0.0 +884 33 model.embedding_dim 1.0 +884 33 model.scoring_fct_norm 2.0 +884 33 loss.margin 3.4155608892927054 +884 33 loss.adversarial_temperature 0.9796966435490365 +884 33 regularizer.weight 0.05665969705878825 +884 33 optimizer.lr 0.06097785257951043 +884 33 negative_sampler.num_negs_per_pos 98.0 +884 33 training.batch_size 2.0 +884 34 model.embedding_dim 2.0 +884 34 model.scoring_fct_norm 2.0 +884 34 loss.margin 18.901272707485127 +884 34 loss.adversarial_temperature 0.15439713075828906 +884 34 regularizer.weight 0.021394881706795366 +884 34 optimizer.lr 0.011025377525720357 +884 34 
negative_sampler.num_negs_per_pos 98.0 +884 34 training.batch_size 2.0 +884 35 model.embedding_dim 1.0 +884 35 model.scoring_fct_norm 1.0 +884 35 loss.margin 9.338740472166094 +884 35 loss.adversarial_temperature 0.4727938468610809 +884 35 regularizer.weight 0.020335851083919092 +884 35 optimizer.lr 0.002180102828549131 +884 35 negative_sampler.num_negs_per_pos 90.0 +884 35 training.batch_size 2.0 +884 36 model.embedding_dim 2.0 +884 36 model.scoring_fct_norm 2.0 +884 36 loss.margin 2.161228258789614 +884 36 loss.adversarial_temperature 0.6504305653046745 +884 36 regularizer.weight 0.013455584353719215 +884 36 optimizer.lr 0.0015924776706136648 +884 36 negative_sampler.num_negs_per_pos 65.0 +884 36 training.batch_size 1.0 +884 37 model.embedding_dim 2.0 +884 37 model.scoring_fct_norm 1.0 +884 37 loss.margin 5.594588746493119 +884 37 loss.adversarial_temperature 0.6770610990623368 +884 37 regularizer.weight 0.022078635708550502 +884 37 optimizer.lr 0.006684372065437921 +884 37 negative_sampler.num_negs_per_pos 85.0 +884 37 training.batch_size 2.0 +884 38 model.embedding_dim 1.0 +884 38 model.scoring_fct_norm 1.0 +884 38 loss.margin 20.034380117109322 +884 38 loss.adversarial_temperature 0.7726764912028214 +884 38 regularizer.weight 0.02287780646197494 +884 38 optimizer.lr 0.01924522313743462 +884 38 negative_sampler.num_negs_per_pos 39.0 +884 38 training.batch_size 0.0 +884 39 model.embedding_dim 1.0 +884 39 model.scoring_fct_norm 1.0 +884 39 loss.margin 22.97621624791091 +884 39 loss.adversarial_temperature 0.16085668718819146 +884 39 regularizer.weight 0.10683217948117624 +884 39 optimizer.lr 0.0038728717863308463 +884 39 negative_sampler.num_negs_per_pos 99.0 +884 39 training.batch_size 0.0 +884 40 model.embedding_dim 0.0 +884 40 model.scoring_fct_norm 2.0 +884 40 loss.margin 20.97657456660576 +884 40 loss.adversarial_temperature 0.4355900541986207 +884 40 regularizer.weight 0.2233265521455233 +884 40 optimizer.lr 0.003650878956764909 +884 40 
negative_sampler.num_negs_per_pos 38.0 +884 40 training.batch_size 2.0 +884 41 model.embedding_dim 1.0 +884 41 model.scoring_fct_norm 2.0 +884 41 loss.margin 20.497324274534733 +884 41 loss.adversarial_temperature 0.9095649873693628 +884 41 regularizer.weight 0.03560305721359108 +884 41 optimizer.lr 0.08077862213895834 +884 41 negative_sampler.num_negs_per_pos 74.0 +884 41 training.batch_size 2.0 +884 42 model.embedding_dim 2.0 +884 42 model.scoring_fct_norm 1.0 +884 42 loss.margin 7.748744036995378 +884 42 loss.adversarial_temperature 0.24469689630245756 +884 42 regularizer.weight 0.057153583056036655 +884 42 optimizer.lr 0.010136977616907931 +884 42 negative_sampler.num_negs_per_pos 60.0 +884 42 training.batch_size 0.0 +884 43 model.embedding_dim 0.0 +884 43 model.scoring_fct_norm 1.0 +884 43 loss.margin 28.638096552004733 +884 43 loss.adversarial_temperature 0.33296106562469596 +884 43 regularizer.weight 0.02187845398134649 +884 43 optimizer.lr 0.009009888946986148 +884 43 negative_sampler.num_negs_per_pos 94.0 +884 43 training.batch_size 2.0 +884 44 model.embedding_dim 2.0 +884 44 model.scoring_fct_norm 1.0 +884 44 loss.margin 20.201454131761704 +884 44 loss.adversarial_temperature 0.4275895074537165 +884 44 regularizer.weight 0.14475549448274613 +884 44 optimizer.lr 0.0019584023498110457 +884 44 negative_sampler.num_negs_per_pos 77.0 +884 44 training.batch_size 1.0 +884 45 model.embedding_dim 1.0 +884 45 model.scoring_fct_norm 1.0 +884 45 loss.margin 21.598533816803048 +884 45 loss.adversarial_temperature 0.39819520971740086 +884 45 regularizer.weight 0.027864885805165635 +884 45 optimizer.lr 0.0015530067530243948 +884 45 negative_sampler.num_negs_per_pos 49.0 +884 45 training.batch_size 2.0 +884 46 model.embedding_dim 2.0 +884 46 model.scoring_fct_norm 2.0 +884 46 loss.margin 6.931973650282789 +884 46 loss.adversarial_temperature 0.18753167165134643 +884 46 regularizer.weight 0.011894253183498729 +884 46 optimizer.lr 0.001832923178056454 +884 46 
negative_sampler.num_negs_per_pos 79.0 +884 46 training.batch_size 1.0 +884 47 model.embedding_dim 1.0 +884 47 model.scoring_fct_norm 1.0 +884 47 loss.margin 18.11650579681898 +884 47 loss.adversarial_temperature 0.5521598467327206 +884 47 regularizer.weight 0.17994669273227332 +884 47 optimizer.lr 0.030005121358660777 +884 47 negative_sampler.num_negs_per_pos 38.0 +884 47 training.batch_size 1.0 +884 48 model.embedding_dim 2.0 +884 48 model.scoring_fct_norm 1.0 +884 48 loss.margin 7.528816481019474 +884 48 loss.adversarial_temperature 0.12217092394325377 +884 48 regularizer.weight 0.01721608804430708 +884 48 optimizer.lr 0.0019483746751800056 +884 48 negative_sampler.num_negs_per_pos 51.0 +884 48 training.batch_size 0.0 +884 49 model.embedding_dim 0.0 +884 49 model.scoring_fct_norm 2.0 +884 49 loss.margin 26.179941698134417 +884 49 loss.adversarial_temperature 0.9526205225339536 +884 49 regularizer.weight 0.014081731540177663 +884 49 optimizer.lr 0.03903589216754356 +884 49 negative_sampler.num_negs_per_pos 99.0 +884 49 training.batch_size 1.0 +884 50 model.embedding_dim 1.0 +884 50 model.scoring_fct_norm 1.0 +884 50 loss.margin 16.174935108921396 +884 50 loss.adversarial_temperature 0.48665277294870024 +884 50 regularizer.weight 0.030528985577755016 +884 50 optimizer.lr 0.09841396819210364 +884 50 negative_sampler.num_negs_per_pos 91.0 +884 50 training.batch_size 1.0 +884 51 model.embedding_dim 0.0 +884 51 model.scoring_fct_norm 1.0 +884 51 loss.margin 2.241022085092122 +884 51 loss.adversarial_temperature 0.21234939477578707 +884 51 regularizer.weight 0.023405988671517827 +884 51 optimizer.lr 0.0036922903535770014 +884 51 negative_sampler.num_negs_per_pos 0.0 +884 51 training.batch_size 2.0 +884 52 model.embedding_dim 0.0 +884 52 model.scoring_fct_norm 1.0 +884 52 loss.margin 26.13459107239111 +884 52 loss.adversarial_temperature 0.6597478942208875 +884 52 regularizer.weight 0.05652054267499222 +884 52 optimizer.lr 0.00602470692081693 +884 52 
negative_sampler.num_negs_per_pos 66.0 +884 52 training.batch_size 1.0 +884 53 model.embedding_dim 1.0 +884 53 model.scoring_fct_norm 2.0 +884 53 loss.margin 25.200886752648564 +884 53 loss.adversarial_temperature 0.18311707008745529 +884 53 regularizer.weight 0.01440102851041408 +884 53 optimizer.lr 0.0373126064218792 +884 53 negative_sampler.num_negs_per_pos 29.0 +884 53 training.batch_size 1.0 +884 54 model.embedding_dim 1.0 +884 54 model.scoring_fct_norm 2.0 +884 54 loss.margin 1.4967685392518035 +884 54 loss.adversarial_temperature 0.7731336797688303 +884 54 regularizer.weight 0.023196391305393707 +884 54 optimizer.lr 0.016084067892927052 +884 54 negative_sampler.num_negs_per_pos 37.0 +884 54 training.batch_size 2.0 +884 55 model.embedding_dim 1.0 +884 55 model.scoring_fct_norm 1.0 +884 55 loss.margin 11.510097765718298 +884 55 loss.adversarial_temperature 0.7609675045929541 +884 55 regularizer.weight 0.01811874403128455 +884 55 optimizer.lr 0.007106129923903185 +884 55 negative_sampler.num_negs_per_pos 34.0 +884 55 training.batch_size 2.0 +884 56 model.embedding_dim 1.0 +884 56 model.scoring_fct_norm 1.0 +884 56 loss.margin 3.738307919700669 +884 56 loss.adversarial_temperature 0.5011226745331362 +884 56 regularizer.weight 0.021130396270111585 +884 56 optimizer.lr 0.0575168009188498 +884 56 negative_sampler.num_negs_per_pos 36.0 +884 56 training.batch_size 1.0 +884 57 model.embedding_dim 0.0 +884 57 model.scoring_fct_norm 1.0 +884 57 loss.margin 27.276238691168928 +884 57 loss.adversarial_temperature 0.5665720852944224 +884 57 regularizer.weight 0.014573674848337546 +884 57 optimizer.lr 0.004972519918440311 +884 57 negative_sampler.num_negs_per_pos 90.0 +884 57 training.batch_size 0.0 +884 58 model.embedding_dim 1.0 +884 58 model.scoring_fct_norm 2.0 +884 58 loss.margin 13.020156006320864 +884 58 loss.adversarial_temperature 0.9211112866773649 +884 58 regularizer.weight 0.01622089115547021 +884 58 optimizer.lr 0.07560025472089094 +884 58 
negative_sampler.num_negs_per_pos 83.0 +884 58 training.batch_size 0.0 +884 59 model.embedding_dim 1.0 +884 59 model.scoring_fct_norm 1.0 +884 59 loss.margin 7.6512884011956155 +884 59 loss.adversarial_temperature 0.5209154301888281 +884 59 regularizer.weight 0.03291896030517661 +884 59 optimizer.lr 0.0017674250343882286 +884 59 negative_sampler.num_negs_per_pos 18.0 +884 59 training.batch_size 1.0 +884 60 model.embedding_dim 1.0 +884 60 model.scoring_fct_norm 2.0 +884 60 loss.margin 8.759569579057082 +884 60 loss.adversarial_temperature 0.9669959828601107 +884 60 regularizer.weight 0.04019398331821518 +884 60 optimizer.lr 0.007076999746117885 +884 60 negative_sampler.num_negs_per_pos 60.0 +884 60 training.batch_size 0.0 +884 61 model.embedding_dim 2.0 +884 61 model.scoring_fct_norm 1.0 +884 61 loss.margin 22.7894172083726 +884 61 loss.adversarial_temperature 0.35726026575200953 +884 61 regularizer.weight 0.027491908509842688 +884 61 optimizer.lr 0.05481368678024323 +884 61 negative_sampler.num_negs_per_pos 0.0 +884 61 training.batch_size 1.0 +884 62 model.embedding_dim 2.0 +884 62 model.scoring_fct_norm 1.0 +884 62 loss.margin 16.457975271157675 +884 62 loss.adversarial_temperature 0.2459982176301897 +884 62 regularizer.weight 0.06253928952710244 +884 62 optimizer.lr 0.008379535845152936 +884 62 negative_sampler.num_negs_per_pos 59.0 +884 62 training.batch_size 0.0 +884 63 model.embedding_dim 2.0 +884 63 model.scoring_fct_norm 2.0 +884 63 loss.margin 5.599638189403588 +884 63 loss.adversarial_temperature 0.4416895856291975 +884 63 regularizer.weight 0.03314504525525081 +884 63 optimizer.lr 0.05595129444849727 +884 63 negative_sampler.num_negs_per_pos 34.0 +884 63 training.batch_size 1.0 +884 64 model.embedding_dim 2.0 +884 64 model.scoring_fct_norm 2.0 +884 64 loss.margin 25.30591363964264 +884 64 loss.adversarial_temperature 0.24706668395166984 +884 64 regularizer.weight 0.11835338593751202 +884 64 optimizer.lr 0.0018203559925621033 +884 64 
negative_sampler.num_negs_per_pos 14.0 +884 64 training.batch_size 2.0 +884 65 model.embedding_dim 1.0 +884 65 model.scoring_fct_norm 1.0 +884 65 loss.margin 23.738254205169717 +884 65 loss.adversarial_temperature 0.17738833388027192 +884 65 regularizer.weight 0.14677684596194912 +884 65 optimizer.lr 0.008115848742966668 +884 65 negative_sampler.num_negs_per_pos 96.0 +884 65 training.batch_size 1.0 +884 66 model.embedding_dim 2.0 +884 66 model.scoring_fct_norm 1.0 +884 66 loss.margin 1.581441211392383 +884 66 loss.adversarial_temperature 0.7313949087430416 +884 66 regularizer.weight 0.0358498929840893 +884 66 optimizer.lr 0.007265700944463421 +884 66 negative_sampler.num_negs_per_pos 6.0 +884 66 training.batch_size 1.0 +884 67 model.embedding_dim 0.0 +884 67 model.scoring_fct_norm 2.0 +884 67 loss.margin 18.042620518472365 +884 67 loss.adversarial_temperature 0.7483291128240933 +884 67 regularizer.weight 0.11634630265822486 +884 67 optimizer.lr 0.05723514399766953 +884 67 negative_sampler.num_negs_per_pos 52.0 +884 67 training.batch_size 2.0 +884 68 model.embedding_dim 1.0 +884 68 model.scoring_fct_norm 2.0 +884 68 loss.margin 17.545610430340616 +884 68 loss.adversarial_temperature 0.172046608399056 +884 68 regularizer.weight 0.10453054528741741 +884 68 optimizer.lr 0.0030118554540282823 +884 68 negative_sampler.num_negs_per_pos 18.0 +884 68 training.batch_size 0.0 +884 69 model.embedding_dim 2.0 +884 69 model.scoring_fct_norm 2.0 +884 69 loss.margin 11.700028967273404 +884 69 loss.adversarial_temperature 0.8321430778613951 +884 69 regularizer.weight 0.03436106081056227 +884 69 optimizer.lr 0.00641262786650754 +884 69 negative_sampler.num_negs_per_pos 20.0 +884 69 training.batch_size 0.0 +884 70 model.embedding_dim 0.0 +884 70 model.scoring_fct_norm 1.0 +884 70 loss.margin 8.220576233886533 +884 70 loss.adversarial_temperature 0.5347042754430267 +884 70 regularizer.weight 0.015087636032341664 +884 70 optimizer.lr 0.07932625650403023 +884 70 
negative_sampler.num_negs_per_pos 26.0 +884 70 training.batch_size 2.0 +884 71 model.embedding_dim 1.0 +884 71 model.scoring_fct_norm 2.0 +884 71 loss.margin 13.909573490527478 +884 71 loss.adversarial_temperature 0.6652851922890648 +884 71 regularizer.weight 0.021334308904430405 +884 71 optimizer.lr 0.02477826867956278 +884 71 negative_sampler.num_negs_per_pos 2.0 +884 71 training.batch_size 0.0 +884 72 model.embedding_dim 1.0 +884 72 model.scoring_fct_norm 2.0 +884 72 loss.margin 13.51637433135009 +884 72 loss.adversarial_temperature 0.5382761737847256 +884 72 regularizer.weight 0.07957888002280669 +884 72 optimizer.lr 0.022074182028492626 +884 72 negative_sampler.num_negs_per_pos 57.0 +884 72 training.batch_size 2.0 +884 73 model.embedding_dim 2.0 +884 73 model.scoring_fct_norm 2.0 +884 73 loss.margin 1.6730958776348552 +884 73 loss.adversarial_temperature 0.894347258909111 +884 73 regularizer.weight 0.012002225200651218 +884 73 optimizer.lr 0.03262396020440589 +884 73 negative_sampler.num_negs_per_pos 21.0 +884 73 training.batch_size 1.0 +884 74 model.embedding_dim 0.0 +884 74 model.scoring_fct_norm 1.0 +884 74 loss.margin 14.866536258100691 +884 74 loss.adversarial_temperature 0.8226504826399379 +884 74 regularizer.weight 0.11758709616059247 +884 74 optimizer.lr 0.08900948094130434 +884 74 negative_sampler.num_negs_per_pos 8.0 +884 74 training.batch_size 1.0 +884 75 model.embedding_dim 2.0 +884 75 model.scoring_fct_norm 2.0 +884 75 loss.margin 11.506912035216267 +884 75 loss.adversarial_temperature 0.8173894682316579 +884 75 regularizer.weight 0.06685079580961995 +884 75 optimizer.lr 0.015915760126810945 +884 75 negative_sampler.num_negs_per_pos 29.0 +884 75 training.batch_size 0.0 +884 76 model.embedding_dim 0.0 +884 76 model.scoring_fct_norm 2.0 +884 76 loss.margin 17.826662757830267 +884 76 loss.adversarial_temperature 0.42138573895365683 +884 76 regularizer.weight 0.015647588817611364 +884 76 optimizer.lr 0.003509255798574614 +884 76 
negative_sampler.num_negs_per_pos 46.0 +884 76 training.batch_size 2.0 +884 77 model.embedding_dim 1.0 +884 77 model.scoring_fct_norm 1.0 +884 77 loss.margin 8.451670330128627 +884 77 loss.adversarial_temperature 0.21230639270172147 +884 77 regularizer.weight 0.011134208809191007 +884 77 optimizer.lr 0.0034017672693315826 +884 77 negative_sampler.num_negs_per_pos 62.0 +884 77 training.batch_size 2.0 +884 78 model.embedding_dim 2.0 +884 78 model.scoring_fct_norm 1.0 +884 78 loss.margin 23.685421002767963 +884 78 loss.adversarial_temperature 0.11682732498125564 +884 78 regularizer.weight 0.020525786351573483 +884 78 optimizer.lr 0.047659587427142255 +884 78 negative_sampler.num_negs_per_pos 45.0 +884 78 training.batch_size 1.0 +884 79 model.embedding_dim 1.0 +884 79 model.scoring_fct_norm 2.0 +884 79 loss.margin 16.738715942203804 +884 79 loss.adversarial_temperature 0.47034278097415516 +884 79 regularizer.weight 0.25765160293213646 +884 79 optimizer.lr 0.030084025600263183 +884 79 negative_sampler.num_negs_per_pos 34.0 +884 79 training.batch_size 1.0 +884 80 model.embedding_dim 2.0 +884 80 model.scoring_fct_norm 1.0 +884 80 loss.margin 18.09746361986552 +884 80 loss.adversarial_temperature 0.16274403463062706 +884 80 regularizer.weight 0.18235455719568974 +884 80 optimizer.lr 0.041730119389154434 +884 80 negative_sampler.num_negs_per_pos 26.0 +884 80 training.batch_size 2.0 +884 81 model.embedding_dim 1.0 +884 81 model.scoring_fct_norm 1.0 +884 81 loss.margin 19.93781830217411 +884 81 loss.adversarial_temperature 0.8297617890013348 +884 81 regularizer.weight 0.26543746063550355 +884 81 optimizer.lr 0.009372017497436793 +884 81 negative_sampler.num_negs_per_pos 84.0 +884 81 training.batch_size 1.0 +884 82 model.embedding_dim 1.0 +884 82 model.scoring_fct_norm 2.0 +884 82 loss.margin 13.999652214732734 +884 82 loss.adversarial_temperature 0.6617178080948473 +884 82 regularizer.weight 0.05693216983056799 +884 82 optimizer.lr 0.09983832261931565 +884 82 
negative_sampler.num_negs_per_pos 72.0 +884 82 training.batch_size 1.0 +884 83 model.embedding_dim 0.0 +884 83 model.scoring_fct_norm 2.0 +884 83 loss.margin 15.93274219533114 +884 83 loss.adversarial_temperature 0.38452225837679693 +884 83 regularizer.weight 0.021420474546145173 +884 83 optimizer.lr 0.032046859281261844 +884 83 negative_sampler.num_negs_per_pos 35.0 +884 83 training.batch_size 1.0 +884 84 model.embedding_dim 0.0 +884 84 model.scoring_fct_norm 1.0 +884 84 loss.margin 11.641824025731207 +884 84 loss.adversarial_temperature 0.8803067061107416 +884 84 regularizer.weight 0.20534771916854308 +884 84 optimizer.lr 0.0025385659792185686 +884 84 negative_sampler.num_negs_per_pos 93.0 +884 84 training.batch_size 0.0 +884 85 model.embedding_dim 0.0 +884 85 model.scoring_fct_norm 1.0 +884 85 loss.margin 2.9864566390172778 +884 85 loss.adversarial_temperature 0.773999784064065 +884 85 regularizer.weight 0.06884398989575005 +884 85 optimizer.lr 0.0014573464111164217 +884 85 negative_sampler.num_negs_per_pos 96.0 +884 85 training.batch_size 1.0 +884 86 model.embedding_dim 2.0 +884 86 model.scoring_fct_norm 1.0 +884 86 loss.margin 12.576472379370204 +884 86 loss.adversarial_temperature 0.1074848867923749 +884 86 regularizer.weight 0.04200422483878791 +884 86 optimizer.lr 0.001262973512812218 +884 86 negative_sampler.num_negs_per_pos 12.0 +884 86 training.batch_size 1.0 +884 87 model.embedding_dim 2.0 +884 87 model.scoring_fct_norm 2.0 +884 87 loss.margin 26.756834342899396 +884 87 loss.adversarial_temperature 0.7232633212647455 +884 87 regularizer.weight 0.0303909748558144 +884 87 optimizer.lr 0.03626443729546378 +884 87 negative_sampler.num_negs_per_pos 47.0 +884 87 training.batch_size 1.0 +884 88 model.embedding_dim 1.0 +884 88 model.scoring_fct_norm 1.0 +884 88 loss.margin 12.68294510011355 +884 88 loss.adversarial_temperature 0.7776476948477342 +884 88 regularizer.weight 0.022408295518863707 +884 88 optimizer.lr 0.039155236205826734 +884 88 
negative_sampler.num_negs_per_pos 8.0 +884 88 training.batch_size 1.0 +884 89 model.embedding_dim 0.0 +884 89 model.scoring_fct_norm 2.0 +884 89 loss.margin 22.67129533729382 +884 89 loss.adversarial_temperature 0.2828306614244891 +884 89 regularizer.weight 0.2780874860188153 +884 89 optimizer.lr 0.006582611197520196 +884 89 negative_sampler.num_negs_per_pos 17.0 +884 89 training.batch_size 1.0 +884 90 model.embedding_dim 0.0 +884 90 model.scoring_fct_norm 2.0 +884 90 loss.margin 6.679413823513213 +884 90 loss.adversarial_temperature 0.8392533892894142 +884 90 regularizer.weight 0.20517753645044673 +884 90 optimizer.lr 0.015635649707506666 +884 90 negative_sampler.num_negs_per_pos 61.0 +884 90 training.batch_size 0.0 +884 91 model.embedding_dim 1.0 +884 91 model.scoring_fct_norm 1.0 +884 91 loss.margin 12.81560946844142 +884 91 loss.adversarial_temperature 0.5875799267613047 +884 91 regularizer.weight 0.10989877989342367 +884 91 optimizer.lr 0.02081525088358549 +884 91 negative_sampler.num_negs_per_pos 22.0 +884 91 training.batch_size 0.0 +884 92 model.embedding_dim 0.0 +884 92 model.scoring_fct_norm 2.0 +884 92 loss.margin 17.555612340749356 +884 92 loss.adversarial_temperature 0.514822937115615 +884 92 regularizer.weight 0.14035095340442957 +884 92 optimizer.lr 0.0030185147972310463 +884 92 negative_sampler.num_negs_per_pos 68.0 +884 92 training.batch_size 2.0 +884 93 model.embedding_dim 1.0 +884 93 model.scoring_fct_norm 1.0 +884 93 loss.margin 17.999223355470054 +884 93 loss.adversarial_temperature 0.23406800824936141 +884 93 regularizer.weight 0.014979415694294474 +884 93 optimizer.lr 0.014544862334934502 +884 93 negative_sampler.num_negs_per_pos 55.0 +884 93 training.batch_size 1.0 +884 94 model.embedding_dim 0.0 +884 94 model.scoring_fct_norm 1.0 +884 94 loss.margin 23.666175123102114 +884 94 loss.adversarial_temperature 0.5271965071757713 +884 94 regularizer.weight 0.13074831796741024 +884 94 optimizer.lr 0.04924372394960556 +884 94 
negative_sampler.num_negs_per_pos 22.0 +884 94 training.batch_size 0.0 +884 95 model.embedding_dim 0.0 +884 95 model.scoring_fct_norm 2.0 +884 95 loss.margin 17.34410966208727 +884 95 loss.adversarial_temperature 0.5040494390504486 +884 95 regularizer.weight 0.07424993956495321 +884 95 optimizer.lr 0.00794937679215387 +884 95 negative_sampler.num_negs_per_pos 50.0 +884 95 training.batch_size 2.0 +884 96 model.embedding_dim 1.0 +884 96 model.scoring_fct_norm 1.0 +884 96 loss.margin 27.193295609766647 +884 96 loss.adversarial_temperature 0.5631020207121195 +884 96 regularizer.weight 0.01185720102912606 +884 96 optimizer.lr 0.016748850899232372 +884 96 negative_sampler.num_negs_per_pos 38.0 +884 96 training.batch_size 1.0 +884 97 model.embedding_dim 1.0 +884 97 model.scoring_fct_norm 2.0 +884 97 loss.margin 3.106245051289522 +884 97 loss.adversarial_temperature 0.8895061386827756 +884 97 regularizer.weight 0.03961199130671644 +884 97 optimizer.lr 0.09000513796645289 +884 97 negative_sampler.num_negs_per_pos 66.0 +884 97 training.batch_size 0.0 +884 98 model.embedding_dim 2.0 +884 98 model.scoring_fct_norm 2.0 +884 98 loss.margin 21.442380089425892 +884 98 loss.adversarial_temperature 0.8299429233874129 +884 98 regularizer.weight 0.10349282221691485 +884 98 optimizer.lr 0.04875538122512167 +884 98 negative_sampler.num_negs_per_pos 18.0 +884 98 training.batch_size 2.0 +884 99 model.embedding_dim 2.0 +884 99 model.scoring_fct_norm 2.0 +884 99 loss.margin 10.913106178087732 +884 99 loss.adversarial_temperature 0.26732151632458123 +884 99 regularizer.weight 0.10611625827758177 +884 99 optimizer.lr 0.006810953444627542 +884 99 negative_sampler.num_negs_per_pos 13.0 +884 99 training.batch_size 0.0 +884 100 model.embedding_dim 2.0 +884 100 model.scoring_fct_norm 1.0 +884 100 loss.margin 21.957392743655948 +884 100 loss.adversarial_temperature 0.8776464466883959 +884 100 regularizer.weight 0.097725122866446 +884 100 optimizer.lr 0.009652657424394686 +884 100 
negative_sampler.num_negs_per_pos 11.0 +884 100 training.batch_size 1.0 +884 1 dataset """wn18rr""" +884 1 model """transh""" +884 1 loss """nssa""" +884 1 regularizer """transh""" +884 1 optimizer """adam""" +884 1 training_loop """owa""" +884 1 negative_sampler """basic""" +884 1 evaluator """rankbased""" +884 2 dataset """wn18rr""" +884 2 model """transh""" +884 2 loss """nssa""" +884 2 regularizer """transh""" +884 2 optimizer """adam""" +884 2 training_loop """owa""" +884 2 negative_sampler """basic""" +884 2 evaluator """rankbased""" +884 3 dataset """wn18rr""" +884 3 model """transh""" +884 3 loss """nssa""" +884 3 regularizer """transh""" +884 3 optimizer """adam""" +884 3 training_loop """owa""" +884 3 negative_sampler """basic""" +884 3 evaluator """rankbased""" +884 4 dataset """wn18rr""" +884 4 model """transh""" +884 4 loss """nssa""" +884 4 regularizer """transh""" +884 4 optimizer """adam""" +884 4 training_loop """owa""" +884 4 negative_sampler """basic""" +884 4 evaluator """rankbased""" +884 5 dataset """wn18rr""" +884 5 model """transh""" +884 5 loss """nssa""" +884 5 regularizer """transh""" +884 5 optimizer """adam""" +884 5 training_loop """owa""" +884 5 negative_sampler """basic""" +884 5 evaluator """rankbased""" +884 6 dataset """wn18rr""" +884 6 model """transh""" +884 6 loss """nssa""" +884 6 regularizer """transh""" +884 6 optimizer """adam""" +884 6 training_loop """owa""" +884 6 negative_sampler """basic""" +884 6 evaluator """rankbased""" +884 7 dataset """wn18rr""" +884 7 model """transh""" +884 7 loss """nssa""" +884 7 regularizer """transh""" +884 7 optimizer """adam""" +884 7 training_loop """owa""" +884 7 negative_sampler """basic""" +884 7 evaluator """rankbased""" +884 8 dataset """wn18rr""" +884 8 model """transh""" +884 8 loss """nssa""" +884 8 regularizer """transh""" +884 8 optimizer """adam""" +884 8 training_loop """owa""" +884 8 negative_sampler """basic""" +884 8 evaluator """rankbased""" +884 9 dataset """wn18rr""" 
+884 9 model """transh""" +884 9 loss """nssa""" +884 9 regularizer """transh""" +884 9 optimizer """adam""" +884 9 training_loop """owa""" +884 9 negative_sampler """basic""" +884 9 evaluator """rankbased""" +884 10 dataset """wn18rr""" +884 10 model """transh""" +884 10 loss """nssa""" +884 10 regularizer """transh""" +884 10 optimizer """adam""" +884 10 training_loop """owa""" +884 10 negative_sampler """basic""" +884 10 evaluator """rankbased""" +884 11 dataset """wn18rr""" +884 11 model """transh""" +884 11 loss """nssa""" +884 11 regularizer """transh""" +884 11 optimizer """adam""" +884 11 training_loop """owa""" +884 11 negative_sampler """basic""" +884 11 evaluator """rankbased""" +884 12 dataset """wn18rr""" +884 12 model """transh""" +884 12 loss """nssa""" +884 12 regularizer """transh""" +884 12 optimizer """adam""" +884 12 training_loop """owa""" +884 12 negative_sampler """basic""" +884 12 evaluator """rankbased""" +884 13 dataset """wn18rr""" +884 13 model """transh""" +884 13 loss """nssa""" +884 13 regularizer """transh""" +884 13 optimizer """adam""" +884 13 training_loop """owa""" +884 13 negative_sampler """basic""" +884 13 evaluator """rankbased""" +884 14 dataset """wn18rr""" +884 14 model """transh""" +884 14 loss """nssa""" +884 14 regularizer """transh""" +884 14 optimizer """adam""" +884 14 training_loop """owa""" +884 14 negative_sampler """basic""" +884 14 evaluator """rankbased""" +884 15 dataset """wn18rr""" +884 15 model """transh""" +884 15 loss """nssa""" +884 15 regularizer """transh""" +884 15 optimizer """adam""" +884 15 training_loop """owa""" +884 15 negative_sampler """basic""" +884 15 evaluator """rankbased""" +884 16 dataset """wn18rr""" +884 16 model """transh""" +884 16 loss """nssa""" +884 16 regularizer """transh""" +884 16 optimizer """adam""" +884 16 training_loop """owa""" +884 16 negative_sampler """basic""" +884 16 evaluator """rankbased""" +884 17 dataset """wn18rr""" +884 17 model """transh""" +884 17 loss 
"""nssa""" +884 17 regularizer """transh""" +884 17 optimizer """adam""" +884 17 training_loop """owa""" +884 17 negative_sampler """basic""" +884 17 evaluator """rankbased""" +884 18 dataset """wn18rr""" +884 18 model """transh""" +884 18 loss """nssa""" +884 18 regularizer """transh""" +884 18 optimizer """adam""" +884 18 training_loop """owa""" +884 18 negative_sampler """basic""" +884 18 evaluator """rankbased""" +884 19 dataset """wn18rr""" +884 19 model """transh""" +884 19 loss """nssa""" +884 19 regularizer """transh""" +884 19 optimizer """adam""" +884 19 training_loop """owa""" +884 19 negative_sampler """basic""" +884 19 evaluator """rankbased""" +884 20 dataset """wn18rr""" +884 20 model """transh""" +884 20 loss """nssa""" +884 20 regularizer """transh""" +884 20 optimizer """adam""" +884 20 training_loop """owa""" +884 20 negative_sampler """basic""" +884 20 evaluator """rankbased""" +884 21 dataset """wn18rr""" +884 21 model """transh""" +884 21 loss """nssa""" +884 21 regularizer """transh""" +884 21 optimizer """adam""" +884 21 training_loop """owa""" +884 21 negative_sampler """basic""" +884 21 evaluator """rankbased""" +884 22 dataset """wn18rr""" +884 22 model """transh""" +884 22 loss """nssa""" +884 22 regularizer """transh""" +884 22 optimizer """adam""" +884 22 training_loop """owa""" +884 22 negative_sampler """basic""" +884 22 evaluator """rankbased""" +884 23 dataset """wn18rr""" +884 23 model """transh""" +884 23 loss """nssa""" +884 23 regularizer """transh""" +884 23 optimizer """adam""" +884 23 training_loop """owa""" +884 23 negative_sampler """basic""" +884 23 evaluator """rankbased""" +884 24 dataset """wn18rr""" +884 24 model """transh""" +884 24 loss """nssa""" +884 24 regularizer """transh""" +884 24 optimizer """adam""" +884 24 training_loop """owa""" +884 24 negative_sampler """basic""" +884 24 evaluator """rankbased""" +884 25 dataset """wn18rr""" +884 25 model """transh""" +884 25 loss """nssa""" +884 25 regularizer 
"""transh""" +884 25 optimizer """adam""" +884 25 training_loop """owa""" +884 25 negative_sampler """basic""" +884 25 evaluator """rankbased""" +884 26 dataset """wn18rr""" +884 26 model """transh""" +884 26 loss """nssa""" +884 26 regularizer """transh""" +884 26 optimizer """adam""" +884 26 training_loop """owa""" +884 26 negative_sampler """basic""" +884 26 evaluator """rankbased""" +884 27 dataset """wn18rr""" +884 27 model """transh""" +884 27 loss """nssa""" +884 27 regularizer """transh""" +884 27 optimizer """adam""" +884 27 training_loop """owa""" +884 27 negative_sampler """basic""" +884 27 evaluator """rankbased""" +884 28 dataset """wn18rr""" +884 28 model """transh""" +884 28 loss """nssa""" +884 28 regularizer """transh""" +884 28 optimizer """adam""" +884 28 training_loop """owa""" +884 28 negative_sampler """basic""" +884 28 evaluator """rankbased""" +884 29 dataset """wn18rr""" +884 29 model """transh""" +884 29 loss """nssa""" +884 29 regularizer """transh""" +884 29 optimizer """adam""" +884 29 training_loop """owa""" +884 29 negative_sampler """basic""" +884 29 evaluator """rankbased""" +884 30 dataset """wn18rr""" +884 30 model """transh""" +884 30 loss """nssa""" +884 30 regularizer """transh""" +884 30 optimizer """adam""" +884 30 training_loop """owa""" +884 30 negative_sampler """basic""" +884 30 evaluator """rankbased""" +884 31 dataset """wn18rr""" +884 31 model """transh""" +884 31 loss """nssa""" +884 31 regularizer """transh""" +884 31 optimizer """adam""" +884 31 training_loop """owa""" +884 31 negative_sampler """basic""" +884 31 evaluator """rankbased""" +884 32 dataset """wn18rr""" +884 32 model """transh""" +884 32 loss """nssa""" +884 32 regularizer """transh""" +884 32 optimizer """adam""" +884 32 training_loop """owa""" +884 32 negative_sampler """basic""" +884 32 evaluator """rankbased""" +884 33 dataset """wn18rr""" +884 33 model """transh""" +884 33 loss """nssa""" +884 33 regularizer """transh""" +884 33 optimizer 
"""adam""" +884 33 training_loop """owa""" +884 33 negative_sampler """basic""" +884 33 evaluator """rankbased""" +884 34 dataset """wn18rr""" +884 34 model """transh""" +884 34 loss """nssa""" +884 34 regularizer """transh""" +884 34 optimizer """adam""" +884 34 training_loop """owa""" +884 34 negative_sampler """basic""" +884 34 evaluator """rankbased""" +884 35 dataset """wn18rr""" +884 35 model """transh""" +884 35 loss """nssa""" +884 35 regularizer """transh""" +884 35 optimizer """adam""" +884 35 training_loop """owa""" +884 35 negative_sampler """basic""" +884 35 evaluator """rankbased""" +884 36 dataset """wn18rr""" +884 36 model """transh""" +884 36 loss """nssa""" +884 36 regularizer """transh""" +884 36 optimizer """adam""" +884 36 training_loop """owa""" +884 36 negative_sampler """basic""" +884 36 evaluator """rankbased""" +884 37 dataset """wn18rr""" +884 37 model """transh""" +884 37 loss """nssa""" +884 37 regularizer """transh""" +884 37 optimizer """adam""" +884 37 training_loop """owa""" +884 37 negative_sampler """basic""" +884 37 evaluator """rankbased""" +884 38 dataset """wn18rr""" +884 38 model """transh""" +884 38 loss """nssa""" +884 38 regularizer """transh""" +884 38 optimizer """adam""" +884 38 training_loop """owa""" +884 38 negative_sampler """basic""" +884 38 evaluator """rankbased""" +884 39 dataset """wn18rr""" +884 39 model """transh""" +884 39 loss """nssa""" +884 39 regularizer """transh""" +884 39 optimizer """adam""" +884 39 training_loop """owa""" +884 39 negative_sampler """basic""" +884 39 evaluator """rankbased""" +884 40 dataset """wn18rr""" +884 40 model """transh""" +884 40 loss """nssa""" +884 40 regularizer """transh""" +884 40 optimizer """adam""" +884 40 training_loop """owa""" +884 40 negative_sampler """basic""" +884 40 evaluator """rankbased""" +884 41 dataset """wn18rr""" +884 41 model """transh""" +884 41 loss """nssa""" +884 41 regularizer """transh""" +884 41 optimizer """adam""" +884 41 training_loop 
"""owa""" +884 41 negative_sampler """basic""" +884 41 evaluator """rankbased""" +884 42 dataset """wn18rr""" +884 42 model """transh""" +884 42 loss """nssa""" +884 42 regularizer """transh""" +884 42 optimizer """adam""" +884 42 training_loop """owa""" +884 42 negative_sampler """basic""" +884 42 evaluator """rankbased""" +884 43 dataset """wn18rr""" +884 43 model """transh""" +884 43 loss """nssa""" +884 43 regularizer """transh""" +884 43 optimizer """adam""" +884 43 training_loop """owa""" +884 43 negative_sampler """basic""" +884 43 evaluator """rankbased""" +884 44 dataset """wn18rr""" +884 44 model """transh""" +884 44 loss """nssa""" +884 44 regularizer """transh""" +884 44 optimizer """adam""" +884 44 training_loop """owa""" +884 44 negative_sampler """basic""" +884 44 evaluator """rankbased""" +884 45 dataset """wn18rr""" +884 45 model """transh""" +884 45 loss """nssa""" +884 45 regularizer """transh""" +884 45 optimizer """adam""" +884 45 training_loop """owa""" +884 45 negative_sampler """basic""" +884 45 evaluator """rankbased""" +884 46 dataset """wn18rr""" +884 46 model """transh""" +884 46 loss """nssa""" +884 46 regularizer """transh""" +884 46 optimizer """adam""" +884 46 training_loop """owa""" +884 46 negative_sampler """basic""" +884 46 evaluator """rankbased""" +884 47 dataset """wn18rr""" +884 47 model """transh""" +884 47 loss """nssa""" +884 47 regularizer """transh""" +884 47 optimizer """adam""" +884 47 training_loop """owa""" +884 47 negative_sampler """basic""" +884 47 evaluator """rankbased""" +884 48 dataset """wn18rr""" +884 48 model """transh""" +884 48 loss """nssa""" +884 48 regularizer """transh""" +884 48 optimizer """adam""" +884 48 training_loop """owa""" +884 48 negative_sampler """basic""" +884 48 evaluator """rankbased""" +884 49 dataset """wn18rr""" +884 49 model """transh""" +884 49 loss """nssa""" +884 49 regularizer """transh""" +884 49 optimizer """adam""" +884 49 training_loop """owa""" +884 49 negative_sampler 
"""basic""" +884 49 evaluator """rankbased""" +884 50 dataset """wn18rr""" +884 50 model """transh""" +884 50 loss """nssa""" +884 50 regularizer """transh""" +884 50 optimizer """adam""" +884 50 training_loop """owa""" +884 50 negative_sampler """basic""" +884 50 evaluator """rankbased""" +884 51 dataset """wn18rr""" +884 51 model """transh""" +884 51 loss """nssa""" +884 51 regularizer """transh""" +884 51 optimizer """adam""" +884 51 training_loop """owa""" +884 51 negative_sampler """basic""" +884 51 evaluator """rankbased""" +884 52 dataset """wn18rr""" +884 52 model """transh""" +884 52 loss """nssa""" +884 52 regularizer """transh""" +884 52 optimizer """adam""" +884 52 training_loop """owa""" +884 52 negative_sampler """basic""" +884 52 evaluator """rankbased""" +884 53 dataset """wn18rr""" +884 53 model """transh""" +884 53 loss """nssa""" +884 53 regularizer """transh""" +884 53 optimizer """adam""" +884 53 training_loop """owa""" +884 53 negative_sampler """basic""" +884 53 evaluator """rankbased""" +884 54 dataset """wn18rr""" +884 54 model """transh""" +884 54 loss """nssa""" +884 54 regularizer """transh""" +884 54 optimizer """adam""" +884 54 training_loop """owa""" +884 54 negative_sampler """basic""" +884 54 evaluator """rankbased""" +884 55 dataset """wn18rr""" +884 55 model """transh""" +884 55 loss """nssa""" +884 55 regularizer """transh""" +884 55 optimizer """adam""" +884 55 training_loop """owa""" +884 55 negative_sampler """basic""" +884 55 evaluator """rankbased""" +884 56 dataset """wn18rr""" +884 56 model """transh""" +884 56 loss """nssa""" +884 56 regularizer """transh""" +884 56 optimizer """adam""" +884 56 training_loop """owa""" +884 56 negative_sampler """basic""" +884 56 evaluator """rankbased""" +884 57 dataset """wn18rr""" +884 57 model """transh""" +884 57 loss """nssa""" +884 57 regularizer """transh""" +884 57 optimizer """adam""" +884 57 training_loop """owa""" +884 57 negative_sampler """basic""" +884 57 evaluator 
"""rankbased""" +884 58 dataset """wn18rr""" +884 58 model """transh""" +884 58 loss """nssa""" +884 58 regularizer """transh""" +884 58 optimizer """adam""" +884 58 training_loop """owa""" +884 58 negative_sampler """basic""" +884 58 evaluator """rankbased""" +884 59 dataset """wn18rr""" +884 59 model """transh""" +884 59 loss """nssa""" +884 59 regularizer """transh""" +884 59 optimizer """adam""" +884 59 training_loop """owa""" +884 59 negative_sampler """basic""" +884 59 evaluator """rankbased""" +884 60 dataset """wn18rr""" +884 60 model """transh""" +884 60 loss """nssa""" +884 60 regularizer """transh""" +884 60 optimizer """adam""" +884 60 training_loop """owa""" +884 60 negative_sampler """basic""" +884 60 evaluator """rankbased""" +884 61 dataset """wn18rr""" +884 61 model """transh""" +884 61 loss """nssa""" +884 61 regularizer """transh""" +884 61 optimizer """adam""" +884 61 training_loop """owa""" +884 61 negative_sampler """basic""" +884 61 evaluator """rankbased""" +884 62 dataset """wn18rr""" +884 62 model """transh""" +884 62 loss """nssa""" +884 62 regularizer """transh""" +884 62 optimizer """adam""" +884 62 training_loop """owa""" +884 62 negative_sampler """basic""" +884 62 evaluator """rankbased""" +884 63 dataset """wn18rr""" +884 63 model """transh""" +884 63 loss """nssa""" +884 63 regularizer """transh""" +884 63 optimizer """adam""" +884 63 training_loop """owa""" +884 63 negative_sampler """basic""" +884 63 evaluator """rankbased""" +884 64 dataset """wn18rr""" +884 64 model """transh""" +884 64 loss """nssa""" +884 64 regularizer """transh""" +884 64 optimizer """adam""" +884 64 training_loop """owa""" +884 64 negative_sampler """basic""" +884 64 evaluator """rankbased""" +884 65 dataset """wn18rr""" +884 65 model """transh""" +884 65 loss """nssa""" +884 65 regularizer """transh""" +884 65 optimizer """adam""" +884 65 training_loop """owa""" +884 65 negative_sampler """basic""" +884 65 evaluator """rankbased""" +884 66 dataset 
"""wn18rr""" +884 66 model """transh""" +884 66 loss """nssa""" +884 66 regularizer """transh""" +884 66 optimizer """adam""" +884 66 training_loop """owa""" +884 66 negative_sampler """basic""" +884 66 evaluator """rankbased""" +884 67 dataset """wn18rr""" +884 67 model """transh""" +884 67 loss """nssa""" +884 67 regularizer """transh""" +884 67 optimizer """adam""" +884 67 training_loop """owa""" +884 67 negative_sampler """basic""" +884 67 evaluator """rankbased""" +884 68 dataset """wn18rr""" +884 68 model """transh""" +884 68 loss """nssa""" +884 68 regularizer """transh""" +884 68 optimizer """adam""" +884 68 training_loop """owa""" +884 68 negative_sampler """basic""" +884 68 evaluator """rankbased""" +884 69 dataset """wn18rr""" +884 69 model """transh""" +884 69 loss """nssa""" +884 69 regularizer """transh""" +884 69 optimizer """adam""" +884 69 training_loop """owa""" +884 69 negative_sampler """basic""" +884 69 evaluator """rankbased""" +884 70 dataset """wn18rr""" +884 70 model """transh""" +884 70 loss """nssa""" +884 70 regularizer """transh""" +884 70 optimizer """adam""" +884 70 training_loop """owa""" +884 70 negative_sampler """basic""" +884 70 evaluator """rankbased""" +884 71 dataset """wn18rr""" +884 71 model """transh""" +884 71 loss """nssa""" +884 71 regularizer """transh""" +884 71 optimizer """adam""" +884 71 training_loop """owa""" +884 71 negative_sampler """basic""" +884 71 evaluator """rankbased""" +884 72 dataset """wn18rr""" +884 72 model """transh""" +884 72 loss """nssa""" +884 72 regularizer """transh""" +884 72 optimizer """adam""" +884 72 training_loop """owa""" +884 72 negative_sampler """basic""" +884 72 evaluator """rankbased""" +884 73 dataset """wn18rr""" +884 73 model """transh""" +884 73 loss """nssa""" +884 73 regularizer """transh""" +884 73 optimizer """adam""" +884 73 training_loop """owa""" +884 73 negative_sampler """basic""" +884 73 evaluator """rankbased""" +884 74 dataset """wn18rr""" +884 74 model """transh""" 
+884 74 loss """nssa""" +884 74 regularizer """transh""" +884 74 optimizer """adam""" +884 74 training_loop """owa""" +884 74 negative_sampler """basic""" +884 74 evaluator """rankbased""" +884 75 dataset """wn18rr""" +884 75 model """transh""" +884 75 loss """nssa""" +884 75 regularizer """transh""" +884 75 optimizer """adam""" +884 75 training_loop """owa""" +884 75 negative_sampler """basic""" +884 75 evaluator """rankbased""" +884 76 dataset """wn18rr""" +884 76 model """transh""" +884 76 loss """nssa""" +884 76 regularizer """transh""" +884 76 optimizer """adam""" +884 76 training_loop """owa""" +884 76 negative_sampler """basic""" +884 76 evaluator """rankbased""" +884 77 dataset """wn18rr""" +884 77 model """transh""" +884 77 loss """nssa""" +884 77 regularizer """transh""" +884 77 optimizer """adam""" +884 77 training_loop """owa""" +884 77 negative_sampler """basic""" +884 77 evaluator """rankbased""" +884 78 dataset """wn18rr""" +884 78 model """transh""" +884 78 loss """nssa""" +884 78 regularizer """transh""" +884 78 optimizer """adam""" +884 78 training_loop """owa""" +884 78 negative_sampler """basic""" +884 78 evaluator """rankbased""" +884 79 dataset """wn18rr""" +884 79 model """transh""" +884 79 loss """nssa""" +884 79 regularizer """transh""" +884 79 optimizer """adam""" +884 79 training_loop """owa""" +884 79 negative_sampler """basic""" +884 79 evaluator """rankbased""" +884 80 dataset """wn18rr""" +884 80 model """transh""" +884 80 loss """nssa""" +884 80 regularizer """transh""" +884 80 optimizer """adam""" +884 80 training_loop """owa""" +884 80 negative_sampler """basic""" +884 80 evaluator """rankbased""" +884 81 dataset """wn18rr""" +884 81 model """transh""" +884 81 loss """nssa""" +884 81 regularizer """transh""" +884 81 optimizer """adam""" +884 81 training_loop """owa""" +884 81 negative_sampler """basic""" +884 81 evaluator """rankbased""" +884 82 dataset """wn18rr""" +884 82 model """transh""" +884 82 loss """nssa""" +884 82 
regularizer """transh""" +884 82 optimizer """adam""" +884 82 training_loop """owa""" +884 82 negative_sampler """basic""" +884 82 evaluator """rankbased""" +884 83 dataset """wn18rr""" +884 83 model """transh""" +884 83 loss """nssa""" +884 83 regularizer """transh""" +884 83 optimizer """adam""" +884 83 training_loop """owa""" +884 83 negative_sampler """basic""" +884 83 evaluator """rankbased""" +884 84 dataset """wn18rr""" +884 84 model """transh""" +884 84 loss """nssa""" +884 84 regularizer """transh""" +884 84 optimizer """adam""" +884 84 training_loop """owa""" +884 84 negative_sampler """basic""" +884 84 evaluator """rankbased""" +884 85 dataset """wn18rr""" +884 85 model """transh""" +884 85 loss """nssa""" +884 85 regularizer """transh""" +884 85 optimizer """adam""" +884 85 training_loop """owa""" +884 85 negative_sampler """basic""" +884 85 evaluator """rankbased""" +884 86 dataset """wn18rr""" +884 86 model """transh""" +884 86 loss """nssa""" +884 86 regularizer """transh""" +884 86 optimizer """adam""" +884 86 training_loop """owa""" +884 86 negative_sampler """basic""" +884 86 evaluator """rankbased""" +884 87 dataset """wn18rr""" +884 87 model """transh""" +884 87 loss """nssa""" +884 87 regularizer """transh""" +884 87 optimizer """adam""" +884 87 training_loop """owa""" +884 87 negative_sampler """basic""" +884 87 evaluator """rankbased""" +884 88 dataset """wn18rr""" +884 88 model """transh""" +884 88 loss """nssa""" +884 88 regularizer """transh""" +884 88 optimizer """adam""" +884 88 training_loop """owa""" +884 88 negative_sampler """basic""" +884 88 evaluator """rankbased""" +884 89 dataset """wn18rr""" +884 89 model """transh""" +884 89 loss """nssa""" +884 89 regularizer """transh""" +884 89 optimizer """adam""" +884 89 training_loop """owa""" +884 89 negative_sampler """basic""" +884 89 evaluator """rankbased""" +884 90 dataset """wn18rr""" +884 90 model """transh""" +884 90 loss """nssa""" +884 90 regularizer """transh""" +884 90 
optimizer """adam""" +884 90 training_loop """owa""" +884 90 negative_sampler """basic""" +884 90 evaluator """rankbased""" +884 91 dataset """wn18rr""" +884 91 model """transh""" +884 91 loss """nssa""" +884 91 regularizer """transh""" +884 91 optimizer """adam""" +884 91 training_loop """owa""" +884 91 negative_sampler """basic""" +884 91 evaluator """rankbased""" +884 92 dataset """wn18rr""" +884 92 model """transh""" +884 92 loss """nssa""" +884 92 regularizer """transh""" +884 92 optimizer """adam""" +884 92 training_loop """owa""" +884 92 negative_sampler """basic""" +884 92 evaluator """rankbased""" +884 93 dataset """wn18rr""" +884 93 model """transh""" +884 93 loss """nssa""" +884 93 regularizer """transh""" +884 93 optimizer """adam""" +884 93 training_loop """owa""" +884 93 negative_sampler """basic""" +884 93 evaluator """rankbased""" +884 94 dataset """wn18rr""" +884 94 model """transh""" +884 94 loss """nssa""" +884 94 regularizer """transh""" +884 94 optimizer """adam""" +884 94 training_loop """owa""" +884 94 negative_sampler """basic""" +884 94 evaluator """rankbased""" +884 95 dataset """wn18rr""" +884 95 model """transh""" +884 95 loss """nssa""" +884 95 regularizer """transh""" +884 95 optimizer """adam""" +884 95 training_loop """owa""" +884 95 negative_sampler """basic""" +884 95 evaluator """rankbased""" +884 96 dataset """wn18rr""" +884 96 model """transh""" +884 96 loss """nssa""" +884 96 regularizer """transh""" +884 96 optimizer """adam""" +884 96 training_loop """owa""" +884 96 negative_sampler """basic""" +884 96 evaluator """rankbased""" +884 97 dataset """wn18rr""" +884 97 model """transh""" +884 97 loss """nssa""" +884 97 regularizer """transh""" +884 97 optimizer """adam""" +884 97 training_loop """owa""" +884 97 negative_sampler """basic""" +884 97 evaluator """rankbased""" +884 98 dataset """wn18rr""" +884 98 model """transh""" +884 98 loss """nssa""" +884 98 regularizer """transh""" +884 98 optimizer """adam""" +884 98 
training_loop """owa""" +884 98 negative_sampler """basic""" +884 98 evaluator """rankbased""" +884 99 dataset """wn18rr""" +884 99 model """transh""" +884 99 loss """nssa""" +884 99 regularizer """transh""" +884 99 optimizer """adam""" +884 99 training_loop """owa""" +884 99 negative_sampler """basic""" +884 99 evaluator """rankbased""" +884 100 dataset """wn18rr""" +884 100 model """transh""" +884 100 loss """nssa""" +884 100 regularizer """transh""" +884 100 optimizer """adam""" +884 100 training_loop """owa""" +884 100 negative_sampler """basic""" +884 100 evaluator """rankbased""" +885 1 model.embedding_dim 0.0 +885 1 model.scoring_fct_norm 1.0 +885 1 loss.margin 6.800058164580814 +885 1 regularizer.weight 0.022149342435954702 +885 1 optimizer.lr 0.031341083955737134 +885 1 negative_sampler.num_negs_per_pos 85.0 +885 1 training.batch_size 1.0 +885 2 model.embedding_dim 1.0 +885 2 model.scoring_fct_norm 1.0 +885 2 loss.margin 5.354555185991145 +885 2 regularizer.weight 0.010229758839174846 +885 2 optimizer.lr 0.0180078302158055 +885 2 negative_sampler.num_negs_per_pos 46.0 +885 2 training.batch_size 0.0 +885 3 model.embedding_dim 2.0 +885 3 model.scoring_fct_norm 1.0 +885 3 loss.margin 5.692834015827767 +885 3 regularizer.weight 0.027101956047353704 +885 3 optimizer.lr 0.006458568606656652 +885 3 negative_sampler.num_negs_per_pos 24.0 +885 3 training.batch_size 1.0 +885 4 model.embedding_dim 2.0 +885 4 model.scoring_fct_norm 1.0 +885 4 loss.margin 3.8866497582400954 +885 4 regularizer.weight 0.2752809041704412 +885 4 optimizer.lr 0.004513514546866101 +885 4 negative_sampler.num_negs_per_pos 37.0 +885 4 training.batch_size 1.0 +885 5 model.embedding_dim 2.0 +885 5 model.scoring_fct_norm 1.0 +885 5 loss.margin 6.690745673441714 +885 5 regularizer.weight 0.01802514690970966 +885 5 optimizer.lr 0.0012137633526641536 +885 5 negative_sampler.num_negs_per_pos 2.0 +885 5 training.batch_size 0.0 +885 6 model.embedding_dim 2.0 +885 6 model.scoring_fct_norm 1.0 +885 6 
loss.margin 3.513690111262467 +885 6 regularizer.weight 0.023048359761811583 +885 6 optimizer.lr 0.00926734393803942 +885 6 negative_sampler.num_negs_per_pos 95.0 +885 6 training.batch_size 2.0 +885 7 model.embedding_dim 0.0 +885 7 model.scoring_fct_norm 2.0 +885 7 loss.margin 2.4581393324708807 +885 7 regularizer.weight 0.09697401269689451 +885 7 optimizer.lr 0.001583730566850799 +885 7 negative_sampler.num_negs_per_pos 65.0 +885 7 training.batch_size 0.0 +885 8 model.embedding_dim 2.0 +885 8 model.scoring_fct_norm 1.0 +885 8 loss.margin 5.808661381821781 +885 8 regularizer.weight 0.03127420663083934 +885 8 optimizer.lr 0.06343466036214138 +885 8 negative_sampler.num_negs_per_pos 82.0 +885 8 training.batch_size 1.0 +885 9 model.embedding_dim 0.0 +885 9 model.scoring_fct_norm 2.0 +885 9 loss.margin 2.715558999947582 +885 9 regularizer.weight 0.16589684233403085 +885 9 optimizer.lr 0.001872485776253779 +885 9 negative_sampler.num_negs_per_pos 8.0 +885 9 training.batch_size 1.0 +885 10 model.embedding_dim 1.0 +885 10 model.scoring_fct_norm 1.0 +885 10 loss.margin 6.103194401495359 +885 10 regularizer.weight 0.04908423352213934 +885 10 optimizer.lr 0.03110649669678218 +885 10 negative_sampler.num_negs_per_pos 77.0 +885 10 training.batch_size 0.0 +885 11 model.embedding_dim 0.0 +885 11 model.scoring_fct_norm 1.0 +885 11 loss.margin 1.90501275641347 +885 11 regularizer.weight 0.17554282741747101 +885 11 optimizer.lr 0.0032993591806753037 +885 11 negative_sampler.num_negs_per_pos 92.0 +885 11 training.batch_size 0.0 +885 12 model.embedding_dim 2.0 +885 12 model.scoring_fct_norm 2.0 +885 12 loss.margin 9.093302385781142 +885 12 regularizer.weight 0.07095928343998018 +885 12 optimizer.lr 0.007975970681913231 +885 12 negative_sampler.num_negs_per_pos 70.0 +885 12 training.batch_size 0.0 +885 13 model.embedding_dim 1.0 +885 13 model.scoring_fct_norm 2.0 +885 13 loss.margin 6.79539999495831 +885 13 regularizer.weight 0.09876412561870473 +885 13 optimizer.lr 
0.026252514540777778 +885 13 negative_sampler.num_negs_per_pos 25.0 +885 13 training.batch_size 0.0 +885 14 model.embedding_dim 0.0 +885 14 model.scoring_fct_norm 1.0 +885 14 loss.margin 6.236148162280682 +885 14 regularizer.weight 0.022881668204046298 +885 14 optimizer.lr 0.0021096796020977438 +885 14 negative_sampler.num_negs_per_pos 35.0 +885 14 training.batch_size 1.0 +885 15 model.embedding_dim 2.0 +885 15 model.scoring_fct_norm 1.0 +885 15 loss.margin 6.630470731936455 +885 15 regularizer.weight 0.027020846219878596 +885 15 optimizer.lr 0.006059440087557019 +885 15 negative_sampler.num_negs_per_pos 27.0 +885 15 training.batch_size 2.0 +885 16 model.embedding_dim 0.0 +885 16 model.scoring_fct_norm 2.0 +885 16 loss.margin 1.5739376629450648 +885 16 regularizer.weight 0.04216600755221151 +885 16 optimizer.lr 0.006439867531461071 +885 16 negative_sampler.num_negs_per_pos 60.0 +885 16 training.batch_size 0.0 +885 17 model.embedding_dim 0.0 +885 17 model.scoring_fct_norm 2.0 +885 17 loss.margin 1.5834129067685532 +885 17 regularizer.weight 0.09560815459089968 +885 17 optimizer.lr 0.001315609075417346 +885 17 negative_sampler.num_negs_per_pos 24.0 +885 17 training.batch_size 0.0 +885 18 model.embedding_dim 0.0 +885 18 model.scoring_fct_norm 2.0 +885 18 loss.margin 6.524922786926404 +885 18 regularizer.weight 0.011037735259526001 +885 18 optimizer.lr 0.0014547104768359745 +885 18 negative_sampler.num_negs_per_pos 2.0 +885 18 training.batch_size 0.0 +885 19 model.embedding_dim 2.0 +885 19 model.scoring_fct_norm 1.0 +885 19 loss.margin 6.420657773608199 +885 19 regularizer.weight 0.2532076658836417 +885 19 optimizer.lr 0.06458120874851754 +885 19 negative_sampler.num_negs_per_pos 40.0 +885 19 training.batch_size 1.0 +885 20 model.embedding_dim 1.0 +885 20 model.scoring_fct_norm 2.0 +885 20 loss.margin 3.465153717711558 +885 20 regularizer.weight 0.1943528090821582 +885 20 optimizer.lr 0.002928978500025487 +885 20 negative_sampler.num_negs_per_pos 31.0 +885 20 
training.batch_size 1.0 +885 21 model.embedding_dim 0.0 +885 21 model.scoring_fct_norm 1.0 +885 21 loss.margin 1.1490775955661774 +885 21 regularizer.weight 0.21394428820537276 +885 21 optimizer.lr 0.003678177542787899 +885 21 negative_sampler.num_negs_per_pos 56.0 +885 21 training.batch_size 1.0 +885 22 model.embedding_dim 2.0 +885 22 model.scoring_fct_norm 2.0 +885 22 loss.margin 9.944515744881393 +885 22 regularizer.weight 0.030619786174584603 +885 22 optimizer.lr 0.0033595316322311297 +885 22 negative_sampler.num_negs_per_pos 80.0 +885 22 training.batch_size 0.0 +885 23 model.embedding_dim 0.0 +885 23 model.scoring_fct_norm 1.0 +885 23 loss.margin 4.58417992728098 +885 23 regularizer.weight 0.05905535610129456 +885 23 optimizer.lr 0.009320555049749072 +885 23 negative_sampler.num_negs_per_pos 57.0 +885 23 training.batch_size 2.0 +885 24 model.embedding_dim 1.0 +885 24 model.scoring_fct_norm 1.0 +885 24 loss.margin 5.36710131116171 +885 24 regularizer.weight 0.25343605843052297 +885 24 optimizer.lr 0.027330440418572976 +885 24 negative_sampler.num_negs_per_pos 75.0 +885 24 training.batch_size 2.0 +885 25 model.embedding_dim 2.0 +885 25 model.scoring_fct_norm 2.0 +885 25 loss.margin 0.8119371717155945 +885 25 regularizer.weight 0.031491239645912265 +885 25 optimizer.lr 0.04506394214770969 +885 25 negative_sampler.num_negs_per_pos 62.0 +885 25 training.batch_size 1.0 +885 26 model.embedding_dim 1.0 +885 26 model.scoring_fct_norm 2.0 +885 26 loss.margin 4.274017497438505 +885 26 regularizer.weight 0.1562705122383408 +885 26 optimizer.lr 0.0015904480674984991 +885 26 negative_sampler.num_negs_per_pos 52.0 +885 26 training.batch_size 1.0 +885 27 model.embedding_dim 0.0 +885 27 model.scoring_fct_norm 2.0 +885 27 loss.margin 0.5104524531472913 +885 27 regularizer.weight 0.10061310534086838 +885 27 optimizer.lr 0.016941401188968336 +885 27 negative_sampler.num_negs_per_pos 71.0 +885 27 training.batch_size 2.0 +885 28 model.embedding_dim 0.0 +885 28 
model.scoring_fct_norm 2.0 +885 28 loss.margin 8.918687723259822 +885 28 regularizer.weight 0.014445687286554395 +885 28 optimizer.lr 0.03556853723862582 +885 28 negative_sampler.num_negs_per_pos 39.0 +885 28 training.batch_size 0.0 +885 29 model.embedding_dim 1.0 +885 29 model.scoring_fct_norm 2.0 +885 29 loss.margin 2.9763713116145802 +885 29 regularizer.weight 0.01474835437425041 +885 29 optimizer.lr 0.0018639618194294955 +885 29 negative_sampler.num_negs_per_pos 31.0 +885 29 training.batch_size 0.0 +885 30 model.embedding_dim 2.0 +885 30 model.scoring_fct_norm 1.0 +885 30 loss.margin 2.4212781063926507 +885 30 regularizer.weight 0.0347043783200433 +885 30 optimizer.lr 0.003699385842945326 +885 30 negative_sampler.num_negs_per_pos 25.0 +885 30 training.batch_size 0.0 +885 31 model.embedding_dim 0.0 +885 31 model.scoring_fct_norm 2.0 +885 31 loss.margin 5.958105074082937 +885 31 regularizer.weight 0.028850003355231735 +885 31 optimizer.lr 0.00182151607561264 +885 31 negative_sampler.num_negs_per_pos 85.0 +885 31 training.batch_size 1.0 +885 32 model.embedding_dim 2.0 +885 32 model.scoring_fct_norm 2.0 +885 32 loss.margin 1.6870920653168606 +885 32 regularizer.weight 0.02665731805453698 +885 32 optimizer.lr 0.0621097092597524 +885 32 negative_sampler.num_negs_per_pos 11.0 +885 32 training.batch_size 1.0 +885 33 model.embedding_dim 1.0 +885 33 model.scoring_fct_norm 2.0 +885 33 loss.margin 9.68991407511536 +885 33 regularizer.weight 0.01942763034939987 +885 33 optimizer.lr 0.05251100033922488 +885 33 negative_sampler.num_negs_per_pos 97.0 +885 33 training.batch_size 1.0 +885 34 model.embedding_dim 0.0 +885 34 model.scoring_fct_norm 1.0 +885 34 loss.margin 7.585986732262785 +885 34 regularizer.weight 0.018493318791736928 +885 34 optimizer.lr 0.0025702179704768426 +885 34 negative_sampler.num_negs_per_pos 84.0 +885 34 training.batch_size 0.0 +885 35 model.embedding_dim 1.0 +885 35 model.scoring_fct_norm 2.0 +885 35 loss.margin 0.6718890641334576 +885 35 
regularizer.weight 0.07179945619420786 +885 35 optimizer.lr 0.011731422824217214 +885 35 negative_sampler.num_negs_per_pos 80.0 +885 35 training.batch_size 0.0 +885 36 model.embedding_dim 0.0 +885 36 model.scoring_fct_norm 1.0 +885 36 loss.margin 1.557394522517551 +885 36 regularizer.weight 0.017668551610645233 +885 36 optimizer.lr 0.023575779457336406 +885 36 negative_sampler.num_negs_per_pos 71.0 +885 36 training.batch_size 2.0 +885 37 model.embedding_dim 2.0 +885 37 model.scoring_fct_norm 1.0 +885 37 loss.margin 7.848847146558481 +885 37 regularizer.weight 0.019682227596295277 +885 37 optimizer.lr 0.005553932626818673 +885 37 negative_sampler.num_negs_per_pos 53.0 +885 37 training.batch_size 2.0 +885 38 model.embedding_dim 0.0 +885 38 model.scoring_fct_norm 1.0 +885 38 loss.margin 9.328974755972093 +885 38 regularizer.weight 0.04175812073427525 +885 38 optimizer.lr 0.0027298503722226783 +885 38 negative_sampler.num_negs_per_pos 96.0 +885 38 training.batch_size 2.0 +885 39 model.embedding_dim 0.0 +885 39 model.scoring_fct_norm 1.0 +885 39 loss.margin 2.4238974908289337 +885 39 regularizer.weight 0.2619872886313704 +885 39 optimizer.lr 0.0032259687998636823 +885 39 negative_sampler.num_negs_per_pos 32.0 +885 39 training.batch_size 2.0 +885 40 model.embedding_dim 1.0 +885 40 model.scoring_fct_norm 2.0 +885 40 loss.margin 2.897888067312019 +885 40 regularizer.weight 0.017757542901387854 +885 40 optimizer.lr 0.0068318870503645025 +885 40 negative_sampler.num_negs_per_pos 31.0 +885 40 training.batch_size 1.0 +885 41 model.embedding_dim 1.0 +885 41 model.scoring_fct_norm 2.0 +885 41 loss.margin 2.823116689110071 +885 41 regularizer.weight 0.039367840413082555 +885 41 optimizer.lr 0.002858839253785374 +885 41 negative_sampler.num_negs_per_pos 71.0 +885 41 training.batch_size 1.0 +885 42 model.embedding_dim 0.0 +885 42 model.scoring_fct_norm 2.0 +885 42 loss.margin 0.8458472428351087 +885 42 regularizer.weight 0.011732310716848477 +885 42 optimizer.lr 
0.011469976453364998 +885 42 negative_sampler.num_negs_per_pos 7.0 +885 42 training.batch_size 0.0 +885 43 model.embedding_dim 0.0 +885 43 model.scoring_fct_norm 2.0 +885 43 loss.margin 6.459190417640091 +885 43 regularizer.weight 0.14529521839796838 +885 43 optimizer.lr 0.0021040792632119397 +885 43 negative_sampler.num_negs_per_pos 59.0 +885 43 training.batch_size 1.0 +885 44 model.embedding_dim 2.0 +885 44 model.scoring_fct_norm 1.0 +885 44 loss.margin 5.132651747046105 +885 44 regularizer.weight 0.026178949265300764 +885 44 optimizer.lr 0.005156322793974953 +885 44 negative_sampler.num_negs_per_pos 8.0 +885 44 training.batch_size 1.0 +885 45 model.embedding_dim 1.0 +885 45 model.scoring_fct_norm 2.0 +885 45 loss.margin 1.0100462518167728 +885 45 regularizer.weight 0.01212466617086663 +885 45 optimizer.lr 0.018202510330412477 +885 45 negative_sampler.num_negs_per_pos 30.0 +885 45 training.batch_size 2.0 +885 46 model.embedding_dim 1.0 +885 46 model.scoring_fct_norm 1.0 +885 46 loss.margin 3.352860291245161 +885 46 regularizer.weight 0.037322715092660816 +885 46 optimizer.lr 0.07458821418264908 +885 46 negative_sampler.num_negs_per_pos 18.0 +885 46 training.batch_size 0.0 +885 47 model.embedding_dim 2.0 +885 47 model.scoring_fct_norm 1.0 +885 47 loss.margin 8.187349197130704 +885 47 regularizer.weight 0.11807177561541309 +885 47 optimizer.lr 0.0011021225577221281 +885 47 negative_sampler.num_negs_per_pos 99.0 +885 47 training.batch_size 0.0 +885 48 model.embedding_dim 2.0 +885 48 model.scoring_fct_norm 1.0 +885 48 loss.margin 9.398458084205021 +885 48 regularizer.weight 0.030861532847536524 +885 48 optimizer.lr 0.04729528651829725 +885 48 negative_sampler.num_negs_per_pos 87.0 +885 48 training.batch_size 0.0 +885 49 model.embedding_dim 1.0 +885 49 model.scoring_fct_norm 1.0 +885 49 loss.margin 3.1641438061206113 +885 49 regularizer.weight 0.02736842209285809 +885 49 optimizer.lr 0.0024553423130286977 +885 49 negative_sampler.num_negs_per_pos 54.0 +885 49 
training.batch_size 2.0 +885 50 model.embedding_dim 2.0 +885 50 model.scoring_fct_norm 2.0 +885 50 loss.margin 0.5216653837985075 +885 50 regularizer.weight 0.011452269753518181 +885 50 optimizer.lr 0.010428090504783492 +885 50 negative_sampler.num_negs_per_pos 25.0 +885 50 training.batch_size 1.0 +885 1 dataset """wn18rr""" +885 1 model """transh""" +885 1 loss """marginranking""" +885 1 regularizer """transh""" +885 1 optimizer """adam""" +885 1 training_loop """owa""" +885 1 negative_sampler """basic""" +885 1 evaluator """rankbased""" +885 2 dataset """wn18rr""" +885 2 model """transh""" +885 2 loss """marginranking""" +885 2 regularizer """transh""" +885 2 optimizer """adam""" +885 2 training_loop """owa""" +885 2 negative_sampler """basic""" +885 2 evaluator """rankbased""" +885 3 dataset """wn18rr""" +885 3 model """transh""" +885 3 loss """marginranking""" +885 3 regularizer """transh""" +885 3 optimizer """adam""" +885 3 training_loop """owa""" +885 3 negative_sampler """basic""" +885 3 evaluator """rankbased""" +885 4 dataset """wn18rr""" +885 4 model """transh""" +885 4 loss """marginranking""" +885 4 regularizer """transh""" +885 4 optimizer """adam""" +885 4 training_loop """owa""" +885 4 negative_sampler """basic""" +885 4 evaluator """rankbased""" +885 5 dataset """wn18rr""" +885 5 model """transh""" +885 5 loss """marginranking""" +885 5 regularizer """transh""" +885 5 optimizer """adam""" +885 5 training_loop """owa""" +885 5 negative_sampler """basic""" +885 5 evaluator """rankbased""" +885 6 dataset """wn18rr""" +885 6 model """transh""" +885 6 loss """marginranking""" +885 6 regularizer """transh""" +885 6 optimizer """adam""" +885 6 training_loop """owa""" +885 6 negative_sampler """basic""" +885 6 evaluator """rankbased""" +885 7 dataset """wn18rr""" +885 7 model """transh""" +885 7 loss """marginranking""" +885 7 regularizer """transh""" +885 7 optimizer """adam""" +885 7 training_loop """owa""" +885 7 negative_sampler """basic""" +885 7 
evaluator """rankbased""" +885 8 dataset """wn18rr""" +885 8 model """transh""" +885 8 loss """marginranking""" +885 8 regularizer """transh""" +885 8 optimizer """adam""" +885 8 training_loop """owa""" +885 8 negative_sampler """basic""" +885 8 evaluator """rankbased""" +885 9 dataset """wn18rr""" +885 9 model """transh""" +885 9 loss """marginranking""" +885 9 regularizer """transh""" +885 9 optimizer """adam""" +885 9 training_loop """owa""" +885 9 negative_sampler """basic""" +885 9 evaluator """rankbased""" +885 10 dataset """wn18rr""" +885 10 model """transh""" +885 10 loss """marginranking""" +885 10 regularizer """transh""" +885 10 optimizer """adam""" +885 10 training_loop """owa""" +885 10 negative_sampler """basic""" +885 10 evaluator """rankbased""" +885 11 dataset """wn18rr""" +885 11 model """transh""" +885 11 loss """marginranking""" +885 11 regularizer """transh""" +885 11 optimizer """adam""" +885 11 training_loop """owa""" +885 11 negative_sampler """basic""" +885 11 evaluator """rankbased""" +885 12 dataset """wn18rr""" +885 12 model """transh""" +885 12 loss """marginranking""" +885 12 regularizer """transh""" +885 12 optimizer """adam""" +885 12 training_loop """owa""" +885 12 negative_sampler """basic""" +885 12 evaluator """rankbased""" +885 13 dataset """wn18rr""" +885 13 model """transh""" +885 13 loss """marginranking""" +885 13 regularizer """transh""" +885 13 optimizer """adam""" +885 13 training_loop """owa""" +885 13 negative_sampler """basic""" +885 13 evaluator """rankbased""" +885 14 dataset """wn18rr""" +885 14 model """transh""" +885 14 loss """marginranking""" +885 14 regularizer """transh""" +885 14 optimizer """adam""" +885 14 training_loop """owa""" +885 14 negative_sampler """basic""" +885 14 evaluator """rankbased""" +885 15 dataset """wn18rr""" +885 15 model """transh""" +885 15 loss """marginranking""" +885 15 regularizer """transh""" +885 15 optimizer """adam""" +885 15 training_loop """owa""" +885 15 negative_sampler 
"""basic""" +885 15 evaluator """rankbased""" +885 16 dataset """wn18rr""" +885 16 model """transh""" +885 16 loss """marginranking""" +885 16 regularizer """transh""" +885 16 optimizer """adam""" +885 16 training_loop """owa""" +885 16 negative_sampler """basic""" +885 16 evaluator """rankbased""" +885 17 dataset """wn18rr""" +885 17 model """transh""" +885 17 loss """marginranking""" +885 17 regularizer """transh""" +885 17 optimizer """adam""" +885 17 training_loop """owa""" +885 17 negative_sampler """basic""" +885 17 evaluator """rankbased""" +885 18 dataset """wn18rr""" +885 18 model """transh""" +885 18 loss """marginranking""" +885 18 regularizer """transh""" +885 18 optimizer """adam""" +885 18 training_loop """owa""" +885 18 negative_sampler """basic""" +885 18 evaluator """rankbased""" +885 19 dataset """wn18rr""" +885 19 model """transh""" +885 19 loss """marginranking""" +885 19 regularizer """transh""" +885 19 optimizer """adam""" +885 19 training_loop """owa""" +885 19 negative_sampler """basic""" +885 19 evaluator """rankbased""" +885 20 dataset """wn18rr""" +885 20 model """transh""" +885 20 loss """marginranking""" +885 20 regularizer """transh""" +885 20 optimizer """adam""" +885 20 training_loop """owa""" +885 20 negative_sampler """basic""" +885 20 evaluator """rankbased""" +885 21 dataset """wn18rr""" +885 21 model """transh""" +885 21 loss """marginranking""" +885 21 regularizer """transh""" +885 21 optimizer """adam""" +885 21 training_loop """owa""" +885 21 negative_sampler """basic""" +885 21 evaluator """rankbased""" +885 22 dataset """wn18rr""" +885 22 model """transh""" +885 22 loss """marginranking""" +885 22 regularizer """transh""" +885 22 optimizer """adam""" +885 22 training_loop """owa""" +885 22 negative_sampler """basic""" +885 22 evaluator """rankbased""" +885 23 dataset """wn18rr""" +885 23 model """transh""" +885 23 loss """marginranking""" +885 23 regularizer """transh""" +885 23 optimizer """adam""" +885 23 training_loop 
"""owa""" +885 23 negative_sampler """basic""" +885 23 evaluator """rankbased""" +885 24 dataset """wn18rr""" +885 24 model """transh""" +885 24 loss """marginranking""" +885 24 regularizer """transh""" +885 24 optimizer """adam""" +885 24 training_loop """owa""" +885 24 negative_sampler """basic""" +885 24 evaluator """rankbased""" +885 25 dataset """wn18rr""" +885 25 model """transh""" +885 25 loss """marginranking""" +885 25 regularizer """transh""" +885 25 optimizer """adam""" +885 25 training_loop """owa""" +885 25 negative_sampler """basic""" +885 25 evaluator """rankbased""" +885 26 dataset """wn18rr""" +885 26 model """transh""" +885 26 loss """marginranking""" +885 26 regularizer """transh""" +885 26 optimizer """adam""" +885 26 training_loop """owa""" +885 26 negative_sampler """basic""" +885 26 evaluator """rankbased""" +885 27 dataset """wn18rr""" +885 27 model """transh""" +885 27 loss """marginranking""" +885 27 regularizer """transh""" +885 27 optimizer """adam""" +885 27 training_loop """owa""" +885 27 negative_sampler """basic""" +885 27 evaluator """rankbased""" +885 28 dataset """wn18rr""" +885 28 model """transh""" +885 28 loss """marginranking""" +885 28 regularizer """transh""" +885 28 optimizer """adam""" +885 28 training_loop """owa""" +885 28 negative_sampler """basic""" +885 28 evaluator """rankbased""" +885 29 dataset """wn18rr""" +885 29 model """transh""" +885 29 loss """marginranking""" +885 29 regularizer """transh""" +885 29 optimizer """adam""" +885 29 training_loop """owa""" +885 29 negative_sampler """basic""" +885 29 evaluator """rankbased""" +885 30 dataset """wn18rr""" +885 30 model """transh""" +885 30 loss """marginranking""" +885 30 regularizer """transh""" +885 30 optimizer """adam""" +885 30 training_loop """owa""" +885 30 negative_sampler """basic""" +885 30 evaluator """rankbased""" +885 31 dataset """wn18rr""" +885 31 model """transh""" +885 31 loss """marginranking""" +885 31 regularizer """transh""" +885 31 optimizer 
"""adam""" +885 31 training_loop """owa""" +885 31 negative_sampler """basic""" +885 31 evaluator """rankbased""" +885 32 dataset """wn18rr""" +885 32 model """transh""" +885 32 loss """marginranking""" +885 32 regularizer """transh""" +885 32 optimizer """adam""" +885 32 training_loop """owa""" +885 32 negative_sampler """basic""" +885 32 evaluator """rankbased""" +885 33 dataset """wn18rr""" +885 33 model """transh""" +885 33 loss """marginranking""" +885 33 regularizer """transh""" +885 33 optimizer """adam""" +885 33 training_loop """owa""" +885 33 negative_sampler """basic""" +885 33 evaluator """rankbased""" +885 34 dataset """wn18rr""" +885 34 model """transh""" +885 34 loss """marginranking""" +885 34 regularizer """transh""" +885 34 optimizer """adam""" +885 34 training_loop """owa""" +885 34 negative_sampler """basic""" +885 34 evaluator """rankbased""" +885 35 dataset """wn18rr""" +885 35 model """transh""" +885 35 loss """marginranking""" +885 35 regularizer """transh""" +885 35 optimizer """adam""" +885 35 training_loop """owa""" +885 35 negative_sampler """basic""" +885 35 evaluator """rankbased""" +885 36 dataset """wn18rr""" +885 36 model """transh""" +885 36 loss """marginranking""" +885 36 regularizer """transh""" +885 36 optimizer """adam""" +885 36 training_loop """owa""" +885 36 negative_sampler """basic""" +885 36 evaluator """rankbased""" +885 37 dataset """wn18rr""" +885 37 model """transh""" +885 37 loss """marginranking""" +885 37 regularizer """transh""" +885 37 optimizer """adam""" +885 37 training_loop """owa""" +885 37 negative_sampler """basic""" +885 37 evaluator """rankbased""" +885 38 dataset """wn18rr""" +885 38 model """transh""" +885 38 loss """marginranking""" +885 38 regularizer """transh""" +885 38 optimizer """adam""" +885 38 training_loop """owa""" +885 38 negative_sampler """basic""" +885 38 evaluator """rankbased""" +885 39 dataset """wn18rr""" +885 39 model """transh""" +885 39 loss """marginranking""" +885 39 
regularizer """transh""" +885 39 optimizer """adam""" +885 39 training_loop """owa""" +885 39 negative_sampler """basic""" +885 39 evaluator """rankbased""" +885 40 dataset """wn18rr""" +885 40 model """transh""" +885 40 loss """marginranking""" +885 40 regularizer """transh""" +885 40 optimizer """adam""" +885 40 training_loop """owa""" +885 40 negative_sampler """basic""" +885 40 evaluator """rankbased""" +885 41 dataset """wn18rr""" +885 41 model """transh""" +885 41 loss """marginranking""" +885 41 regularizer """transh""" +885 41 optimizer """adam""" +885 41 training_loop """owa""" +885 41 negative_sampler """basic""" +885 41 evaluator """rankbased""" +885 42 dataset """wn18rr""" +885 42 model """transh""" +885 42 loss """marginranking""" +885 42 regularizer """transh""" +885 42 optimizer """adam""" +885 42 training_loop """owa""" +885 42 negative_sampler """basic""" +885 42 evaluator """rankbased""" +885 43 dataset """wn18rr""" +885 43 model """transh""" +885 43 loss """marginranking""" +885 43 regularizer """transh""" +885 43 optimizer """adam""" +885 43 training_loop """owa""" +885 43 negative_sampler """basic""" +885 43 evaluator """rankbased""" +885 44 dataset """wn18rr""" +885 44 model """transh""" +885 44 loss """marginranking""" +885 44 regularizer """transh""" +885 44 optimizer """adam""" +885 44 training_loop """owa""" +885 44 negative_sampler """basic""" +885 44 evaluator """rankbased""" +885 45 dataset """wn18rr""" +885 45 model """transh""" +885 45 loss """marginranking""" +885 45 regularizer """transh""" +885 45 optimizer """adam""" +885 45 training_loop """owa""" +885 45 negative_sampler """basic""" +885 45 evaluator """rankbased""" +885 46 dataset """wn18rr""" +885 46 model """transh""" +885 46 loss """marginranking""" +885 46 regularizer """transh""" +885 46 optimizer """adam""" +885 46 training_loop """owa""" +885 46 negative_sampler """basic""" +885 46 evaluator """rankbased""" +885 47 dataset """wn18rr""" +885 47 model """transh""" +885 47 
loss """marginranking""" +885 47 regularizer """transh""" +885 47 optimizer """adam""" +885 47 training_loop """owa""" +885 47 negative_sampler """basic""" +885 47 evaluator """rankbased""" +885 48 dataset """wn18rr""" +885 48 model """transh""" +885 48 loss """marginranking""" +885 48 regularizer """transh""" +885 48 optimizer """adam""" +885 48 training_loop """owa""" +885 48 negative_sampler """basic""" +885 48 evaluator """rankbased""" +885 49 dataset """wn18rr""" +885 49 model """transh""" +885 49 loss """marginranking""" +885 49 regularizer """transh""" +885 49 optimizer """adam""" +885 49 training_loop """owa""" +885 49 negative_sampler """basic""" +885 49 evaluator """rankbased""" +885 50 dataset """wn18rr""" +885 50 model """transh""" +885 50 loss """marginranking""" +885 50 regularizer """transh""" +885 50 optimizer """adam""" +885 50 training_loop """owa""" +885 50 negative_sampler """basic""" +885 50 evaluator """rankbased""" +886 1 model.embedding_dim 0.0 +886 1 model.scoring_fct_norm 2.0 +886 1 loss.margin 4.92973749067809 +886 1 regularizer.weight 0.20374051123582396 +886 1 optimizer.lr 0.007797050004220391 +886 1 negative_sampler.num_negs_per_pos 28.0 +886 1 training.batch_size 2.0 +886 2 model.embedding_dim 0.0 +886 2 model.scoring_fct_norm 1.0 +886 2 loss.margin 3.719624647834024 +886 2 regularizer.weight 0.157332569560841 +886 2 optimizer.lr 0.013744437850376913 +886 2 negative_sampler.num_negs_per_pos 68.0 +886 2 training.batch_size 1.0 +886 3 model.embedding_dim 2.0 +886 3 model.scoring_fct_norm 1.0 +886 3 loss.margin 6.317193640117343 +886 3 regularizer.weight 0.012706697380628903 +886 3 optimizer.lr 0.003784697312309583 +886 3 negative_sampler.num_negs_per_pos 99.0 +886 3 training.batch_size 0.0 +886 4 model.embedding_dim 1.0 +886 4 model.scoring_fct_norm 1.0 +886 4 loss.margin 3.846478901484688 +886 4 regularizer.weight 0.09101989641861112 +886 4 optimizer.lr 0.001932493789708279 +886 4 negative_sampler.num_negs_per_pos 31.0 +886 4 
training.batch_size 0.0 +886 5 model.embedding_dim 1.0 +886 5 model.scoring_fct_norm 1.0 +886 5 loss.margin 7.181226686751129 +886 5 regularizer.weight 0.04471447535726843 +886 5 optimizer.lr 0.019377038118105597 +886 5 negative_sampler.num_negs_per_pos 91.0 +886 5 training.batch_size 0.0 +886 6 model.embedding_dim 2.0 +886 6 model.scoring_fct_norm 2.0 +886 6 loss.margin 9.280256227910805 +886 6 regularizer.weight 0.04443983126354159 +886 6 optimizer.lr 0.007044127196307333 +886 6 negative_sampler.num_negs_per_pos 83.0 +886 6 training.batch_size 0.0 +886 7 model.embedding_dim 1.0 +886 7 model.scoring_fct_norm 2.0 +886 7 loss.margin 6.284916937502395 +886 7 regularizer.weight 0.014163022803165299 +886 7 optimizer.lr 0.0011500245963045055 +886 7 negative_sampler.num_negs_per_pos 19.0 +886 7 training.batch_size 2.0 +886 8 model.embedding_dim 1.0 +886 8 model.scoring_fct_norm 2.0 +886 8 loss.margin 2.1040783314953813 +886 8 regularizer.weight 0.032319247729148765 +886 8 optimizer.lr 0.030720574745434572 +886 8 negative_sampler.num_negs_per_pos 96.0 +886 8 training.batch_size 0.0 +886 9 model.embedding_dim 2.0 +886 9 model.scoring_fct_norm 2.0 +886 9 loss.margin 6.145815467402301 +886 9 regularizer.weight 0.056106976103646046 +886 9 optimizer.lr 0.0015576472257863997 +886 9 negative_sampler.num_negs_per_pos 36.0 +886 9 training.batch_size 0.0 +886 10 model.embedding_dim 0.0 +886 10 model.scoring_fct_norm 2.0 +886 10 loss.margin 5.833279919387805 +886 10 regularizer.weight 0.11025298700075113 +886 10 optimizer.lr 0.09119858701426904 +886 10 negative_sampler.num_negs_per_pos 43.0 +886 10 training.batch_size 1.0 +886 11 model.embedding_dim 2.0 +886 11 model.scoring_fct_norm 2.0 +886 11 loss.margin 5.571963629783982 +886 11 regularizer.weight 0.22087819580329449 +886 11 optimizer.lr 0.049826222302342474 +886 11 negative_sampler.num_negs_per_pos 12.0 +886 11 training.batch_size 1.0 +886 12 model.embedding_dim 2.0 +886 12 model.scoring_fct_norm 2.0 +886 12 loss.margin 
9.943865740216731 +886 12 regularizer.weight 0.04042463394364997 +886 12 optimizer.lr 0.002333056919189569 +886 12 negative_sampler.num_negs_per_pos 24.0 +886 12 training.batch_size 1.0 +886 13 model.embedding_dim 1.0 +886 13 model.scoring_fct_norm 2.0 +886 13 loss.margin 8.247351494967468 +886 13 regularizer.weight 0.1392701273794757 +886 13 optimizer.lr 0.060337316692334664 +886 13 negative_sampler.num_negs_per_pos 31.0 +886 13 training.batch_size 1.0 +886 14 model.embedding_dim 2.0 +886 14 model.scoring_fct_norm 2.0 +886 14 loss.margin 1.965916717997533 +886 14 regularizer.weight 0.07451606374375612 +886 14 optimizer.lr 0.02669036852687815 +886 14 negative_sampler.num_negs_per_pos 42.0 +886 14 training.batch_size 0.0 +886 15 model.embedding_dim 0.0 +886 15 model.scoring_fct_norm 1.0 +886 15 loss.margin 6.80581947747056 +886 15 regularizer.weight 0.01251152868734885 +886 15 optimizer.lr 0.0018386140725204624 +886 15 negative_sampler.num_negs_per_pos 83.0 +886 15 training.batch_size 1.0 +886 16 model.embedding_dim 0.0 +886 16 model.scoring_fct_norm 1.0 +886 16 loss.margin 7.482695757345547 +886 16 regularizer.weight 0.16655140795634704 +886 16 optimizer.lr 0.01833013411739896 +886 16 negative_sampler.num_negs_per_pos 91.0 +886 16 training.batch_size 2.0 +886 17 model.embedding_dim 1.0 +886 17 model.scoring_fct_norm 1.0 +886 17 loss.margin 4.602225165609748 +886 17 regularizer.weight 0.0405608435655913 +886 17 optimizer.lr 0.002043791415521652 +886 17 negative_sampler.num_negs_per_pos 5.0 +886 17 training.batch_size 2.0 +886 18 model.embedding_dim 0.0 +886 18 model.scoring_fct_norm 2.0 +886 18 loss.margin 2.1227197932658703 +886 18 regularizer.weight 0.024532211902445734 +886 18 optimizer.lr 0.008497888036130394 +886 18 negative_sampler.num_negs_per_pos 49.0 +886 18 training.batch_size 1.0 +886 19 model.embedding_dim 2.0 +886 19 model.scoring_fct_norm 1.0 +886 19 loss.margin 3.7899533427494965 +886 19 regularizer.weight 0.07977694230331821 +886 19 optimizer.lr 
0.07832146253050462 +886 19 negative_sampler.num_negs_per_pos 18.0 +886 19 training.batch_size 1.0 +886 20 model.embedding_dim 0.0 +886 20 model.scoring_fct_norm 2.0 +886 20 loss.margin 4.901786540681949 +886 20 regularizer.weight 0.13954605857005614 +886 20 optimizer.lr 0.0012674529467693338 +886 20 negative_sampler.num_negs_per_pos 44.0 +886 20 training.batch_size 0.0 +886 21 model.embedding_dim 1.0 +886 21 model.scoring_fct_norm 1.0 +886 21 loss.margin 8.022796938482113 +886 21 regularizer.weight 0.07722072152877474 +886 21 optimizer.lr 0.002264400827996176 +886 21 negative_sampler.num_negs_per_pos 79.0 +886 21 training.batch_size 2.0 +886 22 model.embedding_dim 1.0 +886 22 model.scoring_fct_norm 2.0 +886 22 loss.margin 0.5625782236463182 +886 22 regularizer.weight 0.03698265688780867 +886 22 optimizer.lr 0.005640887837989108 +886 22 negative_sampler.num_negs_per_pos 85.0 +886 22 training.batch_size 0.0 +886 23 model.embedding_dim 0.0 +886 23 model.scoring_fct_norm 2.0 +886 23 loss.margin 7.424578962550069 +886 23 regularizer.weight 0.21139121522383886 +886 23 optimizer.lr 0.005619947617949333 +886 23 negative_sampler.num_negs_per_pos 16.0 +886 23 training.batch_size 2.0 +886 24 model.embedding_dim 0.0 +886 24 model.scoring_fct_norm 1.0 +886 24 loss.margin 5.783062064843335 +886 24 regularizer.weight 0.27451341501133375 +886 24 optimizer.lr 0.003380836503254136 +886 24 negative_sampler.num_negs_per_pos 99.0 +886 24 training.batch_size 0.0 +886 25 model.embedding_dim 0.0 +886 25 model.scoring_fct_norm 1.0 +886 25 loss.margin 7.097055310719549 +886 25 regularizer.weight 0.29416428834689007 +886 25 optimizer.lr 0.0018811885554298575 +886 25 negative_sampler.num_negs_per_pos 66.0 +886 25 training.batch_size 2.0 +886 26 model.embedding_dim 0.0 +886 26 model.scoring_fct_norm 1.0 +886 26 loss.margin 8.801477907030414 +886 26 regularizer.weight 0.12489087339998732 +886 26 optimizer.lr 0.0012217036722492107 +886 26 negative_sampler.num_negs_per_pos 0.0 +886 26 
training.batch_size 1.0 +886 27 model.embedding_dim 0.0 +886 27 model.scoring_fct_norm 1.0 +886 27 loss.margin 4.377184879755826 +886 27 regularizer.weight 0.1726054375121643 +886 27 optimizer.lr 0.03361750673024217 +886 27 negative_sampler.num_negs_per_pos 22.0 +886 27 training.batch_size 1.0 +886 28 model.embedding_dim 1.0 +886 28 model.scoring_fct_norm 1.0 +886 28 loss.margin 3.6669610158343744 +886 28 regularizer.weight 0.25709748612259287 +886 28 optimizer.lr 0.003987391533529634 +886 28 negative_sampler.num_negs_per_pos 29.0 +886 28 training.batch_size 0.0 +886 29 model.embedding_dim 1.0 +886 29 model.scoring_fct_norm 2.0 +886 29 loss.margin 7.624804414355123 +886 29 regularizer.weight 0.06453959610581228 +886 29 optimizer.lr 0.023791878696930505 +886 29 negative_sampler.num_negs_per_pos 28.0 +886 29 training.batch_size 1.0 +886 30 model.embedding_dim 1.0 +886 30 model.scoring_fct_norm 2.0 +886 30 loss.margin 7.511996881791675 +886 30 regularizer.weight 0.02197940287762647 +886 30 optimizer.lr 0.01918107136097012 +886 30 negative_sampler.num_negs_per_pos 13.0 +886 30 training.batch_size 2.0 +886 31 model.embedding_dim 1.0 +886 31 model.scoring_fct_norm 1.0 +886 31 loss.margin 7.41068594779178 +886 31 regularizer.weight 0.04851237581844238 +886 31 optimizer.lr 0.021774567177306466 +886 31 negative_sampler.num_negs_per_pos 39.0 +886 31 training.batch_size 2.0 +886 32 model.embedding_dim 1.0 +886 32 model.scoring_fct_norm 2.0 +886 32 loss.margin 9.35197829249115 +886 32 regularizer.weight 0.05690479600837755 +886 32 optimizer.lr 0.006512693084317983 +886 32 negative_sampler.num_negs_per_pos 20.0 +886 32 training.batch_size 1.0 +886 33 model.embedding_dim 2.0 +886 33 model.scoring_fct_norm 1.0 +886 33 loss.margin 5.69731994332439 +886 33 regularizer.weight 0.04790085551238875 +886 33 optimizer.lr 0.0025793082085074563 +886 33 negative_sampler.num_negs_per_pos 43.0 +886 33 training.batch_size 1.0 +886 34 model.embedding_dim 2.0 +886 34 model.scoring_fct_norm 2.0 
+886 34 loss.margin 2.5002268511231036 +886 34 regularizer.weight 0.2379698829887456 +886 34 optimizer.lr 0.004901416875262552 +886 34 negative_sampler.num_negs_per_pos 22.0 +886 34 training.batch_size 1.0 +886 35 model.embedding_dim 1.0 +886 35 model.scoring_fct_norm 2.0 +886 35 loss.margin 3.0647654717658988 +886 35 regularizer.weight 0.02913978183779922 +886 35 optimizer.lr 0.04635453536609869 +886 35 negative_sampler.num_negs_per_pos 99.0 +886 35 training.batch_size 1.0 +886 36 model.embedding_dim 2.0 +886 36 model.scoring_fct_norm 1.0 +886 36 loss.margin 2.468485107642731 +886 36 regularizer.weight 0.04376601342109928 +886 36 optimizer.lr 0.024219868868913138 +886 36 negative_sampler.num_negs_per_pos 8.0 +886 36 training.batch_size 2.0 +886 37 model.embedding_dim 0.0 +886 37 model.scoring_fct_norm 1.0 +886 37 loss.margin 3.379480511377917 +886 37 regularizer.weight 0.03133790634488939 +886 37 optimizer.lr 0.0021388441347303566 +886 37 negative_sampler.num_negs_per_pos 99.0 +886 37 training.batch_size 1.0 +886 38 model.embedding_dim 2.0 +886 38 model.scoring_fct_norm 2.0 +886 38 loss.margin 9.152880926955229 +886 38 regularizer.weight 0.09332659892658848 +886 38 optimizer.lr 0.06951959868268785 +886 38 negative_sampler.num_negs_per_pos 42.0 +886 38 training.batch_size 0.0 +886 39 model.embedding_dim 2.0 +886 39 model.scoring_fct_norm 1.0 +886 39 loss.margin 7.819132957213591 +886 39 regularizer.weight 0.017749179693641856 +886 39 optimizer.lr 0.005117279667752156 +886 39 negative_sampler.num_negs_per_pos 14.0 +886 39 training.batch_size 1.0 +886 40 model.embedding_dim 2.0 +886 40 model.scoring_fct_norm 1.0 +886 40 loss.margin 4.554268295233957 +886 40 regularizer.weight 0.18442184438370043 +886 40 optimizer.lr 0.0010335815765536323 +886 40 negative_sampler.num_negs_per_pos 87.0 +886 40 training.batch_size 1.0 +886 41 model.embedding_dim 0.0 +886 41 model.scoring_fct_norm 2.0 +886 41 loss.margin 7.392503305045732 +886 41 regularizer.weight 0.044515986790643815 
+886 41 optimizer.lr 0.002143116950584718 +886 41 negative_sampler.num_negs_per_pos 85.0 +886 41 training.batch_size 2.0 +886 42 model.embedding_dim 1.0 +886 42 model.scoring_fct_norm 1.0 +886 42 loss.margin 9.42687344131685 +886 42 regularizer.weight 0.15958267849465915 +886 42 optimizer.lr 0.0046469518512440786 +886 42 negative_sampler.num_negs_per_pos 71.0 +886 42 training.batch_size 1.0 +886 43 model.embedding_dim 1.0 +886 43 model.scoring_fct_norm 2.0 +886 43 loss.margin 5.223388343972266 +886 43 regularizer.weight 0.031066541476683782 +886 43 optimizer.lr 0.0012054611041692393 +886 43 negative_sampler.num_negs_per_pos 82.0 +886 43 training.batch_size 0.0 +886 44 model.embedding_dim 0.0 +886 44 model.scoring_fct_norm 2.0 +886 44 loss.margin 7.326964628252318 +886 44 regularizer.weight 0.021321130122593117 +886 44 optimizer.lr 0.039239481729720284 +886 44 negative_sampler.num_negs_per_pos 84.0 +886 44 training.batch_size 0.0 +886 45 model.embedding_dim 0.0 +886 45 model.scoring_fct_norm 2.0 +886 45 loss.margin 5.254681981703243 +886 45 regularizer.weight 0.010940692285818867 +886 45 optimizer.lr 0.01580114530272336 +886 45 negative_sampler.num_negs_per_pos 56.0 +886 45 training.batch_size 0.0 +886 46 model.embedding_dim 0.0 +886 46 model.scoring_fct_norm 1.0 +886 46 loss.margin 7.455726003355403 +886 46 regularizer.weight 0.014573696622483933 +886 46 optimizer.lr 0.0094813365200673 +886 46 negative_sampler.num_negs_per_pos 68.0 +886 46 training.batch_size 2.0 +886 47 model.embedding_dim 1.0 +886 47 model.scoring_fct_norm 2.0 +886 47 loss.margin 8.904502819106646 +886 47 regularizer.weight 0.2123546212784166 +886 47 optimizer.lr 0.02431942344385164 +886 47 negative_sampler.num_negs_per_pos 22.0 +886 47 training.batch_size 2.0 +886 48 model.embedding_dim 2.0 +886 48 model.scoring_fct_norm 1.0 +886 48 loss.margin 0.5634353084748098 +886 48 regularizer.weight 0.11597937916100522 +886 48 optimizer.lr 0.002285238790052718 +886 48 negative_sampler.num_negs_per_pos 
47.0 +886 48 training.batch_size 2.0 +886 49 model.embedding_dim 2.0 +886 49 model.scoring_fct_norm 1.0 +886 49 loss.margin 0.8386193181712369 +886 49 regularizer.weight 0.10238921235186896 +886 49 optimizer.lr 0.026322916699710465 +886 49 negative_sampler.num_negs_per_pos 71.0 +886 49 training.batch_size 2.0 +886 50 model.embedding_dim 0.0 +886 50 model.scoring_fct_norm 2.0 +886 50 loss.margin 1.9546971365941368 +886 50 regularizer.weight 0.11402829529689391 +886 50 optimizer.lr 0.005194128536088834 +886 50 negative_sampler.num_negs_per_pos 82.0 +886 50 training.batch_size 2.0 +886 51 model.embedding_dim 2.0 +886 51 model.scoring_fct_norm 2.0 +886 51 loss.margin 5.477935065280705 +886 51 regularizer.weight 0.03056979617477454 +886 51 optimizer.lr 0.03359456157730437 +886 51 negative_sampler.num_negs_per_pos 86.0 +886 51 training.batch_size 1.0 +886 52 model.embedding_dim 0.0 +886 52 model.scoring_fct_norm 1.0 +886 52 loss.margin 2.9938225371816176 +886 52 regularizer.weight 0.20321098353298822 +886 52 optimizer.lr 0.021174558210976954 +886 52 negative_sampler.num_negs_per_pos 81.0 +886 52 training.batch_size 2.0 +886 53 model.embedding_dim 0.0 +886 53 model.scoring_fct_norm 2.0 +886 53 loss.margin 4.719190691553079 +886 53 regularizer.weight 0.03499258343276712 +886 53 optimizer.lr 0.012420361542707172 +886 53 negative_sampler.num_negs_per_pos 86.0 +886 53 training.batch_size 2.0 +886 54 model.embedding_dim 0.0 +886 54 model.scoring_fct_norm 1.0 +886 54 loss.margin 9.52985167266915 +886 54 regularizer.weight 0.03830656859317186 +886 54 optimizer.lr 0.08440005271744856 +886 54 negative_sampler.num_negs_per_pos 89.0 +886 54 training.batch_size 2.0 +886 55 model.embedding_dim 1.0 +886 55 model.scoring_fct_norm 2.0 +886 55 loss.margin 0.5300854776512826 +886 55 regularizer.weight 0.1007232776665197 +886 55 optimizer.lr 0.08816031866701074 +886 55 negative_sampler.num_negs_per_pos 22.0 +886 55 training.batch_size 2.0 +886 56 model.embedding_dim 2.0 +886 56 
model.scoring_fct_norm 2.0 +886 56 loss.margin 3.8184243363226558 +886 56 regularizer.weight 0.04364943210501131 +886 56 optimizer.lr 0.09232757863169001 +886 56 negative_sampler.num_negs_per_pos 94.0 +886 56 training.batch_size 0.0 +886 57 model.embedding_dim 2.0 +886 57 model.scoring_fct_norm 1.0 +886 57 loss.margin 2.3703977689922153 +886 57 regularizer.weight 0.016804520583168833 +886 57 optimizer.lr 0.007871292373593805 +886 57 negative_sampler.num_negs_per_pos 15.0 +886 57 training.batch_size 1.0 +886 58 model.embedding_dim 1.0 +886 58 model.scoring_fct_norm 2.0 +886 58 loss.margin 3.154089936782421 +886 58 regularizer.weight 0.03326433536055319 +886 58 optimizer.lr 0.06672332426334239 +886 58 negative_sampler.num_negs_per_pos 16.0 +886 58 training.batch_size 0.0 +886 59 model.embedding_dim 0.0 +886 59 model.scoring_fct_norm 1.0 +886 59 loss.margin 4.7585001272013745 +886 59 regularizer.weight 0.10028230350770648 +886 59 optimizer.lr 0.00831761975708991 +886 59 negative_sampler.num_negs_per_pos 6.0 +886 59 training.batch_size 1.0 +886 60 model.embedding_dim 2.0 +886 60 model.scoring_fct_norm 2.0 +886 60 loss.margin 4.256803466884941 +886 60 regularizer.weight 0.12882101132066492 +886 60 optimizer.lr 0.002086293054446331 +886 60 negative_sampler.num_negs_per_pos 80.0 +886 60 training.batch_size 0.0 +886 61 model.embedding_dim 0.0 +886 61 model.scoring_fct_norm 1.0 +886 61 loss.margin 8.65736632216447 +886 61 regularizer.weight 0.2850208936649122 +886 61 optimizer.lr 0.02494288379526917 +886 61 negative_sampler.num_negs_per_pos 28.0 +886 61 training.batch_size 1.0 +886 62 model.embedding_dim 1.0 +886 62 model.scoring_fct_norm 2.0 +886 62 loss.margin 9.707084598887485 +886 62 regularizer.weight 0.04956512960207327 +886 62 optimizer.lr 0.011220148806116974 +886 62 negative_sampler.num_negs_per_pos 74.0 +886 62 training.batch_size 0.0 +886 63 model.embedding_dim 1.0 +886 63 model.scoring_fct_norm 1.0 +886 63 loss.margin 7.224066244588502 +886 63 regularizer.weight 
0.05286584980713682 +886 63 optimizer.lr 0.0016690544731774613 +886 63 negative_sampler.num_negs_per_pos 16.0 +886 63 training.batch_size 2.0 +886 64 model.embedding_dim 1.0 +886 64 model.scoring_fct_norm 2.0 +886 64 loss.margin 5.364474482147432 +886 64 regularizer.weight 0.23075625655432133 +886 64 optimizer.lr 0.02258451698835368 +886 64 negative_sampler.num_negs_per_pos 86.0 +886 64 training.batch_size 0.0 +886 65 model.embedding_dim 0.0 +886 65 model.scoring_fct_norm 2.0 +886 65 loss.margin 6.282992784100459 +886 65 regularizer.weight 0.23821112211277062 +886 65 optimizer.lr 0.0015025586467007255 +886 65 negative_sampler.num_negs_per_pos 85.0 +886 65 training.batch_size 2.0 +886 66 model.embedding_dim 0.0 +886 66 model.scoring_fct_norm 1.0 +886 66 loss.margin 1.162753568244356 +886 66 regularizer.weight 0.0178972838624758 +886 66 optimizer.lr 0.0038880749776230117 +886 66 negative_sampler.num_negs_per_pos 74.0 +886 66 training.batch_size 0.0 +886 67 model.embedding_dim 2.0 +886 67 model.scoring_fct_norm 2.0 +886 67 loss.margin 9.032026116444172 +886 67 regularizer.weight 0.021218893670172313 +886 67 optimizer.lr 0.0013480085166501074 +886 67 negative_sampler.num_negs_per_pos 55.0 +886 67 training.batch_size 2.0 +886 68 model.embedding_dim 2.0 +886 68 model.scoring_fct_norm 1.0 +886 68 loss.margin 7.697197184986283 +886 68 regularizer.weight 0.2505927516581823 +886 68 optimizer.lr 0.06759104502796226 +886 68 negative_sampler.num_negs_per_pos 87.0 +886 68 training.batch_size 2.0 +886 69 model.embedding_dim 0.0 +886 69 model.scoring_fct_norm 1.0 +886 69 loss.margin 9.750403593141977 +886 69 regularizer.weight 0.025556802806760943 +886 69 optimizer.lr 0.028880425509740064 +886 69 negative_sampler.num_negs_per_pos 36.0 +886 69 training.batch_size 1.0 +886 70 model.embedding_dim 0.0 +886 70 model.scoring_fct_norm 2.0 +886 70 loss.margin 8.409773991955618 +886 70 regularizer.weight 0.17777916679315808 +886 70 optimizer.lr 0.08089793971757898 +886 70 
negative_sampler.num_negs_per_pos 43.0 +886 70 training.batch_size 2.0 +886 71 model.embedding_dim 2.0 +886 71 model.scoring_fct_norm 1.0 +886 71 loss.margin 7.2990092868757035 +886 71 regularizer.weight 0.15338062701841296 +886 71 optimizer.lr 0.012300643072691726 +886 71 negative_sampler.num_negs_per_pos 28.0 +886 71 training.batch_size 2.0 +886 72 model.embedding_dim 1.0 +886 72 model.scoring_fct_norm 1.0 +886 72 loss.margin 8.289464316010832 +886 72 regularizer.weight 0.08336861714322008 +886 72 optimizer.lr 0.0024844536752644014 +886 72 negative_sampler.num_negs_per_pos 20.0 +886 72 training.batch_size 2.0 +886 73 model.embedding_dim 1.0 +886 73 model.scoring_fct_norm 1.0 +886 73 loss.margin 6.969026174147005 +886 73 regularizer.weight 0.2602258766136006 +886 73 optimizer.lr 0.030732389699076827 +886 73 negative_sampler.num_negs_per_pos 83.0 +886 73 training.batch_size 1.0 +886 74 model.embedding_dim 2.0 +886 74 model.scoring_fct_norm 2.0 +886 74 loss.margin 8.708187190399816 +886 74 regularizer.weight 0.012027372008538052 +886 74 optimizer.lr 0.0032735679332571007 +886 74 negative_sampler.num_negs_per_pos 52.0 +886 74 training.batch_size 0.0 +886 75 model.embedding_dim 2.0 +886 75 model.scoring_fct_norm 2.0 +886 75 loss.margin 8.031517426992423 +886 75 regularizer.weight 0.01588302356857568 +886 75 optimizer.lr 0.017749900837506077 +886 75 negative_sampler.num_negs_per_pos 22.0 +886 75 training.batch_size 1.0 +886 76 model.embedding_dim 0.0 +886 76 model.scoring_fct_norm 2.0 +886 76 loss.margin 7.093483327553795 +886 76 regularizer.weight 0.08718901660783494 +886 76 optimizer.lr 0.03691835648974887 +886 76 negative_sampler.num_negs_per_pos 45.0 +886 76 training.batch_size 1.0 +886 77 model.embedding_dim 1.0 +886 77 model.scoring_fct_norm 1.0 +886 77 loss.margin 4.2473481081487785 +886 77 regularizer.weight 0.054303163111455645 +886 77 optimizer.lr 0.005439424482703313 +886 77 negative_sampler.num_negs_per_pos 27.0 +886 77 training.batch_size 1.0 +886 78 
model.embedding_dim 1.0 +886 78 model.scoring_fct_norm 2.0 +886 78 loss.margin 5.9946774988533695 +886 78 regularizer.weight 0.019664295171180266 +886 78 optimizer.lr 0.027047233354741036 +886 78 negative_sampler.num_negs_per_pos 51.0 +886 78 training.batch_size 0.0 +886 79 model.embedding_dim 1.0 +886 79 model.scoring_fct_norm 2.0 +886 79 loss.margin 9.817580626188212 +886 79 regularizer.weight 0.2043377275116225 +886 79 optimizer.lr 0.0037600214230095476 +886 79 negative_sampler.num_negs_per_pos 29.0 +886 79 training.batch_size 0.0 +886 80 model.embedding_dim 1.0 +886 80 model.scoring_fct_norm 2.0 +886 80 loss.margin 3.814919066199456 +886 80 regularizer.weight 0.011709010310895652 +886 80 optimizer.lr 0.012678688575720135 +886 80 negative_sampler.num_negs_per_pos 40.0 +886 80 training.batch_size 1.0 +886 81 model.embedding_dim 0.0 +886 81 model.scoring_fct_norm 2.0 +886 81 loss.margin 8.549030468685167 +886 81 regularizer.weight 0.23768324131167895 +886 81 optimizer.lr 0.011612863808991762 +886 81 negative_sampler.num_negs_per_pos 18.0 +886 81 training.batch_size 1.0 +886 82 model.embedding_dim 1.0 +886 82 model.scoring_fct_norm 2.0 +886 82 loss.margin 0.636988535483567 +886 82 regularizer.weight 0.27489027013501205 +886 82 optimizer.lr 0.05259469715005527 +886 82 negative_sampler.num_negs_per_pos 27.0 +886 82 training.batch_size 2.0 +886 83 model.embedding_dim 0.0 +886 83 model.scoring_fct_norm 2.0 +886 83 loss.margin 1.3238533507798758 +886 83 regularizer.weight 0.014169631717095994 +886 83 optimizer.lr 0.04744799589969897 +886 83 negative_sampler.num_negs_per_pos 57.0 +886 83 training.batch_size 1.0 +886 84 model.embedding_dim 0.0 +886 84 model.scoring_fct_norm 2.0 +886 84 loss.margin 9.297453972308846 +886 84 regularizer.weight 0.11002726591630774 +886 84 optimizer.lr 0.004964113652858807 +886 84 negative_sampler.num_negs_per_pos 63.0 +886 84 training.batch_size 2.0 +886 85 model.embedding_dim 0.0 +886 85 model.scoring_fct_norm 1.0 +886 85 loss.margin 
6.613056987266363 +886 85 regularizer.weight 0.034199689276043096 +886 85 optimizer.lr 0.09704011947418971 +886 85 negative_sampler.num_negs_per_pos 62.0 +886 85 training.batch_size 1.0 +886 86 model.embedding_dim 1.0 +886 86 model.scoring_fct_norm 2.0 +886 86 loss.margin 5.787667923091507 +886 86 regularizer.weight 0.23627869117989903 +886 86 optimizer.lr 0.04474358710310497 +886 86 negative_sampler.num_negs_per_pos 47.0 +886 86 training.batch_size 0.0 +886 87 model.embedding_dim 2.0 +886 87 model.scoring_fct_norm 2.0 +886 87 loss.margin 7.05755707565619 +886 87 regularizer.weight 0.020610801262279748 +886 87 optimizer.lr 0.014402574271611752 +886 87 negative_sampler.num_negs_per_pos 31.0 +886 87 training.batch_size 2.0 +886 88 model.embedding_dim 2.0 +886 88 model.scoring_fct_norm 2.0 +886 88 loss.margin 1.7947230715433582 +886 88 regularizer.weight 0.25364124770892194 +886 88 optimizer.lr 0.004646232527760048 +886 88 negative_sampler.num_negs_per_pos 42.0 +886 88 training.batch_size 0.0 +886 89 model.embedding_dim 2.0 +886 89 model.scoring_fct_norm 2.0 +886 89 loss.margin 6.725909120994433 +886 89 regularizer.weight 0.027963862635577288 +886 89 optimizer.lr 0.001091742929221056 +886 89 negative_sampler.num_negs_per_pos 9.0 +886 89 training.batch_size 1.0 +886 90 model.embedding_dim 1.0 +886 90 model.scoring_fct_norm 1.0 +886 90 loss.margin 7.355556887562994 +886 90 regularizer.weight 0.02402634420918417 +886 90 optimizer.lr 0.004737824338039654 +886 90 negative_sampler.num_negs_per_pos 24.0 +886 90 training.batch_size 1.0 +886 91 model.embedding_dim 0.0 +886 91 model.scoring_fct_norm 2.0 +886 91 loss.margin 2.007543915828188 +886 91 regularizer.weight 0.26626421049519267 +886 91 optimizer.lr 0.08009606563933198 +886 91 negative_sampler.num_negs_per_pos 25.0 +886 91 training.batch_size 0.0 +886 92 model.embedding_dim 0.0 +886 92 model.scoring_fct_norm 1.0 +886 92 loss.margin 7.330477828100394 +886 92 regularizer.weight 0.05262166061259214 +886 92 optimizer.lr 
0.01823589244350868 +886 92 negative_sampler.num_negs_per_pos 57.0 +886 92 training.batch_size 2.0 +886 93 model.embedding_dim 0.0 +886 93 model.scoring_fct_norm 1.0 +886 93 loss.margin 0.7942227373585421 +886 93 regularizer.weight 0.011369988146631538 +886 93 optimizer.lr 0.0011586012713170645 +886 93 negative_sampler.num_negs_per_pos 89.0 +886 93 training.batch_size 0.0 +886 94 model.embedding_dim 1.0 +886 94 model.scoring_fct_norm 2.0 +886 94 loss.margin 2.6542204247525443 +886 94 regularizer.weight 0.10732469048495762 +886 94 optimizer.lr 0.0050882148138798175 +886 94 negative_sampler.num_negs_per_pos 33.0 +886 94 training.batch_size 1.0 +886 95 model.embedding_dim 0.0 +886 95 model.scoring_fct_norm 2.0 +886 95 loss.margin 4.43651485455266 +886 95 regularizer.weight 0.02157222504647016 +886 95 optimizer.lr 0.09929357038275141 +886 95 negative_sampler.num_negs_per_pos 74.0 +886 95 training.batch_size 0.0 +886 96 model.embedding_dim 0.0 +886 96 model.scoring_fct_norm 2.0 +886 96 loss.margin 1.3067183899464048 +886 96 regularizer.weight 0.18001048608120687 +886 96 optimizer.lr 0.003234553245356744 +886 96 negative_sampler.num_negs_per_pos 66.0 +886 96 training.batch_size 2.0 +886 97 model.embedding_dim 2.0 +886 97 model.scoring_fct_norm 1.0 +886 97 loss.margin 5.142234958357809 +886 97 regularizer.weight 0.17453969390381927 +886 97 optimizer.lr 0.005677673312463208 +886 97 negative_sampler.num_negs_per_pos 53.0 +886 97 training.batch_size 0.0 +886 1 dataset """wn18rr""" +886 1 model """transh""" +886 1 loss """marginranking""" +886 1 regularizer """transh""" +886 1 optimizer """adam""" +886 1 training_loop """owa""" +886 1 negative_sampler """basic""" +886 1 evaluator """rankbased""" +886 2 dataset """wn18rr""" +886 2 model """transh""" +886 2 loss """marginranking""" +886 2 regularizer """transh""" +886 2 optimizer """adam""" +886 2 training_loop """owa""" +886 2 negative_sampler """basic""" +886 2 evaluator """rankbased""" +886 3 dataset """wn18rr""" +886 3 
model """transh""" +886 3 loss """marginranking""" +886 3 regularizer """transh""" +886 3 optimizer """adam""" +886 3 training_loop """owa""" +886 3 negative_sampler """basic""" +886 3 evaluator """rankbased""" +886 4 dataset """wn18rr""" +886 4 model """transh""" +886 4 loss """marginranking""" +886 4 regularizer """transh""" +886 4 optimizer """adam""" +886 4 training_loop """owa""" +886 4 negative_sampler """basic""" +886 4 evaluator """rankbased""" +886 5 dataset """wn18rr""" +886 5 model """transh""" +886 5 loss """marginranking""" +886 5 regularizer """transh""" +886 5 optimizer """adam""" +886 5 training_loop """owa""" +886 5 negative_sampler """basic""" +886 5 evaluator """rankbased""" +886 6 dataset """wn18rr""" +886 6 model """transh""" +886 6 loss """marginranking""" +886 6 regularizer """transh""" +886 6 optimizer """adam""" +886 6 training_loop """owa""" +886 6 negative_sampler """basic""" +886 6 evaluator """rankbased""" +886 7 dataset """wn18rr""" +886 7 model """transh""" +886 7 loss """marginranking""" +886 7 regularizer """transh""" +886 7 optimizer """adam""" +886 7 training_loop """owa""" +886 7 negative_sampler """basic""" +886 7 evaluator """rankbased""" +886 8 dataset """wn18rr""" +886 8 model """transh""" +886 8 loss """marginranking""" +886 8 regularizer """transh""" +886 8 optimizer """adam""" +886 8 training_loop """owa""" +886 8 negative_sampler """basic""" +886 8 evaluator """rankbased""" +886 9 dataset """wn18rr""" +886 9 model """transh""" +886 9 loss """marginranking""" +886 9 regularizer """transh""" +886 9 optimizer """adam""" +886 9 training_loop """owa""" +886 9 negative_sampler """basic""" +886 9 evaluator """rankbased""" +886 10 dataset """wn18rr""" +886 10 model """transh""" +886 10 loss """marginranking""" +886 10 regularizer """transh""" +886 10 optimizer """adam""" +886 10 training_loop """owa""" +886 10 negative_sampler """basic""" +886 10 evaluator """rankbased""" +886 11 dataset """wn18rr""" +886 11 model """transh""" 
+886 11 loss """marginranking""" +886 11 regularizer """transh""" +886 11 optimizer """adam""" +886 11 training_loop """owa""" +886 11 negative_sampler """basic""" +886 11 evaluator """rankbased""" +886 12 dataset """wn18rr""" +886 12 model """transh""" +886 12 loss """marginranking""" +886 12 regularizer """transh""" +886 12 optimizer """adam""" +886 12 training_loop """owa""" +886 12 negative_sampler """basic""" +886 12 evaluator """rankbased""" +886 13 dataset """wn18rr""" +886 13 model """transh""" +886 13 loss """marginranking""" +886 13 regularizer """transh""" +886 13 optimizer """adam""" +886 13 training_loop """owa""" +886 13 negative_sampler """basic""" +886 13 evaluator """rankbased""" +886 14 dataset """wn18rr""" +886 14 model """transh""" +886 14 loss """marginranking""" +886 14 regularizer """transh""" +886 14 optimizer """adam""" +886 14 training_loop """owa""" +886 14 negative_sampler """basic""" +886 14 evaluator """rankbased""" +886 15 dataset """wn18rr""" +886 15 model """transh""" +886 15 loss """marginranking""" +886 15 regularizer """transh""" +886 15 optimizer """adam""" +886 15 training_loop """owa""" +886 15 negative_sampler """basic""" +886 15 evaluator """rankbased""" +886 16 dataset """wn18rr""" +886 16 model """transh""" +886 16 loss """marginranking""" +886 16 regularizer """transh""" +886 16 optimizer """adam""" +886 16 training_loop """owa""" +886 16 negative_sampler """basic""" +886 16 evaluator """rankbased""" +886 17 dataset """wn18rr""" +886 17 model """transh""" +886 17 loss """marginranking""" +886 17 regularizer """transh""" +886 17 optimizer """adam""" +886 17 training_loop """owa""" +886 17 negative_sampler """basic""" +886 17 evaluator """rankbased""" +886 18 dataset """wn18rr""" +886 18 model """transh""" +886 18 loss """marginranking""" +886 18 regularizer """transh""" +886 18 optimizer """adam""" +886 18 training_loop """owa""" +886 18 negative_sampler """basic""" +886 18 evaluator """rankbased""" +886 19 dataset 
"""wn18rr""" +886 19 model """transh""" +886 19 loss """marginranking""" +886 19 regularizer """transh""" +886 19 optimizer """adam""" +886 19 training_loop """owa""" +886 19 negative_sampler """basic""" +886 19 evaluator """rankbased""" +886 20 dataset """wn18rr""" +886 20 model """transh""" +886 20 loss """marginranking""" +886 20 regularizer """transh""" +886 20 optimizer """adam""" +886 20 training_loop """owa""" +886 20 negative_sampler """basic""" +886 20 evaluator """rankbased""" +886 21 dataset """wn18rr""" +886 21 model """transh""" +886 21 loss """marginranking""" +886 21 regularizer """transh""" +886 21 optimizer """adam""" +886 21 training_loop """owa""" +886 21 negative_sampler """basic""" +886 21 evaluator """rankbased""" +886 22 dataset """wn18rr""" +886 22 model """transh""" +886 22 loss """marginranking""" +886 22 regularizer """transh""" +886 22 optimizer """adam""" +886 22 training_loop """owa""" +886 22 negative_sampler """basic""" +886 22 evaluator """rankbased""" +886 23 dataset """wn18rr""" +886 23 model """transh""" +886 23 loss """marginranking""" +886 23 regularizer """transh""" +886 23 optimizer """adam""" +886 23 training_loop """owa""" +886 23 negative_sampler """basic""" +886 23 evaluator """rankbased""" +886 24 dataset """wn18rr""" +886 24 model """transh""" +886 24 loss """marginranking""" +886 24 regularizer """transh""" +886 24 optimizer """adam""" +886 24 training_loop """owa""" +886 24 negative_sampler """basic""" +886 24 evaluator """rankbased""" +886 25 dataset """wn18rr""" +886 25 model """transh""" +886 25 loss """marginranking""" +886 25 regularizer """transh""" +886 25 optimizer """adam""" +886 25 training_loop """owa""" +886 25 negative_sampler """basic""" +886 25 evaluator """rankbased""" +886 26 dataset """wn18rr""" +886 26 model """transh""" +886 26 loss """marginranking""" +886 26 regularizer """transh""" +886 26 optimizer """adam""" +886 26 training_loop """owa""" +886 26 negative_sampler """basic""" +886 26 evaluator 
"""rankbased""" +886 27 dataset """wn18rr""" +886 27 model """transh""" +886 27 loss """marginranking""" +886 27 regularizer """transh""" +886 27 optimizer """adam""" +886 27 training_loop """owa""" +886 27 negative_sampler """basic""" +886 27 evaluator """rankbased""" +886 28 dataset """wn18rr""" +886 28 model """transh""" +886 28 loss """marginranking""" +886 28 regularizer """transh""" +886 28 optimizer """adam""" +886 28 training_loop """owa""" +886 28 negative_sampler """basic""" +886 28 evaluator """rankbased""" +886 29 dataset """wn18rr""" +886 29 model """transh""" +886 29 loss """marginranking""" +886 29 regularizer """transh""" +886 29 optimizer """adam""" +886 29 training_loop """owa""" +886 29 negative_sampler """basic""" +886 29 evaluator """rankbased""" +886 30 dataset """wn18rr""" +886 30 model """transh""" +886 30 loss """marginranking""" +886 30 regularizer """transh""" +886 30 optimizer """adam""" +886 30 training_loop """owa""" +886 30 negative_sampler """basic""" +886 30 evaluator """rankbased""" +886 31 dataset """wn18rr""" +886 31 model """transh""" +886 31 loss """marginranking""" +886 31 regularizer """transh""" +886 31 optimizer """adam""" +886 31 training_loop """owa""" +886 31 negative_sampler """basic""" +886 31 evaluator """rankbased""" +886 32 dataset """wn18rr""" +886 32 model """transh""" +886 32 loss """marginranking""" +886 32 regularizer """transh""" +886 32 optimizer """adam""" +886 32 training_loop """owa""" +886 32 negative_sampler """basic""" +886 32 evaluator """rankbased""" +886 33 dataset """wn18rr""" +886 33 model """transh""" +886 33 loss """marginranking""" +886 33 regularizer """transh""" +886 33 optimizer """adam""" +886 33 training_loop """owa""" +886 33 negative_sampler """basic""" +886 33 evaluator """rankbased""" +886 34 dataset """wn18rr""" +886 34 model """transh""" +886 34 loss """marginranking""" +886 34 regularizer """transh""" +886 34 optimizer """adam""" +886 34 training_loop """owa""" +886 34 
negative_sampler """basic""" +886 34 evaluator """rankbased""" +886 35 dataset """wn18rr""" +886 35 model """transh""" +886 35 loss """marginranking""" +886 35 regularizer """transh""" +886 35 optimizer """adam""" +886 35 training_loop """owa""" +886 35 negative_sampler """basic""" +886 35 evaluator """rankbased""" +886 36 dataset """wn18rr""" +886 36 model """transh""" +886 36 loss """marginranking""" +886 36 regularizer """transh""" +886 36 optimizer """adam""" +886 36 training_loop """owa""" +886 36 negative_sampler """basic""" +886 36 evaluator """rankbased""" +886 37 dataset """wn18rr""" +886 37 model """transh""" +886 37 loss """marginranking""" +886 37 regularizer """transh""" +886 37 optimizer """adam""" +886 37 training_loop """owa""" +886 37 negative_sampler """basic""" +886 37 evaluator """rankbased""" +886 38 dataset """wn18rr""" +886 38 model """transh""" +886 38 loss """marginranking""" +886 38 regularizer """transh""" +886 38 optimizer """adam""" +886 38 training_loop """owa""" +886 38 negative_sampler """basic""" +886 38 evaluator """rankbased""" +886 39 dataset """wn18rr""" +886 39 model """transh""" +886 39 loss """marginranking""" +886 39 regularizer """transh""" +886 39 optimizer """adam""" +886 39 training_loop """owa""" +886 39 negative_sampler """basic""" +886 39 evaluator """rankbased""" +886 40 dataset """wn18rr""" +886 40 model """transh""" +886 40 loss """marginranking""" +886 40 regularizer """transh""" +886 40 optimizer """adam""" +886 40 training_loop """owa""" +886 40 negative_sampler """basic""" +886 40 evaluator """rankbased""" +886 41 dataset """wn18rr""" +886 41 model """transh""" +886 41 loss """marginranking""" +886 41 regularizer """transh""" +886 41 optimizer """adam""" +886 41 training_loop """owa""" +886 41 negative_sampler """basic""" +886 41 evaluator """rankbased""" +886 42 dataset """wn18rr""" +886 42 model """transh""" +886 42 loss """marginranking""" +886 42 regularizer """transh""" +886 42 optimizer """adam""" +886 42 
training_loop """owa""" +886 42 negative_sampler """basic""" +886 42 evaluator """rankbased""" +886 43 dataset """wn18rr""" +886 43 model """transh""" +886 43 loss """marginranking""" +886 43 regularizer """transh""" +886 43 optimizer """adam""" +886 43 training_loop """owa""" +886 43 negative_sampler """basic""" +886 43 evaluator """rankbased""" +886 44 dataset """wn18rr""" +886 44 model """transh""" +886 44 loss """marginranking""" +886 44 regularizer """transh""" +886 44 optimizer """adam""" +886 44 training_loop """owa""" +886 44 negative_sampler """basic""" +886 44 evaluator """rankbased""" +886 45 dataset """wn18rr""" +886 45 model """transh""" +886 45 loss """marginranking""" +886 45 regularizer """transh""" +886 45 optimizer """adam""" +886 45 training_loop """owa""" +886 45 negative_sampler """basic""" +886 45 evaluator """rankbased""" +886 46 dataset """wn18rr""" +886 46 model """transh""" +886 46 loss """marginranking""" +886 46 regularizer """transh""" +886 46 optimizer """adam""" +886 46 training_loop """owa""" +886 46 negative_sampler """basic""" +886 46 evaluator """rankbased""" +886 47 dataset """wn18rr""" +886 47 model """transh""" +886 47 loss """marginranking""" +886 47 regularizer """transh""" +886 47 optimizer """adam""" +886 47 training_loop """owa""" +886 47 negative_sampler """basic""" +886 47 evaluator """rankbased""" +886 48 dataset """wn18rr""" +886 48 model """transh""" +886 48 loss """marginranking""" +886 48 regularizer """transh""" +886 48 optimizer """adam""" +886 48 training_loop """owa""" +886 48 negative_sampler """basic""" +886 48 evaluator """rankbased""" +886 49 dataset """wn18rr""" +886 49 model """transh""" +886 49 loss """marginranking""" +886 49 regularizer """transh""" +886 49 optimizer """adam""" +886 49 training_loop """owa""" +886 49 negative_sampler """basic""" +886 49 evaluator """rankbased""" +886 50 dataset """wn18rr""" +886 50 model """transh""" +886 50 loss """marginranking""" +886 50 regularizer """transh""" +886 
50 optimizer """adam""" +886 50 training_loop """owa""" +886 50 negative_sampler """basic""" +886 50 evaluator """rankbased""" +886 51 dataset """wn18rr""" +886 51 model """transh""" +886 51 loss """marginranking""" +886 51 regularizer """transh""" +886 51 optimizer """adam""" +886 51 training_loop """owa""" +886 51 negative_sampler """basic""" +886 51 evaluator """rankbased""" +886 52 dataset """wn18rr""" +886 52 model """transh""" +886 52 loss """marginranking""" +886 52 regularizer """transh""" +886 52 optimizer """adam""" +886 52 training_loop """owa""" +886 52 negative_sampler """basic""" +886 52 evaluator """rankbased""" +886 53 dataset """wn18rr""" +886 53 model """transh""" +886 53 loss """marginranking""" +886 53 regularizer """transh""" +886 53 optimizer """adam""" +886 53 training_loop """owa""" +886 53 negative_sampler """basic""" +886 53 evaluator """rankbased""" +886 54 dataset """wn18rr""" +886 54 model """transh""" +886 54 loss """marginranking""" +886 54 regularizer """transh""" +886 54 optimizer """adam""" +886 54 training_loop """owa""" +886 54 negative_sampler """basic""" +886 54 evaluator """rankbased""" +886 55 dataset """wn18rr""" +886 55 model """transh""" +886 55 loss """marginranking""" +886 55 regularizer """transh""" +886 55 optimizer """adam""" +886 55 training_loop """owa""" +886 55 negative_sampler """basic""" +886 55 evaluator """rankbased""" +886 56 dataset """wn18rr""" +886 56 model """transh""" +886 56 loss """marginranking""" +886 56 regularizer """transh""" +886 56 optimizer """adam""" +886 56 training_loop """owa""" +886 56 negative_sampler """basic""" +886 56 evaluator """rankbased""" +886 57 dataset """wn18rr""" +886 57 model """transh""" +886 57 loss """marginranking""" +886 57 regularizer """transh""" +886 57 optimizer """adam""" +886 57 training_loop """owa""" +886 57 negative_sampler """basic""" +886 57 evaluator """rankbased""" +886 58 dataset """wn18rr""" +886 58 model """transh""" +886 58 loss """marginranking""" +886 
58 regularizer """transh""" +886 58 optimizer """adam""" +886 58 training_loop """owa""" +886 58 negative_sampler """basic""" +886 58 evaluator """rankbased""" +886 59 dataset """wn18rr""" +886 59 model """transh""" +886 59 loss """marginranking""" +886 59 regularizer """transh""" +886 59 optimizer """adam""" +886 59 training_loop """owa""" +886 59 negative_sampler """basic""" +886 59 evaluator """rankbased""" +886 60 dataset """wn18rr""" +886 60 model """transh""" +886 60 loss """marginranking""" +886 60 regularizer """transh""" +886 60 optimizer """adam""" +886 60 training_loop """owa""" +886 60 negative_sampler """basic""" +886 60 evaluator """rankbased""" +886 61 dataset """wn18rr""" +886 61 model """transh""" +886 61 loss """marginranking""" +886 61 regularizer """transh""" +886 61 optimizer """adam""" +886 61 training_loop """owa""" +886 61 negative_sampler """basic""" +886 61 evaluator """rankbased""" +886 62 dataset """wn18rr""" +886 62 model """transh""" +886 62 loss """marginranking""" +886 62 regularizer """transh""" +886 62 optimizer """adam""" +886 62 training_loop """owa""" +886 62 negative_sampler """basic""" +886 62 evaluator """rankbased""" +886 63 dataset """wn18rr""" +886 63 model """transh""" +886 63 loss """marginranking""" +886 63 regularizer """transh""" +886 63 optimizer """adam""" +886 63 training_loop """owa""" +886 63 negative_sampler """basic""" +886 63 evaluator """rankbased""" +886 64 dataset """wn18rr""" +886 64 model """transh""" +886 64 loss """marginranking""" +886 64 regularizer """transh""" +886 64 optimizer """adam""" +886 64 training_loop """owa""" +886 64 negative_sampler """basic""" +886 64 evaluator """rankbased""" +886 65 dataset """wn18rr""" +886 65 model """transh""" +886 65 loss """marginranking""" +886 65 regularizer """transh""" +886 65 optimizer """adam""" +886 65 training_loop """owa""" +886 65 negative_sampler """basic""" +886 65 evaluator """rankbased""" +886 66 dataset """wn18rr""" +886 66 model """transh""" +886 
66 loss """marginranking""" +886 66 regularizer """transh""" +886 66 optimizer """adam""" +886 66 training_loop """owa""" +886 66 negative_sampler """basic""" +886 66 evaluator """rankbased""" +886 67 dataset """wn18rr""" +886 67 model """transh""" +886 67 loss """marginranking""" +886 67 regularizer """transh""" +886 67 optimizer """adam""" +886 67 training_loop """owa""" +886 67 negative_sampler """basic""" +886 67 evaluator """rankbased""" +886 68 dataset """wn18rr""" +886 68 model """transh""" +886 68 loss """marginranking""" +886 68 regularizer """transh""" +886 68 optimizer """adam""" +886 68 training_loop """owa""" +886 68 negative_sampler """basic""" +886 68 evaluator """rankbased""" +886 69 dataset """wn18rr""" +886 69 model """transh""" +886 69 loss """marginranking""" +886 69 regularizer """transh""" +886 69 optimizer """adam""" +886 69 training_loop """owa""" +886 69 negative_sampler """basic""" +886 69 evaluator """rankbased""" +886 70 dataset """wn18rr""" +886 70 model """transh""" +886 70 loss """marginranking""" +886 70 regularizer """transh""" +886 70 optimizer """adam""" +886 70 training_loop """owa""" +886 70 negative_sampler """basic""" +886 70 evaluator """rankbased""" +886 71 dataset """wn18rr""" +886 71 model """transh""" +886 71 loss """marginranking""" +886 71 regularizer """transh""" +886 71 optimizer """adam""" +886 71 training_loop """owa""" +886 71 negative_sampler """basic""" +886 71 evaluator """rankbased""" +886 72 dataset """wn18rr""" +886 72 model """transh""" +886 72 loss """marginranking""" +886 72 regularizer """transh""" +886 72 optimizer """adam""" +886 72 training_loop """owa""" +886 72 negative_sampler """basic""" +886 72 evaluator """rankbased""" +886 73 dataset """wn18rr""" +886 73 model """transh""" +886 73 loss """marginranking""" +886 73 regularizer """transh""" +886 73 optimizer """adam""" +886 73 training_loop """owa""" +886 73 negative_sampler """basic""" +886 73 evaluator """rankbased""" +886 74 dataset """wn18rr""" 
+886 74 model """transh""" +886 74 loss """marginranking""" +886 74 regularizer """transh""" +886 74 optimizer """adam""" +886 74 training_loop """owa""" +886 74 negative_sampler """basic""" +886 74 evaluator """rankbased""" +886 75 dataset """wn18rr""" +886 75 model """transh""" +886 75 loss """marginranking""" +886 75 regularizer """transh""" +886 75 optimizer """adam""" +886 75 training_loop """owa""" +886 75 negative_sampler """basic""" +886 75 evaluator """rankbased""" +886 76 dataset """wn18rr""" +886 76 model """transh""" +886 76 loss """marginranking""" +886 76 regularizer """transh""" +886 76 optimizer """adam""" +886 76 training_loop """owa""" +886 76 negative_sampler """basic""" +886 76 evaluator """rankbased""" +886 77 dataset """wn18rr""" +886 77 model """transh""" +886 77 loss """marginranking""" +886 77 regularizer """transh""" +886 77 optimizer """adam""" +886 77 training_loop """owa""" +886 77 negative_sampler """basic""" +886 77 evaluator """rankbased""" +886 78 dataset """wn18rr""" +886 78 model """transh""" +886 78 loss """marginranking""" +886 78 regularizer """transh""" +886 78 optimizer """adam""" +886 78 training_loop """owa""" +886 78 negative_sampler """basic""" +886 78 evaluator """rankbased""" +886 79 dataset """wn18rr""" +886 79 model """transh""" +886 79 loss """marginranking""" +886 79 regularizer """transh""" +886 79 optimizer """adam""" +886 79 training_loop """owa""" +886 79 negative_sampler """basic""" +886 79 evaluator """rankbased""" +886 80 dataset """wn18rr""" +886 80 model """transh""" +886 80 loss """marginranking""" +886 80 regularizer """transh""" +886 80 optimizer """adam""" +886 80 training_loop """owa""" +886 80 negative_sampler """basic""" +886 80 evaluator """rankbased""" +886 81 dataset """wn18rr""" +886 81 model """transh""" +886 81 loss """marginranking""" +886 81 regularizer """transh""" +886 81 optimizer """adam""" +886 81 training_loop """owa""" +886 81 negative_sampler """basic""" +886 81 evaluator 
"""rankbased""" +886 82 dataset """wn18rr""" +886 82 model """transh""" +886 82 loss """marginranking""" +886 82 regularizer """transh""" +886 82 optimizer """adam""" +886 82 training_loop """owa""" +886 82 negative_sampler """basic""" +886 82 evaluator """rankbased""" +886 83 dataset """wn18rr""" +886 83 model """transh""" +886 83 loss """marginranking""" +886 83 regularizer """transh""" +886 83 optimizer """adam""" +886 83 training_loop """owa""" +886 83 negative_sampler """basic""" +886 83 evaluator """rankbased""" +886 84 dataset """wn18rr""" +886 84 model """transh""" +886 84 loss """marginranking""" +886 84 regularizer """transh""" +886 84 optimizer """adam""" +886 84 training_loop """owa""" +886 84 negative_sampler """basic""" +886 84 evaluator """rankbased""" +886 85 dataset """wn18rr""" +886 85 model """transh""" +886 85 loss """marginranking""" +886 85 regularizer """transh""" +886 85 optimizer """adam""" +886 85 training_loop """owa""" +886 85 negative_sampler """basic""" +886 85 evaluator """rankbased""" +886 86 dataset """wn18rr""" +886 86 model """transh""" +886 86 loss """marginranking""" +886 86 regularizer """transh""" +886 86 optimizer """adam""" +886 86 training_loop """owa""" +886 86 negative_sampler """basic""" +886 86 evaluator """rankbased""" +886 87 dataset """wn18rr""" +886 87 model """transh""" +886 87 loss """marginranking""" +886 87 regularizer """transh""" +886 87 optimizer """adam""" +886 87 training_loop """owa""" +886 87 negative_sampler """basic""" +886 87 evaluator """rankbased""" +886 88 dataset """wn18rr""" +886 88 model """transh""" +886 88 loss """marginranking""" +886 88 regularizer """transh""" +886 88 optimizer """adam""" +886 88 training_loop """owa""" +886 88 negative_sampler """basic""" +886 88 evaluator """rankbased""" +886 89 dataset """wn18rr""" +886 89 model """transh""" +886 89 loss """marginranking""" +886 89 regularizer """transh""" +886 89 optimizer """adam""" +886 89 training_loop """owa""" +886 89 
negative_sampler """basic""" +886 89 evaluator """rankbased""" +886 90 dataset """wn18rr""" +886 90 model """transh""" +886 90 loss """marginranking""" +886 90 regularizer """transh""" +886 90 optimizer """adam""" +886 90 training_loop """owa""" +886 90 negative_sampler """basic""" +886 90 evaluator """rankbased""" +886 91 dataset """wn18rr""" +886 91 model """transh""" +886 91 loss """marginranking""" +886 91 regularizer """transh""" +886 91 optimizer """adam""" +886 91 training_loop """owa""" +886 91 negative_sampler """basic""" +886 91 evaluator """rankbased""" +886 92 dataset """wn18rr""" +886 92 model """transh""" +886 92 loss """marginranking""" +886 92 regularizer """transh""" +886 92 optimizer """adam""" +886 92 training_loop """owa""" +886 92 negative_sampler """basic""" +886 92 evaluator """rankbased""" +886 93 dataset """wn18rr""" +886 93 model """transh""" +886 93 loss """marginranking""" +886 93 regularizer """transh""" +886 93 optimizer """adam""" +886 93 training_loop """owa""" +886 93 negative_sampler """basic""" +886 93 evaluator """rankbased""" +886 94 dataset """wn18rr""" +886 94 model """transh""" +886 94 loss """marginranking""" +886 94 regularizer """transh""" +886 94 optimizer """adam""" +886 94 training_loop """owa""" +886 94 negative_sampler """basic""" +886 94 evaluator """rankbased""" +886 95 dataset """wn18rr""" +886 95 model """transh""" +886 95 loss """marginranking""" +886 95 regularizer """transh""" +886 95 optimizer """adam""" +886 95 training_loop """owa""" +886 95 negative_sampler """basic""" +886 95 evaluator """rankbased""" +886 96 dataset """wn18rr""" +886 96 model """transh""" +886 96 loss """marginranking""" +886 96 regularizer """transh""" +886 96 optimizer """adam""" +886 96 training_loop """owa""" +886 96 negative_sampler """basic""" +886 96 evaluator """rankbased""" +886 97 dataset """wn18rr""" +886 97 model """transh""" +886 97 loss """marginranking""" +886 97 regularizer """transh""" +886 97 optimizer """adam""" +886 97 
training_loop """owa""" +886 97 negative_sampler """basic""" +886 97 evaluator """rankbased""" +887 1 model.embedding_dim 2.0 +887 1 model.scoring_fct_norm 1.0 +887 1 regularizer.weight 0.023696000952143886 +887 1 optimizer.lr 0.001641989344684456 +887 1 negative_sampler.num_negs_per_pos 2.0 +887 1 training.batch_size 2.0 +887 2 model.embedding_dim 0.0 +887 2 model.scoring_fct_norm 1.0 +887 2 regularizer.weight 0.011989879133348376 +887 2 optimizer.lr 0.08937254606128273 +887 2 negative_sampler.num_negs_per_pos 20.0 +887 2 training.batch_size 0.0 +887 3 model.embedding_dim 1.0 +887 3 model.scoring_fct_norm 1.0 +887 3 regularizer.weight 0.07952431701008786 +887 3 optimizer.lr 0.03890443260650458 +887 3 negative_sampler.num_negs_per_pos 94.0 +887 3 training.batch_size 2.0 +887 4 model.embedding_dim 2.0 +887 4 model.scoring_fct_norm 2.0 +887 4 regularizer.weight 0.14052384343059607 +887 4 optimizer.lr 0.004666919633683924 +887 4 negative_sampler.num_negs_per_pos 81.0 +887 4 training.batch_size 1.0 +887 5 model.embedding_dim 1.0 +887 5 model.scoring_fct_norm 1.0 +887 5 regularizer.weight 0.013802621588136742 +887 5 optimizer.lr 0.01573253867634027 +887 5 negative_sampler.num_negs_per_pos 57.0 +887 5 training.batch_size 2.0 +887 6 model.embedding_dim 0.0 +887 6 model.scoring_fct_norm 2.0 +887 6 regularizer.weight 0.04639640833282759 +887 6 optimizer.lr 0.005464969934790494 +887 6 negative_sampler.num_negs_per_pos 44.0 +887 6 training.batch_size 0.0 +887 7 model.embedding_dim 0.0 +887 7 model.scoring_fct_norm 2.0 +887 7 regularizer.weight 0.10028885659036814 +887 7 optimizer.lr 0.032190690337445836 +887 7 negative_sampler.num_negs_per_pos 15.0 +887 7 training.batch_size 0.0 +887 8 model.embedding_dim 0.0 +887 8 model.scoring_fct_norm 2.0 +887 8 regularizer.weight 0.18851855623176178 +887 8 optimizer.lr 0.004191469045029407 +887 8 negative_sampler.num_negs_per_pos 62.0 +887 8 training.batch_size 2.0 +887 9 model.embedding_dim 2.0 +887 9 model.scoring_fct_norm 2.0 +887 9 
regularizer.weight 0.12744762530095657 +887 9 optimizer.lr 0.01082465812001471 +887 9 negative_sampler.num_negs_per_pos 33.0 +887 9 training.batch_size 1.0 +887 10 model.embedding_dim 2.0 +887 10 model.scoring_fct_norm 1.0 +887 10 regularizer.weight 0.03583488465255994 +887 10 optimizer.lr 0.01853668295874666 +887 10 negative_sampler.num_negs_per_pos 68.0 +887 10 training.batch_size 2.0 +887 11 model.embedding_dim 1.0 +887 11 model.scoring_fct_norm 2.0 +887 11 regularizer.weight 0.11586059455255558 +887 11 optimizer.lr 0.0012108800850911938 +887 11 negative_sampler.num_negs_per_pos 96.0 +887 11 training.batch_size 2.0 +887 12 model.embedding_dim 0.0 +887 12 model.scoring_fct_norm 2.0 +887 12 regularizer.weight 0.03663997126213709 +887 12 optimizer.lr 0.07732394321903363 +887 12 negative_sampler.num_negs_per_pos 34.0 +887 12 training.batch_size 1.0 +887 13 model.embedding_dim 2.0 +887 13 model.scoring_fct_norm 1.0 +887 13 regularizer.weight 0.10863510089489978 +887 13 optimizer.lr 0.004049473471499681 +887 13 negative_sampler.num_negs_per_pos 76.0 +887 13 training.batch_size 2.0 +887 14 model.embedding_dim 1.0 +887 14 model.scoring_fct_norm 1.0 +887 14 regularizer.weight 0.03147860958626182 +887 14 optimizer.lr 0.019941641784729985 +887 14 negative_sampler.num_negs_per_pos 15.0 +887 14 training.batch_size 0.0 +887 15 model.embedding_dim 0.0 +887 15 model.scoring_fct_norm 2.0 +887 15 regularizer.weight 0.15906075788904506 +887 15 optimizer.lr 0.022587412491752345 +887 15 negative_sampler.num_negs_per_pos 2.0 +887 15 training.batch_size 1.0 +887 16 model.embedding_dim 0.0 +887 16 model.scoring_fct_norm 1.0 +887 16 regularizer.weight 0.02116156698925893 +887 16 optimizer.lr 0.0018742930910785485 +887 16 negative_sampler.num_negs_per_pos 58.0 +887 16 training.batch_size 0.0 +887 17 model.embedding_dim 1.0 +887 17 model.scoring_fct_norm 2.0 +887 17 regularizer.weight 0.21919029319835928 +887 17 optimizer.lr 0.0012380465569812283 +887 17 negative_sampler.num_negs_per_pos 
54.0 +887 17 training.batch_size 0.0 +887 18 model.embedding_dim 2.0 +887 18 model.scoring_fct_norm 1.0 +887 18 regularizer.weight 0.036817727080475864 +887 18 optimizer.lr 0.0023158729047824847 +887 18 negative_sampler.num_negs_per_pos 29.0 +887 18 training.batch_size 0.0 +887 19 model.embedding_dim 1.0 +887 19 model.scoring_fct_norm 2.0 +887 19 regularizer.weight 0.24207549173672466 +887 19 optimizer.lr 0.0751518287983805 +887 19 negative_sampler.num_negs_per_pos 12.0 +887 19 training.batch_size 2.0 +887 20 model.embedding_dim 2.0 +887 20 model.scoring_fct_norm 1.0 +887 20 regularizer.weight 0.07376415487228459 +887 20 optimizer.lr 0.0330376628564078 +887 20 negative_sampler.num_negs_per_pos 65.0 +887 20 training.batch_size 0.0 +887 21 model.embedding_dim 2.0 +887 21 model.scoring_fct_norm 1.0 +887 21 regularizer.weight 0.04861101548983157 +887 21 optimizer.lr 0.005264441877267693 +887 21 negative_sampler.num_negs_per_pos 1.0 +887 21 training.batch_size 0.0 +887 22 model.embedding_dim 1.0 +887 22 model.scoring_fct_norm 1.0 +887 22 regularizer.weight 0.023985064529989317 +887 22 optimizer.lr 0.0067594967867940425 +887 22 negative_sampler.num_negs_per_pos 16.0 +887 22 training.batch_size 2.0 +887 23 model.embedding_dim 2.0 +887 23 model.scoring_fct_norm 2.0 +887 23 regularizer.weight 0.011644816770625347 +887 23 optimizer.lr 0.0016253222739571918 +887 23 negative_sampler.num_negs_per_pos 66.0 +887 23 training.batch_size 2.0 +887 24 model.embedding_dim 1.0 +887 24 model.scoring_fct_norm 2.0 +887 24 regularizer.weight 0.015487868293839673 +887 24 optimizer.lr 0.008582078392938772 +887 24 negative_sampler.num_negs_per_pos 28.0 +887 24 training.batch_size 1.0 +887 25 model.embedding_dim 0.0 +887 25 model.scoring_fct_norm 1.0 +887 25 regularizer.weight 0.11489043997805017 +887 25 optimizer.lr 0.028063600106151824 +887 25 negative_sampler.num_negs_per_pos 90.0 +887 25 training.batch_size 0.0 +887 26 model.embedding_dim 0.0 +887 26 model.scoring_fct_norm 2.0 +887 26 
regularizer.weight 0.02387728447781534 +887 26 optimizer.lr 0.0019907080285239717 +887 26 negative_sampler.num_negs_per_pos 47.0 +887 26 training.batch_size 2.0 +887 27 model.embedding_dim 1.0 +887 27 model.scoring_fct_norm 1.0 +887 27 regularizer.weight 0.20119015692041506 +887 27 optimizer.lr 0.0877647871847143 +887 27 negative_sampler.num_negs_per_pos 99.0 +887 27 training.batch_size 2.0 +887 28 model.embedding_dim 2.0 +887 28 model.scoring_fct_norm 2.0 +887 28 regularizer.weight 0.034651813121756615 +887 28 optimizer.lr 0.0011140952208908463 +887 28 negative_sampler.num_negs_per_pos 18.0 +887 28 training.batch_size 1.0 +887 29 model.embedding_dim 1.0 +887 29 model.scoring_fct_norm 2.0 +887 29 regularizer.weight 0.0148585487477628 +887 29 optimizer.lr 0.009852659731406426 +887 29 negative_sampler.num_negs_per_pos 71.0 +887 29 training.batch_size 0.0 +887 30 model.embedding_dim 0.0 +887 30 model.scoring_fct_norm 1.0 +887 30 regularizer.weight 0.05567244763209503 +887 30 optimizer.lr 0.022373804438020978 +887 30 negative_sampler.num_negs_per_pos 44.0 +887 30 training.batch_size 2.0 +887 31 model.embedding_dim 1.0 +887 31 model.scoring_fct_norm 2.0 +887 31 regularizer.weight 0.2972868432067836 +887 31 optimizer.lr 0.04520631332751309 +887 31 negative_sampler.num_negs_per_pos 49.0 +887 31 training.batch_size 0.0 +887 32 model.embedding_dim 0.0 +887 32 model.scoring_fct_norm 2.0 +887 32 regularizer.weight 0.10015227141656899 +887 32 optimizer.lr 0.006116264877921713 +887 32 negative_sampler.num_negs_per_pos 10.0 +887 32 training.batch_size 2.0 +887 33 model.embedding_dim 2.0 +887 33 model.scoring_fct_norm 2.0 +887 33 regularizer.weight 0.057457344803239434 +887 33 optimizer.lr 0.0019374674943260736 +887 33 negative_sampler.num_negs_per_pos 99.0 +887 33 training.batch_size 0.0 +887 34 model.embedding_dim 0.0 +887 34 model.scoring_fct_norm 1.0 +887 34 regularizer.weight 0.023519860425282098 +887 34 optimizer.lr 0.06820083934495429 +887 34 
negative_sampler.num_negs_per_pos 74.0 +887 34 training.batch_size 0.0 +887 35 model.embedding_dim 1.0 +887 35 model.scoring_fct_norm 1.0 +887 35 regularizer.weight 0.09099825797701248 +887 35 optimizer.lr 0.03185951499588391 +887 35 negative_sampler.num_negs_per_pos 53.0 +887 35 training.batch_size 2.0 +887 36 model.embedding_dim 0.0 +887 36 model.scoring_fct_norm 2.0 +887 36 regularizer.weight 0.02428735059804587 +887 36 optimizer.lr 0.001113840568548908 +887 36 negative_sampler.num_negs_per_pos 91.0 +887 36 training.batch_size 1.0 +887 37 model.embedding_dim 1.0 +887 37 model.scoring_fct_norm 2.0 +887 37 regularizer.weight 0.13879019256238412 +887 37 optimizer.lr 0.0013759098340216356 +887 37 negative_sampler.num_negs_per_pos 20.0 +887 37 training.batch_size 0.0 +887 38 model.embedding_dim 2.0 +887 38 model.scoring_fct_norm 1.0 +887 38 regularizer.weight 0.047847244104637594 +887 38 optimizer.lr 0.004675215226522568 +887 38 negative_sampler.num_negs_per_pos 28.0 +887 38 training.batch_size 2.0 +887 39 model.embedding_dim 1.0 +887 39 model.scoring_fct_norm 1.0 +887 39 regularizer.weight 0.025468218289712 +887 39 optimizer.lr 0.010944522566713681 +887 39 negative_sampler.num_negs_per_pos 91.0 +887 39 training.batch_size 0.0 +887 40 model.embedding_dim 0.0 +887 40 model.scoring_fct_norm 1.0 +887 40 regularizer.weight 0.09343796215000338 +887 40 optimizer.lr 0.06431147755901664 +887 40 negative_sampler.num_negs_per_pos 1.0 +887 40 training.batch_size 2.0 +887 41 model.embedding_dim 1.0 +887 41 model.scoring_fct_norm 1.0 +887 41 regularizer.weight 0.05058046361356646 +887 41 optimizer.lr 0.05881401919532382 +887 41 negative_sampler.num_negs_per_pos 73.0 +887 41 training.batch_size 2.0 +887 42 model.embedding_dim 2.0 +887 42 model.scoring_fct_norm 2.0 +887 42 regularizer.weight 0.01490156730571822 +887 42 optimizer.lr 0.00555063626531815 +887 42 negative_sampler.num_negs_per_pos 38.0 +887 42 training.batch_size 2.0 +887 43 model.embedding_dim 0.0 +887 43 
model.scoring_fct_norm 2.0 +887 43 regularizer.weight 0.17883210906634045 +887 43 optimizer.lr 0.015252753270342671 +887 43 negative_sampler.num_negs_per_pos 88.0 +887 43 training.batch_size 0.0 +887 44 model.embedding_dim 0.0 +887 44 model.scoring_fct_norm 2.0 +887 44 regularizer.weight 0.010640046141268655 +887 44 optimizer.lr 0.001164330209231949 +887 44 negative_sampler.num_negs_per_pos 56.0 +887 44 training.batch_size 0.0 +887 45 model.embedding_dim 1.0 +887 45 model.scoring_fct_norm 1.0 +887 45 regularizer.weight 0.03891853387239107 +887 45 optimizer.lr 0.010172462202993294 +887 45 negative_sampler.num_negs_per_pos 59.0 +887 45 training.batch_size 0.0 +887 46 model.embedding_dim 2.0 +887 46 model.scoring_fct_norm 1.0 +887 46 regularizer.weight 0.029686030835489874 +887 46 optimizer.lr 0.008058030744035938 +887 46 negative_sampler.num_negs_per_pos 32.0 +887 46 training.batch_size 1.0 +887 47 model.embedding_dim 1.0 +887 47 model.scoring_fct_norm 1.0 +887 47 regularizer.weight 0.04265360437819509 +887 47 optimizer.lr 0.05090068548585127 +887 47 negative_sampler.num_negs_per_pos 84.0 +887 47 training.batch_size 0.0 +887 48 model.embedding_dim 0.0 +887 48 model.scoring_fct_norm 1.0 +887 48 regularizer.weight 0.017948218129999294 +887 48 optimizer.lr 0.03346399310038471 +887 48 negative_sampler.num_negs_per_pos 39.0 +887 48 training.batch_size 2.0 +887 49 model.embedding_dim 0.0 +887 49 model.scoring_fct_norm 2.0 +887 49 regularizer.weight 0.013618903710224804 +887 49 optimizer.lr 0.0016228971762587475 +887 49 negative_sampler.num_negs_per_pos 41.0 +887 49 training.batch_size 1.0 +887 50 model.embedding_dim 0.0 +887 50 model.scoring_fct_norm 1.0 +887 50 regularizer.weight 0.16264807040791773 +887 50 optimizer.lr 0.00371887823491545 +887 50 negative_sampler.num_negs_per_pos 47.0 +887 50 training.batch_size 1.0 +887 51 model.embedding_dim 1.0 +887 51 model.scoring_fct_norm 1.0 +887 51 regularizer.weight 0.1836430031727579 +887 51 optimizer.lr 0.029896171774458365 
+887 51 negative_sampler.num_negs_per_pos 21.0 +887 51 training.batch_size 0.0 +887 52 model.embedding_dim 1.0 +887 52 model.scoring_fct_norm 2.0 +887 52 regularizer.weight 0.017972920251719506 +887 52 optimizer.lr 0.02138329421673904 +887 52 negative_sampler.num_negs_per_pos 84.0 +887 52 training.batch_size 0.0 +887 53 model.embedding_dim 0.0 +887 53 model.scoring_fct_norm 1.0 +887 53 regularizer.weight 0.09061018401183868 +887 53 optimizer.lr 0.0017996620935715674 +887 53 negative_sampler.num_negs_per_pos 94.0 +887 53 training.batch_size 2.0 +887 54 model.embedding_dim 2.0 +887 54 model.scoring_fct_norm 1.0 +887 54 regularizer.weight 0.1151513251223286 +887 54 optimizer.lr 0.02510550710816701 +887 54 negative_sampler.num_negs_per_pos 95.0 +887 54 training.batch_size 0.0 +887 55 model.embedding_dim 0.0 +887 55 model.scoring_fct_norm 1.0 +887 55 regularizer.weight 0.1677487950425794 +887 55 optimizer.lr 0.006026819963976737 +887 55 negative_sampler.num_negs_per_pos 27.0 +887 55 training.batch_size 2.0 +887 56 model.embedding_dim 1.0 +887 56 model.scoring_fct_norm 2.0 +887 56 regularizer.weight 0.2610842032076089 +887 56 optimizer.lr 0.08259969424684756 +887 56 negative_sampler.num_negs_per_pos 7.0 +887 56 training.batch_size 2.0 +887 57 model.embedding_dim 0.0 +887 57 model.scoring_fct_norm 2.0 +887 57 regularizer.weight 0.27313207094201913 +887 57 optimizer.lr 0.008214968513621424 +887 57 negative_sampler.num_negs_per_pos 34.0 +887 57 training.batch_size 2.0 +887 58 model.embedding_dim 1.0 +887 58 model.scoring_fct_norm 2.0 +887 58 regularizer.weight 0.010376692421438514 +887 58 optimizer.lr 0.03535753909686747 +887 58 negative_sampler.num_negs_per_pos 21.0 +887 58 training.batch_size 2.0 +887 59 model.embedding_dim 1.0 +887 59 model.scoring_fct_norm 1.0 +887 59 regularizer.weight 0.14552574653847603 +887 59 optimizer.lr 0.09173850744845718 +887 59 negative_sampler.num_negs_per_pos 50.0 +887 59 training.batch_size 0.0 +887 60 model.embedding_dim 2.0 +887 60 
model.scoring_fct_norm 1.0 +887 60 regularizer.weight 0.04858713812201384 +887 60 optimizer.lr 0.005773004066675538 +887 60 negative_sampler.num_negs_per_pos 60.0 +887 60 training.batch_size 1.0 +887 61 model.embedding_dim 2.0 +887 61 model.scoring_fct_norm 1.0 +887 61 regularizer.weight 0.031127555338594324 +887 61 optimizer.lr 0.07458390649022151 +887 61 negative_sampler.num_negs_per_pos 21.0 +887 61 training.batch_size 1.0 +887 62 model.embedding_dim 1.0 +887 62 model.scoring_fct_norm 2.0 +887 62 regularizer.weight 0.012702110098251888 +887 62 optimizer.lr 0.0021332424531695948 +887 62 negative_sampler.num_negs_per_pos 29.0 +887 62 training.batch_size 2.0 +887 63 model.embedding_dim 1.0 +887 63 model.scoring_fct_norm 2.0 +887 63 regularizer.weight 0.012010874070712583 +887 63 optimizer.lr 0.02145070981069588 +887 63 negative_sampler.num_negs_per_pos 19.0 +887 63 training.batch_size 0.0 +887 64 model.embedding_dim 0.0 +887 64 model.scoring_fct_norm 2.0 +887 64 regularizer.weight 0.012532850536585774 +887 64 optimizer.lr 0.010813725731357384 +887 64 negative_sampler.num_negs_per_pos 79.0 +887 64 training.batch_size 0.0 +887 65 model.embedding_dim 0.0 +887 65 model.scoring_fct_norm 2.0 +887 65 regularizer.weight 0.20702012884148246 +887 65 optimizer.lr 0.002748908902289087 +887 65 negative_sampler.num_negs_per_pos 9.0 +887 65 training.batch_size 1.0 +887 66 model.embedding_dim 0.0 +887 66 model.scoring_fct_norm 1.0 +887 66 regularizer.weight 0.14118408179089206 +887 66 optimizer.lr 0.007526753138489865 +887 66 negative_sampler.num_negs_per_pos 22.0 +887 66 training.batch_size 1.0 +887 67 model.embedding_dim 0.0 +887 67 model.scoring_fct_norm 1.0 +887 67 regularizer.weight 0.06086041766696365 +887 67 optimizer.lr 0.09819186292828513 +887 67 negative_sampler.num_negs_per_pos 37.0 +887 67 training.batch_size 2.0 +887 68 model.embedding_dim 1.0 +887 68 model.scoring_fct_norm 1.0 +887 68 regularizer.weight 0.25915709396702796 +887 68 optimizer.lr 0.003536186691145149 
+887 68 negative_sampler.num_negs_per_pos 95.0 +887 68 training.batch_size 1.0 +887 69 model.embedding_dim 1.0 +887 69 model.scoring_fct_norm 1.0 +887 69 regularizer.weight 0.018395894168325676 +887 69 optimizer.lr 0.005681381127729132 +887 69 negative_sampler.num_negs_per_pos 78.0 +887 69 training.batch_size 1.0 +887 70 model.embedding_dim 1.0 +887 70 model.scoring_fct_norm 1.0 +887 70 regularizer.weight 0.1737641393435732 +887 70 optimizer.lr 0.004608018542980236 +887 70 negative_sampler.num_negs_per_pos 27.0 +887 70 training.batch_size 0.0 +887 1 dataset """wn18rr""" +887 1 model """transh""" +887 1 loss """bceaftersigmoid""" +887 1 regularizer """transh""" +887 1 optimizer """adam""" +887 1 training_loop """owa""" +887 1 negative_sampler """basic""" +887 1 evaluator """rankbased""" +887 2 dataset """wn18rr""" +887 2 model """transh""" +887 2 loss """bceaftersigmoid""" +887 2 regularizer """transh""" +887 2 optimizer """adam""" +887 2 training_loop """owa""" +887 2 negative_sampler """basic""" +887 2 evaluator """rankbased""" +887 3 dataset """wn18rr""" +887 3 model """transh""" +887 3 loss """bceaftersigmoid""" +887 3 regularizer """transh""" +887 3 optimizer """adam""" +887 3 training_loop """owa""" +887 3 negative_sampler """basic""" +887 3 evaluator """rankbased""" +887 4 dataset """wn18rr""" +887 4 model """transh""" +887 4 loss """bceaftersigmoid""" +887 4 regularizer """transh""" +887 4 optimizer """adam""" +887 4 training_loop """owa""" +887 4 negative_sampler """basic""" +887 4 evaluator """rankbased""" +887 5 dataset """wn18rr""" +887 5 model """transh""" +887 5 loss """bceaftersigmoid""" +887 5 regularizer """transh""" +887 5 optimizer """adam""" +887 5 training_loop """owa""" +887 5 negative_sampler """basic""" +887 5 evaluator """rankbased""" +887 6 dataset """wn18rr""" +887 6 model """transh""" +887 6 loss """bceaftersigmoid""" +887 6 regularizer """transh""" +887 6 optimizer """adam""" +887 6 training_loop """owa""" +887 6 negative_sampler 
"""basic""" +887 6 evaluator """rankbased""" +887 7 dataset """wn18rr""" +887 7 model """transh""" +887 7 loss """bceaftersigmoid""" +887 7 regularizer """transh""" +887 7 optimizer """adam""" +887 7 training_loop """owa""" +887 7 negative_sampler """basic""" +887 7 evaluator """rankbased""" +887 8 dataset """wn18rr""" +887 8 model """transh""" +887 8 loss """bceaftersigmoid""" +887 8 regularizer """transh""" +887 8 optimizer """adam""" +887 8 training_loop """owa""" +887 8 negative_sampler """basic""" +887 8 evaluator """rankbased""" +887 9 dataset """wn18rr""" +887 9 model """transh""" +887 9 loss """bceaftersigmoid""" +887 9 regularizer """transh""" +887 9 optimizer """adam""" +887 9 training_loop """owa""" +887 9 negative_sampler """basic""" +887 9 evaluator """rankbased""" +887 10 dataset """wn18rr""" +887 10 model """transh""" +887 10 loss """bceaftersigmoid""" +887 10 regularizer """transh""" +887 10 optimizer """adam""" +887 10 training_loop """owa""" +887 10 negative_sampler """basic""" +887 10 evaluator """rankbased""" +887 11 dataset """wn18rr""" +887 11 model """transh""" +887 11 loss """bceaftersigmoid""" +887 11 regularizer """transh""" +887 11 optimizer """adam""" +887 11 training_loop """owa""" +887 11 negative_sampler """basic""" +887 11 evaluator """rankbased""" +887 12 dataset """wn18rr""" +887 12 model """transh""" +887 12 loss """bceaftersigmoid""" +887 12 regularizer """transh""" +887 12 optimizer """adam""" +887 12 training_loop """owa""" +887 12 negative_sampler """basic""" +887 12 evaluator """rankbased""" +887 13 dataset """wn18rr""" +887 13 model """transh""" +887 13 loss """bceaftersigmoid""" +887 13 regularizer """transh""" +887 13 optimizer """adam""" +887 13 training_loop """owa""" +887 13 negative_sampler """basic""" +887 13 evaluator """rankbased""" +887 14 dataset """wn18rr""" +887 14 model """transh""" +887 14 loss """bceaftersigmoid""" +887 14 regularizer """transh""" +887 14 optimizer """adam""" +887 14 training_loop """owa""" 
+887 14 negative_sampler """basic""" +887 14 evaluator """rankbased""" +887 15 dataset """wn18rr""" +887 15 model """transh""" +887 15 loss """bceaftersigmoid""" +887 15 regularizer """transh""" +887 15 optimizer """adam""" +887 15 training_loop """owa""" +887 15 negative_sampler """basic""" +887 15 evaluator """rankbased""" +887 16 dataset """wn18rr""" +887 16 model """transh""" +887 16 loss """bceaftersigmoid""" +887 16 regularizer """transh""" +887 16 optimizer """adam""" +887 16 training_loop """owa""" +887 16 negative_sampler """basic""" +887 16 evaluator """rankbased""" +887 17 dataset """wn18rr""" +887 17 model """transh""" +887 17 loss """bceaftersigmoid""" +887 17 regularizer """transh""" +887 17 optimizer """adam""" +887 17 training_loop """owa""" +887 17 negative_sampler """basic""" +887 17 evaluator """rankbased""" +887 18 dataset """wn18rr""" +887 18 model """transh""" +887 18 loss """bceaftersigmoid""" +887 18 regularizer """transh""" +887 18 optimizer """adam""" +887 18 training_loop """owa""" +887 18 negative_sampler """basic""" +887 18 evaluator """rankbased""" +887 19 dataset """wn18rr""" +887 19 model """transh""" +887 19 loss """bceaftersigmoid""" +887 19 regularizer """transh""" +887 19 optimizer """adam""" +887 19 training_loop """owa""" +887 19 negative_sampler """basic""" +887 19 evaluator """rankbased""" +887 20 dataset """wn18rr""" +887 20 model """transh""" +887 20 loss """bceaftersigmoid""" +887 20 regularizer """transh""" +887 20 optimizer """adam""" +887 20 training_loop """owa""" +887 20 negative_sampler """basic""" +887 20 evaluator """rankbased""" +887 21 dataset """wn18rr""" +887 21 model """transh""" +887 21 loss """bceaftersigmoid""" +887 21 regularizer """transh""" +887 21 optimizer """adam""" +887 21 training_loop """owa""" +887 21 negative_sampler """basic""" +887 21 evaluator """rankbased""" +887 22 dataset """wn18rr""" +887 22 model """transh""" +887 22 loss """bceaftersigmoid""" +887 22 regularizer """transh""" +887 22 
optimizer """adam""" +887 22 training_loop """owa""" +887 22 negative_sampler """basic""" +887 22 evaluator """rankbased""" +887 23 dataset """wn18rr""" +887 23 model """transh""" +887 23 loss """bceaftersigmoid""" +887 23 regularizer """transh""" +887 23 optimizer """adam""" +887 23 training_loop """owa""" +887 23 negative_sampler """basic""" +887 23 evaluator """rankbased""" +887 24 dataset """wn18rr""" +887 24 model """transh""" +887 24 loss """bceaftersigmoid""" +887 24 regularizer """transh""" +887 24 optimizer """adam""" +887 24 training_loop """owa""" +887 24 negative_sampler """basic""" +887 24 evaluator """rankbased""" +887 25 dataset """wn18rr""" +887 25 model """transh""" +887 25 loss """bceaftersigmoid""" +887 25 regularizer """transh""" +887 25 optimizer """adam""" +887 25 training_loop """owa""" +887 25 negative_sampler """basic""" +887 25 evaluator """rankbased""" +887 26 dataset """wn18rr""" +887 26 model """transh""" +887 26 loss """bceaftersigmoid""" +887 26 regularizer """transh""" +887 26 optimizer """adam""" +887 26 training_loop """owa""" +887 26 negative_sampler """basic""" +887 26 evaluator """rankbased""" +887 27 dataset """wn18rr""" +887 27 model """transh""" +887 27 loss """bceaftersigmoid""" +887 27 regularizer """transh""" +887 27 optimizer """adam""" +887 27 training_loop """owa""" +887 27 negative_sampler """basic""" +887 27 evaluator """rankbased""" +887 28 dataset """wn18rr""" +887 28 model """transh""" +887 28 loss """bceaftersigmoid""" +887 28 regularizer """transh""" +887 28 optimizer """adam""" +887 28 training_loop """owa""" +887 28 negative_sampler """basic""" +887 28 evaluator """rankbased""" +887 29 dataset """wn18rr""" +887 29 model """transh""" +887 29 loss """bceaftersigmoid""" +887 29 regularizer """transh""" +887 29 optimizer """adam""" +887 29 training_loop """owa""" +887 29 negative_sampler """basic""" +887 29 evaluator """rankbased""" +887 30 dataset """wn18rr""" +887 30 model """transh""" +887 30 loss 
"""bceaftersigmoid""" +887 30 regularizer """transh""" +887 30 optimizer """adam""" +887 30 training_loop """owa""" +887 30 negative_sampler """basic""" +887 30 evaluator """rankbased""" +887 31 dataset """wn18rr""" +887 31 model """transh""" +887 31 loss """bceaftersigmoid""" +887 31 regularizer """transh""" +887 31 optimizer """adam""" +887 31 training_loop """owa""" +887 31 negative_sampler """basic""" +887 31 evaluator """rankbased""" +887 32 dataset """wn18rr""" +887 32 model """transh""" +887 32 loss """bceaftersigmoid""" +887 32 regularizer """transh""" +887 32 optimizer """adam""" +887 32 training_loop """owa""" +887 32 negative_sampler """basic""" +887 32 evaluator """rankbased""" +887 33 dataset """wn18rr""" +887 33 model """transh""" +887 33 loss """bceaftersigmoid""" +887 33 regularizer """transh""" +887 33 optimizer """adam""" +887 33 training_loop """owa""" +887 33 negative_sampler """basic""" +887 33 evaluator """rankbased""" +887 34 dataset """wn18rr""" +887 34 model """transh""" +887 34 loss """bceaftersigmoid""" +887 34 regularizer """transh""" +887 34 optimizer """adam""" +887 34 training_loop """owa""" +887 34 negative_sampler """basic""" +887 34 evaluator """rankbased""" +887 35 dataset """wn18rr""" +887 35 model """transh""" +887 35 loss """bceaftersigmoid""" +887 35 regularizer """transh""" +887 35 optimizer """adam""" +887 35 training_loop """owa""" +887 35 negative_sampler """basic""" +887 35 evaluator """rankbased""" +887 36 dataset """wn18rr""" +887 36 model """transh""" +887 36 loss """bceaftersigmoid""" +887 36 regularizer """transh""" +887 36 optimizer """adam""" +887 36 training_loop """owa""" +887 36 negative_sampler """basic""" +887 36 evaluator """rankbased""" +887 37 dataset """wn18rr""" +887 37 model """transh""" +887 37 loss """bceaftersigmoid""" +887 37 regularizer """transh""" +887 37 optimizer """adam""" +887 37 training_loop """owa""" +887 37 negative_sampler """basic""" +887 37 evaluator """rankbased""" +887 38 dataset 
"""wn18rr""" +887 38 model """transh""" +887 38 loss """bceaftersigmoid""" +887 38 regularizer """transh""" +887 38 optimizer """adam""" +887 38 training_loop """owa""" +887 38 negative_sampler """basic""" +887 38 evaluator """rankbased""" +887 39 dataset """wn18rr""" +887 39 model """transh""" +887 39 loss """bceaftersigmoid""" +887 39 regularizer """transh""" +887 39 optimizer """adam""" +887 39 training_loop """owa""" +887 39 negative_sampler """basic""" +887 39 evaluator """rankbased""" +887 40 dataset """wn18rr""" +887 40 model """transh""" +887 40 loss """bceaftersigmoid""" +887 40 regularizer """transh""" +887 40 optimizer """adam""" +887 40 training_loop """owa""" +887 40 negative_sampler """basic""" +887 40 evaluator """rankbased""" +887 41 dataset """wn18rr""" +887 41 model """transh""" +887 41 loss """bceaftersigmoid""" +887 41 regularizer """transh""" +887 41 optimizer """adam""" +887 41 training_loop """owa""" +887 41 negative_sampler """basic""" +887 41 evaluator """rankbased""" +887 42 dataset """wn18rr""" +887 42 model """transh""" +887 42 loss """bceaftersigmoid""" +887 42 regularizer """transh""" +887 42 optimizer """adam""" +887 42 training_loop """owa""" +887 42 negative_sampler """basic""" +887 42 evaluator """rankbased""" +887 43 dataset """wn18rr""" +887 43 model """transh""" +887 43 loss """bceaftersigmoid""" +887 43 regularizer """transh""" +887 43 optimizer """adam""" +887 43 training_loop """owa""" +887 43 negative_sampler """basic""" +887 43 evaluator """rankbased""" +887 44 dataset """wn18rr""" +887 44 model """transh""" +887 44 loss """bceaftersigmoid""" +887 44 regularizer """transh""" +887 44 optimizer """adam""" +887 44 training_loop """owa""" +887 44 negative_sampler """basic""" +887 44 evaluator """rankbased""" +887 45 dataset """wn18rr""" +887 45 model """transh""" +887 45 loss """bceaftersigmoid""" +887 45 regularizer """transh""" +887 45 optimizer """adam""" +887 45 training_loop """owa""" +887 45 negative_sampler """basic""" 
+887 45 evaluator """rankbased""" +887 46 dataset """wn18rr""" +887 46 model """transh""" +887 46 loss """bceaftersigmoid""" +887 46 regularizer """transh""" +887 46 optimizer """adam""" +887 46 training_loop """owa""" +887 46 negative_sampler """basic""" +887 46 evaluator """rankbased""" +887 47 dataset """wn18rr""" +887 47 model """transh""" +887 47 loss """bceaftersigmoid""" +887 47 regularizer """transh""" +887 47 optimizer """adam""" +887 47 training_loop """owa""" +887 47 negative_sampler """basic""" +887 47 evaluator """rankbased""" +887 48 dataset """wn18rr""" +887 48 model """transh""" +887 48 loss """bceaftersigmoid""" +887 48 regularizer """transh""" +887 48 optimizer """adam""" +887 48 training_loop """owa""" +887 48 negative_sampler """basic""" +887 48 evaluator """rankbased""" +887 49 dataset """wn18rr""" +887 49 model """transh""" +887 49 loss """bceaftersigmoid""" +887 49 regularizer """transh""" +887 49 optimizer """adam""" +887 49 training_loop """owa""" +887 49 negative_sampler """basic""" +887 49 evaluator """rankbased""" +887 50 dataset """wn18rr""" +887 50 model """transh""" +887 50 loss """bceaftersigmoid""" +887 50 regularizer """transh""" +887 50 optimizer """adam""" +887 50 training_loop """owa""" +887 50 negative_sampler """basic""" +887 50 evaluator """rankbased""" +887 51 dataset """wn18rr""" +887 51 model """transh""" +887 51 loss """bceaftersigmoid""" +887 51 regularizer """transh""" +887 51 optimizer """adam""" +887 51 training_loop """owa""" +887 51 negative_sampler """basic""" +887 51 evaluator """rankbased""" +887 52 dataset """wn18rr""" +887 52 model """transh""" +887 52 loss """bceaftersigmoid""" +887 52 regularizer """transh""" +887 52 optimizer """adam""" +887 52 training_loop """owa""" +887 52 negative_sampler """basic""" +887 52 evaluator """rankbased""" +887 53 dataset """wn18rr""" +887 53 model """transh""" +887 53 loss """bceaftersigmoid""" +887 53 regularizer """transh""" +887 53 optimizer """adam""" +887 53 
training_loop """owa""" +887 53 negative_sampler """basic""" +887 53 evaluator """rankbased""" +887 54 dataset """wn18rr""" +887 54 model """transh""" +887 54 loss """bceaftersigmoid""" +887 54 regularizer """transh""" +887 54 optimizer """adam""" +887 54 training_loop """owa""" +887 54 negative_sampler """basic""" +887 54 evaluator """rankbased""" +887 55 dataset """wn18rr""" +887 55 model """transh""" +887 55 loss """bceaftersigmoid""" +887 55 regularizer """transh""" +887 55 optimizer """adam""" +887 55 training_loop """owa""" +887 55 negative_sampler """basic""" +887 55 evaluator """rankbased""" +887 56 dataset """wn18rr""" +887 56 model """transh""" +887 56 loss """bceaftersigmoid""" +887 56 regularizer """transh""" +887 56 optimizer """adam""" +887 56 training_loop """owa""" +887 56 negative_sampler """basic""" +887 56 evaluator """rankbased""" +887 57 dataset """wn18rr""" +887 57 model """transh""" +887 57 loss """bceaftersigmoid""" +887 57 regularizer """transh""" +887 57 optimizer """adam""" +887 57 training_loop """owa""" +887 57 negative_sampler """basic""" +887 57 evaluator """rankbased""" +887 58 dataset """wn18rr""" +887 58 model """transh""" +887 58 loss """bceaftersigmoid""" +887 58 regularizer """transh""" +887 58 optimizer """adam""" +887 58 training_loop """owa""" +887 58 negative_sampler """basic""" +887 58 evaluator """rankbased""" +887 59 dataset """wn18rr""" +887 59 model """transh""" +887 59 loss """bceaftersigmoid""" +887 59 regularizer """transh""" +887 59 optimizer """adam""" +887 59 training_loop """owa""" +887 59 negative_sampler """basic""" +887 59 evaluator """rankbased""" +887 60 dataset """wn18rr""" +887 60 model """transh""" +887 60 loss """bceaftersigmoid""" +887 60 regularizer """transh""" +887 60 optimizer """adam""" +887 60 training_loop """owa""" +887 60 negative_sampler """basic""" +887 60 evaluator """rankbased""" +887 61 dataset """wn18rr""" +887 61 model """transh""" +887 61 loss """bceaftersigmoid""" +887 61 regularizer 
"""transh""" +887 61 optimizer """adam""" +887 61 training_loop """owa""" +887 61 negative_sampler """basic""" +887 61 evaluator """rankbased""" +887 62 dataset """wn18rr""" +887 62 model """transh""" +887 62 loss """bceaftersigmoid""" +887 62 regularizer """transh""" +887 62 optimizer """adam""" +887 62 training_loop """owa""" +887 62 negative_sampler """basic""" +887 62 evaluator """rankbased""" +887 63 dataset """wn18rr""" +887 63 model """transh""" +887 63 loss """bceaftersigmoid""" +887 63 regularizer """transh""" +887 63 optimizer """adam""" +887 63 training_loop """owa""" +887 63 negative_sampler """basic""" +887 63 evaluator """rankbased""" +887 64 dataset """wn18rr""" +887 64 model """transh""" +887 64 loss """bceaftersigmoid""" +887 64 regularizer """transh""" +887 64 optimizer """adam""" +887 64 training_loop """owa""" +887 64 negative_sampler """basic""" +887 64 evaluator """rankbased""" +887 65 dataset """wn18rr""" +887 65 model """transh""" +887 65 loss """bceaftersigmoid""" +887 65 regularizer """transh""" +887 65 optimizer """adam""" +887 65 training_loop """owa""" +887 65 negative_sampler """basic""" +887 65 evaluator """rankbased""" +887 66 dataset """wn18rr""" +887 66 model """transh""" +887 66 loss """bceaftersigmoid""" +887 66 regularizer """transh""" +887 66 optimizer """adam""" +887 66 training_loop """owa""" +887 66 negative_sampler """basic""" +887 66 evaluator """rankbased""" +887 67 dataset """wn18rr""" +887 67 model """transh""" +887 67 loss """bceaftersigmoid""" +887 67 regularizer """transh""" +887 67 optimizer """adam""" +887 67 training_loop """owa""" +887 67 negative_sampler """basic""" +887 67 evaluator """rankbased""" +887 68 dataset """wn18rr""" +887 68 model """transh""" +887 68 loss """bceaftersigmoid""" +887 68 regularizer """transh""" +887 68 optimizer """adam""" +887 68 training_loop """owa""" +887 68 negative_sampler """basic""" +887 68 evaluator """rankbased""" +887 69 dataset """wn18rr""" +887 69 model """transh""" +887 
69 loss """bceaftersigmoid""" +887 69 regularizer """transh""" +887 69 optimizer """adam""" +887 69 training_loop """owa""" +887 69 negative_sampler """basic""" +887 69 evaluator """rankbased""" +887 70 dataset """wn18rr""" +887 70 model """transh""" +887 70 loss """bceaftersigmoid""" +887 70 regularizer """transh""" +887 70 optimizer """adam""" +887 70 training_loop """owa""" +887 70 negative_sampler """basic""" +887 70 evaluator """rankbased""" +888 1 model.embedding_dim 2.0 +888 1 model.scoring_fct_norm 2.0 +888 1 regularizer.weight 0.11468123356139506 +888 1 optimizer.lr 0.002697846707544926 +888 1 negative_sampler.num_negs_per_pos 66.0 +888 1 training.batch_size 1.0 +888 2 model.embedding_dim 1.0 +888 2 model.scoring_fct_norm 2.0 +888 2 regularizer.weight 0.03336165870224071 +888 2 optimizer.lr 0.0013194427344034232 +888 2 negative_sampler.num_negs_per_pos 92.0 +888 2 training.batch_size 1.0 +888 3 model.embedding_dim 1.0 +888 3 model.scoring_fct_norm 2.0 +888 3 regularizer.weight 0.020803273431978655 +888 3 optimizer.lr 0.04364261876448332 +888 3 negative_sampler.num_negs_per_pos 42.0 +888 3 training.batch_size 1.0 +888 4 model.embedding_dim 0.0 +888 4 model.scoring_fct_norm 1.0 +888 4 regularizer.weight 0.050140091844464774 +888 4 optimizer.lr 0.016412187793274035 +888 4 negative_sampler.num_negs_per_pos 61.0 +888 4 training.batch_size 1.0 +888 5 model.embedding_dim 1.0 +888 5 model.scoring_fct_norm 1.0 +888 5 regularizer.weight 0.20309717001299513 +888 5 optimizer.lr 0.015367891438387722 +888 5 negative_sampler.num_negs_per_pos 50.0 +888 5 training.batch_size 1.0 +888 6 model.embedding_dim 0.0 +888 6 model.scoring_fct_norm 2.0 +888 6 regularizer.weight 0.07117413756910727 +888 6 optimizer.lr 0.0021306482458430987 +888 6 negative_sampler.num_negs_per_pos 44.0 +888 6 training.batch_size 1.0 +888 7 model.embedding_dim 1.0 +888 7 model.scoring_fct_norm 1.0 +888 7 regularizer.weight 0.015030701390997584 +888 7 optimizer.lr 0.08715622146308717 +888 7 
negative_sampler.num_negs_per_pos 26.0 +888 7 training.batch_size 2.0 +888 8 model.embedding_dim 2.0 +888 8 model.scoring_fct_norm 1.0 +888 8 regularizer.weight 0.07256958413787545 +888 8 optimizer.lr 0.001932732769334841 +888 8 negative_sampler.num_negs_per_pos 59.0 +888 8 training.batch_size 0.0 +888 9 model.embedding_dim 1.0 +888 9 model.scoring_fct_norm 2.0 +888 9 regularizer.weight 0.012948052283749897 +888 9 optimizer.lr 0.02251548976546844 +888 9 negative_sampler.num_negs_per_pos 10.0 +888 9 training.batch_size 1.0 +888 10 model.embedding_dim 0.0 +888 10 model.scoring_fct_norm 1.0 +888 10 regularizer.weight 0.1441812875744601 +888 10 optimizer.lr 0.05767602347448325 +888 10 negative_sampler.num_negs_per_pos 99.0 +888 10 training.batch_size 2.0 +888 11 model.embedding_dim 2.0 +888 11 model.scoring_fct_norm 1.0 +888 11 regularizer.weight 0.20781198301977494 +888 11 optimizer.lr 0.0014814024517577529 +888 11 negative_sampler.num_negs_per_pos 93.0 +888 11 training.batch_size 2.0 +888 12 model.embedding_dim 1.0 +888 12 model.scoring_fct_norm 2.0 +888 12 regularizer.weight 0.033501642844098393 +888 12 optimizer.lr 0.00149948080113281 +888 12 negative_sampler.num_negs_per_pos 20.0 +888 12 training.batch_size 2.0 +888 13 model.embedding_dim 0.0 +888 13 model.scoring_fct_norm 2.0 +888 13 regularizer.weight 0.021670987954602417 +888 13 optimizer.lr 0.04095701424055743 +888 13 negative_sampler.num_negs_per_pos 44.0 +888 13 training.batch_size 2.0 +888 14 model.embedding_dim 1.0 +888 14 model.scoring_fct_norm 1.0 +888 14 regularizer.weight 0.1637517703286311 +888 14 optimizer.lr 0.05629066615671389 +888 14 negative_sampler.num_negs_per_pos 66.0 +888 14 training.batch_size 0.0 +888 15 model.embedding_dim 1.0 +888 15 model.scoring_fct_norm 2.0 +888 15 regularizer.weight 0.02547450343787881 +888 15 optimizer.lr 0.05083850455158534 +888 15 negative_sampler.num_negs_per_pos 46.0 +888 15 training.batch_size 2.0 +888 16 model.embedding_dim 2.0 +888 16 model.scoring_fct_norm 
1.0 +888 16 regularizer.weight 0.2669249457985483 +888 16 optimizer.lr 0.0021134044374565195 +888 16 negative_sampler.num_negs_per_pos 89.0 +888 16 training.batch_size 1.0 +888 17 model.embedding_dim 1.0 +888 17 model.scoring_fct_norm 1.0 +888 17 regularizer.weight 0.023859093625337308 +888 17 optimizer.lr 0.01535575946767482 +888 17 negative_sampler.num_negs_per_pos 59.0 +888 17 training.batch_size 1.0 +888 18 model.embedding_dim 1.0 +888 18 model.scoring_fct_norm 1.0 +888 18 regularizer.weight 0.24538383520778587 +888 18 optimizer.lr 0.01701753281723584 +888 18 negative_sampler.num_negs_per_pos 13.0 +888 18 training.batch_size 0.0 +888 19 model.embedding_dim 0.0 +888 19 model.scoring_fct_norm 2.0 +888 19 regularizer.weight 0.13743999059360257 +888 19 optimizer.lr 0.0035956348321552994 +888 19 negative_sampler.num_negs_per_pos 35.0 +888 19 training.batch_size 0.0 +888 20 model.embedding_dim 0.0 +888 20 model.scoring_fct_norm 2.0 +888 20 regularizer.weight 0.0790030308098173 +888 20 optimizer.lr 0.004908584549805246 +888 20 negative_sampler.num_negs_per_pos 15.0 +888 20 training.batch_size 2.0 +888 21 model.embedding_dim 1.0 +888 21 model.scoring_fct_norm 1.0 +888 21 regularizer.weight 0.11339441895560717 +888 21 optimizer.lr 0.04183813547524366 +888 21 negative_sampler.num_negs_per_pos 47.0 +888 21 training.batch_size 2.0 +888 22 model.embedding_dim 1.0 +888 22 model.scoring_fct_norm 2.0 +888 22 regularizer.weight 0.13229742674421133 +888 22 optimizer.lr 0.013075581049085475 +888 22 negative_sampler.num_negs_per_pos 77.0 +888 22 training.batch_size 2.0 +888 23 model.embedding_dim 0.0 +888 23 model.scoring_fct_norm 2.0 +888 23 regularizer.weight 0.026937859804236856 +888 23 optimizer.lr 0.026337648017624785 +888 23 negative_sampler.num_negs_per_pos 62.0 +888 23 training.batch_size 0.0 +888 24 model.embedding_dim 1.0 +888 24 model.scoring_fct_norm 2.0 +888 24 regularizer.weight 0.2566372227091699 +888 24 optimizer.lr 0.015163581028193117 +888 24 
negative_sampler.num_negs_per_pos 51.0 +888 24 training.batch_size 0.0 +888 25 model.embedding_dim 0.0 +888 25 model.scoring_fct_norm 2.0 +888 25 regularizer.weight 0.042935997697679945 +888 25 optimizer.lr 0.06749154772147586 +888 25 negative_sampler.num_negs_per_pos 8.0 +888 25 training.batch_size 0.0 +888 26 model.embedding_dim 1.0 +888 26 model.scoring_fct_norm 1.0 +888 26 regularizer.weight 0.28468856226823913 +888 26 optimizer.lr 0.002212963484009015 +888 26 negative_sampler.num_negs_per_pos 28.0 +888 26 training.batch_size 2.0 +888 27 model.embedding_dim 0.0 +888 27 model.scoring_fct_norm 1.0 +888 27 regularizer.weight 0.047292346855784435 +888 27 optimizer.lr 0.009381110146662318 +888 27 negative_sampler.num_negs_per_pos 48.0 +888 27 training.batch_size 1.0 +888 28 model.embedding_dim 0.0 +888 28 model.scoring_fct_norm 1.0 +888 28 regularizer.weight 0.20073042228204793 +888 28 optimizer.lr 0.007703459918865825 +888 28 negative_sampler.num_negs_per_pos 97.0 +888 28 training.batch_size 1.0 +888 29 model.embedding_dim 0.0 +888 29 model.scoring_fct_norm 2.0 +888 29 regularizer.weight 0.023220213580345546 +888 29 optimizer.lr 0.0010090738884525887 +888 29 negative_sampler.num_negs_per_pos 50.0 +888 29 training.batch_size 0.0 +888 30 model.embedding_dim 2.0 +888 30 model.scoring_fct_norm 2.0 +888 30 regularizer.weight 0.09843157978268734 +888 30 optimizer.lr 0.0031171707464602047 +888 30 negative_sampler.num_negs_per_pos 87.0 +888 30 training.batch_size 1.0 +888 31 model.embedding_dim 0.0 +888 31 model.scoring_fct_norm 1.0 +888 31 regularizer.weight 0.03547822790407808 +888 31 optimizer.lr 0.007723682563823482 +888 31 negative_sampler.num_negs_per_pos 37.0 +888 31 training.batch_size 2.0 +888 32 model.embedding_dim 1.0 +888 32 model.scoring_fct_norm 1.0 +888 32 regularizer.weight 0.01125615634144447 +888 32 optimizer.lr 0.02180872308311109 +888 32 negative_sampler.num_negs_per_pos 45.0 +888 32 training.batch_size 0.0 +888 33 model.embedding_dim 1.0 +888 33 
model.scoring_fct_norm 2.0 +888 33 regularizer.weight 0.2766482100816353 +888 33 optimizer.lr 0.09032628845247637 +888 33 negative_sampler.num_negs_per_pos 87.0 +888 33 training.batch_size 0.0 +888 34 model.embedding_dim 2.0 +888 34 model.scoring_fct_norm 1.0 +888 34 regularizer.weight 0.018818978583434773 +888 34 optimizer.lr 0.031271359119110366 +888 34 negative_sampler.num_negs_per_pos 25.0 +888 34 training.batch_size 2.0 +888 35 model.embedding_dim 0.0 +888 35 model.scoring_fct_norm 1.0 +888 35 regularizer.weight 0.010376528538520648 +888 35 optimizer.lr 0.0037764620350872443 +888 35 negative_sampler.num_negs_per_pos 37.0 +888 35 training.batch_size 0.0 +888 36 model.embedding_dim 0.0 +888 36 model.scoring_fct_norm 1.0 +888 36 regularizer.weight 0.1721758526340628 +888 36 optimizer.lr 0.014409800779191401 +888 36 negative_sampler.num_negs_per_pos 26.0 +888 36 training.batch_size 2.0 +888 37 model.embedding_dim 0.0 +888 37 model.scoring_fct_norm 1.0 +888 37 regularizer.weight 0.07761457216288639 +888 37 optimizer.lr 0.0015978369819207067 +888 37 negative_sampler.num_negs_per_pos 45.0 +888 37 training.batch_size 0.0 +888 38 model.embedding_dim 2.0 +888 38 model.scoring_fct_norm 2.0 +888 38 regularizer.weight 0.15996506250352185 +888 38 optimizer.lr 0.0027335351591774655 +888 38 negative_sampler.num_negs_per_pos 50.0 +888 38 training.batch_size 2.0 +888 39 model.embedding_dim 2.0 +888 39 model.scoring_fct_norm 1.0 +888 39 regularizer.weight 0.01978232486603721 +888 39 optimizer.lr 0.0011447034617123157 +888 39 negative_sampler.num_negs_per_pos 42.0 +888 39 training.batch_size 1.0 +888 40 model.embedding_dim 1.0 +888 40 model.scoring_fct_norm 2.0 +888 40 regularizer.weight 0.1448186458006769 +888 40 optimizer.lr 0.05203183590036774 +888 40 negative_sampler.num_negs_per_pos 87.0 +888 40 training.batch_size 2.0 +888 41 model.embedding_dim 2.0 +888 41 model.scoring_fct_norm 2.0 +888 41 regularizer.weight 0.011356622762490448 +888 41 optimizer.lr 0.00405044529599639 
+888 41 negative_sampler.num_negs_per_pos 39.0 +888 41 training.batch_size 2.0 +888 42 model.embedding_dim 1.0 +888 42 model.scoring_fct_norm 1.0 +888 42 regularizer.weight 0.011259847187117897 +888 42 optimizer.lr 0.015173182108609807 +888 42 negative_sampler.num_negs_per_pos 39.0 +888 42 training.batch_size 2.0 +888 43 model.embedding_dim 2.0 +888 43 model.scoring_fct_norm 1.0 +888 43 regularizer.weight 0.18176278430963447 +888 43 optimizer.lr 0.0022265112931580833 +888 43 negative_sampler.num_negs_per_pos 5.0 +888 43 training.batch_size 1.0 +888 44 model.embedding_dim 2.0 +888 44 model.scoring_fct_norm 2.0 +888 44 regularizer.weight 0.014105997150877684 +888 44 optimizer.lr 0.011559075158548813 +888 44 negative_sampler.num_negs_per_pos 43.0 +888 44 training.batch_size 0.0 +888 45 model.embedding_dim 2.0 +888 45 model.scoring_fct_norm 2.0 +888 45 regularizer.weight 0.02307724590829163 +888 45 optimizer.lr 0.015025317620590091 +888 45 negative_sampler.num_negs_per_pos 13.0 +888 45 training.batch_size 1.0 +888 46 model.embedding_dim 0.0 +888 46 model.scoring_fct_norm 1.0 +888 46 regularizer.weight 0.13004930812270626 +888 46 optimizer.lr 0.005395932199344684 +888 46 negative_sampler.num_negs_per_pos 19.0 +888 46 training.batch_size 1.0 +888 47 model.embedding_dim 2.0 +888 47 model.scoring_fct_norm 2.0 +888 47 regularizer.weight 0.029850908465717804 +888 47 optimizer.lr 0.0024062417110600376 +888 47 negative_sampler.num_negs_per_pos 85.0 +888 47 training.batch_size 1.0 +888 48 model.embedding_dim 2.0 +888 48 model.scoring_fct_norm 2.0 +888 48 regularizer.weight 0.2737797467326086 +888 48 optimizer.lr 0.0016242430047809398 +888 48 negative_sampler.num_negs_per_pos 91.0 +888 48 training.batch_size 2.0 +888 49 model.embedding_dim 1.0 +888 49 model.scoring_fct_norm 1.0 +888 49 regularizer.weight 0.015419862080333678 +888 49 optimizer.lr 0.019243998729155096 +888 49 negative_sampler.num_negs_per_pos 54.0 +888 49 training.batch_size 2.0 +888 50 model.embedding_dim 1.0 
+888 50 model.scoring_fct_norm 2.0 +888 50 regularizer.weight 0.07338838656230444 +888 50 optimizer.lr 0.06230171194630302 +888 50 negative_sampler.num_negs_per_pos 3.0 +888 50 training.batch_size 1.0 +888 51 model.embedding_dim 0.0 +888 51 model.scoring_fct_norm 1.0 +888 51 regularizer.weight 0.022812191729906426 +888 51 optimizer.lr 0.0023512390304950416 +888 51 negative_sampler.num_negs_per_pos 53.0 +888 51 training.batch_size 2.0 +888 52 model.embedding_dim 2.0 +888 52 model.scoring_fct_norm 1.0 +888 52 regularizer.weight 0.04985982952704494 +888 52 optimizer.lr 0.00457139547330602 +888 52 negative_sampler.num_negs_per_pos 41.0 +888 52 training.batch_size 2.0 +888 53 model.embedding_dim 1.0 +888 53 model.scoring_fct_norm 2.0 +888 53 regularizer.weight 0.011206854729454966 +888 53 optimizer.lr 0.010490113841727267 +888 53 negative_sampler.num_negs_per_pos 95.0 +888 53 training.batch_size 2.0 +888 54 model.embedding_dim 1.0 +888 54 model.scoring_fct_norm 1.0 +888 54 regularizer.weight 0.03504693456420754 +888 54 optimizer.lr 0.0064166708839249595 +888 54 negative_sampler.num_negs_per_pos 98.0 +888 54 training.batch_size 1.0 +888 55 model.embedding_dim 2.0 +888 55 model.scoring_fct_norm 2.0 +888 55 regularizer.weight 0.013000099712368604 +888 55 optimizer.lr 0.006318307644531567 +888 55 negative_sampler.num_negs_per_pos 91.0 +888 55 training.batch_size 2.0 +888 56 model.embedding_dim 0.0 +888 56 model.scoring_fct_norm 2.0 +888 56 regularizer.weight 0.04246877003132977 +888 56 optimizer.lr 0.011211706557741272 +888 56 negative_sampler.num_negs_per_pos 9.0 +888 56 training.batch_size 0.0 +888 57 model.embedding_dim 0.0 +888 57 model.scoring_fct_norm 1.0 +888 57 regularizer.weight 0.015361727515222407 +888 57 optimizer.lr 0.040396424929644126 +888 57 negative_sampler.num_negs_per_pos 46.0 +888 57 training.batch_size 0.0 +888 58 model.embedding_dim 2.0 +888 58 model.scoring_fct_norm 2.0 +888 58 regularizer.weight 0.09649151413278152 +888 58 optimizer.lr 
0.007084876306311991 +888 58 negative_sampler.num_negs_per_pos 60.0 +888 58 training.batch_size 1.0 +888 59 model.embedding_dim 1.0 +888 59 model.scoring_fct_norm 1.0 +888 59 regularizer.weight 0.1662268277017815 +888 59 optimizer.lr 0.008236290816531102 +888 59 negative_sampler.num_negs_per_pos 16.0 +888 59 training.batch_size 0.0 +888 60 model.embedding_dim 0.0 +888 60 model.scoring_fct_norm 2.0 +888 60 regularizer.weight 0.05051123666928727 +888 60 optimizer.lr 0.001460175299985629 +888 60 negative_sampler.num_negs_per_pos 95.0 +888 60 training.batch_size 2.0 +888 61 model.embedding_dim 0.0 +888 61 model.scoring_fct_norm 2.0 +888 61 regularizer.weight 0.089698546458524 +888 61 optimizer.lr 0.00403507796222179 +888 61 negative_sampler.num_negs_per_pos 35.0 +888 61 training.batch_size 0.0 +888 62 model.embedding_dim 2.0 +888 62 model.scoring_fct_norm 1.0 +888 62 regularizer.weight 0.01282103178460003 +888 62 optimizer.lr 0.010117583804142486 +888 62 negative_sampler.num_negs_per_pos 38.0 +888 62 training.batch_size 0.0 +888 63 model.embedding_dim 2.0 +888 63 model.scoring_fct_norm 1.0 +888 63 regularizer.weight 0.062384148298079735 +888 63 optimizer.lr 0.014519880609079031 +888 63 negative_sampler.num_negs_per_pos 17.0 +888 63 training.batch_size 2.0 +888 64 model.embedding_dim 0.0 +888 64 model.scoring_fct_norm 1.0 +888 64 regularizer.weight 0.0382444902687485 +888 64 optimizer.lr 0.03018075516855996 +888 64 negative_sampler.num_negs_per_pos 80.0 +888 64 training.batch_size 0.0 +888 65 model.embedding_dim 2.0 +888 65 model.scoring_fct_norm 1.0 +888 65 regularizer.weight 0.030913676505377203 +888 65 optimizer.lr 0.0022389037792388865 +888 65 negative_sampler.num_negs_per_pos 21.0 +888 65 training.batch_size 0.0 +888 1 dataset """wn18rr""" +888 1 model """transh""" +888 1 loss """softplus""" +888 1 regularizer """transh""" +888 1 optimizer """adam""" +888 1 training_loop """owa""" +888 1 negative_sampler """basic""" +888 1 evaluator """rankbased""" +888 2 dataset 
"""wn18rr""" +888 2 model """transh""" +888 2 loss """softplus""" +888 2 regularizer """transh""" +888 2 optimizer """adam""" +888 2 training_loop """owa""" +888 2 negative_sampler """basic""" +888 2 evaluator """rankbased""" +888 3 dataset """wn18rr""" +888 3 model """transh""" +888 3 loss """softplus""" +888 3 regularizer """transh""" +888 3 optimizer """adam""" +888 3 training_loop """owa""" +888 3 negative_sampler """basic""" +888 3 evaluator """rankbased""" +888 4 dataset """wn18rr""" +888 4 model """transh""" +888 4 loss """softplus""" +888 4 regularizer """transh""" +888 4 optimizer """adam""" +888 4 training_loop """owa""" +888 4 negative_sampler """basic""" +888 4 evaluator """rankbased""" +888 5 dataset """wn18rr""" +888 5 model """transh""" +888 5 loss """softplus""" +888 5 regularizer """transh""" +888 5 optimizer """adam""" +888 5 training_loop """owa""" +888 5 negative_sampler """basic""" +888 5 evaluator """rankbased""" +888 6 dataset """wn18rr""" +888 6 model """transh""" +888 6 loss """softplus""" +888 6 regularizer """transh""" +888 6 optimizer """adam""" +888 6 training_loop """owa""" +888 6 negative_sampler """basic""" +888 6 evaluator """rankbased""" +888 7 dataset """wn18rr""" +888 7 model """transh""" +888 7 loss """softplus""" +888 7 regularizer """transh""" +888 7 optimizer """adam""" +888 7 training_loop """owa""" +888 7 negative_sampler """basic""" +888 7 evaluator """rankbased""" +888 8 dataset """wn18rr""" +888 8 model """transh""" +888 8 loss """softplus""" +888 8 regularizer """transh""" +888 8 optimizer """adam""" +888 8 training_loop """owa""" +888 8 negative_sampler """basic""" +888 8 evaluator """rankbased""" +888 9 dataset """wn18rr""" +888 9 model """transh""" +888 9 loss """softplus""" +888 9 regularizer """transh""" +888 9 optimizer """adam""" +888 9 training_loop """owa""" +888 9 negative_sampler """basic""" +888 9 evaluator """rankbased""" +888 10 dataset """wn18rr""" +888 10 model """transh""" +888 10 loss """softplus""" 
+888 10 regularizer """transh""" +888 10 optimizer """adam""" +888 10 training_loop """owa""" +888 10 negative_sampler """basic""" +888 10 evaluator """rankbased""" +888 11 dataset """wn18rr""" +888 11 model """transh""" +888 11 loss """softplus""" +888 11 regularizer """transh""" +888 11 optimizer """adam""" +888 11 training_loop """owa""" +888 11 negative_sampler """basic""" +888 11 evaluator """rankbased""" +888 12 dataset """wn18rr""" +888 12 model """transh""" +888 12 loss """softplus""" +888 12 regularizer """transh""" +888 12 optimizer """adam""" +888 12 training_loop """owa""" +888 12 negative_sampler """basic""" +888 12 evaluator """rankbased""" +888 13 dataset """wn18rr""" +888 13 model """transh""" +888 13 loss """softplus""" +888 13 regularizer """transh""" +888 13 optimizer """adam""" +888 13 training_loop """owa""" +888 13 negative_sampler """basic""" +888 13 evaluator """rankbased""" +888 14 dataset """wn18rr""" +888 14 model """transh""" +888 14 loss """softplus""" +888 14 regularizer """transh""" +888 14 optimizer """adam""" +888 14 training_loop """owa""" +888 14 negative_sampler """basic""" +888 14 evaluator """rankbased""" +888 15 dataset """wn18rr""" +888 15 model """transh""" +888 15 loss """softplus""" +888 15 regularizer """transh""" +888 15 optimizer """adam""" +888 15 training_loop """owa""" +888 15 negative_sampler """basic""" +888 15 evaluator """rankbased""" +888 16 dataset """wn18rr""" +888 16 model """transh""" +888 16 loss """softplus""" +888 16 regularizer """transh""" +888 16 optimizer """adam""" +888 16 training_loop """owa""" +888 16 negative_sampler """basic""" +888 16 evaluator """rankbased""" +888 17 dataset """wn18rr""" +888 17 model """transh""" +888 17 loss """softplus""" +888 17 regularizer """transh""" +888 17 optimizer """adam""" +888 17 training_loop """owa""" +888 17 negative_sampler """basic""" +888 17 evaluator """rankbased""" +888 18 dataset """wn18rr""" +888 18 model """transh""" +888 18 loss """softplus""" +888 18 
regularizer """transh""" +888 18 optimizer """adam""" +888 18 training_loop """owa""" +888 18 negative_sampler """basic""" +888 18 evaluator """rankbased""" +888 19 dataset """wn18rr""" +888 19 model """transh""" +888 19 loss """softplus""" +888 19 regularizer """transh""" +888 19 optimizer """adam""" +888 19 training_loop """owa""" +888 19 negative_sampler """basic""" +888 19 evaluator """rankbased""" +888 20 dataset """wn18rr""" +888 20 model """transh""" +888 20 loss """softplus""" +888 20 regularizer """transh""" +888 20 optimizer """adam""" +888 20 training_loop """owa""" +888 20 negative_sampler """basic""" +888 20 evaluator """rankbased""" +888 21 dataset """wn18rr""" +888 21 model """transh""" +888 21 loss """softplus""" +888 21 regularizer """transh""" +888 21 optimizer """adam""" +888 21 training_loop """owa""" +888 21 negative_sampler """basic""" +888 21 evaluator """rankbased""" +888 22 dataset """wn18rr""" +888 22 model """transh""" +888 22 loss """softplus""" +888 22 regularizer """transh""" +888 22 optimizer """adam""" +888 22 training_loop """owa""" +888 22 negative_sampler """basic""" +888 22 evaluator """rankbased""" +888 23 dataset """wn18rr""" +888 23 model """transh""" +888 23 loss """softplus""" +888 23 regularizer """transh""" +888 23 optimizer """adam""" +888 23 training_loop """owa""" +888 23 negative_sampler """basic""" +888 23 evaluator """rankbased""" +888 24 dataset """wn18rr""" +888 24 model """transh""" +888 24 loss """softplus""" +888 24 regularizer """transh""" +888 24 optimizer """adam""" +888 24 training_loop """owa""" +888 24 negative_sampler """basic""" +888 24 evaluator """rankbased""" +888 25 dataset """wn18rr""" +888 25 model """transh""" +888 25 loss """softplus""" +888 25 regularizer """transh""" +888 25 optimizer """adam""" +888 25 training_loop """owa""" +888 25 negative_sampler """basic""" +888 25 evaluator """rankbased""" +888 26 dataset """wn18rr""" +888 26 model """transh""" +888 26 loss """softplus""" +888 26 
regularizer """transh""" +888 26 optimizer """adam""" +888 26 training_loop """owa""" +888 26 negative_sampler """basic""" +888 26 evaluator """rankbased""" +888 27 dataset """wn18rr""" +888 27 model """transh""" +888 27 loss """softplus""" +888 27 regularizer """transh""" +888 27 optimizer """adam""" +888 27 training_loop """owa""" +888 27 negative_sampler """basic""" +888 27 evaluator """rankbased""" +888 28 dataset """wn18rr""" +888 28 model """transh""" +888 28 loss """softplus""" +888 28 regularizer """transh""" +888 28 optimizer """adam""" +888 28 training_loop """owa""" +888 28 negative_sampler """basic""" +888 28 evaluator """rankbased""" +888 29 dataset """wn18rr""" +888 29 model """transh""" +888 29 loss """softplus""" +888 29 regularizer """transh""" +888 29 optimizer """adam""" +888 29 training_loop """owa""" +888 29 negative_sampler """basic""" +888 29 evaluator """rankbased""" +888 30 dataset """wn18rr""" +888 30 model """transh""" +888 30 loss """softplus""" +888 30 regularizer """transh""" +888 30 optimizer """adam""" +888 30 training_loop """owa""" +888 30 negative_sampler """basic""" +888 30 evaluator """rankbased""" +888 31 dataset """wn18rr""" +888 31 model """transh""" +888 31 loss """softplus""" +888 31 regularizer """transh""" +888 31 optimizer """adam""" +888 31 training_loop """owa""" +888 31 negative_sampler """basic""" +888 31 evaluator """rankbased""" +888 32 dataset """wn18rr""" +888 32 model """transh""" +888 32 loss """softplus""" +888 32 regularizer """transh""" +888 32 optimizer """adam""" +888 32 training_loop """owa""" +888 32 negative_sampler """basic""" +888 32 evaluator """rankbased""" +888 33 dataset """wn18rr""" +888 33 model """transh""" +888 33 loss """softplus""" +888 33 regularizer """transh""" +888 33 optimizer """adam""" +888 33 training_loop """owa""" +888 33 negative_sampler """basic""" +888 33 evaluator """rankbased""" +888 34 dataset """wn18rr""" +888 34 model """transh""" +888 34 loss """softplus""" +888 34 
regularizer """transh""" +888 34 optimizer """adam""" +888 34 training_loop """owa""" +888 34 negative_sampler """basic""" +888 34 evaluator """rankbased""" +888 35 dataset """wn18rr""" +888 35 model """transh""" +888 35 loss """softplus""" +888 35 regularizer """transh""" +888 35 optimizer """adam""" +888 35 training_loop """owa""" +888 35 negative_sampler """basic""" +888 35 evaluator """rankbased""" +888 36 dataset """wn18rr""" +888 36 model """transh""" +888 36 loss """softplus""" +888 36 regularizer """transh""" +888 36 optimizer """adam""" +888 36 training_loop """owa""" +888 36 negative_sampler """basic""" +888 36 evaluator """rankbased""" +888 37 dataset """wn18rr""" +888 37 model """transh""" +888 37 loss """softplus""" +888 37 regularizer """transh""" +888 37 optimizer """adam""" +888 37 training_loop """owa""" +888 37 negative_sampler """basic""" +888 37 evaluator """rankbased""" +888 38 dataset """wn18rr""" +888 38 model """transh""" +888 38 loss """softplus""" +888 38 regularizer """transh""" +888 38 optimizer """adam""" +888 38 training_loop """owa""" +888 38 negative_sampler """basic""" +888 38 evaluator """rankbased""" +888 39 dataset """wn18rr""" +888 39 model """transh""" +888 39 loss """softplus""" +888 39 regularizer """transh""" +888 39 optimizer """adam""" +888 39 training_loop """owa""" +888 39 negative_sampler """basic""" +888 39 evaluator """rankbased""" +888 40 dataset """wn18rr""" +888 40 model """transh""" +888 40 loss """softplus""" +888 40 regularizer """transh""" +888 40 optimizer """adam""" +888 40 training_loop """owa""" +888 40 negative_sampler """basic""" +888 40 evaluator """rankbased""" +888 41 dataset """wn18rr""" +888 41 model """transh""" +888 41 loss """softplus""" +888 41 regularizer """transh""" +888 41 optimizer """adam""" +888 41 training_loop """owa""" +888 41 negative_sampler """basic""" +888 41 evaluator """rankbased""" +888 42 dataset """wn18rr""" +888 42 model """transh""" +888 42 loss """softplus""" +888 42 
regularizer """transh""" +888 42 optimizer """adam""" +888 42 training_loop """owa""" +888 42 negative_sampler """basic""" +888 42 evaluator """rankbased""" +888 43 dataset """wn18rr""" +888 43 model """transh""" +888 43 loss """softplus""" +888 43 regularizer """transh""" +888 43 optimizer """adam""" +888 43 training_loop """owa""" +888 43 negative_sampler """basic""" +888 43 evaluator """rankbased""" +888 44 dataset """wn18rr""" +888 44 model """transh""" +888 44 loss """softplus""" +888 44 regularizer """transh""" +888 44 optimizer """adam""" +888 44 training_loop """owa""" +888 44 negative_sampler """basic""" +888 44 evaluator """rankbased""" +888 45 dataset """wn18rr""" +888 45 model """transh""" +888 45 loss """softplus""" +888 45 regularizer """transh""" +888 45 optimizer """adam""" +888 45 training_loop """owa""" +888 45 negative_sampler """basic""" +888 45 evaluator """rankbased""" +888 46 dataset """wn18rr""" +888 46 model """transh""" +888 46 loss """softplus""" +888 46 regularizer """transh""" +888 46 optimizer """adam""" +888 46 training_loop """owa""" +888 46 negative_sampler """basic""" +888 46 evaluator """rankbased""" +888 47 dataset """wn18rr""" +888 47 model """transh""" +888 47 loss """softplus""" +888 47 regularizer """transh""" +888 47 optimizer """adam""" +888 47 training_loop """owa""" +888 47 negative_sampler """basic""" +888 47 evaluator """rankbased""" +888 48 dataset """wn18rr""" +888 48 model """transh""" +888 48 loss """softplus""" +888 48 regularizer """transh""" +888 48 optimizer """adam""" +888 48 training_loop """owa""" +888 48 negative_sampler """basic""" +888 48 evaluator """rankbased""" +888 49 dataset """wn18rr""" +888 49 model """transh""" +888 49 loss """softplus""" +888 49 regularizer """transh""" +888 49 optimizer """adam""" +888 49 training_loop """owa""" +888 49 negative_sampler """basic""" +888 49 evaluator """rankbased""" +888 50 dataset """wn18rr""" +888 50 model """transh""" +888 50 loss """softplus""" +888 50 
regularizer """transh""" +888 50 optimizer """adam""" +888 50 training_loop """owa""" +888 50 negative_sampler """basic""" +888 50 evaluator """rankbased""" +888 51 dataset """wn18rr""" +888 51 model """transh""" +888 51 loss """softplus""" +888 51 regularizer """transh""" +888 51 optimizer """adam""" +888 51 training_loop """owa""" +888 51 negative_sampler """basic""" +888 51 evaluator """rankbased""" +888 52 dataset """wn18rr""" +888 52 model """transh""" +888 52 loss """softplus""" +888 52 regularizer """transh""" +888 52 optimizer """adam""" +888 52 training_loop """owa""" +888 52 negative_sampler """basic""" +888 52 evaluator """rankbased""" +888 53 dataset """wn18rr""" +888 53 model """transh""" +888 53 loss """softplus""" +888 53 regularizer """transh""" +888 53 optimizer """adam""" +888 53 training_loop """owa""" +888 53 negative_sampler """basic""" +888 53 evaluator """rankbased""" +888 54 dataset """wn18rr""" +888 54 model """transh""" +888 54 loss """softplus""" +888 54 regularizer """transh""" +888 54 optimizer """adam""" +888 54 training_loop """owa""" +888 54 negative_sampler """basic""" +888 54 evaluator """rankbased""" +888 55 dataset """wn18rr""" +888 55 model """transh""" +888 55 loss """softplus""" +888 55 regularizer """transh""" +888 55 optimizer """adam""" +888 55 training_loop """owa""" +888 55 negative_sampler """basic""" +888 55 evaluator """rankbased""" +888 56 dataset """wn18rr""" +888 56 model """transh""" +888 56 loss """softplus""" +888 56 regularizer """transh""" +888 56 optimizer """adam""" +888 56 training_loop """owa""" +888 56 negative_sampler """basic""" +888 56 evaluator """rankbased""" +888 57 dataset """wn18rr""" +888 57 model """transh""" +888 57 loss """softplus""" +888 57 regularizer """transh""" +888 57 optimizer """adam""" +888 57 training_loop """owa""" +888 57 negative_sampler """basic""" +888 57 evaluator """rankbased""" +888 58 dataset """wn18rr""" +888 58 model """transh""" +888 58 loss """softplus""" +888 58 
regularizer """transh""" +888 58 optimizer """adam""" +888 58 training_loop """owa""" +888 58 negative_sampler """basic""" +888 58 evaluator """rankbased""" +888 59 dataset """wn18rr""" +888 59 model """transh""" +888 59 loss """softplus""" +888 59 regularizer """transh""" +888 59 optimizer """adam""" +888 59 training_loop """owa""" +888 59 negative_sampler """basic""" +888 59 evaluator """rankbased""" +888 60 dataset """wn18rr""" +888 60 model """transh""" +888 60 loss """softplus""" +888 60 regularizer """transh""" +888 60 optimizer """adam""" +888 60 training_loop """owa""" +888 60 negative_sampler """basic""" +888 60 evaluator """rankbased""" +888 61 dataset """wn18rr""" +888 61 model """transh""" +888 61 loss """softplus""" +888 61 regularizer """transh""" +888 61 optimizer """adam""" +888 61 training_loop """owa""" +888 61 negative_sampler """basic""" +888 61 evaluator """rankbased""" +888 62 dataset """wn18rr""" +888 62 model """transh""" +888 62 loss """softplus""" +888 62 regularizer """transh""" +888 62 optimizer """adam""" +888 62 training_loop """owa""" +888 62 negative_sampler """basic""" +888 62 evaluator """rankbased""" +888 63 dataset """wn18rr""" +888 63 model """transh""" +888 63 loss """softplus""" +888 63 regularizer """transh""" +888 63 optimizer """adam""" +888 63 training_loop """owa""" +888 63 negative_sampler """basic""" +888 63 evaluator """rankbased""" +888 64 dataset """wn18rr""" +888 64 model """transh""" +888 64 loss """softplus""" +888 64 regularizer """transh""" +888 64 optimizer """adam""" +888 64 training_loop """owa""" +888 64 negative_sampler """basic""" +888 64 evaluator """rankbased""" +888 65 dataset """wn18rr""" +888 65 model """transh""" +888 65 loss """softplus""" +888 65 regularizer """transh""" +888 65 optimizer """adam""" +888 65 training_loop """owa""" +888 65 negative_sampler """basic""" +888 65 evaluator """rankbased""" +889 1 model.embedding_dim 2.0 +889 1 model.scoring_fct_norm 1.0 +889 1 regularizer.weight 
0.04813875630157035 +889 1 optimizer.lr 0.011240731843876809 +889 1 negative_sampler.num_negs_per_pos 80.0 +889 1 training.batch_size 1.0 +889 2 model.embedding_dim 0.0 +889 2 model.scoring_fct_norm 1.0 +889 2 regularizer.weight 0.07887246970926975 +889 2 optimizer.lr 0.005718795856858683 +889 2 negative_sampler.num_negs_per_pos 8.0 +889 2 training.batch_size 2.0 +889 3 model.embedding_dim 2.0 +889 3 model.scoring_fct_norm 2.0 +889 3 regularizer.weight 0.06947884338290711 +889 3 optimizer.lr 0.09578201378224079 +889 3 negative_sampler.num_negs_per_pos 9.0 +889 3 training.batch_size 2.0 +889 4 model.embedding_dim 2.0 +889 4 model.scoring_fct_norm 1.0 +889 4 regularizer.weight 0.03023174823157452 +889 4 optimizer.lr 0.0022013344314270804 +889 4 negative_sampler.num_negs_per_pos 40.0 +889 4 training.batch_size 2.0 +889 5 model.embedding_dim 1.0 +889 5 model.scoring_fct_norm 1.0 +889 5 regularizer.weight 0.02447831028909557 +889 5 optimizer.lr 0.006381887653643535 +889 5 negative_sampler.num_negs_per_pos 84.0 +889 5 training.batch_size 1.0 +889 6 model.embedding_dim 1.0 +889 6 model.scoring_fct_norm 2.0 +889 6 regularizer.weight 0.010374376869260342 +889 6 optimizer.lr 0.005760073879221024 +889 6 negative_sampler.num_negs_per_pos 20.0 +889 6 training.batch_size 1.0 +889 7 model.embedding_dim 1.0 +889 7 model.scoring_fct_norm 1.0 +889 7 regularizer.weight 0.22173789943424363 +889 7 optimizer.lr 0.02698651640073273 +889 7 negative_sampler.num_negs_per_pos 42.0 +889 7 training.batch_size 2.0 +889 8 model.embedding_dim 1.0 +889 8 model.scoring_fct_norm 2.0 +889 8 regularizer.weight 0.12343907496130953 +889 8 optimizer.lr 0.0024668144992281774 +889 8 negative_sampler.num_negs_per_pos 14.0 +889 8 training.batch_size 0.0 +889 9 model.embedding_dim 1.0 +889 9 model.scoring_fct_norm 1.0 +889 9 regularizer.weight 0.05761664703710151 +889 9 optimizer.lr 0.014610553461758184 +889 9 negative_sampler.num_negs_per_pos 67.0 +889 9 training.batch_size 0.0 +889 10 model.embedding_dim 
1.0 +889 10 model.scoring_fct_norm 1.0 +889 10 regularizer.weight 0.26931180755693757 +889 10 optimizer.lr 0.08518495644536095 +889 10 negative_sampler.num_negs_per_pos 22.0 +889 10 training.batch_size 1.0 +889 11 model.embedding_dim 1.0 +889 11 model.scoring_fct_norm 1.0 +889 11 regularizer.weight 0.020431153240408357 +889 11 optimizer.lr 0.004222348245248376 +889 11 negative_sampler.num_negs_per_pos 96.0 +889 11 training.batch_size 0.0 +889 12 model.embedding_dim 1.0 +889 12 model.scoring_fct_norm 1.0 +889 12 regularizer.weight 0.03582800633457912 +889 12 optimizer.lr 0.010182459782845356 +889 12 negative_sampler.num_negs_per_pos 18.0 +889 12 training.batch_size 0.0 +889 13 model.embedding_dim 2.0 +889 13 model.scoring_fct_norm 1.0 +889 13 regularizer.weight 0.022706184905778647 +889 13 optimizer.lr 0.04000738416075942 +889 13 negative_sampler.num_negs_per_pos 82.0 +889 13 training.batch_size 0.0 +889 14 model.embedding_dim 1.0 +889 14 model.scoring_fct_norm 1.0 +889 14 regularizer.weight 0.017726400590940293 +889 14 optimizer.lr 0.0015248117483801576 +889 14 negative_sampler.num_negs_per_pos 77.0 +889 14 training.batch_size 0.0 +889 15 model.embedding_dim 0.0 +889 15 model.scoring_fct_norm 2.0 +889 15 regularizer.weight 0.08794499709102119 +889 15 optimizer.lr 0.0063511705742293565 +889 15 negative_sampler.num_negs_per_pos 16.0 +889 15 training.batch_size 2.0 +889 16 model.embedding_dim 2.0 +889 16 model.scoring_fct_norm 1.0 +889 16 regularizer.weight 0.04432282565546592 +889 16 optimizer.lr 0.0010638003780010078 +889 16 negative_sampler.num_negs_per_pos 9.0 +889 16 training.batch_size 2.0 +889 17 model.embedding_dim 2.0 +889 17 model.scoring_fct_norm 2.0 +889 17 regularizer.weight 0.037137163232748564 +889 17 optimizer.lr 0.009075670616628987 +889 17 negative_sampler.num_negs_per_pos 59.0 +889 17 training.batch_size 0.0 +889 18 model.embedding_dim 1.0 +889 18 model.scoring_fct_norm 1.0 +889 18 regularizer.weight 0.12056150864190658 +889 18 optimizer.lr 
0.035509137668763624 +889 18 negative_sampler.num_negs_per_pos 76.0 +889 18 training.batch_size 2.0 +889 19 model.embedding_dim 1.0 +889 19 model.scoring_fct_norm 1.0 +889 19 regularizer.weight 0.04329384899277527 +889 19 optimizer.lr 0.08338143761631724 +889 19 negative_sampler.num_negs_per_pos 32.0 +889 19 training.batch_size 2.0 +889 20 model.embedding_dim 0.0 +889 20 model.scoring_fct_norm 1.0 +889 20 regularizer.weight 0.18274981370594454 +889 20 optimizer.lr 0.005331311263421724 +889 20 negative_sampler.num_negs_per_pos 15.0 +889 20 training.batch_size 0.0 +889 21 model.embedding_dim 2.0 +889 21 model.scoring_fct_norm 2.0 +889 21 regularizer.weight 0.025914832766368454 +889 21 optimizer.lr 0.005438765621872632 +889 21 negative_sampler.num_negs_per_pos 71.0 +889 21 training.batch_size 2.0 +889 22 model.embedding_dim 2.0 +889 22 model.scoring_fct_norm 1.0 +889 22 regularizer.weight 0.25186847284814134 +889 22 optimizer.lr 0.0010503093040864588 +889 22 negative_sampler.num_negs_per_pos 44.0 +889 22 training.batch_size 2.0 +889 23 model.embedding_dim 1.0 +889 23 model.scoring_fct_norm 2.0 +889 23 regularizer.weight 0.036190300047318616 +889 23 optimizer.lr 0.0018656803768777462 +889 23 negative_sampler.num_negs_per_pos 25.0 +889 23 training.batch_size 0.0 +889 24 model.embedding_dim 2.0 +889 24 model.scoring_fct_norm 2.0 +889 24 regularizer.weight 0.028158363646127022 +889 24 optimizer.lr 0.013728601706765844 +889 24 negative_sampler.num_negs_per_pos 48.0 +889 24 training.batch_size 1.0 +889 25 model.embedding_dim 0.0 +889 25 model.scoring_fct_norm 2.0 +889 25 regularizer.weight 0.0558248641749962 +889 25 optimizer.lr 0.05960162480928214 +889 25 negative_sampler.num_negs_per_pos 98.0 +889 25 training.batch_size 1.0 +889 26 model.embedding_dim 0.0 +889 26 model.scoring_fct_norm 1.0 +889 26 regularizer.weight 0.1332025066122826 +889 26 optimizer.lr 0.0013715912042572556 +889 26 negative_sampler.num_negs_per_pos 12.0 +889 26 training.batch_size 2.0 +889 27 
model.embedding_dim 1.0 +889 27 model.scoring_fct_norm 2.0 +889 27 regularizer.weight 0.12192348622096852 +889 27 optimizer.lr 0.01699948491670571 +889 27 negative_sampler.num_negs_per_pos 48.0 +889 27 training.batch_size 1.0 +889 28 model.embedding_dim 2.0 +889 28 model.scoring_fct_norm 1.0 +889 28 regularizer.weight 0.0568145049564994 +889 28 optimizer.lr 0.027429133299330364 +889 28 negative_sampler.num_negs_per_pos 5.0 +889 28 training.batch_size 0.0 +889 29 model.embedding_dim 1.0 +889 29 model.scoring_fct_norm 1.0 +889 29 regularizer.weight 0.010272384023857044 +889 29 optimizer.lr 0.03662901943213135 +889 29 negative_sampler.num_negs_per_pos 8.0 +889 29 training.batch_size 0.0 +889 30 model.embedding_dim 0.0 +889 30 model.scoring_fct_norm 1.0 +889 30 regularizer.weight 0.01744875711939453 +889 30 optimizer.lr 0.04854926280878736 +889 30 negative_sampler.num_negs_per_pos 15.0 +889 30 training.batch_size 1.0 +889 31 model.embedding_dim 0.0 +889 31 model.scoring_fct_norm 1.0 +889 31 regularizer.weight 0.010374297789752756 +889 31 optimizer.lr 0.0021275931777056498 +889 31 negative_sampler.num_negs_per_pos 69.0 +889 31 training.batch_size 2.0 +889 32 model.embedding_dim 2.0 +889 32 model.scoring_fct_norm 1.0 +889 32 regularizer.weight 0.031482238622398864 +889 32 optimizer.lr 0.02780787990073026 +889 32 negative_sampler.num_negs_per_pos 1.0 +889 32 training.batch_size 0.0 +889 33 model.embedding_dim 2.0 +889 33 model.scoring_fct_norm 1.0 +889 33 regularizer.weight 0.2856686623617771 +889 33 optimizer.lr 0.011301343131125746 +889 33 negative_sampler.num_negs_per_pos 11.0 +889 33 training.batch_size 1.0 +889 34 model.embedding_dim 0.0 +889 34 model.scoring_fct_norm 1.0 +889 34 regularizer.weight 0.01917677620949059 +889 34 optimizer.lr 0.0011477415958077025 +889 34 negative_sampler.num_negs_per_pos 37.0 +889 34 training.batch_size 1.0 +889 35 model.embedding_dim 0.0 +889 35 model.scoring_fct_norm 1.0 +889 35 regularizer.weight 0.05168719925200377 +889 35 
optimizer.lr 0.08110167270796631 +889 35 negative_sampler.num_negs_per_pos 11.0 +889 35 training.batch_size 1.0 +889 36 model.embedding_dim 2.0 +889 36 model.scoring_fct_norm 1.0 +889 36 regularizer.weight 0.07521472232697013 +889 36 optimizer.lr 0.02331065209041089 +889 36 negative_sampler.num_negs_per_pos 9.0 +889 36 training.batch_size 1.0 +889 37 model.embedding_dim 2.0 +889 37 model.scoring_fct_norm 2.0 +889 37 regularizer.weight 0.02555093824852773 +889 37 optimizer.lr 0.011931403339504919 +889 37 negative_sampler.num_negs_per_pos 92.0 +889 37 training.batch_size 0.0 +889 38 model.embedding_dim 1.0 +889 38 model.scoring_fct_norm 1.0 +889 38 regularizer.weight 0.012563931784160355 +889 38 optimizer.lr 0.006413501396359977 +889 38 negative_sampler.num_negs_per_pos 0.0 +889 38 training.batch_size 2.0 +889 39 model.embedding_dim 0.0 +889 39 model.scoring_fct_norm 2.0 +889 39 regularizer.weight 0.014862737947368601 +889 39 optimizer.lr 0.005210513940762167 +889 39 negative_sampler.num_negs_per_pos 24.0 +889 39 training.batch_size 1.0 +889 40 model.embedding_dim 0.0 +889 40 model.scoring_fct_norm 1.0 +889 40 regularizer.weight 0.13529030764992317 +889 40 optimizer.lr 0.003598782013894767 +889 40 negative_sampler.num_negs_per_pos 47.0 +889 40 training.batch_size 2.0 +889 41 model.embedding_dim 0.0 +889 41 model.scoring_fct_norm 1.0 +889 41 regularizer.weight 0.24273615920313762 +889 41 optimizer.lr 0.0016847253492120976 +889 41 negative_sampler.num_negs_per_pos 89.0 +889 41 training.batch_size 0.0 +889 42 model.embedding_dim 1.0 +889 42 model.scoring_fct_norm 2.0 +889 42 regularizer.weight 0.012083739563044996 +889 42 optimizer.lr 0.026663258355772868 +889 42 negative_sampler.num_negs_per_pos 83.0 +889 42 training.batch_size 2.0 +889 43 model.embedding_dim 2.0 +889 43 model.scoring_fct_norm 1.0 +889 43 regularizer.weight 0.019479678170443923 +889 43 optimizer.lr 0.003317318917702477 +889 43 negative_sampler.num_negs_per_pos 13.0 +889 43 training.batch_size 1.0 +889 
44 model.embedding_dim 1.0 +889 44 model.scoring_fct_norm 1.0 +889 44 regularizer.weight 0.07370716946593313 +889 44 optimizer.lr 0.07713000381209649 +889 44 negative_sampler.num_negs_per_pos 11.0 +889 44 training.batch_size 2.0 +889 45 model.embedding_dim 0.0 +889 45 model.scoring_fct_norm 2.0 +889 45 regularizer.weight 0.026594211384873695 +889 45 optimizer.lr 0.009737658017334014 +889 45 negative_sampler.num_negs_per_pos 12.0 +889 45 training.batch_size 0.0 +889 46 model.embedding_dim 0.0 +889 46 model.scoring_fct_norm 1.0 +889 46 regularizer.weight 0.07272797875911859 +889 46 optimizer.lr 0.07059469026952392 +889 46 negative_sampler.num_negs_per_pos 6.0 +889 46 training.batch_size 2.0 +889 47 model.embedding_dim 2.0 +889 47 model.scoring_fct_norm 2.0 +889 47 regularizer.weight 0.02327743335900215 +889 47 optimizer.lr 0.07173083920754199 +889 47 negative_sampler.num_negs_per_pos 69.0 +889 47 training.batch_size 1.0 +889 48 model.embedding_dim 2.0 +889 48 model.scoring_fct_norm 2.0 +889 48 regularizer.weight 0.0310903353919469 +889 48 optimizer.lr 0.0021706585883141637 +889 48 negative_sampler.num_negs_per_pos 38.0 +889 48 training.batch_size 2.0 +889 49 model.embedding_dim 2.0 +889 49 model.scoring_fct_norm 1.0 +889 49 regularizer.weight 0.2665467434765859 +889 49 optimizer.lr 0.028927392224639057 +889 49 negative_sampler.num_negs_per_pos 41.0 +889 49 training.batch_size 1.0 +889 50 model.embedding_dim 1.0 +889 50 model.scoring_fct_norm 1.0 +889 50 regularizer.weight 0.19784284004704183 +889 50 optimizer.lr 0.0049924844409937625 +889 50 negative_sampler.num_negs_per_pos 75.0 +889 50 training.batch_size 2.0 +889 51 model.embedding_dim 2.0 +889 51 model.scoring_fct_norm 2.0 +889 51 regularizer.weight 0.03834150438091246 +889 51 optimizer.lr 0.037669745664982844 +889 51 negative_sampler.num_negs_per_pos 18.0 +889 51 training.batch_size 1.0 +889 52 model.embedding_dim 1.0 +889 52 model.scoring_fct_norm 1.0 +889 52 regularizer.weight 0.013299869857526755 +889 52 
optimizer.lr 0.0104900617762649 +889 52 negative_sampler.num_negs_per_pos 24.0 +889 52 training.batch_size 2.0 +889 53 model.embedding_dim 1.0 +889 53 model.scoring_fct_norm 2.0 +889 53 regularizer.weight 0.012762275374811349 +889 53 optimizer.lr 0.019244898033591723 +889 53 negative_sampler.num_negs_per_pos 15.0 +889 53 training.batch_size 2.0 +889 54 model.embedding_dim 2.0 +889 54 model.scoring_fct_norm 1.0 +889 54 regularizer.weight 0.07041704069891652 +889 54 optimizer.lr 0.0012825955886367005 +889 54 negative_sampler.num_negs_per_pos 54.0 +889 54 training.batch_size 2.0 +889 55 model.embedding_dim 1.0 +889 55 model.scoring_fct_norm 1.0 +889 55 regularizer.weight 0.14415352911094445 +889 55 optimizer.lr 0.0016100908092693562 +889 55 negative_sampler.num_negs_per_pos 90.0 +889 55 training.batch_size 0.0 +889 56 model.embedding_dim 0.0 +889 56 model.scoring_fct_norm 2.0 +889 56 regularizer.weight 0.06845278606573628 +889 56 optimizer.lr 0.01741661433295669 +889 56 negative_sampler.num_negs_per_pos 71.0 +889 56 training.batch_size 2.0 +889 57 model.embedding_dim 2.0 +889 57 model.scoring_fct_norm 2.0 +889 57 regularizer.weight 0.03319231820288028 +889 57 optimizer.lr 0.009659156021136504 +889 57 negative_sampler.num_negs_per_pos 74.0 +889 57 training.batch_size 1.0 +889 58 model.embedding_dim 1.0 +889 58 model.scoring_fct_norm 1.0 +889 58 regularizer.weight 0.03887039285976591 +889 58 optimizer.lr 0.017517823014663318 +889 58 negative_sampler.num_negs_per_pos 52.0 +889 58 training.batch_size 1.0 +889 59 model.embedding_dim 2.0 +889 59 model.scoring_fct_norm 1.0 +889 59 regularizer.weight 0.05396881444895674 +889 59 optimizer.lr 0.00177633050391511 +889 59 negative_sampler.num_negs_per_pos 83.0 +889 59 training.batch_size 1.0 +889 60 model.embedding_dim 2.0 +889 60 model.scoring_fct_norm 1.0 +889 60 regularizer.weight 0.027633278910680653 +889 60 optimizer.lr 0.00266518966315157 +889 60 negative_sampler.num_negs_per_pos 82.0 +889 60 training.batch_size 0.0 +889 61 
model.embedding_dim 2.0 +889 61 model.scoring_fct_norm 1.0 +889 61 regularizer.weight 0.016636516015549357 +889 61 optimizer.lr 0.08199054453240903 +889 61 negative_sampler.num_negs_per_pos 94.0 +889 61 training.batch_size 0.0 +889 62 model.embedding_dim 2.0 +889 62 model.scoring_fct_norm 1.0 +889 62 regularizer.weight 0.0681110375864377 +889 62 optimizer.lr 0.0029853035281068615 +889 62 negative_sampler.num_negs_per_pos 96.0 +889 62 training.batch_size 1.0 +889 63 model.embedding_dim 1.0 +889 63 model.scoring_fct_norm 1.0 +889 63 regularizer.weight 0.21304834467034353 +889 63 optimizer.lr 0.0041638432316773374 +889 63 negative_sampler.num_negs_per_pos 50.0 +889 63 training.batch_size 2.0 +889 64 model.embedding_dim 2.0 +889 64 model.scoring_fct_norm 1.0 +889 64 regularizer.weight 0.2994967586568791 +889 64 optimizer.lr 0.022718854456585825 +889 64 negative_sampler.num_negs_per_pos 29.0 +889 64 training.batch_size 2.0 +889 65 model.embedding_dim 0.0 +889 65 model.scoring_fct_norm 2.0 +889 65 regularizer.weight 0.01586510674168067 +889 65 optimizer.lr 0.04330929345665638 +889 65 negative_sampler.num_negs_per_pos 99.0 +889 65 training.batch_size 2.0 +889 66 model.embedding_dim 1.0 +889 66 model.scoring_fct_norm 1.0 +889 66 regularizer.weight 0.0539570210482427 +889 66 optimizer.lr 0.04898169652938321 +889 66 negative_sampler.num_negs_per_pos 30.0 +889 66 training.batch_size 2.0 +889 67 model.embedding_dim 2.0 +889 67 model.scoring_fct_norm 1.0 +889 67 regularizer.weight 0.024707476237160634 +889 67 optimizer.lr 0.0027014225635942136 +889 67 negative_sampler.num_negs_per_pos 20.0 +889 67 training.batch_size 0.0 +889 68 model.embedding_dim 2.0 +889 68 model.scoring_fct_norm 1.0 +889 68 regularizer.weight 0.04409790209804852 +889 68 optimizer.lr 0.05295593667237141 +889 68 negative_sampler.num_negs_per_pos 70.0 +889 68 training.batch_size 0.0 +889 69 model.embedding_dim 2.0 +889 69 model.scoring_fct_norm 2.0 +889 69 regularizer.weight 0.25015553766988735 +889 69 
optimizer.lr 0.052135749873262756 +889 69 negative_sampler.num_negs_per_pos 78.0 +889 69 training.batch_size 2.0 +889 70 model.embedding_dim 1.0 +889 70 model.scoring_fct_norm 2.0 +889 70 regularizer.weight 0.052781122127971335 +889 70 optimizer.lr 0.07577563992452373 +889 70 negative_sampler.num_negs_per_pos 28.0 +889 70 training.batch_size 0.0 +889 71 model.embedding_dim 2.0 +889 71 model.scoring_fct_norm 1.0 +889 71 regularizer.weight 0.012739598733359312 +889 71 optimizer.lr 0.03762191328267054 +889 71 negative_sampler.num_negs_per_pos 98.0 +889 71 training.batch_size 1.0 +889 72 model.embedding_dim 1.0 +889 72 model.scoring_fct_norm 1.0 +889 72 regularizer.weight 0.016350795732878436 +889 72 optimizer.lr 0.010128765196197487 +889 72 negative_sampler.num_negs_per_pos 93.0 +889 72 training.batch_size 1.0 +889 73 model.embedding_dim 2.0 +889 73 model.scoring_fct_norm 1.0 +889 73 regularizer.weight 0.013535233959289272 +889 73 optimizer.lr 0.006105827513288302 +889 73 negative_sampler.num_negs_per_pos 30.0 +889 73 training.batch_size 0.0 +889 74 model.embedding_dim 1.0 +889 74 model.scoring_fct_norm 1.0 +889 74 regularizer.weight 0.054951252609287835 +889 74 optimizer.lr 0.004205379360928131 +889 74 negative_sampler.num_negs_per_pos 39.0 +889 74 training.batch_size 1.0 +889 75 model.embedding_dim 1.0 +889 75 model.scoring_fct_norm 2.0 +889 75 regularizer.weight 0.04237204032331484 +889 75 optimizer.lr 0.0018787648768879733 +889 75 negative_sampler.num_negs_per_pos 31.0 +889 75 training.batch_size 2.0 +889 76 model.embedding_dim 0.0 +889 76 model.scoring_fct_norm 1.0 +889 76 regularizer.weight 0.016280707203468133 +889 76 optimizer.lr 0.001496446826327336 +889 76 negative_sampler.num_negs_per_pos 69.0 +889 76 training.batch_size 0.0 +889 77 model.embedding_dim 1.0 +889 77 model.scoring_fct_norm 1.0 +889 77 regularizer.weight 0.15731754929128158 +889 77 optimizer.lr 0.004423565222641942 +889 77 negative_sampler.num_negs_per_pos 1.0 +889 77 training.batch_size 2.0 
+889 78 model.embedding_dim 0.0 +889 78 model.scoring_fct_norm 2.0 +889 78 regularizer.weight 0.03504219439562538 +889 78 optimizer.lr 0.0052603692354539055 +889 78 negative_sampler.num_negs_per_pos 66.0 +889 78 training.batch_size 1.0 +889 79 model.embedding_dim 2.0 +889 79 model.scoring_fct_norm 1.0 +889 79 regularizer.weight 0.12023331541448327 +889 79 optimizer.lr 0.044486843845809726 +889 79 negative_sampler.num_negs_per_pos 37.0 +889 79 training.batch_size 1.0 +889 80 model.embedding_dim 1.0 +889 80 model.scoring_fct_norm 1.0 +889 80 regularizer.weight 0.010956552150780294 +889 80 optimizer.lr 0.015700534609436434 +889 80 negative_sampler.num_negs_per_pos 44.0 +889 80 training.batch_size 0.0 +889 81 model.embedding_dim 2.0 +889 81 model.scoring_fct_norm 1.0 +889 81 regularizer.weight 0.022680669602488008 +889 81 optimizer.lr 0.0010026949859751892 +889 81 negative_sampler.num_negs_per_pos 50.0 +889 81 training.batch_size 0.0 +889 82 model.embedding_dim 1.0 +889 82 model.scoring_fct_norm 2.0 +889 82 regularizer.weight 0.0469426940760863 +889 82 optimizer.lr 0.026583435265336335 +889 82 negative_sampler.num_negs_per_pos 22.0 +889 82 training.batch_size 1.0 +889 83 model.embedding_dim 1.0 +889 83 model.scoring_fct_norm 2.0 +889 83 regularizer.weight 0.12670618053912858 +889 83 optimizer.lr 0.003668441629607661 +889 83 negative_sampler.num_negs_per_pos 46.0 +889 83 training.batch_size 1.0 +889 84 model.embedding_dim 1.0 +889 84 model.scoring_fct_norm 1.0 +889 84 regularizer.weight 0.012912439529449102 +889 84 optimizer.lr 0.05134844622725761 +889 84 negative_sampler.num_negs_per_pos 14.0 +889 84 training.batch_size 2.0 +889 85 model.embedding_dim 1.0 +889 85 model.scoring_fct_norm 2.0 +889 85 regularizer.weight 0.1447294696524371 +889 85 optimizer.lr 0.019373199074884655 +889 85 negative_sampler.num_negs_per_pos 6.0 +889 85 training.batch_size 2.0 +889 86 model.embedding_dim 0.0 +889 86 model.scoring_fct_norm 2.0 +889 86 regularizer.weight 0.014795830913306352 
+889 86 optimizer.lr 0.04407584964929614 +889 86 negative_sampler.num_negs_per_pos 47.0 +889 86 training.batch_size 1.0 +889 87 model.embedding_dim 1.0 +889 87 model.scoring_fct_norm 1.0 +889 87 regularizer.weight 0.10131880466486166 +889 87 optimizer.lr 0.014661359507346464 +889 87 negative_sampler.num_negs_per_pos 46.0 +889 87 training.batch_size 0.0 +889 88 model.embedding_dim 2.0 +889 88 model.scoring_fct_norm 2.0 +889 88 regularizer.weight 0.03814720818629615 +889 88 optimizer.lr 0.014371666283173821 +889 88 negative_sampler.num_negs_per_pos 50.0 +889 88 training.batch_size 0.0 +889 89 model.embedding_dim 1.0 +889 89 model.scoring_fct_norm 2.0 +889 89 regularizer.weight 0.022060481060751123 +889 89 optimizer.lr 0.005299358991031265 +889 89 negative_sampler.num_negs_per_pos 60.0 +889 89 training.batch_size 0.0 +889 90 model.embedding_dim 2.0 +889 90 model.scoring_fct_norm 2.0 +889 90 regularizer.weight 0.08874756168821593 +889 90 optimizer.lr 0.019862733622541647 +889 90 negative_sampler.num_negs_per_pos 47.0 +889 90 training.batch_size 1.0 +889 91 model.embedding_dim 0.0 +889 91 model.scoring_fct_norm 2.0 +889 91 regularizer.weight 0.2929127653609713 +889 91 optimizer.lr 0.05419122679840625 +889 91 negative_sampler.num_negs_per_pos 95.0 +889 91 training.batch_size 2.0 +889 92 model.embedding_dim 1.0 +889 92 model.scoring_fct_norm 1.0 +889 92 regularizer.weight 0.019851158104680636 +889 92 optimizer.lr 0.07346020548553452 +889 92 negative_sampler.num_negs_per_pos 61.0 +889 92 training.batch_size 1.0 +889 93 model.embedding_dim 2.0 +889 93 model.scoring_fct_norm 1.0 +889 93 regularizer.weight 0.05140766090656486 +889 93 optimizer.lr 0.04198435586594143 +889 93 negative_sampler.num_negs_per_pos 77.0 +889 93 training.batch_size 2.0 +889 94 model.embedding_dim 2.0 +889 94 model.scoring_fct_norm 1.0 +889 94 regularizer.weight 0.09952174413251054 +889 94 optimizer.lr 0.005790534106347698 +889 94 negative_sampler.num_negs_per_pos 12.0 +889 94 training.batch_size 0.0 
+889 95 model.embedding_dim 2.0 +889 95 model.scoring_fct_norm 2.0 +889 95 regularizer.weight 0.014653003292733096 +889 95 optimizer.lr 0.07905576919698999 +889 95 negative_sampler.num_negs_per_pos 18.0 +889 95 training.batch_size 1.0 +889 96 model.embedding_dim 1.0 +889 96 model.scoring_fct_norm 2.0 +889 96 regularizer.weight 0.1042546638332965 +889 96 optimizer.lr 0.0690130690306251 +889 96 negative_sampler.num_negs_per_pos 77.0 +889 96 training.batch_size 0.0 +889 97 model.embedding_dim 1.0 +889 97 model.scoring_fct_norm 2.0 +889 97 regularizer.weight 0.015549586298337113 +889 97 optimizer.lr 0.01487655993219961 +889 97 negative_sampler.num_negs_per_pos 34.0 +889 97 training.batch_size 2.0 +889 98 model.embedding_dim 0.0 +889 98 model.scoring_fct_norm 1.0 +889 98 regularizer.weight 0.012772629802382229 +889 98 optimizer.lr 0.01036295466007538 +889 98 negative_sampler.num_negs_per_pos 55.0 +889 98 training.batch_size 0.0 +889 99 model.embedding_dim 2.0 +889 99 model.scoring_fct_norm 2.0 +889 99 regularizer.weight 0.01914926539073828 +889 99 optimizer.lr 0.00766684324895578 +889 99 negative_sampler.num_negs_per_pos 46.0 +889 99 training.batch_size 1.0 +889 100 model.embedding_dim 2.0 +889 100 model.scoring_fct_norm 2.0 +889 100 regularizer.weight 0.12151137763892109 +889 100 optimizer.lr 0.001016245259746054 +889 100 negative_sampler.num_negs_per_pos 55.0 +889 100 training.batch_size 2.0 +889 1 dataset """wn18rr""" +889 1 model """transh""" +889 1 loss """bceaftersigmoid""" +889 1 regularizer """transh""" +889 1 optimizer """adam""" +889 1 training_loop """owa""" +889 1 negative_sampler """basic""" +889 1 evaluator """rankbased""" +889 2 dataset """wn18rr""" +889 2 model """transh""" +889 2 loss """bceaftersigmoid""" +889 2 regularizer """transh""" +889 2 optimizer """adam""" +889 2 training_loop """owa""" +889 2 negative_sampler """basic""" +889 2 evaluator """rankbased""" +889 3 dataset """wn18rr""" +889 3 model """transh""" +889 3 loss """bceaftersigmoid""" 
+889 3 regularizer """transh""" +889 3 optimizer """adam""" +889 3 training_loop """owa""" +889 3 negative_sampler """basic""" +889 3 evaluator """rankbased""" +889 4 dataset """wn18rr""" +889 4 model """transh""" +889 4 loss """bceaftersigmoid""" +889 4 regularizer """transh""" +889 4 optimizer """adam""" +889 4 training_loop """owa""" +889 4 negative_sampler """basic""" +889 4 evaluator """rankbased""" +889 5 dataset """wn18rr""" +889 5 model """transh""" +889 5 loss """bceaftersigmoid""" +889 5 regularizer """transh""" +889 5 optimizer """adam""" +889 5 training_loop """owa""" +889 5 negative_sampler """basic""" +889 5 evaluator """rankbased""" +889 6 dataset """wn18rr""" +889 6 model """transh""" +889 6 loss """bceaftersigmoid""" +889 6 regularizer """transh""" +889 6 optimizer """adam""" +889 6 training_loop """owa""" +889 6 negative_sampler """basic""" +889 6 evaluator """rankbased""" +889 7 dataset """wn18rr""" +889 7 model """transh""" +889 7 loss """bceaftersigmoid""" +889 7 regularizer """transh""" +889 7 optimizer """adam""" +889 7 training_loop """owa""" +889 7 negative_sampler """basic""" +889 7 evaluator """rankbased""" +889 8 dataset """wn18rr""" +889 8 model """transh""" +889 8 loss """bceaftersigmoid""" +889 8 regularizer """transh""" +889 8 optimizer """adam""" +889 8 training_loop """owa""" +889 8 negative_sampler """basic""" +889 8 evaluator """rankbased""" +889 9 dataset """wn18rr""" +889 9 model """transh""" +889 9 loss """bceaftersigmoid""" +889 9 regularizer """transh""" +889 9 optimizer """adam""" +889 9 training_loop """owa""" +889 9 negative_sampler """basic""" +889 9 evaluator """rankbased""" +889 10 dataset """wn18rr""" +889 10 model """transh""" +889 10 loss """bceaftersigmoid""" +889 10 regularizer """transh""" +889 10 optimizer """adam""" +889 10 training_loop """owa""" +889 10 negative_sampler """basic""" +889 10 evaluator """rankbased""" +889 11 dataset """wn18rr""" +889 11 model """transh""" +889 11 loss """bceaftersigmoid""" +889 
11 regularizer """transh""" +889 11 optimizer """adam""" +889 11 training_loop """owa""" +889 11 negative_sampler """basic""" +889 11 evaluator """rankbased""" +889 12 dataset """wn18rr""" +889 12 model """transh""" +889 12 loss """bceaftersigmoid""" +889 12 regularizer """transh""" +889 12 optimizer """adam""" +889 12 training_loop """owa""" +889 12 negative_sampler """basic""" +889 12 evaluator """rankbased""" +889 13 dataset """wn18rr""" +889 13 model """transh""" +889 13 loss """bceaftersigmoid""" +889 13 regularizer """transh""" +889 13 optimizer """adam""" +889 13 training_loop """owa""" +889 13 negative_sampler """basic""" +889 13 evaluator """rankbased""" +889 14 dataset """wn18rr""" +889 14 model """transh""" +889 14 loss """bceaftersigmoid""" +889 14 regularizer """transh""" +889 14 optimizer """adam""" +889 14 training_loop """owa""" +889 14 negative_sampler """basic""" +889 14 evaluator """rankbased""" +889 15 dataset """wn18rr""" +889 15 model """transh""" +889 15 loss """bceaftersigmoid""" +889 15 regularizer """transh""" +889 15 optimizer """adam""" +889 15 training_loop """owa""" +889 15 negative_sampler """basic""" +889 15 evaluator """rankbased""" +889 16 dataset """wn18rr""" +889 16 model """transh""" +889 16 loss """bceaftersigmoid""" +889 16 regularizer """transh""" +889 16 optimizer """adam""" +889 16 training_loop """owa""" +889 16 negative_sampler """basic""" +889 16 evaluator """rankbased""" +889 17 dataset """wn18rr""" +889 17 model """transh""" +889 17 loss """bceaftersigmoid""" +889 17 regularizer """transh""" +889 17 optimizer """adam""" +889 17 training_loop """owa""" +889 17 negative_sampler """basic""" +889 17 evaluator """rankbased""" +889 18 dataset """wn18rr""" +889 18 model """transh""" +889 18 loss """bceaftersigmoid""" +889 18 regularizer """transh""" +889 18 optimizer """adam""" +889 18 training_loop """owa""" +889 18 negative_sampler """basic""" +889 18 evaluator """rankbased""" +889 19 dataset """wn18rr""" +889 19 model 
"""transh""" +889 19 loss """bceaftersigmoid""" +889 19 regularizer """transh""" +889 19 optimizer """adam""" +889 19 training_loop """owa""" +889 19 negative_sampler """basic""" +889 19 evaluator """rankbased""" +889 20 dataset """wn18rr""" +889 20 model """transh""" +889 20 loss """bceaftersigmoid""" +889 20 regularizer """transh""" +889 20 optimizer """adam""" +889 20 training_loop """owa""" +889 20 negative_sampler """basic""" +889 20 evaluator """rankbased""" +889 21 dataset """wn18rr""" +889 21 model """transh""" +889 21 loss """bceaftersigmoid""" +889 21 regularizer """transh""" +889 21 optimizer """adam""" +889 21 training_loop """owa""" +889 21 negative_sampler """basic""" +889 21 evaluator """rankbased""" +889 22 dataset """wn18rr""" +889 22 model """transh""" +889 22 loss """bceaftersigmoid""" +889 22 regularizer """transh""" +889 22 optimizer """adam""" +889 22 training_loop """owa""" +889 22 negative_sampler """basic""" +889 22 evaluator """rankbased""" +889 23 dataset """wn18rr""" +889 23 model """transh""" +889 23 loss """bceaftersigmoid""" +889 23 regularizer """transh""" +889 23 optimizer """adam""" +889 23 training_loop """owa""" +889 23 negative_sampler """basic""" +889 23 evaluator """rankbased""" +889 24 dataset """wn18rr""" +889 24 model """transh""" +889 24 loss """bceaftersigmoid""" +889 24 regularizer """transh""" +889 24 optimizer """adam""" +889 24 training_loop """owa""" +889 24 negative_sampler """basic""" +889 24 evaluator """rankbased""" +889 25 dataset """wn18rr""" +889 25 model """transh""" +889 25 loss """bceaftersigmoid""" +889 25 regularizer """transh""" +889 25 optimizer """adam""" +889 25 training_loop """owa""" +889 25 negative_sampler """basic""" +889 25 evaluator """rankbased""" +889 26 dataset """wn18rr""" +889 26 model """transh""" +889 26 loss """bceaftersigmoid""" +889 26 regularizer """transh""" +889 26 optimizer """adam""" +889 26 training_loop """owa""" +889 26 negative_sampler """basic""" +889 26 evaluator 
"""rankbased""" +889 27 dataset """wn18rr""" +889 27 model """transh""" +889 27 loss """bceaftersigmoid""" +889 27 regularizer """transh""" +889 27 optimizer """adam""" +889 27 training_loop """owa""" +889 27 negative_sampler """basic""" +889 27 evaluator """rankbased""" +889 28 dataset """wn18rr""" +889 28 model """transh""" +889 28 loss """bceaftersigmoid""" +889 28 regularizer """transh""" +889 28 optimizer """adam""" +889 28 training_loop """owa""" +889 28 negative_sampler """basic""" +889 28 evaluator """rankbased""" +889 29 dataset """wn18rr""" +889 29 model """transh""" +889 29 loss """bceaftersigmoid""" +889 29 regularizer """transh""" +889 29 optimizer """adam""" +889 29 training_loop """owa""" +889 29 negative_sampler """basic""" +889 29 evaluator """rankbased""" +889 30 dataset """wn18rr""" +889 30 model """transh""" +889 30 loss """bceaftersigmoid""" +889 30 regularizer """transh""" +889 30 optimizer """adam""" +889 30 training_loop """owa""" +889 30 negative_sampler """basic""" +889 30 evaluator """rankbased""" +889 31 dataset """wn18rr""" +889 31 model """transh""" +889 31 loss """bceaftersigmoid""" +889 31 regularizer """transh""" +889 31 optimizer """adam""" +889 31 training_loop """owa""" +889 31 negative_sampler """basic""" +889 31 evaluator """rankbased""" +889 32 dataset """wn18rr""" +889 32 model """transh""" +889 32 loss """bceaftersigmoid""" +889 32 regularizer """transh""" +889 32 optimizer """adam""" +889 32 training_loop """owa""" +889 32 negative_sampler """basic""" +889 32 evaluator """rankbased""" +889 33 dataset """wn18rr""" +889 33 model """transh""" +889 33 loss """bceaftersigmoid""" +889 33 regularizer """transh""" +889 33 optimizer """adam""" +889 33 training_loop """owa""" +889 33 negative_sampler """basic""" +889 33 evaluator """rankbased""" +889 34 dataset """wn18rr""" +889 34 model """transh""" +889 34 loss """bceaftersigmoid""" +889 34 regularizer """transh""" +889 34 optimizer """adam""" +889 34 training_loop """owa""" +889 
34 negative_sampler """basic""" +889 34 evaluator """rankbased""" +889 35 dataset """wn18rr""" +889 35 model """transh""" +889 35 loss """bceaftersigmoid""" +889 35 regularizer """transh""" +889 35 optimizer """adam""" +889 35 training_loop """owa""" +889 35 negative_sampler """basic""" +889 35 evaluator """rankbased""" +889 36 dataset """wn18rr""" +889 36 model """transh""" +889 36 loss """bceaftersigmoid""" +889 36 regularizer """transh""" +889 36 optimizer """adam""" +889 36 training_loop """owa""" +889 36 negative_sampler """basic""" +889 36 evaluator """rankbased""" +889 37 dataset """wn18rr""" +889 37 model """transh""" +889 37 loss """bceaftersigmoid""" +889 37 regularizer """transh""" +889 37 optimizer """adam""" +889 37 training_loop """owa""" +889 37 negative_sampler """basic""" +889 37 evaluator """rankbased""" +889 38 dataset """wn18rr""" +889 38 model """transh""" +889 38 loss """bceaftersigmoid""" +889 38 regularizer """transh""" +889 38 optimizer """adam""" +889 38 training_loop """owa""" +889 38 negative_sampler """basic""" +889 38 evaluator """rankbased""" +889 39 dataset """wn18rr""" +889 39 model """transh""" +889 39 loss """bceaftersigmoid""" +889 39 regularizer """transh""" +889 39 optimizer """adam""" +889 39 training_loop """owa""" +889 39 negative_sampler """basic""" +889 39 evaluator """rankbased""" +889 40 dataset """wn18rr""" +889 40 model """transh""" +889 40 loss """bceaftersigmoid""" +889 40 regularizer """transh""" +889 40 optimizer """adam""" +889 40 training_loop """owa""" +889 40 negative_sampler """basic""" +889 40 evaluator """rankbased""" +889 41 dataset """wn18rr""" +889 41 model """transh""" +889 41 loss """bceaftersigmoid""" +889 41 regularizer """transh""" +889 41 optimizer """adam""" +889 41 training_loop """owa""" +889 41 negative_sampler """basic""" +889 41 evaluator """rankbased""" +889 42 dataset """wn18rr""" +889 42 model """transh""" +889 42 loss """bceaftersigmoid""" +889 42 regularizer """transh""" +889 42 optimizer 
"""adam""" +889 42 training_loop """owa""" +889 42 negative_sampler """basic""" +889 42 evaluator """rankbased""" +889 43 dataset """wn18rr""" +889 43 model """transh""" +889 43 loss """bceaftersigmoid""" +889 43 regularizer """transh""" +889 43 optimizer """adam""" +889 43 training_loop """owa""" +889 43 negative_sampler """basic""" +889 43 evaluator """rankbased""" +889 44 dataset """wn18rr""" +889 44 model """transh""" +889 44 loss """bceaftersigmoid""" +889 44 regularizer """transh""" +889 44 optimizer """adam""" +889 44 training_loop """owa""" +889 44 negative_sampler """basic""" +889 44 evaluator """rankbased""" +889 45 dataset """wn18rr""" +889 45 model """transh""" +889 45 loss """bceaftersigmoid""" +889 45 regularizer """transh""" +889 45 optimizer """adam""" +889 45 training_loop """owa""" +889 45 negative_sampler """basic""" +889 45 evaluator """rankbased""" +889 46 dataset """wn18rr""" +889 46 model """transh""" +889 46 loss """bceaftersigmoid""" +889 46 regularizer """transh""" +889 46 optimizer """adam""" +889 46 training_loop """owa""" +889 46 negative_sampler """basic""" +889 46 evaluator """rankbased""" +889 47 dataset """wn18rr""" +889 47 model """transh""" +889 47 loss """bceaftersigmoid""" +889 47 regularizer """transh""" +889 47 optimizer """adam""" +889 47 training_loop """owa""" +889 47 negative_sampler """basic""" +889 47 evaluator """rankbased""" +889 48 dataset """wn18rr""" +889 48 model """transh""" +889 48 loss """bceaftersigmoid""" +889 48 regularizer """transh""" +889 48 optimizer """adam""" +889 48 training_loop """owa""" +889 48 negative_sampler """basic""" +889 48 evaluator """rankbased""" +889 49 dataset """wn18rr""" +889 49 model """transh""" +889 49 loss """bceaftersigmoid""" +889 49 regularizer """transh""" +889 49 optimizer """adam""" +889 49 training_loop """owa""" +889 49 negative_sampler """basic""" +889 49 evaluator """rankbased""" +889 50 dataset """wn18rr""" +889 50 model """transh""" +889 50 loss """bceaftersigmoid""" 
+889 50 regularizer """transh""" +889 50 optimizer """adam""" +889 50 training_loop """owa""" +889 50 negative_sampler """basic""" +889 50 evaluator """rankbased""" +889 51 dataset """wn18rr""" +889 51 model """transh""" +889 51 loss """bceaftersigmoid""" +889 51 regularizer """transh""" +889 51 optimizer """adam""" +889 51 training_loop """owa""" +889 51 negative_sampler """basic""" +889 51 evaluator """rankbased""" +889 52 dataset """wn18rr""" +889 52 model """transh""" +889 52 loss """bceaftersigmoid""" +889 52 regularizer """transh""" +889 52 optimizer """adam""" +889 52 training_loop """owa""" +889 52 negative_sampler """basic""" +889 52 evaluator """rankbased""" +889 53 dataset """wn18rr""" +889 53 model """transh""" +889 53 loss """bceaftersigmoid""" +889 53 regularizer """transh""" +889 53 optimizer """adam""" +889 53 training_loop """owa""" +889 53 negative_sampler """basic""" +889 53 evaluator """rankbased""" +889 54 dataset """wn18rr""" +889 54 model """transh""" +889 54 loss """bceaftersigmoid""" +889 54 regularizer """transh""" +889 54 optimizer """adam""" +889 54 training_loop """owa""" +889 54 negative_sampler """basic""" +889 54 evaluator """rankbased""" +889 55 dataset """wn18rr""" +889 55 model """transh""" +889 55 loss """bceaftersigmoid""" +889 55 regularizer """transh""" +889 55 optimizer """adam""" +889 55 training_loop """owa""" +889 55 negative_sampler """basic""" +889 55 evaluator """rankbased""" +889 56 dataset """wn18rr""" +889 56 model """transh""" +889 56 loss """bceaftersigmoid""" +889 56 regularizer """transh""" +889 56 optimizer """adam""" +889 56 training_loop """owa""" +889 56 negative_sampler """basic""" +889 56 evaluator """rankbased""" +889 57 dataset """wn18rr""" +889 57 model """transh""" +889 57 loss """bceaftersigmoid""" +889 57 regularizer """transh""" +889 57 optimizer """adam""" +889 57 training_loop """owa""" +889 57 negative_sampler """basic""" +889 57 evaluator """rankbased""" +889 58 dataset """wn18rr""" +889 58 model 
"""transh""" +889 58 loss """bceaftersigmoid""" +889 58 regularizer """transh""" +889 58 optimizer """adam""" +889 58 training_loop """owa""" +889 58 negative_sampler """basic""" +889 58 evaluator """rankbased""" +889 59 dataset """wn18rr""" +889 59 model """transh""" +889 59 loss """bceaftersigmoid""" +889 59 regularizer """transh""" +889 59 optimizer """adam""" +889 59 training_loop """owa""" +889 59 negative_sampler """basic""" +889 59 evaluator """rankbased""" +889 60 dataset """wn18rr""" +889 60 model """transh""" +889 60 loss """bceaftersigmoid""" +889 60 regularizer """transh""" +889 60 optimizer """adam""" +889 60 training_loop """owa""" +889 60 negative_sampler """basic""" +889 60 evaluator """rankbased""" +889 61 dataset """wn18rr""" +889 61 model """transh""" +889 61 loss """bceaftersigmoid""" +889 61 regularizer """transh""" +889 61 optimizer """adam""" +889 61 training_loop """owa""" +889 61 negative_sampler """basic""" +889 61 evaluator """rankbased""" +889 62 dataset """wn18rr""" +889 62 model """transh""" +889 62 loss """bceaftersigmoid""" +889 62 regularizer """transh""" +889 62 optimizer """adam""" +889 62 training_loop """owa""" +889 62 negative_sampler """basic""" +889 62 evaluator """rankbased""" +889 63 dataset """wn18rr""" +889 63 model """transh""" +889 63 loss """bceaftersigmoid""" +889 63 regularizer """transh""" +889 63 optimizer """adam""" +889 63 training_loop """owa""" +889 63 negative_sampler """basic""" +889 63 evaluator """rankbased""" +889 64 dataset """wn18rr""" +889 64 model """transh""" +889 64 loss """bceaftersigmoid""" +889 64 regularizer """transh""" +889 64 optimizer """adam""" +889 64 training_loop """owa""" +889 64 negative_sampler """basic""" +889 64 evaluator """rankbased""" +889 65 dataset """wn18rr""" +889 65 model """transh""" +889 65 loss """bceaftersigmoid""" +889 65 regularizer """transh""" +889 65 optimizer """adam""" +889 65 training_loop """owa""" +889 65 negative_sampler """basic""" +889 65 evaluator 
"""rankbased""" +889 66 dataset """wn18rr""" +889 66 model """transh""" +889 66 loss """bceaftersigmoid""" +889 66 regularizer """transh""" +889 66 optimizer """adam""" +889 66 training_loop """owa""" +889 66 negative_sampler """basic""" +889 66 evaluator """rankbased""" +889 67 dataset """wn18rr""" +889 67 model """transh""" +889 67 loss """bceaftersigmoid""" +889 67 regularizer """transh""" +889 67 optimizer """adam""" +889 67 training_loop """owa""" +889 67 negative_sampler """basic""" +889 67 evaluator """rankbased""" +889 68 dataset """wn18rr""" +889 68 model """transh""" +889 68 loss """bceaftersigmoid""" +889 68 regularizer """transh""" +889 68 optimizer """adam""" +889 68 training_loop """owa""" +889 68 negative_sampler """basic""" +889 68 evaluator """rankbased""" +889 69 dataset """wn18rr""" +889 69 model """transh""" +889 69 loss """bceaftersigmoid""" +889 69 regularizer """transh""" +889 69 optimizer """adam""" +889 69 training_loop """owa""" +889 69 negative_sampler """basic""" +889 69 evaluator """rankbased""" +889 70 dataset """wn18rr""" +889 70 model """transh""" +889 70 loss """bceaftersigmoid""" +889 70 regularizer """transh""" +889 70 optimizer """adam""" +889 70 training_loop """owa""" +889 70 negative_sampler """basic""" +889 70 evaluator """rankbased""" +889 71 dataset """wn18rr""" +889 71 model """transh""" +889 71 loss """bceaftersigmoid""" +889 71 regularizer """transh""" +889 71 optimizer """adam""" +889 71 training_loop """owa""" +889 71 negative_sampler """basic""" +889 71 evaluator """rankbased""" +889 72 dataset """wn18rr""" +889 72 model """transh""" +889 72 loss """bceaftersigmoid""" +889 72 regularizer """transh""" +889 72 optimizer """adam""" +889 72 training_loop """owa""" +889 72 negative_sampler """basic""" +889 72 evaluator """rankbased""" +889 73 dataset """wn18rr""" +889 73 model """transh""" +889 73 loss """bceaftersigmoid""" +889 73 regularizer """transh""" +889 73 optimizer """adam""" +889 73 training_loop """owa""" +889 
73 negative_sampler """basic""" +889 73 evaluator """rankbased""" +889 74 dataset """wn18rr""" +889 74 model """transh""" +889 74 loss """bceaftersigmoid""" +889 74 regularizer """transh""" +889 74 optimizer """adam""" +889 74 training_loop """owa""" +889 74 negative_sampler """basic""" +889 74 evaluator """rankbased""" +889 75 dataset """wn18rr""" +889 75 model """transh""" +889 75 loss """bceaftersigmoid""" +889 75 regularizer """transh""" +889 75 optimizer """adam""" +889 75 training_loop """owa""" +889 75 negative_sampler """basic""" +889 75 evaluator """rankbased""" +889 76 dataset """wn18rr""" +889 76 model """transh""" +889 76 loss """bceaftersigmoid""" +889 76 regularizer """transh""" +889 76 optimizer """adam""" +889 76 training_loop """owa""" +889 76 negative_sampler """basic""" +889 76 evaluator """rankbased""" +889 77 dataset """wn18rr""" +889 77 model """transh""" +889 77 loss """bceaftersigmoid""" +889 77 regularizer """transh""" +889 77 optimizer """adam""" +889 77 training_loop """owa""" +889 77 negative_sampler """basic""" +889 77 evaluator """rankbased""" +889 78 dataset """wn18rr""" +889 78 model """transh""" +889 78 loss """bceaftersigmoid""" +889 78 regularizer """transh""" +889 78 optimizer """adam""" +889 78 training_loop """owa""" +889 78 negative_sampler """basic""" +889 78 evaluator """rankbased""" +889 79 dataset """wn18rr""" +889 79 model """transh""" +889 79 loss """bceaftersigmoid""" +889 79 regularizer """transh""" +889 79 optimizer """adam""" +889 79 training_loop """owa""" +889 79 negative_sampler """basic""" +889 79 evaluator """rankbased""" +889 80 dataset """wn18rr""" +889 80 model """transh""" +889 80 loss """bceaftersigmoid""" +889 80 regularizer """transh""" +889 80 optimizer """adam""" +889 80 training_loop """owa""" +889 80 negative_sampler """basic""" +889 80 evaluator """rankbased""" +889 81 dataset """wn18rr""" +889 81 model """transh""" +889 81 loss """bceaftersigmoid""" +889 81 regularizer """transh""" +889 81 optimizer 
"""adam""" +889 81 training_loop """owa""" +889 81 negative_sampler """basic""" +889 81 evaluator """rankbased""" +889 82 dataset """wn18rr""" +889 82 model """transh""" +889 82 loss """bceaftersigmoid""" +889 82 regularizer """transh""" +889 82 optimizer """adam""" +889 82 training_loop """owa""" +889 82 negative_sampler """basic""" +889 82 evaluator """rankbased""" +889 83 dataset """wn18rr""" +889 83 model """transh""" +889 83 loss """bceaftersigmoid""" +889 83 regularizer """transh""" +889 83 optimizer """adam""" +889 83 training_loop """owa""" +889 83 negative_sampler """basic""" +889 83 evaluator """rankbased""" +889 84 dataset """wn18rr""" +889 84 model """transh""" +889 84 loss """bceaftersigmoid""" +889 84 regularizer """transh""" +889 84 optimizer """adam""" +889 84 training_loop """owa""" +889 84 negative_sampler """basic""" +889 84 evaluator """rankbased""" +889 85 dataset """wn18rr""" +889 85 model """transh""" +889 85 loss """bceaftersigmoid""" +889 85 regularizer """transh""" +889 85 optimizer """adam""" +889 85 training_loop """owa""" +889 85 negative_sampler """basic""" +889 85 evaluator """rankbased""" +889 86 dataset """wn18rr""" +889 86 model """transh""" +889 86 loss """bceaftersigmoid""" +889 86 regularizer """transh""" +889 86 optimizer """adam""" +889 86 training_loop """owa""" +889 86 negative_sampler """basic""" +889 86 evaluator """rankbased""" +889 87 dataset """wn18rr""" +889 87 model """transh""" +889 87 loss """bceaftersigmoid""" +889 87 regularizer """transh""" +889 87 optimizer """adam""" +889 87 training_loop """owa""" +889 87 negative_sampler """basic""" +889 87 evaluator """rankbased""" +889 88 dataset """wn18rr""" +889 88 model """transh""" +889 88 loss """bceaftersigmoid""" +889 88 regularizer """transh""" +889 88 optimizer """adam""" +889 88 training_loop """owa""" +889 88 negative_sampler """basic""" +889 88 evaluator """rankbased""" +889 89 dataset """wn18rr""" +889 89 model """transh""" +889 89 loss """bceaftersigmoid""" 
+889 89 regularizer """transh""" +889 89 optimizer """adam""" +889 89 training_loop """owa""" +889 89 negative_sampler """basic""" +889 89 evaluator """rankbased""" +889 90 dataset """wn18rr""" +889 90 model """transh""" +889 90 loss """bceaftersigmoid""" +889 90 regularizer """transh""" +889 90 optimizer """adam""" +889 90 training_loop """owa""" +889 90 negative_sampler """basic""" +889 90 evaluator """rankbased""" +889 91 dataset """wn18rr""" +889 91 model """transh""" +889 91 loss """bceaftersigmoid""" +889 91 regularizer """transh""" +889 91 optimizer """adam""" +889 91 training_loop """owa""" +889 91 negative_sampler """basic""" +889 91 evaluator """rankbased""" +889 92 dataset """wn18rr""" +889 92 model """transh""" +889 92 loss """bceaftersigmoid""" +889 92 regularizer """transh""" +889 92 optimizer """adam""" +889 92 training_loop """owa""" +889 92 negative_sampler """basic""" +889 92 evaluator """rankbased""" +889 93 dataset """wn18rr""" +889 93 model """transh""" +889 93 loss """bceaftersigmoid""" +889 93 regularizer """transh""" +889 93 optimizer """adam""" +889 93 training_loop """owa""" +889 93 negative_sampler """basic""" +889 93 evaluator """rankbased""" +889 94 dataset """wn18rr""" +889 94 model """transh""" +889 94 loss """bceaftersigmoid""" +889 94 regularizer """transh""" +889 94 optimizer """adam""" +889 94 training_loop """owa""" +889 94 negative_sampler """basic""" +889 94 evaluator """rankbased""" +889 95 dataset """wn18rr""" +889 95 model """transh""" +889 95 loss """bceaftersigmoid""" +889 95 regularizer """transh""" +889 95 optimizer """adam""" +889 95 training_loop """owa""" +889 95 negative_sampler """basic""" +889 95 evaluator """rankbased""" +889 96 dataset """wn18rr""" +889 96 model """transh""" +889 96 loss """bceaftersigmoid""" +889 96 regularizer """transh""" +889 96 optimizer """adam""" +889 96 training_loop """owa""" +889 96 negative_sampler """basic""" +889 96 evaluator """rankbased""" +889 97 dataset """wn18rr""" +889 97 model 
"""transh""" +889 97 loss """bceaftersigmoid""" +889 97 regularizer """transh""" +889 97 optimizer """adam""" +889 97 training_loop """owa""" +889 97 negative_sampler """basic""" +889 97 evaluator """rankbased""" +889 98 dataset """wn18rr""" +889 98 model """transh""" +889 98 loss """bceaftersigmoid""" +889 98 regularizer """transh""" +889 98 optimizer """adam""" +889 98 training_loop """owa""" +889 98 negative_sampler """basic""" +889 98 evaluator """rankbased""" +889 99 dataset """wn18rr""" +889 99 model """transh""" +889 99 loss """bceaftersigmoid""" +889 99 regularizer """transh""" +889 99 optimizer """adam""" +889 99 training_loop """owa""" +889 99 negative_sampler """basic""" +889 99 evaluator """rankbased""" +889 100 dataset """wn18rr""" +889 100 model """transh""" +889 100 loss """bceaftersigmoid""" +889 100 regularizer """transh""" +889 100 optimizer """adam""" +889 100 training_loop """owa""" +889 100 negative_sampler """basic""" +889 100 evaluator """rankbased""" +890 1 model.embedding_dim 1.0 +890 1 model.scoring_fct_norm 2.0 +890 1 regularizer.weight 0.08845036176787957 +890 1 optimizer.lr 0.02225617439318514 +890 1 negative_sampler.num_negs_per_pos 58.0 +890 1 training.batch_size 1.0 +890 2 model.embedding_dim 1.0 +890 2 model.scoring_fct_norm 1.0 +890 2 regularizer.weight 0.017672104725541458 +890 2 optimizer.lr 0.03652498917038771 +890 2 negative_sampler.num_negs_per_pos 84.0 +890 2 training.batch_size 2.0 +890 3 model.embedding_dim 2.0 +890 3 model.scoring_fct_norm 2.0 +890 3 regularizer.weight 0.02246137636914012 +890 3 optimizer.lr 0.005260707973372451 +890 3 negative_sampler.num_negs_per_pos 78.0 +890 3 training.batch_size 0.0 +890 4 model.embedding_dim 1.0 +890 4 model.scoring_fct_norm 2.0 +890 4 regularizer.weight 0.11627956264538726 +890 4 optimizer.lr 0.09440213552338775 +890 4 negative_sampler.num_negs_per_pos 54.0 +890 4 training.batch_size 2.0 +890 5 model.embedding_dim 0.0 +890 5 model.scoring_fct_norm 1.0 +890 5 regularizer.weight 
0.02410057067095639 +890 5 optimizer.lr 0.006735006766081221 +890 5 negative_sampler.num_negs_per_pos 77.0 +890 5 training.batch_size 1.0 +890 6 model.embedding_dim 2.0 +890 6 model.scoring_fct_norm 1.0 +890 6 regularizer.weight 0.013749837467896734 +890 6 optimizer.lr 0.001279978867678408 +890 6 negative_sampler.num_negs_per_pos 68.0 +890 6 training.batch_size 1.0 +890 7 model.embedding_dim 1.0 +890 7 model.scoring_fct_norm 1.0 +890 7 regularizer.weight 0.13551057243749162 +890 7 optimizer.lr 0.012890586916285016 +890 7 negative_sampler.num_negs_per_pos 56.0 +890 7 training.batch_size 2.0 +890 8 model.embedding_dim 1.0 +890 8 model.scoring_fct_norm 1.0 +890 8 regularizer.weight 0.012395032162511869 +890 8 optimizer.lr 0.046275315293250234 +890 8 negative_sampler.num_negs_per_pos 64.0 +890 8 training.batch_size 1.0 +890 9 model.embedding_dim 2.0 +890 9 model.scoring_fct_norm 2.0 +890 9 regularizer.weight 0.2718794861942981 +890 9 optimizer.lr 0.013762200269311079 +890 9 negative_sampler.num_negs_per_pos 56.0 +890 9 training.batch_size 0.0 +890 10 model.embedding_dim 1.0 +890 10 model.scoring_fct_norm 1.0 +890 10 regularizer.weight 0.1677927286754295 +890 10 optimizer.lr 0.015686401641824357 +890 10 negative_sampler.num_negs_per_pos 5.0 +890 10 training.batch_size 1.0 +890 11 model.embedding_dim 1.0 +890 11 model.scoring_fct_norm 2.0 +890 11 regularizer.weight 0.165763478284593 +890 11 optimizer.lr 0.07791724191211993 +890 11 negative_sampler.num_negs_per_pos 41.0 +890 11 training.batch_size 0.0 +890 12 model.embedding_dim 1.0 +890 12 model.scoring_fct_norm 1.0 +890 12 regularizer.weight 0.12083541555591809 +890 12 optimizer.lr 0.002120949133697388 +890 12 negative_sampler.num_negs_per_pos 45.0 +890 12 training.batch_size 2.0 +890 13 model.embedding_dim 2.0 +890 13 model.scoring_fct_norm 2.0 +890 13 regularizer.weight 0.011846156497210993 +890 13 optimizer.lr 0.001393329407203958 +890 13 negative_sampler.num_negs_per_pos 39.0 +890 13 training.batch_size 2.0 +890 14 
model.embedding_dim 2.0 +890 14 model.scoring_fct_norm 1.0 +890 14 regularizer.weight 0.12662170361279873 +890 14 optimizer.lr 0.0027759886989578717 +890 14 negative_sampler.num_negs_per_pos 6.0 +890 14 training.batch_size 0.0 +890 15 model.embedding_dim 0.0 +890 15 model.scoring_fct_norm 2.0 +890 15 regularizer.weight 0.050844720566946666 +890 15 optimizer.lr 0.003049875213177658 +890 15 negative_sampler.num_negs_per_pos 76.0 +890 15 training.batch_size 1.0 +890 16 model.embedding_dim 2.0 +890 16 model.scoring_fct_norm 2.0 +890 16 regularizer.weight 0.16459039897293268 +890 16 optimizer.lr 0.0030523782088928827 +890 16 negative_sampler.num_negs_per_pos 10.0 +890 16 training.batch_size 0.0 +890 17 model.embedding_dim 0.0 +890 17 model.scoring_fct_norm 1.0 +890 17 regularizer.weight 0.012157536991748178 +890 17 optimizer.lr 0.0784696164620108 +890 17 negative_sampler.num_negs_per_pos 13.0 +890 17 training.batch_size 1.0 +890 18 model.embedding_dim 1.0 +890 18 model.scoring_fct_norm 1.0 +890 18 regularizer.weight 0.01044516164124191 +890 18 optimizer.lr 0.03665329273424794 +890 18 negative_sampler.num_negs_per_pos 60.0 +890 18 training.batch_size 2.0 +890 19 model.embedding_dim 1.0 +890 19 model.scoring_fct_norm 2.0 +890 19 regularizer.weight 0.03448958326603377 +890 19 optimizer.lr 0.002710381602432957 +890 19 negative_sampler.num_negs_per_pos 58.0 +890 19 training.batch_size 2.0 +890 20 model.embedding_dim 0.0 +890 20 model.scoring_fct_norm 2.0 +890 20 regularizer.weight 0.13197046238551338 +890 20 optimizer.lr 0.005910097687135224 +890 20 negative_sampler.num_negs_per_pos 5.0 +890 20 training.batch_size 2.0 +890 21 model.embedding_dim 0.0 +890 21 model.scoring_fct_norm 1.0 +890 21 regularizer.weight 0.049522690573367535 +890 21 optimizer.lr 0.03309272736674465 +890 21 negative_sampler.num_negs_per_pos 92.0 +890 21 training.batch_size 1.0 +890 22 model.embedding_dim 2.0 +890 22 model.scoring_fct_norm 2.0 +890 22 regularizer.weight 0.061158529314779726 +890 22 
optimizer.lr 0.012138816159521127 +890 22 negative_sampler.num_negs_per_pos 4.0 +890 22 training.batch_size 0.0 +890 23 model.embedding_dim 0.0 +890 23 model.scoring_fct_norm 2.0 +890 23 regularizer.weight 0.02458883183259495 +890 23 optimizer.lr 0.005212699635634724 +890 23 negative_sampler.num_negs_per_pos 72.0 +890 23 training.batch_size 1.0 +890 24 model.embedding_dim 2.0 +890 24 model.scoring_fct_norm 1.0 +890 24 regularizer.weight 0.010887456350791722 +890 24 optimizer.lr 0.0023630278089001085 +890 24 negative_sampler.num_negs_per_pos 94.0 +890 24 training.batch_size 1.0 +890 25 model.embedding_dim 1.0 +890 25 model.scoring_fct_norm 1.0 +890 25 regularizer.weight 0.021655083306340417 +890 25 optimizer.lr 0.007300492946608541 +890 25 negative_sampler.num_negs_per_pos 73.0 +890 25 training.batch_size 2.0 +890 26 model.embedding_dim 0.0 +890 26 model.scoring_fct_norm 1.0 +890 26 regularizer.weight 0.015288158006175298 +890 26 optimizer.lr 0.006469059347921961 +890 26 negative_sampler.num_negs_per_pos 97.0 +890 26 training.batch_size 1.0 +890 27 model.embedding_dim 1.0 +890 27 model.scoring_fct_norm 1.0 +890 27 regularizer.weight 0.04324084887344448 +890 27 optimizer.lr 0.0018386119943793386 +890 27 negative_sampler.num_negs_per_pos 1.0 +890 27 training.batch_size 0.0 +890 28 model.embedding_dim 2.0 +890 28 model.scoring_fct_norm 2.0 +890 28 regularizer.weight 0.10352530380237511 +890 28 optimizer.lr 0.0014607352006864279 +890 28 negative_sampler.num_negs_per_pos 47.0 +890 28 training.batch_size 1.0 +890 29 model.embedding_dim 0.0 +890 29 model.scoring_fct_norm 2.0 +890 29 regularizer.weight 0.09748631970435379 +890 29 optimizer.lr 0.018806781326824187 +890 29 negative_sampler.num_negs_per_pos 71.0 +890 29 training.batch_size 1.0 +890 30 model.embedding_dim 0.0 +890 30 model.scoring_fct_norm 2.0 +890 30 regularizer.weight 0.05300324445728298 +890 30 optimizer.lr 0.047272008398685526 +890 30 negative_sampler.num_negs_per_pos 65.0 +890 30 training.batch_size 2.0 
+890 31 model.embedding_dim 2.0 +890 31 model.scoring_fct_norm 2.0 +890 31 regularizer.weight 0.019615935157807726 +890 31 optimizer.lr 0.009659541198584214 +890 31 negative_sampler.num_negs_per_pos 6.0 +890 31 training.batch_size 0.0 +890 32 model.embedding_dim 0.0 +890 32 model.scoring_fct_norm 2.0 +890 32 regularizer.weight 0.13887325775881174 +890 32 optimizer.lr 0.015482127892476362 +890 32 negative_sampler.num_negs_per_pos 6.0 +890 32 training.batch_size 2.0 +890 33 model.embedding_dim 1.0 +890 33 model.scoring_fct_norm 2.0 +890 33 regularizer.weight 0.0348977919773796 +890 33 optimizer.lr 0.028731068217010717 +890 33 negative_sampler.num_negs_per_pos 67.0 +890 33 training.batch_size 0.0 +890 34 model.embedding_dim 0.0 +890 34 model.scoring_fct_norm 1.0 +890 34 regularizer.weight 0.015466345939599925 +890 34 optimizer.lr 0.07897189954311921 +890 34 negative_sampler.num_negs_per_pos 16.0 +890 34 training.batch_size 2.0 +890 35 model.embedding_dim 1.0 +890 35 model.scoring_fct_norm 1.0 +890 35 regularizer.weight 0.19089314573352267 +890 35 optimizer.lr 0.02227854504600624 +890 35 negative_sampler.num_negs_per_pos 94.0 +890 35 training.batch_size 0.0 +890 36 model.embedding_dim 2.0 +890 36 model.scoring_fct_norm 1.0 +890 36 regularizer.weight 0.2894114162802457 +890 36 optimizer.lr 0.03135415234311126 +890 36 negative_sampler.num_negs_per_pos 99.0 +890 36 training.batch_size 2.0 +890 37 model.embedding_dim 1.0 +890 37 model.scoring_fct_norm 1.0 +890 37 regularizer.weight 0.16519933481208968 +890 37 optimizer.lr 0.0036074391306876986 +890 37 negative_sampler.num_negs_per_pos 67.0 +890 37 training.batch_size 0.0 +890 38 model.embedding_dim 1.0 +890 38 model.scoring_fct_norm 1.0 +890 38 regularizer.weight 0.08714425489387859 +890 38 optimizer.lr 0.001216126050093374 +890 38 negative_sampler.num_negs_per_pos 4.0 +890 38 training.batch_size 2.0 +890 39 model.embedding_dim 1.0 +890 39 model.scoring_fct_norm 1.0 +890 39 regularizer.weight 0.1119524337989931 +890 39 
optimizer.lr 0.032892574967186235 +890 39 negative_sampler.num_negs_per_pos 33.0 +890 39 training.batch_size 1.0 +890 40 model.embedding_dim 1.0 +890 40 model.scoring_fct_norm 2.0 +890 40 regularizer.weight 0.054310507641548295 +890 40 optimizer.lr 0.010007728671152434 +890 40 negative_sampler.num_negs_per_pos 30.0 +890 40 training.batch_size 2.0 +890 41 model.embedding_dim 1.0 +890 41 model.scoring_fct_norm 1.0 +890 41 regularizer.weight 0.03536740131069389 +890 41 optimizer.lr 0.0320491477439505 +890 41 negative_sampler.num_negs_per_pos 99.0 +890 41 training.batch_size 1.0 +890 42 model.embedding_dim 2.0 +890 42 model.scoring_fct_norm 2.0 +890 42 regularizer.weight 0.07635439637832167 +890 42 optimizer.lr 0.0012292966450042867 +890 42 negative_sampler.num_negs_per_pos 56.0 +890 42 training.batch_size 1.0 +890 43 model.embedding_dim 2.0 +890 43 model.scoring_fct_norm 2.0 +890 43 regularizer.weight 0.13906919716814772 +890 43 optimizer.lr 0.015115558140685307 +890 43 negative_sampler.num_negs_per_pos 33.0 +890 43 training.batch_size 1.0 +890 44 model.embedding_dim 0.0 +890 44 model.scoring_fct_norm 1.0 +890 44 regularizer.weight 0.043112710472313284 +890 44 optimizer.lr 0.06526786032522165 +890 44 negative_sampler.num_negs_per_pos 83.0 +890 44 training.batch_size 1.0 +890 45 model.embedding_dim 1.0 +890 45 model.scoring_fct_norm 1.0 +890 45 regularizer.weight 0.2670209469749279 +890 45 optimizer.lr 0.019380702610936055 +890 45 negative_sampler.num_negs_per_pos 36.0 +890 45 training.batch_size 1.0 +890 46 model.embedding_dim 1.0 +890 46 model.scoring_fct_norm 2.0 +890 46 regularizer.weight 0.03829420760624111 +890 46 optimizer.lr 0.0017697445196285012 +890 46 negative_sampler.num_negs_per_pos 8.0 +890 46 training.batch_size 1.0 +890 47 model.embedding_dim 0.0 +890 47 model.scoring_fct_norm 2.0 +890 47 regularizer.weight 0.01150174830869056 +890 47 optimizer.lr 0.0016769095384661895 +890 47 negative_sampler.num_negs_per_pos 99.0 +890 47 training.batch_size 2.0 +890 
48 model.embedding_dim 2.0 +890 48 model.scoring_fct_norm 1.0 +890 48 regularizer.weight 0.048015658008857515 +890 48 optimizer.lr 0.007459473104309735 +890 48 negative_sampler.num_negs_per_pos 75.0 +890 48 training.batch_size 0.0 +890 49 model.embedding_dim 0.0 +890 49 model.scoring_fct_norm 1.0 +890 49 regularizer.weight 0.2675752626817419 +890 49 optimizer.lr 0.0028620825346051732 +890 49 negative_sampler.num_negs_per_pos 67.0 +890 49 training.batch_size 0.0 +890 50 model.embedding_dim 0.0 +890 50 model.scoring_fct_norm 2.0 +890 50 regularizer.weight 0.04884879687981793 +890 50 optimizer.lr 0.00631814384585189 +890 50 negative_sampler.num_negs_per_pos 40.0 +890 50 training.batch_size 0.0 +890 51 model.embedding_dim 0.0 +890 51 model.scoring_fct_norm 2.0 +890 51 regularizer.weight 0.012034094776530667 +890 51 optimizer.lr 0.009453940223859902 +890 51 negative_sampler.num_negs_per_pos 11.0 +890 51 training.batch_size 1.0 +890 52 model.embedding_dim 1.0 +890 52 model.scoring_fct_norm 2.0 +890 52 regularizer.weight 0.041977404502161544 +890 52 optimizer.lr 0.005101244402133726 +890 52 negative_sampler.num_negs_per_pos 4.0 +890 52 training.batch_size 1.0 +890 53 model.embedding_dim 1.0 +890 53 model.scoring_fct_norm 1.0 +890 53 regularizer.weight 0.05858542695540009 +890 53 optimizer.lr 0.013860649806025967 +890 53 negative_sampler.num_negs_per_pos 74.0 +890 53 training.batch_size 0.0 +890 54 model.embedding_dim 0.0 +890 54 model.scoring_fct_norm 2.0 +890 54 regularizer.weight 0.023513643705142164 +890 54 optimizer.lr 0.07506421824863237 +890 54 negative_sampler.num_negs_per_pos 96.0 +890 54 training.batch_size 2.0 +890 55 model.embedding_dim 0.0 +890 55 model.scoring_fct_norm 2.0 +890 55 regularizer.weight 0.0185104942582024 +890 55 optimizer.lr 0.047460461812210065 +890 55 negative_sampler.num_negs_per_pos 94.0 +890 55 training.batch_size 2.0 +890 56 model.embedding_dim 1.0 +890 56 model.scoring_fct_norm 1.0 +890 56 regularizer.weight 0.023828845841180748 +890 56 
optimizer.lr 0.0029772071820839113 +890 56 negative_sampler.num_negs_per_pos 41.0 +890 56 training.batch_size 0.0 +890 57 model.embedding_dim 2.0 +890 57 model.scoring_fct_norm 2.0 +890 57 regularizer.weight 0.12876369476566457 +890 57 optimizer.lr 0.07306970006848994 +890 57 negative_sampler.num_negs_per_pos 52.0 +890 57 training.batch_size 0.0 +890 58 model.embedding_dim 0.0 +890 58 model.scoring_fct_norm 1.0 +890 58 regularizer.weight 0.23128778993479704 +890 58 optimizer.lr 0.001837282958652319 +890 58 negative_sampler.num_negs_per_pos 59.0 +890 58 training.batch_size 2.0 +890 59 model.embedding_dim 2.0 +890 59 model.scoring_fct_norm 2.0 +890 59 regularizer.weight 0.010603077237984267 +890 59 optimizer.lr 0.020942298141360987 +890 59 negative_sampler.num_negs_per_pos 32.0 +890 59 training.batch_size 0.0 +890 60 model.embedding_dim 1.0 +890 60 model.scoring_fct_norm 2.0 +890 60 regularizer.weight 0.03830979653366677 +890 60 optimizer.lr 0.019792389437427605 +890 60 negative_sampler.num_negs_per_pos 92.0 +890 60 training.batch_size 1.0 +890 61 model.embedding_dim 2.0 +890 61 model.scoring_fct_norm 1.0 +890 61 regularizer.weight 0.0259512797573209 +890 61 optimizer.lr 0.058965829102768694 +890 61 negative_sampler.num_negs_per_pos 69.0 +890 61 training.batch_size 2.0 +890 62 model.embedding_dim 0.0 +890 62 model.scoring_fct_norm 2.0 +890 62 regularizer.weight 0.24264239345418817 +890 62 optimizer.lr 0.0016746276265024956 +890 62 negative_sampler.num_negs_per_pos 30.0 +890 62 training.batch_size 0.0 +890 63 model.embedding_dim 0.0 +890 63 model.scoring_fct_norm 2.0 +890 63 regularizer.weight 0.13541552033691936 +890 63 optimizer.lr 0.04217425863718466 +890 63 negative_sampler.num_negs_per_pos 92.0 +890 63 training.batch_size 2.0 +890 64 model.embedding_dim 2.0 +890 64 model.scoring_fct_norm 2.0 +890 64 regularizer.weight 0.013993564946393021 +890 64 optimizer.lr 0.00613347570770899 +890 64 negative_sampler.num_negs_per_pos 12.0 +890 64 training.batch_size 0.0 +890 
65 model.embedding_dim 2.0 +890 65 model.scoring_fct_norm 2.0 +890 65 regularizer.weight 0.046540685131859716 +890 65 optimizer.lr 0.0015419132196828964 +890 65 negative_sampler.num_negs_per_pos 29.0 +890 65 training.batch_size 0.0 +890 66 model.embedding_dim 0.0 +890 66 model.scoring_fct_norm 1.0 +890 66 regularizer.weight 0.017348312662886195 +890 66 optimizer.lr 0.006917152963298458 +890 66 negative_sampler.num_negs_per_pos 90.0 +890 66 training.batch_size 1.0 +890 67 model.embedding_dim 0.0 +890 67 model.scoring_fct_norm 1.0 +890 67 regularizer.weight 0.010401285389543114 +890 67 optimizer.lr 0.00441907336277229 +890 67 negative_sampler.num_negs_per_pos 79.0 +890 67 training.batch_size 1.0 +890 68 model.embedding_dim 1.0 +890 68 model.scoring_fct_norm 1.0 +890 68 regularizer.weight 0.07251179105687997 +890 68 optimizer.lr 0.03906571849643129 +890 68 negative_sampler.num_negs_per_pos 11.0 +890 68 training.batch_size 0.0 +890 69 model.embedding_dim 2.0 +890 69 model.scoring_fct_norm 2.0 +890 69 regularizer.weight 0.01317304058513565 +890 69 optimizer.lr 0.0070839768518979164 +890 69 negative_sampler.num_negs_per_pos 36.0 +890 69 training.batch_size 0.0 +890 70 model.embedding_dim 0.0 +890 70 model.scoring_fct_norm 1.0 +890 70 regularizer.weight 0.016504187010058635 +890 70 optimizer.lr 0.0012010404162171992 +890 70 negative_sampler.num_negs_per_pos 7.0 +890 70 training.batch_size 1.0 +890 71 model.embedding_dim 2.0 +890 71 model.scoring_fct_norm 2.0 +890 71 regularizer.weight 0.03184209721062971 +890 71 optimizer.lr 0.02689473920542102 +890 71 negative_sampler.num_negs_per_pos 90.0 +890 71 training.batch_size 0.0 +890 72 model.embedding_dim 1.0 +890 72 model.scoring_fct_norm 1.0 +890 72 regularizer.weight 0.05888199256626166 +890 72 optimizer.lr 0.004017771698589908 +890 72 negative_sampler.num_negs_per_pos 52.0 +890 72 training.batch_size 0.0 +890 73 model.embedding_dim 0.0 +890 73 model.scoring_fct_norm 1.0 +890 73 regularizer.weight 0.039419691498101844 +890 
73 optimizer.lr 0.0014369183373532522 +890 73 negative_sampler.num_negs_per_pos 13.0 +890 73 training.batch_size 0.0 +890 74 model.embedding_dim 0.0 +890 74 model.scoring_fct_norm 1.0 +890 74 regularizer.weight 0.016354373891657437 +890 74 optimizer.lr 0.004622900656633995 +890 74 negative_sampler.num_negs_per_pos 63.0 +890 74 training.batch_size 0.0 +890 75 model.embedding_dim 1.0 +890 75 model.scoring_fct_norm 2.0 +890 75 regularizer.weight 0.13860512642102546 +890 75 optimizer.lr 0.005494031535472014 +890 75 negative_sampler.num_negs_per_pos 40.0 +890 75 training.batch_size 1.0 +890 76 model.embedding_dim 0.0 +890 76 model.scoring_fct_norm 2.0 +890 76 regularizer.weight 0.011131196911785175 +890 76 optimizer.lr 0.014680363482507088 +890 76 negative_sampler.num_negs_per_pos 70.0 +890 76 training.batch_size 1.0 +890 77 model.embedding_dim 2.0 +890 77 model.scoring_fct_norm 2.0 +890 77 regularizer.weight 0.045697012691116166 +890 77 optimizer.lr 0.00566760288244566 +890 77 negative_sampler.num_negs_per_pos 70.0 +890 77 training.batch_size 1.0 +890 78 model.embedding_dim 1.0 +890 78 model.scoring_fct_norm 2.0 +890 78 regularizer.weight 0.0736490396593075 +890 78 optimizer.lr 0.0650701511173286 +890 78 negative_sampler.num_negs_per_pos 89.0 +890 78 training.batch_size 1.0 +890 79 model.embedding_dim 2.0 +890 79 model.scoring_fct_norm 2.0 +890 79 regularizer.weight 0.12397043053894845 +890 79 optimizer.lr 0.03384129668701058 +890 79 negative_sampler.num_negs_per_pos 58.0 +890 79 training.batch_size 1.0 +890 80 model.embedding_dim 0.0 +890 80 model.scoring_fct_norm 1.0 +890 80 regularizer.weight 0.07386034644892037 +890 80 optimizer.lr 0.0041446699814079195 +890 80 negative_sampler.num_negs_per_pos 21.0 +890 80 training.batch_size 1.0 +890 81 model.embedding_dim 2.0 +890 81 model.scoring_fct_norm 1.0 +890 81 regularizer.weight 0.06047243752054215 +890 81 optimizer.lr 0.001136610540741753 +890 81 negative_sampler.num_negs_per_pos 11.0 +890 81 training.batch_size 0.0 
+890 82 model.embedding_dim 0.0 +890 82 model.scoring_fct_norm 2.0 +890 82 regularizer.weight 0.015107517708191541 +890 82 optimizer.lr 0.009379553725051504 +890 82 negative_sampler.num_negs_per_pos 75.0 +890 82 training.batch_size 2.0 +890 83 model.embedding_dim 0.0 +890 83 model.scoring_fct_norm 2.0 +890 83 regularizer.weight 0.190198373237579 +890 83 optimizer.lr 0.0013555914196756002 +890 83 negative_sampler.num_negs_per_pos 19.0 +890 83 training.batch_size 2.0 +890 84 model.embedding_dim 0.0 +890 84 model.scoring_fct_norm 1.0 +890 84 regularizer.weight 0.022014526188492974 +890 84 optimizer.lr 0.0025902640672236187 +890 84 negative_sampler.num_negs_per_pos 34.0 +890 84 training.batch_size 0.0 +890 85 model.embedding_dim 2.0 +890 85 model.scoring_fct_norm 2.0 +890 85 regularizer.weight 0.28073175531726813 +890 85 optimizer.lr 0.03479090977702613 +890 85 negative_sampler.num_negs_per_pos 11.0 +890 85 training.batch_size 1.0 +890 86 model.embedding_dim 2.0 +890 86 model.scoring_fct_norm 1.0 +890 86 regularizer.weight 0.01943667141591046 +890 86 optimizer.lr 0.009164814154904197 +890 86 negative_sampler.num_negs_per_pos 53.0 +890 86 training.batch_size 1.0 +890 87 model.embedding_dim 2.0 +890 87 model.scoring_fct_norm 2.0 +890 87 regularizer.weight 0.0103931340036443 +890 87 optimizer.lr 0.015464218279421576 +890 87 negative_sampler.num_negs_per_pos 91.0 +890 87 training.batch_size 0.0 +890 88 model.embedding_dim 0.0 +890 88 model.scoring_fct_norm 2.0 +890 88 regularizer.weight 0.010803336422890523 +890 88 optimizer.lr 0.003596221610280119 +890 88 negative_sampler.num_negs_per_pos 76.0 +890 88 training.batch_size 1.0 +890 89 model.embedding_dim 0.0 +890 89 model.scoring_fct_norm 2.0 +890 89 regularizer.weight 0.01905221638327786 +890 89 optimizer.lr 0.0010873712009858818 +890 89 negative_sampler.num_negs_per_pos 34.0 +890 89 training.batch_size 1.0 +890 90 model.embedding_dim 2.0 +890 90 model.scoring_fct_norm 1.0 +890 90 regularizer.weight 0.04708704729045817 
+890 90 optimizer.lr 0.01517663948575903 +890 90 negative_sampler.num_negs_per_pos 38.0 +890 90 training.batch_size 2.0 +890 91 model.embedding_dim 2.0 +890 91 model.scoring_fct_norm 2.0 +890 91 regularizer.weight 0.295147551384261 +890 91 optimizer.lr 0.001269930744430539 +890 91 negative_sampler.num_negs_per_pos 61.0 +890 91 training.batch_size 2.0 +890 92 model.embedding_dim 1.0 +890 92 model.scoring_fct_norm 2.0 +890 92 regularizer.weight 0.10688016252641734 +890 92 optimizer.lr 0.02899624792372128 +890 92 negative_sampler.num_negs_per_pos 41.0 +890 92 training.batch_size 0.0 +890 93 model.embedding_dim 2.0 +890 93 model.scoring_fct_norm 1.0 +890 93 regularizer.weight 0.1901465596606213 +890 93 optimizer.lr 0.005390569395934194 +890 93 negative_sampler.num_negs_per_pos 57.0 +890 93 training.batch_size 1.0 +890 94 model.embedding_dim 0.0 +890 94 model.scoring_fct_norm 1.0 +890 94 regularizer.weight 0.11272319728870418 +890 94 optimizer.lr 0.008831160982719018 +890 94 negative_sampler.num_negs_per_pos 87.0 +890 94 training.batch_size 2.0 +890 95 model.embedding_dim 1.0 +890 95 model.scoring_fct_norm 2.0 +890 95 regularizer.weight 0.03330611760139976 +890 95 optimizer.lr 0.06230713557427486 +890 95 negative_sampler.num_negs_per_pos 55.0 +890 95 training.batch_size 0.0 +890 96 model.embedding_dim 2.0 +890 96 model.scoring_fct_norm 1.0 +890 96 regularizer.weight 0.012791333915615492 +890 96 optimizer.lr 0.004212623901342408 +890 96 negative_sampler.num_negs_per_pos 46.0 +890 96 training.batch_size 0.0 +890 97 model.embedding_dim 1.0 +890 97 model.scoring_fct_norm 2.0 +890 97 regularizer.weight 0.05481227255041254 +890 97 optimizer.lr 0.02174649616744568 +890 97 negative_sampler.num_negs_per_pos 98.0 +890 97 training.batch_size 2.0 +890 98 model.embedding_dim 1.0 +890 98 model.scoring_fct_norm 1.0 +890 98 regularizer.weight 0.0696835786478234 +890 98 optimizer.lr 0.002437260132409155 +890 98 negative_sampler.num_negs_per_pos 38.0 +890 98 training.batch_size 2.0 +890 
99 model.embedding_dim 0.0 +890 99 model.scoring_fct_norm 2.0 +890 99 regularizer.weight 0.1341511537746414 +890 99 optimizer.lr 0.001047629199237014 +890 99 negative_sampler.num_negs_per_pos 89.0 +890 99 training.batch_size 1.0 +890 100 model.embedding_dim 0.0 +890 100 model.scoring_fct_norm 1.0 +890 100 regularizer.weight 0.02344139256662046 +890 100 optimizer.lr 0.0019145689412542865 +890 100 negative_sampler.num_negs_per_pos 11.0 +890 100 training.batch_size 2.0 +890 1 dataset """wn18rr""" +890 1 model """transh""" +890 1 loss """softplus""" +890 1 regularizer """transh""" +890 1 optimizer """adam""" +890 1 training_loop """owa""" +890 1 negative_sampler """basic""" +890 1 evaluator """rankbased""" +890 2 dataset """wn18rr""" +890 2 model """transh""" +890 2 loss """softplus""" +890 2 regularizer """transh""" +890 2 optimizer """adam""" +890 2 training_loop """owa""" +890 2 negative_sampler """basic""" +890 2 evaluator """rankbased""" +890 3 dataset """wn18rr""" +890 3 model """transh""" +890 3 loss """softplus""" +890 3 regularizer """transh""" +890 3 optimizer """adam""" +890 3 training_loop """owa""" +890 3 negative_sampler """basic""" +890 3 evaluator """rankbased""" +890 4 dataset """wn18rr""" +890 4 model """transh""" +890 4 loss """softplus""" +890 4 regularizer """transh""" +890 4 optimizer """adam""" +890 4 training_loop """owa""" +890 4 negative_sampler """basic""" +890 4 evaluator """rankbased""" +890 5 dataset """wn18rr""" +890 5 model """transh""" +890 5 loss """softplus""" +890 5 regularizer """transh""" +890 5 optimizer """adam""" +890 5 training_loop """owa""" +890 5 negative_sampler """basic""" +890 5 evaluator """rankbased""" +890 6 dataset """wn18rr""" +890 6 model """transh""" +890 6 loss """softplus""" +890 6 regularizer """transh""" +890 6 optimizer """adam""" +890 6 training_loop """owa""" +890 6 negative_sampler """basic""" +890 6 evaluator """rankbased""" +890 7 dataset """wn18rr""" +890 7 model """transh""" +890 7 loss """softplus""" 
+890 7 regularizer """transh""" +890 7 optimizer """adam""" +890 7 training_loop """owa""" +890 7 negative_sampler """basic""" +890 7 evaluator """rankbased""" +890 8 dataset """wn18rr""" +890 8 model """transh""" +890 8 loss """softplus""" +890 8 regularizer """transh""" +890 8 optimizer """adam""" +890 8 training_loop """owa""" +890 8 negative_sampler """basic""" +890 8 evaluator """rankbased""" +890 9 dataset """wn18rr""" +890 9 model """transh""" +890 9 loss """softplus""" +890 9 regularizer """transh""" +890 9 optimizer """adam""" +890 9 training_loop """owa""" +890 9 negative_sampler """basic""" +890 9 evaluator """rankbased""" +890 10 dataset """wn18rr""" +890 10 model """transh""" +890 10 loss """softplus""" +890 10 regularizer """transh""" +890 10 optimizer """adam""" +890 10 training_loop """owa""" +890 10 negative_sampler """basic""" +890 10 evaluator """rankbased""" +890 11 dataset """wn18rr""" +890 11 model """transh""" +890 11 loss """softplus""" +890 11 regularizer """transh""" +890 11 optimizer """adam""" +890 11 training_loop """owa""" +890 11 negative_sampler """basic""" +890 11 evaluator """rankbased""" +890 12 dataset """wn18rr""" +890 12 model """transh""" +890 12 loss """softplus""" +890 12 regularizer """transh""" +890 12 optimizer """adam""" +890 12 training_loop """owa""" +890 12 negative_sampler """basic""" +890 12 evaluator """rankbased""" +890 13 dataset """wn18rr""" +890 13 model """transh""" +890 13 loss """softplus""" +890 13 regularizer """transh""" +890 13 optimizer """adam""" +890 13 training_loop """owa""" +890 13 negative_sampler """basic""" +890 13 evaluator """rankbased""" +890 14 dataset """wn18rr""" +890 14 model """transh""" +890 14 loss """softplus""" +890 14 regularizer """transh""" +890 14 optimizer """adam""" +890 14 training_loop """owa""" +890 14 negative_sampler """basic""" +890 14 evaluator """rankbased""" +890 15 dataset """wn18rr""" +890 15 model """transh""" +890 15 loss """softplus""" +890 15 regularizer 
"""transh""" +890 15 optimizer """adam""" +890 15 training_loop """owa""" +890 15 negative_sampler """basic""" +890 15 evaluator """rankbased""" +890 16 dataset """wn18rr""" +890 16 model """transh""" +890 16 loss """softplus""" +890 16 regularizer """transh""" +890 16 optimizer """adam""" +890 16 training_loop """owa""" +890 16 negative_sampler """basic""" +890 16 evaluator """rankbased""" +890 17 dataset """wn18rr""" +890 17 model """transh""" +890 17 loss """softplus""" +890 17 regularizer """transh""" +890 17 optimizer """adam""" +890 17 training_loop """owa""" +890 17 negative_sampler """basic""" +890 17 evaluator """rankbased""" +890 18 dataset """wn18rr""" +890 18 model """transh""" +890 18 loss """softplus""" +890 18 regularizer """transh""" +890 18 optimizer """adam""" +890 18 training_loop """owa""" +890 18 negative_sampler """basic""" +890 18 evaluator """rankbased""" +890 19 dataset """wn18rr""" +890 19 model """transh""" +890 19 loss """softplus""" +890 19 regularizer """transh""" +890 19 optimizer """adam""" +890 19 training_loop """owa""" +890 19 negative_sampler """basic""" +890 19 evaluator """rankbased""" +890 20 dataset """wn18rr""" +890 20 model """transh""" +890 20 loss """softplus""" +890 20 regularizer """transh""" +890 20 optimizer """adam""" +890 20 training_loop """owa""" +890 20 negative_sampler """basic""" +890 20 evaluator """rankbased""" +890 21 dataset """wn18rr""" +890 21 model """transh""" +890 21 loss """softplus""" +890 21 regularizer """transh""" +890 21 optimizer """adam""" +890 21 training_loop """owa""" +890 21 negative_sampler """basic""" +890 21 evaluator """rankbased""" +890 22 dataset """wn18rr""" +890 22 model """transh""" +890 22 loss """softplus""" +890 22 regularizer """transh""" +890 22 optimizer """adam""" +890 22 training_loop """owa""" +890 22 negative_sampler """basic""" +890 22 evaluator """rankbased""" +890 23 dataset """wn18rr""" +890 23 model """transh""" +890 23 loss """softplus""" +890 23 regularizer 
"""transh""" +890 23 optimizer """adam""" +890 23 training_loop """owa""" +890 23 negative_sampler """basic""" +890 23 evaluator """rankbased""" +890 24 dataset """wn18rr""" +890 24 model """transh""" +890 24 loss """softplus""" +890 24 regularizer """transh""" +890 24 optimizer """adam""" +890 24 training_loop """owa""" +890 24 negative_sampler """basic""" +890 24 evaluator """rankbased""" +890 25 dataset """wn18rr""" +890 25 model """transh""" +890 25 loss """softplus""" +890 25 regularizer """transh""" +890 25 optimizer """adam""" +890 25 training_loop """owa""" +890 25 negative_sampler """basic""" +890 25 evaluator """rankbased""" +890 26 dataset """wn18rr""" +890 26 model """transh""" +890 26 loss """softplus""" +890 26 regularizer """transh""" +890 26 optimizer """adam""" +890 26 training_loop """owa""" +890 26 negative_sampler """basic""" +890 26 evaluator """rankbased""" +890 27 dataset """wn18rr""" +890 27 model """transh""" +890 27 loss """softplus""" +890 27 regularizer """transh""" +890 27 optimizer """adam""" +890 27 training_loop """owa""" +890 27 negative_sampler """basic""" +890 27 evaluator """rankbased""" +890 28 dataset """wn18rr""" +890 28 model """transh""" +890 28 loss """softplus""" +890 28 regularizer """transh""" +890 28 optimizer """adam""" +890 28 training_loop """owa""" +890 28 negative_sampler """basic""" +890 28 evaluator """rankbased""" +890 29 dataset """wn18rr""" +890 29 model """transh""" +890 29 loss """softplus""" +890 29 regularizer """transh""" +890 29 optimizer """adam""" +890 29 training_loop """owa""" +890 29 negative_sampler """basic""" +890 29 evaluator """rankbased""" +890 30 dataset """wn18rr""" +890 30 model """transh""" +890 30 loss """softplus""" +890 30 regularizer """transh""" +890 30 optimizer """adam""" +890 30 training_loop """owa""" +890 30 negative_sampler """basic""" +890 30 evaluator """rankbased""" +890 31 dataset """wn18rr""" +890 31 model """transh""" +890 31 loss """softplus""" +890 31 regularizer 
"""transh""" +890 31 optimizer """adam""" +890 31 training_loop """owa""" +890 31 negative_sampler """basic""" +890 31 evaluator """rankbased""" +890 32 dataset """wn18rr""" +890 32 model """transh""" +890 32 loss """softplus""" +890 32 regularizer """transh""" +890 32 optimizer """adam""" +890 32 training_loop """owa""" +890 32 negative_sampler """basic""" +890 32 evaluator """rankbased""" +890 33 dataset """wn18rr""" +890 33 model """transh""" +890 33 loss """softplus""" +890 33 regularizer """transh""" +890 33 optimizer """adam""" +890 33 training_loop """owa""" +890 33 negative_sampler """basic""" +890 33 evaluator """rankbased""" +890 34 dataset """wn18rr""" +890 34 model """transh""" +890 34 loss """softplus""" +890 34 regularizer """transh""" +890 34 optimizer """adam""" +890 34 training_loop """owa""" +890 34 negative_sampler """basic""" +890 34 evaluator """rankbased""" +890 35 dataset """wn18rr""" +890 35 model """transh""" +890 35 loss """softplus""" +890 35 regularizer """transh""" +890 35 optimizer """adam""" +890 35 training_loop """owa""" +890 35 negative_sampler """basic""" +890 35 evaluator """rankbased""" +890 36 dataset """wn18rr""" +890 36 model """transh""" +890 36 loss """softplus""" +890 36 regularizer """transh""" +890 36 optimizer """adam""" +890 36 training_loop """owa""" +890 36 negative_sampler """basic""" +890 36 evaluator """rankbased""" +890 37 dataset """wn18rr""" +890 37 model """transh""" +890 37 loss """softplus""" +890 37 regularizer """transh""" +890 37 optimizer """adam""" +890 37 training_loop """owa""" +890 37 negative_sampler """basic""" +890 37 evaluator """rankbased""" +890 38 dataset """wn18rr""" +890 38 model """transh""" +890 38 loss """softplus""" +890 38 regularizer """transh""" +890 38 optimizer """adam""" +890 38 training_loop """owa""" +890 38 negative_sampler """basic""" +890 38 evaluator """rankbased""" +890 39 dataset """wn18rr""" +890 39 model """transh""" +890 39 loss """softplus""" +890 39 regularizer 
"""transh""" +890 39 optimizer """adam""" +890 39 training_loop """owa""" +890 39 negative_sampler """basic""" +890 39 evaluator """rankbased""" +890 40 dataset """wn18rr""" +890 40 model """transh""" +890 40 loss """softplus""" +890 40 regularizer """transh""" +890 40 optimizer """adam""" +890 40 training_loop """owa""" +890 40 negative_sampler """basic""" +890 40 evaluator """rankbased""" +890 41 dataset """wn18rr""" +890 41 model """transh""" +890 41 loss """softplus""" +890 41 regularizer """transh""" +890 41 optimizer """adam""" +890 41 training_loop """owa""" +890 41 negative_sampler """basic""" +890 41 evaluator """rankbased""" +890 42 dataset """wn18rr""" +890 42 model """transh""" +890 42 loss """softplus""" +890 42 regularizer """transh""" +890 42 optimizer """adam""" +890 42 training_loop """owa""" +890 42 negative_sampler """basic""" +890 42 evaluator """rankbased""" +890 43 dataset """wn18rr""" +890 43 model """transh""" +890 43 loss """softplus""" +890 43 regularizer """transh""" +890 43 optimizer """adam""" +890 43 training_loop """owa""" +890 43 negative_sampler """basic""" +890 43 evaluator """rankbased""" +890 44 dataset """wn18rr""" +890 44 model """transh""" +890 44 loss """softplus""" +890 44 regularizer """transh""" +890 44 optimizer """adam""" +890 44 training_loop """owa""" +890 44 negative_sampler """basic""" +890 44 evaluator """rankbased""" +890 45 dataset """wn18rr""" +890 45 model """transh""" +890 45 loss """softplus""" +890 45 regularizer """transh""" +890 45 optimizer """adam""" +890 45 training_loop """owa""" +890 45 negative_sampler """basic""" +890 45 evaluator """rankbased""" +890 46 dataset """wn18rr""" +890 46 model """transh""" +890 46 loss """softplus""" +890 46 regularizer """transh""" +890 46 optimizer """adam""" +890 46 training_loop """owa""" +890 46 negative_sampler """basic""" +890 46 evaluator """rankbased""" +890 47 dataset """wn18rr""" +890 47 model """transh""" +890 47 loss """softplus""" +890 47 regularizer 
"""transh""" +890 47 optimizer """adam""" +890 47 training_loop """owa""" +890 47 negative_sampler """basic""" +890 47 evaluator """rankbased""" +890 48 dataset """wn18rr""" +890 48 model """transh""" +890 48 loss """softplus""" +890 48 regularizer """transh""" +890 48 optimizer """adam""" +890 48 training_loop """owa""" +890 48 negative_sampler """basic""" +890 48 evaluator """rankbased""" +890 49 dataset """wn18rr""" +890 49 model """transh""" +890 49 loss """softplus""" +890 49 regularizer """transh""" +890 49 optimizer """adam""" +890 49 training_loop """owa""" +890 49 negative_sampler """basic""" +890 49 evaluator """rankbased""" +890 50 dataset """wn18rr""" +890 50 model """transh""" +890 50 loss """softplus""" +890 50 regularizer """transh""" +890 50 optimizer """adam""" +890 50 training_loop """owa""" +890 50 negative_sampler """basic""" +890 50 evaluator """rankbased""" +890 51 dataset """wn18rr""" +890 51 model """transh""" +890 51 loss """softplus""" +890 51 regularizer """transh""" +890 51 optimizer """adam""" +890 51 training_loop """owa""" +890 51 negative_sampler """basic""" +890 51 evaluator """rankbased""" +890 52 dataset """wn18rr""" +890 52 model """transh""" +890 52 loss """softplus""" +890 52 regularizer """transh""" +890 52 optimizer """adam""" +890 52 training_loop """owa""" +890 52 negative_sampler """basic""" +890 52 evaluator """rankbased""" +890 53 dataset """wn18rr""" +890 53 model """transh""" +890 53 loss """softplus""" +890 53 regularizer """transh""" +890 53 optimizer """adam""" +890 53 training_loop """owa""" +890 53 negative_sampler """basic""" +890 53 evaluator """rankbased""" +890 54 dataset """wn18rr""" +890 54 model """transh""" +890 54 loss """softplus""" +890 54 regularizer """transh""" +890 54 optimizer """adam""" +890 54 training_loop """owa""" +890 54 negative_sampler """basic""" +890 54 evaluator """rankbased""" +890 55 dataset """wn18rr""" +890 55 model """transh""" +890 55 loss """softplus""" +890 55 regularizer 
"""transh""" +890 55 optimizer """adam""" +890 55 training_loop """owa""" +890 55 negative_sampler """basic""" +890 55 evaluator """rankbased""" +890 56 dataset """wn18rr""" +890 56 model """transh""" +890 56 loss """softplus""" +890 56 regularizer """transh""" +890 56 optimizer """adam""" +890 56 training_loop """owa""" +890 56 negative_sampler """basic""" +890 56 evaluator """rankbased""" +890 57 dataset """wn18rr""" +890 57 model """transh""" +890 57 loss """softplus""" +890 57 regularizer """transh""" +890 57 optimizer """adam""" +890 57 training_loop """owa""" +890 57 negative_sampler """basic""" +890 57 evaluator """rankbased""" +890 58 dataset """wn18rr""" +890 58 model """transh""" +890 58 loss """softplus""" +890 58 regularizer """transh""" +890 58 optimizer """adam""" +890 58 training_loop """owa""" +890 58 negative_sampler """basic""" +890 58 evaluator """rankbased""" +890 59 dataset """wn18rr""" +890 59 model """transh""" +890 59 loss """softplus""" +890 59 regularizer """transh""" +890 59 optimizer """adam""" +890 59 training_loop """owa""" +890 59 negative_sampler """basic""" +890 59 evaluator """rankbased""" +890 60 dataset """wn18rr""" +890 60 model """transh""" +890 60 loss """softplus""" +890 60 regularizer """transh""" +890 60 optimizer """adam""" +890 60 training_loop """owa""" +890 60 negative_sampler """basic""" +890 60 evaluator """rankbased""" +890 61 dataset """wn18rr""" +890 61 model """transh""" +890 61 loss """softplus""" +890 61 regularizer """transh""" +890 61 optimizer """adam""" +890 61 training_loop """owa""" +890 61 negative_sampler """basic""" +890 61 evaluator """rankbased""" +890 62 dataset """wn18rr""" +890 62 model """transh""" +890 62 loss """softplus""" +890 62 regularizer """transh""" +890 62 optimizer """adam""" +890 62 training_loop """owa""" +890 62 negative_sampler """basic""" +890 62 evaluator """rankbased""" +890 63 dataset """wn18rr""" +890 63 model """transh""" +890 63 loss """softplus""" +890 63 regularizer 
"""transh""" +890 63 optimizer """adam""" +890 63 training_loop """owa""" +890 63 negative_sampler """basic""" +890 63 evaluator """rankbased""" +890 64 dataset """wn18rr""" +890 64 model """transh""" +890 64 loss """softplus""" +890 64 regularizer """transh""" +890 64 optimizer """adam""" +890 64 training_loop """owa""" +890 64 negative_sampler """basic""" +890 64 evaluator """rankbased""" +890 65 dataset """wn18rr""" +890 65 model """transh""" +890 65 loss """softplus""" +890 65 regularizer """transh""" +890 65 optimizer """adam""" +890 65 training_loop """owa""" +890 65 negative_sampler """basic""" +890 65 evaluator """rankbased""" +890 66 dataset """wn18rr""" +890 66 model """transh""" +890 66 loss """softplus""" +890 66 regularizer """transh""" +890 66 optimizer """adam""" +890 66 training_loop """owa""" +890 66 negative_sampler """basic""" +890 66 evaluator """rankbased""" +890 67 dataset """wn18rr""" +890 67 model """transh""" +890 67 loss """softplus""" +890 67 regularizer """transh""" +890 67 optimizer """adam""" +890 67 training_loop """owa""" +890 67 negative_sampler """basic""" +890 67 evaluator """rankbased""" +890 68 dataset """wn18rr""" +890 68 model """transh""" +890 68 loss """softplus""" +890 68 regularizer """transh""" +890 68 optimizer """adam""" +890 68 training_loop """owa""" +890 68 negative_sampler """basic""" +890 68 evaluator """rankbased""" +890 69 dataset """wn18rr""" +890 69 model """transh""" +890 69 loss """softplus""" +890 69 regularizer """transh""" +890 69 optimizer """adam""" +890 69 training_loop """owa""" +890 69 negative_sampler """basic""" +890 69 evaluator """rankbased""" +890 70 dataset """wn18rr""" +890 70 model """transh""" +890 70 loss """softplus""" +890 70 regularizer """transh""" +890 70 optimizer """adam""" +890 70 training_loop """owa""" +890 70 negative_sampler """basic""" +890 70 evaluator """rankbased""" +890 71 dataset """wn18rr""" +890 71 model """transh""" +890 71 loss """softplus""" +890 71 regularizer 
"""transh""" +890 71 optimizer """adam""" +890 71 training_loop """owa""" +890 71 negative_sampler """basic""" +890 71 evaluator """rankbased""" +890 72 dataset """wn18rr""" +890 72 model """transh""" +890 72 loss """softplus""" +890 72 regularizer """transh""" +890 72 optimizer """adam""" +890 72 training_loop """owa""" +890 72 negative_sampler """basic""" +890 72 evaluator """rankbased""" +890 73 dataset """wn18rr""" +890 73 model """transh""" +890 73 loss """softplus""" +890 73 regularizer """transh""" +890 73 optimizer """adam""" +890 73 training_loop """owa""" +890 73 negative_sampler """basic""" +890 73 evaluator """rankbased""" +890 74 dataset """wn18rr""" +890 74 model """transh""" +890 74 loss """softplus""" +890 74 regularizer """transh""" +890 74 optimizer """adam""" +890 74 training_loop """owa""" +890 74 negative_sampler """basic""" +890 74 evaluator """rankbased""" +890 75 dataset """wn18rr""" +890 75 model """transh""" +890 75 loss """softplus""" +890 75 regularizer """transh""" +890 75 optimizer """adam""" +890 75 training_loop """owa""" +890 75 negative_sampler """basic""" +890 75 evaluator """rankbased""" +890 76 dataset """wn18rr""" +890 76 model """transh""" +890 76 loss """softplus""" +890 76 regularizer """transh""" +890 76 optimizer """adam""" +890 76 training_loop """owa""" +890 76 negative_sampler """basic""" +890 76 evaluator """rankbased""" +890 77 dataset """wn18rr""" +890 77 model """transh""" +890 77 loss """softplus""" +890 77 regularizer """transh""" +890 77 optimizer """adam""" +890 77 training_loop """owa""" +890 77 negative_sampler """basic""" +890 77 evaluator """rankbased""" +890 78 dataset """wn18rr""" +890 78 model """transh""" +890 78 loss """softplus""" +890 78 regularizer """transh""" +890 78 optimizer """adam""" +890 78 training_loop """owa""" +890 78 negative_sampler """basic""" +890 78 evaluator """rankbased""" +890 79 dataset """wn18rr""" +890 79 model """transh""" +890 79 loss """softplus""" +890 79 regularizer 
"""transh""" +890 79 optimizer """adam""" +890 79 training_loop """owa""" +890 79 negative_sampler """basic""" +890 79 evaluator """rankbased""" +890 80 dataset """wn18rr""" +890 80 model """transh""" +890 80 loss """softplus""" +890 80 regularizer """transh""" +890 80 optimizer """adam""" +890 80 training_loop """owa""" +890 80 negative_sampler """basic""" +890 80 evaluator """rankbased""" +890 81 dataset """wn18rr""" +890 81 model """transh""" +890 81 loss """softplus""" +890 81 regularizer """transh""" +890 81 optimizer """adam""" +890 81 training_loop """owa""" +890 81 negative_sampler """basic""" +890 81 evaluator """rankbased""" +890 82 dataset """wn18rr""" +890 82 model """transh""" +890 82 loss """softplus""" +890 82 regularizer """transh""" +890 82 optimizer """adam""" +890 82 training_loop """owa""" +890 82 negative_sampler """basic""" +890 82 evaluator """rankbased""" +890 83 dataset """wn18rr""" +890 83 model """transh""" +890 83 loss """softplus""" +890 83 regularizer """transh""" +890 83 optimizer """adam""" +890 83 training_loop """owa""" +890 83 negative_sampler """basic""" +890 83 evaluator """rankbased""" +890 84 dataset """wn18rr""" +890 84 model """transh""" +890 84 loss """softplus""" +890 84 regularizer """transh""" +890 84 optimizer """adam""" +890 84 training_loop """owa""" +890 84 negative_sampler """basic""" +890 84 evaluator """rankbased""" +890 85 dataset """wn18rr""" +890 85 model """transh""" +890 85 loss """softplus""" +890 85 regularizer """transh""" +890 85 optimizer """adam""" +890 85 training_loop """owa""" +890 85 negative_sampler """basic""" +890 85 evaluator """rankbased""" +890 86 dataset """wn18rr""" +890 86 model """transh""" +890 86 loss """softplus""" +890 86 regularizer """transh""" +890 86 optimizer """adam""" +890 86 training_loop """owa""" +890 86 negative_sampler """basic""" +890 86 evaluator """rankbased""" +890 87 dataset """wn18rr""" +890 87 model """transh""" +890 87 loss """softplus""" +890 87 regularizer 
"""transh""" +890 87 optimizer """adam""" +890 87 training_loop """owa""" +890 87 negative_sampler """basic""" +890 87 evaluator """rankbased""" +890 88 dataset """wn18rr""" +890 88 model """transh""" +890 88 loss """softplus""" +890 88 regularizer """transh""" +890 88 optimizer """adam""" +890 88 training_loop """owa""" +890 88 negative_sampler """basic""" +890 88 evaluator """rankbased""" +890 89 dataset """wn18rr""" +890 89 model """transh""" +890 89 loss """softplus""" +890 89 regularizer """transh""" +890 89 optimizer """adam""" +890 89 training_loop """owa""" +890 89 negative_sampler """basic""" +890 89 evaluator """rankbased""" +890 90 dataset """wn18rr""" +890 90 model """transh""" +890 90 loss """softplus""" +890 90 regularizer """transh""" +890 90 optimizer """adam""" +890 90 training_loop """owa""" +890 90 negative_sampler """basic""" +890 90 evaluator """rankbased""" +890 91 dataset """wn18rr""" +890 91 model """transh""" +890 91 loss """softplus""" +890 91 regularizer """transh""" +890 91 optimizer """adam""" +890 91 training_loop """owa""" +890 91 negative_sampler """basic""" +890 91 evaluator """rankbased""" +890 92 dataset """wn18rr""" +890 92 model """transh""" +890 92 loss """softplus""" +890 92 regularizer """transh""" +890 92 optimizer """adam""" +890 92 training_loop """owa""" +890 92 negative_sampler """basic""" +890 92 evaluator """rankbased""" +890 93 dataset """wn18rr""" +890 93 model """transh""" +890 93 loss """softplus""" +890 93 regularizer """transh""" +890 93 optimizer """adam""" +890 93 training_loop """owa""" +890 93 negative_sampler """basic""" +890 93 evaluator """rankbased""" +890 94 dataset """wn18rr""" +890 94 model """transh""" +890 94 loss """softplus""" +890 94 regularizer """transh""" +890 94 optimizer """adam""" +890 94 training_loop """owa""" +890 94 negative_sampler """basic""" +890 94 evaluator """rankbased""" +890 95 dataset """wn18rr""" +890 95 model """transh""" +890 95 loss """softplus""" +890 95 regularizer 
"""transh""" +890 95 optimizer """adam""" +890 95 training_loop """owa""" +890 95 negative_sampler """basic""" +890 95 evaluator """rankbased""" +890 96 dataset """wn18rr""" +890 96 model """transh""" +890 96 loss """softplus""" +890 96 regularizer """transh""" +890 96 optimizer """adam""" +890 96 training_loop """owa""" +890 96 negative_sampler """basic""" +890 96 evaluator """rankbased""" +890 97 dataset """wn18rr""" +890 97 model """transh""" +890 97 loss """softplus""" +890 97 regularizer """transh""" +890 97 optimizer """adam""" +890 97 training_loop """owa""" +890 97 negative_sampler """basic""" +890 97 evaluator """rankbased""" +890 98 dataset """wn18rr""" +890 98 model """transh""" +890 98 loss """softplus""" +890 98 regularizer """transh""" +890 98 optimizer """adam""" +890 98 training_loop """owa""" +890 98 negative_sampler """basic""" +890 98 evaluator """rankbased""" +890 99 dataset """wn18rr""" +890 99 model """transh""" +890 99 loss """softplus""" +890 99 regularizer """transh""" +890 99 optimizer """adam""" +890 99 training_loop """owa""" +890 99 negative_sampler """basic""" +890 99 evaluator """rankbased""" +890 100 dataset """wn18rr""" +890 100 model """transh""" +890 100 loss """softplus""" +890 100 regularizer """transh""" +890 100 optimizer """adam""" +890 100 training_loop """owa""" +890 100 negative_sampler """basic""" +890 100 evaluator """rankbased""" +891 1 model.embedding_dim 0.0 +891 1 model.scoring_fct_norm 2.0 +891 1 regularizer.weight 0.0466772095465203 +891 1 optimizer.lr 0.015407165078050946 +891 1 training.batch_size 0.0 +891 1 training.label_smoothing 0.036112997593836335 +891 2 model.embedding_dim 1.0 +891 2 model.scoring_fct_norm 2.0 +891 2 regularizer.weight 0.033094262745982456 +891 2 optimizer.lr 0.07390653513917153 +891 2 training.batch_size 0.0 +891 2 training.label_smoothing 0.0012288676166844576 +891 3 model.embedding_dim 2.0 +891 3 model.scoring_fct_norm 2.0 +891 3 regularizer.weight 0.010747734538069374 +891 3 
optimizer.lr 0.015376550756355942 +891 3 training.batch_size 1.0 +891 3 training.label_smoothing 0.20332918423481125 +891 4 model.embedding_dim 0.0 +891 4 model.scoring_fct_norm 2.0 +891 4 regularizer.weight 0.10010215571946872 +891 4 optimizer.lr 0.0038434547869519555 +891 4 training.batch_size 2.0 +891 4 training.label_smoothing 0.8553126782235747 +891 5 model.embedding_dim 2.0 +891 5 model.scoring_fct_norm 1.0 +891 5 regularizer.weight 0.011509996038121091 +891 5 optimizer.lr 0.0010426966213225764 +891 5 training.batch_size 2.0 +891 5 training.label_smoothing 0.5388269254518827 +891 1 dataset """wn18rr""" +891 1 model """transh""" +891 1 loss """softplus""" +891 1 regularizer """transh""" +891 1 optimizer """adam""" +891 1 training_loop """lcwa""" +891 1 evaluator """rankbased""" +891 2 dataset """wn18rr""" +891 2 model """transh""" +891 2 loss """softplus""" +891 2 regularizer """transh""" +891 2 optimizer """adam""" +891 2 training_loop """lcwa""" +891 2 evaluator """rankbased""" +891 3 dataset """wn18rr""" +891 3 model """transh""" +891 3 loss """softplus""" +891 3 regularizer """transh""" +891 3 optimizer """adam""" +891 3 training_loop """lcwa""" +891 3 evaluator """rankbased""" +891 4 dataset """wn18rr""" +891 4 model """transh""" +891 4 loss """softplus""" +891 4 regularizer """transh""" +891 4 optimizer """adam""" +891 4 training_loop """lcwa""" +891 4 evaluator """rankbased""" +891 5 dataset """wn18rr""" +891 5 model """transh""" +891 5 loss """softplus""" +891 5 regularizer """transh""" +891 5 optimizer """adam""" +891 5 training_loop """lcwa""" +891 5 evaluator """rankbased""" +892 1 model.embedding_dim 0.0 +892 1 model.scoring_fct_norm 2.0 +892 1 regularizer.weight 0.11396889211379439 +892 1 optimizer.lr 0.005313831519143102 +892 1 training.batch_size 0.0 +892 1 training.label_smoothing 0.0227052303106698 +892 2 model.embedding_dim 2.0 +892 2 model.scoring_fct_norm 1.0 +892 2 regularizer.weight 0.06205525676163522 +892 2 optimizer.lr 
0.028979871304317808 +892 2 training.batch_size 2.0 +892 2 training.label_smoothing 0.7717375290996714 +892 3 model.embedding_dim 2.0 +892 3 model.scoring_fct_norm 2.0 +892 3 regularizer.weight 0.01161770113770936 +892 3 optimizer.lr 0.0310485017305985 +892 3 training.batch_size 1.0 +892 3 training.label_smoothing 0.5772641466927468 +892 4 model.embedding_dim 0.0 +892 4 model.scoring_fct_norm 1.0 +892 4 regularizer.weight 0.10449143769091124 +892 4 optimizer.lr 0.0017381576313560392 +892 4 training.batch_size 2.0 +892 4 training.label_smoothing 0.7153641176294572 +892 5 model.embedding_dim 2.0 +892 5 model.scoring_fct_norm 1.0 +892 5 regularizer.weight 0.24755183099680725 +892 5 optimizer.lr 0.018621030321343796 +892 5 training.batch_size 0.0 +892 5 training.label_smoothing 0.4683654978872801 +892 6 model.embedding_dim 0.0 +892 6 model.scoring_fct_norm 2.0 +892 6 regularizer.weight 0.020161638918553493 +892 6 optimizer.lr 0.03631170988510785 +892 6 training.batch_size 2.0 +892 6 training.label_smoothing 0.12355137382659805 +892 7 model.embedding_dim 1.0 +892 7 model.scoring_fct_norm 2.0 +892 7 regularizer.weight 0.021332616297895315 +892 7 optimizer.lr 0.0031074745895919857 +892 7 training.batch_size 0.0 +892 7 training.label_smoothing 0.025860112046620094 +892 8 model.embedding_dim 1.0 +892 8 model.scoring_fct_norm 2.0 +892 8 regularizer.weight 0.01475487364772096 +892 8 optimizer.lr 0.0514421679498086 +892 8 training.batch_size 2.0 +892 8 training.label_smoothing 0.49603447764484127 +892 9 model.embedding_dim 2.0 +892 9 model.scoring_fct_norm 1.0 +892 9 regularizer.weight 0.011543606093956799 +892 9 optimizer.lr 0.0017916994450826907 +892 9 training.batch_size 1.0 +892 9 training.label_smoothing 0.07871577816441508 +892 1 dataset """wn18rr""" +892 1 model """transh""" +892 1 loss """softplus""" +892 1 regularizer """transh""" +892 1 optimizer """adam""" +892 1 training_loop """lcwa""" +892 1 evaluator """rankbased""" +892 2 dataset """wn18rr""" +892 2 model 
"""transh""" +892 2 loss """softplus""" +892 2 regularizer """transh""" +892 2 optimizer """adam""" +892 2 training_loop """lcwa""" +892 2 evaluator """rankbased""" +892 3 dataset """wn18rr""" +892 3 model """transh""" +892 3 loss """softplus""" +892 3 regularizer """transh""" +892 3 optimizer """adam""" +892 3 training_loop """lcwa""" +892 3 evaluator """rankbased""" +892 4 dataset """wn18rr""" +892 4 model """transh""" +892 4 loss """softplus""" +892 4 regularizer """transh""" +892 4 optimizer """adam""" +892 4 training_loop """lcwa""" +892 4 evaluator """rankbased""" +892 5 dataset """wn18rr""" +892 5 model """transh""" +892 5 loss """softplus""" +892 5 regularizer """transh""" +892 5 optimizer """adam""" +892 5 training_loop """lcwa""" +892 5 evaluator """rankbased""" +892 6 dataset """wn18rr""" +892 6 model """transh""" +892 6 loss """softplus""" +892 6 regularizer """transh""" +892 6 optimizer """adam""" +892 6 training_loop """lcwa""" +892 6 evaluator """rankbased""" +892 7 dataset """wn18rr""" +892 7 model """transh""" +892 7 loss """softplus""" +892 7 regularizer """transh""" +892 7 optimizer """adam""" +892 7 training_loop """lcwa""" +892 7 evaluator """rankbased""" +892 8 dataset """wn18rr""" +892 8 model """transh""" +892 8 loss """softplus""" +892 8 regularizer """transh""" +892 8 optimizer """adam""" +892 8 training_loop """lcwa""" +892 8 evaluator """rankbased""" +892 9 dataset """wn18rr""" +892 9 model """transh""" +892 9 loss """softplus""" +892 9 regularizer """transh""" +892 9 optimizer """adam""" +892 9 training_loop """lcwa""" +892 9 evaluator """rankbased""" +893 1 model.embedding_dim 0.0 +893 1 model.scoring_fct_norm 2.0 +893 1 regularizer.weight 0.015894887167265326 +893 1 optimizer.lr 0.01972255593381119 +893 1 training.batch_size 1.0 +893 1 training.label_smoothing 0.0018906473857929248 +893 2 model.embedding_dim 1.0 +893 2 model.scoring_fct_norm 1.0 +893 2 regularizer.weight 0.020907267793405005 +893 2 optimizer.lr 0.011776335864520501 
+893 2 training.batch_size 1.0 +893 2 training.label_smoothing 0.0016453089269090988 +893 3 model.embedding_dim 0.0 +893 3 model.scoring_fct_norm 2.0 +893 3 regularizer.weight 0.061145776511703574 +893 3 optimizer.lr 0.07478150981438758 +893 3 training.batch_size 1.0 +893 3 training.label_smoothing 0.0066054852379173875 +893 4 model.embedding_dim 2.0 +893 4 model.scoring_fct_norm 1.0 +893 4 regularizer.weight 0.017623123073404145 +893 4 optimizer.lr 0.0011204545621008278 +893 4 training.batch_size 0.0 +893 4 training.label_smoothing 0.18553155009885225 +893 5 model.embedding_dim 2.0 +893 5 model.scoring_fct_norm 2.0 +893 5 regularizer.weight 0.01812349142262227 +893 5 optimizer.lr 0.009061870357037457 +893 5 training.batch_size 2.0 +893 5 training.label_smoothing 0.020237342612865805 +893 1 dataset """wn18rr""" +893 1 model """transh""" +893 1 loss """bceaftersigmoid""" +893 1 regularizer """transh""" +893 1 optimizer """adam""" +893 1 training_loop """lcwa""" +893 1 evaluator """rankbased""" +893 2 dataset """wn18rr""" +893 2 model """transh""" +893 2 loss """bceaftersigmoid""" +893 2 regularizer """transh""" +893 2 optimizer """adam""" +893 2 training_loop """lcwa""" +893 2 evaluator """rankbased""" +893 3 dataset """wn18rr""" +893 3 model """transh""" +893 3 loss """bceaftersigmoid""" +893 3 regularizer """transh""" +893 3 optimizer """adam""" +893 3 training_loop """lcwa""" +893 3 evaluator """rankbased""" +893 4 dataset """wn18rr""" +893 4 model """transh""" +893 4 loss """bceaftersigmoid""" +893 4 regularizer """transh""" +893 4 optimizer """adam""" +893 4 training_loop """lcwa""" +893 4 evaluator """rankbased""" +893 5 dataset """wn18rr""" +893 5 model """transh""" +893 5 loss """bceaftersigmoid""" +893 5 regularizer """transh""" +893 5 optimizer """adam""" +893 5 training_loop """lcwa""" +893 5 evaluator """rankbased""" +894 1 model.embedding_dim 1.0 +894 1 model.scoring_fct_norm 2.0 +894 1 regularizer.weight 0.015779593624740296 +894 1 optimizer.lr 
0.0010523876283758087 +894 1 training.batch_size 1.0 +894 1 training.label_smoothing 0.01471015086066101 +894 2 model.embedding_dim 1.0 +894 2 model.scoring_fct_norm 2.0 +894 2 regularizer.weight 0.07530225319843227 +894 2 optimizer.lr 0.03938199811875207 +894 2 training.batch_size 1.0 +894 2 training.label_smoothing 0.0012539680374062082 +894 3 model.embedding_dim 0.0 +894 3 model.scoring_fct_norm 1.0 +894 3 regularizer.weight 0.02803157075929156 +894 3 optimizer.lr 0.0012844173728677284 +894 3 training.batch_size 1.0 +894 3 training.label_smoothing 0.048549767911540594 +894 4 model.embedding_dim 1.0 +894 4 model.scoring_fct_norm 2.0 +894 4 regularizer.weight 0.2513237833605932 +894 4 optimizer.lr 0.015626137178835445 +894 4 training.batch_size 0.0 +894 4 training.label_smoothing 0.035019196572953305 +894 5 model.embedding_dim 2.0 +894 5 model.scoring_fct_norm 1.0 +894 5 regularizer.weight 0.021767974236123526 +894 5 optimizer.lr 0.002079894828634749 +894 5 training.batch_size 1.0 +894 5 training.label_smoothing 0.07748620405447391 +894 6 model.embedding_dim 0.0 +894 6 model.scoring_fct_norm 2.0 +894 6 regularizer.weight 0.07387624548929322 +894 6 optimizer.lr 0.001075980845071271 +894 6 training.batch_size 0.0 +894 6 training.label_smoothing 0.01029919014422432 +894 7 model.embedding_dim 0.0 +894 7 model.scoring_fct_norm 2.0 +894 7 regularizer.weight 0.02850190107363449 +894 7 optimizer.lr 0.09663258132529312 +894 7 training.batch_size 1.0 +894 7 training.label_smoothing 0.09894378060883414 +894 8 model.embedding_dim 0.0 +894 8 model.scoring_fct_norm 1.0 +894 8 regularizer.weight 0.025459092277702523 +894 8 optimizer.lr 0.00949444563292841 +894 8 training.batch_size 0.0 +894 8 training.label_smoothing 0.025996789396585066 +894 1 dataset """wn18rr""" +894 1 model """transh""" +894 1 loss """bceaftersigmoid""" +894 1 regularizer """transh""" +894 1 optimizer """adam""" +894 1 training_loop """lcwa""" +894 1 evaluator """rankbased""" +894 2 dataset """wn18rr""" +894 
2 model """transh""" +894 2 loss """bceaftersigmoid""" +894 2 regularizer """transh""" +894 2 optimizer """adam""" +894 2 training_loop """lcwa""" +894 2 evaluator """rankbased""" +894 3 dataset """wn18rr""" +894 3 model """transh""" +894 3 loss """bceaftersigmoid""" +894 3 regularizer """transh""" +894 3 optimizer """adam""" +894 3 training_loop """lcwa""" +894 3 evaluator """rankbased""" +894 4 dataset """wn18rr""" +894 4 model """transh""" +894 4 loss """bceaftersigmoid""" +894 4 regularizer """transh""" +894 4 optimizer """adam""" +894 4 training_loop """lcwa""" +894 4 evaluator """rankbased""" +894 5 dataset """wn18rr""" +894 5 model """transh""" +894 5 loss """bceaftersigmoid""" +894 5 regularizer """transh""" +894 5 optimizer """adam""" +894 5 training_loop """lcwa""" +894 5 evaluator """rankbased""" +894 6 dataset """wn18rr""" +894 6 model """transh""" +894 6 loss """bceaftersigmoid""" +894 6 regularizer """transh""" +894 6 optimizer """adam""" +894 6 training_loop """lcwa""" +894 6 evaluator """rankbased""" +894 7 dataset """wn18rr""" +894 7 model """transh""" +894 7 loss """bceaftersigmoid""" +894 7 regularizer """transh""" +894 7 optimizer """adam""" +894 7 training_loop """lcwa""" +894 7 evaluator """rankbased""" +894 8 dataset """wn18rr""" +894 8 model """transh""" +894 8 loss """bceaftersigmoid""" +894 8 regularizer """transh""" +894 8 optimizer """adam""" +894 8 training_loop """lcwa""" +894 8 evaluator """rankbased""" +895 1 model.embedding_dim 2.0 +895 1 model.scoring_fct_norm 1.0 +895 1 regularizer.weight 0.01007818106464189 +895 1 optimizer.lr 0.016308505081672083 +895 1 training.batch_size 2.0 +895 1 training.label_smoothing 0.009599642096468968 +895 2 model.embedding_dim 1.0 +895 2 model.scoring_fct_norm 1.0 +895 2 regularizer.weight 0.2194015130499783 +895 2 optimizer.lr 0.02728975596433755 +895 2 training.batch_size 2.0 +895 2 training.label_smoothing 0.22920013724186786 +895 3 model.embedding_dim 2.0 +895 3 model.scoring_fct_norm 2.0 +895 3 
regularizer.weight 0.014252751418748776 +895 3 optimizer.lr 0.027766045373039817 +895 3 training.batch_size 0.0 +895 3 training.label_smoothing 0.07802609290498262 +895 4 model.embedding_dim 0.0 +895 4 model.scoring_fct_norm 1.0 +895 4 regularizer.weight 0.03017931578047262 +895 4 optimizer.lr 0.012142079657227522 +895 4 training.batch_size 1.0 +895 4 training.label_smoothing 0.4968840788250422 +895 5 model.embedding_dim 2.0 +895 5 model.scoring_fct_norm 1.0 +895 5 regularizer.weight 0.027348550120331022 +895 5 optimizer.lr 0.05536523907490419 +895 5 training.batch_size 0.0 +895 5 training.label_smoothing 0.5614857044170846 +895 1 dataset """wn18rr""" +895 1 model """transh""" +895 1 loss """crossentropy""" +895 1 regularizer """transh""" +895 1 optimizer """adam""" +895 1 training_loop """lcwa""" +895 1 evaluator """rankbased""" +895 2 dataset """wn18rr""" +895 2 model """transh""" +895 2 loss """crossentropy""" +895 2 regularizer """transh""" +895 2 optimizer """adam""" +895 2 training_loop """lcwa""" +895 2 evaluator """rankbased""" +895 3 dataset """wn18rr""" +895 3 model """transh""" +895 3 loss """crossentropy""" +895 3 regularizer """transh""" +895 3 optimizer """adam""" +895 3 training_loop """lcwa""" +895 3 evaluator """rankbased""" +895 4 dataset """wn18rr""" +895 4 model """transh""" +895 4 loss """crossentropy""" +895 4 regularizer """transh""" +895 4 optimizer """adam""" +895 4 training_loop """lcwa""" +895 4 evaluator """rankbased""" +895 5 dataset """wn18rr""" +895 5 model """transh""" +895 5 loss """crossentropy""" +895 5 regularizer """transh""" +895 5 optimizer """adam""" +895 5 training_loop """lcwa""" +895 5 evaluator """rankbased""" +896 1 model.embedding_dim 1.0 +896 1 model.scoring_fct_norm 2.0 +896 1 regularizer.weight 0.02427781256982519 +896 1 optimizer.lr 0.0045883427307030844 +896 1 training.batch_size 2.0 +896 1 training.label_smoothing 0.0265764619228255 +896 2 model.embedding_dim 1.0 +896 2 model.scoring_fct_norm 2.0 +896 2 
regularizer.weight 0.018262444859449455 +896 2 optimizer.lr 0.017349017137052152 +896 2 training.batch_size 2.0 +896 2 training.label_smoothing 0.001012534504524305 +896 3 model.embedding_dim 2.0 +896 3 model.scoring_fct_norm 2.0 +896 3 regularizer.weight 0.011401790422431168 +896 3 optimizer.lr 0.00429718792886717 +896 3 training.batch_size 1.0 +896 3 training.label_smoothing 0.01601659844885233 +896 4 model.embedding_dim 2.0 +896 4 model.scoring_fct_norm 1.0 +896 4 regularizer.weight 0.023661738526424968 +896 4 optimizer.lr 0.07244283506296442 +896 4 training.batch_size 1.0 +896 4 training.label_smoothing 0.005241733980515373 +896 5 model.embedding_dim 0.0 +896 5 model.scoring_fct_norm 2.0 +896 5 regularizer.weight 0.012953896963866304 +896 5 optimizer.lr 0.005982033254797142 +896 5 training.batch_size 0.0 +896 5 training.label_smoothing 0.0031261520119115088 +896 6 model.embedding_dim 0.0 +896 6 model.scoring_fct_norm 1.0 +896 6 regularizer.weight 0.07290198830404168 +896 6 optimizer.lr 0.0278731678235467 +896 6 training.batch_size 0.0 +896 6 training.label_smoothing 0.00823060536891058 +896 7 model.embedding_dim 0.0 +896 7 model.scoring_fct_norm 2.0 +896 7 regularizer.weight 0.023946948519478175 +896 7 optimizer.lr 0.09995283273338104 +896 7 training.batch_size 0.0 +896 7 training.label_smoothing 0.0018476878480067121 +896 8 model.embedding_dim 2.0 +896 8 model.scoring_fct_norm 1.0 +896 8 regularizer.weight 0.058757015031806185 +896 8 optimizer.lr 0.009427685117239916 +896 8 training.batch_size 2.0 +896 8 training.label_smoothing 0.009346339791878636 +896 1 dataset """wn18rr""" +896 1 model """transh""" +896 1 loss """crossentropy""" +896 1 regularizer """transh""" +896 1 optimizer """adam""" +896 1 training_loop """lcwa""" +896 1 evaluator """rankbased""" +896 2 dataset """wn18rr""" +896 2 model """transh""" +896 2 loss """crossentropy""" +896 2 regularizer """transh""" +896 2 optimizer """adam""" +896 2 training_loop """lcwa""" +896 2 evaluator 
"""rankbased""" +896 3 dataset """wn18rr""" +896 3 model """transh""" +896 3 loss """crossentropy""" +896 3 regularizer """transh""" +896 3 optimizer """adam""" +896 3 training_loop """lcwa""" +896 3 evaluator """rankbased""" +896 4 dataset """wn18rr""" +896 4 model """transh""" +896 4 loss """crossentropy""" +896 4 regularizer """transh""" +896 4 optimizer """adam""" +896 4 training_loop """lcwa""" +896 4 evaluator """rankbased""" +896 5 dataset """wn18rr""" +896 5 model """transh""" +896 5 loss """crossentropy""" +896 5 regularizer """transh""" +896 5 optimizer """adam""" +896 5 training_loop """lcwa""" +896 5 evaluator """rankbased""" +896 6 dataset """wn18rr""" +896 6 model """transh""" +896 6 loss """crossentropy""" +896 6 regularizer """transh""" +896 6 optimizer """adam""" +896 6 training_loop """lcwa""" +896 6 evaluator """rankbased""" +896 7 dataset """wn18rr""" +896 7 model """transh""" +896 7 loss """crossentropy""" +896 7 regularizer """transh""" +896 7 optimizer """adam""" +896 7 training_loop """lcwa""" +896 7 evaluator """rankbased""" +896 8 dataset """wn18rr""" +896 8 model """transh""" +896 8 loss """crossentropy""" +896 8 regularizer """transh""" +896 8 optimizer """adam""" +896 8 training_loop """lcwa""" +896 8 evaluator """rankbased""" +897 1 model.embedding_dim 0.0 +897 1 model.relation_dim 2.0 +897 1 model.scoring_fct_norm 2.0 +897 1 loss.margin 1.493691088321425 +897 1 optimizer.lr 0.001257611379796189 +897 1 negative_sampler.num_negs_per_pos 73.0 +897 1 training.batch_size 1.0 +897 2 model.embedding_dim 1.0 +897 2 model.relation_dim 1.0 +897 2 model.scoring_fct_norm 2.0 +897 2 loss.margin 9.39766718422195 +897 2 optimizer.lr 0.01646339266384861 +897 2 negative_sampler.num_negs_per_pos 27.0 +897 2 training.batch_size 1.0 +897 3 model.embedding_dim 0.0 +897 3 model.relation_dim 1.0 +897 3 model.scoring_fct_norm 2.0 +897 3 loss.margin 6.197489803017204 +897 3 optimizer.lr 0.0018108644239719868 +897 3 negative_sampler.num_negs_per_pos 72.0 +897 
3 training.batch_size 0.0 +897 4 model.embedding_dim 1.0 +897 4 model.relation_dim 1.0 +897 4 model.scoring_fct_norm 1.0 +897 4 loss.margin 6.827058234835361 +897 4 optimizer.lr 0.07330977501317278 +897 4 negative_sampler.num_negs_per_pos 45.0 +897 4 training.batch_size 0.0 +897 5 model.embedding_dim 1.0 +897 5 model.relation_dim 0.0 +897 5 model.scoring_fct_norm 2.0 +897 5 loss.margin 6.392764113982106 +897 5 optimizer.lr 0.01198403959136811 +897 5 negative_sampler.num_negs_per_pos 75.0 +897 5 training.batch_size 2.0 +897 6 model.embedding_dim 0.0 +897 6 model.relation_dim 2.0 +897 6 model.scoring_fct_norm 1.0 +897 6 loss.margin 8.43679522473065 +897 6 optimizer.lr 0.011429837987860616 +897 6 negative_sampler.num_negs_per_pos 66.0 +897 6 training.batch_size 2.0 +897 7 model.embedding_dim 2.0 +897 7 model.relation_dim 1.0 +897 7 model.scoring_fct_norm 2.0 +897 7 loss.margin 3.1700331349353834 +897 7 optimizer.lr 0.005900085776932433 +897 7 negative_sampler.num_negs_per_pos 87.0 +897 7 training.batch_size 0.0 +897 8 model.embedding_dim 1.0 +897 8 model.relation_dim 2.0 +897 8 model.scoring_fct_norm 2.0 +897 8 loss.margin 3.619898118188381 +897 8 optimizer.lr 0.001083858191815302 +897 8 negative_sampler.num_negs_per_pos 8.0 +897 8 training.batch_size 2.0 +897 9 model.embedding_dim 1.0 +897 9 model.relation_dim 0.0 +897 9 model.scoring_fct_norm 1.0 +897 9 loss.margin 6.461372453705495 +897 9 optimizer.lr 0.018754318609224677 +897 9 negative_sampler.num_negs_per_pos 95.0 +897 9 training.batch_size 1.0 +897 10 model.embedding_dim 2.0 +897 10 model.relation_dim 1.0 +897 10 model.scoring_fct_norm 1.0 +897 10 loss.margin 5.028606416234684 +897 10 optimizer.lr 0.0017391185020775618 +897 10 negative_sampler.num_negs_per_pos 78.0 +897 10 training.batch_size 1.0 +897 11 model.embedding_dim 2.0 +897 11 model.relation_dim 0.0 +897 11 model.scoring_fct_norm 1.0 +897 11 loss.margin 7.201337947791849 +897 11 optimizer.lr 0.002860609010947273 +897 11 
negative_sampler.num_negs_per_pos 81.0 +897 11 training.batch_size 0.0 +897 1 dataset """fb15k237""" +897 1 model """transr""" +897 1 loss """marginranking""" +897 1 regularizer """no""" +897 1 optimizer """adam""" +897 1 training_loop """owa""" +897 1 negative_sampler """basic""" +897 1 evaluator """rankbased""" +897 2 dataset """fb15k237""" +897 2 model """transr""" +897 2 loss """marginranking""" +897 2 regularizer """no""" +897 2 optimizer """adam""" +897 2 training_loop """owa""" +897 2 negative_sampler """basic""" +897 2 evaluator """rankbased""" +897 3 dataset """fb15k237""" +897 3 model """transr""" +897 3 loss """marginranking""" +897 3 regularizer """no""" +897 3 optimizer """adam""" +897 3 training_loop """owa""" +897 3 negative_sampler """basic""" +897 3 evaluator """rankbased""" +897 4 dataset """fb15k237""" +897 4 model """transr""" +897 4 loss """marginranking""" +897 4 regularizer """no""" +897 4 optimizer """adam""" +897 4 training_loop """owa""" +897 4 negative_sampler """basic""" +897 4 evaluator """rankbased""" +897 5 dataset """fb15k237""" +897 5 model """transr""" +897 5 loss """marginranking""" +897 5 regularizer """no""" +897 5 optimizer """adam""" +897 5 training_loop """owa""" +897 5 negative_sampler """basic""" +897 5 evaluator """rankbased""" +897 6 dataset """fb15k237""" +897 6 model """transr""" +897 6 loss """marginranking""" +897 6 regularizer """no""" +897 6 optimizer """adam""" +897 6 training_loop """owa""" +897 6 negative_sampler """basic""" +897 6 evaluator """rankbased""" +897 7 dataset """fb15k237""" +897 7 model """transr""" +897 7 loss """marginranking""" +897 7 regularizer """no""" +897 7 optimizer """adam""" +897 7 training_loop """owa""" +897 7 negative_sampler """basic""" +897 7 evaluator """rankbased""" +897 8 dataset """fb15k237""" +897 8 model """transr""" +897 8 loss """marginranking""" +897 8 regularizer """no""" +897 8 optimizer """adam""" +897 8 training_loop """owa""" +897 8 negative_sampler """basic""" +897 8 
evaluator """rankbased""" +897 9 dataset """fb15k237""" +897 9 model """transr""" +897 9 loss """marginranking""" +897 9 regularizer """no""" +897 9 optimizer """adam""" +897 9 training_loop """owa""" +897 9 negative_sampler """basic""" +897 9 evaluator """rankbased""" +897 10 dataset """fb15k237""" +897 10 model """transr""" +897 10 loss """marginranking""" +897 10 regularizer """no""" +897 10 optimizer """adam""" +897 10 training_loop """owa""" +897 10 negative_sampler """basic""" +897 10 evaluator """rankbased""" +897 11 dataset """fb15k237""" +897 11 model """transr""" +897 11 loss """marginranking""" +897 11 regularizer """no""" +897 11 optimizer """adam""" +897 11 training_loop """owa""" +897 11 negative_sampler """basic""" +897 11 evaluator """rankbased""" +898 1 model.embedding_dim 2.0 +898 1 model.relation_dim 0.0 +898 1 model.scoring_fct_norm 2.0 +898 1 loss.margin 4.300106702620206 +898 1 optimizer.lr 0.002910666233825893 +898 1 negative_sampler.num_negs_per_pos 48.0 +898 1 training.batch_size 2.0 +898 2 model.embedding_dim 2.0 +898 2 model.relation_dim 1.0 +898 2 model.scoring_fct_norm 2.0 +898 2 loss.margin 3.6220571618641677 +898 2 optimizer.lr 0.09091303696246403 +898 2 negative_sampler.num_negs_per_pos 87.0 +898 2 training.batch_size 1.0 +898 3 model.embedding_dim 0.0 +898 3 model.relation_dim 2.0 +898 3 model.scoring_fct_norm 2.0 +898 3 loss.margin 5.404261324010525 +898 3 optimizer.lr 0.07820659243720138 +898 3 negative_sampler.num_negs_per_pos 8.0 +898 3 training.batch_size 1.0 +898 4 model.embedding_dim 0.0 +898 4 model.relation_dim 0.0 +898 4 model.scoring_fct_norm 1.0 +898 4 loss.margin 7.160926305392877 +898 4 optimizer.lr 0.03903420378022123 +898 4 negative_sampler.num_negs_per_pos 44.0 +898 4 training.batch_size 2.0 +898 5 model.embedding_dim 2.0 +898 5 model.relation_dim 1.0 +898 5 model.scoring_fct_norm 2.0 +898 5 loss.margin 5.83427012178602 +898 5 optimizer.lr 0.08603679442501799 +898 5 negative_sampler.num_negs_per_pos 93.0 +898 5 
training.batch_size 0.0 +898 6 model.embedding_dim 2.0 +898 6 model.relation_dim 0.0 +898 6 model.scoring_fct_norm 1.0 +898 6 loss.margin 8.377214659235637 +898 6 optimizer.lr 0.05375992200921338 +898 6 negative_sampler.num_negs_per_pos 17.0 +898 6 training.batch_size 2.0 +898 7 model.embedding_dim 0.0 +898 7 model.relation_dim 0.0 +898 7 model.scoring_fct_norm 2.0 +898 7 loss.margin 7.229920661022742 +898 7 optimizer.lr 0.008877996384949146 +898 7 negative_sampler.num_negs_per_pos 37.0 +898 7 training.batch_size 1.0 +898 8 model.embedding_dim 0.0 +898 8 model.relation_dim 0.0 +898 8 model.scoring_fct_norm 1.0 +898 8 loss.margin 5.39382955812543 +898 8 optimizer.lr 0.012139072124248714 +898 8 negative_sampler.num_negs_per_pos 6.0 +898 8 training.batch_size 2.0 +898 9 model.embedding_dim 2.0 +898 9 model.relation_dim 2.0 +898 9 model.scoring_fct_norm 1.0 +898 9 loss.margin 0.5693172558810414 +898 9 optimizer.lr 0.0014396328438435802 +898 9 negative_sampler.num_negs_per_pos 39.0 +898 9 training.batch_size 2.0 +898 10 model.embedding_dim 1.0 +898 10 model.relation_dim 1.0 +898 10 model.scoring_fct_norm 2.0 +898 10 loss.margin 7.572231541693457 +898 10 optimizer.lr 0.00544510554840025 +898 10 negative_sampler.num_negs_per_pos 90.0 +898 10 training.batch_size 2.0 +898 11 model.embedding_dim 0.0 +898 11 model.relation_dim 0.0 +898 11 model.scoring_fct_norm 2.0 +898 11 loss.margin 6.81257685982864 +898 11 optimizer.lr 0.00150227279169919 +898 11 negative_sampler.num_negs_per_pos 21.0 +898 11 training.batch_size 2.0 +898 12 model.embedding_dim 1.0 +898 12 model.relation_dim 0.0 +898 12 model.scoring_fct_norm 2.0 +898 12 loss.margin 8.93642622709643 +898 12 optimizer.lr 0.0037833294375533852 +898 12 negative_sampler.num_negs_per_pos 58.0 +898 12 training.batch_size 2.0 +898 13 model.embedding_dim 0.0 +898 13 model.relation_dim 0.0 +898 13 model.scoring_fct_norm 1.0 +898 13 loss.margin 4.8862652201974965 +898 13 optimizer.lr 0.007129728098239861 +898 13 
negative_sampler.num_negs_per_pos 2.0 +898 13 training.batch_size 1.0 +898 14 model.embedding_dim 1.0 +898 14 model.relation_dim 2.0 +898 14 model.scoring_fct_norm 1.0 +898 14 loss.margin 2.814063490063849 +898 14 optimizer.lr 0.0012924387117221828 +898 14 negative_sampler.num_negs_per_pos 90.0 +898 14 training.batch_size 0.0 +898 15 model.embedding_dim 2.0 +898 15 model.relation_dim 1.0 +898 15 model.scoring_fct_norm 2.0 +898 15 loss.margin 6.663105464385922 +898 15 optimizer.lr 0.00948294204351503 +898 15 negative_sampler.num_negs_per_pos 5.0 +898 15 training.batch_size 2.0 +898 16 model.embedding_dim 0.0 +898 16 model.relation_dim 1.0 +898 16 model.scoring_fct_norm 2.0 +898 16 loss.margin 6.097705969044155 +898 16 optimizer.lr 0.007615080906052643 +898 16 negative_sampler.num_negs_per_pos 44.0 +898 16 training.batch_size 0.0 +898 17 model.embedding_dim 2.0 +898 17 model.relation_dim 2.0 +898 17 model.scoring_fct_norm 1.0 +898 17 loss.margin 2.1423965625399606 +898 17 optimizer.lr 0.0012864620860674398 +898 17 negative_sampler.num_negs_per_pos 28.0 +898 17 training.batch_size 1.0 +898 18 model.embedding_dim 0.0 +898 18 model.relation_dim 2.0 +898 18 model.scoring_fct_norm 2.0 +898 18 loss.margin 6.476240830365621 +898 18 optimizer.lr 0.0100123236932801 +898 18 negative_sampler.num_negs_per_pos 40.0 +898 18 training.batch_size 2.0 +898 19 model.embedding_dim 2.0 +898 19 model.relation_dim 2.0 +898 19 model.scoring_fct_norm 2.0 +898 19 loss.margin 8.336242102954385 +898 19 optimizer.lr 0.05015399459428912 +898 19 negative_sampler.num_negs_per_pos 98.0 +898 19 training.batch_size 2.0 +898 20 model.embedding_dim 2.0 +898 20 model.relation_dim 1.0 +898 20 model.scoring_fct_norm 2.0 +898 20 loss.margin 2.92129864261689 +898 20 optimizer.lr 0.004116757723907838 +898 20 negative_sampler.num_negs_per_pos 75.0 +898 20 training.batch_size 0.0 +898 21 model.embedding_dim 0.0 +898 21 model.relation_dim 1.0 +898 21 model.scoring_fct_norm 2.0 +898 21 loss.margin 
7.846154098313203 +898 21 optimizer.lr 0.010865055416753111 +898 21 negative_sampler.num_negs_per_pos 89.0 +898 21 training.batch_size 0.0 +898 1 dataset """fb15k237""" +898 1 model """transr""" +898 1 loss """marginranking""" +898 1 regularizer """no""" +898 1 optimizer """adam""" +898 1 training_loop """owa""" +898 1 negative_sampler """basic""" +898 1 evaluator """rankbased""" +898 2 dataset """fb15k237""" +898 2 model """transr""" +898 2 loss """marginranking""" +898 2 regularizer """no""" +898 2 optimizer """adam""" +898 2 training_loop """owa""" +898 2 negative_sampler """basic""" +898 2 evaluator """rankbased""" +898 3 dataset """fb15k237""" +898 3 model """transr""" +898 3 loss """marginranking""" +898 3 regularizer """no""" +898 3 optimizer """adam""" +898 3 training_loop """owa""" +898 3 negative_sampler """basic""" +898 3 evaluator """rankbased""" +898 4 dataset """fb15k237""" +898 4 model """transr""" +898 4 loss """marginranking""" +898 4 regularizer """no""" +898 4 optimizer """adam""" +898 4 training_loop """owa""" +898 4 negative_sampler """basic""" +898 4 evaluator """rankbased""" +898 5 dataset """fb15k237""" +898 5 model """transr""" +898 5 loss """marginranking""" +898 5 regularizer """no""" +898 5 optimizer """adam""" +898 5 training_loop """owa""" +898 5 negative_sampler """basic""" +898 5 evaluator """rankbased""" +898 6 dataset """fb15k237""" +898 6 model """transr""" +898 6 loss """marginranking""" +898 6 regularizer """no""" +898 6 optimizer """adam""" +898 6 training_loop """owa""" +898 6 negative_sampler """basic""" +898 6 evaluator """rankbased""" +898 7 dataset """fb15k237""" +898 7 model """transr""" +898 7 loss """marginranking""" +898 7 regularizer """no""" +898 7 optimizer """adam""" +898 7 training_loop """owa""" +898 7 negative_sampler """basic""" +898 7 evaluator """rankbased""" +898 8 dataset """fb15k237""" +898 8 model """transr""" +898 8 loss """marginranking""" +898 8 regularizer """no""" +898 8 optimizer """adam""" +898 8 
training_loop """owa""" +898 8 negative_sampler """basic""" +898 8 evaluator """rankbased""" +898 9 dataset """fb15k237""" +898 9 model """transr""" +898 9 loss """marginranking""" +898 9 regularizer """no""" +898 9 optimizer """adam""" +898 9 training_loop """owa""" +898 9 negative_sampler """basic""" +898 9 evaluator """rankbased""" +898 10 dataset """fb15k237""" +898 10 model """transr""" +898 10 loss """marginranking""" +898 10 regularizer """no""" +898 10 optimizer """adam""" +898 10 training_loop """owa""" +898 10 negative_sampler """basic""" +898 10 evaluator """rankbased""" +898 11 dataset """fb15k237""" +898 11 model """transr""" +898 11 loss """marginranking""" +898 11 regularizer """no""" +898 11 optimizer """adam""" +898 11 training_loop """owa""" +898 11 negative_sampler """basic""" +898 11 evaluator """rankbased""" +898 12 dataset """fb15k237""" +898 12 model """transr""" +898 12 loss """marginranking""" +898 12 regularizer """no""" +898 12 optimizer """adam""" +898 12 training_loop """owa""" +898 12 negative_sampler """basic""" +898 12 evaluator """rankbased""" +898 13 dataset """fb15k237""" +898 13 model """transr""" +898 13 loss """marginranking""" +898 13 regularizer """no""" +898 13 optimizer """adam""" +898 13 training_loop """owa""" +898 13 negative_sampler """basic""" +898 13 evaluator """rankbased""" +898 14 dataset """fb15k237""" +898 14 model """transr""" +898 14 loss """marginranking""" +898 14 regularizer """no""" +898 14 optimizer """adam""" +898 14 training_loop """owa""" +898 14 negative_sampler """basic""" +898 14 evaluator """rankbased""" +898 15 dataset """fb15k237""" +898 15 model """transr""" +898 15 loss """marginranking""" +898 15 regularizer """no""" +898 15 optimizer """adam""" +898 15 training_loop """owa""" +898 15 negative_sampler """basic""" +898 15 evaluator """rankbased""" +898 16 dataset """fb15k237""" +898 16 model """transr""" +898 16 loss """marginranking""" +898 16 regularizer """no""" +898 16 optimizer """adam""" 
+898 16 training_loop """owa""" +898 16 negative_sampler """basic""" +898 16 evaluator """rankbased""" +898 17 dataset """fb15k237""" +898 17 model """transr""" +898 17 loss """marginranking""" +898 17 regularizer """no""" +898 17 optimizer """adam""" +898 17 training_loop """owa""" +898 17 negative_sampler """basic""" +898 17 evaluator """rankbased""" +898 18 dataset """fb15k237""" +898 18 model """transr""" +898 18 loss """marginranking""" +898 18 regularizer """no""" +898 18 optimizer """adam""" +898 18 training_loop """owa""" +898 18 negative_sampler """basic""" +898 18 evaluator """rankbased""" +898 19 dataset """fb15k237""" +898 19 model """transr""" +898 19 loss """marginranking""" +898 19 regularizer """no""" +898 19 optimizer """adam""" +898 19 training_loop """owa""" +898 19 negative_sampler """basic""" +898 19 evaluator """rankbased""" +898 20 dataset """fb15k237""" +898 20 model """transr""" +898 20 loss """marginranking""" +898 20 regularizer """no""" +898 20 optimizer """adam""" +898 20 training_loop """owa""" +898 20 negative_sampler """basic""" +898 20 evaluator """rankbased""" +898 21 dataset """fb15k237""" +898 21 model """transr""" +898 21 loss """marginranking""" +898 21 regularizer """no""" +898 21 optimizer """adam""" +898 21 training_loop """owa""" +898 21 negative_sampler """basic""" +898 21 evaluator """rankbased""" +899 1 model.embedding_dim 1.0 +899 1 model.relation_dim 0.0 +899 1 model.scoring_fct_norm 2.0 +899 1 optimizer.lr 0.0014715435048596745 +899 1 training.batch_size 1.0 +899 1 training.label_smoothing 0.00653953569949188 +899 2 model.embedding_dim 2.0 +899 2 model.relation_dim 1.0 +899 2 model.scoring_fct_norm 2.0 +899 2 optimizer.lr 0.002339589656890705 +899 2 training.batch_size 1.0 +899 2 training.label_smoothing 0.017591969569532715 +899 1 dataset """fb15k237""" +899 1 model """transr""" +899 1 loss """bceaftersigmoid""" +899 1 regularizer """no""" +899 1 optimizer """adam""" +899 1 training_loop """lcwa""" +899 1 evaluator 
"""rankbased""" +899 2 dataset """fb15k237""" +899 2 model """transr""" +899 2 loss """bceaftersigmoid""" +899 2 regularizer """no""" +899 2 optimizer """adam""" +899 2 training_loop """lcwa""" +899 2 evaluator """rankbased""" +900 1 model.embedding_dim 2.0 +900 1 model.relation_dim 2.0 +900 1 model.scoring_fct_norm 1.0 +900 1 optimizer.lr 0.04022021097578895 +900 1 training.batch_size 2.0 +900 1 training.label_smoothing 0.028109195401345752 +900 2 model.embedding_dim 2.0 +900 2 model.relation_dim 0.0 +900 2 model.scoring_fct_norm 1.0 +900 2 optimizer.lr 0.003093690790737174 +900 2 training.batch_size 0.0 +900 2 training.label_smoothing 0.0036504605065756307 +900 3 model.embedding_dim 1.0 +900 3 model.relation_dim 1.0 +900 3 model.scoring_fct_norm 1.0 +900 3 optimizer.lr 0.0904454491334818 +900 3 training.batch_size 0.0 +900 3 training.label_smoothing 0.0035786564656991107 +900 4 model.embedding_dim 1.0 +900 4 model.relation_dim 2.0 +900 4 model.scoring_fct_norm 1.0 +900 4 optimizer.lr 0.0029870133723411286 +900 4 training.batch_size 0.0 +900 4 training.label_smoothing 0.0015627628990988525 +900 1 dataset """fb15k237""" +900 1 model """transr""" +900 1 loss """softplus""" +900 1 regularizer """no""" +900 1 optimizer """adam""" +900 1 training_loop """lcwa""" +900 1 evaluator """rankbased""" +900 2 dataset """fb15k237""" +900 2 model """transr""" +900 2 loss """softplus""" +900 2 regularizer """no""" +900 2 optimizer """adam""" +900 2 training_loop """lcwa""" +900 2 evaluator """rankbased""" +900 3 dataset """fb15k237""" +900 3 model """transr""" +900 3 loss """softplus""" +900 3 regularizer """no""" +900 3 optimizer """adam""" +900 3 training_loop """lcwa""" +900 3 evaluator """rankbased""" +900 4 dataset """fb15k237""" +900 4 model """transr""" +900 4 loss """softplus""" +900 4 regularizer """no""" +900 4 optimizer """adam""" +900 4 training_loop """lcwa""" +900 4 evaluator """rankbased""" +901 1 model.embedding_dim 0.0 +901 1 model.relation_dim 2.0 +901 1 
model.scoring_fct_norm 2.0 +901 1 optimizer.lr 0.004304165399890355 +901 1 training.batch_size 1.0 +901 1 training.label_smoothing 0.30675836341751234 +901 2 model.embedding_dim 0.0 +901 2 model.relation_dim 2.0 +901 2 model.scoring_fct_norm 2.0 +901 2 optimizer.lr 0.0027281713785834018 +901 2 training.batch_size 2.0 +901 2 training.label_smoothing 0.19941950916355594 +901 3 model.embedding_dim 1.0 +901 3 model.relation_dim 2.0 +901 3 model.scoring_fct_norm 1.0 +901 3 optimizer.lr 0.001629485285942846 +901 3 training.batch_size 1.0 +901 3 training.label_smoothing 0.003243628202203517 +901 4 model.embedding_dim 2.0 +901 4 model.relation_dim 2.0 +901 4 model.scoring_fct_norm 1.0 +901 4 optimizer.lr 0.026491224321190828 +901 4 training.batch_size 0.0 +901 4 training.label_smoothing 0.0030198740632280496 +901 5 model.embedding_dim 2.0 +901 5 model.relation_dim 2.0 +901 5 model.scoring_fct_norm 1.0 +901 5 optimizer.lr 0.04417632482268172 +901 5 training.batch_size 0.0 +901 5 training.label_smoothing 0.0029050652168914237 +901 1 dataset """fb15k237""" +901 1 model """transr""" +901 1 loss """bceaftersigmoid""" +901 1 regularizer """no""" +901 1 optimizer """adam""" +901 1 training_loop """lcwa""" +901 1 evaluator """rankbased""" +901 2 dataset """fb15k237""" +901 2 model """transr""" +901 2 loss """bceaftersigmoid""" +901 2 regularizer """no""" +901 2 optimizer """adam""" +901 2 training_loop """lcwa""" +901 2 evaluator """rankbased""" +901 3 dataset """fb15k237""" +901 3 model """transr""" +901 3 loss """bceaftersigmoid""" +901 3 regularizer """no""" +901 3 optimizer """adam""" +901 3 training_loop """lcwa""" +901 3 evaluator """rankbased""" +901 4 dataset """fb15k237""" +901 4 model """transr""" +901 4 loss """bceaftersigmoid""" +901 4 regularizer """no""" +901 4 optimizer """adam""" +901 4 training_loop """lcwa""" +901 4 evaluator """rankbased""" +901 5 dataset """fb15k237""" +901 5 model """transr""" +901 5 loss """bceaftersigmoid""" +901 5 regularizer """no""" +901 
5 optimizer """adam""" +901 5 training_loop """lcwa""" +901 5 evaluator """rankbased""" +902 1 model.embedding_dim 2.0 +902 1 model.relation_dim 2.0 +902 1 model.scoring_fct_norm 1.0 +902 1 optimizer.lr 0.0017141747881032152 +902 1 training.batch_size 2.0 +902 1 training.label_smoothing 0.3272218980747048 +902 2 model.embedding_dim 1.0 +902 2 model.relation_dim 0.0 +902 2 model.scoring_fct_norm 2.0 +902 2 optimizer.lr 0.004435396637136786 +902 2 training.batch_size 1.0 +902 2 training.label_smoothing 0.043132340269084926 +902 3 model.embedding_dim 0.0 +902 3 model.relation_dim 2.0 +902 3 model.scoring_fct_norm 2.0 +902 3 optimizer.lr 0.03333609728196595 +902 3 training.batch_size 0.0 +902 3 training.label_smoothing 0.0038465111115439828 +902 4 model.embedding_dim 2.0 +902 4 model.relation_dim 2.0 +902 4 model.scoring_fct_norm 1.0 +902 4 optimizer.lr 0.00588755109023398 +902 4 training.batch_size 1.0 +902 4 training.label_smoothing 0.2687033344936275 +902 1 dataset """fb15k237""" +902 1 model """transr""" +902 1 loss """softplus""" +902 1 regularizer """no""" +902 1 optimizer """adam""" +902 1 training_loop """lcwa""" +902 1 evaluator """rankbased""" +902 2 dataset """fb15k237""" +902 2 model """transr""" +902 2 loss """softplus""" +902 2 regularizer """no""" +902 2 optimizer """adam""" +902 2 training_loop """lcwa""" +902 2 evaluator """rankbased""" +902 3 dataset """fb15k237""" +902 3 model """transr""" +902 3 loss """softplus""" +902 3 regularizer """no""" +902 3 optimizer """adam""" +902 3 training_loop """lcwa""" +902 3 evaluator """rankbased""" +902 4 dataset """fb15k237""" +902 4 model """transr""" +902 4 loss """softplus""" +902 4 regularizer """no""" +902 4 optimizer """adam""" +902 4 training_loop """lcwa""" +902 4 evaluator """rankbased""" +903 1 model.embedding_dim 0.0 +903 1 model.relation_dim 2.0 +903 1 model.scoring_fct_norm 1.0 +903 1 loss.margin 12.843440752418758 +903 1 loss.adversarial_temperature 0.9091112110160019 +903 1 optimizer.lr 
0.004709570361281801 +903 1 negative_sampler.num_negs_per_pos 59.0 +903 1 training.batch_size 0.0 +903 2 model.embedding_dim 1.0 +903 2 model.relation_dim 1.0 +903 2 model.scoring_fct_norm 2.0 +903 2 loss.margin 9.611515121042446 +903 2 loss.adversarial_temperature 0.40084286400492225 +903 2 optimizer.lr 0.012282076254508507 +903 2 negative_sampler.num_negs_per_pos 11.0 +903 2 training.batch_size 1.0 +903 3 model.embedding_dim 2.0 +903 3 model.relation_dim 0.0 +903 3 model.scoring_fct_norm 2.0 +903 3 loss.margin 1.8297367981284918 +903 3 loss.adversarial_temperature 0.9557993772507526 +903 3 optimizer.lr 0.00629534810869263 +903 3 negative_sampler.num_negs_per_pos 83.0 +903 3 training.batch_size 1.0 +903 4 model.embedding_dim 2.0 +903 4 model.relation_dim 1.0 +903 4 model.scoring_fct_norm 1.0 +903 4 loss.margin 18.725847284780773 +903 4 loss.adversarial_temperature 0.4235420700358792 +903 4 optimizer.lr 0.019012010797487433 +903 4 negative_sampler.num_negs_per_pos 60.0 +903 4 training.batch_size 2.0 +903 5 model.embedding_dim 0.0 +903 5 model.relation_dim 2.0 +903 5 model.scoring_fct_norm 1.0 +903 5 loss.margin 27.176844062898187 +903 5 loss.adversarial_temperature 0.2985047241709541 +903 5 optimizer.lr 0.031276898437114904 +903 5 negative_sampler.num_negs_per_pos 85.0 +903 5 training.batch_size 2.0 +903 6 model.embedding_dim 1.0 +903 6 model.relation_dim 2.0 +903 6 model.scoring_fct_norm 2.0 +903 6 loss.margin 23.427717594143534 +903 6 loss.adversarial_temperature 0.1543397697795337 +903 6 optimizer.lr 0.005211990505991186 +903 6 negative_sampler.num_negs_per_pos 87.0 +903 6 training.batch_size 0.0 +903 7 model.embedding_dim 0.0 +903 7 model.relation_dim 2.0 +903 7 model.scoring_fct_norm 2.0 +903 7 loss.margin 4.648253312019492 +903 7 loss.adversarial_temperature 0.9378990859144872 +903 7 optimizer.lr 0.0886308533408935 +903 7 negative_sampler.num_negs_per_pos 50.0 +903 7 training.batch_size 2.0 +903 8 model.embedding_dim 1.0 +903 8 model.relation_dim 1.0 +903 8 
model.scoring_fct_norm 2.0 +903 8 loss.margin 27.012584880899908 +903 8 loss.adversarial_temperature 0.8985955347495126 +903 8 optimizer.lr 0.0271264895584901 +903 8 negative_sampler.num_negs_per_pos 32.0 +903 8 training.batch_size 2.0 +903 9 model.embedding_dim 1.0 +903 9 model.relation_dim 0.0 +903 9 model.scoring_fct_norm 2.0 +903 9 loss.margin 21.575320712657728 +903 9 loss.adversarial_temperature 0.445216055321204 +903 9 optimizer.lr 0.003953892036868951 +903 9 negative_sampler.num_negs_per_pos 58.0 +903 9 training.batch_size 0.0 +903 10 model.embedding_dim 0.0 +903 10 model.relation_dim 2.0 +903 10 model.scoring_fct_norm 2.0 +903 10 loss.margin 1.1871230598666398 +903 10 loss.adversarial_temperature 0.6494344609923985 +903 10 optimizer.lr 0.004446460322578257 +903 10 negative_sampler.num_negs_per_pos 37.0 +903 10 training.batch_size 1.0 +903 1 dataset """fb15k237""" +903 1 model """transr""" +903 1 loss """nssa""" +903 1 regularizer """no""" +903 1 optimizer """adam""" +903 1 training_loop """owa""" +903 1 negative_sampler """basic""" +903 1 evaluator """rankbased""" +903 2 dataset """fb15k237""" +903 2 model """transr""" +903 2 loss """nssa""" +903 2 regularizer """no""" +903 2 optimizer """adam""" +903 2 training_loop """owa""" +903 2 negative_sampler """basic""" +903 2 evaluator """rankbased""" +903 3 dataset """fb15k237""" +903 3 model """transr""" +903 3 loss """nssa""" +903 3 regularizer """no""" +903 3 optimizer """adam""" +903 3 training_loop """owa""" +903 3 negative_sampler """basic""" +903 3 evaluator """rankbased""" +903 4 dataset """fb15k237""" +903 4 model """transr""" +903 4 loss """nssa""" +903 4 regularizer """no""" +903 4 optimizer """adam""" +903 4 training_loop """owa""" +903 4 negative_sampler """basic""" +903 4 evaluator """rankbased""" +903 5 dataset """fb15k237""" +903 5 model """transr""" +903 5 loss """nssa""" +903 5 regularizer """no""" +903 5 optimizer """adam""" +903 5 training_loop """owa""" +903 5 negative_sampler """basic""" 
+903 5 evaluator """rankbased""" +903 6 dataset """fb15k237""" +903 6 model """transr""" +903 6 loss """nssa""" +903 6 regularizer """no""" +903 6 optimizer """adam""" +903 6 training_loop """owa""" +903 6 negative_sampler """basic""" +903 6 evaluator """rankbased""" +903 7 dataset """fb15k237""" +903 7 model """transr""" +903 7 loss """nssa""" +903 7 regularizer """no""" +903 7 optimizer """adam""" +903 7 training_loop """owa""" +903 7 negative_sampler """basic""" +903 7 evaluator """rankbased""" +903 8 dataset """fb15k237""" +903 8 model """transr""" +903 8 loss """nssa""" +903 8 regularizer """no""" +903 8 optimizer """adam""" +903 8 training_loop """owa""" +903 8 negative_sampler """basic""" +903 8 evaluator """rankbased""" +903 9 dataset """fb15k237""" +903 9 model """transr""" +903 9 loss """nssa""" +903 9 regularizer """no""" +903 9 optimizer """adam""" +903 9 training_loop """owa""" +903 9 negative_sampler """basic""" +903 9 evaluator """rankbased""" +903 10 dataset """fb15k237""" +903 10 model """transr""" +903 10 loss """nssa""" +903 10 regularizer """no""" +903 10 optimizer """adam""" +903 10 training_loop """owa""" +903 10 negative_sampler """basic""" +903 10 evaluator """rankbased""" +904 1 model.embedding_dim 2.0 +904 1 model.relation_dim 0.0 +904 1 model.scoring_fct_norm 1.0 +904 1 loss.margin 22.13786063903592 +904 1 loss.adversarial_temperature 0.8430515222487638 +904 1 optimizer.lr 0.0044371925376813495 +904 1 negative_sampler.num_negs_per_pos 48.0 +904 1 training.batch_size 0.0 +904 2 model.embedding_dim 0.0 +904 2 model.relation_dim 1.0 +904 2 model.scoring_fct_norm 1.0 +904 2 loss.margin 13.066923604823888 +904 2 loss.adversarial_temperature 0.6300565683466489 +904 2 optimizer.lr 0.024166374951416398 +904 2 negative_sampler.num_negs_per_pos 7.0 +904 2 training.batch_size 2.0 +904 3 model.embedding_dim 0.0 +904 3 model.relation_dim 0.0 +904 3 model.scoring_fct_norm 2.0 +904 3 loss.margin 5.316345811575512 +904 3 loss.adversarial_temperature 
0.9981804552450632 +904 3 optimizer.lr 0.004266104456026454 +904 3 negative_sampler.num_negs_per_pos 33.0 +904 3 training.batch_size 1.0 +904 4 model.embedding_dim 0.0 +904 4 model.relation_dim 1.0 +904 4 model.scoring_fct_norm 1.0 +904 4 loss.margin 7.749754764120047 +904 4 loss.adversarial_temperature 0.45089981069854834 +904 4 optimizer.lr 0.005874554422318967 +904 4 negative_sampler.num_negs_per_pos 46.0 +904 4 training.batch_size 1.0 +904 5 model.embedding_dim 0.0 +904 5 model.relation_dim 0.0 +904 5 model.scoring_fct_norm 1.0 +904 5 loss.margin 26.027486237873187 +904 5 loss.adversarial_temperature 0.22538151602215648 +904 5 optimizer.lr 0.012693839685893162 +904 5 negative_sampler.num_negs_per_pos 81.0 +904 5 training.batch_size 2.0 +904 6 model.embedding_dim 0.0 +904 6 model.relation_dim 0.0 +904 6 model.scoring_fct_norm 2.0 +904 6 loss.margin 12.219358035840099 +904 6 loss.adversarial_temperature 0.44367123995936897 +904 6 optimizer.lr 0.08177729356383324 +904 6 negative_sampler.num_negs_per_pos 27.0 +904 6 training.batch_size 2.0 +904 7 model.embedding_dim 1.0 +904 7 model.relation_dim 0.0 +904 7 model.scoring_fct_norm 1.0 +904 7 loss.margin 27.38026045347156 +904 7 loss.adversarial_temperature 0.1660299961761221 +904 7 optimizer.lr 0.01141640599921431 +904 7 negative_sampler.num_negs_per_pos 29.0 +904 7 training.batch_size 0.0 +904 8 model.embedding_dim 1.0 +904 8 model.relation_dim 2.0 +904 8 model.scoring_fct_norm 2.0 +904 8 loss.margin 19.5088358084742 +904 8 loss.adversarial_temperature 0.45279445066419455 +904 8 optimizer.lr 0.0012392574738857146 +904 8 negative_sampler.num_negs_per_pos 35.0 +904 8 training.batch_size 2.0 +904 9 model.embedding_dim 0.0 +904 9 model.relation_dim 2.0 +904 9 model.scoring_fct_norm 1.0 +904 9 loss.margin 9.807214280254126 +904 9 loss.adversarial_temperature 0.9264906999854271 +904 9 optimizer.lr 0.0013692263867681872 +904 9 negative_sampler.num_negs_per_pos 60.0 +904 9 training.batch_size 1.0 +904 10 model.embedding_dim 
0.0 +904 10 model.relation_dim 1.0 +904 10 model.scoring_fct_norm 2.0 +904 10 loss.margin 9.79115989222903 +904 10 loss.adversarial_temperature 0.2858251024059052 +904 10 optimizer.lr 0.004319295384048256 +904 10 negative_sampler.num_negs_per_pos 95.0 +904 10 training.batch_size 0.0 +904 11 model.embedding_dim 2.0 +904 11 model.relation_dim 2.0 +904 11 model.scoring_fct_norm 2.0 +904 11 loss.margin 15.889920162205353 +904 11 loss.adversarial_temperature 0.9255400751590258 +904 11 optimizer.lr 0.01002438813721179 +904 11 negative_sampler.num_negs_per_pos 87.0 +904 11 training.batch_size 0.0 +904 12 model.embedding_dim 0.0 +904 12 model.relation_dim 1.0 +904 12 model.scoring_fct_norm 1.0 +904 12 loss.margin 6.863826128175948 +904 12 loss.adversarial_temperature 0.7894239682868426 +904 12 optimizer.lr 0.015253377634784427 +904 12 negative_sampler.num_negs_per_pos 5.0 +904 12 training.batch_size 0.0 +904 13 model.embedding_dim 2.0 +904 13 model.relation_dim 0.0 +904 13 model.scoring_fct_norm 2.0 +904 13 loss.margin 15.191161314238176 +904 13 loss.adversarial_temperature 0.9702787630789748 +904 13 optimizer.lr 0.019038513065845433 +904 13 negative_sampler.num_negs_per_pos 73.0 +904 13 training.batch_size 1.0 +904 14 model.embedding_dim 2.0 +904 14 model.relation_dim 1.0 +904 14 model.scoring_fct_norm 2.0 +904 14 loss.margin 23.5098628664445 +904 14 loss.adversarial_temperature 0.5214133769900552 +904 14 optimizer.lr 0.0018600074894238553 +904 14 negative_sampler.num_negs_per_pos 74.0 +904 14 training.batch_size 1.0 +904 15 model.embedding_dim 2.0 +904 15 model.relation_dim 1.0 +904 15 model.scoring_fct_norm 1.0 +904 15 loss.margin 13.219013653372397 +904 15 loss.adversarial_temperature 0.4342701569676709 +904 15 optimizer.lr 0.03128043197430663 +904 15 negative_sampler.num_negs_per_pos 23.0 +904 15 training.batch_size 1.0 +904 16 model.embedding_dim 0.0 +904 16 model.relation_dim 0.0 +904 16 model.scoring_fct_norm 2.0 +904 16 loss.margin 24.934525806306073 +904 16 
loss.adversarial_temperature 0.3129812549941091 +904 16 optimizer.lr 0.050682052044770505 +904 16 negative_sampler.num_negs_per_pos 53.0 +904 16 training.batch_size 0.0 +904 17 model.embedding_dim 0.0 +904 17 model.relation_dim 0.0 +904 17 model.scoring_fct_norm 1.0 +904 17 loss.margin 8.485695921258905 +904 17 loss.adversarial_temperature 0.9548501617457423 +904 17 optimizer.lr 0.026860710257810554 +904 17 negative_sampler.num_negs_per_pos 27.0 +904 17 training.batch_size 1.0 +904 18 model.embedding_dim 0.0 +904 18 model.relation_dim 1.0 +904 18 model.scoring_fct_norm 2.0 +904 18 loss.margin 2.03893931364071 +904 18 loss.adversarial_temperature 0.5875257150622644 +904 18 optimizer.lr 0.06728480297071426 +904 18 negative_sampler.num_negs_per_pos 75.0 +904 18 training.batch_size 2.0 +904 19 model.embedding_dim 2.0 +904 19 model.relation_dim 1.0 +904 19 model.scoring_fct_norm 1.0 +904 19 loss.margin 13.666655373852995 +904 19 loss.adversarial_temperature 0.2036333149347978 +904 19 optimizer.lr 0.0019889675782671025 +904 19 negative_sampler.num_negs_per_pos 1.0 +904 19 training.batch_size 2.0 +904 20 model.embedding_dim 1.0 +904 20 model.relation_dim 2.0 +904 20 model.scoring_fct_norm 1.0 +904 20 loss.margin 21.467887239022303 +904 20 loss.adversarial_temperature 0.8937884890347962 +904 20 optimizer.lr 0.01014293127562274 +904 20 negative_sampler.num_negs_per_pos 2.0 +904 20 training.batch_size 1.0 +904 21 model.embedding_dim 1.0 +904 21 model.relation_dim 0.0 +904 21 model.scoring_fct_norm 1.0 +904 21 loss.margin 6.32005631893689 +904 21 loss.adversarial_temperature 0.3209121968174801 +904 21 optimizer.lr 0.011078128098510834 +904 21 negative_sampler.num_negs_per_pos 2.0 +904 21 training.batch_size 1.0 +904 22 model.embedding_dim 1.0 +904 22 model.relation_dim 1.0 +904 22 model.scoring_fct_norm 1.0 +904 22 loss.margin 4.4944984147875315 +904 22 loss.adversarial_temperature 0.31207237403846355 +904 22 optimizer.lr 0.0018476774775339705 +904 22 
negative_sampler.num_negs_per_pos 44.0 +904 22 training.batch_size 1.0 +904 23 model.embedding_dim 2.0 +904 23 model.relation_dim 2.0 +904 23 model.scoring_fct_norm 1.0 +904 23 loss.margin 12.511275972638327 +904 23 loss.adversarial_temperature 0.4251883980730572 +904 23 optimizer.lr 0.019442484276977336 +904 23 negative_sampler.num_negs_per_pos 30.0 +904 23 training.batch_size 0.0 +904 1 dataset """fb15k237""" +904 1 model """transr""" +904 1 loss """nssa""" +904 1 regularizer """no""" +904 1 optimizer """adam""" +904 1 training_loop """owa""" +904 1 negative_sampler """basic""" +904 1 evaluator """rankbased""" +904 2 dataset """fb15k237""" +904 2 model """transr""" +904 2 loss """nssa""" +904 2 regularizer """no""" +904 2 optimizer """adam""" +904 2 training_loop """owa""" +904 2 negative_sampler """basic""" +904 2 evaluator """rankbased""" +904 3 dataset """fb15k237""" +904 3 model """transr""" +904 3 loss """nssa""" +904 3 regularizer """no""" +904 3 optimizer """adam""" +904 3 training_loop """owa""" +904 3 negative_sampler """basic""" +904 3 evaluator """rankbased""" +904 4 dataset """fb15k237""" +904 4 model """transr""" +904 4 loss """nssa""" +904 4 regularizer """no""" +904 4 optimizer """adam""" +904 4 training_loop """owa""" +904 4 negative_sampler """basic""" +904 4 evaluator """rankbased""" +904 5 dataset """fb15k237""" +904 5 model """transr""" +904 5 loss """nssa""" +904 5 regularizer """no""" +904 5 optimizer """adam""" +904 5 training_loop """owa""" +904 5 negative_sampler """basic""" +904 5 evaluator """rankbased""" +904 6 dataset """fb15k237""" +904 6 model """transr""" +904 6 loss """nssa""" +904 6 regularizer """no""" +904 6 optimizer """adam""" +904 6 training_loop """owa""" +904 6 negative_sampler """basic""" +904 6 evaluator """rankbased""" +904 7 dataset """fb15k237""" +904 7 model """transr""" +904 7 loss """nssa""" +904 7 regularizer """no""" +904 7 optimizer """adam""" +904 7 training_loop """owa""" +904 7 negative_sampler """basic""" 
+904 7 evaluator """rankbased""" +904 8 dataset """fb15k237""" +904 8 model """transr""" +904 8 loss """nssa""" +904 8 regularizer """no""" +904 8 optimizer """adam""" +904 8 training_loop """owa""" +904 8 negative_sampler """basic""" +904 8 evaluator """rankbased""" +904 9 dataset """fb15k237""" +904 9 model """transr""" +904 9 loss """nssa""" +904 9 regularizer """no""" +904 9 optimizer """adam""" +904 9 training_loop """owa""" +904 9 negative_sampler """basic""" +904 9 evaluator """rankbased""" +904 10 dataset """fb15k237""" +904 10 model """transr""" +904 10 loss """nssa""" +904 10 regularizer """no""" +904 10 optimizer """adam""" +904 10 training_loop """owa""" +904 10 negative_sampler """basic""" +904 10 evaluator """rankbased""" +904 11 dataset """fb15k237""" +904 11 model """transr""" +904 11 loss """nssa""" +904 11 regularizer """no""" +904 11 optimizer """adam""" +904 11 training_loop """owa""" +904 11 negative_sampler """basic""" +904 11 evaluator """rankbased""" +904 12 dataset """fb15k237""" +904 12 model """transr""" +904 12 loss """nssa""" +904 12 regularizer """no""" +904 12 optimizer """adam""" +904 12 training_loop """owa""" +904 12 negative_sampler """basic""" +904 12 evaluator """rankbased""" +904 13 dataset """fb15k237""" +904 13 model """transr""" +904 13 loss """nssa""" +904 13 regularizer """no""" +904 13 optimizer """adam""" +904 13 training_loop """owa""" +904 13 negative_sampler """basic""" +904 13 evaluator """rankbased""" +904 14 dataset """fb15k237""" +904 14 model """transr""" +904 14 loss """nssa""" +904 14 regularizer """no""" +904 14 optimizer """adam""" +904 14 training_loop """owa""" +904 14 negative_sampler """basic""" +904 14 evaluator """rankbased""" +904 15 dataset """fb15k237""" +904 15 model """transr""" +904 15 loss """nssa""" +904 15 regularizer """no""" +904 15 optimizer """adam""" +904 15 training_loop """owa""" +904 15 negative_sampler """basic""" +904 15 evaluator """rankbased""" +904 16 dataset """fb15k237""" +904 16 
model """transr""" +904 16 loss """nssa""" +904 16 regularizer """no""" +904 16 optimizer """adam""" +904 16 training_loop """owa""" +904 16 negative_sampler """basic""" +904 16 evaluator """rankbased""" +904 17 dataset """fb15k237""" +904 17 model """transr""" +904 17 loss """nssa""" +904 17 regularizer """no""" +904 17 optimizer """adam""" +904 17 training_loop """owa""" +904 17 negative_sampler """basic""" +904 17 evaluator """rankbased""" +904 18 dataset """fb15k237""" +904 18 model """transr""" +904 18 loss """nssa""" +904 18 regularizer """no""" +904 18 optimizer """adam""" +904 18 training_loop """owa""" +904 18 negative_sampler """basic""" +904 18 evaluator """rankbased""" +904 19 dataset """fb15k237""" +904 19 model """transr""" +904 19 loss """nssa""" +904 19 regularizer """no""" +904 19 optimizer """adam""" +904 19 training_loop """owa""" +904 19 negative_sampler """basic""" +904 19 evaluator """rankbased""" +904 20 dataset """fb15k237""" +904 20 model """transr""" +904 20 loss """nssa""" +904 20 regularizer """no""" +904 20 optimizer """adam""" +904 20 training_loop """owa""" +904 20 negative_sampler """basic""" +904 20 evaluator """rankbased""" +904 21 dataset """fb15k237""" +904 21 model """transr""" +904 21 loss """nssa""" +904 21 regularizer """no""" +904 21 optimizer """adam""" +904 21 training_loop """owa""" +904 21 negative_sampler """basic""" +904 21 evaluator """rankbased""" +904 22 dataset """fb15k237""" +904 22 model """transr""" +904 22 loss """nssa""" +904 22 regularizer """no""" +904 22 optimizer """adam""" +904 22 training_loop """owa""" +904 22 negative_sampler """basic""" +904 22 evaluator """rankbased""" +904 23 dataset """fb15k237""" +904 23 model """transr""" +904 23 loss """nssa""" +904 23 regularizer """no""" +904 23 optimizer """adam""" +904 23 training_loop """owa""" +904 23 negative_sampler """basic""" +904 23 evaluator """rankbased""" +905 1 model.embedding_dim 2.0 +905 1 model.relation_dim 0.0 +905 1 model.scoring_fct_norm 1.0 
+905 1 optimizer.lr 0.014889755506599689 +905 1 training.batch_size 0.0 +905 1 training.label_smoothing 0.02382796563193206 +905 2 model.embedding_dim 2.0 +905 2 model.relation_dim 1.0 +905 2 model.scoring_fct_norm 2.0 +905 2 optimizer.lr 0.0010282206239580192 +905 2 training.batch_size 2.0 +905 2 training.label_smoothing 0.2852011516870578 +905 3 model.embedding_dim 0.0 +905 3 model.relation_dim 1.0 +905 3 model.scoring_fct_norm 2.0 +905 3 optimizer.lr 0.028114308469016518 +905 3 training.batch_size 2.0 +905 3 training.label_smoothing 0.0024063952990399833 +905 4 model.embedding_dim 0.0 +905 4 model.relation_dim 0.0 +905 4 model.scoring_fct_norm 2.0 +905 4 optimizer.lr 0.005673660555911838 +905 4 training.batch_size 1.0 +905 4 training.label_smoothing 0.027388394864329178 +905 5 model.embedding_dim 1.0 +905 5 model.relation_dim 1.0 +905 5 model.scoring_fct_norm 2.0 +905 5 optimizer.lr 0.0010022976690875474 +905 5 training.batch_size 1.0 +905 5 training.label_smoothing 0.5939314131042038 +905 6 model.embedding_dim 0.0 +905 6 model.relation_dim 2.0 +905 6 model.scoring_fct_norm 1.0 +905 6 optimizer.lr 0.040423464552392124 +905 6 training.batch_size 1.0 +905 6 training.label_smoothing 0.014478309785659147 +905 1 dataset """fb15k237""" +905 1 model """transr""" +905 1 loss """crossentropy""" +905 1 regularizer """no""" +905 1 optimizer """adam""" +905 1 training_loop """lcwa""" +905 1 evaluator """rankbased""" +905 2 dataset """fb15k237""" +905 2 model """transr""" +905 2 loss """crossentropy""" +905 2 regularizer """no""" +905 2 optimizer """adam""" +905 2 training_loop """lcwa""" +905 2 evaluator """rankbased""" +905 3 dataset """fb15k237""" +905 3 model """transr""" +905 3 loss """crossentropy""" +905 3 regularizer """no""" +905 3 optimizer """adam""" +905 3 training_loop """lcwa""" +905 3 evaluator """rankbased""" +905 4 dataset """fb15k237""" +905 4 model """transr""" +905 4 loss """crossentropy""" +905 4 regularizer """no""" +905 4 optimizer """adam""" +905 4 
training_loop """lcwa""" +905 4 evaluator """rankbased""" +905 5 dataset """fb15k237""" +905 5 model """transr""" +905 5 loss """crossentropy""" +905 5 regularizer """no""" +905 5 optimizer """adam""" +905 5 training_loop """lcwa""" +905 5 evaluator """rankbased""" +905 6 dataset """fb15k237""" +905 6 model """transr""" +905 6 loss """crossentropy""" +905 6 regularizer """no""" +905 6 optimizer """adam""" +905 6 training_loop """lcwa""" +905 6 evaluator """rankbased""" +906 1 model.embedding_dim 0.0 +906 1 model.relation_dim 0.0 +906 1 model.scoring_fct_norm 2.0 +906 1 optimizer.lr 0.013587279111922668 +906 1 training.batch_size 1.0 +906 1 training.label_smoothing 0.4652851630945645 +906 2 model.embedding_dim 2.0 +906 2 model.relation_dim 2.0 +906 2 model.scoring_fct_norm 2.0 +906 2 optimizer.lr 0.09479036162169531 +906 2 training.batch_size 0.0 +906 2 training.label_smoothing 0.020587862268307142 +906 3 model.embedding_dim 1.0 +906 3 model.relation_dim 2.0 +906 3 model.scoring_fct_norm 2.0 +906 3 optimizer.lr 0.009381587716440104 +906 3 training.batch_size 1.0 +906 3 training.label_smoothing 0.26876810134578 +906 4 model.embedding_dim 1.0 +906 4 model.relation_dim 0.0 +906 4 model.scoring_fct_norm 2.0 +906 4 optimizer.lr 0.006224677507003252 +906 4 training.batch_size 2.0 +906 4 training.label_smoothing 0.002733303826374393 +906 5 model.embedding_dim 1.0 +906 5 model.relation_dim 0.0 +906 5 model.scoring_fct_norm 1.0 +906 5 optimizer.lr 0.0010357638319123045 +906 5 training.batch_size 1.0 +906 5 training.label_smoothing 0.12511019251146177 +906 6 model.embedding_dim 2.0 +906 6 model.relation_dim 2.0 +906 6 model.scoring_fct_norm 1.0 +906 6 optimizer.lr 0.033166759605949635 +906 6 training.batch_size 2.0 +906 6 training.label_smoothing 0.1702322971657926 +906 1 dataset """fb15k237""" +906 1 model """transr""" +906 1 loss """crossentropy""" +906 1 regularizer """no""" +906 1 optimizer """adam""" +906 1 training_loop """lcwa""" +906 1 evaluator """rankbased""" +906 2 
dataset """fb15k237""" +906 2 model """transr""" +906 2 loss """crossentropy""" +906 2 regularizer """no""" +906 2 optimizer """adam""" +906 2 training_loop """lcwa""" +906 2 evaluator """rankbased""" +906 3 dataset """fb15k237""" +906 3 model """transr""" +906 3 loss """crossentropy""" +906 3 regularizer """no""" +906 3 optimizer """adam""" +906 3 training_loop """lcwa""" +906 3 evaluator """rankbased""" +906 4 dataset """fb15k237""" +906 4 model """transr""" +906 4 loss """crossentropy""" +906 4 regularizer """no""" +906 4 optimizer """adam""" +906 4 training_loop """lcwa""" +906 4 evaluator """rankbased""" +906 5 dataset """fb15k237""" +906 5 model """transr""" +906 5 loss """crossentropy""" +906 5 regularizer """no""" +906 5 optimizer """adam""" +906 5 training_loop """lcwa""" +906 5 evaluator """rankbased""" +906 6 dataset """fb15k237""" +906 6 model """transr""" +906 6 loss """crossentropy""" +906 6 regularizer """no""" +906 6 optimizer """adam""" +906 6 training_loop """lcwa""" +906 6 evaluator """rankbased""" +907 1 model.embedding_dim 0.0 +907 1 model.relation_dim 1.0 +907 1 model.scoring_fct_norm 1.0 +907 1 training.batch_size 0.0 +907 1 training.label_smoothing 0.00462340352329181 +907 2 model.embedding_dim 1.0 +907 2 model.relation_dim 1.0 +907 2 model.scoring_fct_norm 2.0 +907 2 training.batch_size 2.0 +907 2 training.label_smoothing 0.008695736214156244 +907 3 model.embedding_dim 1.0 +907 3 model.relation_dim 0.0 +907 3 model.scoring_fct_norm 1.0 +907 3 training.batch_size 2.0 +907 3 training.label_smoothing 0.0011335722411101356 +907 4 model.embedding_dim 0.0 +907 4 model.relation_dim 1.0 +907 4 model.scoring_fct_norm 1.0 +907 4 training.batch_size 2.0 +907 4 training.label_smoothing 0.0603172810585109 +907 5 model.embedding_dim 1.0 +907 5 model.relation_dim 0.0 +907 5 model.scoring_fct_norm 2.0 +907 5 training.batch_size 2.0 +907 5 training.label_smoothing 0.0064770775140891145 +907 6 model.embedding_dim 2.0 +907 6 model.relation_dim 1.0 +907 6 
model.scoring_fct_norm 2.0 +907 6 training.batch_size 2.0 +907 6 training.label_smoothing 0.0023401054481108634 +907 7 model.embedding_dim 2.0 +907 7 model.relation_dim 1.0 +907 7 model.scoring_fct_norm 1.0 +907 7 training.batch_size 0.0 +907 7 training.label_smoothing 0.3109621249184272 +907 8 model.embedding_dim 0.0 +907 8 model.relation_dim 2.0 +907 8 model.scoring_fct_norm 1.0 +907 8 training.batch_size 2.0 +907 8 training.label_smoothing 0.26095542373078895 +907 9 model.embedding_dim 1.0 +907 9 model.relation_dim 2.0 +907 9 model.scoring_fct_norm 2.0 +907 9 training.batch_size 2.0 +907 9 training.label_smoothing 0.09069099660181723 +907 10 model.embedding_dim 0.0 +907 10 model.relation_dim 0.0 +907 10 model.scoring_fct_norm 1.0 +907 10 training.batch_size 1.0 +907 10 training.label_smoothing 0.001043424020385775 +907 11 model.embedding_dim 2.0 +907 11 model.relation_dim 0.0 +907 11 model.scoring_fct_norm 1.0 +907 11 training.batch_size 0.0 +907 11 training.label_smoothing 0.7837670848387959 +907 12 model.embedding_dim 2.0 +907 12 model.relation_dim 1.0 +907 12 model.scoring_fct_norm 1.0 +907 12 training.batch_size 2.0 +907 12 training.label_smoothing 0.20367569142619477 +907 13 model.embedding_dim 0.0 +907 13 model.relation_dim 0.0 +907 13 model.scoring_fct_norm 1.0 +907 13 training.batch_size 0.0 +907 13 training.label_smoothing 0.0019174012479889965 +907 14 model.embedding_dim 2.0 +907 14 model.relation_dim 0.0 +907 14 model.scoring_fct_norm 2.0 +907 14 training.batch_size 2.0 +907 14 training.label_smoothing 0.9635546797882241 +907 15 model.embedding_dim 2.0 +907 15 model.relation_dim 1.0 +907 15 model.scoring_fct_norm 1.0 +907 15 training.batch_size 1.0 +907 15 training.label_smoothing 0.002785770067697414 +907 16 model.embedding_dim 0.0 +907 16 model.relation_dim 0.0 +907 16 model.scoring_fct_norm 2.0 +907 16 training.batch_size 1.0 +907 16 training.label_smoothing 0.0074483742465274935 +907 17 model.embedding_dim 1.0 +907 17 model.relation_dim 0.0 +907 
17 model.scoring_fct_norm 1.0 +907 17 training.batch_size 1.0 +907 17 training.label_smoothing 0.041884831673677694 +907 18 model.embedding_dim 2.0 +907 18 model.relation_dim 2.0 +907 18 model.scoring_fct_norm 2.0 +907 18 training.batch_size 0.0 +907 18 training.label_smoothing 0.1088408811235447 +907 19 model.embedding_dim 2.0 +907 19 model.relation_dim 2.0 +907 19 model.scoring_fct_norm 1.0 +907 19 training.batch_size 0.0 +907 19 training.label_smoothing 0.4654959774704093 +907 20 model.embedding_dim 0.0 +907 20 model.relation_dim 2.0 +907 20 model.scoring_fct_norm 2.0 +907 20 training.batch_size 0.0 +907 20 training.label_smoothing 0.021313940153106517 +907 21 model.embedding_dim 2.0 +907 21 model.relation_dim 0.0 +907 21 model.scoring_fct_norm 2.0 +907 21 training.batch_size 1.0 +907 21 training.label_smoothing 0.025496799067138594 +907 22 model.embedding_dim 1.0 +907 22 model.relation_dim 2.0 +907 22 model.scoring_fct_norm 1.0 +907 22 training.batch_size 0.0 +907 22 training.label_smoothing 0.0017191466471128637 +907 23 model.embedding_dim 2.0 +907 23 model.relation_dim 0.0 +907 23 model.scoring_fct_norm 1.0 +907 23 training.batch_size 1.0 +907 23 training.label_smoothing 0.004070918606803553 +907 24 model.embedding_dim 1.0 +907 24 model.relation_dim 0.0 +907 24 model.scoring_fct_norm 1.0 +907 24 training.batch_size 1.0 +907 24 training.label_smoothing 0.024015377151986927 +907 25 model.embedding_dim 1.0 +907 25 model.relation_dim 0.0 +907 25 model.scoring_fct_norm 2.0 +907 25 training.batch_size 2.0 +907 25 training.label_smoothing 0.01062130287591121 +907 26 model.embedding_dim 2.0 +907 26 model.relation_dim 0.0 +907 26 model.scoring_fct_norm 1.0 +907 26 training.batch_size 1.0 +907 26 training.label_smoothing 0.003740967821996382 +907 27 model.embedding_dim 2.0 +907 27 model.relation_dim 0.0 +907 27 model.scoring_fct_norm 2.0 +907 27 training.batch_size 2.0 +907 27 training.label_smoothing 0.02304029191492515 +907 28 model.embedding_dim 0.0 +907 28 
model.relation_dim 1.0 +907 28 model.scoring_fct_norm 2.0 +907 28 training.batch_size 1.0 +907 28 training.label_smoothing 0.0031277033932516885 +907 29 model.embedding_dim 0.0 +907 29 model.relation_dim 0.0 +907 29 model.scoring_fct_norm 2.0 +907 29 training.batch_size 0.0 +907 29 training.label_smoothing 0.0032177121423089066 +907 30 model.embedding_dim 0.0 +907 30 model.relation_dim 1.0 +907 30 model.scoring_fct_norm 2.0 +907 30 training.batch_size 0.0 +907 30 training.label_smoothing 0.1604766058418151 +907 31 model.embedding_dim 1.0 +907 31 model.relation_dim 1.0 +907 31 model.scoring_fct_norm 1.0 +907 31 training.batch_size 2.0 +907 31 training.label_smoothing 0.0036697032800408662 +907 32 model.embedding_dim 1.0 +907 32 model.relation_dim 1.0 +907 32 model.scoring_fct_norm 2.0 +907 32 training.batch_size 0.0 +907 32 training.label_smoothing 0.00956786164754305 +907 33 model.embedding_dim 0.0 +907 33 model.relation_dim 1.0 +907 33 model.scoring_fct_norm 2.0 +907 33 training.batch_size 2.0 +907 33 training.label_smoothing 0.028058494053018506 +907 34 model.embedding_dim 1.0 +907 34 model.relation_dim 1.0 +907 34 model.scoring_fct_norm 2.0 +907 34 training.batch_size 1.0 +907 34 training.label_smoothing 0.18165207244108927 +907 35 model.embedding_dim 0.0 +907 35 model.relation_dim 0.0 +907 35 model.scoring_fct_norm 1.0 +907 35 training.batch_size 2.0 +907 35 training.label_smoothing 0.0015918685138175772 +907 36 model.embedding_dim 1.0 +907 36 model.relation_dim 0.0 +907 36 model.scoring_fct_norm 1.0 +907 36 training.batch_size 1.0 +907 36 training.label_smoothing 0.24655086723596892 +907 37 model.embedding_dim 1.0 +907 37 model.relation_dim 0.0 +907 37 model.scoring_fct_norm 2.0 +907 37 training.batch_size 0.0 +907 37 training.label_smoothing 0.6694039866967155 +907 38 model.embedding_dim 2.0 +907 38 model.relation_dim 0.0 +907 38 model.scoring_fct_norm 1.0 +907 38 training.batch_size 1.0 +907 38 training.label_smoothing 0.09853493909187595 +907 39 
model.embedding_dim 0.0 +907 39 model.relation_dim 1.0 +907 39 model.scoring_fct_norm 1.0 +907 39 training.batch_size 0.0 +907 39 training.label_smoothing 0.00434119086669447 +907 40 model.embedding_dim 1.0 +907 40 model.relation_dim 0.0 +907 40 model.scoring_fct_norm 2.0 +907 40 training.batch_size 2.0 +907 40 training.label_smoothing 0.014278751244808043 +907 41 model.embedding_dim 0.0 +907 41 model.relation_dim 0.0 +907 41 model.scoring_fct_norm 2.0 +907 41 training.batch_size 0.0 +907 41 training.label_smoothing 0.007989111136403814 +907 42 model.embedding_dim 2.0 +907 42 model.relation_dim 2.0 +907 42 model.scoring_fct_norm 2.0 +907 42 training.batch_size 0.0 +907 42 training.label_smoothing 0.002101700454880127 +907 43 model.embedding_dim 2.0 +907 43 model.relation_dim 2.0 +907 43 model.scoring_fct_norm 2.0 +907 43 training.batch_size 0.0 +907 43 training.label_smoothing 0.004971160374947164 +907 44 model.embedding_dim 1.0 +907 44 model.relation_dim 2.0 +907 44 model.scoring_fct_norm 1.0 +907 44 training.batch_size 1.0 +907 44 training.label_smoothing 0.004035771895911797 +907 45 model.embedding_dim 0.0 +907 45 model.relation_dim 1.0 +907 45 model.scoring_fct_norm 2.0 +907 45 training.batch_size 1.0 +907 45 training.label_smoothing 0.7131915842338362 +907 46 model.embedding_dim 2.0 +907 46 model.relation_dim 0.0 +907 46 model.scoring_fct_norm 1.0 +907 46 training.batch_size 2.0 +907 46 training.label_smoothing 0.004127828738512486 +907 47 model.embedding_dim 2.0 +907 47 model.relation_dim 0.0 +907 47 model.scoring_fct_norm 1.0 +907 47 training.batch_size 1.0 +907 47 training.label_smoothing 0.00390628374755433 +907 48 model.embedding_dim 1.0 +907 48 model.relation_dim 2.0 +907 48 model.scoring_fct_norm 2.0 +907 48 training.batch_size 0.0 +907 48 training.label_smoothing 0.3842166616929387 +907 49 model.embedding_dim 1.0 +907 49 model.relation_dim 2.0 +907 49 model.scoring_fct_norm 1.0 +907 49 training.batch_size 1.0 +907 49 training.label_smoothing 
0.032817411035887846 +907 50 model.embedding_dim 1.0 +907 50 model.relation_dim 0.0 +907 50 model.scoring_fct_norm 1.0 +907 50 training.batch_size 0.0 +907 50 training.label_smoothing 0.044812289401361914 +907 51 model.embedding_dim 1.0 +907 51 model.relation_dim 1.0 +907 51 model.scoring_fct_norm 2.0 +907 51 training.batch_size 2.0 +907 51 training.label_smoothing 0.6570150322541078 +907 52 model.embedding_dim 1.0 +907 52 model.relation_dim 1.0 +907 52 model.scoring_fct_norm 1.0 +907 52 training.batch_size 2.0 +907 52 training.label_smoothing 0.03994893216497685 +907 53 model.embedding_dim 0.0 +907 53 model.relation_dim 1.0 +907 53 model.scoring_fct_norm 1.0 +907 53 training.batch_size 0.0 +907 53 training.label_smoothing 0.0059899613145835825 +907 54 model.embedding_dim 2.0 +907 54 model.relation_dim 0.0 +907 54 model.scoring_fct_norm 2.0 +907 54 training.batch_size 1.0 +907 54 training.label_smoothing 0.0016052858254707902 +907 55 model.embedding_dim 1.0 +907 55 model.relation_dim 1.0 +907 55 model.scoring_fct_norm 1.0 +907 55 training.batch_size 1.0 +907 55 training.label_smoothing 0.012560604832403472 +907 56 model.embedding_dim 1.0 +907 56 model.relation_dim 0.0 +907 56 model.scoring_fct_norm 2.0 +907 56 training.batch_size 1.0 +907 56 training.label_smoothing 0.06556873318433613 +907 57 model.embedding_dim 1.0 +907 57 model.relation_dim 0.0 +907 57 model.scoring_fct_norm 1.0 +907 57 training.batch_size 2.0 +907 57 training.label_smoothing 0.3388509451246858 +907 58 model.embedding_dim 1.0 +907 58 model.relation_dim 2.0 +907 58 model.scoring_fct_norm 1.0 +907 58 training.batch_size 2.0 +907 58 training.label_smoothing 0.0029144241468400424 +907 59 model.embedding_dim 1.0 +907 59 model.relation_dim 0.0 +907 59 model.scoring_fct_norm 2.0 +907 59 training.batch_size 0.0 +907 59 training.label_smoothing 0.022621130089661896 +907 60 model.embedding_dim 0.0 +907 60 model.relation_dim 1.0 +907 60 model.scoring_fct_norm 2.0 +907 60 training.batch_size 2.0 +907 60 
training.label_smoothing 0.5196217019400728 +907 61 model.embedding_dim 0.0 +907 61 model.relation_dim 1.0 +907 61 model.scoring_fct_norm 2.0 +907 61 training.batch_size 0.0 +907 61 training.label_smoothing 0.31499111551635756 +907 62 model.embedding_dim 0.0 +907 62 model.relation_dim 2.0 +907 62 model.scoring_fct_norm 2.0 +907 62 training.batch_size 1.0 +907 62 training.label_smoothing 0.06989662378618265 +907 63 model.embedding_dim 1.0 +907 63 model.relation_dim 1.0 +907 63 model.scoring_fct_norm 1.0 +907 63 training.batch_size 0.0 +907 63 training.label_smoothing 0.0012875540713032745 +907 64 model.embedding_dim 1.0 +907 64 model.relation_dim 0.0 +907 64 model.scoring_fct_norm 1.0 +907 64 training.batch_size 0.0 +907 64 training.label_smoothing 0.012001629483692093 +907 65 model.embedding_dim 2.0 +907 65 model.relation_dim 1.0 +907 65 model.scoring_fct_norm 1.0 +907 65 training.batch_size 1.0 +907 65 training.label_smoothing 0.01956053469728298 +907 66 model.embedding_dim 0.0 +907 66 model.relation_dim 1.0 +907 66 model.scoring_fct_norm 1.0 +907 66 training.batch_size 2.0 +907 66 training.label_smoothing 0.0020814901613090773 +907 67 model.embedding_dim 2.0 +907 67 model.relation_dim 1.0 +907 67 model.scoring_fct_norm 1.0 +907 67 training.batch_size 0.0 +907 67 training.label_smoothing 0.3283242735646568 +907 68 model.embedding_dim 1.0 +907 68 model.relation_dim 0.0 +907 68 model.scoring_fct_norm 2.0 +907 68 training.batch_size 2.0 +907 68 training.label_smoothing 0.0023302752608019553 +907 69 model.embedding_dim 0.0 +907 69 model.relation_dim 0.0 +907 69 model.scoring_fct_norm 2.0 +907 69 training.batch_size 0.0 +907 69 training.label_smoothing 0.02062964973073309 +907 70 model.embedding_dim 0.0 +907 70 model.relation_dim 0.0 +907 70 model.scoring_fct_norm 2.0 +907 70 training.batch_size 2.0 +907 70 training.label_smoothing 0.001582852128170633 +907 71 model.embedding_dim 0.0 +907 71 model.relation_dim 0.0 +907 71 model.scoring_fct_norm 2.0 +907 71 
training.batch_size 1.0 +907 71 training.label_smoothing 0.3885640319245559 +907 72 model.embedding_dim 1.0 +907 72 model.relation_dim 1.0 +907 72 model.scoring_fct_norm 1.0 +907 72 training.batch_size 1.0 +907 72 training.label_smoothing 0.005088711317443308 +907 73 model.embedding_dim 0.0 +907 73 model.relation_dim 0.0 +907 73 model.scoring_fct_norm 1.0 +907 73 training.batch_size 2.0 +907 73 training.label_smoothing 0.29622908974539947 +907 74 model.embedding_dim 0.0 +907 74 model.relation_dim 1.0 +907 74 model.scoring_fct_norm 2.0 +907 74 training.batch_size 0.0 +907 74 training.label_smoothing 0.0396070238828105 +907 75 model.embedding_dim 1.0 +907 75 model.relation_dim 0.0 +907 75 model.scoring_fct_norm 2.0 +907 75 training.batch_size 2.0 +907 75 training.label_smoothing 0.11617079783108337 +907 76 model.embedding_dim 2.0 +907 76 model.relation_dim 0.0 +907 76 model.scoring_fct_norm 2.0 +907 76 training.batch_size 1.0 +907 76 training.label_smoothing 0.002609009661905372 +907 77 model.embedding_dim 2.0 +907 77 model.relation_dim 1.0 +907 77 model.scoring_fct_norm 2.0 +907 77 training.batch_size 0.0 +907 77 training.label_smoothing 0.022015779288974014 +907 78 model.embedding_dim 0.0 +907 78 model.relation_dim 1.0 +907 78 model.scoring_fct_norm 2.0 +907 78 training.batch_size 2.0 +907 78 training.label_smoothing 0.008488815850592631 +907 79 model.embedding_dim 0.0 +907 79 model.relation_dim 2.0 +907 79 model.scoring_fct_norm 1.0 +907 79 training.batch_size 2.0 +907 79 training.label_smoothing 0.09007561289874709 +907 80 model.embedding_dim 2.0 +907 80 model.relation_dim 2.0 +907 80 model.scoring_fct_norm 2.0 +907 80 training.batch_size 0.0 +907 80 training.label_smoothing 0.007634861442560179 +907 81 model.embedding_dim 2.0 +907 81 model.relation_dim 2.0 +907 81 model.scoring_fct_norm 1.0 +907 81 training.batch_size 0.0 +907 81 training.label_smoothing 0.014931666012049835 +907 82 model.embedding_dim 1.0 +907 82 model.relation_dim 0.0 +907 82 
model.scoring_fct_norm 2.0 +907 82 training.batch_size 2.0 +907 82 training.label_smoothing 0.013288117476283237 +907 83 model.embedding_dim 2.0 +907 83 model.relation_dim 0.0 +907 83 model.scoring_fct_norm 1.0 +907 83 training.batch_size 0.0 +907 83 training.label_smoothing 0.055347580907687675 +907 84 model.embedding_dim 0.0 +907 84 model.relation_dim 2.0 +907 84 model.scoring_fct_norm 2.0 +907 84 training.batch_size 2.0 +907 84 training.label_smoothing 0.021193661323662866 +907 85 model.embedding_dim 2.0 +907 85 model.relation_dim 2.0 +907 85 model.scoring_fct_norm 2.0 +907 85 training.batch_size 2.0 +907 85 training.label_smoothing 0.40996272665172684 +907 86 model.embedding_dim 0.0 +907 86 model.relation_dim 0.0 +907 86 model.scoring_fct_norm 2.0 +907 86 training.batch_size 1.0 +907 86 training.label_smoothing 0.03339853182010933 +907 87 model.embedding_dim 2.0 +907 87 model.relation_dim 0.0 +907 87 model.scoring_fct_norm 1.0 +907 87 training.batch_size 1.0 +907 87 training.label_smoothing 0.19570119347548706 +907 88 model.embedding_dim 2.0 +907 88 model.relation_dim 2.0 +907 88 model.scoring_fct_norm 1.0 +907 88 training.batch_size 0.0 +907 88 training.label_smoothing 0.002091951221517503 +907 89 model.embedding_dim 1.0 +907 89 model.relation_dim 2.0 +907 89 model.scoring_fct_norm 1.0 +907 89 training.batch_size 1.0 +907 89 training.label_smoothing 0.0039752362049318285 +907 90 model.embedding_dim 0.0 +907 90 model.relation_dim 2.0 +907 90 model.scoring_fct_norm 2.0 +907 90 training.batch_size 0.0 +907 90 training.label_smoothing 0.05219126177677079 +907 91 model.embedding_dim 2.0 +907 91 model.relation_dim 0.0 +907 91 model.scoring_fct_norm 1.0 +907 91 training.batch_size 2.0 +907 91 training.label_smoothing 0.017368859440205067 +907 92 model.embedding_dim 0.0 +907 92 model.relation_dim 0.0 +907 92 model.scoring_fct_norm 2.0 +907 92 training.batch_size 1.0 +907 92 training.label_smoothing 0.11028214246365577 +907 93 model.embedding_dim 0.0 +907 93 
model.relation_dim 0.0 +907 93 model.scoring_fct_norm 1.0 +907 93 training.batch_size 0.0 +907 93 training.label_smoothing 0.014955483796856931 +907 94 model.embedding_dim 2.0 +907 94 model.relation_dim 0.0 +907 94 model.scoring_fct_norm 1.0 +907 94 training.batch_size 2.0 +907 94 training.label_smoothing 0.00308122623664898 +907 95 model.embedding_dim 1.0 +907 95 model.relation_dim 1.0 +907 95 model.scoring_fct_norm 2.0 +907 95 training.batch_size 2.0 +907 95 training.label_smoothing 0.4395132309351888 +907 96 model.embedding_dim 0.0 +907 96 model.relation_dim 2.0 +907 96 model.scoring_fct_norm 1.0 +907 96 training.batch_size 0.0 +907 96 training.label_smoothing 0.06831998051201574 +907 97 model.embedding_dim 0.0 +907 97 model.relation_dim 2.0 +907 97 model.scoring_fct_norm 2.0 +907 97 training.batch_size 0.0 +907 97 training.label_smoothing 0.18535823615839056 +907 98 model.embedding_dim 2.0 +907 98 model.relation_dim 1.0 +907 98 model.scoring_fct_norm 1.0 +907 98 training.batch_size 1.0 +907 98 training.label_smoothing 0.0935670604646067 +907 99 model.embedding_dim 2.0 +907 99 model.relation_dim 2.0 +907 99 model.scoring_fct_norm 1.0 +907 99 training.batch_size 2.0 +907 99 training.label_smoothing 0.0074370734276681645 +907 100 model.embedding_dim 1.0 +907 100 model.relation_dim 0.0 +907 100 model.scoring_fct_norm 1.0 +907 100 training.batch_size 1.0 +907 100 training.label_smoothing 0.005301747918622821 +907 1 dataset """kinships""" +907 1 model """transr""" +907 1 loss """bceaftersigmoid""" +907 1 regularizer """no""" +907 1 optimizer """adadelta""" +907 1 training_loop """lcwa""" +907 1 evaluator """rankbased""" +907 2 dataset """kinships""" +907 2 model """transr""" +907 2 loss """bceaftersigmoid""" +907 2 regularizer """no""" +907 2 optimizer """adadelta""" +907 2 training_loop """lcwa""" +907 2 evaluator """rankbased""" +907 3 dataset """kinships""" +907 3 model """transr""" +907 3 loss """bceaftersigmoid""" +907 3 regularizer """no""" +907 3 optimizer 
"""adadelta""" +907 3 training_loop """lcwa""" +907 3 evaluator """rankbased""" +907 4 dataset """kinships""" +907 4 model """transr""" +907 4 loss """bceaftersigmoid""" +907 4 regularizer """no""" +907 4 optimizer """adadelta""" +907 4 training_loop """lcwa""" +907 4 evaluator """rankbased""" +907 5 dataset """kinships""" +907 5 model """transr""" +907 5 loss """bceaftersigmoid""" +907 5 regularizer """no""" +907 5 optimizer """adadelta""" +907 5 training_loop """lcwa""" +907 5 evaluator """rankbased""" +907 6 dataset """kinships""" +907 6 model """transr""" +907 6 loss """bceaftersigmoid""" +907 6 regularizer """no""" +907 6 optimizer """adadelta""" +907 6 training_loop """lcwa""" +907 6 evaluator """rankbased""" +907 7 dataset """kinships""" +907 7 model """transr""" +907 7 loss """bceaftersigmoid""" +907 7 regularizer """no""" +907 7 optimizer """adadelta""" +907 7 training_loop """lcwa""" +907 7 evaluator """rankbased""" +907 8 dataset """kinships""" +907 8 model """transr""" +907 8 loss """bceaftersigmoid""" +907 8 regularizer """no""" +907 8 optimizer """adadelta""" +907 8 training_loop """lcwa""" +907 8 evaluator """rankbased""" +907 9 dataset """kinships""" +907 9 model """transr""" +907 9 loss """bceaftersigmoid""" +907 9 regularizer """no""" +907 9 optimizer """adadelta""" +907 9 training_loop """lcwa""" +907 9 evaluator """rankbased""" +907 10 dataset """kinships""" +907 10 model """transr""" +907 10 loss """bceaftersigmoid""" +907 10 regularizer """no""" +907 10 optimizer """adadelta""" +907 10 training_loop """lcwa""" +907 10 evaluator """rankbased""" +907 11 dataset """kinships""" +907 11 model """transr""" +907 11 loss """bceaftersigmoid""" +907 11 regularizer """no""" +907 11 optimizer """adadelta""" +907 11 training_loop """lcwa""" +907 11 evaluator """rankbased""" +907 12 dataset """kinships""" +907 12 model """transr""" +907 12 loss """bceaftersigmoid""" +907 12 regularizer """no""" +907 12 optimizer """adadelta""" +907 12 training_loop 
"""lcwa""" +907 12 evaluator """rankbased""" +907 13 dataset """kinships""" +907 13 model """transr""" +907 13 loss """bceaftersigmoid""" +907 13 regularizer """no""" +907 13 optimizer """adadelta""" +907 13 training_loop """lcwa""" +907 13 evaluator """rankbased""" +907 14 dataset """kinships""" +907 14 model """transr""" +907 14 loss """bceaftersigmoid""" +907 14 regularizer """no""" +907 14 optimizer """adadelta""" +907 14 training_loop """lcwa""" +907 14 evaluator """rankbased""" +907 15 dataset """kinships""" +907 15 model """transr""" +907 15 loss """bceaftersigmoid""" +907 15 regularizer """no""" +907 15 optimizer """adadelta""" +907 15 training_loop """lcwa""" +907 15 evaluator """rankbased""" +907 16 dataset """kinships""" +907 16 model """transr""" +907 16 loss """bceaftersigmoid""" +907 16 regularizer """no""" +907 16 optimizer """adadelta""" +907 16 training_loop """lcwa""" +907 16 evaluator """rankbased""" +907 17 dataset """kinships""" +907 17 model """transr""" +907 17 loss """bceaftersigmoid""" +907 17 regularizer """no""" +907 17 optimizer """adadelta""" +907 17 training_loop """lcwa""" +907 17 evaluator """rankbased""" +907 18 dataset """kinships""" +907 18 model """transr""" +907 18 loss """bceaftersigmoid""" +907 18 regularizer """no""" +907 18 optimizer """adadelta""" +907 18 training_loop """lcwa""" +907 18 evaluator """rankbased""" +907 19 dataset """kinships""" +907 19 model """transr""" +907 19 loss """bceaftersigmoid""" +907 19 regularizer """no""" +907 19 optimizer """adadelta""" +907 19 training_loop """lcwa""" +907 19 evaluator """rankbased""" +907 20 dataset """kinships""" +907 20 model """transr""" +907 20 loss """bceaftersigmoid""" +907 20 regularizer """no""" +907 20 optimizer """adadelta""" +907 20 training_loop """lcwa""" +907 20 evaluator """rankbased""" +907 21 dataset """kinships""" +907 21 model """transr""" +907 21 loss """bceaftersigmoid""" +907 21 regularizer """no""" +907 21 optimizer """adadelta""" +907 21 training_loop 
"""lcwa""" +907 21 evaluator """rankbased""" +907 22 dataset """kinships""" +907 22 model """transr""" +907 22 loss """bceaftersigmoid""" +907 22 regularizer """no""" +907 22 optimizer """adadelta""" +907 22 training_loop """lcwa""" +907 22 evaluator """rankbased""" +907 23 dataset """kinships""" +907 23 model """transr""" +907 23 loss """bceaftersigmoid""" +907 23 regularizer """no""" +907 23 optimizer """adadelta""" +907 23 training_loop """lcwa""" +907 23 evaluator """rankbased""" +907 24 dataset """kinships""" +907 24 model """transr""" +907 24 loss """bceaftersigmoid""" +907 24 regularizer """no""" +907 24 optimizer """adadelta""" +907 24 training_loop """lcwa""" +907 24 evaluator """rankbased""" +907 25 dataset """kinships""" +907 25 model """transr""" +907 25 loss """bceaftersigmoid""" +907 25 regularizer """no""" +907 25 optimizer """adadelta""" +907 25 training_loop """lcwa""" +907 25 evaluator """rankbased""" +907 26 dataset """kinships""" +907 26 model """transr""" +907 26 loss """bceaftersigmoid""" +907 26 regularizer """no""" +907 26 optimizer """adadelta""" +907 26 training_loop """lcwa""" +907 26 evaluator """rankbased""" +907 27 dataset """kinships""" +907 27 model """transr""" +907 27 loss """bceaftersigmoid""" +907 27 regularizer """no""" +907 27 optimizer """adadelta""" +907 27 training_loop """lcwa""" +907 27 evaluator """rankbased""" +907 28 dataset """kinships""" +907 28 model """transr""" +907 28 loss """bceaftersigmoid""" +907 28 regularizer """no""" +907 28 optimizer """adadelta""" +907 28 training_loop """lcwa""" +907 28 evaluator """rankbased""" +907 29 dataset """kinships""" +907 29 model """transr""" +907 29 loss """bceaftersigmoid""" +907 29 regularizer """no""" +907 29 optimizer """adadelta""" +907 29 training_loop """lcwa""" +907 29 evaluator """rankbased""" +907 30 dataset """kinships""" +907 30 model """transr""" +907 30 loss """bceaftersigmoid""" +907 30 regularizer """no""" +907 30 optimizer """adadelta""" +907 30 training_loop 
"""lcwa""" +907 30 evaluator """rankbased""" +907 31 dataset """kinships""" +907 31 model """transr""" +907 31 loss """bceaftersigmoid""" +907 31 regularizer """no""" +907 31 optimizer """adadelta""" +907 31 training_loop """lcwa""" +907 31 evaluator """rankbased""" +907 32 dataset """kinships""" +907 32 model """transr""" +907 32 loss """bceaftersigmoid""" +907 32 regularizer """no""" +907 32 optimizer """adadelta""" +907 32 training_loop """lcwa""" +907 32 evaluator """rankbased""" +907 33 dataset """kinships""" +907 33 model """transr""" +907 33 loss """bceaftersigmoid""" +907 33 regularizer """no""" +907 33 optimizer """adadelta""" +907 33 training_loop """lcwa""" +907 33 evaluator """rankbased""" +907 34 dataset """kinships""" +907 34 model """transr""" +907 34 loss """bceaftersigmoid""" +907 34 regularizer """no""" +907 34 optimizer """adadelta""" +907 34 training_loop """lcwa""" +907 34 evaluator """rankbased""" +907 35 dataset """kinships""" +907 35 model """transr""" +907 35 loss """bceaftersigmoid""" +907 35 regularizer """no""" +907 35 optimizer """adadelta""" +907 35 training_loop """lcwa""" +907 35 evaluator """rankbased""" +907 36 dataset """kinships""" +907 36 model """transr""" +907 36 loss """bceaftersigmoid""" +907 36 regularizer """no""" +907 36 optimizer """adadelta""" +907 36 training_loop """lcwa""" +907 36 evaluator """rankbased""" +907 37 dataset """kinships""" +907 37 model """transr""" +907 37 loss """bceaftersigmoid""" +907 37 regularizer """no""" +907 37 optimizer """adadelta""" +907 37 training_loop """lcwa""" +907 37 evaluator """rankbased""" +907 38 dataset """kinships""" +907 38 model """transr""" +907 38 loss """bceaftersigmoid""" +907 38 regularizer """no""" +907 38 optimizer """adadelta""" +907 38 training_loop """lcwa""" +907 38 evaluator """rankbased""" +907 39 dataset """kinships""" +907 39 model """transr""" +907 39 loss """bceaftersigmoid""" +907 39 regularizer """no""" +907 39 optimizer """adadelta""" +907 39 training_loop 
"""lcwa""" +907 39 evaluator """rankbased""" +907 40 dataset """kinships""" +907 40 model """transr""" +907 40 loss """bceaftersigmoid""" +907 40 regularizer """no""" +907 40 optimizer """adadelta""" +907 40 training_loop """lcwa""" +907 40 evaluator """rankbased""" +907 41 dataset """kinships""" +907 41 model """transr""" +907 41 loss """bceaftersigmoid""" +907 41 regularizer """no""" +907 41 optimizer """adadelta""" +907 41 training_loop """lcwa""" +907 41 evaluator """rankbased""" +907 42 dataset """kinships""" +907 42 model """transr""" +907 42 loss """bceaftersigmoid""" +907 42 regularizer """no""" +907 42 optimizer """adadelta""" +907 42 training_loop """lcwa""" +907 42 evaluator """rankbased""" +907 43 dataset """kinships""" +907 43 model """transr""" +907 43 loss """bceaftersigmoid""" +907 43 regularizer """no""" +907 43 optimizer """adadelta""" +907 43 training_loop """lcwa""" +907 43 evaluator """rankbased""" +907 44 dataset """kinships""" +907 44 model """transr""" +907 44 loss """bceaftersigmoid""" +907 44 regularizer """no""" +907 44 optimizer """adadelta""" +907 44 training_loop """lcwa""" +907 44 evaluator """rankbased""" +907 45 dataset """kinships""" +907 45 model """transr""" +907 45 loss """bceaftersigmoid""" +907 45 regularizer """no""" +907 45 optimizer """adadelta""" +907 45 training_loop """lcwa""" +907 45 evaluator """rankbased""" +907 46 dataset """kinships""" +907 46 model """transr""" +907 46 loss """bceaftersigmoid""" +907 46 regularizer """no""" +907 46 optimizer """adadelta""" +907 46 training_loop """lcwa""" +907 46 evaluator """rankbased""" +907 47 dataset """kinships""" +907 47 model """transr""" +907 47 loss """bceaftersigmoid""" +907 47 regularizer """no""" +907 47 optimizer """adadelta""" +907 47 training_loop """lcwa""" +907 47 evaluator """rankbased""" +907 48 dataset """kinships""" +907 48 model """transr""" +907 48 loss """bceaftersigmoid""" +907 48 regularizer """no""" +907 48 optimizer """adadelta""" +907 48 training_loop 
"""lcwa""" +907 48 evaluator """rankbased""" +907 49 dataset """kinships""" +907 49 model """transr""" +907 49 loss """bceaftersigmoid""" +907 49 regularizer """no""" +907 49 optimizer """adadelta""" +907 49 training_loop """lcwa""" +907 49 evaluator """rankbased""" +907 50 dataset """kinships""" +907 50 model """transr""" +907 50 loss """bceaftersigmoid""" +907 50 regularizer """no""" +907 50 optimizer """adadelta""" +907 50 training_loop """lcwa""" +907 50 evaluator """rankbased""" +907 51 dataset """kinships""" +907 51 model """transr""" +907 51 loss """bceaftersigmoid""" +907 51 regularizer """no""" +907 51 optimizer """adadelta""" +907 51 training_loop """lcwa""" +907 51 evaluator """rankbased""" +907 52 dataset """kinships""" +907 52 model """transr""" +907 52 loss """bceaftersigmoid""" +907 52 regularizer """no""" +907 52 optimizer """adadelta""" +907 52 training_loop """lcwa""" +907 52 evaluator """rankbased""" +907 53 dataset """kinships""" +907 53 model """transr""" +907 53 loss """bceaftersigmoid""" +907 53 regularizer """no""" +907 53 optimizer """adadelta""" +907 53 training_loop """lcwa""" +907 53 evaluator """rankbased""" +907 54 dataset """kinships""" +907 54 model """transr""" +907 54 loss """bceaftersigmoid""" +907 54 regularizer """no""" +907 54 optimizer """adadelta""" +907 54 training_loop """lcwa""" +907 54 evaluator """rankbased""" +907 55 dataset """kinships""" +907 55 model """transr""" +907 55 loss """bceaftersigmoid""" +907 55 regularizer """no""" +907 55 optimizer """adadelta""" +907 55 training_loop """lcwa""" +907 55 evaluator """rankbased""" +907 56 dataset """kinships""" +907 56 model """transr""" +907 56 loss """bceaftersigmoid""" +907 56 regularizer """no""" +907 56 optimizer """adadelta""" +907 56 training_loop """lcwa""" +907 56 evaluator """rankbased""" +907 57 dataset """kinships""" +907 57 model """transr""" +907 57 loss """bceaftersigmoid""" +907 57 regularizer """no""" +907 57 optimizer """adadelta""" +907 57 training_loop 
"""lcwa""" +907 57 evaluator """rankbased""" +907 58 dataset """kinships""" +907 58 model """transr""" +907 58 loss """bceaftersigmoid""" +907 58 regularizer """no""" +907 58 optimizer """adadelta""" +907 58 training_loop """lcwa""" +907 58 evaluator """rankbased""" +907 59 dataset """kinships""" +907 59 model """transr""" +907 59 loss """bceaftersigmoid""" +907 59 regularizer """no""" +907 59 optimizer """adadelta""" +907 59 training_loop """lcwa""" +907 59 evaluator """rankbased""" +907 60 dataset """kinships""" +907 60 model """transr""" +907 60 loss """bceaftersigmoid""" +907 60 regularizer """no""" +907 60 optimizer """adadelta""" +907 60 training_loop """lcwa""" +907 60 evaluator """rankbased""" +907 61 dataset """kinships""" +907 61 model """transr""" +907 61 loss """bceaftersigmoid""" +907 61 regularizer """no""" +907 61 optimizer """adadelta""" +907 61 training_loop """lcwa""" +907 61 evaluator """rankbased""" +907 62 dataset """kinships""" +907 62 model """transr""" +907 62 loss """bceaftersigmoid""" +907 62 regularizer """no""" +907 62 optimizer """adadelta""" +907 62 training_loop """lcwa""" +907 62 evaluator """rankbased""" +907 63 dataset """kinships""" +907 63 model """transr""" +907 63 loss """bceaftersigmoid""" +907 63 regularizer """no""" +907 63 optimizer """adadelta""" +907 63 training_loop """lcwa""" +907 63 evaluator """rankbased""" +907 64 dataset """kinships""" +907 64 model """transr""" +907 64 loss """bceaftersigmoid""" +907 64 regularizer """no""" +907 64 optimizer """adadelta""" +907 64 training_loop """lcwa""" +907 64 evaluator """rankbased""" +907 65 dataset """kinships""" +907 65 model """transr""" +907 65 loss """bceaftersigmoid""" +907 65 regularizer """no""" +907 65 optimizer """adadelta""" +907 65 training_loop """lcwa""" +907 65 evaluator """rankbased""" +907 66 dataset """kinships""" +907 66 model """transr""" +907 66 loss """bceaftersigmoid""" +907 66 regularizer """no""" +907 66 optimizer """adadelta""" +907 66 training_loop 
"""lcwa""" +907 66 evaluator """rankbased""" +907 67 dataset """kinships""" +907 67 model """transr""" +907 67 loss """bceaftersigmoid""" +907 67 regularizer """no""" +907 67 optimizer """adadelta""" +907 67 training_loop """lcwa""" +907 67 evaluator """rankbased""" +907 68 dataset """kinships""" +907 68 model """transr""" +907 68 loss """bceaftersigmoid""" +907 68 regularizer """no""" +907 68 optimizer """adadelta""" +907 68 training_loop """lcwa""" +907 68 evaluator """rankbased""" +907 69 dataset """kinships""" +907 69 model """transr""" +907 69 loss """bceaftersigmoid""" +907 69 regularizer """no""" +907 69 optimizer """adadelta""" +907 69 training_loop """lcwa""" +907 69 evaluator """rankbased""" +907 70 dataset """kinships""" +907 70 model """transr""" +907 70 loss """bceaftersigmoid""" +907 70 regularizer """no""" +907 70 optimizer """adadelta""" +907 70 training_loop """lcwa""" +907 70 evaluator """rankbased""" +907 71 dataset """kinships""" +907 71 model """transr""" +907 71 loss """bceaftersigmoid""" +907 71 regularizer """no""" +907 71 optimizer """adadelta""" +907 71 training_loop """lcwa""" +907 71 evaluator """rankbased""" +907 72 dataset """kinships""" +907 72 model """transr""" +907 72 loss """bceaftersigmoid""" +907 72 regularizer """no""" +907 72 optimizer """adadelta""" +907 72 training_loop """lcwa""" +907 72 evaluator """rankbased""" +907 73 dataset """kinships""" +907 73 model """transr""" +907 73 loss """bceaftersigmoid""" +907 73 regularizer """no""" +907 73 optimizer """adadelta""" +907 73 training_loop """lcwa""" +907 73 evaluator """rankbased""" +907 74 dataset """kinships""" +907 74 model """transr""" +907 74 loss """bceaftersigmoid""" +907 74 regularizer """no""" +907 74 optimizer """adadelta""" +907 74 training_loop """lcwa""" +907 74 evaluator """rankbased""" +907 75 dataset """kinships""" +907 75 model """transr""" +907 75 loss """bceaftersigmoid""" +907 75 regularizer """no""" +907 75 optimizer """adadelta""" +907 75 training_loop 
"""lcwa""" +907 75 evaluator """rankbased""" +907 76 dataset """kinships""" +907 76 model """transr""" +907 76 loss """bceaftersigmoid""" +907 76 regularizer """no""" +907 76 optimizer """adadelta""" +907 76 training_loop """lcwa""" +907 76 evaluator """rankbased""" +907 77 dataset """kinships""" +907 77 model """transr""" +907 77 loss """bceaftersigmoid""" +907 77 regularizer """no""" +907 77 optimizer """adadelta""" +907 77 training_loop """lcwa""" +907 77 evaluator """rankbased""" +907 78 dataset """kinships""" +907 78 model """transr""" +907 78 loss """bceaftersigmoid""" +907 78 regularizer """no""" +907 78 optimizer """adadelta""" +907 78 training_loop """lcwa""" +907 78 evaluator """rankbased""" +907 79 dataset """kinships""" +907 79 model """transr""" +907 79 loss """bceaftersigmoid""" +907 79 regularizer """no""" +907 79 optimizer """adadelta""" +907 79 training_loop """lcwa""" +907 79 evaluator """rankbased""" +907 80 dataset """kinships""" +907 80 model """transr""" +907 80 loss """bceaftersigmoid""" +907 80 regularizer """no""" +907 80 optimizer """adadelta""" +907 80 training_loop """lcwa""" +907 80 evaluator """rankbased""" +907 81 dataset """kinships""" +907 81 model """transr""" +907 81 loss """bceaftersigmoid""" +907 81 regularizer """no""" +907 81 optimizer """adadelta""" +907 81 training_loop """lcwa""" +907 81 evaluator """rankbased""" +907 82 dataset """kinships""" +907 82 model """transr""" +907 82 loss """bceaftersigmoid""" +907 82 regularizer """no""" +907 82 optimizer """adadelta""" +907 82 training_loop """lcwa""" +907 82 evaluator """rankbased""" +907 83 dataset """kinships""" +907 83 model """transr""" +907 83 loss """bceaftersigmoid""" +907 83 regularizer """no""" +907 83 optimizer """adadelta""" +907 83 training_loop """lcwa""" +907 83 evaluator """rankbased""" +907 84 dataset """kinships""" +907 84 model """transr""" +907 84 loss """bceaftersigmoid""" +907 84 regularizer """no""" +907 84 optimizer """adadelta""" +907 84 training_loop 
"""lcwa""" +907 84 evaluator """rankbased""" +907 85 dataset """kinships""" +907 85 model """transr""" +907 85 loss """bceaftersigmoid""" +907 85 regularizer """no""" +907 85 optimizer """adadelta""" +907 85 training_loop """lcwa""" +907 85 evaluator """rankbased""" +907 86 dataset """kinships""" +907 86 model """transr""" +907 86 loss """bceaftersigmoid""" +907 86 regularizer """no""" +907 86 optimizer """adadelta""" +907 86 training_loop """lcwa""" +907 86 evaluator """rankbased""" +907 87 dataset """kinships""" +907 87 model """transr""" +907 87 loss """bceaftersigmoid""" +907 87 regularizer """no""" +907 87 optimizer """adadelta""" +907 87 training_loop """lcwa""" +907 87 evaluator """rankbased""" +907 88 dataset """kinships""" +907 88 model """transr""" +907 88 loss """bceaftersigmoid""" +907 88 regularizer """no""" +907 88 optimizer """adadelta""" +907 88 training_loop """lcwa""" +907 88 evaluator """rankbased""" +907 89 dataset """kinships""" +907 89 model """transr""" +907 89 loss """bceaftersigmoid""" +907 89 regularizer """no""" +907 89 optimizer """adadelta""" +907 89 training_loop """lcwa""" +907 89 evaluator """rankbased""" +907 90 dataset """kinships""" +907 90 model """transr""" +907 90 loss """bceaftersigmoid""" +907 90 regularizer """no""" +907 90 optimizer """adadelta""" +907 90 training_loop """lcwa""" +907 90 evaluator """rankbased""" +907 91 dataset """kinships""" +907 91 model """transr""" +907 91 loss """bceaftersigmoid""" +907 91 regularizer """no""" +907 91 optimizer """adadelta""" +907 91 training_loop """lcwa""" +907 91 evaluator """rankbased""" +907 92 dataset """kinships""" +907 92 model """transr""" +907 92 loss """bceaftersigmoid""" +907 92 regularizer """no""" +907 92 optimizer """adadelta""" +907 92 training_loop """lcwa""" +907 92 evaluator """rankbased""" +907 93 dataset """kinships""" +907 93 model """transr""" +907 93 loss """bceaftersigmoid""" +907 93 regularizer """no""" +907 93 optimizer """adadelta""" +907 93 training_loop 
"""lcwa""" +907 93 evaluator """rankbased""" +907 94 dataset """kinships""" +907 94 model """transr""" +907 94 loss """bceaftersigmoid""" +907 94 regularizer """no""" +907 94 optimizer """adadelta""" +907 94 training_loop """lcwa""" +907 94 evaluator """rankbased""" +907 95 dataset """kinships""" +907 95 model """transr""" +907 95 loss """bceaftersigmoid""" +907 95 regularizer """no""" +907 95 optimizer """adadelta""" +907 95 training_loop """lcwa""" +907 95 evaluator """rankbased""" +907 96 dataset """kinships""" +907 96 model """transr""" +907 96 loss """bceaftersigmoid""" +907 96 regularizer """no""" +907 96 optimizer """adadelta""" +907 96 training_loop """lcwa""" +907 96 evaluator """rankbased""" +907 97 dataset """kinships""" +907 97 model """transr""" +907 97 loss """bceaftersigmoid""" +907 97 regularizer """no""" +907 97 optimizer """adadelta""" +907 97 training_loop """lcwa""" +907 97 evaluator """rankbased""" +907 98 dataset """kinships""" +907 98 model """transr""" +907 98 loss """bceaftersigmoid""" +907 98 regularizer """no""" +907 98 optimizer """adadelta""" +907 98 training_loop """lcwa""" +907 98 evaluator """rankbased""" +907 99 dataset """kinships""" +907 99 model """transr""" +907 99 loss """bceaftersigmoid""" +907 99 regularizer """no""" +907 99 optimizer """adadelta""" +907 99 training_loop """lcwa""" +907 99 evaluator """rankbased""" +907 100 dataset """kinships""" +907 100 model """transr""" +907 100 loss """bceaftersigmoid""" +907 100 regularizer """no""" +907 100 optimizer """adadelta""" +907 100 training_loop """lcwa""" +907 100 evaluator """rankbased""" +908 1 model.embedding_dim 0.0 +908 1 model.relation_dim 0.0 +908 1 model.scoring_fct_norm 1.0 +908 1 training.batch_size 2.0 +908 1 training.label_smoothing 0.008997740809856328 +908 2 model.embedding_dim 0.0 +908 2 model.relation_dim 0.0 +908 2 model.scoring_fct_norm 1.0 +908 2 training.batch_size 2.0 +908 2 training.label_smoothing 0.007074145617700258 +908 3 model.embedding_dim 0.0 +908 
3 model.relation_dim 1.0 +908 3 model.scoring_fct_norm 1.0 +908 3 training.batch_size 0.0 +908 3 training.label_smoothing 0.06575358466545742 +908 4 model.embedding_dim 1.0 +908 4 model.relation_dim 0.0 +908 4 model.scoring_fct_norm 2.0 +908 4 training.batch_size 1.0 +908 4 training.label_smoothing 0.3367142861171722 +908 5 model.embedding_dim 1.0 +908 5 model.relation_dim 0.0 +908 5 model.scoring_fct_norm 1.0 +908 5 training.batch_size 1.0 +908 5 training.label_smoothing 0.0061228183585955885 +908 6 model.embedding_dim 0.0 +908 6 model.relation_dim 2.0 +908 6 model.scoring_fct_norm 1.0 +908 6 training.batch_size 2.0 +908 6 training.label_smoothing 0.04797157960070178 +908 7 model.embedding_dim 2.0 +908 7 model.relation_dim 0.0 +908 7 model.scoring_fct_norm 1.0 +908 7 training.batch_size 1.0 +908 7 training.label_smoothing 0.02757574401847319 +908 8 model.embedding_dim 1.0 +908 8 model.relation_dim 2.0 +908 8 model.scoring_fct_norm 2.0 +908 8 training.batch_size 1.0 +908 8 training.label_smoothing 0.07375113925967733 +908 9 model.embedding_dim 0.0 +908 9 model.relation_dim 2.0 +908 9 model.scoring_fct_norm 2.0 +908 9 training.batch_size 2.0 +908 9 training.label_smoothing 0.7663002862264876 +908 10 model.embedding_dim 1.0 +908 10 model.relation_dim 1.0 +908 10 model.scoring_fct_norm 2.0 +908 10 training.batch_size 0.0 +908 10 training.label_smoothing 0.5409457130220953 +908 11 model.embedding_dim 0.0 +908 11 model.relation_dim 1.0 +908 11 model.scoring_fct_norm 1.0 +908 11 training.batch_size 1.0 +908 11 training.label_smoothing 0.03867985725269683 +908 12 model.embedding_dim 2.0 +908 12 model.relation_dim 2.0 +908 12 model.scoring_fct_norm 1.0 +908 12 training.batch_size 1.0 +908 12 training.label_smoothing 0.4988957560148659 +908 13 model.embedding_dim 0.0 +908 13 model.relation_dim 2.0 +908 13 model.scoring_fct_norm 2.0 +908 13 training.batch_size 1.0 +908 13 training.label_smoothing 0.026200122089324165 +908 14 model.embedding_dim 2.0 +908 14 model.relation_dim 
2.0 +908 14 model.scoring_fct_norm 2.0 +908 14 training.batch_size 2.0 +908 14 training.label_smoothing 0.7006167285054707 +908 15 model.embedding_dim 1.0 +908 15 model.relation_dim 2.0 +908 15 model.scoring_fct_norm 2.0 +908 15 training.batch_size 1.0 +908 15 training.label_smoothing 0.05273227452330492 +908 16 model.embedding_dim 0.0 +908 16 model.relation_dim 2.0 +908 16 model.scoring_fct_norm 2.0 +908 16 training.batch_size 2.0 +908 16 training.label_smoothing 0.009099923823181439 +908 17 model.embedding_dim 2.0 +908 17 model.relation_dim 2.0 +908 17 model.scoring_fct_norm 2.0 +908 17 training.batch_size 1.0 +908 17 training.label_smoothing 0.048767255939150655 +908 18 model.embedding_dim 0.0 +908 18 model.relation_dim 2.0 +908 18 model.scoring_fct_norm 2.0 +908 18 training.batch_size 2.0 +908 18 training.label_smoothing 0.009651663880674718 +908 19 model.embedding_dim 0.0 +908 19 model.relation_dim 0.0 +908 19 model.scoring_fct_norm 1.0 +908 19 training.batch_size 1.0 +908 19 training.label_smoothing 0.3844819616035429 +908 20 model.embedding_dim 2.0 +908 20 model.relation_dim 2.0 +908 20 model.scoring_fct_norm 1.0 +908 20 training.batch_size 0.0 +908 20 training.label_smoothing 0.01886052392350314 +908 21 model.embedding_dim 1.0 +908 21 model.relation_dim 1.0 +908 21 model.scoring_fct_norm 1.0 +908 21 training.batch_size 0.0 +908 21 training.label_smoothing 0.5223587067445215 +908 22 model.embedding_dim 1.0 +908 22 model.relation_dim 0.0 +908 22 model.scoring_fct_norm 2.0 +908 22 training.batch_size 0.0 +908 22 training.label_smoothing 0.004137764621990978 +908 23 model.embedding_dim 0.0 +908 23 model.relation_dim 1.0 +908 23 model.scoring_fct_norm 2.0 +908 23 training.batch_size 2.0 +908 23 training.label_smoothing 0.05652440036200554 +908 24 model.embedding_dim 0.0 +908 24 model.relation_dim 2.0 +908 24 model.scoring_fct_norm 1.0 +908 24 training.batch_size 2.0 +908 24 training.label_smoothing 0.28850949863352104 +908 25 model.embedding_dim 0.0 +908 25 
model.relation_dim 1.0 +908 25 model.scoring_fct_norm 2.0 +908 25 training.batch_size 1.0 +908 25 training.label_smoothing 0.4518852322052943 +908 26 model.embedding_dim 0.0 +908 26 model.relation_dim 2.0 +908 26 model.scoring_fct_norm 2.0 +908 26 training.batch_size 2.0 +908 26 training.label_smoothing 0.3514865840068165 +908 27 model.embedding_dim 2.0 +908 27 model.relation_dim 2.0 +908 27 model.scoring_fct_norm 2.0 +908 27 training.batch_size 2.0 +908 27 training.label_smoothing 0.4831725829593505 +908 28 model.embedding_dim 0.0 +908 28 model.relation_dim 2.0 +908 28 model.scoring_fct_norm 2.0 +908 28 training.batch_size 0.0 +908 28 training.label_smoothing 0.022619335210727433 +908 29 model.embedding_dim 2.0 +908 29 model.relation_dim 1.0 +908 29 model.scoring_fct_norm 2.0 +908 29 training.batch_size 1.0 +908 29 training.label_smoothing 0.03610013005794761 +908 30 model.embedding_dim 2.0 +908 30 model.relation_dim 1.0 +908 30 model.scoring_fct_norm 1.0 +908 30 training.batch_size 0.0 +908 30 training.label_smoothing 0.004581733332793865 +908 31 model.embedding_dim 2.0 +908 31 model.relation_dim 0.0 +908 31 model.scoring_fct_norm 1.0 +908 31 training.batch_size 0.0 +908 31 training.label_smoothing 0.06870905403989352 +908 32 model.embedding_dim 2.0 +908 32 model.relation_dim 2.0 +908 32 model.scoring_fct_norm 1.0 +908 32 training.batch_size 0.0 +908 32 training.label_smoothing 0.0016580103894541973 +908 33 model.embedding_dim 1.0 +908 33 model.relation_dim 0.0 +908 33 model.scoring_fct_norm 2.0 +908 33 training.batch_size 0.0 +908 33 training.label_smoothing 0.20795011129974242 +908 34 model.embedding_dim 0.0 +908 34 model.relation_dim 2.0 +908 34 model.scoring_fct_norm 1.0 +908 34 training.batch_size 2.0 +908 34 training.label_smoothing 0.009909274202367728 +908 35 model.embedding_dim 0.0 +908 35 model.relation_dim 1.0 +908 35 model.scoring_fct_norm 2.0 +908 35 training.batch_size 0.0 +908 35 training.label_smoothing 0.0012121854554196204 +908 36 
model.embedding_dim 0.0 +908 36 model.relation_dim 2.0 +908 36 model.scoring_fct_norm 1.0 +908 36 training.batch_size 1.0 +908 36 training.label_smoothing 0.008048725281086357 +908 37 model.embedding_dim 2.0 +908 37 model.relation_dim 1.0 +908 37 model.scoring_fct_norm 1.0 +908 37 training.batch_size 2.0 +908 37 training.label_smoothing 0.014424005489683417 +908 38 model.embedding_dim 2.0 +908 38 model.relation_dim 1.0 +908 38 model.scoring_fct_norm 1.0 +908 38 training.batch_size 1.0 +908 38 training.label_smoothing 0.002877801719724274 +908 39 model.embedding_dim 1.0 +908 39 model.relation_dim 1.0 +908 39 model.scoring_fct_norm 1.0 +908 39 training.batch_size 0.0 +908 39 training.label_smoothing 0.0016530738124342643 +908 40 model.embedding_dim 2.0 +908 40 model.relation_dim 0.0 +908 40 model.scoring_fct_norm 1.0 +908 40 training.batch_size 2.0 +908 40 training.label_smoothing 0.27840774434572013 +908 41 model.embedding_dim 0.0 +908 41 model.relation_dim 1.0 +908 41 model.scoring_fct_norm 2.0 +908 41 training.batch_size 0.0 +908 41 training.label_smoothing 0.004785894046548682 +908 42 model.embedding_dim 0.0 +908 42 model.relation_dim 0.0 +908 42 model.scoring_fct_norm 1.0 +908 42 training.batch_size 2.0 +908 42 training.label_smoothing 0.036547834861244535 +908 43 model.embedding_dim 0.0 +908 43 model.relation_dim 0.0 +908 43 model.scoring_fct_norm 1.0 +908 43 training.batch_size 1.0 +908 43 training.label_smoothing 0.025142986788297725 +908 44 model.embedding_dim 0.0 +908 44 model.relation_dim 1.0 +908 44 model.scoring_fct_norm 1.0 +908 44 training.batch_size 0.0 +908 44 training.label_smoothing 0.001475641244666142 +908 45 model.embedding_dim 0.0 +908 45 model.relation_dim 0.0 +908 45 model.scoring_fct_norm 1.0 +908 45 training.batch_size 0.0 +908 45 training.label_smoothing 0.010276104204134643 +908 46 model.embedding_dim 1.0 +908 46 model.relation_dim 2.0 +908 46 model.scoring_fct_norm 1.0 +908 46 training.batch_size 2.0 +908 46 training.label_smoothing 
0.35642848607791927 +908 47 model.embedding_dim 0.0 +908 47 model.relation_dim 1.0 +908 47 model.scoring_fct_norm 2.0 +908 47 training.batch_size 1.0 +908 47 training.label_smoothing 0.0014668315466515593 +908 48 model.embedding_dim 2.0 +908 48 model.relation_dim 2.0 +908 48 model.scoring_fct_norm 2.0 +908 48 training.batch_size 2.0 +908 48 training.label_smoothing 0.2526341885913629 +908 49 model.embedding_dim 0.0 +908 49 model.relation_dim 0.0 +908 49 model.scoring_fct_norm 1.0 +908 49 training.batch_size 0.0 +908 49 training.label_smoothing 0.04298523949257219 +908 50 model.embedding_dim 0.0 +908 50 model.relation_dim 2.0 +908 50 model.scoring_fct_norm 2.0 +908 50 training.batch_size 2.0 +908 50 training.label_smoothing 0.0017502672562559414 +908 51 model.embedding_dim 2.0 +908 51 model.relation_dim 1.0 +908 51 model.scoring_fct_norm 1.0 +908 51 training.batch_size 2.0 +908 51 training.label_smoothing 0.008471397514826158 +908 52 model.embedding_dim 0.0 +908 52 model.relation_dim 0.0 +908 52 model.scoring_fct_norm 1.0 +908 52 training.batch_size 0.0 +908 52 training.label_smoothing 0.14810794411818493 +908 53 model.embedding_dim 0.0 +908 53 model.relation_dim 2.0 +908 53 model.scoring_fct_norm 2.0 +908 53 training.batch_size 2.0 +908 53 training.label_smoothing 0.4861801540883674 +908 54 model.embedding_dim 0.0 +908 54 model.relation_dim 2.0 +908 54 model.scoring_fct_norm 2.0 +908 54 training.batch_size 2.0 +908 54 training.label_smoothing 0.06409831306945962 +908 55 model.embedding_dim 1.0 +908 55 model.relation_dim 0.0 +908 55 model.scoring_fct_norm 2.0 +908 55 training.batch_size 1.0 +908 55 training.label_smoothing 0.014355341752982786 +908 56 model.embedding_dim 1.0 +908 56 model.relation_dim 1.0 +908 56 model.scoring_fct_norm 2.0 +908 56 training.batch_size 0.0 +908 56 training.label_smoothing 0.018153203949567046 +908 57 model.embedding_dim 2.0 +908 57 model.relation_dim 0.0 +908 57 model.scoring_fct_norm 2.0 +908 57 training.batch_size 0.0 +908 57 
training.label_smoothing 0.0016247222379037675 +908 58 model.embedding_dim 1.0 +908 58 model.relation_dim 2.0 +908 58 model.scoring_fct_norm 2.0 +908 58 training.batch_size 0.0 +908 58 training.label_smoothing 0.04237400758150535 +908 59 model.embedding_dim 2.0 +908 59 model.relation_dim 2.0 +908 59 model.scoring_fct_norm 2.0 +908 59 training.batch_size 0.0 +908 59 training.label_smoothing 0.23528597980604 +908 60 model.embedding_dim 1.0 +908 60 model.relation_dim 1.0 +908 60 model.scoring_fct_norm 2.0 +908 60 training.batch_size 1.0 +908 60 training.label_smoothing 0.0380615123422475 +908 61 model.embedding_dim 1.0 +908 61 model.relation_dim 1.0 +908 61 model.scoring_fct_norm 1.0 +908 61 training.batch_size 2.0 +908 61 training.label_smoothing 0.6993878163560682 +908 62 model.embedding_dim 0.0 +908 62 model.relation_dim 0.0 +908 62 model.scoring_fct_norm 1.0 +908 62 training.batch_size 0.0 +908 62 training.label_smoothing 0.08598712978327215 +908 63 model.embedding_dim 0.0 +908 63 model.relation_dim 1.0 +908 63 model.scoring_fct_norm 1.0 +908 63 training.batch_size 1.0 +908 63 training.label_smoothing 0.0031342954547385663 +908 64 model.embedding_dim 2.0 +908 64 model.relation_dim 2.0 +908 64 model.scoring_fct_norm 1.0 +908 64 training.batch_size 2.0 +908 64 training.label_smoothing 0.024115481262235568 +908 65 model.embedding_dim 0.0 +908 65 model.relation_dim 0.0 +908 65 model.scoring_fct_norm 2.0 +908 65 training.batch_size 2.0 +908 65 training.label_smoothing 0.048584762485988246 +908 66 model.embedding_dim 1.0 +908 66 model.relation_dim 1.0 +908 66 model.scoring_fct_norm 1.0 +908 66 training.batch_size 0.0 +908 66 training.label_smoothing 0.0013019993486462853 +908 67 model.embedding_dim 2.0 +908 67 model.relation_dim 0.0 +908 67 model.scoring_fct_norm 1.0 +908 67 training.batch_size 2.0 +908 67 training.label_smoothing 0.23003979092341134 +908 68 model.embedding_dim 2.0 +908 68 model.relation_dim 1.0 +908 68 model.scoring_fct_norm 2.0 +908 68 
training.batch_size 1.0 +908 68 training.label_smoothing 0.008175095243414086 +908 69 model.embedding_dim 2.0 +908 69 model.relation_dim 2.0 +908 69 model.scoring_fct_norm 2.0 +908 69 training.batch_size 0.0 +908 69 training.label_smoothing 0.003419712306323501 +908 70 model.embedding_dim 0.0 +908 70 model.relation_dim 2.0 +908 70 model.scoring_fct_norm 1.0 +908 70 training.batch_size 0.0 +908 70 training.label_smoothing 0.0034012277244392978 +908 71 model.embedding_dim 1.0 +908 71 model.relation_dim 0.0 +908 71 model.scoring_fct_norm 1.0 +908 71 training.batch_size 1.0 +908 71 training.label_smoothing 0.15750385968915798 +908 72 model.embedding_dim 2.0 +908 72 model.relation_dim 1.0 +908 72 model.scoring_fct_norm 2.0 +908 72 training.batch_size 2.0 +908 72 training.label_smoothing 0.008467393755068773 +908 73 model.embedding_dim 0.0 +908 73 model.relation_dim 0.0 +908 73 model.scoring_fct_norm 2.0 +908 73 training.batch_size 1.0 +908 73 training.label_smoothing 0.00370648896484038 +908 74 model.embedding_dim 0.0 +908 74 model.relation_dim 1.0 +908 74 model.scoring_fct_norm 2.0 +908 74 training.batch_size 1.0 +908 74 training.label_smoothing 0.13282838336201663 +908 75 model.embedding_dim 2.0 +908 75 model.relation_dim 1.0 +908 75 model.scoring_fct_norm 2.0 +908 75 training.batch_size 0.0 +908 75 training.label_smoothing 0.062401368659587154 +908 76 model.embedding_dim 2.0 +908 76 model.relation_dim 1.0 +908 76 model.scoring_fct_norm 1.0 +908 76 training.batch_size 2.0 +908 76 training.label_smoothing 0.024199683010006294 +908 77 model.embedding_dim 0.0 +908 77 model.relation_dim 2.0 +908 77 model.scoring_fct_norm 1.0 +908 77 training.batch_size 0.0 +908 77 training.label_smoothing 0.019462309330378104 +908 78 model.embedding_dim 1.0 +908 78 model.relation_dim 1.0 +908 78 model.scoring_fct_norm 1.0 +908 78 training.batch_size 0.0 +908 78 training.label_smoothing 0.4575901582230399 +908 79 model.embedding_dim 2.0 +908 79 model.relation_dim 2.0 +908 79 
model.scoring_fct_norm 1.0 +908 79 training.batch_size 0.0 +908 79 training.label_smoothing 0.1351844547493437 +908 80 model.embedding_dim 2.0 +908 80 model.relation_dim 1.0 +908 80 model.scoring_fct_norm 2.0 +908 80 training.batch_size 2.0 +908 80 training.label_smoothing 0.008161687254053094 +908 81 model.embedding_dim 0.0 +908 81 model.relation_dim 2.0 +908 81 model.scoring_fct_norm 2.0 +908 81 training.batch_size 2.0 +908 81 training.label_smoothing 0.03644550891787331 +908 82 model.embedding_dim 0.0 +908 82 model.relation_dim 2.0 +908 82 model.scoring_fct_norm 2.0 +908 82 training.batch_size 0.0 +908 82 training.label_smoothing 0.008661172408407407 +908 83 model.embedding_dim 2.0 +908 83 model.relation_dim 2.0 +908 83 model.scoring_fct_norm 1.0 +908 83 training.batch_size 1.0 +908 83 training.label_smoothing 0.4067422078670685 +908 84 model.embedding_dim 2.0 +908 84 model.relation_dim 1.0 +908 84 model.scoring_fct_norm 2.0 +908 84 training.batch_size 0.0 +908 84 training.label_smoothing 0.21886881325377844 +908 85 model.embedding_dim 1.0 +908 85 model.relation_dim 1.0 +908 85 model.scoring_fct_norm 2.0 +908 85 training.batch_size 2.0 +908 85 training.label_smoothing 0.9682520022974055 +908 86 model.embedding_dim 2.0 +908 86 model.relation_dim 2.0 +908 86 model.scoring_fct_norm 1.0 +908 86 training.batch_size 2.0 +908 86 training.label_smoothing 0.07875169722855252 +908 87 model.embedding_dim 1.0 +908 87 model.relation_dim 0.0 +908 87 model.scoring_fct_norm 1.0 +908 87 training.batch_size 0.0 +908 87 training.label_smoothing 0.03406365896131439 +908 88 model.embedding_dim 2.0 +908 88 model.relation_dim 0.0 +908 88 model.scoring_fct_norm 1.0 +908 88 training.batch_size 1.0 +908 88 training.label_smoothing 0.0867437561213798 +908 89 model.embedding_dim 0.0 +908 89 model.relation_dim 0.0 +908 89 model.scoring_fct_norm 2.0 +908 89 training.batch_size 2.0 +908 89 training.label_smoothing 0.9768385446540674 +908 90 model.embedding_dim 2.0 +908 90 model.relation_dim 
2.0 +908 90 model.scoring_fct_norm 2.0 +908 90 training.batch_size 1.0 +908 90 training.label_smoothing 0.30231027399075006 +908 91 model.embedding_dim 1.0 +908 91 model.relation_dim 2.0 +908 91 model.scoring_fct_norm 2.0 +908 91 training.batch_size 0.0 +908 91 training.label_smoothing 0.6821343592509684 +908 92 model.embedding_dim 0.0 +908 92 model.relation_dim 1.0 +908 92 model.scoring_fct_norm 1.0 +908 92 training.batch_size 0.0 +908 92 training.label_smoothing 0.09601539590289007 +908 93 model.embedding_dim 2.0 +908 93 model.relation_dim 1.0 +908 93 model.scoring_fct_norm 2.0 +908 93 training.batch_size 1.0 +908 93 training.label_smoothing 0.033854072467568816 +908 94 model.embedding_dim 1.0 +908 94 model.relation_dim 0.0 +908 94 model.scoring_fct_norm 1.0 +908 94 training.batch_size 1.0 +908 94 training.label_smoothing 0.07677866534927327 +908 95 model.embedding_dim 1.0 +908 95 model.relation_dim 2.0 +908 95 model.scoring_fct_norm 1.0 +908 95 training.batch_size 0.0 +908 95 training.label_smoothing 0.9962824578917368 +908 96 model.embedding_dim 2.0 +908 96 model.relation_dim 1.0 +908 96 model.scoring_fct_norm 1.0 +908 96 training.batch_size 1.0 +908 96 training.label_smoothing 0.4133415997601842 +908 97 model.embedding_dim 2.0 +908 97 model.relation_dim 0.0 +908 97 model.scoring_fct_norm 2.0 +908 97 training.batch_size 2.0 +908 97 training.label_smoothing 0.0012081676779454966 +908 98 model.embedding_dim 0.0 +908 98 model.relation_dim 0.0 +908 98 model.scoring_fct_norm 2.0 +908 98 training.batch_size 1.0 +908 98 training.label_smoothing 0.0014560106743468349 +908 99 model.embedding_dim 2.0 +908 99 model.relation_dim 0.0 +908 99 model.scoring_fct_norm 1.0 +908 99 training.batch_size 2.0 +908 99 training.label_smoothing 0.0010861535718016014 +908 100 model.embedding_dim 0.0 +908 100 model.relation_dim 2.0 +908 100 model.scoring_fct_norm 2.0 +908 100 training.batch_size 0.0 +908 100 training.label_smoothing 0.06979690110550443 +908 1 dataset """kinships""" +908 1 
model """transr""" +908 1 loss """softplus""" +908 1 regularizer """no""" +908 1 optimizer """adadelta""" +908 1 training_loop """lcwa""" +908 1 evaluator """rankbased""" +908 2 dataset """kinships""" +908 2 model """transr""" +908 2 loss """softplus""" +908 2 regularizer """no""" +908 2 optimizer """adadelta""" +908 2 training_loop """lcwa""" +908 2 evaluator """rankbased""" +908 3 dataset """kinships""" +908 3 model """transr""" +908 3 loss """softplus""" +908 3 regularizer """no""" +908 3 optimizer """adadelta""" +908 3 training_loop """lcwa""" +908 3 evaluator """rankbased""" +908 4 dataset """kinships""" +908 4 model """transr""" +908 4 loss """softplus""" +908 4 regularizer """no""" +908 4 optimizer """adadelta""" +908 4 training_loop """lcwa""" +908 4 evaluator """rankbased""" +908 5 dataset """kinships""" +908 5 model """transr""" +908 5 loss """softplus""" +908 5 regularizer """no""" +908 5 optimizer """adadelta""" +908 5 training_loop """lcwa""" +908 5 evaluator """rankbased""" +908 6 dataset """kinships""" +908 6 model """transr""" +908 6 loss """softplus""" +908 6 regularizer """no""" +908 6 optimizer """adadelta""" +908 6 training_loop """lcwa""" +908 6 evaluator """rankbased""" +908 7 dataset """kinships""" +908 7 model """transr""" +908 7 loss """softplus""" +908 7 regularizer """no""" +908 7 optimizer """adadelta""" +908 7 training_loop """lcwa""" +908 7 evaluator """rankbased""" +908 8 dataset """kinships""" +908 8 model """transr""" +908 8 loss """softplus""" +908 8 regularizer """no""" +908 8 optimizer """adadelta""" +908 8 training_loop """lcwa""" +908 8 evaluator """rankbased""" +908 9 dataset """kinships""" +908 9 model """transr""" +908 9 loss """softplus""" +908 9 regularizer """no""" +908 9 optimizer """adadelta""" +908 9 training_loop """lcwa""" +908 9 evaluator """rankbased""" +908 10 dataset """kinships""" +908 10 model """transr""" +908 10 loss """softplus""" +908 10 regularizer """no""" +908 10 optimizer """adadelta""" +908 10 
training_loop """lcwa""" +908 10 evaluator """rankbased""" +908 11 dataset """kinships""" +908 11 model """transr""" +908 11 loss """softplus""" +908 11 regularizer """no""" +908 11 optimizer """adadelta""" +908 11 training_loop """lcwa""" +908 11 evaluator """rankbased""" +908 12 dataset """kinships""" +908 12 model """transr""" +908 12 loss """softplus""" +908 12 regularizer """no""" +908 12 optimizer """adadelta""" +908 12 training_loop """lcwa""" +908 12 evaluator """rankbased""" +908 13 dataset """kinships""" +908 13 model """transr""" +908 13 loss """softplus""" +908 13 regularizer """no""" +908 13 optimizer """adadelta""" +908 13 training_loop """lcwa""" +908 13 evaluator """rankbased""" +908 14 dataset """kinships""" +908 14 model """transr""" +908 14 loss """softplus""" +908 14 regularizer """no""" +908 14 optimizer """adadelta""" +908 14 training_loop """lcwa""" +908 14 evaluator """rankbased""" +908 15 dataset """kinships""" +908 15 model """transr""" +908 15 loss """softplus""" +908 15 regularizer """no""" +908 15 optimizer """adadelta""" +908 15 training_loop """lcwa""" +908 15 evaluator """rankbased""" +908 16 dataset """kinships""" +908 16 model """transr""" +908 16 loss """softplus""" +908 16 regularizer """no""" +908 16 optimizer """adadelta""" +908 16 training_loop """lcwa""" +908 16 evaluator """rankbased""" +908 17 dataset """kinships""" +908 17 model """transr""" +908 17 loss """softplus""" +908 17 regularizer """no""" +908 17 optimizer """adadelta""" +908 17 training_loop """lcwa""" +908 17 evaluator """rankbased""" +908 18 dataset """kinships""" +908 18 model """transr""" +908 18 loss """softplus""" +908 18 regularizer """no""" +908 18 optimizer """adadelta""" +908 18 training_loop """lcwa""" +908 18 evaluator """rankbased""" +908 19 dataset """kinships""" +908 19 model """transr""" +908 19 loss """softplus""" +908 19 regularizer """no""" +908 19 optimizer """adadelta""" +908 19 training_loop """lcwa""" +908 19 evaluator """rankbased""" +908 
20 dataset """kinships""" +908 20 model """transr""" +908 20 loss """softplus""" +908 20 regularizer """no""" +908 20 optimizer """adadelta""" +908 20 training_loop """lcwa""" +908 20 evaluator """rankbased""" +908 21 dataset """kinships""" +908 21 model """transr""" +908 21 loss """softplus""" +908 21 regularizer """no""" +908 21 optimizer """adadelta""" +908 21 training_loop """lcwa""" +908 21 evaluator """rankbased""" +908 22 dataset """kinships""" +908 22 model """transr""" +908 22 loss """softplus""" +908 22 regularizer """no""" +908 22 optimizer """adadelta""" +908 22 training_loop """lcwa""" +908 22 evaluator """rankbased""" +908 23 dataset """kinships""" +908 23 model """transr""" +908 23 loss """softplus""" +908 23 regularizer """no""" +908 23 optimizer """adadelta""" +908 23 training_loop """lcwa""" +908 23 evaluator """rankbased""" +908 24 dataset """kinships""" +908 24 model """transr""" +908 24 loss """softplus""" +908 24 regularizer """no""" +908 24 optimizer """adadelta""" +908 24 training_loop """lcwa""" +908 24 evaluator """rankbased""" +908 25 dataset """kinships""" +908 25 model """transr""" +908 25 loss """softplus""" +908 25 regularizer """no""" +908 25 optimizer """adadelta""" +908 25 training_loop """lcwa""" +908 25 evaluator """rankbased""" +908 26 dataset """kinships""" +908 26 model """transr""" +908 26 loss """softplus""" +908 26 regularizer """no""" +908 26 optimizer """adadelta""" +908 26 training_loop """lcwa""" +908 26 evaluator """rankbased""" +908 27 dataset """kinships""" +908 27 model """transr""" +908 27 loss """softplus""" +908 27 regularizer """no""" +908 27 optimizer """adadelta""" +908 27 training_loop """lcwa""" +908 27 evaluator """rankbased""" +908 28 dataset """kinships""" +908 28 model """transr""" +908 28 loss """softplus""" +908 28 regularizer """no""" +908 28 optimizer """adadelta""" +908 28 training_loop """lcwa""" +908 28 evaluator """rankbased""" +908 29 dataset """kinships""" +908 29 model """transr""" +908 29 
loss """softplus""" +908 29 regularizer """no""" +908 29 optimizer """adadelta""" +908 29 training_loop """lcwa""" +908 29 evaluator """rankbased""" +908 30 dataset """kinships""" +908 30 model """transr""" +908 30 loss """softplus""" +908 30 regularizer """no""" +908 30 optimizer """adadelta""" +908 30 training_loop """lcwa""" +908 30 evaluator """rankbased""" +908 31 dataset """kinships""" +908 31 model """transr""" +908 31 loss """softplus""" +908 31 regularizer """no""" +908 31 optimizer """adadelta""" +908 31 training_loop """lcwa""" +908 31 evaluator """rankbased""" +908 32 dataset """kinships""" +908 32 model """transr""" +908 32 loss """softplus""" +908 32 regularizer """no""" +908 32 optimizer """adadelta""" +908 32 training_loop """lcwa""" +908 32 evaluator """rankbased""" +908 33 dataset """kinships""" +908 33 model """transr""" +908 33 loss """softplus""" +908 33 regularizer """no""" +908 33 optimizer """adadelta""" +908 33 training_loop """lcwa""" +908 33 evaluator """rankbased""" +908 34 dataset """kinships""" +908 34 model """transr""" +908 34 loss """softplus""" +908 34 regularizer """no""" +908 34 optimizer """adadelta""" +908 34 training_loop """lcwa""" +908 34 evaluator """rankbased""" +908 35 dataset """kinships""" +908 35 model """transr""" +908 35 loss """softplus""" +908 35 regularizer """no""" +908 35 optimizer """adadelta""" +908 35 training_loop """lcwa""" +908 35 evaluator """rankbased""" +908 36 dataset """kinships""" +908 36 model """transr""" +908 36 loss """softplus""" +908 36 regularizer """no""" +908 36 optimizer """adadelta""" +908 36 training_loop """lcwa""" +908 36 evaluator """rankbased""" +908 37 dataset """kinships""" +908 37 model """transr""" +908 37 loss """softplus""" +908 37 regularizer """no""" +908 37 optimizer """adadelta""" +908 37 training_loop """lcwa""" +908 37 evaluator """rankbased""" +908 38 dataset """kinships""" +908 38 model """transr""" +908 38 loss """softplus""" +908 38 regularizer """no""" +908 38 
optimizer """adadelta""" +908 38 training_loop """lcwa""" +908 38 evaluator """rankbased""" +908 39 dataset """kinships""" +908 39 model """transr""" +908 39 loss """softplus""" +908 39 regularizer """no""" +908 39 optimizer """adadelta""" +908 39 training_loop """lcwa""" +908 39 evaluator """rankbased""" +908 40 dataset """kinships""" +908 40 model """transr""" +908 40 loss """softplus""" +908 40 regularizer """no""" +908 40 optimizer """adadelta""" +908 40 training_loop """lcwa""" +908 40 evaluator """rankbased""" +908 41 dataset """kinships""" +908 41 model """transr""" +908 41 loss """softplus""" +908 41 regularizer """no""" +908 41 optimizer """adadelta""" +908 41 training_loop """lcwa""" +908 41 evaluator """rankbased""" +908 42 dataset """kinships""" +908 42 model """transr""" +908 42 loss """softplus""" +908 42 regularizer """no""" +908 42 optimizer """adadelta""" +908 42 training_loop """lcwa""" +908 42 evaluator """rankbased""" +908 43 dataset """kinships""" +908 43 model """transr""" +908 43 loss """softplus""" +908 43 regularizer """no""" +908 43 optimizer """adadelta""" +908 43 training_loop """lcwa""" +908 43 evaluator """rankbased""" +908 44 dataset """kinships""" +908 44 model """transr""" +908 44 loss """softplus""" +908 44 regularizer """no""" +908 44 optimizer """adadelta""" +908 44 training_loop """lcwa""" +908 44 evaluator """rankbased""" +908 45 dataset """kinships""" +908 45 model """transr""" +908 45 loss """softplus""" +908 45 regularizer """no""" +908 45 optimizer """adadelta""" +908 45 training_loop """lcwa""" +908 45 evaluator """rankbased""" +908 46 dataset """kinships""" +908 46 model """transr""" +908 46 loss """softplus""" +908 46 regularizer """no""" +908 46 optimizer """adadelta""" +908 46 training_loop """lcwa""" +908 46 evaluator """rankbased""" +908 47 dataset """kinships""" +908 47 model """transr""" +908 47 loss """softplus""" +908 47 regularizer """no""" +908 47 optimizer """adadelta""" +908 47 training_loop """lcwa""" +908 
47 evaluator """rankbased""" +908 48 dataset """kinships""" +908 48 model """transr""" +908 48 loss """softplus""" +908 48 regularizer """no""" +908 48 optimizer """adadelta""" +908 48 training_loop """lcwa""" +908 48 evaluator """rankbased""" +908 49 dataset """kinships""" +908 49 model """transr""" +908 49 loss """softplus""" +908 49 regularizer """no""" +908 49 optimizer """adadelta""" +908 49 training_loop """lcwa""" +908 49 evaluator """rankbased""" +908 50 dataset """kinships""" +908 50 model """transr""" +908 50 loss """softplus""" +908 50 regularizer """no""" +908 50 optimizer """adadelta""" +908 50 training_loop """lcwa""" +908 50 evaluator """rankbased""" +908 51 dataset """kinships""" +908 51 model """transr""" +908 51 loss """softplus""" +908 51 regularizer """no""" +908 51 optimizer """adadelta""" +908 51 training_loop """lcwa""" +908 51 evaluator """rankbased""" +908 52 dataset """kinships""" +908 52 model """transr""" +908 52 loss """softplus""" +908 52 regularizer """no""" +908 52 optimizer """adadelta""" +908 52 training_loop """lcwa""" +908 52 evaluator """rankbased""" +908 53 dataset """kinships""" +908 53 model """transr""" +908 53 loss """softplus""" +908 53 regularizer """no""" +908 53 optimizer """adadelta""" +908 53 training_loop """lcwa""" +908 53 evaluator """rankbased""" +908 54 dataset """kinships""" +908 54 model """transr""" +908 54 loss """softplus""" +908 54 regularizer """no""" +908 54 optimizer """adadelta""" +908 54 training_loop """lcwa""" +908 54 evaluator """rankbased""" +908 55 dataset """kinships""" +908 55 model """transr""" +908 55 loss """softplus""" +908 55 regularizer """no""" +908 55 optimizer """adadelta""" +908 55 training_loop """lcwa""" +908 55 evaluator """rankbased""" +908 56 dataset """kinships""" +908 56 model """transr""" +908 56 loss """softplus""" +908 56 regularizer """no""" +908 56 optimizer """adadelta""" +908 56 training_loop """lcwa""" +908 56 evaluator """rankbased""" +908 57 dataset """kinships""" +908 
57 model """transr""" +908 57 loss """softplus""" +908 57 regularizer """no""" +908 57 optimizer """adadelta""" +908 57 training_loop """lcwa""" +908 57 evaluator """rankbased""" +908 58 dataset """kinships""" +908 58 model """transr""" +908 58 loss """softplus""" +908 58 regularizer """no""" +908 58 optimizer """adadelta""" +908 58 training_loop """lcwa""" +908 58 evaluator """rankbased""" +908 59 dataset """kinships""" +908 59 model """transr""" +908 59 loss """softplus""" +908 59 regularizer """no""" +908 59 optimizer """adadelta""" +908 59 training_loop """lcwa""" +908 59 evaluator """rankbased""" +908 60 dataset """kinships""" +908 60 model """transr""" +908 60 loss """softplus""" +908 60 regularizer """no""" +908 60 optimizer """adadelta""" +908 60 training_loop """lcwa""" +908 60 evaluator """rankbased""" +908 61 dataset """kinships""" +908 61 model """transr""" +908 61 loss """softplus""" +908 61 regularizer """no""" +908 61 optimizer """adadelta""" +908 61 training_loop """lcwa""" +908 61 evaluator """rankbased""" +908 62 dataset """kinships""" +908 62 model """transr""" +908 62 loss """softplus""" +908 62 regularizer """no""" +908 62 optimizer """adadelta""" +908 62 training_loop """lcwa""" +908 62 evaluator """rankbased""" +908 63 dataset """kinships""" +908 63 model """transr""" +908 63 loss """softplus""" +908 63 regularizer """no""" +908 63 optimizer """adadelta""" +908 63 training_loop """lcwa""" +908 63 evaluator """rankbased""" +908 64 dataset """kinships""" +908 64 model """transr""" +908 64 loss """softplus""" +908 64 regularizer """no""" +908 64 optimizer """adadelta""" +908 64 training_loop """lcwa""" +908 64 evaluator """rankbased""" +908 65 dataset """kinships""" +908 65 model """transr""" +908 65 loss """softplus""" +908 65 regularizer """no""" +908 65 optimizer """adadelta""" +908 65 training_loop """lcwa""" +908 65 evaluator """rankbased""" +908 66 dataset """kinships""" +908 66 model """transr""" +908 66 loss """softplus""" +908 66 
regularizer """no""" +908 66 optimizer """adadelta""" +908 66 training_loop """lcwa""" +908 66 evaluator """rankbased""" +908 67 dataset """kinships""" +908 67 model """transr""" +908 67 loss """softplus""" +908 67 regularizer """no""" +908 67 optimizer """adadelta""" +908 67 training_loop """lcwa""" +908 67 evaluator """rankbased""" +908 68 dataset """kinships""" +908 68 model """transr""" +908 68 loss """softplus""" +908 68 regularizer """no""" +908 68 optimizer """adadelta""" +908 68 training_loop """lcwa""" +908 68 evaluator """rankbased""" +908 69 dataset """kinships""" +908 69 model """transr""" +908 69 loss """softplus""" +908 69 regularizer """no""" +908 69 optimizer """adadelta""" +908 69 training_loop """lcwa""" +908 69 evaluator """rankbased""" +908 70 dataset """kinships""" +908 70 model """transr""" +908 70 loss """softplus""" +908 70 regularizer """no""" +908 70 optimizer """adadelta""" +908 70 training_loop """lcwa""" +908 70 evaluator """rankbased""" +908 71 dataset """kinships""" +908 71 model """transr""" +908 71 loss """softplus""" +908 71 regularizer """no""" +908 71 optimizer """adadelta""" +908 71 training_loop """lcwa""" +908 71 evaluator """rankbased""" +908 72 dataset """kinships""" +908 72 model """transr""" +908 72 loss """softplus""" +908 72 regularizer """no""" +908 72 optimizer """adadelta""" +908 72 training_loop """lcwa""" +908 72 evaluator """rankbased""" +908 73 dataset """kinships""" +908 73 model """transr""" +908 73 loss """softplus""" +908 73 regularizer """no""" +908 73 optimizer """adadelta""" +908 73 training_loop """lcwa""" +908 73 evaluator """rankbased""" +908 74 dataset """kinships""" +908 74 model """transr""" +908 74 loss """softplus""" +908 74 regularizer """no""" +908 74 optimizer """adadelta""" +908 74 training_loop """lcwa""" +908 74 evaluator """rankbased""" +908 75 dataset """kinships""" +908 75 model """transr""" +908 75 loss """softplus""" +908 75 regularizer """no""" +908 75 optimizer """adadelta""" +908 75 
training_loop """lcwa""" +908 75 evaluator """rankbased""" +908 76 dataset """kinships""" +908 76 model """transr""" +908 76 loss """softplus""" +908 76 regularizer """no""" +908 76 optimizer """adadelta""" +908 76 training_loop """lcwa""" +908 76 evaluator """rankbased""" +908 77 dataset """kinships""" +908 77 model """transr""" +908 77 loss """softplus""" +908 77 regularizer """no""" +908 77 optimizer """adadelta""" +908 77 training_loop """lcwa""" +908 77 evaluator """rankbased""" +908 78 dataset """kinships""" +908 78 model """transr""" +908 78 loss """softplus""" +908 78 regularizer """no""" +908 78 optimizer """adadelta""" +908 78 training_loop """lcwa""" +908 78 evaluator """rankbased""" +908 79 dataset """kinships""" +908 79 model """transr""" +908 79 loss """softplus""" +908 79 regularizer """no""" +908 79 optimizer """adadelta""" +908 79 training_loop """lcwa""" +908 79 evaluator """rankbased""" +908 80 dataset """kinships""" +908 80 model """transr""" +908 80 loss """softplus""" +908 80 regularizer """no""" +908 80 optimizer """adadelta""" +908 80 training_loop """lcwa""" +908 80 evaluator """rankbased""" +908 81 dataset """kinships""" +908 81 model """transr""" +908 81 loss """softplus""" +908 81 regularizer """no""" +908 81 optimizer """adadelta""" +908 81 training_loop """lcwa""" +908 81 evaluator """rankbased""" +908 82 dataset """kinships""" +908 82 model """transr""" +908 82 loss """softplus""" +908 82 regularizer """no""" +908 82 optimizer """adadelta""" +908 82 training_loop """lcwa""" +908 82 evaluator """rankbased""" +908 83 dataset """kinships""" +908 83 model """transr""" +908 83 loss """softplus""" +908 83 regularizer """no""" +908 83 optimizer """adadelta""" +908 83 training_loop """lcwa""" +908 83 evaluator """rankbased""" +908 84 dataset """kinships""" +908 84 model """transr""" +908 84 loss """softplus""" +908 84 regularizer """no""" +908 84 optimizer """adadelta""" +908 84 training_loop """lcwa""" +908 84 evaluator """rankbased""" +908 
85 dataset """kinships""" +908 85 model """transr""" +908 85 loss """softplus""" +908 85 regularizer """no""" +908 85 optimizer """adadelta""" +908 85 training_loop """lcwa""" +908 85 evaluator """rankbased""" +908 86 dataset """kinships""" +908 86 model """transr""" +908 86 loss """softplus""" +908 86 regularizer """no""" +908 86 optimizer """adadelta""" +908 86 training_loop """lcwa""" +908 86 evaluator """rankbased""" +908 87 dataset """kinships""" +908 87 model """transr""" +908 87 loss """softplus""" +908 87 regularizer """no""" +908 87 optimizer """adadelta""" +908 87 training_loop """lcwa""" +908 87 evaluator """rankbased""" +908 88 dataset """kinships""" +908 88 model """transr""" +908 88 loss """softplus""" +908 88 regularizer """no""" +908 88 optimizer """adadelta""" +908 88 training_loop """lcwa""" +908 88 evaluator """rankbased""" +908 89 dataset """kinships""" +908 89 model """transr""" +908 89 loss """softplus""" +908 89 regularizer """no""" +908 89 optimizer """adadelta""" +908 89 training_loop """lcwa""" +908 89 evaluator """rankbased""" +908 90 dataset """kinships""" +908 90 model """transr""" +908 90 loss """softplus""" +908 90 regularizer """no""" +908 90 optimizer """adadelta""" +908 90 training_loop """lcwa""" +908 90 evaluator """rankbased""" +908 91 dataset """kinships""" +908 91 model """transr""" +908 91 loss """softplus""" +908 91 regularizer """no""" +908 91 optimizer """adadelta""" +908 91 training_loop """lcwa""" +908 91 evaluator """rankbased""" +908 92 dataset """kinships""" +908 92 model """transr""" +908 92 loss """softplus""" +908 92 regularizer """no""" +908 92 optimizer """adadelta""" +908 92 training_loop """lcwa""" +908 92 evaluator """rankbased""" +908 93 dataset """kinships""" +908 93 model """transr""" +908 93 loss """softplus""" +908 93 regularizer """no""" +908 93 optimizer """adadelta""" +908 93 training_loop """lcwa""" +908 93 evaluator """rankbased""" +908 94 dataset """kinships""" +908 94 model """transr""" +908 94 
loss """softplus""" +908 94 regularizer """no""" +908 94 optimizer """adadelta""" +908 94 training_loop """lcwa""" +908 94 evaluator """rankbased""" +908 95 dataset """kinships""" +908 95 model """transr""" +908 95 loss """softplus""" +908 95 regularizer """no""" +908 95 optimizer """adadelta""" +908 95 training_loop """lcwa""" +908 95 evaluator """rankbased""" +908 96 dataset """kinships""" +908 96 model """transr""" +908 96 loss """softplus""" +908 96 regularizer """no""" +908 96 optimizer """adadelta""" +908 96 training_loop """lcwa""" +908 96 evaluator """rankbased""" +908 97 dataset """kinships""" +908 97 model """transr""" +908 97 loss """softplus""" +908 97 regularizer """no""" +908 97 optimizer """adadelta""" +908 97 training_loop """lcwa""" +908 97 evaluator """rankbased""" +908 98 dataset """kinships""" +908 98 model """transr""" +908 98 loss """softplus""" +908 98 regularizer """no""" +908 98 optimizer """adadelta""" +908 98 training_loop """lcwa""" +908 98 evaluator """rankbased""" +908 99 dataset """kinships""" +908 99 model """transr""" +908 99 loss """softplus""" +908 99 regularizer """no""" +908 99 optimizer """adadelta""" +908 99 training_loop """lcwa""" +908 99 evaluator """rankbased""" +908 100 dataset """kinships""" +908 100 model """transr""" +908 100 loss """softplus""" +908 100 regularizer """no""" +908 100 optimizer """adadelta""" +908 100 training_loop """lcwa""" +908 100 evaluator """rankbased""" +909 1 model.embedding_dim 2.0 +909 1 model.relation_dim 2.0 +909 1 model.scoring_fct_norm 1.0 +909 1 training.batch_size 1.0 +909 1 training.label_smoothing 0.007396934079595456 +909 2 model.embedding_dim 2.0 +909 2 model.relation_dim 1.0 +909 2 model.scoring_fct_norm 1.0 +909 2 training.batch_size 1.0 +909 2 training.label_smoothing 0.031072922651067002 +909 3 model.embedding_dim 1.0 +909 3 model.relation_dim 1.0 +909 3 model.scoring_fct_norm 2.0 +909 3 training.batch_size 0.0 +909 3 training.label_smoothing 0.39966243183707584 +909 4 
model.embedding_dim 2.0 +909 4 model.relation_dim 2.0 +909 4 model.scoring_fct_norm 1.0 +909 4 training.batch_size 2.0 +909 4 training.label_smoothing 0.391884558648283 +909 5 model.embedding_dim 1.0 +909 5 model.relation_dim 0.0 +909 5 model.scoring_fct_norm 1.0 +909 5 training.batch_size 1.0 +909 5 training.label_smoothing 0.009856335542529704 +909 6 model.embedding_dim 0.0 +909 6 model.relation_dim 0.0 +909 6 model.scoring_fct_norm 1.0 +909 6 training.batch_size 1.0 +909 6 training.label_smoothing 0.040194044265785205 +909 7 model.embedding_dim 0.0 +909 7 model.relation_dim 1.0 +909 7 model.scoring_fct_norm 2.0 +909 7 training.batch_size 0.0 +909 7 training.label_smoothing 0.04946612740308325 +909 8 model.embedding_dim 0.0 +909 8 model.relation_dim 2.0 +909 8 model.scoring_fct_norm 2.0 +909 8 training.batch_size 0.0 +909 8 training.label_smoothing 0.22826318779632965 +909 9 model.embedding_dim 0.0 +909 9 model.relation_dim 2.0 +909 9 model.scoring_fct_norm 2.0 +909 9 training.batch_size 0.0 +909 9 training.label_smoothing 0.0014336822806885925 +909 10 model.embedding_dim 0.0 +909 10 model.relation_dim 1.0 +909 10 model.scoring_fct_norm 2.0 +909 10 training.batch_size 0.0 +909 10 training.label_smoothing 0.9616552134042204 +909 11 model.embedding_dim 2.0 +909 11 model.relation_dim 2.0 +909 11 model.scoring_fct_norm 2.0 +909 11 training.batch_size 2.0 +909 11 training.label_smoothing 0.11545751140871545 +909 12 model.embedding_dim 2.0 +909 12 model.relation_dim 1.0 +909 12 model.scoring_fct_norm 2.0 +909 12 training.batch_size 1.0 +909 12 training.label_smoothing 0.03396961487735296 +909 13 model.embedding_dim 2.0 +909 13 model.relation_dim 0.0 +909 13 model.scoring_fct_norm 2.0 +909 13 training.batch_size 1.0 +909 13 training.label_smoothing 0.02804201253165072 +909 14 model.embedding_dim 2.0 +909 14 model.relation_dim 0.0 +909 14 model.scoring_fct_norm 1.0 +909 14 training.batch_size 0.0 +909 14 training.label_smoothing 0.011165033041742755 +909 15 
model.embedding_dim 0.0 +909 15 model.relation_dim 2.0 +909 15 model.scoring_fct_norm 1.0 +909 15 training.batch_size 2.0 +909 15 training.label_smoothing 0.06683525032923407 +909 16 model.embedding_dim 1.0 +909 16 model.relation_dim 1.0 +909 16 model.scoring_fct_norm 2.0 +909 16 training.batch_size 0.0 +909 16 training.label_smoothing 0.4401825041115817 +909 17 model.embedding_dim 0.0 +909 17 model.relation_dim 2.0 +909 17 model.scoring_fct_norm 1.0 +909 17 training.batch_size 1.0 +909 17 training.label_smoothing 0.036857133365651995 +909 18 model.embedding_dim 2.0 +909 18 model.relation_dim 0.0 +909 18 model.scoring_fct_norm 2.0 +909 18 training.batch_size 0.0 +909 18 training.label_smoothing 0.10069879459013861 +909 19 model.embedding_dim 2.0 +909 19 model.relation_dim 2.0 +909 19 model.scoring_fct_norm 1.0 +909 19 training.batch_size 1.0 +909 19 training.label_smoothing 0.24117714193645146 +909 20 model.embedding_dim 0.0 +909 20 model.relation_dim 1.0 +909 20 model.scoring_fct_norm 2.0 +909 20 training.batch_size 1.0 +909 20 training.label_smoothing 0.04129022067660513 +909 21 model.embedding_dim 1.0 +909 21 model.relation_dim 1.0 +909 21 model.scoring_fct_norm 2.0 +909 21 training.batch_size 2.0 +909 21 training.label_smoothing 0.2865177776666394 +909 22 model.embedding_dim 1.0 +909 22 model.relation_dim 2.0 +909 22 model.scoring_fct_norm 2.0 +909 22 training.batch_size 0.0 +909 22 training.label_smoothing 0.18945400410605465 +909 23 model.embedding_dim 0.0 +909 23 model.relation_dim 0.0 +909 23 model.scoring_fct_norm 2.0 +909 23 training.batch_size 0.0 +909 23 training.label_smoothing 0.028109038968113245 +909 24 model.embedding_dim 2.0 +909 24 model.relation_dim 0.0 +909 24 model.scoring_fct_norm 1.0 +909 24 training.batch_size 1.0 +909 24 training.label_smoothing 0.0017617684555435438 +909 25 model.embedding_dim 1.0 +909 25 model.relation_dim 1.0 +909 25 model.scoring_fct_norm 2.0 +909 25 training.batch_size 1.0 +909 25 training.label_smoothing 
0.001969459850575475 +909 26 model.embedding_dim 2.0 +909 26 model.relation_dim 0.0 +909 26 model.scoring_fct_norm 2.0 +909 26 training.batch_size 2.0 +909 26 training.label_smoothing 0.0034401198297517575 +909 27 model.embedding_dim 0.0 +909 27 model.relation_dim 2.0 +909 27 model.scoring_fct_norm 2.0 +909 27 training.batch_size 0.0 +909 27 training.label_smoothing 0.4998833667879892 +909 28 model.embedding_dim 2.0 +909 28 model.relation_dim 0.0 +909 28 model.scoring_fct_norm 2.0 +909 28 training.batch_size 2.0 +909 28 training.label_smoothing 0.003987395445626793 +909 29 model.embedding_dim 2.0 +909 29 model.relation_dim 1.0 +909 29 model.scoring_fct_norm 2.0 +909 29 training.batch_size 2.0 +909 29 training.label_smoothing 0.0011710416802570866 +909 30 model.embedding_dim 2.0 +909 30 model.relation_dim 0.0 +909 30 model.scoring_fct_norm 1.0 +909 30 training.batch_size 0.0 +909 30 training.label_smoothing 0.8674618399721311 +909 31 model.embedding_dim 2.0 +909 31 model.relation_dim 2.0 +909 31 model.scoring_fct_norm 2.0 +909 31 training.batch_size 2.0 +909 31 training.label_smoothing 0.08390479809187319 +909 32 model.embedding_dim 1.0 +909 32 model.relation_dim 2.0 +909 32 model.scoring_fct_norm 2.0 +909 32 training.batch_size 1.0 +909 32 training.label_smoothing 0.0018900134891676585 +909 33 model.embedding_dim 0.0 +909 33 model.relation_dim 0.0 +909 33 model.scoring_fct_norm 1.0 +909 33 training.batch_size 1.0 +909 33 training.label_smoothing 0.2909483448732198 +909 34 model.embedding_dim 0.0 +909 34 model.relation_dim 0.0 +909 34 model.scoring_fct_norm 2.0 +909 34 training.batch_size 0.0 +909 34 training.label_smoothing 0.03435995590631257 +909 35 model.embedding_dim 1.0 +909 35 model.relation_dim 2.0 +909 35 model.scoring_fct_norm 1.0 +909 35 training.batch_size 2.0 +909 35 training.label_smoothing 0.9250659277952079 +909 36 model.embedding_dim 1.0 +909 36 model.relation_dim 2.0 +909 36 model.scoring_fct_norm 1.0 +909 36 training.batch_size 0.0 +909 36 
training.label_smoothing 0.050486392935245025 +909 37 model.embedding_dim 2.0 +909 37 model.relation_dim 0.0 +909 37 model.scoring_fct_norm 1.0 +909 37 training.batch_size 1.0 +909 37 training.label_smoothing 0.20562262446630122 +909 38 model.embedding_dim 1.0 +909 38 model.relation_dim 2.0 +909 38 model.scoring_fct_norm 2.0 +909 38 training.batch_size 2.0 +909 38 training.label_smoothing 0.027164788738349734 +909 39 model.embedding_dim 2.0 +909 39 model.relation_dim 2.0 +909 39 model.scoring_fct_norm 2.0 +909 39 training.batch_size 0.0 +909 39 training.label_smoothing 0.026409346716781003 +909 40 model.embedding_dim 0.0 +909 40 model.relation_dim 0.0 +909 40 model.scoring_fct_norm 2.0 +909 40 training.batch_size 2.0 +909 40 training.label_smoothing 0.029644800286432526 +909 41 model.embedding_dim 2.0 +909 41 model.relation_dim 0.0 +909 41 model.scoring_fct_norm 1.0 +909 41 training.batch_size 2.0 +909 41 training.label_smoothing 0.17580842122120063 +909 42 model.embedding_dim 1.0 +909 42 model.relation_dim 1.0 +909 42 model.scoring_fct_norm 2.0 +909 42 training.batch_size 1.0 +909 42 training.label_smoothing 0.031093965208810387 +909 43 model.embedding_dim 1.0 +909 43 model.relation_dim 0.0 +909 43 model.scoring_fct_norm 2.0 +909 43 training.batch_size 1.0 +909 43 training.label_smoothing 0.056484056372712414 +909 44 model.embedding_dim 1.0 +909 44 model.relation_dim 0.0 +909 44 model.scoring_fct_norm 1.0 +909 44 training.batch_size 0.0 +909 44 training.label_smoothing 0.0038870660038027934 +909 45 model.embedding_dim 1.0 +909 45 model.relation_dim 2.0 +909 45 model.scoring_fct_norm 2.0 +909 45 training.batch_size 0.0 +909 45 training.label_smoothing 0.04174033436418628 +909 46 model.embedding_dim 0.0 +909 46 model.relation_dim 2.0 +909 46 model.scoring_fct_norm 2.0 +909 46 training.batch_size 2.0 +909 46 training.label_smoothing 0.09267370984349822 +909 47 model.embedding_dim 0.0 +909 47 model.relation_dim 0.0 +909 47 model.scoring_fct_norm 2.0 +909 47 
training.batch_size 0.0 +909 47 training.label_smoothing 0.10881672516492448 +909 48 model.embedding_dim 1.0 +909 48 model.relation_dim 1.0 +909 48 model.scoring_fct_norm 2.0 +909 48 training.batch_size 0.0 +909 48 training.label_smoothing 0.0035152243037266037 +909 49 model.embedding_dim 0.0 +909 49 model.relation_dim 2.0 +909 49 model.scoring_fct_norm 2.0 +909 49 training.batch_size 1.0 +909 49 training.label_smoothing 0.2652849498472729 +909 50 model.embedding_dim 1.0 +909 50 model.relation_dim 0.0 +909 50 model.scoring_fct_norm 1.0 +909 50 training.batch_size 0.0 +909 50 training.label_smoothing 0.004721644518174436 +909 51 model.embedding_dim 1.0 +909 51 model.relation_dim 0.0 +909 51 model.scoring_fct_norm 2.0 +909 51 training.batch_size 2.0 +909 51 training.label_smoothing 0.757954750205829 +909 52 model.embedding_dim 2.0 +909 52 model.relation_dim 2.0 +909 52 model.scoring_fct_norm 2.0 +909 52 training.batch_size 1.0 +909 52 training.label_smoothing 0.032016760772097586 +909 53 model.embedding_dim 1.0 +909 53 model.relation_dim 1.0 +909 53 model.scoring_fct_norm 2.0 +909 53 training.batch_size 0.0 +909 53 training.label_smoothing 0.1596468372925371 +909 54 model.embedding_dim 0.0 +909 54 model.relation_dim 2.0 +909 54 model.scoring_fct_norm 1.0 +909 54 training.batch_size 1.0 +909 54 training.label_smoothing 0.09583034811974465 +909 55 model.embedding_dim 0.0 +909 55 model.relation_dim 2.0 +909 55 model.scoring_fct_norm 2.0 +909 55 training.batch_size 1.0 +909 55 training.label_smoothing 0.021229531143835013 +909 56 model.embedding_dim 1.0 +909 56 model.relation_dim 0.0 +909 56 model.scoring_fct_norm 1.0 +909 56 training.batch_size 0.0 +909 56 training.label_smoothing 0.08005631790565773 +909 57 model.embedding_dim 1.0 +909 57 model.relation_dim 1.0 +909 57 model.scoring_fct_norm 2.0 +909 57 training.batch_size 2.0 +909 57 training.label_smoothing 0.001980856757189495 +909 58 model.embedding_dim 2.0 +909 58 model.relation_dim 1.0 +909 58 
model.scoring_fct_norm 2.0 +909 58 training.batch_size 1.0 +909 58 training.label_smoothing 0.0010992860272697371 +909 59 model.embedding_dim 0.0 +909 59 model.relation_dim 0.0 +909 59 model.scoring_fct_norm 2.0 +909 59 training.batch_size 0.0 +909 59 training.label_smoothing 0.21044740557482813 +909 60 model.embedding_dim 0.0 +909 60 model.relation_dim 1.0 +909 60 model.scoring_fct_norm 1.0 +909 60 training.batch_size 2.0 +909 60 training.label_smoothing 0.0017542495310280634 +909 61 model.embedding_dim 1.0 +909 61 model.relation_dim 0.0 +909 61 model.scoring_fct_norm 2.0 +909 61 training.batch_size 0.0 +909 61 training.label_smoothing 0.022100577837539624 +909 62 model.embedding_dim 0.0 +909 62 model.relation_dim 2.0 +909 62 model.scoring_fct_norm 2.0 +909 62 training.batch_size 1.0 +909 62 training.label_smoothing 0.23527574694857695 +909 63 model.embedding_dim 2.0 +909 63 model.relation_dim 2.0 +909 63 model.scoring_fct_norm 1.0 +909 63 training.batch_size 2.0 +909 63 training.label_smoothing 0.09461692198751182 +909 64 model.embedding_dim 2.0 +909 64 model.relation_dim 1.0 +909 64 model.scoring_fct_norm 1.0 +909 64 training.batch_size 1.0 +909 64 training.label_smoothing 0.022234942027501297 +909 65 model.embedding_dim 0.0 +909 65 model.relation_dim 1.0 +909 65 model.scoring_fct_norm 2.0 +909 65 training.batch_size 1.0 +909 65 training.label_smoothing 0.21550089918713086 +909 66 model.embedding_dim 1.0 +909 66 model.relation_dim 0.0 +909 66 model.scoring_fct_norm 2.0 +909 66 training.batch_size 1.0 +909 66 training.label_smoothing 0.1991344258782507 +909 67 model.embedding_dim 1.0 +909 67 model.relation_dim 2.0 +909 67 model.scoring_fct_norm 1.0 +909 67 training.batch_size 1.0 +909 67 training.label_smoothing 0.05028049577321764 +909 68 model.embedding_dim 0.0 +909 68 model.relation_dim 1.0 +909 68 model.scoring_fct_norm 2.0 +909 68 training.batch_size 0.0 +909 68 training.label_smoothing 0.008900950106322731 +909 69 model.embedding_dim 0.0 +909 69 
model.relation_dim 2.0 +909 69 model.scoring_fct_norm 1.0 +909 69 training.batch_size 1.0 +909 69 training.label_smoothing 0.5253559476572358 +909 70 model.embedding_dim 2.0 +909 70 model.relation_dim 1.0 +909 70 model.scoring_fct_norm 1.0 +909 70 training.batch_size 1.0 +909 70 training.label_smoothing 0.14415396719142337 +909 71 model.embedding_dim 0.0 +909 71 model.relation_dim 2.0 +909 71 model.scoring_fct_norm 1.0 +909 71 training.batch_size 2.0 +909 71 training.label_smoothing 0.016121075060652818 +909 72 model.embedding_dim 1.0 +909 72 model.relation_dim 0.0 +909 72 model.scoring_fct_norm 1.0 +909 72 training.batch_size 1.0 +909 72 training.label_smoothing 0.012895319823391139 +909 73 model.embedding_dim 2.0 +909 73 model.relation_dim 0.0 +909 73 model.scoring_fct_norm 2.0 +909 73 training.batch_size 2.0 +909 73 training.label_smoothing 0.018165294880452303 +909 74 model.embedding_dim 1.0 +909 74 model.relation_dim 1.0 +909 74 model.scoring_fct_norm 1.0 +909 74 training.batch_size 1.0 +909 74 training.label_smoothing 0.19734814704818127 +909 75 model.embedding_dim 0.0 +909 75 model.relation_dim 0.0 +909 75 model.scoring_fct_norm 2.0 +909 75 training.batch_size 2.0 +909 75 training.label_smoothing 0.11752904424687519 +909 76 model.embedding_dim 2.0 +909 76 model.relation_dim 2.0 +909 76 model.scoring_fct_norm 1.0 +909 76 training.batch_size 0.0 +909 76 training.label_smoothing 0.08060279594709026 +909 77 model.embedding_dim 1.0 +909 77 model.relation_dim 1.0 +909 77 model.scoring_fct_norm 2.0 +909 77 training.batch_size 0.0 +909 77 training.label_smoothing 0.0021798868444145155 +909 78 model.embedding_dim 1.0 +909 78 model.relation_dim 2.0 +909 78 model.scoring_fct_norm 2.0 +909 78 training.batch_size 2.0 +909 78 training.label_smoothing 0.044901992993901935 +909 79 model.embedding_dim 0.0 +909 79 model.relation_dim 0.0 +909 79 model.scoring_fct_norm 1.0 +909 79 training.batch_size 1.0 +909 79 training.label_smoothing 0.838598112536842 +909 80 
model.embedding_dim 2.0 +909 80 model.relation_dim 0.0 +909 80 model.scoring_fct_norm 2.0 +909 80 training.batch_size 2.0 +909 80 training.label_smoothing 0.021802773566383903 +909 81 model.embedding_dim 1.0 +909 81 model.relation_dim 1.0 +909 81 model.scoring_fct_norm 2.0 +909 81 training.batch_size 1.0 +909 81 training.label_smoothing 0.0731413847793747 +909 82 model.embedding_dim 0.0 +909 82 model.relation_dim 1.0 +909 82 model.scoring_fct_norm 2.0 +909 82 training.batch_size 2.0 +909 82 training.label_smoothing 0.1412814050933978 +909 83 model.embedding_dim 0.0 +909 83 model.relation_dim 0.0 +909 83 model.scoring_fct_norm 1.0 +909 83 training.batch_size 1.0 +909 83 training.label_smoothing 0.20611213153764066 +909 84 model.embedding_dim 0.0 +909 84 model.relation_dim 2.0 +909 84 model.scoring_fct_norm 1.0 +909 84 training.batch_size 1.0 +909 84 training.label_smoothing 0.02790168046696622 +909 85 model.embedding_dim 1.0 +909 85 model.relation_dim 1.0 +909 85 model.scoring_fct_norm 2.0 +909 85 training.batch_size 2.0 +909 85 training.label_smoothing 0.34891272971351744 +909 86 model.embedding_dim 0.0 +909 86 model.relation_dim 0.0 +909 86 model.scoring_fct_norm 2.0 +909 86 training.batch_size 0.0 +909 86 training.label_smoothing 0.02771653749327145 +909 87 model.embedding_dim 2.0 +909 87 model.relation_dim 0.0 +909 87 model.scoring_fct_norm 2.0 +909 87 training.batch_size 1.0 +909 87 training.label_smoothing 0.11450807465960253 +909 88 model.embedding_dim 0.0 +909 88 model.relation_dim 0.0 +909 88 model.scoring_fct_norm 1.0 +909 88 training.batch_size 2.0 +909 88 training.label_smoothing 0.048171451168844456 +909 89 model.embedding_dim 0.0 +909 89 model.relation_dim 0.0 +909 89 model.scoring_fct_norm 2.0 +909 89 training.batch_size 2.0 +909 89 training.label_smoothing 0.0864000180374402 +909 90 model.embedding_dim 1.0 +909 90 model.relation_dim 2.0 +909 90 model.scoring_fct_norm 2.0 +909 90 training.batch_size 1.0 +909 90 training.label_smoothing 
0.05307315133770446 +909 91 model.embedding_dim 0.0 +909 91 model.relation_dim 1.0 +909 91 model.scoring_fct_norm 2.0 +909 91 training.batch_size 1.0 +909 91 training.label_smoothing 0.022471785046416025 +909 92 model.embedding_dim 2.0 +909 92 model.relation_dim 2.0 +909 92 model.scoring_fct_norm 2.0 +909 92 training.batch_size 1.0 +909 92 training.label_smoothing 0.1227721400458717 +909 93 model.embedding_dim 1.0 +909 93 model.relation_dim 1.0 +909 93 model.scoring_fct_norm 2.0 +909 93 training.batch_size 2.0 +909 93 training.label_smoothing 0.0023392891667407207 +909 94 model.embedding_dim 1.0 +909 94 model.relation_dim 1.0 +909 94 model.scoring_fct_norm 1.0 +909 94 training.batch_size 1.0 +909 94 training.label_smoothing 0.27911723172080166 +909 95 model.embedding_dim 2.0 +909 95 model.relation_dim 2.0 +909 95 model.scoring_fct_norm 1.0 +909 95 training.batch_size 0.0 +909 95 training.label_smoothing 0.3747248922457138 +909 96 model.embedding_dim 2.0 +909 96 model.relation_dim 2.0 +909 96 model.scoring_fct_norm 2.0 +909 96 training.batch_size 0.0 +909 96 training.label_smoothing 0.11653412036525575 +909 97 model.embedding_dim 0.0 +909 97 model.relation_dim 0.0 +909 97 model.scoring_fct_norm 1.0 +909 97 training.batch_size 0.0 +909 97 training.label_smoothing 0.33471067247705266 +909 98 model.embedding_dim 1.0 +909 98 model.relation_dim 2.0 +909 98 model.scoring_fct_norm 2.0 +909 98 training.batch_size 1.0 +909 98 training.label_smoothing 0.0028378864728663866 +909 99 model.embedding_dim 0.0 +909 99 model.relation_dim 1.0 +909 99 model.scoring_fct_norm 2.0 +909 99 training.batch_size 1.0 +909 99 training.label_smoothing 0.0011212471476641345 +909 100 model.embedding_dim 2.0 +909 100 model.relation_dim 2.0 +909 100 model.scoring_fct_norm 2.0 +909 100 training.batch_size 0.0 +909 100 training.label_smoothing 0.5457436277375821 +909 1 dataset """kinships""" +909 1 model """transr""" +909 1 loss """bceaftersigmoid""" +909 1 regularizer """no""" +909 1 optimizer 
"""adadelta""" +909 1 training_loop """lcwa""" +909 1 evaluator """rankbased""" +909 2 dataset """kinships""" +909 2 model """transr""" +909 2 loss """bceaftersigmoid""" +909 2 regularizer """no""" +909 2 optimizer """adadelta""" +909 2 training_loop """lcwa""" +909 2 evaluator """rankbased""" +909 3 dataset """kinships""" +909 3 model """transr""" +909 3 loss """bceaftersigmoid""" +909 3 regularizer """no""" +909 3 optimizer """adadelta""" +909 3 training_loop """lcwa""" +909 3 evaluator """rankbased""" +909 4 dataset """kinships""" +909 4 model """transr""" +909 4 loss """bceaftersigmoid""" +909 4 regularizer """no""" +909 4 optimizer """adadelta""" +909 4 training_loop """lcwa""" +909 4 evaluator """rankbased""" +909 5 dataset """kinships""" +909 5 model """transr""" +909 5 loss """bceaftersigmoid""" +909 5 regularizer """no""" +909 5 optimizer """adadelta""" +909 5 training_loop """lcwa""" +909 5 evaluator """rankbased""" +909 6 dataset """kinships""" +909 6 model """transr""" +909 6 loss """bceaftersigmoid""" +909 6 regularizer """no""" +909 6 optimizer """adadelta""" +909 6 training_loop """lcwa""" +909 6 evaluator """rankbased""" +909 7 dataset """kinships""" +909 7 model """transr""" +909 7 loss """bceaftersigmoid""" +909 7 regularizer """no""" +909 7 optimizer """adadelta""" +909 7 training_loop """lcwa""" +909 7 evaluator """rankbased""" +909 8 dataset """kinships""" +909 8 model """transr""" +909 8 loss """bceaftersigmoid""" +909 8 regularizer """no""" +909 8 optimizer """adadelta""" +909 8 training_loop """lcwa""" +909 8 evaluator """rankbased""" +909 9 dataset """kinships""" +909 9 model """transr""" +909 9 loss """bceaftersigmoid""" +909 9 regularizer """no""" +909 9 optimizer """adadelta""" +909 9 training_loop """lcwa""" +909 9 evaluator """rankbased""" +909 10 dataset """kinships""" +909 10 model """transr""" +909 10 loss """bceaftersigmoid""" +909 10 regularizer """no""" +909 10 optimizer """adadelta""" +909 10 training_loop """lcwa""" +909 10 
evaluator """rankbased""" +909 11 dataset """kinships""" +909 11 model """transr""" +909 11 loss """bceaftersigmoid""" +909 11 regularizer """no""" +909 11 optimizer """adadelta""" +909 11 training_loop """lcwa""" +909 11 evaluator """rankbased""" +909 12 dataset """kinships""" +909 12 model """transr""" +909 12 loss """bceaftersigmoid""" +909 12 regularizer """no""" +909 12 optimizer """adadelta""" +909 12 training_loop """lcwa""" +909 12 evaluator """rankbased""" +909 13 dataset """kinships""" +909 13 model """transr""" +909 13 loss """bceaftersigmoid""" +909 13 regularizer """no""" +909 13 optimizer """adadelta""" +909 13 training_loop """lcwa""" +909 13 evaluator """rankbased""" +909 14 dataset """kinships""" +909 14 model """transr""" +909 14 loss """bceaftersigmoid""" +909 14 regularizer """no""" +909 14 optimizer """adadelta""" +909 14 training_loop """lcwa""" +909 14 evaluator """rankbased""" +909 15 dataset """kinships""" +909 15 model """transr""" +909 15 loss """bceaftersigmoid""" +909 15 regularizer """no""" +909 15 optimizer """adadelta""" +909 15 training_loop """lcwa""" +909 15 evaluator """rankbased""" +909 16 dataset """kinships""" +909 16 model """transr""" +909 16 loss """bceaftersigmoid""" +909 16 regularizer """no""" +909 16 optimizer """adadelta""" +909 16 training_loop """lcwa""" +909 16 evaluator """rankbased""" +909 17 dataset """kinships""" +909 17 model """transr""" +909 17 loss """bceaftersigmoid""" +909 17 regularizer """no""" +909 17 optimizer """adadelta""" +909 17 training_loop """lcwa""" +909 17 evaluator """rankbased""" +909 18 dataset """kinships""" +909 18 model """transr""" +909 18 loss """bceaftersigmoid""" +909 18 regularizer """no""" +909 18 optimizer """adadelta""" +909 18 training_loop """lcwa""" +909 18 evaluator """rankbased""" +909 19 dataset """kinships""" +909 19 model """transr""" +909 19 loss """bceaftersigmoid""" +909 19 regularizer """no""" +909 19 optimizer """adadelta""" +909 19 training_loop """lcwa""" +909 19 
evaluator """rankbased""" +909 20 dataset """kinships""" +909 20 model """transr""" +909 20 loss """bceaftersigmoid""" +909 20 regularizer """no""" +909 20 optimizer """adadelta""" +909 20 training_loop """lcwa""" +909 20 evaluator """rankbased""" +909 21 dataset """kinships""" +909 21 model """transr""" +909 21 loss """bceaftersigmoid""" +909 21 regularizer """no""" +909 21 optimizer """adadelta""" +909 21 training_loop """lcwa""" +909 21 evaluator """rankbased""" +909 22 dataset """kinships""" +909 22 model """transr""" +909 22 loss """bceaftersigmoid""" +909 22 regularizer """no""" +909 22 optimizer """adadelta""" +909 22 training_loop """lcwa""" +909 22 evaluator """rankbased""" +909 23 dataset """kinships""" +909 23 model """transr""" +909 23 loss """bceaftersigmoid""" +909 23 regularizer """no""" +909 23 optimizer """adadelta""" +909 23 training_loop """lcwa""" +909 23 evaluator """rankbased""" +909 24 dataset """kinships""" +909 24 model """transr""" +909 24 loss """bceaftersigmoid""" +909 24 regularizer """no""" +909 24 optimizer """adadelta""" +909 24 training_loop """lcwa""" +909 24 evaluator """rankbased""" +909 25 dataset """kinships""" +909 25 model """transr""" +909 25 loss """bceaftersigmoid""" +909 25 regularizer """no""" +909 25 optimizer """adadelta""" +909 25 training_loop """lcwa""" +909 25 evaluator """rankbased""" +909 26 dataset """kinships""" +909 26 model """transr""" +909 26 loss """bceaftersigmoid""" +909 26 regularizer """no""" +909 26 optimizer """adadelta""" +909 26 training_loop """lcwa""" +909 26 evaluator """rankbased""" +909 27 dataset """kinships""" +909 27 model """transr""" +909 27 loss """bceaftersigmoid""" +909 27 regularizer """no""" +909 27 optimizer """adadelta""" +909 27 training_loop """lcwa""" +909 27 evaluator """rankbased""" +909 28 dataset """kinships""" +909 28 model """transr""" +909 28 loss """bceaftersigmoid""" +909 28 regularizer """no""" +909 28 optimizer """adadelta""" +909 28 training_loop """lcwa""" +909 28 
evaluator """rankbased""" +909 29 dataset """kinships""" +909 29 model """transr""" +909 29 loss """bceaftersigmoid""" +909 29 regularizer """no""" +909 29 optimizer """adadelta""" +909 29 training_loop """lcwa""" +909 29 evaluator """rankbased""" +909 30 dataset """kinships""" +909 30 model """transr""" +909 30 loss """bceaftersigmoid""" +909 30 regularizer """no""" +909 30 optimizer """adadelta""" +909 30 training_loop """lcwa""" +909 30 evaluator """rankbased""" +909 31 dataset """kinships""" +909 31 model """transr""" +909 31 loss """bceaftersigmoid""" +909 31 regularizer """no""" +909 31 optimizer """adadelta""" +909 31 training_loop """lcwa""" +909 31 evaluator """rankbased""" +909 32 dataset """kinships""" +909 32 model """transr""" +909 32 loss """bceaftersigmoid""" +909 32 regularizer """no""" +909 32 optimizer """adadelta""" +909 32 training_loop """lcwa""" +909 32 evaluator """rankbased""" +909 33 dataset """kinships""" +909 33 model """transr""" +909 33 loss """bceaftersigmoid""" +909 33 regularizer """no""" +909 33 optimizer """adadelta""" +909 33 training_loop """lcwa""" +909 33 evaluator """rankbased""" +909 34 dataset """kinships""" +909 34 model """transr""" +909 34 loss """bceaftersigmoid""" +909 34 regularizer """no""" +909 34 optimizer """adadelta""" +909 34 training_loop """lcwa""" +909 34 evaluator """rankbased""" +909 35 dataset """kinships""" +909 35 model """transr""" +909 35 loss """bceaftersigmoid""" +909 35 regularizer """no""" +909 35 optimizer """adadelta""" +909 35 training_loop """lcwa""" +909 35 evaluator """rankbased""" +909 36 dataset """kinships""" +909 36 model """transr""" +909 36 loss """bceaftersigmoid""" +909 36 regularizer """no""" +909 36 optimizer """adadelta""" +909 36 training_loop """lcwa""" +909 36 evaluator """rankbased""" +909 37 dataset """kinships""" +909 37 model """transr""" +909 37 loss """bceaftersigmoid""" +909 37 regularizer """no""" +909 37 optimizer """adadelta""" +909 37 training_loop """lcwa""" +909 37 
evaluator """rankbased""" +909 38 dataset """kinships""" +909 38 model """transr""" +909 38 loss """bceaftersigmoid""" +909 38 regularizer """no""" +909 38 optimizer """adadelta""" +909 38 training_loop """lcwa""" +909 38 evaluator """rankbased""" +909 39 dataset """kinships""" +909 39 model """transr""" +909 39 loss """bceaftersigmoid""" +909 39 regularizer """no""" +909 39 optimizer """adadelta""" +909 39 training_loop """lcwa""" +909 39 evaluator """rankbased""" +909 40 dataset """kinships""" +909 40 model """transr""" +909 40 loss """bceaftersigmoid""" +909 40 regularizer """no""" +909 40 optimizer """adadelta""" +909 40 training_loop """lcwa""" +909 40 evaluator """rankbased""" +909 41 dataset """kinships""" +909 41 model """transr""" +909 41 loss """bceaftersigmoid""" +909 41 regularizer """no""" +909 41 optimizer """adadelta""" +909 41 training_loop """lcwa""" +909 41 evaluator """rankbased""" +909 42 dataset """kinships""" +909 42 model """transr""" +909 42 loss """bceaftersigmoid""" +909 42 regularizer """no""" +909 42 optimizer """adadelta""" +909 42 training_loop """lcwa""" +909 42 evaluator """rankbased""" +909 43 dataset """kinships""" +909 43 model """transr""" +909 43 loss """bceaftersigmoid""" +909 43 regularizer """no""" +909 43 optimizer """adadelta""" +909 43 training_loop """lcwa""" +909 43 evaluator """rankbased""" +909 44 dataset """kinships""" +909 44 model """transr""" +909 44 loss """bceaftersigmoid""" +909 44 regularizer """no""" +909 44 optimizer """adadelta""" +909 44 training_loop """lcwa""" +909 44 evaluator """rankbased""" +909 45 dataset """kinships""" +909 45 model """transr""" +909 45 loss """bceaftersigmoid""" +909 45 regularizer """no""" +909 45 optimizer """adadelta""" +909 45 training_loop """lcwa""" +909 45 evaluator """rankbased""" +909 46 dataset """kinships""" +909 46 model """transr""" +909 46 loss """bceaftersigmoid""" +909 46 regularizer """no""" +909 46 optimizer """adadelta""" +909 46 training_loop """lcwa""" +909 46 
evaluator """rankbased""" +909 47 dataset """kinships""" +909 47 model """transr""" +909 47 loss """bceaftersigmoid""" +909 47 regularizer """no""" +909 47 optimizer """adadelta""" +909 47 training_loop """lcwa""" +909 47 evaluator """rankbased""" +909 48 dataset """kinships""" +909 48 model """transr""" +909 48 loss """bceaftersigmoid""" +909 48 regularizer """no""" +909 48 optimizer """adadelta""" +909 48 training_loop """lcwa""" +909 48 evaluator """rankbased""" +909 49 dataset """kinships""" +909 49 model """transr""" +909 49 loss """bceaftersigmoid""" +909 49 regularizer """no""" +909 49 optimizer """adadelta""" +909 49 training_loop """lcwa""" +909 49 evaluator """rankbased""" +909 50 dataset """kinships""" +909 50 model """transr""" +909 50 loss """bceaftersigmoid""" +909 50 regularizer """no""" +909 50 optimizer """adadelta""" +909 50 training_loop """lcwa""" +909 50 evaluator """rankbased""" +909 51 dataset """kinships""" +909 51 model """transr""" +909 51 loss """bceaftersigmoid""" +909 51 regularizer """no""" +909 51 optimizer """adadelta""" +909 51 training_loop """lcwa""" +909 51 evaluator """rankbased""" +909 52 dataset """kinships""" +909 52 model """transr""" +909 52 loss """bceaftersigmoid""" +909 52 regularizer """no""" +909 52 optimizer """adadelta""" +909 52 training_loop """lcwa""" +909 52 evaluator """rankbased""" +909 53 dataset """kinships""" +909 53 model """transr""" +909 53 loss """bceaftersigmoid""" +909 53 regularizer """no""" +909 53 optimizer """adadelta""" +909 53 training_loop """lcwa""" +909 53 evaluator """rankbased""" +909 54 dataset """kinships""" +909 54 model """transr""" +909 54 loss """bceaftersigmoid""" +909 54 regularizer """no""" +909 54 optimizer """adadelta""" +909 54 training_loop """lcwa""" +909 54 evaluator """rankbased""" +909 55 dataset """kinships""" +909 55 model """transr""" +909 55 loss """bceaftersigmoid""" +909 55 regularizer """no""" +909 55 optimizer """adadelta""" +909 55 training_loop """lcwa""" +909 55 
evaluator """rankbased""" +909 56 dataset """kinships""" +909 56 model """transr""" +909 56 loss """bceaftersigmoid""" +909 56 regularizer """no""" +909 56 optimizer """adadelta""" +909 56 training_loop """lcwa""" +909 56 evaluator """rankbased""" +909 57 dataset """kinships""" +909 57 model """transr""" +909 57 loss """bceaftersigmoid""" +909 57 regularizer """no""" +909 57 optimizer """adadelta""" +909 57 training_loop """lcwa""" +909 57 evaluator """rankbased""" +909 58 dataset """kinships""" +909 58 model """transr""" +909 58 loss """bceaftersigmoid""" +909 58 regularizer """no""" +909 58 optimizer """adadelta""" +909 58 training_loop """lcwa""" +909 58 evaluator """rankbased""" +909 59 dataset """kinships""" +909 59 model """transr""" +909 59 loss """bceaftersigmoid""" +909 59 regularizer """no""" +909 59 optimizer """adadelta""" +909 59 training_loop """lcwa""" +909 59 evaluator """rankbased""" +909 60 dataset """kinships""" +909 60 model """transr""" +909 60 loss """bceaftersigmoid""" +909 60 regularizer """no""" +909 60 optimizer """adadelta""" +909 60 training_loop """lcwa""" +909 60 evaluator """rankbased""" +909 61 dataset """kinships""" +909 61 model """transr""" +909 61 loss """bceaftersigmoid""" +909 61 regularizer """no""" +909 61 optimizer """adadelta""" +909 61 training_loop """lcwa""" +909 61 evaluator """rankbased""" +909 62 dataset """kinships""" +909 62 model """transr""" +909 62 loss """bceaftersigmoid""" +909 62 regularizer """no""" +909 62 optimizer """adadelta""" +909 62 training_loop """lcwa""" +909 62 evaluator """rankbased""" +909 63 dataset """kinships""" +909 63 model """transr""" +909 63 loss """bceaftersigmoid""" +909 63 regularizer """no""" +909 63 optimizer """adadelta""" +909 63 training_loop """lcwa""" +909 63 evaluator """rankbased""" +909 64 dataset """kinships""" +909 64 model """transr""" +909 64 loss """bceaftersigmoid""" +909 64 regularizer """no""" +909 64 optimizer """adadelta""" +909 64 training_loop """lcwa""" +909 64 
evaluator """rankbased""" +909 65 dataset """kinships""" +909 65 model """transr""" +909 65 loss """bceaftersigmoid""" +909 65 regularizer """no""" +909 65 optimizer """adadelta""" +909 65 training_loop """lcwa""" +909 65 evaluator """rankbased""" +909 66 dataset """kinships""" +909 66 model """transr""" +909 66 loss """bceaftersigmoid""" +909 66 regularizer """no""" +909 66 optimizer """adadelta""" +909 66 training_loop """lcwa""" +909 66 evaluator """rankbased""" +909 67 dataset """kinships""" +909 67 model """transr""" +909 67 loss """bceaftersigmoid""" +909 67 regularizer """no""" +909 67 optimizer """adadelta""" +909 67 training_loop """lcwa""" +909 67 evaluator """rankbased""" +909 68 dataset """kinships""" +909 68 model """transr""" +909 68 loss """bceaftersigmoid""" +909 68 regularizer """no""" +909 68 optimizer """adadelta""" +909 68 training_loop """lcwa""" +909 68 evaluator """rankbased""" +909 69 dataset """kinships""" +909 69 model """transr""" +909 69 loss """bceaftersigmoid""" +909 69 regularizer """no""" +909 69 optimizer """adadelta""" +909 69 training_loop """lcwa""" +909 69 evaluator """rankbased""" +909 70 dataset """kinships""" +909 70 model """transr""" +909 70 loss """bceaftersigmoid""" +909 70 regularizer """no""" +909 70 optimizer """adadelta""" +909 70 training_loop """lcwa""" +909 70 evaluator """rankbased""" +909 71 dataset """kinships""" +909 71 model """transr""" +909 71 loss """bceaftersigmoid""" +909 71 regularizer """no""" +909 71 optimizer """adadelta""" +909 71 training_loop """lcwa""" +909 71 evaluator """rankbased""" +909 72 dataset """kinships""" +909 72 model """transr""" +909 72 loss """bceaftersigmoid""" +909 72 regularizer """no""" +909 72 optimizer """adadelta""" +909 72 training_loop """lcwa""" +909 72 evaluator """rankbased""" +909 73 dataset """kinships""" +909 73 model """transr""" +909 73 loss """bceaftersigmoid""" +909 73 regularizer """no""" +909 73 optimizer """adadelta""" +909 73 training_loop """lcwa""" +909 73 
evaluator """rankbased""" +909 74 dataset """kinships""" +909 74 model """transr""" +909 74 loss """bceaftersigmoid""" +909 74 regularizer """no""" +909 74 optimizer """adadelta""" +909 74 training_loop """lcwa""" +909 74 evaluator """rankbased""" +909 75 dataset """kinships""" +909 75 model """transr""" +909 75 loss """bceaftersigmoid""" +909 75 regularizer """no""" +909 75 optimizer """adadelta""" +909 75 training_loop """lcwa""" +909 75 evaluator """rankbased""" +909 76 dataset """kinships""" +909 76 model """transr""" +909 76 loss """bceaftersigmoid""" +909 76 regularizer """no""" +909 76 optimizer """adadelta""" +909 76 training_loop """lcwa""" +909 76 evaluator """rankbased""" +909 77 dataset """kinships""" +909 77 model """transr""" +909 77 loss """bceaftersigmoid""" +909 77 regularizer """no""" +909 77 optimizer """adadelta""" +909 77 training_loop """lcwa""" +909 77 evaluator """rankbased""" +909 78 dataset """kinships""" +909 78 model """transr""" +909 78 loss """bceaftersigmoid""" +909 78 regularizer """no""" +909 78 optimizer """adadelta""" +909 78 training_loop """lcwa""" +909 78 evaluator """rankbased""" +909 79 dataset """kinships""" +909 79 model """transr""" +909 79 loss """bceaftersigmoid""" +909 79 regularizer """no""" +909 79 optimizer """adadelta""" +909 79 training_loop """lcwa""" +909 79 evaluator """rankbased""" +909 80 dataset """kinships""" +909 80 model """transr""" +909 80 loss """bceaftersigmoid""" +909 80 regularizer """no""" +909 80 optimizer """adadelta""" +909 80 training_loop """lcwa""" +909 80 evaluator """rankbased""" +909 81 dataset """kinships""" +909 81 model """transr""" +909 81 loss """bceaftersigmoid""" +909 81 regularizer """no""" +909 81 optimizer """adadelta""" +909 81 training_loop """lcwa""" +909 81 evaluator """rankbased""" +909 82 dataset """kinships""" +909 82 model """transr""" +909 82 loss """bceaftersigmoid""" +909 82 regularizer """no""" +909 82 optimizer """adadelta""" +909 82 training_loop """lcwa""" +909 82 
evaluator """rankbased""" +909 83 dataset """kinships""" +909 83 model """transr""" +909 83 loss """bceaftersigmoid""" +909 83 regularizer """no""" +909 83 optimizer """adadelta""" +909 83 training_loop """lcwa""" +909 83 evaluator """rankbased""" +909 84 dataset """kinships""" +909 84 model """transr""" +909 84 loss """bceaftersigmoid""" +909 84 regularizer """no""" +909 84 optimizer """adadelta""" +909 84 training_loop """lcwa""" +909 84 evaluator """rankbased""" +909 85 dataset """kinships""" +909 85 model """transr""" +909 85 loss """bceaftersigmoid""" +909 85 regularizer """no""" +909 85 optimizer """adadelta""" +909 85 training_loop """lcwa""" +909 85 evaluator """rankbased""" +909 86 dataset """kinships""" +909 86 model """transr""" +909 86 loss """bceaftersigmoid""" +909 86 regularizer """no""" +909 86 optimizer """adadelta""" +909 86 training_loop """lcwa""" +909 86 evaluator """rankbased""" +909 87 dataset """kinships""" +909 87 model """transr""" +909 87 loss """bceaftersigmoid""" +909 87 regularizer """no""" +909 87 optimizer """adadelta""" +909 87 training_loop """lcwa""" +909 87 evaluator """rankbased""" +909 88 dataset """kinships""" +909 88 model """transr""" +909 88 loss """bceaftersigmoid""" +909 88 regularizer """no""" +909 88 optimizer """adadelta""" +909 88 training_loop """lcwa""" +909 88 evaluator """rankbased""" +909 89 dataset """kinships""" +909 89 model """transr""" +909 89 loss """bceaftersigmoid""" +909 89 regularizer """no""" +909 89 optimizer """adadelta""" +909 89 training_loop """lcwa""" +909 89 evaluator """rankbased""" +909 90 dataset """kinships""" +909 90 model """transr""" +909 90 loss """bceaftersigmoid""" +909 90 regularizer """no""" +909 90 optimizer """adadelta""" +909 90 training_loop """lcwa""" +909 90 evaluator """rankbased""" +909 91 dataset """kinships""" +909 91 model """transr""" +909 91 loss """bceaftersigmoid""" +909 91 regularizer """no""" +909 91 optimizer """adadelta""" +909 91 training_loop """lcwa""" +909 91 
evaluator """rankbased""" +909 92 dataset """kinships""" +909 92 model """transr""" +909 92 loss """bceaftersigmoid""" +909 92 regularizer """no""" +909 92 optimizer """adadelta""" +909 92 training_loop """lcwa""" +909 92 evaluator """rankbased""" +909 93 dataset """kinships""" +909 93 model """transr""" +909 93 loss """bceaftersigmoid""" +909 93 regularizer """no""" +909 93 optimizer """adadelta""" +909 93 training_loop """lcwa""" +909 93 evaluator """rankbased""" +909 94 dataset """kinships""" +909 94 model """transr""" +909 94 loss """bceaftersigmoid""" +909 94 regularizer """no""" +909 94 optimizer """adadelta""" +909 94 training_loop """lcwa""" +909 94 evaluator """rankbased""" +909 95 dataset """kinships""" +909 95 model """transr""" +909 95 loss """bceaftersigmoid""" +909 95 regularizer """no""" +909 95 optimizer """adadelta""" +909 95 training_loop """lcwa""" +909 95 evaluator """rankbased""" +909 96 dataset """kinships""" +909 96 model """transr""" +909 96 loss """bceaftersigmoid""" +909 96 regularizer """no""" +909 96 optimizer """adadelta""" +909 96 training_loop """lcwa""" +909 96 evaluator """rankbased""" +909 97 dataset """kinships""" +909 97 model """transr""" +909 97 loss """bceaftersigmoid""" +909 97 regularizer """no""" +909 97 optimizer """adadelta""" +909 97 training_loop """lcwa""" +909 97 evaluator """rankbased""" +909 98 dataset """kinships""" +909 98 model """transr""" +909 98 loss """bceaftersigmoid""" +909 98 regularizer """no""" +909 98 optimizer """adadelta""" +909 98 training_loop """lcwa""" +909 98 evaluator """rankbased""" +909 99 dataset """kinships""" +909 99 model """transr""" +909 99 loss """bceaftersigmoid""" +909 99 regularizer """no""" +909 99 optimizer """adadelta""" +909 99 training_loop """lcwa""" +909 99 evaluator """rankbased""" +909 100 dataset """kinships""" +909 100 model """transr""" +909 100 loss """bceaftersigmoid""" +909 100 regularizer """no""" +909 100 optimizer """adadelta""" +909 100 training_loop """lcwa""" 
+909 100 evaluator """rankbased""" +910 1 model.embedding_dim 2.0 +910 1 model.relation_dim 2.0 +910 1 model.scoring_fct_norm 2.0 +910 1 training.batch_size 1.0 +910 1 training.label_smoothing 0.2884902287950184 +910 2 model.embedding_dim 1.0 +910 2 model.relation_dim 1.0 +910 2 model.scoring_fct_norm 2.0 +910 2 training.batch_size 2.0 +910 2 training.label_smoothing 0.14781359837343808 +910 3 model.embedding_dim 1.0 +910 3 model.relation_dim 1.0 +910 3 model.scoring_fct_norm 2.0 +910 3 training.batch_size 1.0 +910 3 training.label_smoothing 0.006133193251888748 +910 4 model.embedding_dim 0.0 +910 4 model.relation_dim 1.0 +910 4 model.scoring_fct_norm 2.0 +910 4 training.batch_size 2.0 +910 4 training.label_smoothing 0.010694473457026241 +910 5 model.embedding_dim 1.0 +910 5 model.relation_dim 0.0 +910 5 model.scoring_fct_norm 1.0 +910 5 training.batch_size 1.0 +910 5 training.label_smoothing 0.08865520144962194 +910 6 model.embedding_dim 0.0 +910 6 model.relation_dim 2.0 +910 6 model.scoring_fct_norm 2.0 +910 6 training.batch_size 1.0 +910 6 training.label_smoothing 0.001113284304630563 +910 7 model.embedding_dim 1.0 +910 7 model.relation_dim 0.0 +910 7 model.scoring_fct_norm 2.0 +910 7 training.batch_size 2.0 +910 7 training.label_smoothing 0.4734946811150015 +910 8 model.embedding_dim 1.0 +910 8 model.relation_dim 1.0 +910 8 model.scoring_fct_norm 1.0 +910 8 training.batch_size 1.0 +910 8 training.label_smoothing 0.008787983740588015 +910 9 model.embedding_dim 2.0 +910 9 model.relation_dim 2.0 +910 9 model.scoring_fct_norm 1.0 +910 9 training.batch_size 2.0 +910 9 training.label_smoothing 0.002320250198834419 +910 10 model.embedding_dim 2.0 +910 10 model.relation_dim 2.0 +910 10 model.scoring_fct_norm 1.0 +910 10 training.batch_size 0.0 +910 10 training.label_smoothing 0.03515464405012797 +910 11 model.embedding_dim 2.0 +910 11 model.relation_dim 1.0 +910 11 model.scoring_fct_norm 1.0 +910 11 training.batch_size 0.0 +910 11 training.label_smoothing 
0.1409167876377195 +910 12 model.embedding_dim 1.0 +910 12 model.relation_dim 2.0 +910 12 model.scoring_fct_norm 1.0 +910 12 training.batch_size 2.0 +910 12 training.label_smoothing 0.003910818440993021 +910 13 model.embedding_dim 2.0 +910 13 model.relation_dim 2.0 +910 13 model.scoring_fct_norm 2.0 +910 13 training.batch_size 0.0 +910 13 training.label_smoothing 0.07133728826889435 +910 14 model.embedding_dim 1.0 +910 14 model.relation_dim 2.0 +910 14 model.scoring_fct_norm 1.0 +910 14 training.batch_size 0.0 +910 14 training.label_smoothing 0.10537263574946948 +910 15 model.embedding_dim 0.0 +910 15 model.relation_dim 0.0 +910 15 model.scoring_fct_norm 2.0 +910 15 training.batch_size 1.0 +910 15 training.label_smoothing 0.00284668083266952 +910 16 model.embedding_dim 1.0 +910 16 model.relation_dim 2.0 +910 16 model.scoring_fct_norm 2.0 +910 16 training.batch_size 0.0 +910 16 training.label_smoothing 0.003993060641142197 +910 17 model.embedding_dim 0.0 +910 17 model.relation_dim 2.0 +910 17 model.scoring_fct_norm 2.0 +910 17 training.batch_size 1.0 +910 17 training.label_smoothing 0.011766354361028672 +910 18 model.embedding_dim 2.0 +910 18 model.relation_dim 0.0 +910 18 model.scoring_fct_norm 2.0 +910 18 training.batch_size 1.0 +910 18 training.label_smoothing 0.7221478318241441 +910 19 model.embedding_dim 0.0 +910 19 model.relation_dim 0.0 +910 19 model.scoring_fct_norm 1.0 +910 19 training.batch_size 1.0 +910 19 training.label_smoothing 0.049146618169718485 +910 20 model.embedding_dim 2.0 +910 20 model.relation_dim 1.0 +910 20 model.scoring_fct_norm 2.0 +910 20 training.batch_size 1.0 +910 20 training.label_smoothing 0.501878901880261 +910 21 model.embedding_dim 2.0 +910 21 model.relation_dim 2.0 +910 21 model.scoring_fct_norm 1.0 +910 21 training.batch_size 2.0 +910 21 training.label_smoothing 0.015219518319221668 +910 22 model.embedding_dim 1.0 +910 22 model.relation_dim 2.0 +910 22 model.scoring_fct_norm 2.0 +910 22 training.batch_size 1.0 +910 22 
training.label_smoothing 0.3896910860059878 +910 23 model.embedding_dim 0.0 +910 23 model.relation_dim 1.0 +910 23 model.scoring_fct_norm 1.0 +910 23 training.batch_size 2.0 +910 23 training.label_smoothing 0.014768838958717978 +910 24 model.embedding_dim 2.0 +910 24 model.relation_dim 2.0 +910 24 model.scoring_fct_norm 1.0 +910 24 training.batch_size 0.0 +910 24 training.label_smoothing 0.85273897004514 +910 25 model.embedding_dim 2.0 +910 25 model.relation_dim 0.0 +910 25 model.scoring_fct_norm 2.0 +910 25 training.batch_size 1.0 +910 25 training.label_smoothing 0.010141979742501559 +910 26 model.embedding_dim 0.0 +910 26 model.relation_dim 1.0 +910 26 model.scoring_fct_norm 2.0 +910 26 training.batch_size 2.0 +910 26 training.label_smoothing 0.00783443260967839 +910 27 model.embedding_dim 2.0 +910 27 model.relation_dim 1.0 +910 27 model.scoring_fct_norm 2.0 +910 27 training.batch_size 0.0 +910 27 training.label_smoothing 0.034076025162979404 +910 28 model.embedding_dim 1.0 +910 28 model.relation_dim 2.0 +910 28 model.scoring_fct_norm 1.0 +910 28 training.batch_size 2.0 +910 28 training.label_smoothing 0.019103467909836757 +910 29 model.embedding_dim 2.0 +910 29 model.relation_dim 0.0 +910 29 model.scoring_fct_norm 1.0 +910 29 training.batch_size 0.0 +910 29 training.label_smoothing 0.15085197734479516 +910 30 model.embedding_dim 1.0 +910 30 model.relation_dim 1.0 +910 30 model.scoring_fct_norm 2.0 +910 30 training.batch_size 0.0 +910 30 training.label_smoothing 0.663728924119721 +910 31 model.embedding_dim 1.0 +910 31 model.relation_dim 2.0 +910 31 model.scoring_fct_norm 2.0 +910 31 training.batch_size 0.0 +910 31 training.label_smoothing 0.7748467539979657 +910 32 model.embedding_dim 2.0 +910 32 model.relation_dim 0.0 +910 32 model.scoring_fct_norm 1.0 +910 32 training.batch_size 1.0 +910 32 training.label_smoothing 0.017287841581688734 +910 33 model.embedding_dim 2.0 +910 33 model.relation_dim 2.0 +910 33 model.scoring_fct_norm 2.0 +910 33 training.batch_size 
1.0 +910 33 training.label_smoothing 0.016729145953794673 +910 34 model.embedding_dim 1.0 +910 34 model.relation_dim 1.0 +910 34 model.scoring_fct_norm 2.0 +910 34 training.batch_size 1.0 +910 34 training.label_smoothing 0.0019705172062337707 +910 35 model.embedding_dim 0.0 +910 35 model.relation_dim 1.0 +910 35 model.scoring_fct_norm 1.0 +910 35 training.batch_size 2.0 +910 35 training.label_smoothing 0.01352000700633484 +910 36 model.embedding_dim 0.0 +910 36 model.relation_dim 0.0 +910 36 model.scoring_fct_norm 2.0 +910 36 training.batch_size 1.0 +910 36 training.label_smoothing 0.038206062297524636 +910 37 model.embedding_dim 1.0 +910 37 model.relation_dim 2.0 +910 37 model.scoring_fct_norm 1.0 +910 37 training.batch_size 2.0 +910 37 training.label_smoothing 0.5228220755575846 +910 38 model.embedding_dim 2.0 +910 38 model.relation_dim 2.0 +910 38 model.scoring_fct_norm 2.0 +910 38 training.batch_size 2.0 +910 38 training.label_smoothing 0.1272994386487257 +910 39 model.embedding_dim 0.0 +910 39 model.relation_dim 2.0 +910 39 model.scoring_fct_norm 1.0 +910 39 training.batch_size 1.0 +910 39 training.label_smoothing 0.2392865523217037 +910 40 model.embedding_dim 1.0 +910 40 model.relation_dim 2.0 +910 40 model.scoring_fct_norm 2.0 +910 40 training.batch_size 2.0 +910 40 training.label_smoothing 0.856470220029124 +910 41 model.embedding_dim 1.0 +910 41 model.relation_dim 1.0 +910 41 model.scoring_fct_norm 1.0 +910 41 training.batch_size 2.0 +910 41 training.label_smoothing 0.0014476982235724078 +910 42 model.embedding_dim 1.0 +910 42 model.relation_dim 0.0 +910 42 model.scoring_fct_norm 2.0 +910 42 training.batch_size 1.0 +910 42 training.label_smoothing 0.0036469465372995216 +910 43 model.embedding_dim 0.0 +910 43 model.relation_dim 2.0 +910 43 model.scoring_fct_norm 1.0 +910 43 training.batch_size 0.0 +910 43 training.label_smoothing 0.0033633202809003056 +910 44 model.embedding_dim 1.0 +910 44 model.relation_dim 0.0 +910 44 model.scoring_fct_norm 2.0 +910 44 
training.batch_size 2.0 +910 44 training.label_smoothing 0.0011956628820401724 +910 45 model.embedding_dim 2.0 +910 45 model.relation_dim 0.0 +910 45 model.scoring_fct_norm 1.0 +910 45 training.batch_size 1.0 +910 45 training.label_smoothing 0.137375720629091 +910 46 model.embedding_dim 2.0 +910 46 model.relation_dim 0.0 +910 46 model.scoring_fct_norm 1.0 +910 46 training.batch_size 2.0 +910 46 training.label_smoothing 0.0011365495921093981 +910 47 model.embedding_dim 1.0 +910 47 model.relation_dim 2.0 +910 47 model.scoring_fct_norm 1.0 +910 47 training.batch_size 1.0 +910 47 training.label_smoothing 0.06926435395651095 +910 48 model.embedding_dim 2.0 +910 48 model.relation_dim 1.0 +910 48 model.scoring_fct_norm 1.0 +910 48 training.batch_size 0.0 +910 48 training.label_smoothing 0.03131148825250604 +910 49 model.embedding_dim 0.0 +910 49 model.relation_dim 0.0 +910 49 model.scoring_fct_norm 1.0 +910 49 training.batch_size 1.0 +910 49 training.label_smoothing 0.06477143775900998 +910 50 model.embedding_dim 0.0 +910 50 model.relation_dim 0.0 +910 50 model.scoring_fct_norm 2.0 +910 50 training.batch_size 1.0 +910 50 training.label_smoothing 0.00554095865351969 +910 51 model.embedding_dim 2.0 +910 51 model.relation_dim 2.0 +910 51 model.scoring_fct_norm 2.0 +910 51 training.batch_size 2.0 +910 51 training.label_smoothing 0.32558182493790067 +910 52 model.embedding_dim 1.0 +910 52 model.relation_dim 2.0 +910 52 model.scoring_fct_norm 2.0 +910 52 training.batch_size 2.0 +910 52 training.label_smoothing 0.004215032462193468 +910 53 model.embedding_dim 0.0 +910 53 model.relation_dim 2.0 +910 53 model.scoring_fct_norm 1.0 +910 53 training.batch_size 0.0 +910 53 training.label_smoothing 0.47039139961149523 +910 54 model.embedding_dim 1.0 +910 54 model.relation_dim 1.0 +910 54 model.scoring_fct_norm 1.0 +910 54 training.batch_size 2.0 +910 54 training.label_smoothing 0.001788511200693427 +910 55 model.embedding_dim 0.0 +910 55 model.relation_dim 2.0 +910 55 
model.scoring_fct_norm 2.0 +910 55 training.batch_size 0.0 +910 55 training.label_smoothing 0.2791071650619521 +910 56 model.embedding_dim 0.0 +910 56 model.relation_dim 1.0 +910 56 model.scoring_fct_norm 2.0 +910 56 training.batch_size 1.0 +910 56 training.label_smoothing 0.0010282646764036094 +910 57 model.embedding_dim 2.0 +910 57 model.relation_dim 1.0 +910 57 model.scoring_fct_norm 1.0 +910 57 training.batch_size 2.0 +910 57 training.label_smoothing 0.040328444844137105 +910 58 model.embedding_dim 2.0 +910 58 model.relation_dim 2.0 +910 58 model.scoring_fct_norm 2.0 +910 58 training.batch_size 2.0 +910 58 training.label_smoothing 0.03368535005682499 +910 59 model.embedding_dim 2.0 +910 59 model.relation_dim 0.0 +910 59 model.scoring_fct_norm 1.0 +910 59 training.batch_size 2.0 +910 59 training.label_smoothing 0.6510481979907444 +910 60 model.embedding_dim 0.0 +910 60 model.relation_dim 2.0 +910 60 model.scoring_fct_norm 1.0 +910 60 training.batch_size 0.0 +910 60 training.label_smoothing 0.007639157700272325 +910 61 model.embedding_dim 2.0 +910 61 model.relation_dim 2.0 +910 61 model.scoring_fct_norm 2.0 +910 61 training.batch_size 2.0 +910 61 training.label_smoothing 0.00519884165881057 +910 62 model.embedding_dim 2.0 +910 62 model.relation_dim 1.0 +910 62 model.scoring_fct_norm 1.0 +910 62 training.batch_size 1.0 +910 62 training.label_smoothing 0.0011646899338560973 +910 63 model.embedding_dim 0.0 +910 63 model.relation_dim 0.0 +910 63 model.scoring_fct_norm 2.0 +910 63 training.batch_size 0.0 +910 63 training.label_smoothing 0.10194611996766079 +910 64 model.embedding_dim 2.0 +910 64 model.relation_dim 0.0 +910 64 model.scoring_fct_norm 1.0 +910 64 training.batch_size 0.0 +910 64 training.label_smoothing 0.1708097425817555 +910 65 model.embedding_dim 0.0 +910 65 model.relation_dim 0.0 +910 65 model.scoring_fct_norm 2.0 +910 65 training.batch_size 0.0 +910 65 training.label_smoothing 0.019885126113250693 +910 66 model.embedding_dim 0.0 +910 66 
model.relation_dim 0.0 +910 66 model.scoring_fct_norm 2.0 +910 66 training.batch_size 2.0 +910 66 training.label_smoothing 0.024288218422465053 +910 67 model.embedding_dim 0.0 +910 67 model.relation_dim 0.0 +910 67 model.scoring_fct_norm 1.0 +910 67 training.batch_size 1.0 +910 67 training.label_smoothing 0.1476537230152695 +910 68 model.embedding_dim 1.0 +910 68 model.relation_dim 1.0 +910 68 model.scoring_fct_norm 1.0 +910 68 training.batch_size 1.0 +910 68 training.label_smoothing 0.16669126558234196 +910 69 model.embedding_dim 0.0 +910 69 model.relation_dim 0.0 +910 69 model.scoring_fct_norm 2.0 +910 69 training.batch_size 2.0 +910 69 training.label_smoothing 0.004426301709067527 +910 70 model.embedding_dim 0.0 +910 70 model.relation_dim 1.0 +910 70 model.scoring_fct_norm 2.0 +910 70 training.batch_size 0.0 +910 70 training.label_smoothing 0.2565248296948946 +910 71 model.embedding_dim 0.0 +910 71 model.relation_dim 2.0 +910 71 model.scoring_fct_norm 1.0 +910 71 training.batch_size 1.0 +910 71 training.label_smoothing 0.0038380982690585596 +910 72 model.embedding_dim 1.0 +910 72 model.relation_dim 0.0 +910 72 model.scoring_fct_norm 1.0 +910 72 training.batch_size 1.0 +910 72 training.label_smoothing 0.05949324242052627 +910 73 model.embedding_dim 1.0 +910 73 model.relation_dim 0.0 +910 73 model.scoring_fct_norm 1.0 +910 73 training.batch_size 0.0 +910 73 training.label_smoothing 0.2768316682069811 +910 74 model.embedding_dim 0.0 +910 74 model.relation_dim 0.0 +910 74 model.scoring_fct_norm 1.0 +910 74 training.batch_size 0.0 +910 74 training.label_smoothing 0.0021125902288987673 +910 75 model.embedding_dim 0.0 +910 75 model.relation_dim 1.0 +910 75 model.scoring_fct_norm 2.0 +910 75 training.batch_size 1.0 +910 75 training.label_smoothing 0.010802428613760724 +910 76 model.embedding_dim 0.0 +910 76 model.relation_dim 0.0 +910 76 model.scoring_fct_norm 1.0 +910 76 training.batch_size 1.0 +910 76 training.label_smoothing 0.0028436506135165753 +910 77 
model.embedding_dim 2.0 +910 77 model.relation_dim 2.0 +910 77 model.scoring_fct_norm 1.0 +910 77 training.batch_size 2.0 +910 77 training.label_smoothing 0.05094682562068865 +910 78 model.embedding_dim 1.0 +910 78 model.relation_dim 0.0 +910 78 model.scoring_fct_norm 2.0 +910 78 training.batch_size 0.0 +910 78 training.label_smoothing 0.00562159027716974 +910 79 model.embedding_dim 1.0 +910 79 model.relation_dim 1.0 +910 79 model.scoring_fct_norm 2.0 +910 79 training.batch_size 2.0 +910 79 training.label_smoothing 0.009859796818599227 +910 80 model.embedding_dim 0.0 +910 80 model.relation_dim 0.0 +910 80 model.scoring_fct_norm 1.0 +910 80 training.batch_size 0.0 +910 80 training.label_smoothing 0.4382166079660878 +910 81 model.embedding_dim 0.0 +910 81 model.relation_dim 1.0 +910 81 model.scoring_fct_norm 2.0 +910 81 training.batch_size 2.0 +910 81 training.label_smoothing 0.03569644877439852 +910 82 model.embedding_dim 1.0 +910 82 model.relation_dim 1.0 +910 82 model.scoring_fct_norm 1.0 +910 82 training.batch_size 2.0 +910 82 training.label_smoothing 0.09990493369066196 +910 83 model.embedding_dim 0.0 +910 83 model.relation_dim 1.0 +910 83 model.scoring_fct_norm 2.0 +910 83 training.batch_size 0.0 +910 83 training.label_smoothing 0.002484137768085249 +910 84 model.embedding_dim 0.0 +910 84 model.relation_dim 1.0 +910 84 model.scoring_fct_norm 2.0 +910 84 training.batch_size 0.0 +910 84 training.label_smoothing 0.01778431825347956 +910 85 model.embedding_dim 2.0 +910 85 model.relation_dim 0.0 +910 85 model.scoring_fct_norm 2.0 +910 85 training.batch_size 1.0 +910 85 training.label_smoothing 0.7510497583380306 +910 86 model.embedding_dim 1.0 +910 86 model.relation_dim 2.0 +910 86 model.scoring_fct_norm 1.0 +910 86 training.batch_size 2.0 +910 86 training.label_smoothing 0.046644486334604204 +910 87 model.embedding_dim 2.0 +910 87 model.relation_dim 2.0 +910 87 model.scoring_fct_norm 2.0 +910 87 training.batch_size 2.0 +910 87 training.label_smoothing 
0.015393965997503685 +910 88 model.embedding_dim 0.0 +910 88 model.relation_dim 1.0 +910 88 model.scoring_fct_norm 2.0 +910 88 training.batch_size 0.0 +910 88 training.label_smoothing 0.0034535511849280044 +910 89 model.embedding_dim 0.0 +910 89 model.relation_dim 0.0 +910 89 model.scoring_fct_norm 2.0 +910 89 training.batch_size 2.0 +910 89 training.label_smoothing 0.11702145291422938 +910 90 model.embedding_dim 0.0 +910 90 model.relation_dim 0.0 +910 90 model.scoring_fct_norm 2.0 +910 90 training.batch_size 0.0 +910 90 training.label_smoothing 0.07770811224805066 +910 91 model.embedding_dim 1.0 +910 91 model.relation_dim 1.0 +910 91 model.scoring_fct_norm 2.0 +910 91 training.batch_size 2.0 +910 91 training.label_smoothing 0.002663984018389033 +910 92 model.embedding_dim 1.0 +910 92 model.relation_dim 1.0 +910 92 model.scoring_fct_norm 2.0 +910 92 training.batch_size 2.0 +910 92 training.label_smoothing 0.007840389139066874 +910 93 model.embedding_dim 0.0 +910 93 model.relation_dim 2.0 +910 93 model.scoring_fct_norm 2.0 +910 93 training.batch_size 0.0 +910 93 training.label_smoothing 0.46870338775716625 +910 94 model.embedding_dim 1.0 +910 94 model.relation_dim 2.0 +910 94 model.scoring_fct_norm 1.0 +910 94 training.batch_size 0.0 +910 94 training.label_smoothing 0.09222740933183028 +910 95 model.embedding_dim 2.0 +910 95 model.relation_dim 2.0 +910 95 model.scoring_fct_norm 2.0 +910 95 training.batch_size 1.0 +910 95 training.label_smoothing 0.20402887745817364 +910 96 model.embedding_dim 1.0 +910 96 model.relation_dim 1.0 +910 96 model.scoring_fct_norm 2.0 +910 96 training.batch_size 0.0 +910 96 training.label_smoothing 0.08398287815474009 +910 97 model.embedding_dim 0.0 +910 97 model.relation_dim 1.0 +910 97 model.scoring_fct_norm 2.0 +910 97 training.batch_size 1.0 +910 97 training.label_smoothing 0.0075708488297655755 +910 98 model.embedding_dim 1.0 +910 98 model.relation_dim 0.0 +910 98 model.scoring_fct_norm 1.0 +910 98 training.batch_size 1.0 +910 98 
training.label_smoothing 0.015323423556694329 +910 99 model.embedding_dim 2.0 +910 99 model.relation_dim 0.0 +910 99 model.scoring_fct_norm 1.0 +910 99 training.batch_size 2.0 +910 99 training.label_smoothing 0.05874775147825348 +910 100 model.embedding_dim 2.0 +910 100 model.relation_dim 0.0 +910 100 model.scoring_fct_norm 1.0 +910 100 training.batch_size 1.0 +910 100 training.label_smoothing 0.005316213524205624 +910 1 dataset """kinships""" +910 1 model """transr""" +910 1 loss """softplus""" +910 1 regularizer """no""" +910 1 optimizer """adadelta""" +910 1 training_loop """lcwa""" +910 1 evaluator """rankbased""" +910 2 dataset """kinships""" +910 2 model """transr""" +910 2 loss """softplus""" +910 2 regularizer """no""" +910 2 optimizer """adadelta""" +910 2 training_loop """lcwa""" +910 2 evaluator """rankbased""" +910 3 dataset """kinships""" +910 3 model """transr""" +910 3 loss """softplus""" +910 3 regularizer """no""" +910 3 optimizer """adadelta""" +910 3 training_loop """lcwa""" +910 3 evaluator """rankbased""" +910 4 dataset """kinships""" +910 4 model """transr""" +910 4 loss """softplus""" +910 4 regularizer """no""" +910 4 optimizer """adadelta""" +910 4 training_loop """lcwa""" +910 4 evaluator """rankbased""" +910 5 dataset """kinships""" +910 5 model """transr""" +910 5 loss """softplus""" +910 5 regularizer """no""" +910 5 optimizer """adadelta""" +910 5 training_loop """lcwa""" +910 5 evaluator """rankbased""" +910 6 dataset """kinships""" +910 6 model """transr""" +910 6 loss """softplus""" +910 6 regularizer """no""" +910 6 optimizer """adadelta""" +910 6 training_loop """lcwa""" +910 6 evaluator """rankbased""" +910 7 dataset """kinships""" +910 7 model """transr""" +910 7 loss """softplus""" +910 7 regularizer """no""" +910 7 optimizer """adadelta""" +910 7 training_loop """lcwa""" +910 7 evaluator """rankbased""" +910 8 dataset """kinships""" +910 8 model """transr""" +910 8 loss """softplus""" +910 8 regularizer """no""" +910 8 
optimizer """adadelta""" +910 8 training_loop """lcwa""" +910 8 evaluator """rankbased""" +910 9 dataset """kinships""" +910 9 model """transr""" +910 9 loss """softplus""" +910 9 regularizer """no""" +910 9 optimizer """adadelta""" +910 9 training_loop """lcwa""" +910 9 evaluator """rankbased""" +910 10 dataset """kinships""" +910 10 model """transr""" +910 10 loss """softplus""" +910 10 regularizer """no""" +910 10 optimizer """adadelta""" +910 10 training_loop """lcwa""" +910 10 evaluator """rankbased""" +910 11 dataset """kinships""" +910 11 model """transr""" +910 11 loss """softplus""" +910 11 regularizer """no""" +910 11 optimizer """adadelta""" +910 11 training_loop """lcwa""" +910 11 evaluator """rankbased""" +910 12 dataset """kinships""" +910 12 model """transr""" +910 12 loss """softplus""" +910 12 regularizer """no""" +910 12 optimizer """adadelta""" +910 12 training_loop """lcwa""" +910 12 evaluator """rankbased""" +910 13 dataset """kinships""" +910 13 model """transr""" +910 13 loss """softplus""" +910 13 regularizer """no""" +910 13 optimizer """adadelta""" +910 13 training_loop """lcwa""" +910 13 evaluator """rankbased""" +910 14 dataset """kinships""" +910 14 model """transr""" +910 14 loss """softplus""" +910 14 regularizer """no""" +910 14 optimizer """adadelta""" +910 14 training_loop """lcwa""" +910 14 evaluator """rankbased""" +910 15 dataset """kinships""" +910 15 model """transr""" +910 15 loss """softplus""" +910 15 regularizer """no""" +910 15 optimizer """adadelta""" +910 15 training_loop """lcwa""" +910 15 evaluator """rankbased""" +910 16 dataset """kinships""" +910 16 model """transr""" +910 16 loss """softplus""" +910 16 regularizer """no""" +910 16 optimizer """adadelta""" +910 16 training_loop """lcwa""" +910 16 evaluator """rankbased""" +910 17 dataset """kinships""" +910 17 model """transr""" +910 17 loss """softplus""" +910 17 regularizer """no""" +910 17 optimizer """adadelta""" +910 17 training_loop """lcwa""" +910 17 
evaluator """rankbased""" +910 18 dataset """kinships""" +910 18 model """transr""" +910 18 loss """softplus""" +910 18 regularizer """no""" +910 18 optimizer """adadelta""" +910 18 training_loop """lcwa""" +910 18 evaluator """rankbased""" +910 19 dataset """kinships""" +910 19 model """transr""" +910 19 loss """softplus""" +910 19 regularizer """no""" +910 19 optimizer """adadelta""" +910 19 training_loop """lcwa""" +910 19 evaluator """rankbased""" +910 20 dataset """kinships""" +910 20 model """transr""" +910 20 loss """softplus""" +910 20 regularizer """no""" +910 20 optimizer """adadelta""" +910 20 training_loop """lcwa""" +910 20 evaluator """rankbased""" +910 21 dataset """kinships""" +910 21 model """transr""" +910 21 loss """softplus""" +910 21 regularizer """no""" +910 21 optimizer """adadelta""" +910 21 training_loop """lcwa""" +910 21 evaluator """rankbased""" +910 22 dataset """kinships""" +910 22 model """transr""" +910 22 loss """softplus""" +910 22 regularizer """no""" +910 22 optimizer """adadelta""" +910 22 training_loop """lcwa""" +910 22 evaluator """rankbased""" +910 23 dataset """kinships""" +910 23 model """transr""" +910 23 loss """softplus""" +910 23 regularizer """no""" +910 23 optimizer """adadelta""" +910 23 training_loop """lcwa""" +910 23 evaluator """rankbased""" +910 24 dataset """kinships""" +910 24 model """transr""" +910 24 loss """softplus""" +910 24 regularizer """no""" +910 24 optimizer """adadelta""" +910 24 training_loop """lcwa""" +910 24 evaluator """rankbased""" +910 25 dataset """kinships""" +910 25 model """transr""" +910 25 loss """softplus""" +910 25 regularizer """no""" +910 25 optimizer """adadelta""" +910 25 training_loop """lcwa""" +910 25 evaluator """rankbased""" +910 26 dataset """kinships""" +910 26 model """transr""" +910 26 loss """softplus""" +910 26 regularizer """no""" +910 26 optimizer """adadelta""" +910 26 training_loop """lcwa""" +910 26 evaluator """rankbased""" +910 27 dataset """kinships""" +910 27 
model """transr""" +910 27 loss """softplus""" +910 27 regularizer """no""" +910 27 optimizer """adadelta""" +910 27 training_loop """lcwa""" +910 27 evaluator """rankbased""" +910 28 dataset """kinships""" +910 28 model """transr""" +910 28 loss """softplus""" +910 28 regularizer """no""" +910 28 optimizer """adadelta""" +910 28 training_loop """lcwa""" +910 28 evaluator """rankbased""" +910 29 dataset """kinships""" +910 29 model """transr""" +910 29 loss """softplus""" +910 29 regularizer """no""" +910 29 optimizer """adadelta""" +910 29 training_loop """lcwa""" +910 29 evaluator """rankbased""" +910 30 dataset """kinships""" +910 30 model """transr""" +910 30 loss """softplus""" +910 30 regularizer """no""" +910 30 optimizer """adadelta""" +910 30 training_loop """lcwa""" +910 30 evaluator """rankbased""" +910 31 dataset """kinships""" +910 31 model """transr""" +910 31 loss """softplus""" +910 31 regularizer """no""" +910 31 optimizer """adadelta""" +910 31 training_loop """lcwa""" +910 31 evaluator """rankbased""" +910 32 dataset """kinships""" +910 32 model """transr""" +910 32 loss """softplus""" +910 32 regularizer """no""" +910 32 optimizer """adadelta""" +910 32 training_loop """lcwa""" +910 32 evaluator """rankbased""" +910 33 dataset """kinships""" +910 33 model """transr""" +910 33 loss """softplus""" +910 33 regularizer """no""" +910 33 optimizer """adadelta""" +910 33 training_loop """lcwa""" +910 33 evaluator """rankbased""" +910 34 dataset """kinships""" +910 34 model """transr""" +910 34 loss """softplus""" +910 34 regularizer """no""" +910 34 optimizer """adadelta""" +910 34 training_loop """lcwa""" +910 34 evaluator """rankbased""" +910 35 dataset """kinships""" +910 35 model """transr""" +910 35 loss """softplus""" +910 35 regularizer """no""" +910 35 optimizer """adadelta""" +910 35 training_loop """lcwa""" +910 35 evaluator """rankbased""" +910 36 dataset """kinships""" +910 36 model """transr""" +910 36 loss """softplus""" +910 36 
regularizer """no""" +910 36 optimizer """adadelta""" +910 36 training_loop """lcwa""" +910 36 evaluator """rankbased""" +910 37 dataset """kinships""" +910 37 model """transr""" +910 37 loss """softplus""" +910 37 regularizer """no""" +910 37 optimizer """adadelta""" +910 37 training_loop """lcwa""" +910 37 evaluator """rankbased""" +910 38 dataset """kinships""" +910 38 model """transr""" +910 38 loss """softplus""" +910 38 regularizer """no""" +910 38 optimizer """adadelta""" +910 38 training_loop """lcwa""" +910 38 evaluator """rankbased""" +910 39 dataset """kinships""" +910 39 model """transr""" +910 39 loss """softplus""" +910 39 regularizer """no""" +910 39 optimizer """adadelta""" +910 39 training_loop """lcwa""" +910 39 evaluator """rankbased""" +910 40 dataset """kinships""" +910 40 model """transr""" +910 40 loss """softplus""" +910 40 regularizer """no""" +910 40 optimizer """adadelta""" +910 40 training_loop """lcwa""" +910 40 evaluator """rankbased""" +910 41 dataset """kinships""" +910 41 model """transr""" +910 41 loss """softplus""" +910 41 regularizer """no""" +910 41 optimizer """adadelta""" +910 41 training_loop """lcwa""" +910 41 evaluator """rankbased""" +910 42 dataset """kinships""" +910 42 model """transr""" +910 42 loss """softplus""" +910 42 regularizer """no""" +910 42 optimizer """adadelta""" +910 42 training_loop """lcwa""" +910 42 evaluator """rankbased""" +910 43 dataset """kinships""" +910 43 model """transr""" +910 43 loss """softplus""" +910 43 regularizer """no""" +910 43 optimizer """adadelta""" +910 43 training_loop """lcwa""" +910 43 evaluator """rankbased""" +910 44 dataset """kinships""" +910 44 model """transr""" +910 44 loss """softplus""" +910 44 regularizer """no""" +910 44 optimizer """adadelta""" +910 44 training_loop """lcwa""" +910 44 evaluator """rankbased""" +910 45 dataset """kinships""" +910 45 model """transr""" +910 45 loss """softplus""" +910 45 regularizer """no""" +910 45 optimizer """adadelta""" +910 45 
training_loop """lcwa""" +910 45 evaluator """rankbased""" +910 46 dataset """kinships""" +910 46 model """transr""" +910 46 loss """softplus""" +910 46 regularizer """no""" +910 46 optimizer """adadelta""" +910 46 training_loop """lcwa""" +910 46 evaluator """rankbased""" +910 47 dataset """kinships""" +910 47 model """transr""" +910 47 loss """softplus""" +910 47 regularizer """no""" +910 47 optimizer """adadelta""" +910 47 training_loop """lcwa""" +910 47 evaluator """rankbased""" +910 48 dataset """kinships""" +910 48 model """transr""" +910 48 loss """softplus""" +910 48 regularizer """no""" +910 48 optimizer """adadelta""" +910 48 training_loop """lcwa""" +910 48 evaluator """rankbased""" +910 49 dataset """kinships""" +910 49 model """transr""" +910 49 loss """softplus""" +910 49 regularizer """no""" +910 49 optimizer """adadelta""" +910 49 training_loop """lcwa""" +910 49 evaluator """rankbased""" +910 50 dataset """kinships""" +910 50 model """transr""" +910 50 loss """softplus""" +910 50 regularizer """no""" +910 50 optimizer """adadelta""" +910 50 training_loop """lcwa""" +910 50 evaluator """rankbased""" +910 51 dataset """kinships""" +910 51 model """transr""" +910 51 loss """softplus""" +910 51 regularizer """no""" +910 51 optimizer """adadelta""" +910 51 training_loop """lcwa""" +910 51 evaluator """rankbased""" +910 52 dataset """kinships""" +910 52 model """transr""" +910 52 loss """softplus""" +910 52 regularizer """no""" +910 52 optimizer """adadelta""" +910 52 training_loop """lcwa""" +910 52 evaluator """rankbased""" +910 53 dataset """kinships""" +910 53 model """transr""" +910 53 loss """softplus""" +910 53 regularizer """no""" +910 53 optimizer """adadelta""" +910 53 training_loop """lcwa""" +910 53 evaluator """rankbased""" +910 54 dataset """kinships""" +910 54 model """transr""" +910 54 loss """softplus""" +910 54 regularizer """no""" +910 54 optimizer """adadelta""" +910 54 training_loop """lcwa""" +910 54 evaluator """rankbased""" +910 
55 dataset """kinships""" +910 55 model """transr""" +910 55 loss """softplus""" +910 55 regularizer """no""" +910 55 optimizer """adadelta""" +910 55 training_loop """lcwa""" +910 55 evaluator """rankbased""" +910 56 dataset """kinships""" +910 56 model """transr""" +910 56 loss """softplus""" +910 56 regularizer """no""" +910 56 optimizer """adadelta""" +910 56 training_loop """lcwa""" +910 56 evaluator """rankbased""" +910 57 dataset """kinships""" +910 57 model """transr""" +910 57 loss """softplus""" +910 57 regularizer """no""" +910 57 optimizer """adadelta""" +910 57 training_loop """lcwa""" +910 57 evaluator """rankbased""" +910 58 dataset """kinships""" +910 58 model """transr""" +910 58 loss """softplus""" +910 58 regularizer """no""" +910 58 optimizer """adadelta""" +910 58 training_loop """lcwa""" +910 58 evaluator """rankbased""" +910 59 dataset """kinships""" +910 59 model """transr""" +910 59 loss """softplus""" +910 59 regularizer """no""" +910 59 optimizer """adadelta""" +910 59 training_loop """lcwa""" +910 59 evaluator """rankbased""" +910 60 dataset """kinships""" +910 60 model """transr""" +910 60 loss """softplus""" +910 60 regularizer """no""" +910 60 optimizer """adadelta""" +910 60 training_loop """lcwa""" +910 60 evaluator """rankbased""" +910 61 dataset """kinships""" +910 61 model """transr""" +910 61 loss """softplus""" +910 61 regularizer """no""" +910 61 optimizer """adadelta""" +910 61 training_loop """lcwa""" +910 61 evaluator """rankbased""" +910 62 dataset """kinships""" +910 62 model """transr""" +910 62 loss """softplus""" +910 62 regularizer """no""" +910 62 optimizer """adadelta""" +910 62 training_loop """lcwa""" +910 62 evaluator """rankbased""" +910 63 dataset """kinships""" +910 63 model """transr""" +910 63 loss """softplus""" +910 63 regularizer """no""" +910 63 optimizer """adadelta""" +910 63 training_loop """lcwa""" +910 63 evaluator """rankbased""" +910 64 dataset """kinships""" +910 64 model """transr""" +910 64 
loss """softplus""" +910 64 regularizer """no""" +910 64 optimizer """adadelta""" +910 64 training_loop """lcwa""" +910 64 evaluator """rankbased""" +910 65 dataset """kinships""" +910 65 model """transr""" +910 65 loss """softplus""" +910 65 regularizer """no""" +910 65 optimizer """adadelta""" +910 65 training_loop """lcwa""" +910 65 evaluator """rankbased""" +910 66 dataset """kinships""" +910 66 model """transr""" +910 66 loss """softplus""" +910 66 regularizer """no""" +910 66 optimizer """adadelta""" +910 66 training_loop """lcwa""" +910 66 evaluator """rankbased""" +910 67 dataset """kinships""" +910 67 model """transr""" +910 67 loss """softplus""" +910 67 regularizer """no""" +910 67 optimizer """adadelta""" +910 67 training_loop """lcwa""" +910 67 evaluator """rankbased""" +910 68 dataset """kinships""" +910 68 model """transr""" +910 68 loss """softplus""" +910 68 regularizer """no""" +910 68 optimizer """adadelta""" +910 68 training_loop """lcwa""" +910 68 evaluator """rankbased""" +910 69 dataset """kinships""" +910 69 model """transr""" +910 69 loss """softplus""" +910 69 regularizer """no""" +910 69 optimizer """adadelta""" +910 69 training_loop """lcwa""" +910 69 evaluator """rankbased""" +910 70 dataset """kinships""" +910 70 model """transr""" +910 70 loss """softplus""" +910 70 regularizer """no""" +910 70 optimizer """adadelta""" +910 70 training_loop """lcwa""" +910 70 evaluator """rankbased""" +910 71 dataset """kinships""" +910 71 model """transr""" +910 71 loss """softplus""" +910 71 regularizer """no""" +910 71 optimizer """adadelta""" +910 71 training_loop """lcwa""" +910 71 evaluator """rankbased""" +910 72 dataset """kinships""" +910 72 model """transr""" +910 72 loss """softplus""" +910 72 regularizer """no""" +910 72 optimizer """adadelta""" +910 72 training_loop """lcwa""" +910 72 evaluator """rankbased""" +910 73 dataset """kinships""" +910 73 model """transr""" +910 73 loss """softplus""" +910 73 regularizer """no""" +910 73 
optimizer """adadelta""" +910 73 training_loop """lcwa""" +910 73 evaluator """rankbased""" +910 74 dataset """kinships""" +910 74 model """transr""" +910 74 loss """softplus""" +910 74 regularizer """no""" +910 74 optimizer """adadelta""" +910 74 training_loop """lcwa""" +910 74 evaluator """rankbased""" +910 75 dataset """kinships""" +910 75 model """transr""" +910 75 loss """softplus""" +910 75 regularizer """no""" +910 75 optimizer """adadelta""" +910 75 training_loop """lcwa""" +910 75 evaluator """rankbased""" +910 76 dataset """kinships""" +910 76 model """transr""" +910 76 loss """softplus""" +910 76 regularizer """no""" +910 76 optimizer """adadelta""" +910 76 training_loop """lcwa""" +910 76 evaluator """rankbased""" +910 77 dataset """kinships""" +910 77 model """transr""" +910 77 loss """softplus""" +910 77 regularizer """no""" +910 77 optimizer """adadelta""" +910 77 training_loop """lcwa""" +910 77 evaluator """rankbased""" +910 78 dataset """kinships""" +910 78 model """transr""" +910 78 loss """softplus""" +910 78 regularizer """no""" +910 78 optimizer """adadelta""" +910 78 training_loop """lcwa""" +910 78 evaluator """rankbased""" +910 79 dataset """kinships""" +910 79 model """transr""" +910 79 loss """softplus""" +910 79 regularizer """no""" +910 79 optimizer """adadelta""" +910 79 training_loop """lcwa""" +910 79 evaluator """rankbased""" +910 80 dataset """kinships""" +910 80 model """transr""" +910 80 loss """softplus""" +910 80 regularizer """no""" +910 80 optimizer """adadelta""" +910 80 training_loop """lcwa""" +910 80 evaluator """rankbased""" +910 81 dataset """kinships""" +910 81 model """transr""" +910 81 loss """softplus""" +910 81 regularizer """no""" +910 81 optimizer """adadelta""" +910 81 training_loop """lcwa""" +910 81 evaluator """rankbased""" +910 82 dataset """kinships""" +910 82 model """transr""" +910 82 loss """softplus""" +910 82 regularizer """no""" +910 82 optimizer """adadelta""" +910 82 training_loop """lcwa""" +910 
82 evaluator """rankbased""" +910 83 dataset """kinships""" +910 83 model """transr""" +910 83 loss """softplus""" +910 83 regularizer """no""" +910 83 optimizer """adadelta""" +910 83 training_loop """lcwa""" +910 83 evaluator """rankbased""" +910 84 dataset """kinships""" +910 84 model """transr""" +910 84 loss """softplus""" +910 84 regularizer """no""" +910 84 optimizer """adadelta""" +910 84 training_loop """lcwa""" +910 84 evaluator """rankbased""" +910 85 dataset """kinships""" +910 85 model """transr""" +910 85 loss """softplus""" +910 85 regularizer """no""" +910 85 optimizer """adadelta""" +910 85 training_loop """lcwa""" +910 85 evaluator """rankbased""" +910 86 dataset """kinships""" +910 86 model """transr""" +910 86 loss """softplus""" +910 86 regularizer """no""" +910 86 optimizer """adadelta""" +910 86 training_loop """lcwa""" +910 86 evaluator """rankbased""" +910 87 dataset """kinships""" +910 87 model """transr""" +910 87 loss """softplus""" +910 87 regularizer """no""" +910 87 optimizer """adadelta""" +910 87 training_loop """lcwa""" +910 87 evaluator """rankbased""" +910 88 dataset """kinships""" +910 88 model """transr""" +910 88 loss """softplus""" +910 88 regularizer """no""" +910 88 optimizer """adadelta""" +910 88 training_loop """lcwa""" +910 88 evaluator """rankbased""" +910 89 dataset """kinships""" +910 89 model """transr""" +910 89 loss """softplus""" +910 89 regularizer """no""" +910 89 optimizer """adadelta""" +910 89 training_loop """lcwa""" +910 89 evaluator """rankbased""" +910 90 dataset """kinships""" +910 90 model """transr""" +910 90 loss """softplus""" +910 90 regularizer """no""" +910 90 optimizer """adadelta""" +910 90 training_loop """lcwa""" +910 90 evaluator """rankbased""" +910 91 dataset """kinships""" +910 91 model """transr""" +910 91 loss """softplus""" +910 91 regularizer """no""" +910 91 optimizer """adadelta""" +910 91 training_loop """lcwa""" +910 91 evaluator """rankbased""" +910 92 dataset """kinships""" +910 
92 model """transr""" +910 92 loss """softplus""" +910 92 regularizer """no""" +910 92 optimizer """adadelta""" +910 92 training_loop """lcwa""" +910 92 evaluator """rankbased""" +910 93 dataset """kinships""" +910 93 model """transr""" +910 93 loss """softplus""" +910 93 regularizer """no""" +910 93 optimizer """adadelta""" +910 93 training_loop """lcwa""" +910 93 evaluator """rankbased""" +910 94 dataset """kinships""" +910 94 model """transr""" +910 94 loss """softplus""" +910 94 regularizer """no""" +910 94 optimizer """adadelta""" +910 94 training_loop """lcwa""" +910 94 evaluator """rankbased""" +910 95 dataset """kinships""" +910 95 model """transr""" +910 95 loss """softplus""" +910 95 regularizer """no""" +910 95 optimizer """adadelta""" +910 95 training_loop """lcwa""" +910 95 evaluator """rankbased""" +910 96 dataset """kinships""" +910 96 model """transr""" +910 96 loss """softplus""" +910 96 regularizer """no""" +910 96 optimizer """adadelta""" +910 96 training_loop """lcwa""" +910 96 evaluator """rankbased""" +910 97 dataset """kinships""" +910 97 model """transr""" +910 97 loss """softplus""" +910 97 regularizer """no""" +910 97 optimizer """adadelta""" +910 97 training_loop """lcwa""" +910 97 evaluator """rankbased""" +910 98 dataset """kinships""" +910 98 model """transr""" +910 98 loss """softplus""" +910 98 regularizer """no""" +910 98 optimizer """adadelta""" +910 98 training_loop """lcwa""" +910 98 evaluator """rankbased""" +910 99 dataset """kinships""" +910 99 model """transr""" +910 99 loss """softplus""" +910 99 regularizer """no""" +910 99 optimizer """adadelta""" +910 99 training_loop """lcwa""" +910 99 evaluator """rankbased""" +910 100 dataset """kinships""" +910 100 model """transr""" +910 100 loss """softplus""" +910 100 regularizer """no""" +910 100 optimizer """adadelta""" +910 100 training_loop """lcwa""" +910 100 evaluator """rankbased""" +911 1 model.embedding_dim 2.0 +911 1 model.relation_dim 2.0 +911 1 model.scoring_fct_norm 
2.0 +911 1 training.batch_size 2.0 +911 1 training.label_smoothing 0.003480406019858971 +911 2 model.embedding_dim 0.0 +911 2 model.relation_dim 2.0 +911 2 model.scoring_fct_norm 1.0 +911 2 training.batch_size 0.0 +911 2 training.label_smoothing 0.05547307926033637 +911 3 model.embedding_dim 2.0 +911 3 model.relation_dim 1.0 +911 3 model.scoring_fct_norm 2.0 +911 3 training.batch_size 2.0 +911 3 training.label_smoothing 0.007204266180294023 +911 4 model.embedding_dim 2.0 +911 4 model.relation_dim 1.0 +911 4 model.scoring_fct_norm 1.0 +911 4 training.batch_size 2.0 +911 4 training.label_smoothing 0.10253733191281157 +911 5 model.embedding_dim 0.0 +911 5 model.relation_dim 0.0 +911 5 model.scoring_fct_norm 1.0 +911 5 training.batch_size 1.0 +911 5 training.label_smoothing 0.026791600617244053 +911 6 model.embedding_dim 1.0 +911 6 model.relation_dim 1.0 +911 6 model.scoring_fct_norm 1.0 +911 6 training.batch_size 1.0 +911 6 training.label_smoothing 0.03568009570110202 +911 7 model.embedding_dim 0.0 +911 7 model.relation_dim 0.0 +911 7 model.scoring_fct_norm 2.0 +911 7 training.batch_size 0.0 +911 7 training.label_smoothing 0.2937168878704844 +911 8 model.embedding_dim 1.0 +911 8 model.relation_dim 2.0 +911 8 model.scoring_fct_norm 2.0 +911 8 training.batch_size 2.0 +911 8 training.label_smoothing 0.3753105876384571 +911 9 model.embedding_dim 1.0 +911 9 model.relation_dim 2.0 +911 9 model.scoring_fct_norm 1.0 +911 9 training.batch_size 2.0 +911 9 training.label_smoothing 0.0023204728356570237 +911 10 model.embedding_dim 1.0 +911 10 model.relation_dim 2.0 +911 10 model.scoring_fct_norm 1.0 +911 10 training.batch_size 2.0 +911 10 training.label_smoothing 0.0582852894947231 +911 11 model.embedding_dim 1.0 +911 11 model.relation_dim 1.0 +911 11 model.scoring_fct_norm 1.0 +911 11 training.batch_size 1.0 +911 11 training.label_smoothing 0.006431542641538454 +911 12 model.embedding_dim 1.0 +911 12 model.relation_dim 1.0 +911 12 model.scoring_fct_norm 1.0 +911 12 
training.batch_size 0.0 +911 12 training.label_smoothing 0.17271028040952308 +911 13 model.embedding_dim 1.0 +911 13 model.relation_dim 0.0 +911 13 model.scoring_fct_norm 2.0 +911 13 training.batch_size 1.0 +911 13 training.label_smoothing 0.013464228813313515 +911 14 model.embedding_dim 1.0 +911 14 model.relation_dim 2.0 +911 14 model.scoring_fct_norm 2.0 +911 14 training.batch_size 0.0 +911 14 training.label_smoothing 0.002573135013351119 +911 15 model.embedding_dim 0.0 +911 15 model.relation_dim 2.0 +911 15 model.scoring_fct_norm 1.0 +911 15 training.batch_size 0.0 +911 15 training.label_smoothing 0.09876075468888554 +911 16 model.embedding_dim 2.0 +911 16 model.relation_dim 0.0 +911 16 model.scoring_fct_norm 1.0 +911 16 training.batch_size 0.0 +911 16 training.label_smoothing 0.5874113277343564 +911 17 model.embedding_dim 1.0 +911 17 model.relation_dim 1.0 +911 17 model.scoring_fct_norm 1.0 +911 17 training.batch_size 1.0 +911 17 training.label_smoothing 0.07044627137771232 +911 18 model.embedding_dim 1.0 +911 18 model.relation_dim 1.0 +911 18 model.scoring_fct_norm 1.0 +911 18 training.batch_size 0.0 +911 18 training.label_smoothing 0.4016077713846595 +911 19 model.embedding_dim 1.0 +911 19 model.relation_dim 1.0 +911 19 model.scoring_fct_norm 2.0 +911 19 training.batch_size 2.0 +911 19 training.label_smoothing 0.5547706795429364 +911 20 model.embedding_dim 1.0 +911 20 model.relation_dim 0.0 +911 20 model.scoring_fct_norm 2.0 +911 20 training.batch_size 1.0 +911 20 training.label_smoothing 0.003987840717652312 +911 21 model.embedding_dim 2.0 +911 21 model.relation_dim 0.0 +911 21 model.scoring_fct_norm 1.0 +911 21 training.batch_size 1.0 +911 21 training.label_smoothing 0.006513234586659412 +911 22 model.embedding_dim 1.0 +911 22 model.relation_dim 1.0 +911 22 model.scoring_fct_norm 2.0 +911 22 training.batch_size 0.0 +911 22 training.label_smoothing 0.36971827273567104 +911 23 model.embedding_dim 0.0 +911 23 model.relation_dim 2.0 +911 23 
model.scoring_fct_norm 2.0 +911 23 training.batch_size 0.0 +911 23 training.label_smoothing 0.0010980845949901225 +911 24 model.embedding_dim 0.0 +911 24 model.relation_dim 1.0 +911 24 model.scoring_fct_norm 1.0 +911 24 training.batch_size 0.0 +911 24 training.label_smoothing 0.00175306942605228 +911 25 model.embedding_dim 2.0 +911 25 model.relation_dim 0.0 +911 25 model.scoring_fct_norm 1.0 +911 25 training.batch_size 2.0 +911 25 training.label_smoothing 0.03162163688702793 +911 26 model.embedding_dim 0.0 +911 26 model.relation_dim 0.0 +911 26 model.scoring_fct_norm 1.0 +911 26 training.batch_size 0.0 +911 26 training.label_smoothing 0.6455621157396555 +911 27 model.embedding_dim 2.0 +911 27 model.relation_dim 0.0 +911 27 model.scoring_fct_norm 2.0 +911 27 training.batch_size 0.0 +911 27 training.label_smoothing 0.3960569463674082 +911 28 model.embedding_dim 0.0 +911 28 model.relation_dim 0.0 +911 28 model.scoring_fct_norm 2.0 +911 28 training.batch_size 2.0 +911 28 training.label_smoothing 0.20626609552130618 +911 29 model.embedding_dim 1.0 +911 29 model.relation_dim 0.0 +911 29 model.scoring_fct_norm 2.0 +911 29 training.batch_size 0.0 +911 29 training.label_smoothing 0.002950279099161315 +911 30 model.embedding_dim 0.0 +911 30 model.relation_dim 0.0 +911 30 model.scoring_fct_norm 1.0 +911 30 training.batch_size 0.0 +911 30 training.label_smoothing 0.0020420627171502687 +911 31 model.embedding_dim 1.0 +911 31 model.relation_dim 0.0 +911 31 model.scoring_fct_norm 1.0 +911 31 training.batch_size 1.0 +911 31 training.label_smoothing 0.21356106126411317 +911 32 model.embedding_dim 1.0 +911 32 model.relation_dim 2.0 +911 32 model.scoring_fct_norm 2.0 +911 32 training.batch_size 1.0 +911 32 training.label_smoothing 0.003936800181300575 +911 33 model.embedding_dim 1.0 +911 33 model.relation_dim 0.0 +911 33 model.scoring_fct_norm 2.0 +911 33 training.batch_size 2.0 +911 33 training.label_smoothing 0.010339645083369467 +911 34 model.embedding_dim 0.0 +911 34 
model.relation_dim 1.0 +911 34 model.scoring_fct_norm 2.0 +911 34 training.batch_size 1.0 +911 34 training.label_smoothing 0.34970892115939334 +911 35 model.embedding_dim 2.0 +911 35 model.relation_dim 0.0 +911 35 model.scoring_fct_norm 2.0 +911 35 training.batch_size 2.0 +911 35 training.label_smoothing 0.052157767786096966 +911 36 model.embedding_dim 2.0 +911 36 model.relation_dim 0.0 +911 36 model.scoring_fct_norm 2.0 +911 36 training.batch_size 1.0 +911 36 training.label_smoothing 0.3404806537189659 +911 37 model.embedding_dim 2.0 +911 37 model.relation_dim 2.0 +911 37 model.scoring_fct_norm 2.0 +911 37 training.batch_size 0.0 +911 37 training.label_smoothing 0.007481996224757204 +911 38 model.embedding_dim 2.0 +911 38 model.relation_dim 1.0 +911 38 model.scoring_fct_norm 2.0 +911 38 training.batch_size 0.0 +911 38 training.label_smoothing 0.15888857243519394 +911 39 model.embedding_dim 1.0 +911 39 model.relation_dim 0.0 +911 39 model.scoring_fct_norm 1.0 +911 39 training.batch_size 2.0 +911 39 training.label_smoothing 0.6862637346507579 +911 40 model.embedding_dim 1.0 +911 40 model.relation_dim 0.0 +911 40 model.scoring_fct_norm 2.0 +911 40 training.batch_size 1.0 +911 40 training.label_smoothing 0.0032400261120877545 +911 41 model.embedding_dim 2.0 +911 41 model.relation_dim 2.0 +911 41 model.scoring_fct_norm 2.0 +911 41 training.batch_size 0.0 +911 41 training.label_smoothing 0.014624869674727363 +911 42 model.embedding_dim 1.0 +911 42 model.relation_dim 1.0 +911 42 model.scoring_fct_norm 2.0 +911 42 training.batch_size 2.0 +911 42 training.label_smoothing 0.01255461460728767 +911 43 model.embedding_dim 0.0 +911 43 model.relation_dim 0.0 +911 43 model.scoring_fct_norm 2.0 +911 43 training.batch_size 2.0 +911 43 training.label_smoothing 0.06721295274269212 +911 44 model.embedding_dim 0.0 +911 44 model.relation_dim 2.0 +911 44 model.scoring_fct_norm 2.0 +911 44 training.batch_size 2.0 +911 44 training.label_smoothing 0.0010151842921089587 +911 45 
model.embedding_dim 2.0 +911 45 model.relation_dim 1.0 +911 45 model.scoring_fct_norm 1.0 +911 45 training.batch_size 2.0 +911 45 training.label_smoothing 0.21964456751504843 +911 46 model.embedding_dim 1.0 +911 46 model.relation_dim 0.0 +911 46 model.scoring_fct_norm 2.0 +911 46 training.batch_size 2.0 +911 46 training.label_smoothing 0.04678201671290061 +911 47 model.embedding_dim 0.0 +911 47 model.relation_dim 0.0 +911 47 model.scoring_fct_norm 1.0 +911 47 training.batch_size 1.0 +911 47 training.label_smoothing 0.468501541378668 +911 48 model.embedding_dim 2.0 +911 48 model.relation_dim 0.0 +911 48 model.scoring_fct_norm 1.0 +911 48 training.batch_size 0.0 +911 48 training.label_smoothing 0.10719633173475633 +911 49 model.embedding_dim 2.0 +911 49 model.relation_dim 1.0 +911 49 model.scoring_fct_norm 2.0 +911 49 training.batch_size 2.0 +911 49 training.label_smoothing 0.02581033494780778 +911 50 model.embedding_dim 0.0 +911 50 model.relation_dim 2.0 +911 50 model.scoring_fct_norm 1.0 +911 50 training.batch_size 2.0 +911 50 training.label_smoothing 0.09600792588993767 +911 51 model.embedding_dim 0.0 +911 51 model.relation_dim 2.0 +911 51 model.scoring_fct_norm 2.0 +911 51 training.batch_size 2.0 +911 51 training.label_smoothing 0.0016615058757153897 +911 52 model.embedding_dim 2.0 +911 52 model.relation_dim 0.0 +911 52 model.scoring_fct_norm 2.0 +911 52 training.batch_size 0.0 +911 52 training.label_smoothing 0.0036704863026068507 +911 53 model.embedding_dim 1.0 +911 53 model.relation_dim 1.0 +911 53 model.scoring_fct_norm 2.0 +911 53 training.batch_size 0.0 +911 53 training.label_smoothing 0.6231527547522404 +911 54 model.embedding_dim 0.0 +911 54 model.relation_dim 0.0 +911 54 model.scoring_fct_norm 1.0 +911 54 training.batch_size 1.0 +911 54 training.label_smoothing 0.0014314584567301704 +911 55 model.embedding_dim 0.0 +911 55 model.relation_dim 0.0 +911 55 model.scoring_fct_norm 1.0 +911 55 training.batch_size 0.0 +911 55 training.label_smoothing 
0.00844619806011296 +911 56 model.embedding_dim 0.0 +911 56 model.relation_dim 2.0 +911 56 model.scoring_fct_norm 2.0 +911 56 training.batch_size 0.0 +911 56 training.label_smoothing 0.8232355816904099 +911 57 model.embedding_dim 1.0 +911 57 model.relation_dim 1.0 +911 57 model.scoring_fct_norm 1.0 +911 57 training.batch_size 1.0 +911 57 training.label_smoothing 0.034551853702050384 +911 58 model.embedding_dim 2.0 +911 58 model.relation_dim 1.0 +911 58 model.scoring_fct_norm 2.0 +911 58 training.batch_size 2.0 +911 58 training.label_smoothing 0.06160753457694312 +911 59 model.embedding_dim 0.0 +911 59 model.relation_dim 2.0 +911 59 model.scoring_fct_norm 2.0 +911 59 training.batch_size 0.0 +911 59 training.label_smoothing 0.04180105389189708 +911 60 model.embedding_dim 2.0 +911 60 model.relation_dim 0.0 +911 60 model.scoring_fct_norm 2.0 +911 60 training.batch_size 0.0 +911 60 training.label_smoothing 0.0014317494099025517 +911 61 model.embedding_dim 1.0 +911 61 model.relation_dim 1.0 +911 61 model.scoring_fct_norm 1.0 +911 61 training.batch_size 0.0 +911 61 training.label_smoothing 0.014341084061883317 +911 62 model.embedding_dim 1.0 +911 62 model.relation_dim 1.0 +911 62 model.scoring_fct_norm 2.0 +911 62 training.batch_size 0.0 +911 62 training.label_smoothing 0.001237977757625124 +911 63 model.embedding_dim 2.0 +911 63 model.relation_dim 2.0 +911 63 model.scoring_fct_norm 2.0 +911 63 training.batch_size 2.0 +911 63 training.label_smoothing 0.1961329070571634 +911 64 model.embedding_dim 1.0 +911 64 model.relation_dim 2.0 +911 64 model.scoring_fct_norm 2.0 +911 64 training.batch_size 0.0 +911 64 training.label_smoothing 0.039429463340093715 +911 65 model.embedding_dim 0.0 +911 65 model.relation_dim 0.0 +911 65 model.scoring_fct_norm 1.0 +911 65 training.batch_size 2.0 +911 65 training.label_smoothing 0.018293412462040817 +911 66 model.embedding_dim 0.0 +911 66 model.relation_dim 1.0 +911 66 model.scoring_fct_norm 2.0 +911 66 training.batch_size 0.0 +911 66 
training.label_smoothing 0.994676763359722 +911 67 model.embedding_dim 2.0 +911 67 model.relation_dim 0.0 +911 67 model.scoring_fct_norm 2.0 +911 67 training.batch_size 0.0 +911 67 training.label_smoothing 0.20700698425281575 +911 68 model.embedding_dim 1.0 +911 68 model.relation_dim 1.0 +911 68 model.scoring_fct_norm 1.0 +911 68 training.batch_size 0.0 +911 68 training.label_smoothing 0.0014680651868107504 +911 69 model.embedding_dim 0.0 +911 69 model.relation_dim 1.0 +911 69 model.scoring_fct_norm 2.0 +911 69 training.batch_size 2.0 +911 69 training.label_smoothing 0.04437008004123211 +911 70 model.embedding_dim 0.0 +911 70 model.relation_dim 2.0 +911 70 model.scoring_fct_norm 1.0 +911 70 training.batch_size 0.0 +911 70 training.label_smoothing 0.009673883552683822 +911 71 model.embedding_dim 2.0 +911 71 model.relation_dim 0.0 +911 71 model.scoring_fct_norm 1.0 +911 71 training.batch_size 2.0 +911 71 training.label_smoothing 0.0017499141993577152 +911 72 model.embedding_dim 2.0 +911 72 model.relation_dim 1.0 +911 72 model.scoring_fct_norm 1.0 +911 72 training.batch_size 0.0 +911 72 training.label_smoothing 0.001235400593717285 +911 73 model.embedding_dim 0.0 +911 73 model.relation_dim 2.0 +911 73 model.scoring_fct_norm 1.0 +911 73 training.batch_size 1.0 +911 73 training.label_smoothing 0.04483454734068868 +911 74 model.embedding_dim 0.0 +911 74 model.relation_dim 2.0 +911 74 model.scoring_fct_norm 2.0 +911 74 training.batch_size 0.0 +911 74 training.label_smoothing 0.7689034724466747 +911 75 model.embedding_dim 2.0 +911 75 model.relation_dim 1.0 +911 75 model.scoring_fct_norm 1.0 +911 75 training.batch_size 2.0 +911 75 training.label_smoothing 0.027671857542537755 +911 76 model.embedding_dim 0.0 +911 76 model.relation_dim 0.0 +911 76 model.scoring_fct_norm 2.0 +911 76 training.batch_size 1.0 +911 76 training.label_smoothing 0.5907910096724175 +911 77 model.embedding_dim 2.0 +911 77 model.relation_dim 2.0 +911 77 model.scoring_fct_norm 1.0 +911 77 
training.batch_size 1.0 +911 77 training.label_smoothing 0.002147974531181716 +911 78 model.embedding_dim 1.0 +911 78 model.relation_dim 1.0 +911 78 model.scoring_fct_norm 1.0 +911 78 training.batch_size 0.0 +911 78 training.label_smoothing 0.7990903811327997 +911 79 model.embedding_dim 2.0 +911 79 model.relation_dim 1.0 +911 79 model.scoring_fct_norm 2.0 +911 79 training.batch_size 1.0 +911 79 training.label_smoothing 0.09039674526983969 +911 80 model.embedding_dim 0.0 +911 80 model.relation_dim 1.0 +911 80 model.scoring_fct_norm 2.0 +911 80 training.batch_size 1.0 +911 80 training.label_smoothing 0.02568332959106784 +911 81 model.embedding_dim 0.0 +911 81 model.relation_dim 0.0 +911 81 model.scoring_fct_norm 2.0 +911 81 training.batch_size 0.0 +911 81 training.label_smoothing 0.003390985096890736 +911 82 model.embedding_dim 1.0 +911 82 model.relation_dim 1.0 +911 82 model.scoring_fct_norm 2.0 +911 82 training.batch_size 1.0 +911 82 training.label_smoothing 0.0018753277218666563 +911 83 model.embedding_dim 1.0 +911 83 model.relation_dim 0.0 +911 83 model.scoring_fct_norm 1.0 +911 83 training.batch_size 1.0 +911 83 training.label_smoothing 0.005806137467979438 +911 84 model.embedding_dim 1.0 +911 84 model.relation_dim 2.0 +911 84 model.scoring_fct_norm 1.0 +911 84 training.batch_size 1.0 +911 84 training.label_smoothing 0.023489704037413572 +911 85 model.embedding_dim 1.0 +911 85 model.relation_dim 2.0 +911 85 model.scoring_fct_norm 1.0 +911 85 training.batch_size 2.0 +911 85 training.label_smoothing 0.016142496324286066 +911 86 model.embedding_dim 2.0 +911 86 model.relation_dim 2.0 +911 86 model.scoring_fct_norm 2.0 +911 86 training.batch_size 2.0 +911 86 training.label_smoothing 0.02336169269921125 +911 87 model.embedding_dim 2.0 +911 87 model.relation_dim 1.0 +911 87 model.scoring_fct_norm 1.0 +911 87 training.batch_size 2.0 +911 87 training.label_smoothing 0.015753460991469737 +911 88 model.embedding_dim 0.0 +911 88 model.relation_dim 2.0 +911 88 
model.scoring_fct_norm 1.0 +911 88 training.batch_size 0.0 +911 88 training.label_smoothing 0.22273203831104704 +911 89 model.embedding_dim 2.0 +911 89 model.relation_dim 1.0 +911 89 model.scoring_fct_norm 2.0 +911 89 training.batch_size 1.0 +911 89 training.label_smoothing 0.9121563906004497 +911 90 model.embedding_dim 2.0 +911 90 model.relation_dim 2.0 +911 90 model.scoring_fct_norm 2.0 +911 90 training.batch_size 0.0 +911 90 training.label_smoothing 0.0746168028736142 +911 91 model.embedding_dim 2.0 +911 91 model.relation_dim 2.0 +911 91 model.scoring_fct_norm 1.0 +911 91 training.batch_size 1.0 +911 91 training.label_smoothing 0.0014082456023662608 +911 92 model.embedding_dim 1.0 +911 92 model.relation_dim 1.0 +911 92 model.scoring_fct_norm 1.0 +911 92 training.batch_size 0.0 +911 92 training.label_smoothing 0.8605908278128469 +911 93 model.embedding_dim 1.0 +911 93 model.relation_dim 1.0 +911 93 model.scoring_fct_norm 2.0 +911 93 training.batch_size 1.0 +911 93 training.label_smoothing 0.020714481151708503 +911 94 model.embedding_dim 2.0 +911 94 model.relation_dim 1.0 +911 94 model.scoring_fct_norm 2.0 +911 94 training.batch_size 1.0 +911 94 training.label_smoothing 0.2859083987670891 +911 95 model.embedding_dim 0.0 +911 95 model.relation_dim 0.0 +911 95 model.scoring_fct_norm 1.0 +911 95 training.batch_size 2.0 +911 95 training.label_smoothing 0.4712605104963389 +911 96 model.embedding_dim 1.0 +911 96 model.relation_dim 1.0 +911 96 model.scoring_fct_norm 2.0 +911 96 training.batch_size 1.0 +911 96 training.label_smoothing 0.3243721494927487 +911 97 model.embedding_dim 2.0 +911 97 model.relation_dim 2.0 +911 97 model.scoring_fct_norm 2.0 +911 97 training.batch_size 1.0 +911 97 training.label_smoothing 0.20165995586820035 +911 98 model.embedding_dim 0.0 +911 98 model.relation_dim 0.0 +911 98 model.scoring_fct_norm 2.0 +911 98 training.batch_size 2.0 +911 98 training.label_smoothing 0.01108475978735453 +911 99 model.embedding_dim 2.0 +911 99 model.relation_dim 
2.0 +911 99 model.scoring_fct_norm 2.0 +911 99 training.batch_size 1.0 +911 99 training.label_smoothing 0.013238870907416064 +911 100 model.embedding_dim 2.0 +911 100 model.relation_dim 1.0 +911 100 model.scoring_fct_norm 2.0 +911 100 training.batch_size 1.0 +911 100 training.label_smoothing 0.09182492603484622 +911 1 dataset """kinships""" +911 1 model """transr""" +911 1 loss """crossentropy""" +911 1 regularizer """no""" +911 1 optimizer """adadelta""" +911 1 training_loop """lcwa""" +911 1 evaluator """rankbased""" +911 2 dataset """kinships""" +911 2 model """transr""" +911 2 loss """crossentropy""" +911 2 regularizer """no""" +911 2 optimizer """adadelta""" +911 2 training_loop """lcwa""" +911 2 evaluator """rankbased""" +911 3 dataset """kinships""" +911 3 model """transr""" +911 3 loss """crossentropy""" +911 3 regularizer """no""" +911 3 optimizer """adadelta""" +911 3 training_loop """lcwa""" +911 3 evaluator """rankbased""" +911 4 dataset """kinships""" +911 4 model """transr""" +911 4 loss """crossentropy""" +911 4 regularizer """no""" +911 4 optimizer """adadelta""" +911 4 training_loop """lcwa""" +911 4 evaluator """rankbased""" +911 5 dataset """kinships""" +911 5 model """transr""" +911 5 loss """crossentropy""" +911 5 regularizer """no""" +911 5 optimizer """adadelta""" +911 5 training_loop """lcwa""" +911 5 evaluator """rankbased""" +911 6 dataset """kinships""" +911 6 model """transr""" +911 6 loss """crossentropy""" +911 6 regularizer """no""" +911 6 optimizer """adadelta""" +911 6 training_loop """lcwa""" +911 6 evaluator """rankbased""" +911 7 dataset """kinships""" +911 7 model """transr""" +911 7 loss """crossentropy""" +911 7 regularizer """no""" +911 7 optimizer """adadelta""" +911 7 training_loop """lcwa""" +911 7 evaluator """rankbased""" +911 8 dataset """kinships""" +911 8 model """transr""" +911 8 loss """crossentropy""" +911 8 regularizer """no""" +911 8 optimizer """adadelta""" +911 8 training_loop """lcwa""" +911 8 evaluator 
"""rankbased""" +911 9 dataset """kinships""" +911 9 model """transr""" +911 9 loss """crossentropy""" +911 9 regularizer """no""" +911 9 optimizer """adadelta""" +911 9 training_loop """lcwa""" +911 9 evaluator """rankbased""" +911 10 dataset """kinships""" +911 10 model """transr""" +911 10 loss """crossentropy""" +911 10 regularizer """no""" +911 10 optimizer """adadelta""" +911 10 training_loop """lcwa""" +911 10 evaluator """rankbased""" +911 11 dataset """kinships""" +911 11 model """transr""" +911 11 loss """crossentropy""" +911 11 regularizer """no""" +911 11 optimizer """adadelta""" +911 11 training_loop """lcwa""" +911 11 evaluator """rankbased""" +911 12 dataset """kinships""" +911 12 model """transr""" +911 12 loss """crossentropy""" +911 12 regularizer """no""" +911 12 optimizer """adadelta""" +911 12 training_loop """lcwa""" +911 12 evaluator """rankbased""" +911 13 dataset """kinships""" +911 13 model """transr""" +911 13 loss """crossentropy""" +911 13 regularizer """no""" +911 13 optimizer """adadelta""" +911 13 training_loop """lcwa""" +911 13 evaluator """rankbased""" +911 14 dataset """kinships""" +911 14 model """transr""" +911 14 loss """crossentropy""" +911 14 regularizer """no""" +911 14 optimizer """adadelta""" +911 14 training_loop """lcwa""" +911 14 evaluator """rankbased""" +911 15 dataset """kinships""" +911 15 model """transr""" +911 15 loss """crossentropy""" +911 15 regularizer """no""" +911 15 optimizer """adadelta""" +911 15 training_loop """lcwa""" +911 15 evaluator """rankbased""" +911 16 dataset """kinships""" +911 16 model """transr""" +911 16 loss """crossentropy""" +911 16 regularizer """no""" +911 16 optimizer """adadelta""" +911 16 training_loop """lcwa""" +911 16 evaluator """rankbased""" +911 17 dataset """kinships""" +911 17 model """transr""" +911 17 loss """crossentropy""" +911 17 regularizer """no""" +911 17 optimizer """adadelta""" +911 17 training_loop """lcwa""" +911 17 evaluator """rankbased""" +911 18 dataset 
"""kinships""" +911 18 model """transr""" +911 18 loss """crossentropy""" +911 18 regularizer """no""" +911 18 optimizer """adadelta""" +911 18 training_loop """lcwa""" +911 18 evaluator """rankbased""" +911 19 dataset """kinships""" +911 19 model """transr""" +911 19 loss """crossentropy""" +911 19 regularizer """no""" +911 19 optimizer """adadelta""" +911 19 training_loop """lcwa""" +911 19 evaluator """rankbased""" +911 20 dataset """kinships""" +911 20 model """transr""" +911 20 loss """crossentropy""" +911 20 regularizer """no""" +911 20 optimizer """adadelta""" +911 20 training_loop """lcwa""" +911 20 evaluator """rankbased""" +911 21 dataset """kinships""" +911 21 model """transr""" +911 21 loss """crossentropy""" +911 21 regularizer """no""" +911 21 optimizer """adadelta""" +911 21 training_loop """lcwa""" +911 21 evaluator """rankbased""" +911 22 dataset """kinships""" +911 22 model """transr""" +911 22 loss """crossentropy""" +911 22 regularizer """no""" +911 22 optimizer """adadelta""" +911 22 training_loop """lcwa""" +911 22 evaluator """rankbased""" +911 23 dataset """kinships""" +911 23 model """transr""" +911 23 loss """crossentropy""" +911 23 regularizer """no""" +911 23 optimizer """adadelta""" +911 23 training_loop """lcwa""" +911 23 evaluator """rankbased""" +911 24 dataset """kinships""" +911 24 model """transr""" +911 24 loss """crossentropy""" +911 24 regularizer """no""" +911 24 optimizer """adadelta""" +911 24 training_loop """lcwa""" +911 24 evaluator """rankbased""" +911 25 dataset """kinships""" +911 25 model """transr""" +911 25 loss """crossentropy""" +911 25 regularizer """no""" +911 25 optimizer """adadelta""" +911 25 training_loop """lcwa""" +911 25 evaluator """rankbased""" +911 26 dataset """kinships""" +911 26 model """transr""" +911 26 loss """crossentropy""" +911 26 regularizer """no""" +911 26 optimizer """adadelta""" +911 26 training_loop """lcwa""" +911 26 evaluator """rankbased""" +911 27 dataset """kinships""" +911 27 model 
"""transr""" +911 27 loss """crossentropy""" +911 27 regularizer """no""" +911 27 optimizer """adadelta""" +911 27 training_loop """lcwa""" +911 27 evaluator """rankbased""" +911 28 dataset """kinships""" +911 28 model """transr""" +911 28 loss """crossentropy""" +911 28 regularizer """no""" +911 28 optimizer """adadelta""" +911 28 training_loop """lcwa""" +911 28 evaluator """rankbased""" +911 29 dataset """kinships""" +911 29 model """transr""" +911 29 loss """crossentropy""" +911 29 regularizer """no""" +911 29 optimizer """adadelta""" +911 29 training_loop """lcwa""" +911 29 evaluator """rankbased""" +911 30 dataset """kinships""" +911 30 model """transr""" +911 30 loss """crossentropy""" +911 30 regularizer """no""" +911 30 optimizer """adadelta""" +911 30 training_loop """lcwa""" +911 30 evaluator """rankbased""" +911 31 dataset """kinships""" +911 31 model """transr""" +911 31 loss """crossentropy""" +911 31 regularizer """no""" +911 31 optimizer """adadelta""" +911 31 training_loop """lcwa""" +911 31 evaluator """rankbased""" +911 32 dataset """kinships""" +911 32 model """transr""" +911 32 loss """crossentropy""" +911 32 regularizer """no""" +911 32 optimizer """adadelta""" +911 32 training_loop """lcwa""" +911 32 evaluator """rankbased""" +911 33 dataset """kinships""" +911 33 model """transr""" +911 33 loss """crossentropy""" +911 33 regularizer """no""" +911 33 optimizer """adadelta""" +911 33 training_loop """lcwa""" +911 33 evaluator """rankbased""" +911 34 dataset """kinships""" +911 34 model """transr""" +911 34 loss """crossentropy""" +911 34 regularizer """no""" +911 34 optimizer """adadelta""" +911 34 training_loop """lcwa""" +911 34 evaluator """rankbased""" +911 35 dataset """kinships""" +911 35 model """transr""" +911 35 loss """crossentropy""" +911 35 regularizer """no""" +911 35 optimizer """adadelta""" +911 35 training_loop """lcwa""" +911 35 evaluator """rankbased""" +911 36 dataset """kinships""" +911 36 model """transr""" +911 36 loss 
"""crossentropy""" +911 36 regularizer """no""" +911 36 optimizer """adadelta""" +911 36 training_loop """lcwa""" +911 36 evaluator """rankbased""" +911 37 dataset """kinships""" +911 37 model """transr""" +911 37 loss """crossentropy""" +911 37 regularizer """no""" +911 37 optimizer """adadelta""" +911 37 training_loop """lcwa""" +911 37 evaluator """rankbased""" +911 38 dataset """kinships""" +911 38 model """transr""" +911 38 loss """crossentropy""" +911 38 regularizer """no""" +911 38 optimizer """adadelta""" +911 38 training_loop """lcwa""" +911 38 evaluator """rankbased""" +911 39 dataset """kinships""" +911 39 model """transr""" +911 39 loss """crossentropy""" +911 39 regularizer """no""" +911 39 optimizer """adadelta""" +911 39 training_loop """lcwa""" +911 39 evaluator """rankbased""" +911 40 dataset """kinships""" +911 40 model """transr""" +911 40 loss """crossentropy""" +911 40 regularizer """no""" +911 40 optimizer """adadelta""" +911 40 training_loop """lcwa""" +911 40 evaluator """rankbased""" +911 41 dataset """kinships""" +911 41 model """transr""" +911 41 loss """crossentropy""" +911 41 regularizer """no""" +911 41 optimizer """adadelta""" +911 41 training_loop """lcwa""" +911 41 evaluator """rankbased""" +911 42 dataset """kinships""" +911 42 model """transr""" +911 42 loss """crossentropy""" +911 42 regularizer """no""" +911 42 optimizer """adadelta""" +911 42 training_loop """lcwa""" +911 42 evaluator """rankbased""" +911 43 dataset """kinships""" +911 43 model """transr""" +911 43 loss """crossentropy""" +911 43 regularizer """no""" +911 43 optimizer """adadelta""" +911 43 training_loop """lcwa""" +911 43 evaluator """rankbased""" +911 44 dataset """kinships""" +911 44 model """transr""" +911 44 loss """crossentropy""" +911 44 regularizer """no""" +911 44 optimizer """adadelta""" +911 44 training_loop """lcwa""" +911 44 evaluator """rankbased""" +911 45 dataset """kinships""" +911 45 model """transr""" +911 45 loss """crossentropy""" +911 45 
regularizer """no""" +911 45 optimizer """adadelta""" +911 45 training_loop """lcwa""" +911 45 evaluator """rankbased""" +911 46 dataset """kinships""" +911 46 model """transr""" +911 46 loss """crossentropy""" +911 46 regularizer """no""" +911 46 optimizer """adadelta""" +911 46 training_loop """lcwa""" +911 46 evaluator """rankbased""" +911 47 dataset """kinships""" +911 47 model """transr""" +911 47 loss """crossentropy""" +911 47 regularizer """no""" +911 47 optimizer """adadelta""" +911 47 training_loop """lcwa""" +911 47 evaluator """rankbased""" +911 48 dataset """kinships""" +911 48 model """transr""" +911 48 loss """crossentropy""" +911 48 regularizer """no""" +911 48 optimizer """adadelta""" +911 48 training_loop """lcwa""" +911 48 evaluator """rankbased""" +911 49 dataset """kinships""" +911 49 model """transr""" +911 49 loss """crossentropy""" +911 49 regularizer """no""" +911 49 optimizer """adadelta""" +911 49 training_loop """lcwa""" +911 49 evaluator """rankbased""" +911 50 dataset """kinships""" +911 50 model """transr""" +911 50 loss """crossentropy""" +911 50 regularizer """no""" +911 50 optimizer """adadelta""" +911 50 training_loop """lcwa""" +911 50 evaluator """rankbased""" +911 51 dataset """kinships""" +911 51 model """transr""" +911 51 loss """crossentropy""" +911 51 regularizer """no""" +911 51 optimizer """adadelta""" +911 51 training_loop """lcwa""" +911 51 evaluator """rankbased""" +911 52 dataset """kinships""" +911 52 model """transr""" +911 52 loss """crossentropy""" +911 52 regularizer """no""" +911 52 optimizer """adadelta""" +911 52 training_loop """lcwa""" +911 52 evaluator """rankbased""" +911 53 dataset """kinships""" +911 53 model """transr""" +911 53 loss """crossentropy""" +911 53 regularizer """no""" +911 53 optimizer """adadelta""" +911 53 training_loop """lcwa""" +911 53 evaluator """rankbased""" +911 54 dataset """kinships""" +911 54 model """transr""" +911 54 loss """crossentropy""" +911 54 regularizer """no""" +911 54 
optimizer """adadelta""" +911 54 training_loop """lcwa""" +911 54 evaluator """rankbased""" +911 55 dataset """kinships""" +911 55 model """transr""" +911 55 loss """crossentropy""" +911 55 regularizer """no""" +911 55 optimizer """adadelta""" +911 55 training_loop """lcwa""" +911 55 evaluator """rankbased""" +911 56 dataset """kinships""" +911 56 model """transr""" +911 56 loss """crossentropy""" +911 56 regularizer """no""" +911 56 optimizer """adadelta""" +911 56 training_loop """lcwa""" +911 56 evaluator """rankbased""" +911 57 dataset """kinships""" +911 57 model """transr""" +911 57 loss """crossentropy""" +911 57 regularizer """no""" +911 57 optimizer """adadelta""" +911 57 training_loop """lcwa""" +911 57 evaluator """rankbased""" +911 58 dataset """kinships""" +911 58 model """transr""" +911 58 loss """crossentropy""" +911 58 regularizer """no""" +911 58 optimizer """adadelta""" +911 58 training_loop """lcwa""" +911 58 evaluator """rankbased""" +911 59 dataset """kinships""" +911 59 model """transr""" +911 59 loss """crossentropy""" +911 59 regularizer """no""" +911 59 optimizer """adadelta""" +911 59 training_loop """lcwa""" +911 59 evaluator """rankbased""" +911 60 dataset """kinships""" +911 60 model """transr""" +911 60 loss """crossentropy""" +911 60 regularizer """no""" +911 60 optimizer """adadelta""" +911 60 training_loop """lcwa""" +911 60 evaluator """rankbased""" +911 61 dataset """kinships""" +911 61 model """transr""" +911 61 loss """crossentropy""" +911 61 regularizer """no""" +911 61 optimizer """adadelta""" +911 61 training_loop """lcwa""" +911 61 evaluator """rankbased""" +911 62 dataset """kinships""" +911 62 model """transr""" +911 62 loss """crossentropy""" +911 62 regularizer """no""" +911 62 optimizer """adadelta""" +911 62 training_loop """lcwa""" +911 62 evaluator """rankbased""" +911 63 dataset """kinships""" +911 63 model """transr""" +911 63 loss """crossentropy""" +911 63 regularizer """no""" +911 63 optimizer """adadelta""" 
+911 63 training_loop """lcwa""" +911 63 evaluator """rankbased""" +911 64 dataset """kinships""" +911 64 model """transr""" +911 64 loss """crossentropy""" +911 64 regularizer """no""" +911 64 optimizer """adadelta""" +911 64 training_loop """lcwa""" +911 64 evaluator """rankbased""" +911 65 dataset """kinships""" +911 65 model """transr""" +911 65 loss """crossentropy""" +911 65 regularizer """no""" +911 65 optimizer """adadelta""" +911 65 training_loop """lcwa""" +911 65 evaluator """rankbased""" +911 66 dataset """kinships""" +911 66 model """transr""" +911 66 loss """crossentropy""" +911 66 regularizer """no""" +911 66 optimizer """adadelta""" +911 66 training_loop """lcwa""" +911 66 evaluator """rankbased""" +911 67 dataset """kinships""" +911 67 model """transr""" +911 67 loss """crossentropy""" +911 67 regularizer """no""" +911 67 optimizer """adadelta""" +911 67 training_loop """lcwa""" +911 67 evaluator """rankbased""" +911 68 dataset """kinships""" +911 68 model """transr""" +911 68 loss """crossentropy""" +911 68 regularizer """no""" +911 68 optimizer """adadelta""" +911 68 training_loop """lcwa""" +911 68 evaluator """rankbased""" +911 69 dataset """kinships""" +911 69 model """transr""" +911 69 loss """crossentropy""" +911 69 regularizer """no""" +911 69 optimizer """adadelta""" +911 69 training_loop """lcwa""" +911 69 evaluator """rankbased""" +911 70 dataset """kinships""" +911 70 model """transr""" +911 70 loss """crossentropy""" +911 70 regularizer """no""" +911 70 optimizer """adadelta""" +911 70 training_loop """lcwa""" +911 70 evaluator """rankbased""" +911 71 dataset """kinships""" +911 71 model """transr""" +911 71 loss """crossentropy""" +911 71 regularizer """no""" +911 71 optimizer """adadelta""" +911 71 training_loop """lcwa""" +911 71 evaluator """rankbased""" +911 72 dataset """kinships""" +911 72 model """transr""" +911 72 loss """crossentropy""" +911 72 regularizer """no""" +911 72 optimizer """adadelta""" +911 72 training_loop 
"""lcwa""" +911 72 evaluator """rankbased""" +911 73 dataset """kinships""" +911 73 model """transr""" +911 73 loss """crossentropy""" +911 73 regularizer """no""" +911 73 optimizer """adadelta""" +911 73 training_loop """lcwa""" +911 73 evaluator """rankbased""" +911 74 dataset """kinships""" +911 74 model """transr""" +911 74 loss """crossentropy""" +911 74 regularizer """no""" +911 74 optimizer """adadelta""" +911 74 training_loop """lcwa""" +911 74 evaluator """rankbased""" +911 75 dataset """kinships""" +911 75 model """transr""" +911 75 loss """crossentropy""" +911 75 regularizer """no""" +911 75 optimizer """adadelta""" +911 75 training_loop """lcwa""" +911 75 evaluator """rankbased""" +911 76 dataset """kinships""" +911 76 model """transr""" +911 76 loss """crossentropy""" +911 76 regularizer """no""" +911 76 optimizer """adadelta""" +911 76 training_loop """lcwa""" +911 76 evaluator """rankbased""" +911 77 dataset """kinships""" +911 77 model """transr""" +911 77 loss """crossentropy""" +911 77 regularizer """no""" +911 77 optimizer """adadelta""" +911 77 training_loop """lcwa""" +911 77 evaluator """rankbased""" +911 78 dataset """kinships""" +911 78 model """transr""" +911 78 loss """crossentropy""" +911 78 regularizer """no""" +911 78 optimizer """adadelta""" +911 78 training_loop """lcwa""" +911 78 evaluator """rankbased""" +911 79 dataset """kinships""" +911 79 model """transr""" +911 79 loss """crossentropy""" +911 79 regularizer """no""" +911 79 optimizer """adadelta""" +911 79 training_loop """lcwa""" +911 79 evaluator """rankbased""" +911 80 dataset """kinships""" +911 80 model """transr""" +911 80 loss """crossentropy""" +911 80 regularizer """no""" +911 80 optimizer """adadelta""" +911 80 training_loop """lcwa""" +911 80 evaluator """rankbased""" +911 81 dataset """kinships""" +911 81 model """transr""" +911 81 loss """crossentropy""" +911 81 regularizer """no""" +911 81 optimizer """adadelta""" +911 81 training_loop """lcwa""" +911 81 evaluator 
"""rankbased""" +911 82 dataset """kinships""" +911 82 model """transr""" +911 82 loss """crossentropy""" +911 82 regularizer """no""" +911 82 optimizer """adadelta""" +911 82 training_loop """lcwa""" +911 82 evaluator """rankbased""" +911 83 dataset """kinships""" +911 83 model """transr""" +911 83 loss """crossentropy""" +911 83 regularizer """no""" +911 83 optimizer """adadelta""" +911 83 training_loop """lcwa""" +911 83 evaluator """rankbased""" +911 84 dataset """kinships""" +911 84 model """transr""" +911 84 loss """crossentropy""" +911 84 regularizer """no""" +911 84 optimizer """adadelta""" +911 84 training_loop """lcwa""" +911 84 evaluator """rankbased""" +911 85 dataset """kinships""" +911 85 model """transr""" +911 85 loss """crossentropy""" +911 85 regularizer """no""" +911 85 optimizer """adadelta""" +911 85 training_loop """lcwa""" +911 85 evaluator """rankbased""" +911 86 dataset """kinships""" +911 86 model """transr""" +911 86 loss """crossentropy""" +911 86 regularizer """no""" +911 86 optimizer """adadelta""" +911 86 training_loop """lcwa""" +911 86 evaluator """rankbased""" +911 87 dataset """kinships""" +911 87 model """transr""" +911 87 loss """crossentropy""" +911 87 regularizer """no""" +911 87 optimizer """adadelta""" +911 87 training_loop """lcwa""" +911 87 evaluator """rankbased""" +911 88 dataset """kinships""" +911 88 model """transr""" +911 88 loss """crossentropy""" +911 88 regularizer """no""" +911 88 optimizer """adadelta""" +911 88 training_loop """lcwa""" +911 88 evaluator """rankbased""" +911 89 dataset """kinships""" +911 89 model """transr""" +911 89 loss """crossentropy""" +911 89 regularizer """no""" +911 89 optimizer """adadelta""" +911 89 training_loop """lcwa""" +911 89 evaluator """rankbased""" +911 90 dataset """kinships""" +911 90 model """transr""" +911 90 loss """crossentropy""" +911 90 regularizer """no""" +911 90 optimizer """adadelta""" +911 90 training_loop """lcwa""" +911 90 evaluator """rankbased""" +911 91 
dataset """kinships""" +911 91 model """transr""" +911 91 loss """crossentropy""" +911 91 regularizer """no""" +911 91 optimizer """adadelta""" +911 91 training_loop """lcwa""" +911 91 evaluator """rankbased""" +911 92 dataset """kinships""" +911 92 model """transr""" +911 92 loss """crossentropy""" +911 92 regularizer """no""" +911 92 optimizer """adadelta""" +911 92 training_loop """lcwa""" +911 92 evaluator """rankbased""" +911 93 dataset """kinships""" +911 93 model """transr""" +911 93 loss """crossentropy""" +911 93 regularizer """no""" +911 93 optimizer """adadelta""" +911 93 training_loop """lcwa""" +911 93 evaluator """rankbased""" +911 94 dataset """kinships""" +911 94 model """transr""" +911 94 loss """crossentropy""" +911 94 regularizer """no""" +911 94 optimizer """adadelta""" +911 94 training_loop """lcwa""" +911 94 evaluator """rankbased""" +911 95 dataset """kinships""" +911 95 model """transr""" +911 95 loss """crossentropy""" +911 95 regularizer """no""" +911 95 optimizer """adadelta""" +911 95 training_loop """lcwa""" +911 95 evaluator """rankbased""" +911 96 dataset """kinships""" +911 96 model """transr""" +911 96 loss """crossentropy""" +911 96 regularizer """no""" +911 96 optimizer """adadelta""" +911 96 training_loop """lcwa""" +911 96 evaluator """rankbased""" +911 97 dataset """kinships""" +911 97 model """transr""" +911 97 loss """crossentropy""" +911 97 regularizer """no""" +911 97 optimizer """adadelta""" +911 97 training_loop """lcwa""" +911 97 evaluator """rankbased""" +911 98 dataset """kinships""" +911 98 model """transr""" +911 98 loss """crossentropy""" +911 98 regularizer """no""" +911 98 optimizer """adadelta""" +911 98 training_loop """lcwa""" +911 98 evaluator """rankbased""" +911 99 dataset """kinships""" +911 99 model """transr""" +911 99 loss """crossentropy""" +911 99 regularizer """no""" +911 99 optimizer """adadelta""" +911 99 training_loop """lcwa""" +911 99 evaluator """rankbased""" +911 100 dataset """kinships""" +911 
100 model """transr""" +911 100 loss """crossentropy""" +911 100 regularizer """no""" +911 100 optimizer """adadelta""" +911 100 training_loop """lcwa""" +911 100 evaluator """rankbased""" +912 1 model.embedding_dim 0.0 +912 1 model.relation_dim 1.0 +912 1 model.scoring_fct_norm 1.0 +912 1 training.batch_size 2.0 +912 1 training.label_smoothing 0.006141432238706335 +912 2 model.embedding_dim 1.0 +912 2 model.relation_dim 2.0 +912 2 model.scoring_fct_norm 2.0 +912 2 training.batch_size 1.0 +912 2 training.label_smoothing 0.02722854719448766 +912 3 model.embedding_dim 1.0 +912 3 model.relation_dim 0.0 +912 3 model.scoring_fct_norm 1.0 +912 3 training.batch_size 1.0 +912 3 training.label_smoothing 0.0044199009708625735 +912 4 model.embedding_dim 0.0 +912 4 model.relation_dim 1.0 +912 4 model.scoring_fct_norm 2.0 +912 4 training.batch_size 2.0 +912 4 training.label_smoothing 0.001797767346646438 +912 5 model.embedding_dim 1.0 +912 5 model.relation_dim 2.0 +912 5 model.scoring_fct_norm 2.0 +912 5 training.batch_size 1.0 +912 5 training.label_smoothing 0.004300408249657616 +912 6 model.embedding_dim 0.0 +912 6 model.relation_dim 2.0 +912 6 model.scoring_fct_norm 1.0 +912 6 training.batch_size 1.0 +912 6 training.label_smoothing 0.0025244566848656854 +912 7 model.embedding_dim 2.0 +912 7 model.relation_dim 0.0 +912 7 model.scoring_fct_norm 1.0 +912 7 training.batch_size 0.0 +912 7 training.label_smoothing 0.0010533513434966136 +912 8 model.embedding_dim 2.0 +912 8 model.relation_dim 0.0 +912 8 model.scoring_fct_norm 1.0 +912 8 training.batch_size 2.0 +912 8 training.label_smoothing 0.6572708941684726 +912 9 model.embedding_dim 1.0 +912 9 model.relation_dim 2.0 +912 9 model.scoring_fct_norm 2.0 +912 9 training.batch_size 2.0 +912 9 training.label_smoothing 0.04982896106705719 +912 10 model.embedding_dim 1.0 +912 10 model.relation_dim 1.0 +912 10 model.scoring_fct_norm 1.0 +912 10 training.batch_size 0.0 +912 10 training.label_smoothing 0.09431897545706178 +912 11 
model.embedding_dim 0.0 +912 11 model.relation_dim 1.0 +912 11 model.scoring_fct_norm 1.0 +912 11 training.batch_size 1.0 +912 11 training.label_smoothing 0.003078872841214181 +912 12 model.embedding_dim 0.0 +912 12 model.relation_dim 2.0 +912 12 model.scoring_fct_norm 1.0 +912 12 training.batch_size 1.0 +912 12 training.label_smoothing 0.0018724426526616673 +912 13 model.embedding_dim 1.0 +912 13 model.relation_dim 0.0 +912 13 model.scoring_fct_norm 2.0 +912 13 training.batch_size 0.0 +912 13 training.label_smoothing 0.40368413181776 +912 14 model.embedding_dim 2.0 +912 14 model.relation_dim 1.0 +912 14 model.scoring_fct_norm 1.0 +912 14 training.batch_size 2.0 +912 14 training.label_smoothing 0.1827103181798502 +912 15 model.embedding_dim 2.0 +912 15 model.relation_dim 0.0 +912 15 model.scoring_fct_norm 2.0 +912 15 training.batch_size 0.0 +912 15 training.label_smoothing 0.7787172546308673 +912 16 model.embedding_dim 2.0 +912 16 model.relation_dim 2.0 +912 16 model.scoring_fct_norm 1.0 +912 16 training.batch_size 1.0 +912 16 training.label_smoothing 0.006998121035482351 +912 17 model.embedding_dim 2.0 +912 17 model.relation_dim 2.0 +912 17 model.scoring_fct_norm 1.0 +912 17 training.batch_size 1.0 +912 17 training.label_smoothing 0.002124893667806738 +912 18 model.embedding_dim 1.0 +912 18 model.relation_dim 0.0 +912 18 model.scoring_fct_norm 1.0 +912 18 training.batch_size 2.0 +912 18 training.label_smoothing 0.006789643787606447 +912 19 model.embedding_dim 2.0 +912 19 model.relation_dim 0.0 +912 19 model.scoring_fct_norm 1.0 +912 19 training.batch_size 1.0 +912 19 training.label_smoothing 0.007119466051368592 +912 20 model.embedding_dim 1.0 +912 20 model.relation_dim 0.0 +912 20 model.scoring_fct_norm 2.0 +912 20 training.batch_size 2.0 +912 20 training.label_smoothing 0.15517448023232772 +912 21 model.embedding_dim 0.0 +912 21 model.relation_dim 0.0 +912 21 model.scoring_fct_norm 1.0 +912 21 training.batch_size 2.0 +912 21 training.label_smoothing 
0.07153455131063674 +912 22 model.embedding_dim 0.0 +912 22 model.relation_dim 0.0 +912 22 model.scoring_fct_norm 2.0 +912 22 training.batch_size 0.0 +912 22 training.label_smoothing 0.06481130565442236 +912 23 model.embedding_dim 0.0 +912 23 model.relation_dim 2.0 +912 23 model.scoring_fct_norm 1.0 +912 23 training.batch_size 0.0 +912 23 training.label_smoothing 0.15423099944503832 +912 24 model.embedding_dim 0.0 +912 24 model.relation_dim 2.0 +912 24 model.scoring_fct_norm 2.0 +912 24 training.batch_size 0.0 +912 24 training.label_smoothing 0.19883025833605877 +912 25 model.embedding_dim 1.0 +912 25 model.relation_dim 2.0 +912 25 model.scoring_fct_norm 2.0 +912 25 training.batch_size 2.0 +912 25 training.label_smoothing 0.6677563042498358 +912 26 model.embedding_dim 1.0 +912 26 model.relation_dim 1.0 +912 26 model.scoring_fct_norm 2.0 +912 26 training.batch_size 2.0 +912 26 training.label_smoothing 0.2892651983638765 +912 27 model.embedding_dim 0.0 +912 27 model.relation_dim 1.0 +912 27 model.scoring_fct_norm 2.0 +912 27 training.batch_size 2.0 +912 27 training.label_smoothing 0.7205592394735347 +912 28 model.embedding_dim 2.0 +912 28 model.relation_dim 0.0 +912 28 model.scoring_fct_norm 2.0 +912 28 training.batch_size 1.0 +912 28 training.label_smoothing 0.043509325590374154 +912 29 model.embedding_dim 0.0 +912 29 model.relation_dim 1.0 +912 29 model.scoring_fct_norm 2.0 +912 29 training.batch_size 0.0 +912 29 training.label_smoothing 0.003185517822512361 +912 30 model.embedding_dim 0.0 +912 30 model.relation_dim 0.0 +912 30 model.scoring_fct_norm 1.0 +912 30 training.batch_size 0.0 +912 30 training.label_smoothing 0.40035079278124375 +912 31 model.embedding_dim 2.0 +912 31 model.relation_dim 0.0 +912 31 model.scoring_fct_norm 1.0 +912 31 training.batch_size 0.0 +912 31 training.label_smoothing 0.6189491231167087 +912 32 model.embedding_dim 2.0 +912 32 model.relation_dim 2.0 +912 32 model.scoring_fct_norm 2.0 +912 32 training.batch_size 2.0 +912 32 
training.label_smoothing 0.015383131930425089 +912 33 model.embedding_dim 0.0 +912 33 model.relation_dim 0.0 +912 33 model.scoring_fct_norm 1.0 +912 33 training.batch_size 2.0 +912 33 training.label_smoothing 0.0048568922272317645 +912 34 model.embedding_dim 2.0 +912 34 model.relation_dim 1.0 +912 34 model.scoring_fct_norm 1.0 +912 34 training.batch_size 0.0 +912 34 training.label_smoothing 0.012360270255218296 +912 35 model.embedding_dim 2.0 +912 35 model.relation_dim 1.0 +912 35 model.scoring_fct_norm 1.0 +912 35 training.batch_size 2.0 +912 35 training.label_smoothing 0.0016293134244172369 +912 36 model.embedding_dim 0.0 +912 36 model.relation_dim 0.0 +912 36 model.scoring_fct_norm 1.0 +912 36 training.batch_size 0.0 +912 36 training.label_smoothing 0.010231219388888577 +912 37 model.embedding_dim 0.0 +912 37 model.relation_dim 2.0 +912 37 model.scoring_fct_norm 2.0 +912 37 training.batch_size 1.0 +912 37 training.label_smoothing 0.00555188736102365 +912 38 model.embedding_dim 2.0 +912 38 model.relation_dim 2.0 +912 38 model.scoring_fct_norm 2.0 +912 38 training.batch_size 1.0 +912 38 training.label_smoothing 0.0021034460901696453 +912 39 model.embedding_dim 2.0 +912 39 model.relation_dim 2.0 +912 39 model.scoring_fct_norm 2.0 +912 39 training.batch_size 2.0 +912 39 training.label_smoothing 0.3497551408469266 +912 40 model.embedding_dim 1.0 +912 40 model.relation_dim 1.0 +912 40 model.scoring_fct_norm 1.0 +912 40 training.batch_size 2.0 +912 40 training.label_smoothing 0.05775884529618554 +912 41 model.embedding_dim 2.0 +912 41 model.relation_dim 1.0 +912 41 model.scoring_fct_norm 2.0 +912 41 training.batch_size 1.0 +912 41 training.label_smoothing 0.9849120932073876 +912 42 model.embedding_dim 0.0 +912 42 model.relation_dim 2.0 +912 42 model.scoring_fct_norm 1.0 +912 42 training.batch_size 1.0 +912 42 training.label_smoothing 0.07998478541635234 +912 43 model.embedding_dim 0.0 +912 43 model.relation_dim 1.0 +912 43 model.scoring_fct_norm 1.0 +912 43 
training.batch_size 1.0 +912 43 training.label_smoothing 0.07549242059009892 +912 44 model.embedding_dim 0.0 +912 44 model.relation_dim 1.0 +912 44 model.scoring_fct_norm 2.0 +912 44 training.batch_size 1.0 +912 44 training.label_smoothing 0.1365887011311332 +912 45 model.embedding_dim 0.0 +912 45 model.relation_dim 2.0 +912 45 model.scoring_fct_norm 2.0 +912 45 training.batch_size 0.0 +912 45 training.label_smoothing 0.8972329529790475 +912 46 model.embedding_dim 0.0 +912 46 model.relation_dim 2.0 +912 46 model.scoring_fct_norm 2.0 +912 46 training.batch_size 2.0 +912 46 training.label_smoothing 0.022825065497947265 +912 47 model.embedding_dim 0.0 +912 47 model.relation_dim 2.0 +912 47 model.scoring_fct_norm 2.0 +912 47 training.batch_size 2.0 +912 47 training.label_smoothing 0.0018016731747472993 +912 48 model.embedding_dim 1.0 +912 48 model.relation_dim 1.0 +912 48 model.scoring_fct_norm 2.0 +912 48 training.batch_size 2.0 +912 48 training.label_smoothing 0.0025516377489325694 +912 49 model.embedding_dim 0.0 +912 49 model.relation_dim 2.0 +912 49 model.scoring_fct_norm 2.0 +912 49 training.batch_size 2.0 +912 49 training.label_smoothing 0.6585692618665405 +912 50 model.embedding_dim 2.0 +912 50 model.relation_dim 2.0 +912 50 model.scoring_fct_norm 2.0 +912 50 training.batch_size 2.0 +912 50 training.label_smoothing 0.031688675129725875 +912 51 model.embedding_dim 0.0 +912 51 model.relation_dim 0.0 +912 51 model.scoring_fct_norm 1.0 +912 51 training.batch_size 0.0 +912 51 training.label_smoothing 0.052688610228301014 +912 52 model.embedding_dim 0.0 +912 52 model.relation_dim 1.0 +912 52 model.scoring_fct_norm 1.0 +912 52 training.batch_size 2.0 +912 52 training.label_smoothing 0.22376326900085036 +912 53 model.embedding_dim 2.0 +912 53 model.relation_dim 2.0 +912 53 model.scoring_fct_norm 1.0 +912 53 training.batch_size 0.0 +912 53 training.label_smoothing 0.7994077821581984 +912 54 model.embedding_dim 0.0 +912 54 model.relation_dim 0.0 +912 54 
model.scoring_fct_norm 1.0 +912 54 training.batch_size 0.0 +912 54 training.label_smoothing 0.025139524030673488 +912 55 model.embedding_dim 2.0 +912 55 model.relation_dim 2.0 +912 55 model.scoring_fct_norm 2.0 +912 55 training.batch_size 0.0 +912 55 training.label_smoothing 0.12452718951251922 +912 56 model.embedding_dim 2.0 +912 56 model.relation_dim 2.0 +912 56 model.scoring_fct_norm 2.0 +912 56 training.batch_size 0.0 +912 56 training.label_smoothing 0.48263616097502704 +912 57 model.embedding_dim 2.0 +912 57 model.relation_dim 0.0 +912 57 model.scoring_fct_norm 1.0 +912 57 training.batch_size 0.0 +912 57 training.label_smoothing 0.0023838706505670375 +912 58 model.embedding_dim 1.0 +912 58 model.relation_dim 1.0 +912 58 model.scoring_fct_norm 1.0 +912 58 training.batch_size 1.0 +912 58 training.label_smoothing 0.07526498102936557 +912 59 model.embedding_dim 1.0 +912 59 model.relation_dim 0.0 +912 59 model.scoring_fct_norm 2.0 +912 59 training.batch_size 1.0 +912 59 training.label_smoothing 0.11014228090020717 +912 60 model.embedding_dim 0.0 +912 60 model.relation_dim 2.0 +912 60 model.scoring_fct_norm 1.0 +912 60 training.batch_size 0.0 +912 60 training.label_smoothing 0.017972353696509076 +912 61 model.embedding_dim 2.0 +912 61 model.relation_dim 1.0 +912 61 model.scoring_fct_norm 1.0 +912 61 training.batch_size 0.0 +912 61 training.label_smoothing 0.27227436122384346 +912 62 model.embedding_dim 0.0 +912 62 model.relation_dim 2.0 +912 62 model.scoring_fct_norm 2.0 +912 62 training.batch_size 0.0 +912 62 training.label_smoothing 0.05249313039753293 +912 63 model.embedding_dim 2.0 +912 63 model.relation_dim 2.0 +912 63 model.scoring_fct_norm 2.0 +912 63 training.batch_size 2.0 +912 63 training.label_smoothing 0.8159638318601179 +912 64 model.embedding_dim 1.0 +912 64 model.relation_dim 0.0 +912 64 model.scoring_fct_norm 2.0 +912 64 training.batch_size 1.0 +912 64 training.label_smoothing 0.28682592891418723 +912 65 model.embedding_dim 2.0 +912 65 
model.relation_dim 2.0 +912 65 model.scoring_fct_norm 2.0 +912 65 training.batch_size 2.0 +912 65 training.label_smoothing 0.007904058068920168 +912 66 model.embedding_dim 2.0 +912 66 model.relation_dim 0.0 +912 66 model.scoring_fct_norm 1.0 +912 66 training.batch_size 0.0 +912 66 training.label_smoothing 0.004454162543676131 +912 67 model.embedding_dim 1.0 +912 67 model.relation_dim 0.0 +912 67 model.scoring_fct_norm 1.0 +912 67 training.batch_size 1.0 +912 67 training.label_smoothing 0.009873374520055418 +912 68 model.embedding_dim 2.0 +912 68 model.relation_dim 1.0 +912 68 model.scoring_fct_norm 1.0 +912 68 training.batch_size 1.0 +912 68 training.label_smoothing 0.009495135551353248 +912 69 model.embedding_dim 1.0 +912 69 model.relation_dim 2.0 +912 69 model.scoring_fct_norm 1.0 +912 69 training.batch_size 1.0 +912 69 training.label_smoothing 0.0010177021770258734 +912 70 model.embedding_dim 0.0 +912 70 model.relation_dim 1.0 +912 70 model.scoring_fct_norm 2.0 +912 70 training.batch_size 2.0 +912 70 training.label_smoothing 0.13235929076379258 +912 71 model.embedding_dim 0.0 +912 71 model.relation_dim 1.0 +912 71 model.scoring_fct_norm 1.0 +912 71 training.batch_size 2.0 +912 71 training.label_smoothing 0.023775256576150636 +912 72 model.embedding_dim 2.0 +912 72 model.relation_dim 1.0 +912 72 model.scoring_fct_norm 1.0 +912 72 training.batch_size 0.0 +912 72 training.label_smoothing 0.09620139968080423 +912 73 model.embedding_dim 0.0 +912 73 model.relation_dim 2.0 +912 73 model.scoring_fct_norm 1.0 +912 73 training.batch_size 0.0 +912 73 training.label_smoothing 0.9102557795218722 +912 74 model.embedding_dim 1.0 +912 74 model.relation_dim 1.0 +912 74 model.scoring_fct_norm 1.0 +912 74 training.batch_size 2.0 +912 74 training.label_smoothing 0.00957878473871704 +912 75 model.embedding_dim 1.0 +912 75 model.relation_dim 2.0 +912 75 model.scoring_fct_norm 2.0 +912 75 training.batch_size 0.0 +912 75 training.label_smoothing 0.17360057021647476 +912 76 
model.embedding_dim 1.0 +912 76 model.relation_dim 0.0 +912 76 model.scoring_fct_norm 1.0 +912 76 training.batch_size 0.0 +912 76 training.label_smoothing 0.41444766476973804 +912 77 model.embedding_dim 1.0 +912 77 model.relation_dim 1.0 +912 77 model.scoring_fct_norm 2.0 +912 77 training.batch_size 0.0 +912 77 training.label_smoothing 0.4718903763529738 +912 78 model.embedding_dim 0.0 +912 78 model.relation_dim 0.0 +912 78 model.scoring_fct_norm 2.0 +912 78 training.batch_size 2.0 +912 78 training.label_smoothing 0.01582785079395702 +912 79 model.embedding_dim 1.0 +912 79 model.relation_dim 2.0 +912 79 model.scoring_fct_norm 2.0 +912 79 training.batch_size 1.0 +912 79 training.label_smoothing 0.0948011792268503 +912 80 model.embedding_dim 0.0 +912 80 model.relation_dim 1.0 +912 80 model.scoring_fct_norm 1.0 +912 80 training.batch_size 2.0 +912 80 training.label_smoothing 0.012714224177129198 +912 81 model.embedding_dim 1.0 +912 81 model.relation_dim 0.0 +912 81 model.scoring_fct_norm 1.0 +912 81 training.batch_size 1.0 +912 81 training.label_smoothing 0.0030500818048650523 +912 82 model.embedding_dim 0.0 +912 82 model.relation_dim 2.0 +912 82 model.scoring_fct_norm 2.0 +912 82 training.batch_size 2.0 +912 82 training.label_smoothing 0.002386526860783542 +912 83 model.embedding_dim 0.0 +912 83 model.relation_dim 2.0 +912 83 model.scoring_fct_norm 1.0 +912 83 training.batch_size 0.0 +912 83 training.label_smoothing 0.0017240591323706637 +912 84 model.embedding_dim 2.0 +912 84 model.relation_dim 0.0 +912 84 model.scoring_fct_norm 2.0 +912 84 training.batch_size 0.0 +912 84 training.label_smoothing 0.01733930501176767 +912 85 model.embedding_dim 2.0 +912 85 model.relation_dim 1.0 +912 85 model.scoring_fct_norm 1.0 +912 85 training.batch_size 0.0 +912 85 training.label_smoothing 0.18390012539922143 +912 86 model.embedding_dim 2.0 +912 86 model.relation_dim 0.0 +912 86 model.scoring_fct_norm 2.0 +912 86 training.batch_size 0.0 +912 86 training.label_smoothing 
0.3081143752668203 +912 87 model.embedding_dim 1.0 +912 87 model.relation_dim 1.0 +912 87 model.scoring_fct_norm 2.0 +912 87 training.batch_size 1.0 +912 87 training.label_smoothing 0.007800416751327613 +912 88 model.embedding_dim 0.0 +912 88 model.relation_dim 0.0 +912 88 model.scoring_fct_norm 2.0 +912 88 training.batch_size 2.0 +912 88 training.label_smoothing 0.02274162852147789 +912 89 model.embedding_dim 1.0 +912 89 model.relation_dim 0.0 +912 89 model.scoring_fct_norm 2.0 +912 89 training.batch_size 0.0 +912 89 training.label_smoothing 0.0319307768305534 +912 90 model.embedding_dim 1.0 +912 90 model.relation_dim 1.0 +912 90 model.scoring_fct_norm 1.0 +912 90 training.batch_size 2.0 +912 90 training.label_smoothing 0.06446577849585465 +912 91 model.embedding_dim 1.0 +912 91 model.relation_dim 1.0 +912 91 model.scoring_fct_norm 2.0 +912 91 training.batch_size 0.0 +912 91 training.label_smoothing 0.017957800863603258 +912 92 model.embedding_dim 1.0 +912 92 model.relation_dim 0.0 +912 92 model.scoring_fct_norm 2.0 +912 92 training.batch_size 2.0 +912 92 training.label_smoothing 0.0504527804130686 +912 93 model.embedding_dim 1.0 +912 93 model.relation_dim 2.0 +912 93 model.scoring_fct_norm 2.0 +912 93 training.batch_size 0.0 +912 93 training.label_smoothing 0.8843665593496964 +912 94 model.embedding_dim 0.0 +912 94 model.relation_dim 2.0 +912 94 model.scoring_fct_norm 2.0 +912 94 training.batch_size 0.0 +912 94 training.label_smoothing 0.0013228961981384471 +912 95 model.embedding_dim 0.0 +912 95 model.relation_dim 0.0 +912 95 model.scoring_fct_norm 2.0 +912 95 training.batch_size 0.0 +912 95 training.label_smoothing 0.7059528797573851 +912 96 model.embedding_dim 0.0 +912 96 model.relation_dim 1.0 +912 96 model.scoring_fct_norm 2.0 +912 96 training.batch_size 2.0 +912 96 training.label_smoothing 0.019594398683371803 +912 97 model.embedding_dim 0.0 +912 97 model.relation_dim 1.0 +912 97 model.scoring_fct_norm 1.0 +912 97 training.batch_size 1.0 +912 97 
training.label_smoothing 0.8712574707571279 +912 98 model.embedding_dim 2.0 +912 98 model.relation_dim 1.0 +912 98 model.scoring_fct_norm 2.0 +912 98 training.batch_size 2.0 +912 98 training.label_smoothing 0.012797167923535418 +912 99 model.embedding_dim 2.0 +912 99 model.relation_dim 2.0 +912 99 model.scoring_fct_norm 1.0 +912 99 training.batch_size 0.0 +912 99 training.label_smoothing 0.03931575575102576 +912 100 model.embedding_dim 1.0 +912 100 model.relation_dim 2.0 +912 100 model.scoring_fct_norm 2.0 +912 100 training.batch_size 1.0 +912 100 training.label_smoothing 0.02044544194892911 +912 1 dataset """kinships""" +912 1 model """transr""" +912 1 loss """crossentropy""" +912 1 regularizer """no""" +912 1 optimizer """adadelta""" +912 1 training_loop """lcwa""" +912 1 evaluator """rankbased""" +912 2 dataset """kinships""" +912 2 model """transr""" +912 2 loss """crossentropy""" +912 2 regularizer """no""" +912 2 optimizer """adadelta""" +912 2 training_loop """lcwa""" +912 2 evaluator """rankbased""" +912 3 dataset """kinships""" +912 3 model """transr""" +912 3 loss """crossentropy""" +912 3 regularizer """no""" +912 3 optimizer """adadelta""" +912 3 training_loop """lcwa""" +912 3 evaluator """rankbased""" +912 4 dataset """kinships""" +912 4 model """transr""" +912 4 loss """crossentropy""" +912 4 regularizer """no""" +912 4 optimizer """adadelta""" +912 4 training_loop """lcwa""" +912 4 evaluator """rankbased""" +912 5 dataset """kinships""" +912 5 model """transr""" +912 5 loss """crossentropy""" +912 5 regularizer """no""" +912 5 optimizer """adadelta""" +912 5 training_loop """lcwa""" +912 5 evaluator """rankbased""" +912 6 dataset """kinships""" +912 6 model """transr""" +912 6 loss """crossentropy""" +912 6 regularizer """no""" +912 6 optimizer """adadelta""" +912 6 training_loop """lcwa""" +912 6 evaluator """rankbased""" +912 7 dataset """kinships""" +912 7 model """transr""" +912 7 loss """crossentropy""" +912 7 regularizer """no""" +912 7 
optimizer """adadelta""" +912 7 training_loop """lcwa""" +912 7 evaluator """rankbased""" +912 8 dataset """kinships""" +912 8 model """transr""" +912 8 loss """crossentropy""" +912 8 regularizer """no""" +912 8 optimizer """adadelta""" +912 8 training_loop """lcwa""" +912 8 evaluator """rankbased""" +912 9 dataset """kinships""" +912 9 model """transr""" +912 9 loss """crossentropy""" +912 9 regularizer """no""" +912 9 optimizer """adadelta""" +912 9 training_loop """lcwa""" +912 9 evaluator """rankbased""" +912 10 dataset """kinships""" +912 10 model """transr""" +912 10 loss """crossentropy""" +912 10 regularizer """no""" +912 10 optimizer """adadelta""" +912 10 training_loop """lcwa""" +912 10 evaluator """rankbased""" +912 11 dataset """kinships""" +912 11 model """transr""" +912 11 loss """crossentropy""" +912 11 regularizer """no""" +912 11 optimizer """adadelta""" +912 11 training_loop """lcwa""" +912 11 evaluator """rankbased""" +912 12 dataset """kinships""" +912 12 model """transr""" +912 12 loss """crossentropy""" +912 12 regularizer """no""" +912 12 optimizer """adadelta""" +912 12 training_loop """lcwa""" +912 12 evaluator """rankbased""" +912 13 dataset """kinships""" +912 13 model """transr""" +912 13 loss """crossentropy""" +912 13 regularizer """no""" +912 13 optimizer """adadelta""" +912 13 training_loop """lcwa""" +912 13 evaluator """rankbased""" +912 14 dataset """kinships""" +912 14 model """transr""" +912 14 loss """crossentropy""" +912 14 regularizer """no""" +912 14 optimizer """adadelta""" +912 14 training_loop """lcwa""" +912 14 evaluator """rankbased""" +912 15 dataset """kinships""" +912 15 model """transr""" +912 15 loss """crossentropy""" +912 15 regularizer """no""" +912 15 optimizer """adadelta""" +912 15 training_loop """lcwa""" +912 15 evaluator """rankbased""" +912 16 dataset """kinships""" +912 16 model """transr""" +912 16 loss """crossentropy""" +912 16 regularizer """no""" +912 16 optimizer """adadelta""" +912 16 
training_loop """lcwa""" +912 16 evaluator """rankbased""" +912 17 dataset """kinships""" +912 17 model """transr""" +912 17 loss """crossentropy""" +912 17 regularizer """no""" +912 17 optimizer """adadelta""" +912 17 training_loop """lcwa""" +912 17 evaluator """rankbased""" +912 18 dataset """kinships""" +912 18 model """transr""" +912 18 loss """crossentropy""" +912 18 regularizer """no""" +912 18 optimizer """adadelta""" +912 18 training_loop """lcwa""" +912 18 evaluator """rankbased""" +912 19 dataset """kinships""" +912 19 model """transr""" +912 19 loss """crossentropy""" +912 19 regularizer """no""" +912 19 optimizer """adadelta""" +912 19 training_loop """lcwa""" +912 19 evaluator """rankbased""" +912 20 dataset """kinships""" +912 20 model """transr""" +912 20 loss """crossentropy""" +912 20 regularizer """no""" +912 20 optimizer """adadelta""" +912 20 training_loop """lcwa""" +912 20 evaluator """rankbased""" +912 21 dataset """kinships""" +912 21 model """transr""" +912 21 loss """crossentropy""" +912 21 regularizer """no""" +912 21 optimizer """adadelta""" +912 21 training_loop """lcwa""" +912 21 evaluator """rankbased""" +912 22 dataset """kinships""" +912 22 model """transr""" +912 22 loss """crossentropy""" +912 22 regularizer """no""" +912 22 optimizer """adadelta""" +912 22 training_loop """lcwa""" +912 22 evaluator """rankbased""" +912 23 dataset """kinships""" +912 23 model """transr""" +912 23 loss """crossentropy""" +912 23 regularizer """no""" +912 23 optimizer """adadelta""" +912 23 training_loop """lcwa""" +912 23 evaluator """rankbased""" +912 24 dataset """kinships""" +912 24 model """transr""" +912 24 loss """crossentropy""" +912 24 regularizer """no""" +912 24 optimizer """adadelta""" +912 24 training_loop """lcwa""" +912 24 evaluator """rankbased""" +912 25 dataset """kinships""" +912 25 model """transr""" +912 25 loss """crossentropy""" +912 25 regularizer """no""" +912 25 optimizer """adadelta""" +912 25 training_loop """lcwa""" 
+912 25 evaluator """rankbased""" +912 26 dataset """kinships""" +912 26 model """transr""" +912 26 loss """crossentropy""" +912 26 regularizer """no""" +912 26 optimizer """adadelta""" +912 26 training_loop """lcwa""" +912 26 evaluator """rankbased""" +912 27 dataset """kinships""" +912 27 model """transr""" +912 27 loss """crossentropy""" +912 27 regularizer """no""" +912 27 optimizer """adadelta""" +912 27 training_loop """lcwa""" +912 27 evaluator """rankbased""" +912 28 dataset """kinships""" +912 28 model """transr""" +912 28 loss """crossentropy""" +912 28 regularizer """no""" +912 28 optimizer """adadelta""" +912 28 training_loop """lcwa""" +912 28 evaluator """rankbased""" +912 29 dataset """kinships""" +912 29 model """transr""" +912 29 loss """crossentropy""" +912 29 regularizer """no""" +912 29 optimizer """adadelta""" +912 29 training_loop """lcwa""" +912 29 evaluator """rankbased""" +912 30 dataset """kinships""" +912 30 model """transr""" +912 30 loss """crossentropy""" +912 30 regularizer """no""" +912 30 optimizer """adadelta""" +912 30 training_loop """lcwa""" +912 30 evaluator """rankbased""" +912 31 dataset """kinships""" +912 31 model """transr""" +912 31 loss """crossentropy""" +912 31 regularizer """no""" +912 31 optimizer """adadelta""" +912 31 training_loop """lcwa""" +912 31 evaluator """rankbased""" +912 32 dataset """kinships""" +912 32 model """transr""" +912 32 loss """crossentropy""" +912 32 regularizer """no""" +912 32 optimizer """adadelta""" +912 32 training_loop """lcwa""" +912 32 evaluator """rankbased""" +912 33 dataset """kinships""" +912 33 model """transr""" +912 33 loss """crossentropy""" +912 33 regularizer """no""" +912 33 optimizer """adadelta""" +912 33 training_loop """lcwa""" +912 33 evaluator """rankbased""" +912 34 dataset """kinships""" +912 34 model """transr""" +912 34 loss """crossentropy""" +912 34 regularizer """no""" +912 34 optimizer """adadelta""" +912 34 training_loop """lcwa""" +912 34 evaluator 
"""rankbased""" +912 35 dataset """kinships""" +912 35 model """transr""" +912 35 loss """crossentropy""" +912 35 regularizer """no""" +912 35 optimizer """adadelta""" +912 35 training_loop """lcwa""" +912 35 evaluator """rankbased""" +912 36 dataset """kinships""" +912 36 model """transr""" +912 36 loss """crossentropy""" +912 36 regularizer """no""" +912 36 optimizer """adadelta""" +912 36 training_loop """lcwa""" +912 36 evaluator """rankbased""" +912 37 dataset """kinships""" +912 37 model """transr""" +912 37 loss """crossentropy""" +912 37 regularizer """no""" +912 37 optimizer """adadelta""" +912 37 training_loop """lcwa""" +912 37 evaluator """rankbased""" +912 38 dataset """kinships""" +912 38 model """transr""" +912 38 loss """crossentropy""" +912 38 regularizer """no""" +912 38 optimizer """adadelta""" +912 38 training_loop """lcwa""" +912 38 evaluator """rankbased""" +912 39 dataset """kinships""" +912 39 model """transr""" +912 39 loss """crossentropy""" +912 39 regularizer """no""" +912 39 optimizer """adadelta""" +912 39 training_loop """lcwa""" +912 39 evaluator """rankbased""" +912 40 dataset """kinships""" +912 40 model """transr""" +912 40 loss """crossentropy""" +912 40 regularizer """no""" +912 40 optimizer """adadelta""" +912 40 training_loop """lcwa""" +912 40 evaluator """rankbased""" +912 41 dataset """kinships""" +912 41 model """transr""" +912 41 loss """crossentropy""" +912 41 regularizer """no""" +912 41 optimizer """adadelta""" +912 41 training_loop """lcwa""" +912 41 evaluator """rankbased""" +912 42 dataset """kinships""" +912 42 model """transr""" +912 42 loss """crossentropy""" +912 42 regularizer """no""" +912 42 optimizer """adadelta""" +912 42 training_loop """lcwa""" +912 42 evaluator """rankbased""" +912 43 dataset """kinships""" +912 43 model """transr""" +912 43 loss """crossentropy""" +912 43 regularizer """no""" +912 43 optimizer """adadelta""" +912 43 training_loop """lcwa""" +912 43 evaluator """rankbased""" +912 44 
dataset """kinships""" +912 44 model """transr""" +912 44 loss """crossentropy""" +912 44 regularizer """no""" +912 44 optimizer """adadelta""" +912 44 training_loop """lcwa""" +912 44 evaluator """rankbased""" +912 45 dataset """kinships""" +912 45 model """transr""" +912 45 loss """crossentropy""" +912 45 regularizer """no""" +912 45 optimizer """adadelta""" +912 45 training_loop """lcwa""" +912 45 evaluator """rankbased""" +912 46 dataset """kinships""" +912 46 model """transr""" +912 46 loss """crossentropy""" +912 46 regularizer """no""" +912 46 optimizer """adadelta""" +912 46 training_loop """lcwa""" +912 46 evaluator """rankbased""" +912 47 dataset """kinships""" +912 47 model """transr""" +912 47 loss """crossentropy""" +912 47 regularizer """no""" +912 47 optimizer """adadelta""" +912 47 training_loop """lcwa""" +912 47 evaluator """rankbased""" +912 48 dataset """kinships""" +912 48 model """transr""" +912 48 loss """crossentropy""" +912 48 regularizer """no""" +912 48 optimizer """adadelta""" +912 48 training_loop """lcwa""" +912 48 evaluator """rankbased""" +912 49 dataset """kinships""" +912 49 model """transr""" +912 49 loss """crossentropy""" +912 49 regularizer """no""" +912 49 optimizer """adadelta""" +912 49 training_loop """lcwa""" +912 49 evaluator """rankbased""" +912 50 dataset """kinships""" +912 50 model """transr""" +912 50 loss """crossentropy""" +912 50 regularizer """no""" +912 50 optimizer """adadelta""" +912 50 training_loop """lcwa""" +912 50 evaluator """rankbased""" +912 51 dataset """kinships""" +912 51 model """transr""" +912 51 loss """crossentropy""" +912 51 regularizer """no""" +912 51 optimizer """adadelta""" +912 51 training_loop """lcwa""" +912 51 evaluator """rankbased""" +912 52 dataset """kinships""" +912 52 model """transr""" +912 52 loss """crossentropy""" +912 52 regularizer """no""" +912 52 optimizer """adadelta""" +912 52 training_loop """lcwa""" +912 52 evaluator """rankbased""" +912 53 dataset """kinships""" +912 
53 model """transr""" +912 53 loss """crossentropy""" +912 53 regularizer """no""" +912 53 optimizer """adadelta""" +912 53 training_loop """lcwa""" +912 53 evaluator """rankbased""" +912 54 dataset """kinships""" +912 54 model """transr""" +912 54 loss """crossentropy""" +912 54 regularizer """no""" +912 54 optimizer """adadelta""" +912 54 training_loop """lcwa""" +912 54 evaluator """rankbased""" +912 55 dataset """kinships""" +912 55 model """transr""" +912 55 loss """crossentropy""" +912 55 regularizer """no""" +912 55 optimizer """adadelta""" +912 55 training_loop """lcwa""" +912 55 evaluator """rankbased""" +912 56 dataset """kinships""" +912 56 model """transr""" +912 56 loss """crossentropy""" +912 56 regularizer """no""" +912 56 optimizer """adadelta""" +912 56 training_loop """lcwa""" +912 56 evaluator """rankbased""" +912 57 dataset """kinships""" +912 57 model """transr""" +912 57 loss """crossentropy""" +912 57 regularizer """no""" +912 57 optimizer """adadelta""" +912 57 training_loop """lcwa""" +912 57 evaluator """rankbased""" +912 58 dataset """kinships""" +912 58 model """transr""" +912 58 loss """crossentropy""" +912 58 regularizer """no""" +912 58 optimizer """adadelta""" +912 58 training_loop """lcwa""" +912 58 evaluator """rankbased""" +912 59 dataset """kinships""" +912 59 model """transr""" +912 59 loss """crossentropy""" +912 59 regularizer """no""" +912 59 optimizer """adadelta""" +912 59 training_loop """lcwa""" +912 59 evaluator """rankbased""" +912 60 dataset """kinships""" +912 60 model """transr""" +912 60 loss """crossentropy""" +912 60 regularizer """no""" +912 60 optimizer """adadelta""" +912 60 training_loop """lcwa""" +912 60 evaluator """rankbased""" +912 61 dataset """kinships""" +912 61 model """transr""" +912 61 loss """crossentropy""" +912 61 regularizer """no""" +912 61 optimizer """adadelta""" +912 61 training_loop """lcwa""" +912 61 evaluator """rankbased""" +912 62 dataset """kinships""" +912 62 model """transr""" +912 
62 loss """crossentropy""" +912 62 regularizer """no""" +912 62 optimizer """adadelta""" +912 62 training_loop """lcwa""" +912 62 evaluator """rankbased""" +912 63 dataset """kinships""" +912 63 model """transr""" +912 63 loss """crossentropy""" +912 63 regularizer """no""" +912 63 optimizer """adadelta""" +912 63 training_loop """lcwa""" +912 63 evaluator """rankbased""" +912 64 dataset """kinships""" +912 64 model """transr""" +912 64 loss """crossentropy""" +912 64 regularizer """no""" +912 64 optimizer """adadelta""" +912 64 training_loop """lcwa""" +912 64 evaluator """rankbased""" +912 65 dataset """kinships""" +912 65 model """transr""" +912 65 loss """crossentropy""" +912 65 regularizer """no""" +912 65 optimizer """adadelta""" +912 65 training_loop """lcwa""" +912 65 evaluator """rankbased""" +912 66 dataset """kinships""" +912 66 model """transr""" +912 66 loss """crossentropy""" +912 66 regularizer """no""" +912 66 optimizer """adadelta""" +912 66 training_loop """lcwa""" +912 66 evaluator """rankbased""" +912 67 dataset """kinships""" +912 67 model """transr""" +912 67 loss """crossentropy""" +912 67 regularizer """no""" +912 67 optimizer """adadelta""" +912 67 training_loop """lcwa""" +912 67 evaluator """rankbased""" +912 68 dataset """kinships""" +912 68 model """transr""" +912 68 loss """crossentropy""" +912 68 regularizer """no""" +912 68 optimizer """adadelta""" +912 68 training_loop """lcwa""" +912 68 evaluator """rankbased""" +912 69 dataset """kinships""" +912 69 model """transr""" +912 69 loss """crossentropy""" +912 69 regularizer """no""" +912 69 optimizer """adadelta""" +912 69 training_loop """lcwa""" +912 69 evaluator """rankbased""" +912 70 dataset """kinships""" +912 70 model """transr""" +912 70 loss """crossentropy""" +912 70 regularizer """no""" +912 70 optimizer """adadelta""" +912 70 training_loop """lcwa""" +912 70 evaluator """rankbased""" +912 71 dataset """kinships""" +912 71 model """transr""" +912 71 loss """crossentropy""" 
+912 71 regularizer """no""" +912 71 optimizer """adadelta""" +912 71 training_loop """lcwa""" +912 71 evaluator """rankbased""" +912 72 dataset """kinships""" +912 72 model """transr""" +912 72 loss """crossentropy""" +912 72 regularizer """no""" +912 72 optimizer """adadelta""" +912 72 training_loop """lcwa""" +912 72 evaluator """rankbased""" +912 73 dataset """kinships""" +912 73 model """transr""" +912 73 loss """crossentropy""" +912 73 regularizer """no""" +912 73 optimizer """adadelta""" +912 73 training_loop """lcwa""" +912 73 evaluator """rankbased""" +912 74 dataset """kinships""" +912 74 model """transr""" +912 74 loss """crossentropy""" +912 74 regularizer """no""" +912 74 optimizer """adadelta""" +912 74 training_loop """lcwa""" +912 74 evaluator """rankbased""" +912 75 dataset """kinships""" +912 75 model """transr""" +912 75 loss """crossentropy""" +912 75 regularizer """no""" +912 75 optimizer """adadelta""" +912 75 training_loop """lcwa""" +912 75 evaluator """rankbased""" +912 76 dataset """kinships""" +912 76 model """transr""" +912 76 loss """crossentropy""" +912 76 regularizer """no""" +912 76 optimizer """adadelta""" +912 76 training_loop """lcwa""" +912 76 evaluator """rankbased""" +912 77 dataset """kinships""" +912 77 model """transr""" +912 77 loss """crossentropy""" +912 77 regularizer """no""" +912 77 optimizer """adadelta""" +912 77 training_loop """lcwa""" +912 77 evaluator """rankbased""" +912 78 dataset """kinships""" +912 78 model """transr""" +912 78 loss """crossentropy""" +912 78 regularizer """no""" +912 78 optimizer """adadelta""" +912 78 training_loop """lcwa""" +912 78 evaluator """rankbased""" +912 79 dataset """kinships""" +912 79 model """transr""" +912 79 loss """crossentropy""" +912 79 regularizer """no""" +912 79 optimizer """adadelta""" +912 79 training_loop """lcwa""" +912 79 evaluator """rankbased""" +912 80 dataset """kinships""" +912 80 model """transr""" +912 80 loss """crossentropy""" +912 80 regularizer """no""" 
+912 80 optimizer """adadelta""" +912 80 training_loop """lcwa""" +912 80 evaluator """rankbased""" +912 81 dataset """kinships""" +912 81 model """transr""" +912 81 loss """crossentropy""" +912 81 regularizer """no""" +912 81 optimizer """adadelta""" +912 81 training_loop """lcwa""" +912 81 evaluator """rankbased""" +912 82 dataset """kinships""" +912 82 model """transr""" +912 82 loss """crossentropy""" +912 82 regularizer """no""" +912 82 optimizer """adadelta""" +912 82 training_loop """lcwa""" +912 82 evaluator """rankbased""" +912 83 dataset """kinships""" +912 83 model """transr""" +912 83 loss """crossentropy""" +912 83 regularizer """no""" +912 83 optimizer """adadelta""" +912 83 training_loop """lcwa""" +912 83 evaluator """rankbased""" +912 84 dataset """kinships""" +912 84 model """transr""" +912 84 loss """crossentropy""" +912 84 regularizer """no""" +912 84 optimizer """adadelta""" +912 84 training_loop """lcwa""" +912 84 evaluator """rankbased""" +912 85 dataset """kinships""" +912 85 model """transr""" +912 85 loss """crossentropy""" +912 85 regularizer """no""" +912 85 optimizer """adadelta""" +912 85 training_loop """lcwa""" +912 85 evaluator """rankbased""" +912 86 dataset """kinships""" +912 86 model """transr""" +912 86 loss """crossentropy""" +912 86 regularizer """no""" +912 86 optimizer """adadelta""" +912 86 training_loop """lcwa""" +912 86 evaluator """rankbased""" +912 87 dataset """kinships""" +912 87 model """transr""" +912 87 loss """crossentropy""" +912 87 regularizer """no""" +912 87 optimizer """adadelta""" +912 87 training_loop """lcwa""" +912 87 evaluator """rankbased""" +912 88 dataset """kinships""" +912 88 model """transr""" +912 88 loss """crossentropy""" +912 88 regularizer """no""" +912 88 optimizer """adadelta""" +912 88 training_loop """lcwa""" +912 88 evaluator """rankbased""" +912 89 dataset """kinships""" +912 89 model """transr""" +912 89 loss """crossentropy""" +912 89 regularizer """no""" +912 89 optimizer 
"""adadelta""" +912 89 training_loop """lcwa""" +912 89 evaluator """rankbased""" +912 90 dataset """kinships""" +912 90 model """transr""" +912 90 loss """crossentropy""" +912 90 regularizer """no""" +912 90 optimizer """adadelta""" +912 90 training_loop """lcwa""" +912 90 evaluator """rankbased""" +912 91 dataset """kinships""" +912 91 model """transr""" +912 91 loss """crossentropy""" +912 91 regularizer """no""" +912 91 optimizer """adadelta""" +912 91 training_loop """lcwa""" +912 91 evaluator """rankbased""" +912 92 dataset """kinships""" +912 92 model """transr""" +912 92 loss """crossentropy""" +912 92 regularizer """no""" +912 92 optimizer """adadelta""" +912 92 training_loop """lcwa""" +912 92 evaluator """rankbased""" +912 93 dataset """kinships""" +912 93 model """transr""" +912 93 loss """crossentropy""" +912 93 regularizer """no""" +912 93 optimizer """adadelta""" +912 93 training_loop """lcwa""" +912 93 evaluator """rankbased""" +912 94 dataset """kinships""" +912 94 model """transr""" +912 94 loss """crossentropy""" +912 94 regularizer """no""" +912 94 optimizer """adadelta""" +912 94 training_loop """lcwa""" +912 94 evaluator """rankbased""" +912 95 dataset """kinships""" +912 95 model """transr""" +912 95 loss """crossentropy""" +912 95 regularizer """no""" +912 95 optimizer """adadelta""" +912 95 training_loop """lcwa""" +912 95 evaluator """rankbased""" +912 96 dataset """kinships""" +912 96 model """transr""" +912 96 loss """crossentropy""" +912 96 regularizer """no""" +912 96 optimizer """adadelta""" +912 96 training_loop """lcwa""" +912 96 evaluator """rankbased""" +912 97 dataset """kinships""" +912 97 model """transr""" +912 97 loss """crossentropy""" +912 97 regularizer """no""" +912 97 optimizer """adadelta""" +912 97 training_loop """lcwa""" +912 97 evaluator """rankbased""" +912 98 dataset """kinships""" +912 98 model """transr""" +912 98 loss """crossentropy""" +912 98 regularizer """no""" +912 98 optimizer """adadelta""" +912 98 
training_loop """lcwa""" +912 98 evaluator """rankbased""" +912 99 dataset """kinships""" +912 99 model """transr""" +912 99 loss """crossentropy""" +912 99 regularizer """no""" +912 99 optimizer """adadelta""" +912 99 training_loop """lcwa""" +912 99 evaluator """rankbased""" +912 100 dataset """kinships""" +912 100 model """transr""" +912 100 loss """crossentropy""" +912 100 regularizer """no""" +912 100 optimizer """adadelta""" +912 100 training_loop """lcwa""" +912 100 evaluator """rankbased""" +913 1 model.embedding_dim 1.0 +913 1 model.relation_dim 0.0 +913 1 model.scoring_fct_norm 1.0 +913 1 negative_sampler.num_negs_per_pos 0.0 +913 1 training.batch_size 1.0 +913 2 model.embedding_dim 1.0 +913 2 model.relation_dim 0.0 +913 2 model.scoring_fct_norm 2.0 +913 2 negative_sampler.num_negs_per_pos 16.0 +913 2 training.batch_size 2.0 +913 3 model.embedding_dim 0.0 +913 3 model.relation_dim 0.0 +913 3 model.scoring_fct_norm 2.0 +913 3 negative_sampler.num_negs_per_pos 37.0 +913 3 training.batch_size 2.0 +913 4 model.embedding_dim 1.0 +913 4 model.relation_dim 0.0 +913 4 model.scoring_fct_norm 1.0 +913 4 negative_sampler.num_negs_per_pos 88.0 +913 4 training.batch_size 0.0 +913 5 model.embedding_dim 2.0 +913 5 model.relation_dim 2.0 +913 5 model.scoring_fct_norm 1.0 +913 5 negative_sampler.num_negs_per_pos 70.0 +913 5 training.batch_size 0.0 +913 6 model.embedding_dim 0.0 +913 6 model.relation_dim 0.0 +913 6 model.scoring_fct_norm 2.0 +913 6 negative_sampler.num_negs_per_pos 11.0 +913 6 training.batch_size 2.0 +913 7 model.embedding_dim 2.0 +913 7 model.relation_dim 0.0 +913 7 model.scoring_fct_norm 1.0 +913 7 negative_sampler.num_negs_per_pos 34.0 +913 7 training.batch_size 2.0 +913 8 model.embedding_dim 2.0 +913 8 model.relation_dim 2.0 +913 8 model.scoring_fct_norm 1.0 +913 8 negative_sampler.num_negs_per_pos 77.0 +913 8 training.batch_size 2.0 +913 9 model.embedding_dim 0.0 +913 9 model.relation_dim 2.0 +913 9 model.scoring_fct_norm 2.0 +913 9 
negative_sampler.num_negs_per_pos 15.0 +913 9 training.batch_size 2.0 +913 10 model.embedding_dim 2.0 +913 10 model.relation_dim 2.0 +913 10 model.scoring_fct_norm 2.0 +913 10 negative_sampler.num_negs_per_pos 23.0 +913 10 training.batch_size 0.0 +913 11 model.embedding_dim 1.0 +913 11 model.relation_dim 0.0 +913 11 model.scoring_fct_norm 2.0 +913 11 negative_sampler.num_negs_per_pos 99.0 +913 11 training.batch_size 1.0 +913 12 model.embedding_dim 1.0 +913 12 model.relation_dim 1.0 +913 12 model.scoring_fct_norm 2.0 +913 12 negative_sampler.num_negs_per_pos 42.0 +913 12 training.batch_size 1.0 +913 13 model.embedding_dim 2.0 +913 13 model.relation_dim 2.0 +913 13 model.scoring_fct_norm 1.0 +913 13 negative_sampler.num_negs_per_pos 75.0 +913 13 training.batch_size 2.0 +913 14 model.embedding_dim 1.0 +913 14 model.relation_dim 2.0 +913 14 model.scoring_fct_norm 2.0 +913 14 negative_sampler.num_negs_per_pos 35.0 +913 14 training.batch_size 0.0 +913 15 model.embedding_dim 2.0 +913 15 model.relation_dim 0.0 +913 15 model.scoring_fct_norm 1.0 +913 15 negative_sampler.num_negs_per_pos 68.0 +913 15 training.batch_size 2.0 +913 16 model.embedding_dim 2.0 +913 16 model.relation_dim 2.0 +913 16 model.scoring_fct_norm 1.0 +913 16 negative_sampler.num_negs_per_pos 80.0 +913 16 training.batch_size 2.0 +913 17 model.embedding_dim 1.0 +913 17 model.relation_dim 1.0 +913 17 model.scoring_fct_norm 1.0 +913 17 negative_sampler.num_negs_per_pos 84.0 +913 17 training.batch_size 1.0 +913 18 model.embedding_dim 1.0 +913 18 model.relation_dim 1.0 +913 18 model.scoring_fct_norm 2.0 +913 18 negative_sampler.num_negs_per_pos 98.0 +913 18 training.batch_size 0.0 +913 19 model.embedding_dim 2.0 +913 19 model.relation_dim 2.0 +913 19 model.scoring_fct_norm 2.0 +913 19 negative_sampler.num_negs_per_pos 71.0 +913 19 training.batch_size 0.0 +913 20 model.embedding_dim 0.0 +913 20 model.relation_dim 0.0 +913 20 model.scoring_fct_norm 2.0 +913 20 negative_sampler.num_negs_per_pos 55.0 +913 20 
training.batch_size 0.0 +913 21 model.embedding_dim 0.0 +913 21 model.relation_dim 1.0 +913 21 model.scoring_fct_norm 2.0 +913 21 negative_sampler.num_negs_per_pos 42.0 +913 21 training.batch_size 1.0 +913 22 model.embedding_dim 2.0 +913 22 model.relation_dim 0.0 +913 22 model.scoring_fct_norm 2.0 +913 22 negative_sampler.num_negs_per_pos 80.0 +913 22 training.batch_size 0.0 +913 23 model.embedding_dim 1.0 +913 23 model.relation_dim 1.0 +913 23 model.scoring_fct_norm 1.0 +913 23 negative_sampler.num_negs_per_pos 82.0 +913 23 training.batch_size 1.0 +913 24 model.embedding_dim 1.0 +913 24 model.relation_dim 2.0 +913 24 model.scoring_fct_norm 2.0 +913 24 negative_sampler.num_negs_per_pos 49.0 +913 24 training.batch_size 1.0 +913 25 model.embedding_dim 0.0 +913 25 model.relation_dim 1.0 +913 25 model.scoring_fct_norm 2.0 +913 25 negative_sampler.num_negs_per_pos 99.0 +913 25 training.batch_size 2.0 +913 26 model.embedding_dim 0.0 +913 26 model.relation_dim 2.0 +913 26 model.scoring_fct_norm 1.0 +913 26 negative_sampler.num_negs_per_pos 86.0 +913 26 training.batch_size 0.0 +913 27 model.embedding_dim 1.0 +913 27 model.relation_dim 2.0 +913 27 model.scoring_fct_norm 1.0 +913 27 negative_sampler.num_negs_per_pos 78.0 +913 27 training.batch_size 1.0 +913 28 model.embedding_dim 2.0 +913 28 model.relation_dim 2.0 +913 28 model.scoring_fct_norm 2.0 +913 28 negative_sampler.num_negs_per_pos 30.0 +913 28 training.batch_size 1.0 +913 29 model.embedding_dim 0.0 +913 29 model.relation_dim 1.0 +913 29 model.scoring_fct_norm 1.0 +913 29 negative_sampler.num_negs_per_pos 39.0 +913 29 training.batch_size 0.0 +913 30 model.embedding_dim 1.0 +913 30 model.relation_dim 1.0 +913 30 model.scoring_fct_norm 1.0 +913 30 negative_sampler.num_negs_per_pos 23.0 +913 30 training.batch_size 1.0 +913 31 model.embedding_dim 0.0 +913 31 model.relation_dim 2.0 +913 31 model.scoring_fct_norm 1.0 +913 31 negative_sampler.num_negs_per_pos 10.0 +913 31 training.batch_size 0.0 +913 32 model.embedding_dim 
0.0 +913 32 model.relation_dim 1.0 +913 32 model.scoring_fct_norm 1.0 +913 32 negative_sampler.num_negs_per_pos 16.0 +913 32 training.batch_size 2.0 +913 33 model.embedding_dim 0.0 +913 33 model.relation_dim 1.0 +913 33 model.scoring_fct_norm 2.0 +913 33 negative_sampler.num_negs_per_pos 94.0 +913 33 training.batch_size 0.0 +913 34 model.embedding_dim 1.0 +913 34 model.relation_dim 1.0 +913 34 model.scoring_fct_norm 2.0 +913 34 negative_sampler.num_negs_per_pos 5.0 +913 34 training.batch_size 0.0 +913 35 model.embedding_dim 0.0 +913 35 model.relation_dim 1.0 +913 35 model.scoring_fct_norm 1.0 +913 35 negative_sampler.num_negs_per_pos 57.0 +913 35 training.batch_size 0.0 +913 36 model.embedding_dim 0.0 +913 36 model.relation_dim 2.0 +913 36 model.scoring_fct_norm 2.0 +913 36 negative_sampler.num_negs_per_pos 82.0 +913 36 training.batch_size 2.0 +913 37 model.embedding_dim 1.0 +913 37 model.relation_dim 0.0 +913 37 model.scoring_fct_norm 1.0 +913 37 negative_sampler.num_negs_per_pos 49.0 +913 37 training.batch_size 2.0 +913 38 model.embedding_dim 1.0 +913 38 model.relation_dim 1.0 +913 38 model.scoring_fct_norm 2.0 +913 38 negative_sampler.num_negs_per_pos 71.0 +913 38 training.batch_size 1.0 +913 39 model.embedding_dim 1.0 +913 39 model.relation_dim 2.0 +913 39 model.scoring_fct_norm 2.0 +913 39 negative_sampler.num_negs_per_pos 5.0 +913 39 training.batch_size 1.0 +913 40 model.embedding_dim 2.0 +913 40 model.relation_dim 2.0 +913 40 model.scoring_fct_norm 1.0 +913 40 negative_sampler.num_negs_per_pos 36.0 +913 40 training.batch_size 1.0 +913 41 model.embedding_dim 2.0 +913 41 model.relation_dim 1.0 +913 41 model.scoring_fct_norm 2.0 +913 41 negative_sampler.num_negs_per_pos 26.0 +913 41 training.batch_size 2.0 +913 42 model.embedding_dim 0.0 +913 42 model.relation_dim 0.0 +913 42 model.scoring_fct_norm 1.0 +913 42 negative_sampler.num_negs_per_pos 7.0 +913 42 training.batch_size 2.0 +913 43 model.embedding_dim 2.0 +913 43 model.relation_dim 1.0 +913 43 
model.scoring_fct_norm 1.0 +913 43 negative_sampler.num_negs_per_pos 82.0 +913 43 training.batch_size 0.0 +913 44 model.embedding_dim 1.0 +913 44 model.relation_dim 0.0 +913 44 model.scoring_fct_norm 2.0 +913 44 negative_sampler.num_negs_per_pos 60.0 +913 44 training.batch_size 0.0 +913 45 model.embedding_dim 0.0 +913 45 model.relation_dim 0.0 +913 45 model.scoring_fct_norm 1.0 +913 45 negative_sampler.num_negs_per_pos 13.0 +913 45 training.batch_size 2.0 +913 46 model.embedding_dim 1.0 +913 46 model.relation_dim 0.0 +913 46 model.scoring_fct_norm 2.0 +913 46 negative_sampler.num_negs_per_pos 72.0 +913 46 training.batch_size 2.0 +913 47 model.embedding_dim 2.0 +913 47 model.relation_dim 2.0 +913 47 model.scoring_fct_norm 1.0 +913 47 negative_sampler.num_negs_per_pos 18.0 +913 47 training.batch_size 1.0 +913 48 model.embedding_dim 1.0 +913 48 model.relation_dim 2.0 +913 48 model.scoring_fct_norm 2.0 +913 48 negative_sampler.num_negs_per_pos 84.0 +913 48 training.batch_size 1.0 +913 49 model.embedding_dim 0.0 +913 49 model.relation_dim 2.0 +913 49 model.scoring_fct_norm 1.0 +913 49 negative_sampler.num_negs_per_pos 64.0 +913 49 training.batch_size 0.0 +913 50 model.embedding_dim 0.0 +913 50 model.relation_dim 1.0 +913 50 model.scoring_fct_norm 1.0 +913 50 negative_sampler.num_negs_per_pos 77.0 +913 50 training.batch_size 2.0 +913 51 model.embedding_dim 1.0 +913 51 model.relation_dim 0.0 +913 51 model.scoring_fct_norm 2.0 +913 51 negative_sampler.num_negs_per_pos 59.0 +913 51 training.batch_size 0.0 +913 52 model.embedding_dim 2.0 +913 52 model.relation_dim 2.0 +913 52 model.scoring_fct_norm 2.0 +913 52 negative_sampler.num_negs_per_pos 7.0 +913 52 training.batch_size 2.0 +913 53 model.embedding_dim 2.0 +913 53 model.relation_dim 0.0 +913 53 model.scoring_fct_norm 1.0 +913 53 negative_sampler.num_negs_per_pos 37.0 +913 53 training.batch_size 0.0 +913 54 model.embedding_dim 2.0 +913 54 model.relation_dim 2.0 +913 54 model.scoring_fct_norm 1.0 +913 54 
negative_sampler.num_negs_per_pos 82.0 +913 54 training.batch_size 2.0 +913 55 model.embedding_dim 0.0 +913 55 model.relation_dim 0.0 +913 55 model.scoring_fct_norm 1.0 +913 55 negative_sampler.num_negs_per_pos 91.0 +913 55 training.batch_size 2.0 +913 56 model.embedding_dim 1.0 +913 56 model.relation_dim 1.0 +913 56 model.scoring_fct_norm 1.0 +913 56 negative_sampler.num_negs_per_pos 18.0 +913 56 training.batch_size 1.0 +913 57 model.embedding_dim 2.0 +913 57 model.relation_dim 0.0 +913 57 model.scoring_fct_norm 1.0 +913 57 negative_sampler.num_negs_per_pos 1.0 +913 57 training.batch_size 2.0 +913 58 model.embedding_dim 2.0 +913 58 model.relation_dim 2.0 +913 58 model.scoring_fct_norm 1.0 +913 58 negative_sampler.num_negs_per_pos 92.0 +913 58 training.batch_size 2.0 +913 59 model.embedding_dim 1.0 +913 59 model.relation_dim 0.0 +913 59 model.scoring_fct_norm 2.0 +913 59 negative_sampler.num_negs_per_pos 86.0 +913 59 training.batch_size 0.0 +913 1 dataset """kinships""" +913 1 model """transr""" +913 1 loss """bceaftersigmoid""" +913 1 regularizer """no""" +913 1 optimizer """adadelta""" +913 1 training_loop """owa""" +913 1 negative_sampler """basic""" +913 1 evaluator """rankbased""" +913 2 dataset """kinships""" +913 2 model """transr""" +913 2 loss """bceaftersigmoid""" +913 2 regularizer """no""" +913 2 optimizer """adadelta""" +913 2 training_loop """owa""" +913 2 negative_sampler """basic""" +913 2 evaluator """rankbased""" +913 3 dataset """kinships""" +913 3 model """transr""" +913 3 loss """bceaftersigmoid""" +913 3 regularizer """no""" +913 3 optimizer """adadelta""" +913 3 training_loop """owa""" +913 3 negative_sampler """basic""" +913 3 evaluator """rankbased""" +913 4 dataset """kinships""" +913 4 model """transr""" +913 4 loss """bceaftersigmoid""" +913 4 regularizer """no""" +913 4 optimizer """adadelta""" +913 4 training_loop """owa""" +913 4 negative_sampler """basic""" +913 4 evaluator """rankbased""" +913 5 dataset """kinships""" +913 5 model 
"""transr""" +913 5 loss """bceaftersigmoid""" +913 5 regularizer """no""" +913 5 optimizer """adadelta""" +913 5 training_loop """owa""" +913 5 negative_sampler """basic""" +913 5 evaluator """rankbased""" +913 6 dataset """kinships""" +913 6 model """transr""" +913 6 loss """bceaftersigmoid""" +913 6 regularizer """no""" +913 6 optimizer """adadelta""" +913 6 training_loop """owa""" +913 6 negative_sampler """basic""" +913 6 evaluator """rankbased""" +913 7 dataset """kinships""" +913 7 model """transr""" +913 7 loss """bceaftersigmoid""" +913 7 regularizer """no""" +913 7 optimizer """adadelta""" +913 7 training_loop """owa""" +913 7 negative_sampler """basic""" +913 7 evaluator """rankbased""" +913 8 dataset """kinships""" +913 8 model """transr""" +913 8 loss """bceaftersigmoid""" +913 8 regularizer """no""" +913 8 optimizer """adadelta""" +913 8 training_loop """owa""" +913 8 negative_sampler """basic""" +913 8 evaluator """rankbased""" +913 9 dataset """kinships""" +913 9 model """transr""" +913 9 loss """bceaftersigmoid""" +913 9 regularizer """no""" +913 9 optimizer """adadelta""" +913 9 training_loop """owa""" +913 9 negative_sampler """basic""" +913 9 evaluator """rankbased""" +913 10 dataset """kinships""" +913 10 model """transr""" +913 10 loss """bceaftersigmoid""" +913 10 regularizer """no""" +913 10 optimizer """adadelta""" +913 10 training_loop """owa""" +913 10 negative_sampler """basic""" +913 10 evaluator """rankbased""" +913 11 dataset """kinships""" +913 11 model """transr""" +913 11 loss """bceaftersigmoid""" +913 11 regularizer """no""" +913 11 optimizer """adadelta""" +913 11 training_loop """owa""" +913 11 negative_sampler """basic""" +913 11 evaluator """rankbased""" +913 12 dataset """kinships""" +913 12 model """transr""" +913 12 loss """bceaftersigmoid""" +913 12 regularizer """no""" +913 12 optimizer """adadelta""" +913 12 training_loop """owa""" +913 12 negative_sampler """basic""" +913 12 evaluator """rankbased""" +913 13 dataset 
"""kinships""" +913 13 model """transr""" +913 13 loss """bceaftersigmoid""" +913 13 regularizer """no""" +913 13 optimizer """adadelta""" +913 13 training_loop """owa""" +913 13 negative_sampler """basic""" +913 13 evaluator """rankbased""" +913 14 dataset """kinships""" +913 14 model """transr""" +913 14 loss """bceaftersigmoid""" +913 14 regularizer """no""" +913 14 optimizer """adadelta""" +913 14 training_loop """owa""" +913 14 negative_sampler """basic""" +913 14 evaluator """rankbased""" +913 15 dataset """kinships""" +913 15 model """transr""" +913 15 loss """bceaftersigmoid""" +913 15 regularizer """no""" +913 15 optimizer """adadelta""" +913 15 training_loop """owa""" +913 15 negative_sampler """basic""" +913 15 evaluator """rankbased""" +913 16 dataset """kinships""" +913 16 model """transr""" +913 16 loss """bceaftersigmoid""" +913 16 regularizer """no""" +913 16 optimizer """adadelta""" +913 16 training_loop """owa""" +913 16 negative_sampler """basic""" +913 16 evaluator """rankbased""" +913 17 dataset """kinships""" +913 17 model """transr""" +913 17 loss """bceaftersigmoid""" +913 17 regularizer """no""" +913 17 optimizer """adadelta""" +913 17 training_loop """owa""" +913 17 negative_sampler """basic""" +913 17 evaluator """rankbased""" +913 18 dataset """kinships""" +913 18 model """transr""" +913 18 loss """bceaftersigmoid""" +913 18 regularizer """no""" +913 18 optimizer """adadelta""" +913 18 training_loop """owa""" +913 18 negative_sampler """basic""" +913 18 evaluator """rankbased""" +913 19 dataset """kinships""" +913 19 model """transr""" +913 19 loss """bceaftersigmoid""" +913 19 regularizer """no""" +913 19 optimizer """adadelta""" +913 19 training_loop """owa""" +913 19 negative_sampler """basic""" +913 19 evaluator """rankbased""" +913 20 dataset """kinships""" +913 20 model """transr""" +913 20 loss """bceaftersigmoid""" +913 20 regularizer """no""" +913 20 optimizer """adadelta""" +913 20 training_loop """owa""" +913 20 
negative_sampler """basic""" +913 20 evaluator """rankbased""" +913 21 dataset """kinships""" +913 21 model """transr""" +913 21 loss """bceaftersigmoid""" +913 21 regularizer """no""" +913 21 optimizer """adadelta""" +913 21 training_loop """owa""" +913 21 negative_sampler """basic""" +913 21 evaluator """rankbased""" +913 22 dataset """kinships""" +913 22 model """transr""" +913 22 loss """bceaftersigmoid""" +913 22 regularizer """no""" +913 22 optimizer """adadelta""" +913 22 training_loop """owa""" +913 22 negative_sampler """basic""" +913 22 evaluator """rankbased""" +913 23 dataset """kinships""" +913 23 model """transr""" +913 23 loss """bceaftersigmoid""" +913 23 regularizer """no""" +913 23 optimizer """adadelta""" +913 23 training_loop """owa""" +913 23 negative_sampler """basic""" +913 23 evaluator """rankbased""" +913 24 dataset """kinships""" +913 24 model """transr""" +913 24 loss """bceaftersigmoid""" +913 24 regularizer """no""" +913 24 optimizer """adadelta""" +913 24 training_loop """owa""" +913 24 negative_sampler """basic""" +913 24 evaluator """rankbased""" +913 25 dataset """kinships""" +913 25 model """transr""" +913 25 loss """bceaftersigmoid""" +913 25 regularizer """no""" +913 25 optimizer """adadelta""" +913 25 training_loop """owa""" +913 25 negative_sampler """basic""" +913 25 evaluator """rankbased""" +913 26 dataset """kinships""" +913 26 model """transr""" +913 26 loss """bceaftersigmoid""" +913 26 regularizer """no""" +913 26 optimizer """adadelta""" +913 26 training_loop """owa""" +913 26 negative_sampler """basic""" +913 26 evaluator """rankbased""" +913 27 dataset """kinships""" +913 27 model """transr""" +913 27 loss """bceaftersigmoid""" +913 27 regularizer """no""" +913 27 optimizer """adadelta""" +913 27 training_loop """owa""" +913 27 negative_sampler """basic""" +913 27 evaluator """rankbased""" +913 28 dataset """kinships""" +913 28 model """transr""" +913 28 loss """bceaftersigmoid""" +913 28 regularizer """no""" +913 28 
optimizer """adadelta""" +913 28 training_loop """owa""" +913 28 negative_sampler """basic""" +913 28 evaluator """rankbased""" +913 29 dataset """kinships""" +913 29 model """transr""" +913 29 loss """bceaftersigmoid""" +913 29 regularizer """no""" +913 29 optimizer """adadelta""" +913 29 training_loop """owa""" +913 29 negative_sampler """basic""" +913 29 evaluator """rankbased""" +913 30 dataset """kinships""" +913 30 model """transr""" +913 30 loss """bceaftersigmoid""" +913 30 regularizer """no""" +913 30 optimizer """adadelta""" +913 30 training_loop """owa""" +913 30 negative_sampler """basic""" +913 30 evaluator """rankbased""" +913 31 dataset """kinships""" +913 31 model """transr""" +913 31 loss """bceaftersigmoid""" +913 31 regularizer """no""" +913 31 optimizer """adadelta""" +913 31 training_loop """owa""" +913 31 negative_sampler """basic""" +913 31 evaluator """rankbased""" +913 32 dataset """kinships""" +913 32 model """transr""" +913 32 loss """bceaftersigmoid""" +913 32 regularizer """no""" +913 32 optimizer """adadelta""" +913 32 training_loop """owa""" +913 32 negative_sampler """basic""" +913 32 evaluator """rankbased""" +913 33 dataset """kinships""" +913 33 model """transr""" +913 33 loss """bceaftersigmoid""" +913 33 regularizer """no""" +913 33 optimizer """adadelta""" +913 33 training_loop """owa""" +913 33 negative_sampler """basic""" +913 33 evaluator """rankbased""" +913 34 dataset """kinships""" +913 34 model """transr""" +913 34 loss """bceaftersigmoid""" +913 34 regularizer """no""" +913 34 optimizer """adadelta""" +913 34 training_loop """owa""" +913 34 negative_sampler """basic""" +913 34 evaluator """rankbased""" +913 35 dataset """kinships""" +913 35 model """transr""" +913 35 loss """bceaftersigmoid""" +913 35 regularizer """no""" +913 35 optimizer """adadelta""" +913 35 training_loop """owa""" +913 35 negative_sampler """basic""" +913 35 evaluator """rankbased""" +913 36 dataset """kinships""" +913 36 model """transr""" +913 36 
loss """bceaftersigmoid""" +913 36 regularizer """no""" +913 36 optimizer """adadelta""" +913 36 training_loop """owa""" +913 36 negative_sampler """basic""" +913 36 evaluator """rankbased""" +913 37 dataset """kinships""" +913 37 model """transr""" +913 37 loss """bceaftersigmoid""" +913 37 regularizer """no""" +913 37 optimizer """adadelta""" +913 37 training_loop """owa""" +913 37 negative_sampler """basic""" +913 37 evaluator """rankbased""" +913 38 dataset """kinships""" +913 38 model """transr""" +913 38 loss """bceaftersigmoid""" +913 38 regularizer """no""" +913 38 optimizer """adadelta""" +913 38 training_loop """owa""" +913 38 negative_sampler """basic""" +913 38 evaluator """rankbased""" +913 39 dataset """kinships""" +913 39 model """transr""" +913 39 loss """bceaftersigmoid""" +913 39 regularizer """no""" +913 39 optimizer """adadelta""" +913 39 training_loop """owa""" +913 39 negative_sampler """basic""" +913 39 evaluator """rankbased""" +913 40 dataset """kinships""" +913 40 model """transr""" +913 40 loss """bceaftersigmoid""" +913 40 regularizer """no""" +913 40 optimizer """adadelta""" +913 40 training_loop """owa""" +913 40 negative_sampler """basic""" +913 40 evaluator """rankbased""" +913 41 dataset """kinships""" +913 41 model """transr""" +913 41 loss """bceaftersigmoid""" +913 41 regularizer """no""" +913 41 optimizer """adadelta""" +913 41 training_loop """owa""" +913 41 negative_sampler """basic""" +913 41 evaluator """rankbased""" +913 42 dataset """kinships""" +913 42 model """transr""" +913 42 loss """bceaftersigmoid""" +913 42 regularizer """no""" +913 42 optimizer """adadelta""" +913 42 training_loop """owa""" +913 42 negative_sampler """basic""" +913 42 evaluator """rankbased""" +913 43 dataset """kinships""" +913 43 model """transr""" +913 43 loss """bceaftersigmoid""" +913 43 regularizer """no""" +913 43 optimizer """adadelta""" +913 43 training_loop """owa""" +913 43 negative_sampler """basic""" +913 43 evaluator """rankbased""" 
+913 44 dataset """kinships""" +913 44 model """transr""" +913 44 loss """bceaftersigmoid""" +913 44 regularizer """no""" +913 44 optimizer """adadelta""" +913 44 training_loop """owa""" +913 44 negative_sampler """basic""" +913 44 evaluator """rankbased""" +913 45 dataset """kinships""" +913 45 model """transr""" +913 45 loss """bceaftersigmoid""" +913 45 regularizer """no""" +913 45 optimizer """adadelta""" +913 45 training_loop """owa""" +913 45 negative_sampler """basic""" +913 45 evaluator """rankbased""" +913 46 dataset """kinships""" +913 46 model """transr""" +913 46 loss """bceaftersigmoid""" +913 46 regularizer """no""" +913 46 optimizer """adadelta""" +913 46 training_loop """owa""" +913 46 negative_sampler """basic""" +913 46 evaluator """rankbased""" +913 47 dataset """kinships""" +913 47 model """transr""" +913 47 loss """bceaftersigmoid""" +913 47 regularizer """no""" +913 47 optimizer """adadelta""" +913 47 training_loop """owa""" +913 47 negative_sampler """basic""" +913 47 evaluator """rankbased""" +913 48 dataset """kinships""" +913 48 model """transr""" +913 48 loss """bceaftersigmoid""" +913 48 regularizer """no""" +913 48 optimizer """adadelta""" +913 48 training_loop """owa""" +913 48 negative_sampler """basic""" +913 48 evaluator """rankbased""" +913 49 dataset """kinships""" +913 49 model """transr""" +913 49 loss """bceaftersigmoid""" +913 49 regularizer """no""" +913 49 optimizer """adadelta""" +913 49 training_loop """owa""" +913 49 negative_sampler """basic""" +913 49 evaluator """rankbased""" +913 50 dataset """kinships""" +913 50 model """transr""" +913 50 loss """bceaftersigmoid""" +913 50 regularizer """no""" +913 50 optimizer """adadelta""" +913 50 training_loop """owa""" +913 50 negative_sampler """basic""" +913 50 evaluator """rankbased""" +913 51 dataset """kinships""" +913 51 model """transr""" +913 51 loss """bceaftersigmoid""" +913 51 regularizer """no""" +913 51 optimizer """adadelta""" +913 51 training_loop """owa""" +913 
51 negative_sampler """basic""" +913 51 evaluator """rankbased""" +913 52 dataset """kinships""" +913 52 model """transr""" +913 52 loss """bceaftersigmoid""" +913 52 regularizer """no""" +913 52 optimizer """adadelta""" +913 52 training_loop """owa""" +913 52 negative_sampler """basic""" +913 52 evaluator """rankbased""" +913 53 dataset """kinships""" +913 53 model """transr""" +913 53 loss """bceaftersigmoid""" +913 53 regularizer """no""" +913 53 optimizer """adadelta""" +913 53 training_loop """owa""" +913 53 negative_sampler """basic""" +913 53 evaluator """rankbased""" +913 54 dataset """kinships""" +913 54 model """transr""" +913 54 loss """bceaftersigmoid""" +913 54 regularizer """no""" +913 54 optimizer """adadelta""" +913 54 training_loop """owa""" +913 54 negative_sampler """basic""" +913 54 evaluator """rankbased""" +913 55 dataset """kinships""" +913 55 model """transr""" +913 55 loss """bceaftersigmoid""" +913 55 regularizer """no""" +913 55 optimizer """adadelta""" +913 55 training_loop """owa""" +913 55 negative_sampler """basic""" +913 55 evaluator """rankbased""" +913 56 dataset """kinships""" +913 56 model """transr""" +913 56 loss """bceaftersigmoid""" +913 56 regularizer """no""" +913 56 optimizer """adadelta""" +913 56 training_loop """owa""" +913 56 negative_sampler """basic""" +913 56 evaluator """rankbased""" +913 57 dataset """kinships""" +913 57 model """transr""" +913 57 loss """bceaftersigmoid""" +913 57 regularizer """no""" +913 57 optimizer """adadelta""" +913 57 training_loop """owa""" +913 57 negative_sampler """basic""" +913 57 evaluator """rankbased""" +913 58 dataset """kinships""" +913 58 model """transr""" +913 58 loss """bceaftersigmoid""" +913 58 regularizer """no""" +913 58 optimizer """adadelta""" +913 58 training_loop """owa""" +913 58 negative_sampler """basic""" +913 58 evaluator """rankbased""" +913 59 dataset """kinships""" +913 59 model """transr""" +913 59 loss """bceaftersigmoid""" +913 59 regularizer """no""" +913 
59 optimizer """adadelta""" +913 59 training_loop """owa""" +913 59 negative_sampler """basic""" +913 59 evaluator """rankbased""" +914 1 model.embedding_dim 2.0 +914 1 model.relation_dim 0.0 +914 1 model.scoring_fct_norm 1.0 +914 1 negative_sampler.num_negs_per_pos 8.0 +914 1 training.batch_size 0.0 +914 2 model.embedding_dim 2.0 +914 2 model.relation_dim 0.0 +914 2 model.scoring_fct_norm 2.0 +914 2 negative_sampler.num_negs_per_pos 44.0 +914 2 training.batch_size 2.0 +914 3 model.embedding_dim 0.0 +914 3 model.relation_dim 0.0 +914 3 model.scoring_fct_norm 2.0 +914 3 negative_sampler.num_negs_per_pos 76.0 +914 3 training.batch_size 0.0 +914 4 model.embedding_dim 2.0 +914 4 model.relation_dim 1.0 +914 4 model.scoring_fct_norm 1.0 +914 4 negative_sampler.num_negs_per_pos 53.0 +914 4 training.batch_size 2.0 +914 5 model.embedding_dim 2.0 +914 5 model.relation_dim 0.0 +914 5 model.scoring_fct_norm 2.0 +914 5 negative_sampler.num_negs_per_pos 69.0 +914 5 training.batch_size 2.0 +914 6 model.embedding_dim 1.0 +914 6 model.relation_dim 0.0 +914 6 model.scoring_fct_norm 2.0 +914 6 negative_sampler.num_negs_per_pos 19.0 +914 6 training.batch_size 2.0 +914 7 model.embedding_dim 2.0 +914 7 model.relation_dim 1.0 +914 7 model.scoring_fct_norm 1.0 +914 7 negative_sampler.num_negs_per_pos 51.0 +914 7 training.batch_size 1.0 +914 8 model.embedding_dim 0.0 +914 8 model.relation_dim 1.0 +914 8 model.scoring_fct_norm 1.0 +914 8 negative_sampler.num_negs_per_pos 82.0 +914 8 training.batch_size 1.0 +914 9 model.embedding_dim 0.0 +914 9 model.relation_dim 0.0 +914 9 model.scoring_fct_norm 1.0 +914 9 negative_sampler.num_negs_per_pos 41.0 +914 9 training.batch_size 1.0 +914 10 model.embedding_dim 0.0 +914 10 model.relation_dim 0.0 +914 10 model.scoring_fct_norm 2.0 +914 10 negative_sampler.num_negs_per_pos 77.0 +914 10 training.batch_size 1.0 +914 11 model.embedding_dim 2.0 +914 11 model.relation_dim 0.0 +914 11 model.scoring_fct_norm 2.0 +914 11 negative_sampler.num_negs_per_pos 17.0 
+914 11 training.batch_size 2.0 +914 12 model.embedding_dim 0.0 +914 12 model.relation_dim 0.0 +914 12 model.scoring_fct_norm 2.0 +914 12 negative_sampler.num_negs_per_pos 98.0 +914 12 training.batch_size 2.0 +914 13 model.embedding_dim 2.0 +914 13 model.relation_dim 2.0 +914 13 model.scoring_fct_norm 2.0 +914 13 negative_sampler.num_negs_per_pos 83.0 +914 13 training.batch_size 2.0 +914 14 model.embedding_dim 2.0 +914 14 model.relation_dim 0.0 +914 14 model.scoring_fct_norm 1.0 +914 14 negative_sampler.num_negs_per_pos 17.0 +914 14 training.batch_size 1.0 +914 15 model.embedding_dim 1.0 +914 15 model.relation_dim 0.0 +914 15 model.scoring_fct_norm 2.0 +914 15 negative_sampler.num_negs_per_pos 55.0 +914 15 training.batch_size 0.0 +914 16 model.embedding_dim 2.0 +914 16 model.relation_dim 1.0 +914 16 model.scoring_fct_norm 1.0 +914 16 negative_sampler.num_negs_per_pos 36.0 +914 16 training.batch_size 2.0 +914 17 model.embedding_dim 0.0 +914 17 model.relation_dim 1.0 +914 17 model.scoring_fct_norm 1.0 +914 17 negative_sampler.num_negs_per_pos 3.0 +914 17 training.batch_size 0.0 +914 18 model.embedding_dim 1.0 +914 18 model.relation_dim 1.0 +914 18 model.scoring_fct_norm 2.0 +914 18 negative_sampler.num_negs_per_pos 30.0 +914 18 training.batch_size 0.0 +914 19 model.embedding_dim 1.0 +914 19 model.relation_dim 1.0 +914 19 model.scoring_fct_norm 1.0 +914 19 negative_sampler.num_negs_per_pos 39.0 +914 19 training.batch_size 2.0 +914 20 model.embedding_dim 0.0 +914 20 model.relation_dim 1.0 +914 20 model.scoring_fct_norm 2.0 +914 20 negative_sampler.num_negs_per_pos 88.0 +914 20 training.batch_size 0.0 +914 21 model.embedding_dim 1.0 +914 21 model.relation_dim 2.0 +914 21 model.scoring_fct_norm 1.0 +914 21 negative_sampler.num_negs_per_pos 24.0 +914 21 training.batch_size 0.0 +914 22 model.embedding_dim 0.0 +914 22 model.relation_dim 1.0 +914 22 model.scoring_fct_norm 2.0 +914 22 negative_sampler.num_negs_per_pos 64.0 +914 22 training.batch_size 1.0 +914 23 
model.embedding_dim 2.0 +914 23 model.relation_dim 2.0 +914 23 model.scoring_fct_norm 2.0 +914 23 negative_sampler.num_negs_per_pos 12.0 +914 23 training.batch_size 0.0 +914 24 model.embedding_dim 1.0 +914 24 model.relation_dim 1.0 +914 24 model.scoring_fct_norm 1.0 +914 24 negative_sampler.num_negs_per_pos 31.0 +914 24 training.batch_size 0.0 +914 25 model.embedding_dim 1.0 +914 25 model.relation_dim 2.0 +914 25 model.scoring_fct_norm 1.0 +914 25 negative_sampler.num_negs_per_pos 18.0 +914 25 training.batch_size 2.0 +914 26 model.embedding_dim 2.0 +914 26 model.relation_dim 2.0 +914 26 model.scoring_fct_norm 2.0 +914 26 negative_sampler.num_negs_per_pos 95.0 +914 26 training.batch_size 0.0 +914 27 model.embedding_dim 1.0 +914 27 model.relation_dim 2.0 +914 27 model.scoring_fct_norm 2.0 +914 27 negative_sampler.num_negs_per_pos 90.0 +914 27 training.batch_size 0.0 +914 28 model.embedding_dim 2.0 +914 28 model.relation_dim 1.0 +914 28 model.scoring_fct_norm 1.0 +914 28 negative_sampler.num_negs_per_pos 89.0 +914 28 training.batch_size 2.0 +914 29 model.embedding_dim 2.0 +914 29 model.relation_dim 1.0 +914 29 model.scoring_fct_norm 1.0 +914 29 negative_sampler.num_negs_per_pos 93.0 +914 29 training.batch_size 2.0 +914 30 model.embedding_dim 1.0 +914 30 model.relation_dim 2.0 +914 30 model.scoring_fct_norm 1.0 +914 30 negative_sampler.num_negs_per_pos 96.0 +914 30 training.batch_size 0.0 +914 31 model.embedding_dim 1.0 +914 31 model.relation_dim 0.0 +914 31 model.scoring_fct_norm 1.0 +914 31 negative_sampler.num_negs_per_pos 3.0 +914 31 training.batch_size 1.0 +914 32 model.embedding_dim 2.0 +914 32 model.relation_dim 0.0 +914 32 model.scoring_fct_norm 1.0 +914 32 negative_sampler.num_negs_per_pos 65.0 +914 32 training.batch_size 0.0 +914 33 model.embedding_dim 0.0 +914 33 model.relation_dim 1.0 +914 33 model.scoring_fct_norm 2.0 +914 33 negative_sampler.num_negs_per_pos 83.0 +914 33 training.batch_size 2.0 +914 34 model.embedding_dim 0.0 +914 34 model.relation_dim 
0.0 +914 34 model.scoring_fct_norm 1.0 +914 34 negative_sampler.num_negs_per_pos 3.0 +914 34 training.batch_size 2.0 +914 35 model.embedding_dim 1.0 +914 35 model.relation_dim 2.0 +914 35 model.scoring_fct_norm 2.0 +914 35 negative_sampler.num_negs_per_pos 79.0 +914 35 training.batch_size 2.0 +914 36 model.embedding_dim 0.0 +914 36 model.relation_dim 2.0 +914 36 model.scoring_fct_norm 1.0 +914 36 negative_sampler.num_negs_per_pos 69.0 +914 36 training.batch_size 2.0 +914 37 model.embedding_dim 2.0 +914 37 model.relation_dim 1.0 +914 37 model.scoring_fct_norm 2.0 +914 37 negative_sampler.num_negs_per_pos 56.0 +914 37 training.batch_size 0.0 +914 38 model.embedding_dim 0.0 +914 38 model.relation_dim 2.0 +914 38 model.scoring_fct_norm 2.0 +914 38 negative_sampler.num_negs_per_pos 65.0 +914 38 training.batch_size 1.0 +914 39 model.embedding_dim 0.0 +914 39 model.relation_dim 1.0 +914 39 model.scoring_fct_norm 1.0 +914 39 negative_sampler.num_negs_per_pos 70.0 +914 39 training.batch_size 0.0 +914 40 model.embedding_dim 1.0 +914 40 model.relation_dim 2.0 +914 40 model.scoring_fct_norm 1.0 +914 40 negative_sampler.num_negs_per_pos 14.0 +914 40 training.batch_size 0.0 +914 41 model.embedding_dim 1.0 +914 41 model.relation_dim 1.0 +914 41 model.scoring_fct_norm 1.0 +914 41 negative_sampler.num_negs_per_pos 32.0 +914 41 training.batch_size 0.0 +914 42 model.embedding_dim 2.0 +914 42 model.relation_dim 0.0 +914 42 model.scoring_fct_norm 2.0 +914 42 negative_sampler.num_negs_per_pos 88.0 +914 42 training.batch_size 2.0 +914 43 model.embedding_dim 0.0 +914 43 model.relation_dim 1.0 +914 43 model.scoring_fct_norm 1.0 +914 43 negative_sampler.num_negs_per_pos 52.0 +914 43 training.batch_size 1.0 +914 44 model.embedding_dim 0.0 +914 44 model.relation_dim 1.0 +914 44 model.scoring_fct_norm 2.0 +914 44 negative_sampler.num_negs_per_pos 21.0 +914 44 training.batch_size 2.0 +914 45 model.embedding_dim 1.0 +914 45 model.relation_dim 1.0 +914 45 model.scoring_fct_norm 2.0 +914 45 
negative_sampler.num_negs_per_pos 67.0 +914 45 training.batch_size 1.0 +914 46 model.embedding_dim 0.0 +914 46 model.relation_dim 2.0 +914 46 model.scoring_fct_norm 2.0 +914 46 negative_sampler.num_negs_per_pos 40.0 +914 46 training.batch_size 2.0 +914 47 model.embedding_dim 1.0 +914 47 model.relation_dim 2.0 +914 47 model.scoring_fct_norm 2.0 +914 47 negative_sampler.num_negs_per_pos 39.0 +914 47 training.batch_size 2.0 +914 48 model.embedding_dim 2.0 +914 48 model.relation_dim 2.0 +914 48 model.scoring_fct_norm 1.0 +914 48 negative_sampler.num_negs_per_pos 66.0 +914 48 training.batch_size 1.0 +914 49 model.embedding_dim 0.0 +914 49 model.relation_dim 0.0 +914 49 model.scoring_fct_norm 1.0 +914 49 negative_sampler.num_negs_per_pos 9.0 +914 49 training.batch_size 0.0 +914 50 model.embedding_dim 0.0 +914 50 model.relation_dim 0.0 +914 50 model.scoring_fct_norm 2.0 +914 50 negative_sampler.num_negs_per_pos 63.0 +914 50 training.batch_size 0.0 +914 51 model.embedding_dim 2.0 +914 51 model.relation_dim 0.0 +914 51 model.scoring_fct_norm 2.0 +914 51 negative_sampler.num_negs_per_pos 28.0 +914 51 training.batch_size 0.0 +914 52 model.embedding_dim 1.0 +914 52 model.relation_dim 2.0 +914 52 model.scoring_fct_norm 2.0 +914 52 negative_sampler.num_negs_per_pos 33.0 +914 52 training.batch_size 0.0 +914 53 model.embedding_dim 2.0 +914 53 model.relation_dim 1.0 +914 53 model.scoring_fct_norm 2.0 +914 53 negative_sampler.num_negs_per_pos 51.0 +914 53 training.batch_size 0.0 +914 54 model.embedding_dim 1.0 +914 54 model.relation_dim 1.0 +914 54 model.scoring_fct_norm 1.0 +914 54 negative_sampler.num_negs_per_pos 68.0 +914 54 training.batch_size 2.0 +914 1 dataset """kinships""" +914 1 model """transr""" +914 1 loss """softplus""" +914 1 regularizer """no""" +914 1 optimizer """adadelta""" +914 1 training_loop """owa""" +914 1 negative_sampler """basic""" +914 1 evaluator """rankbased""" +914 2 dataset """kinships""" +914 2 model """transr""" +914 2 loss """softplus""" +914 2 
regularizer """no""" +914 2 optimizer """adadelta""" +914 2 training_loop """owa""" +914 2 negative_sampler """basic""" +914 2 evaluator """rankbased""" +914 3 dataset """kinships""" +914 3 model """transr""" +914 3 loss """softplus""" +914 3 regularizer """no""" +914 3 optimizer """adadelta""" +914 3 training_loop """owa""" +914 3 negative_sampler """basic""" +914 3 evaluator """rankbased""" +914 4 dataset """kinships""" +914 4 model """transr""" +914 4 loss """softplus""" +914 4 regularizer """no""" +914 4 optimizer """adadelta""" +914 4 training_loop """owa""" +914 4 negative_sampler """basic""" +914 4 evaluator """rankbased""" +914 5 dataset """kinships""" +914 5 model """transr""" +914 5 loss """softplus""" +914 5 regularizer """no""" +914 5 optimizer """adadelta""" +914 5 training_loop """owa""" +914 5 negative_sampler """basic""" +914 5 evaluator """rankbased""" +914 6 dataset """kinships""" +914 6 model """transr""" +914 6 loss """softplus""" +914 6 regularizer """no""" +914 6 optimizer """adadelta""" +914 6 training_loop """owa""" +914 6 negative_sampler """basic""" +914 6 evaluator """rankbased""" +914 7 dataset """kinships""" +914 7 model """transr""" +914 7 loss """softplus""" +914 7 regularizer """no""" +914 7 optimizer """adadelta""" +914 7 training_loop """owa""" +914 7 negative_sampler """basic""" +914 7 evaluator """rankbased""" +914 8 dataset """kinships""" +914 8 model """transr""" +914 8 loss """softplus""" +914 8 regularizer """no""" +914 8 optimizer """adadelta""" +914 8 training_loop """owa""" +914 8 negative_sampler """basic""" +914 8 evaluator """rankbased""" +914 9 dataset """kinships""" +914 9 model """transr""" +914 9 loss """softplus""" +914 9 regularizer """no""" +914 9 optimizer """adadelta""" +914 9 training_loop """owa""" +914 9 negative_sampler """basic""" +914 9 evaluator """rankbased""" +914 10 dataset """kinships""" +914 10 model """transr""" +914 10 loss """softplus""" +914 10 regularizer """no""" +914 10 optimizer 
"""adadelta""" +914 10 training_loop """owa""" +914 10 negative_sampler """basic""" +914 10 evaluator """rankbased""" +914 11 dataset """kinships""" +914 11 model """transr""" +914 11 loss """softplus""" +914 11 regularizer """no""" +914 11 optimizer """adadelta""" +914 11 training_loop """owa""" +914 11 negative_sampler """basic""" +914 11 evaluator """rankbased""" +914 12 dataset """kinships""" +914 12 model """transr""" +914 12 loss """softplus""" +914 12 regularizer """no""" +914 12 optimizer """adadelta""" +914 12 training_loop """owa""" +914 12 negative_sampler """basic""" +914 12 evaluator """rankbased""" +914 13 dataset """kinships""" +914 13 model """transr""" +914 13 loss """softplus""" +914 13 regularizer """no""" +914 13 optimizer """adadelta""" +914 13 training_loop """owa""" +914 13 negative_sampler """basic""" +914 13 evaluator """rankbased""" +914 14 dataset """kinships""" +914 14 model """transr""" +914 14 loss """softplus""" +914 14 regularizer """no""" +914 14 optimizer """adadelta""" +914 14 training_loop """owa""" +914 14 negative_sampler """basic""" +914 14 evaluator """rankbased""" +914 15 dataset """kinships""" +914 15 model """transr""" +914 15 loss """softplus""" +914 15 regularizer """no""" +914 15 optimizer """adadelta""" +914 15 training_loop """owa""" +914 15 negative_sampler """basic""" +914 15 evaluator """rankbased""" +914 16 dataset """kinships""" +914 16 model """transr""" +914 16 loss """softplus""" +914 16 regularizer """no""" +914 16 optimizer """adadelta""" +914 16 training_loop """owa""" +914 16 negative_sampler """basic""" +914 16 evaluator """rankbased""" +914 17 dataset """kinships""" +914 17 model """transr""" +914 17 loss """softplus""" +914 17 regularizer """no""" +914 17 optimizer """adadelta""" +914 17 training_loop """owa""" +914 17 negative_sampler """basic""" +914 17 evaluator """rankbased""" +914 18 dataset """kinships""" +914 18 model """transr""" +914 18 loss """softplus""" +914 18 regularizer """no""" +914 18 
optimizer """adadelta""" +914 18 training_loop """owa""" +914 18 negative_sampler """basic""" +914 18 evaluator """rankbased""" +914 19 dataset """kinships""" +914 19 model """transr""" +914 19 loss """softplus""" +914 19 regularizer """no""" +914 19 optimizer """adadelta""" +914 19 training_loop """owa""" +914 19 negative_sampler """basic""" +914 19 evaluator """rankbased""" +914 20 dataset """kinships""" +914 20 model """transr""" +914 20 loss """softplus""" +914 20 regularizer """no""" +914 20 optimizer """adadelta""" +914 20 training_loop """owa""" +914 20 negative_sampler """basic""" +914 20 evaluator """rankbased""" +914 21 dataset """kinships""" +914 21 model """transr""" +914 21 loss """softplus""" +914 21 regularizer """no""" +914 21 optimizer """adadelta""" +914 21 training_loop """owa""" +914 21 negative_sampler """basic""" +914 21 evaluator """rankbased""" +914 22 dataset """kinships""" +914 22 model """transr""" +914 22 loss """softplus""" +914 22 regularizer """no""" +914 22 optimizer """adadelta""" +914 22 training_loop """owa""" +914 22 negative_sampler """basic""" +914 22 evaluator """rankbased""" +914 23 dataset """kinships""" +914 23 model """transr""" +914 23 loss """softplus""" +914 23 regularizer """no""" +914 23 optimizer """adadelta""" +914 23 training_loop """owa""" +914 23 negative_sampler """basic""" +914 23 evaluator """rankbased""" +914 24 dataset """kinships""" +914 24 model """transr""" +914 24 loss """softplus""" +914 24 regularizer """no""" +914 24 optimizer """adadelta""" +914 24 training_loop """owa""" +914 24 negative_sampler """basic""" +914 24 evaluator """rankbased""" +914 25 dataset """kinships""" +914 25 model """transr""" +914 25 loss """softplus""" +914 25 regularizer """no""" +914 25 optimizer """adadelta""" +914 25 training_loop """owa""" +914 25 negative_sampler """basic""" +914 25 evaluator """rankbased""" +914 26 dataset """kinships""" +914 26 model """transr""" +914 26 loss """softplus""" +914 26 regularizer """no""" 
+914 26 optimizer """adadelta""" +914 26 training_loop """owa""" +914 26 negative_sampler """basic""" +914 26 evaluator """rankbased""" +914 27 dataset """kinships""" +914 27 model """transr""" +914 27 loss """softplus""" +914 27 regularizer """no""" +914 27 optimizer """adadelta""" +914 27 training_loop """owa""" +914 27 negative_sampler """basic""" +914 27 evaluator """rankbased""" +914 28 dataset """kinships""" +914 28 model """transr""" +914 28 loss """softplus""" +914 28 regularizer """no""" +914 28 optimizer """adadelta""" +914 28 training_loop """owa""" +914 28 negative_sampler """basic""" +914 28 evaluator """rankbased""" +914 29 dataset """kinships""" +914 29 model """transr""" +914 29 loss """softplus""" +914 29 regularizer """no""" +914 29 optimizer """adadelta""" +914 29 training_loop """owa""" +914 29 negative_sampler """basic""" +914 29 evaluator """rankbased""" +914 30 dataset """kinships""" +914 30 model """transr""" +914 30 loss """softplus""" +914 30 regularizer """no""" +914 30 optimizer """adadelta""" +914 30 training_loop """owa""" +914 30 negative_sampler """basic""" +914 30 evaluator """rankbased""" +914 31 dataset """kinships""" +914 31 model """transr""" +914 31 loss """softplus""" +914 31 regularizer """no""" +914 31 optimizer """adadelta""" +914 31 training_loop """owa""" +914 31 negative_sampler """basic""" +914 31 evaluator """rankbased""" +914 32 dataset """kinships""" +914 32 model """transr""" +914 32 loss """softplus""" +914 32 regularizer """no""" +914 32 optimizer """adadelta""" +914 32 training_loop """owa""" +914 32 negative_sampler """basic""" +914 32 evaluator """rankbased""" +914 33 dataset """kinships""" +914 33 model """transr""" +914 33 loss """softplus""" +914 33 regularizer """no""" +914 33 optimizer """adadelta""" +914 33 training_loop """owa""" +914 33 negative_sampler """basic""" +914 33 evaluator """rankbased""" +914 34 dataset """kinships""" +914 34 model """transr""" +914 34 loss """softplus""" +914 34 regularizer 
"""no""" +914 34 optimizer """adadelta""" +914 34 training_loop """owa""" +914 34 negative_sampler """basic""" +914 34 evaluator """rankbased""" +914 35 dataset """kinships""" +914 35 model """transr""" +914 35 loss """softplus""" +914 35 regularizer """no""" +914 35 optimizer """adadelta""" +914 35 training_loop """owa""" +914 35 negative_sampler """basic""" +914 35 evaluator """rankbased""" +914 36 dataset """kinships""" +914 36 model """transr""" +914 36 loss """softplus""" +914 36 regularizer """no""" +914 36 optimizer """adadelta""" +914 36 training_loop """owa""" +914 36 negative_sampler """basic""" +914 36 evaluator """rankbased""" +914 37 dataset """kinships""" +914 37 model """transr""" +914 37 loss """softplus""" +914 37 regularizer """no""" +914 37 optimizer """adadelta""" +914 37 training_loop """owa""" +914 37 negative_sampler """basic""" +914 37 evaluator """rankbased""" +914 38 dataset """kinships""" +914 38 model """transr""" +914 38 loss """softplus""" +914 38 regularizer """no""" +914 38 optimizer """adadelta""" +914 38 training_loop """owa""" +914 38 negative_sampler """basic""" +914 38 evaluator """rankbased""" +914 39 dataset """kinships""" +914 39 model """transr""" +914 39 loss """softplus""" +914 39 regularizer """no""" +914 39 optimizer """adadelta""" +914 39 training_loop """owa""" +914 39 negative_sampler """basic""" +914 39 evaluator """rankbased""" +914 40 dataset """kinships""" +914 40 model """transr""" +914 40 loss """softplus""" +914 40 regularizer """no""" +914 40 optimizer """adadelta""" +914 40 training_loop """owa""" +914 40 negative_sampler """basic""" +914 40 evaluator """rankbased""" +914 41 dataset """kinships""" +914 41 model """transr""" +914 41 loss """softplus""" +914 41 regularizer """no""" +914 41 optimizer """adadelta""" +914 41 training_loop """owa""" +914 41 negative_sampler """basic""" +914 41 evaluator """rankbased""" +914 42 dataset """kinships""" +914 42 model """transr""" +914 42 loss """softplus""" +914 42 
regularizer """no""" +914 42 optimizer """adadelta""" +914 42 training_loop """owa""" +914 42 negative_sampler """basic""" +914 42 evaluator """rankbased""" +914 43 dataset """kinships""" +914 43 model """transr""" +914 43 loss """softplus""" +914 43 regularizer """no""" +914 43 optimizer """adadelta""" +914 43 training_loop """owa""" +914 43 negative_sampler """basic""" +914 43 evaluator """rankbased""" +914 44 dataset """kinships""" +914 44 model """transr""" +914 44 loss """softplus""" +914 44 regularizer """no""" +914 44 optimizer """adadelta""" +914 44 training_loop """owa""" +914 44 negative_sampler """basic""" +914 44 evaluator """rankbased""" +914 45 dataset """kinships""" +914 45 model """transr""" +914 45 loss """softplus""" +914 45 regularizer """no""" +914 45 optimizer """adadelta""" +914 45 training_loop """owa""" +914 45 negative_sampler """basic""" +914 45 evaluator """rankbased""" +914 46 dataset """kinships""" +914 46 model """transr""" +914 46 loss """softplus""" +914 46 regularizer """no""" +914 46 optimizer """adadelta""" +914 46 training_loop """owa""" +914 46 negative_sampler """basic""" +914 46 evaluator """rankbased""" +914 47 dataset """kinships""" +914 47 model """transr""" +914 47 loss """softplus""" +914 47 regularizer """no""" +914 47 optimizer """adadelta""" +914 47 training_loop """owa""" +914 47 negative_sampler """basic""" +914 47 evaluator """rankbased""" +914 48 dataset """kinships""" +914 48 model """transr""" +914 48 loss """softplus""" +914 48 regularizer """no""" +914 48 optimizer """adadelta""" +914 48 training_loop """owa""" +914 48 negative_sampler """basic""" +914 48 evaluator """rankbased""" +914 49 dataset """kinships""" +914 49 model """transr""" +914 49 loss """softplus""" +914 49 regularizer """no""" +914 49 optimizer """adadelta""" +914 49 training_loop """owa""" +914 49 negative_sampler """basic""" +914 49 evaluator """rankbased""" +914 50 dataset """kinships""" +914 50 model """transr""" +914 50 loss """softplus""" 
+914 50 regularizer """no""" +914 50 optimizer """adadelta""" +914 50 training_loop """owa""" +914 50 negative_sampler """basic""" +914 50 evaluator """rankbased""" +914 51 dataset """kinships""" +914 51 model """transr""" +914 51 loss """softplus""" +914 51 regularizer """no""" +914 51 optimizer """adadelta""" +914 51 training_loop """owa""" +914 51 negative_sampler """basic""" +914 51 evaluator """rankbased""" +914 52 dataset """kinships""" +914 52 model """transr""" +914 52 loss """softplus""" +914 52 regularizer """no""" +914 52 optimizer """adadelta""" +914 52 training_loop """owa""" +914 52 negative_sampler """basic""" +914 52 evaluator """rankbased""" +914 53 dataset """kinships""" +914 53 model """transr""" +914 53 loss """softplus""" +914 53 regularizer """no""" +914 53 optimizer """adadelta""" +914 53 training_loop """owa""" +914 53 negative_sampler """basic""" +914 53 evaluator """rankbased""" +914 54 dataset """kinships""" +914 54 model """transr""" +914 54 loss """softplus""" +914 54 regularizer """no""" +914 54 optimizer """adadelta""" +914 54 training_loop """owa""" +914 54 negative_sampler """basic""" +914 54 evaluator """rankbased""" +915 1 model.embedding_dim 0.0 +915 1 model.relation_dim 2.0 +915 1 model.scoring_fct_norm 2.0 +915 1 negative_sampler.num_negs_per_pos 4.0 +915 1 training.batch_size 0.0 +915 2 model.embedding_dim 0.0 +915 2 model.relation_dim 1.0 +915 2 model.scoring_fct_norm 2.0 +915 2 negative_sampler.num_negs_per_pos 66.0 +915 2 training.batch_size 1.0 +915 3 model.embedding_dim 1.0 +915 3 model.relation_dim 2.0 +915 3 model.scoring_fct_norm 1.0 +915 3 negative_sampler.num_negs_per_pos 13.0 +915 3 training.batch_size 1.0 +915 4 model.embedding_dim 2.0 +915 4 model.relation_dim 1.0 +915 4 model.scoring_fct_norm 2.0 +915 4 negative_sampler.num_negs_per_pos 45.0 +915 4 training.batch_size 0.0 +915 5 model.embedding_dim 0.0 +915 5 model.relation_dim 2.0 +915 5 model.scoring_fct_norm 1.0 +915 5 negative_sampler.num_negs_per_pos 74.0 
+915 5 training.batch_size 0.0 +915 6 model.embedding_dim 2.0 +915 6 model.relation_dim 2.0 +915 6 model.scoring_fct_norm 2.0 +915 6 negative_sampler.num_negs_per_pos 23.0 +915 6 training.batch_size 1.0 +915 7 model.embedding_dim 2.0 +915 7 model.relation_dim 2.0 +915 7 model.scoring_fct_norm 2.0 +915 7 negative_sampler.num_negs_per_pos 88.0 +915 7 training.batch_size 1.0 +915 8 model.embedding_dim 1.0 +915 8 model.relation_dim 2.0 +915 8 model.scoring_fct_norm 2.0 +915 8 negative_sampler.num_negs_per_pos 8.0 +915 8 training.batch_size 1.0 +915 9 model.embedding_dim 1.0 +915 9 model.relation_dim 1.0 +915 9 model.scoring_fct_norm 2.0 +915 9 negative_sampler.num_negs_per_pos 1.0 +915 9 training.batch_size 2.0 +915 10 model.embedding_dim 2.0 +915 10 model.relation_dim 1.0 +915 10 model.scoring_fct_norm 2.0 +915 10 negative_sampler.num_negs_per_pos 74.0 +915 10 training.batch_size 0.0 +915 11 model.embedding_dim 2.0 +915 11 model.relation_dim 2.0 +915 11 model.scoring_fct_norm 1.0 +915 11 negative_sampler.num_negs_per_pos 58.0 +915 11 training.batch_size 2.0 +915 12 model.embedding_dim 1.0 +915 12 model.relation_dim 0.0 +915 12 model.scoring_fct_norm 1.0 +915 12 negative_sampler.num_negs_per_pos 33.0 +915 12 training.batch_size 1.0 +915 13 model.embedding_dim 2.0 +915 13 model.relation_dim 2.0 +915 13 model.scoring_fct_norm 1.0 +915 13 negative_sampler.num_negs_per_pos 79.0 +915 13 training.batch_size 1.0 +915 14 model.embedding_dim 0.0 +915 14 model.relation_dim 1.0 +915 14 model.scoring_fct_norm 2.0 +915 14 negative_sampler.num_negs_per_pos 22.0 +915 14 training.batch_size 1.0 +915 15 model.embedding_dim 1.0 +915 15 model.relation_dim 0.0 +915 15 model.scoring_fct_norm 1.0 +915 15 negative_sampler.num_negs_per_pos 20.0 +915 15 training.batch_size 0.0 +915 16 model.embedding_dim 1.0 +915 16 model.relation_dim 2.0 +915 16 model.scoring_fct_norm 1.0 +915 16 negative_sampler.num_negs_per_pos 62.0 +915 16 training.batch_size 0.0 +915 17 model.embedding_dim 2.0 +915 17 
model.relation_dim 1.0 +915 17 model.scoring_fct_norm 1.0 +915 17 negative_sampler.num_negs_per_pos 16.0 +915 17 training.batch_size 2.0 +915 18 model.embedding_dim 1.0 +915 18 model.relation_dim 1.0 +915 18 model.scoring_fct_norm 1.0 +915 18 negative_sampler.num_negs_per_pos 30.0 +915 18 training.batch_size 1.0 +915 19 model.embedding_dim 0.0 +915 19 model.relation_dim 1.0 +915 19 model.scoring_fct_norm 1.0 +915 19 negative_sampler.num_negs_per_pos 63.0 +915 19 training.batch_size 2.0 +915 20 model.embedding_dim 0.0 +915 20 model.relation_dim 2.0 +915 20 model.scoring_fct_norm 1.0 +915 20 negative_sampler.num_negs_per_pos 55.0 +915 20 training.batch_size 1.0 +915 21 model.embedding_dim 2.0 +915 21 model.relation_dim 1.0 +915 21 model.scoring_fct_norm 2.0 +915 21 negative_sampler.num_negs_per_pos 25.0 +915 21 training.batch_size 0.0 +915 22 model.embedding_dim 2.0 +915 22 model.relation_dim 1.0 +915 22 model.scoring_fct_norm 1.0 +915 22 negative_sampler.num_negs_per_pos 59.0 +915 22 training.batch_size 2.0 +915 23 model.embedding_dim 1.0 +915 23 model.relation_dim 2.0 +915 23 model.scoring_fct_norm 2.0 +915 23 negative_sampler.num_negs_per_pos 34.0 +915 23 training.batch_size 2.0 +915 24 model.embedding_dim 1.0 +915 24 model.relation_dim 2.0 +915 24 model.scoring_fct_norm 2.0 +915 24 negative_sampler.num_negs_per_pos 94.0 +915 24 training.batch_size 0.0 +915 25 model.embedding_dim 2.0 +915 25 model.relation_dim 2.0 +915 25 model.scoring_fct_norm 2.0 +915 25 negative_sampler.num_negs_per_pos 75.0 +915 25 training.batch_size 2.0 +915 26 model.embedding_dim 2.0 +915 26 model.relation_dim 1.0 +915 26 model.scoring_fct_norm 1.0 +915 26 negative_sampler.num_negs_per_pos 17.0 +915 26 training.batch_size 0.0 +915 27 model.embedding_dim 0.0 +915 27 model.relation_dim 0.0 +915 27 model.scoring_fct_norm 2.0 +915 27 negative_sampler.num_negs_per_pos 22.0 +915 27 training.batch_size 1.0 +915 28 model.embedding_dim 1.0 +915 28 model.relation_dim 2.0 +915 28 
model.scoring_fct_norm 1.0 +915 28 negative_sampler.num_negs_per_pos 59.0 +915 28 training.batch_size 0.0 +915 29 model.embedding_dim 0.0 +915 29 model.relation_dim 1.0 +915 29 model.scoring_fct_norm 1.0 +915 29 negative_sampler.num_negs_per_pos 96.0 +915 29 training.batch_size 0.0 +915 30 model.embedding_dim 0.0 +915 30 model.relation_dim 0.0 +915 30 model.scoring_fct_norm 2.0 +915 30 negative_sampler.num_negs_per_pos 62.0 +915 30 training.batch_size 1.0 +915 31 model.embedding_dim 0.0 +915 31 model.relation_dim 0.0 +915 31 model.scoring_fct_norm 1.0 +915 31 negative_sampler.num_negs_per_pos 16.0 +915 31 training.batch_size 2.0 +915 32 model.embedding_dim 0.0 +915 32 model.relation_dim 2.0 +915 32 model.scoring_fct_norm 1.0 +915 32 negative_sampler.num_negs_per_pos 91.0 +915 32 training.batch_size 0.0 +915 33 model.embedding_dim 0.0 +915 33 model.relation_dim 1.0 +915 33 model.scoring_fct_norm 1.0 +915 33 negative_sampler.num_negs_per_pos 86.0 +915 33 training.batch_size 0.0 +915 34 model.embedding_dim 0.0 +915 34 model.relation_dim 1.0 +915 34 model.scoring_fct_norm 1.0 +915 34 negative_sampler.num_negs_per_pos 27.0 +915 34 training.batch_size 2.0 +915 35 model.embedding_dim 1.0 +915 35 model.relation_dim 1.0 +915 35 model.scoring_fct_norm 2.0 +915 35 negative_sampler.num_negs_per_pos 57.0 +915 35 training.batch_size 2.0 +915 36 model.embedding_dim 2.0 +915 36 model.relation_dim 2.0 +915 36 model.scoring_fct_norm 1.0 +915 36 negative_sampler.num_negs_per_pos 38.0 +915 36 training.batch_size 2.0 +915 37 model.embedding_dim 2.0 +915 37 model.relation_dim 1.0 +915 37 model.scoring_fct_norm 1.0 +915 37 negative_sampler.num_negs_per_pos 95.0 +915 37 training.batch_size 0.0 +915 38 model.embedding_dim 0.0 +915 38 model.relation_dim 1.0 +915 38 model.scoring_fct_norm 1.0 +915 38 negative_sampler.num_negs_per_pos 90.0 +915 38 training.batch_size 2.0 +915 39 model.embedding_dim 2.0 +915 39 model.relation_dim 1.0 +915 39 model.scoring_fct_norm 1.0 +915 39 
negative_sampler.num_negs_per_pos 42.0 +915 39 training.batch_size 0.0 +915 40 model.embedding_dim 1.0 +915 40 model.relation_dim 0.0 +915 40 model.scoring_fct_norm 1.0 +915 40 negative_sampler.num_negs_per_pos 2.0 +915 40 training.batch_size 2.0 +915 41 model.embedding_dim 1.0 +915 41 model.relation_dim 2.0 +915 41 model.scoring_fct_norm 2.0 +915 41 negative_sampler.num_negs_per_pos 24.0 +915 41 training.batch_size 0.0 +915 42 model.embedding_dim 1.0 +915 42 model.relation_dim 0.0 +915 42 model.scoring_fct_norm 2.0 +915 42 negative_sampler.num_negs_per_pos 82.0 +915 42 training.batch_size 2.0 +915 43 model.embedding_dim 2.0 +915 43 model.relation_dim 0.0 +915 43 model.scoring_fct_norm 1.0 +915 43 negative_sampler.num_negs_per_pos 83.0 +915 43 training.batch_size 2.0 +915 44 model.embedding_dim 0.0 +915 44 model.relation_dim 1.0 +915 44 model.scoring_fct_norm 2.0 +915 44 negative_sampler.num_negs_per_pos 59.0 +915 44 training.batch_size 0.0 +915 45 model.embedding_dim 0.0 +915 45 model.relation_dim 0.0 +915 45 model.scoring_fct_norm 2.0 +915 45 negative_sampler.num_negs_per_pos 25.0 +915 45 training.batch_size 0.0 +915 46 model.embedding_dim 2.0 +915 46 model.relation_dim 2.0 +915 46 model.scoring_fct_norm 2.0 +915 46 negative_sampler.num_negs_per_pos 12.0 +915 46 training.batch_size 2.0 +915 47 model.embedding_dim 1.0 +915 47 model.relation_dim 2.0 +915 47 model.scoring_fct_norm 1.0 +915 47 negative_sampler.num_negs_per_pos 66.0 +915 47 training.batch_size 1.0 +915 48 model.embedding_dim 2.0 +915 48 model.relation_dim 0.0 +915 48 model.scoring_fct_norm 2.0 +915 48 negative_sampler.num_negs_per_pos 33.0 +915 48 training.batch_size 1.0 +915 49 model.embedding_dim 2.0 +915 49 model.relation_dim 1.0 +915 49 model.scoring_fct_norm 2.0 +915 49 negative_sampler.num_negs_per_pos 30.0 +915 49 training.batch_size 1.0 +915 50 model.embedding_dim 0.0 +915 50 model.relation_dim 1.0 +915 50 model.scoring_fct_norm 1.0 +915 50 negative_sampler.num_negs_per_pos 28.0 +915 50 
training.batch_size 1.0 +915 51 model.embedding_dim 1.0 +915 51 model.relation_dim 1.0 +915 51 model.scoring_fct_norm 1.0 +915 51 negative_sampler.num_negs_per_pos 30.0 +915 51 training.batch_size 1.0 +915 52 model.embedding_dim 0.0 +915 52 model.relation_dim 2.0 +915 52 model.scoring_fct_norm 2.0 +915 52 negative_sampler.num_negs_per_pos 30.0 +915 52 training.batch_size 0.0 +915 53 model.embedding_dim 2.0 +915 53 model.relation_dim 2.0 +915 53 model.scoring_fct_norm 1.0 +915 53 negative_sampler.num_negs_per_pos 72.0 +915 53 training.batch_size 2.0 +915 54 model.embedding_dim 1.0 +915 54 model.relation_dim 1.0 +915 54 model.scoring_fct_norm 1.0 +915 54 negative_sampler.num_negs_per_pos 97.0 +915 54 training.batch_size 2.0 +915 55 model.embedding_dim 1.0 +915 55 model.relation_dim 2.0 +915 55 model.scoring_fct_norm 1.0 +915 55 negative_sampler.num_negs_per_pos 10.0 +915 55 training.batch_size 0.0 +915 56 model.embedding_dim 1.0 +915 56 model.relation_dim 0.0 +915 56 model.scoring_fct_norm 2.0 +915 56 negative_sampler.num_negs_per_pos 39.0 +915 56 training.batch_size 0.0 +915 57 model.embedding_dim 1.0 +915 57 model.relation_dim 2.0 +915 57 model.scoring_fct_norm 2.0 +915 57 negative_sampler.num_negs_per_pos 10.0 +915 57 training.batch_size 1.0 +915 58 model.embedding_dim 2.0 +915 58 model.relation_dim 2.0 +915 58 model.scoring_fct_norm 1.0 +915 58 negative_sampler.num_negs_per_pos 20.0 +915 58 training.batch_size 1.0 +915 59 model.embedding_dim 0.0 +915 59 model.relation_dim 2.0 +915 59 model.scoring_fct_norm 1.0 +915 59 negative_sampler.num_negs_per_pos 52.0 +915 59 training.batch_size 1.0 +915 60 model.embedding_dim 2.0 +915 60 model.relation_dim 0.0 +915 60 model.scoring_fct_norm 2.0 +915 60 negative_sampler.num_negs_per_pos 48.0 +915 60 training.batch_size 0.0 +915 61 model.embedding_dim 0.0 +915 61 model.relation_dim 2.0 +915 61 model.scoring_fct_norm 1.0 +915 61 negative_sampler.num_negs_per_pos 95.0 +915 61 training.batch_size 1.0 +915 62 model.embedding_dim 
0.0 +915 62 model.relation_dim 0.0 +915 62 model.scoring_fct_norm 2.0 +915 62 negative_sampler.num_negs_per_pos 53.0 +915 62 training.batch_size 2.0 +915 63 model.embedding_dim 0.0 +915 63 model.relation_dim 1.0 +915 63 model.scoring_fct_norm 2.0 +915 63 negative_sampler.num_negs_per_pos 29.0 +915 63 training.batch_size 1.0 +915 64 model.embedding_dim 2.0 +915 64 model.relation_dim 1.0 +915 64 model.scoring_fct_norm 1.0 +915 64 negative_sampler.num_negs_per_pos 53.0 +915 64 training.batch_size 1.0 +915 65 model.embedding_dim 1.0 +915 65 model.relation_dim 1.0 +915 65 model.scoring_fct_norm 2.0 +915 65 negative_sampler.num_negs_per_pos 6.0 +915 65 training.batch_size 0.0 +915 66 model.embedding_dim 1.0 +915 66 model.relation_dim 1.0 +915 66 model.scoring_fct_norm 2.0 +915 66 negative_sampler.num_negs_per_pos 60.0 +915 66 training.batch_size 2.0 +915 67 model.embedding_dim 1.0 +915 67 model.relation_dim 0.0 +915 67 model.scoring_fct_norm 2.0 +915 67 negative_sampler.num_negs_per_pos 33.0 +915 67 training.batch_size 2.0 +915 68 model.embedding_dim 1.0 +915 68 model.relation_dim 1.0 +915 68 model.scoring_fct_norm 2.0 +915 68 negative_sampler.num_negs_per_pos 71.0 +915 68 training.batch_size 1.0 +915 69 model.embedding_dim 1.0 +915 69 model.relation_dim 2.0 +915 69 model.scoring_fct_norm 2.0 +915 69 negative_sampler.num_negs_per_pos 67.0 +915 69 training.batch_size 2.0 +915 70 model.embedding_dim 2.0 +915 70 model.relation_dim 1.0 +915 70 model.scoring_fct_norm 1.0 +915 70 negative_sampler.num_negs_per_pos 56.0 +915 70 training.batch_size 2.0 +915 71 model.embedding_dim 0.0 +915 71 model.relation_dim 0.0 +915 71 model.scoring_fct_norm 1.0 +915 71 negative_sampler.num_negs_per_pos 58.0 +915 71 training.batch_size 2.0 +915 72 model.embedding_dim 0.0 +915 72 model.relation_dim 0.0 +915 72 model.scoring_fct_norm 2.0 +915 72 negative_sampler.num_negs_per_pos 64.0 +915 72 training.batch_size 2.0 +915 73 model.embedding_dim 2.0 +915 73 model.relation_dim 2.0 +915 73 
model.scoring_fct_norm 1.0 +915 73 negative_sampler.num_negs_per_pos 26.0 +915 73 training.batch_size 2.0 +915 74 model.embedding_dim 0.0 +915 74 model.relation_dim 1.0 +915 74 model.scoring_fct_norm 1.0 +915 74 negative_sampler.num_negs_per_pos 11.0 +915 74 training.batch_size 2.0 +915 75 model.embedding_dim 1.0 +915 75 model.relation_dim 1.0 +915 75 model.scoring_fct_norm 1.0 +915 75 negative_sampler.num_negs_per_pos 15.0 +915 75 training.batch_size 0.0 +915 76 model.embedding_dim 0.0 +915 76 model.relation_dim 0.0 +915 76 model.scoring_fct_norm 2.0 +915 76 negative_sampler.num_negs_per_pos 20.0 +915 76 training.batch_size 2.0 +915 77 model.embedding_dim 0.0 +915 77 model.relation_dim 0.0 +915 77 model.scoring_fct_norm 1.0 +915 77 negative_sampler.num_negs_per_pos 51.0 +915 77 training.batch_size 1.0 +915 78 model.embedding_dim 0.0 +915 78 model.relation_dim 1.0 +915 78 model.scoring_fct_norm 2.0 +915 78 negative_sampler.num_negs_per_pos 50.0 +915 78 training.batch_size 2.0 +915 79 model.embedding_dim 0.0 +915 79 model.relation_dim 2.0 +915 79 model.scoring_fct_norm 2.0 +915 79 negative_sampler.num_negs_per_pos 27.0 +915 79 training.batch_size 0.0 +915 80 model.embedding_dim 0.0 +915 80 model.relation_dim 0.0 +915 80 model.scoring_fct_norm 2.0 +915 80 negative_sampler.num_negs_per_pos 43.0 +915 80 training.batch_size 1.0 +915 81 model.embedding_dim 1.0 +915 81 model.relation_dim 2.0 +915 81 model.scoring_fct_norm 1.0 +915 81 negative_sampler.num_negs_per_pos 52.0 +915 81 training.batch_size 0.0 +915 82 model.embedding_dim 1.0 +915 82 model.relation_dim 1.0 +915 82 model.scoring_fct_norm 2.0 +915 82 negative_sampler.num_negs_per_pos 2.0 +915 82 training.batch_size 0.0 +915 83 model.embedding_dim 0.0 +915 83 model.relation_dim 0.0 +915 83 model.scoring_fct_norm 2.0 +915 83 negative_sampler.num_negs_per_pos 98.0 +915 83 training.batch_size 0.0 +915 84 model.embedding_dim 1.0 +915 84 model.relation_dim 0.0 +915 84 model.scoring_fct_norm 2.0 +915 84 
negative_sampler.num_negs_per_pos 46.0 +915 84 training.batch_size 2.0 +915 85 model.embedding_dim 0.0 +915 85 model.relation_dim 0.0 +915 85 model.scoring_fct_norm 2.0 +915 85 negative_sampler.num_negs_per_pos 80.0 +915 85 training.batch_size 1.0 +915 86 model.embedding_dim 0.0 +915 86 model.relation_dim 2.0 +915 86 model.scoring_fct_norm 1.0 +915 86 negative_sampler.num_negs_per_pos 31.0 +915 86 training.batch_size 1.0 +915 87 model.embedding_dim 0.0 +915 87 model.relation_dim 1.0 +915 87 model.scoring_fct_norm 2.0 +915 87 negative_sampler.num_negs_per_pos 49.0 +915 87 training.batch_size 2.0 +915 88 model.embedding_dim 0.0 +915 88 model.relation_dim 2.0 +915 88 model.scoring_fct_norm 2.0 +915 88 negative_sampler.num_negs_per_pos 99.0 +915 88 training.batch_size 2.0 +915 89 model.embedding_dim 2.0 +915 89 model.relation_dim 0.0 +915 89 model.scoring_fct_norm 2.0 +915 89 negative_sampler.num_negs_per_pos 2.0 +915 89 training.batch_size 0.0 +915 90 model.embedding_dim 2.0 +915 90 model.relation_dim 1.0 +915 90 model.scoring_fct_norm 1.0 +915 90 negative_sampler.num_negs_per_pos 87.0 +915 90 training.batch_size 2.0 +915 91 model.embedding_dim 1.0 +915 91 model.relation_dim 1.0 +915 91 model.scoring_fct_norm 1.0 +915 91 negative_sampler.num_negs_per_pos 85.0 +915 91 training.batch_size 0.0 +915 92 model.embedding_dim 2.0 +915 92 model.relation_dim 1.0 +915 92 model.scoring_fct_norm 2.0 +915 92 negative_sampler.num_negs_per_pos 25.0 +915 92 training.batch_size 2.0 +915 93 model.embedding_dim 0.0 +915 93 model.relation_dim 0.0 +915 93 model.scoring_fct_norm 2.0 +915 93 negative_sampler.num_negs_per_pos 64.0 +915 93 training.batch_size 0.0 +915 94 model.embedding_dim 0.0 +915 94 model.relation_dim 1.0 +915 94 model.scoring_fct_norm 2.0 +915 94 negative_sampler.num_negs_per_pos 87.0 +915 94 training.batch_size 1.0 +915 95 model.embedding_dim 1.0 +915 95 model.relation_dim 1.0 +915 95 model.scoring_fct_norm 1.0 +915 95 negative_sampler.num_negs_per_pos 99.0 +915 95 
training.batch_size 0.0 +915 96 model.embedding_dim 2.0 +915 96 model.relation_dim 1.0 +915 96 model.scoring_fct_norm 2.0 +915 96 negative_sampler.num_negs_per_pos 12.0 +915 96 training.batch_size 1.0 +915 97 model.embedding_dim 1.0 +915 97 model.relation_dim 1.0 +915 97 model.scoring_fct_norm 1.0 +915 97 negative_sampler.num_negs_per_pos 99.0 +915 97 training.batch_size 0.0 +915 98 model.embedding_dim 0.0 +915 98 model.relation_dim 2.0 +915 98 model.scoring_fct_norm 2.0 +915 98 negative_sampler.num_negs_per_pos 89.0 +915 98 training.batch_size 2.0 +915 99 model.embedding_dim 2.0 +915 99 model.relation_dim 1.0 +915 99 model.scoring_fct_norm 1.0 +915 99 negative_sampler.num_negs_per_pos 99.0 +915 99 training.batch_size 1.0 +915 100 model.embedding_dim 0.0 +915 100 model.relation_dim 1.0 +915 100 model.scoring_fct_norm 2.0 +915 100 negative_sampler.num_negs_per_pos 91.0 +915 100 training.batch_size 2.0 +915 1 dataset """kinships""" +915 1 model """transr""" +915 1 loss """bceaftersigmoid""" +915 1 regularizer """no""" +915 1 optimizer """adadelta""" +915 1 training_loop """owa""" +915 1 negative_sampler """basic""" +915 1 evaluator """rankbased""" +915 2 dataset """kinships""" +915 2 model """transr""" +915 2 loss """bceaftersigmoid""" +915 2 regularizer """no""" +915 2 optimizer """adadelta""" +915 2 training_loop """owa""" +915 2 negative_sampler """basic""" +915 2 evaluator """rankbased""" +915 3 dataset """kinships""" +915 3 model """transr""" +915 3 loss """bceaftersigmoid""" +915 3 regularizer """no""" +915 3 optimizer """adadelta""" +915 3 training_loop """owa""" +915 3 negative_sampler """basic""" +915 3 evaluator """rankbased""" +915 4 dataset """kinships""" +915 4 model """transr""" +915 4 loss """bceaftersigmoid""" +915 4 regularizer """no""" +915 4 optimizer """adadelta""" +915 4 training_loop """owa""" +915 4 negative_sampler """basic""" +915 4 evaluator """rankbased""" +915 5 dataset """kinships""" +915 5 model """transr""" +915 5 loss 
"""bceaftersigmoid""" +915 5 regularizer """no""" +915 5 optimizer """adadelta""" +915 5 training_loop """owa""" +915 5 negative_sampler """basic""" +915 5 evaluator """rankbased""" +915 6 dataset """kinships""" +915 6 model """transr""" +915 6 loss """bceaftersigmoid""" +915 6 regularizer """no""" +915 6 optimizer """adadelta""" +915 6 training_loop """owa""" +915 6 negative_sampler """basic""" +915 6 evaluator """rankbased""" +915 7 dataset """kinships""" +915 7 model """transr""" +915 7 loss """bceaftersigmoid""" +915 7 regularizer """no""" +915 7 optimizer """adadelta""" +915 7 training_loop """owa""" +915 7 negative_sampler """basic""" +915 7 evaluator """rankbased""" +915 8 dataset """kinships""" +915 8 model """transr""" +915 8 loss """bceaftersigmoid""" +915 8 regularizer """no""" +915 8 optimizer """adadelta""" +915 8 training_loop """owa""" +915 8 negative_sampler """basic""" +915 8 evaluator """rankbased""" +915 9 dataset """kinships""" +915 9 model """transr""" +915 9 loss """bceaftersigmoid""" +915 9 regularizer """no""" +915 9 optimizer """adadelta""" +915 9 training_loop """owa""" +915 9 negative_sampler """basic""" +915 9 evaluator """rankbased""" +915 10 dataset """kinships""" +915 10 model """transr""" +915 10 loss """bceaftersigmoid""" +915 10 regularizer """no""" +915 10 optimizer """adadelta""" +915 10 training_loop """owa""" +915 10 negative_sampler """basic""" +915 10 evaluator """rankbased""" +915 11 dataset """kinships""" +915 11 model """transr""" +915 11 loss """bceaftersigmoid""" +915 11 regularizer """no""" +915 11 optimizer """adadelta""" +915 11 training_loop """owa""" +915 11 negative_sampler """basic""" +915 11 evaluator """rankbased""" +915 12 dataset """kinships""" +915 12 model """transr""" +915 12 loss """bceaftersigmoid""" +915 12 regularizer """no""" +915 12 optimizer """adadelta""" +915 12 training_loop """owa""" +915 12 negative_sampler """basic""" +915 12 evaluator """rankbased""" +915 13 dataset """kinships""" +915 13 
model """transr""" +915 13 loss """bceaftersigmoid""" +915 13 regularizer """no""" +915 13 optimizer """adadelta""" +915 13 training_loop """owa""" +915 13 negative_sampler """basic""" +915 13 evaluator """rankbased""" +915 14 dataset """kinships""" +915 14 model """transr""" +915 14 loss """bceaftersigmoid""" +915 14 regularizer """no""" +915 14 optimizer """adadelta""" +915 14 training_loop """owa""" +915 14 negative_sampler """basic""" +915 14 evaluator """rankbased""" +915 15 dataset """kinships""" +915 15 model """transr""" +915 15 loss """bceaftersigmoid""" +915 15 regularizer """no""" +915 15 optimizer """adadelta""" +915 15 training_loop """owa""" +915 15 negative_sampler """basic""" +915 15 evaluator """rankbased""" +915 16 dataset """kinships""" +915 16 model """transr""" +915 16 loss """bceaftersigmoid""" +915 16 regularizer """no""" +915 16 optimizer """adadelta""" +915 16 training_loop """owa""" +915 16 negative_sampler """basic""" +915 16 evaluator """rankbased""" +915 17 dataset """kinships""" +915 17 model """transr""" +915 17 loss """bceaftersigmoid""" +915 17 regularizer """no""" +915 17 optimizer """adadelta""" +915 17 training_loop """owa""" +915 17 negative_sampler """basic""" +915 17 evaluator """rankbased""" +915 18 dataset """kinships""" +915 18 model """transr""" +915 18 loss """bceaftersigmoid""" +915 18 regularizer """no""" +915 18 optimizer """adadelta""" +915 18 training_loop """owa""" +915 18 negative_sampler """basic""" +915 18 evaluator """rankbased""" +915 19 dataset """kinships""" +915 19 model """transr""" +915 19 loss """bceaftersigmoid""" +915 19 regularizer """no""" +915 19 optimizer """adadelta""" +915 19 training_loop """owa""" +915 19 negative_sampler """basic""" +915 19 evaluator """rankbased""" +915 20 dataset """kinships""" +915 20 model """transr""" +915 20 loss """bceaftersigmoid""" +915 20 regularizer """no""" +915 20 optimizer """adadelta""" +915 20 training_loop """owa""" +915 20 negative_sampler """basic""" +915 20 
evaluator """rankbased""" +915 21 dataset """kinships""" +915 21 model """transr""" +915 21 loss """bceaftersigmoid""" +915 21 regularizer """no""" +915 21 optimizer """adadelta""" +915 21 training_loop """owa""" +915 21 negative_sampler """basic""" +915 21 evaluator """rankbased""" +915 22 dataset """kinships""" +915 22 model """transr""" +915 22 loss """bceaftersigmoid""" +915 22 regularizer """no""" +915 22 optimizer """adadelta""" +915 22 training_loop """owa""" +915 22 negative_sampler """basic""" +915 22 evaluator """rankbased""" +915 23 dataset """kinships""" +915 23 model """transr""" +915 23 loss """bceaftersigmoid""" +915 23 regularizer """no""" +915 23 optimizer """adadelta""" +915 23 training_loop """owa""" +915 23 negative_sampler """basic""" +915 23 evaluator """rankbased""" +915 24 dataset """kinships""" +915 24 model """transr""" +915 24 loss """bceaftersigmoid""" +915 24 regularizer """no""" +915 24 optimizer """adadelta""" +915 24 training_loop """owa""" +915 24 negative_sampler """basic""" +915 24 evaluator """rankbased""" +915 25 dataset """kinships""" +915 25 model """transr""" +915 25 loss """bceaftersigmoid""" +915 25 regularizer """no""" +915 25 optimizer """adadelta""" +915 25 training_loop """owa""" +915 25 negative_sampler """basic""" +915 25 evaluator """rankbased""" +915 26 dataset """kinships""" +915 26 model """transr""" +915 26 loss """bceaftersigmoid""" +915 26 regularizer """no""" +915 26 optimizer """adadelta""" +915 26 training_loop """owa""" +915 26 negative_sampler """basic""" +915 26 evaluator """rankbased""" +915 27 dataset """kinships""" +915 27 model """transr""" +915 27 loss """bceaftersigmoid""" +915 27 regularizer """no""" +915 27 optimizer """adadelta""" +915 27 training_loop """owa""" +915 27 negative_sampler """basic""" +915 27 evaluator """rankbased""" +915 28 dataset """kinships""" +915 28 model """transr""" +915 28 loss """bceaftersigmoid""" +915 28 regularizer """no""" +915 28 optimizer """adadelta""" +915 28 
training_loop """owa""" +915 28 negative_sampler """basic""" +915 28 evaluator """rankbased""" +915 29 dataset """kinships""" +915 29 model """transr""" +915 29 loss """bceaftersigmoid""" +915 29 regularizer """no""" +915 29 optimizer """adadelta""" +915 29 training_loop """owa""" +915 29 negative_sampler """basic""" +915 29 evaluator """rankbased""" +915 30 dataset """kinships""" +915 30 model """transr""" +915 30 loss """bceaftersigmoid""" +915 30 regularizer """no""" +915 30 optimizer """adadelta""" +915 30 training_loop """owa""" +915 30 negative_sampler """basic""" +915 30 evaluator """rankbased""" +915 31 dataset """kinships""" +915 31 model """transr""" +915 31 loss """bceaftersigmoid""" +915 31 regularizer """no""" +915 31 optimizer """adadelta""" +915 31 training_loop """owa""" +915 31 negative_sampler """basic""" +915 31 evaluator """rankbased""" +915 32 dataset """kinships""" +915 32 model """transr""" +915 32 loss """bceaftersigmoid""" +915 32 regularizer """no""" +915 32 optimizer """adadelta""" +915 32 training_loop """owa""" +915 32 negative_sampler """basic""" +915 32 evaluator """rankbased""" +915 33 dataset """kinships""" +915 33 model """transr""" +915 33 loss """bceaftersigmoid""" +915 33 regularizer """no""" +915 33 optimizer """adadelta""" +915 33 training_loop """owa""" +915 33 negative_sampler """basic""" +915 33 evaluator """rankbased""" +915 34 dataset """kinships""" +915 34 model """transr""" +915 34 loss """bceaftersigmoid""" +915 34 regularizer """no""" +915 34 optimizer """adadelta""" +915 34 training_loop """owa""" +915 34 negative_sampler """basic""" +915 34 evaluator """rankbased""" +915 35 dataset """kinships""" +915 35 model """transr""" +915 35 loss """bceaftersigmoid""" +915 35 regularizer """no""" +915 35 optimizer """adadelta""" +915 35 training_loop """owa""" +915 35 negative_sampler """basic""" +915 35 evaluator """rankbased""" +915 36 dataset """kinships""" +915 36 model """transr""" +915 36 loss """bceaftersigmoid""" +915 
36 regularizer """no""" +915 36 optimizer """adadelta""" +915 36 training_loop """owa""" +915 36 negative_sampler """basic""" +915 36 evaluator """rankbased""" +915 37 dataset """kinships""" +915 37 model """transr""" +915 37 loss """bceaftersigmoid""" +915 37 regularizer """no""" +915 37 optimizer """adadelta""" +915 37 training_loop """owa""" +915 37 negative_sampler """basic""" +915 37 evaluator """rankbased""" +915 38 dataset """kinships""" +915 38 model """transr""" +915 38 loss """bceaftersigmoid""" +915 38 regularizer """no""" +915 38 optimizer """adadelta""" +915 38 training_loop """owa""" +915 38 negative_sampler """basic""" +915 38 evaluator """rankbased""" +915 39 dataset """kinships""" +915 39 model """transr""" +915 39 loss """bceaftersigmoid""" +915 39 regularizer """no""" +915 39 optimizer """adadelta""" +915 39 training_loop """owa""" +915 39 negative_sampler """basic""" +915 39 evaluator """rankbased""" +915 40 dataset """kinships""" +915 40 model """transr""" +915 40 loss """bceaftersigmoid""" +915 40 regularizer """no""" +915 40 optimizer """adadelta""" +915 40 training_loop """owa""" +915 40 negative_sampler """basic""" +915 40 evaluator """rankbased""" +915 41 dataset """kinships""" +915 41 model """transr""" +915 41 loss """bceaftersigmoid""" +915 41 regularizer """no""" +915 41 optimizer """adadelta""" +915 41 training_loop """owa""" +915 41 negative_sampler """basic""" +915 41 evaluator """rankbased""" +915 42 dataset """kinships""" +915 42 model """transr""" +915 42 loss """bceaftersigmoid""" +915 42 regularizer """no""" +915 42 optimizer """adadelta""" +915 42 training_loop """owa""" +915 42 negative_sampler """basic""" +915 42 evaluator """rankbased""" +915 43 dataset """kinships""" +915 43 model """transr""" +915 43 loss """bceaftersigmoid""" +915 43 regularizer """no""" +915 43 optimizer """adadelta""" +915 43 training_loop """owa""" +915 43 negative_sampler """basic""" +915 43 evaluator """rankbased""" +915 44 dataset """kinships""" 
+915 44 model """transr""" +915 44 loss """bceaftersigmoid""" +915 44 regularizer """no""" +915 44 optimizer """adadelta""" +915 44 training_loop """owa""" +915 44 negative_sampler """basic""" +915 44 evaluator """rankbased""" +915 45 dataset """kinships""" +915 45 model """transr""" +915 45 loss """bceaftersigmoid""" +915 45 regularizer """no""" +915 45 optimizer """adadelta""" +915 45 training_loop """owa""" +915 45 negative_sampler """basic""" +915 45 evaluator """rankbased""" +915 46 dataset """kinships""" +915 46 model """transr""" +915 46 loss """bceaftersigmoid""" +915 46 regularizer """no""" +915 46 optimizer """adadelta""" +915 46 training_loop """owa""" +915 46 negative_sampler """basic""" +915 46 evaluator """rankbased""" +915 47 dataset """kinships""" +915 47 model """transr""" +915 47 loss """bceaftersigmoid""" +915 47 regularizer """no""" +915 47 optimizer """adadelta""" +915 47 training_loop """owa""" +915 47 negative_sampler """basic""" +915 47 evaluator """rankbased""" +915 48 dataset """kinships""" +915 48 model """transr""" +915 48 loss """bceaftersigmoid""" +915 48 regularizer """no""" +915 48 optimizer """adadelta""" +915 48 training_loop """owa""" +915 48 negative_sampler """basic""" +915 48 evaluator """rankbased""" +915 49 dataset """kinships""" +915 49 model """transr""" +915 49 loss """bceaftersigmoid""" +915 49 regularizer """no""" +915 49 optimizer """adadelta""" +915 49 training_loop """owa""" +915 49 negative_sampler """basic""" +915 49 evaluator """rankbased""" +915 50 dataset """kinships""" +915 50 model """transr""" +915 50 loss """bceaftersigmoid""" +915 50 regularizer """no""" +915 50 optimizer """adadelta""" +915 50 training_loop """owa""" +915 50 negative_sampler """basic""" +915 50 evaluator """rankbased""" +915 51 dataset """kinships""" +915 51 model """transr""" +915 51 loss """bceaftersigmoid""" +915 51 regularizer """no""" +915 51 optimizer """adadelta""" +915 51 training_loop """owa""" +915 51 negative_sampler """basic""" 
+915 51 evaluator """rankbased""" +915 52 dataset """kinships""" +915 52 model """transr""" +915 52 loss """bceaftersigmoid""" +915 52 regularizer """no""" +915 52 optimizer """adadelta""" +915 52 training_loop """owa""" +915 52 negative_sampler """basic""" +915 52 evaluator """rankbased""" +915 53 dataset """kinships""" +915 53 model """transr""" +915 53 loss """bceaftersigmoid""" +915 53 regularizer """no""" +915 53 optimizer """adadelta""" +915 53 training_loop """owa""" +915 53 negative_sampler """basic""" +915 53 evaluator """rankbased""" +915 54 dataset """kinships""" +915 54 model """transr""" +915 54 loss """bceaftersigmoid""" +915 54 regularizer """no""" +915 54 optimizer """adadelta""" +915 54 training_loop """owa""" +915 54 negative_sampler """basic""" +915 54 evaluator """rankbased""" +915 55 dataset """kinships""" +915 55 model """transr""" +915 55 loss """bceaftersigmoid""" +915 55 regularizer """no""" +915 55 optimizer """adadelta""" +915 55 training_loop """owa""" +915 55 negative_sampler """basic""" +915 55 evaluator """rankbased""" +915 56 dataset """kinships""" +915 56 model """transr""" +915 56 loss """bceaftersigmoid""" +915 56 regularizer """no""" +915 56 optimizer """adadelta""" +915 56 training_loop """owa""" +915 56 negative_sampler """basic""" +915 56 evaluator """rankbased""" +915 57 dataset """kinships""" +915 57 model """transr""" +915 57 loss """bceaftersigmoid""" +915 57 regularizer """no""" +915 57 optimizer """adadelta""" +915 57 training_loop """owa""" +915 57 negative_sampler """basic""" +915 57 evaluator """rankbased""" +915 58 dataset """kinships""" +915 58 model """transr""" +915 58 loss """bceaftersigmoid""" +915 58 regularizer """no""" +915 58 optimizer """adadelta""" +915 58 training_loop """owa""" +915 58 negative_sampler """basic""" +915 58 evaluator """rankbased""" +915 59 dataset """kinships""" +915 59 model """transr""" +915 59 loss """bceaftersigmoid""" +915 59 regularizer """no""" +915 59 optimizer """adadelta""" +915 
59 training_loop """owa""" +915 59 negative_sampler """basic""" +915 59 evaluator """rankbased""" +915 60 dataset """kinships""" +915 60 model """transr""" +915 60 loss """bceaftersigmoid""" +915 60 regularizer """no""" +915 60 optimizer """adadelta""" +915 60 training_loop """owa""" +915 60 negative_sampler """basic""" +915 60 evaluator """rankbased""" +915 61 dataset """kinships""" +915 61 model """transr""" +915 61 loss """bceaftersigmoid""" +915 61 regularizer """no""" +915 61 optimizer """adadelta""" +915 61 training_loop """owa""" +915 61 negative_sampler """basic""" +915 61 evaluator """rankbased""" +915 62 dataset """kinships""" +915 62 model """transr""" +915 62 loss """bceaftersigmoid""" +915 62 regularizer """no""" +915 62 optimizer """adadelta""" +915 62 training_loop """owa""" +915 62 negative_sampler """basic""" +915 62 evaluator """rankbased""" +915 63 dataset """kinships""" +915 63 model """transr""" +915 63 loss """bceaftersigmoid""" +915 63 regularizer """no""" +915 63 optimizer """adadelta""" +915 63 training_loop """owa""" +915 63 negative_sampler """basic""" +915 63 evaluator """rankbased""" +915 64 dataset """kinships""" +915 64 model """transr""" +915 64 loss """bceaftersigmoid""" +915 64 regularizer """no""" +915 64 optimizer """adadelta""" +915 64 training_loop """owa""" +915 64 negative_sampler """basic""" +915 64 evaluator """rankbased""" +915 65 dataset """kinships""" +915 65 model """transr""" +915 65 loss """bceaftersigmoid""" +915 65 regularizer """no""" +915 65 optimizer """adadelta""" +915 65 training_loop """owa""" +915 65 negative_sampler """basic""" +915 65 evaluator """rankbased""" +915 66 dataset """kinships""" +915 66 model """transr""" +915 66 loss """bceaftersigmoid""" +915 66 regularizer """no""" +915 66 optimizer """adadelta""" +915 66 training_loop """owa""" +915 66 negative_sampler """basic""" +915 66 evaluator """rankbased""" +915 67 dataset """kinships""" +915 67 model """transr""" +915 67 loss """bceaftersigmoid""" 
+915 67 regularizer """no""" +915 67 optimizer """adadelta""" +915 67 training_loop """owa""" +915 67 negative_sampler """basic""" +915 67 evaluator """rankbased""" +915 68 dataset """kinships""" +915 68 model """transr""" +915 68 loss """bceaftersigmoid""" +915 68 regularizer """no""" +915 68 optimizer """adadelta""" +915 68 training_loop """owa""" +915 68 negative_sampler """basic""" +915 68 evaluator """rankbased""" +915 69 dataset """kinships""" +915 69 model """transr""" +915 69 loss """bceaftersigmoid""" +915 69 regularizer """no""" +915 69 optimizer """adadelta""" +915 69 training_loop """owa""" +915 69 negative_sampler """basic""" +915 69 evaluator """rankbased""" +915 70 dataset """kinships""" +915 70 model """transr""" +915 70 loss """bceaftersigmoid""" +915 70 regularizer """no""" +915 70 optimizer """adadelta""" +915 70 training_loop """owa""" +915 70 negative_sampler """basic""" +915 70 evaluator """rankbased""" +915 71 dataset """kinships""" +915 71 model """transr""" +915 71 loss """bceaftersigmoid""" +915 71 regularizer """no""" +915 71 optimizer """adadelta""" +915 71 training_loop """owa""" +915 71 negative_sampler """basic""" +915 71 evaluator """rankbased""" +915 72 dataset """kinships""" +915 72 model """transr""" +915 72 loss """bceaftersigmoid""" +915 72 regularizer """no""" +915 72 optimizer """adadelta""" +915 72 training_loop """owa""" +915 72 negative_sampler """basic""" +915 72 evaluator """rankbased""" +915 73 dataset """kinships""" +915 73 model """transr""" +915 73 loss """bceaftersigmoid""" +915 73 regularizer """no""" +915 73 optimizer """adadelta""" +915 73 training_loop """owa""" +915 73 negative_sampler """basic""" +915 73 evaluator """rankbased""" +915 74 dataset """kinships""" +915 74 model """transr""" +915 74 loss """bceaftersigmoid""" +915 74 regularizer """no""" +915 74 optimizer """adadelta""" +915 74 training_loop """owa""" +915 74 negative_sampler """basic""" +915 74 evaluator """rankbased""" +915 75 dataset 
"""kinships""" +915 75 model """transr""" +915 75 loss """bceaftersigmoid""" +915 75 regularizer """no""" +915 75 optimizer """adadelta""" +915 75 training_loop """owa""" +915 75 negative_sampler """basic""" +915 75 evaluator """rankbased""" +915 76 dataset """kinships""" +915 76 model """transr""" +915 76 loss """bceaftersigmoid""" +915 76 regularizer """no""" +915 76 optimizer """adadelta""" +915 76 training_loop """owa""" +915 76 negative_sampler """basic""" +915 76 evaluator """rankbased""" +915 77 dataset """kinships""" +915 77 model """transr""" +915 77 loss """bceaftersigmoid""" +915 77 regularizer """no""" +915 77 optimizer """adadelta""" +915 77 training_loop """owa""" +915 77 negative_sampler """basic""" +915 77 evaluator """rankbased""" +915 78 dataset """kinships""" +915 78 model """transr""" +915 78 loss """bceaftersigmoid""" +915 78 regularizer """no""" +915 78 optimizer """adadelta""" +915 78 training_loop """owa""" +915 78 negative_sampler """basic""" +915 78 evaluator """rankbased""" +915 79 dataset """kinships""" +915 79 model """transr""" +915 79 loss """bceaftersigmoid""" +915 79 regularizer """no""" +915 79 optimizer """adadelta""" +915 79 training_loop """owa""" +915 79 negative_sampler """basic""" +915 79 evaluator """rankbased""" +915 80 dataset """kinships""" +915 80 model """transr""" +915 80 loss """bceaftersigmoid""" +915 80 regularizer """no""" +915 80 optimizer """adadelta""" +915 80 training_loop """owa""" +915 80 negative_sampler """basic""" +915 80 evaluator """rankbased""" +915 81 dataset """kinships""" +915 81 model """transr""" +915 81 loss """bceaftersigmoid""" +915 81 regularizer """no""" +915 81 optimizer """adadelta""" +915 81 training_loop """owa""" +915 81 negative_sampler """basic""" +915 81 evaluator """rankbased""" +915 82 dataset """kinships""" +915 82 model """transr""" +915 82 loss """bceaftersigmoid""" +915 82 regularizer """no""" +915 82 optimizer """adadelta""" +915 82 training_loop """owa""" +915 82 
negative_sampler """basic""" +915 82 evaluator """rankbased""" +915 83 dataset """kinships""" +915 83 model """transr""" +915 83 loss """bceaftersigmoid""" +915 83 regularizer """no""" +915 83 optimizer """adadelta""" +915 83 training_loop """owa""" +915 83 negative_sampler """basic""" +915 83 evaluator """rankbased""" +915 84 dataset """kinships""" +915 84 model """transr""" +915 84 loss """bceaftersigmoid""" +915 84 regularizer """no""" +915 84 optimizer """adadelta""" +915 84 training_loop """owa""" +915 84 negative_sampler """basic""" +915 84 evaluator """rankbased""" +915 85 dataset """kinships""" +915 85 model """transr""" +915 85 loss """bceaftersigmoid""" +915 85 regularizer """no""" +915 85 optimizer """adadelta""" +915 85 training_loop """owa""" +915 85 negative_sampler """basic""" +915 85 evaluator """rankbased""" +915 86 dataset """kinships""" +915 86 model """transr""" +915 86 loss """bceaftersigmoid""" +915 86 regularizer """no""" +915 86 optimizer """adadelta""" +915 86 training_loop """owa""" +915 86 negative_sampler """basic""" +915 86 evaluator """rankbased""" +915 87 dataset """kinships""" +915 87 model """transr""" +915 87 loss """bceaftersigmoid""" +915 87 regularizer """no""" +915 87 optimizer """adadelta""" +915 87 training_loop """owa""" +915 87 negative_sampler """basic""" +915 87 evaluator """rankbased""" +915 88 dataset """kinships""" +915 88 model """transr""" +915 88 loss """bceaftersigmoid""" +915 88 regularizer """no""" +915 88 optimizer """adadelta""" +915 88 training_loop """owa""" +915 88 negative_sampler """basic""" +915 88 evaluator """rankbased""" +915 89 dataset """kinships""" +915 89 model """transr""" +915 89 loss """bceaftersigmoid""" +915 89 regularizer """no""" +915 89 optimizer """adadelta""" +915 89 training_loop """owa""" +915 89 negative_sampler """basic""" +915 89 evaluator """rankbased""" +915 90 dataset """kinships""" +915 90 model """transr""" +915 90 loss """bceaftersigmoid""" +915 90 regularizer """no""" +915 90 
optimizer """adadelta""" +915 90 training_loop """owa""" +915 90 negative_sampler """basic""" +915 90 evaluator """rankbased""" +915 91 dataset """kinships""" +915 91 model """transr""" +915 91 loss """bceaftersigmoid""" +915 91 regularizer """no""" +915 91 optimizer """adadelta""" +915 91 training_loop """owa""" +915 91 negative_sampler """basic""" +915 91 evaluator """rankbased""" +915 92 dataset """kinships""" +915 92 model """transr""" +915 92 loss """bceaftersigmoid""" +915 92 regularizer """no""" +915 92 optimizer """adadelta""" +915 92 training_loop """owa""" +915 92 negative_sampler """basic""" +915 92 evaluator """rankbased""" +915 93 dataset """kinships""" +915 93 model """transr""" +915 93 loss """bceaftersigmoid""" +915 93 regularizer """no""" +915 93 optimizer """adadelta""" +915 93 training_loop """owa""" +915 93 negative_sampler """basic""" +915 93 evaluator """rankbased""" +915 94 dataset """kinships""" +915 94 model """transr""" +915 94 loss """bceaftersigmoid""" +915 94 regularizer """no""" +915 94 optimizer """adadelta""" +915 94 training_loop """owa""" +915 94 negative_sampler """basic""" +915 94 evaluator """rankbased""" +915 95 dataset """kinships""" +915 95 model """transr""" +915 95 loss """bceaftersigmoid""" +915 95 regularizer """no""" +915 95 optimizer """adadelta""" +915 95 training_loop """owa""" +915 95 negative_sampler """basic""" +915 95 evaluator """rankbased""" +915 96 dataset """kinships""" +915 96 model """transr""" +915 96 loss """bceaftersigmoid""" +915 96 regularizer """no""" +915 96 optimizer """adadelta""" +915 96 training_loop """owa""" +915 96 negative_sampler """basic""" +915 96 evaluator """rankbased""" +915 97 dataset """kinships""" +915 97 model """transr""" +915 97 loss """bceaftersigmoid""" +915 97 regularizer """no""" +915 97 optimizer """adadelta""" +915 97 training_loop """owa""" +915 97 negative_sampler """basic""" +915 97 evaluator """rankbased""" +915 98 dataset """kinships""" +915 98 model """transr""" +915 98 
loss """bceaftersigmoid""" +915 98 regularizer """no""" +915 98 optimizer """adadelta""" +915 98 training_loop """owa""" +915 98 negative_sampler """basic""" +915 98 evaluator """rankbased""" +915 99 dataset """kinships""" +915 99 model """transr""" +915 99 loss """bceaftersigmoid""" +915 99 regularizer """no""" +915 99 optimizer """adadelta""" +915 99 training_loop """owa""" +915 99 negative_sampler """basic""" +915 99 evaluator """rankbased""" +915 100 dataset """kinships""" +915 100 model """transr""" +915 100 loss """bceaftersigmoid""" +915 100 regularizer """no""" +915 100 optimizer """adadelta""" +915 100 training_loop """owa""" +915 100 negative_sampler """basic""" +915 100 evaluator """rankbased""" +916 1 model.embedding_dim 1.0 +916 1 model.relation_dim 0.0 +916 1 model.scoring_fct_norm 2.0 +916 1 negative_sampler.num_negs_per_pos 42.0 +916 1 training.batch_size 1.0 +916 2 model.embedding_dim 2.0 +916 2 model.relation_dim 0.0 +916 2 model.scoring_fct_norm 2.0 +916 2 negative_sampler.num_negs_per_pos 15.0 +916 2 training.batch_size 2.0 +916 3 model.embedding_dim 0.0 +916 3 model.relation_dim 0.0 +916 3 model.scoring_fct_norm 2.0 +916 3 negative_sampler.num_negs_per_pos 52.0 +916 3 training.batch_size 2.0 +916 4 model.embedding_dim 2.0 +916 4 model.relation_dim 2.0 +916 4 model.scoring_fct_norm 2.0 +916 4 negative_sampler.num_negs_per_pos 52.0 +916 4 training.batch_size 1.0 +916 5 model.embedding_dim 0.0 +916 5 model.relation_dim 2.0 +916 5 model.scoring_fct_norm 2.0 +916 5 negative_sampler.num_negs_per_pos 95.0 +916 5 training.batch_size 0.0 +916 6 model.embedding_dim 1.0 +916 6 model.relation_dim 2.0 +916 6 model.scoring_fct_norm 1.0 +916 6 negative_sampler.num_negs_per_pos 65.0 +916 6 training.batch_size 0.0 +916 7 model.embedding_dim 1.0 +916 7 model.relation_dim 1.0 +916 7 model.scoring_fct_norm 1.0 +916 7 negative_sampler.num_negs_per_pos 94.0 +916 7 training.batch_size 2.0 +916 8 model.embedding_dim 2.0 +916 8 model.relation_dim 1.0 +916 8 
model.scoring_fct_norm 2.0 +916 8 negative_sampler.num_negs_per_pos 28.0 +916 8 training.batch_size 2.0 +916 9 model.embedding_dim 1.0 +916 9 model.relation_dim 1.0 +916 9 model.scoring_fct_norm 1.0 +916 9 negative_sampler.num_negs_per_pos 18.0 +916 9 training.batch_size 1.0 +916 10 model.embedding_dim 2.0 +916 10 model.relation_dim 0.0 +916 10 model.scoring_fct_norm 1.0 +916 10 negative_sampler.num_negs_per_pos 98.0 +916 10 training.batch_size 2.0 +916 11 model.embedding_dim 2.0 +916 11 model.relation_dim 0.0 +916 11 model.scoring_fct_norm 2.0 +916 11 negative_sampler.num_negs_per_pos 25.0 +916 11 training.batch_size 1.0 +916 12 model.embedding_dim 2.0 +916 12 model.relation_dim 0.0 +916 12 model.scoring_fct_norm 2.0 +916 12 negative_sampler.num_negs_per_pos 19.0 +916 12 training.batch_size 1.0 +916 13 model.embedding_dim 0.0 +916 13 model.relation_dim 1.0 +916 13 model.scoring_fct_norm 2.0 +916 13 negative_sampler.num_negs_per_pos 89.0 +916 13 training.batch_size 2.0 +916 14 model.embedding_dim 1.0 +916 14 model.relation_dim 0.0 +916 14 model.scoring_fct_norm 2.0 +916 14 negative_sampler.num_negs_per_pos 8.0 +916 14 training.batch_size 0.0 +916 15 model.embedding_dim 2.0 +916 15 model.relation_dim 1.0 +916 15 model.scoring_fct_norm 2.0 +916 15 negative_sampler.num_negs_per_pos 93.0 +916 15 training.batch_size 0.0 +916 16 model.embedding_dim 2.0 +916 16 model.relation_dim 0.0 +916 16 model.scoring_fct_norm 1.0 +916 16 negative_sampler.num_negs_per_pos 4.0 +916 16 training.batch_size 0.0 +916 17 model.embedding_dim 1.0 +916 17 model.relation_dim 0.0 +916 17 model.scoring_fct_norm 2.0 +916 17 negative_sampler.num_negs_per_pos 53.0 +916 17 training.batch_size 1.0 +916 18 model.embedding_dim 2.0 +916 18 model.relation_dim 1.0 +916 18 model.scoring_fct_norm 2.0 +916 18 negative_sampler.num_negs_per_pos 79.0 +916 18 training.batch_size 0.0 +916 19 model.embedding_dim 2.0 +916 19 model.relation_dim 0.0 +916 19 model.scoring_fct_norm 2.0 +916 19 
negative_sampler.num_negs_per_pos 43.0 +916 19 training.batch_size 1.0 +916 20 model.embedding_dim 1.0 +916 20 model.relation_dim 0.0 +916 20 model.scoring_fct_norm 1.0 +916 20 negative_sampler.num_negs_per_pos 13.0 +916 20 training.batch_size 1.0 +916 21 model.embedding_dim 1.0 +916 21 model.relation_dim 0.0 +916 21 model.scoring_fct_norm 1.0 +916 21 negative_sampler.num_negs_per_pos 28.0 +916 21 training.batch_size 2.0 +916 22 model.embedding_dim 0.0 +916 22 model.relation_dim 0.0 +916 22 model.scoring_fct_norm 2.0 +916 22 negative_sampler.num_negs_per_pos 69.0 +916 22 training.batch_size 2.0 +916 23 model.embedding_dim 1.0 +916 23 model.relation_dim 1.0 +916 23 model.scoring_fct_norm 2.0 +916 23 negative_sampler.num_negs_per_pos 80.0 +916 23 training.batch_size 0.0 +916 24 model.embedding_dim 0.0 +916 24 model.relation_dim 2.0 +916 24 model.scoring_fct_norm 1.0 +916 24 negative_sampler.num_negs_per_pos 37.0 +916 24 training.batch_size 2.0 +916 25 model.embedding_dim 2.0 +916 25 model.relation_dim 1.0 +916 25 model.scoring_fct_norm 2.0 +916 25 negative_sampler.num_negs_per_pos 66.0 +916 25 training.batch_size 2.0 +916 26 model.embedding_dim 0.0 +916 26 model.relation_dim 0.0 +916 26 model.scoring_fct_norm 1.0 +916 26 negative_sampler.num_negs_per_pos 83.0 +916 26 training.batch_size 2.0 +916 27 model.embedding_dim 0.0 +916 27 model.relation_dim 0.0 +916 27 model.scoring_fct_norm 2.0 +916 27 negative_sampler.num_negs_per_pos 71.0 +916 27 training.batch_size 0.0 +916 28 model.embedding_dim 1.0 +916 28 model.relation_dim 2.0 +916 28 model.scoring_fct_norm 1.0 +916 28 negative_sampler.num_negs_per_pos 49.0 +916 28 training.batch_size 1.0 +916 29 model.embedding_dim 0.0 +916 29 model.relation_dim 0.0 +916 29 model.scoring_fct_norm 1.0 +916 29 negative_sampler.num_negs_per_pos 29.0 +916 29 training.batch_size 2.0 +916 30 model.embedding_dim 0.0 +916 30 model.relation_dim 2.0 +916 30 model.scoring_fct_norm 1.0 +916 30 negative_sampler.num_negs_per_pos 63.0 +916 30 
training.batch_size 1.0 +916 31 model.embedding_dim 0.0 +916 31 model.relation_dim 2.0 +916 31 model.scoring_fct_norm 1.0 +916 31 negative_sampler.num_negs_per_pos 12.0 +916 31 training.batch_size 2.0 +916 32 model.embedding_dim 1.0 +916 32 model.relation_dim 0.0 +916 32 model.scoring_fct_norm 2.0 +916 32 negative_sampler.num_negs_per_pos 92.0 +916 32 training.batch_size 2.0 +916 33 model.embedding_dim 2.0 +916 33 model.relation_dim 1.0 +916 33 model.scoring_fct_norm 1.0 +916 33 negative_sampler.num_negs_per_pos 17.0 +916 33 training.batch_size 1.0 +916 34 model.embedding_dim 1.0 +916 34 model.relation_dim 0.0 +916 34 model.scoring_fct_norm 2.0 +916 34 negative_sampler.num_negs_per_pos 56.0 +916 34 training.batch_size 2.0 +916 35 model.embedding_dim 1.0 +916 35 model.relation_dim 0.0 +916 35 model.scoring_fct_norm 2.0 +916 35 negative_sampler.num_negs_per_pos 59.0 +916 35 training.batch_size 1.0 +916 36 model.embedding_dim 0.0 +916 36 model.relation_dim 0.0 +916 36 model.scoring_fct_norm 1.0 +916 36 negative_sampler.num_negs_per_pos 36.0 +916 36 training.batch_size 0.0 +916 37 model.embedding_dim 0.0 +916 37 model.relation_dim 1.0 +916 37 model.scoring_fct_norm 2.0 +916 37 negative_sampler.num_negs_per_pos 82.0 +916 37 training.batch_size 1.0 +916 38 model.embedding_dim 1.0 +916 38 model.relation_dim 1.0 +916 38 model.scoring_fct_norm 2.0 +916 38 negative_sampler.num_negs_per_pos 80.0 +916 38 training.batch_size 2.0 +916 39 model.embedding_dim 2.0 +916 39 model.relation_dim 0.0 +916 39 model.scoring_fct_norm 1.0 +916 39 negative_sampler.num_negs_per_pos 1.0 +916 39 training.batch_size 2.0 +916 40 model.embedding_dim 2.0 +916 40 model.relation_dim 0.0 +916 40 model.scoring_fct_norm 2.0 +916 40 negative_sampler.num_negs_per_pos 45.0 +916 40 training.batch_size 0.0 +916 41 model.embedding_dim 1.0 +916 41 model.relation_dim 0.0 +916 41 model.scoring_fct_norm 1.0 +916 41 negative_sampler.num_negs_per_pos 85.0 +916 41 training.batch_size 0.0 +916 42 model.embedding_dim 
0.0 +916 42 model.relation_dim 1.0 +916 42 model.scoring_fct_norm 1.0 +916 42 negative_sampler.num_negs_per_pos 69.0 +916 42 training.batch_size 0.0 +916 43 model.embedding_dim 2.0 +916 43 model.relation_dim 0.0 +916 43 model.scoring_fct_norm 2.0 +916 43 negative_sampler.num_negs_per_pos 80.0 +916 43 training.batch_size 1.0 +916 44 model.embedding_dim 2.0 +916 44 model.relation_dim 2.0 +916 44 model.scoring_fct_norm 1.0 +916 44 negative_sampler.num_negs_per_pos 96.0 +916 44 training.batch_size 0.0 +916 45 model.embedding_dim 2.0 +916 45 model.relation_dim 1.0 +916 45 model.scoring_fct_norm 1.0 +916 45 negative_sampler.num_negs_per_pos 2.0 +916 45 training.batch_size 2.0 +916 46 model.embedding_dim 1.0 +916 46 model.relation_dim 2.0 +916 46 model.scoring_fct_norm 1.0 +916 46 negative_sampler.num_negs_per_pos 33.0 +916 46 training.batch_size 0.0 +916 47 model.embedding_dim 2.0 +916 47 model.relation_dim 0.0 +916 47 model.scoring_fct_norm 2.0 +916 47 negative_sampler.num_negs_per_pos 79.0 +916 47 training.batch_size 1.0 +916 48 model.embedding_dim 1.0 +916 48 model.relation_dim 1.0 +916 48 model.scoring_fct_norm 2.0 +916 48 negative_sampler.num_negs_per_pos 58.0 +916 48 training.batch_size 1.0 +916 49 model.embedding_dim 1.0 +916 49 model.relation_dim 1.0 +916 49 model.scoring_fct_norm 1.0 +916 49 negative_sampler.num_negs_per_pos 18.0 +916 49 training.batch_size 0.0 +916 50 model.embedding_dim 0.0 +916 50 model.relation_dim 1.0 +916 50 model.scoring_fct_norm 2.0 +916 50 negative_sampler.num_negs_per_pos 31.0 +916 50 training.batch_size 2.0 +916 51 model.embedding_dim 0.0 +916 51 model.relation_dim 1.0 +916 51 model.scoring_fct_norm 1.0 +916 51 negative_sampler.num_negs_per_pos 94.0 +916 51 training.batch_size 0.0 +916 52 model.embedding_dim 0.0 +916 52 model.relation_dim 1.0 +916 52 model.scoring_fct_norm 2.0 +916 52 negative_sampler.num_negs_per_pos 48.0 +916 52 training.batch_size 0.0 +916 53 model.embedding_dim 1.0 +916 53 model.relation_dim 1.0 +916 53 
model.scoring_fct_norm 2.0 +916 53 negative_sampler.num_negs_per_pos 54.0 +916 53 training.batch_size 0.0 +916 54 model.embedding_dim 2.0 +916 54 model.relation_dim 0.0 +916 54 model.scoring_fct_norm 1.0 +916 54 negative_sampler.num_negs_per_pos 19.0 +916 54 training.batch_size 1.0 +916 55 model.embedding_dim 2.0 +916 55 model.relation_dim 0.0 +916 55 model.scoring_fct_norm 1.0 +916 55 negative_sampler.num_negs_per_pos 80.0 +916 55 training.batch_size 2.0 +916 56 model.embedding_dim 1.0 +916 56 model.relation_dim 0.0 +916 56 model.scoring_fct_norm 2.0 +916 56 negative_sampler.num_negs_per_pos 73.0 +916 56 training.batch_size 1.0 +916 57 model.embedding_dim 1.0 +916 57 model.relation_dim 1.0 +916 57 model.scoring_fct_norm 2.0 +916 57 negative_sampler.num_negs_per_pos 50.0 +916 57 training.batch_size 1.0 +916 58 model.embedding_dim 0.0 +916 58 model.relation_dim 1.0 +916 58 model.scoring_fct_norm 2.0 +916 58 negative_sampler.num_negs_per_pos 82.0 +916 58 training.batch_size 2.0 +916 59 model.embedding_dim 1.0 +916 59 model.relation_dim 0.0 +916 59 model.scoring_fct_norm 2.0 +916 59 negative_sampler.num_negs_per_pos 73.0 +916 59 training.batch_size 0.0 +916 60 model.embedding_dim 2.0 +916 60 model.relation_dim 0.0 +916 60 model.scoring_fct_norm 1.0 +916 60 negative_sampler.num_negs_per_pos 29.0 +916 60 training.batch_size 2.0 +916 61 model.embedding_dim 2.0 +916 61 model.relation_dim 2.0 +916 61 model.scoring_fct_norm 2.0 +916 61 negative_sampler.num_negs_per_pos 66.0 +916 61 training.batch_size 2.0 +916 62 model.embedding_dim 0.0 +916 62 model.relation_dim 0.0 +916 62 model.scoring_fct_norm 2.0 +916 62 negative_sampler.num_negs_per_pos 33.0 +916 62 training.batch_size 1.0 +916 63 model.embedding_dim 0.0 +916 63 model.relation_dim 0.0 +916 63 model.scoring_fct_norm 2.0 +916 63 negative_sampler.num_negs_per_pos 41.0 +916 63 training.batch_size 0.0 +916 64 model.embedding_dim 2.0 +916 64 model.relation_dim 2.0 +916 64 model.scoring_fct_norm 1.0 +916 64 
negative_sampler.num_negs_per_pos 10.0 +916 64 training.batch_size 1.0 +916 65 model.embedding_dim 1.0 +916 65 model.relation_dim 0.0 +916 65 model.scoring_fct_norm 2.0 +916 65 negative_sampler.num_negs_per_pos 82.0 +916 65 training.batch_size 1.0 +916 66 model.embedding_dim 2.0 +916 66 model.relation_dim 2.0 +916 66 model.scoring_fct_norm 2.0 +916 66 negative_sampler.num_negs_per_pos 94.0 +916 66 training.batch_size 0.0 +916 67 model.embedding_dim 2.0 +916 67 model.relation_dim 0.0 +916 67 model.scoring_fct_norm 1.0 +916 67 negative_sampler.num_negs_per_pos 83.0 +916 67 training.batch_size 1.0 +916 68 model.embedding_dim 0.0 +916 68 model.relation_dim 0.0 +916 68 model.scoring_fct_norm 2.0 +916 68 negative_sampler.num_negs_per_pos 6.0 +916 68 training.batch_size 2.0 +916 69 model.embedding_dim 0.0 +916 69 model.relation_dim 1.0 +916 69 model.scoring_fct_norm 1.0 +916 69 negative_sampler.num_negs_per_pos 98.0 +916 69 training.batch_size 1.0 +916 70 model.embedding_dim 2.0 +916 70 model.relation_dim 2.0 +916 70 model.scoring_fct_norm 1.0 +916 70 negative_sampler.num_negs_per_pos 49.0 +916 70 training.batch_size 1.0 +916 71 model.embedding_dim 2.0 +916 71 model.relation_dim 0.0 +916 71 model.scoring_fct_norm 1.0 +916 71 negative_sampler.num_negs_per_pos 97.0 +916 71 training.batch_size 1.0 +916 72 model.embedding_dim 1.0 +916 72 model.relation_dim 1.0 +916 72 model.scoring_fct_norm 2.0 +916 72 negative_sampler.num_negs_per_pos 29.0 +916 72 training.batch_size 0.0 +916 73 model.embedding_dim 0.0 +916 73 model.relation_dim 0.0 +916 73 model.scoring_fct_norm 2.0 +916 73 negative_sampler.num_negs_per_pos 20.0 +916 73 training.batch_size 0.0 +916 74 model.embedding_dim 1.0 +916 74 model.relation_dim 2.0 +916 74 model.scoring_fct_norm 1.0 +916 74 negative_sampler.num_negs_per_pos 97.0 +916 74 training.batch_size 0.0 +916 75 model.embedding_dim 1.0 +916 75 model.relation_dim 2.0 +916 75 model.scoring_fct_norm 2.0 +916 75 negative_sampler.num_negs_per_pos 95.0 +916 75 
training.batch_size 2.0 +916 76 model.embedding_dim 2.0 +916 76 model.relation_dim 1.0 +916 76 model.scoring_fct_norm 2.0 +916 76 negative_sampler.num_negs_per_pos 91.0 +916 76 training.batch_size 2.0 +916 77 model.embedding_dim 2.0 +916 77 model.relation_dim 0.0 +916 77 model.scoring_fct_norm 2.0 +916 77 negative_sampler.num_negs_per_pos 2.0 +916 77 training.batch_size 1.0 +916 78 model.embedding_dim 1.0 +916 78 model.relation_dim 2.0 +916 78 model.scoring_fct_norm 2.0 +916 78 negative_sampler.num_negs_per_pos 93.0 +916 78 training.batch_size 2.0 +916 79 model.embedding_dim 2.0 +916 79 model.relation_dim 2.0 +916 79 model.scoring_fct_norm 2.0 +916 79 negative_sampler.num_negs_per_pos 74.0 +916 79 training.batch_size 2.0 +916 80 model.embedding_dim 1.0 +916 80 model.relation_dim 1.0 +916 80 model.scoring_fct_norm 2.0 +916 80 negative_sampler.num_negs_per_pos 40.0 +916 80 training.batch_size 0.0 +916 81 model.embedding_dim 2.0 +916 81 model.relation_dim 0.0 +916 81 model.scoring_fct_norm 2.0 +916 81 negative_sampler.num_negs_per_pos 7.0 +916 81 training.batch_size 1.0 +916 82 model.embedding_dim 0.0 +916 82 model.relation_dim 2.0 +916 82 model.scoring_fct_norm 1.0 +916 82 negative_sampler.num_negs_per_pos 33.0 +916 82 training.batch_size 2.0 +916 83 model.embedding_dim 2.0 +916 83 model.relation_dim 0.0 +916 83 model.scoring_fct_norm 2.0 +916 83 negative_sampler.num_negs_per_pos 41.0 +916 83 training.batch_size 2.0 +916 84 model.embedding_dim 1.0 +916 84 model.relation_dim 0.0 +916 84 model.scoring_fct_norm 1.0 +916 84 negative_sampler.num_negs_per_pos 17.0 +916 84 training.batch_size 2.0 +916 85 model.embedding_dim 1.0 +916 85 model.relation_dim 2.0 +916 85 model.scoring_fct_norm 2.0 +916 85 negative_sampler.num_negs_per_pos 38.0 +916 85 training.batch_size 0.0 +916 86 model.embedding_dim 2.0 +916 86 model.relation_dim 1.0 +916 86 model.scoring_fct_norm 2.0 +916 86 negative_sampler.num_negs_per_pos 17.0 +916 86 training.batch_size 1.0 +916 87 model.embedding_dim 
2.0 +916 87 model.relation_dim 1.0 +916 87 model.scoring_fct_norm 2.0 +916 87 negative_sampler.num_negs_per_pos 4.0 +916 87 training.batch_size 0.0 +916 88 model.embedding_dim 2.0 +916 88 model.relation_dim 1.0 +916 88 model.scoring_fct_norm 2.0 +916 88 negative_sampler.num_negs_per_pos 37.0 +916 88 training.batch_size 2.0 +916 89 model.embedding_dim 1.0 +916 89 model.relation_dim 1.0 +916 89 model.scoring_fct_norm 2.0 +916 89 negative_sampler.num_negs_per_pos 45.0 +916 89 training.batch_size 1.0 +916 90 model.embedding_dim 2.0 +916 90 model.relation_dim 1.0 +916 90 model.scoring_fct_norm 1.0 +916 90 negative_sampler.num_negs_per_pos 98.0 +916 90 training.batch_size 1.0 +916 91 model.embedding_dim 2.0 +916 91 model.relation_dim 1.0 +916 91 model.scoring_fct_norm 1.0 +916 91 negative_sampler.num_negs_per_pos 45.0 +916 91 training.batch_size 1.0 +916 92 model.embedding_dim 2.0 +916 92 model.relation_dim 2.0 +916 92 model.scoring_fct_norm 1.0 +916 92 negative_sampler.num_negs_per_pos 28.0 +916 92 training.batch_size 1.0 +916 93 model.embedding_dim 0.0 +916 93 model.relation_dim 0.0 +916 93 model.scoring_fct_norm 1.0 +916 93 negative_sampler.num_negs_per_pos 83.0 +916 93 training.batch_size 0.0 +916 94 model.embedding_dim 0.0 +916 94 model.relation_dim 2.0 +916 94 model.scoring_fct_norm 1.0 +916 94 negative_sampler.num_negs_per_pos 90.0 +916 94 training.batch_size 0.0 +916 95 model.embedding_dim 0.0 +916 95 model.relation_dim 2.0 +916 95 model.scoring_fct_norm 2.0 +916 95 negative_sampler.num_negs_per_pos 51.0 +916 95 training.batch_size 1.0 +916 96 model.embedding_dim 2.0 +916 96 model.relation_dim 2.0 +916 96 model.scoring_fct_norm 1.0 +916 96 negative_sampler.num_negs_per_pos 59.0 +916 96 training.batch_size 1.0 +916 97 model.embedding_dim 0.0 +916 97 model.relation_dim 1.0 +916 97 model.scoring_fct_norm 2.0 +916 97 negative_sampler.num_negs_per_pos 90.0 +916 97 training.batch_size 0.0 +916 98 model.embedding_dim 1.0 +916 98 model.relation_dim 1.0 +916 98 
model.scoring_fct_norm 2.0 +916 98 negative_sampler.num_negs_per_pos 62.0 +916 98 training.batch_size 1.0 +916 99 model.embedding_dim 2.0 +916 99 model.relation_dim 0.0 +916 99 model.scoring_fct_norm 2.0 +916 99 negative_sampler.num_negs_per_pos 64.0 +916 99 training.batch_size 0.0 +916 100 model.embedding_dim 0.0 +916 100 model.relation_dim 2.0 +916 100 model.scoring_fct_norm 1.0 +916 100 negative_sampler.num_negs_per_pos 97.0 +916 100 training.batch_size 2.0 +916 1 dataset """kinships""" +916 1 model """transr""" +916 1 loss """softplus""" +916 1 regularizer """no""" +916 1 optimizer """adadelta""" +916 1 training_loop """owa""" +916 1 negative_sampler """basic""" +916 1 evaluator """rankbased""" +916 2 dataset """kinships""" +916 2 model """transr""" +916 2 loss """softplus""" +916 2 regularizer """no""" +916 2 optimizer """adadelta""" +916 2 training_loop """owa""" +916 2 negative_sampler """basic""" +916 2 evaluator """rankbased""" +916 3 dataset """kinships""" +916 3 model """transr""" +916 3 loss """softplus""" +916 3 regularizer """no""" +916 3 optimizer """adadelta""" +916 3 training_loop """owa""" +916 3 negative_sampler """basic""" +916 3 evaluator """rankbased""" +916 4 dataset """kinships""" +916 4 model """transr""" +916 4 loss """softplus""" +916 4 regularizer """no""" +916 4 optimizer """adadelta""" +916 4 training_loop """owa""" +916 4 negative_sampler """basic""" +916 4 evaluator """rankbased""" +916 5 dataset """kinships""" +916 5 model """transr""" +916 5 loss """softplus""" +916 5 regularizer """no""" +916 5 optimizer """adadelta""" +916 5 training_loop """owa""" +916 5 negative_sampler """basic""" +916 5 evaluator """rankbased""" +916 6 dataset """kinships""" +916 6 model """transr""" +916 6 loss """softplus""" +916 6 regularizer """no""" +916 6 optimizer """adadelta""" +916 6 training_loop """owa""" +916 6 negative_sampler """basic""" +916 6 evaluator """rankbased""" +916 7 dataset """kinships""" +916 7 model """transr""" +916 7 loss 
"""softplus""" +916 7 regularizer """no""" +916 7 optimizer """adadelta""" +916 7 training_loop """owa""" +916 7 negative_sampler """basic""" +916 7 evaluator """rankbased""" +916 8 dataset """kinships""" +916 8 model """transr""" +916 8 loss """softplus""" +916 8 regularizer """no""" +916 8 optimizer """adadelta""" +916 8 training_loop """owa""" +916 8 negative_sampler """basic""" +916 8 evaluator """rankbased""" +916 9 dataset """kinships""" +916 9 model """transr""" +916 9 loss """softplus""" +916 9 regularizer """no""" +916 9 optimizer """adadelta""" +916 9 training_loop """owa""" +916 9 negative_sampler """basic""" +916 9 evaluator """rankbased""" +916 10 dataset """kinships""" +916 10 model """transr""" +916 10 loss """softplus""" +916 10 regularizer """no""" +916 10 optimizer """adadelta""" +916 10 training_loop """owa""" +916 10 negative_sampler """basic""" +916 10 evaluator """rankbased""" +916 11 dataset """kinships""" +916 11 model """transr""" +916 11 loss """softplus""" +916 11 regularizer """no""" +916 11 optimizer """adadelta""" +916 11 training_loop """owa""" +916 11 negative_sampler """basic""" +916 11 evaluator """rankbased""" +916 12 dataset """kinships""" +916 12 model """transr""" +916 12 loss """softplus""" +916 12 regularizer """no""" +916 12 optimizer """adadelta""" +916 12 training_loop """owa""" +916 12 negative_sampler """basic""" +916 12 evaluator """rankbased""" +916 13 dataset """kinships""" +916 13 model """transr""" +916 13 loss """softplus""" +916 13 regularizer """no""" +916 13 optimizer """adadelta""" +916 13 training_loop """owa""" +916 13 negative_sampler """basic""" +916 13 evaluator """rankbased""" +916 14 dataset """kinships""" +916 14 model """transr""" +916 14 loss """softplus""" +916 14 regularizer """no""" +916 14 optimizer """adadelta""" +916 14 training_loop """owa""" +916 14 negative_sampler """basic""" +916 14 evaluator """rankbased""" +916 15 dataset """kinships""" +916 15 model """transr""" +916 15 loss 
"""softplus""" +916 15 regularizer """no""" +916 15 optimizer """adadelta""" +916 15 training_loop """owa""" +916 15 negative_sampler """basic""" +916 15 evaluator """rankbased""" +916 16 dataset """kinships""" +916 16 model """transr""" +916 16 loss """softplus""" +916 16 regularizer """no""" +916 16 optimizer """adadelta""" +916 16 training_loop """owa""" +916 16 negative_sampler """basic""" +916 16 evaluator """rankbased""" +916 17 dataset """kinships""" +916 17 model """transr""" +916 17 loss """softplus""" +916 17 regularizer """no""" +916 17 optimizer """adadelta""" +916 17 training_loop """owa""" +916 17 negative_sampler """basic""" +916 17 evaluator """rankbased""" +916 18 dataset """kinships""" +916 18 model """transr""" +916 18 loss """softplus""" +916 18 regularizer """no""" +916 18 optimizer """adadelta""" +916 18 training_loop """owa""" +916 18 negative_sampler """basic""" +916 18 evaluator """rankbased""" +916 19 dataset """kinships""" +916 19 model """transr""" +916 19 loss """softplus""" +916 19 regularizer """no""" +916 19 optimizer """adadelta""" +916 19 training_loop """owa""" +916 19 negative_sampler """basic""" +916 19 evaluator """rankbased""" +916 20 dataset """kinships""" +916 20 model """transr""" +916 20 loss """softplus""" +916 20 regularizer """no""" +916 20 optimizer """adadelta""" +916 20 training_loop """owa""" +916 20 negative_sampler """basic""" +916 20 evaluator """rankbased""" +916 21 dataset """kinships""" +916 21 model """transr""" +916 21 loss """softplus""" +916 21 regularizer """no""" +916 21 optimizer """adadelta""" +916 21 training_loop """owa""" +916 21 negative_sampler """basic""" +916 21 evaluator """rankbased""" +916 22 dataset """kinships""" +916 22 model """transr""" +916 22 loss """softplus""" +916 22 regularizer """no""" +916 22 optimizer """adadelta""" +916 22 training_loop """owa""" +916 22 negative_sampler """basic""" +916 22 evaluator """rankbased""" +916 23 dataset """kinships""" +916 23 model """transr""" +916 
23 loss """softplus""" +916 23 regularizer """no""" +916 23 optimizer """adadelta""" +916 23 training_loop """owa""" +916 23 negative_sampler """basic""" +916 23 evaluator """rankbased""" +916 24 dataset """kinships""" +916 24 model """transr""" +916 24 loss """softplus""" +916 24 regularizer """no""" +916 24 optimizer """adadelta""" +916 24 training_loop """owa""" +916 24 negative_sampler """basic""" +916 24 evaluator """rankbased""" +916 25 dataset """kinships""" +916 25 model """transr""" +916 25 loss """softplus""" +916 25 regularizer """no""" +916 25 optimizer """adadelta""" +916 25 training_loop """owa""" +916 25 negative_sampler """basic""" +916 25 evaluator """rankbased""" +916 26 dataset """kinships""" +916 26 model """transr""" +916 26 loss """softplus""" +916 26 regularizer """no""" +916 26 optimizer """adadelta""" +916 26 training_loop """owa""" +916 26 negative_sampler """basic""" +916 26 evaluator """rankbased""" +916 27 dataset """kinships""" +916 27 model """transr""" +916 27 loss """softplus""" +916 27 regularizer """no""" +916 27 optimizer """adadelta""" +916 27 training_loop """owa""" +916 27 negative_sampler """basic""" +916 27 evaluator """rankbased""" +916 28 dataset """kinships""" +916 28 model """transr""" +916 28 loss """softplus""" +916 28 regularizer """no""" +916 28 optimizer """adadelta""" +916 28 training_loop """owa""" +916 28 negative_sampler """basic""" +916 28 evaluator """rankbased""" +916 29 dataset """kinships""" +916 29 model """transr""" +916 29 loss """softplus""" +916 29 regularizer """no""" +916 29 optimizer """adadelta""" +916 29 training_loop """owa""" +916 29 negative_sampler """basic""" +916 29 evaluator """rankbased""" +916 30 dataset """kinships""" +916 30 model """transr""" +916 30 loss """softplus""" +916 30 regularizer """no""" +916 30 optimizer """adadelta""" +916 30 training_loop """owa""" +916 30 negative_sampler """basic""" +916 30 evaluator """rankbased""" +916 31 dataset """kinships""" +916 31 model 
"""transr""" +916 31 loss """softplus""" +916 31 regularizer """no""" +916 31 optimizer """adadelta""" +916 31 training_loop """owa""" +916 31 negative_sampler """basic""" +916 31 evaluator """rankbased""" +916 32 dataset """kinships""" +916 32 model """transr""" +916 32 loss """softplus""" +916 32 regularizer """no""" +916 32 optimizer """adadelta""" +916 32 training_loop """owa""" +916 32 negative_sampler """basic""" +916 32 evaluator """rankbased""" +916 33 dataset """kinships""" +916 33 model """transr""" +916 33 loss """softplus""" +916 33 regularizer """no""" +916 33 optimizer """adadelta""" +916 33 training_loop """owa""" +916 33 negative_sampler """basic""" +916 33 evaluator """rankbased""" +916 34 dataset """kinships""" +916 34 model """transr""" +916 34 loss """softplus""" +916 34 regularizer """no""" +916 34 optimizer """adadelta""" +916 34 training_loop """owa""" +916 34 negative_sampler """basic""" +916 34 evaluator """rankbased""" +916 35 dataset """kinships""" +916 35 model """transr""" +916 35 loss """softplus""" +916 35 regularizer """no""" +916 35 optimizer """adadelta""" +916 35 training_loop """owa""" +916 35 negative_sampler """basic""" +916 35 evaluator """rankbased""" +916 36 dataset """kinships""" +916 36 model """transr""" +916 36 loss """softplus""" +916 36 regularizer """no""" +916 36 optimizer """adadelta""" +916 36 training_loop """owa""" +916 36 negative_sampler """basic""" +916 36 evaluator """rankbased""" +916 37 dataset """kinships""" +916 37 model """transr""" +916 37 loss """softplus""" +916 37 regularizer """no""" +916 37 optimizer """adadelta""" +916 37 training_loop """owa""" +916 37 negative_sampler """basic""" +916 37 evaluator """rankbased""" +916 38 dataset """kinships""" +916 38 model """transr""" +916 38 loss """softplus""" +916 38 regularizer """no""" +916 38 optimizer """adadelta""" +916 38 training_loop """owa""" +916 38 negative_sampler """basic""" +916 38 evaluator """rankbased""" +916 39 dataset """kinships""" +916 
39 model """transr""" +916 39 loss """softplus""" +916 39 regularizer """no""" +916 39 optimizer """adadelta""" +916 39 training_loop """owa""" +916 39 negative_sampler """basic""" +916 39 evaluator """rankbased""" +916 40 dataset """kinships""" +916 40 model """transr""" +916 40 loss """softplus""" +916 40 regularizer """no""" +916 40 optimizer """adadelta""" +916 40 training_loop """owa""" +916 40 negative_sampler """basic""" +916 40 evaluator """rankbased""" +916 41 dataset """kinships""" +916 41 model """transr""" +916 41 loss """softplus""" +916 41 regularizer """no""" +916 41 optimizer """adadelta""" +916 41 training_loop """owa""" +916 41 negative_sampler """basic""" +916 41 evaluator """rankbased""" +916 42 dataset """kinships""" +916 42 model """transr""" +916 42 loss """softplus""" +916 42 regularizer """no""" +916 42 optimizer """adadelta""" +916 42 training_loop """owa""" +916 42 negative_sampler """basic""" +916 42 evaluator """rankbased""" +916 43 dataset """kinships""" +916 43 model """transr""" +916 43 loss """softplus""" +916 43 regularizer """no""" +916 43 optimizer """adadelta""" +916 43 training_loop """owa""" +916 43 negative_sampler """basic""" +916 43 evaluator """rankbased""" +916 44 dataset """kinships""" +916 44 model """transr""" +916 44 loss """softplus""" +916 44 regularizer """no""" +916 44 optimizer """adadelta""" +916 44 training_loop """owa""" +916 44 negative_sampler """basic""" +916 44 evaluator """rankbased""" +916 45 dataset """kinships""" +916 45 model """transr""" +916 45 loss """softplus""" +916 45 regularizer """no""" +916 45 optimizer """adadelta""" +916 45 training_loop """owa""" +916 45 negative_sampler """basic""" +916 45 evaluator """rankbased""" +916 46 dataset """kinships""" +916 46 model """transr""" +916 46 loss """softplus""" +916 46 regularizer """no""" +916 46 optimizer """adadelta""" +916 46 training_loop """owa""" +916 46 negative_sampler """basic""" +916 46 evaluator """rankbased""" +916 47 dataset 
"""kinships""" +916 47 model """transr""" +916 47 loss """softplus""" +916 47 regularizer """no""" +916 47 optimizer """adadelta""" +916 47 training_loop """owa""" +916 47 negative_sampler """basic""" +916 47 evaluator """rankbased""" +916 48 dataset """kinships""" +916 48 model """transr""" +916 48 loss """softplus""" +916 48 regularizer """no""" +916 48 optimizer """adadelta""" +916 48 training_loop """owa""" +916 48 negative_sampler """basic""" +916 48 evaluator """rankbased""" +916 49 dataset """kinships""" +916 49 model """transr""" +916 49 loss """softplus""" +916 49 regularizer """no""" +916 49 optimizer """adadelta""" +916 49 training_loop """owa""" +916 49 negative_sampler """basic""" +916 49 evaluator """rankbased""" +916 50 dataset """kinships""" +916 50 model """transr""" +916 50 loss """softplus""" +916 50 regularizer """no""" +916 50 optimizer """adadelta""" +916 50 training_loop """owa""" +916 50 negative_sampler """basic""" +916 50 evaluator """rankbased""" +916 51 dataset """kinships""" +916 51 model """transr""" +916 51 loss """softplus""" +916 51 regularizer """no""" +916 51 optimizer """adadelta""" +916 51 training_loop """owa""" +916 51 negative_sampler """basic""" +916 51 evaluator """rankbased""" +916 52 dataset """kinships""" +916 52 model """transr""" +916 52 loss """softplus""" +916 52 regularizer """no""" +916 52 optimizer """adadelta""" +916 52 training_loop """owa""" +916 52 negative_sampler """basic""" +916 52 evaluator """rankbased""" +916 53 dataset """kinships""" +916 53 model """transr""" +916 53 loss """softplus""" +916 53 regularizer """no""" +916 53 optimizer """adadelta""" +916 53 training_loop """owa""" +916 53 negative_sampler """basic""" +916 53 evaluator """rankbased""" +916 54 dataset """kinships""" +916 54 model """transr""" +916 54 loss """softplus""" +916 54 regularizer """no""" +916 54 optimizer """adadelta""" +916 54 training_loop """owa""" +916 54 negative_sampler """basic""" +916 54 evaluator """rankbased""" +916 55 
dataset """kinships""" +916 55 model """transr""" +916 55 loss """softplus""" +916 55 regularizer """no""" +916 55 optimizer """adadelta""" +916 55 training_loop """owa""" +916 55 negative_sampler """basic""" +916 55 evaluator """rankbased""" +916 56 dataset """kinships""" +916 56 model """transr""" +916 56 loss """softplus""" +916 56 regularizer """no""" +916 56 optimizer """adadelta""" +916 56 training_loop """owa""" +916 56 negative_sampler """basic""" +916 56 evaluator """rankbased""" +916 57 dataset """kinships""" +916 57 model """transr""" +916 57 loss """softplus""" +916 57 regularizer """no""" +916 57 optimizer """adadelta""" +916 57 training_loop """owa""" +916 57 negative_sampler """basic""" +916 57 evaluator """rankbased""" +916 58 dataset """kinships""" +916 58 model """transr""" +916 58 loss """softplus""" +916 58 regularizer """no""" +916 58 optimizer """adadelta""" +916 58 training_loop """owa""" +916 58 negative_sampler """basic""" +916 58 evaluator """rankbased""" +916 59 dataset """kinships""" +916 59 model """transr""" +916 59 loss """softplus""" +916 59 regularizer """no""" +916 59 optimizer """adadelta""" +916 59 training_loop """owa""" +916 59 negative_sampler """basic""" +916 59 evaluator """rankbased""" +916 60 dataset """kinships""" +916 60 model """transr""" +916 60 loss """softplus""" +916 60 regularizer """no""" +916 60 optimizer """adadelta""" +916 60 training_loop """owa""" +916 60 negative_sampler """basic""" +916 60 evaluator """rankbased""" +916 61 dataset """kinships""" +916 61 model """transr""" +916 61 loss """softplus""" +916 61 regularizer """no""" +916 61 optimizer """adadelta""" +916 61 training_loop """owa""" +916 61 negative_sampler """basic""" +916 61 evaluator """rankbased""" +916 62 dataset """kinships""" +916 62 model """transr""" +916 62 loss """softplus""" +916 62 regularizer """no""" +916 62 optimizer """adadelta""" +916 62 training_loop """owa""" +916 62 negative_sampler """basic""" +916 62 evaluator """rankbased""" 
+916 63 dataset """kinships""" +916 63 model """transr""" +916 63 loss """softplus""" +916 63 regularizer """no""" +916 63 optimizer """adadelta""" +916 63 training_loop """owa""" +916 63 negative_sampler """basic""" +916 63 evaluator """rankbased""" +916 64 dataset """kinships""" +916 64 model """transr""" +916 64 loss """softplus""" +916 64 regularizer """no""" +916 64 optimizer """adadelta""" +916 64 training_loop """owa""" +916 64 negative_sampler """basic""" +916 64 evaluator """rankbased""" +916 65 dataset """kinships""" +916 65 model """transr""" +916 65 loss """softplus""" +916 65 regularizer """no""" +916 65 optimizer """adadelta""" +916 65 training_loop """owa""" +916 65 negative_sampler """basic""" +916 65 evaluator """rankbased""" +916 66 dataset """kinships""" +916 66 model """transr""" +916 66 loss """softplus""" +916 66 regularizer """no""" +916 66 optimizer """adadelta""" +916 66 training_loop """owa""" +916 66 negative_sampler """basic""" +916 66 evaluator """rankbased""" +916 67 dataset """kinships""" +916 67 model """transr""" +916 67 loss """softplus""" +916 67 regularizer """no""" +916 67 optimizer """adadelta""" +916 67 training_loop """owa""" +916 67 negative_sampler """basic""" +916 67 evaluator """rankbased""" +916 68 dataset """kinships""" +916 68 model """transr""" +916 68 loss """softplus""" +916 68 regularizer """no""" +916 68 optimizer """adadelta""" +916 68 training_loop """owa""" +916 68 negative_sampler """basic""" +916 68 evaluator """rankbased""" +916 69 dataset """kinships""" +916 69 model """transr""" +916 69 loss """softplus""" +916 69 regularizer """no""" +916 69 optimizer """adadelta""" +916 69 training_loop """owa""" +916 69 negative_sampler """basic""" +916 69 evaluator """rankbased""" +916 70 dataset """kinships""" +916 70 model """transr""" +916 70 loss """softplus""" +916 70 regularizer """no""" +916 70 optimizer """adadelta""" +916 70 training_loop """owa""" +916 70 negative_sampler """basic""" +916 70 evaluator 
"""rankbased""" +916 71 dataset """kinships""" +916 71 model """transr""" +916 71 loss """softplus""" +916 71 regularizer """no""" +916 71 optimizer """adadelta""" +916 71 training_loop """owa""" +916 71 negative_sampler """basic""" +916 71 evaluator """rankbased""" +916 72 dataset """kinships""" +916 72 model """transr""" +916 72 loss """softplus""" +916 72 regularizer """no""" +916 72 optimizer """adadelta""" +916 72 training_loop """owa""" +916 72 negative_sampler """basic""" +916 72 evaluator """rankbased""" +916 73 dataset """kinships""" +916 73 model """transr""" +916 73 loss """softplus""" +916 73 regularizer """no""" +916 73 optimizer """adadelta""" +916 73 training_loop """owa""" +916 73 negative_sampler """basic""" +916 73 evaluator """rankbased""" +916 74 dataset """kinships""" +916 74 model """transr""" +916 74 loss """softplus""" +916 74 regularizer """no""" +916 74 optimizer """adadelta""" +916 74 training_loop """owa""" +916 74 negative_sampler """basic""" +916 74 evaluator """rankbased""" +916 75 dataset """kinships""" +916 75 model """transr""" +916 75 loss """softplus""" +916 75 regularizer """no""" +916 75 optimizer """adadelta""" +916 75 training_loop """owa""" +916 75 negative_sampler """basic""" +916 75 evaluator """rankbased""" +916 76 dataset """kinships""" +916 76 model """transr""" +916 76 loss """softplus""" +916 76 regularizer """no""" +916 76 optimizer """adadelta""" +916 76 training_loop """owa""" +916 76 negative_sampler """basic""" +916 76 evaluator """rankbased""" +916 77 dataset """kinships""" +916 77 model """transr""" +916 77 loss """softplus""" +916 77 regularizer """no""" +916 77 optimizer """adadelta""" +916 77 training_loop """owa""" +916 77 negative_sampler """basic""" +916 77 evaluator """rankbased""" +916 78 dataset """kinships""" +916 78 model """transr""" +916 78 loss """softplus""" +916 78 regularizer """no""" +916 78 optimizer """adadelta""" +916 78 training_loop """owa""" +916 78 negative_sampler """basic""" +916 78 
evaluator """rankbased""" +916 79 dataset """kinships""" +916 79 model """transr""" +916 79 loss """softplus""" +916 79 regularizer """no""" +916 79 optimizer """adadelta""" +916 79 training_loop """owa""" +916 79 negative_sampler """basic""" +916 79 evaluator """rankbased""" +916 80 dataset """kinships""" +916 80 model """transr""" +916 80 loss """softplus""" +916 80 regularizer """no""" +916 80 optimizer """adadelta""" +916 80 training_loop """owa""" +916 80 negative_sampler """basic""" +916 80 evaluator """rankbased""" +916 81 dataset """kinships""" +916 81 model """transr""" +916 81 loss """softplus""" +916 81 regularizer """no""" +916 81 optimizer """adadelta""" +916 81 training_loop """owa""" +916 81 negative_sampler """basic""" +916 81 evaluator """rankbased""" +916 82 dataset """kinships""" +916 82 model """transr""" +916 82 loss """softplus""" +916 82 regularizer """no""" +916 82 optimizer """adadelta""" +916 82 training_loop """owa""" +916 82 negative_sampler """basic""" +916 82 evaluator """rankbased""" +916 83 dataset """kinships""" +916 83 model """transr""" +916 83 loss """softplus""" +916 83 regularizer """no""" +916 83 optimizer """adadelta""" +916 83 training_loop """owa""" +916 83 negative_sampler """basic""" +916 83 evaluator """rankbased""" +916 84 dataset """kinships""" +916 84 model """transr""" +916 84 loss """softplus""" +916 84 regularizer """no""" +916 84 optimizer """adadelta""" +916 84 training_loop """owa""" +916 84 negative_sampler """basic""" +916 84 evaluator """rankbased""" +916 85 dataset """kinships""" +916 85 model """transr""" +916 85 loss """softplus""" +916 85 regularizer """no""" +916 85 optimizer """adadelta""" +916 85 training_loop """owa""" +916 85 negative_sampler """basic""" +916 85 evaluator """rankbased""" +916 86 dataset """kinships""" +916 86 model """transr""" +916 86 loss """softplus""" +916 86 regularizer """no""" +916 86 optimizer """adadelta""" +916 86 training_loop """owa""" +916 86 negative_sampler """basic""" 
+916 86 evaluator """rankbased""" +916 87 dataset """kinships""" +916 87 model """transr""" +916 87 loss """softplus""" +916 87 regularizer """no""" +916 87 optimizer """adadelta""" +916 87 training_loop """owa""" +916 87 negative_sampler """basic""" +916 87 evaluator """rankbased""" +916 88 dataset """kinships""" +916 88 model """transr""" +916 88 loss """softplus""" +916 88 regularizer """no""" +916 88 optimizer """adadelta""" +916 88 training_loop """owa""" +916 88 negative_sampler """basic""" +916 88 evaluator """rankbased""" +916 89 dataset """kinships""" +916 89 model """transr""" +916 89 loss """softplus""" +916 89 regularizer """no""" +916 89 optimizer """adadelta""" +916 89 training_loop """owa""" +916 89 negative_sampler """basic""" +916 89 evaluator """rankbased""" +916 90 dataset """kinships""" +916 90 model """transr""" +916 90 loss """softplus""" +916 90 regularizer """no""" +916 90 optimizer """adadelta""" +916 90 training_loop """owa""" +916 90 negative_sampler """basic""" +916 90 evaluator """rankbased""" +916 91 dataset """kinships""" +916 91 model """transr""" +916 91 loss """softplus""" +916 91 regularizer """no""" +916 91 optimizer """adadelta""" +916 91 training_loop """owa""" +916 91 negative_sampler """basic""" +916 91 evaluator """rankbased""" +916 92 dataset """kinships""" +916 92 model """transr""" +916 92 loss """softplus""" +916 92 regularizer """no""" +916 92 optimizer """adadelta""" +916 92 training_loop """owa""" +916 92 negative_sampler """basic""" +916 92 evaluator """rankbased""" +916 93 dataset """kinships""" +916 93 model """transr""" +916 93 loss """softplus""" +916 93 regularizer """no""" +916 93 optimizer """adadelta""" +916 93 training_loop """owa""" +916 93 negative_sampler """basic""" +916 93 evaluator """rankbased""" +916 94 dataset """kinships""" +916 94 model """transr""" +916 94 loss """softplus""" +916 94 regularizer """no""" +916 94 optimizer """adadelta""" +916 94 training_loop """owa""" +916 94 negative_sampler 
"""basic""" +916 94 evaluator """rankbased""" +916 95 dataset """kinships""" +916 95 model """transr""" +916 95 loss """softplus""" +916 95 regularizer """no""" +916 95 optimizer """adadelta""" +916 95 training_loop """owa""" +916 95 negative_sampler """basic""" +916 95 evaluator """rankbased""" +916 96 dataset """kinships""" +916 96 model """transr""" +916 96 loss """softplus""" +916 96 regularizer """no""" +916 96 optimizer """adadelta""" +916 96 training_loop """owa""" +916 96 negative_sampler """basic""" +916 96 evaluator """rankbased""" +916 97 dataset """kinships""" +916 97 model """transr""" +916 97 loss """softplus""" +916 97 regularizer """no""" +916 97 optimizer """adadelta""" +916 97 training_loop """owa""" +916 97 negative_sampler """basic""" +916 97 evaluator """rankbased""" +916 98 dataset """kinships""" +916 98 model """transr""" +916 98 loss """softplus""" +916 98 regularizer """no""" +916 98 optimizer """adadelta""" +916 98 training_loop """owa""" +916 98 negative_sampler """basic""" +916 98 evaluator """rankbased""" +916 99 dataset """kinships""" +916 99 model """transr""" +916 99 loss """softplus""" +916 99 regularizer """no""" +916 99 optimizer """adadelta""" +916 99 training_loop """owa""" +916 99 negative_sampler """basic""" +916 99 evaluator """rankbased""" +916 100 dataset """kinships""" +916 100 model """transr""" +916 100 loss """softplus""" +916 100 regularizer """no""" +916 100 optimizer """adadelta""" +916 100 training_loop """owa""" +916 100 negative_sampler """basic""" +916 100 evaluator """rankbased""" +917 1 model.embedding_dim 2.0 +917 1 model.relation_dim 2.0 +917 1 model.scoring_fct_norm 1.0 +917 1 loss.margin 4.722485857923933 +917 1 negative_sampler.num_negs_per_pos 71.0 +917 1 training.batch_size 2.0 +917 2 model.embedding_dim 0.0 +917 2 model.relation_dim 2.0 +917 2 model.scoring_fct_norm 2.0 +917 2 loss.margin 5.890225573568926 +917 2 negative_sampler.num_negs_per_pos 13.0 +917 2 training.batch_size 2.0 +917 3 
model.embedding_dim 0.0 +917 3 model.relation_dim 2.0 +917 3 model.scoring_fct_norm 2.0 +917 3 loss.margin 6.936497805140873 +917 3 negative_sampler.num_negs_per_pos 24.0 +917 3 training.batch_size 0.0 +917 4 model.embedding_dim 1.0 +917 4 model.relation_dim 1.0 +917 4 model.scoring_fct_norm 1.0 +917 4 loss.margin 8.0726993445173 +917 4 negative_sampler.num_negs_per_pos 85.0 +917 4 training.batch_size 1.0 +917 5 model.embedding_dim 0.0 +917 5 model.relation_dim 0.0 +917 5 model.scoring_fct_norm 2.0 +917 5 loss.margin 5.413392085313489 +917 5 negative_sampler.num_negs_per_pos 41.0 +917 5 training.batch_size 1.0 +917 6 model.embedding_dim 2.0 +917 6 model.relation_dim 2.0 +917 6 model.scoring_fct_norm 2.0 +917 6 loss.margin 1.6958974602661947 +917 6 negative_sampler.num_negs_per_pos 55.0 +917 6 training.batch_size 2.0 +917 7 model.embedding_dim 2.0 +917 7 model.relation_dim 0.0 +917 7 model.scoring_fct_norm 2.0 +917 7 loss.margin 6.83331511724644 +917 7 negative_sampler.num_negs_per_pos 84.0 +917 7 training.batch_size 0.0 +917 8 model.embedding_dim 2.0 +917 8 model.relation_dim 1.0 +917 8 model.scoring_fct_norm 1.0 +917 8 loss.margin 1.3812596448189167 +917 8 negative_sampler.num_negs_per_pos 51.0 +917 8 training.batch_size 2.0 +917 9 model.embedding_dim 0.0 +917 9 model.relation_dim 0.0 +917 9 model.scoring_fct_norm 2.0 +917 9 loss.margin 2.340932719901601 +917 9 negative_sampler.num_negs_per_pos 10.0 +917 9 training.batch_size 0.0 +917 10 model.embedding_dim 0.0 +917 10 model.relation_dim 1.0 +917 10 model.scoring_fct_norm 2.0 +917 10 loss.margin 2.412892810635591 +917 10 negative_sampler.num_negs_per_pos 73.0 +917 10 training.batch_size 2.0 +917 11 model.embedding_dim 2.0 +917 11 model.relation_dim 0.0 +917 11 model.scoring_fct_norm 1.0 +917 11 loss.margin 9.05501261360778 +917 11 negative_sampler.num_negs_per_pos 6.0 +917 11 training.batch_size 1.0 +917 12 model.embedding_dim 0.0 +917 12 model.relation_dim 0.0 +917 12 model.scoring_fct_norm 2.0 +917 12 
loss.margin 0.5756332479344111 +917 12 negative_sampler.num_negs_per_pos 41.0 +917 12 training.batch_size 2.0 +917 13 model.embedding_dim 1.0 +917 13 model.relation_dim 0.0 +917 13 model.scoring_fct_norm 1.0 +917 13 loss.margin 4.8620876387612215 +917 13 negative_sampler.num_negs_per_pos 68.0 +917 13 training.batch_size 0.0 +917 14 model.embedding_dim 1.0 +917 14 model.relation_dim 0.0 +917 14 model.scoring_fct_norm 2.0 +917 14 loss.margin 3.0022201020947614 +917 14 negative_sampler.num_negs_per_pos 1.0 +917 14 training.batch_size 1.0 +917 15 model.embedding_dim 2.0 +917 15 model.relation_dim 2.0 +917 15 model.scoring_fct_norm 2.0 +917 15 loss.margin 1.443088863223946 +917 15 negative_sampler.num_negs_per_pos 42.0 +917 15 training.batch_size 0.0 +917 16 model.embedding_dim 1.0 +917 16 model.relation_dim 1.0 +917 16 model.scoring_fct_norm 2.0 +917 16 loss.margin 5.237254521144064 +917 16 negative_sampler.num_negs_per_pos 69.0 +917 16 training.batch_size 1.0 +917 17 model.embedding_dim 2.0 +917 17 model.relation_dim 1.0 +917 17 model.scoring_fct_norm 2.0 +917 17 loss.margin 5.173032149690892 +917 17 negative_sampler.num_negs_per_pos 18.0 +917 17 training.batch_size 2.0 +917 18 model.embedding_dim 2.0 +917 18 model.relation_dim 1.0 +917 18 model.scoring_fct_norm 1.0 +917 18 loss.margin 9.485964961465786 +917 18 negative_sampler.num_negs_per_pos 7.0 +917 18 training.batch_size 0.0 +917 19 model.embedding_dim 2.0 +917 19 model.relation_dim 0.0 +917 19 model.scoring_fct_norm 1.0 +917 19 loss.margin 1.152381663755481 +917 19 negative_sampler.num_negs_per_pos 97.0 +917 19 training.batch_size 0.0 +917 20 model.embedding_dim 2.0 +917 20 model.relation_dim 0.0 +917 20 model.scoring_fct_norm 2.0 +917 20 loss.margin 4.318434318611342 +917 20 negative_sampler.num_negs_per_pos 92.0 +917 20 training.batch_size 2.0 +917 21 model.embedding_dim 0.0 +917 21 model.relation_dim 1.0 +917 21 model.scoring_fct_norm 2.0 +917 21 loss.margin 1.3811301897515882 +917 21 
negative_sampler.num_negs_per_pos 1.0 +917 21 training.batch_size 1.0 +917 22 model.embedding_dim 1.0 +917 22 model.relation_dim 0.0 +917 22 model.scoring_fct_norm 1.0 +917 22 loss.margin 9.95919648092791 +917 22 negative_sampler.num_negs_per_pos 45.0 +917 22 training.batch_size 0.0 +917 23 model.embedding_dim 2.0 +917 23 model.relation_dim 2.0 +917 23 model.scoring_fct_norm 2.0 +917 23 loss.margin 7.500996864776265 +917 23 negative_sampler.num_negs_per_pos 82.0 +917 23 training.batch_size 1.0 +917 24 model.embedding_dim 0.0 +917 24 model.relation_dim 1.0 +917 24 model.scoring_fct_norm 1.0 +917 24 loss.margin 5.8022014922150555 +917 24 negative_sampler.num_negs_per_pos 36.0 +917 24 training.batch_size 0.0 +917 25 model.embedding_dim 1.0 +917 25 model.relation_dim 0.0 +917 25 model.scoring_fct_norm 2.0 +917 25 loss.margin 8.754395076644936 +917 25 negative_sampler.num_negs_per_pos 86.0 +917 25 training.batch_size 1.0 +917 26 model.embedding_dim 0.0 +917 26 model.relation_dim 2.0 +917 26 model.scoring_fct_norm 2.0 +917 26 loss.margin 0.7011405533640311 +917 26 negative_sampler.num_negs_per_pos 52.0 +917 26 training.batch_size 0.0 +917 27 model.embedding_dim 0.0 +917 27 model.relation_dim 1.0 +917 27 model.scoring_fct_norm 2.0 +917 27 loss.margin 5.143820554039781 +917 27 negative_sampler.num_negs_per_pos 54.0 +917 27 training.batch_size 2.0 +917 28 model.embedding_dim 0.0 +917 28 model.relation_dim 1.0 +917 28 model.scoring_fct_norm 1.0 +917 28 loss.margin 2.708115177920544 +917 28 negative_sampler.num_negs_per_pos 5.0 +917 28 training.batch_size 1.0 +917 29 model.embedding_dim 2.0 +917 29 model.relation_dim 0.0 +917 29 model.scoring_fct_norm 1.0 +917 29 loss.margin 7.822901718338814 +917 29 negative_sampler.num_negs_per_pos 67.0 +917 29 training.batch_size 2.0 +917 30 model.embedding_dim 2.0 +917 30 model.relation_dim 2.0 +917 30 model.scoring_fct_norm 1.0 +917 30 loss.margin 7.197033695597995 +917 30 negative_sampler.num_negs_per_pos 71.0 +917 30 
training.batch_size 0.0 +917 31 model.embedding_dim 1.0 +917 31 model.relation_dim 1.0 +917 31 model.scoring_fct_norm 1.0 +917 31 loss.margin 1.3274185794332598 +917 31 negative_sampler.num_negs_per_pos 90.0 +917 31 training.batch_size 2.0 +917 32 model.embedding_dim 1.0 +917 32 model.relation_dim 1.0 +917 32 model.scoring_fct_norm 1.0 +917 32 loss.margin 9.244373367795331 +917 32 negative_sampler.num_negs_per_pos 70.0 +917 32 training.batch_size 2.0 +917 33 model.embedding_dim 0.0 +917 33 model.relation_dim 2.0 +917 33 model.scoring_fct_norm 1.0 +917 33 loss.margin 4.635557702150661 +917 33 negative_sampler.num_negs_per_pos 17.0 +917 33 training.batch_size 1.0 +917 34 model.embedding_dim 0.0 +917 34 model.relation_dim 1.0 +917 34 model.scoring_fct_norm 2.0 +917 34 loss.margin 0.9692760445794321 +917 34 negative_sampler.num_negs_per_pos 39.0 +917 34 training.batch_size 0.0 +917 35 model.embedding_dim 1.0 +917 35 model.relation_dim 2.0 +917 35 model.scoring_fct_norm 1.0 +917 35 loss.margin 8.59023332262424 +917 35 negative_sampler.num_negs_per_pos 63.0 +917 35 training.batch_size 1.0 +917 36 model.embedding_dim 2.0 +917 36 model.relation_dim 0.0 +917 36 model.scoring_fct_norm 1.0 +917 36 loss.margin 3.7624602668335805 +917 36 negative_sampler.num_negs_per_pos 71.0 +917 36 training.batch_size 0.0 +917 37 model.embedding_dim 1.0 +917 37 model.relation_dim 1.0 +917 37 model.scoring_fct_norm 2.0 +917 37 loss.margin 4.184640319458374 +917 37 negative_sampler.num_negs_per_pos 72.0 +917 37 training.batch_size 0.0 +917 38 model.embedding_dim 2.0 +917 38 model.relation_dim 1.0 +917 38 model.scoring_fct_norm 2.0 +917 38 loss.margin 9.286192185059788 +917 38 negative_sampler.num_negs_per_pos 63.0 +917 38 training.batch_size 0.0 +917 39 model.embedding_dim 0.0 +917 39 model.relation_dim 1.0 +917 39 model.scoring_fct_norm 1.0 +917 39 loss.margin 3.7618930696367516 +917 39 negative_sampler.num_negs_per_pos 39.0 +917 39 training.batch_size 2.0 +917 40 model.embedding_dim 0.0 +917 
40 model.relation_dim 1.0 +917 40 model.scoring_fct_norm 1.0 +917 40 loss.margin 9.663505380779577 +917 40 negative_sampler.num_negs_per_pos 68.0 +917 40 training.batch_size 0.0 +917 41 model.embedding_dim 1.0 +917 41 model.relation_dim 2.0 +917 41 model.scoring_fct_norm 1.0 +917 41 loss.margin 6.349975038782424 +917 41 negative_sampler.num_negs_per_pos 5.0 +917 41 training.batch_size 1.0 +917 42 model.embedding_dim 2.0 +917 42 model.relation_dim 0.0 +917 42 model.scoring_fct_norm 1.0 +917 42 loss.margin 0.7553005197015399 +917 42 negative_sampler.num_negs_per_pos 96.0 +917 42 training.batch_size 1.0 +917 43 model.embedding_dim 1.0 +917 43 model.relation_dim 0.0 +917 43 model.scoring_fct_norm 1.0 +917 43 loss.margin 7.981849252723817 +917 43 negative_sampler.num_negs_per_pos 88.0 +917 43 training.batch_size 0.0 +917 44 model.embedding_dim 1.0 +917 44 model.relation_dim 1.0 +917 44 model.scoring_fct_norm 2.0 +917 44 loss.margin 8.251748086178864 +917 44 negative_sampler.num_negs_per_pos 73.0 +917 44 training.batch_size 2.0 +917 45 model.embedding_dim 0.0 +917 45 model.relation_dim 1.0 +917 45 model.scoring_fct_norm 2.0 +917 45 loss.margin 4.1859314512391945 +917 45 negative_sampler.num_negs_per_pos 99.0 +917 45 training.batch_size 0.0 +917 46 model.embedding_dim 0.0 +917 46 model.relation_dim 0.0 +917 46 model.scoring_fct_norm 1.0 +917 46 loss.margin 7.5896526751185265 +917 46 negative_sampler.num_negs_per_pos 39.0 +917 46 training.batch_size 2.0 +917 47 model.embedding_dim 0.0 +917 47 model.relation_dim 0.0 +917 47 model.scoring_fct_norm 1.0 +917 47 loss.margin 1.054547243644917 +917 47 negative_sampler.num_negs_per_pos 22.0 +917 47 training.batch_size 0.0 +917 1 dataset """kinships""" +917 1 model """transr""" +917 1 loss """marginranking""" +917 1 regularizer """no""" +917 1 optimizer """adadelta""" +917 1 training_loop """owa""" +917 1 negative_sampler """basic""" +917 1 evaluator """rankbased""" +917 2 dataset """kinships""" +917 2 model """transr""" +917 2 
loss """marginranking""" +917 2 regularizer """no""" +917 2 optimizer """adadelta""" +917 2 training_loop """owa""" +917 2 negative_sampler """basic""" +917 2 evaluator """rankbased""" +917 3 dataset """kinships""" +917 3 model """transr""" +917 3 loss """marginranking""" +917 3 regularizer """no""" +917 3 optimizer """adadelta""" +917 3 training_loop """owa""" +917 3 negative_sampler """basic""" +917 3 evaluator """rankbased""" +917 4 dataset """kinships""" +917 4 model """transr""" +917 4 loss """marginranking""" +917 4 regularizer """no""" +917 4 optimizer """adadelta""" +917 4 training_loop """owa""" +917 4 negative_sampler """basic""" +917 4 evaluator """rankbased""" +917 5 dataset """kinships""" +917 5 model """transr""" +917 5 loss """marginranking""" +917 5 regularizer """no""" +917 5 optimizer """adadelta""" +917 5 training_loop """owa""" +917 5 negative_sampler """basic""" +917 5 evaluator """rankbased""" +917 6 dataset """kinships""" +917 6 model """transr""" +917 6 loss """marginranking""" +917 6 regularizer """no""" +917 6 optimizer """adadelta""" +917 6 training_loop """owa""" +917 6 negative_sampler """basic""" +917 6 evaluator """rankbased""" +917 7 dataset """kinships""" +917 7 model """transr""" +917 7 loss """marginranking""" +917 7 regularizer """no""" +917 7 optimizer """adadelta""" +917 7 training_loop """owa""" +917 7 negative_sampler """basic""" +917 7 evaluator """rankbased""" +917 8 dataset """kinships""" +917 8 model """transr""" +917 8 loss """marginranking""" +917 8 regularizer """no""" +917 8 optimizer """adadelta""" +917 8 training_loop """owa""" +917 8 negative_sampler """basic""" +917 8 evaluator """rankbased""" +917 9 dataset """kinships""" +917 9 model """transr""" +917 9 loss """marginranking""" +917 9 regularizer """no""" +917 9 optimizer """adadelta""" +917 9 training_loop """owa""" +917 9 negative_sampler """basic""" +917 9 evaluator """rankbased""" +917 10 dataset """kinships""" +917 10 model """transr""" +917 10 loss 
"""marginranking""" +917 10 regularizer """no""" +917 10 optimizer """adadelta""" +917 10 training_loop """owa""" +917 10 negative_sampler """basic""" +917 10 evaluator """rankbased""" +917 11 dataset """kinships""" +917 11 model """transr""" +917 11 loss """marginranking""" +917 11 regularizer """no""" +917 11 optimizer """adadelta""" +917 11 training_loop """owa""" +917 11 negative_sampler """basic""" +917 11 evaluator """rankbased""" +917 12 dataset """kinships""" +917 12 model """transr""" +917 12 loss """marginranking""" +917 12 regularizer """no""" +917 12 optimizer """adadelta""" +917 12 training_loop """owa""" +917 12 negative_sampler """basic""" +917 12 evaluator """rankbased""" +917 13 dataset """kinships""" +917 13 model """transr""" +917 13 loss """marginranking""" +917 13 regularizer """no""" +917 13 optimizer """adadelta""" +917 13 training_loop """owa""" +917 13 negative_sampler """basic""" +917 13 evaluator """rankbased""" +917 14 dataset """kinships""" +917 14 model """transr""" +917 14 loss """marginranking""" +917 14 regularizer """no""" +917 14 optimizer """adadelta""" +917 14 training_loop """owa""" +917 14 negative_sampler """basic""" +917 14 evaluator """rankbased""" +917 15 dataset """kinships""" +917 15 model """transr""" +917 15 loss """marginranking""" +917 15 regularizer """no""" +917 15 optimizer """adadelta""" +917 15 training_loop """owa""" +917 15 negative_sampler """basic""" +917 15 evaluator """rankbased""" +917 16 dataset """kinships""" +917 16 model """transr""" +917 16 loss """marginranking""" +917 16 regularizer """no""" +917 16 optimizer """adadelta""" +917 16 training_loop """owa""" +917 16 negative_sampler """basic""" +917 16 evaluator """rankbased""" +917 17 dataset """kinships""" +917 17 model """transr""" +917 17 loss """marginranking""" +917 17 regularizer """no""" +917 17 optimizer """adadelta""" +917 17 training_loop """owa""" +917 17 negative_sampler """basic""" +917 17 evaluator """rankbased""" +917 18 dataset 
"""kinships""" +917 18 model """transr""" +917 18 loss """marginranking""" +917 18 regularizer """no""" +917 18 optimizer """adadelta""" +917 18 training_loop """owa""" +917 18 negative_sampler """basic""" +917 18 evaluator """rankbased""" +917 19 dataset """kinships""" +917 19 model """transr""" +917 19 loss """marginranking""" +917 19 regularizer """no""" +917 19 optimizer """adadelta""" +917 19 training_loop """owa""" +917 19 negative_sampler """basic""" +917 19 evaluator """rankbased""" +917 20 dataset """kinships""" +917 20 model """transr""" +917 20 loss """marginranking""" +917 20 regularizer """no""" +917 20 optimizer """adadelta""" +917 20 training_loop """owa""" +917 20 negative_sampler """basic""" +917 20 evaluator """rankbased""" +917 21 dataset """kinships""" +917 21 model """transr""" +917 21 loss """marginranking""" +917 21 regularizer """no""" +917 21 optimizer """adadelta""" +917 21 training_loop """owa""" +917 21 negative_sampler """basic""" +917 21 evaluator """rankbased""" +917 22 dataset """kinships""" +917 22 model """transr""" +917 22 loss """marginranking""" +917 22 regularizer """no""" +917 22 optimizer """adadelta""" +917 22 training_loop """owa""" +917 22 negative_sampler """basic""" +917 22 evaluator """rankbased""" +917 23 dataset """kinships""" +917 23 model """transr""" +917 23 loss """marginranking""" +917 23 regularizer """no""" +917 23 optimizer """adadelta""" +917 23 training_loop """owa""" +917 23 negative_sampler """basic""" +917 23 evaluator """rankbased""" +917 24 dataset """kinships""" +917 24 model """transr""" +917 24 loss """marginranking""" +917 24 regularizer """no""" +917 24 optimizer """adadelta""" +917 24 training_loop """owa""" +917 24 negative_sampler """basic""" +917 24 evaluator """rankbased""" +917 25 dataset """kinships""" +917 25 model """transr""" +917 25 loss """marginranking""" +917 25 regularizer """no""" +917 25 optimizer """adadelta""" +917 25 training_loop """owa""" +917 25 negative_sampler """basic""" 
+917 25 evaluator """rankbased""" +917 26 dataset """kinships""" +917 26 model """transr""" +917 26 loss """marginranking""" +917 26 regularizer """no""" +917 26 optimizer """adadelta""" +917 26 training_loop """owa""" +917 26 negative_sampler """basic""" +917 26 evaluator """rankbased""" +917 27 dataset """kinships""" +917 27 model """transr""" +917 27 loss """marginranking""" +917 27 regularizer """no""" +917 27 optimizer """adadelta""" +917 27 training_loop """owa""" +917 27 negative_sampler """basic""" +917 27 evaluator """rankbased""" +917 28 dataset """kinships""" +917 28 model """transr""" +917 28 loss """marginranking""" +917 28 regularizer """no""" +917 28 optimizer """adadelta""" +917 28 training_loop """owa""" +917 28 negative_sampler """basic""" +917 28 evaluator """rankbased""" +917 29 dataset """kinships""" +917 29 model """transr""" +917 29 loss """marginranking""" +917 29 regularizer """no""" +917 29 optimizer """adadelta""" +917 29 training_loop """owa""" +917 29 negative_sampler """basic""" +917 29 evaluator """rankbased""" +917 30 dataset """kinships""" +917 30 model """transr""" +917 30 loss """marginranking""" +917 30 regularizer """no""" +917 30 optimizer """adadelta""" +917 30 training_loop """owa""" +917 30 negative_sampler """basic""" +917 30 evaluator """rankbased""" +917 31 dataset """kinships""" +917 31 model """transr""" +917 31 loss """marginranking""" +917 31 regularizer """no""" +917 31 optimizer """adadelta""" +917 31 training_loop """owa""" +917 31 negative_sampler """basic""" +917 31 evaluator """rankbased""" +917 32 dataset """kinships""" +917 32 model """transr""" +917 32 loss """marginranking""" +917 32 regularizer """no""" +917 32 optimizer """adadelta""" +917 32 training_loop """owa""" +917 32 negative_sampler """basic""" +917 32 evaluator """rankbased""" +917 33 dataset """kinships""" +917 33 model """transr""" +917 33 loss """marginranking""" +917 33 regularizer """no""" +917 33 optimizer """adadelta""" +917 33 
training_loop """owa""" +917 33 negative_sampler """basic""" +917 33 evaluator """rankbased""" +917 34 dataset """kinships""" +917 34 model """transr""" +917 34 loss """marginranking""" +917 34 regularizer """no""" +917 34 optimizer """adadelta""" +917 34 training_loop """owa""" +917 34 negative_sampler """basic""" +917 34 evaluator """rankbased""" +917 35 dataset """kinships""" +917 35 model """transr""" +917 35 loss """marginranking""" +917 35 regularizer """no""" +917 35 optimizer """adadelta""" +917 35 training_loop """owa""" +917 35 negative_sampler """basic""" +917 35 evaluator """rankbased""" +917 36 dataset """kinships""" +917 36 model """transr""" +917 36 loss """marginranking""" +917 36 regularizer """no""" +917 36 optimizer """adadelta""" +917 36 training_loop """owa""" +917 36 negative_sampler """basic""" +917 36 evaluator """rankbased""" +917 37 dataset """kinships""" +917 37 model """transr""" +917 37 loss """marginranking""" +917 37 regularizer """no""" +917 37 optimizer """adadelta""" +917 37 training_loop """owa""" +917 37 negative_sampler """basic""" +917 37 evaluator """rankbased""" +917 38 dataset """kinships""" +917 38 model """transr""" +917 38 loss """marginranking""" +917 38 regularizer """no""" +917 38 optimizer """adadelta""" +917 38 training_loop """owa""" +917 38 negative_sampler """basic""" +917 38 evaluator """rankbased""" +917 39 dataset """kinships""" +917 39 model """transr""" +917 39 loss """marginranking""" +917 39 regularizer """no""" +917 39 optimizer """adadelta""" +917 39 training_loop """owa""" +917 39 negative_sampler """basic""" +917 39 evaluator """rankbased""" +917 40 dataset """kinships""" +917 40 model """transr""" +917 40 loss """marginranking""" +917 40 regularizer """no""" +917 40 optimizer """adadelta""" +917 40 training_loop """owa""" +917 40 negative_sampler """basic""" +917 40 evaluator """rankbased""" +917 41 dataset """kinships""" +917 41 model """transr""" +917 41 loss """marginranking""" +917 41 regularizer 
"""no""" +917 41 optimizer """adadelta""" +917 41 training_loop """owa""" +917 41 negative_sampler """basic""" +917 41 evaluator """rankbased""" +917 42 dataset """kinships""" +917 42 model """transr""" +917 42 loss """marginranking""" +917 42 regularizer """no""" +917 42 optimizer """adadelta""" +917 42 training_loop """owa""" +917 42 negative_sampler """basic""" +917 42 evaluator """rankbased""" +917 43 dataset """kinships""" +917 43 model """transr""" +917 43 loss """marginranking""" +917 43 regularizer """no""" +917 43 optimizer """adadelta""" +917 43 training_loop """owa""" +917 43 negative_sampler """basic""" +917 43 evaluator """rankbased""" +917 44 dataset """kinships""" +917 44 model """transr""" +917 44 loss """marginranking""" +917 44 regularizer """no""" +917 44 optimizer """adadelta""" +917 44 training_loop """owa""" +917 44 negative_sampler """basic""" +917 44 evaluator """rankbased""" +917 45 dataset """kinships""" +917 45 model """transr""" +917 45 loss """marginranking""" +917 45 regularizer """no""" +917 45 optimizer """adadelta""" +917 45 training_loop """owa""" +917 45 negative_sampler """basic""" +917 45 evaluator """rankbased""" +917 46 dataset """kinships""" +917 46 model """transr""" +917 46 loss """marginranking""" +917 46 regularizer """no""" +917 46 optimizer """adadelta""" +917 46 training_loop """owa""" +917 46 negative_sampler """basic""" +917 46 evaluator """rankbased""" +917 47 dataset """kinships""" +917 47 model """transr""" +917 47 loss """marginranking""" +917 47 regularizer """no""" +917 47 optimizer """adadelta""" +917 47 training_loop """owa""" +917 47 negative_sampler """basic""" +917 47 evaluator """rankbased""" +918 1 model.embedding_dim 2.0 +918 1 model.relation_dim 0.0 +918 1 model.scoring_fct_norm 1.0 +918 1 loss.margin 7.56667070319539 +918 1 negative_sampler.num_negs_per_pos 90.0 +918 1 training.batch_size 2.0 +918 2 model.embedding_dim 1.0 +918 2 model.relation_dim 2.0 +918 2 model.scoring_fct_norm 2.0 +918 2 
loss.margin 7.241006431731553 +918 2 negative_sampler.num_negs_per_pos 81.0 +918 2 training.batch_size 2.0 +918 3 model.embedding_dim 2.0 +918 3 model.relation_dim 0.0 +918 3 model.scoring_fct_norm 2.0 +918 3 loss.margin 5.432028748082113 +918 3 negative_sampler.num_negs_per_pos 72.0 +918 3 training.batch_size 2.0 +918 4 model.embedding_dim 2.0 +918 4 model.relation_dim 2.0 +918 4 model.scoring_fct_norm 2.0 +918 4 loss.margin 8.273384398344643 +918 4 negative_sampler.num_negs_per_pos 35.0 +918 4 training.batch_size 0.0 +918 5 model.embedding_dim 0.0 +918 5 model.relation_dim 0.0 +918 5 model.scoring_fct_norm 2.0 +918 5 loss.margin 0.8752042451798498 +918 5 negative_sampler.num_negs_per_pos 88.0 +918 5 training.batch_size 0.0 +918 6 model.embedding_dim 2.0 +918 6 model.relation_dim 0.0 +918 6 model.scoring_fct_norm 1.0 +918 6 loss.margin 5.556415553125413 +918 6 negative_sampler.num_negs_per_pos 61.0 +918 6 training.batch_size 0.0 +918 7 model.embedding_dim 1.0 +918 7 model.relation_dim 1.0 +918 7 model.scoring_fct_norm 2.0 +918 7 loss.margin 3.59946233312362 +918 7 negative_sampler.num_negs_per_pos 53.0 +918 7 training.batch_size 0.0 +918 8 model.embedding_dim 0.0 +918 8 model.relation_dim 2.0 +918 8 model.scoring_fct_norm 1.0 +918 8 loss.margin 4.795750231411038 +918 8 negative_sampler.num_negs_per_pos 16.0 +918 8 training.batch_size 1.0 +918 9 model.embedding_dim 2.0 +918 9 model.relation_dim 0.0 +918 9 model.scoring_fct_norm 1.0 +918 9 loss.margin 3.873014678118659 +918 9 negative_sampler.num_negs_per_pos 44.0 +918 9 training.batch_size 2.0 +918 10 model.embedding_dim 0.0 +918 10 model.relation_dim 2.0 +918 10 model.scoring_fct_norm 1.0 +918 10 loss.margin 0.8155546214303204 +918 10 negative_sampler.num_negs_per_pos 43.0 +918 10 training.batch_size 0.0 +918 11 model.embedding_dim 2.0 +918 11 model.relation_dim 0.0 +918 11 model.scoring_fct_norm 2.0 +918 11 loss.margin 9.0138255166902 +918 11 negative_sampler.num_negs_per_pos 47.0 +918 11 training.batch_size 2.0 
+918 12 model.embedding_dim 0.0 +918 12 model.relation_dim 2.0 +918 12 model.scoring_fct_norm 2.0 +918 12 loss.margin 4.254033580950928 +918 12 negative_sampler.num_negs_per_pos 84.0 +918 12 training.batch_size 1.0 +918 13 model.embedding_dim 0.0 +918 13 model.relation_dim 1.0 +918 13 model.scoring_fct_norm 1.0 +918 13 loss.margin 0.8362369250671622 +918 13 negative_sampler.num_negs_per_pos 24.0 +918 13 training.batch_size 1.0 +918 14 model.embedding_dim 1.0 +918 14 model.relation_dim 0.0 +918 14 model.scoring_fct_norm 2.0 +918 14 loss.margin 8.025578378012096 +918 14 negative_sampler.num_negs_per_pos 9.0 +918 14 training.batch_size 0.0 +918 15 model.embedding_dim 2.0 +918 15 model.relation_dim 2.0 +918 15 model.scoring_fct_norm 2.0 +918 15 loss.margin 6.0966974849760245 +918 15 negative_sampler.num_negs_per_pos 31.0 +918 15 training.batch_size 2.0 +918 16 model.embedding_dim 0.0 +918 16 model.relation_dim 2.0 +918 16 model.scoring_fct_norm 2.0 +918 16 loss.margin 5.069339318056283 +918 16 negative_sampler.num_negs_per_pos 19.0 +918 16 training.batch_size 2.0 +918 17 model.embedding_dim 2.0 +918 17 model.relation_dim 0.0 +918 17 model.scoring_fct_norm 1.0 +918 17 loss.margin 5.23809256262415 +918 17 negative_sampler.num_negs_per_pos 40.0 +918 17 training.batch_size 0.0 +918 18 model.embedding_dim 1.0 +918 18 model.relation_dim 0.0 +918 18 model.scoring_fct_norm 2.0 +918 18 loss.margin 8.514860733722625 +918 18 negative_sampler.num_negs_per_pos 1.0 +918 18 training.batch_size 0.0 +918 19 model.embedding_dim 1.0 +918 19 model.relation_dim 2.0 +918 19 model.scoring_fct_norm 2.0 +918 19 loss.margin 9.542575401917963 +918 19 negative_sampler.num_negs_per_pos 57.0 +918 19 training.batch_size 1.0 +918 20 model.embedding_dim 2.0 +918 20 model.relation_dim 0.0 +918 20 model.scoring_fct_norm 1.0 +918 20 loss.margin 6.334760016406133 +918 20 negative_sampler.num_negs_per_pos 39.0 +918 20 training.batch_size 0.0 +918 21 model.embedding_dim 2.0 +918 21 model.relation_dim 0.0 
+918 21 model.scoring_fct_norm 2.0 +918 21 loss.margin 9.782002104793964 +918 21 negative_sampler.num_negs_per_pos 78.0 +918 21 training.batch_size 0.0 +918 22 model.embedding_dim 0.0 +918 22 model.relation_dim 1.0 +918 22 model.scoring_fct_norm 1.0 +918 22 loss.margin 5.18893464542794 +918 22 negative_sampler.num_negs_per_pos 30.0 +918 22 training.batch_size 1.0 +918 23 model.embedding_dim 2.0 +918 23 model.relation_dim 0.0 +918 23 model.scoring_fct_norm 1.0 +918 23 loss.margin 3.1595106520210487 +918 23 negative_sampler.num_negs_per_pos 87.0 +918 23 training.batch_size 1.0 +918 24 model.embedding_dim 0.0 +918 24 model.relation_dim 1.0 +918 24 model.scoring_fct_norm 1.0 +918 24 loss.margin 5.601882562668357 +918 24 negative_sampler.num_negs_per_pos 62.0 +918 24 training.batch_size 0.0 +918 25 model.embedding_dim 1.0 +918 25 model.relation_dim 0.0 +918 25 model.scoring_fct_norm 2.0 +918 25 loss.margin 8.945912750363153 +918 25 negative_sampler.num_negs_per_pos 20.0 +918 25 training.batch_size 1.0 +918 26 model.embedding_dim 2.0 +918 26 model.relation_dim 1.0 +918 26 model.scoring_fct_norm 1.0 +918 26 loss.margin 3.7538724328834996 +918 26 negative_sampler.num_negs_per_pos 93.0 +918 26 training.batch_size 2.0 +918 27 model.embedding_dim 0.0 +918 27 model.relation_dim 0.0 +918 27 model.scoring_fct_norm 1.0 +918 27 loss.margin 6.486228202652127 +918 27 negative_sampler.num_negs_per_pos 62.0 +918 27 training.batch_size 1.0 +918 28 model.embedding_dim 0.0 +918 28 model.relation_dim 2.0 +918 28 model.scoring_fct_norm 1.0 +918 28 loss.margin 4.2097653782283455 +918 28 negative_sampler.num_negs_per_pos 28.0 +918 28 training.batch_size 0.0 +918 29 model.embedding_dim 2.0 +918 29 model.relation_dim 1.0 +918 29 model.scoring_fct_norm 1.0 +918 29 loss.margin 3.68227241944765 +918 29 negative_sampler.num_negs_per_pos 25.0 +918 29 training.batch_size 1.0 +918 30 model.embedding_dim 0.0 +918 30 model.relation_dim 2.0 +918 30 model.scoring_fct_norm 1.0 +918 30 loss.margin 
8.899858113658619 +918 30 negative_sampler.num_negs_per_pos 58.0 +918 30 training.batch_size 1.0 +918 31 model.embedding_dim 2.0 +918 31 model.relation_dim 2.0 +918 31 model.scoring_fct_norm 2.0 +918 31 loss.margin 3.4004171037531927 +918 31 negative_sampler.num_negs_per_pos 95.0 +918 31 training.batch_size 1.0 +918 32 model.embedding_dim 2.0 +918 32 model.relation_dim 2.0 +918 32 model.scoring_fct_norm 1.0 +918 32 loss.margin 0.7629839524553534 +918 32 negative_sampler.num_negs_per_pos 9.0 +918 32 training.batch_size 1.0 +918 33 model.embedding_dim 1.0 +918 33 model.relation_dim 0.0 +918 33 model.scoring_fct_norm 2.0 +918 33 loss.margin 3.885130801061483 +918 33 negative_sampler.num_negs_per_pos 51.0 +918 33 training.batch_size 1.0 +918 34 model.embedding_dim 1.0 +918 34 model.relation_dim 1.0 +918 34 model.scoring_fct_norm 1.0 +918 34 loss.margin 7.970405792521368 +918 34 negative_sampler.num_negs_per_pos 94.0 +918 34 training.batch_size 2.0 +918 35 model.embedding_dim 0.0 +918 35 model.relation_dim 1.0 +918 35 model.scoring_fct_norm 1.0 +918 35 loss.margin 4.296499667294281 +918 35 negative_sampler.num_negs_per_pos 86.0 +918 35 training.batch_size 1.0 +918 36 model.embedding_dim 0.0 +918 36 model.relation_dim 2.0 +918 36 model.scoring_fct_norm 2.0 +918 36 loss.margin 5.82787564982973 +918 36 negative_sampler.num_negs_per_pos 51.0 +918 36 training.batch_size 1.0 +918 37 model.embedding_dim 0.0 +918 37 model.relation_dim 0.0 +918 37 model.scoring_fct_norm 1.0 +918 37 loss.margin 5.579798749632728 +918 37 negative_sampler.num_negs_per_pos 92.0 +918 37 training.batch_size 1.0 +918 38 model.embedding_dim 1.0 +918 38 model.relation_dim 0.0 +918 38 model.scoring_fct_norm 2.0 +918 38 loss.margin 4.316805026234679 +918 38 negative_sampler.num_negs_per_pos 42.0 +918 38 training.batch_size 0.0 +918 39 model.embedding_dim 0.0 +918 39 model.relation_dim 2.0 +918 39 model.scoring_fct_norm 1.0 +918 39 loss.margin 2.4730151087953747 +918 39 negative_sampler.num_negs_per_pos 
82.0 +918 39 training.batch_size 0.0 +918 40 model.embedding_dim 0.0 +918 40 model.relation_dim 2.0 +918 40 model.scoring_fct_norm 1.0 +918 40 loss.margin 8.103357567717755 +918 40 negative_sampler.num_negs_per_pos 28.0 +918 40 training.batch_size 0.0 +918 41 model.embedding_dim 2.0 +918 41 model.relation_dim 0.0 +918 41 model.scoring_fct_norm 2.0 +918 41 loss.margin 3.3121325537586563 +918 41 negative_sampler.num_negs_per_pos 30.0 +918 41 training.batch_size 0.0 +918 42 model.embedding_dim 2.0 +918 42 model.relation_dim 2.0 +918 42 model.scoring_fct_norm 2.0 +918 42 loss.margin 1.3323006775533939 +918 42 negative_sampler.num_negs_per_pos 70.0 +918 42 training.batch_size 2.0 +918 43 model.embedding_dim 1.0 +918 43 model.relation_dim 1.0 +918 43 model.scoring_fct_norm 1.0 +918 43 loss.margin 7.306261048629382 +918 43 negative_sampler.num_negs_per_pos 29.0 +918 43 training.batch_size 0.0 +918 44 model.embedding_dim 0.0 +918 44 model.relation_dim 2.0 +918 44 model.scoring_fct_norm 2.0 +918 44 loss.margin 3.108813923449649 +918 44 negative_sampler.num_negs_per_pos 63.0 +918 44 training.batch_size 0.0 +918 45 model.embedding_dim 2.0 +918 45 model.relation_dim 1.0 +918 45 model.scoring_fct_norm 1.0 +918 45 loss.margin 7.747947533973789 +918 45 negative_sampler.num_negs_per_pos 68.0 +918 45 training.batch_size 1.0 +918 46 model.embedding_dim 2.0 +918 46 model.relation_dim 1.0 +918 46 model.scoring_fct_norm 2.0 +918 46 loss.margin 8.44339718914466 +918 46 negative_sampler.num_negs_per_pos 59.0 +918 46 training.batch_size 2.0 +918 47 model.embedding_dim 2.0 +918 47 model.relation_dim 1.0 +918 47 model.scoring_fct_norm 1.0 +918 47 loss.margin 3.456370616457799 +918 47 negative_sampler.num_negs_per_pos 29.0 +918 47 training.batch_size 1.0 +918 48 model.embedding_dim 0.0 +918 48 model.relation_dim 1.0 +918 48 model.scoring_fct_norm 2.0 +918 48 loss.margin 0.6917025165631212 +918 48 negative_sampler.num_negs_per_pos 49.0 +918 48 training.batch_size 0.0 +918 49 
model.embedding_dim 1.0 +918 49 model.relation_dim 2.0 +918 49 model.scoring_fct_norm 2.0 +918 49 loss.margin 1.1806640474231922 +918 49 negative_sampler.num_negs_per_pos 4.0 +918 49 training.batch_size 2.0 +918 50 model.embedding_dim 0.0 +918 50 model.relation_dim 0.0 +918 50 model.scoring_fct_norm 2.0 +918 50 loss.margin 6.236622451144632 +918 50 negative_sampler.num_negs_per_pos 23.0 +918 50 training.batch_size 2.0 +918 51 model.embedding_dim 1.0 +918 51 model.relation_dim 2.0 +918 51 model.scoring_fct_norm 2.0 +918 51 loss.margin 7.956533737687237 +918 51 negative_sampler.num_negs_per_pos 62.0 +918 51 training.batch_size 0.0 +918 52 model.embedding_dim 2.0 +918 52 model.relation_dim 2.0 +918 52 model.scoring_fct_norm 2.0 +918 52 loss.margin 2.0138172437212374 +918 52 negative_sampler.num_negs_per_pos 56.0 +918 52 training.batch_size 0.0 +918 53 model.embedding_dim 1.0 +918 53 model.relation_dim 1.0 +918 53 model.scoring_fct_norm 1.0 +918 53 loss.margin 2.58580327454633 +918 53 negative_sampler.num_negs_per_pos 24.0 +918 53 training.batch_size 2.0 +918 54 model.embedding_dim 2.0 +918 54 model.relation_dim 2.0 +918 54 model.scoring_fct_norm 1.0 +918 54 loss.margin 6.435380066258304 +918 54 negative_sampler.num_negs_per_pos 53.0 +918 54 training.batch_size 0.0 +918 55 model.embedding_dim 2.0 +918 55 model.relation_dim 0.0 +918 55 model.scoring_fct_norm 2.0 +918 55 loss.margin 7.1672458456753 +918 55 negative_sampler.num_negs_per_pos 78.0 +918 55 training.batch_size 1.0 +918 56 model.embedding_dim 1.0 +918 56 model.relation_dim 1.0 +918 56 model.scoring_fct_norm 1.0 +918 56 loss.margin 6.669507176067418 +918 56 negative_sampler.num_negs_per_pos 70.0 +918 56 training.batch_size 2.0 +918 57 model.embedding_dim 2.0 +918 57 model.relation_dim 0.0 +918 57 model.scoring_fct_norm 2.0 +918 57 loss.margin 6.974260603930845 +918 57 negative_sampler.num_negs_per_pos 26.0 +918 57 training.batch_size 1.0 +918 58 model.embedding_dim 0.0 +918 58 model.relation_dim 2.0 +918 58 
model.scoring_fct_norm 2.0 +918 58 loss.margin 6.637815298276077 +918 58 negative_sampler.num_negs_per_pos 60.0 +918 58 training.batch_size 0.0 +918 59 model.embedding_dim 2.0 +918 59 model.relation_dim 2.0 +918 59 model.scoring_fct_norm 1.0 +918 59 loss.margin 4.52482759890848 +918 59 negative_sampler.num_negs_per_pos 61.0 +918 59 training.batch_size 2.0 +918 60 model.embedding_dim 0.0 +918 60 model.relation_dim 0.0 +918 60 model.scoring_fct_norm 2.0 +918 60 loss.margin 2.7131800977006515 +918 60 negative_sampler.num_negs_per_pos 8.0 +918 60 training.batch_size 1.0 +918 61 model.embedding_dim 0.0 +918 61 model.relation_dim 0.0 +918 61 model.scoring_fct_norm 1.0 +918 61 loss.margin 5.908020115870105 +918 61 negative_sampler.num_negs_per_pos 92.0 +918 61 training.batch_size 1.0 +918 62 model.embedding_dim 2.0 +918 62 model.relation_dim 0.0 +918 62 model.scoring_fct_norm 1.0 +918 62 loss.margin 4.610722567033139 +918 62 negative_sampler.num_negs_per_pos 25.0 +918 62 training.batch_size 2.0 +918 63 model.embedding_dim 2.0 +918 63 model.relation_dim 0.0 +918 63 model.scoring_fct_norm 2.0 +918 63 loss.margin 1.9888057964558672 +918 63 negative_sampler.num_negs_per_pos 50.0 +918 63 training.batch_size 2.0 +918 64 model.embedding_dim 1.0 +918 64 model.relation_dim 0.0 +918 64 model.scoring_fct_norm 1.0 +918 64 loss.margin 9.717216948404772 +918 64 negative_sampler.num_negs_per_pos 11.0 +918 64 training.batch_size 0.0 +918 65 model.embedding_dim 2.0 +918 65 model.relation_dim 0.0 +918 65 model.scoring_fct_norm 2.0 +918 65 loss.margin 5.635914422515769 +918 65 negative_sampler.num_negs_per_pos 90.0 +918 65 training.batch_size 0.0 +918 66 model.embedding_dim 1.0 +918 66 model.relation_dim 2.0 +918 66 model.scoring_fct_norm 2.0 +918 66 loss.margin 5.960815648056409 +918 66 negative_sampler.num_negs_per_pos 31.0 +918 66 training.batch_size 1.0 +918 67 model.embedding_dim 1.0 +918 67 model.relation_dim 1.0 +918 67 model.scoring_fct_norm 2.0 +918 67 loss.margin 
2.8209448449135293 +918 67 negative_sampler.num_negs_per_pos 11.0 +918 67 training.batch_size 1.0 +918 68 model.embedding_dim 0.0 +918 68 model.relation_dim 0.0 +918 68 model.scoring_fct_norm 2.0 +918 68 loss.margin 0.8776584853554688 +918 68 negative_sampler.num_negs_per_pos 37.0 +918 68 training.batch_size 0.0 +918 69 model.embedding_dim 1.0 +918 69 model.relation_dim 0.0 +918 69 model.scoring_fct_norm 1.0 +918 69 loss.margin 1.0047600124804168 +918 69 negative_sampler.num_negs_per_pos 97.0 +918 69 training.batch_size 2.0 +918 70 model.embedding_dim 2.0 +918 70 model.relation_dim 2.0 +918 70 model.scoring_fct_norm 1.0 +918 70 loss.margin 4.839586674870807 +918 70 negative_sampler.num_negs_per_pos 68.0 +918 70 training.batch_size 0.0 +918 71 model.embedding_dim 2.0 +918 71 model.relation_dim 2.0 +918 71 model.scoring_fct_norm 1.0 +918 71 loss.margin 9.171608613984692 +918 71 negative_sampler.num_negs_per_pos 89.0 +918 71 training.batch_size 0.0 +918 72 model.embedding_dim 1.0 +918 72 model.relation_dim 2.0 +918 72 model.scoring_fct_norm 1.0 +918 72 loss.margin 5.401407039150735 +918 72 negative_sampler.num_negs_per_pos 42.0 +918 72 training.batch_size 1.0 +918 73 model.embedding_dim 2.0 +918 73 model.relation_dim 2.0 +918 73 model.scoring_fct_norm 1.0 +918 73 loss.margin 4.9760901083830875 +918 73 negative_sampler.num_negs_per_pos 89.0 +918 73 training.batch_size 2.0 +918 74 model.embedding_dim 1.0 +918 74 model.relation_dim 0.0 +918 74 model.scoring_fct_norm 1.0 +918 74 loss.margin 3.2720774263739654 +918 74 negative_sampler.num_negs_per_pos 91.0 +918 74 training.batch_size 0.0 +918 75 model.embedding_dim 2.0 +918 75 model.relation_dim 2.0 +918 75 model.scoring_fct_norm 2.0 +918 75 loss.margin 6.514164403972211 +918 75 negative_sampler.num_negs_per_pos 65.0 +918 75 training.batch_size 0.0 +918 76 model.embedding_dim 1.0 +918 76 model.relation_dim 2.0 +918 76 model.scoring_fct_norm 2.0 +918 76 loss.margin 9.658374036389827 +918 76 negative_sampler.num_negs_per_pos 
61.0 +918 76 training.batch_size 0.0 +918 77 model.embedding_dim 2.0 +918 77 model.relation_dim 2.0 +918 77 model.scoring_fct_norm 1.0 +918 77 loss.margin 9.006711217653736 +918 77 negative_sampler.num_negs_per_pos 83.0 +918 77 training.batch_size 2.0 +918 78 model.embedding_dim 2.0 +918 78 model.relation_dim 2.0 +918 78 model.scoring_fct_norm 1.0 +918 78 loss.margin 5.902267699320533 +918 78 negative_sampler.num_negs_per_pos 27.0 +918 78 training.batch_size 0.0 +918 79 model.embedding_dim 2.0 +918 79 model.relation_dim 1.0 +918 79 model.scoring_fct_norm 2.0 +918 79 loss.margin 4.689235774641707 +918 79 negative_sampler.num_negs_per_pos 55.0 +918 79 training.batch_size 1.0 +918 80 model.embedding_dim 1.0 +918 80 model.relation_dim 2.0 +918 80 model.scoring_fct_norm 2.0 +918 80 loss.margin 4.315086866777918 +918 80 negative_sampler.num_negs_per_pos 63.0 +918 80 training.batch_size 1.0 +918 81 model.embedding_dim 0.0 +918 81 model.relation_dim 0.0 +918 81 model.scoring_fct_norm 1.0 +918 81 loss.margin 0.6598040009171424 +918 81 negative_sampler.num_negs_per_pos 74.0 +918 81 training.batch_size 2.0 +918 82 model.embedding_dim 2.0 +918 82 model.relation_dim 2.0 +918 82 model.scoring_fct_norm 1.0 +918 82 loss.margin 0.7141458640563068 +918 82 negative_sampler.num_negs_per_pos 65.0 +918 82 training.batch_size 0.0 +918 83 model.embedding_dim 1.0 +918 83 model.relation_dim 0.0 +918 83 model.scoring_fct_norm 2.0 +918 83 loss.margin 1.5217595388878005 +918 83 negative_sampler.num_negs_per_pos 73.0 +918 83 training.batch_size 0.0 +918 84 model.embedding_dim 1.0 +918 84 model.relation_dim 0.0 +918 84 model.scoring_fct_norm 2.0 +918 84 loss.margin 2.5060052010712455 +918 84 negative_sampler.num_negs_per_pos 30.0 +918 84 training.batch_size 0.0 +918 85 model.embedding_dim 0.0 +918 85 model.relation_dim 2.0 +918 85 model.scoring_fct_norm 2.0 +918 85 loss.margin 1.3348637247517752 +918 85 negative_sampler.num_negs_per_pos 32.0 +918 85 training.batch_size 1.0 +918 86 
model.embedding_dim 0.0 +918 86 model.relation_dim 0.0 +918 86 model.scoring_fct_norm 1.0 +918 86 loss.margin 1.375642138639287 +918 86 negative_sampler.num_negs_per_pos 72.0 +918 86 training.batch_size 0.0 +918 87 model.embedding_dim 2.0 +918 87 model.relation_dim 0.0 +918 87 model.scoring_fct_norm 2.0 +918 87 loss.margin 7.444750467804739 +918 87 negative_sampler.num_negs_per_pos 69.0 +918 87 training.batch_size 1.0 +918 88 model.embedding_dim 1.0 +918 88 model.relation_dim 1.0 +918 88 model.scoring_fct_norm 1.0 +918 88 loss.margin 5.349537324928265 +918 88 negative_sampler.num_negs_per_pos 86.0 +918 88 training.batch_size 1.0 +918 89 model.embedding_dim 0.0 +918 89 model.relation_dim 0.0 +918 89 model.scoring_fct_norm 2.0 +918 89 loss.margin 4.876373610731373 +918 89 negative_sampler.num_negs_per_pos 57.0 +918 89 training.batch_size 0.0 +918 90 model.embedding_dim 0.0 +918 90 model.relation_dim 1.0 +918 90 model.scoring_fct_norm 1.0 +918 90 loss.margin 2.0514707692156735 +918 90 negative_sampler.num_negs_per_pos 92.0 +918 90 training.batch_size 1.0 +918 91 model.embedding_dim 1.0 +918 91 model.relation_dim 0.0 +918 91 model.scoring_fct_norm 1.0 +918 91 loss.margin 5.080614498227525 +918 91 negative_sampler.num_negs_per_pos 78.0 +918 91 training.batch_size 0.0 +918 92 model.embedding_dim 1.0 +918 92 model.relation_dim 0.0 +918 92 model.scoring_fct_norm 1.0 +918 92 loss.margin 6.426820093824297 +918 92 negative_sampler.num_negs_per_pos 22.0 +918 92 training.batch_size 0.0 +918 93 model.embedding_dim 0.0 +918 93 model.relation_dim 1.0 +918 93 model.scoring_fct_norm 1.0 +918 93 loss.margin 3.3392637482441163 +918 93 negative_sampler.num_negs_per_pos 97.0 +918 93 training.batch_size 1.0 +918 94 model.embedding_dim 0.0 +918 94 model.relation_dim 1.0 +918 94 model.scoring_fct_norm 1.0 +918 94 loss.margin 1.1343810036997695 +918 94 negative_sampler.num_negs_per_pos 6.0 +918 94 training.batch_size 0.0 +918 95 model.embedding_dim 0.0 +918 95 model.relation_dim 0.0 +918 95 
model.scoring_fct_norm 1.0 +918 95 loss.margin 6.389271782900011 +918 95 negative_sampler.num_negs_per_pos 29.0 +918 95 training.batch_size 1.0 +918 96 model.embedding_dim 0.0 +918 96 model.relation_dim 1.0 +918 96 model.scoring_fct_norm 2.0 +918 96 loss.margin 1.529368100585148 +918 96 negative_sampler.num_negs_per_pos 46.0 +918 96 training.batch_size 1.0 +918 97 model.embedding_dim 0.0 +918 97 model.relation_dim 0.0 +918 97 model.scoring_fct_norm 2.0 +918 97 loss.margin 8.752894281910114 +918 97 negative_sampler.num_negs_per_pos 85.0 +918 97 training.batch_size 2.0 +918 98 model.embedding_dim 0.0 +918 98 model.relation_dim 1.0 +918 98 model.scoring_fct_norm 2.0 +918 98 loss.margin 1.371483234660258 +918 98 negative_sampler.num_negs_per_pos 20.0 +918 98 training.batch_size 1.0 +918 99 model.embedding_dim 2.0 +918 99 model.relation_dim 2.0 +918 99 model.scoring_fct_norm 2.0 +918 99 loss.margin 4.144513848581429 +918 99 negative_sampler.num_negs_per_pos 21.0 +918 99 training.batch_size 2.0 +918 100 model.embedding_dim 0.0 +918 100 model.relation_dim 2.0 +918 100 model.scoring_fct_norm 2.0 +918 100 loss.margin 3.6358515221980774 +918 100 negative_sampler.num_negs_per_pos 99.0 +918 100 training.batch_size 1.0 +918 1 dataset """kinships""" +918 1 model """transr""" +918 1 loss """marginranking""" +918 1 regularizer """no""" +918 1 optimizer """adadelta""" +918 1 training_loop """owa""" +918 1 negative_sampler """basic""" +918 1 evaluator """rankbased""" +918 2 dataset """kinships""" +918 2 model """transr""" +918 2 loss """marginranking""" +918 2 regularizer """no""" +918 2 optimizer """adadelta""" +918 2 training_loop """owa""" +918 2 negative_sampler """basic""" +918 2 evaluator """rankbased""" +918 3 dataset """kinships""" +918 3 model """transr""" +918 3 loss """marginranking""" +918 3 regularizer """no""" +918 3 optimizer """adadelta""" +918 3 training_loop """owa""" +918 3 negative_sampler """basic""" +918 3 evaluator """rankbased""" +918 4 dataset """kinships""" 
+918 4 model """transr""" +918 4 loss """marginranking""" +918 4 regularizer """no""" +918 4 optimizer """adadelta""" +918 4 training_loop """owa""" +918 4 negative_sampler """basic""" +918 4 evaluator """rankbased""" +918 5 dataset """kinships""" +918 5 model """transr""" +918 5 loss """marginranking""" +918 5 regularizer """no""" +918 5 optimizer """adadelta""" +918 5 training_loop """owa""" +918 5 negative_sampler """basic""" +918 5 evaluator """rankbased""" +918 6 dataset """kinships""" +918 6 model """transr""" +918 6 loss """marginranking""" +918 6 regularizer """no""" +918 6 optimizer """adadelta""" +918 6 training_loop """owa""" +918 6 negative_sampler """basic""" +918 6 evaluator """rankbased""" +918 7 dataset """kinships""" +918 7 model """transr""" +918 7 loss """marginranking""" +918 7 regularizer """no""" +918 7 optimizer """adadelta""" +918 7 training_loop """owa""" +918 7 negative_sampler """basic""" +918 7 evaluator """rankbased""" +918 8 dataset """kinships""" +918 8 model """transr""" +918 8 loss """marginranking""" +918 8 regularizer """no""" +918 8 optimizer """adadelta""" +918 8 training_loop """owa""" +918 8 negative_sampler """basic""" +918 8 evaluator """rankbased""" +918 9 dataset """kinships""" +918 9 model """transr""" +918 9 loss """marginranking""" +918 9 regularizer """no""" +918 9 optimizer """adadelta""" +918 9 training_loop """owa""" +918 9 negative_sampler """basic""" +918 9 evaluator """rankbased""" +918 10 dataset """kinships""" +918 10 model """transr""" +918 10 loss """marginranking""" +918 10 regularizer """no""" +918 10 optimizer """adadelta""" +918 10 training_loop """owa""" +918 10 negative_sampler """basic""" +918 10 evaluator """rankbased""" +918 11 dataset """kinships""" +918 11 model """transr""" +918 11 loss """marginranking""" +918 11 regularizer """no""" +918 11 optimizer """adadelta""" +918 11 training_loop """owa""" +918 11 negative_sampler """basic""" +918 11 evaluator """rankbased""" +918 12 dataset 
"""kinships""" +918 12 model """transr""" +918 12 loss """marginranking""" +918 12 regularizer """no""" +918 12 optimizer """adadelta""" +918 12 training_loop """owa""" +918 12 negative_sampler """basic""" +918 12 evaluator """rankbased""" +918 13 dataset """kinships""" +918 13 model """transr""" +918 13 loss """marginranking""" +918 13 regularizer """no""" +918 13 optimizer """adadelta""" +918 13 training_loop """owa""" +918 13 negative_sampler """basic""" +918 13 evaluator """rankbased""" +918 14 dataset """kinships""" +918 14 model """transr""" +918 14 loss """marginranking""" +918 14 regularizer """no""" +918 14 optimizer """adadelta""" +918 14 training_loop """owa""" +918 14 negative_sampler """basic""" +918 14 evaluator """rankbased""" +918 15 dataset """kinships""" +918 15 model """transr""" +918 15 loss """marginranking""" +918 15 regularizer """no""" +918 15 optimizer """adadelta""" +918 15 training_loop """owa""" +918 15 negative_sampler """basic""" +918 15 evaluator """rankbased""" +918 16 dataset """kinships""" +918 16 model """transr""" +918 16 loss """marginranking""" +918 16 regularizer """no""" +918 16 optimizer """adadelta""" +918 16 training_loop """owa""" +918 16 negative_sampler """basic""" +918 16 evaluator """rankbased""" +918 17 dataset """kinships""" +918 17 model """transr""" +918 17 loss """marginranking""" +918 17 regularizer """no""" +918 17 optimizer """adadelta""" +918 17 training_loop """owa""" +918 17 negative_sampler """basic""" +918 17 evaluator """rankbased""" +918 18 dataset """kinships""" +918 18 model """transr""" +918 18 loss """marginranking""" +918 18 regularizer """no""" +918 18 optimizer """adadelta""" +918 18 training_loop """owa""" +918 18 negative_sampler """basic""" +918 18 evaluator """rankbased""" +918 19 dataset """kinships""" +918 19 model """transr""" +918 19 loss """marginranking""" +918 19 regularizer """no""" +918 19 optimizer """adadelta""" +918 19 training_loop """owa""" +918 19 negative_sampler """basic""" 
+918 19 evaluator """rankbased""" +918 20 dataset """kinships""" +918 20 model """transr""" +918 20 loss """marginranking""" +918 20 regularizer """no""" +918 20 optimizer """adadelta""" +918 20 training_loop """owa""" +918 20 negative_sampler """basic""" +918 20 evaluator """rankbased""" +918 21 dataset """kinships""" +918 21 model """transr""" +918 21 loss """marginranking""" +918 21 regularizer """no""" +918 21 optimizer """adadelta""" +918 21 training_loop """owa""" +918 21 negative_sampler """basic""" +918 21 evaluator """rankbased""" +918 22 dataset """kinships""" +918 22 model """transr""" +918 22 loss """marginranking""" +918 22 regularizer """no""" +918 22 optimizer """adadelta""" +918 22 training_loop """owa""" +918 22 negative_sampler """basic""" +918 22 evaluator """rankbased""" +918 23 dataset """kinships""" +918 23 model """transr""" +918 23 loss """marginranking""" +918 23 regularizer """no""" +918 23 optimizer """adadelta""" +918 23 training_loop """owa""" +918 23 negative_sampler """basic""" +918 23 evaluator """rankbased""" +918 24 dataset """kinships""" +918 24 model """transr""" +918 24 loss """marginranking""" +918 24 regularizer """no""" +918 24 optimizer """adadelta""" +918 24 training_loop """owa""" +918 24 negative_sampler """basic""" +918 24 evaluator """rankbased""" +918 25 dataset """kinships""" +918 25 model """transr""" +918 25 loss """marginranking""" +918 25 regularizer """no""" +918 25 optimizer """adadelta""" +918 25 training_loop """owa""" +918 25 negative_sampler """basic""" +918 25 evaluator """rankbased""" +918 26 dataset """kinships""" +918 26 model """transr""" +918 26 loss """marginranking""" +918 26 regularizer """no""" +918 26 optimizer """adadelta""" +918 26 training_loop """owa""" +918 26 negative_sampler """basic""" +918 26 evaluator """rankbased""" +918 27 dataset """kinships""" +918 27 model """transr""" +918 27 loss """marginranking""" +918 27 regularizer """no""" +918 27 optimizer """adadelta""" +918 27 
training_loop """owa""" +918 27 negative_sampler """basic""" +918 27 evaluator """rankbased""" +918 28 dataset """kinships""" +918 28 model """transr""" +918 28 loss """marginranking""" +918 28 regularizer """no""" +918 28 optimizer """adadelta""" +918 28 training_loop """owa""" +918 28 negative_sampler """basic""" +918 28 evaluator """rankbased""" +918 29 dataset """kinships""" +918 29 model """transr""" +918 29 loss """marginranking""" +918 29 regularizer """no""" +918 29 optimizer """adadelta""" +918 29 training_loop """owa""" +918 29 negative_sampler """basic""" +918 29 evaluator """rankbased""" +918 30 dataset """kinships""" +918 30 model """transr""" +918 30 loss """marginranking""" +918 30 regularizer """no""" +918 30 optimizer """adadelta""" +918 30 training_loop """owa""" +918 30 negative_sampler """basic""" +918 30 evaluator """rankbased""" +918 31 dataset """kinships""" +918 31 model """transr""" +918 31 loss """marginranking""" +918 31 regularizer """no""" +918 31 optimizer """adadelta""" +918 31 training_loop """owa""" +918 31 negative_sampler """basic""" +918 31 evaluator """rankbased""" +918 32 dataset """kinships""" +918 32 model """transr""" +918 32 loss """marginranking""" +918 32 regularizer """no""" +918 32 optimizer """adadelta""" +918 32 training_loop """owa""" +918 32 negative_sampler """basic""" +918 32 evaluator """rankbased""" +918 33 dataset """kinships""" +918 33 model """transr""" +918 33 loss """marginranking""" +918 33 regularizer """no""" +918 33 optimizer """adadelta""" +918 33 training_loop """owa""" +918 33 negative_sampler """basic""" +918 33 evaluator """rankbased""" +918 34 dataset """kinships""" +918 34 model """transr""" +918 34 loss """marginranking""" +918 34 regularizer """no""" +918 34 optimizer """adadelta""" +918 34 training_loop """owa""" +918 34 negative_sampler """basic""" +918 34 evaluator """rankbased""" +918 35 dataset """kinships""" +918 35 model """transr""" +918 35 loss """marginranking""" +918 35 regularizer 
"""no""" +918 35 optimizer """adadelta""" +918 35 training_loop """owa""" +918 35 negative_sampler """basic""" +918 35 evaluator """rankbased""" +918 36 dataset """kinships""" +918 36 model """transr""" +918 36 loss """marginranking""" +918 36 regularizer """no""" +918 36 optimizer """adadelta""" +918 36 training_loop """owa""" +918 36 negative_sampler """basic""" +918 36 evaluator """rankbased""" +918 37 dataset """kinships""" +918 37 model """transr""" +918 37 loss """marginranking""" +918 37 regularizer """no""" +918 37 optimizer """adadelta""" +918 37 training_loop """owa""" +918 37 negative_sampler """basic""" +918 37 evaluator """rankbased""" +918 38 dataset """kinships""" +918 38 model """transr""" +918 38 loss """marginranking""" +918 38 regularizer """no""" +918 38 optimizer """adadelta""" +918 38 training_loop """owa""" +918 38 negative_sampler """basic""" +918 38 evaluator """rankbased""" +918 39 dataset """kinships""" +918 39 model """transr""" +918 39 loss """marginranking""" +918 39 regularizer """no""" +918 39 optimizer """adadelta""" +918 39 training_loop """owa""" +918 39 negative_sampler """basic""" +918 39 evaluator """rankbased""" +918 40 dataset """kinships""" +918 40 model """transr""" +918 40 loss """marginranking""" +918 40 regularizer """no""" +918 40 optimizer """adadelta""" +918 40 training_loop """owa""" +918 40 negative_sampler """basic""" +918 40 evaluator """rankbased""" +918 41 dataset """kinships""" +918 41 model """transr""" +918 41 loss """marginranking""" +918 41 regularizer """no""" +918 41 optimizer """adadelta""" +918 41 training_loop """owa""" +918 41 negative_sampler """basic""" +918 41 evaluator """rankbased""" +918 42 dataset """kinships""" +918 42 model """transr""" +918 42 loss """marginranking""" +918 42 regularizer """no""" +918 42 optimizer """adadelta""" +918 42 training_loop """owa""" +918 42 negative_sampler """basic""" +918 42 evaluator """rankbased""" +918 43 dataset """kinships""" +918 43 model """transr""" +918 
43 loss """marginranking""" +918 43 regularizer """no""" +918 43 optimizer """adadelta""" +918 43 training_loop """owa""" +918 43 negative_sampler """basic""" +918 43 evaluator """rankbased""" +918 44 dataset """kinships""" +918 44 model """transr""" +918 44 loss """marginranking""" +918 44 regularizer """no""" +918 44 optimizer """adadelta""" +918 44 training_loop """owa""" +918 44 negative_sampler """basic""" +918 44 evaluator """rankbased""" +918 45 dataset """kinships""" +918 45 model """transr""" +918 45 loss """marginranking""" +918 45 regularizer """no""" +918 45 optimizer """adadelta""" +918 45 training_loop """owa""" +918 45 negative_sampler """basic""" +918 45 evaluator """rankbased""" +918 46 dataset """kinships""" +918 46 model """transr""" +918 46 loss """marginranking""" +918 46 regularizer """no""" +918 46 optimizer """adadelta""" +918 46 training_loop """owa""" +918 46 negative_sampler """basic""" +918 46 evaluator """rankbased""" +918 47 dataset """kinships""" +918 47 model """transr""" +918 47 loss """marginranking""" +918 47 regularizer """no""" +918 47 optimizer """adadelta""" +918 47 training_loop """owa""" +918 47 negative_sampler """basic""" +918 47 evaluator """rankbased""" +918 48 dataset """kinships""" +918 48 model """transr""" +918 48 loss """marginranking""" +918 48 regularizer """no""" +918 48 optimizer """adadelta""" +918 48 training_loop """owa""" +918 48 negative_sampler """basic""" +918 48 evaluator """rankbased""" +918 49 dataset """kinships""" +918 49 model """transr""" +918 49 loss """marginranking""" +918 49 regularizer """no""" +918 49 optimizer """adadelta""" +918 49 training_loop """owa""" +918 49 negative_sampler """basic""" +918 49 evaluator """rankbased""" +918 50 dataset """kinships""" +918 50 model """transr""" +918 50 loss """marginranking""" +918 50 regularizer """no""" +918 50 optimizer """adadelta""" +918 50 training_loop """owa""" +918 50 negative_sampler """basic""" +918 50 evaluator """rankbased""" +918 51 
dataset """kinships""" +918 51 model """transr""" +918 51 loss """marginranking""" +918 51 regularizer """no""" +918 51 optimizer """adadelta""" +918 51 training_loop """owa""" +918 51 negative_sampler """basic""" +918 51 evaluator """rankbased""" +918 52 dataset """kinships""" +918 52 model """transr""" +918 52 loss """marginranking""" +918 52 regularizer """no""" +918 52 optimizer """adadelta""" +918 52 training_loop """owa""" +918 52 negative_sampler """basic""" +918 52 evaluator """rankbased""" +918 53 dataset """kinships""" +918 53 model """transr""" +918 53 loss """marginranking""" +918 53 regularizer """no""" +918 53 optimizer """adadelta""" +918 53 training_loop """owa""" +918 53 negative_sampler """basic""" +918 53 evaluator """rankbased""" +918 54 dataset """kinships""" +918 54 model """transr""" +918 54 loss """marginranking""" +918 54 regularizer """no""" +918 54 optimizer """adadelta""" +918 54 training_loop """owa""" +918 54 negative_sampler """basic""" +918 54 evaluator """rankbased""" +918 55 dataset """kinships""" +918 55 model """transr""" +918 55 loss """marginranking""" +918 55 regularizer """no""" +918 55 optimizer """adadelta""" +918 55 training_loop """owa""" +918 55 negative_sampler """basic""" +918 55 evaluator """rankbased""" +918 56 dataset """kinships""" +918 56 model """transr""" +918 56 loss """marginranking""" +918 56 regularizer """no""" +918 56 optimizer """adadelta""" +918 56 training_loop """owa""" +918 56 negative_sampler """basic""" +918 56 evaluator """rankbased""" +918 57 dataset """kinships""" +918 57 model """transr""" +918 57 loss """marginranking""" +918 57 regularizer """no""" +918 57 optimizer """adadelta""" +918 57 training_loop """owa""" +918 57 negative_sampler """basic""" +918 57 evaluator """rankbased""" +918 58 dataset """kinships""" +918 58 model """transr""" +918 58 loss """marginranking""" +918 58 regularizer """no""" +918 58 optimizer """adadelta""" +918 58 training_loop """owa""" +918 58 negative_sampler 
"""basic""" +918 58 evaluator """rankbased""" +918 59 dataset """kinships""" +918 59 model """transr""" +918 59 loss """marginranking""" +918 59 regularizer """no""" +918 59 optimizer """adadelta""" +918 59 training_loop """owa""" +918 59 negative_sampler """basic""" +918 59 evaluator """rankbased""" +918 60 dataset """kinships""" +918 60 model """transr""" +918 60 loss """marginranking""" +918 60 regularizer """no""" +918 60 optimizer """adadelta""" +918 60 training_loop """owa""" +918 60 negative_sampler """basic""" +918 60 evaluator """rankbased""" +918 61 dataset """kinships""" +918 61 model """transr""" +918 61 loss """marginranking""" +918 61 regularizer """no""" +918 61 optimizer """adadelta""" +918 61 training_loop """owa""" +918 61 negative_sampler """basic""" +918 61 evaluator """rankbased""" +918 62 dataset """kinships""" +918 62 model """transr""" +918 62 loss """marginranking""" +918 62 regularizer """no""" +918 62 optimizer """adadelta""" +918 62 training_loop """owa""" +918 62 negative_sampler """basic""" +918 62 evaluator """rankbased""" +918 63 dataset """kinships""" +918 63 model """transr""" +918 63 loss """marginranking""" +918 63 regularizer """no""" +918 63 optimizer """adadelta""" +918 63 training_loop """owa""" +918 63 negative_sampler """basic""" +918 63 evaluator """rankbased""" +918 64 dataset """kinships""" +918 64 model """transr""" +918 64 loss """marginranking""" +918 64 regularizer """no""" +918 64 optimizer """adadelta""" +918 64 training_loop """owa""" +918 64 negative_sampler """basic""" +918 64 evaluator """rankbased""" +918 65 dataset """kinships""" +918 65 model """transr""" +918 65 loss """marginranking""" +918 65 regularizer """no""" +918 65 optimizer """adadelta""" +918 65 training_loop """owa""" +918 65 negative_sampler """basic""" +918 65 evaluator """rankbased""" +918 66 dataset """kinships""" +918 66 model """transr""" +918 66 loss """marginranking""" +918 66 regularizer """no""" +918 66 optimizer """adadelta""" +918 66 
training_loop """owa""" +918 66 negative_sampler """basic""" +918 66 evaluator """rankbased""" +918 67 dataset """kinships""" +918 67 model """transr""" +918 67 loss """marginranking""" +918 67 regularizer """no""" +918 67 optimizer """adadelta""" +918 67 training_loop """owa""" +918 67 negative_sampler """basic""" +918 67 evaluator """rankbased""" +918 68 dataset """kinships""" +918 68 model """transr""" +918 68 loss """marginranking""" +918 68 regularizer """no""" +918 68 optimizer """adadelta""" +918 68 training_loop """owa""" +918 68 negative_sampler """basic""" +918 68 evaluator """rankbased""" +918 69 dataset """kinships""" +918 69 model """transr""" +918 69 loss """marginranking""" +918 69 regularizer """no""" +918 69 optimizer """adadelta""" +918 69 training_loop """owa""" +918 69 negative_sampler """basic""" +918 69 evaluator """rankbased""" +918 70 dataset """kinships""" +918 70 model """transr""" +918 70 loss """marginranking""" +918 70 regularizer """no""" +918 70 optimizer """adadelta""" +918 70 training_loop """owa""" +918 70 negative_sampler """basic""" +918 70 evaluator """rankbased""" +918 71 dataset """kinships""" +918 71 model """transr""" +918 71 loss """marginranking""" +918 71 regularizer """no""" +918 71 optimizer """adadelta""" +918 71 training_loop """owa""" +918 71 negative_sampler """basic""" +918 71 evaluator """rankbased""" +918 72 dataset """kinships""" +918 72 model """transr""" +918 72 loss """marginranking""" +918 72 regularizer """no""" +918 72 optimizer """adadelta""" +918 72 training_loop """owa""" +918 72 negative_sampler """basic""" +918 72 evaluator """rankbased""" +918 73 dataset """kinships""" +918 73 model """transr""" +918 73 loss """marginranking""" +918 73 regularizer """no""" +918 73 optimizer """adadelta""" +918 73 training_loop """owa""" +918 73 negative_sampler """basic""" +918 73 evaluator """rankbased""" +918 74 dataset """kinships""" +918 74 model """transr""" +918 74 loss """marginranking""" +918 74 regularizer 
"""no""" +918 74 optimizer """adadelta""" +918 74 training_loop """owa""" +918 74 negative_sampler """basic""" +918 74 evaluator """rankbased""" +918 75 dataset """kinships""" +918 75 model """transr""" +918 75 loss """marginranking""" +918 75 regularizer """no""" +918 75 optimizer """adadelta""" +918 75 training_loop """owa""" +918 75 negative_sampler """basic""" +918 75 evaluator """rankbased""" +918 76 dataset """kinships""" +918 76 model """transr""" +918 76 loss """marginranking""" +918 76 regularizer """no""" +918 76 optimizer """adadelta""" +918 76 training_loop """owa""" +918 76 negative_sampler """basic""" +918 76 evaluator """rankbased""" +918 77 dataset """kinships""" +918 77 model """transr""" +918 77 loss """marginranking""" +918 77 regularizer """no""" +918 77 optimizer """adadelta""" +918 77 training_loop """owa""" +918 77 negative_sampler """basic""" +918 77 evaluator """rankbased""" +918 78 dataset """kinships""" +918 78 model """transr""" +918 78 loss """marginranking""" +918 78 regularizer """no""" +918 78 optimizer """adadelta""" +918 78 training_loop """owa""" +918 78 negative_sampler """basic""" +918 78 evaluator """rankbased""" +918 79 dataset """kinships""" +918 79 model """transr""" +918 79 loss """marginranking""" +918 79 regularizer """no""" +918 79 optimizer """adadelta""" +918 79 training_loop """owa""" +918 79 negative_sampler """basic""" +918 79 evaluator """rankbased""" +918 80 dataset """kinships""" +918 80 model """transr""" +918 80 loss """marginranking""" +918 80 regularizer """no""" +918 80 optimizer """adadelta""" +918 80 training_loop """owa""" +918 80 negative_sampler """basic""" +918 80 evaluator """rankbased""" +918 81 dataset """kinships""" +918 81 model """transr""" +918 81 loss """marginranking""" +918 81 regularizer """no""" +918 81 optimizer """adadelta""" +918 81 training_loop """owa""" +918 81 negative_sampler """basic""" +918 81 evaluator """rankbased""" +918 82 dataset """kinships""" +918 82 model """transr""" +918 
82 loss """marginranking""" +918 82 regularizer """no""" +918 82 optimizer """adadelta""" +918 82 training_loop """owa""" +918 82 negative_sampler """basic""" +918 82 evaluator """rankbased""" +918 83 dataset """kinships""" +918 83 model """transr""" +918 83 loss """marginranking""" +918 83 regularizer """no""" +918 83 optimizer """adadelta""" +918 83 training_loop """owa""" +918 83 negative_sampler """basic""" +918 83 evaluator """rankbased""" +918 84 dataset """kinships""" +918 84 model """transr""" +918 84 loss """marginranking""" +918 84 regularizer """no""" +918 84 optimizer """adadelta""" +918 84 training_loop """owa""" +918 84 negative_sampler """basic""" +918 84 evaluator """rankbased""" +918 85 dataset """kinships""" +918 85 model """transr""" +918 85 loss """marginranking""" +918 85 regularizer """no""" +918 85 optimizer """adadelta""" +918 85 training_loop """owa""" +918 85 negative_sampler """basic""" +918 85 evaluator """rankbased""" +918 86 dataset """kinships""" +918 86 model """transr""" +918 86 loss """marginranking""" +918 86 regularizer """no""" +918 86 optimizer """adadelta""" +918 86 training_loop """owa""" +918 86 negative_sampler """basic""" +918 86 evaluator """rankbased""" +918 87 dataset """kinships""" +918 87 model """transr""" +918 87 loss """marginranking""" +918 87 regularizer """no""" +918 87 optimizer """adadelta""" +918 87 training_loop """owa""" +918 87 negative_sampler """basic""" +918 87 evaluator """rankbased""" +918 88 dataset """kinships""" +918 88 model """transr""" +918 88 loss """marginranking""" +918 88 regularizer """no""" +918 88 optimizer """adadelta""" +918 88 training_loop """owa""" +918 88 negative_sampler """basic""" +918 88 evaluator """rankbased""" +918 89 dataset """kinships""" +918 89 model """transr""" +918 89 loss """marginranking""" +918 89 regularizer """no""" +918 89 optimizer """adadelta""" +918 89 training_loop """owa""" +918 89 negative_sampler """basic""" +918 89 evaluator """rankbased""" +918 90 
dataset """kinships""" +918 90 model """transr""" +918 90 loss """marginranking""" +918 90 regularizer """no""" +918 90 optimizer """adadelta""" +918 90 training_loop """owa""" +918 90 negative_sampler """basic""" +918 90 evaluator """rankbased""" +918 91 dataset """kinships""" +918 91 model """transr""" +918 91 loss """marginranking""" +918 91 regularizer """no""" +918 91 optimizer """adadelta""" +918 91 training_loop """owa""" +918 91 negative_sampler """basic""" +918 91 evaluator """rankbased""" +918 92 dataset """kinships""" +918 92 model """transr""" +918 92 loss """marginranking""" +918 92 regularizer """no""" +918 92 optimizer """adadelta""" +918 92 training_loop """owa""" +918 92 negative_sampler """basic""" +918 92 evaluator """rankbased""" +918 93 dataset """kinships""" +918 93 model """transr""" +918 93 loss """marginranking""" +918 93 regularizer """no""" +918 93 optimizer """adadelta""" +918 93 training_loop """owa""" +918 93 negative_sampler """basic""" +918 93 evaluator """rankbased""" +918 94 dataset """kinships""" +918 94 model """transr""" +918 94 loss """marginranking""" +918 94 regularizer """no""" +918 94 optimizer """adadelta""" +918 94 training_loop """owa""" +918 94 negative_sampler """basic""" +918 94 evaluator """rankbased""" +918 95 dataset """kinships""" +918 95 model """transr""" +918 95 loss """marginranking""" +918 95 regularizer """no""" +918 95 optimizer """adadelta""" +918 95 training_loop """owa""" +918 95 negative_sampler """basic""" +918 95 evaluator """rankbased""" +918 96 dataset """kinships""" +918 96 model """transr""" +918 96 loss """marginranking""" +918 96 regularizer """no""" +918 96 optimizer """adadelta""" +918 96 training_loop """owa""" +918 96 negative_sampler """basic""" +918 96 evaluator """rankbased""" +918 97 dataset """kinships""" +918 97 model """transr""" +918 97 loss """marginranking""" +918 97 regularizer """no""" +918 97 optimizer """adadelta""" +918 97 training_loop """owa""" +918 97 negative_sampler 
"""basic""" +918 97 evaluator """rankbased""" +918 98 dataset """kinships""" +918 98 model """transr""" +918 98 loss """marginranking""" +918 98 regularizer """no""" +918 98 optimizer """adadelta""" +918 98 training_loop """owa""" +918 98 negative_sampler """basic""" +918 98 evaluator """rankbased""" +918 99 dataset """kinships""" +918 99 model """transr""" +918 99 loss """marginranking""" +918 99 regularizer """no""" +918 99 optimizer """adadelta""" +918 99 training_loop """owa""" +918 99 negative_sampler """basic""" +918 99 evaluator """rankbased""" +918 100 dataset """kinships""" +918 100 model """transr""" +918 100 loss """marginranking""" +918 100 regularizer """no""" +918 100 optimizer """adadelta""" +918 100 training_loop """owa""" +918 100 negative_sampler """basic""" +918 100 evaluator """rankbased""" +919 1 model.embedding_dim 0.0 +919 1 model.relation_dim 2.0 +919 1 model.scoring_fct_norm 2.0 +919 1 loss.margin 27.255033933860965 +919 1 loss.adversarial_temperature 0.6866016276318895 +919 1 negative_sampler.num_negs_per_pos 47.0 +919 1 training.batch_size 1.0 +919 2 model.embedding_dim 1.0 +919 2 model.relation_dim 2.0 +919 2 model.scoring_fct_norm 2.0 +919 2 loss.margin 20.62396010990636 +919 2 loss.adversarial_temperature 0.3376841141934509 +919 2 negative_sampler.num_negs_per_pos 81.0 +919 2 training.batch_size 0.0 +919 3 model.embedding_dim 2.0 +919 3 model.relation_dim 0.0 +919 3 model.scoring_fct_norm 1.0 +919 3 loss.margin 10.506876948375211 +919 3 loss.adversarial_temperature 0.7659390161420939 +919 3 negative_sampler.num_negs_per_pos 95.0 +919 3 training.batch_size 0.0 +919 4 model.embedding_dim 1.0 +919 4 model.relation_dim 2.0 +919 4 model.scoring_fct_norm 2.0 +919 4 loss.margin 26.05372720410834 +919 4 loss.adversarial_temperature 0.7454890859493258 +919 4 negative_sampler.num_negs_per_pos 21.0 +919 4 training.batch_size 1.0 +919 5 model.embedding_dim 2.0 +919 5 model.relation_dim 2.0 +919 5 model.scoring_fct_norm 2.0 +919 5 loss.margin 
16.586780564595312 +919 5 loss.adversarial_temperature 0.3233235632213611 +919 5 negative_sampler.num_negs_per_pos 54.0 +919 5 training.batch_size 1.0 +919 6 model.embedding_dim 0.0 +919 6 model.relation_dim 0.0 +919 6 model.scoring_fct_norm 2.0 +919 6 loss.margin 1.1399062241486464 +919 6 loss.adversarial_temperature 0.7917567282925804 +919 6 negative_sampler.num_negs_per_pos 53.0 +919 6 training.batch_size 0.0 +919 7 model.embedding_dim 2.0 +919 7 model.relation_dim 1.0 +919 7 model.scoring_fct_norm 1.0 +919 7 loss.margin 13.89348326552603 +919 7 loss.adversarial_temperature 0.2670701551004016 +919 7 negative_sampler.num_negs_per_pos 8.0 +919 7 training.batch_size 1.0 +919 8 model.embedding_dim 2.0 +919 8 model.relation_dim 0.0 +919 8 model.scoring_fct_norm 2.0 +919 8 loss.margin 7.446352894291899 +919 8 loss.adversarial_temperature 0.8954242457905436 +919 8 negative_sampler.num_negs_per_pos 15.0 +919 8 training.batch_size 2.0 +919 9 model.embedding_dim 0.0 +919 9 model.relation_dim 0.0 +919 9 model.scoring_fct_norm 2.0 +919 9 loss.margin 15.1175253483797 +919 9 loss.adversarial_temperature 0.5868449258830049 +919 9 negative_sampler.num_negs_per_pos 84.0 +919 9 training.batch_size 0.0 +919 10 model.embedding_dim 0.0 +919 10 model.relation_dim 1.0 +919 10 model.scoring_fct_norm 2.0 +919 10 loss.margin 14.878217946618676 +919 10 loss.adversarial_temperature 0.8307869443700698 +919 10 negative_sampler.num_negs_per_pos 63.0 +919 10 training.batch_size 0.0 +919 11 model.embedding_dim 0.0 +919 11 model.relation_dim 1.0 +919 11 model.scoring_fct_norm 2.0 +919 11 loss.margin 11.529904915755454 +919 11 loss.adversarial_temperature 0.8403658070122598 +919 11 negative_sampler.num_negs_per_pos 27.0 +919 11 training.batch_size 2.0 +919 12 model.embedding_dim 1.0 +919 12 model.relation_dim 1.0 +919 12 model.scoring_fct_norm 2.0 +919 12 loss.margin 8.933751931942757 +919 12 loss.adversarial_temperature 0.5734478200495506 +919 12 negative_sampler.num_negs_per_pos 40.0 +919 12 
training.batch_size 2.0 +919 13 model.embedding_dim 1.0 +919 13 model.relation_dim 0.0 +919 13 model.scoring_fct_norm 2.0 +919 13 loss.margin 15.264497401344347 +919 13 loss.adversarial_temperature 0.7081631679461794 +919 13 negative_sampler.num_negs_per_pos 98.0 +919 13 training.batch_size 1.0 +919 14 model.embedding_dim 0.0 +919 14 model.relation_dim 1.0 +919 14 model.scoring_fct_norm 1.0 +919 14 loss.margin 4.0062934720721275 +919 14 loss.adversarial_temperature 0.3130736486393985 +919 14 negative_sampler.num_negs_per_pos 44.0 +919 14 training.batch_size 2.0 +919 15 model.embedding_dim 2.0 +919 15 model.relation_dim 0.0 +919 15 model.scoring_fct_norm 1.0 +919 15 loss.margin 1.9334794912387965 +919 15 loss.adversarial_temperature 0.5831954940618219 +919 15 negative_sampler.num_negs_per_pos 86.0 +919 15 training.batch_size 1.0 +919 16 model.embedding_dim 0.0 +919 16 model.relation_dim 1.0 +919 16 model.scoring_fct_norm 2.0 +919 16 loss.margin 19.69258267530981 +919 16 loss.adversarial_temperature 0.4089056878574616 +919 16 negative_sampler.num_negs_per_pos 35.0 +919 16 training.batch_size 2.0 +919 17 model.embedding_dim 2.0 +919 17 model.relation_dim 0.0 +919 17 model.scoring_fct_norm 1.0 +919 17 loss.margin 9.451552739480496 +919 17 loss.adversarial_temperature 0.41519542803076026 +919 17 negative_sampler.num_negs_per_pos 73.0 +919 17 training.batch_size 2.0 +919 18 model.embedding_dim 1.0 +919 18 model.relation_dim 0.0 +919 18 model.scoring_fct_norm 2.0 +919 18 loss.margin 21.820792388319326 +919 18 loss.adversarial_temperature 0.834190399190605 +919 18 negative_sampler.num_negs_per_pos 54.0 +919 18 training.batch_size 2.0 +919 19 model.embedding_dim 2.0 +919 19 model.relation_dim 0.0 +919 19 model.scoring_fct_norm 2.0 +919 19 loss.margin 19.783131476714534 +919 19 loss.adversarial_temperature 0.9804471377856656 +919 19 negative_sampler.num_negs_per_pos 12.0 +919 19 training.batch_size 0.0 +919 20 model.embedding_dim 1.0 +919 20 model.relation_dim 2.0 +919 20 
model.scoring_fct_norm 1.0 +919 20 loss.margin 21.32669891701733 +919 20 loss.adversarial_temperature 0.19538868386925395 +919 20 negative_sampler.num_negs_per_pos 26.0 +919 20 training.batch_size 1.0 +919 21 model.embedding_dim 1.0 +919 21 model.relation_dim 2.0 +919 21 model.scoring_fct_norm 1.0 +919 21 loss.margin 2.8456428394928555 +919 21 loss.adversarial_temperature 0.6061399359686028 +919 21 negative_sampler.num_negs_per_pos 52.0 +919 21 training.batch_size 0.0 +919 22 model.embedding_dim 2.0 +919 22 model.relation_dim 1.0 +919 22 model.scoring_fct_norm 2.0 +919 22 loss.margin 27.707466294552056 +919 22 loss.adversarial_temperature 0.14261196281933824 +919 22 negative_sampler.num_negs_per_pos 86.0 +919 22 training.batch_size 0.0 +919 23 model.embedding_dim 0.0 +919 23 model.relation_dim 1.0 +919 23 model.scoring_fct_norm 2.0 +919 23 loss.margin 29.084923820277428 +919 23 loss.adversarial_temperature 0.8657997768881001 +919 23 negative_sampler.num_negs_per_pos 39.0 +919 23 training.batch_size 2.0 +919 24 model.embedding_dim 2.0 +919 24 model.relation_dim 2.0 +919 24 model.scoring_fct_norm 1.0 +919 24 loss.margin 28.834622023950864 +919 24 loss.adversarial_temperature 0.9590064659948379 +919 24 negative_sampler.num_negs_per_pos 18.0 +919 24 training.batch_size 2.0 +919 25 model.embedding_dim 1.0 +919 25 model.relation_dim 0.0 +919 25 model.scoring_fct_norm 1.0 +919 25 loss.margin 25.442075729548115 +919 25 loss.adversarial_temperature 0.23593464169507303 +919 25 negative_sampler.num_negs_per_pos 1.0 +919 25 training.batch_size 0.0 +919 26 model.embedding_dim 2.0 +919 26 model.relation_dim 1.0 +919 26 model.scoring_fct_norm 2.0 +919 26 loss.margin 11.906965370453827 +919 26 loss.adversarial_temperature 0.19367725956191895 +919 26 negative_sampler.num_negs_per_pos 31.0 +919 26 training.batch_size 0.0 +919 27 model.embedding_dim 2.0 +919 27 model.relation_dim 1.0 +919 27 model.scoring_fct_norm 1.0 +919 27 loss.margin 26.578883185827642 +919 27 
loss.adversarial_temperature 0.7198419263169783 +919 27 negative_sampler.num_negs_per_pos 32.0 +919 27 training.batch_size 1.0 +919 28 model.embedding_dim 0.0 +919 28 model.relation_dim 0.0 +919 28 model.scoring_fct_norm 2.0 +919 28 loss.margin 1.2757932639757636 +919 28 loss.adversarial_temperature 0.8576141222782874 +919 28 negative_sampler.num_negs_per_pos 93.0 +919 28 training.batch_size 1.0 +919 29 model.embedding_dim 0.0 +919 29 model.relation_dim 1.0 +919 29 model.scoring_fct_norm 2.0 +919 29 loss.margin 22.279880982285224 +919 29 loss.adversarial_temperature 0.5400731768912604 +919 29 negative_sampler.num_negs_per_pos 64.0 +919 29 training.batch_size 2.0 +919 30 model.embedding_dim 1.0 +919 30 model.relation_dim 0.0 +919 30 model.scoring_fct_norm 1.0 +919 30 loss.margin 23.53187211289189 +919 30 loss.adversarial_temperature 0.3908683056697424 +919 30 negative_sampler.num_negs_per_pos 35.0 +919 30 training.batch_size 0.0 +919 31 model.embedding_dim 1.0 +919 31 model.relation_dim 1.0 +919 31 model.scoring_fct_norm 2.0 +919 31 loss.margin 20.666938063515353 +919 31 loss.adversarial_temperature 0.30934863488613634 +919 31 negative_sampler.num_negs_per_pos 55.0 +919 31 training.batch_size 0.0 +919 32 model.embedding_dim 1.0 +919 32 model.relation_dim 0.0 +919 32 model.scoring_fct_norm 1.0 +919 32 loss.margin 25.442971350954128 +919 32 loss.adversarial_temperature 0.13310245569102086 +919 32 negative_sampler.num_negs_per_pos 47.0 +919 32 training.batch_size 2.0 +919 33 model.embedding_dim 1.0 +919 33 model.relation_dim 1.0 +919 33 model.scoring_fct_norm 2.0 +919 33 loss.margin 26.327197528554674 +919 33 loss.adversarial_temperature 0.13169064952856102 +919 33 negative_sampler.num_negs_per_pos 63.0 +919 33 training.batch_size 0.0 +919 34 model.embedding_dim 0.0 +919 34 model.relation_dim 0.0 +919 34 model.scoring_fct_norm 1.0 +919 34 loss.margin 22.01640416165593 +919 34 loss.adversarial_temperature 0.8710625782405368 +919 34 negative_sampler.num_negs_per_pos 61.0 
+919 34 training.batch_size 1.0 +919 35 model.embedding_dim 1.0 +919 35 model.relation_dim 0.0 +919 35 model.scoring_fct_norm 1.0 +919 35 loss.margin 22.955579077656886 +919 35 loss.adversarial_temperature 0.6553271979680534 +919 35 negative_sampler.num_negs_per_pos 38.0 +919 35 training.batch_size 0.0 +919 36 model.embedding_dim 2.0 +919 36 model.relation_dim 0.0 +919 36 model.scoring_fct_norm 1.0 +919 36 loss.margin 13.982221074140044 +919 36 loss.adversarial_temperature 0.8432242614204515 +919 36 negative_sampler.num_negs_per_pos 27.0 +919 36 training.batch_size 0.0 +919 37 model.embedding_dim 0.0 +919 37 model.relation_dim 0.0 +919 37 model.scoring_fct_norm 2.0 +919 37 loss.margin 19.81397302330585 +919 37 loss.adversarial_temperature 0.44708614514458456 +919 37 negative_sampler.num_negs_per_pos 57.0 +919 37 training.batch_size 2.0 +919 38 model.embedding_dim 2.0 +919 38 model.relation_dim 1.0 +919 38 model.scoring_fct_norm 1.0 +919 38 loss.margin 20.946443528630056 +919 38 loss.adversarial_temperature 0.6478860392412895 +919 38 negative_sampler.num_negs_per_pos 95.0 +919 38 training.batch_size 0.0 +919 39 model.embedding_dim 2.0 +919 39 model.relation_dim 2.0 +919 39 model.scoring_fct_norm 1.0 +919 39 loss.margin 10.330361644552934 +919 39 loss.adversarial_temperature 0.914102133164362 +919 39 negative_sampler.num_negs_per_pos 72.0 +919 39 training.batch_size 2.0 +919 40 model.embedding_dim 2.0 +919 40 model.relation_dim 1.0 +919 40 model.scoring_fct_norm 1.0 +919 40 loss.margin 21.9949310652529 +919 40 loss.adversarial_temperature 0.3184530965630572 +919 40 negative_sampler.num_negs_per_pos 42.0 +919 40 training.batch_size 0.0 +919 41 model.embedding_dim 2.0 +919 41 model.relation_dim 1.0 +919 41 model.scoring_fct_norm 1.0 +919 41 loss.margin 27.333666633833662 +919 41 loss.adversarial_temperature 0.6130181498725865 +919 41 negative_sampler.num_negs_per_pos 89.0 +919 41 training.batch_size 2.0 +919 42 model.embedding_dim 1.0 +919 42 model.relation_dim 1.0 
+919 42 model.scoring_fct_norm 2.0 +919 42 loss.margin 26.336338379514903 +919 42 loss.adversarial_temperature 0.5746092033274356 +919 42 negative_sampler.num_negs_per_pos 43.0 +919 42 training.batch_size 1.0 +919 43 model.embedding_dim 0.0 +919 43 model.relation_dim 0.0 +919 43 model.scoring_fct_norm 1.0 +919 43 loss.margin 11.3556749223852 +919 43 loss.adversarial_temperature 0.5974933336405498 +919 43 negative_sampler.num_negs_per_pos 96.0 +919 43 training.batch_size 1.0 +919 44 model.embedding_dim 0.0 +919 44 model.relation_dim 0.0 +919 44 model.scoring_fct_norm 1.0 +919 44 loss.margin 14.135146912735427 +919 44 loss.adversarial_temperature 0.3869147102692918 +919 44 negative_sampler.num_negs_per_pos 6.0 +919 44 training.batch_size 1.0 +919 45 model.embedding_dim 0.0 +919 45 model.relation_dim 1.0 +919 45 model.scoring_fct_norm 1.0 +919 45 loss.margin 23.74805552046203 +919 45 loss.adversarial_temperature 0.6966352801167048 +919 45 negative_sampler.num_negs_per_pos 80.0 +919 45 training.batch_size 0.0 +919 46 model.embedding_dim 2.0 +919 46 model.relation_dim 0.0 +919 46 model.scoring_fct_norm 1.0 +919 46 loss.margin 19.127493563703936 +919 46 loss.adversarial_temperature 0.9007564124777071 +919 46 negative_sampler.num_negs_per_pos 75.0 +919 46 training.batch_size 2.0 +919 47 model.embedding_dim 1.0 +919 47 model.relation_dim 0.0 +919 47 model.scoring_fct_norm 2.0 +919 47 loss.margin 17.00645648914029 +919 47 loss.adversarial_temperature 0.3575876155346175 +919 47 negative_sampler.num_negs_per_pos 97.0 +919 47 training.batch_size 0.0 +919 48 model.embedding_dim 0.0 +919 48 model.relation_dim 0.0 +919 48 model.scoring_fct_norm 1.0 +919 48 loss.margin 19.33072596167502 +919 48 loss.adversarial_temperature 0.23728694009063006 +919 48 negative_sampler.num_negs_per_pos 90.0 +919 48 training.batch_size 0.0 +919 49 model.embedding_dim 2.0 +919 49 model.relation_dim 1.0 +919 49 model.scoring_fct_norm 1.0 +919 49 loss.margin 18.532767936268073 +919 49 
loss.adversarial_temperature 0.21266509964632338 +919 49 negative_sampler.num_negs_per_pos 65.0 +919 49 training.batch_size 0.0 +919 50 model.embedding_dim 2.0 +919 50 model.relation_dim 2.0 +919 50 model.scoring_fct_norm 1.0 +919 50 loss.margin 22.915724705217112 +919 50 loss.adversarial_temperature 0.5408313578645163 +919 50 negative_sampler.num_negs_per_pos 74.0 +919 50 training.batch_size 1.0 +919 51 model.embedding_dim 1.0 +919 51 model.relation_dim 1.0 +919 51 model.scoring_fct_norm 1.0 +919 51 loss.margin 17.078240572058522 +919 51 loss.adversarial_temperature 0.44163348578078954 +919 51 negative_sampler.num_negs_per_pos 40.0 +919 51 training.batch_size 1.0 +919 52 model.embedding_dim 2.0 +919 52 model.relation_dim 0.0 +919 52 model.scoring_fct_norm 1.0 +919 52 loss.margin 3.0453977518213198 +919 52 loss.adversarial_temperature 0.4150470536272428 +919 52 negative_sampler.num_negs_per_pos 17.0 +919 52 training.batch_size 0.0 +919 53 model.embedding_dim 0.0 +919 53 model.relation_dim 1.0 +919 53 model.scoring_fct_norm 2.0 +919 53 loss.margin 17.702390587811717 +919 53 loss.adversarial_temperature 0.4301280188930878 +919 53 negative_sampler.num_negs_per_pos 63.0 +919 53 training.batch_size 1.0 +919 54 model.embedding_dim 2.0 +919 54 model.relation_dim 2.0 +919 54 model.scoring_fct_norm 1.0 +919 54 loss.margin 3.706380102470602 +919 54 loss.adversarial_temperature 0.597634306432457 +919 54 negative_sampler.num_negs_per_pos 73.0 +919 54 training.batch_size 0.0 +919 55 model.embedding_dim 2.0 +919 55 model.relation_dim 0.0 +919 55 model.scoring_fct_norm 2.0 +919 55 loss.margin 4.239872252831141 +919 55 loss.adversarial_temperature 0.15283882641866964 +919 55 negative_sampler.num_negs_per_pos 14.0 +919 55 training.batch_size 1.0 +919 56 model.embedding_dim 0.0 +919 56 model.relation_dim 0.0 +919 56 model.scoring_fct_norm 1.0 +919 56 loss.margin 1.0237069234908933 +919 56 loss.adversarial_temperature 0.9369874890963211 +919 56 negative_sampler.num_negs_per_pos 47.0 
+919 56 training.batch_size 1.0 +919 57 model.embedding_dim 2.0 +919 57 model.relation_dim 0.0 +919 57 model.scoring_fct_norm 1.0 +919 57 loss.margin 13.098222646385148 +919 57 loss.adversarial_temperature 0.11642845542306768 +919 57 negative_sampler.num_negs_per_pos 67.0 +919 57 training.batch_size 1.0 +919 58 model.embedding_dim 0.0 +919 58 model.relation_dim 1.0 +919 58 model.scoring_fct_norm 1.0 +919 58 loss.margin 11.173675477859303 +919 58 loss.adversarial_temperature 0.3475188746826892 +919 58 negative_sampler.num_negs_per_pos 13.0 +919 58 training.batch_size 2.0 +919 59 model.embedding_dim 2.0 +919 59 model.relation_dim 2.0 +919 59 model.scoring_fct_norm 2.0 +919 59 loss.margin 9.759578349113747 +919 59 loss.adversarial_temperature 0.2449525626359671 +919 59 negative_sampler.num_negs_per_pos 85.0 +919 59 training.batch_size 0.0 +919 60 model.embedding_dim 1.0 +919 60 model.relation_dim 2.0 +919 60 model.scoring_fct_norm 2.0 +919 60 loss.margin 13.763877011264098 +919 60 loss.adversarial_temperature 0.5871363011290369 +919 60 negative_sampler.num_negs_per_pos 50.0 +919 60 training.batch_size 2.0 +919 61 model.embedding_dim 2.0 +919 61 model.relation_dim 0.0 +919 61 model.scoring_fct_norm 2.0 +919 61 loss.margin 8.821811426916302 +919 61 loss.adversarial_temperature 0.49330403966612535 +919 61 negative_sampler.num_negs_per_pos 75.0 +919 61 training.batch_size 2.0 +919 62 model.embedding_dim 1.0 +919 62 model.relation_dim 2.0 +919 62 model.scoring_fct_norm 1.0 +919 62 loss.margin 8.342929117258267 +919 62 loss.adversarial_temperature 0.9129623318140687 +919 62 negative_sampler.num_negs_per_pos 57.0 +919 62 training.batch_size 0.0 +919 63 model.embedding_dim 0.0 +919 63 model.relation_dim 1.0 +919 63 model.scoring_fct_norm 1.0 +919 63 loss.margin 24.23581005753793 +919 63 loss.adversarial_temperature 0.29562644266202154 +919 63 negative_sampler.num_negs_per_pos 77.0 +919 63 training.batch_size 2.0 +919 64 model.embedding_dim 0.0 +919 64 model.relation_dim 0.0 
+919 64 model.scoring_fct_norm 2.0 +919 64 loss.margin 18.475137427799712 +919 64 loss.adversarial_temperature 0.27525720966964085 +919 64 negative_sampler.num_negs_per_pos 38.0 +919 64 training.batch_size 1.0 +919 65 model.embedding_dim 1.0 +919 65 model.relation_dim 0.0 +919 65 model.scoring_fct_norm 1.0 +919 65 loss.margin 18.447435222098807 +919 65 loss.adversarial_temperature 0.8913212429627925 +919 65 negative_sampler.num_negs_per_pos 7.0 +919 65 training.batch_size 2.0 +919 66 model.embedding_dim 2.0 +919 66 model.relation_dim 1.0 +919 66 model.scoring_fct_norm 1.0 +919 66 loss.margin 7.544005421612387 +919 66 loss.adversarial_temperature 0.18980718822628298 +919 66 negative_sampler.num_negs_per_pos 48.0 +919 66 training.batch_size 0.0 +919 67 model.embedding_dim 0.0 +919 67 model.relation_dim 1.0 +919 67 model.scoring_fct_norm 2.0 +919 67 loss.margin 17.60113032998085 +919 67 loss.adversarial_temperature 0.6296542162969766 +919 67 negative_sampler.num_negs_per_pos 74.0 +919 67 training.batch_size 0.0 +919 68 model.embedding_dim 0.0 +919 68 model.relation_dim 0.0 +919 68 model.scoring_fct_norm 2.0 +919 68 loss.margin 23.76675360236851 +919 68 loss.adversarial_temperature 0.2933854185917133 +919 68 negative_sampler.num_negs_per_pos 54.0 +919 68 training.batch_size 0.0 +919 69 model.embedding_dim 0.0 +919 69 model.relation_dim 0.0 +919 69 model.scoring_fct_norm 2.0 +919 69 loss.margin 28.487395539838236 +919 69 loss.adversarial_temperature 0.13444724297323649 +919 69 negative_sampler.num_negs_per_pos 59.0 +919 69 training.batch_size 0.0 +919 70 model.embedding_dim 1.0 +919 70 model.relation_dim 1.0 +919 70 model.scoring_fct_norm 2.0 +919 70 loss.margin 29.063934102767263 +919 70 loss.adversarial_temperature 0.10697615413880196 +919 70 negative_sampler.num_negs_per_pos 35.0 +919 70 training.batch_size 1.0 +919 71 model.embedding_dim 2.0 +919 71 model.relation_dim 1.0 +919 71 model.scoring_fct_norm 2.0 +919 71 loss.margin 25.780070736303404 +919 71 
loss.adversarial_temperature 0.39695910552281066 +919 71 negative_sampler.num_negs_per_pos 51.0 +919 71 training.batch_size 2.0 +919 72 model.embedding_dim 2.0 +919 72 model.relation_dim 0.0 +919 72 model.scoring_fct_norm 1.0 +919 72 loss.margin 1.022380163853613 +919 72 loss.adversarial_temperature 0.3482846317068131 +919 72 negative_sampler.num_negs_per_pos 37.0 +919 72 training.batch_size 1.0 +919 73 model.embedding_dim 0.0 +919 73 model.relation_dim 0.0 +919 73 model.scoring_fct_norm 2.0 +919 73 loss.margin 11.93136394932816 +919 73 loss.adversarial_temperature 0.6025505853871941 +919 73 negative_sampler.num_negs_per_pos 21.0 +919 73 training.batch_size 2.0 +919 74 model.embedding_dim 0.0 +919 74 model.relation_dim 0.0 +919 74 model.scoring_fct_norm 1.0 +919 74 loss.margin 15.574696444797812 +919 74 loss.adversarial_temperature 0.9417199661501657 +919 74 negative_sampler.num_negs_per_pos 47.0 +919 74 training.batch_size 0.0 +919 75 model.embedding_dim 1.0 +919 75 model.relation_dim 0.0 +919 75 model.scoring_fct_norm 1.0 +919 75 loss.margin 11.043977511888853 +919 75 loss.adversarial_temperature 0.10833170922923827 +919 75 negative_sampler.num_negs_per_pos 27.0 +919 75 training.batch_size 0.0 +919 76 model.embedding_dim 2.0 +919 76 model.relation_dim 2.0 +919 76 model.scoring_fct_norm 1.0 +919 76 loss.margin 3.340790610543477 +919 76 loss.adversarial_temperature 0.6253848804807164 +919 76 negative_sampler.num_negs_per_pos 23.0 +919 76 training.batch_size 2.0 +919 77 model.embedding_dim 2.0 +919 77 model.relation_dim 0.0 +919 77 model.scoring_fct_norm 1.0 +919 77 loss.margin 24.238698671445178 +919 77 loss.adversarial_temperature 0.7552058158147198 +919 77 negative_sampler.num_negs_per_pos 16.0 +919 77 training.batch_size 1.0 +919 78 model.embedding_dim 1.0 +919 78 model.relation_dim 0.0 +919 78 model.scoring_fct_norm 1.0 +919 78 loss.margin 7.986913673151224 +919 78 loss.adversarial_temperature 0.5080696094078841 +919 78 negative_sampler.num_negs_per_pos 39.0 
+919 78 training.batch_size 2.0 +919 79 model.embedding_dim 2.0 +919 79 model.relation_dim 0.0 +919 79 model.scoring_fct_norm 1.0 +919 79 loss.margin 27.699465247857926 +919 79 loss.adversarial_temperature 0.7323209800159325 +919 79 negative_sampler.num_negs_per_pos 70.0 +919 79 training.batch_size 1.0 +919 80 model.embedding_dim 2.0 +919 80 model.relation_dim 1.0 +919 80 model.scoring_fct_norm 2.0 +919 80 loss.margin 10.967392309766119 +919 80 loss.adversarial_temperature 0.7007516167238119 +919 80 negative_sampler.num_negs_per_pos 68.0 +919 80 training.batch_size 2.0 +919 81 model.embedding_dim 2.0 +919 81 model.relation_dim 1.0 +919 81 model.scoring_fct_norm 2.0 +919 81 loss.margin 15.42392305650081 +919 81 loss.adversarial_temperature 0.8749470740851051 +919 81 negative_sampler.num_negs_per_pos 28.0 +919 81 training.batch_size 2.0 +919 82 model.embedding_dim 1.0 +919 82 model.relation_dim 2.0 +919 82 model.scoring_fct_norm 1.0 +919 82 loss.margin 8.112279294535668 +919 82 loss.adversarial_temperature 0.3805654421106487 +919 82 negative_sampler.num_negs_per_pos 86.0 +919 82 training.batch_size 0.0 +919 83 model.embedding_dim 1.0 +919 83 model.relation_dim 0.0 +919 83 model.scoring_fct_norm 1.0 +919 83 loss.margin 13.116955878980821 +919 83 loss.adversarial_temperature 0.6832198727514228 +919 83 negative_sampler.num_negs_per_pos 16.0 +919 83 training.batch_size 1.0 +919 84 model.embedding_dim 2.0 +919 84 model.relation_dim 2.0 +919 84 model.scoring_fct_norm 1.0 +919 84 loss.margin 11.345762693728323 +919 84 loss.adversarial_temperature 0.5689895476574917 +919 84 negative_sampler.num_negs_per_pos 90.0 +919 84 training.batch_size 0.0 +919 85 model.embedding_dim 2.0 +919 85 model.relation_dim 1.0 +919 85 model.scoring_fct_norm 2.0 +919 85 loss.margin 9.972403614059822 +919 85 loss.adversarial_temperature 0.7674522496637934 +919 85 negative_sampler.num_negs_per_pos 53.0 +919 85 training.batch_size 2.0 +919 86 model.embedding_dim 0.0 +919 86 model.relation_dim 2.0 
+919 86 model.scoring_fct_norm 2.0 +919 86 loss.margin 3.0669137426629174 +919 86 loss.adversarial_temperature 0.8193483752455321 +919 86 negative_sampler.num_negs_per_pos 80.0 +919 86 training.batch_size 2.0 +919 87 model.embedding_dim 2.0 +919 87 model.relation_dim 2.0 +919 87 model.scoring_fct_norm 2.0 +919 87 loss.margin 5.948882381746804 +919 87 loss.adversarial_temperature 0.8237064001278681 +919 87 negative_sampler.num_negs_per_pos 69.0 +919 87 training.batch_size 1.0 +919 88 model.embedding_dim 1.0 +919 88 model.relation_dim 0.0 +919 88 model.scoring_fct_norm 2.0 +919 88 loss.margin 14.72410454775291 +919 88 loss.adversarial_temperature 0.9029660664909607 +919 88 negative_sampler.num_negs_per_pos 61.0 +919 88 training.batch_size 0.0 +919 89 model.embedding_dim 2.0 +919 89 model.relation_dim 0.0 +919 89 model.scoring_fct_norm 1.0 +919 89 loss.margin 9.442639770824984 +919 89 loss.adversarial_temperature 0.8524015533135365 +919 89 negative_sampler.num_negs_per_pos 85.0 +919 89 training.batch_size 0.0 +919 90 model.embedding_dim 1.0 +919 90 model.relation_dim 1.0 +919 90 model.scoring_fct_norm 1.0 +919 90 loss.margin 24.88933387412614 +919 90 loss.adversarial_temperature 0.8259173763156273 +919 90 negative_sampler.num_negs_per_pos 5.0 +919 90 training.batch_size 0.0 +919 91 model.embedding_dim 0.0 +919 91 model.relation_dim 2.0 +919 91 model.scoring_fct_norm 1.0 +919 91 loss.margin 22.079465898747785 +919 91 loss.adversarial_temperature 0.9931146858499119 +919 91 negative_sampler.num_negs_per_pos 41.0 +919 91 training.batch_size 2.0 +919 92 model.embedding_dim 0.0 +919 92 model.relation_dim 2.0 +919 92 model.scoring_fct_norm 1.0 +919 92 loss.margin 2.1728927454951474 +919 92 loss.adversarial_temperature 0.6996461670115598 +919 92 negative_sampler.num_negs_per_pos 32.0 +919 92 training.batch_size 0.0 +919 93 model.embedding_dim 1.0 +919 93 model.relation_dim 2.0 +919 93 model.scoring_fct_norm 2.0 +919 93 loss.margin 7.602718350201048 +919 93 
loss.adversarial_temperature 0.796983422586354 +919 93 negative_sampler.num_negs_per_pos 15.0 +919 93 training.batch_size 1.0 +919 94 model.embedding_dim 0.0 +919 94 model.relation_dim 1.0 +919 94 model.scoring_fct_norm 1.0 +919 94 loss.margin 22.55671341380506 +919 94 loss.adversarial_temperature 0.1718784307580341 +919 94 negative_sampler.num_negs_per_pos 81.0 +919 94 training.batch_size 2.0 +919 95 model.embedding_dim 1.0 +919 95 model.relation_dim 2.0 +919 95 model.scoring_fct_norm 1.0 +919 95 loss.margin 13.858150762504728 +919 95 loss.adversarial_temperature 0.1003745474742534 +919 95 negative_sampler.num_negs_per_pos 16.0 +919 95 training.batch_size 0.0 +919 96 model.embedding_dim 0.0 +919 96 model.relation_dim 1.0 +919 96 model.scoring_fct_norm 1.0 +919 96 loss.margin 29.494685651572663 +919 96 loss.adversarial_temperature 0.8271122914987652 +919 96 negative_sampler.num_negs_per_pos 54.0 +919 96 training.batch_size 2.0 +919 97 model.embedding_dim 2.0 +919 97 model.relation_dim 1.0 +919 97 model.scoring_fct_norm 1.0 +919 97 loss.margin 5.477889419292017 +919 97 loss.adversarial_temperature 0.46613780557979934 +919 97 negative_sampler.num_negs_per_pos 5.0 +919 97 training.batch_size 0.0 +919 98 model.embedding_dim 1.0 +919 98 model.relation_dim 1.0 +919 98 model.scoring_fct_norm 2.0 +919 98 loss.margin 13.191984874782591 +919 98 loss.adversarial_temperature 0.5625161562695713 +919 98 negative_sampler.num_negs_per_pos 9.0 +919 98 training.batch_size 2.0 +919 99 model.embedding_dim 0.0 +919 99 model.relation_dim 2.0 +919 99 model.scoring_fct_norm 1.0 +919 99 loss.margin 29.14906726155985 +919 99 loss.adversarial_temperature 0.735931665875018 +919 99 negative_sampler.num_negs_per_pos 74.0 +919 99 training.batch_size 1.0 +919 100 model.embedding_dim 2.0 +919 100 model.relation_dim 2.0 +919 100 model.scoring_fct_norm 2.0 +919 100 loss.margin 16.671298392664198 +919 100 loss.adversarial_temperature 0.12333832031121507 +919 100 negative_sampler.num_negs_per_pos 65.0 
+919 100 training.batch_size 2.0 +919 1 dataset """kinships""" +919 1 model """transr""" +919 1 loss """nssa""" +919 1 regularizer """no""" +919 1 optimizer """adadelta""" +919 1 training_loop """owa""" +919 1 negative_sampler """basic""" +919 1 evaluator """rankbased""" +919 2 dataset """kinships""" +919 2 model """transr""" +919 2 loss """nssa""" +919 2 regularizer """no""" +919 2 optimizer """adadelta""" +919 2 training_loop """owa""" +919 2 negative_sampler """basic""" +919 2 evaluator """rankbased""" +919 3 dataset """kinships""" +919 3 model """transr""" +919 3 loss """nssa""" +919 3 regularizer """no""" +919 3 optimizer """adadelta""" +919 3 training_loop """owa""" +919 3 negative_sampler """basic""" +919 3 evaluator """rankbased""" +919 4 dataset """kinships""" +919 4 model """transr""" +919 4 loss """nssa""" +919 4 regularizer """no""" +919 4 optimizer """adadelta""" +919 4 training_loop """owa""" +919 4 negative_sampler """basic""" +919 4 evaluator """rankbased""" +919 5 dataset """kinships""" +919 5 model """transr""" +919 5 loss """nssa""" +919 5 regularizer """no""" +919 5 optimizer """adadelta""" +919 5 training_loop """owa""" +919 5 negative_sampler """basic""" +919 5 evaluator """rankbased""" +919 6 dataset """kinships""" +919 6 model """transr""" +919 6 loss """nssa""" +919 6 regularizer """no""" +919 6 optimizer """adadelta""" +919 6 training_loop """owa""" +919 6 negative_sampler """basic""" +919 6 evaluator """rankbased""" +919 7 dataset """kinships""" +919 7 model """transr""" +919 7 loss """nssa""" +919 7 regularizer """no""" +919 7 optimizer """adadelta""" +919 7 training_loop """owa""" +919 7 negative_sampler """basic""" +919 7 evaluator """rankbased""" +919 8 dataset """kinships""" +919 8 model """transr""" +919 8 loss """nssa""" +919 8 regularizer """no""" +919 8 optimizer """adadelta""" +919 8 training_loop """owa""" +919 8 negative_sampler """basic""" +919 8 evaluator """rankbased""" +919 9 dataset """kinships""" +919 9 model 
"""transr""" +919 9 loss """nssa""" +919 9 regularizer """no""" +919 9 optimizer """adadelta""" +919 9 training_loop """owa""" +919 9 negative_sampler """basic""" +919 9 evaluator """rankbased""" +919 10 dataset """kinships""" +919 10 model """transr""" +919 10 loss """nssa""" +919 10 regularizer """no""" +919 10 optimizer """adadelta""" +919 10 training_loop """owa""" +919 10 negative_sampler """basic""" +919 10 evaluator """rankbased""" +919 11 dataset """kinships""" +919 11 model """transr""" +919 11 loss """nssa""" +919 11 regularizer """no""" +919 11 optimizer """adadelta""" +919 11 training_loop """owa""" +919 11 negative_sampler """basic""" +919 11 evaluator """rankbased""" +919 12 dataset """kinships""" +919 12 model """transr""" +919 12 loss """nssa""" +919 12 regularizer """no""" +919 12 optimizer """adadelta""" +919 12 training_loop """owa""" +919 12 negative_sampler """basic""" +919 12 evaluator """rankbased""" +919 13 dataset """kinships""" +919 13 model """transr""" +919 13 loss """nssa""" +919 13 regularizer """no""" +919 13 optimizer """adadelta""" +919 13 training_loop """owa""" +919 13 negative_sampler """basic""" +919 13 evaluator """rankbased""" +919 14 dataset """kinships""" +919 14 model """transr""" +919 14 loss """nssa""" +919 14 regularizer """no""" +919 14 optimizer """adadelta""" +919 14 training_loop """owa""" +919 14 negative_sampler """basic""" +919 14 evaluator """rankbased""" +919 15 dataset """kinships""" +919 15 model """transr""" +919 15 loss """nssa""" +919 15 regularizer """no""" +919 15 optimizer """adadelta""" +919 15 training_loop """owa""" +919 15 negative_sampler """basic""" +919 15 evaluator """rankbased""" +919 16 dataset """kinships""" +919 16 model """transr""" +919 16 loss """nssa""" +919 16 regularizer """no""" +919 16 optimizer """adadelta""" +919 16 training_loop """owa""" +919 16 negative_sampler """basic""" +919 16 evaluator """rankbased""" +919 17 dataset """kinships""" +919 17 model """transr""" +919 17 loss 
"""nssa""" +919 17 regularizer """no""" +919 17 optimizer """adadelta""" +919 17 training_loop """owa""" +919 17 negative_sampler """basic""" +919 17 evaluator """rankbased""" +919 18 dataset """kinships""" +919 18 model """transr""" +919 18 loss """nssa""" +919 18 regularizer """no""" +919 18 optimizer """adadelta""" +919 18 training_loop """owa""" +919 18 negative_sampler """basic""" +919 18 evaluator """rankbased""" +919 19 dataset """kinships""" +919 19 model """transr""" +919 19 loss """nssa""" +919 19 regularizer """no""" +919 19 optimizer """adadelta""" +919 19 training_loop """owa""" +919 19 negative_sampler """basic""" +919 19 evaluator """rankbased""" +919 20 dataset """kinships""" +919 20 model """transr""" +919 20 loss """nssa""" +919 20 regularizer """no""" +919 20 optimizer """adadelta""" +919 20 training_loop """owa""" +919 20 negative_sampler """basic""" +919 20 evaluator """rankbased""" +919 21 dataset """kinships""" +919 21 model """transr""" +919 21 loss """nssa""" +919 21 regularizer """no""" +919 21 optimizer """adadelta""" +919 21 training_loop """owa""" +919 21 negative_sampler """basic""" +919 21 evaluator """rankbased""" +919 22 dataset """kinships""" +919 22 model """transr""" +919 22 loss """nssa""" +919 22 regularizer """no""" +919 22 optimizer """adadelta""" +919 22 training_loop """owa""" +919 22 negative_sampler """basic""" +919 22 evaluator """rankbased""" +919 23 dataset """kinships""" +919 23 model """transr""" +919 23 loss """nssa""" +919 23 regularizer """no""" +919 23 optimizer """adadelta""" +919 23 training_loop """owa""" +919 23 negative_sampler """basic""" +919 23 evaluator """rankbased""" +919 24 dataset """kinships""" +919 24 model """transr""" +919 24 loss """nssa""" +919 24 regularizer """no""" +919 24 optimizer """adadelta""" +919 24 training_loop """owa""" +919 24 negative_sampler """basic""" +919 24 evaluator """rankbased""" +919 25 dataset """kinships""" +919 25 model """transr""" +919 25 loss """nssa""" +919 25 
regularizer """no""" +919 25 optimizer """adadelta""" +919 25 training_loop """owa""" +919 25 negative_sampler """basic""" +919 25 evaluator """rankbased""" +919 26 dataset """kinships""" +919 26 model """transr""" +919 26 loss """nssa""" +919 26 regularizer """no""" +919 26 optimizer """adadelta""" +919 26 training_loop """owa""" +919 26 negative_sampler """basic""" +919 26 evaluator """rankbased""" +919 27 dataset """kinships""" +919 27 model """transr""" +919 27 loss """nssa""" +919 27 regularizer """no""" +919 27 optimizer """adadelta""" +919 27 training_loop """owa""" +919 27 negative_sampler """basic""" +919 27 evaluator """rankbased""" +919 28 dataset """kinships""" +919 28 model """transr""" +919 28 loss """nssa""" +919 28 regularizer """no""" +919 28 optimizer """adadelta""" +919 28 training_loop """owa""" +919 28 negative_sampler """basic""" +919 28 evaluator """rankbased""" +919 29 dataset """kinships""" +919 29 model """transr""" +919 29 loss """nssa""" +919 29 regularizer """no""" +919 29 optimizer """adadelta""" +919 29 training_loop """owa""" +919 29 negative_sampler """basic""" +919 29 evaluator """rankbased""" +919 30 dataset """kinships""" +919 30 model """transr""" +919 30 loss """nssa""" +919 30 regularizer """no""" +919 30 optimizer """adadelta""" +919 30 training_loop """owa""" +919 30 negative_sampler """basic""" +919 30 evaluator """rankbased""" +919 31 dataset """kinships""" +919 31 model """transr""" +919 31 loss """nssa""" +919 31 regularizer """no""" +919 31 optimizer """adadelta""" +919 31 training_loop """owa""" +919 31 negative_sampler """basic""" +919 31 evaluator """rankbased""" +919 32 dataset """kinships""" +919 32 model """transr""" +919 32 loss """nssa""" +919 32 regularizer """no""" +919 32 optimizer """adadelta""" +919 32 training_loop """owa""" +919 32 negative_sampler """basic""" +919 32 evaluator """rankbased""" +919 33 dataset """kinships""" +919 33 model """transr""" +919 33 loss """nssa""" +919 33 regularizer """no""" 
+919 33 optimizer """adadelta""" +919 33 training_loop """owa""" +919 33 negative_sampler """basic""" +919 33 evaluator """rankbased""" +919 34 dataset """kinships""" +919 34 model """transr""" +919 34 loss """nssa""" +919 34 regularizer """no""" +919 34 optimizer """adadelta""" +919 34 training_loop """owa""" +919 34 negative_sampler """basic""" +919 34 evaluator """rankbased""" +919 35 dataset """kinships""" +919 35 model """transr""" +919 35 loss """nssa""" +919 35 regularizer """no""" +919 35 optimizer """adadelta""" +919 35 training_loop """owa""" +919 35 negative_sampler """basic""" +919 35 evaluator """rankbased""" +919 36 dataset """kinships""" +919 36 model """transr""" +919 36 loss """nssa""" +919 36 regularizer """no""" +919 36 optimizer """adadelta""" +919 36 training_loop """owa""" +919 36 negative_sampler """basic""" +919 36 evaluator """rankbased""" +919 37 dataset """kinships""" +919 37 model """transr""" +919 37 loss """nssa""" +919 37 regularizer """no""" +919 37 optimizer """adadelta""" +919 37 training_loop """owa""" +919 37 negative_sampler """basic""" +919 37 evaluator """rankbased""" +919 38 dataset """kinships""" +919 38 model """transr""" +919 38 loss """nssa""" +919 38 regularizer """no""" +919 38 optimizer """adadelta""" +919 38 training_loop """owa""" +919 38 negative_sampler """basic""" +919 38 evaluator """rankbased""" +919 39 dataset """kinships""" +919 39 model """transr""" +919 39 loss """nssa""" +919 39 regularizer """no""" +919 39 optimizer """adadelta""" +919 39 training_loop """owa""" +919 39 negative_sampler """basic""" +919 39 evaluator """rankbased""" +919 40 dataset """kinships""" +919 40 model """transr""" +919 40 loss """nssa""" +919 40 regularizer """no""" +919 40 optimizer """adadelta""" +919 40 training_loop """owa""" +919 40 negative_sampler """basic""" +919 40 evaluator """rankbased""" +919 41 dataset """kinships""" +919 41 model """transr""" +919 41 loss """nssa""" +919 41 regularizer """no""" +919 41 optimizer 
"""adadelta""" +919 41 training_loop """owa""" +919 41 negative_sampler """basic""" +919 41 evaluator """rankbased""" +919 42 dataset """kinships""" +919 42 model """transr""" +919 42 loss """nssa""" +919 42 regularizer """no""" +919 42 optimizer """adadelta""" +919 42 training_loop """owa""" +919 42 negative_sampler """basic""" +919 42 evaluator """rankbased""" +919 43 dataset """kinships""" +919 43 model """transr""" +919 43 loss """nssa""" +919 43 regularizer """no""" +919 43 optimizer """adadelta""" +919 43 training_loop """owa""" +919 43 negative_sampler """basic""" +919 43 evaluator """rankbased""" +919 44 dataset """kinships""" +919 44 model """transr""" +919 44 loss """nssa""" +919 44 regularizer """no""" +919 44 optimizer """adadelta""" +919 44 training_loop """owa""" +919 44 negative_sampler """basic""" +919 44 evaluator """rankbased""" +919 45 dataset """kinships""" +919 45 model """transr""" +919 45 loss """nssa""" +919 45 regularizer """no""" +919 45 optimizer """adadelta""" +919 45 training_loop """owa""" +919 45 negative_sampler """basic""" +919 45 evaluator """rankbased""" +919 46 dataset """kinships""" +919 46 model """transr""" +919 46 loss """nssa""" +919 46 regularizer """no""" +919 46 optimizer """adadelta""" +919 46 training_loop """owa""" +919 46 negative_sampler """basic""" +919 46 evaluator """rankbased""" +919 47 dataset """kinships""" +919 47 model """transr""" +919 47 loss """nssa""" +919 47 regularizer """no""" +919 47 optimizer """adadelta""" +919 47 training_loop """owa""" +919 47 negative_sampler """basic""" +919 47 evaluator """rankbased""" +919 48 dataset """kinships""" +919 48 model """transr""" +919 48 loss """nssa""" +919 48 regularizer """no""" +919 48 optimizer """adadelta""" +919 48 training_loop """owa""" +919 48 negative_sampler """basic""" +919 48 evaluator """rankbased""" +919 49 dataset """kinships""" +919 49 model """transr""" +919 49 loss """nssa""" +919 49 regularizer """no""" +919 49 optimizer """adadelta""" +919 49 
training_loop """owa""" +919 49 negative_sampler """basic""" +919 49 evaluator """rankbased""" +919 50 dataset """kinships""" +919 50 model """transr""" +919 50 loss """nssa""" +919 50 regularizer """no""" +919 50 optimizer """adadelta""" +919 50 training_loop """owa""" +919 50 negative_sampler """basic""" +919 50 evaluator """rankbased""" +919 51 dataset """kinships""" +919 51 model """transr""" +919 51 loss """nssa""" +919 51 regularizer """no""" +919 51 optimizer """adadelta""" +919 51 training_loop """owa""" +919 51 negative_sampler """basic""" +919 51 evaluator """rankbased""" +919 52 dataset """kinships""" +919 52 model """transr""" +919 52 loss """nssa""" +919 52 regularizer """no""" +919 52 optimizer """adadelta""" +919 52 training_loop """owa""" +919 52 negative_sampler """basic""" +919 52 evaluator """rankbased""" +919 53 dataset """kinships""" +919 53 model """transr""" +919 53 loss """nssa""" +919 53 regularizer """no""" +919 53 optimizer """adadelta""" +919 53 training_loop """owa""" +919 53 negative_sampler """basic""" +919 53 evaluator """rankbased""" +919 54 dataset """kinships""" +919 54 model """transr""" +919 54 loss """nssa""" +919 54 regularizer """no""" +919 54 optimizer """adadelta""" +919 54 training_loop """owa""" +919 54 negative_sampler """basic""" +919 54 evaluator """rankbased""" +919 55 dataset """kinships""" +919 55 model """transr""" +919 55 loss """nssa""" +919 55 regularizer """no""" +919 55 optimizer """adadelta""" +919 55 training_loop """owa""" +919 55 negative_sampler """basic""" +919 55 evaluator """rankbased""" +919 56 dataset """kinships""" +919 56 model """transr""" +919 56 loss """nssa""" +919 56 regularizer """no""" +919 56 optimizer """adadelta""" +919 56 training_loop """owa""" +919 56 negative_sampler """basic""" +919 56 evaluator """rankbased""" +919 57 dataset """kinships""" +919 57 model """transr""" +919 57 loss """nssa""" +919 57 regularizer """no""" +919 57 optimizer """adadelta""" +919 57 training_loop """owa""" 
+919 57 negative_sampler """basic""" +919 57 evaluator """rankbased""" +919 58 dataset """kinships""" +919 58 model """transr""" +919 58 loss """nssa""" +919 58 regularizer """no""" +919 58 optimizer """adadelta""" +919 58 training_loop """owa""" +919 58 negative_sampler """basic""" +919 58 evaluator """rankbased""" +919 59 dataset """kinships""" +919 59 model """transr""" +919 59 loss """nssa""" +919 59 regularizer """no""" +919 59 optimizer """adadelta""" +919 59 training_loop """owa""" +919 59 negative_sampler """basic""" +919 59 evaluator """rankbased""" +919 60 dataset """kinships""" +919 60 model """transr""" +919 60 loss """nssa""" +919 60 regularizer """no""" +919 60 optimizer """adadelta""" +919 60 training_loop """owa""" +919 60 negative_sampler """basic""" +919 60 evaluator """rankbased""" +919 61 dataset """kinships""" +919 61 model """transr""" +919 61 loss """nssa""" +919 61 regularizer """no""" +919 61 optimizer """adadelta""" +919 61 training_loop """owa""" +919 61 negative_sampler """basic""" +919 61 evaluator """rankbased""" +919 62 dataset """kinships""" +919 62 model """transr""" +919 62 loss """nssa""" +919 62 regularizer """no""" +919 62 optimizer """adadelta""" +919 62 training_loop """owa""" +919 62 negative_sampler """basic""" +919 62 evaluator """rankbased""" +919 63 dataset """kinships""" +919 63 model """transr""" +919 63 loss """nssa""" +919 63 regularizer """no""" +919 63 optimizer """adadelta""" +919 63 training_loop """owa""" +919 63 negative_sampler """basic""" +919 63 evaluator """rankbased""" +919 64 dataset """kinships""" +919 64 model """transr""" +919 64 loss """nssa""" +919 64 regularizer """no""" +919 64 optimizer """adadelta""" +919 64 training_loop """owa""" +919 64 negative_sampler """basic""" +919 64 evaluator """rankbased""" +919 65 dataset """kinships""" +919 65 model """transr""" +919 65 loss """nssa""" +919 65 regularizer """no""" +919 65 optimizer """adadelta""" +919 65 training_loop """owa""" +919 65 
negative_sampler """basic""" +919 65 evaluator """rankbased""" +919 66 dataset """kinships""" +919 66 model """transr""" +919 66 loss """nssa""" +919 66 regularizer """no""" +919 66 optimizer """adadelta""" +919 66 training_loop """owa""" +919 66 negative_sampler """basic""" +919 66 evaluator """rankbased""" +919 67 dataset """kinships""" +919 67 model """transr""" +919 67 loss """nssa""" +919 67 regularizer """no""" +919 67 optimizer """adadelta""" +919 67 training_loop """owa""" +919 67 negative_sampler """basic""" +919 67 evaluator """rankbased""" +919 68 dataset """kinships""" +919 68 model """transr""" +919 68 loss """nssa""" +919 68 regularizer """no""" +919 68 optimizer """adadelta""" +919 68 training_loop """owa""" +919 68 negative_sampler """basic""" +919 68 evaluator """rankbased""" +919 69 dataset """kinships""" +919 69 model """transr""" +919 69 loss """nssa""" +919 69 regularizer """no""" +919 69 optimizer """adadelta""" +919 69 training_loop """owa""" +919 69 negative_sampler """basic""" +919 69 evaluator """rankbased""" +919 70 dataset """kinships""" +919 70 model """transr""" +919 70 loss """nssa""" +919 70 regularizer """no""" +919 70 optimizer """adadelta""" +919 70 training_loop """owa""" +919 70 negative_sampler """basic""" +919 70 evaluator """rankbased""" +919 71 dataset """kinships""" +919 71 model """transr""" +919 71 loss """nssa""" +919 71 regularizer """no""" +919 71 optimizer """adadelta""" +919 71 training_loop """owa""" +919 71 negative_sampler """basic""" +919 71 evaluator """rankbased""" +919 72 dataset """kinships""" +919 72 model """transr""" +919 72 loss """nssa""" +919 72 regularizer """no""" +919 72 optimizer """adadelta""" +919 72 training_loop """owa""" +919 72 negative_sampler """basic""" +919 72 evaluator """rankbased""" +919 73 dataset """kinships""" +919 73 model """transr""" +919 73 loss """nssa""" +919 73 regularizer """no""" +919 73 optimizer """adadelta""" +919 73 training_loop """owa""" +919 73 negative_sampler 
"""basic""" +919 73 evaluator """rankbased""" +919 74 dataset """kinships""" +919 74 model """transr""" +919 74 loss """nssa""" +919 74 regularizer """no""" +919 74 optimizer """adadelta""" +919 74 training_loop """owa""" +919 74 negative_sampler """basic""" +919 74 evaluator """rankbased""" +919 75 dataset """kinships""" +919 75 model """transr""" +919 75 loss """nssa""" +919 75 regularizer """no""" +919 75 optimizer """adadelta""" +919 75 training_loop """owa""" +919 75 negative_sampler """basic""" +919 75 evaluator """rankbased""" +919 76 dataset """kinships""" +919 76 model """transr""" +919 76 loss """nssa""" +919 76 regularizer """no""" +919 76 optimizer """adadelta""" +919 76 training_loop """owa""" +919 76 negative_sampler """basic""" +919 76 evaluator """rankbased""" +919 77 dataset """kinships""" +919 77 model """transr""" +919 77 loss """nssa""" +919 77 regularizer """no""" +919 77 optimizer """adadelta""" +919 77 training_loop """owa""" +919 77 negative_sampler """basic""" +919 77 evaluator """rankbased""" +919 78 dataset """kinships""" +919 78 model """transr""" +919 78 loss """nssa""" +919 78 regularizer """no""" +919 78 optimizer """adadelta""" +919 78 training_loop """owa""" +919 78 negative_sampler """basic""" +919 78 evaluator """rankbased""" +919 79 dataset """kinships""" +919 79 model """transr""" +919 79 loss """nssa""" +919 79 regularizer """no""" +919 79 optimizer """adadelta""" +919 79 training_loop """owa""" +919 79 negative_sampler """basic""" +919 79 evaluator """rankbased""" +919 80 dataset """kinships""" +919 80 model """transr""" +919 80 loss """nssa""" +919 80 regularizer """no""" +919 80 optimizer """adadelta""" +919 80 training_loop """owa""" +919 80 negative_sampler """basic""" +919 80 evaluator """rankbased""" +919 81 dataset """kinships""" +919 81 model """transr""" +919 81 loss """nssa""" +919 81 regularizer """no""" +919 81 optimizer """adadelta""" +919 81 training_loop """owa""" +919 81 negative_sampler """basic""" +919 81 
evaluator """rankbased""" +919 82 dataset """kinships""" +919 82 model """transr""" +919 82 loss """nssa""" +919 82 regularizer """no""" +919 82 optimizer """adadelta""" +919 82 training_loop """owa""" +919 82 negative_sampler """basic""" +919 82 evaluator """rankbased""" +919 83 dataset """kinships""" +919 83 model """transr""" +919 83 loss """nssa""" +919 83 regularizer """no""" +919 83 optimizer """adadelta""" +919 83 training_loop """owa""" +919 83 negative_sampler """basic""" +919 83 evaluator """rankbased""" +919 84 dataset """kinships""" +919 84 model """transr""" +919 84 loss """nssa""" +919 84 regularizer """no""" +919 84 optimizer """adadelta""" +919 84 training_loop """owa""" +919 84 negative_sampler """basic""" +919 84 evaluator """rankbased""" +919 85 dataset """kinships""" +919 85 model """transr""" +919 85 loss """nssa""" +919 85 regularizer """no""" +919 85 optimizer """adadelta""" +919 85 training_loop """owa""" +919 85 negative_sampler """basic""" +919 85 evaluator """rankbased""" +919 86 dataset """kinships""" +919 86 model """transr""" +919 86 loss """nssa""" +919 86 regularizer """no""" +919 86 optimizer """adadelta""" +919 86 training_loop """owa""" +919 86 negative_sampler """basic""" +919 86 evaluator """rankbased""" +919 87 dataset """kinships""" +919 87 model """transr""" +919 87 loss """nssa""" +919 87 regularizer """no""" +919 87 optimizer """adadelta""" +919 87 training_loop """owa""" +919 87 negative_sampler """basic""" +919 87 evaluator """rankbased""" +919 88 dataset """kinships""" +919 88 model """transr""" +919 88 loss """nssa""" +919 88 regularizer """no""" +919 88 optimizer """adadelta""" +919 88 training_loop """owa""" +919 88 negative_sampler """basic""" +919 88 evaluator """rankbased""" +919 89 dataset """kinships""" +919 89 model """transr""" +919 89 loss """nssa""" +919 89 regularizer """no""" +919 89 optimizer """adadelta""" +919 89 training_loop """owa""" +919 89 negative_sampler """basic""" +919 89 evaluator 
"""rankbased""" +919 90 dataset """kinships""" +919 90 model """transr""" +919 90 loss """nssa""" +919 90 regularizer """no""" +919 90 optimizer """adadelta""" +919 90 training_loop """owa""" +919 90 negative_sampler """basic""" +919 90 evaluator """rankbased""" +919 91 dataset """kinships""" +919 91 model """transr""" +919 91 loss """nssa""" +919 91 regularizer """no""" +919 91 optimizer """adadelta""" +919 91 training_loop """owa""" +919 91 negative_sampler """basic""" +919 91 evaluator """rankbased""" +919 92 dataset """kinships""" +919 92 model """transr""" +919 92 loss """nssa""" +919 92 regularizer """no""" +919 92 optimizer """adadelta""" +919 92 training_loop """owa""" +919 92 negative_sampler """basic""" +919 92 evaluator """rankbased""" +919 93 dataset """kinships""" +919 93 model """transr""" +919 93 loss """nssa""" +919 93 regularizer """no""" +919 93 optimizer """adadelta""" +919 93 training_loop """owa""" +919 93 negative_sampler """basic""" +919 93 evaluator """rankbased""" +919 94 dataset """kinships""" +919 94 model """transr""" +919 94 loss """nssa""" +919 94 regularizer """no""" +919 94 optimizer """adadelta""" +919 94 training_loop """owa""" +919 94 negative_sampler """basic""" +919 94 evaluator """rankbased""" +919 95 dataset """kinships""" +919 95 model """transr""" +919 95 loss """nssa""" +919 95 regularizer """no""" +919 95 optimizer """adadelta""" +919 95 training_loop """owa""" +919 95 negative_sampler """basic""" +919 95 evaluator """rankbased""" +919 96 dataset """kinships""" +919 96 model """transr""" +919 96 loss """nssa""" +919 96 regularizer """no""" +919 96 optimizer """adadelta""" +919 96 training_loop """owa""" +919 96 negative_sampler """basic""" +919 96 evaluator """rankbased""" +919 97 dataset """kinships""" +919 97 model """transr""" +919 97 loss """nssa""" +919 97 regularizer """no""" +919 97 optimizer """adadelta""" +919 97 training_loop """owa""" +919 97 negative_sampler """basic""" +919 97 evaluator """rankbased""" +919 98 
dataset """kinships""" +919 98 model """transr""" +919 98 loss """nssa""" +919 98 regularizer """no""" +919 98 optimizer """adadelta""" +919 98 training_loop """owa""" +919 98 negative_sampler """basic""" +919 98 evaluator """rankbased""" +919 99 dataset """kinships""" +919 99 model """transr""" +919 99 loss """nssa""" +919 99 regularizer """no""" +919 99 optimizer """adadelta""" +919 99 training_loop """owa""" +919 99 negative_sampler """basic""" +919 99 evaluator """rankbased""" +919 100 dataset """kinships""" +919 100 model """transr""" +919 100 loss """nssa""" +919 100 regularizer """no""" +919 100 optimizer """adadelta""" +919 100 training_loop """owa""" +919 100 negative_sampler """basic""" +919 100 evaluator """rankbased""" +920 1 model.embedding_dim 2.0 +920 1 model.relation_dim 1.0 +920 1 model.scoring_fct_norm 2.0 +920 1 loss.margin 16.449178077148375 +920 1 loss.adversarial_temperature 0.7925273045260599 +920 1 negative_sampler.num_negs_per_pos 90.0 +920 1 training.batch_size 2.0 +920 2 model.embedding_dim 1.0 +920 2 model.relation_dim 2.0 +920 2 model.scoring_fct_norm 2.0 +920 2 loss.margin 18.361905457660033 +920 2 loss.adversarial_temperature 0.7143935341168258 +920 2 negative_sampler.num_negs_per_pos 98.0 +920 2 training.batch_size 2.0 +920 3 model.embedding_dim 0.0 +920 3 model.relation_dim 2.0 +920 3 model.scoring_fct_norm 1.0 +920 3 loss.margin 28.536682000176153 +920 3 loss.adversarial_temperature 0.5139706789358364 +920 3 negative_sampler.num_negs_per_pos 94.0 +920 3 training.batch_size 1.0 +920 4 model.embedding_dim 0.0 +920 4 model.relation_dim 1.0 +920 4 model.scoring_fct_norm 1.0 +920 4 loss.margin 8.470204410788744 +920 4 loss.adversarial_temperature 0.6753447082229675 +920 4 negative_sampler.num_negs_per_pos 88.0 +920 4 training.batch_size 1.0 +920 5 model.embedding_dim 2.0 +920 5 model.relation_dim 2.0 +920 5 model.scoring_fct_norm 2.0 +920 5 loss.margin 3.083865918216995 +920 5 loss.adversarial_temperature 0.1726993344210686 +920 5 
negative_sampler.num_negs_per_pos 31.0 +920 5 training.batch_size 1.0 +920 6 model.embedding_dim 2.0 +920 6 model.relation_dim 1.0 +920 6 model.scoring_fct_norm 1.0 +920 6 loss.margin 6.702649344822658 +920 6 loss.adversarial_temperature 0.38055354866741287 +920 6 negative_sampler.num_negs_per_pos 70.0 +920 6 training.batch_size 0.0 +920 7 model.embedding_dim 1.0 +920 7 model.relation_dim 0.0 +920 7 model.scoring_fct_norm 1.0 +920 7 loss.margin 15.79711363472124 +920 7 loss.adversarial_temperature 0.15156226682215665 +920 7 negative_sampler.num_negs_per_pos 72.0 +920 7 training.batch_size 1.0 +920 8 model.embedding_dim 2.0 +920 8 model.relation_dim 2.0 +920 8 model.scoring_fct_norm 2.0 +920 8 loss.margin 16.37009932673105 +920 8 loss.adversarial_temperature 0.5401460306671597 +920 8 negative_sampler.num_negs_per_pos 68.0 +920 8 training.batch_size 1.0 +920 9 model.embedding_dim 2.0 +920 9 model.relation_dim 1.0 +920 9 model.scoring_fct_norm 2.0 +920 9 loss.margin 29.327703199584022 +920 9 loss.adversarial_temperature 0.7682666674471986 +920 9 negative_sampler.num_negs_per_pos 98.0 +920 9 training.batch_size 2.0 +920 10 model.embedding_dim 2.0 +920 10 model.relation_dim 1.0 +920 10 model.scoring_fct_norm 2.0 +920 10 loss.margin 1.0423708613217384 +920 10 loss.adversarial_temperature 0.997298358518807 +920 10 negative_sampler.num_negs_per_pos 91.0 +920 10 training.batch_size 2.0 +920 11 model.embedding_dim 1.0 +920 11 model.relation_dim 2.0 +920 11 model.scoring_fct_norm 1.0 +920 11 loss.margin 1.313150231276245 +920 11 loss.adversarial_temperature 0.5642997803442746 +920 11 negative_sampler.num_negs_per_pos 50.0 +920 11 training.batch_size 0.0 +920 12 model.embedding_dim 2.0 +920 12 model.relation_dim 1.0 +920 12 model.scoring_fct_norm 2.0 +920 12 loss.margin 7.449047819347449 +920 12 loss.adversarial_temperature 0.9372243433709669 +920 12 negative_sampler.num_negs_per_pos 34.0 +920 12 training.batch_size 1.0 +920 13 model.embedding_dim 1.0 +920 13 
model.relation_dim 2.0 +920 13 model.scoring_fct_norm 2.0 +920 13 loss.margin 8.308272179502183 +920 13 loss.adversarial_temperature 0.9801558032901574 +920 13 negative_sampler.num_negs_per_pos 46.0 +920 13 training.batch_size 0.0 +920 14 model.embedding_dim 2.0 +920 14 model.relation_dim 1.0 +920 14 model.scoring_fct_norm 2.0 +920 14 loss.margin 5.873935584055024 +920 14 loss.adversarial_temperature 0.6409170151776032 +920 14 negative_sampler.num_negs_per_pos 3.0 +920 14 training.batch_size 0.0 +920 15 model.embedding_dim 2.0 +920 15 model.relation_dim 0.0 +920 15 model.scoring_fct_norm 2.0 +920 15 loss.margin 21.771988254566903 +920 15 loss.adversarial_temperature 0.39605982237093673 +920 15 negative_sampler.num_negs_per_pos 41.0 +920 15 training.batch_size 0.0 +920 16 model.embedding_dim 1.0 +920 16 model.relation_dim 1.0 +920 16 model.scoring_fct_norm 1.0 +920 16 loss.margin 15.60314716778236 +920 16 loss.adversarial_temperature 0.6638128656237159 +920 16 negative_sampler.num_negs_per_pos 88.0 +920 16 training.batch_size 1.0 +920 17 model.embedding_dim 2.0 +920 17 model.relation_dim 2.0 +920 17 model.scoring_fct_norm 2.0 +920 17 loss.margin 7.542765205519138 +920 17 loss.adversarial_temperature 0.8776518768319249 +920 17 negative_sampler.num_negs_per_pos 21.0 +920 17 training.batch_size 2.0 +920 18 model.embedding_dim 0.0 +920 18 model.relation_dim 2.0 +920 18 model.scoring_fct_norm 1.0 +920 18 loss.margin 6.302523426746522 +920 18 loss.adversarial_temperature 0.4819896551783571 +920 18 negative_sampler.num_negs_per_pos 57.0 +920 18 training.batch_size 2.0 +920 19 model.embedding_dim 1.0 +920 19 model.relation_dim 0.0 +920 19 model.scoring_fct_norm 2.0 +920 19 loss.margin 12.09606576769265 +920 19 loss.adversarial_temperature 0.29371890814509327 +920 19 negative_sampler.num_negs_per_pos 36.0 +920 19 training.batch_size 1.0 +920 20 model.embedding_dim 2.0 +920 20 model.relation_dim 2.0 +920 20 model.scoring_fct_norm 2.0 +920 20 loss.margin 4.923445913541493 +920 
20 loss.adversarial_temperature 0.8752905263929381 +920 20 negative_sampler.num_negs_per_pos 70.0 +920 20 training.batch_size 2.0 +920 21 model.embedding_dim 2.0 +920 21 model.relation_dim 0.0 +920 21 model.scoring_fct_norm 1.0 +920 21 loss.margin 6.866447490383356 +920 21 loss.adversarial_temperature 0.715942502055831 +920 21 negative_sampler.num_negs_per_pos 15.0 +920 21 training.batch_size 2.0 +920 22 model.embedding_dim 1.0 +920 22 model.relation_dim 0.0 +920 22 model.scoring_fct_norm 1.0 +920 22 loss.margin 14.664529488468622 +920 22 loss.adversarial_temperature 0.858797721570045 +920 22 negative_sampler.num_negs_per_pos 29.0 +920 22 training.batch_size 1.0 +920 23 model.embedding_dim 1.0 +920 23 model.relation_dim 0.0 +920 23 model.scoring_fct_norm 2.0 +920 23 loss.margin 7.47461899616013 +920 23 loss.adversarial_temperature 0.2192085180322218 +920 23 negative_sampler.num_negs_per_pos 66.0 +920 23 training.batch_size 1.0 +920 24 model.embedding_dim 0.0 +920 24 model.relation_dim 1.0 +920 24 model.scoring_fct_norm 2.0 +920 24 loss.margin 11.348689928263193 +920 24 loss.adversarial_temperature 0.6868962596416841 +920 24 negative_sampler.num_negs_per_pos 36.0 +920 24 training.batch_size 1.0 +920 25 model.embedding_dim 2.0 +920 25 model.relation_dim 0.0 +920 25 model.scoring_fct_norm 2.0 +920 25 loss.margin 9.467942222325174 +920 25 loss.adversarial_temperature 0.2840975744784103 +920 25 negative_sampler.num_negs_per_pos 82.0 +920 25 training.batch_size 0.0 +920 26 model.embedding_dim 1.0 +920 26 model.relation_dim 2.0 +920 26 model.scoring_fct_norm 2.0 +920 26 loss.margin 14.705386431923298 +920 26 loss.adversarial_temperature 0.22456313588792592 +920 26 negative_sampler.num_negs_per_pos 75.0 +920 26 training.batch_size 1.0 +920 27 model.embedding_dim 2.0 +920 27 model.relation_dim 1.0 +920 27 model.scoring_fct_norm 1.0 +920 27 loss.margin 1.0509649049039433 +920 27 loss.adversarial_temperature 0.47060666163324494 +920 27 negative_sampler.num_negs_per_pos 66.0 
+920 27 training.batch_size 1.0 +920 28 model.embedding_dim 2.0 +920 28 model.relation_dim 0.0 +920 28 model.scoring_fct_norm 2.0 +920 28 loss.margin 20.531078556814236 +920 28 loss.adversarial_temperature 0.5799490352952859 +920 28 negative_sampler.num_negs_per_pos 40.0 +920 28 training.batch_size 2.0 +920 29 model.embedding_dim 1.0 +920 29 model.relation_dim 1.0 +920 29 model.scoring_fct_norm 1.0 +920 29 loss.margin 1.8012015673612638 +920 29 loss.adversarial_temperature 0.1339931910583732 +920 29 negative_sampler.num_negs_per_pos 94.0 +920 29 training.batch_size 0.0 +920 30 model.embedding_dim 2.0 +920 30 model.relation_dim 0.0 +920 30 model.scoring_fct_norm 1.0 +920 30 loss.margin 6.215007240454115 +920 30 loss.adversarial_temperature 0.1169735859300009 +920 30 negative_sampler.num_negs_per_pos 20.0 +920 30 training.batch_size 0.0 +920 31 model.embedding_dim 1.0 +920 31 model.relation_dim 2.0 +920 31 model.scoring_fct_norm 2.0 +920 31 loss.margin 2.507833035910487 +920 31 loss.adversarial_temperature 0.632903465499457 +920 31 negative_sampler.num_negs_per_pos 56.0 +920 31 training.batch_size 2.0 +920 32 model.embedding_dim 1.0 +920 32 model.relation_dim 1.0 +920 32 model.scoring_fct_norm 1.0 +920 32 loss.margin 20.10896313629593 +920 32 loss.adversarial_temperature 0.12535183673130657 +920 32 negative_sampler.num_negs_per_pos 12.0 +920 32 training.batch_size 0.0 +920 33 model.embedding_dim 2.0 +920 33 model.relation_dim 1.0 +920 33 model.scoring_fct_norm 2.0 +920 33 loss.margin 17.049389743512897 +920 33 loss.adversarial_temperature 0.5542975035276436 +920 33 negative_sampler.num_negs_per_pos 17.0 +920 33 training.batch_size 0.0 +920 34 model.embedding_dim 2.0 +920 34 model.relation_dim 0.0 +920 34 model.scoring_fct_norm 1.0 +920 34 loss.margin 21.564979899475535 +920 34 loss.adversarial_temperature 0.6979365216999907 +920 34 negative_sampler.num_negs_per_pos 21.0 +920 34 training.batch_size 0.0 +920 35 model.embedding_dim 0.0 +920 35 model.relation_dim 0.0 
+920 35 model.scoring_fct_norm 2.0 +920 35 loss.margin 3.6485812301653064 +920 35 loss.adversarial_temperature 0.6412343709514273 +920 35 negative_sampler.num_negs_per_pos 71.0 +920 35 training.batch_size 1.0 +920 36 model.embedding_dim 0.0 +920 36 model.relation_dim 1.0 +920 36 model.scoring_fct_norm 1.0 +920 36 loss.margin 23.88237913327562 +920 36 loss.adversarial_temperature 0.8379216862397679 +920 36 negative_sampler.num_negs_per_pos 43.0 +920 36 training.batch_size 2.0 +920 37 model.embedding_dim 0.0 +920 37 model.relation_dim 2.0 +920 37 model.scoring_fct_norm 1.0 +920 37 loss.margin 20.34462414574553 +920 37 loss.adversarial_temperature 0.3490728087751339 +920 37 negative_sampler.num_negs_per_pos 71.0 +920 37 training.batch_size 0.0 +920 38 model.embedding_dim 0.0 +920 38 model.relation_dim 0.0 +920 38 model.scoring_fct_norm 2.0 +920 38 loss.margin 23.055079047958 +920 38 loss.adversarial_temperature 0.972123080178193 +920 38 negative_sampler.num_negs_per_pos 99.0 +920 38 training.batch_size 0.0 +920 39 model.embedding_dim 2.0 +920 39 model.relation_dim 0.0 +920 39 model.scoring_fct_norm 2.0 +920 39 loss.margin 19.539427098565127 +920 39 loss.adversarial_temperature 0.3218813259734247 +920 39 negative_sampler.num_negs_per_pos 10.0 +920 39 training.batch_size 0.0 +920 40 model.embedding_dim 0.0 +920 40 model.relation_dim 0.0 +920 40 model.scoring_fct_norm 2.0 +920 40 loss.margin 15.058863604289161 +920 40 loss.adversarial_temperature 0.12604703665384415 +920 40 negative_sampler.num_negs_per_pos 86.0 +920 40 training.batch_size 2.0 +920 41 model.embedding_dim 1.0 +920 41 model.relation_dim 1.0 +920 41 model.scoring_fct_norm 1.0 +920 41 loss.margin 25.319287256501877 +920 41 loss.adversarial_temperature 0.5030268286785833 +920 41 negative_sampler.num_negs_per_pos 57.0 +920 41 training.batch_size 1.0 +920 42 model.embedding_dim 2.0 +920 42 model.relation_dim 1.0 +920 42 model.scoring_fct_norm 1.0 +920 42 loss.margin 26.45537183392157 +920 42 
loss.adversarial_temperature 0.48387147648527584 +920 42 negative_sampler.num_negs_per_pos 81.0 +920 42 training.batch_size 2.0 +920 43 model.embedding_dim 0.0 +920 43 model.relation_dim 0.0 +920 43 model.scoring_fct_norm 2.0 +920 43 loss.margin 20.434517429788926 +920 43 loss.adversarial_temperature 0.813112570292541 +920 43 negative_sampler.num_negs_per_pos 23.0 +920 43 training.batch_size 2.0 +920 44 model.embedding_dim 0.0 +920 44 model.relation_dim 2.0 +920 44 model.scoring_fct_norm 2.0 +920 44 loss.margin 25.308350361750133 +920 44 loss.adversarial_temperature 0.42070532994185805 +920 44 negative_sampler.num_negs_per_pos 35.0 +920 44 training.batch_size 1.0 +920 45 model.embedding_dim 1.0 +920 45 model.relation_dim 0.0 +920 45 model.scoring_fct_norm 2.0 +920 45 loss.margin 9.092751387791317 +920 45 loss.adversarial_temperature 0.243368500524302 +920 45 negative_sampler.num_negs_per_pos 62.0 +920 45 training.batch_size 2.0 +920 46 model.embedding_dim 2.0 +920 46 model.relation_dim 2.0 +920 46 model.scoring_fct_norm 1.0 +920 46 loss.margin 19.845223232245065 +920 46 loss.adversarial_temperature 0.31066531832856226 +920 46 negative_sampler.num_negs_per_pos 66.0 +920 46 training.batch_size 0.0 +920 47 model.embedding_dim 0.0 +920 47 model.relation_dim 0.0 +920 47 model.scoring_fct_norm 1.0 +920 47 loss.margin 2.046588328327353 +920 47 loss.adversarial_temperature 0.889552683581032 +920 47 negative_sampler.num_negs_per_pos 11.0 +920 47 training.batch_size 0.0 +920 48 model.embedding_dim 1.0 +920 48 model.relation_dim 1.0 +920 48 model.scoring_fct_norm 1.0 +920 48 loss.margin 14.411218823429975 +920 48 loss.adversarial_temperature 0.8069088854944592 +920 48 negative_sampler.num_negs_per_pos 97.0 +920 48 training.batch_size 1.0 +920 49 model.embedding_dim 1.0 +920 49 model.relation_dim 0.0 +920 49 model.scoring_fct_norm 1.0 +920 49 loss.margin 15.336124186461232 +920 49 loss.adversarial_temperature 0.8905621014315657 +920 49 negative_sampler.num_negs_per_pos 63.0 
+920 49 training.batch_size 0.0 +920 50 model.embedding_dim 2.0 +920 50 model.relation_dim 0.0 +920 50 model.scoring_fct_norm 2.0 +920 50 loss.margin 26.894844500341215 +920 50 loss.adversarial_temperature 0.9552768552208261 +920 50 negative_sampler.num_negs_per_pos 57.0 +920 50 training.batch_size 1.0 +920 51 model.embedding_dim 1.0 +920 51 model.relation_dim 0.0 +920 51 model.scoring_fct_norm 2.0 +920 51 loss.margin 13.25005790266801 +920 51 loss.adversarial_temperature 0.1801940266350127 +920 51 negative_sampler.num_negs_per_pos 25.0 +920 51 training.batch_size 1.0 +920 52 model.embedding_dim 0.0 +920 52 model.relation_dim 1.0 +920 52 model.scoring_fct_norm 1.0 +920 52 loss.margin 13.934280062071508 +920 52 loss.adversarial_temperature 0.691748306691379 +920 52 negative_sampler.num_negs_per_pos 27.0 +920 52 training.batch_size 1.0 +920 53 model.embedding_dim 0.0 +920 53 model.relation_dim 1.0 +920 53 model.scoring_fct_norm 1.0 +920 53 loss.margin 8.945450351160797 +920 53 loss.adversarial_temperature 0.8159419115671908 +920 53 negative_sampler.num_negs_per_pos 45.0 +920 53 training.batch_size 2.0 +920 54 model.embedding_dim 0.0 +920 54 model.relation_dim 2.0 +920 54 model.scoring_fct_norm 2.0 +920 54 loss.margin 29.441472239632652 +920 54 loss.adversarial_temperature 0.1329917864884157 +920 54 negative_sampler.num_negs_per_pos 38.0 +920 54 training.batch_size 1.0 +920 55 model.embedding_dim 2.0 +920 55 model.relation_dim 1.0 +920 55 model.scoring_fct_norm 2.0 +920 55 loss.margin 25.44717717440365 +920 55 loss.adversarial_temperature 0.9758295217945232 +920 55 negative_sampler.num_negs_per_pos 56.0 +920 55 training.batch_size 2.0 +920 56 model.embedding_dim 0.0 +920 56 model.relation_dim 0.0 +920 56 model.scoring_fct_norm 1.0 +920 56 loss.margin 6.484655422557513 +920 56 loss.adversarial_temperature 0.10598330447033977 +920 56 negative_sampler.num_negs_per_pos 39.0 +920 56 training.batch_size 0.0 +920 57 model.embedding_dim 0.0 +920 57 model.relation_dim 1.0 +920 
57 model.scoring_fct_norm 1.0 +920 57 loss.margin 17.26611939336328 +920 57 loss.adversarial_temperature 0.4567845720006788 +920 57 negative_sampler.num_negs_per_pos 1.0 +920 57 training.batch_size 0.0 +920 58 model.embedding_dim 2.0 +920 58 model.relation_dim 2.0 +920 58 model.scoring_fct_norm 1.0 +920 58 loss.margin 12.534189984883083 +920 58 loss.adversarial_temperature 0.9706067452109225 +920 58 negative_sampler.num_negs_per_pos 68.0 +920 58 training.batch_size 0.0 +920 59 model.embedding_dim 2.0 +920 59 model.relation_dim 0.0 +920 59 model.scoring_fct_norm 2.0 +920 59 loss.margin 17.738677496901957 +920 59 loss.adversarial_temperature 0.5244823232482374 +920 59 negative_sampler.num_negs_per_pos 25.0 +920 59 training.batch_size 2.0 +920 60 model.embedding_dim 1.0 +920 60 model.relation_dim 1.0 +920 60 model.scoring_fct_norm 2.0 +920 60 loss.margin 20.570094325816623 +920 60 loss.adversarial_temperature 0.17683459975006965 +920 60 negative_sampler.num_negs_per_pos 39.0 +920 60 training.batch_size 1.0 +920 61 model.embedding_dim 2.0 +920 61 model.relation_dim 2.0 +920 61 model.scoring_fct_norm 2.0 +920 61 loss.margin 22.984422141202458 +920 61 loss.adversarial_temperature 0.12299470514444799 +920 61 negative_sampler.num_negs_per_pos 91.0 +920 61 training.batch_size 1.0 +920 62 model.embedding_dim 1.0 +920 62 model.relation_dim 0.0 +920 62 model.scoring_fct_norm 2.0 +920 62 loss.margin 17.187006212121833 +920 62 loss.adversarial_temperature 0.5138589668297991 +920 62 negative_sampler.num_negs_per_pos 17.0 +920 62 training.batch_size 0.0 +920 63 model.embedding_dim 0.0 +920 63 model.relation_dim 0.0 +920 63 model.scoring_fct_norm 2.0 +920 63 loss.margin 1.7010736805639055 +920 63 loss.adversarial_temperature 0.6923606996987965 +920 63 negative_sampler.num_negs_per_pos 52.0 +920 63 training.batch_size 2.0 +920 64 model.embedding_dim 2.0 +920 64 model.relation_dim 0.0 +920 64 model.scoring_fct_norm 1.0 +920 64 loss.margin 11.609287446835129 +920 64 
loss.adversarial_temperature 0.3276425812009761 +920 64 negative_sampler.num_negs_per_pos 91.0 +920 64 training.batch_size 1.0 +920 65 model.embedding_dim 2.0 +920 65 model.relation_dim 1.0 +920 65 model.scoring_fct_norm 1.0 +920 65 loss.margin 6.784997995415289 +920 65 loss.adversarial_temperature 0.16008153158136185 +920 65 negative_sampler.num_negs_per_pos 50.0 +920 65 training.batch_size 2.0 +920 66 model.embedding_dim 2.0 +920 66 model.relation_dim 1.0 +920 66 model.scoring_fct_norm 1.0 +920 66 loss.margin 23.159441837466716 +920 66 loss.adversarial_temperature 0.7947004989640718 +920 66 negative_sampler.num_negs_per_pos 41.0 +920 66 training.batch_size 0.0 +920 67 model.embedding_dim 1.0 +920 67 model.relation_dim 0.0 +920 67 model.scoring_fct_norm 2.0 +920 67 loss.margin 17.131053752766935 +920 67 loss.adversarial_temperature 0.19078435216945583 +920 67 negative_sampler.num_negs_per_pos 35.0 +920 67 training.batch_size 0.0 +920 68 model.embedding_dim 2.0 +920 68 model.relation_dim 0.0 +920 68 model.scoring_fct_norm 1.0 +920 68 loss.margin 6.765035134560444 +920 68 loss.adversarial_temperature 0.38499253426570834 +920 68 negative_sampler.num_negs_per_pos 58.0 +920 68 training.batch_size 0.0 +920 69 model.embedding_dim 2.0 +920 69 model.relation_dim 0.0 +920 69 model.scoring_fct_norm 2.0 +920 69 loss.margin 27.150801901913695 +920 69 loss.adversarial_temperature 0.5793789737895453 +920 69 negative_sampler.num_negs_per_pos 9.0 +920 69 training.batch_size 1.0 +920 70 model.embedding_dim 1.0 +920 70 model.relation_dim 1.0 +920 70 model.scoring_fct_norm 1.0 +920 70 loss.margin 27.420783853897696 +920 70 loss.adversarial_temperature 0.3865278538090049 +920 70 negative_sampler.num_negs_per_pos 91.0 +920 70 training.batch_size 0.0 +920 71 model.embedding_dim 2.0 +920 71 model.relation_dim 0.0 +920 71 model.scoring_fct_norm 1.0 +920 71 loss.margin 2.664318499784791 +920 71 loss.adversarial_temperature 0.5609319332458403 +920 71 negative_sampler.num_negs_per_pos 78.0 
+920 71 training.batch_size 0.0 +920 72 model.embedding_dim 2.0 +920 72 model.relation_dim 2.0 +920 72 model.scoring_fct_norm 2.0 +920 72 loss.margin 26.169821627145243 +920 72 loss.adversarial_temperature 0.813872539393031 +920 72 negative_sampler.num_negs_per_pos 8.0 +920 72 training.batch_size 1.0 +920 73 model.embedding_dim 0.0 +920 73 model.relation_dim 1.0 +920 73 model.scoring_fct_norm 1.0 +920 73 loss.margin 26.490780096313767 +920 73 loss.adversarial_temperature 0.7804481419780457 +920 73 negative_sampler.num_negs_per_pos 97.0 +920 73 training.batch_size 0.0 +920 74 model.embedding_dim 0.0 +920 74 model.relation_dim 1.0 +920 74 model.scoring_fct_norm 2.0 +920 74 loss.margin 13.514544131316644 +920 74 loss.adversarial_temperature 0.9981945564135176 +920 74 negative_sampler.num_negs_per_pos 90.0 +920 74 training.batch_size 1.0 +920 75 model.embedding_dim 2.0 +920 75 model.relation_dim 0.0 +920 75 model.scoring_fct_norm 1.0 +920 75 loss.margin 12.011123937020761 +920 75 loss.adversarial_temperature 0.6788820329260153 +920 75 negative_sampler.num_negs_per_pos 2.0 +920 75 training.batch_size 0.0 +920 76 model.embedding_dim 1.0 +920 76 model.relation_dim 0.0 +920 76 model.scoring_fct_norm 2.0 +920 76 loss.margin 27.043115829437482 +920 76 loss.adversarial_temperature 0.7272359077880031 +920 76 negative_sampler.num_negs_per_pos 46.0 +920 76 training.batch_size 0.0 +920 77 model.embedding_dim 2.0 +920 77 model.relation_dim 0.0 +920 77 model.scoring_fct_norm 1.0 +920 77 loss.margin 7.4225654584584895 +920 77 loss.adversarial_temperature 0.450900710905633 +920 77 negative_sampler.num_negs_per_pos 82.0 +920 77 training.batch_size 1.0 +920 78 model.embedding_dim 0.0 +920 78 model.relation_dim 0.0 +920 78 model.scoring_fct_norm 1.0 +920 78 loss.margin 17.903346005376946 +920 78 loss.adversarial_temperature 0.4747165588769282 +920 78 negative_sampler.num_negs_per_pos 65.0 +920 78 training.batch_size 2.0 +920 79 model.embedding_dim 0.0 +920 79 model.relation_dim 0.0 +920 
79 model.scoring_fct_norm 2.0 +920 79 loss.margin 4.4545757015966405 +920 79 loss.adversarial_temperature 0.922210651808937 +920 79 negative_sampler.num_negs_per_pos 36.0 +920 79 training.batch_size 0.0 +920 80 model.embedding_dim 0.0 +920 80 model.relation_dim 1.0 +920 80 model.scoring_fct_norm 2.0 +920 80 loss.margin 11.609039989074896 +920 80 loss.adversarial_temperature 0.21617170315000228 +920 80 negative_sampler.num_negs_per_pos 16.0 +920 80 training.batch_size 1.0 +920 81 model.embedding_dim 0.0 +920 81 model.relation_dim 0.0 +920 81 model.scoring_fct_norm 1.0 +920 81 loss.margin 21.827740325662177 +920 81 loss.adversarial_temperature 0.8668071352152041 +920 81 negative_sampler.num_negs_per_pos 9.0 +920 81 training.batch_size 2.0 +920 82 model.embedding_dim 2.0 +920 82 model.relation_dim 0.0 +920 82 model.scoring_fct_norm 2.0 +920 82 loss.margin 19.949420038552336 +920 82 loss.adversarial_temperature 0.25457267417625895 +920 82 negative_sampler.num_negs_per_pos 8.0 +920 82 training.batch_size 0.0 +920 83 model.embedding_dim 1.0 +920 83 model.relation_dim 0.0 +920 83 model.scoring_fct_norm 1.0 +920 83 loss.margin 16.14095892512941 +920 83 loss.adversarial_temperature 0.8264555068678233 +920 83 negative_sampler.num_negs_per_pos 46.0 +920 83 training.batch_size 1.0 +920 84 model.embedding_dim 1.0 +920 84 model.relation_dim 1.0 +920 84 model.scoring_fct_norm 2.0 +920 84 loss.margin 10.7539450012292 +920 84 loss.adversarial_temperature 0.3275897144190474 +920 84 negative_sampler.num_negs_per_pos 55.0 +920 84 training.batch_size 1.0 +920 85 model.embedding_dim 0.0 +920 85 model.relation_dim 1.0 +920 85 model.scoring_fct_norm 1.0 +920 85 loss.margin 18.68135895588943 +920 85 loss.adversarial_temperature 0.8756938983763722 +920 85 negative_sampler.num_negs_per_pos 73.0 +920 85 training.batch_size 1.0 +920 86 model.embedding_dim 0.0 +920 86 model.relation_dim 1.0 +920 86 model.scoring_fct_norm 1.0 +920 86 loss.margin 17.677924186794858 +920 86 
loss.adversarial_temperature 0.23404577921191846 +920 86 negative_sampler.num_negs_per_pos 28.0 +920 86 training.batch_size 1.0 +920 87 model.embedding_dim 1.0 +920 87 model.relation_dim 2.0 +920 87 model.scoring_fct_norm 1.0 +920 87 loss.margin 28.71240532573132 +920 87 loss.adversarial_temperature 0.27419474227578156 +920 87 negative_sampler.num_negs_per_pos 95.0 +920 87 training.batch_size 2.0 +920 88 model.embedding_dim 0.0 +920 88 model.relation_dim 1.0 +920 88 model.scoring_fct_norm 2.0 +920 88 loss.margin 13.311134381401338 +920 88 loss.adversarial_temperature 0.9350383909950787 +920 88 negative_sampler.num_negs_per_pos 63.0 +920 88 training.batch_size 1.0 +920 89 model.embedding_dim 0.0 +920 89 model.relation_dim 1.0 +920 89 model.scoring_fct_norm 1.0 +920 89 loss.margin 1.380848179795814 +920 89 loss.adversarial_temperature 0.8241044858052924 +920 89 negative_sampler.num_negs_per_pos 95.0 +920 89 training.batch_size 2.0 +920 90 model.embedding_dim 2.0 +920 90 model.relation_dim 1.0 +920 90 model.scoring_fct_norm 1.0 +920 90 loss.margin 24.102021510701285 +920 90 loss.adversarial_temperature 0.6017937195282935 +920 90 negative_sampler.num_negs_per_pos 61.0 +920 90 training.batch_size 1.0 +920 91 model.embedding_dim 1.0 +920 91 model.relation_dim 2.0 +920 91 model.scoring_fct_norm 2.0 +920 91 loss.margin 5.697997663696188 +920 91 loss.adversarial_temperature 0.7949140664999403 +920 91 negative_sampler.num_negs_per_pos 30.0 +920 91 training.batch_size 2.0 +920 92 model.embedding_dim 1.0 +920 92 model.relation_dim 0.0 +920 92 model.scoring_fct_norm 2.0 +920 92 loss.margin 29.8262826134688 +920 92 loss.adversarial_temperature 0.27734778731827586 +920 92 negative_sampler.num_negs_per_pos 48.0 +920 92 training.batch_size 2.0 +920 93 model.embedding_dim 2.0 +920 93 model.relation_dim 0.0 +920 93 model.scoring_fct_norm 1.0 +920 93 loss.margin 5.2212627806512995 +920 93 loss.adversarial_temperature 0.5072312410572732 +920 93 negative_sampler.num_negs_per_pos 78.0 
+920 93 training.batch_size 2.0 +920 94 model.embedding_dim 1.0 +920 94 model.relation_dim 1.0 +920 94 model.scoring_fct_norm 1.0 +920 94 loss.margin 9.79925628748241 +920 94 loss.adversarial_temperature 0.6118885963546479 +920 94 negative_sampler.num_negs_per_pos 56.0 +920 94 training.batch_size 1.0 +920 95 model.embedding_dim 2.0 +920 95 model.relation_dim 2.0 +920 95 model.scoring_fct_norm 2.0 +920 95 loss.margin 21.756001821768688 +920 95 loss.adversarial_temperature 0.610587707471323 +920 95 negative_sampler.num_negs_per_pos 55.0 +920 95 training.batch_size 1.0 +920 96 model.embedding_dim 1.0 +920 96 model.relation_dim 2.0 +920 96 model.scoring_fct_norm 1.0 +920 96 loss.margin 21.24502832302002 +920 96 loss.adversarial_temperature 0.3220752955385948 +920 96 negative_sampler.num_negs_per_pos 96.0 +920 96 training.batch_size 2.0 +920 97 model.embedding_dim 2.0 +920 97 model.relation_dim 1.0 +920 97 model.scoring_fct_norm 2.0 +920 97 loss.margin 19.1536870757418 +920 97 loss.adversarial_temperature 0.2539323286817214 +920 97 negative_sampler.num_negs_per_pos 94.0 +920 97 training.batch_size 1.0 +920 98 model.embedding_dim 2.0 +920 98 model.relation_dim 1.0 +920 98 model.scoring_fct_norm 2.0 +920 98 loss.margin 8.757291121953186 +920 98 loss.adversarial_temperature 0.8087986445302346 +920 98 negative_sampler.num_negs_per_pos 19.0 +920 98 training.batch_size 0.0 +920 99 model.embedding_dim 1.0 +920 99 model.relation_dim 0.0 +920 99 model.scoring_fct_norm 2.0 +920 99 loss.margin 19.44874284615795 +920 99 loss.adversarial_temperature 0.23327510891823475 +920 99 negative_sampler.num_negs_per_pos 59.0 +920 99 training.batch_size 0.0 +920 100 model.embedding_dim 0.0 +920 100 model.relation_dim 1.0 +920 100 model.scoring_fct_norm 1.0 +920 100 loss.margin 17.391786953707832 +920 100 loss.adversarial_temperature 0.5642023459396729 +920 100 negative_sampler.num_negs_per_pos 70.0 +920 100 training.batch_size 2.0 +920 1 dataset """kinships""" +920 1 model """transr""" +920 1 
loss """nssa""" +920 1 regularizer """no""" +920 1 optimizer """adadelta""" +920 1 training_loop """owa""" +920 1 negative_sampler """basic""" +920 1 evaluator """rankbased""" +920 2 dataset """kinships""" +920 2 model """transr""" +920 2 loss """nssa""" +920 2 regularizer """no""" +920 2 optimizer """adadelta""" +920 2 training_loop """owa""" +920 2 negative_sampler """basic""" +920 2 evaluator """rankbased""" +920 3 dataset """kinships""" +920 3 model """transr""" +920 3 loss """nssa""" +920 3 regularizer """no""" +920 3 optimizer """adadelta""" +920 3 training_loop """owa""" +920 3 negative_sampler """basic""" +920 3 evaluator """rankbased""" +920 4 dataset """kinships""" +920 4 model """transr""" +920 4 loss """nssa""" +920 4 regularizer """no""" +920 4 optimizer """adadelta""" +920 4 training_loop """owa""" +920 4 negative_sampler """basic""" +920 4 evaluator """rankbased""" +920 5 dataset """kinships""" +920 5 model """transr""" +920 5 loss """nssa""" +920 5 regularizer """no""" +920 5 optimizer """adadelta""" +920 5 training_loop """owa""" +920 5 negative_sampler """basic""" +920 5 evaluator """rankbased""" +920 6 dataset """kinships""" +920 6 model """transr""" +920 6 loss """nssa""" +920 6 regularizer """no""" +920 6 optimizer """adadelta""" +920 6 training_loop """owa""" +920 6 negative_sampler """basic""" +920 6 evaluator """rankbased""" +920 7 dataset """kinships""" +920 7 model """transr""" +920 7 loss """nssa""" +920 7 regularizer """no""" +920 7 optimizer """adadelta""" +920 7 training_loop """owa""" +920 7 negative_sampler """basic""" +920 7 evaluator """rankbased""" +920 8 dataset """kinships""" +920 8 model """transr""" +920 8 loss """nssa""" +920 8 regularizer """no""" +920 8 optimizer """adadelta""" +920 8 training_loop """owa""" +920 8 negative_sampler """basic""" +920 8 evaluator """rankbased""" +920 9 dataset """kinships""" +920 9 model """transr""" +920 9 loss """nssa""" +920 9 regularizer """no""" +920 9 optimizer """adadelta""" +920 9 
training_loop """owa""" +920 9 negative_sampler """basic""" +920 9 evaluator """rankbased""" +920 10 dataset """kinships""" +920 10 model """transr""" +920 10 loss """nssa""" +920 10 regularizer """no""" +920 10 optimizer """adadelta""" +920 10 training_loop """owa""" +920 10 negative_sampler """basic""" +920 10 evaluator """rankbased""" +920 11 dataset """kinships""" +920 11 model """transr""" +920 11 loss """nssa""" +920 11 regularizer """no""" +920 11 optimizer """adadelta""" +920 11 training_loop """owa""" +920 11 negative_sampler """basic""" +920 11 evaluator """rankbased""" +920 12 dataset """kinships""" +920 12 model """transr""" +920 12 loss """nssa""" +920 12 regularizer """no""" +920 12 optimizer """adadelta""" +920 12 training_loop """owa""" +920 12 negative_sampler """basic""" +920 12 evaluator """rankbased""" +920 13 dataset """kinships""" +920 13 model """transr""" +920 13 loss """nssa""" +920 13 regularizer """no""" +920 13 optimizer """adadelta""" +920 13 training_loop """owa""" +920 13 negative_sampler """basic""" +920 13 evaluator """rankbased""" +920 14 dataset """kinships""" +920 14 model """transr""" +920 14 loss """nssa""" +920 14 regularizer """no""" +920 14 optimizer """adadelta""" +920 14 training_loop """owa""" +920 14 negative_sampler """basic""" +920 14 evaluator """rankbased""" +920 15 dataset """kinships""" +920 15 model """transr""" +920 15 loss """nssa""" +920 15 regularizer """no""" +920 15 optimizer """adadelta""" +920 15 training_loop """owa""" +920 15 negative_sampler """basic""" +920 15 evaluator """rankbased""" +920 16 dataset """kinships""" +920 16 model """transr""" +920 16 loss """nssa""" +920 16 regularizer """no""" +920 16 optimizer """adadelta""" +920 16 training_loop """owa""" +920 16 negative_sampler """basic""" +920 16 evaluator """rankbased""" +920 17 dataset """kinships""" +920 17 model """transr""" +920 17 loss """nssa""" +920 17 regularizer """no""" +920 17 optimizer """adadelta""" +920 17 training_loop """owa""" 
+920 17 negative_sampler """basic""" +920 17 evaluator """rankbased""" +920 18 dataset """kinships""" +920 18 model """transr""" +920 18 loss """nssa""" +920 18 regularizer """no""" +920 18 optimizer """adadelta""" +920 18 training_loop """owa""" +920 18 negative_sampler """basic""" +920 18 evaluator """rankbased""" +920 19 dataset """kinships""" +920 19 model """transr""" +920 19 loss """nssa""" +920 19 regularizer """no""" +920 19 optimizer """adadelta""" +920 19 training_loop """owa""" +920 19 negative_sampler """basic""" +920 19 evaluator """rankbased""" +920 20 dataset """kinships""" +920 20 model """transr""" +920 20 loss """nssa""" +920 20 regularizer """no""" +920 20 optimizer """adadelta""" +920 20 training_loop """owa""" +920 20 negative_sampler """basic""" +920 20 evaluator """rankbased""" +920 21 dataset """kinships""" +920 21 model """transr""" +920 21 loss """nssa""" +920 21 regularizer """no""" +920 21 optimizer """adadelta""" +920 21 training_loop """owa""" +920 21 negative_sampler """basic""" +920 21 evaluator """rankbased""" +920 22 dataset """kinships""" +920 22 model """transr""" +920 22 loss """nssa""" +920 22 regularizer """no""" +920 22 optimizer """adadelta""" +920 22 training_loop """owa""" +920 22 negative_sampler """basic""" +920 22 evaluator """rankbased""" +920 23 dataset """kinships""" +920 23 model """transr""" +920 23 loss """nssa""" +920 23 regularizer """no""" +920 23 optimizer """adadelta""" +920 23 training_loop """owa""" +920 23 negative_sampler """basic""" +920 23 evaluator """rankbased""" +920 24 dataset """kinships""" +920 24 model """transr""" +920 24 loss """nssa""" +920 24 regularizer """no""" +920 24 optimizer """adadelta""" +920 24 training_loop """owa""" +920 24 negative_sampler """basic""" +920 24 evaluator """rankbased""" +920 25 dataset """kinships""" +920 25 model """transr""" +920 25 loss """nssa""" +920 25 regularizer """no""" +920 25 optimizer """adadelta""" +920 25 training_loop """owa""" +920 25 
negative_sampler """basic""" +920 25 evaluator """rankbased""" +920 26 dataset """kinships""" +920 26 model """transr""" +920 26 loss """nssa""" +920 26 regularizer """no""" +920 26 optimizer """adadelta""" +920 26 training_loop """owa""" +920 26 negative_sampler """basic""" +920 26 evaluator """rankbased""" +920 27 dataset """kinships""" +920 27 model """transr""" +920 27 loss """nssa""" +920 27 regularizer """no""" +920 27 optimizer """adadelta""" +920 27 training_loop """owa""" +920 27 negative_sampler """basic""" +920 27 evaluator """rankbased""" +920 28 dataset """kinships""" +920 28 model """transr""" +920 28 loss """nssa""" +920 28 regularizer """no""" +920 28 optimizer """adadelta""" +920 28 training_loop """owa""" +920 28 negative_sampler """basic""" +920 28 evaluator """rankbased""" +920 29 dataset """kinships""" +920 29 model """transr""" +920 29 loss """nssa""" +920 29 regularizer """no""" +920 29 optimizer """adadelta""" +920 29 training_loop """owa""" +920 29 negative_sampler """basic""" +920 29 evaluator """rankbased""" +920 30 dataset """kinships""" +920 30 model """transr""" +920 30 loss """nssa""" +920 30 regularizer """no""" +920 30 optimizer """adadelta""" +920 30 training_loop """owa""" +920 30 negative_sampler """basic""" +920 30 evaluator """rankbased""" +920 31 dataset """kinships""" +920 31 model """transr""" +920 31 loss """nssa""" +920 31 regularizer """no""" +920 31 optimizer """adadelta""" +920 31 training_loop """owa""" +920 31 negative_sampler """basic""" +920 31 evaluator """rankbased""" +920 32 dataset """kinships""" +920 32 model """transr""" +920 32 loss """nssa""" +920 32 regularizer """no""" +920 32 optimizer """adadelta""" +920 32 training_loop """owa""" +920 32 negative_sampler """basic""" +920 32 evaluator """rankbased""" +920 33 dataset """kinships""" +920 33 model """transr""" +920 33 loss """nssa""" +920 33 regularizer """no""" +920 33 optimizer """adadelta""" +920 33 training_loop """owa""" +920 33 negative_sampler 
"""basic""" +920 33 evaluator """rankbased""" +920 34 dataset """kinships""" +920 34 model """transr""" +920 34 loss """nssa""" +920 34 regularizer """no""" +920 34 optimizer """adadelta""" +920 34 training_loop """owa""" +920 34 negative_sampler """basic""" +920 34 evaluator """rankbased""" +920 35 dataset """kinships""" +920 35 model """transr""" +920 35 loss """nssa""" +920 35 regularizer """no""" +920 35 optimizer """adadelta""" +920 35 training_loop """owa""" +920 35 negative_sampler """basic""" +920 35 evaluator """rankbased""" +920 36 dataset """kinships""" +920 36 model """transr""" +920 36 loss """nssa""" +920 36 regularizer """no""" +920 36 optimizer """adadelta""" +920 36 training_loop """owa""" +920 36 negative_sampler """basic""" +920 36 evaluator """rankbased""" +920 37 dataset """kinships""" +920 37 model """transr""" +920 37 loss """nssa""" +920 37 regularizer """no""" +920 37 optimizer """adadelta""" +920 37 training_loop """owa""" +920 37 negative_sampler """basic""" +920 37 evaluator """rankbased""" +920 38 dataset """kinships""" +920 38 model """transr""" +920 38 loss """nssa""" +920 38 regularizer """no""" +920 38 optimizer """adadelta""" +920 38 training_loop """owa""" +920 38 negative_sampler """basic""" +920 38 evaluator """rankbased""" +920 39 dataset """kinships""" +920 39 model """transr""" +920 39 loss """nssa""" +920 39 regularizer """no""" +920 39 optimizer """adadelta""" +920 39 training_loop """owa""" +920 39 negative_sampler """basic""" +920 39 evaluator """rankbased""" +920 40 dataset """kinships""" +920 40 model """transr""" +920 40 loss """nssa""" +920 40 regularizer """no""" +920 40 optimizer """adadelta""" +920 40 training_loop """owa""" +920 40 negative_sampler """basic""" +920 40 evaluator """rankbased""" +920 41 dataset """kinships""" +920 41 model """transr""" +920 41 loss """nssa""" +920 41 regularizer """no""" +920 41 optimizer """adadelta""" +920 41 training_loop """owa""" +920 41 negative_sampler """basic""" +920 41 
evaluator """rankbased""" +920 42 dataset """kinships""" +920 42 model """transr""" +920 42 loss """nssa""" +920 42 regularizer """no""" +920 42 optimizer """adadelta""" +920 42 training_loop """owa""" +920 42 negative_sampler """basic""" +920 42 evaluator """rankbased""" +920 43 dataset """kinships""" +920 43 model """transr""" +920 43 loss """nssa""" +920 43 regularizer """no""" +920 43 optimizer """adadelta""" +920 43 training_loop """owa""" +920 43 negative_sampler """basic""" +920 43 evaluator """rankbased""" +920 44 dataset """kinships""" +920 44 model """transr""" +920 44 loss """nssa""" +920 44 regularizer """no""" +920 44 optimizer """adadelta""" +920 44 training_loop """owa""" +920 44 negative_sampler """basic""" +920 44 evaluator """rankbased""" +920 45 dataset """kinships""" +920 45 model """transr""" +920 45 loss """nssa""" +920 45 regularizer """no""" +920 45 optimizer """adadelta""" +920 45 training_loop """owa""" +920 45 negative_sampler """basic""" +920 45 evaluator """rankbased""" +920 46 dataset """kinships""" +920 46 model """transr""" +920 46 loss """nssa""" +920 46 regularizer """no""" +920 46 optimizer """adadelta""" +920 46 training_loop """owa""" +920 46 negative_sampler """basic""" +920 46 evaluator """rankbased""" +920 47 dataset """kinships""" +920 47 model """transr""" +920 47 loss """nssa""" +920 47 regularizer """no""" +920 47 optimizer """adadelta""" +920 47 training_loop """owa""" +920 47 negative_sampler """basic""" +920 47 evaluator """rankbased""" +920 48 dataset """kinships""" +920 48 model """transr""" +920 48 loss """nssa""" +920 48 regularizer """no""" +920 48 optimizer """adadelta""" +920 48 training_loop """owa""" +920 48 negative_sampler """basic""" +920 48 evaluator """rankbased""" +920 49 dataset """kinships""" +920 49 model """transr""" +920 49 loss """nssa""" +920 49 regularizer """no""" +920 49 optimizer """adadelta""" +920 49 training_loop """owa""" +920 49 negative_sampler """basic""" +920 49 evaluator 
"""rankbased""" +920 50 dataset """kinships""" +920 50 model """transr""" +920 50 loss """nssa""" +920 50 regularizer """no""" +920 50 optimizer """adadelta""" +920 50 training_loop """owa""" +920 50 negative_sampler """basic""" +920 50 evaluator """rankbased""" +920 51 dataset """kinships""" +920 51 model """transr""" +920 51 loss """nssa""" +920 51 regularizer """no""" +920 51 optimizer """adadelta""" +920 51 training_loop """owa""" +920 51 negative_sampler """basic""" +920 51 evaluator """rankbased""" +920 52 dataset """kinships""" +920 52 model """transr""" +920 52 loss """nssa""" +920 52 regularizer """no""" +920 52 optimizer """adadelta""" +920 52 training_loop """owa""" +920 52 negative_sampler """basic""" +920 52 evaluator """rankbased""" +920 53 dataset """kinships""" +920 53 model """transr""" +920 53 loss """nssa""" +920 53 regularizer """no""" +920 53 optimizer """adadelta""" +920 53 training_loop """owa""" +920 53 negative_sampler """basic""" +920 53 evaluator """rankbased""" +920 54 dataset """kinships""" +920 54 model """transr""" +920 54 loss """nssa""" +920 54 regularizer """no""" +920 54 optimizer """adadelta""" +920 54 training_loop """owa""" +920 54 negative_sampler """basic""" +920 54 evaluator """rankbased""" +920 55 dataset """kinships""" +920 55 model """transr""" +920 55 loss """nssa""" +920 55 regularizer """no""" +920 55 optimizer """adadelta""" +920 55 training_loop """owa""" +920 55 negative_sampler """basic""" +920 55 evaluator """rankbased""" +920 56 dataset """kinships""" +920 56 model """transr""" +920 56 loss """nssa""" +920 56 regularizer """no""" +920 56 optimizer """adadelta""" +920 56 training_loop """owa""" +920 56 negative_sampler """basic""" +920 56 evaluator """rankbased""" +920 57 dataset """kinships""" +920 57 model """transr""" +920 57 loss """nssa""" +920 57 regularizer """no""" +920 57 optimizer """adadelta""" +920 57 training_loop """owa""" +920 57 negative_sampler """basic""" +920 57 evaluator """rankbased""" +920 58 
dataset """kinships""" +920 58 model """transr""" +920 58 loss """nssa""" +920 58 regularizer """no""" +920 58 optimizer """adadelta""" +920 58 training_loop """owa""" +920 58 negative_sampler """basic""" +920 58 evaluator """rankbased""" +920 59 dataset """kinships""" +920 59 model """transr""" +920 59 loss """nssa""" +920 59 regularizer """no""" +920 59 optimizer """adadelta""" +920 59 training_loop """owa""" +920 59 negative_sampler """basic""" +920 59 evaluator """rankbased""" +920 60 dataset """kinships""" +920 60 model """transr""" +920 60 loss """nssa""" +920 60 regularizer """no""" +920 60 optimizer """adadelta""" +920 60 training_loop """owa""" +920 60 negative_sampler """basic""" +920 60 evaluator """rankbased""" +920 61 dataset """kinships""" +920 61 model """transr""" +920 61 loss """nssa""" +920 61 regularizer """no""" +920 61 optimizer """adadelta""" +920 61 training_loop """owa""" +920 61 negative_sampler """basic""" +920 61 evaluator """rankbased""" +920 62 dataset """kinships""" +920 62 model """transr""" +920 62 loss """nssa""" +920 62 regularizer """no""" +920 62 optimizer """adadelta""" +920 62 training_loop """owa""" +920 62 negative_sampler """basic""" +920 62 evaluator """rankbased""" +920 63 dataset """kinships""" +920 63 model """transr""" +920 63 loss """nssa""" +920 63 regularizer """no""" +920 63 optimizer """adadelta""" +920 63 training_loop """owa""" +920 63 negative_sampler """basic""" +920 63 evaluator """rankbased""" +920 64 dataset """kinships""" +920 64 model """transr""" +920 64 loss """nssa""" +920 64 regularizer """no""" +920 64 optimizer """adadelta""" +920 64 training_loop """owa""" +920 64 negative_sampler """basic""" +920 64 evaluator """rankbased""" +920 65 dataset """kinships""" +920 65 model """transr""" +920 65 loss """nssa""" +920 65 regularizer """no""" +920 65 optimizer """adadelta""" +920 65 training_loop """owa""" +920 65 negative_sampler """basic""" +920 65 evaluator """rankbased""" +920 66 dataset """kinships""" 
+920 66 model """transr""" +920 66 loss """nssa""" +920 66 regularizer """no""" +920 66 optimizer """adadelta""" +920 66 training_loop """owa""" +920 66 negative_sampler """basic""" +920 66 evaluator """rankbased""" +920 67 dataset """kinships""" +920 67 model """transr""" +920 67 loss """nssa""" +920 67 regularizer """no""" +920 67 optimizer """adadelta""" +920 67 training_loop """owa""" +920 67 negative_sampler """basic""" +920 67 evaluator """rankbased""" +920 68 dataset """kinships""" +920 68 model """transr""" +920 68 loss """nssa""" +920 68 regularizer """no""" +920 68 optimizer """adadelta""" +920 68 training_loop """owa""" +920 68 negative_sampler """basic""" +920 68 evaluator """rankbased""" +920 69 dataset """kinships""" +920 69 model """transr""" +920 69 loss """nssa""" +920 69 regularizer """no""" +920 69 optimizer """adadelta""" +920 69 training_loop """owa""" +920 69 negative_sampler """basic""" +920 69 evaluator """rankbased""" +920 70 dataset """kinships""" +920 70 model """transr""" +920 70 loss """nssa""" +920 70 regularizer """no""" +920 70 optimizer """adadelta""" +920 70 training_loop """owa""" +920 70 negative_sampler """basic""" +920 70 evaluator """rankbased""" +920 71 dataset """kinships""" +920 71 model """transr""" +920 71 loss """nssa""" +920 71 regularizer """no""" +920 71 optimizer """adadelta""" +920 71 training_loop """owa""" +920 71 negative_sampler """basic""" +920 71 evaluator """rankbased""" +920 72 dataset """kinships""" +920 72 model """transr""" +920 72 loss """nssa""" +920 72 regularizer """no""" +920 72 optimizer """adadelta""" +920 72 training_loop """owa""" +920 72 negative_sampler """basic""" +920 72 evaluator """rankbased""" +920 73 dataset """kinships""" +920 73 model """transr""" +920 73 loss """nssa""" +920 73 regularizer """no""" +920 73 optimizer """adadelta""" +920 73 training_loop """owa""" +920 73 negative_sampler """basic""" +920 73 evaluator """rankbased""" +920 74 dataset """kinships""" +920 74 model 
"""transr""" +920 74 loss """nssa""" +920 74 regularizer """no""" +920 74 optimizer """adadelta""" +920 74 training_loop """owa""" +920 74 negative_sampler """basic""" +920 74 evaluator """rankbased""" +920 75 dataset """kinships""" +920 75 model """transr""" +920 75 loss """nssa""" +920 75 regularizer """no""" +920 75 optimizer """adadelta""" +920 75 training_loop """owa""" +920 75 negative_sampler """basic""" +920 75 evaluator """rankbased""" +920 76 dataset """kinships""" +920 76 model """transr""" +920 76 loss """nssa""" +920 76 regularizer """no""" +920 76 optimizer """adadelta""" +920 76 training_loop """owa""" +920 76 negative_sampler """basic""" +920 76 evaluator """rankbased""" +920 77 dataset """kinships""" +920 77 model """transr""" +920 77 loss """nssa""" +920 77 regularizer """no""" +920 77 optimizer """adadelta""" +920 77 training_loop """owa""" +920 77 negative_sampler """basic""" +920 77 evaluator """rankbased""" +920 78 dataset """kinships""" +920 78 model """transr""" +920 78 loss """nssa""" +920 78 regularizer """no""" +920 78 optimizer """adadelta""" +920 78 training_loop """owa""" +920 78 negative_sampler """basic""" +920 78 evaluator """rankbased""" +920 79 dataset """kinships""" +920 79 model """transr""" +920 79 loss """nssa""" +920 79 regularizer """no""" +920 79 optimizer """adadelta""" +920 79 training_loop """owa""" +920 79 negative_sampler """basic""" +920 79 evaluator """rankbased""" +920 80 dataset """kinships""" +920 80 model """transr""" +920 80 loss """nssa""" +920 80 regularizer """no""" +920 80 optimizer """adadelta""" +920 80 training_loop """owa""" +920 80 negative_sampler """basic""" +920 80 evaluator """rankbased""" +920 81 dataset """kinships""" +920 81 model """transr""" +920 81 loss """nssa""" +920 81 regularizer """no""" +920 81 optimizer """adadelta""" +920 81 training_loop """owa""" +920 81 negative_sampler """basic""" +920 81 evaluator """rankbased""" +920 82 dataset """kinships""" +920 82 model """transr""" +920 82 
loss """nssa""" +920 82 regularizer """no""" +920 82 optimizer """adadelta""" +920 82 training_loop """owa""" +920 82 negative_sampler """basic""" +920 82 evaluator """rankbased""" +920 83 dataset """kinships""" +920 83 model """transr""" +920 83 loss """nssa""" +920 83 regularizer """no""" +920 83 optimizer """adadelta""" +920 83 training_loop """owa""" +920 83 negative_sampler """basic""" +920 83 evaluator """rankbased""" +920 84 dataset """kinships""" +920 84 model """transr""" +920 84 loss """nssa""" +920 84 regularizer """no""" +920 84 optimizer """adadelta""" +920 84 training_loop """owa""" +920 84 negative_sampler """basic""" +920 84 evaluator """rankbased""" +920 85 dataset """kinships""" +920 85 model """transr""" +920 85 loss """nssa""" +920 85 regularizer """no""" +920 85 optimizer """adadelta""" +920 85 training_loop """owa""" +920 85 negative_sampler """basic""" +920 85 evaluator """rankbased""" +920 86 dataset """kinships""" +920 86 model """transr""" +920 86 loss """nssa""" +920 86 regularizer """no""" +920 86 optimizer """adadelta""" +920 86 training_loop """owa""" +920 86 negative_sampler """basic""" +920 86 evaluator """rankbased""" +920 87 dataset """kinships""" +920 87 model """transr""" +920 87 loss """nssa""" +920 87 regularizer """no""" +920 87 optimizer """adadelta""" +920 87 training_loop """owa""" +920 87 negative_sampler """basic""" +920 87 evaluator """rankbased""" +920 88 dataset """kinships""" +920 88 model """transr""" +920 88 loss """nssa""" +920 88 regularizer """no""" +920 88 optimizer """adadelta""" +920 88 training_loop """owa""" +920 88 negative_sampler """basic""" +920 88 evaluator """rankbased""" +920 89 dataset """kinships""" +920 89 model """transr""" +920 89 loss """nssa""" +920 89 regularizer """no""" +920 89 optimizer """adadelta""" +920 89 training_loop """owa""" +920 89 negative_sampler """basic""" +920 89 evaluator """rankbased""" +920 90 dataset """kinships""" +920 90 model """transr""" +920 90 loss """nssa""" +920 90 
regularizer """no""" +920 90 optimizer """adadelta""" +920 90 training_loop """owa""" +920 90 negative_sampler """basic""" +920 90 evaluator """rankbased""" +920 91 dataset """kinships""" +920 91 model """transr""" +920 91 loss """nssa""" +920 91 regularizer """no""" +920 91 optimizer """adadelta""" +920 91 training_loop """owa""" +920 91 negative_sampler """basic""" +920 91 evaluator """rankbased""" +920 92 dataset """kinships""" +920 92 model """transr""" +920 92 loss """nssa""" +920 92 regularizer """no""" +920 92 optimizer """adadelta""" +920 92 training_loop """owa""" +920 92 negative_sampler """basic""" +920 92 evaluator """rankbased""" +920 93 dataset """kinships""" +920 93 model """transr""" +920 93 loss """nssa""" +920 93 regularizer """no""" +920 93 optimizer """adadelta""" +920 93 training_loop """owa""" +920 93 negative_sampler """basic""" +920 93 evaluator """rankbased""" +920 94 dataset """kinships""" +920 94 model """transr""" +920 94 loss """nssa""" +920 94 regularizer """no""" +920 94 optimizer """adadelta""" +920 94 training_loop """owa""" +920 94 negative_sampler """basic""" +920 94 evaluator """rankbased""" +920 95 dataset """kinships""" +920 95 model """transr""" +920 95 loss """nssa""" +920 95 regularizer """no""" +920 95 optimizer """adadelta""" +920 95 training_loop """owa""" +920 95 negative_sampler """basic""" +920 95 evaluator """rankbased""" +920 96 dataset """kinships""" +920 96 model """transr""" +920 96 loss """nssa""" +920 96 regularizer """no""" +920 96 optimizer """adadelta""" +920 96 training_loop """owa""" +920 96 negative_sampler """basic""" +920 96 evaluator """rankbased""" +920 97 dataset """kinships""" +920 97 model """transr""" +920 97 loss """nssa""" +920 97 regularizer """no""" +920 97 optimizer """adadelta""" +920 97 training_loop """owa""" +920 97 negative_sampler """basic""" +920 97 evaluator """rankbased""" +920 98 dataset """kinships""" +920 98 model """transr""" +920 98 loss """nssa""" +920 98 regularizer """no""" 
+920 98 optimizer """adadelta""" +920 98 training_loop """owa""" +920 98 negative_sampler """basic""" +920 98 evaluator """rankbased""" +920 99 dataset """kinships""" +920 99 model """transr""" +920 99 loss """nssa""" +920 99 regularizer """no""" +920 99 optimizer """adadelta""" +920 99 training_loop """owa""" +920 99 negative_sampler """basic""" +920 99 evaluator """rankbased""" +920 100 dataset """kinships""" +920 100 model """transr""" +920 100 loss """nssa""" +920 100 regularizer """no""" +920 100 optimizer """adadelta""" +920 100 training_loop """owa""" +920 100 negative_sampler """basic""" +920 100 evaluator """rankbased""" +921 1 model.embedding_dim 2.0 +921 1 model.relation_dim 0.0 +921 1 model.scoring_fct_norm 1.0 +921 1 optimizer.lr 0.0011567080337539794 +921 1 training.batch_size 2.0 +921 1 training.label_smoothing 0.14744219064157635 +921 2 model.embedding_dim 2.0 +921 2 model.relation_dim 2.0 +921 2 model.scoring_fct_norm 1.0 +921 2 optimizer.lr 0.0037642133840224434 +921 2 training.batch_size 2.0 +921 2 training.label_smoothing 0.8689750886303953 +921 3 model.embedding_dim 1.0 +921 3 model.relation_dim 1.0 +921 3 model.scoring_fct_norm 1.0 +921 3 optimizer.lr 0.019570312895755202 +921 3 training.batch_size 1.0 +921 3 training.label_smoothing 0.03147534150277298 +921 4 model.embedding_dim 2.0 +921 4 model.relation_dim 2.0 +921 4 model.scoring_fct_norm 1.0 +921 4 optimizer.lr 0.009538068143901568 +921 4 training.batch_size 1.0 +921 4 training.label_smoothing 0.17052169513394336 +921 5 model.embedding_dim 1.0 +921 5 model.relation_dim 1.0 +921 5 model.scoring_fct_norm 1.0 +921 5 optimizer.lr 0.03339919185874197 +921 5 training.batch_size 2.0 +921 5 training.label_smoothing 0.07516789417714624 +921 6 model.embedding_dim 0.0 +921 6 model.relation_dim 0.0 +921 6 model.scoring_fct_norm 2.0 +921 6 optimizer.lr 0.030900013959560057 +921 6 training.batch_size 1.0 +921 6 training.label_smoothing 0.0046984077706897455 +921 7 model.embedding_dim 0.0 +921 7 
model.relation_dim 1.0 +921 7 model.scoring_fct_norm 2.0 +921 7 optimizer.lr 0.0011172459381901944 +921 7 training.batch_size 1.0 +921 7 training.label_smoothing 0.02566339008956478 +921 8 model.embedding_dim 1.0 +921 8 model.relation_dim 2.0 +921 8 model.scoring_fct_norm 1.0 +921 8 optimizer.lr 0.07141881018699266 +921 8 training.batch_size 2.0 +921 8 training.label_smoothing 0.04204021568215552 +921 9 model.embedding_dim 0.0 +921 9 model.relation_dim 1.0 +921 9 model.scoring_fct_norm 1.0 +921 9 optimizer.lr 0.0049553006416256 +921 9 training.batch_size 1.0 +921 9 training.label_smoothing 0.014184574762412402 +921 10 model.embedding_dim 2.0 +921 10 model.relation_dim 0.0 +921 10 model.scoring_fct_norm 1.0 +921 10 optimizer.lr 0.002724782569248924 +921 10 training.batch_size 0.0 +921 10 training.label_smoothing 0.0023547004711431244 +921 11 model.embedding_dim 0.0 +921 11 model.relation_dim 2.0 +921 11 model.scoring_fct_norm 1.0 +921 11 optimizer.lr 0.01882188422205117 +921 11 training.batch_size 1.0 +921 11 training.label_smoothing 0.002520449844260892 +921 12 model.embedding_dim 2.0 +921 12 model.relation_dim 0.0 +921 12 model.scoring_fct_norm 1.0 +921 12 optimizer.lr 0.03318211439507662 +921 12 training.batch_size 1.0 +921 12 training.label_smoothing 0.005993321822422007 +921 13 model.embedding_dim 1.0 +921 13 model.relation_dim 1.0 +921 13 model.scoring_fct_norm 2.0 +921 13 optimizer.lr 0.001315890127749991 +921 13 training.batch_size 1.0 +921 13 training.label_smoothing 0.004143286203062192 +921 14 model.embedding_dim 1.0 +921 14 model.relation_dim 2.0 +921 14 model.scoring_fct_norm 1.0 +921 14 optimizer.lr 0.022685826718116725 +921 14 training.batch_size 0.0 +921 14 training.label_smoothing 0.03235388285990884 +921 15 model.embedding_dim 2.0 +921 15 model.relation_dim 1.0 +921 15 model.scoring_fct_norm 1.0 +921 15 optimizer.lr 0.001574175836761978 +921 15 training.batch_size 0.0 +921 15 training.label_smoothing 0.0012370039138813133 +921 16 
model.embedding_dim 2.0 +921 16 model.relation_dim 1.0 +921 16 model.scoring_fct_norm 2.0 +921 16 optimizer.lr 0.09543282959454079 +921 16 training.batch_size 2.0 +921 16 training.label_smoothing 0.00182170217890299 +921 17 model.embedding_dim 2.0 +921 17 model.relation_dim 0.0 +921 17 model.scoring_fct_norm 1.0 +921 17 optimizer.lr 0.030451420900608185 +921 17 training.batch_size 0.0 +921 17 training.label_smoothing 0.005859433835847575 +921 18 model.embedding_dim 0.0 +921 18 model.relation_dim 1.0 +921 18 model.scoring_fct_norm 2.0 +921 18 optimizer.lr 0.011109114437577202 +921 18 training.batch_size 1.0 +921 18 training.label_smoothing 0.31820950296102224 +921 19 model.embedding_dim 1.0 +921 19 model.relation_dim 2.0 +921 19 model.scoring_fct_norm 1.0 +921 19 optimizer.lr 0.014048618726240003 +921 19 training.batch_size 2.0 +921 19 training.label_smoothing 0.38364197137135486 +921 20 model.embedding_dim 2.0 +921 20 model.relation_dim 0.0 +921 20 model.scoring_fct_norm 1.0 +921 20 optimizer.lr 0.0012972508328436727 +921 20 training.batch_size 1.0 +921 20 training.label_smoothing 0.8570946301573134 +921 21 model.embedding_dim 0.0 +921 21 model.relation_dim 0.0 +921 21 model.scoring_fct_norm 2.0 +921 21 optimizer.lr 0.017996090121259932 +921 21 training.batch_size 1.0 +921 21 training.label_smoothing 0.0013712784996945913 +921 22 model.embedding_dim 1.0 +921 22 model.relation_dim 2.0 +921 22 model.scoring_fct_norm 2.0 +921 22 optimizer.lr 0.0014328378588700937 +921 22 training.batch_size 1.0 +921 22 training.label_smoothing 0.023287011339751758 +921 23 model.embedding_dim 1.0 +921 23 model.relation_dim 1.0 +921 23 model.scoring_fct_norm 2.0 +921 23 optimizer.lr 0.0014019378893259037 +921 23 training.batch_size 2.0 +921 23 training.label_smoothing 0.8179983556014944 +921 24 model.embedding_dim 2.0 +921 24 model.relation_dim 1.0 +921 24 model.scoring_fct_norm 1.0 +921 24 optimizer.lr 0.006451586673334928 +921 24 training.batch_size 0.0 +921 24 
training.label_smoothing 0.012735829344032763 +921 25 model.embedding_dim 0.0 +921 25 model.relation_dim 1.0 +921 25 model.scoring_fct_norm 1.0 +921 25 optimizer.lr 0.012714817607665772 +921 25 training.batch_size 0.0 +921 25 training.label_smoothing 0.0038987192398841435 +921 26 model.embedding_dim 1.0 +921 26 model.relation_dim 1.0 +921 26 model.scoring_fct_norm 2.0 +921 26 optimizer.lr 0.00197912118435335 +921 26 training.batch_size 0.0 +921 26 training.label_smoothing 0.09215616957350803 +921 27 model.embedding_dim 2.0 +921 27 model.relation_dim 1.0 +921 27 model.scoring_fct_norm 1.0 +921 27 optimizer.lr 0.009761203523861553 +921 27 training.batch_size 0.0 +921 27 training.label_smoothing 0.27568986895786085 +921 28 model.embedding_dim 0.0 +921 28 model.relation_dim 1.0 +921 28 model.scoring_fct_norm 2.0 +921 28 optimizer.lr 0.047916101378903515 +921 28 training.batch_size 2.0 +921 28 training.label_smoothing 0.17583649417999087 +921 29 model.embedding_dim 0.0 +921 29 model.relation_dim 0.0 +921 29 model.scoring_fct_norm 2.0 +921 29 optimizer.lr 0.013057848941056858 +921 29 training.batch_size 1.0 +921 29 training.label_smoothing 0.0027740749868952713 +921 30 model.embedding_dim 2.0 +921 30 model.relation_dim 1.0 +921 30 model.scoring_fct_norm 1.0 +921 30 optimizer.lr 0.0706014153394114 +921 30 training.batch_size 1.0 +921 30 training.label_smoothing 0.005270460062250019 +921 31 model.embedding_dim 2.0 +921 31 model.relation_dim 0.0 +921 31 model.scoring_fct_norm 2.0 +921 31 optimizer.lr 0.004432052391674764 +921 31 training.batch_size 2.0 +921 31 training.label_smoothing 0.043125209309682576 +921 32 model.embedding_dim 1.0 +921 32 model.relation_dim 0.0 +921 32 model.scoring_fct_norm 1.0 +921 32 optimizer.lr 0.0029644499545404476 +921 32 training.batch_size 1.0 +921 32 training.label_smoothing 0.19468569907927763 +921 33 model.embedding_dim 2.0 +921 33 model.relation_dim 0.0 +921 33 model.scoring_fct_norm 2.0 +921 33 optimizer.lr 0.028333043875082196 +921 33 
training.batch_size 0.0 +921 33 training.label_smoothing 0.12068672193369845 +921 34 model.embedding_dim 2.0 +921 34 model.relation_dim 2.0 +921 34 model.scoring_fct_norm 2.0 +921 34 optimizer.lr 0.023729047275162497 +921 34 training.batch_size 2.0 +921 34 training.label_smoothing 0.35862320597271063 +921 35 model.embedding_dim 0.0 +921 35 model.relation_dim 0.0 +921 35 model.scoring_fct_norm 1.0 +921 35 optimizer.lr 0.0012449797947025197 +921 35 training.batch_size 0.0 +921 35 training.label_smoothing 0.005842613682872482 +921 36 model.embedding_dim 2.0 +921 36 model.relation_dim 1.0 +921 36 model.scoring_fct_norm 2.0 +921 36 optimizer.lr 0.0023713703301620616 +921 36 training.batch_size 1.0 +921 36 training.label_smoothing 0.0038668724109334842 +921 37 model.embedding_dim 1.0 +921 37 model.relation_dim 1.0 +921 37 model.scoring_fct_norm 2.0 +921 37 optimizer.lr 0.07131512407548896 +921 37 training.batch_size 1.0 +921 37 training.label_smoothing 0.011662486780134028 +921 38 model.embedding_dim 0.0 +921 38 model.relation_dim 2.0 +921 38 model.scoring_fct_norm 1.0 +921 38 optimizer.lr 0.005408124402701418 +921 38 training.batch_size 1.0 +921 38 training.label_smoothing 0.2609589318307765 +921 39 model.embedding_dim 0.0 +921 39 model.relation_dim 1.0 +921 39 model.scoring_fct_norm 1.0 +921 39 optimizer.lr 0.015434173124259114 +921 39 training.batch_size 0.0 +921 39 training.label_smoothing 0.0038769560037301117 +921 40 model.embedding_dim 0.0 +921 40 model.relation_dim 0.0 +921 40 model.scoring_fct_norm 1.0 +921 40 optimizer.lr 0.0015360080058335866 +921 40 training.batch_size 2.0 +921 40 training.label_smoothing 0.008001461528719158 +921 41 model.embedding_dim 0.0 +921 41 model.relation_dim 1.0 +921 41 model.scoring_fct_norm 2.0 +921 41 optimizer.lr 0.007974402477732512 +921 41 training.batch_size 0.0 +921 41 training.label_smoothing 0.38486006262904776 +921 42 model.embedding_dim 1.0 +921 42 model.relation_dim 0.0 +921 42 model.scoring_fct_norm 1.0 +921 42 
optimizer.lr 0.0308386220392548 +921 42 training.batch_size 0.0 +921 42 training.label_smoothing 0.02316051264204199 +921 43 model.embedding_dim 0.0 +921 43 model.relation_dim 2.0 +921 43 model.scoring_fct_norm 2.0 +921 43 optimizer.lr 0.005132555783315755 +921 43 training.batch_size 2.0 +921 43 training.label_smoothing 0.17345999624592906 +921 44 model.embedding_dim 2.0 +921 44 model.relation_dim 2.0 +921 44 model.scoring_fct_norm 1.0 +921 44 optimizer.lr 0.002857085098728618 +921 44 training.batch_size 1.0 +921 44 training.label_smoothing 0.4128668465435706 +921 45 model.embedding_dim 0.0 +921 45 model.relation_dim 2.0 +921 45 model.scoring_fct_norm 1.0 +921 45 optimizer.lr 0.0031053729159506776 +921 45 training.batch_size 0.0 +921 45 training.label_smoothing 0.0024195818662938857 +921 46 model.embedding_dim 0.0 +921 46 model.relation_dim 0.0 +921 46 model.scoring_fct_norm 1.0 +921 46 optimizer.lr 0.001077018290375909 +921 46 training.batch_size 2.0 +921 46 training.label_smoothing 0.0045363924090971956 +921 47 model.embedding_dim 0.0 +921 47 model.relation_dim 1.0 +921 47 model.scoring_fct_norm 1.0 +921 47 optimizer.lr 0.007748656045077999 +921 47 training.batch_size 2.0 +921 47 training.label_smoothing 0.0049261122299368645 +921 48 model.embedding_dim 0.0 +921 48 model.relation_dim 0.0 +921 48 model.scoring_fct_norm 1.0 +921 48 optimizer.lr 0.014508500964680962 +921 48 training.batch_size 0.0 +921 48 training.label_smoothing 0.8332725926744884 +921 49 model.embedding_dim 0.0 +921 49 model.relation_dim 0.0 +921 49 model.scoring_fct_norm 2.0 +921 49 optimizer.lr 0.013431752094641397 +921 49 training.batch_size 1.0 +921 49 training.label_smoothing 0.026249502969816502 +921 50 model.embedding_dim 0.0 +921 50 model.relation_dim 1.0 +921 50 model.scoring_fct_norm 1.0 +921 50 optimizer.lr 0.0016733346093791561 +921 50 training.batch_size 1.0 +921 50 training.label_smoothing 0.43156577133436635 +921 51 model.embedding_dim 1.0 +921 51 model.relation_dim 0.0 +921 51 
model.scoring_fct_norm 1.0 +921 51 optimizer.lr 0.0010872422590093912 +921 51 training.batch_size 2.0 +921 51 training.label_smoothing 0.012029368301050803 +921 52 model.embedding_dim 0.0 +921 52 model.relation_dim 0.0 +921 52 model.scoring_fct_norm 1.0 +921 52 optimizer.lr 0.0015960740353397893 +921 52 training.batch_size 2.0 +921 52 training.label_smoothing 0.011670405796962447 +921 53 model.embedding_dim 2.0 +921 53 model.relation_dim 2.0 +921 53 model.scoring_fct_norm 2.0 +921 53 optimizer.lr 0.016744131442720175 +921 53 training.batch_size 0.0 +921 53 training.label_smoothing 0.313360118922445 +921 54 model.embedding_dim 0.0 +921 54 model.relation_dim 2.0 +921 54 model.scoring_fct_norm 1.0 +921 54 optimizer.lr 0.0013016872452919978 +921 54 training.batch_size 0.0 +921 54 training.label_smoothing 0.026011603789970448 +921 55 model.embedding_dim 1.0 +921 55 model.relation_dim 0.0 +921 55 model.scoring_fct_norm 1.0 +921 55 optimizer.lr 0.0039757911017603495 +921 55 training.batch_size 2.0 +921 55 training.label_smoothing 0.14971338917630303 +921 56 model.embedding_dim 2.0 +921 56 model.relation_dim 2.0 +921 56 model.scoring_fct_norm 1.0 +921 56 optimizer.lr 0.0011074529877540327 +921 56 training.batch_size 0.0 +921 56 training.label_smoothing 0.2087689118185721 +921 57 model.embedding_dim 1.0 +921 57 model.relation_dim 1.0 +921 57 model.scoring_fct_norm 2.0 +921 57 optimizer.lr 0.07083894313738089 +921 57 training.batch_size 1.0 +921 57 training.label_smoothing 0.023287551280496078 +921 58 model.embedding_dim 0.0 +921 58 model.relation_dim 0.0 +921 58 model.scoring_fct_norm 2.0 +921 58 optimizer.lr 0.0015368828751779022 +921 58 training.batch_size 0.0 +921 58 training.label_smoothing 0.01292629223948142 +921 59 model.embedding_dim 1.0 +921 59 model.relation_dim 2.0 +921 59 model.scoring_fct_norm 1.0 +921 59 optimizer.lr 0.0021646233986603986 +921 59 training.batch_size 0.0 +921 59 training.label_smoothing 0.01043241769044555 +921 60 model.embedding_dim 0.0 +921 
60 model.relation_dim 2.0 +921 60 model.scoring_fct_norm 2.0 +921 60 optimizer.lr 0.0017224852220492958 +921 60 training.batch_size 2.0 +921 60 training.label_smoothing 0.0016255956428696935 +921 61 model.embedding_dim 2.0 +921 61 model.relation_dim 0.0 +921 61 model.scoring_fct_norm 1.0 +921 61 optimizer.lr 0.009966860336786612 +921 61 training.batch_size 1.0 +921 61 training.label_smoothing 0.558663052150953 +921 62 model.embedding_dim 0.0 +921 62 model.relation_dim 1.0 +921 62 model.scoring_fct_norm 1.0 +921 62 optimizer.lr 0.03717776353298862 +921 62 training.batch_size 1.0 +921 62 training.label_smoothing 0.0723990756863291 +921 63 model.embedding_dim 1.0 +921 63 model.relation_dim 1.0 +921 63 model.scoring_fct_norm 2.0 +921 63 optimizer.lr 0.006368189822032639 +921 63 training.batch_size 1.0 +921 63 training.label_smoothing 0.5845959627527375 +921 64 model.embedding_dim 2.0 +921 64 model.relation_dim 2.0 +921 64 model.scoring_fct_norm 2.0 +921 64 optimizer.lr 0.04831624939778774 +921 64 training.batch_size 0.0 +921 64 training.label_smoothing 0.014961254127374578 +921 65 model.embedding_dim 1.0 +921 65 model.relation_dim 1.0 +921 65 model.scoring_fct_norm 1.0 +921 65 optimizer.lr 0.007277601495209108 +921 65 training.batch_size 2.0 +921 65 training.label_smoothing 0.005866652168419364 +921 66 model.embedding_dim 1.0 +921 66 model.relation_dim 0.0 +921 66 model.scoring_fct_norm 2.0 +921 66 optimizer.lr 0.0010446594002256513 +921 66 training.batch_size 1.0 +921 66 training.label_smoothing 0.6197609816288915 +921 67 model.embedding_dim 2.0 +921 67 model.relation_dim 2.0 +921 67 model.scoring_fct_norm 1.0 +921 67 optimizer.lr 0.015968029211455688 +921 67 training.batch_size 2.0 +921 67 training.label_smoothing 0.0019579778391216112 +921 68 model.embedding_dim 0.0 +921 68 model.relation_dim 1.0 +921 68 model.scoring_fct_norm 2.0 +921 68 optimizer.lr 0.00295422919376088 +921 68 training.batch_size 1.0 +921 68 training.label_smoothing 0.01326890451388337 +921 69 
model.embedding_dim 1.0 +921 69 model.relation_dim 2.0 +921 69 model.scoring_fct_norm 1.0 +921 69 optimizer.lr 0.0673529347308918 +921 69 training.batch_size 1.0 +921 69 training.label_smoothing 0.007385506151779444 +921 70 model.embedding_dim 0.0 +921 70 model.relation_dim 1.0 +921 70 model.scoring_fct_norm 2.0 +921 70 optimizer.lr 0.0929200670820974 +921 70 training.batch_size 1.0 +921 70 training.label_smoothing 0.03326844223267813 +921 71 model.embedding_dim 0.0 +921 71 model.relation_dim 2.0 +921 71 model.scoring_fct_norm 2.0 +921 71 optimizer.lr 0.0027798490456361347 +921 71 training.batch_size 1.0 +921 71 training.label_smoothing 0.002582357063568232 +921 72 model.embedding_dim 1.0 +921 72 model.relation_dim 0.0 +921 72 model.scoring_fct_norm 1.0 +921 72 optimizer.lr 0.034288434029528754 +921 72 training.batch_size 0.0 +921 72 training.label_smoothing 0.01750657309740601 +921 73 model.embedding_dim 0.0 +921 73 model.relation_dim 2.0 +921 73 model.scoring_fct_norm 1.0 +921 73 optimizer.lr 0.0390602834295323 +921 73 training.batch_size 2.0 +921 73 training.label_smoothing 0.0020095980059638076 +921 74 model.embedding_dim 2.0 +921 74 model.relation_dim 2.0 +921 74 model.scoring_fct_norm 2.0 +921 74 optimizer.lr 0.00117522714997505 +921 74 training.batch_size 0.0 +921 74 training.label_smoothing 0.6088241769141893 +921 75 model.embedding_dim 1.0 +921 75 model.relation_dim 1.0 +921 75 model.scoring_fct_norm 1.0 +921 75 optimizer.lr 0.0012383672685517442 +921 75 training.batch_size 2.0 +921 75 training.label_smoothing 0.001334211746068694 +921 76 model.embedding_dim 0.0 +921 76 model.relation_dim 2.0 +921 76 model.scoring_fct_norm 2.0 +921 76 optimizer.lr 0.002488743941530019 +921 76 training.batch_size 1.0 +921 76 training.label_smoothing 0.003986619568883132 +921 77 model.embedding_dim 0.0 +921 77 model.relation_dim 2.0 +921 77 model.scoring_fct_norm 1.0 +921 77 optimizer.lr 0.0023390058982384306 +921 77 training.batch_size 2.0 +921 77 training.label_smoothing 
0.3586879153685663 +921 78 model.embedding_dim 0.0 +921 78 model.relation_dim 1.0 +921 78 model.scoring_fct_norm 2.0 +921 78 optimizer.lr 0.024918884382847447 +921 78 training.batch_size 1.0 +921 78 training.label_smoothing 0.10198237000658686 +921 79 model.embedding_dim 2.0 +921 79 model.relation_dim 2.0 +921 79 model.scoring_fct_norm 1.0 +921 79 optimizer.lr 0.04841910509297831 +921 79 training.batch_size 1.0 +921 79 training.label_smoothing 0.00221263053577317 +921 80 model.embedding_dim 0.0 +921 80 model.relation_dim 2.0 +921 80 model.scoring_fct_norm 2.0 +921 80 optimizer.lr 0.003071320851455065 +921 80 training.batch_size 0.0 +921 80 training.label_smoothing 0.0442603047762312 +921 81 model.embedding_dim 2.0 +921 81 model.relation_dim 2.0 +921 81 model.scoring_fct_norm 1.0 +921 81 optimizer.lr 0.019228593956245196 +921 81 training.batch_size 0.0 +921 81 training.label_smoothing 0.21145388648069477 +921 82 model.embedding_dim 1.0 +921 82 model.relation_dim 1.0 +921 82 model.scoring_fct_norm 2.0 +921 82 optimizer.lr 0.009326620524170106 +921 82 training.batch_size 0.0 +921 82 training.label_smoothing 0.05546802139641879 +921 83 model.embedding_dim 0.0 +921 83 model.relation_dim 0.0 +921 83 model.scoring_fct_norm 1.0 +921 83 optimizer.lr 0.001595171737907553 +921 83 training.batch_size 1.0 +921 83 training.label_smoothing 0.03589136703030583 +921 84 model.embedding_dim 1.0 +921 84 model.relation_dim 1.0 +921 84 model.scoring_fct_norm 2.0 +921 84 optimizer.lr 0.005102650147589586 +921 84 training.batch_size 1.0 +921 84 training.label_smoothing 0.04665726193223719 +921 85 model.embedding_dim 2.0 +921 85 model.relation_dim 1.0 +921 85 model.scoring_fct_norm 2.0 +921 85 optimizer.lr 0.0072686908409085585 +921 85 training.batch_size 0.0 +921 85 training.label_smoothing 0.4919725563858029 +921 86 model.embedding_dim 0.0 +921 86 model.relation_dim 1.0 +921 86 model.scoring_fct_norm 2.0 +921 86 optimizer.lr 0.001969786786460066 +921 86 training.batch_size 0.0 +921 86 
training.label_smoothing 0.016421816214086758 +921 87 model.embedding_dim 2.0 +921 87 model.relation_dim 2.0 +921 87 model.scoring_fct_norm 1.0 +921 87 optimizer.lr 0.02289909879042729 +921 87 training.batch_size 0.0 +921 87 training.label_smoothing 0.09288577320151177 +921 88 model.embedding_dim 2.0 +921 88 model.relation_dim 1.0 +921 88 model.scoring_fct_norm 1.0 +921 88 optimizer.lr 0.07338645020825543 +921 88 training.batch_size 2.0 +921 88 training.label_smoothing 0.1621399989800546 +921 89 model.embedding_dim 2.0 +921 89 model.relation_dim 1.0 +921 89 model.scoring_fct_norm 2.0 +921 89 optimizer.lr 0.004077253409254873 +921 89 training.batch_size 1.0 +921 89 training.label_smoothing 0.0876172039044684 +921 90 model.embedding_dim 1.0 +921 90 model.relation_dim 2.0 +921 90 model.scoring_fct_norm 1.0 +921 90 optimizer.lr 0.02938618902960544 +921 90 training.batch_size 1.0 +921 90 training.label_smoothing 0.4226957724633786 +921 91 model.embedding_dim 1.0 +921 91 model.relation_dim 0.0 +921 91 model.scoring_fct_norm 1.0 +921 91 optimizer.lr 0.0038697196051123675 +921 91 training.batch_size 1.0 +921 91 training.label_smoothing 0.014377564690397028 +921 92 model.embedding_dim 2.0 +921 92 model.relation_dim 2.0 +921 92 model.scoring_fct_norm 2.0 +921 92 optimizer.lr 0.06633087769161972 +921 92 training.batch_size 1.0 +921 92 training.label_smoothing 0.0024165888405653633 +921 93 model.embedding_dim 2.0 +921 93 model.relation_dim 0.0 +921 93 model.scoring_fct_norm 1.0 +921 93 optimizer.lr 0.010702953657176535 +921 93 training.batch_size 1.0 +921 93 training.label_smoothing 0.3582362260501653 +921 94 model.embedding_dim 2.0 +921 94 model.relation_dim 1.0 +921 94 model.scoring_fct_norm 2.0 +921 94 optimizer.lr 0.0036694991428955696 +921 94 training.batch_size 1.0 +921 94 training.label_smoothing 0.0012465106123977392 +921 95 model.embedding_dim 1.0 +921 95 model.relation_dim 2.0 +921 95 model.scoring_fct_norm 1.0 +921 95 optimizer.lr 0.0012540305958323719 +921 95 
training.batch_size 2.0 +921 95 training.label_smoothing 0.17062791368726987 +921 96 model.embedding_dim 1.0 +921 96 model.relation_dim 1.0 +921 96 model.scoring_fct_norm 1.0 +921 96 optimizer.lr 0.009864030801769013 +921 96 training.batch_size 0.0 +921 96 training.label_smoothing 0.008984017058153224 +921 97 model.embedding_dim 0.0 +921 97 model.relation_dim 0.0 +921 97 model.scoring_fct_norm 1.0 +921 97 optimizer.lr 0.028921949592666082 +921 97 training.batch_size 2.0 +921 97 training.label_smoothing 0.032080900283131736 +921 98 model.embedding_dim 2.0 +921 98 model.relation_dim 0.0 +921 98 model.scoring_fct_norm 2.0 +921 98 optimizer.lr 0.003862218213904747 +921 98 training.batch_size 1.0 +921 98 training.label_smoothing 0.7067910832444146 +921 99 model.embedding_dim 2.0 +921 99 model.relation_dim 1.0 +921 99 model.scoring_fct_norm 1.0 +921 99 optimizer.lr 0.005777226005864785 +921 99 training.batch_size 0.0 +921 99 training.label_smoothing 0.007328017003490795 +921 100 model.embedding_dim 2.0 +921 100 model.relation_dim 2.0 +921 100 model.scoring_fct_norm 1.0 +921 100 optimizer.lr 0.0020916419048339872 +921 100 training.batch_size 0.0 +921 100 training.label_smoothing 0.011175866407324422 +921 1 dataset """kinships""" +921 1 model """transr""" +921 1 loss """bceaftersigmoid""" +921 1 regularizer """no""" +921 1 optimizer """adam""" +921 1 training_loop """lcwa""" +921 1 evaluator """rankbased""" +921 2 dataset """kinships""" +921 2 model """transr""" +921 2 loss """bceaftersigmoid""" +921 2 regularizer """no""" +921 2 optimizer """adam""" +921 2 training_loop """lcwa""" +921 2 evaluator """rankbased""" +921 3 dataset """kinships""" +921 3 model """transr""" +921 3 loss """bceaftersigmoid""" +921 3 regularizer """no""" +921 3 optimizer """adam""" +921 3 training_loop """lcwa""" +921 3 evaluator """rankbased""" +921 4 dataset """kinships""" +921 4 model """transr""" +921 4 loss """bceaftersigmoid""" +921 4 regularizer """no""" +921 4 optimizer """adam""" +921 4 
training_loop """lcwa""" +921 4 evaluator """rankbased""" +921 5 dataset """kinships""" +921 5 model """transr""" +921 5 loss """bceaftersigmoid""" +921 5 regularizer """no""" +921 5 optimizer """adam""" +921 5 training_loop """lcwa""" +921 5 evaluator """rankbased""" +921 6 dataset """kinships""" +921 6 model """transr""" +921 6 loss """bceaftersigmoid""" +921 6 regularizer """no""" +921 6 optimizer """adam""" +921 6 training_loop """lcwa""" +921 6 evaluator """rankbased""" +921 7 dataset """kinships""" +921 7 model """transr""" +921 7 loss """bceaftersigmoid""" +921 7 regularizer """no""" +921 7 optimizer """adam""" +921 7 training_loop """lcwa""" +921 7 evaluator """rankbased""" +921 8 dataset """kinships""" +921 8 model """transr""" +921 8 loss """bceaftersigmoid""" +921 8 regularizer """no""" +921 8 optimizer """adam""" +921 8 training_loop """lcwa""" +921 8 evaluator """rankbased""" +921 9 dataset """kinships""" +921 9 model """transr""" +921 9 loss """bceaftersigmoid""" +921 9 regularizer """no""" +921 9 optimizer """adam""" +921 9 training_loop """lcwa""" +921 9 evaluator """rankbased""" +921 10 dataset """kinships""" +921 10 model """transr""" +921 10 loss """bceaftersigmoid""" +921 10 regularizer """no""" +921 10 optimizer """adam""" +921 10 training_loop """lcwa""" +921 10 evaluator """rankbased""" +921 11 dataset """kinships""" +921 11 model """transr""" +921 11 loss """bceaftersigmoid""" +921 11 regularizer """no""" +921 11 optimizer """adam""" +921 11 training_loop """lcwa""" +921 11 evaluator """rankbased""" +921 12 dataset """kinships""" +921 12 model """transr""" +921 12 loss """bceaftersigmoid""" +921 12 regularizer """no""" +921 12 optimizer """adam""" +921 12 training_loop """lcwa""" +921 12 evaluator """rankbased""" +921 13 dataset """kinships""" +921 13 model """transr""" +921 13 loss """bceaftersigmoid""" +921 13 regularizer """no""" +921 13 optimizer """adam""" +921 13 training_loop """lcwa""" +921 13 evaluator """rankbased""" +921 14 
dataset """kinships""" +921 14 model """transr""" +921 14 loss """bceaftersigmoid""" +921 14 regularizer """no""" +921 14 optimizer """adam""" +921 14 training_loop """lcwa""" +921 14 evaluator """rankbased""" +921 15 dataset """kinships""" +921 15 model """transr""" +921 15 loss """bceaftersigmoid""" +921 15 regularizer """no""" +921 15 optimizer """adam""" +921 15 training_loop """lcwa""" +921 15 evaluator """rankbased""" +921 16 dataset """kinships""" +921 16 model """transr""" +921 16 loss """bceaftersigmoid""" +921 16 regularizer """no""" +921 16 optimizer """adam""" +921 16 training_loop """lcwa""" +921 16 evaluator """rankbased""" +921 17 dataset """kinships""" +921 17 model """transr""" +921 17 loss """bceaftersigmoid""" +921 17 regularizer """no""" +921 17 optimizer """adam""" +921 17 training_loop """lcwa""" +921 17 evaluator """rankbased""" +921 18 dataset """kinships""" +921 18 model """transr""" +921 18 loss """bceaftersigmoid""" +921 18 regularizer """no""" +921 18 optimizer """adam""" +921 18 training_loop """lcwa""" +921 18 evaluator """rankbased""" +921 19 dataset """kinships""" +921 19 model """transr""" +921 19 loss """bceaftersigmoid""" +921 19 regularizer """no""" +921 19 optimizer """adam""" +921 19 training_loop """lcwa""" +921 19 evaluator """rankbased""" +921 20 dataset """kinships""" +921 20 model """transr""" +921 20 loss """bceaftersigmoid""" +921 20 regularizer """no""" +921 20 optimizer """adam""" +921 20 training_loop """lcwa""" +921 20 evaluator """rankbased""" +921 21 dataset """kinships""" +921 21 model """transr""" +921 21 loss """bceaftersigmoid""" +921 21 regularizer """no""" +921 21 optimizer """adam""" +921 21 training_loop """lcwa""" +921 21 evaluator """rankbased""" +921 22 dataset """kinships""" +921 22 model """transr""" +921 22 loss """bceaftersigmoid""" +921 22 regularizer """no""" +921 22 optimizer """adam""" +921 22 training_loop """lcwa""" +921 22 evaluator """rankbased""" +921 23 dataset """kinships""" +921 23 model 
"""transr""" +921 23 loss """bceaftersigmoid""" +921 23 regularizer """no""" +921 23 optimizer """adam""" +921 23 training_loop """lcwa""" +921 23 evaluator """rankbased""" +921 24 dataset """kinships""" +921 24 model """transr""" +921 24 loss """bceaftersigmoid""" +921 24 regularizer """no""" +921 24 optimizer """adam""" +921 24 training_loop """lcwa""" +921 24 evaluator """rankbased""" +921 25 dataset """kinships""" +921 25 model """transr""" +921 25 loss """bceaftersigmoid""" +921 25 regularizer """no""" +921 25 optimizer """adam""" +921 25 training_loop """lcwa""" +921 25 evaluator """rankbased""" +921 26 dataset """kinships""" +921 26 model """transr""" +921 26 loss """bceaftersigmoid""" +921 26 regularizer """no""" +921 26 optimizer """adam""" +921 26 training_loop """lcwa""" +921 26 evaluator """rankbased""" +921 27 dataset """kinships""" +921 27 model """transr""" +921 27 loss """bceaftersigmoid""" +921 27 regularizer """no""" +921 27 optimizer """adam""" +921 27 training_loop """lcwa""" +921 27 evaluator """rankbased""" +921 28 dataset """kinships""" +921 28 model """transr""" +921 28 loss """bceaftersigmoid""" +921 28 regularizer """no""" +921 28 optimizer """adam""" +921 28 training_loop """lcwa""" +921 28 evaluator """rankbased""" +921 29 dataset """kinships""" +921 29 model """transr""" +921 29 loss """bceaftersigmoid""" +921 29 regularizer """no""" +921 29 optimizer """adam""" +921 29 training_loop """lcwa""" +921 29 evaluator """rankbased""" +921 30 dataset """kinships""" +921 30 model """transr""" +921 30 loss """bceaftersigmoid""" +921 30 regularizer """no""" +921 30 optimizer """adam""" +921 30 training_loop """lcwa""" +921 30 evaluator """rankbased""" +921 31 dataset """kinships""" +921 31 model """transr""" +921 31 loss """bceaftersigmoid""" +921 31 regularizer """no""" +921 31 optimizer """adam""" +921 31 training_loop """lcwa""" +921 31 evaluator """rankbased""" +921 32 dataset """kinships""" +921 32 model """transr""" +921 32 loss 
"""bceaftersigmoid""" +921 32 regularizer """no""" +921 32 optimizer """adam""" +921 32 training_loop """lcwa""" +921 32 evaluator """rankbased""" +921 33 dataset """kinships""" +921 33 model """transr""" +921 33 loss """bceaftersigmoid""" +921 33 regularizer """no""" +921 33 optimizer """adam""" +921 33 training_loop """lcwa""" +921 33 evaluator """rankbased""" +921 34 dataset """kinships""" +921 34 model """transr""" +921 34 loss """bceaftersigmoid""" +921 34 regularizer """no""" +921 34 optimizer """adam""" +921 34 training_loop """lcwa""" +921 34 evaluator """rankbased""" +921 35 dataset """kinships""" +921 35 model """transr""" +921 35 loss """bceaftersigmoid""" +921 35 regularizer """no""" +921 35 optimizer """adam""" +921 35 training_loop """lcwa""" +921 35 evaluator """rankbased""" +921 36 dataset """kinships""" +921 36 model """transr""" +921 36 loss """bceaftersigmoid""" +921 36 regularizer """no""" +921 36 optimizer """adam""" +921 36 training_loop """lcwa""" +921 36 evaluator """rankbased""" +921 37 dataset """kinships""" +921 37 model """transr""" +921 37 loss """bceaftersigmoid""" +921 37 regularizer """no""" +921 37 optimizer """adam""" +921 37 training_loop """lcwa""" +921 37 evaluator """rankbased""" +921 38 dataset """kinships""" +921 38 model """transr""" +921 38 loss """bceaftersigmoid""" +921 38 regularizer """no""" +921 38 optimizer """adam""" +921 38 training_loop """lcwa""" +921 38 evaluator """rankbased""" +921 39 dataset """kinships""" +921 39 model """transr""" +921 39 loss """bceaftersigmoid""" +921 39 regularizer """no""" +921 39 optimizer """adam""" +921 39 training_loop """lcwa""" +921 39 evaluator """rankbased""" +921 40 dataset """kinships""" +921 40 model """transr""" +921 40 loss """bceaftersigmoid""" +921 40 regularizer """no""" +921 40 optimizer """adam""" +921 40 training_loop """lcwa""" +921 40 evaluator """rankbased""" +921 41 dataset """kinships""" +921 41 model """transr""" +921 41 loss """bceaftersigmoid""" +921 41 
regularizer """no""" +921 41 optimizer """adam""" +921 41 training_loop """lcwa""" +921 41 evaluator """rankbased""" +921 42 dataset """kinships""" +921 42 model """transr""" +921 42 loss """bceaftersigmoid""" +921 42 regularizer """no""" +921 42 optimizer """adam""" +921 42 training_loop """lcwa""" +921 42 evaluator """rankbased""" +921 43 dataset """kinships""" +921 43 model """transr""" +921 43 loss """bceaftersigmoid""" +921 43 regularizer """no""" +921 43 optimizer """adam""" +921 43 training_loop """lcwa""" +921 43 evaluator """rankbased""" +921 44 dataset """kinships""" +921 44 model """transr""" +921 44 loss """bceaftersigmoid""" +921 44 regularizer """no""" +921 44 optimizer """adam""" +921 44 training_loop """lcwa""" +921 44 evaluator """rankbased""" +921 45 dataset """kinships""" +921 45 model """transr""" +921 45 loss """bceaftersigmoid""" +921 45 regularizer """no""" +921 45 optimizer """adam""" +921 45 training_loop """lcwa""" +921 45 evaluator """rankbased""" +921 46 dataset """kinships""" +921 46 model """transr""" +921 46 loss """bceaftersigmoid""" +921 46 regularizer """no""" +921 46 optimizer """adam""" +921 46 training_loop """lcwa""" +921 46 evaluator """rankbased""" +921 47 dataset """kinships""" +921 47 model """transr""" +921 47 loss """bceaftersigmoid""" +921 47 regularizer """no""" +921 47 optimizer """adam""" +921 47 training_loop """lcwa""" +921 47 evaluator """rankbased""" +921 48 dataset """kinships""" +921 48 model """transr""" +921 48 loss """bceaftersigmoid""" +921 48 regularizer """no""" +921 48 optimizer """adam""" +921 48 training_loop """lcwa""" +921 48 evaluator """rankbased""" +921 49 dataset """kinships""" +921 49 model """transr""" +921 49 loss """bceaftersigmoid""" +921 49 regularizer """no""" +921 49 optimizer """adam""" +921 49 training_loop """lcwa""" +921 49 evaluator """rankbased""" +921 50 dataset """kinships""" +921 50 model """transr""" +921 50 loss """bceaftersigmoid""" +921 50 regularizer """no""" +921 50 
optimizer """adam""" +921 50 training_loop """lcwa""" +921 50 evaluator """rankbased""" +921 51 dataset """kinships""" +921 51 model """transr""" +921 51 loss """bceaftersigmoid""" +921 51 regularizer """no""" +921 51 optimizer """adam""" +921 51 training_loop """lcwa""" +921 51 evaluator """rankbased""" +921 52 dataset """kinships""" +921 52 model """transr""" +921 52 loss """bceaftersigmoid""" +921 52 regularizer """no""" +921 52 optimizer """adam""" +921 52 training_loop """lcwa""" +921 52 evaluator """rankbased""" +921 53 dataset """kinships""" +921 53 model """transr""" +921 53 loss """bceaftersigmoid""" +921 53 regularizer """no""" +921 53 optimizer """adam""" +921 53 training_loop """lcwa""" +921 53 evaluator """rankbased""" +921 54 dataset """kinships""" +921 54 model """transr""" +921 54 loss """bceaftersigmoid""" +921 54 regularizer """no""" +921 54 optimizer """adam""" +921 54 training_loop """lcwa""" +921 54 evaluator """rankbased""" +921 55 dataset """kinships""" +921 55 model """transr""" +921 55 loss """bceaftersigmoid""" +921 55 regularizer """no""" +921 55 optimizer """adam""" +921 55 training_loop """lcwa""" +921 55 evaluator """rankbased""" +921 56 dataset """kinships""" +921 56 model """transr""" +921 56 loss """bceaftersigmoid""" +921 56 regularizer """no""" +921 56 optimizer """adam""" +921 56 training_loop """lcwa""" +921 56 evaluator """rankbased""" +921 57 dataset """kinships""" +921 57 model """transr""" +921 57 loss """bceaftersigmoid""" +921 57 regularizer """no""" +921 57 optimizer """adam""" +921 57 training_loop """lcwa""" +921 57 evaluator """rankbased""" +921 58 dataset """kinships""" +921 58 model """transr""" +921 58 loss """bceaftersigmoid""" +921 58 regularizer """no""" +921 58 optimizer """adam""" +921 58 training_loop """lcwa""" +921 58 evaluator """rankbased""" +921 59 dataset """kinships""" +921 59 model """transr""" +921 59 loss """bceaftersigmoid""" +921 59 regularizer """no""" +921 59 optimizer """adam""" +921 59 
training_loop """lcwa""" +921 59 evaluator """rankbased""" +921 60 dataset """kinships""" +921 60 model """transr""" +921 60 loss """bceaftersigmoid""" +921 60 regularizer """no""" +921 60 optimizer """adam""" +921 60 training_loop """lcwa""" +921 60 evaluator """rankbased""" +921 61 dataset """kinships""" +921 61 model """transr""" +921 61 loss """bceaftersigmoid""" +921 61 regularizer """no""" +921 61 optimizer """adam""" +921 61 training_loop """lcwa""" +921 61 evaluator """rankbased""" +921 62 dataset """kinships""" +921 62 model """transr""" +921 62 loss """bceaftersigmoid""" +921 62 regularizer """no""" +921 62 optimizer """adam""" +921 62 training_loop """lcwa""" +921 62 evaluator """rankbased""" +921 63 dataset """kinships""" +921 63 model """transr""" +921 63 loss """bceaftersigmoid""" +921 63 regularizer """no""" +921 63 optimizer """adam""" +921 63 training_loop """lcwa""" +921 63 evaluator """rankbased""" +921 64 dataset """kinships""" +921 64 model """transr""" +921 64 loss """bceaftersigmoid""" +921 64 regularizer """no""" +921 64 optimizer """adam""" +921 64 training_loop """lcwa""" +921 64 evaluator """rankbased""" +921 65 dataset """kinships""" +921 65 model """transr""" +921 65 loss """bceaftersigmoid""" +921 65 regularizer """no""" +921 65 optimizer """adam""" +921 65 training_loop """lcwa""" +921 65 evaluator """rankbased""" +921 66 dataset """kinships""" +921 66 model """transr""" +921 66 loss """bceaftersigmoid""" +921 66 regularizer """no""" +921 66 optimizer """adam""" +921 66 training_loop """lcwa""" +921 66 evaluator """rankbased""" +921 67 dataset """kinships""" +921 67 model """transr""" +921 67 loss """bceaftersigmoid""" +921 67 regularizer """no""" +921 67 optimizer """adam""" +921 67 training_loop """lcwa""" +921 67 evaluator """rankbased""" +921 68 dataset """kinships""" +921 68 model """transr""" +921 68 loss """bceaftersigmoid""" +921 68 regularizer """no""" +921 68 optimizer """adam""" +921 68 training_loop """lcwa""" +921 68 
evaluator """rankbased""" +921 69 dataset """kinships""" +921 69 model """transr""" +921 69 loss """bceaftersigmoid""" +921 69 regularizer """no""" +921 69 optimizer """adam""" +921 69 training_loop """lcwa""" +921 69 evaluator """rankbased""" +921 70 dataset """kinships""" +921 70 model """transr""" +921 70 loss """bceaftersigmoid""" +921 70 regularizer """no""" +921 70 optimizer """adam""" +921 70 training_loop """lcwa""" +921 70 evaluator """rankbased""" +921 71 dataset """kinships""" +921 71 model """transr""" +921 71 loss """bceaftersigmoid""" +921 71 regularizer """no""" +921 71 optimizer """adam""" +921 71 training_loop """lcwa""" +921 71 evaluator """rankbased""" +921 72 dataset """kinships""" +921 72 model """transr""" +921 72 loss """bceaftersigmoid""" +921 72 regularizer """no""" +921 72 optimizer """adam""" +921 72 training_loop """lcwa""" +921 72 evaluator """rankbased""" +921 73 dataset """kinships""" +921 73 model """transr""" +921 73 loss """bceaftersigmoid""" +921 73 regularizer """no""" +921 73 optimizer """adam""" +921 73 training_loop """lcwa""" +921 73 evaluator """rankbased""" +921 74 dataset """kinships""" +921 74 model """transr""" +921 74 loss """bceaftersigmoid""" +921 74 regularizer """no""" +921 74 optimizer """adam""" +921 74 training_loop """lcwa""" +921 74 evaluator """rankbased""" +921 75 dataset """kinships""" +921 75 model """transr""" +921 75 loss """bceaftersigmoid""" +921 75 regularizer """no""" +921 75 optimizer """adam""" +921 75 training_loop """lcwa""" +921 75 evaluator """rankbased""" +921 76 dataset """kinships""" +921 76 model """transr""" +921 76 loss """bceaftersigmoid""" +921 76 regularizer """no""" +921 76 optimizer """adam""" +921 76 training_loop """lcwa""" +921 76 evaluator """rankbased""" +921 77 dataset """kinships""" +921 77 model """transr""" +921 77 loss """bceaftersigmoid""" +921 77 regularizer """no""" +921 77 optimizer """adam""" +921 77 training_loop """lcwa""" +921 77 evaluator """rankbased""" +921 78 
dataset """kinships""" +921 78 model """transr""" +921 78 loss """bceaftersigmoid""" +921 78 regularizer """no""" +921 78 optimizer """adam""" +921 78 training_loop """lcwa""" +921 78 evaluator """rankbased""" +921 79 dataset """kinships""" +921 79 model """transr""" +921 79 loss """bceaftersigmoid""" +921 79 regularizer """no""" +921 79 optimizer """adam""" +921 79 training_loop """lcwa""" +921 79 evaluator """rankbased""" +921 80 dataset """kinships""" +921 80 model """transr""" +921 80 loss """bceaftersigmoid""" +921 80 regularizer """no""" +921 80 optimizer """adam""" +921 80 training_loop """lcwa""" +921 80 evaluator """rankbased""" +921 81 dataset """kinships""" +921 81 model """transr""" +921 81 loss """bceaftersigmoid""" +921 81 regularizer """no""" +921 81 optimizer """adam""" +921 81 training_loop """lcwa""" +921 81 evaluator """rankbased""" +921 82 dataset """kinships""" +921 82 model """transr""" +921 82 loss """bceaftersigmoid""" +921 82 regularizer """no""" +921 82 optimizer """adam""" +921 82 training_loop """lcwa""" +921 82 evaluator """rankbased""" +921 83 dataset """kinships""" +921 83 model """transr""" +921 83 loss """bceaftersigmoid""" +921 83 regularizer """no""" +921 83 optimizer """adam""" +921 83 training_loop """lcwa""" +921 83 evaluator """rankbased""" +921 84 dataset """kinships""" +921 84 model """transr""" +921 84 loss """bceaftersigmoid""" +921 84 regularizer """no""" +921 84 optimizer """adam""" +921 84 training_loop """lcwa""" +921 84 evaluator """rankbased""" +921 85 dataset """kinships""" +921 85 model """transr""" +921 85 loss """bceaftersigmoid""" +921 85 regularizer """no""" +921 85 optimizer """adam""" +921 85 training_loop """lcwa""" +921 85 evaluator """rankbased""" +921 86 dataset """kinships""" +921 86 model """transr""" +921 86 loss """bceaftersigmoid""" +921 86 regularizer """no""" +921 86 optimizer """adam""" +921 86 training_loop """lcwa""" +921 86 evaluator """rankbased""" +921 87 dataset """kinships""" +921 87 model 
"""transr""" +921 87 loss """bceaftersigmoid""" +921 87 regularizer """no""" +921 87 optimizer """adam""" +921 87 training_loop """lcwa""" +921 87 evaluator """rankbased""" +921 88 dataset """kinships""" +921 88 model """transr""" +921 88 loss """bceaftersigmoid""" +921 88 regularizer """no""" +921 88 optimizer """adam""" +921 88 training_loop """lcwa""" +921 88 evaluator """rankbased""" +921 89 dataset """kinships""" +921 89 model """transr""" +921 89 loss """bceaftersigmoid""" +921 89 regularizer """no""" +921 89 optimizer """adam""" +921 89 training_loop """lcwa""" +921 89 evaluator """rankbased""" +921 90 dataset """kinships""" +921 90 model """transr""" +921 90 loss """bceaftersigmoid""" +921 90 regularizer """no""" +921 90 optimizer """adam""" +921 90 training_loop """lcwa""" +921 90 evaluator """rankbased""" +921 91 dataset """kinships""" +921 91 model """transr""" +921 91 loss """bceaftersigmoid""" +921 91 regularizer """no""" +921 91 optimizer """adam""" +921 91 training_loop """lcwa""" +921 91 evaluator """rankbased""" +921 92 dataset """kinships""" +921 92 model """transr""" +921 92 loss """bceaftersigmoid""" +921 92 regularizer """no""" +921 92 optimizer """adam""" +921 92 training_loop """lcwa""" +921 92 evaluator """rankbased""" +921 93 dataset """kinships""" +921 93 model """transr""" +921 93 loss """bceaftersigmoid""" +921 93 regularizer """no""" +921 93 optimizer """adam""" +921 93 training_loop """lcwa""" +921 93 evaluator """rankbased""" +921 94 dataset """kinships""" +921 94 model """transr""" +921 94 loss """bceaftersigmoid""" +921 94 regularizer """no""" +921 94 optimizer """adam""" +921 94 training_loop """lcwa""" +921 94 evaluator """rankbased""" +921 95 dataset """kinships""" +921 95 model """transr""" +921 95 loss """bceaftersigmoid""" +921 95 regularizer """no""" +921 95 optimizer """adam""" +921 95 training_loop """lcwa""" +921 95 evaluator """rankbased""" +921 96 dataset """kinships""" +921 96 model """transr""" +921 96 loss 
"""bceaftersigmoid""" +921 96 regularizer """no""" +921 96 optimizer """adam""" +921 96 training_loop """lcwa""" +921 96 evaluator """rankbased""" +921 97 dataset """kinships""" +921 97 model """transr""" +921 97 loss """bceaftersigmoid""" +921 97 regularizer """no""" +921 97 optimizer """adam""" +921 97 training_loop """lcwa""" +921 97 evaluator """rankbased""" +921 98 dataset """kinships""" +921 98 model """transr""" +921 98 loss """bceaftersigmoid""" +921 98 regularizer """no""" +921 98 optimizer """adam""" +921 98 training_loop """lcwa""" +921 98 evaluator """rankbased""" +921 99 dataset """kinships""" +921 99 model """transr""" +921 99 loss """bceaftersigmoid""" +921 99 regularizer """no""" +921 99 optimizer """adam""" +921 99 training_loop """lcwa""" +921 99 evaluator """rankbased""" +921 100 dataset """kinships""" +921 100 model """transr""" +921 100 loss """bceaftersigmoid""" +921 100 regularizer """no""" +921 100 optimizer """adam""" +921 100 training_loop """lcwa""" +921 100 evaluator """rankbased""" +922 1 model.embedding_dim 2.0 +922 1 model.relation_dim 2.0 +922 1 model.scoring_fct_norm 2.0 +922 1 optimizer.lr 0.001571468462317667 +922 1 training.batch_size 0.0 +922 1 training.label_smoothing 0.2381926825314317 +922 2 model.embedding_dim 0.0 +922 2 model.relation_dim 1.0 +922 2 model.scoring_fct_norm 2.0 +922 2 optimizer.lr 0.0016977737182769293 +922 2 training.batch_size 2.0 +922 2 training.label_smoothing 0.11153442312254565 +922 3 model.embedding_dim 0.0 +922 3 model.relation_dim 1.0 +922 3 model.scoring_fct_norm 1.0 +922 3 optimizer.lr 0.05179568938738051 +922 3 training.batch_size 2.0 +922 3 training.label_smoothing 0.053218988068003785 +922 4 model.embedding_dim 1.0 +922 4 model.relation_dim 1.0 +922 4 model.scoring_fct_norm 2.0 +922 4 optimizer.lr 0.018405250726246186 +922 4 training.batch_size 0.0 +922 4 training.label_smoothing 0.007534648706304557 +922 5 model.embedding_dim 2.0 +922 5 model.relation_dim 0.0 +922 5 model.scoring_fct_norm 2.0 
+922 5 optimizer.lr 0.015940852951158085 +922 5 training.batch_size 1.0 +922 5 training.label_smoothing 0.00236540691654767 +922 6 model.embedding_dim 1.0 +922 6 model.relation_dim 1.0 +922 6 model.scoring_fct_norm 1.0 +922 6 optimizer.lr 0.06559731631681807 +922 6 training.batch_size 0.0 +922 6 training.label_smoothing 0.27772528460745716 +922 7 model.embedding_dim 0.0 +922 7 model.relation_dim 0.0 +922 7 model.scoring_fct_norm 2.0 +922 7 optimizer.lr 0.001760305674025459 +922 7 training.batch_size 0.0 +922 7 training.label_smoothing 0.31451451807182496 +922 8 model.embedding_dim 1.0 +922 8 model.relation_dim 1.0 +922 8 model.scoring_fct_norm 1.0 +922 8 optimizer.lr 0.0019216557134946839 +922 8 training.batch_size 1.0 +922 8 training.label_smoothing 0.002046255637689948 +922 9 model.embedding_dim 1.0 +922 9 model.relation_dim 2.0 +922 9 model.scoring_fct_norm 1.0 +922 9 optimizer.lr 0.002043564680220946 +922 9 training.batch_size 1.0 +922 9 training.label_smoothing 0.0435116428171027 +922 10 model.embedding_dim 2.0 +922 10 model.relation_dim 2.0 +922 10 model.scoring_fct_norm 1.0 +922 10 optimizer.lr 0.03655575348567531 +922 10 training.batch_size 0.0 +922 10 training.label_smoothing 0.1645329688569062 +922 11 model.embedding_dim 1.0 +922 11 model.relation_dim 0.0 +922 11 model.scoring_fct_norm 2.0 +922 11 optimizer.lr 0.05501448466762319 +922 11 training.batch_size 2.0 +922 11 training.label_smoothing 0.004741884272914319 +922 12 model.embedding_dim 2.0 +922 12 model.relation_dim 0.0 +922 12 model.scoring_fct_norm 1.0 +922 12 optimizer.lr 0.0012252537282138466 +922 12 training.batch_size 2.0 +922 12 training.label_smoothing 0.003932637637975666 +922 13 model.embedding_dim 2.0 +922 13 model.relation_dim 1.0 +922 13 model.scoring_fct_norm 1.0 +922 13 optimizer.lr 0.00276303660160663 +922 13 training.batch_size 0.0 +922 13 training.label_smoothing 0.12900854779044746 +922 14 model.embedding_dim 1.0 +922 14 model.relation_dim 0.0 +922 14 model.scoring_fct_norm 1.0 
+922 14 optimizer.lr 0.0012436300358370097 +922 14 training.batch_size 2.0 +922 14 training.label_smoothing 0.6191804057952784 +922 15 model.embedding_dim 2.0 +922 15 model.relation_dim 2.0 +922 15 model.scoring_fct_norm 1.0 +922 15 optimizer.lr 0.09660481860430457 +922 15 training.batch_size 0.0 +922 15 training.label_smoothing 0.015309211252570844 +922 16 model.embedding_dim 0.0 +922 16 model.relation_dim 0.0 +922 16 model.scoring_fct_norm 1.0 +922 16 optimizer.lr 0.004030198152341009 +922 16 training.batch_size 1.0 +922 16 training.label_smoothing 0.0018514246336589105 +922 17 model.embedding_dim 2.0 +922 17 model.relation_dim 1.0 +922 17 model.scoring_fct_norm 2.0 +922 17 optimizer.lr 0.025958750879762404 +922 17 training.batch_size 2.0 +922 17 training.label_smoothing 0.009654175629329763 +922 18 model.embedding_dim 2.0 +922 18 model.relation_dim 2.0 +922 18 model.scoring_fct_norm 1.0 +922 18 optimizer.lr 0.04695924966531984 +922 18 training.batch_size 2.0 +922 18 training.label_smoothing 0.6325030094439188 +922 19 model.embedding_dim 0.0 +922 19 model.relation_dim 2.0 +922 19 model.scoring_fct_norm 2.0 +922 19 optimizer.lr 0.0016207622953836069 +922 19 training.batch_size 2.0 +922 19 training.label_smoothing 0.006164645657255596 +922 20 model.embedding_dim 2.0 +922 20 model.relation_dim 2.0 +922 20 model.scoring_fct_norm 1.0 +922 20 optimizer.lr 0.0033959463011448815 +922 20 training.batch_size 0.0 +922 20 training.label_smoothing 0.635642640665468 +922 21 model.embedding_dim 1.0 +922 21 model.relation_dim 0.0 +922 21 model.scoring_fct_norm 1.0 +922 21 optimizer.lr 0.03422601028714159 +922 21 training.batch_size 1.0 +922 21 training.label_smoothing 0.0010864705009860942 +922 22 model.embedding_dim 0.0 +922 22 model.relation_dim 0.0 +922 22 model.scoring_fct_norm 1.0 +922 22 optimizer.lr 0.00147251864620228 +922 22 training.batch_size 1.0 +922 22 training.label_smoothing 0.608761954162768 +922 23 model.embedding_dim 0.0 +922 23 model.relation_dim 0.0 +922 23 
model.scoring_fct_norm 1.0 +922 23 optimizer.lr 0.029057484731336233 +922 23 training.batch_size 2.0 +922 23 training.label_smoothing 0.0020124187086564085 +922 24 model.embedding_dim 1.0 +922 24 model.relation_dim 0.0 +922 24 model.scoring_fct_norm 1.0 +922 24 optimizer.lr 0.002893340517260037 +922 24 training.batch_size 1.0 +922 24 training.label_smoothing 0.005787874270247832 +922 25 model.embedding_dim 1.0 +922 25 model.relation_dim 2.0 +922 25 model.scoring_fct_norm 1.0 +922 25 optimizer.lr 0.07155117908617376 +922 25 training.batch_size 0.0 +922 25 training.label_smoothing 0.04693775158279248 +922 26 model.embedding_dim 0.0 +922 26 model.relation_dim 2.0 +922 26 model.scoring_fct_norm 2.0 +922 26 optimizer.lr 0.05181771276181424 +922 26 training.batch_size 0.0 +922 26 training.label_smoothing 0.057419574501216236 +922 27 model.embedding_dim 2.0 +922 27 model.relation_dim 2.0 +922 27 model.scoring_fct_norm 2.0 +922 27 optimizer.lr 0.02913511991446885 +922 27 training.batch_size 0.0 +922 27 training.label_smoothing 0.025745119102683123 +922 28 model.embedding_dim 2.0 +922 28 model.relation_dim 0.0 +922 28 model.scoring_fct_norm 2.0 +922 28 optimizer.lr 0.003919215226344636 +922 28 training.batch_size 0.0 +922 28 training.label_smoothing 0.6389086993188345 +922 29 model.embedding_dim 2.0 +922 29 model.relation_dim 0.0 +922 29 model.scoring_fct_norm 1.0 +922 29 optimizer.lr 0.001826317558331567 +922 29 training.batch_size 2.0 +922 29 training.label_smoothing 0.003983992861689776 +922 30 model.embedding_dim 0.0 +922 30 model.relation_dim 0.0 +922 30 model.scoring_fct_norm 1.0 +922 30 optimizer.lr 0.014317141928115751 +922 30 training.batch_size 2.0 +922 30 training.label_smoothing 0.017411325767943717 +922 31 model.embedding_dim 2.0 +922 31 model.relation_dim 2.0 +922 31 model.scoring_fct_norm 2.0 +922 31 optimizer.lr 0.032472196694979555 +922 31 training.batch_size 1.0 +922 31 training.label_smoothing 0.010503316323402699 +922 32 model.embedding_dim 0.0 +922 32 
model.relation_dim 0.0 +922 32 model.scoring_fct_norm 2.0 +922 32 optimizer.lr 0.036548908440212906 +922 32 training.batch_size 0.0 +922 32 training.label_smoothing 0.008714754654598538 +922 33 model.embedding_dim 2.0 +922 33 model.relation_dim 2.0 +922 33 model.scoring_fct_norm 2.0 +922 33 optimizer.lr 0.0034037731331315367 +922 33 training.batch_size 1.0 +922 33 training.label_smoothing 0.014137443734774281 +922 34 model.embedding_dim 1.0 +922 34 model.relation_dim 1.0 +922 34 model.scoring_fct_norm 1.0 +922 34 optimizer.lr 0.006597727383871399 +922 34 training.batch_size 0.0 +922 34 training.label_smoothing 0.0016982459993708783 +922 35 model.embedding_dim 2.0 +922 35 model.relation_dim 1.0 +922 35 model.scoring_fct_norm 2.0 +922 35 optimizer.lr 0.00963531654596022 +922 35 training.batch_size 0.0 +922 35 training.label_smoothing 0.005641290634151256 +922 36 model.embedding_dim 2.0 +922 36 model.relation_dim 2.0 +922 36 model.scoring_fct_norm 2.0 +922 36 optimizer.lr 0.013941209185809003 +922 36 training.batch_size 2.0 +922 36 training.label_smoothing 0.1738130339708829 +922 37 model.embedding_dim 2.0 +922 37 model.relation_dim 1.0 +922 37 model.scoring_fct_norm 2.0 +922 37 optimizer.lr 0.0010117397790568906 +922 37 training.batch_size 0.0 +922 37 training.label_smoothing 0.002708357512950774 +922 38 model.embedding_dim 2.0 +922 38 model.relation_dim 0.0 +922 38 model.scoring_fct_norm 1.0 +922 38 optimizer.lr 0.06758238012444305 +922 38 training.batch_size 1.0 +922 38 training.label_smoothing 0.05317185959605286 +922 39 model.embedding_dim 0.0 +922 39 model.relation_dim 1.0 +922 39 model.scoring_fct_norm 2.0 +922 39 optimizer.lr 0.05944416305804463 +922 39 training.batch_size 1.0 +922 39 training.label_smoothing 0.03558283191322814 +922 40 model.embedding_dim 2.0 +922 40 model.relation_dim 1.0 +922 40 model.scoring_fct_norm 2.0 +922 40 optimizer.lr 0.0030868740282457272 +922 40 training.batch_size 0.0 +922 40 training.label_smoothing 0.8162524332064014 +922 41 
model.embedding_dim 2.0 +922 41 model.relation_dim 2.0 +922 41 model.scoring_fct_norm 1.0 +922 41 optimizer.lr 0.002257774983626516 +922 41 training.batch_size 2.0 +922 41 training.label_smoothing 0.2596907165096787 +922 42 model.embedding_dim 0.0 +922 42 model.relation_dim 0.0 +922 42 model.scoring_fct_norm 2.0 +922 42 optimizer.lr 0.03863806407807899 +922 42 training.batch_size 0.0 +922 42 training.label_smoothing 0.002101781666924508 +922 43 model.embedding_dim 0.0 +922 43 model.relation_dim 0.0 +922 43 model.scoring_fct_norm 1.0 +922 43 optimizer.lr 0.002866841165247474 +922 43 training.batch_size 1.0 +922 43 training.label_smoothing 0.026757681020199174 +922 44 model.embedding_dim 1.0 +922 44 model.relation_dim 0.0 +922 44 model.scoring_fct_norm 1.0 +922 44 optimizer.lr 0.005134571779523514 +922 44 training.batch_size 1.0 +922 44 training.label_smoothing 0.5371962364623108 +922 45 model.embedding_dim 1.0 +922 45 model.relation_dim 0.0 +922 45 model.scoring_fct_norm 2.0 +922 45 optimizer.lr 0.00920623302056866 +922 45 training.batch_size 0.0 +922 45 training.label_smoothing 0.06245985680374466 +922 46 model.embedding_dim 2.0 +922 46 model.relation_dim 1.0 +922 46 model.scoring_fct_norm 2.0 +922 46 optimizer.lr 0.013216244689323297 +922 46 training.batch_size 1.0 +922 46 training.label_smoothing 0.43413370567810217 +922 47 model.embedding_dim 2.0 +922 47 model.relation_dim 0.0 +922 47 model.scoring_fct_norm 1.0 +922 47 optimizer.lr 0.005321606055471162 +922 47 training.batch_size 1.0 +922 47 training.label_smoothing 0.0021090461994299203 +922 48 model.embedding_dim 1.0 +922 48 model.relation_dim 0.0 +922 48 model.scoring_fct_norm 2.0 +922 48 optimizer.lr 0.04011822116120423 +922 48 training.batch_size 0.0 +922 48 training.label_smoothing 0.006968140350842245 +922 49 model.embedding_dim 0.0 +922 49 model.relation_dim 0.0 +922 49 model.scoring_fct_norm 2.0 +922 49 optimizer.lr 0.02720733604432716 +922 49 training.batch_size 1.0 +922 49 training.label_smoothing 
0.0021428470324815273 +922 50 model.embedding_dim 1.0 +922 50 model.relation_dim 2.0 +922 50 model.scoring_fct_norm 2.0 +922 50 optimizer.lr 0.09791295383713308 +922 50 training.batch_size 2.0 +922 50 training.label_smoothing 0.003715926735238657 +922 51 model.embedding_dim 2.0 +922 51 model.relation_dim 2.0 +922 51 model.scoring_fct_norm 1.0 +922 51 optimizer.lr 0.018156836312031504 +922 51 training.batch_size 0.0 +922 51 training.label_smoothing 0.014857679302026595 +922 52 model.embedding_dim 2.0 +922 52 model.relation_dim 0.0 +922 52 model.scoring_fct_norm 2.0 +922 52 optimizer.lr 0.005856882923378111 +922 52 training.batch_size 0.0 +922 52 training.label_smoothing 0.00998303383334837 +922 53 model.embedding_dim 2.0 +922 53 model.relation_dim 0.0 +922 53 model.scoring_fct_norm 1.0 +922 53 optimizer.lr 0.004036386212152631 +922 53 training.batch_size 1.0 +922 53 training.label_smoothing 0.17671052383237712 +922 54 model.embedding_dim 1.0 +922 54 model.relation_dim 1.0 +922 54 model.scoring_fct_norm 1.0 +922 54 optimizer.lr 0.044639592610793165 +922 54 training.batch_size 1.0 +922 54 training.label_smoothing 0.013760467369496763 +922 55 model.embedding_dim 0.0 +922 55 model.relation_dim 1.0 +922 55 model.scoring_fct_norm 2.0 +922 55 optimizer.lr 0.0069549406922246974 +922 55 training.batch_size 1.0 +922 55 training.label_smoothing 0.13948231591841342 +922 56 model.embedding_dim 1.0 +922 56 model.relation_dim 1.0 +922 56 model.scoring_fct_norm 2.0 +922 56 optimizer.lr 0.009836406222548383 +922 56 training.batch_size 2.0 +922 56 training.label_smoothing 0.7436887811548526 +922 57 model.embedding_dim 0.0 +922 57 model.relation_dim 1.0 +922 57 model.scoring_fct_norm 1.0 +922 57 optimizer.lr 0.002331953379139992 +922 57 training.batch_size 2.0 +922 57 training.label_smoothing 0.03670012858795914 +922 58 model.embedding_dim 2.0 +922 58 model.relation_dim 0.0 +922 58 model.scoring_fct_norm 1.0 +922 58 optimizer.lr 0.001059443505922909 +922 58 training.batch_size 1.0 
+922 58 training.label_smoothing 0.01146334696704799 +922 59 model.embedding_dim 0.0 +922 59 model.relation_dim 1.0 +922 59 model.scoring_fct_norm 2.0 +922 59 optimizer.lr 0.0016576302789402524 +922 59 training.batch_size 2.0 +922 59 training.label_smoothing 0.17142038665247308 +922 60 model.embedding_dim 2.0 +922 60 model.relation_dim 2.0 +922 60 model.scoring_fct_norm 2.0 +922 60 optimizer.lr 0.0015140245467687823 +922 60 training.batch_size 1.0 +922 60 training.label_smoothing 0.05658882725803478 +922 61 model.embedding_dim 0.0 +922 61 model.relation_dim 0.0 +922 61 model.scoring_fct_norm 2.0 +922 61 optimizer.lr 0.015916826594428623 +922 61 training.batch_size 0.0 +922 61 training.label_smoothing 0.003415837016653832 +922 62 model.embedding_dim 2.0 +922 62 model.relation_dim 0.0 +922 62 model.scoring_fct_norm 2.0 +922 62 optimizer.lr 0.003958607984146839 +922 62 training.batch_size 1.0 +922 62 training.label_smoothing 0.019841386476364662 +922 63 model.embedding_dim 0.0 +922 63 model.relation_dim 2.0 +922 63 model.scoring_fct_norm 2.0 +922 63 optimizer.lr 0.04171085365553083 +922 63 training.batch_size 1.0 +922 63 training.label_smoothing 0.002041809629357677 +922 64 model.embedding_dim 1.0 +922 64 model.relation_dim 2.0 +922 64 model.scoring_fct_norm 2.0 +922 64 optimizer.lr 0.009355700841636304 +922 64 training.batch_size 2.0 +922 64 training.label_smoothing 0.12382848526112514 +922 65 model.embedding_dim 1.0 +922 65 model.relation_dim 0.0 +922 65 model.scoring_fct_norm 1.0 +922 65 optimizer.lr 0.05378564672968904 +922 65 training.batch_size 2.0 +922 65 training.label_smoothing 0.0015456251131571951 +922 66 model.embedding_dim 1.0 +922 66 model.relation_dim 1.0 +922 66 model.scoring_fct_norm 2.0 +922 66 optimizer.lr 0.0024571505792989486 +922 66 training.batch_size 2.0 +922 66 training.label_smoothing 0.028380403311817737 +922 67 model.embedding_dim 2.0 +922 67 model.relation_dim 0.0 +922 67 model.scoring_fct_norm 2.0 +922 67 optimizer.lr 
0.0069799027100060805 +922 67 training.batch_size 0.0 +922 67 training.label_smoothing 0.30974942075727824 +922 68 model.embedding_dim 2.0 +922 68 model.relation_dim 2.0 +922 68 model.scoring_fct_norm 1.0 +922 68 optimizer.lr 0.016777517103905754 +922 68 training.batch_size 2.0 +922 68 training.label_smoothing 0.0029273171371979705 +922 69 model.embedding_dim 0.0 +922 69 model.relation_dim 1.0 +922 69 model.scoring_fct_norm 2.0 +922 69 optimizer.lr 0.014746544809621683 +922 69 training.batch_size 2.0 +922 69 training.label_smoothing 0.044363529117402524 +922 70 model.embedding_dim 2.0 +922 70 model.relation_dim 0.0 +922 70 model.scoring_fct_norm 2.0 +922 70 optimizer.lr 0.003386832552017623 +922 70 training.batch_size 2.0 +922 70 training.label_smoothing 0.002310471362784975 +922 71 model.embedding_dim 0.0 +922 71 model.relation_dim 1.0 +922 71 model.scoring_fct_norm 1.0 +922 71 optimizer.lr 0.00919416278789776 +922 71 training.batch_size 2.0 +922 71 training.label_smoothing 0.12064386420939119 +922 72 model.embedding_dim 1.0 +922 72 model.relation_dim 1.0 +922 72 model.scoring_fct_norm 1.0 +922 72 optimizer.lr 0.05980260069422107 +922 72 training.batch_size 1.0 +922 72 training.label_smoothing 0.0014712645856871855 +922 73 model.embedding_dim 1.0 +922 73 model.relation_dim 2.0 +922 73 model.scoring_fct_norm 2.0 +922 73 optimizer.lr 0.0032257082027432613 +922 73 training.batch_size 2.0 +922 73 training.label_smoothing 0.008231183086964033 +922 74 model.embedding_dim 0.0 +922 74 model.relation_dim 1.0 +922 74 model.scoring_fct_norm 2.0 +922 74 optimizer.lr 0.02367594245756359 +922 74 training.batch_size 0.0 +922 74 training.label_smoothing 0.06290679783602846 +922 75 model.embedding_dim 2.0 +922 75 model.relation_dim 0.0 +922 75 model.scoring_fct_norm 1.0 +922 75 optimizer.lr 0.003437135389828175 +922 75 training.batch_size 2.0 +922 75 training.label_smoothing 0.0039511283729838954 +922 76 model.embedding_dim 0.0 +922 76 model.relation_dim 0.0 +922 76 
model.scoring_fct_norm 1.0 +922 76 optimizer.lr 0.06024141459247294 +922 76 training.batch_size 2.0 +922 76 training.label_smoothing 0.0022586777403582683 +922 77 model.embedding_dim 2.0 +922 77 model.relation_dim 2.0 +922 77 model.scoring_fct_norm 1.0 +922 77 optimizer.lr 0.006481992394782836 +922 77 training.batch_size 1.0 +922 77 training.label_smoothing 0.0010629253091661798 +922 78 model.embedding_dim 2.0 +922 78 model.relation_dim 0.0 +922 78 model.scoring_fct_norm 1.0 +922 78 optimizer.lr 0.010273207506910378 +922 78 training.batch_size 0.0 +922 78 training.label_smoothing 0.2843369244073695 +922 79 model.embedding_dim 0.0 +922 79 model.relation_dim 0.0 +922 79 model.scoring_fct_norm 2.0 +922 79 optimizer.lr 0.032839540848706064 +922 79 training.batch_size 0.0 +922 79 training.label_smoothing 0.018024690197201692 +922 80 model.embedding_dim 1.0 +922 80 model.relation_dim 2.0 +922 80 model.scoring_fct_norm 2.0 +922 80 optimizer.lr 0.0029792700539418232 +922 80 training.batch_size 1.0 +922 80 training.label_smoothing 0.1441719421541476 +922 81 model.embedding_dim 1.0 +922 81 model.relation_dim 0.0 +922 81 model.scoring_fct_norm 1.0 +922 81 optimizer.lr 0.005856313892139096 +922 81 training.batch_size 0.0 +922 81 training.label_smoothing 0.057178362520869476 +922 82 model.embedding_dim 0.0 +922 82 model.relation_dim 2.0 +922 82 model.scoring_fct_norm 1.0 +922 82 optimizer.lr 0.002607763811801706 +922 82 training.batch_size 2.0 +922 82 training.label_smoothing 0.01530281460047619 +922 83 model.embedding_dim 1.0 +922 83 model.relation_dim 1.0 +922 83 model.scoring_fct_norm 1.0 +922 83 optimizer.lr 0.051336092209682466 +922 83 training.batch_size 2.0 +922 83 training.label_smoothing 0.005723020969364951 +922 84 model.embedding_dim 0.0 +922 84 model.relation_dim 2.0 +922 84 model.scoring_fct_norm 1.0 +922 84 optimizer.lr 0.02951032890878059 +922 84 training.batch_size 1.0 +922 84 training.label_smoothing 0.24575533172971906 +922 85 model.embedding_dim 2.0 +922 85 
model.relation_dim 0.0 +922 85 model.scoring_fct_norm 1.0 +922 85 optimizer.lr 0.016005585081976078 +922 85 training.batch_size 2.0 +922 85 training.label_smoothing 0.07076469110518621 +922 86 model.embedding_dim 2.0 +922 86 model.relation_dim 2.0 +922 86 model.scoring_fct_norm 2.0 +922 86 optimizer.lr 0.0786675621435902 +922 86 training.batch_size 2.0 +922 86 training.label_smoothing 0.055035961915812386 +922 87 model.embedding_dim 1.0 +922 87 model.relation_dim 2.0 +922 87 model.scoring_fct_norm 2.0 +922 87 optimizer.lr 0.007979913491431845 +922 87 training.batch_size 2.0 +922 87 training.label_smoothing 0.02948583374867127 +922 88 model.embedding_dim 0.0 +922 88 model.relation_dim 2.0 +922 88 model.scoring_fct_norm 1.0 +922 88 optimizer.lr 0.003202881617647736 +922 88 training.batch_size 0.0 +922 88 training.label_smoothing 0.0014616853727552018 +922 89 model.embedding_dim 2.0 +922 89 model.relation_dim 2.0 +922 89 model.scoring_fct_norm 2.0 +922 89 optimizer.lr 0.01976545342730042 +922 89 training.batch_size 1.0 +922 89 training.label_smoothing 0.6861087294313828 +922 90 model.embedding_dim 2.0 +922 90 model.relation_dim 0.0 +922 90 model.scoring_fct_norm 1.0 +922 90 optimizer.lr 0.003600527081183095 +922 90 training.batch_size 2.0 +922 90 training.label_smoothing 0.8471962577735721 +922 91 model.embedding_dim 2.0 +922 91 model.relation_dim 2.0 +922 91 model.scoring_fct_norm 1.0 +922 91 optimizer.lr 0.009562605889536913 +922 91 training.batch_size 2.0 +922 91 training.label_smoothing 0.003567498666395529 +922 92 model.embedding_dim 1.0 +922 92 model.relation_dim 2.0 +922 92 model.scoring_fct_norm 1.0 +922 92 optimizer.lr 0.05700758652851159 +922 92 training.batch_size 0.0 +922 92 training.label_smoothing 0.0827468137209497 +922 93 model.embedding_dim 1.0 +922 93 model.relation_dim 2.0 +922 93 model.scoring_fct_norm 2.0 +922 93 optimizer.lr 0.057937689133574055 +922 93 training.batch_size 1.0 +922 93 training.label_smoothing 0.12091346149358467 +922 94 
model.embedding_dim 2.0 +922 94 model.relation_dim 0.0 +922 94 model.scoring_fct_norm 2.0 +922 94 optimizer.lr 0.00151861366456139 +922 94 training.batch_size 0.0 +922 94 training.label_smoothing 0.6633547939536327 +922 95 model.embedding_dim 2.0 +922 95 model.relation_dim 2.0 +922 95 model.scoring_fct_norm 1.0 +922 95 optimizer.lr 0.03231146998983258 +922 95 training.batch_size 2.0 +922 95 training.label_smoothing 0.0014637542099283998 +922 96 model.embedding_dim 2.0 +922 96 model.relation_dim 2.0 +922 96 model.scoring_fct_norm 1.0 +922 96 optimizer.lr 0.0038770148824779213 +922 96 training.batch_size 0.0 +922 96 training.label_smoothing 0.00870028576630576 +922 97 model.embedding_dim 1.0 +922 97 model.relation_dim 1.0 +922 97 model.scoring_fct_norm 1.0 +922 97 optimizer.lr 0.004118160998209626 +922 97 training.batch_size 1.0 +922 97 training.label_smoothing 0.19012387453227506 +922 98 model.embedding_dim 2.0 +922 98 model.relation_dim 0.0 +922 98 model.scoring_fct_norm 1.0 +922 98 optimizer.lr 0.002394556397085737 +922 98 training.batch_size 2.0 +922 98 training.label_smoothing 0.009241939876634062 +922 99 model.embedding_dim 0.0 +922 99 model.relation_dim 2.0 +922 99 model.scoring_fct_norm 1.0 +922 99 optimizer.lr 0.007620804131164688 +922 99 training.batch_size 1.0 +922 99 training.label_smoothing 0.0012957143809303776 +922 100 model.embedding_dim 2.0 +922 100 model.relation_dim 2.0 +922 100 model.scoring_fct_norm 1.0 +922 100 optimizer.lr 0.01818617246396253 +922 100 training.batch_size 1.0 +922 100 training.label_smoothing 0.015742059344421896 +922 1 dataset """kinships""" +922 1 model """transr""" +922 1 loss """softplus""" +922 1 regularizer """no""" +922 1 optimizer """adam""" +922 1 training_loop """lcwa""" +922 1 evaluator """rankbased""" +922 2 dataset """kinships""" +922 2 model """transr""" +922 2 loss """softplus""" +922 2 regularizer """no""" +922 2 optimizer """adam""" +922 2 training_loop """lcwa""" +922 2 evaluator """rankbased""" +922 3 dataset 
"""kinships""" +922 3 model """transr""" +922 3 loss """softplus""" +922 3 regularizer """no""" +922 3 optimizer """adam""" +922 3 training_loop """lcwa""" +922 3 evaluator """rankbased""" +922 4 dataset """kinships""" +922 4 model """transr""" +922 4 loss """softplus""" +922 4 regularizer """no""" +922 4 optimizer """adam""" +922 4 training_loop """lcwa""" +922 4 evaluator """rankbased""" +922 5 dataset """kinships""" +922 5 model """transr""" +922 5 loss """softplus""" +922 5 regularizer """no""" +922 5 optimizer """adam""" +922 5 training_loop """lcwa""" +922 5 evaluator """rankbased""" +922 6 dataset """kinships""" +922 6 model """transr""" +922 6 loss """softplus""" +922 6 regularizer """no""" +922 6 optimizer """adam""" +922 6 training_loop """lcwa""" +922 6 evaluator """rankbased""" +922 7 dataset """kinships""" +922 7 model """transr""" +922 7 loss """softplus""" +922 7 regularizer """no""" +922 7 optimizer """adam""" +922 7 training_loop """lcwa""" +922 7 evaluator """rankbased""" +922 8 dataset """kinships""" +922 8 model """transr""" +922 8 loss """softplus""" +922 8 regularizer """no""" +922 8 optimizer """adam""" +922 8 training_loop """lcwa""" +922 8 evaluator """rankbased""" +922 9 dataset """kinships""" +922 9 model """transr""" +922 9 loss """softplus""" +922 9 regularizer """no""" +922 9 optimizer """adam""" +922 9 training_loop """lcwa""" +922 9 evaluator """rankbased""" +922 10 dataset """kinships""" +922 10 model """transr""" +922 10 loss """softplus""" +922 10 regularizer """no""" +922 10 optimizer """adam""" +922 10 training_loop """lcwa""" +922 10 evaluator """rankbased""" +922 11 dataset """kinships""" +922 11 model """transr""" +922 11 loss """softplus""" +922 11 regularizer """no""" +922 11 optimizer """adam""" +922 11 training_loop """lcwa""" +922 11 evaluator """rankbased""" +922 12 dataset """kinships""" +922 12 model """transr""" +922 12 loss """softplus""" +922 12 regularizer """no""" +922 12 optimizer """adam""" +922 12 
training_loop """lcwa""" +922 12 evaluator """rankbased""" +922 13 dataset """kinships""" +922 13 model """transr""" +922 13 loss """softplus""" +922 13 regularizer """no""" +922 13 optimizer """adam""" +922 13 training_loop """lcwa""" +922 13 evaluator """rankbased""" +922 14 dataset """kinships""" +922 14 model """transr""" +922 14 loss """softplus""" +922 14 regularizer """no""" +922 14 optimizer """adam""" +922 14 training_loop """lcwa""" +922 14 evaluator """rankbased""" +922 15 dataset """kinships""" +922 15 model """transr""" +922 15 loss """softplus""" +922 15 regularizer """no""" +922 15 optimizer """adam""" +922 15 training_loop """lcwa""" +922 15 evaluator """rankbased""" +922 16 dataset """kinships""" +922 16 model """transr""" +922 16 loss """softplus""" +922 16 regularizer """no""" +922 16 optimizer """adam""" +922 16 training_loop """lcwa""" +922 16 evaluator """rankbased""" +922 17 dataset """kinships""" +922 17 model """transr""" +922 17 loss """softplus""" +922 17 regularizer """no""" +922 17 optimizer """adam""" +922 17 training_loop """lcwa""" +922 17 evaluator """rankbased""" +922 18 dataset """kinships""" +922 18 model """transr""" +922 18 loss """softplus""" +922 18 regularizer """no""" +922 18 optimizer """adam""" +922 18 training_loop """lcwa""" +922 18 evaluator """rankbased""" +922 19 dataset """kinships""" +922 19 model """transr""" +922 19 loss """softplus""" +922 19 regularizer """no""" +922 19 optimizer """adam""" +922 19 training_loop """lcwa""" +922 19 evaluator """rankbased""" +922 20 dataset """kinships""" +922 20 model """transr""" +922 20 loss """softplus""" +922 20 regularizer """no""" +922 20 optimizer """adam""" +922 20 training_loop """lcwa""" +922 20 evaluator """rankbased""" +922 21 dataset """kinships""" +922 21 model """transr""" +922 21 loss """softplus""" +922 21 regularizer """no""" +922 21 optimizer """adam""" +922 21 training_loop """lcwa""" +922 21 evaluator """rankbased""" +922 22 dataset """kinships""" +922 22 
model """transr""" +922 22 loss """softplus""" +922 22 regularizer """no""" +922 22 optimizer """adam""" +922 22 training_loop """lcwa""" +922 22 evaluator """rankbased""" +922 23 dataset """kinships""" +922 23 model """transr""" +922 23 loss """softplus""" +922 23 regularizer """no""" +922 23 optimizer """adam""" +922 23 training_loop """lcwa""" +922 23 evaluator """rankbased""" +922 24 dataset """kinships""" +922 24 model """transr""" +922 24 loss """softplus""" +922 24 regularizer """no""" +922 24 optimizer """adam""" +922 24 training_loop """lcwa""" +922 24 evaluator """rankbased""" +922 25 dataset """kinships""" +922 25 model """transr""" +922 25 loss """softplus""" +922 25 regularizer """no""" +922 25 optimizer """adam""" +922 25 training_loop """lcwa""" +922 25 evaluator """rankbased""" +922 26 dataset """kinships""" +922 26 model """transr""" +922 26 loss """softplus""" +922 26 regularizer """no""" +922 26 optimizer """adam""" +922 26 training_loop """lcwa""" +922 26 evaluator """rankbased""" +922 27 dataset """kinships""" +922 27 model """transr""" +922 27 loss """softplus""" +922 27 regularizer """no""" +922 27 optimizer """adam""" +922 27 training_loop """lcwa""" +922 27 evaluator """rankbased""" +922 28 dataset """kinships""" +922 28 model """transr""" +922 28 loss """softplus""" +922 28 regularizer """no""" +922 28 optimizer """adam""" +922 28 training_loop """lcwa""" +922 28 evaluator """rankbased""" +922 29 dataset """kinships""" +922 29 model """transr""" +922 29 loss """softplus""" +922 29 regularizer """no""" +922 29 optimizer """adam""" +922 29 training_loop """lcwa""" +922 29 evaluator """rankbased""" +922 30 dataset """kinships""" +922 30 model """transr""" +922 30 loss """softplus""" +922 30 regularizer """no""" +922 30 optimizer """adam""" +922 30 training_loop """lcwa""" +922 30 evaluator """rankbased""" +922 31 dataset """kinships""" +922 31 model """transr""" +922 31 loss """softplus""" +922 31 regularizer """no""" +922 31 optimizer 
"""adam""" +922 31 training_loop """lcwa""" +922 31 evaluator """rankbased""" +922 32 dataset """kinships""" +922 32 model """transr""" +922 32 loss """softplus""" +922 32 regularizer """no""" +922 32 optimizer """adam""" +922 32 training_loop """lcwa""" +922 32 evaluator """rankbased""" +922 33 dataset """kinships""" +922 33 model """transr""" +922 33 loss """softplus""" +922 33 regularizer """no""" +922 33 optimizer """adam""" +922 33 training_loop """lcwa""" +922 33 evaluator """rankbased""" +922 34 dataset """kinships""" +922 34 model """transr""" +922 34 loss """softplus""" +922 34 regularizer """no""" +922 34 optimizer """adam""" +922 34 training_loop """lcwa""" +922 34 evaluator """rankbased""" +922 35 dataset """kinships""" +922 35 model """transr""" +922 35 loss """softplus""" +922 35 regularizer """no""" +922 35 optimizer """adam""" +922 35 training_loop """lcwa""" +922 35 evaluator """rankbased""" +922 36 dataset """kinships""" +922 36 model """transr""" +922 36 loss """softplus""" +922 36 regularizer """no""" +922 36 optimizer """adam""" +922 36 training_loop """lcwa""" +922 36 evaluator """rankbased""" +922 37 dataset """kinships""" +922 37 model """transr""" +922 37 loss """softplus""" +922 37 regularizer """no""" +922 37 optimizer """adam""" +922 37 training_loop """lcwa""" +922 37 evaluator """rankbased""" +922 38 dataset """kinships""" +922 38 model """transr""" +922 38 loss """softplus""" +922 38 regularizer """no""" +922 38 optimizer """adam""" +922 38 training_loop """lcwa""" +922 38 evaluator """rankbased""" +922 39 dataset """kinships""" +922 39 model """transr""" +922 39 loss """softplus""" +922 39 regularizer """no""" +922 39 optimizer """adam""" +922 39 training_loop """lcwa""" +922 39 evaluator """rankbased""" +922 40 dataset """kinships""" +922 40 model """transr""" +922 40 loss """softplus""" +922 40 regularizer """no""" +922 40 optimizer """adam""" +922 40 training_loop """lcwa""" +922 40 evaluator """rankbased""" +922 41 dataset 
"""kinships""" +922 41 model """transr""" +922 41 loss """softplus""" +922 41 regularizer """no""" +922 41 optimizer """adam""" +922 41 training_loop """lcwa""" +922 41 evaluator """rankbased""" +922 42 dataset """kinships""" +922 42 model """transr""" +922 42 loss """softplus""" +922 42 regularizer """no""" +922 42 optimizer """adam""" +922 42 training_loop """lcwa""" +922 42 evaluator """rankbased""" +922 43 dataset """kinships""" +922 43 model """transr""" +922 43 loss """softplus""" +922 43 regularizer """no""" +922 43 optimizer """adam""" +922 43 training_loop """lcwa""" +922 43 evaluator """rankbased""" +922 44 dataset """kinships""" +922 44 model """transr""" +922 44 loss """softplus""" +922 44 regularizer """no""" +922 44 optimizer """adam""" +922 44 training_loop """lcwa""" +922 44 evaluator """rankbased""" +922 45 dataset """kinships""" +922 45 model """transr""" +922 45 loss """softplus""" +922 45 regularizer """no""" +922 45 optimizer """adam""" +922 45 training_loop """lcwa""" +922 45 evaluator """rankbased""" +922 46 dataset """kinships""" +922 46 model """transr""" +922 46 loss """softplus""" +922 46 regularizer """no""" +922 46 optimizer """adam""" +922 46 training_loop """lcwa""" +922 46 evaluator """rankbased""" +922 47 dataset """kinships""" +922 47 model """transr""" +922 47 loss """softplus""" +922 47 regularizer """no""" +922 47 optimizer """adam""" +922 47 training_loop """lcwa""" +922 47 evaluator """rankbased""" +922 48 dataset """kinships""" +922 48 model """transr""" +922 48 loss """softplus""" +922 48 regularizer """no""" +922 48 optimizer """adam""" +922 48 training_loop """lcwa""" +922 48 evaluator """rankbased""" +922 49 dataset """kinships""" +922 49 model """transr""" +922 49 loss """softplus""" +922 49 regularizer """no""" +922 49 optimizer """adam""" +922 49 training_loop """lcwa""" +922 49 evaluator """rankbased""" +922 50 dataset """kinships""" +922 50 model """transr""" +922 50 loss """softplus""" +922 50 regularizer """no""" 
+922 50 optimizer """adam""" +922 50 training_loop """lcwa""" +922 50 evaluator """rankbased""" +922 51 dataset """kinships""" +922 51 model """transr""" +922 51 loss """softplus""" +922 51 regularizer """no""" +922 51 optimizer """adam""" +922 51 training_loop """lcwa""" +922 51 evaluator """rankbased""" +922 52 dataset """kinships""" +922 52 model """transr""" +922 52 loss """softplus""" +922 52 regularizer """no""" +922 52 optimizer """adam""" +922 52 training_loop """lcwa""" +922 52 evaluator """rankbased""" +922 53 dataset """kinships""" +922 53 model """transr""" +922 53 loss """softplus""" +922 53 regularizer """no""" +922 53 optimizer """adam""" +922 53 training_loop """lcwa""" +922 53 evaluator """rankbased""" +922 54 dataset """kinships""" +922 54 model """transr""" +922 54 loss """softplus""" +922 54 regularizer """no""" +922 54 optimizer """adam""" +922 54 training_loop """lcwa""" +922 54 evaluator """rankbased""" +922 55 dataset """kinships""" +922 55 model """transr""" +922 55 loss """softplus""" +922 55 regularizer """no""" +922 55 optimizer """adam""" +922 55 training_loop """lcwa""" +922 55 evaluator """rankbased""" +922 56 dataset """kinships""" +922 56 model """transr""" +922 56 loss """softplus""" +922 56 regularizer """no""" +922 56 optimizer """adam""" +922 56 training_loop """lcwa""" +922 56 evaluator """rankbased""" +922 57 dataset """kinships""" +922 57 model """transr""" +922 57 loss """softplus""" +922 57 regularizer """no""" +922 57 optimizer """adam""" +922 57 training_loop """lcwa""" +922 57 evaluator """rankbased""" +922 58 dataset """kinships""" +922 58 model """transr""" +922 58 loss """softplus""" +922 58 regularizer """no""" +922 58 optimizer """adam""" +922 58 training_loop """lcwa""" +922 58 evaluator """rankbased""" +922 59 dataset """kinships""" +922 59 model """transr""" +922 59 loss """softplus""" +922 59 regularizer """no""" +922 59 optimizer """adam""" +922 59 training_loop """lcwa""" +922 59 evaluator """rankbased""" +922 
60 dataset """kinships""" +922 60 model """transr""" +922 60 loss """softplus""" +922 60 regularizer """no""" +922 60 optimizer """adam""" +922 60 training_loop """lcwa""" +922 60 evaluator """rankbased""" +922 61 dataset """kinships""" +922 61 model """transr""" +922 61 loss """softplus""" +922 61 regularizer """no""" +922 61 optimizer """adam""" +922 61 training_loop """lcwa""" +922 61 evaluator """rankbased""" +922 62 dataset """kinships""" +922 62 model """transr""" +922 62 loss """softplus""" +922 62 regularizer """no""" +922 62 optimizer """adam""" +922 62 training_loop """lcwa""" +922 62 evaluator """rankbased""" +922 63 dataset """kinships""" +922 63 model """transr""" +922 63 loss """softplus""" +922 63 regularizer """no""" +922 63 optimizer """adam""" +922 63 training_loop """lcwa""" +922 63 evaluator """rankbased""" +922 64 dataset """kinships""" +922 64 model """transr""" +922 64 loss """softplus""" +922 64 regularizer """no""" +922 64 optimizer """adam""" +922 64 training_loop """lcwa""" +922 64 evaluator """rankbased""" +922 65 dataset """kinships""" +922 65 model """transr""" +922 65 loss """softplus""" +922 65 regularizer """no""" +922 65 optimizer """adam""" +922 65 training_loop """lcwa""" +922 65 evaluator """rankbased""" +922 66 dataset """kinships""" +922 66 model """transr""" +922 66 loss """softplus""" +922 66 regularizer """no""" +922 66 optimizer """adam""" +922 66 training_loop """lcwa""" +922 66 evaluator """rankbased""" +922 67 dataset """kinships""" +922 67 model """transr""" +922 67 loss """softplus""" +922 67 regularizer """no""" +922 67 optimizer """adam""" +922 67 training_loop """lcwa""" +922 67 evaluator """rankbased""" +922 68 dataset """kinships""" +922 68 model """transr""" +922 68 loss """softplus""" +922 68 regularizer """no""" +922 68 optimizer """adam""" +922 68 training_loop """lcwa""" +922 68 evaluator """rankbased""" +922 69 dataset """kinships""" +922 69 model """transr""" +922 69 loss """softplus""" +922 69 regularizer 
"""no""" +922 69 optimizer """adam""" +922 69 training_loop """lcwa""" +922 69 evaluator """rankbased""" +922 70 dataset """kinships""" +922 70 model """transr""" +922 70 loss """softplus""" +922 70 regularizer """no""" +922 70 optimizer """adam""" +922 70 training_loop """lcwa""" +922 70 evaluator """rankbased""" +922 71 dataset """kinships""" +922 71 model """transr""" +922 71 loss """softplus""" +922 71 regularizer """no""" +922 71 optimizer """adam""" +922 71 training_loop """lcwa""" +922 71 evaluator """rankbased""" +922 72 dataset """kinships""" +922 72 model """transr""" +922 72 loss """softplus""" +922 72 regularizer """no""" +922 72 optimizer """adam""" +922 72 training_loop """lcwa""" +922 72 evaluator """rankbased""" +922 73 dataset """kinships""" +922 73 model """transr""" +922 73 loss """softplus""" +922 73 regularizer """no""" +922 73 optimizer """adam""" +922 73 training_loop """lcwa""" +922 73 evaluator """rankbased""" +922 74 dataset """kinships""" +922 74 model """transr""" +922 74 loss """softplus""" +922 74 regularizer """no""" +922 74 optimizer """adam""" +922 74 training_loop """lcwa""" +922 74 evaluator """rankbased""" +922 75 dataset """kinships""" +922 75 model """transr""" +922 75 loss """softplus""" +922 75 regularizer """no""" +922 75 optimizer """adam""" +922 75 training_loop """lcwa""" +922 75 evaluator """rankbased""" +922 76 dataset """kinships""" +922 76 model """transr""" +922 76 loss """softplus""" +922 76 regularizer """no""" +922 76 optimizer """adam""" +922 76 training_loop """lcwa""" +922 76 evaluator """rankbased""" +922 77 dataset """kinships""" +922 77 model """transr""" +922 77 loss """softplus""" +922 77 regularizer """no""" +922 77 optimizer """adam""" +922 77 training_loop """lcwa""" +922 77 evaluator """rankbased""" +922 78 dataset """kinships""" +922 78 model """transr""" +922 78 loss """softplus""" +922 78 regularizer """no""" +922 78 optimizer """adam""" +922 78 training_loop """lcwa""" +922 78 evaluator 
"""rankbased""" +922 79 dataset """kinships""" +922 79 model """transr""" +922 79 loss """softplus""" +922 79 regularizer """no""" +922 79 optimizer """adam""" +922 79 training_loop """lcwa""" +922 79 evaluator """rankbased""" +922 80 dataset """kinships""" +922 80 model """transr""" +922 80 loss """softplus""" +922 80 regularizer """no""" +922 80 optimizer """adam""" +922 80 training_loop """lcwa""" +922 80 evaluator """rankbased""" +922 81 dataset """kinships""" +922 81 model """transr""" +922 81 loss """softplus""" +922 81 regularizer """no""" +922 81 optimizer """adam""" +922 81 training_loop """lcwa""" +922 81 evaluator """rankbased""" +922 82 dataset """kinships""" +922 82 model """transr""" +922 82 loss """softplus""" +922 82 regularizer """no""" +922 82 optimizer """adam""" +922 82 training_loop """lcwa""" +922 82 evaluator """rankbased""" +922 83 dataset """kinships""" +922 83 model """transr""" +922 83 loss """softplus""" +922 83 regularizer """no""" +922 83 optimizer """adam""" +922 83 training_loop """lcwa""" +922 83 evaluator """rankbased""" +922 84 dataset """kinships""" +922 84 model """transr""" +922 84 loss """softplus""" +922 84 regularizer """no""" +922 84 optimizer """adam""" +922 84 training_loop """lcwa""" +922 84 evaluator """rankbased""" +922 85 dataset """kinships""" +922 85 model """transr""" +922 85 loss """softplus""" +922 85 regularizer """no""" +922 85 optimizer """adam""" +922 85 training_loop """lcwa""" +922 85 evaluator """rankbased""" +922 86 dataset """kinships""" +922 86 model """transr""" +922 86 loss """softplus""" +922 86 regularizer """no""" +922 86 optimizer """adam""" +922 86 training_loop """lcwa""" +922 86 evaluator """rankbased""" +922 87 dataset """kinships""" +922 87 model """transr""" +922 87 loss """softplus""" +922 87 regularizer """no""" +922 87 optimizer """adam""" +922 87 training_loop """lcwa""" +922 87 evaluator """rankbased""" +922 88 dataset """kinships""" +922 88 model """transr""" +922 88 loss 
"""softplus""" +922 88 regularizer """no""" +922 88 optimizer """adam""" +922 88 training_loop """lcwa""" +922 88 evaluator """rankbased""" +922 89 dataset """kinships""" +922 89 model """transr""" +922 89 loss """softplus""" +922 89 regularizer """no""" +922 89 optimizer """adam""" +922 89 training_loop """lcwa""" +922 89 evaluator """rankbased""" +922 90 dataset """kinships""" +922 90 model """transr""" +922 90 loss """softplus""" +922 90 regularizer """no""" +922 90 optimizer """adam""" +922 90 training_loop """lcwa""" +922 90 evaluator """rankbased""" +922 91 dataset """kinships""" +922 91 model """transr""" +922 91 loss """softplus""" +922 91 regularizer """no""" +922 91 optimizer """adam""" +922 91 training_loop """lcwa""" +922 91 evaluator """rankbased""" +922 92 dataset """kinships""" +922 92 model """transr""" +922 92 loss """softplus""" +922 92 regularizer """no""" +922 92 optimizer """adam""" +922 92 training_loop """lcwa""" +922 92 evaluator """rankbased""" +922 93 dataset """kinships""" +922 93 model """transr""" +922 93 loss """softplus""" +922 93 regularizer """no""" +922 93 optimizer """adam""" +922 93 training_loop """lcwa""" +922 93 evaluator """rankbased""" +922 94 dataset """kinships""" +922 94 model """transr""" +922 94 loss """softplus""" +922 94 regularizer """no""" +922 94 optimizer """adam""" +922 94 training_loop """lcwa""" +922 94 evaluator """rankbased""" +922 95 dataset """kinships""" +922 95 model """transr""" +922 95 loss """softplus""" +922 95 regularizer """no""" +922 95 optimizer """adam""" +922 95 training_loop """lcwa""" +922 95 evaluator """rankbased""" +922 96 dataset """kinships""" +922 96 model """transr""" +922 96 loss """softplus""" +922 96 regularizer """no""" +922 96 optimizer """adam""" +922 96 training_loop """lcwa""" +922 96 evaluator """rankbased""" +922 97 dataset """kinships""" +922 97 model """transr""" +922 97 loss """softplus""" +922 97 regularizer """no""" +922 97 optimizer """adam""" +922 97 training_loop 
"""lcwa""" +922 97 evaluator """rankbased""" +922 98 dataset """kinships""" +922 98 model """transr""" +922 98 loss """softplus""" +922 98 regularizer """no""" +922 98 optimizer """adam""" +922 98 training_loop """lcwa""" +922 98 evaluator """rankbased""" +922 99 dataset """kinships""" +922 99 model """transr""" +922 99 loss """softplus""" +922 99 regularizer """no""" +922 99 optimizer """adam""" +922 99 training_loop """lcwa""" +922 99 evaluator """rankbased""" +922 100 dataset """kinships""" +922 100 model """transr""" +922 100 loss """softplus""" +922 100 regularizer """no""" +922 100 optimizer """adam""" +922 100 training_loop """lcwa""" +922 100 evaluator """rankbased""" +923 1 model.embedding_dim 2.0 +923 1 model.relation_dim 1.0 +923 1 model.scoring_fct_norm 2.0 +923 1 optimizer.lr 0.027521864026783693 +923 1 training.batch_size 2.0 +923 1 training.label_smoothing 0.0027803264420080817 +923 2 model.embedding_dim 0.0 +923 2 model.relation_dim 1.0 +923 2 model.scoring_fct_norm 2.0 +923 2 optimizer.lr 0.011175300396409972 +923 2 training.batch_size 1.0 +923 2 training.label_smoothing 0.6638450414518213 +923 3 model.embedding_dim 1.0 +923 3 model.relation_dim 2.0 +923 3 model.scoring_fct_norm 1.0 +923 3 optimizer.lr 0.009827384097494129 +923 3 training.batch_size 1.0 +923 3 training.label_smoothing 0.011511685702717921 +923 4 model.embedding_dim 2.0 +923 4 model.relation_dim 1.0 +923 4 model.scoring_fct_norm 1.0 +923 4 optimizer.lr 0.02973819641896579 +923 4 training.batch_size 0.0 +923 4 training.label_smoothing 0.0197860135426221 +923 5 model.embedding_dim 2.0 +923 5 model.relation_dim 0.0 +923 5 model.scoring_fct_norm 2.0 +923 5 optimizer.lr 0.003650771394868724 +923 5 training.batch_size 1.0 +923 5 training.label_smoothing 0.003099850060891519 +923 6 model.embedding_dim 0.0 +923 6 model.relation_dim 0.0 +923 6 model.scoring_fct_norm 1.0 +923 6 optimizer.lr 0.004345244890443094 +923 6 training.batch_size 0.0 +923 6 training.label_smoothing 0.13249831626302325 
+923 7 model.embedding_dim 0.0 +923 7 model.relation_dim 1.0 +923 7 model.scoring_fct_norm 1.0 +923 7 optimizer.lr 0.007721322178555406 +923 7 training.batch_size 0.0 +923 7 training.label_smoothing 0.00601309076819029 +923 8 model.embedding_dim 1.0 +923 8 model.relation_dim 0.0 +923 8 model.scoring_fct_norm 1.0 +923 8 optimizer.lr 0.007541187245727105 +923 8 training.batch_size 1.0 +923 8 training.label_smoothing 0.4889499098317604 +923 9 model.embedding_dim 2.0 +923 9 model.relation_dim 1.0 +923 9 model.scoring_fct_norm 1.0 +923 9 optimizer.lr 0.004653800730098475 +923 9 training.batch_size 0.0 +923 9 training.label_smoothing 0.0011405041486450443 +923 10 model.embedding_dim 1.0 +923 10 model.relation_dim 0.0 +923 10 model.scoring_fct_norm 1.0 +923 10 optimizer.lr 0.01798512092083499 +923 10 training.batch_size 2.0 +923 10 training.label_smoothing 0.9770336376130906 +923 11 model.embedding_dim 1.0 +923 11 model.relation_dim 0.0 +923 11 model.scoring_fct_norm 1.0 +923 11 optimizer.lr 0.006810173521358367 +923 11 training.batch_size 2.0 +923 11 training.label_smoothing 0.44766843127630246 +923 12 model.embedding_dim 0.0 +923 12 model.relation_dim 0.0 +923 12 model.scoring_fct_norm 1.0 +923 12 optimizer.lr 0.005540711532894397 +923 12 training.batch_size 1.0 +923 12 training.label_smoothing 0.843484016445356 +923 13 model.embedding_dim 0.0 +923 13 model.relation_dim 1.0 +923 13 model.scoring_fct_norm 1.0 +923 13 optimizer.lr 0.021395694920457647 +923 13 training.batch_size 0.0 +923 13 training.label_smoothing 0.006972255150666609 +923 14 model.embedding_dim 1.0 +923 14 model.relation_dim 1.0 +923 14 model.scoring_fct_norm 1.0 +923 14 optimizer.lr 0.011855128038718995 +923 14 training.batch_size 1.0 +923 14 training.label_smoothing 0.12267638938771235 +923 15 model.embedding_dim 2.0 +923 15 model.relation_dim 0.0 +923 15 model.scoring_fct_norm 1.0 +923 15 optimizer.lr 0.0073143360548770155 +923 15 training.batch_size 0.0 +923 15 training.label_smoothing 
0.05023975452624919 +923 16 model.embedding_dim 1.0 +923 16 model.relation_dim 0.0 +923 16 model.scoring_fct_norm 1.0 +923 16 optimizer.lr 0.011208821587213605 +923 16 training.batch_size 1.0 +923 16 training.label_smoothing 0.6691749972469113 +923 17 model.embedding_dim 0.0 +923 17 model.relation_dim 0.0 +923 17 model.scoring_fct_norm 2.0 +923 17 optimizer.lr 0.013206982245924844 +923 17 training.batch_size 2.0 +923 17 training.label_smoothing 0.05106926934130208 +923 18 model.embedding_dim 2.0 +923 18 model.relation_dim 0.0 +923 18 model.scoring_fct_norm 2.0 +923 18 optimizer.lr 0.025636387494758318 +923 18 training.batch_size 0.0 +923 18 training.label_smoothing 0.0014968062195664347 +923 19 model.embedding_dim 0.0 +923 19 model.relation_dim 2.0 +923 19 model.scoring_fct_norm 1.0 +923 19 optimizer.lr 0.0026127309347530043 +923 19 training.batch_size 2.0 +923 19 training.label_smoothing 0.008110508802315905 +923 20 model.embedding_dim 0.0 +923 20 model.relation_dim 2.0 +923 20 model.scoring_fct_norm 1.0 +923 20 optimizer.lr 0.004728987463663759 +923 20 training.batch_size 0.0 +923 20 training.label_smoothing 0.008909915067262348 +923 21 model.embedding_dim 1.0 +923 21 model.relation_dim 1.0 +923 21 model.scoring_fct_norm 2.0 +923 21 optimizer.lr 0.02139379405287472 +923 21 training.batch_size 0.0 +923 21 training.label_smoothing 0.013290324889751922 +923 22 model.embedding_dim 1.0 +923 22 model.relation_dim 2.0 +923 22 model.scoring_fct_norm 1.0 +923 22 optimizer.lr 0.06284240764380133 +923 22 training.batch_size 2.0 +923 22 training.label_smoothing 0.0010630580376663195 +923 23 model.embedding_dim 0.0 +923 23 model.relation_dim 1.0 +923 23 model.scoring_fct_norm 2.0 +923 23 optimizer.lr 0.023389934573019477 +923 23 training.batch_size 0.0 +923 23 training.label_smoothing 0.09763392277654896 +923 24 model.embedding_dim 1.0 +923 24 model.relation_dim 2.0 +923 24 model.scoring_fct_norm 2.0 +923 24 optimizer.lr 0.0010802714236455605 +923 24 training.batch_size 1.0 
+923 24 training.label_smoothing 0.0013410101870887981 +923 25 model.embedding_dim 1.0 +923 25 model.relation_dim 1.0 +923 25 model.scoring_fct_norm 1.0 +923 25 optimizer.lr 0.011092979271497556 +923 25 training.batch_size 0.0 +923 25 training.label_smoothing 0.04660806337544859 +923 26 model.embedding_dim 1.0 +923 26 model.relation_dim 1.0 +923 26 model.scoring_fct_norm 2.0 +923 26 optimizer.lr 0.0982764273109168 +923 26 training.batch_size 2.0 +923 26 training.label_smoothing 0.002244137838004329 +923 27 model.embedding_dim 2.0 +923 27 model.relation_dim 1.0 +923 27 model.scoring_fct_norm 1.0 +923 27 optimizer.lr 0.002242214658774909 +923 27 training.batch_size 2.0 +923 27 training.label_smoothing 0.009742992581648202 +923 28 model.embedding_dim 1.0 +923 28 model.relation_dim 1.0 +923 28 model.scoring_fct_norm 2.0 +923 28 optimizer.lr 0.0011033358337401312 +923 28 training.batch_size 0.0 +923 28 training.label_smoothing 0.0037291660607598315 +923 29 model.embedding_dim 0.0 +923 29 model.relation_dim 0.0 +923 29 model.scoring_fct_norm 2.0 +923 29 optimizer.lr 0.011416984333164907 +923 29 training.batch_size 1.0 +923 29 training.label_smoothing 0.0010796851155538787 +923 30 model.embedding_dim 2.0 +923 30 model.relation_dim 0.0 +923 30 model.scoring_fct_norm 1.0 +923 30 optimizer.lr 0.005097020778354691 +923 30 training.batch_size 0.0 +923 30 training.label_smoothing 0.1029652735319709 +923 31 model.embedding_dim 2.0 +923 31 model.relation_dim 2.0 +923 31 model.scoring_fct_norm 1.0 +923 31 optimizer.lr 0.008495295064173599 +923 31 training.batch_size 1.0 +923 31 training.label_smoothing 0.005531963622759722 +923 32 model.embedding_dim 0.0 +923 32 model.relation_dim 2.0 +923 32 model.scoring_fct_norm 1.0 +923 32 optimizer.lr 0.004614797348714424 +923 32 training.batch_size 1.0 +923 32 training.label_smoothing 0.5378922910078109 +923 33 model.embedding_dim 1.0 +923 33 model.relation_dim 1.0 +923 33 model.scoring_fct_norm 2.0 +923 33 optimizer.lr 0.005923272412067823 
+923 33 training.batch_size 1.0 +923 33 training.label_smoothing 0.022648579067922784 +923 34 model.embedding_dim 0.0 +923 34 model.relation_dim 1.0 +923 34 model.scoring_fct_norm 1.0 +923 34 optimizer.lr 0.0017788045366317852 +923 34 training.batch_size 1.0 +923 34 training.label_smoothing 0.0075720494203558334 +923 35 model.embedding_dim 1.0 +923 35 model.relation_dim 1.0 +923 35 model.scoring_fct_norm 1.0 +923 35 optimizer.lr 0.003326803306299051 +923 35 training.batch_size 0.0 +923 35 training.label_smoothing 0.13328497819508592 +923 36 model.embedding_dim 0.0 +923 36 model.relation_dim 0.0 +923 36 model.scoring_fct_norm 1.0 +923 36 optimizer.lr 0.06552047371457906 +923 36 training.batch_size 1.0 +923 36 training.label_smoothing 0.12590125780837935 +923 37 model.embedding_dim 1.0 +923 37 model.relation_dim 1.0 +923 37 model.scoring_fct_norm 1.0 +923 37 optimizer.lr 0.009505310464947365 +923 37 training.batch_size 0.0 +923 37 training.label_smoothing 0.5930724788567714 +923 38 model.embedding_dim 1.0 +923 38 model.relation_dim 0.0 +923 38 model.scoring_fct_norm 2.0 +923 38 optimizer.lr 0.006392091189338538 +923 38 training.batch_size 1.0 +923 38 training.label_smoothing 0.07269929069328024 +923 39 model.embedding_dim 0.0 +923 39 model.relation_dim 0.0 +923 39 model.scoring_fct_norm 2.0 +923 39 optimizer.lr 0.0010938717018818675 +923 39 training.batch_size 1.0 +923 39 training.label_smoothing 0.6051434994861636 +923 40 model.embedding_dim 0.0 +923 40 model.relation_dim 0.0 +923 40 model.scoring_fct_norm 2.0 +923 40 optimizer.lr 0.009297959583146066 +923 40 training.batch_size 2.0 +923 40 training.label_smoothing 0.0014289782726104587 +923 41 model.embedding_dim 0.0 +923 41 model.relation_dim 2.0 +923 41 model.scoring_fct_norm 2.0 +923 41 optimizer.lr 0.007586560482837432 +923 41 training.batch_size 2.0 +923 41 training.label_smoothing 0.01867079783840055 +923 42 model.embedding_dim 0.0 +923 42 model.relation_dim 1.0 +923 42 model.scoring_fct_norm 2.0 +923 42 
optimizer.lr 0.0018403343148641269 +923 42 training.batch_size 1.0 +923 42 training.label_smoothing 0.014506812669004032 +923 43 model.embedding_dim 0.0 +923 43 model.relation_dim 0.0 +923 43 model.scoring_fct_norm 1.0 +923 43 optimizer.lr 0.043117294607701646 +923 43 training.batch_size 2.0 +923 43 training.label_smoothing 0.001502073200944103 +923 44 model.embedding_dim 1.0 +923 44 model.relation_dim 2.0 +923 44 model.scoring_fct_norm 2.0 +923 44 optimizer.lr 0.0016986665701971219 +923 44 training.batch_size 2.0 +923 44 training.label_smoothing 0.07059271218717846 +923 45 model.embedding_dim 1.0 +923 45 model.relation_dim 0.0 +923 45 model.scoring_fct_norm 1.0 +923 45 optimizer.lr 0.0010638594463006727 +923 45 training.batch_size 1.0 +923 45 training.label_smoothing 0.009642457448444488 +923 46 model.embedding_dim 0.0 +923 46 model.relation_dim 0.0 +923 46 model.scoring_fct_norm 1.0 +923 46 optimizer.lr 0.01287059195778149 +923 46 training.batch_size 0.0 +923 46 training.label_smoothing 0.003553580257172379 +923 47 model.embedding_dim 2.0 +923 47 model.relation_dim 1.0 +923 47 model.scoring_fct_norm 2.0 +923 47 optimizer.lr 0.03457336874544939 +923 47 training.batch_size 2.0 +923 47 training.label_smoothing 0.05611433968064247 +923 48 model.embedding_dim 0.0 +923 48 model.relation_dim 0.0 +923 48 model.scoring_fct_norm 1.0 +923 48 optimizer.lr 0.016851831778906604 +923 48 training.batch_size 2.0 +923 48 training.label_smoothing 0.6924937101285946 +923 49 model.embedding_dim 1.0 +923 49 model.relation_dim 1.0 +923 49 model.scoring_fct_norm 1.0 +923 49 optimizer.lr 0.01749727297590976 +923 49 training.batch_size 0.0 +923 49 training.label_smoothing 0.006725238958741786 +923 50 model.embedding_dim 2.0 +923 50 model.relation_dim 2.0 +923 50 model.scoring_fct_norm 2.0 +923 50 optimizer.lr 0.0012493322340120018 +923 50 training.batch_size 2.0 +923 50 training.label_smoothing 0.005222606717581591 +923 51 model.embedding_dim 0.0 +923 51 model.relation_dim 1.0 +923 51 
model.scoring_fct_norm 1.0 +923 51 optimizer.lr 0.00283388940810584 +923 51 training.batch_size 2.0 +923 51 training.label_smoothing 0.01901659970052117 +923 52 model.embedding_dim 1.0 +923 52 model.relation_dim 0.0 +923 52 model.scoring_fct_norm 1.0 +923 52 optimizer.lr 0.017082933998588186 +923 52 training.batch_size 2.0 +923 52 training.label_smoothing 0.22410917406305034 +923 53 model.embedding_dim 1.0 +923 53 model.relation_dim 2.0 +923 53 model.scoring_fct_norm 1.0 +923 53 optimizer.lr 0.0014096179027594615 +923 53 training.batch_size 1.0 +923 53 training.label_smoothing 0.013476563596478506 +923 54 model.embedding_dim 2.0 +923 54 model.relation_dim 1.0 +923 54 model.scoring_fct_norm 2.0 +923 54 optimizer.lr 0.060500904785857026 +923 54 training.batch_size 0.0 +923 54 training.label_smoothing 0.10373660167383106 +923 55 model.embedding_dim 2.0 +923 55 model.relation_dim 0.0 +923 55 model.scoring_fct_norm 1.0 +923 55 optimizer.lr 0.001712563383930855 +923 55 training.batch_size 1.0 +923 55 training.label_smoothing 0.9407165945411767 +923 56 model.embedding_dim 1.0 +923 56 model.relation_dim 2.0 +923 56 model.scoring_fct_norm 1.0 +923 56 optimizer.lr 0.003604018212668081 +923 56 training.batch_size 2.0 +923 56 training.label_smoothing 0.0010244686395422338 +923 57 model.embedding_dim 2.0 +923 57 model.relation_dim 2.0 +923 57 model.scoring_fct_norm 1.0 +923 57 optimizer.lr 0.002038486947775796 +923 57 training.batch_size 1.0 +923 57 training.label_smoothing 0.006838672492230084 +923 58 model.embedding_dim 2.0 +923 58 model.relation_dim 1.0 +923 58 model.scoring_fct_norm 1.0 +923 58 optimizer.lr 0.008311625158852877 +923 58 training.batch_size 0.0 +923 58 training.label_smoothing 0.5253525429429838 +923 59 model.embedding_dim 2.0 +923 59 model.relation_dim 0.0 +923 59 model.scoring_fct_norm 1.0 +923 59 optimizer.lr 0.010454800233026796 +923 59 training.batch_size 2.0 +923 59 training.label_smoothing 0.0200476161877693 +923 60 model.embedding_dim 0.0 +923 60 
model.relation_dim 2.0 +923 60 model.scoring_fct_norm 2.0 +923 60 optimizer.lr 0.003084492134113864 +923 60 training.batch_size 1.0 +923 60 training.label_smoothing 0.11539825049547232 +923 61 model.embedding_dim 1.0 +923 61 model.relation_dim 2.0 +923 61 model.scoring_fct_norm 1.0 +923 61 optimizer.lr 0.010005523730919251 +923 61 training.batch_size 2.0 +923 61 training.label_smoothing 0.0017928297942859256 +923 62 model.embedding_dim 0.0 +923 62 model.relation_dim 2.0 +923 62 model.scoring_fct_norm 2.0 +923 62 optimizer.lr 0.0012841198119726504 +923 62 training.batch_size 0.0 +923 62 training.label_smoothing 0.001388866071929769 +923 63 model.embedding_dim 0.0 +923 63 model.relation_dim 2.0 +923 63 model.scoring_fct_norm 2.0 +923 63 optimizer.lr 0.001469523676408077 +923 63 training.batch_size 0.0 +923 63 training.label_smoothing 0.012791260142065837 +923 64 model.embedding_dim 0.0 +923 64 model.relation_dim 2.0 +923 64 model.scoring_fct_norm 1.0 +923 64 optimizer.lr 0.0017294070246793743 +923 64 training.batch_size 1.0 +923 64 training.label_smoothing 0.6387351740142068 +923 65 model.embedding_dim 1.0 +923 65 model.relation_dim 2.0 +923 65 model.scoring_fct_norm 2.0 +923 65 optimizer.lr 0.004382917017811599 +923 65 training.batch_size 1.0 +923 65 training.label_smoothing 0.9926525422819518 +923 66 model.embedding_dim 1.0 +923 66 model.relation_dim 2.0 +923 66 model.scoring_fct_norm 1.0 +923 66 optimizer.lr 0.08999114452386886 +923 66 training.batch_size 1.0 +923 66 training.label_smoothing 0.013301581504540071 +923 67 model.embedding_dim 2.0 +923 67 model.relation_dim 1.0 +923 67 model.scoring_fct_norm 1.0 +923 67 optimizer.lr 0.03316912249490151 +923 67 training.batch_size 0.0 +923 67 training.label_smoothing 0.37423550051512605 +923 68 model.embedding_dim 0.0 +923 68 model.relation_dim 0.0 +923 68 model.scoring_fct_norm 2.0 +923 68 optimizer.lr 0.005998730380525145 +923 68 training.batch_size 2.0 +923 68 training.label_smoothing 0.0057174926735127874 +923 69 
model.embedding_dim 2.0 +923 69 model.relation_dim 1.0 +923 69 model.scoring_fct_norm 2.0 +923 69 optimizer.lr 0.01418577701092145 +923 69 training.batch_size 2.0 +923 69 training.label_smoothing 0.18782455616982832 +923 70 model.embedding_dim 2.0 +923 70 model.relation_dim 1.0 +923 70 model.scoring_fct_norm 2.0 +923 70 optimizer.lr 0.07079891696385247 +923 70 training.batch_size 2.0 +923 70 training.label_smoothing 0.2139163623707364 +923 71 model.embedding_dim 1.0 +923 71 model.relation_dim 1.0 +923 71 model.scoring_fct_norm 2.0 +923 71 optimizer.lr 0.00747130562750673 +923 71 training.batch_size 1.0 +923 71 training.label_smoothing 0.003469694002219601 +923 72 model.embedding_dim 2.0 +923 72 model.relation_dim 2.0 +923 72 model.scoring_fct_norm 2.0 +923 72 optimizer.lr 0.005394464643816887 +923 72 training.batch_size 2.0 +923 72 training.label_smoothing 0.022458833483111063 +923 73 model.embedding_dim 1.0 +923 73 model.relation_dim 0.0 +923 73 model.scoring_fct_norm 2.0 +923 73 optimizer.lr 0.08609426000614796 +923 73 training.batch_size 2.0 +923 73 training.label_smoothing 0.1777057883364195 +923 74 model.embedding_dim 1.0 +923 74 model.relation_dim 2.0 +923 74 model.scoring_fct_norm 2.0 +923 74 optimizer.lr 0.0470693465632267 +923 74 training.batch_size 1.0 +923 74 training.label_smoothing 0.01020801172961547 +923 75 model.embedding_dim 2.0 +923 75 model.relation_dim 0.0 +923 75 model.scoring_fct_norm 2.0 +923 75 optimizer.lr 0.011137336931630933 +923 75 training.batch_size 2.0 +923 75 training.label_smoothing 0.0016209865297483743 +923 76 model.embedding_dim 0.0 +923 76 model.relation_dim 0.0 +923 76 model.scoring_fct_norm 2.0 +923 76 optimizer.lr 0.01921987423755869 +923 76 training.batch_size 2.0 +923 76 training.label_smoothing 0.0068359796977310455 +923 77 model.embedding_dim 2.0 +923 77 model.relation_dim 0.0 +923 77 model.scoring_fct_norm 1.0 +923 77 optimizer.lr 0.00329485956110866 +923 77 training.batch_size 2.0 +923 77 training.label_smoothing 
0.01493794153744743 +923 78 model.embedding_dim 0.0 +923 78 model.relation_dim 1.0 +923 78 model.scoring_fct_norm 1.0 +923 78 optimizer.lr 0.0868872258448504 +923 78 training.batch_size 1.0 +923 78 training.label_smoothing 0.08066592105274933 +923 79 model.embedding_dim 1.0 +923 79 model.relation_dim 2.0 +923 79 model.scoring_fct_norm 2.0 +923 79 optimizer.lr 0.0018919835831735727 +923 79 training.batch_size 1.0 +923 79 training.label_smoothing 0.1821330739356837 +923 80 model.embedding_dim 0.0 +923 80 model.relation_dim 2.0 +923 80 model.scoring_fct_norm 2.0 +923 80 optimizer.lr 0.045346684692161195 +923 80 training.batch_size 1.0 +923 80 training.label_smoothing 0.02277606172972352 +923 81 model.embedding_dim 0.0 +923 81 model.relation_dim 0.0 +923 81 model.scoring_fct_norm 2.0 +923 81 optimizer.lr 0.003497313807694489 +923 81 training.batch_size 1.0 +923 81 training.label_smoothing 0.6142279641074507 +923 82 model.embedding_dim 0.0 +923 82 model.relation_dim 0.0 +923 82 model.scoring_fct_norm 2.0 +923 82 optimizer.lr 0.022300544649400757 +923 82 training.batch_size 2.0 +923 82 training.label_smoothing 0.029586751670345815 +923 83 model.embedding_dim 2.0 +923 83 model.relation_dim 0.0 +923 83 model.scoring_fct_norm 1.0 +923 83 optimizer.lr 0.05315011930196602 +923 83 training.batch_size 0.0 +923 83 training.label_smoothing 0.0032109439342892254 +923 84 model.embedding_dim 1.0 +923 84 model.relation_dim 0.0 +923 84 model.scoring_fct_norm 2.0 +923 84 optimizer.lr 0.0020577212138520305 +923 84 training.batch_size 1.0 +923 84 training.label_smoothing 0.002869977641522546 +923 85 model.embedding_dim 2.0 +923 85 model.relation_dim 1.0 +923 85 model.scoring_fct_norm 2.0 +923 85 optimizer.lr 0.03217074838939797 +923 85 training.batch_size 1.0 +923 85 training.label_smoothing 0.09744228578226262 +923 86 model.embedding_dim 0.0 +923 86 model.relation_dim 1.0 +923 86 model.scoring_fct_norm 1.0 +923 86 optimizer.lr 0.002512212784895987 +923 86 training.batch_size 2.0 +923 86 
training.label_smoothing 0.01846748458528897 +923 87 model.embedding_dim 0.0 +923 87 model.relation_dim 1.0 +923 87 model.scoring_fct_norm 2.0 +923 87 optimizer.lr 0.0024214372921850407 +923 87 training.batch_size 2.0 +923 87 training.label_smoothing 0.689096015182315 +923 88 model.embedding_dim 2.0 +923 88 model.relation_dim 1.0 +923 88 model.scoring_fct_norm 2.0 +923 88 optimizer.lr 0.021964260821533896 +923 88 training.batch_size 1.0 +923 88 training.label_smoothing 0.011927207297928783 +923 89 model.embedding_dim 1.0 +923 89 model.relation_dim 1.0 +923 89 model.scoring_fct_norm 2.0 +923 89 optimizer.lr 0.0011596171557843888 +923 89 training.batch_size 1.0 +923 89 training.label_smoothing 0.8155332537810251 +923 90 model.embedding_dim 0.0 +923 90 model.relation_dim 0.0 +923 90 model.scoring_fct_norm 2.0 +923 90 optimizer.lr 0.005846392200935694 +923 90 training.batch_size 1.0 +923 90 training.label_smoothing 0.3808469756315041 +923 91 model.embedding_dim 2.0 +923 91 model.relation_dim 1.0 +923 91 model.scoring_fct_norm 2.0 +923 91 optimizer.lr 0.09332450613490535 +923 91 training.batch_size 1.0 +923 91 training.label_smoothing 0.021238059834669375 +923 92 model.embedding_dim 0.0 +923 92 model.relation_dim 0.0 +923 92 model.scoring_fct_norm 2.0 +923 92 optimizer.lr 0.0010447312854562048 +923 92 training.batch_size 0.0 +923 92 training.label_smoothing 0.18920760892235872 +923 93 model.embedding_dim 2.0 +923 93 model.relation_dim 0.0 +923 93 model.scoring_fct_norm 2.0 +923 93 optimizer.lr 0.027740338472847764 +923 93 training.batch_size 1.0 +923 93 training.label_smoothing 0.003614200481438076 +923 94 model.embedding_dim 2.0 +923 94 model.relation_dim 0.0 +923 94 model.scoring_fct_norm 1.0 +923 94 optimizer.lr 0.001159245285586715 +923 94 training.batch_size 0.0 +923 94 training.label_smoothing 0.1666003379763161 +923 95 model.embedding_dim 2.0 +923 95 model.relation_dim 0.0 +923 95 model.scoring_fct_norm 2.0 +923 95 optimizer.lr 0.004021783592714386 +923 95 
training.batch_size 1.0 +923 95 training.label_smoothing 0.0028217053883982635 +923 96 model.embedding_dim 2.0 +923 96 model.relation_dim 0.0 +923 96 model.scoring_fct_norm 1.0 +923 96 optimizer.lr 0.033221581750186635 +923 96 training.batch_size 1.0 +923 96 training.label_smoothing 0.007299272594385916 +923 97 model.embedding_dim 1.0 +923 97 model.relation_dim 0.0 +923 97 model.scoring_fct_norm 2.0 +923 97 optimizer.lr 0.01639229355392095 +923 97 training.batch_size 1.0 +923 97 training.label_smoothing 0.0014698087339394506 +923 98 model.embedding_dim 2.0 +923 98 model.relation_dim 1.0 +923 98 model.scoring_fct_norm 1.0 +923 98 optimizer.lr 0.03329593816618317 +923 98 training.batch_size 0.0 +923 98 training.label_smoothing 0.0029826677363795403 +923 99 model.embedding_dim 1.0 +923 99 model.relation_dim 0.0 +923 99 model.scoring_fct_norm 1.0 +923 99 optimizer.lr 0.008098716374889027 +923 99 training.batch_size 2.0 +923 99 training.label_smoothing 0.02897274067290107 +923 100 model.embedding_dim 0.0 +923 100 model.relation_dim 0.0 +923 100 model.scoring_fct_norm 1.0 +923 100 optimizer.lr 0.008107721122300165 +923 100 training.batch_size 2.0 +923 100 training.label_smoothing 0.011287194597144381 +923 1 dataset """kinships""" +923 1 model """transr""" +923 1 loss """bceaftersigmoid""" +923 1 regularizer """no""" +923 1 optimizer """adam""" +923 1 training_loop """lcwa""" +923 1 evaluator """rankbased""" +923 2 dataset """kinships""" +923 2 model """transr""" +923 2 loss """bceaftersigmoid""" +923 2 regularizer """no""" +923 2 optimizer """adam""" +923 2 training_loop """lcwa""" +923 2 evaluator """rankbased""" +923 3 dataset """kinships""" +923 3 model """transr""" +923 3 loss """bceaftersigmoid""" +923 3 regularizer """no""" +923 3 optimizer """adam""" +923 3 training_loop """lcwa""" +923 3 evaluator """rankbased""" +923 4 dataset """kinships""" +923 4 model """transr""" +923 4 loss """bceaftersigmoid""" +923 4 regularizer """no""" +923 4 optimizer """adam""" +923 4 
training_loop """lcwa""" +923 4 evaluator """rankbased""" +923 5 dataset """kinships""" +923 5 model """transr""" +923 5 loss """bceaftersigmoid""" +923 5 regularizer """no""" +923 5 optimizer """adam""" +923 5 training_loop """lcwa""" +923 5 evaluator """rankbased""" +923 6 dataset """kinships""" +923 6 model """transr""" +923 6 loss """bceaftersigmoid""" +923 6 regularizer """no""" +923 6 optimizer """adam""" +923 6 training_loop """lcwa""" +923 6 evaluator """rankbased""" +923 7 dataset """kinships""" +923 7 model """transr""" +923 7 loss """bceaftersigmoid""" +923 7 regularizer """no""" +923 7 optimizer """adam""" +923 7 training_loop """lcwa""" +923 7 evaluator """rankbased""" +923 8 dataset """kinships""" +923 8 model """transr""" +923 8 loss """bceaftersigmoid""" +923 8 regularizer """no""" +923 8 optimizer """adam""" +923 8 training_loop """lcwa""" +923 8 evaluator """rankbased""" +923 9 dataset """kinships""" +923 9 model """transr""" +923 9 loss """bceaftersigmoid""" +923 9 regularizer """no""" +923 9 optimizer """adam""" +923 9 training_loop """lcwa""" +923 9 evaluator """rankbased""" +923 10 dataset """kinships""" +923 10 model """transr""" +923 10 loss """bceaftersigmoid""" +923 10 regularizer """no""" +923 10 optimizer """adam""" +923 10 training_loop """lcwa""" +923 10 evaluator """rankbased""" +923 11 dataset """kinships""" +923 11 model """transr""" +923 11 loss """bceaftersigmoid""" +923 11 regularizer """no""" +923 11 optimizer """adam""" +923 11 training_loop """lcwa""" +923 11 evaluator """rankbased""" +923 12 dataset """kinships""" +923 12 model """transr""" +923 12 loss """bceaftersigmoid""" +923 12 regularizer """no""" +923 12 optimizer """adam""" +923 12 training_loop """lcwa""" +923 12 evaluator """rankbased""" +923 13 dataset """kinships""" +923 13 model """transr""" +923 13 loss """bceaftersigmoid""" +923 13 regularizer """no""" +923 13 optimizer """adam""" +923 13 training_loop """lcwa""" +923 13 evaluator """rankbased""" +923 14 
dataset """kinships""" +923 14 model """transr""" +923 14 loss """bceaftersigmoid""" +923 14 regularizer """no""" +923 14 optimizer """adam""" +923 14 training_loop """lcwa""" +923 14 evaluator """rankbased""" +923 15 dataset """kinships""" +923 15 model """transr""" +923 15 loss """bceaftersigmoid""" +923 15 regularizer """no""" +923 15 optimizer """adam""" +923 15 training_loop """lcwa""" +923 15 evaluator """rankbased""" +923 16 dataset """kinships""" +923 16 model """transr""" +923 16 loss """bceaftersigmoid""" +923 16 regularizer """no""" +923 16 optimizer """adam""" +923 16 training_loop """lcwa""" +923 16 evaluator """rankbased""" +923 17 dataset """kinships""" +923 17 model """transr""" +923 17 loss """bceaftersigmoid""" +923 17 regularizer """no""" +923 17 optimizer """adam""" +923 17 training_loop """lcwa""" +923 17 evaluator """rankbased""" +923 18 dataset """kinships""" +923 18 model """transr""" +923 18 loss """bceaftersigmoid""" +923 18 regularizer """no""" +923 18 optimizer """adam""" +923 18 training_loop """lcwa""" +923 18 evaluator """rankbased""" +923 19 dataset """kinships""" +923 19 model """transr""" +923 19 loss """bceaftersigmoid""" +923 19 regularizer """no""" +923 19 optimizer """adam""" +923 19 training_loop """lcwa""" +923 19 evaluator """rankbased""" +923 20 dataset """kinships""" +923 20 model """transr""" +923 20 loss """bceaftersigmoid""" +923 20 regularizer """no""" +923 20 optimizer """adam""" +923 20 training_loop """lcwa""" +923 20 evaluator """rankbased""" +923 21 dataset """kinships""" +923 21 model """transr""" +923 21 loss """bceaftersigmoid""" +923 21 regularizer """no""" +923 21 optimizer """adam""" +923 21 training_loop """lcwa""" +923 21 evaluator """rankbased""" +923 22 dataset """kinships""" +923 22 model """transr""" +923 22 loss """bceaftersigmoid""" +923 22 regularizer """no""" +923 22 optimizer """adam""" +923 22 training_loop """lcwa""" +923 22 evaluator """rankbased""" +923 23 dataset """kinships""" +923 23 model 
"""transr""" +923 23 loss """bceaftersigmoid""" +923 23 regularizer """no""" +923 23 optimizer """adam""" +923 23 training_loop """lcwa""" +923 23 evaluator """rankbased""" +923 24 dataset """kinships""" +923 24 model """transr""" +923 24 loss """bceaftersigmoid""" +923 24 regularizer """no""" +923 24 optimizer """adam""" +923 24 training_loop """lcwa""" +923 24 evaluator """rankbased""" +923 25 dataset """kinships""" +923 25 model """transr""" +923 25 loss """bceaftersigmoid""" +923 25 regularizer """no""" +923 25 optimizer """adam""" +923 25 training_loop """lcwa""" +923 25 evaluator """rankbased""" +923 26 dataset """kinships""" +923 26 model """transr""" +923 26 loss """bceaftersigmoid""" +923 26 regularizer """no""" +923 26 optimizer """adam""" +923 26 training_loop """lcwa""" +923 26 evaluator """rankbased""" +923 27 dataset """kinships""" +923 27 model """transr""" +923 27 loss """bceaftersigmoid""" +923 27 regularizer """no""" +923 27 optimizer """adam""" +923 27 training_loop """lcwa""" +923 27 evaluator """rankbased""" +923 28 dataset """kinships""" +923 28 model """transr""" +923 28 loss """bceaftersigmoid""" +923 28 regularizer """no""" +923 28 optimizer """adam""" +923 28 training_loop """lcwa""" +923 28 evaluator """rankbased""" +923 29 dataset """kinships""" +923 29 model """transr""" +923 29 loss """bceaftersigmoid""" +923 29 regularizer """no""" +923 29 optimizer """adam""" +923 29 training_loop """lcwa""" +923 29 evaluator """rankbased""" +923 30 dataset """kinships""" +923 30 model """transr""" +923 30 loss """bceaftersigmoid""" +923 30 regularizer """no""" +923 30 optimizer """adam""" +923 30 training_loop """lcwa""" +923 30 evaluator """rankbased""" +923 31 dataset """kinships""" +923 31 model """transr""" +923 31 loss """bceaftersigmoid""" +923 31 regularizer """no""" +923 31 optimizer """adam""" +923 31 training_loop """lcwa""" +923 31 evaluator """rankbased""" +923 32 dataset """kinships""" +923 32 model """transr""" +923 32 loss 
"""bceaftersigmoid""" +923 32 regularizer """no""" +923 32 optimizer """adam""" +923 32 training_loop """lcwa""" +923 32 evaluator """rankbased""" +923 33 dataset """kinships""" +923 33 model """transr""" +923 33 loss """bceaftersigmoid""" +923 33 regularizer """no""" +923 33 optimizer """adam""" +923 33 training_loop """lcwa""" +923 33 evaluator """rankbased""" +923 34 dataset """kinships""" +923 34 model """transr""" +923 34 loss """bceaftersigmoid""" +923 34 regularizer """no""" +923 34 optimizer """adam""" +923 34 training_loop """lcwa""" +923 34 evaluator """rankbased""" +923 35 dataset """kinships""" +923 35 model """transr""" +923 35 loss """bceaftersigmoid""" +923 35 regularizer """no""" +923 35 optimizer """adam""" +923 35 training_loop """lcwa""" +923 35 evaluator """rankbased""" +923 36 dataset """kinships""" +923 36 model """transr""" +923 36 loss """bceaftersigmoid""" +923 36 regularizer """no""" +923 36 optimizer """adam""" +923 36 training_loop """lcwa""" +923 36 evaluator """rankbased""" +923 37 dataset """kinships""" +923 37 model """transr""" +923 37 loss """bceaftersigmoid""" +923 37 regularizer """no""" +923 37 optimizer """adam""" +923 37 training_loop """lcwa""" +923 37 evaluator """rankbased""" +923 38 dataset """kinships""" +923 38 model """transr""" +923 38 loss """bceaftersigmoid""" +923 38 regularizer """no""" +923 38 optimizer """adam""" +923 38 training_loop """lcwa""" +923 38 evaluator """rankbased""" +923 39 dataset """kinships""" +923 39 model """transr""" +923 39 loss """bceaftersigmoid""" +923 39 regularizer """no""" +923 39 optimizer """adam""" +923 39 training_loop """lcwa""" +923 39 evaluator """rankbased""" +923 40 dataset """kinships""" +923 40 model """transr""" +923 40 loss """bceaftersigmoid""" +923 40 regularizer """no""" +923 40 optimizer """adam""" +923 40 training_loop """lcwa""" +923 40 evaluator """rankbased""" +923 41 dataset """kinships""" +923 41 model """transr""" +923 41 loss """bceaftersigmoid""" +923 41 
regularizer """no""" +923 41 optimizer """adam""" +923 41 training_loop """lcwa""" +923 41 evaluator """rankbased""" +923 42 dataset """kinships""" +923 42 model """transr""" +923 42 loss """bceaftersigmoid""" +923 42 regularizer """no""" +923 42 optimizer """adam""" +923 42 training_loop """lcwa""" +923 42 evaluator """rankbased""" +923 43 dataset """kinships""" +923 43 model """transr""" +923 43 loss """bceaftersigmoid""" +923 43 regularizer """no""" +923 43 optimizer """adam""" +923 43 training_loop """lcwa""" +923 43 evaluator """rankbased""" +923 44 dataset """kinships""" +923 44 model """transr""" +923 44 loss """bceaftersigmoid""" +923 44 regularizer """no""" +923 44 optimizer """adam""" +923 44 training_loop """lcwa""" +923 44 evaluator """rankbased""" +923 45 dataset """kinships""" +923 45 model """transr""" +923 45 loss """bceaftersigmoid""" +923 45 regularizer """no""" +923 45 optimizer """adam""" +923 45 training_loop """lcwa""" +923 45 evaluator """rankbased""" +923 46 dataset """kinships""" +923 46 model """transr""" +923 46 loss """bceaftersigmoid""" +923 46 regularizer """no""" +923 46 optimizer """adam""" +923 46 training_loop """lcwa""" +923 46 evaluator """rankbased""" +923 47 dataset """kinships""" +923 47 model """transr""" +923 47 loss """bceaftersigmoid""" +923 47 regularizer """no""" +923 47 optimizer """adam""" +923 47 training_loop """lcwa""" +923 47 evaluator """rankbased""" +923 48 dataset """kinships""" +923 48 model """transr""" +923 48 loss """bceaftersigmoid""" +923 48 regularizer """no""" +923 48 optimizer """adam""" +923 48 training_loop """lcwa""" +923 48 evaluator """rankbased""" +923 49 dataset """kinships""" +923 49 model """transr""" +923 49 loss """bceaftersigmoid""" +923 49 regularizer """no""" +923 49 optimizer """adam""" +923 49 training_loop """lcwa""" +923 49 evaluator """rankbased""" +923 50 dataset """kinships""" +923 50 model """transr""" +923 50 loss """bceaftersigmoid""" +923 50 regularizer """no""" +923 50 
optimizer """adam""" +923 50 training_loop """lcwa""" +923 50 evaluator """rankbased""" +923 51 dataset """kinships""" +923 51 model """transr""" +923 51 loss """bceaftersigmoid""" +923 51 regularizer """no""" +923 51 optimizer """adam""" +923 51 training_loop """lcwa""" +923 51 evaluator """rankbased""" +923 52 dataset """kinships""" +923 52 model """transr""" +923 52 loss """bceaftersigmoid""" +923 52 regularizer """no""" +923 52 optimizer """adam""" +923 52 training_loop """lcwa""" +923 52 evaluator """rankbased""" +923 53 dataset """kinships""" +923 53 model """transr""" +923 53 loss """bceaftersigmoid""" +923 53 regularizer """no""" +923 53 optimizer """adam""" +923 53 training_loop """lcwa""" +923 53 evaluator """rankbased""" +923 54 dataset """kinships""" +923 54 model """transr""" +923 54 loss """bceaftersigmoid""" +923 54 regularizer """no""" +923 54 optimizer """adam""" +923 54 training_loop """lcwa""" +923 54 evaluator """rankbased""" +923 55 dataset """kinships""" +923 55 model """transr""" +923 55 loss """bceaftersigmoid""" +923 55 regularizer """no""" +923 55 optimizer """adam""" +923 55 training_loop """lcwa""" +923 55 evaluator """rankbased""" +923 56 dataset """kinships""" +923 56 model """transr""" +923 56 loss """bceaftersigmoid""" +923 56 regularizer """no""" +923 56 optimizer """adam""" +923 56 training_loop """lcwa""" +923 56 evaluator """rankbased""" +923 57 dataset """kinships""" +923 57 model """transr""" +923 57 loss """bceaftersigmoid""" +923 57 regularizer """no""" +923 57 optimizer """adam""" +923 57 training_loop """lcwa""" +923 57 evaluator """rankbased""" +923 58 dataset """kinships""" +923 58 model """transr""" +923 58 loss """bceaftersigmoid""" +923 58 regularizer """no""" +923 58 optimizer """adam""" +923 58 training_loop """lcwa""" +923 58 evaluator """rankbased""" +923 59 dataset """kinships""" +923 59 model """transr""" +923 59 loss """bceaftersigmoid""" +923 59 regularizer """no""" +923 59 optimizer """adam""" +923 59 
training_loop """lcwa""" +923 59 evaluator """rankbased""" +923 60 dataset """kinships""" +923 60 model """transr""" +923 60 loss """bceaftersigmoid""" +923 60 regularizer """no""" +923 60 optimizer """adam""" +923 60 training_loop """lcwa""" +923 60 evaluator """rankbased""" +923 61 dataset """kinships""" +923 61 model """transr""" +923 61 loss """bceaftersigmoid""" +923 61 regularizer """no""" +923 61 optimizer """adam""" +923 61 training_loop """lcwa""" +923 61 evaluator """rankbased""" +923 62 dataset """kinships""" +923 62 model """transr""" +923 62 loss """bceaftersigmoid""" +923 62 regularizer """no""" +923 62 optimizer """adam""" +923 62 training_loop """lcwa""" +923 62 evaluator """rankbased""" +923 63 dataset """kinships""" +923 63 model """transr""" +923 63 loss """bceaftersigmoid""" +923 63 regularizer """no""" +923 63 optimizer """adam""" +923 63 training_loop """lcwa""" +923 63 evaluator """rankbased""" +923 64 dataset """kinships""" +923 64 model """transr""" +923 64 loss """bceaftersigmoid""" +923 64 regularizer """no""" +923 64 optimizer """adam""" +923 64 training_loop """lcwa""" +923 64 evaluator """rankbased""" +923 65 dataset """kinships""" +923 65 model """transr""" +923 65 loss """bceaftersigmoid""" +923 65 regularizer """no""" +923 65 optimizer """adam""" +923 65 training_loop """lcwa""" +923 65 evaluator """rankbased""" +923 66 dataset """kinships""" +923 66 model """transr""" +923 66 loss """bceaftersigmoid""" +923 66 regularizer """no""" +923 66 optimizer """adam""" +923 66 training_loop """lcwa""" +923 66 evaluator """rankbased""" +923 67 dataset """kinships""" +923 67 model """transr""" +923 67 loss """bceaftersigmoid""" +923 67 regularizer """no""" +923 67 optimizer """adam""" +923 67 training_loop """lcwa""" +923 67 evaluator """rankbased""" +923 68 dataset """kinships""" +923 68 model """transr""" +923 68 loss """bceaftersigmoid""" +923 68 regularizer """no""" +923 68 optimizer """adam""" +923 68 training_loop """lcwa""" +923 68 
evaluator """rankbased""" +923 69 dataset """kinships""" +923 69 model """transr""" +923 69 loss """bceaftersigmoid""" +923 69 regularizer """no""" +923 69 optimizer """adam""" +923 69 training_loop """lcwa""" +923 69 evaluator """rankbased""" +923 70 dataset """kinships""" +923 70 model """transr""" +923 70 loss """bceaftersigmoid""" +923 70 regularizer """no""" +923 70 optimizer """adam""" +923 70 training_loop """lcwa""" +923 70 evaluator """rankbased""" +923 71 dataset """kinships""" +923 71 model """transr""" +923 71 loss """bceaftersigmoid""" +923 71 regularizer """no""" +923 71 optimizer """adam""" +923 71 training_loop """lcwa""" +923 71 evaluator """rankbased""" +923 72 dataset """kinships""" +923 72 model """transr""" +923 72 loss """bceaftersigmoid""" +923 72 regularizer """no""" +923 72 optimizer """adam""" +923 72 training_loop """lcwa""" +923 72 evaluator """rankbased""" +923 73 dataset """kinships""" +923 73 model """transr""" +923 73 loss """bceaftersigmoid""" +923 73 regularizer """no""" +923 73 optimizer """adam""" +923 73 training_loop """lcwa""" +923 73 evaluator """rankbased""" +923 74 dataset """kinships""" +923 74 model """transr""" +923 74 loss """bceaftersigmoid""" +923 74 regularizer """no""" +923 74 optimizer """adam""" +923 74 training_loop """lcwa""" +923 74 evaluator """rankbased""" +923 75 dataset """kinships""" +923 75 model """transr""" +923 75 loss """bceaftersigmoid""" +923 75 regularizer """no""" +923 75 optimizer """adam""" +923 75 training_loop """lcwa""" +923 75 evaluator """rankbased""" +923 76 dataset """kinships""" +923 76 model """transr""" +923 76 loss """bceaftersigmoid""" +923 76 regularizer """no""" +923 76 optimizer """adam""" +923 76 training_loop """lcwa""" +923 76 evaluator """rankbased""" +923 77 dataset """kinships""" +923 77 model """transr""" +923 77 loss """bceaftersigmoid""" +923 77 regularizer """no""" +923 77 optimizer """adam""" +923 77 training_loop """lcwa""" +923 77 evaluator """rankbased""" +923 78 
dataset """kinships""" +923 78 model """transr""" +923 78 loss """bceaftersigmoid""" +923 78 regularizer """no""" +923 78 optimizer """adam""" +923 78 training_loop """lcwa""" +923 78 evaluator """rankbased""" +923 79 dataset """kinships""" +923 79 model """transr""" +923 79 loss """bceaftersigmoid""" +923 79 regularizer """no""" +923 79 optimizer """adam""" +923 79 training_loop """lcwa""" +923 79 evaluator """rankbased""" +923 80 dataset """kinships""" +923 80 model """transr""" +923 80 loss """bceaftersigmoid""" +923 80 regularizer """no""" +923 80 optimizer """adam""" +923 80 training_loop """lcwa""" +923 80 evaluator """rankbased""" +923 81 dataset """kinships""" +923 81 model """transr""" +923 81 loss """bceaftersigmoid""" +923 81 regularizer """no""" +923 81 optimizer """adam""" +923 81 training_loop """lcwa""" +923 81 evaluator """rankbased""" +923 82 dataset """kinships""" +923 82 model """transr""" +923 82 loss """bceaftersigmoid""" +923 82 regularizer """no""" +923 82 optimizer """adam""" +923 82 training_loop """lcwa""" +923 82 evaluator """rankbased""" +923 83 dataset """kinships""" +923 83 model """transr""" +923 83 loss """bceaftersigmoid""" +923 83 regularizer """no""" +923 83 optimizer """adam""" +923 83 training_loop """lcwa""" +923 83 evaluator """rankbased""" +923 84 dataset """kinships""" +923 84 model """transr""" +923 84 loss """bceaftersigmoid""" +923 84 regularizer """no""" +923 84 optimizer """adam""" +923 84 training_loop """lcwa""" +923 84 evaluator """rankbased""" +923 85 dataset """kinships""" +923 85 model """transr""" +923 85 loss """bceaftersigmoid""" +923 85 regularizer """no""" +923 85 optimizer """adam""" +923 85 training_loop """lcwa""" +923 85 evaluator """rankbased""" +923 86 dataset """kinships""" +923 86 model """transr""" +923 86 loss """bceaftersigmoid""" +923 86 regularizer """no""" +923 86 optimizer """adam""" +923 86 training_loop """lcwa""" +923 86 evaluator """rankbased""" +923 87 dataset """kinships""" +923 87 model 
"""transr""" +923 87 loss """bceaftersigmoid""" +923 87 regularizer """no""" +923 87 optimizer """adam""" +923 87 training_loop """lcwa""" +923 87 evaluator """rankbased""" +923 88 dataset """kinships""" +923 88 model """transr""" +923 88 loss """bceaftersigmoid""" +923 88 regularizer """no""" +923 88 optimizer """adam""" +923 88 training_loop """lcwa""" +923 88 evaluator """rankbased""" +923 89 dataset """kinships""" +923 89 model """transr""" +923 89 loss """bceaftersigmoid""" +923 89 regularizer """no""" +923 89 optimizer """adam""" +923 89 training_loop """lcwa""" +923 89 evaluator """rankbased""" +923 90 dataset """kinships""" +923 90 model """transr""" +923 90 loss """bceaftersigmoid""" +923 90 regularizer """no""" +923 90 optimizer """adam""" +923 90 training_loop """lcwa""" +923 90 evaluator """rankbased""" +923 91 dataset """kinships""" +923 91 model """transr""" +923 91 loss """bceaftersigmoid""" +923 91 regularizer """no""" +923 91 optimizer """adam""" +923 91 training_loop """lcwa""" +923 91 evaluator """rankbased""" +923 92 dataset """kinships""" +923 92 model """transr""" +923 92 loss """bceaftersigmoid""" +923 92 regularizer """no""" +923 92 optimizer """adam""" +923 92 training_loop """lcwa""" +923 92 evaluator """rankbased""" +923 93 dataset """kinships""" +923 93 model """transr""" +923 93 loss """bceaftersigmoid""" +923 93 regularizer """no""" +923 93 optimizer """adam""" +923 93 training_loop """lcwa""" +923 93 evaluator """rankbased""" +923 94 dataset """kinships""" +923 94 model """transr""" +923 94 loss """bceaftersigmoid""" +923 94 regularizer """no""" +923 94 optimizer """adam""" +923 94 training_loop """lcwa""" +923 94 evaluator """rankbased""" +923 95 dataset """kinships""" +923 95 model """transr""" +923 95 loss """bceaftersigmoid""" +923 95 regularizer """no""" +923 95 optimizer """adam""" +923 95 training_loop """lcwa""" +923 95 evaluator """rankbased""" +923 96 dataset """kinships""" +923 96 model """transr""" +923 96 loss 
"""bceaftersigmoid""" +923 96 regularizer """no""" +923 96 optimizer """adam""" +923 96 training_loop """lcwa""" +923 96 evaluator """rankbased""" +923 97 dataset """kinships""" +923 97 model """transr""" +923 97 loss """bceaftersigmoid""" +923 97 regularizer """no""" +923 97 optimizer """adam""" +923 97 training_loop """lcwa""" +923 97 evaluator """rankbased""" +923 98 dataset """kinships""" +923 98 model """transr""" +923 98 loss """bceaftersigmoid""" +923 98 regularizer """no""" +923 98 optimizer """adam""" +923 98 training_loop """lcwa""" +923 98 evaluator """rankbased""" +923 99 dataset """kinships""" +923 99 model """transr""" +923 99 loss """bceaftersigmoid""" +923 99 regularizer """no""" +923 99 optimizer """adam""" +923 99 training_loop """lcwa""" +923 99 evaluator """rankbased""" +923 100 dataset """kinships""" +923 100 model """transr""" +923 100 loss """bceaftersigmoid""" +923 100 regularizer """no""" +923 100 optimizer """adam""" +923 100 training_loop """lcwa""" +923 100 evaluator """rankbased""" +924 1 model.embedding_dim 2.0 +924 1 model.relation_dim 0.0 +924 1 model.scoring_fct_norm 1.0 +924 1 optimizer.lr 0.024309404668086926 +924 1 training.batch_size 2.0 +924 1 training.label_smoothing 0.0871640290441204 +924 2 model.embedding_dim 0.0 +924 2 model.relation_dim 2.0 +924 2 model.scoring_fct_norm 2.0 +924 2 optimizer.lr 0.009090018007406045 +924 2 training.batch_size 0.0 +924 2 training.label_smoothing 0.0597923266070356 +924 3 model.embedding_dim 1.0 +924 3 model.relation_dim 0.0 +924 3 model.scoring_fct_norm 2.0 +924 3 optimizer.lr 0.009314895459698708 +924 3 training.batch_size 0.0 +924 3 training.label_smoothing 0.07551612511317993 +924 4 model.embedding_dim 0.0 +924 4 model.relation_dim 2.0 +924 4 model.scoring_fct_norm 2.0 +924 4 optimizer.lr 0.059757060768910734 +924 4 training.batch_size 2.0 +924 4 training.label_smoothing 0.002204067077215925 +924 5 model.embedding_dim 1.0 +924 5 model.relation_dim 0.0 +924 5 model.scoring_fct_norm 1.0 
+924 5 optimizer.lr 0.0010576312618570307 +924 5 training.batch_size 0.0 +924 5 training.label_smoothing 0.35204750604756535 +924 6 model.embedding_dim 0.0 +924 6 model.relation_dim 0.0 +924 6 model.scoring_fct_norm 1.0 +924 6 optimizer.lr 0.013243532608580506 +924 6 training.batch_size 0.0 +924 6 training.label_smoothing 0.024368982465716706 +924 7 model.embedding_dim 0.0 +924 7 model.relation_dim 2.0 +924 7 model.scoring_fct_norm 2.0 +924 7 optimizer.lr 0.08338276551184615 +924 7 training.batch_size 2.0 +924 7 training.label_smoothing 0.2102012565138144 +924 8 model.embedding_dim 0.0 +924 8 model.relation_dim 2.0 +924 8 model.scoring_fct_norm 1.0 +924 8 optimizer.lr 0.011755828716414691 +924 8 training.batch_size 1.0 +924 8 training.label_smoothing 0.05205296377127541 +924 9 model.embedding_dim 1.0 +924 9 model.relation_dim 1.0 +924 9 model.scoring_fct_norm 1.0 +924 9 optimizer.lr 0.037015272538907004 +924 9 training.batch_size 1.0 +924 9 training.label_smoothing 0.010945029441984477 +924 10 model.embedding_dim 2.0 +924 10 model.relation_dim 0.0 +924 10 model.scoring_fct_norm 1.0 +924 10 optimizer.lr 0.029096832806295585 +924 10 training.batch_size 1.0 +924 10 training.label_smoothing 0.0012610551337929174 +924 11 model.embedding_dim 1.0 +924 11 model.relation_dim 0.0 +924 11 model.scoring_fct_norm 1.0 +924 11 optimizer.lr 0.0010315168795518323 +924 11 training.batch_size 2.0 +924 11 training.label_smoothing 0.007651132003748474 +924 12 model.embedding_dim 2.0 +924 12 model.relation_dim 1.0 +924 12 model.scoring_fct_norm 2.0 +924 12 optimizer.lr 0.03304765384152071 +924 12 training.batch_size 2.0 +924 12 training.label_smoothing 0.010529572872295218 +924 13 model.embedding_dim 0.0 +924 13 model.relation_dim 1.0 +924 13 model.scoring_fct_norm 2.0 +924 13 optimizer.lr 0.08594899793679005 +924 13 training.batch_size 1.0 +924 13 training.label_smoothing 0.00621601596500766 +924 14 model.embedding_dim 1.0 +924 14 model.relation_dim 0.0 +924 14 model.scoring_fct_norm 
1.0 +924 14 optimizer.lr 0.007234764128725956 +924 14 training.batch_size 0.0 +924 14 training.label_smoothing 0.009794191869628441 +924 15 model.embedding_dim 2.0 +924 15 model.relation_dim 0.0 +924 15 model.scoring_fct_norm 2.0 +924 15 optimizer.lr 0.013221416777952993 +924 15 training.batch_size 0.0 +924 15 training.label_smoothing 0.016381005957325463 +924 16 model.embedding_dim 0.0 +924 16 model.relation_dim 2.0 +924 16 model.scoring_fct_norm 2.0 +924 16 optimizer.lr 0.0016503032248106329 +924 16 training.batch_size 1.0 +924 16 training.label_smoothing 0.014925603251879208 +924 17 model.embedding_dim 0.0 +924 17 model.relation_dim 1.0 +924 17 model.scoring_fct_norm 2.0 +924 17 optimizer.lr 0.059329833519789654 +924 17 training.batch_size 2.0 +924 17 training.label_smoothing 0.029069337597319004 +924 18 model.embedding_dim 2.0 +924 18 model.relation_dim 0.0 +924 18 model.scoring_fct_norm 1.0 +924 18 optimizer.lr 0.05904322221044671 +924 18 training.batch_size 0.0 +924 18 training.label_smoothing 0.016244487210099808 +924 19 model.embedding_dim 2.0 +924 19 model.relation_dim 2.0 +924 19 model.scoring_fct_norm 1.0 +924 19 optimizer.lr 0.012712738416967153 +924 19 training.batch_size 0.0 +924 19 training.label_smoothing 0.9890800634774255 +924 20 model.embedding_dim 1.0 +924 20 model.relation_dim 1.0 +924 20 model.scoring_fct_norm 2.0 +924 20 optimizer.lr 0.003901788405056323 +924 20 training.batch_size 2.0 +924 20 training.label_smoothing 0.21180263492576354 +924 21 model.embedding_dim 1.0 +924 21 model.relation_dim 1.0 +924 21 model.scoring_fct_norm 2.0 +924 21 optimizer.lr 0.006287242879837975 +924 21 training.batch_size 0.0 +924 21 training.label_smoothing 0.36671403015745574 +924 22 model.embedding_dim 1.0 +924 22 model.relation_dim 2.0 +924 22 model.scoring_fct_norm 2.0 +924 22 optimizer.lr 0.058239576354417816 +924 22 training.batch_size 2.0 +924 22 training.label_smoothing 0.04556813332826265 +924 23 model.embedding_dim 2.0 +924 23 model.relation_dim 1.0 
+924 23 model.scoring_fct_norm 2.0 +924 23 optimizer.lr 0.014246670477129228 +924 23 training.batch_size 2.0 +924 23 training.label_smoothing 0.0024709927718868084 +924 24 model.embedding_dim 1.0 +924 24 model.relation_dim 2.0 +924 24 model.scoring_fct_norm 2.0 +924 24 optimizer.lr 0.005746224959788347 +924 24 training.batch_size 1.0 +924 24 training.label_smoothing 0.010939596002367621 +924 25 model.embedding_dim 1.0 +924 25 model.relation_dim 2.0 +924 25 model.scoring_fct_norm 2.0 +924 25 optimizer.lr 0.0013164392496370237 +924 25 training.batch_size 1.0 +924 25 training.label_smoothing 0.026646140950128645 +924 26 model.embedding_dim 2.0 +924 26 model.relation_dim 2.0 +924 26 model.scoring_fct_norm 2.0 +924 26 optimizer.lr 0.03533902453602534 +924 26 training.batch_size 1.0 +924 26 training.label_smoothing 0.043531107767373546 +924 27 model.embedding_dim 0.0 +924 27 model.relation_dim 0.0 +924 27 model.scoring_fct_norm 2.0 +924 27 optimizer.lr 0.011570701000402136 +924 27 training.batch_size 0.0 +924 27 training.label_smoothing 0.0012417841527779159 +924 28 model.embedding_dim 0.0 +924 28 model.relation_dim 0.0 +924 28 model.scoring_fct_norm 2.0 +924 28 optimizer.lr 0.003973478650167585 +924 28 training.batch_size 0.0 +924 28 training.label_smoothing 0.1396026579975869 +924 29 model.embedding_dim 1.0 +924 29 model.relation_dim 2.0 +924 29 model.scoring_fct_norm 2.0 +924 29 optimizer.lr 0.03075944267288597 +924 29 training.batch_size 0.0 +924 29 training.label_smoothing 0.16561164541887033 +924 30 model.embedding_dim 2.0 +924 30 model.relation_dim 0.0 +924 30 model.scoring_fct_norm 1.0 +924 30 optimizer.lr 0.026969927821117682 +924 30 training.batch_size 1.0 +924 30 training.label_smoothing 0.006021829467765013 +924 31 model.embedding_dim 0.0 +924 31 model.relation_dim 0.0 +924 31 model.scoring_fct_norm 2.0 +924 31 optimizer.lr 0.009131092284253716 +924 31 training.batch_size 2.0 +924 31 training.label_smoothing 0.030218432839240403 +924 32 model.embedding_dim 
1.0 +924 32 model.relation_dim 0.0 +924 32 model.scoring_fct_norm 2.0 +924 32 optimizer.lr 0.0041810814767370185 +924 32 training.batch_size 2.0 +924 32 training.label_smoothing 0.00735014727444328 +924 33 model.embedding_dim 2.0 +924 33 model.relation_dim 1.0 +924 33 model.scoring_fct_norm 1.0 +924 33 optimizer.lr 0.00630019700409031 +924 33 training.batch_size 0.0 +924 33 training.label_smoothing 0.9596327302128598 +924 34 model.embedding_dim 2.0 +924 34 model.relation_dim 2.0 +924 34 model.scoring_fct_norm 2.0 +924 34 optimizer.lr 0.060633363669662016 +924 34 training.batch_size 2.0 +924 34 training.label_smoothing 0.23128438627859566 +924 35 model.embedding_dim 1.0 +924 35 model.relation_dim 2.0 +924 35 model.scoring_fct_norm 1.0 +924 35 optimizer.lr 0.03589941926124751 +924 35 training.batch_size 0.0 +924 35 training.label_smoothing 0.7862015428110232 +924 36 model.embedding_dim 0.0 +924 36 model.relation_dim 2.0 +924 36 model.scoring_fct_norm 2.0 +924 36 optimizer.lr 0.039968018597837204 +924 36 training.batch_size 1.0 +924 36 training.label_smoothing 0.0022349878433278888 +924 37 model.embedding_dim 1.0 +924 37 model.relation_dim 0.0 +924 37 model.scoring_fct_norm 2.0 +924 37 optimizer.lr 0.051037616252636045 +924 37 training.batch_size 2.0 +924 37 training.label_smoothing 0.00818639835312467 +924 38 model.embedding_dim 2.0 +924 38 model.relation_dim 0.0 +924 38 model.scoring_fct_norm 2.0 +924 38 optimizer.lr 0.0016788027792146371 +924 38 training.batch_size 0.0 +924 38 training.label_smoothing 0.11203268143953059 +924 39 model.embedding_dim 1.0 +924 39 model.relation_dim 1.0 +924 39 model.scoring_fct_norm 2.0 +924 39 optimizer.lr 0.04367859775121307 +924 39 training.batch_size 0.0 +924 39 training.label_smoothing 0.33411697399208956 +924 40 model.embedding_dim 2.0 +924 40 model.relation_dim 0.0 +924 40 model.scoring_fct_norm 1.0 +924 40 optimizer.lr 0.06041649212043117 +924 40 training.batch_size 0.0 +924 40 training.label_smoothing 0.565800431286236 +924 
41 model.embedding_dim 0.0 +924 41 model.relation_dim 0.0 +924 41 model.scoring_fct_norm 2.0 +924 41 optimizer.lr 0.008864285996964425 +924 41 training.batch_size 1.0 +924 41 training.label_smoothing 0.007406375759682451 +924 42 model.embedding_dim 1.0 +924 42 model.relation_dim 0.0 +924 42 model.scoring_fct_norm 2.0 +924 42 optimizer.lr 0.018825502619266778 +924 42 training.batch_size 2.0 +924 42 training.label_smoothing 0.004971660262799154 +924 43 model.embedding_dim 2.0 +924 43 model.relation_dim 0.0 +924 43 model.scoring_fct_norm 1.0 +924 43 optimizer.lr 0.00763717664509508 +924 43 training.batch_size 1.0 +924 43 training.label_smoothing 0.0017348412101149977 +924 44 model.embedding_dim 1.0 +924 44 model.relation_dim 0.0 +924 44 model.scoring_fct_norm 1.0 +924 44 optimizer.lr 0.016074495356006014 +924 44 training.batch_size 0.0 +924 44 training.label_smoothing 0.0018982273799218118 +924 45 model.embedding_dim 0.0 +924 45 model.relation_dim 0.0 +924 45 model.scoring_fct_norm 1.0 +924 45 optimizer.lr 0.0042098594242820295 +924 45 training.batch_size 2.0 +924 45 training.label_smoothing 0.5248319232390721 +924 46 model.embedding_dim 1.0 +924 46 model.relation_dim 2.0 +924 46 model.scoring_fct_norm 2.0 +924 46 optimizer.lr 0.020891961554700536 +924 46 training.batch_size 2.0 +924 46 training.label_smoothing 0.03994430706290018 +924 47 model.embedding_dim 1.0 +924 47 model.relation_dim 2.0 +924 47 model.scoring_fct_norm 2.0 +924 47 optimizer.lr 0.016434425993022965 +924 47 training.batch_size 0.0 +924 47 training.label_smoothing 0.008110517291069175 +924 48 model.embedding_dim 0.0 +924 48 model.relation_dim 2.0 +924 48 model.scoring_fct_norm 2.0 +924 48 optimizer.lr 0.0015207884700883182 +924 48 training.batch_size 1.0 +924 48 training.label_smoothing 0.058348571410730896 +924 49 model.embedding_dim 2.0 +924 49 model.relation_dim 1.0 +924 49 model.scoring_fct_norm 1.0 +924 49 optimizer.lr 0.022332815461441065 +924 49 training.batch_size 2.0 +924 49 
training.label_smoothing 0.21991094871450206 +924 50 model.embedding_dim 2.0 +924 50 model.relation_dim 1.0 +924 50 model.scoring_fct_norm 1.0 +924 50 optimizer.lr 0.08945393928025634 +924 50 training.batch_size 1.0 +924 50 training.label_smoothing 0.019825775121358597 +924 51 model.embedding_dim 2.0 +924 51 model.relation_dim 1.0 +924 51 model.scoring_fct_norm 1.0 +924 51 optimizer.lr 0.001460005190080468 +924 51 training.batch_size 2.0 +924 51 training.label_smoothing 0.37371451879575673 +924 52 model.embedding_dim 0.0 +924 52 model.relation_dim 2.0 +924 52 model.scoring_fct_norm 1.0 +924 52 optimizer.lr 0.0012298461912342122 +924 52 training.batch_size 1.0 +924 52 training.label_smoothing 0.005885472744915779 +924 53 model.embedding_dim 2.0 +924 53 model.relation_dim 0.0 +924 53 model.scoring_fct_norm 2.0 +924 53 optimizer.lr 0.0026148862194518107 +924 53 training.batch_size 0.0 +924 53 training.label_smoothing 0.3161601325766473 +924 54 model.embedding_dim 1.0 +924 54 model.relation_dim 0.0 +924 54 model.scoring_fct_norm 1.0 +924 54 optimizer.lr 0.016640577857126127 +924 54 training.batch_size 2.0 +924 54 training.label_smoothing 0.004366533047321151 +924 55 model.embedding_dim 2.0 +924 55 model.relation_dim 1.0 +924 55 model.scoring_fct_norm 2.0 +924 55 optimizer.lr 0.05997224533815328 +924 55 training.batch_size 0.0 +924 55 training.label_smoothing 0.002369370347058509 +924 56 model.embedding_dim 2.0 +924 56 model.relation_dim 1.0 +924 56 model.scoring_fct_norm 2.0 +924 56 optimizer.lr 0.06753110318636106 +924 56 training.batch_size 0.0 +924 56 training.label_smoothing 0.003786180155629305 +924 57 model.embedding_dim 1.0 +924 57 model.relation_dim 2.0 +924 57 model.scoring_fct_norm 1.0 +924 57 optimizer.lr 0.0012038412039886933 +924 57 training.batch_size 2.0 +924 57 training.label_smoothing 0.3914035092322701 +924 58 model.embedding_dim 0.0 +924 58 model.relation_dim 0.0 +924 58 model.scoring_fct_norm 1.0 +924 58 optimizer.lr 0.02537249530194251 +924 58 
training.batch_size 2.0 +924 58 training.label_smoothing 0.027113155005945022 +924 59 model.embedding_dim 0.0 +924 59 model.relation_dim 1.0 +924 59 model.scoring_fct_norm 1.0 +924 59 optimizer.lr 0.001673170451999063 +924 59 training.batch_size 0.0 +924 59 training.label_smoothing 0.005421874032619754 +924 60 model.embedding_dim 1.0 +924 60 model.relation_dim 2.0 +924 60 model.scoring_fct_norm 2.0 +924 60 optimizer.lr 0.00782370078745761 +924 60 training.batch_size 2.0 +924 60 training.label_smoothing 0.05289191293644906 +924 61 model.embedding_dim 0.0 +924 61 model.relation_dim 1.0 +924 61 model.scoring_fct_norm 1.0 +924 61 optimizer.lr 0.0014729863550000204 +924 61 training.batch_size 2.0 +924 61 training.label_smoothing 0.9748579089065981 +924 62 model.embedding_dim 0.0 +924 62 model.relation_dim 2.0 +924 62 model.scoring_fct_norm 2.0 +924 62 optimizer.lr 0.0026913623167846012 +924 62 training.batch_size 0.0 +924 62 training.label_smoothing 0.030647759437865203 +924 63 model.embedding_dim 0.0 +924 63 model.relation_dim 0.0 +924 63 model.scoring_fct_norm 2.0 +924 63 optimizer.lr 0.0012239742262642544 +924 63 training.batch_size 1.0 +924 63 training.label_smoothing 0.020163912881749894 +924 64 model.embedding_dim 0.0 +924 64 model.relation_dim 2.0 +924 64 model.scoring_fct_norm 2.0 +924 64 optimizer.lr 0.010905104287786373 +924 64 training.batch_size 0.0 +924 64 training.label_smoothing 0.06428020858152905 +924 65 model.embedding_dim 2.0 +924 65 model.relation_dim 1.0 +924 65 model.scoring_fct_norm 1.0 +924 65 optimizer.lr 0.0015091806393313163 +924 65 training.batch_size 2.0 +924 65 training.label_smoothing 0.0033295991607802627 +924 66 model.embedding_dim 2.0 +924 66 model.relation_dim 1.0 +924 66 model.scoring_fct_norm 1.0 +924 66 optimizer.lr 0.008077943262375452 +924 66 training.batch_size 0.0 +924 66 training.label_smoothing 0.0013342287090403542 +924 67 model.embedding_dim 0.0 +924 67 model.relation_dim 0.0 +924 67 model.scoring_fct_norm 1.0 +924 67 
optimizer.lr 0.01317273740502317 +924 67 training.batch_size 0.0 +924 67 training.label_smoothing 0.03489886576400432 +924 68 model.embedding_dim 0.0 +924 68 model.relation_dim 2.0 +924 68 model.scoring_fct_norm 2.0 +924 68 optimizer.lr 0.004835673196518432 +924 68 training.batch_size 1.0 +924 68 training.label_smoothing 0.020294020196143337 +924 69 model.embedding_dim 1.0 +924 69 model.relation_dim 0.0 +924 69 model.scoring_fct_norm 1.0 +924 69 optimizer.lr 0.0021449065097004145 +924 69 training.batch_size 1.0 +924 69 training.label_smoothing 0.5099028916951599 +924 70 model.embedding_dim 1.0 +924 70 model.relation_dim 0.0 +924 70 model.scoring_fct_norm 2.0 +924 70 optimizer.lr 0.0010403791844923937 +924 70 training.batch_size 2.0 +924 70 training.label_smoothing 0.0952867282870554 +924 71 model.embedding_dim 1.0 +924 71 model.relation_dim 2.0 +924 71 model.scoring_fct_norm 1.0 +924 71 optimizer.lr 0.006079968057781602 +924 71 training.batch_size 2.0 +924 71 training.label_smoothing 0.006480267434725969 +924 72 model.embedding_dim 0.0 +924 72 model.relation_dim 0.0 +924 72 model.scoring_fct_norm 1.0 +924 72 optimizer.lr 0.06824073799732418 +924 72 training.batch_size 2.0 +924 72 training.label_smoothing 0.21831324406671893 +924 73 model.embedding_dim 1.0 +924 73 model.relation_dim 1.0 +924 73 model.scoring_fct_norm 2.0 +924 73 optimizer.lr 0.01488178594082424 +924 73 training.batch_size 1.0 +924 73 training.label_smoothing 0.029783555350781796 +924 74 model.embedding_dim 1.0 +924 74 model.relation_dim 2.0 +924 74 model.scoring_fct_norm 2.0 +924 74 optimizer.lr 0.026689196951319563 +924 74 training.batch_size 0.0 +924 74 training.label_smoothing 0.04369656724173989 +924 75 model.embedding_dim 1.0 +924 75 model.relation_dim 1.0 +924 75 model.scoring_fct_norm 2.0 +924 75 optimizer.lr 0.07689597873225827 +924 75 training.batch_size 0.0 +924 75 training.label_smoothing 0.8660804208470632 +924 76 model.embedding_dim 0.0 +924 76 model.relation_dim 1.0 +924 76 
model.scoring_fct_norm 1.0 +924 76 optimizer.lr 0.02671687324063775 +924 76 training.batch_size 2.0 +924 76 training.label_smoothing 0.07332462385528604 +924 77 model.embedding_dim 0.0 +924 77 model.relation_dim 1.0 +924 77 model.scoring_fct_norm 1.0 +924 77 optimizer.lr 0.011542576856779245 +924 77 training.batch_size 1.0 +924 77 training.label_smoothing 0.0039617968742113405 +924 78 model.embedding_dim 0.0 +924 78 model.relation_dim 0.0 +924 78 model.scoring_fct_norm 1.0 +924 78 optimizer.lr 0.0082635638755515 +924 78 training.batch_size 0.0 +924 78 training.label_smoothing 0.011091347166478032 +924 79 model.embedding_dim 0.0 +924 79 model.relation_dim 1.0 +924 79 model.scoring_fct_norm 2.0 +924 79 optimizer.lr 0.00452609071439157 +924 79 training.batch_size 0.0 +924 79 training.label_smoothing 0.03776789359688163 +924 80 model.embedding_dim 1.0 +924 80 model.relation_dim 2.0 +924 80 model.scoring_fct_norm 1.0 +924 80 optimizer.lr 0.005164150485131828 +924 80 training.batch_size 2.0 +924 80 training.label_smoothing 0.01878136719419025 +924 81 model.embedding_dim 0.0 +924 81 model.relation_dim 2.0 +924 81 model.scoring_fct_norm 2.0 +924 81 optimizer.lr 0.061700648387998366 +924 81 training.batch_size 2.0 +924 81 training.label_smoothing 0.0159600214062307 +924 82 model.embedding_dim 0.0 +924 82 model.relation_dim 0.0 +924 82 model.scoring_fct_norm 1.0 +924 82 optimizer.lr 0.0031257118854011163 +924 82 training.batch_size 0.0 +924 82 training.label_smoothing 0.015860587962362695 +924 83 model.embedding_dim 2.0 +924 83 model.relation_dim 0.0 +924 83 model.scoring_fct_norm 1.0 +924 83 optimizer.lr 0.009223525167691033 +924 83 training.batch_size 1.0 +924 83 training.label_smoothing 0.18134100670111633 +924 84 model.embedding_dim 1.0 +924 84 model.relation_dim 0.0 +924 84 model.scoring_fct_norm 1.0 +924 84 optimizer.lr 0.048322787896912904 +924 84 training.batch_size 0.0 +924 84 training.label_smoothing 0.9691030024359847 +924 85 model.embedding_dim 1.0 +924 85 
model.relation_dim 1.0 +924 85 model.scoring_fct_norm 2.0 +924 85 optimizer.lr 0.011042020014200103 +924 85 training.batch_size 0.0 +924 85 training.label_smoothing 0.07091442901033408 +924 86 model.embedding_dim 2.0 +924 86 model.relation_dim 2.0 +924 86 model.scoring_fct_norm 2.0 +924 86 optimizer.lr 0.02326458682577972 +924 86 training.batch_size 2.0 +924 86 training.label_smoothing 0.884635305498539 +924 87 model.embedding_dim 0.0 +924 87 model.relation_dim 1.0 +924 87 model.scoring_fct_norm 2.0 +924 87 optimizer.lr 0.005465246904429675 +924 87 training.batch_size 2.0 +924 87 training.label_smoothing 0.010532938756521034 +924 88 model.embedding_dim 0.0 +924 88 model.relation_dim 2.0 +924 88 model.scoring_fct_norm 2.0 +924 88 optimizer.lr 0.06931958690160259 +924 88 training.batch_size 1.0 +924 88 training.label_smoothing 0.016372027029109427 +924 89 model.embedding_dim 2.0 +924 89 model.relation_dim 0.0 +924 89 model.scoring_fct_norm 2.0 +924 89 optimizer.lr 0.004051589632279046 +924 89 training.batch_size 1.0 +924 89 training.label_smoothing 0.054743290233161976 +924 90 model.embedding_dim 2.0 +924 90 model.relation_dim 2.0 +924 90 model.scoring_fct_norm 2.0 +924 90 optimizer.lr 0.028280293619832534 +924 90 training.batch_size 2.0 +924 90 training.label_smoothing 0.23563970110335977 +924 91 model.embedding_dim 2.0 +924 91 model.relation_dim 0.0 +924 91 model.scoring_fct_norm 2.0 +924 91 optimizer.lr 0.010415560983702398 +924 91 training.batch_size 1.0 +924 91 training.label_smoothing 0.13653038339092674 +924 92 model.embedding_dim 2.0 +924 92 model.relation_dim 1.0 +924 92 model.scoring_fct_norm 2.0 +924 92 optimizer.lr 0.01502073947122245 +924 92 training.batch_size 2.0 +924 92 training.label_smoothing 0.7115699295338693 +924 93 model.embedding_dim 0.0 +924 93 model.relation_dim 0.0 +924 93 model.scoring_fct_norm 2.0 +924 93 optimizer.lr 0.010245459652997056 +924 93 training.batch_size 2.0 +924 93 training.label_smoothing 0.27319171839535444 +924 94 
model.embedding_dim 2.0 +924 94 model.relation_dim 1.0 +924 94 model.scoring_fct_norm 1.0 +924 94 optimizer.lr 0.009247888658209207 +924 94 training.batch_size 0.0 +924 94 training.label_smoothing 0.07826911273755277 +924 95 model.embedding_dim 0.0 +924 95 model.relation_dim 2.0 +924 95 model.scoring_fct_norm 2.0 +924 95 optimizer.lr 0.01898508554998923 +924 95 training.batch_size 0.0 +924 95 training.label_smoothing 0.39068336831592004 +924 96 model.embedding_dim 1.0 +924 96 model.relation_dim 0.0 +924 96 model.scoring_fct_norm 1.0 +924 96 optimizer.lr 0.03910615861761381 +924 96 training.batch_size 0.0 +924 96 training.label_smoothing 0.016871050874630342 +924 97 model.embedding_dim 2.0 +924 97 model.relation_dim 0.0 +924 97 model.scoring_fct_norm 2.0 +924 97 optimizer.lr 0.001279324630656667 +924 97 training.batch_size 0.0 +924 97 training.label_smoothing 0.001229344632901355 +924 98 model.embedding_dim 1.0 +924 98 model.relation_dim 0.0 +924 98 model.scoring_fct_norm 2.0 +924 98 optimizer.lr 0.05183111055426779 +924 98 training.batch_size 1.0 +924 98 training.label_smoothing 0.04366822705701515 +924 99 model.embedding_dim 1.0 +924 99 model.relation_dim 2.0 +924 99 model.scoring_fct_norm 1.0 +924 99 optimizer.lr 0.026379923460851117 +924 99 training.batch_size 2.0 +924 99 training.label_smoothing 0.0033835513284736023 +924 100 model.embedding_dim 0.0 +924 100 model.relation_dim 1.0 +924 100 model.scoring_fct_norm 2.0 +924 100 optimizer.lr 0.008939691494452008 +924 100 training.batch_size 1.0 +924 100 training.label_smoothing 0.2961906898156468 +924 1 dataset """kinships""" +924 1 model """transr""" +924 1 loss """softplus""" +924 1 regularizer """no""" +924 1 optimizer """adam""" +924 1 training_loop """lcwa""" +924 1 evaluator """rankbased""" +924 2 dataset """kinships""" +924 2 model """transr""" +924 2 loss """softplus""" +924 2 regularizer """no""" +924 2 optimizer """adam""" +924 2 training_loop """lcwa""" +924 2 evaluator """rankbased""" +924 3 dataset 
"""kinships""" +924 3 model """transr""" +924 3 loss """softplus""" +924 3 regularizer """no""" +924 3 optimizer """adam""" +924 3 training_loop """lcwa""" +924 3 evaluator """rankbased""" +924 4 dataset """kinships""" +924 4 model """transr""" +924 4 loss """softplus""" +924 4 regularizer """no""" +924 4 optimizer """adam""" +924 4 training_loop """lcwa""" +924 4 evaluator """rankbased""" +924 5 dataset """kinships""" +924 5 model """transr""" +924 5 loss """softplus""" +924 5 regularizer """no""" +924 5 optimizer """adam""" +924 5 training_loop """lcwa""" +924 5 evaluator """rankbased""" +924 6 dataset """kinships""" +924 6 model """transr""" +924 6 loss """softplus""" +924 6 regularizer """no""" +924 6 optimizer """adam""" +924 6 training_loop """lcwa""" +924 6 evaluator """rankbased""" +924 7 dataset """kinships""" +924 7 model """transr""" +924 7 loss """softplus""" +924 7 regularizer """no""" +924 7 optimizer """adam""" +924 7 training_loop """lcwa""" +924 7 evaluator """rankbased""" +924 8 dataset """kinships""" +924 8 model """transr""" +924 8 loss """softplus""" +924 8 regularizer """no""" +924 8 optimizer """adam""" +924 8 training_loop """lcwa""" +924 8 evaluator """rankbased""" +924 9 dataset """kinships""" +924 9 model """transr""" +924 9 loss """softplus""" +924 9 regularizer """no""" +924 9 optimizer """adam""" +924 9 training_loop """lcwa""" +924 9 evaluator """rankbased""" +924 10 dataset """kinships""" +924 10 model """transr""" +924 10 loss """softplus""" +924 10 regularizer """no""" +924 10 optimizer """adam""" +924 10 training_loop """lcwa""" +924 10 evaluator """rankbased""" +924 11 dataset """kinships""" +924 11 model """transr""" +924 11 loss """softplus""" +924 11 regularizer """no""" +924 11 optimizer """adam""" +924 11 training_loop """lcwa""" +924 11 evaluator """rankbased""" +924 12 dataset """kinships""" +924 12 model """transr""" +924 12 loss """softplus""" +924 12 regularizer """no""" +924 12 optimizer """adam""" +924 12 
training_loop """lcwa""" +924 12 evaluator """rankbased""" +924 13 dataset """kinships""" +924 13 model """transr""" +924 13 loss """softplus""" +924 13 regularizer """no""" +924 13 optimizer """adam""" +924 13 training_loop """lcwa""" +924 13 evaluator """rankbased""" +924 14 dataset """kinships""" +924 14 model """transr""" +924 14 loss """softplus""" +924 14 regularizer """no""" +924 14 optimizer """adam""" +924 14 training_loop """lcwa""" +924 14 evaluator """rankbased""" +924 15 dataset """kinships""" +924 15 model """transr""" +924 15 loss """softplus""" +924 15 regularizer """no""" +924 15 optimizer """adam""" +924 15 training_loop """lcwa""" +924 15 evaluator """rankbased""" +924 16 dataset """kinships""" +924 16 model """transr""" +924 16 loss """softplus""" +924 16 regularizer """no""" +924 16 optimizer """adam""" +924 16 training_loop """lcwa""" +924 16 evaluator """rankbased""" +924 17 dataset """kinships""" +924 17 model """transr""" +924 17 loss """softplus""" +924 17 regularizer """no""" +924 17 optimizer """adam""" +924 17 training_loop """lcwa""" +924 17 evaluator """rankbased""" +924 18 dataset """kinships""" +924 18 model """transr""" +924 18 loss """softplus""" +924 18 regularizer """no""" +924 18 optimizer """adam""" +924 18 training_loop """lcwa""" +924 18 evaluator """rankbased""" +924 19 dataset """kinships""" +924 19 model """transr""" +924 19 loss """softplus""" +924 19 regularizer """no""" +924 19 optimizer """adam""" +924 19 training_loop """lcwa""" +924 19 evaluator """rankbased""" +924 20 dataset """kinships""" +924 20 model """transr""" +924 20 loss """softplus""" +924 20 regularizer """no""" +924 20 optimizer """adam""" +924 20 training_loop """lcwa""" +924 20 evaluator """rankbased""" +924 21 dataset """kinships""" +924 21 model """transr""" +924 21 loss """softplus""" +924 21 regularizer """no""" +924 21 optimizer """adam""" +924 21 training_loop """lcwa""" +924 21 evaluator """rankbased""" +924 22 dataset """kinships""" +924 22 
model """transr""" +924 22 loss """softplus""" +924 22 regularizer """no""" +924 22 optimizer """adam""" +924 22 training_loop """lcwa""" +924 22 evaluator """rankbased""" +924 23 dataset """kinships""" +924 23 model """transr""" +924 23 loss """softplus""" +924 23 regularizer """no""" +924 23 optimizer """adam""" +924 23 training_loop """lcwa""" +924 23 evaluator """rankbased""" +924 24 dataset """kinships""" +924 24 model """transr""" +924 24 loss """softplus""" +924 24 regularizer """no""" +924 24 optimizer """adam""" +924 24 training_loop """lcwa""" +924 24 evaluator """rankbased""" +924 25 dataset """kinships""" +924 25 model """transr""" +924 25 loss """softplus""" +924 25 regularizer """no""" +924 25 optimizer """adam""" +924 25 training_loop """lcwa""" +924 25 evaluator """rankbased""" +924 26 dataset """kinships""" +924 26 model """transr""" +924 26 loss """softplus""" +924 26 regularizer """no""" +924 26 optimizer """adam""" +924 26 training_loop """lcwa""" +924 26 evaluator """rankbased""" +924 27 dataset """kinships""" +924 27 model """transr""" +924 27 loss """softplus""" +924 27 regularizer """no""" +924 27 optimizer """adam""" +924 27 training_loop """lcwa""" +924 27 evaluator """rankbased""" +924 28 dataset """kinships""" +924 28 model """transr""" +924 28 loss """softplus""" +924 28 regularizer """no""" +924 28 optimizer """adam""" +924 28 training_loop """lcwa""" +924 28 evaluator """rankbased""" +924 29 dataset """kinships""" +924 29 model """transr""" +924 29 loss """softplus""" +924 29 regularizer """no""" +924 29 optimizer """adam""" +924 29 training_loop """lcwa""" +924 29 evaluator """rankbased""" +924 30 dataset """kinships""" +924 30 model """transr""" +924 30 loss """softplus""" +924 30 regularizer """no""" +924 30 optimizer """adam""" +924 30 training_loop """lcwa""" +924 30 evaluator """rankbased""" +924 31 dataset """kinships""" +924 31 model """transr""" +924 31 loss """softplus""" +924 31 regularizer """no""" +924 31 optimizer 
"""adam""" +924 31 training_loop """lcwa""" +924 31 evaluator """rankbased""" +924 32 dataset """kinships""" +924 32 model """transr""" +924 32 loss """softplus""" +924 32 regularizer """no""" +924 32 optimizer """adam""" +924 32 training_loop """lcwa""" +924 32 evaluator """rankbased""" +924 33 dataset """kinships""" +924 33 model """transr""" +924 33 loss """softplus""" +924 33 regularizer """no""" +924 33 optimizer """adam""" +924 33 training_loop """lcwa""" +924 33 evaluator """rankbased""" +924 34 dataset """kinships""" +924 34 model """transr""" +924 34 loss """softplus""" +924 34 regularizer """no""" +924 34 optimizer """adam""" +924 34 training_loop """lcwa""" +924 34 evaluator """rankbased""" +924 35 dataset """kinships""" +924 35 model """transr""" +924 35 loss """softplus""" +924 35 regularizer """no""" +924 35 optimizer """adam""" +924 35 training_loop """lcwa""" +924 35 evaluator """rankbased""" +924 36 dataset """kinships""" +924 36 model """transr""" +924 36 loss """softplus""" +924 36 regularizer """no""" +924 36 optimizer """adam""" +924 36 training_loop """lcwa""" +924 36 evaluator """rankbased""" +924 37 dataset """kinships""" +924 37 model """transr""" +924 37 loss """softplus""" +924 37 regularizer """no""" +924 37 optimizer """adam""" +924 37 training_loop """lcwa""" +924 37 evaluator """rankbased""" +924 38 dataset """kinships""" +924 38 model """transr""" +924 38 loss """softplus""" +924 38 regularizer """no""" +924 38 optimizer """adam""" +924 38 training_loop """lcwa""" +924 38 evaluator """rankbased""" +924 39 dataset """kinships""" +924 39 model """transr""" +924 39 loss """softplus""" +924 39 regularizer """no""" +924 39 optimizer """adam""" +924 39 training_loop """lcwa""" +924 39 evaluator """rankbased""" +924 40 dataset """kinships""" +924 40 model """transr""" +924 40 loss """softplus""" +924 40 regularizer """no""" +924 40 optimizer """adam""" +924 40 training_loop """lcwa""" +924 40 evaluator """rankbased""" +924 41 dataset 
"""kinships""" +924 41 model """transr""" +924 41 loss """softplus""" +924 41 regularizer """no""" +924 41 optimizer """adam""" +924 41 training_loop """lcwa""" +924 41 evaluator """rankbased""" +924 42 dataset """kinships""" +924 42 model """transr""" +924 42 loss """softplus""" +924 42 regularizer """no""" +924 42 optimizer """adam""" +924 42 training_loop """lcwa""" +924 42 evaluator """rankbased""" +924 43 dataset """kinships""" +924 43 model """transr""" +924 43 loss """softplus""" +924 43 regularizer """no""" +924 43 optimizer """adam""" +924 43 training_loop """lcwa""" +924 43 evaluator """rankbased""" +924 44 dataset """kinships""" +924 44 model """transr""" +924 44 loss """softplus""" +924 44 regularizer """no""" +924 44 optimizer """adam""" +924 44 training_loop """lcwa""" +924 44 evaluator """rankbased""" +924 45 dataset """kinships""" +924 45 model """transr""" +924 45 loss """softplus""" +924 45 regularizer """no""" +924 45 optimizer """adam""" +924 45 training_loop """lcwa""" +924 45 evaluator """rankbased""" +924 46 dataset """kinships""" +924 46 model """transr""" +924 46 loss """softplus""" +924 46 regularizer """no""" +924 46 optimizer """adam""" +924 46 training_loop """lcwa""" +924 46 evaluator """rankbased""" +924 47 dataset """kinships""" +924 47 model """transr""" +924 47 loss """softplus""" +924 47 regularizer """no""" +924 47 optimizer """adam""" +924 47 training_loop """lcwa""" +924 47 evaluator """rankbased""" +924 48 dataset """kinships""" +924 48 model """transr""" +924 48 loss """softplus""" +924 48 regularizer """no""" +924 48 optimizer """adam""" +924 48 training_loop """lcwa""" +924 48 evaluator """rankbased""" +924 49 dataset """kinships""" +924 49 model """transr""" +924 49 loss """softplus""" +924 49 regularizer """no""" +924 49 optimizer """adam""" +924 49 training_loop """lcwa""" +924 49 evaluator """rankbased""" +924 50 dataset """kinships""" +924 50 model """transr""" +924 50 loss """softplus""" +924 50 regularizer """no""" 
+924 50 optimizer """adam""" +924 50 training_loop """lcwa""" +924 50 evaluator """rankbased""" +924 51 dataset """kinships""" +924 51 model """transr""" +924 51 loss """softplus""" +924 51 regularizer """no""" +924 51 optimizer """adam""" +924 51 training_loop """lcwa""" +924 51 evaluator """rankbased""" +924 52 dataset """kinships""" +924 52 model """transr""" +924 52 loss """softplus""" +924 52 regularizer """no""" +924 52 optimizer """adam""" +924 52 training_loop """lcwa""" +924 52 evaluator """rankbased""" +924 53 dataset """kinships""" +924 53 model """transr""" +924 53 loss """softplus""" +924 53 regularizer """no""" +924 53 optimizer """adam""" +924 53 training_loop """lcwa""" +924 53 evaluator """rankbased""" +924 54 dataset """kinships""" +924 54 model """transr""" +924 54 loss """softplus""" +924 54 regularizer """no""" +924 54 optimizer """adam""" +924 54 training_loop """lcwa""" +924 54 evaluator """rankbased""" +924 55 dataset """kinships""" +924 55 model """transr""" +924 55 loss """softplus""" +924 55 regularizer """no""" +924 55 optimizer """adam""" +924 55 training_loop """lcwa""" +924 55 evaluator """rankbased""" +924 56 dataset """kinships""" +924 56 model """transr""" +924 56 loss """softplus""" +924 56 regularizer """no""" +924 56 optimizer """adam""" +924 56 training_loop """lcwa""" +924 56 evaluator """rankbased""" +924 57 dataset """kinships""" +924 57 model """transr""" +924 57 loss """softplus""" +924 57 regularizer """no""" +924 57 optimizer """adam""" +924 57 training_loop """lcwa""" +924 57 evaluator """rankbased""" +924 58 dataset """kinships""" +924 58 model """transr""" +924 58 loss """softplus""" +924 58 regularizer """no""" +924 58 optimizer """adam""" +924 58 training_loop """lcwa""" +924 58 evaluator """rankbased""" +924 59 dataset """kinships""" +924 59 model """transr""" +924 59 loss """softplus""" +924 59 regularizer """no""" +924 59 optimizer """adam""" +924 59 training_loop """lcwa""" +924 59 evaluator """rankbased""" +924 
60 dataset """kinships""" +924 60 model """transr""" +924 60 loss """softplus""" +924 60 regularizer """no""" +924 60 optimizer """adam""" +924 60 training_loop """lcwa""" +924 60 evaluator """rankbased""" +924 61 dataset """kinships""" +924 61 model """transr""" +924 61 loss """softplus""" +924 61 regularizer """no""" +924 61 optimizer """adam""" +924 61 training_loop """lcwa""" +924 61 evaluator """rankbased""" +924 62 dataset """kinships""" +924 62 model """transr""" +924 62 loss """softplus""" +924 62 regularizer """no""" +924 62 optimizer """adam""" +924 62 training_loop """lcwa""" +924 62 evaluator """rankbased""" +924 63 dataset """kinships""" +924 63 model """transr""" +924 63 loss """softplus""" +924 63 regularizer """no""" +924 63 optimizer """adam""" +924 63 training_loop """lcwa""" +924 63 evaluator """rankbased""" +924 64 dataset """kinships""" +924 64 model """transr""" +924 64 loss """softplus""" +924 64 regularizer """no""" +924 64 optimizer """adam""" +924 64 training_loop """lcwa""" +924 64 evaluator """rankbased""" +924 65 dataset """kinships""" +924 65 model """transr""" +924 65 loss """softplus""" +924 65 regularizer """no""" +924 65 optimizer """adam""" +924 65 training_loop """lcwa""" +924 65 evaluator """rankbased""" +924 66 dataset """kinships""" +924 66 model """transr""" +924 66 loss """softplus""" +924 66 regularizer """no""" +924 66 optimizer """adam""" +924 66 training_loop """lcwa""" +924 66 evaluator """rankbased""" +924 67 dataset """kinships""" +924 67 model """transr""" +924 67 loss """softplus""" +924 67 regularizer """no""" +924 67 optimizer """adam""" +924 67 training_loop """lcwa""" +924 67 evaluator """rankbased""" +924 68 dataset """kinships""" +924 68 model """transr""" +924 68 loss """softplus""" +924 68 regularizer """no""" +924 68 optimizer """adam""" +924 68 training_loop """lcwa""" +924 68 evaluator """rankbased""" +924 69 dataset """kinships""" +924 69 model """transr""" +924 69 loss """softplus""" +924 69 regularizer 
"""no""" +924 69 optimizer """adam""" +924 69 training_loop """lcwa""" +924 69 evaluator """rankbased""" +924 70 dataset """kinships""" +924 70 model """transr""" +924 70 loss """softplus""" +924 70 regularizer """no""" +924 70 optimizer """adam""" +924 70 training_loop """lcwa""" +924 70 evaluator """rankbased""" +924 71 dataset """kinships""" +924 71 model """transr""" +924 71 loss """softplus""" +924 71 regularizer """no""" +924 71 optimizer """adam""" +924 71 training_loop """lcwa""" +924 71 evaluator """rankbased""" +924 72 dataset """kinships""" +924 72 model """transr""" +924 72 loss """softplus""" +924 72 regularizer """no""" +924 72 optimizer """adam""" +924 72 training_loop """lcwa""" +924 72 evaluator """rankbased""" +924 73 dataset """kinships""" +924 73 model """transr""" +924 73 loss """softplus""" +924 73 regularizer """no""" +924 73 optimizer """adam""" +924 73 training_loop """lcwa""" +924 73 evaluator """rankbased""" +924 74 dataset """kinships""" +924 74 model """transr""" +924 74 loss """softplus""" +924 74 regularizer """no""" +924 74 optimizer """adam""" +924 74 training_loop """lcwa""" +924 74 evaluator """rankbased""" +924 75 dataset """kinships""" +924 75 model """transr""" +924 75 loss """softplus""" +924 75 regularizer """no""" +924 75 optimizer """adam""" +924 75 training_loop """lcwa""" +924 75 evaluator """rankbased""" +924 76 dataset """kinships""" +924 76 model """transr""" +924 76 loss """softplus""" +924 76 regularizer """no""" +924 76 optimizer """adam""" +924 76 training_loop """lcwa""" +924 76 evaluator """rankbased""" +924 77 dataset """kinships""" +924 77 model """transr""" +924 77 loss """softplus""" +924 77 regularizer """no""" +924 77 optimizer """adam""" +924 77 training_loop """lcwa""" +924 77 evaluator """rankbased""" +924 78 dataset """kinships""" +924 78 model """transr""" +924 78 loss """softplus""" +924 78 regularizer """no""" +924 78 optimizer """adam""" +924 78 training_loop """lcwa""" +924 78 evaluator 
"""rankbased""" +924 79 dataset """kinships""" +924 79 model """transr""" +924 79 loss """softplus""" +924 79 regularizer """no""" +924 79 optimizer """adam""" +924 79 training_loop """lcwa""" +924 79 evaluator """rankbased""" +924 80 dataset """kinships""" +924 80 model """transr""" +924 80 loss """softplus""" +924 80 regularizer """no""" +924 80 optimizer """adam""" +924 80 training_loop """lcwa""" +924 80 evaluator """rankbased""" +924 81 dataset """kinships""" +924 81 model """transr""" +924 81 loss """softplus""" +924 81 regularizer """no""" +924 81 optimizer """adam""" +924 81 training_loop """lcwa""" +924 81 evaluator """rankbased""" +924 82 dataset """kinships""" +924 82 model """transr""" +924 82 loss """softplus""" +924 82 regularizer """no""" +924 82 optimizer """adam""" +924 82 training_loop """lcwa""" +924 82 evaluator """rankbased""" +924 83 dataset """kinships""" +924 83 model """transr""" +924 83 loss """softplus""" +924 83 regularizer """no""" +924 83 optimizer """adam""" +924 83 training_loop """lcwa""" +924 83 evaluator """rankbased""" +924 84 dataset """kinships""" +924 84 model """transr""" +924 84 loss """softplus""" +924 84 regularizer """no""" +924 84 optimizer """adam""" +924 84 training_loop """lcwa""" +924 84 evaluator """rankbased""" +924 85 dataset """kinships""" +924 85 model """transr""" +924 85 loss """softplus""" +924 85 regularizer """no""" +924 85 optimizer """adam""" +924 85 training_loop """lcwa""" +924 85 evaluator """rankbased""" +924 86 dataset """kinships""" +924 86 model """transr""" +924 86 loss """softplus""" +924 86 regularizer """no""" +924 86 optimizer """adam""" +924 86 training_loop """lcwa""" +924 86 evaluator """rankbased""" +924 87 dataset """kinships""" +924 87 model """transr""" +924 87 loss """softplus""" +924 87 regularizer """no""" +924 87 optimizer """adam""" +924 87 training_loop """lcwa""" +924 87 evaluator """rankbased""" +924 88 dataset """kinships""" +924 88 model """transr""" +924 88 loss 
"""softplus""" +924 88 regularizer """no""" +924 88 optimizer """adam""" +924 88 training_loop """lcwa""" +924 88 evaluator """rankbased""" +924 89 dataset """kinships""" +924 89 model """transr""" +924 89 loss """softplus""" +924 89 regularizer """no""" +924 89 optimizer """adam""" +924 89 training_loop """lcwa""" +924 89 evaluator """rankbased""" +924 90 dataset """kinships""" +924 90 model """transr""" +924 90 loss """softplus""" +924 90 regularizer """no""" +924 90 optimizer """adam""" +924 90 training_loop """lcwa""" +924 90 evaluator """rankbased""" +924 91 dataset """kinships""" +924 91 model """transr""" +924 91 loss """softplus""" +924 91 regularizer """no""" +924 91 optimizer """adam""" +924 91 training_loop """lcwa""" +924 91 evaluator """rankbased""" +924 92 dataset """kinships""" +924 92 model """transr""" +924 92 loss """softplus""" +924 92 regularizer """no""" +924 92 optimizer """adam""" +924 92 training_loop """lcwa""" +924 92 evaluator """rankbased""" +924 93 dataset """kinships""" +924 93 model """transr""" +924 93 loss """softplus""" +924 93 regularizer """no""" +924 93 optimizer """adam""" +924 93 training_loop """lcwa""" +924 93 evaluator """rankbased""" +924 94 dataset """kinships""" +924 94 model """transr""" +924 94 loss """softplus""" +924 94 regularizer """no""" +924 94 optimizer """adam""" +924 94 training_loop """lcwa""" +924 94 evaluator """rankbased""" +924 95 dataset """kinships""" +924 95 model """transr""" +924 95 loss """softplus""" +924 95 regularizer """no""" +924 95 optimizer """adam""" +924 95 training_loop """lcwa""" +924 95 evaluator """rankbased""" +924 96 dataset """kinships""" +924 96 model """transr""" +924 96 loss """softplus""" +924 96 regularizer """no""" +924 96 optimizer """adam""" +924 96 training_loop """lcwa""" +924 96 evaluator """rankbased""" +924 97 dataset """kinships""" +924 97 model """transr""" +924 97 loss """softplus""" +924 97 regularizer """no""" +924 97 optimizer """adam""" +924 97 training_loop 
"""lcwa""" +924 97 evaluator """rankbased""" +924 98 dataset """kinships""" +924 98 model """transr""" +924 98 loss """softplus""" +924 98 regularizer """no""" +924 98 optimizer """adam""" +924 98 training_loop """lcwa""" +924 98 evaluator """rankbased""" +924 99 dataset """kinships""" +924 99 model """transr""" +924 99 loss """softplus""" +924 99 regularizer """no""" +924 99 optimizer """adam""" +924 99 training_loop """lcwa""" +924 99 evaluator """rankbased""" +924 100 dataset """kinships""" +924 100 model """transr""" +924 100 loss """softplus""" +924 100 regularizer """no""" +924 100 optimizer """adam""" +924 100 training_loop """lcwa""" +924 100 evaluator """rankbased""" +925 1 model.embedding_dim 0.0 +925 1 model.relation_dim 2.0 +925 1 model.scoring_fct_norm 2.0 +925 1 optimizer.lr 0.06062787206514234 +925 1 training.batch_size 2.0 +925 1 training.label_smoothing 0.7647512960917453 +925 2 model.embedding_dim 1.0 +925 2 model.relation_dim 1.0 +925 2 model.scoring_fct_norm 2.0 +925 2 optimizer.lr 0.07671550188893121 +925 2 training.batch_size 1.0 +925 2 training.label_smoothing 0.029653933061607077 +925 3 model.embedding_dim 1.0 +925 3 model.relation_dim 0.0 +925 3 model.scoring_fct_norm 1.0 +925 3 optimizer.lr 0.02291402640681255 +925 3 training.batch_size 1.0 +925 3 training.label_smoothing 0.09453154908671982 +925 4 model.embedding_dim 1.0 +925 4 model.relation_dim 0.0 +925 4 model.scoring_fct_norm 2.0 +925 4 optimizer.lr 0.0070959389172925375 +925 4 training.batch_size 2.0 +925 4 training.label_smoothing 0.011481062512783457 +925 5 model.embedding_dim 1.0 +925 5 model.relation_dim 0.0 +925 5 model.scoring_fct_norm 2.0 +925 5 optimizer.lr 0.06161609221331147 +925 5 training.batch_size 1.0 +925 5 training.label_smoothing 0.007384951979417159 +925 6 model.embedding_dim 1.0 +925 6 model.relation_dim 2.0 +925 6 model.scoring_fct_norm 2.0 +925 6 optimizer.lr 0.01087776869087354 +925 6 training.batch_size 2.0 +925 6 training.label_smoothing 0.03990017622572442 
+925 7 model.embedding_dim 1.0 +925 7 model.relation_dim 0.0 +925 7 model.scoring_fct_norm 2.0 +925 7 optimizer.lr 0.004735227288629618 +925 7 training.batch_size 1.0 +925 7 training.label_smoothing 0.20354930259371154 +925 8 model.embedding_dim 0.0 +925 8 model.relation_dim 1.0 +925 8 model.scoring_fct_norm 1.0 +925 8 optimizer.lr 0.018680663643626333 +925 8 training.batch_size 2.0 +925 8 training.label_smoothing 0.0018702416456931336 +925 9 model.embedding_dim 2.0 +925 9 model.relation_dim 2.0 +925 9 model.scoring_fct_norm 1.0 +925 9 optimizer.lr 0.04539671523643229 +925 9 training.batch_size 1.0 +925 9 training.label_smoothing 0.5248339312016347 +925 10 model.embedding_dim 1.0 +925 10 model.relation_dim 1.0 +925 10 model.scoring_fct_norm 2.0 +925 10 optimizer.lr 0.007053054705202255 +925 10 training.batch_size 1.0 +925 10 training.label_smoothing 0.017319353307610672 +925 11 model.embedding_dim 2.0 +925 11 model.relation_dim 1.0 +925 11 model.scoring_fct_norm 1.0 +925 11 optimizer.lr 0.04740268180475851 +925 11 training.batch_size 1.0 +925 11 training.label_smoothing 0.2969897279750486 +925 12 model.embedding_dim 0.0 +925 12 model.relation_dim 2.0 +925 12 model.scoring_fct_norm 1.0 +925 12 optimizer.lr 0.011469740204616363 +925 12 training.batch_size 2.0 +925 12 training.label_smoothing 0.0048790824007172535 +925 13 model.embedding_dim 0.0 +925 13 model.relation_dim 0.0 +925 13 model.scoring_fct_norm 1.0 +925 13 optimizer.lr 0.008324944337486298 +925 13 training.batch_size 2.0 +925 13 training.label_smoothing 0.15928423678241763 +925 14 model.embedding_dim 0.0 +925 14 model.relation_dim 0.0 +925 14 model.scoring_fct_norm 1.0 +925 14 optimizer.lr 0.015452805031982212 +925 14 training.batch_size 0.0 +925 14 training.label_smoothing 0.008095535640313515 +925 15 model.embedding_dim 2.0 +925 15 model.relation_dim 2.0 +925 15 model.scoring_fct_norm 1.0 +925 15 optimizer.lr 0.060045027858021294 +925 15 training.batch_size 1.0 +925 15 training.label_smoothing 
0.00196958885986548 +925 16 model.embedding_dim 2.0 +925 16 model.relation_dim 0.0 +925 16 model.scoring_fct_norm 2.0 +925 16 optimizer.lr 0.0014656356349436169 +925 16 training.batch_size 0.0 +925 16 training.label_smoothing 0.17561106885947955 +925 17 model.embedding_dim 0.0 +925 17 model.relation_dim 0.0 +925 17 model.scoring_fct_norm 2.0 +925 17 optimizer.lr 0.011055787260942296 +925 17 training.batch_size 0.0 +925 17 training.label_smoothing 0.10346115530233255 +925 18 model.embedding_dim 0.0 +925 18 model.relation_dim 1.0 +925 18 model.scoring_fct_norm 1.0 +925 18 optimizer.lr 0.05406572436112659 +925 18 training.batch_size 0.0 +925 18 training.label_smoothing 0.08590105499827227 +925 19 model.embedding_dim 2.0 +925 19 model.relation_dim 2.0 +925 19 model.scoring_fct_norm 1.0 +925 19 optimizer.lr 0.0019688784125471166 +925 19 training.batch_size 1.0 +925 19 training.label_smoothing 0.4639501206623413 +925 20 model.embedding_dim 2.0 +925 20 model.relation_dim 1.0 +925 20 model.scoring_fct_norm 2.0 +925 20 optimizer.lr 0.024343822451609452 +925 20 training.batch_size 2.0 +925 20 training.label_smoothing 0.3895682986757169 +925 21 model.embedding_dim 1.0 +925 21 model.relation_dim 2.0 +925 21 model.scoring_fct_norm 1.0 +925 21 optimizer.lr 0.07251286297167522 +925 21 training.batch_size 1.0 +925 21 training.label_smoothing 0.007001701119349538 +925 22 model.embedding_dim 2.0 +925 22 model.relation_dim 0.0 +925 22 model.scoring_fct_norm 2.0 +925 22 optimizer.lr 0.08410122249403615 +925 22 training.batch_size 1.0 +925 22 training.label_smoothing 0.5944976174427783 +925 23 model.embedding_dim 1.0 +925 23 model.relation_dim 0.0 +925 23 model.scoring_fct_norm 1.0 +925 23 optimizer.lr 0.0015774575159384471 +925 23 training.batch_size 0.0 +925 23 training.label_smoothing 0.17629979533217066 +925 24 model.embedding_dim 0.0 +925 24 model.relation_dim 1.0 +925 24 model.scoring_fct_norm 1.0 +925 24 optimizer.lr 0.0010346832723302601 +925 24 training.batch_size 2.0 +925 24 
training.label_smoothing 0.007246483423455102 +925 25 model.embedding_dim 2.0 +925 25 model.relation_dim 0.0 +925 25 model.scoring_fct_norm 2.0 +925 25 optimizer.lr 0.00727340435276135 +925 25 training.batch_size 1.0 +925 25 training.label_smoothing 0.15577814133783985 +925 26 model.embedding_dim 2.0 +925 26 model.relation_dim 1.0 +925 26 model.scoring_fct_norm 1.0 +925 26 optimizer.lr 0.0026853830294259634 +925 26 training.batch_size 1.0 +925 26 training.label_smoothing 0.18314506027726118 +925 27 model.embedding_dim 1.0 +925 27 model.relation_dim 1.0 +925 27 model.scoring_fct_norm 2.0 +925 27 optimizer.lr 0.0052470409108185895 +925 27 training.batch_size 2.0 +925 27 training.label_smoothing 0.03348201389634508 +925 28 model.embedding_dim 2.0 +925 28 model.relation_dim 1.0 +925 28 model.scoring_fct_norm 2.0 +925 28 optimizer.lr 0.0872087697379774 +925 28 training.batch_size 2.0 +925 28 training.label_smoothing 0.8326771048818757 +925 29 model.embedding_dim 2.0 +925 29 model.relation_dim 1.0 +925 29 model.scoring_fct_norm 1.0 +925 29 optimizer.lr 0.0020498898076542135 +925 29 training.batch_size 1.0 +925 29 training.label_smoothing 0.0036192691708687537 +925 30 model.embedding_dim 2.0 +925 30 model.relation_dim 2.0 +925 30 model.scoring_fct_norm 2.0 +925 30 optimizer.lr 0.027720554295602957 +925 30 training.batch_size 2.0 +925 30 training.label_smoothing 0.037064137975668 +925 31 model.embedding_dim 0.0 +925 31 model.relation_dim 1.0 +925 31 model.scoring_fct_norm 1.0 +925 31 optimizer.lr 0.004237463463774745 +925 31 training.batch_size 2.0 +925 31 training.label_smoothing 0.006680154928055221 +925 32 model.embedding_dim 2.0 +925 32 model.relation_dim 0.0 +925 32 model.scoring_fct_norm 2.0 +925 32 optimizer.lr 0.0024183840684012716 +925 32 training.batch_size 0.0 +925 32 training.label_smoothing 0.06135155202684647 +925 33 model.embedding_dim 0.0 +925 33 model.relation_dim 2.0 +925 33 model.scoring_fct_norm 1.0 +925 33 optimizer.lr 0.00475768506792331 +925 33 
training.batch_size 2.0 +925 33 training.label_smoothing 0.0014877089707970794 +925 34 model.embedding_dim 1.0 +925 34 model.relation_dim 0.0 +925 34 model.scoring_fct_norm 2.0 +925 34 optimizer.lr 0.00947748669936932 +925 34 training.batch_size 0.0 +925 34 training.label_smoothing 0.14817707342183692 +925 35 model.embedding_dim 1.0 +925 35 model.relation_dim 1.0 +925 35 model.scoring_fct_norm 2.0 +925 35 optimizer.lr 0.05876019252988593 +925 35 training.batch_size 1.0 +925 35 training.label_smoothing 0.0012786539359361064 +925 36 model.embedding_dim 0.0 +925 36 model.relation_dim 2.0 +925 36 model.scoring_fct_norm 2.0 +925 36 optimizer.lr 0.0018576660923313038 +925 36 training.batch_size 1.0 +925 36 training.label_smoothing 0.07280305479766105 +925 37 model.embedding_dim 1.0 +925 37 model.relation_dim 1.0 +925 37 model.scoring_fct_norm 2.0 +925 37 optimizer.lr 0.01768701142175287 +925 37 training.batch_size 0.0 +925 37 training.label_smoothing 0.4188791347383725 +925 38 model.embedding_dim 0.0 +925 38 model.relation_dim 2.0 +925 38 model.scoring_fct_norm 2.0 +925 38 optimizer.lr 0.018720956576700896 +925 38 training.batch_size 0.0 +925 38 training.label_smoothing 0.019941584789150642 +925 39 model.embedding_dim 0.0 +925 39 model.relation_dim 0.0 +925 39 model.scoring_fct_norm 2.0 +925 39 optimizer.lr 0.008051913523458697 +925 39 training.batch_size 0.0 +925 39 training.label_smoothing 0.0033773489322801897 +925 40 model.embedding_dim 1.0 +925 40 model.relation_dim 0.0 +925 40 model.scoring_fct_norm 2.0 +925 40 optimizer.lr 0.003380861588437316 +925 40 training.batch_size 1.0 +925 40 training.label_smoothing 0.05756558601092763 +925 41 model.embedding_dim 0.0 +925 41 model.relation_dim 1.0 +925 41 model.scoring_fct_norm 2.0 +925 41 optimizer.lr 0.04933826346910454 +925 41 training.batch_size 1.0 +925 41 training.label_smoothing 0.0018360070817483667 +925 42 model.embedding_dim 1.0 +925 42 model.relation_dim 1.0 +925 42 model.scoring_fct_norm 2.0 +925 42 
optimizer.lr 0.0015815134992270285 +925 42 training.batch_size 0.0 +925 42 training.label_smoothing 0.18796544162836826 +925 43 model.embedding_dim 2.0 +925 43 model.relation_dim 1.0 +925 43 model.scoring_fct_norm 2.0 +925 43 optimizer.lr 0.0011012659863470955 +925 43 training.batch_size 1.0 +925 43 training.label_smoothing 0.327550618744293 +925 44 model.embedding_dim 2.0 +925 44 model.relation_dim 2.0 +925 44 model.scoring_fct_norm 1.0 +925 44 optimizer.lr 0.07756466121503783 +925 44 training.batch_size 0.0 +925 44 training.label_smoothing 0.006437392042110704 +925 45 model.embedding_dim 2.0 +925 45 model.relation_dim 2.0 +925 45 model.scoring_fct_norm 2.0 +925 45 optimizer.lr 0.03571038596994482 +925 45 training.batch_size 0.0 +925 45 training.label_smoothing 0.4061048747454233 +925 46 model.embedding_dim 0.0 +925 46 model.relation_dim 1.0 +925 46 model.scoring_fct_norm 1.0 +925 46 optimizer.lr 0.018652330292465137 +925 46 training.batch_size 2.0 +925 46 training.label_smoothing 0.06089425860482162 +925 47 model.embedding_dim 1.0 +925 47 model.relation_dim 1.0 +925 47 model.scoring_fct_norm 2.0 +925 47 optimizer.lr 0.028934455427907953 +925 47 training.batch_size 2.0 +925 47 training.label_smoothing 0.02097235884402979 +925 48 model.embedding_dim 1.0 +925 48 model.relation_dim 0.0 +925 48 model.scoring_fct_norm 2.0 +925 48 optimizer.lr 0.05301390310781807 +925 48 training.batch_size 0.0 +925 48 training.label_smoothing 0.002464739789099724 +925 49 model.embedding_dim 2.0 +925 49 model.relation_dim 2.0 +925 49 model.scoring_fct_norm 2.0 +925 49 optimizer.lr 0.025318991471899845 +925 49 training.batch_size 2.0 +925 49 training.label_smoothing 0.001381412924148651 +925 50 model.embedding_dim 1.0 +925 50 model.relation_dim 2.0 +925 50 model.scoring_fct_norm 2.0 +925 50 optimizer.lr 0.01336663707736145 +925 50 training.batch_size 0.0 +925 50 training.label_smoothing 0.8814421823681958 +925 51 model.embedding_dim 1.0 +925 51 model.relation_dim 2.0 +925 51 
model.scoring_fct_norm 2.0 +925 51 optimizer.lr 0.07547224104849738 +925 51 training.batch_size 0.0 +925 51 training.label_smoothing 0.001029330508031618 +925 52 model.embedding_dim 1.0 +925 52 model.relation_dim 2.0 +925 52 model.scoring_fct_norm 2.0 +925 52 optimizer.lr 0.014818813152495738 +925 52 training.batch_size 2.0 +925 52 training.label_smoothing 0.18008871033226656 +925 53 model.embedding_dim 2.0 +925 53 model.relation_dim 0.0 +925 53 model.scoring_fct_norm 2.0 +925 53 optimizer.lr 0.0030106607937582415 +925 53 training.batch_size 1.0 +925 53 training.label_smoothing 0.0029635535898836973 +925 54 model.embedding_dim 2.0 +925 54 model.relation_dim 1.0 +925 54 model.scoring_fct_norm 2.0 +925 54 optimizer.lr 0.042052846365697075 +925 54 training.batch_size 2.0 +925 54 training.label_smoothing 0.0025373091373183127 +925 55 model.embedding_dim 0.0 +925 55 model.relation_dim 1.0 +925 55 model.scoring_fct_norm 2.0 +925 55 optimizer.lr 0.015787884150204115 +925 55 training.batch_size 1.0 +925 55 training.label_smoothing 0.37629253052251527 +925 56 model.embedding_dim 0.0 +925 56 model.relation_dim 1.0 +925 56 model.scoring_fct_norm 1.0 +925 56 optimizer.lr 0.0028526415992212203 +925 56 training.batch_size 1.0 +925 56 training.label_smoothing 0.2964816753385948 +925 57 model.embedding_dim 1.0 +925 57 model.relation_dim 1.0 +925 57 model.scoring_fct_norm 2.0 +925 57 optimizer.lr 0.03234606514678694 +925 57 training.batch_size 1.0 +925 57 training.label_smoothing 0.0035824287606471108 +925 58 model.embedding_dim 1.0 +925 58 model.relation_dim 1.0 +925 58 model.scoring_fct_norm 1.0 +925 58 optimizer.lr 0.010165295002711666 +925 58 training.batch_size 2.0 +925 58 training.label_smoothing 0.31438538916753805 +925 59 model.embedding_dim 2.0 +925 59 model.relation_dim 0.0 +925 59 model.scoring_fct_norm 2.0 +925 59 optimizer.lr 0.017967851981349123 +925 59 training.batch_size 0.0 +925 59 training.label_smoothing 0.013859147076234899 +925 60 model.embedding_dim 2.0 +925 
60 model.relation_dim 0.0 +925 60 model.scoring_fct_norm 1.0 +925 60 optimizer.lr 0.019620611096732063 +925 60 training.batch_size 0.0 +925 60 training.label_smoothing 0.00446067390325855 +925 61 model.embedding_dim 1.0 +925 61 model.relation_dim 1.0 +925 61 model.scoring_fct_norm 1.0 +925 61 optimizer.lr 0.014829552561715992 +925 61 training.batch_size 2.0 +925 61 training.label_smoothing 0.6465113060723182 +925 62 model.embedding_dim 2.0 +925 62 model.relation_dim 2.0 +925 62 model.scoring_fct_norm 1.0 +925 62 optimizer.lr 0.005158150733774937 +925 62 training.batch_size 0.0 +925 62 training.label_smoothing 0.5731816757592894 +925 63 model.embedding_dim 0.0 +925 63 model.relation_dim 1.0 +925 63 model.scoring_fct_norm 2.0 +925 63 optimizer.lr 0.05477324874936814 +925 63 training.batch_size 0.0 +925 63 training.label_smoothing 0.04832189436865585 +925 64 model.embedding_dim 1.0 +925 64 model.relation_dim 1.0 +925 64 model.scoring_fct_norm 1.0 +925 64 optimizer.lr 0.013944336434214543 +925 64 training.batch_size 2.0 +925 64 training.label_smoothing 0.02426370010243757 +925 65 model.embedding_dim 2.0 +925 65 model.relation_dim 0.0 +925 65 model.scoring_fct_norm 1.0 +925 65 optimizer.lr 0.040172236938605145 +925 65 training.batch_size 2.0 +925 65 training.label_smoothing 0.005006124191318513 +925 66 model.embedding_dim 1.0 +925 66 model.relation_dim 2.0 +925 66 model.scoring_fct_norm 1.0 +925 66 optimizer.lr 0.041202174603371604 +925 66 training.batch_size 2.0 +925 66 training.label_smoothing 0.0014764719406286197 +925 67 model.embedding_dim 2.0 +925 67 model.relation_dim 0.0 +925 67 model.scoring_fct_norm 1.0 +925 67 optimizer.lr 0.013388208182997456 +925 67 training.batch_size 0.0 +925 67 training.label_smoothing 0.10152472840528236 +925 68 model.embedding_dim 0.0 +925 68 model.relation_dim 2.0 +925 68 model.scoring_fct_norm 1.0 +925 68 optimizer.lr 0.057060737750758435 +925 68 training.batch_size 1.0 +925 68 training.label_smoothing 0.04270691963630004 +925 69 
model.embedding_dim 2.0 +925 69 model.relation_dim 1.0 +925 69 model.scoring_fct_norm 2.0 +925 69 optimizer.lr 0.007420352910449749 +925 69 training.batch_size 0.0 +925 69 training.label_smoothing 0.384713806569226 +925 70 model.embedding_dim 2.0 +925 70 model.relation_dim 0.0 +925 70 model.scoring_fct_norm 2.0 +925 70 optimizer.lr 0.07066862612665566 +925 70 training.batch_size 0.0 +925 70 training.label_smoothing 0.2105109406951606 +925 71 model.embedding_dim 2.0 +925 71 model.relation_dim 2.0 +925 71 model.scoring_fct_norm 1.0 +925 71 optimizer.lr 0.00151897859932778 +925 71 training.batch_size 0.0 +925 71 training.label_smoothing 0.05447483973443094 +925 72 model.embedding_dim 2.0 +925 72 model.relation_dim 0.0 +925 72 model.scoring_fct_norm 2.0 +925 72 optimizer.lr 0.03393691377319944 +925 72 training.batch_size 2.0 +925 72 training.label_smoothing 0.4055236042726469 +925 73 model.embedding_dim 2.0 +925 73 model.relation_dim 1.0 +925 73 model.scoring_fct_norm 2.0 +925 73 optimizer.lr 0.03713496148183242 +925 73 training.batch_size 1.0 +925 73 training.label_smoothing 0.04515379926957485 +925 74 model.embedding_dim 2.0 +925 74 model.relation_dim 1.0 +925 74 model.scoring_fct_norm 1.0 +925 74 optimizer.lr 0.005034487130524089 +925 74 training.batch_size 2.0 +925 74 training.label_smoothing 0.04659752192925311 +925 75 model.embedding_dim 2.0 +925 75 model.relation_dim 2.0 +925 75 model.scoring_fct_norm 1.0 +925 75 optimizer.lr 0.0923725975337378 +925 75 training.batch_size 0.0 +925 75 training.label_smoothing 0.0014575658263633231 +925 76 model.embedding_dim 0.0 +925 76 model.relation_dim 2.0 +925 76 model.scoring_fct_norm 1.0 +925 76 optimizer.lr 0.006672367295839844 +925 76 training.batch_size 2.0 +925 76 training.label_smoothing 0.0021594945039329068 +925 77 model.embedding_dim 0.0 +925 77 model.relation_dim 1.0 +925 77 model.scoring_fct_norm 2.0 +925 77 optimizer.lr 0.017664192112079466 +925 77 training.batch_size 0.0 +925 77 training.label_smoothing 
0.01257866517554617 +925 78 model.embedding_dim 1.0 +925 78 model.relation_dim 1.0 +925 78 model.scoring_fct_norm 2.0 +925 78 optimizer.lr 0.020901238175901723 +925 78 training.batch_size 1.0 +925 78 training.label_smoothing 0.012461029064392193 +925 79 model.embedding_dim 0.0 +925 79 model.relation_dim 2.0 +925 79 model.scoring_fct_norm 1.0 +925 79 optimizer.lr 0.028796480800162436 +925 79 training.batch_size 1.0 +925 79 training.label_smoothing 0.0015602917422966752 +925 80 model.embedding_dim 1.0 +925 80 model.relation_dim 2.0 +925 80 model.scoring_fct_norm 1.0 +925 80 optimizer.lr 0.017173020766018 +925 80 training.batch_size 2.0 +925 80 training.label_smoothing 0.4964239161243061 +925 81 model.embedding_dim 1.0 +925 81 model.relation_dim 0.0 +925 81 model.scoring_fct_norm 1.0 +925 81 optimizer.lr 0.04438475971169166 +925 81 training.batch_size 1.0 +925 81 training.label_smoothing 0.2520068085002534 +925 82 model.embedding_dim 1.0 +925 82 model.relation_dim 2.0 +925 82 model.scoring_fct_norm 2.0 +925 82 optimizer.lr 0.03619448325869358 +925 82 training.batch_size 0.0 +925 82 training.label_smoothing 0.014957160653988517 +925 83 model.embedding_dim 2.0 +925 83 model.relation_dim 0.0 +925 83 model.scoring_fct_norm 2.0 +925 83 optimizer.lr 0.031764146760328474 +925 83 training.batch_size 1.0 +925 83 training.label_smoothing 0.0011082938802592138 +925 84 model.embedding_dim 2.0 +925 84 model.relation_dim 2.0 +925 84 model.scoring_fct_norm 2.0 +925 84 optimizer.lr 0.002281308204924407 +925 84 training.batch_size 0.0 +925 84 training.label_smoothing 0.32822529283651636 +925 85 model.embedding_dim 0.0 +925 85 model.relation_dim 0.0 +925 85 model.scoring_fct_norm 2.0 +925 85 optimizer.lr 0.011469901901375465 +925 85 training.batch_size 1.0 +925 85 training.label_smoothing 0.15608082917085137 +925 86 model.embedding_dim 0.0 +925 86 model.relation_dim 2.0 +925 86 model.scoring_fct_norm 1.0 +925 86 optimizer.lr 0.005664045745063909 +925 86 training.batch_size 0.0 +925 86 
training.label_smoothing 0.004608531124157058 +925 87 model.embedding_dim 2.0 +925 87 model.relation_dim 1.0 +925 87 model.scoring_fct_norm 1.0 +925 87 optimizer.lr 0.062250565745542906 +925 87 training.batch_size 0.0 +925 87 training.label_smoothing 0.007331708963700349 +925 88 model.embedding_dim 2.0 +925 88 model.relation_dim 2.0 +925 88 model.scoring_fct_norm 1.0 +925 88 optimizer.lr 0.013736275180770556 +925 88 training.batch_size 1.0 +925 88 training.label_smoothing 0.014345061250833211 +925 89 model.embedding_dim 1.0 +925 89 model.relation_dim 1.0 +925 89 model.scoring_fct_norm 2.0 +925 89 optimizer.lr 0.04992351477512981 +925 89 training.batch_size 0.0 +925 89 training.label_smoothing 0.015195400319611033 +925 90 model.embedding_dim 1.0 +925 90 model.relation_dim 0.0 +925 90 model.scoring_fct_norm 2.0 +925 90 optimizer.lr 0.036144267251927126 +925 90 training.batch_size 0.0 +925 90 training.label_smoothing 0.0026454578496466217 +925 91 model.embedding_dim 0.0 +925 91 model.relation_dim 2.0 +925 91 model.scoring_fct_norm 2.0 +925 91 optimizer.lr 0.003502039056444386 +925 91 training.batch_size 1.0 +925 91 training.label_smoothing 0.12469554459779239 +925 92 model.embedding_dim 0.0 +925 92 model.relation_dim 0.0 +925 92 model.scoring_fct_norm 2.0 +925 92 optimizer.lr 0.01574986902101647 +925 92 training.batch_size 2.0 +925 92 training.label_smoothing 0.08286509905805703 +925 93 model.embedding_dim 1.0 +925 93 model.relation_dim 0.0 +925 93 model.scoring_fct_norm 1.0 +925 93 optimizer.lr 0.03878143153147587 +925 93 training.batch_size 0.0 +925 93 training.label_smoothing 0.04788935228313426 +925 94 model.embedding_dim 2.0 +925 94 model.relation_dim 1.0 +925 94 model.scoring_fct_norm 2.0 +925 94 optimizer.lr 0.08045857619431515 +925 94 training.batch_size 1.0 +925 94 training.label_smoothing 0.32536118314345447 +925 95 model.embedding_dim 0.0 +925 95 model.relation_dim 1.0 +925 95 model.scoring_fct_norm 2.0 +925 95 optimizer.lr 0.09004354931936309 +925 95 
training.batch_size 1.0 +925 95 training.label_smoothing 0.03035006813088703 +925 96 model.embedding_dim 1.0 +925 96 model.relation_dim 1.0 +925 96 model.scoring_fct_norm 2.0 +925 96 optimizer.lr 0.005655842704155859 +925 96 training.batch_size 1.0 +925 96 training.label_smoothing 0.0054137810747175085 +925 97 model.embedding_dim 0.0 +925 97 model.relation_dim 1.0 +925 97 model.scoring_fct_norm 1.0 +925 97 optimizer.lr 0.003195484196695492 +925 97 training.batch_size 0.0 +925 97 training.label_smoothing 0.5337244216327063 +925 98 model.embedding_dim 0.0 +925 98 model.relation_dim 2.0 +925 98 model.scoring_fct_norm 2.0 +925 98 optimizer.lr 0.03571055081431992 +925 98 training.batch_size 0.0 +925 98 training.label_smoothing 0.018595315772488307 +925 99 model.embedding_dim 1.0 +925 99 model.relation_dim 1.0 +925 99 model.scoring_fct_norm 2.0 +925 99 optimizer.lr 0.04512464678810227 +925 99 training.batch_size 0.0 +925 99 training.label_smoothing 0.045759702343193515 +925 100 model.embedding_dim 2.0 +925 100 model.relation_dim 2.0 +925 100 model.scoring_fct_norm 2.0 +925 100 optimizer.lr 0.09764915969788417 +925 100 training.batch_size 1.0 +925 100 training.label_smoothing 0.005853630488438296 +925 1 dataset """kinships""" +925 1 model """transr""" +925 1 loss """crossentropy""" +925 1 regularizer """no""" +925 1 optimizer """adam""" +925 1 training_loop """lcwa""" +925 1 evaluator """rankbased""" +925 2 dataset """kinships""" +925 2 model """transr""" +925 2 loss """crossentropy""" +925 2 regularizer """no""" +925 2 optimizer """adam""" +925 2 training_loop """lcwa""" +925 2 evaluator """rankbased""" +925 3 dataset """kinships""" +925 3 model """transr""" +925 3 loss """crossentropy""" +925 3 regularizer """no""" +925 3 optimizer """adam""" +925 3 training_loop """lcwa""" +925 3 evaluator """rankbased""" +925 4 dataset """kinships""" +925 4 model """transr""" +925 4 loss """crossentropy""" +925 4 regularizer """no""" +925 4 optimizer """adam""" +925 4 training_loop 
"""lcwa""" +925 4 evaluator """rankbased""" +925 5 dataset """kinships""" +925 5 model """transr""" +925 5 loss """crossentropy""" +925 5 regularizer """no""" +925 5 optimizer """adam""" +925 5 training_loop """lcwa""" +925 5 evaluator """rankbased""" +925 6 dataset """kinships""" +925 6 model """transr""" +925 6 loss """crossentropy""" +925 6 regularizer """no""" +925 6 optimizer """adam""" +925 6 training_loop """lcwa""" +925 6 evaluator """rankbased""" +925 7 dataset """kinships""" +925 7 model """transr""" +925 7 loss """crossentropy""" +925 7 regularizer """no""" +925 7 optimizer """adam""" +925 7 training_loop """lcwa""" +925 7 evaluator """rankbased""" +925 8 dataset """kinships""" +925 8 model """transr""" +925 8 loss """crossentropy""" +925 8 regularizer """no""" +925 8 optimizer """adam""" +925 8 training_loop """lcwa""" +925 8 evaluator """rankbased""" +925 9 dataset """kinships""" +925 9 model """transr""" +925 9 loss """crossentropy""" +925 9 regularizer """no""" +925 9 optimizer """adam""" +925 9 training_loop """lcwa""" +925 9 evaluator """rankbased""" +925 10 dataset """kinships""" +925 10 model """transr""" +925 10 loss """crossentropy""" +925 10 regularizer """no""" +925 10 optimizer """adam""" +925 10 training_loop """lcwa""" +925 10 evaluator """rankbased""" +925 11 dataset """kinships""" +925 11 model """transr""" +925 11 loss """crossentropy""" +925 11 regularizer """no""" +925 11 optimizer """adam""" +925 11 training_loop """lcwa""" +925 11 evaluator """rankbased""" +925 12 dataset """kinships""" +925 12 model """transr""" +925 12 loss """crossentropy""" +925 12 regularizer """no""" +925 12 optimizer """adam""" +925 12 training_loop """lcwa""" +925 12 evaluator """rankbased""" +925 13 dataset """kinships""" +925 13 model """transr""" +925 13 loss """crossentropy""" +925 13 regularizer """no""" +925 13 optimizer """adam""" +925 13 training_loop """lcwa""" +925 13 evaluator """rankbased""" +925 14 dataset """kinships""" +925 14 model 
"""transr""" +925 14 loss """crossentropy""" +925 14 regularizer """no""" +925 14 optimizer """adam""" +925 14 training_loop """lcwa""" +925 14 evaluator """rankbased""" +925 15 dataset """kinships""" +925 15 model """transr""" +925 15 loss """crossentropy""" +925 15 regularizer """no""" +925 15 optimizer """adam""" +925 15 training_loop """lcwa""" +925 15 evaluator """rankbased""" +925 16 dataset """kinships""" +925 16 model """transr""" +925 16 loss """crossentropy""" +925 16 regularizer """no""" +925 16 optimizer """adam""" +925 16 training_loop """lcwa""" +925 16 evaluator """rankbased""" +925 17 dataset """kinships""" +925 17 model """transr""" +925 17 loss """crossentropy""" +925 17 regularizer """no""" +925 17 optimizer """adam""" +925 17 training_loop """lcwa""" +925 17 evaluator """rankbased""" +925 18 dataset """kinships""" +925 18 model """transr""" +925 18 loss """crossentropy""" +925 18 regularizer """no""" +925 18 optimizer """adam""" +925 18 training_loop """lcwa""" +925 18 evaluator """rankbased""" +925 19 dataset """kinships""" +925 19 model """transr""" +925 19 loss """crossentropy""" +925 19 regularizer """no""" +925 19 optimizer """adam""" +925 19 training_loop """lcwa""" +925 19 evaluator """rankbased""" +925 20 dataset """kinships""" +925 20 model """transr""" +925 20 loss """crossentropy""" +925 20 regularizer """no""" +925 20 optimizer """adam""" +925 20 training_loop """lcwa""" +925 20 evaluator """rankbased""" +925 21 dataset """kinships""" +925 21 model """transr""" +925 21 loss """crossentropy""" +925 21 regularizer """no""" +925 21 optimizer """adam""" +925 21 training_loop """lcwa""" +925 21 evaluator """rankbased""" +925 22 dataset """kinships""" +925 22 model """transr""" +925 22 loss """crossentropy""" +925 22 regularizer """no""" +925 22 optimizer """adam""" +925 22 training_loop """lcwa""" +925 22 evaluator """rankbased""" +925 23 dataset """kinships""" +925 23 model """transr""" +925 23 loss """crossentropy""" +925 23 regularizer 
"""no""" +925 23 optimizer """adam""" +925 23 training_loop """lcwa""" +925 23 evaluator """rankbased""" +925 24 dataset """kinships""" +925 24 model """transr""" +925 24 loss """crossentropy""" +925 24 regularizer """no""" +925 24 optimizer """adam""" +925 24 training_loop """lcwa""" +925 24 evaluator """rankbased""" +925 25 dataset """kinships""" +925 25 model """transr""" +925 25 loss """crossentropy""" +925 25 regularizer """no""" +925 25 optimizer """adam""" +925 25 training_loop """lcwa""" +925 25 evaluator """rankbased""" +925 26 dataset """kinships""" +925 26 model """transr""" +925 26 loss """crossentropy""" +925 26 regularizer """no""" +925 26 optimizer """adam""" +925 26 training_loop """lcwa""" +925 26 evaluator """rankbased""" +925 27 dataset """kinships""" +925 27 model """transr""" +925 27 loss """crossentropy""" +925 27 regularizer """no""" +925 27 optimizer """adam""" +925 27 training_loop """lcwa""" +925 27 evaluator """rankbased""" +925 28 dataset """kinships""" +925 28 model """transr""" +925 28 loss """crossentropy""" +925 28 regularizer """no""" +925 28 optimizer """adam""" +925 28 training_loop """lcwa""" +925 28 evaluator """rankbased""" +925 29 dataset """kinships""" +925 29 model """transr""" +925 29 loss """crossentropy""" +925 29 regularizer """no""" +925 29 optimizer """adam""" +925 29 training_loop """lcwa""" +925 29 evaluator """rankbased""" +925 30 dataset """kinships""" +925 30 model """transr""" +925 30 loss """crossentropy""" +925 30 regularizer """no""" +925 30 optimizer """adam""" +925 30 training_loop """lcwa""" +925 30 evaluator """rankbased""" +925 31 dataset """kinships""" +925 31 model """transr""" +925 31 loss """crossentropy""" +925 31 regularizer """no""" +925 31 optimizer """adam""" +925 31 training_loop """lcwa""" +925 31 evaluator """rankbased""" +925 32 dataset """kinships""" +925 32 model """transr""" +925 32 loss """crossentropy""" +925 32 regularizer """no""" +925 32 optimizer """adam""" +925 32 training_loop 
"""lcwa""" +925 32 evaluator """rankbased""" +925 33 dataset """kinships""" +925 33 model """transr""" +925 33 loss """crossentropy""" +925 33 regularizer """no""" +925 33 optimizer """adam""" +925 33 training_loop """lcwa""" +925 33 evaluator """rankbased""" +925 34 dataset """kinships""" +925 34 model """transr""" +925 34 loss """crossentropy""" +925 34 regularizer """no""" +925 34 optimizer """adam""" +925 34 training_loop """lcwa""" +925 34 evaluator """rankbased""" +925 35 dataset """kinships""" +925 35 model """transr""" +925 35 loss """crossentropy""" +925 35 regularizer """no""" +925 35 optimizer """adam""" +925 35 training_loop """lcwa""" +925 35 evaluator """rankbased""" +925 36 dataset """kinships""" +925 36 model """transr""" +925 36 loss """crossentropy""" +925 36 regularizer """no""" +925 36 optimizer """adam""" +925 36 training_loop """lcwa""" +925 36 evaluator """rankbased""" +925 37 dataset """kinships""" +925 37 model """transr""" +925 37 loss """crossentropy""" +925 37 regularizer """no""" +925 37 optimizer """adam""" +925 37 training_loop """lcwa""" +925 37 evaluator """rankbased""" +925 38 dataset """kinships""" +925 38 model """transr""" +925 38 loss """crossentropy""" +925 38 regularizer """no""" +925 38 optimizer """adam""" +925 38 training_loop """lcwa""" +925 38 evaluator """rankbased""" +925 39 dataset """kinships""" +925 39 model """transr""" +925 39 loss """crossentropy""" +925 39 regularizer """no""" +925 39 optimizer """adam""" +925 39 training_loop """lcwa""" +925 39 evaluator """rankbased""" +925 40 dataset """kinships""" +925 40 model """transr""" +925 40 loss """crossentropy""" +925 40 regularizer """no""" +925 40 optimizer """adam""" +925 40 training_loop """lcwa""" +925 40 evaluator """rankbased""" +925 41 dataset """kinships""" +925 41 model """transr""" +925 41 loss """crossentropy""" +925 41 regularizer """no""" +925 41 optimizer """adam""" +925 41 training_loop """lcwa""" +925 41 evaluator """rankbased""" +925 42 dataset 
"""kinships""" +925 42 model """transr""" +925 42 loss """crossentropy""" +925 42 regularizer """no""" +925 42 optimizer """adam""" +925 42 training_loop """lcwa""" +925 42 evaluator """rankbased""" +925 43 dataset """kinships""" +925 43 model """transr""" +925 43 loss """crossentropy""" +925 43 regularizer """no""" +925 43 optimizer """adam""" +925 43 training_loop """lcwa""" +925 43 evaluator """rankbased""" +925 44 dataset """kinships""" +925 44 model """transr""" +925 44 loss """crossentropy""" +925 44 regularizer """no""" +925 44 optimizer """adam""" +925 44 training_loop """lcwa""" +925 44 evaluator """rankbased""" +925 45 dataset """kinships""" +925 45 model """transr""" +925 45 loss """crossentropy""" +925 45 regularizer """no""" +925 45 optimizer """adam""" +925 45 training_loop """lcwa""" +925 45 evaluator """rankbased""" +925 46 dataset """kinships""" +925 46 model """transr""" +925 46 loss """crossentropy""" +925 46 regularizer """no""" +925 46 optimizer """adam""" +925 46 training_loop """lcwa""" +925 46 evaluator """rankbased""" +925 47 dataset """kinships""" +925 47 model """transr""" +925 47 loss """crossentropy""" +925 47 regularizer """no""" +925 47 optimizer """adam""" +925 47 training_loop """lcwa""" +925 47 evaluator """rankbased""" +925 48 dataset """kinships""" +925 48 model """transr""" +925 48 loss """crossentropy""" +925 48 regularizer """no""" +925 48 optimizer """adam""" +925 48 training_loop """lcwa""" +925 48 evaluator """rankbased""" +925 49 dataset """kinships""" +925 49 model """transr""" +925 49 loss """crossentropy""" +925 49 regularizer """no""" +925 49 optimizer """adam""" +925 49 training_loop """lcwa""" +925 49 evaluator """rankbased""" +925 50 dataset """kinships""" +925 50 model """transr""" +925 50 loss """crossentropy""" +925 50 regularizer """no""" +925 50 optimizer """adam""" +925 50 training_loop """lcwa""" +925 50 evaluator """rankbased""" +925 51 dataset """kinships""" +925 51 model """transr""" +925 51 loss 
"""crossentropy""" +925 51 regularizer """no""" +925 51 optimizer """adam""" +925 51 training_loop """lcwa""" +925 51 evaluator """rankbased""" +925 52 dataset """kinships""" +925 52 model """transr""" +925 52 loss """crossentropy""" +925 52 regularizer """no""" +925 52 optimizer """adam""" +925 52 training_loop """lcwa""" +925 52 evaluator """rankbased""" +925 53 dataset """kinships""" +925 53 model """transr""" +925 53 loss """crossentropy""" +925 53 regularizer """no""" +925 53 optimizer """adam""" +925 53 training_loop """lcwa""" +925 53 evaluator """rankbased""" +925 54 dataset """kinships""" +925 54 model """transr""" +925 54 loss """crossentropy""" +925 54 regularizer """no""" +925 54 optimizer """adam""" +925 54 training_loop """lcwa""" +925 54 evaluator """rankbased""" +925 55 dataset """kinships""" +925 55 model """transr""" +925 55 loss """crossentropy""" +925 55 regularizer """no""" +925 55 optimizer """adam""" +925 55 training_loop """lcwa""" +925 55 evaluator """rankbased""" +925 56 dataset """kinships""" +925 56 model """transr""" +925 56 loss """crossentropy""" +925 56 regularizer """no""" +925 56 optimizer """adam""" +925 56 training_loop """lcwa""" +925 56 evaluator """rankbased""" +925 57 dataset """kinships""" +925 57 model """transr""" +925 57 loss """crossentropy""" +925 57 regularizer """no""" +925 57 optimizer """adam""" +925 57 training_loop """lcwa""" +925 57 evaluator """rankbased""" +925 58 dataset """kinships""" +925 58 model """transr""" +925 58 loss """crossentropy""" +925 58 regularizer """no""" +925 58 optimizer """adam""" +925 58 training_loop """lcwa""" +925 58 evaluator """rankbased""" +925 59 dataset """kinships""" +925 59 model """transr""" +925 59 loss """crossentropy""" +925 59 regularizer """no""" +925 59 optimizer """adam""" +925 59 training_loop """lcwa""" +925 59 evaluator """rankbased""" +925 60 dataset """kinships""" +925 60 model """transr""" +925 60 loss """crossentropy""" +925 60 regularizer """no""" +925 60 
optimizer """adam""" +925 60 training_loop """lcwa""" +925 60 evaluator """rankbased""" +925 61 dataset """kinships""" +925 61 model """transr""" +925 61 loss """crossentropy""" +925 61 regularizer """no""" +925 61 optimizer """adam""" +925 61 training_loop """lcwa""" +925 61 evaluator """rankbased""" +925 62 dataset """kinships""" +925 62 model """transr""" +925 62 loss """crossentropy""" +925 62 regularizer """no""" +925 62 optimizer """adam""" +925 62 training_loop """lcwa""" +925 62 evaluator """rankbased""" +925 63 dataset """kinships""" +925 63 model """transr""" +925 63 loss """crossentropy""" +925 63 regularizer """no""" +925 63 optimizer """adam""" +925 63 training_loop """lcwa""" +925 63 evaluator """rankbased""" +925 64 dataset """kinships""" +925 64 model """transr""" +925 64 loss """crossentropy""" +925 64 regularizer """no""" +925 64 optimizer """adam""" +925 64 training_loop """lcwa""" +925 64 evaluator """rankbased""" +925 65 dataset """kinships""" +925 65 model """transr""" +925 65 loss """crossentropy""" +925 65 regularizer """no""" +925 65 optimizer """adam""" +925 65 training_loop """lcwa""" +925 65 evaluator """rankbased""" +925 66 dataset """kinships""" +925 66 model """transr""" +925 66 loss """crossentropy""" +925 66 regularizer """no""" +925 66 optimizer """adam""" +925 66 training_loop """lcwa""" +925 66 evaluator """rankbased""" +925 67 dataset """kinships""" +925 67 model """transr""" +925 67 loss """crossentropy""" +925 67 regularizer """no""" +925 67 optimizer """adam""" +925 67 training_loop """lcwa""" +925 67 evaluator """rankbased""" +925 68 dataset """kinships""" +925 68 model """transr""" +925 68 loss """crossentropy""" +925 68 regularizer """no""" +925 68 optimizer """adam""" +925 68 training_loop """lcwa""" +925 68 evaluator """rankbased""" +925 69 dataset """kinships""" +925 69 model """transr""" +925 69 loss """crossentropy""" +925 69 regularizer """no""" +925 69 optimizer """adam""" +925 69 training_loop """lcwa""" +925 69 
evaluator """rankbased""" +925 70 dataset """kinships""" +925 70 model """transr""" +925 70 loss """crossentropy""" +925 70 regularizer """no""" +925 70 optimizer """adam""" +925 70 training_loop """lcwa""" +925 70 evaluator """rankbased""" +925 71 dataset """kinships""" +925 71 model """transr""" +925 71 loss """crossentropy""" +925 71 regularizer """no""" +925 71 optimizer """adam""" +925 71 training_loop """lcwa""" +925 71 evaluator """rankbased""" +925 72 dataset """kinships""" +925 72 model """transr""" +925 72 loss """crossentropy""" +925 72 regularizer """no""" +925 72 optimizer """adam""" +925 72 training_loop """lcwa""" +925 72 evaluator """rankbased""" +925 73 dataset """kinships""" +925 73 model """transr""" +925 73 loss """crossentropy""" +925 73 regularizer """no""" +925 73 optimizer """adam""" +925 73 training_loop """lcwa""" +925 73 evaluator """rankbased""" +925 74 dataset """kinships""" +925 74 model """transr""" +925 74 loss """crossentropy""" +925 74 regularizer """no""" +925 74 optimizer """adam""" +925 74 training_loop """lcwa""" +925 74 evaluator """rankbased""" +925 75 dataset """kinships""" +925 75 model """transr""" +925 75 loss """crossentropy""" +925 75 regularizer """no""" +925 75 optimizer """adam""" +925 75 training_loop """lcwa""" +925 75 evaluator """rankbased""" +925 76 dataset """kinships""" +925 76 model """transr""" +925 76 loss """crossentropy""" +925 76 regularizer """no""" +925 76 optimizer """adam""" +925 76 training_loop """lcwa""" +925 76 evaluator """rankbased""" +925 77 dataset """kinships""" +925 77 model """transr""" +925 77 loss """crossentropy""" +925 77 regularizer """no""" +925 77 optimizer """adam""" +925 77 training_loop """lcwa""" +925 77 evaluator """rankbased""" +925 78 dataset """kinships""" +925 78 model """transr""" +925 78 loss """crossentropy""" +925 78 regularizer """no""" +925 78 optimizer """adam""" +925 78 training_loop """lcwa""" +925 78 evaluator """rankbased""" +925 79 dataset """kinships""" +925 79 
model """transr""" +925 79 loss """crossentropy""" +925 79 regularizer """no""" +925 79 optimizer """adam""" +925 79 training_loop """lcwa""" +925 79 evaluator """rankbased""" +925 80 dataset """kinships""" +925 80 model """transr""" +925 80 loss """crossentropy""" +925 80 regularizer """no""" +925 80 optimizer """adam""" +925 80 training_loop """lcwa""" +925 80 evaluator """rankbased""" +925 81 dataset """kinships""" +925 81 model """transr""" +925 81 loss """crossentropy""" +925 81 regularizer """no""" +925 81 optimizer """adam""" +925 81 training_loop """lcwa""" +925 81 evaluator """rankbased""" +925 82 dataset """kinships""" +925 82 model """transr""" +925 82 loss """crossentropy""" +925 82 regularizer """no""" +925 82 optimizer """adam""" +925 82 training_loop """lcwa""" +925 82 evaluator """rankbased""" +925 83 dataset """kinships""" +925 83 model """transr""" +925 83 loss """crossentropy""" +925 83 regularizer """no""" +925 83 optimizer """adam""" +925 83 training_loop """lcwa""" +925 83 evaluator """rankbased""" +925 84 dataset """kinships""" +925 84 model """transr""" +925 84 loss """crossentropy""" +925 84 regularizer """no""" +925 84 optimizer """adam""" +925 84 training_loop """lcwa""" +925 84 evaluator """rankbased""" +925 85 dataset """kinships""" +925 85 model """transr""" +925 85 loss """crossentropy""" +925 85 regularizer """no""" +925 85 optimizer """adam""" +925 85 training_loop """lcwa""" +925 85 evaluator """rankbased""" +925 86 dataset """kinships""" +925 86 model """transr""" +925 86 loss """crossentropy""" +925 86 regularizer """no""" +925 86 optimizer """adam""" +925 86 training_loop """lcwa""" +925 86 evaluator """rankbased""" +925 87 dataset """kinships""" +925 87 model """transr""" +925 87 loss """crossentropy""" +925 87 regularizer """no""" +925 87 optimizer """adam""" +925 87 training_loop """lcwa""" +925 87 evaluator """rankbased""" +925 88 dataset """kinships""" +925 88 model """transr""" +925 88 loss """crossentropy""" +925 88 
regularizer """no""" +925 88 optimizer """adam""" +925 88 training_loop """lcwa""" +925 88 evaluator """rankbased""" +925 89 dataset """kinships""" +925 89 model """transr""" +925 89 loss """crossentropy""" +925 89 regularizer """no""" +925 89 optimizer """adam""" +925 89 training_loop """lcwa""" +925 89 evaluator """rankbased""" +925 90 dataset """kinships""" +925 90 model """transr""" +925 90 loss """crossentropy""" +925 90 regularizer """no""" +925 90 optimizer """adam""" +925 90 training_loop """lcwa""" +925 90 evaluator """rankbased""" +925 91 dataset """kinships""" +925 91 model """transr""" +925 91 loss """crossentropy""" +925 91 regularizer """no""" +925 91 optimizer """adam""" +925 91 training_loop """lcwa""" +925 91 evaluator """rankbased""" +925 92 dataset """kinships""" +925 92 model """transr""" +925 92 loss """crossentropy""" +925 92 regularizer """no""" +925 92 optimizer """adam""" +925 92 training_loop """lcwa""" +925 92 evaluator """rankbased""" +925 93 dataset """kinships""" +925 93 model """transr""" +925 93 loss """crossentropy""" +925 93 regularizer """no""" +925 93 optimizer """adam""" +925 93 training_loop """lcwa""" +925 93 evaluator """rankbased""" +925 94 dataset """kinships""" +925 94 model """transr""" +925 94 loss """crossentropy""" +925 94 regularizer """no""" +925 94 optimizer """adam""" +925 94 training_loop """lcwa""" +925 94 evaluator """rankbased""" +925 95 dataset """kinships""" +925 95 model """transr""" +925 95 loss """crossentropy""" +925 95 regularizer """no""" +925 95 optimizer """adam""" +925 95 training_loop """lcwa""" +925 95 evaluator """rankbased""" +925 96 dataset """kinships""" +925 96 model """transr""" +925 96 loss """crossentropy""" +925 96 regularizer """no""" +925 96 optimizer """adam""" +925 96 training_loop """lcwa""" +925 96 evaluator """rankbased""" +925 97 dataset """kinships""" +925 97 model """transr""" +925 97 loss """crossentropy""" +925 97 regularizer """no""" +925 97 optimizer """adam""" +925 97 
training_loop """lcwa""" +925 97 evaluator """rankbased""" +925 98 dataset """kinships""" +925 98 model """transr""" +925 98 loss """crossentropy""" +925 98 regularizer """no""" +925 98 optimizer """adam""" +925 98 training_loop """lcwa""" +925 98 evaluator """rankbased""" +925 99 dataset """kinships""" +925 99 model """transr""" +925 99 loss """crossentropy""" +925 99 regularizer """no""" +925 99 optimizer """adam""" +925 99 training_loop """lcwa""" +925 99 evaluator """rankbased""" +925 100 dataset """kinships""" +925 100 model """transr""" +925 100 loss """crossentropy""" +925 100 regularizer """no""" +925 100 optimizer """adam""" +925 100 training_loop """lcwa""" +925 100 evaluator """rankbased""" +926 1 model.embedding_dim 1.0 +926 1 model.relation_dim 0.0 +926 1 model.scoring_fct_norm 2.0 +926 1 optimizer.lr 0.03100980491127349 +926 1 training.batch_size 0.0 +926 1 training.label_smoothing 0.03222003835959765 +926 2 model.embedding_dim 1.0 +926 2 model.relation_dim 0.0 +926 2 model.scoring_fct_norm 2.0 +926 2 optimizer.lr 0.003292768333597727 +926 2 training.batch_size 0.0 +926 2 training.label_smoothing 0.004705202010941347 +926 3 model.embedding_dim 0.0 +926 3 model.relation_dim 1.0 +926 3 model.scoring_fct_norm 2.0 +926 3 optimizer.lr 0.0015809147661508187 +926 3 training.batch_size 0.0 +926 3 training.label_smoothing 0.09861585899597314 +926 4 model.embedding_dim 0.0 +926 4 model.relation_dim 1.0 +926 4 model.scoring_fct_norm 2.0 +926 4 optimizer.lr 0.009751202340435642 +926 4 training.batch_size 1.0 +926 4 training.label_smoothing 0.0019567067996770974 +926 5 model.embedding_dim 0.0 +926 5 model.relation_dim 1.0 +926 5 model.scoring_fct_norm 1.0 +926 5 optimizer.lr 0.001997221467715082 +926 5 training.batch_size 2.0 +926 5 training.label_smoothing 0.005633562519652484 +926 6 model.embedding_dim 0.0 +926 6 model.relation_dim 2.0 +926 6 model.scoring_fct_norm 2.0 +926 6 optimizer.lr 0.0015770881089232588 +926 6 training.batch_size 2.0 +926 6 
training.label_smoothing 0.4907221789046151 +926 7 model.embedding_dim 2.0 +926 7 model.relation_dim 0.0 +926 7 model.scoring_fct_norm 2.0 +926 7 optimizer.lr 0.0018213854321513072 +926 7 training.batch_size 0.0 +926 7 training.label_smoothing 0.004946549564218545 +926 8 model.embedding_dim 1.0 +926 8 model.relation_dim 1.0 +926 8 model.scoring_fct_norm 1.0 +926 8 optimizer.lr 0.001954404920290067 +926 8 training.batch_size 2.0 +926 8 training.label_smoothing 0.0036667155556241423 +926 9 model.embedding_dim 0.0 +926 9 model.relation_dim 0.0 +926 9 model.scoring_fct_norm 2.0 +926 9 optimizer.lr 0.0016864564607438728 +926 9 training.batch_size 0.0 +926 9 training.label_smoothing 0.016834786551463934 +926 10 model.embedding_dim 0.0 +926 10 model.relation_dim 1.0 +926 10 model.scoring_fct_norm 1.0 +926 10 optimizer.lr 0.0017626582362189397 +926 10 training.batch_size 2.0 +926 10 training.label_smoothing 0.0016867337439164102 +926 11 model.embedding_dim 0.0 +926 11 model.relation_dim 0.0 +926 11 model.scoring_fct_norm 1.0 +926 11 optimizer.lr 0.008460671281609398 +926 11 training.batch_size 1.0 +926 11 training.label_smoothing 0.02260641886362534 +926 12 model.embedding_dim 2.0 +926 12 model.relation_dim 1.0 +926 12 model.scoring_fct_norm 1.0 +926 12 optimizer.lr 0.0518447301711458 +926 12 training.batch_size 0.0 +926 12 training.label_smoothing 0.002894315905713799 +926 13 model.embedding_dim 1.0 +926 13 model.relation_dim 0.0 +926 13 model.scoring_fct_norm 2.0 +926 13 optimizer.lr 0.0278844235988247 +926 13 training.batch_size 2.0 +926 13 training.label_smoothing 0.9534186623265912 +926 14 model.embedding_dim 1.0 +926 14 model.relation_dim 0.0 +926 14 model.scoring_fct_norm 2.0 +926 14 optimizer.lr 0.004970809666296558 +926 14 training.batch_size 0.0 +926 14 training.label_smoothing 0.011371054158836312 +926 15 model.embedding_dim 1.0 +926 15 model.relation_dim 1.0 +926 15 model.scoring_fct_norm 2.0 +926 15 optimizer.lr 0.05898189884464094 +926 15 training.batch_size 
0.0 +926 15 training.label_smoothing 0.004073626233351401 +926 16 model.embedding_dim 0.0 +926 16 model.relation_dim 1.0 +926 16 model.scoring_fct_norm 1.0 +926 16 optimizer.lr 0.02984988276841788 +926 16 training.batch_size 2.0 +926 16 training.label_smoothing 0.01058422791745134 +926 17 model.embedding_dim 0.0 +926 17 model.relation_dim 1.0 +926 17 model.scoring_fct_norm 2.0 +926 17 optimizer.lr 0.07191854435995584 +926 17 training.batch_size 2.0 +926 17 training.label_smoothing 0.016529243108879058 +926 18 model.embedding_dim 2.0 +926 18 model.relation_dim 1.0 +926 18 model.scoring_fct_norm 1.0 +926 18 optimizer.lr 0.009139894073180005 +926 18 training.batch_size 0.0 +926 18 training.label_smoothing 0.059032792459534356 +926 19 model.embedding_dim 1.0 +926 19 model.relation_dim 2.0 +926 19 model.scoring_fct_norm 1.0 +926 19 optimizer.lr 0.011727291519325814 +926 19 training.batch_size 1.0 +926 19 training.label_smoothing 0.0064103414761762555 +926 20 model.embedding_dim 0.0 +926 20 model.relation_dim 0.0 +926 20 model.scoring_fct_norm 1.0 +926 20 optimizer.lr 0.003958034477963711 +926 20 training.batch_size 2.0 +926 20 training.label_smoothing 0.017509778239996318 +926 21 model.embedding_dim 1.0 +926 21 model.relation_dim 1.0 +926 21 model.scoring_fct_norm 1.0 +926 21 optimizer.lr 0.0010189443011224375 +926 21 training.batch_size 2.0 +926 21 training.label_smoothing 0.053964881928466435 +926 22 model.embedding_dim 1.0 +926 22 model.relation_dim 1.0 +926 22 model.scoring_fct_norm 1.0 +926 22 optimizer.lr 0.017148092741875825 +926 22 training.batch_size 1.0 +926 22 training.label_smoothing 0.2036821519318289 +926 23 model.embedding_dim 2.0 +926 23 model.relation_dim 0.0 +926 23 model.scoring_fct_norm 1.0 +926 23 optimizer.lr 0.015460943003764396 +926 23 training.batch_size 2.0 +926 23 training.label_smoothing 0.00829238843514204 +926 24 model.embedding_dim 2.0 +926 24 model.relation_dim 1.0 +926 24 model.scoring_fct_norm 1.0 +926 24 optimizer.lr 
0.0023827260896449676 +926 24 training.batch_size 0.0 +926 24 training.label_smoothing 0.0061937504693150005 +926 25 model.embedding_dim 0.0 +926 25 model.relation_dim 1.0 +926 25 model.scoring_fct_norm 2.0 +926 25 optimizer.lr 0.00454171847580683 +926 25 training.batch_size 2.0 +926 25 training.label_smoothing 0.017707230333754855 +926 26 model.embedding_dim 2.0 +926 26 model.relation_dim 2.0 +926 26 model.scoring_fct_norm 1.0 +926 26 optimizer.lr 0.012242892566352227 +926 26 training.batch_size 2.0 +926 26 training.label_smoothing 0.0010817125766404 +926 27 model.embedding_dim 0.0 +926 27 model.relation_dim 1.0 +926 27 model.scoring_fct_norm 2.0 +926 27 optimizer.lr 0.02023515080106194 +926 27 training.batch_size 2.0 +926 27 training.label_smoothing 0.022648601520120548 +926 28 model.embedding_dim 2.0 +926 28 model.relation_dim 0.0 +926 28 model.scoring_fct_norm 1.0 +926 28 optimizer.lr 0.0434017994339341 +926 28 training.batch_size 1.0 +926 28 training.label_smoothing 0.20060280777450526 +926 29 model.embedding_dim 0.0 +926 29 model.relation_dim 0.0 +926 29 model.scoring_fct_norm 2.0 +926 29 optimizer.lr 0.0017526683270090166 +926 29 training.batch_size 0.0 +926 29 training.label_smoothing 0.003111041995884012 +926 30 model.embedding_dim 2.0 +926 30 model.relation_dim 0.0 +926 30 model.scoring_fct_norm 1.0 +926 30 optimizer.lr 0.008472274917303887 +926 30 training.batch_size 0.0 +926 30 training.label_smoothing 0.0029175598508182615 +926 31 model.embedding_dim 2.0 +926 31 model.relation_dim 2.0 +926 31 model.scoring_fct_norm 2.0 +926 31 optimizer.lr 0.0026549865821866095 +926 31 training.batch_size 1.0 +926 31 training.label_smoothing 0.37861840720893614 +926 32 model.embedding_dim 2.0 +926 32 model.relation_dim 2.0 +926 32 model.scoring_fct_norm 1.0 +926 32 optimizer.lr 0.02979695873546067 +926 32 training.batch_size 2.0 +926 32 training.label_smoothing 0.0219584162027006 +926 33 model.embedding_dim 0.0 +926 33 model.relation_dim 0.0 +926 33 
model.scoring_fct_norm 1.0 +926 33 optimizer.lr 0.002021315716522995 +926 33 training.batch_size 1.0 +926 33 training.label_smoothing 0.10618285879796775 +926 34 model.embedding_dim 0.0 +926 34 model.relation_dim 2.0 +926 34 model.scoring_fct_norm 2.0 +926 34 optimizer.lr 0.0011760646289805754 +926 34 training.batch_size 0.0 +926 34 training.label_smoothing 0.2657167712982702 +926 35 model.embedding_dim 0.0 +926 35 model.relation_dim 0.0 +926 35 model.scoring_fct_norm 2.0 +926 35 optimizer.lr 0.0035029452366941967 +926 35 training.batch_size 1.0 +926 35 training.label_smoothing 0.047140305656561764 +926 36 model.embedding_dim 2.0 +926 36 model.relation_dim 1.0 +926 36 model.scoring_fct_norm 1.0 +926 36 optimizer.lr 0.023780740431299612 +926 36 training.batch_size 0.0 +926 36 training.label_smoothing 0.009905611632701095 +926 37 model.embedding_dim 2.0 +926 37 model.relation_dim 1.0 +926 37 model.scoring_fct_norm 1.0 +926 37 optimizer.lr 0.0010942382174796844 +926 37 training.batch_size 1.0 +926 37 training.label_smoothing 0.008286718084963786 +926 38 model.embedding_dim 0.0 +926 38 model.relation_dim 2.0 +926 38 model.scoring_fct_norm 1.0 +926 38 optimizer.lr 0.08231144878204813 +926 38 training.batch_size 1.0 +926 38 training.label_smoothing 0.5217816645536105 +926 39 model.embedding_dim 2.0 +926 39 model.relation_dim 2.0 +926 39 model.scoring_fct_norm 2.0 +926 39 optimizer.lr 0.015766704872277288 +926 39 training.batch_size 1.0 +926 39 training.label_smoothing 0.0014961015998534435 +926 40 model.embedding_dim 1.0 +926 40 model.relation_dim 0.0 +926 40 model.scoring_fct_norm 1.0 +926 40 optimizer.lr 0.0015298323842556408 +926 40 training.batch_size 1.0 +926 40 training.label_smoothing 0.0036892773197875436 +926 41 model.embedding_dim 0.0 +926 41 model.relation_dim 2.0 +926 41 model.scoring_fct_norm 1.0 +926 41 optimizer.lr 0.005844362893078874 +926 41 training.batch_size 0.0 +926 41 training.label_smoothing 0.001727013894902487 +926 42 model.embedding_dim 0.0 +926 
42 model.relation_dim 2.0 +926 42 model.scoring_fct_norm 1.0 +926 42 optimizer.lr 0.0067432175422386465 +926 42 training.batch_size 1.0 +926 42 training.label_smoothing 0.5345785810449056 +926 43 model.embedding_dim 1.0 +926 43 model.relation_dim 1.0 +926 43 model.scoring_fct_norm 1.0 +926 43 optimizer.lr 0.0034555891514808984 +926 43 training.batch_size 2.0 +926 43 training.label_smoothing 0.15403855168695604 +926 44 model.embedding_dim 2.0 +926 44 model.relation_dim 0.0 +926 44 model.scoring_fct_norm 2.0 +926 44 optimizer.lr 0.09969889978345711 +926 44 training.batch_size 1.0 +926 44 training.label_smoothing 0.00803093489729108 +926 45 model.embedding_dim 1.0 +926 45 model.relation_dim 2.0 +926 45 model.scoring_fct_norm 1.0 +926 45 optimizer.lr 0.0017193454017326842 +926 45 training.batch_size 1.0 +926 45 training.label_smoothing 0.9336896436602667 +926 46 model.embedding_dim 2.0 +926 46 model.relation_dim 2.0 +926 46 model.scoring_fct_norm 2.0 +926 46 optimizer.lr 0.004373418269404748 +926 46 training.batch_size 0.0 +926 46 training.label_smoothing 0.5324972832048518 +926 47 model.embedding_dim 2.0 +926 47 model.relation_dim 1.0 +926 47 model.scoring_fct_norm 1.0 +926 47 optimizer.lr 0.016838913474479914 +926 47 training.batch_size 1.0 +926 47 training.label_smoothing 0.12734492280177645 +926 48 model.embedding_dim 0.0 +926 48 model.relation_dim 2.0 +926 48 model.scoring_fct_norm 1.0 +926 48 optimizer.lr 0.010055764255220865 +926 48 training.batch_size 1.0 +926 48 training.label_smoothing 0.5742644562893443 +926 49 model.embedding_dim 2.0 +926 49 model.relation_dim 0.0 +926 49 model.scoring_fct_norm 1.0 +926 49 optimizer.lr 0.0016429527755404529 +926 49 training.batch_size 0.0 +926 49 training.label_smoothing 0.006924786066579809 +926 50 model.embedding_dim 1.0 +926 50 model.relation_dim 2.0 +926 50 model.scoring_fct_norm 1.0 +926 50 optimizer.lr 0.025285633048147273 +926 50 training.batch_size 0.0 +926 50 training.label_smoothing 0.041621639139121584 +926 51 
model.embedding_dim 1.0 +926 51 model.relation_dim 0.0 +926 51 model.scoring_fct_norm 1.0 +926 51 optimizer.lr 0.0783830715085147 +926 51 training.batch_size 2.0 +926 51 training.label_smoothing 0.032643840978267856 +926 52 model.embedding_dim 1.0 +926 52 model.relation_dim 0.0 +926 52 model.scoring_fct_norm 1.0 +926 52 optimizer.lr 0.05467449515376161 +926 52 training.batch_size 2.0 +926 52 training.label_smoothing 0.1789076404505961 +926 53 model.embedding_dim 1.0 +926 53 model.relation_dim 1.0 +926 53 model.scoring_fct_norm 1.0 +926 53 optimizer.lr 0.015737044366993472 +926 53 training.batch_size 1.0 +926 53 training.label_smoothing 0.24409864129719802 +926 54 model.embedding_dim 2.0 +926 54 model.relation_dim 2.0 +926 54 model.scoring_fct_norm 1.0 +926 54 optimizer.lr 0.001053362774867716 +926 54 training.batch_size 2.0 +926 54 training.label_smoothing 0.49759769884584937 +926 55 model.embedding_dim 1.0 +926 55 model.relation_dim 0.0 +926 55 model.scoring_fct_norm 1.0 +926 55 optimizer.lr 0.001249617061819342 +926 55 training.batch_size 2.0 +926 55 training.label_smoothing 0.004184733541026613 +926 56 model.embedding_dim 1.0 +926 56 model.relation_dim 0.0 +926 56 model.scoring_fct_norm 1.0 +926 56 optimizer.lr 0.0010651806554888223 +926 56 training.batch_size 0.0 +926 56 training.label_smoothing 0.7769609421528714 +926 57 model.embedding_dim 2.0 +926 57 model.relation_dim 0.0 +926 57 model.scoring_fct_norm 1.0 +926 57 optimizer.lr 0.005939498963416625 +926 57 training.batch_size 0.0 +926 57 training.label_smoothing 0.04925081805039875 +926 58 model.embedding_dim 2.0 +926 58 model.relation_dim 1.0 +926 58 model.scoring_fct_norm 1.0 +926 58 optimizer.lr 0.007693419219452524 +926 58 training.batch_size 1.0 +926 58 training.label_smoothing 0.005320326301112491 +926 59 model.embedding_dim 1.0 +926 59 model.relation_dim 2.0 +926 59 model.scoring_fct_norm 1.0 +926 59 optimizer.lr 0.07036131748065942 +926 59 training.batch_size 2.0 +926 59 training.label_smoothing 
0.09075575435683447 +926 60 model.embedding_dim 0.0 +926 60 model.relation_dim 0.0 +926 60 model.scoring_fct_norm 1.0 +926 60 optimizer.lr 0.0011814914843666573 +926 60 training.batch_size 0.0 +926 60 training.label_smoothing 0.049068186435264595 +926 61 model.embedding_dim 1.0 +926 61 model.relation_dim 0.0 +926 61 model.scoring_fct_norm 2.0 +926 61 optimizer.lr 0.003706807912205424 +926 61 training.batch_size 0.0 +926 61 training.label_smoothing 0.008735970571841455 +926 62 model.embedding_dim 1.0 +926 62 model.relation_dim 1.0 +926 62 model.scoring_fct_norm 1.0 +926 62 optimizer.lr 0.0036675459419931353 +926 62 training.batch_size 2.0 +926 62 training.label_smoothing 0.6786179434588034 +926 63 model.embedding_dim 2.0 +926 63 model.relation_dim 0.0 +926 63 model.scoring_fct_norm 1.0 +926 63 optimizer.lr 0.004503437205809038 +926 63 training.batch_size 2.0 +926 63 training.label_smoothing 0.0013631282418171645 +926 64 model.embedding_dim 0.0 +926 64 model.relation_dim 1.0 +926 64 model.scoring_fct_norm 2.0 +926 64 optimizer.lr 0.003928350479694584 +926 64 training.batch_size 1.0 +926 64 training.label_smoothing 0.09709331361193675 +926 65 model.embedding_dim 2.0 +926 65 model.relation_dim 0.0 +926 65 model.scoring_fct_norm 2.0 +926 65 optimizer.lr 0.0014947997545840825 +926 65 training.batch_size 2.0 +926 65 training.label_smoothing 0.10225272257936242 +926 66 model.embedding_dim 0.0 +926 66 model.relation_dim 1.0 +926 66 model.scoring_fct_norm 2.0 +926 66 optimizer.lr 0.004139097116413269 +926 66 training.batch_size 1.0 +926 66 training.label_smoothing 0.06240294603279947 +926 67 model.embedding_dim 2.0 +926 67 model.relation_dim 2.0 +926 67 model.scoring_fct_norm 2.0 +926 67 optimizer.lr 0.027403433975376833 +926 67 training.batch_size 2.0 +926 67 training.label_smoothing 0.09784846291412524 +926 68 model.embedding_dim 0.0 +926 68 model.relation_dim 1.0 +926 68 model.scoring_fct_norm 2.0 +926 68 optimizer.lr 0.002270929276346988 +926 68 training.batch_size 2.0 
+926 68 training.label_smoothing 0.49546562224031354 +926 69 model.embedding_dim 2.0 +926 69 model.relation_dim 1.0 +926 69 model.scoring_fct_norm 2.0 +926 69 optimizer.lr 0.0029169466521680363 +926 69 training.batch_size 1.0 +926 69 training.label_smoothing 0.2274253686465856 +926 70 model.embedding_dim 2.0 +926 70 model.relation_dim 0.0 +926 70 model.scoring_fct_norm 2.0 +926 70 optimizer.lr 0.057651907543737786 +926 70 training.batch_size 1.0 +926 70 training.label_smoothing 0.22179181837872322 +926 71 model.embedding_dim 1.0 +926 71 model.relation_dim 0.0 +926 71 model.scoring_fct_norm 2.0 +926 71 optimizer.lr 0.001816213322009834 +926 71 training.batch_size 0.0 +926 71 training.label_smoothing 0.015575897519199473 +926 72 model.embedding_dim 0.0 +926 72 model.relation_dim 1.0 +926 72 model.scoring_fct_norm 2.0 +926 72 optimizer.lr 0.008600092451381235 +926 72 training.batch_size 2.0 +926 72 training.label_smoothing 0.001627261172746976 +926 73 model.embedding_dim 1.0 +926 73 model.relation_dim 2.0 +926 73 model.scoring_fct_norm 2.0 +926 73 optimizer.lr 0.022729007086070896 +926 73 training.batch_size 0.0 +926 73 training.label_smoothing 0.0028145217726545193 +926 74 model.embedding_dim 1.0 +926 74 model.relation_dim 0.0 +926 74 model.scoring_fct_norm 2.0 +926 74 optimizer.lr 0.006518286026930294 +926 74 training.batch_size 0.0 +926 74 training.label_smoothing 0.46587860512343293 +926 75 model.embedding_dim 0.0 +926 75 model.relation_dim 2.0 +926 75 model.scoring_fct_norm 1.0 +926 75 optimizer.lr 0.021530616279928725 +926 75 training.batch_size 2.0 +926 75 training.label_smoothing 0.013070249967778669 +926 76 model.embedding_dim 2.0 +926 76 model.relation_dim 0.0 +926 76 model.scoring_fct_norm 1.0 +926 76 optimizer.lr 0.04373211943188968 +926 76 training.batch_size 1.0 +926 76 training.label_smoothing 0.1042754114623658 +926 77 model.embedding_dim 2.0 +926 77 model.relation_dim 1.0 +926 77 model.scoring_fct_norm 1.0 +926 77 optimizer.lr 0.00645186039443455 +926 
77 training.batch_size 1.0 +926 77 training.label_smoothing 0.008019858859408592 +926 78 model.embedding_dim 2.0 +926 78 model.relation_dim 2.0 +926 78 model.scoring_fct_norm 1.0 +926 78 optimizer.lr 0.001880132940797137 +926 78 training.batch_size 0.0 +926 78 training.label_smoothing 0.5447454940711515 +926 79 model.embedding_dim 2.0 +926 79 model.relation_dim 2.0 +926 79 model.scoring_fct_norm 1.0 +926 79 optimizer.lr 0.040526591582309444 +926 79 training.batch_size 1.0 +926 79 training.label_smoothing 0.18911387063941515 +926 80 model.embedding_dim 0.0 +926 80 model.relation_dim 2.0 +926 80 model.scoring_fct_norm 1.0 +926 80 optimizer.lr 0.00928862107135341 +926 80 training.batch_size 1.0 +926 80 training.label_smoothing 0.152893734538093 +926 81 model.embedding_dim 1.0 +926 81 model.relation_dim 2.0 +926 81 model.scoring_fct_norm 1.0 +926 81 optimizer.lr 0.003619247229165437 +926 81 training.batch_size 0.0 +926 81 training.label_smoothing 0.0016904698271706887 +926 82 model.embedding_dim 0.0 +926 82 model.relation_dim 1.0 +926 82 model.scoring_fct_norm 1.0 +926 82 optimizer.lr 0.0065475991513630994 +926 82 training.batch_size 1.0 +926 82 training.label_smoothing 0.552893078363825 +926 83 model.embedding_dim 1.0 +926 83 model.relation_dim 0.0 +926 83 model.scoring_fct_norm 1.0 +926 83 optimizer.lr 0.005230830669285825 +926 83 training.batch_size 1.0 +926 83 training.label_smoothing 0.046076178412836594 +926 84 model.embedding_dim 0.0 +926 84 model.relation_dim 1.0 +926 84 model.scoring_fct_norm 1.0 +926 84 optimizer.lr 0.0028669798233544176 +926 84 training.batch_size 2.0 +926 84 training.label_smoothing 0.0031686037738111383 +926 85 model.embedding_dim 0.0 +926 85 model.relation_dim 1.0 +926 85 model.scoring_fct_norm 2.0 +926 85 optimizer.lr 0.07701510823769722 +926 85 training.batch_size 1.0 +926 85 training.label_smoothing 0.05901835620503486 +926 86 model.embedding_dim 1.0 +926 86 model.relation_dim 0.0 +926 86 model.scoring_fct_norm 1.0 +926 86 optimizer.lr 
0.08428135398845941 +926 86 training.batch_size 1.0 +926 86 training.label_smoothing 0.08579859559173075 +926 87 model.embedding_dim 2.0 +926 87 model.relation_dim 0.0 +926 87 model.scoring_fct_norm 1.0 +926 87 optimizer.lr 0.07698356134147258 +926 87 training.batch_size 0.0 +926 87 training.label_smoothing 0.006018500151327247 +926 88 model.embedding_dim 2.0 +926 88 model.relation_dim 1.0 +926 88 model.scoring_fct_norm 1.0 +926 88 optimizer.lr 0.003307774068567376 +926 88 training.batch_size 2.0 +926 88 training.label_smoothing 0.14825683580598414 +926 89 model.embedding_dim 1.0 +926 89 model.relation_dim 2.0 +926 89 model.scoring_fct_norm 1.0 +926 89 optimizer.lr 0.003971880914345553 +926 89 training.batch_size 0.0 +926 89 training.label_smoothing 0.0012131569088777807 +926 90 model.embedding_dim 1.0 +926 90 model.relation_dim 0.0 +926 90 model.scoring_fct_norm 2.0 +926 90 optimizer.lr 0.03413350585123635 +926 90 training.batch_size 1.0 +926 90 training.label_smoothing 0.46106544793257076 +926 91 model.embedding_dim 1.0 +926 91 model.relation_dim 0.0 +926 91 model.scoring_fct_norm 1.0 +926 91 optimizer.lr 0.0010724251457612942 +926 91 training.batch_size 2.0 +926 91 training.label_smoothing 0.0018947025038912487 +926 92 model.embedding_dim 2.0 +926 92 model.relation_dim 1.0 +926 92 model.scoring_fct_norm 2.0 +926 92 optimizer.lr 0.001021134460296192 +926 92 training.batch_size 2.0 +926 92 training.label_smoothing 0.013100307535377777 +926 93 model.embedding_dim 1.0 +926 93 model.relation_dim 0.0 +926 93 model.scoring_fct_norm 1.0 +926 93 optimizer.lr 0.0010949960075148837 +926 93 training.batch_size 2.0 +926 93 training.label_smoothing 0.9773045960104987 +926 94 model.embedding_dim 0.0 +926 94 model.relation_dim 0.0 +926 94 model.scoring_fct_norm 2.0 +926 94 optimizer.lr 0.09389077752502702 +926 94 training.batch_size 0.0 +926 94 training.label_smoothing 0.045634787656453304 +926 95 model.embedding_dim 1.0 +926 95 model.relation_dim 2.0 +926 95 
model.scoring_fct_norm 2.0 +926 95 optimizer.lr 0.023929793003197915 +926 95 training.batch_size 0.0 +926 95 training.label_smoothing 0.03107683911257804 +926 96 model.embedding_dim 0.0 +926 96 model.relation_dim 1.0 +926 96 model.scoring_fct_norm 1.0 +926 96 optimizer.lr 0.015306389327735436 +926 96 training.batch_size 2.0 +926 96 training.label_smoothing 0.0010874860029195933 +926 97 model.embedding_dim 2.0 +926 97 model.relation_dim 0.0 +926 97 model.scoring_fct_norm 2.0 +926 97 optimizer.lr 0.001909238631503563 +926 97 training.batch_size 0.0 +926 97 training.label_smoothing 0.7090157139925191 +926 98 model.embedding_dim 2.0 +926 98 model.relation_dim 0.0 +926 98 model.scoring_fct_norm 1.0 +926 98 optimizer.lr 0.003459358124415141 +926 98 training.batch_size 1.0 +926 98 training.label_smoothing 0.10935569596532944 +926 99 model.embedding_dim 0.0 +926 99 model.relation_dim 0.0 +926 99 model.scoring_fct_norm 1.0 +926 99 optimizer.lr 0.052312692330480866 +926 99 training.batch_size 2.0 +926 99 training.label_smoothing 0.27151766227113094 +926 100 model.embedding_dim 0.0 +926 100 model.relation_dim 0.0 +926 100 model.scoring_fct_norm 1.0 +926 100 optimizer.lr 0.0024231340836757664 +926 100 training.batch_size 0.0 +926 100 training.label_smoothing 0.16992709862848823 +926 1 dataset """kinships""" +926 1 model """transr""" +926 1 loss """crossentropy""" +926 1 regularizer """no""" +926 1 optimizer """adam""" +926 1 training_loop """lcwa""" +926 1 evaluator """rankbased""" +926 2 dataset """kinships""" +926 2 model """transr""" +926 2 loss """crossentropy""" +926 2 regularizer """no""" +926 2 optimizer """adam""" +926 2 training_loop """lcwa""" +926 2 evaluator """rankbased""" +926 3 dataset """kinships""" +926 3 model """transr""" +926 3 loss """crossentropy""" +926 3 regularizer """no""" +926 3 optimizer """adam""" +926 3 training_loop """lcwa""" +926 3 evaluator """rankbased""" +926 4 dataset """kinships""" +926 4 model """transr""" +926 4 loss """crossentropy""" 
+926 4 regularizer """no""" +926 4 optimizer """adam""" +926 4 training_loop """lcwa""" +926 4 evaluator """rankbased""" +926 5 dataset """kinships""" +926 5 model """transr""" +926 5 loss """crossentropy""" +926 5 regularizer """no""" +926 5 optimizer """adam""" +926 5 training_loop """lcwa""" +926 5 evaluator """rankbased""" +926 6 dataset """kinships""" +926 6 model """transr""" +926 6 loss """crossentropy""" +926 6 regularizer """no""" +926 6 optimizer """adam""" +926 6 training_loop """lcwa""" +926 6 evaluator """rankbased""" +926 7 dataset """kinships""" +926 7 model """transr""" +926 7 loss """crossentropy""" +926 7 regularizer """no""" +926 7 optimizer """adam""" +926 7 training_loop """lcwa""" +926 7 evaluator """rankbased""" +926 8 dataset """kinships""" +926 8 model """transr""" +926 8 loss """crossentropy""" +926 8 regularizer """no""" +926 8 optimizer """adam""" +926 8 training_loop """lcwa""" +926 8 evaluator """rankbased""" +926 9 dataset """kinships""" +926 9 model """transr""" +926 9 loss """crossentropy""" +926 9 regularizer """no""" +926 9 optimizer """adam""" +926 9 training_loop """lcwa""" +926 9 evaluator """rankbased""" +926 10 dataset """kinships""" +926 10 model """transr""" +926 10 loss """crossentropy""" +926 10 regularizer """no""" +926 10 optimizer """adam""" +926 10 training_loop """lcwa""" +926 10 evaluator """rankbased""" +926 11 dataset """kinships""" +926 11 model """transr""" +926 11 loss """crossentropy""" +926 11 regularizer """no""" +926 11 optimizer """adam""" +926 11 training_loop """lcwa""" +926 11 evaluator """rankbased""" +926 12 dataset """kinships""" +926 12 model """transr""" +926 12 loss """crossentropy""" +926 12 regularizer """no""" +926 12 optimizer """adam""" +926 12 training_loop """lcwa""" +926 12 evaluator """rankbased""" +926 13 dataset """kinships""" +926 13 model """transr""" +926 13 loss """crossentropy""" +926 13 regularizer """no""" +926 13 optimizer """adam""" +926 13 training_loop """lcwa""" +926 13 
evaluator """rankbased""" +926 14 dataset """kinships""" +926 14 model """transr""" +926 14 loss """crossentropy""" +926 14 regularizer """no""" +926 14 optimizer """adam""" +926 14 training_loop """lcwa""" +926 14 evaluator """rankbased""" +926 15 dataset """kinships""" +926 15 model """transr""" +926 15 loss """crossentropy""" +926 15 regularizer """no""" +926 15 optimizer """adam""" +926 15 training_loop """lcwa""" +926 15 evaluator """rankbased""" +926 16 dataset """kinships""" +926 16 model """transr""" +926 16 loss """crossentropy""" +926 16 regularizer """no""" +926 16 optimizer """adam""" +926 16 training_loop """lcwa""" +926 16 evaluator """rankbased""" +926 17 dataset """kinships""" +926 17 model """transr""" +926 17 loss """crossentropy""" +926 17 regularizer """no""" +926 17 optimizer """adam""" +926 17 training_loop """lcwa""" +926 17 evaluator """rankbased""" +926 18 dataset """kinships""" +926 18 model """transr""" +926 18 loss """crossentropy""" +926 18 regularizer """no""" +926 18 optimizer """adam""" +926 18 training_loop """lcwa""" +926 18 evaluator """rankbased""" +926 19 dataset """kinships""" +926 19 model """transr""" +926 19 loss """crossentropy""" +926 19 regularizer """no""" +926 19 optimizer """adam""" +926 19 training_loop """lcwa""" +926 19 evaluator """rankbased""" +926 20 dataset """kinships""" +926 20 model """transr""" +926 20 loss """crossentropy""" +926 20 regularizer """no""" +926 20 optimizer """adam""" +926 20 training_loop """lcwa""" +926 20 evaluator """rankbased""" +926 21 dataset """kinships""" +926 21 model """transr""" +926 21 loss """crossentropy""" +926 21 regularizer """no""" +926 21 optimizer """adam""" +926 21 training_loop """lcwa""" +926 21 evaluator """rankbased""" +926 22 dataset """kinships""" +926 22 model """transr""" +926 22 loss """crossentropy""" +926 22 regularizer """no""" +926 22 optimizer """adam""" +926 22 training_loop """lcwa""" +926 22 evaluator """rankbased""" +926 23 dataset """kinships""" +926 23 
model """transr""" +926 23 loss """crossentropy""" +926 23 regularizer """no""" +926 23 optimizer """adam""" +926 23 training_loop """lcwa""" +926 23 evaluator """rankbased""" +926 24 dataset """kinships""" +926 24 model """transr""" +926 24 loss """crossentropy""" +926 24 regularizer """no""" +926 24 optimizer """adam""" +926 24 training_loop """lcwa""" +926 24 evaluator """rankbased""" +926 25 dataset """kinships""" +926 25 model """transr""" +926 25 loss """crossentropy""" +926 25 regularizer """no""" +926 25 optimizer """adam""" +926 25 training_loop """lcwa""" +926 25 evaluator """rankbased""" +926 26 dataset """kinships""" +926 26 model """transr""" +926 26 loss """crossentropy""" +926 26 regularizer """no""" +926 26 optimizer """adam""" +926 26 training_loop """lcwa""" +926 26 evaluator """rankbased""" +926 27 dataset """kinships""" +926 27 model """transr""" +926 27 loss """crossentropy""" +926 27 regularizer """no""" +926 27 optimizer """adam""" +926 27 training_loop """lcwa""" +926 27 evaluator """rankbased""" +926 28 dataset """kinships""" +926 28 model """transr""" +926 28 loss """crossentropy""" +926 28 regularizer """no""" +926 28 optimizer """adam""" +926 28 training_loop """lcwa""" +926 28 evaluator """rankbased""" +926 29 dataset """kinships""" +926 29 model """transr""" +926 29 loss """crossentropy""" +926 29 regularizer """no""" +926 29 optimizer """adam""" +926 29 training_loop """lcwa""" +926 29 evaluator """rankbased""" +926 30 dataset """kinships""" +926 30 model """transr""" +926 30 loss """crossentropy""" +926 30 regularizer """no""" +926 30 optimizer """adam""" +926 30 training_loop """lcwa""" +926 30 evaluator """rankbased""" +926 31 dataset """kinships""" +926 31 model """transr""" +926 31 loss """crossentropy""" +926 31 regularizer """no""" +926 31 optimizer """adam""" +926 31 training_loop """lcwa""" +926 31 evaluator """rankbased""" +926 32 dataset """kinships""" +926 32 model """transr""" +926 32 loss """crossentropy""" +926 32 
regularizer """no""" +926 32 optimizer """adam""" +926 32 training_loop """lcwa""" +926 32 evaluator """rankbased""" +926 33 dataset """kinships""" +926 33 model """transr""" +926 33 loss """crossentropy""" +926 33 regularizer """no""" +926 33 optimizer """adam""" +926 33 training_loop """lcwa""" +926 33 evaluator """rankbased""" +926 34 dataset """kinships""" +926 34 model """transr""" +926 34 loss """crossentropy""" +926 34 regularizer """no""" +926 34 optimizer """adam""" +926 34 training_loop """lcwa""" +926 34 evaluator """rankbased""" +926 35 dataset """kinships""" +926 35 model """transr""" +926 35 loss """crossentropy""" +926 35 regularizer """no""" +926 35 optimizer """adam""" +926 35 training_loop """lcwa""" +926 35 evaluator """rankbased""" +926 36 dataset """kinships""" +926 36 model """transr""" +926 36 loss """crossentropy""" +926 36 regularizer """no""" +926 36 optimizer """adam""" +926 36 training_loop """lcwa""" +926 36 evaluator """rankbased""" +926 37 dataset """kinships""" +926 37 model """transr""" +926 37 loss """crossentropy""" +926 37 regularizer """no""" +926 37 optimizer """adam""" +926 37 training_loop """lcwa""" +926 37 evaluator """rankbased""" +926 38 dataset """kinships""" +926 38 model """transr""" +926 38 loss """crossentropy""" +926 38 regularizer """no""" +926 38 optimizer """adam""" +926 38 training_loop """lcwa""" +926 38 evaluator """rankbased""" +926 39 dataset """kinships""" +926 39 model """transr""" +926 39 loss """crossentropy""" +926 39 regularizer """no""" +926 39 optimizer """adam""" +926 39 training_loop """lcwa""" +926 39 evaluator """rankbased""" +926 40 dataset """kinships""" +926 40 model """transr""" +926 40 loss """crossentropy""" +926 40 regularizer """no""" +926 40 optimizer """adam""" +926 40 training_loop """lcwa""" +926 40 evaluator """rankbased""" +926 41 dataset """kinships""" +926 41 model """transr""" +926 41 loss """crossentropy""" +926 41 regularizer """no""" +926 41 optimizer """adam""" +926 41 
training_loop """lcwa""" +926 41 evaluator """rankbased""" +926 42 dataset """kinships""" +926 42 model """transr""" +926 42 loss """crossentropy""" +926 42 regularizer """no""" +926 42 optimizer """adam""" +926 42 training_loop """lcwa""" +926 42 evaluator """rankbased""" +926 43 dataset """kinships""" +926 43 model """transr""" +926 43 loss """crossentropy""" +926 43 regularizer """no""" +926 43 optimizer """adam""" +926 43 training_loop """lcwa""" +926 43 evaluator """rankbased""" +926 44 dataset """kinships""" +926 44 model """transr""" +926 44 loss """crossentropy""" +926 44 regularizer """no""" +926 44 optimizer """adam""" +926 44 training_loop """lcwa""" +926 44 evaluator """rankbased""" +926 45 dataset """kinships""" +926 45 model """transr""" +926 45 loss """crossentropy""" +926 45 regularizer """no""" +926 45 optimizer """adam""" +926 45 training_loop """lcwa""" +926 45 evaluator """rankbased""" +926 46 dataset """kinships""" +926 46 model """transr""" +926 46 loss """crossentropy""" +926 46 regularizer """no""" +926 46 optimizer """adam""" +926 46 training_loop """lcwa""" +926 46 evaluator """rankbased""" +926 47 dataset """kinships""" +926 47 model """transr""" +926 47 loss """crossentropy""" +926 47 regularizer """no""" +926 47 optimizer """adam""" +926 47 training_loop """lcwa""" +926 47 evaluator """rankbased""" +926 48 dataset """kinships""" +926 48 model """transr""" +926 48 loss """crossentropy""" +926 48 regularizer """no""" +926 48 optimizer """adam""" +926 48 training_loop """lcwa""" +926 48 evaluator """rankbased""" +926 49 dataset """kinships""" +926 49 model """transr""" +926 49 loss """crossentropy""" +926 49 regularizer """no""" +926 49 optimizer """adam""" +926 49 training_loop """lcwa""" +926 49 evaluator """rankbased""" +926 50 dataset """kinships""" +926 50 model """transr""" +926 50 loss """crossentropy""" +926 50 regularizer """no""" +926 50 optimizer """adam""" +926 50 training_loop """lcwa""" +926 50 evaluator """rankbased""" +926 
51 dataset """kinships""" +926 51 model """transr""" +926 51 loss """crossentropy""" +926 51 regularizer """no""" +926 51 optimizer """adam""" +926 51 training_loop """lcwa""" +926 51 evaluator """rankbased""" +926 52 dataset """kinships""" +926 52 model """transr""" +926 52 loss """crossentropy""" +926 52 regularizer """no""" +926 52 optimizer """adam""" +926 52 training_loop """lcwa""" +926 52 evaluator """rankbased""" +926 53 dataset """kinships""" +926 53 model """transr""" +926 53 loss """crossentropy""" +926 53 regularizer """no""" +926 53 optimizer """adam""" +926 53 training_loop """lcwa""" +926 53 evaluator """rankbased""" +926 54 dataset """kinships""" +926 54 model """transr""" +926 54 loss """crossentropy""" +926 54 regularizer """no""" +926 54 optimizer """adam""" +926 54 training_loop """lcwa""" +926 54 evaluator """rankbased""" +926 55 dataset """kinships""" +926 55 model """transr""" +926 55 loss """crossentropy""" +926 55 regularizer """no""" +926 55 optimizer """adam""" +926 55 training_loop """lcwa""" +926 55 evaluator """rankbased""" +926 56 dataset """kinships""" +926 56 model """transr""" +926 56 loss """crossentropy""" +926 56 regularizer """no""" +926 56 optimizer """adam""" +926 56 training_loop """lcwa""" +926 56 evaluator """rankbased""" +926 57 dataset """kinships""" +926 57 model """transr""" +926 57 loss """crossentropy""" +926 57 regularizer """no""" +926 57 optimizer """adam""" +926 57 training_loop """lcwa""" +926 57 evaluator """rankbased""" +926 58 dataset """kinships""" +926 58 model """transr""" +926 58 loss """crossentropy""" +926 58 regularizer """no""" +926 58 optimizer """adam""" +926 58 training_loop """lcwa""" +926 58 evaluator """rankbased""" +926 59 dataset """kinships""" +926 59 model """transr""" +926 59 loss """crossentropy""" +926 59 regularizer """no""" +926 59 optimizer """adam""" +926 59 training_loop """lcwa""" +926 59 evaluator """rankbased""" +926 60 dataset """kinships""" +926 60 model """transr""" +926 60 
loss """crossentropy""" +926 60 regularizer """no""" +926 60 optimizer """adam""" +926 60 training_loop """lcwa""" +926 60 evaluator """rankbased""" +926 61 dataset """kinships""" +926 61 model """transr""" +926 61 loss """crossentropy""" +926 61 regularizer """no""" +926 61 optimizer """adam""" +926 61 training_loop """lcwa""" +926 61 evaluator """rankbased""" +926 62 dataset """kinships""" +926 62 model """transr""" +926 62 loss """crossentropy""" +926 62 regularizer """no""" +926 62 optimizer """adam""" +926 62 training_loop """lcwa""" +926 62 evaluator """rankbased""" +926 63 dataset """kinships""" +926 63 model """transr""" +926 63 loss """crossentropy""" +926 63 regularizer """no""" +926 63 optimizer """adam""" +926 63 training_loop """lcwa""" +926 63 evaluator """rankbased""" +926 64 dataset """kinships""" +926 64 model """transr""" +926 64 loss """crossentropy""" +926 64 regularizer """no""" +926 64 optimizer """adam""" +926 64 training_loop """lcwa""" +926 64 evaluator """rankbased""" +926 65 dataset """kinships""" +926 65 model """transr""" +926 65 loss """crossentropy""" +926 65 regularizer """no""" +926 65 optimizer """adam""" +926 65 training_loop """lcwa""" +926 65 evaluator """rankbased""" +926 66 dataset """kinships""" +926 66 model """transr""" +926 66 loss """crossentropy""" +926 66 regularizer """no""" +926 66 optimizer """adam""" +926 66 training_loop """lcwa""" +926 66 evaluator """rankbased""" +926 67 dataset """kinships""" +926 67 model """transr""" +926 67 loss """crossentropy""" +926 67 regularizer """no""" +926 67 optimizer """adam""" +926 67 training_loop """lcwa""" +926 67 evaluator """rankbased""" +926 68 dataset """kinships""" +926 68 model """transr""" +926 68 loss """crossentropy""" +926 68 regularizer """no""" +926 68 optimizer """adam""" +926 68 training_loop """lcwa""" +926 68 evaluator """rankbased""" +926 69 dataset """kinships""" +926 69 model """transr""" +926 69 loss """crossentropy""" +926 69 regularizer """no""" +926 69 
optimizer """adam""" +926 69 training_loop """lcwa""" +926 69 evaluator """rankbased""" +926 70 dataset """kinships""" +926 70 model """transr""" +926 70 loss """crossentropy""" +926 70 regularizer """no""" +926 70 optimizer """adam""" +926 70 training_loop """lcwa""" +926 70 evaluator """rankbased""" +926 71 dataset """kinships""" +926 71 model """transr""" +926 71 loss """crossentropy""" +926 71 regularizer """no""" +926 71 optimizer """adam""" +926 71 training_loop """lcwa""" +926 71 evaluator """rankbased""" +926 72 dataset """kinships""" +926 72 model """transr""" +926 72 loss """crossentropy""" +926 72 regularizer """no""" +926 72 optimizer """adam""" +926 72 training_loop """lcwa""" +926 72 evaluator """rankbased""" +926 73 dataset """kinships""" +926 73 model """transr""" +926 73 loss """crossentropy""" +926 73 regularizer """no""" +926 73 optimizer """adam""" +926 73 training_loop """lcwa""" +926 73 evaluator """rankbased""" +926 74 dataset """kinships""" +926 74 model """transr""" +926 74 loss """crossentropy""" +926 74 regularizer """no""" +926 74 optimizer """adam""" +926 74 training_loop """lcwa""" +926 74 evaluator """rankbased""" +926 75 dataset """kinships""" +926 75 model """transr""" +926 75 loss """crossentropy""" +926 75 regularizer """no""" +926 75 optimizer """adam""" +926 75 training_loop """lcwa""" +926 75 evaluator """rankbased""" +926 76 dataset """kinships""" +926 76 model """transr""" +926 76 loss """crossentropy""" +926 76 regularizer """no""" +926 76 optimizer """adam""" +926 76 training_loop """lcwa""" +926 76 evaluator """rankbased""" +926 77 dataset """kinships""" +926 77 model """transr""" +926 77 loss """crossentropy""" +926 77 regularizer """no""" +926 77 optimizer """adam""" +926 77 training_loop """lcwa""" +926 77 evaluator """rankbased""" +926 78 dataset """kinships""" +926 78 model """transr""" +926 78 loss """crossentropy""" +926 78 regularizer """no""" +926 78 optimizer """adam""" +926 78 training_loop """lcwa""" +926 78 
evaluator """rankbased""" +926 79 dataset """kinships""" +926 79 model """transr""" +926 79 loss """crossentropy""" +926 79 regularizer """no""" +926 79 optimizer """adam""" +926 79 training_loop """lcwa""" +926 79 evaluator """rankbased""" +926 80 dataset """kinships""" +926 80 model """transr""" +926 80 loss """crossentropy""" +926 80 regularizer """no""" +926 80 optimizer """adam""" +926 80 training_loop """lcwa""" +926 80 evaluator """rankbased""" +926 81 dataset """kinships""" +926 81 model """transr""" +926 81 loss """crossentropy""" +926 81 regularizer """no""" +926 81 optimizer """adam""" +926 81 training_loop """lcwa""" +926 81 evaluator """rankbased""" +926 82 dataset """kinships""" +926 82 model """transr""" +926 82 loss """crossentropy""" +926 82 regularizer """no""" +926 82 optimizer """adam""" +926 82 training_loop """lcwa""" +926 82 evaluator """rankbased""" +926 83 dataset """kinships""" +926 83 model """transr""" +926 83 loss """crossentropy""" +926 83 regularizer """no""" +926 83 optimizer """adam""" +926 83 training_loop """lcwa""" +926 83 evaluator """rankbased""" +926 84 dataset """kinships""" +926 84 model """transr""" +926 84 loss """crossentropy""" +926 84 regularizer """no""" +926 84 optimizer """adam""" +926 84 training_loop """lcwa""" +926 84 evaluator """rankbased""" +926 85 dataset """kinships""" +926 85 model """transr""" +926 85 loss """crossentropy""" +926 85 regularizer """no""" +926 85 optimizer """adam""" +926 85 training_loop """lcwa""" +926 85 evaluator """rankbased""" +926 86 dataset """kinships""" +926 86 model """transr""" +926 86 loss """crossentropy""" +926 86 regularizer """no""" +926 86 optimizer """adam""" +926 86 training_loop """lcwa""" +926 86 evaluator """rankbased""" +926 87 dataset """kinships""" +926 87 model """transr""" +926 87 loss """crossentropy""" +926 87 regularizer """no""" +926 87 optimizer """adam""" +926 87 training_loop """lcwa""" +926 87 evaluator """rankbased""" +926 88 dataset """kinships""" +926 88 
model """transr""" +926 88 loss """crossentropy""" +926 88 regularizer """no""" +926 88 optimizer """adam""" +926 88 training_loop """lcwa""" +926 88 evaluator """rankbased""" +926 89 dataset """kinships""" +926 89 model """transr""" +926 89 loss """crossentropy""" +926 89 regularizer """no""" +926 89 optimizer """adam""" +926 89 training_loop """lcwa""" +926 89 evaluator """rankbased""" +926 90 dataset """kinships""" +926 90 model """transr""" +926 90 loss """crossentropy""" +926 90 regularizer """no""" +926 90 optimizer """adam""" +926 90 training_loop """lcwa""" +926 90 evaluator """rankbased""" +926 91 dataset """kinships""" +926 91 model """transr""" +926 91 loss """crossentropy""" +926 91 regularizer """no""" +926 91 optimizer """adam""" +926 91 training_loop """lcwa""" +926 91 evaluator """rankbased""" +926 92 dataset """kinships""" +926 92 model """transr""" +926 92 loss """crossentropy""" +926 92 regularizer """no""" +926 92 optimizer """adam""" +926 92 training_loop """lcwa""" +926 92 evaluator """rankbased""" +926 93 dataset """kinships""" +926 93 model """transr""" +926 93 loss """crossentropy""" +926 93 regularizer """no""" +926 93 optimizer """adam""" +926 93 training_loop """lcwa""" +926 93 evaluator """rankbased""" +926 94 dataset """kinships""" +926 94 model """transr""" +926 94 loss """crossentropy""" +926 94 regularizer """no""" +926 94 optimizer """adam""" +926 94 training_loop """lcwa""" +926 94 evaluator """rankbased""" +926 95 dataset """kinships""" +926 95 model """transr""" +926 95 loss """crossentropy""" +926 95 regularizer """no""" +926 95 optimizer """adam""" +926 95 training_loop """lcwa""" +926 95 evaluator """rankbased""" +926 96 dataset """kinships""" +926 96 model """transr""" +926 96 loss """crossentropy""" +926 96 regularizer """no""" +926 96 optimizer """adam""" +926 96 training_loop """lcwa""" +926 96 evaluator """rankbased""" +926 97 dataset """kinships""" +926 97 model """transr""" +926 97 loss """crossentropy""" +926 97 
regularizer """no""" +926 97 optimizer """adam""" +926 97 training_loop """lcwa""" +926 97 evaluator """rankbased""" +926 98 dataset """kinships""" +926 98 model """transr""" +926 98 loss """crossentropy""" +926 98 regularizer """no""" +926 98 optimizer """adam""" +926 98 training_loop """lcwa""" +926 98 evaluator """rankbased""" +926 99 dataset """kinships""" +926 99 model """transr""" +926 99 loss """crossentropy""" +926 99 regularizer """no""" +926 99 optimizer """adam""" +926 99 training_loop """lcwa""" +926 99 evaluator """rankbased""" +926 100 dataset """kinships""" +926 100 model """transr""" +926 100 loss """crossentropy""" +926 100 regularizer """no""" +926 100 optimizer """adam""" +926 100 training_loop """lcwa""" +926 100 evaluator """rankbased""" +927 1 model.embedding_dim 2.0 +927 1 model.relation_dim 1.0 +927 1 model.scoring_fct_norm 2.0 +927 1 optimizer.lr 0.0018345684456473048 +927 1 negative_sampler.num_negs_per_pos 34.0 +927 1 training.batch_size 1.0 +927 2 model.embedding_dim 0.0 +927 2 model.relation_dim 0.0 +927 2 model.scoring_fct_norm 1.0 +927 2 optimizer.lr 0.031774419103487675 +927 2 negative_sampler.num_negs_per_pos 18.0 +927 2 training.batch_size 1.0 +927 3 model.embedding_dim 1.0 +927 3 model.relation_dim 0.0 +927 3 model.scoring_fct_norm 1.0 +927 3 optimizer.lr 0.052815018156252974 +927 3 negative_sampler.num_negs_per_pos 29.0 +927 3 training.batch_size 2.0 +927 4 model.embedding_dim 1.0 +927 4 model.relation_dim 2.0 +927 4 model.scoring_fct_norm 2.0 +927 4 optimizer.lr 0.0011188177319097882 +927 4 negative_sampler.num_negs_per_pos 75.0 +927 4 training.batch_size 0.0 +927 5 model.embedding_dim 0.0 +927 5 model.relation_dim 1.0 +927 5 model.scoring_fct_norm 2.0 +927 5 optimizer.lr 0.012833620957038399 +927 5 negative_sampler.num_negs_per_pos 8.0 +927 5 training.batch_size 2.0 +927 6 model.embedding_dim 1.0 +927 6 model.relation_dim 0.0 +927 6 model.scoring_fct_norm 1.0 +927 6 optimizer.lr 0.016536207587340337 +927 6 
negative_sampler.num_negs_per_pos 14.0 +927 6 training.batch_size 1.0 +927 7 model.embedding_dim 2.0 +927 7 model.relation_dim 1.0 +927 7 model.scoring_fct_norm 1.0 +927 7 optimizer.lr 0.08183718072084288 +927 7 negative_sampler.num_negs_per_pos 85.0 +927 7 training.batch_size 0.0 +927 8 model.embedding_dim 0.0 +927 8 model.relation_dim 0.0 +927 8 model.scoring_fct_norm 1.0 +927 8 optimizer.lr 0.00516615395874046 +927 8 negative_sampler.num_negs_per_pos 42.0 +927 8 training.batch_size 1.0 +927 9 model.embedding_dim 0.0 +927 9 model.relation_dim 2.0 +927 9 model.scoring_fct_norm 2.0 +927 9 optimizer.lr 0.031987367273611345 +927 9 negative_sampler.num_negs_per_pos 89.0 +927 9 training.batch_size 2.0 +927 10 model.embedding_dim 0.0 +927 10 model.relation_dim 1.0 +927 10 model.scoring_fct_norm 1.0 +927 10 optimizer.lr 0.05498686400091702 +927 10 negative_sampler.num_negs_per_pos 81.0 +927 10 training.batch_size 2.0 +927 11 model.embedding_dim 0.0 +927 11 model.relation_dim 2.0 +927 11 model.scoring_fct_norm 2.0 +927 11 optimizer.lr 0.0037168188267399744 +927 11 negative_sampler.num_negs_per_pos 36.0 +927 11 training.batch_size 0.0 +927 12 model.embedding_dim 1.0 +927 12 model.relation_dim 1.0 +927 12 model.scoring_fct_norm 2.0 +927 12 optimizer.lr 0.0019984643386895063 +927 12 negative_sampler.num_negs_per_pos 49.0 +927 12 training.batch_size 0.0 +927 13 model.embedding_dim 0.0 +927 13 model.relation_dim 0.0 +927 13 model.scoring_fct_norm 1.0 +927 13 optimizer.lr 0.016678164491072196 +927 13 negative_sampler.num_negs_per_pos 36.0 +927 13 training.batch_size 2.0 +927 14 model.embedding_dim 0.0 +927 14 model.relation_dim 2.0 +927 14 model.scoring_fct_norm 2.0 +927 14 optimizer.lr 0.0011168553495622363 +927 14 negative_sampler.num_negs_per_pos 11.0 +927 14 training.batch_size 1.0 +927 15 model.embedding_dim 1.0 +927 15 model.relation_dim 1.0 +927 15 model.scoring_fct_norm 1.0 +927 15 optimizer.lr 0.001509504668058991 +927 15 negative_sampler.num_negs_per_pos 0.0 +927 15 
training.batch_size 1.0 +927 16 model.embedding_dim 0.0 +927 16 model.relation_dim 0.0 +927 16 model.scoring_fct_norm 1.0 +927 16 optimizer.lr 0.00205812228352059 +927 16 negative_sampler.num_negs_per_pos 16.0 +927 16 training.batch_size 0.0 +927 17 model.embedding_dim 1.0 +927 17 model.relation_dim 0.0 +927 17 model.scoring_fct_norm 1.0 +927 17 optimizer.lr 0.0013396451214915295 +927 17 negative_sampler.num_negs_per_pos 90.0 +927 17 training.batch_size 2.0 +927 18 model.embedding_dim 1.0 +927 18 model.relation_dim 1.0 +927 18 model.scoring_fct_norm 1.0 +927 18 optimizer.lr 0.0012564344212071363 +927 18 negative_sampler.num_negs_per_pos 48.0 +927 18 training.batch_size 1.0 +927 19 model.embedding_dim 0.0 +927 19 model.relation_dim 0.0 +927 19 model.scoring_fct_norm 2.0 +927 19 optimizer.lr 0.0065195598181479126 +927 19 negative_sampler.num_negs_per_pos 48.0 +927 19 training.batch_size 2.0 +927 20 model.embedding_dim 1.0 +927 20 model.relation_dim 2.0 +927 20 model.scoring_fct_norm 1.0 +927 20 optimizer.lr 0.003480563293581207 +927 20 negative_sampler.num_negs_per_pos 86.0 +927 20 training.batch_size 1.0 +927 21 model.embedding_dim 1.0 +927 21 model.relation_dim 2.0 +927 21 model.scoring_fct_norm 1.0 +927 21 optimizer.lr 0.0013942433728939628 +927 21 negative_sampler.num_negs_per_pos 58.0 +927 21 training.batch_size 2.0 +927 22 model.embedding_dim 1.0 +927 22 model.relation_dim 0.0 +927 22 model.scoring_fct_norm 1.0 +927 22 optimizer.lr 0.002947927614428295 +927 22 negative_sampler.num_negs_per_pos 19.0 +927 22 training.batch_size 1.0 +927 23 model.embedding_dim 1.0 +927 23 model.relation_dim 1.0 +927 23 model.scoring_fct_norm 2.0 +927 23 optimizer.lr 0.008619689838536867 +927 23 negative_sampler.num_negs_per_pos 72.0 +927 23 training.batch_size 1.0 +927 24 model.embedding_dim 0.0 +927 24 model.relation_dim 2.0 +927 24 model.scoring_fct_norm 2.0 +927 24 optimizer.lr 0.04182088114522962 +927 24 negative_sampler.num_negs_per_pos 6.0 +927 24 training.batch_size 0.0 
+927 25 model.embedding_dim 0.0 +927 25 model.relation_dim 0.0 +927 25 model.scoring_fct_norm 1.0 +927 25 optimizer.lr 0.0024815777028489067 +927 25 negative_sampler.num_negs_per_pos 43.0 +927 25 training.batch_size 0.0 +927 26 model.embedding_dim 2.0 +927 26 model.relation_dim 2.0 +927 26 model.scoring_fct_norm 2.0 +927 26 optimizer.lr 0.01944669092807927 +927 26 negative_sampler.num_negs_per_pos 21.0 +927 26 training.batch_size 0.0 +927 27 model.embedding_dim 0.0 +927 27 model.relation_dim 1.0 +927 27 model.scoring_fct_norm 2.0 +927 27 optimizer.lr 0.037565324277539565 +927 27 negative_sampler.num_negs_per_pos 94.0 +927 27 training.batch_size 0.0 +927 28 model.embedding_dim 2.0 +927 28 model.relation_dim 0.0 +927 28 model.scoring_fct_norm 1.0 +927 28 optimizer.lr 0.09635333290635244 +927 28 negative_sampler.num_negs_per_pos 76.0 +927 28 training.batch_size 0.0 +927 29 model.embedding_dim 1.0 +927 29 model.relation_dim 1.0 +927 29 model.scoring_fct_norm 1.0 +927 29 optimizer.lr 0.05516683558757961 +927 29 negative_sampler.num_negs_per_pos 10.0 +927 29 training.batch_size 1.0 +927 30 model.embedding_dim 2.0 +927 30 model.relation_dim 2.0 +927 30 model.scoring_fct_norm 2.0 +927 30 optimizer.lr 0.0012531535845273108 +927 30 negative_sampler.num_negs_per_pos 96.0 +927 30 training.batch_size 1.0 +927 31 model.embedding_dim 2.0 +927 31 model.relation_dim 0.0 +927 31 model.scoring_fct_norm 2.0 +927 31 optimizer.lr 0.018485022025949694 +927 31 negative_sampler.num_negs_per_pos 5.0 +927 31 training.batch_size 0.0 +927 32 model.embedding_dim 2.0 +927 32 model.relation_dim 0.0 +927 32 model.scoring_fct_norm 1.0 +927 32 optimizer.lr 0.01451972528156639 +927 32 negative_sampler.num_negs_per_pos 20.0 +927 32 training.batch_size 2.0 +927 33 model.embedding_dim 2.0 +927 33 model.relation_dim 2.0 +927 33 model.scoring_fct_norm 2.0 +927 33 optimizer.lr 0.008660878831714089 +927 33 negative_sampler.num_negs_per_pos 92.0 +927 33 training.batch_size 1.0 +927 34 model.embedding_dim 0.0 
+927 34 model.relation_dim 1.0 +927 34 model.scoring_fct_norm 1.0 +927 34 optimizer.lr 0.012327615116301871 +927 34 negative_sampler.num_negs_per_pos 7.0 +927 34 training.batch_size 1.0 +927 35 model.embedding_dim 2.0 +927 35 model.relation_dim 1.0 +927 35 model.scoring_fct_norm 2.0 +927 35 optimizer.lr 0.07586686519289061 +927 35 negative_sampler.num_negs_per_pos 49.0 +927 35 training.batch_size 1.0 +927 36 model.embedding_dim 2.0 +927 36 model.relation_dim 0.0 +927 36 model.scoring_fct_norm 2.0 +927 36 optimizer.lr 0.027127476686395154 +927 36 negative_sampler.num_negs_per_pos 59.0 +927 36 training.batch_size 2.0 +927 37 model.embedding_dim 1.0 +927 37 model.relation_dim 0.0 +927 37 model.scoring_fct_norm 1.0 +927 37 optimizer.lr 0.02450412897925675 +927 37 negative_sampler.num_negs_per_pos 19.0 +927 37 training.batch_size 0.0 +927 38 model.embedding_dim 2.0 +927 38 model.relation_dim 2.0 +927 38 model.scoring_fct_norm 1.0 +927 38 optimizer.lr 0.02114948295372074 +927 38 negative_sampler.num_negs_per_pos 45.0 +927 38 training.batch_size 2.0 +927 39 model.embedding_dim 0.0 +927 39 model.relation_dim 0.0 +927 39 model.scoring_fct_norm 2.0 +927 39 optimizer.lr 0.001167408955817817 +927 39 negative_sampler.num_negs_per_pos 21.0 +927 39 training.batch_size 1.0 +927 40 model.embedding_dim 1.0 +927 40 model.relation_dim 2.0 +927 40 model.scoring_fct_norm 2.0 +927 40 optimizer.lr 0.039610289874545755 +927 40 negative_sampler.num_negs_per_pos 88.0 +927 40 training.batch_size 2.0 +927 41 model.embedding_dim 2.0 +927 41 model.relation_dim 0.0 +927 41 model.scoring_fct_norm 1.0 +927 41 optimizer.lr 0.054176445167793075 +927 41 negative_sampler.num_negs_per_pos 34.0 +927 41 training.batch_size 2.0 +927 42 model.embedding_dim 0.0 +927 42 model.relation_dim 0.0 +927 42 model.scoring_fct_norm 2.0 +927 42 optimizer.lr 0.01517438092293523 +927 42 negative_sampler.num_negs_per_pos 28.0 +927 42 training.batch_size 2.0 +927 43 model.embedding_dim 2.0 +927 43 model.relation_dim 2.0 
+927 43 model.scoring_fct_norm 2.0 +927 43 optimizer.lr 0.001443905488805818 +927 43 negative_sampler.num_negs_per_pos 57.0 +927 43 training.batch_size 0.0 +927 44 model.embedding_dim 2.0 +927 44 model.relation_dim 1.0 +927 44 model.scoring_fct_norm 2.0 +927 44 optimizer.lr 0.09103179379605197 +927 44 negative_sampler.num_negs_per_pos 57.0 +927 44 training.batch_size 0.0 +927 45 model.embedding_dim 0.0 +927 45 model.relation_dim 1.0 +927 45 model.scoring_fct_norm 1.0 +927 45 optimizer.lr 0.08417453469912513 +927 45 negative_sampler.num_negs_per_pos 69.0 +927 45 training.batch_size 0.0 +927 46 model.embedding_dim 2.0 +927 46 model.relation_dim 0.0 +927 46 model.scoring_fct_norm 2.0 +927 46 optimizer.lr 0.0064592946044829255 +927 46 negative_sampler.num_negs_per_pos 24.0 +927 46 training.batch_size 0.0 +927 47 model.embedding_dim 2.0 +927 47 model.relation_dim 1.0 +927 47 model.scoring_fct_norm 1.0 +927 47 optimizer.lr 0.005327363129221086 +927 47 negative_sampler.num_negs_per_pos 64.0 +927 47 training.batch_size 1.0 +927 48 model.embedding_dim 0.0 +927 48 model.relation_dim 1.0 +927 48 model.scoring_fct_norm 1.0 +927 48 optimizer.lr 0.05407901531635242 +927 48 negative_sampler.num_negs_per_pos 85.0 +927 48 training.batch_size 2.0 +927 49 model.embedding_dim 1.0 +927 49 model.relation_dim 2.0 +927 49 model.scoring_fct_norm 1.0 +927 49 optimizer.lr 0.0024650868108776467 +927 49 negative_sampler.num_negs_per_pos 84.0 +927 49 training.batch_size 0.0 +927 50 model.embedding_dim 2.0 +927 50 model.relation_dim 0.0 +927 50 model.scoring_fct_norm 1.0 +927 50 optimizer.lr 0.001556949744582271 +927 50 negative_sampler.num_negs_per_pos 98.0 +927 50 training.batch_size 0.0 +927 51 model.embedding_dim 1.0 +927 51 model.relation_dim 2.0 +927 51 model.scoring_fct_norm 1.0 +927 51 optimizer.lr 0.005105678175722427 +927 51 negative_sampler.num_negs_per_pos 19.0 +927 51 training.batch_size 2.0 +927 52 model.embedding_dim 0.0 +927 52 model.relation_dim 0.0 +927 52 
model.scoring_fct_norm 2.0 +927 52 optimizer.lr 0.0314311190395403 +927 52 negative_sampler.num_negs_per_pos 82.0 +927 52 training.batch_size 2.0 +927 53 model.embedding_dim 2.0 +927 53 model.relation_dim 0.0 +927 53 model.scoring_fct_norm 2.0 +927 53 optimizer.lr 0.0023714457425318496 +927 53 negative_sampler.num_negs_per_pos 23.0 +927 53 training.batch_size 1.0 +927 54 model.embedding_dim 0.0 +927 54 model.relation_dim 0.0 +927 54 model.scoring_fct_norm 2.0 +927 54 optimizer.lr 0.00790457808824019 +927 54 negative_sampler.num_negs_per_pos 33.0 +927 54 training.batch_size 1.0 +927 55 model.embedding_dim 2.0 +927 55 model.relation_dim 0.0 +927 55 model.scoring_fct_norm 1.0 +927 55 optimizer.lr 0.02078478055522437 +927 55 negative_sampler.num_negs_per_pos 37.0 +927 55 training.batch_size 0.0 +927 56 model.embedding_dim 2.0 +927 56 model.relation_dim 2.0 +927 56 model.scoring_fct_norm 1.0 +927 56 optimizer.lr 0.07773817596843323 +927 56 negative_sampler.num_negs_per_pos 55.0 +927 56 training.batch_size 1.0 +927 57 model.embedding_dim 0.0 +927 57 model.relation_dim 2.0 +927 57 model.scoring_fct_norm 2.0 +927 57 optimizer.lr 0.0021450853250788724 +927 57 negative_sampler.num_negs_per_pos 78.0 +927 57 training.batch_size 2.0 +927 58 model.embedding_dim 2.0 +927 58 model.relation_dim 1.0 +927 58 model.scoring_fct_norm 1.0 +927 58 optimizer.lr 0.0830038479609706 +927 58 negative_sampler.num_negs_per_pos 89.0 +927 58 training.batch_size 0.0 +927 59 model.embedding_dim 0.0 +927 59 model.relation_dim 0.0 +927 59 model.scoring_fct_norm 1.0 +927 59 optimizer.lr 0.01772351554865485 +927 59 negative_sampler.num_negs_per_pos 35.0 +927 59 training.batch_size 2.0 +927 60 model.embedding_dim 2.0 +927 60 model.relation_dim 2.0 +927 60 model.scoring_fct_norm 1.0 +927 60 optimizer.lr 0.07647170865629908 +927 60 negative_sampler.num_negs_per_pos 31.0 +927 60 training.batch_size 1.0 +927 61 model.embedding_dim 1.0 +927 61 model.relation_dim 1.0 +927 61 model.scoring_fct_norm 1.0 +927 61 
optimizer.lr 0.0756432897243744 +927 61 negative_sampler.num_negs_per_pos 89.0 +927 61 training.batch_size 0.0 +927 62 model.embedding_dim 2.0 +927 62 model.relation_dim 0.0 +927 62 model.scoring_fct_norm 1.0 +927 62 optimizer.lr 0.013918668869101501 +927 62 negative_sampler.num_negs_per_pos 65.0 +927 62 training.batch_size 0.0 +927 63 model.embedding_dim 0.0 +927 63 model.relation_dim 1.0 +927 63 model.scoring_fct_norm 1.0 +927 63 optimizer.lr 0.004531223512721877 +927 63 negative_sampler.num_negs_per_pos 0.0 +927 63 training.batch_size 1.0 +927 64 model.embedding_dim 2.0 +927 64 model.relation_dim 1.0 +927 64 model.scoring_fct_norm 2.0 +927 64 optimizer.lr 0.001398486718830273 +927 64 negative_sampler.num_negs_per_pos 93.0 +927 64 training.batch_size 2.0 +927 65 model.embedding_dim 0.0 +927 65 model.relation_dim 0.0 +927 65 model.scoring_fct_norm 2.0 +927 65 optimizer.lr 0.06601483893171804 +927 65 negative_sampler.num_negs_per_pos 67.0 +927 65 training.batch_size 0.0 +927 66 model.embedding_dim 2.0 +927 66 model.relation_dim 1.0 +927 66 model.scoring_fct_norm 1.0 +927 66 optimizer.lr 0.039261782064730105 +927 66 negative_sampler.num_negs_per_pos 0.0 +927 66 training.batch_size 2.0 +927 67 model.embedding_dim 2.0 +927 67 model.relation_dim 2.0 +927 67 model.scoring_fct_norm 2.0 +927 67 optimizer.lr 0.006927824613209209 +927 67 negative_sampler.num_negs_per_pos 43.0 +927 67 training.batch_size 1.0 +927 68 model.embedding_dim 2.0 +927 68 model.relation_dim 1.0 +927 68 model.scoring_fct_norm 2.0 +927 68 optimizer.lr 0.00450217470460478 +927 68 negative_sampler.num_negs_per_pos 15.0 +927 68 training.batch_size 1.0 +927 69 model.embedding_dim 0.0 +927 69 model.relation_dim 2.0 +927 69 model.scoring_fct_norm 2.0 +927 69 optimizer.lr 0.003619239090627738 +927 69 negative_sampler.num_negs_per_pos 20.0 +927 69 training.batch_size 0.0 +927 70 model.embedding_dim 1.0 +927 70 model.relation_dim 1.0 +927 70 model.scoring_fct_norm 1.0 +927 70 optimizer.lr 0.00909886740604272 
+927 70 negative_sampler.num_negs_per_pos 30.0 +927 70 training.batch_size 2.0 +927 71 model.embedding_dim 0.0 +927 71 model.relation_dim 0.0 +927 71 model.scoring_fct_norm 1.0 +927 71 optimizer.lr 0.003472216711762139 +927 71 negative_sampler.num_negs_per_pos 9.0 +927 71 training.batch_size 1.0 +927 72 model.embedding_dim 1.0 +927 72 model.relation_dim 2.0 +927 72 model.scoring_fct_norm 2.0 +927 72 optimizer.lr 0.008862091905398464 +927 72 negative_sampler.num_negs_per_pos 66.0 +927 72 training.batch_size 0.0 +927 73 model.embedding_dim 0.0 +927 73 model.relation_dim 1.0 +927 73 model.scoring_fct_norm 2.0 +927 73 optimizer.lr 0.017312144102785573 +927 73 negative_sampler.num_negs_per_pos 71.0 +927 73 training.batch_size 0.0 +927 74 model.embedding_dim 0.0 +927 74 model.relation_dim 2.0 +927 74 model.scoring_fct_norm 1.0 +927 74 optimizer.lr 0.005974725769131514 +927 74 negative_sampler.num_negs_per_pos 3.0 +927 74 training.batch_size 2.0 +927 75 model.embedding_dim 2.0 +927 75 model.relation_dim 1.0 +927 75 model.scoring_fct_norm 2.0 +927 75 optimizer.lr 0.002692704638049135 +927 75 negative_sampler.num_negs_per_pos 77.0 +927 75 training.batch_size 1.0 +927 76 model.embedding_dim 2.0 +927 76 model.relation_dim 0.0 +927 76 model.scoring_fct_norm 2.0 +927 76 optimizer.lr 0.012666269460874223 +927 76 negative_sampler.num_negs_per_pos 83.0 +927 76 training.batch_size 1.0 +927 77 model.embedding_dim 2.0 +927 77 model.relation_dim 2.0 +927 77 model.scoring_fct_norm 2.0 +927 77 optimizer.lr 0.07568495549119414 +927 77 negative_sampler.num_negs_per_pos 31.0 +927 77 training.batch_size 1.0 +927 78 model.embedding_dim 1.0 +927 78 model.relation_dim 2.0 +927 78 model.scoring_fct_norm 1.0 +927 78 optimizer.lr 0.0034298930740480796 +927 78 negative_sampler.num_negs_per_pos 69.0 +927 78 training.batch_size 1.0 +927 79 model.embedding_dim 2.0 +927 79 model.relation_dim 1.0 +927 79 model.scoring_fct_norm 1.0 +927 79 optimizer.lr 0.04266969504486244 +927 79 
negative_sampler.num_negs_per_pos 40.0 +927 79 training.batch_size 1.0 +927 80 model.embedding_dim 0.0 +927 80 model.relation_dim 2.0 +927 80 model.scoring_fct_norm 2.0 +927 80 optimizer.lr 0.03156286610069572 +927 80 negative_sampler.num_negs_per_pos 65.0 +927 80 training.batch_size 0.0 +927 81 model.embedding_dim 2.0 +927 81 model.relation_dim 2.0 +927 81 model.scoring_fct_norm 1.0 +927 81 optimizer.lr 0.01107818615578879 +927 81 negative_sampler.num_negs_per_pos 0.0 +927 81 training.batch_size 0.0 +927 82 model.embedding_dim 1.0 +927 82 model.relation_dim 2.0 +927 82 model.scoring_fct_norm 1.0 +927 82 optimizer.lr 0.034038295898766634 +927 82 negative_sampler.num_negs_per_pos 74.0 +927 82 training.batch_size 0.0 +927 83 model.embedding_dim 2.0 +927 83 model.relation_dim 2.0 +927 83 model.scoring_fct_norm 1.0 +927 83 optimizer.lr 0.0012372019619734619 +927 83 negative_sampler.num_negs_per_pos 24.0 +927 83 training.batch_size 0.0 +927 84 model.embedding_dim 0.0 +927 84 model.relation_dim 2.0 +927 84 model.scoring_fct_norm 1.0 +927 84 optimizer.lr 0.027024105726625152 +927 84 negative_sampler.num_negs_per_pos 18.0 +927 84 training.batch_size 1.0 +927 85 model.embedding_dim 2.0 +927 85 model.relation_dim 1.0 +927 85 model.scoring_fct_norm 2.0 +927 85 optimizer.lr 0.01095881176400187 +927 85 negative_sampler.num_negs_per_pos 50.0 +927 85 training.batch_size 0.0 +927 86 model.embedding_dim 2.0 +927 86 model.relation_dim 0.0 +927 86 model.scoring_fct_norm 2.0 +927 86 optimizer.lr 0.003418258676359826 +927 86 negative_sampler.num_negs_per_pos 9.0 +927 86 training.batch_size 2.0 +927 87 model.embedding_dim 1.0 +927 87 model.relation_dim 1.0 +927 87 model.scoring_fct_norm 1.0 +927 87 optimizer.lr 0.008591401004926702 +927 87 negative_sampler.num_negs_per_pos 76.0 +927 87 training.batch_size 0.0 +927 88 model.embedding_dim 0.0 +927 88 model.relation_dim 0.0 +927 88 model.scoring_fct_norm 1.0 +927 88 optimizer.lr 0.014274229186693771 +927 88 
negative_sampler.num_negs_per_pos 46.0 +927 88 training.batch_size 2.0 +927 89 model.embedding_dim 1.0 +927 89 model.relation_dim 1.0 +927 89 model.scoring_fct_norm 1.0 +927 89 optimizer.lr 0.0478736318977382 +927 89 negative_sampler.num_negs_per_pos 5.0 +927 89 training.batch_size 2.0 +927 90 model.embedding_dim 0.0 +927 90 model.relation_dim 0.0 +927 90 model.scoring_fct_norm 2.0 +927 90 optimizer.lr 0.05860538858553829 +927 90 negative_sampler.num_negs_per_pos 44.0 +927 90 training.batch_size 2.0 +927 91 model.embedding_dim 0.0 +927 91 model.relation_dim 2.0 +927 91 model.scoring_fct_norm 1.0 +927 91 optimizer.lr 0.023709066285415126 +927 91 negative_sampler.num_negs_per_pos 77.0 +927 91 training.batch_size 0.0 +927 92 model.embedding_dim 1.0 +927 92 model.relation_dim 1.0 +927 92 model.scoring_fct_norm 2.0 +927 92 optimizer.lr 0.0044495876796627355 +927 92 negative_sampler.num_negs_per_pos 46.0 +927 92 training.batch_size 0.0 +927 93 model.embedding_dim 2.0 +927 93 model.relation_dim 2.0 +927 93 model.scoring_fct_norm 2.0 +927 93 optimizer.lr 0.0019668837974052044 +927 93 negative_sampler.num_negs_per_pos 85.0 +927 93 training.batch_size 2.0 +927 94 model.embedding_dim 2.0 +927 94 model.relation_dim 0.0 +927 94 model.scoring_fct_norm 2.0 +927 94 optimizer.lr 0.0038017436515874643 +927 94 negative_sampler.num_negs_per_pos 77.0 +927 94 training.batch_size 2.0 +927 95 model.embedding_dim 2.0 +927 95 model.relation_dim 0.0 +927 95 model.scoring_fct_norm 1.0 +927 95 optimizer.lr 0.07353455938415944 +927 95 negative_sampler.num_negs_per_pos 73.0 +927 95 training.batch_size 1.0 +927 96 model.embedding_dim 1.0 +927 96 model.relation_dim 1.0 +927 96 model.scoring_fct_norm 1.0 +927 96 optimizer.lr 0.002616744779833824 +927 96 negative_sampler.num_negs_per_pos 31.0 +927 96 training.batch_size 2.0 +927 97 model.embedding_dim 0.0 +927 97 model.relation_dim 2.0 +927 97 model.scoring_fct_norm 1.0 +927 97 optimizer.lr 0.03296253463638509 +927 97 
negative_sampler.num_negs_per_pos 60.0 +927 97 training.batch_size 0.0 +927 98 model.embedding_dim 1.0 +927 98 model.relation_dim 1.0 +927 98 model.scoring_fct_norm 2.0 +927 98 optimizer.lr 0.023323963039147078 +927 98 negative_sampler.num_negs_per_pos 71.0 +927 98 training.batch_size 2.0 +927 99 model.embedding_dim 2.0 +927 99 model.relation_dim 2.0 +927 99 model.scoring_fct_norm 2.0 +927 99 optimizer.lr 0.007637325459843533 +927 99 negative_sampler.num_negs_per_pos 95.0 +927 99 training.batch_size 0.0 +927 100 model.embedding_dim 0.0 +927 100 model.relation_dim 1.0 +927 100 model.scoring_fct_norm 2.0 +927 100 optimizer.lr 0.002641306822645154 +927 100 negative_sampler.num_negs_per_pos 10.0 +927 100 training.batch_size 2.0 +927 1 dataset """kinships""" +927 1 model """transr""" +927 1 loss """bceaftersigmoid""" +927 1 regularizer """no""" +927 1 optimizer """adam""" +927 1 training_loop """owa""" +927 1 negative_sampler """basic""" +927 1 evaluator """rankbased""" +927 2 dataset """kinships""" +927 2 model """transr""" +927 2 loss """bceaftersigmoid""" +927 2 regularizer """no""" +927 2 optimizer """adam""" +927 2 training_loop """owa""" +927 2 negative_sampler """basic""" +927 2 evaluator """rankbased""" +927 3 dataset """kinships""" +927 3 model """transr""" +927 3 loss """bceaftersigmoid""" +927 3 regularizer """no""" +927 3 optimizer """adam""" +927 3 training_loop """owa""" +927 3 negative_sampler """basic""" +927 3 evaluator """rankbased""" +927 4 dataset """kinships""" +927 4 model """transr""" +927 4 loss """bceaftersigmoid""" +927 4 regularizer """no""" +927 4 optimizer """adam""" +927 4 training_loop """owa""" +927 4 negative_sampler """basic""" +927 4 evaluator """rankbased""" +927 5 dataset """kinships""" +927 5 model """transr""" +927 5 loss """bceaftersigmoid""" +927 5 regularizer """no""" +927 5 optimizer """adam""" +927 5 training_loop """owa""" +927 5 negative_sampler """basic""" +927 5 evaluator """rankbased""" +927 6 dataset """kinships""" +927 
6 model """transr""" +927 6 loss """bceaftersigmoid""" +927 6 regularizer """no""" +927 6 optimizer """adam""" +927 6 training_loop """owa""" +927 6 negative_sampler """basic""" +927 6 evaluator """rankbased""" +927 7 dataset """kinships""" +927 7 model """transr""" +927 7 loss """bceaftersigmoid""" +927 7 regularizer """no""" +927 7 optimizer """adam""" +927 7 training_loop """owa""" +927 7 negative_sampler """basic""" +927 7 evaluator """rankbased""" +927 8 dataset """kinships""" +927 8 model """transr""" +927 8 loss """bceaftersigmoid""" +927 8 regularizer """no""" +927 8 optimizer """adam""" +927 8 training_loop """owa""" +927 8 negative_sampler """basic""" +927 8 evaluator """rankbased""" +927 9 dataset """kinships""" +927 9 model """transr""" +927 9 loss """bceaftersigmoid""" +927 9 regularizer """no""" +927 9 optimizer """adam""" +927 9 training_loop """owa""" +927 9 negative_sampler """basic""" +927 9 evaluator """rankbased""" +927 10 dataset """kinships""" +927 10 model """transr""" +927 10 loss """bceaftersigmoid""" +927 10 regularizer """no""" +927 10 optimizer """adam""" +927 10 training_loop """owa""" +927 10 negative_sampler """basic""" +927 10 evaluator """rankbased""" +927 11 dataset """kinships""" +927 11 model """transr""" +927 11 loss """bceaftersigmoid""" +927 11 regularizer """no""" +927 11 optimizer """adam""" +927 11 training_loop """owa""" +927 11 negative_sampler """basic""" +927 11 evaluator """rankbased""" +927 12 dataset """kinships""" +927 12 model """transr""" +927 12 loss """bceaftersigmoid""" +927 12 regularizer """no""" +927 12 optimizer """adam""" +927 12 training_loop """owa""" +927 12 negative_sampler """basic""" +927 12 evaluator """rankbased""" +927 13 dataset """kinships""" +927 13 model """transr""" +927 13 loss """bceaftersigmoid""" +927 13 regularizer """no""" +927 13 optimizer """adam""" +927 13 training_loop """owa""" +927 13 negative_sampler """basic""" +927 13 evaluator """rankbased""" +927 14 dataset """kinships""" 
+927 14 model """transr""" +927 14 loss """bceaftersigmoid""" +927 14 regularizer """no""" +927 14 optimizer """adam""" +927 14 training_loop """owa""" +927 14 negative_sampler """basic""" +927 14 evaluator """rankbased""" +927 15 dataset """kinships""" +927 15 model """transr""" +927 15 loss """bceaftersigmoid""" +927 15 regularizer """no""" +927 15 optimizer """adam""" +927 15 training_loop """owa""" +927 15 negative_sampler """basic""" +927 15 evaluator """rankbased""" +927 16 dataset """kinships""" +927 16 model """transr""" +927 16 loss """bceaftersigmoid""" +927 16 regularizer """no""" +927 16 optimizer """adam""" +927 16 training_loop """owa""" +927 16 negative_sampler """basic""" +927 16 evaluator """rankbased""" +927 17 dataset """kinships""" +927 17 model """transr""" +927 17 loss """bceaftersigmoid""" +927 17 regularizer """no""" +927 17 optimizer """adam""" +927 17 training_loop """owa""" +927 17 negative_sampler """basic""" +927 17 evaluator """rankbased""" +927 18 dataset """kinships""" +927 18 model """transr""" +927 18 loss """bceaftersigmoid""" +927 18 regularizer """no""" +927 18 optimizer """adam""" +927 18 training_loop """owa""" +927 18 negative_sampler """basic""" +927 18 evaluator """rankbased""" +927 19 dataset """kinships""" +927 19 model """transr""" +927 19 loss """bceaftersigmoid""" +927 19 regularizer """no""" +927 19 optimizer """adam""" +927 19 training_loop """owa""" +927 19 negative_sampler """basic""" +927 19 evaluator """rankbased""" +927 20 dataset """kinships""" +927 20 model """transr""" +927 20 loss """bceaftersigmoid""" +927 20 regularizer """no""" +927 20 optimizer """adam""" +927 20 training_loop """owa""" +927 20 negative_sampler """basic""" +927 20 evaluator """rankbased""" +927 21 dataset """kinships""" +927 21 model """transr""" +927 21 loss """bceaftersigmoid""" +927 21 regularizer """no""" +927 21 optimizer """adam""" +927 21 training_loop """owa""" +927 21 negative_sampler """basic""" +927 21 evaluator 
"""rankbased""" +927 22 dataset """kinships""" +927 22 model """transr""" +927 22 loss """bceaftersigmoid""" +927 22 regularizer """no""" +927 22 optimizer """adam""" +927 22 training_loop """owa""" +927 22 negative_sampler """basic""" +927 22 evaluator """rankbased""" +927 23 dataset """kinships""" +927 23 model """transr""" +927 23 loss """bceaftersigmoid""" +927 23 regularizer """no""" +927 23 optimizer """adam""" +927 23 training_loop """owa""" +927 23 negative_sampler """basic""" +927 23 evaluator """rankbased""" +927 24 dataset """kinships""" +927 24 model """transr""" +927 24 loss """bceaftersigmoid""" +927 24 regularizer """no""" +927 24 optimizer """adam""" +927 24 training_loop """owa""" +927 24 negative_sampler """basic""" +927 24 evaluator """rankbased""" +927 25 dataset """kinships""" +927 25 model """transr""" +927 25 loss """bceaftersigmoid""" +927 25 regularizer """no""" +927 25 optimizer """adam""" +927 25 training_loop """owa""" +927 25 negative_sampler """basic""" +927 25 evaluator """rankbased""" +927 26 dataset """kinships""" +927 26 model """transr""" +927 26 loss """bceaftersigmoid""" +927 26 regularizer """no""" +927 26 optimizer """adam""" +927 26 training_loop """owa""" +927 26 negative_sampler """basic""" +927 26 evaluator """rankbased""" +927 27 dataset """kinships""" +927 27 model """transr""" +927 27 loss """bceaftersigmoid""" +927 27 regularizer """no""" +927 27 optimizer """adam""" +927 27 training_loop """owa""" +927 27 negative_sampler """basic""" +927 27 evaluator """rankbased""" +927 28 dataset """kinships""" +927 28 model """transr""" +927 28 loss """bceaftersigmoid""" +927 28 regularizer """no""" +927 28 optimizer """adam""" +927 28 training_loop """owa""" +927 28 negative_sampler """basic""" +927 28 evaluator """rankbased""" +927 29 dataset """kinships""" +927 29 model """transr""" +927 29 loss """bceaftersigmoid""" +927 29 regularizer """no""" +927 29 optimizer """adam""" +927 29 training_loop """owa""" +927 29 
negative_sampler """basic""" +927 29 evaluator """rankbased""" +927 30 dataset """kinships""" +927 30 model """transr""" +927 30 loss """bceaftersigmoid""" +927 30 regularizer """no""" +927 30 optimizer """adam""" +927 30 training_loop """owa""" +927 30 negative_sampler """basic""" +927 30 evaluator """rankbased""" +927 31 dataset """kinships""" +927 31 model """transr""" +927 31 loss """bceaftersigmoid""" +927 31 regularizer """no""" +927 31 optimizer """adam""" +927 31 training_loop """owa""" +927 31 negative_sampler """basic""" +927 31 evaluator """rankbased""" +927 32 dataset """kinships""" +927 32 model """transr""" +927 32 loss """bceaftersigmoid""" +927 32 regularizer """no""" +927 32 optimizer """adam""" +927 32 training_loop """owa""" +927 32 negative_sampler """basic""" +927 32 evaluator """rankbased""" +927 33 dataset """kinships""" +927 33 model """transr""" +927 33 loss """bceaftersigmoid""" +927 33 regularizer """no""" +927 33 optimizer """adam""" +927 33 training_loop """owa""" +927 33 negative_sampler """basic""" +927 33 evaluator """rankbased""" +927 34 dataset """kinships""" +927 34 model """transr""" +927 34 loss """bceaftersigmoid""" +927 34 regularizer """no""" +927 34 optimizer """adam""" +927 34 training_loop """owa""" +927 34 negative_sampler """basic""" +927 34 evaluator """rankbased""" +927 35 dataset """kinships""" +927 35 model """transr""" +927 35 loss """bceaftersigmoid""" +927 35 regularizer """no""" +927 35 optimizer """adam""" +927 35 training_loop """owa""" +927 35 negative_sampler """basic""" +927 35 evaluator """rankbased""" +927 36 dataset """kinships""" +927 36 model """transr""" +927 36 loss """bceaftersigmoid""" +927 36 regularizer """no""" +927 36 optimizer """adam""" +927 36 training_loop """owa""" +927 36 negative_sampler """basic""" +927 36 evaluator """rankbased""" +927 37 dataset """kinships""" +927 37 model """transr""" +927 37 loss """bceaftersigmoid""" +927 37 regularizer """no""" +927 37 optimizer """adam""" +927 37 
training_loop """owa""" +927 37 negative_sampler """basic""" +927 37 evaluator """rankbased""" +927 38 dataset """kinships""" +927 38 model """transr""" +927 38 loss """bceaftersigmoid""" +927 38 regularizer """no""" +927 38 optimizer """adam""" +927 38 training_loop """owa""" +927 38 negative_sampler """basic""" +927 38 evaluator """rankbased""" +927 39 dataset """kinships""" +927 39 model """transr""" +927 39 loss """bceaftersigmoid""" +927 39 regularizer """no""" +927 39 optimizer """adam""" +927 39 training_loop """owa""" +927 39 negative_sampler """basic""" +927 39 evaluator """rankbased""" +927 40 dataset """kinships""" +927 40 model """transr""" +927 40 loss """bceaftersigmoid""" +927 40 regularizer """no""" +927 40 optimizer """adam""" +927 40 training_loop """owa""" +927 40 negative_sampler """basic""" +927 40 evaluator """rankbased""" +927 41 dataset """kinships""" +927 41 model """transr""" +927 41 loss """bceaftersigmoid""" +927 41 regularizer """no""" +927 41 optimizer """adam""" +927 41 training_loop """owa""" +927 41 negative_sampler """basic""" +927 41 evaluator """rankbased""" +927 42 dataset """kinships""" +927 42 model """transr""" +927 42 loss """bceaftersigmoid""" +927 42 regularizer """no""" +927 42 optimizer """adam""" +927 42 training_loop """owa""" +927 42 negative_sampler """basic""" +927 42 evaluator """rankbased""" +927 43 dataset """kinships""" +927 43 model """transr""" +927 43 loss """bceaftersigmoid""" +927 43 regularizer """no""" +927 43 optimizer """adam""" +927 43 training_loop """owa""" +927 43 negative_sampler """basic""" +927 43 evaluator """rankbased""" +927 44 dataset """kinships""" +927 44 model """transr""" +927 44 loss """bceaftersigmoid""" +927 44 regularizer """no""" +927 44 optimizer """adam""" +927 44 training_loop """owa""" +927 44 negative_sampler """basic""" +927 44 evaluator """rankbased""" +927 45 dataset """kinships""" +927 45 model """transr""" +927 45 loss """bceaftersigmoid""" +927 45 regularizer """no""" +927 
45 optimizer """adam""" +927 45 training_loop """owa""" +927 45 negative_sampler """basic""" +927 45 evaluator """rankbased""" +927 46 dataset """kinships""" +927 46 model """transr""" +927 46 loss """bceaftersigmoid""" +927 46 regularizer """no""" +927 46 optimizer """adam""" +927 46 training_loop """owa""" +927 46 negative_sampler """basic""" +927 46 evaluator """rankbased""" +927 47 dataset """kinships""" +927 47 model """transr""" +927 47 loss """bceaftersigmoid""" +927 47 regularizer """no""" +927 47 optimizer """adam""" +927 47 training_loop """owa""" +927 47 negative_sampler """basic""" +927 47 evaluator """rankbased""" +927 48 dataset """kinships""" +927 48 model """transr""" +927 48 loss """bceaftersigmoid""" +927 48 regularizer """no""" +927 48 optimizer """adam""" +927 48 training_loop """owa""" +927 48 negative_sampler """basic""" +927 48 evaluator """rankbased""" +927 49 dataset """kinships""" +927 49 model """transr""" +927 49 loss """bceaftersigmoid""" +927 49 regularizer """no""" +927 49 optimizer """adam""" +927 49 training_loop """owa""" +927 49 negative_sampler """basic""" +927 49 evaluator """rankbased""" +927 50 dataset """kinships""" +927 50 model """transr""" +927 50 loss """bceaftersigmoid""" +927 50 regularizer """no""" +927 50 optimizer """adam""" +927 50 training_loop """owa""" +927 50 negative_sampler """basic""" +927 50 evaluator """rankbased""" +927 51 dataset """kinships""" +927 51 model """transr""" +927 51 loss """bceaftersigmoid""" +927 51 regularizer """no""" +927 51 optimizer """adam""" +927 51 training_loop """owa""" +927 51 negative_sampler """basic""" +927 51 evaluator """rankbased""" +927 52 dataset """kinships""" +927 52 model """transr""" +927 52 loss """bceaftersigmoid""" +927 52 regularizer """no""" +927 52 optimizer """adam""" +927 52 training_loop """owa""" +927 52 negative_sampler """basic""" +927 52 evaluator """rankbased""" +927 53 dataset """kinships""" +927 53 model """transr""" +927 53 loss """bceaftersigmoid""" 
+927 53 regularizer """no""" +927 53 optimizer """adam""" +927 53 training_loop """owa""" +927 53 negative_sampler """basic""" +927 53 evaluator """rankbased""" +927 54 dataset """kinships""" +927 54 model """transr""" +927 54 loss """bceaftersigmoid""" +927 54 regularizer """no""" +927 54 optimizer """adam""" +927 54 training_loop """owa""" +927 54 negative_sampler """basic""" +927 54 evaluator """rankbased""" +927 55 dataset """kinships""" +927 55 model """transr""" +927 55 loss """bceaftersigmoid""" +927 55 regularizer """no""" +927 55 optimizer """adam""" +927 55 training_loop """owa""" +927 55 negative_sampler """basic""" +927 55 evaluator """rankbased""" +927 56 dataset """kinships""" +927 56 model """transr""" +927 56 loss """bceaftersigmoid""" +927 56 regularizer """no""" +927 56 optimizer """adam""" +927 56 training_loop """owa""" +927 56 negative_sampler """basic""" +927 56 evaluator """rankbased""" +927 57 dataset """kinships""" +927 57 model """transr""" +927 57 loss """bceaftersigmoid""" +927 57 regularizer """no""" +927 57 optimizer """adam""" +927 57 training_loop """owa""" +927 57 negative_sampler """basic""" +927 57 evaluator """rankbased""" +927 58 dataset """kinships""" +927 58 model """transr""" +927 58 loss """bceaftersigmoid""" +927 58 regularizer """no""" +927 58 optimizer """adam""" +927 58 training_loop """owa""" +927 58 negative_sampler """basic""" +927 58 evaluator """rankbased""" +927 59 dataset """kinships""" +927 59 model """transr""" +927 59 loss """bceaftersigmoid""" +927 59 regularizer """no""" +927 59 optimizer """adam""" +927 59 training_loop """owa""" +927 59 negative_sampler """basic""" +927 59 evaluator """rankbased""" +927 60 dataset """kinships""" +927 60 model """transr""" +927 60 loss """bceaftersigmoid""" +927 60 regularizer """no""" +927 60 optimizer """adam""" +927 60 training_loop """owa""" +927 60 negative_sampler """basic""" +927 60 evaluator """rankbased""" +927 61 dataset """kinships""" +927 61 model """transr""" 
+927 61 loss """bceaftersigmoid""" +927 61 regularizer """no""" +927 61 optimizer """adam""" +927 61 training_loop """owa""" +927 61 negative_sampler """basic""" +927 61 evaluator """rankbased""" +927 62 dataset """kinships""" +927 62 model """transr""" +927 62 loss """bceaftersigmoid""" +927 62 regularizer """no""" +927 62 optimizer """adam""" +927 62 training_loop """owa""" +927 62 negative_sampler """basic""" +927 62 evaluator """rankbased""" +927 63 dataset """kinships""" +927 63 model """transr""" +927 63 loss """bceaftersigmoid""" +927 63 regularizer """no""" +927 63 optimizer """adam""" +927 63 training_loop """owa""" +927 63 negative_sampler """basic""" +927 63 evaluator """rankbased""" +927 64 dataset """kinships""" +927 64 model """transr""" +927 64 loss """bceaftersigmoid""" +927 64 regularizer """no""" +927 64 optimizer """adam""" +927 64 training_loop """owa""" +927 64 negative_sampler """basic""" +927 64 evaluator """rankbased""" +927 65 dataset """kinships""" +927 65 model """transr""" +927 65 loss """bceaftersigmoid""" +927 65 regularizer """no""" +927 65 optimizer """adam""" +927 65 training_loop """owa""" +927 65 negative_sampler """basic""" +927 65 evaluator """rankbased""" +927 66 dataset """kinships""" +927 66 model """transr""" +927 66 loss """bceaftersigmoid""" +927 66 regularizer """no""" +927 66 optimizer """adam""" +927 66 training_loop """owa""" +927 66 negative_sampler """basic""" +927 66 evaluator """rankbased""" +927 67 dataset """kinships""" +927 67 model """transr""" +927 67 loss """bceaftersigmoid""" +927 67 regularizer """no""" +927 67 optimizer """adam""" +927 67 training_loop """owa""" +927 67 negative_sampler """basic""" +927 67 evaluator """rankbased""" +927 68 dataset """kinships""" +927 68 model """transr""" +927 68 loss """bceaftersigmoid""" +927 68 regularizer """no""" +927 68 optimizer """adam""" +927 68 training_loop """owa""" +927 68 negative_sampler """basic""" +927 68 evaluator """rankbased""" +927 69 dataset 
"""kinships""" +927 69 model """transr""" +927 69 loss """bceaftersigmoid""" +927 69 regularizer """no""" +927 69 optimizer """adam""" +927 69 training_loop """owa""" +927 69 negative_sampler """basic""" +927 69 evaluator """rankbased""" +927 70 dataset """kinships""" +927 70 model """transr""" +927 70 loss """bceaftersigmoid""" +927 70 regularizer """no""" +927 70 optimizer """adam""" +927 70 training_loop """owa""" +927 70 negative_sampler """basic""" +927 70 evaluator """rankbased""" +927 71 dataset """kinships""" +927 71 model """transr""" +927 71 loss """bceaftersigmoid""" +927 71 regularizer """no""" +927 71 optimizer """adam""" +927 71 training_loop """owa""" +927 71 negative_sampler """basic""" +927 71 evaluator """rankbased""" +927 72 dataset """kinships""" +927 72 model """transr""" +927 72 loss """bceaftersigmoid""" +927 72 regularizer """no""" +927 72 optimizer """adam""" +927 72 training_loop """owa""" +927 72 negative_sampler """basic""" +927 72 evaluator """rankbased""" +927 73 dataset """kinships""" +927 73 model """transr""" +927 73 loss """bceaftersigmoid""" +927 73 regularizer """no""" +927 73 optimizer """adam""" +927 73 training_loop """owa""" +927 73 negative_sampler """basic""" +927 73 evaluator """rankbased""" +927 74 dataset """kinships""" +927 74 model """transr""" +927 74 loss """bceaftersigmoid""" +927 74 regularizer """no""" +927 74 optimizer """adam""" +927 74 training_loop """owa""" +927 74 negative_sampler """basic""" +927 74 evaluator """rankbased""" +927 75 dataset """kinships""" +927 75 model """transr""" +927 75 loss """bceaftersigmoid""" +927 75 regularizer """no""" +927 75 optimizer """adam""" +927 75 training_loop """owa""" +927 75 negative_sampler """basic""" +927 75 evaluator """rankbased""" +927 76 dataset """kinships""" +927 76 model """transr""" +927 76 loss """bceaftersigmoid""" +927 76 regularizer """no""" +927 76 optimizer """adam""" +927 76 training_loop """owa""" +927 76 negative_sampler """basic""" +927 76 evaluator 
"""rankbased""" +927 77 dataset """kinships""" +927 77 model """transr""" +927 77 loss """bceaftersigmoid""" +927 77 regularizer """no""" +927 77 optimizer """adam""" +927 77 training_loop """owa""" +927 77 negative_sampler """basic""" +927 77 evaluator """rankbased""" +927 78 dataset """kinships""" +927 78 model """transr""" +927 78 loss """bceaftersigmoid""" +927 78 regularizer """no""" +927 78 optimizer """adam""" +927 78 training_loop """owa""" +927 78 negative_sampler """basic""" +927 78 evaluator """rankbased""" +927 79 dataset """kinships""" +927 79 model """transr""" +927 79 loss """bceaftersigmoid""" +927 79 regularizer """no""" +927 79 optimizer """adam""" +927 79 training_loop """owa""" +927 79 negative_sampler """basic""" +927 79 evaluator """rankbased""" +927 80 dataset """kinships""" +927 80 model """transr""" +927 80 loss """bceaftersigmoid""" +927 80 regularizer """no""" +927 80 optimizer """adam""" +927 80 training_loop """owa""" +927 80 negative_sampler """basic""" +927 80 evaluator """rankbased""" +927 81 dataset """kinships""" +927 81 model """transr""" +927 81 loss """bceaftersigmoid""" +927 81 regularizer """no""" +927 81 optimizer """adam""" +927 81 training_loop """owa""" +927 81 negative_sampler """basic""" +927 81 evaluator """rankbased""" +927 82 dataset """kinships""" +927 82 model """transr""" +927 82 loss """bceaftersigmoid""" +927 82 regularizer """no""" +927 82 optimizer """adam""" +927 82 training_loop """owa""" +927 82 negative_sampler """basic""" +927 82 evaluator """rankbased""" +927 83 dataset """kinships""" +927 83 model """transr""" +927 83 loss """bceaftersigmoid""" +927 83 regularizer """no""" +927 83 optimizer """adam""" +927 83 training_loop """owa""" +927 83 negative_sampler """basic""" +927 83 evaluator """rankbased""" +927 84 dataset """kinships""" +927 84 model """transr""" +927 84 loss """bceaftersigmoid""" +927 84 regularizer """no""" +927 84 optimizer """adam""" +927 84 training_loop """owa""" +927 84 
negative_sampler """basic""" +927 84 evaluator """rankbased""" +927 85 dataset """kinships""" +927 85 model """transr""" +927 85 loss """bceaftersigmoid""" +927 85 regularizer """no""" +927 85 optimizer """adam""" +927 85 training_loop """owa""" +927 85 negative_sampler """basic""" +927 85 evaluator """rankbased""" +927 86 dataset """kinships""" +927 86 model """transr""" +927 86 loss """bceaftersigmoid""" +927 86 regularizer """no""" +927 86 optimizer """adam""" +927 86 training_loop """owa""" +927 86 negative_sampler """basic""" +927 86 evaluator """rankbased""" +927 87 dataset """kinships""" +927 87 model """transr""" +927 87 loss """bceaftersigmoid""" +927 87 regularizer """no""" +927 87 optimizer """adam""" +927 87 training_loop """owa""" +927 87 negative_sampler """basic""" +927 87 evaluator """rankbased""" +927 88 dataset """kinships""" +927 88 model """transr""" +927 88 loss """bceaftersigmoid""" +927 88 regularizer """no""" +927 88 optimizer """adam""" +927 88 training_loop """owa""" +927 88 negative_sampler """basic""" +927 88 evaluator """rankbased""" +927 89 dataset """kinships""" +927 89 model """transr""" +927 89 loss """bceaftersigmoid""" +927 89 regularizer """no""" +927 89 optimizer """adam""" +927 89 training_loop """owa""" +927 89 negative_sampler """basic""" +927 89 evaluator """rankbased""" +927 90 dataset """kinships""" +927 90 model """transr""" +927 90 loss """bceaftersigmoid""" +927 90 regularizer """no""" +927 90 optimizer """adam""" +927 90 training_loop """owa""" +927 90 negative_sampler """basic""" +927 90 evaluator """rankbased""" +927 91 dataset """kinships""" +927 91 model """transr""" +927 91 loss """bceaftersigmoid""" +927 91 regularizer """no""" +927 91 optimizer """adam""" +927 91 training_loop """owa""" +927 91 negative_sampler """basic""" +927 91 evaluator """rankbased""" +927 92 dataset """kinships""" +927 92 model """transr""" +927 92 loss """bceaftersigmoid""" +927 92 regularizer """no""" +927 92 optimizer """adam""" +927 92 
training_loop """owa""" +927 92 negative_sampler """basic""" +927 92 evaluator """rankbased""" +927 93 dataset """kinships""" +927 93 model """transr""" +927 93 loss """bceaftersigmoid""" +927 93 regularizer """no""" +927 93 optimizer """adam""" +927 93 training_loop """owa""" +927 93 negative_sampler """basic""" +927 93 evaluator """rankbased""" +927 94 dataset """kinships""" +927 94 model """transr""" +927 94 loss """bceaftersigmoid""" +927 94 regularizer """no""" +927 94 optimizer """adam""" +927 94 training_loop """owa""" +927 94 negative_sampler """basic""" +927 94 evaluator """rankbased""" +927 95 dataset """kinships""" +927 95 model """transr""" +927 95 loss """bceaftersigmoid""" +927 95 regularizer """no""" +927 95 optimizer """adam""" +927 95 training_loop """owa""" +927 95 negative_sampler """basic""" +927 95 evaluator """rankbased""" +927 96 dataset """kinships""" +927 96 model """transr""" +927 96 loss """bceaftersigmoid""" +927 96 regularizer """no""" +927 96 optimizer """adam""" +927 96 training_loop """owa""" +927 96 negative_sampler """basic""" +927 96 evaluator """rankbased""" +927 97 dataset """kinships""" +927 97 model """transr""" +927 97 loss """bceaftersigmoid""" +927 97 regularizer """no""" +927 97 optimizer """adam""" +927 97 training_loop """owa""" +927 97 negative_sampler """basic""" +927 97 evaluator """rankbased""" +927 98 dataset """kinships""" +927 98 model """transr""" +927 98 loss """bceaftersigmoid""" +927 98 regularizer """no""" +927 98 optimizer """adam""" +927 98 training_loop """owa""" +927 98 negative_sampler """basic""" +927 98 evaluator """rankbased""" +927 99 dataset """kinships""" +927 99 model """transr""" +927 99 loss """bceaftersigmoid""" +927 99 regularizer """no""" +927 99 optimizer """adam""" +927 99 training_loop """owa""" +927 99 negative_sampler """basic""" +927 99 evaluator """rankbased""" +927 100 dataset """kinships""" +927 100 model """transr""" +927 100 loss """bceaftersigmoid""" +927 100 regularizer """no""" 
+927 100 optimizer """adam""" +927 100 training_loop """owa""" +927 100 negative_sampler """basic""" +927 100 evaluator """rankbased""" +928 1 model.embedding_dim 0.0 +928 1 model.relation_dim 0.0 +928 1 model.scoring_fct_norm 1.0 +928 1 optimizer.lr 0.09804745927622141 +928 1 negative_sampler.num_negs_per_pos 76.0 +928 1 training.batch_size 2.0 +928 2 model.embedding_dim 0.0 +928 2 model.relation_dim 1.0 +928 2 model.scoring_fct_norm 1.0 +928 2 optimizer.lr 0.0010978492009466039 +928 2 negative_sampler.num_negs_per_pos 54.0 +928 2 training.batch_size 1.0 +928 3 model.embedding_dim 2.0 +928 3 model.relation_dim 0.0 +928 3 model.scoring_fct_norm 1.0 +928 3 optimizer.lr 0.0073805841679779405 +928 3 negative_sampler.num_negs_per_pos 83.0 +928 3 training.batch_size 1.0 +928 4 model.embedding_dim 1.0 +928 4 model.relation_dim 0.0 +928 4 model.scoring_fct_norm 1.0 +928 4 optimizer.lr 0.0549988684169092 +928 4 negative_sampler.num_negs_per_pos 52.0 +928 4 training.batch_size 2.0 +928 5 model.embedding_dim 2.0 +928 5 model.relation_dim 1.0 +928 5 model.scoring_fct_norm 2.0 +928 5 optimizer.lr 0.08385546708352296 +928 5 negative_sampler.num_negs_per_pos 86.0 +928 5 training.batch_size 0.0 +928 6 model.embedding_dim 0.0 +928 6 model.relation_dim 2.0 +928 6 model.scoring_fct_norm 2.0 +928 6 optimizer.lr 0.06450613706525091 +928 6 negative_sampler.num_negs_per_pos 61.0 +928 6 training.batch_size 2.0 +928 7 model.embedding_dim 1.0 +928 7 model.relation_dim 0.0 +928 7 model.scoring_fct_norm 2.0 +928 7 optimizer.lr 0.0010281005586211022 +928 7 negative_sampler.num_negs_per_pos 21.0 +928 7 training.batch_size 2.0 +928 8 model.embedding_dim 0.0 +928 8 model.relation_dim 2.0 +928 8 model.scoring_fct_norm 1.0 +928 8 optimizer.lr 0.042188722363322266 +928 8 negative_sampler.num_negs_per_pos 14.0 +928 8 training.batch_size 2.0 +928 9 model.embedding_dim 0.0 +928 9 model.relation_dim 0.0 +928 9 model.scoring_fct_norm 2.0 +928 9 optimizer.lr 0.007894514351696951 +928 9 
negative_sampler.num_negs_per_pos 73.0 +928 9 training.batch_size 2.0 +928 10 model.embedding_dim 0.0 +928 10 model.relation_dim 2.0 +928 10 model.scoring_fct_norm 1.0 +928 10 optimizer.lr 0.07091107535314509 +928 10 negative_sampler.num_negs_per_pos 97.0 +928 10 training.batch_size 1.0 +928 11 model.embedding_dim 0.0 +928 11 model.relation_dim 2.0 +928 11 model.scoring_fct_norm 1.0 +928 11 optimizer.lr 0.010109572887891376 +928 11 negative_sampler.num_negs_per_pos 26.0 +928 11 training.batch_size 1.0 +928 12 model.embedding_dim 0.0 +928 12 model.relation_dim 2.0 +928 12 model.scoring_fct_norm 1.0 +928 12 optimizer.lr 0.004464884107640754 +928 12 negative_sampler.num_negs_per_pos 89.0 +928 12 training.batch_size 2.0 +928 13 model.embedding_dim 2.0 +928 13 model.relation_dim 1.0 +928 13 model.scoring_fct_norm 1.0 +928 13 optimizer.lr 0.004856467974781139 +928 13 negative_sampler.num_negs_per_pos 86.0 +928 13 training.batch_size 2.0 +928 14 model.embedding_dim 2.0 +928 14 model.relation_dim 2.0 +928 14 model.scoring_fct_norm 1.0 +928 14 optimizer.lr 0.011810439140855977 +928 14 negative_sampler.num_negs_per_pos 96.0 +928 14 training.batch_size 2.0 +928 15 model.embedding_dim 2.0 +928 15 model.relation_dim 0.0 +928 15 model.scoring_fct_norm 2.0 +928 15 optimizer.lr 0.017866407093107863 +928 15 negative_sampler.num_negs_per_pos 94.0 +928 15 training.batch_size 2.0 +928 16 model.embedding_dim 0.0 +928 16 model.relation_dim 2.0 +928 16 model.scoring_fct_norm 2.0 +928 16 optimizer.lr 0.0996277069462136 +928 16 negative_sampler.num_negs_per_pos 22.0 +928 16 training.batch_size 0.0 +928 17 model.embedding_dim 0.0 +928 17 model.relation_dim 0.0 +928 17 model.scoring_fct_norm 1.0 +928 17 optimizer.lr 0.011987844307764948 +928 17 negative_sampler.num_negs_per_pos 67.0 +928 17 training.batch_size 1.0 +928 18 model.embedding_dim 1.0 +928 18 model.relation_dim 0.0 +928 18 model.scoring_fct_norm 2.0 +928 18 optimizer.lr 0.007273548831183724 +928 18 
negative_sampler.num_negs_per_pos 25.0 +928 18 training.batch_size 2.0 +928 19 model.embedding_dim 0.0 +928 19 model.relation_dim 2.0 +928 19 model.scoring_fct_norm 2.0 +928 19 optimizer.lr 0.002987652186589435 +928 19 negative_sampler.num_negs_per_pos 64.0 +928 19 training.batch_size 0.0 +928 20 model.embedding_dim 2.0 +928 20 model.relation_dim 2.0 +928 20 model.scoring_fct_norm 1.0 +928 20 optimizer.lr 0.021051478488293522 +928 20 negative_sampler.num_negs_per_pos 56.0 +928 20 training.batch_size 2.0 +928 21 model.embedding_dim 0.0 +928 21 model.relation_dim 1.0 +928 21 model.scoring_fct_norm 2.0 +928 21 optimizer.lr 0.00358068311877524 +928 21 negative_sampler.num_negs_per_pos 50.0 +928 21 training.batch_size 2.0 +928 22 model.embedding_dim 1.0 +928 22 model.relation_dim 0.0 +928 22 model.scoring_fct_norm 1.0 +928 22 optimizer.lr 0.0054993044502259295 +928 22 negative_sampler.num_negs_per_pos 47.0 +928 22 training.batch_size 1.0 +928 23 model.embedding_dim 2.0 +928 23 model.relation_dim 0.0 +928 23 model.scoring_fct_norm 1.0 +928 23 optimizer.lr 0.009043363988785394 +928 23 negative_sampler.num_negs_per_pos 22.0 +928 23 training.batch_size 2.0 +928 24 model.embedding_dim 0.0 +928 24 model.relation_dim 1.0 +928 24 model.scoring_fct_norm 1.0 +928 24 optimizer.lr 0.02159179478840555 +928 24 negative_sampler.num_negs_per_pos 94.0 +928 24 training.batch_size 2.0 +928 25 model.embedding_dim 0.0 +928 25 model.relation_dim 2.0 +928 25 model.scoring_fct_norm 1.0 +928 25 optimizer.lr 0.002461242801383832 +928 25 negative_sampler.num_negs_per_pos 87.0 +928 25 training.batch_size 1.0 +928 26 model.embedding_dim 1.0 +928 26 model.relation_dim 2.0 +928 26 model.scoring_fct_norm 1.0 +928 26 optimizer.lr 0.00471664563102771 +928 26 negative_sampler.num_negs_per_pos 83.0 +928 26 training.batch_size 2.0 +928 27 model.embedding_dim 1.0 +928 27 model.relation_dim 1.0 +928 27 model.scoring_fct_norm 1.0 +928 27 optimizer.lr 0.013308895095060828 +928 27 
negative_sampler.num_negs_per_pos 70.0 +928 27 training.batch_size 1.0 +928 28 model.embedding_dim 2.0 +928 28 model.relation_dim 1.0 +928 28 model.scoring_fct_norm 1.0 +928 28 optimizer.lr 0.08892360030280173 +928 28 negative_sampler.num_negs_per_pos 33.0 +928 28 training.batch_size 0.0 +928 29 model.embedding_dim 2.0 +928 29 model.relation_dim 2.0 +928 29 model.scoring_fct_norm 2.0 +928 29 optimizer.lr 0.0035319853339551657 +928 29 negative_sampler.num_negs_per_pos 4.0 +928 29 training.batch_size 1.0 +928 30 model.embedding_dim 2.0 +928 30 model.relation_dim 2.0 +928 30 model.scoring_fct_norm 2.0 +928 30 optimizer.lr 0.02332192362225244 +928 30 negative_sampler.num_negs_per_pos 42.0 +928 30 training.batch_size 0.0 +928 31 model.embedding_dim 1.0 +928 31 model.relation_dim 0.0 +928 31 model.scoring_fct_norm 1.0 +928 31 optimizer.lr 0.09458098414778238 +928 31 negative_sampler.num_negs_per_pos 9.0 +928 31 training.batch_size 1.0 +928 32 model.embedding_dim 1.0 +928 32 model.relation_dim 2.0 +928 32 model.scoring_fct_norm 1.0 +928 32 optimizer.lr 0.008372126085822515 +928 32 negative_sampler.num_negs_per_pos 76.0 +928 32 training.batch_size 0.0 +928 33 model.embedding_dim 1.0 +928 33 model.relation_dim 0.0 +928 33 model.scoring_fct_norm 1.0 +928 33 optimizer.lr 0.009212274447262435 +928 33 negative_sampler.num_negs_per_pos 48.0 +928 33 training.batch_size 0.0 +928 34 model.embedding_dim 1.0 +928 34 model.relation_dim 0.0 +928 34 model.scoring_fct_norm 1.0 +928 34 optimizer.lr 0.0026306821174490267 +928 34 negative_sampler.num_negs_per_pos 43.0 +928 34 training.batch_size 1.0 +928 35 model.embedding_dim 1.0 +928 35 model.relation_dim 2.0 +928 35 model.scoring_fct_norm 1.0 +928 35 optimizer.lr 0.008456937536309451 +928 35 negative_sampler.num_negs_per_pos 71.0 +928 35 training.batch_size 2.0 +928 36 model.embedding_dim 2.0 +928 36 model.relation_dim 0.0 +928 36 model.scoring_fct_norm 1.0 +928 36 optimizer.lr 0.02235449730811413 +928 36 
negative_sampler.num_negs_per_pos 95.0 +928 36 training.batch_size 1.0 +928 37 model.embedding_dim 1.0 +928 37 model.relation_dim 1.0 +928 37 model.scoring_fct_norm 1.0 +928 37 optimizer.lr 0.04394405050410413 +928 37 negative_sampler.num_negs_per_pos 44.0 +928 37 training.batch_size 1.0 +928 38 model.embedding_dim 1.0 +928 38 model.relation_dim 1.0 +928 38 model.scoring_fct_norm 1.0 +928 38 optimizer.lr 0.0019368968941509738 +928 38 negative_sampler.num_negs_per_pos 46.0 +928 38 training.batch_size 2.0 +928 39 model.embedding_dim 0.0 +928 39 model.relation_dim 1.0 +928 39 model.scoring_fct_norm 1.0 +928 39 optimizer.lr 0.03557817181587061 +928 39 negative_sampler.num_negs_per_pos 84.0 +928 39 training.batch_size 2.0 +928 40 model.embedding_dim 2.0 +928 40 model.relation_dim 0.0 +928 40 model.scoring_fct_norm 2.0 +928 40 optimizer.lr 0.0011776632534532888 +928 40 negative_sampler.num_negs_per_pos 84.0 +928 40 training.batch_size 0.0 +928 41 model.embedding_dim 1.0 +928 41 model.relation_dim 0.0 +928 41 model.scoring_fct_norm 1.0 +928 41 optimizer.lr 0.019023251409610024 +928 41 negative_sampler.num_negs_per_pos 69.0 +928 41 training.batch_size 2.0 +928 42 model.embedding_dim 1.0 +928 42 model.relation_dim 2.0 +928 42 model.scoring_fct_norm 2.0 +928 42 optimizer.lr 0.0014563130712511546 +928 42 negative_sampler.num_negs_per_pos 65.0 +928 42 training.batch_size 1.0 +928 43 model.embedding_dim 2.0 +928 43 model.relation_dim 1.0 +928 43 model.scoring_fct_norm 1.0 +928 43 optimizer.lr 0.022052681938969983 +928 43 negative_sampler.num_negs_per_pos 30.0 +928 43 training.batch_size 0.0 +928 44 model.embedding_dim 0.0 +928 44 model.relation_dim 2.0 +928 44 model.scoring_fct_norm 2.0 +928 44 optimizer.lr 0.01426643969027858 +928 44 negative_sampler.num_negs_per_pos 52.0 +928 44 training.batch_size 2.0 +928 45 model.embedding_dim 2.0 +928 45 model.relation_dim 0.0 +928 45 model.scoring_fct_norm 1.0 +928 45 optimizer.lr 0.012961973519042403 +928 45 
negative_sampler.num_negs_per_pos 2.0 +928 45 training.batch_size 1.0 +928 46 model.embedding_dim 1.0 +928 46 model.relation_dim 2.0 +928 46 model.scoring_fct_norm 2.0 +928 46 optimizer.lr 0.014726214770117907 +928 46 negative_sampler.num_negs_per_pos 28.0 +928 46 training.batch_size 1.0 +928 47 model.embedding_dim 2.0 +928 47 model.relation_dim 2.0 +928 47 model.scoring_fct_norm 1.0 +928 47 optimizer.lr 0.0077189550321369975 +928 47 negative_sampler.num_negs_per_pos 22.0 +928 47 training.batch_size 2.0 +928 48 model.embedding_dim 2.0 +928 48 model.relation_dim 1.0 +928 48 model.scoring_fct_norm 2.0 +928 48 optimizer.lr 0.036691549788064666 +928 48 negative_sampler.num_negs_per_pos 35.0 +928 48 training.batch_size 0.0 +928 49 model.embedding_dim 0.0 +928 49 model.relation_dim 0.0 +928 49 model.scoring_fct_norm 2.0 +928 49 optimizer.lr 0.017748084749189014 +928 49 negative_sampler.num_negs_per_pos 29.0 +928 49 training.batch_size 0.0 +928 50 model.embedding_dim 2.0 +928 50 model.relation_dim 2.0 +928 50 model.scoring_fct_norm 2.0 +928 50 optimizer.lr 0.0060764699554689315 +928 50 negative_sampler.num_negs_per_pos 31.0 +928 50 training.batch_size 2.0 +928 51 model.embedding_dim 1.0 +928 51 model.relation_dim 1.0 +928 51 model.scoring_fct_norm 2.0 +928 51 optimizer.lr 0.0574764462290618 +928 51 negative_sampler.num_negs_per_pos 2.0 +928 51 training.batch_size 1.0 +928 52 model.embedding_dim 2.0 +928 52 model.relation_dim 1.0 +928 52 model.scoring_fct_norm 2.0 +928 52 optimizer.lr 0.0022251811869755684 +928 52 negative_sampler.num_negs_per_pos 1.0 +928 52 training.batch_size 2.0 +928 53 model.embedding_dim 0.0 +928 53 model.relation_dim 1.0 +928 53 model.scoring_fct_norm 1.0 +928 53 optimizer.lr 0.011179096194617724 +928 53 negative_sampler.num_negs_per_pos 2.0 +928 53 training.batch_size 1.0 +928 54 model.embedding_dim 2.0 +928 54 model.relation_dim 0.0 +928 54 model.scoring_fct_norm 2.0 +928 54 optimizer.lr 0.006295508116091839 +928 54 
negative_sampler.num_negs_per_pos 77.0 +928 54 training.batch_size 2.0 +928 55 model.embedding_dim 2.0 +928 55 model.relation_dim 2.0 +928 55 model.scoring_fct_norm 2.0 +928 55 optimizer.lr 0.01643660858766664 +928 55 negative_sampler.num_negs_per_pos 6.0 +928 55 training.batch_size 1.0 +928 56 model.embedding_dim 1.0 +928 56 model.relation_dim 2.0 +928 56 model.scoring_fct_norm 2.0 +928 56 optimizer.lr 0.048490041383200524 +928 56 negative_sampler.num_negs_per_pos 5.0 +928 56 training.batch_size 0.0 +928 57 model.embedding_dim 2.0 +928 57 model.relation_dim 2.0 +928 57 model.scoring_fct_norm 2.0 +928 57 optimizer.lr 0.0127332126112506 +928 57 negative_sampler.num_negs_per_pos 4.0 +928 57 training.batch_size 2.0 +928 58 model.embedding_dim 0.0 +928 58 model.relation_dim 1.0 +928 58 model.scoring_fct_norm 2.0 +928 58 optimizer.lr 0.00860950007958558 +928 58 negative_sampler.num_negs_per_pos 96.0 +928 58 training.batch_size 2.0 +928 59 model.embedding_dim 0.0 +928 59 model.relation_dim 1.0 +928 59 model.scoring_fct_norm 2.0 +928 59 optimizer.lr 0.05290531375827697 +928 59 negative_sampler.num_negs_per_pos 19.0 +928 59 training.batch_size 2.0 +928 60 model.embedding_dim 1.0 +928 60 model.relation_dim 2.0 +928 60 model.scoring_fct_norm 2.0 +928 60 optimizer.lr 0.0021610407064539903 +928 60 negative_sampler.num_negs_per_pos 77.0 +928 60 training.batch_size 1.0 +928 61 model.embedding_dim 2.0 +928 61 model.relation_dim 1.0 +928 61 model.scoring_fct_norm 1.0 +928 61 optimizer.lr 0.0010800602660758633 +928 61 negative_sampler.num_negs_per_pos 80.0 +928 61 training.batch_size 1.0 +928 62 model.embedding_dim 1.0 +928 62 model.relation_dim 1.0 +928 62 model.scoring_fct_norm 2.0 +928 62 optimizer.lr 0.002191727276231718 +928 62 negative_sampler.num_negs_per_pos 97.0 +928 62 training.batch_size 0.0 +928 63 model.embedding_dim 2.0 +928 63 model.relation_dim 0.0 +928 63 model.scoring_fct_norm 1.0 +928 63 optimizer.lr 0.016801449710483387 +928 63 negative_sampler.num_negs_per_pos 
29.0 +928 63 training.batch_size 1.0 +928 64 model.embedding_dim 2.0 +928 64 model.relation_dim 0.0 +928 64 model.scoring_fct_norm 1.0 +928 64 optimizer.lr 0.0010145165900130617 +928 64 negative_sampler.num_negs_per_pos 30.0 +928 64 training.batch_size 1.0 +928 65 model.embedding_dim 1.0 +928 65 model.relation_dim 2.0 +928 65 model.scoring_fct_norm 1.0 +928 65 optimizer.lr 0.005084547676539629 +928 65 negative_sampler.num_negs_per_pos 66.0 +928 65 training.batch_size 1.0 +928 66 model.embedding_dim 2.0 +928 66 model.relation_dim 1.0 +928 66 model.scoring_fct_norm 1.0 +928 66 optimizer.lr 0.011319097175375976 +928 66 negative_sampler.num_negs_per_pos 41.0 +928 66 training.batch_size 1.0 +928 67 model.embedding_dim 2.0 +928 67 model.relation_dim 0.0 +928 67 model.scoring_fct_norm 2.0 +928 67 optimizer.lr 0.0010293721903892792 +928 67 negative_sampler.num_negs_per_pos 29.0 +928 67 training.batch_size 1.0 +928 68 model.embedding_dim 1.0 +928 68 model.relation_dim 2.0 +928 68 model.scoring_fct_norm 1.0 +928 68 optimizer.lr 0.0039918915786077945 +928 68 negative_sampler.num_negs_per_pos 15.0 +928 68 training.batch_size 0.0 +928 69 model.embedding_dim 2.0 +928 69 model.relation_dim 2.0 +928 69 model.scoring_fct_norm 1.0 +928 69 optimizer.lr 0.006933758716822179 +928 69 negative_sampler.num_negs_per_pos 24.0 +928 69 training.batch_size 0.0 +928 70 model.embedding_dim 0.0 +928 70 model.relation_dim 2.0 +928 70 model.scoring_fct_norm 1.0 +928 70 optimizer.lr 0.01685860122730207 +928 70 negative_sampler.num_negs_per_pos 85.0 +928 70 training.batch_size 2.0 +928 71 model.embedding_dim 1.0 +928 71 model.relation_dim 0.0 +928 71 model.scoring_fct_norm 1.0 +928 71 optimizer.lr 0.00527421647669143 +928 71 negative_sampler.num_negs_per_pos 16.0 +928 71 training.batch_size 1.0 +928 72 model.embedding_dim 2.0 +928 72 model.relation_dim 1.0 +928 72 model.scoring_fct_norm 2.0 +928 72 optimizer.lr 0.03497460848359443 +928 72 negative_sampler.num_negs_per_pos 69.0 +928 72 
training.batch_size 1.0 +928 73 model.embedding_dim 0.0 +928 73 model.relation_dim 2.0 +928 73 model.scoring_fct_norm 2.0 +928 73 optimizer.lr 0.05708307877901295 +928 73 negative_sampler.num_negs_per_pos 9.0 +928 73 training.batch_size 1.0 +928 74 model.embedding_dim 2.0 +928 74 model.relation_dim 0.0 +928 74 model.scoring_fct_norm 1.0 +928 74 optimizer.lr 0.056188058570763394 +928 74 negative_sampler.num_negs_per_pos 67.0 +928 74 training.batch_size 2.0 +928 75 model.embedding_dim 0.0 +928 75 model.relation_dim 0.0 +928 75 model.scoring_fct_norm 2.0 +928 75 optimizer.lr 0.016318511968326045 +928 75 negative_sampler.num_negs_per_pos 76.0 +928 75 training.batch_size 2.0 +928 76 model.embedding_dim 0.0 +928 76 model.relation_dim 2.0 +928 76 model.scoring_fct_norm 2.0 +928 76 optimizer.lr 0.010188012048684235 +928 76 negative_sampler.num_negs_per_pos 69.0 +928 76 training.batch_size 1.0 +928 77 model.embedding_dim 2.0 +928 77 model.relation_dim 2.0 +928 77 model.scoring_fct_norm 2.0 +928 77 optimizer.lr 0.0022552547291001634 +928 77 negative_sampler.num_negs_per_pos 23.0 +928 77 training.batch_size 2.0 +928 78 model.embedding_dim 0.0 +928 78 model.relation_dim 2.0 +928 78 model.scoring_fct_norm 2.0 +928 78 optimizer.lr 0.012197819695070992 +928 78 negative_sampler.num_negs_per_pos 43.0 +928 78 training.batch_size 2.0 +928 79 model.embedding_dim 0.0 +928 79 model.relation_dim 1.0 +928 79 model.scoring_fct_norm 1.0 +928 79 optimizer.lr 0.033121898468663216 +928 79 negative_sampler.num_negs_per_pos 40.0 +928 79 training.batch_size 0.0 +928 80 model.embedding_dim 2.0 +928 80 model.relation_dim 2.0 +928 80 model.scoring_fct_norm 1.0 +928 80 optimizer.lr 0.003534541658022044 +928 80 negative_sampler.num_negs_per_pos 88.0 +928 80 training.batch_size 0.0 +928 81 model.embedding_dim 2.0 +928 81 model.relation_dim 0.0 +928 81 model.scoring_fct_norm 1.0 +928 81 optimizer.lr 0.010390463609061967 +928 81 negative_sampler.num_negs_per_pos 9.0 +928 81 training.batch_size 0.0 +928 
82 model.embedding_dim 1.0 +928 82 model.relation_dim 1.0 +928 82 model.scoring_fct_norm 2.0 +928 82 optimizer.lr 0.0014088328945981883 +928 82 negative_sampler.num_negs_per_pos 64.0 +928 82 training.batch_size 2.0 +928 83 model.embedding_dim 2.0 +928 83 model.relation_dim 1.0 +928 83 model.scoring_fct_norm 1.0 +928 83 optimizer.lr 0.04288787578383084 +928 83 negative_sampler.num_negs_per_pos 69.0 +928 83 training.batch_size 1.0 +928 84 model.embedding_dim 2.0 +928 84 model.relation_dim 1.0 +928 84 model.scoring_fct_norm 2.0 +928 84 optimizer.lr 0.02078619151301855 +928 84 negative_sampler.num_negs_per_pos 68.0 +928 84 training.batch_size 0.0 +928 85 model.embedding_dim 2.0 +928 85 model.relation_dim 2.0 +928 85 model.scoring_fct_norm 2.0 +928 85 optimizer.lr 0.016465291372945595 +928 85 negative_sampler.num_negs_per_pos 8.0 +928 85 training.batch_size 1.0 +928 86 model.embedding_dim 1.0 +928 86 model.relation_dim 2.0 +928 86 model.scoring_fct_norm 1.0 +928 86 optimizer.lr 0.0012184981722533983 +928 86 negative_sampler.num_negs_per_pos 45.0 +928 86 training.batch_size 1.0 +928 87 model.embedding_dim 0.0 +928 87 model.relation_dim 1.0 +928 87 model.scoring_fct_norm 2.0 +928 87 optimizer.lr 0.015372385401174242 +928 87 negative_sampler.num_negs_per_pos 45.0 +928 87 training.batch_size 1.0 +928 88 model.embedding_dim 0.0 +928 88 model.relation_dim 2.0 +928 88 model.scoring_fct_norm 2.0 +928 88 optimizer.lr 0.033882465903150874 +928 88 negative_sampler.num_negs_per_pos 98.0 +928 88 training.batch_size 1.0 +928 89 model.embedding_dim 1.0 +928 89 model.relation_dim 2.0 +928 89 model.scoring_fct_norm 2.0 +928 89 optimizer.lr 0.059500421655058235 +928 89 negative_sampler.num_negs_per_pos 87.0 +928 89 training.batch_size 0.0 +928 90 model.embedding_dim 0.0 +928 90 model.relation_dim 0.0 +928 90 model.scoring_fct_norm 2.0 +928 90 optimizer.lr 0.0011432974182647181 +928 90 negative_sampler.num_negs_per_pos 98.0 +928 90 training.batch_size 0.0 +928 91 model.embedding_dim 0.0 
+928 91 model.relation_dim 2.0 +928 91 model.scoring_fct_norm 2.0 +928 91 optimizer.lr 0.07593726636623087 +928 91 negative_sampler.num_negs_per_pos 44.0 +928 91 training.batch_size 2.0 +928 92 model.embedding_dim 0.0 +928 92 model.relation_dim 0.0 +928 92 model.scoring_fct_norm 1.0 +928 92 optimizer.lr 0.0015000771163792793 +928 92 negative_sampler.num_negs_per_pos 66.0 +928 92 training.batch_size 1.0 +928 93 model.embedding_dim 0.0 +928 93 model.relation_dim 0.0 +928 93 model.scoring_fct_norm 1.0 +928 93 optimizer.lr 0.0016472885318185636 +928 93 negative_sampler.num_negs_per_pos 3.0 +928 93 training.batch_size 0.0 +928 94 model.embedding_dim 1.0 +928 94 model.relation_dim 0.0 +928 94 model.scoring_fct_norm 1.0 +928 94 optimizer.lr 0.05020420363739671 +928 94 negative_sampler.num_negs_per_pos 89.0 +928 94 training.batch_size 2.0 +928 95 model.embedding_dim 1.0 +928 95 model.relation_dim 1.0 +928 95 model.scoring_fct_norm 1.0 +928 95 optimizer.lr 0.020082285163227315 +928 95 negative_sampler.num_negs_per_pos 37.0 +928 95 training.batch_size 0.0 +928 96 model.embedding_dim 2.0 +928 96 model.relation_dim 0.0 +928 96 model.scoring_fct_norm 1.0 +928 96 optimizer.lr 0.008719983212115453 +928 96 negative_sampler.num_negs_per_pos 18.0 +928 96 training.batch_size 0.0 +928 97 model.embedding_dim 1.0 +928 97 model.relation_dim 1.0 +928 97 model.scoring_fct_norm 2.0 +928 97 optimizer.lr 0.00545595402393628 +928 97 negative_sampler.num_negs_per_pos 20.0 +928 97 training.batch_size 0.0 +928 98 model.embedding_dim 0.0 +928 98 model.relation_dim 2.0 +928 98 model.scoring_fct_norm 1.0 +928 98 optimizer.lr 0.012263179189306658 +928 98 negative_sampler.num_negs_per_pos 43.0 +928 98 training.batch_size 1.0 +928 99 model.embedding_dim 1.0 +928 99 model.relation_dim 0.0 +928 99 model.scoring_fct_norm 2.0 +928 99 optimizer.lr 0.0036819749021437197 +928 99 negative_sampler.num_negs_per_pos 91.0 +928 99 training.batch_size 2.0 +928 100 model.embedding_dim 0.0 +928 100 model.relation_dim 
2.0 +928 100 model.scoring_fct_norm 2.0 +928 100 optimizer.lr 0.04390807510096296 +928 100 negative_sampler.num_negs_per_pos 11.0 +928 100 training.batch_size 1.0 +928 1 dataset """kinships""" +928 1 model """transr""" +928 1 loss """softplus""" +928 1 regularizer """no""" +928 1 optimizer """adam""" +928 1 training_loop """owa""" +928 1 negative_sampler """basic""" +928 1 evaluator """rankbased""" +928 2 dataset """kinships""" +928 2 model """transr""" +928 2 loss """softplus""" +928 2 regularizer """no""" +928 2 optimizer """adam""" +928 2 training_loop """owa""" +928 2 negative_sampler """basic""" +928 2 evaluator """rankbased""" +928 3 dataset """kinships""" +928 3 model """transr""" +928 3 loss """softplus""" +928 3 regularizer """no""" +928 3 optimizer """adam""" +928 3 training_loop """owa""" +928 3 negative_sampler """basic""" +928 3 evaluator """rankbased""" +928 4 dataset """kinships""" +928 4 model """transr""" +928 4 loss """softplus""" +928 4 regularizer """no""" +928 4 optimizer """adam""" +928 4 training_loop """owa""" +928 4 negative_sampler """basic""" +928 4 evaluator """rankbased""" +928 5 dataset """kinships""" +928 5 model """transr""" +928 5 loss """softplus""" +928 5 regularizer """no""" +928 5 optimizer """adam""" +928 5 training_loop """owa""" +928 5 negative_sampler """basic""" +928 5 evaluator """rankbased""" +928 6 dataset """kinships""" +928 6 model """transr""" +928 6 loss """softplus""" +928 6 regularizer """no""" +928 6 optimizer """adam""" +928 6 training_loop """owa""" +928 6 negative_sampler """basic""" +928 6 evaluator """rankbased""" +928 7 dataset """kinships""" +928 7 model """transr""" +928 7 loss """softplus""" +928 7 regularizer """no""" +928 7 optimizer """adam""" +928 7 training_loop """owa""" +928 7 negative_sampler """basic""" +928 7 evaluator """rankbased""" +928 8 dataset """kinships""" +928 8 model """transr""" +928 8 loss """softplus""" +928 8 regularizer """no""" +928 8 optimizer """adam""" +928 8 training_loop 
"""owa""" +928 8 negative_sampler """basic""" +928 8 evaluator """rankbased""" +928 9 dataset """kinships""" +928 9 model """transr""" +928 9 loss """softplus""" +928 9 regularizer """no""" +928 9 optimizer """adam""" +928 9 training_loop """owa""" +928 9 negative_sampler """basic""" +928 9 evaluator """rankbased""" +928 10 dataset """kinships""" +928 10 model """transr""" +928 10 loss """softplus""" +928 10 regularizer """no""" +928 10 optimizer """adam""" +928 10 training_loop """owa""" +928 10 negative_sampler """basic""" +928 10 evaluator """rankbased""" +928 11 dataset """kinships""" +928 11 model """transr""" +928 11 loss """softplus""" +928 11 regularizer """no""" +928 11 optimizer """adam""" +928 11 training_loop """owa""" +928 11 negative_sampler """basic""" +928 11 evaluator """rankbased""" +928 12 dataset """kinships""" +928 12 model """transr""" +928 12 loss """softplus""" +928 12 regularizer """no""" +928 12 optimizer """adam""" +928 12 training_loop """owa""" +928 12 negative_sampler """basic""" +928 12 evaluator """rankbased""" +928 13 dataset """kinships""" +928 13 model """transr""" +928 13 loss """softplus""" +928 13 regularizer """no""" +928 13 optimizer """adam""" +928 13 training_loop """owa""" +928 13 negative_sampler """basic""" +928 13 evaluator """rankbased""" +928 14 dataset """kinships""" +928 14 model """transr""" +928 14 loss """softplus""" +928 14 regularizer """no""" +928 14 optimizer """adam""" +928 14 training_loop """owa""" +928 14 negative_sampler """basic""" +928 14 evaluator """rankbased""" +928 15 dataset """kinships""" +928 15 model """transr""" +928 15 loss """softplus""" +928 15 regularizer """no""" +928 15 optimizer """adam""" +928 15 training_loop """owa""" +928 15 negative_sampler """basic""" +928 15 evaluator """rankbased""" +928 16 dataset """kinships""" +928 16 model """transr""" +928 16 loss """softplus""" +928 16 regularizer """no""" +928 16 optimizer """adam""" +928 16 training_loop """owa""" +928 16 
negative_sampler """basic""" +928 16 evaluator """rankbased""" +928 17 dataset """kinships""" +928 17 model """transr""" +928 17 loss """softplus""" +928 17 regularizer """no""" +928 17 optimizer """adam""" +928 17 training_loop """owa""" +928 17 negative_sampler """basic""" +928 17 evaluator """rankbased""" +928 18 dataset """kinships""" +928 18 model """transr""" +928 18 loss """softplus""" +928 18 regularizer """no""" +928 18 optimizer """adam""" +928 18 training_loop """owa""" +928 18 negative_sampler """basic""" +928 18 evaluator """rankbased""" +928 19 dataset """kinships""" +928 19 model """transr""" +928 19 loss """softplus""" +928 19 regularizer """no""" +928 19 optimizer """adam""" +928 19 training_loop """owa""" +928 19 negative_sampler """basic""" +928 19 evaluator """rankbased""" +928 20 dataset """kinships""" +928 20 model """transr""" +928 20 loss """softplus""" +928 20 regularizer """no""" +928 20 optimizer """adam""" +928 20 training_loop """owa""" +928 20 negative_sampler """basic""" +928 20 evaluator """rankbased""" +928 21 dataset """kinships""" +928 21 model """transr""" +928 21 loss """softplus""" +928 21 regularizer """no""" +928 21 optimizer """adam""" +928 21 training_loop """owa""" +928 21 negative_sampler """basic""" +928 21 evaluator """rankbased""" +928 22 dataset """kinships""" +928 22 model """transr""" +928 22 loss """softplus""" +928 22 regularizer """no""" +928 22 optimizer """adam""" +928 22 training_loop """owa""" +928 22 negative_sampler """basic""" +928 22 evaluator """rankbased""" +928 23 dataset """kinships""" +928 23 model """transr""" +928 23 loss """softplus""" +928 23 regularizer """no""" +928 23 optimizer """adam""" +928 23 training_loop """owa""" +928 23 negative_sampler """basic""" +928 23 evaluator """rankbased""" +928 24 dataset """kinships""" +928 24 model """transr""" +928 24 loss """softplus""" +928 24 regularizer """no""" +928 24 optimizer """adam""" +928 24 training_loop """owa""" +928 24 negative_sampler 
"""basic""" +928 24 evaluator """rankbased""" +928 25 dataset """kinships""" +928 25 model """transr""" +928 25 loss """softplus""" +928 25 regularizer """no""" +928 25 optimizer """adam""" +928 25 training_loop """owa""" +928 25 negative_sampler """basic""" +928 25 evaluator """rankbased""" +928 26 dataset """kinships""" +928 26 model """transr""" +928 26 loss """softplus""" +928 26 regularizer """no""" +928 26 optimizer """adam""" +928 26 training_loop """owa""" +928 26 negative_sampler """basic""" +928 26 evaluator """rankbased""" +928 27 dataset """kinships""" +928 27 model """transr""" +928 27 loss """softplus""" +928 27 regularizer """no""" +928 27 optimizer """adam""" +928 27 training_loop """owa""" +928 27 negative_sampler """basic""" +928 27 evaluator """rankbased""" +928 28 dataset """kinships""" +928 28 model """transr""" +928 28 loss """softplus""" +928 28 regularizer """no""" +928 28 optimizer """adam""" +928 28 training_loop """owa""" +928 28 negative_sampler """basic""" +928 28 evaluator """rankbased""" +928 29 dataset """kinships""" +928 29 model """transr""" +928 29 loss """softplus""" +928 29 regularizer """no""" +928 29 optimizer """adam""" +928 29 training_loop """owa""" +928 29 negative_sampler """basic""" +928 29 evaluator """rankbased""" +928 30 dataset """kinships""" +928 30 model """transr""" +928 30 loss """softplus""" +928 30 regularizer """no""" +928 30 optimizer """adam""" +928 30 training_loop """owa""" +928 30 negative_sampler """basic""" +928 30 evaluator """rankbased""" +928 31 dataset """kinships""" +928 31 model """transr""" +928 31 loss """softplus""" +928 31 regularizer """no""" +928 31 optimizer """adam""" +928 31 training_loop """owa""" +928 31 negative_sampler """basic""" +928 31 evaluator """rankbased""" +928 32 dataset """kinships""" +928 32 model """transr""" +928 32 loss """softplus""" +928 32 regularizer """no""" +928 32 optimizer """adam""" +928 32 training_loop """owa""" +928 32 negative_sampler """basic""" +928 32 
evaluator """rankbased""" +928 33 dataset """kinships""" +928 33 model """transr""" +928 33 loss """softplus""" +928 33 regularizer """no""" +928 33 optimizer """adam""" +928 33 training_loop """owa""" +928 33 negative_sampler """basic""" +928 33 evaluator """rankbased""" +928 34 dataset """kinships""" +928 34 model """transr""" +928 34 loss """softplus""" +928 34 regularizer """no""" +928 34 optimizer """adam""" +928 34 training_loop """owa""" +928 34 negative_sampler """basic""" +928 34 evaluator """rankbased""" +928 35 dataset """kinships""" +928 35 model """transr""" +928 35 loss """softplus""" +928 35 regularizer """no""" +928 35 optimizer """adam""" +928 35 training_loop """owa""" +928 35 negative_sampler """basic""" +928 35 evaluator """rankbased""" +928 36 dataset """kinships""" +928 36 model """transr""" +928 36 loss """softplus""" +928 36 regularizer """no""" +928 36 optimizer """adam""" +928 36 training_loop """owa""" +928 36 negative_sampler """basic""" +928 36 evaluator """rankbased""" +928 37 dataset """kinships""" +928 37 model """transr""" +928 37 loss """softplus""" +928 37 regularizer """no""" +928 37 optimizer """adam""" +928 37 training_loop """owa""" +928 37 negative_sampler """basic""" +928 37 evaluator """rankbased""" +928 38 dataset """kinships""" +928 38 model """transr""" +928 38 loss """softplus""" +928 38 regularizer """no""" +928 38 optimizer """adam""" +928 38 training_loop """owa""" +928 38 negative_sampler """basic""" +928 38 evaluator """rankbased""" +928 39 dataset """kinships""" +928 39 model """transr""" +928 39 loss """softplus""" +928 39 regularizer """no""" +928 39 optimizer """adam""" +928 39 training_loop """owa""" +928 39 negative_sampler """basic""" +928 39 evaluator """rankbased""" +928 40 dataset """kinships""" +928 40 model """transr""" +928 40 loss """softplus""" +928 40 regularizer """no""" +928 40 optimizer """adam""" +928 40 training_loop """owa""" +928 40 negative_sampler """basic""" +928 40 evaluator 
"""rankbased""" +928 41 dataset """kinships""" +928 41 model """transr""" +928 41 loss """softplus""" +928 41 regularizer """no""" +928 41 optimizer """adam""" +928 41 training_loop """owa""" +928 41 negative_sampler """basic""" +928 41 evaluator """rankbased""" +928 42 dataset """kinships""" +928 42 model """transr""" +928 42 loss """softplus""" +928 42 regularizer """no""" +928 42 optimizer """adam""" +928 42 training_loop """owa""" +928 42 negative_sampler """basic""" +928 42 evaluator """rankbased""" +928 43 dataset """kinships""" +928 43 model """transr""" +928 43 loss """softplus""" +928 43 regularizer """no""" +928 43 optimizer """adam""" +928 43 training_loop """owa""" +928 43 negative_sampler """basic""" +928 43 evaluator """rankbased""" +928 44 dataset """kinships""" +928 44 model """transr""" +928 44 loss """softplus""" +928 44 regularizer """no""" +928 44 optimizer """adam""" +928 44 training_loop """owa""" +928 44 negative_sampler """basic""" +928 44 evaluator """rankbased""" +928 45 dataset """kinships""" +928 45 model """transr""" +928 45 loss """softplus""" +928 45 regularizer """no""" +928 45 optimizer """adam""" +928 45 training_loop """owa""" +928 45 negative_sampler """basic""" +928 45 evaluator """rankbased""" +928 46 dataset """kinships""" +928 46 model """transr""" +928 46 loss """softplus""" +928 46 regularizer """no""" +928 46 optimizer """adam""" +928 46 training_loop """owa""" +928 46 negative_sampler """basic""" +928 46 evaluator """rankbased""" +928 47 dataset """kinships""" +928 47 model """transr""" +928 47 loss """softplus""" +928 47 regularizer """no""" +928 47 optimizer """adam""" +928 47 training_loop """owa""" +928 47 negative_sampler """basic""" +928 47 evaluator """rankbased""" +928 48 dataset """kinships""" +928 48 model """transr""" +928 48 loss """softplus""" +928 48 regularizer """no""" +928 48 optimizer """adam""" +928 48 training_loop """owa""" +928 48 negative_sampler """basic""" +928 48 evaluator """rankbased""" +928 49 
dataset """kinships""" +928 49 model """transr""" +928 49 loss """softplus""" +928 49 regularizer """no""" +928 49 optimizer """adam""" +928 49 training_loop """owa""" +928 49 negative_sampler """basic""" +928 49 evaluator """rankbased""" +928 50 dataset """kinships""" +928 50 model """transr""" +928 50 loss """softplus""" +928 50 regularizer """no""" +928 50 optimizer """adam""" +928 50 training_loop """owa""" +928 50 negative_sampler """basic""" +928 50 evaluator """rankbased""" +928 51 dataset """kinships""" +928 51 model """transr""" +928 51 loss """softplus""" +928 51 regularizer """no""" +928 51 optimizer """adam""" +928 51 training_loop """owa""" +928 51 negative_sampler """basic""" +928 51 evaluator """rankbased""" +928 52 dataset """kinships""" +928 52 model """transr""" +928 52 loss """softplus""" +928 52 regularizer """no""" +928 52 optimizer """adam""" +928 52 training_loop """owa""" +928 52 negative_sampler """basic""" +928 52 evaluator """rankbased""" +928 53 dataset """kinships""" +928 53 model """transr""" +928 53 loss """softplus""" +928 53 regularizer """no""" +928 53 optimizer """adam""" +928 53 training_loop """owa""" +928 53 negative_sampler """basic""" +928 53 evaluator """rankbased""" +928 54 dataset """kinships""" +928 54 model """transr""" +928 54 loss """softplus""" +928 54 regularizer """no""" +928 54 optimizer """adam""" +928 54 training_loop """owa""" +928 54 negative_sampler """basic""" +928 54 evaluator """rankbased""" +928 55 dataset """kinships""" +928 55 model """transr""" +928 55 loss """softplus""" +928 55 regularizer """no""" +928 55 optimizer """adam""" +928 55 training_loop """owa""" +928 55 negative_sampler """basic""" +928 55 evaluator """rankbased""" +928 56 dataset """kinships""" +928 56 model """transr""" +928 56 loss """softplus""" +928 56 regularizer """no""" +928 56 optimizer """adam""" +928 56 training_loop """owa""" +928 56 negative_sampler """basic""" +928 56 evaluator """rankbased""" +928 57 dataset """kinships""" 
+928 57 model """transr""" +928 57 loss """softplus""" +928 57 regularizer """no""" +928 57 optimizer """adam""" +928 57 training_loop """owa""" +928 57 negative_sampler """basic""" +928 57 evaluator """rankbased""" +928 58 dataset """kinships""" +928 58 model """transr""" +928 58 loss """softplus""" +928 58 regularizer """no""" +928 58 optimizer """adam""" +928 58 training_loop """owa""" +928 58 negative_sampler """basic""" +928 58 evaluator """rankbased""" +928 59 dataset """kinships""" +928 59 model """transr""" +928 59 loss """softplus""" +928 59 regularizer """no""" +928 59 optimizer """adam""" +928 59 training_loop """owa""" +928 59 negative_sampler """basic""" +928 59 evaluator """rankbased""" +928 60 dataset """kinships""" +928 60 model """transr""" +928 60 loss """softplus""" +928 60 regularizer """no""" +928 60 optimizer """adam""" +928 60 training_loop """owa""" +928 60 negative_sampler """basic""" +928 60 evaluator """rankbased""" +928 61 dataset """kinships""" +928 61 model """transr""" +928 61 loss """softplus""" +928 61 regularizer """no""" +928 61 optimizer """adam""" +928 61 training_loop """owa""" +928 61 negative_sampler """basic""" +928 61 evaluator """rankbased""" +928 62 dataset """kinships""" +928 62 model """transr""" +928 62 loss """softplus""" +928 62 regularizer """no""" +928 62 optimizer """adam""" +928 62 training_loop """owa""" +928 62 negative_sampler """basic""" +928 62 evaluator """rankbased""" +928 63 dataset """kinships""" +928 63 model """transr""" +928 63 loss """softplus""" +928 63 regularizer """no""" +928 63 optimizer """adam""" +928 63 training_loop """owa""" +928 63 negative_sampler """basic""" +928 63 evaluator """rankbased""" +928 64 dataset """kinships""" +928 64 model """transr""" +928 64 loss """softplus""" +928 64 regularizer """no""" +928 64 optimizer """adam""" +928 64 training_loop """owa""" +928 64 negative_sampler """basic""" +928 64 evaluator """rankbased""" +928 65 dataset """kinships""" +928 65 model 
"""transr""" +928 65 loss """softplus""" +928 65 regularizer """no""" +928 65 optimizer """adam""" +928 65 training_loop """owa""" +928 65 negative_sampler """basic""" +928 65 evaluator """rankbased""" +928 66 dataset """kinships""" +928 66 model """transr""" +928 66 loss """softplus""" +928 66 regularizer """no""" +928 66 optimizer """adam""" +928 66 training_loop """owa""" +928 66 negative_sampler """basic""" +928 66 evaluator """rankbased""" +928 67 dataset """kinships""" +928 67 model """transr""" +928 67 loss """softplus""" +928 67 regularizer """no""" +928 67 optimizer """adam""" +928 67 training_loop """owa""" +928 67 negative_sampler """basic""" +928 67 evaluator """rankbased""" +928 68 dataset """kinships""" +928 68 model """transr""" +928 68 loss """softplus""" +928 68 regularizer """no""" +928 68 optimizer """adam""" +928 68 training_loop """owa""" +928 68 negative_sampler """basic""" +928 68 evaluator """rankbased""" +928 69 dataset """kinships""" +928 69 model """transr""" +928 69 loss """softplus""" +928 69 regularizer """no""" +928 69 optimizer """adam""" +928 69 training_loop """owa""" +928 69 negative_sampler """basic""" +928 69 evaluator """rankbased""" +928 70 dataset """kinships""" +928 70 model """transr""" +928 70 loss """softplus""" +928 70 regularizer """no""" +928 70 optimizer """adam""" +928 70 training_loop """owa""" +928 70 negative_sampler """basic""" +928 70 evaluator """rankbased""" +928 71 dataset """kinships""" +928 71 model """transr""" +928 71 loss """softplus""" +928 71 regularizer """no""" +928 71 optimizer """adam""" +928 71 training_loop """owa""" +928 71 negative_sampler """basic""" +928 71 evaluator """rankbased""" +928 72 dataset """kinships""" +928 72 model """transr""" +928 72 loss """softplus""" +928 72 regularizer """no""" +928 72 optimizer """adam""" +928 72 training_loop """owa""" +928 72 negative_sampler """basic""" +928 72 evaluator """rankbased""" +928 73 dataset """kinships""" +928 73 model """transr""" +928 73 
loss """softplus""" +928 73 regularizer """no""" +928 73 optimizer """adam""" +928 73 training_loop """owa""" +928 73 negative_sampler """basic""" +928 73 evaluator """rankbased""" +928 74 dataset """kinships""" +928 74 model """transr""" +928 74 loss """softplus""" +928 74 regularizer """no""" +928 74 optimizer """adam""" +928 74 training_loop """owa""" +928 74 negative_sampler """basic""" +928 74 evaluator """rankbased""" +928 75 dataset """kinships""" +928 75 model """transr""" +928 75 loss """softplus""" +928 75 regularizer """no""" +928 75 optimizer """adam""" +928 75 training_loop """owa""" +928 75 negative_sampler """basic""" +928 75 evaluator """rankbased""" +928 76 dataset """kinships""" +928 76 model """transr""" +928 76 loss """softplus""" +928 76 regularizer """no""" +928 76 optimizer """adam""" +928 76 training_loop """owa""" +928 76 negative_sampler """basic""" +928 76 evaluator """rankbased""" +928 77 dataset """kinships""" +928 77 model """transr""" +928 77 loss """softplus""" +928 77 regularizer """no""" +928 77 optimizer """adam""" +928 77 training_loop """owa""" +928 77 negative_sampler """basic""" +928 77 evaluator """rankbased""" +928 78 dataset """kinships""" +928 78 model """transr""" +928 78 loss """softplus""" +928 78 regularizer """no""" +928 78 optimizer """adam""" +928 78 training_loop """owa""" +928 78 negative_sampler """basic""" +928 78 evaluator """rankbased""" +928 79 dataset """kinships""" +928 79 model """transr""" +928 79 loss """softplus""" +928 79 regularizer """no""" +928 79 optimizer """adam""" +928 79 training_loop """owa""" +928 79 negative_sampler """basic""" +928 79 evaluator """rankbased""" +928 80 dataset """kinships""" +928 80 model """transr""" +928 80 loss """softplus""" +928 80 regularizer """no""" +928 80 optimizer """adam""" +928 80 training_loop """owa""" +928 80 negative_sampler """basic""" +928 80 evaluator """rankbased""" +928 81 dataset """kinships""" +928 81 model """transr""" +928 81 loss """softplus""" 
+928 81 regularizer """no""" +928 81 optimizer """adam""" +928 81 training_loop """owa""" +928 81 negative_sampler """basic""" +928 81 evaluator """rankbased""" +928 82 dataset """kinships""" +928 82 model """transr""" +928 82 loss """softplus""" +928 82 regularizer """no""" +928 82 optimizer """adam""" +928 82 training_loop """owa""" +928 82 negative_sampler """basic""" +928 82 evaluator """rankbased""" +928 83 dataset """kinships""" +928 83 model """transr""" +928 83 loss """softplus""" +928 83 regularizer """no""" +928 83 optimizer """adam""" +928 83 training_loop """owa""" +928 83 negative_sampler """basic""" +928 83 evaluator """rankbased""" +928 84 dataset """kinships""" +928 84 model """transr""" +928 84 loss """softplus""" +928 84 regularizer """no""" +928 84 optimizer """adam""" +928 84 training_loop """owa""" +928 84 negative_sampler """basic""" +928 84 evaluator """rankbased""" +928 85 dataset """kinships""" +928 85 model """transr""" +928 85 loss """softplus""" +928 85 regularizer """no""" +928 85 optimizer """adam""" +928 85 training_loop """owa""" +928 85 negative_sampler """basic""" +928 85 evaluator """rankbased""" +928 86 dataset """kinships""" +928 86 model """transr""" +928 86 loss """softplus""" +928 86 regularizer """no""" +928 86 optimizer """adam""" +928 86 training_loop """owa""" +928 86 negative_sampler """basic""" +928 86 evaluator """rankbased""" +928 87 dataset """kinships""" +928 87 model """transr""" +928 87 loss """softplus""" +928 87 regularizer """no""" +928 87 optimizer """adam""" +928 87 training_loop """owa""" +928 87 negative_sampler """basic""" +928 87 evaluator """rankbased""" +928 88 dataset """kinships""" +928 88 model """transr""" +928 88 loss """softplus""" +928 88 regularizer """no""" +928 88 optimizer """adam""" +928 88 training_loop """owa""" +928 88 negative_sampler """basic""" +928 88 evaluator """rankbased""" +928 89 dataset """kinships""" +928 89 model """transr""" +928 89 loss """softplus""" +928 89 regularizer 
"""no""" +928 89 optimizer """adam""" +928 89 training_loop """owa""" +928 89 negative_sampler """basic""" +928 89 evaluator """rankbased""" +928 90 dataset """kinships""" +928 90 model """transr""" +928 90 loss """softplus""" +928 90 regularizer """no""" +928 90 optimizer """adam""" +928 90 training_loop """owa""" +928 90 negative_sampler """basic""" +928 90 evaluator """rankbased""" +928 91 dataset """kinships""" +928 91 model """transr""" +928 91 loss """softplus""" +928 91 regularizer """no""" +928 91 optimizer """adam""" +928 91 training_loop """owa""" +928 91 negative_sampler """basic""" +928 91 evaluator """rankbased""" +928 92 dataset """kinships""" +928 92 model """transr""" +928 92 loss """softplus""" +928 92 regularizer """no""" +928 92 optimizer """adam""" +928 92 training_loop """owa""" +928 92 negative_sampler """basic""" +928 92 evaluator """rankbased""" +928 93 dataset """kinships""" +928 93 model """transr""" +928 93 loss """softplus""" +928 93 regularizer """no""" +928 93 optimizer """adam""" +928 93 training_loop """owa""" +928 93 negative_sampler """basic""" +928 93 evaluator """rankbased""" +928 94 dataset """kinships""" +928 94 model """transr""" +928 94 loss """softplus""" +928 94 regularizer """no""" +928 94 optimizer """adam""" +928 94 training_loop """owa""" +928 94 negative_sampler """basic""" +928 94 evaluator """rankbased""" +928 95 dataset """kinships""" +928 95 model """transr""" +928 95 loss """softplus""" +928 95 regularizer """no""" +928 95 optimizer """adam""" +928 95 training_loop """owa""" +928 95 negative_sampler """basic""" +928 95 evaluator """rankbased""" +928 96 dataset """kinships""" +928 96 model """transr""" +928 96 loss """softplus""" +928 96 regularizer """no""" +928 96 optimizer """adam""" +928 96 training_loop """owa""" +928 96 negative_sampler """basic""" +928 96 evaluator """rankbased""" +928 97 dataset """kinships""" +928 97 model """transr""" +928 97 loss """softplus""" +928 97 regularizer """no""" +928 97 
optimizer """adam""" +928 97 training_loop """owa""" +928 97 negative_sampler """basic""" +928 97 evaluator """rankbased""" +928 98 dataset """kinships""" +928 98 model """transr""" +928 98 loss """softplus""" +928 98 regularizer """no""" +928 98 optimizer """adam""" +928 98 training_loop """owa""" +928 98 negative_sampler """basic""" +928 98 evaluator """rankbased""" +928 99 dataset """kinships""" +928 99 model """transr""" +928 99 loss """softplus""" +928 99 regularizer """no""" +928 99 optimizer """adam""" +928 99 training_loop """owa""" +928 99 negative_sampler """basic""" +928 99 evaluator """rankbased""" +928 100 dataset """kinships""" +928 100 model """transr""" +928 100 loss """softplus""" +928 100 regularizer """no""" +928 100 optimizer """adam""" +928 100 training_loop """owa""" +928 100 negative_sampler """basic""" +928 100 evaluator """rankbased""" +929 1 model.embedding_dim 2.0 +929 1 model.relation_dim 1.0 +929 1 model.scoring_fct_norm 2.0 +929 1 optimizer.lr 0.012233073576043384 +929 1 negative_sampler.num_negs_per_pos 54.0 +929 1 training.batch_size 2.0 +929 2 model.embedding_dim 2.0 +929 2 model.relation_dim 2.0 +929 2 model.scoring_fct_norm 1.0 +929 2 optimizer.lr 0.008721193613859675 +929 2 negative_sampler.num_negs_per_pos 15.0 +929 2 training.batch_size 2.0 +929 3 model.embedding_dim 2.0 +929 3 model.relation_dim 0.0 +929 3 model.scoring_fct_norm 1.0 +929 3 optimizer.lr 0.07963904327455246 +929 3 negative_sampler.num_negs_per_pos 89.0 +929 3 training.batch_size 1.0 +929 4 model.embedding_dim 1.0 +929 4 model.relation_dim 1.0 +929 4 model.scoring_fct_norm 1.0 +929 4 optimizer.lr 0.035439960708220794 +929 4 negative_sampler.num_negs_per_pos 65.0 +929 4 training.batch_size 0.0 +929 5 model.embedding_dim 2.0 +929 5 model.relation_dim 0.0 +929 5 model.scoring_fct_norm 1.0 +929 5 optimizer.lr 0.03263960864969163 +929 5 negative_sampler.num_negs_per_pos 60.0 +929 5 training.batch_size 1.0 +929 6 model.embedding_dim 0.0 +929 6 model.relation_dim 1.0 
+929 6 model.scoring_fct_norm 2.0 +929 6 optimizer.lr 0.0010762688728927917 +929 6 negative_sampler.num_negs_per_pos 26.0 +929 6 training.batch_size 2.0 +929 7 model.embedding_dim 1.0 +929 7 model.relation_dim 0.0 +929 7 model.scoring_fct_norm 2.0 +929 7 optimizer.lr 0.013854967830900134 +929 7 negative_sampler.num_negs_per_pos 85.0 +929 7 training.batch_size 1.0 +929 8 model.embedding_dim 2.0 +929 8 model.relation_dim 2.0 +929 8 model.scoring_fct_norm 2.0 +929 8 optimizer.lr 0.00208826916433944 +929 8 negative_sampler.num_negs_per_pos 95.0 +929 8 training.batch_size 0.0 +929 9 model.embedding_dim 1.0 +929 9 model.relation_dim 1.0 +929 9 model.scoring_fct_norm 1.0 +929 9 optimizer.lr 0.029765110766720174 +929 9 negative_sampler.num_negs_per_pos 66.0 +929 9 training.batch_size 2.0 +929 10 model.embedding_dim 0.0 +929 10 model.relation_dim 2.0 +929 10 model.scoring_fct_norm 2.0 +929 10 optimizer.lr 0.002526020177387997 +929 10 negative_sampler.num_negs_per_pos 42.0 +929 10 training.batch_size 0.0 +929 11 model.embedding_dim 2.0 +929 11 model.relation_dim 2.0 +929 11 model.scoring_fct_norm 2.0 +929 11 optimizer.lr 0.0012464158254902936 +929 11 negative_sampler.num_negs_per_pos 72.0 +929 11 training.batch_size 2.0 +929 12 model.embedding_dim 2.0 +929 12 model.relation_dim 0.0 +929 12 model.scoring_fct_norm 2.0 +929 12 optimizer.lr 0.0377922142756535 +929 12 negative_sampler.num_negs_per_pos 81.0 +929 12 training.batch_size 1.0 +929 13 model.embedding_dim 1.0 +929 13 model.relation_dim 0.0 +929 13 model.scoring_fct_norm 2.0 +929 13 optimizer.lr 0.051606581787596174 +929 13 negative_sampler.num_negs_per_pos 66.0 +929 13 training.batch_size 1.0 +929 14 model.embedding_dim 1.0 +929 14 model.relation_dim 2.0 +929 14 model.scoring_fct_norm 1.0 +929 14 optimizer.lr 0.02133618958782844 +929 14 negative_sampler.num_negs_per_pos 84.0 +929 14 training.batch_size 0.0 +929 15 model.embedding_dim 0.0 +929 15 model.relation_dim 2.0 +929 15 model.scoring_fct_norm 2.0 +929 15 
optimizer.lr 0.008997784284432401 +929 15 negative_sampler.num_negs_per_pos 85.0 +929 15 training.batch_size 1.0 +929 16 model.embedding_dim 2.0 +929 16 model.relation_dim 0.0 +929 16 model.scoring_fct_norm 2.0 +929 16 optimizer.lr 0.0018258573417458835 +929 16 negative_sampler.num_negs_per_pos 44.0 +929 16 training.batch_size 0.0 +929 17 model.embedding_dim 1.0 +929 17 model.relation_dim 0.0 +929 17 model.scoring_fct_norm 1.0 +929 17 optimizer.lr 0.004879862109156483 +929 17 negative_sampler.num_negs_per_pos 50.0 +929 17 training.batch_size 0.0 +929 18 model.embedding_dim 2.0 +929 18 model.relation_dim 2.0 +929 18 model.scoring_fct_norm 1.0 +929 18 optimizer.lr 0.027566589479603828 +929 18 negative_sampler.num_negs_per_pos 56.0 +929 18 training.batch_size 1.0 +929 19 model.embedding_dim 1.0 +929 19 model.relation_dim 1.0 +929 19 model.scoring_fct_norm 1.0 +929 19 optimizer.lr 0.054744919432181446 +929 19 negative_sampler.num_negs_per_pos 56.0 +929 19 training.batch_size 1.0 +929 20 model.embedding_dim 1.0 +929 20 model.relation_dim 1.0 +929 20 model.scoring_fct_norm 1.0 +929 20 optimizer.lr 0.001268732662940347 +929 20 negative_sampler.num_negs_per_pos 26.0 +929 20 training.batch_size 1.0 +929 21 model.embedding_dim 1.0 +929 21 model.relation_dim 2.0 +929 21 model.scoring_fct_norm 2.0 +929 21 optimizer.lr 0.019867377684761916 +929 21 negative_sampler.num_negs_per_pos 20.0 +929 21 training.batch_size 1.0 +929 22 model.embedding_dim 1.0 +929 22 model.relation_dim 0.0 +929 22 model.scoring_fct_norm 2.0 +929 22 optimizer.lr 0.003324001370983653 +929 22 negative_sampler.num_negs_per_pos 47.0 +929 22 training.batch_size 1.0 +929 23 model.embedding_dim 2.0 +929 23 model.relation_dim 0.0 +929 23 model.scoring_fct_norm 2.0 +929 23 optimizer.lr 0.011796508808329583 +929 23 negative_sampler.num_negs_per_pos 65.0 +929 23 training.batch_size 1.0 +929 24 model.embedding_dim 1.0 +929 24 model.relation_dim 1.0 +929 24 model.scoring_fct_norm 2.0 +929 24 optimizer.lr 
0.002268356566018279 +929 24 negative_sampler.num_negs_per_pos 4.0 +929 24 training.batch_size 0.0 +929 25 model.embedding_dim 0.0 +929 25 model.relation_dim 1.0 +929 25 model.scoring_fct_norm 1.0 +929 25 optimizer.lr 0.003916438031983424 +929 25 negative_sampler.num_negs_per_pos 46.0 +929 25 training.batch_size 1.0 +929 26 model.embedding_dim 0.0 +929 26 model.relation_dim 1.0 +929 26 model.scoring_fct_norm 1.0 +929 26 optimizer.lr 0.018629215058571052 +929 26 negative_sampler.num_negs_per_pos 57.0 +929 26 training.batch_size 0.0 +929 27 model.embedding_dim 1.0 +929 27 model.relation_dim 2.0 +929 27 model.scoring_fct_norm 1.0 +929 27 optimizer.lr 0.015519626846535912 +929 27 negative_sampler.num_negs_per_pos 23.0 +929 27 training.batch_size 1.0 +929 28 model.embedding_dim 0.0 +929 28 model.relation_dim 0.0 +929 28 model.scoring_fct_norm 1.0 +929 28 optimizer.lr 0.04923643570057684 +929 28 negative_sampler.num_negs_per_pos 47.0 +929 28 training.batch_size 0.0 +929 29 model.embedding_dim 0.0 +929 29 model.relation_dim 0.0 +929 29 model.scoring_fct_norm 1.0 +929 29 optimizer.lr 0.02622747977705296 +929 29 negative_sampler.num_negs_per_pos 99.0 +929 29 training.batch_size 1.0 +929 30 model.embedding_dim 2.0 +929 30 model.relation_dim 0.0 +929 30 model.scoring_fct_norm 2.0 +929 30 optimizer.lr 0.030545546468148208 +929 30 negative_sampler.num_negs_per_pos 75.0 +929 30 training.batch_size 0.0 +929 31 model.embedding_dim 0.0 +929 31 model.relation_dim 2.0 +929 31 model.scoring_fct_norm 2.0 +929 31 optimizer.lr 0.004067194799378436 +929 31 negative_sampler.num_negs_per_pos 49.0 +929 31 training.batch_size 1.0 +929 32 model.embedding_dim 2.0 +929 32 model.relation_dim 0.0 +929 32 model.scoring_fct_norm 2.0 +929 32 optimizer.lr 0.009877005695001344 +929 32 negative_sampler.num_negs_per_pos 32.0 +929 32 training.batch_size 1.0 +929 33 model.embedding_dim 1.0 +929 33 model.relation_dim 1.0 +929 33 model.scoring_fct_norm 2.0 +929 33 optimizer.lr 0.00815394507803339 +929 33 
negative_sampler.num_negs_per_pos 2.0 +929 33 training.batch_size 0.0 +929 34 model.embedding_dim 1.0 +929 34 model.relation_dim 2.0 +929 34 model.scoring_fct_norm 1.0 +929 34 optimizer.lr 0.006944237182620133 +929 34 negative_sampler.num_negs_per_pos 74.0 +929 34 training.batch_size 2.0 +929 35 model.embedding_dim 2.0 +929 35 model.relation_dim 2.0 +929 35 model.scoring_fct_norm 2.0 +929 35 optimizer.lr 0.0010763312343342916 +929 35 negative_sampler.num_negs_per_pos 87.0 +929 35 training.batch_size 2.0 +929 36 model.embedding_dim 1.0 +929 36 model.relation_dim 0.0 +929 36 model.scoring_fct_norm 2.0 +929 36 optimizer.lr 0.007671825567028745 +929 36 negative_sampler.num_negs_per_pos 95.0 +929 36 training.batch_size 2.0 +929 37 model.embedding_dim 0.0 +929 37 model.relation_dim 1.0 +929 37 model.scoring_fct_norm 2.0 +929 37 optimizer.lr 0.00153889380676236 +929 37 negative_sampler.num_negs_per_pos 89.0 +929 37 training.batch_size 0.0 +929 38 model.embedding_dim 1.0 +929 38 model.relation_dim 2.0 +929 38 model.scoring_fct_norm 2.0 +929 38 optimizer.lr 0.023367054718739778 +929 38 negative_sampler.num_negs_per_pos 39.0 +929 38 training.batch_size 0.0 +929 39 model.embedding_dim 1.0 +929 39 model.relation_dim 0.0 +929 39 model.scoring_fct_norm 2.0 +929 39 optimizer.lr 0.03892961067882143 +929 39 negative_sampler.num_negs_per_pos 4.0 +929 39 training.batch_size 1.0 +929 40 model.embedding_dim 0.0 +929 40 model.relation_dim 2.0 +929 40 model.scoring_fct_norm 1.0 +929 40 optimizer.lr 0.0011258426704396486 +929 40 negative_sampler.num_negs_per_pos 43.0 +929 40 training.batch_size 0.0 +929 41 model.embedding_dim 1.0 +929 41 model.relation_dim 2.0 +929 41 model.scoring_fct_norm 2.0 +929 41 optimizer.lr 0.05959438668186211 +929 41 negative_sampler.num_negs_per_pos 50.0 +929 41 training.batch_size 1.0 +929 42 model.embedding_dim 0.0 +929 42 model.relation_dim 0.0 +929 42 model.scoring_fct_norm 1.0 +929 42 optimizer.lr 0.011465904464370345 +929 42 
negative_sampler.num_negs_per_pos 63.0 +929 42 training.batch_size 0.0 +929 43 model.embedding_dim 2.0 +929 43 model.relation_dim 1.0 +929 43 model.scoring_fct_norm 1.0 +929 43 optimizer.lr 0.01078137960206725 +929 43 negative_sampler.num_negs_per_pos 12.0 +929 43 training.batch_size 0.0 +929 44 model.embedding_dim 1.0 +929 44 model.relation_dim 1.0 +929 44 model.scoring_fct_norm 1.0 +929 44 optimizer.lr 0.003010652391333002 +929 44 negative_sampler.num_negs_per_pos 34.0 +929 44 training.batch_size 1.0 +929 45 model.embedding_dim 1.0 +929 45 model.relation_dim 2.0 +929 45 model.scoring_fct_norm 1.0 +929 45 optimizer.lr 0.08556383218382756 +929 45 negative_sampler.num_negs_per_pos 86.0 +929 45 training.batch_size 0.0 +929 46 model.embedding_dim 1.0 +929 46 model.relation_dim 0.0 +929 46 model.scoring_fct_norm 2.0 +929 46 optimizer.lr 0.024473928009924678 +929 46 negative_sampler.num_negs_per_pos 82.0 +929 46 training.batch_size 1.0 +929 47 model.embedding_dim 0.0 +929 47 model.relation_dim 1.0 +929 47 model.scoring_fct_norm 1.0 +929 47 optimizer.lr 0.034884738680774276 +929 47 negative_sampler.num_negs_per_pos 83.0 +929 47 training.batch_size 1.0 +929 48 model.embedding_dim 0.0 +929 48 model.relation_dim 2.0 +929 48 model.scoring_fct_norm 1.0 +929 48 optimizer.lr 0.003258018903053977 +929 48 negative_sampler.num_negs_per_pos 56.0 +929 48 training.batch_size 1.0 +929 49 model.embedding_dim 0.0 +929 49 model.relation_dim 1.0 +929 49 model.scoring_fct_norm 1.0 +929 49 optimizer.lr 0.02889506764974619 +929 49 negative_sampler.num_negs_per_pos 67.0 +929 49 training.batch_size 1.0 +929 50 model.embedding_dim 0.0 +929 50 model.relation_dim 1.0 +929 50 model.scoring_fct_norm 2.0 +929 50 optimizer.lr 0.0034994882649279405 +929 50 negative_sampler.num_negs_per_pos 22.0 +929 50 training.batch_size 0.0 +929 51 model.embedding_dim 2.0 +929 51 model.relation_dim 0.0 +929 51 model.scoring_fct_norm 1.0 +929 51 optimizer.lr 0.005685948117209012 +929 51 
negative_sampler.num_negs_per_pos 18.0 +929 51 training.batch_size 1.0 +929 52 model.embedding_dim 1.0 +929 52 model.relation_dim 1.0 +929 52 model.scoring_fct_norm 2.0 +929 52 optimizer.lr 0.0025960662771592393 +929 52 negative_sampler.num_negs_per_pos 27.0 +929 52 training.batch_size 2.0 +929 53 model.embedding_dim 2.0 +929 53 model.relation_dim 0.0 +929 53 model.scoring_fct_norm 1.0 +929 53 optimizer.lr 0.03376799620812886 +929 53 negative_sampler.num_negs_per_pos 49.0 +929 53 training.batch_size 2.0 +929 54 model.embedding_dim 2.0 +929 54 model.relation_dim 1.0 +929 54 model.scoring_fct_norm 1.0 +929 54 optimizer.lr 0.05999115527815785 +929 54 negative_sampler.num_negs_per_pos 39.0 +929 54 training.batch_size 2.0 +929 55 model.embedding_dim 0.0 +929 55 model.relation_dim 2.0 +929 55 model.scoring_fct_norm 2.0 +929 55 optimizer.lr 0.003789183563424735 +929 55 negative_sampler.num_negs_per_pos 76.0 +929 55 training.batch_size 2.0 +929 56 model.embedding_dim 1.0 +929 56 model.relation_dim 2.0 +929 56 model.scoring_fct_norm 2.0 +929 56 optimizer.lr 0.009743000061349634 +929 56 negative_sampler.num_negs_per_pos 39.0 +929 56 training.batch_size 1.0 +929 57 model.embedding_dim 1.0 +929 57 model.relation_dim 1.0 +929 57 model.scoring_fct_norm 2.0 +929 57 optimizer.lr 0.010868409170209033 +929 57 negative_sampler.num_negs_per_pos 99.0 +929 57 training.batch_size 0.0 +929 58 model.embedding_dim 1.0 +929 58 model.relation_dim 1.0 +929 58 model.scoring_fct_norm 1.0 +929 58 optimizer.lr 0.0010954181890752697 +929 58 negative_sampler.num_negs_per_pos 23.0 +929 58 training.batch_size 1.0 +929 59 model.embedding_dim 0.0 +929 59 model.relation_dim 2.0 +929 59 model.scoring_fct_norm 1.0 +929 59 optimizer.lr 0.051718792879009025 +929 59 negative_sampler.num_negs_per_pos 41.0 +929 59 training.batch_size 2.0 +929 60 model.embedding_dim 0.0 +929 60 model.relation_dim 1.0 +929 60 model.scoring_fct_norm 2.0 +929 60 optimizer.lr 0.013268794263339469 +929 60 
negative_sampler.num_negs_per_pos 7.0 +929 60 training.batch_size 0.0 +929 61 model.embedding_dim 0.0 +929 61 model.relation_dim 2.0 +929 61 model.scoring_fct_norm 2.0 +929 61 optimizer.lr 0.03431493481544466 +929 61 negative_sampler.num_negs_per_pos 26.0 +929 61 training.batch_size 2.0 +929 62 model.embedding_dim 1.0 +929 62 model.relation_dim 0.0 +929 62 model.scoring_fct_norm 2.0 +929 62 optimizer.lr 0.01297866032187507 +929 62 negative_sampler.num_negs_per_pos 29.0 +929 62 training.batch_size 0.0 +929 63 model.embedding_dim 2.0 +929 63 model.relation_dim 2.0 +929 63 model.scoring_fct_norm 2.0 +929 63 optimizer.lr 0.004224342280399649 +929 63 negative_sampler.num_negs_per_pos 68.0 +929 63 training.batch_size 1.0 +929 64 model.embedding_dim 0.0 +929 64 model.relation_dim 2.0 +929 64 model.scoring_fct_norm 1.0 +929 64 optimizer.lr 0.058441021469528454 +929 64 negative_sampler.num_negs_per_pos 57.0 +929 64 training.batch_size 2.0 +929 65 model.embedding_dim 1.0 +929 65 model.relation_dim 0.0 +929 65 model.scoring_fct_norm 1.0 +929 65 optimizer.lr 0.002248504967843302 +929 65 negative_sampler.num_negs_per_pos 68.0 +929 65 training.batch_size 1.0 +929 66 model.embedding_dim 0.0 +929 66 model.relation_dim 0.0 +929 66 model.scoring_fct_norm 1.0 +929 66 optimizer.lr 0.07988873985507773 +929 66 negative_sampler.num_negs_per_pos 38.0 +929 66 training.batch_size 1.0 +929 67 model.embedding_dim 2.0 +929 67 model.relation_dim 1.0 +929 67 model.scoring_fct_norm 1.0 +929 67 optimizer.lr 0.009695848803977414 +929 67 negative_sampler.num_negs_per_pos 3.0 +929 67 training.batch_size 2.0 +929 68 model.embedding_dim 0.0 +929 68 model.relation_dim 1.0 +929 68 model.scoring_fct_norm 1.0 +929 68 optimizer.lr 0.04536960754405552 +929 68 negative_sampler.num_negs_per_pos 59.0 +929 68 training.batch_size 2.0 +929 69 model.embedding_dim 0.0 +929 69 model.relation_dim 1.0 +929 69 model.scoring_fct_norm 1.0 +929 69 optimizer.lr 0.015458118639006509 +929 69 negative_sampler.num_negs_per_pos 
18.0 +929 69 training.batch_size 2.0 +929 70 model.embedding_dim 2.0 +929 70 model.relation_dim 0.0 +929 70 model.scoring_fct_norm 1.0 +929 70 optimizer.lr 0.0802441158353367 +929 70 negative_sampler.num_negs_per_pos 8.0 +929 70 training.batch_size 1.0 +929 71 model.embedding_dim 1.0 +929 71 model.relation_dim 2.0 +929 71 model.scoring_fct_norm 1.0 +929 71 optimizer.lr 0.0050172601042385015 +929 71 negative_sampler.num_negs_per_pos 5.0 +929 71 training.batch_size 0.0 +929 72 model.embedding_dim 0.0 +929 72 model.relation_dim 0.0 +929 72 model.scoring_fct_norm 1.0 +929 72 optimizer.lr 0.0014705897799575586 +929 72 negative_sampler.num_negs_per_pos 67.0 +929 72 training.batch_size 2.0 +929 73 model.embedding_dim 0.0 +929 73 model.relation_dim 1.0 +929 73 model.scoring_fct_norm 1.0 +929 73 optimizer.lr 0.009770004824191556 +929 73 negative_sampler.num_negs_per_pos 42.0 +929 73 training.batch_size 1.0 +929 74 model.embedding_dim 0.0 +929 74 model.relation_dim 1.0 +929 74 model.scoring_fct_norm 2.0 +929 74 optimizer.lr 0.022014532535640553 +929 74 negative_sampler.num_negs_per_pos 5.0 +929 74 training.batch_size 0.0 +929 75 model.embedding_dim 2.0 +929 75 model.relation_dim 2.0 +929 75 model.scoring_fct_norm 2.0 +929 75 optimizer.lr 0.004207340601509996 +929 75 negative_sampler.num_negs_per_pos 68.0 +929 75 training.batch_size 0.0 +929 76 model.embedding_dim 1.0 +929 76 model.relation_dim 2.0 +929 76 model.scoring_fct_norm 1.0 +929 76 optimizer.lr 0.033324224767629015 +929 76 negative_sampler.num_negs_per_pos 52.0 +929 76 training.batch_size 0.0 +929 77 model.embedding_dim 2.0 +929 77 model.relation_dim 0.0 +929 77 model.scoring_fct_norm 1.0 +929 77 optimizer.lr 0.007680455478221883 +929 77 negative_sampler.num_negs_per_pos 22.0 +929 77 training.batch_size 1.0 +929 78 model.embedding_dim 0.0 +929 78 model.relation_dim 0.0 +929 78 model.scoring_fct_norm 2.0 +929 78 optimizer.lr 0.001218160888083822 +929 78 negative_sampler.num_negs_per_pos 22.0 +929 78 
training.batch_size 1.0 +929 79 model.embedding_dim 2.0 +929 79 model.relation_dim 2.0 +929 79 model.scoring_fct_norm 2.0 +929 79 optimizer.lr 0.009086136886595568 +929 79 negative_sampler.num_negs_per_pos 61.0 +929 79 training.batch_size 0.0 +929 80 model.embedding_dim 0.0 +929 80 model.relation_dim 1.0 +929 80 model.scoring_fct_norm 1.0 +929 80 optimizer.lr 0.012334090478071168 +929 80 negative_sampler.num_negs_per_pos 0.0 +929 80 training.batch_size 2.0 +929 81 model.embedding_dim 2.0 +929 81 model.relation_dim 0.0 +929 81 model.scoring_fct_norm 1.0 +929 81 optimizer.lr 0.005331201183918376 +929 81 negative_sampler.num_negs_per_pos 25.0 +929 81 training.batch_size 2.0 +929 82 model.embedding_dim 0.0 +929 82 model.relation_dim 1.0 +929 82 model.scoring_fct_norm 1.0 +929 82 optimizer.lr 0.002499943150011706 +929 82 negative_sampler.num_negs_per_pos 77.0 +929 82 training.batch_size 1.0 +929 83 model.embedding_dim 1.0 +929 83 model.relation_dim 2.0 +929 83 model.scoring_fct_norm 2.0 +929 83 optimizer.lr 0.002239768445193534 +929 83 negative_sampler.num_negs_per_pos 28.0 +929 83 training.batch_size 0.0 +929 84 model.embedding_dim 1.0 +929 84 model.relation_dim 2.0 +929 84 model.scoring_fct_norm 1.0 +929 84 optimizer.lr 0.009778993824930889 +929 84 negative_sampler.num_negs_per_pos 45.0 +929 84 training.batch_size 1.0 +929 85 model.embedding_dim 2.0 +929 85 model.relation_dim 1.0 +929 85 model.scoring_fct_norm 2.0 +929 85 optimizer.lr 0.007797268261950473 +929 85 negative_sampler.num_negs_per_pos 68.0 +929 85 training.batch_size 2.0 +929 86 model.embedding_dim 1.0 +929 86 model.relation_dim 2.0 +929 86 model.scoring_fct_norm 2.0 +929 86 optimizer.lr 0.015539032317853144 +929 86 negative_sampler.num_negs_per_pos 38.0 +929 86 training.batch_size 0.0 +929 87 model.embedding_dim 2.0 +929 87 model.relation_dim 0.0 +929 87 model.scoring_fct_norm 2.0 +929 87 optimizer.lr 0.0030585023739055864 +929 87 negative_sampler.num_negs_per_pos 64.0 +929 87 training.batch_size 2.0 +929 
88 model.embedding_dim 2.0 +929 88 model.relation_dim 0.0 +929 88 model.scoring_fct_norm 2.0 +929 88 optimizer.lr 0.002825764328523811 +929 88 negative_sampler.num_negs_per_pos 73.0 +929 88 training.batch_size 0.0 +929 89 model.embedding_dim 0.0 +929 89 model.relation_dim 1.0 +929 89 model.scoring_fct_norm 2.0 +929 89 optimizer.lr 0.04362202158724879 +929 89 negative_sampler.num_negs_per_pos 58.0 +929 89 training.batch_size 0.0 +929 90 model.embedding_dim 0.0 +929 90 model.relation_dim 0.0 +929 90 model.scoring_fct_norm 2.0 +929 90 optimizer.lr 0.03239400091868219 +929 90 negative_sampler.num_negs_per_pos 5.0 +929 90 training.batch_size 2.0 +929 91 model.embedding_dim 0.0 +929 91 model.relation_dim 2.0 +929 91 model.scoring_fct_norm 1.0 +929 91 optimizer.lr 0.016373257638144224 +929 91 negative_sampler.num_negs_per_pos 32.0 +929 91 training.batch_size 1.0 +929 92 model.embedding_dim 1.0 +929 92 model.relation_dim 0.0 +929 92 model.scoring_fct_norm 1.0 +929 92 optimizer.lr 0.001477293561268771 +929 92 negative_sampler.num_negs_per_pos 83.0 +929 92 training.batch_size 0.0 +929 93 model.embedding_dim 0.0 +929 93 model.relation_dim 1.0 +929 93 model.scoring_fct_norm 1.0 +929 93 optimizer.lr 0.0010557163825393777 +929 93 negative_sampler.num_negs_per_pos 38.0 +929 93 training.batch_size 0.0 +929 94 model.embedding_dim 2.0 +929 94 model.relation_dim 0.0 +929 94 model.scoring_fct_norm 2.0 +929 94 optimizer.lr 0.014681571879651245 +929 94 negative_sampler.num_negs_per_pos 4.0 +929 94 training.batch_size 0.0 +929 95 model.embedding_dim 2.0 +929 95 model.relation_dim 2.0 +929 95 model.scoring_fct_norm 1.0 +929 95 optimizer.lr 0.023179036534923616 +929 95 negative_sampler.num_negs_per_pos 7.0 +929 95 training.batch_size 1.0 +929 96 model.embedding_dim 0.0 +929 96 model.relation_dim 0.0 +929 96 model.scoring_fct_norm 2.0 +929 96 optimizer.lr 0.0063887122436679595 +929 96 negative_sampler.num_negs_per_pos 78.0 +929 96 training.batch_size 1.0 +929 97 model.embedding_dim 2.0 +929 
97 model.relation_dim 2.0 +929 97 model.scoring_fct_norm 2.0 +929 97 optimizer.lr 0.0016438950206016584 +929 97 negative_sampler.num_negs_per_pos 17.0 +929 97 training.batch_size 1.0 +929 98 model.embedding_dim 2.0 +929 98 model.relation_dim 0.0 +929 98 model.scoring_fct_norm 2.0 +929 98 optimizer.lr 0.0038745495127307217 +929 98 negative_sampler.num_negs_per_pos 83.0 +929 98 training.batch_size 1.0 +929 99 model.embedding_dim 0.0 +929 99 model.relation_dim 0.0 +929 99 model.scoring_fct_norm 1.0 +929 99 optimizer.lr 0.05021271207362185 +929 99 negative_sampler.num_negs_per_pos 16.0 +929 99 training.batch_size 2.0 +929 100 model.embedding_dim 2.0 +929 100 model.relation_dim 2.0 +929 100 model.scoring_fct_norm 1.0 +929 100 optimizer.lr 0.08755158489913684 +929 100 negative_sampler.num_negs_per_pos 91.0 +929 100 training.batch_size 0.0 +929 1 dataset """kinships""" +929 1 model """transr""" +929 1 loss """bceaftersigmoid""" +929 1 regularizer """no""" +929 1 optimizer """adam""" +929 1 training_loop """owa""" +929 1 negative_sampler """basic""" +929 1 evaluator """rankbased""" +929 2 dataset """kinships""" +929 2 model """transr""" +929 2 loss """bceaftersigmoid""" +929 2 regularizer """no""" +929 2 optimizer """adam""" +929 2 training_loop """owa""" +929 2 negative_sampler """basic""" +929 2 evaluator """rankbased""" +929 3 dataset """kinships""" +929 3 model """transr""" +929 3 loss """bceaftersigmoid""" +929 3 regularizer """no""" +929 3 optimizer """adam""" +929 3 training_loop """owa""" +929 3 negative_sampler """basic""" +929 3 evaluator """rankbased""" +929 4 dataset """kinships""" +929 4 model """transr""" +929 4 loss """bceaftersigmoid""" +929 4 regularizer """no""" +929 4 optimizer """adam""" +929 4 training_loop """owa""" +929 4 negative_sampler """basic""" +929 4 evaluator """rankbased""" +929 5 dataset """kinships""" +929 5 model """transr""" +929 5 loss """bceaftersigmoid""" +929 5 regularizer """no""" +929 5 optimizer """adam""" +929 5 training_loop 
"""owa""" +929 5 negative_sampler """basic""" +929 5 evaluator """rankbased""" +929 6 dataset """kinships""" +929 6 model """transr""" +929 6 loss """bceaftersigmoid""" +929 6 regularizer """no""" +929 6 optimizer """adam""" +929 6 training_loop """owa""" +929 6 negative_sampler """basic""" +929 6 evaluator """rankbased""" +929 7 dataset """kinships""" +929 7 model """transr""" +929 7 loss """bceaftersigmoid""" +929 7 regularizer """no""" +929 7 optimizer """adam""" +929 7 training_loop """owa""" +929 7 negative_sampler """basic""" +929 7 evaluator """rankbased""" +929 8 dataset """kinships""" +929 8 model """transr""" +929 8 loss """bceaftersigmoid""" +929 8 regularizer """no""" +929 8 optimizer """adam""" +929 8 training_loop """owa""" +929 8 negative_sampler """basic""" +929 8 evaluator """rankbased""" +929 9 dataset """kinships""" +929 9 model """transr""" +929 9 loss """bceaftersigmoid""" +929 9 regularizer """no""" +929 9 optimizer """adam""" +929 9 training_loop """owa""" +929 9 negative_sampler """basic""" +929 9 evaluator """rankbased""" +929 10 dataset """kinships""" +929 10 model """transr""" +929 10 loss """bceaftersigmoid""" +929 10 regularizer """no""" +929 10 optimizer """adam""" +929 10 training_loop """owa""" +929 10 negative_sampler """basic""" +929 10 evaluator """rankbased""" +929 11 dataset """kinships""" +929 11 model """transr""" +929 11 loss """bceaftersigmoid""" +929 11 regularizer """no""" +929 11 optimizer """adam""" +929 11 training_loop """owa""" +929 11 negative_sampler """basic""" +929 11 evaluator """rankbased""" +929 12 dataset """kinships""" +929 12 model """transr""" +929 12 loss """bceaftersigmoid""" +929 12 regularizer """no""" +929 12 optimizer """adam""" +929 12 training_loop """owa""" +929 12 negative_sampler """basic""" +929 12 evaluator """rankbased""" +929 13 dataset """kinships""" +929 13 model """transr""" +929 13 loss """bceaftersigmoid""" +929 13 regularizer """no""" +929 13 optimizer """adam""" +929 13 training_loop 
"""owa""" +929 13 negative_sampler """basic""" +929 13 evaluator """rankbased""" +929 14 dataset """kinships""" +929 14 model """transr""" +929 14 loss """bceaftersigmoid""" +929 14 regularizer """no""" +929 14 optimizer """adam""" +929 14 training_loop """owa""" +929 14 negative_sampler """basic""" +929 14 evaluator """rankbased""" +929 15 dataset """kinships""" +929 15 model """transr""" +929 15 loss """bceaftersigmoid""" +929 15 regularizer """no""" +929 15 optimizer """adam""" +929 15 training_loop """owa""" +929 15 negative_sampler """basic""" +929 15 evaluator """rankbased""" +929 16 dataset """kinships""" +929 16 model """transr""" +929 16 loss """bceaftersigmoid""" +929 16 regularizer """no""" +929 16 optimizer """adam""" +929 16 training_loop """owa""" +929 16 negative_sampler """basic""" +929 16 evaluator """rankbased""" +929 17 dataset """kinships""" +929 17 model """transr""" +929 17 loss """bceaftersigmoid""" +929 17 regularizer """no""" +929 17 optimizer """adam""" +929 17 training_loop """owa""" +929 17 negative_sampler """basic""" +929 17 evaluator """rankbased""" +929 18 dataset """kinships""" +929 18 model """transr""" +929 18 loss """bceaftersigmoid""" +929 18 regularizer """no""" +929 18 optimizer """adam""" +929 18 training_loop """owa""" +929 18 negative_sampler """basic""" +929 18 evaluator """rankbased""" +929 19 dataset """kinships""" +929 19 model """transr""" +929 19 loss """bceaftersigmoid""" +929 19 regularizer """no""" +929 19 optimizer """adam""" +929 19 training_loop """owa""" +929 19 negative_sampler """basic""" +929 19 evaluator """rankbased""" +929 20 dataset """kinships""" +929 20 model """transr""" +929 20 loss """bceaftersigmoid""" +929 20 regularizer """no""" +929 20 optimizer """adam""" +929 20 training_loop """owa""" +929 20 negative_sampler """basic""" +929 20 evaluator """rankbased""" +929 21 dataset """kinships""" +929 21 model """transr""" +929 21 loss """bceaftersigmoid""" +929 21 regularizer """no""" +929 21 optimizer 
"""adam""" +929 21 training_loop """owa""" +929 21 negative_sampler """basic""" +929 21 evaluator """rankbased""" +929 22 dataset """kinships""" +929 22 model """transr""" +929 22 loss """bceaftersigmoid""" +929 22 regularizer """no""" +929 22 optimizer """adam""" +929 22 training_loop """owa""" +929 22 negative_sampler """basic""" +929 22 evaluator """rankbased""" +929 23 dataset """kinships""" +929 23 model """transr""" +929 23 loss """bceaftersigmoid""" +929 23 regularizer """no""" +929 23 optimizer """adam""" +929 23 training_loop """owa""" +929 23 negative_sampler """basic""" +929 23 evaluator """rankbased""" +929 24 dataset """kinships""" +929 24 model """transr""" +929 24 loss """bceaftersigmoid""" +929 24 regularizer """no""" +929 24 optimizer """adam""" +929 24 training_loop """owa""" +929 24 negative_sampler """basic""" +929 24 evaluator """rankbased""" +929 25 dataset """kinships""" +929 25 model """transr""" +929 25 loss """bceaftersigmoid""" +929 25 regularizer """no""" +929 25 optimizer """adam""" +929 25 training_loop """owa""" +929 25 negative_sampler """basic""" +929 25 evaluator """rankbased""" +929 26 dataset """kinships""" +929 26 model """transr""" +929 26 loss """bceaftersigmoid""" +929 26 regularizer """no""" +929 26 optimizer """adam""" +929 26 training_loop """owa""" +929 26 negative_sampler """basic""" +929 26 evaluator """rankbased""" +929 27 dataset """kinships""" +929 27 model """transr""" +929 27 loss """bceaftersigmoid""" +929 27 regularizer """no""" +929 27 optimizer """adam""" +929 27 training_loop """owa""" +929 27 negative_sampler """basic""" +929 27 evaluator """rankbased""" +929 28 dataset """kinships""" +929 28 model """transr""" +929 28 loss """bceaftersigmoid""" +929 28 regularizer """no""" +929 28 optimizer """adam""" +929 28 training_loop """owa""" +929 28 negative_sampler """basic""" +929 28 evaluator """rankbased""" +929 29 dataset """kinships""" +929 29 model """transr""" +929 29 loss """bceaftersigmoid""" +929 29 
regularizer """no""" +929 29 optimizer """adam""" +929 29 training_loop """owa""" +929 29 negative_sampler """basic""" +929 29 evaluator """rankbased""" +929 30 dataset """kinships""" +929 30 model """transr""" +929 30 loss """bceaftersigmoid""" +929 30 regularizer """no""" +929 30 optimizer """adam""" +929 30 training_loop """owa""" +929 30 negative_sampler """basic""" +929 30 evaluator """rankbased""" +929 31 dataset """kinships""" +929 31 model """transr""" +929 31 loss """bceaftersigmoid""" +929 31 regularizer """no""" +929 31 optimizer """adam""" +929 31 training_loop """owa""" +929 31 negative_sampler """basic""" +929 31 evaluator """rankbased""" +929 32 dataset """kinships""" +929 32 model """transr""" +929 32 loss """bceaftersigmoid""" +929 32 regularizer """no""" +929 32 optimizer """adam""" +929 32 training_loop """owa""" +929 32 negative_sampler """basic""" +929 32 evaluator """rankbased""" +929 33 dataset """kinships""" +929 33 model """transr""" +929 33 loss """bceaftersigmoid""" +929 33 regularizer """no""" +929 33 optimizer """adam""" +929 33 training_loop """owa""" +929 33 negative_sampler """basic""" +929 33 evaluator """rankbased""" +929 34 dataset """kinships""" +929 34 model """transr""" +929 34 loss """bceaftersigmoid""" +929 34 regularizer """no""" +929 34 optimizer """adam""" +929 34 training_loop """owa""" +929 34 negative_sampler """basic""" +929 34 evaluator """rankbased""" +929 35 dataset """kinships""" +929 35 model """transr""" +929 35 loss """bceaftersigmoid""" +929 35 regularizer """no""" +929 35 optimizer """adam""" +929 35 training_loop """owa""" +929 35 negative_sampler """basic""" +929 35 evaluator """rankbased""" +929 36 dataset """kinships""" +929 36 model """transr""" +929 36 loss """bceaftersigmoid""" +929 36 regularizer """no""" +929 36 optimizer """adam""" +929 36 training_loop """owa""" +929 36 negative_sampler """basic""" +929 36 evaluator """rankbased""" +929 37 dataset """kinships""" +929 37 model """transr""" +929 37 
loss """bceaftersigmoid""" +929 37 regularizer """no""" +929 37 optimizer """adam""" +929 37 training_loop """owa""" +929 37 negative_sampler """basic""" +929 37 evaluator """rankbased""" +929 38 dataset """kinships""" +929 38 model """transr""" +929 38 loss """bceaftersigmoid""" +929 38 regularizer """no""" +929 38 optimizer """adam""" +929 38 training_loop """owa""" +929 38 negative_sampler """basic""" +929 38 evaluator """rankbased""" +929 39 dataset """kinships""" +929 39 model """transr""" +929 39 loss """bceaftersigmoid""" +929 39 regularizer """no""" +929 39 optimizer """adam""" +929 39 training_loop """owa""" +929 39 negative_sampler """basic""" +929 39 evaluator """rankbased""" +929 40 dataset """kinships""" +929 40 model """transr""" +929 40 loss """bceaftersigmoid""" +929 40 regularizer """no""" +929 40 optimizer """adam""" +929 40 training_loop """owa""" +929 40 negative_sampler """basic""" +929 40 evaluator """rankbased""" +929 41 dataset """kinships""" +929 41 model """transr""" +929 41 loss """bceaftersigmoid""" +929 41 regularizer """no""" +929 41 optimizer """adam""" +929 41 training_loop """owa""" +929 41 negative_sampler """basic""" +929 41 evaluator """rankbased""" +929 42 dataset """kinships""" +929 42 model """transr""" +929 42 loss """bceaftersigmoid""" +929 42 regularizer """no""" +929 42 optimizer """adam""" +929 42 training_loop """owa""" +929 42 negative_sampler """basic""" +929 42 evaluator """rankbased""" +929 43 dataset """kinships""" +929 43 model """transr""" +929 43 loss """bceaftersigmoid""" +929 43 regularizer """no""" +929 43 optimizer """adam""" +929 43 training_loop """owa""" +929 43 negative_sampler """basic""" +929 43 evaluator """rankbased""" +929 44 dataset """kinships""" +929 44 model """transr""" +929 44 loss """bceaftersigmoid""" +929 44 regularizer """no""" +929 44 optimizer """adam""" +929 44 training_loop """owa""" +929 44 negative_sampler """basic""" +929 44 evaluator """rankbased""" +929 45 dataset """kinships""" 
+929 45 model """transr""" +929 45 loss """bceaftersigmoid""" +929 45 regularizer """no""" +929 45 optimizer """adam""" +929 45 training_loop """owa""" +929 45 negative_sampler """basic""" +929 45 evaluator """rankbased""" +929 46 dataset """kinships""" +929 46 model """transr""" +929 46 loss """bceaftersigmoid""" +929 46 regularizer """no""" +929 46 optimizer """adam""" +929 46 training_loop """owa""" +929 46 negative_sampler """basic""" +929 46 evaluator """rankbased""" +929 47 dataset """kinships""" +929 47 model """transr""" +929 47 loss """bceaftersigmoid""" +929 47 regularizer """no""" +929 47 optimizer """adam""" +929 47 training_loop """owa""" +929 47 negative_sampler """basic""" +929 47 evaluator """rankbased""" +929 48 dataset """kinships""" +929 48 model """transr""" +929 48 loss """bceaftersigmoid""" +929 48 regularizer """no""" +929 48 optimizer """adam""" +929 48 training_loop """owa""" +929 48 negative_sampler """basic""" +929 48 evaluator """rankbased""" +929 49 dataset """kinships""" +929 49 model """transr""" +929 49 loss """bceaftersigmoid""" +929 49 regularizer """no""" +929 49 optimizer """adam""" +929 49 training_loop """owa""" +929 49 negative_sampler """basic""" +929 49 evaluator """rankbased""" +929 50 dataset """kinships""" +929 50 model """transr""" +929 50 loss """bceaftersigmoid""" +929 50 regularizer """no""" +929 50 optimizer """adam""" +929 50 training_loop """owa""" +929 50 negative_sampler """basic""" +929 50 evaluator """rankbased""" +929 51 dataset """kinships""" +929 51 model """transr""" +929 51 loss """bceaftersigmoid""" +929 51 regularizer """no""" +929 51 optimizer """adam""" +929 51 training_loop """owa""" +929 51 negative_sampler """basic""" +929 51 evaluator """rankbased""" +929 52 dataset """kinships""" +929 52 model """transr""" +929 52 loss """bceaftersigmoid""" +929 52 regularizer """no""" +929 52 optimizer """adam""" +929 52 training_loop """owa""" +929 52 negative_sampler """basic""" +929 52 evaluator 
"""rankbased""" +929 53 dataset """kinships""" +929 53 model """transr""" +929 53 loss """bceaftersigmoid""" +929 53 regularizer """no""" +929 53 optimizer """adam""" +929 53 training_loop """owa""" +929 53 negative_sampler """basic""" +929 53 evaluator """rankbased""" +929 54 dataset """kinships""" +929 54 model """transr""" +929 54 loss """bceaftersigmoid""" +929 54 regularizer """no""" +929 54 optimizer """adam""" +929 54 training_loop """owa""" +929 54 negative_sampler """basic""" +929 54 evaluator """rankbased""" +929 55 dataset """kinships""" +929 55 model """transr""" +929 55 loss """bceaftersigmoid""" +929 55 regularizer """no""" +929 55 optimizer """adam""" +929 55 training_loop """owa""" +929 55 negative_sampler """basic""" +929 55 evaluator """rankbased""" +929 56 dataset """kinships""" +929 56 model """transr""" +929 56 loss """bceaftersigmoid""" +929 56 regularizer """no""" +929 56 optimizer """adam""" +929 56 training_loop """owa""" +929 56 negative_sampler """basic""" +929 56 evaluator """rankbased""" +929 57 dataset """kinships""" +929 57 model """transr""" +929 57 loss """bceaftersigmoid""" +929 57 regularizer """no""" +929 57 optimizer """adam""" +929 57 training_loop """owa""" +929 57 negative_sampler """basic""" +929 57 evaluator """rankbased""" +929 58 dataset """kinships""" +929 58 model """transr""" +929 58 loss """bceaftersigmoid""" +929 58 regularizer """no""" +929 58 optimizer """adam""" +929 58 training_loop """owa""" +929 58 negative_sampler """basic""" +929 58 evaluator """rankbased""" +929 59 dataset """kinships""" +929 59 model """transr""" +929 59 loss """bceaftersigmoid""" +929 59 regularizer """no""" +929 59 optimizer """adam""" +929 59 training_loop """owa""" +929 59 negative_sampler """basic""" +929 59 evaluator """rankbased""" +929 60 dataset """kinships""" +929 60 model """transr""" +929 60 loss """bceaftersigmoid""" +929 60 regularizer """no""" +929 60 optimizer """adam""" +929 60 training_loop """owa""" +929 60 
negative_sampler """basic""" +929 60 evaluator """rankbased""" +929 61 dataset """kinships""" +929 61 model """transr""" +929 61 loss """bceaftersigmoid""" +929 61 regularizer """no""" +929 61 optimizer """adam""" +929 61 training_loop """owa""" +929 61 negative_sampler """basic""" +929 61 evaluator """rankbased""" +929 62 dataset """kinships""" +929 62 model """transr""" +929 62 loss """bceaftersigmoid""" +929 62 regularizer """no""" +929 62 optimizer """adam""" +929 62 training_loop """owa""" +929 62 negative_sampler """basic""" +929 62 evaluator """rankbased""" +929 63 dataset """kinships""" +929 63 model """transr""" +929 63 loss """bceaftersigmoid""" +929 63 regularizer """no""" +929 63 optimizer """adam""" +929 63 training_loop """owa""" +929 63 negative_sampler """basic""" +929 63 evaluator """rankbased""" +929 64 dataset """kinships""" +929 64 model """transr""" +929 64 loss """bceaftersigmoid""" +929 64 regularizer """no""" +929 64 optimizer """adam""" +929 64 training_loop """owa""" +929 64 negative_sampler """basic""" +929 64 evaluator """rankbased""" +929 65 dataset """kinships""" +929 65 model """transr""" +929 65 loss """bceaftersigmoid""" +929 65 regularizer """no""" +929 65 optimizer """adam""" +929 65 training_loop """owa""" +929 65 negative_sampler """basic""" +929 65 evaluator """rankbased""" +929 66 dataset """kinships""" +929 66 model """transr""" +929 66 loss """bceaftersigmoid""" +929 66 regularizer """no""" +929 66 optimizer """adam""" +929 66 training_loop """owa""" +929 66 negative_sampler """basic""" +929 66 evaluator """rankbased""" +929 67 dataset """kinships""" +929 67 model """transr""" +929 67 loss """bceaftersigmoid""" +929 67 regularizer """no""" +929 67 optimizer """adam""" +929 67 training_loop """owa""" +929 67 negative_sampler """basic""" +929 67 evaluator """rankbased""" +929 68 dataset """kinships""" +929 68 model """transr""" +929 68 loss """bceaftersigmoid""" +929 68 regularizer """no""" +929 68 optimizer """adam""" +929 68 
training_loop """owa""" +929 68 negative_sampler """basic""" +929 68 evaluator """rankbased""" +929 69 dataset """kinships""" +929 69 model """transr""" +929 69 loss """bceaftersigmoid""" +929 69 regularizer """no""" +929 69 optimizer """adam""" +929 69 training_loop """owa""" +929 69 negative_sampler """basic""" +929 69 evaluator """rankbased""" +929 70 dataset """kinships""" +929 70 model """transr""" +929 70 loss """bceaftersigmoid""" +929 70 regularizer """no""" +929 70 optimizer """adam""" +929 70 training_loop """owa""" +929 70 negative_sampler """basic""" +929 70 evaluator """rankbased""" +929 71 dataset """kinships""" +929 71 model """transr""" +929 71 loss """bceaftersigmoid""" +929 71 regularizer """no""" +929 71 optimizer """adam""" +929 71 training_loop """owa""" +929 71 negative_sampler """basic""" +929 71 evaluator """rankbased""" +929 72 dataset """kinships""" +929 72 model """transr""" +929 72 loss """bceaftersigmoid""" +929 72 regularizer """no""" +929 72 optimizer """adam""" +929 72 training_loop """owa""" +929 72 negative_sampler """basic""" +929 72 evaluator """rankbased""" +929 73 dataset """kinships""" +929 73 model """transr""" +929 73 loss """bceaftersigmoid""" +929 73 regularizer """no""" +929 73 optimizer """adam""" +929 73 training_loop """owa""" +929 73 negative_sampler """basic""" +929 73 evaluator """rankbased""" +929 74 dataset """kinships""" +929 74 model """transr""" +929 74 loss """bceaftersigmoid""" +929 74 regularizer """no""" +929 74 optimizer """adam""" +929 74 training_loop """owa""" +929 74 negative_sampler """basic""" +929 74 evaluator """rankbased""" +929 75 dataset """kinships""" +929 75 model """transr""" +929 75 loss """bceaftersigmoid""" +929 75 regularizer """no""" +929 75 optimizer """adam""" +929 75 training_loop """owa""" +929 75 negative_sampler """basic""" +929 75 evaluator """rankbased""" +929 76 dataset """kinships""" +929 76 model """transr""" +929 76 loss """bceaftersigmoid""" +929 76 regularizer """no""" +929 
76 optimizer """adam""" +929 76 training_loop """owa""" +929 76 negative_sampler """basic""" +929 76 evaluator """rankbased""" +929 77 dataset """kinships""" +929 77 model """transr""" +929 77 loss """bceaftersigmoid""" +929 77 regularizer """no""" +929 77 optimizer """adam""" +929 77 training_loop """owa""" +929 77 negative_sampler """basic""" +929 77 evaluator """rankbased""" +929 78 dataset """kinships""" +929 78 model """transr""" +929 78 loss """bceaftersigmoid""" +929 78 regularizer """no""" +929 78 optimizer """adam""" +929 78 training_loop """owa""" +929 78 negative_sampler """basic""" +929 78 evaluator """rankbased""" +929 79 dataset """kinships""" +929 79 model """transr""" +929 79 loss """bceaftersigmoid""" +929 79 regularizer """no""" +929 79 optimizer """adam""" +929 79 training_loop """owa""" +929 79 negative_sampler """basic""" +929 79 evaluator """rankbased""" +929 80 dataset """kinships""" +929 80 model """transr""" +929 80 loss """bceaftersigmoid""" +929 80 regularizer """no""" +929 80 optimizer """adam""" +929 80 training_loop """owa""" +929 80 negative_sampler """basic""" +929 80 evaluator """rankbased""" +929 81 dataset """kinships""" +929 81 model """transr""" +929 81 loss """bceaftersigmoid""" +929 81 regularizer """no""" +929 81 optimizer """adam""" +929 81 training_loop """owa""" +929 81 negative_sampler """basic""" +929 81 evaluator """rankbased""" +929 82 dataset """kinships""" +929 82 model """transr""" +929 82 loss """bceaftersigmoid""" +929 82 regularizer """no""" +929 82 optimizer """adam""" +929 82 training_loop """owa""" +929 82 negative_sampler """basic""" +929 82 evaluator """rankbased""" +929 83 dataset """kinships""" +929 83 model """transr""" +929 83 loss """bceaftersigmoid""" +929 83 regularizer """no""" +929 83 optimizer """adam""" +929 83 training_loop """owa""" +929 83 negative_sampler """basic""" +929 83 evaluator """rankbased""" +929 84 dataset """kinships""" +929 84 model """transr""" +929 84 loss """bceaftersigmoid""" 
+929 84 regularizer """no""" +929 84 optimizer """adam""" +929 84 training_loop """owa""" +929 84 negative_sampler """basic""" +929 84 evaluator """rankbased""" +929 85 dataset """kinships""" +929 85 model """transr""" +929 85 loss """bceaftersigmoid""" +929 85 regularizer """no""" +929 85 optimizer """adam""" +929 85 training_loop """owa""" +929 85 negative_sampler """basic""" +929 85 evaluator """rankbased""" +929 86 dataset """kinships""" +929 86 model """transr""" +929 86 loss """bceaftersigmoid""" +929 86 regularizer """no""" +929 86 optimizer """adam""" +929 86 training_loop """owa""" +929 86 negative_sampler """basic""" +929 86 evaluator """rankbased""" +929 87 dataset """kinships""" +929 87 model """transr""" +929 87 loss """bceaftersigmoid""" +929 87 regularizer """no""" +929 87 optimizer """adam""" +929 87 training_loop """owa""" +929 87 negative_sampler """basic""" +929 87 evaluator """rankbased""" +929 88 dataset """kinships""" +929 88 model """transr""" +929 88 loss """bceaftersigmoid""" +929 88 regularizer """no""" +929 88 optimizer """adam""" +929 88 training_loop """owa""" +929 88 negative_sampler """basic""" +929 88 evaluator """rankbased""" +929 89 dataset """kinships""" +929 89 model """transr""" +929 89 loss """bceaftersigmoid""" +929 89 regularizer """no""" +929 89 optimizer """adam""" +929 89 training_loop """owa""" +929 89 negative_sampler """basic""" +929 89 evaluator """rankbased""" +929 90 dataset """kinships""" +929 90 model """transr""" +929 90 loss """bceaftersigmoid""" +929 90 regularizer """no""" +929 90 optimizer """adam""" +929 90 training_loop """owa""" +929 90 negative_sampler """basic""" +929 90 evaluator """rankbased""" +929 91 dataset """kinships""" +929 91 model """transr""" +929 91 loss """bceaftersigmoid""" +929 91 regularizer """no""" +929 91 optimizer """adam""" +929 91 training_loop """owa""" +929 91 negative_sampler """basic""" +929 91 evaluator """rankbased""" +929 92 dataset """kinships""" +929 92 model """transr""" 
+929 92 loss """bceaftersigmoid""" +929 92 regularizer """no""" +929 92 optimizer """adam""" +929 92 training_loop """owa""" +929 92 negative_sampler """basic""" +929 92 evaluator """rankbased""" +929 93 dataset """kinships""" +929 93 model """transr""" +929 93 loss """bceaftersigmoid""" +929 93 regularizer """no""" +929 93 optimizer """adam""" +929 93 training_loop """owa""" +929 93 negative_sampler """basic""" +929 93 evaluator """rankbased""" +929 94 dataset """kinships""" +929 94 model """transr""" +929 94 loss """bceaftersigmoid""" +929 94 regularizer """no""" +929 94 optimizer """adam""" +929 94 training_loop """owa""" +929 94 negative_sampler """basic""" +929 94 evaluator """rankbased""" +929 95 dataset """kinships""" +929 95 model """transr""" +929 95 loss """bceaftersigmoid""" +929 95 regularizer """no""" +929 95 optimizer """adam""" +929 95 training_loop """owa""" +929 95 negative_sampler """basic""" +929 95 evaluator """rankbased""" +929 96 dataset """kinships""" +929 96 model """transr""" +929 96 loss """bceaftersigmoid""" +929 96 regularizer """no""" +929 96 optimizer """adam""" +929 96 training_loop """owa""" +929 96 negative_sampler """basic""" +929 96 evaluator """rankbased""" +929 97 dataset """kinships""" +929 97 model """transr""" +929 97 loss """bceaftersigmoid""" +929 97 regularizer """no""" +929 97 optimizer """adam""" +929 97 training_loop """owa""" +929 97 negative_sampler """basic""" +929 97 evaluator """rankbased""" +929 98 dataset """kinships""" +929 98 model """transr""" +929 98 loss """bceaftersigmoid""" +929 98 regularizer """no""" +929 98 optimizer """adam""" +929 98 training_loop """owa""" +929 98 negative_sampler """basic""" +929 98 evaluator """rankbased""" +929 99 dataset """kinships""" +929 99 model """transr""" +929 99 loss """bceaftersigmoid""" +929 99 regularizer """no""" +929 99 optimizer """adam""" +929 99 training_loop """owa""" +929 99 negative_sampler """basic""" +929 99 evaluator """rankbased""" +929 100 dataset 
"""kinships""" +929 100 model """transr""" +929 100 loss """bceaftersigmoid""" +929 100 regularizer """no""" +929 100 optimizer """adam""" +929 100 training_loop """owa""" +929 100 negative_sampler """basic""" +929 100 evaluator """rankbased""" +930 1 model.embedding_dim 2.0 +930 1 model.relation_dim 2.0 +930 1 model.scoring_fct_norm 1.0 +930 1 optimizer.lr 0.006437118592460345 +930 1 negative_sampler.num_negs_per_pos 3.0 +930 1 training.batch_size 2.0 +930 2 model.embedding_dim 0.0 +930 2 model.relation_dim 0.0 +930 2 model.scoring_fct_norm 1.0 +930 2 optimizer.lr 0.012242497805680142 +930 2 negative_sampler.num_negs_per_pos 62.0 +930 2 training.batch_size 0.0 +930 3 model.embedding_dim 1.0 +930 3 model.relation_dim 0.0 +930 3 model.scoring_fct_norm 2.0 +930 3 optimizer.lr 0.08151384014209699 +930 3 negative_sampler.num_negs_per_pos 49.0 +930 3 training.batch_size 1.0 +930 4 model.embedding_dim 2.0 +930 4 model.relation_dim 0.0 +930 4 model.scoring_fct_norm 2.0 +930 4 optimizer.lr 0.004595301864183548 +930 4 negative_sampler.num_negs_per_pos 84.0 +930 4 training.batch_size 0.0 +930 5 model.embedding_dim 2.0 +930 5 model.relation_dim 2.0 +930 5 model.scoring_fct_norm 1.0 +930 5 optimizer.lr 0.03274579761052521 +930 5 negative_sampler.num_negs_per_pos 18.0 +930 5 training.batch_size 0.0 +930 6 model.embedding_dim 0.0 +930 6 model.relation_dim 2.0 +930 6 model.scoring_fct_norm 2.0 +930 6 optimizer.lr 0.002280766476409132 +930 6 negative_sampler.num_negs_per_pos 0.0 +930 6 training.batch_size 2.0 +930 7 model.embedding_dim 1.0 +930 7 model.relation_dim 1.0 +930 7 model.scoring_fct_norm 2.0 +930 7 optimizer.lr 0.04555882135392794 +930 7 negative_sampler.num_negs_per_pos 90.0 +930 7 training.batch_size 2.0 +930 8 model.embedding_dim 1.0 +930 8 model.relation_dim 1.0 +930 8 model.scoring_fct_norm 2.0 +930 8 optimizer.lr 0.003124789128745435 +930 8 negative_sampler.num_negs_per_pos 75.0 +930 8 training.batch_size 0.0 +930 9 model.embedding_dim 1.0 +930 9 
model.relation_dim 2.0 +930 9 model.scoring_fct_norm 1.0 +930 9 optimizer.lr 0.016842266611013002 +930 9 negative_sampler.num_negs_per_pos 83.0 +930 9 training.batch_size 1.0 +930 10 model.embedding_dim 2.0 +930 10 model.relation_dim 0.0 +930 10 model.scoring_fct_norm 2.0 +930 10 optimizer.lr 0.007192714157918955 +930 10 negative_sampler.num_negs_per_pos 43.0 +930 10 training.batch_size 2.0 +930 11 model.embedding_dim 1.0 +930 11 model.relation_dim 0.0 +930 11 model.scoring_fct_norm 1.0 +930 11 optimizer.lr 0.012121071845437988 +930 11 negative_sampler.num_negs_per_pos 16.0 +930 11 training.batch_size 1.0 +930 12 model.embedding_dim 0.0 +930 12 model.relation_dim 2.0 +930 12 model.scoring_fct_norm 1.0 +930 12 optimizer.lr 0.015707418330939475 +930 12 negative_sampler.num_negs_per_pos 22.0 +930 12 training.batch_size 0.0 +930 13 model.embedding_dim 2.0 +930 13 model.relation_dim 2.0 +930 13 model.scoring_fct_norm 2.0 +930 13 optimizer.lr 0.002026871448976734 +930 13 negative_sampler.num_negs_per_pos 44.0 +930 13 training.batch_size 0.0 +930 14 model.embedding_dim 0.0 +930 14 model.relation_dim 0.0 +930 14 model.scoring_fct_norm 1.0 +930 14 optimizer.lr 0.024397566192886088 +930 14 negative_sampler.num_negs_per_pos 2.0 +930 14 training.batch_size 0.0 +930 15 model.embedding_dim 0.0 +930 15 model.relation_dim 0.0 +930 15 model.scoring_fct_norm 1.0 +930 15 optimizer.lr 0.0015845624390242244 +930 15 negative_sampler.num_negs_per_pos 5.0 +930 15 training.batch_size 2.0 +930 16 model.embedding_dim 0.0 +930 16 model.relation_dim 1.0 +930 16 model.scoring_fct_norm 1.0 +930 16 optimizer.lr 0.05512530777537321 +930 16 negative_sampler.num_negs_per_pos 26.0 +930 16 training.batch_size 1.0 +930 17 model.embedding_dim 2.0 +930 17 model.relation_dim 2.0 +930 17 model.scoring_fct_norm 2.0 +930 17 optimizer.lr 0.02884128447351455 +930 17 negative_sampler.num_negs_per_pos 62.0 +930 17 training.batch_size 2.0 +930 18 model.embedding_dim 0.0 +930 18 model.relation_dim 0.0 +930 18 
model.scoring_fct_norm 1.0 +930 18 optimizer.lr 0.001028631293205428 +930 18 negative_sampler.num_negs_per_pos 99.0 +930 18 training.batch_size 1.0 +930 19 model.embedding_dim 1.0 +930 19 model.relation_dim 2.0 +930 19 model.scoring_fct_norm 2.0 +930 19 optimizer.lr 0.08225647112583846 +930 19 negative_sampler.num_negs_per_pos 77.0 +930 19 training.batch_size 0.0 +930 20 model.embedding_dim 2.0 +930 20 model.relation_dim 1.0 +930 20 model.scoring_fct_norm 2.0 +930 20 optimizer.lr 0.010266697998632916 +930 20 negative_sampler.num_negs_per_pos 0.0 +930 20 training.batch_size 1.0 +930 21 model.embedding_dim 1.0 +930 21 model.relation_dim 2.0 +930 21 model.scoring_fct_norm 2.0 +930 21 optimizer.lr 0.0018133246898340068 +930 21 negative_sampler.num_negs_per_pos 92.0 +930 21 training.batch_size 0.0 +930 22 model.embedding_dim 0.0 +930 22 model.relation_dim 2.0 +930 22 model.scoring_fct_norm 1.0 +930 22 optimizer.lr 0.008157635034209606 +930 22 negative_sampler.num_negs_per_pos 37.0 +930 22 training.batch_size 0.0 +930 23 model.embedding_dim 2.0 +930 23 model.relation_dim 0.0 +930 23 model.scoring_fct_norm 1.0 +930 23 optimizer.lr 0.0015967049531106672 +930 23 negative_sampler.num_negs_per_pos 17.0 +930 23 training.batch_size 1.0 +930 24 model.embedding_dim 2.0 +930 24 model.relation_dim 1.0 +930 24 model.scoring_fct_norm 2.0 +930 24 optimizer.lr 0.009761631562678723 +930 24 negative_sampler.num_negs_per_pos 11.0 +930 24 training.batch_size 0.0 +930 25 model.embedding_dim 1.0 +930 25 model.relation_dim 0.0 +930 25 model.scoring_fct_norm 2.0 +930 25 optimizer.lr 0.01238533224388041 +930 25 negative_sampler.num_negs_per_pos 17.0 +930 25 training.batch_size 0.0 +930 26 model.embedding_dim 0.0 +930 26 model.relation_dim 0.0 +930 26 model.scoring_fct_norm 1.0 +930 26 optimizer.lr 0.005131778218394701 +930 26 negative_sampler.num_negs_per_pos 41.0 +930 26 training.batch_size 1.0 +930 27 model.embedding_dim 2.0 +930 27 model.relation_dim 0.0 +930 27 model.scoring_fct_norm 1.0 
+930 27 optimizer.lr 0.0035788932724502643 +930 27 negative_sampler.num_negs_per_pos 63.0 +930 27 training.batch_size 0.0 +930 28 model.embedding_dim 0.0 +930 28 model.relation_dim 2.0 +930 28 model.scoring_fct_norm 2.0 +930 28 optimizer.lr 0.028781410893908187 +930 28 negative_sampler.num_negs_per_pos 91.0 +930 28 training.batch_size 0.0 +930 29 model.embedding_dim 2.0 +930 29 model.relation_dim 1.0 +930 29 model.scoring_fct_norm 1.0 +930 29 optimizer.lr 0.038231386839174764 +930 29 negative_sampler.num_negs_per_pos 48.0 +930 29 training.batch_size 2.0 +930 30 model.embedding_dim 2.0 +930 30 model.relation_dim 1.0 +930 30 model.scoring_fct_norm 1.0 +930 30 optimizer.lr 0.009222763579213816 +930 30 negative_sampler.num_negs_per_pos 75.0 +930 30 training.batch_size 2.0 +930 31 model.embedding_dim 2.0 +930 31 model.relation_dim 2.0 +930 31 model.scoring_fct_norm 1.0 +930 31 optimizer.lr 0.03175757155093092 +930 31 negative_sampler.num_negs_per_pos 70.0 +930 31 training.batch_size 2.0 +930 32 model.embedding_dim 1.0 +930 32 model.relation_dim 1.0 +930 32 model.scoring_fct_norm 2.0 +930 32 optimizer.lr 0.0023734920922198507 +930 32 negative_sampler.num_negs_per_pos 13.0 +930 32 training.batch_size 2.0 +930 33 model.embedding_dim 2.0 +930 33 model.relation_dim 2.0 +930 33 model.scoring_fct_norm 1.0 +930 33 optimizer.lr 0.022410686835076767 +930 33 negative_sampler.num_negs_per_pos 43.0 +930 33 training.batch_size 1.0 +930 34 model.embedding_dim 0.0 +930 34 model.relation_dim 0.0 +930 34 model.scoring_fct_norm 2.0 +930 34 optimizer.lr 0.05230431947607883 +930 34 negative_sampler.num_negs_per_pos 12.0 +930 34 training.batch_size 1.0 +930 35 model.embedding_dim 2.0 +930 35 model.relation_dim 0.0 +930 35 model.scoring_fct_norm 1.0 +930 35 optimizer.lr 0.0010809102564692979 +930 35 negative_sampler.num_negs_per_pos 41.0 +930 35 training.batch_size 1.0 +930 36 model.embedding_dim 1.0 +930 36 model.relation_dim 1.0 +930 36 model.scoring_fct_norm 2.0 +930 36 optimizer.lr 
0.07223703269308115 +930 36 negative_sampler.num_negs_per_pos 22.0 +930 36 training.batch_size 0.0 +930 37 model.embedding_dim 0.0 +930 37 model.relation_dim 2.0 +930 37 model.scoring_fct_norm 2.0 +930 37 optimizer.lr 0.011292147359843662 +930 37 negative_sampler.num_negs_per_pos 3.0 +930 37 training.batch_size 1.0 +930 38 model.embedding_dim 2.0 +930 38 model.relation_dim 1.0 +930 38 model.scoring_fct_norm 2.0 +930 38 optimizer.lr 0.045123935808075846 +930 38 negative_sampler.num_negs_per_pos 62.0 +930 38 training.batch_size 2.0 +930 39 model.embedding_dim 1.0 +930 39 model.relation_dim 1.0 +930 39 model.scoring_fct_norm 2.0 +930 39 optimizer.lr 0.0648889906607472 +930 39 negative_sampler.num_negs_per_pos 17.0 +930 39 training.batch_size 2.0 +930 40 model.embedding_dim 2.0 +930 40 model.relation_dim 0.0 +930 40 model.scoring_fct_norm 2.0 +930 40 optimizer.lr 0.0014551104354799751 +930 40 negative_sampler.num_negs_per_pos 24.0 +930 40 training.batch_size 2.0 +930 41 model.embedding_dim 2.0 +930 41 model.relation_dim 0.0 +930 41 model.scoring_fct_norm 1.0 +930 41 optimizer.lr 0.07975394252492865 +930 41 negative_sampler.num_negs_per_pos 37.0 +930 41 training.batch_size 0.0 +930 42 model.embedding_dim 0.0 +930 42 model.relation_dim 0.0 +930 42 model.scoring_fct_norm 1.0 +930 42 optimizer.lr 0.01069830368233222 +930 42 negative_sampler.num_negs_per_pos 93.0 +930 42 training.batch_size 0.0 +930 43 model.embedding_dim 2.0 +930 43 model.relation_dim 0.0 +930 43 model.scoring_fct_norm 2.0 +930 43 optimizer.lr 0.005785532009025822 +930 43 negative_sampler.num_negs_per_pos 50.0 +930 43 training.batch_size 2.0 +930 44 model.embedding_dim 2.0 +930 44 model.relation_dim 2.0 +930 44 model.scoring_fct_norm 2.0 +930 44 optimizer.lr 0.008969523613085095 +930 44 negative_sampler.num_negs_per_pos 73.0 +930 44 training.batch_size 2.0 +930 45 model.embedding_dim 2.0 +930 45 model.relation_dim 1.0 +930 45 model.scoring_fct_norm 1.0 +930 45 optimizer.lr 0.04392213320355652 +930 45 
negative_sampler.num_negs_per_pos 98.0 +930 45 training.batch_size 2.0 +930 46 model.embedding_dim 0.0 +930 46 model.relation_dim 1.0 +930 46 model.scoring_fct_norm 2.0 +930 46 optimizer.lr 0.0014014921355874534 +930 46 negative_sampler.num_negs_per_pos 71.0 +930 46 training.batch_size 0.0 +930 47 model.embedding_dim 0.0 +930 47 model.relation_dim 0.0 +930 47 model.scoring_fct_norm 2.0 +930 47 optimizer.lr 0.0798336144911367 +930 47 negative_sampler.num_negs_per_pos 57.0 +930 47 training.batch_size 2.0 +930 48 model.embedding_dim 2.0 +930 48 model.relation_dim 2.0 +930 48 model.scoring_fct_norm 2.0 +930 48 optimizer.lr 0.01012854741696444 +930 48 negative_sampler.num_negs_per_pos 86.0 +930 48 training.batch_size 0.0 +930 49 model.embedding_dim 1.0 +930 49 model.relation_dim 1.0 +930 49 model.scoring_fct_norm 2.0 +930 49 optimizer.lr 0.015728550130935173 +930 49 negative_sampler.num_negs_per_pos 6.0 +930 49 training.batch_size 2.0 +930 50 model.embedding_dim 0.0 +930 50 model.relation_dim 0.0 +930 50 model.scoring_fct_norm 2.0 +930 50 optimizer.lr 0.00896815851702598 +930 50 negative_sampler.num_negs_per_pos 43.0 +930 50 training.batch_size 2.0 +930 51 model.embedding_dim 0.0 +930 51 model.relation_dim 0.0 +930 51 model.scoring_fct_norm 2.0 +930 51 optimizer.lr 0.02436970798662451 +930 51 negative_sampler.num_negs_per_pos 73.0 +930 51 training.batch_size 1.0 +930 52 model.embedding_dim 0.0 +930 52 model.relation_dim 2.0 +930 52 model.scoring_fct_norm 1.0 +930 52 optimizer.lr 0.001398046040761029 +930 52 negative_sampler.num_negs_per_pos 89.0 +930 52 training.batch_size 0.0 +930 53 model.embedding_dim 0.0 +930 53 model.relation_dim 0.0 +930 53 model.scoring_fct_norm 1.0 +930 53 optimizer.lr 0.07290637661873502 +930 53 negative_sampler.num_negs_per_pos 7.0 +930 53 training.batch_size 0.0 +930 54 model.embedding_dim 0.0 +930 54 model.relation_dim 0.0 +930 54 model.scoring_fct_norm 2.0 +930 54 optimizer.lr 0.06727730863569523 +930 54 negative_sampler.num_negs_per_pos 
4.0 +930 54 training.batch_size 2.0 +930 55 model.embedding_dim 1.0 +930 55 model.relation_dim 2.0 +930 55 model.scoring_fct_norm 2.0 +930 55 optimizer.lr 0.0010248506079696897 +930 55 negative_sampler.num_negs_per_pos 94.0 +930 55 training.batch_size 2.0 +930 56 model.embedding_dim 1.0 +930 56 model.relation_dim 1.0 +930 56 model.scoring_fct_norm 1.0 +930 56 optimizer.lr 0.005161014621374935 +930 56 negative_sampler.num_negs_per_pos 74.0 +930 56 training.batch_size 1.0 +930 57 model.embedding_dim 1.0 +930 57 model.relation_dim 2.0 +930 57 model.scoring_fct_norm 2.0 +930 57 optimizer.lr 0.005338363518496856 +930 57 negative_sampler.num_negs_per_pos 17.0 +930 57 training.batch_size 0.0 +930 58 model.embedding_dim 0.0 +930 58 model.relation_dim 1.0 +930 58 model.scoring_fct_norm 2.0 +930 58 optimizer.lr 0.025372910341281703 +930 58 negative_sampler.num_negs_per_pos 76.0 +930 58 training.batch_size 0.0 +930 59 model.embedding_dim 0.0 +930 59 model.relation_dim 2.0 +930 59 model.scoring_fct_norm 2.0 +930 59 optimizer.lr 0.07397023251113359 +930 59 negative_sampler.num_negs_per_pos 11.0 +930 59 training.batch_size 0.0 +930 60 model.embedding_dim 2.0 +930 60 model.relation_dim 1.0 +930 60 model.scoring_fct_norm 2.0 +930 60 optimizer.lr 0.011083360846412727 +930 60 negative_sampler.num_negs_per_pos 40.0 +930 60 training.batch_size 2.0 +930 61 model.embedding_dim 2.0 +930 61 model.relation_dim 2.0 +930 61 model.scoring_fct_norm 1.0 +930 61 optimizer.lr 0.027791833315587367 +930 61 negative_sampler.num_negs_per_pos 3.0 +930 61 training.batch_size 2.0 +930 62 model.embedding_dim 0.0 +930 62 model.relation_dim 0.0 +930 62 model.scoring_fct_norm 1.0 +930 62 optimizer.lr 0.0011100341386419948 +930 62 negative_sampler.num_negs_per_pos 88.0 +930 62 training.batch_size 1.0 +930 63 model.embedding_dim 2.0 +930 63 model.relation_dim 1.0 +930 63 model.scoring_fct_norm 1.0 +930 63 optimizer.lr 0.02142873844721512 +930 63 negative_sampler.num_negs_per_pos 17.0 +930 63 
training.batch_size 1.0 +930 64 model.embedding_dim 1.0 +930 64 model.relation_dim 0.0 +930 64 model.scoring_fct_norm 2.0 +930 64 optimizer.lr 0.0033547520517335015 +930 64 negative_sampler.num_negs_per_pos 19.0 +930 64 training.batch_size 0.0 +930 65 model.embedding_dim 1.0 +930 65 model.relation_dim 0.0 +930 65 model.scoring_fct_norm 2.0 +930 65 optimizer.lr 0.00946746361827105 +930 65 negative_sampler.num_negs_per_pos 79.0 +930 65 training.batch_size 2.0 +930 66 model.embedding_dim 2.0 +930 66 model.relation_dim 2.0 +930 66 model.scoring_fct_norm 2.0 +930 66 optimizer.lr 0.04133890619818254 +930 66 negative_sampler.num_negs_per_pos 23.0 +930 66 training.batch_size 2.0 +930 67 model.embedding_dim 0.0 +930 67 model.relation_dim 1.0 +930 67 model.scoring_fct_norm 2.0 +930 67 optimizer.lr 0.014487768010136131 +930 67 negative_sampler.num_negs_per_pos 69.0 +930 67 training.batch_size 2.0 +930 68 model.embedding_dim 1.0 +930 68 model.relation_dim 1.0 +930 68 model.scoring_fct_norm 2.0 +930 68 optimizer.lr 0.001414950648567693 +930 68 negative_sampler.num_negs_per_pos 11.0 +930 68 training.batch_size 0.0 +930 69 model.embedding_dim 2.0 +930 69 model.relation_dim 2.0 +930 69 model.scoring_fct_norm 2.0 +930 69 optimizer.lr 0.03887601395138624 +930 69 negative_sampler.num_negs_per_pos 34.0 +930 69 training.batch_size 0.0 +930 70 model.embedding_dim 1.0 +930 70 model.relation_dim 1.0 +930 70 model.scoring_fct_norm 1.0 +930 70 optimizer.lr 0.03815949818400843 +930 70 negative_sampler.num_negs_per_pos 96.0 +930 70 training.batch_size 1.0 +930 71 model.embedding_dim 1.0 +930 71 model.relation_dim 2.0 +930 71 model.scoring_fct_norm 1.0 +930 71 optimizer.lr 0.03252404057319936 +930 71 negative_sampler.num_negs_per_pos 39.0 +930 71 training.batch_size 0.0 +930 72 model.embedding_dim 2.0 +930 72 model.relation_dim 2.0 +930 72 model.scoring_fct_norm 2.0 +930 72 optimizer.lr 0.03845518755389899 +930 72 negative_sampler.num_negs_per_pos 15.0 +930 72 training.batch_size 2.0 +930 73 
model.embedding_dim 1.0 +930 73 model.relation_dim 1.0 +930 73 model.scoring_fct_norm 2.0 +930 73 optimizer.lr 0.00246965139861646 +930 73 negative_sampler.num_negs_per_pos 75.0 +930 73 training.batch_size 1.0 +930 74 model.embedding_dim 0.0 +930 74 model.relation_dim 1.0 +930 74 model.scoring_fct_norm 1.0 +930 74 optimizer.lr 0.0742550965330075 +930 74 negative_sampler.num_negs_per_pos 90.0 +930 74 training.batch_size 1.0 +930 75 model.embedding_dim 0.0 +930 75 model.relation_dim 0.0 +930 75 model.scoring_fct_norm 1.0 +930 75 optimizer.lr 0.002840519522219469 +930 75 negative_sampler.num_negs_per_pos 28.0 +930 75 training.batch_size 1.0 +930 76 model.embedding_dim 0.0 +930 76 model.relation_dim 1.0 +930 76 model.scoring_fct_norm 1.0 +930 76 optimizer.lr 0.05350436418380043 +930 76 negative_sampler.num_negs_per_pos 97.0 +930 76 training.batch_size 0.0 +930 77 model.embedding_dim 2.0 +930 77 model.relation_dim 0.0 +930 77 model.scoring_fct_norm 1.0 +930 77 optimizer.lr 0.04490334877003016 +930 77 negative_sampler.num_negs_per_pos 22.0 +930 77 training.batch_size 1.0 +930 78 model.embedding_dim 0.0 +930 78 model.relation_dim 1.0 +930 78 model.scoring_fct_norm 2.0 +930 78 optimizer.lr 0.05192926862462917 +930 78 negative_sampler.num_negs_per_pos 29.0 +930 78 training.batch_size 0.0 +930 79 model.embedding_dim 0.0 +930 79 model.relation_dim 2.0 +930 79 model.scoring_fct_norm 2.0 +930 79 optimizer.lr 0.0027638414689271727 +930 79 negative_sampler.num_negs_per_pos 30.0 +930 79 training.batch_size 2.0 +930 80 model.embedding_dim 1.0 +930 80 model.relation_dim 0.0 +930 80 model.scoring_fct_norm 1.0 +930 80 optimizer.lr 0.011459845400849662 +930 80 negative_sampler.num_negs_per_pos 58.0 +930 80 training.batch_size 0.0 +930 81 model.embedding_dim 1.0 +930 81 model.relation_dim 2.0 +930 81 model.scoring_fct_norm 2.0 +930 81 optimizer.lr 0.004150754344634375 +930 81 negative_sampler.num_negs_per_pos 10.0 +930 81 training.batch_size 2.0 +930 82 model.embedding_dim 1.0 +930 82 
model.relation_dim 2.0 +930 82 model.scoring_fct_norm 2.0 +930 82 optimizer.lr 0.001695323663014604 +930 82 negative_sampler.num_negs_per_pos 89.0 +930 82 training.batch_size 1.0 +930 83 model.embedding_dim 2.0 +930 83 model.relation_dim 1.0 +930 83 model.scoring_fct_norm 1.0 +930 83 optimizer.lr 0.004583385127322791 +930 83 negative_sampler.num_negs_per_pos 46.0 +930 83 training.batch_size 2.0 +930 84 model.embedding_dim 2.0 +930 84 model.relation_dim 0.0 +930 84 model.scoring_fct_norm 1.0 +930 84 optimizer.lr 0.0061025970965326555 +930 84 negative_sampler.num_negs_per_pos 44.0 +930 84 training.batch_size 1.0 +930 85 model.embedding_dim 1.0 +930 85 model.relation_dim 0.0 +930 85 model.scoring_fct_norm 2.0 +930 85 optimizer.lr 0.0013963068893886696 +930 85 negative_sampler.num_negs_per_pos 48.0 +930 85 training.batch_size 1.0 +930 86 model.embedding_dim 2.0 +930 86 model.relation_dim 1.0 +930 86 model.scoring_fct_norm 1.0 +930 86 optimizer.lr 0.001366069269294722 +930 86 negative_sampler.num_negs_per_pos 98.0 +930 86 training.batch_size 2.0 +930 87 model.embedding_dim 0.0 +930 87 model.relation_dim 1.0 +930 87 model.scoring_fct_norm 1.0 +930 87 optimizer.lr 0.001203217038501185 +930 87 negative_sampler.num_negs_per_pos 10.0 +930 87 training.batch_size 2.0 +930 88 model.embedding_dim 2.0 +930 88 model.relation_dim 1.0 +930 88 model.scoring_fct_norm 2.0 +930 88 optimizer.lr 0.015782919840182232 +930 88 negative_sampler.num_negs_per_pos 85.0 +930 88 training.batch_size 2.0 +930 89 model.embedding_dim 1.0 +930 89 model.relation_dim 1.0 +930 89 model.scoring_fct_norm 1.0 +930 89 optimizer.lr 0.010991329082978151 +930 89 negative_sampler.num_negs_per_pos 89.0 +930 89 training.batch_size 2.0 +930 90 model.embedding_dim 1.0 +930 90 model.relation_dim 2.0 +930 90 model.scoring_fct_norm 1.0 +930 90 optimizer.lr 0.02616301212769307 +930 90 negative_sampler.num_negs_per_pos 56.0 +930 90 training.batch_size 1.0 +930 91 model.embedding_dim 2.0 +930 91 model.relation_dim 1.0 +930 
91 model.scoring_fct_norm 2.0 +930 91 optimizer.lr 0.006999493425384641 +930 91 negative_sampler.num_negs_per_pos 65.0 +930 91 training.batch_size 1.0 +930 92 model.embedding_dim 2.0 +930 92 model.relation_dim 0.0 +930 92 model.scoring_fct_norm 1.0 +930 92 optimizer.lr 0.006730405436080212 +930 92 negative_sampler.num_negs_per_pos 49.0 +930 92 training.batch_size 1.0 +930 93 model.embedding_dim 2.0 +930 93 model.relation_dim 1.0 +930 93 model.scoring_fct_norm 2.0 +930 93 optimizer.lr 0.008343087339851797 +930 93 negative_sampler.num_negs_per_pos 60.0 +930 93 training.batch_size 0.0 +930 94 model.embedding_dim 2.0 +930 94 model.relation_dim 0.0 +930 94 model.scoring_fct_norm 1.0 +930 94 optimizer.lr 0.0017326685335807305 +930 94 negative_sampler.num_negs_per_pos 40.0 +930 94 training.batch_size 2.0 +930 95 model.embedding_dim 1.0 +930 95 model.relation_dim 0.0 +930 95 model.scoring_fct_norm 2.0 +930 95 optimizer.lr 0.0036848415975518243 +930 95 negative_sampler.num_negs_per_pos 88.0 +930 95 training.batch_size 2.0 +930 96 model.embedding_dim 2.0 +930 96 model.relation_dim 1.0 +930 96 model.scoring_fct_norm 1.0 +930 96 optimizer.lr 0.005664181065102852 +930 96 negative_sampler.num_negs_per_pos 1.0 +930 96 training.batch_size 0.0 +930 97 model.embedding_dim 2.0 +930 97 model.relation_dim 1.0 +930 97 model.scoring_fct_norm 2.0 +930 97 optimizer.lr 0.019996505629272486 +930 97 negative_sampler.num_negs_per_pos 94.0 +930 97 training.batch_size 0.0 +930 98 model.embedding_dim 2.0 +930 98 model.relation_dim 1.0 +930 98 model.scoring_fct_norm 1.0 +930 98 optimizer.lr 0.021925113600704427 +930 98 negative_sampler.num_negs_per_pos 66.0 +930 98 training.batch_size 2.0 +930 99 model.embedding_dim 2.0 +930 99 model.relation_dim 2.0 +930 99 model.scoring_fct_norm 2.0 +930 99 optimizer.lr 0.021315795495645246 +930 99 negative_sampler.num_negs_per_pos 89.0 +930 99 training.batch_size 1.0 +930 100 model.embedding_dim 0.0 +930 100 model.relation_dim 2.0 +930 100 
model.scoring_fct_norm 1.0 +930 100 optimizer.lr 0.005641320044836452 +930 100 negative_sampler.num_negs_per_pos 60.0 +930 100 training.batch_size 0.0 +930 1 dataset """kinships""" +930 1 model """transr""" +930 1 loss """softplus""" +930 1 regularizer """no""" +930 1 optimizer """adam""" +930 1 training_loop """owa""" +930 1 negative_sampler """basic""" +930 1 evaluator """rankbased""" +930 2 dataset """kinships""" +930 2 model """transr""" +930 2 loss """softplus""" +930 2 regularizer """no""" +930 2 optimizer """adam""" +930 2 training_loop """owa""" +930 2 negative_sampler """basic""" +930 2 evaluator """rankbased""" +930 3 dataset """kinships""" +930 3 model """transr""" +930 3 loss """softplus""" +930 3 regularizer """no""" +930 3 optimizer """adam""" +930 3 training_loop """owa""" +930 3 negative_sampler """basic""" +930 3 evaluator """rankbased""" +930 4 dataset """kinships""" +930 4 model """transr""" +930 4 loss """softplus""" +930 4 regularizer """no""" +930 4 optimizer """adam""" +930 4 training_loop """owa""" +930 4 negative_sampler """basic""" +930 4 evaluator """rankbased""" +930 5 dataset """kinships""" +930 5 model """transr""" +930 5 loss """softplus""" +930 5 regularizer """no""" +930 5 optimizer """adam""" +930 5 training_loop """owa""" +930 5 negative_sampler """basic""" +930 5 evaluator """rankbased""" +930 6 dataset """kinships""" +930 6 model """transr""" +930 6 loss """softplus""" +930 6 regularizer """no""" +930 6 optimizer """adam""" +930 6 training_loop """owa""" +930 6 negative_sampler """basic""" +930 6 evaluator """rankbased""" +930 7 dataset """kinships""" +930 7 model """transr""" +930 7 loss """softplus""" +930 7 regularizer """no""" +930 7 optimizer """adam""" +930 7 training_loop """owa""" +930 7 negative_sampler """basic""" +930 7 evaluator """rankbased""" +930 8 dataset """kinships""" +930 8 model """transr""" +930 8 loss """softplus""" +930 8 regularizer """no""" +930 8 optimizer """adam""" +930 8 training_loop """owa""" +930 
8 negative_sampler """basic""" +930 8 evaluator """rankbased""" +930 9 dataset """kinships""" +930 9 model """transr""" +930 9 loss """softplus""" +930 9 regularizer """no""" +930 9 optimizer """adam""" +930 9 training_loop """owa""" +930 9 negative_sampler """basic""" +930 9 evaluator """rankbased""" +930 10 dataset """kinships""" +930 10 model """transr""" +930 10 loss """softplus""" +930 10 regularizer """no""" +930 10 optimizer """adam""" +930 10 training_loop """owa""" +930 10 negative_sampler """basic""" +930 10 evaluator """rankbased""" +930 11 dataset """kinships""" +930 11 model """transr""" +930 11 loss """softplus""" +930 11 regularizer """no""" +930 11 optimizer """adam""" +930 11 training_loop """owa""" +930 11 negative_sampler """basic""" +930 11 evaluator """rankbased""" +930 12 dataset """kinships""" +930 12 model """transr""" +930 12 loss """softplus""" +930 12 regularizer """no""" +930 12 optimizer """adam""" +930 12 training_loop """owa""" +930 12 negative_sampler """basic""" +930 12 evaluator """rankbased""" +930 13 dataset """kinships""" +930 13 model """transr""" +930 13 loss """softplus""" +930 13 regularizer """no""" +930 13 optimizer """adam""" +930 13 training_loop """owa""" +930 13 negative_sampler """basic""" +930 13 evaluator """rankbased""" +930 14 dataset """kinships""" +930 14 model """transr""" +930 14 loss """softplus""" +930 14 regularizer """no""" +930 14 optimizer """adam""" +930 14 training_loop """owa""" +930 14 negative_sampler """basic""" +930 14 evaluator """rankbased""" +930 15 dataset """kinships""" +930 15 model """transr""" +930 15 loss """softplus""" +930 15 regularizer """no""" +930 15 optimizer """adam""" +930 15 training_loop """owa""" +930 15 negative_sampler """basic""" +930 15 evaluator """rankbased""" +930 16 dataset """kinships""" +930 16 model """transr""" +930 16 loss """softplus""" +930 16 regularizer """no""" +930 16 optimizer """adam""" +930 16 training_loop """owa""" +930 16 negative_sampler """basic""" 
+930 16 evaluator """rankbased""" +930 17 dataset """kinships""" +930 17 model """transr""" +930 17 loss """softplus""" +930 17 regularizer """no""" +930 17 optimizer """adam""" +930 17 training_loop """owa""" +930 17 negative_sampler """basic""" +930 17 evaluator """rankbased""" +930 18 dataset """kinships""" +930 18 model """transr""" +930 18 loss """softplus""" +930 18 regularizer """no""" +930 18 optimizer """adam""" +930 18 training_loop """owa""" +930 18 negative_sampler """basic""" +930 18 evaluator """rankbased""" +930 19 dataset """kinships""" +930 19 model """transr""" +930 19 loss """softplus""" +930 19 regularizer """no""" +930 19 optimizer """adam""" +930 19 training_loop """owa""" +930 19 negative_sampler """basic""" +930 19 evaluator """rankbased""" +930 20 dataset """kinships""" +930 20 model """transr""" +930 20 loss """softplus""" +930 20 regularizer """no""" +930 20 optimizer """adam""" +930 20 training_loop """owa""" +930 20 negative_sampler """basic""" +930 20 evaluator """rankbased""" +930 21 dataset """kinships""" +930 21 model """transr""" +930 21 loss """softplus""" +930 21 regularizer """no""" +930 21 optimizer """adam""" +930 21 training_loop """owa""" +930 21 negative_sampler """basic""" +930 21 evaluator """rankbased""" +930 22 dataset """kinships""" +930 22 model """transr""" +930 22 loss """softplus""" +930 22 regularizer """no""" +930 22 optimizer """adam""" +930 22 training_loop """owa""" +930 22 negative_sampler """basic""" +930 22 evaluator """rankbased""" +930 23 dataset """kinships""" +930 23 model """transr""" +930 23 loss """softplus""" +930 23 regularizer """no""" +930 23 optimizer """adam""" +930 23 training_loop """owa""" +930 23 negative_sampler """basic""" +930 23 evaluator """rankbased""" +930 24 dataset """kinships""" +930 24 model """transr""" +930 24 loss """softplus""" +930 24 regularizer """no""" +930 24 optimizer """adam""" +930 24 training_loop """owa""" +930 24 negative_sampler """basic""" +930 24 evaluator 
"""rankbased""" +930 25 dataset """kinships""" +930 25 model """transr""" +930 25 loss """softplus""" +930 25 regularizer """no""" +930 25 optimizer """adam""" +930 25 training_loop """owa""" +930 25 negative_sampler """basic""" +930 25 evaluator """rankbased""" +930 26 dataset """kinships""" +930 26 model """transr""" +930 26 loss """softplus""" +930 26 regularizer """no""" +930 26 optimizer """adam""" +930 26 training_loop """owa""" +930 26 negative_sampler """basic""" +930 26 evaluator """rankbased""" +930 27 dataset """kinships""" +930 27 model """transr""" +930 27 loss """softplus""" +930 27 regularizer """no""" +930 27 optimizer """adam""" +930 27 training_loop """owa""" +930 27 negative_sampler """basic""" +930 27 evaluator """rankbased""" +930 28 dataset """kinships""" +930 28 model """transr""" +930 28 loss """softplus""" +930 28 regularizer """no""" +930 28 optimizer """adam""" +930 28 training_loop """owa""" +930 28 negative_sampler """basic""" +930 28 evaluator """rankbased""" +930 29 dataset """kinships""" +930 29 model """transr""" +930 29 loss """softplus""" +930 29 regularizer """no""" +930 29 optimizer """adam""" +930 29 training_loop """owa""" +930 29 negative_sampler """basic""" +930 29 evaluator """rankbased""" +930 30 dataset """kinships""" +930 30 model """transr""" +930 30 loss """softplus""" +930 30 regularizer """no""" +930 30 optimizer """adam""" +930 30 training_loop """owa""" +930 30 negative_sampler """basic""" +930 30 evaluator """rankbased""" +930 31 dataset """kinships""" +930 31 model """transr""" +930 31 loss """softplus""" +930 31 regularizer """no""" +930 31 optimizer """adam""" +930 31 training_loop """owa""" +930 31 negative_sampler """basic""" +930 31 evaluator """rankbased""" +930 32 dataset """kinships""" +930 32 model """transr""" +930 32 loss """softplus""" +930 32 regularizer """no""" +930 32 optimizer """adam""" +930 32 training_loop """owa""" +930 32 negative_sampler """basic""" +930 32 evaluator """rankbased""" +930 33 
dataset """kinships""" +930 33 model """transr""" +930 33 loss """softplus""" +930 33 regularizer """no""" +930 33 optimizer """adam""" +930 33 training_loop """owa""" +930 33 negative_sampler """basic""" +930 33 evaluator """rankbased""" +930 34 dataset """kinships""" +930 34 model """transr""" +930 34 loss """softplus""" +930 34 regularizer """no""" +930 34 optimizer """adam""" +930 34 training_loop """owa""" +930 34 negative_sampler """basic""" +930 34 evaluator """rankbased""" +930 35 dataset """kinships""" +930 35 model """transr""" +930 35 loss """softplus""" +930 35 regularizer """no""" +930 35 optimizer """adam""" +930 35 training_loop """owa""" +930 35 negative_sampler """basic""" +930 35 evaluator """rankbased""" +930 36 dataset """kinships""" +930 36 model """transr""" +930 36 loss """softplus""" +930 36 regularizer """no""" +930 36 optimizer """adam""" +930 36 training_loop """owa""" +930 36 negative_sampler """basic""" +930 36 evaluator """rankbased""" +930 37 dataset """kinships""" +930 37 model """transr""" +930 37 loss """softplus""" +930 37 regularizer """no""" +930 37 optimizer """adam""" +930 37 training_loop """owa""" +930 37 negative_sampler """basic""" +930 37 evaluator """rankbased""" +930 38 dataset """kinships""" +930 38 model """transr""" +930 38 loss """softplus""" +930 38 regularizer """no""" +930 38 optimizer """adam""" +930 38 training_loop """owa""" +930 38 negative_sampler """basic""" +930 38 evaluator """rankbased""" +930 39 dataset """kinships""" +930 39 model """transr""" +930 39 loss """softplus""" +930 39 regularizer """no""" +930 39 optimizer """adam""" +930 39 training_loop """owa""" +930 39 negative_sampler """basic""" +930 39 evaluator """rankbased""" +930 40 dataset """kinships""" +930 40 model """transr""" +930 40 loss """softplus""" +930 40 regularizer """no""" +930 40 optimizer """adam""" +930 40 training_loop """owa""" +930 40 negative_sampler """basic""" +930 40 evaluator """rankbased""" +930 41 dataset """kinships""" 
+930 41 model """transr""" +930 41 loss """softplus""" +930 41 regularizer """no""" +930 41 optimizer """adam""" +930 41 training_loop """owa""" +930 41 negative_sampler """basic""" +930 41 evaluator """rankbased""" +930 42 dataset """kinships""" +930 42 model """transr""" +930 42 loss """softplus""" +930 42 regularizer """no""" +930 42 optimizer """adam""" +930 42 training_loop """owa""" +930 42 negative_sampler """basic""" +930 42 evaluator """rankbased""" +930 43 dataset """kinships""" +930 43 model """transr""" +930 43 loss """softplus""" +930 43 regularizer """no""" +930 43 optimizer """adam""" +930 43 training_loop """owa""" +930 43 negative_sampler """basic""" +930 43 evaluator """rankbased""" +930 44 dataset """kinships""" +930 44 model """transr""" +930 44 loss """softplus""" +930 44 regularizer """no""" +930 44 optimizer """adam""" +930 44 training_loop """owa""" +930 44 negative_sampler """basic""" +930 44 evaluator """rankbased""" +930 45 dataset """kinships""" +930 45 model """transr""" +930 45 loss """softplus""" +930 45 regularizer """no""" +930 45 optimizer """adam""" +930 45 training_loop """owa""" +930 45 negative_sampler """basic""" +930 45 evaluator """rankbased""" +930 46 dataset """kinships""" +930 46 model """transr""" +930 46 loss """softplus""" +930 46 regularizer """no""" +930 46 optimizer """adam""" +930 46 training_loop """owa""" +930 46 negative_sampler """basic""" +930 46 evaluator """rankbased""" +930 47 dataset """kinships""" +930 47 model """transr""" +930 47 loss """softplus""" +930 47 regularizer """no""" +930 47 optimizer """adam""" +930 47 training_loop """owa""" +930 47 negative_sampler """basic""" +930 47 evaluator """rankbased""" +930 48 dataset """kinships""" +930 48 model """transr""" +930 48 loss """softplus""" +930 48 regularizer """no""" +930 48 optimizer """adam""" +930 48 training_loop """owa""" +930 48 negative_sampler """basic""" +930 48 evaluator """rankbased""" +930 49 dataset """kinships""" +930 49 model 
"""transr""" +930 49 loss """softplus""" +930 49 regularizer """no""" +930 49 optimizer """adam""" +930 49 training_loop """owa""" +930 49 negative_sampler """basic""" +930 49 evaluator """rankbased""" +930 50 dataset """kinships""" +930 50 model """transr""" +930 50 loss """softplus""" +930 50 regularizer """no""" +930 50 optimizer """adam""" +930 50 training_loop """owa""" +930 50 negative_sampler """basic""" +930 50 evaluator """rankbased""" +930 51 dataset """kinships""" +930 51 model """transr""" +930 51 loss """softplus""" +930 51 regularizer """no""" +930 51 optimizer """adam""" +930 51 training_loop """owa""" +930 51 negative_sampler """basic""" +930 51 evaluator """rankbased""" +930 52 dataset """kinships""" +930 52 model """transr""" +930 52 loss """softplus""" +930 52 regularizer """no""" +930 52 optimizer """adam""" +930 52 training_loop """owa""" +930 52 negative_sampler """basic""" +930 52 evaluator """rankbased""" +930 53 dataset """kinships""" +930 53 model """transr""" +930 53 loss """softplus""" +930 53 regularizer """no""" +930 53 optimizer """adam""" +930 53 training_loop """owa""" +930 53 negative_sampler """basic""" +930 53 evaluator """rankbased""" +930 54 dataset """kinships""" +930 54 model """transr""" +930 54 loss """softplus""" +930 54 regularizer """no""" +930 54 optimizer """adam""" +930 54 training_loop """owa""" +930 54 negative_sampler """basic""" +930 54 evaluator """rankbased""" +930 55 dataset """kinships""" +930 55 model """transr""" +930 55 loss """softplus""" +930 55 regularizer """no""" +930 55 optimizer """adam""" +930 55 training_loop """owa""" +930 55 negative_sampler """basic""" +930 55 evaluator """rankbased""" +930 56 dataset """kinships""" +930 56 model """transr""" +930 56 loss """softplus""" +930 56 regularizer """no""" +930 56 optimizer """adam""" +930 56 training_loop """owa""" +930 56 negative_sampler """basic""" +930 56 evaluator """rankbased""" +930 57 dataset """kinships""" +930 57 model """transr""" +930 57 
loss """softplus""" +930 57 regularizer """no""" +930 57 optimizer """adam""" +930 57 training_loop """owa""" +930 57 negative_sampler """basic""" +930 57 evaluator """rankbased""" +930 58 dataset """kinships""" +930 58 model """transr""" +930 58 loss """softplus""" +930 58 regularizer """no""" +930 58 optimizer """adam""" +930 58 training_loop """owa""" +930 58 negative_sampler """basic""" +930 58 evaluator """rankbased""" +930 59 dataset """kinships""" +930 59 model """transr""" +930 59 loss """softplus""" +930 59 regularizer """no""" +930 59 optimizer """adam""" +930 59 training_loop """owa""" +930 59 negative_sampler """basic""" +930 59 evaluator """rankbased""" +930 60 dataset """kinships""" +930 60 model """transr""" +930 60 loss """softplus""" +930 60 regularizer """no""" +930 60 optimizer """adam""" +930 60 training_loop """owa""" +930 60 negative_sampler """basic""" +930 60 evaluator """rankbased""" +930 61 dataset """kinships""" +930 61 model """transr""" +930 61 loss """softplus""" +930 61 regularizer """no""" +930 61 optimizer """adam""" +930 61 training_loop """owa""" +930 61 negative_sampler """basic""" +930 61 evaluator """rankbased""" +930 62 dataset """kinships""" +930 62 model """transr""" +930 62 loss """softplus""" +930 62 regularizer """no""" +930 62 optimizer """adam""" +930 62 training_loop """owa""" +930 62 negative_sampler """basic""" +930 62 evaluator """rankbased""" +930 63 dataset """kinships""" +930 63 model """transr""" +930 63 loss """softplus""" +930 63 regularizer """no""" +930 63 optimizer """adam""" +930 63 training_loop """owa""" +930 63 negative_sampler """basic""" +930 63 evaluator """rankbased""" +930 64 dataset """kinships""" +930 64 model """transr""" +930 64 loss """softplus""" +930 64 regularizer """no""" +930 64 optimizer """adam""" +930 64 training_loop """owa""" +930 64 negative_sampler """basic""" +930 64 evaluator """rankbased""" +930 65 dataset """kinships""" +930 65 model """transr""" +930 65 loss """softplus""" 
+930 65 regularizer """no""" +930 65 optimizer """adam""" +930 65 training_loop """owa""" +930 65 negative_sampler """basic""" +930 65 evaluator """rankbased""" +930 66 dataset """kinships""" +930 66 model """transr""" +930 66 loss """softplus""" +930 66 regularizer """no""" +930 66 optimizer """adam""" +930 66 training_loop """owa""" +930 66 negative_sampler """basic""" +930 66 evaluator """rankbased""" +930 67 dataset """kinships""" +930 67 model """transr""" +930 67 loss """softplus""" +930 67 regularizer """no""" +930 67 optimizer """adam""" +930 67 training_loop """owa""" +930 67 negative_sampler """basic""" +930 67 evaluator """rankbased""" +930 68 dataset """kinships""" +930 68 model """transr""" +930 68 loss """softplus""" +930 68 regularizer """no""" +930 68 optimizer """adam""" +930 68 training_loop """owa""" +930 68 negative_sampler """basic""" +930 68 evaluator """rankbased""" +930 69 dataset """kinships""" +930 69 model """transr""" +930 69 loss """softplus""" +930 69 regularizer """no""" +930 69 optimizer """adam""" +930 69 training_loop """owa""" +930 69 negative_sampler """basic""" +930 69 evaluator """rankbased""" +930 70 dataset """kinships""" +930 70 model """transr""" +930 70 loss """softplus""" +930 70 regularizer """no""" +930 70 optimizer """adam""" +930 70 training_loop """owa""" +930 70 negative_sampler """basic""" +930 70 evaluator """rankbased""" +930 71 dataset """kinships""" +930 71 model """transr""" +930 71 loss """softplus""" +930 71 regularizer """no""" +930 71 optimizer """adam""" +930 71 training_loop """owa""" +930 71 negative_sampler """basic""" +930 71 evaluator """rankbased""" +930 72 dataset """kinships""" +930 72 model """transr""" +930 72 loss """softplus""" +930 72 regularizer """no""" +930 72 optimizer """adam""" +930 72 training_loop """owa""" +930 72 negative_sampler """basic""" +930 72 evaluator """rankbased""" +930 73 dataset """kinships""" +930 73 model """transr""" +930 73 loss """softplus""" +930 73 regularizer 
"""no""" +930 73 optimizer """adam""" +930 73 training_loop """owa""" +930 73 negative_sampler """basic""" +930 73 evaluator """rankbased""" +930 74 dataset """kinships""" +930 74 model """transr""" +930 74 loss """softplus""" +930 74 regularizer """no""" +930 74 optimizer """adam""" +930 74 training_loop """owa""" +930 74 negative_sampler """basic""" +930 74 evaluator """rankbased""" +930 75 dataset """kinships""" +930 75 model """transr""" +930 75 loss """softplus""" +930 75 regularizer """no""" +930 75 optimizer """adam""" +930 75 training_loop """owa""" +930 75 negative_sampler """basic""" +930 75 evaluator """rankbased""" +930 76 dataset """kinships""" +930 76 model """transr""" +930 76 loss """softplus""" +930 76 regularizer """no""" +930 76 optimizer """adam""" +930 76 training_loop """owa""" +930 76 negative_sampler """basic""" +930 76 evaluator """rankbased""" +930 77 dataset """kinships""" +930 77 model """transr""" +930 77 loss """softplus""" +930 77 regularizer """no""" +930 77 optimizer """adam""" +930 77 training_loop """owa""" +930 77 negative_sampler """basic""" +930 77 evaluator """rankbased""" +930 78 dataset """kinships""" +930 78 model """transr""" +930 78 loss """softplus""" +930 78 regularizer """no""" +930 78 optimizer """adam""" +930 78 training_loop """owa""" +930 78 negative_sampler """basic""" +930 78 evaluator """rankbased""" +930 79 dataset """kinships""" +930 79 model """transr""" +930 79 loss """softplus""" +930 79 regularizer """no""" +930 79 optimizer """adam""" +930 79 training_loop """owa""" +930 79 negative_sampler """basic""" +930 79 evaluator """rankbased""" +930 80 dataset """kinships""" +930 80 model """transr""" +930 80 loss """softplus""" +930 80 regularizer """no""" +930 80 optimizer """adam""" +930 80 training_loop """owa""" +930 80 negative_sampler """basic""" +930 80 evaluator """rankbased""" +930 81 dataset """kinships""" +930 81 model """transr""" +930 81 loss """softplus""" +930 81 regularizer """no""" +930 81 
optimizer """adam""" +930 81 training_loop """owa""" +930 81 negative_sampler """basic""" +930 81 evaluator """rankbased""" +930 82 dataset """kinships""" +930 82 model """transr""" +930 82 loss """softplus""" +930 82 regularizer """no""" +930 82 optimizer """adam""" +930 82 training_loop """owa""" +930 82 negative_sampler """basic""" +930 82 evaluator """rankbased""" +930 83 dataset """kinships""" +930 83 model """transr""" +930 83 loss """softplus""" +930 83 regularizer """no""" +930 83 optimizer """adam""" +930 83 training_loop """owa""" +930 83 negative_sampler """basic""" +930 83 evaluator """rankbased""" +930 84 dataset """kinships""" +930 84 model """transr""" +930 84 loss """softplus""" +930 84 regularizer """no""" +930 84 optimizer """adam""" +930 84 training_loop """owa""" +930 84 negative_sampler """basic""" +930 84 evaluator """rankbased""" +930 85 dataset """kinships""" +930 85 model """transr""" +930 85 loss """softplus""" +930 85 regularizer """no""" +930 85 optimizer """adam""" +930 85 training_loop """owa""" +930 85 negative_sampler """basic""" +930 85 evaluator """rankbased""" +930 86 dataset """kinships""" +930 86 model """transr""" +930 86 loss """softplus""" +930 86 regularizer """no""" +930 86 optimizer """adam""" +930 86 training_loop """owa""" +930 86 negative_sampler """basic""" +930 86 evaluator """rankbased""" +930 87 dataset """kinships""" +930 87 model """transr""" +930 87 loss """softplus""" +930 87 regularizer """no""" +930 87 optimizer """adam""" +930 87 training_loop """owa""" +930 87 negative_sampler """basic""" +930 87 evaluator """rankbased""" +930 88 dataset """kinships""" +930 88 model """transr""" +930 88 loss """softplus""" +930 88 regularizer """no""" +930 88 optimizer """adam""" +930 88 training_loop """owa""" +930 88 negative_sampler """basic""" +930 88 evaluator """rankbased""" +930 89 dataset """kinships""" +930 89 model """transr""" +930 89 loss """softplus""" +930 89 regularizer """no""" +930 89 optimizer """adam""" 
+930 89 training_loop """owa""" +930 89 negative_sampler """basic""" +930 89 evaluator """rankbased""" +930 90 dataset """kinships""" +930 90 model """transr""" +930 90 loss """softplus""" +930 90 regularizer """no""" +930 90 optimizer """adam""" +930 90 training_loop """owa""" +930 90 negative_sampler """basic""" +930 90 evaluator """rankbased""" +930 91 dataset """kinships""" +930 91 model """transr""" +930 91 loss """softplus""" +930 91 regularizer """no""" +930 91 optimizer """adam""" +930 91 training_loop """owa""" +930 91 negative_sampler """basic""" +930 91 evaluator """rankbased""" +930 92 dataset """kinships""" +930 92 model """transr""" +930 92 loss """softplus""" +930 92 regularizer """no""" +930 92 optimizer """adam""" +930 92 training_loop """owa""" +930 92 negative_sampler """basic""" +930 92 evaluator """rankbased""" +930 93 dataset """kinships""" +930 93 model """transr""" +930 93 loss """softplus""" +930 93 regularizer """no""" +930 93 optimizer """adam""" +930 93 training_loop """owa""" +930 93 negative_sampler """basic""" +930 93 evaluator """rankbased""" +930 94 dataset """kinships""" +930 94 model """transr""" +930 94 loss """softplus""" +930 94 regularizer """no""" +930 94 optimizer """adam""" +930 94 training_loop """owa""" +930 94 negative_sampler """basic""" +930 94 evaluator """rankbased""" +930 95 dataset """kinships""" +930 95 model """transr""" +930 95 loss """softplus""" +930 95 regularizer """no""" +930 95 optimizer """adam""" +930 95 training_loop """owa""" +930 95 negative_sampler """basic""" +930 95 evaluator """rankbased""" +930 96 dataset """kinships""" +930 96 model """transr""" +930 96 loss """softplus""" +930 96 regularizer """no""" +930 96 optimizer """adam""" +930 96 training_loop """owa""" +930 96 negative_sampler """basic""" +930 96 evaluator """rankbased""" +930 97 dataset """kinships""" +930 97 model """transr""" +930 97 loss """softplus""" +930 97 regularizer """no""" +930 97 optimizer """adam""" +930 97 training_loop 
"""owa""" +930 97 negative_sampler """basic""" +930 97 evaluator """rankbased""" +930 98 dataset """kinships""" +930 98 model """transr""" +930 98 loss """softplus""" +930 98 regularizer """no""" +930 98 optimizer """adam""" +930 98 training_loop """owa""" +930 98 negative_sampler """basic""" +930 98 evaluator """rankbased""" +930 99 dataset """kinships""" +930 99 model """transr""" +930 99 loss """softplus""" +930 99 regularizer """no""" +930 99 optimizer """adam""" +930 99 training_loop """owa""" +930 99 negative_sampler """basic""" +930 99 evaluator """rankbased""" +930 100 dataset """kinships""" +930 100 model """transr""" +930 100 loss """softplus""" +930 100 regularizer """no""" +930 100 optimizer """adam""" +930 100 training_loop """owa""" +930 100 negative_sampler """basic""" +930 100 evaluator """rankbased""" +931 1 model.embedding_dim 1.0 +931 1 model.relation_dim 2.0 +931 1 model.scoring_fct_norm 2.0 +931 1 loss.margin 7.631330399362544 +931 1 optimizer.lr 0.001002054923188931 +931 1 negative_sampler.num_negs_per_pos 43.0 +931 1 training.batch_size 2.0 +931 2 model.embedding_dim 0.0 +931 2 model.relation_dim 1.0 +931 2 model.scoring_fct_norm 1.0 +931 2 loss.margin 3.8940849205345986 +931 2 optimizer.lr 0.015867764274994034 +931 2 negative_sampler.num_negs_per_pos 23.0 +931 2 training.batch_size 2.0 +931 3 model.embedding_dim 0.0 +931 3 model.relation_dim 1.0 +931 3 model.scoring_fct_norm 1.0 +931 3 loss.margin 2.4293639315207063 +931 3 optimizer.lr 0.0017595463680161057 +931 3 negative_sampler.num_negs_per_pos 59.0 +931 3 training.batch_size 0.0 +931 4 model.embedding_dim 2.0 +931 4 model.relation_dim 1.0 +931 4 model.scoring_fct_norm 2.0 +931 4 loss.margin 9.482393648733073 +931 4 optimizer.lr 0.06406647287368078 +931 4 negative_sampler.num_negs_per_pos 74.0 +931 4 training.batch_size 0.0 +931 5 model.embedding_dim 0.0 +931 5 model.relation_dim 1.0 +931 5 model.scoring_fct_norm 1.0 +931 5 loss.margin 5.822383419910103 +931 5 optimizer.lr 
0.050036744010182356 +931 5 negative_sampler.num_negs_per_pos 95.0 +931 5 training.batch_size 1.0 +931 6 model.embedding_dim 2.0 +931 6 model.relation_dim 0.0 +931 6 model.scoring_fct_norm 2.0 +931 6 loss.margin 6.42713375316349 +931 6 optimizer.lr 0.014307198374806238 +931 6 negative_sampler.num_negs_per_pos 55.0 +931 6 training.batch_size 2.0 +931 7 model.embedding_dim 0.0 +931 7 model.relation_dim 0.0 +931 7 model.scoring_fct_norm 2.0 +931 7 loss.margin 6.861968486081186 +931 7 optimizer.lr 0.008370303083207324 +931 7 negative_sampler.num_negs_per_pos 73.0 +931 7 training.batch_size 1.0 +931 8 model.embedding_dim 2.0 +931 8 model.relation_dim 2.0 +931 8 model.scoring_fct_norm 2.0 +931 8 loss.margin 3.86774055943195 +931 8 optimizer.lr 0.06293995471718271 +931 8 negative_sampler.num_negs_per_pos 5.0 +931 8 training.batch_size 2.0 +931 9 model.embedding_dim 2.0 +931 9 model.relation_dim 2.0 +931 9 model.scoring_fct_norm 2.0 +931 9 loss.margin 5.543214951484153 +931 9 optimizer.lr 0.008264382016691729 +931 9 negative_sampler.num_negs_per_pos 70.0 +931 9 training.batch_size 0.0 +931 10 model.embedding_dim 0.0 +931 10 model.relation_dim 0.0 +931 10 model.scoring_fct_norm 1.0 +931 10 loss.margin 8.059807651252804 +931 10 optimizer.lr 0.0011684913723276403 +931 10 negative_sampler.num_negs_per_pos 19.0 +931 10 training.batch_size 1.0 +931 11 model.embedding_dim 2.0 +931 11 model.relation_dim 2.0 +931 11 model.scoring_fct_norm 2.0 +931 11 loss.margin 9.026248198957529 +931 11 optimizer.lr 0.0032534577515264586 +931 11 negative_sampler.num_negs_per_pos 23.0 +931 11 training.batch_size 1.0 +931 12 model.embedding_dim 2.0 +931 12 model.relation_dim 0.0 +931 12 model.scoring_fct_norm 2.0 +931 12 loss.margin 2.190953347597643 +931 12 optimizer.lr 0.0018343580147622233 +931 12 negative_sampler.num_negs_per_pos 72.0 +931 12 training.batch_size 1.0 +931 13 model.embedding_dim 0.0 +931 13 model.relation_dim 1.0 +931 13 model.scoring_fct_norm 2.0 +931 13 loss.margin 
9.60871678442681 +931 13 optimizer.lr 0.01130397231537207 +931 13 negative_sampler.num_negs_per_pos 81.0 +931 13 training.batch_size 0.0 +931 14 model.embedding_dim 2.0 +931 14 model.relation_dim 0.0 +931 14 model.scoring_fct_norm 2.0 +931 14 loss.margin 4.582540299372486 +931 14 optimizer.lr 0.03157165039044091 +931 14 negative_sampler.num_negs_per_pos 27.0 +931 14 training.batch_size 2.0 +931 15 model.embedding_dim 1.0 +931 15 model.relation_dim 1.0 +931 15 model.scoring_fct_norm 2.0 +931 15 loss.margin 3.3200238812188942 +931 15 optimizer.lr 0.009916210032804813 +931 15 negative_sampler.num_negs_per_pos 62.0 +931 15 training.batch_size 1.0 +931 16 model.embedding_dim 1.0 +931 16 model.relation_dim 0.0 +931 16 model.scoring_fct_norm 1.0 +931 16 loss.margin 9.305878930774835 +931 16 optimizer.lr 0.08638036146088747 +931 16 negative_sampler.num_negs_per_pos 71.0 +931 16 training.batch_size 2.0 +931 17 model.embedding_dim 0.0 +931 17 model.relation_dim 2.0 +931 17 model.scoring_fct_norm 2.0 +931 17 loss.margin 3.259021422607932 +931 17 optimizer.lr 0.0910276133663219 +931 17 negative_sampler.num_negs_per_pos 75.0 +931 17 training.batch_size 2.0 +931 18 model.embedding_dim 2.0 +931 18 model.relation_dim 0.0 +931 18 model.scoring_fct_norm 1.0 +931 18 loss.margin 7.695729109227271 +931 18 optimizer.lr 0.05129344031645173 +931 18 negative_sampler.num_negs_per_pos 41.0 +931 18 training.batch_size 1.0 +931 19 model.embedding_dim 1.0 +931 19 model.relation_dim 2.0 +931 19 model.scoring_fct_norm 1.0 +931 19 loss.margin 4.416990809278454 +931 19 optimizer.lr 0.08766459059864243 +931 19 negative_sampler.num_negs_per_pos 92.0 +931 19 training.batch_size 0.0 +931 20 model.embedding_dim 1.0 +931 20 model.relation_dim 0.0 +931 20 model.scoring_fct_norm 1.0 +931 20 loss.margin 2.344373590651486 +931 20 optimizer.lr 0.06116094291423311 +931 20 negative_sampler.num_negs_per_pos 61.0 +931 20 training.batch_size 1.0 +931 21 model.embedding_dim 0.0 +931 21 model.relation_dim 2.0 +931 
21 model.scoring_fct_norm 1.0 +931 21 loss.margin 2.6611923103302972 +931 21 optimizer.lr 0.0016814667237572657 +931 21 negative_sampler.num_negs_per_pos 22.0 +931 21 training.batch_size 2.0 +931 22 model.embedding_dim 2.0 +931 22 model.relation_dim 2.0 +931 22 model.scoring_fct_norm 2.0 +931 22 loss.margin 1.265152568246228 +931 22 optimizer.lr 0.001710715806428713 +931 22 negative_sampler.num_negs_per_pos 78.0 +931 22 training.batch_size 2.0 +931 23 model.embedding_dim 0.0 +931 23 model.relation_dim 1.0 +931 23 model.scoring_fct_norm 1.0 +931 23 loss.margin 9.935150484052967 +931 23 optimizer.lr 0.0012677381335617968 +931 23 negative_sampler.num_negs_per_pos 46.0 +931 23 training.batch_size 2.0 +931 24 model.embedding_dim 2.0 +931 24 model.relation_dim 2.0 +931 24 model.scoring_fct_norm 1.0 +931 24 loss.margin 6.783861164114871 +931 24 optimizer.lr 0.019311642119116998 +931 24 negative_sampler.num_negs_per_pos 37.0 +931 24 training.batch_size 0.0 +931 25 model.embedding_dim 2.0 +931 25 model.relation_dim 2.0 +931 25 model.scoring_fct_norm 2.0 +931 25 loss.margin 4.497543228882867 +931 25 optimizer.lr 0.00838888851559753 +931 25 negative_sampler.num_negs_per_pos 64.0 +931 25 training.batch_size 2.0 +931 26 model.embedding_dim 0.0 +931 26 model.relation_dim 2.0 +931 26 model.scoring_fct_norm 1.0 +931 26 loss.margin 2.343844934528491 +931 26 optimizer.lr 0.025115503099223297 +931 26 negative_sampler.num_negs_per_pos 63.0 +931 26 training.batch_size 0.0 +931 27 model.embedding_dim 0.0 +931 27 model.relation_dim 1.0 +931 27 model.scoring_fct_norm 2.0 +931 27 loss.margin 3.495254036760953 +931 27 optimizer.lr 0.0012827686937967146 +931 27 negative_sampler.num_negs_per_pos 47.0 +931 27 training.batch_size 0.0 +931 28 model.embedding_dim 0.0 +931 28 model.relation_dim 1.0 +931 28 model.scoring_fct_norm 2.0 +931 28 loss.margin 9.71793336337337 +931 28 optimizer.lr 0.016716886377777088 +931 28 negative_sampler.num_negs_per_pos 97.0 +931 28 training.batch_size 1.0 +931 29 
model.embedding_dim 1.0 +931 29 model.relation_dim 0.0 +931 29 model.scoring_fct_norm 2.0 +931 29 loss.margin 5.400017734197767 +931 29 optimizer.lr 0.002645190568751785 +931 29 negative_sampler.num_negs_per_pos 23.0 +931 29 training.batch_size 1.0 +931 30 model.embedding_dim 1.0 +931 30 model.relation_dim 0.0 +931 30 model.scoring_fct_norm 1.0 +931 30 loss.margin 6.547052986544551 +931 30 optimizer.lr 0.01383974098343287 +931 30 negative_sampler.num_negs_per_pos 35.0 +931 30 training.batch_size 1.0 +931 31 model.embedding_dim 1.0 +931 31 model.relation_dim 2.0 +931 31 model.scoring_fct_norm 2.0 +931 31 loss.margin 0.7708668579385699 +931 31 optimizer.lr 0.002076888920217061 +931 31 negative_sampler.num_negs_per_pos 5.0 +931 31 training.batch_size 1.0 +931 32 model.embedding_dim 0.0 +931 32 model.relation_dim 0.0 +931 32 model.scoring_fct_norm 2.0 +931 32 loss.margin 3.9551340884615303 +931 32 optimizer.lr 0.005537403420350713 +931 32 negative_sampler.num_negs_per_pos 18.0 +931 32 training.batch_size 2.0 +931 33 model.embedding_dim 0.0 +931 33 model.relation_dim 1.0 +931 33 model.scoring_fct_norm 2.0 +931 33 loss.margin 5.6112617533300755 +931 33 optimizer.lr 0.014274636409634004 +931 33 negative_sampler.num_negs_per_pos 73.0 +931 33 training.batch_size 0.0 +931 34 model.embedding_dim 0.0 +931 34 model.relation_dim 2.0 +931 34 model.scoring_fct_norm 2.0 +931 34 loss.margin 1.6901905084837665 +931 34 optimizer.lr 0.00966984583326629 +931 34 negative_sampler.num_negs_per_pos 11.0 +931 34 training.batch_size 2.0 +931 35 model.embedding_dim 1.0 +931 35 model.relation_dim 2.0 +931 35 model.scoring_fct_norm 2.0 +931 35 loss.margin 4.7490189472742586 +931 35 optimizer.lr 0.0022634053676518773 +931 35 negative_sampler.num_negs_per_pos 41.0 +931 35 training.batch_size 1.0 +931 36 model.embedding_dim 0.0 +931 36 model.relation_dim 2.0 +931 36 model.scoring_fct_norm 1.0 +931 36 loss.margin 5.295658743193523 +931 36 optimizer.lr 0.004949851308851917 +931 36 
negative_sampler.num_negs_per_pos 49.0 +931 36 training.batch_size 1.0 +931 37 model.embedding_dim 2.0 +931 37 model.relation_dim 1.0 +931 37 model.scoring_fct_norm 1.0 +931 37 loss.margin 8.603738021774307 +931 37 optimizer.lr 0.010723484638881491 +931 37 negative_sampler.num_negs_per_pos 1.0 +931 37 training.batch_size 0.0 +931 38 model.embedding_dim 0.0 +931 38 model.relation_dim 0.0 +931 38 model.scoring_fct_norm 2.0 +931 38 loss.margin 1.7589928651006825 +931 38 optimizer.lr 0.018943576119252896 +931 38 negative_sampler.num_negs_per_pos 48.0 +931 38 training.batch_size 0.0 +931 39 model.embedding_dim 2.0 +931 39 model.relation_dim 0.0 +931 39 model.scoring_fct_norm 1.0 +931 39 loss.margin 9.579861143201306 +931 39 optimizer.lr 0.0035524762263987658 +931 39 negative_sampler.num_negs_per_pos 13.0 +931 39 training.batch_size 2.0 +931 40 model.embedding_dim 1.0 +931 40 model.relation_dim 0.0 +931 40 model.scoring_fct_norm 1.0 +931 40 loss.margin 7.305007182918783 +931 40 optimizer.lr 0.0014868512242138903 +931 40 negative_sampler.num_negs_per_pos 40.0 +931 40 training.batch_size 0.0 +931 41 model.embedding_dim 1.0 +931 41 model.relation_dim 0.0 +931 41 model.scoring_fct_norm 2.0 +931 41 loss.margin 8.741469521762333 +931 41 optimizer.lr 0.0048467469170737285 +931 41 negative_sampler.num_negs_per_pos 32.0 +931 41 training.batch_size 0.0 +931 42 model.embedding_dim 1.0 +931 42 model.relation_dim 2.0 +931 42 model.scoring_fct_norm 2.0 +931 42 loss.margin 5.970329468651628 +931 42 optimizer.lr 0.003914944036661134 +931 42 negative_sampler.num_negs_per_pos 17.0 +931 42 training.batch_size 1.0 +931 43 model.embedding_dim 0.0 +931 43 model.relation_dim 0.0 +931 43 model.scoring_fct_norm 1.0 +931 43 loss.margin 5.9201912858587615 +931 43 optimizer.lr 0.025222818267977107 +931 43 negative_sampler.num_negs_per_pos 50.0 +931 43 training.batch_size 0.0 +931 44 model.embedding_dim 0.0 +931 44 model.relation_dim 2.0 +931 44 model.scoring_fct_norm 2.0 +931 44 loss.margin 
2.62817125291961 +931 44 optimizer.lr 0.0011718974352414163 +931 44 negative_sampler.num_negs_per_pos 28.0 +931 44 training.batch_size 0.0 +931 45 model.embedding_dim 1.0 +931 45 model.relation_dim 2.0 +931 45 model.scoring_fct_norm 1.0 +931 45 loss.margin 7.1724321527882395 +931 45 optimizer.lr 0.048689331532483636 +931 45 negative_sampler.num_negs_per_pos 84.0 +931 45 training.batch_size 1.0 +931 46 model.embedding_dim 1.0 +931 46 model.relation_dim 1.0 +931 46 model.scoring_fct_norm 1.0 +931 46 loss.margin 5.67915304613798 +931 46 optimizer.lr 0.08970339731279595 +931 46 negative_sampler.num_negs_per_pos 30.0 +931 46 training.batch_size 1.0 +931 47 model.embedding_dim 1.0 +931 47 model.relation_dim 2.0 +931 47 model.scoring_fct_norm 1.0 +931 47 loss.margin 6.463781092587767 +931 47 optimizer.lr 0.0014879264943375864 +931 47 negative_sampler.num_negs_per_pos 89.0 +931 47 training.batch_size 2.0 +931 48 model.embedding_dim 0.0 +931 48 model.relation_dim 2.0 +931 48 model.scoring_fct_norm 1.0 +931 48 loss.margin 9.85706022903509 +931 48 optimizer.lr 0.04729001328164852 +931 48 negative_sampler.num_negs_per_pos 53.0 +931 48 training.batch_size 2.0 +931 49 model.embedding_dim 0.0 +931 49 model.relation_dim 1.0 +931 49 model.scoring_fct_norm 1.0 +931 49 loss.margin 9.950371044667508 +931 49 optimizer.lr 0.0019031831056767602 +931 49 negative_sampler.num_negs_per_pos 21.0 +931 49 training.batch_size 0.0 +931 50 model.embedding_dim 0.0 +931 50 model.relation_dim 0.0 +931 50 model.scoring_fct_norm 2.0 +931 50 loss.margin 4.813136820975529 +931 50 optimizer.lr 0.003566828426541786 +931 50 negative_sampler.num_negs_per_pos 52.0 +931 50 training.batch_size 2.0 +931 51 model.embedding_dim 1.0 +931 51 model.relation_dim 2.0 +931 51 model.scoring_fct_norm 2.0 +931 51 loss.margin 8.895260354413566 +931 51 optimizer.lr 0.0073818951130388034 +931 51 negative_sampler.num_negs_per_pos 89.0 +931 51 training.batch_size 2.0 +931 52 model.embedding_dim 1.0 +931 52 model.relation_dim 
1.0 +931 52 model.scoring_fct_norm 2.0 +931 52 loss.margin 6.62324720838267 +931 52 optimizer.lr 0.008225334168737773 +931 52 negative_sampler.num_negs_per_pos 75.0 +931 52 training.batch_size 0.0 +931 53 model.embedding_dim 2.0 +931 53 model.relation_dim 0.0 +931 53 model.scoring_fct_norm 2.0 +931 53 loss.margin 2.761895479001968 +931 53 optimizer.lr 0.042436093781999404 +931 53 negative_sampler.num_negs_per_pos 88.0 +931 53 training.batch_size 2.0 +931 54 model.embedding_dim 0.0 +931 54 model.relation_dim 0.0 +931 54 model.scoring_fct_norm 1.0 +931 54 loss.margin 7.499907261822615 +931 54 optimizer.lr 0.02455491186580256 +931 54 negative_sampler.num_negs_per_pos 18.0 +931 54 training.batch_size 0.0 +931 55 model.embedding_dim 0.0 +931 55 model.relation_dim 0.0 +931 55 model.scoring_fct_norm 2.0 +931 55 loss.margin 5.778824468792838 +931 55 optimizer.lr 0.04422009587212131 +931 55 negative_sampler.num_negs_per_pos 79.0 +931 55 training.batch_size 0.0 +931 56 model.embedding_dim 1.0 +931 56 model.relation_dim 1.0 +931 56 model.scoring_fct_norm 2.0 +931 56 loss.margin 2.48735693547898 +931 56 optimizer.lr 0.08449179800096919 +931 56 negative_sampler.num_negs_per_pos 3.0 +931 56 training.batch_size 0.0 +931 57 model.embedding_dim 0.0 +931 57 model.relation_dim 0.0 +931 57 model.scoring_fct_norm 2.0 +931 57 loss.margin 8.390852441539963 +931 57 optimizer.lr 0.008897739463947315 +931 57 negative_sampler.num_negs_per_pos 26.0 +931 57 training.batch_size 2.0 +931 58 model.embedding_dim 1.0 +931 58 model.relation_dim 0.0 +931 58 model.scoring_fct_norm 2.0 +931 58 loss.margin 5.09271960032717 +931 58 optimizer.lr 0.007285619222636563 +931 58 negative_sampler.num_negs_per_pos 7.0 +931 58 training.batch_size 1.0 +931 59 model.embedding_dim 0.0 +931 59 model.relation_dim 1.0 +931 59 model.scoring_fct_norm 1.0 +931 59 loss.margin 2.793321334452675 +931 59 optimizer.lr 0.09881405633724667 +931 59 negative_sampler.num_negs_per_pos 64.0 +931 59 training.batch_size 0.0 +931 60 
model.embedding_dim 0.0 +931 60 model.relation_dim 2.0 +931 60 model.scoring_fct_norm 2.0 +931 60 loss.margin 1.2852797419612778 +931 60 optimizer.lr 0.04228707406002353 +931 60 negative_sampler.num_negs_per_pos 24.0 +931 60 training.batch_size 0.0 +931 61 model.embedding_dim 0.0 +931 61 model.relation_dim 2.0 +931 61 model.scoring_fct_norm 2.0 +931 61 loss.margin 9.776471655251601 +931 61 optimizer.lr 0.003090628117248429 +931 61 negative_sampler.num_negs_per_pos 86.0 +931 61 training.batch_size 2.0 +931 62 model.embedding_dim 0.0 +931 62 model.relation_dim 2.0 +931 62 model.scoring_fct_norm 1.0 +931 62 loss.margin 0.7944200019210317 +931 62 optimizer.lr 0.0969439064473036 +931 62 negative_sampler.num_negs_per_pos 84.0 +931 62 training.batch_size 0.0 +931 63 model.embedding_dim 1.0 +931 63 model.relation_dim 2.0 +931 63 model.scoring_fct_norm 2.0 +931 63 loss.margin 9.746656720569149 +931 63 optimizer.lr 0.011462027787839854 +931 63 negative_sampler.num_negs_per_pos 47.0 +931 63 training.batch_size 2.0 +931 64 model.embedding_dim 2.0 +931 64 model.relation_dim 2.0 +931 64 model.scoring_fct_norm 2.0 +931 64 loss.margin 0.6098101157793911 +931 64 optimizer.lr 0.0063269587651117315 +931 64 negative_sampler.num_negs_per_pos 86.0 +931 64 training.batch_size 1.0 +931 65 model.embedding_dim 0.0 +931 65 model.relation_dim 1.0 +931 65 model.scoring_fct_norm 2.0 +931 65 loss.margin 3.2223684732480913 +931 65 optimizer.lr 0.006984514451862957 +931 65 negative_sampler.num_negs_per_pos 80.0 +931 65 training.batch_size 1.0 +931 66 model.embedding_dim 0.0 +931 66 model.relation_dim 0.0 +931 66 model.scoring_fct_norm 1.0 +931 66 loss.margin 7.872434721708111 +931 66 optimizer.lr 0.028284314115358915 +931 66 negative_sampler.num_negs_per_pos 97.0 +931 66 training.batch_size 0.0 +931 67 model.embedding_dim 2.0 +931 67 model.relation_dim 0.0 +931 67 model.scoring_fct_norm 1.0 +931 67 loss.margin 4.119406379511844 +931 67 optimizer.lr 0.007684687846316281 +931 67 
negative_sampler.num_negs_per_pos 45.0 +931 67 training.batch_size 1.0 +931 68 model.embedding_dim 0.0 +931 68 model.relation_dim 1.0 +931 68 model.scoring_fct_norm 2.0 +931 68 loss.margin 3.334651288574736 +931 68 optimizer.lr 0.0016055040914809543 +931 68 negative_sampler.num_negs_per_pos 51.0 +931 68 training.batch_size 0.0 +931 69 model.embedding_dim 0.0 +931 69 model.relation_dim 2.0 +931 69 model.scoring_fct_norm 1.0 +931 69 loss.margin 9.717745398418117 +931 69 optimizer.lr 0.016442737851196605 +931 69 negative_sampler.num_negs_per_pos 25.0 +931 69 training.batch_size 1.0 +931 70 model.embedding_dim 1.0 +931 70 model.relation_dim 2.0 +931 70 model.scoring_fct_norm 2.0 +931 70 loss.margin 9.612088741967092 +931 70 optimizer.lr 0.007561215399965395 +931 70 negative_sampler.num_negs_per_pos 31.0 +931 70 training.batch_size 1.0 +931 71 model.embedding_dim 2.0 +931 71 model.relation_dim 2.0 +931 71 model.scoring_fct_norm 1.0 +931 71 loss.margin 2.95473677544717 +931 71 optimizer.lr 0.0016784739518199824 +931 71 negative_sampler.num_negs_per_pos 66.0 +931 71 training.batch_size 2.0 +931 72 model.embedding_dim 1.0 +931 72 model.relation_dim 1.0 +931 72 model.scoring_fct_norm 1.0 +931 72 loss.margin 4.384688120960453 +931 72 optimizer.lr 0.08791807242910117 +931 72 negative_sampler.num_negs_per_pos 96.0 +931 72 training.batch_size 0.0 +931 73 model.embedding_dim 2.0 +931 73 model.relation_dim 0.0 +931 73 model.scoring_fct_norm 2.0 +931 73 loss.margin 3.735153042564593 +931 73 optimizer.lr 0.0024498861847949594 +931 73 negative_sampler.num_negs_per_pos 4.0 +931 73 training.batch_size 0.0 +931 74 model.embedding_dim 0.0 +931 74 model.relation_dim 2.0 +931 74 model.scoring_fct_norm 2.0 +931 74 loss.margin 9.201686664381914 +931 74 optimizer.lr 0.011249953921753321 +931 74 negative_sampler.num_negs_per_pos 58.0 +931 74 training.batch_size 1.0 +931 75 model.embedding_dim 1.0 +931 75 model.relation_dim 2.0 +931 75 model.scoring_fct_norm 2.0 +931 75 loss.margin 
4.764049035991601 +931 75 optimizer.lr 0.025302170940594868 +931 75 negative_sampler.num_negs_per_pos 20.0 +931 75 training.batch_size 0.0 +931 76 model.embedding_dim 2.0 +931 76 model.relation_dim 2.0 +931 76 model.scoring_fct_norm 1.0 +931 76 loss.margin 8.657064758266012 +931 76 optimizer.lr 0.0020790964045261126 +931 76 negative_sampler.num_negs_per_pos 84.0 +931 76 training.batch_size 2.0 +931 77 model.embedding_dim 2.0 +931 77 model.relation_dim 2.0 +931 77 model.scoring_fct_norm 1.0 +931 77 loss.margin 7.733918244223563 +931 77 optimizer.lr 0.009053467029996192 +931 77 negative_sampler.num_negs_per_pos 88.0 +931 77 training.batch_size 0.0 +931 78 model.embedding_dim 1.0 +931 78 model.relation_dim 0.0 +931 78 model.scoring_fct_norm 2.0 +931 78 loss.margin 0.9314536896454122 +931 78 optimizer.lr 0.08436427714448143 +931 78 negative_sampler.num_negs_per_pos 99.0 +931 78 training.batch_size 1.0 +931 79 model.embedding_dim 1.0 +931 79 model.relation_dim 0.0 +931 79 model.scoring_fct_norm 1.0 +931 79 loss.margin 5.102591789341392 +931 79 optimizer.lr 0.029971491623553986 +931 79 negative_sampler.num_negs_per_pos 64.0 +931 79 training.batch_size 2.0 +931 80 model.embedding_dim 1.0 +931 80 model.relation_dim 0.0 +931 80 model.scoring_fct_norm 2.0 +931 80 loss.margin 7.354315179487591 +931 80 optimizer.lr 0.0020055232873288664 +931 80 negative_sampler.num_negs_per_pos 46.0 +931 80 training.batch_size 0.0 +931 81 model.embedding_dim 2.0 +931 81 model.relation_dim 2.0 +931 81 model.scoring_fct_norm 1.0 +931 81 loss.margin 1.0962461056974815 +931 81 optimizer.lr 0.007356686213118032 +931 81 negative_sampler.num_negs_per_pos 85.0 +931 81 training.batch_size 0.0 +931 82 model.embedding_dim 1.0 +931 82 model.relation_dim 2.0 +931 82 model.scoring_fct_norm 1.0 +931 82 loss.margin 1.365477482417821 +931 82 optimizer.lr 0.006435431753178096 +931 82 negative_sampler.num_negs_per_pos 41.0 +931 82 training.batch_size 0.0 +931 83 model.embedding_dim 0.0 +931 83 model.relation_dim 
2.0 +931 83 model.scoring_fct_norm 2.0 +931 83 loss.margin 8.215841674197438 +931 83 optimizer.lr 0.008413145409056886 +931 83 negative_sampler.num_negs_per_pos 62.0 +931 83 training.batch_size 1.0 +931 84 model.embedding_dim 0.0 +931 84 model.relation_dim 0.0 +931 84 model.scoring_fct_norm 2.0 +931 84 loss.margin 5.53680114226886 +931 84 optimizer.lr 0.03996294592023079 +931 84 negative_sampler.num_negs_per_pos 86.0 +931 84 training.batch_size 2.0 +931 85 model.embedding_dim 0.0 +931 85 model.relation_dim 2.0 +931 85 model.scoring_fct_norm 2.0 +931 85 loss.margin 1.7038675415141689 +931 85 optimizer.lr 0.009388608318762643 +931 85 negative_sampler.num_negs_per_pos 15.0 +931 85 training.batch_size 2.0 +931 86 model.embedding_dim 0.0 +931 86 model.relation_dim 2.0 +931 86 model.scoring_fct_norm 2.0 +931 86 loss.margin 7.663546311402735 +931 86 optimizer.lr 0.008059156363568071 +931 86 negative_sampler.num_negs_per_pos 80.0 +931 86 training.batch_size 2.0 +931 87 model.embedding_dim 0.0 +931 87 model.relation_dim 1.0 +931 87 model.scoring_fct_norm 2.0 +931 87 loss.margin 4.106839657023491 +931 87 optimizer.lr 0.010367508213082348 +931 87 negative_sampler.num_negs_per_pos 0.0 +931 87 training.batch_size 1.0 +931 88 model.embedding_dim 2.0 +931 88 model.relation_dim 2.0 +931 88 model.scoring_fct_norm 1.0 +931 88 loss.margin 2.031321936175776 +931 88 optimizer.lr 0.008409143583797084 +931 88 negative_sampler.num_negs_per_pos 73.0 +931 88 training.batch_size 2.0 +931 1 dataset """kinships""" +931 1 model """transr""" +931 1 loss """marginranking""" +931 1 regularizer """no""" +931 1 optimizer """adam""" +931 1 training_loop """owa""" +931 1 negative_sampler """basic""" +931 1 evaluator """rankbased""" +931 2 dataset """kinships""" +931 2 model """transr""" +931 2 loss """marginranking""" +931 2 regularizer """no""" +931 2 optimizer """adam""" +931 2 training_loop """owa""" +931 2 negative_sampler """basic""" +931 2 evaluator """rankbased""" +931 3 dataset """kinships""" 
+931 3 model """transr""" +931 3 loss """marginranking""" +931 3 regularizer """no""" +931 3 optimizer """adam""" +931 3 training_loop """owa""" +931 3 negative_sampler """basic""" +931 3 evaluator """rankbased""" +931 4 dataset """kinships""" +931 4 model """transr""" +931 4 loss """marginranking""" +931 4 regularizer """no""" +931 4 optimizer """adam""" +931 4 training_loop """owa""" +931 4 negative_sampler """basic""" +931 4 evaluator """rankbased""" +931 5 dataset """kinships""" +931 5 model """transr""" +931 5 loss """marginranking""" +931 5 regularizer """no""" +931 5 optimizer """adam""" +931 5 training_loop """owa""" +931 5 negative_sampler """basic""" +931 5 evaluator """rankbased""" +931 6 dataset """kinships""" +931 6 model """transr""" +931 6 loss """marginranking""" +931 6 regularizer """no""" +931 6 optimizer """adam""" +931 6 training_loop """owa""" +931 6 negative_sampler """basic""" +931 6 evaluator """rankbased""" +931 7 dataset """kinships""" +931 7 model """transr""" +931 7 loss """marginranking""" +931 7 regularizer """no""" +931 7 optimizer """adam""" +931 7 training_loop """owa""" +931 7 negative_sampler """basic""" +931 7 evaluator """rankbased""" +931 8 dataset """kinships""" +931 8 model """transr""" +931 8 loss """marginranking""" +931 8 regularizer """no""" +931 8 optimizer """adam""" +931 8 training_loop """owa""" +931 8 negative_sampler """basic""" +931 8 evaluator """rankbased""" +931 9 dataset """kinships""" +931 9 model """transr""" +931 9 loss """marginranking""" +931 9 regularizer """no""" +931 9 optimizer """adam""" +931 9 training_loop """owa""" +931 9 negative_sampler """basic""" +931 9 evaluator """rankbased""" +931 10 dataset """kinships""" +931 10 model """transr""" +931 10 loss """marginranking""" +931 10 regularizer """no""" +931 10 optimizer """adam""" +931 10 training_loop """owa""" +931 10 negative_sampler """basic""" +931 10 evaluator """rankbased""" +931 11 dataset """kinships""" +931 11 model """transr""" +931 11 
loss """marginranking""" +931 11 regularizer """no""" +931 11 optimizer """adam""" +931 11 training_loop """owa""" +931 11 negative_sampler """basic""" +931 11 evaluator """rankbased""" +931 12 dataset """kinships""" +931 12 model """transr""" +931 12 loss """marginranking""" +931 12 regularizer """no""" +931 12 optimizer """adam""" +931 12 training_loop """owa""" +931 12 negative_sampler """basic""" +931 12 evaluator """rankbased""" +931 13 dataset """kinships""" +931 13 model """transr""" +931 13 loss """marginranking""" +931 13 regularizer """no""" +931 13 optimizer """adam""" +931 13 training_loop """owa""" +931 13 negative_sampler """basic""" +931 13 evaluator """rankbased""" +931 14 dataset """kinships""" +931 14 model """transr""" +931 14 loss """marginranking""" +931 14 regularizer """no""" +931 14 optimizer """adam""" +931 14 training_loop """owa""" +931 14 negative_sampler """basic""" +931 14 evaluator """rankbased""" +931 15 dataset """kinships""" +931 15 model """transr""" +931 15 loss """marginranking""" +931 15 regularizer """no""" +931 15 optimizer """adam""" +931 15 training_loop """owa""" +931 15 negative_sampler """basic""" +931 15 evaluator """rankbased""" +931 16 dataset """kinships""" +931 16 model """transr""" +931 16 loss """marginranking""" +931 16 regularizer """no""" +931 16 optimizer """adam""" +931 16 training_loop """owa""" +931 16 negative_sampler """basic""" +931 16 evaluator """rankbased""" +931 17 dataset """kinships""" +931 17 model """transr""" +931 17 loss """marginranking""" +931 17 regularizer """no""" +931 17 optimizer """adam""" +931 17 training_loop """owa""" +931 17 negative_sampler """basic""" +931 17 evaluator """rankbased""" +931 18 dataset """kinships""" +931 18 model """transr""" +931 18 loss """marginranking""" +931 18 regularizer """no""" +931 18 optimizer """adam""" +931 18 training_loop """owa""" +931 18 negative_sampler """basic""" +931 18 evaluator """rankbased""" +931 19 dataset """kinships""" +931 19 model 
"""transr""" +931 19 loss """marginranking""" +931 19 regularizer """no""" +931 19 optimizer """adam""" +931 19 training_loop """owa""" +931 19 negative_sampler """basic""" +931 19 evaluator """rankbased""" +931 20 dataset """kinships""" +931 20 model """transr""" +931 20 loss """marginranking""" +931 20 regularizer """no""" +931 20 optimizer """adam""" +931 20 training_loop """owa""" +931 20 negative_sampler """basic""" +931 20 evaluator """rankbased""" +931 21 dataset """kinships""" +931 21 model """transr""" +931 21 loss """marginranking""" +931 21 regularizer """no""" +931 21 optimizer """adam""" +931 21 training_loop """owa""" +931 21 negative_sampler """basic""" +931 21 evaluator """rankbased""" +931 22 dataset """kinships""" +931 22 model """transr""" +931 22 loss """marginranking""" +931 22 regularizer """no""" +931 22 optimizer """adam""" +931 22 training_loop """owa""" +931 22 negative_sampler """basic""" +931 22 evaluator """rankbased""" +931 23 dataset """kinships""" +931 23 model """transr""" +931 23 loss """marginranking""" +931 23 regularizer """no""" +931 23 optimizer """adam""" +931 23 training_loop """owa""" +931 23 negative_sampler """basic""" +931 23 evaluator """rankbased""" +931 24 dataset """kinships""" +931 24 model """transr""" +931 24 loss """marginranking""" +931 24 regularizer """no""" +931 24 optimizer """adam""" +931 24 training_loop """owa""" +931 24 negative_sampler """basic""" +931 24 evaluator """rankbased""" +931 25 dataset """kinships""" +931 25 model """transr""" +931 25 loss """marginranking""" +931 25 regularizer """no""" +931 25 optimizer """adam""" +931 25 training_loop """owa""" +931 25 negative_sampler """basic""" +931 25 evaluator """rankbased""" +931 26 dataset """kinships""" +931 26 model """transr""" +931 26 loss """marginranking""" +931 26 regularizer """no""" +931 26 optimizer """adam""" +931 26 training_loop """owa""" +931 26 negative_sampler """basic""" +931 26 evaluator """rankbased""" +931 27 dataset 
"""kinships""" +931 27 model """transr""" +931 27 loss """marginranking""" +931 27 regularizer """no""" +931 27 optimizer """adam""" +931 27 training_loop """owa""" +931 27 negative_sampler """basic""" +931 27 evaluator """rankbased""" +931 28 dataset """kinships""" +931 28 model """transr""" +931 28 loss """marginranking""" +931 28 regularizer """no""" +931 28 optimizer """adam""" +931 28 training_loop """owa""" +931 28 negative_sampler """basic""" +931 28 evaluator """rankbased""" +931 29 dataset """kinships""" +931 29 model """transr""" +931 29 loss """marginranking""" +931 29 regularizer """no""" +931 29 optimizer """adam""" +931 29 training_loop """owa""" +931 29 negative_sampler """basic""" +931 29 evaluator """rankbased""" +931 30 dataset """kinships""" +931 30 model """transr""" +931 30 loss """marginranking""" +931 30 regularizer """no""" +931 30 optimizer """adam""" +931 30 training_loop """owa""" +931 30 negative_sampler """basic""" +931 30 evaluator """rankbased""" +931 31 dataset """kinships""" +931 31 model """transr""" +931 31 loss """marginranking""" +931 31 regularizer """no""" +931 31 optimizer """adam""" +931 31 training_loop """owa""" +931 31 negative_sampler """basic""" +931 31 evaluator """rankbased""" +931 32 dataset """kinships""" +931 32 model """transr""" +931 32 loss """marginranking""" +931 32 regularizer """no""" +931 32 optimizer """adam""" +931 32 training_loop """owa""" +931 32 negative_sampler """basic""" +931 32 evaluator """rankbased""" +931 33 dataset """kinships""" +931 33 model """transr""" +931 33 loss """marginranking""" +931 33 regularizer """no""" +931 33 optimizer """adam""" +931 33 training_loop """owa""" +931 33 negative_sampler """basic""" +931 33 evaluator """rankbased""" +931 34 dataset """kinships""" +931 34 model """transr""" +931 34 loss """marginranking""" +931 34 regularizer """no""" +931 34 optimizer """adam""" +931 34 training_loop """owa""" +931 34 negative_sampler """basic""" +931 34 evaluator """rankbased""" 
+931 35 dataset """kinships""" +931 35 model """transr""" +931 35 loss """marginranking""" +931 35 regularizer """no""" +931 35 optimizer """adam""" +931 35 training_loop """owa""" +931 35 negative_sampler """basic""" +931 35 evaluator """rankbased""" +931 36 dataset """kinships""" +931 36 model """transr""" +931 36 loss """marginranking""" +931 36 regularizer """no""" +931 36 optimizer """adam""" +931 36 training_loop """owa""" +931 36 negative_sampler """basic""" +931 36 evaluator """rankbased""" +931 37 dataset """kinships""" +931 37 model """transr""" +931 37 loss """marginranking""" +931 37 regularizer """no""" +931 37 optimizer """adam""" +931 37 training_loop """owa""" +931 37 negative_sampler """basic""" +931 37 evaluator """rankbased""" +931 38 dataset """kinships""" +931 38 model """transr""" +931 38 loss """marginranking""" +931 38 regularizer """no""" +931 38 optimizer """adam""" +931 38 training_loop """owa""" +931 38 negative_sampler """basic""" +931 38 evaluator """rankbased""" +931 39 dataset """kinships""" +931 39 model """transr""" +931 39 loss """marginranking""" +931 39 regularizer """no""" +931 39 optimizer """adam""" +931 39 training_loop """owa""" +931 39 negative_sampler """basic""" +931 39 evaluator """rankbased""" +931 40 dataset """kinships""" +931 40 model """transr""" +931 40 loss """marginranking""" +931 40 regularizer """no""" +931 40 optimizer """adam""" +931 40 training_loop """owa""" +931 40 negative_sampler """basic""" +931 40 evaluator """rankbased""" +931 41 dataset """kinships""" +931 41 model """transr""" +931 41 loss """marginranking""" +931 41 regularizer """no""" +931 41 optimizer """adam""" +931 41 training_loop """owa""" +931 41 negative_sampler """basic""" +931 41 evaluator """rankbased""" +931 42 dataset """kinships""" +931 42 model """transr""" +931 42 loss """marginranking""" +931 42 regularizer """no""" +931 42 optimizer """adam""" +931 42 training_loop """owa""" +931 42 negative_sampler """basic""" +931 42 evaluator 
"""rankbased""" +931 43 dataset """kinships""" +931 43 model """transr""" +931 43 loss """marginranking""" +931 43 regularizer """no""" +931 43 optimizer """adam""" +931 43 training_loop """owa""" +931 43 negative_sampler """basic""" +931 43 evaluator """rankbased""" +931 44 dataset """kinships""" +931 44 model """transr""" +931 44 loss """marginranking""" +931 44 regularizer """no""" +931 44 optimizer """adam""" +931 44 training_loop """owa""" +931 44 negative_sampler """basic""" +931 44 evaluator """rankbased""" +931 45 dataset """kinships""" +931 45 model """transr""" +931 45 loss """marginranking""" +931 45 regularizer """no""" +931 45 optimizer """adam""" +931 45 training_loop """owa""" +931 45 negative_sampler """basic""" +931 45 evaluator """rankbased""" +931 46 dataset """kinships""" +931 46 model """transr""" +931 46 loss """marginranking""" +931 46 regularizer """no""" +931 46 optimizer """adam""" +931 46 training_loop """owa""" +931 46 negative_sampler """basic""" +931 46 evaluator """rankbased""" +931 47 dataset """kinships""" +931 47 model """transr""" +931 47 loss """marginranking""" +931 47 regularizer """no""" +931 47 optimizer """adam""" +931 47 training_loop """owa""" +931 47 negative_sampler """basic""" +931 47 evaluator """rankbased""" +931 48 dataset """kinships""" +931 48 model """transr""" +931 48 loss """marginranking""" +931 48 regularizer """no""" +931 48 optimizer """adam""" +931 48 training_loop """owa""" +931 48 negative_sampler """basic""" +931 48 evaluator """rankbased""" +931 49 dataset """kinships""" +931 49 model """transr""" +931 49 loss """marginranking""" +931 49 regularizer """no""" +931 49 optimizer """adam""" +931 49 training_loop """owa""" +931 49 negative_sampler """basic""" +931 49 evaluator """rankbased""" +931 50 dataset """kinships""" +931 50 model """transr""" +931 50 loss """marginranking""" +931 50 regularizer """no""" +931 50 optimizer """adam""" +931 50 training_loop """owa""" +931 50 negative_sampler """basic""" 
+931 50 evaluator """rankbased""" +931 51 dataset """kinships""" +931 51 model """transr""" +931 51 loss """marginranking""" +931 51 regularizer """no""" +931 51 optimizer """adam""" +931 51 training_loop """owa""" +931 51 negative_sampler """basic""" +931 51 evaluator """rankbased""" +931 52 dataset """kinships""" +931 52 model """transr""" +931 52 loss """marginranking""" +931 52 regularizer """no""" +931 52 optimizer """adam""" +931 52 training_loop """owa""" +931 52 negative_sampler """basic""" +931 52 evaluator """rankbased""" +931 53 dataset """kinships""" +931 53 model """transr""" +931 53 loss """marginranking""" +931 53 regularizer """no""" +931 53 optimizer """adam""" +931 53 training_loop """owa""" +931 53 negative_sampler """basic""" +931 53 evaluator """rankbased""" +931 54 dataset """kinships""" +931 54 model """transr""" +931 54 loss """marginranking""" +931 54 regularizer """no""" +931 54 optimizer """adam""" +931 54 training_loop """owa""" +931 54 negative_sampler """basic""" +931 54 evaluator """rankbased""" +931 55 dataset """kinships""" +931 55 model """transr""" +931 55 loss """marginranking""" +931 55 regularizer """no""" +931 55 optimizer """adam""" +931 55 training_loop """owa""" +931 55 negative_sampler """basic""" +931 55 evaluator """rankbased""" +931 56 dataset """kinships""" +931 56 model """transr""" +931 56 loss """marginranking""" +931 56 regularizer """no""" +931 56 optimizer """adam""" +931 56 training_loop """owa""" +931 56 negative_sampler """basic""" +931 56 evaluator """rankbased""" +931 57 dataset """kinships""" +931 57 model """transr""" +931 57 loss """marginranking""" +931 57 regularizer """no""" +931 57 optimizer """adam""" +931 57 training_loop """owa""" +931 57 negative_sampler """basic""" +931 57 evaluator """rankbased""" +931 58 dataset """kinships""" +931 58 model """transr""" +931 58 loss """marginranking""" +931 58 regularizer """no""" +931 58 optimizer """adam""" +931 58 training_loop """owa""" +931 58 
negative_sampler """basic""" +931 58 evaluator """rankbased""" +931 59 dataset """kinships""" +931 59 model """transr""" +931 59 loss """marginranking""" +931 59 regularizer """no""" +931 59 optimizer """adam""" +931 59 training_loop """owa""" +931 59 negative_sampler """basic""" +931 59 evaluator """rankbased""" +931 60 dataset """kinships""" +931 60 model """transr""" +931 60 loss """marginranking""" +931 60 regularizer """no""" +931 60 optimizer """adam""" +931 60 training_loop """owa""" +931 60 negative_sampler """basic""" +931 60 evaluator """rankbased""" +931 61 dataset """kinships""" +931 61 model """transr""" +931 61 loss """marginranking""" +931 61 regularizer """no""" +931 61 optimizer """adam""" +931 61 training_loop """owa""" +931 61 negative_sampler """basic""" +931 61 evaluator """rankbased""" +931 62 dataset """kinships""" +931 62 model """transr""" +931 62 loss """marginranking""" +931 62 regularizer """no""" +931 62 optimizer """adam""" +931 62 training_loop """owa""" +931 62 negative_sampler """basic""" +931 62 evaluator """rankbased""" +931 63 dataset """kinships""" +931 63 model """transr""" +931 63 loss """marginranking""" +931 63 regularizer """no""" +931 63 optimizer """adam""" +931 63 training_loop """owa""" +931 63 negative_sampler """basic""" +931 63 evaluator """rankbased""" +931 64 dataset """kinships""" +931 64 model """transr""" +931 64 loss """marginranking""" +931 64 regularizer """no""" +931 64 optimizer """adam""" +931 64 training_loop """owa""" +931 64 negative_sampler """basic""" +931 64 evaluator """rankbased""" +931 65 dataset """kinships""" +931 65 model """transr""" +931 65 loss """marginranking""" +931 65 regularizer """no""" +931 65 optimizer """adam""" +931 65 training_loop """owa""" +931 65 negative_sampler """basic""" +931 65 evaluator """rankbased""" +931 66 dataset """kinships""" +931 66 model """transr""" +931 66 loss """marginranking""" +931 66 regularizer """no""" +931 66 optimizer """adam""" +931 66 training_loop 
"""owa""" +931 66 negative_sampler """basic""" +931 66 evaluator """rankbased""" +931 67 dataset """kinships""" +931 67 model """transr""" +931 67 loss """marginranking""" +931 67 regularizer """no""" +931 67 optimizer """adam""" +931 67 training_loop """owa""" +931 67 negative_sampler """basic""" +931 67 evaluator """rankbased""" +931 68 dataset """kinships""" +931 68 model """transr""" +931 68 loss """marginranking""" +931 68 regularizer """no""" +931 68 optimizer """adam""" +931 68 training_loop """owa""" +931 68 negative_sampler """basic""" +931 68 evaluator """rankbased""" +931 69 dataset """kinships""" +931 69 model """transr""" +931 69 loss """marginranking""" +931 69 regularizer """no""" +931 69 optimizer """adam""" +931 69 training_loop """owa""" +931 69 negative_sampler """basic""" +931 69 evaluator """rankbased""" +931 70 dataset """kinships""" +931 70 model """transr""" +931 70 loss """marginranking""" +931 70 regularizer """no""" +931 70 optimizer """adam""" +931 70 training_loop """owa""" +931 70 negative_sampler """basic""" +931 70 evaluator """rankbased""" +931 71 dataset """kinships""" +931 71 model """transr""" +931 71 loss """marginranking""" +931 71 regularizer """no""" +931 71 optimizer """adam""" +931 71 training_loop """owa""" +931 71 negative_sampler """basic""" +931 71 evaluator """rankbased""" +931 72 dataset """kinships""" +931 72 model """transr""" +931 72 loss """marginranking""" +931 72 regularizer """no""" +931 72 optimizer """adam""" +931 72 training_loop """owa""" +931 72 negative_sampler """basic""" +931 72 evaluator """rankbased""" +931 73 dataset """kinships""" +931 73 model """transr""" +931 73 loss """marginranking""" +931 73 regularizer """no""" +931 73 optimizer """adam""" +931 73 training_loop """owa""" +931 73 negative_sampler """basic""" +931 73 evaluator """rankbased""" +931 74 dataset """kinships""" +931 74 model """transr""" +931 74 loss """marginranking""" +931 74 regularizer """no""" +931 74 optimizer """adam""" +931 
74 training_loop """owa""" +931 74 negative_sampler """basic""" +931 74 evaluator """rankbased""" +931 75 dataset """kinships""" +931 75 model """transr""" +931 75 loss """marginranking""" +931 75 regularizer """no""" +931 75 optimizer """adam""" +931 75 training_loop """owa""" +931 75 negative_sampler """basic""" +931 75 evaluator """rankbased""" +931 76 dataset """kinships""" +931 76 model """transr""" +931 76 loss """marginranking""" +931 76 regularizer """no""" +931 76 optimizer """adam""" +931 76 training_loop """owa""" +931 76 negative_sampler """basic""" +931 76 evaluator """rankbased""" +931 77 dataset """kinships""" +931 77 model """transr""" +931 77 loss """marginranking""" +931 77 regularizer """no""" +931 77 optimizer """adam""" +931 77 training_loop """owa""" +931 77 negative_sampler """basic""" +931 77 evaluator """rankbased""" +931 78 dataset """kinships""" +931 78 model """transr""" +931 78 loss """marginranking""" +931 78 regularizer """no""" +931 78 optimizer """adam""" +931 78 training_loop """owa""" +931 78 negative_sampler """basic""" +931 78 evaluator """rankbased""" +931 79 dataset """kinships""" +931 79 model """transr""" +931 79 loss """marginranking""" +931 79 regularizer """no""" +931 79 optimizer """adam""" +931 79 training_loop """owa""" +931 79 negative_sampler """basic""" +931 79 evaluator """rankbased""" +931 80 dataset """kinships""" +931 80 model """transr""" +931 80 loss """marginranking""" +931 80 regularizer """no""" +931 80 optimizer """adam""" +931 80 training_loop """owa""" +931 80 negative_sampler """basic""" +931 80 evaluator """rankbased""" +931 81 dataset """kinships""" +931 81 model """transr""" +931 81 loss """marginranking""" +931 81 regularizer """no""" +931 81 optimizer """adam""" +931 81 training_loop """owa""" +931 81 negative_sampler """basic""" +931 81 evaluator """rankbased""" +931 82 dataset """kinships""" +931 82 model """transr""" +931 82 loss """marginranking""" +931 82 regularizer """no""" +931 82 optimizer 
"""adam""" +931 82 training_loop """owa""" +931 82 negative_sampler """basic""" +931 82 evaluator """rankbased""" +931 83 dataset """kinships""" +931 83 model """transr""" +931 83 loss """marginranking""" +931 83 regularizer """no""" +931 83 optimizer """adam""" +931 83 training_loop """owa""" +931 83 negative_sampler """basic""" +931 83 evaluator """rankbased""" +931 84 dataset """kinships""" +931 84 model """transr""" +931 84 loss """marginranking""" +931 84 regularizer """no""" +931 84 optimizer """adam""" +931 84 training_loop """owa""" +931 84 negative_sampler """basic""" +931 84 evaluator """rankbased""" +931 85 dataset """kinships""" +931 85 model """transr""" +931 85 loss """marginranking""" +931 85 regularizer """no""" +931 85 optimizer """adam""" +931 85 training_loop """owa""" +931 85 negative_sampler """basic""" +931 85 evaluator """rankbased""" +931 86 dataset """kinships""" +931 86 model """transr""" +931 86 loss """marginranking""" +931 86 regularizer """no""" +931 86 optimizer """adam""" +931 86 training_loop """owa""" +931 86 negative_sampler """basic""" +931 86 evaluator """rankbased""" +931 87 dataset """kinships""" +931 87 model """transr""" +931 87 loss """marginranking""" +931 87 regularizer """no""" +931 87 optimizer """adam""" +931 87 training_loop """owa""" +931 87 negative_sampler """basic""" +931 87 evaluator """rankbased""" +931 88 dataset """kinships""" +931 88 model """transr""" +931 88 loss """marginranking""" +931 88 regularizer """no""" +931 88 optimizer """adam""" +931 88 training_loop """owa""" +931 88 negative_sampler """basic""" +931 88 evaluator """rankbased""" +932 1 model.embedding_dim 2.0 +932 1 model.relation_dim 2.0 +932 1 model.scoring_fct_norm 2.0 +932 1 loss.margin 7.977767981468929 +932 1 optimizer.lr 0.016765265355015404 +932 1 negative_sampler.num_negs_per_pos 71.0 +932 1 training.batch_size 0.0 +932 2 model.embedding_dim 0.0 +932 2 model.relation_dim 1.0 +932 2 model.scoring_fct_norm 1.0 +932 2 loss.margin 
7.408343073681457 +932 2 optimizer.lr 0.007600089470619755 +932 2 negative_sampler.num_negs_per_pos 41.0 +932 2 training.batch_size 2.0 +932 3 model.embedding_dim 0.0 +932 3 model.relation_dim 1.0 +932 3 model.scoring_fct_norm 2.0 +932 3 loss.margin 1.1432550567529511 +932 3 optimizer.lr 0.015519385823261853 +932 3 negative_sampler.num_negs_per_pos 22.0 +932 3 training.batch_size 0.0 +932 4 model.embedding_dim 0.0 +932 4 model.relation_dim 0.0 +932 4 model.scoring_fct_norm 1.0 +932 4 loss.margin 5.384276147324473 +932 4 optimizer.lr 0.007659886585123671 +932 4 negative_sampler.num_negs_per_pos 49.0 +932 4 training.batch_size 0.0 +932 5 model.embedding_dim 0.0 +932 5 model.relation_dim 1.0 +932 5 model.scoring_fct_norm 1.0 +932 5 loss.margin 2.0525436576066163 +932 5 optimizer.lr 0.026217039128930898 +932 5 negative_sampler.num_negs_per_pos 71.0 +932 5 training.batch_size 0.0 +932 6 model.embedding_dim 2.0 +932 6 model.relation_dim 1.0 +932 6 model.scoring_fct_norm 1.0 +932 6 loss.margin 1.8500068754438894 +932 6 optimizer.lr 0.008988272214304125 +932 6 negative_sampler.num_negs_per_pos 30.0 +932 6 training.batch_size 1.0 +932 7 model.embedding_dim 1.0 +932 7 model.relation_dim 1.0 +932 7 model.scoring_fct_norm 2.0 +932 7 loss.margin 6.8967825568197325 +932 7 optimizer.lr 0.008059683709871356 +932 7 negative_sampler.num_negs_per_pos 56.0 +932 7 training.batch_size 0.0 +932 8 model.embedding_dim 2.0 +932 8 model.relation_dim 2.0 +932 8 model.scoring_fct_norm 2.0 +932 8 loss.margin 7.920526235918536 +932 8 optimizer.lr 0.0068470887447021865 +932 8 negative_sampler.num_negs_per_pos 12.0 +932 8 training.batch_size 1.0 +932 9 model.embedding_dim 1.0 +932 9 model.relation_dim 1.0 +932 9 model.scoring_fct_norm 2.0 +932 9 loss.margin 2.5560108265053385 +932 9 optimizer.lr 0.010561360200764645 +932 9 negative_sampler.num_negs_per_pos 94.0 +932 9 training.batch_size 2.0 +932 10 model.embedding_dim 2.0 +932 10 model.relation_dim 0.0 +932 10 model.scoring_fct_norm 2.0 +932 10 
loss.margin 3.4125831193388096 +932 10 optimizer.lr 0.0162274835738764 +932 10 negative_sampler.num_negs_per_pos 36.0 +932 10 training.batch_size 0.0 +932 11 model.embedding_dim 2.0 +932 11 model.relation_dim 2.0 +932 11 model.scoring_fct_norm 1.0 +932 11 loss.margin 6.067170459923929 +932 11 optimizer.lr 0.04760917207254555 +932 11 negative_sampler.num_negs_per_pos 56.0 +932 11 training.batch_size 1.0 +932 12 model.embedding_dim 0.0 +932 12 model.relation_dim 0.0 +932 12 model.scoring_fct_norm 2.0 +932 12 loss.margin 1.0909041978873009 +932 12 optimizer.lr 0.0036315663498559477 +932 12 negative_sampler.num_negs_per_pos 19.0 +932 12 training.batch_size 2.0 +932 13 model.embedding_dim 0.0 +932 13 model.relation_dim 1.0 +932 13 model.scoring_fct_norm 2.0 +932 13 loss.margin 5.742856590314732 +932 13 optimizer.lr 0.0014442079934455575 +932 13 negative_sampler.num_negs_per_pos 90.0 +932 13 training.batch_size 1.0 +932 14 model.embedding_dim 1.0 +932 14 model.relation_dim 2.0 +932 14 model.scoring_fct_norm 1.0 +932 14 loss.margin 2.492289725388741 +932 14 optimizer.lr 0.004694543045982091 +932 14 negative_sampler.num_negs_per_pos 23.0 +932 14 training.batch_size 0.0 +932 15 model.embedding_dim 2.0 +932 15 model.relation_dim 2.0 +932 15 model.scoring_fct_norm 1.0 +932 15 loss.margin 6.092122859206912 +932 15 optimizer.lr 0.04430341141407789 +932 15 negative_sampler.num_negs_per_pos 69.0 +932 15 training.batch_size 0.0 +932 16 model.embedding_dim 2.0 +932 16 model.relation_dim 1.0 +932 16 model.scoring_fct_norm 2.0 +932 16 loss.margin 2.0085675670480696 +932 16 optimizer.lr 0.003640716431211514 +932 16 negative_sampler.num_negs_per_pos 2.0 +932 16 training.batch_size 2.0 +932 17 model.embedding_dim 0.0 +932 17 model.relation_dim 0.0 +932 17 model.scoring_fct_norm 1.0 +932 17 loss.margin 2.862604725449791 +932 17 optimizer.lr 0.02084783132485416 +932 17 negative_sampler.num_negs_per_pos 85.0 +932 17 training.batch_size 1.0 +932 18 model.embedding_dim 0.0 +932 18 
model.relation_dim 2.0 +932 18 model.scoring_fct_norm 1.0 +932 18 loss.margin 4.026620003138274 +932 18 optimizer.lr 0.002356767299432652 +932 18 negative_sampler.num_negs_per_pos 86.0 +932 18 training.batch_size 0.0 +932 19 model.embedding_dim 0.0 +932 19 model.relation_dim 2.0 +932 19 model.scoring_fct_norm 2.0 +932 19 loss.margin 1.3570559359105536 +932 19 optimizer.lr 0.013013333376604515 +932 19 negative_sampler.num_negs_per_pos 89.0 +932 19 training.batch_size 0.0 +932 20 model.embedding_dim 1.0 +932 20 model.relation_dim 2.0 +932 20 model.scoring_fct_norm 2.0 +932 20 loss.margin 9.30705837117899 +932 20 optimizer.lr 0.06545034213608626 +932 20 negative_sampler.num_negs_per_pos 19.0 +932 20 training.batch_size 0.0 +932 21 model.embedding_dim 1.0 +932 21 model.relation_dim 2.0 +932 21 model.scoring_fct_norm 2.0 +932 21 loss.margin 1.9414974536988359 +932 21 optimizer.lr 0.0010681226162610592 +932 21 negative_sampler.num_negs_per_pos 17.0 +932 21 training.batch_size 0.0 +932 22 model.embedding_dim 1.0 +932 22 model.relation_dim 2.0 +932 22 model.scoring_fct_norm 1.0 +932 22 loss.margin 3.883441884375495 +932 22 optimizer.lr 0.002828411749473185 +932 22 negative_sampler.num_negs_per_pos 22.0 +932 22 training.batch_size 2.0 +932 23 model.embedding_dim 0.0 +932 23 model.relation_dim 1.0 +932 23 model.scoring_fct_norm 1.0 +932 23 loss.margin 5.219535227537432 +932 23 optimizer.lr 0.0017070471974021976 +932 23 negative_sampler.num_negs_per_pos 72.0 +932 23 training.batch_size 2.0 +932 24 model.embedding_dim 1.0 +932 24 model.relation_dim 2.0 +932 24 model.scoring_fct_norm 1.0 +932 24 loss.margin 2.3834128363775484 +932 24 optimizer.lr 0.004151511793382348 +932 24 negative_sampler.num_negs_per_pos 35.0 +932 24 training.batch_size 0.0 +932 25 model.embedding_dim 1.0 +932 25 model.relation_dim 0.0 +932 25 model.scoring_fct_norm 2.0 +932 25 loss.margin 7.40501859089981 +932 25 optimizer.lr 0.001075636695750327 +932 25 negative_sampler.num_negs_per_pos 54.0 +932 25 
training.batch_size 0.0 +932 26 model.embedding_dim 1.0 +932 26 model.relation_dim 1.0 +932 26 model.scoring_fct_norm 1.0 +932 26 loss.margin 7.479235764144413 +932 26 optimizer.lr 0.01064589152441323 +932 26 negative_sampler.num_negs_per_pos 36.0 +932 26 training.batch_size 0.0 +932 27 model.embedding_dim 1.0 +932 27 model.relation_dim 0.0 +932 27 model.scoring_fct_norm 2.0 +932 27 loss.margin 8.93084941894436 +932 27 optimizer.lr 0.02337242211345076 +932 27 negative_sampler.num_negs_per_pos 45.0 +932 27 training.batch_size 1.0 +932 28 model.embedding_dim 1.0 +932 28 model.relation_dim 0.0 +932 28 model.scoring_fct_norm 2.0 +932 28 loss.margin 7.502567442132697 +932 28 optimizer.lr 0.002769828608444033 +932 28 negative_sampler.num_negs_per_pos 23.0 +932 28 training.batch_size 2.0 +932 29 model.embedding_dim 0.0 +932 29 model.relation_dim 2.0 +932 29 model.scoring_fct_norm 2.0 +932 29 loss.margin 7.286949392219258 +932 29 optimizer.lr 0.018105380557440032 +932 29 negative_sampler.num_negs_per_pos 16.0 +932 29 training.batch_size 1.0 +932 30 model.embedding_dim 2.0 +932 30 model.relation_dim 0.0 +932 30 model.scoring_fct_norm 2.0 +932 30 loss.margin 4.3104863185954665 +932 30 optimizer.lr 0.012015257996069006 +932 30 negative_sampler.num_negs_per_pos 72.0 +932 30 training.batch_size 0.0 +932 31 model.embedding_dim 2.0 +932 31 model.relation_dim 1.0 +932 31 model.scoring_fct_norm 2.0 +932 31 loss.margin 4.069815231892594 +932 31 optimizer.lr 0.005352112144509666 +932 31 negative_sampler.num_negs_per_pos 16.0 +932 31 training.batch_size 2.0 +932 32 model.embedding_dim 1.0 +932 32 model.relation_dim 0.0 +932 32 model.scoring_fct_norm 2.0 +932 32 loss.margin 6.207046891902855 +932 32 optimizer.lr 0.0756945641149316 +932 32 negative_sampler.num_negs_per_pos 77.0 +932 32 training.batch_size 0.0 +932 33 model.embedding_dim 2.0 +932 33 model.relation_dim 2.0 +932 33 model.scoring_fct_norm 2.0 +932 33 loss.margin 3.1217275163286375 +932 33 optimizer.lr 0.0014805731770814222 
+932 33 negative_sampler.num_negs_per_pos 34.0 +932 33 training.batch_size 1.0 +932 34 model.embedding_dim 2.0 +932 34 model.relation_dim 0.0 +932 34 model.scoring_fct_norm 1.0 +932 34 loss.margin 9.460667919524314 +932 34 optimizer.lr 0.006900715888934843 +932 34 negative_sampler.num_negs_per_pos 24.0 +932 34 training.batch_size 1.0 +932 35 model.embedding_dim 0.0 +932 35 model.relation_dim 2.0 +932 35 model.scoring_fct_norm 1.0 +932 35 loss.margin 7.2016613183305305 +932 35 optimizer.lr 0.0013339066107342955 +932 35 negative_sampler.num_negs_per_pos 99.0 +932 35 training.batch_size 0.0 +932 36 model.embedding_dim 1.0 +932 36 model.relation_dim 2.0 +932 36 model.scoring_fct_norm 1.0 +932 36 loss.margin 5.487473759277388 +932 36 optimizer.lr 0.00778856413007621 +932 36 negative_sampler.num_negs_per_pos 72.0 +932 36 training.batch_size 1.0 +932 37 model.embedding_dim 1.0 +932 37 model.relation_dim 0.0 +932 37 model.scoring_fct_norm 2.0 +932 37 loss.margin 6.669855036039513 +932 37 optimizer.lr 0.020801887257743474 +932 37 negative_sampler.num_negs_per_pos 81.0 +932 37 training.batch_size 2.0 +932 38 model.embedding_dim 2.0 +932 38 model.relation_dim 0.0 +932 38 model.scoring_fct_norm 1.0 +932 38 loss.margin 3.488322370707718 +932 38 optimizer.lr 0.009355052309616993 +932 38 negative_sampler.num_negs_per_pos 89.0 +932 38 training.batch_size 2.0 +932 39 model.embedding_dim 2.0 +932 39 model.relation_dim 1.0 +932 39 model.scoring_fct_norm 2.0 +932 39 loss.margin 7.645707753733429 +932 39 optimizer.lr 0.016778929469892435 +932 39 negative_sampler.num_negs_per_pos 22.0 +932 39 training.batch_size 2.0 +932 40 model.embedding_dim 1.0 +932 40 model.relation_dim 1.0 +932 40 model.scoring_fct_norm 1.0 +932 40 loss.margin 8.06194450843781 +932 40 optimizer.lr 0.0012814266883143083 +932 40 negative_sampler.num_negs_per_pos 75.0 +932 40 training.batch_size 2.0 +932 41 model.embedding_dim 1.0 +932 41 model.relation_dim 2.0 +932 41 model.scoring_fct_norm 1.0 +932 41 loss.margin 
5.259919479757837 +932 41 optimizer.lr 0.03669120651299375 +932 41 negative_sampler.num_negs_per_pos 38.0 +932 41 training.batch_size 2.0 +932 42 model.embedding_dim 2.0 +932 42 model.relation_dim 0.0 +932 42 model.scoring_fct_norm 2.0 +932 42 loss.margin 5.4812156251044755 +932 42 optimizer.lr 0.002474734566760718 +932 42 negative_sampler.num_negs_per_pos 7.0 +932 42 training.batch_size 1.0 +932 43 model.embedding_dim 2.0 +932 43 model.relation_dim 1.0 +932 43 model.scoring_fct_norm 2.0 +932 43 loss.margin 6.637910399919192 +932 43 optimizer.lr 0.0011401502194763311 +932 43 negative_sampler.num_negs_per_pos 78.0 +932 43 training.batch_size 0.0 +932 44 model.embedding_dim 2.0 +932 44 model.relation_dim 2.0 +932 44 model.scoring_fct_norm 2.0 +932 44 loss.margin 6.94278166625163 +932 44 optimizer.lr 0.04258080471735863 +932 44 negative_sampler.num_negs_per_pos 53.0 +932 44 training.batch_size 1.0 +932 45 model.embedding_dim 2.0 +932 45 model.relation_dim 1.0 +932 45 model.scoring_fct_norm 2.0 +932 45 loss.margin 9.044097429796796 +932 45 optimizer.lr 0.015084748709034713 +932 45 negative_sampler.num_negs_per_pos 58.0 +932 45 training.batch_size 2.0 +932 46 model.embedding_dim 0.0 +932 46 model.relation_dim 2.0 +932 46 model.scoring_fct_norm 2.0 +932 46 loss.margin 6.615316922645496 +932 46 optimizer.lr 0.0015735875481786227 +932 46 negative_sampler.num_negs_per_pos 5.0 +932 46 training.batch_size 2.0 +932 47 model.embedding_dim 0.0 +932 47 model.relation_dim 1.0 +932 47 model.scoring_fct_norm 2.0 +932 47 loss.margin 9.749053127165048 +932 47 optimizer.lr 0.02468360826301509 +932 47 negative_sampler.num_negs_per_pos 43.0 +932 47 training.batch_size 2.0 +932 48 model.embedding_dim 2.0 +932 48 model.relation_dim 1.0 +932 48 model.scoring_fct_norm 1.0 +932 48 loss.margin 2.478290011467945 +932 48 optimizer.lr 0.007331843318662234 +932 48 negative_sampler.num_negs_per_pos 41.0 +932 48 training.batch_size 1.0 +932 49 model.embedding_dim 2.0 +932 49 model.relation_dim 1.0 
+932 49 model.scoring_fct_norm 1.0 +932 49 loss.margin 5.5745834295938215 +932 49 optimizer.lr 0.0025276300744058212 +932 49 negative_sampler.num_negs_per_pos 46.0 +932 49 training.batch_size 1.0 +932 50 model.embedding_dim 2.0 +932 50 model.relation_dim 1.0 +932 50 model.scoring_fct_norm 1.0 +932 50 loss.margin 2.6044085736609333 +932 50 optimizer.lr 0.0022440136719201805 +932 50 negative_sampler.num_negs_per_pos 22.0 +932 50 training.batch_size 1.0 +932 51 model.embedding_dim 1.0 +932 51 model.relation_dim 1.0 +932 51 model.scoring_fct_norm 1.0 +932 51 loss.margin 5.692829594766688 +932 51 optimizer.lr 0.017795163337744294 +932 51 negative_sampler.num_negs_per_pos 88.0 +932 51 training.batch_size 0.0 +932 52 model.embedding_dim 2.0 +932 52 model.relation_dim 1.0 +932 52 model.scoring_fct_norm 2.0 +932 52 loss.margin 2.0818064853858225 +932 52 optimizer.lr 0.006519814910784987 +932 52 negative_sampler.num_negs_per_pos 84.0 +932 52 training.batch_size 1.0 +932 53 model.embedding_dim 1.0 +932 53 model.relation_dim 0.0 +932 53 model.scoring_fct_norm 1.0 +932 53 loss.margin 8.759622500197125 +932 53 optimizer.lr 0.012659200103758983 +932 53 negative_sampler.num_negs_per_pos 46.0 +932 53 training.batch_size 1.0 +932 54 model.embedding_dim 0.0 +932 54 model.relation_dim 1.0 +932 54 model.scoring_fct_norm 2.0 +932 54 loss.margin 2.3589633443426825 +932 54 optimizer.lr 0.06975968212765876 +932 54 negative_sampler.num_negs_per_pos 5.0 +932 54 training.batch_size 2.0 +932 55 model.embedding_dim 0.0 +932 55 model.relation_dim 0.0 +932 55 model.scoring_fct_norm 1.0 +932 55 loss.margin 9.35743933381875 +932 55 optimizer.lr 0.002691493375891596 +932 55 negative_sampler.num_negs_per_pos 2.0 +932 55 training.batch_size 1.0 +932 56 model.embedding_dim 1.0 +932 56 model.relation_dim 1.0 +932 56 model.scoring_fct_norm 1.0 +932 56 loss.margin 3.3583671283117544 +932 56 optimizer.lr 0.005669662511387306 +932 56 negative_sampler.num_negs_per_pos 92.0 +932 56 training.batch_size 2.0 
+932 57 model.embedding_dim 1.0 +932 57 model.relation_dim 0.0 +932 57 model.scoring_fct_norm 1.0 +932 57 loss.margin 5.426277623939793 +932 57 optimizer.lr 0.007103557036710307 +932 57 negative_sampler.num_negs_per_pos 87.0 +932 57 training.batch_size 0.0 +932 58 model.embedding_dim 0.0 +932 58 model.relation_dim 1.0 +932 58 model.scoring_fct_norm 2.0 +932 58 loss.margin 3.9790784365678955 +932 58 optimizer.lr 0.05953402084629624 +932 58 negative_sampler.num_negs_per_pos 11.0 +932 58 training.batch_size 0.0 +932 59 model.embedding_dim 1.0 +932 59 model.relation_dim 1.0 +932 59 model.scoring_fct_norm 1.0 +932 59 loss.margin 0.5071927083782148 +932 59 optimizer.lr 0.024081146755497582 +932 59 negative_sampler.num_negs_per_pos 58.0 +932 59 training.batch_size 0.0 +932 60 model.embedding_dim 0.0 +932 60 model.relation_dim 2.0 +932 60 model.scoring_fct_norm 1.0 +932 60 loss.margin 8.06762729261037 +932 60 optimizer.lr 0.0062496765374757506 +932 60 negative_sampler.num_negs_per_pos 22.0 +932 60 training.batch_size 2.0 +932 61 model.embedding_dim 1.0 +932 61 model.relation_dim 0.0 +932 61 model.scoring_fct_norm 1.0 +932 61 loss.margin 0.5691747053750835 +932 61 optimizer.lr 0.0011204582039369857 +932 61 negative_sampler.num_negs_per_pos 44.0 +932 61 training.batch_size 2.0 +932 62 model.embedding_dim 0.0 +932 62 model.relation_dim 2.0 +932 62 model.scoring_fct_norm 1.0 +932 62 loss.margin 6.025031589603441 +932 62 optimizer.lr 0.024205130815486597 +932 62 negative_sampler.num_negs_per_pos 3.0 +932 62 training.batch_size 2.0 +932 63 model.embedding_dim 0.0 +932 63 model.relation_dim 2.0 +932 63 model.scoring_fct_norm 2.0 +932 63 loss.margin 2.994799077337042 +932 63 optimizer.lr 0.09001692428088477 +932 63 negative_sampler.num_negs_per_pos 25.0 +932 63 training.batch_size 1.0 +932 64 model.embedding_dim 1.0 +932 64 model.relation_dim 1.0 +932 64 model.scoring_fct_norm 2.0 +932 64 loss.margin 1.1372024805961791 +932 64 optimizer.lr 0.004021344811838324 +932 64 
negative_sampler.num_negs_per_pos 74.0 +932 64 training.batch_size 0.0 +932 65 model.embedding_dim 0.0 +932 65 model.relation_dim 1.0 +932 65 model.scoring_fct_norm 2.0 +932 65 loss.margin 7.940114069893304 +932 65 optimizer.lr 0.011277725567845997 +932 65 negative_sampler.num_negs_per_pos 27.0 +932 65 training.batch_size 2.0 +932 66 model.embedding_dim 0.0 +932 66 model.relation_dim 2.0 +932 66 model.scoring_fct_norm 2.0 +932 66 loss.margin 6.765403365521792 +932 66 optimizer.lr 0.008010761793132084 +932 66 negative_sampler.num_negs_per_pos 85.0 +932 66 training.batch_size 1.0 +932 67 model.embedding_dim 0.0 +932 67 model.relation_dim 0.0 +932 67 model.scoring_fct_norm 2.0 +932 67 loss.margin 4.464766190244799 +932 67 optimizer.lr 0.004775919639157345 +932 67 negative_sampler.num_negs_per_pos 45.0 +932 67 training.batch_size 0.0 +932 68 model.embedding_dim 0.0 +932 68 model.relation_dim 1.0 +932 68 model.scoring_fct_norm 2.0 +932 68 loss.margin 8.496575212859867 +932 68 optimizer.lr 0.00413794884695401 +932 68 negative_sampler.num_negs_per_pos 36.0 +932 68 training.batch_size 0.0 +932 69 model.embedding_dim 2.0 +932 69 model.relation_dim 1.0 +932 69 model.scoring_fct_norm 2.0 +932 69 loss.margin 4.169645015663638 +932 69 optimizer.lr 0.06338990034319524 +932 69 negative_sampler.num_negs_per_pos 53.0 +932 69 training.batch_size 1.0 +932 70 model.embedding_dim 1.0 +932 70 model.relation_dim 2.0 +932 70 model.scoring_fct_norm 1.0 +932 70 loss.margin 7.949774590453562 +932 70 optimizer.lr 0.009504871394350372 +932 70 negative_sampler.num_negs_per_pos 7.0 +932 70 training.batch_size 2.0 +932 71 model.embedding_dim 1.0 +932 71 model.relation_dim 1.0 +932 71 model.scoring_fct_norm 2.0 +932 71 loss.margin 5.06216434344071 +932 71 optimizer.lr 0.023046026397349467 +932 71 negative_sampler.num_negs_per_pos 85.0 +932 71 training.batch_size 2.0 +932 72 model.embedding_dim 0.0 +932 72 model.relation_dim 2.0 +932 72 model.scoring_fct_norm 1.0 +932 72 loss.margin 
9.622501578124128 +932 72 optimizer.lr 0.009952782380662457 +932 72 negative_sampler.num_negs_per_pos 80.0 +932 72 training.batch_size 1.0 +932 73 model.embedding_dim 2.0 +932 73 model.relation_dim 0.0 +932 73 model.scoring_fct_norm 1.0 +932 73 loss.margin 3.5288567176938277 +932 73 optimizer.lr 0.049743146101100054 +932 73 negative_sampler.num_negs_per_pos 67.0 +932 73 training.batch_size 2.0 +932 74 model.embedding_dim 1.0 +932 74 model.relation_dim 1.0 +932 74 model.scoring_fct_norm 1.0 +932 74 loss.margin 2.2429732026105924 +932 74 optimizer.lr 0.05470202637887945 +932 74 negative_sampler.num_negs_per_pos 21.0 +932 74 training.batch_size 1.0 +932 75 model.embedding_dim 2.0 +932 75 model.relation_dim 1.0 +932 75 model.scoring_fct_norm 2.0 +932 75 loss.margin 7.568574613209399 +932 75 optimizer.lr 0.003372346919939049 +932 75 negative_sampler.num_negs_per_pos 20.0 +932 75 training.batch_size 2.0 +932 76 model.embedding_dim 2.0 +932 76 model.relation_dim 2.0 +932 76 model.scoring_fct_norm 1.0 +932 76 loss.margin 8.673791745784019 +932 76 optimizer.lr 0.05215049033898573 +932 76 negative_sampler.num_negs_per_pos 60.0 +932 76 training.batch_size 2.0 +932 77 model.embedding_dim 1.0 +932 77 model.relation_dim 1.0 +932 77 model.scoring_fct_norm 2.0 +932 77 loss.margin 9.301009147969852 +932 77 optimizer.lr 0.03897983595971756 +932 77 negative_sampler.num_negs_per_pos 18.0 +932 77 training.batch_size 0.0 +932 78 model.embedding_dim 0.0 +932 78 model.relation_dim 2.0 +932 78 model.scoring_fct_norm 2.0 +932 78 loss.margin 1.9797378637354575 +932 78 optimizer.lr 0.006860821624695565 +932 78 negative_sampler.num_negs_per_pos 62.0 +932 78 training.batch_size 0.0 +932 79 model.embedding_dim 2.0 +932 79 model.relation_dim 1.0 +932 79 model.scoring_fct_norm 2.0 +932 79 loss.margin 5.5699574155891565 +932 79 optimizer.lr 0.004488830021037529 +932 79 negative_sampler.num_negs_per_pos 69.0 +932 79 training.batch_size 0.0 +932 80 model.embedding_dim 0.0 +932 80 model.relation_dim 
0.0 +932 80 model.scoring_fct_norm 1.0 +932 80 loss.margin 6.648789211497895 +932 80 optimizer.lr 0.005525158943508026 +932 80 negative_sampler.num_negs_per_pos 1.0 +932 80 training.batch_size 1.0 +932 81 model.embedding_dim 0.0 +932 81 model.relation_dim 2.0 +932 81 model.scoring_fct_norm 1.0 +932 81 loss.margin 6.050812088891069 +932 81 optimizer.lr 0.08308226602872726 +932 81 negative_sampler.num_negs_per_pos 8.0 +932 81 training.batch_size 2.0 +932 82 model.embedding_dim 0.0 +932 82 model.relation_dim 1.0 +932 82 model.scoring_fct_norm 1.0 +932 82 loss.margin 4.903653009406718 +932 82 optimizer.lr 0.002248093675604062 +932 82 negative_sampler.num_negs_per_pos 17.0 +932 82 training.batch_size 0.0 +932 83 model.embedding_dim 2.0 +932 83 model.relation_dim 2.0 +932 83 model.scoring_fct_norm 2.0 +932 83 loss.margin 4.697413470466538 +932 83 optimizer.lr 0.07339300140018219 +932 83 negative_sampler.num_negs_per_pos 84.0 +932 83 training.batch_size 0.0 +932 84 model.embedding_dim 1.0 +932 84 model.relation_dim 1.0 +932 84 model.scoring_fct_norm 1.0 +932 84 loss.margin 3.2545869681524224 +932 84 optimizer.lr 0.014750464111831684 +932 84 negative_sampler.num_negs_per_pos 23.0 +932 84 training.batch_size 0.0 +932 85 model.embedding_dim 1.0 +932 85 model.relation_dim 0.0 +932 85 model.scoring_fct_norm 1.0 +932 85 loss.margin 1.722572899747516 +932 85 optimizer.lr 0.08903668597949356 +932 85 negative_sampler.num_negs_per_pos 85.0 +932 85 training.batch_size 0.0 +932 86 model.embedding_dim 0.0 +932 86 model.relation_dim 0.0 +932 86 model.scoring_fct_norm 2.0 +932 86 loss.margin 9.999267236748446 +932 86 optimizer.lr 0.017131896520608963 +932 86 negative_sampler.num_negs_per_pos 50.0 +932 86 training.batch_size 2.0 +932 87 model.embedding_dim 0.0 +932 87 model.relation_dim 0.0 +932 87 model.scoring_fct_norm 1.0 +932 87 loss.margin 3.0182903033562685 +932 87 optimizer.lr 0.025271767052654003 +932 87 negative_sampler.num_negs_per_pos 54.0 +932 87 training.batch_size 0.0 +932 
88 model.embedding_dim 2.0 +932 88 model.relation_dim 1.0 +932 88 model.scoring_fct_norm 1.0 +932 88 loss.margin 1.3143662278963895 +932 88 optimizer.lr 0.0015786829901447096 +932 88 negative_sampler.num_negs_per_pos 6.0 +932 88 training.batch_size 1.0 +932 89 model.embedding_dim 1.0 +932 89 model.relation_dim 2.0 +932 89 model.scoring_fct_norm 1.0 +932 89 loss.margin 9.246711906077504 +932 89 optimizer.lr 0.0841004819801095 +932 89 negative_sampler.num_negs_per_pos 33.0 +932 89 training.batch_size 2.0 +932 90 model.embedding_dim 0.0 +932 90 model.relation_dim 0.0 +932 90 model.scoring_fct_norm 2.0 +932 90 loss.margin 1.9733398846583978 +932 90 optimizer.lr 0.05982109758411999 +932 90 negative_sampler.num_negs_per_pos 19.0 +932 90 training.batch_size 2.0 +932 91 model.embedding_dim 0.0 +932 91 model.relation_dim 0.0 +932 91 model.scoring_fct_norm 2.0 +932 91 loss.margin 1.1525578724185577 +932 91 optimizer.lr 0.08504922979787347 +932 91 negative_sampler.num_negs_per_pos 68.0 +932 91 training.batch_size 0.0 +932 92 model.embedding_dim 2.0 +932 92 model.relation_dim 2.0 +932 92 model.scoring_fct_norm 2.0 +932 92 loss.margin 2.8661658802939085 +932 92 optimizer.lr 0.08508722468556543 +932 92 negative_sampler.num_negs_per_pos 7.0 +932 92 training.batch_size 0.0 +932 93 model.embedding_dim 2.0 +932 93 model.relation_dim 2.0 +932 93 model.scoring_fct_norm 2.0 +932 93 loss.margin 8.780139149677206 +932 93 optimizer.lr 0.006887551482414525 +932 93 negative_sampler.num_negs_per_pos 16.0 +932 93 training.batch_size 0.0 +932 94 model.embedding_dim 0.0 +932 94 model.relation_dim 1.0 +932 94 model.scoring_fct_norm 2.0 +932 94 loss.margin 5.928303678226741 +932 94 optimizer.lr 0.004514351464062685 +932 94 negative_sampler.num_negs_per_pos 18.0 +932 94 training.batch_size 1.0 +932 95 model.embedding_dim 1.0 +932 95 model.relation_dim 2.0 +932 95 model.scoring_fct_norm 1.0 +932 95 loss.margin 9.590562301648497 +932 95 optimizer.lr 0.005101642577045719 +932 95 
negative_sampler.num_negs_per_pos 90.0 +932 95 training.batch_size 1.0 +932 96 model.embedding_dim 2.0 +932 96 model.relation_dim 2.0 +932 96 model.scoring_fct_norm 1.0 +932 96 loss.margin 7.9205920289246885 +932 96 optimizer.lr 0.0025786786883757826 +932 96 negative_sampler.num_negs_per_pos 43.0 +932 96 training.batch_size 2.0 +932 97 model.embedding_dim 0.0 +932 97 model.relation_dim 0.0 +932 97 model.scoring_fct_norm 2.0 +932 97 loss.margin 6.585731246707738 +932 97 optimizer.lr 0.0035998718791512515 +932 97 negative_sampler.num_negs_per_pos 48.0 +932 97 training.batch_size 0.0 +932 98 model.embedding_dim 0.0 +932 98 model.relation_dim 0.0 +932 98 model.scoring_fct_norm 2.0 +932 98 loss.margin 4.7468733191801435 +932 98 optimizer.lr 0.0033749426727792873 +932 98 negative_sampler.num_negs_per_pos 20.0 +932 98 training.batch_size 2.0 +932 99 model.embedding_dim 0.0 +932 99 model.relation_dim 2.0 +932 99 model.scoring_fct_norm 1.0 +932 99 loss.margin 1.8539309562780542 +932 99 optimizer.lr 0.015409128394750012 +932 99 negative_sampler.num_negs_per_pos 10.0 +932 99 training.batch_size 2.0 +932 100 model.embedding_dim 0.0 +932 100 model.relation_dim 2.0 +932 100 model.scoring_fct_norm 2.0 +932 100 loss.margin 7.850383939691618 +932 100 optimizer.lr 0.052592379939674284 +932 100 negative_sampler.num_negs_per_pos 49.0 +932 100 training.batch_size 2.0 +932 1 dataset """kinships""" +932 1 model """transr""" +932 1 loss """marginranking""" +932 1 regularizer """no""" +932 1 optimizer """adam""" +932 1 training_loop """owa""" +932 1 negative_sampler """basic""" +932 1 evaluator """rankbased""" +932 2 dataset """kinships""" +932 2 model """transr""" +932 2 loss """marginranking""" +932 2 regularizer """no""" +932 2 optimizer """adam""" +932 2 training_loop """owa""" +932 2 negative_sampler """basic""" +932 2 evaluator """rankbased""" +932 3 dataset """kinships""" +932 3 model """transr""" +932 3 loss """marginranking""" +932 3 regularizer """no""" +932 3 optimizer 
"""adam""" +932 3 training_loop """owa""" +932 3 negative_sampler """basic""" +932 3 evaluator """rankbased""" +932 4 dataset """kinships""" +932 4 model """transr""" +932 4 loss """marginranking""" +932 4 regularizer """no""" +932 4 optimizer """adam""" +932 4 training_loop """owa""" +932 4 negative_sampler """basic""" +932 4 evaluator """rankbased""" +932 5 dataset """kinships""" +932 5 model """transr""" +932 5 loss """marginranking""" +932 5 regularizer """no""" +932 5 optimizer """adam""" +932 5 training_loop """owa""" +932 5 negative_sampler """basic""" +932 5 evaluator """rankbased""" +932 6 dataset """kinships""" +932 6 model """transr""" +932 6 loss """marginranking""" +932 6 regularizer """no""" +932 6 optimizer """adam""" +932 6 training_loop """owa""" +932 6 negative_sampler """basic""" +932 6 evaluator """rankbased""" +932 7 dataset """kinships""" +932 7 model """transr""" +932 7 loss """marginranking""" +932 7 regularizer """no""" +932 7 optimizer """adam""" +932 7 training_loop """owa""" +932 7 negative_sampler """basic""" +932 7 evaluator """rankbased""" +932 8 dataset """kinships""" +932 8 model """transr""" +932 8 loss """marginranking""" +932 8 regularizer """no""" +932 8 optimizer """adam""" +932 8 training_loop """owa""" +932 8 negative_sampler """basic""" +932 8 evaluator """rankbased""" +932 9 dataset """kinships""" +932 9 model """transr""" +932 9 loss """marginranking""" +932 9 regularizer """no""" +932 9 optimizer """adam""" +932 9 training_loop """owa""" +932 9 negative_sampler """basic""" +932 9 evaluator """rankbased""" +932 10 dataset """kinships""" +932 10 model """transr""" +932 10 loss """marginranking""" +932 10 regularizer """no""" +932 10 optimizer """adam""" +932 10 training_loop """owa""" +932 10 negative_sampler """basic""" +932 10 evaluator """rankbased""" +932 11 dataset """kinships""" +932 11 model """transr""" +932 11 loss """marginranking""" +932 11 regularizer """no""" +932 11 optimizer """adam""" +932 11 training_loop 
"""owa""" +932 11 negative_sampler """basic""" +932 11 evaluator """rankbased""" +932 12 dataset """kinships""" +932 12 model """transr""" +932 12 loss """marginranking""" +932 12 regularizer """no""" +932 12 optimizer """adam""" +932 12 training_loop """owa""" +932 12 negative_sampler """basic""" +932 12 evaluator """rankbased""" +932 13 dataset """kinships""" +932 13 model """transr""" +932 13 loss """marginranking""" +932 13 regularizer """no""" +932 13 optimizer """adam""" +932 13 training_loop """owa""" +932 13 negative_sampler """basic""" +932 13 evaluator """rankbased""" +932 14 dataset """kinships""" +932 14 model """transr""" +932 14 loss """marginranking""" +932 14 regularizer """no""" +932 14 optimizer """adam""" +932 14 training_loop """owa""" +932 14 negative_sampler """basic""" +932 14 evaluator """rankbased""" +932 15 dataset """kinships""" +932 15 model """transr""" +932 15 loss """marginranking""" +932 15 regularizer """no""" +932 15 optimizer """adam""" +932 15 training_loop """owa""" +932 15 negative_sampler """basic""" +932 15 evaluator """rankbased""" +932 16 dataset """kinships""" +932 16 model """transr""" +932 16 loss """marginranking""" +932 16 regularizer """no""" +932 16 optimizer """adam""" +932 16 training_loop """owa""" +932 16 negative_sampler """basic""" +932 16 evaluator """rankbased""" +932 17 dataset """kinships""" +932 17 model """transr""" +932 17 loss """marginranking""" +932 17 regularizer """no""" +932 17 optimizer """adam""" +932 17 training_loop """owa""" +932 17 negative_sampler """basic""" +932 17 evaluator """rankbased""" +932 18 dataset """kinships""" +932 18 model """transr""" +932 18 loss """marginranking""" +932 18 regularizer """no""" +932 18 optimizer """adam""" +932 18 training_loop """owa""" +932 18 negative_sampler """basic""" +932 18 evaluator """rankbased""" +932 19 dataset """kinships""" +932 19 model """transr""" +932 19 loss """marginranking""" +932 19 regularizer """no""" +932 19 optimizer """adam""" +932 
19 training_loop """owa""" +932 19 negative_sampler """basic""" +932 19 evaluator """rankbased""" +932 20 dataset """kinships""" +932 20 model """transr""" +932 20 loss """marginranking""" +932 20 regularizer """no""" +932 20 optimizer """adam""" +932 20 training_loop """owa""" +932 20 negative_sampler """basic""" +932 20 evaluator """rankbased""" +932 21 dataset """kinships""" +932 21 model """transr""" +932 21 loss """marginranking""" +932 21 regularizer """no""" +932 21 optimizer """adam""" +932 21 training_loop """owa""" +932 21 negative_sampler """basic""" +932 21 evaluator """rankbased""" +932 22 dataset """kinships""" +932 22 model """transr""" +932 22 loss """marginranking""" +932 22 regularizer """no""" +932 22 optimizer """adam""" +932 22 training_loop """owa""" +932 22 negative_sampler """basic""" +932 22 evaluator """rankbased""" +932 23 dataset """kinships""" +932 23 model """transr""" +932 23 loss """marginranking""" +932 23 regularizer """no""" +932 23 optimizer """adam""" +932 23 training_loop """owa""" +932 23 negative_sampler """basic""" +932 23 evaluator """rankbased""" +932 24 dataset """kinships""" +932 24 model """transr""" +932 24 loss """marginranking""" +932 24 regularizer """no""" +932 24 optimizer """adam""" +932 24 training_loop """owa""" +932 24 negative_sampler """basic""" +932 24 evaluator """rankbased""" +932 25 dataset """kinships""" +932 25 model """transr""" +932 25 loss """marginranking""" +932 25 regularizer """no""" +932 25 optimizer """adam""" +932 25 training_loop """owa""" +932 25 negative_sampler """basic""" +932 25 evaluator """rankbased""" +932 26 dataset """kinships""" +932 26 model """transr""" +932 26 loss """marginranking""" +932 26 regularizer """no""" +932 26 optimizer """adam""" +932 26 training_loop """owa""" +932 26 negative_sampler """basic""" +932 26 evaluator """rankbased""" +932 27 dataset """kinships""" +932 27 model """transr""" +932 27 loss """marginranking""" +932 27 regularizer """no""" +932 27 optimizer 
"""adam""" +932 27 training_loop """owa""" +932 27 negative_sampler """basic""" +932 27 evaluator """rankbased""" +932 28 dataset """kinships""" +932 28 model """transr""" +932 28 loss """marginranking""" +932 28 regularizer """no""" +932 28 optimizer """adam""" +932 28 training_loop """owa""" +932 28 negative_sampler """basic""" +932 28 evaluator """rankbased""" +932 29 dataset """kinships""" +932 29 model """transr""" +932 29 loss """marginranking""" +932 29 regularizer """no""" +932 29 optimizer """adam""" +932 29 training_loop """owa""" +932 29 negative_sampler """basic""" +932 29 evaluator """rankbased""" +932 30 dataset """kinships""" +932 30 model """transr""" +932 30 loss """marginranking""" +932 30 regularizer """no""" +932 30 optimizer """adam""" +932 30 training_loop """owa""" +932 30 negative_sampler """basic""" +932 30 evaluator """rankbased""" +932 31 dataset """kinships""" +932 31 model """transr""" +932 31 loss """marginranking""" +932 31 regularizer """no""" +932 31 optimizer """adam""" +932 31 training_loop """owa""" +932 31 negative_sampler """basic""" +932 31 evaluator """rankbased""" +932 32 dataset """kinships""" +932 32 model """transr""" +932 32 loss """marginranking""" +932 32 regularizer """no""" +932 32 optimizer """adam""" +932 32 training_loop """owa""" +932 32 negative_sampler """basic""" +932 32 evaluator """rankbased""" +932 33 dataset """kinships""" +932 33 model """transr""" +932 33 loss """marginranking""" +932 33 regularizer """no""" +932 33 optimizer """adam""" +932 33 training_loop """owa""" +932 33 negative_sampler """basic""" +932 33 evaluator """rankbased""" +932 34 dataset """kinships""" +932 34 model """transr""" +932 34 loss """marginranking""" +932 34 regularizer """no""" +932 34 optimizer """adam""" +932 34 training_loop """owa""" +932 34 negative_sampler """basic""" +932 34 evaluator """rankbased""" +932 35 dataset """kinships""" +932 35 model """transr""" +932 35 loss """marginranking""" +932 35 regularizer """no""" 
+932 35 optimizer """adam""" +932 35 training_loop """owa""" +932 35 negative_sampler """basic""" +932 35 evaluator """rankbased""" +932 36 dataset """kinships""" +932 36 model """transr""" +932 36 loss """marginranking""" +932 36 regularizer """no""" +932 36 optimizer """adam""" +932 36 training_loop """owa""" +932 36 negative_sampler """basic""" +932 36 evaluator """rankbased""" +932 37 dataset """kinships""" +932 37 model """transr""" +932 37 loss """marginranking""" +932 37 regularizer """no""" +932 37 optimizer """adam""" +932 37 training_loop """owa""" +932 37 negative_sampler """basic""" +932 37 evaluator """rankbased""" +932 38 dataset """kinships""" +932 38 model """transr""" +932 38 loss """marginranking""" +932 38 regularizer """no""" +932 38 optimizer """adam""" +932 38 training_loop """owa""" +932 38 negative_sampler """basic""" +932 38 evaluator """rankbased""" +932 39 dataset """kinships""" +932 39 model """transr""" +932 39 loss """marginranking""" +932 39 regularizer """no""" +932 39 optimizer """adam""" +932 39 training_loop """owa""" +932 39 negative_sampler """basic""" +932 39 evaluator """rankbased""" +932 40 dataset """kinships""" +932 40 model """transr""" +932 40 loss """marginranking""" +932 40 regularizer """no""" +932 40 optimizer """adam""" +932 40 training_loop """owa""" +932 40 negative_sampler """basic""" +932 40 evaluator """rankbased""" +932 41 dataset """kinships""" +932 41 model """transr""" +932 41 loss """marginranking""" +932 41 regularizer """no""" +932 41 optimizer """adam""" +932 41 training_loop """owa""" +932 41 negative_sampler """basic""" +932 41 evaluator """rankbased""" +932 42 dataset """kinships""" +932 42 model """transr""" +932 42 loss """marginranking""" +932 42 regularizer """no""" +932 42 optimizer """adam""" +932 42 training_loop """owa""" +932 42 negative_sampler """basic""" +932 42 evaluator """rankbased""" +932 43 dataset """kinships""" +932 43 model """transr""" +932 43 loss """marginranking""" +932 43 
regularizer """no""" +932 43 optimizer """adam""" +932 43 training_loop """owa""" +932 43 negative_sampler """basic""" +932 43 evaluator """rankbased""" +932 44 dataset """kinships""" +932 44 model """transr""" +932 44 loss """marginranking""" +932 44 regularizer """no""" +932 44 optimizer """adam""" +932 44 training_loop """owa""" +932 44 negative_sampler """basic""" +932 44 evaluator """rankbased""" +932 45 dataset """kinships""" +932 45 model """transr""" +932 45 loss """marginranking""" +932 45 regularizer """no""" +932 45 optimizer """adam""" +932 45 training_loop """owa""" +932 45 negative_sampler """basic""" +932 45 evaluator """rankbased""" +932 46 dataset """kinships""" +932 46 model """transr""" +932 46 loss """marginranking""" +932 46 regularizer """no""" +932 46 optimizer """adam""" +932 46 training_loop """owa""" +932 46 negative_sampler """basic""" +932 46 evaluator """rankbased""" +932 47 dataset """kinships""" +932 47 model """transr""" +932 47 loss """marginranking""" +932 47 regularizer """no""" +932 47 optimizer """adam""" +932 47 training_loop """owa""" +932 47 negative_sampler """basic""" +932 47 evaluator """rankbased""" +932 48 dataset """kinships""" +932 48 model """transr""" +932 48 loss """marginranking""" +932 48 regularizer """no""" +932 48 optimizer """adam""" +932 48 training_loop """owa""" +932 48 negative_sampler """basic""" +932 48 evaluator """rankbased""" +932 49 dataset """kinships""" +932 49 model """transr""" +932 49 loss """marginranking""" +932 49 regularizer """no""" +932 49 optimizer """adam""" +932 49 training_loop """owa""" +932 49 negative_sampler """basic""" +932 49 evaluator """rankbased""" +932 50 dataset """kinships""" +932 50 model """transr""" +932 50 loss """marginranking""" +932 50 regularizer """no""" +932 50 optimizer """adam""" +932 50 training_loop """owa""" +932 50 negative_sampler """basic""" +932 50 evaluator """rankbased""" +932 51 dataset """kinships""" +932 51 model """transr""" +932 51 loss 
"""marginranking""" +932 51 regularizer """no""" +932 51 optimizer """adam""" +932 51 training_loop """owa""" +932 51 negative_sampler """basic""" +932 51 evaluator """rankbased""" +932 52 dataset """kinships""" +932 52 model """transr""" +932 52 loss """marginranking""" +932 52 regularizer """no""" +932 52 optimizer """adam""" +932 52 training_loop """owa""" +932 52 negative_sampler """basic""" +932 52 evaluator """rankbased""" +932 53 dataset """kinships""" +932 53 model """transr""" +932 53 loss """marginranking""" +932 53 regularizer """no""" +932 53 optimizer """adam""" +932 53 training_loop """owa""" +932 53 negative_sampler """basic""" +932 53 evaluator """rankbased""" +932 54 dataset """kinships""" +932 54 model """transr""" +932 54 loss """marginranking""" +932 54 regularizer """no""" +932 54 optimizer """adam""" +932 54 training_loop """owa""" +932 54 negative_sampler """basic""" +932 54 evaluator """rankbased""" +932 55 dataset """kinships""" +932 55 model """transr""" +932 55 loss """marginranking""" +932 55 regularizer """no""" +932 55 optimizer """adam""" +932 55 training_loop """owa""" +932 55 negative_sampler """basic""" +932 55 evaluator """rankbased""" +932 56 dataset """kinships""" +932 56 model """transr""" +932 56 loss """marginranking""" +932 56 regularizer """no""" +932 56 optimizer """adam""" +932 56 training_loop """owa""" +932 56 negative_sampler """basic""" +932 56 evaluator """rankbased""" +932 57 dataset """kinships""" +932 57 model """transr""" +932 57 loss """marginranking""" +932 57 regularizer """no""" +932 57 optimizer """adam""" +932 57 training_loop """owa""" +932 57 negative_sampler """basic""" +932 57 evaluator """rankbased""" +932 58 dataset """kinships""" +932 58 model """transr""" +932 58 loss """marginranking""" +932 58 regularizer """no""" +932 58 optimizer """adam""" +932 58 training_loop """owa""" +932 58 negative_sampler """basic""" +932 58 evaluator """rankbased""" +932 59 dataset """kinships""" +932 59 model 
"""transr""" +932 59 loss """marginranking""" +932 59 regularizer """no""" +932 59 optimizer """adam""" +932 59 training_loop """owa""" +932 59 negative_sampler """basic""" +932 59 evaluator """rankbased""" +932 60 dataset """kinships""" +932 60 model """transr""" +932 60 loss """marginranking""" +932 60 regularizer """no""" +932 60 optimizer """adam""" +932 60 training_loop """owa""" +932 60 negative_sampler """basic""" +932 60 evaluator """rankbased""" +932 61 dataset """kinships""" +932 61 model """transr""" +932 61 loss """marginranking""" +932 61 regularizer """no""" +932 61 optimizer """adam""" +932 61 training_loop """owa""" +932 61 negative_sampler """basic""" +932 61 evaluator """rankbased""" +932 62 dataset """kinships""" +932 62 model """transr""" +932 62 loss """marginranking""" +932 62 regularizer """no""" +932 62 optimizer """adam""" +932 62 training_loop """owa""" +932 62 negative_sampler """basic""" +932 62 evaluator """rankbased""" +932 63 dataset """kinships""" +932 63 model """transr""" +932 63 loss """marginranking""" +932 63 regularizer """no""" +932 63 optimizer """adam""" +932 63 training_loop """owa""" +932 63 negative_sampler """basic""" +932 63 evaluator """rankbased""" +932 64 dataset """kinships""" +932 64 model """transr""" +932 64 loss """marginranking""" +932 64 regularizer """no""" +932 64 optimizer """adam""" +932 64 training_loop """owa""" +932 64 negative_sampler """basic""" +932 64 evaluator """rankbased""" +932 65 dataset """kinships""" +932 65 model """transr""" +932 65 loss """marginranking""" +932 65 regularizer """no""" +932 65 optimizer """adam""" +932 65 training_loop """owa""" +932 65 negative_sampler """basic""" +932 65 evaluator """rankbased""" +932 66 dataset """kinships""" +932 66 model """transr""" +932 66 loss """marginranking""" +932 66 regularizer """no""" +932 66 optimizer """adam""" +932 66 training_loop """owa""" +932 66 negative_sampler """basic""" +932 66 evaluator """rankbased""" +932 67 dataset 
"""kinships""" +932 67 model """transr""" +932 67 loss """marginranking""" +932 67 regularizer """no""" +932 67 optimizer """adam""" +932 67 training_loop """owa""" +932 67 negative_sampler """basic""" +932 67 evaluator """rankbased""" +932 68 dataset """kinships""" +932 68 model """transr""" +932 68 loss """marginranking""" +932 68 regularizer """no""" +932 68 optimizer """adam""" +932 68 training_loop """owa""" +932 68 negative_sampler """basic""" +932 68 evaluator """rankbased""" +932 69 dataset """kinships""" +932 69 model """transr""" +932 69 loss """marginranking""" +932 69 regularizer """no""" +932 69 optimizer """adam""" +932 69 training_loop """owa""" +932 69 negative_sampler """basic""" +932 69 evaluator """rankbased""" +932 70 dataset """kinships""" +932 70 model """transr""" +932 70 loss """marginranking""" +932 70 regularizer """no""" +932 70 optimizer """adam""" +932 70 training_loop """owa""" +932 70 negative_sampler """basic""" +932 70 evaluator """rankbased""" +932 71 dataset """kinships""" +932 71 model """transr""" +932 71 loss """marginranking""" +932 71 regularizer """no""" +932 71 optimizer """adam""" +932 71 training_loop """owa""" +932 71 negative_sampler """basic""" +932 71 evaluator """rankbased""" +932 72 dataset """kinships""" +932 72 model """transr""" +932 72 loss """marginranking""" +932 72 regularizer """no""" +932 72 optimizer """adam""" +932 72 training_loop """owa""" +932 72 negative_sampler """basic""" +932 72 evaluator """rankbased""" +932 73 dataset """kinships""" +932 73 model """transr""" +932 73 loss """marginranking""" +932 73 regularizer """no""" +932 73 optimizer """adam""" +932 73 training_loop """owa""" +932 73 negative_sampler """basic""" +932 73 evaluator """rankbased""" +932 74 dataset """kinships""" +932 74 model """transr""" +932 74 loss """marginranking""" +932 74 regularizer """no""" +932 74 optimizer """adam""" +932 74 training_loop """owa""" +932 74 negative_sampler """basic""" +932 74 evaluator """rankbased""" 
+932 75 dataset """kinships""" +932 75 model """transr""" +932 75 loss """marginranking""" +932 75 regularizer """no""" +932 75 optimizer """adam""" +932 75 training_loop """owa""" +932 75 negative_sampler """basic""" +932 75 evaluator """rankbased""" +932 76 dataset """kinships""" +932 76 model """transr""" +932 76 loss """marginranking""" +932 76 regularizer """no""" +932 76 optimizer """adam""" +932 76 training_loop """owa""" +932 76 negative_sampler """basic""" +932 76 evaluator """rankbased""" +932 77 dataset """kinships""" +932 77 model """transr""" +932 77 loss """marginranking""" +932 77 regularizer """no""" +932 77 optimizer """adam""" +932 77 training_loop """owa""" +932 77 negative_sampler """basic""" +932 77 evaluator """rankbased""" +932 78 dataset """kinships""" +932 78 model """transr""" +932 78 loss """marginranking""" +932 78 regularizer """no""" +932 78 optimizer """adam""" +932 78 training_loop """owa""" +932 78 negative_sampler """basic""" +932 78 evaluator """rankbased""" +932 79 dataset """kinships""" +932 79 model """transr""" +932 79 loss """marginranking""" +932 79 regularizer """no""" +932 79 optimizer """adam""" +932 79 training_loop """owa""" +932 79 negative_sampler """basic""" +932 79 evaluator """rankbased""" +932 80 dataset """kinships""" +932 80 model """transr""" +932 80 loss """marginranking""" +932 80 regularizer """no""" +932 80 optimizer """adam""" +932 80 training_loop """owa""" +932 80 negative_sampler """basic""" +932 80 evaluator """rankbased""" +932 81 dataset """kinships""" +932 81 model """transr""" +932 81 loss """marginranking""" +932 81 regularizer """no""" +932 81 optimizer """adam""" +932 81 training_loop """owa""" +932 81 negative_sampler """basic""" +932 81 evaluator """rankbased""" +932 82 dataset """kinships""" +932 82 model """transr""" +932 82 loss """marginranking""" +932 82 regularizer """no""" +932 82 optimizer """adam""" +932 82 training_loop """owa""" +932 82 negative_sampler """basic""" +932 82 evaluator 
"""rankbased""" +932 83 dataset """kinships""" +932 83 model """transr""" +932 83 loss """marginranking""" +932 83 regularizer """no""" +932 83 optimizer """adam""" +932 83 training_loop """owa""" +932 83 negative_sampler """basic""" +932 83 evaluator """rankbased""" +932 84 dataset """kinships""" +932 84 model """transr""" +932 84 loss """marginranking""" +932 84 regularizer """no""" +932 84 optimizer """adam""" +932 84 training_loop """owa""" +932 84 negative_sampler """basic""" +932 84 evaluator """rankbased""" +932 85 dataset """kinships""" +932 85 model """transr""" +932 85 loss """marginranking""" +932 85 regularizer """no""" +932 85 optimizer """adam""" +932 85 training_loop """owa""" +932 85 negative_sampler """basic""" +932 85 evaluator """rankbased""" +932 86 dataset """kinships""" +932 86 model """transr""" +932 86 loss """marginranking""" +932 86 regularizer """no""" +932 86 optimizer """adam""" +932 86 training_loop """owa""" +932 86 negative_sampler """basic""" +932 86 evaluator """rankbased""" +932 87 dataset """kinships""" +932 87 model """transr""" +932 87 loss """marginranking""" +932 87 regularizer """no""" +932 87 optimizer """adam""" +932 87 training_loop """owa""" +932 87 negative_sampler """basic""" +932 87 evaluator """rankbased""" +932 88 dataset """kinships""" +932 88 model """transr""" +932 88 loss """marginranking""" +932 88 regularizer """no""" +932 88 optimizer """adam""" +932 88 training_loop """owa""" +932 88 negative_sampler """basic""" +932 88 evaluator """rankbased""" +932 89 dataset """kinships""" +932 89 model """transr""" +932 89 loss """marginranking""" +932 89 regularizer """no""" +932 89 optimizer """adam""" +932 89 training_loop """owa""" +932 89 negative_sampler """basic""" +932 89 evaluator """rankbased""" +932 90 dataset """kinships""" +932 90 model """transr""" +932 90 loss """marginranking""" +932 90 regularizer """no""" +932 90 optimizer """adam""" +932 90 training_loop """owa""" +932 90 negative_sampler """basic""" 
+932 90 evaluator """rankbased""" +932 91 dataset """kinships""" +932 91 model """transr""" +932 91 loss """marginranking""" +932 91 regularizer """no""" +932 91 optimizer """adam""" +932 91 training_loop """owa""" +932 91 negative_sampler """basic""" +932 91 evaluator """rankbased""" +932 92 dataset """kinships""" +932 92 model """transr""" +932 92 loss """marginranking""" +932 92 regularizer """no""" +932 92 optimizer """adam""" +932 92 training_loop """owa""" +932 92 negative_sampler """basic""" +932 92 evaluator """rankbased""" +932 93 dataset """kinships""" +932 93 model """transr""" +932 93 loss """marginranking""" +932 93 regularizer """no""" +932 93 optimizer """adam""" +932 93 training_loop """owa""" +932 93 negative_sampler """basic""" +932 93 evaluator """rankbased""" +932 94 dataset """kinships""" +932 94 model """transr""" +932 94 loss """marginranking""" +932 94 regularizer """no""" +932 94 optimizer """adam""" +932 94 training_loop """owa""" +932 94 negative_sampler """basic""" +932 94 evaluator """rankbased""" +932 95 dataset """kinships""" +932 95 model """transr""" +932 95 loss """marginranking""" +932 95 regularizer """no""" +932 95 optimizer """adam""" +932 95 training_loop """owa""" +932 95 negative_sampler """basic""" +932 95 evaluator """rankbased""" +932 96 dataset """kinships""" +932 96 model """transr""" +932 96 loss """marginranking""" +932 96 regularizer """no""" +932 96 optimizer """adam""" +932 96 training_loop """owa""" +932 96 negative_sampler """basic""" +932 96 evaluator """rankbased""" +932 97 dataset """kinships""" +932 97 model """transr""" +932 97 loss """marginranking""" +932 97 regularizer """no""" +932 97 optimizer """adam""" +932 97 training_loop """owa""" +932 97 negative_sampler """basic""" +932 97 evaluator """rankbased""" +932 98 dataset """kinships""" +932 98 model """transr""" +932 98 loss """marginranking""" +932 98 regularizer """no""" +932 98 optimizer """adam""" +932 98 training_loop """owa""" +932 98 
negative_sampler """basic""" +932 98 evaluator """rankbased""" +932 99 dataset """kinships""" +932 99 model """transr""" +932 99 loss """marginranking""" +932 99 regularizer """no""" +932 99 optimizer """adam""" +932 99 training_loop """owa""" +932 99 negative_sampler """basic""" +932 99 evaluator """rankbased""" +932 100 dataset """kinships""" +932 100 model """transr""" +932 100 loss """marginranking""" +932 100 regularizer """no""" +932 100 optimizer """adam""" +932 100 training_loop """owa""" +932 100 negative_sampler """basic""" +932 100 evaluator """rankbased""" +933 1 model.embedding_dim 2.0 +933 1 model.relation_dim 2.0 +933 1 model.scoring_fct_norm 1.0 +933 1 loss.margin 26.02784047532831 +933 1 loss.adversarial_temperature 0.9933356263238193 +933 1 optimizer.lr 0.03233367582747218 +933 1 negative_sampler.num_negs_per_pos 65.0 +933 1 training.batch_size 0.0 +933 2 model.embedding_dim 0.0 +933 2 model.relation_dim 1.0 +933 2 model.scoring_fct_norm 2.0 +933 2 loss.margin 9.39648106267338 +933 2 loss.adversarial_temperature 0.18250993492915926 +933 2 optimizer.lr 0.0093565625186066 +933 2 negative_sampler.num_negs_per_pos 62.0 +933 2 training.batch_size 1.0 +933 3 model.embedding_dim 1.0 +933 3 model.relation_dim 0.0 +933 3 model.scoring_fct_norm 1.0 +933 3 loss.margin 19.42130235367797 +933 3 loss.adversarial_temperature 0.6143769204231159 +933 3 optimizer.lr 0.0021424999094787713 +933 3 negative_sampler.num_negs_per_pos 24.0 +933 3 training.batch_size 0.0 +933 4 model.embedding_dim 1.0 +933 4 model.relation_dim 0.0 +933 4 model.scoring_fct_norm 2.0 +933 4 loss.margin 8.279693992767939 +933 4 loss.adversarial_temperature 0.8148880958920618 +933 4 optimizer.lr 0.0330673897492881 +933 4 negative_sampler.num_negs_per_pos 32.0 +933 4 training.batch_size 2.0 +933 5 model.embedding_dim 2.0 +933 5 model.relation_dim 1.0 +933 5 model.scoring_fct_norm 2.0 +933 5 loss.margin 20.005986272625012 +933 5 loss.adversarial_temperature 0.6036696396045098 +933 5 optimizer.lr 
0.012007861193209058 +933 5 negative_sampler.num_negs_per_pos 77.0 +933 5 training.batch_size 2.0 +933 6 model.embedding_dim 0.0 +933 6 model.relation_dim 2.0 +933 6 model.scoring_fct_norm 1.0 +933 6 loss.margin 6.626650448461676 +933 6 loss.adversarial_temperature 0.3364519934804534 +933 6 optimizer.lr 0.01534693220322355 +933 6 negative_sampler.num_negs_per_pos 64.0 +933 6 training.batch_size 0.0 +933 7 model.embedding_dim 2.0 +933 7 model.relation_dim 1.0 +933 7 model.scoring_fct_norm 1.0 +933 7 loss.margin 15.144101786038435 +933 7 loss.adversarial_temperature 0.273683913968643 +933 7 optimizer.lr 0.0030531075361953717 +933 7 negative_sampler.num_negs_per_pos 42.0 +933 7 training.batch_size 1.0 +933 8 model.embedding_dim 1.0 +933 8 model.relation_dim 0.0 +933 8 model.scoring_fct_norm 2.0 +933 8 loss.margin 15.089472733684158 +933 8 loss.adversarial_temperature 0.5197760277650822 +933 8 optimizer.lr 0.019239793503857832 +933 8 negative_sampler.num_negs_per_pos 29.0 +933 8 training.batch_size 2.0 +933 9 model.embedding_dim 2.0 +933 9 model.relation_dim 0.0 +933 9 model.scoring_fct_norm 1.0 +933 9 loss.margin 5.090803494428757 +933 9 loss.adversarial_temperature 0.7371979585902458 +933 9 optimizer.lr 0.0048381089518127725 +933 9 negative_sampler.num_negs_per_pos 70.0 +933 9 training.batch_size 1.0 +933 10 model.embedding_dim 2.0 +933 10 model.relation_dim 2.0 +933 10 model.scoring_fct_norm 2.0 +933 10 loss.margin 8.663191006246231 +933 10 loss.adversarial_temperature 0.9822196883538474 +933 10 optimizer.lr 0.036429644015725515 +933 10 negative_sampler.num_negs_per_pos 27.0 +933 10 training.batch_size 1.0 +933 11 model.embedding_dim 2.0 +933 11 model.relation_dim 2.0 +933 11 model.scoring_fct_norm 1.0 +933 11 loss.margin 28.830671740327723 +933 11 loss.adversarial_temperature 0.9901009274668301 +933 11 optimizer.lr 0.03772595038992162 +933 11 negative_sampler.num_negs_per_pos 55.0 +933 11 training.batch_size 1.0 +933 12 model.embedding_dim 2.0 +933 12 
model.relation_dim 2.0 +933 12 model.scoring_fct_norm 2.0 +933 12 loss.margin 4.42975006400234 +933 12 loss.adversarial_temperature 0.6492984105718186 +933 12 optimizer.lr 0.0011964530471405485 +933 12 negative_sampler.num_negs_per_pos 96.0 +933 12 training.batch_size 0.0 +933 13 model.embedding_dim 0.0 +933 13 model.relation_dim 0.0 +933 13 model.scoring_fct_norm 2.0 +933 13 loss.margin 22.63864134895068 +933 13 loss.adversarial_temperature 0.21749002753241017 +933 13 optimizer.lr 0.0011594297571168484 +933 13 negative_sampler.num_negs_per_pos 4.0 +933 13 training.batch_size 1.0 +933 14 model.embedding_dim 0.0 +933 14 model.relation_dim 2.0 +933 14 model.scoring_fct_norm 2.0 +933 14 loss.margin 19.30806255148651 +933 14 loss.adversarial_temperature 0.21307913352912855 +933 14 optimizer.lr 0.0072932129564514 +933 14 negative_sampler.num_negs_per_pos 79.0 +933 14 training.batch_size 1.0 +933 15 model.embedding_dim 1.0 +933 15 model.relation_dim 1.0 +933 15 model.scoring_fct_norm 1.0 +933 15 loss.margin 18.82479507737208 +933 15 loss.adversarial_temperature 0.9111862018294525 +933 15 optimizer.lr 0.005699841322653469 +933 15 negative_sampler.num_negs_per_pos 48.0 +933 15 training.batch_size 2.0 +933 16 model.embedding_dim 0.0 +933 16 model.relation_dim 2.0 +933 16 model.scoring_fct_norm 1.0 +933 16 loss.margin 7.603353236108682 +933 16 loss.adversarial_temperature 0.7960122156859651 +933 16 optimizer.lr 0.003860198587606246 +933 16 negative_sampler.num_negs_per_pos 20.0 +933 16 training.batch_size 2.0 +933 17 model.embedding_dim 1.0 +933 17 model.relation_dim 0.0 +933 17 model.scoring_fct_norm 2.0 +933 17 loss.margin 16.061588963031987 +933 17 loss.adversarial_temperature 0.8720723317502116 +933 17 optimizer.lr 0.003859620164615136 +933 17 negative_sampler.num_negs_per_pos 0.0 +933 17 training.batch_size 0.0 +933 18 model.embedding_dim 2.0 +933 18 model.relation_dim 2.0 +933 18 model.scoring_fct_norm 1.0 +933 18 loss.margin 8.744316613707415 +933 18 
loss.adversarial_temperature 0.6733118351347931 +933 18 optimizer.lr 0.025824468099940205 +933 18 negative_sampler.num_negs_per_pos 44.0 +933 18 training.batch_size 1.0 +933 19 model.embedding_dim 2.0 +933 19 model.relation_dim 0.0 +933 19 model.scoring_fct_norm 2.0 +933 19 loss.margin 13.728397041477807 +933 19 loss.adversarial_temperature 0.8612293361397099 +933 19 optimizer.lr 0.024768199525219414 +933 19 negative_sampler.num_negs_per_pos 44.0 +933 19 training.batch_size 1.0 +933 20 model.embedding_dim 0.0 +933 20 model.relation_dim 2.0 +933 20 model.scoring_fct_norm 1.0 +933 20 loss.margin 22.162825583695742 +933 20 loss.adversarial_temperature 0.7653096832684835 +933 20 optimizer.lr 0.025968295198609024 +933 20 negative_sampler.num_negs_per_pos 96.0 +933 20 training.batch_size 0.0 +933 21 model.embedding_dim 2.0 +933 21 model.relation_dim 2.0 +933 21 model.scoring_fct_norm 2.0 +933 21 loss.margin 17.028907583285715 +933 21 loss.adversarial_temperature 0.7424269041604924 +933 21 optimizer.lr 0.001483691675925771 +933 21 negative_sampler.num_negs_per_pos 62.0 +933 21 training.batch_size 1.0 +933 22 model.embedding_dim 1.0 +933 22 model.relation_dim 0.0 +933 22 model.scoring_fct_norm 2.0 +933 22 loss.margin 6.9845732710255195 +933 22 loss.adversarial_temperature 0.6937526804685779 +933 22 optimizer.lr 0.037667639518270964 +933 22 negative_sampler.num_negs_per_pos 69.0 +933 22 training.batch_size 2.0 +933 23 model.embedding_dim 2.0 +933 23 model.relation_dim 1.0 +933 23 model.scoring_fct_norm 1.0 +933 23 loss.margin 14.849910192982069 +933 23 loss.adversarial_temperature 0.5508091168956837 +933 23 optimizer.lr 0.0012931919308853833 +933 23 negative_sampler.num_negs_per_pos 98.0 +933 23 training.batch_size 0.0 +933 24 model.embedding_dim 0.0 +933 24 model.relation_dim 2.0 +933 24 model.scoring_fct_norm 2.0 +933 24 loss.margin 20.579357739956073 +933 24 loss.adversarial_temperature 0.30536305750870235 +933 24 optimizer.lr 0.004573037839386707 +933 24 
negative_sampler.num_negs_per_pos 33.0 +933 24 training.batch_size 1.0 +933 25 model.embedding_dim 2.0 +933 25 model.relation_dim 0.0 +933 25 model.scoring_fct_norm 2.0 +933 25 loss.margin 17.975107642033656 +933 25 loss.adversarial_temperature 0.24993431168865243 +933 25 optimizer.lr 0.003805692016706069 +933 25 negative_sampler.num_negs_per_pos 37.0 +933 25 training.batch_size 0.0 +933 26 model.embedding_dim 0.0 +933 26 model.relation_dim 2.0 +933 26 model.scoring_fct_norm 1.0 +933 26 loss.margin 14.950572248687292 +933 26 loss.adversarial_temperature 0.8622958364991732 +933 26 optimizer.lr 0.0023586435870006075 +933 26 negative_sampler.num_negs_per_pos 8.0 +933 26 training.batch_size 1.0 +933 27 model.embedding_dim 1.0 +933 27 model.relation_dim 0.0 +933 27 model.scoring_fct_norm 2.0 +933 27 loss.margin 23.328832893353063 +933 27 loss.adversarial_temperature 0.6842342820531562 +933 27 optimizer.lr 0.01608357288860708 +933 27 negative_sampler.num_negs_per_pos 25.0 +933 27 training.batch_size 0.0 +933 28 model.embedding_dim 0.0 +933 28 model.relation_dim 0.0 +933 28 model.scoring_fct_norm 1.0 +933 28 loss.margin 9.636481598956543 +933 28 loss.adversarial_temperature 0.4235749892737821 +933 28 optimizer.lr 0.0038716225752018314 +933 28 negative_sampler.num_negs_per_pos 45.0 +933 28 training.batch_size 2.0 +933 29 model.embedding_dim 0.0 +933 29 model.relation_dim 1.0 +933 29 model.scoring_fct_norm 2.0 +933 29 loss.margin 21.18836248494715 +933 29 loss.adversarial_temperature 0.759307802201602 +933 29 optimizer.lr 0.002776237351073053 +933 29 negative_sampler.num_negs_per_pos 74.0 +933 29 training.batch_size 2.0 +933 30 model.embedding_dim 2.0 +933 30 model.relation_dim 0.0 +933 30 model.scoring_fct_norm 1.0 +933 30 loss.margin 10.16542077385291 +933 30 loss.adversarial_temperature 0.44951528063981483 +933 30 optimizer.lr 0.008396180430321667 +933 30 negative_sampler.num_negs_per_pos 3.0 +933 30 training.batch_size 2.0 +933 31 model.embedding_dim 1.0 +933 31 
model.relation_dim 1.0 +933 31 model.scoring_fct_norm 2.0 +933 31 loss.margin 2.586474269733672 +933 31 loss.adversarial_temperature 0.61795314319386 +933 31 optimizer.lr 0.026660441984315573 +933 31 negative_sampler.num_negs_per_pos 72.0 +933 31 training.batch_size 0.0 +933 32 model.embedding_dim 2.0 +933 32 model.relation_dim 1.0 +933 32 model.scoring_fct_norm 2.0 +933 32 loss.margin 5.465566398998714 +933 32 loss.adversarial_temperature 0.4748121853041629 +933 32 optimizer.lr 0.04308698991016045 +933 32 negative_sampler.num_negs_per_pos 58.0 +933 32 training.batch_size 1.0 +933 33 model.embedding_dim 1.0 +933 33 model.relation_dim 2.0 +933 33 model.scoring_fct_norm 1.0 +933 33 loss.margin 1.2486868266719613 +933 33 loss.adversarial_temperature 0.4178343471694477 +933 33 optimizer.lr 0.05874447202202227 +933 33 negative_sampler.num_negs_per_pos 61.0 +933 33 training.batch_size 0.0 +933 34 model.embedding_dim 2.0 +933 34 model.relation_dim 0.0 +933 34 model.scoring_fct_norm 1.0 +933 34 loss.margin 9.084445608064613 +933 34 loss.adversarial_temperature 0.20607792274626807 +933 34 optimizer.lr 0.007957023909411566 +933 34 negative_sampler.num_negs_per_pos 28.0 +933 34 training.batch_size 1.0 +933 35 model.embedding_dim 1.0 +933 35 model.relation_dim 2.0 +933 35 model.scoring_fct_norm 1.0 +933 35 loss.margin 12.506051330748681 +933 35 loss.adversarial_temperature 0.643583205487017 +933 35 optimizer.lr 0.0012507363591176982 +933 35 negative_sampler.num_negs_per_pos 44.0 +933 35 training.batch_size 0.0 +933 36 model.embedding_dim 1.0 +933 36 model.relation_dim 1.0 +933 36 model.scoring_fct_norm 1.0 +933 36 loss.margin 24.328168814582174 +933 36 loss.adversarial_temperature 0.3983282671561629 +933 36 optimizer.lr 0.020125638402037613 +933 36 negative_sampler.num_negs_per_pos 73.0 +933 36 training.batch_size 2.0 +933 37 model.embedding_dim 2.0 +933 37 model.relation_dim 0.0 +933 37 model.scoring_fct_norm 1.0 +933 37 loss.margin 13.72738235565387 +933 37 
loss.adversarial_temperature 0.12581910011054415 +933 37 optimizer.lr 0.07530758011675583 +933 37 negative_sampler.num_negs_per_pos 37.0 +933 37 training.batch_size 0.0 +933 38 model.embedding_dim 2.0 +933 38 model.relation_dim 1.0 +933 38 model.scoring_fct_norm 1.0 +933 38 loss.margin 15.047383093223138 +933 38 loss.adversarial_temperature 0.7293132882691469 +933 38 optimizer.lr 0.0014533619960869525 +933 38 negative_sampler.num_negs_per_pos 54.0 +933 38 training.batch_size 1.0 +933 39 model.embedding_dim 2.0 +933 39 model.relation_dim 0.0 +933 39 model.scoring_fct_norm 1.0 +933 39 loss.margin 23.597290131322463 +933 39 loss.adversarial_temperature 0.4363842577003585 +933 39 optimizer.lr 0.0021211283174823754 +933 39 negative_sampler.num_negs_per_pos 80.0 +933 39 training.batch_size 0.0 +933 40 model.embedding_dim 2.0 +933 40 model.relation_dim 1.0 +933 40 model.scoring_fct_norm 2.0 +933 40 loss.margin 1.2891914623282934 +933 40 loss.adversarial_temperature 0.922641507183421 +933 40 optimizer.lr 0.0017776611956824416 +933 40 negative_sampler.num_negs_per_pos 30.0 +933 40 training.batch_size 2.0 +933 41 model.embedding_dim 0.0 +933 41 model.relation_dim 2.0 +933 41 model.scoring_fct_norm 1.0 +933 41 loss.margin 10.680572730677845 +933 41 loss.adversarial_temperature 0.27444606614947625 +933 41 optimizer.lr 0.0038643938893527835 +933 41 negative_sampler.num_negs_per_pos 88.0 +933 41 training.batch_size 0.0 +933 42 model.embedding_dim 0.0 +933 42 model.relation_dim 2.0 +933 42 model.scoring_fct_norm 1.0 +933 42 loss.margin 17.792946953883433 +933 42 loss.adversarial_temperature 0.37578555867311136 +933 42 optimizer.lr 0.0600653634275054 +933 42 negative_sampler.num_negs_per_pos 16.0 +933 42 training.batch_size 2.0 +933 43 model.embedding_dim 1.0 +933 43 model.relation_dim 1.0 +933 43 model.scoring_fct_norm 2.0 +933 43 loss.margin 2.4959333615100947 +933 43 loss.adversarial_temperature 0.827976987169325 +933 43 optimizer.lr 0.030554236409272056 +933 43 
negative_sampler.num_negs_per_pos 15.0 +933 43 training.batch_size 0.0 +933 44 model.embedding_dim 2.0 +933 44 model.relation_dim 1.0 +933 44 model.scoring_fct_norm 2.0 +933 44 loss.margin 1.7610472609872014 +933 44 loss.adversarial_temperature 0.5034057740117187 +933 44 optimizer.lr 0.03580592866289729 +933 44 negative_sampler.num_negs_per_pos 86.0 +933 44 training.batch_size 1.0 +933 45 model.embedding_dim 1.0 +933 45 model.relation_dim 0.0 +933 45 model.scoring_fct_norm 1.0 +933 45 loss.margin 22.60914808722478 +933 45 loss.adversarial_temperature 0.5570568300339893 +933 45 optimizer.lr 0.008691636345350282 +933 45 negative_sampler.num_negs_per_pos 72.0 +933 45 training.batch_size 0.0 +933 46 model.embedding_dim 1.0 +933 46 model.relation_dim 2.0 +933 46 model.scoring_fct_norm 1.0 +933 46 loss.margin 25.323861770809515 +933 46 loss.adversarial_temperature 0.3648542715250884 +933 46 optimizer.lr 0.0011720626081294194 +933 46 negative_sampler.num_negs_per_pos 59.0 +933 46 training.batch_size 2.0 +933 47 model.embedding_dim 2.0 +933 47 model.relation_dim 2.0 +933 47 model.scoring_fct_norm 2.0 +933 47 loss.margin 10.653897830101814 +933 47 loss.adversarial_temperature 0.17587625705514653 +933 47 optimizer.lr 0.013821400329461803 +933 47 negative_sampler.num_negs_per_pos 68.0 +933 47 training.batch_size 1.0 +933 48 model.embedding_dim 0.0 +933 48 model.relation_dim 1.0 +933 48 model.scoring_fct_norm 2.0 +933 48 loss.margin 10.79179939935658 +933 48 loss.adversarial_temperature 0.31640927391000356 +933 48 optimizer.lr 0.0010732453595763147 +933 48 negative_sampler.num_negs_per_pos 66.0 +933 48 training.batch_size 1.0 +933 49 model.embedding_dim 2.0 +933 49 model.relation_dim 0.0 +933 49 model.scoring_fct_norm 2.0 +933 49 loss.margin 16.9638712002992 +933 49 loss.adversarial_temperature 0.4236061669974446 +933 49 optimizer.lr 0.08077048576114522 +933 49 negative_sampler.num_negs_per_pos 34.0 +933 49 training.batch_size 2.0 +933 50 model.embedding_dim 0.0 +933 50 
model.relation_dim 0.0 +933 50 model.scoring_fct_norm 1.0 +933 50 loss.margin 16.131579695300537 +933 50 loss.adversarial_temperature 0.926303279158269 +933 50 optimizer.lr 0.013254314637233382 +933 50 negative_sampler.num_negs_per_pos 64.0 +933 50 training.batch_size 2.0 +933 51 model.embedding_dim 1.0 +933 51 model.relation_dim 0.0 +933 51 model.scoring_fct_norm 2.0 +933 51 loss.margin 12.517893038297702 +933 51 loss.adversarial_temperature 0.7071407847016553 +933 51 optimizer.lr 0.044435429741832906 +933 51 negative_sampler.num_negs_per_pos 62.0 +933 51 training.batch_size 0.0 +933 52 model.embedding_dim 0.0 +933 52 model.relation_dim 1.0 +933 52 model.scoring_fct_norm 1.0 +933 52 loss.margin 3.2382468772849533 +933 52 loss.adversarial_temperature 0.47392377388647844 +933 52 optimizer.lr 0.005187097235318763 +933 52 negative_sampler.num_negs_per_pos 13.0 +933 52 training.batch_size 1.0 +933 53 model.embedding_dim 2.0 +933 53 model.relation_dim 1.0 +933 53 model.scoring_fct_norm 1.0 +933 53 loss.margin 13.461473553414768 +933 53 loss.adversarial_temperature 0.35734877639853746 +933 53 optimizer.lr 0.0014223579217455939 +933 53 negative_sampler.num_negs_per_pos 11.0 +933 53 training.batch_size 1.0 +933 54 model.embedding_dim 1.0 +933 54 model.relation_dim 0.0 +933 54 model.scoring_fct_norm 2.0 +933 54 loss.margin 1.353280625233618 +933 54 loss.adversarial_temperature 0.2641222652001325 +933 54 optimizer.lr 0.008865078385366942 +933 54 negative_sampler.num_negs_per_pos 20.0 +933 54 training.batch_size 0.0 +933 55 model.embedding_dim 2.0 +933 55 model.relation_dim 0.0 +933 55 model.scoring_fct_norm 2.0 +933 55 loss.margin 9.844743813335954 +933 55 loss.adversarial_temperature 0.6416402898124708 +933 55 optimizer.lr 0.0025326922890786595 +933 55 negative_sampler.num_negs_per_pos 81.0 +933 55 training.batch_size 1.0 +933 56 model.embedding_dim 1.0 +933 56 model.relation_dim 2.0 +933 56 model.scoring_fct_norm 2.0 +933 56 loss.margin 4.321432252541493 +933 56 
loss.adversarial_temperature 0.7877402913125279 +933 56 optimizer.lr 0.007571696484624903 +933 56 negative_sampler.num_negs_per_pos 54.0 +933 56 training.batch_size 0.0 +933 57 model.embedding_dim 1.0 +933 57 model.relation_dim 2.0 +933 57 model.scoring_fct_norm 2.0 +933 57 loss.margin 6.961889264415835 +933 57 loss.adversarial_temperature 0.37609101033279446 +933 57 optimizer.lr 0.0018390333618547973 +933 57 negative_sampler.num_negs_per_pos 95.0 +933 57 training.batch_size 1.0 +933 58 model.embedding_dim 2.0 +933 58 model.relation_dim 1.0 +933 58 model.scoring_fct_norm 2.0 +933 58 loss.margin 26.555540788570436 +933 58 loss.adversarial_temperature 0.6259469431520224 +933 58 optimizer.lr 0.031045144398813635 +933 58 negative_sampler.num_negs_per_pos 73.0 +933 58 training.batch_size 1.0 +933 59 model.embedding_dim 1.0 +933 59 model.relation_dim 0.0 +933 59 model.scoring_fct_norm 2.0 +933 59 loss.margin 7.180212483189061 +933 59 loss.adversarial_temperature 0.4017267502043932 +933 59 optimizer.lr 0.005028511927578918 +933 59 negative_sampler.num_negs_per_pos 18.0 +933 59 training.batch_size 1.0 +933 60 model.embedding_dim 1.0 +933 60 model.relation_dim 0.0 +933 60 model.scoring_fct_norm 1.0 +933 60 loss.margin 14.281580226331581 +933 60 loss.adversarial_temperature 0.2693266467165816 +933 60 optimizer.lr 0.09038808390045779 +933 60 negative_sampler.num_negs_per_pos 64.0 +933 60 training.batch_size 2.0 +933 61 model.embedding_dim 1.0 +933 61 model.relation_dim 0.0 +933 61 model.scoring_fct_norm 1.0 +933 61 loss.margin 13.906038770482372 +933 61 loss.adversarial_temperature 0.2956736807391277 +933 61 optimizer.lr 0.017242611797988174 +933 61 negative_sampler.num_negs_per_pos 89.0 +933 61 training.batch_size 2.0 +933 62 model.embedding_dim 0.0 +933 62 model.relation_dim 0.0 +933 62 model.scoring_fct_norm 2.0 +933 62 loss.margin 23.270476032851818 +933 62 loss.adversarial_temperature 0.1240215422432463 +933 62 optimizer.lr 0.001867217877391102 +933 62 
negative_sampler.num_negs_per_pos 26.0 +933 62 training.batch_size 2.0 +933 63 model.embedding_dim 2.0 +933 63 model.relation_dim 2.0 +933 63 model.scoring_fct_norm 2.0 +933 63 loss.margin 23.968849030151134 +933 63 loss.adversarial_temperature 0.43654617125744377 +933 63 optimizer.lr 0.0019392162779402746 +933 63 negative_sampler.num_negs_per_pos 38.0 +933 63 training.batch_size 0.0 +933 64 model.embedding_dim 2.0 +933 64 model.relation_dim 2.0 +933 64 model.scoring_fct_norm 2.0 +933 64 loss.margin 23.797774313027396 +933 64 loss.adversarial_temperature 0.5388435461072848 +933 64 optimizer.lr 0.05090715051806682 +933 64 negative_sampler.num_negs_per_pos 66.0 +933 64 training.batch_size 2.0 +933 65 model.embedding_dim 2.0 +933 65 model.relation_dim 1.0 +933 65 model.scoring_fct_norm 1.0 +933 65 loss.margin 10.402787400194304 +933 65 loss.adversarial_temperature 0.34662123588168636 +933 65 optimizer.lr 0.005550686918801098 +933 65 negative_sampler.num_negs_per_pos 32.0 +933 65 training.batch_size 1.0 +933 66 model.embedding_dim 0.0 +933 66 model.relation_dim 2.0 +933 66 model.scoring_fct_norm 2.0 +933 66 loss.margin 12.742835676403518 +933 66 loss.adversarial_temperature 0.43031382397993967 +933 66 optimizer.lr 0.002494442632467429 +933 66 negative_sampler.num_negs_per_pos 35.0 +933 66 training.batch_size 2.0 +933 67 model.embedding_dim 2.0 +933 67 model.relation_dim 0.0 +933 67 model.scoring_fct_norm 2.0 +933 67 loss.margin 19.059314566343353 +933 67 loss.adversarial_temperature 0.5287740218012073 +933 67 optimizer.lr 0.0019567463503602027 +933 67 negative_sampler.num_negs_per_pos 40.0 +933 67 training.batch_size 2.0 +933 68 model.embedding_dim 0.0 +933 68 model.relation_dim 2.0 +933 68 model.scoring_fct_norm 2.0 +933 68 loss.margin 15.866797599192846 +933 68 loss.adversarial_temperature 0.8714214148847915 +933 68 optimizer.lr 0.006543623448147647 +933 68 negative_sampler.num_negs_per_pos 34.0 +933 68 training.batch_size 1.0 +933 69 model.embedding_dim 0.0 +933 69 
model.relation_dim 0.0 +933 69 model.scoring_fct_norm 2.0 +933 69 loss.margin 29.693002801315295 +933 69 loss.adversarial_temperature 0.40173471755507784 +933 69 optimizer.lr 0.006432983347492186 +933 69 negative_sampler.num_negs_per_pos 73.0 +933 69 training.batch_size 0.0 +933 70 model.embedding_dim 2.0 +933 70 model.relation_dim 2.0 +933 70 model.scoring_fct_norm 2.0 +933 70 loss.margin 19.189334308170093 +933 70 loss.adversarial_temperature 0.6448536345078278 +933 70 optimizer.lr 0.006204151084192756 +933 70 negative_sampler.num_negs_per_pos 22.0 +933 70 training.batch_size 1.0 +933 71 model.embedding_dim 1.0 +933 71 model.relation_dim 0.0 +933 71 model.scoring_fct_norm 1.0 +933 71 loss.margin 28.734407135570482 +933 71 loss.adversarial_temperature 0.700369169410599 +933 71 optimizer.lr 0.0018904011355578743 +933 71 negative_sampler.num_negs_per_pos 45.0 +933 71 training.batch_size 1.0 +933 72 model.embedding_dim 1.0 +933 72 model.relation_dim 0.0 +933 72 model.scoring_fct_norm 1.0 +933 72 loss.margin 15.882512754056865 +933 72 loss.adversarial_temperature 0.7684374610820514 +933 72 optimizer.lr 0.057132031049057126 +933 72 negative_sampler.num_negs_per_pos 94.0 +933 72 training.batch_size 0.0 +933 73 model.embedding_dim 0.0 +933 73 model.relation_dim 2.0 +933 73 model.scoring_fct_norm 1.0 +933 73 loss.margin 25.121703710610078 +933 73 loss.adversarial_temperature 0.5705019213808737 +933 73 optimizer.lr 0.03046583322170096 +933 73 negative_sampler.num_negs_per_pos 47.0 +933 73 training.batch_size 1.0 +933 74 model.embedding_dim 2.0 +933 74 model.relation_dim 0.0 +933 74 model.scoring_fct_norm 2.0 +933 74 loss.margin 20.20088503105518 +933 74 loss.adversarial_temperature 0.42960129378559053 +933 74 optimizer.lr 0.008053931721572427 +933 74 negative_sampler.num_negs_per_pos 41.0 +933 74 training.batch_size 0.0 +933 75 model.embedding_dim 1.0 +933 75 model.relation_dim 1.0 +933 75 model.scoring_fct_norm 1.0 +933 75 loss.margin 2.77511385498018 +933 75 
loss.adversarial_temperature 0.1791252556978162 +933 75 optimizer.lr 0.0010744738162433434 +933 75 negative_sampler.num_negs_per_pos 95.0 +933 75 training.batch_size 2.0 +933 76 model.embedding_dim 0.0 +933 76 model.relation_dim 2.0 +933 76 model.scoring_fct_norm 2.0 +933 76 loss.margin 2.954803667926666 +933 76 loss.adversarial_temperature 0.5968957782147789 +933 76 optimizer.lr 0.09609421644752804 +933 76 negative_sampler.num_negs_per_pos 9.0 +933 76 training.batch_size 1.0 +933 1 dataset """kinships""" +933 1 model """transr""" +933 1 loss """nssa""" +933 1 regularizer """no""" +933 1 optimizer """adam""" +933 1 training_loop """owa""" +933 1 negative_sampler """basic""" +933 1 evaluator """rankbased""" +933 2 dataset """kinships""" +933 2 model """transr""" +933 2 loss """nssa""" +933 2 regularizer """no""" +933 2 optimizer """adam""" +933 2 training_loop """owa""" +933 2 negative_sampler """basic""" +933 2 evaluator """rankbased""" +933 3 dataset """kinships""" +933 3 model """transr""" +933 3 loss """nssa""" +933 3 regularizer """no""" +933 3 optimizer """adam""" +933 3 training_loop """owa""" +933 3 negative_sampler """basic""" +933 3 evaluator """rankbased""" +933 4 dataset """kinships""" +933 4 model """transr""" +933 4 loss """nssa""" +933 4 regularizer """no""" +933 4 optimizer """adam""" +933 4 training_loop """owa""" +933 4 negative_sampler """basic""" +933 4 evaluator """rankbased""" +933 5 dataset """kinships""" +933 5 model """transr""" +933 5 loss """nssa""" +933 5 regularizer """no""" +933 5 optimizer """adam""" +933 5 training_loop """owa""" +933 5 negative_sampler """basic""" +933 5 evaluator """rankbased""" +933 6 dataset """kinships""" +933 6 model """transr""" +933 6 loss """nssa""" +933 6 regularizer """no""" +933 6 optimizer """adam""" +933 6 training_loop """owa""" +933 6 negative_sampler """basic""" +933 6 evaluator """rankbased""" +933 7 dataset """kinships""" +933 7 model """transr""" +933 7 loss """nssa""" +933 7 regularizer """no""" 
+933 7 optimizer """adam""" +933 7 training_loop """owa""" +933 7 negative_sampler """basic""" +933 7 evaluator """rankbased""" +933 8 dataset """kinships""" +933 8 model """transr""" +933 8 loss """nssa""" +933 8 regularizer """no""" +933 8 optimizer """adam""" +933 8 training_loop """owa""" +933 8 negative_sampler """basic""" +933 8 evaluator """rankbased""" +933 9 dataset """kinships""" +933 9 model """transr""" +933 9 loss """nssa""" +933 9 regularizer """no""" +933 9 optimizer """adam""" +933 9 training_loop """owa""" +933 9 negative_sampler """basic""" +933 9 evaluator """rankbased""" +933 10 dataset """kinships""" +933 10 model """transr""" +933 10 loss """nssa""" +933 10 regularizer """no""" +933 10 optimizer """adam""" +933 10 training_loop """owa""" +933 10 negative_sampler """basic""" +933 10 evaluator """rankbased""" +933 11 dataset """kinships""" +933 11 model """transr""" +933 11 loss """nssa""" +933 11 regularizer """no""" +933 11 optimizer """adam""" +933 11 training_loop """owa""" +933 11 negative_sampler """basic""" +933 11 evaluator """rankbased""" +933 12 dataset """kinships""" +933 12 model """transr""" +933 12 loss """nssa""" +933 12 regularizer """no""" +933 12 optimizer """adam""" +933 12 training_loop """owa""" +933 12 negative_sampler """basic""" +933 12 evaluator """rankbased""" +933 13 dataset """kinships""" +933 13 model """transr""" +933 13 loss """nssa""" +933 13 regularizer """no""" +933 13 optimizer """adam""" +933 13 training_loop """owa""" +933 13 negative_sampler """basic""" +933 13 evaluator """rankbased""" +933 14 dataset """kinships""" +933 14 model """transr""" +933 14 loss """nssa""" +933 14 regularizer """no""" +933 14 optimizer """adam""" +933 14 training_loop """owa""" +933 14 negative_sampler """basic""" +933 14 evaluator """rankbased""" +933 15 dataset """kinships""" +933 15 model """transr""" +933 15 loss """nssa""" +933 15 regularizer """no""" +933 15 optimizer """adam""" +933 15 training_loop """owa""" +933 15 
negative_sampler """basic""" +933 15 evaluator """rankbased""" +933 16 dataset """kinships""" +933 16 model """transr""" +933 16 loss """nssa""" +933 16 regularizer """no""" +933 16 optimizer """adam""" +933 16 training_loop """owa""" +933 16 negative_sampler """basic""" +933 16 evaluator """rankbased""" +933 17 dataset """kinships""" +933 17 model """transr""" +933 17 loss """nssa""" +933 17 regularizer """no""" +933 17 optimizer """adam""" +933 17 training_loop """owa""" +933 17 negative_sampler """basic""" +933 17 evaluator """rankbased""" +933 18 dataset """kinships""" +933 18 model """transr""" +933 18 loss """nssa""" +933 18 regularizer """no""" +933 18 optimizer """adam""" +933 18 training_loop """owa""" +933 18 negative_sampler """basic""" +933 18 evaluator """rankbased""" +933 19 dataset """kinships""" +933 19 model """transr""" +933 19 loss """nssa""" +933 19 regularizer """no""" +933 19 optimizer """adam""" +933 19 training_loop """owa""" +933 19 negative_sampler """basic""" +933 19 evaluator """rankbased""" +933 20 dataset """kinships""" +933 20 model """transr""" +933 20 loss """nssa""" +933 20 regularizer """no""" +933 20 optimizer """adam""" +933 20 training_loop """owa""" +933 20 negative_sampler """basic""" +933 20 evaluator """rankbased""" +933 21 dataset """kinships""" +933 21 model """transr""" +933 21 loss """nssa""" +933 21 regularizer """no""" +933 21 optimizer """adam""" +933 21 training_loop """owa""" +933 21 negative_sampler """basic""" +933 21 evaluator """rankbased""" +933 22 dataset """kinships""" +933 22 model """transr""" +933 22 loss """nssa""" +933 22 regularizer """no""" +933 22 optimizer """adam""" +933 22 training_loop """owa""" +933 22 negative_sampler """basic""" +933 22 evaluator """rankbased""" +933 23 dataset """kinships""" +933 23 model """transr""" +933 23 loss """nssa""" +933 23 regularizer """no""" +933 23 optimizer """adam""" +933 23 training_loop """owa""" +933 23 negative_sampler """basic""" +933 23 evaluator 
"""rankbased""" +933 24 dataset """kinships""" +933 24 model """transr""" +933 24 loss """nssa""" +933 24 regularizer """no""" +933 24 optimizer """adam""" +933 24 training_loop """owa""" +933 24 negative_sampler """basic""" +933 24 evaluator """rankbased""" +933 25 dataset """kinships""" +933 25 model """transr""" +933 25 loss """nssa""" +933 25 regularizer """no""" +933 25 optimizer """adam""" +933 25 training_loop """owa""" +933 25 negative_sampler """basic""" +933 25 evaluator """rankbased""" +933 26 dataset """kinships""" +933 26 model """transr""" +933 26 loss """nssa""" +933 26 regularizer """no""" +933 26 optimizer """adam""" +933 26 training_loop """owa""" +933 26 negative_sampler """basic""" +933 26 evaluator """rankbased""" +933 27 dataset """kinships""" +933 27 model """transr""" +933 27 loss """nssa""" +933 27 regularizer """no""" +933 27 optimizer """adam""" +933 27 training_loop """owa""" +933 27 negative_sampler """basic""" +933 27 evaluator """rankbased""" +933 28 dataset """kinships""" +933 28 model """transr""" +933 28 loss """nssa""" +933 28 regularizer """no""" +933 28 optimizer """adam""" +933 28 training_loop """owa""" +933 28 negative_sampler """basic""" +933 28 evaluator """rankbased""" +933 29 dataset """kinships""" +933 29 model """transr""" +933 29 loss """nssa""" +933 29 regularizer """no""" +933 29 optimizer """adam""" +933 29 training_loop """owa""" +933 29 negative_sampler """basic""" +933 29 evaluator """rankbased""" +933 30 dataset """kinships""" +933 30 model """transr""" +933 30 loss """nssa""" +933 30 regularizer """no""" +933 30 optimizer """adam""" +933 30 training_loop """owa""" +933 30 negative_sampler """basic""" +933 30 evaluator """rankbased""" +933 31 dataset """kinships""" +933 31 model """transr""" +933 31 loss """nssa""" +933 31 regularizer """no""" +933 31 optimizer """adam""" +933 31 training_loop """owa""" +933 31 negative_sampler """basic""" +933 31 evaluator """rankbased""" +933 32 dataset """kinships""" +933 32 
model """transr""" +933 32 loss """nssa""" +933 32 regularizer """no""" +933 32 optimizer """adam""" +933 32 training_loop """owa""" +933 32 negative_sampler """basic""" +933 32 evaluator """rankbased""" +933 33 dataset """kinships""" +933 33 model """transr""" +933 33 loss """nssa""" +933 33 regularizer """no""" +933 33 optimizer """adam""" +933 33 training_loop """owa""" +933 33 negative_sampler """basic""" +933 33 evaluator """rankbased""" +933 34 dataset """kinships""" +933 34 model """transr""" +933 34 loss """nssa""" +933 34 regularizer """no""" +933 34 optimizer """adam""" +933 34 training_loop """owa""" +933 34 negative_sampler """basic""" +933 34 evaluator """rankbased""" +933 35 dataset """kinships""" +933 35 model """transr""" +933 35 loss """nssa""" +933 35 regularizer """no""" +933 35 optimizer """adam""" +933 35 training_loop """owa""" +933 35 negative_sampler """basic""" +933 35 evaluator """rankbased""" +933 36 dataset """kinships""" +933 36 model """transr""" +933 36 loss """nssa""" +933 36 regularizer """no""" +933 36 optimizer """adam""" +933 36 training_loop """owa""" +933 36 negative_sampler """basic""" +933 36 evaluator """rankbased""" +933 37 dataset """kinships""" +933 37 model """transr""" +933 37 loss """nssa""" +933 37 regularizer """no""" +933 37 optimizer """adam""" +933 37 training_loop """owa""" +933 37 negative_sampler """basic""" +933 37 evaluator """rankbased""" +933 38 dataset """kinships""" +933 38 model """transr""" +933 38 loss """nssa""" +933 38 regularizer """no""" +933 38 optimizer """adam""" +933 38 training_loop """owa""" +933 38 negative_sampler """basic""" +933 38 evaluator """rankbased""" +933 39 dataset """kinships""" +933 39 model """transr""" +933 39 loss """nssa""" +933 39 regularizer """no""" +933 39 optimizer """adam""" +933 39 training_loop """owa""" +933 39 negative_sampler """basic""" +933 39 evaluator """rankbased""" +933 40 dataset """kinships""" +933 40 model """transr""" +933 40 loss """nssa""" +933 40 
regularizer """no""" +933 40 optimizer """adam""" +933 40 training_loop """owa""" +933 40 negative_sampler """basic""" +933 40 evaluator """rankbased""" +933 41 dataset """kinships""" +933 41 model """transr""" +933 41 loss """nssa""" +933 41 regularizer """no""" +933 41 optimizer """adam""" +933 41 training_loop """owa""" +933 41 negative_sampler """basic""" +933 41 evaluator """rankbased""" +933 42 dataset """kinships""" +933 42 model """transr""" +933 42 loss """nssa""" +933 42 regularizer """no""" +933 42 optimizer """adam""" +933 42 training_loop """owa""" +933 42 negative_sampler """basic""" +933 42 evaluator """rankbased""" +933 43 dataset """kinships""" +933 43 model """transr""" +933 43 loss """nssa""" +933 43 regularizer """no""" +933 43 optimizer """adam""" +933 43 training_loop """owa""" +933 43 negative_sampler """basic""" +933 43 evaluator """rankbased""" +933 44 dataset """kinships""" +933 44 model """transr""" +933 44 loss """nssa""" +933 44 regularizer """no""" +933 44 optimizer """adam""" +933 44 training_loop """owa""" +933 44 negative_sampler """basic""" +933 44 evaluator """rankbased""" +933 45 dataset """kinships""" +933 45 model """transr""" +933 45 loss """nssa""" +933 45 regularizer """no""" +933 45 optimizer """adam""" +933 45 training_loop """owa""" +933 45 negative_sampler """basic""" +933 45 evaluator """rankbased""" +933 46 dataset """kinships""" +933 46 model """transr""" +933 46 loss """nssa""" +933 46 regularizer """no""" +933 46 optimizer """adam""" +933 46 training_loop """owa""" +933 46 negative_sampler """basic""" +933 46 evaluator """rankbased""" +933 47 dataset """kinships""" +933 47 model """transr""" +933 47 loss """nssa""" +933 47 regularizer """no""" +933 47 optimizer """adam""" +933 47 training_loop """owa""" +933 47 negative_sampler """basic""" +933 47 evaluator """rankbased""" +933 48 dataset """kinships""" +933 48 model """transr""" +933 48 loss """nssa""" +933 48 regularizer """no""" +933 48 optimizer """adam""" +933 
48 training_loop """owa""" +933 48 negative_sampler """basic""" +933 48 evaluator """rankbased""" +933 49 dataset """kinships""" +933 49 model """transr""" +933 49 loss """nssa""" +933 49 regularizer """no""" +933 49 optimizer """adam""" +933 49 training_loop """owa""" +933 49 negative_sampler """basic""" +933 49 evaluator """rankbased""" +933 50 dataset """kinships""" +933 50 model """transr""" +933 50 loss """nssa""" +933 50 regularizer """no""" +933 50 optimizer """adam""" +933 50 training_loop """owa""" +933 50 negative_sampler """basic""" +933 50 evaluator """rankbased""" +933 51 dataset """kinships""" +933 51 model """transr""" +933 51 loss """nssa""" +933 51 regularizer """no""" +933 51 optimizer """adam""" +933 51 training_loop """owa""" +933 51 negative_sampler """basic""" +933 51 evaluator """rankbased""" +933 52 dataset """kinships""" +933 52 model """transr""" +933 52 loss """nssa""" +933 52 regularizer """no""" +933 52 optimizer """adam""" +933 52 training_loop """owa""" +933 52 negative_sampler """basic""" +933 52 evaluator """rankbased""" +933 53 dataset """kinships""" +933 53 model """transr""" +933 53 loss """nssa""" +933 53 regularizer """no""" +933 53 optimizer """adam""" +933 53 training_loop """owa""" +933 53 negative_sampler """basic""" +933 53 evaluator """rankbased""" +933 54 dataset """kinships""" +933 54 model """transr""" +933 54 loss """nssa""" +933 54 regularizer """no""" +933 54 optimizer """adam""" +933 54 training_loop """owa""" +933 54 negative_sampler """basic""" +933 54 evaluator """rankbased""" +933 55 dataset """kinships""" +933 55 model """transr""" +933 55 loss """nssa""" +933 55 regularizer """no""" +933 55 optimizer """adam""" +933 55 training_loop """owa""" +933 55 negative_sampler """basic""" +933 55 evaluator """rankbased""" +933 56 dataset """kinships""" +933 56 model """transr""" +933 56 loss """nssa""" +933 56 regularizer """no""" +933 56 optimizer """adam""" +933 56 training_loop """owa""" +933 56 negative_sampler 
"""basic""" +933 56 evaluator """rankbased""" +933 57 dataset """kinships""" +933 57 model """transr""" +933 57 loss """nssa""" +933 57 regularizer """no""" +933 57 optimizer """adam""" +933 57 training_loop """owa""" +933 57 negative_sampler """basic""" +933 57 evaluator """rankbased""" +933 58 dataset """kinships""" +933 58 model """transr""" +933 58 loss """nssa""" +933 58 regularizer """no""" +933 58 optimizer """adam""" +933 58 training_loop """owa""" +933 58 negative_sampler """basic""" +933 58 evaluator """rankbased""" +933 59 dataset """kinships""" +933 59 model """transr""" +933 59 loss """nssa""" +933 59 regularizer """no""" +933 59 optimizer """adam""" +933 59 training_loop """owa""" +933 59 negative_sampler """basic""" +933 59 evaluator """rankbased""" +933 60 dataset """kinships""" +933 60 model """transr""" +933 60 loss """nssa""" +933 60 regularizer """no""" +933 60 optimizer """adam""" +933 60 training_loop """owa""" +933 60 negative_sampler """basic""" +933 60 evaluator """rankbased""" +933 61 dataset """kinships""" +933 61 model """transr""" +933 61 loss """nssa""" +933 61 regularizer """no""" +933 61 optimizer """adam""" +933 61 training_loop """owa""" +933 61 negative_sampler """basic""" +933 61 evaluator """rankbased""" +933 62 dataset """kinships""" +933 62 model """transr""" +933 62 loss """nssa""" +933 62 regularizer """no""" +933 62 optimizer """adam""" +933 62 training_loop """owa""" +933 62 negative_sampler """basic""" +933 62 evaluator """rankbased""" +933 63 dataset """kinships""" +933 63 model """transr""" +933 63 loss """nssa""" +933 63 regularizer """no""" +933 63 optimizer """adam""" +933 63 training_loop """owa""" +933 63 negative_sampler """basic""" +933 63 evaluator """rankbased""" +933 64 dataset """kinships""" +933 64 model """transr""" +933 64 loss """nssa""" +933 64 regularizer """no""" +933 64 optimizer """adam""" +933 64 training_loop """owa""" +933 64 negative_sampler """basic""" +933 64 evaluator """rankbased""" +933 65 
dataset """kinships""" +933 65 model """transr""" +933 65 loss """nssa""" +933 65 regularizer """no""" +933 65 optimizer """adam""" +933 65 training_loop """owa""" +933 65 negative_sampler """basic""" +933 65 evaluator """rankbased""" +933 66 dataset """kinships""" +933 66 model """transr""" +933 66 loss """nssa""" +933 66 regularizer """no""" +933 66 optimizer """adam""" +933 66 training_loop """owa""" +933 66 negative_sampler """basic""" +933 66 evaluator """rankbased""" +933 67 dataset """kinships""" +933 67 model """transr""" +933 67 loss """nssa""" +933 67 regularizer """no""" +933 67 optimizer """adam""" +933 67 training_loop """owa""" +933 67 negative_sampler """basic""" +933 67 evaluator """rankbased""" +933 68 dataset """kinships""" +933 68 model """transr""" +933 68 loss """nssa""" +933 68 regularizer """no""" +933 68 optimizer """adam""" +933 68 training_loop """owa""" +933 68 negative_sampler """basic""" +933 68 evaluator """rankbased""" +933 69 dataset """kinships""" +933 69 model """transr""" +933 69 loss """nssa""" +933 69 regularizer """no""" +933 69 optimizer """adam""" +933 69 training_loop """owa""" +933 69 negative_sampler """basic""" +933 69 evaluator """rankbased""" +933 70 dataset """kinships""" +933 70 model """transr""" +933 70 loss """nssa""" +933 70 regularizer """no""" +933 70 optimizer """adam""" +933 70 training_loop """owa""" +933 70 negative_sampler """basic""" +933 70 evaluator """rankbased""" +933 71 dataset """kinships""" +933 71 model """transr""" +933 71 loss """nssa""" +933 71 regularizer """no""" +933 71 optimizer """adam""" +933 71 training_loop """owa""" +933 71 negative_sampler """basic""" +933 71 evaluator """rankbased""" +933 72 dataset """kinships""" +933 72 model """transr""" +933 72 loss """nssa""" +933 72 regularizer """no""" +933 72 optimizer """adam""" +933 72 training_loop """owa""" +933 72 negative_sampler """basic""" +933 72 evaluator """rankbased""" +933 73 dataset """kinships""" +933 73 model """transr""" +933 
73 loss """nssa""" +933 73 regularizer """no""" +933 73 optimizer """adam""" +933 73 training_loop """owa""" +933 73 negative_sampler """basic""" +933 73 evaluator """rankbased""" +933 74 dataset """kinships""" +933 74 model """transr""" +933 74 loss """nssa""" +933 74 regularizer """no""" +933 74 optimizer """adam""" +933 74 training_loop """owa""" +933 74 negative_sampler """basic""" +933 74 evaluator """rankbased""" +933 75 dataset """kinships""" +933 75 model """transr""" +933 75 loss """nssa""" +933 75 regularizer """no""" +933 75 optimizer """adam""" +933 75 training_loop """owa""" +933 75 negative_sampler """basic""" +933 75 evaluator """rankbased""" +933 76 dataset """kinships""" +933 76 model """transr""" +933 76 loss """nssa""" +933 76 regularizer """no""" +933 76 optimizer """adam""" +933 76 training_loop """owa""" +933 76 negative_sampler """basic""" +933 76 evaluator """rankbased""" +934 1 model.embedding_dim 0.0 +934 1 model.relation_dim 0.0 +934 1 model.scoring_fct_norm 2.0 +934 1 loss.margin 19.77004542475461 +934 1 loss.adversarial_temperature 0.879195984213679 +934 1 optimizer.lr 0.023076090131924736 +934 1 negative_sampler.num_negs_per_pos 29.0 +934 1 training.batch_size 2.0 +934 2 model.embedding_dim 0.0 +934 2 model.relation_dim 0.0 +934 2 model.scoring_fct_norm 2.0 +934 2 loss.margin 3.878575459936377 +934 2 loss.adversarial_temperature 0.41149543179234893 +934 2 optimizer.lr 0.003720242814819716 +934 2 negative_sampler.num_negs_per_pos 64.0 +934 2 training.batch_size 0.0 +934 3 model.embedding_dim 2.0 +934 3 model.relation_dim 0.0 +934 3 model.scoring_fct_norm 2.0 +934 3 loss.margin 21.30272874638815 +934 3 loss.adversarial_temperature 0.45629074128287583 +934 3 optimizer.lr 0.004995759385155324 +934 3 negative_sampler.num_negs_per_pos 17.0 +934 3 training.batch_size 1.0 +934 4 model.embedding_dim 2.0 +934 4 model.relation_dim 1.0 +934 4 model.scoring_fct_norm 2.0 +934 4 loss.margin 17.790743547259463 +934 4 loss.adversarial_temperature 
0.8430502674871257 +934 4 optimizer.lr 0.001083239847827144 +934 4 negative_sampler.num_negs_per_pos 21.0 +934 4 training.batch_size 1.0 +934 5 model.embedding_dim 2.0 +934 5 model.relation_dim 0.0 +934 5 model.scoring_fct_norm 2.0 +934 5 loss.margin 22.71705165335389 +934 5 loss.adversarial_temperature 0.48226746270066156 +934 5 optimizer.lr 0.03570529045176332 +934 5 negative_sampler.num_negs_per_pos 12.0 +934 5 training.batch_size 1.0 +934 6 model.embedding_dim 0.0 +934 6 model.relation_dim 2.0 +934 6 model.scoring_fct_norm 2.0 +934 6 loss.margin 25.532801479033512 +934 6 loss.adversarial_temperature 0.6625627205300002 +934 6 optimizer.lr 0.0031032824585466735 +934 6 negative_sampler.num_negs_per_pos 9.0 +934 6 training.batch_size 1.0 +934 7 model.embedding_dim 1.0 +934 7 model.relation_dim 0.0 +934 7 model.scoring_fct_norm 2.0 +934 7 loss.margin 9.503122518124677 +934 7 loss.adversarial_temperature 0.8268135662020373 +934 7 optimizer.lr 0.0016387679166174453 +934 7 negative_sampler.num_negs_per_pos 92.0 +934 7 training.batch_size 0.0 +934 8 model.embedding_dim 1.0 +934 8 model.relation_dim 2.0 +934 8 model.scoring_fct_norm 2.0 +934 8 loss.margin 8.609334710454405 +934 8 loss.adversarial_temperature 0.721402047995216 +934 8 optimizer.lr 0.008507267656349782 +934 8 negative_sampler.num_negs_per_pos 42.0 +934 8 training.batch_size 0.0 +934 9 model.embedding_dim 2.0 +934 9 model.relation_dim 0.0 +934 9 model.scoring_fct_norm 2.0 +934 9 loss.margin 7.481159038070375 +934 9 loss.adversarial_temperature 0.9495363806029417 +934 9 optimizer.lr 0.02543506826109084 +934 9 negative_sampler.num_negs_per_pos 11.0 +934 9 training.batch_size 1.0 +934 10 model.embedding_dim 2.0 +934 10 model.relation_dim 1.0 +934 10 model.scoring_fct_norm 2.0 +934 10 loss.margin 11.253630487357643 +934 10 loss.adversarial_temperature 0.4474832004363194 +934 10 optimizer.lr 0.09530347113924958 +934 10 negative_sampler.num_negs_per_pos 42.0 +934 10 training.batch_size 1.0 +934 11 
model.embedding_dim 0.0 +934 11 model.relation_dim 2.0 +934 11 model.scoring_fct_norm 2.0 +934 11 loss.margin 14.22032809821201 +934 11 loss.adversarial_temperature 0.8428120112155518 +934 11 optimizer.lr 0.0015674117930932197 +934 11 negative_sampler.num_negs_per_pos 51.0 +934 11 training.batch_size 1.0 +934 12 model.embedding_dim 1.0 +934 12 model.relation_dim 2.0 +934 12 model.scoring_fct_norm 1.0 +934 12 loss.margin 27.046573113036946 +934 12 loss.adversarial_temperature 0.11128883067155006 +934 12 optimizer.lr 0.0022196952031026582 +934 12 negative_sampler.num_negs_per_pos 94.0 +934 12 training.batch_size 1.0 +934 13 model.embedding_dim 1.0 +934 13 model.relation_dim 1.0 +934 13 model.scoring_fct_norm 2.0 +934 13 loss.margin 15.230452929917062 +934 13 loss.adversarial_temperature 0.4912670146602397 +934 13 optimizer.lr 0.003320702984825415 +934 13 negative_sampler.num_negs_per_pos 6.0 +934 13 training.batch_size 2.0 +934 14 model.embedding_dim 1.0 +934 14 model.relation_dim 1.0 +934 14 model.scoring_fct_norm 2.0 +934 14 loss.margin 29.256722509149277 +934 14 loss.adversarial_temperature 0.61933587979411 +934 14 optimizer.lr 0.004964037220288325 +934 14 negative_sampler.num_negs_per_pos 7.0 +934 14 training.batch_size 1.0 +934 15 model.embedding_dim 2.0 +934 15 model.relation_dim 0.0 +934 15 model.scoring_fct_norm 2.0 +934 15 loss.margin 20.555050619086607 +934 15 loss.adversarial_temperature 0.9347871829948526 +934 15 optimizer.lr 0.005210003520670076 +934 15 negative_sampler.num_negs_per_pos 66.0 +934 15 training.batch_size 0.0 +934 16 model.embedding_dim 2.0 +934 16 model.relation_dim 2.0 +934 16 model.scoring_fct_norm 2.0 +934 16 loss.margin 9.392799425170164 +934 16 loss.adversarial_temperature 0.89997608953474 +934 16 optimizer.lr 0.009063355161810787 +934 16 negative_sampler.num_negs_per_pos 79.0 +934 16 training.batch_size 0.0 +934 17 model.embedding_dim 0.0 +934 17 model.relation_dim 2.0 +934 17 model.scoring_fct_norm 1.0 +934 17 loss.margin 
9.720101121904284 +934 17 loss.adversarial_temperature 0.7236038658098575 +934 17 optimizer.lr 0.007861803272976567 +934 17 negative_sampler.num_negs_per_pos 5.0 +934 17 training.batch_size 0.0 +934 18 model.embedding_dim 2.0 +934 18 model.relation_dim 2.0 +934 18 model.scoring_fct_norm 1.0 +934 18 loss.margin 9.334516946446472 +934 18 loss.adversarial_temperature 0.21737299966891838 +934 18 optimizer.lr 0.01882189433520897 +934 18 negative_sampler.num_negs_per_pos 25.0 +934 18 training.batch_size 1.0 +934 19 model.embedding_dim 2.0 +934 19 model.relation_dim 1.0 +934 19 model.scoring_fct_norm 2.0 +934 19 loss.margin 2.1045334313265265 +934 19 loss.adversarial_temperature 0.5872004314072222 +934 19 optimizer.lr 0.0015834236632034401 +934 19 negative_sampler.num_negs_per_pos 5.0 +934 19 training.batch_size 0.0 +934 20 model.embedding_dim 0.0 +934 20 model.relation_dim 0.0 +934 20 model.scoring_fct_norm 1.0 +934 20 loss.margin 22.981841981502576 +934 20 loss.adversarial_temperature 0.5917607341563667 +934 20 optimizer.lr 0.002811587401666949 +934 20 negative_sampler.num_negs_per_pos 27.0 +934 20 training.batch_size 0.0 +934 21 model.embedding_dim 1.0 +934 21 model.relation_dim 2.0 +934 21 model.scoring_fct_norm 2.0 +934 21 loss.margin 14.121433335954245 +934 21 loss.adversarial_temperature 0.21556479078978685 +934 21 optimizer.lr 0.002546214934006975 +934 21 negative_sampler.num_negs_per_pos 77.0 +934 21 training.batch_size 2.0 +934 22 model.embedding_dim 1.0 +934 22 model.relation_dim 0.0 +934 22 model.scoring_fct_norm 2.0 +934 22 loss.margin 22.45118650294543 +934 22 loss.adversarial_temperature 0.17636774587405202 +934 22 optimizer.lr 0.0025823230592645265 +934 22 negative_sampler.num_negs_per_pos 4.0 +934 22 training.batch_size 0.0 +934 23 model.embedding_dim 0.0 +934 23 model.relation_dim 0.0 +934 23 model.scoring_fct_norm 1.0 +934 23 loss.margin 28.04126351610301 +934 23 loss.adversarial_temperature 0.7203504788773167 +934 23 optimizer.lr 0.0074576855807349934 
+934 23 negative_sampler.num_negs_per_pos 33.0 +934 23 training.batch_size 2.0 +934 24 model.embedding_dim 2.0 +934 24 model.relation_dim 0.0 +934 24 model.scoring_fct_norm 2.0 +934 24 loss.margin 1.7950660180610996 +934 24 loss.adversarial_temperature 0.30297143477066996 +934 24 optimizer.lr 0.037431601445483506 +934 24 negative_sampler.num_negs_per_pos 80.0 +934 24 training.batch_size 0.0 +934 25 model.embedding_dim 2.0 +934 25 model.relation_dim 0.0 +934 25 model.scoring_fct_norm 2.0 +934 25 loss.margin 4.865313462435361 +934 25 loss.adversarial_temperature 0.3279244448875206 +934 25 optimizer.lr 0.0020661301216086153 +934 25 negative_sampler.num_negs_per_pos 7.0 +934 25 training.batch_size 2.0 +934 26 model.embedding_dim 1.0 +934 26 model.relation_dim 2.0 +934 26 model.scoring_fct_norm 1.0 +934 26 loss.margin 4.731116467187479 +934 26 loss.adversarial_temperature 0.754026324113931 +934 26 optimizer.lr 0.041823005785494985 +934 26 negative_sampler.num_negs_per_pos 65.0 +934 26 training.batch_size 0.0 +934 27 model.embedding_dim 2.0 +934 27 model.relation_dim 0.0 +934 27 model.scoring_fct_norm 2.0 +934 27 loss.margin 4.2460148093848185 +934 27 loss.adversarial_temperature 0.5528079797340935 +934 27 optimizer.lr 0.005670416244251898 +934 27 negative_sampler.num_negs_per_pos 21.0 +934 27 training.batch_size 1.0 +934 28 model.embedding_dim 0.0 +934 28 model.relation_dim 0.0 +934 28 model.scoring_fct_norm 2.0 +934 28 loss.margin 19.271737421997482 +934 28 loss.adversarial_temperature 0.6132515286544057 +934 28 optimizer.lr 0.007653260411804624 +934 28 negative_sampler.num_negs_per_pos 12.0 +934 28 training.batch_size 0.0 +934 29 model.embedding_dim 1.0 +934 29 model.relation_dim 0.0 +934 29 model.scoring_fct_norm 2.0 +934 29 loss.margin 20.466936673870347 +934 29 loss.adversarial_temperature 0.22880094563770853 +934 29 optimizer.lr 0.001733559852606063 +934 29 negative_sampler.num_negs_per_pos 5.0 +934 29 training.batch_size 0.0 +934 30 model.embedding_dim 1.0 +934 
30 model.relation_dim 1.0 +934 30 model.scoring_fct_norm 2.0 +934 30 loss.margin 13.75145136379869 +934 30 loss.adversarial_temperature 0.51600328978818 +934 30 optimizer.lr 0.00228981507292017 +934 30 negative_sampler.num_negs_per_pos 1.0 +934 30 training.batch_size 2.0 +934 31 model.embedding_dim 2.0 +934 31 model.relation_dim 0.0 +934 31 model.scoring_fct_norm 2.0 +934 31 loss.margin 26.359213373185344 +934 31 loss.adversarial_temperature 0.7550488206705072 +934 31 optimizer.lr 0.0109060988326873 +934 31 negative_sampler.num_negs_per_pos 56.0 +934 31 training.batch_size 1.0 +934 32 model.embedding_dim 1.0 +934 32 model.relation_dim 0.0 +934 32 model.scoring_fct_norm 2.0 +934 32 loss.margin 18.85881389525142 +934 32 loss.adversarial_temperature 0.4414306314059766 +934 32 optimizer.lr 0.04565594296230969 +934 32 negative_sampler.num_negs_per_pos 42.0 +934 32 training.batch_size 1.0 +934 33 model.embedding_dim 1.0 +934 33 model.relation_dim 1.0 +934 33 model.scoring_fct_norm 2.0 +934 33 loss.margin 13.229658688275407 +934 33 loss.adversarial_temperature 0.4316123718438254 +934 33 optimizer.lr 0.00696026132787758 +934 33 negative_sampler.num_negs_per_pos 84.0 +934 33 training.batch_size 1.0 +934 34 model.embedding_dim 2.0 +934 34 model.relation_dim 1.0 +934 34 model.scoring_fct_norm 1.0 +934 34 loss.margin 15.588328689825104 +934 34 loss.adversarial_temperature 0.9294337892010655 +934 34 optimizer.lr 0.0023912698445830716 +934 34 negative_sampler.num_negs_per_pos 39.0 +934 34 training.batch_size 0.0 +934 35 model.embedding_dim 1.0 +934 35 model.relation_dim 1.0 +934 35 model.scoring_fct_norm 2.0 +934 35 loss.margin 19.089459378801493 +934 35 loss.adversarial_temperature 0.3720561653890395 +934 35 optimizer.lr 0.05314571487014629 +934 35 negative_sampler.num_negs_per_pos 61.0 +934 35 training.batch_size 2.0 +934 36 model.embedding_dim 1.0 +934 36 model.relation_dim 2.0 +934 36 model.scoring_fct_norm 2.0 +934 36 loss.margin 29.41369504148046 +934 36 
loss.adversarial_temperature 0.818771898057947 +934 36 optimizer.lr 0.0859942319775884 +934 36 negative_sampler.num_negs_per_pos 54.0 +934 36 training.batch_size 1.0 +934 37 model.embedding_dim 0.0 +934 37 model.relation_dim 1.0 +934 37 model.scoring_fct_norm 2.0 +934 37 loss.margin 8.35757878298981 +934 37 loss.adversarial_temperature 0.3282151367705495 +934 37 optimizer.lr 0.026928359297261988 +934 37 negative_sampler.num_negs_per_pos 4.0 +934 37 training.batch_size 0.0 +934 38 model.embedding_dim 0.0 +934 38 model.relation_dim 1.0 +934 38 model.scoring_fct_norm 2.0 +934 38 loss.margin 26.58930559772765 +934 38 loss.adversarial_temperature 0.5088792600344514 +934 38 optimizer.lr 0.001061619114578235 +934 38 negative_sampler.num_negs_per_pos 85.0 +934 38 training.batch_size 0.0 +934 39 model.embedding_dim 0.0 +934 39 model.relation_dim 0.0 +934 39 model.scoring_fct_norm 1.0 +934 39 loss.margin 1.4539041127955628 +934 39 loss.adversarial_temperature 0.748361282441208 +934 39 optimizer.lr 0.0905605803301215 +934 39 negative_sampler.num_negs_per_pos 77.0 +934 39 training.batch_size 2.0 +934 40 model.embedding_dim 2.0 +934 40 model.relation_dim 2.0 +934 40 model.scoring_fct_norm 1.0 +934 40 loss.margin 2.482873131488062 +934 40 loss.adversarial_temperature 0.48988043740647047 +934 40 optimizer.lr 0.03629929872757 +934 40 negative_sampler.num_negs_per_pos 72.0 +934 40 training.batch_size 2.0 +934 41 model.embedding_dim 0.0 +934 41 model.relation_dim 0.0 +934 41 model.scoring_fct_norm 1.0 +934 41 loss.margin 26.982677530875595 +934 41 loss.adversarial_temperature 0.6410905953450354 +934 41 optimizer.lr 0.02824816152149556 +934 41 negative_sampler.num_negs_per_pos 47.0 +934 41 training.batch_size 1.0 +934 42 model.embedding_dim 2.0 +934 42 model.relation_dim 1.0 +934 42 model.scoring_fct_norm 1.0 +934 42 loss.margin 19.04968804959702 +934 42 loss.adversarial_temperature 0.5103570019784633 +934 42 optimizer.lr 0.04888963006961872 +934 42 negative_sampler.num_negs_per_pos 
55.0 +934 42 training.batch_size 1.0 +934 43 model.embedding_dim 1.0 +934 43 model.relation_dim 1.0 +934 43 model.scoring_fct_norm 1.0 +934 43 loss.margin 10.314286156435294 +934 43 loss.adversarial_temperature 0.13365132422218515 +934 43 optimizer.lr 0.0015879854002099248 +934 43 negative_sampler.num_negs_per_pos 23.0 +934 43 training.batch_size 0.0 +934 44 model.embedding_dim 1.0 +934 44 model.relation_dim 1.0 +934 44 model.scoring_fct_norm 2.0 +934 44 loss.margin 16.87239579815606 +934 44 loss.adversarial_temperature 0.2414024765340727 +934 44 optimizer.lr 0.00334837597202511 +934 44 negative_sampler.num_negs_per_pos 80.0 +934 44 training.batch_size 1.0 +934 45 model.embedding_dim 0.0 +934 45 model.relation_dim 1.0 +934 45 model.scoring_fct_norm 1.0 +934 45 loss.margin 6.991217910631342 +934 45 loss.adversarial_temperature 0.9242547896838249 +934 45 optimizer.lr 0.0229396343632693 +934 45 negative_sampler.num_negs_per_pos 36.0 +934 45 training.batch_size 0.0 +934 46 model.embedding_dim 0.0 +934 46 model.relation_dim 2.0 +934 46 model.scoring_fct_norm 1.0 +934 46 loss.margin 5.416450434668851 +934 46 loss.adversarial_temperature 0.40738269352836454 +934 46 optimizer.lr 0.00347043295647524 +934 46 negative_sampler.num_negs_per_pos 44.0 +934 46 training.batch_size 2.0 +934 47 model.embedding_dim 2.0 +934 47 model.relation_dim 0.0 +934 47 model.scoring_fct_norm 1.0 +934 47 loss.margin 21.189996456586666 +934 47 loss.adversarial_temperature 0.11982278337787704 +934 47 optimizer.lr 0.0033893241930691505 +934 47 negative_sampler.num_negs_per_pos 80.0 +934 47 training.batch_size 0.0 +934 48 model.embedding_dim 1.0 +934 48 model.relation_dim 1.0 +934 48 model.scoring_fct_norm 2.0 +934 48 loss.margin 1.8995007823201315 +934 48 loss.adversarial_temperature 0.5236563297125907 +934 48 optimizer.lr 0.017806277243994154 +934 48 negative_sampler.num_negs_per_pos 61.0 +934 48 training.batch_size 0.0 +934 49 model.embedding_dim 1.0 +934 49 model.relation_dim 1.0 +934 49 
model.scoring_fct_norm 1.0 +934 49 loss.margin 4.588722296798193 +934 49 loss.adversarial_temperature 0.9868942743325577 +934 49 optimizer.lr 0.003605908964077805 +934 49 negative_sampler.num_negs_per_pos 99.0 +934 49 training.batch_size 1.0 +934 50 model.embedding_dim 2.0 +934 50 model.relation_dim 1.0 +934 50 model.scoring_fct_norm 2.0 +934 50 loss.margin 21.029373088150468 +934 50 loss.adversarial_temperature 0.6274699271986233 +934 50 optimizer.lr 0.013373264435664209 +934 50 negative_sampler.num_negs_per_pos 75.0 +934 50 training.batch_size 2.0 +934 51 model.embedding_dim 2.0 +934 51 model.relation_dim 0.0 +934 51 model.scoring_fct_norm 2.0 +934 51 loss.margin 29.449620802069223 +934 51 loss.adversarial_temperature 0.38135639301951596 +934 51 optimizer.lr 0.05522473354438083 +934 51 negative_sampler.num_negs_per_pos 27.0 +934 51 training.batch_size 1.0 +934 52 model.embedding_dim 2.0 +934 52 model.relation_dim 0.0 +934 52 model.scoring_fct_norm 1.0 +934 52 loss.margin 19.08221662686333 +934 52 loss.adversarial_temperature 0.6776068224794913 +934 52 optimizer.lr 0.0042879553763313025 +934 52 negative_sampler.num_negs_per_pos 89.0 +934 52 training.batch_size 2.0 +934 53 model.embedding_dim 2.0 +934 53 model.relation_dim 1.0 +934 53 model.scoring_fct_norm 1.0 +934 53 loss.margin 4.78494453209217 +934 53 loss.adversarial_temperature 0.573517083269007 +934 53 optimizer.lr 0.0027370654823361856 +934 53 negative_sampler.num_negs_per_pos 38.0 +934 53 training.batch_size 2.0 +934 54 model.embedding_dim 2.0 +934 54 model.relation_dim 1.0 +934 54 model.scoring_fct_norm 2.0 +934 54 loss.margin 19.691310515884144 +934 54 loss.adversarial_temperature 0.7435956531118555 +934 54 optimizer.lr 0.0026526315913380696 +934 54 negative_sampler.num_negs_per_pos 74.0 +934 54 training.batch_size 1.0 +934 55 model.embedding_dim 1.0 +934 55 model.relation_dim 2.0 +934 55 model.scoring_fct_norm 1.0 +934 55 loss.margin 25.340216806507964 +934 55 loss.adversarial_temperature 
0.5937739673601871 +934 55 optimizer.lr 0.002415958115709986 +934 55 negative_sampler.num_negs_per_pos 59.0 +934 55 training.batch_size 2.0 +934 56 model.embedding_dim 2.0 +934 56 model.relation_dim 2.0 +934 56 model.scoring_fct_norm 1.0 +934 56 loss.margin 25.302221170012164 +934 56 loss.adversarial_temperature 0.8434781441818487 +934 56 optimizer.lr 0.09027919325021239 +934 56 negative_sampler.num_negs_per_pos 58.0 +934 56 training.batch_size 2.0 +934 57 model.embedding_dim 0.0 +934 57 model.relation_dim 1.0 +934 57 model.scoring_fct_norm 1.0 +934 57 loss.margin 17.767928341052336 +934 57 loss.adversarial_temperature 0.5167289841228986 +934 57 optimizer.lr 0.0017520482895761664 +934 57 negative_sampler.num_negs_per_pos 8.0 +934 57 training.batch_size 1.0 +934 58 model.embedding_dim 0.0 +934 58 model.relation_dim 2.0 +934 58 model.scoring_fct_norm 1.0 +934 58 loss.margin 23.759803647108235 +934 58 loss.adversarial_temperature 0.820835368117855 +934 58 optimizer.lr 0.0021489945751553066 +934 58 negative_sampler.num_negs_per_pos 37.0 +934 58 training.batch_size 1.0 +934 59 model.embedding_dim 1.0 +934 59 model.relation_dim 0.0 +934 59 model.scoring_fct_norm 2.0 +934 59 loss.margin 15.541571816585197 +934 59 loss.adversarial_temperature 0.13755369005899237 +934 59 optimizer.lr 0.0011536339820693278 +934 59 negative_sampler.num_negs_per_pos 1.0 +934 59 training.batch_size 2.0 +934 60 model.embedding_dim 2.0 +934 60 model.relation_dim 1.0 +934 60 model.scoring_fct_norm 1.0 +934 60 loss.margin 28.087050848059384 +934 60 loss.adversarial_temperature 0.44308902501919245 +934 60 optimizer.lr 0.02905841504990122 +934 60 negative_sampler.num_negs_per_pos 54.0 +934 60 training.batch_size 2.0 +934 61 model.embedding_dim 2.0 +934 61 model.relation_dim 2.0 +934 61 model.scoring_fct_norm 2.0 +934 61 loss.margin 12.812938690628897 +934 61 loss.adversarial_temperature 0.4235883826675084 +934 61 optimizer.lr 0.008440812027591388 +934 61 negative_sampler.num_negs_per_pos 86.0 +934 61 
training.batch_size 2.0 +934 62 model.embedding_dim 2.0 +934 62 model.relation_dim 1.0 +934 62 model.scoring_fct_norm 1.0 +934 62 loss.margin 14.136070456459096 +934 62 loss.adversarial_temperature 0.5468080127516923 +934 62 optimizer.lr 0.005021631823556538 +934 62 negative_sampler.num_negs_per_pos 11.0 +934 62 training.batch_size 2.0 +934 63 model.embedding_dim 2.0 +934 63 model.relation_dim 1.0 +934 63 model.scoring_fct_norm 1.0 +934 63 loss.margin 18.276997488143426 +934 63 loss.adversarial_temperature 0.982668522637447 +934 63 optimizer.lr 0.054615723183612574 +934 63 negative_sampler.num_negs_per_pos 62.0 +934 63 training.batch_size 2.0 +934 64 model.embedding_dim 2.0 +934 64 model.relation_dim 1.0 +934 64 model.scoring_fct_norm 1.0 +934 64 loss.margin 10.55372155823986 +934 64 loss.adversarial_temperature 0.8249301756278079 +934 64 optimizer.lr 0.0014209384639317102 +934 64 negative_sampler.num_negs_per_pos 32.0 +934 64 training.batch_size 1.0 +934 65 model.embedding_dim 0.0 +934 65 model.relation_dim 2.0 +934 65 model.scoring_fct_norm 1.0 +934 65 loss.margin 17.322502189615253 +934 65 loss.adversarial_temperature 0.6682421591886811 +934 65 optimizer.lr 0.0012184886752933126 +934 65 negative_sampler.num_negs_per_pos 0.0 +934 65 training.batch_size 1.0 +934 66 model.embedding_dim 1.0 +934 66 model.relation_dim 1.0 +934 66 model.scoring_fct_norm 2.0 +934 66 loss.margin 29.082924676786273 +934 66 loss.adversarial_temperature 0.9943636442805589 +934 66 optimizer.lr 0.07737939323977959 +934 66 negative_sampler.num_negs_per_pos 82.0 +934 66 training.batch_size 1.0 +934 67 model.embedding_dim 0.0 +934 67 model.relation_dim 1.0 +934 67 model.scoring_fct_norm 1.0 +934 67 loss.margin 14.563308283007045 +934 67 loss.adversarial_temperature 0.2725376468776471 +934 67 optimizer.lr 0.005991213159629655 +934 67 negative_sampler.num_negs_per_pos 64.0 +934 67 training.batch_size 1.0 +934 68 model.embedding_dim 0.0 +934 68 model.relation_dim 1.0 +934 68 model.scoring_fct_norm 
1.0 +934 68 loss.margin 17.665698228727823 +934 68 loss.adversarial_temperature 0.1704732908095285 +934 68 optimizer.lr 0.015866018404555406 +934 68 negative_sampler.num_negs_per_pos 83.0 +934 68 training.batch_size 0.0 +934 69 model.embedding_dim 0.0 +934 69 model.relation_dim 0.0 +934 69 model.scoring_fct_norm 1.0 +934 69 loss.margin 4.378202640018461 +934 69 loss.adversarial_temperature 0.8735131235595937 +934 69 optimizer.lr 0.09604753042620849 +934 69 negative_sampler.num_negs_per_pos 84.0 +934 69 training.batch_size 2.0 +934 70 model.embedding_dim 0.0 +934 70 model.relation_dim 0.0 +934 70 model.scoring_fct_norm 2.0 +934 70 loss.margin 6.2271069936415175 +934 70 loss.adversarial_temperature 0.6171460608387968 +934 70 optimizer.lr 0.07616125001170662 +934 70 negative_sampler.num_negs_per_pos 9.0 +934 70 training.batch_size 2.0 +934 71 model.embedding_dim 1.0 +934 71 model.relation_dim 2.0 +934 71 model.scoring_fct_norm 2.0 +934 71 loss.margin 27.179503202048924 +934 71 loss.adversarial_temperature 0.3467457002127023 +934 71 optimizer.lr 0.019914000189157514 +934 71 negative_sampler.num_negs_per_pos 75.0 +934 71 training.batch_size 0.0 +934 72 model.embedding_dim 2.0 +934 72 model.relation_dim 1.0 +934 72 model.scoring_fct_norm 2.0 +934 72 loss.margin 1.3614421530499428 +934 72 loss.adversarial_temperature 0.209837679639779 +934 72 optimizer.lr 0.04054351753785272 +934 72 negative_sampler.num_negs_per_pos 27.0 +934 72 training.batch_size 2.0 +934 73 model.embedding_dim 2.0 +934 73 model.relation_dim 2.0 +934 73 model.scoring_fct_norm 2.0 +934 73 loss.margin 13.55965611001292 +934 73 loss.adversarial_temperature 0.8534006156032831 +934 73 optimizer.lr 0.009746722556495575 +934 73 negative_sampler.num_negs_per_pos 37.0 +934 73 training.batch_size 2.0 +934 74 model.embedding_dim 2.0 +934 74 model.relation_dim 2.0 +934 74 model.scoring_fct_norm 1.0 +934 74 loss.margin 13.784180973412854 +934 74 loss.adversarial_temperature 0.4832416831814351 +934 74 optimizer.lr 
0.0206857497187073 +934 74 negative_sampler.num_negs_per_pos 42.0 +934 74 training.batch_size 2.0 +934 75 model.embedding_dim 2.0 +934 75 model.relation_dim 1.0 +934 75 model.scoring_fct_norm 2.0 +934 75 loss.margin 1.234968619029778 +934 75 loss.adversarial_temperature 0.7033328475477485 +934 75 optimizer.lr 0.004500205985787654 +934 75 negative_sampler.num_negs_per_pos 31.0 +934 75 training.batch_size 1.0 +934 76 model.embedding_dim 1.0 +934 76 model.relation_dim 0.0 +934 76 model.scoring_fct_norm 1.0 +934 76 loss.margin 21.63991202019109 +934 76 loss.adversarial_temperature 0.43952216166237723 +934 76 optimizer.lr 0.002272912848443717 +934 76 negative_sampler.num_negs_per_pos 97.0 +934 76 training.batch_size 1.0 +934 77 model.embedding_dim 2.0 +934 77 model.relation_dim 0.0 +934 77 model.scoring_fct_norm 1.0 +934 77 loss.margin 5.713595657134667 +934 77 loss.adversarial_temperature 0.795308670621228 +934 77 optimizer.lr 0.024815280561968696 +934 77 negative_sampler.num_negs_per_pos 46.0 +934 77 training.batch_size 2.0 +934 78 model.embedding_dim 2.0 +934 78 model.relation_dim 0.0 +934 78 model.scoring_fct_norm 1.0 +934 78 loss.margin 3.1930721831765334 +934 78 loss.adversarial_temperature 0.7380625570758811 +934 78 optimizer.lr 0.007512294504922219 +934 78 negative_sampler.num_negs_per_pos 62.0 +934 78 training.batch_size 1.0 +934 79 model.embedding_dim 2.0 +934 79 model.relation_dim 2.0 +934 79 model.scoring_fct_norm 2.0 +934 79 loss.margin 17.81223530239707 +934 79 loss.adversarial_temperature 0.37727466803424314 +934 79 optimizer.lr 0.0052593918314342465 +934 79 negative_sampler.num_negs_per_pos 56.0 +934 79 training.batch_size 1.0 +934 80 model.embedding_dim 0.0 +934 80 model.relation_dim 0.0 +934 80 model.scoring_fct_norm 2.0 +934 80 loss.margin 23.22628880921769 +934 80 loss.adversarial_temperature 0.5046966173662725 +934 80 optimizer.lr 0.01760919238274393 +934 80 negative_sampler.num_negs_per_pos 51.0 +934 80 training.batch_size 2.0 +934 81 
model.embedding_dim 2.0 +934 81 model.relation_dim 0.0 +934 81 model.scoring_fct_norm 2.0 +934 81 loss.margin 2.5717600177425437 +934 81 loss.adversarial_temperature 0.26108852008443106 +934 81 optimizer.lr 0.011938194392099625 +934 81 negative_sampler.num_negs_per_pos 13.0 +934 81 training.batch_size 2.0 +934 82 model.embedding_dim 0.0 +934 82 model.relation_dim 1.0 +934 82 model.scoring_fct_norm 1.0 +934 82 loss.margin 13.926987145608749 +934 82 loss.adversarial_temperature 0.7393357800703002 +934 82 optimizer.lr 0.04343858393102665 +934 82 negative_sampler.num_negs_per_pos 5.0 +934 82 training.batch_size 1.0 +934 83 model.embedding_dim 1.0 +934 83 model.relation_dim 0.0 +934 83 model.scoring_fct_norm 2.0 +934 83 loss.margin 9.348647235121016 +934 83 loss.adversarial_temperature 0.12174231021544249 +934 83 optimizer.lr 0.002267017690005787 +934 83 negative_sampler.num_negs_per_pos 62.0 +934 83 training.batch_size 2.0 +934 84 model.embedding_dim 0.0 +934 84 model.relation_dim 1.0 +934 84 model.scoring_fct_norm 1.0 +934 84 loss.margin 28.32816174786469 +934 84 loss.adversarial_temperature 0.6729653329292392 +934 84 optimizer.lr 0.014947792911813465 +934 84 negative_sampler.num_negs_per_pos 27.0 +934 84 training.batch_size 1.0 +934 85 model.embedding_dim 0.0 +934 85 model.relation_dim 1.0 +934 85 model.scoring_fct_norm 1.0 +934 85 loss.margin 26.439069104033564 +934 85 loss.adversarial_temperature 0.8908955342786826 +934 85 optimizer.lr 0.0014283302159128924 +934 85 negative_sampler.num_negs_per_pos 8.0 +934 85 training.batch_size 0.0 +934 86 model.embedding_dim 0.0 +934 86 model.relation_dim 0.0 +934 86 model.scoring_fct_norm 1.0 +934 86 loss.margin 27.111094729396278 +934 86 loss.adversarial_temperature 0.4572271837212517 +934 86 optimizer.lr 0.004089378898485508 +934 86 negative_sampler.num_negs_per_pos 82.0 +934 86 training.batch_size 1.0 +934 87 model.embedding_dim 0.0 +934 87 model.relation_dim 2.0 +934 87 model.scoring_fct_norm 2.0 +934 87 loss.margin 
25.305135460136928 +934 87 loss.adversarial_temperature 0.4804389361795369 +934 87 optimizer.lr 0.0016808372268133755 +934 87 negative_sampler.num_negs_per_pos 25.0 +934 87 training.batch_size 2.0 +934 88 model.embedding_dim 2.0 +934 88 model.relation_dim 2.0 +934 88 model.scoring_fct_norm 1.0 +934 88 loss.margin 19.366213690470236 +934 88 loss.adversarial_temperature 0.47400985278432206 +934 88 optimizer.lr 0.010689462385000957 +934 88 negative_sampler.num_negs_per_pos 24.0 +934 88 training.batch_size 1.0 +934 89 model.embedding_dim 1.0 +934 89 model.relation_dim 1.0 +934 89 model.scoring_fct_norm 1.0 +934 89 loss.margin 29.914193677549672 +934 89 loss.adversarial_temperature 0.9670875350724749 +934 89 optimizer.lr 0.00220965612585127 +934 89 negative_sampler.num_negs_per_pos 72.0 +934 89 training.batch_size 2.0 +934 90 model.embedding_dim 2.0 +934 90 model.relation_dim 1.0 +934 90 model.scoring_fct_norm 2.0 +934 90 loss.margin 2.630639951237598 +934 90 loss.adversarial_temperature 0.4195167711324256 +934 90 optimizer.lr 0.06361409849206588 +934 90 negative_sampler.num_negs_per_pos 20.0 +934 90 training.batch_size 1.0 +934 91 model.embedding_dim 1.0 +934 91 model.relation_dim 0.0 +934 91 model.scoring_fct_norm 2.0 +934 91 loss.margin 7.502699095812958 +934 91 loss.adversarial_temperature 0.8019164988870673 +934 91 optimizer.lr 0.06352266685575311 +934 91 negative_sampler.num_negs_per_pos 75.0 +934 91 training.batch_size 1.0 +934 92 model.embedding_dim 0.0 +934 92 model.relation_dim 1.0 +934 92 model.scoring_fct_norm 1.0 +934 92 loss.margin 10.856319030137566 +934 92 loss.adversarial_temperature 0.5379302440272961 +934 92 optimizer.lr 0.001270692651040418 +934 92 negative_sampler.num_negs_per_pos 92.0 +934 92 training.batch_size 1.0 +934 93 model.embedding_dim 0.0 +934 93 model.relation_dim 0.0 +934 93 model.scoring_fct_norm 1.0 +934 93 loss.margin 8.612801247246576 +934 93 loss.adversarial_temperature 0.4470056290061982 +934 93 optimizer.lr 0.003184142188953875 
+934 93 negative_sampler.num_negs_per_pos 58.0 +934 93 training.batch_size 1.0 +934 94 model.embedding_dim 2.0 +934 94 model.relation_dim 2.0 +934 94 model.scoring_fct_norm 1.0 +934 94 loss.margin 10.965453863064985 +934 94 loss.adversarial_temperature 0.5464594107492743 +934 94 optimizer.lr 0.0039906705276732506 +934 94 negative_sampler.num_negs_per_pos 18.0 +934 94 training.batch_size 0.0 +934 95 model.embedding_dim 2.0 +934 95 model.relation_dim 2.0 +934 95 model.scoring_fct_norm 1.0 +934 95 loss.margin 28.18075124274843 +934 95 loss.adversarial_temperature 0.3840010600198921 +934 95 optimizer.lr 0.006084493219252882 +934 95 negative_sampler.num_negs_per_pos 21.0 +934 95 training.batch_size 1.0 +934 96 model.embedding_dim 1.0 +934 96 model.relation_dim 0.0 +934 96 model.scoring_fct_norm 2.0 +934 96 loss.margin 21.403108263794305 +934 96 loss.adversarial_temperature 0.4090448872914436 +934 96 optimizer.lr 0.002587975962133137 +934 96 negative_sampler.num_negs_per_pos 93.0 +934 96 training.batch_size 1.0 +934 97 model.embedding_dim 2.0 +934 97 model.relation_dim 1.0 +934 97 model.scoring_fct_norm 1.0 +934 97 loss.margin 25.00687629694684 +934 97 loss.adversarial_temperature 0.5301624913667741 +934 97 optimizer.lr 0.0014123937697511821 +934 97 negative_sampler.num_negs_per_pos 32.0 +934 97 training.batch_size 1.0 +934 98 model.embedding_dim 0.0 +934 98 model.relation_dim 2.0 +934 98 model.scoring_fct_norm 1.0 +934 98 loss.margin 19.632671516685868 +934 98 loss.adversarial_temperature 0.9076644658586388 +934 98 optimizer.lr 0.06123132839275111 +934 98 negative_sampler.num_negs_per_pos 41.0 +934 98 training.batch_size 2.0 +934 99 model.embedding_dim 1.0 +934 99 model.relation_dim 2.0 +934 99 model.scoring_fct_norm 2.0 +934 99 loss.margin 4.697680108696658 +934 99 loss.adversarial_temperature 0.810248059841171 +934 99 optimizer.lr 0.001610692128223034 +934 99 negative_sampler.num_negs_per_pos 6.0 +934 99 training.batch_size 0.0 +934 100 model.embedding_dim 1.0 +934 
100 model.relation_dim 0.0 +934 100 model.scoring_fct_norm 1.0 +934 100 loss.margin 6.274722992086276 +934 100 loss.adversarial_temperature 0.8269643728663819 +934 100 optimizer.lr 0.021307271497965585 +934 100 negative_sampler.num_negs_per_pos 83.0 +934 100 training.batch_size 1.0 +934 1 dataset """kinships""" +934 1 model """transr""" +934 1 loss """nssa""" +934 1 regularizer """no""" +934 1 optimizer """adam""" +934 1 training_loop """owa""" +934 1 negative_sampler """basic""" +934 1 evaluator """rankbased""" +934 2 dataset """kinships""" +934 2 model """transr""" +934 2 loss """nssa""" +934 2 regularizer """no""" +934 2 optimizer """adam""" +934 2 training_loop """owa""" +934 2 negative_sampler """basic""" +934 2 evaluator """rankbased""" +934 3 dataset """kinships""" +934 3 model """transr""" +934 3 loss """nssa""" +934 3 regularizer """no""" +934 3 optimizer """adam""" +934 3 training_loop """owa""" +934 3 negative_sampler """basic""" +934 3 evaluator """rankbased""" +934 4 dataset """kinships""" +934 4 model """transr""" +934 4 loss """nssa""" +934 4 regularizer """no""" +934 4 optimizer """adam""" +934 4 training_loop """owa""" +934 4 negative_sampler """basic""" +934 4 evaluator """rankbased""" +934 5 dataset """kinships""" +934 5 model """transr""" +934 5 loss """nssa""" +934 5 regularizer """no""" +934 5 optimizer """adam""" +934 5 training_loop """owa""" +934 5 negative_sampler """basic""" +934 5 evaluator """rankbased""" +934 6 dataset """kinships""" +934 6 model """transr""" +934 6 loss """nssa""" +934 6 regularizer """no""" +934 6 optimizer """adam""" +934 6 training_loop """owa""" +934 6 negative_sampler """basic""" +934 6 evaluator """rankbased""" +934 7 dataset """kinships""" +934 7 model """transr""" +934 7 loss """nssa""" +934 7 regularizer """no""" +934 7 optimizer """adam""" +934 7 training_loop """owa""" +934 7 negative_sampler """basic""" +934 7 evaluator """rankbased""" +934 8 dataset """kinships""" +934 8 model """transr""" +934 8 loss 
"""nssa""" +934 8 regularizer """no""" +934 8 optimizer """adam""" +934 8 training_loop """owa""" +934 8 negative_sampler """basic""" +934 8 evaluator """rankbased""" +934 9 dataset """kinships""" +934 9 model """transr""" +934 9 loss """nssa""" +934 9 regularizer """no""" +934 9 optimizer """adam""" +934 9 training_loop """owa""" +934 9 negative_sampler """basic""" +934 9 evaluator """rankbased""" +934 10 dataset """kinships""" +934 10 model """transr""" +934 10 loss """nssa""" +934 10 regularizer """no""" +934 10 optimizer """adam""" +934 10 training_loop """owa""" +934 10 negative_sampler """basic""" +934 10 evaluator """rankbased""" +934 11 dataset """kinships""" +934 11 model """transr""" +934 11 loss """nssa""" +934 11 regularizer """no""" +934 11 optimizer """adam""" +934 11 training_loop """owa""" +934 11 negative_sampler """basic""" +934 11 evaluator """rankbased""" +934 12 dataset """kinships""" +934 12 model """transr""" +934 12 loss """nssa""" +934 12 regularizer """no""" +934 12 optimizer """adam""" +934 12 training_loop """owa""" +934 12 negative_sampler """basic""" +934 12 evaluator """rankbased""" +934 13 dataset """kinships""" +934 13 model """transr""" +934 13 loss """nssa""" +934 13 regularizer """no""" +934 13 optimizer """adam""" +934 13 training_loop """owa""" +934 13 negative_sampler """basic""" +934 13 evaluator """rankbased""" +934 14 dataset """kinships""" +934 14 model """transr""" +934 14 loss """nssa""" +934 14 regularizer """no""" +934 14 optimizer """adam""" +934 14 training_loop """owa""" +934 14 negative_sampler """basic""" +934 14 evaluator """rankbased""" +934 15 dataset """kinships""" +934 15 model """transr""" +934 15 loss """nssa""" +934 15 regularizer """no""" +934 15 optimizer """adam""" +934 15 training_loop """owa""" +934 15 negative_sampler """basic""" +934 15 evaluator """rankbased""" +934 16 dataset """kinships""" +934 16 model """transr""" +934 16 loss """nssa""" +934 16 regularizer """no""" +934 16 optimizer """adam""" 
+934 16 training_loop """owa""" +934 16 negative_sampler """basic""" +934 16 evaluator """rankbased""" +934 17 dataset """kinships""" +934 17 model """transr""" +934 17 loss """nssa""" +934 17 regularizer """no""" +934 17 optimizer """adam""" +934 17 training_loop """owa""" +934 17 negative_sampler """basic""" +934 17 evaluator """rankbased""" +934 18 dataset """kinships""" +934 18 model """transr""" +934 18 loss """nssa""" +934 18 regularizer """no""" +934 18 optimizer """adam""" +934 18 training_loop """owa""" +934 18 negative_sampler """basic""" +934 18 evaluator """rankbased""" +934 19 dataset """kinships""" +934 19 model """transr""" +934 19 loss """nssa""" +934 19 regularizer """no""" +934 19 optimizer """adam""" +934 19 training_loop """owa""" +934 19 negative_sampler """basic""" +934 19 evaluator """rankbased""" +934 20 dataset """kinships""" +934 20 model """transr""" +934 20 loss """nssa""" +934 20 regularizer """no""" +934 20 optimizer """adam""" +934 20 training_loop """owa""" +934 20 negative_sampler """basic""" +934 20 evaluator """rankbased""" +934 21 dataset """kinships""" +934 21 model """transr""" +934 21 loss """nssa""" +934 21 regularizer """no""" +934 21 optimizer """adam""" +934 21 training_loop """owa""" +934 21 negative_sampler """basic""" +934 21 evaluator """rankbased""" +934 22 dataset """kinships""" +934 22 model """transr""" +934 22 loss """nssa""" +934 22 regularizer """no""" +934 22 optimizer """adam""" +934 22 training_loop """owa""" +934 22 negative_sampler """basic""" +934 22 evaluator """rankbased""" +934 23 dataset """kinships""" +934 23 model """transr""" +934 23 loss """nssa""" +934 23 regularizer """no""" +934 23 optimizer """adam""" +934 23 training_loop """owa""" +934 23 negative_sampler """basic""" +934 23 evaluator """rankbased""" +934 24 dataset """kinships""" +934 24 model """transr""" +934 24 loss """nssa""" +934 24 regularizer """no""" +934 24 optimizer """adam""" +934 24 training_loop """owa""" +934 24 
negative_sampler """basic""" +934 24 evaluator """rankbased""" +934 25 dataset """kinships""" +934 25 model """transr""" +934 25 loss """nssa""" +934 25 regularizer """no""" +934 25 optimizer """adam""" +934 25 training_loop """owa""" +934 25 negative_sampler """basic""" +934 25 evaluator """rankbased""" +934 26 dataset """kinships""" +934 26 model """transr""" +934 26 loss """nssa""" +934 26 regularizer """no""" +934 26 optimizer """adam""" +934 26 training_loop """owa""" +934 26 negative_sampler """basic""" +934 26 evaluator """rankbased""" +934 27 dataset """kinships""" +934 27 model """transr""" +934 27 loss """nssa""" +934 27 regularizer """no""" +934 27 optimizer """adam""" +934 27 training_loop """owa""" +934 27 negative_sampler """basic""" +934 27 evaluator """rankbased""" +934 28 dataset """kinships""" +934 28 model """transr""" +934 28 loss """nssa""" +934 28 regularizer """no""" +934 28 optimizer """adam""" +934 28 training_loop """owa""" +934 28 negative_sampler """basic""" +934 28 evaluator """rankbased""" +934 29 dataset """kinships""" +934 29 model """transr""" +934 29 loss """nssa""" +934 29 regularizer """no""" +934 29 optimizer """adam""" +934 29 training_loop """owa""" +934 29 negative_sampler """basic""" +934 29 evaluator """rankbased""" +934 30 dataset """kinships""" +934 30 model """transr""" +934 30 loss """nssa""" +934 30 regularizer """no""" +934 30 optimizer """adam""" +934 30 training_loop """owa""" +934 30 negative_sampler """basic""" +934 30 evaluator """rankbased""" +934 31 dataset """kinships""" +934 31 model """transr""" +934 31 loss """nssa""" +934 31 regularizer """no""" +934 31 optimizer """adam""" +934 31 training_loop """owa""" +934 31 negative_sampler """basic""" +934 31 evaluator """rankbased""" +934 32 dataset """kinships""" +934 32 model """transr""" +934 32 loss """nssa""" +934 32 regularizer """no""" +934 32 optimizer """adam""" +934 32 training_loop """owa""" +934 32 negative_sampler """basic""" +934 32 evaluator 
"""rankbased""" +934 33 dataset """kinships""" +934 33 model """transr""" +934 33 loss """nssa""" +934 33 regularizer """no""" +934 33 optimizer """adam""" +934 33 training_loop """owa""" +934 33 negative_sampler """basic""" +934 33 evaluator """rankbased""" +934 34 dataset """kinships""" +934 34 model """transr""" +934 34 loss """nssa""" +934 34 regularizer """no""" +934 34 optimizer """adam""" +934 34 training_loop """owa""" +934 34 negative_sampler """basic""" +934 34 evaluator """rankbased""" +934 35 dataset """kinships""" +934 35 model """transr""" +934 35 loss """nssa""" +934 35 regularizer """no""" +934 35 optimizer """adam""" +934 35 training_loop """owa""" +934 35 negative_sampler """basic""" +934 35 evaluator """rankbased""" +934 36 dataset """kinships""" +934 36 model """transr""" +934 36 loss """nssa""" +934 36 regularizer """no""" +934 36 optimizer """adam""" +934 36 training_loop """owa""" +934 36 negative_sampler """basic""" +934 36 evaluator """rankbased""" +934 37 dataset """kinships""" +934 37 model """transr""" +934 37 loss """nssa""" +934 37 regularizer """no""" +934 37 optimizer """adam""" +934 37 training_loop """owa""" +934 37 negative_sampler """basic""" +934 37 evaluator """rankbased""" +934 38 dataset """kinships""" +934 38 model """transr""" +934 38 loss """nssa""" +934 38 regularizer """no""" +934 38 optimizer """adam""" +934 38 training_loop """owa""" +934 38 negative_sampler """basic""" +934 38 evaluator """rankbased""" +934 39 dataset """kinships""" +934 39 model """transr""" +934 39 loss """nssa""" +934 39 regularizer """no""" +934 39 optimizer """adam""" +934 39 training_loop """owa""" +934 39 negative_sampler """basic""" +934 39 evaluator """rankbased""" +934 40 dataset """kinships""" +934 40 model """transr""" +934 40 loss """nssa""" +934 40 regularizer """no""" +934 40 optimizer """adam""" +934 40 training_loop """owa""" +934 40 negative_sampler """basic""" +934 40 evaluator """rankbased""" +934 41 dataset """kinships""" +934 41 
model """transr""" +934 41 loss """nssa""" +934 41 regularizer """no""" +934 41 optimizer """adam""" +934 41 training_loop """owa""" +934 41 negative_sampler """basic""" +934 41 evaluator """rankbased""" +934 42 dataset """kinships""" +934 42 model """transr""" +934 42 loss """nssa""" +934 42 regularizer """no""" +934 42 optimizer """adam""" +934 42 training_loop """owa""" +934 42 negative_sampler """basic""" +934 42 evaluator """rankbased""" +934 43 dataset """kinships""" +934 43 model """transr""" +934 43 loss """nssa""" +934 43 regularizer """no""" +934 43 optimizer """adam""" +934 43 training_loop """owa""" +934 43 negative_sampler """basic""" +934 43 evaluator """rankbased""" +934 44 dataset """kinships""" +934 44 model """transr""" +934 44 loss """nssa""" +934 44 regularizer """no""" +934 44 optimizer """adam""" +934 44 training_loop """owa""" +934 44 negative_sampler """basic""" +934 44 evaluator """rankbased""" +934 45 dataset """kinships""" +934 45 model """transr""" +934 45 loss """nssa""" +934 45 regularizer """no""" +934 45 optimizer """adam""" +934 45 training_loop """owa""" +934 45 negative_sampler """basic""" +934 45 evaluator """rankbased""" +934 46 dataset """kinships""" +934 46 model """transr""" +934 46 loss """nssa""" +934 46 regularizer """no""" +934 46 optimizer """adam""" +934 46 training_loop """owa""" +934 46 negative_sampler """basic""" +934 46 evaluator """rankbased""" +934 47 dataset """kinships""" +934 47 model """transr""" +934 47 loss """nssa""" +934 47 regularizer """no""" +934 47 optimizer """adam""" +934 47 training_loop """owa""" +934 47 negative_sampler """basic""" +934 47 evaluator """rankbased""" +934 48 dataset """kinships""" +934 48 model """transr""" +934 48 loss """nssa""" +934 48 regularizer """no""" +934 48 optimizer """adam""" +934 48 training_loop """owa""" +934 48 negative_sampler """basic""" +934 48 evaluator """rankbased""" +934 49 dataset """kinships""" +934 49 model """transr""" +934 49 loss """nssa""" +934 49 
regularizer """no""" +934 49 optimizer """adam""" +934 49 training_loop """owa""" +934 49 negative_sampler """basic""" +934 49 evaluator """rankbased""" +934 50 dataset """kinships""" +934 50 model """transr""" +934 50 loss """nssa""" +934 50 regularizer """no""" +934 50 optimizer """adam""" +934 50 training_loop """owa""" +934 50 negative_sampler """basic""" +934 50 evaluator """rankbased""" +934 51 dataset """kinships""" +934 51 model """transr""" +934 51 loss """nssa""" +934 51 regularizer """no""" +934 51 optimizer """adam""" +934 51 training_loop """owa""" +934 51 negative_sampler """basic""" +934 51 evaluator """rankbased""" +934 52 dataset """kinships""" +934 52 model """transr""" +934 52 loss """nssa""" +934 52 regularizer """no""" +934 52 optimizer """adam""" +934 52 training_loop """owa""" +934 52 negative_sampler """basic""" +934 52 evaluator """rankbased""" +934 53 dataset """kinships""" +934 53 model """transr""" +934 53 loss """nssa""" +934 53 regularizer """no""" +934 53 optimizer """adam""" +934 53 training_loop """owa""" +934 53 negative_sampler """basic""" +934 53 evaluator """rankbased""" +934 54 dataset """kinships""" +934 54 model """transr""" +934 54 loss """nssa""" +934 54 regularizer """no""" +934 54 optimizer """adam""" +934 54 training_loop """owa""" +934 54 negative_sampler """basic""" +934 54 evaluator """rankbased""" +934 55 dataset """kinships""" +934 55 model """transr""" +934 55 loss """nssa""" +934 55 regularizer """no""" +934 55 optimizer """adam""" +934 55 training_loop """owa""" +934 55 negative_sampler """basic""" +934 55 evaluator """rankbased""" +934 56 dataset """kinships""" +934 56 model """transr""" +934 56 loss """nssa""" +934 56 regularizer """no""" +934 56 optimizer """adam""" +934 56 training_loop """owa""" +934 56 negative_sampler """basic""" +934 56 evaluator """rankbased""" +934 57 dataset """kinships""" +934 57 model """transr""" +934 57 loss """nssa""" +934 57 regularizer """no""" +934 57 optimizer """adam""" +934 
57 training_loop """owa""" +934 57 negative_sampler """basic""" +934 57 evaluator """rankbased""" +934 58 dataset """kinships""" +934 58 model """transr""" +934 58 loss """nssa""" +934 58 regularizer """no""" +934 58 optimizer """adam""" +934 58 training_loop """owa""" +934 58 negative_sampler """basic""" +934 58 evaluator """rankbased""" +934 59 dataset """kinships""" +934 59 model """transr""" +934 59 loss """nssa""" +934 59 regularizer """no""" +934 59 optimizer """adam""" +934 59 training_loop """owa""" +934 59 negative_sampler """basic""" +934 59 evaluator """rankbased""" +934 60 dataset """kinships""" +934 60 model """transr""" +934 60 loss """nssa""" +934 60 regularizer """no""" +934 60 optimizer """adam""" +934 60 training_loop """owa""" +934 60 negative_sampler """basic""" +934 60 evaluator """rankbased""" +934 61 dataset """kinships""" +934 61 model """transr""" +934 61 loss """nssa""" +934 61 regularizer """no""" +934 61 optimizer """adam""" +934 61 training_loop """owa""" +934 61 negative_sampler """basic""" +934 61 evaluator """rankbased""" +934 62 dataset """kinships""" +934 62 model """transr""" +934 62 loss """nssa""" +934 62 regularizer """no""" +934 62 optimizer """adam""" +934 62 training_loop """owa""" +934 62 negative_sampler """basic""" +934 62 evaluator """rankbased""" +934 63 dataset """kinships""" +934 63 model """transr""" +934 63 loss """nssa""" +934 63 regularizer """no""" +934 63 optimizer """adam""" +934 63 training_loop """owa""" +934 63 negative_sampler """basic""" +934 63 evaluator """rankbased""" +934 64 dataset """kinships""" +934 64 model """transr""" +934 64 loss """nssa""" +934 64 regularizer """no""" +934 64 optimizer """adam""" +934 64 training_loop """owa""" +934 64 negative_sampler """basic""" +934 64 evaluator """rankbased""" +934 65 dataset """kinships""" +934 65 model """transr""" +934 65 loss """nssa""" +934 65 regularizer """no""" +934 65 optimizer """adam""" +934 65 training_loop """owa""" +934 65 negative_sampler 
"""basic""" +934 65 evaluator """rankbased""" +934 66 dataset """kinships""" +934 66 model """transr""" +934 66 loss """nssa""" +934 66 regularizer """no""" +934 66 optimizer """adam""" +934 66 training_loop """owa""" +934 66 negative_sampler """basic""" +934 66 evaluator """rankbased""" +934 67 dataset """kinships""" +934 67 model """transr""" +934 67 loss """nssa""" +934 67 regularizer """no""" +934 67 optimizer """adam""" +934 67 training_loop """owa""" +934 67 negative_sampler """basic""" +934 67 evaluator """rankbased""" +934 68 dataset """kinships""" +934 68 model """transr""" +934 68 loss """nssa""" +934 68 regularizer """no""" +934 68 optimizer """adam""" +934 68 training_loop """owa""" +934 68 negative_sampler """basic""" +934 68 evaluator """rankbased""" +934 69 dataset """kinships""" +934 69 model """transr""" +934 69 loss """nssa""" +934 69 regularizer """no""" +934 69 optimizer """adam""" +934 69 training_loop """owa""" +934 69 negative_sampler """basic""" +934 69 evaluator """rankbased""" +934 70 dataset """kinships""" +934 70 model """transr""" +934 70 loss """nssa""" +934 70 regularizer """no""" +934 70 optimizer """adam""" +934 70 training_loop """owa""" +934 70 negative_sampler """basic""" +934 70 evaluator """rankbased""" +934 71 dataset """kinships""" +934 71 model """transr""" +934 71 loss """nssa""" +934 71 regularizer """no""" +934 71 optimizer """adam""" +934 71 training_loop """owa""" +934 71 negative_sampler """basic""" +934 71 evaluator """rankbased""" +934 72 dataset """kinships""" +934 72 model """transr""" +934 72 loss """nssa""" +934 72 regularizer """no""" +934 72 optimizer """adam""" +934 72 training_loop """owa""" +934 72 negative_sampler """basic""" +934 72 evaluator """rankbased""" +934 73 dataset """kinships""" +934 73 model """transr""" +934 73 loss """nssa""" +934 73 regularizer """no""" +934 73 optimizer """adam""" +934 73 training_loop """owa""" +934 73 negative_sampler """basic""" +934 73 evaluator """rankbased""" +934 74 
dataset """kinships""" +934 74 model """transr""" +934 74 loss """nssa""" +934 74 regularizer """no""" +934 74 optimizer """adam""" +934 74 training_loop """owa""" +934 74 negative_sampler """basic""" +934 74 evaluator """rankbased""" +934 75 dataset """kinships""" +934 75 model """transr""" +934 75 loss """nssa""" +934 75 regularizer """no""" +934 75 optimizer """adam""" +934 75 training_loop """owa""" +934 75 negative_sampler """basic""" +934 75 evaluator """rankbased""" +934 76 dataset """kinships""" +934 76 model """transr""" +934 76 loss """nssa""" +934 76 regularizer """no""" +934 76 optimizer """adam""" +934 76 training_loop """owa""" +934 76 negative_sampler """basic""" +934 76 evaluator """rankbased""" +934 77 dataset """kinships""" +934 77 model """transr""" +934 77 loss """nssa""" +934 77 regularizer """no""" +934 77 optimizer """adam""" +934 77 training_loop """owa""" +934 77 negative_sampler """basic""" +934 77 evaluator """rankbased""" +934 78 dataset """kinships""" +934 78 model """transr""" +934 78 loss """nssa""" +934 78 regularizer """no""" +934 78 optimizer """adam""" +934 78 training_loop """owa""" +934 78 negative_sampler """basic""" +934 78 evaluator """rankbased""" +934 79 dataset """kinships""" +934 79 model """transr""" +934 79 loss """nssa""" +934 79 regularizer """no""" +934 79 optimizer """adam""" +934 79 training_loop """owa""" +934 79 negative_sampler """basic""" +934 79 evaluator """rankbased""" +934 80 dataset """kinships""" +934 80 model """transr""" +934 80 loss """nssa""" +934 80 regularizer """no""" +934 80 optimizer """adam""" +934 80 training_loop """owa""" +934 80 negative_sampler """basic""" +934 80 evaluator """rankbased""" +934 81 dataset """kinships""" +934 81 model """transr""" +934 81 loss """nssa""" +934 81 regularizer """no""" +934 81 optimizer """adam""" +934 81 training_loop """owa""" +934 81 negative_sampler """basic""" +934 81 evaluator """rankbased""" +934 82 dataset """kinships""" +934 82 model """transr""" +934 
82 loss """nssa""" +934 82 regularizer """no""" +934 82 optimizer """adam""" +934 82 training_loop """owa""" +934 82 negative_sampler """basic""" +934 82 evaluator """rankbased""" +934 83 dataset """kinships""" +934 83 model """transr""" +934 83 loss """nssa""" +934 83 regularizer """no""" +934 83 optimizer """adam""" +934 83 training_loop """owa""" +934 83 negative_sampler """basic""" +934 83 evaluator """rankbased""" +934 84 dataset """kinships""" +934 84 model """transr""" +934 84 loss """nssa""" +934 84 regularizer """no""" +934 84 optimizer """adam""" +934 84 training_loop """owa""" +934 84 negative_sampler """basic""" +934 84 evaluator """rankbased""" +934 85 dataset """kinships""" +934 85 model """transr""" +934 85 loss """nssa""" +934 85 regularizer """no""" +934 85 optimizer """adam""" +934 85 training_loop """owa""" +934 85 negative_sampler """basic""" +934 85 evaluator """rankbased""" +934 86 dataset """kinships""" +934 86 model """transr""" +934 86 loss """nssa""" +934 86 regularizer """no""" +934 86 optimizer """adam""" +934 86 training_loop """owa""" +934 86 negative_sampler """basic""" +934 86 evaluator """rankbased""" +934 87 dataset """kinships""" +934 87 model """transr""" +934 87 loss """nssa""" +934 87 regularizer """no""" +934 87 optimizer """adam""" +934 87 training_loop """owa""" +934 87 negative_sampler """basic""" +934 87 evaluator """rankbased""" +934 88 dataset """kinships""" +934 88 model """transr""" +934 88 loss """nssa""" +934 88 regularizer """no""" +934 88 optimizer """adam""" +934 88 training_loop """owa""" +934 88 negative_sampler """basic""" +934 88 evaluator """rankbased""" +934 89 dataset """kinships""" +934 89 model """transr""" +934 89 loss """nssa""" +934 89 regularizer """no""" +934 89 optimizer """adam""" +934 89 training_loop """owa""" +934 89 negative_sampler """basic""" +934 89 evaluator """rankbased""" +934 90 dataset """kinships""" +934 90 model """transr""" +934 90 loss """nssa""" +934 90 regularizer """no""" +934 90 
optimizer """adam""" +934 90 training_loop """owa""" +934 90 negative_sampler """basic""" +934 90 evaluator """rankbased""" +934 91 dataset """kinships""" +934 91 model """transr""" +934 91 loss """nssa""" +934 91 regularizer """no""" +934 91 optimizer """adam""" +934 91 training_loop """owa""" +934 91 negative_sampler """basic""" +934 91 evaluator """rankbased""" +934 92 dataset """kinships""" +934 92 model """transr""" +934 92 loss """nssa""" +934 92 regularizer """no""" +934 92 optimizer """adam""" +934 92 training_loop """owa""" +934 92 negative_sampler """basic""" +934 92 evaluator """rankbased""" +934 93 dataset """kinships""" +934 93 model """transr""" +934 93 loss """nssa""" +934 93 regularizer """no""" +934 93 optimizer """adam""" +934 93 training_loop """owa""" +934 93 negative_sampler """basic""" +934 93 evaluator """rankbased""" +934 94 dataset """kinships""" +934 94 model """transr""" +934 94 loss """nssa""" +934 94 regularizer """no""" +934 94 optimizer """adam""" +934 94 training_loop """owa""" +934 94 negative_sampler """basic""" +934 94 evaluator """rankbased""" +934 95 dataset """kinships""" +934 95 model """transr""" +934 95 loss """nssa""" +934 95 regularizer """no""" +934 95 optimizer """adam""" +934 95 training_loop """owa""" +934 95 negative_sampler """basic""" +934 95 evaluator """rankbased""" +934 96 dataset """kinships""" +934 96 model """transr""" +934 96 loss """nssa""" +934 96 regularizer """no""" +934 96 optimizer """adam""" +934 96 training_loop """owa""" +934 96 negative_sampler """basic""" +934 96 evaluator """rankbased""" +934 97 dataset """kinships""" +934 97 model """transr""" +934 97 loss """nssa""" +934 97 regularizer """no""" +934 97 optimizer """adam""" +934 97 training_loop """owa""" +934 97 negative_sampler """basic""" +934 97 evaluator """rankbased""" +934 98 dataset """kinships""" +934 98 model """transr""" +934 98 loss """nssa""" +934 98 regularizer """no""" +934 98 optimizer """adam""" +934 98 training_loop """owa""" 
+934 98 negative_sampler """basic""" +934 98 evaluator """rankbased""" +934 99 dataset """kinships""" +934 99 model """transr""" +934 99 loss """nssa""" +934 99 regularizer """no""" +934 99 optimizer """adam""" +934 99 training_loop """owa""" +934 99 negative_sampler """basic""" +934 99 evaluator """rankbased""" +934 100 dataset """kinships""" +934 100 model """transr""" +934 100 loss """nssa""" +934 100 regularizer """no""" +934 100 optimizer """adam""" +934 100 training_loop """owa""" +934 100 negative_sampler """basic""" +934 100 evaluator """rankbased""" +935 1 model.embedding_dim 1.0 +935 1 model.relation_dim 0.0 +935 1 model.scoring_fct_norm 1.0 +935 1 optimizer.lr 0.0024220668188894854 +935 1 training.batch_size 0.0 +935 1 training.label_smoothing 0.003946277095009144 +935 2 model.embedding_dim 1.0 +935 2 model.relation_dim 0.0 +935 2 model.scoring_fct_norm 1.0 +935 2 optimizer.lr 0.007140089236626014 +935 2 training.batch_size 0.0 +935 2 training.label_smoothing 0.0024484053480430343 +935 3 model.embedding_dim 1.0 +935 3 model.relation_dim 2.0 +935 3 model.scoring_fct_norm 2.0 +935 3 optimizer.lr 0.06282162116326792 +935 3 training.batch_size 2.0 +935 3 training.label_smoothing 0.003007819012229384 +935 4 model.embedding_dim 2.0 +935 4 model.relation_dim 0.0 +935 4 model.scoring_fct_norm 1.0 +935 4 optimizer.lr 0.03721601317504715 +935 4 training.batch_size 0.0 +935 4 training.label_smoothing 0.03753529193314867 +935 5 model.embedding_dim 2.0 +935 5 model.relation_dim 2.0 +935 5 model.scoring_fct_norm 2.0 +935 5 optimizer.lr 0.001992801970947313 +935 5 training.batch_size 1.0 +935 5 training.label_smoothing 0.0040057319554246 +935 1 dataset """wn18rr""" +935 1 model """transr""" +935 1 loss """softplus""" +935 1 regularizer """no""" +935 1 optimizer """adam""" +935 1 training_loop """lcwa""" +935 1 evaluator """rankbased""" +935 2 dataset """wn18rr""" +935 2 model """transr""" +935 2 loss """softplus""" +935 2 regularizer """no""" +935 2 optimizer 
"""adam""" +935 2 training_loop """lcwa""" +935 2 evaluator """rankbased""" +935 3 dataset """wn18rr""" +935 3 model """transr""" +935 3 loss """softplus""" +935 3 regularizer """no""" +935 3 optimizer """adam""" +935 3 training_loop """lcwa""" +935 3 evaluator """rankbased""" +935 4 dataset """wn18rr""" +935 4 model """transr""" +935 4 loss """softplus""" +935 4 regularizer """no""" +935 4 optimizer """adam""" +935 4 training_loop """lcwa""" +935 4 evaluator """rankbased""" +935 5 dataset """wn18rr""" +935 5 model """transr""" +935 5 loss """softplus""" +935 5 regularizer """no""" +935 5 optimizer """adam""" +935 5 training_loop """lcwa""" +935 5 evaluator """rankbased""" +936 1 model.embedding_dim 2.0 +936 1 model.relation_dim 1.0 +936 1 model.scoring_fct_norm 2.0 +936 1 optimizer.lr 0.0017697061379118749 +936 1 training.batch_size 0.0 +936 1 training.label_smoothing 0.002036496838950553 +936 2 model.embedding_dim 1.0 +936 2 model.relation_dim 1.0 +936 2 model.scoring_fct_norm 1.0 +936 2 optimizer.lr 0.022642553303197675 +936 2 training.batch_size 2.0 +936 2 training.label_smoothing 0.25469027629435426 +936 3 model.embedding_dim 2.0 +936 3 model.relation_dim 2.0 +936 3 model.scoring_fct_norm 1.0 +936 3 optimizer.lr 0.0895469238500704 +936 3 training.batch_size 1.0 +936 3 training.label_smoothing 0.1820563102587784 +936 4 model.embedding_dim 2.0 +936 4 model.relation_dim 2.0 +936 4 model.scoring_fct_norm 1.0 +936 4 optimizer.lr 0.0775071757610818 +936 4 training.batch_size 1.0 +936 4 training.label_smoothing 0.046539394045095064 +936 5 model.embedding_dim 2.0 +936 5 model.relation_dim 0.0 +936 5 model.scoring_fct_norm 1.0 +936 5 optimizer.lr 0.02981641006350876 +936 5 training.batch_size 2.0 +936 5 training.label_smoothing 0.14543976247406398 +936 1 dataset """wn18rr""" +936 1 model """transr""" +936 1 loss """softplus""" +936 1 regularizer """no""" +936 1 optimizer """adam""" +936 1 training_loop """lcwa""" +936 1 evaluator """rankbased""" +936 2 dataset 
"""wn18rr""" +936 2 model """transr""" +936 2 loss """softplus""" +936 2 regularizer """no""" +936 2 optimizer """adam""" +936 2 training_loop """lcwa""" +936 2 evaluator """rankbased""" +936 3 dataset """wn18rr""" +936 3 model """transr""" +936 3 loss """softplus""" +936 3 regularizer """no""" +936 3 optimizer """adam""" +936 3 training_loop """lcwa""" +936 3 evaluator """rankbased""" +936 4 dataset """wn18rr""" +936 4 model """transr""" +936 4 loss """softplus""" +936 4 regularizer """no""" +936 4 optimizer """adam""" +936 4 training_loop """lcwa""" +936 4 evaluator """rankbased""" +936 5 dataset """wn18rr""" +936 5 model """transr""" +936 5 loss """softplus""" +936 5 regularizer """no""" +936 5 optimizer """adam""" +936 5 training_loop """lcwa""" +936 5 evaluator """rankbased""" +937 1 model.embedding_dim 0.0 +937 1 model.relation_dim 0.0 +937 1 model.scoring_fct_norm 1.0 +937 1 optimizer.lr 0.002265749275546705 +937 1 training.batch_size 2.0 +937 1 training.label_smoothing 0.6704263065510782 +937 2 model.embedding_dim 1.0 +937 2 model.relation_dim 2.0 +937 2 model.scoring_fct_norm 1.0 +937 2 optimizer.lr 0.028959346286982414 +937 2 training.batch_size 0.0 +937 2 training.label_smoothing 0.22325448124983507 +937 3 model.embedding_dim 1.0 +937 3 model.relation_dim 1.0 +937 3 model.scoring_fct_norm 2.0 +937 3 optimizer.lr 0.003803836518654516 +937 3 training.batch_size 2.0 +937 3 training.label_smoothing 0.7030294547543939 +937 4 model.embedding_dim 2.0 +937 4 model.relation_dim 1.0 +937 4 model.scoring_fct_norm 2.0 +937 4 optimizer.lr 0.03995153596651928 +937 4 training.batch_size 0.0 +937 4 training.label_smoothing 0.22340063669898 +937 1 dataset """wn18rr""" +937 1 model """transr""" +937 1 loss """crossentropy""" +937 1 regularizer """no""" +937 1 optimizer """adam""" +937 1 training_loop """lcwa""" +937 1 evaluator """rankbased""" +937 2 dataset """wn18rr""" +937 2 model """transr""" +937 2 loss """crossentropy""" +937 2 regularizer """no""" +937 2 optimizer 
"""adam""" +937 2 training_loop """lcwa""" +937 2 evaluator """rankbased""" +937 3 dataset """wn18rr""" +937 3 model """transr""" +937 3 loss """crossentropy""" +937 3 regularizer """no""" +937 3 optimizer """adam""" +937 3 training_loop """lcwa""" +937 3 evaluator """rankbased""" +937 4 dataset """wn18rr""" +937 4 model """transr""" +937 4 loss """crossentropy""" +937 4 regularizer """no""" +937 4 optimizer """adam""" +937 4 training_loop """lcwa""" +937 4 evaluator """rankbased""" +938 1 model.embedding_dim 1.0 +938 1 model.relation_dim 1.0 +938 1 model.scoring_fct_norm 2.0 +938 1 optimizer.lr 0.0013032679869822606 +938 1 training.batch_size 0.0 +938 1 training.label_smoothing 0.0020124946022295905 +938 2 model.embedding_dim 2.0 +938 2 model.relation_dim 2.0 +938 2 model.scoring_fct_norm 2.0 +938 2 optimizer.lr 0.0036301439587818 +938 2 training.batch_size 0.0 +938 2 training.label_smoothing 0.02149602485091911 +938 3 model.embedding_dim 2.0 +938 3 model.relation_dim 0.0 +938 3 model.scoring_fct_norm 1.0 +938 3 optimizer.lr 0.028609667059404997 +938 3 training.batch_size 0.0 +938 3 training.label_smoothing 0.0013240137699099006 +938 4 model.embedding_dim 1.0 +938 4 model.relation_dim 1.0 +938 4 model.scoring_fct_norm 1.0 +938 4 optimizer.lr 0.015162384900873793 +938 4 training.batch_size 2.0 +938 4 training.label_smoothing 0.20391120979328797 +938 1 dataset """wn18rr""" +938 1 model """transr""" +938 1 loss """crossentropy""" +938 1 regularizer """no""" +938 1 optimizer """adam""" +938 1 training_loop """lcwa""" +938 1 evaluator """rankbased""" +938 2 dataset """wn18rr""" +938 2 model """transr""" +938 2 loss """crossentropy""" +938 2 regularizer """no""" +938 2 optimizer """adam""" +938 2 training_loop """lcwa""" +938 2 evaluator """rankbased""" +938 3 dataset """wn18rr""" +938 3 model """transr""" +938 3 loss """crossentropy""" +938 3 regularizer """no""" +938 3 optimizer """adam""" +938 3 training_loop """lcwa""" +938 3 evaluator """rankbased""" +938 4 dataset 
"""wn18rr""" +938 4 model """transr""" +938 4 loss """crossentropy""" +938 4 regularizer """no""" +938 4 optimizer """adam""" +938 4 training_loop """lcwa""" +938 4 evaluator """rankbased""" +939 1 model.embedding_dim 0.0 +939 1 model.relation_dim 1.0 +939 1 model.scoring_fct_norm 2.0 +939 1 loss.margin 20.230168566764064 +939 1 loss.adversarial_temperature 0.9386524817887595 +939 1 optimizer.lr 0.033548078859477526 +939 1 negative_sampler.num_negs_per_pos 65.0 +939 1 training.batch_size 2.0 +939 2 model.embedding_dim 2.0 +939 2 model.relation_dim 2.0 +939 2 model.scoring_fct_norm 1.0 +939 2 loss.margin 22.9063303847659 +939 2 loss.adversarial_temperature 0.8481005963886831 +939 2 optimizer.lr 0.02142926580742337 +939 2 negative_sampler.num_negs_per_pos 85.0 +939 2 training.batch_size 2.0 +939 3 model.embedding_dim 2.0 +939 3 model.relation_dim 1.0 +939 3 model.scoring_fct_norm 1.0 +939 3 loss.margin 9.96186611720113 +939 3 loss.adversarial_temperature 0.6883409521251134 +939 3 optimizer.lr 0.0041471964096639 +939 3 negative_sampler.num_negs_per_pos 63.0 +939 3 training.batch_size 1.0 +939 4 model.embedding_dim 1.0 +939 4 model.relation_dim 2.0 +939 4 model.scoring_fct_norm 2.0 +939 4 loss.margin 24.88830102172392 +939 4 loss.adversarial_temperature 0.4761623090644655 +939 4 optimizer.lr 0.07966748772152595 +939 4 negative_sampler.num_negs_per_pos 23.0 +939 4 training.batch_size 2.0 +939 5 model.embedding_dim 0.0 +939 5 model.relation_dim 0.0 +939 5 model.scoring_fct_norm 1.0 +939 5 loss.margin 4.540448579819399 +939 5 loss.adversarial_temperature 0.10912944973707164 +939 5 optimizer.lr 0.0012152982443963108 +939 5 negative_sampler.num_negs_per_pos 46.0 +939 5 training.batch_size 1.0 +939 6 model.embedding_dim 2.0 +939 6 model.relation_dim 2.0 +939 6 model.scoring_fct_norm 2.0 +939 6 loss.margin 22.999032861409127 +939 6 loss.adversarial_temperature 0.5748720093337409 +939 6 optimizer.lr 0.002174797528336322 +939 6 negative_sampler.num_negs_per_pos 20.0 +939 6 
training.batch_size 2.0 +939 7 model.embedding_dim 0.0 +939 7 model.relation_dim 1.0 +939 7 model.scoring_fct_norm 2.0 +939 7 loss.margin 21.715046830046298 +939 7 loss.adversarial_temperature 0.11449195689100465 +939 7 optimizer.lr 0.0051146549762592555 +939 7 negative_sampler.num_negs_per_pos 20.0 +939 7 training.batch_size 2.0 +939 8 model.embedding_dim 1.0 +939 8 model.relation_dim 2.0 +939 8 model.scoring_fct_norm 2.0 +939 8 loss.margin 24.733717833914504 +939 8 loss.adversarial_temperature 0.9346396328924044 +939 8 optimizer.lr 0.003144114417330749 +939 8 negative_sampler.num_negs_per_pos 92.0 +939 8 training.batch_size 1.0 +939 9 model.embedding_dim 1.0 +939 9 model.relation_dim 2.0 +939 9 model.scoring_fct_norm 1.0 +939 9 loss.margin 25.491146539973165 +939 9 loss.adversarial_temperature 0.14548076262552403 +939 9 optimizer.lr 0.02637744816717504 +939 9 negative_sampler.num_negs_per_pos 96.0 +939 9 training.batch_size 1.0 +939 10 model.embedding_dim 0.0 +939 10 model.relation_dim 1.0 +939 10 model.scoring_fct_norm 1.0 +939 10 loss.margin 28.459849396941117 +939 10 loss.adversarial_temperature 0.29456745701084236 +939 10 optimizer.lr 0.010189536208456427 +939 10 negative_sampler.num_negs_per_pos 35.0 +939 10 training.batch_size 1.0 +939 11 model.embedding_dim 0.0 +939 11 model.relation_dim 0.0 +939 11 model.scoring_fct_norm 1.0 +939 11 loss.margin 23.507786586836136 +939 11 loss.adversarial_temperature 0.14583209729762855 +939 11 optimizer.lr 0.019444026128228917 +939 11 negative_sampler.num_negs_per_pos 52.0 +939 11 training.batch_size 0.0 +939 12 model.embedding_dim 2.0 +939 12 model.relation_dim 1.0 +939 12 model.scoring_fct_norm 1.0 +939 12 loss.margin 8.09452334675914 +939 12 loss.adversarial_temperature 0.15872988485348224 +939 12 optimizer.lr 0.02921759827374905 +939 12 negative_sampler.num_negs_per_pos 77.0 +939 12 training.batch_size 0.0 +939 13 model.embedding_dim 0.0 +939 13 model.relation_dim 0.0 +939 13 model.scoring_fct_norm 1.0 +939 13 
loss.margin 18.796782799988428 +939 13 loss.adversarial_temperature 0.5870620067197074 +939 13 optimizer.lr 0.008009901919403945 +939 13 negative_sampler.num_negs_per_pos 39.0 +939 13 training.batch_size 2.0 +939 14 model.embedding_dim 1.0 +939 14 model.relation_dim 2.0 +939 14 model.scoring_fct_norm 1.0 +939 14 loss.margin 14.192090470555053 +939 14 loss.adversarial_temperature 0.6683301584837796 +939 14 optimizer.lr 0.014149443127320421 +939 14 negative_sampler.num_negs_per_pos 72.0 +939 14 training.batch_size 2.0 +939 15 model.embedding_dim 2.0 +939 15 model.relation_dim 1.0 +939 15 model.scoring_fct_norm 1.0 +939 15 loss.margin 29.69640924260182 +939 15 loss.adversarial_temperature 0.32168007748703725 +939 15 optimizer.lr 0.007207273052542333 +939 15 negative_sampler.num_negs_per_pos 34.0 +939 15 training.batch_size 1.0 +939 16 model.embedding_dim 2.0 +939 16 model.relation_dim 2.0 +939 16 model.scoring_fct_norm 2.0 +939 16 loss.margin 16.524107248724878 +939 16 loss.adversarial_temperature 0.6359264608075106 +939 16 optimizer.lr 0.0020649623159294146 +939 16 negative_sampler.num_negs_per_pos 49.0 +939 16 training.batch_size 0.0 +939 17 model.embedding_dim 1.0 +939 17 model.relation_dim 0.0 +939 17 model.scoring_fct_norm 1.0 +939 17 loss.margin 20.976494487765873 +939 17 loss.adversarial_temperature 0.7118414942992677 +939 17 optimizer.lr 0.0032366080843438606 +939 17 negative_sampler.num_negs_per_pos 83.0 +939 17 training.batch_size 2.0 +939 18 model.embedding_dim 2.0 +939 18 model.relation_dim 0.0 +939 18 model.scoring_fct_norm 2.0 +939 18 loss.margin 1.8749501866278822 +939 18 loss.adversarial_temperature 0.9781231272910027 +939 18 optimizer.lr 0.0025617134541615866 +939 18 negative_sampler.num_negs_per_pos 7.0 +939 18 training.batch_size 0.0 +939 19 model.embedding_dim 0.0 +939 19 model.relation_dim 0.0 +939 19 model.scoring_fct_norm 1.0 +939 19 loss.margin 4.235955907658364 +939 19 loss.adversarial_temperature 0.5542379125275909 +939 19 optimizer.lr 
0.056459054253902644 +939 19 negative_sampler.num_negs_per_pos 85.0 +939 19 training.batch_size 0.0 +939 20 model.embedding_dim 0.0 +939 20 model.relation_dim 2.0 +939 20 model.scoring_fct_norm 2.0 +939 20 loss.margin 18.1817549593095 +939 20 loss.adversarial_temperature 0.6812142542184736 +939 20 optimizer.lr 0.011081777139210864 +939 20 negative_sampler.num_negs_per_pos 90.0 +939 20 training.batch_size 0.0 +939 21 model.embedding_dim 0.0 +939 21 model.relation_dim 2.0 +939 21 model.scoring_fct_norm 1.0 +939 21 loss.margin 12.938245523757393 +939 21 loss.adversarial_temperature 0.45436240530982674 +939 21 optimizer.lr 0.06464214638131932 +939 21 negative_sampler.num_negs_per_pos 5.0 +939 21 training.batch_size 2.0 +939 22 model.embedding_dim 2.0 +939 22 model.relation_dim 0.0 +939 22 model.scoring_fct_norm 1.0 +939 22 loss.margin 6.565623937673269 +939 22 loss.adversarial_temperature 0.20950541426625416 +939 22 optimizer.lr 0.010646541920395273 +939 22 negative_sampler.num_negs_per_pos 30.0 +939 22 training.batch_size 1.0 +939 23 model.embedding_dim 1.0 +939 23 model.relation_dim 2.0 +939 23 model.scoring_fct_norm 2.0 +939 23 loss.margin 17.032513306647974 +939 23 loss.adversarial_temperature 0.9045454870031019 +939 23 optimizer.lr 0.05978464937216715 +939 23 negative_sampler.num_negs_per_pos 29.0 +939 23 training.batch_size 2.0 +939 24 model.embedding_dim 1.0 +939 24 model.relation_dim 2.0 +939 24 model.scoring_fct_norm 2.0 +939 24 loss.margin 16.02703541161686 +939 24 loss.adversarial_temperature 0.8471975003489834 +939 24 optimizer.lr 0.09181099563197673 +939 24 negative_sampler.num_negs_per_pos 10.0 +939 24 training.batch_size 2.0 +939 25 model.embedding_dim 2.0 +939 25 model.relation_dim 2.0 +939 25 model.scoring_fct_norm 1.0 +939 25 loss.margin 18.67389882149054 +939 25 loss.adversarial_temperature 0.4928922908269701 +939 25 optimizer.lr 0.02710059281449232 +939 25 negative_sampler.num_negs_per_pos 7.0 +939 25 training.batch_size 1.0 +939 26 
model.embedding_dim 1.0 +939 26 model.relation_dim 1.0 +939 26 model.scoring_fct_norm 2.0 +939 26 loss.margin 13.021440883562455 +939 26 loss.adversarial_temperature 0.2709859419130697 +939 26 optimizer.lr 0.0014127677075409216 +939 26 negative_sampler.num_negs_per_pos 53.0 +939 26 training.batch_size 2.0 +939 27 model.embedding_dim 0.0 +939 27 model.relation_dim 2.0 +939 27 model.scoring_fct_norm 1.0 +939 27 loss.margin 29.88929087072516 +939 27 loss.adversarial_temperature 0.9767389614742504 +939 27 optimizer.lr 0.014186875173892466 +939 27 negative_sampler.num_negs_per_pos 58.0 +939 27 training.batch_size 0.0 +939 28 model.embedding_dim 1.0 +939 28 model.relation_dim 0.0 +939 28 model.scoring_fct_norm 1.0 +939 28 loss.margin 13.324080642556941 +939 28 loss.adversarial_temperature 0.40271531253319665 +939 28 optimizer.lr 0.02560310265737053 +939 28 negative_sampler.num_negs_per_pos 40.0 +939 28 training.batch_size 0.0 +939 29 model.embedding_dim 0.0 +939 29 model.relation_dim 2.0 +939 29 model.scoring_fct_norm 2.0 +939 29 loss.margin 7.90799013654844 +939 29 loss.adversarial_temperature 0.9650026569182455 +939 29 optimizer.lr 0.003256872320842906 +939 29 negative_sampler.num_negs_per_pos 3.0 +939 29 training.batch_size 2.0 +939 30 model.embedding_dim 0.0 +939 30 model.relation_dim 1.0 +939 30 model.scoring_fct_norm 1.0 +939 30 loss.margin 11.569987290525038 +939 30 loss.adversarial_temperature 0.791347311262009 +939 30 optimizer.lr 0.0048148754765248445 +939 30 negative_sampler.num_negs_per_pos 6.0 +939 30 training.batch_size 2.0 +939 31 model.embedding_dim 0.0 +939 31 model.relation_dim 0.0 +939 31 model.scoring_fct_norm 1.0 +939 31 loss.margin 14.421516259887174 +939 31 loss.adversarial_temperature 0.8759783519734947 +939 31 optimizer.lr 0.002514817217172447 +939 31 negative_sampler.num_negs_per_pos 18.0 +939 31 training.batch_size 1.0 +939 32 model.embedding_dim 2.0 +939 32 model.relation_dim 1.0 +939 32 model.scoring_fct_norm 2.0 +939 32 loss.margin 
11.95063365104947 +939 32 loss.adversarial_temperature 0.6833843447578463 +939 32 optimizer.lr 0.00466441334228113 +939 32 negative_sampler.num_negs_per_pos 96.0 +939 32 training.batch_size 0.0 +939 1 dataset """wn18rr""" +939 1 model """transr""" +939 1 loss """nssa""" +939 1 regularizer """no""" +939 1 optimizer """adam""" +939 1 training_loop """owa""" +939 1 negative_sampler """basic""" +939 1 evaluator """rankbased""" +939 2 dataset """wn18rr""" +939 2 model """transr""" +939 2 loss """nssa""" +939 2 regularizer """no""" +939 2 optimizer """adam""" +939 2 training_loop """owa""" +939 2 negative_sampler """basic""" +939 2 evaluator """rankbased""" +939 3 dataset """wn18rr""" +939 3 model """transr""" +939 3 loss """nssa""" +939 3 regularizer """no""" +939 3 optimizer """adam""" +939 3 training_loop """owa""" +939 3 negative_sampler """basic""" +939 3 evaluator """rankbased""" +939 4 dataset """wn18rr""" +939 4 model """transr""" +939 4 loss """nssa""" +939 4 regularizer """no""" +939 4 optimizer """adam""" +939 4 training_loop """owa""" +939 4 negative_sampler """basic""" +939 4 evaluator """rankbased""" +939 5 dataset """wn18rr""" +939 5 model """transr""" +939 5 loss """nssa""" +939 5 regularizer """no""" +939 5 optimizer """adam""" +939 5 training_loop """owa""" +939 5 negative_sampler """basic""" +939 5 evaluator """rankbased""" +939 6 dataset """wn18rr""" +939 6 model """transr""" +939 6 loss """nssa""" +939 6 regularizer """no""" +939 6 optimizer """adam""" +939 6 training_loop """owa""" +939 6 negative_sampler """basic""" +939 6 evaluator """rankbased""" +939 7 dataset """wn18rr""" +939 7 model """transr""" +939 7 loss """nssa""" +939 7 regularizer """no""" +939 7 optimizer """adam""" +939 7 training_loop """owa""" +939 7 negative_sampler """basic""" +939 7 evaluator """rankbased""" +939 8 dataset """wn18rr""" +939 8 model """transr""" +939 8 loss """nssa""" +939 8 regularizer """no""" +939 8 optimizer """adam""" +939 8 training_loop """owa""" +939 8 
negative_sampler """basic""" +939 8 evaluator """rankbased""" +939 9 dataset """wn18rr""" +939 9 model """transr""" +939 9 loss """nssa""" +939 9 regularizer """no""" +939 9 optimizer """adam""" +939 9 training_loop """owa""" +939 9 negative_sampler """basic""" +939 9 evaluator """rankbased""" +939 10 dataset """wn18rr""" +939 10 model """transr""" +939 10 loss """nssa""" +939 10 regularizer """no""" +939 10 optimizer """adam""" +939 10 training_loop """owa""" +939 10 negative_sampler """basic""" +939 10 evaluator """rankbased""" +939 11 dataset """wn18rr""" +939 11 model """transr""" +939 11 loss """nssa""" +939 11 regularizer """no""" +939 11 optimizer """adam""" +939 11 training_loop """owa""" +939 11 negative_sampler """basic""" +939 11 evaluator """rankbased""" +939 12 dataset """wn18rr""" +939 12 model """transr""" +939 12 loss """nssa""" +939 12 regularizer """no""" +939 12 optimizer """adam""" +939 12 training_loop """owa""" +939 12 negative_sampler """basic""" +939 12 evaluator """rankbased""" +939 13 dataset """wn18rr""" +939 13 model """transr""" +939 13 loss """nssa""" +939 13 regularizer """no""" +939 13 optimizer """adam""" +939 13 training_loop """owa""" +939 13 negative_sampler """basic""" +939 13 evaluator """rankbased""" +939 14 dataset """wn18rr""" +939 14 model """transr""" +939 14 loss """nssa""" +939 14 regularizer """no""" +939 14 optimizer """adam""" +939 14 training_loop """owa""" +939 14 negative_sampler """basic""" +939 14 evaluator """rankbased""" +939 15 dataset """wn18rr""" +939 15 model """transr""" +939 15 loss """nssa""" +939 15 regularizer """no""" +939 15 optimizer """adam""" +939 15 training_loop """owa""" +939 15 negative_sampler """basic""" +939 15 evaluator """rankbased""" +939 16 dataset """wn18rr""" +939 16 model """transr""" +939 16 loss """nssa""" +939 16 regularizer """no""" +939 16 optimizer """adam""" +939 16 training_loop """owa""" +939 16 negative_sampler """basic""" +939 16 evaluator """rankbased""" +939 17 dataset 
"""wn18rr""" +939 17 model """transr""" +939 17 loss """nssa""" +939 17 regularizer """no""" +939 17 optimizer """adam""" +939 17 training_loop """owa""" +939 17 negative_sampler """basic""" +939 17 evaluator """rankbased""" +939 18 dataset """wn18rr""" +939 18 model """transr""" +939 18 loss """nssa""" +939 18 regularizer """no""" +939 18 optimizer """adam""" +939 18 training_loop """owa""" +939 18 negative_sampler """basic""" +939 18 evaluator """rankbased""" +939 19 dataset """wn18rr""" +939 19 model """transr""" +939 19 loss """nssa""" +939 19 regularizer """no""" +939 19 optimizer """adam""" +939 19 training_loop """owa""" +939 19 negative_sampler """basic""" +939 19 evaluator """rankbased""" +939 20 dataset """wn18rr""" +939 20 model """transr""" +939 20 loss """nssa""" +939 20 regularizer """no""" +939 20 optimizer """adam""" +939 20 training_loop """owa""" +939 20 negative_sampler """basic""" +939 20 evaluator """rankbased""" +939 21 dataset """wn18rr""" +939 21 model """transr""" +939 21 loss """nssa""" +939 21 regularizer """no""" +939 21 optimizer """adam""" +939 21 training_loop """owa""" +939 21 negative_sampler """basic""" +939 21 evaluator """rankbased""" +939 22 dataset """wn18rr""" +939 22 model """transr""" +939 22 loss """nssa""" +939 22 regularizer """no""" +939 22 optimizer """adam""" +939 22 training_loop """owa""" +939 22 negative_sampler """basic""" +939 22 evaluator """rankbased""" +939 23 dataset """wn18rr""" +939 23 model """transr""" +939 23 loss """nssa""" +939 23 regularizer """no""" +939 23 optimizer """adam""" +939 23 training_loop """owa""" +939 23 negative_sampler """basic""" +939 23 evaluator """rankbased""" +939 24 dataset """wn18rr""" +939 24 model """transr""" +939 24 loss """nssa""" +939 24 regularizer """no""" +939 24 optimizer """adam""" +939 24 training_loop """owa""" +939 24 negative_sampler """basic""" +939 24 evaluator """rankbased""" +939 25 dataset """wn18rr""" +939 25 model """transr""" +939 25 loss """nssa""" +939 25 
regularizer """no""" +939 25 optimizer """adam""" +939 25 training_loop """owa""" +939 25 negative_sampler """basic""" +939 25 evaluator """rankbased""" +939 26 dataset """wn18rr""" +939 26 model """transr""" +939 26 loss """nssa""" +939 26 regularizer """no""" +939 26 optimizer """adam""" +939 26 training_loop """owa""" +939 26 negative_sampler """basic""" +939 26 evaluator """rankbased""" +939 27 dataset """wn18rr""" +939 27 model """transr""" +939 27 loss """nssa""" +939 27 regularizer """no""" +939 27 optimizer """adam""" +939 27 training_loop """owa""" +939 27 negative_sampler """basic""" +939 27 evaluator """rankbased""" +939 28 dataset """wn18rr""" +939 28 model """transr""" +939 28 loss """nssa""" +939 28 regularizer """no""" +939 28 optimizer """adam""" +939 28 training_loop """owa""" +939 28 negative_sampler """basic""" +939 28 evaluator """rankbased""" +939 29 dataset """wn18rr""" +939 29 model """transr""" +939 29 loss """nssa""" +939 29 regularizer """no""" +939 29 optimizer """adam""" +939 29 training_loop """owa""" +939 29 negative_sampler """basic""" +939 29 evaluator """rankbased""" +939 30 dataset """wn18rr""" +939 30 model """transr""" +939 30 loss """nssa""" +939 30 regularizer """no""" +939 30 optimizer """adam""" +939 30 training_loop """owa""" +939 30 negative_sampler """basic""" +939 30 evaluator """rankbased""" +939 31 dataset """wn18rr""" +939 31 model """transr""" +939 31 loss """nssa""" +939 31 regularizer """no""" +939 31 optimizer """adam""" +939 31 training_loop """owa""" +939 31 negative_sampler """basic""" +939 31 evaluator """rankbased""" +939 32 dataset """wn18rr""" +939 32 model """transr""" +939 32 loss """nssa""" +939 32 regularizer """no""" +939 32 optimizer """adam""" +939 32 training_loop """owa""" +939 32 negative_sampler """basic""" +939 32 evaluator """rankbased""" +940 1 model.embedding_dim 0.0 +940 1 model.relation_dim 1.0 +940 1 model.scoring_fct_norm 2.0 +940 1 loss.margin 12.591730893720495 +940 1 
loss.adversarial_temperature 0.7476296226038297 +940 1 optimizer.lr 0.027917069511522882 +940 1 negative_sampler.num_negs_per_pos 93.0 +940 1 training.batch_size 2.0 +940 2 model.embedding_dim 2.0 +940 2 model.relation_dim 2.0 +940 2 model.scoring_fct_norm 2.0 +940 2 loss.margin 23.796953517280407 +940 2 loss.adversarial_temperature 0.588883870447038 +940 2 optimizer.lr 0.004438703058214376 +940 2 negative_sampler.num_negs_per_pos 28.0 +940 2 training.batch_size 0.0 +940 3 model.embedding_dim 0.0 +940 3 model.relation_dim 2.0 +940 3 model.scoring_fct_norm 1.0 +940 3 loss.margin 5.555427157375812 +940 3 loss.adversarial_temperature 0.5144479438170483 +940 3 optimizer.lr 0.007741472437874959 +940 3 negative_sampler.num_negs_per_pos 18.0 +940 3 training.batch_size 0.0 +940 4 model.embedding_dim 0.0 +940 4 model.relation_dim 1.0 +940 4 model.scoring_fct_norm 1.0 +940 4 loss.margin 12.832039378755061 +940 4 loss.adversarial_temperature 0.40855301158403207 +940 4 optimizer.lr 0.022622086798729416 +940 4 negative_sampler.num_negs_per_pos 92.0 +940 4 training.batch_size 0.0 +940 5 model.embedding_dim 0.0 +940 5 model.relation_dim 1.0 +940 5 model.scoring_fct_norm 1.0 +940 5 loss.margin 7.524878351650497 +940 5 loss.adversarial_temperature 0.3709843724490282 +940 5 optimizer.lr 0.015113785023108771 +940 5 negative_sampler.num_negs_per_pos 46.0 +940 5 training.batch_size 1.0 +940 6 model.embedding_dim 1.0 +940 6 model.relation_dim 0.0 +940 6 model.scoring_fct_norm 1.0 +940 6 loss.margin 4.768854292239533 +940 6 loss.adversarial_temperature 0.6983695787815797 +940 6 optimizer.lr 0.08380763555651823 +940 6 negative_sampler.num_negs_per_pos 44.0 +940 6 training.batch_size 1.0 +940 7 model.embedding_dim 0.0 +940 7 model.relation_dim 0.0 +940 7 model.scoring_fct_norm 2.0 +940 7 loss.margin 15.085962786928423 +940 7 loss.adversarial_temperature 0.7304758380096255 +940 7 optimizer.lr 0.027607888062013013 +940 7 negative_sampler.num_negs_per_pos 58.0 +940 7 training.batch_size 1.0 
+940 8 model.embedding_dim 2.0 +940 8 model.relation_dim 1.0 +940 8 model.scoring_fct_norm 1.0 +940 8 loss.margin 18.787133989181434 +940 8 loss.adversarial_temperature 0.8167426528275009 +940 8 optimizer.lr 0.002649098182412152 +940 8 negative_sampler.num_negs_per_pos 69.0 +940 8 training.batch_size 2.0 +940 9 model.embedding_dim 1.0 +940 9 model.relation_dim 2.0 +940 9 model.scoring_fct_norm 1.0 +940 9 loss.margin 6.756448004310496 +940 9 loss.adversarial_temperature 0.3839749592326005 +940 9 optimizer.lr 0.004891077713132372 +940 9 negative_sampler.num_negs_per_pos 74.0 +940 9 training.batch_size 2.0 +940 10 model.embedding_dim 2.0 +940 10 model.relation_dim 2.0 +940 10 model.scoring_fct_norm 1.0 +940 10 loss.margin 12.307384709317576 +940 10 loss.adversarial_temperature 0.9893360911771295 +940 10 optimizer.lr 0.03210883205046952 +940 10 negative_sampler.num_negs_per_pos 98.0 +940 10 training.batch_size 2.0 +940 11 model.embedding_dim 0.0 +940 11 model.relation_dim 1.0 +940 11 model.scoring_fct_norm 2.0 +940 11 loss.margin 11.799126706401168 +940 11 loss.adversarial_temperature 0.7716296616897322 +940 11 optimizer.lr 0.03729285723199546 +940 11 negative_sampler.num_negs_per_pos 58.0 +940 11 training.batch_size 0.0 +940 12 model.embedding_dim 2.0 +940 12 model.relation_dim 2.0 +940 12 model.scoring_fct_norm 2.0 +940 12 loss.margin 25.04893260983322 +940 12 loss.adversarial_temperature 0.9620801250888594 +940 12 optimizer.lr 0.002002649063088712 +940 12 negative_sampler.num_negs_per_pos 94.0 +940 12 training.batch_size 1.0 +940 13 model.embedding_dim 0.0 +940 13 model.relation_dim 1.0 +940 13 model.scoring_fct_norm 1.0 +940 13 loss.margin 3.4898329444232354 +940 13 loss.adversarial_temperature 0.5834621795234421 +940 13 optimizer.lr 0.03476106938363291 +940 13 negative_sampler.num_negs_per_pos 30.0 +940 13 training.batch_size 1.0 +940 14 model.embedding_dim 0.0 +940 14 model.relation_dim 1.0 +940 14 model.scoring_fct_norm 1.0 +940 14 loss.margin 23.427383023412983 
+940 14 loss.adversarial_temperature 0.48870824104506505 +940 14 optimizer.lr 0.004392639405654754 +940 14 negative_sampler.num_negs_per_pos 85.0 +940 14 training.batch_size 2.0 +940 15 model.embedding_dim 0.0 +940 15 model.relation_dim 0.0 +940 15 model.scoring_fct_norm 2.0 +940 15 loss.margin 21.44282892640246 +940 15 loss.adversarial_temperature 0.5760503103488189 +940 15 optimizer.lr 0.045657709538058186 +940 15 negative_sampler.num_negs_per_pos 75.0 +940 15 training.batch_size 2.0 +940 16 model.embedding_dim 0.0 +940 16 model.relation_dim 0.0 +940 16 model.scoring_fct_norm 1.0 +940 16 loss.margin 15.909752147945097 +940 16 loss.adversarial_temperature 0.552661860613347 +940 16 optimizer.lr 0.08321254416258729 +940 16 negative_sampler.num_negs_per_pos 81.0 +940 16 training.batch_size 2.0 +940 17 model.embedding_dim 1.0 +940 17 model.relation_dim 2.0 +940 17 model.scoring_fct_norm 1.0 +940 17 loss.margin 3.915458818350203 +940 17 loss.adversarial_temperature 0.22500173290653663 +940 17 optimizer.lr 0.014796186394242157 +940 17 negative_sampler.num_negs_per_pos 49.0 +940 17 training.batch_size 1.0 +940 18 model.embedding_dim 1.0 +940 18 model.relation_dim 0.0 +940 18 model.scoring_fct_norm 1.0 +940 18 loss.margin 2.0012059128925674 +940 18 loss.adversarial_temperature 0.18998004132670013 +940 18 optimizer.lr 0.02611996093328744 +940 18 negative_sampler.num_negs_per_pos 30.0 +940 18 training.batch_size 0.0 +940 19 model.embedding_dim 1.0 +940 19 model.relation_dim 1.0 +940 19 model.scoring_fct_norm 1.0 +940 19 loss.margin 16.80744805086673 +940 19 loss.adversarial_temperature 0.20964203376986026 +940 19 optimizer.lr 0.0014741947286558673 +940 19 negative_sampler.num_negs_per_pos 96.0 +940 19 training.batch_size 1.0 +940 20 model.embedding_dim 2.0 +940 20 model.relation_dim 2.0 +940 20 model.scoring_fct_norm 1.0 +940 20 loss.margin 1.738311823960379 +940 20 loss.adversarial_temperature 0.4570881753605774 +940 20 optimizer.lr 0.05602861269052412 +940 20 
negative_sampler.num_negs_per_pos 63.0 +940 20 training.batch_size 0.0 +940 21 model.embedding_dim 0.0 +940 21 model.relation_dim 1.0 +940 21 model.scoring_fct_norm 2.0 +940 21 loss.margin 17.836838634767158 +940 21 loss.adversarial_temperature 0.9177002847145985 +940 21 optimizer.lr 0.08937794626378776 +940 21 negative_sampler.num_negs_per_pos 79.0 +940 21 training.batch_size 1.0 +940 22 model.embedding_dim 0.0 +940 22 model.relation_dim 1.0 +940 22 model.scoring_fct_norm 1.0 +940 22 loss.margin 28.064194452509202 +940 22 loss.adversarial_temperature 0.7049479810778098 +940 22 optimizer.lr 0.009371710775030255 +940 22 negative_sampler.num_negs_per_pos 90.0 +940 22 training.batch_size 0.0 +940 23 model.embedding_dim 0.0 +940 23 model.relation_dim 0.0 +940 23 model.scoring_fct_norm 2.0 +940 23 loss.margin 20.39608076636238 +940 23 loss.adversarial_temperature 0.7513846953885095 +940 23 optimizer.lr 0.06850063304240338 +940 23 negative_sampler.num_negs_per_pos 74.0 +940 23 training.batch_size 2.0 +940 24 model.embedding_dim 2.0 +940 24 model.relation_dim 0.0 +940 24 model.scoring_fct_norm 1.0 +940 24 loss.margin 10.050908700690021 +940 24 loss.adversarial_temperature 0.827863807084597 +940 24 optimizer.lr 0.002827919665552693 +940 24 negative_sampler.num_negs_per_pos 65.0 +940 24 training.batch_size 1.0 +940 25 model.embedding_dim 1.0 +940 25 model.relation_dim 1.0 +940 25 model.scoring_fct_norm 1.0 +940 25 loss.margin 1.3592671125058278 +940 25 loss.adversarial_temperature 0.6481642191273033 +940 25 optimizer.lr 0.005829453094171532 +940 25 negative_sampler.num_negs_per_pos 93.0 +940 25 training.batch_size 0.0 +940 26 model.embedding_dim 2.0 +940 26 model.relation_dim 0.0 +940 26 model.scoring_fct_norm 1.0 +940 26 loss.margin 4.852489887306346 +940 26 loss.adversarial_temperature 0.9871209109915462 +940 26 optimizer.lr 0.0022866455909594813 +940 26 negative_sampler.num_negs_per_pos 42.0 +940 26 training.batch_size 0.0 +940 27 model.embedding_dim 2.0 +940 27 
model.relation_dim 0.0 +940 27 model.scoring_fct_norm 1.0 +940 27 loss.margin 5.318319953342358 +940 27 loss.adversarial_temperature 0.24738824227580786 +940 27 optimizer.lr 0.005173771760784749 +940 27 negative_sampler.num_negs_per_pos 50.0 +940 27 training.batch_size 1.0 +940 28 model.embedding_dim 0.0 +940 28 model.relation_dim 1.0 +940 28 model.scoring_fct_norm 2.0 +940 28 loss.margin 13.991066077946105 +940 28 loss.adversarial_temperature 0.38549882057172113 +940 28 optimizer.lr 0.015374105558962917 +940 28 negative_sampler.num_negs_per_pos 21.0 +940 28 training.batch_size 2.0 +940 29 model.embedding_dim 0.0 +940 29 model.relation_dim 2.0 +940 29 model.scoring_fct_norm 2.0 +940 29 loss.margin 10.52451025410879 +940 29 loss.adversarial_temperature 0.8667820697638546 +940 29 optimizer.lr 0.021784288760655694 +940 29 negative_sampler.num_negs_per_pos 44.0 +940 29 training.batch_size 2.0 +940 30 model.embedding_dim 0.0 +940 30 model.relation_dim 1.0 +940 30 model.scoring_fct_norm 2.0 +940 30 loss.margin 28.499751511675974 +940 30 loss.adversarial_temperature 0.15505260655406963 +940 30 optimizer.lr 0.08037866582666262 +940 30 negative_sampler.num_negs_per_pos 57.0 +940 30 training.batch_size 1.0 +940 31 model.embedding_dim 1.0 +940 31 model.relation_dim 1.0 +940 31 model.scoring_fct_norm 1.0 +940 31 loss.margin 12.359743022226285 +940 31 loss.adversarial_temperature 0.7631095026145971 +940 31 optimizer.lr 0.010640101326518804 +940 31 negative_sampler.num_negs_per_pos 22.0 +940 31 training.batch_size 1.0 +940 32 model.embedding_dim 0.0 +940 32 model.relation_dim 2.0 +940 32 model.scoring_fct_norm 1.0 +940 32 loss.margin 1.0572775535281123 +940 32 loss.adversarial_temperature 0.22799378420669078 +940 32 optimizer.lr 0.0013676068547311105 +940 32 negative_sampler.num_negs_per_pos 33.0 +940 32 training.batch_size 2.0 +940 33 model.embedding_dim 1.0 +940 33 model.relation_dim 0.0 +940 33 model.scoring_fct_norm 1.0 +940 33 loss.margin 8.884800115765373 +940 33 
loss.adversarial_temperature 0.1441912065674276 +940 33 optimizer.lr 0.005162968495936018 +940 33 negative_sampler.num_negs_per_pos 4.0 +940 33 training.batch_size 2.0 +940 34 model.embedding_dim 1.0 +940 34 model.relation_dim 0.0 +940 34 model.scoring_fct_norm 2.0 +940 34 loss.margin 1.155315067008966 +940 34 loss.adversarial_temperature 0.24273435489386952 +940 34 optimizer.lr 0.05273358304388276 +940 34 negative_sampler.num_negs_per_pos 80.0 +940 34 training.batch_size 2.0 +940 35 model.embedding_dim 1.0 +940 35 model.relation_dim 0.0 +940 35 model.scoring_fct_norm 1.0 +940 35 loss.margin 13.197549347893272 +940 35 loss.adversarial_temperature 0.13704259536126115 +940 35 optimizer.lr 0.004342912002052226 +940 35 negative_sampler.num_negs_per_pos 68.0 +940 35 training.batch_size 0.0 +940 36 model.embedding_dim 2.0 +940 36 model.relation_dim 1.0 +940 36 model.scoring_fct_norm 1.0 +940 36 loss.margin 27.74943701949967 +940 36 loss.adversarial_temperature 0.6551113760784093 +940 36 optimizer.lr 0.08267025154127293 +940 36 negative_sampler.num_negs_per_pos 33.0 +940 36 training.batch_size 2.0 +940 37 model.embedding_dim 1.0 +940 37 model.relation_dim 2.0 +940 37 model.scoring_fct_norm 1.0 +940 37 loss.margin 29.16043534196422 +940 37 loss.adversarial_temperature 0.3985784681288639 +940 37 optimizer.lr 0.07998065843346218 +940 37 negative_sampler.num_negs_per_pos 2.0 +940 37 training.batch_size 0.0 +940 38 model.embedding_dim 1.0 +940 38 model.relation_dim 2.0 +940 38 model.scoring_fct_norm 1.0 +940 38 loss.margin 21.689195399445886 +940 38 loss.adversarial_temperature 0.3235559923109081 +940 38 optimizer.lr 0.010097965925320914 +940 38 negative_sampler.num_negs_per_pos 23.0 +940 38 training.batch_size 2.0 +940 39 model.embedding_dim 1.0 +940 39 model.relation_dim 1.0 +940 39 model.scoring_fct_norm 2.0 +940 39 loss.margin 6.460413988644884 +940 39 loss.adversarial_temperature 0.1950174109730233 +940 39 optimizer.lr 0.008782468882067563 +940 39 
negative_sampler.num_negs_per_pos 1.0 +940 39 training.batch_size 1.0 +940 40 model.embedding_dim 0.0 +940 40 model.relation_dim 1.0 +940 40 model.scoring_fct_norm 1.0 +940 40 loss.margin 3.0049793879963085 +940 40 loss.adversarial_temperature 0.378982361113811 +940 40 optimizer.lr 0.009652332053622915 +940 40 negative_sampler.num_negs_per_pos 25.0 +940 40 training.batch_size 2.0 +940 41 model.embedding_dim 0.0 +940 41 model.relation_dim 2.0 +940 41 model.scoring_fct_norm 2.0 +940 41 loss.margin 29.050068680836105 +940 41 loss.adversarial_temperature 0.5365432704922408 +940 41 optimizer.lr 0.0013901937550775956 +940 41 negative_sampler.num_negs_per_pos 90.0 +940 41 training.batch_size 2.0 +940 42 model.embedding_dim 2.0 +940 42 model.relation_dim 2.0 +940 42 model.scoring_fct_norm 2.0 +940 42 loss.margin 3.943527663637009 +940 42 loss.adversarial_temperature 0.9581577638532193 +940 42 optimizer.lr 0.001279543703945717 +940 42 negative_sampler.num_negs_per_pos 11.0 +940 42 training.batch_size 1.0 +940 43 model.embedding_dim 0.0 +940 43 model.relation_dim 2.0 +940 43 model.scoring_fct_norm 2.0 +940 43 loss.margin 27.11026138137216 +940 43 loss.adversarial_temperature 0.7471277098601139 +940 43 optimizer.lr 0.013324747603987025 +940 43 negative_sampler.num_negs_per_pos 82.0 +940 43 training.batch_size 0.0 +940 44 model.embedding_dim 1.0 +940 44 model.relation_dim 1.0 +940 44 model.scoring_fct_norm 2.0 +940 44 loss.margin 27.054248628132825 +940 44 loss.adversarial_temperature 0.7585569163031269 +940 44 optimizer.lr 0.05843278941391135 +940 44 negative_sampler.num_negs_per_pos 70.0 +940 44 training.batch_size 0.0 +940 45 model.embedding_dim 2.0 +940 45 model.relation_dim 0.0 +940 45 model.scoring_fct_norm 1.0 +940 45 loss.margin 6.224981855813501 +940 45 loss.adversarial_temperature 0.7053927111891318 +940 45 optimizer.lr 0.0034392200220127647 +940 45 negative_sampler.num_negs_per_pos 6.0 +940 45 training.batch_size 1.0 +940 46 model.embedding_dim 1.0 +940 46 
model.relation_dim 2.0 +940 46 model.scoring_fct_norm 2.0 +940 46 loss.margin 4.708565890348495 +940 46 loss.adversarial_temperature 0.48475875896527826 +940 46 optimizer.lr 0.002205013260759351 +940 46 negative_sampler.num_negs_per_pos 72.0 +940 46 training.batch_size 1.0 +940 47 model.embedding_dim 0.0 +940 47 model.relation_dim 0.0 +940 47 model.scoring_fct_norm 1.0 +940 47 loss.margin 4.7056605467235855 +940 47 loss.adversarial_temperature 0.806375717740144 +940 47 optimizer.lr 0.002214988862598369 +940 47 negative_sampler.num_negs_per_pos 31.0 +940 47 training.batch_size 1.0 +940 48 model.embedding_dim 0.0 +940 48 model.relation_dim 2.0 +940 48 model.scoring_fct_norm 2.0 +940 48 loss.margin 21.73711596286709 +940 48 loss.adversarial_temperature 0.5520764563000704 +940 48 optimizer.lr 0.02263537996018543 +940 48 negative_sampler.num_negs_per_pos 52.0 +940 48 training.batch_size 0.0 +940 49 model.embedding_dim 1.0 +940 49 model.relation_dim 1.0 +940 49 model.scoring_fct_norm 1.0 +940 49 loss.margin 21.843520355246014 +940 49 loss.adversarial_temperature 0.6651399281584501 +940 49 optimizer.lr 0.0019191298747692292 +940 49 negative_sampler.num_negs_per_pos 77.0 +940 49 training.batch_size 1.0 +940 50 model.embedding_dim 2.0 +940 50 model.relation_dim 1.0 +940 50 model.scoring_fct_norm 2.0 +940 50 loss.margin 20.32956950103502 +940 50 loss.adversarial_temperature 0.8134849167409224 +940 50 optimizer.lr 0.045572332224680066 +940 50 negative_sampler.num_negs_per_pos 63.0 +940 50 training.batch_size 2.0 +940 51 model.embedding_dim 2.0 +940 51 model.relation_dim 0.0 +940 51 model.scoring_fct_norm 2.0 +940 51 loss.margin 10.32166137716224 +940 51 loss.adversarial_temperature 0.8511999238406519 +940 51 optimizer.lr 0.00157887857404692 +940 51 negative_sampler.num_negs_per_pos 66.0 +940 51 training.batch_size 0.0 +940 1 dataset """wn18rr""" +940 1 model """transr""" +940 1 loss """nssa""" +940 1 regularizer """no""" +940 1 optimizer """adam""" +940 1 training_loop 
"""owa""" +940 1 negative_sampler """basic""" +940 1 evaluator """rankbased""" +940 2 dataset """wn18rr""" +940 2 model """transr""" +940 2 loss """nssa""" +940 2 regularizer """no""" +940 2 optimizer """adam""" +940 2 training_loop """owa""" +940 2 negative_sampler """basic""" +940 2 evaluator """rankbased""" +940 3 dataset """wn18rr""" +940 3 model """transr""" +940 3 loss """nssa""" +940 3 regularizer """no""" +940 3 optimizer """adam""" +940 3 training_loop """owa""" +940 3 negative_sampler """basic""" +940 3 evaluator """rankbased""" +940 4 dataset """wn18rr""" +940 4 model """transr""" +940 4 loss """nssa""" +940 4 regularizer """no""" +940 4 optimizer """adam""" +940 4 training_loop """owa""" +940 4 negative_sampler """basic""" +940 4 evaluator """rankbased""" +940 5 dataset """wn18rr""" +940 5 model """transr""" +940 5 loss """nssa""" +940 5 regularizer """no""" +940 5 optimizer """adam""" +940 5 training_loop """owa""" +940 5 negative_sampler """basic""" +940 5 evaluator """rankbased""" +940 6 dataset """wn18rr""" +940 6 model """transr""" +940 6 loss """nssa""" +940 6 regularizer """no""" +940 6 optimizer """adam""" +940 6 training_loop """owa""" +940 6 negative_sampler """basic""" +940 6 evaluator """rankbased""" +940 7 dataset """wn18rr""" +940 7 model """transr""" +940 7 loss """nssa""" +940 7 regularizer """no""" +940 7 optimizer """adam""" +940 7 training_loop """owa""" +940 7 negative_sampler """basic""" +940 7 evaluator """rankbased""" +940 8 dataset """wn18rr""" +940 8 model """transr""" +940 8 loss """nssa""" +940 8 regularizer """no""" +940 8 optimizer """adam""" +940 8 training_loop """owa""" +940 8 negative_sampler """basic""" +940 8 evaluator """rankbased""" +940 9 dataset """wn18rr""" +940 9 model """transr""" +940 9 loss """nssa""" +940 9 regularizer """no""" +940 9 optimizer """adam""" +940 9 training_loop """owa""" +940 9 negative_sampler """basic""" +940 9 evaluator """rankbased""" +940 10 dataset """wn18rr""" +940 10 model """transr""" 
+940 10 loss """nssa""" +940 10 regularizer """no""" +940 10 optimizer """adam""" +940 10 training_loop """owa""" +940 10 negative_sampler """basic""" +940 10 evaluator """rankbased""" +940 11 dataset """wn18rr""" +940 11 model """transr""" +940 11 loss """nssa""" +940 11 regularizer """no""" +940 11 optimizer """adam""" +940 11 training_loop """owa""" +940 11 negative_sampler """basic""" +940 11 evaluator """rankbased""" +940 12 dataset """wn18rr""" +940 12 model """transr""" +940 12 loss """nssa""" +940 12 regularizer """no""" +940 12 optimizer """adam""" +940 12 training_loop """owa""" +940 12 negative_sampler """basic""" +940 12 evaluator """rankbased""" +940 13 dataset """wn18rr""" +940 13 model """transr""" +940 13 loss """nssa""" +940 13 regularizer """no""" +940 13 optimizer """adam""" +940 13 training_loop """owa""" +940 13 negative_sampler """basic""" +940 13 evaluator """rankbased""" +940 14 dataset """wn18rr""" +940 14 model """transr""" +940 14 loss """nssa""" +940 14 regularizer """no""" +940 14 optimizer """adam""" +940 14 training_loop """owa""" +940 14 negative_sampler """basic""" +940 14 evaluator """rankbased""" +940 15 dataset """wn18rr""" +940 15 model """transr""" +940 15 loss """nssa""" +940 15 regularizer """no""" +940 15 optimizer """adam""" +940 15 training_loop """owa""" +940 15 negative_sampler """basic""" +940 15 evaluator """rankbased""" +940 16 dataset """wn18rr""" +940 16 model """transr""" +940 16 loss """nssa""" +940 16 regularizer """no""" +940 16 optimizer """adam""" +940 16 training_loop """owa""" +940 16 negative_sampler """basic""" +940 16 evaluator """rankbased""" +940 17 dataset """wn18rr""" +940 17 model """transr""" +940 17 loss """nssa""" +940 17 regularizer """no""" +940 17 optimizer """adam""" +940 17 training_loop """owa""" +940 17 negative_sampler """basic""" +940 17 evaluator """rankbased""" +940 18 dataset """wn18rr""" +940 18 model """transr""" +940 18 loss """nssa""" +940 18 regularizer """no""" +940 18 optimizer 
"""adam""" +940 18 training_loop """owa""" +940 18 negative_sampler """basic""" +940 18 evaluator """rankbased""" +940 19 dataset """wn18rr""" +940 19 model """transr""" +940 19 loss """nssa""" +940 19 regularizer """no""" +940 19 optimizer """adam""" +940 19 training_loop """owa""" +940 19 negative_sampler """basic""" +940 19 evaluator """rankbased""" +940 20 dataset """wn18rr""" +940 20 model """transr""" +940 20 loss """nssa""" +940 20 regularizer """no""" +940 20 optimizer """adam""" +940 20 training_loop """owa""" +940 20 negative_sampler """basic""" +940 20 evaluator """rankbased""" +940 21 dataset """wn18rr""" +940 21 model """transr""" +940 21 loss """nssa""" +940 21 regularizer """no""" +940 21 optimizer """adam""" +940 21 training_loop """owa""" +940 21 negative_sampler """basic""" +940 21 evaluator """rankbased""" +940 22 dataset """wn18rr""" +940 22 model """transr""" +940 22 loss """nssa""" +940 22 regularizer """no""" +940 22 optimizer """adam""" +940 22 training_loop """owa""" +940 22 negative_sampler """basic""" +940 22 evaluator """rankbased""" +940 23 dataset """wn18rr""" +940 23 model """transr""" +940 23 loss """nssa""" +940 23 regularizer """no""" +940 23 optimizer """adam""" +940 23 training_loop """owa""" +940 23 negative_sampler """basic""" +940 23 evaluator """rankbased""" +940 24 dataset """wn18rr""" +940 24 model """transr""" +940 24 loss """nssa""" +940 24 regularizer """no""" +940 24 optimizer """adam""" +940 24 training_loop """owa""" +940 24 negative_sampler """basic""" +940 24 evaluator """rankbased""" +940 25 dataset """wn18rr""" +940 25 model """transr""" +940 25 loss """nssa""" +940 25 regularizer """no""" +940 25 optimizer """adam""" +940 25 training_loop """owa""" +940 25 negative_sampler """basic""" +940 25 evaluator """rankbased""" +940 26 dataset """wn18rr""" +940 26 model """transr""" +940 26 loss """nssa""" +940 26 regularizer """no""" +940 26 optimizer """adam""" +940 26 training_loop """owa""" +940 26 negative_sampler 
"""basic""" +940 26 evaluator """rankbased""" +940 27 dataset """wn18rr""" +940 27 model """transr""" +940 27 loss """nssa""" +940 27 regularizer """no""" +940 27 optimizer """adam""" +940 27 training_loop """owa""" +940 27 negative_sampler """basic""" +940 27 evaluator """rankbased""" +940 28 dataset """wn18rr""" +940 28 model """transr""" +940 28 loss """nssa""" +940 28 regularizer """no""" +940 28 optimizer """adam""" +940 28 training_loop """owa""" +940 28 negative_sampler """basic""" +940 28 evaluator """rankbased""" +940 29 dataset """wn18rr""" +940 29 model """transr""" +940 29 loss """nssa""" +940 29 regularizer """no""" +940 29 optimizer """adam""" +940 29 training_loop """owa""" +940 29 negative_sampler """basic""" +940 29 evaluator """rankbased""" +940 30 dataset """wn18rr""" +940 30 model """transr""" +940 30 loss """nssa""" +940 30 regularizer """no""" +940 30 optimizer """adam""" +940 30 training_loop """owa""" +940 30 negative_sampler """basic""" +940 30 evaluator """rankbased""" +940 31 dataset """wn18rr""" +940 31 model """transr""" +940 31 loss """nssa""" +940 31 regularizer """no""" +940 31 optimizer """adam""" +940 31 training_loop """owa""" +940 31 negative_sampler """basic""" +940 31 evaluator """rankbased""" +940 32 dataset """wn18rr""" +940 32 model """transr""" +940 32 loss """nssa""" +940 32 regularizer """no""" +940 32 optimizer """adam""" +940 32 training_loop """owa""" +940 32 negative_sampler """basic""" +940 32 evaluator """rankbased""" +940 33 dataset """wn18rr""" +940 33 model """transr""" +940 33 loss """nssa""" +940 33 regularizer """no""" +940 33 optimizer """adam""" +940 33 training_loop """owa""" +940 33 negative_sampler """basic""" +940 33 evaluator """rankbased""" +940 34 dataset """wn18rr""" +940 34 model """transr""" +940 34 loss """nssa""" +940 34 regularizer """no""" +940 34 optimizer """adam""" +940 34 training_loop """owa""" +940 34 negative_sampler """basic""" +940 34 evaluator """rankbased""" +940 35 dataset 
"""wn18rr""" +940 35 model """transr""" +940 35 loss """nssa""" +940 35 regularizer """no""" +940 35 optimizer """adam""" +940 35 training_loop """owa""" +940 35 negative_sampler """basic""" +940 35 evaluator """rankbased""" +940 36 dataset """wn18rr""" +940 36 model """transr""" +940 36 loss """nssa""" +940 36 regularizer """no""" +940 36 optimizer """adam""" +940 36 training_loop """owa""" +940 36 negative_sampler """basic""" +940 36 evaluator """rankbased""" +940 37 dataset """wn18rr""" +940 37 model """transr""" +940 37 loss """nssa""" +940 37 regularizer """no""" +940 37 optimizer """adam""" +940 37 training_loop """owa""" +940 37 negative_sampler """basic""" +940 37 evaluator """rankbased""" +940 38 dataset """wn18rr""" +940 38 model """transr""" +940 38 loss """nssa""" +940 38 regularizer """no""" +940 38 optimizer """adam""" +940 38 training_loop """owa""" +940 38 negative_sampler """basic""" +940 38 evaluator """rankbased""" +940 39 dataset """wn18rr""" +940 39 model """transr""" +940 39 loss """nssa""" +940 39 regularizer """no""" +940 39 optimizer """adam""" +940 39 training_loop """owa""" +940 39 negative_sampler """basic""" +940 39 evaluator """rankbased""" +940 40 dataset """wn18rr""" +940 40 model """transr""" +940 40 loss """nssa""" +940 40 regularizer """no""" +940 40 optimizer """adam""" +940 40 training_loop """owa""" +940 40 negative_sampler """basic""" +940 40 evaluator """rankbased""" +940 41 dataset """wn18rr""" +940 41 model """transr""" +940 41 loss """nssa""" +940 41 regularizer """no""" +940 41 optimizer """adam""" +940 41 training_loop """owa""" +940 41 negative_sampler """basic""" +940 41 evaluator """rankbased""" +940 42 dataset """wn18rr""" +940 42 model """transr""" +940 42 loss """nssa""" +940 42 regularizer """no""" +940 42 optimizer """adam""" +940 42 training_loop """owa""" +940 42 negative_sampler """basic""" +940 42 evaluator """rankbased""" +940 43 dataset """wn18rr""" +940 43 model """transr""" +940 43 loss """nssa""" +940 43 
regularizer """no""" +940 43 optimizer """adam""" +940 43 training_loop """owa""" +940 43 negative_sampler """basic""" +940 43 evaluator """rankbased""" +940 44 dataset """wn18rr""" +940 44 model """transr""" +940 44 loss """nssa""" +940 44 regularizer """no""" +940 44 optimizer """adam""" +940 44 training_loop """owa""" +940 44 negative_sampler """basic""" +940 44 evaluator """rankbased""" +940 45 dataset """wn18rr""" +940 45 model """transr""" +940 45 loss """nssa""" +940 45 regularizer """no""" +940 45 optimizer """adam""" +940 45 training_loop """owa""" +940 45 negative_sampler """basic""" +940 45 evaluator """rankbased""" +940 46 dataset """wn18rr""" +940 46 model """transr""" +940 46 loss """nssa""" +940 46 regularizer """no""" +940 46 optimizer """adam""" +940 46 training_loop """owa""" +940 46 negative_sampler """basic""" +940 46 evaluator """rankbased""" +940 47 dataset """wn18rr""" +940 47 model """transr""" +940 47 loss """nssa""" +940 47 regularizer """no""" +940 47 optimizer """adam""" +940 47 training_loop """owa""" +940 47 negative_sampler """basic""" +940 47 evaluator """rankbased""" +940 48 dataset """wn18rr""" +940 48 model """transr""" +940 48 loss """nssa""" +940 48 regularizer """no""" +940 48 optimizer """adam""" +940 48 training_loop """owa""" +940 48 negative_sampler """basic""" +940 48 evaluator """rankbased""" +940 49 dataset """wn18rr""" +940 49 model """transr""" +940 49 loss """nssa""" +940 49 regularizer """no""" +940 49 optimizer """adam""" +940 49 training_loop """owa""" +940 49 negative_sampler """basic""" +940 49 evaluator """rankbased""" +940 50 dataset """wn18rr""" +940 50 model """transr""" +940 50 loss """nssa""" +940 50 regularizer """no""" +940 50 optimizer """adam""" +940 50 training_loop """owa""" +940 50 negative_sampler """basic""" +940 50 evaluator """rankbased""" +940 51 dataset """wn18rr""" +940 51 model """transr""" +940 51 loss """nssa""" +940 51 regularizer """no""" +940 51 optimizer """adam""" +940 51 training_loop 
"""owa""" +940 51 negative_sampler """basic""" +940 51 evaluator """rankbased""" +941 1 model.embedding_dim 2.0 +941 1 model.relation_dim 0.0 +941 1 model.scoring_fct_norm 1.0 +941 1 optimizer.lr 0.001711089729949143 +941 1 negative_sampler.num_negs_per_pos 44.0 +941 1 training.batch_size 2.0 +941 2 model.embedding_dim 2.0 +941 2 model.relation_dim 2.0 +941 2 model.scoring_fct_norm 2.0 +941 2 optimizer.lr 0.00747874864633472 +941 2 negative_sampler.num_negs_per_pos 91.0 +941 2 training.batch_size 0.0 +941 3 model.embedding_dim 0.0 +941 3 model.relation_dim 0.0 +941 3 model.scoring_fct_norm 2.0 +941 3 optimizer.lr 0.02369022600389493 +941 3 negative_sampler.num_negs_per_pos 91.0 +941 3 training.batch_size 0.0 +941 4 model.embedding_dim 2.0 +941 4 model.relation_dim 2.0 +941 4 model.scoring_fct_norm 1.0 +941 4 optimizer.lr 0.0013958268861123716 +941 4 negative_sampler.num_negs_per_pos 88.0 +941 4 training.batch_size 1.0 +941 5 model.embedding_dim 2.0 +941 5 model.relation_dim 2.0 +941 5 model.scoring_fct_norm 1.0 +941 5 optimizer.lr 0.05316099559005391 +941 5 negative_sampler.num_negs_per_pos 52.0 +941 5 training.batch_size 2.0 +941 6 model.embedding_dim 0.0 +941 6 model.relation_dim 0.0 +941 6 model.scoring_fct_norm 1.0 +941 6 optimizer.lr 0.05948602923816 +941 6 negative_sampler.num_negs_per_pos 20.0 +941 6 training.batch_size 1.0 +941 7 model.embedding_dim 2.0 +941 7 model.relation_dim 0.0 +941 7 model.scoring_fct_norm 1.0 +941 7 optimizer.lr 0.020789329689746616 +941 7 negative_sampler.num_negs_per_pos 91.0 +941 7 training.batch_size 1.0 +941 8 model.embedding_dim 0.0 +941 8 model.relation_dim 1.0 +941 8 model.scoring_fct_norm 1.0 +941 8 optimizer.lr 0.002750121823237365 +941 8 negative_sampler.num_negs_per_pos 38.0 +941 8 training.batch_size 1.0 +941 9 model.embedding_dim 2.0 +941 9 model.relation_dim 2.0 +941 9 model.scoring_fct_norm 1.0 +941 9 optimizer.lr 0.07484242363692707 +941 9 negative_sampler.num_negs_per_pos 12.0 +941 9 training.batch_size 2.0 +941 10 
model.embedding_dim 1.0 +941 10 model.relation_dim 1.0 +941 10 model.scoring_fct_norm 1.0 +941 10 optimizer.lr 0.052677774481625535 +941 10 negative_sampler.num_negs_per_pos 19.0 +941 10 training.batch_size 2.0 +941 11 model.embedding_dim 0.0 +941 11 model.relation_dim 0.0 +941 11 model.scoring_fct_norm 2.0 +941 11 optimizer.lr 0.0036894669104494196 +941 11 negative_sampler.num_negs_per_pos 13.0 +941 11 training.batch_size 2.0 +941 12 model.embedding_dim 1.0 +941 12 model.relation_dim 1.0 +941 12 model.scoring_fct_norm 1.0 +941 12 optimizer.lr 0.0522349471609089 +941 12 negative_sampler.num_negs_per_pos 57.0 +941 12 training.batch_size 2.0 +941 13 model.embedding_dim 0.0 +941 13 model.relation_dim 2.0 +941 13 model.scoring_fct_norm 2.0 +941 13 optimizer.lr 0.0732375875723303 +941 13 negative_sampler.num_negs_per_pos 47.0 +941 13 training.batch_size 1.0 +941 14 model.embedding_dim 2.0 +941 14 model.relation_dim 0.0 +941 14 model.scoring_fct_norm 2.0 +941 14 optimizer.lr 0.006598034345659715 +941 14 negative_sampler.num_negs_per_pos 12.0 +941 14 training.batch_size 1.0 +941 15 model.embedding_dim 0.0 +941 15 model.relation_dim 2.0 +941 15 model.scoring_fct_norm 2.0 +941 15 optimizer.lr 0.05749697477792997 +941 15 negative_sampler.num_negs_per_pos 57.0 +941 15 training.batch_size 2.0 +941 16 model.embedding_dim 2.0 +941 16 model.relation_dim 1.0 +941 16 model.scoring_fct_norm 1.0 +941 16 optimizer.lr 0.09389427101246377 +941 16 negative_sampler.num_negs_per_pos 7.0 +941 16 training.batch_size 0.0 +941 17 model.embedding_dim 2.0 +941 17 model.relation_dim 2.0 +941 17 model.scoring_fct_norm 1.0 +941 17 optimizer.lr 0.039686754357193134 +941 17 negative_sampler.num_negs_per_pos 5.0 +941 17 training.batch_size 2.0 +941 18 model.embedding_dim 0.0 +941 18 model.relation_dim 1.0 +941 18 model.scoring_fct_norm 1.0 +941 18 optimizer.lr 0.0012558633169282686 +941 18 negative_sampler.num_negs_per_pos 16.0 +941 18 training.batch_size 0.0 +941 19 model.embedding_dim 2.0 +941 19 
model.relation_dim 0.0 +941 19 model.scoring_fct_norm 1.0 +941 19 optimizer.lr 0.008982473791675586 +941 19 negative_sampler.num_negs_per_pos 80.0 +941 19 training.batch_size 0.0 +941 1 dataset """wn18rr""" +941 1 model """transr""" +941 1 loss """softplus""" +941 1 regularizer """no""" +941 1 optimizer """adam""" +941 1 training_loop """owa""" +941 1 negative_sampler """basic""" +941 1 evaluator """rankbased""" +941 2 dataset """wn18rr""" +941 2 model """transr""" +941 2 loss """softplus""" +941 2 regularizer """no""" +941 2 optimizer """adam""" +941 2 training_loop """owa""" +941 2 negative_sampler """basic""" +941 2 evaluator """rankbased""" +941 3 dataset """wn18rr""" +941 3 model """transr""" +941 3 loss """softplus""" +941 3 regularizer """no""" +941 3 optimizer """adam""" +941 3 training_loop """owa""" +941 3 negative_sampler """basic""" +941 3 evaluator """rankbased""" +941 4 dataset """wn18rr""" +941 4 model """transr""" +941 4 loss """softplus""" +941 4 regularizer """no""" +941 4 optimizer """adam""" +941 4 training_loop """owa""" +941 4 negative_sampler """basic""" +941 4 evaluator """rankbased""" +941 5 dataset """wn18rr""" +941 5 model """transr""" +941 5 loss """softplus""" +941 5 regularizer """no""" +941 5 optimizer """adam""" +941 5 training_loop """owa""" +941 5 negative_sampler """basic""" +941 5 evaluator """rankbased""" +941 6 dataset """wn18rr""" +941 6 model """transr""" +941 6 loss """softplus""" +941 6 regularizer """no""" +941 6 optimizer """adam""" +941 6 training_loop """owa""" +941 6 negative_sampler """basic""" +941 6 evaluator """rankbased""" +941 7 dataset """wn18rr""" +941 7 model """transr""" +941 7 loss """softplus""" +941 7 regularizer """no""" +941 7 optimizer """adam""" +941 7 training_loop """owa""" +941 7 negative_sampler """basic""" +941 7 evaluator """rankbased""" +941 8 dataset """wn18rr""" +941 8 model """transr""" +941 8 loss """softplus""" +941 8 regularizer """no""" +941 8 optimizer """adam""" +941 8 training_loop 
"""owa""" +941 8 negative_sampler """basic""" +941 8 evaluator """rankbased""" +941 9 dataset """wn18rr""" +941 9 model """transr""" +941 9 loss """softplus""" +941 9 regularizer """no""" +941 9 optimizer """adam""" +941 9 training_loop """owa""" +941 9 negative_sampler """basic""" +941 9 evaluator """rankbased""" +941 10 dataset """wn18rr""" +941 10 model """transr""" +941 10 loss """softplus""" +941 10 regularizer """no""" +941 10 optimizer """adam""" +941 10 training_loop """owa""" +941 10 negative_sampler """basic""" +941 10 evaluator """rankbased""" +941 11 dataset """wn18rr""" +941 11 model """transr""" +941 11 loss """softplus""" +941 11 regularizer """no""" +941 11 optimizer """adam""" +941 11 training_loop """owa""" +941 11 negative_sampler """basic""" +941 11 evaluator """rankbased""" +941 12 dataset """wn18rr""" +941 12 model """transr""" +941 12 loss """softplus""" +941 12 regularizer """no""" +941 12 optimizer """adam""" +941 12 training_loop """owa""" +941 12 negative_sampler """basic""" +941 12 evaluator """rankbased""" +941 13 dataset """wn18rr""" +941 13 model """transr""" +941 13 loss """softplus""" +941 13 regularizer """no""" +941 13 optimizer """adam""" +941 13 training_loop """owa""" +941 13 negative_sampler """basic""" +941 13 evaluator """rankbased""" +941 14 dataset """wn18rr""" +941 14 model """transr""" +941 14 loss """softplus""" +941 14 regularizer """no""" +941 14 optimizer """adam""" +941 14 training_loop """owa""" +941 14 negative_sampler """basic""" +941 14 evaluator """rankbased""" +941 15 dataset """wn18rr""" +941 15 model """transr""" +941 15 loss """softplus""" +941 15 regularizer """no""" +941 15 optimizer """adam""" +941 15 training_loop """owa""" +941 15 negative_sampler """basic""" +941 15 evaluator """rankbased""" +941 16 dataset """wn18rr""" +941 16 model """transr""" +941 16 loss """softplus""" +941 16 regularizer """no""" +941 16 optimizer """adam""" +941 16 training_loop """owa""" +941 16 negative_sampler """basic""" 
+941 16 evaluator """rankbased""" +941 17 dataset """wn18rr""" +941 17 model """transr""" +941 17 loss """softplus""" +941 17 regularizer """no""" +941 17 optimizer """adam""" +941 17 training_loop """owa""" +941 17 negative_sampler """basic""" +941 17 evaluator """rankbased""" +941 18 dataset """wn18rr""" +941 18 model """transr""" +941 18 loss """softplus""" +941 18 regularizer """no""" +941 18 optimizer """adam""" +941 18 training_loop """owa""" +941 18 negative_sampler """basic""" +941 18 evaluator """rankbased""" +941 19 dataset """wn18rr""" +941 19 model """transr""" +941 19 loss """softplus""" +941 19 regularizer """no""" +941 19 optimizer """adam""" +941 19 training_loop """owa""" +941 19 negative_sampler """basic""" +941 19 evaluator """rankbased""" +942 1 model.embedding_dim 1.0 +942 1 model.relation_dim 2.0 +942 1 model.scoring_fct_norm 1.0 +942 1 optimizer.lr 0.0028662557764065266 +942 1 negative_sampler.num_negs_per_pos 0.0 +942 1 training.batch_size 2.0 +942 2 model.embedding_dim 2.0 +942 2 model.relation_dim 2.0 +942 2 model.scoring_fct_norm 1.0 +942 2 optimizer.lr 0.001756634897174047 +942 2 negative_sampler.num_negs_per_pos 89.0 +942 2 training.batch_size 0.0 +942 3 model.embedding_dim 1.0 +942 3 model.relation_dim 2.0 +942 3 model.scoring_fct_norm 2.0 +942 3 optimizer.lr 0.0032785434321280825 +942 3 negative_sampler.num_negs_per_pos 15.0 +942 3 training.batch_size 0.0 +942 4 model.embedding_dim 1.0 +942 4 model.relation_dim 2.0 +942 4 model.scoring_fct_norm 2.0 +942 4 optimizer.lr 0.0028723397703343655 +942 4 negative_sampler.num_negs_per_pos 74.0 +942 4 training.batch_size 1.0 +942 5 model.embedding_dim 1.0 +942 5 model.relation_dim 0.0 +942 5 model.scoring_fct_norm 2.0 +942 5 optimizer.lr 0.0022341490491135357 +942 5 negative_sampler.num_negs_per_pos 15.0 +942 5 training.batch_size 1.0 +942 6 model.embedding_dim 1.0 +942 6 model.relation_dim 2.0 +942 6 model.scoring_fct_norm 2.0 +942 6 optimizer.lr 0.013749807111293873 +942 6 
negative_sampler.num_negs_per_pos 70.0 +942 6 training.batch_size 0.0 +942 7 model.embedding_dim 2.0 +942 7 model.relation_dim 0.0 +942 7 model.scoring_fct_norm 2.0 +942 7 optimizer.lr 0.014505283952157612 +942 7 negative_sampler.num_negs_per_pos 96.0 +942 7 training.batch_size 0.0 +942 8 model.embedding_dim 1.0 +942 8 model.relation_dim 0.0 +942 8 model.scoring_fct_norm 2.0 +942 8 optimizer.lr 0.010647674006433365 +942 8 negative_sampler.num_negs_per_pos 65.0 +942 8 training.batch_size 2.0 +942 9 model.embedding_dim 1.0 +942 9 model.relation_dim 2.0 +942 9 model.scoring_fct_norm 1.0 +942 9 optimizer.lr 0.007089420248926884 +942 9 negative_sampler.num_negs_per_pos 4.0 +942 9 training.batch_size 1.0 +942 10 model.embedding_dim 2.0 +942 10 model.relation_dim 1.0 +942 10 model.scoring_fct_norm 1.0 +942 10 optimizer.lr 0.008602690149238777 +942 10 negative_sampler.num_negs_per_pos 57.0 +942 10 training.batch_size 2.0 +942 11 model.embedding_dim 1.0 +942 11 model.relation_dim 1.0 +942 11 model.scoring_fct_norm 1.0 +942 11 optimizer.lr 0.03085733556920227 +942 11 negative_sampler.num_negs_per_pos 93.0 +942 11 training.batch_size 2.0 +942 12 model.embedding_dim 1.0 +942 12 model.relation_dim 0.0 +942 12 model.scoring_fct_norm 1.0 +942 12 optimizer.lr 0.0016126864967461018 +942 12 negative_sampler.num_negs_per_pos 51.0 +942 12 training.batch_size 2.0 +942 13 model.embedding_dim 1.0 +942 13 model.relation_dim 2.0 +942 13 model.scoring_fct_norm 2.0 +942 13 optimizer.lr 0.00402903711316381 +942 13 negative_sampler.num_negs_per_pos 51.0 +942 13 training.batch_size 0.0 +942 14 model.embedding_dim 0.0 +942 14 model.relation_dim 0.0 +942 14 model.scoring_fct_norm 2.0 +942 14 optimizer.lr 0.007858963050138683 +942 14 negative_sampler.num_negs_per_pos 45.0 +942 14 training.batch_size 2.0 +942 15 model.embedding_dim 0.0 +942 15 model.relation_dim 2.0 +942 15 model.scoring_fct_norm 2.0 +942 15 optimizer.lr 0.08903650887764307 +942 15 negative_sampler.num_negs_per_pos 46.0 +942 15 
training.batch_size 2.0 +942 16 model.embedding_dim 2.0 +942 16 model.relation_dim 1.0 +942 16 model.scoring_fct_norm 2.0 +942 16 optimizer.lr 0.001802749922804673 +942 16 negative_sampler.num_negs_per_pos 78.0 +942 16 training.batch_size 1.0 +942 17 model.embedding_dim 0.0 +942 17 model.relation_dim 1.0 +942 17 model.scoring_fct_norm 1.0 +942 17 optimizer.lr 0.0161874720078408 +942 17 negative_sampler.num_negs_per_pos 6.0 +942 17 training.batch_size 0.0 +942 18 model.embedding_dim 0.0 +942 18 model.relation_dim 1.0 +942 18 model.scoring_fct_norm 2.0 +942 18 optimizer.lr 0.005104067330612878 +942 18 negative_sampler.num_negs_per_pos 58.0 +942 18 training.batch_size 2.0 +942 19 model.embedding_dim 1.0 +942 19 model.relation_dim 1.0 +942 19 model.scoring_fct_norm 2.0 +942 19 optimizer.lr 0.008708742406837748 +942 19 negative_sampler.num_negs_per_pos 44.0 +942 19 training.batch_size 1.0 +942 20 model.embedding_dim 2.0 +942 20 model.relation_dim 1.0 +942 20 model.scoring_fct_norm 2.0 +942 20 optimizer.lr 0.010951969128723716 +942 20 negative_sampler.num_negs_per_pos 6.0 +942 20 training.batch_size 1.0 +942 21 model.embedding_dim 0.0 +942 21 model.relation_dim 0.0 +942 21 model.scoring_fct_norm 2.0 +942 21 optimizer.lr 0.004768726261604421 +942 21 negative_sampler.num_negs_per_pos 50.0 +942 21 training.batch_size 1.0 +942 22 model.embedding_dim 2.0 +942 22 model.relation_dim 0.0 +942 22 model.scoring_fct_norm 2.0 +942 22 optimizer.lr 0.0011097554928875646 +942 22 negative_sampler.num_negs_per_pos 88.0 +942 22 training.batch_size 1.0 +942 23 model.embedding_dim 1.0 +942 23 model.relation_dim 0.0 +942 23 model.scoring_fct_norm 1.0 +942 23 optimizer.lr 0.002615032049303092 +942 23 negative_sampler.num_negs_per_pos 65.0 +942 23 training.batch_size 2.0 +942 24 model.embedding_dim 0.0 +942 24 model.relation_dim 0.0 +942 24 model.scoring_fct_norm 2.0 +942 24 optimizer.lr 0.026424510166249665 +942 24 negative_sampler.num_negs_per_pos 78.0 +942 24 training.batch_size 1.0 +942 25 
model.embedding_dim 0.0 +942 25 model.relation_dim 1.0 +942 25 model.scoring_fct_norm 1.0 +942 25 optimizer.lr 0.09222273592677592 +942 25 negative_sampler.num_negs_per_pos 99.0 +942 25 training.batch_size 1.0 +942 26 model.embedding_dim 1.0 +942 26 model.relation_dim 1.0 +942 26 model.scoring_fct_norm 1.0 +942 26 optimizer.lr 0.001357303879492624 +942 26 negative_sampler.num_negs_per_pos 65.0 +942 26 training.batch_size 2.0 +942 27 model.embedding_dim 2.0 +942 27 model.relation_dim 2.0 +942 27 model.scoring_fct_norm 2.0 +942 27 optimizer.lr 0.006690184472458396 +942 27 negative_sampler.num_negs_per_pos 92.0 +942 27 training.batch_size 2.0 +942 28 model.embedding_dim 0.0 +942 28 model.relation_dim 2.0 +942 28 model.scoring_fct_norm 2.0 +942 28 optimizer.lr 0.0019267418516328194 +942 28 negative_sampler.num_negs_per_pos 34.0 +942 28 training.batch_size 2.0 +942 29 model.embedding_dim 2.0 +942 29 model.relation_dim 0.0 +942 29 model.scoring_fct_norm 1.0 +942 29 optimizer.lr 0.0032841429286705787 +942 29 negative_sampler.num_negs_per_pos 26.0 +942 29 training.batch_size 0.0 +942 30 model.embedding_dim 2.0 +942 30 model.relation_dim 2.0 +942 30 model.scoring_fct_norm 1.0 +942 30 optimizer.lr 0.008027644677262432 +942 30 negative_sampler.num_negs_per_pos 91.0 +942 30 training.batch_size 2.0 +942 31 model.embedding_dim 1.0 +942 31 model.relation_dim 0.0 +942 31 model.scoring_fct_norm 1.0 +942 31 optimizer.lr 0.0019500389953572214 +942 31 negative_sampler.num_negs_per_pos 58.0 +942 31 training.batch_size 0.0 +942 32 model.embedding_dim 2.0 +942 32 model.relation_dim 1.0 +942 32 model.scoring_fct_norm 1.0 +942 32 optimizer.lr 0.0011786796494823082 +942 32 negative_sampler.num_negs_per_pos 93.0 +942 32 training.batch_size 2.0 +942 33 model.embedding_dim 0.0 +942 33 model.relation_dim 2.0 +942 33 model.scoring_fct_norm 1.0 +942 33 optimizer.lr 0.035128876181314356 +942 33 negative_sampler.num_negs_per_pos 31.0 +942 33 training.batch_size 2.0 +942 34 model.embedding_dim 2.0 
+942 34 model.relation_dim 0.0 +942 34 model.scoring_fct_norm 1.0 +942 34 optimizer.lr 0.019279814738842313 +942 34 negative_sampler.num_negs_per_pos 36.0 +942 34 training.batch_size 1.0 +942 35 model.embedding_dim 0.0 +942 35 model.relation_dim 2.0 +942 35 model.scoring_fct_norm 2.0 +942 35 optimizer.lr 0.004114895569002203 +942 35 negative_sampler.num_negs_per_pos 16.0 +942 35 training.batch_size 2.0 +942 36 model.embedding_dim 0.0 +942 36 model.relation_dim 1.0 +942 36 model.scoring_fct_norm 1.0 +942 36 optimizer.lr 0.08118517761953886 +942 36 negative_sampler.num_negs_per_pos 35.0 +942 36 training.batch_size 2.0 +942 37 model.embedding_dim 0.0 +942 37 model.relation_dim 2.0 +942 37 model.scoring_fct_norm 2.0 +942 37 optimizer.lr 0.050777191069634714 +942 37 negative_sampler.num_negs_per_pos 44.0 +942 37 training.batch_size 2.0 +942 38 model.embedding_dim 0.0 +942 38 model.relation_dim 0.0 +942 38 model.scoring_fct_norm 2.0 +942 38 optimizer.lr 0.04554837807383038 +942 38 negative_sampler.num_negs_per_pos 51.0 +942 38 training.batch_size 0.0 +942 39 model.embedding_dim 1.0 +942 39 model.relation_dim 2.0 +942 39 model.scoring_fct_norm 1.0 +942 39 optimizer.lr 0.02426274510583174 +942 39 negative_sampler.num_negs_per_pos 33.0 +942 39 training.batch_size 2.0 +942 40 model.embedding_dim 2.0 +942 40 model.relation_dim 0.0 +942 40 model.scoring_fct_norm 2.0 +942 40 optimizer.lr 0.002061207285396319 +942 40 negative_sampler.num_negs_per_pos 26.0 +942 40 training.batch_size 0.0 +942 1 dataset """wn18rr""" +942 1 model """transr""" +942 1 loss """softplus""" +942 1 regularizer """no""" +942 1 optimizer """adam""" +942 1 training_loop """owa""" +942 1 negative_sampler """basic""" +942 1 evaluator """rankbased""" +942 2 dataset """wn18rr""" +942 2 model """transr""" +942 2 loss """softplus""" +942 2 regularizer """no""" +942 2 optimizer """adam""" +942 2 training_loop """owa""" +942 2 negative_sampler """basic""" +942 2 evaluator """rankbased""" +942 3 dataset """wn18rr""" 
+942 3 model """transr""" +942 3 loss """softplus""" +942 3 regularizer """no""" +942 3 optimizer """adam""" +942 3 training_loop """owa""" +942 3 negative_sampler """basic""" +942 3 evaluator """rankbased""" +942 4 dataset """wn18rr""" +942 4 model """transr""" +942 4 loss """softplus""" +942 4 regularizer """no""" +942 4 optimizer """adam""" +942 4 training_loop """owa""" +942 4 negative_sampler """basic""" +942 4 evaluator """rankbased""" +942 5 dataset """wn18rr""" +942 5 model """transr""" +942 5 loss """softplus""" +942 5 regularizer """no""" +942 5 optimizer """adam""" +942 5 training_loop """owa""" +942 5 negative_sampler """basic""" +942 5 evaluator """rankbased""" +942 6 dataset """wn18rr""" +942 6 model """transr""" +942 6 loss """softplus""" +942 6 regularizer """no""" +942 6 optimizer """adam""" +942 6 training_loop """owa""" +942 6 negative_sampler """basic""" +942 6 evaluator """rankbased""" +942 7 dataset """wn18rr""" +942 7 model """transr""" +942 7 loss """softplus""" +942 7 regularizer """no""" +942 7 optimizer """adam""" +942 7 training_loop """owa""" +942 7 negative_sampler """basic""" +942 7 evaluator """rankbased""" +942 8 dataset """wn18rr""" +942 8 model """transr""" +942 8 loss """softplus""" +942 8 regularizer """no""" +942 8 optimizer """adam""" +942 8 training_loop """owa""" +942 8 negative_sampler """basic""" +942 8 evaluator """rankbased""" +942 9 dataset """wn18rr""" +942 9 model """transr""" +942 9 loss """softplus""" +942 9 regularizer """no""" +942 9 optimizer """adam""" +942 9 training_loop """owa""" +942 9 negative_sampler """basic""" +942 9 evaluator """rankbased""" +942 10 dataset """wn18rr""" +942 10 model """transr""" +942 10 loss """softplus""" +942 10 regularizer """no""" +942 10 optimizer """adam""" +942 10 training_loop """owa""" +942 10 negative_sampler """basic""" +942 10 evaluator """rankbased""" +942 11 dataset """wn18rr""" +942 11 model """transr""" +942 11 loss """softplus""" +942 11 regularizer """no""" +942 11 
optimizer """adam""" +942 11 training_loop """owa""" +942 11 negative_sampler """basic""" +942 11 evaluator """rankbased""" +942 12 dataset """wn18rr""" +942 12 model """transr""" +942 12 loss """softplus""" +942 12 regularizer """no""" +942 12 optimizer """adam""" +942 12 training_loop """owa""" +942 12 negative_sampler """basic""" +942 12 evaluator """rankbased""" +942 13 dataset """wn18rr""" +942 13 model """transr""" +942 13 loss """softplus""" +942 13 regularizer """no""" +942 13 optimizer """adam""" +942 13 training_loop """owa""" +942 13 negative_sampler """basic""" +942 13 evaluator """rankbased""" +942 14 dataset """wn18rr""" +942 14 model """transr""" +942 14 loss """softplus""" +942 14 regularizer """no""" +942 14 optimizer """adam""" +942 14 training_loop """owa""" +942 14 negative_sampler """basic""" +942 14 evaluator """rankbased""" +942 15 dataset """wn18rr""" +942 15 model """transr""" +942 15 loss """softplus""" +942 15 regularizer """no""" +942 15 optimizer """adam""" +942 15 training_loop """owa""" +942 15 negative_sampler """basic""" +942 15 evaluator """rankbased""" +942 16 dataset """wn18rr""" +942 16 model """transr""" +942 16 loss """softplus""" +942 16 regularizer """no""" +942 16 optimizer """adam""" +942 16 training_loop """owa""" +942 16 negative_sampler """basic""" +942 16 evaluator """rankbased""" +942 17 dataset """wn18rr""" +942 17 model """transr""" +942 17 loss """softplus""" +942 17 regularizer """no""" +942 17 optimizer """adam""" +942 17 training_loop """owa""" +942 17 negative_sampler """basic""" +942 17 evaluator """rankbased""" +942 18 dataset """wn18rr""" +942 18 model """transr""" +942 18 loss """softplus""" +942 18 regularizer """no""" +942 18 optimizer """adam""" +942 18 training_loop """owa""" +942 18 negative_sampler """basic""" +942 18 evaluator """rankbased""" +942 19 dataset """wn18rr""" +942 19 model """transr""" +942 19 loss """softplus""" +942 19 regularizer """no""" +942 19 optimizer """adam""" +942 19 
training_loop """owa""" +942 19 negative_sampler """basic""" +942 19 evaluator """rankbased""" +942 20 dataset """wn18rr""" +942 20 model """transr""" +942 20 loss """softplus""" +942 20 regularizer """no""" +942 20 optimizer """adam""" +942 20 training_loop """owa""" +942 20 negative_sampler """basic""" +942 20 evaluator """rankbased""" +942 21 dataset """wn18rr""" +942 21 model """transr""" +942 21 loss """softplus""" +942 21 regularizer """no""" +942 21 optimizer """adam""" +942 21 training_loop """owa""" +942 21 negative_sampler """basic""" +942 21 evaluator """rankbased""" +942 22 dataset """wn18rr""" +942 22 model """transr""" +942 22 loss """softplus""" +942 22 regularizer """no""" +942 22 optimizer """adam""" +942 22 training_loop """owa""" +942 22 negative_sampler """basic""" +942 22 evaluator """rankbased""" +942 23 dataset """wn18rr""" +942 23 model """transr""" +942 23 loss """softplus""" +942 23 regularizer """no""" +942 23 optimizer """adam""" +942 23 training_loop """owa""" +942 23 negative_sampler """basic""" +942 23 evaluator """rankbased""" +942 24 dataset """wn18rr""" +942 24 model """transr""" +942 24 loss """softplus""" +942 24 regularizer """no""" +942 24 optimizer """adam""" +942 24 training_loop """owa""" +942 24 negative_sampler """basic""" +942 24 evaluator """rankbased""" +942 25 dataset """wn18rr""" +942 25 model """transr""" +942 25 loss """softplus""" +942 25 regularizer """no""" +942 25 optimizer """adam""" +942 25 training_loop """owa""" +942 25 negative_sampler """basic""" +942 25 evaluator """rankbased""" +942 26 dataset """wn18rr""" +942 26 model """transr""" +942 26 loss """softplus""" +942 26 regularizer """no""" +942 26 optimizer """adam""" +942 26 training_loop """owa""" +942 26 negative_sampler """basic""" +942 26 evaluator """rankbased""" +942 27 dataset """wn18rr""" +942 27 model """transr""" +942 27 loss """softplus""" +942 27 regularizer """no""" +942 27 optimizer """adam""" +942 27 training_loop """owa""" +942 27 
negative_sampler """basic""" +942 27 evaluator """rankbased""" +942 28 dataset """wn18rr""" +942 28 model """transr""" +942 28 loss """softplus""" +942 28 regularizer """no""" +942 28 optimizer """adam""" +942 28 training_loop """owa""" +942 28 negative_sampler """basic""" +942 28 evaluator """rankbased""" +942 29 dataset """wn18rr""" +942 29 model """transr""" +942 29 loss """softplus""" +942 29 regularizer """no""" +942 29 optimizer """adam""" +942 29 training_loop """owa""" +942 29 negative_sampler """basic""" +942 29 evaluator """rankbased""" +942 30 dataset """wn18rr""" +942 30 model """transr""" +942 30 loss """softplus""" +942 30 regularizer """no""" +942 30 optimizer """adam""" +942 30 training_loop """owa""" +942 30 negative_sampler """basic""" +942 30 evaluator """rankbased""" +942 31 dataset """wn18rr""" +942 31 model """transr""" +942 31 loss """softplus""" +942 31 regularizer """no""" +942 31 optimizer """adam""" +942 31 training_loop """owa""" +942 31 negative_sampler """basic""" +942 31 evaluator """rankbased""" +942 32 dataset """wn18rr""" +942 32 model """transr""" +942 32 loss """softplus""" +942 32 regularizer """no""" +942 32 optimizer """adam""" +942 32 training_loop """owa""" +942 32 negative_sampler """basic""" +942 32 evaluator """rankbased""" +942 33 dataset """wn18rr""" +942 33 model """transr""" +942 33 loss """softplus""" +942 33 regularizer """no""" +942 33 optimizer """adam""" +942 33 training_loop """owa""" +942 33 negative_sampler """basic""" +942 33 evaluator """rankbased""" +942 34 dataset """wn18rr""" +942 34 model """transr""" +942 34 loss """softplus""" +942 34 regularizer """no""" +942 34 optimizer """adam""" +942 34 training_loop """owa""" +942 34 negative_sampler """basic""" +942 34 evaluator """rankbased""" +942 35 dataset """wn18rr""" +942 35 model """transr""" +942 35 loss """softplus""" +942 35 regularizer """no""" +942 35 optimizer """adam""" +942 35 training_loop """owa""" +942 35 negative_sampler """basic""" +942 35 
evaluator """rankbased""" +942 36 dataset """wn18rr""" +942 36 model """transr""" +942 36 loss """softplus""" +942 36 regularizer """no""" +942 36 optimizer """adam""" +942 36 training_loop """owa""" +942 36 negative_sampler """basic""" +942 36 evaluator """rankbased""" +942 37 dataset """wn18rr""" +942 37 model """transr""" +942 37 loss """softplus""" +942 37 regularizer """no""" +942 37 optimizer """adam""" +942 37 training_loop """owa""" +942 37 negative_sampler """basic""" +942 37 evaluator """rankbased""" +942 38 dataset """wn18rr""" +942 38 model """transr""" +942 38 loss """softplus""" +942 38 regularizer """no""" +942 38 optimizer """adam""" +942 38 training_loop """owa""" +942 38 negative_sampler """basic""" +942 38 evaluator """rankbased""" +942 39 dataset """wn18rr""" +942 39 model """transr""" +942 39 loss """softplus""" +942 39 regularizer """no""" +942 39 optimizer """adam""" +942 39 training_loop """owa""" +942 39 negative_sampler """basic""" +942 39 evaluator """rankbased""" +942 40 dataset """wn18rr""" +942 40 model """transr""" +942 40 loss """softplus""" +942 40 regularizer """no""" +942 40 optimizer """adam""" +942 40 training_loop """owa""" +942 40 negative_sampler """basic""" +942 40 evaluator """rankbased""" +943 1 model.embedding_dim 0.0 +943 1 model.relation_dim 2.0 +943 1 model.scoring_fct_norm 1.0 +943 1 optimizer.lr 0.05859974509422384 +943 1 training.batch_size 1.0 +943 1 training.label_smoothing 0.2869825239981775 +943 2 model.embedding_dim 2.0 +943 2 model.relation_dim 2.0 +943 2 model.scoring_fct_norm 1.0 +943 2 optimizer.lr 0.006125504544252172 +943 2 training.batch_size 1.0 +943 2 training.label_smoothing 0.03153423253711048 +943 3 model.embedding_dim 1.0 +943 3 model.relation_dim 0.0 +943 3 model.scoring_fct_norm 1.0 +943 3 optimizer.lr 0.007940021099984887 +943 3 training.batch_size 0.0 +943 3 training.label_smoothing 0.0017794933117118736 +943 1 dataset """wn18rr""" +943 1 model """transr""" +943 1 loss """bceaftersigmoid""" 
+943 1 regularizer """no""" +943 1 optimizer """adam""" +943 1 training_loop """lcwa""" +943 1 evaluator """rankbased""" +943 2 dataset """wn18rr""" +943 2 model """transr""" +943 2 loss """bceaftersigmoid""" +943 2 regularizer """no""" +943 2 optimizer """adam""" +943 2 training_loop """lcwa""" +943 2 evaluator """rankbased""" +943 3 dataset """wn18rr""" +943 3 model """transr""" +943 3 loss """bceaftersigmoid""" +943 3 regularizer """no""" +943 3 optimizer """adam""" +943 3 training_loop """lcwa""" +943 3 evaluator """rankbased""" +944 1 model.embedding_dim 0.0 +944 1 model.relation_dim 1.0 +944 1 model.scoring_fct_norm 2.0 +944 1 optimizer.lr 0.002336488328408217 +944 1 training.batch_size 0.0 +944 1 training.label_smoothing 0.005772835652070644 +944 2 model.embedding_dim 1.0 +944 2 model.relation_dim 0.0 +944 2 model.scoring_fct_norm 1.0 +944 2 optimizer.lr 0.0589357385442075 +944 2 training.batch_size 2.0 +944 2 training.label_smoothing 0.003736464388225615 +944 3 model.embedding_dim 1.0 +944 3 model.relation_dim 2.0 +944 3 model.scoring_fct_norm 2.0 +944 3 optimizer.lr 0.0010492347421555987 +944 3 training.batch_size 1.0 +944 3 training.label_smoothing 0.33988511070927213 +944 4 model.embedding_dim 0.0 +944 4 model.relation_dim 1.0 +944 4 model.scoring_fct_norm 1.0 +944 4 optimizer.lr 0.0029614711047449894 +944 4 training.batch_size 2.0 +944 4 training.label_smoothing 0.03281694932939264 +944 5 model.embedding_dim 0.0 +944 5 model.relation_dim 1.0 +944 5 model.scoring_fct_norm 1.0 +944 5 optimizer.lr 0.044834421067398544 +944 5 training.batch_size 2.0 +944 5 training.label_smoothing 0.019251428233121048 +944 6 model.embedding_dim 2.0 +944 6 model.relation_dim 0.0 +944 6 model.scoring_fct_norm 1.0 +944 6 optimizer.lr 0.03324301819788877 +944 6 training.batch_size 1.0 +944 6 training.label_smoothing 0.1251926140599146 +944 7 model.embedding_dim 2.0 +944 7 model.relation_dim 0.0 +944 7 model.scoring_fct_norm 1.0 +944 7 optimizer.lr 0.0015068553230302968 +944 7 
training.batch_size 0.0 +944 7 training.label_smoothing 0.032795200980833765 +944 1 dataset """wn18rr""" +944 1 model """transr""" +944 1 loss """bceaftersigmoid""" +944 1 regularizer """no""" +944 1 optimizer """adam""" +944 1 training_loop """lcwa""" +944 1 evaluator """rankbased""" +944 2 dataset """wn18rr""" +944 2 model """transr""" +944 2 loss """bceaftersigmoid""" +944 2 regularizer """no""" +944 2 optimizer """adam""" +944 2 training_loop """lcwa""" +944 2 evaluator """rankbased""" +944 3 dataset """wn18rr""" +944 3 model """transr""" +944 3 loss """bceaftersigmoid""" +944 3 regularizer """no""" +944 3 optimizer """adam""" +944 3 training_loop """lcwa""" +944 3 evaluator """rankbased""" +944 4 dataset """wn18rr""" +944 4 model """transr""" +944 4 loss """bceaftersigmoid""" +944 4 regularizer """no""" +944 4 optimizer """adam""" +944 4 training_loop """lcwa""" +944 4 evaluator """rankbased""" +944 5 dataset """wn18rr""" +944 5 model """transr""" +944 5 loss """bceaftersigmoid""" +944 5 regularizer """no""" +944 5 optimizer """adam""" +944 5 training_loop """lcwa""" +944 5 evaluator """rankbased""" +944 6 dataset """wn18rr""" +944 6 model """transr""" +944 6 loss """bceaftersigmoid""" +944 6 regularizer """no""" +944 6 optimizer """adam""" +944 6 training_loop """lcwa""" +944 6 evaluator """rankbased""" +944 7 dataset """wn18rr""" +944 7 model """transr""" +944 7 loss """bceaftersigmoid""" +944 7 regularizer """no""" +944 7 optimizer """adam""" +944 7 training_loop """lcwa""" +944 7 evaluator """rankbased""" +945 1 model.embedding_dim 0.0 +945 1 model.relation_dim 1.0 +945 1 model.scoring_fct_norm 1.0 +945 1 optimizer.lr 0.03196760071993093 +945 1 negative_sampler.num_negs_per_pos 30.0 +945 1 training.batch_size 1.0 +945 2 model.embedding_dim 0.0 +945 2 model.relation_dim 2.0 +945 2 model.scoring_fct_norm 2.0 +945 2 optimizer.lr 0.011474834418461585 +945 2 negative_sampler.num_negs_per_pos 68.0 +945 2 training.batch_size 2.0 +945 3 model.embedding_dim 1.0 
+945 3 model.relation_dim 1.0 +945 3 model.scoring_fct_norm 1.0 +945 3 optimizer.lr 0.03272831190136645 +945 3 negative_sampler.num_negs_per_pos 23.0 +945 3 training.batch_size 2.0 +945 4 model.embedding_dim 0.0 +945 4 model.relation_dim 0.0 +945 4 model.scoring_fct_norm 2.0 +945 4 optimizer.lr 0.0034043039904981566 +945 4 negative_sampler.num_negs_per_pos 7.0 +945 4 training.batch_size 0.0 +945 5 model.embedding_dim 0.0 +945 5 model.relation_dim 2.0 +945 5 model.scoring_fct_norm 2.0 +945 5 optimizer.lr 0.005228003724314512 +945 5 negative_sampler.num_negs_per_pos 40.0 +945 5 training.batch_size 1.0 +945 6 model.embedding_dim 0.0 +945 6 model.relation_dim 1.0 +945 6 model.scoring_fct_norm 2.0 +945 6 optimizer.lr 0.028176089839641157 +945 6 negative_sampler.num_negs_per_pos 37.0 +945 6 training.batch_size 2.0 +945 7 model.embedding_dim 1.0 +945 7 model.relation_dim 1.0 +945 7 model.scoring_fct_norm 1.0 +945 7 optimizer.lr 0.028742404809148848 +945 7 negative_sampler.num_negs_per_pos 63.0 +945 7 training.batch_size 0.0 +945 8 model.embedding_dim 2.0 +945 8 model.relation_dim 0.0 +945 8 model.scoring_fct_norm 1.0 +945 8 optimizer.lr 0.0030435872421319178 +945 8 negative_sampler.num_negs_per_pos 75.0 +945 8 training.batch_size 0.0 +945 9 model.embedding_dim 0.0 +945 9 model.relation_dim 2.0 +945 9 model.scoring_fct_norm 1.0 +945 9 optimizer.lr 0.05765195671047186 +945 9 negative_sampler.num_negs_per_pos 45.0 +945 9 training.batch_size 2.0 +945 10 model.embedding_dim 0.0 +945 10 model.relation_dim 0.0 +945 10 model.scoring_fct_norm 1.0 +945 10 optimizer.lr 0.04221527952292531 +945 10 negative_sampler.num_negs_per_pos 92.0 +945 10 training.batch_size 0.0 +945 11 model.embedding_dim 1.0 +945 11 model.relation_dim 2.0 +945 11 model.scoring_fct_norm 1.0 +945 11 optimizer.lr 0.005359621166125416 +945 11 negative_sampler.num_negs_per_pos 59.0 +945 11 training.batch_size 0.0 +945 12 model.embedding_dim 2.0 +945 12 model.relation_dim 0.0 +945 12 model.scoring_fct_norm 1.0 +945 
12 optimizer.lr 0.05680511184930888 +945 12 negative_sampler.num_negs_per_pos 16.0 +945 12 training.batch_size 2.0 +945 13 model.embedding_dim 2.0 +945 13 model.relation_dim 1.0 +945 13 model.scoring_fct_norm 2.0 +945 13 optimizer.lr 0.021554404515708624 +945 13 negative_sampler.num_negs_per_pos 78.0 +945 13 training.batch_size 0.0 +945 14 model.embedding_dim 2.0 +945 14 model.relation_dim 1.0 +945 14 model.scoring_fct_norm 1.0 +945 14 optimizer.lr 0.09248795407514339 +945 14 negative_sampler.num_negs_per_pos 86.0 +945 14 training.batch_size 0.0 +945 15 model.embedding_dim 2.0 +945 15 model.relation_dim 2.0 +945 15 model.scoring_fct_norm 1.0 +945 15 optimizer.lr 0.02233302254463638 +945 15 negative_sampler.num_negs_per_pos 34.0 +945 15 training.batch_size 2.0 +945 16 model.embedding_dim 0.0 +945 16 model.relation_dim 0.0 +945 16 model.scoring_fct_norm 2.0 +945 16 optimizer.lr 0.00321698677151177 +945 16 negative_sampler.num_negs_per_pos 59.0 +945 16 training.batch_size 2.0 +945 17 model.embedding_dim 2.0 +945 17 model.relation_dim 0.0 +945 17 model.scoring_fct_norm 2.0 +945 17 optimizer.lr 0.005117581788438552 +945 17 negative_sampler.num_negs_per_pos 94.0 +945 17 training.batch_size 2.0 +945 18 model.embedding_dim 1.0 +945 18 model.relation_dim 1.0 +945 18 model.scoring_fct_norm 2.0 +945 18 optimizer.lr 0.015430438009986516 +945 18 negative_sampler.num_negs_per_pos 70.0 +945 18 training.batch_size 2.0 +945 19 model.embedding_dim 0.0 +945 19 model.relation_dim 1.0 +945 19 model.scoring_fct_norm 2.0 +945 19 optimizer.lr 0.0030295324183842996 +945 19 negative_sampler.num_negs_per_pos 17.0 +945 19 training.batch_size 2.0 +945 20 model.embedding_dim 2.0 +945 20 model.relation_dim 0.0 +945 20 model.scoring_fct_norm 2.0 +945 20 optimizer.lr 0.008378322203295452 +945 20 negative_sampler.num_negs_per_pos 59.0 +945 20 training.batch_size 1.0 +945 21 model.embedding_dim 2.0 +945 21 model.relation_dim 2.0 +945 21 model.scoring_fct_norm 2.0 +945 21 optimizer.lr 
0.06201984802009564 +945 21 negative_sampler.num_negs_per_pos 80.0 +945 21 training.batch_size 0.0 +945 1 dataset """wn18rr""" +945 1 model """transr""" +945 1 loss """bceaftersigmoid""" +945 1 regularizer """no""" +945 1 optimizer """adam""" +945 1 training_loop """owa""" +945 1 negative_sampler """basic""" +945 1 evaluator """rankbased""" +945 2 dataset """wn18rr""" +945 2 model """transr""" +945 2 loss """bceaftersigmoid""" +945 2 regularizer """no""" +945 2 optimizer """adam""" +945 2 training_loop """owa""" +945 2 negative_sampler """basic""" +945 2 evaluator """rankbased""" +945 3 dataset """wn18rr""" +945 3 model """transr""" +945 3 loss """bceaftersigmoid""" +945 3 regularizer """no""" +945 3 optimizer """adam""" +945 3 training_loop """owa""" +945 3 negative_sampler """basic""" +945 3 evaluator """rankbased""" +945 4 dataset """wn18rr""" +945 4 model """transr""" +945 4 loss """bceaftersigmoid""" +945 4 regularizer """no""" +945 4 optimizer """adam""" +945 4 training_loop """owa""" +945 4 negative_sampler """basic""" +945 4 evaluator """rankbased""" +945 5 dataset """wn18rr""" +945 5 model """transr""" +945 5 loss """bceaftersigmoid""" +945 5 regularizer """no""" +945 5 optimizer """adam""" +945 5 training_loop """owa""" +945 5 negative_sampler """basic""" +945 5 evaluator """rankbased""" +945 6 dataset """wn18rr""" +945 6 model """transr""" +945 6 loss """bceaftersigmoid""" +945 6 regularizer """no""" +945 6 optimizer """adam""" +945 6 training_loop """owa""" +945 6 negative_sampler """basic""" +945 6 evaluator """rankbased""" +945 7 dataset """wn18rr""" +945 7 model """transr""" +945 7 loss """bceaftersigmoid""" +945 7 regularizer """no""" +945 7 optimizer """adam""" +945 7 training_loop """owa""" +945 7 negative_sampler """basic""" +945 7 evaluator """rankbased""" +945 8 dataset """wn18rr""" +945 8 model """transr""" +945 8 loss """bceaftersigmoid""" +945 8 regularizer """no""" +945 8 optimizer """adam""" +945 8 training_loop """owa""" +945 8 
negative_sampler """basic""" +945 8 evaluator """rankbased""" +945 9 dataset """wn18rr""" +945 9 model """transr""" +945 9 loss """bceaftersigmoid""" +945 9 regularizer """no""" +945 9 optimizer """adam""" +945 9 training_loop """owa""" +945 9 negative_sampler """basic""" +945 9 evaluator """rankbased""" +945 10 dataset """wn18rr""" +945 10 model """transr""" +945 10 loss """bceaftersigmoid""" +945 10 regularizer """no""" +945 10 optimizer """adam""" +945 10 training_loop """owa""" +945 10 negative_sampler """basic""" +945 10 evaluator """rankbased""" +945 11 dataset """wn18rr""" +945 11 model """transr""" +945 11 loss """bceaftersigmoid""" +945 11 regularizer """no""" +945 11 optimizer """adam""" +945 11 training_loop """owa""" +945 11 negative_sampler """basic""" +945 11 evaluator """rankbased""" +945 12 dataset """wn18rr""" +945 12 model """transr""" +945 12 loss """bceaftersigmoid""" +945 12 regularizer """no""" +945 12 optimizer """adam""" +945 12 training_loop """owa""" +945 12 negative_sampler """basic""" +945 12 evaluator """rankbased""" +945 13 dataset """wn18rr""" +945 13 model """transr""" +945 13 loss """bceaftersigmoid""" +945 13 regularizer """no""" +945 13 optimizer """adam""" +945 13 training_loop """owa""" +945 13 negative_sampler """basic""" +945 13 evaluator """rankbased""" +945 14 dataset """wn18rr""" +945 14 model """transr""" +945 14 loss """bceaftersigmoid""" +945 14 regularizer """no""" +945 14 optimizer """adam""" +945 14 training_loop """owa""" +945 14 negative_sampler """basic""" +945 14 evaluator """rankbased""" +945 15 dataset """wn18rr""" +945 15 model """transr""" +945 15 loss """bceaftersigmoid""" +945 15 regularizer """no""" +945 15 optimizer """adam""" +945 15 training_loop """owa""" +945 15 negative_sampler """basic""" +945 15 evaluator """rankbased""" +945 16 dataset """wn18rr""" +945 16 model """transr""" +945 16 loss """bceaftersigmoid""" +945 16 regularizer """no""" +945 16 optimizer """adam""" +945 16 training_loop """owa""" 
+945 16 negative_sampler """basic""" +945 16 evaluator """rankbased""" +945 17 dataset """wn18rr""" +945 17 model """transr""" +945 17 loss """bceaftersigmoid""" +945 17 regularizer """no""" +945 17 optimizer """adam""" +945 17 training_loop """owa""" +945 17 negative_sampler """basic""" +945 17 evaluator """rankbased""" +945 18 dataset """wn18rr""" +945 18 model """transr""" +945 18 loss """bceaftersigmoid""" +945 18 regularizer """no""" +945 18 optimizer """adam""" +945 18 training_loop """owa""" +945 18 negative_sampler """basic""" +945 18 evaluator """rankbased""" +945 19 dataset """wn18rr""" +945 19 model """transr""" +945 19 loss """bceaftersigmoid""" +945 19 regularizer """no""" +945 19 optimizer """adam""" +945 19 training_loop """owa""" +945 19 negative_sampler """basic""" +945 19 evaluator """rankbased""" +945 20 dataset """wn18rr""" +945 20 model """transr""" +945 20 loss """bceaftersigmoid""" +945 20 regularizer """no""" +945 20 optimizer """adam""" +945 20 training_loop """owa""" +945 20 negative_sampler """basic""" +945 20 evaluator """rankbased""" +945 21 dataset """wn18rr""" +945 21 model """transr""" +945 21 loss """bceaftersigmoid""" +945 21 regularizer """no""" +945 21 optimizer """adam""" +945 21 training_loop """owa""" +945 21 negative_sampler """basic""" +945 21 evaluator """rankbased""" +946 1 model.embedding_dim 0.0 +946 1 model.relation_dim 2.0 +946 1 model.scoring_fct_norm 1.0 +946 1 optimizer.lr 0.021530398574487034 +946 1 negative_sampler.num_negs_per_pos 68.0 +946 1 training.batch_size 2.0 +946 2 model.embedding_dim 0.0 +946 2 model.relation_dim 0.0 +946 2 model.scoring_fct_norm 2.0 +946 2 optimizer.lr 0.002330932386708055 +946 2 negative_sampler.num_negs_per_pos 38.0 +946 2 training.batch_size 0.0 +946 3 model.embedding_dim 1.0 +946 3 model.relation_dim 0.0 +946 3 model.scoring_fct_norm 2.0 +946 3 optimizer.lr 0.002808638644800394 +946 3 negative_sampler.num_negs_per_pos 6.0 +946 3 training.batch_size 0.0 +946 4 model.embedding_dim 1.0 
+946 4 model.relation_dim 1.0 +946 4 model.scoring_fct_norm 2.0 +946 4 optimizer.lr 0.007239605089231309 +946 4 negative_sampler.num_negs_per_pos 25.0 +946 4 training.batch_size 1.0 +946 5 model.embedding_dim 2.0 +946 5 model.relation_dim 1.0 +946 5 model.scoring_fct_norm 1.0 +946 5 optimizer.lr 0.00532759123528127 +946 5 negative_sampler.num_negs_per_pos 74.0 +946 5 training.batch_size 0.0 +946 6 model.embedding_dim 0.0 +946 6 model.relation_dim 0.0 +946 6 model.scoring_fct_norm 2.0 +946 6 optimizer.lr 0.002839485928778743 +946 6 negative_sampler.num_negs_per_pos 20.0 +946 6 training.batch_size 1.0 +946 7 model.embedding_dim 0.0 +946 7 model.relation_dim 0.0 +946 7 model.scoring_fct_norm 1.0 +946 7 optimizer.lr 0.0024058645452730604 +946 7 negative_sampler.num_negs_per_pos 58.0 +946 7 training.batch_size 2.0 +946 8 model.embedding_dim 0.0 +946 8 model.relation_dim 1.0 +946 8 model.scoring_fct_norm 2.0 +946 8 optimizer.lr 0.0786848550413752 +946 8 negative_sampler.num_negs_per_pos 11.0 +946 8 training.batch_size 1.0 +946 9 model.embedding_dim 1.0 +946 9 model.relation_dim 2.0 +946 9 model.scoring_fct_norm 2.0 +946 9 optimizer.lr 0.00930218832129264 +946 9 negative_sampler.num_negs_per_pos 61.0 +946 9 training.batch_size 2.0 +946 10 model.embedding_dim 0.0 +946 10 model.relation_dim 2.0 +946 10 model.scoring_fct_norm 2.0 +946 10 optimizer.lr 0.0010033084005955037 +946 10 negative_sampler.num_negs_per_pos 24.0 +946 10 training.batch_size 0.0 +946 11 model.embedding_dim 0.0 +946 11 model.relation_dim 0.0 +946 11 model.scoring_fct_norm 1.0 +946 11 optimizer.lr 0.04891298150102282 +946 11 negative_sampler.num_negs_per_pos 95.0 +946 11 training.batch_size 1.0 +946 12 model.embedding_dim 1.0 +946 12 model.relation_dim 1.0 +946 12 model.scoring_fct_norm 1.0 +946 12 optimizer.lr 0.08583041617946721 +946 12 negative_sampler.num_negs_per_pos 71.0 +946 12 training.batch_size 2.0 +946 13 model.embedding_dim 0.0 +946 13 model.relation_dim 0.0 +946 13 model.scoring_fct_norm 1.0 
+946 13 optimizer.lr 0.0016106508331509 +946 13 negative_sampler.num_negs_per_pos 11.0 +946 13 training.batch_size 1.0 +946 14 model.embedding_dim 0.0 +946 14 model.relation_dim 2.0 +946 14 model.scoring_fct_norm 2.0 +946 14 optimizer.lr 0.001280392662611992 +946 14 negative_sampler.num_negs_per_pos 81.0 +946 14 training.batch_size 2.0 +946 15 model.embedding_dim 1.0 +946 15 model.relation_dim 2.0 +946 15 model.scoring_fct_norm 2.0 +946 15 optimizer.lr 0.010270203240803572 +946 15 negative_sampler.num_negs_per_pos 8.0 +946 15 training.batch_size 1.0 +946 16 model.embedding_dim 2.0 +946 16 model.relation_dim 0.0 +946 16 model.scoring_fct_norm 1.0 +946 16 optimizer.lr 0.007575596389850264 +946 16 negative_sampler.num_negs_per_pos 99.0 +946 16 training.batch_size 0.0 +946 17 model.embedding_dim 0.0 +946 17 model.relation_dim 1.0 +946 17 model.scoring_fct_norm 1.0 +946 17 optimizer.lr 0.06145754360739013 +946 17 negative_sampler.num_negs_per_pos 82.0 +946 17 training.batch_size 0.0 +946 18 model.embedding_dim 0.0 +946 18 model.relation_dim 2.0 +946 18 model.scoring_fct_norm 1.0 +946 18 optimizer.lr 0.017655455314269392 +946 18 negative_sampler.num_negs_per_pos 24.0 +946 18 training.batch_size 1.0 +946 19 model.embedding_dim 1.0 +946 19 model.relation_dim 0.0 +946 19 model.scoring_fct_norm 2.0 +946 19 optimizer.lr 0.005480040387010543 +946 19 negative_sampler.num_negs_per_pos 18.0 +946 19 training.batch_size 2.0 +946 20 model.embedding_dim 0.0 +946 20 model.relation_dim 2.0 +946 20 model.scoring_fct_norm 2.0 +946 20 optimizer.lr 0.0012299822408293178 +946 20 negative_sampler.num_negs_per_pos 96.0 +946 20 training.batch_size 2.0 +946 21 model.embedding_dim 2.0 +946 21 model.relation_dim 2.0 +946 21 model.scoring_fct_norm 2.0 +946 21 optimizer.lr 0.00810823276425905 +946 21 negative_sampler.num_negs_per_pos 57.0 +946 21 training.batch_size 1.0 +946 22 model.embedding_dim 2.0 +946 22 model.relation_dim 0.0 +946 22 model.scoring_fct_norm 2.0 +946 22 optimizer.lr 
0.004654214476709777 +946 22 negative_sampler.num_negs_per_pos 0.0 +946 22 training.batch_size 0.0 +946 23 model.embedding_dim 2.0 +946 23 model.relation_dim 0.0 +946 23 model.scoring_fct_norm 1.0 +946 23 optimizer.lr 0.011573375797532438 +946 23 negative_sampler.num_negs_per_pos 70.0 +946 23 training.batch_size 2.0 +946 24 model.embedding_dim 0.0 +946 24 model.relation_dim 1.0 +946 24 model.scoring_fct_norm 2.0 +946 24 optimizer.lr 0.07039001695424633 +946 24 negative_sampler.num_negs_per_pos 44.0 +946 24 training.batch_size 2.0 +946 25 model.embedding_dim 1.0 +946 25 model.relation_dim 0.0 +946 25 model.scoring_fct_norm 1.0 +946 25 optimizer.lr 0.0018503732949435576 +946 25 negative_sampler.num_negs_per_pos 49.0 +946 25 training.batch_size 0.0 +946 26 model.embedding_dim 2.0 +946 26 model.relation_dim 1.0 +946 26 model.scoring_fct_norm 2.0 +946 26 optimizer.lr 0.01564190886187234 +946 26 negative_sampler.num_negs_per_pos 37.0 +946 26 training.batch_size 0.0 +946 27 model.embedding_dim 2.0 +946 27 model.relation_dim 2.0 +946 27 model.scoring_fct_norm 1.0 +946 27 optimizer.lr 0.047263424268231186 +946 27 negative_sampler.num_negs_per_pos 70.0 +946 27 training.batch_size 2.0 +946 28 model.embedding_dim 1.0 +946 28 model.relation_dim 2.0 +946 28 model.scoring_fct_norm 1.0 +946 28 optimizer.lr 0.0023374358640839157 +946 28 negative_sampler.num_negs_per_pos 90.0 +946 28 training.batch_size 2.0 +946 29 model.embedding_dim 1.0 +946 29 model.relation_dim 1.0 +946 29 model.scoring_fct_norm 1.0 +946 29 optimizer.lr 0.019903798616036163 +946 29 negative_sampler.num_negs_per_pos 82.0 +946 29 training.batch_size 1.0 +946 30 model.embedding_dim 2.0 +946 30 model.relation_dim 2.0 +946 30 model.scoring_fct_norm 2.0 +946 30 optimizer.lr 0.0017410748883870879 +946 30 negative_sampler.num_negs_per_pos 81.0 +946 30 training.batch_size 1.0 +946 31 model.embedding_dim 2.0 +946 31 model.relation_dim 0.0 +946 31 model.scoring_fct_norm 1.0 +946 31 optimizer.lr 0.003393049607197729 +946 31 
negative_sampler.num_negs_per_pos 89.0 +946 31 training.batch_size 0.0 +946 32 model.embedding_dim 0.0 +946 32 model.relation_dim 2.0 +946 32 model.scoring_fct_norm 1.0 +946 32 optimizer.lr 0.011359872095630816 +946 32 negative_sampler.num_negs_per_pos 89.0 +946 32 training.batch_size 1.0 +946 33 model.embedding_dim 2.0 +946 33 model.relation_dim 0.0 +946 33 model.scoring_fct_norm 2.0 +946 33 optimizer.lr 0.0015449267950300646 +946 33 negative_sampler.num_negs_per_pos 75.0 +946 33 training.batch_size 1.0 +946 34 model.embedding_dim 1.0 +946 34 model.relation_dim 0.0 +946 34 model.scoring_fct_norm 2.0 +946 34 optimizer.lr 0.04018567001290601 +946 34 negative_sampler.num_negs_per_pos 27.0 +946 34 training.batch_size 0.0 +946 35 model.embedding_dim 2.0 +946 35 model.relation_dim 1.0 +946 35 model.scoring_fct_norm 2.0 +946 35 optimizer.lr 0.001565018169765336 +946 35 negative_sampler.num_negs_per_pos 77.0 +946 35 training.batch_size 2.0 +946 36 model.embedding_dim 0.0 +946 36 model.relation_dim 1.0 +946 36 model.scoring_fct_norm 2.0 +946 36 optimizer.lr 0.04900141479672457 +946 36 negative_sampler.num_negs_per_pos 39.0 +946 36 training.batch_size 0.0 +946 37 model.embedding_dim 2.0 +946 37 model.relation_dim 1.0 +946 37 model.scoring_fct_norm 1.0 +946 37 optimizer.lr 0.009442556509920914 +946 37 negative_sampler.num_negs_per_pos 6.0 +946 37 training.batch_size 1.0 +946 38 model.embedding_dim 1.0 +946 38 model.relation_dim 0.0 +946 38 model.scoring_fct_norm 1.0 +946 38 optimizer.lr 0.003175800604903679 +946 38 negative_sampler.num_negs_per_pos 64.0 +946 38 training.batch_size 1.0 +946 39 model.embedding_dim 0.0 +946 39 model.relation_dim 0.0 +946 39 model.scoring_fct_norm 2.0 +946 39 optimizer.lr 0.006176412593706808 +946 39 negative_sampler.num_negs_per_pos 28.0 +946 39 training.batch_size 1.0 +946 40 model.embedding_dim 0.0 +946 40 model.relation_dim 2.0 +946 40 model.scoring_fct_norm 2.0 +946 40 optimizer.lr 0.005720274872175269 +946 40 
negative_sampler.num_negs_per_pos 64.0 +946 40 training.batch_size 2.0 +946 41 model.embedding_dim 2.0 +946 41 model.relation_dim 1.0 +946 41 model.scoring_fct_norm 1.0 +946 41 optimizer.lr 0.025519423741553582 +946 41 negative_sampler.num_negs_per_pos 48.0 +946 41 training.batch_size 1.0 +946 42 model.embedding_dim 2.0 +946 42 model.relation_dim 2.0 +946 42 model.scoring_fct_norm 2.0 +946 42 optimizer.lr 0.006311558330724273 +946 42 negative_sampler.num_negs_per_pos 99.0 +946 42 training.batch_size 0.0 +946 1 dataset """wn18rr""" +946 1 model """transr""" +946 1 loss """bceaftersigmoid""" +946 1 regularizer """no""" +946 1 optimizer """adam""" +946 1 training_loop """owa""" +946 1 negative_sampler """basic""" +946 1 evaluator """rankbased""" +946 2 dataset """wn18rr""" +946 2 model """transr""" +946 2 loss """bceaftersigmoid""" +946 2 regularizer """no""" +946 2 optimizer """adam""" +946 2 training_loop """owa""" +946 2 negative_sampler """basic""" +946 2 evaluator """rankbased""" +946 3 dataset """wn18rr""" +946 3 model """transr""" +946 3 loss """bceaftersigmoid""" +946 3 regularizer """no""" +946 3 optimizer """adam""" +946 3 training_loop """owa""" +946 3 negative_sampler """basic""" +946 3 evaluator """rankbased""" +946 4 dataset """wn18rr""" +946 4 model """transr""" +946 4 loss """bceaftersigmoid""" +946 4 regularizer """no""" +946 4 optimizer """adam""" +946 4 training_loop """owa""" +946 4 negative_sampler """basic""" +946 4 evaluator """rankbased""" +946 5 dataset """wn18rr""" +946 5 model """transr""" +946 5 loss """bceaftersigmoid""" +946 5 regularizer """no""" +946 5 optimizer """adam""" +946 5 training_loop """owa""" +946 5 negative_sampler """basic""" +946 5 evaluator """rankbased""" +946 6 dataset """wn18rr""" +946 6 model """transr""" +946 6 loss """bceaftersigmoid""" +946 6 regularizer """no""" +946 6 optimizer """adam""" +946 6 training_loop """owa""" +946 6 negative_sampler """basic""" +946 6 evaluator """rankbased""" +946 7 dataset 
"""wn18rr""" +946 7 model """transr""" +946 7 loss """bceaftersigmoid""" +946 7 regularizer """no""" +946 7 optimizer """adam""" +946 7 training_loop """owa""" +946 7 negative_sampler """basic""" +946 7 evaluator """rankbased""" +946 8 dataset """wn18rr""" +946 8 model """transr""" +946 8 loss """bceaftersigmoid""" +946 8 regularizer """no""" +946 8 optimizer """adam""" +946 8 training_loop """owa""" +946 8 negative_sampler """basic""" +946 8 evaluator """rankbased""" +946 9 dataset """wn18rr""" +946 9 model """transr""" +946 9 loss """bceaftersigmoid""" +946 9 regularizer """no""" +946 9 optimizer """adam""" +946 9 training_loop """owa""" +946 9 negative_sampler """basic""" +946 9 evaluator """rankbased""" +946 10 dataset """wn18rr""" +946 10 model """transr""" +946 10 loss """bceaftersigmoid""" +946 10 regularizer """no""" +946 10 optimizer """adam""" +946 10 training_loop """owa""" +946 10 negative_sampler """basic""" +946 10 evaluator """rankbased""" +946 11 dataset """wn18rr""" +946 11 model """transr""" +946 11 loss """bceaftersigmoid""" +946 11 regularizer """no""" +946 11 optimizer """adam""" +946 11 training_loop """owa""" +946 11 negative_sampler """basic""" +946 11 evaluator """rankbased""" +946 12 dataset """wn18rr""" +946 12 model """transr""" +946 12 loss """bceaftersigmoid""" +946 12 regularizer """no""" +946 12 optimizer """adam""" +946 12 training_loop """owa""" +946 12 negative_sampler """basic""" +946 12 evaluator """rankbased""" +946 13 dataset """wn18rr""" +946 13 model """transr""" +946 13 loss """bceaftersigmoid""" +946 13 regularizer """no""" +946 13 optimizer """adam""" +946 13 training_loop """owa""" +946 13 negative_sampler """basic""" +946 13 evaluator """rankbased""" +946 14 dataset """wn18rr""" +946 14 model """transr""" +946 14 loss """bceaftersigmoid""" +946 14 regularizer """no""" +946 14 optimizer """adam""" +946 14 training_loop """owa""" +946 14 negative_sampler """basic""" +946 14 evaluator """rankbased""" +946 15 dataset 
"""wn18rr""" +946 15 model """transr""" +946 15 loss """bceaftersigmoid""" +946 15 regularizer """no""" +946 15 optimizer """adam""" +946 15 training_loop """owa""" +946 15 negative_sampler """basic""" +946 15 evaluator """rankbased""" +946 16 dataset """wn18rr""" +946 16 model """transr""" +946 16 loss """bceaftersigmoid""" +946 16 regularizer """no""" +946 16 optimizer """adam""" +946 16 training_loop """owa""" +946 16 negative_sampler """basic""" +946 16 evaluator """rankbased""" +946 17 dataset """wn18rr""" +946 17 model """transr""" +946 17 loss """bceaftersigmoid""" +946 17 regularizer """no""" +946 17 optimizer """adam""" +946 17 training_loop """owa""" +946 17 negative_sampler """basic""" +946 17 evaluator """rankbased""" +946 18 dataset """wn18rr""" +946 18 model """transr""" +946 18 loss """bceaftersigmoid""" +946 18 regularizer """no""" +946 18 optimizer """adam""" +946 18 training_loop """owa""" +946 18 negative_sampler """basic""" +946 18 evaluator """rankbased""" +946 19 dataset """wn18rr""" +946 19 model """transr""" +946 19 loss """bceaftersigmoid""" +946 19 regularizer """no""" +946 19 optimizer """adam""" +946 19 training_loop """owa""" +946 19 negative_sampler """basic""" +946 19 evaluator """rankbased""" +946 20 dataset """wn18rr""" +946 20 model """transr""" +946 20 loss """bceaftersigmoid""" +946 20 regularizer """no""" +946 20 optimizer """adam""" +946 20 training_loop """owa""" +946 20 negative_sampler """basic""" +946 20 evaluator """rankbased""" +946 21 dataset """wn18rr""" +946 21 model """transr""" +946 21 loss """bceaftersigmoid""" +946 21 regularizer """no""" +946 21 optimizer """adam""" +946 21 training_loop """owa""" +946 21 negative_sampler """basic""" +946 21 evaluator """rankbased""" +946 22 dataset """wn18rr""" +946 22 model """transr""" +946 22 loss """bceaftersigmoid""" +946 22 regularizer """no""" +946 22 optimizer """adam""" +946 22 training_loop """owa""" +946 22 negative_sampler """basic""" +946 22 evaluator """rankbased""" 
+946 23 dataset """wn18rr""" +946 23 model """transr""" +946 23 loss """bceaftersigmoid""" +946 23 regularizer """no""" +946 23 optimizer """adam""" +946 23 training_loop """owa""" +946 23 negative_sampler """basic""" +946 23 evaluator """rankbased""" +946 24 dataset """wn18rr""" +946 24 model """transr""" +946 24 loss """bceaftersigmoid""" +946 24 regularizer """no""" +946 24 optimizer """adam""" +946 24 training_loop """owa""" +946 24 negative_sampler """basic""" +946 24 evaluator """rankbased""" +946 25 dataset """wn18rr""" +946 25 model """transr""" +946 25 loss """bceaftersigmoid""" +946 25 regularizer """no""" +946 25 optimizer """adam""" +946 25 training_loop """owa""" +946 25 negative_sampler """basic""" +946 25 evaluator """rankbased""" +946 26 dataset """wn18rr""" +946 26 model """transr""" +946 26 loss """bceaftersigmoid""" +946 26 regularizer """no""" +946 26 optimizer """adam""" +946 26 training_loop """owa""" +946 26 negative_sampler """basic""" +946 26 evaluator """rankbased""" +946 27 dataset """wn18rr""" +946 27 model """transr""" +946 27 loss """bceaftersigmoid""" +946 27 regularizer """no""" +946 27 optimizer """adam""" +946 27 training_loop """owa""" +946 27 negative_sampler """basic""" +946 27 evaluator """rankbased""" +946 28 dataset """wn18rr""" +946 28 model """transr""" +946 28 loss """bceaftersigmoid""" +946 28 regularizer """no""" +946 28 optimizer """adam""" +946 28 training_loop """owa""" +946 28 negative_sampler """basic""" +946 28 evaluator """rankbased""" +946 29 dataset """wn18rr""" +946 29 model """transr""" +946 29 loss """bceaftersigmoid""" +946 29 regularizer """no""" +946 29 optimizer """adam""" +946 29 training_loop """owa""" +946 29 negative_sampler """basic""" +946 29 evaluator """rankbased""" +946 30 dataset """wn18rr""" +946 30 model """transr""" +946 30 loss """bceaftersigmoid""" +946 30 regularizer """no""" +946 30 optimizer """adam""" +946 30 training_loop """owa""" +946 30 negative_sampler """basic""" +946 30 evaluator 
"""rankbased""" +946 31 dataset """wn18rr""" +946 31 model """transr""" +946 31 loss """bceaftersigmoid""" +946 31 regularizer """no""" +946 31 optimizer """adam""" +946 31 training_loop """owa""" +946 31 negative_sampler """basic""" +946 31 evaluator """rankbased""" +946 32 dataset """wn18rr""" +946 32 model """transr""" +946 32 loss """bceaftersigmoid""" +946 32 regularizer """no""" +946 32 optimizer """adam""" +946 32 training_loop """owa""" +946 32 negative_sampler """basic""" +946 32 evaluator """rankbased""" +946 33 dataset """wn18rr""" +946 33 model """transr""" +946 33 loss """bceaftersigmoid""" +946 33 regularizer """no""" +946 33 optimizer """adam""" +946 33 training_loop """owa""" +946 33 negative_sampler """basic""" +946 33 evaluator """rankbased""" +946 34 dataset """wn18rr""" +946 34 model """transr""" +946 34 loss """bceaftersigmoid""" +946 34 regularizer """no""" +946 34 optimizer """adam""" +946 34 training_loop """owa""" +946 34 negative_sampler """basic""" +946 34 evaluator """rankbased""" +946 35 dataset """wn18rr""" +946 35 model """transr""" +946 35 loss """bceaftersigmoid""" +946 35 regularizer """no""" +946 35 optimizer """adam""" +946 35 training_loop """owa""" +946 35 negative_sampler """basic""" +946 35 evaluator """rankbased""" +946 36 dataset """wn18rr""" +946 36 model """transr""" +946 36 loss """bceaftersigmoid""" +946 36 regularizer """no""" +946 36 optimizer """adam""" +946 36 training_loop """owa""" +946 36 negative_sampler """basic""" +946 36 evaluator """rankbased""" +946 37 dataset """wn18rr""" +946 37 model """transr""" +946 37 loss """bceaftersigmoid""" +946 37 regularizer """no""" +946 37 optimizer """adam""" +946 37 training_loop """owa""" +946 37 negative_sampler """basic""" +946 37 evaluator """rankbased""" +946 38 dataset """wn18rr""" +946 38 model """transr""" +946 38 loss """bceaftersigmoid""" +946 38 regularizer """no""" +946 38 optimizer """adam""" +946 38 training_loop """owa""" +946 38 negative_sampler """basic""" 
+946 38 evaluator """rankbased""" +946 39 dataset """wn18rr""" +946 39 model """transr""" +946 39 loss """bceaftersigmoid""" +946 39 regularizer """no""" +946 39 optimizer """adam""" +946 39 training_loop """owa""" +946 39 negative_sampler """basic""" +946 39 evaluator """rankbased""" +946 40 dataset """wn18rr""" +946 40 model """transr""" +946 40 loss """bceaftersigmoid""" +946 40 regularizer """no""" +946 40 optimizer """adam""" +946 40 training_loop """owa""" +946 40 negative_sampler """basic""" +946 40 evaluator """rankbased""" +946 41 dataset """wn18rr""" +946 41 model """transr""" +946 41 loss """bceaftersigmoid""" +946 41 regularizer """no""" +946 41 optimizer """adam""" +946 41 training_loop """owa""" +946 41 negative_sampler """basic""" +946 41 evaluator """rankbased""" +946 42 dataset """wn18rr""" +946 42 model """transr""" +946 42 loss """bceaftersigmoid""" +946 42 regularizer """no""" +946 42 optimizer """adam""" +946 42 training_loop """owa""" +946 42 negative_sampler """basic""" +946 42 evaluator """rankbased""" +947 1 model.embedding_dim 1.0 +947 1 model.relation_dim 1.0 +947 1 model.scoring_fct_norm 2.0 +947 1 loss.margin 7.213207864221595 +947 1 optimizer.lr 0.05197918839164887 +947 1 negative_sampler.num_negs_per_pos 23.0 +947 1 training.batch_size 2.0 +947 2 model.embedding_dim 0.0 +947 2 model.relation_dim 1.0 +947 2 model.scoring_fct_norm 2.0 +947 2 loss.margin 5.069481219902079 +947 2 optimizer.lr 0.0048563096982179865 +947 2 negative_sampler.num_negs_per_pos 27.0 +947 2 training.batch_size 1.0 +947 3 model.embedding_dim 2.0 +947 3 model.relation_dim 1.0 +947 3 model.scoring_fct_norm 1.0 +947 3 loss.margin 9.168406111326709 +947 3 optimizer.lr 0.01327383107367225 +947 3 negative_sampler.num_negs_per_pos 74.0 +947 3 training.batch_size 0.0 +947 4 model.embedding_dim 2.0 +947 4 model.relation_dim 0.0 +947 4 model.scoring_fct_norm 2.0 +947 4 loss.margin 8.014870192513056 +947 4 optimizer.lr 0.00112871830250763 +947 4 
negative_sampler.num_negs_per_pos 53.0 +947 4 training.batch_size 0.0 +947 5 model.embedding_dim 0.0 +947 5 model.relation_dim 0.0 +947 5 model.scoring_fct_norm 1.0 +947 5 loss.margin 0.8413379002424539 +947 5 optimizer.lr 0.04196993667795902 +947 5 negative_sampler.num_negs_per_pos 50.0 +947 5 training.batch_size 0.0 +947 6 model.embedding_dim 2.0 +947 6 model.relation_dim 2.0 +947 6 model.scoring_fct_norm 2.0 +947 6 loss.margin 8.895612075153608 +947 6 optimizer.lr 0.009833557690384314 +947 6 negative_sampler.num_negs_per_pos 38.0 +947 6 training.batch_size 0.0 +947 7 model.embedding_dim 2.0 +947 7 model.relation_dim 1.0 +947 7 model.scoring_fct_norm 2.0 +947 7 loss.margin 9.70172043975819 +947 7 optimizer.lr 0.016622939543829377 +947 7 negative_sampler.num_negs_per_pos 26.0 +947 7 training.batch_size 0.0 +947 8 model.embedding_dim 0.0 +947 8 model.relation_dim 1.0 +947 8 model.scoring_fct_norm 2.0 +947 8 loss.margin 7.551072102013807 +947 8 optimizer.lr 0.0015550401053560149 +947 8 negative_sampler.num_negs_per_pos 64.0 +947 8 training.batch_size 1.0 +947 9 model.embedding_dim 0.0 +947 9 model.relation_dim 1.0 +947 9 model.scoring_fct_norm 1.0 +947 9 loss.margin 8.101966757910697 +947 9 optimizer.lr 0.031231010936704637 +947 9 negative_sampler.num_negs_per_pos 89.0 +947 9 training.batch_size 1.0 +947 10 model.embedding_dim 2.0 +947 10 model.relation_dim 1.0 +947 10 model.scoring_fct_norm 2.0 +947 10 loss.margin 9.155620347084636 +947 10 optimizer.lr 0.005657088227986939 +947 10 negative_sampler.num_negs_per_pos 9.0 +947 10 training.batch_size 1.0 +947 11 model.embedding_dim 0.0 +947 11 model.relation_dim 0.0 +947 11 model.scoring_fct_norm 1.0 +947 11 loss.margin 3.98662721421367 +947 11 optimizer.lr 0.003041882852592583 +947 11 negative_sampler.num_negs_per_pos 19.0 +947 11 training.batch_size 2.0 +947 12 model.embedding_dim 1.0 +947 12 model.relation_dim 0.0 +947 12 model.scoring_fct_norm 1.0 +947 12 loss.margin 6.181214801513555 +947 12 optimizer.lr 
0.001730731608360927 +947 12 negative_sampler.num_negs_per_pos 14.0 +947 12 training.batch_size 1.0 +947 13 model.embedding_dim 2.0 +947 13 model.relation_dim 0.0 +947 13 model.scoring_fct_norm 1.0 +947 13 loss.margin 5.092201197556027 +947 13 optimizer.lr 0.014424494406030983 +947 13 negative_sampler.num_negs_per_pos 30.0 +947 13 training.batch_size 2.0 +947 14 model.embedding_dim 2.0 +947 14 model.relation_dim 1.0 +947 14 model.scoring_fct_norm 2.0 +947 14 loss.margin 8.230862520954709 +947 14 optimizer.lr 0.034333605238767426 +947 14 negative_sampler.num_negs_per_pos 41.0 +947 14 training.batch_size 2.0 +947 15 model.embedding_dim 2.0 +947 15 model.relation_dim 0.0 +947 15 model.scoring_fct_norm 2.0 +947 15 loss.margin 5.97391395792631 +947 15 optimizer.lr 0.05695833662931051 +947 15 negative_sampler.num_negs_per_pos 84.0 +947 15 training.batch_size 2.0 +947 16 model.embedding_dim 0.0 +947 16 model.relation_dim 1.0 +947 16 model.scoring_fct_norm 2.0 +947 16 loss.margin 9.175292610807785 +947 16 optimizer.lr 0.021558992363674687 +947 16 negative_sampler.num_negs_per_pos 40.0 +947 16 training.batch_size 2.0 +947 17 model.embedding_dim 2.0 +947 17 model.relation_dim 0.0 +947 17 model.scoring_fct_norm 1.0 +947 17 loss.margin 7.444703672340598 +947 17 optimizer.lr 0.0027191475956199963 +947 17 negative_sampler.num_negs_per_pos 9.0 +947 17 training.batch_size 2.0 +947 18 model.embedding_dim 2.0 +947 18 model.relation_dim 0.0 +947 18 model.scoring_fct_norm 2.0 +947 18 loss.margin 2.505303796365838 +947 18 optimizer.lr 0.01683670672405882 +947 18 negative_sampler.num_negs_per_pos 37.0 +947 18 training.batch_size 1.0 +947 19 model.embedding_dim 1.0 +947 19 model.relation_dim 1.0 +947 19 model.scoring_fct_norm 2.0 +947 19 loss.margin 6.5336827006667715 +947 19 optimizer.lr 0.02586034828216791 +947 19 negative_sampler.num_negs_per_pos 68.0 +947 19 training.batch_size 0.0 +947 20 model.embedding_dim 0.0 +947 20 model.relation_dim 2.0 +947 20 model.scoring_fct_norm 2.0 +947 
20 loss.margin 7.525030311748442 +947 20 optimizer.lr 0.01219586109455046 +947 20 negative_sampler.num_negs_per_pos 13.0 +947 20 training.batch_size 0.0 +947 21 model.embedding_dim 1.0 +947 21 model.relation_dim 1.0 +947 21 model.scoring_fct_norm 1.0 +947 21 loss.margin 8.382533537627848 +947 21 optimizer.lr 0.0035006901092190378 +947 21 negative_sampler.num_negs_per_pos 66.0 +947 21 training.batch_size 0.0 +947 22 model.embedding_dim 2.0 +947 22 model.relation_dim 0.0 +947 22 model.scoring_fct_norm 2.0 +947 22 loss.margin 9.952485141000974 +947 22 optimizer.lr 0.002197928567406396 +947 22 negative_sampler.num_negs_per_pos 37.0 +947 22 training.batch_size 2.0 +947 23 model.embedding_dim 1.0 +947 23 model.relation_dim 1.0 +947 23 model.scoring_fct_norm 1.0 +947 23 loss.margin 2.774894310511055 +947 23 optimizer.lr 0.001098634649540761 +947 23 negative_sampler.num_negs_per_pos 79.0 +947 23 training.batch_size 1.0 +947 24 model.embedding_dim 1.0 +947 24 model.relation_dim 0.0 +947 24 model.scoring_fct_norm 1.0 +947 24 loss.margin 4.730353107327644 +947 24 optimizer.lr 0.05689445828085351 +947 24 negative_sampler.num_negs_per_pos 49.0 +947 24 training.batch_size 2.0 +947 25 model.embedding_dim 2.0 +947 25 model.relation_dim 0.0 +947 25 model.scoring_fct_norm 1.0 +947 25 loss.margin 7.735305077490866 +947 25 optimizer.lr 0.0016218905172266933 +947 25 negative_sampler.num_negs_per_pos 72.0 +947 25 training.batch_size 1.0 +947 26 model.embedding_dim 2.0 +947 26 model.relation_dim 2.0 +947 26 model.scoring_fct_norm 1.0 +947 26 loss.margin 5.706048012355673 +947 26 optimizer.lr 0.0017495970803639233 +947 26 negative_sampler.num_negs_per_pos 60.0 +947 26 training.batch_size 1.0 +947 27 model.embedding_dim 1.0 +947 27 model.relation_dim 1.0 +947 27 model.scoring_fct_norm 2.0 +947 27 loss.margin 8.415753516312376 +947 27 optimizer.lr 0.00978149414111118 +947 27 negative_sampler.num_negs_per_pos 38.0 +947 27 training.batch_size 1.0 +947 28 model.embedding_dim 1.0 +947 28 
model.relation_dim 1.0 +947 28 model.scoring_fct_norm 1.0 +947 28 loss.margin 2.5077953640750166 +947 28 optimizer.lr 0.02570431060934656 +947 28 negative_sampler.num_negs_per_pos 57.0 +947 28 training.batch_size 2.0 +947 29 model.embedding_dim 0.0 +947 29 model.relation_dim 2.0 +947 29 model.scoring_fct_norm 1.0 +947 29 loss.margin 3.668558554016951 +947 29 optimizer.lr 0.010701678019615062 +947 29 negative_sampler.num_negs_per_pos 19.0 +947 29 training.batch_size 0.0 +947 30 model.embedding_dim 0.0 +947 30 model.relation_dim 1.0 +947 30 model.scoring_fct_norm 1.0 +947 30 loss.margin 6.549659278411099 +947 30 optimizer.lr 0.00776562286323066 +947 30 negative_sampler.num_negs_per_pos 55.0 +947 30 training.batch_size 2.0 +947 31 model.embedding_dim 0.0 +947 31 model.relation_dim 0.0 +947 31 model.scoring_fct_norm 2.0 +947 31 loss.margin 6.131239398528316 +947 31 optimizer.lr 0.03589591049611433 +947 31 negative_sampler.num_negs_per_pos 89.0 +947 31 training.batch_size 0.0 +947 32 model.embedding_dim 1.0 +947 32 model.relation_dim 1.0 +947 32 model.scoring_fct_norm 2.0 +947 32 loss.margin 1.0327472722536688 +947 32 optimizer.lr 0.06486853083596134 +947 32 negative_sampler.num_negs_per_pos 67.0 +947 32 training.batch_size 1.0 +947 1 dataset """wn18rr""" +947 1 model """transr""" +947 1 loss """marginranking""" +947 1 regularizer """no""" +947 1 optimizer """adam""" +947 1 training_loop """owa""" +947 1 negative_sampler """basic""" +947 1 evaluator """rankbased""" +947 2 dataset """wn18rr""" +947 2 model """transr""" +947 2 loss """marginranking""" +947 2 regularizer """no""" +947 2 optimizer """adam""" +947 2 training_loop """owa""" +947 2 negative_sampler """basic""" +947 2 evaluator """rankbased""" +947 3 dataset """wn18rr""" +947 3 model """transr""" +947 3 loss """marginranking""" +947 3 regularizer """no""" +947 3 optimizer """adam""" +947 3 training_loop """owa""" +947 3 negative_sampler """basic""" +947 3 evaluator """rankbased""" +947 4 dataset """wn18rr""" 
+947 4 model """transr""" +947 4 loss """marginranking""" +947 4 regularizer """no""" +947 4 optimizer """adam""" +947 4 training_loop """owa""" +947 4 negative_sampler """basic""" +947 4 evaluator """rankbased""" +947 5 dataset """wn18rr""" +947 5 model """transr""" +947 5 loss """marginranking""" +947 5 regularizer """no""" +947 5 optimizer """adam""" +947 5 training_loop """owa""" +947 5 negative_sampler """basic""" +947 5 evaluator """rankbased""" +947 6 dataset """wn18rr""" +947 6 model """transr""" +947 6 loss """marginranking""" +947 6 regularizer """no""" +947 6 optimizer """adam""" +947 6 training_loop """owa""" +947 6 negative_sampler """basic""" +947 6 evaluator """rankbased""" +947 7 dataset """wn18rr""" +947 7 model """transr""" +947 7 loss """marginranking""" +947 7 regularizer """no""" +947 7 optimizer """adam""" +947 7 training_loop """owa""" +947 7 negative_sampler """basic""" +947 7 evaluator """rankbased""" +947 8 dataset """wn18rr""" +947 8 model """transr""" +947 8 loss """marginranking""" +947 8 regularizer """no""" +947 8 optimizer """adam""" +947 8 training_loop """owa""" +947 8 negative_sampler """basic""" +947 8 evaluator """rankbased""" +947 9 dataset """wn18rr""" +947 9 model """transr""" +947 9 loss """marginranking""" +947 9 regularizer """no""" +947 9 optimizer """adam""" +947 9 training_loop """owa""" +947 9 negative_sampler """basic""" +947 9 evaluator """rankbased""" +947 10 dataset """wn18rr""" +947 10 model """transr""" +947 10 loss """marginranking""" +947 10 regularizer """no""" +947 10 optimizer """adam""" +947 10 training_loop """owa""" +947 10 negative_sampler """basic""" +947 10 evaluator """rankbased""" +947 11 dataset """wn18rr""" +947 11 model """transr""" +947 11 loss """marginranking""" +947 11 regularizer """no""" +947 11 optimizer """adam""" +947 11 training_loop """owa""" +947 11 negative_sampler """basic""" +947 11 evaluator """rankbased""" +947 12 dataset """wn18rr""" +947 12 model """transr""" +947 12 loss 
"""marginranking""" +947 12 regularizer """no""" +947 12 optimizer """adam""" +947 12 training_loop """owa""" +947 12 negative_sampler """basic""" +947 12 evaluator """rankbased""" +947 13 dataset """wn18rr""" +947 13 model """transr""" +947 13 loss """marginranking""" +947 13 regularizer """no""" +947 13 optimizer """adam""" +947 13 training_loop """owa""" +947 13 negative_sampler """basic""" +947 13 evaluator """rankbased""" +947 14 dataset """wn18rr""" +947 14 model """transr""" +947 14 loss """marginranking""" +947 14 regularizer """no""" +947 14 optimizer """adam""" +947 14 training_loop """owa""" +947 14 negative_sampler """basic""" +947 14 evaluator """rankbased""" +947 15 dataset """wn18rr""" +947 15 model """transr""" +947 15 loss """marginranking""" +947 15 regularizer """no""" +947 15 optimizer """adam""" +947 15 training_loop """owa""" +947 15 negative_sampler """basic""" +947 15 evaluator """rankbased""" +947 16 dataset """wn18rr""" +947 16 model """transr""" +947 16 loss """marginranking""" +947 16 regularizer """no""" +947 16 optimizer """adam""" +947 16 training_loop """owa""" +947 16 negative_sampler """basic""" +947 16 evaluator """rankbased""" +947 17 dataset """wn18rr""" +947 17 model """transr""" +947 17 loss """marginranking""" +947 17 regularizer """no""" +947 17 optimizer """adam""" +947 17 training_loop """owa""" +947 17 negative_sampler """basic""" +947 17 evaluator """rankbased""" +947 18 dataset """wn18rr""" +947 18 model """transr""" +947 18 loss """marginranking""" +947 18 regularizer """no""" +947 18 optimizer """adam""" +947 18 training_loop """owa""" +947 18 negative_sampler """basic""" +947 18 evaluator """rankbased""" +947 19 dataset """wn18rr""" +947 19 model """transr""" +947 19 loss """marginranking""" +947 19 regularizer """no""" +947 19 optimizer """adam""" +947 19 training_loop """owa""" +947 19 negative_sampler """basic""" +947 19 evaluator """rankbased""" +947 20 dataset """wn18rr""" +947 20 model """transr""" +947 20 loss 
"""marginranking""" +947 20 regularizer """no""" +947 20 optimizer """adam""" +947 20 training_loop """owa""" +947 20 negative_sampler """basic""" +947 20 evaluator """rankbased""" +947 21 dataset """wn18rr""" +947 21 model """transr""" +947 21 loss """marginranking""" +947 21 regularizer """no""" +947 21 optimizer """adam""" +947 21 training_loop """owa""" +947 21 negative_sampler """basic""" +947 21 evaluator """rankbased""" +947 22 dataset """wn18rr""" +947 22 model """transr""" +947 22 loss """marginranking""" +947 22 regularizer """no""" +947 22 optimizer """adam""" +947 22 training_loop """owa""" +947 22 negative_sampler """basic""" +947 22 evaluator """rankbased""" +947 23 dataset """wn18rr""" +947 23 model """transr""" +947 23 loss """marginranking""" +947 23 regularizer """no""" +947 23 optimizer """adam""" +947 23 training_loop """owa""" +947 23 negative_sampler """basic""" +947 23 evaluator """rankbased""" +947 24 dataset """wn18rr""" +947 24 model """transr""" +947 24 loss """marginranking""" +947 24 regularizer """no""" +947 24 optimizer """adam""" +947 24 training_loop """owa""" +947 24 negative_sampler """basic""" +947 24 evaluator """rankbased""" +947 25 dataset """wn18rr""" +947 25 model """transr""" +947 25 loss """marginranking""" +947 25 regularizer """no""" +947 25 optimizer """adam""" +947 25 training_loop """owa""" +947 25 negative_sampler """basic""" +947 25 evaluator """rankbased""" +947 26 dataset """wn18rr""" +947 26 model """transr""" +947 26 loss """marginranking""" +947 26 regularizer """no""" +947 26 optimizer """adam""" +947 26 training_loop """owa""" +947 26 negative_sampler """basic""" +947 26 evaluator """rankbased""" +947 27 dataset """wn18rr""" +947 27 model """transr""" +947 27 loss """marginranking""" +947 27 regularizer """no""" +947 27 optimizer """adam""" +947 27 training_loop """owa""" +947 27 negative_sampler """basic""" +947 27 evaluator """rankbased""" +947 28 dataset """wn18rr""" +947 28 model """transr""" +947 28 loss 
"""marginranking""" +947 28 regularizer """no""" +947 28 optimizer """adam""" +947 28 training_loop """owa""" +947 28 negative_sampler """basic""" +947 28 evaluator """rankbased""" +947 29 dataset """wn18rr""" +947 29 model """transr""" +947 29 loss """marginranking""" +947 29 regularizer """no""" +947 29 optimizer """adam""" +947 29 training_loop """owa""" +947 29 negative_sampler """basic""" +947 29 evaluator """rankbased""" +947 30 dataset """wn18rr""" +947 30 model """transr""" +947 30 loss """marginranking""" +947 30 regularizer """no""" +947 30 optimizer """adam""" +947 30 training_loop """owa""" +947 30 negative_sampler """basic""" +947 30 evaluator """rankbased""" +947 31 dataset """wn18rr""" +947 31 model """transr""" +947 31 loss """marginranking""" +947 31 regularizer """no""" +947 31 optimizer """adam""" +947 31 training_loop """owa""" +947 31 negative_sampler """basic""" +947 31 evaluator """rankbased""" +947 32 dataset """wn18rr""" +947 32 model """transr""" +947 32 loss """marginranking""" +947 32 regularizer """no""" +947 32 optimizer """adam""" +947 32 training_loop """owa""" +947 32 negative_sampler """basic""" +947 32 evaluator """rankbased""" +948 1 model.embedding_dim 2.0 +948 1 model.relation_dim 2.0 +948 1 model.scoring_fct_norm 2.0 +948 1 loss.margin 9.899767986931952 +948 1 optimizer.lr 0.05025381624323266 +948 1 negative_sampler.num_negs_per_pos 5.0 +948 1 training.batch_size 2.0 +948 2 model.embedding_dim 1.0 +948 2 model.relation_dim 2.0 +948 2 model.scoring_fct_norm 1.0 +948 2 loss.margin 9.01812259803853 +948 2 optimizer.lr 0.004920397523926068 +948 2 negative_sampler.num_negs_per_pos 15.0 +948 2 training.batch_size 0.0 +948 3 model.embedding_dim 1.0 +948 3 model.relation_dim 0.0 +948 3 model.scoring_fct_norm 1.0 +948 3 loss.margin 6.697693481221102 +948 3 optimizer.lr 0.042250173371098765 +948 3 negative_sampler.num_negs_per_pos 58.0 +948 3 training.batch_size 2.0 +948 4 model.embedding_dim 1.0 +948 4 model.relation_dim 1.0 +948 4 
model.scoring_fct_norm 2.0 +948 4 loss.margin 9.745313656863692 +948 4 optimizer.lr 0.05477806863617508 +948 4 negative_sampler.num_negs_per_pos 80.0 +948 4 training.batch_size 0.0 +948 5 model.embedding_dim 2.0 +948 5 model.relation_dim 1.0 +948 5 model.scoring_fct_norm 1.0 +948 5 loss.margin 8.704710059442267 +948 5 optimizer.lr 0.0024320328105139665 +948 5 negative_sampler.num_negs_per_pos 31.0 +948 5 training.batch_size 0.0 +948 6 model.embedding_dim 0.0 +948 6 model.relation_dim 2.0 +948 6 model.scoring_fct_norm 2.0 +948 6 loss.margin 2.598630272627153 +948 6 optimizer.lr 0.010309584024528584 +948 6 negative_sampler.num_negs_per_pos 66.0 +948 6 training.batch_size 1.0 +948 7 model.embedding_dim 0.0 +948 7 model.relation_dim 0.0 +948 7 model.scoring_fct_norm 2.0 +948 7 loss.margin 0.8080116647006517 +948 7 optimizer.lr 0.024405338231272847 +948 7 negative_sampler.num_negs_per_pos 50.0 +948 7 training.batch_size 0.0 +948 8 model.embedding_dim 2.0 +948 8 model.relation_dim 1.0 +948 8 model.scoring_fct_norm 2.0 +948 8 loss.margin 5.214046492126282 +948 8 optimizer.lr 0.0013280023479185653 +948 8 negative_sampler.num_negs_per_pos 21.0 +948 8 training.batch_size 2.0 +948 9 model.embedding_dim 0.0 +948 9 model.relation_dim 0.0 +948 9 model.scoring_fct_norm 2.0 +948 9 loss.margin 9.749799726514903 +948 9 optimizer.lr 0.01770715904823227 +948 9 negative_sampler.num_negs_per_pos 93.0 +948 9 training.batch_size 2.0 +948 10 model.embedding_dim 0.0 +948 10 model.relation_dim 0.0 +948 10 model.scoring_fct_norm 1.0 +948 10 loss.margin 5.293391905045092 +948 10 optimizer.lr 0.012473027966803997 +948 10 negative_sampler.num_negs_per_pos 53.0 +948 10 training.batch_size 1.0 +948 11 model.embedding_dim 2.0 +948 11 model.relation_dim 2.0 +948 11 model.scoring_fct_norm 1.0 +948 11 loss.margin 1.223526701496657 +948 11 optimizer.lr 0.04765202422397879 +948 11 negative_sampler.num_negs_per_pos 70.0 +948 11 training.batch_size 2.0 +948 12 model.embedding_dim 2.0 +948 12 
model.relation_dim 2.0 +948 12 model.scoring_fct_norm 1.0 +948 12 loss.margin 2.6726747312420946 +948 12 optimizer.lr 0.029276441919624648 +948 12 negative_sampler.num_negs_per_pos 87.0 +948 12 training.batch_size 1.0 +948 13 model.embedding_dim 0.0 +948 13 model.relation_dim 0.0 +948 13 model.scoring_fct_norm 1.0 +948 13 loss.margin 8.128673644776931 +948 13 optimizer.lr 0.05446760740435762 +948 13 negative_sampler.num_negs_per_pos 4.0 +948 13 training.batch_size 0.0 +948 14 model.embedding_dim 1.0 +948 14 model.relation_dim 1.0 +948 14 model.scoring_fct_norm 2.0 +948 14 loss.margin 3.5846395950093686 +948 14 optimizer.lr 0.06835690182119382 +948 14 negative_sampler.num_negs_per_pos 77.0 +948 14 training.batch_size 1.0 +948 15 model.embedding_dim 1.0 +948 15 model.relation_dim 1.0 +948 15 model.scoring_fct_norm 1.0 +948 15 loss.margin 9.750351170422041 +948 15 optimizer.lr 0.009835342272832809 +948 15 negative_sampler.num_negs_per_pos 76.0 +948 15 training.batch_size 0.0 +948 16 model.embedding_dim 1.0 +948 16 model.relation_dim 2.0 +948 16 model.scoring_fct_norm 1.0 +948 16 loss.margin 2.451388317528822 +948 16 optimizer.lr 0.005585229343411345 +948 16 negative_sampler.num_negs_per_pos 76.0 +948 16 training.batch_size 1.0 +948 17 model.embedding_dim 0.0 +948 17 model.relation_dim 1.0 +948 17 model.scoring_fct_norm 2.0 +948 17 loss.margin 0.542636309352603 +948 17 optimizer.lr 0.0014750482682198967 +948 17 negative_sampler.num_negs_per_pos 86.0 +948 17 training.batch_size 2.0 +948 18 model.embedding_dim 1.0 +948 18 model.relation_dim 1.0 +948 18 model.scoring_fct_norm 2.0 +948 18 loss.margin 3.315369445471653 +948 18 optimizer.lr 0.043493375874864924 +948 18 negative_sampler.num_negs_per_pos 20.0 +948 18 training.batch_size 2.0 +948 19 model.embedding_dim 0.0 +948 19 model.relation_dim 0.0 +948 19 model.scoring_fct_norm 1.0 +948 19 loss.margin 2.6293545407642536 +948 19 optimizer.lr 0.03916618131042951 +948 19 negative_sampler.num_negs_per_pos 13.0 +948 19 
training.batch_size 0.0 +948 20 model.embedding_dim 2.0 +948 20 model.relation_dim 1.0 +948 20 model.scoring_fct_norm 2.0 +948 20 loss.margin 7.783616796340508 +948 20 optimizer.lr 0.0011173437205147657 +948 20 negative_sampler.num_negs_per_pos 97.0 +948 20 training.batch_size 0.0 +948 21 model.embedding_dim 0.0 +948 21 model.relation_dim 2.0 +948 21 model.scoring_fct_norm 2.0 +948 21 loss.margin 1.0064663718901605 +948 21 optimizer.lr 0.007049329553672189 +948 21 negative_sampler.num_negs_per_pos 2.0 +948 21 training.batch_size 1.0 +948 22 model.embedding_dim 1.0 +948 22 model.relation_dim 1.0 +948 22 model.scoring_fct_norm 2.0 +948 22 loss.margin 9.804383287812055 +948 22 optimizer.lr 0.02469225221249382 +948 22 negative_sampler.num_negs_per_pos 30.0 +948 22 training.batch_size 2.0 +948 23 model.embedding_dim 0.0 +948 23 model.relation_dim 2.0 +948 23 model.scoring_fct_norm 1.0 +948 23 loss.margin 7.410514884273248 +948 23 optimizer.lr 0.005546064994525799 +948 23 negative_sampler.num_negs_per_pos 32.0 +948 23 training.batch_size 2.0 +948 24 model.embedding_dim 0.0 +948 24 model.relation_dim 1.0 +948 24 model.scoring_fct_norm 2.0 +948 24 loss.margin 2.764685520933228 +948 24 optimizer.lr 0.0012387480673149245 +948 24 negative_sampler.num_negs_per_pos 58.0 +948 24 training.batch_size 1.0 +948 25 model.embedding_dim 0.0 +948 25 model.relation_dim 2.0 +948 25 model.scoring_fct_norm 1.0 +948 25 loss.margin 7.639366327037352 +948 25 optimizer.lr 0.055316451768566664 +948 25 negative_sampler.num_negs_per_pos 37.0 +948 25 training.batch_size 1.0 +948 26 model.embedding_dim 0.0 +948 26 model.relation_dim 1.0 +948 26 model.scoring_fct_norm 1.0 +948 26 loss.margin 2.9698906658998534 +948 26 optimizer.lr 0.004285132167647885 +948 26 negative_sampler.num_negs_per_pos 58.0 +948 26 training.batch_size 0.0 +948 27 model.embedding_dim 0.0 +948 27 model.relation_dim 0.0 +948 27 model.scoring_fct_norm 2.0 +948 27 loss.margin 6.432585519603504 +948 27 optimizer.lr 
0.006936571230342905 +948 27 negative_sampler.num_negs_per_pos 11.0 +948 27 training.batch_size 1.0 +948 28 model.embedding_dim 2.0 +948 28 model.relation_dim 1.0 +948 28 model.scoring_fct_norm 1.0 +948 28 loss.margin 5.670689758537412 +948 28 optimizer.lr 0.035223755452648824 +948 28 negative_sampler.num_negs_per_pos 68.0 +948 28 training.batch_size 1.0 +948 29 model.embedding_dim 1.0 +948 29 model.relation_dim 1.0 +948 29 model.scoring_fct_norm 1.0 +948 29 loss.margin 7.097295489261504 +948 29 optimizer.lr 0.0014046966885960198 +948 29 negative_sampler.num_negs_per_pos 74.0 +948 29 training.batch_size 0.0 +948 30 model.embedding_dim 1.0 +948 30 model.relation_dim 0.0 +948 30 model.scoring_fct_norm 1.0 +948 30 loss.margin 8.64137733488221 +948 30 optimizer.lr 0.015555824791557015 +948 30 negative_sampler.num_negs_per_pos 61.0 +948 30 training.batch_size 2.0 +948 31 model.embedding_dim 1.0 +948 31 model.relation_dim 1.0 +948 31 model.scoring_fct_norm 1.0 +948 31 loss.margin 8.448135094157468 +948 31 optimizer.lr 0.016340867645419484 +948 31 negative_sampler.num_negs_per_pos 64.0 +948 31 training.batch_size 0.0 +948 32 model.embedding_dim 1.0 +948 32 model.relation_dim 1.0 +948 32 model.scoring_fct_norm 2.0 +948 32 loss.margin 9.995215747572145 +948 32 optimizer.lr 0.01541773492516416 +948 32 negative_sampler.num_negs_per_pos 35.0 +948 32 training.batch_size 2.0 +948 33 model.embedding_dim 2.0 +948 33 model.relation_dim 0.0 +948 33 model.scoring_fct_norm 2.0 +948 33 loss.margin 9.431700488272776 +948 33 optimizer.lr 0.0011071701210264191 +948 33 negative_sampler.num_negs_per_pos 37.0 +948 33 training.batch_size 2.0 +948 34 model.embedding_dim 1.0 +948 34 model.relation_dim 0.0 +948 34 model.scoring_fct_norm 1.0 +948 34 loss.margin 4.075245927227532 +948 34 optimizer.lr 0.050432903839724826 +948 34 negative_sampler.num_negs_per_pos 2.0 +948 34 training.batch_size 0.0 +948 35 model.embedding_dim 0.0 +948 35 model.relation_dim 0.0 +948 35 model.scoring_fct_norm 2.0 
+948 35 loss.margin 9.576449922473738 +948 35 optimizer.lr 0.0010476674525702048 +948 35 negative_sampler.num_negs_per_pos 74.0 +948 35 training.batch_size 1.0 +948 36 model.embedding_dim 1.0 +948 36 model.relation_dim 0.0 +948 36 model.scoring_fct_norm 1.0 +948 36 loss.margin 0.8826603030910521 +948 36 optimizer.lr 0.0015135072016888964 +948 36 negative_sampler.num_negs_per_pos 94.0 +948 36 training.batch_size 0.0 +948 37 model.embedding_dim 2.0 +948 37 model.relation_dim 2.0 +948 37 model.scoring_fct_norm 2.0 +948 37 loss.margin 7.03309684436226 +948 37 optimizer.lr 0.006136245694501533 +948 37 negative_sampler.num_negs_per_pos 78.0 +948 37 training.batch_size 0.0 +948 38 model.embedding_dim 0.0 +948 38 model.relation_dim 2.0 +948 38 model.scoring_fct_norm 1.0 +948 38 loss.margin 8.516216120310316 +948 38 optimizer.lr 0.04703145719131722 +948 38 negative_sampler.num_negs_per_pos 50.0 +948 38 training.batch_size 2.0 +948 39 model.embedding_dim 2.0 +948 39 model.relation_dim 2.0 +948 39 model.scoring_fct_norm 1.0 +948 39 loss.margin 3.4114981418004264 +948 39 optimizer.lr 0.0014818351876943491 +948 39 negative_sampler.num_negs_per_pos 89.0 +948 39 training.batch_size 0.0 +948 40 model.embedding_dim 0.0 +948 40 model.relation_dim 2.0 +948 40 model.scoring_fct_norm 2.0 +948 40 loss.margin 3.9869987460787057 +948 40 optimizer.lr 0.003976602990881271 +948 40 negative_sampler.num_negs_per_pos 10.0 +948 40 training.batch_size 0.0 +948 41 model.embedding_dim 1.0 +948 41 model.relation_dim 0.0 +948 41 model.scoring_fct_norm 2.0 +948 41 loss.margin 3.4811031117789084 +948 41 optimizer.lr 0.0011181187451092735 +948 41 negative_sampler.num_negs_per_pos 16.0 +948 41 training.batch_size 2.0 +948 42 model.embedding_dim 2.0 +948 42 model.relation_dim 0.0 +948 42 model.scoring_fct_norm 1.0 +948 42 loss.margin 6.812244825637864 +948 42 optimizer.lr 0.00864230041403856 +948 42 negative_sampler.num_negs_per_pos 28.0 +948 42 training.batch_size 1.0 +948 43 model.embedding_dim 1.0 +948 
43 model.relation_dim 0.0 +948 43 model.scoring_fct_norm 1.0 +948 43 loss.margin 6.743344031557966 +948 43 optimizer.lr 0.02166457336611764 +948 43 negative_sampler.num_negs_per_pos 29.0 +948 43 training.batch_size 2.0 +948 44 model.embedding_dim 2.0 +948 44 model.relation_dim 0.0 +948 44 model.scoring_fct_norm 2.0 +948 44 loss.margin 3.7527620770303862 +948 44 optimizer.lr 0.003314679410321942 +948 44 negative_sampler.num_negs_per_pos 8.0 +948 44 training.batch_size 1.0 +948 45 model.embedding_dim 1.0 +948 45 model.relation_dim 2.0 +948 45 model.scoring_fct_norm 1.0 +948 45 loss.margin 2.851370023287008 +948 45 optimizer.lr 0.012556539363667543 +948 45 negative_sampler.num_negs_per_pos 69.0 +948 45 training.batch_size 0.0 +948 46 model.embedding_dim 1.0 +948 46 model.relation_dim 0.0 +948 46 model.scoring_fct_norm 1.0 +948 46 loss.margin 5.308331388449046 +948 46 optimizer.lr 0.006361220762314022 +948 46 negative_sampler.num_negs_per_pos 95.0 +948 46 training.batch_size 0.0 +948 1 dataset """wn18rr""" +948 1 model """transr""" +948 1 loss """marginranking""" +948 1 regularizer """no""" +948 1 optimizer """adam""" +948 1 training_loop """owa""" +948 1 negative_sampler """basic""" +948 1 evaluator """rankbased""" +948 2 dataset """wn18rr""" +948 2 model """transr""" +948 2 loss """marginranking""" +948 2 regularizer """no""" +948 2 optimizer """adam""" +948 2 training_loop """owa""" +948 2 negative_sampler """basic""" +948 2 evaluator """rankbased""" +948 3 dataset """wn18rr""" +948 3 model """transr""" +948 3 loss """marginranking""" +948 3 regularizer """no""" +948 3 optimizer """adam""" +948 3 training_loop """owa""" +948 3 negative_sampler """basic""" +948 3 evaluator """rankbased""" +948 4 dataset """wn18rr""" +948 4 model """transr""" +948 4 loss """marginranking""" +948 4 regularizer """no""" +948 4 optimizer """adam""" +948 4 training_loop """owa""" +948 4 negative_sampler """basic""" +948 4 evaluator """rankbased""" +948 5 dataset """wn18rr""" +948 5 model 
"""transr""" +948 5 loss """marginranking""" +948 5 regularizer """no""" +948 5 optimizer """adam""" +948 5 training_loop """owa""" +948 5 negative_sampler """basic""" +948 5 evaluator """rankbased""" +948 6 dataset """wn18rr""" +948 6 model """transr""" +948 6 loss """marginranking""" +948 6 regularizer """no""" +948 6 optimizer """adam""" +948 6 training_loop """owa""" +948 6 negative_sampler """basic""" +948 6 evaluator """rankbased""" +948 7 dataset """wn18rr""" +948 7 model """transr""" +948 7 loss """marginranking""" +948 7 regularizer """no""" +948 7 optimizer """adam""" +948 7 training_loop """owa""" +948 7 negative_sampler """basic""" +948 7 evaluator """rankbased""" +948 8 dataset """wn18rr""" +948 8 model """transr""" +948 8 loss """marginranking""" +948 8 regularizer """no""" +948 8 optimizer """adam""" +948 8 training_loop """owa""" +948 8 negative_sampler """basic""" +948 8 evaluator """rankbased""" +948 9 dataset """wn18rr""" +948 9 model """transr""" +948 9 loss """marginranking""" +948 9 regularizer """no""" +948 9 optimizer """adam""" +948 9 training_loop """owa""" +948 9 negative_sampler """basic""" +948 9 evaluator """rankbased""" +948 10 dataset """wn18rr""" +948 10 model """transr""" +948 10 loss """marginranking""" +948 10 regularizer """no""" +948 10 optimizer """adam""" +948 10 training_loop """owa""" +948 10 negative_sampler """basic""" +948 10 evaluator """rankbased""" +948 11 dataset """wn18rr""" +948 11 model """transr""" +948 11 loss """marginranking""" +948 11 regularizer """no""" +948 11 optimizer """adam""" +948 11 training_loop """owa""" +948 11 negative_sampler """basic""" +948 11 evaluator """rankbased""" +948 12 dataset """wn18rr""" +948 12 model """transr""" +948 12 loss """marginranking""" +948 12 regularizer """no""" +948 12 optimizer """adam""" +948 12 training_loop """owa""" +948 12 negative_sampler """basic""" +948 12 evaluator """rankbased""" +948 13 dataset """wn18rr""" +948 13 model """transr""" +948 13 loss 
"""marginranking""" +948 13 regularizer """no""" +948 13 optimizer """adam""" +948 13 training_loop """owa""" +948 13 negative_sampler """basic""" +948 13 evaluator """rankbased""" +948 14 dataset """wn18rr""" +948 14 model """transr""" +948 14 loss """marginranking""" +948 14 regularizer """no""" +948 14 optimizer """adam""" +948 14 training_loop """owa""" +948 14 negative_sampler """basic""" +948 14 evaluator """rankbased""" +948 15 dataset """wn18rr""" +948 15 model """transr""" +948 15 loss """marginranking""" +948 15 regularizer """no""" +948 15 optimizer """adam""" +948 15 training_loop """owa""" +948 15 negative_sampler """basic""" +948 15 evaluator """rankbased""" +948 16 dataset """wn18rr""" +948 16 model """transr""" +948 16 loss """marginranking""" +948 16 regularizer """no""" +948 16 optimizer """adam""" +948 16 training_loop """owa""" +948 16 negative_sampler """basic""" +948 16 evaluator """rankbased""" +948 17 dataset """wn18rr""" +948 17 model """transr""" +948 17 loss """marginranking""" +948 17 regularizer """no""" +948 17 optimizer """adam""" +948 17 training_loop """owa""" +948 17 negative_sampler """basic""" +948 17 evaluator """rankbased""" +948 18 dataset """wn18rr""" +948 18 model """transr""" +948 18 loss """marginranking""" +948 18 regularizer """no""" +948 18 optimizer """adam""" +948 18 training_loop """owa""" +948 18 negative_sampler """basic""" +948 18 evaluator """rankbased""" +948 19 dataset """wn18rr""" +948 19 model """transr""" +948 19 loss """marginranking""" +948 19 regularizer """no""" +948 19 optimizer """adam""" +948 19 training_loop """owa""" +948 19 negative_sampler """basic""" +948 19 evaluator """rankbased""" +948 20 dataset """wn18rr""" +948 20 model """transr""" +948 20 loss """marginranking""" +948 20 regularizer """no""" +948 20 optimizer """adam""" +948 20 training_loop """owa""" +948 20 negative_sampler """basic""" +948 20 evaluator """rankbased""" +948 21 dataset """wn18rr""" +948 21 model """transr""" +948 21 loss 
"""marginranking""" +948 21 regularizer """no""" +948 21 optimizer """adam""" +948 21 training_loop """owa""" +948 21 negative_sampler """basic""" +948 21 evaluator """rankbased""" +948 22 dataset """wn18rr""" +948 22 model """transr""" +948 22 loss """marginranking""" +948 22 regularizer """no""" +948 22 optimizer """adam""" +948 22 training_loop """owa""" +948 22 negative_sampler """basic""" +948 22 evaluator """rankbased""" +948 23 dataset """wn18rr""" +948 23 model """transr""" +948 23 loss """marginranking""" +948 23 regularizer """no""" +948 23 optimizer """adam""" +948 23 training_loop """owa""" +948 23 negative_sampler """basic""" +948 23 evaluator """rankbased""" +948 24 dataset """wn18rr""" +948 24 model """transr""" +948 24 loss """marginranking""" +948 24 regularizer """no""" +948 24 optimizer """adam""" +948 24 training_loop """owa""" +948 24 negative_sampler """basic""" +948 24 evaluator """rankbased""" +948 25 dataset """wn18rr""" +948 25 model """transr""" +948 25 loss """marginranking""" +948 25 regularizer """no""" +948 25 optimizer """adam""" +948 25 training_loop """owa""" +948 25 negative_sampler """basic""" +948 25 evaluator """rankbased""" +948 26 dataset """wn18rr""" +948 26 model """transr""" +948 26 loss """marginranking""" +948 26 regularizer """no""" +948 26 optimizer """adam""" +948 26 training_loop """owa""" +948 26 negative_sampler """basic""" +948 26 evaluator """rankbased""" +948 27 dataset """wn18rr""" +948 27 model """transr""" +948 27 loss """marginranking""" +948 27 regularizer """no""" +948 27 optimizer """adam""" +948 27 training_loop """owa""" +948 27 negative_sampler """basic""" +948 27 evaluator """rankbased""" +948 28 dataset """wn18rr""" +948 28 model """transr""" +948 28 loss """marginranking""" +948 28 regularizer """no""" +948 28 optimizer """adam""" +948 28 training_loop """owa""" +948 28 negative_sampler """basic""" +948 28 evaluator """rankbased""" +948 29 dataset """wn18rr""" +948 29 model """transr""" +948 29 loss 
"""marginranking""" +948 29 regularizer """no""" +948 29 optimizer """adam""" +948 29 training_loop """owa""" +948 29 negative_sampler """basic""" +948 29 evaluator """rankbased""" +948 30 dataset """wn18rr""" +948 30 model """transr""" +948 30 loss """marginranking""" +948 30 regularizer """no""" +948 30 optimizer """adam""" +948 30 training_loop """owa""" +948 30 negative_sampler """basic""" +948 30 evaluator """rankbased""" +948 31 dataset """wn18rr""" +948 31 model """transr""" +948 31 loss """marginranking""" +948 31 regularizer """no""" +948 31 optimizer """adam""" +948 31 training_loop """owa""" +948 31 negative_sampler """basic""" +948 31 evaluator """rankbased""" +948 32 dataset """wn18rr""" +948 32 model """transr""" +948 32 loss """marginranking""" +948 32 regularizer """no""" +948 32 optimizer """adam""" +948 32 training_loop """owa""" +948 32 negative_sampler """basic""" +948 32 evaluator """rankbased""" +948 33 dataset """wn18rr""" +948 33 model """transr""" +948 33 loss """marginranking""" +948 33 regularizer """no""" +948 33 optimizer """adam""" +948 33 training_loop """owa""" +948 33 negative_sampler """basic""" +948 33 evaluator """rankbased""" +948 34 dataset """wn18rr""" +948 34 model """transr""" +948 34 loss """marginranking""" +948 34 regularizer """no""" +948 34 optimizer """adam""" +948 34 training_loop """owa""" +948 34 negative_sampler """basic""" +948 34 evaluator """rankbased""" +948 35 dataset """wn18rr""" +948 35 model """transr""" +948 35 loss """marginranking""" +948 35 regularizer """no""" +948 35 optimizer """adam""" +948 35 training_loop """owa""" +948 35 negative_sampler """basic""" +948 35 evaluator """rankbased""" +948 36 dataset """wn18rr""" +948 36 model """transr""" +948 36 loss """marginranking""" +948 36 regularizer """no""" +948 36 optimizer """adam""" +948 36 training_loop """owa""" +948 36 negative_sampler """basic""" +948 36 evaluator """rankbased""" +948 37 dataset """wn18rr""" +948 37 model """transr""" +948 37 loss 
"""marginranking""" +948 37 regularizer """no""" +948 37 optimizer """adam""" +948 37 training_loop """owa""" +948 37 negative_sampler """basic""" +948 37 evaluator """rankbased""" +948 38 dataset """wn18rr""" +948 38 model """transr""" +948 38 loss """marginranking""" +948 38 regularizer """no""" +948 38 optimizer """adam""" +948 38 training_loop """owa""" +948 38 negative_sampler """basic""" +948 38 evaluator """rankbased""" +948 39 dataset """wn18rr""" +948 39 model """transr""" +948 39 loss """marginranking""" +948 39 regularizer """no""" +948 39 optimizer """adam""" +948 39 training_loop """owa""" +948 39 negative_sampler """basic""" +948 39 evaluator """rankbased""" +948 40 dataset """wn18rr""" +948 40 model """transr""" +948 40 loss """marginranking""" +948 40 regularizer """no""" +948 40 optimizer """adam""" +948 40 training_loop """owa""" +948 40 negative_sampler """basic""" +948 40 evaluator """rankbased""" +948 41 dataset """wn18rr""" +948 41 model """transr""" +948 41 loss """marginranking""" +948 41 regularizer """no""" +948 41 optimizer """adam""" +948 41 training_loop """owa""" +948 41 negative_sampler """basic""" +948 41 evaluator """rankbased""" +948 42 dataset """wn18rr""" +948 42 model """transr""" +948 42 loss """marginranking""" +948 42 regularizer """no""" +948 42 optimizer """adam""" +948 42 training_loop """owa""" +948 42 negative_sampler """basic""" +948 42 evaluator """rankbased""" +948 43 dataset """wn18rr""" +948 43 model """transr""" +948 43 loss """marginranking""" +948 43 regularizer """no""" +948 43 optimizer """adam""" +948 43 training_loop """owa""" +948 43 negative_sampler """basic""" +948 43 evaluator """rankbased""" +948 44 dataset """wn18rr""" +948 44 model """transr""" +948 44 loss """marginranking""" +948 44 regularizer """no""" +948 44 optimizer """adam""" +948 44 training_loop """owa""" +948 44 negative_sampler """basic""" +948 44 evaluator """rankbased""" +948 45 dataset """wn18rr""" +948 45 model """transr""" +948 45 loss 
"""marginranking""" +948 45 regularizer """no""" +948 45 optimizer """adam""" +948 45 training_loop """owa""" +948 45 negative_sampler """basic""" +948 45 evaluator """rankbased""" +948 46 dataset """wn18rr""" +948 46 model """transr""" +948 46 loss """marginranking""" +948 46 regularizer """no""" +948 46 optimizer """adam""" +948 46 training_loop """owa""" +948 46 negative_sampler """basic""" +948 46 evaluator """rankbased""" +949 1 model.embedding_dim 0.0 +949 1 model.relation_dim 2.0 +949 1 model.dropout_0 0.4550112674434435 +949 1 model.dropout_1 0.4500175181644861 +949 1 model.dropout_2 0.29793347450412216 +949 1 optimizer.lr 0.00420023608534756 +949 1 training.batch_size 1.0 +949 1 training.label_smoothing 0.007439983206214817 +949 2 model.embedding_dim 2.0 +949 2 model.relation_dim 2.0 +949 2 model.dropout_0 0.4259157601906101 +949 2 model.dropout_1 0.315502894753252 +949 2 model.dropout_2 0.20988228009900822 +949 2 optimizer.lr 0.010096559334047624 +949 2 training.batch_size 1.0 +949 2 training.label_smoothing 0.013847216857555009 +949 3 model.embedding_dim 2.0 +949 3 model.relation_dim 2.0 +949 3 model.dropout_0 0.2131389710153246 +949 3 model.dropout_1 0.1881905119242554 +949 3 model.dropout_2 0.14527273110820427 +949 3 optimizer.lr 0.0026165043066004074 +949 3 training.batch_size 2.0 +949 3 training.label_smoothing 0.008989610548600438 +949 4 model.embedding_dim 1.0 +949 4 model.relation_dim 2.0 +949 4 model.dropout_0 0.40025016840880934 +949 4 model.dropout_1 0.1436977958091557 +949 4 model.dropout_2 0.2774913268307919 +949 4 optimizer.lr 0.004451377712163505 +949 4 training.batch_size 1.0 +949 4 training.label_smoothing 0.0797957602544633 +949 1 dataset """fb15k237""" +949 1 model """tucker""" +949 1 loss """crossentropy""" +949 1 regularizer """no""" +949 1 optimizer """adam""" +949 1 training_loop """lcwa""" +949 1 evaluator """rankbased""" +949 2 dataset """fb15k237""" +949 2 model """tucker""" +949 2 loss """crossentropy""" +949 2 regularizer 
"""no""" +949 2 optimizer """adam""" +949 2 training_loop """lcwa""" +949 2 evaluator """rankbased""" +949 3 dataset """fb15k237""" +949 3 model """tucker""" +949 3 loss """crossentropy""" +949 3 regularizer """no""" +949 3 optimizer """adam""" +949 3 training_loop """lcwa""" +949 3 evaluator """rankbased""" +949 4 dataset """fb15k237""" +949 4 model """tucker""" +949 4 loss """crossentropy""" +949 4 regularizer """no""" +949 4 optimizer """adam""" +949 4 training_loop """lcwa""" +949 4 evaluator """rankbased""" +950 1 model.embedding_dim 2.0 +950 1 model.relation_dim 1.0 +950 1 model.dropout_0 0.4519488684050546 +950 1 model.dropout_1 0.12495265511230134 +950 1 model.dropout_2 0.17024329286651982 +950 1 optimizer.lr 0.09148660566014293 +950 1 training.batch_size 1.0 +950 1 training.label_smoothing 0.018830457612801818 +950 2 model.embedding_dim 1.0 +950 2 model.relation_dim 0.0 +950 2 model.dropout_0 0.394574013007457 +950 2 model.dropout_1 0.1858209405770678 +950 2 model.dropout_2 0.38644485455158506 +950 2 optimizer.lr 0.0036282021830066078 +950 2 training.batch_size 1.0 +950 2 training.label_smoothing 0.02416069017334947 +950 3 model.embedding_dim 0.0 +950 3 model.relation_dim 1.0 +950 3 model.dropout_0 0.13602091982951156 +950 3 model.dropout_1 0.2941777489302635 +950 3 model.dropout_2 0.4154549034396522 +950 3 optimizer.lr 0.019094398500421262 +950 3 training.batch_size 0.0 +950 3 training.label_smoothing 0.12381095574061485 +950 4 model.embedding_dim 0.0 +950 4 model.relation_dim 0.0 +950 4 model.dropout_0 0.4426474063120503 +950 4 model.dropout_1 0.24708601287851384 +950 4 model.dropout_2 0.14639257497112432 +950 4 optimizer.lr 0.005723206323909517 +950 4 training.batch_size 1.0 +950 4 training.label_smoothing 0.12820602681519297 +950 5 model.embedding_dim 0.0 +950 5 model.relation_dim 2.0 +950 5 model.dropout_0 0.4244905034865407 +950 5 model.dropout_1 0.1228398241430445 +950 5 model.dropout_2 0.1308060636738524 +950 5 optimizer.lr 0.037101503945532875 
+950 5 training.batch_size 0.0 +950 5 training.label_smoothing 0.07871618459589681 +950 6 model.embedding_dim 2.0 +950 6 model.relation_dim 1.0 +950 6 model.dropout_0 0.35751396118581913 +950 6 model.dropout_1 0.26845747746969123 +950 6 model.dropout_2 0.29459101464177584 +950 6 optimizer.lr 0.003437342600066384 +950 6 training.batch_size 0.0 +950 6 training.label_smoothing 0.06989759041788399 +950 7 model.embedding_dim 0.0 +950 7 model.relation_dim 2.0 +950 7 model.dropout_0 0.20474621343275837 +950 7 model.dropout_1 0.4872894451442713 +950 7 model.dropout_2 0.39598699987908714 +950 7 optimizer.lr 0.0011020941770125668 +950 7 training.batch_size 1.0 +950 7 training.label_smoothing 0.006210824494775227 +950 8 model.embedding_dim 2.0 +950 8 model.relation_dim 0.0 +950 8 model.dropout_0 0.13790138185748166 +950 8 model.dropout_1 0.44839531302152924 +950 8 model.dropout_2 0.22801448157936674 +950 8 optimizer.lr 0.05841318259289139 +950 8 training.batch_size 1.0 +950 8 training.label_smoothing 0.05646506018671449 +950 9 model.embedding_dim 0.0 +950 9 model.relation_dim 1.0 +950 9 model.dropout_0 0.45212699987192484 +950 9 model.dropout_1 0.36399055213128306 +950 9 model.dropout_2 0.4299532719646414 +950 9 optimizer.lr 0.004990894390589742 +950 9 training.batch_size 1.0 +950 9 training.label_smoothing 0.0031675945061742385 +950 10 model.embedding_dim 2.0 +950 10 model.relation_dim 1.0 +950 10 model.dropout_0 0.14659533285321752 +950 10 model.dropout_1 0.4074464130937592 +950 10 model.dropout_2 0.39373856498991455 +950 10 optimizer.lr 0.038873317213007505 +950 10 training.batch_size 2.0 +950 10 training.label_smoothing 0.3701874348039886 +950 11 model.embedding_dim 0.0 +950 11 model.relation_dim 1.0 +950 11 model.dropout_0 0.3946917375546546 +950 11 model.dropout_1 0.10107558196261285 +950 11 model.dropout_2 0.4069445483962688 +950 11 optimizer.lr 0.017939810145245046 +950 11 training.batch_size 1.0 +950 11 training.label_smoothing 0.04275500619410253 +950 12 
model.embedding_dim 2.0 +950 12 model.relation_dim 0.0 +950 12 model.dropout_0 0.10727824135247435 +950 12 model.dropout_1 0.4733426631474221 +950 12 model.dropout_2 0.40070386473736547 +950 12 optimizer.lr 0.0061425487989726375 +950 12 training.batch_size 1.0 +950 12 training.label_smoothing 0.00760923088493968 +950 13 model.embedding_dim 1.0 +950 13 model.relation_dim 2.0 +950 13 model.dropout_0 0.15730865178061784 +950 13 model.dropout_1 0.12754071599705283 +950 13 model.dropout_2 0.1375604763654358 +950 13 optimizer.lr 0.01475035540362743 +950 13 training.batch_size 2.0 +950 13 training.label_smoothing 0.5912883504439916 +950 14 model.embedding_dim 0.0 +950 14 model.relation_dim 0.0 +950 14 model.dropout_0 0.20024246246797298 +950 14 model.dropout_1 0.31403343303565956 +950 14 model.dropout_2 0.17663432021756187 +950 14 optimizer.lr 0.0013083400017356497 +950 14 training.batch_size 1.0 +950 14 training.label_smoothing 0.03716644341014562 +950 15 model.embedding_dim 1.0 +950 15 model.relation_dim 0.0 +950 15 model.dropout_0 0.1391189655861472 +950 15 model.dropout_1 0.13195047097286589 +950 15 model.dropout_2 0.19330179467334077 +950 15 optimizer.lr 0.0018087370711403331 +950 15 training.batch_size 2.0 +950 15 training.label_smoothing 0.7005371838141643 +950 16 model.embedding_dim 2.0 +950 16 model.relation_dim 0.0 +950 16 model.dropout_0 0.14615355799065535 +950 16 model.dropout_1 0.1389631266209846 +950 16 model.dropout_2 0.48700177554141666 +950 16 optimizer.lr 0.0018834161851845456 +950 16 training.batch_size 1.0 +950 16 training.label_smoothing 0.42800886365372687 +950 1 dataset """fb15k237""" +950 1 model """tucker""" +950 1 loss """crossentropy""" +950 1 regularizer """no""" +950 1 optimizer """adam""" +950 1 training_loop """lcwa""" +950 1 evaluator """rankbased""" +950 2 dataset """fb15k237""" +950 2 model """tucker""" +950 2 loss """crossentropy""" +950 2 regularizer """no""" +950 2 optimizer """adam""" +950 2 training_loop """lcwa""" +950 2 evaluator 
"""rankbased""" +950 3 dataset """fb15k237""" +950 3 model """tucker""" +950 3 loss """crossentropy""" +950 3 regularizer """no""" +950 3 optimizer """adam""" +950 3 training_loop """lcwa""" +950 3 evaluator """rankbased""" +950 4 dataset """fb15k237""" +950 4 model """tucker""" +950 4 loss """crossentropy""" +950 4 regularizer """no""" +950 4 optimizer """adam""" +950 4 training_loop """lcwa""" +950 4 evaluator """rankbased""" +950 5 dataset """fb15k237""" +950 5 model """tucker""" +950 5 loss """crossentropy""" +950 5 regularizer """no""" +950 5 optimizer """adam""" +950 5 training_loop """lcwa""" +950 5 evaluator """rankbased""" +950 6 dataset """fb15k237""" +950 6 model """tucker""" +950 6 loss """crossentropy""" +950 6 regularizer """no""" +950 6 optimizer """adam""" +950 6 training_loop """lcwa""" +950 6 evaluator """rankbased""" +950 7 dataset """fb15k237""" +950 7 model """tucker""" +950 7 loss """crossentropy""" +950 7 regularizer """no""" +950 7 optimizer """adam""" +950 7 training_loop """lcwa""" +950 7 evaluator """rankbased""" +950 8 dataset """fb15k237""" +950 8 model """tucker""" +950 8 loss """crossentropy""" +950 8 regularizer """no""" +950 8 optimizer """adam""" +950 8 training_loop """lcwa""" +950 8 evaluator """rankbased""" +950 9 dataset """fb15k237""" +950 9 model """tucker""" +950 9 loss """crossentropy""" +950 9 regularizer """no""" +950 9 optimizer """adam""" +950 9 training_loop """lcwa""" +950 9 evaluator """rankbased""" +950 10 dataset """fb15k237""" +950 10 model """tucker""" +950 10 loss """crossentropy""" +950 10 regularizer """no""" +950 10 optimizer """adam""" +950 10 training_loop """lcwa""" +950 10 evaluator """rankbased""" +950 11 dataset """fb15k237""" +950 11 model """tucker""" +950 11 loss """crossentropy""" +950 11 regularizer """no""" +950 11 optimizer """adam""" +950 11 training_loop """lcwa""" +950 11 evaluator """rankbased""" +950 12 dataset """fb15k237""" +950 12 model """tucker""" +950 12 loss """crossentropy""" +950 12 
regularizer """no""" +950 12 optimizer """adam""" +950 12 training_loop """lcwa""" +950 12 evaluator """rankbased""" +950 13 dataset """fb15k237""" +950 13 model """tucker""" +950 13 loss """crossentropy""" +950 13 regularizer """no""" +950 13 optimizer """adam""" +950 13 training_loop """lcwa""" +950 13 evaluator """rankbased""" +950 14 dataset """fb15k237""" +950 14 model """tucker""" +950 14 loss """crossentropy""" +950 14 regularizer """no""" +950 14 optimizer """adam""" +950 14 training_loop """lcwa""" +950 14 evaluator """rankbased""" +950 15 dataset """fb15k237""" +950 15 model """tucker""" +950 15 loss """crossentropy""" +950 15 regularizer """no""" +950 15 optimizer """adam""" +950 15 training_loop """lcwa""" +950 15 evaluator """rankbased""" +950 16 dataset """fb15k237""" +950 16 model """tucker""" +950 16 loss """crossentropy""" +950 16 regularizer """no""" +950 16 optimizer """adam""" +950 16 training_loop """lcwa""" +950 16 evaluator """rankbased""" +951 1 model.embedding_dim 2.0 +951 1 model.relation_dim 0.0 +951 1 model.dropout_0 0.40612043452195523 +951 1 model.dropout_1 0.10364351768326925 +951 1 model.dropout_2 0.19399067837349518 +951 1 optimizer.lr 0.028220407215817714 +951 1 training.batch_size 2.0 +951 1 training.label_smoothing 0.0026717647583527006 +951 2 model.embedding_dim 2.0 +951 2 model.relation_dim 1.0 +951 2 model.dropout_0 0.36542886266942154 +951 2 model.dropout_1 0.4315303077941412 +951 2 model.dropout_2 0.22496950629089973 +951 2 optimizer.lr 0.03012865484404833 +951 2 training.batch_size 0.0 +951 2 training.label_smoothing 0.04268059425937861 +951 3 model.embedding_dim 0.0 +951 3 model.relation_dim 2.0 +951 3 model.dropout_0 0.4777896356272066 +951 3 model.dropout_1 0.4755988576447051 +951 3 model.dropout_2 0.127686336307984 +951 3 optimizer.lr 0.004013820056888576 +951 3 training.batch_size 1.0 +951 3 training.label_smoothing 0.017000038310636477 +951 4 model.embedding_dim 0.0 +951 4 model.relation_dim 2.0 +951 4 model.dropout_0 
0.4215468939957133 +951 4 model.dropout_1 0.4304205047499254 +951 4 model.dropout_2 0.40870734891790295 +951 4 optimizer.lr 0.002924736732789132 +951 4 training.batch_size 0.0 +951 4 training.label_smoothing 0.0025800511313442743 +951 5 model.embedding_dim 1.0 +951 5 model.relation_dim 0.0 +951 5 model.dropout_0 0.4295600419547356 +951 5 model.dropout_1 0.27777257880956635 +951 5 model.dropout_2 0.18481907004116227 +951 5 optimizer.lr 0.0519278929565653 +951 5 training.batch_size 2.0 +951 5 training.label_smoothing 0.0029725424708680406 +951 6 model.embedding_dim 2.0 +951 6 model.relation_dim 2.0 +951 6 model.dropout_0 0.45451524379469843 +951 6 model.dropout_1 0.325615494395769 +951 6 model.dropout_2 0.19977409337834418 +951 6 optimizer.lr 0.035665763628216204 +951 6 training.batch_size 1.0 +951 6 training.label_smoothing 0.4099815337990704 +951 7 model.embedding_dim 0.0 +951 7 model.relation_dim 0.0 +951 7 model.dropout_0 0.20916283943094835 +951 7 model.dropout_1 0.19166793067175336 +951 7 model.dropout_2 0.21984908087586175 +951 7 optimizer.lr 0.07172912610339927 +951 7 training.batch_size 1.0 +951 7 training.label_smoothing 0.039880301424875246 +951 8 model.embedding_dim 1.0 +951 8 model.relation_dim 0.0 +951 8 model.dropout_0 0.3960059795913954 +951 8 model.dropout_1 0.4914129550227301 +951 8 model.dropout_2 0.2649315498971374 +951 8 optimizer.lr 0.006949199132794613 +951 8 training.batch_size 1.0 +951 8 training.label_smoothing 0.0025306960957896395 +951 9 model.embedding_dim 2.0 +951 9 model.relation_dim 0.0 +951 9 model.dropout_0 0.19425196482871906 +951 9 model.dropout_1 0.27078785501637065 +951 9 model.dropout_2 0.3757014014214603 +951 9 optimizer.lr 0.0029222797254608687 +951 9 training.batch_size 1.0 +951 9 training.label_smoothing 0.009709084555665609 +951 10 model.embedding_dim 2.0 +951 10 model.relation_dim 1.0 +951 10 model.dropout_0 0.22641305454645 +951 10 model.dropout_1 0.3935275675875753 +951 10 model.dropout_2 0.3299264951544233 +951 10 
optimizer.lr 0.018856620301695503 +951 10 training.batch_size 2.0 +951 10 training.label_smoothing 0.3136314891800361 +951 11 model.embedding_dim 1.0 +951 11 model.relation_dim 1.0 +951 11 model.dropout_0 0.1930765464257066 +951 11 model.dropout_1 0.3074811193325176 +951 11 model.dropout_2 0.12242997601326949 +951 11 optimizer.lr 0.006780402363881897 +951 11 training.batch_size 2.0 +951 11 training.label_smoothing 0.00438564577731719 +951 12 model.embedding_dim 2.0 +951 12 model.relation_dim 0.0 +951 12 model.dropout_0 0.3815958937555876 +951 12 model.dropout_1 0.3707333885862088 +951 12 model.dropout_2 0.4889142007764053 +951 12 optimizer.lr 0.0038570161192988967 +951 12 training.batch_size 2.0 +951 12 training.label_smoothing 0.4962659090532219 +951 13 model.embedding_dim 2.0 +951 13 model.relation_dim 2.0 +951 13 model.dropout_0 0.1670326301559209 +951 13 model.dropout_1 0.11688361644417347 +951 13 model.dropout_2 0.475653835567764 +951 13 optimizer.lr 0.09343544740781701 +951 13 training.batch_size 2.0 +951 13 training.label_smoothing 0.005214957865131633 +951 1 dataset """fb15k237""" +951 1 model """tucker""" +951 1 loss """bceaftersigmoid""" +951 1 regularizer """no""" +951 1 optimizer """adam""" +951 1 training_loop """lcwa""" +951 1 evaluator """rankbased""" +951 2 dataset """fb15k237""" +951 2 model """tucker""" +951 2 loss """bceaftersigmoid""" +951 2 regularizer """no""" +951 2 optimizer """adam""" +951 2 training_loop """lcwa""" +951 2 evaluator """rankbased""" +951 3 dataset """fb15k237""" +951 3 model """tucker""" +951 3 loss """bceaftersigmoid""" +951 3 regularizer """no""" +951 3 optimizer """adam""" +951 3 training_loop """lcwa""" +951 3 evaluator """rankbased""" +951 4 dataset """fb15k237""" +951 4 model """tucker""" +951 4 loss """bceaftersigmoid""" +951 4 regularizer """no""" +951 4 optimizer """adam""" +951 4 training_loop """lcwa""" +951 4 evaluator """rankbased""" +951 5 dataset """fb15k237""" +951 5 model """tucker""" +951 5 loss 
"""bceaftersigmoid""" +951 5 regularizer """no""" +951 5 optimizer """adam""" +951 5 training_loop """lcwa""" +951 5 evaluator """rankbased""" +951 6 dataset """fb15k237""" +951 6 model """tucker""" +951 6 loss """bceaftersigmoid""" +951 6 regularizer """no""" +951 6 optimizer """adam""" +951 6 training_loop """lcwa""" +951 6 evaluator """rankbased""" +951 7 dataset """fb15k237""" +951 7 model """tucker""" +951 7 loss """bceaftersigmoid""" +951 7 regularizer """no""" +951 7 optimizer """adam""" +951 7 training_loop """lcwa""" +951 7 evaluator """rankbased""" +951 8 dataset """fb15k237""" +951 8 model """tucker""" +951 8 loss """bceaftersigmoid""" +951 8 regularizer """no""" +951 8 optimizer """adam""" +951 8 training_loop """lcwa""" +951 8 evaluator """rankbased""" +951 9 dataset """fb15k237""" +951 9 model """tucker""" +951 9 loss """bceaftersigmoid""" +951 9 regularizer """no""" +951 9 optimizer """adam""" +951 9 training_loop """lcwa""" +951 9 evaluator """rankbased""" +951 10 dataset """fb15k237""" +951 10 model """tucker""" +951 10 loss """bceaftersigmoid""" +951 10 regularizer """no""" +951 10 optimizer """adam""" +951 10 training_loop """lcwa""" +951 10 evaluator """rankbased""" +951 11 dataset """fb15k237""" +951 11 model """tucker""" +951 11 loss """bceaftersigmoid""" +951 11 regularizer """no""" +951 11 optimizer """adam""" +951 11 training_loop """lcwa""" +951 11 evaluator """rankbased""" +951 12 dataset """fb15k237""" +951 12 model """tucker""" +951 12 loss """bceaftersigmoid""" +951 12 regularizer """no""" +951 12 optimizer """adam""" +951 12 training_loop """lcwa""" +951 12 evaluator """rankbased""" +951 13 dataset """fb15k237""" +951 13 model """tucker""" +951 13 loss """bceaftersigmoid""" +951 13 regularizer """no""" +951 13 optimizer """adam""" +951 13 training_loop """lcwa""" +951 13 evaluator """rankbased""" +952 1 model.embedding_dim 2.0 +952 1 model.relation_dim 1.0 +952 1 model.dropout_0 0.42462068962663035 +952 1 model.dropout_1 
0.30164109279043716 +952 1 model.dropout_2 0.22977332719492996 +952 1 optimizer.lr 0.009818322010801546 +952 1 training.batch_size 1.0 +952 1 training.label_smoothing 0.07174186150633557 +952 2 model.embedding_dim 2.0 +952 2 model.relation_dim 1.0 +952 2 model.dropout_0 0.2007851570485186 +952 2 model.dropout_1 0.22183033112656642 +952 2 model.dropout_2 0.2889138549587073 +952 2 optimizer.lr 0.0838212259377981 +952 2 training.batch_size 0.0 +952 2 training.label_smoothing 0.031462351049419926 +952 3 model.embedding_dim 1.0 +952 3 model.relation_dim 2.0 +952 3 model.dropout_0 0.25829942549658264 +952 3 model.dropout_1 0.28917766486656377 +952 3 model.dropout_2 0.36213570720219246 +952 3 optimizer.lr 0.053924319370275635 +952 3 training.batch_size 2.0 +952 3 training.label_smoothing 0.003538890665761105 +952 4 model.embedding_dim 2.0 +952 4 model.relation_dim 1.0 +952 4 model.dropout_0 0.1280352009900712 +952 4 model.dropout_1 0.4687908338750095 +952 4 model.dropout_2 0.4659546422355944 +952 4 optimizer.lr 0.009551309638733426 +952 4 training.batch_size 0.0 +952 4 training.label_smoothing 0.005089359158341599 +952 5 model.embedding_dim 2.0 +952 5 model.relation_dim 0.0 +952 5 model.dropout_0 0.34667609342014855 +952 5 model.dropout_1 0.3549070519395987 +952 5 model.dropout_2 0.15977021113577675 +952 5 optimizer.lr 0.0011859462717282397 +952 5 training.batch_size 2.0 +952 5 training.label_smoothing 0.01884696728005757 +952 6 model.embedding_dim 1.0 +952 6 model.relation_dim 2.0 +952 6 model.dropout_0 0.4298812530407421 +952 6 model.dropout_1 0.4638742003401284 +952 6 model.dropout_2 0.42049645502765265 +952 6 optimizer.lr 0.07908033852952019 +952 6 training.batch_size 1.0 +952 6 training.label_smoothing 0.7989818242754617 +952 7 model.embedding_dim 1.0 +952 7 model.relation_dim 1.0 +952 7 model.dropout_0 0.12033609560797186 +952 7 model.dropout_1 0.4527496101456687 +952 7 model.dropout_2 0.2985068127906481 +952 7 optimizer.lr 0.03597157352415 +952 7 
training.batch_size 0.0 +952 7 training.label_smoothing 0.025415614282182843 +952 1 dataset """fb15k237""" +952 1 model """tucker""" +952 1 loss """softplus""" +952 1 regularizer """no""" +952 1 optimizer """adam""" +952 1 training_loop """lcwa""" +952 1 evaluator """rankbased""" +952 2 dataset """fb15k237""" +952 2 model """tucker""" +952 2 loss """softplus""" +952 2 regularizer """no""" +952 2 optimizer """adam""" +952 2 training_loop """lcwa""" +952 2 evaluator """rankbased""" +952 3 dataset """fb15k237""" +952 3 model """tucker""" +952 3 loss """softplus""" +952 3 regularizer """no""" +952 3 optimizer """adam""" +952 3 training_loop """lcwa""" +952 3 evaluator """rankbased""" +952 4 dataset """fb15k237""" +952 4 model """tucker""" +952 4 loss """softplus""" +952 4 regularizer """no""" +952 4 optimizer """adam""" +952 4 training_loop """lcwa""" +952 4 evaluator """rankbased""" +952 5 dataset """fb15k237""" +952 5 model """tucker""" +952 5 loss """softplus""" +952 5 regularizer """no""" +952 5 optimizer """adam""" +952 5 training_loop """lcwa""" +952 5 evaluator """rankbased""" +952 6 dataset """fb15k237""" +952 6 model """tucker""" +952 6 loss """softplus""" +952 6 regularizer """no""" +952 6 optimizer """adam""" +952 6 training_loop """lcwa""" +952 6 evaluator """rankbased""" +952 7 dataset """fb15k237""" +952 7 model """tucker""" +952 7 loss """softplus""" +952 7 regularizer """no""" +952 7 optimizer """adam""" +952 7 training_loop """lcwa""" +952 7 evaluator """rankbased""" +953 1 model.embedding_dim 2.0 +953 1 model.relation_dim 2.0 +953 1 model.dropout_0 0.49031918570129385 +953 1 model.dropout_1 0.31507102133857834 +953 1 model.dropout_2 0.21497248057630444 +953 1 optimizer.lr 0.002627188860155492 +953 1 training.batch_size 2.0 +953 1 training.label_smoothing 0.0074496521171778995 +953 2 model.embedding_dim 0.0 +953 2 model.relation_dim 0.0 +953 2 model.dropout_0 0.3437619346457589 +953 2 model.dropout_1 0.22683742746395003 +953 2 model.dropout_2 
0.10494592635110177 +953 2 optimizer.lr 0.0025777842722737084 +953 2 training.batch_size 0.0 +953 2 training.label_smoothing 0.004784733683297777 +953 3 model.embedding_dim 2.0 +953 3 model.relation_dim 0.0 +953 3 model.dropout_0 0.12962842630794022 +953 3 model.dropout_1 0.44865618505468596 +953 3 model.dropout_2 0.44469178852900343 +953 3 optimizer.lr 0.029790288632668235 +953 3 training.batch_size 1.0 +953 3 training.label_smoothing 0.0019170815441749063 +953 4 model.embedding_dim 1.0 +953 4 model.relation_dim 2.0 +953 4 model.dropout_0 0.37724173820371737 +953 4 model.dropout_1 0.4571229691619938 +953 4 model.dropout_2 0.22786871752004548 +953 4 optimizer.lr 0.003163421231854003 +953 4 training.batch_size 2.0 +953 4 training.label_smoothing 0.031751931425097364 +953 5 model.embedding_dim 2.0 +953 5 model.relation_dim 2.0 +953 5 model.dropout_0 0.3832992290120183 +953 5 model.dropout_1 0.21400065304675409 +953 5 model.dropout_2 0.20959573438531265 +953 5 optimizer.lr 0.0010773276016603401 +953 5 training.batch_size 1.0 +953 5 training.label_smoothing 0.041264944467758274 +953 6 model.embedding_dim 2.0 +953 6 model.relation_dim 2.0 +953 6 model.dropout_0 0.42538883483165435 +953 6 model.dropout_1 0.43907560740104445 +953 6 model.dropout_2 0.30276058720222937 +953 6 optimizer.lr 0.0026430377381767077 +953 6 training.batch_size 1.0 +953 6 training.label_smoothing 0.07665867277718132 +953 7 model.embedding_dim 0.0 +953 7 model.relation_dim 1.0 +953 7 model.dropout_0 0.21477495008744177 +953 7 model.dropout_1 0.15610968008955597 +953 7 model.dropout_2 0.2615742986889066 +953 7 optimizer.lr 0.07419039706157618 +953 7 training.batch_size 0.0 +953 7 training.label_smoothing 0.0010589964085520487 +953 8 model.embedding_dim 1.0 +953 8 model.relation_dim 2.0 +953 8 model.dropout_0 0.1408306718492107 +953 8 model.dropout_1 0.19892963343931347 +953 8 model.dropout_2 0.38032263015968276 +953 8 optimizer.lr 0.0030967250495968902 +953 8 training.batch_size 0.0 +953 8 
training.label_smoothing 0.13432544075591904 +953 9 model.embedding_dim 1.0 +953 9 model.relation_dim 0.0 +953 9 model.dropout_0 0.3482332254964241 +953 9 model.dropout_1 0.2928031971957148 +953 9 model.dropout_2 0.25224313559521866 +953 9 optimizer.lr 0.0021270420038575636 +953 9 training.batch_size 0.0 +953 9 training.label_smoothing 0.7315262974893103 +953 10 model.embedding_dim 0.0 +953 10 model.relation_dim 2.0 +953 10 model.dropout_0 0.40701565632358955 +953 10 model.dropout_1 0.20097248952695723 +953 10 model.dropout_2 0.4541173351426637 +953 10 optimizer.lr 0.029661751168856908 +953 10 training.batch_size 1.0 +953 10 training.label_smoothing 0.01663082723744935 +953 11 model.embedding_dim 2.0 +953 11 model.relation_dim 2.0 +953 11 model.dropout_0 0.28982264809668606 +953 11 model.dropout_1 0.4821515986646054 +953 11 model.dropout_2 0.18200065658283254 +953 11 optimizer.lr 0.02084124189701152 +953 11 training.batch_size 2.0 +953 11 training.label_smoothing 0.04448311399701303 +953 12 model.embedding_dim 2.0 +953 12 model.relation_dim 0.0 +953 12 model.dropout_0 0.3195839192032679 +953 12 model.dropout_1 0.44786789104422037 +953 12 model.dropout_2 0.1802196248331324 +953 12 optimizer.lr 0.06093966508420283 +953 12 training.batch_size 2.0 +953 12 training.label_smoothing 0.048948319140665164 +953 1 dataset """fb15k237""" +953 1 model """tucker""" +953 1 loss """bceaftersigmoid""" +953 1 regularizer """no""" +953 1 optimizer """adam""" +953 1 training_loop """lcwa""" +953 1 evaluator """rankbased""" +953 2 dataset """fb15k237""" +953 2 model """tucker""" +953 2 loss """bceaftersigmoid""" +953 2 regularizer """no""" +953 2 optimizer """adam""" +953 2 training_loop """lcwa""" +953 2 evaluator """rankbased""" +953 3 dataset """fb15k237""" +953 3 model """tucker""" +953 3 loss """bceaftersigmoid""" +953 3 regularizer """no""" +953 3 optimizer """adam""" +953 3 training_loop """lcwa""" +953 3 evaluator """rankbased""" +953 4 dataset """fb15k237""" +953 4 model 
"""tucker""" +953 4 loss """bceaftersigmoid""" +953 4 regularizer """no""" +953 4 optimizer """adam""" +953 4 training_loop """lcwa""" +953 4 evaluator """rankbased""" +953 5 dataset """fb15k237""" +953 5 model """tucker""" +953 5 loss """bceaftersigmoid""" +953 5 regularizer """no""" +953 5 optimizer """adam""" +953 5 training_loop """lcwa""" +953 5 evaluator """rankbased""" +953 6 dataset """fb15k237""" +953 6 model """tucker""" +953 6 loss """bceaftersigmoid""" +953 6 regularizer """no""" +953 6 optimizer """adam""" +953 6 training_loop """lcwa""" +953 6 evaluator """rankbased""" +953 7 dataset """fb15k237""" +953 7 model """tucker""" +953 7 loss """bceaftersigmoid""" +953 7 regularizer """no""" +953 7 optimizer """adam""" +953 7 training_loop """lcwa""" +953 7 evaluator """rankbased""" +953 8 dataset """fb15k237""" +953 8 model """tucker""" +953 8 loss """bceaftersigmoid""" +953 8 regularizer """no""" +953 8 optimizer """adam""" +953 8 training_loop """lcwa""" +953 8 evaluator """rankbased""" +953 9 dataset """fb15k237""" +953 9 model """tucker""" +953 9 loss """bceaftersigmoid""" +953 9 regularizer """no""" +953 9 optimizer """adam""" +953 9 training_loop """lcwa""" +953 9 evaluator """rankbased""" +953 10 dataset """fb15k237""" +953 10 model """tucker""" +953 10 loss """bceaftersigmoid""" +953 10 regularizer """no""" +953 10 optimizer """adam""" +953 10 training_loop """lcwa""" +953 10 evaluator """rankbased""" +953 11 dataset """fb15k237""" +953 11 model """tucker""" +953 11 loss """bceaftersigmoid""" +953 11 regularizer """no""" +953 11 optimizer """adam""" +953 11 training_loop """lcwa""" +953 11 evaluator """rankbased""" +953 12 dataset """fb15k237""" +953 12 model """tucker""" +953 12 loss """bceaftersigmoid""" +953 12 regularizer """no""" +953 12 optimizer """adam""" +953 12 training_loop """lcwa""" +953 12 evaluator """rankbased""" +954 1 model.embedding_dim 2.0 +954 1 model.relation_dim 1.0 +954 1 model.dropout_0 0.3963995017781951 +954 1 
model.dropout_1 0.29221266424365927 +954 1 model.dropout_2 0.45801951772828564 +954 1 optimizer.lr 0.040004016645862964 +954 1 training.batch_size 2.0 +954 1 training.label_smoothing 0.0012589556975089997 +954 2 model.embedding_dim 0.0 +954 2 model.relation_dim 2.0 +954 2 model.dropout_0 0.12640677824906682 +954 2 model.dropout_1 0.3269446257690828 +954 2 model.dropout_2 0.4490281927009162 +954 2 optimizer.lr 0.026953194456285384 +954 2 training.batch_size 2.0 +954 2 training.label_smoothing 0.005407039058663499 +954 3 model.embedding_dim 2.0 +954 3 model.relation_dim 0.0 +954 3 model.dropout_0 0.27362172157616266 +954 3 model.dropout_1 0.46960618109344604 +954 3 model.dropout_2 0.14897307495553194 +954 3 optimizer.lr 0.011899134671700673 +954 3 training.batch_size 0.0 +954 3 training.label_smoothing 0.9268655974336975 +954 4 model.embedding_dim 2.0 +954 4 model.relation_dim 1.0 +954 4 model.dropout_0 0.4007614514143024 +954 4 model.dropout_1 0.36840705253472383 +954 4 model.dropout_2 0.4282778032166609 +954 4 optimizer.lr 0.0011580785971420037 +954 4 training.batch_size 2.0 +954 4 training.label_smoothing 0.10214058693122036 +954 5 model.embedding_dim 1.0 +954 5 model.relation_dim 0.0 +954 5 model.dropout_0 0.3193099554752642 +954 5 model.dropout_1 0.26200134798532065 +954 5 model.dropout_2 0.35247656524957793 +954 5 optimizer.lr 0.006857435582226712 +954 5 training.batch_size 2.0 +954 5 training.label_smoothing 0.04562029341291902 +954 6 model.embedding_dim 1.0 +954 6 model.relation_dim 1.0 +954 6 model.dropout_0 0.2885935415282017 +954 6 model.dropout_1 0.35950116372743685 +954 6 model.dropout_2 0.19609229496654976 +954 6 optimizer.lr 0.06370169560510788 +954 6 training.batch_size 2.0 +954 6 training.label_smoothing 0.002950026040806602 +954 7 model.embedding_dim 2.0 +954 7 model.relation_dim 0.0 +954 7 model.dropout_0 0.38814955005018553 +954 7 model.dropout_1 0.40111703581762226 +954 7 model.dropout_2 0.1767498016064868 +954 7 optimizer.lr 0.02597065341227154 
+954 7 training.batch_size 0.0 +954 7 training.label_smoothing 0.0015961785671855032 +954 8 model.embedding_dim 1.0 +954 8 model.relation_dim 0.0 +954 8 model.dropout_0 0.30962104079400277 +954 8 model.dropout_1 0.4512096253719955 +954 8 model.dropout_2 0.28462910500996397 +954 8 optimizer.lr 0.005242231350504981 +954 8 training.batch_size 1.0 +954 8 training.label_smoothing 0.4276685910781151 +954 9 model.embedding_dim 1.0 +954 9 model.relation_dim 0.0 +954 9 model.dropout_0 0.39897639940087104 +954 9 model.dropout_1 0.36307242690559677 +954 9 model.dropout_2 0.3032144398224158 +954 9 optimizer.lr 0.016858673881971865 +954 9 training.batch_size 1.0 +954 9 training.label_smoothing 0.0014886431270025105 +954 10 model.embedding_dim 2.0 +954 10 model.relation_dim 2.0 +954 10 model.dropout_0 0.3809962098822568 +954 10 model.dropout_1 0.43853564430183833 +954 10 model.dropout_2 0.20402488159329893 +954 10 optimizer.lr 0.0648937906977762 +954 10 training.batch_size 0.0 +954 10 training.label_smoothing 0.12525521074438992 +954 11 model.embedding_dim 2.0 +954 11 model.relation_dim 1.0 +954 11 model.dropout_0 0.40177258007278377 +954 11 model.dropout_1 0.27222090327326376 +954 11 model.dropout_2 0.25251412056885475 +954 11 optimizer.lr 0.07538280900426816 +954 11 training.batch_size 2.0 +954 11 training.label_smoothing 0.883992812199666 +954 12 model.embedding_dim 2.0 +954 12 model.relation_dim 2.0 +954 12 model.dropout_0 0.39141222332281056 +954 12 model.dropout_1 0.1930361879125264 +954 12 model.dropout_2 0.22799450575445307 +954 12 optimizer.lr 0.00453308378496187 +954 12 training.batch_size 1.0 +954 12 training.label_smoothing 0.36639388391125194 +954 1 dataset """fb15k237""" +954 1 model """tucker""" +954 1 loss """softplus""" +954 1 regularizer """no""" +954 1 optimizer """adam""" +954 1 training_loop """lcwa""" +954 1 evaluator """rankbased""" +954 2 dataset """fb15k237""" +954 2 model """tucker""" +954 2 loss """softplus""" +954 2 regularizer """no""" +954 2 
optimizer """adam""" +954 2 training_loop """lcwa""" +954 2 evaluator """rankbased""" +954 3 dataset """fb15k237""" +954 3 model """tucker""" +954 3 loss """softplus""" +954 3 regularizer """no""" +954 3 optimizer """adam""" +954 3 training_loop """lcwa""" +954 3 evaluator """rankbased""" +954 4 dataset """fb15k237""" +954 4 model """tucker""" +954 4 loss """softplus""" +954 4 regularizer """no""" +954 4 optimizer """adam""" +954 4 training_loop """lcwa""" +954 4 evaluator """rankbased""" +954 5 dataset """fb15k237""" +954 5 model """tucker""" +954 5 loss """softplus""" +954 5 regularizer """no""" +954 5 optimizer """adam""" +954 5 training_loop """lcwa""" +954 5 evaluator """rankbased""" +954 6 dataset """fb15k237""" +954 6 model """tucker""" +954 6 loss """softplus""" +954 6 regularizer """no""" +954 6 optimizer """adam""" +954 6 training_loop """lcwa""" +954 6 evaluator """rankbased""" +954 7 dataset """fb15k237""" +954 7 model """tucker""" +954 7 loss """softplus""" +954 7 regularizer """no""" +954 7 optimizer """adam""" +954 7 training_loop """lcwa""" +954 7 evaluator """rankbased""" +954 8 dataset """fb15k237""" +954 8 model """tucker""" +954 8 loss """softplus""" +954 8 regularizer """no""" +954 8 optimizer """adam""" +954 8 training_loop """lcwa""" +954 8 evaluator """rankbased""" +954 9 dataset """fb15k237""" +954 9 model """tucker""" +954 9 loss """softplus""" +954 9 regularizer """no""" +954 9 optimizer """adam""" +954 9 training_loop """lcwa""" +954 9 evaluator """rankbased""" +954 10 dataset """fb15k237""" +954 10 model """tucker""" +954 10 loss """softplus""" +954 10 regularizer """no""" +954 10 optimizer """adam""" +954 10 training_loop """lcwa""" +954 10 evaluator """rankbased""" +954 11 dataset """fb15k237""" +954 11 model """tucker""" +954 11 loss """softplus""" +954 11 regularizer """no""" +954 11 optimizer """adam""" +954 11 training_loop """lcwa""" +954 11 evaluator """rankbased""" +954 12 dataset """fb15k237""" +954 12 model """tucker""" +954 
12 loss """softplus""" +954 12 regularizer """no""" +954 12 optimizer """adam""" +954 12 training_loop """lcwa""" +954 12 evaluator """rankbased""" +955 1 model.embedding_dim 0.0 +955 1 model.relation_dim 2.0 +955 1 model.dropout_0 0.23151775547915054 +955 1 model.dropout_1 0.2054525196487582 +955 1 model.dropout_2 0.2909088206417615 +955 1 training.batch_size 2.0 +955 1 training.label_smoothing 0.014201391591237388 +955 2 model.embedding_dim 2.0 +955 2 model.relation_dim 1.0 +955 2 model.dropout_0 0.47914738601960605 +955 2 model.dropout_1 0.36708345093766825 +955 2 model.dropout_2 0.4222409186439787 +955 2 training.batch_size 2.0 +955 2 training.label_smoothing 0.0055083829302072125 +955 3 model.embedding_dim 0.0 +955 3 model.relation_dim 2.0 +955 3 model.dropout_0 0.24938798490978198 +955 3 model.dropout_1 0.48366684189235376 +955 3 model.dropout_2 0.18592570383955512 +955 3 training.batch_size 1.0 +955 3 training.label_smoothing 0.1608603572161404 +955 4 model.embedding_dim 1.0 +955 4 model.relation_dim 2.0 +955 4 model.dropout_0 0.49266862359809815 +955 4 model.dropout_1 0.4840487613514294 +955 4 model.dropout_2 0.1654683875958048 +955 4 training.batch_size 1.0 +955 4 training.label_smoothing 0.0021410203253265517 +955 5 model.embedding_dim 1.0 +955 5 model.relation_dim 2.0 +955 5 model.dropout_0 0.4578030173178357 +955 5 model.dropout_1 0.4508024763296792 +955 5 model.dropout_2 0.1499411818898133 +955 5 training.batch_size 0.0 +955 5 training.label_smoothing 0.007454310557845857 +955 6 model.embedding_dim 2.0 +955 6 model.relation_dim 0.0 +955 6 model.dropout_0 0.1907306624896745 +955 6 model.dropout_1 0.37141281702349505 +955 6 model.dropout_2 0.2670113768451121 +955 6 training.batch_size 2.0 +955 6 training.label_smoothing 0.08429212683321681 +955 7 model.embedding_dim 1.0 +955 7 model.relation_dim 1.0 +955 7 model.dropout_0 0.2521228790991291 +955 7 model.dropout_1 0.30954506694779294 +955 7 model.dropout_2 0.3224973679012634 +955 7 training.batch_size 0.0 
+955 7 training.label_smoothing 0.7864184578091149 +955 8 model.embedding_dim 1.0 +955 8 model.relation_dim 2.0 +955 8 model.dropout_0 0.3222259155020536 +955 8 model.dropout_1 0.36339675113073466 +955 8 model.dropout_2 0.4922589642761174 +955 8 training.batch_size 1.0 +955 8 training.label_smoothing 0.0038869910745622234 +955 9 model.embedding_dim 0.0 +955 9 model.relation_dim 1.0 +955 9 model.dropout_0 0.23287907920836082 +955 9 model.dropout_1 0.22119115852260715 +955 9 model.dropout_2 0.20330284542636623 +955 9 training.batch_size 0.0 +955 9 training.label_smoothing 0.05600457686334759 +955 10 model.embedding_dim 0.0 +955 10 model.relation_dim 2.0 +955 10 model.dropout_0 0.3044960532153963 +955 10 model.dropout_1 0.2085114572719126 +955 10 model.dropout_2 0.2036306478550346 +955 10 training.batch_size 0.0 +955 10 training.label_smoothing 0.0589880139370866 +955 11 model.embedding_dim 2.0 +955 11 model.relation_dim 0.0 +955 11 model.dropout_0 0.1743892829362323 +955 11 model.dropout_1 0.20246247042401067 +955 11 model.dropout_2 0.3506642098944702 +955 11 training.batch_size 1.0 +955 11 training.label_smoothing 0.19434307056937866 +955 12 model.embedding_dim 2.0 +955 12 model.relation_dim 0.0 +955 12 model.dropout_0 0.13251474167133237 +955 12 model.dropout_1 0.3459711217915733 +955 12 model.dropout_2 0.2621607106848953 +955 12 training.batch_size 0.0 +955 12 training.label_smoothing 0.01692515084520253 +955 13 model.embedding_dim 0.0 +955 13 model.relation_dim 2.0 +955 13 model.dropout_0 0.470982810456792 +955 13 model.dropout_1 0.12540422182033586 +955 13 model.dropout_2 0.11120073127385806 +955 13 training.batch_size 2.0 +955 13 training.label_smoothing 0.1351884434390185 +955 14 model.embedding_dim 0.0 +955 14 model.relation_dim 2.0 +955 14 model.dropout_0 0.25958325000096305 +955 14 model.dropout_1 0.3887555534363326 +955 14 model.dropout_2 0.16714173125443987 +955 14 training.batch_size 1.0 +955 14 training.label_smoothing 0.7598045637263294 +955 15 
model.embedding_dim 2.0 +955 15 model.relation_dim 0.0 +955 15 model.dropout_0 0.20471994060156976 +955 15 model.dropout_1 0.2589130784289827 +955 15 model.dropout_2 0.4430629560034366 +955 15 training.batch_size 1.0 +955 15 training.label_smoothing 0.0012083107442587135 +955 16 model.embedding_dim 0.0 +955 16 model.relation_dim 1.0 +955 16 model.dropout_0 0.4344301845789895 +955 16 model.dropout_1 0.2979193577061295 +955 16 model.dropout_2 0.13153058891162672 +955 16 training.batch_size 0.0 +955 16 training.label_smoothing 0.08762292424167391 +955 17 model.embedding_dim 0.0 +955 17 model.relation_dim 1.0 +955 17 model.dropout_0 0.10653150139843524 +955 17 model.dropout_1 0.44127566574089017 +955 17 model.dropout_2 0.14743538718881188 +955 17 training.batch_size 1.0 +955 17 training.label_smoothing 0.21084856261182228 +955 18 model.embedding_dim 2.0 +955 18 model.relation_dim 0.0 +955 18 model.dropout_0 0.3576055445294939 +955 18 model.dropout_1 0.10582535070691144 +955 18 model.dropout_2 0.47234427955476344 +955 18 training.batch_size 1.0 +955 18 training.label_smoothing 0.0465671288456339 +955 19 model.embedding_dim 2.0 +955 19 model.relation_dim 2.0 +955 19 model.dropout_0 0.3026841398668866 +955 19 model.dropout_1 0.31770812239205004 +955 19 model.dropout_2 0.2824097877827721 +955 19 training.batch_size 2.0 +955 19 training.label_smoothing 0.022489618226617877 +955 20 model.embedding_dim 2.0 +955 20 model.relation_dim 1.0 +955 20 model.dropout_0 0.48480166365605315 +955 20 model.dropout_1 0.3850752651309802 +955 20 model.dropout_2 0.35401485928066645 +955 20 training.batch_size 2.0 +955 20 training.label_smoothing 0.002295175021451091 +955 21 model.embedding_dim 2.0 +955 21 model.relation_dim 1.0 +955 21 model.dropout_0 0.24036452369830297 +955 21 model.dropout_1 0.453750267326289 +955 21 model.dropout_2 0.1992393339947563 +955 21 training.batch_size 1.0 +955 21 training.label_smoothing 0.5373984903635786 +955 22 model.embedding_dim 0.0 +955 22 
model.relation_dim 2.0 +955 22 model.dropout_0 0.4054728062941221 +955 22 model.dropout_1 0.11106634437630643 +955 22 model.dropout_2 0.43889195871726905 +955 22 training.batch_size 2.0 +955 22 training.label_smoothing 0.00582161508594685 +955 23 model.embedding_dim 0.0 +955 23 model.relation_dim 1.0 +955 23 model.dropout_0 0.41748562192239513 +955 23 model.dropout_1 0.17714201364540597 +955 23 model.dropout_2 0.44262758140179326 +955 23 training.batch_size 1.0 +955 23 training.label_smoothing 0.4957936470440146 +955 24 model.embedding_dim 1.0 +955 24 model.relation_dim 0.0 +955 24 model.dropout_0 0.42504093023094724 +955 24 model.dropout_1 0.2924254955900507 +955 24 model.dropout_2 0.15053522696268595 +955 24 training.batch_size 1.0 +955 24 training.label_smoothing 0.03530400264452416 +955 25 model.embedding_dim 1.0 +955 25 model.relation_dim 0.0 +955 25 model.dropout_0 0.4064609798126744 +955 25 model.dropout_1 0.4716622386086341 +955 25 model.dropout_2 0.27077283889349074 +955 25 training.batch_size 0.0 +955 25 training.label_smoothing 0.43110441747524775 +955 26 model.embedding_dim 0.0 +955 26 model.relation_dim 0.0 +955 26 model.dropout_0 0.38694480647063545 +955 26 model.dropout_1 0.46827811269484876 +955 26 model.dropout_2 0.44387946325628835 +955 26 training.batch_size 0.0 +955 26 training.label_smoothing 0.11260986944623408 +955 27 model.embedding_dim 0.0 +955 27 model.relation_dim 1.0 +955 27 model.dropout_0 0.3101744204054848 +955 27 model.dropout_1 0.12333966017319785 +955 27 model.dropout_2 0.26655582776882314 +955 27 training.batch_size 2.0 +955 27 training.label_smoothing 0.04689818213355417 +955 28 model.embedding_dim 0.0 +955 28 model.relation_dim 1.0 +955 28 model.dropout_0 0.14354042559341473 +955 28 model.dropout_1 0.2644646315304413 +955 28 model.dropout_2 0.49249213817907833 +955 28 training.batch_size 2.0 +955 28 training.label_smoothing 0.002062713746180122 +955 29 model.embedding_dim 0.0 +955 29 model.relation_dim 1.0 +955 29 
model.dropout_0 0.47364371052652565 +955 29 model.dropout_1 0.2597436064897163 +955 29 model.dropout_2 0.4131601477279546 +955 29 training.batch_size 0.0 +955 29 training.label_smoothing 0.006198028189968725 +955 30 model.embedding_dim 2.0 +955 30 model.relation_dim 1.0 +955 30 model.dropout_0 0.1013376880651137 +955 30 model.dropout_1 0.4120897368637213 +955 30 model.dropout_2 0.2856525931634484 +955 30 training.batch_size 1.0 +955 30 training.label_smoothing 0.003452480649746706 +955 31 model.embedding_dim 1.0 +955 31 model.relation_dim 2.0 +955 31 model.dropout_0 0.4148087796645251 +955 31 model.dropout_1 0.343215695244821 +955 31 model.dropout_2 0.26100753078619376 +955 31 training.batch_size 1.0 +955 31 training.label_smoothing 0.2110961080026903 +955 32 model.embedding_dim 0.0 +955 32 model.relation_dim 0.0 +955 32 model.dropout_0 0.27849410637106486 +955 32 model.dropout_1 0.42864573932468675 +955 32 model.dropout_2 0.2867703156099973 +955 32 training.batch_size 0.0 +955 32 training.label_smoothing 0.058264814257104794 +955 33 model.embedding_dim 2.0 +955 33 model.relation_dim 2.0 +955 33 model.dropout_0 0.4859854827554039 +955 33 model.dropout_1 0.4975457522205122 +955 33 model.dropout_2 0.48547042719684075 +955 33 training.batch_size 0.0 +955 33 training.label_smoothing 0.7994279902732645 +955 34 model.embedding_dim 1.0 +955 34 model.relation_dim 1.0 +955 34 model.dropout_0 0.15660125393946633 +955 34 model.dropout_1 0.3712708414604202 +955 34 model.dropout_2 0.2305089655454351 +955 34 training.batch_size 2.0 +955 34 training.label_smoothing 0.003262007082776291 +955 35 model.embedding_dim 2.0 +955 35 model.relation_dim 0.0 +955 35 model.dropout_0 0.393307784496845 +955 35 model.dropout_1 0.3173577207791666 +955 35 model.dropout_2 0.221403662689259 +955 35 training.batch_size 0.0 +955 35 training.label_smoothing 0.4355389526312254 +955 36 model.embedding_dim 1.0 +955 36 model.relation_dim 0.0 +955 36 model.dropout_0 0.345769868879241 +955 36 
model.dropout_1 0.4390646758719863 +955 36 model.dropout_2 0.4782961185325263 +955 36 training.batch_size 1.0 +955 36 training.label_smoothing 0.18057938827737383 +955 37 model.embedding_dim 2.0 +955 37 model.relation_dim 1.0 +955 37 model.dropout_0 0.1866271987516969 +955 37 model.dropout_1 0.3726524718361981 +955 37 model.dropout_2 0.3074844721695066 +955 37 training.batch_size 0.0 +955 37 training.label_smoothing 0.13063410672925188 +955 38 model.embedding_dim 1.0 +955 38 model.relation_dim 1.0 +955 38 model.dropout_0 0.18544103595200123 +955 38 model.dropout_1 0.3719355996590409 +955 38 model.dropout_2 0.4968832886384197 +955 38 training.batch_size 1.0 +955 38 training.label_smoothing 0.004989044412653052 +955 39 model.embedding_dim 2.0 +955 39 model.relation_dim 1.0 +955 39 model.dropout_0 0.48034807086617515 +955 39 model.dropout_1 0.1626802042987876 +955 39 model.dropout_2 0.27184826620147184 +955 39 training.batch_size 1.0 +955 39 training.label_smoothing 0.4797168488546701 +955 40 model.embedding_dim 1.0 +955 40 model.relation_dim 0.0 +955 40 model.dropout_0 0.10487939175974642 +955 40 model.dropout_1 0.12992049993625254 +955 40 model.dropout_2 0.3008331506535753 +955 40 training.batch_size 1.0 +955 40 training.label_smoothing 0.002224540043764614 +955 41 model.embedding_dim 0.0 +955 41 model.relation_dim 2.0 +955 41 model.dropout_0 0.4897799757428837 +955 41 model.dropout_1 0.2517461845071157 +955 41 model.dropout_2 0.38824408563476853 +955 41 training.batch_size 2.0 +955 41 training.label_smoothing 0.19830718450572554 +955 42 model.embedding_dim 2.0 +955 42 model.relation_dim 0.0 +955 42 model.dropout_0 0.23093295229218144 +955 42 model.dropout_1 0.2323635002095836 +955 42 model.dropout_2 0.14202090941187306 +955 42 training.batch_size 1.0 +955 42 training.label_smoothing 0.0354314052179131 +955 43 model.embedding_dim 2.0 +955 43 model.relation_dim 2.0 +955 43 model.dropout_0 0.4961877633395837 +955 43 model.dropout_1 0.17969462856679722 +955 43 
model.dropout_2 0.4197508140644809 +955 43 training.batch_size 0.0 +955 43 training.label_smoothing 0.006308502126139492 +955 44 model.embedding_dim 0.0 +955 44 model.relation_dim 0.0 +955 44 model.dropout_0 0.23250587555206592 +955 44 model.dropout_1 0.2634642100328837 +955 44 model.dropout_2 0.4153549087037167 +955 44 training.batch_size 1.0 +955 44 training.label_smoothing 0.215407675605117 +955 45 model.embedding_dim 1.0 +955 45 model.relation_dim 2.0 +955 45 model.dropout_0 0.26500124495426625 +955 45 model.dropout_1 0.3897585792548328 +955 45 model.dropout_2 0.4306201823243304 +955 45 training.batch_size 2.0 +955 45 training.label_smoothing 0.13617920676913542 +955 46 model.embedding_dim 2.0 +955 46 model.relation_dim 1.0 +955 46 model.dropout_0 0.18722184718152668 +955 46 model.dropout_1 0.3379638772539698 +955 46 model.dropout_2 0.32801192069021257 +955 46 training.batch_size 2.0 +955 46 training.label_smoothing 0.09542747429599505 +955 47 model.embedding_dim 1.0 +955 47 model.relation_dim 0.0 +955 47 model.dropout_0 0.11990801384760826 +955 47 model.dropout_1 0.24635465941901868 +955 47 model.dropout_2 0.32463846295270227 +955 47 training.batch_size 0.0 +955 47 training.label_smoothing 0.001806339700505887 +955 48 model.embedding_dim 2.0 +955 48 model.relation_dim 0.0 +955 48 model.dropout_0 0.2251169897065064 +955 48 model.dropout_1 0.4617364577643155 +955 48 model.dropout_2 0.31245305399748036 +955 48 training.batch_size 0.0 +955 48 training.label_smoothing 0.002182294454937967 +955 49 model.embedding_dim 0.0 +955 49 model.relation_dim 0.0 +955 49 model.dropout_0 0.368612405014323 +955 49 model.dropout_1 0.3824039472124898 +955 49 model.dropout_2 0.43474370895185377 +955 49 training.batch_size 1.0 +955 49 training.label_smoothing 0.40061052597038377 +955 50 model.embedding_dim 1.0 +955 50 model.relation_dim 0.0 +955 50 model.dropout_0 0.15355676836674417 +955 50 model.dropout_1 0.3627537368009083 +955 50 model.dropout_2 0.12463392023284126 +955 50 
training.batch_size 0.0 +955 50 training.label_smoothing 0.4427371055064093 +955 51 model.embedding_dim 2.0 +955 51 model.relation_dim 0.0 +955 51 model.dropout_0 0.22765654830245446 +955 51 model.dropout_1 0.11825732091006787 +955 51 model.dropout_2 0.25703781359275163 +955 51 training.batch_size 2.0 +955 51 training.label_smoothing 0.22622515727178097 +955 52 model.embedding_dim 0.0 +955 52 model.relation_dim 2.0 +955 52 model.dropout_0 0.20274933320880476 +955 52 model.dropout_1 0.29836834481888364 +955 52 model.dropout_2 0.4803293555828654 +955 52 training.batch_size 2.0 +955 52 training.label_smoothing 0.1472237305344458 +955 53 model.embedding_dim 0.0 +955 53 model.relation_dim 0.0 +955 53 model.dropout_0 0.3949907878994948 +955 53 model.dropout_1 0.44903733864812 +955 53 model.dropout_2 0.24178633217141798 +955 53 training.batch_size 1.0 +955 53 training.label_smoothing 0.0071547561709306646 +955 54 model.embedding_dim 0.0 +955 54 model.relation_dim 2.0 +955 54 model.dropout_0 0.36730097337656986 +955 54 model.dropout_1 0.26601437596612354 +955 54 model.dropout_2 0.10369863747018684 +955 54 training.batch_size 2.0 +955 54 training.label_smoothing 0.6742732278478086 +955 55 model.embedding_dim 1.0 +955 55 model.relation_dim 0.0 +955 55 model.dropout_0 0.4785298277627966 +955 55 model.dropout_1 0.11012267942712449 +955 55 model.dropout_2 0.44855369424926883 +955 55 training.batch_size 2.0 +955 55 training.label_smoothing 0.011222947510055972 +955 56 model.embedding_dim 1.0 +955 56 model.relation_dim 1.0 +955 56 model.dropout_0 0.4491257484075345 +955 56 model.dropout_1 0.211241258435803 +955 56 model.dropout_2 0.23947769413228803 +955 56 training.batch_size 0.0 +955 56 training.label_smoothing 0.009009430607894471 +955 57 model.embedding_dim 2.0 +955 57 model.relation_dim 1.0 +955 57 model.dropout_0 0.14495863790770894 +955 57 model.dropout_1 0.1309380648612447 +955 57 model.dropout_2 0.39101555886296624 +955 57 training.batch_size 2.0 +955 57 
training.label_smoothing 0.01378509784033027 +955 58 model.embedding_dim 1.0 +955 58 model.relation_dim 0.0 +955 58 model.dropout_0 0.3699890349612335 +955 58 model.dropout_1 0.1312041670124925 +955 58 model.dropout_2 0.25190804640936637 +955 58 training.batch_size 0.0 +955 58 training.label_smoothing 0.009753035986659645 +955 59 model.embedding_dim 0.0 +955 59 model.relation_dim 1.0 +955 59 model.dropout_0 0.2829052697676714 +955 59 model.dropout_1 0.3631294004358073 +955 59 model.dropout_2 0.21477694960791896 +955 59 training.batch_size 0.0 +955 59 training.label_smoothing 0.2811098494603552 +955 60 model.embedding_dim 1.0 +955 60 model.relation_dim 2.0 +955 60 model.dropout_0 0.2582813812795586 +955 60 model.dropout_1 0.34929864808719385 +955 60 model.dropout_2 0.45101750808304975 +955 60 training.batch_size 0.0 +955 60 training.label_smoothing 0.0104214749134871 +955 61 model.embedding_dim 1.0 +955 61 model.relation_dim 0.0 +955 61 model.dropout_0 0.126527222410355 +955 61 model.dropout_1 0.15201182410547553 +955 61 model.dropout_2 0.37880500220948765 +955 61 training.batch_size 2.0 +955 61 training.label_smoothing 0.005343039073459835 +955 62 model.embedding_dim 2.0 +955 62 model.relation_dim 1.0 +955 62 model.dropout_0 0.3182621013787895 +955 62 model.dropout_1 0.16666000776759277 +955 62 model.dropout_2 0.2282419904122649 +955 62 training.batch_size 2.0 +955 62 training.label_smoothing 0.026207403388611637 +955 63 model.embedding_dim 2.0 +955 63 model.relation_dim 0.0 +955 63 model.dropout_0 0.26606049210419824 +955 63 model.dropout_1 0.12765434750825755 +955 63 model.dropout_2 0.35840385839408895 +955 63 training.batch_size 2.0 +955 63 training.label_smoothing 0.01229633890911909 +955 64 model.embedding_dim 1.0 +955 64 model.relation_dim 1.0 +955 64 model.dropout_0 0.4651889313629477 +955 64 model.dropout_1 0.1967819531881994 +955 64 model.dropout_2 0.36388282371874603 +955 64 training.batch_size 2.0 +955 64 training.label_smoothing 0.08741805855445034 +955 
65 model.embedding_dim 0.0 +955 65 model.relation_dim 1.0 +955 65 model.dropout_0 0.4258078436171031 +955 65 model.dropout_1 0.4127231791743301 +955 65 model.dropout_2 0.1399857464164779 +955 65 training.batch_size 2.0 +955 65 training.label_smoothing 0.021168929979183 +955 66 model.embedding_dim 2.0 +955 66 model.relation_dim 2.0 +955 66 model.dropout_0 0.2362272038074924 +955 66 model.dropout_1 0.12442775585089648 +955 66 model.dropout_2 0.42418383762310363 +955 66 training.batch_size 1.0 +955 66 training.label_smoothing 0.01615008623454546 +955 67 model.embedding_dim 2.0 +955 67 model.relation_dim 0.0 +955 67 model.dropout_0 0.3975308526979391 +955 67 model.dropout_1 0.3050159020720636 +955 67 model.dropout_2 0.2491636512386319 +955 67 training.batch_size 0.0 +955 67 training.label_smoothing 0.012595590959717705 +955 68 model.embedding_dim 1.0 +955 68 model.relation_dim 2.0 +955 68 model.dropout_0 0.17644348310791177 +955 68 model.dropout_1 0.2466044601241762 +955 68 model.dropout_2 0.4763229803462322 +955 68 training.batch_size 0.0 +955 68 training.label_smoothing 0.5678757996283987 +955 69 model.embedding_dim 2.0 +955 69 model.relation_dim 1.0 +955 69 model.dropout_0 0.20053741616151868 +955 69 model.dropout_1 0.36779193073412575 +955 69 model.dropout_2 0.26830929200712483 +955 69 training.batch_size 0.0 +955 69 training.label_smoothing 0.22066674254720625 +955 70 model.embedding_dim 2.0 +955 70 model.relation_dim 0.0 +955 70 model.dropout_0 0.17298986729401325 +955 70 model.dropout_1 0.1280172397847648 +955 70 model.dropout_2 0.19846253931173044 +955 70 training.batch_size 2.0 +955 70 training.label_smoothing 0.0572062108291352 +955 71 model.embedding_dim 1.0 +955 71 model.relation_dim 1.0 +955 71 model.dropout_0 0.28345927561734074 +955 71 model.dropout_1 0.4661239010476975 +955 71 model.dropout_2 0.10100752413549739 +955 71 training.batch_size 0.0 +955 71 training.label_smoothing 0.007365507965932914 +955 72 model.embedding_dim 2.0 +955 72 
model.relation_dim 1.0 +955 72 model.dropout_0 0.21470158026688343 +955 72 model.dropout_1 0.48520439686142935 +955 72 model.dropout_2 0.16299101696539448 +955 72 training.batch_size 0.0 +955 72 training.label_smoothing 0.025472778913267706 +955 73 model.embedding_dim 2.0 +955 73 model.relation_dim 1.0 +955 73 model.dropout_0 0.42608254792829714 +955 73 model.dropout_1 0.19190974141154285 +955 73 model.dropout_2 0.21749004022961638 +955 73 training.batch_size 0.0 +955 73 training.label_smoothing 0.818888452590948 +955 74 model.embedding_dim 2.0 +955 74 model.relation_dim 0.0 +955 74 model.dropout_0 0.4806100031395738 +955 74 model.dropout_1 0.16175113459275853 +955 74 model.dropout_2 0.463027924085457 +955 74 training.batch_size 0.0 +955 74 training.label_smoothing 0.1109230087434255 +955 75 model.embedding_dim 0.0 +955 75 model.relation_dim 1.0 +955 75 model.dropout_0 0.2032302250363149 +955 75 model.dropout_1 0.4441815805739465 +955 75 model.dropout_2 0.4733130713127617 +955 75 training.batch_size 0.0 +955 75 training.label_smoothing 0.040212858557236095 +955 76 model.embedding_dim 1.0 +955 76 model.relation_dim 2.0 +955 76 model.dropout_0 0.46638016037707475 +955 76 model.dropout_1 0.3521064602965269 +955 76 model.dropout_2 0.24970978908300123 +955 76 training.batch_size 1.0 +955 76 training.label_smoothing 0.02468469583156592 +955 77 model.embedding_dim 1.0 +955 77 model.relation_dim 2.0 +955 77 model.dropout_0 0.3622710121462889 +955 77 model.dropout_1 0.4133943996012778 +955 77 model.dropout_2 0.18195926922931985 +955 77 training.batch_size 0.0 +955 77 training.label_smoothing 0.01208042397712577 +955 78 model.embedding_dim 2.0 +955 78 model.relation_dim 2.0 +955 78 model.dropout_0 0.156629668155543 +955 78 model.dropout_1 0.4034556348688846 +955 78 model.dropout_2 0.4540543358826472 +955 78 training.batch_size 0.0 +955 78 training.label_smoothing 0.08560521350280945 +955 79 model.embedding_dim 0.0 +955 79 model.relation_dim 0.0 +955 79 model.dropout_0 
0.4990355024570222 +955 79 model.dropout_1 0.1584995328194505 +955 79 model.dropout_2 0.45390094918539015 +955 79 training.batch_size 2.0 +955 79 training.label_smoothing 0.0017813798027035352 +955 80 model.embedding_dim 2.0 +955 80 model.relation_dim 1.0 +955 80 model.dropout_0 0.14173987507187982 +955 80 model.dropout_1 0.2354096037314978 +955 80 model.dropout_2 0.29063272866445256 +955 80 training.batch_size 2.0 +955 80 training.label_smoothing 0.02039566257479607 +955 81 model.embedding_dim 1.0 +955 81 model.relation_dim 2.0 +955 81 model.dropout_0 0.2225417416639288 +955 81 model.dropout_1 0.28576718401832724 +955 81 model.dropout_2 0.40591756633433584 +955 81 training.batch_size 1.0 +955 81 training.label_smoothing 0.0048483863666687185 +955 82 model.embedding_dim 2.0 +955 82 model.relation_dim 1.0 +955 82 model.dropout_0 0.34904060570632023 +955 82 model.dropout_1 0.17379854461562433 +955 82 model.dropout_2 0.20757876044387524 +955 82 training.batch_size 1.0 +955 82 training.label_smoothing 0.044887341239170334 +955 83 model.embedding_dim 0.0 +955 83 model.relation_dim 1.0 +955 83 model.dropout_0 0.3686010244440173 +955 83 model.dropout_1 0.18156794292935988 +955 83 model.dropout_2 0.27512552131359214 +955 83 training.batch_size 0.0 +955 83 training.label_smoothing 0.0016132535253585497 +955 84 model.embedding_dim 2.0 +955 84 model.relation_dim 0.0 +955 84 model.dropout_0 0.17341585031140938 +955 84 model.dropout_1 0.11204325963835382 +955 84 model.dropout_2 0.19753606257142758 +955 84 training.batch_size 1.0 +955 84 training.label_smoothing 0.03406470692960932 +955 85 model.embedding_dim 1.0 +955 85 model.relation_dim 0.0 +955 85 model.dropout_0 0.44925529895190464 +955 85 model.dropout_1 0.2269525266073381 +955 85 model.dropout_2 0.3088100732673258 +955 85 training.batch_size 1.0 +955 85 training.label_smoothing 0.044421970740067006 +955 86 model.embedding_dim 2.0 +955 86 model.relation_dim 1.0 +955 86 model.dropout_0 0.41360000959160537 +955 86 
model.dropout_1 0.39831448772566813 +955 86 model.dropout_2 0.4080666386325469 +955 86 training.batch_size 0.0 +955 86 training.label_smoothing 0.018601393821309428 +955 87 model.embedding_dim 0.0 +955 87 model.relation_dim 1.0 +955 87 model.dropout_0 0.483528631968147 +955 87 model.dropout_1 0.45541046493474446 +955 87 model.dropout_2 0.18527703945312282 +955 87 training.batch_size 0.0 +955 87 training.label_smoothing 0.5757225690965629 +955 88 model.embedding_dim 2.0 +955 88 model.relation_dim 2.0 +955 88 model.dropout_0 0.22903796033411916 +955 88 model.dropout_1 0.4576205996782302 +955 88 model.dropout_2 0.15183281128939782 +955 88 training.batch_size 2.0 +955 88 training.label_smoothing 0.04241630679413985 +955 89 model.embedding_dim 2.0 +955 89 model.relation_dim 0.0 +955 89 model.dropout_0 0.11142495186669442 +955 89 model.dropout_1 0.12007693757636827 +955 89 model.dropout_2 0.14407803355084536 +955 89 training.batch_size 2.0 +955 89 training.label_smoothing 0.013671607773971518 +955 90 model.embedding_dim 2.0 +955 90 model.relation_dim 0.0 +955 90 model.dropout_0 0.3060887932193438 +955 90 model.dropout_1 0.4758182702410332 +955 90 model.dropout_2 0.18476225270703442 +955 90 training.batch_size 2.0 +955 90 training.label_smoothing 0.05379226047893695 +955 91 model.embedding_dim 0.0 +955 91 model.relation_dim 1.0 +955 91 model.dropout_0 0.2612844675453535 +955 91 model.dropout_1 0.3695859082186356 +955 91 model.dropout_2 0.2815332568092139 +955 91 training.batch_size 1.0 +955 91 training.label_smoothing 0.23228665402764734 +955 92 model.embedding_dim 2.0 +955 92 model.relation_dim 1.0 +955 92 model.dropout_0 0.19210826755811677 +955 92 model.dropout_1 0.42979392879410316 +955 92 model.dropout_2 0.43182941932279384 +955 92 training.batch_size 2.0 +955 92 training.label_smoothing 0.13475944710922907 +955 93 model.embedding_dim 0.0 +955 93 model.relation_dim 2.0 +955 93 model.dropout_0 0.2601456585648466 +955 93 model.dropout_1 0.1261361519057853 +955 93 
model.dropout_2 0.2838603268512912 +955 93 training.batch_size 1.0 +955 93 training.label_smoothing 0.16367082970154828 +955 94 model.embedding_dim 0.0 +955 94 model.relation_dim 1.0 +955 94 model.dropout_0 0.3658059446207288 +955 94 model.dropout_1 0.16217187014843948 +955 94 model.dropout_2 0.3874481305381591 +955 94 training.batch_size 2.0 +955 94 training.label_smoothing 0.00828576982007038 +955 95 model.embedding_dim 0.0 +955 95 model.relation_dim 0.0 +955 95 model.dropout_0 0.31589545550622394 +955 95 model.dropout_1 0.4660035694570184 +955 95 model.dropout_2 0.15437021865218356 +955 95 training.batch_size 2.0 +955 95 training.label_smoothing 0.8672429451905244 +955 96 model.embedding_dim 0.0 +955 96 model.relation_dim 1.0 +955 96 model.dropout_0 0.4784849908629947 +955 96 model.dropout_1 0.28819908591134125 +955 96 model.dropout_2 0.46924923659328377 +955 96 training.batch_size 1.0 +955 96 training.label_smoothing 0.1104203759476624 +955 97 model.embedding_dim 1.0 +955 97 model.relation_dim 0.0 +955 97 model.dropout_0 0.19879444667599439 +955 97 model.dropout_1 0.19413113439246704 +955 97 model.dropout_2 0.22735970094166524 +955 97 training.batch_size 0.0 +955 97 training.label_smoothing 0.3371154456141391 +955 98 model.embedding_dim 0.0 +955 98 model.relation_dim 1.0 +955 98 model.dropout_0 0.46317359922813695 +955 98 model.dropout_1 0.2030202195377711 +955 98 model.dropout_2 0.49206172091618283 +955 98 training.batch_size 2.0 +955 98 training.label_smoothing 0.0034113825363437764 +955 99 model.embedding_dim 0.0 +955 99 model.relation_dim 1.0 +955 99 model.dropout_0 0.17119627579103983 +955 99 model.dropout_1 0.12129914473715893 +955 99 model.dropout_2 0.24500042535564034 +955 99 training.batch_size 0.0 +955 99 training.label_smoothing 0.16991123088336502 +955 100 model.embedding_dim 2.0 +955 100 model.relation_dim 2.0 +955 100 model.dropout_0 0.2505236133642873 +955 100 model.dropout_1 0.15549697617264233 +955 100 model.dropout_2 0.1930647822116762 +955 
100 training.batch_size 1.0 +955 100 training.label_smoothing 0.01330124266710447 +955 1 dataset """kinships""" +955 1 model """tucker""" +955 1 loss """bceaftersigmoid""" +955 1 regularizer """no""" +955 1 optimizer """adadelta""" +955 1 training_loop """lcwa""" +955 1 evaluator """rankbased""" +955 2 dataset """kinships""" +955 2 model """tucker""" +955 2 loss """bceaftersigmoid""" +955 2 regularizer """no""" +955 2 optimizer """adadelta""" +955 2 training_loop """lcwa""" +955 2 evaluator """rankbased""" +955 3 dataset """kinships""" +955 3 model """tucker""" +955 3 loss """bceaftersigmoid""" +955 3 regularizer """no""" +955 3 optimizer """adadelta""" +955 3 training_loop """lcwa""" +955 3 evaluator """rankbased""" +955 4 dataset """kinships""" +955 4 model """tucker""" +955 4 loss """bceaftersigmoid""" +955 4 regularizer """no""" +955 4 optimizer """adadelta""" +955 4 training_loop """lcwa""" +955 4 evaluator """rankbased""" +955 5 dataset """kinships""" +955 5 model """tucker""" +955 5 loss """bceaftersigmoid""" +955 5 regularizer """no""" +955 5 optimizer """adadelta""" +955 5 training_loop """lcwa""" +955 5 evaluator """rankbased""" +955 6 dataset """kinships""" +955 6 model """tucker""" +955 6 loss """bceaftersigmoid""" +955 6 regularizer """no""" +955 6 optimizer """adadelta""" +955 6 training_loop """lcwa""" +955 6 evaluator """rankbased""" +955 7 dataset """kinships""" +955 7 model """tucker""" +955 7 loss """bceaftersigmoid""" +955 7 regularizer """no""" +955 7 optimizer """adadelta""" +955 7 training_loop """lcwa""" +955 7 evaluator """rankbased""" +955 8 dataset """kinships""" +955 8 model """tucker""" +955 8 loss """bceaftersigmoid""" +955 8 regularizer """no""" +955 8 optimizer """adadelta""" +955 8 training_loop """lcwa""" +955 8 evaluator """rankbased""" +955 9 dataset """kinships""" +955 9 model """tucker""" +955 9 loss """bceaftersigmoid""" +955 9 regularizer """no""" +955 9 optimizer """adadelta""" +955 9 training_loop """lcwa""" +955 9 
evaluator """rankbased""" +955 10 dataset """kinships""" +955 10 model """tucker""" +955 10 loss """bceaftersigmoid""" +955 10 regularizer """no""" +955 10 optimizer """adadelta""" +955 10 training_loop """lcwa""" +955 10 evaluator """rankbased""" +955 11 dataset """kinships""" +955 11 model """tucker""" +955 11 loss """bceaftersigmoid""" +955 11 regularizer """no""" +955 11 optimizer """adadelta""" +955 11 training_loop """lcwa""" +955 11 evaluator """rankbased""" +955 12 dataset """kinships""" +955 12 model """tucker""" +955 12 loss """bceaftersigmoid""" +955 12 regularizer """no""" +955 12 optimizer """adadelta""" +955 12 training_loop """lcwa""" +955 12 evaluator """rankbased""" +955 13 dataset """kinships""" +955 13 model """tucker""" +955 13 loss """bceaftersigmoid""" +955 13 regularizer """no""" +955 13 optimizer """adadelta""" +955 13 training_loop """lcwa""" +955 13 evaluator """rankbased""" +955 14 dataset """kinships""" +955 14 model """tucker""" +955 14 loss """bceaftersigmoid""" +955 14 regularizer """no""" +955 14 optimizer """adadelta""" +955 14 training_loop """lcwa""" +955 14 evaluator """rankbased""" +955 15 dataset """kinships""" +955 15 model """tucker""" +955 15 loss """bceaftersigmoid""" +955 15 regularizer """no""" +955 15 optimizer """adadelta""" +955 15 training_loop """lcwa""" +955 15 evaluator """rankbased""" +955 16 dataset """kinships""" +955 16 model """tucker""" +955 16 loss """bceaftersigmoid""" +955 16 regularizer """no""" +955 16 optimizer """adadelta""" +955 16 training_loop """lcwa""" +955 16 evaluator """rankbased""" +955 17 dataset """kinships""" +955 17 model """tucker""" +955 17 loss """bceaftersigmoid""" +955 17 regularizer """no""" +955 17 optimizer """adadelta""" +955 17 training_loop """lcwa""" +955 17 evaluator """rankbased""" +955 18 dataset """kinships""" +955 18 model """tucker""" +955 18 loss """bceaftersigmoid""" +955 18 regularizer """no""" +955 18 optimizer """adadelta""" +955 18 training_loop """lcwa""" +955 18 
evaluator """rankbased""" +955 19 dataset """kinships""" +955 19 model """tucker""" +955 19 loss """bceaftersigmoid""" +955 19 regularizer """no""" +955 19 optimizer """adadelta""" +955 19 training_loop """lcwa""" +955 19 evaluator """rankbased""" +955 20 dataset """kinships""" +955 20 model """tucker""" +955 20 loss """bceaftersigmoid""" +955 20 regularizer """no""" +955 20 optimizer """adadelta""" +955 20 training_loop """lcwa""" +955 20 evaluator """rankbased""" +955 21 dataset """kinships""" +955 21 model """tucker""" +955 21 loss """bceaftersigmoid""" +955 21 regularizer """no""" +955 21 optimizer """adadelta""" +955 21 training_loop """lcwa""" +955 21 evaluator """rankbased""" +955 22 dataset """kinships""" +955 22 model """tucker""" +955 22 loss """bceaftersigmoid""" +955 22 regularizer """no""" +955 22 optimizer """adadelta""" +955 22 training_loop """lcwa""" +955 22 evaluator """rankbased""" +955 23 dataset """kinships""" +955 23 model """tucker""" +955 23 loss """bceaftersigmoid""" +955 23 regularizer """no""" +955 23 optimizer """adadelta""" +955 23 training_loop """lcwa""" +955 23 evaluator """rankbased""" +955 24 dataset """kinships""" +955 24 model """tucker""" +955 24 loss """bceaftersigmoid""" +955 24 regularizer """no""" +955 24 optimizer """adadelta""" +955 24 training_loop """lcwa""" +955 24 evaluator """rankbased""" +955 25 dataset """kinships""" +955 25 model """tucker""" +955 25 loss """bceaftersigmoid""" +955 25 regularizer """no""" +955 25 optimizer """adadelta""" +955 25 training_loop """lcwa""" +955 25 evaluator """rankbased""" +955 26 dataset """kinships""" +955 26 model """tucker""" +955 26 loss """bceaftersigmoid""" +955 26 regularizer """no""" +955 26 optimizer """adadelta""" +955 26 training_loop """lcwa""" +955 26 evaluator """rankbased""" +955 27 dataset """kinships""" +955 27 model """tucker""" +955 27 loss """bceaftersigmoid""" +955 27 regularizer """no""" +955 27 optimizer """adadelta""" +955 27 training_loop """lcwa""" +955 27 
evaluator """rankbased""" +955 28 dataset """kinships""" +955 28 model """tucker""" +955 28 loss """bceaftersigmoid""" +955 28 regularizer """no""" +955 28 optimizer """adadelta""" +955 28 training_loop """lcwa""" +955 28 evaluator """rankbased""" +955 29 dataset """kinships""" +955 29 model """tucker""" +955 29 loss """bceaftersigmoid""" +955 29 regularizer """no""" +955 29 optimizer """adadelta""" +955 29 training_loop """lcwa""" +955 29 evaluator """rankbased""" +955 30 dataset """kinships""" +955 30 model """tucker""" +955 30 loss """bceaftersigmoid""" +955 30 regularizer """no""" +955 30 optimizer """adadelta""" +955 30 training_loop """lcwa""" +955 30 evaluator """rankbased""" +955 31 dataset """kinships""" +955 31 model """tucker""" +955 31 loss """bceaftersigmoid""" +955 31 regularizer """no""" +955 31 optimizer """adadelta""" +955 31 training_loop """lcwa""" +955 31 evaluator """rankbased""" +955 32 dataset """kinships""" +955 32 model """tucker""" +955 32 loss """bceaftersigmoid""" +955 32 regularizer """no""" +955 32 optimizer """adadelta""" +955 32 training_loop """lcwa""" +955 32 evaluator """rankbased""" +955 33 dataset """kinships""" +955 33 model """tucker""" +955 33 loss """bceaftersigmoid""" +955 33 regularizer """no""" +955 33 optimizer """adadelta""" +955 33 training_loop """lcwa""" +955 33 evaluator """rankbased""" +955 34 dataset """kinships""" +955 34 model """tucker""" +955 34 loss """bceaftersigmoid""" +955 34 regularizer """no""" +955 34 optimizer """adadelta""" +955 34 training_loop """lcwa""" +955 34 evaluator """rankbased""" +955 35 dataset """kinships""" +955 35 model """tucker""" +955 35 loss """bceaftersigmoid""" +955 35 regularizer """no""" +955 35 optimizer """adadelta""" +955 35 training_loop """lcwa""" +955 35 evaluator """rankbased""" +955 36 dataset """kinships""" +955 36 model """tucker""" +955 36 loss """bceaftersigmoid""" +955 36 regularizer """no""" +955 36 optimizer """adadelta""" +955 36 training_loop """lcwa""" +955 36 
evaluator """rankbased""" +955 37 dataset """kinships""" +955 37 model """tucker""" +955 37 loss """bceaftersigmoid""" +955 37 regularizer """no""" +955 37 optimizer """adadelta""" +955 37 training_loop """lcwa""" +955 37 evaluator """rankbased""" +955 38 dataset """kinships""" +955 38 model """tucker""" +955 38 loss """bceaftersigmoid""" +955 38 regularizer """no""" +955 38 optimizer """adadelta""" +955 38 training_loop """lcwa""" +955 38 evaluator """rankbased""" +955 39 dataset """kinships""" +955 39 model """tucker""" +955 39 loss """bceaftersigmoid""" +955 39 regularizer """no""" +955 39 optimizer """adadelta""" +955 39 training_loop """lcwa""" +955 39 evaluator """rankbased""" +955 40 dataset """kinships""" +955 40 model """tucker""" +955 40 loss """bceaftersigmoid""" +955 40 regularizer """no""" +955 40 optimizer """adadelta""" +955 40 training_loop """lcwa""" +955 40 evaluator """rankbased""" +955 41 dataset """kinships""" +955 41 model """tucker""" +955 41 loss """bceaftersigmoid""" +955 41 regularizer """no""" +955 41 optimizer """adadelta""" +955 41 training_loop """lcwa""" +955 41 evaluator """rankbased""" +955 42 dataset """kinships""" +955 42 model """tucker""" +955 42 loss """bceaftersigmoid""" +955 42 regularizer """no""" +955 42 optimizer """adadelta""" +955 42 training_loop """lcwa""" +955 42 evaluator """rankbased""" +955 43 dataset """kinships""" +955 43 model """tucker""" +955 43 loss """bceaftersigmoid""" +955 43 regularizer """no""" +955 43 optimizer """adadelta""" +955 43 training_loop """lcwa""" +955 43 evaluator """rankbased""" +955 44 dataset """kinships""" +955 44 model """tucker""" +955 44 loss """bceaftersigmoid""" +955 44 regularizer """no""" +955 44 optimizer """adadelta""" +955 44 training_loop """lcwa""" +955 44 evaluator """rankbased""" +955 45 dataset """kinships""" +955 45 model """tucker""" +955 45 loss """bceaftersigmoid""" +955 45 regularizer """no""" +955 45 optimizer """adadelta""" +955 45 training_loop """lcwa""" +955 45 
evaluator """rankbased""" +955 46 dataset """kinships""" +955 46 model """tucker""" +955 46 loss """bceaftersigmoid""" +955 46 regularizer """no""" +955 46 optimizer """adadelta""" +955 46 training_loop """lcwa""" +955 46 evaluator """rankbased""" +955 47 dataset """kinships""" +955 47 model """tucker""" +955 47 loss """bceaftersigmoid""" +955 47 regularizer """no""" +955 47 optimizer """adadelta""" +955 47 training_loop """lcwa""" +955 47 evaluator """rankbased""" +955 48 dataset """kinships""" +955 48 model """tucker""" +955 48 loss """bceaftersigmoid""" +955 48 regularizer """no""" +955 48 optimizer """adadelta""" +955 48 training_loop """lcwa""" +955 48 evaluator """rankbased""" +955 49 dataset """kinships""" +955 49 model """tucker""" +955 49 loss """bceaftersigmoid""" +955 49 regularizer """no""" +955 49 optimizer """adadelta""" +955 49 training_loop """lcwa""" +955 49 evaluator """rankbased""" +955 50 dataset """kinships""" +955 50 model """tucker""" +955 50 loss """bceaftersigmoid""" +955 50 regularizer """no""" +955 50 optimizer """adadelta""" +955 50 training_loop """lcwa""" +955 50 evaluator """rankbased""" +955 51 dataset """kinships""" +955 51 model """tucker""" +955 51 loss """bceaftersigmoid""" +955 51 regularizer """no""" +955 51 optimizer """adadelta""" +955 51 training_loop """lcwa""" +955 51 evaluator """rankbased""" +955 52 dataset """kinships""" +955 52 model """tucker""" +955 52 loss """bceaftersigmoid""" +955 52 regularizer """no""" +955 52 optimizer """adadelta""" +955 52 training_loop """lcwa""" +955 52 evaluator """rankbased""" +955 53 dataset """kinships""" +955 53 model """tucker""" +955 53 loss """bceaftersigmoid""" +955 53 regularizer """no""" +955 53 optimizer """adadelta""" +955 53 training_loop """lcwa""" +955 53 evaluator """rankbased""" +955 54 dataset """kinships""" +955 54 model """tucker""" +955 54 loss """bceaftersigmoid""" +955 54 regularizer """no""" +955 54 optimizer """adadelta""" +955 54 training_loop """lcwa""" +955 54 
evaluator """rankbased""" +955 55 dataset """kinships""" +955 55 model """tucker""" +955 55 loss """bceaftersigmoid""" +955 55 regularizer """no""" +955 55 optimizer """adadelta""" +955 55 training_loop """lcwa""" +955 55 evaluator """rankbased""" +955 56 dataset """kinships""" +955 56 model """tucker""" +955 56 loss """bceaftersigmoid""" +955 56 regularizer """no""" +955 56 optimizer """adadelta""" +955 56 training_loop """lcwa""" +955 56 evaluator """rankbased""" +955 57 dataset """kinships""" +955 57 model """tucker""" +955 57 loss """bceaftersigmoid""" +955 57 regularizer """no""" +955 57 optimizer """adadelta""" +955 57 training_loop """lcwa""" +955 57 evaluator """rankbased""" +955 58 dataset """kinships""" +955 58 model """tucker""" +955 58 loss """bceaftersigmoid""" +955 58 regularizer """no""" +955 58 optimizer """adadelta""" +955 58 training_loop """lcwa""" +955 58 evaluator """rankbased""" +955 59 dataset """kinships""" +955 59 model """tucker""" +955 59 loss """bceaftersigmoid""" +955 59 regularizer """no""" +955 59 optimizer """adadelta""" +955 59 training_loop """lcwa""" +955 59 evaluator """rankbased""" +955 60 dataset """kinships""" +955 60 model """tucker""" +955 60 loss """bceaftersigmoid""" +955 60 regularizer """no""" +955 60 optimizer """adadelta""" +955 60 training_loop """lcwa""" +955 60 evaluator """rankbased""" +955 61 dataset """kinships""" +955 61 model """tucker""" +955 61 loss """bceaftersigmoid""" +955 61 regularizer """no""" +955 61 optimizer """adadelta""" +955 61 training_loop """lcwa""" +955 61 evaluator """rankbased""" +955 62 dataset """kinships""" +955 62 model """tucker""" +955 62 loss """bceaftersigmoid""" +955 62 regularizer """no""" +955 62 optimizer """adadelta""" +955 62 training_loop """lcwa""" +955 62 evaluator """rankbased""" +955 63 dataset """kinships""" +955 63 model """tucker""" +955 63 loss """bceaftersigmoid""" +955 63 regularizer """no""" +955 63 optimizer """adadelta""" +955 63 training_loop """lcwa""" +955 63 
evaluator """rankbased""" +955 64 dataset """kinships""" +955 64 model """tucker""" +955 64 loss """bceaftersigmoid""" +955 64 regularizer """no""" +955 64 optimizer """adadelta""" +955 64 training_loop """lcwa""" +955 64 evaluator """rankbased""" +955 65 dataset """kinships""" +955 65 model """tucker""" +955 65 loss """bceaftersigmoid""" +955 65 regularizer """no""" +955 65 optimizer """adadelta""" +955 65 training_loop """lcwa""" +955 65 evaluator """rankbased""" +955 66 dataset """kinships""" +955 66 model """tucker""" +955 66 loss """bceaftersigmoid""" +955 66 regularizer """no""" +955 66 optimizer """adadelta""" +955 66 training_loop """lcwa""" +955 66 evaluator """rankbased""" +955 67 dataset """kinships""" +955 67 model """tucker""" +955 67 loss """bceaftersigmoid""" +955 67 regularizer """no""" +955 67 optimizer """adadelta""" +955 67 training_loop """lcwa""" +955 67 evaluator """rankbased""" +955 68 dataset """kinships""" +955 68 model """tucker""" +955 68 loss """bceaftersigmoid""" +955 68 regularizer """no""" +955 68 optimizer """adadelta""" +955 68 training_loop """lcwa""" +955 68 evaluator """rankbased""" +955 69 dataset """kinships""" +955 69 model """tucker""" +955 69 loss """bceaftersigmoid""" +955 69 regularizer """no""" +955 69 optimizer """adadelta""" +955 69 training_loop """lcwa""" +955 69 evaluator """rankbased""" +955 70 dataset """kinships""" +955 70 model """tucker""" +955 70 loss """bceaftersigmoid""" +955 70 regularizer """no""" +955 70 optimizer """adadelta""" +955 70 training_loop """lcwa""" +955 70 evaluator """rankbased""" +955 71 dataset """kinships""" +955 71 model """tucker""" +955 71 loss """bceaftersigmoid""" +955 71 regularizer """no""" +955 71 optimizer """adadelta""" +955 71 training_loop """lcwa""" +955 71 evaluator """rankbased""" +955 72 dataset """kinships""" +955 72 model """tucker""" +955 72 loss """bceaftersigmoid""" +955 72 regularizer """no""" +955 72 optimizer """adadelta""" +955 72 training_loop """lcwa""" +955 72 
evaluator """rankbased""" +955 73 dataset """kinships""" +955 73 model """tucker""" +955 73 loss """bceaftersigmoid""" +955 73 regularizer """no""" +955 73 optimizer """adadelta""" +955 73 training_loop """lcwa""" +955 73 evaluator """rankbased""" +955 74 dataset """kinships""" +955 74 model """tucker""" +955 74 loss """bceaftersigmoid""" +955 74 regularizer """no""" +955 74 optimizer """adadelta""" +955 74 training_loop """lcwa""" +955 74 evaluator """rankbased""" +955 75 dataset """kinships""" +955 75 model """tucker""" +955 75 loss """bceaftersigmoid""" +955 75 regularizer """no""" +955 75 optimizer """adadelta""" +955 75 training_loop """lcwa""" +955 75 evaluator """rankbased""" +955 76 dataset """kinships""" +955 76 model """tucker""" +955 76 loss """bceaftersigmoid""" +955 76 regularizer """no""" +955 76 optimizer """adadelta""" +955 76 training_loop """lcwa""" +955 76 evaluator """rankbased""" +955 77 dataset """kinships""" +955 77 model """tucker""" +955 77 loss """bceaftersigmoid""" +955 77 regularizer """no""" +955 77 optimizer """adadelta""" +955 77 training_loop """lcwa""" +955 77 evaluator """rankbased""" +955 78 dataset """kinships""" +955 78 model """tucker""" +955 78 loss """bceaftersigmoid""" +955 78 regularizer """no""" +955 78 optimizer """adadelta""" +955 78 training_loop """lcwa""" +955 78 evaluator """rankbased""" +955 79 dataset """kinships""" +955 79 model """tucker""" +955 79 loss """bceaftersigmoid""" +955 79 regularizer """no""" +955 79 optimizer """adadelta""" +955 79 training_loop """lcwa""" +955 79 evaluator """rankbased""" +955 80 dataset """kinships""" +955 80 model """tucker""" +955 80 loss """bceaftersigmoid""" +955 80 regularizer """no""" +955 80 optimizer """adadelta""" +955 80 training_loop """lcwa""" +955 80 evaluator """rankbased""" +955 81 dataset """kinships""" +955 81 model """tucker""" +955 81 loss """bceaftersigmoid""" +955 81 regularizer """no""" +955 81 optimizer """adadelta""" +955 81 training_loop """lcwa""" +955 81 
evaluator """rankbased""" +955 82 dataset """kinships""" +955 82 model """tucker""" +955 82 loss """bceaftersigmoid""" +955 82 regularizer """no""" +955 82 optimizer """adadelta""" +955 82 training_loop """lcwa""" +955 82 evaluator """rankbased""" +955 83 dataset """kinships""" +955 83 model """tucker""" +955 83 loss """bceaftersigmoid""" +955 83 regularizer """no""" +955 83 optimizer """adadelta""" +955 83 training_loop """lcwa""" +955 83 evaluator """rankbased""" +955 84 dataset """kinships""" +955 84 model """tucker""" +955 84 loss """bceaftersigmoid""" +955 84 regularizer """no""" +955 84 optimizer """adadelta""" +955 84 training_loop """lcwa""" +955 84 evaluator """rankbased""" +955 85 dataset """kinships""" +955 85 model """tucker""" +955 85 loss """bceaftersigmoid""" +955 85 regularizer """no""" +955 85 optimizer """adadelta""" +955 85 training_loop """lcwa""" +955 85 evaluator """rankbased""" +955 86 dataset """kinships""" +955 86 model """tucker""" +955 86 loss """bceaftersigmoid""" +955 86 regularizer """no""" +955 86 optimizer """adadelta""" +955 86 training_loop """lcwa""" +955 86 evaluator """rankbased""" +955 87 dataset """kinships""" +955 87 model """tucker""" +955 87 loss """bceaftersigmoid""" +955 87 regularizer """no""" +955 87 optimizer """adadelta""" +955 87 training_loop """lcwa""" +955 87 evaluator """rankbased""" +955 88 dataset """kinships""" +955 88 model """tucker""" +955 88 loss """bceaftersigmoid""" +955 88 regularizer """no""" +955 88 optimizer """adadelta""" +955 88 training_loop """lcwa""" +955 88 evaluator """rankbased""" +955 89 dataset """kinships""" +955 89 model """tucker""" +955 89 loss """bceaftersigmoid""" +955 89 regularizer """no""" +955 89 optimizer """adadelta""" +955 89 training_loop """lcwa""" +955 89 evaluator """rankbased""" +955 90 dataset """kinships""" +955 90 model """tucker""" +955 90 loss """bceaftersigmoid""" +955 90 regularizer """no""" +955 90 optimizer """adadelta""" +955 90 training_loop """lcwa""" +955 90 
evaluator """rankbased""" +955 91 dataset """kinships""" +955 91 model """tucker""" +955 91 loss """bceaftersigmoid""" +955 91 regularizer """no""" +955 91 optimizer """adadelta""" +955 91 training_loop """lcwa""" +955 91 evaluator """rankbased""" +955 92 dataset """kinships""" +955 92 model """tucker""" +955 92 loss """bceaftersigmoid""" +955 92 regularizer """no""" +955 92 optimizer """adadelta""" +955 92 training_loop """lcwa""" +955 92 evaluator """rankbased""" +955 93 dataset """kinships""" +955 93 model """tucker""" +955 93 loss """bceaftersigmoid""" +955 93 regularizer """no""" +955 93 optimizer """adadelta""" +955 93 training_loop """lcwa""" +955 93 evaluator """rankbased""" +955 94 dataset """kinships""" +955 94 model """tucker""" +955 94 loss """bceaftersigmoid""" +955 94 regularizer """no""" +955 94 optimizer """adadelta""" +955 94 training_loop """lcwa""" +955 94 evaluator """rankbased""" +955 95 dataset """kinships""" +955 95 model """tucker""" +955 95 loss """bceaftersigmoid""" +955 95 regularizer """no""" +955 95 optimizer """adadelta""" +955 95 training_loop """lcwa""" +955 95 evaluator """rankbased""" +955 96 dataset """kinships""" +955 96 model """tucker""" +955 96 loss """bceaftersigmoid""" +955 96 regularizer """no""" +955 96 optimizer """adadelta""" +955 96 training_loop """lcwa""" +955 96 evaluator """rankbased""" +955 97 dataset """kinships""" +955 97 model """tucker""" +955 97 loss """bceaftersigmoid""" +955 97 regularizer """no""" +955 97 optimizer """adadelta""" +955 97 training_loop """lcwa""" +955 97 evaluator """rankbased""" +955 98 dataset """kinships""" +955 98 model """tucker""" +955 98 loss """bceaftersigmoid""" +955 98 regularizer """no""" +955 98 optimizer """adadelta""" +955 98 training_loop """lcwa""" +955 98 evaluator """rankbased""" +955 99 dataset """kinships""" +955 99 model """tucker""" +955 99 loss """bceaftersigmoid""" +955 99 regularizer """no""" +955 99 optimizer """adadelta""" +955 99 training_loop """lcwa""" +955 99 
evaluator """rankbased""" +955 100 dataset """kinships""" +955 100 model """tucker""" +955 100 loss """bceaftersigmoid""" +955 100 regularizer """no""" +955 100 optimizer """adadelta""" +955 100 training_loop """lcwa""" +955 100 evaluator """rankbased""" +956 1 model.embedding_dim 2.0 +956 1 model.relation_dim 1.0 +956 1 model.dropout_0 0.2905141884346186 +956 1 model.dropout_1 0.2597120065377112 +956 1 model.dropout_2 0.11002974622739217 +956 1 training.batch_size 2.0 +956 1 training.label_smoothing 0.11509185384899 +956 2 model.embedding_dim 2.0 +956 2 model.relation_dim 0.0 +956 2 model.dropout_0 0.2771126551962833 +956 2 model.dropout_1 0.12992891541329676 +956 2 model.dropout_2 0.22407108017050403 +956 2 training.batch_size 0.0 +956 2 training.label_smoothing 0.035325370768599464 +956 3 model.embedding_dim 2.0 +956 3 model.relation_dim 1.0 +956 3 model.dropout_0 0.3299671994232227 +956 3 model.dropout_1 0.370188959523859 +956 3 model.dropout_2 0.4089656766138198 +956 3 training.batch_size 0.0 +956 3 training.label_smoothing 0.06631915395362922 +956 4 model.embedding_dim 1.0 +956 4 model.relation_dim 1.0 +956 4 model.dropout_0 0.31054868242974465 +956 4 model.dropout_1 0.11643980256561633 +956 4 model.dropout_2 0.4115258312395732 +956 4 training.batch_size 1.0 +956 4 training.label_smoothing 0.010694840977074637 +956 5 model.embedding_dim 1.0 +956 5 model.relation_dim 0.0 +956 5 model.dropout_0 0.181571770981775 +956 5 model.dropout_1 0.1834557521360606 +956 5 model.dropout_2 0.4102426846030781 +956 5 training.batch_size 0.0 +956 5 training.label_smoothing 0.009544292849596698 +956 6 model.embedding_dim 1.0 +956 6 model.relation_dim 0.0 +956 6 model.dropout_0 0.4285788498371156 +956 6 model.dropout_1 0.2974687262879693 +956 6 model.dropout_2 0.14868546282331374 +956 6 training.batch_size 2.0 +956 6 training.label_smoothing 0.2653338441187497 +956 7 model.embedding_dim 1.0 +956 7 model.relation_dim 2.0 +956 7 model.dropout_0 0.38714383742145775 +956 7 
model.dropout_1 0.2020522758578634 +956 7 model.dropout_2 0.14733885360122825 +956 7 training.batch_size 2.0 +956 7 training.label_smoothing 0.2732069238383597 +956 8 model.embedding_dim 1.0 +956 8 model.relation_dim 1.0 +956 8 model.dropout_0 0.49157595435824863 +956 8 model.dropout_1 0.368762627251189 +956 8 model.dropout_2 0.3849188566452699 +956 8 training.batch_size 0.0 +956 8 training.label_smoothing 0.0029427492591187843 +956 9 model.embedding_dim 0.0 +956 9 model.relation_dim 2.0 +956 9 model.dropout_0 0.46570118412677064 +956 9 model.dropout_1 0.33870897220710866 +956 9 model.dropout_2 0.24784496464568875 +956 9 training.batch_size 0.0 +956 9 training.label_smoothing 0.401615300284772 +956 10 model.embedding_dim 2.0 +956 10 model.relation_dim 2.0 +956 10 model.dropout_0 0.44688025030083944 +956 10 model.dropout_1 0.43616309720549784 +956 10 model.dropout_2 0.2045962057057461 +956 10 training.batch_size 1.0 +956 10 training.label_smoothing 0.19816725733035778 +956 11 model.embedding_dim 2.0 +956 11 model.relation_dim 1.0 +956 11 model.dropout_0 0.26359281575098936 +956 11 model.dropout_1 0.3505292387329578 +956 11 model.dropout_2 0.3210519334219296 +956 11 training.batch_size 2.0 +956 11 training.label_smoothing 0.32593405902302847 +956 12 model.embedding_dim 1.0 +956 12 model.relation_dim 0.0 +956 12 model.dropout_0 0.15751611994960202 +956 12 model.dropout_1 0.12933494609362628 +956 12 model.dropout_2 0.47390581663408093 +956 12 training.batch_size 0.0 +956 12 training.label_smoothing 0.00765518955207706 +956 13 model.embedding_dim 0.0 +956 13 model.relation_dim 2.0 +956 13 model.dropout_0 0.1705814167091862 +956 13 model.dropout_1 0.3167504031920272 +956 13 model.dropout_2 0.45221339159301943 +956 13 training.batch_size 0.0 +956 13 training.label_smoothing 0.5864291750118281 +956 14 model.embedding_dim 2.0 +956 14 model.relation_dim 0.0 +956 14 model.dropout_0 0.3942865953543184 +956 14 model.dropout_1 0.1466546709263069 +956 14 model.dropout_2 
0.17067570809931704 +956 14 training.batch_size 0.0 +956 14 training.label_smoothing 0.17679615525743775 +956 15 model.embedding_dim 2.0 +956 15 model.relation_dim 1.0 +956 15 model.dropout_0 0.30819237777422637 +956 15 model.dropout_1 0.3523337397936379 +956 15 model.dropout_2 0.4390709198079842 +956 15 training.batch_size 2.0 +956 15 training.label_smoothing 0.09920966032567552 +956 16 model.embedding_dim 1.0 +956 16 model.relation_dim 2.0 +956 16 model.dropout_0 0.22320502709579007 +956 16 model.dropout_1 0.217380744022879 +956 16 model.dropout_2 0.1390700335547496 +956 16 training.batch_size 0.0 +956 16 training.label_smoothing 0.654406639109863 +956 17 model.embedding_dim 0.0 +956 17 model.relation_dim 0.0 +956 17 model.dropout_0 0.18747101084278067 +956 17 model.dropout_1 0.16384941946117515 +956 17 model.dropout_2 0.475180900273238 +956 17 training.batch_size 0.0 +956 17 training.label_smoothing 0.0033276545698750293 +956 18 model.embedding_dim 1.0 +956 18 model.relation_dim 2.0 +956 18 model.dropout_0 0.48866719888249066 +956 18 model.dropout_1 0.23786601753818096 +956 18 model.dropout_2 0.47495198773281466 +956 18 training.batch_size 1.0 +956 18 training.label_smoothing 0.23411551662288618 +956 19 model.embedding_dim 0.0 +956 19 model.relation_dim 2.0 +956 19 model.dropout_0 0.15317232124564883 +956 19 model.dropout_1 0.23635472762911008 +956 19 model.dropout_2 0.3140531559850589 +956 19 training.batch_size 0.0 +956 19 training.label_smoothing 0.004090305399190727 +956 20 model.embedding_dim 1.0 +956 20 model.relation_dim 2.0 +956 20 model.dropout_0 0.14027674017544137 +956 20 model.dropout_1 0.17141088230763457 +956 20 model.dropout_2 0.26377298670140625 +956 20 training.batch_size 1.0 +956 20 training.label_smoothing 0.042939176255808766 +956 21 model.embedding_dim 1.0 +956 21 model.relation_dim 0.0 +956 21 model.dropout_0 0.41931554616531974 +956 21 model.dropout_1 0.34673005035694204 +956 21 model.dropout_2 0.2838809972295609 +956 21 
training.batch_size 2.0 +956 21 training.label_smoothing 0.08280775975038888 +956 22 model.embedding_dim 0.0 +956 22 model.relation_dim 1.0 +956 22 model.dropout_0 0.29387940581612537 +956 22 model.dropout_1 0.4339141024939047 +956 22 model.dropout_2 0.10686543876378313 +956 22 training.batch_size 1.0 +956 22 training.label_smoothing 0.4824212105015766 +956 23 model.embedding_dim 2.0 +956 23 model.relation_dim 0.0 +956 23 model.dropout_0 0.17483589200627087 +956 23 model.dropout_1 0.4761255128990036 +956 23 model.dropout_2 0.3514372912680904 +956 23 training.batch_size 2.0 +956 23 training.label_smoothing 0.08519193341899528 +956 24 model.embedding_dim 1.0 +956 24 model.relation_dim 0.0 +956 24 model.dropout_0 0.20004330490305008 +956 24 model.dropout_1 0.4837638353445063 +956 24 model.dropout_2 0.1907391820919654 +956 24 training.batch_size 2.0 +956 24 training.label_smoothing 0.0027906146523853 +956 25 model.embedding_dim 1.0 +956 25 model.relation_dim 1.0 +956 25 model.dropout_0 0.46913091589165823 +956 25 model.dropout_1 0.48066384106233107 +956 25 model.dropout_2 0.24394705077133014 +956 25 training.batch_size 0.0 +956 25 training.label_smoothing 0.018112153647839368 +956 26 model.embedding_dim 2.0 +956 26 model.relation_dim 2.0 +956 26 model.dropout_0 0.15060594495665575 +956 26 model.dropout_1 0.3172854258485567 +956 26 model.dropout_2 0.3630669352569129 +956 26 training.batch_size 2.0 +956 26 training.label_smoothing 0.02677725456148632 +956 27 model.embedding_dim 1.0 +956 27 model.relation_dim 0.0 +956 27 model.dropout_0 0.11648633206737724 +956 27 model.dropout_1 0.4079426352062707 +956 27 model.dropout_2 0.23812956825286247 +956 27 training.batch_size 2.0 +956 27 training.label_smoothing 0.03908280806084521 +956 28 model.embedding_dim 0.0 +956 28 model.relation_dim 1.0 +956 28 model.dropout_0 0.2722451425085528 +956 28 model.dropout_1 0.3466537713027682 +956 28 model.dropout_2 0.46083269893463397 +956 28 training.batch_size 0.0 +956 28 
training.label_smoothing 0.004584834590606142 +956 29 model.embedding_dim 1.0 +956 29 model.relation_dim 0.0 +956 29 model.dropout_0 0.43740534947342824 +956 29 model.dropout_1 0.37559199613038063 +956 29 model.dropout_2 0.2603297939246765 +956 29 training.batch_size 2.0 +956 29 training.label_smoothing 0.01082604385499511 +956 30 model.embedding_dim 1.0 +956 30 model.relation_dim 0.0 +956 30 model.dropout_0 0.14433192631510738 +956 30 model.dropout_1 0.2276633324200458 +956 30 model.dropout_2 0.4944705010484607 +956 30 training.batch_size 1.0 +956 30 training.label_smoothing 0.002357829711212902 +956 31 model.embedding_dim 2.0 +956 31 model.relation_dim 2.0 +956 31 model.dropout_0 0.2673930719021578 +956 31 model.dropout_1 0.11186322917087721 +956 31 model.dropout_2 0.14883240550490096 +956 31 training.batch_size 1.0 +956 31 training.label_smoothing 0.010111345578633685 +956 32 model.embedding_dim 1.0 +956 32 model.relation_dim 1.0 +956 32 model.dropout_0 0.23811817713813055 +956 32 model.dropout_1 0.432819802814421 +956 32 model.dropout_2 0.34834445757250176 +956 32 training.batch_size 2.0 +956 32 training.label_smoothing 0.03992883434079636 +956 33 model.embedding_dim 0.0 +956 33 model.relation_dim 1.0 +956 33 model.dropout_0 0.30624763077801476 +956 33 model.dropout_1 0.31718923467423144 +956 33 model.dropout_2 0.16083839417870097 +956 33 training.batch_size 0.0 +956 33 training.label_smoothing 0.0015880819838652086 +956 34 model.embedding_dim 2.0 +956 34 model.relation_dim 0.0 +956 34 model.dropout_0 0.4354834781015562 +956 34 model.dropout_1 0.33654715518603506 +956 34 model.dropout_2 0.3937066536847106 +956 34 training.batch_size 1.0 +956 34 training.label_smoothing 0.41880951061056043 +956 35 model.embedding_dim 2.0 +956 35 model.relation_dim 1.0 +956 35 model.dropout_0 0.4278396493485227 +956 35 model.dropout_1 0.26811485739954444 +956 35 model.dropout_2 0.26700485509784144 +956 35 training.batch_size 0.0 +956 35 training.label_smoothing 
0.06266889169789693 +956 36 model.embedding_dim 0.0 +956 36 model.relation_dim 1.0 +956 36 model.dropout_0 0.39357561180101264 +956 36 model.dropout_1 0.1994678282727862 +956 36 model.dropout_2 0.3814877316635146 +956 36 training.batch_size 0.0 +956 36 training.label_smoothing 0.11454934595533345 +956 37 model.embedding_dim 1.0 +956 37 model.relation_dim 0.0 +956 37 model.dropout_0 0.4954568017723985 +956 37 model.dropout_1 0.292432190986655 +956 37 model.dropout_2 0.12552041843690465 +956 37 training.batch_size 1.0 +956 37 training.label_smoothing 0.13251078201982205 +956 38 model.embedding_dim 1.0 +956 38 model.relation_dim 0.0 +956 38 model.dropout_0 0.36542590541224584 +956 38 model.dropout_1 0.1406491798357836 +956 38 model.dropout_2 0.22693012610155583 +956 38 training.batch_size 0.0 +956 38 training.label_smoothing 0.11743123994709724 +956 39 model.embedding_dim 1.0 +956 39 model.relation_dim 1.0 +956 39 model.dropout_0 0.29197345971358285 +956 39 model.dropout_1 0.18008461258470218 +956 39 model.dropout_2 0.20561536827343657 +956 39 training.batch_size 1.0 +956 39 training.label_smoothing 0.028627124468361385 +956 40 model.embedding_dim 1.0 +956 40 model.relation_dim 1.0 +956 40 model.dropout_0 0.3310914001443823 +956 40 model.dropout_1 0.1021535217207763 +956 40 model.dropout_2 0.30574843691112596 +956 40 training.batch_size 0.0 +956 40 training.label_smoothing 0.014132796485227889 +956 41 model.embedding_dim 1.0 +956 41 model.relation_dim 2.0 +956 41 model.dropout_0 0.49749320279015496 +956 41 model.dropout_1 0.37817886658893674 +956 41 model.dropout_2 0.4186437602781268 +956 41 training.batch_size 1.0 +956 41 training.label_smoothing 0.09033104073463577 +956 42 model.embedding_dim 0.0 +956 42 model.relation_dim 0.0 +956 42 model.dropout_0 0.26001150217025615 +956 42 model.dropout_1 0.41723070272932006 +956 42 model.dropout_2 0.2090070497571749 +956 42 training.batch_size 2.0 +956 42 training.label_smoothing 0.0014517444353384392 +956 43 
model.embedding_dim 1.0 +956 43 model.relation_dim 1.0 +956 43 model.dropout_0 0.10537493522523117 +956 43 model.dropout_1 0.33017528521754813 +956 43 model.dropout_2 0.29044299599477524 +956 43 training.batch_size 0.0 +956 43 training.label_smoothing 0.30762739701380815 +956 44 model.embedding_dim 2.0 +956 44 model.relation_dim 2.0 +956 44 model.dropout_0 0.2827713184699213 +956 44 model.dropout_1 0.28434963785836836 +956 44 model.dropout_2 0.4004498230123843 +956 44 training.batch_size 2.0 +956 44 training.label_smoothing 0.03784153615575845 +956 45 model.embedding_dim 0.0 +956 45 model.relation_dim 1.0 +956 45 model.dropout_0 0.16223194618010905 +956 45 model.dropout_1 0.2970216657140011 +956 45 model.dropout_2 0.23891939795758757 +956 45 training.batch_size 2.0 +956 45 training.label_smoothing 0.39610639231814054 +956 46 model.embedding_dim 1.0 +956 46 model.relation_dim 2.0 +956 46 model.dropout_0 0.1495665316119129 +956 46 model.dropout_1 0.11666448626815731 +956 46 model.dropout_2 0.3172144225889345 +956 46 training.batch_size 2.0 +956 46 training.label_smoothing 0.36950181295546375 +956 47 model.embedding_dim 0.0 +956 47 model.relation_dim 0.0 +956 47 model.dropout_0 0.1499387556456871 +956 47 model.dropout_1 0.49455202166501266 +956 47 model.dropout_2 0.3453751595908835 +956 47 training.batch_size 1.0 +956 47 training.label_smoothing 0.04502303736287621 +956 48 model.embedding_dim 0.0 +956 48 model.relation_dim 0.0 +956 48 model.dropout_0 0.24910663099984787 +956 48 model.dropout_1 0.21371024501917144 +956 48 model.dropout_2 0.3264848620762343 +956 48 training.batch_size 1.0 +956 48 training.label_smoothing 0.09446807154999116 +956 49 model.embedding_dim 2.0 +956 49 model.relation_dim 0.0 +956 49 model.dropout_0 0.34560303317579544 +956 49 model.dropout_1 0.1826554016596644 +956 49 model.dropout_2 0.15701387157877855 +956 49 training.batch_size 0.0 +956 49 training.label_smoothing 0.10945119777361972 +956 50 model.embedding_dim 1.0 +956 50 
model.relation_dim 2.0 +956 50 model.dropout_0 0.30158705454382473 +956 50 model.dropout_1 0.1610612759776412 +956 50 model.dropout_2 0.25539629581201173 +956 50 training.batch_size 1.0 +956 50 training.label_smoothing 0.038309659155209244 +956 51 model.embedding_dim 2.0 +956 51 model.relation_dim 2.0 +956 51 model.dropout_0 0.3153464645495392 +956 51 model.dropout_1 0.4349873054599013 +956 51 model.dropout_2 0.1953007297258023 +956 51 training.batch_size 0.0 +956 51 training.label_smoothing 0.33675036664438035 +956 52 model.embedding_dim 2.0 +956 52 model.relation_dim 2.0 +956 52 model.dropout_0 0.45635053009874227 +956 52 model.dropout_1 0.14704424592368914 +956 52 model.dropout_2 0.4240797907340246 +956 52 training.batch_size 0.0 +956 52 training.label_smoothing 0.9018066613641043 +956 53 model.embedding_dim 2.0 +956 53 model.relation_dim 0.0 +956 53 model.dropout_0 0.22972122102926248 +956 53 model.dropout_1 0.47552753467303666 +956 53 model.dropout_2 0.48646163874329285 +956 53 training.batch_size 0.0 +956 53 training.label_smoothing 0.002753610065671549 +956 54 model.embedding_dim 1.0 +956 54 model.relation_dim 1.0 +956 54 model.dropout_0 0.3238544535235791 +956 54 model.dropout_1 0.254232323141398 +956 54 model.dropout_2 0.38676054800034676 +956 54 training.batch_size 0.0 +956 54 training.label_smoothing 0.007833500069234502 +956 55 model.embedding_dim 0.0 +956 55 model.relation_dim 0.0 +956 55 model.dropout_0 0.47941997670465797 +956 55 model.dropout_1 0.22346389557785457 +956 55 model.dropout_2 0.4578861442234522 +956 55 training.batch_size 1.0 +956 55 training.label_smoothing 0.002007316039007717 +956 56 model.embedding_dim 1.0 +956 56 model.relation_dim 0.0 +956 56 model.dropout_0 0.49699149659652797 +956 56 model.dropout_1 0.2487300900714125 +956 56 model.dropout_2 0.2990025212308538 +956 56 training.batch_size 1.0 +956 56 training.label_smoothing 0.012493506150332838 +956 57 model.embedding_dim 0.0 +956 57 model.relation_dim 2.0 +956 57 model.dropout_0 
0.1819558341280216 +956 57 model.dropout_1 0.4129984919525109 +956 57 model.dropout_2 0.29773308871545856 +956 57 training.batch_size 1.0 +956 57 training.label_smoothing 0.6616060265067031 +956 58 model.embedding_dim 2.0 +956 58 model.relation_dim 2.0 +956 58 model.dropout_0 0.2120007580569595 +956 58 model.dropout_1 0.30008720100863506 +956 58 model.dropout_2 0.3066927079435265 +956 58 training.batch_size 2.0 +956 58 training.label_smoothing 0.0017842077214250318 +956 59 model.embedding_dim 2.0 +956 59 model.relation_dim 1.0 +956 59 model.dropout_0 0.3105396926633853 +956 59 model.dropout_1 0.18948904179796766 +956 59 model.dropout_2 0.12385721967250132 +956 59 training.batch_size 1.0 +956 59 training.label_smoothing 0.27029852958369216 +956 60 model.embedding_dim 1.0 +956 60 model.relation_dim 0.0 +956 60 model.dropout_0 0.37768686641877236 +956 60 model.dropout_1 0.27392011204225175 +956 60 model.dropout_2 0.16027587933621637 +956 60 training.batch_size 1.0 +956 60 training.label_smoothing 0.06469780239863081 +956 61 model.embedding_dim 0.0 +956 61 model.relation_dim 1.0 +956 61 model.dropout_0 0.30291168332268614 +956 61 model.dropout_1 0.41650226440662763 +956 61 model.dropout_2 0.12438484252395382 +956 61 training.batch_size 0.0 +956 61 training.label_smoothing 0.01776669245161556 +956 62 model.embedding_dim 0.0 +956 62 model.relation_dim 1.0 +956 62 model.dropout_0 0.10287190532365087 +956 62 model.dropout_1 0.13592023238786025 +956 62 model.dropout_2 0.19511414412335829 +956 62 training.batch_size 1.0 +956 62 training.label_smoothing 0.026746047713707553 +956 63 model.embedding_dim 0.0 +956 63 model.relation_dim 2.0 +956 63 model.dropout_0 0.17053739710712745 +956 63 model.dropout_1 0.2155432540152562 +956 63 model.dropout_2 0.4603224982320161 +956 63 training.batch_size 2.0 +956 63 training.label_smoothing 0.06203467748222655 +956 64 model.embedding_dim 2.0 +956 64 model.relation_dim 0.0 +956 64 model.dropout_0 0.34111161783175176 +956 64 model.dropout_1 
0.3523182241503017 +956 64 model.dropout_2 0.11394875444440507 +956 64 training.batch_size 1.0 +956 64 training.label_smoothing 0.014215096984084796 +956 65 model.embedding_dim 0.0 +956 65 model.relation_dim 1.0 +956 65 model.dropout_0 0.35211225871301766 +956 65 model.dropout_1 0.16366397431142493 +956 65 model.dropout_2 0.3806833478706021 +956 65 training.batch_size 0.0 +956 65 training.label_smoothing 0.051864320237294106 +956 66 model.embedding_dim 0.0 +956 66 model.relation_dim 0.0 +956 66 model.dropout_0 0.19239554789380284 +956 66 model.dropout_1 0.4646741961023181 +956 66 model.dropout_2 0.256320300293234 +956 66 training.batch_size 0.0 +956 66 training.label_smoothing 0.0600105905961532 +956 67 model.embedding_dim 2.0 +956 67 model.relation_dim 2.0 +956 67 model.dropout_0 0.39920238613604053 +956 67 model.dropout_1 0.2535057059386894 +956 67 model.dropout_2 0.1994322691513872 +956 67 training.batch_size 2.0 +956 67 training.label_smoothing 0.155343971873148 +956 68 model.embedding_dim 2.0 +956 68 model.relation_dim 2.0 +956 68 model.dropout_0 0.23439234921644747 +956 68 model.dropout_1 0.18522386553627845 +956 68 model.dropout_2 0.18830312561534732 +956 68 training.batch_size 0.0 +956 68 training.label_smoothing 0.7599223351688676 +956 69 model.embedding_dim 1.0 +956 69 model.relation_dim 1.0 +956 69 model.dropout_0 0.14990712552917912 +956 69 model.dropout_1 0.23013944630962424 +956 69 model.dropout_2 0.31863090920257126 +956 69 training.batch_size 2.0 +956 69 training.label_smoothing 0.1417752910278467 +956 70 model.embedding_dim 2.0 +956 70 model.relation_dim 1.0 +956 70 model.dropout_0 0.4011842368675542 +956 70 model.dropout_1 0.24049433098870346 +956 70 model.dropout_2 0.13795026404364577 +956 70 training.batch_size 0.0 +956 70 training.label_smoothing 0.10102499614322119 +956 71 model.embedding_dim 1.0 +956 71 model.relation_dim 2.0 +956 71 model.dropout_0 0.1064204642260469 +956 71 model.dropout_1 0.2329535547730265 +956 71 model.dropout_2 
0.1017171076036779 +956 71 training.batch_size 1.0 +956 71 training.label_smoothing 0.005054766335631494 +956 72 model.embedding_dim 0.0 +956 72 model.relation_dim 1.0 +956 72 model.dropout_0 0.48354571683445047 +956 72 model.dropout_1 0.31586764105750853 +956 72 model.dropout_2 0.49898694269295973 +956 72 training.batch_size 1.0 +956 72 training.label_smoothing 0.00470865827847835 +956 73 model.embedding_dim 2.0 +956 73 model.relation_dim 2.0 +956 73 model.dropout_0 0.3747839241247961 +956 73 model.dropout_1 0.4214072050009744 +956 73 model.dropout_2 0.37667035868480137 +956 73 training.batch_size 0.0 +956 73 training.label_smoothing 0.0058217413044657045 +956 74 model.embedding_dim 0.0 +956 74 model.relation_dim 1.0 +956 74 model.dropout_0 0.4218403776280467 +956 74 model.dropout_1 0.4478537702012493 +956 74 model.dropout_2 0.1677176429222266 +956 74 training.batch_size 1.0 +956 74 training.label_smoothing 0.07628774854060581 +956 75 model.embedding_dim 0.0 +956 75 model.relation_dim 0.0 +956 75 model.dropout_0 0.17968145338153094 +956 75 model.dropout_1 0.2816704337647947 +956 75 model.dropout_2 0.11959141323405063 +956 75 training.batch_size 2.0 +956 75 training.label_smoothing 0.14519417967251733 +956 76 model.embedding_dim 2.0 +956 76 model.relation_dim 0.0 +956 76 model.dropout_0 0.310322914372986 +956 76 model.dropout_1 0.4011957617109262 +956 76 model.dropout_2 0.2966211554559557 +956 76 training.batch_size 1.0 +956 76 training.label_smoothing 0.024431940468961508 +956 77 model.embedding_dim 1.0 +956 77 model.relation_dim 2.0 +956 77 model.dropout_0 0.2904038608619076 +956 77 model.dropout_1 0.30538333642330306 +956 77 model.dropout_2 0.20434317915436606 +956 77 training.batch_size 0.0 +956 77 training.label_smoothing 0.34462657429215116 +956 78 model.embedding_dim 0.0 +956 78 model.relation_dim 0.0 +956 78 model.dropout_0 0.25812598076060755 +956 78 model.dropout_1 0.44524422528478336 +956 78 model.dropout_2 0.429162764385447 +956 78 training.batch_size 
1.0 +956 78 training.label_smoothing 0.3972880770544418 +956 79 model.embedding_dim 2.0 +956 79 model.relation_dim 1.0 +956 79 model.dropout_0 0.1277221378728827 +956 79 model.dropout_1 0.10579696583687458 +956 79 model.dropout_2 0.41857489154726213 +956 79 training.batch_size 0.0 +956 79 training.label_smoothing 0.019691638280108455 +956 80 model.embedding_dim 0.0 +956 80 model.relation_dim 0.0 +956 80 model.dropout_0 0.23667400525054091 +956 80 model.dropout_1 0.22652220153445857 +956 80 model.dropout_2 0.32365602001063853 +956 80 training.batch_size 1.0 +956 80 training.label_smoothing 0.0023550161280947875 +956 81 model.embedding_dim 1.0 +956 81 model.relation_dim 1.0 +956 81 model.dropout_0 0.3177431806592883 +956 81 model.dropout_1 0.37374615540315015 +956 81 model.dropout_2 0.3829289327681702 +956 81 training.batch_size 1.0 +956 81 training.label_smoothing 0.0026642235124568533 +956 82 model.embedding_dim 2.0 +956 82 model.relation_dim 1.0 +956 82 model.dropout_0 0.22012582892922394 +956 82 model.dropout_1 0.1844341405004839 +956 82 model.dropout_2 0.2217902118397566 +956 82 training.batch_size 1.0 +956 82 training.label_smoothing 0.14744459857406408 +956 83 model.embedding_dim 2.0 +956 83 model.relation_dim 2.0 +956 83 model.dropout_0 0.1489969711181979 +956 83 model.dropout_1 0.3105604076272088 +956 83 model.dropout_2 0.10119042299641651 +956 83 training.batch_size 2.0 +956 83 training.label_smoothing 0.0029105668260281897 +956 84 model.embedding_dim 2.0 +956 84 model.relation_dim 0.0 +956 84 model.dropout_0 0.14380599252958587 +956 84 model.dropout_1 0.3156470163007862 +956 84 model.dropout_2 0.30121894295758134 +956 84 training.batch_size 1.0 +956 84 training.label_smoothing 0.41301920358000793 +956 85 model.embedding_dim 2.0 +956 85 model.relation_dim 1.0 +956 85 model.dropout_0 0.31471890912435874 +956 85 model.dropout_1 0.15365620063099353 +956 85 model.dropout_2 0.440358288383386 +956 85 training.batch_size 1.0 +956 85 training.label_smoothing 
0.055857791764241334 +956 86 model.embedding_dim 0.0 +956 86 model.relation_dim 1.0 +956 86 model.dropout_0 0.331665901097847 +956 86 model.dropout_1 0.44830496763348293 +956 86 model.dropout_2 0.4334740858261592 +956 86 training.batch_size 2.0 +956 86 training.label_smoothing 0.0022127399100384587 +956 87 model.embedding_dim 2.0 +956 87 model.relation_dim 2.0 +956 87 model.dropout_0 0.12677247781885767 +956 87 model.dropout_1 0.3981892713329451 +956 87 model.dropout_2 0.3170036164893759 +956 87 training.batch_size 2.0 +956 87 training.label_smoothing 0.7178859872773866 +956 88 model.embedding_dim 2.0 +956 88 model.relation_dim 0.0 +956 88 model.dropout_0 0.34807379508097813 +956 88 model.dropout_1 0.46817286964501825 +956 88 model.dropout_2 0.3566130776669786 +956 88 training.batch_size 0.0 +956 88 training.label_smoothing 0.003113759095732994 +956 89 model.embedding_dim 2.0 +956 89 model.relation_dim 0.0 +956 89 model.dropout_0 0.34227496444071337 +956 89 model.dropout_1 0.1292025330315382 +956 89 model.dropout_2 0.11534374504619893 +956 89 training.batch_size 2.0 +956 89 training.label_smoothing 0.04297702661811163 +956 90 model.embedding_dim 2.0 +956 90 model.relation_dim 0.0 +956 90 model.dropout_0 0.44771908129740257 +956 90 model.dropout_1 0.4512314293256525 +956 90 model.dropout_2 0.31021364065826773 +956 90 training.batch_size 2.0 +956 90 training.label_smoothing 0.0021479299050261116 +956 91 model.embedding_dim 1.0 +956 91 model.relation_dim 2.0 +956 91 model.dropout_0 0.171463505748168 +956 91 model.dropout_1 0.1466996420704474 +956 91 model.dropout_2 0.2023733409060501 +956 91 training.batch_size 0.0 +956 91 training.label_smoothing 0.017197688050715452 +956 92 model.embedding_dim 1.0 +956 92 model.relation_dim 2.0 +956 92 model.dropout_0 0.3511210021865917 +956 92 model.dropout_1 0.1565725167852095 +956 92 model.dropout_2 0.1696806339267825 +956 92 training.batch_size 2.0 +956 92 training.label_smoothing 0.01561026332591235 +956 93 model.embedding_dim 
0.0 +956 93 model.relation_dim 2.0 +956 93 model.dropout_0 0.312505904401703 +956 93 model.dropout_1 0.19290039946796966 +956 93 model.dropout_2 0.2704634177124273 +956 93 training.batch_size 2.0 +956 93 training.label_smoothing 0.0017598038414895843 +956 94 model.embedding_dim 0.0 +956 94 model.relation_dim 0.0 +956 94 model.dropout_0 0.49040212574352104 +956 94 model.dropout_1 0.20713866584502152 +956 94 model.dropout_2 0.29152828481499454 +956 94 training.batch_size 1.0 +956 94 training.label_smoothing 0.004859970971662787 +956 95 model.embedding_dim 1.0 +956 95 model.relation_dim 0.0 +956 95 model.dropout_0 0.4001450491772951 +956 95 model.dropout_1 0.34914391437871684 +956 95 model.dropout_2 0.4088569168126437 +956 95 training.batch_size 0.0 +956 95 training.label_smoothing 0.13167864339768853 +956 96 model.embedding_dim 1.0 +956 96 model.relation_dim 0.0 +956 96 model.dropout_0 0.2586288662284331 +956 96 model.dropout_1 0.24498500443398663 +956 96 model.dropout_2 0.18659571841985084 +956 96 training.batch_size 2.0 +956 96 training.label_smoothing 0.032658952207358274 +956 97 model.embedding_dim 1.0 +956 97 model.relation_dim 0.0 +956 97 model.dropout_0 0.21523419893550455 +956 97 model.dropout_1 0.3697970014116938 +956 97 model.dropout_2 0.3369368382139238 +956 97 training.batch_size 0.0 +956 97 training.label_smoothing 0.004668638714173987 +956 98 model.embedding_dim 0.0 +956 98 model.relation_dim 1.0 +956 98 model.dropout_0 0.41822421459217496 +956 98 model.dropout_1 0.3698168830297681 +956 98 model.dropout_2 0.49294240042624626 +956 98 training.batch_size 0.0 +956 98 training.label_smoothing 0.08765206878049857 +956 99 model.embedding_dim 1.0 +956 99 model.relation_dim 2.0 +956 99 model.dropout_0 0.2034901229400577 +956 99 model.dropout_1 0.45498394292652033 +956 99 model.dropout_2 0.4450167403192361 +956 99 training.batch_size 0.0 +956 99 training.label_smoothing 0.009228419806516352 +956 100 model.embedding_dim 0.0 +956 100 model.relation_dim 2.0 +956 
100 model.dropout_0 0.19196739478206049 +956 100 model.dropout_1 0.19740270059990667 +956 100 model.dropout_2 0.3101886501443255 +956 100 training.batch_size 1.0 +956 100 training.label_smoothing 0.20088460288883836 +956 1 dataset """kinships""" +956 1 model """tucker""" +956 1 loss """softplus""" +956 1 regularizer """no""" +956 1 optimizer """adadelta""" +956 1 training_loop """lcwa""" +956 1 evaluator """rankbased""" +956 2 dataset """kinships""" +956 2 model """tucker""" +956 2 loss """softplus""" +956 2 regularizer """no""" +956 2 optimizer """adadelta""" +956 2 training_loop """lcwa""" +956 2 evaluator """rankbased""" +956 3 dataset """kinships""" +956 3 model """tucker""" +956 3 loss """softplus""" +956 3 regularizer """no""" +956 3 optimizer """adadelta""" +956 3 training_loop """lcwa""" +956 3 evaluator """rankbased""" +956 4 dataset """kinships""" +956 4 model """tucker""" +956 4 loss """softplus""" +956 4 regularizer """no""" +956 4 optimizer """adadelta""" +956 4 training_loop """lcwa""" +956 4 evaluator """rankbased""" +956 5 dataset """kinships""" +956 5 model """tucker""" +956 5 loss """softplus""" +956 5 regularizer """no""" +956 5 optimizer """adadelta""" +956 5 training_loop """lcwa""" +956 5 evaluator """rankbased""" +956 6 dataset """kinships""" +956 6 model """tucker""" +956 6 loss """softplus""" +956 6 regularizer """no""" +956 6 optimizer """adadelta""" +956 6 training_loop """lcwa""" +956 6 evaluator """rankbased""" +956 7 dataset """kinships""" +956 7 model """tucker""" +956 7 loss """softplus""" +956 7 regularizer """no""" +956 7 optimizer """adadelta""" +956 7 training_loop """lcwa""" +956 7 evaluator """rankbased""" +956 8 dataset """kinships""" +956 8 model """tucker""" +956 8 loss """softplus""" +956 8 regularizer """no""" +956 8 optimizer """adadelta""" +956 8 training_loop """lcwa""" +956 8 evaluator """rankbased""" +956 9 dataset """kinships""" +956 9 model """tucker""" +956 9 loss """softplus""" +956 9 regularizer """no""" +956 9 
optimizer """adadelta""" +956 9 training_loop """lcwa""" +956 9 evaluator """rankbased""" +956 10 dataset """kinships""" +956 10 model """tucker""" +956 10 loss """softplus""" +956 10 regularizer """no""" +956 10 optimizer """adadelta""" +956 10 training_loop """lcwa""" +956 10 evaluator """rankbased""" +956 11 dataset """kinships""" +956 11 model """tucker""" +956 11 loss """softplus""" +956 11 regularizer """no""" +956 11 optimizer """adadelta""" +956 11 training_loop """lcwa""" +956 11 evaluator """rankbased""" +956 12 dataset """kinships""" +956 12 model """tucker""" +956 12 loss """softplus""" +956 12 regularizer """no""" +956 12 optimizer """adadelta""" +956 12 training_loop """lcwa""" +956 12 evaluator """rankbased""" +956 13 dataset """kinships""" +956 13 model """tucker""" +956 13 loss """softplus""" +956 13 regularizer """no""" +956 13 optimizer """adadelta""" +956 13 training_loop """lcwa""" +956 13 evaluator """rankbased""" +956 14 dataset """kinships""" +956 14 model """tucker""" +956 14 loss """softplus""" +956 14 regularizer """no""" +956 14 optimizer """adadelta""" +956 14 training_loop """lcwa""" +956 14 evaluator """rankbased""" +956 15 dataset """kinships""" +956 15 model """tucker""" +956 15 loss """softplus""" +956 15 regularizer """no""" +956 15 optimizer """adadelta""" +956 15 training_loop """lcwa""" +956 15 evaluator """rankbased""" +956 16 dataset """kinships""" +956 16 model """tucker""" +956 16 loss """softplus""" +956 16 regularizer """no""" +956 16 optimizer """adadelta""" +956 16 training_loop """lcwa""" +956 16 evaluator """rankbased""" +956 17 dataset """kinships""" +956 17 model """tucker""" +956 17 loss """softplus""" +956 17 regularizer """no""" +956 17 optimizer """adadelta""" +956 17 training_loop """lcwa""" +956 17 evaluator """rankbased""" +956 18 dataset """kinships""" +956 18 model """tucker""" +956 18 loss """softplus""" +956 18 regularizer """no""" +956 18 optimizer """adadelta""" +956 18 training_loop """lcwa""" +956 18 
evaluator """rankbased""" +956 19 dataset """kinships""" +956 19 model """tucker""" +956 19 loss """softplus""" +956 19 regularizer """no""" +956 19 optimizer """adadelta""" +956 19 training_loop """lcwa""" +956 19 evaluator """rankbased""" +956 20 dataset """kinships""" +956 20 model """tucker""" +956 20 loss """softplus""" +956 20 regularizer """no""" +956 20 optimizer """adadelta""" +956 20 training_loop """lcwa""" +956 20 evaluator """rankbased""" +956 21 dataset """kinships""" +956 21 model """tucker""" +956 21 loss """softplus""" +956 21 regularizer """no""" +956 21 optimizer """adadelta""" +956 21 training_loop """lcwa""" +956 21 evaluator """rankbased""" +956 22 dataset """kinships""" +956 22 model """tucker""" +956 22 loss """softplus""" +956 22 regularizer """no""" +956 22 optimizer """adadelta""" +956 22 training_loop """lcwa""" +956 22 evaluator """rankbased""" +956 23 dataset """kinships""" +956 23 model """tucker""" +956 23 loss """softplus""" +956 23 regularizer """no""" +956 23 optimizer """adadelta""" +956 23 training_loop """lcwa""" +956 23 evaluator """rankbased""" +956 24 dataset """kinships""" +956 24 model """tucker""" +956 24 loss """softplus""" +956 24 regularizer """no""" +956 24 optimizer """adadelta""" +956 24 training_loop """lcwa""" +956 24 evaluator """rankbased""" +956 25 dataset """kinships""" +956 25 model """tucker""" +956 25 loss """softplus""" +956 25 regularizer """no""" +956 25 optimizer """adadelta""" +956 25 training_loop """lcwa""" +956 25 evaluator """rankbased""" +956 26 dataset """kinships""" +956 26 model """tucker""" +956 26 loss """softplus""" +956 26 regularizer """no""" +956 26 optimizer """adadelta""" +956 26 training_loop """lcwa""" +956 26 evaluator """rankbased""" +956 27 dataset """kinships""" +956 27 model """tucker""" +956 27 loss """softplus""" +956 27 regularizer """no""" +956 27 optimizer """adadelta""" +956 27 training_loop """lcwa""" +956 27 evaluator """rankbased""" +956 28 dataset """kinships""" +956 28 
model """tucker""" +956 28 loss """softplus""" +956 28 regularizer """no""" +956 28 optimizer """adadelta""" +956 28 training_loop """lcwa""" +956 28 evaluator """rankbased""" +956 29 dataset """kinships""" +956 29 model """tucker""" +956 29 loss """softplus""" +956 29 regularizer """no""" +956 29 optimizer """adadelta""" +956 29 training_loop """lcwa""" +956 29 evaluator """rankbased""" +956 30 dataset """kinships""" +956 30 model """tucker""" +956 30 loss """softplus""" +956 30 regularizer """no""" +956 30 optimizer """adadelta""" +956 30 training_loop """lcwa""" +956 30 evaluator """rankbased""" +956 31 dataset """kinships""" +956 31 model """tucker""" +956 31 loss """softplus""" +956 31 regularizer """no""" +956 31 optimizer """adadelta""" +956 31 training_loop """lcwa""" +956 31 evaluator """rankbased""" +956 32 dataset """kinships""" +956 32 model """tucker""" +956 32 loss """softplus""" +956 32 regularizer """no""" +956 32 optimizer """adadelta""" +956 32 training_loop """lcwa""" +956 32 evaluator """rankbased""" +956 33 dataset """kinships""" +956 33 model """tucker""" +956 33 loss """softplus""" +956 33 regularizer """no""" +956 33 optimizer """adadelta""" +956 33 training_loop """lcwa""" +956 33 evaluator """rankbased""" +956 34 dataset """kinships""" +956 34 model """tucker""" +956 34 loss """softplus""" +956 34 regularizer """no""" +956 34 optimizer """adadelta""" +956 34 training_loop """lcwa""" +956 34 evaluator """rankbased""" +956 35 dataset """kinships""" +956 35 model """tucker""" +956 35 loss """softplus""" +956 35 regularizer """no""" +956 35 optimizer """adadelta""" +956 35 training_loop """lcwa""" +956 35 evaluator """rankbased""" +956 36 dataset """kinships""" +956 36 model """tucker""" +956 36 loss """softplus""" +956 36 regularizer """no""" +956 36 optimizer """adadelta""" +956 36 training_loop """lcwa""" +956 36 evaluator """rankbased""" +956 37 dataset """kinships""" +956 37 model """tucker""" +956 37 loss """softplus""" +956 37 
regularizer """no""" +956 37 optimizer """adadelta""" +956 37 training_loop """lcwa""" +956 37 evaluator """rankbased""" +956 38 dataset """kinships""" +956 38 model """tucker""" +956 38 loss """softplus""" +956 38 regularizer """no""" +956 38 optimizer """adadelta""" +956 38 training_loop """lcwa""" +956 38 evaluator """rankbased""" +956 39 dataset """kinships""" +956 39 model """tucker""" +956 39 loss """softplus""" +956 39 regularizer """no""" +956 39 optimizer """adadelta""" +956 39 training_loop """lcwa""" +956 39 evaluator """rankbased""" +956 40 dataset """kinships""" +956 40 model """tucker""" +956 40 loss """softplus""" +956 40 regularizer """no""" +956 40 optimizer """adadelta""" +956 40 training_loop """lcwa""" +956 40 evaluator """rankbased""" +956 41 dataset """kinships""" +956 41 model """tucker""" +956 41 loss """softplus""" +956 41 regularizer """no""" +956 41 optimizer """adadelta""" +956 41 training_loop """lcwa""" +956 41 evaluator """rankbased""" +956 42 dataset """kinships""" +956 42 model """tucker""" +956 42 loss """softplus""" +956 42 regularizer """no""" +956 42 optimizer """adadelta""" +956 42 training_loop """lcwa""" +956 42 evaluator """rankbased""" +956 43 dataset """kinships""" +956 43 model """tucker""" +956 43 loss """softplus""" +956 43 regularizer """no""" +956 43 optimizer """adadelta""" +956 43 training_loop """lcwa""" +956 43 evaluator """rankbased""" +956 44 dataset """kinships""" +956 44 model """tucker""" +956 44 loss """softplus""" +956 44 regularizer """no""" +956 44 optimizer """adadelta""" +956 44 training_loop """lcwa""" +956 44 evaluator """rankbased""" +956 45 dataset """kinships""" +956 45 model """tucker""" +956 45 loss """softplus""" +956 45 regularizer """no""" +956 45 optimizer """adadelta""" +956 45 training_loop """lcwa""" +956 45 evaluator """rankbased""" +956 46 dataset """kinships""" +956 46 model """tucker""" +956 46 loss """softplus""" +956 46 regularizer """no""" +956 46 optimizer """adadelta""" +956 46 
training_loop """lcwa""" +956 46 evaluator """rankbased""" +956 47 dataset """kinships""" +956 47 model """tucker""" +956 47 loss """softplus""" +956 47 regularizer """no""" +956 47 optimizer """adadelta""" +956 47 training_loop """lcwa""" +956 47 evaluator """rankbased""" +956 48 dataset """kinships""" +956 48 model """tucker""" +956 48 loss """softplus""" +956 48 regularizer """no""" +956 48 optimizer """adadelta""" +956 48 training_loop """lcwa""" +956 48 evaluator """rankbased""" +956 49 dataset """kinships""" +956 49 model """tucker""" +956 49 loss """softplus""" +956 49 regularizer """no""" +956 49 optimizer """adadelta""" +956 49 training_loop """lcwa""" +956 49 evaluator """rankbased""" +956 50 dataset """kinships""" +956 50 model """tucker""" +956 50 loss """softplus""" +956 50 regularizer """no""" +956 50 optimizer """adadelta""" +956 50 training_loop """lcwa""" +956 50 evaluator """rankbased""" +956 51 dataset """kinships""" +956 51 model """tucker""" +956 51 loss """softplus""" +956 51 regularizer """no""" +956 51 optimizer """adadelta""" +956 51 training_loop """lcwa""" +956 51 evaluator """rankbased""" +956 52 dataset """kinships""" +956 52 model """tucker""" +956 52 loss """softplus""" +956 52 regularizer """no""" +956 52 optimizer """adadelta""" +956 52 training_loop """lcwa""" +956 52 evaluator """rankbased""" +956 53 dataset """kinships""" +956 53 model """tucker""" +956 53 loss """softplus""" +956 53 regularizer """no""" +956 53 optimizer """adadelta""" +956 53 training_loop """lcwa""" +956 53 evaluator """rankbased""" +956 54 dataset """kinships""" +956 54 model """tucker""" +956 54 loss """softplus""" +956 54 regularizer """no""" +956 54 optimizer """adadelta""" +956 54 training_loop """lcwa""" +956 54 evaluator """rankbased""" +956 55 dataset """kinships""" +956 55 model """tucker""" +956 55 loss """softplus""" +956 55 regularizer """no""" +956 55 optimizer """adadelta""" +956 55 training_loop """lcwa""" +956 55 evaluator """rankbased""" +956 
56 dataset """kinships""" +956 56 model """tucker""" +956 56 loss """softplus""" +956 56 regularizer """no""" +956 56 optimizer """adadelta""" +956 56 training_loop """lcwa""" +956 56 evaluator """rankbased""" +956 57 dataset """kinships""" +956 57 model """tucker""" +956 57 loss """softplus""" +956 57 regularizer """no""" +956 57 optimizer """adadelta""" +956 57 training_loop """lcwa""" +956 57 evaluator """rankbased""" +956 58 dataset """kinships""" +956 58 model """tucker""" +956 58 loss """softplus""" +956 58 regularizer """no""" +956 58 optimizer """adadelta""" +956 58 training_loop """lcwa""" +956 58 evaluator """rankbased""" +956 59 dataset """kinships""" +956 59 model """tucker""" +956 59 loss """softplus""" +956 59 regularizer """no""" +956 59 optimizer """adadelta""" +956 59 training_loop """lcwa""" +956 59 evaluator """rankbased""" +956 60 dataset """kinships""" +956 60 model """tucker""" +956 60 loss """softplus""" +956 60 regularizer """no""" +956 60 optimizer """adadelta""" +956 60 training_loop """lcwa""" +956 60 evaluator """rankbased""" +956 61 dataset """kinships""" +956 61 model """tucker""" +956 61 loss """softplus""" +956 61 regularizer """no""" +956 61 optimizer """adadelta""" +956 61 training_loop """lcwa""" +956 61 evaluator """rankbased""" +956 62 dataset """kinships""" +956 62 model """tucker""" +956 62 loss """softplus""" +956 62 regularizer """no""" +956 62 optimizer """adadelta""" +956 62 training_loop """lcwa""" +956 62 evaluator """rankbased""" +956 63 dataset """kinships""" +956 63 model """tucker""" +956 63 loss """softplus""" +956 63 regularizer """no""" +956 63 optimizer """adadelta""" +956 63 training_loop """lcwa""" +956 63 evaluator """rankbased""" +956 64 dataset """kinships""" +956 64 model """tucker""" +956 64 loss """softplus""" +956 64 regularizer """no""" +956 64 optimizer """adadelta""" +956 64 training_loop """lcwa""" +956 64 evaluator """rankbased""" +956 65 dataset """kinships""" +956 65 model """tucker""" +956 65 
loss """softplus""" +956 65 regularizer """no""" +956 65 optimizer """adadelta""" +956 65 training_loop """lcwa""" +956 65 evaluator """rankbased""" +956 66 dataset """kinships""" +956 66 model """tucker""" +956 66 loss """softplus""" +956 66 regularizer """no""" +956 66 optimizer """adadelta""" +956 66 training_loop """lcwa""" +956 66 evaluator """rankbased""" +956 67 dataset """kinships""" +956 67 model """tucker""" +956 67 loss """softplus""" +956 67 regularizer """no""" +956 67 optimizer """adadelta""" +956 67 training_loop """lcwa""" +956 67 evaluator """rankbased""" +956 68 dataset """kinships""" +956 68 model """tucker""" +956 68 loss """softplus""" +956 68 regularizer """no""" +956 68 optimizer """adadelta""" +956 68 training_loop """lcwa""" +956 68 evaluator """rankbased""" +956 69 dataset """kinships""" +956 69 model """tucker""" +956 69 loss """softplus""" +956 69 regularizer """no""" +956 69 optimizer """adadelta""" +956 69 training_loop """lcwa""" +956 69 evaluator """rankbased""" +956 70 dataset """kinships""" +956 70 model """tucker""" +956 70 loss """softplus""" +956 70 regularizer """no""" +956 70 optimizer """adadelta""" +956 70 training_loop """lcwa""" +956 70 evaluator """rankbased""" +956 71 dataset """kinships""" +956 71 model """tucker""" +956 71 loss """softplus""" +956 71 regularizer """no""" +956 71 optimizer """adadelta""" +956 71 training_loop """lcwa""" +956 71 evaluator """rankbased""" +956 72 dataset """kinships""" +956 72 model """tucker""" +956 72 loss """softplus""" +956 72 regularizer """no""" +956 72 optimizer """adadelta""" +956 72 training_loop """lcwa""" +956 72 evaluator """rankbased""" +956 73 dataset """kinships""" +956 73 model """tucker""" +956 73 loss """softplus""" +956 73 regularizer """no""" +956 73 optimizer """adadelta""" +956 73 training_loop """lcwa""" +956 73 evaluator """rankbased""" +956 74 dataset """kinships""" +956 74 model """tucker""" +956 74 loss """softplus""" +956 74 regularizer """no""" +956 74 
optimizer """adadelta""" +956 74 training_loop """lcwa""" +956 74 evaluator """rankbased""" +956 75 dataset """kinships""" +956 75 model """tucker""" +956 75 loss """softplus""" +956 75 regularizer """no""" +956 75 optimizer """adadelta""" +956 75 training_loop """lcwa""" +956 75 evaluator """rankbased""" +956 76 dataset """kinships""" +956 76 model """tucker""" +956 76 loss """softplus""" +956 76 regularizer """no""" +956 76 optimizer """adadelta""" +956 76 training_loop """lcwa""" +956 76 evaluator """rankbased""" +956 77 dataset """kinships""" +956 77 model """tucker""" +956 77 loss """softplus""" +956 77 regularizer """no""" +956 77 optimizer """adadelta""" +956 77 training_loop """lcwa""" +956 77 evaluator """rankbased""" +956 78 dataset """kinships""" +956 78 model """tucker""" +956 78 loss """softplus""" +956 78 regularizer """no""" +956 78 optimizer """adadelta""" +956 78 training_loop """lcwa""" +956 78 evaluator """rankbased""" +956 79 dataset """kinships""" +956 79 model """tucker""" +956 79 loss """softplus""" +956 79 regularizer """no""" +956 79 optimizer """adadelta""" +956 79 training_loop """lcwa""" +956 79 evaluator """rankbased""" +956 80 dataset """kinships""" +956 80 model """tucker""" +956 80 loss """softplus""" +956 80 regularizer """no""" +956 80 optimizer """adadelta""" +956 80 training_loop """lcwa""" +956 80 evaluator """rankbased""" +956 81 dataset """kinships""" +956 81 model """tucker""" +956 81 loss """softplus""" +956 81 regularizer """no""" +956 81 optimizer """adadelta""" +956 81 training_loop """lcwa""" +956 81 evaluator """rankbased""" +956 82 dataset """kinships""" +956 82 model """tucker""" +956 82 loss """softplus""" +956 82 regularizer """no""" +956 82 optimizer """adadelta""" +956 82 training_loop """lcwa""" +956 82 evaluator """rankbased""" +956 83 dataset """kinships""" +956 83 model """tucker""" +956 83 loss """softplus""" +956 83 regularizer """no""" +956 83 optimizer """adadelta""" +956 83 training_loop """lcwa""" +956 
83 evaluator """rankbased""" +956 84 dataset """kinships""" +956 84 model """tucker""" +956 84 loss """softplus""" +956 84 regularizer """no""" +956 84 optimizer """adadelta""" +956 84 training_loop """lcwa""" +956 84 evaluator """rankbased""" +956 85 dataset """kinships""" +956 85 model """tucker""" +956 85 loss """softplus""" +956 85 regularizer """no""" +956 85 optimizer """adadelta""" +956 85 training_loop """lcwa""" +956 85 evaluator """rankbased""" +956 86 dataset """kinships""" +956 86 model """tucker""" +956 86 loss """softplus""" +956 86 regularizer """no""" +956 86 optimizer """adadelta""" +956 86 training_loop """lcwa""" +956 86 evaluator """rankbased""" +956 87 dataset """kinships""" +956 87 model """tucker""" +956 87 loss """softplus""" +956 87 regularizer """no""" +956 87 optimizer """adadelta""" +956 87 training_loop """lcwa""" +956 87 evaluator """rankbased""" +956 88 dataset """kinships""" +956 88 model """tucker""" +956 88 loss """softplus""" +956 88 regularizer """no""" +956 88 optimizer """adadelta""" +956 88 training_loop """lcwa""" +956 88 evaluator """rankbased""" +956 89 dataset """kinships""" +956 89 model """tucker""" +956 89 loss """softplus""" +956 89 regularizer """no""" +956 89 optimizer """adadelta""" +956 89 training_loop """lcwa""" +956 89 evaluator """rankbased""" +956 90 dataset """kinships""" +956 90 model """tucker""" +956 90 loss """softplus""" +956 90 regularizer """no""" +956 90 optimizer """adadelta""" +956 90 training_loop """lcwa""" +956 90 evaluator """rankbased""" +956 91 dataset """kinships""" +956 91 model """tucker""" +956 91 loss """softplus""" +956 91 regularizer """no""" +956 91 optimizer """adadelta""" +956 91 training_loop """lcwa""" +956 91 evaluator """rankbased""" +956 92 dataset """kinships""" +956 92 model """tucker""" +956 92 loss """softplus""" +956 92 regularizer """no""" +956 92 optimizer """adadelta""" +956 92 training_loop """lcwa""" +956 92 evaluator """rankbased""" +956 93 dataset """kinships""" +956 
93 model """tucker""" +956 93 loss """softplus""" +956 93 regularizer """no""" +956 93 optimizer """adadelta""" +956 93 training_loop """lcwa""" +956 93 evaluator """rankbased""" +956 94 dataset """kinships""" +956 94 model """tucker""" +956 94 loss """softplus""" +956 94 regularizer """no""" +956 94 optimizer """adadelta""" +956 94 training_loop """lcwa""" +956 94 evaluator """rankbased""" +956 95 dataset """kinships""" +956 95 model """tucker""" +956 95 loss """softplus""" +956 95 regularizer """no""" +956 95 optimizer """adadelta""" +956 95 training_loop """lcwa""" +956 95 evaluator """rankbased""" +956 96 dataset """kinships""" +956 96 model """tucker""" +956 96 loss """softplus""" +956 96 regularizer """no""" +956 96 optimizer """adadelta""" +956 96 training_loop """lcwa""" +956 96 evaluator """rankbased""" +956 97 dataset """kinships""" +956 97 model """tucker""" +956 97 loss """softplus""" +956 97 regularizer """no""" +956 97 optimizer """adadelta""" +956 97 training_loop """lcwa""" +956 97 evaluator """rankbased""" +956 98 dataset """kinships""" +956 98 model """tucker""" +956 98 loss """softplus""" +956 98 regularizer """no""" +956 98 optimizer """adadelta""" +956 98 training_loop """lcwa""" +956 98 evaluator """rankbased""" +956 99 dataset """kinships""" +956 99 model """tucker""" +956 99 loss """softplus""" +956 99 regularizer """no""" +956 99 optimizer """adadelta""" +956 99 training_loop """lcwa""" +956 99 evaluator """rankbased""" +956 100 dataset """kinships""" +956 100 model """tucker""" +956 100 loss """softplus""" +956 100 regularizer """no""" +956 100 optimizer """adadelta""" +956 100 training_loop """lcwa""" +956 100 evaluator """rankbased""" +957 1 model.embedding_dim 2.0 +957 1 model.relation_dim 2.0 +957 1 model.dropout_0 0.40899030923286606 +957 1 model.dropout_1 0.35143310103158054 +957 1 model.dropout_2 0.24199612017967354 +957 1 training.batch_size 2.0 +957 1 training.label_smoothing 0.008961363123150662 +957 2 model.embedding_dim 0.0 
+957 2 model.relation_dim 1.0 +957 2 model.dropout_0 0.4622239311702825 +957 2 model.dropout_1 0.3348498559749039 +957 2 model.dropout_2 0.13597836771671462 +957 2 training.batch_size 2.0 +957 2 training.label_smoothing 0.9252361968502995 +957 3 model.embedding_dim 1.0 +957 3 model.relation_dim 0.0 +957 3 model.dropout_0 0.43964889237314464 +957 3 model.dropout_1 0.47232854438878424 +957 3 model.dropout_2 0.42238997247590393 +957 3 training.batch_size 0.0 +957 3 training.label_smoothing 0.4244999459552034 +957 4 model.embedding_dim 0.0 +957 4 model.relation_dim 2.0 +957 4 model.dropout_0 0.3626019962364847 +957 4 model.dropout_1 0.41979627157341937 +957 4 model.dropout_2 0.4011825565283621 +957 4 training.batch_size 2.0 +957 4 training.label_smoothing 0.004964016986316896 +957 5 model.embedding_dim 1.0 +957 5 model.relation_dim 2.0 +957 5 model.dropout_0 0.22249411488408294 +957 5 model.dropout_1 0.20639444716485147 +957 5 model.dropout_2 0.25016679881965287 +957 5 training.batch_size 1.0 +957 5 training.label_smoothing 0.02682410719721809 +957 6 model.embedding_dim 1.0 +957 6 model.relation_dim 0.0 +957 6 model.dropout_0 0.16647018242806055 +957 6 model.dropout_1 0.1482009532669209 +957 6 model.dropout_2 0.25866985212468185 +957 6 training.batch_size 1.0 +957 6 training.label_smoothing 0.0514993358268688 +957 7 model.embedding_dim 2.0 +957 7 model.relation_dim 2.0 +957 7 model.dropout_0 0.4197392029857221 +957 7 model.dropout_1 0.14745687954998524 +957 7 model.dropout_2 0.38956209256617247 +957 7 training.batch_size 0.0 +957 7 training.label_smoothing 0.01858166489474526 +957 8 model.embedding_dim 2.0 +957 8 model.relation_dim 0.0 +957 8 model.dropout_0 0.29132357400940034 +957 8 model.dropout_1 0.15931966071744808 +957 8 model.dropout_2 0.21350454597513613 +957 8 training.batch_size 1.0 +957 8 training.label_smoothing 0.01983986848486683 +957 9 model.embedding_dim 1.0 +957 9 model.relation_dim 1.0 +957 9 model.dropout_0 0.2625062881708623 +957 9 model.dropout_1 
0.3264637410482823 +957 9 model.dropout_2 0.3071113932274052 +957 9 training.batch_size 2.0 +957 9 training.label_smoothing 0.7429598640559915 +957 10 model.embedding_dim 0.0 +957 10 model.relation_dim 0.0 +957 10 model.dropout_0 0.34576229558464844 +957 10 model.dropout_1 0.42524964484255623 +957 10 model.dropout_2 0.15490768479406875 +957 10 training.batch_size 2.0 +957 10 training.label_smoothing 0.01219731055800299 +957 11 model.embedding_dim 0.0 +957 11 model.relation_dim 2.0 +957 11 model.dropout_0 0.1962152715487362 +957 11 model.dropout_1 0.38172496291919866 +957 11 model.dropout_2 0.40484427604666884 +957 11 training.batch_size 2.0 +957 11 training.label_smoothing 0.05134104764539803 +957 12 model.embedding_dim 1.0 +957 12 model.relation_dim 1.0 +957 12 model.dropout_0 0.20559535249715843 +957 12 model.dropout_1 0.3416723885883182 +957 12 model.dropout_2 0.33816797772210083 +957 12 training.batch_size 0.0 +957 12 training.label_smoothing 0.0028284715168754287 +957 13 model.embedding_dim 2.0 +957 13 model.relation_dim 0.0 +957 13 model.dropout_0 0.2373974930270519 +957 13 model.dropout_1 0.308337058362447 +957 13 model.dropout_2 0.16795162810573264 +957 13 training.batch_size 0.0 +957 13 training.label_smoothing 0.06500881408315776 +957 14 model.embedding_dim 2.0 +957 14 model.relation_dim 0.0 +957 14 model.dropout_0 0.3767692444220151 +957 14 model.dropout_1 0.28111767514867947 +957 14 model.dropout_2 0.14533986204681823 +957 14 training.batch_size 2.0 +957 14 training.label_smoothing 0.011460948228886296 +957 15 model.embedding_dim 1.0 +957 15 model.relation_dim 2.0 +957 15 model.dropout_0 0.48063330418606054 +957 15 model.dropout_1 0.3293041403263809 +957 15 model.dropout_2 0.13632380556436643 +957 15 training.batch_size 2.0 +957 15 training.label_smoothing 0.08735446958857128 +957 16 model.embedding_dim 1.0 +957 16 model.relation_dim 1.0 +957 16 model.dropout_0 0.3830650304356773 +957 16 model.dropout_1 0.10505223130840867 +957 16 model.dropout_2 
0.41389728635774137 +957 16 training.batch_size 1.0 +957 16 training.label_smoothing 0.0030855561694010224 +957 17 model.embedding_dim 1.0 +957 17 model.relation_dim 0.0 +957 17 model.dropout_0 0.32680246313258066 +957 17 model.dropout_1 0.13934812390834128 +957 17 model.dropout_2 0.47537291637635104 +957 17 training.batch_size 2.0 +957 17 training.label_smoothing 0.04206032658392874 +957 18 model.embedding_dim 0.0 +957 18 model.relation_dim 2.0 +957 18 model.dropout_0 0.34773652932985777 +957 18 model.dropout_1 0.19159006947965462 +957 18 model.dropout_2 0.48388911310334015 +957 18 training.batch_size 1.0 +957 18 training.label_smoothing 0.04593988412199533 +957 19 model.embedding_dim 0.0 +957 19 model.relation_dim 2.0 +957 19 model.dropout_0 0.4685864997809198 +957 19 model.dropout_1 0.23330266607063838 +957 19 model.dropout_2 0.4905722094714007 +957 19 training.batch_size 2.0 +957 19 training.label_smoothing 0.00853563266590822 +957 20 model.embedding_dim 1.0 +957 20 model.relation_dim 1.0 +957 20 model.dropout_0 0.23799984484836176 +957 20 model.dropout_1 0.20411194671543142 +957 20 model.dropout_2 0.3527792475199373 +957 20 training.batch_size 1.0 +957 20 training.label_smoothing 0.19768755422313922 +957 21 model.embedding_dim 1.0 +957 21 model.relation_dim 0.0 +957 21 model.dropout_0 0.39521826351926115 +957 21 model.dropout_1 0.4187892165101822 +957 21 model.dropout_2 0.4028868153491958 +957 21 training.batch_size 1.0 +957 21 training.label_smoothing 0.013027764868591871 +957 22 model.embedding_dim 2.0 +957 22 model.relation_dim 1.0 +957 22 model.dropout_0 0.28818796431720173 +957 22 model.dropout_1 0.12275378722980182 +957 22 model.dropout_2 0.40035604888941945 +957 22 training.batch_size 2.0 +957 22 training.label_smoothing 0.016456992050237387 +957 23 model.embedding_dim 2.0 +957 23 model.relation_dim 1.0 +957 23 model.dropout_0 0.2481890510052102 +957 23 model.dropout_1 0.22099648826555618 +957 23 model.dropout_2 0.48632001963851124 +957 23 
training.batch_size 2.0 +957 23 training.label_smoothing 0.12019349001416807 +957 24 model.embedding_dim 1.0 +957 24 model.relation_dim 2.0 +957 24 model.dropout_0 0.4872600242473165 +957 24 model.dropout_1 0.1875960362093536 +957 24 model.dropout_2 0.4005918884348353 +957 24 training.batch_size 0.0 +957 24 training.label_smoothing 0.006491582415063939 +957 25 model.embedding_dim 2.0 +957 25 model.relation_dim 2.0 +957 25 model.dropout_0 0.47792426441381075 +957 25 model.dropout_1 0.1408386990299885 +957 25 model.dropout_2 0.49047838897923346 +957 25 training.batch_size 1.0 +957 25 training.label_smoothing 0.002079096583860846 +957 26 model.embedding_dim 1.0 +957 26 model.relation_dim 0.0 +957 26 model.dropout_0 0.32179936712426327 +957 26 model.dropout_1 0.10201192653337664 +957 26 model.dropout_2 0.4874153461275912 +957 26 training.batch_size 0.0 +957 26 training.label_smoothing 0.002202633743335919 +957 27 model.embedding_dim 1.0 +957 27 model.relation_dim 0.0 +957 27 model.dropout_0 0.3842158829976722 +957 27 model.dropout_1 0.17277644271558656 +957 27 model.dropout_2 0.3556287221308895 +957 27 training.batch_size 1.0 +957 27 training.label_smoothing 0.014547662874563485 +957 28 model.embedding_dim 2.0 +957 28 model.relation_dim 0.0 +957 28 model.dropout_0 0.4466416821798873 +957 28 model.dropout_1 0.34283529119263023 +957 28 model.dropout_2 0.40406718182276613 +957 28 training.batch_size 0.0 +957 28 training.label_smoothing 0.0015384530363856184 +957 29 model.embedding_dim 2.0 +957 29 model.relation_dim 2.0 +957 29 model.dropout_0 0.2825961221390758 +957 29 model.dropout_1 0.3087776806218484 +957 29 model.dropout_2 0.24379782313961804 +957 29 training.batch_size 2.0 +957 29 training.label_smoothing 0.007092566721691192 +957 30 model.embedding_dim 0.0 +957 30 model.relation_dim 1.0 +957 30 model.dropout_0 0.2631165861748762 +957 30 model.dropout_1 0.48435508145735484 +957 30 model.dropout_2 0.44611734202527387 +957 30 training.batch_size 0.0 +957 30 
training.label_smoothing 0.012405361450994698 +957 31 model.embedding_dim 2.0 +957 31 model.relation_dim 1.0 +957 31 model.dropout_0 0.43143635748633247 +957 31 model.dropout_1 0.19729451508064189 +957 31 model.dropout_2 0.1044110044968384 +957 31 training.batch_size 0.0 +957 31 training.label_smoothing 0.0011677553435977533 +957 32 model.embedding_dim 1.0 +957 32 model.relation_dim 2.0 +957 32 model.dropout_0 0.3477513170931615 +957 32 model.dropout_1 0.14375065532141118 +957 32 model.dropout_2 0.4331635017161847 +957 32 training.batch_size 2.0 +957 32 training.label_smoothing 0.2469764012226862 +957 33 model.embedding_dim 2.0 +957 33 model.relation_dim 0.0 +957 33 model.dropout_0 0.32501699782783483 +957 33 model.dropout_1 0.27990619963471164 +957 33 model.dropout_2 0.1907328922807738 +957 33 training.batch_size 0.0 +957 33 training.label_smoothing 0.30760728962928446 +957 34 model.embedding_dim 1.0 +957 34 model.relation_dim 2.0 +957 34 model.dropout_0 0.42721208455858417 +957 34 model.dropout_1 0.2416043092807295 +957 34 model.dropout_2 0.4010384559990964 +957 34 training.batch_size 2.0 +957 34 training.label_smoothing 0.00572381721726542 +957 35 model.embedding_dim 2.0 +957 35 model.relation_dim 2.0 +957 35 model.dropout_0 0.31229645892577595 +957 35 model.dropout_1 0.4176752115804653 +957 35 model.dropout_2 0.3790878588179313 +957 35 training.batch_size 1.0 +957 35 training.label_smoothing 0.008185872372303147 +957 36 model.embedding_dim 2.0 +957 36 model.relation_dim 1.0 +957 36 model.dropout_0 0.371153532206952 +957 36 model.dropout_1 0.3245266450280202 +957 36 model.dropout_2 0.37570848277743374 +957 36 training.batch_size 2.0 +957 36 training.label_smoothing 0.12321446945670761 +957 37 model.embedding_dim 1.0 +957 37 model.relation_dim 0.0 +957 37 model.dropout_0 0.4527161181897864 +957 37 model.dropout_1 0.3848863843871875 +957 37 model.dropout_2 0.4944349725490912 +957 37 training.batch_size 1.0 +957 37 training.label_smoothing 0.008694685864161281 +957 
38 model.embedding_dim 2.0 +957 38 model.relation_dim 1.0 +957 38 model.dropout_0 0.3373784703490247 +957 38 model.dropout_1 0.2800282141927243 +957 38 model.dropout_2 0.48039953607814523 +957 38 training.batch_size 1.0 +957 38 training.label_smoothing 0.011804009620612656 +957 39 model.embedding_dim 0.0 +957 39 model.relation_dim 2.0 +957 39 model.dropout_0 0.1402847898061008 +957 39 model.dropout_1 0.4224981108664456 +957 39 model.dropout_2 0.4921675177496616 +957 39 training.batch_size 0.0 +957 39 training.label_smoothing 0.003472367304288611 +957 40 model.embedding_dim 2.0 +957 40 model.relation_dim 0.0 +957 40 model.dropout_0 0.2690379178325618 +957 40 model.dropout_1 0.4027861268040326 +957 40 model.dropout_2 0.3561905298856566 +957 40 training.batch_size 0.0 +957 40 training.label_smoothing 0.06381791689196502 +957 41 model.embedding_dim 0.0 +957 41 model.relation_dim 1.0 +957 41 model.dropout_0 0.2721314835037537 +957 41 model.dropout_1 0.16252298592013814 +957 41 model.dropout_2 0.47068314864561567 +957 41 training.batch_size 1.0 +957 41 training.label_smoothing 0.04209098409346888 +957 42 model.embedding_dim 0.0 +957 42 model.relation_dim 1.0 +957 42 model.dropout_0 0.37220738278295196 +957 42 model.dropout_1 0.19865737547949058 +957 42 model.dropout_2 0.47476064503203386 +957 42 training.batch_size 2.0 +957 42 training.label_smoothing 0.0022605134245558306 +957 43 model.embedding_dim 2.0 +957 43 model.relation_dim 2.0 +957 43 model.dropout_0 0.37823540306179326 +957 43 model.dropout_1 0.2218789423804653 +957 43 model.dropout_2 0.45482069561028327 +957 43 training.batch_size 2.0 +957 43 training.label_smoothing 0.0047141045901732525 +957 44 model.embedding_dim 2.0 +957 44 model.relation_dim 0.0 +957 44 model.dropout_0 0.17295111276594094 +957 44 model.dropout_1 0.2829005997582932 +957 44 model.dropout_2 0.4120783762640783 +957 44 training.batch_size 0.0 +957 44 training.label_smoothing 0.0060567585766782076 +957 45 model.embedding_dim 2.0 +957 45 
model.relation_dim 1.0 +957 45 model.dropout_0 0.23161433469796375 +957 45 model.dropout_1 0.3906162572730211 +957 45 model.dropout_2 0.33827975078838357 +957 45 training.batch_size 1.0 +957 45 training.label_smoothing 0.03898628256580985 +957 46 model.embedding_dim 2.0 +957 46 model.relation_dim 0.0 +957 46 model.dropout_0 0.11289809491935628 +957 46 model.dropout_1 0.4393425696258939 +957 46 model.dropout_2 0.49055824151905036 +957 46 training.batch_size 2.0 +957 46 training.label_smoothing 0.20849572298802038 +957 47 model.embedding_dim 0.0 +957 47 model.relation_dim 2.0 +957 47 model.dropout_0 0.2760062406044521 +957 47 model.dropout_1 0.13569418820187096 +957 47 model.dropout_2 0.46326735518665085 +957 47 training.batch_size 1.0 +957 47 training.label_smoothing 0.04664636363590839 +957 48 model.embedding_dim 0.0 +957 48 model.relation_dim 1.0 +957 48 model.dropout_0 0.4242573164247575 +957 48 model.dropout_1 0.4146296352335891 +957 48 model.dropout_2 0.2267906244980018 +957 48 training.batch_size 0.0 +957 48 training.label_smoothing 0.0016136642777723643 +957 49 model.embedding_dim 1.0 +957 49 model.relation_dim 1.0 +957 49 model.dropout_0 0.23144947032788166 +957 49 model.dropout_1 0.37532715366233493 +957 49 model.dropout_2 0.45237063880438755 +957 49 training.batch_size 1.0 +957 49 training.label_smoothing 0.013977080421348018 +957 50 model.embedding_dim 2.0 +957 50 model.relation_dim 2.0 +957 50 model.dropout_0 0.3093120181457325 +957 50 model.dropout_1 0.15525466815902966 +957 50 model.dropout_2 0.29386609361684934 +957 50 training.batch_size 0.0 +957 50 training.label_smoothing 0.00754565683035722 +957 51 model.embedding_dim 0.0 +957 51 model.relation_dim 1.0 +957 51 model.dropout_0 0.4794991370866559 +957 51 model.dropout_1 0.17969799176882548 +957 51 model.dropout_2 0.29243262413852955 +957 51 training.batch_size 2.0 +957 51 training.label_smoothing 0.20972657237062994 +957 52 model.embedding_dim 1.0 +957 52 model.relation_dim 0.0 +957 52 
model.dropout_0 0.2602861418793647 +957 52 model.dropout_1 0.13062620789421572 +957 52 model.dropout_2 0.30287020630704853 +957 52 training.batch_size 1.0 +957 52 training.label_smoothing 0.20796063140795668 +957 53 model.embedding_dim 1.0 +957 53 model.relation_dim 2.0 +957 53 model.dropout_0 0.4315771184884344 +957 53 model.dropout_1 0.33639107753045394 +957 53 model.dropout_2 0.41138725897288053 +957 53 training.batch_size 0.0 +957 53 training.label_smoothing 0.34717963969117077 +957 54 model.embedding_dim 1.0 +957 54 model.relation_dim 0.0 +957 54 model.dropout_0 0.13053493373396724 +957 54 model.dropout_1 0.20508805646571227 +957 54 model.dropout_2 0.28610693281474653 +957 54 training.batch_size 1.0 +957 54 training.label_smoothing 0.06536652299413848 +957 55 model.embedding_dim 2.0 +957 55 model.relation_dim 0.0 +957 55 model.dropout_0 0.10113096573373559 +957 55 model.dropout_1 0.10824385734326324 +957 55 model.dropout_2 0.28102418764761916 +957 55 training.batch_size 0.0 +957 55 training.label_smoothing 0.020568954048436092 +957 56 model.embedding_dim 1.0 +957 56 model.relation_dim 2.0 +957 56 model.dropout_0 0.26749776775189443 +957 56 model.dropout_1 0.32353159348474014 +957 56 model.dropout_2 0.48299820759883505 +957 56 training.batch_size 2.0 +957 56 training.label_smoothing 0.005476196450003983 +957 57 model.embedding_dim 0.0 +957 57 model.relation_dim 1.0 +957 57 model.dropout_0 0.10628638797210668 +957 57 model.dropout_1 0.4965751635299952 +957 57 model.dropout_2 0.17138927574505206 +957 57 training.batch_size 0.0 +957 57 training.label_smoothing 0.16218363414613274 +957 58 model.embedding_dim 1.0 +957 58 model.relation_dim 0.0 +957 58 model.dropout_0 0.16947298398371924 +957 58 model.dropout_1 0.37767187201802144 +957 58 model.dropout_2 0.41790760169412233 +957 58 training.batch_size 0.0 +957 58 training.label_smoothing 0.18458841429864944 +957 59 model.embedding_dim 1.0 +957 59 model.relation_dim 1.0 +957 59 model.dropout_0 0.33575085413913464 +957 
59 model.dropout_1 0.33692468731939507 +957 59 model.dropout_2 0.34489420126888837 +957 59 training.batch_size 0.0 +957 59 training.label_smoothing 0.051798435888198976 +957 60 model.embedding_dim 2.0 +957 60 model.relation_dim 2.0 +957 60 model.dropout_0 0.11344246814723698 +957 60 model.dropout_1 0.18449155348839544 +957 60 model.dropout_2 0.12302841623904032 +957 60 training.batch_size 0.0 +957 60 training.label_smoothing 0.017037684587196747 +957 61 model.embedding_dim 0.0 +957 61 model.relation_dim 2.0 +957 61 model.dropout_0 0.47110510200631495 +957 61 model.dropout_1 0.14809314447206284 +957 61 model.dropout_2 0.350210288954253 +957 61 training.batch_size 0.0 +957 61 training.label_smoothing 0.15124948738472666 +957 62 model.embedding_dim 0.0 +957 62 model.relation_dim 2.0 +957 62 model.dropout_0 0.4447166866142329 +957 62 model.dropout_1 0.3721341698442848 +957 62 model.dropout_2 0.37012424458196147 +957 62 training.batch_size 0.0 +957 62 training.label_smoothing 0.06604289960302272 +957 63 model.embedding_dim 2.0 +957 63 model.relation_dim 2.0 +957 63 model.dropout_0 0.20285359757674934 +957 63 model.dropout_1 0.2265550030954806 +957 63 model.dropout_2 0.23886752654385668 +957 63 training.batch_size 0.0 +957 63 training.label_smoothing 0.058166055749365606 +957 64 model.embedding_dim 2.0 +957 64 model.relation_dim 1.0 +957 64 model.dropout_0 0.2822494448152198 +957 64 model.dropout_1 0.28754376464196346 +957 64 model.dropout_2 0.17046452203480655 +957 64 training.batch_size 2.0 +957 64 training.label_smoothing 0.054202804392981106 +957 65 model.embedding_dim 2.0 +957 65 model.relation_dim 1.0 +957 65 model.dropout_0 0.29555416102939996 +957 65 model.dropout_1 0.3353987756098069 +957 65 model.dropout_2 0.2839947547583539 +957 65 training.batch_size 0.0 +957 65 training.label_smoothing 0.23871638217199517 +957 66 model.embedding_dim 1.0 +957 66 model.relation_dim 0.0 +957 66 model.dropout_0 0.20551498099160037 +957 66 model.dropout_1 0.26321494998537137 +957 
66 model.dropout_2 0.17971855903274842 +957 66 training.batch_size 2.0 +957 66 training.label_smoothing 0.23653247463195526 +957 67 model.embedding_dim 0.0 +957 67 model.relation_dim 1.0 +957 67 model.dropout_0 0.2674621115527813 +957 67 model.dropout_1 0.4256378291342432 +957 67 model.dropout_2 0.3669892884217184 +957 67 training.batch_size 1.0 +957 67 training.label_smoothing 0.002835449845945607 +957 68 model.embedding_dim 2.0 +957 68 model.relation_dim 2.0 +957 68 model.dropout_0 0.10636649241682755 +957 68 model.dropout_1 0.4365880538451391 +957 68 model.dropout_2 0.42886766772263063 +957 68 training.batch_size 2.0 +957 68 training.label_smoothing 0.06379301566125721 +957 69 model.embedding_dim 2.0 +957 69 model.relation_dim 1.0 +957 69 model.dropout_0 0.42717954507570066 +957 69 model.dropout_1 0.16647169309026782 +957 69 model.dropout_2 0.4181668881422689 +957 69 training.batch_size 2.0 +957 69 training.label_smoothing 0.005518145580658702 +957 70 model.embedding_dim 0.0 +957 70 model.relation_dim 2.0 +957 70 model.dropout_0 0.370718974169316 +957 70 model.dropout_1 0.18999746266957873 +957 70 model.dropout_2 0.20818273147688063 +957 70 training.batch_size 0.0 +957 70 training.label_smoothing 0.001500343784968111 +957 71 model.embedding_dim 0.0 +957 71 model.relation_dim 2.0 +957 71 model.dropout_0 0.12511931901568893 +957 71 model.dropout_1 0.37860203947254334 +957 71 model.dropout_2 0.48686792640586585 +957 71 training.batch_size 2.0 +957 71 training.label_smoothing 0.0030438128242197557 +957 72 model.embedding_dim 1.0 +957 72 model.relation_dim 2.0 +957 72 model.dropout_0 0.25848252462076615 +957 72 model.dropout_1 0.27355056852858584 +957 72 model.dropout_2 0.3894741756766288 +957 72 training.batch_size 0.0 +957 72 training.label_smoothing 0.010472869291883251 +957 73 model.embedding_dim 0.0 +957 73 model.relation_dim 1.0 +957 73 model.dropout_0 0.24258956721331282 +957 73 model.dropout_1 0.24630809694524178 +957 73 model.dropout_2 0.38423155441171286 
+957 73 training.batch_size 2.0 +957 73 training.label_smoothing 0.5344092185240764 +957 74 model.embedding_dim 0.0 +957 74 model.relation_dim 1.0 +957 74 model.dropout_0 0.20334670340957164 +957 74 model.dropout_1 0.2855377897007217 +957 74 model.dropout_2 0.29203910654553367 +957 74 training.batch_size 1.0 +957 74 training.label_smoothing 0.046185751910438236 +957 75 model.embedding_dim 2.0 +957 75 model.relation_dim 0.0 +957 75 model.dropout_0 0.4413249966467234 +957 75 model.dropout_1 0.41222420984494224 +957 75 model.dropout_2 0.40117239956170714 +957 75 training.batch_size 2.0 +957 75 training.label_smoothing 0.058696433983319696 +957 76 model.embedding_dim 0.0 +957 76 model.relation_dim 1.0 +957 76 model.dropout_0 0.19459471146710494 +957 76 model.dropout_1 0.3530648653347239 +957 76 model.dropout_2 0.3604486130016312 +957 76 training.batch_size 0.0 +957 76 training.label_smoothing 0.008305067414757271 +957 77 model.embedding_dim 2.0 +957 77 model.relation_dim 2.0 +957 77 model.dropout_0 0.1484529981166691 +957 77 model.dropout_1 0.10213881635715111 +957 77 model.dropout_2 0.2611879845330545 +957 77 training.batch_size 0.0 +957 77 training.label_smoothing 0.03726745050114042 +957 78 model.embedding_dim 1.0 +957 78 model.relation_dim 1.0 +957 78 model.dropout_0 0.29578672680702467 +957 78 model.dropout_1 0.44750483524420626 +957 78 model.dropout_2 0.3990958762124397 +957 78 training.batch_size 2.0 +957 78 training.label_smoothing 0.0013786615507382079 +957 79 model.embedding_dim 1.0 +957 79 model.relation_dim 2.0 +957 79 model.dropout_0 0.46977622247210626 +957 79 model.dropout_1 0.13481207074443946 +957 79 model.dropout_2 0.2867444429559621 +957 79 training.batch_size 2.0 +957 79 training.label_smoothing 0.36453085232728355 +957 80 model.embedding_dim 2.0 +957 80 model.relation_dim 2.0 +957 80 model.dropout_0 0.1013645896599531 +957 80 model.dropout_1 0.44911249653038077 +957 80 model.dropout_2 0.26487838591669766 +957 80 training.batch_size 2.0 +957 80 
training.label_smoothing 0.0023036033991146844 +957 81 model.embedding_dim 1.0 +957 81 model.relation_dim 2.0 +957 81 model.dropout_0 0.3302116651151772 +957 81 model.dropout_1 0.452746883840759 +957 81 model.dropout_2 0.3331259389182209 +957 81 training.batch_size 2.0 +957 81 training.label_smoothing 0.08671770240658035 +957 82 model.embedding_dim 2.0 +957 82 model.relation_dim 2.0 +957 82 model.dropout_0 0.1921664283894789 +957 82 model.dropout_1 0.43902413742297963 +957 82 model.dropout_2 0.14258079306822613 +957 82 training.batch_size 2.0 +957 82 training.label_smoothing 0.7773218221416156 +957 83 model.embedding_dim 1.0 +957 83 model.relation_dim 1.0 +957 83 model.dropout_0 0.45184639445430963 +957 83 model.dropout_1 0.21742213470457916 +957 83 model.dropout_2 0.23635252959112016 +957 83 training.batch_size 0.0 +957 83 training.label_smoothing 0.06892235964151398 +957 84 model.embedding_dim 0.0 +957 84 model.relation_dim 2.0 +957 84 model.dropout_0 0.2566575896991471 +957 84 model.dropout_1 0.2340019537474699 +957 84 model.dropout_2 0.16487036899941548 +957 84 training.batch_size 0.0 +957 84 training.label_smoothing 0.13229416062110724 +957 85 model.embedding_dim 2.0 +957 85 model.relation_dim 0.0 +957 85 model.dropout_0 0.48317160258962644 +957 85 model.dropout_1 0.33764480878842723 +957 85 model.dropout_2 0.3939584113629899 +957 85 training.batch_size 0.0 +957 85 training.label_smoothing 0.47193570133810686 +957 86 model.embedding_dim 2.0 +957 86 model.relation_dim 2.0 +957 86 model.dropout_0 0.4142686105682466 +957 86 model.dropout_1 0.4851458987304045 +957 86 model.dropout_2 0.3025647912551903 +957 86 training.batch_size 0.0 +957 86 training.label_smoothing 0.8346801565805478 +957 87 model.embedding_dim 0.0 +957 87 model.relation_dim 0.0 +957 87 model.dropout_0 0.13413143773486094 +957 87 model.dropout_1 0.21860479914236888 +957 87 model.dropout_2 0.4339930721695531 +957 87 training.batch_size 1.0 +957 87 training.label_smoothing 0.0054301558873642046 +957 
88 model.embedding_dim 2.0 +957 88 model.relation_dim 0.0 +957 88 model.dropout_0 0.14037032265657656 +957 88 model.dropout_1 0.3702747074122651 +957 88 model.dropout_2 0.24804109994030277 +957 88 training.batch_size 1.0 +957 88 training.label_smoothing 0.02644315549077467 +957 89 model.embedding_dim 2.0 +957 89 model.relation_dim 1.0 +957 89 model.dropout_0 0.18103098706910237 +957 89 model.dropout_1 0.17634303642542326 +957 89 model.dropout_2 0.20880205552891634 +957 89 training.batch_size 0.0 +957 89 training.label_smoothing 0.0142929787524855 +957 90 model.embedding_dim 2.0 +957 90 model.relation_dim 0.0 +957 90 model.dropout_0 0.30094025554100956 +957 90 model.dropout_1 0.44724707784098394 +957 90 model.dropout_2 0.41020145719109824 +957 90 training.batch_size 2.0 +957 90 training.label_smoothing 0.9154736090699169 +957 91 model.embedding_dim 1.0 +957 91 model.relation_dim 0.0 +957 91 model.dropout_0 0.12401376677888215 +957 91 model.dropout_1 0.3540566216442682 +957 91 model.dropout_2 0.40068735896666263 +957 91 training.batch_size 0.0 +957 91 training.label_smoothing 0.04518091171259804 +957 92 model.embedding_dim 1.0 +957 92 model.relation_dim 2.0 +957 92 model.dropout_0 0.2469411209510701 +957 92 model.dropout_1 0.4264094059062359 +957 92 model.dropout_2 0.1863685030221227 +957 92 training.batch_size 1.0 +957 92 training.label_smoothing 0.004563101331748049 +957 93 model.embedding_dim 0.0 +957 93 model.relation_dim 0.0 +957 93 model.dropout_0 0.16871234526967518 +957 93 model.dropout_1 0.2974572166671423 +957 93 model.dropout_2 0.4751245077475853 +957 93 training.batch_size 1.0 +957 93 training.label_smoothing 0.003185308634187522 +957 94 model.embedding_dim 0.0 +957 94 model.relation_dim 1.0 +957 94 model.dropout_0 0.10086182365218753 +957 94 model.dropout_1 0.28017820596126664 +957 94 model.dropout_2 0.2048174048877582 +957 94 training.batch_size 1.0 +957 94 training.label_smoothing 0.005061670704524829 +957 95 model.embedding_dim 1.0 +957 95 
model.relation_dim 1.0 +957 95 model.dropout_0 0.19479843992487386 +957 95 model.dropout_1 0.3796881245924675 +957 95 model.dropout_2 0.17683150902492395 +957 95 training.batch_size 0.0 +957 95 training.label_smoothing 0.0046823844535046535 +957 96 model.embedding_dim 1.0 +957 96 model.relation_dim 1.0 +957 96 model.dropout_0 0.13524601666470876 +957 96 model.dropout_1 0.16676637588376156 +957 96 model.dropout_2 0.39900898155978504 +957 96 training.batch_size 0.0 +957 96 training.label_smoothing 0.015975911238544062 +957 97 model.embedding_dim 0.0 +957 97 model.relation_dim 0.0 +957 97 model.dropout_0 0.13048234881505763 +957 97 model.dropout_1 0.4357918532311976 +957 97 model.dropout_2 0.24039131221613422 +957 97 training.batch_size 1.0 +957 97 training.label_smoothing 0.2543893355549646 +957 98 model.embedding_dim 2.0 +957 98 model.relation_dim 2.0 +957 98 model.dropout_0 0.2090021980732758 +957 98 model.dropout_1 0.448997956955565 +957 98 model.dropout_2 0.45318533665118976 +957 98 training.batch_size 1.0 +957 98 training.label_smoothing 0.19270565552038835 +957 99 model.embedding_dim 1.0 +957 99 model.relation_dim 1.0 +957 99 model.dropout_0 0.4292437840096751 +957 99 model.dropout_1 0.36306604795285313 +957 99 model.dropout_2 0.31720211742813254 +957 99 training.batch_size 1.0 +957 99 training.label_smoothing 0.004820159439117518 +957 100 model.embedding_dim 0.0 +957 100 model.relation_dim 2.0 +957 100 model.dropout_0 0.12083410129049216 +957 100 model.dropout_1 0.3224509289783829 +957 100 model.dropout_2 0.2239812069152899 +957 100 training.batch_size 1.0 +957 100 training.label_smoothing 0.2888705670313598 +957 1 dataset """kinships""" +957 1 model """tucker""" +957 1 loss """bceaftersigmoid""" +957 1 regularizer """no""" +957 1 optimizer """adadelta""" +957 1 training_loop """lcwa""" +957 1 evaluator """rankbased""" +957 2 dataset """kinships""" +957 2 model """tucker""" +957 2 loss """bceaftersigmoid""" +957 2 regularizer """no""" +957 2 optimizer 
"""adadelta""" +957 2 training_loop """lcwa""" +957 2 evaluator """rankbased""" +957 3 dataset """kinships""" +957 3 model """tucker""" +957 3 loss """bceaftersigmoid""" +957 3 regularizer """no""" +957 3 optimizer """adadelta""" +957 3 training_loop """lcwa""" +957 3 evaluator """rankbased""" +957 4 dataset """kinships""" +957 4 model """tucker""" +957 4 loss """bceaftersigmoid""" +957 4 regularizer """no""" +957 4 optimizer """adadelta""" +957 4 training_loop """lcwa""" +957 4 evaluator """rankbased""" +957 5 dataset """kinships""" +957 5 model """tucker""" +957 5 loss """bceaftersigmoid""" +957 5 regularizer """no""" +957 5 optimizer """adadelta""" +957 5 training_loop """lcwa""" +957 5 evaluator """rankbased""" +957 6 dataset """kinships""" +957 6 model """tucker""" +957 6 loss """bceaftersigmoid""" +957 6 regularizer """no""" +957 6 optimizer """adadelta""" +957 6 training_loop """lcwa""" +957 6 evaluator """rankbased""" +957 7 dataset """kinships""" +957 7 model """tucker""" +957 7 loss """bceaftersigmoid""" +957 7 regularizer """no""" +957 7 optimizer """adadelta""" +957 7 training_loop """lcwa""" +957 7 evaluator """rankbased""" +957 8 dataset """kinships""" +957 8 model """tucker""" +957 8 loss """bceaftersigmoid""" +957 8 regularizer """no""" +957 8 optimizer """adadelta""" +957 8 training_loop """lcwa""" +957 8 evaluator """rankbased""" +957 9 dataset """kinships""" +957 9 model """tucker""" +957 9 loss """bceaftersigmoid""" +957 9 regularizer """no""" +957 9 optimizer """adadelta""" +957 9 training_loop """lcwa""" +957 9 evaluator """rankbased""" +957 10 dataset """kinships""" +957 10 model """tucker""" +957 10 loss """bceaftersigmoid""" +957 10 regularizer """no""" +957 10 optimizer """adadelta""" +957 10 training_loop """lcwa""" +957 10 evaluator """rankbased""" +957 11 dataset """kinships""" +957 11 model """tucker""" +957 11 loss """bceaftersigmoid""" +957 11 regularizer """no""" +957 11 optimizer """adadelta""" +957 11 training_loop """lcwa""" +957 
11 evaluator """rankbased""" +957 12 dataset """kinships""" +957 12 model """tucker""" +957 12 loss """bceaftersigmoid""" +957 12 regularizer """no""" +957 12 optimizer """adadelta""" +957 12 training_loop """lcwa""" +957 12 evaluator """rankbased""" +957 13 dataset """kinships""" +957 13 model """tucker""" +957 13 loss """bceaftersigmoid""" +957 13 regularizer """no""" +957 13 optimizer """adadelta""" +957 13 training_loop """lcwa""" +957 13 evaluator """rankbased""" +957 14 dataset """kinships""" +957 14 model """tucker""" +957 14 loss """bceaftersigmoid""" +957 14 regularizer """no""" +957 14 optimizer """adadelta""" +957 14 training_loop """lcwa""" +957 14 evaluator """rankbased""" +957 15 dataset """kinships""" +957 15 model """tucker""" +957 15 loss """bceaftersigmoid""" +957 15 regularizer """no""" +957 15 optimizer """adadelta""" +957 15 training_loop """lcwa""" +957 15 evaluator """rankbased""" +957 16 dataset """kinships""" +957 16 model """tucker""" +957 16 loss """bceaftersigmoid""" +957 16 regularizer """no""" +957 16 optimizer """adadelta""" +957 16 training_loop """lcwa""" +957 16 evaluator """rankbased""" +957 17 dataset """kinships""" +957 17 model """tucker""" +957 17 loss """bceaftersigmoid""" +957 17 regularizer """no""" +957 17 optimizer """adadelta""" +957 17 training_loop """lcwa""" +957 17 evaluator """rankbased""" +957 18 dataset """kinships""" +957 18 model """tucker""" +957 18 loss """bceaftersigmoid""" +957 18 regularizer """no""" +957 18 optimizer """adadelta""" +957 18 training_loop """lcwa""" +957 18 evaluator """rankbased""" +957 19 dataset """kinships""" +957 19 model """tucker""" +957 19 loss """bceaftersigmoid""" +957 19 regularizer """no""" +957 19 optimizer """adadelta""" +957 19 training_loop """lcwa""" +957 19 evaluator """rankbased""" +957 20 dataset """kinships""" +957 20 model """tucker""" +957 20 loss """bceaftersigmoid""" +957 20 regularizer """no""" +957 20 optimizer """adadelta""" +957 20 training_loop """lcwa""" +957 
20 evaluator """rankbased""" +957 21 dataset """kinships""" +957 21 model """tucker""" +957 21 loss """bceaftersigmoid""" +957 21 regularizer """no""" +957 21 optimizer """adadelta""" +957 21 training_loop """lcwa""" +957 21 evaluator """rankbased""" +957 22 dataset """kinships""" +957 22 model """tucker""" +957 22 loss """bceaftersigmoid""" +957 22 regularizer """no""" +957 22 optimizer """adadelta""" +957 22 training_loop """lcwa""" +957 22 evaluator """rankbased""" +957 23 dataset """kinships""" +957 23 model """tucker""" +957 23 loss """bceaftersigmoid""" +957 23 regularizer """no""" +957 23 optimizer """adadelta""" +957 23 training_loop """lcwa""" +957 23 evaluator """rankbased""" +957 24 dataset """kinships""" +957 24 model """tucker""" +957 24 loss """bceaftersigmoid""" +957 24 regularizer """no""" +957 24 optimizer """adadelta""" +957 24 training_loop """lcwa""" +957 24 evaluator """rankbased""" +957 25 dataset """kinships""" +957 25 model """tucker""" +957 25 loss """bceaftersigmoid""" +957 25 regularizer """no""" +957 25 optimizer """adadelta""" +957 25 training_loop """lcwa""" +957 25 evaluator """rankbased""" +957 26 dataset """kinships""" +957 26 model """tucker""" +957 26 loss """bceaftersigmoid""" +957 26 regularizer """no""" +957 26 optimizer """adadelta""" +957 26 training_loop """lcwa""" +957 26 evaluator """rankbased""" +957 27 dataset """kinships""" +957 27 model """tucker""" +957 27 loss """bceaftersigmoid""" +957 27 regularizer """no""" +957 27 optimizer """adadelta""" +957 27 training_loop """lcwa""" +957 27 evaluator """rankbased""" +957 28 dataset """kinships""" +957 28 model """tucker""" +957 28 loss """bceaftersigmoid""" +957 28 regularizer """no""" +957 28 optimizer """adadelta""" +957 28 training_loop """lcwa""" +957 28 evaluator """rankbased""" +957 29 dataset """kinships""" +957 29 model """tucker""" +957 29 loss """bceaftersigmoid""" +957 29 regularizer """no""" +957 29 optimizer """adadelta""" +957 29 training_loop """lcwa""" +957 
29 evaluator """rankbased""" +957 30 dataset """kinships""" +957 30 model """tucker""" +957 30 loss """bceaftersigmoid""" +957 30 regularizer """no""" +957 30 optimizer """adadelta""" +957 30 training_loop """lcwa""" +957 30 evaluator """rankbased""" +957 31 dataset """kinships""" +957 31 model """tucker""" +957 31 loss """bceaftersigmoid""" +957 31 regularizer """no""" +957 31 optimizer """adadelta""" +957 31 training_loop """lcwa""" +957 31 evaluator """rankbased""" +957 32 dataset """kinships""" +957 32 model """tucker""" +957 32 loss """bceaftersigmoid""" +957 32 regularizer """no""" +957 32 optimizer """adadelta""" +957 32 training_loop """lcwa""" +957 32 evaluator """rankbased""" +957 33 dataset """kinships""" +957 33 model """tucker""" +957 33 loss """bceaftersigmoid""" +957 33 regularizer """no""" +957 33 optimizer """adadelta""" +957 33 training_loop """lcwa""" +957 33 evaluator """rankbased""" +957 34 dataset """kinships""" +957 34 model """tucker""" +957 34 loss """bceaftersigmoid""" +957 34 regularizer """no""" +957 34 optimizer """adadelta""" +957 34 training_loop """lcwa""" +957 34 evaluator """rankbased""" +957 35 dataset """kinships""" +957 35 model """tucker""" +957 35 loss """bceaftersigmoid""" +957 35 regularizer """no""" +957 35 optimizer """adadelta""" +957 35 training_loop """lcwa""" +957 35 evaluator """rankbased""" +957 36 dataset """kinships""" +957 36 model """tucker""" +957 36 loss """bceaftersigmoid""" +957 36 regularizer """no""" +957 36 optimizer """adadelta""" +957 36 training_loop """lcwa""" +957 36 evaluator """rankbased""" +957 37 dataset """kinships""" +957 37 model """tucker""" +957 37 loss """bceaftersigmoid""" +957 37 regularizer """no""" +957 37 optimizer """adadelta""" +957 37 training_loop """lcwa""" +957 37 evaluator """rankbased""" +957 38 dataset """kinships""" +957 38 model """tucker""" +957 38 loss """bceaftersigmoid""" +957 38 regularizer """no""" +957 38 optimizer """adadelta""" +957 38 training_loop """lcwa""" +957 
38 evaluator """rankbased""" +957 39 dataset """kinships""" +957 39 model """tucker""" +957 39 loss """bceaftersigmoid""" +957 39 regularizer """no""" +957 39 optimizer """adadelta""" +957 39 training_loop """lcwa""" +957 39 evaluator """rankbased""" +957 40 dataset """kinships""" +957 40 model """tucker""" +957 40 loss """bceaftersigmoid""" +957 40 regularizer """no""" +957 40 optimizer """adadelta""" +957 40 training_loop """lcwa""" +957 40 evaluator """rankbased""" +957 41 dataset """kinships""" +957 41 model """tucker""" +957 41 loss """bceaftersigmoid""" +957 41 regularizer """no""" +957 41 optimizer """adadelta""" +957 41 training_loop """lcwa""" +957 41 evaluator """rankbased""" +957 42 dataset """kinships""" +957 42 model """tucker""" +957 42 loss """bceaftersigmoid""" +957 42 regularizer """no""" +957 42 optimizer """adadelta""" +957 42 training_loop """lcwa""" +957 42 evaluator """rankbased""" +957 43 dataset """kinships""" +957 43 model """tucker""" +957 43 loss """bceaftersigmoid""" +957 43 regularizer """no""" +957 43 optimizer """adadelta""" +957 43 training_loop """lcwa""" +957 43 evaluator """rankbased""" +957 44 dataset """kinships""" +957 44 model """tucker""" +957 44 loss """bceaftersigmoid""" +957 44 regularizer """no""" +957 44 optimizer """adadelta""" +957 44 training_loop """lcwa""" +957 44 evaluator """rankbased""" +957 45 dataset """kinships""" +957 45 model """tucker""" +957 45 loss """bceaftersigmoid""" +957 45 regularizer """no""" +957 45 optimizer """adadelta""" +957 45 training_loop """lcwa""" +957 45 evaluator """rankbased""" +957 46 dataset """kinships""" +957 46 model """tucker""" +957 46 loss """bceaftersigmoid""" +957 46 regularizer """no""" +957 46 optimizer """adadelta""" +957 46 training_loop """lcwa""" +957 46 evaluator """rankbased""" +957 47 dataset """kinships""" +957 47 model """tucker""" +957 47 loss """bceaftersigmoid""" +957 47 regularizer """no""" +957 47 optimizer """adadelta""" +957 47 training_loop """lcwa""" +957 
47 evaluator """rankbased""" +957 48 dataset """kinships""" +957 48 model """tucker""" +957 48 loss """bceaftersigmoid""" +957 48 regularizer """no""" +957 48 optimizer """adadelta""" +957 48 training_loop """lcwa""" +957 48 evaluator """rankbased""" +957 49 dataset """kinships""" +957 49 model """tucker""" +957 49 loss """bceaftersigmoid""" +957 49 regularizer """no""" +957 49 optimizer """adadelta""" +957 49 training_loop """lcwa""" +957 49 evaluator """rankbased""" +957 50 dataset """kinships""" +957 50 model """tucker""" +957 50 loss """bceaftersigmoid""" +957 50 regularizer """no""" +957 50 optimizer """adadelta""" +957 50 training_loop """lcwa""" +957 50 evaluator """rankbased""" +957 51 dataset """kinships""" +957 51 model """tucker""" +957 51 loss """bceaftersigmoid""" +957 51 regularizer """no""" +957 51 optimizer """adadelta""" +957 51 training_loop """lcwa""" +957 51 evaluator """rankbased""" +957 52 dataset """kinships""" +957 52 model """tucker""" +957 52 loss """bceaftersigmoid""" +957 52 regularizer """no""" +957 52 optimizer """adadelta""" +957 52 training_loop """lcwa""" +957 52 evaluator """rankbased""" +957 53 dataset """kinships""" +957 53 model """tucker""" +957 53 loss """bceaftersigmoid""" +957 53 regularizer """no""" +957 53 optimizer """adadelta""" +957 53 training_loop """lcwa""" +957 53 evaluator """rankbased""" +957 54 dataset """kinships""" +957 54 model """tucker""" +957 54 loss """bceaftersigmoid""" +957 54 regularizer """no""" +957 54 optimizer """adadelta""" +957 54 training_loop """lcwa""" +957 54 evaluator """rankbased""" +957 55 dataset """kinships""" +957 55 model """tucker""" +957 55 loss """bceaftersigmoid""" +957 55 regularizer """no""" +957 55 optimizer """adadelta""" +957 55 training_loop """lcwa""" +957 55 evaluator """rankbased""" +957 56 dataset """kinships""" +957 56 model """tucker""" +957 56 loss """bceaftersigmoid""" +957 56 regularizer """no""" +957 56 optimizer """adadelta""" +957 56 training_loop """lcwa""" +957 
56 evaluator """rankbased""" +957 57 dataset """kinships""" +957 57 model """tucker""" +957 57 loss """bceaftersigmoid""" +957 57 regularizer """no""" +957 57 optimizer """adadelta""" +957 57 training_loop """lcwa""" +957 57 evaluator """rankbased""" +957 58 dataset """kinships""" +957 58 model """tucker""" +957 58 loss """bceaftersigmoid""" +957 58 regularizer """no""" +957 58 optimizer """adadelta""" +957 58 training_loop """lcwa""" +957 58 evaluator """rankbased""" +957 59 dataset """kinships""" +957 59 model """tucker""" +957 59 loss """bceaftersigmoid""" +957 59 regularizer """no""" +957 59 optimizer """adadelta""" +957 59 training_loop """lcwa""" +957 59 evaluator """rankbased""" +957 60 dataset """kinships""" +957 60 model """tucker""" +957 60 loss """bceaftersigmoid""" +957 60 regularizer """no""" +957 60 optimizer """adadelta""" +957 60 training_loop """lcwa""" +957 60 evaluator """rankbased""" +957 61 dataset """kinships""" +957 61 model """tucker""" +957 61 loss """bceaftersigmoid""" +957 61 regularizer """no""" +957 61 optimizer """adadelta""" +957 61 training_loop """lcwa""" +957 61 evaluator """rankbased""" +957 62 dataset """kinships""" +957 62 model """tucker""" +957 62 loss """bceaftersigmoid""" +957 62 regularizer """no""" +957 62 optimizer """adadelta""" +957 62 training_loop """lcwa""" +957 62 evaluator """rankbased""" +957 63 dataset """kinships""" +957 63 model """tucker""" +957 63 loss """bceaftersigmoid""" +957 63 regularizer """no""" +957 63 optimizer """adadelta""" +957 63 training_loop """lcwa""" +957 63 evaluator """rankbased""" +957 64 dataset """kinships""" +957 64 model """tucker""" +957 64 loss """bceaftersigmoid""" +957 64 regularizer """no""" +957 64 optimizer """adadelta""" +957 64 training_loop """lcwa""" +957 64 evaluator """rankbased""" +957 65 dataset """kinships""" +957 65 model """tucker""" +957 65 loss """bceaftersigmoid""" +957 65 regularizer """no""" +957 65 optimizer """adadelta""" +957 65 training_loop """lcwa""" +957 
65 evaluator """rankbased""" +957 66 dataset """kinships""" +957 66 model """tucker""" +957 66 loss """bceaftersigmoid""" +957 66 regularizer """no""" +957 66 optimizer """adadelta""" +957 66 training_loop """lcwa""" +957 66 evaluator """rankbased""" +957 67 dataset """kinships""" +957 67 model """tucker""" +957 67 loss """bceaftersigmoid""" +957 67 regularizer """no""" +957 67 optimizer """adadelta""" +957 67 training_loop """lcwa""" +957 67 evaluator """rankbased""" +957 68 dataset """kinships""" +957 68 model """tucker""" +957 68 loss """bceaftersigmoid""" +957 68 regularizer """no""" +957 68 optimizer """adadelta""" +957 68 training_loop """lcwa""" +957 68 evaluator """rankbased""" +957 69 dataset """kinships""" +957 69 model """tucker""" +957 69 loss """bceaftersigmoid""" +957 69 regularizer """no""" +957 69 optimizer """adadelta""" +957 69 training_loop """lcwa""" +957 69 evaluator """rankbased""" +957 70 dataset """kinships""" +957 70 model """tucker""" +957 70 loss """bceaftersigmoid""" +957 70 regularizer """no""" +957 70 optimizer """adadelta""" +957 70 training_loop """lcwa""" +957 70 evaluator """rankbased""" +957 71 dataset """kinships""" +957 71 model """tucker""" +957 71 loss """bceaftersigmoid""" +957 71 regularizer """no""" +957 71 optimizer """adadelta""" +957 71 training_loop """lcwa""" +957 71 evaluator """rankbased""" +957 72 dataset """kinships""" +957 72 model """tucker""" +957 72 loss """bceaftersigmoid""" +957 72 regularizer """no""" +957 72 optimizer """adadelta""" +957 72 training_loop """lcwa""" +957 72 evaluator """rankbased""" +957 73 dataset """kinships""" +957 73 model """tucker""" +957 73 loss """bceaftersigmoid""" +957 73 regularizer """no""" +957 73 optimizer """adadelta""" +957 73 training_loop """lcwa""" +957 73 evaluator """rankbased""" +957 74 dataset """kinships""" +957 74 model """tucker""" +957 74 loss """bceaftersigmoid""" +957 74 regularizer """no""" +957 74 optimizer """adadelta""" +957 74 training_loop """lcwa""" +957 
74 evaluator """rankbased""" +957 75 dataset """kinships""" +957 75 model """tucker""" +957 75 loss """bceaftersigmoid""" +957 75 regularizer """no""" +957 75 optimizer """adadelta""" +957 75 training_loop """lcwa""" +957 75 evaluator """rankbased""" +957 76 dataset """kinships""" +957 76 model """tucker""" +957 76 loss """bceaftersigmoid""" +957 76 regularizer """no""" +957 76 optimizer """adadelta""" +957 76 training_loop """lcwa""" +957 76 evaluator """rankbased""" +957 77 dataset """kinships""" +957 77 model """tucker""" +957 77 loss """bceaftersigmoid""" +957 77 regularizer """no""" +957 77 optimizer """adadelta""" +957 77 training_loop """lcwa""" +957 77 evaluator """rankbased""" +957 78 dataset """kinships""" +957 78 model """tucker""" +957 78 loss """bceaftersigmoid""" +957 78 regularizer """no""" +957 78 optimizer """adadelta""" +957 78 training_loop """lcwa""" +957 78 evaluator """rankbased""" +957 79 dataset """kinships""" +957 79 model """tucker""" +957 79 loss """bceaftersigmoid""" +957 79 regularizer """no""" +957 79 optimizer """adadelta""" +957 79 training_loop """lcwa""" +957 79 evaluator """rankbased""" +957 80 dataset """kinships""" +957 80 model """tucker""" +957 80 loss """bceaftersigmoid""" +957 80 regularizer """no""" +957 80 optimizer """adadelta""" +957 80 training_loop """lcwa""" +957 80 evaluator """rankbased""" +957 81 dataset """kinships""" +957 81 model """tucker""" +957 81 loss """bceaftersigmoid""" +957 81 regularizer """no""" +957 81 optimizer """adadelta""" +957 81 training_loop """lcwa""" +957 81 evaluator """rankbased""" +957 82 dataset """kinships""" +957 82 model """tucker""" +957 82 loss """bceaftersigmoid""" +957 82 regularizer """no""" +957 82 optimizer """adadelta""" +957 82 training_loop """lcwa""" +957 82 evaluator """rankbased""" +957 83 dataset """kinships""" +957 83 model """tucker""" +957 83 loss """bceaftersigmoid""" +957 83 regularizer """no""" +957 83 optimizer """adadelta""" +957 83 training_loop """lcwa""" +957 
83 evaluator """rankbased""" +957 84 dataset """kinships""" +957 84 model """tucker""" +957 84 loss """bceaftersigmoid""" +957 84 regularizer """no""" +957 84 optimizer """adadelta""" +957 84 training_loop """lcwa""" +957 84 evaluator """rankbased""" +957 85 dataset """kinships""" +957 85 model """tucker""" +957 85 loss """bceaftersigmoid""" +957 85 regularizer """no""" +957 85 optimizer """adadelta""" +957 85 training_loop """lcwa""" +957 85 evaluator """rankbased""" +957 86 dataset """kinships""" +957 86 model """tucker""" +957 86 loss """bceaftersigmoid""" +957 86 regularizer """no""" +957 86 optimizer """adadelta""" +957 86 training_loop """lcwa""" +957 86 evaluator """rankbased""" +957 87 dataset """kinships""" +957 87 model """tucker""" +957 87 loss """bceaftersigmoid""" +957 87 regularizer """no""" +957 87 optimizer """adadelta""" +957 87 training_loop """lcwa""" +957 87 evaluator """rankbased""" +957 88 dataset """kinships""" +957 88 model """tucker""" +957 88 loss """bceaftersigmoid""" +957 88 regularizer """no""" +957 88 optimizer """adadelta""" +957 88 training_loop """lcwa""" +957 88 evaluator """rankbased""" +957 89 dataset """kinships""" +957 89 model """tucker""" +957 89 loss """bceaftersigmoid""" +957 89 regularizer """no""" +957 89 optimizer """adadelta""" +957 89 training_loop """lcwa""" +957 89 evaluator """rankbased""" +957 90 dataset """kinships""" +957 90 model """tucker""" +957 90 loss """bceaftersigmoid""" +957 90 regularizer """no""" +957 90 optimizer """adadelta""" +957 90 training_loop """lcwa""" +957 90 evaluator """rankbased""" +957 91 dataset """kinships""" +957 91 model """tucker""" +957 91 loss """bceaftersigmoid""" +957 91 regularizer """no""" +957 91 optimizer """adadelta""" +957 91 training_loop """lcwa""" +957 91 evaluator """rankbased""" +957 92 dataset """kinships""" +957 92 model """tucker""" +957 92 loss """bceaftersigmoid""" +957 92 regularizer """no""" +957 92 optimizer """adadelta""" +957 92 training_loop """lcwa""" +957 
92 evaluator """rankbased""" +957 93 dataset """kinships""" +957 93 model """tucker""" +957 93 loss """bceaftersigmoid""" +957 93 regularizer """no""" +957 93 optimizer """adadelta""" +957 93 training_loop """lcwa""" +957 93 evaluator """rankbased""" +957 94 dataset """kinships""" +957 94 model """tucker""" +957 94 loss """bceaftersigmoid""" +957 94 regularizer """no""" +957 94 optimizer """adadelta""" +957 94 training_loop """lcwa""" +957 94 evaluator """rankbased""" +957 95 dataset """kinships""" +957 95 model """tucker""" +957 95 loss """bceaftersigmoid""" +957 95 regularizer """no""" +957 95 optimizer """adadelta""" +957 95 training_loop """lcwa""" +957 95 evaluator """rankbased""" +957 96 dataset """kinships""" +957 96 model """tucker""" +957 96 loss """bceaftersigmoid""" +957 96 regularizer """no""" +957 96 optimizer """adadelta""" +957 96 training_loop """lcwa""" +957 96 evaluator """rankbased""" +957 97 dataset """kinships""" +957 97 model """tucker""" +957 97 loss """bceaftersigmoid""" +957 97 regularizer """no""" +957 97 optimizer """adadelta""" +957 97 training_loop """lcwa""" +957 97 evaluator """rankbased""" +957 98 dataset """kinships""" +957 98 model """tucker""" +957 98 loss """bceaftersigmoid""" +957 98 regularizer """no""" +957 98 optimizer """adadelta""" +957 98 training_loop """lcwa""" +957 98 evaluator """rankbased""" +957 99 dataset """kinships""" +957 99 model """tucker""" +957 99 loss """bceaftersigmoid""" +957 99 regularizer """no""" +957 99 optimizer """adadelta""" +957 99 training_loop """lcwa""" +957 99 evaluator """rankbased""" +957 100 dataset """kinships""" +957 100 model """tucker""" +957 100 loss """bceaftersigmoid""" +957 100 regularizer """no""" +957 100 optimizer """adadelta""" +957 100 training_loop """lcwa""" +957 100 evaluator """rankbased""" +958 1 model.embedding_dim 1.0 +958 1 model.relation_dim 0.0 +958 1 model.dropout_0 0.20774007842913986 +958 1 model.dropout_1 0.38484279994011905 +958 1 model.dropout_2 
0.3355583910932145 +958 1 training.batch_size 1.0 +958 1 training.label_smoothing 0.14676430847912128 +958 2 model.embedding_dim 1.0 +958 2 model.relation_dim 0.0 +958 2 model.dropout_0 0.2962576184507828 +958 2 model.dropout_1 0.3113807677666521 +958 2 model.dropout_2 0.23384757468547998 +958 2 training.batch_size 0.0 +958 2 training.label_smoothing 0.10814225661524912 +958 3 model.embedding_dim 1.0 +958 3 model.relation_dim 2.0 +958 3 model.dropout_0 0.17039774951105705 +958 3 model.dropout_1 0.40322769503219036 +958 3 model.dropout_2 0.46840292778723136 +958 3 training.batch_size 0.0 +958 3 training.label_smoothing 0.43082843362795853 +958 4 model.embedding_dim 0.0 +958 4 model.relation_dim 0.0 +958 4 model.dropout_0 0.4756555022903243 +958 4 model.dropout_1 0.2069295148476533 +958 4 model.dropout_2 0.104079973548096 +958 4 training.batch_size 2.0 +958 4 training.label_smoothing 0.5302283102513347 +958 5 model.embedding_dim 0.0 +958 5 model.relation_dim 1.0 +958 5 model.dropout_0 0.16929457867918474 +958 5 model.dropout_1 0.4890151716078275 +958 5 model.dropout_2 0.26227095211233437 +958 5 training.batch_size 0.0 +958 5 training.label_smoothing 0.0011081886008574805 +958 6 model.embedding_dim 1.0 +958 6 model.relation_dim 1.0 +958 6 model.dropout_0 0.38977955041468026 +958 6 model.dropout_1 0.13038298749144236 +958 6 model.dropout_2 0.4011519525656183 +958 6 training.batch_size 2.0 +958 6 training.label_smoothing 0.4384867116054295 +958 7 model.embedding_dim 2.0 +958 7 model.relation_dim 1.0 +958 7 model.dropout_0 0.4355388754873778 +958 7 model.dropout_1 0.10456920864332737 +958 7 model.dropout_2 0.3631871473752575 +958 7 training.batch_size 2.0 +958 7 training.label_smoothing 0.006882309724424645 +958 8 model.embedding_dim 1.0 +958 8 model.relation_dim 2.0 +958 8 model.dropout_0 0.24148739570042693 +958 8 model.dropout_1 0.3704081404675943 +958 8 model.dropout_2 0.22724182082815456 +958 8 training.batch_size 1.0 +958 8 training.label_smoothing 
0.0627607782158934 +958 9 model.embedding_dim 0.0 +958 9 model.relation_dim 0.0 +958 9 model.dropout_0 0.33655185463001036 +958 9 model.dropout_1 0.18696464983772737 +958 9 model.dropout_2 0.24099597135664774 +958 9 training.batch_size 2.0 +958 9 training.label_smoothing 0.1985395927183447 +958 10 model.embedding_dim 0.0 +958 10 model.relation_dim 0.0 +958 10 model.dropout_0 0.3261319852750249 +958 10 model.dropout_1 0.29197553025040635 +958 10 model.dropout_2 0.35093531161197944 +958 10 training.batch_size 1.0 +958 10 training.label_smoothing 0.0018777956079158503 +958 11 model.embedding_dim 2.0 +958 11 model.relation_dim 2.0 +958 11 model.dropout_0 0.47826045903236514 +958 11 model.dropout_1 0.3095493201117223 +958 11 model.dropout_2 0.47443394869800626 +958 11 training.batch_size 0.0 +958 11 training.label_smoothing 0.0024574948241242118 +958 12 model.embedding_dim 2.0 +958 12 model.relation_dim 1.0 +958 12 model.dropout_0 0.24702465683838026 +958 12 model.dropout_1 0.3713577460922871 +958 12 model.dropout_2 0.3458349021394467 +958 12 training.batch_size 1.0 +958 12 training.label_smoothing 0.11061012376541976 +958 13 model.embedding_dim 1.0 +958 13 model.relation_dim 1.0 +958 13 model.dropout_0 0.3369615576178606 +958 13 model.dropout_1 0.3769981478934751 +958 13 model.dropout_2 0.31219747139946796 +958 13 training.batch_size 0.0 +958 13 training.label_smoothing 0.0032934024003982757 +958 14 model.embedding_dim 0.0 +958 14 model.relation_dim 1.0 +958 14 model.dropout_0 0.4896711009500684 +958 14 model.dropout_1 0.46967497066460095 +958 14 model.dropout_2 0.4543896904826814 +958 14 training.batch_size 1.0 +958 14 training.label_smoothing 0.008321954682570935 +958 15 model.embedding_dim 2.0 +958 15 model.relation_dim 0.0 +958 15 model.dropout_0 0.20892988744176463 +958 15 model.dropout_1 0.19501254988767835 +958 15 model.dropout_2 0.4033899857725788 +958 15 training.batch_size 2.0 +958 15 training.label_smoothing 0.01026409917257964 +958 16 model.embedding_dim 
0.0 +958 16 model.relation_dim 0.0 +958 16 model.dropout_0 0.1452398236983787 +958 16 model.dropout_1 0.15887977864959182 +958 16 model.dropout_2 0.12450106607754288 +958 16 training.batch_size 1.0 +958 16 training.label_smoothing 0.10881179651934857 +958 17 model.embedding_dim 1.0 +958 17 model.relation_dim 2.0 +958 17 model.dropout_0 0.12148900230651649 +958 17 model.dropout_1 0.16742137648933986 +958 17 model.dropout_2 0.15325098501370685 +958 17 training.batch_size 1.0 +958 17 training.label_smoothing 0.00201931817503776 +958 18 model.embedding_dim 2.0 +958 18 model.relation_dim 0.0 +958 18 model.dropout_0 0.4132463427109312 +958 18 model.dropout_1 0.48906141796395775 +958 18 model.dropout_2 0.17422538550604194 +958 18 training.batch_size 0.0 +958 18 training.label_smoothing 0.0013697021300838642 +958 19 model.embedding_dim 1.0 +958 19 model.relation_dim 1.0 +958 19 model.dropout_0 0.21599286167340337 +958 19 model.dropout_1 0.32911931049098586 +958 19 model.dropout_2 0.27397270812665836 +958 19 training.batch_size 1.0 +958 19 training.label_smoothing 0.005910079025190242 +958 20 model.embedding_dim 0.0 +958 20 model.relation_dim 1.0 +958 20 model.dropout_0 0.44586966711826603 +958 20 model.dropout_1 0.37740016909894425 +958 20 model.dropout_2 0.36460308016228293 +958 20 training.batch_size 0.0 +958 20 training.label_smoothing 0.0014269247902648032 +958 21 model.embedding_dim 1.0 +958 21 model.relation_dim 1.0 +958 21 model.dropout_0 0.3911947552128122 +958 21 model.dropout_1 0.2883588946117646 +958 21 model.dropout_2 0.4971155315058817 +958 21 training.batch_size 2.0 +958 21 training.label_smoothing 0.0027546425285629047 +958 22 model.embedding_dim 2.0 +958 22 model.relation_dim 0.0 +958 22 model.dropout_0 0.25355214428211964 +958 22 model.dropout_1 0.16222509143686525 +958 22 model.dropout_2 0.10515171337759491 +958 22 training.batch_size 0.0 +958 22 training.label_smoothing 0.017769440551829396 +958 23 model.embedding_dim 2.0 +958 23 model.relation_dim 0.0 
+958 23 model.dropout_0 0.3730038778958721 +958 23 model.dropout_1 0.14492666791186096 +958 23 model.dropout_2 0.4336077372433308 +958 23 training.batch_size 1.0 +958 23 training.label_smoothing 0.0010765204272656758 +958 24 model.embedding_dim 2.0 +958 24 model.relation_dim 2.0 +958 24 model.dropout_0 0.47111494109289476 +958 24 model.dropout_1 0.473650804305897 +958 24 model.dropout_2 0.2550209792386458 +958 24 training.batch_size 1.0 +958 24 training.label_smoothing 0.0022955792393281574 +958 25 model.embedding_dim 1.0 +958 25 model.relation_dim 0.0 +958 25 model.dropout_0 0.3103272282469255 +958 25 model.dropout_1 0.46717383715418426 +958 25 model.dropout_2 0.38130638658533667 +958 25 training.batch_size 0.0 +958 25 training.label_smoothing 0.029065896303298838 +958 26 model.embedding_dim 1.0 +958 26 model.relation_dim 2.0 +958 26 model.dropout_0 0.13329516747683062 +958 26 model.dropout_1 0.15699297079975524 +958 26 model.dropout_2 0.42823309342055393 +958 26 training.batch_size 0.0 +958 26 training.label_smoothing 0.4712010982249336 +958 27 model.embedding_dim 2.0 +958 27 model.relation_dim 1.0 +958 27 model.dropout_0 0.4383414723675677 +958 27 model.dropout_1 0.21603372489243916 +958 27 model.dropout_2 0.12773984193795213 +958 27 training.batch_size 1.0 +958 27 training.label_smoothing 0.004755188675037322 +958 28 model.embedding_dim 0.0 +958 28 model.relation_dim 1.0 +958 28 model.dropout_0 0.27146021097140954 +958 28 model.dropout_1 0.23589352615384596 +958 28 model.dropout_2 0.31721153456516793 +958 28 training.batch_size 1.0 +958 28 training.label_smoothing 0.21301957425381965 +958 29 model.embedding_dim 0.0 +958 29 model.relation_dim 2.0 +958 29 model.dropout_0 0.403251059114228 +958 29 model.dropout_1 0.3351130251022577 +958 29 model.dropout_2 0.26095236843728015 +958 29 training.batch_size 0.0 +958 29 training.label_smoothing 0.2281660832925126 +958 30 model.embedding_dim 2.0 +958 30 model.relation_dim 0.0 +958 30 model.dropout_0 0.3427124490813561 
+958 30 model.dropout_1 0.16149674291358507 +958 30 model.dropout_2 0.24760924370862014 +958 30 training.batch_size 0.0 +958 30 training.label_smoothing 0.0020976576052077595 +958 31 model.embedding_dim 1.0 +958 31 model.relation_dim 2.0 +958 31 model.dropout_0 0.48319572800972843 +958 31 model.dropout_1 0.43289696398643984 +958 31 model.dropout_2 0.32412885493194993 +958 31 training.batch_size 2.0 +958 31 training.label_smoothing 0.23971834514650792 +958 32 model.embedding_dim 0.0 +958 32 model.relation_dim 1.0 +958 32 model.dropout_0 0.264851779405368 +958 32 model.dropout_1 0.47476444282880736 +958 32 model.dropout_2 0.22403381800110964 +958 32 training.batch_size 1.0 +958 32 training.label_smoothing 0.44994012188793187 +958 33 model.embedding_dim 2.0 +958 33 model.relation_dim 0.0 +958 33 model.dropout_0 0.2245893307797147 +958 33 model.dropout_1 0.25996150109174915 +958 33 model.dropout_2 0.1212176702289915 +958 33 training.batch_size 1.0 +958 33 training.label_smoothing 0.05853582201627105 +958 34 model.embedding_dim 1.0 +958 34 model.relation_dim 0.0 +958 34 model.dropout_0 0.140379968019813 +958 34 model.dropout_1 0.26115256475788806 +958 34 model.dropout_2 0.29973958269372614 +958 34 training.batch_size 1.0 +958 34 training.label_smoothing 0.7760803695836248 +958 35 model.embedding_dim 0.0 +958 35 model.relation_dim 0.0 +958 35 model.dropout_0 0.42414513385692076 +958 35 model.dropout_1 0.44567224892118185 +958 35 model.dropout_2 0.31674861361272694 +958 35 training.batch_size 1.0 +958 35 training.label_smoothing 0.010847590941191306 +958 36 model.embedding_dim 0.0 +958 36 model.relation_dim 1.0 +958 36 model.dropout_0 0.22667160083149254 +958 36 model.dropout_1 0.3510461021352921 +958 36 model.dropout_2 0.3666271089813753 +958 36 training.batch_size 0.0 +958 36 training.label_smoothing 0.004071726102392477 +958 37 model.embedding_dim 2.0 +958 37 model.relation_dim 2.0 +958 37 model.dropout_0 0.4823731712010284 +958 37 model.dropout_1 0.19973726535998684 
+958 37 model.dropout_2 0.26981332315382445 +958 37 training.batch_size 1.0 +958 37 training.label_smoothing 0.07894978291773848 +958 38 model.embedding_dim 2.0 +958 38 model.relation_dim 2.0 +958 38 model.dropout_0 0.14274151557557502 +958 38 model.dropout_1 0.26441249818747925 +958 38 model.dropout_2 0.42067509702181693 +958 38 training.batch_size 1.0 +958 38 training.label_smoothing 0.0012705805106875386 +958 39 model.embedding_dim 2.0 +958 39 model.relation_dim 0.0 +958 39 model.dropout_0 0.4299044391640656 +958 39 model.dropout_1 0.3834495653149963 +958 39 model.dropout_2 0.3989341654255497 +958 39 training.batch_size 2.0 +958 39 training.label_smoothing 0.010227581016762759 +958 40 model.embedding_dim 0.0 +958 40 model.relation_dim 1.0 +958 40 model.dropout_0 0.16203612806286954 +958 40 model.dropout_1 0.21611023499019885 +958 40 model.dropout_2 0.2127562451209566 +958 40 training.batch_size 2.0 +958 40 training.label_smoothing 0.013753596575755781 +958 41 model.embedding_dim 0.0 +958 41 model.relation_dim 1.0 +958 41 model.dropout_0 0.2460539526272806 +958 41 model.dropout_1 0.4036687643707132 +958 41 model.dropout_2 0.4572865467307381 +958 41 training.batch_size 2.0 +958 41 training.label_smoothing 0.8872436333900034 +958 42 model.embedding_dim 0.0 +958 42 model.relation_dim 1.0 +958 42 model.dropout_0 0.121801826972668 +958 42 model.dropout_1 0.41006006662002414 +958 42 model.dropout_2 0.31732408406490586 +958 42 training.batch_size 2.0 +958 42 training.label_smoothing 0.016498289470656468 +958 43 model.embedding_dim 2.0 +958 43 model.relation_dim 1.0 +958 43 model.dropout_0 0.1400053722844012 +958 43 model.dropout_1 0.2174329722668874 +958 43 model.dropout_2 0.26812375292165547 +958 43 training.batch_size 2.0 +958 43 training.label_smoothing 0.005588767278041309 +958 44 model.embedding_dim 2.0 +958 44 model.relation_dim 2.0 +958 44 model.dropout_0 0.4163090958863047 +958 44 model.dropout_1 0.2725474541271192 +958 44 model.dropout_2 0.20464150004709925 
+958 44 training.batch_size 0.0 +958 44 training.label_smoothing 0.04875349096017729 +958 45 model.embedding_dim 2.0 +958 45 model.relation_dim 2.0 +958 45 model.dropout_0 0.14402159834369532 +958 45 model.dropout_1 0.45546944940415823 +958 45 model.dropout_2 0.4824892051077725 +958 45 training.batch_size 2.0 +958 45 training.label_smoothing 0.027473523331980207 +958 46 model.embedding_dim 0.0 +958 46 model.relation_dim 2.0 +958 46 model.dropout_0 0.23405586507352416 +958 46 model.dropout_1 0.22938996766591557 +958 46 model.dropout_2 0.42193286111733197 +958 46 training.batch_size 2.0 +958 46 training.label_smoothing 0.005737247048234654 +958 47 model.embedding_dim 2.0 +958 47 model.relation_dim 1.0 +958 47 model.dropout_0 0.2602947997829761 +958 47 model.dropout_1 0.14269362742018196 +958 47 model.dropout_2 0.4247035069884335 +958 47 training.batch_size 0.0 +958 47 training.label_smoothing 0.07608839928959754 +958 48 model.embedding_dim 2.0 +958 48 model.relation_dim 0.0 +958 48 model.dropout_0 0.18871303510139154 +958 48 model.dropout_1 0.30521361658729784 +958 48 model.dropout_2 0.3562401181210638 +958 48 training.batch_size 1.0 +958 48 training.label_smoothing 0.47214126482392166 +958 49 model.embedding_dim 0.0 +958 49 model.relation_dim 2.0 +958 49 model.dropout_0 0.4314667023927322 +958 49 model.dropout_1 0.4488286771300737 +958 49 model.dropout_2 0.10028644830432568 +958 49 training.batch_size 2.0 +958 49 training.label_smoothing 0.002120746935574217 +958 50 model.embedding_dim 1.0 +958 50 model.relation_dim 0.0 +958 50 model.dropout_0 0.32795554381584724 +958 50 model.dropout_1 0.39778202585529465 +958 50 model.dropout_2 0.3977432708284192 +958 50 training.batch_size 0.0 +958 50 training.label_smoothing 0.020377089147482833 +958 51 model.embedding_dim 0.0 +958 51 model.relation_dim 1.0 +958 51 model.dropout_0 0.15354726635080646 +958 51 model.dropout_1 0.29604940247826056 +958 51 model.dropout_2 0.12285680684378404 +958 51 training.batch_size 2.0 +958 51 
training.label_smoothing 0.005161369916682934 +958 52 model.embedding_dim 2.0 +958 52 model.relation_dim 1.0 +958 52 model.dropout_0 0.13002495297706174 +958 52 model.dropout_1 0.2879203771231549 +958 52 model.dropout_2 0.4601750872877354 +958 52 training.batch_size 2.0 +958 52 training.label_smoothing 0.009960043329185165 +958 53 model.embedding_dim 2.0 +958 53 model.relation_dim 0.0 +958 53 model.dropout_0 0.1266250714512661 +958 53 model.dropout_1 0.2795524861417567 +958 53 model.dropout_2 0.48090735453286126 +958 53 training.batch_size 2.0 +958 53 training.label_smoothing 0.0017679437562213931 +958 54 model.embedding_dim 0.0 +958 54 model.relation_dim 1.0 +958 54 model.dropout_0 0.13413710002661206 +958 54 model.dropout_1 0.1805853789871168 +958 54 model.dropout_2 0.47933901610054763 +958 54 training.batch_size 1.0 +958 54 training.label_smoothing 0.002565714426743886 +958 55 model.embedding_dim 1.0 +958 55 model.relation_dim 2.0 +958 55 model.dropout_0 0.120994918158687 +958 55 model.dropout_1 0.16769784405183874 +958 55 model.dropout_2 0.27142963914818763 +958 55 training.batch_size 2.0 +958 55 training.label_smoothing 0.09114680067488101 +958 56 model.embedding_dim 1.0 +958 56 model.relation_dim 1.0 +958 56 model.dropout_0 0.23759551868857054 +958 56 model.dropout_1 0.2727503496444057 +958 56 model.dropout_2 0.17446431365194848 +958 56 training.batch_size 1.0 +958 56 training.label_smoothing 0.012215158720972969 +958 57 model.embedding_dim 1.0 +958 57 model.relation_dim 1.0 +958 57 model.dropout_0 0.4913809090611224 +958 57 model.dropout_1 0.31658957221573414 +958 57 model.dropout_2 0.2359627928517318 +958 57 training.batch_size 1.0 +958 57 training.label_smoothing 0.07122893784242909 +958 58 model.embedding_dim 2.0 +958 58 model.relation_dim 2.0 +958 58 model.dropout_0 0.362784098863296 +958 58 model.dropout_1 0.44477307626847307 +958 58 model.dropout_2 0.2719214124201289 +958 58 training.batch_size 1.0 +958 58 training.label_smoothing 0.0011705423982575414 
+958 59 model.embedding_dim 1.0 +958 59 model.relation_dim 0.0 +958 59 model.dropout_0 0.22712398375249965 +958 59 model.dropout_1 0.12841608632373713 +958 59 model.dropout_2 0.14851253627664476 +958 59 training.batch_size 2.0 +958 59 training.label_smoothing 0.0011289080453909964 +958 60 model.embedding_dim 1.0 +958 60 model.relation_dim 1.0 +958 60 model.dropout_0 0.3482324393047316 +958 60 model.dropout_1 0.2907466936393095 +958 60 model.dropout_2 0.17543615157406625 +958 60 training.batch_size 2.0 +958 60 training.label_smoothing 0.03122919263080693 +958 61 model.embedding_dim 1.0 +958 61 model.relation_dim 2.0 +958 61 model.dropout_0 0.2676934009987407 +958 61 model.dropout_1 0.31986661810797445 +958 61 model.dropout_2 0.35928308937714526 +958 61 training.batch_size 0.0 +958 61 training.label_smoothing 0.5597915104469069 +958 62 model.embedding_dim 2.0 +958 62 model.relation_dim 2.0 +958 62 model.dropout_0 0.16360400531422753 +958 62 model.dropout_1 0.22123519568020905 +958 62 model.dropout_2 0.15932135553473692 +958 62 training.batch_size 1.0 +958 62 training.label_smoothing 0.3129429998732127 +958 63 model.embedding_dim 0.0 +958 63 model.relation_dim 2.0 +958 63 model.dropout_0 0.15831032063242031 +958 63 model.dropout_1 0.12094518645035973 +958 63 model.dropout_2 0.13811333641813228 +958 63 training.batch_size 2.0 +958 63 training.label_smoothing 0.007510563447222716 +958 64 model.embedding_dim 0.0 +958 64 model.relation_dim 1.0 +958 64 model.dropout_0 0.2993225549197346 +958 64 model.dropout_1 0.450667204649735 +958 64 model.dropout_2 0.4047100877310411 +958 64 training.batch_size 0.0 +958 64 training.label_smoothing 0.013758612799262111 +958 65 model.embedding_dim 1.0 +958 65 model.relation_dim 2.0 +958 65 model.dropout_0 0.10728643674332727 +958 65 model.dropout_1 0.27549468363931584 +958 65 model.dropout_2 0.1728866815305853 +958 65 training.batch_size 2.0 +958 65 training.label_smoothing 0.113631468672188 +958 66 model.embedding_dim 0.0 +958 66 
model.relation_dim 2.0 +958 66 model.dropout_0 0.45378477040779364 +958 66 model.dropout_1 0.41677437252788485 +958 66 model.dropout_2 0.3264566713603514 +958 66 training.batch_size 0.0 +958 66 training.label_smoothing 0.002448164118310104 +958 67 model.embedding_dim 2.0 +958 67 model.relation_dim 2.0 +958 67 model.dropout_0 0.4421173321020887 +958 67 model.dropout_1 0.4361321054192938 +958 67 model.dropout_2 0.1559576225240373 +958 67 training.batch_size 2.0 +958 67 training.label_smoothing 0.3510069712467987 +958 68 model.embedding_dim 1.0 +958 68 model.relation_dim 1.0 +958 68 model.dropout_0 0.49448240489728 +958 68 model.dropout_1 0.19544386066936262 +958 68 model.dropout_2 0.351774468995807 +958 68 training.batch_size 0.0 +958 68 training.label_smoothing 0.0011005229513808476 +958 69 model.embedding_dim 0.0 +958 69 model.relation_dim 0.0 +958 69 model.dropout_0 0.1385700067506054 +958 69 model.dropout_1 0.1282398295202133 +958 69 model.dropout_2 0.41719778824874465 +958 69 training.batch_size 2.0 +958 69 training.label_smoothing 0.014880461751228228 +958 70 model.embedding_dim 2.0 +958 70 model.relation_dim 2.0 +958 70 model.dropout_0 0.44988798334510793 +958 70 model.dropout_1 0.16766479439987286 +958 70 model.dropout_2 0.12659170938232475 +958 70 training.batch_size 0.0 +958 70 training.label_smoothing 0.3769767973393993 +958 71 model.embedding_dim 0.0 +958 71 model.relation_dim 2.0 +958 71 model.dropout_0 0.3750617830928672 +958 71 model.dropout_1 0.2479256240737005 +958 71 model.dropout_2 0.2182163671462859 +958 71 training.batch_size 2.0 +958 71 training.label_smoothing 0.590682295918133 +958 72 model.embedding_dim 2.0 +958 72 model.relation_dim 2.0 +958 72 model.dropout_0 0.3535142898654806 +958 72 model.dropout_1 0.4642726085571913 +958 72 model.dropout_2 0.23741365554289484 +958 72 training.batch_size 1.0 +958 72 training.label_smoothing 0.0015492977320664865 +958 73 model.embedding_dim 1.0 +958 73 model.relation_dim 1.0 +958 73 model.dropout_0 
0.11084543246170729 +958 73 model.dropout_1 0.48514131364626956 +958 73 model.dropout_2 0.1700479593141384 +958 73 training.batch_size 0.0 +958 73 training.label_smoothing 0.24543427334046142 +958 74 model.embedding_dim 0.0 +958 74 model.relation_dim 2.0 +958 74 model.dropout_0 0.41679294167483294 +958 74 model.dropout_1 0.18673452516903222 +958 74 model.dropout_2 0.20490087697411116 +958 74 training.batch_size 0.0 +958 74 training.label_smoothing 0.008267379429730362 +958 75 model.embedding_dim 0.0 +958 75 model.relation_dim 0.0 +958 75 model.dropout_0 0.24945260440572872 +958 75 model.dropout_1 0.13483018812370595 +958 75 model.dropout_2 0.4748217229864259 +958 75 training.batch_size 2.0 +958 75 training.label_smoothing 0.05552394078395797 +958 76 model.embedding_dim 1.0 +958 76 model.relation_dim 1.0 +958 76 model.dropout_0 0.2509103311252977 +958 76 model.dropout_1 0.3900184258889293 +958 76 model.dropout_2 0.4451760433695148 +958 76 training.batch_size 2.0 +958 76 training.label_smoothing 0.03249940161115253 +958 77 model.embedding_dim 0.0 +958 77 model.relation_dim 0.0 +958 77 model.dropout_0 0.18713141307351325 +958 77 model.dropout_1 0.11616381868585131 +958 77 model.dropout_2 0.362568540945194 +958 77 training.batch_size 1.0 +958 77 training.label_smoothing 0.00151184967441753 +958 78 model.embedding_dim 1.0 +958 78 model.relation_dim 1.0 +958 78 model.dropout_0 0.22898427836540575 +958 78 model.dropout_1 0.16686840509255474 +958 78 model.dropout_2 0.46844959051523605 +958 78 training.batch_size 1.0 +958 78 training.label_smoothing 0.10212568632428096 +958 79 model.embedding_dim 1.0 +958 79 model.relation_dim 2.0 +958 79 model.dropout_0 0.41949663140083193 +958 79 model.dropout_1 0.230436766914396 +958 79 model.dropout_2 0.14816682655432833 +958 79 training.batch_size 1.0 +958 79 training.label_smoothing 0.012310442291723142 +958 80 model.embedding_dim 0.0 +958 80 model.relation_dim 0.0 +958 80 model.dropout_0 0.39595920002789253 +958 80 model.dropout_1 
0.25806203524529037 +958 80 model.dropout_2 0.3648498632528139 +958 80 training.batch_size 1.0 +958 80 training.label_smoothing 0.32850634951326957 +958 81 model.embedding_dim 1.0 +958 81 model.relation_dim 2.0 +958 81 model.dropout_0 0.4484525455669015 +958 81 model.dropout_1 0.36140309435382456 +958 81 model.dropout_2 0.11117569977325217 +958 81 training.batch_size 2.0 +958 81 training.label_smoothing 0.9262171013928551 +958 82 model.embedding_dim 2.0 +958 82 model.relation_dim 0.0 +958 82 model.dropout_0 0.15873764229602716 +958 82 model.dropout_1 0.20066315825485317 +958 82 model.dropout_2 0.1500333692246502 +958 82 training.batch_size 1.0 +958 82 training.label_smoothing 0.04064109970817941 +958 83 model.embedding_dim 1.0 +958 83 model.relation_dim 1.0 +958 83 model.dropout_0 0.23421497667986146 +958 83 model.dropout_1 0.34806256528251317 +958 83 model.dropout_2 0.4661375685864918 +958 83 training.batch_size 1.0 +958 83 training.label_smoothing 0.4464768547274045 +958 84 model.embedding_dim 0.0 +958 84 model.relation_dim 2.0 +958 84 model.dropout_0 0.13069840763192248 +958 84 model.dropout_1 0.40348436163782286 +958 84 model.dropout_2 0.45326230960523983 +958 84 training.batch_size 0.0 +958 84 training.label_smoothing 0.030455054464966772 +958 85 model.embedding_dim 0.0 +958 85 model.relation_dim 2.0 +958 85 model.dropout_0 0.1776939754557717 +958 85 model.dropout_1 0.411837157895406 +958 85 model.dropout_2 0.145963730652379 +958 85 training.batch_size 2.0 +958 85 training.label_smoothing 0.29823001797028675 +958 86 model.embedding_dim 1.0 +958 86 model.relation_dim 0.0 +958 86 model.dropout_0 0.34288664914201994 +958 86 model.dropout_1 0.23289888356311583 +958 86 model.dropout_2 0.23919393519168583 +958 86 training.batch_size 1.0 +958 86 training.label_smoothing 0.009518254769142103 +958 87 model.embedding_dim 1.0 +958 87 model.relation_dim 1.0 +958 87 model.dropout_0 0.4853936725874026 +958 87 model.dropout_1 0.27630795700268107 +958 87 model.dropout_2 
0.12579742506567207 +958 87 training.batch_size 1.0 +958 87 training.label_smoothing 0.08673506421003246 +958 88 model.embedding_dim 1.0 +958 88 model.relation_dim 1.0 +958 88 model.dropout_0 0.3476421863178559 +958 88 model.dropout_1 0.31328968750385944 +958 88 model.dropout_2 0.48578661520322486 +958 88 training.batch_size 0.0 +958 88 training.label_smoothing 0.013297539155087095 +958 89 model.embedding_dim 2.0 +958 89 model.relation_dim 1.0 +958 89 model.dropout_0 0.4509581506944559 +958 89 model.dropout_1 0.31803988026573304 +958 89 model.dropout_2 0.18784753757525402 +958 89 training.batch_size 0.0 +958 89 training.label_smoothing 0.010711811777147142 +958 90 model.embedding_dim 0.0 +958 90 model.relation_dim 2.0 +958 90 model.dropout_0 0.34772360080376774 +958 90 model.dropout_1 0.3404545554938262 +958 90 model.dropout_2 0.269236491090198 +958 90 training.batch_size 2.0 +958 90 training.label_smoothing 0.019260589630553514 +958 91 model.embedding_dim 0.0 +958 91 model.relation_dim 1.0 +958 91 model.dropout_0 0.4123847148830314 +958 91 model.dropout_1 0.4165744413420406 +958 91 model.dropout_2 0.40584808260493255 +958 91 training.batch_size 0.0 +958 91 training.label_smoothing 0.005197675388855514 +958 92 model.embedding_dim 0.0 +958 92 model.relation_dim 0.0 +958 92 model.dropout_0 0.4071014462865574 +958 92 model.dropout_1 0.2929128058437721 +958 92 model.dropout_2 0.4576612745050843 +958 92 training.batch_size 1.0 +958 92 training.label_smoothing 0.011108510029237129 +958 93 model.embedding_dim 2.0 +958 93 model.relation_dim 0.0 +958 93 model.dropout_0 0.11937250928933901 +958 93 model.dropout_1 0.2440864832619426 +958 93 model.dropout_2 0.3284689567327964 +958 93 training.batch_size 2.0 +958 93 training.label_smoothing 0.2863620934981756 +958 94 model.embedding_dim 1.0 +958 94 model.relation_dim 1.0 +958 94 model.dropout_0 0.3469211479440155 +958 94 model.dropout_1 0.23902209743102465 +958 94 model.dropout_2 0.15251271791574147 +958 94 training.batch_size 
2.0 +958 94 training.label_smoothing 0.2577019824215319 +958 95 model.embedding_dim 2.0 +958 95 model.relation_dim 1.0 +958 95 model.dropout_0 0.1787695852735597 +958 95 model.dropout_1 0.3654321812799871 +958 95 model.dropout_2 0.2847774040332923 +958 95 training.batch_size 2.0 +958 95 training.label_smoothing 0.001672498276469103 +958 96 model.embedding_dim 2.0 +958 96 model.relation_dim 2.0 +958 96 model.dropout_0 0.48493184199317185 +958 96 model.dropout_1 0.12966259517850529 +958 96 model.dropout_2 0.3986918056422447 +958 96 training.batch_size 2.0 +958 96 training.label_smoothing 0.0032148263198904306 +958 97 model.embedding_dim 0.0 +958 97 model.relation_dim 2.0 +958 97 model.dropout_0 0.3770227955027443 +958 97 model.dropout_1 0.30440309047829917 +958 97 model.dropout_2 0.28493858355106766 +958 97 training.batch_size 2.0 +958 97 training.label_smoothing 0.5602818808283525 +958 98 model.embedding_dim 0.0 +958 98 model.relation_dim 2.0 +958 98 model.dropout_0 0.39913885689753326 +958 98 model.dropout_1 0.4678631325418813 +958 98 model.dropout_2 0.4739836643739028 +958 98 training.batch_size 2.0 +958 98 training.label_smoothing 0.020910736837169017 +958 99 model.embedding_dim 0.0 +958 99 model.relation_dim 1.0 +958 99 model.dropout_0 0.3658709003873134 +958 99 model.dropout_1 0.3988195368959814 +958 99 model.dropout_2 0.24390918704192877 +958 99 training.batch_size 1.0 +958 99 training.label_smoothing 0.9643710171250268 +958 100 model.embedding_dim 0.0 +958 100 model.relation_dim 1.0 +958 100 model.dropout_0 0.2650724556591718 +958 100 model.dropout_1 0.13097598245271339 +958 100 model.dropout_2 0.22036468972485856 +958 100 training.batch_size 0.0 +958 100 training.label_smoothing 0.0014132186500860856 +958 1 dataset """kinships""" +958 1 model """tucker""" +958 1 loss """softplus""" +958 1 regularizer """no""" +958 1 optimizer """adadelta""" +958 1 training_loop """lcwa""" +958 1 evaluator """rankbased""" +958 2 dataset """kinships""" +958 2 model 
"""tucker""" +958 2 loss """softplus""" +958 2 regularizer """no""" +958 2 optimizer """adadelta""" +958 2 training_loop """lcwa""" +958 2 evaluator """rankbased""" +958 3 dataset """kinships""" +958 3 model """tucker""" +958 3 loss """softplus""" +958 3 regularizer """no""" +958 3 optimizer """adadelta""" +958 3 training_loop """lcwa""" +958 3 evaluator """rankbased""" +958 4 dataset """kinships""" +958 4 model """tucker""" +958 4 loss """softplus""" +958 4 regularizer """no""" +958 4 optimizer """adadelta""" +958 4 training_loop """lcwa""" +958 4 evaluator """rankbased""" +958 5 dataset """kinships""" +958 5 model """tucker""" +958 5 loss """softplus""" +958 5 regularizer """no""" +958 5 optimizer """adadelta""" +958 5 training_loop """lcwa""" +958 5 evaluator """rankbased""" +958 6 dataset """kinships""" +958 6 model """tucker""" +958 6 loss """softplus""" +958 6 regularizer """no""" +958 6 optimizer """adadelta""" +958 6 training_loop """lcwa""" +958 6 evaluator """rankbased""" +958 7 dataset """kinships""" +958 7 model """tucker""" +958 7 loss """softplus""" +958 7 regularizer """no""" +958 7 optimizer """adadelta""" +958 7 training_loop """lcwa""" +958 7 evaluator """rankbased""" +958 8 dataset """kinships""" +958 8 model """tucker""" +958 8 loss """softplus""" +958 8 regularizer """no""" +958 8 optimizer """adadelta""" +958 8 training_loop """lcwa""" +958 8 evaluator """rankbased""" +958 9 dataset """kinships""" +958 9 model """tucker""" +958 9 loss """softplus""" +958 9 regularizer """no""" +958 9 optimizer """adadelta""" +958 9 training_loop """lcwa""" +958 9 evaluator """rankbased""" +958 10 dataset """kinships""" +958 10 model """tucker""" +958 10 loss """softplus""" +958 10 regularizer """no""" +958 10 optimizer """adadelta""" +958 10 training_loop """lcwa""" +958 10 evaluator """rankbased""" +958 11 dataset """kinships""" +958 11 model """tucker""" +958 11 loss """softplus""" +958 11 regularizer """no""" +958 11 optimizer """adadelta""" +958 11 
training_loop """lcwa""" +958 11 evaluator """rankbased""" +958 12 dataset """kinships""" +958 12 model """tucker""" +958 12 loss """softplus""" +958 12 regularizer """no""" +958 12 optimizer """adadelta""" +958 12 training_loop """lcwa""" +958 12 evaluator """rankbased""" +958 13 dataset """kinships""" +958 13 model """tucker""" +958 13 loss """softplus""" +958 13 regularizer """no""" +958 13 optimizer """adadelta""" +958 13 training_loop """lcwa""" +958 13 evaluator """rankbased""" +958 14 dataset """kinships""" +958 14 model """tucker""" +958 14 loss """softplus""" +958 14 regularizer """no""" +958 14 optimizer """adadelta""" +958 14 training_loop """lcwa""" +958 14 evaluator """rankbased""" +958 15 dataset """kinships""" +958 15 model """tucker""" +958 15 loss """softplus""" +958 15 regularizer """no""" +958 15 optimizer """adadelta""" +958 15 training_loop """lcwa""" +958 15 evaluator """rankbased""" +958 16 dataset """kinships""" +958 16 model """tucker""" +958 16 loss """softplus""" +958 16 regularizer """no""" +958 16 optimizer """adadelta""" +958 16 training_loop """lcwa""" +958 16 evaluator """rankbased""" +958 17 dataset """kinships""" +958 17 model """tucker""" +958 17 loss """softplus""" +958 17 regularizer """no""" +958 17 optimizer """adadelta""" +958 17 training_loop """lcwa""" +958 17 evaluator """rankbased""" +958 18 dataset """kinships""" +958 18 model """tucker""" +958 18 loss """softplus""" +958 18 regularizer """no""" +958 18 optimizer """adadelta""" +958 18 training_loop """lcwa""" +958 18 evaluator """rankbased""" +958 19 dataset """kinships""" +958 19 model """tucker""" +958 19 loss """softplus""" +958 19 regularizer """no""" +958 19 optimizer """adadelta""" +958 19 training_loop """lcwa""" +958 19 evaluator """rankbased""" +958 20 dataset """kinships""" +958 20 model """tucker""" +958 20 loss """softplus""" +958 20 regularizer """no""" +958 20 optimizer """adadelta""" +958 20 training_loop """lcwa""" +958 20 evaluator """rankbased""" +958 
21 dataset """kinships""" +958 21 model """tucker""" +958 21 loss """softplus""" +958 21 regularizer """no""" +958 21 optimizer """adadelta""" +958 21 training_loop """lcwa""" +958 21 evaluator """rankbased""" +958 22 dataset """kinships""" +958 22 model """tucker""" +958 22 loss """softplus""" +958 22 regularizer """no""" +958 22 optimizer """adadelta""" +958 22 training_loop """lcwa""" +958 22 evaluator """rankbased""" +958 23 dataset """kinships""" +958 23 model """tucker""" +958 23 loss """softplus""" +958 23 regularizer """no""" +958 23 optimizer """adadelta""" +958 23 training_loop """lcwa""" +958 23 evaluator """rankbased""" +958 24 dataset """kinships""" +958 24 model """tucker""" +958 24 loss """softplus""" +958 24 regularizer """no""" +958 24 optimizer """adadelta""" +958 24 training_loop """lcwa""" +958 24 evaluator """rankbased""" +958 25 dataset """kinships""" +958 25 model """tucker""" +958 25 loss """softplus""" +958 25 regularizer """no""" +958 25 optimizer """adadelta""" +958 25 training_loop """lcwa""" +958 25 evaluator """rankbased""" +958 26 dataset """kinships""" +958 26 model """tucker""" +958 26 loss """softplus""" +958 26 regularizer """no""" +958 26 optimizer """adadelta""" +958 26 training_loop """lcwa""" +958 26 evaluator """rankbased""" +958 27 dataset """kinships""" +958 27 model """tucker""" +958 27 loss """softplus""" +958 27 regularizer """no""" +958 27 optimizer """adadelta""" +958 27 training_loop """lcwa""" +958 27 evaluator """rankbased""" +958 28 dataset """kinships""" +958 28 model """tucker""" +958 28 loss """softplus""" +958 28 regularizer """no""" +958 28 optimizer """adadelta""" +958 28 training_loop """lcwa""" +958 28 evaluator """rankbased""" +958 29 dataset """kinships""" +958 29 model """tucker""" +958 29 loss """softplus""" +958 29 regularizer """no""" +958 29 optimizer """adadelta""" +958 29 training_loop """lcwa""" +958 29 evaluator """rankbased""" +958 30 dataset """kinships""" +958 30 model """tucker""" +958 30 
loss """softplus""" +958 30 regularizer """no""" +958 30 optimizer """adadelta""" +958 30 training_loop """lcwa""" +958 30 evaluator """rankbased""" +958 31 dataset """kinships""" +958 31 model """tucker""" +958 31 loss """softplus""" +958 31 regularizer """no""" +958 31 optimizer """adadelta""" +958 31 training_loop """lcwa""" +958 31 evaluator """rankbased""" +958 32 dataset """kinships""" +958 32 model """tucker""" +958 32 loss """softplus""" +958 32 regularizer """no""" +958 32 optimizer """adadelta""" +958 32 training_loop """lcwa""" +958 32 evaluator """rankbased""" +958 33 dataset """kinships""" +958 33 model """tucker""" +958 33 loss """softplus""" +958 33 regularizer """no""" +958 33 optimizer """adadelta""" +958 33 training_loop """lcwa""" +958 33 evaluator """rankbased""" +958 34 dataset """kinships""" +958 34 model """tucker""" +958 34 loss """softplus""" +958 34 regularizer """no""" +958 34 optimizer """adadelta""" +958 34 training_loop """lcwa""" +958 34 evaluator """rankbased""" +958 35 dataset """kinships""" +958 35 model """tucker""" +958 35 loss """softplus""" +958 35 regularizer """no""" +958 35 optimizer """adadelta""" +958 35 training_loop """lcwa""" +958 35 evaluator """rankbased""" +958 36 dataset """kinships""" +958 36 model """tucker""" +958 36 loss """softplus""" +958 36 regularizer """no""" +958 36 optimizer """adadelta""" +958 36 training_loop """lcwa""" +958 36 evaluator """rankbased""" +958 37 dataset """kinships""" +958 37 model """tucker""" +958 37 loss """softplus""" +958 37 regularizer """no""" +958 37 optimizer """adadelta""" +958 37 training_loop """lcwa""" +958 37 evaluator """rankbased""" +958 38 dataset """kinships""" +958 38 model """tucker""" +958 38 loss """softplus""" +958 38 regularizer """no""" +958 38 optimizer """adadelta""" +958 38 training_loop """lcwa""" +958 38 evaluator """rankbased""" +958 39 dataset """kinships""" +958 39 model """tucker""" +958 39 loss """softplus""" +958 39 regularizer """no""" +958 39 
optimizer """adadelta""" +958 39 training_loop """lcwa""" +958 39 evaluator """rankbased""" +958 40 dataset """kinships""" +958 40 model """tucker""" +958 40 loss """softplus""" +958 40 regularizer """no""" +958 40 optimizer """adadelta""" +958 40 training_loop """lcwa""" +958 40 evaluator """rankbased""" +958 41 dataset """kinships""" +958 41 model """tucker""" +958 41 loss """softplus""" +958 41 regularizer """no""" +958 41 optimizer """adadelta""" +958 41 training_loop """lcwa""" +958 41 evaluator """rankbased""" +958 42 dataset """kinships""" +958 42 model """tucker""" +958 42 loss """softplus""" +958 42 regularizer """no""" +958 42 optimizer """adadelta""" +958 42 training_loop """lcwa""" +958 42 evaluator """rankbased""" +958 43 dataset """kinships""" +958 43 model """tucker""" +958 43 loss """softplus""" +958 43 regularizer """no""" +958 43 optimizer """adadelta""" +958 43 training_loop """lcwa""" +958 43 evaluator """rankbased""" +958 44 dataset """kinships""" +958 44 model """tucker""" +958 44 loss """softplus""" +958 44 regularizer """no""" +958 44 optimizer """adadelta""" +958 44 training_loop """lcwa""" +958 44 evaluator """rankbased""" +958 45 dataset """kinships""" +958 45 model """tucker""" +958 45 loss """softplus""" +958 45 regularizer """no""" +958 45 optimizer """adadelta""" +958 45 training_loop """lcwa""" +958 45 evaluator """rankbased""" +958 46 dataset """kinships""" +958 46 model """tucker""" +958 46 loss """softplus""" +958 46 regularizer """no""" +958 46 optimizer """adadelta""" +958 46 training_loop """lcwa""" +958 46 evaluator """rankbased""" +958 47 dataset """kinships""" +958 47 model """tucker""" +958 47 loss """softplus""" +958 47 regularizer """no""" +958 47 optimizer """adadelta""" +958 47 training_loop """lcwa""" +958 47 evaluator """rankbased""" +958 48 dataset """kinships""" +958 48 model """tucker""" +958 48 loss """softplus""" +958 48 regularizer """no""" +958 48 optimizer """adadelta""" +958 48 training_loop """lcwa""" +958 
48 evaluator """rankbased""" +958 49 dataset """kinships""" +958 49 model """tucker""" +958 49 loss """softplus""" +958 49 regularizer """no""" +958 49 optimizer """adadelta""" +958 49 training_loop """lcwa""" +958 49 evaluator """rankbased""" +958 50 dataset """kinships""" +958 50 model """tucker""" +958 50 loss """softplus""" +958 50 regularizer """no""" +958 50 optimizer """adadelta""" +958 50 training_loop """lcwa""" +958 50 evaluator """rankbased""" +958 51 dataset """kinships""" +958 51 model """tucker""" +958 51 loss """softplus""" +958 51 regularizer """no""" +958 51 optimizer """adadelta""" +958 51 training_loop """lcwa""" +958 51 evaluator """rankbased""" +958 52 dataset """kinships""" +958 52 model """tucker""" +958 52 loss """softplus""" +958 52 regularizer """no""" +958 52 optimizer """adadelta""" +958 52 training_loop """lcwa""" +958 52 evaluator """rankbased""" +958 53 dataset """kinships""" +958 53 model """tucker""" +958 53 loss """softplus""" +958 53 regularizer """no""" +958 53 optimizer """adadelta""" +958 53 training_loop """lcwa""" +958 53 evaluator """rankbased""" +958 54 dataset """kinships""" +958 54 model """tucker""" +958 54 loss """softplus""" +958 54 regularizer """no""" +958 54 optimizer """adadelta""" +958 54 training_loop """lcwa""" +958 54 evaluator """rankbased""" +958 55 dataset """kinships""" +958 55 model """tucker""" +958 55 loss """softplus""" +958 55 regularizer """no""" +958 55 optimizer """adadelta""" +958 55 training_loop """lcwa""" +958 55 evaluator """rankbased""" +958 56 dataset """kinships""" +958 56 model """tucker""" +958 56 loss """softplus""" +958 56 regularizer """no""" +958 56 optimizer """adadelta""" +958 56 training_loop """lcwa""" +958 56 evaluator """rankbased""" +958 57 dataset """kinships""" +958 57 model """tucker""" +958 57 loss """softplus""" +958 57 regularizer """no""" +958 57 optimizer """adadelta""" +958 57 training_loop """lcwa""" +958 57 evaluator """rankbased""" +958 58 dataset """kinships""" +958 
58 model """tucker""" +958 58 loss """softplus""" +958 58 regularizer """no""" +958 58 optimizer """adadelta""" +958 58 training_loop """lcwa""" +958 58 evaluator """rankbased""" +958 59 dataset """kinships""" +958 59 model """tucker""" +958 59 loss """softplus""" +958 59 regularizer """no""" +958 59 optimizer """adadelta""" +958 59 training_loop """lcwa""" +958 59 evaluator """rankbased""" +958 60 dataset """kinships""" +958 60 model """tucker""" +958 60 loss """softplus""" +958 60 regularizer """no""" +958 60 optimizer """adadelta""" +958 60 training_loop """lcwa""" +958 60 evaluator """rankbased""" +958 61 dataset """kinships""" +958 61 model """tucker""" +958 61 loss """softplus""" +958 61 regularizer """no""" +958 61 optimizer """adadelta""" +958 61 training_loop """lcwa""" +958 61 evaluator """rankbased""" +958 62 dataset """kinships""" +958 62 model """tucker""" +958 62 loss """softplus""" +958 62 regularizer """no""" +958 62 optimizer """adadelta""" +958 62 training_loop """lcwa""" +958 62 evaluator """rankbased""" +958 63 dataset """kinships""" +958 63 model """tucker""" +958 63 loss """softplus""" +958 63 regularizer """no""" +958 63 optimizer """adadelta""" +958 63 training_loop """lcwa""" +958 63 evaluator """rankbased""" +958 64 dataset """kinships""" +958 64 model """tucker""" +958 64 loss """softplus""" +958 64 regularizer """no""" +958 64 optimizer """adadelta""" +958 64 training_loop """lcwa""" +958 64 evaluator """rankbased""" +958 65 dataset """kinships""" +958 65 model """tucker""" +958 65 loss """softplus""" +958 65 regularizer """no""" +958 65 optimizer """adadelta""" +958 65 training_loop """lcwa""" +958 65 evaluator """rankbased""" +958 66 dataset """kinships""" +958 66 model """tucker""" +958 66 loss """softplus""" +958 66 regularizer """no""" +958 66 optimizer """adadelta""" +958 66 training_loop """lcwa""" +958 66 evaluator """rankbased""" +958 67 dataset """kinships""" +958 67 model """tucker""" +958 67 loss """softplus""" +958 67 
regularizer """no""" +958 67 optimizer """adadelta""" +958 67 training_loop """lcwa""" +958 67 evaluator """rankbased""" +958 68 dataset """kinships""" +958 68 model """tucker""" +958 68 loss """softplus""" +958 68 regularizer """no""" +958 68 optimizer """adadelta""" +958 68 training_loop """lcwa""" +958 68 evaluator """rankbased""" +958 69 dataset """kinships""" +958 69 model """tucker""" +958 69 loss """softplus""" +958 69 regularizer """no""" +958 69 optimizer """adadelta""" +958 69 training_loop """lcwa""" +958 69 evaluator """rankbased""" +958 70 dataset """kinships""" +958 70 model """tucker""" +958 70 loss """softplus""" +958 70 regularizer """no""" +958 70 optimizer """adadelta""" +958 70 training_loop """lcwa""" +958 70 evaluator """rankbased""" +958 71 dataset """kinships""" +958 71 model """tucker""" +958 71 loss """softplus""" +958 71 regularizer """no""" +958 71 optimizer """adadelta""" +958 71 training_loop """lcwa""" +958 71 evaluator """rankbased""" +958 72 dataset """kinships""" +958 72 model """tucker""" +958 72 loss """softplus""" +958 72 regularizer """no""" +958 72 optimizer """adadelta""" +958 72 training_loop """lcwa""" +958 72 evaluator """rankbased""" +958 73 dataset """kinships""" +958 73 model """tucker""" +958 73 loss """softplus""" +958 73 regularizer """no""" +958 73 optimizer """adadelta""" +958 73 training_loop """lcwa""" +958 73 evaluator """rankbased""" +958 74 dataset """kinships""" +958 74 model """tucker""" +958 74 loss """softplus""" +958 74 regularizer """no""" +958 74 optimizer """adadelta""" +958 74 training_loop """lcwa""" +958 74 evaluator """rankbased""" +958 75 dataset """kinships""" +958 75 model """tucker""" +958 75 loss """softplus""" +958 75 regularizer """no""" +958 75 optimizer """adadelta""" +958 75 training_loop """lcwa""" +958 75 evaluator """rankbased""" +958 76 dataset """kinships""" +958 76 model """tucker""" +958 76 loss """softplus""" +958 76 regularizer """no""" +958 76 optimizer """adadelta""" +958 76 
training_loop """lcwa""" +958 76 evaluator """rankbased""" +958 77 dataset """kinships""" +958 77 model """tucker""" +958 77 loss """softplus""" +958 77 regularizer """no""" +958 77 optimizer """adadelta""" +958 77 training_loop """lcwa""" +958 77 evaluator """rankbased""" +958 78 dataset """kinships""" +958 78 model """tucker""" +958 78 loss """softplus""" +958 78 regularizer """no""" +958 78 optimizer """adadelta""" +958 78 training_loop """lcwa""" +958 78 evaluator """rankbased""" +958 79 dataset """kinships""" +958 79 model """tucker""" +958 79 loss """softplus""" +958 79 regularizer """no""" +958 79 optimizer """adadelta""" +958 79 training_loop """lcwa""" +958 79 evaluator """rankbased""" +958 80 dataset """kinships""" +958 80 model """tucker""" +958 80 loss """softplus""" +958 80 regularizer """no""" +958 80 optimizer """adadelta""" +958 80 training_loop """lcwa""" +958 80 evaluator """rankbased""" +958 81 dataset """kinships""" +958 81 model """tucker""" +958 81 loss """softplus""" +958 81 regularizer """no""" +958 81 optimizer """adadelta""" +958 81 training_loop """lcwa""" +958 81 evaluator """rankbased""" +958 82 dataset """kinships""" +958 82 model """tucker""" +958 82 loss """softplus""" +958 82 regularizer """no""" +958 82 optimizer """adadelta""" +958 82 training_loop """lcwa""" +958 82 evaluator """rankbased""" +958 83 dataset """kinships""" +958 83 model """tucker""" +958 83 loss """softplus""" +958 83 regularizer """no""" +958 83 optimizer """adadelta""" +958 83 training_loop """lcwa""" +958 83 evaluator """rankbased""" +958 84 dataset """kinships""" +958 84 model """tucker""" +958 84 loss """softplus""" +958 84 regularizer """no""" +958 84 optimizer """adadelta""" +958 84 training_loop """lcwa""" +958 84 evaluator """rankbased""" +958 85 dataset """kinships""" +958 85 model """tucker""" +958 85 loss """softplus""" +958 85 regularizer """no""" +958 85 optimizer """adadelta""" +958 85 training_loop """lcwa""" +958 85 evaluator """rankbased""" +958 
86 dataset """kinships""" +958 86 model """tucker""" +958 86 loss """softplus""" +958 86 regularizer """no""" +958 86 optimizer """adadelta""" +958 86 training_loop """lcwa""" +958 86 evaluator """rankbased""" +958 87 dataset """kinships""" +958 87 model """tucker""" +958 87 loss """softplus""" +958 87 regularizer """no""" +958 87 optimizer """adadelta""" +958 87 training_loop """lcwa""" +958 87 evaluator """rankbased""" +958 88 dataset """kinships""" +958 88 model """tucker""" +958 88 loss """softplus""" +958 88 regularizer """no""" +958 88 optimizer """adadelta""" +958 88 training_loop """lcwa""" +958 88 evaluator """rankbased""" +958 89 dataset """kinships""" +958 89 model """tucker""" +958 89 loss """softplus""" +958 89 regularizer """no""" +958 89 optimizer """adadelta""" +958 89 training_loop """lcwa""" +958 89 evaluator """rankbased""" +958 90 dataset """kinships""" +958 90 model """tucker""" +958 90 loss """softplus""" +958 90 regularizer """no""" +958 90 optimizer """adadelta""" +958 90 training_loop """lcwa""" +958 90 evaluator """rankbased""" +958 91 dataset """kinships""" +958 91 model """tucker""" +958 91 loss """softplus""" +958 91 regularizer """no""" +958 91 optimizer """adadelta""" +958 91 training_loop """lcwa""" +958 91 evaluator """rankbased""" +958 92 dataset """kinships""" +958 92 model """tucker""" +958 92 loss """softplus""" +958 92 regularizer """no""" +958 92 optimizer """adadelta""" +958 92 training_loop """lcwa""" +958 92 evaluator """rankbased""" +958 93 dataset """kinships""" +958 93 model """tucker""" +958 93 loss """softplus""" +958 93 regularizer """no""" +958 93 optimizer """adadelta""" +958 93 training_loop """lcwa""" +958 93 evaluator """rankbased""" +958 94 dataset """kinships""" +958 94 model """tucker""" +958 94 loss """softplus""" +958 94 regularizer """no""" +958 94 optimizer """adadelta""" +958 94 training_loop """lcwa""" +958 94 evaluator """rankbased""" +958 95 dataset """kinships""" +958 95 model """tucker""" +958 95 
loss """softplus""" +958 95 regularizer """no""" +958 95 optimizer """adadelta""" +958 95 training_loop """lcwa""" +958 95 evaluator """rankbased""" +958 96 dataset """kinships""" +958 96 model """tucker""" +958 96 loss """softplus""" +958 96 regularizer """no""" +958 96 optimizer """adadelta""" +958 96 training_loop """lcwa""" +958 96 evaluator """rankbased""" +958 97 dataset """kinships""" +958 97 model """tucker""" +958 97 loss """softplus""" +958 97 regularizer """no""" +958 97 optimizer """adadelta""" +958 97 training_loop """lcwa""" +958 97 evaluator """rankbased""" +958 98 dataset """kinships""" +958 98 model """tucker""" +958 98 loss """softplus""" +958 98 regularizer """no""" +958 98 optimizer """adadelta""" +958 98 training_loop """lcwa""" +958 98 evaluator """rankbased""" +958 99 dataset """kinships""" +958 99 model """tucker""" +958 99 loss """softplus""" +958 99 regularizer """no""" +958 99 optimizer """adadelta""" +958 99 training_loop """lcwa""" +958 99 evaluator """rankbased""" +958 100 dataset """kinships""" +958 100 model """tucker""" +958 100 loss """softplus""" +958 100 regularizer """no""" +958 100 optimizer """adadelta""" +958 100 training_loop """lcwa""" +958 100 evaluator """rankbased""" +959 1 model.embedding_dim 0.0 +959 1 model.relation_dim 0.0 +959 1 model.dropout_0 0.3862194840923262 +959 1 model.dropout_1 0.4695699366549563 +959 1 model.dropout_2 0.36519984478562983 +959 1 training.batch_size 1.0 +959 1 training.label_smoothing 0.0609297248646026 +959 2 model.embedding_dim 2.0 +959 2 model.relation_dim 2.0 +959 2 model.dropout_0 0.3028558215950341 +959 2 model.dropout_1 0.280818130902702 +959 2 model.dropout_2 0.4581109486426107 +959 2 training.batch_size 1.0 +959 2 training.label_smoothing 0.16422239856056578 +959 3 model.embedding_dim 2.0 +959 3 model.relation_dim 2.0 +959 3 model.dropout_0 0.1653176668134934 +959 3 model.dropout_1 0.337051383680825 +959 3 model.dropout_2 0.15496434733007086 +959 3 training.batch_size 0.0 +959 3 
training.label_smoothing 0.10474864268252389 +959 4 model.embedding_dim 1.0 +959 4 model.relation_dim 2.0 +959 4 model.dropout_0 0.3923237955241232 +959 4 model.dropout_1 0.1964840778264415 +959 4 model.dropout_2 0.4128612731862312 +959 4 training.batch_size 2.0 +959 4 training.label_smoothing 0.64859635859362 +959 5 model.embedding_dim 0.0 +959 5 model.relation_dim 1.0 +959 5 model.dropout_0 0.1529638569460924 +959 5 model.dropout_1 0.4660456999021363 +959 5 model.dropout_2 0.2602354290230612 +959 5 training.batch_size 2.0 +959 5 training.label_smoothing 0.19004711222524287 +959 6 model.embedding_dim 2.0 +959 6 model.relation_dim 1.0 +959 6 model.dropout_0 0.38007927898852467 +959 6 model.dropout_1 0.45899663193678497 +959 6 model.dropout_2 0.11867193969287962 +959 6 training.batch_size 0.0 +959 6 training.label_smoothing 0.019395457762671035 +959 7 model.embedding_dim 2.0 +959 7 model.relation_dim 1.0 +959 7 model.dropout_0 0.21794962980357835 +959 7 model.dropout_1 0.43092464698262356 +959 7 model.dropout_2 0.48017483253240556 +959 7 training.batch_size 0.0 +959 7 training.label_smoothing 0.007406950669994574 +959 8 model.embedding_dim 2.0 +959 8 model.relation_dim 0.0 +959 8 model.dropout_0 0.4910327444323114 +959 8 model.dropout_1 0.24430716437304645 +959 8 model.dropout_2 0.296754984300637 +959 8 training.batch_size 0.0 +959 8 training.label_smoothing 0.0962501853191538 +959 9 model.embedding_dim 0.0 +959 9 model.relation_dim 1.0 +959 9 model.dropout_0 0.481226750047851 +959 9 model.dropout_1 0.16845528982044633 +959 9 model.dropout_2 0.44469747771131196 +959 9 training.batch_size 1.0 +959 9 training.label_smoothing 0.45120786388943385 +959 10 model.embedding_dim 0.0 +959 10 model.relation_dim 1.0 +959 10 model.dropout_0 0.28732303204327964 +959 10 model.dropout_1 0.21118356690147957 +959 10 model.dropout_2 0.3606914251241903 +959 10 training.batch_size 0.0 +959 10 training.label_smoothing 0.0278645381861059 +959 11 model.embedding_dim 1.0 +959 11 
model.relation_dim 0.0 +959 11 model.dropout_0 0.13082585014937057 +959 11 model.dropout_1 0.417456184052734 +959 11 model.dropout_2 0.20841137344819927 +959 11 training.batch_size 0.0 +959 11 training.label_smoothing 0.012592696751825627 +959 12 model.embedding_dim 0.0 +959 12 model.relation_dim 0.0 +959 12 model.dropout_0 0.3839097032987163 +959 12 model.dropout_1 0.2543848884503582 +959 12 model.dropout_2 0.15960812793492962 +959 12 training.batch_size 1.0 +959 12 training.label_smoothing 0.004856490626683881 +959 13 model.embedding_dim 0.0 +959 13 model.relation_dim 0.0 +959 13 model.dropout_0 0.174893832326472 +959 13 model.dropout_1 0.31657713023023054 +959 13 model.dropout_2 0.28054127556864983 +959 13 training.batch_size 2.0 +959 13 training.label_smoothing 0.9157859001381756 +959 14 model.embedding_dim 2.0 +959 14 model.relation_dim 2.0 +959 14 model.dropout_0 0.2778832712931406 +959 14 model.dropout_1 0.2909887013266986 +959 14 model.dropout_2 0.43212609123091816 +959 14 training.batch_size 2.0 +959 14 training.label_smoothing 0.21291766283381497 +959 15 model.embedding_dim 0.0 +959 15 model.relation_dim 1.0 +959 15 model.dropout_0 0.15820841809386474 +959 15 model.dropout_1 0.24636147444534948 +959 15 model.dropout_2 0.28729479455200546 +959 15 training.batch_size 0.0 +959 15 training.label_smoothing 0.006287015135241357 +959 16 model.embedding_dim 0.0 +959 16 model.relation_dim 0.0 +959 16 model.dropout_0 0.46626758928498957 +959 16 model.dropout_1 0.10473731553039799 +959 16 model.dropout_2 0.337685209809796 +959 16 training.batch_size 1.0 +959 16 training.label_smoothing 0.0069347642947055485 +959 17 model.embedding_dim 2.0 +959 17 model.relation_dim 0.0 +959 17 model.dropout_0 0.1693255773695742 +959 17 model.dropout_1 0.34670281108284784 +959 17 model.dropout_2 0.3752842110189315 +959 17 training.batch_size 1.0 +959 17 training.label_smoothing 0.00130925783925281 +959 18 model.embedding_dim 1.0 +959 18 model.relation_dim 1.0 +959 18 model.dropout_0 
0.42367780151157974 +959 18 model.dropout_1 0.16001250151523616 +959 18 model.dropout_2 0.17597013718175397 +959 18 training.batch_size 2.0 +959 18 training.label_smoothing 0.4442962679459105 +959 19 model.embedding_dim 1.0 +959 19 model.relation_dim 2.0 +959 19 model.dropout_0 0.19502015719300847 +959 19 model.dropout_1 0.20062190836350846 +959 19 model.dropout_2 0.49502871446796426 +959 19 training.batch_size 0.0 +959 19 training.label_smoothing 0.33567370621280807 +959 20 model.embedding_dim 1.0 +959 20 model.relation_dim 0.0 +959 20 model.dropout_0 0.18523116892519084 +959 20 model.dropout_1 0.4433630285974709 +959 20 model.dropout_2 0.349514029471362 +959 20 training.batch_size 2.0 +959 20 training.label_smoothing 0.00864531142880982 +959 21 model.embedding_dim 1.0 +959 21 model.relation_dim 2.0 +959 21 model.dropout_0 0.41227592958116244 +959 21 model.dropout_1 0.32544732009119337 +959 21 model.dropout_2 0.18547091408580416 +959 21 training.batch_size 1.0 +959 21 training.label_smoothing 0.00919449085791243 +959 22 model.embedding_dim 1.0 +959 22 model.relation_dim 0.0 +959 22 model.dropout_0 0.14560375816408713 +959 22 model.dropout_1 0.39886222982893527 +959 22 model.dropout_2 0.1734226795180978 +959 22 training.batch_size 0.0 +959 22 training.label_smoothing 0.004037611689571289 +959 23 model.embedding_dim 1.0 +959 23 model.relation_dim 0.0 +959 23 model.dropout_0 0.3412107913944573 +959 23 model.dropout_1 0.4116050932831018 +959 23 model.dropout_2 0.16464974372224833 +959 23 training.batch_size 1.0 +959 23 training.label_smoothing 0.052846449124822135 +959 24 model.embedding_dim 0.0 +959 24 model.relation_dim 1.0 +959 24 model.dropout_0 0.21387308002979163 +959 24 model.dropout_1 0.3701951330691451 +959 24 model.dropout_2 0.10981281972510244 +959 24 training.batch_size 1.0 +959 24 training.label_smoothing 0.11653878394738429 +959 25 model.embedding_dim 2.0 +959 25 model.relation_dim 2.0 +959 25 model.dropout_0 0.10096326710836463 +959 25 model.dropout_1 
0.28572224387494655 +959 25 model.dropout_2 0.2585428830621981 +959 25 training.batch_size 1.0 +959 25 training.label_smoothing 0.013977803818096537 +959 26 model.embedding_dim 0.0 +959 26 model.relation_dim 1.0 +959 26 model.dropout_0 0.349363336787325 +959 26 model.dropout_1 0.1568957179236573 +959 26 model.dropout_2 0.4339848428304799 +959 26 training.batch_size 0.0 +959 26 training.label_smoothing 0.08219662287383799 +959 27 model.embedding_dim 2.0 +959 27 model.relation_dim 2.0 +959 27 model.dropout_0 0.330953106407862 +959 27 model.dropout_1 0.39303355154349473 +959 27 model.dropout_2 0.3652141856869443 +959 27 training.batch_size 2.0 +959 27 training.label_smoothing 0.017620020146477303 +959 28 model.embedding_dim 0.0 +959 28 model.relation_dim 2.0 +959 28 model.dropout_0 0.38092446935212954 +959 28 model.dropout_1 0.40048961532496363 +959 28 model.dropout_2 0.3283682123541436 +959 28 training.batch_size 1.0 +959 28 training.label_smoothing 0.003911750042331066 +959 29 model.embedding_dim 2.0 +959 29 model.relation_dim 2.0 +959 29 model.dropout_0 0.37197554868084215 +959 29 model.dropout_1 0.14913809209098725 +959 29 model.dropout_2 0.26463901543733004 +959 29 training.batch_size 2.0 +959 29 training.label_smoothing 0.006108785085338086 +959 30 model.embedding_dim 0.0 +959 30 model.relation_dim 2.0 +959 30 model.dropout_0 0.151928289267725 +959 30 model.dropout_1 0.2253081606243074 +959 30 model.dropout_2 0.37688437518877105 +959 30 training.batch_size 2.0 +959 30 training.label_smoothing 0.6448318511324792 +959 31 model.embedding_dim 2.0 +959 31 model.relation_dim 0.0 +959 31 model.dropout_0 0.22661370063176878 +959 31 model.dropout_1 0.15234040199852414 +959 31 model.dropout_2 0.24921442712592917 +959 31 training.batch_size 2.0 +959 31 training.label_smoothing 0.01158311129354515 +959 32 model.embedding_dim 0.0 +959 32 model.relation_dim 2.0 +959 32 model.dropout_0 0.17288188591182546 +959 32 model.dropout_1 0.479737992684243 +959 32 model.dropout_2 
0.49105734113172217 +959 32 training.batch_size 2.0 +959 32 training.label_smoothing 0.27327002554982466 +959 33 model.embedding_dim 2.0 +959 33 model.relation_dim 2.0 +959 33 model.dropout_0 0.15768403803809014 +959 33 model.dropout_1 0.445120691349956 +959 33 model.dropout_2 0.15845100519385613 +959 33 training.batch_size 2.0 +959 33 training.label_smoothing 0.013147473321499556 +959 34 model.embedding_dim 1.0 +959 34 model.relation_dim 2.0 +959 34 model.dropout_0 0.2898446533227022 +959 34 model.dropout_1 0.4078024670998427 +959 34 model.dropout_2 0.43220198541000976 +959 34 training.batch_size 1.0 +959 34 training.label_smoothing 0.014953377542013866 +959 35 model.embedding_dim 2.0 +959 35 model.relation_dim 2.0 +959 35 model.dropout_0 0.17984852068066548 +959 35 model.dropout_1 0.13918137138902004 +959 35 model.dropout_2 0.2553421904078682 +959 35 training.batch_size 2.0 +959 35 training.label_smoothing 0.19056062828220394 +959 36 model.embedding_dim 1.0 +959 36 model.relation_dim 2.0 +959 36 model.dropout_0 0.2406547988849309 +959 36 model.dropout_1 0.39629012841972444 +959 36 model.dropout_2 0.278032509201772 +959 36 training.batch_size 2.0 +959 36 training.label_smoothing 0.5912875049621631 +959 37 model.embedding_dim 1.0 +959 37 model.relation_dim 1.0 +959 37 model.dropout_0 0.30494004238769035 +959 37 model.dropout_1 0.2937337607288174 +959 37 model.dropout_2 0.1941418869762278 +959 37 training.batch_size 0.0 +959 37 training.label_smoothing 0.03870717872748612 +959 38 model.embedding_dim 1.0 +959 38 model.relation_dim 2.0 +959 38 model.dropout_0 0.13017875907503346 +959 38 model.dropout_1 0.10741365505530882 +959 38 model.dropout_2 0.4466443225204283 +959 38 training.batch_size 2.0 +959 38 training.label_smoothing 0.03782478111877876 +959 39 model.embedding_dim 0.0 +959 39 model.relation_dim 1.0 +959 39 model.dropout_0 0.370443440667669 +959 39 model.dropout_1 0.3420274206334746 +959 39 model.dropout_2 0.3687722520413025 +959 39 training.batch_size 0.0 
+959 39 training.label_smoothing 0.0015617885870063681 +959 40 model.embedding_dim 1.0 +959 40 model.relation_dim 0.0 +959 40 model.dropout_0 0.2970446820721451 +959 40 model.dropout_1 0.4994960590071159 +959 40 model.dropout_2 0.24428373311178653 +959 40 training.batch_size 1.0 +959 40 training.label_smoothing 0.21828114530503293 +959 41 model.embedding_dim 1.0 +959 41 model.relation_dim 2.0 +959 41 model.dropout_0 0.11863622258462692 +959 41 model.dropout_1 0.46018940053922575 +959 41 model.dropout_2 0.46122053361639537 +959 41 training.batch_size 2.0 +959 41 training.label_smoothing 0.0046199029393645735 +959 42 model.embedding_dim 0.0 +959 42 model.relation_dim 2.0 +959 42 model.dropout_0 0.363400396052065 +959 42 model.dropout_1 0.3524119422573381 +959 42 model.dropout_2 0.4343683185558872 +959 42 training.batch_size 0.0 +959 42 training.label_smoothing 0.01642330456146933 +959 43 model.embedding_dim 1.0 +959 43 model.relation_dim 1.0 +959 43 model.dropout_0 0.40532447284218287 +959 43 model.dropout_1 0.3519247777482206 +959 43 model.dropout_2 0.14219691360344947 +959 43 training.batch_size 2.0 +959 43 training.label_smoothing 0.0023039251606632285 +959 44 model.embedding_dim 1.0 +959 44 model.relation_dim 1.0 +959 44 model.dropout_0 0.3900287032912345 +959 44 model.dropout_1 0.40250044043409716 +959 44 model.dropout_2 0.46094856546320184 +959 44 training.batch_size 1.0 +959 44 training.label_smoothing 0.014137164180044936 +959 45 model.embedding_dim 1.0 +959 45 model.relation_dim 2.0 +959 45 model.dropout_0 0.11534842876731238 +959 45 model.dropout_1 0.27940161014814024 +959 45 model.dropout_2 0.23856935872049345 +959 45 training.batch_size 2.0 +959 45 training.label_smoothing 0.01964090376750094 +959 46 model.embedding_dim 1.0 +959 46 model.relation_dim 0.0 +959 46 model.dropout_0 0.4234580725111178 +959 46 model.dropout_1 0.23453829675833884 +959 46 model.dropout_2 0.3484321216224331 +959 46 training.batch_size 2.0 +959 46 training.label_smoothing 
0.0011087174362546942 +959 47 model.embedding_dim 1.0 +959 47 model.relation_dim 0.0 +959 47 model.dropout_0 0.49842767134825217 +959 47 model.dropout_1 0.15656475680550588 +959 47 model.dropout_2 0.499895312475383 +959 47 training.batch_size 2.0 +959 47 training.label_smoothing 0.005104755687209607 +959 48 model.embedding_dim 2.0 +959 48 model.relation_dim 0.0 +959 48 model.dropout_0 0.153991585691982 +959 48 model.dropout_1 0.1459627684204385 +959 48 model.dropout_2 0.3472463990852426 +959 48 training.batch_size 2.0 +959 48 training.label_smoothing 0.2278068428674353 +959 49 model.embedding_dim 1.0 +959 49 model.relation_dim 2.0 +959 49 model.dropout_0 0.2014215281714453 +959 49 model.dropout_1 0.3007040366255787 +959 49 model.dropout_2 0.1304862680066309 +959 49 training.batch_size 1.0 +959 49 training.label_smoothing 0.003005182734565603 +959 50 model.embedding_dim 0.0 +959 50 model.relation_dim 0.0 +959 50 model.dropout_0 0.33021095310086046 +959 50 model.dropout_1 0.4350696988443211 +959 50 model.dropout_2 0.42415622374636286 +959 50 training.batch_size 2.0 +959 50 training.label_smoothing 0.0534242971783617 +959 51 model.embedding_dim 2.0 +959 51 model.relation_dim 2.0 +959 51 model.dropout_0 0.18867632546007163 +959 51 model.dropout_1 0.4101636453637477 +959 51 model.dropout_2 0.2062640502928655 +959 51 training.batch_size 0.0 +959 51 training.label_smoothing 0.5300294765119596 +959 52 model.embedding_dim 0.0 +959 52 model.relation_dim 1.0 +959 52 model.dropout_0 0.2889773444482906 +959 52 model.dropout_1 0.10769259723610457 +959 52 model.dropout_2 0.3268285856098969 +959 52 training.batch_size 2.0 +959 52 training.label_smoothing 0.0010165085786816767 +959 53 model.embedding_dim 2.0 +959 53 model.relation_dim 2.0 +959 53 model.dropout_0 0.4894133510133093 +959 53 model.dropout_1 0.4498422726162965 +959 53 model.dropout_2 0.35463807277057313 +959 53 training.batch_size 1.0 +959 53 training.label_smoothing 0.05661551018090088 +959 54 model.embedding_dim 1.0 
+959 54 model.relation_dim 2.0 +959 54 model.dropout_0 0.410894369002346 +959 54 model.dropout_1 0.3442263850200267 +959 54 model.dropout_2 0.2808161726047432 +959 54 training.batch_size 1.0 +959 54 training.label_smoothing 0.02323096645653673 +959 55 model.embedding_dim 2.0 +959 55 model.relation_dim 1.0 +959 55 model.dropout_0 0.11453248008907631 +959 55 model.dropout_1 0.2215395858666109 +959 55 model.dropout_2 0.1607421615576517 +959 55 training.batch_size 2.0 +959 55 training.label_smoothing 0.006450172928626988 +959 56 model.embedding_dim 2.0 +959 56 model.relation_dim 1.0 +959 56 model.dropout_0 0.25686545362885316 +959 56 model.dropout_1 0.3599662353460812 +959 56 model.dropout_2 0.11534521021768325 +959 56 training.batch_size 2.0 +959 56 training.label_smoothing 0.1187376377666547 +959 57 model.embedding_dim 1.0 +959 57 model.relation_dim 0.0 +959 57 model.dropout_0 0.10574508947931097 +959 57 model.dropout_1 0.2751125421570141 +959 57 model.dropout_2 0.18499126366094 +959 57 training.batch_size 2.0 +959 57 training.label_smoothing 0.0014576743187045937 +959 58 model.embedding_dim 2.0 +959 58 model.relation_dim 2.0 +959 58 model.dropout_0 0.24556666219113757 +959 58 model.dropout_1 0.4282193499756859 +959 58 model.dropout_2 0.42909841208234256 +959 58 training.batch_size 2.0 +959 58 training.label_smoothing 0.6116425330815027 +959 59 model.embedding_dim 0.0 +959 59 model.relation_dim 0.0 +959 59 model.dropout_0 0.33951588510286684 +959 59 model.dropout_1 0.2550547462931104 +959 59 model.dropout_2 0.1441019939500402 +959 59 training.batch_size 2.0 +959 59 training.label_smoothing 0.05123399676860273 +959 60 model.embedding_dim 2.0 +959 60 model.relation_dim 0.0 +959 60 model.dropout_0 0.16590965336394925 +959 60 model.dropout_1 0.3986808790792964 +959 60 model.dropout_2 0.3955637332979449 +959 60 training.batch_size 0.0 +959 60 training.label_smoothing 0.04063434587172713 +959 61 model.embedding_dim 2.0 +959 61 model.relation_dim 1.0 +959 61 model.dropout_0 
0.26713674548534194 +959 61 model.dropout_1 0.4151208942295119 +959 61 model.dropout_2 0.4801736625249776 +959 61 training.batch_size 0.0 +959 61 training.label_smoothing 0.06370718140733914 +959 62 model.embedding_dim 1.0 +959 62 model.relation_dim 1.0 +959 62 model.dropout_0 0.495385998379573 +959 62 model.dropout_1 0.3557475760245946 +959 62 model.dropout_2 0.2157556860465083 +959 62 training.batch_size 1.0 +959 62 training.label_smoothing 0.0016779578736234961 +959 63 model.embedding_dim 1.0 +959 63 model.relation_dim 1.0 +959 63 model.dropout_0 0.31511360592123927 +959 63 model.dropout_1 0.3764825002701605 +959 63 model.dropout_2 0.4704299712266413 +959 63 training.batch_size 2.0 +959 63 training.label_smoothing 0.0033504797915552506 +959 64 model.embedding_dim 0.0 +959 64 model.relation_dim 1.0 +959 64 model.dropout_0 0.4520035678148132 +959 64 model.dropout_1 0.21737091645427487 +959 64 model.dropout_2 0.138022941589704 +959 64 training.batch_size 1.0 +959 64 training.label_smoothing 0.19965552232758413 +959 65 model.embedding_dim 2.0 +959 65 model.relation_dim 1.0 +959 65 model.dropout_0 0.16877338001867043 +959 65 model.dropout_1 0.364747878690301 +959 65 model.dropout_2 0.314054505744201 +959 65 training.batch_size 0.0 +959 65 training.label_smoothing 0.020763611370321657 +959 66 model.embedding_dim 1.0 +959 66 model.relation_dim 0.0 +959 66 model.dropout_0 0.4161361704789477 +959 66 model.dropout_1 0.25796935262755527 +959 66 model.dropout_2 0.2382571718561947 +959 66 training.batch_size 1.0 +959 66 training.label_smoothing 0.0011045049562665335 +959 67 model.embedding_dim 0.0 +959 67 model.relation_dim 2.0 +959 67 model.dropout_0 0.10071254762244926 +959 67 model.dropout_1 0.45013575115864723 +959 67 model.dropout_2 0.11399475313093439 +959 67 training.batch_size 0.0 +959 67 training.label_smoothing 0.002234271478471909 +959 68 model.embedding_dim 2.0 +959 68 model.relation_dim 2.0 +959 68 model.dropout_0 0.44582645315241176 +959 68 model.dropout_1 
0.4792039279805588 +959 68 model.dropout_2 0.21611720320411193 +959 68 training.batch_size 2.0 +959 68 training.label_smoothing 0.002564386489718134 +959 69 model.embedding_dim 2.0 +959 69 model.relation_dim 1.0 +959 69 model.dropout_0 0.14920682270302588 +959 69 model.dropout_1 0.16908299322668213 +959 69 model.dropout_2 0.46484324133289134 +959 69 training.batch_size 0.0 +959 69 training.label_smoothing 0.010082472585427464 +959 70 model.embedding_dim 1.0 +959 70 model.relation_dim 0.0 +959 70 model.dropout_0 0.30064804625987424 +959 70 model.dropout_1 0.3844517563468076 +959 70 model.dropout_2 0.2952811585489038 +959 70 training.batch_size 0.0 +959 70 training.label_smoothing 0.028355397088473167 +959 71 model.embedding_dim 0.0 +959 71 model.relation_dim 2.0 +959 71 model.dropout_0 0.17669602443091043 +959 71 model.dropout_1 0.14837161019191736 +959 71 model.dropout_2 0.40887141951653283 +959 71 training.batch_size 0.0 +959 71 training.label_smoothing 0.04579208539621224 +959 72 model.embedding_dim 2.0 +959 72 model.relation_dim 1.0 +959 72 model.dropout_0 0.2899061856176471 +959 72 model.dropout_1 0.16359609307188508 +959 72 model.dropout_2 0.21957190642791732 +959 72 training.batch_size 2.0 +959 72 training.label_smoothing 0.007222209003844785 +959 73 model.embedding_dim 2.0 +959 73 model.relation_dim 2.0 +959 73 model.dropout_0 0.40326443430028547 +959 73 model.dropout_1 0.18241053444792887 +959 73 model.dropout_2 0.2599512305231 +959 73 training.batch_size 2.0 +959 73 training.label_smoothing 0.9895658018877164 +959 74 model.embedding_dim 2.0 +959 74 model.relation_dim 0.0 +959 74 model.dropout_0 0.2087312380937877 +959 74 model.dropout_1 0.26615637818064164 +959 74 model.dropout_2 0.37569973390134637 +959 74 training.batch_size 0.0 +959 74 training.label_smoothing 0.005278758988845424 +959 75 model.embedding_dim 2.0 +959 75 model.relation_dim 0.0 +959 75 model.dropout_0 0.346173106048524 +959 75 model.dropout_1 0.40570768874463337 +959 75 model.dropout_2 
0.45226723013620085 +959 75 training.batch_size 0.0 +959 75 training.label_smoothing 0.040874919370825345 +959 76 model.embedding_dim 1.0 +959 76 model.relation_dim 1.0 +959 76 model.dropout_0 0.385837809711842 +959 76 model.dropout_1 0.3114544851851254 +959 76 model.dropout_2 0.49997829902015267 +959 76 training.batch_size 0.0 +959 76 training.label_smoothing 0.004579525358234243 +959 77 model.embedding_dim 1.0 +959 77 model.relation_dim 2.0 +959 77 model.dropout_0 0.44565204398181046 +959 77 model.dropout_1 0.1769460146104605 +959 77 model.dropout_2 0.33850067568903014 +959 77 training.batch_size 2.0 +959 77 training.label_smoothing 0.0011508249242610422 +959 78 model.embedding_dim 0.0 +959 78 model.relation_dim 2.0 +959 78 model.dropout_0 0.33243632301048176 +959 78 model.dropout_1 0.2937249550643969 +959 78 model.dropout_2 0.34461347879933807 +959 78 training.batch_size 0.0 +959 78 training.label_smoothing 0.0016522855705812351 +959 79 model.embedding_dim 2.0 +959 79 model.relation_dim 0.0 +959 79 model.dropout_0 0.10211108998549467 +959 79 model.dropout_1 0.19549358010098072 +959 79 model.dropout_2 0.429160653033565 +959 79 training.batch_size 0.0 +959 79 training.label_smoothing 0.056621697202866667 +959 80 model.embedding_dim 0.0 +959 80 model.relation_dim 2.0 +959 80 model.dropout_0 0.48737449452748294 +959 80 model.dropout_1 0.37299236980034967 +959 80 model.dropout_2 0.2846330268277067 +959 80 training.batch_size 1.0 +959 80 training.label_smoothing 0.014619308314560115 +959 81 model.embedding_dim 0.0 +959 81 model.relation_dim 0.0 +959 81 model.dropout_0 0.1653336127615948 +959 81 model.dropout_1 0.2263168032045174 +959 81 model.dropout_2 0.39554000690630675 +959 81 training.batch_size 2.0 +959 81 training.label_smoothing 0.004081895869584271 +959 82 model.embedding_dim 1.0 +959 82 model.relation_dim 1.0 +959 82 model.dropout_0 0.25576310197106034 +959 82 model.dropout_1 0.12405553209232473 +959 82 model.dropout_2 0.1384778717050232 +959 82 
training.batch_size 0.0 +959 82 training.label_smoothing 0.0151145994198622 +959 83 model.embedding_dim 0.0 +959 83 model.relation_dim 0.0 +959 83 model.dropout_0 0.30896918620197555 +959 83 model.dropout_1 0.13038973569152526 +959 83 model.dropout_2 0.4816760551849284 +959 83 training.batch_size 1.0 +959 83 training.label_smoothing 0.015125422107280309 +959 84 model.embedding_dim 0.0 +959 84 model.relation_dim 2.0 +959 84 model.dropout_0 0.2961072830559969 +959 84 model.dropout_1 0.2795468433463185 +959 84 model.dropout_2 0.15774535156675024 +959 84 training.batch_size 2.0 +959 84 training.label_smoothing 0.002924545528991151 +959 85 model.embedding_dim 0.0 +959 85 model.relation_dim 2.0 +959 85 model.dropout_0 0.4753342922806054 +959 85 model.dropout_1 0.450119494055192 +959 85 model.dropout_2 0.3863917243707259 +959 85 training.batch_size 1.0 +959 85 training.label_smoothing 0.035831240807544566 +959 86 model.embedding_dim 2.0 +959 86 model.relation_dim 2.0 +959 86 model.dropout_0 0.19066572306571627 +959 86 model.dropout_1 0.3696498792476557 +959 86 model.dropout_2 0.31555512048966416 +959 86 training.batch_size 0.0 +959 86 training.label_smoothing 0.1457257040602032 +959 87 model.embedding_dim 1.0 +959 87 model.relation_dim 0.0 +959 87 model.dropout_0 0.4422739340625059 +959 87 model.dropout_1 0.20879673101376223 +959 87 model.dropout_2 0.23640172797616021 +959 87 training.batch_size 2.0 +959 87 training.label_smoothing 0.0038354890225719227 +959 88 model.embedding_dim 2.0 +959 88 model.relation_dim 2.0 +959 88 model.dropout_0 0.45357603827191806 +959 88 model.dropout_1 0.16735504364452494 +959 88 model.dropout_2 0.2555374769153932 +959 88 training.batch_size 2.0 +959 88 training.label_smoothing 0.0019775202737157887 +959 89 model.embedding_dim 2.0 +959 89 model.relation_dim 0.0 +959 89 model.dropout_0 0.20712967770116114 +959 89 model.dropout_1 0.1726865061705355 +959 89 model.dropout_2 0.24498406363557154 +959 89 training.batch_size 1.0 +959 89 
training.label_smoothing 0.13929957302055562 +959 90 model.embedding_dim 0.0 +959 90 model.relation_dim 0.0 +959 90 model.dropout_0 0.42741252584634026 +959 90 model.dropout_1 0.10063210460548251 +959 90 model.dropout_2 0.16043394067806652 +959 90 training.batch_size 2.0 +959 90 training.label_smoothing 0.045483531282439815 +959 91 model.embedding_dim 0.0 +959 91 model.relation_dim 1.0 +959 91 model.dropout_0 0.4742171662706218 +959 91 model.dropout_1 0.3574259471511043 +959 91 model.dropout_2 0.3342168741279402 +959 91 training.batch_size 1.0 +959 91 training.label_smoothing 0.0011834852010189545 +959 92 model.embedding_dim 2.0 +959 92 model.relation_dim 0.0 +959 92 model.dropout_0 0.41559923908749863 +959 92 model.dropout_1 0.27686402659739584 +959 92 model.dropout_2 0.18539468051864838 +959 92 training.batch_size 1.0 +959 92 training.label_smoothing 0.8344550696387344 +959 93 model.embedding_dim 0.0 +959 93 model.relation_dim 1.0 +959 93 model.dropout_0 0.43932517176879904 +959 93 model.dropout_1 0.3071054085210002 +959 93 model.dropout_2 0.2412277037294729 +959 93 training.batch_size 2.0 +959 93 training.label_smoothing 0.002358582929297411 +959 94 model.embedding_dim 1.0 +959 94 model.relation_dim 2.0 +959 94 model.dropout_0 0.3375498929127507 +959 94 model.dropout_1 0.38402224414073993 +959 94 model.dropout_2 0.39034769747496967 +959 94 training.batch_size 1.0 +959 94 training.label_smoothing 0.14566571399009823 +959 95 model.embedding_dim 0.0 +959 95 model.relation_dim 1.0 +959 95 model.dropout_0 0.28116487021447645 +959 95 model.dropout_1 0.11430000524904416 +959 95 model.dropout_2 0.10034114498742541 +959 95 training.batch_size 1.0 +959 95 training.label_smoothing 0.0014522539890234139 +959 96 model.embedding_dim 1.0 +959 96 model.relation_dim 2.0 +959 96 model.dropout_0 0.26501927621614974 +959 96 model.dropout_1 0.4642988728493428 +959 96 model.dropout_2 0.4638272970426113 +959 96 training.batch_size 0.0 +959 96 training.label_smoothing 
0.002352499411392275 +959 97 model.embedding_dim 1.0 +959 97 model.relation_dim 0.0 +959 97 model.dropout_0 0.21811259499742436 +959 97 model.dropout_1 0.25232823558871886 +959 97 model.dropout_2 0.37312152208569105 +959 97 training.batch_size 0.0 +959 97 training.label_smoothing 0.005500080934289215 +959 98 model.embedding_dim 0.0 +959 98 model.relation_dim 0.0 +959 98 model.dropout_0 0.1916272786223362 +959 98 model.dropout_1 0.1746136155375303 +959 98 model.dropout_2 0.4116210566299572 +959 98 training.batch_size 1.0 +959 98 training.label_smoothing 0.5024883599303603 +959 99 model.embedding_dim 2.0 +959 99 model.relation_dim 1.0 +959 99 model.dropout_0 0.25229291669660814 +959 99 model.dropout_1 0.14033827602037108 +959 99 model.dropout_2 0.10618232562716928 +959 99 training.batch_size 2.0 +959 99 training.label_smoothing 0.0034211558604959006 +959 100 model.embedding_dim 1.0 +959 100 model.relation_dim 2.0 +959 100 model.dropout_0 0.17880169337692126 +959 100 model.dropout_1 0.17750157930290503 +959 100 model.dropout_2 0.4954016642468665 +959 100 training.batch_size 1.0 +959 100 training.label_smoothing 0.13712928802567104 +959 1 dataset """kinships""" +959 1 model """tucker""" +959 1 loss """crossentropy""" +959 1 regularizer """no""" +959 1 optimizer """adadelta""" +959 1 training_loop """lcwa""" +959 1 evaluator """rankbased""" +959 2 dataset """kinships""" +959 2 model """tucker""" +959 2 loss """crossentropy""" +959 2 regularizer """no""" +959 2 optimizer """adadelta""" +959 2 training_loop """lcwa""" +959 2 evaluator """rankbased""" +959 3 dataset """kinships""" +959 3 model """tucker""" +959 3 loss """crossentropy""" +959 3 regularizer """no""" +959 3 optimizer """adadelta""" +959 3 training_loop """lcwa""" +959 3 evaluator """rankbased""" +959 4 dataset """kinships""" +959 4 model """tucker""" +959 4 loss """crossentropy""" +959 4 regularizer """no""" +959 4 optimizer """adadelta""" +959 4 training_loop """lcwa""" +959 4 evaluator """rankbased""" +959 
5 dataset """kinships""" +959 5 model """tucker""" +959 5 loss """crossentropy""" +959 5 regularizer """no""" +959 5 optimizer """adadelta""" +959 5 training_loop """lcwa""" +959 5 evaluator """rankbased""" +959 6 dataset """kinships""" +959 6 model """tucker""" +959 6 loss """crossentropy""" +959 6 regularizer """no""" +959 6 optimizer """adadelta""" +959 6 training_loop """lcwa""" +959 6 evaluator """rankbased""" +959 7 dataset """kinships""" +959 7 model """tucker""" +959 7 loss """crossentropy""" +959 7 regularizer """no""" +959 7 optimizer """adadelta""" +959 7 training_loop """lcwa""" +959 7 evaluator """rankbased""" +959 8 dataset """kinships""" +959 8 model """tucker""" +959 8 loss """crossentropy""" +959 8 regularizer """no""" +959 8 optimizer """adadelta""" +959 8 training_loop """lcwa""" +959 8 evaluator """rankbased""" +959 9 dataset """kinships""" +959 9 model """tucker""" +959 9 loss """crossentropy""" +959 9 regularizer """no""" +959 9 optimizer """adadelta""" +959 9 training_loop """lcwa""" +959 9 evaluator """rankbased""" +959 10 dataset """kinships""" +959 10 model """tucker""" +959 10 loss """crossentropy""" +959 10 regularizer """no""" +959 10 optimizer """adadelta""" +959 10 training_loop """lcwa""" +959 10 evaluator """rankbased""" +959 11 dataset """kinships""" +959 11 model """tucker""" +959 11 loss """crossentropy""" +959 11 regularizer """no""" +959 11 optimizer """adadelta""" +959 11 training_loop """lcwa""" +959 11 evaluator """rankbased""" +959 12 dataset """kinships""" +959 12 model """tucker""" +959 12 loss """crossentropy""" +959 12 regularizer """no""" +959 12 optimizer """adadelta""" +959 12 training_loop """lcwa""" +959 12 evaluator """rankbased""" +959 13 dataset """kinships""" +959 13 model """tucker""" +959 13 loss """crossentropy""" +959 13 regularizer """no""" +959 13 optimizer """adadelta""" +959 13 training_loop """lcwa""" +959 13 evaluator """rankbased""" +959 14 dataset """kinships""" +959 14 model """tucker""" +959 14 
loss """crossentropy""" +959 14 regularizer """no""" +959 14 optimizer """adadelta""" +959 14 training_loop """lcwa""" +959 14 evaluator """rankbased""" +959 15 dataset """kinships""" +959 15 model """tucker""" +959 15 loss """crossentropy""" +959 15 regularizer """no""" +959 15 optimizer """adadelta""" +959 15 training_loop """lcwa""" +959 15 evaluator """rankbased""" +959 16 dataset """kinships""" +959 16 model """tucker""" +959 16 loss """crossentropy""" +959 16 regularizer """no""" +959 16 optimizer """adadelta""" +959 16 training_loop """lcwa""" +959 16 evaluator """rankbased""" +959 17 dataset """kinships""" +959 17 model """tucker""" +959 17 loss """crossentropy""" +959 17 regularizer """no""" +959 17 optimizer """adadelta""" +959 17 training_loop """lcwa""" +959 17 evaluator """rankbased""" +959 18 dataset """kinships""" +959 18 model """tucker""" +959 18 loss """crossentropy""" +959 18 regularizer """no""" +959 18 optimizer """adadelta""" +959 18 training_loop """lcwa""" +959 18 evaluator """rankbased""" +959 19 dataset """kinships""" +959 19 model """tucker""" +959 19 loss """crossentropy""" +959 19 regularizer """no""" +959 19 optimizer """adadelta""" +959 19 training_loop """lcwa""" +959 19 evaluator """rankbased""" +959 20 dataset """kinships""" +959 20 model """tucker""" +959 20 loss """crossentropy""" +959 20 regularizer """no""" +959 20 optimizer """adadelta""" +959 20 training_loop """lcwa""" +959 20 evaluator """rankbased""" +959 21 dataset """kinships""" +959 21 model """tucker""" +959 21 loss """crossentropy""" +959 21 regularizer """no""" +959 21 optimizer """adadelta""" +959 21 training_loop """lcwa""" +959 21 evaluator """rankbased""" +959 22 dataset """kinships""" +959 22 model """tucker""" +959 22 loss """crossentropy""" +959 22 regularizer """no""" +959 22 optimizer """adadelta""" +959 22 training_loop """lcwa""" +959 22 evaluator """rankbased""" +959 23 dataset """kinships""" +959 23 model """tucker""" +959 23 loss """crossentropy""" +959 
23 regularizer """no""" +959 23 optimizer """adadelta""" +959 23 training_loop """lcwa""" +959 23 evaluator """rankbased""" +959 24 dataset """kinships""" +959 24 model """tucker""" +959 24 loss """crossentropy""" +959 24 regularizer """no""" +959 24 optimizer """adadelta""" +959 24 training_loop """lcwa""" +959 24 evaluator """rankbased""" +959 25 dataset """kinships""" +959 25 model """tucker""" +959 25 loss """crossentropy""" +959 25 regularizer """no""" +959 25 optimizer """adadelta""" +959 25 training_loop """lcwa""" +959 25 evaluator """rankbased""" +959 26 dataset """kinships""" +959 26 model """tucker""" +959 26 loss """crossentropy""" +959 26 regularizer """no""" +959 26 optimizer """adadelta""" +959 26 training_loop """lcwa""" +959 26 evaluator """rankbased""" +959 27 dataset """kinships""" +959 27 model """tucker""" +959 27 loss """crossentropy""" +959 27 regularizer """no""" +959 27 optimizer """adadelta""" +959 27 training_loop """lcwa""" +959 27 evaluator """rankbased""" +959 28 dataset """kinships""" +959 28 model """tucker""" +959 28 loss """crossentropy""" +959 28 regularizer """no""" +959 28 optimizer """adadelta""" +959 28 training_loop """lcwa""" +959 28 evaluator """rankbased""" +959 29 dataset """kinships""" +959 29 model """tucker""" +959 29 loss """crossentropy""" +959 29 regularizer """no""" +959 29 optimizer """adadelta""" +959 29 training_loop """lcwa""" +959 29 evaluator """rankbased""" +959 30 dataset """kinships""" +959 30 model """tucker""" +959 30 loss """crossentropy""" +959 30 regularizer """no""" +959 30 optimizer """adadelta""" +959 30 training_loop """lcwa""" +959 30 evaluator """rankbased""" +959 31 dataset """kinships""" +959 31 model """tucker""" +959 31 loss """crossentropy""" +959 31 regularizer """no""" +959 31 optimizer """adadelta""" +959 31 training_loop """lcwa""" +959 31 evaluator """rankbased""" +959 32 dataset """kinships""" +959 32 model """tucker""" +959 32 loss """crossentropy""" +959 32 regularizer """no""" +959 
32 optimizer """adadelta""" +959 32 training_loop """lcwa""" +959 32 evaluator """rankbased""" +959 33 dataset """kinships""" +959 33 model """tucker""" +959 33 loss """crossentropy""" +959 33 regularizer """no""" +959 33 optimizer """adadelta""" +959 33 training_loop """lcwa""" +959 33 evaluator """rankbased""" +959 34 dataset """kinships""" +959 34 model """tucker""" +959 34 loss """crossentropy""" +959 34 regularizer """no""" +959 34 optimizer """adadelta""" +959 34 training_loop """lcwa""" +959 34 evaluator """rankbased""" +959 35 dataset """kinships""" +959 35 model """tucker""" +959 35 loss """crossentropy""" +959 35 regularizer """no""" +959 35 optimizer """adadelta""" +959 35 training_loop """lcwa""" +959 35 evaluator """rankbased""" +959 36 dataset """kinships""" +959 36 model """tucker""" +959 36 loss """crossentropy""" +959 36 regularizer """no""" +959 36 optimizer """adadelta""" +959 36 training_loop """lcwa""" +959 36 evaluator """rankbased""" +959 37 dataset """kinships""" +959 37 model """tucker""" +959 37 loss """crossentropy""" +959 37 regularizer """no""" +959 37 optimizer """adadelta""" +959 37 training_loop """lcwa""" +959 37 evaluator """rankbased""" +959 38 dataset """kinships""" +959 38 model """tucker""" +959 38 loss """crossentropy""" +959 38 regularizer """no""" +959 38 optimizer """adadelta""" +959 38 training_loop """lcwa""" +959 38 evaluator """rankbased""" +959 39 dataset """kinships""" +959 39 model """tucker""" +959 39 loss """crossentropy""" +959 39 regularizer """no""" +959 39 optimizer """adadelta""" +959 39 training_loop """lcwa""" +959 39 evaluator """rankbased""" +959 40 dataset """kinships""" +959 40 model """tucker""" +959 40 loss """crossentropy""" +959 40 regularizer """no""" +959 40 optimizer """adadelta""" +959 40 training_loop """lcwa""" +959 40 evaluator """rankbased""" +959 41 dataset """kinships""" +959 41 model """tucker""" +959 41 loss """crossentropy""" +959 41 regularizer """no""" +959 41 optimizer """adadelta""" 
+959 41 training_loop """lcwa""" +959 41 evaluator """rankbased""" +959 42 dataset """kinships""" +959 42 model """tucker""" +959 42 loss """crossentropy""" +959 42 regularizer """no""" +959 42 optimizer """adadelta""" +959 42 training_loop """lcwa""" +959 42 evaluator """rankbased""" +959 43 dataset """kinships""" +959 43 model """tucker""" +959 43 loss """crossentropy""" +959 43 regularizer """no""" +959 43 optimizer """adadelta""" +959 43 training_loop """lcwa""" +959 43 evaluator """rankbased""" +959 44 dataset """kinships""" +959 44 model """tucker""" +959 44 loss """crossentropy""" +959 44 regularizer """no""" +959 44 optimizer """adadelta""" +959 44 training_loop """lcwa""" +959 44 evaluator """rankbased""" +959 45 dataset """kinships""" +959 45 model """tucker""" +959 45 loss """crossentropy""" +959 45 regularizer """no""" +959 45 optimizer """adadelta""" +959 45 training_loop """lcwa""" +959 45 evaluator """rankbased""" +959 46 dataset """kinships""" +959 46 model """tucker""" +959 46 loss """crossentropy""" +959 46 regularizer """no""" +959 46 optimizer """adadelta""" +959 46 training_loop """lcwa""" +959 46 evaluator """rankbased""" +959 47 dataset """kinships""" +959 47 model """tucker""" +959 47 loss """crossentropy""" +959 47 regularizer """no""" +959 47 optimizer """adadelta""" +959 47 training_loop """lcwa""" +959 47 evaluator """rankbased""" +959 48 dataset """kinships""" +959 48 model """tucker""" +959 48 loss """crossentropy""" +959 48 regularizer """no""" +959 48 optimizer """adadelta""" +959 48 training_loop """lcwa""" +959 48 evaluator """rankbased""" +959 49 dataset """kinships""" +959 49 model """tucker""" +959 49 loss """crossentropy""" +959 49 regularizer """no""" +959 49 optimizer """adadelta""" +959 49 training_loop """lcwa""" +959 49 evaluator """rankbased""" +959 50 dataset """kinships""" +959 50 model """tucker""" +959 50 loss """crossentropy""" +959 50 regularizer """no""" +959 50 optimizer """adadelta""" +959 50 training_loop 
"""lcwa""" +959 50 evaluator """rankbased""" +959 51 dataset """kinships""" +959 51 model """tucker""" +959 51 loss """crossentropy""" +959 51 regularizer """no""" +959 51 optimizer """adadelta""" +959 51 training_loop """lcwa""" +959 51 evaluator """rankbased""" +959 52 dataset """kinships""" +959 52 model """tucker""" +959 52 loss """crossentropy""" +959 52 regularizer """no""" +959 52 optimizer """adadelta""" +959 52 training_loop """lcwa""" +959 52 evaluator """rankbased""" +959 53 dataset """kinships""" +959 53 model """tucker""" +959 53 loss """crossentropy""" +959 53 regularizer """no""" +959 53 optimizer """adadelta""" +959 53 training_loop """lcwa""" +959 53 evaluator """rankbased""" +959 54 dataset """kinships""" +959 54 model """tucker""" +959 54 loss """crossentropy""" +959 54 regularizer """no""" +959 54 optimizer """adadelta""" +959 54 training_loop """lcwa""" +959 54 evaluator """rankbased""" +959 55 dataset """kinships""" +959 55 model """tucker""" +959 55 loss """crossentropy""" +959 55 regularizer """no""" +959 55 optimizer """adadelta""" +959 55 training_loop """lcwa""" +959 55 evaluator """rankbased""" +959 56 dataset """kinships""" +959 56 model """tucker""" +959 56 loss """crossentropy""" +959 56 regularizer """no""" +959 56 optimizer """adadelta""" +959 56 training_loop """lcwa""" +959 56 evaluator """rankbased""" +959 57 dataset """kinships""" +959 57 model """tucker""" +959 57 loss """crossentropy""" +959 57 regularizer """no""" +959 57 optimizer """adadelta""" +959 57 training_loop """lcwa""" +959 57 evaluator """rankbased""" +959 58 dataset """kinships""" +959 58 model """tucker""" +959 58 loss """crossentropy""" +959 58 regularizer """no""" +959 58 optimizer """adadelta""" +959 58 training_loop """lcwa""" +959 58 evaluator """rankbased""" +959 59 dataset """kinships""" +959 59 model """tucker""" +959 59 loss """crossentropy""" +959 59 regularizer """no""" +959 59 optimizer """adadelta""" +959 59 training_loop """lcwa""" +959 59 evaluator 
"""rankbased""" +959 60 dataset """kinships""" +959 60 model """tucker""" +959 60 loss """crossentropy""" +959 60 regularizer """no""" +959 60 optimizer """adadelta""" +959 60 training_loop """lcwa""" +959 60 evaluator """rankbased""" +959 61 dataset """kinships""" +959 61 model """tucker""" +959 61 loss """crossentropy""" +959 61 regularizer """no""" +959 61 optimizer """adadelta""" +959 61 training_loop """lcwa""" +959 61 evaluator """rankbased""" +959 62 dataset """kinships""" +959 62 model """tucker""" +959 62 loss """crossentropy""" +959 62 regularizer """no""" +959 62 optimizer """adadelta""" +959 62 training_loop """lcwa""" +959 62 evaluator """rankbased""" +959 63 dataset """kinships""" +959 63 model """tucker""" +959 63 loss """crossentropy""" +959 63 regularizer """no""" +959 63 optimizer """adadelta""" +959 63 training_loop """lcwa""" +959 63 evaluator """rankbased""" +959 64 dataset """kinships""" +959 64 model """tucker""" +959 64 loss """crossentropy""" +959 64 regularizer """no""" +959 64 optimizer """adadelta""" +959 64 training_loop """lcwa""" +959 64 evaluator """rankbased""" +959 65 dataset """kinships""" +959 65 model """tucker""" +959 65 loss """crossentropy""" +959 65 regularizer """no""" +959 65 optimizer """adadelta""" +959 65 training_loop """lcwa""" +959 65 evaluator """rankbased""" +959 66 dataset """kinships""" +959 66 model """tucker""" +959 66 loss """crossentropy""" +959 66 regularizer """no""" +959 66 optimizer """adadelta""" +959 66 training_loop """lcwa""" +959 66 evaluator """rankbased""" +959 67 dataset """kinships""" +959 67 model """tucker""" +959 67 loss """crossentropy""" +959 67 regularizer """no""" +959 67 optimizer """adadelta""" +959 67 training_loop """lcwa""" +959 67 evaluator """rankbased""" +959 68 dataset """kinships""" +959 68 model """tucker""" +959 68 loss """crossentropy""" +959 68 regularizer """no""" +959 68 optimizer """adadelta""" +959 68 training_loop """lcwa""" +959 68 evaluator """rankbased""" +959 69 
dataset """kinships""" +959 69 model """tucker""" +959 69 loss """crossentropy""" +959 69 regularizer """no""" +959 69 optimizer """adadelta""" +959 69 training_loop """lcwa""" +959 69 evaluator """rankbased""" +959 70 dataset """kinships""" +959 70 model """tucker""" +959 70 loss """crossentropy""" +959 70 regularizer """no""" +959 70 optimizer """adadelta""" +959 70 training_loop """lcwa""" +959 70 evaluator """rankbased""" +959 71 dataset """kinships""" +959 71 model """tucker""" +959 71 loss """crossentropy""" +959 71 regularizer """no""" +959 71 optimizer """adadelta""" +959 71 training_loop """lcwa""" +959 71 evaluator """rankbased""" +959 72 dataset """kinships""" +959 72 model """tucker""" +959 72 loss """crossentropy""" +959 72 regularizer """no""" +959 72 optimizer """adadelta""" +959 72 training_loop """lcwa""" +959 72 evaluator """rankbased""" +959 73 dataset """kinships""" +959 73 model """tucker""" +959 73 loss """crossentropy""" +959 73 regularizer """no""" +959 73 optimizer """adadelta""" +959 73 training_loop """lcwa""" +959 73 evaluator """rankbased""" +959 74 dataset """kinships""" +959 74 model """tucker""" +959 74 loss """crossentropy""" +959 74 regularizer """no""" +959 74 optimizer """adadelta""" +959 74 training_loop """lcwa""" +959 74 evaluator """rankbased""" +959 75 dataset """kinships""" +959 75 model """tucker""" +959 75 loss """crossentropy""" +959 75 regularizer """no""" +959 75 optimizer """adadelta""" +959 75 training_loop """lcwa""" +959 75 evaluator """rankbased""" +959 76 dataset """kinships""" +959 76 model """tucker""" +959 76 loss """crossentropy""" +959 76 regularizer """no""" +959 76 optimizer """adadelta""" +959 76 training_loop """lcwa""" +959 76 evaluator """rankbased""" +959 77 dataset """kinships""" +959 77 model """tucker""" +959 77 loss """crossentropy""" +959 77 regularizer """no""" +959 77 optimizer """adadelta""" +959 77 training_loop """lcwa""" +959 77 evaluator """rankbased""" +959 78 dataset """kinships""" +959 
78 model """tucker""" +959 78 loss """crossentropy""" +959 78 regularizer """no""" +959 78 optimizer """adadelta""" +959 78 training_loop """lcwa""" +959 78 evaluator """rankbased""" +959 79 dataset """kinships""" +959 79 model """tucker""" +959 79 loss """crossentropy""" +959 79 regularizer """no""" +959 79 optimizer """adadelta""" +959 79 training_loop """lcwa""" +959 79 evaluator """rankbased""" +959 80 dataset """kinships""" +959 80 model """tucker""" +959 80 loss """crossentropy""" +959 80 regularizer """no""" +959 80 optimizer """adadelta""" +959 80 training_loop """lcwa""" +959 80 evaluator """rankbased""" +959 81 dataset """kinships""" +959 81 model """tucker""" +959 81 loss """crossentropy""" +959 81 regularizer """no""" +959 81 optimizer """adadelta""" +959 81 training_loop """lcwa""" +959 81 evaluator """rankbased""" +959 82 dataset """kinships""" +959 82 model """tucker""" +959 82 loss """crossentropy""" +959 82 regularizer """no""" +959 82 optimizer """adadelta""" +959 82 training_loop """lcwa""" +959 82 evaluator """rankbased""" +959 83 dataset """kinships""" +959 83 model """tucker""" +959 83 loss """crossentropy""" +959 83 regularizer """no""" +959 83 optimizer """adadelta""" +959 83 training_loop """lcwa""" +959 83 evaluator """rankbased""" +959 84 dataset """kinships""" +959 84 model """tucker""" +959 84 loss """crossentropy""" +959 84 regularizer """no""" +959 84 optimizer """adadelta""" +959 84 training_loop """lcwa""" +959 84 evaluator """rankbased""" +959 85 dataset """kinships""" +959 85 model """tucker""" +959 85 loss """crossentropy""" +959 85 regularizer """no""" +959 85 optimizer """adadelta""" +959 85 training_loop """lcwa""" +959 85 evaluator """rankbased""" +959 86 dataset """kinships""" +959 86 model """tucker""" +959 86 loss """crossentropy""" +959 86 regularizer """no""" +959 86 optimizer """adadelta""" +959 86 training_loop """lcwa""" +959 86 evaluator """rankbased""" +959 87 dataset """kinships""" +959 87 model """tucker""" +959 
87 loss """crossentropy""" +959 87 regularizer """no""" +959 87 optimizer """adadelta""" +959 87 training_loop """lcwa""" +959 87 evaluator """rankbased""" +959 88 dataset """kinships""" +959 88 model """tucker""" +959 88 loss """crossentropy""" +959 88 regularizer """no""" +959 88 optimizer """adadelta""" +959 88 training_loop """lcwa""" +959 88 evaluator """rankbased""" +959 89 dataset """kinships""" +959 89 model """tucker""" +959 89 loss """crossentropy""" +959 89 regularizer """no""" +959 89 optimizer """adadelta""" +959 89 training_loop """lcwa""" +959 89 evaluator """rankbased""" +959 90 dataset """kinships""" +959 90 model """tucker""" +959 90 loss """crossentropy""" +959 90 regularizer """no""" +959 90 optimizer """adadelta""" +959 90 training_loop """lcwa""" +959 90 evaluator """rankbased""" +959 91 dataset """kinships""" +959 91 model """tucker""" +959 91 loss """crossentropy""" +959 91 regularizer """no""" +959 91 optimizer """adadelta""" +959 91 training_loop """lcwa""" +959 91 evaluator """rankbased""" +959 92 dataset """kinships""" +959 92 model """tucker""" +959 92 loss """crossentropy""" +959 92 regularizer """no""" +959 92 optimizer """adadelta""" +959 92 training_loop """lcwa""" +959 92 evaluator """rankbased""" +959 93 dataset """kinships""" +959 93 model """tucker""" +959 93 loss """crossentropy""" +959 93 regularizer """no""" +959 93 optimizer """adadelta""" +959 93 training_loop """lcwa""" +959 93 evaluator """rankbased""" +959 94 dataset """kinships""" +959 94 model """tucker""" +959 94 loss """crossentropy""" +959 94 regularizer """no""" +959 94 optimizer """adadelta""" +959 94 training_loop """lcwa""" +959 94 evaluator """rankbased""" +959 95 dataset """kinships""" +959 95 model """tucker""" +959 95 loss """crossentropy""" +959 95 regularizer """no""" +959 95 optimizer """adadelta""" +959 95 training_loop """lcwa""" +959 95 evaluator """rankbased""" +959 96 dataset """kinships""" +959 96 model """tucker""" +959 96 loss """crossentropy""" 
+959 96 regularizer """no""" +959 96 optimizer """adadelta""" +959 96 training_loop """lcwa""" +959 96 evaluator """rankbased""" +959 97 dataset """kinships""" +959 97 model """tucker""" +959 97 loss """crossentropy""" +959 97 regularizer """no""" +959 97 optimizer """adadelta""" +959 97 training_loop """lcwa""" +959 97 evaluator """rankbased""" +959 98 dataset """kinships""" +959 98 model """tucker""" +959 98 loss """crossentropy""" +959 98 regularizer """no""" +959 98 optimizer """adadelta""" +959 98 training_loop """lcwa""" +959 98 evaluator """rankbased""" +959 99 dataset """kinships""" +959 99 model """tucker""" +959 99 loss """crossentropy""" +959 99 regularizer """no""" +959 99 optimizer """adadelta""" +959 99 training_loop """lcwa""" +959 99 evaluator """rankbased""" +959 100 dataset """kinships""" +959 100 model """tucker""" +959 100 loss """crossentropy""" +959 100 regularizer """no""" +959 100 optimizer """adadelta""" +959 100 training_loop """lcwa""" +959 100 evaluator """rankbased""" +960 1 model.embedding_dim 1.0 +960 1 model.relation_dim 2.0 +960 1 model.dropout_0 0.23464138785898606 +960 1 model.dropout_1 0.31400715152290626 +960 1 model.dropout_2 0.40524509097065686 +960 1 training.batch_size 1.0 +960 1 training.label_smoothing 0.13705807988282503 +960 2 model.embedding_dim 1.0 +960 2 model.relation_dim 1.0 +960 2 model.dropout_0 0.36963812649031724 +960 2 model.dropout_1 0.3049438254731212 +960 2 model.dropout_2 0.36117943302404565 +960 2 training.batch_size 2.0 +960 2 training.label_smoothing 0.006668878577861206 +960 3 model.embedding_dim 1.0 +960 3 model.relation_dim 0.0 +960 3 model.dropout_0 0.30567115849138476 +960 3 model.dropout_1 0.10710023819256441 +960 3 model.dropout_2 0.3057061796944047 +960 3 training.batch_size 1.0 +960 3 training.label_smoothing 0.012029321849304758 +960 4 model.embedding_dim 1.0 +960 4 model.relation_dim 0.0 +960 4 model.dropout_0 0.2682095768046809 +960 4 model.dropout_1 0.10290476813706087 +960 4 model.dropout_2 
0.2717156799962901 +960 4 training.batch_size 0.0 +960 4 training.label_smoothing 0.14335317555441396 +960 5 model.embedding_dim 1.0 +960 5 model.relation_dim 1.0 +960 5 model.dropout_0 0.4870179642312452 +960 5 model.dropout_1 0.392917719008374 +960 5 model.dropout_2 0.4402085222947453 +960 5 training.batch_size 1.0 +960 5 training.label_smoothing 0.18168817236029877 +960 6 model.embedding_dim 0.0 +960 6 model.relation_dim 0.0 +960 6 model.dropout_0 0.49203574765879876 +960 6 model.dropout_1 0.17257344415513365 +960 6 model.dropout_2 0.13448057900698893 +960 6 training.batch_size 0.0 +960 6 training.label_smoothing 0.1883676589033381 +960 7 model.embedding_dim 1.0 +960 7 model.relation_dim 0.0 +960 7 model.dropout_0 0.27723405051710603 +960 7 model.dropout_1 0.1276333074975888 +960 7 model.dropout_2 0.49101343379919105 +960 7 training.batch_size 0.0 +960 7 training.label_smoothing 0.01008809486439528 +960 8 model.embedding_dim 1.0 +960 8 model.relation_dim 1.0 +960 8 model.dropout_0 0.4961611827740462 +960 8 model.dropout_1 0.4482408960631684 +960 8 model.dropout_2 0.24603296412763598 +960 8 training.batch_size 2.0 +960 8 training.label_smoothing 0.25911045892227413 +960 9 model.embedding_dim 1.0 +960 9 model.relation_dim 0.0 +960 9 model.dropout_0 0.3473851850799284 +960 9 model.dropout_1 0.12744076211371472 +960 9 model.dropout_2 0.3953742438762303 +960 9 training.batch_size 0.0 +960 9 training.label_smoothing 0.01787867204245911 +960 10 model.embedding_dim 1.0 +960 10 model.relation_dim 1.0 +960 10 model.dropout_0 0.10911083573467129 +960 10 model.dropout_1 0.19925633713450958 +960 10 model.dropout_2 0.2740003790288379 +960 10 training.batch_size 2.0 +960 10 training.label_smoothing 0.07559218107726125 +960 11 model.embedding_dim 2.0 +960 11 model.relation_dim 1.0 +960 11 model.dropout_0 0.40358696987337206 +960 11 model.dropout_1 0.48846199117521805 +960 11 model.dropout_2 0.18429634065303624 +960 11 training.batch_size 1.0 +960 11 training.label_smoothing 
0.003757751965855535 +960 12 model.embedding_dim 2.0 +960 12 model.relation_dim 1.0 +960 12 model.dropout_0 0.44713014550734653 +960 12 model.dropout_1 0.35637148967318855 +960 12 model.dropout_2 0.4385295881146377 +960 12 training.batch_size 2.0 +960 12 training.label_smoothing 0.009875264091993378 +960 13 model.embedding_dim 0.0 +960 13 model.relation_dim 1.0 +960 13 model.dropout_0 0.29447636550823286 +960 13 model.dropout_1 0.37968315662262075 +960 13 model.dropout_2 0.26736505787294773 +960 13 training.batch_size 1.0 +960 13 training.label_smoothing 0.6105375609993381 +960 14 model.embedding_dim 0.0 +960 14 model.relation_dim 0.0 +960 14 model.dropout_0 0.38008829447748094 +960 14 model.dropout_1 0.2548017065471391 +960 14 model.dropout_2 0.19168603866938808 +960 14 training.batch_size 1.0 +960 14 training.label_smoothing 0.0026883713029503876 +960 15 model.embedding_dim 1.0 +960 15 model.relation_dim 0.0 +960 15 model.dropout_0 0.18370614546058534 +960 15 model.dropout_1 0.4818964984707971 +960 15 model.dropout_2 0.4019320203297303 +960 15 training.batch_size 2.0 +960 15 training.label_smoothing 0.2899528804144397 +960 16 model.embedding_dim 0.0 +960 16 model.relation_dim 1.0 +960 16 model.dropout_0 0.3252371045810477 +960 16 model.dropout_1 0.47415231873300745 +960 16 model.dropout_2 0.20348769216047158 +960 16 training.batch_size 2.0 +960 16 training.label_smoothing 0.8426368059371269 +960 17 model.embedding_dim 1.0 +960 17 model.relation_dim 2.0 +960 17 model.dropout_0 0.48976569640719014 +960 17 model.dropout_1 0.24953863683370586 +960 17 model.dropout_2 0.40505899728045713 +960 17 training.batch_size 0.0 +960 17 training.label_smoothing 0.004214017616738267 +960 18 model.embedding_dim 2.0 +960 18 model.relation_dim 0.0 +960 18 model.dropout_0 0.24939695536662085 +960 18 model.dropout_1 0.3578825565706313 +960 18 model.dropout_2 0.31472797765818933 +960 18 training.batch_size 0.0 +960 18 training.label_smoothing 0.0015946567357854908 +960 19 
model.embedding_dim 0.0 +960 19 model.relation_dim 0.0 +960 19 model.dropout_0 0.1505108154516396 +960 19 model.dropout_1 0.18825178712921337 +960 19 model.dropout_2 0.26003008466036537 +960 19 training.batch_size 2.0 +960 19 training.label_smoothing 0.6906017794270376 +960 20 model.embedding_dim 1.0 +960 20 model.relation_dim 2.0 +960 20 model.dropout_0 0.39329167155465583 +960 20 model.dropout_1 0.3956622801391355 +960 20 model.dropout_2 0.3723265382567015 +960 20 training.batch_size 2.0 +960 20 training.label_smoothing 0.030350761847687886 +960 21 model.embedding_dim 2.0 +960 21 model.relation_dim 0.0 +960 21 model.dropout_0 0.4125369389349285 +960 21 model.dropout_1 0.2478244226260872 +960 21 model.dropout_2 0.12350444072358086 +960 21 training.batch_size 2.0 +960 21 training.label_smoothing 0.06303966029635891 +960 22 model.embedding_dim 2.0 +960 22 model.relation_dim 1.0 +960 22 model.dropout_0 0.14070730986219845 +960 22 model.dropout_1 0.3144655250417693 +960 22 model.dropout_2 0.44700564511906216 +960 22 training.batch_size 1.0 +960 22 training.label_smoothing 0.2921932726879233 +960 23 model.embedding_dim 2.0 +960 23 model.relation_dim 1.0 +960 23 model.dropout_0 0.43376568376410496 +960 23 model.dropout_1 0.19715579032387245 +960 23 model.dropout_2 0.49587461454029336 +960 23 training.batch_size 1.0 +960 23 training.label_smoothing 0.011420376470516297 +960 24 model.embedding_dim 2.0 +960 24 model.relation_dim 2.0 +960 24 model.dropout_0 0.30702983362482084 +960 24 model.dropout_1 0.35077448192846616 +960 24 model.dropout_2 0.30721844440610024 +960 24 training.batch_size 0.0 +960 24 training.label_smoothing 0.007711705167173209 +960 25 model.embedding_dim 1.0 +960 25 model.relation_dim 2.0 +960 25 model.dropout_0 0.3811885357445751 +960 25 model.dropout_1 0.13078127716100652 +960 25 model.dropout_2 0.43886466951648295 +960 25 training.batch_size 1.0 +960 25 training.label_smoothing 0.006019480129377731 +960 26 model.embedding_dim 1.0 +960 26 
model.relation_dim 0.0 +960 26 model.dropout_0 0.20557315675273796 +960 26 model.dropout_1 0.11311633627748066 +960 26 model.dropout_2 0.42010031600305275 +960 26 training.batch_size 2.0 +960 26 training.label_smoothing 0.19610402298901172 +960 27 model.embedding_dim 1.0 +960 27 model.relation_dim 2.0 +960 27 model.dropout_0 0.40619794733886605 +960 27 model.dropout_1 0.3299833050920066 +960 27 model.dropout_2 0.3992352355325376 +960 27 training.batch_size 1.0 +960 27 training.label_smoothing 0.21974967351583763 +960 28 model.embedding_dim 1.0 +960 28 model.relation_dim 1.0 +960 28 model.dropout_0 0.293141422483688 +960 28 model.dropout_1 0.3495985720376672 +960 28 model.dropout_2 0.38081399968438234 +960 28 training.batch_size 1.0 +960 28 training.label_smoothing 0.01509252181765968 +960 29 model.embedding_dim 0.0 +960 29 model.relation_dim 2.0 +960 29 model.dropout_0 0.2026801154290523 +960 29 model.dropout_1 0.34707430205487083 +960 29 model.dropout_2 0.28071559559719617 +960 29 training.batch_size 0.0 +960 29 training.label_smoothing 0.25785773551782937 +960 30 model.embedding_dim 0.0 +960 30 model.relation_dim 0.0 +960 30 model.dropout_0 0.11479413957137377 +960 30 model.dropout_1 0.32240142239775926 +960 30 model.dropout_2 0.21209004365300835 +960 30 training.batch_size 2.0 +960 30 training.label_smoothing 0.10921510176617129 +960 31 model.embedding_dim 2.0 +960 31 model.relation_dim 2.0 +960 31 model.dropout_0 0.22904713308307634 +960 31 model.dropout_1 0.13426388117703247 +960 31 model.dropout_2 0.34396139792153824 +960 31 training.batch_size 1.0 +960 31 training.label_smoothing 0.0016840645087535583 +960 32 model.embedding_dim 1.0 +960 32 model.relation_dim 2.0 +960 32 model.dropout_0 0.2530286879212718 +960 32 model.dropout_1 0.11321065978202363 +960 32 model.dropout_2 0.4801854990981943 +960 32 training.batch_size 1.0 +960 32 training.label_smoothing 0.7115509502296516 +960 33 model.embedding_dim 1.0 +960 33 model.relation_dim 0.0 +960 33 model.dropout_0 
0.19071723298044066 +960 33 model.dropout_1 0.18115749285225116 +960 33 model.dropout_2 0.3466101464769942 +960 33 training.batch_size 2.0 +960 33 training.label_smoothing 0.676404634521198 +960 34 model.embedding_dim 1.0 +960 34 model.relation_dim 1.0 +960 34 model.dropout_0 0.2836600792457393 +960 34 model.dropout_1 0.2398926903243872 +960 34 model.dropout_2 0.1738246675418263 +960 34 training.batch_size 1.0 +960 34 training.label_smoothing 0.012507067180387861 +960 35 model.embedding_dim 2.0 +960 35 model.relation_dim 2.0 +960 35 model.dropout_0 0.23575245552485563 +960 35 model.dropout_1 0.34395076062654484 +960 35 model.dropout_2 0.1542020922630572 +960 35 training.batch_size 1.0 +960 35 training.label_smoothing 0.001124405979434902 +960 36 model.embedding_dim 2.0 +960 36 model.relation_dim 0.0 +960 36 model.dropout_0 0.21760435586223423 +960 36 model.dropout_1 0.434911630606443 +960 36 model.dropout_2 0.268214800999846 +960 36 training.batch_size 0.0 +960 36 training.label_smoothing 0.7470098046514203 +960 37 model.embedding_dim 0.0 +960 37 model.relation_dim 2.0 +960 37 model.dropout_0 0.2580641389029894 +960 37 model.dropout_1 0.13412783605360445 +960 37 model.dropout_2 0.4544520739246477 +960 37 training.batch_size 2.0 +960 37 training.label_smoothing 0.08299923723942762 +960 38 model.embedding_dim 2.0 +960 38 model.relation_dim 0.0 +960 38 model.dropout_0 0.41285438080885173 +960 38 model.dropout_1 0.392701780860938 +960 38 model.dropout_2 0.1328661510204889 +960 38 training.batch_size 1.0 +960 38 training.label_smoothing 0.04104687834842921 +960 39 model.embedding_dim 0.0 +960 39 model.relation_dim 1.0 +960 39 model.dropout_0 0.11092284514382587 +960 39 model.dropout_1 0.46026145564236187 +960 39 model.dropout_2 0.3794298671262578 +960 39 training.batch_size 1.0 +960 39 training.label_smoothing 0.013167206969164598 +960 40 model.embedding_dim 2.0 +960 40 model.relation_dim 1.0 +960 40 model.dropout_0 0.3500253863284105 +960 40 model.dropout_1 
0.17372860080179625 +960 40 model.dropout_2 0.4830795347923066 +960 40 training.batch_size 0.0 +960 40 training.label_smoothing 0.0010782943305039552 +960 41 model.embedding_dim 2.0 +960 41 model.relation_dim 1.0 +960 41 model.dropout_0 0.15990538563317525 +960 41 model.dropout_1 0.476739684951656 +960 41 model.dropout_2 0.20386638214836117 +960 41 training.batch_size 2.0 +960 41 training.label_smoothing 0.002713836445642236 +960 42 model.embedding_dim 0.0 +960 42 model.relation_dim 1.0 +960 42 model.dropout_0 0.3241715561613685 +960 42 model.dropout_1 0.22027576309529018 +960 42 model.dropout_2 0.4882993093374669 +960 42 training.batch_size 2.0 +960 42 training.label_smoothing 0.10870011810666486 +960 43 model.embedding_dim 1.0 +960 43 model.relation_dim 0.0 +960 43 model.dropout_0 0.16770111772669039 +960 43 model.dropout_1 0.29879330398084286 +960 43 model.dropout_2 0.20514136058590773 +960 43 training.batch_size 1.0 +960 43 training.label_smoothing 0.009261248036740988 +960 44 model.embedding_dim 0.0 +960 44 model.relation_dim 1.0 +960 44 model.dropout_0 0.18856793280595144 +960 44 model.dropout_1 0.41136333268801273 +960 44 model.dropout_2 0.2985639601922788 +960 44 training.batch_size 0.0 +960 44 training.label_smoothing 0.002402831613286644 +960 45 model.embedding_dim 2.0 +960 45 model.relation_dim 1.0 +960 45 model.dropout_0 0.22082668765141045 +960 45 model.dropout_1 0.33059696428808893 +960 45 model.dropout_2 0.35553620057524926 +960 45 training.batch_size 2.0 +960 45 training.label_smoothing 0.20516613793582128 +960 46 model.embedding_dim 2.0 +960 46 model.relation_dim 2.0 +960 46 model.dropout_0 0.4076285527535951 +960 46 model.dropout_1 0.12565103685409962 +960 46 model.dropout_2 0.34761767376085595 +960 46 training.batch_size 2.0 +960 46 training.label_smoothing 0.44948583293846733 +960 47 model.embedding_dim 2.0 +960 47 model.relation_dim 2.0 +960 47 model.dropout_0 0.3260739888320918 +960 47 model.dropout_1 0.4452940888613667 +960 47 model.dropout_2 
0.1703454332766003 +960 47 training.batch_size 0.0 +960 47 training.label_smoothing 0.9667821862116294 +960 48 model.embedding_dim 0.0 +960 48 model.relation_dim 1.0 +960 48 model.dropout_0 0.41873426459240515 +960 48 model.dropout_1 0.3090153508976574 +960 48 model.dropout_2 0.4522525390358433 +960 48 training.batch_size 0.0 +960 48 training.label_smoothing 0.015328646905114278 +960 49 model.embedding_dim 2.0 +960 49 model.relation_dim 0.0 +960 49 model.dropout_0 0.30738075158612577 +960 49 model.dropout_1 0.46031667095063467 +960 49 model.dropout_2 0.45011421240768534 +960 49 training.batch_size 1.0 +960 49 training.label_smoothing 0.08972974448215254 +960 50 model.embedding_dim 0.0 +960 50 model.relation_dim 0.0 +960 50 model.dropout_0 0.4862037424758131 +960 50 model.dropout_1 0.48563855251163324 +960 50 model.dropout_2 0.23044370194630567 +960 50 training.batch_size 2.0 +960 50 training.label_smoothing 0.595211762597503 +960 51 model.embedding_dim 2.0 +960 51 model.relation_dim 2.0 +960 51 model.dropout_0 0.11429684710483445 +960 51 model.dropout_1 0.23676376649499242 +960 51 model.dropout_2 0.299296475997614 +960 51 training.batch_size 2.0 +960 51 training.label_smoothing 0.06308034132930876 +960 52 model.embedding_dim 0.0 +960 52 model.relation_dim 2.0 +960 52 model.dropout_0 0.29217043005410903 +960 52 model.dropout_1 0.312178867451415 +960 52 model.dropout_2 0.17691760745858842 +960 52 training.batch_size 2.0 +960 52 training.label_smoothing 0.0027254425813677406 +960 53 model.embedding_dim 2.0 +960 53 model.relation_dim 2.0 +960 53 model.dropout_0 0.48299703887707957 +960 53 model.dropout_1 0.42067627125499574 +960 53 model.dropout_2 0.4866325081043309 +960 53 training.batch_size 2.0 +960 53 training.label_smoothing 0.24710818374560328 +960 54 model.embedding_dim 1.0 +960 54 model.relation_dim 0.0 +960 54 model.dropout_0 0.33195510542313716 +960 54 model.dropout_1 0.2850043173024826 +960 54 model.dropout_2 0.2197687721599528 +960 54 training.batch_size 
2.0 +960 54 training.label_smoothing 0.4477266585815653 +960 55 model.embedding_dim 0.0 +960 55 model.relation_dim 0.0 +960 55 model.dropout_0 0.312334224984465 +960 55 model.dropout_1 0.31654446882350407 +960 55 model.dropout_2 0.21537502515498597 +960 55 training.batch_size 2.0 +960 55 training.label_smoothing 0.0018671127188569648 +960 56 model.embedding_dim 1.0 +960 56 model.relation_dim 0.0 +960 56 model.dropout_0 0.2933220562558606 +960 56 model.dropout_1 0.3581080372371944 +960 56 model.dropout_2 0.43069101858185566 +960 56 training.batch_size 0.0 +960 56 training.label_smoothing 0.0031292184442511324 +960 57 model.embedding_dim 2.0 +960 57 model.relation_dim 2.0 +960 57 model.dropout_0 0.12141108420715906 +960 57 model.dropout_1 0.18589617129451025 +960 57 model.dropout_2 0.17678920125789865 +960 57 training.batch_size 1.0 +960 57 training.label_smoothing 0.006968363617229881 +960 58 model.embedding_dim 0.0 +960 58 model.relation_dim 1.0 +960 58 model.dropout_0 0.48240482742165236 +960 58 model.dropout_1 0.48473818196645724 +960 58 model.dropout_2 0.365914817991854 +960 58 training.batch_size 0.0 +960 58 training.label_smoothing 0.022477173034619456 +960 59 model.embedding_dim 2.0 +960 59 model.relation_dim 0.0 +960 59 model.dropout_0 0.3571825382989673 +960 59 model.dropout_1 0.31156404230979273 +960 59 model.dropout_2 0.15664464514807253 +960 59 training.batch_size 0.0 +960 59 training.label_smoothing 0.012812744242683189 +960 60 model.embedding_dim 2.0 +960 60 model.relation_dim 2.0 +960 60 model.dropout_0 0.26491091326792726 +960 60 model.dropout_1 0.4252145687648694 +960 60 model.dropout_2 0.37418125677406955 +960 60 training.batch_size 1.0 +960 60 training.label_smoothing 0.0016684771597142362 +960 61 model.embedding_dim 2.0 +960 61 model.relation_dim 1.0 +960 61 model.dropout_0 0.17184451799758427 +960 61 model.dropout_1 0.4056823527422827 +960 61 model.dropout_2 0.16666933971827014 +960 61 training.batch_size 1.0 +960 61 training.label_smoothing 
0.007215247380607771 +960 62 model.embedding_dim 2.0 +960 62 model.relation_dim 1.0 +960 62 model.dropout_0 0.11497647546432486 +960 62 model.dropout_1 0.43838744000995467 +960 62 model.dropout_2 0.15499355360783273 +960 62 training.batch_size 0.0 +960 62 training.label_smoothing 0.04800081824443775 +960 63 model.embedding_dim 1.0 +960 63 model.relation_dim 0.0 +960 63 model.dropout_0 0.4214294025627746 +960 63 model.dropout_1 0.2761159395861762 +960 63 model.dropout_2 0.28294494204775567 +960 63 training.batch_size 2.0 +960 63 training.label_smoothing 0.004124762711814362 +960 64 model.embedding_dim 0.0 +960 64 model.relation_dim 1.0 +960 64 model.dropout_0 0.3120273256632866 +960 64 model.dropout_1 0.1881636462545947 +960 64 model.dropout_2 0.26441779498156104 +960 64 training.batch_size 1.0 +960 64 training.label_smoothing 0.0024246612591653097 +960 65 model.embedding_dim 0.0 +960 65 model.relation_dim 2.0 +960 65 model.dropout_0 0.2899220635156746 +960 65 model.dropout_1 0.39216273211813457 +960 65 model.dropout_2 0.21759512250206905 +960 65 training.batch_size 2.0 +960 65 training.label_smoothing 0.3961578390957006 +960 66 model.embedding_dim 2.0 +960 66 model.relation_dim 2.0 +960 66 model.dropout_0 0.26398905323554067 +960 66 model.dropout_1 0.4390892858326553 +960 66 model.dropout_2 0.3274484091950434 +960 66 training.batch_size 2.0 +960 66 training.label_smoothing 0.2170236079189601 +960 67 model.embedding_dim 1.0 +960 67 model.relation_dim 2.0 +960 67 model.dropout_0 0.3712408436436341 +960 67 model.dropout_1 0.3071415525603203 +960 67 model.dropout_2 0.40298823412607576 +960 67 training.batch_size 1.0 +960 67 training.label_smoothing 0.0034540039028504456 +960 68 model.embedding_dim 0.0 +960 68 model.relation_dim 2.0 +960 68 model.dropout_0 0.1834276182959781 +960 68 model.dropout_1 0.2263027475841271 +960 68 model.dropout_2 0.16509930344438645 +960 68 training.batch_size 0.0 +960 68 training.label_smoothing 0.002449019027255558 +960 69 
model.embedding_dim 1.0 +960 69 model.relation_dim 2.0 +960 69 model.dropout_0 0.3926900886983994 +960 69 model.dropout_1 0.3428647669004816 +960 69 model.dropout_2 0.14466296009803595 +960 69 training.batch_size 1.0 +960 69 training.label_smoothing 0.008461235471723475 +960 70 model.embedding_dim 0.0 +960 70 model.relation_dim 1.0 +960 70 model.dropout_0 0.4026183922349992 +960 70 model.dropout_1 0.1226486032932364 +960 70 model.dropout_2 0.4519640457135492 +960 70 training.batch_size 2.0 +960 70 training.label_smoothing 0.001075081193768902 +960 71 model.embedding_dim 0.0 +960 71 model.relation_dim 0.0 +960 71 model.dropout_0 0.4532735560152067 +960 71 model.dropout_1 0.4124041564918478 +960 71 model.dropout_2 0.428561127218295 +960 71 training.batch_size 0.0 +960 71 training.label_smoothing 0.01830591063440778 +960 72 model.embedding_dim 2.0 +960 72 model.relation_dim 0.0 +960 72 model.dropout_0 0.1109684332519818 +960 72 model.dropout_1 0.29470322389527737 +960 72 model.dropout_2 0.16583874918108005 +960 72 training.batch_size 2.0 +960 72 training.label_smoothing 0.7553894607792798 +960 73 model.embedding_dim 0.0 +960 73 model.relation_dim 2.0 +960 73 model.dropout_0 0.29268839420890713 +960 73 model.dropout_1 0.2806899486428641 +960 73 model.dropout_2 0.2364881109409781 +960 73 training.batch_size 2.0 +960 73 training.label_smoothing 0.053377111396927315 +960 74 model.embedding_dim 0.0 +960 74 model.relation_dim 2.0 +960 74 model.dropout_0 0.4949366670493641 +960 74 model.dropout_1 0.36243238840324155 +960 74 model.dropout_2 0.1161230306667958 +960 74 training.batch_size 2.0 +960 74 training.label_smoothing 0.03325721650310183 +960 75 model.embedding_dim 1.0 +960 75 model.relation_dim 1.0 +960 75 model.dropout_0 0.40834301375036863 +960 75 model.dropout_1 0.4192764092017408 +960 75 model.dropout_2 0.31854705598918587 +960 75 training.batch_size 1.0 +960 75 training.label_smoothing 0.001063645146990471 +960 76 model.embedding_dim 2.0 +960 76 model.relation_dim 
0.0 +960 76 model.dropout_0 0.2639844757339139 +960 76 model.dropout_1 0.37152408758055366 +960 76 model.dropout_2 0.16944524916930626 +960 76 training.batch_size 0.0 +960 76 training.label_smoothing 0.16307654893977153 +960 77 model.embedding_dim 1.0 +960 77 model.relation_dim 2.0 +960 77 model.dropout_0 0.2886200917324886 +960 77 model.dropout_1 0.4400812090437233 +960 77 model.dropout_2 0.35361404978771627 +960 77 training.batch_size 0.0 +960 77 training.label_smoothing 0.1548457214330607 +960 78 model.embedding_dim 2.0 +960 78 model.relation_dim 2.0 +960 78 model.dropout_0 0.2513923130982212 +960 78 model.dropout_1 0.4131036662030606 +960 78 model.dropout_2 0.4043794524812888 +960 78 training.batch_size 2.0 +960 78 training.label_smoothing 0.0012602018455743018 +960 79 model.embedding_dim 0.0 +960 79 model.relation_dim 0.0 +960 79 model.dropout_0 0.1529090822128754 +960 79 model.dropout_1 0.27166584764335183 +960 79 model.dropout_2 0.43707616440837815 +960 79 training.batch_size 2.0 +960 79 training.label_smoothing 0.023838091861758882 +960 80 model.embedding_dim 0.0 +960 80 model.relation_dim 1.0 +960 80 model.dropout_0 0.37013540163838077 +960 80 model.dropout_1 0.24867495269121692 +960 80 model.dropout_2 0.35136478358065515 +960 80 training.batch_size 2.0 +960 80 training.label_smoothing 0.005067452292304555 +960 81 model.embedding_dim 1.0 +960 81 model.relation_dim 0.0 +960 81 model.dropout_0 0.22712750201731874 +960 81 model.dropout_1 0.33202716395467935 +960 81 model.dropout_2 0.3986144508483166 +960 81 training.batch_size 2.0 +960 81 training.label_smoothing 0.010565946961268481 +960 82 model.embedding_dim 1.0 +960 82 model.relation_dim 2.0 +960 82 model.dropout_0 0.4468663534833426 +960 82 model.dropout_1 0.23234061475966752 +960 82 model.dropout_2 0.32326363762961663 +960 82 training.batch_size 1.0 +960 82 training.label_smoothing 0.12411984562944936 +960 83 model.embedding_dim 0.0 +960 83 model.relation_dim 0.0 +960 83 model.dropout_0 
0.3326050185801895 +960 83 model.dropout_1 0.3997500678483431 +960 83 model.dropout_2 0.39058805804925956 +960 83 training.batch_size 0.0 +960 83 training.label_smoothing 0.365152055570044 +960 84 model.embedding_dim 0.0 +960 84 model.relation_dim 2.0 +960 84 model.dropout_0 0.424088163442478 +960 84 model.dropout_1 0.2815139373745793 +960 84 model.dropout_2 0.39122426311665814 +960 84 training.batch_size 0.0 +960 84 training.label_smoothing 0.05668305008386629 +960 85 model.embedding_dim 2.0 +960 85 model.relation_dim 2.0 +960 85 model.dropout_0 0.24375310457309562 +960 85 model.dropout_1 0.23506719654492825 +960 85 model.dropout_2 0.4693043111541938 +960 85 training.batch_size 1.0 +960 85 training.label_smoothing 0.0014698466576072299 +960 86 model.embedding_dim 2.0 +960 86 model.relation_dim 1.0 +960 86 model.dropout_0 0.46192420641918064 +960 86 model.dropout_1 0.1996120437640884 +960 86 model.dropout_2 0.2355418150853618 +960 86 training.batch_size 0.0 +960 86 training.label_smoothing 0.0014115668309486343 +960 87 model.embedding_dim 2.0 +960 87 model.relation_dim 1.0 +960 87 model.dropout_0 0.34845473287104123 +960 87 model.dropout_1 0.23595328975044982 +960 87 model.dropout_2 0.3454658154996394 +960 87 training.batch_size 1.0 +960 87 training.label_smoothing 0.17494891707276902 +960 88 model.embedding_dim 2.0 +960 88 model.relation_dim 0.0 +960 88 model.dropout_0 0.48169166567085964 +960 88 model.dropout_1 0.22223639321942057 +960 88 model.dropout_2 0.30728784622630223 +960 88 training.batch_size 1.0 +960 88 training.label_smoothing 0.027523115732537476 +960 89 model.embedding_dim 1.0 +960 89 model.relation_dim 2.0 +960 89 model.dropout_0 0.12690636088045434 +960 89 model.dropout_1 0.3837738453142515 +960 89 model.dropout_2 0.34888310745796536 +960 89 training.batch_size 0.0 +960 89 training.label_smoothing 0.16850779097323174 +960 90 model.embedding_dim 2.0 +960 90 model.relation_dim 1.0 +960 90 model.dropout_0 0.3314803273563639 +960 90 model.dropout_1 
0.3987335271952316 +960 90 model.dropout_2 0.4624189348669072 +960 90 training.batch_size 2.0 +960 90 training.label_smoothing 0.0015318962082644153 +960 91 model.embedding_dim 0.0 +960 91 model.relation_dim 2.0 +960 91 model.dropout_0 0.4286094725937074 +960 91 model.dropout_1 0.3103533536158253 +960 91 model.dropout_2 0.49859197071661876 +960 91 training.batch_size 1.0 +960 91 training.label_smoothing 0.0099389193241974 +960 92 model.embedding_dim 1.0 +960 92 model.relation_dim 0.0 +960 92 model.dropout_0 0.19924090971361852 +960 92 model.dropout_1 0.30734128751894774 +960 92 model.dropout_2 0.262534228326301 +960 92 training.batch_size 0.0 +960 92 training.label_smoothing 0.0023303742293576723 +960 93 model.embedding_dim 1.0 +960 93 model.relation_dim 0.0 +960 93 model.dropout_0 0.20851737706884965 +960 93 model.dropout_1 0.14938216778329558 +960 93 model.dropout_2 0.32476409184064026 +960 93 training.batch_size 1.0 +960 93 training.label_smoothing 0.021505865545666726 +960 94 model.embedding_dim 0.0 +960 94 model.relation_dim 2.0 +960 94 model.dropout_0 0.35084196061150863 +960 94 model.dropout_1 0.19928657222788038 +960 94 model.dropout_2 0.48824180810695583 +960 94 training.batch_size 2.0 +960 94 training.label_smoothing 0.004692985514037707 +960 95 model.embedding_dim 2.0 +960 95 model.relation_dim 0.0 +960 95 model.dropout_0 0.11389805164155953 +960 95 model.dropout_1 0.2185959206652217 +960 95 model.dropout_2 0.45181488761098354 +960 95 training.batch_size 2.0 +960 95 training.label_smoothing 0.004613551244870522 +960 96 model.embedding_dim 0.0 +960 96 model.relation_dim 0.0 +960 96 model.dropout_0 0.3625220749663819 +960 96 model.dropout_1 0.34781003159737345 +960 96 model.dropout_2 0.11032451179793577 +960 96 training.batch_size 1.0 +960 96 training.label_smoothing 0.01011944132180871 +960 97 model.embedding_dim 2.0 +960 97 model.relation_dim 1.0 +960 97 model.dropout_0 0.3713333035517139 +960 97 model.dropout_1 0.38388869986151797 +960 97 
model.dropout_2 0.2727522123911694 +960 97 training.batch_size 0.0 +960 97 training.label_smoothing 0.002800426677967559 +960 98 model.embedding_dim 0.0 +960 98 model.relation_dim 0.0 +960 98 model.dropout_0 0.35694094053084313 +960 98 model.dropout_1 0.3645321833514994 +960 98 model.dropout_2 0.3745663534447313 +960 98 training.batch_size 0.0 +960 98 training.label_smoothing 0.2865489189325155 +960 99 model.embedding_dim 1.0 +960 99 model.relation_dim 2.0 +960 99 model.dropout_0 0.27547785491951104 +960 99 model.dropout_1 0.24083725040417978 +960 99 model.dropout_2 0.27525613438678753 +960 99 training.batch_size 2.0 +960 99 training.label_smoothing 0.0023313726543758135 +960 100 model.embedding_dim 0.0 +960 100 model.relation_dim 1.0 +960 100 model.dropout_0 0.3843605595029874 +960 100 model.dropout_1 0.2742868559809587 +960 100 model.dropout_2 0.46214714974939186 +960 100 training.batch_size 0.0 +960 100 training.label_smoothing 0.060678101167491866 +960 1 dataset """kinships""" +960 1 model """tucker""" +960 1 loss """crossentropy""" +960 1 regularizer """no""" +960 1 optimizer """adadelta""" +960 1 training_loop """lcwa""" +960 1 evaluator """rankbased""" +960 2 dataset """kinships""" +960 2 model """tucker""" +960 2 loss """crossentropy""" +960 2 regularizer """no""" +960 2 optimizer """adadelta""" +960 2 training_loop """lcwa""" +960 2 evaluator """rankbased""" +960 3 dataset """kinships""" +960 3 model """tucker""" +960 3 loss """crossentropy""" +960 3 regularizer """no""" +960 3 optimizer """adadelta""" +960 3 training_loop """lcwa""" +960 3 evaluator """rankbased""" +960 4 dataset """kinships""" +960 4 model """tucker""" +960 4 loss """crossentropy""" +960 4 regularizer """no""" +960 4 optimizer """adadelta""" +960 4 training_loop """lcwa""" +960 4 evaluator """rankbased""" +960 5 dataset """kinships""" +960 5 model """tucker""" +960 5 loss """crossentropy""" +960 5 regularizer """no""" +960 5 optimizer """adadelta""" +960 5 training_loop """lcwa""" +960 5 
evaluator """rankbased""" +960 6 dataset """kinships""" +960 6 model """tucker""" +960 6 loss """crossentropy""" +960 6 regularizer """no""" +960 6 optimizer """adadelta""" +960 6 training_loop """lcwa""" +960 6 evaluator """rankbased""" +960 7 dataset """kinships""" +960 7 model """tucker""" +960 7 loss """crossentropy""" +960 7 regularizer """no""" +960 7 optimizer """adadelta""" +960 7 training_loop """lcwa""" +960 7 evaluator """rankbased""" +960 8 dataset """kinships""" +960 8 model """tucker""" +960 8 loss """crossentropy""" +960 8 regularizer """no""" +960 8 optimizer """adadelta""" +960 8 training_loop """lcwa""" +960 8 evaluator """rankbased""" +960 9 dataset """kinships""" +960 9 model """tucker""" +960 9 loss """crossentropy""" +960 9 regularizer """no""" +960 9 optimizer """adadelta""" +960 9 training_loop """lcwa""" +960 9 evaluator """rankbased""" +960 10 dataset """kinships""" +960 10 model """tucker""" +960 10 loss """crossentropy""" +960 10 regularizer """no""" +960 10 optimizer """adadelta""" +960 10 training_loop """lcwa""" +960 10 evaluator """rankbased""" +960 11 dataset """kinships""" +960 11 model """tucker""" +960 11 loss """crossentropy""" +960 11 regularizer """no""" +960 11 optimizer """adadelta""" +960 11 training_loop """lcwa""" +960 11 evaluator """rankbased""" +960 12 dataset """kinships""" +960 12 model """tucker""" +960 12 loss """crossentropy""" +960 12 regularizer """no""" +960 12 optimizer """adadelta""" +960 12 training_loop """lcwa""" +960 12 evaluator """rankbased""" +960 13 dataset """kinships""" +960 13 model """tucker""" +960 13 loss """crossentropy""" +960 13 regularizer """no""" +960 13 optimizer """adadelta""" +960 13 training_loop """lcwa""" +960 13 evaluator """rankbased""" +960 14 dataset """kinships""" +960 14 model """tucker""" +960 14 loss """crossentropy""" +960 14 regularizer """no""" +960 14 optimizer """adadelta""" +960 14 training_loop """lcwa""" +960 14 evaluator """rankbased""" +960 15 dataset """kinships""" 
+960 15 model """tucker""" +960 15 loss """crossentropy""" +960 15 regularizer """no""" +960 15 optimizer """adadelta""" +960 15 training_loop """lcwa""" +960 15 evaluator """rankbased""" +960 16 dataset """kinships""" +960 16 model """tucker""" +960 16 loss """crossentropy""" +960 16 regularizer """no""" +960 16 optimizer """adadelta""" +960 16 training_loop """lcwa""" +960 16 evaluator """rankbased""" +960 17 dataset """kinships""" +960 17 model """tucker""" +960 17 loss """crossentropy""" +960 17 regularizer """no""" +960 17 optimizer """adadelta""" +960 17 training_loop """lcwa""" +960 17 evaluator """rankbased""" +960 18 dataset """kinships""" +960 18 model """tucker""" +960 18 loss """crossentropy""" +960 18 regularizer """no""" +960 18 optimizer """adadelta""" +960 18 training_loop """lcwa""" +960 18 evaluator """rankbased""" +960 19 dataset """kinships""" +960 19 model """tucker""" +960 19 loss """crossentropy""" +960 19 regularizer """no""" +960 19 optimizer """adadelta""" +960 19 training_loop """lcwa""" +960 19 evaluator """rankbased""" +960 20 dataset """kinships""" +960 20 model """tucker""" +960 20 loss """crossentropy""" +960 20 regularizer """no""" +960 20 optimizer """adadelta""" +960 20 training_loop """lcwa""" +960 20 evaluator """rankbased""" +960 21 dataset """kinships""" +960 21 model """tucker""" +960 21 loss """crossentropy""" +960 21 regularizer """no""" +960 21 optimizer """adadelta""" +960 21 training_loop """lcwa""" +960 21 evaluator """rankbased""" +960 22 dataset """kinships""" +960 22 model """tucker""" +960 22 loss """crossentropy""" +960 22 regularizer """no""" +960 22 optimizer """adadelta""" +960 22 training_loop """lcwa""" +960 22 evaluator """rankbased""" +960 23 dataset """kinships""" +960 23 model """tucker""" +960 23 loss """crossentropy""" +960 23 regularizer """no""" +960 23 optimizer """adadelta""" +960 23 training_loop """lcwa""" +960 23 evaluator """rankbased""" +960 24 dataset """kinships""" +960 24 model """tucker""" 
+960 24 loss """crossentropy""" +960 24 regularizer """no""" +960 24 optimizer """adadelta""" +960 24 training_loop """lcwa""" +960 24 evaluator """rankbased""" +960 25 dataset """kinships""" +960 25 model """tucker""" +960 25 loss """crossentropy""" +960 25 regularizer """no""" +960 25 optimizer """adadelta""" +960 25 training_loop """lcwa""" +960 25 evaluator """rankbased""" +960 26 dataset """kinships""" +960 26 model """tucker""" +960 26 loss """crossentropy""" +960 26 regularizer """no""" +960 26 optimizer """adadelta""" +960 26 training_loop """lcwa""" +960 26 evaluator """rankbased""" +960 27 dataset """kinships""" +960 27 model """tucker""" +960 27 loss """crossentropy""" +960 27 regularizer """no""" +960 27 optimizer """adadelta""" +960 27 training_loop """lcwa""" +960 27 evaluator """rankbased""" +960 28 dataset """kinships""" +960 28 model """tucker""" +960 28 loss """crossentropy""" +960 28 regularizer """no""" +960 28 optimizer """adadelta""" +960 28 training_loop """lcwa""" +960 28 evaluator """rankbased""" +960 29 dataset """kinships""" +960 29 model """tucker""" +960 29 loss """crossentropy""" +960 29 regularizer """no""" +960 29 optimizer """adadelta""" +960 29 training_loop """lcwa""" +960 29 evaluator """rankbased""" +960 30 dataset """kinships""" +960 30 model """tucker""" +960 30 loss """crossentropy""" +960 30 regularizer """no""" +960 30 optimizer """adadelta""" +960 30 training_loop """lcwa""" +960 30 evaluator """rankbased""" +960 31 dataset """kinships""" +960 31 model """tucker""" +960 31 loss """crossentropy""" +960 31 regularizer """no""" +960 31 optimizer """adadelta""" +960 31 training_loop """lcwa""" +960 31 evaluator """rankbased""" +960 32 dataset """kinships""" +960 32 model """tucker""" +960 32 loss """crossentropy""" +960 32 regularizer """no""" +960 32 optimizer """adadelta""" +960 32 training_loop """lcwa""" +960 32 evaluator """rankbased""" +960 33 dataset """kinships""" +960 33 model """tucker""" +960 33 loss 
"""crossentropy""" +960 33 regularizer """no""" +960 33 optimizer """adadelta""" +960 33 training_loop """lcwa""" +960 33 evaluator """rankbased""" +960 34 dataset """kinships""" +960 34 model """tucker""" +960 34 loss """crossentropy""" +960 34 regularizer """no""" +960 34 optimizer """adadelta""" +960 34 training_loop """lcwa""" +960 34 evaluator """rankbased""" +960 35 dataset """kinships""" +960 35 model """tucker""" +960 35 loss """crossentropy""" +960 35 regularizer """no""" +960 35 optimizer """adadelta""" +960 35 training_loop """lcwa""" +960 35 evaluator """rankbased""" +960 36 dataset """kinships""" +960 36 model """tucker""" +960 36 loss """crossentropy""" +960 36 regularizer """no""" +960 36 optimizer """adadelta""" +960 36 training_loop """lcwa""" +960 36 evaluator """rankbased""" +960 37 dataset """kinships""" +960 37 model """tucker""" +960 37 loss """crossentropy""" +960 37 regularizer """no""" +960 37 optimizer """adadelta""" +960 37 training_loop """lcwa""" +960 37 evaluator """rankbased""" +960 38 dataset """kinships""" +960 38 model """tucker""" +960 38 loss """crossentropy""" +960 38 regularizer """no""" +960 38 optimizer """adadelta""" +960 38 training_loop """lcwa""" +960 38 evaluator """rankbased""" +960 39 dataset """kinships""" +960 39 model """tucker""" +960 39 loss """crossentropy""" +960 39 regularizer """no""" +960 39 optimizer """adadelta""" +960 39 training_loop """lcwa""" +960 39 evaluator """rankbased""" +960 40 dataset """kinships""" +960 40 model """tucker""" +960 40 loss """crossentropy""" +960 40 regularizer """no""" +960 40 optimizer """adadelta""" +960 40 training_loop """lcwa""" +960 40 evaluator """rankbased""" +960 41 dataset """kinships""" +960 41 model """tucker""" +960 41 loss """crossentropy""" +960 41 regularizer """no""" +960 41 optimizer """adadelta""" +960 41 training_loop """lcwa""" +960 41 evaluator """rankbased""" +960 42 dataset """kinships""" +960 42 model """tucker""" +960 42 loss """crossentropy""" +960 42 
regularizer """no""" +960 42 optimizer """adadelta""" +960 42 training_loop """lcwa""" +960 42 evaluator """rankbased""" +960 43 dataset """kinships""" +960 43 model """tucker""" +960 43 loss """crossentropy""" +960 43 regularizer """no""" +960 43 optimizer """adadelta""" +960 43 training_loop """lcwa""" +960 43 evaluator """rankbased""" +960 44 dataset """kinships""" +960 44 model """tucker""" +960 44 loss """crossentropy""" +960 44 regularizer """no""" +960 44 optimizer """adadelta""" +960 44 training_loop """lcwa""" +960 44 evaluator """rankbased""" +960 45 dataset """kinships""" +960 45 model """tucker""" +960 45 loss """crossentropy""" +960 45 regularizer """no""" +960 45 optimizer """adadelta""" +960 45 training_loop """lcwa""" +960 45 evaluator """rankbased""" +960 46 dataset """kinships""" +960 46 model """tucker""" +960 46 loss """crossentropy""" +960 46 regularizer """no""" +960 46 optimizer """adadelta""" +960 46 training_loop """lcwa""" +960 46 evaluator """rankbased""" +960 47 dataset """kinships""" +960 47 model """tucker""" +960 47 loss """crossentropy""" +960 47 regularizer """no""" +960 47 optimizer """adadelta""" +960 47 training_loop """lcwa""" +960 47 evaluator """rankbased""" +960 48 dataset """kinships""" +960 48 model """tucker""" +960 48 loss """crossentropy""" +960 48 regularizer """no""" +960 48 optimizer """adadelta""" +960 48 training_loop """lcwa""" +960 48 evaluator """rankbased""" +960 49 dataset """kinships""" +960 49 model """tucker""" +960 49 loss """crossentropy""" +960 49 regularizer """no""" +960 49 optimizer """adadelta""" +960 49 training_loop """lcwa""" +960 49 evaluator """rankbased""" +960 50 dataset """kinships""" +960 50 model """tucker""" +960 50 loss """crossentropy""" +960 50 regularizer """no""" +960 50 optimizer """adadelta""" +960 50 training_loop """lcwa""" +960 50 evaluator """rankbased""" +960 51 dataset """kinships""" +960 51 model """tucker""" +960 51 loss """crossentropy""" +960 51 regularizer """no""" +960 51 
optimizer """adadelta""" +960 51 training_loop """lcwa""" +960 51 evaluator """rankbased""" +960 52 dataset """kinships""" +960 52 model """tucker""" +960 52 loss """crossentropy""" +960 52 regularizer """no""" +960 52 optimizer """adadelta""" +960 52 training_loop """lcwa""" +960 52 evaluator """rankbased""" +960 53 dataset """kinships""" +960 53 model """tucker""" +960 53 loss """crossentropy""" +960 53 regularizer """no""" +960 53 optimizer """adadelta""" +960 53 training_loop """lcwa""" +960 53 evaluator """rankbased""" +960 54 dataset """kinships""" +960 54 model """tucker""" +960 54 loss """crossentropy""" +960 54 regularizer """no""" +960 54 optimizer """adadelta""" +960 54 training_loop """lcwa""" +960 54 evaluator """rankbased""" +960 55 dataset """kinships""" +960 55 model """tucker""" +960 55 loss """crossentropy""" +960 55 regularizer """no""" +960 55 optimizer """adadelta""" +960 55 training_loop """lcwa""" +960 55 evaluator """rankbased""" +960 56 dataset """kinships""" +960 56 model """tucker""" +960 56 loss """crossentropy""" +960 56 regularizer """no""" +960 56 optimizer """adadelta""" +960 56 training_loop """lcwa""" +960 56 evaluator """rankbased""" +960 57 dataset """kinships""" +960 57 model """tucker""" +960 57 loss """crossentropy""" +960 57 regularizer """no""" +960 57 optimizer """adadelta""" +960 57 training_loop """lcwa""" +960 57 evaluator """rankbased""" +960 58 dataset """kinships""" +960 58 model """tucker""" +960 58 loss """crossentropy""" +960 58 regularizer """no""" +960 58 optimizer """adadelta""" +960 58 training_loop """lcwa""" +960 58 evaluator """rankbased""" +960 59 dataset """kinships""" +960 59 model """tucker""" +960 59 loss """crossentropy""" +960 59 regularizer """no""" +960 59 optimizer """adadelta""" +960 59 training_loop """lcwa""" +960 59 evaluator """rankbased""" +960 60 dataset """kinships""" +960 60 model """tucker""" +960 60 loss """crossentropy""" +960 60 regularizer """no""" +960 60 optimizer """adadelta""" 
+960 60 training_loop """lcwa""" +960 60 evaluator """rankbased""" +960 61 dataset """kinships""" +960 61 model """tucker""" +960 61 loss """crossentropy""" +960 61 regularizer """no""" +960 61 optimizer """adadelta""" +960 61 training_loop """lcwa""" +960 61 evaluator """rankbased""" +960 62 dataset """kinships""" +960 62 model """tucker""" +960 62 loss """crossentropy""" +960 62 regularizer """no""" +960 62 optimizer """adadelta""" +960 62 training_loop """lcwa""" +960 62 evaluator """rankbased""" +960 63 dataset """kinships""" +960 63 model """tucker""" +960 63 loss """crossentropy""" +960 63 regularizer """no""" +960 63 optimizer """adadelta""" +960 63 training_loop """lcwa""" +960 63 evaluator """rankbased""" +960 64 dataset """kinships""" +960 64 model """tucker""" +960 64 loss """crossentropy""" +960 64 regularizer """no""" +960 64 optimizer """adadelta""" +960 64 training_loop """lcwa""" +960 64 evaluator """rankbased""" +960 65 dataset """kinships""" +960 65 model """tucker""" +960 65 loss """crossentropy""" +960 65 regularizer """no""" +960 65 optimizer """adadelta""" +960 65 training_loop """lcwa""" +960 65 evaluator """rankbased""" +960 66 dataset """kinships""" +960 66 model """tucker""" +960 66 loss """crossentropy""" +960 66 regularizer """no""" +960 66 optimizer """adadelta""" +960 66 training_loop """lcwa""" +960 66 evaluator """rankbased""" +960 67 dataset """kinships""" +960 67 model """tucker""" +960 67 loss """crossentropy""" +960 67 regularizer """no""" +960 67 optimizer """adadelta""" +960 67 training_loop """lcwa""" +960 67 evaluator """rankbased""" +960 68 dataset """kinships""" +960 68 model """tucker""" +960 68 loss """crossentropy""" +960 68 regularizer """no""" +960 68 optimizer """adadelta""" +960 68 training_loop """lcwa""" +960 68 evaluator """rankbased""" +960 69 dataset """kinships""" +960 69 model """tucker""" +960 69 loss """crossentropy""" +960 69 regularizer """no""" +960 69 optimizer """adadelta""" +960 69 training_loop 
"""lcwa""" +960 69 evaluator """rankbased""" +960 70 dataset """kinships""" +960 70 model """tucker""" +960 70 loss """crossentropy""" +960 70 regularizer """no""" +960 70 optimizer """adadelta""" +960 70 training_loop """lcwa""" +960 70 evaluator """rankbased""" +960 71 dataset """kinships""" +960 71 model """tucker""" +960 71 loss """crossentropy""" +960 71 regularizer """no""" +960 71 optimizer """adadelta""" +960 71 training_loop """lcwa""" +960 71 evaluator """rankbased""" +960 72 dataset """kinships""" +960 72 model """tucker""" +960 72 loss """crossentropy""" +960 72 regularizer """no""" +960 72 optimizer """adadelta""" +960 72 training_loop """lcwa""" +960 72 evaluator """rankbased""" +960 73 dataset """kinships""" +960 73 model """tucker""" +960 73 loss """crossentropy""" +960 73 regularizer """no""" +960 73 optimizer """adadelta""" +960 73 training_loop """lcwa""" +960 73 evaluator """rankbased""" +960 74 dataset """kinships""" +960 74 model """tucker""" +960 74 loss """crossentropy""" +960 74 regularizer """no""" +960 74 optimizer """adadelta""" +960 74 training_loop """lcwa""" +960 74 evaluator """rankbased""" +960 75 dataset """kinships""" +960 75 model """tucker""" +960 75 loss """crossentropy""" +960 75 regularizer """no""" +960 75 optimizer """adadelta""" +960 75 training_loop """lcwa""" +960 75 evaluator """rankbased""" +960 76 dataset """kinships""" +960 76 model """tucker""" +960 76 loss """crossentropy""" +960 76 regularizer """no""" +960 76 optimizer """adadelta""" +960 76 training_loop """lcwa""" +960 76 evaluator """rankbased""" +960 77 dataset """kinships""" +960 77 model """tucker""" +960 77 loss """crossentropy""" +960 77 regularizer """no""" +960 77 optimizer """adadelta""" +960 77 training_loop """lcwa""" +960 77 evaluator """rankbased""" +960 78 dataset """kinships""" +960 78 model """tucker""" +960 78 loss """crossentropy""" +960 78 regularizer """no""" +960 78 optimizer """adadelta""" +960 78 training_loop """lcwa""" +960 78 evaluator 
"""rankbased""" +960 79 dataset """kinships""" +960 79 model """tucker""" +960 79 loss """crossentropy""" +960 79 regularizer """no""" +960 79 optimizer """adadelta""" +960 79 training_loop """lcwa""" +960 79 evaluator """rankbased""" +960 80 dataset """kinships""" +960 80 model """tucker""" +960 80 loss """crossentropy""" +960 80 regularizer """no""" +960 80 optimizer """adadelta""" +960 80 training_loop """lcwa""" +960 80 evaluator """rankbased""" +960 81 dataset """kinships""" +960 81 model """tucker""" +960 81 loss """crossentropy""" +960 81 regularizer """no""" +960 81 optimizer """adadelta""" +960 81 training_loop """lcwa""" +960 81 evaluator """rankbased""" +960 82 dataset """kinships""" +960 82 model """tucker""" +960 82 loss """crossentropy""" +960 82 regularizer """no""" +960 82 optimizer """adadelta""" +960 82 training_loop """lcwa""" +960 82 evaluator """rankbased""" +960 83 dataset """kinships""" +960 83 model """tucker""" +960 83 loss """crossentropy""" +960 83 regularizer """no""" +960 83 optimizer """adadelta""" +960 83 training_loop """lcwa""" +960 83 evaluator """rankbased""" +960 84 dataset """kinships""" +960 84 model """tucker""" +960 84 loss """crossentropy""" +960 84 regularizer """no""" +960 84 optimizer """adadelta""" +960 84 training_loop """lcwa""" +960 84 evaluator """rankbased""" +960 85 dataset """kinships""" +960 85 model """tucker""" +960 85 loss """crossentropy""" +960 85 regularizer """no""" +960 85 optimizer """adadelta""" +960 85 training_loop """lcwa""" +960 85 evaluator """rankbased""" +960 86 dataset """kinships""" +960 86 model """tucker""" +960 86 loss """crossentropy""" +960 86 regularizer """no""" +960 86 optimizer """adadelta""" +960 86 training_loop """lcwa""" +960 86 evaluator """rankbased""" +960 87 dataset """kinships""" +960 87 model """tucker""" +960 87 loss """crossentropy""" +960 87 regularizer """no""" +960 87 optimizer """adadelta""" +960 87 training_loop """lcwa""" +960 87 evaluator """rankbased""" +960 88 
dataset """kinships""" +960 88 model """tucker""" +960 88 loss """crossentropy""" +960 88 regularizer """no""" +960 88 optimizer """adadelta""" +960 88 training_loop """lcwa""" +960 88 evaluator """rankbased""" +960 89 dataset """kinships""" +960 89 model """tucker""" +960 89 loss """crossentropy""" +960 89 regularizer """no""" +960 89 optimizer """adadelta""" +960 89 training_loop """lcwa""" +960 89 evaluator """rankbased""" +960 90 dataset """kinships""" +960 90 model """tucker""" +960 90 loss """crossentropy""" +960 90 regularizer """no""" +960 90 optimizer """adadelta""" +960 90 training_loop """lcwa""" +960 90 evaluator """rankbased""" +960 91 dataset """kinships""" +960 91 model """tucker""" +960 91 loss """crossentropy""" +960 91 regularizer """no""" +960 91 optimizer """adadelta""" +960 91 training_loop """lcwa""" +960 91 evaluator """rankbased""" +960 92 dataset """kinships""" +960 92 model """tucker""" +960 92 loss """crossentropy""" +960 92 regularizer """no""" +960 92 optimizer """adadelta""" +960 92 training_loop """lcwa""" +960 92 evaluator """rankbased""" +960 93 dataset """kinships""" +960 93 model """tucker""" +960 93 loss """crossentropy""" +960 93 regularizer """no""" +960 93 optimizer """adadelta""" +960 93 training_loop """lcwa""" +960 93 evaluator """rankbased""" +960 94 dataset """kinships""" +960 94 model """tucker""" +960 94 loss """crossentropy""" +960 94 regularizer """no""" +960 94 optimizer """adadelta""" +960 94 training_loop """lcwa""" +960 94 evaluator """rankbased""" +960 95 dataset """kinships""" +960 95 model """tucker""" +960 95 loss """crossentropy""" +960 95 regularizer """no""" +960 95 optimizer """adadelta""" +960 95 training_loop """lcwa""" +960 95 evaluator """rankbased""" +960 96 dataset """kinships""" +960 96 model """tucker""" +960 96 loss """crossentropy""" +960 96 regularizer """no""" +960 96 optimizer """adadelta""" +960 96 training_loop """lcwa""" +960 96 evaluator """rankbased""" +960 97 dataset """kinships""" +960 
97 model """tucker""" +960 97 loss """crossentropy""" +960 97 regularizer """no""" +960 97 optimizer """adadelta""" +960 97 training_loop """lcwa""" +960 97 evaluator """rankbased""" +960 98 dataset """kinships""" +960 98 model """tucker""" +960 98 loss """crossentropy""" +960 98 regularizer """no""" +960 98 optimizer """adadelta""" +960 98 training_loop """lcwa""" +960 98 evaluator """rankbased""" +960 99 dataset """kinships""" +960 99 model """tucker""" +960 99 loss """crossentropy""" +960 99 regularizer """no""" +960 99 optimizer """adadelta""" +960 99 training_loop """lcwa""" +960 99 evaluator """rankbased""" +960 100 dataset """kinships""" +960 100 model """tucker""" +960 100 loss """crossentropy""" +960 100 regularizer """no""" +960 100 optimizer """adadelta""" +960 100 training_loop """lcwa""" +960 100 evaluator """rankbased""" +961 1 model.embedding_dim 2.0 +961 1 model.relation_dim 2.0 +961 1 model.dropout_0 0.1898909242444914 +961 1 model.dropout_1 0.1272723092667976 +961 1 model.dropout_2 0.2963593604907351 +961 1 negative_sampler.num_negs_per_pos 20.0 +961 1 training.batch_size 0.0 +961 2 model.embedding_dim 2.0 +961 2 model.relation_dim 1.0 +961 2 model.dropout_0 0.3373561768884895 +961 2 model.dropout_1 0.3084870248524467 +961 2 model.dropout_2 0.37295195110776636 +961 2 negative_sampler.num_negs_per_pos 51.0 +961 2 training.batch_size 1.0 +961 1 dataset """kinships""" +961 1 model """tucker""" +961 1 loss """bceaftersigmoid""" +961 1 regularizer """no""" +961 1 optimizer """adadelta""" +961 1 training_loop """owa""" +961 1 negative_sampler """basic""" +961 1 evaluator """rankbased""" +961 2 dataset """kinships""" +961 2 model """tucker""" +961 2 loss """bceaftersigmoid""" +961 2 regularizer """no""" +961 2 optimizer """adadelta""" +961 2 training_loop """owa""" +961 2 negative_sampler """basic""" +961 2 evaluator """rankbased""" +962 1 model.embedding_dim 2.0 +962 1 model.relation_dim 1.0 +962 1 model.dropout_0 0.32774085240057416 +962 1 
model.dropout_1 0.21838344736095225 +962 1 model.dropout_2 0.16523395900340881 +962 1 negative_sampler.num_negs_per_pos 10.0 +962 1 training.batch_size 2.0 +962 2 model.embedding_dim 1.0 +962 2 model.relation_dim 1.0 +962 2 model.dropout_0 0.2395711578285583 +962 2 model.dropout_1 0.20137049471327617 +962 2 model.dropout_2 0.1944194247678425 +962 2 negative_sampler.num_negs_per_pos 50.0 +962 2 training.batch_size 1.0 +962 1 dataset """kinships""" +962 1 model """tucker""" +962 1 loss """softplus""" +962 1 regularizer """no""" +962 1 optimizer """adadelta""" +962 1 training_loop """owa""" +962 1 negative_sampler """basic""" +962 1 evaluator """rankbased""" +962 2 dataset """kinships""" +962 2 model """tucker""" +962 2 loss """softplus""" +962 2 regularizer """no""" +962 2 optimizer """adadelta""" +962 2 training_loop """owa""" +962 2 negative_sampler """basic""" +962 2 evaluator """rankbased""" +963 1 model.embedding_dim 2.0 +963 1 model.relation_dim 1.0 +963 1 model.dropout_0 0.10182967545360243 +963 1 model.dropout_1 0.36771802353147176 +963 1 model.dropout_2 0.1248547670088036 +963 1 negative_sampler.num_negs_per_pos 22.0 +963 1 training.batch_size 2.0 +963 2 model.embedding_dim 2.0 +963 2 model.relation_dim 0.0 +963 2 model.dropout_0 0.3402448017236711 +963 2 model.dropout_1 0.3854063922605121 +963 2 model.dropout_2 0.12350717040177198 +963 2 negative_sampler.num_negs_per_pos 6.0 +963 2 training.batch_size 1.0 +963 3 model.embedding_dim 1.0 +963 3 model.relation_dim 0.0 +963 3 model.dropout_0 0.3477714357034892 +963 3 model.dropout_1 0.2633069963296416 +963 3 model.dropout_2 0.12004980178437048 +963 3 negative_sampler.num_negs_per_pos 13.0 +963 3 training.batch_size 1.0 +963 4 model.embedding_dim 1.0 +963 4 model.relation_dim 2.0 +963 4 model.dropout_0 0.36978265130968435 +963 4 model.dropout_1 0.48849290818438107 +963 4 model.dropout_2 0.428678928820589 +963 4 negative_sampler.num_negs_per_pos 15.0 +963 4 training.batch_size 1.0 +963 1 dataset """kinships""" 
+963 1 model """tucker""" +963 1 loss """bceaftersigmoid""" +963 1 regularizer """no""" +963 1 optimizer """adadelta""" +963 1 training_loop """owa""" +963 1 negative_sampler """basic""" +963 1 evaluator """rankbased""" +963 2 dataset """kinships""" +963 2 model """tucker""" +963 2 loss """bceaftersigmoid""" +963 2 regularizer """no""" +963 2 optimizer """adadelta""" +963 2 training_loop """owa""" +963 2 negative_sampler """basic""" +963 2 evaluator """rankbased""" +963 3 dataset """kinships""" +963 3 model """tucker""" +963 3 loss """bceaftersigmoid""" +963 3 regularizer """no""" +963 3 optimizer """adadelta""" +963 3 training_loop """owa""" +963 3 negative_sampler """basic""" +963 3 evaluator """rankbased""" +963 4 dataset """kinships""" +963 4 model """tucker""" +963 4 loss """bceaftersigmoid""" +963 4 regularizer """no""" +963 4 optimizer """adadelta""" +963 4 training_loop """owa""" +963 4 negative_sampler """basic""" +963 4 evaluator """rankbased""" +964 1 model.embedding_dim 0.0 +964 1 model.relation_dim 2.0 +964 1 model.dropout_0 0.13623401123459478 +964 1 model.dropout_1 0.49628385762042904 +964 1 model.dropout_2 0.4471385030596974 +964 1 loss.margin 7.983907245614943 +964 1 negative_sampler.num_negs_per_pos 15.0 +964 1 training.batch_size 1.0 +964 2 model.embedding_dim 2.0 +964 2 model.relation_dim 2.0 +964 2 model.dropout_0 0.23242399656447416 +964 2 model.dropout_1 0.43291557585563134 +964 2 model.dropout_2 0.41596556219683845 +964 2 loss.margin 8.954592173256897 +964 2 negative_sampler.num_negs_per_pos 25.0 +964 2 training.batch_size 1.0 +964 1 dataset """kinships""" +964 1 model """tucker""" +964 1 loss """marginranking""" +964 1 regularizer """no""" +964 1 optimizer """adadelta""" +964 1 training_loop """owa""" +964 1 negative_sampler """basic""" +964 1 evaluator """rankbased""" +964 2 dataset """kinships""" +964 2 model """tucker""" +964 2 loss """marginranking""" +964 2 regularizer """no""" +964 2 optimizer """adadelta""" +964 2 training_loop 
"""owa""" +964 2 negative_sampler """basic""" +964 2 evaluator """rankbased""" +965 1 model.embedding_dim 1.0 +965 1 model.relation_dim 0.0 +965 1 model.dropout_0 0.1926131637024657 +965 1 model.dropout_1 0.11862792073505934 +965 1 model.dropout_2 0.10141566827490399 +965 1 loss.margin 6.256391455264938 +965 1 negative_sampler.num_negs_per_pos 18.0 +965 1 training.batch_size 1.0 +965 2 model.embedding_dim 0.0 +965 2 model.relation_dim 0.0 +965 2 model.dropout_0 0.4058296077086274 +965 2 model.dropout_1 0.2650446240638762 +965 2 model.dropout_2 0.12357949212284912 +965 2 loss.margin 9.771399515949756 +965 2 negative_sampler.num_negs_per_pos 34.0 +965 2 training.batch_size 1.0 +965 3 model.embedding_dim 2.0 +965 3 model.relation_dim 2.0 +965 3 model.dropout_0 0.4380797328238992 +965 3 model.dropout_1 0.23044830570948058 +965 3 model.dropout_2 0.3002674573168349 +965 3 loss.margin 5.758876620213008 +965 3 negative_sampler.num_negs_per_pos 23.0 +965 3 training.batch_size 2.0 +965 4 model.embedding_dim 0.0 +965 4 model.relation_dim 1.0 +965 4 model.dropout_0 0.4368350848121275 +965 4 model.dropout_1 0.10193170424664469 +965 4 model.dropout_2 0.28803369530834433 +965 4 loss.margin 9.705416990201549 +965 4 negative_sampler.num_negs_per_pos 23.0 +965 4 training.batch_size 1.0 +965 5 model.embedding_dim 0.0 +965 5 model.relation_dim 2.0 +965 5 model.dropout_0 0.43179762700455515 +965 5 model.dropout_1 0.4162426216667273 +965 5 model.dropout_2 0.36716326664976773 +965 5 loss.margin 2.442558996762254 +965 5 negative_sampler.num_negs_per_pos 87.0 +965 5 training.batch_size 0.0 +965 1 dataset """kinships""" +965 1 model """tucker""" +965 1 loss """marginranking""" +965 1 regularizer """no""" +965 1 optimizer """adadelta""" +965 1 training_loop """owa""" +965 1 negative_sampler """basic""" +965 1 evaluator """rankbased""" +965 2 dataset """kinships""" +965 2 model """tucker""" +965 2 loss """marginranking""" +965 2 regularizer """no""" +965 2 optimizer """adadelta""" +965 2 
training_loop """owa""" +965 2 negative_sampler """basic""" +965 2 evaluator """rankbased""" +965 3 dataset """kinships""" +965 3 model """tucker""" +965 3 loss """marginranking""" +965 3 regularizer """no""" +965 3 optimizer """adadelta""" +965 3 training_loop """owa""" +965 3 negative_sampler """basic""" +965 3 evaluator """rankbased""" +965 4 dataset """kinships""" +965 4 model """tucker""" +965 4 loss """marginranking""" +965 4 regularizer """no""" +965 4 optimizer """adadelta""" +965 4 training_loop """owa""" +965 4 negative_sampler """basic""" +965 4 evaluator """rankbased""" +965 5 dataset """kinships""" +965 5 model """tucker""" +965 5 loss """marginranking""" +965 5 regularizer """no""" +965 5 optimizer """adadelta""" +965 5 training_loop """owa""" +965 5 negative_sampler """basic""" +965 5 evaluator """rankbased""" +966 1 model.embedding_dim 2.0 +966 1 model.relation_dim 2.0 +966 1 model.dropout_0 0.24019005684482445 +966 1 model.dropout_1 0.48258848523321096 +966 1 model.dropout_2 0.44113747065299946 +966 1 loss.margin 12.369664739304028 +966 1 loss.adversarial_temperature 0.24984504160615958 +966 1 negative_sampler.num_negs_per_pos 73.0 +966 1 training.batch_size 2.0 +966 2 model.embedding_dim 2.0 +966 2 model.relation_dim 2.0 +966 2 model.dropout_0 0.3810726058717494 +966 2 model.dropout_1 0.42400161235581335 +966 2 model.dropout_2 0.4308334868804703 +966 2 loss.margin 9.884994980181943 +966 2 loss.adversarial_temperature 0.4163108693660377 +966 2 negative_sampler.num_negs_per_pos 6.0 +966 2 training.batch_size 2.0 +966 1 dataset """kinships""" +966 1 model """tucker""" +966 1 loss """nssa""" +966 1 regularizer """no""" +966 1 optimizer """adadelta""" +966 1 training_loop """owa""" +966 1 negative_sampler """basic""" +966 1 evaluator """rankbased""" +966 2 dataset """kinships""" +966 2 model """tucker""" +966 2 loss """nssa""" +966 2 regularizer """no""" +966 2 optimizer """adadelta""" +966 2 training_loop """owa""" +966 2 negative_sampler """basic""" 
+966 2 evaluator """rankbased""" +967 1 model.embedding_dim 0.0 +967 1 model.relation_dim 2.0 +967 1 model.dropout_0 0.16874789681321675 +967 1 model.dropout_1 0.26906482494348116 +967 1 model.dropout_2 0.34400702650039894 +967 1 optimizer.lr 0.020654687032260182 +967 1 negative_sampler.num_negs_per_pos 87.0 +967 1 training.batch_size 0.0 +967 2 model.embedding_dim 1.0 +967 2 model.relation_dim 2.0 +967 2 model.dropout_0 0.41663769168272813 +967 2 model.dropout_1 0.27105145818750137 +967 2 model.dropout_2 0.49857740000234224 +967 2 optimizer.lr 0.00224709033655705 +967 2 negative_sampler.num_negs_per_pos 51.0 +967 2 training.batch_size 0.0 +967 3 model.embedding_dim 1.0 +967 3 model.relation_dim 2.0 +967 3 model.dropout_0 0.4008251407317629 +967 3 model.dropout_1 0.3254983753127496 +967 3 model.dropout_2 0.45192575548603453 +967 3 optimizer.lr 0.004984844598687207 +967 3 negative_sampler.num_negs_per_pos 74.0 +967 3 training.batch_size 1.0 +967 1 dataset """kinships""" +967 1 model """tucker""" +967 1 loss """bceaftersigmoid""" +967 1 regularizer """no""" +967 1 optimizer """adam""" +967 1 training_loop """owa""" +967 1 negative_sampler """basic""" +967 1 evaluator """rankbased""" +967 2 dataset """kinships""" +967 2 model """tucker""" +967 2 loss """bceaftersigmoid""" +967 2 regularizer """no""" +967 2 optimizer """adam""" +967 2 training_loop """owa""" +967 2 negative_sampler """basic""" +967 2 evaluator """rankbased""" +967 3 dataset """kinships""" +967 3 model """tucker""" +967 3 loss """bceaftersigmoid""" +967 3 regularizer """no""" +967 3 optimizer """adam""" +967 3 training_loop """owa""" +967 3 negative_sampler """basic""" +967 3 evaluator """rankbased""" +968 1 model.embedding_dim 0.0 +968 1 model.relation_dim 1.0 +968 1 model.dropout_0 0.349308094407825 +968 1 model.dropout_1 0.3750439913007573 +968 1 model.dropout_2 0.42520383946818024 +968 1 optimizer.lr 0.0015988166954739014 +968 1 negative_sampler.num_negs_per_pos 62.0 +968 1 training.batch_size 2.0 
+968 2 model.embedding_dim 1.0 +968 2 model.relation_dim 1.0 +968 2 model.dropout_0 0.4746117172905868 +968 2 model.dropout_1 0.33461747296282507 +968 2 model.dropout_2 0.39448282290796133 +968 2 optimizer.lr 0.0504075917114791 +968 2 negative_sampler.num_negs_per_pos 86.0 +968 2 training.batch_size 0.0 +968 3 model.embedding_dim 1.0 +968 3 model.relation_dim 0.0 +968 3 model.dropout_0 0.2418237301072415 +968 3 model.dropout_1 0.38745642657547136 +968 3 model.dropout_2 0.26918357986959707 +968 3 optimizer.lr 0.08100151824327696 +968 3 negative_sampler.num_negs_per_pos 19.0 +968 3 training.batch_size 2.0 +968 4 model.embedding_dim 1.0 +968 4 model.relation_dim 0.0 +968 4 model.dropout_0 0.36226817282178536 +968 4 model.dropout_1 0.36125624601285466 +968 4 model.dropout_2 0.26474505137713383 +968 4 optimizer.lr 0.0033860622659639002 +968 4 negative_sampler.num_negs_per_pos 71.0 +968 4 training.batch_size 1.0 +968 5 model.embedding_dim 1.0 +968 5 model.relation_dim 2.0 +968 5 model.dropout_0 0.16010736888203536 +968 5 model.dropout_1 0.3883385477275569 +968 5 model.dropout_2 0.28504093670743996 +968 5 optimizer.lr 0.014251475003314595 +968 5 negative_sampler.num_negs_per_pos 46.0 +968 5 training.batch_size 0.0 +968 6 model.embedding_dim 1.0 +968 6 model.relation_dim 2.0 +968 6 model.dropout_0 0.3952607307472682 +968 6 model.dropout_1 0.4204451042904261 +968 6 model.dropout_2 0.24478657569664516 +968 6 optimizer.lr 0.09121351404552475 +968 6 negative_sampler.num_negs_per_pos 4.0 +968 6 training.batch_size 2.0 +968 7 model.embedding_dim 1.0 +968 7 model.relation_dim 2.0 +968 7 model.dropout_0 0.4873373634310227 +968 7 model.dropout_1 0.21081077952158922 +968 7 model.dropout_2 0.49984813284867174 +968 7 optimizer.lr 0.003005158436002895 +968 7 negative_sampler.num_negs_per_pos 50.0 +968 7 training.batch_size 0.0 +968 8 model.embedding_dim 2.0 +968 8 model.relation_dim 2.0 +968 8 model.dropout_0 0.17772093233841832 +968 8 model.dropout_1 0.11333381794806369 +968 8 
model.dropout_2 0.37126969987222097 +968 8 optimizer.lr 0.017232314781774626 +968 8 negative_sampler.num_negs_per_pos 10.0 +968 8 training.batch_size 1.0 +968 1 dataset """kinships""" +968 1 model """tucker""" +968 1 loss """softplus""" +968 1 regularizer """no""" +968 1 optimizer """adam""" +968 1 training_loop """owa""" +968 1 negative_sampler """basic""" +968 1 evaluator """rankbased""" +968 2 dataset """kinships""" +968 2 model """tucker""" +968 2 loss """softplus""" +968 2 regularizer """no""" +968 2 optimizer """adam""" +968 2 training_loop """owa""" +968 2 negative_sampler """basic""" +968 2 evaluator """rankbased""" +968 3 dataset """kinships""" +968 3 model """tucker""" +968 3 loss """softplus""" +968 3 regularizer """no""" +968 3 optimizer """adam""" +968 3 training_loop """owa""" +968 3 negative_sampler """basic""" +968 3 evaluator """rankbased""" +968 4 dataset """kinships""" +968 4 model """tucker""" +968 4 loss """softplus""" +968 4 regularizer """no""" +968 4 optimizer """adam""" +968 4 training_loop """owa""" +968 4 negative_sampler """basic""" +968 4 evaluator """rankbased""" +968 5 dataset """kinships""" +968 5 model """tucker""" +968 5 loss """softplus""" +968 5 regularizer """no""" +968 5 optimizer """adam""" +968 5 training_loop """owa""" +968 5 negative_sampler """basic""" +968 5 evaluator """rankbased""" +968 6 dataset """kinships""" +968 6 model """tucker""" +968 6 loss """softplus""" +968 6 regularizer """no""" +968 6 optimizer """adam""" +968 6 training_loop """owa""" +968 6 negative_sampler """basic""" +968 6 evaluator """rankbased""" +968 7 dataset """kinships""" +968 7 model """tucker""" +968 7 loss """softplus""" +968 7 regularizer """no""" +968 7 optimizer """adam""" +968 7 training_loop """owa""" +968 7 negative_sampler """basic""" +968 7 evaluator """rankbased""" +968 8 dataset """kinships""" +968 8 model """tucker""" +968 8 loss """softplus""" +968 8 regularizer """no""" +968 8 optimizer """adam""" +968 8 training_loop """owa""" 
+968 8 negative_sampler """basic""" +968 8 evaluator """rankbased""" +969 1 model.embedding_dim 0.0 +969 1 model.relation_dim 1.0 +969 1 model.dropout_0 0.2537693848532582 +969 1 model.dropout_1 0.22708354730808408 +969 1 model.dropout_2 0.31053038674851446 +969 1 optimizer.lr 0.0013774005833511677 +969 1 negative_sampler.num_negs_per_pos 23.0 +969 1 training.batch_size 1.0 +969 2 model.embedding_dim 0.0 +969 2 model.relation_dim 0.0 +969 2 model.dropout_0 0.4240684144643923 +969 2 model.dropout_1 0.43975100543437284 +969 2 model.dropout_2 0.49074917622631375 +969 2 optimizer.lr 0.0016280012726460407 +969 2 negative_sampler.num_negs_per_pos 91.0 +969 2 training.batch_size 1.0 +969 3 model.embedding_dim 0.0 +969 3 model.relation_dim 2.0 +969 3 model.dropout_0 0.4913035176155924 +969 3 model.dropout_1 0.18475612925944318 +969 3 model.dropout_2 0.35330185717521895 +969 3 optimizer.lr 0.04369173774106236 +969 3 negative_sampler.num_negs_per_pos 94.0 +969 3 training.batch_size 0.0 +969 4 model.embedding_dim 0.0 +969 4 model.relation_dim 0.0 +969 4 model.dropout_0 0.19276981678352978 +969 4 model.dropout_1 0.363871847802165 +969 4 model.dropout_2 0.11999714117722493 +969 4 optimizer.lr 0.004988213928807194 +969 4 negative_sampler.num_negs_per_pos 85.0 +969 4 training.batch_size 0.0 +969 5 model.embedding_dim 2.0 +969 5 model.relation_dim 0.0 +969 5 model.dropout_0 0.4478641885224499 +969 5 model.dropout_1 0.12604137164884197 +969 5 model.dropout_2 0.473502279383358 +969 5 optimizer.lr 0.07357520337359069 +969 5 negative_sampler.num_negs_per_pos 84.0 +969 5 training.batch_size 1.0 +969 6 model.embedding_dim 0.0 +969 6 model.relation_dim 0.0 +969 6 model.dropout_0 0.29491221587081345 +969 6 model.dropout_1 0.2234207179547136 +969 6 model.dropout_2 0.4976854919746373 +969 6 optimizer.lr 0.022330726998668435 +969 6 negative_sampler.num_negs_per_pos 92.0 +969 6 training.batch_size 0.0 +969 7 model.embedding_dim 0.0 +969 7 model.relation_dim 2.0 +969 7 model.dropout_0 
0.32442197570226067 +969 7 model.dropout_1 0.4062401283897872 +969 7 model.dropout_2 0.18248929569747727 +969 7 optimizer.lr 0.016583216611418913 +969 7 negative_sampler.num_negs_per_pos 46.0 +969 7 training.batch_size 0.0 +969 8 model.embedding_dim 0.0 +969 8 model.relation_dim 0.0 +969 8 model.dropout_0 0.4727495033138786 +969 8 model.dropout_1 0.37263387523725355 +969 8 model.dropout_2 0.1763160215746157 +969 8 optimizer.lr 0.07459206689487025 +969 8 negative_sampler.num_negs_per_pos 10.0 +969 8 training.batch_size 2.0 +969 9 model.embedding_dim 2.0 +969 9 model.relation_dim 2.0 +969 9 model.dropout_0 0.4278220148231129 +969 9 model.dropout_1 0.4531334871128278 +969 9 model.dropout_2 0.42559250811469884 +969 9 optimizer.lr 0.08144165697246097 +969 9 negative_sampler.num_negs_per_pos 62.0 +969 9 training.batch_size 0.0 +969 10 model.embedding_dim 1.0 +969 10 model.relation_dim 2.0 +969 10 model.dropout_0 0.274796650845026 +969 10 model.dropout_1 0.41626229834228895 +969 10 model.dropout_2 0.18559211799125724 +969 10 optimizer.lr 0.0809148037721777 +969 10 negative_sampler.num_negs_per_pos 94.0 +969 10 training.batch_size 2.0 +969 11 model.embedding_dim 0.0 +969 11 model.relation_dim 2.0 +969 11 model.dropout_0 0.16072921980406224 +969 11 model.dropout_1 0.3068679484425316 +969 11 model.dropout_2 0.36948211324507874 +969 11 optimizer.lr 0.004935269668765924 +969 11 negative_sampler.num_negs_per_pos 82.0 +969 11 training.batch_size 1.0 +969 1 dataset """kinships""" +969 1 model """tucker""" +969 1 loss """bceaftersigmoid""" +969 1 regularizer """no""" +969 1 optimizer """adam""" +969 1 training_loop """owa""" +969 1 negative_sampler """basic""" +969 1 evaluator """rankbased""" +969 2 dataset """kinships""" +969 2 model """tucker""" +969 2 loss """bceaftersigmoid""" +969 2 regularizer """no""" +969 2 optimizer """adam""" +969 2 training_loop """owa""" +969 2 negative_sampler """basic""" +969 2 evaluator """rankbased""" +969 3 dataset """kinships""" +969 3 model 
"""tucker""" +969 3 loss """bceaftersigmoid""" +969 3 regularizer """no""" +969 3 optimizer """adam""" +969 3 training_loop """owa""" +969 3 negative_sampler """basic""" +969 3 evaluator """rankbased""" +969 4 dataset """kinships""" +969 4 model """tucker""" +969 4 loss """bceaftersigmoid""" +969 4 regularizer """no""" +969 4 optimizer """adam""" +969 4 training_loop """owa""" +969 4 negative_sampler """basic""" +969 4 evaluator """rankbased""" +969 5 dataset """kinships""" +969 5 model """tucker""" +969 5 loss """bceaftersigmoid""" +969 5 regularizer """no""" +969 5 optimizer """adam""" +969 5 training_loop """owa""" +969 5 negative_sampler """basic""" +969 5 evaluator """rankbased""" +969 6 dataset """kinships""" +969 6 model """tucker""" +969 6 loss """bceaftersigmoid""" +969 6 regularizer """no""" +969 6 optimizer """adam""" +969 6 training_loop """owa""" +969 6 negative_sampler """basic""" +969 6 evaluator """rankbased""" +969 7 dataset """kinships""" +969 7 model """tucker""" +969 7 loss """bceaftersigmoid""" +969 7 regularizer """no""" +969 7 optimizer """adam""" +969 7 training_loop """owa""" +969 7 negative_sampler """basic""" +969 7 evaluator """rankbased""" +969 8 dataset """kinships""" +969 8 model """tucker""" +969 8 loss """bceaftersigmoid""" +969 8 regularizer """no""" +969 8 optimizer """adam""" +969 8 training_loop """owa""" +969 8 negative_sampler """basic""" +969 8 evaluator """rankbased""" +969 9 dataset """kinships""" +969 9 model """tucker""" +969 9 loss """bceaftersigmoid""" +969 9 regularizer """no""" +969 9 optimizer """adam""" +969 9 training_loop """owa""" +969 9 negative_sampler """basic""" +969 9 evaluator """rankbased""" +969 10 dataset """kinships""" +969 10 model """tucker""" +969 10 loss """bceaftersigmoid""" +969 10 regularizer """no""" +969 10 optimizer """adam""" +969 10 training_loop """owa""" +969 10 negative_sampler """basic""" +969 10 evaluator """rankbased""" +969 11 dataset """kinships""" +969 11 model """tucker""" +969 11 
loss """bceaftersigmoid""" +969 11 regularizer """no""" +969 11 optimizer """adam""" +969 11 training_loop """owa""" +969 11 negative_sampler """basic""" +969 11 evaluator """rankbased""" +970 1 model.embedding_dim 0.0 +970 1 model.relation_dim 2.0 +970 1 model.dropout_0 0.38283434888211026 +970 1 model.dropout_1 0.4607226030489812 +970 1 model.dropout_2 0.4079866428686316 +970 1 optimizer.lr 0.001408173626801425 +970 1 negative_sampler.num_negs_per_pos 88.0 +970 1 training.batch_size 1.0 +970 2 model.embedding_dim 2.0 +970 2 model.relation_dim 1.0 +970 2 model.dropout_0 0.1823641949203998 +970 2 model.dropout_1 0.23241397888672444 +970 2 model.dropout_2 0.44110093571311504 +970 2 optimizer.lr 0.021468421911995703 +970 2 negative_sampler.num_negs_per_pos 22.0 +970 2 training.batch_size 1.0 +970 3 model.embedding_dim 2.0 +970 3 model.relation_dim 2.0 +970 3 model.dropout_0 0.2773351580439439 +970 3 model.dropout_1 0.17572588756874744 +970 3 model.dropout_2 0.19391806706504797 +970 3 optimizer.lr 0.011766676196800926 +970 3 negative_sampler.num_negs_per_pos 46.0 +970 3 training.batch_size 2.0 +970 4 model.embedding_dim 2.0 +970 4 model.relation_dim 1.0 +970 4 model.dropout_0 0.4563413657032851 +970 4 model.dropout_1 0.319247765461504 +970 4 model.dropout_2 0.3969072694133373 +970 4 optimizer.lr 0.0278169319636469 +970 4 negative_sampler.num_negs_per_pos 73.0 +970 4 training.batch_size 1.0 +970 1 dataset """kinships""" +970 1 model """tucker""" +970 1 loss """softplus""" +970 1 regularizer """no""" +970 1 optimizer """adam""" +970 1 training_loop """owa""" +970 1 negative_sampler """basic""" +970 1 evaluator """rankbased""" +970 2 dataset """kinships""" +970 2 model """tucker""" +970 2 loss """softplus""" +970 2 regularizer """no""" +970 2 optimizer """adam""" +970 2 training_loop """owa""" +970 2 negative_sampler """basic""" +970 2 evaluator """rankbased""" +970 3 dataset """kinships""" +970 3 model """tucker""" +970 3 loss """softplus""" +970 3 regularizer """no""" 
+970 3 optimizer """adam""" +970 3 training_loop """owa""" +970 3 negative_sampler """basic""" +970 3 evaluator """rankbased""" +970 4 dataset """kinships""" +970 4 model """tucker""" +970 4 loss """softplus""" +970 4 regularizer """no""" +970 4 optimizer """adam""" +970 4 training_loop """owa""" +970 4 negative_sampler """basic""" +970 4 evaluator """rankbased""" +971 1 model.embedding_dim 1.0 +971 1 model.relation_dim 0.0 +971 1 model.dropout_0 0.4915501873243426 +971 1 model.dropout_1 0.18244934897389162 +971 1 model.dropout_2 0.24032911920594066 +971 1 optimizer.lr 0.003913191598243745 +971 1 training.batch_size 0.0 +971 1 training.label_smoothing 0.0020148453916806856 +971 2 model.embedding_dim 0.0 +971 2 model.relation_dim 0.0 +971 2 model.dropout_0 0.37521852417396195 +971 2 model.dropout_1 0.2292351624281643 +971 2 model.dropout_2 0.4044150083225513 +971 2 optimizer.lr 0.004448614938324544 +971 2 training.batch_size 1.0 +971 2 training.label_smoothing 0.012197179410941127 +971 3 model.embedding_dim 2.0 +971 3 model.relation_dim 2.0 +971 3 model.dropout_0 0.13180896253042046 +971 3 model.dropout_1 0.27779296088650635 +971 3 model.dropout_2 0.44079668555012586 +971 3 optimizer.lr 0.003506944570374417 +971 3 training.batch_size 2.0 +971 3 training.label_smoothing 0.0018553806883143669 +971 4 model.embedding_dim 2.0 +971 4 model.relation_dim 2.0 +971 4 model.dropout_0 0.3183039706247078 +971 4 model.dropout_1 0.3024511777425758 +971 4 model.dropout_2 0.13909501872361535 +971 4 optimizer.lr 0.0017657297461391949 +971 4 training.batch_size 2.0 +971 4 training.label_smoothing 0.005009670238759436 +971 5 model.embedding_dim 1.0 +971 5 model.relation_dim 1.0 +971 5 model.dropout_0 0.28518145764075603 +971 5 model.dropout_1 0.44971226137193654 +971 5 model.dropout_2 0.3886196057637723 +971 5 optimizer.lr 0.08428657279092636 +971 5 training.batch_size 0.0 +971 5 training.label_smoothing 0.004076237709878734 +971 6 model.embedding_dim 0.0 +971 6 model.relation_dim 2.0 
+971 6 model.dropout_0 0.38694572401509747 +971 6 model.dropout_1 0.4606494132189859 +971 6 model.dropout_2 0.4192796711108622 +971 6 optimizer.lr 0.01847847071536605 +971 6 training.batch_size 0.0 +971 6 training.label_smoothing 0.4202061623223889 +971 7 model.embedding_dim 2.0 +971 7 model.relation_dim 2.0 +971 7 model.dropout_0 0.23503320485740442 +971 7 model.dropout_1 0.3674524186820112 +971 7 model.dropout_2 0.16835423583359393 +971 7 optimizer.lr 0.05896863648449446 +971 7 training.batch_size 0.0 +971 7 training.label_smoothing 0.372238509247416 +971 8 model.embedding_dim 1.0 +971 8 model.relation_dim 2.0 +971 8 model.dropout_0 0.22103687974904013 +971 8 model.dropout_1 0.10212996427413637 +971 8 model.dropout_2 0.38252087040676896 +971 8 optimizer.lr 0.023278094139876505 +971 8 training.batch_size 0.0 +971 8 training.label_smoothing 0.03593658460094341 +971 9 model.embedding_dim 2.0 +971 9 model.relation_dim 2.0 +971 9 model.dropout_0 0.41282591886867215 +971 9 model.dropout_1 0.3469596864225404 +971 9 model.dropout_2 0.22480930618932776 +971 9 optimizer.lr 0.007927451903848496 +971 9 training.batch_size 1.0 +971 9 training.label_smoothing 0.2563175638746108 +971 10 model.embedding_dim 0.0 +971 10 model.relation_dim 0.0 +971 10 model.dropout_0 0.36460115568563967 +971 10 model.dropout_1 0.45873496820042686 +971 10 model.dropout_2 0.23105075669382807 +971 10 optimizer.lr 0.012609543359908005 +971 10 training.batch_size 2.0 +971 10 training.label_smoothing 0.747372940840337 +971 11 model.embedding_dim 2.0 +971 11 model.relation_dim 0.0 +971 11 model.dropout_0 0.29438043239887324 +971 11 model.dropout_1 0.48059266112908083 +971 11 model.dropout_2 0.4257149565083579 +971 11 optimizer.lr 0.021947022460299455 +971 11 training.batch_size 2.0 +971 11 training.label_smoothing 0.12330820939865164 +971 12 model.embedding_dim 1.0 +971 12 model.relation_dim 0.0 +971 12 model.dropout_0 0.1259100834995274 +971 12 model.dropout_1 0.21494218245306765 +971 12 model.dropout_2 
0.11281265168315056 +971 12 optimizer.lr 0.051898362024050404 +971 12 training.batch_size 1.0 +971 12 training.label_smoothing 0.056622719213877 +971 13 model.embedding_dim 0.0 +971 13 model.relation_dim 1.0 +971 13 model.dropout_0 0.1732385337494006 +971 13 model.dropout_1 0.29808422182534766 +971 13 model.dropout_2 0.21188578774900219 +971 13 optimizer.lr 0.008643666790517661 +971 13 training.batch_size 2.0 +971 13 training.label_smoothing 0.0032025476189876875 +971 14 model.embedding_dim 1.0 +971 14 model.relation_dim 2.0 +971 14 model.dropout_0 0.24255371064603867 +971 14 model.dropout_1 0.19051805482464387 +971 14 model.dropout_2 0.45891574263678603 +971 14 optimizer.lr 0.001147229108896137 +971 14 training.batch_size 2.0 +971 14 training.label_smoothing 0.2250349377436455 +971 15 model.embedding_dim 2.0 +971 15 model.relation_dim 0.0 +971 15 model.dropout_0 0.4741516482227024 +971 15 model.dropout_1 0.2797907264603233 +971 15 model.dropout_2 0.20447343797993428 +971 15 optimizer.lr 0.01095601467140189 +971 15 training.batch_size 0.0 +971 15 training.label_smoothing 0.12908402557451173 +971 16 model.embedding_dim 1.0 +971 16 model.relation_dim 0.0 +971 16 model.dropout_0 0.18311781805128413 +971 16 model.dropout_1 0.12785671958569364 +971 16 model.dropout_2 0.3715733631681367 +971 16 optimizer.lr 0.0811953385374465 +971 16 training.batch_size 2.0 +971 16 training.label_smoothing 0.05175633421497365 +971 17 model.embedding_dim 2.0 +971 17 model.relation_dim 2.0 +971 17 model.dropout_0 0.29607819875289704 +971 17 model.dropout_1 0.16980599381630743 +971 17 model.dropout_2 0.14260073333798343 +971 17 optimizer.lr 0.04620260817703868 +971 17 training.batch_size 2.0 +971 17 training.label_smoothing 0.00435764834178909 +971 18 model.embedding_dim 2.0 +971 18 model.relation_dim 0.0 +971 18 model.dropout_0 0.3961472716153714 +971 18 model.dropout_1 0.329586209568788 +971 18 model.dropout_2 0.4405059813194101 +971 18 optimizer.lr 0.009990829692687601 +971 18 
training.batch_size 0.0 +971 18 training.label_smoothing 0.028123465262425377 +971 19 model.embedding_dim 1.0 +971 19 model.relation_dim 0.0 +971 19 model.dropout_0 0.4407259959139444 +971 19 model.dropout_1 0.4514805517819876 +971 19 model.dropout_2 0.1535588750906276 +971 19 optimizer.lr 0.026709890062153224 +971 19 training.batch_size 1.0 +971 19 training.label_smoothing 0.007979626478121575 +971 20 model.embedding_dim 0.0 +971 20 model.relation_dim 1.0 +971 20 model.dropout_0 0.10709609830418065 +971 20 model.dropout_1 0.23252688647685177 +971 20 model.dropout_2 0.2340869781000588 +971 20 optimizer.lr 0.009737618526299584 +971 20 training.batch_size 2.0 +971 20 training.label_smoothing 0.0016001801269354752 +971 21 model.embedding_dim 2.0 +971 21 model.relation_dim 1.0 +971 21 model.dropout_0 0.4665129985825243 +971 21 model.dropout_1 0.15688688079734187 +971 21 model.dropout_2 0.47296988352214075 +971 21 optimizer.lr 0.09591554631825995 +971 21 training.batch_size 1.0 +971 21 training.label_smoothing 0.02275471176887724 +971 22 model.embedding_dim 1.0 +971 22 model.relation_dim 1.0 +971 22 model.dropout_0 0.24293803853789836 +971 22 model.dropout_1 0.3407624484577014 +971 22 model.dropout_2 0.1637245874971912 +971 22 optimizer.lr 0.0011114556139759997 +971 22 training.batch_size 0.0 +971 22 training.label_smoothing 0.0032712620695237236 +971 23 model.embedding_dim 0.0 +971 23 model.relation_dim 2.0 +971 23 model.dropout_0 0.2979325229464857 +971 23 model.dropout_1 0.21256976613339035 +971 23 model.dropout_2 0.23557989632697374 +971 23 optimizer.lr 0.08084640138158017 +971 23 training.batch_size 1.0 +971 23 training.label_smoothing 0.526185586627614 +971 24 model.embedding_dim 0.0 +971 24 model.relation_dim 0.0 +971 24 model.dropout_0 0.4401084547989761 +971 24 model.dropout_1 0.41280614108330194 +971 24 model.dropout_2 0.3553299556207753 +971 24 optimizer.lr 0.0053198314328220505 +971 24 training.batch_size 2.0 +971 24 training.label_smoothing 
0.00897809427732229 +971 25 model.embedding_dim 2.0 +971 25 model.relation_dim 1.0 +971 25 model.dropout_0 0.47905493346496675 +971 25 model.dropout_1 0.10375563360224499 +971 25 model.dropout_2 0.2648798231824951 +971 25 optimizer.lr 0.016394773938409133 +971 25 training.batch_size 1.0 +971 25 training.label_smoothing 0.06970313227875241 +971 26 model.embedding_dim 2.0 +971 26 model.relation_dim 2.0 +971 26 model.dropout_0 0.45150355676609816 +971 26 model.dropout_1 0.2133452153457204 +971 26 model.dropout_2 0.19263186920864134 +971 26 optimizer.lr 0.0012917732248348725 +971 26 training.batch_size 2.0 +971 26 training.label_smoothing 0.008367193323947248 +971 27 model.embedding_dim 1.0 +971 27 model.relation_dim 2.0 +971 27 model.dropout_0 0.31888453201402994 +971 27 model.dropout_1 0.15105135904270972 +971 27 model.dropout_2 0.2446253081814581 +971 27 optimizer.lr 0.0018256127199785855 +971 27 training.batch_size 2.0 +971 27 training.label_smoothing 0.2124428449759043 +971 28 model.embedding_dim 0.0 +971 28 model.relation_dim 0.0 +971 28 model.dropout_0 0.4422150741435704 +971 28 model.dropout_1 0.15848252743865276 +971 28 model.dropout_2 0.24058629481579216 +971 28 optimizer.lr 0.0010413844756362643 +971 28 training.batch_size 0.0 +971 28 training.label_smoothing 0.005373958713517433 +971 29 model.embedding_dim 1.0 +971 29 model.relation_dim 1.0 +971 29 model.dropout_0 0.4811812806714179 +971 29 model.dropout_1 0.4575608848190795 +971 29 model.dropout_2 0.21645333317930074 +971 29 optimizer.lr 0.0019479661537749216 +971 29 training.batch_size 1.0 +971 29 training.label_smoothing 0.01833496035429407 +971 30 model.embedding_dim 0.0 +971 30 model.relation_dim 1.0 +971 30 model.dropout_0 0.48112464809054173 +971 30 model.dropout_1 0.16146271949073007 +971 30 model.dropout_2 0.3957293072498678 +971 30 optimizer.lr 0.03586017936611896 +971 30 training.batch_size 0.0 +971 30 training.label_smoothing 0.2726711122525201 +971 31 model.embedding_dim 2.0 +971 31 
model.relation_dim 0.0 +971 31 model.dropout_0 0.4856602470522136 +971 31 model.dropout_1 0.23149978663703052 +971 31 model.dropout_2 0.2992246210090643 +971 31 optimizer.lr 0.01503374300111881 +971 31 training.batch_size 0.0 +971 31 training.label_smoothing 0.009519059631351433 +971 32 model.embedding_dim 2.0 +971 32 model.relation_dim 0.0 +971 32 model.dropout_0 0.2806579014765242 +971 32 model.dropout_1 0.2616332645108356 +971 32 model.dropout_2 0.3798205149137545 +971 32 optimizer.lr 0.0029645442865642757 +971 32 training.batch_size 1.0 +971 32 training.label_smoothing 0.005350849355949011 +971 33 model.embedding_dim 2.0 +971 33 model.relation_dim 0.0 +971 33 model.dropout_0 0.48096259110928097 +971 33 model.dropout_1 0.24328365273749875 +971 33 model.dropout_2 0.20052431693154815 +971 33 optimizer.lr 0.001494496774874901 +971 33 training.batch_size 0.0 +971 33 training.label_smoothing 0.02420038523296675 +971 34 model.embedding_dim 1.0 +971 34 model.relation_dim 0.0 +971 34 model.dropout_0 0.46113634079561205 +971 34 model.dropout_1 0.16536055454076398 +971 34 model.dropout_2 0.3015346777864317 +971 34 optimizer.lr 0.0018331839274199025 +971 34 training.batch_size 1.0 +971 34 training.label_smoothing 0.26833539347991286 +971 35 model.embedding_dim 1.0 +971 35 model.relation_dim 2.0 +971 35 model.dropout_0 0.4389376971436541 +971 35 model.dropout_1 0.4641386493519428 +971 35 model.dropout_2 0.3619663282151072 +971 35 optimizer.lr 0.054428404162873735 +971 35 training.batch_size 0.0 +971 35 training.label_smoothing 0.002835601308584617 +971 36 model.embedding_dim 2.0 +971 36 model.relation_dim 0.0 +971 36 model.dropout_0 0.3669303626712985 +971 36 model.dropout_1 0.48645724895181863 +971 36 model.dropout_2 0.3741053601184767 +971 36 optimizer.lr 0.06441480828509487 +971 36 training.batch_size 2.0 +971 36 training.label_smoothing 0.1696877053403767 +971 37 model.embedding_dim 0.0 +971 37 model.relation_dim 2.0 +971 37 model.dropout_0 0.12254732154267818 +971 37 
model.dropout_1 0.18731521432970047 +971 37 model.dropout_2 0.3809856706963366 +971 37 optimizer.lr 0.07369790692381074 +971 37 training.batch_size 0.0 +971 37 training.label_smoothing 0.9191468784154702 +971 38 model.embedding_dim 0.0 +971 38 model.relation_dim 2.0 +971 38 model.dropout_0 0.4102010973268888 +971 38 model.dropout_1 0.174582318551969 +971 38 model.dropout_2 0.44776208816974794 +971 38 optimizer.lr 0.0402276162371022 +971 38 training.batch_size 2.0 +971 38 training.label_smoothing 0.006803795885758916 +971 39 model.embedding_dim 2.0 +971 39 model.relation_dim 1.0 +971 39 model.dropout_0 0.3207155925209486 +971 39 model.dropout_1 0.14443617029533748 +971 39 model.dropout_2 0.2712438826948899 +971 39 optimizer.lr 0.0010338866815106254 +971 39 training.batch_size 0.0 +971 39 training.label_smoothing 0.1174109758193224 +971 40 model.embedding_dim 2.0 +971 40 model.relation_dim 1.0 +971 40 model.dropout_0 0.4888542048198532 +971 40 model.dropout_1 0.2548011731335057 +971 40 model.dropout_2 0.2896299790064706 +971 40 optimizer.lr 0.03390526243920588 +971 40 training.batch_size 1.0 +971 40 training.label_smoothing 0.0014283406867128905 +971 41 model.embedding_dim 2.0 +971 41 model.relation_dim 1.0 +971 41 model.dropout_0 0.43993734119382044 +971 41 model.dropout_1 0.10899726179747377 +971 41 model.dropout_2 0.24924357275827014 +971 41 optimizer.lr 0.03458214724815245 +971 41 training.batch_size 2.0 +971 41 training.label_smoothing 0.010424745677321295 +971 42 model.embedding_dim 1.0 +971 42 model.relation_dim 1.0 +971 42 model.dropout_0 0.44406449135202397 +971 42 model.dropout_1 0.34641115917171567 +971 42 model.dropout_2 0.2599944195927533 +971 42 optimizer.lr 0.007241012313702543 +971 42 training.batch_size 0.0 +971 42 training.label_smoothing 0.023871910984555152 +971 43 model.embedding_dim 2.0 +971 43 model.relation_dim 0.0 +971 43 model.dropout_0 0.2383112784747922 +971 43 model.dropout_1 0.37950023989249965 +971 43 model.dropout_2 0.14462107386243886 
+971 43 optimizer.lr 0.009441690729318676 +971 43 training.batch_size 1.0 +971 43 training.label_smoothing 0.14497407064423798 +971 44 model.embedding_dim 0.0 +971 44 model.relation_dim 0.0 +971 44 model.dropout_0 0.2667308491281414 +971 44 model.dropout_1 0.37564053565962985 +971 44 model.dropout_2 0.3571915466455972 +971 44 optimizer.lr 0.0011853035085683358 +971 44 training.batch_size 1.0 +971 44 training.label_smoothing 0.0046124015745348575 +971 45 model.embedding_dim 2.0 +971 45 model.relation_dim 2.0 +971 45 model.dropout_0 0.42637846422526554 +971 45 model.dropout_1 0.10242073756274156 +971 45 model.dropout_2 0.11827393847207818 +971 45 optimizer.lr 0.03877768620684948 +971 45 training.batch_size 0.0 +971 45 training.label_smoothing 0.01396531484702198 +971 46 model.embedding_dim 2.0 +971 46 model.relation_dim 2.0 +971 46 model.dropout_0 0.13203314072636793 +971 46 model.dropout_1 0.3413313719036695 +971 46 model.dropout_2 0.15092421156426444 +971 46 optimizer.lr 0.012448775407268079 +971 46 training.batch_size 2.0 +971 46 training.label_smoothing 0.005743749634757002 +971 47 model.embedding_dim 1.0 +971 47 model.relation_dim 0.0 +971 47 model.dropout_0 0.4500284560541732 +971 47 model.dropout_1 0.3278184350200958 +971 47 model.dropout_2 0.45652877136969905 +971 47 optimizer.lr 0.005014754204321021 +971 47 training.batch_size 0.0 +971 47 training.label_smoothing 0.779739193299236 +971 48 model.embedding_dim 2.0 +971 48 model.relation_dim 0.0 +971 48 model.dropout_0 0.14392790764058527 +971 48 model.dropout_1 0.41838320689517833 +971 48 model.dropout_2 0.34934191565957884 +971 48 optimizer.lr 0.04470263759917307 +971 48 training.batch_size 1.0 +971 48 training.label_smoothing 0.014773667795960833 +971 49 model.embedding_dim 2.0 +971 49 model.relation_dim 0.0 +971 49 model.dropout_0 0.4482694237521729 +971 49 model.dropout_1 0.4498492100853937 +971 49 model.dropout_2 0.3909257346137582 +971 49 optimizer.lr 0.08993560734639133 +971 49 training.batch_size 0.0 
+971 49 training.label_smoothing 0.010485098013864278 +971 50 model.embedding_dim 0.0 +971 50 model.relation_dim 1.0 +971 50 model.dropout_0 0.30757593304433856 +971 50 model.dropout_1 0.11706694666516984 +971 50 model.dropout_2 0.42794322031481 +971 50 optimizer.lr 0.012108359512887273 +971 50 training.batch_size 0.0 +971 50 training.label_smoothing 0.5677170820103152 +971 51 model.embedding_dim 0.0 +971 51 model.relation_dim 2.0 +971 51 model.dropout_0 0.4184239169199143 +971 51 model.dropout_1 0.3002972855711383 +971 51 model.dropout_2 0.39583753514061004 +971 51 optimizer.lr 0.010193602846482073 +971 51 training.batch_size 2.0 +971 51 training.label_smoothing 0.042439864429058574 +971 52 model.embedding_dim 0.0 +971 52 model.relation_dim 2.0 +971 52 model.dropout_0 0.26349511178264207 +971 52 model.dropout_1 0.13964986874443372 +971 52 model.dropout_2 0.269961242250088 +971 52 optimizer.lr 0.01102977806134235 +971 52 training.batch_size 2.0 +971 52 training.label_smoothing 0.059622650848009655 +971 53 model.embedding_dim 1.0 +971 53 model.relation_dim 0.0 +971 53 model.dropout_0 0.15994549706013672 +971 53 model.dropout_1 0.35332309729174827 +971 53 model.dropout_2 0.1642150189854834 +971 53 optimizer.lr 0.0020523138938230347 +971 53 training.batch_size 2.0 +971 53 training.label_smoothing 0.0012505322237846119 +971 54 model.embedding_dim 2.0 +971 54 model.relation_dim 1.0 +971 54 model.dropout_0 0.2421356902843964 +971 54 model.dropout_1 0.29879710761915595 +971 54 model.dropout_2 0.4879569285986101 +971 54 optimizer.lr 0.012074587266737161 +971 54 training.batch_size 1.0 +971 54 training.label_smoothing 0.043327197490792134 +971 55 model.embedding_dim 1.0 +971 55 model.relation_dim 1.0 +971 55 model.dropout_0 0.20863704116017676 +971 55 model.dropout_1 0.303495145601046 +971 55 model.dropout_2 0.3214120301441194 +971 55 optimizer.lr 0.01422637285862457 +971 55 training.batch_size 2.0 +971 55 training.label_smoothing 0.4614003771732758 +971 56 
model.embedding_dim 2.0 +971 56 model.relation_dim 1.0 +971 56 model.dropout_0 0.23718083286583527 +971 56 model.dropout_1 0.3736811612391948 +971 56 model.dropout_2 0.34329356856535664 +971 56 optimizer.lr 0.04086292239041978 +971 56 training.batch_size 2.0 +971 56 training.label_smoothing 0.0010554803563896788 +971 57 model.embedding_dim 2.0 +971 57 model.relation_dim 1.0 +971 57 model.dropout_0 0.2635635615137229 +971 57 model.dropout_1 0.450119264213313 +971 57 model.dropout_2 0.1694973026088189 +971 57 optimizer.lr 0.012726204276719345 +971 57 training.batch_size 2.0 +971 57 training.label_smoothing 0.003322689114142353 +971 58 model.embedding_dim 1.0 +971 58 model.relation_dim 1.0 +971 58 model.dropout_0 0.44565996023943405 +971 58 model.dropout_1 0.33143488669626175 +971 58 model.dropout_2 0.24926867254596213 +971 58 optimizer.lr 0.004005435650480766 +971 58 training.batch_size 2.0 +971 58 training.label_smoothing 0.014969987035760572 +971 59 model.embedding_dim 0.0 +971 59 model.relation_dim 1.0 +971 59 model.dropout_0 0.40276792673156186 +971 59 model.dropout_1 0.49326965997631844 +971 59 model.dropout_2 0.4970649407203341 +971 59 optimizer.lr 0.0032658233195093462 +971 59 training.batch_size 1.0 +971 59 training.label_smoothing 0.17259642924081356 +971 60 model.embedding_dim 1.0 +971 60 model.relation_dim 2.0 +971 60 model.dropout_0 0.25349880004340986 +971 60 model.dropout_1 0.14237968615352625 +971 60 model.dropout_2 0.10209186495666606 +971 60 optimizer.lr 0.007770038689022693 +971 60 training.batch_size 2.0 +971 60 training.label_smoothing 0.001506292300642169 +971 61 model.embedding_dim 0.0 +971 61 model.relation_dim 2.0 +971 61 model.dropout_0 0.14143899135592028 +971 61 model.dropout_1 0.23726199864770559 +971 61 model.dropout_2 0.12274840453376906 +971 61 optimizer.lr 0.004940348299543673 +971 61 training.batch_size 1.0 +971 61 training.label_smoothing 0.06450537795612585 +971 62 model.embedding_dim 1.0 +971 62 model.relation_dim 1.0 +971 62 
model.dropout_0 0.43109241126197606 +971 62 model.dropout_1 0.16538044584995137 +971 62 model.dropout_2 0.38976061016231944 +971 62 optimizer.lr 0.007288424738976067 +971 62 training.batch_size 0.0 +971 62 training.label_smoothing 0.05909998124222517 +971 63 model.embedding_dim 2.0 +971 63 model.relation_dim 0.0 +971 63 model.dropout_0 0.2636800577909196 +971 63 model.dropout_1 0.19018904108914342 +971 63 model.dropout_2 0.13274230285403746 +971 63 optimizer.lr 0.007450269228583352 +971 63 training.batch_size 2.0 +971 63 training.label_smoothing 0.003783548988502654 +971 64 model.embedding_dim 1.0 +971 64 model.relation_dim 1.0 +971 64 model.dropout_0 0.4613057385090277 +971 64 model.dropout_1 0.382381169835798 +971 64 model.dropout_2 0.4847755263627895 +971 64 optimizer.lr 0.001108507917872265 +971 64 training.batch_size 2.0 +971 64 training.label_smoothing 0.009540844858791257 +971 65 model.embedding_dim 2.0 +971 65 model.relation_dim 0.0 +971 65 model.dropout_0 0.10570945255684211 +971 65 model.dropout_1 0.3328767966370452 +971 65 model.dropout_2 0.3920463694822462 +971 65 optimizer.lr 0.0013920696621154921 +971 65 training.batch_size 2.0 +971 65 training.label_smoothing 0.26590250455156667 +971 66 model.embedding_dim 1.0 +971 66 model.relation_dim 2.0 +971 66 model.dropout_0 0.23295666614287783 +971 66 model.dropout_1 0.32390838444778147 +971 66 model.dropout_2 0.492623308903136 +971 66 optimizer.lr 0.020860611796973797 +971 66 training.batch_size 2.0 +971 66 training.label_smoothing 0.37912641620986803 +971 67 model.embedding_dim 0.0 +971 67 model.relation_dim 0.0 +971 67 model.dropout_0 0.33721236421011225 +971 67 model.dropout_1 0.1894390911009023 +971 67 model.dropout_2 0.41413244039607666 +971 67 optimizer.lr 0.007134319566651412 +971 67 training.batch_size 2.0 +971 67 training.label_smoothing 0.6926212481687702 +971 68 model.embedding_dim 1.0 +971 68 model.relation_dim 2.0 +971 68 model.dropout_0 0.4840103609060716 +971 68 model.dropout_1 
0.2398149129608049 +971 68 model.dropout_2 0.16320986098837903 +971 68 optimizer.lr 0.01698980578726469 +971 68 training.batch_size 0.0 +971 68 training.label_smoothing 0.20469255013298282 +971 69 model.embedding_dim 2.0 +971 69 model.relation_dim 2.0 +971 69 model.dropout_0 0.3409336913040003 +971 69 model.dropout_1 0.49635886951056485 +971 69 model.dropout_2 0.19774747845414062 +971 69 optimizer.lr 0.004614784900433789 +971 69 training.batch_size 0.0 +971 69 training.label_smoothing 0.007071759690693141 +971 70 model.embedding_dim 0.0 +971 70 model.relation_dim 0.0 +971 70 model.dropout_0 0.28116660437474417 +971 70 model.dropout_1 0.4963077061479535 +971 70 model.dropout_2 0.3086208562252665 +971 70 optimizer.lr 0.0016408875625312724 +971 70 training.batch_size 0.0 +971 70 training.label_smoothing 0.009568472067486672 +971 71 model.embedding_dim 2.0 +971 71 model.relation_dim 2.0 +971 71 model.dropout_0 0.20135358811245735 +971 71 model.dropout_1 0.11357622232573666 +971 71 model.dropout_2 0.439934078151913 +971 71 optimizer.lr 0.00973738754476986 +971 71 training.batch_size 0.0 +971 71 training.label_smoothing 0.10341825376430946 +971 72 model.embedding_dim 0.0 +971 72 model.relation_dim 2.0 +971 72 model.dropout_0 0.4046608889074234 +971 72 model.dropout_1 0.41070364356979766 +971 72 model.dropout_2 0.3206882268773062 +971 72 optimizer.lr 0.009822794257030617 +971 72 training.batch_size 1.0 +971 72 training.label_smoothing 0.36657551721520093 +971 73 model.embedding_dim 1.0 +971 73 model.relation_dim 2.0 +971 73 model.dropout_0 0.11933737336163329 +971 73 model.dropout_1 0.46433786860998083 +971 73 model.dropout_2 0.11316655665981044 +971 73 optimizer.lr 0.0014737710668779461 +971 73 training.batch_size 0.0 +971 73 training.label_smoothing 0.062211949551279545 +971 74 model.embedding_dim 1.0 +971 74 model.relation_dim 1.0 +971 74 model.dropout_0 0.3813956671297891 +971 74 model.dropout_1 0.1966200291138463 +971 74 model.dropout_2 0.27310137397963963 +971 74 
optimizer.lr 0.0010451312459657934 +971 74 training.batch_size 0.0 +971 74 training.label_smoothing 0.6990388026475841 +971 75 model.embedding_dim 2.0 +971 75 model.relation_dim 2.0 +971 75 model.dropout_0 0.43742043099140804 +971 75 model.dropout_1 0.3992423560038548 +971 75 model.dropout_2 0.3919245501507212 +971 75 optimizer.lr 0.09567415731809471 +971 75 training.batch_size 1.0 +971 75 training.label_smoothing 0.0010239868558764677 +971 76 model.embedding_dim 2.0 +971 76 model.relation_dim 0.0 +971 76 model.dropout_0 0.17728696868378824 +971 76 model.dropout_1 0.16839426614728537 +971 76 model.dropout_2 0.1617542329667749 +971 76 optimizer.lr 0.004449771666000126 +971 76 training.batch_size 0.0 +971 76 training.label_smoothing 0.9691667299232699 +971 77 model.embedding_dim 1.0 +971 77 model.relation_dim 1.0 +971 77 model.dropout_0 0.48532788576738195 +971 77 model.dropout_1 0.37759528707877477 +971 77 model.dropout_2 0.20360627247072977 +971 77 optimizer.lr 0.029790249026135536 +971 77 training.batch_size 0.0 +971 77 training.label_smoothing 0.22198337781261424 +971 78 model.embedding_dim 0.0 +971 78 model.relation_dim 2.0 +971 78 model.dropout_0 0.3512978297088013 +971 78 model.dropout_1 0.19989928249059996 +971 78 model.dropout_2 0.40308333951494113 +971 78 optimizer.lr 0.006690143606099592 +971 78 training.batch_size 1.0 +971 78 training.label_smoothing 0.2994583192489357 +971 79 model.embedding_dim 1.0 +971 79 model.relation_dim 2.0 +971 79 model.dropout_0 0.35552931907855717 +971 79 model.dropout_1 0.31302163426043184 +971 79 model.dropout_2 0.49132154144046447 +971 79 optimizer.lr 0.019435120249627475 +971 79 training.batch_size 1.0 +971 79 training.label_smoothing 0.018554249950288322 +971 80 model.embedding_dim 1.0 +971 80 model.relation_dim 1.0 +971 80 model.dropout_0 0.3448138842272718 +971 80 model.dropout_1 0.3338885461807956 +971 80 model.dropout_2 0.44418052129575614 +971 80 optimizer.lr 0.0020213797774464182 +971 80 training.batch_size 1.0 +971 
80 training.label_smoothing 0.007619085020452419 +971 81 model.embedding_dim 0.0 +971 81 model.relation_dim 2.0 +971 81 model.dropout_0 0.3940063802554329 +971 81 model.dropout_1 0.21367335798621154 +971 81 model.dropout_2 0.45832697656789223 +971 81 optimizer.lr 0.0023627355603790647 +971 81 training.batch_size 1.0 +971 81 training.label_smoothing 0.008301786127412187 +971 82 model.embedding_dim 0.0 +971 82 model.relation_dim 1.0 +971 82 model.dropout_0 0.18185545809848774 +971 82 model.dropout_1 0.11848777243137501 +971 82 model.dropout_2 0.3211102724991681 +971 82 optimizer.lr 0.00316876438554267 +971 82 training.batch_size 0.0 +971 82 training.label_smoothing 0.44652992856511525 +971 83 model.embedding_dim 1.0 +971 83 model.relation_dim 1.0 +971 83 model.dropout_0 0.44112791439810944 +971 83 model.dropout_1 0.12096446768198305 +971 83 model.dropout_2 0.41241355397036233 +971 83 optimizer.lr 0.0015197068502568539 +971 83 training.batch_size 0.0 +971 83 training.label_smoothing 0.05292184641666608 +971 84 model.embedding_dim 1.0 +971 84 model.relation_dim 2.0 +971 84 model.dropout_0 0.13342296660619685 +971 84 model.dropout_1 0.24787668462668813 +971 84 model.dropout_2 0.10977091805977271 +971 84 optimizer.lr 0.003299600809596609 +971 84 training.batch_size 2.0 +971 84 training.label_smoothing 0.003872967482676558 +971 85 model.embedding_dim 1.0 +971 85 model.relation_dim 0.0 +971 85 model.dropout_0 0.29872858897701954 +971 85 model.dropout_1 0.17018104567829162 +971 85 model.dropout_2 0.25449294240749326 +971 85 optimizer.lr 0.01283458225046811 +971 85 training.batch_size 2.0 +971 85 training.label_smoothing 0.0015002066986722381 +971 86 model.embedding_dim 1.0 +971 86 model.relation_dim 0.0 +971 86 model.dropout_0 0.35998236470234235 +971 86 model.dropout_1 0.10170936041429793 +971 86 model.dropout_2 0.15708301146232825 +971 86 optimizer.lr 0.05830268831657554 +971 86 training.batch_size 1.0 +971 86 training.label_smoothing 0.01049045201381172 +971 87 
model.embedding_dim 0.0 +971 87 model.relation_dim 1.0 +971 87 model.dropout_0 0.1838251467014667 +971 87 model.dropout_1 0.36574649467832443 +971 87 model.dropout_2 0.4268338600481922 +971 87 optimizer.lr 0.042205360019977364 +971 87 training.batch_size 1.0 +971 87 training.label_smoothing 0.03357860909096045 +971 88 model.embedding_dim 2.0 +971 88 model.relation_dim 2.0 +971 88 model.dropout_0 0.370106591275922 +971 88 model.dropout_1 0.3716958441444146 +971 88 model.dropout_2 0.3847547199832433 +971 88 optimizer.lr 0.06235138452935908 +971 88 training.batch_size 2.0 +971 88 training.label_smoothing 0.7208997689032158 +971 89 model.embedding_dim 1.0 +971 89 model.relation_dim 0.0 +971 89 model.dropout_0 0.4011866226633366 +971 89 model.dropout_1 0.4468463250455007 +971 89 model.dropout_2 0.2867875120797558 +971 89 optimizer.lr 0.028144497596136173 +971 89 training.batch_size 0.0 +971 89 training.label_smoothing 0.004212745265137789 +971 90 model.embedding_dim 2.0 +971 90 model.relation_dim 0.0 +971 90 model.dropout_0 0.39956018032925295 +971 90 model.dropout_1 0.15088804675314857 +971 90 model.dropout_2 0.2859703646501149 +971 90 optimizer.lr 0.08767432577563285 +971 90 training.batch_size 1.0 +971 90 training.label_smoothing 0.11363205877669257 +971 91 model.embedding_dim 2.0 +971 91 model.relation_dim 2.0 +971 91 model.dropout_0 0.14140602548143974 +971 91 model.dropout_1 0.41992141981896514 +971 91 model.dropout_2 0.2048867897060257 +971 91 optimizer.lr 0.0042605275380879805 +971 91 training.batch_size 1.0 +971 91 training.label_smoothing 0.18732387672434644 +971 92 model.embedding_dim 2.0 +971 92 model.relation_dim 1.0 +971 92 model.dropout_0 0.3735910345857511 +971 92 model.dropout_1 0.47286562669611915 +971 92 model.dropout_2 0.36957604646440373 +971 92 optimizer.lr 0.09649031255862936 +971 92 training.batch_size 1.0 +971 92 training.label_smoothing 0.008763831515116317 +971 93 model.embedding_dim 0.0 +971 93 model.relation_dim 0.0 +971 93 model.dropout_0 
0.20921048387567578 +971 93 model.dropout_1 0.3950422920223399 +971 93 model.dropout_2 0.12651612036688165 +971 93 optimizer.lr 0.08988326916521212 +971 93 training.batch_size 1.0 +971 93 training.label_smoothing 0.9347399930263236 +971 94 model.embedding_dim 2.0 +971 94 model.relation_dim 0.0 +971 94 model.dropout_0 0.29466752830172166 +971 94 model.dropout_1 0.1492459896224018 +971 94 model.dropout_2 0.1397525047239134 +971 94 optimizer.lr 0.06154009454286404 +971 94 training.batch_size 1.0 +971 94 training.label_smoothing 0.047720047801812986 +971 95 model.embedding_dim 2.0 +971 95 model.relation_dim 1.0 +971 95 model.dropout_0 0.19949559956522733 +971 95 model.dropout_1 0.47901585447483436 +971 95 model.dropout_2 0.39419401346772576 +971 95 optimizer.lr 0.05579192389838365 +971 95 training.batch_size 0.0 +971 95 training.label_smoothing 0.4367931491053231 +971 96 model.embedding_dim 0.0 +971 96 model.relation_dim 2.0 +971 96 model.dropout_0 0.27633162378727844 +971 96 model.dropout_1 0.2662327466322185 +971 96 model.dropout_2 0.22253280650314325 +971 96 optimizer.lr 0.04244833658114738 +971 96 training.batch_size 2.0 +971 96 training.label_smoothing 0.048138227583702724 +971 97 model.embedding_dim 1.0 +971 97 model.relation_dim 0.0 +971 97 model.dropout_0 0.41269914728324864 +971 97 model.dropout_1 0.17184632136225783 +971 97 model.dropout_2 0.4825535001577158 +971 97 optimizer.lr 0.0018298573797915934 +971 97 training.batch_size 1.0 +971 97 training.label_smoothing 0.23229784860017122 +971 98 model.embedding_dim 1.0 +971 98 model.relation_dim 2.0 +971 98 model.dropout_0 0.11041109127047291 +971 98 model.dropout_1 0.10116135911932007 +971 98 model.dropout_2 0.28258464538601025 +971 98 optimizer.lr 0.04343030149706024 +971 98 training.batch_size 1.0 +971 98 training.label_smoothing 0.0010103035627049954 +971 99 model.embedding_dim 0.0 +971 99 model.relation_dim 0.0 +971 99 model.dropout_0 0.28051209761881246 +971 99 model.dropout_1 0.2279840478421357 +971 99 
model.dropout_2 0.19485778878033244 +971 99 optimizer.lr 0.009642068516373281 +971 99 training.batch_size 1.0 +971 99 training.label_smoothing 0.02199611381614698 +971 100 model.embedding_dim 1.0 +971 100 model.relation_dim 2.0 +971 100 model.dropout_0 0.3282628512584446 +971 100 model.dropout_1 0.27990502643713444 +971 100 model.dropout_2 0.38214161145179587 +971 100 optimizer.lr 0.03316200353259251 +971 100 training.batch_size 2.0 +971 100 training.label_smoothing 0.7235487410603609 +971 1 dataset """kinships""" +971 1 model """tucker""" +971 1 loss """crossentropy""" +971 1 regularizer """no""" +971 1 optimizer """adam""" +971 1 training_loop """lcwa""" +971 1 evaluator """rankbased""" +971 2 dataset """kinships""" +971 2 model """tucker""" +971 2 loss """crossentropy""" +971 2 regularizer """no""" +971 2 optimizer """adam""" +971 2 training_loop """lcwa""" +971 2 evaluator """rankbased""" +971 3 dataset """kinships""" +971 3 model """tucker""" +971 3 loss """crossentropy""" +971 3 regularizer """no""" +971 3 optimizer """adam""" +971 3 training_loop """lcwa""" +971 3 evaluator """rankbased""" +971 4 dataset """kinships""" +971 4 model """tucker""" +971 4 loss """crossentropy""" +971 4 regularizer """no""" +971 4 optimizer """adam""" +971 4 training_loop """lcwa""" +971 4 evaluator """rankbased""" +971 5 dataset """kinships""" +971 5 model """tucker""" +971 5 loss """crossentropy""" +971 5 regularizer """no""" +971 5 optimizer """adam""" +971 5 training_loop """lcwa""" +971 5 evaluator """rankbased""" +971 6 dataset """kinships""" +971 6 model """tucker""" +971 6 loss """crossentropy""" +971 6 regularizer """no""" +971 6 optimizer """adam""" +971 6 training_loop """lcwa""" +971 6 evaluator """rankbased""" +971 7 dataset """kinships""" +971 7 model """tucker""" +971 7 loss """crossentropy""" +971 7 regularizer """no""" +971 7 optimizer """adam""" +971 7 training_loop """lcwa""" +971 7 evaluator """rankbased""" +971 8 dataset """kinships""" +971 8 model 
"""tucker""" +971 8 loss """crossentropy""" +971 8 regularizer """no""" +971 8 optimizer """adam""" +971 8 training_loop """lcwa""" +971 8 evaluator """rankbased""" +971 9 dataset """kinships""" +971 9 model """tucker""" +971 9 loss """crossentropy""" +971 9 regularizer """no""" +971 9 optimizer """adam""" +971 9 training_loop """lcwa""" +971 9 evaluator """rankbased""" +971 10 dataset """kinships""" +971 10 model """tucker""" +971 10 loss """crossentropy""" +971 10 regularizer """no""" +971 10 optimizer """adam""" +971 10 training_loop """lcwa""" +971 10 evaluator """rankbased""" +971 11 dataset """kinships""" +971 11 model """tucker""" +971 11 loss """crossentropy""" +971 11 regularizer """no""" +971 11 optimizer """adam""" +971 11 training_loop """lcwa""" +971 11 evaluator """rankbased""" +971 12 dataset """kinships""" +971 12 model """tucker""" +971 12 loss """crossentropy""" +971 12 regularizer """no""" +971 12 optimizer """adam""" +971 12 training_loop """lcwa""" +971 12 evaluator """rankbased""" +971 13 dataset """kinships""" +971 13 model """tucker""" +971 13 loss """crossentropy""" +971 13 regularizer """no""" +971 13 optimizer """adam""" +971 13 training_loop """lcwa""" +971 13 evaluator """rankbased""" +971 14 dataset """kinships""" +971 14 model """tucker""" +971 14 loss """crossentropy""" +971 14 regularizer """no""" +971 14 optimizer """adam""" +971 14 training_loop """lcwa""" +971 14 evaluator """rankbased""" +971 15 dataset """kinships""" +971 15 model """tucker""" +971 15 loss """crossentropy""" +971 15 regularizer """no""" +971 15 optimizer """adam""" +971 15 training_loop """lcwa""" +971 15 evaluator """rankbased""" +971 16 dataset """kinships""" +971 16 model """tucker""" +971 16 loss """crossentropy""" +971 16 regularizer """no""" +971 16 optimizer """adam""" +971 16 training_loop """lcwa""" +971 16 evaluator """rankbased""" +971 17 dataset """kinships""" +971 17 model """tucker""" +971 17 loss """crossentropy""" +971 17 regularizer """no""" 
+971 17 optimizer """adam""" +971 17 training_loop """lcwa""" +971 17 evaluator """rankbased""" +971 18 dataset """kinships""" +971 18 model """tucker""" +971 18 loss """crossentropy""" +971 18 regularizer """no""" +971 18 optimizer """adam""" +971 18 training_loop """lcwa""" +971 18 evaluator """rankbased""" +971 19 dataset """kinships""" +971 19 model """tucker""" +971 19 loss """crossentropy""" +971 19 regularizer """no""" +971 19 optimizer """adam""" +971 19 training_loop """lcwa""" +971 19 evaluator """rankbased""" +971 20 dataset """kinships""" +971 20 model """tucker""" +971 20 loss """crossentropy""" +971 20 regularizer """no""" +971 20 optimizer """adam""" +971 20 training_loop """lcwa""" +971 20 evaluator """rankbased""" +971 21 dataset """kinships""" +971 21 model """tucker""" +971 21 loss """crossentropy""" +971 21 regularizer """no""" +971 21 optimizer """adam""" +971 21 training_loop """lcwa""" +971 21 evaluator """rankbased""" +971 22 dataset """kinships""" +971 22 model """tucker""" +971 22 loss """crossentropy""" +971 22 regularizer """no""" +971 22 optimizer """adam""" +971 22 training_loop """lcwa""" +971 22 evaluator """rankbased""" +971 23 dataset """kinships""" +971 23 model """tucker""" +971 23 loss """crossentropy""" +971 23 regularizer """no""" +971 23 optimizer """adam""" +971 23 training_loop """lcwa""" +971 23 evaluator """rankbased""" +971 24 dataset """kinships""" +971 24 model """tucker""" +971 24 loss """crossentropy""" +971 24 regularizer """no""" +971 24 optimizer """adam""" +971 24 training_loop """lcwa""" +971 24 evaluator """rankbased""" +971 25 dataset """kinships""" +971 25 model """tucker""" +971 25 loss """crossentropy""" +971 25 regularizer """no""" +971 25 optimizer """adam""" +971 25 training_loop """lcwa""" +971 25 evaluator """rankbased""" +971 26 dataset """kinships""" +971 26 model """tucker""" +971 26 loss """crossentropy""" +971 26 regularizer """no""" +971 26 optimizer """adam""" +971 26 training_loop """lcwa""" 
+971 26 evaluator """rankbased""" +971 27 dataset """kinships""" +971 27 model """tucker""" +971 27 loss """crossentropy""" +971 27 regularizer """no""" +971 27 optimizer """adam""" +971 27 training_loop """lcwa""" +971 27 evaluator """rankbased""" +971 28 dataset """kinships""" +971 28 model """tucker""" +971 28 loss """crossentropy""" +971 28 regularizer """no""" +971 28 optimizer """adam""" +971 28 training_loop """lcwa""" +971 28 evaluator """rankbased""" +971 29 dataset """kinships""" +971 29 model """tucker""" +971 29 loss """crossentropy""" +971 29 regularizer """no""" +971 29 optimizer """adam""" +971 29 training_loop """lcwa""" +971 29 evaluator """rankbased""" +971 30 dataset """kinships""" +971 30 model """tucker""" +971 30 loss """crossentropy""" +971 30 regularizer """no""" +971 30 optimizer """adam""" +971 30 training_loop """lcwa""" +971 30 evaluator """rankbased""" +971 31 dataset """kinships""" +971 31 model """tucker""" +971 31 loss """crossentropy""" +971 31 regularizer """no""" +971 31 optimizer """adam""" +971 31 training_loop """lcwa""" +971 31 evaluator """rankbased""" +971 32 dataset """kinships""" +971 32 model """tucker""" +971 32 loss """crossentropy""" +971 32 regularizer """no""" +971 32 optimizer """adam""" +971 32 training_loop """lcwa""" +971 32 evaluator """rankbased""" +971 33 dataset """kinships""" +971 33 model """tucker""" +971 33 loss """crossentropy""" +971 33 regularizer """no""" +971 33 optimizer """adam""" +971 33 training_loop """lcwa""" +971 33 evaluator """rankbased""" +971 34 dataset """kinships""" +971 34 model """tucker""" +971 34 loss """crossentropy""" +971 34 regularizer """no""" +971 34 optimizer """adam""" +971 34 training_loop """lcwa""" +971 34 evaluator """rankbased""" +971 35 dataset """kinships""" +971 35 model """tucker""" +971 35 loss """crossentropy""" +971 35 regularizer """no""" +971 35 optimizer """adam""" +971 35 training_loop """lcwa""" +971 35 evaluator """rankbased""" +971 36 dataset """kinships""" 
+971 36 model """tucker""" +971 36 loss """crossentropy""" +971 36 regularizer """no""" +971 36 optimizer """adam""" +971 36 training_loop """lcwa""" +971 36 evaluator """rankbased""" +971 37 dataset """kinships""" +971 37 model """tucker""" +971 37 loss """crossentropy""" +971 37 regularizer """no""" +971 37 optimizer """adam""" +971 37 training_loop """lcwa""" +971 37 evaluator """rankbased""" +971 38 dataset """kinships""" +971 38 model """tucker""" +971 38 loss """crossentropy""" +971 38 regularizer """no""" +971 38 optimizer """adam""" +971 38 training_loop """lcwa""" +971 38 evaluator """rankbased""" +971 39 dataset """kinships""" +971 39 model """tucker""" +971 39 loss """crossentropy""" +971 39 regularizer """no""" +971 39 optimizer """adam""" +971 39 training_loop """lcwa""" +971 39 evaluator """rankbased""" +971 40 dataset """kinships""" +971 40 model """tucker""" +971 40 loss """crossentropy""" +971 40 regularizer """no""" +971 40 optimizer """adam""" +971 40 training_loop """lcwa""" +971 40 evaluator """rankbased""" +971 41 dataset """kinships""" +971 41 model """tucker""" +971 41 loss """crossentropy""" +971 41 regularizer """no""" +971 41 optimizer """adam""" +971 41 training_loop """lcwa""" +971 41 evaluator """rankbased""" +971 42 dataset """kinships""" +971 42 model """tucker""" +971 42 loss """crossentropy""" +971 42 regularizer """no""" +971 42 optimizer """adam""" +971 42 training_loop """lcwa""" +971 42 evaluator """rankbased""" +971 43 dataset """kinships""" +971 43 model """tucker""" +971 43 loss """crossentropy""" +971 43 regularizer """no""" +971 43 optimizer """adam""" +971 43 training_loop """lcwa""" +971 43 evaluator """rankbased""" +971 44 dataset """kinships""" +971 44 model """tucker""" +971 44 loss """crossentropy""" +971 44 regularizer """no""" +971 44 optimizer """adam""" +971 44 training_loop """lcwa""" +971 44 evaluator """rankbased""" +971 45 dataset """kinships""" +971 45 model """tucker""" +971 45 loss """crossentropy""" +971 
45 regularizer """no""" +971 45 optimizer """adam""" +971 45 training_loop """lcwa""" +971 45 evaluator """rankbased""" +971 46 dataset """kinships""" +971 46 model """tucker""" +971 46 loss """crossentropy""" +971 46 regularizer """no""" +971 46 optimizer """adam""" +971 46 training_loop """lcwa""" +971 46 evaluator """rankbased""" +971 47 dataset """kinships""" +971 47 model """tucker""" +971 47 loss """crossentropy""" +971 47 regularizer """no""" +971 47 optimizer """adam""" +971 47 training_loop """lcwa""" +971 47 evaluator """rankbased""" +971 48 dataset """kinships""" +971 48 model """tucker""" +971 48 loss """crossentropy""" +971 48 regularizer """no""" +971 48 optimizer """adam""" +971 48 training_loop """lcwa""" +971 48 evaluator """rankbased""" +971 49 dataset """kinships""" +971 49 model """tucker""" +971 49 loss """crossentropy""" +971 49 regularizer """no""" +971 49 optimizer """adam""" +971 49 training_loop """lcwa""" +971 49 evaluator """rankbased""" +971 50 dataset """kinships""" +971 50 model """tucker""" +971 50 loss """crossentropy""" +971 50 regularizer """no""" +971 50 optimizer """adam""" +971 50 training_loop """lcwa""" +971 50 evaluator """rankbased""" +971 51 dataset """kinships""" +971 51 model """tucker""" +971 51 loss """crossentropy""" +971 51 regularizer """no""" +971 51 optimizer """adam""" +971 51 training_loop """lcwa""" +971 51 evaluator """rankbased""" +971 52 dataset """kinships""" +971 52 model """tucker""" +971 52 loss """crossentropy""" +971 52 regularizer """no""" +971 52 optimizer """adam""" +971 52 training_loop """lcwa""" +971 52 evaluator """rankbased""" +971 53 dataset """kinships""" +971 53 model """tucker""" +971 53 loss """crossentropy""" +971 53 regularizer """no""" +971 53 optimizer """adam""" +971 53 training_loop """lcwa""" +971 53 evaluator """rankbased""" +971 54 dataset """kinships""" +971 54 model """tucker""" +971 54 loss """crossentropy""" +971 54 regularizer """no""" +971 54 optimizer """adam""" +971 54 
training_loop """lcwa""" +971 54 evaluator """rankbased""" +971 55 dataset """kinships""" +971 55 model """tucker""" +971 55 loss """crossentropy""" +971 55 regularizer """no""" +971 55 optimizer """adam""" +971 55 training_loop """lcwa""" +971 55 evaluator """rankbased""" +971 56 dataset """kinships""" +971 56 model """tucker""" +971 56 loss """crossentropy""" +971 56 regularizer """no""" +971 56 optimizer """adam""" +971 56 training_loop """lcwa""" +971 56 evaluator """rankbased""" +971 57 dataset """kinships""" +971 57 model """tucker""" +971 57 loss """crossentropy""" +971 57 regularizer """no""" +971 57 optimizer """adam""" +971 57 training_loop """lcwa""" +971 57 evaluator """rankbased""" +971 58 dataset """kinships""" +971 58 model """tucker""" +971 58 loss """crossentropy""" +971 58 regularizer """no""" +971 58 optimizer """adam""" +971 58 training_loop """lcwa""" +971 58 evaluator """rankbased""" +971 59 dataset """kinships""" +971 59 model """tucker""" +971 59 loss """crossentropy""" +971 59 regularizer """no""" +971 59 optimizer """adam""" +971 59 training_loop """lcwa""" +971 59 evaluator """rankbased""" +971 60 dataset """kinships""" +971 60 model """tucker""" +971 60 loss """crossentropy""" +971 60 regularizer """no""" +971 60 optimizer """adam""" +971 60 training_loop """lcwa""" +971 60 evaluator """rankbased""" +971 61 dataset """kinships""" +971 61 model """tucker""" +971 61 loss """crossentropy""" +971 61 regularizer """no""" +971 61 optimizer """adam""" +971 61 training_loop """lcwa""" +971 61 evaluator """rankbased""" +971 62 dataset """kinships""" +971 62 model """tucker""" +971 62 loss """crossentropy""" +971 62 regularizer """no""" +971 62 optimizer """adam""" +971 62 training_loop """lcwa""" +971 62 evaluator """rankbased""" +971 63 dataset """kinships""" +971 63 model """tucker""" +971 63 loss """crossentropy""" +971 63 regularizer """no""" +971 63 optimizer """adam""" +971 63 training_loop """lcwa""" +971 63 evaluator """rankbased""" +971 
64 dataset """kinships""" +971 64 model """tucker""" +971 64 loss """crossentropy""" +971 64 regularizer """no""" +971 64 optimizer """adam""" +971 64 training_loop """lcwa""" +971 64 evaluator """rankbased""" +971 65 dataset """kinships""" +971 65 model """tucker""" +971 65 loss """crossentropy""" +971 65 regularizer """no""" +971 65 optimizer """adam""" +971 65 training_loop """lcwa""" +971 65 evaluator """rankbased""" +971 66 dataset """kinships""" +971 66 model """tucker""" +971 66 loss """crossentropy""" +971 66 regularizer """no""" +971 66 optimizer """adam""" +971 66 training_loop """lcwa""" +971 66 evaluator """rankbased""" +971 67 dataset """kinships""" +971 67 model """tucker""" +971 67 loss """crossentropy""" +971 67 regularizer """no""" +971 67 optimizer """adam""" +971 67 training_loop """lcwa""" +971 67 evaluator """rankbased""" +971 68 dataset """kinships""" +971 68 model """tucker""" +971 68 loss """crossentropy""" +971 68 regularizer """no""" +971 68 optimizer """adam""" +971 68 training_loop """lcwa""" +971 68 evaluator """rankbased""" +971 69 dataset """kinships""" +971 69 model """tucker""" +971 69 loss """crossentropy""" +971 69 regularizer """no""" +971 69 optimizer """adam""" +971 69 training_loop """lcwa""" +971 69 evaluator """rankbased""" +971 70 dataset """kinships""" +971 70 model """tucker""" +971 70 loss """crossentropy""" +971 70 regularizer """no""" +971 70 optimizer """adam""" +971 70 training_loop """lcwa""" +971 70 evaluator """rankbased""" +971 71 dataset """kinships""" +971 71 model """tucker""" +971 71 loss """crossentropy""" +971 71 regularizer """no""" +971 71 optimizer """adam""" +971 71 training_loop """lcwa""" +971 71 evaluator """rankbased""" +971 72 dataset """kinships""" +971 72 model """tucker""" +971 72 loss """crossentropy""" +971 72 regularizer """no""" +971 72 optimizer """adam""" +971 72 training_loop """lcwa""" +971 72 evaluator """rankbased""" +971 73 dataset """kinships""" +971 73 model """tucker""" +971 73 
loss """crossentropy""" +971 73 regularizer """no""" +971 73 optimizer """adam""" +971 73 training_loop """lcwa""" +971 73 evaluator """rankbased""" +971 74 dataset """kinships""" +971 74 model """tucker""" +971 74 loss """crossentropy""" +971 74 regularizer """no""" +971 74 optimizer """adam""" +971 74 training_loop """lcwa""" +971 74 evaluator """rankbased""" +971 75 dataset """kinships""" +971 75 model """tucker""" +971 75 loss """crossentropy""" +971 75 regularizer """no""" +971 75 optimizer """adam""" +971 75 training_loop """lcwa""" +971 75 evaluator """rankbased""" +971 76 dataset """kinships""" +971 76 model """tucker""" +971 76 loss """crossentropy""" +971 76 regularizer """no""" +971 76 optimizer """adam""" +971 76 training_loop """lcwa""" +971 76 evaluator """rankbased""" +971 77 dataset """kinships""" +971 77 model """tucker""" +971 77 loss """crossentropy""" +971 77 regularizer """no""" +971 77 optimizer """adam""" +971 77 training_loop """lcwa""" +971 77 evaluator """rankbased""" +971 78 dataset """kinships""" +971 78 model """tucker""" +971 78 loss """crossentropy""" +971 78 regularizer """no""" +971 78 optimizer """adam""" +971 78 training_loop """lcwa""" +971 78 evaluator """rankbased""" +971 79 dataset """kinships""" +971 79 model """tucker""" +971 79 loss """crossentropy""" +971 79 regularizer """no""" +971 79 optimizer """adam""" +971 79 training_loop """lcwa""" +971 79 evaluator """rankbased""" +971 80 dataset """kinships""" +971 80 model """tucker""" +971 80 loss """crossentropy""" +971 80 regularizer """no""" +971 80 optimizer """adam""" +971 80 training_loop """lcwa""" +971 80 evaluator """rankbased""" +971 81 dataset """kinships""" +971 81 model """tucker""" +971 81 loss """crossentropy""" +971 81 regularizer """no""" +971 81 optimizer """adam""" +971 81 training_loop """lcwa""" +971 81 evaluator """rankbased""" +971 82 dataset """kinships""" +971 82 model """tucker""" +971 82 loss """crossentropy""" +971 82 regularizer """no""" +971 82 
optimizer """adam""" +971 82 training_loop """lcwa""" +971 82 evaluator """rankbased""" +971 83 dataset """kinships""" +971 83 model """tucker""" +971 83 loss """crossentropy""" +971 83 regularizer """no""" +971 83 optimizer """adam""" +971 83 training_loop """lcwa""" +971 83 evaluator """rankbased""" +971 84 dataset """kinships""" +971 84 model """tucker""" +971 84 loss """crossentropy""" +971 84 regularizer """no""" +971 84 optimizer """adam""" +971 84 training_loop """lcwa""" +971 84 evaluator """rankbased""" +971 85 dataset """kinships""" +971 85 model """tucker""" +971 85 loss """crossentropy""" +971 85 regularizer """no""" +971 85 optimizer """adam""" +971 85 training_loop """lcwa""" +971 85 evaluator """rankbased""" +971 86 dataset """kinships""" +971 86 model """tucker""" +971 86 loss """crossentropy""" +971 86 regularizer """no""" +971 86 optimizer """adam""" +971 86 training_loop """lcwa""" +971 86 evaluator """rankbased""" +971 87 dataset """kinships""" +971 87 model """tucker""" +971 87 loss """crossentropy""" +971 87 regularizer """no""" +971 87 optimizer """adam""" +971 87 training_loop """lcwa""" +971 87 evaluator """rankbased""" +971 88 dataset """kinships""" +971 88 model """tucker""" +971 88 loss """crossentropy""" +971 88 regularizer """no""" +971 88 optimizer """adam""" +971 88 training_loop """lcwa""" +971 88 evaluator """rankbased""" +971 89 dataset """kinships""" +971 89 model """tucker""" +971 89 loss """crossentropy""" +971 89 regularizer """no""" +971 89 optimizer """adam""" +971 89 training_loop """lcwa""" +971 89 evaluator """rankbased""" +971 90 dataset """kinships""" +971 90 model """tucker""" +971 90 loss """crossentropy""" +971 90 regularizer """no""" +971 90 optimizer """adam""" +971 90 training_loop """lcwa""" +971 90 evaluator """rankbased""" +971 91 dataset """kinships""" +971 91 model """tucker""" +971 91 loss """crossentropy""" +971 91 regularizer """no""" +971 91 optimizer """adam""" +971 91 training_loop """lcwa""" +971 91 
evaluator """rankbased""" +971 92 dataset """kinships""" +971 92 model """tucker""" +971 92 loss """crossentropy""" +971 92 regularizer """no""" +971 92 optimizer """adam""" +971 92 training_loop """lcwa""" +971 92 evaluator """rankbased""" +971 93 dataset """kinships""" +971 93 model """tucker""" +971 93 loss """crossentropy""" +971 93 regularizer """no""" +971 93 optimizer """adam""" +971 93 training_loop """lcwa""" +971 93 evaluator """rankbased""" +971 94 dataset """kinships""" +971 94 model """tucker""" +971 94 loss """crossentropy""" +971 94 regularizer """no""" +971 94 optimizer """adam""" +971 94 training_loop """lcwa""" +971 94 evaluator """rankbased""" +971 95 dataset """kinships""" +971 95 model """tucker""" +971 95 loss """crossentropy""" +971 95 regularizer """no""" +971 95 optimizer """adam""" +971 95 training_loop """lcwa""" +971 95 evaluator """rankbased""" +971 96 dataset """kinships""" +971 96 model """tucker""" +971 96 loss """crossentropy""" +971 96 regularizer """no""" +971 96 optimizer """adam""" +971 96 training_loop """lcwa""" +971 96 evaluator """rankbased""" +971 97 dataset """kinships""" +971 97 model """tucker""" +971 97 loss """crossentropy""" +971 97 regularizer """no""" +971 97 optimizer """adam""" +971 97 training_loop """lcwa""" +971 97 evaluator """rankbased""" +971 98 dataset """kinships""" +971 98 model """tucker""" +971 98 loss """crossentropy""" +971 98 regularizer """no""" +971 98 optimizer """adam""" +971 98 training_loop """lcwa""" +971 98 evaluator """rankbased""" +971 99 dataset """kinships""" +971 99 model """tucker""" +971 99 loss """crossentropy""" +971 99 regularizer """no""" +971 99 optimizer """adam""" +971 99 training_loop """lcwa""" +971 99 evaluator """rankbased""" +971 100 dataset """kinships""" +971 100 model """tucker""" +971 100 loss """crossentropy""" +971 100 regularizer """no""" +971 100 optimizer """adam""" +971 100 training_loop """lcwa""" +971 100 evaluator """rankbased""" +972 1 model.embedding_dim 2.0 
+972 1 model.relation_dim 1.0 +972 1 model.dropout_0 0.4674936864674097 +972 1 model.dropout_1 0.3237723724453016 +972 1 model.dropout_2 0.28082018953429566 +972 1 optimizer.lr 0.02239739489439981 +972 1 training.batch_size 1.0 +972 1 training.label_smoothing 0.9680136909371766 +972 2 model.embedding_dim 1.0 +972 2 model.relation_dim 0.0 +972 2 model.dropout_0 0.23283334652857335 +972 2 model.dropout_1 0.3166471174338905 +972 2 model.dropout_2 0.21310450762033994 +972 2 optimizer.lr 0.003211430156936416 +972 2 training.batch_size 2.0 +972 2 training.label_smoothing 0.40238014726875576 +972 3 model.embedding_dim 2.0 +972 3 model.relation_dim 1.0 +972 3 model.dropout_0 0.4697957069251875 +972 3 model.dropout_1 0.3780693593781712 +972 3 model.dropout_2 0.42030075976018355 +972 3 optimizer.lr 0.010444478262773166 +972 3 training.batch_size 2.0 +972 3 training.label_smoothing 0.7687046280225988 +972 4 model.embedding_dim 0.0 +972 4 model.relation_dim 1.0 +972 4 model.dropout_0 0.4351697381210194 +972 4 model.dropout_1 0.4721750844102117 +972 4 model.dropout_2 0.42747100490366235 +972 4 optimizer.lr 0.010914756996256714 +972 4 training.batch_size 0.0 +972 4 training.label_smoothing 0.058044078717005086 +972 5 model.embedding_dim 0.0 +972 5 model.relation_dim 0.0 +972 5 model.dropout_0 0.29104393390689726 +972 5 model.dropout_1 0.2213533023341283 +972 5 model.dropout_2 0.1212382924436633 +972 5 optimizer.lr 0.02759337866853015 +972 5 training.batch_size 2.0 +972 5 training.label_smoothing 0.08336837131347143 +972 6 model.embedding_dim 0.0 +972 6 model.relation_dim 0.0 +972 6 model.dropout_0 0.24151938697559433 +972 6 model.dropout_1 0.4307724213887921 +972 6 model.dropout_2 0.302959768434975 +972 6 optimizer.lr 0.03384029320852965 +972 6 training.batch_size 2.0 +972 6 training.label_smoothing 0.0836942874081712 +972 7 model.embedding_dim 0.0 +972 7 model.relation_dim 2.0 +972 7 model.dropout_0 0.3605187026056159 +972 7 model.dropout_1 0.1256593468119712 +972 7 
model.dropout_2 0.3700889159626153 +972 7 optimizer.lr 0.0712408050836865 +972 7 training.batch_size 1.0 +972 7 training.label_smoothing 0.002262329251996512 +972 8 model.embedding_dim 0.0 +972 8 model.relation_dim 2.0 +972 8 model.dropout_0 0.4318053350298256 +972 8 model.dropout_1 0.49417260559754583 +972 8 model.dropout_2 0.12327943727643481 +972 8 optimizer.lr 0.023330682973343876 +972 8 training.batch_size 0.0 +972 8 training.label_smoothing 0.28974900142485294 +972 9 model.embedding_dim 0.0 +972 9 model.relation_dim 0.0 +972 9 model.dropout_0 0.1579117557342422 +972 9 model.dropout_1 0.4882261348460352 +972 9 model.dropout_2 0.34466074674976244 +972 9 optimizer.lr 0.015256369552628341 +972 9 training.batch_size 0.0 +972 9 training.label_smoothing 0.0023400301800385144 +972 10 model.embedding_dim 0.0 +972 10 model.relation_dim 0.0 +972 10 model.dropout_0 0.33571508411152245 +972 10 model.dropout_1 0.17132402764971444 +972 10 model.dropout_2 0.15699431569477917 +972 10 optimizer.lr 0.025303819247687053 +972 10 training.batch_size 0.0 +972 10 training.label_smoothing 0.6192000170902935 +972 11 model.embedding_dim 0.0 +972 11 model.relation_dim 1.0 +972 11 model.dropout_0 0.19856791343910576 +972 11 model.dropout_1 0.23174978002227178 +972 11 model.dropout_2 0.2745860900247084 +972 11 optimizer.lr 0.03948538448078952 +972 11 training.batch_size 2.0 +972 11 training.label_smoothing 0.02013664394003586 +972 12 model.embedding_dim 1.0 +972 12 model.relation_dim 2.0 +972 12 model.dropout_0 0.3182114107221554 +972 12 model.dropout_1 0.3975979833325534 +972 12 model.dropout_2 0.47957712378673434 +972 12 optimizer.lr 0.001264351537986475 +972 12 training.batch_size 2.0 +972 12 training.label_smoothing 0.048651452369978726 +972 13 model.embedding_dim 2.0 +972 13 model.relation_dim 1.0 +972 13 model.dropout_0 0.4284677455640825 +972 13 model.dropout_1 0.2777236862531524 +972 13 model.dropout_2 0.3132899558429182 +972 13 optimizer.lr 0.059122088141840785 +972 13 
training.batch_size 1.0 +972 13 training.label_smoothing 0.0312702420529309 +972 14 model.embedding_dim 2.0 +972 14 model.relation_dim 2.0 +972 14 model.dropout_0 0.4825871895737589 +972 14 model.dropout_1 0.3146088689303518 +972 14 model.dropout_2 0.41411091289500673 +972 14 optimizer.lr 0.00882855111012296 +972 14 training.batch_size 0.0 +972 14 training.label_smoothing 0.0014621903474969452 +972 15 model.embedding_dim 2.0 +972 15 model.relation_dim 2.0 +972 15 model.dropout_0 0.4772989831766126 +972 15 model.dropout_1 0.47236744163183975 +972 15 model.dropout_2 0.3103319485748415 +972 15 optimizer.lr 0.003520246199795411 +972 15 training.batch_size 2.0 +972 15 training.label_smoothing 0.0013392044597246918 +972 16 model.embedding_dim 1.0 +972 16 model.relation_dim 0.0 +972 16 model.dropout_0 0.2550804027262637 +972 16 model.dropout_1 0.4635102939492634 +972 16 model.dropout_2 0.23163384007306367 +972 16 optimizer.lr 0.0112177438760761 +972 16 training.batch_size 1.0 +972 16 training.label_smoothing 0.1708536341022177 +972 17 model.embedding_dim 0.0 +972 17 model.relation_dim 2.0 +972 17 model.dropout_0 0.35683794297725735 +972 17 model.dropout_1 0.2295134296480612 +972 17 model.dropout_2 0.17777314771754257 +972 17 optimizer.lr 0.00129677018995211 +972 17 training.batch_size 0.0 +972 17 training.label_smoothing 0.049791392328169065 +972 18 model.embedding_dim 2.0 +972 18 model.relation_dim 2.0 +972 18 model.dropout_0 0.2413004253018256 +972 18 model.dropout_1 0.33698181042364567 +972 18 model.dropout_2 0.15563028192337872 +972 18 optimizer.lr 0.005214442875657354 +972 18 training.batch_size 2.0 +972 18 training.label_smoothing 0.05894854555370759 +972 19 model.embedding_dim 0.0 +972 19 model.relation_dim 1.0 +972 19 model.dropout_0 0.2854176746645695 +972 19 model.dropout_1 0.43973144354432 +972 19 model.dropout_2 0.33680888171268936 +972 19 optimizer.lr 0.016611404908396726 +972 19 training.batch_size 0.0 +972 19 training.label_smoothing 0.014451327533208614 
+972 20 model.embedding_dim 1.0 +972 20 model.relation_dim 0.0 +972 20 model.dropout_0 0.40915341669039473 +972 20 model.dropout_1 0.1318327713837942 +972 20 model.dropout_2 0.2711638973144877 +972 20 optimizer.lr 0.001074646760274813 +972 20 training.batch_size 0.0 +972 20 training.label_smoothing 0.03946013050986253 +972 21 model.embedding_dim 0.0 +972 21 model.relation_dim 1.0 +972 21 model.dropout_0 0.18884950912529566 +972 21 model.dropout_1 0.4913647201378454 +972 21 model.dropout_2 0.4953304920040633 +972 21 optimizer.lr 0.0013024698323578206 +972 21 training.batch_size 2.0 +972 21 training.label_smoothing 0.007308985348421235 +972 22 model.embedding_dim 2.0 +972 22 model.relation_dim 2.0 +972 22 model.dropout_0 0.21243056039184954 +972 22 model.dropout_1 0.33658169286863804 +972 22 model.dropout_2 0.17460615669174412 +972 22 optimizer.lr 0.002988052896768191 +972 22 training.batch_size 2.0 +972 22 training.label_smoothing 0.004108775746475744 +972 23 model.embedding_dim 2.0 +972 23 model.relation_dim 2.0 +972 23 model.dropout_0 0.40479610549377 +972 23 model.dropout_1 0.23595067810149406 +972 23 model.dropout_2 0.11420853207820852 +972 23 optimizer.lr 0.0022185497719372045 +972 23 training.batch_size 2.0 +972 23 training.label_smoothing 0.321756880117204 +972 24 model.embedding_dim 1.0 +972 24 model.relation_dim 2.0 +972 24 model.dropout_0 0.4676599053105093 +972 24 model.dropout_1 0.2340417209421038 +972 24 model.dropout_2 0.33407305171630863 +972 24 optimizer.lr 0.004246009045246439 +972 24 training.batch_size 1.0 +972 24 training.label_smoothing 0.017074566022583768 +972 25 model.embedding_dim 0.0 +972 25 model.relation_dim 1.0 +972 25 model.dropout_0 0.11263513948796487 +972 25 model.dropout_1 0.23658877792723818 +972 25 model.dropout_2 0.2927937583253427 +972 25 optimizer.lr 0.004627499819066675 +972 25 training.batch_size 1.0 +972 25 training.label_smoothing 0.5376034381874127 +972 26 model.embedding_dim 1.0 +972 26 model.relation_dim 1.0 +972 26 
model.dropout_0 0.2143219206548865 +972 26 model.dropout_1 0.3450352837440202 +972 26 model.dropout_2 0.3747509759907658 +972 26 optimizer.lr 0.006914933641604877 +972 26 training.batch_size 1.0 +972 26 training.label_smoothing 0.022041040293706647 +972 27 model.embedding_dim 2.0 +972 27 model.relation_dim 2.0 +972 27 model.dropout_0 0.2940658438981653 +972 27 model.dropout_1 0.27352540674905995 +972 27 model.dropout_2 0.37267697767835845 +972 27 optimizer.lr 0.059299264813894456 +972 27 training.batch_size 1.0 +972 27 training.label_smoothing 0.04493845644006716 +972 28 model.embedding_dim 1.0 +972 28 model.relation_dim 1.0 +972 28 model.dropout_0 0.17410922350828503 +972 28 model.dropout_1 0.27324628356147546 +972 28 model.dropout_2 0.39418033860169627 +972 28 optimizer.lr 0.00169428570696483 +972 28 training.batch_size 1.0 +972 28 training.label_smoothing 0.00986867249905721 +972 29 model.embedding_dim 0.0 +972 29 model.relation_dim 0.0 +972 29 model.dropout_0 0.3799683544378767 +972 29 model.dropout_1 0.114737731889243 +972 29 model.dropout_2 0.21118488651710352 +972 29 optimizer.lr 0.0754669593515083 +972 29 training.batch_size 1.0 +972 29 training.label_smoothing 0.004357673897463879 +972 30 model.embedding_dim 2.0 +972 30 model.relation_dim 0.0 +972 30 model.dropout_0 0.34249854974750826 +972 30 model.dropout_1 0.3742025071511803 +972 30 model.dropout_2 0.4769430702227221 +972 30 optimizer.lr 0.060339012161397795 +972 30 training.batch_size 2.0 +972 30 training.label_smoothing 0.002778033364333283 +972 31 model.embedding_dim 2.0 +972 31 model.relation_dim 2.0 +972 31 model.dropout_0 0.4183024361281235 +972 31 model.dropout_1 0.4824162021857415 +972 31 model.dropout_2 0.26394069177445567 +972 31 optimizer.lr 0.0010257490949988073 +972 31 training.batch_size 0.0 +972 31 training.label_smoothing 0.06590121180024172 +972 32 model.embedding_dim 2.0 +972 32 model.relation_dim 2.0 +972 32 model.dropout_0 0.43160151785835776 +972 32 model.dropout_1 
0.42636289239066605 +972 32 model.dropout_2 0.43855235953472277 +972 32 optimizer.lr 0.02537841708577365 +972 32 training.batch_size 1.0 +972 32 training.label_smoothing 0.05467040318950179 +972 33 model.embedding_dim 1.0 +972 33 model.relation_dim 1.0 +972 33 model.dropout_0 0.43649698902292483 +972 33 model.dropout_1 0.17921330936509988 +972 33 model.dropout_2 0.16286905190429693 +972 33 optimizer.lr 0.08679012907854476 +972 33 training.batch_size 2.0 +972 33 training.label_smoothing 0.13169875751291504 +972 34 model.embedding_dim 0.0 +972 34 model.relation_dim 0.0 +972 34 model.dropout_0 0.36032312868446414 +972 34 model.dropout_1 0.15092045075455635 +972 34 model.dropout_2 0.2796699426940154 +972 34 optimizer.lr 0.0782068776137578 +972 34 training.batch_size 1.0 +972 34 training.label_smoothing 0.17685465875616663 +972 35 model.embedding_dim 2.0 +972 35 model.relation_dim 2.0 +972 35 model.dropout_0 0.4377680959460631 +972 35 model.dropout_1 0.1166293260374423 +972 35 model.dropout_2 0.4193614911293284 +972 35 optimizer.lr 0.07684694963767753 +972 35 training.batch_size 0.0 +972 35 training.label_smoothing 0.013428324478588742 +972 36 model.embedding_dim 1.0 +972 36 model.relation_dim 2.0 +972 36 model.dropout_0 0.24082547603515223 +972 36 model.dropout_1 0.34630986230153304 +972 36 model.dropout_2 0.4747208364998432 +972 36 optimizer.lr 0.010931663306392435 +972 36 training.batch_size 2.0 +972 36 training.label_smoothing 0.0012106578141294051 +972 37 model.embedding_dim 1.0 +972 37 model.relation_dim 1.0 +972 37 model.dropout_0 0.10140439156709502 +972 37 model.dropout_1 0.4005648322370446 +972 37 model.dropout_2 0.26859776090789084 +972 37 optimizer.lr 0.0012937686517605215 +972 37 training.batch_size 2.0 +972 37 training.label_smoothing 0.00109879939706345 +972 38 model.embedding_dim 0.0 +972 38 model.relation_dim 1.0 +972 38 model.dropout_0 0.312413625063356 +972 38 model.dropout_1 0.4080739257000692 +972 38 model.dropout_2 0.27911467873508067 +972 38 
optimizer.lr 0.0010167700116763582 +972 38 training.batch_size 1.0 +972 38 training.label_smoothing 0.004732475048459331 +972 39 model.embedding_dim 2.0 +972 39 model.relation_dim 2.0 +972 39 model.dropout_0 0.38852673346195615 +972 39 model.dropout_1 0.2241423542737836 +972 39 model.dropout_2 0.4244891660133119 +972 39 optimizer.lr 0.005213287483321454 +972 39 training.batch_size 2.0 +972 39 training.label_smoothing 0.444110029809621 +972 40 model.embedding_dim 1.0 +972 40 model.relation_dim 0.0 +972 40 model.dropout_0 0.19538883537569351 +972 40 model.dropout_1 0.2737074869874458 +972 40 model.dropout_2 0.2268485446409928 +972 40 optimizer.lr 0.0673743067576801 +972 40 training.batch_size 0.0 +972 40 training.label_smoothing 0.4669796516845393 +972 41 model.embedding_dim 0.0 +972 41 model.relation_dim 0.0 +972 41 model.dropout_0 0.4730143084784664 +972 41 model.dropout_1 0.19689609198674646 +972 41 model.dropout_2 0.16177153799868213 +972 41 optimizer.lr 0.0015012155337313516 +972 41 training.batch_size 2.0 +972 41 training.label_smoothing 0.028927886797419696 +972 42 model.embedding_dim 0.0 +972 42 model.relation_dim 2.0 +972 42 model.dropout_0 0.24395759892729973 +972 42 model.dropout_1 0.3206420749553514 +972 42 model.dropout_2 0.45245902873532 +972 42 optimizer.lr 0.008056803360806388 +972 42 training.batch_size 1.0 +972 42 training.label_smoothing 0.03594573358792197 +972 43 model.embedding_dim 2.0 +972 43 model.relation_dim 2.0 +972 43 model.dropout_0 0.20413155852325954 +972 43 model.dropout_1 0.3411140932200027 +972 43 model.dropout_2 0.19006379356062572 +972 43 optimizer.lr 0.027952780935376006 +972 43 training.batch_size 1.0 +972 43 training.label_smoothing 0.0037489901077556393 +972 44 model.embedding_dim 2.0 +972 44 model.relation_dim 0.0 +972 44 model.dropout_0 0.41045939183419056 +972 44 model.dropout_1 0.31870031406555405 +972 44 model.dropout_2 0.1144470847641535 +972 44 optimizer.lr 0.08820854668392855 +972 44 training.batch_size 2.0 +972 44 
training.label_smoothing 0.003312108633426596 +972 45 model.embedding_dim 2.0 +972 45 model.relation_dim 2.0 +972 45 model.dropout_0 0.36772302172015636 +972 45 model.dropout_1 0.42257483773478854 +972 45 model.dropout_2 0.22634459435758714 +972 45 optimizer.lr 0.009466487531496134 +972 45 training.batch_size 2.0 +972 45 training.label_smoothing 0.03398420683409604 +972 46 model.embedding_dim 0.0 +972 46 model.relation_dim 1.0 +972 46 model.dropout_0 0.19687908891401706 +972 46 model.dropout_1 0.23865718456554258 +972 46 model.dropout_2 0.18087083001619636 +972 46 optimizer.lr 0.0013329039357411932 +972 46 training.batch_size 0.0 +972 46 training.label_smoothing 0.4152491210357222 +972 47 model.embedding_dim 0.0 +972 47 model.relation_dim 0.0 +972 47 model.dropout_0 0.19246678045713372 +972 47 model.dropout_1 0.31303157423975914 +972 47 model.dropout_2 0.46183769447788886 +972 47 optimizer.lr 0.0013537469181813542 +972 47 training.batch_size 1.0 +972 47 training.label_smoothing 0.0010430527349185017 +972 48 model.embedding_dim 1.0 +972 48 model.relation_dim 1.0 +972 48 model.dropout_0 0.2608466088911117 +972 48 model.dropout_1 0.25609574180961986 +972 48 model.dropout_2 0.25224486063353174 +972 48 optimizer.lr 0.023849857961803677 +972 48 training.batch_size 2.0 +972 48 training.label_smoothing 0.0030020781747481106 +972 49 model.embedding_dim 0.0 +972 49 model.relation_dim 2.0 +972 49 model.dropout_0 0.4754310205241902 +972 49 model.dropout_1 0.1065632204948158 +972 49 model.dropout_2 0.3903617793650962 +972 49 optimizer.lr 0.011730548698145389 +972 49 training.batch_size 1.0 +972 49 training.label_smoothing 0.0031177775529516527 +972 50 model.embedding_dim 1.0 +972 50 model.relation_dim 1.0 +972 50 model.dropout_0 0.10261866336248149 +972 50 model.dropout_1 0.17440376506543928 +972 50 model.dropout_2 0.4915473760007722 +972 50 optimizer.lr 0.019795322252542227 +972 50 training.batch_size 0.0 +972 50 training.label_smoothing 0.005983714837054592 +972 51 
model.embedding_dim 1.0 +972 51 model.relation_dim 1.0 +972 51 model.dropout_0 0.4796955200692895 +972 51 model.dropout_1 0.491278141752445 +972 51 model.dropout_2 0.14800395478898953 +972 51 optimizer.lr 0.011498289036504836 +972 51 training.batch_size 0.0 +972 51 training.label_smoothing 0.06430596669097186 +972 52 model.embedding_dim 1.0 +972 52 model.relation_dim 2.0 +972 52 model.dropout_0 0.4344175081281907 +972 52 model.dropout_1 0.4019569394614757 +972 52 model.dropout_2 0.32700671923839264 +972 52 optimizer.lr 0.003023710941551014 +972 52 training.batch_size 2.0 +972 52 training.label_smoothing 0.08316614372913286 +972 53 model.embedding_dim 1.0 +972 53 model.relation_dim 0.0 +972 53 model.dropout_0 0.10615638618648174 +972 53 model.dropout_1 0.260341512156453 +972 53 model.dropout_2 0.13561260252167748 +972 53 optimizer.lr 0.0014919439880970193 +972 53 training.batch_size 1.0 +972 53 training.label_smoothing 0.10016293084471019 +972 54 model.embedding_dim 0.0 +972 54 model.relation_dim 0.0 +972 54 model.dropout_0 0.40773260717979387 +972 54 model.dropout_1 0.19777014740545018 +972 54 model.dropout_2 0.35574598690829057 +972 54 optimizer.lr 0.015481850111406676 +972 54 training.batch_size 2.0 +972 54 training.label_smoothing 0.18033984469378825 +972 55 model.embedding_dim 1.0 +972 55 model.relation_dim 1.0 +972 55 model.dropout_0 0.22417408982190637 +972 55 model.dropout_1 0.3390351049471594 +972 55 model.dropout_2 0.2635670544880592 +972 55 optimizer.lr 0.009312866173123907 +972 55 training.batch_size 0.0 +972 55 training.label_smoothing 0.006995445393133201 +972 56 model.embedding_dim 0.0 +972 56 model.relation_dim 2.0 +972 56 model.dropout_0 0.4393103928386953 +972 56 model.dropout_1 0.37169670129238747 +972 56 model.dropout_2 0.16507476426051104 +972 56 optimizer.lr 0.003015126988221321 +972 56 training.batch_size 1.0 +972 56 training.label_smoothing 0.0039017769269981714 +972 57 model.embedding_dim 0.0 +972 57 model.relation_dim 0.0 +972 57 
model.dropout_0 0.3127807776586292 +972 57 model.dropout_1 0.4851817224051167 +972 57 model.dropout_2 0.26218407761605284 +972 57 optimizer.lr 0.043311958048691694 +972 57 training.batch_size 1.0 +972 57 training.label_smoothing 0.007881836455020023 +972 58 model.embedding_dim 1.0 +972 58 model.relation_dim 1.0 +972 58 model.dropout_0 0.1708378363140044 +972 58 model.dropout_1 0.38095570252580235 +972 58 model.dropout_2 0.3075419328379218 +972 58 optimizer.lr 0.093606298301743 +972 58 training.batch_size 1.0 +972 58 training.label_smoothing 0.04457152344149129 +972 59 model.embedding_dim 1.0 +972 59 model.relation_dim 1.0 +972 59 model.dropout_0 0.23690776819681508 +972 59 model.dropout_1 0.4675753108698808 +972 59 model.dropout_2 0.3118732022637659 +972 59 optimizer.lr 0.014108057533987417 +972 59 training.batch_size 1.0 +972 59 training.label_smoothing 0.0013138450845934636 +972 60 model.embedding_dim 1.0 +972 60 model.relation_dim 2.0 +972 60 model.dropout_0 0.4961410075864126 +972 60 model.dropout_1 0.1855368424938654 +972 60 model.dropout_2 0.4735069850777944 +972 60 optimizer.lr 0.0010952869349414073 +972 60 training.batch_size 0.0 +972 60 training.label_smoothing 0.18715934824082248 +972 61 model.embedding_dim 1.0 +972 61 model.relation_dim 1.0 +972 61 model.dropout_0 0.206533775744393 +972 61 model.dropout_1 0.2663124998747226 +972 61 model.dropout_2 0.14163292494245935 +972 61 optimizer.lr 0.07428791841868279 +972 61 training.batch_size 0.0 +972 61 training.label_smoothing 0.0022581204332111603 +972 62 model.embedding_dim 1.0 +972 62 model.relation_dim 0.0 +972 62 model.dropout_0 0.11272307281025001 +972 62 model.dropout_1 0.21135246981873634 +972 62 model.dropout_2 0.43136837817825424 +972 62 optimizer.lr 0.014238213357196024 +972 62 training.batch_size 1.0 +972 62 training.label_smoothing 0.02733260695015039 +972 63 model.embedding_dim 2.0 +972 63 model.relation_dim 2.0 +972 63 model.dropout_0 0.13064507663201358 +972 63 model.dropout_1 
0.34506498265470487 +972 63 model.dropout_2 0.3758077683529535 +972 63 optimizer.lr 0.012642877733601766 +972 63 training.batch_size 1.0 +972 63 training.label_smoothing 0.0010227347397043375 +972 64 model.embedding_dim 2.0 +972 64 model.relation_dim 2.0 +972 64 model.dropout_0 0.21336761637354124 +972 64 model.dropout_1 0.40481524654522894 +972 64 model.dropout_2 0.470192505161353 +972 64 optimizer.lr 0.004334760071196646 +972 64 training.batch_size 0.0 +972 64 training.label_smoothing 0.09233526559812091 +972 65 model.embedding_dim 0.0 +972 65 model.relation_dim 0.0 +972 65 model.dropout_0 0.28139276676774266 +972 65 model.dropout_1 0.2616303692073272 +972 65 model.dropout_2 0.3405887136787979 +972 65 optimizer.lr 0.03146059857061928 +972 65 training.batch_size 2.0 +972 65 training.label_smoothing 0.0032591989311321055 +972 66 model.embedding_dim 0.0 +972 66 model.relation_dim 1.0 +972 66 model.dropout_0 0.23154980313760276 +972 66 model.dropout_1 0.1847455643226033 +972 66 model.dropout_2 0.43676849612169927 +972 66 optimizer.lr 0.005028561106753353 +972 66 training.batch_size 2.0 +972 66 training.label_smoothing 0.23740118334998422 +972 67 model.embedding_dim 1.0 +972 67 model.relation_dim 1.0 +972 67 model.dropout_0 0.36450175388005984 +972 67 model.dropout_1 0.4134167080747173 +972 67 model.dropout_2 0.3791103391806432 +972 67 optimizer.lr 0.0022313383217609757 +972 67 training.batch_size 1.0 +972 67 training.label_smoothing 0.0031823994326423705 +972 68 model.embedding_dim 1.0 +972 68 model.relation_dim 2.0 +972 68 model.dropout_0 0.30720071618631 +972 68 model.dropout_1 0.33506311366915487 +972 68 model.dropout_2 0.26610198456650574 +972 68 optimizer.lr 0.0027607167676633655 +972 68 training.batch_size 0.0 +972 68 training.label_smoothing 0.021822647172250596 +972 69 model.embedding_dim 1.0 +972 69 model.relation_dim 0.0 +972 69 model.dropout_0 0.29723220299625813 +972 69 model.dropout_1 0.47904761588798794 +972 69 model.dropout_2 0.2774283795546697 +972 69 
optimizer.lr 0.001270396189293039 +972 69 training.batch_size 2.0 +972 69 training.label_smoothing 0.04071734896480431 +972 70 model.embedding_dim 0.0 +972 70 model.relation_dim 1.0 +972 70 model.dropout_0 0.24348080844187084 +972 70 model.dropout_1 0.10781815953427554 +972 70 model.dropout_2 0.444691941455214 +972 70 optimizer.lr 0.003871721544463725 +972 70 training.batch_size 1.0 +972 70 training.label_smoothing 0.034127718957093045 +972 71 model.embedding_dim 0.0 +972 71 model.relation_dim 2.0 +972 71 model.dropout_0 0.28445484028291806 +972 71 model.dropout_1 0.4416142331641335 +972 71 model.dropout_2 0.29242876347212315 +972 71 optimizer.lr 0.015847467958733794 +972 71 training.batch_size 1.0 +972 71 training.label_smoothing 0.20751212664844357 +972 72 model.embedding_dim 2.0 +972 72 model.relation_dim 1.0 +972 72 model.dropout_0 0.48218450236152877 +972 72 model.dropout_1 0.10905375113430216 +972 72 model.dropout_2 0.31946284935560315 +972 72 optimizer.lr 0.015595889762189938 +972 72 training.batch_size 0.0 +972 72 training.label_smoothing 0.0028411634401095927 +972 73 model.embedding_dim 1.0 +972 73 model.relation_dim 1.0 +972 73 model.dropout_0 0.15688865698013307 +972 73 model.dropout_1 0.47498802710677945 +972 73 model.dropout_2 0.15984914622057117 +972 73 optimizer.lr 0.0026849698181085416 +972 73 training.batch_size 2.0 +972 73 training.label_smoothing 0.00805037835196576 +972 74 model.embedding_dim 0.0 +972 74 model.relation_dim 2.0 +972 74 model.dropout_0 0.23972834498501805 +972 74 model.dropout_1 0.16364236349136202 +972 74 model.dropout_2 0.4296696627176162 +972 74 optimizer.lr 0.023022883994341665 +972 74 training.batch_size 1.0 +972 74 training.label_smoothing 0.006857850363824295 +972 75 model.embedding_dim 0.0 +972 75 model.relation_dim 0.0 +972 75 model.dropout_0 0.31059072645322616 +972 75 model.dropout_1 0.4450741087632313 +972 75 model.dropout_2 0.3456244969593298 +972 75 optimizer.lr 0.0011360936716319132 +972 75 training.batch_size 0.0 
+972 75 training.label_smoothing 0.4314452101899263 +972 76 model.embedding_dim 0.0 +972 76 model.relation_dim 0.0 +972 76 model.dropout_0 0.4424204251514632 +972 76 model.dropout_1 0.2915267062482293 +972 76 model.dropout_2 0.2504556417591785 +972 76 optimizer.lr 0.0018133251604428107 +972 76 training.batch_size 1.0 +972 76 training.label_smoothing 0.001676691566785087 +972 77 model.embedding_dim 2.0 +972 77 model.relation_dim 0.0 +972 77 model.dropout_0 0.21331551438530025 +972 77 model.dropout_1 0.2594585825136672 +972 77 model.dropout_2 0.35057372277950916 +972 77 optimizer.lr 0.045511260301729715 +972 77 training.batch_size 1.0 +972 77 training.label_smoothing 0.04693163952729463 +972 78 model.embedding_dim 1.0 +972 78 model.relation_dim 2.0 +972 78 model.dropout_0 0.29088313044513436 +972 78 model.dropout_1 0.24782108685857215 +972 78 model.dropout_2 0.19885105917491486 +972 78 optimizer.lr 0.008432809067542292 +972 78 training.batch_size 2.0 +972 78 training.label_smoothing 0.7063618993677582 +972 79 model.embedding_dim 0.0 +972 79 model.relation_dim 0.0 +972 79 model.dropout_0 0.22249458225464638 +972 79 model.dropout_1 0.42323570787561393 +972 79 model.dropout_2 0.4918057814916114 +972 79 optimizer.lr 0.002884181196609338 +972 79 training.batch_size 2.0 +972 79 training.label_smoothing 0.1385575725661655 +972 80 model.embedding_dim 2.0 +972 80 model.relation_dim 2.0 +972 80 model.dropout_0 0.16724084474232612 +972 80 model.dropout_1 0.17070806526439838 +972 80 model.dropout_2 0.2603980350039245 +972 80 optimizer.lr 0.013693239868427728 +972 80 training.batch_size 2.0 +972 80 training.label_smoothing 0.05404848976895567 +972 81 model.embedding_dim 2.0 +972 81 model.relation_dim 0.0 +972 81 model.dropout_0 0.29970775690928086 +972 81 model.dropout_1 0.2758782940715188 +972 81 model.dropout_2 0.28630284904504466 +972 81 optimizer.lr 0.009745184411496556 +972 81 training.batch_size 2.0 +972 81 training.label_smoothing 0.0010522096706119446 +972 82 
model.embedding_dim 2.0 +972 82 model.relation_dim 1.0 +972 82 model.dropout_0 0.26607669967749253 +972 82 model.dropout_1 0.4572505337677847 +972 82 model.dropout_2 0.16252913475656128 +972 82 optimizer.lr 0.05638052882090877 +972 82 training.batch_size 2.0 +972 82 training.label_smoothing 0.60041219791214 +972 83 model.embedding_dim 1.0 +972 83 model.relation_dim 1.0 +972 83 model.dropout_0 0.37428896836972764 +972 83 model.dropout_1 0.10271613991293967 +972 83 model.dropout_2 0.4984090305875686 +972 83 optimizer.lr 0.00817455134877079 +972 83 training.batch_size 0.0 +972 83 training.label_smoothing 0.021829345671472173 +972 84 model.embedding_dim 1.0 +972 84 model.relation_dim 1.0 +972 84 model.dropout_0 0.423912626340736 +972 84 model.dropout_1 0.499141357410517 +972 84 model.dropout_2 0.33584465483277903 +972 84 optimizer.lr 0.028764295272246703 +972 84 training.batch_size 1.0 +972 84 training.label_smoothing 0.18441185935082363 +972 85 model.embedding_dim 0.0 +972 85 model.relation_dim 0.0 +972 85 model.dropout_0 0.21904998601260373 +972 85 model.dropout_1 0.3605709131446275 +972 85 model.dropout_2 0.27040549924625823 +972 85 optimizer.lr 0.00175841155343509 +972 85 training.batch_size 1.0 +972 85 training.label_smoothing 0.020657188695414875 +972 86 model.embedding_dim 2.0 +972 86 model.relation_dim 2.0 +972 86 model.dropout_0 0.2149959727207548 +972 86 model.dropout_1 0.3385467394303391 +972 86 model.dropout_2 0.43290347124834244 +972 86 optimizer.lr 0.08806797771598224 +972 86 training.batch_size 1.0 +972 86 training.label_smoothing 0.49389036064227265 +972 87 model.embedding_dim 1.0 +972 87 model.relation_dim 0.0 +972 87 model.dropout_0 0.487956303657558 +972 87 model.dropout_1 0.4651109294663325 +972 87 model.dropout_2 0.4299231687973992 +972 87 optimizer.lr 0.01504168711377861 +972 87 training.batch_size 2.0 +972 87 training.label_smoothing 0.2852615655806912 +972 88 model.embedding_dim 1.0 +972 88 model.relation_dim 1.0 +972 88 model.dropout_0 
0.1909237921506525 +972 88 model.dropout_1 0.42260129967920657 +972 88 model.dropout_2 0.4432356457512548 +972 88 optimizer.lr 0.002376223978517105 +972 88 training.batch_size 0.0 +972 88 training.label_smoothing 0.04092990917101712 +972 89 model.embedding_dim 1.0 +972 89 model.relation_dim 2.0 +972 89 model.dropout_0 0.3574758737943604 +972 89 model.dropout_1 0.2048134960199607 +972 89 model.dropout_2 0.12462079468206566 +972 89 optimizer.lr 0.001069177278629873 +972 89 training.batch_size 2.0 +972 89 training.label_smoothing 0.03397632165132117 +972 90 model.embedding_dim 1.0 +972 90 model.relation_dim 0.0 +972 90 model.dropout_0 0.1882873050749744 +972 90 model.dropout_1 0.19246336038862416 +972 90 model.dropout_2 0.32756834570007975 +972 90 optimizer.lr 0.006366384294132565 +972 90 training.batch_size 2.0 +972 90 training.label_smoothing 0.0488018862929254 +972 91 model.embedding_dim 0.0 +972 91 model.relation_dim 1.0 +972 91 model.dropout_0 0.181915326598548 +972 91 model.dropout_1 0.352909624122466 +972 91 model.dropout_2 0.39824609567671054 +972 91 optimizer.lr 0.06278141274213826 +972 91 training.batch_size 1.0 +972 91 training.label_smoothing 0.0012430508885722842 +972 92 model.embedding_dim 0.0 +972 92 model.relation_dim 0.0 +972 92 model.dropout_0 0.3139447395744044 +972 92 model.dropout_1 0.4306215053865733 +972 92 model.dropout_2 0.3718279078050195 +972 92 optimizer.lr 0.0012331409946344423 +972 92 training.batch_size 2.0 +972 92 training.label_smoothing 0.06845066896678798 +972 93 model.embedding_dim 0.0 +972 93 model.relation_dim 2.0 +972 93 model.dropout_0 0.3417276571424012 +972 93 model.dropout_1 0.22155269846091719 +972 93 model.dropout_2 0.4423975395696318 +972 93 optimizer.lr 0.004690081083050864 +972 93 training.batch_size 2.0 +972 93 training.label_smoothing 0.0022339595539109043 +972 94 model.embedding_dim 2.0 +972 94 model.relation_dim 1.0 +972 94 model.dropout_0 0.13405418170237474 +972 94 model.dropout_1 0.48124741619453737 +972 94 
model.dropout_2 0.28469737065013073 +972 94 optimizer.lr 0.037918182770269745 +972 94 training.batch_size 2.0 +972 94 training.label_smoothing 0.00847510776389478 +972 95 model.embedding_dim 2.0 +972 95 model.relation_dim 2.0 +972 95 model.dropout_0 0.10558678080208482 +972 95 model.dropout_1 0.2682782864878201 +972 95 model.dropout_2 0.2106264497983116 +972 95 optimizer.lr 0.001382174432795869 +972 95 training.batch_size 1.0 +972 95 training.label_smoothing 0.0027717018394921925 +972 96 model.embedding_dim 2.0 +972 96 model.relation_dim 0.0 +972 96 model.dropout_0 0.4879279134811414 +972 96 model.dropout_1 0.43134948559932285 +972 96 model.dropout_2 0.23988930100666886 +972 96 optimizer.lr 0.06364011615507711 +972 96 training.batch_size 1.0 +972 96 training.label_smoothing 0.8931840142369005 +972 97 model.embedding_dim 0.0 +972 97 model.relation_dim 2.0 +972 97 model.dropout_0 0.43823513152522703 +972 97 model.dropout_1 0.29061568343135613 +972 97 model.dropout_2 0.3403518388479234 +972 97 optimizer.lr 0.021625061648402916 +972 97 training.batch_size 2.0 +972 97 training.label_smoothing 0.001985149115036302 +972 98 model.embedding_dim 1.0 +972 98 model.relation_dim 1.0 +972 98 model.dropout_0 0.31221403463252595 +972 98 model.dropout_1 0.26172443044194 +972 98 model.dropout_2 0.2160284583901284 +972 98 optimizer.lr 0.08959806332231218 +972 98 training.batch_size 1.0 +972 98 training.label_smoothing 0.15172009433311973 +972 99 model.embedding_dim 1.0 +972 99 model.relation_dim 2.0 +972 99 model.dropout_0 0.39639572907023757 +972 99 model.dropout_1 0.1282454384558634 +972 99 model.dropout_2 0.11472227495824915 +972 99 optimizer.lr 0.07503816484209211 +972 99 training.batch_size 2.0 +972 99 training.label_smoothing 0.39891818382484323 +972 100 model.embedding_dim 1.0 +972 100 model.relation_dim 1.0 +972 100 model.dropout_0 0.1420328474111344 +972 100 model.dropout_1 0.20178729935825152 +972 100 model.dropout_2 0.38268609184743463 +972 100 optimizer.lr 
0.011279882114935927 +972 100 training.batch_size 1.0 +972 100 training.label_smoothing 0.049451743655263156 +972 1 dataset """kinships""" +972 1 model """tucker""" +972 1 loss """crossentropy""" +972 1 regularizer """no""" +972 1 optimizer """adadelta""" +972 1 training_loop """lcwa""" +972 1 evaluator """rankbased""" +972 2 dataset """kinships""" +972 2 model """tucker""" +972 2 loss """crossentropy""" +972 2 regularizer """no""" +972 2 optimizer """adadelta""" +972 2 training_loop """lcwa""" +972 2 evaluator """rankbased""" +972 3 dataset """kinships""" +972 3 model """tucker""" +972 3 loss """crossentropy""" +972 3 regularizer """no""" +972 3 optimizer """adadelta""" +972 3 training_loop """lcwa""" +972 3 evaluator """rankbased""" +972 4 dataset """kinships""" +972 4 model """tucker""" +972 4 loss """crossentropy""" +972 4 regularizer """no""" +972 4 optimizer """adadelta""" +972 4 training_loop """lcwa""" +972 4 evaluator """rankbased""" +972 5 dataset """kinships""" +972 5 model """tucker""" +972 5 loss """crossentropy""" +972 5 regularizer """no""" +972 5 optimizer """adadelta""" +972 5 training_loop """lcwa""" +972 5 evaluator """rankbased""" +972 6 dataset """kinships""" +972 6 model """tucker""" +972 6 loss """crossentropy""" +972 6 regularizer """no""" +972 6 optimizer """adadelta""" +972 6 training_loop """lcwa""" +972 6 evaluator """rankbased""" +972 7 dataset """kinships""" +972 7 model """tucker""" +972 7 loss """crossentropy""" +972 7 regularizer """no""" +972 7 optimizer """adadelta""" +972 7 training_loop """lcwa""" +972 7 evaluator """rankbased""" +972 8 dataset """kinships""" +972 8 model """tucker""" +972 8 loss """crossentropy""" +972 8 regularizer """no""" +972 8 optimizer """adadelta""" +972 8 training_loop """lcwa""" +972 8 evaluator """rankbased""" +972 9 dataset """kinships""" +972 9 model """tucker""" +972 9 loss """crossentropy""" +972 9 regularizer """no""" +972 9 optimizer """adadelta""" +972 9 training_loop """lcwa""" +972 9 
evaluator """rankbased""" +972 10 dataset """kinships""" +972 10 model """tucker""" +972 10 loss """crossentropy""" +972 10 regularizer """no""" +972 10 optimizer """adadelta""" +972 10 training_loop """lcwa""" +972 10 evaluator """rankbased""" +972 11 dataset """kinships""" +972 11 model """tucker""" +972 11 loss """crossentropy""" +972 11 regularizer """no""" +972 11 optimizer """adadelta""" +972 11 training_loop """lcwa""" +972 11 evaluator """rankbased""" +972 12 dataset """kinships""" +972 12 model """tucker""" +972 12 loss """crossentropy""" +972 12 regularizer """no""" +972 12 optimizer """adadelta""" +972 12 training_loop """lcwa""" +972 12 evaluator """rankbased""" +972 13 dataset """kinships""" +972 13 model """tucker""" +972 13 loss """crossentropy""" +972 13 regularizer """no""" +972 13 optimizer """adadelta""" +972 13 training_loop """lcwa""" +972 13 evaluator """rankbased""" +972 14 dataset """kinships""" +972 14 model """tucker""" +972 14 loss """crossentropy""" +972 14 regularizer """no""" +972 14 optimizer """adadelta""" +972 14 training_loop """lcwa""" +972 14 evaluator """rankbased""" +972 15 dataset """kinships""" +972 15 model """tucker""" +972 15 loss """crossentropy""" +972 15 regularizer """no""" +972 15 optimizer """adadelta""" +972 15 training_loop """lcwa""" +972 15 evaluator """rankbased""" +972 16 dataset """kinships""" +972 16 model """tucker""" +972 16 loss """crossentropy""" +972 16 regularizer """no""" +972 16 optimizer """adadelta""" +972 16 training_loop """lcwa""" +972 16 evaluator """rankbased""" +972 17 dataset """kinships""" +972 17 model """tucker""" +972 17 loss """crossentropy""" +972 17 regularizer """no""" +972 17 optimizer """adadelta""" +972 17 training_loop """lcwa""" +972 17 evaluator """rankbased""" +972 18 dataset """kinships""" +972 18 model """tucker""" +972 18 loss """crossentropy""" +972 18 regularizer """no""" +972 18 optimizer """adadelta""" +972 18 training_loop """lcwa""" +972 18 evaluator """rankbased""" 
+972 19 dataset """kinships""" +972 19 model """tucker""" +972 19 loss """crossentropy""" +972 19 regularizer """no""" +972 19 optimizer """adadelta""" +972 19 training_loop """lcwa""" +972 19 evaluator """rankbased""" +972 20 dataset """kinships""" +972 20 model """tucker""" +972 20 loss """crossentropy""" +972 20 regularizer """no""" +972 20 optimizer """adadelta""" +972 20 training_loop """lcwa""" +972 20 evaluator """rankbased""" +972 21 dataset """kinships""" +972 21 model """tucker""" +972 21 loss """crossentropy""" +972 21 regularizer """no""" +972 21 optimizer """adadelta""" +972 21 training_loop """lcwa""" +972 21 evaluator """rankbased""" +972 22 dataset """kinships""" +972 22 model """tucker""" +972 22 loss """crossentropy""" +972 22 regularizer """no""" +972 22 optimizer """adadelta""" +972 22 training_loop """lcwa""" +972 22 evaluator """rankbased""" +972 23 dataset """kinships""" +972 23 model """tucker""" +972 23 loss """crossentropy""" +972 23 regularizer """no""" +972 23 optimizer """adadelta""" +972 23 training_loop """lcwa""" +972 23 evaluator """rankbased""" +972 24 dataset """kinships""" +972 24 model """tucker""" +972 24 loss """crossentropy""" +972 24 regularizer """no""" +972 24 optimizer """adadelta""" +972 24 training_loop """lcwa""" +972 24 evaluator """rankbased""" +972 25 dataset """kinships""" +972 25 model """tucker""" +972 25 loss """crossentropy""" +972 25 regularizer """no""" +972 25 optimizer """adadelta""" +972 25 training_loop """lcwa""" +972 25 evaluator """rankbased""" +972 26 dataset """kinships""" +972 26 model """tucker""" +972 26 loss """crossentropy""" +972 26 regularizer """no""" +972 26 optimizer """adadelta""" +972 26 training_loop """lcwa""" +972 26 evaluator """rankbased""" +972 27 dataset """kinships""" +972 27 model """tucker""" +972 27 loss """crossentropy""" +972 27 regularizer """no""" +972 27 optimizer """adadelta""" +972 27 training_loop """lcwa""" +972 27 evaluator """rankbased""" +972 28 dataset 
"""kinships""" +972 28 model """tucker""" +972 28 loss """crossentropy""" +972 28 regularizer """no""" +972 28 optimizer """adadelta""" +972 28 training_loop """lcwa""" +972 28 evaluator """rankbased""" +972 29 dataset """kinships""" +972 29 model """tucker""" +972 29 loss """crossentropy""" +972 29 regularizer """no""" +972 29 optimizer """adadelta""" +972 29 training_loop """lcwa""" +972 29 evaluator """rankbased""" +972 30 dataset """kinships""" +972 30 model """tucker""" +972 30 loss """crossentropy""" +972 30 regularizer """no""" +972 30 optimizer """adadelta""" +972 30 training_loop """lcwa""" +972 30 evaluator """rankbased""" +972 31 dataset """kinships""" +972 31 model """tucker""" +972 31 loss """crossentropy""" +972 31 regularizer """no""" +972 31 optimizer """adadelta""" +972 31 training_loop """lcwa""" +972 31 evaluator """rankbased""" +972 32 dataset """kinships""" +972 32 model """tucker""" +972 32 loss """crossentropy""" +972 32 regularizer """no""" +972 32 optimizer """adadelta""" +972 32 training_loop """lcwa""" +972 32 evaluator """rankbased""" +972 33 dataset """kinships""" +972 33 model """tucker""" +972 33 loss """crossentropy""" +972 33 regularizer """no""" +972 33 optimizer """adadelta""" +972 33 training_loop """lcwa""" +972 33 evaluator """rankbased""" +972 34 dataset """kinships""" +972 34 model """tucker""" +972 34 loss """crossentropy""" +972 34 regularizer """no""" +972 34 optimizer """adadelta""" +972 34 training_loop """lcwa""" +972 34 evaluator """rankbased""" +972 35 dataset """kinships""" +972 35 model """tucker""" +972 35 loss """crossentropy""" +972 35 regularizer """no""" +972 35 optimizer """adadelta""" +972 35 training_loop """lcwa""" +972 35 evaluator """rankbased""" +972 36 dataset """kinships""" +972 36 model """tucker""" +972 36 loss """crossentropy""" +972 36 regularizer """no""" +972 36 optimizer """adadelta""" +972 36 training_loop """lcwa""" +972 36 evaluator """rankbased""" +972 37 dataset """kinships""" +972 37 model 
"""tucker""" +972 37 loss """crossentropy""" +972 37 regularizer """no""" +972 37 optimizer """adadelta""" +972 37 training_loop """lcwa""" +972 37 evaluator """rankbased""" +972 38 dataset """kinships""" +972 38 model """tucker""" +972 38 loss """crossentropy""" +972 38 regularizer """no""" +972 38 optimizer """adadelta""" +972 38 training_loop """lcwa""" +972 38 evaluator """rankbased""" +972 39 dataset """kinships""" +972 39 model """tucker""" +972 39 loss """crossentropy""" +972 39 regularizer """no""" +972 39 optimizer """adadelta""" +972 39 training_loop """lcwa""" +972 39 evaluator """rankbased""" +972 40 dataset """kinships""" +972 40 model """tucker""" +972 40 loss """crossentropy""" +972 40 regularizer """no""" +972 40 optimizer """adadelta""" +972 40 training_loop """lcwa""" +972 40 evaluator """rankbased""" +972 41 dataset """kinships""" +972 41 model """tucker""" +972 41 loss """crossentropy""" +972 41 regularizer """no""" +972 41 optimizer """adadelta""" +972 41 training_loop """lcwa""" +972 41 evaluator """rankbased""" +972 42 dataset """kinships""" +972 42 model """tucker""" +972 42 loss """crossentropy""" +972 42 regularizer """no""" +972 42 optimizer """adadelta""" +972 42 training_loop """lcwa""" +972 42 evaluator """rankbased""" +972 43 dataset """kinships""" +972 43 model """tucker""" +972 43 loss """crossentropy""" +972 43 regularizer """no""" +972 43 optimizer """adadelta""" +972 43 training_loop """lcwa""" +972 43 evaluator """rankbased""" +972 44 dataset """kinships""" +972 44 model """tucker""" +972 44 loss """crossentropy""" +972 44 regularizer """no""" +972 44 optimizer """adadelta""" +972 44 training_loop """lcwa""" +972 44 evaluator """rankbased""" +972 45 dataset """kinships""" +972 45 model """tucker""" +972 45 loss """crossentropy""" +972 45 regularizer """no""" +972 45 optimizer """adadelta""" +972 45 training_loop """lcwa""" +972 45 evaluator """rankbased""" +972 46 dataset """kinships""" +972 46 model """tucker""" +972 46 loss 
"""crossentropy""" +972 46 regularizer """no""" +972 46 optimizer """adadelta""" +972 46 training_loop """lcwa""" +972 46 evaluator """rankbased""" +972 47 dataset """kinships""" +972 47 model """tucker""" +972 47 loss """crossentropy""" +972 47 regularizer """no""" +972 47 optimizer """adadelta""" +972 47 training_loop """lcwa""" +972 47 evaluator """rankbased""" +972 48 dataset """kinships""" +972 48 model """tucker""" +972 48 loss """crossentropy""" +972 48 regularizer """no""" +972 48 optimizer """adadelta""" +972 48 training_loop """lcwa""" +972 48 evaluator """rankbased""" +972 49 dataset """kinships""" +972 49 model """tucker""" +972 49 loss """crossentropy""" +972 49 regularizer """no""" +972 49 optimizer """adadelta""" +972 49 training_loop """lcwa""" +972 49 evaluator """rankbased""" +972 50 dataset """kinships""" +972 50 model """tucker""" +972 50 loss """crossentropy""" +972 50 regularizer """no""" +972 50 optimizer """adadelta""" +972 50 training_loop """lcwa""" +972 50 evaluator """rankbased""" +972 51 dataset """kinships""" +972 51 model """tucker""" +972 51 loss """crossentropy""" +972 51 regularizer """no""" +972 51 optimizer """adadelta""" +972 51 training_loop """lcwa""" +972 51 evaluator """rankbased""" +972 52 dataset """kinships""" +972 52 model """tucker""" +972 52 loss """crossentropy""" +972 52 regularizer """no""" +972 52 optimizer """adadelta""" +972 52 training_loop """lcwa""" +972 52 evaluator """rankbased""" +972 53 dataset """kinships""" +972 53 model """tucker""" +972 53 loss """crossentropy""" +972 53 regularizer """no""" +972 53 optimizer """adadelta""" +972 53 training_loop """lcwa""" +972 53 evaluator """rankbased""" +972 54 dataset """kinships""" +972 54 model """tucker""" +972 54 loss """crossentropy""" +972 54 regularizer """no""" +972 54 optimizer """adadelta""" +972 54 training_loop """lcwa""" +972 54 evaluator """rankbased""" +972 55 dataset """kinships""" +972 55 model """tucker""" +972 55 loss """crossentropy""" +972 55 
regularizer """no""" +972 55 optimizer """adadelta""" +972 55 training_loop """lcwa""" +972 55 evaluator """rankbased""" +972 56 dataset """kinships""" +972 56 model """tucker""" +972 56 loss """crossentropy""" +972 56 regularizer """no""" +972 56 optimizer """adadelta""" +972 56 training_loop """lcwa""" +972 56 evaluator """rankbased""" +972 57 dataset """kinships""" +972 57 model """tucker""" +972 57 loss """crossentropy""" +972 57 regularizer """no""" +972 57 optimizer """adadelta""" +972 57 training_loop """lcwa""" +972 57 evaluator """rankbased""" +972 58 dataset """kinships""" +972 58 model """tucker""" +972 58 loss """crossentropy""" +972 58 regularizer """no""" +972 58 optimizer """adadelta""" +972 58 training_loop """lcwa""" +972 58 evaluator """rankbased""" +972 59 dataset """kinships""" +972 59 model """tucker""" +972 59 loss """crossentropy""" +972 59 regularizer """no""" +972 59 optimizer """adadelta""" +972 59 training_loop """lcwa""" +972 59 evaluator """rankbased""" +972 60 dataset """kinships""" +972 60 model """tucker""" +972 60 loss """crossentropy""" +972 60 regularizer """no""" +972 60 optimizer """adadelta""" +972 60 training_loop """lcwa""" +972 60 evaluator """rankbased""" +972 61 dataset """kinships""" +972 61 model """tucker""" +972 61 loss """crossentropy""" +972 61 regularizer """no""" +972 61 optimizer """adadelta""" +972 61 training_loop """lcwa""" +972 61 evaluator """rankbased""" +972 62 dataset """kinships""" +972 62 model """tucker""" +972 62 loss """crossentropy""" +972 62 regularizer """no""" +972 62 optimizer """adadelta""" +972 62 training_loop """lcwa""" +972 62 evaluator """rankbased""" +972 63 dataset """kinships""" +972 63 model """tucker""" +972 63 loss """crossentropy""" +972 63 regularizer """no""" +972 63 optimizer """adadelta""" +972 63 training_loop """lcwa""" +972 63 evaluator """rankbased""" +972 64 dataset """kinships""" +972 64 model """tucker""" +972 64 loss """crossentropy""" +972 64 regularizer """no""" +972 64 
optimizer """adadelta""" +972 64 training_loop """lcwa""" +972 64 evaluator """rankbased""" +972 65 dataset """kinships""" +972 65 model """tucker""" +972 65 loss """crossentropy""" +972 65 regularizer """no""" +972 65 optimizer """adadelta""" +972 65 training_loop """lcwa""" +972 65 evaluator """rankbased""" +972 66 dataset """kinships""" +972 66 model """tucker""" +972 66 loss """crossentropy""" +972 66 regularizer """no""" +972 66 optimizer """adadelta""" +972 66 training_loop """lcwa""" +972 66 evaluator """rankbased""" +972 67 dataset """kinships""" +972 67 model """tucker""" +972 67 loss """crossentropy""" +972 67 regularizer """no""" +972 67 optimizer """adadelta""" +972 67 training_loop """lcwa""" +972 67 evaluator """rankbased""" +972 68 dataset """kinships""" +972 68 model """tucker""" +972 68 loss """crossentropy""" +972 68 regularizer """no""" +972 68 optimizer """adadelta""" +972 68 training_loop """lcwa""" +972 68 evaluator """rankbased""" +972 69 dataset """kinships""" +972 69 model """tucker""" +972 69 loss """crossentropy""" +972 69 regularizer """no""" +972 69 optimizer """adadelta""" +972 69 training_loop """lcwa""" +972 69 evaluator """rankbased""" +972 70 dataset """kinships""" +972 70 model """tucker""" +972 70 loss """crossentropy""" +972 70 regularizer """no""" +972 70 optimizer """adadelta""" +972 70 training_loop """lcwa""" +972 70 evaluator """rankbased""" +972 71 dataset """kinships""" +972 71 model """tucker""" +972 71 loss """crossentropy""" +972 71 regularizer """no""" +972 71 optimizer """adadelta""" +972 71 training_loop """lcwa""" +972 71 evaluator """rankbased""" +972 72 dataset """kinships""" +972 72 model """tucker""" +972 72 loss """crossentropy""" +972 72 regularizer """no""" +972 72 optimizer """adadelta""" +972 72 training_loop """lcwa""" +972 72 evaluator """rankbased""" +972 73 dataset """kinships""" +972 73 model """tucker""" +972 73 loss """crossentropy""" +972 73 regularizer """no""" +972 73 optimizer """adadelta""" 
+972 73 training_loop """lcwa""" +972 73 evaluator """rankbased""" +972 74 dataset """kinships""" +972 74 model """tucker""" +972 74 loss """crossentropy""" +972 74 regularizer """no""" +972 74 optimizer """adadelta""" +972 74 training_loop """lcwa""" +972 74 evaluator """rankbased""" +972 75 dataset """kinships""" +972 75 model """tucker""" +972 75 loss """crossentropy""" +972 75 regularizer """no""" +972 75 optimizer """adadelta""" +972 75 training_loop """lcwa""" +972 75 evaluator """rankbased""" +972 76 dataset """kinships""" +972 76 model """tucker""" +972 76 loss """crossentropy""" +972 76 regularizer """no""" +972 76 optimizer """adadelta""" +972 76 training_loop """lcwa""" +972 76 evaluator """rankbased""" +972 77 dataset """kinships""" +972 77 model """tucker""" +972 77 loss """crossentropy""" +972 77 regularizer """no""" +972 77 optimizer """adadelta""" +972 77 training_loop """lcwa""" +972 77 evaluator """rankbased""" +972 78 dataset """kinships""" +972 78 model """tucker""" +972 78 loss """crossentropy""" +972 78 regularizer """no""" +972 78 optimizer """adadelta""" +972 78 training_loop """lcwa""" +972 78 evaluator """rankbased""" +972 79 dataset """kinships""" +972 79 model """tucker""" +972 79 loss """crossentropy""" +972 79 regularizer """no""" +972 79 optimizer """adadelta""" +972 79 training_loop """lcwa""" +972 79 evaluator """rankbased""" +972 80 dataset """kinships""" +972 80 model """tucker""" +972 80 loss """crossentropy""" +972 80 regularizer """no""" +972 80 optimizer """adadelta""" +972 80 training_loop """lcwa""" +972 80 evaluator """rankbased""" +972 81 dataset """kinships""" +972 81 model """tucker""" +972 81 loss """crossentropy""" +972 81 regularizer """no""" +972 81 optimizer """adadelta""" +972 81 training_loop """lcwa""" +972 81 evaluator """rankbased""" +972 82 dataset """kinships""" +972 82 model """tucker""" +972 82 loss """crossentropy""" +972 82 regularizer """no""" +972 82 optimizer """adadelta""" +972 82 training_loop 
"""lcwa""" +972 82 evaluator """rankbased""" +972 83 dataset """kinships""" +972 83 model """tucker""" +972 83 loss """crossentropy""" +972 83 regularizer """no""" +972 83 optimizer """adadelta""" +972 83 training_loop """lcwa""" +972 83 evaluator """rankbased""" +972 84 dataset """kinships""" +972 84 model """tucker""" +972 84 loss """crossentropy""" +972 84 regularizer """no""" +972 84 optimizer """adadelta""" +972 84 training_loop """lcwa""" +972 84 evaluator """rankbased""" +972 85 dataset """kinships""" +972 85 model """tucker""" +972 85 loss """crossentropy""" +972 85 regularizer """no""" +972 85 optimizer """adadelta""" +972 85 training_loop """lcwa""" +972 85 evaluator """rankbased""" +972 86 dataset """kinships""" +972 86 model """tucker""" +972 86 loss """crossentropy""" +972 86 regularizer """no""" +972 86 optimizer """adadelta""" +972 86 training_loop """lcwa""" +972 86 evaluator """rankbased""" +972 87 dataset """kinships""" +972 87 model """tucker""" +972 87 loss """crossentropy""" +972 87 regularizer """no""" +972 87 optimizer """adadelta""" +972 87 training_loop """lcwa""" +972 87 evaluator """rankbased""" +972 88 dataset """kinships""" +972 88 model """tucker""" +972 88 loss """crossentropy""" +972 88 regularizer """no""" +972 88 optimizer """adadelta""" +972 88 training_loop """lcwa""" +972 88 evaluator """rankbased""" +972 89 dataset """kinships""" +972 89 model """tucker""" +972 89 loss """crossentropy""" +972 89 regularizer """no""" +972 89 optimizer """adadelta""" +972 89 training_loop """lcwa""" +972 89 evaluator """rankbased""" +972 90 dataset """kinships""" +972 90 model """tucker""" +972 90 loss """crossentropy""" +972 90 regularizer """no""" +972 90 optimizer """adadelta""" +972 90 training_loop """lcwa""" +972 90 evaluator """rankbased""" +972 91 dataset """kinships""" +972 91 model """tucker""" +972 91 loss """crossentropy""" +972 91 regularizer """no""" +972 91 optimizer """adadelta""" +972 91 training_loop """lcwa""" +972 91 evaluator 
"""rankbased""" +972 92 dataset """kinships""" +972 92 model """tucker""" +972 92 loss """crossentropy""" +972 92 regularizer """no""" +972 92 optimizer """adadelta""" +972 92 training_loop """lcwa""" +972 92 evaluator """rankbased""" +972 93 dataset """kinships""" +972 93 model """tucker""" +972 93 loss """crossentropy""" +972 93 regularizer """no""" +972 93 optimizer """adadelta""" +972 93 training_loop """lcwa""" +972 93 evaluator """rankbased""" +972 94 dataset """kinships""" +972 94 model """tucker""" +972 94 loss """crossentropy""" +972 94 regularizer """no""" +972 94 optimizer """adadelta""" +972 94 training_loop """lcwa""" +972 94 evaluator """rankbased""" +972 95 dataset """kinships""" +972 95 model """tucker""" +972 95 loss """crossentropy""" +972 95 regularizer """no""" +972 95 optimizer """adadelta""" +972 95 training_loop """lcwa""" +972 95 evaluator """rankbased""" +972 96 dataset """kinships""" +972 96 model """tucker""" +972 96 loss """crossentropy""" +972 96 regularizer """no""" +972 96 optimizer """adadelta""" +972 96 training_loop """lcwa""" +972 96 evaluator """rankbased""" +972 97 dataset """kinships""" +972 97 model """tucker""" +972 97 loss """crossentropy""" +972 97 regularizer """no""" +972 97 optimizer """adadelta""" +972 97 training_loop """lcwa""" +972 97 evaluator """rankbased""" +972 98 dataset """kinships""" +972 98 model """tucker""" +972 98 loss """crossentropy""" +972 98 regularizer """no""" +972 98 optimizer """adadelta""" +972 98 training_loop """lcwa""" +972 98 evaluator """rankbased""" +972 99 dataset """kinships""" +972 99 model """tucker""" +972 99 loss """crossentropy""" +972 99 regularizer """no""" +972 99 optimizer """adadelta""" +972 99 training_loop """lcwa""" +972 99 evaluator """rankbased""" +972 100 dataset """kinships""" +972 100 model """tucker""" +972 100 loss """crossentropy""" +972 100 regularizer """no""" +972 100 optimizer """adadelta""" +972 100 training_loop """lcwa""" +972 100 evaluator """rankbased""" +973 
1 model.embedding_dim 2.0 +973 1 model.relation_dim 2.0 +973 1 model.dropout_0 0.18121391864115843 +973 1 model.dropout_1 0.3537703211777319 +973 1 model.dropout_2 0.23015469689662704 +973 1 optimizer.lr 0.03737242674633722 +973 1 training.batch_size 0.0 +973 1 training.label_smoothing 0.002812725138473479 +973 2 model.embedding_dim 1.0 +973 2 model.relation_dim 1.0 +973 2 model.dropout_0 0.2698103377246718 +973 2 model.dropout_1 0.25802604223280107 +973 2 model.dropout_2 0.11879007496789092 +973 2 optimizer.lr 0.0034398889871663854 +973 2 training.batch_size 2.0 +973 2 training.label_smoothing 0.4286952387669243 +973 3 model.embedding_dim 2.0 +973 3 model.relation_dim 1.0 +973 3 model.dropout_0 0.19597198298133256 +973 3 model.dropout_1 0.49096390501339615 +973 3 model.dropout_2 0.42641489023811274 +973 3 optimizer.lr 0.007139043332614437 +973 3 training.batch_size 1.0 +973 3 training.label_smoothing 0.001774916090873169 +973 4 model.embedding_dim 2.0 +973 4 model.relation_dim 0.0 +973 4 model.dropout_0 0.464596802275663 +973 4 model.dropout_1 0.39158953005745106 +973 4 model.dropout_2 0.49393070928189453 +973 4 optimizer.lr 0.01754705125193846 +973 4 training.batch_size 2.0 +973 4 training.label_smoothing 0.5161163334877175 +973 5 model.embedding_dim 0.0 +973 5 model.relation_dim 2.0 +973 5 model.dropout_0 0.24383973184655658 +973 5 model.dropout_1 0.3068730035458201 +973 5 model.dropout_2 0.3600819285084474 +973 5 optimizer.lr 0.0014261172974133892 +973 5 training.batch_size 1.0 +973 5 training.label_smoothing 0.21931449773327472 +973 6 model.embedding_dim 1.0 +973 6 model.relation_dim 2.0 +973 6 model.dropout_0 0.1691080671931518 +973 6 model.dropout_1 0.22520808554149535 +973 6 model.dropout_2 0.495168922611026 +973 6 optimizer.lr 0.04073346859015267 +973 6 training.batch_size 0.0 +973 6 training.label_smoothing 0.008420120205822442 +973 7 model.embedding_dim 1.0 +973 7 model.relation_dim 1.0 +973 7 model.dropout_0 0.1056575299440607 +973 7 model.dropout_1 
0.3575745767813042 +973 7 model.dropout_2 0.33311975807843713 +973 7 optimizer.lr 0.08098363878525645 +973 7 training.batch_size 1.0 +973 7 training.label_smoothing 0.4080230148602306 +973 8 model.embedding_dim 2.0 +973 8 model.relation_dim 2.0 +973 8 model.dropout_0 0.31233330657526065 +973 8 model.dropout_1 0.2991060295055967 +973 8 model.dropout_2 0.404884283801166 +973 8 optimizer.lr 0.05540493277169855 +973 8 training.batch_size 0.0 +973 8 training.label_smoothing 0.002936609409591133 +973 9 model.embedding_dim 1.0 +973 9 model.relation_dim 2.0 +973 9 model.dropout_0 0.22734260798234543 +973 9 model.dropout_1 0.42120005955636114 +973 9 model.dropout_2 0.1125162066109577 +973 9 optimizer.lr 0.008331901601552047 +973 9 training.batch_size 1.0 +973 9 training.label_smoothing 0.013262853127895503 +973 10 model.embedding_dim 0.0 +973 10 model.relation_dim 2.0 +973 10 model.dropout_0 0.29642938743310715 +973 10 model.dropout_1 0.2567396414028473 +973 10 model.dropout_2 0.47047198155118536 +973 10 optimizer.lr 0.0017051014853753986 +973 10 training.batch_size 2.0 +973 10 training.label_smoothing 0.0034557473230560822 +973 11 model.embedding_dim 1.0 +973 11 model.relation_dim 0.0 +973 11 model.dropout_0 0.47979268165073025 +973 11 model.dropout_1 0.49548474915575996 +973 11 model.dropout_2 0.15584686334565437 +973 11 optimizer.lr 0.02229164889514297 +973 11 training.batch_size 0.0 +973 11 training.label_smoothing 0.1342829175111403 +973 12 model.embedding_dim 0.0 +973 12 model.relation_dim 1.0 +973 12 model.dropout_0 0.28834354125668554 +973 12 model.dropout_1 0.25094123523614836 +973 12 model.dropout_2 0.3805370857629595 +973 12 optimizer.lr 0.09016005982563693 +973 12 training.batch_size 0.0 +973 12 training.label_smoothing 0.029934975203962225 +973 13 model.embedding_dim 0.0 +973 13 model.relation_dim 0.0 +973 13 model.dropout_0 0.18570230371985574 +973 13 model.dropout_1 0.48896298220262624 +973 13 model.dropout_2 0.21443980314621955 +973 13 optimizer.lr 
0.0013900124595726548 +973 13 training.batch_size 0.0 +973 13 training.label_smoothing 0.0011048399699553934 +973 14 model.embedding_dim 2.0 +973 14 model.relation_dim 0.0 +973 14 model.dropout_0 0.4219796373601037 +973 14 model.dropout_1 0.459538290062411 +973 14 model.dropout_2 0.2535666247516391 +973 14 optimizer.lr 0.03403811646507772 +973 14 training.batch_size 0.0 +973 14 training.label_smoothing 0.04447887471952347 +973 15 model.embedding_dim 1.0 +973 15 model.relation_dim 2.0 +973 15 model.dropout_0 0.48496284209082896 +973 15 model.dropout_1 0.4677410443354797 +973 15 model.dropout_2 0.31039908700107005 +973 15 optimizer.lr 0.006213454980859535 +973 15 training.batch_size 0.0 +973 15 training.label_smoothing 0.07691387380138588 +973 16 model.embedding_dim 1.0 +973 16 model.relation_dim 1.0 +973 16 model.dropout_0 0.3891740690989323 +973 16 model.dropout_1 0.29256589250320286 +973 16 model.dropout_2 0.24966365473724572 +973 16 optimizer.lr 0.008084243194138028 +973 16 training.batch_size 1.0 +973 16 training.label_smoothing 0.003199925840316334 +973 17 model.embedding_dim 2.0 +973 17 model.relation_dim 1.0 +973 17 model.dropout_0 0.2562422631942554 +973 17 model.dropout_1 0.21268555528503985 +973 17 model.dropout_2 0.303874574796255 +973 17 optimizer.lr 0.0020493714580558657 +973 17 training.batch_size 2.0 +973 17 training.label_smoothing 0.0010777680725454514 +973 18 model.embedding_dim 0.0 +973 18 model.relation_dim 0.0 +973 18 model.dropout_0 0.42404868583313143 +973 18 model.dropout_1 0.25578145923340884 +973 18 model.dropout_2 0.20504378100995968 +973 18 optimizer.lr 0.07444233844146932 +973 18 training.batch_size 0.0 +973 18 training.label_smoothing 0.0030069707955425475 +973 19 model.embedding_dim 0.0 +973 19 model.relation_dim 2.0 +973 19 model.dropout_0 0.18653832514992819 +973 19 model.dropout_1 0.2670211254915647 +973 19 model.dropout_2 0.44568519953559704 +973 19 optimizer.lr 0.0038371015613701837 +973 19 training.batch_size 0.0 +973 19 
training.label_smoothing 0.005509865538067162 +973 20 model.embedding_dim 1.0 +973 20 model.relation_dim 0.0 +973 20 model.dropout_0 0.41102545093940107 +973 20 model.dropout_1 0.46787068949721444 +973 20 model.dropout_2 0.4630947774806668 +973 20 optimizer.lr 0.021474633921620714 +973 20 training.batch_size 0.0 +973 20 training.label_smoothing 0.005567025999616407 +973 21 model.embedding_dim 0.0 +973 21 model.relation_dim 1.0 +973 21 model.dropout_0 0.48896275308638515 +973 21 model.dropout_1 0.1442895522055266 +973 21 model.dropout_2 0.18757100026929774 +973 21 optimizer.lr 0.0029771360858427084 +973 21 training.batch_size 0.0 +973 21 training.label_smoothing 0.635965609214288 +973 22 model.embedding_dim 2.0 +973 22 model.relation_dim 2.0 +973 22 model.dropout_0 0.24743413428458882 +973 22 model.dropout_1 0.14660059263847397 +973 22 model.dropout_2 0.1631210428417684 +973 22 optimizer.lr 0.02167952792171221 +973 22 training.batch_size 0.0 +973 22 training.label_smoothing 0.3358504165539937 +973 23 model.embedding_dim 0.0 +973 23 model.relation_dim 1.0 +973 23 model.dropout_0 0.21104506087589137 +973 23 model.dropout_1 0.30246080075322906 +973 23 model.dropout_2 0.3997339893823446 +973 23 optimizer.lr 0.002880232323512212 +973 23 training.batch_size 1.0 +973 23 training.label_smoothing 0.05159514888630165 +973 24 model.embedding_dim 1.0 +973 24 model.relation_dim 1.0 +973 24 model.dropout_0 0.3353659799163556 +973 24 model.dropout_1 0.1697594202448358 +973 24 model.dropout_2 0.34560154021717404 +973 24 optimizer.lr 0.026612314528869797 +973 24 training.batch_size 1.0 +973 24 training.label_smoothing 0.0014472509224971119 +973 25 model.embedding_dim 2.0 +973 25 model.relation_dim 2.0 +973 25 model.dropout_0 0.38897736846828607 +973 25 model.dropout_1 0.1953904195452744 +973 25 model.dropout_2 0.16271838168247965 +973 25 optimizer.lr 0.004114833473262558 +973 25 training.batch_size 2.0 +973 25 training.label_smoothing 0.0029293862865373862 +973 26 
model.embedding_dim 1.0 +973 26 model.relation_dim 0.0 +973 26 model.dropout_0 0.4498300104961071 +973 26 model.dropout_1 0.11587226253441144 +973 26 model.dropout_2 0.16261167925212583 +973 26 optimizer.lr 0.003122867827606199 +973 26 training.batch_size 0.0 +973 26 training.label_smoothing 0.02949150938760455 +973 27 model.embedding_dim 1.0 +973 27 model.relation_dim 2.0 +973 27 model.dropout_0 0.22138970579422934 +973 27 model.dropout_1 0.4184051859489128 +973 27 model.dropout_2 0.1131122781014689 +973 27 optimizer.lr 0.01045393274061927 +973 27 training.batch_size 1.0 +973 27 training.label_smoothing 0.12523436040531363 +973 28 model.embedding_dim 1.0 +973 28 model.relation_dim 1.0 +973 28 model.dropout_0 0.3422381349043475 +973 28 model.dropout_1 0.49753624525509554 +973 28 model.dropout_2 0.15841818706185187 +973 28 optimizer.lr 0.01734306084402533 +973 28 training.batch_size 1.0 +973 28 training.label_smoothing 0.06053603562538853 +973 29 model.embedding_dim 1.0 +973 29 model.relation_dim 2.0 +973 29 model.dropout_0 0.44596715787481334 +973 29 model.dropout_1 0.10489675122220513 +973 29 model.dropout_2 0.24161868915620072 +973 29 optimizer.lr 0.05383523833922968 +973 29 training.batch_size 1.0 +973 29 training.label_smoothing 0.006336187936754725 +973 30 model.embedding_dim 2.0 +973 30 model.relation_dim 2.0 +973 30 model.dropout_0 0.4302521095740104 +973 30 model.dropout_1 0.2827222462498847 +973 30 model.dropout_2 0.20928938951741927 +973 30 optimizer.lr 0.020843678554657883 +973 30 training.batch_size 2.0 +973 30 training.label_smoothing 0.03770738348915936 +973 31 model.embedding_dim 2.0 +973 31 model.relation_dim 2.0 +973 31 model.dropout_0 0.21131821574241383 +973 31 model.dropout_1 0.2576987487230686 +973 31 model.dropout_2 0.2758636553647382 +973 31 optimizer.lr 0.004327876775469225 +973 31 training.batch_size 1.0 +973 31 training.label_smoothing 0.001211426071924939 +973 32 model.embedding_dim 1.0 +973 32 model.relation_dim 1.0 +973 32 
model.dropout_0 0.21284710915094296 +973 32 model.dropout_1 0.47413032998767973 +973 32 model.dropout_2 0.1927940955352089 +973 32 optimizer.lr 0.07294736829090355 +973 32 training.batch_size 2.0 +973 32 training.label_smoothing 0.0014985952415027615 +973 33 model.embedding_dim 0.0 +973 33 model.relation_dim 0.0 +973 33 model.dropout_0 0.14804668075084032 +973 33 model.dropout_1 0.2208022315814951 +973 33 model.dropout_2 0.12635983434263381 +973 33 optimizer.lr 0.04400425366378013 +973 33 training.batch_size 1.0 +973 33 training.label_smoothing 0.0014329286780273012 +973 34 model.embedding_dim 2.0 +973 34 model.relation_dim 1.0 +973 34 model.dropout_0 0.151947969011338 +973 34 model.dropout_1 0.2073770463376953 +973 34 model.dropout_2 0.34164832136099643 +973 34 optimizer.lr 0.05124066641846939 +973 34 training.batch_size 2.0 +973 34 training.label_smoothing 0.1535034955538075 +973 35 model.embedding_dim 1.0 +973 35 model.relation_dim 0.0 +973 35 model.dropout_0 0.365032441773101 +973 35 model.dropout_1 0.26245292325834013 +973 35 model.dropout_2 0.1035084948219756 +973 35 optimizer.lr 0.06621715988735302 +973 35 training.batch_size 2.0 +973 35 training.label_smoothing 0.025124810817545567 +973 36 model.embedding_dim 2.0 +973 36 model.relation_dim 1.0 +973 36 model.dropout_0 0.3557968325158404 +973 36 model.dropout_1 0.34983556817916844 +973 36 model.dropout_2 0.24053299286785867 +973 36 optimizer.lr 0.08351727730378826 +973 36 training.batch_size 0.0 +973 36 training.label_smoothing 0.012688777132897363 +973 37 model.embedding_dim 1.0 +973 37 model.relation_dim 1.0 +973 37 model.dropout_0 0.4035009778840195 +973 37 model.dropout_1 0.3714875068368697 +973 37 model.dropout_2 0.2522487535316425 +973 37 optimizer.lr 0.0638211323850506 +973 37 training.batch_size 0.0 +973 37 training.label_smoothing 0.012930764512899484 +973 38 model.embedding_dim 2.0 +973 38 model.relation_dim 0.0 +973 38 model.dropout_0 0.21395762814431613 +973 38 model.dropout_1 0.1358772010689949 
+973 38 model.dropout_2 0.15110038944573692 +973 38 optimizer.lr 0.05685554271896095 +973 38 training.batch_size 1.0 +973 38 training.label_smoothing 0.02057638179366055 +973 39 model.embedding_dim 2.0 +973 39 model.relation_dim 1.0 +973 39 model.dropout_0 0.2978632484662784 +973 39 model.dropout_1 0.13013213489737718 +973 39 model.dropout_2 0.14520232857277843 +973 39 optimizer.lr 0.0012869790990836809 +973 39 training.batch_size 2.0 +973 39 training.label_smoothing 0.17581082058531758 +973 40 model.embedding_dim 2.0 +973 40 model.relation_dim 0.0 +973 40 model.dropout_0 0.3881249990673686 +973 40 model.dropout_1 0.323408343775889 +973 40 model.dropout_2 0.33638106681292723 +973 40 optimizer.lr 0.002034880723903848 +973 40 training.batch_size 1.0 +973 40 training.label_smoothing 0.0020779158862311354 +973 41 model.embedding_dim 0.0 +973 41 model.relation_dim 2.0 +973 41 model.dropout_0 0.2733601207479402 +973 41 model.dropout_1 0.28456987389370303 +973 41 model.dropout_2 0.45215460392024687 +973 41 optimizer.lr 0.011584389642293245 +973 41 training.batch_size 1.0 +973 41 training.label_smoothing 0.021377958721318875 +973 42 model.embedding_dim 2.0 +973 42 model.relation_dim 2.0 +973 42 model.dropout_0 0.48695406821804715 +973 42 model.dropout_1 0.27666987893988676 +973 42 model.dropout_2 0.41341688303036783 +973 42 optimizer.lr 0.005459061922171298 +973 42 training.batch_size 2.0 +973 42 training.label_smoothing 0.007438458711835446 +973 43 model.embedding_dim 2.0 +973 43 model.relation_dim 2.0 +973 43 model.dropout_0 0.38172835559942864 +973 43 model.dropout_1 0.18460759445290975 +973 43 model.dropout_2 0.24515509096133312 +973 43 optimizer.lr 0.020899751643400456 +973 43 training.batch_size 2.0 +973 43 training.label_smoothing 0.006076423455500246 +973 44 model.embedding_dim 1.0 +973 44 model.relation_dim 0.0 +973 44 model.dropout_0 0.3863459312438069 +973 44 model.dropout_1 0.11948832219960322 +973 44 model.dropout_2 0.3718459191999015 +973 44 optimizer.lr 
0.02805012782430863 +973 44 training.batch_size 2.0 +973 44 training.label_smoothing 0.0011373180975784605 +973 45 model.embedding_dim 0.0 +973 45 model.relation_dim 1.0 +973 45 model.dropout_0 0.4965356786007704 +973 45 model.dropout_1 0.17712460136438357 +973 45 model.dropout_2 0.10753469492196382 +973 45 optimizer.lr 0.0013276047384219378 +973 45 training.batch_size 0.0 +973 45 training.label_smoothing 0.12852631392314667 +973 46 model.embedding_dim 0.0 +973 46 model.relation_dim 2.0 +973 46 model.dropout_0 0.43433645906793256 +973 46 model.dropout_1 0.32714365020679204 +973 46 model.dropout_2 0.20376534989858422 +973 46 optimizer.lr 0.0064240448290822875 +973 46 training.batch_size 2.0 +973 46 training.label_smoothing 0.003027978533256547 +973 47 model.embedding_dim 1.0 +973 47 model.relation_dim 1.0 +973 47 model.dropout_0 0.2747282368671257 +973 47 model.dropout_1 0.28990060564769066 +973 47 model.dropout_2 0.47775069825830296 +973 47 optimizer.lr 0.07247717281745455 +973 47 training.batch_size 2.0 +973 47 training.label_smoothing 0.0029882958171910247 +973 48 model.embedding_dim 1.0 +973 48 model.relation_dim 1.0 +973 48 model.dropout_0 0.26996997617547397 +973 48 model.dropout_1 0.15686006357541504 +973 48 model.dropout_2 0.31643005816673586 +973 48 optimizer.lr 0.07676321573189383 +973 48 training.batch_size 2.0 +973 48 training.label_smoothing 0.07344167175601314 +973 49 model.embedding_dim 2.0 +973 49 model.relation_dim 1.0 +973 49 model.dropout_0 0.2010994524725374 +973 49 model.dropout_1 0.29186172850579883 +973 49 model.dropout_2 0.16608128404133043 +973 49 optimizer.lr 0.004569431889041098 +973 49 training.batch_size 0.0 +973 49 training.label_smoothing 0.026668469918086718 +973 50 model.embedding_dim 2.0 +973 50 model.relation_dim 0.0 +973 50 model.dropout_0 0.3570963556380709 +973 50 model.dropout_1 0.3759727240744473 +973 50 model.dropout_2 0.1456418783114309 +973 50 optimizer.lr 0.010189623493524357 +973 50 training.batch_size 0.0 +973 50 
training.label_smoothing 0.049914146767894976 +973 51 model.embedding_dim 1.0 +973 51 model.relation_dim 1.0 +973 51 model.dropout_0 0.362371704203938 +973 51 model.dropout_1 0.43990872329539166 +973 51 model.dropout_2 0.42405504023668894 +973 51 optimizer.lr 0.09937465337662436 +973 51 training.batch_size 2.0 +973 51 training.label_smoothing 0.006101723131790391 +973 52 model.embedding_dim 1.0 +973 52 model.relation_dim 2.0 +973 52 model.dropout_0 0.38409664634481744 +973 52 model.dropout_1 0.3475263235684969 +973 52 model.dropout_2 0.12038364924457344 +973 52 optimizer.lr 0.003299194373323959 +973 52 training.batch_size 0.0 +973 52 training.label_smoothing 0.0050602632161434805 +973 53 model.embedding_dim 2.0 +973 53 model.relation_dim 2.0 +973 53 model.dropout_0 0.48187847930490774 +973 53 model.dropout_1 0.47089068915985527 +973 53 model.dropout_2 0.4005381948932115 +973 53 optimizer.lr 0.002698136647116358 +973 53 training.batch_size 2.0 +973 53 training.label_smoothing 0.006600805699049675 +973 54 model.embedding_dim 2.0 +973 54 model.relation_dim 2.0 +973 54 model.dropout_0 0.45037436866021596 +973 54 model.dropout_1 0.3770235997097999 +973 54 model.dropout_2 0.30493177361285223 +973 54 optimizer.lr 0.05702399144437903 +973 54 training.batch_size 2.0 +973 54 training.label_smoothing 0.4222480482563077 +973 55 model.embedding_dim 0.0 +973 55 model.relation_dim 0.0 +973 55 model.dropout_0 0.1296981883940521 +973 55 model.dropout_1 0.2613196638559768 +973 55 model.dropout_2 0.4445102596728738 +973 55 optimizer.lr 0.0021180894962927895 +973 55 training.batch_size 1.0 +973 55 training.label_smoothing 0.023157197207135063 +973 56 model.embedding_dim 1.0 +973 56 model.relation_dim 2.0 +973 56 model.dropout_0 0.3737901801189034 +973 56 model.dropout_1 0.33360395963729383 +973 56 model.dropout_2 0.4351090652667113 +973 56 optimizer.lr 0.01923516366978302 +973 56 training.batch_size 0.0 +973 56 training.label_smoothing 0.32784920791549743 +973 57 model.embedding_dim 
1.0 +973 57 model.relation_dim 2.0 +973 57 model.dropout_0 0.41710718141617853 +973 57 model.dropout_1 0.37491770765987753 +973 57 model.dropout_2 0.2586301261563069 +973 57 optimizer.lr 0.021374951125911555 +973 57 training.batch_size 1.0 +973 57 training.label_smoothing 0.3238800231876369 +973 58 model.embedding_dim 0.0 +973 58 model.relation_dim 2.0 +973 58 model.dropout_0 0.19720955193369294 +973 58 model.dropout_1 0.4484186877789119 +973 58 model.dropout_2 0.10830541453569698 +973 58 optimizer.lr 0.004037522718892649 +973 58 training.batch_size 0.0 +973 58 training.label_smoothing 0.24256576036755836 +973 59 model.embedding_dim 1.0 +973 59 model.relation_dim 0.0 +973 59 model.dropout_0 0.2061444823228223 +973 59 model.dropout_1 0.2931509158238283 +973 59 model.dropout_2 0.19834442232796326 +973 59 optimizer.lr 0.030002700646644787 +973 59 training.batch_size 0.0 +973 59 training.label_smoothing 0.2794341522955933 +973 60 model.embedding_dim 0.0 +973 60 model.relation_dim 1.0 +973 60 model.dropout_0 0.43448874102409163 +973 60 model.dropout_1 0.28632043025347126 +973 60 model.dropout_2 0.18364325634141732 +973 60 optimizer.lr 0.01856098229195795 +973 60 training.batch_size 1.0 +973 60 training.label_smoothing 0.3267923479347523 +973 61 model.embedding_dim 2.0 +973 61 model.relation_dim 0.0 +973 61 model.dropout_0 0.25748232169460117 +973 61 model.dropout_1 0.42946896435497606 +973 61 model.dropout_2 0.28287370472965073 +973 61 optimizer.lr 0.0443164942826773 +973 61 training.batch_size 0.0 +973 61 training.label_smoothing 0.023924063724244157 +973 62 model.embedding_dim 0.0 +973 62 model.relation_dim 0.0 +973 62 model.dropout_0 0.25079782478361995 +973 62 model.dropout_1 0.30378315474833906 +973 62 model.dropout_2 0.34493073881460895 +973 62 optimizer.lr 0.03336747188880233 +973 62 training.batch_size 0.0 +973 62 training.label_smoothing 0.01220057638046201 +973 63 model.embedding_dim 2.0 +973 63 model.relation_dim 1.0 +973 63 model.dropout_0 
0.26976937126854605 +973 63 model.dropout_1 0.3531760586673861 +973 63 model.dropout_2 0.28886525672287655 +973 63 optimizer.lr 0.01359536165798036 +973 63 training.batch_size 0.0 +973 63 training.label_smoothing 0.844862546894371 +973 64 model.embedding_dim 2.0 +973 64 model.relation_dim 2.0 +973 64 model.dropout_0 0.1565128231841984 +973 64 model.dropout_1 0.37554000939672405 +973 64 model.dropout_2 0.4784519946791909 +973 64 optimizer.lr 0.00798839137045011 +973 64 training.batch_size 0.0 +973 64 training.label_smoothing 0.009423115560021934 +973 65 model.embedding_dim 0.0 +973 65 model.relation_dim 1.0 +973 65 model.dropout_0 0.25759359960512845 +973 65 model.dropout_1 0.49021998559561686 +973 65 model.dropout_2 0.4486358876699513 +973 65 optimizer.lr 0.0014153819347900037 +973 65 training.batch_size 0.0 +973 65 training.label_smoothing 0.49724882309015117 +973 66 model.embedding_dim 0.0 +973 66 model.relation_dim 2.0 +973 66 model.dropout_0 0.32784156366982004 +973 66 model.dropout_1 0.16334365412989305 +973 66 model.dropout_2 0.2550667028265025 +973 66 optimizer.lr 0.0070426942170710325 +973 66 training.batch_size 2.0 +973 66 training.label_smoothing 0.08605395237182609 +973 67 model.embedding_dim 1.0 +973 67 model.relation_dim 2.0 +973 67 model.dropout_0 0.4795740693863485 +973 67 model.dropout_1 0.3407026902274658 +973 67 model.dropout_2 0.42589550555384514 +973 67 optimizer.lr 0.007167102425418015 +973 67 training.batch_size 0.0 +973 67 training.label_smoothing 0.05980769932462369 +973 68 model.embedding_dim 2.0 +973 68 model.relation_dim 1.0 +973 68 model.dropout_0 0.11841160118122139 +973 68 model.dropout_1 0.4069303654438494 +973 68 model.dropout_2 0.1756792408009861 +973 68 optimizer.lr 0.003257139925795008 +973 68 training.batch_size 2.0 +973 68 training.label_smoothing 0.10918501253822968 +973 69 model.embedding_dim 0.0 +973 69 model.relation_dim 1.0 +973 69 model.dropout_0 0.3415653049026234 +973 69 model.dropout_1 0.3947196738536844 +973 69 
model.dropout_2 0.29558809484435383 +973 69 optimizer.lr 0.030247948027207556 +973 69 training.batch_size 2.0 +973 69 training.label_smoothing 0.09200618172164886 +973 70 model.embedding_dim 2.0 +973 70 model.relation_dim 0.0 +973 70 model.dropout_0 0.19256325491779896 +973 70 model.dropout_1 0.35371647024752506 +973 70 model.dropout_2 0.19926978628787434 +973 70 optimizer.lr 0.003376473814316555 +973 70 training.batch_size 1.0 +973 70 training.label_smoothing 0.41880872512796874 +973 71 model.embedding_dim 0.0 +973 71 model.relation_dim 2.0 +973 71 model.dropout_0 0.37082160110253526 +973 71 model.dropout_1 0.4758762435894667 +973 71 model.dropout_2 0.2892878192091722 +973 71 optimizer.lr 0.0071462733478668645 +973 71 training.batch_size 1.0 +973 71 training.label_smoothing 0.004242342679337742 +973 72 model.embedding_dim 0.0 +973 72 model.relation_dim 2.0 +973 72 model.dropout_0 0.24911438753067416 +973 72 model.dropout_1 0.4349053957606512 +973 72 model.dropout_2 0.23408327617377675 +973 72 optimizer.lr 0.028212679505727336 +973 72 training.batch_size 1.0 +973 72 training.label_smoothing 0.9917848088046582 +973 73 model.embedding_dim 0.0 +973 73 model.relation_dim 0.0 +973 73 model.dropout_0 0.345211289535541 +973 73 model.dropout_1 0.3858833644804264 +973 73 model.dropout_2 0.44365930101175943 +973 73 optimizer.lr 0.005920831348215782 +973 73 training.batch_size 2.0 +973 73 training.label_smoothing 0.0031283143853457324 +973 74 model.embedding_dim 0.0 +973 74 model.relation_dim 2.0 +973 74 model.dropout_0 0.19720657900917415 +973 74 model.dropout_1 0.35049936636619927 +973 74 model.dropout_2 0.3736174031619294 +973 74 optimizer.lr 0.001601407020884144 +973 74 training.batch_size 2.0 +973 74 training.label_smoothing 0.005889830412290659 +973 75 model.embedding_dim 2.0 +973 75 model.relation_dim 1.0 +973 75 model.dropout_0 0.3058847314394717 +973 75 model.dropout_1 0.16498202257420755 +973 75 model.dropout_2 0.14191862315867768 +973 75 optimizer.lr 
0.016919707186931834 +973 75 training.batch_size 0.0 +973 75 training.label_smoothing 0.7295392952309658 +973 76 model.embedding_dim 1.0 +973 76 model.relation_dim 2.0 +973 76 model.dropout_0 0.16529292948211577 +973 76 model.dropout_1 0.4293750475214269 +973 76 model.dropout_2 0.4656421341647811 +973 76 optimizer.lr 0.004498896201778955 +973 76 training.batch_size 1.0 +973 76 training.label_smoothing 0.01712392750212791 +973 77 model.embedding_dim 0.0 +973 77 model.relation_dim 0.0 +973 77 model.dropout_0 0.29296777675202396 +973 77 model.dropout_1 0.33233819779862483 +973 77 model.dropout_2 0.10460437805809462 +973 77 optimizer.lr 0.001450606619141883 +973 77 training.batch_size 1.0 +973 77 training.label_smoothing 0.015998391198177864 +973 78 model.embedding_dim 0.0 +973 78 model.relation_dim 1.0 +973 78 model.dropout_0 0.3385821290326786 +973 78 model.dropout_1 0.4398514159962129 +973 78 model.dropout_2 0.42898107502655924 +973 78 optimizer.lr 0.019890120729417173 +973 78 training.batch_size 1.0 +973 78 training.label_smoothing 0.36078680077339603 +973 79 model.embedding_dim 2.0 +973 79 model.relation_dim 1.0 +973 79 model.dropout_0 0.17584970444664685 +973 79 model.dropout_1 0.27669047884064024 +973 79 model.dropout_2 0.23780845564631306 +973 79 optimizer.lr 0.004714084932921574 +973 79 training.batch_size 0.0 +973 79 training.label_smoothing 0.09873776167767548 +973 80 model.embedding_dim 1.0 +973 80 model.relation_dim 2.0 +973 80 model.dropout_0 0.4281973250330145 +973 80 model.dropout_1 0.17893393563106513 +973 80 model.dropout_2 0.15220188583114186 +973 80 optimizer.lr 0.043937514931054 +973 80 training.batch_size 0.0 +973 80 training.label_smoothing 0.03091476498079147 +973 81 model.embedding_dim 2.0 +973 81 model.relation_dim 1.0 +973 81 model.dropout_0 0.40801735140789747 +973 81 model.dropout_1 0.48983429349440355 +973 81 model.dropout_2 0.2417399530137556 +973 81 optimizer.lr 0.0029319999189881167 +973 81 training.batch_size 0.0 +973 81 
training.label_smoothing 0.17174096460830487 +973 82 model.embedding_dim 2.0 +973 82 model.relation_dim 1.0 +973 82 model.dropout_0 0.3442357248765098 +973 82 model.dropout_1 0.25269484599713643 +973 82 model.dropout_2 0.46501215170542654 +973 82 optimizer.lr 0.014082751900310336 +973 82 training.batch_size 2.0 +973 82 training.label_smoothing 0.030166778358203944 +973 83 model.embedding_dim 2.0 +973 83 model.relation_dim 2.0 +973 83 model.dropout_0 0.16853550200252487 +973 83 model.dropout_1 0.10160057787549054 +973 83 model.dropout_2 0.40453187566819665 +973 83 optimizer.lr 0.002078714877034983 +973 83 training.batch_size 1.0 +973 83 training.label_smoothing 0.011437423146415476 +973 84 model.embedding_dim 2.0 +973 84 model.relation_dim 0.0 +973 84 model.dropout_0 0.24504398882294193 +973 84 model.dropout_1 0.45614725257962097 +973 84 model.dropout_2 0.39227730051742027 +973 84 optimizer.lr 0.0032774188179621622 +973 84 training.batch_size 1.0 +973 84 training.label_smoothing 0.12363522480276773 +973 85 model.embedding_dim 0.0 +973 85 model.relation_dim 2.0 +973 85 model.dropout_0 0.35846967103279853 +973 85 model.dropout_1 0.39821448558020706 +973 85 model.dropout_2 0.23212462954170246 +973 85 optimizer.lr 0.007994527099979734 +973 85 training.batch_size 0.0 +973 85 training.label_smoothing 0.014105225049708191 +973 86 model.embedding_dim 2.0 +973 86 model.relation_dim 0.0 +973 86 model.dropout_0 0.33919179884809203 +973 86 model.dropout_1 0.210483965716989 +973 86 model.dropout_2 0.1075142168137909 +973 86 optimizer.lr 0.042453050206924685 +973 86 training.batch_size 2.0 +973 86 training.label_smoothing 0.00452141769340934 +973 87 model.embedding_dim 0.0 +973 87 model.relation_dim 1.0 +973 87 model.dropout_0 0.28142261481219455 +973 87 model.dropout_1 0.3213536458362114 +973 87 model.dropout_2 0.19567950587187735 +973 87 optimizer.lr 0.02184416636048745 +973 87 training.batch_size 0.0 +973 87 training.label_smoothing 0.02501631663624947 +973 88 
model.embedding_dim 1.0 +973 88 model.relation_dim 1.0 +973 88 model.dropout_0 0.4238134263602389 +973 88 model.dropout_1 0.22369937936679296 +973 88 model.dropout_2 0.327856572908352 +973 88 optimizer.lr 0.0014465091421169338 +973 88 training.batch_size 0.0 +973 88 training.label_smoothing 0.27765615239979313 +973 89 model.embedding_dim 2.0 +973 89 model.relation_dim 0.0 +973 89 model.dropout_0 0.16084278063159113 +973 89 model.dropout_1 0.28347143785982176 +973 89 model.dropout_2 0.14175499275738654 +973 89 optimizer.lr 0.00500489080466903 +973 89 training.batch_size 2.0 +973 89 training.label_smoothing 0.04674405235297904 +973 90 model.embedding_dim 1.0 +973 90 model.relation_dim 0.0 +973 90 model.dropout_0 0.18257453693668438 +973 90 model.dropout_1 0.45105131882088945 +973 90 model.dropout_2 0.11624903567760053 +973 90 optimizer.lr 0.0012590875066130164 +973 90 training.batch_size 1.0 +973 90 training.label_smoothing 0.11259479159619978 +973 91 model.embedding_dim 2.0 +973 91 model.relation_dim 2.0 +973 91 model.dropout_0 0.20791322778256519 +973 91 model.dropout_1 0.4222070282803435 +973 91 model.dropout_2 0.41645112851271066 +973 91 optimizer.lr 0.08363809102178309 +973 91 training.batch_size 0.0 +973 91 training.label_smoothing 0.020657985673156062 +973 92 model.embedding_dim 2.0 +973 92 model.relation_dim 2.0 +973 92 model.dropout_0 0.17838591032221696 +973 92 model.dropout_1 0.48505006819931773 +973 92 model.dropout_2 0.1303469654329373 +973 92 optimizer.lr 0.016678091263269405 +973 92 training.batch_size 0.0 +973 92 training.label_smoothing 0.6934823486435028 +973 93 model.embedding_dim 1.0 +973 93 model.relation_dim 1.0 +973 93 model.dropout_0 0.25938770857364624 +973 93 model.dropout_1 0.11180740820321203 +973 93 model.dropout_2 0.46041289479520664 +973 93 optimizer.lr 0.0018224571531537718 +973 93 training.batch_size 1.0 +973 93 training.label_smoothing 0.0033808260870179197 +973 94 model.embedding_dim 0.0 +973 94 model.relation_dim 2.0 +973 94 
model.dropout_0 0.20604507080480974 +973 94 model.dropout_1 0.4472506287043494 +973 94 model.dropout_2 0.472364258315996 +973 94 optimizer.lr 0.021370761468231393 +973 94 training.batch_size 1.0 +973 94 training.label_smoothing 0.23910395841426693 +973 95 model.embedding_dim 2.0 +973 95 model.relation_dim 2.0 +973 95 model.dropout_0 0.17248300837359987 +973 95 model.dropout_1 0.22556170911866338 +973 95 model.dropout_2 0.2013237175919662 +973 95 optimizer.lr 0.009922248045866697 +973 95 training.batch_size 2.0 +973 95 training.label_smoothing 0.11666600653953155 +973 96 model.embedding_dim 2.0 +973 96 model.relation_dim 1.0 +973 96 model.dropout_0 0.4281856517595362 +973 96 model.dropout_1 0.15388020109779532 +973 96 model.dropout_2 0.2525423489161332 +973 96 optimizer.lr 0.0014935587229341087 +973 96 training.batch_size 0.0 +973 96 training.label_smoothing 0.9690441923529801 +973 97 model.embedding_dim 2.0 +973 97 model.relation_dim 0.0 +973 97 model.dropout_0 0.4630845242563163 +973 97 model.dropout_1 0.3787589518802684 +973 97 model.dropout_2 0.1787507692439083 +973 97 optimizer.lr 0.02279620595435459 +973 97 training.batch_size 1.0 +973 97 training.label_smoothing 0.3896544615246616 +973 98 model.embedding_dim 0.0 +973 98 model.relation_dim 1.0 +973 98 model.dropout_0 0.1356631508790905 +973 98 model.dropout_1 0.47363705950772467 +973 98 model.dropout_2 0.16772999773841657 +973 98 optimizer.lr 0.0024824149599228453 +973 98 training.batch_size 0.0 +973 98 training.label_smoothing 0.0035623767918865896 +973 99 model.embedding_dim 0.0 +973 99 model.relation_dim 2.0 +973 99 model.dropout_0 0.4655327591124106 +973 99 model.dropout_1 0.38002555260959997 +973 99 model.dropout_2 0.31039198659812517 +973 99 optimizer.lr 0.0020816812907921305 +973 99 training.batch_size 2.0 +973 99 training.label_smoothing 0.048782602797141535 +973 100 model.embedding_dim 1.0 +973 100 model.relation_dim 0.0 +973 100 model.dropout_0 0.3155535166545091 +973 100 model.dropout_1 
0.31456604087975637 +973 100 model.dropout_2 0.13669464877487805 +973 100 optimizer.lr 0.05064620770280874 +973 100 training.batch_size 2.0 +973 100 training.label_smoothing 0.22967557370418343 +973 1 dataset """kinships""" +973 1 model """tucker""" +973 1 loss """crossentropy""" +973 1 regularizer """no""" +973 1 optimizer """adam""" +973 1 training_loop """lcwa""" +973 1 evaluator """rankbased""" +973 2 dataset """kinships""" +973 2 model """tucker""" +973 2 loss """crossentropy""" +973 2 regularizer """no""" +973 2 optimizer """adam""" +973 2 training_loop """lcwa""" +973 2 evaluator """rankbased""" +973 3 dataset """kinships""" +973 3 model """tucker""" +973 3 loss """crossentropy""" +973 3 regularizer """no""" +973 3 optimizer """adam""" +973 3 training_loop """lcwa""" +973 3 evaluator """rankbased""" +973 4 dataset """kinships""" +973 4 model """tucker""" +973 4 loss """crossentropy""" +973 4 regularizer """no""" +973 4 optimizer """adam""" +973 4 training_loop """lcwa""" +973 4 evaluator """rankbased""" +973 5 dataset """kinships""" +973 5 model """tucker""" +973 5 loss """crossentropy""" +973 5 regularizer """no""" +973 5 optimizer """adam""" +973 5 training_loop """lcwa""" +973 5 evaluator """rankbased""" +973 6 dataset """kinships""" +973 6 model """tucker""" +973 6 loss """crossentropy""" +973 6 regularizer """no""" +973 6 optimizer """adam""" +973 6 training_loop """lcwa""" +973 6 evaluator """rankbased""" +973 7 dataset """kinships""" +973 7 model """tucker""" +973 7 loss """crossentropy""" +973 7 regularizer """no""" +973 7 optimizer """adam""" +973 7 training_loop """lcwa""" +973 7 evaluator """rankbased""" +973 8 dataset """kinships""" +973 8 model """tucker""" +973 8 loss """crossentropy""" +973 8 regularizer """no""" +973 8 optimizer """adam""" +973 8 training_loop """lcwa""" +973 8 evaluator """rankbased""" +973 9 dataset """kinships""" +973 9 model """tucker""" +973 9 loss """crossentropy""" +973 9 regularizer """no""" +973 9 optimizer 
"""adam""" +973 9 training_loop """lcwa""" +973 9 evaluator """rankbased""" +973 10 dataset """kinships""" +973 10 model """tucker""" +973 10 loss """crossentropy""" +973 10 regularizer """no""" +973 10 optimizer """adam""" +973 10 training_loop """lcwa""" +973 10 evaluator """rankbased""" +973 11 dataset """kinships""" +973 11 model """tucker""" +973 11 loss """crossentropy""" +973 11 regularizer """no""" +973 11 optimizer """adam""" +973 11 training_loop """lcwa""" +973 11 evaluator """rankbased""" +973 12 dataset """kinships""" +973 12 model """tucker""" +973 12 loss """crossentropy""" +973 12 regularizer """no""" +973 12 optimizer """adam""" +973 12 training_loop """lcwa""" +973 12 evaluator """rankbased""" +973 13 dataset """kinships""" +973 13 model """tucker""" +973 13 loss """crossentropy""" +973 13 regularizer """no""" +973 13 optimizer """adam""" +973 13 training_loop """lcwa""" +973 13 evaluator """rankbased""" +973 14 dataset """kinships""" +973 14 model """tucker""" +973 14 loss """crossentropy""" +973 14 regularizer """no""" +973 14 optimizer """adam""" +973 14 training_loop """lcwa""" +973 14 evaluator """rankbased""" +973 15 dataset """kinships""" +973 15 model """tucker""" +973 15 loss """crossentropy""" +973 15 regularizer """no""" +973 15 optimizer """adam""" +973 15 training_loop """lcwa""" +973 15 evaluator """rankbased""" +973 16 dataset """kinships""" +973 16 model """tucker""" +973 16 loss """crossentropy""" +973 16 regularizer """no""" +973 16 optimizer """adam""" +973 16 training_loop """lcwa""" +973 16 evaluator """rankbased""" +973 17 dataset """kinships""" +973 17 model """tucker""" +973 17 loss """crossentropy""" +973 17 regularizer """no""" +973 17 optimizer """adam""" +973 17 training_loop """lcwa""" +973 17 evaluator """rankbased""" +973 18 dataset """kinships""" +973 18 model """tucker""" +973 18 loss """crossentropy""" +973 18 regularizer """no""" +973 18 optimizer """adam""" +973 18 training_loop """lcwa""" +973 18 evaluator 
"""rankbased""" +973 19 dataset """kinships""" +973 19 model """tucker""" +973 19 loss """crossentropy""" +973 19 regularizer """no""" +973 19 optimizer """adam""" +973 19 training_loop """lcwa""" +973 19 evaluator """rankbased""" +973 20 dataset """kinships""" +973 20 model """tucker""" +973 20 loss """crossentropy""" +973 20 regularizer """no""" +973 20 optimizer """adam""" +973 20 training_loop """lcwa""" +973 20 evaluator """rankbased""" +973 21 dataset """kinships""" +973 21 model """tucker""" +973 21 loss """crossentropy""" +973 21 regularizer """no""" +973 21 optimizer """adam""" +973 21 training_loop """lcwa""" +973 21 evaluator """rankbased""" +973 22 dataset """kinships""" +973 22 model """tucker""" +973 22 loss """crossentropy""" +973 22 regularizer """no""" +973 22 optimizer """adam""" +973 22 training_loop """lcwa""" +973 22 evaluator """rankbased""" +973 23 dataset """kinships""" +973 23 model """tucker""" +973 23 loss """crossentropy""" +973 23 regularizer """no""" +973 23 optimizer """adam""" +973 23 training_loop """lcwa""" +973 23 evaluator """rankbased""" +973 24 dataset """kinships""" +973 24 model """tucker""" +973 24 loss """crossentropy""" +973 24 regularizer """no""" +973 24 optimizer """adam""" +973 24 training_loop """lcwa""" +973 24 evaluator """rankbased""" +973 25 dataset """kinships""" +973 25 model """tucker""" +973 25 loss """crossentropy""" +973 25 regularizer """no""" +973 25 optimizer """adam""" +973 25 training_loop """lcwa""" +973 25 evaluator """rankbased""" +973 26 dataset """kinships""" +973 26 model """tucker""" +973 26 loss """crossentropy""" +973 26 regularizer """no""" +973 26 optimizer """adam""" +973 26 training_loop """lcwa""" +973 26 evaluator """rankbased""" +973 27 dataset """kinships""" +973 27 model """tucker""" +973 27 loss """crossentropy""" +973 27 regularizer """no""" +973 27 optimizer """adam""" +973 27 training_loop """lcwa""" +973 27 evaluator """rankbased""" +973 28 dataset """kinships""" +973 28 model 
"""tucker""" +973 28 loss """crossentropy""" +973 28 regularizer """no""" +973 28 optimizer """adam""" +973 28 training_loop """lcwa""" +973 28 evaluator """rankbased""" +973 29 dataset """kinships""" +973 29 model """tucker""" +973 29 loss """crossentropy""" +973 29 regularizer """no""" +973 29 optimizer """adam""" +973 29 training_loop """lcwa""" +973 29 evaluator """rankbased""" +973 30 dataset """kinships""" +973 30 model """tucker""" +973 30 loss """crossentropy""" +973 30 regularizer """no""" +973 30 optimizer """adam""" +973 30 training_loop """lcwa""" +973 30 evaluator """rankbased""" +973 31 dataset """kinships""" +973 31 model """tucker""" +973 31 loss """crossentropy""" +973 31 regularizer """no""" +973 31 optimizer """adam""" +973 31 training_loop """lcwa""" +973 31 evaluator """rankbased""" +973 32 dataset """kinships""" +973 32 model """tucker""" +973 32 loss """crossentropy""" +973 32 regularizer """no""" +973 32 optimizer """adam""" +973 32 training_loop """lcwa""" +973 32 evaluator """rankbased""" +973 33 dataset """kinships""" +973 33 model """tucker""" +973 33 loss """crossentropy""" +973 33 regularizer """no""" +973 33 optimizer """adam""" +973 33 training_loop """lcwa""" +973 33 evaluator """rankbased""" +973 34 dataset """kinships""" +973 34 model """tucker""" +973 34 loss """crossentropy""" +973 34 regularizer """no""" +973 34 optimizer """adam""" +973 34 training_loop """lcwa""" +973 34 evaluator """rankbased""" +973 35 dataset """kinships""" +973 35 model """tucker""" +973 35 loss """crossentropy""" +973 35 regularizer """no""" +973 35 optimizer """adam""" +973 35 training_loop """lcwa""" +973 35 evaluator """rankbased""" +973 36 dataset """kinships""" +973 36 model """tucker""" +973 36 loss """crossentropy""" +973 36 regularizer """no""" +973 36 optimizer """adam""" +973 36 training_loop """lcwa""" +973 36 evaluator """rankbased""" +973 37 dataset """kinships""" +973 37 model """tucker""" +973 37 loss """crossentropy""" +973 37 regularizer 
"""no""" +973 37 optimizer """adam""" +973 37 training_loop """lcwa""" +973 37 evaluator """rankbased""" +973 38 dataset """kinships""" +973 38 model """tucker""" +973 38 loss """crossentropy""" +973 38 regularizer """no""" +973 38 optimizer """adam""" +973 38 training_loop """lcwa""" +973 38 evaluator """rankbased""" +973 39 dataset """kinships""" +973 39 model """tucker""" +973 39 loss """crossentropy""" +973 39 regularizer """no""" +973 39 optimizer """adam""" +973 39 training_loop """lcwa""" +973 39 evaluator """rankbased""" +973 40 dataset """kinships""" +973 40 model """tucker""" +973 40 loss """crossentropy""" +973 40 regularizer """no""" +973 40 optimizer """adam""" +973 40 training_loop """lcwa""" +973 40 evaluator """rankbased""" +973 41 dataset """kinships""" +973 41 model """tucker""" +973 41 loss """crossentropy""" +973 41 regularizer """no""" +973 41 optimizer """adam""" +973 41 training_loop """lcwa""" +973 41 evaluator """rankbased""" +973 42 dataset """kinships""" +973 42 model """tucker""" +973 42 loss """crossentropy""" +973 42 regularizer """no""" +973 42 optimizer """adam""" +973 42 training_loop """lcwa""" +973 42 evaluator """rankbased""" +973 43 dataset """kinships""" +973 43 model """tucker""" +973 43 loss """crossentropy""" +973 43 regularizer """no""" +973 43 optimizer """adam""" +973 43 training_loop """lcwa""" +973 43 evaluator """rankbased""" +973 44 dataset """kinships""" +973 44 model """tucker""" +973 44 loss """crossentropy""" +973 44 regularizer """no""" +973 44 optimizer """adam""" +973 44 training_loop """lcwa""" +973 44 evaluator """rankbased""" +973 45 dataset """kinships""" +973 45 model """tucker""" +973 45 loss """crossentropy""" +973 45 regularizer """no""" +973 45 optimizer """adam""" +973 45 training_loop """lcwa""" +973 45 evaluator """rankbased""" +973 46 dataset """kinships""" +973 46 model """tucker""" +973 46 loss """crossentropy""" +973 46 regularizer """no""" +973 46 optimizer """adam""" +973 46 training_loop 
"""lcwa""" +973 46 evaluator """rankbased""" +973 47 dataset """kinships""" +973 47 model """tucker""" +973 47 loss """crossentropy""" +973 47 regularizer """no""" +973 47 optimizer """adam""" +973 47 training_loop """lcwa""" +973 47 evaluator """rankbased""" +973 48 dataset """kinships""" +973 48 model """tucker""" +973 48 loss """crossentropy""" +973 48 regularizer """no""" +973 48 optimizer """adam""" +973 48 training_loop """lcwa""" +973 48 evaluator """rankbased""" +973 49 dataset """kinships""" +973 49 model """tucker""" +973 49 loss """crossentropy""" +973 49 regularizer """no""" +973 49 optimizer """adam""" +973 49 training_loop """lcwa""" +973 49 evaluator """rankbased""" +973 50 dataset """kinships""" +973 50 model """tucker""" +973 50 loss """crossentropy""" +973 50 regularizer """no""" +973 50 optimizer """adam""" +973 50 training_loop """lcwa""" +973 50 evaluator """rankbased""" +973 51 dataset """kinships""" +973 51 model """tucker""" +973 51 loss """crossentropy""" +973 51 regularizer """no""" +973 51 optimizer """adam""" +973 51 training_loop """lcwa""" +973 51 evaluator """rankbased""" +973 52 dataset """kinships""" +973 52 model """tucker""" +973 52 loss """crossentropy""" +973 52 regularizer """no""" +973 52 optimizer """adam""" +973 52 training_loop """lcwa""" +973 52 evaluator """rankbased""" +973 53 dataset """kinships""" +973 53 model """tucker""" +973 53 loss """crossentropy""" +973 53 regularizer """no""" +973 53 optimizer """adam""" +973 53 training_loop """lcwa""" +973 53 evaluator """rankbased""" +973 54 dataset """kinships""" +973 54 model """tucker""" +973 54 loss """crossentropy""" +973 54 regularizer """no""" +973 54 optimizer """adam""" +973 54 training_loop """lcwa""" +973 54 evaluator """rankbased""" +973 55 dataset """kinships""" +973 55 model """tucker""" +973 55 loss """crossentropy""" +973 55 regularizer """no""" +973 55 optimizer """adam""" +973 55 training_loop """lcwa""" +973 55 evaluator """rankbased""" +973 56 dataset 
"""kinships""" +973 56 model """tucker""" +973 56 loss """crossentropy""" +973 56 regularizer """no""" +973 56 optimizer """adam""" +973 56 training_loop """lcwa""" +973 56 evaluator """rankbased""" +973 57 dataset """kinships""" +973 57 model """tucker""" +973 57 loss """crossentropy""" +973 57 regularizer """no""" +973 57 optimizer """adam""" +973 57 training_loop """lcwa""" +973 57 evaluator """rankbased""" +973 58 dataset """kinships""" +973 58 model """tucker""" +973 58 loss """crossentropy""" +973 58 regularizer """no""" +973 58 optimizer """adam""" +973 58 training_loop """lcwa""" +973 58 evaluator """rankbased""" +973 59 dataset """kinships""" +973 59 model """tucker""" +973 59 loss """crossentropy""" +973 59 regularizer """no""" +973 59 optimizer """adam""" +973 59 training_loop """lcwa""" +973 59 evaluator """rankbased""" +973 60 dataset """kinships""" +973 60 model """tucker""" +973 60 loss """crossentropy""" +973 60 regularizer """no""" +973 60 optimizer """adam""" +973 60 training_loop """lcwa""" +973 60 evaluator """rankbased""" +973 61 dataset """kinships""" +973 61 model """tucker""" +973 61 loss """crossentropy""" +973 61 regularizer """no""" +973 61 optimizer """adam""" +973 61 training_loop """lcwa""" +973 61 evaluator """rankbased""" +973 62 dataset """kinships""" +973 62 model """tucker""" +973 62 loss """crossentropy""" +973 62 regularizer """no""" +973 62 optimizer """adam""" +973 62 training_loop """lcwa""" +973 62 evaluator """rankbased""" +973 63 dataset """kinships""" +973 63 model """tucker""" +973 63 loss """crossentropy""" +973 63 regularizer """no""" +973 63 optimizer """adam""" +973 63 training_loop """lcwa""" +973 63 evaluator """rankbased""" +973 64 dataset """kinships""" +973 64 model """tucker""" +973 64 loss """crossentropy""" +973 64 regularizer """no""" +973 64 optimizer """adam""" +973 64 training_loop """lcwa""" +973 64 evaluator """rankbased""" +973 65 dataset """kinships""" +973 65 model """tucker""" +973 65 loss 
"""crossentropy""" +973 65 regularizer """no""" +973 65 optimizer """adam""" +973 65 training_loop """lcwa""" +973 65 evaluator """rankbased""" +973 66 dataset """kinships""" +973 66 model """tucker""" +973 66 loss """crossentropy""" +973 66 regularizer """no""" +973 66 optimizer """adam""" +973 66 training_loop """lcwa""" +973 66 evaluator """rankbased""" +973 67 dataset """kinships""" +973 67 model """tucker""" +973 67 loss """crossentropy""" +973 67 regularizer """no""" +973 67 optimizer """adam""" +973 67 training_loop """lcwa""" +973 67 evaluator """rankbased""" +973 68 dataset """kinships""" +973 68 model """tucker""" +973 68 loss """crossentropy""" +973 68 regularizer """no""" +973 68 optimizer """adam""" +973 68 training_loop """lcwa""" +973 68 evaluator """rankbased""" +973 69 dataset """kinships""" +973 69 model """tucker""" +973 69 loss """crossentropy""" +973 69 regularizer """no""" +973 69 optimizer """adam""" +973 69 training_loop """lcwa""" +973 69 evaluator """rankbased""" +973 70 dataset """kinships""" +973 70 model """tucker""" +973 70 loss """crossentropy""" +973 70 regularizer """no""" +973 70 optimizer """adam""" +973 70 training_loop """lcwa""" +973 70 evaluator """rankbased""" +973 71 dataset """kinships""" +973 71 model """tucker""" +973 71 loss """crossentropy""" +973 71 regularizer """no""" +973 71 optimizer """adam""" +973 71 training_loop """lcwa""" +973 71 evaluator """rankbased""" +973 72 dataset """kinships""" +973 72 model """tucker""" +973 72 loss """crossentropy""" +973 72 regularizer """no""" +973 72 optimizer """adam""" +973 72 training_loop """lcwa""" +973 72 evaluator """rankbased""" +973 73 dataset """kinships""" +973 73 model """tucker""" +973 73 loss """crossentropy""" +973 73 regularizer """no""" +973 73 optimizer """adam""" +973 73 training_loop """lcwa""" +973 73 evaluator """rankbased""" +973 74 dataset """kinships""" +973 74 model """tucker""" +973 74 loss """crossentropy""" +973 74 regularizer """no""" +973 74 
optimizer """adam""" +973 74 training_loop """lcwa""" +973 74 evaluator """rankbased""" +973 75 dataset """kinships""" +973 75 model """tucker""" +973 75 loss """crossentropy""" +973 75 regularizer """no""" +973 75 optimizer """adam""" +973 75 training_loop """lcwa""" +973 75 evaluator """rankbased""" +973 76 dataset """kinships""" +973 76 model """tucker""" +973 76 loss """crossentropy""" +973 76 regularizer """no""" +973 76 optimizer """adam""" +973 76 training_loop """lcwa""" +973 76 evaluator """rankbased""" +973 77 dataset """kinships""" +973 77 model """tucker""" +973 77 loss """crossentropy""" +973 77 regularizer """no""" +973 77 optimizer """adam""" +973 77 training_loop """lcwa""" +973 77 evaluator """rankbased""" +973 78 dataset """kinships""" +973 78 model """tucker""" +973 78 loss """crossentropy""" +973 78 regularizer """no""" +973 78 optimizer """adam""" +973 78 training_loop """lcwa""" +973 78 evaluator """rankbased""" +973 79 dataset """kinships""" +973 79 model """tucker""" +973 79 loss """crossentropy""" +973 79 regularizer """no""" +973 79 optimizer """adam""" +973 79 training_loop """lcwa""" +973 79 evaluator """rankbased""" +973 80 dataset """kinships""" +973 80 model """tucker""" +973 80 loss """crossentropy""" +973 80 regularizer """no""" +973 80 optimizer """adam""" +973 80 training_loop """lcwa""" +973 80 evaluator """rankbased""" +973 81 dataset """kinships""" +973 81 model """tucker""" +973 81 loss """crossentropy""" +973 81 regularizer """no""" +973 81 optimizer """adam""" +973 81 training_loop """lcwa""" +973 81 evaluator """rankbased""" +973 82 dataset """kinships""" +973 82 model """tucker""" +973 82 loss """crossentropy""" +973 82 regularizer """no""" +973 82 optimizer """adam""" +973 82 training_loop """lcwa""" +973 82 evaluator """rankbased""" +973 83 dataset """kinships""" +973 83 model """tucker""" +973 83 loss """crossentropy""" +973 83 regularizer """no""" +973 83 optimizer """adam""" +973 83 training_loop """lcwa""" +973 83 
evaluator """rankbased""" +973 84 dataset """kinships""" +973 84 model """tucker""" +973 84 loss """crossentropy""" +973 84 regularizer """no""" +973 84 optimizer """adam""" +973 84 training_loop """lcwa""" +973 84 evaluator """rankbased""" +973 85 dataset """kinships""" +973 85 model """tucker""" +973 85 loss """crossentropy""" +973 85 regularizer """no""" +973 85 optimizer """adam""" +973 85 training_loop """lcwa""" +973 85 evaluator """rankbased""" +973 86 dataset """kinships""" +973 86 model """tucker""" +973 86 loss """crossentropy""" +973 86 regularizer """no""" +973 86 optimizer """adam""" +973 86 training_loop """lcwa""" +973 86 evaluator """rankbased""" +973 87 dataset """kinships""" +973 87 model """tucker""" +973 87 loss """crossentropy""" +973 87 regularizer """no""" +973 87 optimizer """adam""" +973 87 training_loop """lcwa""" +973 87 evaluator """rankbased""" +973 88 dataset """kinships""" +973 88 model """tucker""" +973 88 loss """crossentropy""" +973 88 regularizer """no""" +973 88 optimizer """adam""" +973 88 training_loop """lcwa""" +973 88 evaluator """rankbased""" +973 89 dataset """kinships""" +973 89 model """tucker""" +973 89 loss """crossentropy""" +973 89 regularizer """no""" +973 89 optimizer """adam""" +973 89 training_loop """lcwa""" +973 89 evaluator """rankbased""" +973 90 dataset """kinships""" +973 90 model """tucker""" +973 90 loss """crossentropy""" +973 90 regularizer """no""" +973 90 optimizer """adam""" +973 90 training_loop """lcwa""" +973 90 evaluator """rankbased""" +973 91 dataset """kinships""" +973 91 model """tucker""" +973 91 loss """crossentropy""" +973 91 regularizer """no""" +973 91 optimizer """adam""" +973 91 training_loop """lcwa""" +973 91 evaluator """rankbased""" +973 92 dataset """kinships""" +973 92 model """tucker""" +973 92 loss """crossentropy""" +973 92 regularizer """no""" +973 92 optimizer """adam""" +973 92 training_loop """lcwa""" +973 92 evaluator """rankbased""" +973 93 dataset """kinships""" +973 93 
model """tucker""" +973 93 loss """crossentropy""" +973 93 regularizer """no""" +973 93 optimizer """adam""" +973 93 training_loop """lcwa""" +973 93 evaluator """rankbased""" +973 94 dataset """kinships""" +973 94 model """tucker""" +973 94 loss """crossentropy""" +973 94 regularizer """no""" +973 94 optimizer """adam""" +973 94 training_loop """lcwa""" +973 94 evaluator """rankbased""" +973 95 dataset """kinships""" +973 95 model """tucker""" +973 95 loss """crossentropy""" +973 95 regularizer """no""" +973 95 optimizer """adam""" +973 95 training_loop """lcwa""" +973 95 evaluator """rankbased""" +973 96 dataset """kinships""" +973 96 model """tucker""" +973 96 loss """crossentropy""" +973 96 regularizer """no""" +973 96 optimizer """adam""" +973 96 training_loop """lcwa""" +973 96 evaluator """rankbased""" +973 97 dataset """kinships""" +973 97 model """tucker""" +973 97 loss """crossentropy""" +973 97 regularizer """no""" +973 97 optimizer """adam""" +973 97 training_loop """lcwa""" +973 97 evaluator """rankbased""" +973 98 dataset """kinships""" +973 98 model """tucker""" +973 98 loss """crossentropy""" +973 98 regularizer """no""" +973 98 optimizer """adam""" +973 98 training_loop """lcwa""" +973 98 evaluator """rankbased""" +973 99 dataset """kinships""" +973 99 model """tucker""" +973 99 loss """crossentropy""" +973 99 regularizer """no""" +973 99 optimizer """adam""" +973 99 training_loop """lcwa""" +973 99 evaluator """rankbased""" +973 100 dataset """kinships""" +973 100 model """tucker""" +973 100 loss """crossentropy""" +973 100 regularizer """no""" +973 100 optimizer """adam""" +973 100 training_loop """lcwa""" +973 100 evaluator """rankbased""" +974 1 model.embedding_dim 0.0 +974 1 model.relation_dim 0.0 +974 1 model.dropout_0 0.3867284611799435 +974 1 model.dropout_1 0.46267819246554764 +974 1 model.dropout_2 0.4461231658689244 +974 1 optimizer.lr 0.001666272826566878 +974 1 training.batch_size 2.0 +974 1 training.label_smoothing 
0.0014157109774869748 +974 2 model.embedding_dim 1.0 +974 2 model.relation_dim 0.0 +974 2 model.dropout_0 0.13717950682939067 +974 2 model.dropout_1 0.44769403602791114 +974 2 model.dropout_2 0.3188343224072818 +974 2 optimizer.lr 0.0359699247334281 +974 2 training.batch_size 0.0 +974 2 training.label_smoothing 0.15331978208754396 +974 3 model.embedding_dim 1.0 +974 3 model.relation_dim 0.0 +974 3 model.dropout_0 0.4832413385293801 +974 3 model.dropout_1 0.475690818517823 +974 3 model.dropout_2 0.27290160344551845 +974 3 optimizer.lr 0.03996323989925826 +974 3 training.batch_size 2.0 +974 3 training.label_smoothing 0.027209492128730296 +974 4 model.embedding_dim 0.0 +974 4 model.relation_dim 1.0 +974 4 model.dropout_0 0.2621002878316033 +974 4 model.dropout_1 0.47238098175792687 +974 4 model.dropout_2 0.48119301561180816 +974 4 optimizer.lr 0.0055960085679806915 +974 4 training.batch_size 0.0 +974 4 training.label_smoothing 0.32135681179016423 +974 5 model.embedding_dim 2.0 +974 5 model.relation_dim 0.0 +974 5 model.dropout_0 0.16641333335489272 +974 5 model.dropout_1 0.39183104939349356 +974 5 model.dropout_2 0.16549652359172412 +974 5 optimizer.lr 0.0442419104909521 +974 5 training.batch_size 1.0 +974 5 training.label_smoothing 0.015500743874517536 +974 6 model.embedding_dim 2.0 +974 6 model.relation_dim 1.0 +974 6 model.dropout_0 0.15812997413697472 +974 6 model.dropout_1 0.3017199180111496 +974 6 model.dropout_2 0.4235513925482018 +974 6 optimizer.lr 0.001825177030617194 +974 6 training.batch_size 1.0 +974 6 training.label_smoothing 0.6996903803003021 +974 7 model.embedding_dim 1.0 +974 7 model.relation_dim 0.0 +974 7 model.dropout_0 0.48185597618277143 +974 7 model.dropout_1 0.2805367152909266 +974 7 model.dropout_2 0.30181548185006934 +974 7 optimizer.lr 0.08465140355351072 +974 7 training.batch_size 1.0 +974 7 training.label_smoothing 0.06189701566059086 +974 8 model.embedding_dim 0.0 +974 8 model.relation_dim 1.0 +974 8 model.dropout_0 0.24534212958023574 
+974 8 model.dropout_1 0.19742897277468058 +974 8 model.dropout_2 0.4889115104968622 +974 8 optimizer.lr 0.05389436808482576 +974 8 training.batch_size 2.0 +974 8 training.label_smoothing 0.3755837347601627 +974 9 model.embedding_dim 2.0 +974 9 model.relation_dim 0.0 +974 9 model.dropout_0 0.3744033265477032 +974 9 model.dropout_1 0.4022385127608901 +974 9 model.dropout_2 0.15668603345872867 +974 9 optimizer.lr 0.051814081323894284 +974 9 training.batch_size 2.0 +974 9 training.label_smoothing 0.012936805805094742 +974 10 model.embedding_dim 0.0 +974 10 model.relation_dim 0.0 +974 10 model.dropout_0 0.27656886663566366 +974 10 model.dropout_1 0.12152572511834464 +974 10 model.dropout_2 0.11644988306503286 +974 10 optimizer.lr 0.006663529547455666 +974 10 training.batch_size 1.0 +974 10 training.label_smoothing 0.002751943976845675 +974 11 model.embedding_dim 0.0 +974 11 model.relation_dim 1.0 +974 11 model.dropout_0 0.44991818559784513 +974 11 model.dropout_1 0.19530282909956892 +974 11 model.dropout_2 0.43166441323759375 +974 11 optimizer.lr 0.011722173629916597 +974 11 training.batch_size 1.0 +974 11 training.label_smoothing 0.003831713021965016 +974 12 model.embedding_dim 1.0 +974 12 model.relation_dim 2.0 +974 12 model.dropout_0 0.4712531916100921 +974 12 model.dropout_1 0.10129031422394119 +974 12 model.dropout_2 0.35578871518918 +974 12 optimizer.lr 0.00493157145642074 +974 12 training.batch_size 2.0 +974 12 training.label_smoothing 0.039622334698477826 +974 13 model.embedding_dim 2.0 +974 13 model.relation_dim 1.0 +974 13 model.dropout_0 0.4680395525389626 +974 13 model.dropout_1 0.4743920507354653 +974 13 model.dropout_2 0.3819023147858409 +974 13 optimizer.lr 0.04112652546444448 +974 13 training.batch_size 1.0 +974 13 training.label_smoothing 0.7699547404319126 +974 14 model.embedding_dim 1.0 +974 14 model.relation_dim 0.0 +974 14 model.dropout_0 0.4359408494858058 +974 14 model.dropout_1 0.21793157598091015 +974 14 model.dropout_2 0.10422688482731918 +974 
14 optimizer.lr 0.03800419175938055 +974 14 training.batch_size 1.0 +974 14 training.label_smoothing 0.34024920230295874 +974 15 model.embedding_dim 0.0 +974 15 model.relation_dim 1.0 +974 15 model.dropout_0 0.2215528111742946 +974 15 model.dropout_1 0.18855327336800537 +974 15 model.dropout_2 0.4474674921978454 +974 15 optimizer.lr 0.0016998365009863931 +974 15 training.batch_size 2.0 +974 15 training.label_smoothing 0.017238051532058773 +974 16 model.embedding_dim 1.0 +974 16 model.relation_dim 1.0 +974 16 model.dropout_0 0.4718564526614326 +974 16 model.dropout_1 0.4132657531885535 +974 16 model.dropout_2 0.3617343384362915 +974 16 optimizer.lr 0.0384479231365057 +974 16 training.batch_size 2.0 +974 16 training.label_smoothing 0.16832091762891838 +974 17 model.embedding_dim 1.0 +974 17 model.relation_dim 2.0 +974 17 model.dropout_0 0.4193176585632942 +974 17 model.dropout_1 0.22840100686992823 +974 17 model.dropout_2 0.10847751361043652 +974 17 optimizer.lr 0.0010144126579615393 +974 17 training.batch_size 1.0 +974 17 training.label_smoothing 0.0026387305262651818 +974 18 model.embedding_dim 2.0 +974 18 model.relation_dim 0.0 +974 18 model.dropout_0 0.2661563881644241 +974 18 model.dropout_1 0.4132455495692438 +974 18 model.dropout_2 0.4515106180116524 +974 18 optimizer.lr 0.04470565101792201 +974 18 training.batch_size 2.0 +974 18 training.label_smoothing 0.43544965698720856 +974 19 model.embedding_dim 1.0 +974 19 model.relation_dim 1.0 +974 19 model.dropout_0 0.17284557420934893 +974 19 model.dropout_1 0.25808618968362107 +974 19 model.dropout_2 0.19160691721038298 +974 19 optimizer.lr 0.011689902682339175 +974 19 training.batch_size 0.0 +974 19 training.label_smoothing 0.08177988435506293 +974 20 model.embedding_dim 0.0 +974 20 model.relation_dim 2.0 +974 20 model.dropout_0 0.44229506560273796 +974 20 model.dropout_1 0.29893583082131076 +974 20 model.dropout_2 0.1285940853573984 +974 20 optimizer.lr 0.0029335396236576917 +974 20 training.batch_size 2.0 +974 
20 training.label_smoothing 0.028503721982231372 +974 21 model.embedding_dim 1.0 +974 21 model.relation_dim 2.0 +974 21 model.dropout_0 0.11751990804660442 +974 21 model.dropout_1 0.17457369968520867 +974 21 model.dropout_2 0.12162099462921253 +974 21 optimizer.lr 0.04011412120321777 +974 21 training.batch_size 2.0 +974 21 training.label_smoothing 0.04644076490080995 +974 22 model.embedding_dim 1.0 +974 22 model.relation_dim 0.0 +974 22 model.dropout_0 0.37147571040023064 +974 22 model.dropout_1 0.3283089090239004 +974 22 model.dropout_2 0.3268682666636053 +974 22 optimizer.lr 0.09601155009792565 +974 22 training.batch_size 2.0 +974 22 training.label_smoothing 0.0022204455299459614 +974 23 model.embedding_dim 2.0 +974 23 model.relation_dim 1.0 +974 23 model.dropout_0 0.25371996403832786 +974 23 model.dropout_1 0.10731180770800336 +974 23 model.dropout_2 0.3948574093985563 +974 23 optimizer.lr 0.00225053522743978 +974 23 training.batch_size 2.0 +974 23 training.label_smoothing 0.0014544036155265448 +974 24 model.embedding_dim 2.0 +974 24 model.relation_dim 1.0 +974 24 model.dropout_0 0.11441809863710116 +974 24 model.dropout_1 0.4926804793573974 +974 24 model.dropout_2 0.36057270739640435 +974 24 optimizer.lr 0.005692606088739062 +974 24 training.batch_size 1.0 +974 24 training.label_smoothing 0.011105480553486303 +974 25 model.embedding_dim 0.0 +974 25 model.relation_dim 1.0 +974 25 model.dropout_0 0.13665507613966033 +974 25 model.dropout_1 0.4855427339310552 +974 25 model.dropout_2 0.2713525566828245 +974 25 optimizer.lr 0.005214675481952114 +974 25 training.batch_size 2.0 +974 25 training.label_smoothing 0.17423547846249568 +974 26 model.embedding_dim 2.0 +974 26 model.relation_dim 2.0 +974 26 model.dropout_0 0.4487211651479255 +974 26 model.dropout_1 0.4152694766842543 +974 26 model.dropout_2 0.358700684636722 +974 26 optimizer.lr 0.0023525905579931685 +974 26 training.batch_size 0.0 +974 26 training.label_smoothing 0.0016596319658175717 +974 27 
model.embedding_dim 0.0 +974 27 model.relation_dim 1.0 +974 27 model.dropout_0 0.3817537322127494 +974 27 model.dropout_1 0.44811878891318324 +974 27 model.dropout_2 0.32545085834807574 +974 27 optimizer.lr 0.016244114095812633 +974 27 training.batch_size 0.0 +974 27 training.label_smoothing 0.0019748799757632256 +974 28 model.embedding_dim 1.0 +974 28 model.relation_dim 0.0 +974 28 model.dropout_0 0.3996919037744669 +974 28 model.dropout_1 0.34012418332392313 +974 28 model.dropout_2 0.10672229551997084 +974 28 optimizer.lr 0.05090533891518242 +974 28 training.batch_size 0.0 +974 28 training.label_smoothing 0.5136300111122825 +974 29 model.embedding_dim 2.0 +974 29 model.relation_dim 0.0 +974 29 model.dropout_0 0.18062988839876104 +974 29 model.dropout_1 0.11873157770534642 +974 29 model.dropout_2 0.2046814434557956 +974 29 optimizer.lr 0.00804934267009835 +974 29 training.batch_size 0.0 +974 29 training.label_smoothing 0.0010768741821581952 +974 30 model.embedding_dim 0.0 +974 30 model.relation_dim 2.0 +974 30 model.dropout_0 0.21615123667652442 +974 30 model.dropout_1 0.48860314848693137 +974 30 model.dropout_2 0.47838230576233937 +974 30 optimizer.lr 0.004076237024257138 +974 30 training.batch_size 2.0 +974 30 training.label_smoothing 0.19414702009981277 +974 31 model.embedding_dim 2.0 +974 31 model.relation_dim 0.0 +974 31 model.dropout_0 0.41925034393191046 +974 31 model.dropout_1 0.1891959835622756 +974 31 model.dropout_2 0.28055512851837133 +974 31 optimizer.lr 0.08362052559689885 +974 31 training.batch_size 1.0 +974 31 training.label_smoothing 0.036475852847268286 +974 32 model.embedding_dim 2.0 +974 32 model.relation_dim 0.0 +974 32 model.dropout_0 0.22408393595410014 +974 32 model.dropout_1 0.14758682769422463 +974 32 model.dropout_2 0.400196494208874 +974 32 optimizer.lr 0.008094674918200935 +974 32 training.batch_size 1.0 +974 32 training.label_smoothing 0.4070107085944146 +974 33 model.embedding_dim 1.0 +974 33 model.relation_dim 1.0 +974 33 
model.dropout_0 0.2722957983344137 +974 33 model.dropout_1 0.21374482151865185 +974 33 model.dropout_2 0.3669357821149647 +974 33 optimizer.lr 0.03595842926066493 +974 33 training.batch_size 1.0 +974 33 training.label_smoothing 0.0014842644494171273 +974 34 model.embedding_dim 1.0 +974 34 model.relation_dim 1.0 +974 34 model.dropout_0 0.32109713658820344 +974 34 model.dropout_1 0.47363437511840883 +974 34 model.dropout_2 0.26631989637717685 +974 34 optimizer.lr 0.007037364404168859 +974 34 training.batch_size 0.0 +974 34 training.label_smoothing 0.23912402596639296 +974 35 model.embedding_dim 2.0 +974 35 model.relation_dim 2.0 +974 35 model.dropout_0 0.20225897383996913 +974 35 model.dropout_1 0.10861329775891725 +974 35 model.dropout_2 0.499370408196199 +974 35 optimizer.lr 0.05231530658388097 +974 35 training.batch_size 2.0 +974 35 training.label_smoothing 0.015714573874663674 +974 36 model.embedding_dim 2.0 +974 36 model.relation_dim 1.0 +974 36 model.dropout_0 0.4227213159454724 +974 36 model.dropout_1 0.4810079024656286 +974 36 model.dropout_2 0.19708607051802773 +974 36 optimizer.lr 0.0014248230670420668 +974 36 training.batch_size 1.0 +974 36 training.label_smoothing 0.10561150046604943 +974 37 model.embedding_dim 0.0 +974 37 model.relation_dim 0.0 +974 37 model.dropout_0 0.15764676554606863 +974 37 model.dropout_1 0.4422200851446667 +974 37 model.dropout_2 0.42958931324121286 +974 37 optimizer.lr 0.0017757594985954893 +974 37 training.batch_size 2.0 +974 37 training.label_smoothing 0.01946114911357605 +974 38 model.embedding_dim 1.0 +974 38 model.relation_dim 0.0 +974 38 model.dropout_0 0.11829220735003099 +974 38 model.dropout_1 0.47608583914894376 +974 38 model.dropout_2 0.4463529034691855 +974 38 optimizer.lr 0.015366110546674633 +974 38 training.batch_size 2.0 +974 38 training.label_smoothing 0.003796351792305656 +974 39 model.embedding_dim 2.0 +974 39 model.relation_dim 0.0 +974 39 model.dropout_0 0.22517226327804876 +974 39 model.dropout_1 
0.27967352683592595 +974 39 model.dropout_2 0.36477833813906524 +974 39 optimizer.lr 0.07094216197228835 +974 39 training.batch_size 0.0 +974 39 training.label_smoothing 0.0012462624433029392 +974 40 model.embedding_dim 1.0 +974 40 model.relation_dim 1.0 +974 40 model.dropout_0 0.1744213374726467 +974 40 model.dropout_1 0.4724740818605714 +974 40 model.dropout_2 0.3745996058498597 +974 40 optimizer.lr 0.002249490537478091 +974 40 training.batch_size 2.0 +974 40 training.label_smoothing 0.049747399029603366 +974 41 model.embedding_dim 0.0 +974 41 model.relation_dim 0.0 +974 41 model.dropout_0 0.38723291857940495 +974 41 model.dropout_1 0.2653156646604224 +974 41 model.dropout_2 0.44552057221481395 +974 41 optimizer.lr 0.00142697282827216 +974 41 training.batch_size 2.0 +974 41 training.label_smoothing 0.038235190709633986 +974 42 model.embedding_dim 2.0 +974 42 model.relation_dim 0.0 +974 42 model.dropout_0 0.14352207130943717 +974 42 model.dropout_1 0.27983275422435433 +974 42 model.dropout_2 0.40304015289960127 +974 42 optimizer.lr 0.0029266918707338808 +974 42 training.batch_size 1.0 +974 42 training.label_smoothing 0.30936736890462285 +974 43 model.embedding_dim 2.0 +974 43 model.relation_dim 1.0 +974 43 model.dropout_0 0.4669152448167508 +974 43 model.dropout_1 0.2320359905754005 +974 43 model.dropout_2 0.37497292321345227 +974 43 optimizer.lr 0.0010252163489136532 +974 43 training.batch_size 0.0 +974 43 training.label_smoothing 0.019268015959727693 +974 44 model.embedding_dim 0.0 +974 44 model.relation_dim 1.0 +974 44 model.dropout_0 0.2944286234490154 +974 44 model.dropout_1 0.23312197460987996 +974 44 model.dropout_2 0.4080577959771398 +974 44 optimizer.lr 0.0014444216151443985 +974 44 training.batch_size 0.0 +974 44 training.label_smoothing 0.5112004102630523 +974 45 model.embedding_dim 0.0 +974 45 model.relation_dim 2.0 +974 45 model.dropout_0 0.28733872058373205 +974 45 model.dropout_1 0.2758446175009204 +974 45 model.dropout_2 0.4456924493284229 +974 45 
optimizer.lr 0.0057985745862117865 +974 45 training.batch_size 1.0 +974 45 training.label_smoothing 0.0011561278755859166 +974 46 model.embedding_dim 0.0 +974 46 model.relation_dim 2.0 +974 46 model.dropout_0 0.2680658147735678 +974 46 model.dropout_1 0.4597456030345567 +974 46 model.dropout_2 0.1500946869123294 +974 46 optimizer.lr 0.014778737694148112 +974 46 training.batch_size 1.0 +974 46 training.label_smoothing 0.4323796635899125 +974 47 model.embedding_dim 2.0 +974 47 model.relation_dim 0.0 +974 47 model.dropout_0 0.37538057436938943 +974 47 model.dropout_1 0.4855122517077566 +974 47 model.dropout_2 0.22470209233213267 +974 47 optimizer.lr 0.003577551195160465 +974 47 training.batch_size 0.0 +974 47 training.label_smoothing 0.002126611264126709 +974 48 model.embedding_dim 2.0 +974 48 model.relation_dim 2.0 +974 48 model.dropout_0 0.13056990584114217 +974 48 model.dropout_1 0.2890777850922308 +974 48 model.dropout_2 0.1448100643168107 +974 48 optimizer.lr 0.05001215409141257 +974 48 training.batch_size 2.0 +974 48 training.label_smoothing 0.2148257141086889 +974 49 model.embedding_dim 1.0 +974 49 model.relation_dim 1.0 +974 49 model.dropout_0 0.1835998905645989 +974 49 model.dropout_1 0.16529073545384967 +974 49 model.dropout_2 0.3685913171728987 +974 49 optimizer.lr 0.04159260731782204 +974 49 training.batch_size 2.0 +974 49 training.label_smoothing 0.46986618438375627 +974 50 model.embedding_dim 2.0 +974 50 model.relation_dim 1.0 +974 50 model.dropout_0 0.23238011155991467 +974 50 model.dropout_1 0.11308294204023586 +974 50 model.dropout_2 0.10394875362088621 +974 50 optimizer.lr 0.0010629613596959216 +974 50 training.batch_size 0.0 +974 50 training.label_smoothing 0.008762678116844337 +974 51 model.embedding_dim 0.0 +974 51 model.relation_dim 2.0 +974 51 model.dropout_0 0.4064257082944786 +974 51 model.dropout_1 0.22072473744764856 +974 51 model.dropout_2 0.27265976632084027 +974 51 optimizer.lr 0.07678109566101139 +974 51 training.batch_size 0.0 +974 51 
training.label_smoothing 0.2361618395769822 +974 52 model.embedding_dim 2.0 +974 52 model.relation_dim 2.0 +974 52 model.dropout_0 0.13961766900818087 +974 52 model.dropout_1 0.298429602095057 +974 52 model.dropout_2 0.18693085216210048 +974 52 optimizer.lr 0.019236519545492728 +974 52 training.batch_size 0.0 +974 52 training.label_smoothing 0.005370627511018775 +974 53 model.embedding_dim 1.0 +974 53 model.relation_dim 0.0 +974 53 model.dropout_0 0.4977811106550721 +974 53 model.dropout_1 0.10023434804836624 +974 53 model.dropout_2 0.3396307772019281 +974 53 optimizer.lr 0.033671212360602026 +974 53 training.batch_size 2.0 +974 53 training.label_smoothing 0.009121821820496681 +974 54 model.embedding_dim 1.0 +974 54 model.relation_dim 2.0 +974 54 model.dropout_0 0.3654767331844585 +974 54 model.dropout_1 0.15638177686026827 +974 54 model.dropout_2 0.4675751986280359 +974 54 optimizer.lr 0.014225533739704723 +974 54 training.batch_size 2.0 +974 54 training.label_smoothing 0.1736082295607854 +974 55 model.embedding_dim 2.0 +974 55 model.relation_dim 0.0 +974 55 model.dropout_0 0.4639102398192121 +974 55 model.dropout_1 0.49931814701100535 +974 55 model.dropout_2 0.11169878271305099 +974 55 optimizer.lr 0.07413505859564211 +974 55 training.batch_size 1.0 +974 55 training.label_smoothing 0.018060385390585666 +974 56 model.embedding_dim 0.0 +974 56 model.relation_dim 1.0 +974 56 model.dropout_0 0.3931763091017281 +974 56 model.dropout_1 0.38804937645110676 +974 56 model.dropout_2 0.20756067797043654 +974 56 optimizer.lr 0.06896378367244106 +974 56 training.batch_size 2.0 +974 56 training.label_smoothing 0.009456137096326455 +974 57 model.embedding_dim 0.0 +974 57 model.relation_dim 2.0 +974 57 model.dropout_0 0.31140828751443017 +974 57 model.dropout_1 0.24457990905111726 +974 57 model.dropout_2 0.20059357289371596 +974 57 optimizer.lr 0.0024672717565207545 +974 57 training.batch_size 1.0 +974 57 training.label_smoothing 0.02184648813712051 +974 58 model.embedding_dim 
2.0 +974 58 model.relation_dim 1.0 +974 58 model.dropout_0 0.40938162135593137 +974 58 model.dropout_1 0.23411054596118472 +974 58 model.dropout_2 0.43308702153093087 +974 58 optimizer.lr 0.021156126203881062 +974 58 training.batch_size 1.0 +974 58 training.label_smoothing 0.0588359261904149 +974 59 model.embedding_dim 1.0 +974 59 model.relation_dim 2.0 +974 59 model.dropout_0 0.26125641403852135 +974 59 model.dropout_1 0.11839504713123082 +974 59 model.dropout_2 0.21954738410690086 +974 59 optimizer.lr 0.025270871363896136 +974 59 training.batch_size 0.0 +974 59 training.label_smoothing 0.004969074127715893 +974 60 model.embedding_dim 1.0 +974 60 model.relation_dim 2.0 +974 60 model.dropout_0 0.38847307807421216 +974 60 model.dropout_1 0.3790082644633856 +974 60 model.dropout_2 0.26126113021866265 +974 60 optimizer.lr 0.01923733788343793 +974 60 training.batch_size 1.0 +974 60 training.label_smoothing 0.039554094443365315 +974 61 model.embedding_dim 2.0 +974 61 model.relation_dim 2.0 +974 61 model.dropout_0 0.10574340495280295 +974 61 model.dropout_1 0.1598279333676183 +974 61 model.dropout_2 0.22689926482941458 +974 61 optimizer.lr 0.022661252779630884 +974 61 training.batch_size 0.0 +974 61 training.label_smoothing 0.5422657138924974 +974 62 model.embedding_dim 0.0 +974 62 model.relation_dim 0.0 +974 62 model.dropout_0 0.4096342040884178 +974 62 model.dropout_1 0.29998101075112904 +974 62 model.dropout_2 0.30664304206741616 +974 62 optimizer.lr 0.08310843818452913 +974 62 training.batch_size 2.0 +974 62 training.label_smoothing 0.003441771559596919 +974 63 model.embedding_dim 2.0 +974 63 model.relation_dim 0.0 +974 63 model.dropout_0 0.13270182369504033 +974 63 model.dropout_1 0.19897349718641483 +974 63 model.dropout_2 0.24389017158568538 +974 63 optimizer.lr 0.0026713279905215513 +974 63 training.batch_size 1.0 +974 63 training.label_smoothing 0.23397455270903791 +974 64 model.embedding_dim 2.0 +974 64 model.relation_dim 0.0 +974 64 model.dropout_0 
0.3475772045103556 +974 64 model.dropout_1 0.4859577273705681 +974 64 model.dropout_2 0.2694207261079013 +974 64 optimizer.lr 0.025428643374774622 +974 64 training.batch_size 2.0 +974 64 training.label_smoothing 0.013115537996273243 +974 65 model.embedding_dim 1.0 +974 65 model.relation_dim 0.0 +974 65 model.dropout_0 0.37785296682384173 +974 65 model.dropout_1 0.3910959410855501 +974 65 model.dropout_2 0.49453157262019576 +974 65 optimizer.lr 0.007252673146233597 +974 65 training.batch_size 0.0 +974 65 training.label_smoothing 0.010211272909146585 +974 66 model.embedding_dim 1.0 +974 66 model.relation_dim 0.0 +974 66 model.dropout_0 0.390865445194855 +974 66 model.dropout_1 0.33263066367817706 +974 66 model.dropout_2 0.16594291900262303 +974 66 optimizer.lr 0.0013984492247311024 +974 66 training.batch_size 1.0 +974 66 training.label_smoothing 0.1099677144705006 +974 67 model.embedding_dim 2.0 +974 67 model.relation_dim 1.0 +974 67 model.dropout_0 0.14863217760519976 +974 67 model.dropout_1 0.18623824958043073 +974 67 model.dropout_2 0.41003165032640876 +974 67 optimizer.lr 0.07645236924341585 +974 67 training.batch_size 1.0 +974 67 training.label_smoothing 0.011340435087735424 +974 68 model.embedding_dim 2.0 +974 68 model.relation_dim 1.0 +974 68 model.dropout_0 0.15567853488915567 +974 68 model.dropout_1 0.3137900134970526 +974 68 model.dropout_2 0.38942415173777173 +974 68 optimizer.lr 0.0012407410250388855 +974 68 training.batch_size 0.0 +974 68 training.label_smoothing 0.12779235265063846 +974 69 model.embedding_dim 1.0 +974 69 model.relation_dim 2.0 +974 69 model.dropout_0 0.3222427857656406 +974 69 model.dropout_1 0.3302736475142967 +974 69 model.dropout_2 0.4873254559340554 +974 69 optimizer.lr 0.04675580429735915 +974 69 training.batch_size 1.0 +974 69 training.label_smoothing 0.25610676742141375 +974 70 model.embedding_dim 1.0 +974 70 model.relation_dim 2.0 +974 70 model.dropout_0 0.34248348380018634 +974 70 model.dropout_1 0.21961924209661868 +974 70 
model.dropout_2 0.18057028600983038 +974 70 optimizer.lr 0.013916537860061513 +974 70 training.batch_size 1.0 +974 70 training.label_smoothing 0.004504490071311556 +974 71 model.embedding_dim 2.0 +974 71 model.relation_dim 0.0 +974 71 model.dropout_0 0.21065662832014587 +974 71 model.dropout_1 0.4816966997151752 +974 71 model.dropout_2 0.271197990592366 +974 71 optimizer.lr 0.03356146115071467 +974 71 training.batch_size 0.0 +974 71 training.label_smoothing 0.030468118138116382 +974 72 model.embedding_dim 1.0 +974 72 model.relation_dim 0.0 +974 72 model.dropout_0 0.39591679066904334 +974 72 model.dropout_1 0.36875616768278324 +974 72 model.dropout_2 0.2055914382721445 +974 72 optimizer.lr 0.007248408575508538 +974 72 training.batch_size 2.0 +974 72 training.label_smoothing 0.041493724173692625 +974 73 model.embedding_dim 2.0 +974 73 model.relation_dim 0.0 +974 73 model.dropout_0 0.2930551734511347 +974 73 model.dropout_1 0.2551355616746782 +974 73 model.dropout_2 0.17124759288058355 +974 73 optimizer.lr 0.0010678116165427044 +974 73 training.batch_size 1.0 +974 73 training.label_smoothing 0.014621229865300737 +974 74 model.embedding_dim 1.0 +974 74 model.relation_dim 0.0 +974 74 model.dropout_0 0.11779828470729377 +974 74 model.dropout_1 0.23779197991705897 +974 74 model.dropout_2 0.21367622975180153 +974 74 optimizer.lr 0.004825490272757917 +974 74 training.batch_size 0.0 +974 74 training.label_smoothing 0.01985141806476417 +974 75 model.embedding_dim 0.0 +974 75 model.relation_dim 0.0 +974 75 model.dropout_0 0.1703869232646933 +974 75 model.dropout_1 0.15544598438443327 +974 75 model.dropout_2 0.1569380667578172 +974 75 optimizer.lr 0.03536227707381989 +974 75 training.batch_size 0.0 +974 75 training.label_smoothing 0.0012061852023921536 +974 76 model.embedding_dim 0.0 +974 76 model.relation_dim 2.0 +974 76 model.dropout_0 0.14797523364531667 +974 76 model.dropout_1 0.35199371173381977 +974 76 model.dropout_2 0.3322541163901913 +974 76 optimizer.lr 
0.001014409910955212 +974 76 training.batch_size 0.0 +974 76 training.label_smoothing 0.03245449123220664 +974 77 model.embedding_dim 2.0 +974 77 model.relation_dim 1.0 +974 77 model.dropout_0 0.14473763221297883 +974 77 model.dropout_1 0.32925190392386927 +974 77 model.dropout_2 0.36166734547419216 +974 77 optimizer.lr 0.020730437781704684 +974 77 training.batch_size 0.0 +974 77 training.label_smoothing 0.913705849990184 +974 78 model.embedding_dim 1.0 +974 78 model.relation_dim 1.0 +974 78 model.dropout_0 0.41275719816731854 +974 78 model.dropout_1 0.16161538338796255 +974 78 model.dropout_2 0.181557470525008 +974 78 optimizer.lr 0.0905227185567988 +974 78 training.batch_size 1.0 +974 78 training.label_smoothing 0.04123716694571623 +974 79 model.embedding_dim 0.0 +974 79 model.relation_dim 1.0 +974 79 model.dropout_0 0.37972436546855637 +974 79 model.dropout_1 0.34730515883231416 +974 79 model.dropout_2 0.17699917047892827 +974 79 optimizer.lr 0.0130903127999884 +974 79 training.batch_size 1.0 +974 79 training.label_smoothing 0.1802012081825584 +974 80 model.embedding_dim 1.0 +974 80 model.relation_dim 0.0 +974 80 model.dropout_0 0.1333550759328825 +974 80 model.dropout_1 0.47925693701782746 +974 80 model.dropout_2 0.2275487468519331 +974 80 optimizer.lr 0.05650667887215759 +974 80 training.batch_size 2.0 +974 80 training.label_smoothing 0.011188370873224978 +974 81 model.embedding_dim 0.0 +974 81 model.relation_dim 1.0 +974 81 model.dropout_0 0.1864353386369297 +974 81 model.dropout_1 0.11700753937982018 +974 81 model.dropout_2 0.2416596362680029 +974 81 optimizer.lr 0.01030843755583005 +974 81 training.batch_size 2.0 +974 81 training.label_smoothing 0.007059670288197556 +974 82 model.embedding_dim 0.0 +974 82 model.relation_dim 2.0 +974 82 model.dropout_0 0.26560730239477565 +974 82 model.dropout_1 0.12930617034134248 +974 82 model.dropout_2 0.30997975241039555 +974 82 optimizer.lr 0.004337297158078663 +974 82 training.batch_size 0.0 +974 82 
training.label_smoothing 0.0891585569322699 +974 83 model.embedding_dim 0.0 +974 83 model.relation_dim 1.0 +974 83 model.dropout_0 0.39511735570559275 +974 83 model.dropout_1 0.12362971306432602 +974 83 model.dropout_2 0.4674544549172476 +974 83 optimizer.lr 0.0418881317724257 +974 83 training.batch_size 0.0 +974 83 training.label_smoothing 0.0070715675167875355 +974 84 model.embedding_dim 2.0 +974 84 model.relation_dim 1.0 +974 84 model.dropout_0 0.44016679656830987 +974 84 model.dropout_1 0.436207645666685 +974 84 model.dropout_2 0.16374917730084843 +974 84 optimizer.lr 0.0015169562396198186 +974 84 training.batch_size 1.0 +974 84 training.label_smoothing 0.03477248509163483 +974 85 model.embedding_dim 2.0 +974 85 model.relation_dim 0.0 +974 85 model.dropout_0 0.4067462170176692 +974 85 model.dropout_1 0.11903043741005043 +974 85 model.dropout_2 0.3807885582784597 +974 85 optimizer.lr 0.007148370013011879 +974 85 training.batch_size 1.0 +974 85 training.label_smoothing 0.010995214510155407 +974 86 model.embedding_dim 0.0 +974 86 model.relation_dim 0.0 +974 86 model.dropout_0 0.14648271829719706 +974 86 model.dropout_1 0.24591714417879712 +974 86 model.dropout_2 0.48647276249981003 +974 86 optimizer.lr 0.004949081675056922 +974 86 training.batch_size 0.0 +974 86 training.label_smoothing 0.05819279891656009 +974 87 model.embedding_dim 0.0 +974 87 model.relation_dim 1.0 +974 87 model.dropout_0 0.2846548139509442 +974 87 model.dropout_1 0.21939634136750696 +974 87 model.dropout_2 0.3595316755935164 +974 87 optimizer.lr 0.004778292776506253 +974 87 training.batch_size 1.0 +974 87 training.label_smoothing 0.06696924781278292 +974 88 model.embedding_dim 2.0 +974 88 model.relation_dim 0.0 +974 88 model.dropout_0 0.34909671002868614 +974 88 model.dropout_1 0.3331424486195693 +974 88 model.dropout_2 0.3645550203176698 +974 88 optimizer.lr 0.014919679926026327 +974 88 training.batch_size 0.0 +974 88 training.label_smoothing 0.019099663203836888 +974 89 model.embedding_dim 
1.0 +974 89 model.relation_dim 1.0 +974 89 model.dropout_0 0.3041518370471351 +974 89 model.dropout_1 0.14640559027563124 +974 89 model.dropout_2 0.31357330894267077 +974 89 optimizer.lr 0.08863235485572253 +974 89 training.batch_size 2.0 +974 89 training.label_smoothing 0.002398495314508204 +974 90 model.embedding_dim 2.0 +974 90 model.relation_dim 0.0 +974 90 model.dropout_0 0.11314874842357298 +974 90 model.dropout_1 0.2843043693694243 +974 90 model.dropout_2 0.30450577051198013 +974 90 optimizer.lr 0.007976139701548885 +974 90 training.batch_size 2.0 +974 90 training.label_smoothing 0.04291702317532439 +974 91 model.embedding_dim 0.0 +974 91 model.relation_dim 0.0 +974 91 model.dropout_0 0.1194646953091862 +974 91 model.dropout_1 0.4620255937495846 +974 91 model.dropout_2 0.165886224113731 +974 91 optimizer.lr 0.0604867448427478 +974 91 training.batch_size 1.0 +974 91 training.label_smoothing 0.00673057481540786 +974 92 model.embedding_dim 0.0 +974 92 model.relation_dim 0.0 +974 92 model.dropout_0 0.319186564708554 +974 92 model.dropout_1 0.1293989132346572 +974 92 model.dropout_2 0.49154954920152594 +974 92 optimizer.lr 0.005809187203785789 +974 92 training.batch_size 2.0 +974 92 training.label_smoothing 0.10890622301367504 +974 93 model.embedding_dim 1.0 +974 93 model.relation_dim 0.0 +974 93 model.dropout_0 0.24392169655268395 +974 93 model.dropout_1 0.36654867641784394 +974 93 model.dropout_2 0.2523666146859389 +974 93 optimizer.lr 0.004473443952901763 +974 93 training.batch_size 1.0 +974 93 training.label_smoothing 0.2927212512447155 +974 94 model.embedding_dim 1.0 +974 94 model.relation_dim 1.0 +974 94 model.dropout_0 0.21525311842257744 +974 94 model.dropout_1 0.21761306468013722 +974 94 model.dropout_2 0.2710757396535027 +974 94 optimizer.lr 0.0018853201019696248 +974 94 training.batch_size 2.0 +974 94 training.label_smoothing 0.5141937122314699 +974 95 model.embedding_dim 2.0 +974 95 model.relation_dim 2.0 +974 95 model.dropout_0 0.4466745504485186 
+974 95 model.dropout_1 0.307996875041804 +974 95 model.dropout_2 0.4420772984205845 +974 95 optimizer.lr 0.0265062955520323 +974 95 training.batch_size 1.0 +974 95 training.label_smoothing 0.20901355936824093 +974 96 model.embedding_dim 0.0 +974 96 model.relation_dim 2.0 +974 96 model.dropout_0 0.3746156388978281 +974 96 model.dropout_1 0.32085958598870534 +974 96 model.dropout_2 0.2530984572594204 +974 96 optimizer.lr 0.013235948009912115 +974 96 training.batch_size 2.0 +974 96 training.label_smoothing 0.0013920952486400034 +974 97 model.embedding_dim 0.0 +974 97 model.relation_dim 1.0 +974 97 model.dropout_0 0.1429547814581992 +974 97 model.dropout_1 0.2559401572382726 +974 97 model.dropout_2 0.3370381377102704 +974 97 optimizer.lr 0.0026543588646001154 +974 97 training.batch_size 1.0 +974 97 training.label_smoothing 0.13990514052933198 +974 98 model.embedding_dim 2.0 +974 98 model.relation_dim 0.0 +974 98 model.dropout_0 0.13047549283892068 +974 98 model.dropout_1 0.12572757326610132 +974 98 model.dropout_2 0.3511501464154929 +974 98 optimizer.lr 0.07084015745818444 +974 98 training.batch_size 2.0 +974 98 training.label_smoothing 0.6079541183692858 +974 99 model.embedding_dim 2.0 +974 99 model.relation_dim 1.0 +974 99 model.dropout_0 0.2771274247627832 +974 99 model.dropout_1 0.27136928895167084 +974 99 model.dropout_2 0.27273482203386995 +974 99 optimizer.lr 0.002118928848398882 +974 99 training.batch_size 0.0 +974 99 training.label_smoothing 0.01599325808109531 +974 100 model.embedding_dim 1.0 +974 100 model.relation_dim 2.0 +974 100 model.dropout_0 0.12302486869737823 +974 100 model.dropout_1 0.16474024198072224 +974 100 model.dropout_2 0.1328120012506037 +974 100 optimizer.lr 0.0013168934127697725 +974 100 training.batch_size 0.0 +974 100 training.label_smoothing 0.0017112228612669107 +974 1 dataset """kinships""" +974 1 model """tucker""" +974 1 loss """crossentropy""" +974 1 regularizer """no""" +974 1 optimizer """adadelta""" +974 1 training_loop 
"""lcwa""" +974 1 evaluator """rankbased""" +974 2 dataset """kinships""" +974 2 model """tucker""" +974 2 loss """crossentropy""" +974 2 regularizer """no""" +974 2 optimizer """adadelta""" +974 2 training_loop """lcwa""" +974 2 evaluator """rankbased""" +974 3 dataset """kinships""" +974 3 model """tucker""" +974 3 loss """crossentropy""" +974 3 regularizer """no""" +974 3 optimizer """adadelta""" +974 3 training_loop """lcwa""" +974 3 evaluator """rankbased""" +974 4 dataset """kinships""" +974 4 model """tucker""" +974 4 loss """crossentropy""" +974 4 regularizer """no""" +974 4 optimizer """adadelta""" +974 4 training_loop """lcwa""" +974 4 evaluator """rankbased""" +974 5 dataset """kinships""" +974 5 model """tucker""" +974 5 loss """crossentropy""" +974 5 regularizer """no""" +974 5 optimizer """adadelta""" +974 5 training_loop """lcwa""" +974 5 evaluator """rankbased""" +974 6 dataset """kinships""" +974 6 model """tucker""" +974 6 loss """crossentropy""" +974 6 regularizer """no""" +974 6 optimizer """adadelta""" +974 6 training_loop """lcwa""" +974 6 evaluator """rankbased""" +974 7 dataset """kinships""" +974 7 model """tucker""" +974 7 loss """crossentropy""" +974 7 regularizer """no""" +974 7 optimizer """adadelta""" +974 7 training_loop """lcwa""" +974 7 evaluator """rankbased""" +974 8 dataset """kinships""" +974 8 model """tucker""" +974 8 loss """crossentropy""" +974 8 regularizer """no""" +974 8 optimizer """adadelta""" +974 8 training_loop """lcwa""" +974 8 evaluator """rankbased""" +974 9 dataset """kinships""" +974 9 model """tucker""" +974 9 loss """crossentropy""" +974 9 regularizer """no""" +974 9 optimizer """adadelta""" +974 9 training_loop """lcwa""" +974 9 evaluator """rankbased""" +974 10 dataset """kinships""" +974 10 model """tucker""" +974 10 loss """crossentropy""" +974 10 regularizer """no""" +974 10 optimizer """adadelta""" +974 10 training_loop """lcwa""" +974 10 evaluator """rankbased""" +974 11 dataset """kinships""" +974 11 
model """tucker""" +974 11 loss """crossentropy""" +974 11 regularizer """no""" +974 11 optimizer """adadelta""" +974 11 training_loop """lcwa""" +974 11 evaluator """rankbased""" +974 12 dataset """kinships""" +974 12 model """tucker""" +974 12 loss """crossentropy""" +974 12 regularizer """no""" +974 12 optimizer """adadelta""" +974 12 training_loop """lcwa""" +974 12 evaluator """rankbased""" +974 13 dataset """kinships""" +974 13 model """tucker""" +974 13 loss """crossentropy""" +974 13 regularizer """no""" +974 13 optimizer """adadelta""" +974 13 training_loop """lcwa""" +974 13 evaluator """rankbased""" +974 14 dataset """kinships""" +974 14 model """tucker""" +974 14 loss """crossentropy""" +974 14 regularizer """no""" +974 14 optimizer """adadelta""" +974 14 training_loop """lcwa""" +974 14 evaluator """rankbased""" +974 15 dataset """kinships""" +974 15 model """tucker""" +974 15 loss """crossentropy""" +974 15 regularizer """no""" +974 15 optimizer """adadelta""" +974 15 training_loop """lcwa""" +974 15 evaluator """rankbased""" +974 16 dataset """kinships""" +974 16 model """tucker""" +974 16 loss """crossentropy""" +974 16 regularizer """no""" +974 16 optimizer """adadelta""" +974 16 training_loop """lcwa""" +974 16 evaluator """rankbased""" +974 17 dataset """kinships""" +974 17 model """tucker""" +974 17 loss """crossentropy""" +974 17 regularizer """no""" +974 17 optimizer """adadelta""" +974 17 training_loop """lcwa""" +974 17 evaluator """rankbased""" +974 18 dataset """kinships""" +974 18 model """tucker""" +974 18 loss """crossentropy""" +974 18 regularizer """no""" +974 18 optimizer """adadelta""" +974 18 training_loop """lcwa""" +974 18 evaluator """rankbased""" +974 19 dataset """kinships""" +974 19 model """tucker""" +974 19 loss """crossentropy""" +974 19 regularizer """no""" +974 19 optimizer """adadelta""" +974 19 training_loop """lcwa""" +974 19 evaluator """rankbased""" +974 20 dataset """kinships""" +974 20 model """tucker""" +974 20 
loss """crossentropy""" +974 20 regularizer """no""" +974 20 optimizer """adadelta""" +974 20 training_loop """lcwa""" +974 20 evaluator """rankbased""" +974 21 dataset """kinships""" +974 21 model """tucker""" +974 21 loss """crossentropy""" +974 21 regularizer """no""" +974 21 optimizer """adadelta""" +974 21 training_loop """lcwa""" +974 21 evaluator """rankbased""" +974 22 dataset """kinships""" +974 22 model """tucker""" +974 22 loss """crossentropy""" +974 22 regularizer """no""" +974 22 optimizer """adadelta""" +974 22 training_loop """lcwa""" +974 22 evaluator """rankbased""" +974 23 dataset """kinships""" +974 23 model """tucker""" +974 23 loss """crossentropy""" +974 23 regularizer """no""" +974 23 optimizer """adadelta""" +974 23 training_loop """lcwa""" +974 23 evaluator """rankbased""" +974 24 dataset """kinships""" +974 24 model """tucker""" +974 24 loss """crossentropy""" +974 24 regularizer """no""" +974 24 optimizer """adadelta""" +974 24 training_loop """lcwa""" +974 24 evaluator """rankbased""" +974 25 dataset """kinships""" +974 25 model """tucker""" +974 25 loss """crossentropy""" +974 25 regularizer """no""" +974 25 optimizer """adadelta""" +974 25 training_loop """lcwa""" +974 25 evaluator """rankbased""" +974 26 dataset """kinships""" +974 26 model """tucker""" +974 26 loss """crossentropy""" +974 26 regularizer """no""" +974 26 optimizer """adadelta""" +974 26 training_loop """lcwa""" +974 26 evaluator """rankbased""" +974 27 dataset """kinships""" +974 27 model """tucker""" +974 27 loss """crossentropy""" +974 27 regularizer """no""" +974 27 optimizer """adadelta""" +974 27 training_loop """lcwa""" +974 27 evaluator """rankbased""" +974 28 dataset """kinships""" +974 28 model """tucker""" +974 28 loss """crossentropy""" +974 28 regularizer """no""" +974 28 optimizer """adadelta""" +974 28 training_loop """lcwa""" +974 28 evaluator """rankbased""" +974 29 dataset """kinships""" +974 29 model """tucker""" +974 29 loss """crossentropy""" +974 
29 regularizer """no""" +974 29 optimizer """adadelta""" +974 29 training_loop """lcwa""" +974 29 evaluator """rankbased""" +974 30 dataset """kinships""" +974 30 model """tucker""" +974 30 loss """crossentropy""" +974 30 regularizer """no""" +974 30 optimizer """adadelta""" +974 30 training_loop """lcwa""" +974 30 evaluator """rankbased""" +974 31 dataset """kinships""" +974 31 model """tucker""" +974 31 loss """crossentropy""" +974 31 regularizer """no""" +974 31 optimizer """adadelta""" +974 31 training_loop """lcwa""" +974 31 evaluator """rankbased""" +974 32 dataset """kinships""" +974 32 model """tucker""" +974 32 loss """crossentropy""" +974 32 regularizer """no""" +974 32 optimizer """adadelta""" +974 32 training_loop """lcwa""" +974 32 evaluator """rankbased""" +974 33 dataset """kinships""" +974 33 model """tucker""" +974 33 loss """crossentropy""" +974 33 regularizer """no""" +974 33 optimizer """adadelta""" +974 33 training_loop """lcwa""" +974 33 evaluator """rankbased""" +974 34 dataset """kinships""" +974 34 model """tucker""" +974 34 loss """crossentropy""" +974 34 regularizer """no""" +974 34 optimizer """adadelta""" +974 34 training_loop """lcwa""" +974 34 evaluator """rankbased""" +974 35 dataset """kinships""" +974 35 model """tucker""" +974 35 loss """crossentropy""" +974 35 regularizer """no""" +974 35 optimizer """adadelta""" +974 35 training_loop """lcwa""" +974 35 evaluator """rankbased""" +974 36 dataset """kinships""" +974 36 model """tucker""" +974 36 loss """crossentropy""" +974 36 regularizer """no""" +974 36 optimizer """adadelta""" +974 36 training_loop """lcwa""" +974 36 evaluator """rankbased""" +974 37 dataset """kinships""" +974 37 model """tucker""" +974 37 loss """crossentropy""" +974 37 regularizer """no""" +974 37 optimizer """adadelta""" +974 37 training_loop """lcwa""" +974 37 evaluator """rankbased""" +974 38 dataset """kinships""" +974 38 model """tucker""" +974 38 loss """crossentropy""" +974 38 regularizer """no""" +974 
38 optimizer """adadelta""" +974 38 training_loop """lcwa""" +974 38 evaluator """rankbased""" +974 39 dataset """kinships""" +974 39 model """tucker""" +974 39 loss """crossentropy""" +974 39 regularizer """no""" +974 39 optimizer """adadelta""" +974 39 training_loop """lcwa""" +974 39 evaluator """rankbased""" +974 40 dataset """kinships""" +974 40 model """tucker""" +974 40 loss """crossentropy""" +974 40 regularizer """no""" +974 40 optimizer """adadelta""" +974 40 training_loop """lcwa""" +974 40 evaluator """rankbased""" +974 41 dataset """kinships""" +974 41 model """tucker""" +974 41 loss """crossentropy""" +974 41 regularizer """no""" +974 41 optimizer """adadelta""" +974 41 training_loop """lcwa""" +974 41 evaluator """rankbased""" +974 42 dataset """kinships""" +974 42 model """tucker""" +974 42 loss """crossentropy""" +974 42 regularizer """no""" +974 42 optimizer """adadelta""" +974 42 training_loop """lcwa""" +974 42 evaluator """rankbased""" +974 43 dataset """kinships""" +974 43 model """tucker""" +974 43 loss """crossentropy""" +974 43 regularizer """no""" +974 43 optimizer """adadelta""" +974 43 training_loop """lcwa""" +974 43 evaluator """rankbased""" +974 44 dataset """kinships""" +974 44 model """tucker""" +974 44 loss """crossentropy""" +974 44 regularizer """no""" +974 44 optimizer """adadelta""" +974 44 training_loop """lcwa""" +974 44 evaluator """rankbased""" +974 45 dataset """kinships""" +974 45 model """tucker""" +974 45 loss """crossentropy""" +974 45 regularizer """no""" +974 45 optimizer """adadelta""" +974 45 training_loop """lcwa""" +974 45 evaluator """rankbased""" +974 46 dataset """kinships""" +974 46 model """tucker""" +974 46 loss """crossentropy""" +974 46 regularizer """no""" +974 46 optimizer """adadelta""" +974 46 training_loop """lcwa""" +974 46 evaluator """rankbased""" +974 47 dataset """kinships""" +974 47 model """tucker""" +974 47 loss """crossentropy""" +974 47 regularizer """no""" +974 47 optimizer """adadelta""" 
+974 47 training_loop """lcwa""" +974 47 evaluator """rankbased""" +974 48 dataset """kinships""" +974 48 model """tucker""" +974 48 loss """crossentropy""" +974 48 regularizer """no""" +974 48 optimizer """adadelta""" +974 48 training_loop """lcwa""" +974 48 evaluator """rankbased""" +974 49 dataset """kinships""" +974 49 model """tucker""" +974 49 loss """crossentropy""" +974 49 regularizer """no""" +974 49 optimizer """adadelta""" +974 49 training_loop """lcwa""" +974 49 evaluator """rankbased""" +974 50 dataset """kinships""" +974 50 model """tucker""" +974 50 loss """crossentropy""" +974 50 regularizer """no""" +974 50 optimizer """adadelta""" +974 50 training_loop """lcwa""" +974 50 evaluator """rankbased""" +974 51 dataset """kinships""" +974 51 model """tucker""" +974 51 loss """crossentropy""" +974 51 regularizer """no""" +974 51 optimizer """adadelta""" +974 51 training_loop """lcwa""" +974 51 evaluator """rankbased""" +974 52 dataset """kinships""" +974 52 model """tucker""" +974 52 loss """crossentropy""" +974 52 regularizer """no""" +974 52 optimizer """adadelta""" +974 52 training_loop """lcwa""" +974 52 evaluator """rankbased""" +974 53 dataset """kinships""" +974 53 model """tucker""" +974 53 loss """crossentropy""" +974 53 regularizer """no""" +974 53 optimizer """adadelta""" +974 53 training_loop """lcwa""" +974 53 evaluator """rankbased""" +974 54 dataset """kinships""" +974 54 model """tucker""" +974 54 loss """crossentropy""" +974 54 regularizer """no""" +974 54 optimizer """adadelta""" +974 54 training_loop """lcwa""" +974 54 evaluator """rankbased""" +974 55 dataset """kinships""" +974 55 model """tucker""" +974 55 loss """crossentropy""" +974 55 regularizer """no""" +974 55 optimizer """adadelta""" +974 55 training_loop """lcwa""" +974 55 evaluator """rankbased""" +974 56 dataset """kinships""" +974 56 model """tucker""" +974 56 loss """crossentropy""" +974 56 regularizer """no""" +974 56 optimizer """adadelta""" +974 56 training_loop 
"""lcwa""" +974 56 evaluator """rankbased""" +974 57 dataset """kinships""" +974 57 model """tucker""" +974 57 loss """crossentropy""" +974 57 regularizer """no""" +974 57 optimizer """adadelta""" +974 57 training_loop """lcwa""" +974 57 evaluator """rankbased""" +974 58 dataset """kinships""" +974 58 model """tucker""" +974 58 loss """crossentropy""" +974 58 regularizer """no""" +974 58 optimizer """adadelta""" +974 58 training_loop """lcwa""" +974 58 evaluator """rankbased""" +974 59 dataset """kinships""" +974 59 model """tucker""" +974 59 loss """crossentropy""" +974 59 regularizer """no""" +974 59 optimizer """adadelta""" +974 59 training_loop """lcwa""" +974 59 evaluator """rankbased""" +974 60 dataset """kinships""" +974 60 model """tucker""" +974 60 loss """crossentropy""" +974 60 regularizer """no""" +974 60 optimizer """adadelta""" +974 60 training_loop """lcwa""" +974 60 evaluator """rankbased""" +974 61 dataset """kinships""" +974 61 model """tucker""" +974 61 loss """crossentropy""" +974 61 regularizer """no""" +974 61 optimizer """adadelta""" +974 61 training_loop """lcwa""" +974 61 evaluator """rankbased""" +974 62 dataset """kinships""" +974 62 model """tucker""" +974 62 loss """crossentropy""" +974 62 regularizer """no""" +974 62 optimizer """adadelta""" +974 62 training_loop """lcwa""" +974 62 evaluator """rankbased""" +974 63 dataset """kinships""" +974 63 model """tucker""" +974 63 loss """crossentropy""" +974 63 regularizer """no""" +974 63 optimizer """adadelta""" +974 63 training_loop """lcwa""" +974 63 evaluator """rankbased""" +974 64 dataset """kinships""" +974 64 model """tucker""" +974 64 loss """crossentropy""" +974 64 regularizer """no""" +974 64 optimizer """adadelta""" +974 64 training_loop """lcwa""" +974 64 evaluator """rankbased""" +974 65 dataset """kinships""" +974 65 model """tucker""" +974 65 loss """crossentropy""" +974 65 regularizer """no""" +974 65 optimizer """adadelta""" +974 65 training_loop """lcwa""" +974 65 evaluator 
"""rankbased""" +974 66 dataset """kinships""" +974 66 model """tucker""" +974 66 loss """crossentropy""" +974 66 regularizer """no""" +974 66 optimizer """adadelta""" +974 66 training_loop """lcwa""" +974 66 evaluator """rankbased""" +974 67 dataset """kinships""" +974 67 model """tucker""" +974 67 loss """crossentropy""" +974 67 regularizer """no""" +974 67 optimizer """adadelta""" +974 67 training_loop """lcwa""" +974 67 evaluator """rankbased""" +974 68 dataset """kinships""" +974 68 model """tucker""" +974 68 loss """crossentropy""" +974 68 regularizer """no""" +974 68 optimizer """adadelta""" +974 68 training_loop """lcwa""" +974 68 evaluator """rankbased""" +974 69 dataset """kinships""" +974 69 model """tucker""" +974 69 loss """crossentropy""" +974 69 regularizer """no""" +974 69 optimizer """adadelta""" +974 69 training_loop """lcwa""" +974 69 evaluator """rankbased""" +974 70 dataset """kinships""" +974 70 model """tucker""" +974 70 loss """crossentropy""" +974 70 regularizer """no""" +974 70 optimizer """adadelta""" +974 70 training_loop """lcwa""" +974 70 evaluator """rankbased""" +974 71 dataset """kinships""" +974 71 model """tucker""" +974 71 loss """crossentropy""" +974 71 regularizer """no""" +974 71 optimizer """adadelta""" +974 71 training_loop """lcwa""" +974 71 evaluator """rankbased""" +974 72 dataset """kinships""" +974 72 model """tucker""" +974 72 loss """crossentropy""" +974 72 regularizer """no""" +974 72 optimizer """adadelta""" +974 72 training_loop """lcwa""" +974 72 evaluator """rankbased""" +974 73 dataset """kinships""" +974 73 model """tucker""" +974 73 loss """crossentropy""" +974 73 regularizer """no""" +974 73 optimizer """adadelta""" +974 73 training_loop """lcwa""" +974 73 evaluator """rankbased""" +974 74 dataset """kinships""" +974 74 model """tucker""" +974 74 loss """crossentropy""" +974 74 regularizer """no""" +974 74 optimizer """adadelta""" +974 74 training_loop """lcwa""" +974 74 evaluator """rankbased""" +974 75 
dataset """kinships""" +974 75 model """tucker""" +974 75 loss """crossentropy""" +974 75 regularizer """no""" +974 75 optimizer """adadelta""" +974 75 training_loop """lcwa""" +974 75 evaluator """rankbased""" +974 76 dataset """kinships""" +974 76 model """tucker""" +974 76 loss """crossentropy""" +974 76 regularizer """no""" +974 76 optimizer """adadelta""" +974 76 training_loop """lcwa""" +974 76 evaluator """rankbased""" +974 77 dataset """kinships""" +974 77 model """tucker""" +974 77 loss """crossentropy""" +974 77 regularizer """no""" +974 77 optimizer """adadelta""" +974 77 training_loop """lcwa""" +974 77 evaluator """rankbased""" +974 78 dataset """kinships""" +974 78 model """tucker""" +974 78 loss """crossentropy""" +974 78 regularizer """no""" +974 78 optimizer """adadelta""" +974 78 training_loop """lcwa""" +974 78 evaluator """rankbased""" +974 79 dataset """kinships""" +974 79 model """tucker""" +974 79 loss """crossentropy""" +974 79 regularizer """no""" +974 79 optimizer """adadelta""" +974 79 training_loop """lcwa""" +974 79 evaluator """rankbased""" +974 80 dataset """kinships""" +974 80 model """tucker""" +974 80 loss """crossentropy""" +974 80 regularizer """no""" +974 80 optimizer """adadelta""" +974 80 training_loop """lcwa""" +974 80 evaluator """rankbased""" +974 81 dataset """kinships""" +974 81 model """tucker""" +974 81 loss """crossentropy""" +974 81 regularizer """no""" +974 81 optimizer """adadelta""" +974 81 training_loop """lcwa""" +974 81 evaluator """rankbased""" +974 82 dataset """kinships""" +974 82 model """tucker""" +974 82 loss """crossentropy""" +974 82 regularizer """no""" +974 82 optimizer """adadelta""" +974 82 training_loop """lcwa""" +974 82 evaluator """rankbased""" +974 83 dataset """kinships""" +974 83 model """tucker""" +974 83 loss """crossentropy""" +974 83 regularizer """no""" +974 83 optimizer """adadelta""" +974 83 training_loop """lcwa""" +974 83 evaluator """rankbased""" +974 84 dataset """kinships""" +974 
84 model """tucker""" +974 84 loss """crossentropy""" +974 84 regularizer """no""" +974 84 optimizer """adadelta""" +974 84 training_loop """lcwa""" +974 84 evaluator """rankbased""" +974 85 dataset """kinships""" +974 85 model """tucker""" +974 85 loss """crossentropy""" +974 85 regularizer """no""" +974 85 optimizer """adadelta""" +974 85 training_loop """lcwa""" +974 85 evaluator """rankbased""" +974 86 dataset """kinships""" +974 86 model """tucker""" +974 86 loss """crossentropy""" +974 86 regularizer """no""" +974 86 optimizer """adadelta""" +974 86 training_loop """lcwa""" +974 86 evaluator """rankbased""" +974 87 dataset """kinships""" +974 87 model """tucker""" +974 87 loss """crossentropy""" +974 87 regularizer """no""" +974 87 optimizer """adadelta""" +974 87 training_loop """lcwa""" +974 87 evaluator """rankbased""" +974 88 dataset """kinships""" +974 88 model """tucker""" +974 88 loss """crossentropy""" +974 88 regularizer """no""" +974 88 optimizer """adadelta""" +974 88 training_loop """lcwa""" +974 88 evaluator """rankbased""" +974 89 dataset """kinships""" +974 89 model """tucker""" +974 89 loss """crossentropy""" +974 89 regularizer """no""" +974 89 optimizer """adadelta""" +974 89 training_loop """lcwa""" +974 89 evaluator """rankbased""" +974 90 dataset """kinships""" +974 90 model """tucker""" +974 90 loss """crossentropy""" +974 90 regularizer """no""" +974 90 optimizer """adadelta""" +974 90 training_loop """lcwa""" +974 90 evaluator """rankbased""" +974 91 dataset """kinships""" +974 91 model """tucker""" +974 91 loss """crossentropy""" +974 91 regularizer """no""" +974 91 optimizer """adadelta""" +974 91 training_loop """lcwa""" +974 91 evaluator """rankbased""" +974 92 dataset """kinships""" +974 92 model """tucker""" +974 92 loss """crossentropy""" +974 92 regularizer """no""" +974 92 optimizer """adadelta""" +974 92 training_loop """lcwa""" +974 92 evaluator """rankbased""" +974 93 dataset """kinships""" +974 93 model """tucker""" +974 
93 loss """crossentropy""" +974 93 regularizer """no""" +974 93 optimizer """adadelta""" +974 93 training_loop """lcwa""" +974 93 evaluator """rankbased""" +974 94 dataset """kinships""" +974 94 model """tucker""" +974 94 loss """crossentropy""" +974 94 regularizer """no""" +974 94 optimizer """adadelta""" +974 94 training_loop """lcwa""" +974 94 evaluator """rankbased""" +974 95 dataset """kinships""" +974 95 model """tucker""" +974 95 loss """crossentropy""" +974 95 regularizer """no""" +974 95 optimizer """adadelta""" +974 95 training_loop """lcwa""" +974 95 evaluator """rankbased""" +974 96 dataset """kinships""" +974 96 model """tucker""" +974 96 loss """crossentropy""" +974 96 regularizer """no""" +974 96 optimizer """adadelta""" +974 96 training_loop """lcwa""" +974 96 evaluator """rankbased""" +974 97 dataset """kinships""" +974 97 model """tucker""" +974 97 loss """crossentropy""" +974 97 regularizer """no""" +974 97 optimizer """adadelta""" +974 97 training_loop """lcwa""" +974 97 evaluator """rankbased""" +974 98 dataset """kinships""" +974 98 model """tucker""" +974 98 loss """crossentropy""" +974 98 regularizer """no""" +974 98 optimizer """adadelta""" +974 98 training_loop """lcwa""" +974 98 evaluator """rankbased""" +974 99 dataset """kinships""" +974 99 model """tucker""" +974 99 loss """crossentropy""" +974 99 regularizer """no""" +974 99 optimizer """adadelta""" +974 99 training_loop """lcwa""" +974 99 evaluator """rankbased""" +974 100 dataset """kinships""" +974 100 model """tucker""" +974 100 loss """crossentropy""" +974 100 regularizer """no""" +974 100 optimizer """adadelta""" +974 100 training_loop """lcwa""" +974 100 evaluator """rankbased""" +975 1 model.embedding_dim 1.0 +975 1 model.relation_dim 0.0 +975 1 model.dropout_0 0.28648143196475706 +975 1 model.dropout_1 0.135185847201418 +975 1 model.dropout_2 0.15883961553719575 +975 1 optimizer.lr 0.04889235453095465 +975 1 training.batch_size 1.0 +975 1 training.label_smoothing 
0.06459852661021287 +975 2 model.embedding_dim 1.0 +975 2 model.relation_dim 0.0 +975 2 model.dropout_0 0.1166831307089919 +975 2 model.dropout_1 0.39848330545895966 +975 2 model.dropout_2 0.1654510592673232 +975 2 optimizer.lr 0.007327654693108908 +975 2 training.batch_size 1.0 +975 2 training.label_smoothing 0.7148030353622917 +975 3 model.embedding_dim 1.0 +975 3 model.relation_dim 1.0 +975 3 model.dropout_0 0.4083773969109805 +975 3 model.dropout_1 0.1864622773909985 +975 3 model.dropout_2 0.19999847134594073 +975 3 optimizer.lr 0.0035411634929946464 +975 3 training.batch_size 0.0 +975 3 training.label_smoothing 0.0018114277145942794 +975 4 model.embedding_dim 0.0 +975 4 model.relation_dim 1.0 +975 4 model.dropout_0 0.35670575467034826 +975 4 model.dropout_1 0.3118585610131377 +975 4 model.dropout_2 0.10704918139474283 +975 4 optimizer.lr 0.02309201184514306 +975 4 training.batch_size 2.0 +975 4 training.label_smoothing 0.007436579199563536 +975 5 model.embedding_dim 1.0 +975 5 model.relation_dim 1.0 +975 5 model.dropout_0 0.3016365777601762 +975 5 model.dropout_1 0.3224161432547737 +975 5 model.dropout_2 0.3053539659582598 +975 5 optimizer.lr 0.002296404868461444 +975 5 training.batch_size 1.0 +975 5 training.label_smoothing 0.0035657667361149053 +975 6 model.embedding_dim 1.0 +975 6 model.relation_dim 1.0 +975 6 model.dropout_0 0.24167004609058163 +975 6 model.dropout_1 0.13470000901792709 +975 6 model.dropout_2 0.3249676150402184 +975 6 optimizer.lr 0.02891609449870133 +975 6 training.batch_size 1.0 +975 6 training.label_smoothing 0.3292963086155779 +975 7 model.embedding_dim 2.0 +975 7 model.relation_dim 1.0 +975 7 model.dropout_0 0.45875707670547333 +975 7 model.dropout_1 0.3093236683221168 +975 7 model.dropout_2 0.18404736255062565 +975 7 optimizer.lr 0.015609746754246062 +975 7 training.batch_size 1.0 +975 7 training.label_smoothing 0.01632988462586245 +975 8 model.embedding_dim 0.0 +975 8 model.relation_dim 2.0 +975 8 model.dropout_0 0.2712277337412873 
+975 8 model.dropout_1 0.3311386468137932 +975 8 model.dropout_2 0.4599265402847832 +975 8 optimizer.lr 0.012650796054606285 +975 8 training.batch_size 0.0 +975 8 training.label_smoothing 0.021928700856715125 +975 9 model.embedding_dim 0.0 +975 9 model.relation_dim 1.0 +975 9 model.dropout_0 0.4934224832869346 +975 9 model.dropout_1 0.3464118095298704 +975 9 model.dropout_2 0.11849356511272556 +975 9 optimizer.lr 0.05823256864921092 +975 9 training.batch_size 0.0 +975 9 training.label_smoothing 0.012541307768265788 +975 10 model.embedding_dim 2.0 +975 10 model.relation_dim 1.0 +975 10 model.dropout_0 0.11694497516149815 +975 10 model.dropout_1 0.1338939633786533 +975 10 model.dropout_2 0.40371373015789624 +975 10 optimizer.lr 0.0037421655830627296 +975 10 training.batch_size 2.0 +975 10 training.label_smoothing 0.005171902978889606 +975 11 model.embedding_dim 2.0 +975 11 model.relation_dim 2.0 +975 11 model.dropout_0 0.23345413521070008 +975 11 model.dropout_1 0.13147106457827182 +975 11 model.dropout_2 0.4857363467952236 +975 11 optimizer.lr 0.06322254756428924 +975 11 training.batch_size 0.0 +975 11 training.label_smoothing 0.017291190336663608 +975 12 model.embedding_dim 0.0 +975 12 model.relation_dim 1.0 +975 12 model.dropout_0 0.10911029901098646 +975 12 model.dropout_1 0.4747101491363239 +975 12 model.dropout_2 0.1126475039595062 +975 12 optimizer.lr 0.00946226831311525 +975 12 training.batch_size 0.0 +975 12 training.label_smoothing 0.07368937268947046 +975 13 model.embedding_dim 2.0 +975 13 model.relation_dim 1.0 +975 13 model.dropout_0 0.42106156787172316 +975 13 model.dropout_1 0.4674937228919057 +975 13 model.dropout_2 0.29479570940726496 +975 13 optimizer.lr 0.008733517611372421 +975 13 training.batch_size 2.0 +975 13 training.label_smoothing 0.1512825679562581 +975 14 model.embedding_dim 0.0 +975 14 model.relation_dim 1.0 +975 14 model.dropout_0 0.3778532071940368 +975 14 model.dropout_1 0.15967034259029714 +975 14 model.dropout_2 0.47960002045183286 
+975 14 optimizer.lr 0.016838037308167783 +975 14 training.batch_size 0.0 +975 14 training.label_smoothing 0.005985746291525377 +975 15 model.embedding_dim 1.0 +975 15 model.relation_dim 1.0 +975 15 model.dropout_0 0.43832245291875616 +975 15 model.dropout_1 0.13468783192461947 +975 15 model.dropout_2 0.31905297711695196 +975 15 optimizer.lr 0.027324952005403354 +975 15 training.batch_size 0.0 +975 15 training.label_smoothing 0.03172667649154881 +975 16 model.embedding_dim 1.0 +975 16 model.relation_dim 1.0 +975 16 model.dropout_0 0.32638302676813963 +975 16 model.dropout_1 0.2507400976699294 +975 16 model.dropout_2 0.2803937184792001 +975 16 optimizer.lr 0.0042830625902216864 +975 16 training.batch_size 1.0 +975 16 training.label_smoothing 0.0069095118271757325 +975 17 model.embedding_dim 2.0 +975 17 model.relation_dim 2.0 +975 17 model.dropout_0 0.4976812244832073 +975 17 model.dropout_1 0.19126011143581617 +975 17 model.dropout_2 0.27739708564203325 +975 17 optimizer.lr 0.021092440728292274 +975 17 training.batch_size 0.0 +975 17 training.label_smoothing 0.06047521613079056 +975 18 model.embedding_dim 0.0 +975 18 model.relation_dim 2.0 +975 18 model.dropout_0 0.1621087616744145 +975 18 model.dropout_1 0.31808512824797125 +975 18 model.dropout_2 0.3855412117500261 +975 18 optimizer.lr 0.018491973463401527 +975 18 training.batch_size 2.0 +975 18 training.label_smoothing 0.31430874018274807 +975 19 model.embedding_dim 1.0 +975 19 model.relation_dim 0.0 +975 19 model.dropout_0 0.2305431726058132 +975 19 model.dropout_1 0.3062419485533161 +975 19 model.dropout_2 0.17595813888093093 +975 19 optimizer.lr 0.06057894722990354 +975 19 training.batch_size 0.0 +975 19 training.label_smoothing 0.011619297311306133 +975 20 model.embedding_dim 1.0 +975 20 model.relation_dim 2.0 +975 20 model.dropout_0 0.40176123070994946 +975 20 model.dropout_1 0.40717704557877066 +975 20 model.dropout_2 0.25161274502383874 +975 20 optimizer.lr 0.015895291333568186 +975 20 training.batch_size 
1.0 +975 20 training.label_smoothing 0.10185401658631578 +975 21 model.embedding_dim 0.0 +975 21 model.relation_dim 2.0 +975 21 model.dropout_0 0.302393168459965 +975 21 model.dropout_1 0.15730837793419927 +975 21 model.dropout_2 0.32341512837000164 +975 21 optimizer.lr 0.018395858952098935 +975 21 training.batch_size 0.0 +975 21 training.label_smoothing 0.22350626005065893 +975 22 model.embedding_dim 0.0 +975 22 model.relation_dim 0.0 +975 22 model.dropout_0 0.3630779813393034 +975 22 model.dropout_1 0.13778959324986953 +975 22 model.dropout_2 0.4878061768966978 +975 22 optimizer.lr 0.01504130229632163 +975 22 training.batch_size 2.0 +975 22 training.label_smoothing 0.011322028700012064 +975 23 model.embedding_dim 0.0 +975 23 model.relation_dim 2.0 +975 23 model.dropout_0 0.3800831178473778 +975 23 model.dropout_1 0.30314732218441454 +975 23 model.dropout_2 0.3242626189350843 +975 23 optimizer.lr 0.043766310141765775 +975 23 training.batch_size 0.0 +975 23 training.label_smoothing 0.013479042560406341 +975 24 model.embedding_dim 2.0 +975 24 model.relation_dim 0.0 +975 24 model.dropout_0 0.319772746952548 +975 24 model.dropout_1 0.33347870579557654 +975 24 model.dropout_2 0.40457391702402196 +975 24 optimizer.lr 0.003935453103568314 +975 24 training.batch_size 0.0 +975 24 training.label_smoothing 0.033264500555833354 +975 25 model.embedding_dim 1.0 +975 25 model.relation_dim 1.0 +975 25 model.dropout_0 0.32585284129449976 +975 25 model.dropout_1 0.23328518657127661 +975 25 model.dropout_2 0.24203350724032147 +975 25 optimizer.lr 0.011321163654357681 +975 25 training.batch_size 2.0 +975 25 training.label_smoothing 0.2164992372784561 +975 26 model.embedding_dim 2.0 +975 26 model.relation_dim 0.0 +975 26 model.dropout_0 0.468076686509349 +975 26 model.dropout_1 0.18019687425643793 +975 26 model.dropout_2 0.15161989403590523 +975 26 optimizer.lr 0.0019831583032786032 +975 26 training.batch_size 2.0 +975 26 training.label_smoothing 0.0013039985877066073 +975 27 
model.embedding_dim 0.0 +975 27 model.relation_dim 2.0 +975 27 model.dropout_0 0.11031275015470521 +975 27 model.dropout_1 0.1191491122895875 +975 27 model.dropout_2 0.3253256821828757 +975 27 optimizer.lr 0.005284718069874511 +975 27 training.batch_size 2.0 +975 27 training.label_smoothing 0.319216005287967 +975 28 model.embedding_dim 0.0 +975 28 model.relation_dim 2.0 +975 28 model.dropout_0 0.21462565566367978 +975 28 model.dropout_1 0.23564512284321137 +975 28 model.dropout_2 0.3491820631071258 +975 28 optimizer.lr 0.0050765780254104125 +975 28 training.batch_size 2.0 +975 28 training.label_smoothing 0.052716443372959096 +975 29 model.embedding_dim 0.0 +975 29 model.relation_dim 1.0 +975 29 model.dropout_0 0.4223082699609016 +975 29 model.dropout_1 0.36686460211231003 +975 29 model.dropout_2 0.4104143963532924 +975 29 optimizer.lr 0.0022926432352974985 +975 29 training.batch_size 0.0 +975 29 training.label_smoothing 0.013769178803284004 +975 30 model.embedding_dim 0.0 +975 30 model.relation_dim 2.0 +975 30 model.dropout_0 0.2306372760985875 +975 30 model.dropout_1 0.16335862923371747 +975 30 model.dropout_2 0.2669202230421505 +975 30 optimizer.lr 0.08108932847565531 +975 30 training.batch_size 1.0 +975 30 training.label_smoothing 0.6251647992969236 +975 31 model.embedding_dim 1.0 +975 31 model.relation_dim 1.0 +975 31 model.dropout_0 0.44172368584742505 +975 31 model.dropout_1 0.34836812260259287 +975 31 model.dropout_2 0.4484139962634514 +975 31 optimizer.lr 0.001561515787784576 +975 31 training.batch_size 1.0 +975 31 training.label_smoothing 0.6477541912589302 +975 32 model.embedding_dim 1.0 +975 32 model.relation_dim 0.0 +975 32 model.dropout_0 0.3220354667439487 +975 32 model.dropout_1 0.2306729396123157 +975 32 model.dropout_2 0.16760413592840517 +975 32 optimizer.lr 0.06459158884577937 +975 32 training.batch_size 2.0 +975 32 training.label_smoothing 0.0026792179288652965 +975 33 model.embedding_dim 0.0 +975 33 model.relation_dim 2.0 +975 33 
model.dropout_0 0.41534201832658124 +975 33 model.dropout_1 0.44882761789452197 +975 33 model.dropout_2 0.4301372945891764 +975 33 optimizer.lr 0.00757349387123776 +975 33 training.batch_size 1.0 +975 33 training.label_smoothing 0.056240481491962487 +975 34 model.embedding_dim 2.0 +975 34 model.relation_dim 1.0 +975 34 model.dropout_0 0.162896232634813 +975 34 model.dropout_1 0.46222460996624193 +975 34 model.dropout_2 0.42829301312090795 +975 34 optimizer.lr 0.0020104355761524405 +975 34 training.batch_size 1.0 +975 34 training.label_smoothing 0.3629950391394746 +975 35 model.embedding_dim 2.0 +975 35 model.relation_dim 1.0 +975 35 model.dropout_0 0.22521740428855283 +975 35 model.dropout_1 0.1258923075586373 +975 35 model.dropout_2 0.45962175884442125 +975 35 optimizer.lr 0.00516713863471779 +975 35 training.batch_size 2.0 +975 35 training.label_smoothing 0.01445913480152803 +975 36 model.embedding_dim 2.0 +975 36 model.relation_dim 2.0 +975 36 model.dropout_0 0.24449550592332103 +975 36 model.dropout_1 0.2846810402697713 +975 36 model.dropout_2 0.3230060803070138 +975 36 optimizer.lr 0.005246843485489532 +975 36 training.batch_size 2.0 +975 36 training.label_smoothing 0.005129556623444838 +975 37 model.embedding_dim 1.0 +975 37 model.relation_dim 0.0 +975 37 model.dropout_0 0.4919304929506332 +975 37 model.dropout_1 0.30618547326232115 +975 37 model.dropout_2 0.4417608148980756 +975 37 optimizer.lr 0.0012114618376898123 +975 37 training.batch_size 1.0 +975 37 training.label_smoothing 0.0010803311483345603 +975 38 model.embedding_dim 1.0 +975 38 model.relation_dim 0.0 +975 38 model.dropout_0 0.10663002267125746 +975 38 model.dropout_1 0.4605717397216306 +975 38 model.dropout_2 0.1730300892729989 +975 38 optimizer.lr 0.025292666978853837 +975 38 training.batch_size 2.0 +975 38 training.label_smoothing 0.005940570276279982 +975 39 model.embedding_dim 2.0 +975 39 model.relation_dim 2.0 +975 39 model.dropout_0 0.16693349319755427 +975 39 model.dropout_1 
0.484493446236544 +975 39 model.dropout_2 0.23539657635211417 +975 39 optimizer.lr 0.03215742203725979 +975 39 training.batch_size 0.0 +975 39 training.label_smoothing 0.03988904736612299 +975 40 model.embedding_dim 0.0 +975 40 model.relation_dim 0.0 +975 40 model.dropout_0 0.1554897408986407 +975 40 model.dropout_1 0.4323332998260473 +975 40 model.dropout_2 0.2851925840060011 +975 40 optimizer.lr 0.04146605198910287 +975 40 training.batch_size 1.0 +975 40 training.label_smoothing 0.0040302199741112195 +975 41 model.embedding_dim 0.0 +975 41 model.relation_dim 2.0 +975 41 model.dropout_0 0.2285398187728986 +975 41 model.dropout_1 0.4522336625986687 +975 41 model.dropout_2 0.3227009132026745 +975 41 optimizer.lr 0.004566887069558721 +975 41 training.batch_size 2.0 +975 41 training.label_smoothing 0.15505058640717567 +975 42 model.embedding_dim 1.0 +975 42 model.relation_dim 0.0 +975 42 model.dropout_0 0.44932894668711243 +975 42 model.dropout_1 0.47148237320935255 +975 42 model.dropout_2 0.20800514048857838 +975 42 optimizer.lr 0.01147538341675978 +975 42 training.batch_size 1.0 +975 42 training.label_smoothing 0.6327513831067698 +975 43 model.embedding_dim 2.0 +975 43 model.relation_dim 0.0 +975 43 model.dropout_0 0.3224134841073796 +975 43 model.dropout_1 0.31601717116205563 +975 43 model.dropout_2 0.17269348758715283 +975 43 optimizer.lr 0.01833322441068509 +975 43 training.batch_size 0.0 +975 43 training.label_smoothing 0.0011540655144314676 +975 44 model.embedding_dim 0.0 +975 44 model.relation_dim 2.0 +975 44 model.dropout_0 0.28757470666350404 +975 44 model.dropout_1 0.13967394039641717 +975 44 model.dropout_2 0.2215780372095448 +975 44 optimizer.lr 0.002581910880937741 +975 44 training.batch_size 0.0 +975 44 training.label_smoothing 0.01861837024764884 +975 45 model.embedding_dim 1.0 +975 45 model.relation_dim 1.0 +975 45 model.dropout_0 0.4641215662487496 +975 45 model.dropout_1 0.4513119502587134 +975 45 model.dropout_2 0.18994545408876712 +975 45 
optimizer.lr 0.02263787399665923 +975 45 training.batch_size 1.0 +975 45 training.label_smoothing 0.058578766811014 +975 46 model.embedding_dim 0.0 +975 46 model.relation_dim 1.0 +975 46 model.dropout_0 0.2509254927938323 +975 46 model.dropout_1 0.17796300591562775 +975 46 model.dropout_2 0.47828422965450684 +975 46 optimizer.lr 0.04344015169128819 +975 46 training.batch_size 0.0 +975 46 training.label_smoothing 0.032748810770617115 +975 47 model.embedding_dim 2.0 +975 47 model.relation_dim 0.0 +975 47 model.dropout_0 0.1605744224979501 +975 47 model.dropout_1 0.1976305298159341 +975 47 model.dropout_2 0.23046896774726192 +975 47 optimizer.lr 0.005189981636999298 +975 47 training.batch_size 1.0 +975 47 training.label_smoothing 0.0875883105503182 +975 48 model.embedding_dim 2.0 +975 48 model.relation_dim 2.0 +975 48 model.dropout_0 0.14502946384001497 +975 48 model.dropout_1 0.2943005595590489 +975 48 model.dropout_2 0.18072947695227906 +975 48 optimizer.lr 0.016368086622381626 +975 48 training.batch_size 2.0 +975 48 training.label_smoothing 0.0017549511782347714 +975 49 model.embedding_dim 0.0 +975 49 model.relation_dim 1.0 +975 49 model.dropout_0 0.38883068240183793 +975 49 model.dropout_1 0.1888272094924069 +975 49 model.dropout_2 0.3983497220634822 +975 49 optimizer.lr 0.004729544742234222 +975 49 training.batch_size 2.0 +975 49 training.label_smoothing 0.07083069526904258 +975 50 model.embedding_dim 1.0 +975 50 model.relation_dim 1.0 +975 50 model.dropout_0 0.27185270473187423 +975 50 model.dropout_1 0.2883715406063063 +975 50 model.dropout_2 0.2178311751062732 +975 50 optimizer.lr 0.009466752794704835 +975 50 training.batch_size 1.0 +975 50 training.label_smoothing 0.07647797134989573 +975 51 model.embedding_dim 1.0 +975 51 model.relation_dim 1.0 +975 51 model.dropout_0 0.44751521295767527 +975 51 model.dropout_1 0.17756254619064268 +975 51 model.dropout_2 0.14830749904861262 +975 51 optimizer.lr 0.004051478201053988 +975 51 training.batch_size 0.0 +975 51 
training.label_smoothing 0.005330380951958401 +975 52 model.embedding_dim 1.0 +975 52 model.relation_dim 0.0 +975 52 model.dropout_0 0.139594125228586 +975 52 model.dropout_1 0.2765915098317772 +975 52 model.dropout_2 0.41814267085098145 +975 52 optimizer.lr 0.02457535548044437 +975 52 training.batch_size 0.0 +975 52 training.label_smoothing 0.004858218445646814 +975 53 model.embedding_dim 1.0 +975 53 model.relation_dim 1.0 +975 53 model.dropout_0 0.21120499202412812 +975 53 model.dropout_1 0.18407030978401837 +975 53 model.dropout_2 0.1256799463030888 +975 53 optimizer.lr 0.016231245737211998 +975 53 training.batch_size 1.0 +975 53 training.label_smoothing 0.002454114289852111 +975 54 model.embedding_dim 0.0 +975 54 model.relation_dim 0.0 +975 54 model.dropout_0 0.4221777742240672 +975 54 model.dropout_1 0.2142603089196567 +975 54 model.dropout_2 0.14867158037066655 +975 54 optimizer.lr 0.04576841322164804 +975 54 training.batch_size 2.0 +975 54 training.label_smoothing 0.11287076475787618 +975 55 model.embedding_dim 0.0 +975 55 model.relation_dim 2.0 +975 55 model.dropout_0 0.39506145567264217 +975 55 model.dropout_1 0.11476173396450787 +975 55 model.dropout_2 0.28617020564948564 +975 55 optimizer.lr 0.006257497767821873 +975 55 training.batch_size 1.0 +975 55 training.label_smoothing 0.19078980083391972 +975 56 model.embedding_dim 0.0 +975 56 model.relation_dim 1.0 +975 56 model.dropout_0 0.48125510245421177 +975 56 model.dropout_1 0.4620261859734821 +975 56 model.dropout_2 0.21442914893633055 +975 56 optimizer.lr 0.03254364607158274 +975 56 training.batch_size 2.0 +975 56 training.label_smoothing 0.34478488246752115 +975 57 model.embedding_dim 2.0 +975 57 model.relation_dim 2.0 +975 57 model.dropout_0 0.3794283164900233 +975 57 model.dropout_1 0.15260123834969921 +975 57 model.dropout_2 0.4271244523957464 +975 57 optimizer.lr 0.0023009567275061055 +975 57 training.batch_size 0.0 +975 57 training.label_smoothing 0.12818914012087546 +975 58 model.embedding_dim 
2.0 +975 58 model.relation_dim 0.0 +975 58 model.dropout_0 0.32445920307473686 +975 58 model.dropout_1 0.12478358525841259 +975 58 model.dropout_2 0.3294129056481957 +975 58 optimizer.lr 0.012652875291577888 +975 58 training.batch_size 1.0 +975 58 training.label_smoothing 0.083048464343932 +975 59 model.embedding_dim 1.0 +975 59 model.relation_dim 2.0 +975 59 model.dropout_0 0.1276026140074997 +975 59 model.dropout_1 0.36801062671463103 +975 59 model.dropout_2 0.1774181063666605 +975 59 optimizer.lr 0.003075413098255219 +975 59 training.batch_size 2.0 +975 59 training.label_smoothing 0.21903717873148174 +975 60 model.embedding_dim 1.0 +975 60 model.relation_dim 0.0 +975 60 model.dropout_0 0.28886425497781504 +975 60 model.dropout_1 0.39491262554083506 +975 60 model.dropout_2 0.18782266391142657 +975 60 optimizer.lr 0.02806503283282781 +975 60 training.batch_size 0.0 +975 60 training.label_smoothing 0.6117273538592076 +975 61 model.embedding_dim 0.0 +975 61 model.relation_dim 1.0 +975 61 model.dropout_0 0.33188413452726734 +975 61 model.dropout_1 0.4194680474960346 +975 61 model.dropout_2 0.2963741869517925 +975 61 optimizer.lr 0.004097156716230279 +975 61 training.batch_size 1.0 +975 61 training.label_smoothing 0.3546891367703254 +975 62 model.embedding_dim 0.0 +975 62 model.relation_dim 1.0 +975 62 model.dropout_0 0.4552255023301243 +975 62 model.dropout_1 0.22554467163348335 +975 62 model.dropout_2 0.12727299417306154 +975 62 optimizer.lr 0.06855970174153685 +975 62 training.batch_size 0.0 +975 62 training.label_smoothing 0.31465393711883183 +975 63 model.embedding_dim 0.0 +975 63 model.relation_dim 2.0 +975 63 model.dropout_0 0.1662060404936042 +975 63 model.dropout_1 0.4654555844938421 +975 63 model.dropout_2 0.2353931557940698 +975 63 optimizer.lr 0.005879541632346625 +975 63 training.batch_size 0.0 +975 63 training.label_smoothing 0.40425791849948545 +975 64 model.embedding_dim 1.0 +975 64 model.relation_dim 0.0 +975 64 model.dropout_0 0.16923701178639186 
+975 64 model.dropout_1 0.28280566565567317 +975 64 model.dropout_2 0.36171208734232446 +975 64 optimizer.lr 0.01422435558718668 +975 64 training.batch_size 1.0 +975 64 training.label_smoothing 0.6242063332028264 +975 65 model.embedding_dim 2.0 +975 65 model.relation_dim 2.0 +975 65 model.dropout_0 0.48367171236586304 +975 65 model.dropout_1 0.35789585856988115 +975 65 model.dropout_2 0.44454605232102673 +975 65 optimizer.lr 0.00709975935776472 +975 65 training.batch_size 2.0 +975 65 training.label_smoothing 0.002840259420193579 +975 66 model.embedding_dim 2.0 +975 66 model.relation_dim 0.0 +975 66 model.dropout_0 0.35754480696604046 +975 66 model.dropout_1 0.3548488974549724 +975 66 model.dropout_2 0.26635040256556003 +975 66 optimizer.lr 0.010785258427781356 +975 66 training.batch_size 0.0 +975 66 training.label_smoothing 0.0014004952708578438 +975 67 model.embedding_dim 1.0 +975 67 model.relation_dim 2.0 +975 67 model.dropout_0 0.4652951524579898 +975 67 model.dropout_1 0.4031260782251252 +975 67 model.dropout_2 0.16392862050345017 +975 67 optimizer.lr 0.0020339339909521033 +975 67 training.batch_size 2.0 +975 67 training.label_smoothing 0.0032998165021971278 +975 68 model.embedding_dim 2.0 +975 68 model.relation_dim 1.0 +975 68 model.dropout_0 0.31154169775723933 +975 68 model.dropout_1 0.33176871267294755 +975 68 model.dropout_2 0.4766488948130134 +975 68 optimizer.lr 0.019744186613929387 +975 68 training.batch_size 1.0 +975 68 training.label_smoothing 0.002611176525383162 +975 69 model.embedding_dim 1.0 +975 69 model.relation_dim 2.0 +975 69 model.dropout_0 0.17075674483364592 +975 69 model.dropout_1 0.27913431261561955 +975 69 model.dropout_2 0.23794652323208476 +975 69 optimizer.lr 0.010442099238528787 +975 69 training.batch_size 1.0 +975 69 training.label_smoothing 0.9541733364219217 +975 70 model.embedding_dim 1.0 +975 70 model.relation_dim 2.0 +975 70 model.dropout_0 0.3225450722174372 +975 70 model.dropout_1 0.3687953694250953 +975 70 model.dropout_2 
0.38867446309578174 +975 70 optimizer.lr 0.07949530974327482 +975 70 training.batch_size 1.0 +975 70 training.label_smoothing 0.5160202823878919 +975 71 model.embedding_dim 2.0 +975 71 model.relation_dim 2.0 +975 71 model.dropout_0 0.48228446325279806 +975 71 model.dropout_1 0.4028518973763659 +975 71 model.dropout_2 0.14256412757154419 +975 71 optimizer.lr 0.025116403395331752 +975 71 training.batch_size 1.0 +975 71 training.label_smoothing 0.02932044010589534 +975 72 model.embedding_dim 0.0 +975 72 model.relation_dim 2.0 +975 72 model.dropout_0 0.4780818799098494 +975 72 model.dropout_1 0.4942954640958082 +975 72 model.dropout_2 0.47358624129818483 +975 72 optimizer.lr 0.018505679257743327 +975 72 training.batch_size 1.0 +975 72 training.label_smoothing 0.30679794202118116 +975 73 model.embedding_dim 1.0 +975 73 model.relation_dim 1.0 +975 73 model.dropout_0 0.21042883587803282 +975 73 model.dropout_1 0.12511057419413682 +975 73 model.dropout_2 0.41559979813618125 +975 73 optimizer.lr 0.008315036886899762 +975 73 training.batch_size 2.0 +975 73 training.label_smoothing 0.0024437118909219576 +975 74 model.embedding_dim 0.0 +975 74 model.relation_dim 0.0 +975 74 model.dropout_0 0.3680393013858755 +975 74 model.dropout_1 0.32652772715109124 +975 74 model.dropout_2 0.3764751576221162 +975 74 optimizer.lr 0.03141664428813255 +975 74 training.batch_size 0.0 +975 74 training.label_smoothing 0.15161965746781556 +975 75 model.embedding_dim 2.0 +975 75 model.relation_dim 1.0 +975 75 model.dropout_0 0.4107281452990555 +975 75 model.dropout_1 0.26123621242152095 +975 75 model.dropout_2 0.2696841834502792 +975 75 optimizer.lr 0.001200862507763247 +975 75 training.batch_size 2.0 +975 75 training.label_smoothing 0.002244762944059092 +975 76 model.embedding_dim 1.0 +975 76 model.relation_dim 1.0 +975 76 model.dropout_0 0.4464417827831124 +975 76 model.dropout_1 0.2783874810137308 +975 76 model.dropout_2 0.2682480608951348 +975 76 optimizer.lr 0.019817077093292692 +975 76 
training.batch_size 1.0 +975 76 training.label_smoothing 0.035600931348092765 +975 77 model.embedding_dim 0.0 +975 77 model.relation_dim 2.0 +975 77 model.dropout_0 0.21675003328048983 +975 77 model.dropout_1 0.4583996480420972 +975 77 model.dropout_2 0.34298573766218465 +975 77 optimizer.lr 0.0010237641948153805 +975 77 training.batch_size 2.0 +975 77 training.label_smoothing 0.010291371112153643 +975 78 model.embedding_dim 1.0 +975 78 model.relation_dim 0.0 +975 78 model.dropout_0 0.2782855716078776 +975 78 model.dropout_1 0.296063438122517 +975 78 model.dropout_2 0.46929272521007387 +975 78 optimizer.lr 0.009279146194317035 +975 78 training.batch_size 1.0 +975 78 training.label_smoothing 0.38219851625255935 +975 79 model.embedding_dim 1.0 +975 79 model.relation_dim 2.0 +975 79 model.dropout_0 0.3161472716026199 +975 79 model.dropout_1 0.14240464212066772 +975 79 model.dropout_2 0.3103012234133955 +975 79 optimizer.lr 0.004604095188291948 +975 79 training.batch_size 1.0 +975 79 training.label_smoothing 0.180215920785809 +975 80 model.embedding_dim 1.0 +975 80 model.relation_dim 0.0 +975 80 model.dropout_0 0.20520154255485315 +975 80 model.dropout_1 0.29758197589116525 +975 80 model.dropout_2 0.2182755431909511 +975 80 optimizer.lr 0.008071184688305465 +975 80 training.batch_size 2.0 +975 80 training.label_smoothing 0.0498106104028508 +975 81 model.embedding_dim 2.0 +975 81 model.relation_dim 1.0 +975 81 model.dropout_0 0.10732145661509773 +975 81 model.dropout_1 0.16873953963932245 +975 81 model.dropout_2 0.25673639278654825 +975 81 optimizer.lr 0.011646938121561569 +975 81 training.batch_size 0.0 +975 81 training.label_smoothing 0.5980905499331309 +975 82 model.embedding_dim 0.0 +975 82 model.relation_dim 0.0 +975 82 model.dropout_0 0.27308969574481584 +975 82 model.dropout_1 0.13366498595484044 +975 82 model.dropout_2 0.36163854097049186 +975 82 optimizer.lr 0.004978871311595812 +975 82 training.batch_size 0.0 +975 82 training.label_smoothing 
0.026198095063948674 +975 83 model.embedding_dim 1.0 +975 83 model.relation_dim 1.0 +975 83 model.dropout_0 0.29979836117135084 +975 83 model.dropout_1 0.1475590647778109 +975 83 model.dropout_2 0.43742336882561306 +975 83 optimizer.lr 0.0014025803343889719 +975 83 training.batch_size 2.0 +975 83 training.label_smoothing 0.2566369720786953 +975 84 model.embedding_dim 0.0 +975 84 model.relation_dim 1.0 +975 84 model.dropout_0 0.44416349816993483 +975 84 model.dropout_1 0.3794344808981373 +975 84 model.dropout_2 0.36435123273395037 +975 84 optimizer.lr 0.098330079680973 +975 84 training.batch_size 2.0 +975 84 training.label_smoothing 0.11834541520659368 +975 85 model.embedding_dim 0.0 +975 85 model.relation_dim 0.0 +975 85 model.dropout_0 0.32187601926067005 +975 85 model.dropout_1 0.360151527885696 +975 85 model.dropout_2 0.20754732258051925 +975 85 optimizer.lr 0.011530537198756787 +975 85 training.batch_size 1.0 +975 85 training.label_smoothing 0.5930117282648776 +975 86 model.embedding_dim 2.0 +975 86 model.relation_dim 1.0 +975 86 model.dropout_0 0.4057229027341771 +975 86 model.dropout_1 0.20256066985757648 +975 86 model.dropout_2 0.14628203022220682 +975 86 optimizer.lr 0.03952286184948298 +975 86 training.batch_size 0.0 +975 86 training.label_smoothing 0.0024671111274207455 +975 87 model.embedding_dim 1.0 +975 87 model.relation_dim 2.0 +975 87 model.dropout_0 0.3348104312732214 +975 87 model.dropout_1 0.42839795520286794 +975 87 model.dropout_2 0.41726881374778896 +975 87 optimizer.lr 0.002247414267422216 +975 87 training.batch_size 2.0 +975 87 training.label_smoothing 0.9268226668074238 +975 88 model.embedding_dim 0.0 +975 88 model.relation_dim 0.0 +975 88 model.dropout_0 0.4856817052521216 +975 88 model.dropout_1 0.3450573022085128 +975 88 model.dropout_2 0.18613264556221454 +975 88 optimizer.lr 0.06257933966919664 +975 88 training.batch_size 0.0 +975 88 training.label_smoothing 0.11741823676632344 +975 89 model.embedding_dim 1.0 +975 89 model.relation_dim 
0.0 +975 89 model.dropout_0 0.3540459763573993 +975 89 model.dropout_1 0.32261342394212406 +975 89 model.dropout_2 0.1323699877016725 +975 89 optimizer.lr 0.04604428479832844 +975 89 training.batch_size 1.0 +975 89 training.label_smoothing 0.003728440812524051 +975 90 model.embedding_dim 1.0 +975 90 model.relation_dim 2.0 +975 90 model.dropout_0 0.22618224488929006 +975 90 model.dropout_1 0.23840591168886893 +975 90 model.dropout_2 0.15612039127319652 +975 90 optimizer.lr 0.026944490098312866 +975 90 training.batch_size 0.0 +975 90 training.label_smoothing 0.9214958521294833 +975 91 model.embedding_dim 2.0 +975 91 model.relation_dim 0.0 +975 91 model.dropout_0 0.17557157141706084 +975 91 model.dropout_1 0.41134784437261906 +975 91 model.dropout_2 0.19842489753535175 +975 91 optimizer.lr 0.009556546078700405 +975 91 training.batch_size 0.0 +975 91 training.label_smoothing 0.0017443950709259564 +975 92 model.embedding_dim 1.0 +975 92 model.relation_dim 2.0 +975 92 model.dropout_0 0.15981483549776307 +975 92 model.dropout_1 0.46165647831883627 +975 92 model.dropout_2 0.18482958603984928 +975 92 optimizer.lr 0.005762614017165245 +975 92 training.batch_size 1.0 +975 92 training.label_smoothing 0.16177342900995817 +975 93 model.embedding_dim 1.0 +975 93 model.relation_dim 2.0 +975 93 model.dropout_0 0.39933540304488485 +975 93 model.dropout_1 0.459863167160847 +975 93 model.dropout_2 0.16435037786107684 +975 93 optimizer.lr 0.03681306945243744 +975 93 training.batch_size 0.0 +975 93 training.label_smoothing 0.07478169069210562 +975 94 model.embedding_dim 2.0 +975 94 model.relation_dim 1.0 +975 94 model.dropout_0 0.48157960864268984 +975 94 model.dropout_1 0.1441837678553152 +975 94 model.dropout_2 0.4294502964756869 +975 94 optimizer.lr 0.029439298627704565 +975 94 training.batch_size 0.0 +975 94 training.label_smoothing 0.001336060450249899 +975 95 model.embedding_dim 1.0 +975 95 model.relation_dim 2.0 +975 95 model.dropout_0 0.4467762643403044 +975 95 model.dropout_1 
0.10049705321061389 +975 95 model.dropout_2 0.22192735694828616 +975 95 optimizer.lr 0.007121336107042118 +975 95 training.batch_size 1.0 +975 95 training.label_smoothing 0.8861974702568037 +975 96 model.embedding_dim 0.0 +975 96 model.relation_dim 1.0 +975 96 model.dropout_0 0.3897297492014301 +975 96 model.dropout_1 0.37339697702505853 +975 96 model.dropout_2 0.42494342326010015 +975 96 optimizer.lr 0.016056183412732367 +975 96 training.batch_size 1.0 +975 96 training.label_smoothing 0.004664932930019377 +975 97 model.embedding_dim 0.0 +975 97 model.relation_dim 1.0 +975 97 model.dropout_0 0.35852063963063807 +975 97 model.dropout_1 0.49427535486534624 +975 97 model.dropout_2 0.23426218582019265 +975 97 optimizer.lr 0.003950505525460554 +975 97 training.batch_size 2.0 +975 97 training.label_smoothing 0.05407814523962808 +975 98 model.embedding_dim 1.0 +975 98 model.relation_dim 1.0 +975 98 model.dropout_0 0.4975339762599224 +975 98 model.dropout_1 0.44740037593345416 +975 98 model.dropout_2 0.4485694078483088 +975 98 optimizer.lr 0.003812391122692259 +975 98 training.batch_size 2.0 +975 98 training.label_smoothing 0.0010772499191163722 +975 99 model.embedding_dim 0.0 +975 99 model.relation_dim 1.0 +975 99 model.dropout_0 0.32341421416680777 +975 99 model.dropout_1 0.2746814081865134 +975 99 model.dropout_2 0.11982738812632016 +975 99 optimizer.lr 0.0017591188298692352 +975 99 training.batch_size 2.0 +975 99 training.label_smoothing 0.3938039403208199 +975 100 model.embedding_dim 2.0 +975 100 model.relation_dim 2.0 +975 100 model.dropout_0 0.3849283070877994 +975 100 model.dropout_1 0.1616995964124132 +975 100 model.dropout_2 0.4707472164393607 +975 100 optimizer.lr 0.0025386068506159935 +975 100 training.batch_size 2.0 +975 100 training.label_smoothing 0.6035307177914588 +975 1 dataset """kinships""" +975 1 model """tucker""" +975 1 loss """bceaftersigmoid""" +975 1 regularizer """no""" +975 1 optimizer """adam""" +975 1 training_loop """lcwa""" +975 1 evaluator 
"""rankbased""" +975 2 dataset """kinships""" +975 2 model """tucker""" +975 2 loss """bceaftersigmoid""" +975 2 regularizer """no""" +975 2 optimizer """adam""" +975 2 training_loop """lcwa""" +975 2 evaluator """rankbased""" +975 3 dataset """kinships""" +975 3 model """tucker""" +975 3 loss """bceaftersigmoid""" +975 3 regularizer """no""" +975 3 optimizer """adam""" +975 3 training_loop """lcwa""" +975 3 evaluator """rankbased""" +975 4 dataset """kinships""" +975 4 model """tucker""" +975 4 loss """bceaftersigmoid""" +975 4 regularizer """no""" +975 4 optimizer """adam""" +975 4 training_loop """lcwa""" +975 4 evaluator """rankbased""" +975 5 dataset """kinships""" +975 5 model """tucker""" +975 5 loss """bceaftersigmoid""" +975 5 regularizer """no""" +975 5 optimizer """adam""" +975 5 training_loop """lcwa""" +975 5 evaluator """rankbased""" +975 6 dataset """kinships""" +975 6 model """tucker""" +975 6 loss """bceaftersigmoid""" +975 6 regularizer """no""" +975 6 optimizer """adam""" +975 6 training_loop """lcwa""" +975 6 evaluator """rankbased""" +975 7 dataset """kinships""" +975 7 model """tucker""" +975 7 loss """bceaftersigmoid""" +975 7 regularizer """no""" +975 7 optimizer """adam""" +975 7 training_loop """lcwa""" +975 7 evaluator """rankbased""" +975 8 dataset """kinships""" +975 8 model """tucker""" +975 8 loss """bceaftersigmoid""" +975 8 regularizer """no""" +975 8 optimizer """adam""" +975 8 training_loop """lcwa""" +975 8 evaluator """rankbased""" +975 9 dataset """kinships""" +975 9 model """tucker""" +975 9 loss """bceaftersigmoid""" +975 9 regularizer """no""" +975 9 optimizer """adam""" +975 9 training_loop """lcwa""" +975 9 evaluator """rankbased""" +975 10 dataset """kinships""" +975 10 model """tucker""" +975 10 loss """bceaftersigmoid""" +975 10 regularizer """no""" +975 10 optimizer """adam""" +975 10 training_loop """lcwa""" +975 10 evaluator """rankbased""" +975 11 dataset """kinships""" +975 11 model """tucker""" +975 11 loss 
"""bceaftersigmoid""" +975 11 regularizer """no""" +975 11 optimizer """adam""" +975 11 training_loop """lcwa""" +975 11 evaluator """rankbased""" +975 12 dataset """kinships""" +975 12 model """tucker""" +975 12 loss """bceaftersigmoid""" +975 12 regularizer """no""" +975 12 optimizer """adam""" +975 12 training_loop """lcwa""" +975 12 evaluator """rankbased""" +975 13 dataset """kinships""" +975 13 model """tucker""" +975 13 loss """bceaftersigmoid""" +975 13 regularizer """no""" +975 13 optimizer """adam""" +975 13 training_loop """lcwa""" +975 13 evaluator """rankbased""" +975 14 dataset """kinships""" +975 14 model """tucker""" +975 14 loss """bceaftersigmoid""" +975 14 regularizer """no""" +975 14 optimizer """adam""" +975 14 training_loop """lcwa""" +975 14 evaluator """rankbased""" +975 15 dataset """kinships""" +975 15 model """tucker""" +975 15 loss """bceaftersigmoid""" +975 15 regularizer """no""" +975 15 optimizer """adam""" +975 15 training_loop """lcwa""" +975 15 evaluator """rankbased""" +975 16 dataset """kinships""" +975 16 model """tucker""" +975 16 loss """bceaftersigmoid""" +975 16 regularizer """no""" +975 16 optimizer """adam""" +975 16 training_loop """lcwa""" +975 16 evaluator """rankbased""" +975 17 dataset """kinships""" +975 17 model """tucker""" +975 17 loss """bceaftersigmoid""" +975 17 regularizer """no""" +975 17 optimizer """adam""" +975 17 training_loop """lcwa""" +975 17 evaluator """rankbased""" +975 18 dataset """kinships""" +975 18 model """tucker""" +975 18 loss """bceaftersigmoid""" +975 18 regularizer """no""" +975 18 optimizer """adam""" +975 18 training_loop """lcwa""" +975 18 evaluator """rankbased""" +975 19 dataset """kinships""" +975 19 model """tucker""" +975 19 loss """bceaftersigmoid""" +975 19 regularizer """no""" +975 19 optimizer """adam""" +975 19 training_loop """lcwa""" +975 19 evaluator """rankbased""" +975 20 dataset """kinships""" +975 20 model """tucker""" +975 20 loss """bceaftersigmoid""" +975 20 
regularizer """no""" +975 20 optimizer """adam""" +975 20 training_loop """lcwa""" +975 20 evaluator """rankbased""" +975 21 dataset """kinships""" +975 21 model """tucker""" +975 21 loss """bceaftersigmoid""" +975 21 regularizer """no""" +975 21 optimizer """adam""" +975 21 training_loop """lcwa""" +975 21 evaluator """rankbased""" +975 22 dataset """kinships""" +975 22 model """tucker""" +975 22 loss """bceaftersigmoid""" +975 22 regularizer """no""" +975 22 optimizer """adam""" +975 22 training_loop """lcwa""" +975 22 evaluator """rankbased""" +975 23 dataset """kinships""" +975 23 model """tucker""" +975 23 loss """bceaftersigmoid""" +975 23 regularizer """no""" +975 23 optimizer """adam""" +975 23 training_loop """lcwa""" +975 23 evaluator """rankbased""" +975 24 dataset """kinships""" +975 24 model """tucker""" +975 24 loss """bceaftersigmoid""" +975 24 regularizer """no""" +975 24 optimizer """adam""" +975 24 training_loop """lcwa""" +975 24 evaluator """rankbased""" +975 25 dataset """kinships""" +975 25 model """tucker""" +975 25 loss """bceaftersigmoid""" +975 25 regularizer """no""" +975 25 optimizer """adam""" +975 25 training_loop """lcwa""" +975 25 evaluator """rankbased""" +975 26 dataset """kinships""" +975 26 model """tucker""" +975 26 loss """bceaftersigmoid""" +975 26 regularizer """no""" +975 26 optimizer """adam""" +975 26 training_loop """lcwa""" +975 26 evaluator """rankbased""" +975 27 dataset """kinships""" +975 27 model """tucker""" +975 27 loss """bceaftersigmoid""" +975 27 regularizer """no""" +975 27 optimizer """adam""" +975 27 training_loop """lcwa""" +975 27 evaluator """rankbased""" +975 28 dataset """kinships""" +975 28 model """tucker""" +975 28 loss """bceaftersigmoid""" +975 28 regularizer """no""" +975 28 optimizer """adam""" +975 28 training_loop """lcwa""" +975 28 evaluator """rankbased""" +975 29 dataset """kinships""" +975 29 model """tucker""" +975 29 loss """bceaftersigmoid""" +975 29 regularizer """no""" +975 29 
optimizer """adam""" +975 29 training_loop """lcwa""" +975 29 evaluator """rankbased""" +975 30 dataset """kinships""" +975 30 model """tucker""" +975 30 loss """bceaftersigmoid""" +975 30 regularizer """no""" +975 30 optimizer """adam""" +975 30 training_loop """lcwa""" +975 30 evaluator """rankbased""" +975 31 dataset """kinships""" +975 31 model """tucker""" +975 31 loss """bceaftersigmoid""" +975 31 regularizer """no""" +975 31 optimizer """adam""" +975 31 training_loop """lcwa""" +975 31 evaluator """rankbased""" +975 32 dataset """kinships""" +975 32 model """tucker""" +975 32 loss """bceaftersigmoid""" +975 32 regularizer """no""" +975 32 optimizer """adam""" +975 32 training_loop """lcwa""" +975 32 evaluator """rankbased""" +975 33 dataset """kinships""" +975 33 model """tucker""" +975 33 loss """bceaftersigmoid""" +975 33 regularizer """no""" +975 33 optimizer """adam""" +975 33 training_loop """lcwa""" +975 33 evaluator """rankbased""" +975 34 dataset """kinships""" +975 34 model """tucker""" +975 34 loss """bceaftersigmoid""" +975 34 regularizer """no""" +975 34 optimizer """adam""" +975 34 training_loop """lcwa""" +975 34 evaluator """rankbased""" +975 35 dataset """kinships""" +975 35 model """tucker""" +975 35 loss """bceaftersigmoid""" +975 35 regularizer """no""" +975 35 optimizer """adam""" +975 35 training_loop """lcwa""" +975 35 evaluator """rankbased""" +975 36 dataset """kinships""" +975 36 model """tucker""" +975 36 loss """bceaftersigmoid""" +975 36 regularizer """no""" +975 36 optimizer """adam""" +975 36 training_loop """lcwa""" +975 36 evaluator """rankbased""" +975 37 dataset """kinships""" +975 37 model """tucker""" +975 37 loss """bceaftersigmoid""" +975 37 regularizer """no""" +975 37 optimizer """adam""" +975 37 training_loop """lcwa""" +975 37 evaluator """rankbased""" +975 38 dataset """kinships""" +975 38 model """tucker""" +975 38 loss """bceaftersigmoid""" +975 38 regularizer """no""" +975 38 optimizer """adam""" +975 38 
training_loop """lcwa""" +975 38 evaluator """rankbased""" +975 39 dataset """kinships""" +975 39 model """tucker""" +975 39 loss """bceaftersigmoid""" +975 39 regularizer """no""" +975 39 optimizer """adam""" +975 39 training_loop """lcwa""" +975 39 evaluator """rankbased""" +975 40 dataset """kinships""" +975 40 model """tucker""" +975 40 loss """bceaftersigmoid""" +975 40 regularizer """no""" +975 40 optimizer """adam""" +975 40 training_loop """lcwa""" +975 40 evaluator """rankbased""" +975 41 dataset """kinships""" +975 41 model """tucker""" +975 41 loss """bceaftersigmoid""" +975 41 regularizer """no""" +975 41 optimizer """adam""" +975 41 training_loop """lcwa""" +975 41 evaluator """rankbased""" +975 42 dataset """kinships""" +975 42 model """tucker""" +975 42 loss """bceaftersigmoid""" +975 42 regularizer """no""" +975 42 optimizer """adam""" +975 42 training_loop """lcwa""" +975 42 evaluator """rankbased""" +975 43 dataset """kinships""" +975 43 model """tucker""" +975 43 loss """bceaftersigmoid""" +975 43 regularizer """no""" +975 43 optimizer """adam""" +975 43 training_loop """lcwa""" +975 43 evaluator """rankbased""" +975 44 dataset """kinships""" +975 44 model """tucker""" +975 44 loss """bceaftersigmoid""" +975 44 regularizer """no""" +975 44 optimizer """adam""" +975 44 training_loop """lcwa""" +975 44 evaluator """rankbased""" +975 45 dataset """kinships""" +975 45 model """tucker""" +975 45 loss """bceaftersigmoid""" +975 45 regularizer """no""" +975 45 optimizer """adam""" +975 45 training_loop """lcwa""" +975 45 evaluator """rankbased""" +975 46 dataset """kinships""" +975 46 model """tucker""" +975 46 loss """bceaftersigmoid""" +975 46 regularizer """no""" +975 46 optimizer """adam""" +975 46 training_loop """lcwa""" +975 46 evaluator """rankbased""" +975 47 dataset """kinships""" +975 47 model """tucker""" +975 47 loss """bceaftersigmoid""" +975 47 regularizer """no""" +975 47 optimizer """adam""" +975 47 training_loop """lcwa""" +975 47 
evaluator """rankbased""" +975 48 dataset """kinships""" +975 48 model """tucker""" +975 48 loss """bceaftersigmoid""" +975 48 regularizer """no""" +975 48 optimizer """adam""" +975 48 training_loop """lcwa""" +975 48 evaluator """rankbased""" +975 49 dataset """kinships""" +975 49 model """tucker""" +975 49 loss """bceaftersigmoid""" +975 49 regularizer """no""" +975 49 optimizer """adam""" +975 49 training_loop """lcwa""" +975 49 evaluator """rankbased""" +975 50 dataset """kinships""" +975 50 model """tucker""" +975 50 loss """bceaftersigmoid""" +975 50 regularizer """no""" +975 50 optimizer """adam""" +975 50 training_loop """lcwa""" +975 50 evaluator """rankbased""" +975 51 dataset """kinships""" +975 51 model """tucker""" +975 51 loss """bceaftersigmoid""" +975 51 regularizer """no""" +975 51 optimizer """adam""" +975 51 training_loop """lcwa""" +975 51 evaluator """rankbased""" +975 52 dataset """kinships""" +975 52 model """tucker""" +975 52 loss """bceaftersigmoid""" +975 52 regularizer """no""" +975 52 optimizer """adam""" +975 52 training_loop """lcwa""" +975 52 evaluator """rankbased""" +975 53 dataset """kinships""" +975 53 model """tucker""" +975 53 loss """bceaftersigmoid""" +975 53 regularizer """no""" +975 53 optimizer """adam""" +975 53 training_loop """lcwa""" +975 53 evaluator """rankbased""" +975 54 dataset """kinships""" +975 54 model """tucker""" +975 54 loss """bceaftersigmoid""" +975 54 regularizer """no""" +975 54 optimizer """adam""" +975 54 training_loop """lcwa""" +975 54 evaluator """rankbased""" +975 55 dataset """kinships""" +975 55 model """tucker""" +975 55 loss """bceaftersigmoid""" +975 55 regularizer """no""" +975 55 optimizer """adam""" +975 55 training_loop """lcwa""" +975 55 evaluator """rankbased""" +975 56 dataset """kinships""" +975 56 model """tucker""" +975 56 loss """bceaftersigmoid""" +975 56 regularizer """no""" +975 56 optimizer """adam""" +975 56 training_loop """lcwa""" +975 56 evaluator """rankbased""" +975 57 
dataset """kinships""" +975 57 model """tucker""" +975 57 loss """bceaftersigmoid""" +975 57 regularizer """no""" +975 57 optimizer """adam""" +975 57 training_loop """lcwa""" +975 57 evaluator """rankbased""" +975 58 dataset """kinships""" +975 58 model """tucker""" +975 58 loss """bceaftersigmoid""" +975 58 regularizer """no""" +975 58 optimizer """adam""" +975 58 training_loop """lcwa""" +975 58 evaluator """rankbased""" +975 59 dataset """kinships""" +975 59 model """tucker""" +975 59 loss """bceaftersigmoid""" +975 59 regularizer """no""" +975 59 optimizer """adam""" +975 59 training_loop """lcwa""" +975 59 evaluator """rankbased""" +975 60 dataset """kinships""" +975 60 model """tucker""" +975 60 loss """bceaftersigmoid""" +975 60 regularizer """no""" +975 60 optimizer """adam""" +975 60 training_loop """lcwa""" +975 60 evaluator """rankbased""" +975 61 dataset """kinships""" +975 61 model """tucker""" +975 61 loss """bceaftersigmoid""" +975 61 regularizer """no""" +975 61 optimizer """adam""" +975 61 training_loop """lcwa""" +975 61 evaluator """rankbased""" +975 62 dataset """kinships""" +975 62 model """tucker""" +975 62 loss """bceaftersigmoid""" +975 62 regularizer """no""" +975 62 optimizer """adam""" +975 62 training_loop """lcwa""" +975 62 evaluator """rankbased""" +975 63 dataset """kinships""" +975 63 model """tucker""" +975 63 loss """bceaftersigmoid""" +975 63 regularizer """no""" +975 63 optimizer """adam""" +975 63 training_loop """lcwa""" +975 63 evaluator """rankbased""" +975 64 dataset """kinships""" +975 64 model """tucker""" +975 64 loss """bceaftersigmoid""" +975 64 regularizer """no""" +975 64 optimizer """adam""" +975 64 training_loop """lcwa""" +975 64 evaluator """rankbased""" +975 65 dataset """kinships""" +975 65 model """tucker""" +975 65 loss """bceaftersigmoid""" +975 65 regularizer """no""" +975 65 optimizer """adam""" +975 65 training_loop """lcwa""" +975 65 evaluator """rankbased""" +975 66 dataset """kinships""" +975 66 model 
"""tucker""" +975 66 loss """bceaftersigmoid""" +975 66 regularizer """no""" +975 66 optimizer """adam""" +975 66 training_loop """lcwa""" +975 66 evaluator """rankbased""" +975 67 dataset """kinships""" +975 67 model """tucker""" +975 67 loss """bceaftersigmoid""" +975 67 regularizer """no""" +975 67 optimizer """adam""" +975 67 training_loop """lcwa""" +975 67 evaluator """rankbased""" +975 68 dataset """kinships""" +975 68 model """tucker""" +975 68 loss """bceaftersigmoid""" +975 68 regularizer """no""" +975 68 optimizer """adam""" +975 68 training_loop """lcwa""" +975 68 evaluator """rankbased""" +975 69 dataset """kinships""" +975 69 model """tucker""" +975 69 loss """bceaftersigmoid""" +975 69 regularizer """no""" +975 69 optimizer """adam""" +975 69 training_loop """lcwa""" +975 69 evaluator """rankbased""" +975 70 dataset """kinships""" +975 70 model """tucker""" +975 70 loss """bceaftersigmoid""" +975 70 regularizer """no""" +975 70 optimizer """adam""" +975 70 training_loop """lcwa""" +975 70 evaluator """rankbased""" +975 71 dataset """kinships""" +975 71 model """tucker""" +975 71 loss """bceaftersigmoid""" +975 71 regularizer """no""" +975 71 optimizer """adam""" +975 71 training_loop """lcwa""" +975 71 evaluator """rankbased""" +975 72 dataset """kinships""" +975 72 model """tucker""" +975 72 loss """bceaftersigmoid""" +975 72 regularizer """no""" +975 72 optimizer """adam""" +975 72 training_loop """lcwa""" +975 72 evaluator """rankbased""" +975 73 dataset """kinships""" +975 73 model """tucker""" +975 73 loss """bceaftersigmoid""" +975 73 regularizer """no""" +975 73 optimizer """adam""" +975 73 training_loop """lcwa""" +975 73 evaluator """rankbased""" +975 74 dataset """kinships""" +975 74 model """tucker""" +975 74 loss """bceaftersigmoid""" +975 74 regularizer """no""" +975 74 optimizer """adam""" +975 74 training_loop """lcwa""" +975 74 evaluator """rankbased""" +975 75 dataset """kinships""" +975 75 model """tucker""" +975 75 loss 
"""bceaftersigmoid""" +975 75 regularizer """no""" +975 75 optimizer """adam""" +975 75 training_loop """lcwa""" +975 75 evaluator """rankbased""" +975 76 dataset """kinships""" +975 76 model """tucker""" +975 76 loss """bceaftersigmoid""" +975 76 regularizer """no""" +975 76 optimizer """adam""" +975 76 training_loop """lcwa""" +975 76 evaluator """rankbased""" +975 77 dataset """kinships""" +975 77 model """tucker""" +975 77 loss """bceaftersigmoid""" +975 77 regularizer """no""" +975 77 optimizer """adam""" +975 77 training_loop """lcwa""" +975 77 evaluator """rankbased""" +975 78 dataset """kinships""" +975 78 model """tucker""" +975 78 loss """bceaftersigmoid""" +975 78 regularizer """no""" +975 78 optimizer """adam""" +975 78 training_loop """lcwa""" +975 78 evaluator """rankbased""" +975 79 dataset """kinships""" +975 79 model """tucker""" +975 79 loss """bceaftersigmoid""" +975 79 regularizer """no""" +975 79 optimizer """adam""" +975 79 training_loop """lcwa""" +975 79 evaluator """rankbased""" +975 80 dataset """kinships""" +975 80 model """tucker""" +975 80 loss """bceaftersigmoid""" +975 80 regularizer """no""" +975 80 optimizer """adam""" +975 80 training_loop """lcwa""" +975 80 evaluator """rankbased""" +975 81 dataset """kinships""" +975 81 model """tucker""" +975 81 loss """bceaftersigmoid""" +975 81 regularizer """no""" +975 81 optimizer """adam""" +975 81 training_loop """lcwa""" +975 81 evaluator """rankbased""" +975 82 dataset """kinships""" +975 82 model """tucker""" +975 82 loss """bceaftersigmoid""" +975 82 regularizer """no""" +975 82 optimizer """adam""" +975 82 training_loop """lcwa""" +975 82 evaluator """rankbased""" +975 83 dataset """kinships""" +975 83 model """tucker""" +975 83 loss """bceaftersigmoid""" +975 83 regularizer """no""" +975 83 optimizer """adam""" +975 83 training_loop """lcwa""" +975 83 evaluator """rankbased""" +975 84 dataset """kinships""" +975 84 model """tucker""" +975 84 loss """bceaftersigmoid""" +975 84 
regularizer """no""" +975 84 optimizer """adam""" +975 84 training_loop """lcwa""" +975 84 evaluator """rankbased""" +975 85 dataset """kinships""" +975 85 model """tucker""" +975 85 loss """bceaftersigmoid""" +975 85 regularizer """no""" +975 85 optimizer """adam""" +975 85 training_loop """lcwa""" +975 85 evaluator """rankbased""" +975 86 dataset """kinships""" +975 86 model """tucker""" +975 86 loss """bceaftersigmoid""" +975 86 regularizer """no""" +975 86 optimizer """adam""" +975 86 training_loop """lcwa""" +975 86 evaluator """rankbased""" +975 87 dataset """kinships""" +975 87 model """tucker""" +975 87 loss """bceaftersigmoid""" +975 87 regularizer """no""" +975 87 optimizer """adam""" +975 87 training_loop """lcwa""" +975 87 evaluator """rankbased""" +975 88 dataset """kinships""" +975 88 model """tucker""" +975 88 loss """bceaftersigmoid""" +975 88 regularizer """no""" +975 88 optimizer """adam""" +975 88 training_loop """lcwa""" +975 88 evaluator """rankbased""" +975 89 dataset """kinships""" +975 89 model """tucker""" +975 89 loss """bceaftersigmoid""" +975 89 regularizer """no""" +975 89 optimizer """adam""" +975 89 training_loop """lcwa""" +975 89 evaluator """rankbased""" +975 90 dataset """kinships""" +975 90 model """tucker""" +975 90 loss """bceaftersigmoid""" +975 90 regularizer """no""" +975 90 optimizer """adam""" +975 90 training_loop """lcwa""" +975 90 evaluator """rankbased""" +975 91 dataset """kinships""" +975 91 model """tucker""" +975 91 loss """bceaftersigmoid""" +975 91 regularizer """no""" +975 91 optimizer """adam""" +975 91 training_loop """lcwa""" +975 91 evaluator """rankbased""" +975 92 dataset """kinships""" +975 92 model """tucker""" +975 92 loss """bceaftersigmoid""" +975 92 regularizer """no""" +975 92 optimizer """adam""" +975 92 training_loop """lcwa""" +975 92 evaluator """rankbased""" +975 93 dataset """kinships""" +975 93 model """tucker""" +975 93 loss """bceaftersigmoid""" +975 93 regularizer """no""" +975 93 
optimizer """adam""" +975 93 training_loop """lcwa""" +975 93 evaluator """rankbased""" +975 94 dataset """kinships""" +975 94 model """tucker""" +975 94 loss """bceaftersigmoid""" +975 94 regularizer """no""" +975 94 optimizer """adam""" +975 94 training_loop """lcwa""" +975 94 evaluator """rankbased""" +975 95 dataset """kinships""" +975 95 model """tucker""" +975 95 loss """bceaftersigmoid""" +975 95 regularizer """no""" +975 95 optimizer """adam""" +975 95 training_loop """lcwa""" +975 95 evaluator """rankbased""" +975 96 dataset """kinships""" +975 96 model """tucker""" +975 96 loss """bceaftersigmoid""" +975 96 regularizer """no""" +975 96 optimizer """adam""" +975 96 training_loop """lcwa""" +975 96 evaluator """rankbased""" +975 97 dataset """kinships""" +975 97 model """tucker""" +975 97 loss """bceaftersigmoid""" +975 97 regularizer """no""" +975 97 optimizer """adam""" +975 97 training_loop """lcwa""" +975 97 evaluator """rankbased""" +975 98 dataset """kinships""" +975 98 model """tucker""" +975 98 loss """bceaftersigmoid""" +975 98 regularizer """no""" +975 98 optimizer """adam""" +975 98 training_loop """lcwa""" +975 98 evaluator """rankbased""" +975 99 dataset """kinships""" +975 99 model """tucker""" +975 99 loss """bceaftersigmoid""" +975 99 regularizer """no""" +975 99 optimizer """adam""" +975 99 training_loop """lcwa""" +975 99 evaluator """rankbased""" +975 100 dataset """kinships""" +975 100 model """tucker""" +975 100 loss """bceaftersigmoid""" +975 100 regularizer """no""" +975 100 optimizer """adam""" +975 100 training_loop """lcwa""" +975 100 evaluator """rankbased""" +976 1 model.embedding_dim 2.0 +976 1 model.relation_dim 2.0 +976 1 model.dropout_0 0.4467209868407387 +976 1 model.dropout_1 0.10903140529650601 +976 1 model.dropout_2 0.39870432823754354 +976 1 optimizer.lr 0.00618375668958669 +976 1 training.batch_size 0.0 +976 1 training.label_smoothing 0.757409280102984 +976 2 model.embedding_dim 1.0 +976 2 model.relation_dim 0.0 +976 2 
model.dropout_0 0.22846452021084185 +976 2 model.dropout_1 0.2041473431938195 +976 2 model.dropout_2 0.37901367225567706 +976 2 optimizer.lr 0.0016556210504908308 +976 2 training.batch_size 0.0 +976 2 training.label_smoothing 0.3728441724112803 +976 3 model.embedding_dim 0.0 +976 3 model.relation_dim 1.0 +976 3 model.dropout_0 0.47278545606417255 +976 3 model.dropout_1 0.40967279127916734 +976 3 model.dropout_2 0.336966841760342 +976 3 optimizer.lr 0.019320521960134823 +976 3 training.batch_size 0.0 +976 3 training.label_smoothing 0.0028542352275516014 +976 4 model.embedding_dim 1.0 +976 4 model.relation_dim 2.0 +976 4 model.dropout_0 0.23341792600219016 +976 4 model.dropout_1 0.25561544900550187 +976 4 model.dropout_2 0.1972361359344419 +976 4 optimizer.lr 0.008861526304072581 +976 4 training.batch_size 0.0 +976 4 training.label_smoothing 0.0973511291971279 +976 5 model.embedding_dim 2.0 +976 5 model.relation_dim 2.0 +976 5 model.dropout_0 0.3490510494123769 +976 5 model.dropout_1 0.15078040559094863 +976 5 model.dropout_2 0.15778748965492936 +976 5 optimizer.lr 0.002432183319826735 +976 5 training.batch_size 0.0 +976 5 training.label_smoothing 0.3315616757513865 +976 6 model.embedding_dim 0.0 +976 6 model.relation_dim 2.0 +976 6 model.dropout_0 0.28735491523125317 +976 6 model.dropout_1 0.2915920037929922 +976 6 model.dropout_2 0.41671954434375685 +976 6 optimizer.lr 0.037989311466689255 +976 6 training.batch_size 2.0 +976 6 training.label_smoothing 0.0036677983682447636 +976 7 model.embedding_dim 0.0 +976 7 model.relation_dim 2.0 +976 7 model.dropout_0 0.22172848992392719 +976 7 model.dropout_1 0.24158012367968817 +976 7 model.dropout_2 0.26343299994429875 +976 7 optimizer.lr 0.0014536392684529604 +976 7 training.batch_size 1.0 +976 7 training.label_smoothing 0.015532650624377378 +976 8 model.embedding_dim 1.0 +976 8 model.relation_dim 1.0 +976 8 model.dropout_0 0.36895575463717023 +976 8 model.dropout_1 0.2673927540102764 +976 8 model.dropout_2 
0.21215694252644157 +976 8 optimizer.lr 0.004118951613561266 +976 8 training.batch_size 0.0 +976 8 training.label_smoothing 0.3250341230789704 +976 9 model.embedding_dim 2.0 +976 9 model.relation_dim 1.0 +976 9 model.dropout_0 0.22297280369576222 +976 9 model.dropout_1 0.12168881384862683 +976 9 model.dropout_2 0.3738134420257136 +976 9 optimizer.lr 0.001968257454397875 +976 9 training.batch_size 0.0 +976 9 training.label_smoothing 0.0014056387596046375 +976 10 model.embedding_dim 2.0 +976 10 model.relation_dim 2.0 +976 10 model.dropout_0 0.10590370120833886 +976 10 model.dropout_1 0.41414996991982966 +976 10 model.dropout_2 0.10019080787916873 +976 10 optimizer.lr 0.09737246353427181 +976 10 training.batch_size 2.0 +976 10 training.label_smoothing 0.6366607843991292 +976 11 model.embedding_dim 2.0 +976 11 model.relation_dim 2.0 +976 11 model.dropout_0 0.4577132016171162 +976 11 model.dropout_1 0.1587388832321393 +976 11 model.dropout_2 0.4591976363831363 +976 11 optimizer.lr 0.0202795137735953 +976 11 training.batch_size 1.0 +976 11 training.label_smoothing 0.002451605576154323 +976 12 model.embedding_dim 1.0 +976 12 model.relation_dim 0.0 +976 12 model.dropout_0 0.313811026787003 +976 12 model.dropout_1 0.3827786561806486 +976 12 model.dropout_2 0.3662965114037162 +976 12 optimizer.lr 0.001801165603834376 +976 12 training.batch_size 0.0 +976 12 training.label_smoothing 0.0028319466880428316 +976 13 model.embedding_dim 2.0 +976 13 model.relation_dim 2.0 +976 13 model.dropout_0 0.34493011324155404 +976 13 model.dropout_1 0.30776072314790204 +976 13 model.dropout_2 0.3248159853654711 +976 13 optimizer.lr 0.008179651873053995 +976 13 training.batch_size 1.0 +976 13 training.label_smoothing 0.1145814227001224 +976 14 model.embedding_dim 0.0 +976 14 model.relation_dim 2.0 +976 14 model.dropout_0 0.3370844686093437 +976 14 model.dropout_1 0.20698603711037578 +976 14 model.dropout_2 0.4916406715905591 +976 14 optimizer.lr 0.0045221934177335985 +976 14 training.batch_size 
1.0 +976 14 training.label_smoothing 0.001957272922827715 +976 15 model.embedding_dim 2.0 +976 15 model.relation_dim 1.0 +976 15 model.dropout_0 0.3929922834286241 +976 15 model.dropout_1 0.4150185746970928 +976 15 model.dropout_2 0.4558468373437068 +976 15 optimizer.lr 0.0016568281873632655 +976 15 training.batch_size 1.0 +976 15 training.label_smoothing 0.02011178671594869 +976 16 model.embedding_dim 0.0 +976 16 model.relation_dim 0.0 +976 16 model.dropout_0 0.1080587937974265 +976 16 model.dropout_1 0.48697051356409904 +976 16 model.dropout_2 0.21559741983241248 +976 16 optimizer.lr 0.006075831406495539 +976 16 training.batch_size 0.0 +976 16 training.label_smoothing 0.003380035709213504 +976 17 model.embedding_dim 1.0 +976 17 model.relation_dim 2.0 +976 17 model.dropout_0 0.32095926027897487 +976 17 model.dropout_1 0.4487682645707096 +976 17 model.dropout_2 0.14241738641415602 +976 17 optimizer.lr 0.030574867633885446 +976 17 training.batch_size 1.0 +976 17 training.label_smoothing 0.3446636526386716 +976 18 model.embedding_dim 0.0 +976 18 model.relation_dim 0.0 +976 18 model.dropout_0 0.18292070405178942 +976 18 model.dropout_1 0.25155895639867465 +976 18 model.dropout_2 0.2247847996399401 +976 18 optimizer.lr 0.022950947964158416 +976 18 training.batch_size 1.0 +976 18 training.label_smoothing 0.00785081067972095 +976 19 model.embedding_dim 0.0 +976 19 model.relation_dim 2.0 +976 19 model.dropout_0 0.1654524623782103 +976 19 model.dropout_1 0.10789630572037878 +976 19 model.dropout_2 0.3402831273189548 +976 19 optimizer.lr 0.0015932879084376082 +976 19 training.batch_size 1.0 +976 19 training.label_smoothing 0.0132630430083034 +976 20 model.embedding_dim 2.0 +976 20 model.relation_dim 2.0 +976 20 model.dropout_0 0.3830995098240806 +976 20 model.dropout_1 0.1203900544528545 +976 20 model.dropout_2 0.25795520296710833 +976 20 optimizer.lr 0.024170183047413443 +976 20 training.batch_size 0.0 +976 20 training.label_smoothing 0.27401268410909047 +976 21 
model.embedding_dim 1.0 +976 21 model.relation_dim 0.0 +976 21 model.dropout_0 0.32182137366720714 +976 21 model.dropout_1 0.42537785527367666 +976 21 model.dropout_2 0.19446698484256345 +976 21 optimizer.lr 0.001531738977947484 +976 21 training.batch_size 2.0 +976 21 training.label_smoothing 0.566094876067251 +976 22 model.embedding_dim 1.0 +976 22 model.relation_dim 0.0 +976 22 model.dropout_0 0.470691479429738 +976 22 model.dropout_1 0.38929613407008495 +976 22 model.dropout_2 0.4554907902822054 +976 22 optimizer.lr 0.0022295035880614774 +976 22 training.batch_size 0.0 +976 22 training.label_smoothing 0.0034759689665749137 +976 23 model.embedding_dim 0.0 +976 23 model.relation_dim 1.0 +976 23 model.dropout_0 0.492880423336332 +976 23 model.dropout_1 0.27043307158442287 +976 23 model.dropout_2 0.18507376670773779 +976 23 optimizer.lr 0.0029010083799167695 +976 23 training.batch_size 1.0 +976 23 training.label_smoothing 0.018042941871762926 +976 24 model.embedding_dim 2.0 +976 24 model.relation_dim 1.0 +976 24 model.dropout_0 0.4366671168374871 +976 24 model.dropout_1 0.48334142089114934 +976 24 model.dropout_2 0.40347249000803515 +976 24 optimizer.lr 0.0012506376604706992 +976 24 training.batch_size 1.0 +976 24 training.label_smoothing 0.053009700732386346 +976 25 model.embedding_dim 1.0 +976 25 model.relation_dim 2.0 +976 25 model.dropout_0 0.32709509054042485 +976 25 model.dropout_1 0.21687406942678744 +976 25 model.dropout_2 0.2801805556190192 +976 25 optimizer.lr 0.005061281744898754 +976 25 training.batch_size 0.0 +976 25 training.label_smoothing 0.0032202446098296153 +976 26 model.embedding_dim 0.0 +976 26 model.relation_dim 0.0 +976 26 model.dropout_0 0.31581581087048205 +976 26 model.dropout_1 0.4157942603163867 +976 26 model.dropout_2 0.28149607721587255 +976 26 optimizer.lr 0.012704126740746504 +976 26 training.batch_size 0.0 +976 26 training.label_smoothing 0.0045680877661377 +976 27 model.embedding_dim 0.0 +976 27 model.relation_dim 1.0 +976 27 
model.dropout_0 0.23478241247963677 +976 27 model.dropout_1 0.49001951712221975 +976 27 model.dropout_2 0.17793329328700463 +976 27 optimizer.lr 0.0019280884145684864 +976 27 training.batch_size 1.0 +976 27 training.label_smoothing 0.15854234912102516 +976 28 model.embedding_dim 1.0 +976 28 model.relation_dim 2.0 +976 28 model.dropout_0 0.2709982749811625 +976 28 model.dropout_1 0.1279225232011124 +976 28 model.dropout_2 0.11265763024691583 +976 28 optimizer.lr 0.009875461632827813 +976 28 training.batch_size 1.0 +976 28 training.label_smoothing 0.02591289850804379 +976 29 model.embedding_dim 1.0 +976 29 model.relation_dim 0.0 +976 29 model.dropout_0 0.18708620991092706 +976 29 model.dropout_1 0.40589257536647494 +976 29 model.dropout_2 0.3491901133339125 +976 29 optimizer.lr 0.0023743008611762724 +976 29 training.batch_size 1.0 +976 29 training.label_smoothing 0.08155911763001439 +976 30 model.embedding_dim 2.0 +976 30 model.relation_dim 1.0 +976 30 model.dropout_0 0.44428592177102644 +976 30 model.dropout_1 0.2235354339762794 +976 30 model.dropout_2 0.49507897729024875 +976 30 optimizer.lr 0.005754151966625352 +976 30 training.batch_size 1.0 +976 30 training.label_smoothing 0.2762481211825737 +976 31 model.embedding_dim 2.0 +976 31 model.relation_dim 2.0 +976 31 model.dropout_0 0.38145115477614944 +976 31 model.dropout_1 0.23207807583720474 +976 31 model.dropout_2 0.35710658785114907 +976 31 optimizer.lr 0.001548674230612172 +976 31 training.batch_size 1.0 +976 31 training.label_smoothing 0.0035618694169058654 +976 32 model.embedding_dim 0.0 +976 32 model.relation_dim 1.0 +976 32 model.dropout_0 0.2726309109446927 +976 32 model.dropout_1 0.4914488353976616 +976 32 model.dropout_2 0.35405362266076246 +976 32 optimizer.lr 0.026871574185284686 +976 32 training.batch_size 2.0 +976 32 training.label_smoothing 0.03275246918218926 +976 33 model.embedding_dim 0.0 +976 33 model.relation_dim 1.0 +976 33 model.dropout_0 0.3777501343209589 +976 33 model.dropout_1 
0.4441630719520788 +976 33 model.dropout_2 0.2536712901126393 +976 33 optimizer.lr 0.029994678036482158 +976 33 training.batch_size 2.0 +976 33 training.label_smoothing 0.005492881669668457 +976 34 model.embedding_dim 1.0 +976 34 model.relation_dim 0.0 +976 34 model.dropout_0 0.4867445146948468 +976 34 model.dropout_1 0.10238683096992443 +976 34 model.dropout_2 0.4887757407878444 +976 34 optimizer.lr 0.0022347991863831476 +976 34 training.batch_size 2.0 +976 34 training.label_smoothing 0.00745964723079796 +976 35 model.embedding_dim 2.0 +976 35 model.relation_dim 0.0 +976 35 model.dropout_0 0.1769744619107952 +976 35 model.dropout_1 0.19954268918024703 +976 35 model.dropout_2 0.117831092437774 +976 35 optimizer.lr 0.0024941718966761564 +976 35 training.batch_size 0.0 +976 35 training.label_smoothing 0.03756697650288389 +976 36 model.embedding_dim 1.0 +976 36 model.relation_dim 1.0 +976 36 model.dropout_0 0.4027520085579032 +976 36 model.dropout_1 0.17332089359486236 +976 36 model.dropout_2 0.10667336278600366 +976 36 optimizer.lr 0.002995661192623252 +976 36 training.batch_size 1.0 +976 36 training.label_smoothing 0.0026072104951748924 +976 37 model.embedding_dim 0.0 +976 37 model.relation_dim 1.0 +976 37 model.dropout_0 0.3797163552977131 +976 37 model.dropout_1 0.45774874916752184 +976 37 model.dropout_2 0.11576172378100998 +976 37 optimizer.lr 0.011781549792053658 +976 37 training.batch_size 1.0 +976 37 training.label_smoothing 0.2382666974542907 +976 38 model.embedding_dim 2.0 +976 38 model.relation_dim 0.0 +976 38 model.dropout_0 0.31718659116008263 +976 38 model.dropout_1 0.3576383277584557 +976 38 model.dropout_2 0.4458716359268602 +976 38 optimizer.lr 0.007893630538216486 +976 38 training.batch_size 2.0 +976 38 training.label_smoothing 0.05635077429029664 +976 39 model.embedding_dim 1.0 +976 39 model.relation_dim 2.0 +976 39 model.dropout_0 0.22192362096225027 +976 39 model.dropout_1 0.26119893036738395 +976 39 model.dropout_2 0.3649946639921324 +976 39 
optimizer.lr 0.0595684362675261 +976 39 training.batch_size 1.0 +976 39 training.label_smoothing 0.0024113993944981497 +976 40 model.embedding_dim 0.0 +976 40 model.relation_dim 2.0 +976 40 model.dropout_0 0.4665594099762853 +976 40 model.dropout_1 0.4295380337478634 +976 40 model.dropout_2 0.3940182176141796 +976 40 optimizer.lr 0.03318555520619647 +976 40 training.batch_size 2.0 +976 40 training.label_smoothing 0.04830066963445462 +976 41 model.embedding_dim 0.0 +976 41 model.relation_dim 2.0 +976 41 model.dropout_0 0.11026148318974527 +976 41 model.dropout_1 0.4867196727585626 +976 41 model.dropout_2 0.40676808566228706 +976 41 optimizer.lr 0.07297907197200651 +976 41 training.batch_size 1.0 +976 41 training.label_smoothing 0.020491438187016226 +976 42 model.embedding_dim 2.0 +976 42 model.relation_dim 2.0 +976 42 model.dropout_0 0.4528574559616585 +976 42 model.dropout_1 0.19607843327158325 +976 42 model.dropout_2 0.40883317208843345 +976 42 optimizer.lr 0.004177104896908387 +976 42 training.batch_size 0.0 +976 42 training.label_smoothing 0.38507236762601266 +976 43 model.embedding_dim 2.0 +976 43 model.relation_dim 1.0 +976 43 model.dropout_0 0.15420293505587637 +976 43 model.dropout_1 0.18850361644925076 +976 43 model.dropout_2 0.4947646001161238 +976 43 optimizer.lr 0.003663638919628571 +976 43 training.batch_size 2.0 +976 43 training.label_smoothing 0.005263844847087476 +976 44 model.embedding_dim 1.0 +976 44 model.relation_dim 2.0 +976 44 model.dropout_0 0.23165800732217826 +976 44 model.dropout_1 0.32803740127377345 +976 44 model.dropout_2 0.2001212242363113 +976 44 optimizer.lr 0.015320372310055756 +976 44 training.batch_size 0.0 +976 44 training.label_smoothing 0.060621064308292213 +976 45 model.embedding_dim 1.0 +976 45 model.relation_dim 1.0 +976 45 model.dropout_0 0.370715877800411 +976 45 model.dropout_1 0.12935194028192348 +976 45 model.dropout_2 0.25795027101108764 +976 45 optimizer.lr 0.015436138970667422 +976 45 training.batch_size 2.0 +976 45 
training.label_smoothing 0.001314499946205418 +976 46 model.embedding_dim 0.0 +976 46 model.relation_dim 2.0 +976 46 model.dropout_0 0.4776127389434926 +976 46 model.dropout_1 0.39097382279606013 +976 46 model.dropout_2 0.38684513217913175 +976 46 optimizer.lr 0.0013869012188796534 +976 46 training.batch_size 1.0 +976 46 training.label_smoothing 0.13447026406010598 +976 47 model.embedding_dim 0.0 +976 47 model.relation_dim 2.0 +976 47 model.dropout_0 0.29082572833879855 +976 47 model.dropout_1 0.4911267816494243 +976 47 model.dropout_2 0.477332746041909 +976 47 optimizer.lr 0.0016357519983202825 +976 47 training.batch_size 0.0 +976 47 training.label_smoothing 0.02103210270247231 +976 48 model.embedding_dim 1.0 +976 48 model.relation_dim 0.0 +976 48 model.dropout_0 0.271703786304371 +976 48 model.dropout_1 0.3173447440573707 +976 48 model.dropout_2 0.411718927497426 +976 48 optimizer.lr 0.006663492539803749 +976 48 training.batch_size 2.0 +976 48 training.label_smoothing 0.027301105622320125 +976 49 model.embedding_dim 2.0 +976 49 model.relation_dim 0.0 +976 49 model.dropout_0 0.16145699577814768 +976 49 model.dropout_1 0.2370469176584579 +976 49 model.dropout_2 0.4741304623647169 +976 49 optimizer.lr 0.02578731712522819 +976 49 training.batch_size 0.0 +976 49 training.label_smoothing 0.0060883693925445435 +976 50 model.embedding_dim 1.0 +976 50 model.relation_dim 2.0 +976 50 model.dropout_0 0.26654831178226207 +976 50 model.dropout_1 0.17746083940637214 +976 50 model.dropout_2 0.40764139709167063 +976 50 optimizer.lr 0.0031592999516704815 +976 50 training.batch_size 2.0 +976 50 training.label_smoothing 0.7370383216044071 +976 51 model.embedding_dim 1.0 +976 51 model.relation_dim 2.0 +976 51 model.dropout_0 0.45307577216889117 +976 51 model.dropout_1 0.37009127098365996 +976 51 model.dropout_2 0.13793029682561234 +976 51 optimizer.lr 0.004783579781927356 +976 51 training.batch_size 2.0 +976 51 training.label_smoothing 0.18116035982092127 +976 52 model.embedding_dim 
2.0 +976 52 model.relation_dim 1.0 +976 52 model.dropout_0 0.33701220382035524 +976 52 model.dropout_1 0.39312381154503756 +976 52 model.dropout_2 0.48889516947030087 +976 52 optimizer.lr 0.004623059578166775 +976 52 training.batch_size 1.0 +976 52 training.label_smoothing 0.05993836183584994 +976 53 model.embedding_dim 1.0 +976 53 model.relation_dim 1.0 +976 53 model.dropout_0 0.26987677255861175 +976 53 model.dropout_1 0.3428028170521855 +976 53 model.dropout_2 0.40413176775135784 +976 53 optimizer.lr 0.03447212317070955 +976 53 training.batch_size 2.0 +976 53 training.label_smoothing 0.0014442654645184278 +976 54 model.embedding_dim 2.0 +976 54 model.relation_dim 2.0 +976 54 model.dropout_0 0.468084853868004 +976 54 model.dropout_1 0.18825657159892828 +976 54 model.dropout_2 0.12106565472304838 +976 54 optimizer.lr 0.046884617068112824 +976 54 training.batch_size 0.0 +976 54 training.label_smoothing 0.0032906092453806467 +976 55 model.embedding_dim 1.0 +976 55 model.relation_dim 0.0 +976 55 model.dropout_0 0.25737192030348266 +976 55 model.dropout_1 0.4195154312328533 +976 55 model.dropout_2 0.27737194116533126 +976 55 optimizer.lr 0.010917912526777006 +976 55 training.batch_size 1.0 +976 55 training.label_smoothing 0.015841809248009413 +976 56 model.embedding_dim 2.0 +976 56 model.relation_dim 2.0 +976 56 model.dropout_0 0.16637185743572763 +976 56 model.dropout_1 0.36691460010537563 +976 56 model.dropout_2 0.2163929795613283 +976 56 optimizer.lr 0.014421782272534727 +976 56 training.batch_size 1.0 +976 56 training.label_smoothing 0.0236607532402135 +976 57 model.embedding_dim 1.0 +976 57 model.relation_dim 0.0 +976 57 model.dropout_0 0.4310849674806625 +976 57 model.dropout_1 0.23655268425655016 +976 57 model.dropout_2 0.4322246845666784 +976 57 optimizer.lr 0.08869819790628677 +976 57 training.batch_size 2.0 +976 57 training.label_smoothing 0.12424481814540518 +976 58 model.embedding_dim 1.0 +976 58 model.relation_dim 2.0 +976 58 model.dropout_0 
0.4806688401222263 +976 58 model.dropout_1 0.20546238208566467 +976 58 model.dropout_2 0.1361884378209491 +976 58 optimizer.lr 0.04852840770129034 +976 58 training.batch_size 2.0 +976 58 training.label_smoothing 0.019387758779257264 +976 59 model.embedding_dim 1.0 +976 59 model.relation_dim 2.0 +976 59 model.dropout_0 0.45265821510094695 +976 59 model.dropout_1 0.3736464982567951 +976 59 model.dropout_2 0.42365169663925406 +976 59 optimizer.lr 0.03569182593319828 +976 59 training.batch_size 2.0 +976 59 training.label_smoothing 0.0019045452059202783 +976 60 model.embedding_dim 1.0 +976 60 model.relation_dim 1.0 +976 60 model.dropout_0 0.4616380375871431 +976 60 model.dropout_1 0.188947001448771 +976 60 model.dropout_2 0.41982506535959535 +976 60 optimizer.lr 0.012876017448617091 +976 60 training.batch_size 0.0 +976 60 training.label_smoothing 0.020137276620658114 +976 61 model.embedding_dim 1.0 +976 61 model.relation_dim 1.0 +976 61 model.dropout_0 0.4821667484901506 +976 61 model.dropout_1 0.39226759447727144 +976 61 model.dropout_2 0.35468000558392254 +976 61 optimizer.lr 0.033191607102129476 +976 61 training.batch_size 1.0 +976 61 training.label_smoothing 0.2645840314937369 +976 62 model.embedding_dim 1.0 +976 62 model.relation_dim 0.0 +976 62 model.dropout_0 0.23591099645130653 +976 62 model.dropout_1 0.11713354078615273 +976 62 model.dropout_2 0.3630309535210068 +976 62 optimizer.lr 0.08221583113591797 +976 62 training.batch_size 2.0 +976 62 training.label_smoothing 0.032846082200905104 +976 63 model.embedding_dim 0.0 +976 63 model.relation_dim 1.0 +976 63 model.dropout_0 0.44743768682878166 +976 63 model.dropout_1 0.4909490270785143 +976 63 model.dropout_2 0.19195605213771882 +976 63 optimizer.lr 0.04009209296779655 +976 63 training.batch_size 1.0 +976 63 training.label_smoothing 0.051529195378554465 +976 64 model.embedding_dim 1.0 +976 64 model.relation_dim 0.0 +976 64 model.dropout_0 0.4188627280534447 +976 64 model.dropout_1 0.3996057642212515 +976 64 
model.dropout_2 0.28335545736039647 +976 64 optimizer.lr 0.07825932027020986 +976 64 training.batch_size 0.0 +976 64 training.label_smoothing 0.07481045074817526 +976 65 model.embedding_dim 0.0 +976 65 model.relation_dim 2.0 +976 65 model.dropout_0 0.31739086817078077 +976 65 model.dropout_1 0.2355443690242939 +976 65 model.dropout_2 0.31908844001332226 +976 65 optimizer.lr 0.0014523311340890234 +976 65 training.batch_size 0.0 +976 65 training.label_smoothing 0.0027837444613099836 +976 66 model.embedding_dim 0.0 +976 66 model.relation_dim 0.0 +976 66 model.dropout_0 0.4508255517939371 +976 66 model.dropout_1 0.24955372989500968 +976 66 model.dropout_2 0.22221490580310388 +976 66 optimizer.lr 0.011896805495618827 +976 66 training.batch_size 0.0 +976 66 training.label_smoothing 0.3712858425729559 +976 67 model.embedding_dim 0.0 +976 67 model.relation_dim 1.0 +976 67 model.dropout_0 0.47611139149729714 +976 67 model.dropout_1 0.32389283284207343 +976 67 model.dropout_2 0.37866927097068864 +976 67 optimizer.lr 0.0588480945614297 +976 67 training.batch_size 0.0 +976 67 training.label_smoothing 0.0012292305039540432 +976 68 model.embedding_dim 1.0 +976 68 model.relation_dim 1.0 +976 68 model.dropout_0 0.270218256401378 +976 68 model.dropout_1 0.15010976960899242 +976 68 model.dropout_2 0.39712107153860216 +976 68 optimizer.lr 0.05852471536523307 +976 68 training.batch_size 2.0 +976 68 training.label_smoothing 0.8326691883434568 +976 69 model.embedding_dim 1.0 +976 69 model.relation_dim 0.0 +976 69 model.dropout_0 0.22263702308300226 +976 69 model.dropout_1 0.42724956126350333 +976 69 model.dropout_2 0.12010236628098624 +976 69 optimizer.lr 0.006318887538988416 +976 69 training.batch_size 2.0 +976 69 training.label_smoothing 0.04219508722941605 +976 70 model.embedding_dim 2.0 +976 70 model.relation_dim 1.0 +976 70 model.dropout_0 0.16584390725851603 +976 70 model.dropout_1 0.4858732426034224 +976 70 model.dropout_2 0.18963825513439297 +976 70 optimizer.lr 
0.005989068403228203 +976 70 training.batch_size 1.0 +976 70 training.label_smoothing 0.09579139556432407 +976 71 model.embedding_dim 1.0 +976 71 model.relation_dim 1.0 +976 71 model.dropout_0 0.14889561436922621 +976 71 model.dropout_1 0.23080900750277633 +976 71 model.dropout_2 0.11956952966058387 +976 71 optimizer.lr 0.00252722401479978 +976 71 training.batch_size 2.0 +976 71 training.label_smoothing 0.006784083343515745 +976 72 model.embedding_dim 0.0 +976 72 model.relation_dim 0.0 +976 72 model.dropout_0 0.4334559038712513 +976 72 model.dropout_1 0.26830264727897657 +976 72 model.dropout_2 0.2439060226203815 +976 72 optimizer.lr 0.0034441796753524392 +976 72 training.batch_size 1.0 +976 72 training.label_smoothing 0.21802256762422073 +976 73 model.embedding_dim 0.0 +976 73 model.relation_dim 2.0 +976 73 model.dropout_0 0.2465341702944108 +976 73 model.dropout_1 0.4622959145563895 +976 73 model.dropout_2 0.48966196828138686 +976 73 optimizer.lr 0.011601860709174116 +976 73 training.batch_size 2.0 +976 73 training.label_smoothing 0.014075403532704405 +976 74 model.embedding_dim 1.0 +976 74 model.relation_dim 1.0 +976 74 model.dropout_0 0.25421516679051986 +976 74 model.dropout_1 0.3351941948445165 +976 74 model.dropout_2 0.2774357597364776 +976 74 optimizer.lr 0.0010306999542708502 +976 74 training.batch_size 1.0 +976 74 training.label_smoothing 0.008455454724369647 +976 75 model.embedding_dim 2.0 +976 75 model.relation_dim 2.0 +976 75 model.dropout_0 0.41240463829422525 +976 75 model.dropout_1 0.3961503881232956 +976 75 model.dropout_2 0.27982872907270967 +976 75 optimizer.lr 0.006794878597357351 +976 75 training.batch_size 1.0 +976 75 training.label_smoothing 0.29984742076945636 +976 76 model.embedding_dim 1.0 +976 76 model.relation_dim 2.0 +976 76 model.dropout_0 0.20022720459007015 +976 76 model.dropout_1 0.41848590670311775 +976 76 model.dropout_2 0.37114775285318635 +976 76 optimizer.lr 0.047612837898174616 +976 76 training.batch_size 0.0 +976 76 
training.label_smoothing 0.0027428862184263737 +976 77 model.embedding_dim 1.0 +976 77 model.relation_dim 2.0 +976 77 model.dropout_0 0.42127967763287133 +976 77 model.dropout_1 0.45945757083221156 +976 77 model.dropout_2 0.10006848356943264 +976 77 optimizer.lr 0.002444894339335267 +976 77 training.batch_size 0.0 +976 77 training.label_smoothing 0.05220984827899947 +976 78 model.embedding_dim 1.0 +976 78 model.relation_dim 2.0 +976 78 model.dropout_0 0.3839425685213862 +976 78 model.dropout_1 0.28361798876798855 +976 78 model.dropout_2 0.4428685285765379 +976 78 optimizer.lr 0.024231027486025965 +976 78 training.batch_size 2.0 +976 78 training.label_smoothing 0.006585375081290047 +976 79 model.embedding_dim 2.0 +976 79 model.relation_dim 2.0 +976 79 model.dropout_0 0.2346454672040928 +976 79 model.dropout_1 0.25400768704054943 +976 79 model.dropout_2 0.2257682139490199 +976 79 optimizer.lr 0.0057733246189705795 +976 79 training.batch_size 1.0 +976 79 training.label_smoothing 0.007821500785765129 +976 80 model.embedding_dim 2.0 +976 80 model.relation_dim 2.0 +976 80 model.dropout_0 0.30976882468039696 +976 80 model.dropout_1 0.333539302189342 +976 80 model.dropout_2 0.1901896027034566 +976 80 optimizer.lr 0.006974620204245683 +976 80 training.batch_size 1.0 +976 80 training.label_smoothing 0.0010235536425890405 +976 81 model.embedding_dim 2.0 +976 81 model.relation_dim 0.0 +976 81 model.dropout_0 0.1143923904374553 +976 81 model.dropout_1 0.1795780879159471 +976 81 model.dropout_2 0.39623736634887446 +976 81 optimizer.lr 0.001401485672553412 +976 81 training.batch_size 2.0 +976 81 training.label_smoothing 0.03868805174084607 +976 82 model.embedding_dim 1.0 +976 82 model.relation_dim 2.0 +976 82 model.dropout_0 0.48142063813703945 +976 82 model.dropout_1 0.4106689520866091 +976 82 model.dropout_2 0.15371200781284064 +976 82 optimizer.lr 0.007776296443832161 +976 82 training.batch_size 1.0 +976 82 training.label_smoothing 0.5547080436862992 +976 83 
model.embedding_dim 0.0 +976 83 model.relation_dim 0.0 +976 83 model.dropout_0 0.3000100250528002 +976 83 model.dropout_1 0.3009674071155201 +976 83 model.dropout_2 0.4460926593306382 +976 83 optimizer.lr 0.0163337717725631 +976 83 training.batch_size 0.0 +976 83 training.label_smoothing 0.004592854933238523 +976 84 model.embedding_dim 1.0 +976 84 model.relation_dim 1.0 +976 84 model.dropout_0 0.29980749200851986 +976 84 model.dropout_1 0.29850847765387767 +976 84 model.dropout_2 0.392849740184511 +976 84 optimizer.lr 0.0401701389583596 +976 84 training.batch_size 1.0 +976 84 training.label_smoothing 0.036816447126314156 +976 85 model.embedding_dim 1.0 +976 85 model.relation_dim 0.0 +976 85 model.dropout_0 0.19752193059402529 +976 85 model.dropout_1 0.2341357309951084 +976 85 model.dropout_2 0.46213883705654046 +976 85 optimizer.lr 0.008297190460316087 +976 85 training.batch_size 1.0 +976 85 training.label_smoothing 0.3146567871627033 +976 86 model.embedding_dim 0.0 +976 86 model.relation_dim 2.0 +976 86 model.dropout_0 0.34290072584149744 +976 86 model.dropout_1 0.3799294021860784 +976 86 model.dropout_2 0.12328563259920787 +976 86 optimizer.lr 0.0015600141394853933 +976 86 training.batch_size 1.0 +976 86 training.label_smoothing 0.001607631703321396 +976 87 model.embedding_dim 2.0 +976 87 model.relation_dim 2.0 +976 87 model.dropout_0 0.11831228951322377 +976 87 model.dropout_1 0.2131976492653078 +976 87 model.dropout_2 0.20578582100301263 +976 87 optimizer.lr 0.0057665445477302615 +976 87 training.batch_size 1.0 +976 87 training.label_smoothing 0.2939478549796199 +976 88 model.embedding_dim 2.0 +976 88 model.relation_dim 1.0 +976 88 model.dropout_0 0.10669433177312367 +976 88 model.dropout_1 0.31347700996531347 +976 88 model.dropout_2 0.23980921539932942 +976 88 optimizer.lr 0.013045363629686812 +976 88 training.batch_size 1.0 +976 88 training.label_smoothing 0.08292793895647195 +976 89 model.embedding_dim 2.0 +976 89 model.relation_dim 2.0 +976 89 
model.dropout_0 0.35782807076312445 +976 89 model.dropout_1 0.4318661764522199 +976 89 model.dropout_2 0.35029484764491564 +976 89 optimizer.lr 0.02441584641309007 +976 89 training.batch_size 0.0 +976 89 training.label_smoothing 0.39731306945794403 +976 90 model.embedding_dim 0.0 +976 90 model.relation_dim 0.0 +976 90 model.dropout_0 0.3038136148784645 +976 90 model.dropout_1 0.22620516569460333 +976 90 model.dropout_2 0.18471419092193342 +976 90 optimizer.lr 0.0669749578320003 +976 90 training.batch_size 2.0 +976 90 training.label_smoothing 0.010270127815723111 +976 91 model.embedding_dim 0.0 +976 91 model.relation_dim 0.0 +976 91 model.dropout_0 0.4917186043482619 +976 91 model.dropout_1 0.4304936091317934 +976 91 model.dropout_2 0.3710360905140715 +976 91 optimizer.lr 0.09565975004754249 +976 91 training.batch_size 0.0 +976 91 training.label_smoothing 0.07964977137457441 +976 92 model.embedding_dim 2.0 +976 92 model.relation_dim 2.0 +976 92 model.dropout_0 0.17903249610617322 +976 92 model.dropout_1 0.36459304544175386 +976 92 model.dropout_2 0.1335882706514561 +976 92 optimizer.lr 0.05641083810539732 +976 92 training.batch_size 1.0 +976 92 training.label_smoothing 0.11225272976325193 +976 93 model.embedding_dim 0.0 +976 93 model.relation_dim 2.0 +976 93 model.dropout_0 0.4914412740694949 +976 93 model.dropout_1 0.3095702364119278 +976 93 model.dropout_2 0.38604686741161864 +976 93 optimizer.lr 0.0037394322207179084 +976 93 training.batch_size 1.0 +976 93 training.label_smoothing 0.03452705266860821 +976 94 model.embedding_dim 0.0 +976 94 model.relation_dim 1.0 +976 94 model.dropout_0 0.12547840331808038 +976 94 model.dropout_1 0.21177785953456574 +976 94 model.dropout_2 0.4427629998064391 +976 94 optimizer.lr 0.002406650405488517 +976 94 training.batch_size 1.0 +976 94 training.label_smoothing 0.02097692809956345 +976 95 model.embedding_dim 1.0 +976 95 model.relation_dim 1.0 +976 95 model.dropout_0 0.25132551903975825 +976 95 model.dropout_1 0.49789828638903116 
+976 95 model.dropout_2 0.3411846974707863 +976 95 optimizer.lr 0.02246327933805142 +976 95 training.batch_size 2.0 +976 95 training.label_smoothing 0.557683441686263 +976 96 model.embedding_dim 2.0 +976 96 model.relation_dim 1.0 +976 96 model.dropout_0 0.4320415351977227 +976 96 model.dropout_1 0.42170538442922356 +976 96 model.dropout_2 0.20004641587803113 +976 96 optimizer.lr 0.0266103853710551 +976 96 training.batch_size 2.0 +976 96 training.label_smoothing 0.0012535335746334528 +976 97 model.embedding_dim 0.0 +976 97 model.relation_dim 0.0 +976 97 model.dropout_0 0.44460723390938317 +976 97 model.dropout_1 0.24024102516477475 +976 97 model.dropout_2 0.12228684520741072 +976 97 optimizer.lr 0.00224529319134112 +976 97 training.batch_size 2.0 +976 97 training.label_smoothing 0.005889202826108597 +976 98 model.embedding_dim 1.0 +976 98 model.relation_dim 1.0 +976 98 model.dropout_0 0.10050263527313437 +976 98 model.dropout_1 0.11214405851501899 +976 98 model.dropout_2 0.3674568941670756 +976 98 optimizer.lr 0.0011826530708768518 +976 98 training.batch_size 1.0 +976 98 training.label_smoothing 0.002548274857576461 +976 99 model.embedding_dim 2.0 +976 99 model.relation_dim 1.0 +976 99 model.dropout_0 0.26254295888004214 +976 99 model.dropout_1 0.38034795513949365 +976 99 model.dropout_2 0.35398832867687857 +976 99 optimizer.lr 0.012599988917237808 +976 99 training.batch_size 0.0 +976 99 training.label_smoothing 0.22305391848454126 +976 100 model.embedding_dim 0.0 +976 100 model.relation_dim 0.0 +976 100 model.dropout_0 0.3069453384478599 +976 100 model.dropout_1 0.327649617928616 +976 100 model.dropout_2 0.2146714022058062 +976 100 optimizer.lr 0.0019110825578196287 +976 100 training.batch_size 1.0 +976 100 training.label_smoothing 0.10257849227120405 +976 1 dataset """kinships""" +976 1 model """tucker""" +976 1 loss """softplus""" +976 1 regularizer """no""" +976 1 optimizer """adam""" +976 1 training_loop """lcwa""" +976 1 evaluator """rankbased""" +976 2 
dataset """kinships""" +976 2 model """tucker""" +976 2 loss """softplus""" +976 2 regularizer """no""" +976 2 optimizer """adam""" +976 2 training_loop """lcwa""" +976 2 evaluator """rankbased""" +976 3 dataset """kinships""" +976 3 model """tucker""" +976 3 loss """softplus""" +976 3 regularizer """no""" +976 3 optimizer """adam""" +976 3 training_loop """lcwa""" +976 3 evaluator """rankbased""" +976 4 dataset """kinships""" +976 4 model """tucker""" +976 4 loss """softplus""" +976 4 regularizer """no""" +976 4 optimizer """adam""" +976 4 training_loop """lcwa""" +976 4 evaluator """rankbased""" +976 5 dataset """kinships""" +976 5 model """tucker""" +976 5 loss """softplus""" +976 5 regularizer """no""" +976 5 optimizer """adam""" +976 5 training_loop """lcwa""" +976 5 evaluator """rankbased""" +976 6 dataset """kinships""" +976 6 model """tucker""" +976 6 loss """softplus""" +976 6 regularizer """no""" +976 6 optimizer """adam""" +976 6 training_loop """lcwa""" +976 6 evaluator """rankbased""" +976 7 dataset """kinships""" +976 7 model """tucker""" +976 7 loss """softplus""" +976 7 regularizer """no""" +976 7 optimizer """adam""" +976 7 training_loop """lcwa""" +976 7 evaluator """rankbased""" +976 8 dataset """kinships""" +976 8 model """tucker""" +976 8 loss """softplus""" +976 8 regularizer """no""" +976 8 optimizer """adam""" +976 8 training_loop """lcwa""" +976 8 evaluator """rankbased""" +976 9 dataset """kinships""" +976 9 model """tucker""" +976 9 loss """softplus""" +976 9 regularizer """no""" +976 9 optimizer """adam""" +976 9 training_loop """lcwa""" +976 9 evaluator """rankbased""" +976 10 dataset """kinships""" +976 10 model """tucker""" +976 10 loss """softplus""" +976 10 regularizer """no""" +976 10 optimizer """adam""" +976 10 training_loop """lcwa""" +976 10 evaluator """rankbased""" +976 11 dataset """kinships""" +976 11 model """tucker""" +976 11 loss """softplus""" +976 11 regularizer """no""" +976 11 optimizer """adam""" +976 11 
training_loop """lcwa""" +976 11 evaluator """rankbased""" +976 12 dataset """kinships""" +976 12 model """tucker""" +976 12 loss """softplus""" +976 12 regularizer """no""" +976 12 optimizer """adam""" +976 12 training_loop """lcwa""" +976 12 evaluator """rankbased""" +976 13 dataset """kinships""" +976 13 model """tucker""" +976 13 loss """softplus""" +976 13 regularizer """no""" +976 13 optimizer """adam""" +976 13 training_loop """lcwa""" +976 13 evaluator """rankbased""" +976 14 dataset """kinships""" +976 14 model """tucker""" +976 14 loss """softplus""" +976 14 regularizer """no""" +976 14 optimizer """adam""" +976 14 training_loop """lcwa""" +976 14 evaluator """rankbased""" +976 15 dataset """kinships""" +976 15 model """tucker""" +976 15 loss """softplus""" +976 15 regularizer """no""" +976 15 optimizer """adam""" +976 15 training_loop """lcwa""" +976 15 evaluator """rankbased""" +976 16 dataset """kinships""" +976 16 model """tucker""" +976 16 loss """softplus""" +976 16 regularizer """no""" +976 16 optimizer """adam""" +976 16 training_loop """lcwa""" +976 16 evaluator """rankbased""" +976 17 dataset """kinships""" +976 17 model """tucker""" +976 17 loss """softplus""" +976 17 regularizer """no""" +976 17 optimizer """adam""" +976 17 training_loop """lcwa""" +976 17 evaluator """rankbased""" +976 18 dataset """kinships""" +976 18 model """tucker""" +976 18 loss """softplus""" +976 18 regularizer """no""" +976 18 optimizer """adam""" +976 18 training_loop """lcwa""" +976 18 evaluator """rankbased""" +976 19 dataset """kinships""" +976 19 model """tucker""" +976 19 loss """softplus""" +976 19 regularizer """no""" +976 19 optimizer """adam""" +976 19 training_loop """lcwa""" +976 19 evaluator """rankbased""" +976 20 dataset """kinships""" +976 20 model """tucker""" +976 20 loss """softplus""" +976 20 regularizer """no""" +976 20 optimizer """adam""" +976 20 training_loop """lcwa""" +976 20 evaluator """rankbased""" +976 21 dataset """kinships""" +976 21 
model """tucker""" +976 21 loss """softplus""" +976 21 regularizer """no""" +976 21 optimizer """adam""" +976 21 training_loop """lcwa""" +976 21 evaluator """rankbased""" +976 22 dataset """kinships""" +976 22 model """tucker""" +976 22 loss """softplus""" +976 22 regularizer """no""" +976 22 optimizer """adam""" +976 22 training_loop """lcwa""" +976 22 evaluator """rankbased""" +976 23 dataset """kinships""" +976 23 model """tucker""" +976 23 loss """softplus""" +976 23 regularizer """no""" +976 23 optimizer """adam""" +976 23 training_loop """lcwa""" +976 23 evaluator """rankbased""" +976 24 dataset """kinships""" +976 24 model """tucker""" +976 24 loss """softplus""" +976 24 regularizer """no""" +976 24 optimizer """adam""" +976 24 training_loop """lcwa""" +976 24 evaluator """rankbased""" +976 25 dataset """kinships""" +976 25 model """tucker""" +976 25 loss """softplus""" +976 25 regularizer """no""" +976 25 optimizer """adam""" +976 25 training_loop """lcwa""" +976 25 evaluator """rankbased""" +976 26 dataset """kinships""" +976 26 model """tucker""" +976 26 loss """softplus""" +976 26 regularizer """no""" +976 26 optimizer """adam""" +976 26 training_loop """lcwa""" +976 26 evaluator """rankbased""" +976 27 dataset """kinships""" +976 27 model """tucker""" +976 27 loss """softplus""" +976 27 regularizer """no""" +976 27 optimizer """adam""" +976 27 training_loop """lcwa""" +976 27 evaluator """rankbased""" +976 28 dataset """kinships""" +976 28 model """tucker""" +976 28 loss """softplus""" +976 28 regularizer """no""" +976 28 optimizer """adam""" +976 28 training_loop """lcwa""" +976 28 evaluator """rankbased""" +976 29 dataset """kinships""" +976 29 model """tucker""" +976 29 loss """softplus""" +976 29 regularizer """no""" +976 29 optimizer """adam""" +976 29 training_loop """lcwa""" +976 29 evaluator """rankbased""" +976 30 dataset """kinships""" +976 30 model """tucker""" +976 30 loss """softplus""" +976 30 regularizer """no""" +976 30 optimizer 
"""adam""" +976 30 training_loop """lcwa""" +976 30 evaluator """rankbased""" +976 31 dataset """kinships""" +976 31 model """tucker""" +976 31 loss """softplus""" +976 31 regularizer """no""" +976 31 optimizer """adam""" +976 31 training_loop """lcwa""" +976 31 evaluator """rankbased""" +976 32 dataset """kinships""" +976 32 model """tucker""" +976 32 loss """softplus""" +976 32 regularizer """no""" +976 32 optimizer """adam""" +976 32 training_loop """lcwa""" +976 32 evaluator """rankbased""" +976 33 dataset """kinships""" +976 33 model """tucker""" +976 33 loss """softplus""" +976 33 regularizer """no""" +976 33 optimizer """adam""" +976 33 training_loop """lcwa""" +976 33 evaluator """rankbased""" +976 34 dataset """kinships""" +976 34 model """tucker""" +976 34 loss """softplus""" +976 34 regularizer """no""" +976 34 optimizer """adam""" +976 34 training_loop """lcwa""" +976 34 evaluator """rankbased""" +976 35 dataset """kinships""" +976 35 model """tucker""" +976 35 loss """softplus""" +976 35 regularizer """no""" +976 35 optimizer """adam""" +976 35 training_loop """lcwa""" +976 35 evaluator """rankbased""" +976 36 dataset """kinships""" +976 36 model """tucker""" +976 36 loss """softplus""" +976 36 regularizer """no""" +976 36 optimizer """adam""" +976 36 training_loop """lcwa""" +976 36 evaluator """rankbased""" +976 37 dataset """kinships""" +976 37 model """tucker""" +976 37 loss """softplus""" +976 37 regularizer """no""" +976 37 optimizer """adam""" +976 37 training_loop """lcwa""" +976 37 evaluator """rankbased""" +976 38 dataset """kinships""" +976 38 model """tucker""" +976 38 loss """softplus""" +976 38 regularizer """no""" +976 38 optimizer """adam""" +976 38 training_loop """lcwa""" +976 38 evaluator """rankbased""" +976 39 dataset """kinships""" +976 39 model """tucker""" +976 39 loss """softplus""" +976 39 regularizer """no""" +976 39 optimizer """adam""" +976 39 training_loop """lcwa""" +976 39 evaluator """rankbased""" +976 40 dataset 
"""kinships""" +976 40 model """tucker""" +976 40 loss """softplus""" +976 40 regularizer """no""" +976 40 optimizer """adam""" +976 40 training_loop """lcwa""" +976 40 evaluator """rankbased""" +976 41 dataset """kinships""" +976 41 model """tucker""" +976 41 loss """softplus""" +976 41 regularizer """no""" +976 41 optimizer """adam""" +976 41 training_loop """lcwa""" +976 41 evaluator """rankbased""" +976 42 dataset """kinships""" +976 42 model """tucker""" +976 42 loss """softplus""" +976 42 regularizer """no""" +976 42 optimizer """adam""" +976 42 training_loop """lcwa""" +976 42 evaluator """rankbased""" +976 43 dataset """kinships""" +976 43 model """tucker""" +976 43 loss """softplus""" +976 43 regularizer """no""" +976 43 optimizer """adam""" +976 43 training_loop """lcwa""" +976 43 evaluator """rankbased""" +976 44 dataset """kinships""" +976 44 model """tucker""" +976 44 loss """softplus""" +976 44 regularizer """no""" +976 44 optimizer """adam""" +976 44 training_loop """lcwa""" +976 44 evaluator """rankbased""" +976 45 dataset """kinships""" +976 45 model """tucker""" +976 45 loss """softplus""" +976 45 regularizer """no""" +976 45 optimizer """adam""" +976 45 training_loop """lcwa""" +976 45 evaluator """rankbased""" +976 46 dataset """kinships""" +976 46 model """tucker""" +976 46 loss """softplus""" +976 46 regularizer """no""" +976 46 optimizer """adam""" +976 46 training_loop """lcwa""" +976 46 evaluator """rankbased""" +976 47 dataset """kinships""" +976 47 model """tucker""" +976 47 loss """softplus""" +976 47 regularizer """no""" +976 47 optimizer """adam""" +976 47 training_loop """lcwa""" +976 47 evaluator """rankbased""" +976 48 dataset """kinships""" +976 48 model """tucker""" +976 48 loss """softplus""" +976 48 regularizer """no""" +976 48 optimizer """adam""" +976 48 training_loop """lcwa""" +976 48 evaluator """rankbased""" +976 49 dataset """kinships""" +976 49 model """tucker""" +976 49 loss """softplus""" +976 49 regularizer """no""" 
+976 49 optimizer """adam""" +976 49 training_loop """lcwa""" +976 49 evaluator """rankbased""" +976 50 dataset """kinships""" +976 50 model """tucker""" +976 50 loss """softplus""" +976 50 regularizer """no""" +976 50 optimizer """adam""" +976 50 training_loop """lcwa""" +976 50 evaluator """rankbased""" +976 51 dataset """kinships""" +976 51 model """tucker""" +976 51 loss """softplus""" +976 51 regularizer """no""" +976 51 optimizer """adam""" +976 51 training_loop """lcwa""" +976 51 evaluator """rankbased""" +976 52 dataset """kinships""" +976 52 model """tucker""" +976 52 loss """softplus""" +976 52 regularizer """no""" +976 52 optimizer """adam""" +976 52 training_loop """lcwa""" +976 52 evaluator """rankbased""" +976 53 dataset """kinships""" +976 53 model """tucker""" +976 53 loss """softplus""" +976 53 regularizer """no""" +976 53 optimizer """adam""" +976 53 training_loop """lcwa""" +976 53 evaluator """rankbased""" +976 54 dataset """kinships""" +976 54 model """tucker""" +976 54 loss """softplus""" +976 54 regularizer """no""" +976 54 optimizer """adam""" +976 54 training_loop """lcwa""" +976 54 evaluator """rankbased""" +976 55 dataset """kinships""" +976 55 model """tucker""" +976 55 loss """softplus""" +976 55 regularizer """no""" +976 55 optimizer """adam""" +976 55 training_loop """lcwa""" +976 55 evaluator """rankbased""" +976 56 dataset """kinships""" +976 56 model """tucker""" +976 56 loss """softplus""" +976 56 regularizer """no""" +976 56 optimizer """adam""" +976 56 training_loop """lcwa""" +976 56 evaluator """rankbased""" +976 57 dataset """kinships""" +976 57 model """tucker""" +976 57 loss """softplus""" +976 57 regularizer """no""" +976 57 optimizer """adam""" +976 57 training_loop """lcwa""" +976 57 evaluator """rankbased""" +976 58 dataset """kinships""" +976 58 model """tucker""" +976 58 loss """softplus""" +976 58 regularizer """no""" +976 58 optimizer """adam""" +976 58 training_loop """lcwa""" +976 58 evaluator """rankbased""" +976 
59 dataset """kinships""" +976 59 model """tucker""" +976 59 loss """softplus""" +976 59 regularizer """no""" +976 59 optimizer """adam""" +976 59 training_loop """lcwa""" +976 59 evaluator """rankbased""" +976 60 dataset """kinships""" +976 60 model """tucker""" +976 60 loss """softplus""" +976 60 regularizer """no""" +976 60 optimizer """adam""" +976 60 training_loop """lcwa""" +976 60 evaluator """rankbased""" +976 61 dataset """kinships""" +976 61 model """tucker""" +976 61 loss """softplus""" +976 61 regularizer """no""" +976 61 optimizer """adam""" +976 61 training_loop """lcwa""" +976 61 evaluator """rankbased""" +976 62 dataset """kinships""" +976 62 model """tucker""" +976 62 loss """softplus""" +976 62 regularizer """no""" +976 62 optimizer """adam""" +976 62 training_loop """lcwa""" +976 62 evaluator """rankbased""" +976 63 dataset """kinships""" +976 63 model """tucker""" +976 63 loss """softplus""" +976 63 regularizer """no""" +976 63 optimizer """adam""" +976 63 training_loop """lcwa""" +976 63 evaluator """rankbased""" +976 64 dataset """kinships""" +976 64 model """tucker""" +976 64 loss """softplus""" +976 64 regularizer """no""" +976 64 optimizer """adam""" +976 64 training_loop """lcwa""" +976 64 evaluator """rankbased""" +976 65 dataset """kinships""" +976 65 model """tucker""" +976 65 loss """softplus""" +976 65 regularizer """no""" +976 65 optimizer """adam""" +976 65 training_loop """lcwa""" +976 65 evaluator """rankbased""" +976 66 dataset """kinships""" +976 66 model """tucker""" +976 66 loss """softplus""" +976 66 regularizer """no""" +976 66 optimizer """adam""" +976 66 training_loop """lcwa""" +976 66 evaluator """rankbased""" +976 67 dataset """kinships""" +976 67 model """tucker""" +976 67 loss """softplus""" +976 67 regularizer """no""" +976 67 optimizer """adam""" +976 67 training_loop """lcwa""" +976 67 evaluator """rankbased""" +976 68 dataset """kinships""" +976 68 model """tucker""" +976 68 loss """softplus""" +976 68 regularizer 
"""no""" +976 68 optimizer """adam""" +976 68 training_loop """lcwa""" +976 68 evaluator """rankbased""" +976 69 dataset """kinships""" +976 69 model """tucker""" +976 69 loss """softplus""" +976 69 regularizer """no""" +976 69 optimizer """adam""" +976 69 training_loop """lcwa""" +976 69 evaluator """rankbased""" +976 70 dataset """kinships""" +976 70 model """tucker""" +976 70 loss """softplus""" +976 70 regularizer """no""" +976 70 optimizer """adam""" +976 70 training_loop """lcwa""" +976 70 evaluator """rankbased""" +976 71 dataset """kinships""" +976 71 model """tucker""" +976 71 loss """softplus""" +976 71 regularizer """no""" +976 71 optimizer """adam""" +976 71 training_loop """lcwa""" +976 71 evaluator """rankbased""" +976 72 dataset """kinships""" +976 72 model """tucker""" +976 72 loss """softplus""" +976 72 regularizer """no""" +976 72 optimizer """adam""" +976 72 training_loop """lcwa""" +976 72 evaluator """rankbased""" +976 73 dataset """kinships""" +976 73 model """tucker""" +976 73 loss """softplus""" +976 73 regularizer """no""" +976 73 optimizer """adam""" +976 73 training_loop """lcwa""" +976 73 evaluator """rankbased""" +976 74 dataset """kinships""" +976 74 model """tucker""" +976 74 loss """softplus""" +976 74 regularizer """no""" +976 74 optimizer """adam""" +976 74 training_loop """lcwa""" +976 74 evaluator """rankbased""" +976 75 dataset """kinships""" +976 75 model """tucker""" +976 75 loss """softplus""" +976 75 regularizer """no""" +976 75 optimizer """adam""" +976 75 training_loop """lcwa""" +976 75 evaluator """rankbased""" +976 76 dataset """kinships""" +976 76 model """tucker""" +976 76 loss """softplus""" +976 76 regularizer """no""" +976 76 optimizer """adam""" +976 76 training_loop """lcwa""" +976 76 evaluator """rankbased""" +976 77 dataset """kinships""" +976 77 model """tucker""" +976 77 loss """softplus""" +976 77 regularizer """no""" +976 77 optimizer """adam""" +976 77 training_loop """lcwa""" +976 77 evaluator 
"""rankbased""" +976 78 dataset """kinships""" +976 78 model """tucker""" +976 78 loss """softplus""" +976 78 regularizer """no""" +976 78 optimizer """adam""" +976 78 training_loop """lcwa""" +976 78 evaluator """rankbased""" +976 79 dataset """kinships""" +976 79 model """tucker""" +976 79 loss """softplus""" +976 79 regularizer """no""" +976 79 optimizer """adam""" +976 79 training_loop """lcwa""" +976 79 evaluator """rankbased""" +976 80 dataset """kinships""" +976 80 model """tucker""" +976 80 loss """softplus""" +976 80 regularizer """no""" +976 80 optimizer """adam""" +976 80 training_loop """lcwa""" +976 80 evaluator """rankbased""" +976 81 dataset """kinships""" +976 81 model """tucker""" +976 81 loss """softplus""" +976 81 regularizer """no""" +976 81 optimizer """adam""" +976 81 training_loop """lcwa""" +976 81 evaluator """rankbased""" +976 82 dataset """kinships""" +976 82 model """tucker""" +976 82 loss """softplus""" +976 82 regularizer """no""" +976 82 optimizer """adam""" +976 82 training_loop """lcwa""" +976 82 evaluator """rankbased""" +976 83 dataset """kinships""" +976 83 model """tucker""" +976 83 loss """softplus""" +976 83 regularizer """no""" +976 83 optimizer """adam""" +976 83 training_loop """lcwa""" +976 83 evaluator """rankbased""" +976 84 dataset """kinships""" +976 84 model """tucker""" +976 84 loss """softplus""" +976 84 regularizer """no""" +976 84 optimizer """adam""" +976 84 training_loop """lcwa""" +976 84 evaluator """rankbased""" +976 85 dataset """kinships""" +976 85 model """tucker""" +976 85 loss """softplus""" +976 85 regularizer """no""" +976 85 optimizer """adam""" +976 85 training_loop """lcwa""" +976 85 evaluator """rankbased""" +976 86 dataset """kinships""" +976 86 model """tucker""" +976 86 loss """softplus""" +976 86 regularizer """no""" +976 86 optimizer """adam""" +976 86 training_loop """lcwa""" +976 86 evaluator """rankbased""" +976 87 dataset """kinships""" +976 87 model """tucker""" +976 87 loss 
"""softplus""" +976 87 regularizer """no""" +976 87 optimizer """adam""" +976 87 training_loop """lcwa""" +976 87 evaluator """rankbased""" +976 88 dataset """kinships""" +976 88 model """tucker""" +976 88 loss """softplus""" +976 88 regularizer """no""" +976 88 optimizer """adam""" +976 88 training_loop """lcwa""" +976 88 evaluator """rankbased""" +976 89 dataset """kinships""" +976 89 model """tucker""" +976 89 loss """softplus""" +976 89 regularizer """no""" +976 89 optimizer """adam""" +976 89 training_loop """lcwa""" +976 89 evaluator """rankbased""" +976 90 dataset """kinships""" +976 90 model """tucker""" +976 90 loss """softplus""" +976 90 regularizer """no""" +976 90 optimizer """adam""" +976 90 training_loop """lcwa""" +976 90 evaluator """rankbased""" +976 91 dataset """kinships""" +976 91 model """tucker""" +976 91 loss """softplus""" +976 91 regularizer """no""" +976 91 optimizer """adam""" +976 91 training_loop """lcwa""" +976 91 evaluator """rankbased""" +976 92 dataset """kinships""" +976 92 model """tucker""" +976 92 loss """softplus""" +976 92 regularizer """no""" +976 92 optimizer """adam""" +976 92 training_loop """lcwa""" +976 92 evaluator """rankbased""" +976 93 dataset """kinships""" +976 93 model """tucker""" +976 93 loss """softplus""" +976 93 regularizer """no""" +976 93 optimizer """adam""" +976 93 training_loop """lcwa""" +976 93 evaluator """rankbased""" +976 94 dataset """kinships""" +976 94 model """tucker""" +976 94 loss """softplus""" +976 94 regularizer """no""" +976 94 optimizer """adam""" +976 94 training_loop """lcwa""" +976 94 evaluator """rankbased""" +976 95 dataset """kinships""" +976 95 model """tucker""" +976 95 loss """softplus""" +976 95 regularizer """no""" +976 95 optimizer """adam""" +976 95 training_loop """lcwa""" +976 95 evaluator """rankbased""" +976 96 dataset """kinships""" +976 96 model """tucker""" +976 96 loss """softplus""" +976 96 regularizer """no""" +976 96 optimizer """adam""" +976 96 training_loop 
"""lcwa""" +976 96 evaluator """rankbased""" +976 97 dataset """kinships""" +976 97 model """tucker""" +976 97 loss """softplus""" +976 97 regularizer """no""" +976 97 optimizer """adam""" +976 97 training_loop """lcwa""" +976 97 evaluator """rankbased""" +976 98 dataset """kinships""" +976 98 model """tucker""" +976 98 loss """softplus""" +976 98 regularizer """no""" +976 98 optimizer """adam""" +976 98 training_loop """lcwa""" +976 98 evaluator """rankbased""" +976 99 dataset """kinships""" +976 99 model """tucker""" +976 99 loss """softplus""" +976 99 regularizer """no""" +976 99 optimizer """adam""" +976 99 training_loop """lcwa""" +976 99 evaluator """rankbased""" +976 100 dataset """kinships""" +976 100 model """tucker""" +976 100 loss """softplus""" +976 100 regularizer """no""" +976 100 optimizer """adam""" +976 100 training_loop """lcwa""" +976 100 evaluator """rankbased""" +977 1 model.embedding_dim 0.0 +977 1 model.relation_dim 1.0 +977 1 model.dropout_0 0.4123737218958454 +977 1 model.dropout_1 0.19179575317242398 +977 1 model.dropout_2 0.4249438810472519 +977 1 optimizer.lr 0.002259763236563363 +977 1 training.batch_size 0.0 +977 1 training.label_smoothing 0.001262521200008664 +977 2 model.embedding_dim 0.0 +977 2 model.relation_dim 1.0 +977 2 model.dropout_0 0.25000451134747703 +977 2 model.dropout_1 0.33535959502154467 +977 2 model.dropout_2 0.3847035994475263 +977 2 optimizer.lr 0.06328436688155695 +977 2 training.batch_size 2.0 +977 2 training.label_smoothing 0.0031335969258864936 +977 3 model.embedding_dim 0.0 +977 3 model.relation_dim 0.0 +977 3 model.dropout_0 0.367851245682254 +977 3 model.dropout_1 0.15152842323408625 +977 3 model.dropout_2 0.4249204411879257 +977 3 optimizer.lr 0.0030637337375073615 +977 3 training.batch_size 2.0 +977 3 training.label_smoothing 0.08852595061427923 +977 4 model.embedding_dim 1.0 +977 4 model.relation_dim 2.0 +977 4 model.dropout_0 0.3867296576484131 +977 4 model.dropout_1 0.4387200345053821 +977 4 
model.dropout_2 0.18872572355775682 +977 4 optimizer.lr 0.020046076472630642 +977 4 training.batch_size 1.0 +977 4 training.label_smoothing 0.010120088874937603 +977 5 model.embedding_dim 1.0 +977 5 model.relation_dim 0.0 +977 5 model.dropout_0 0.4544501274317676 +977 5 model.dropout_1 0.2671168721764309 +977 5 model.dropout_2 0.3533736733817035 +977 5 optimizer.lr 0.006481523741014688 +977 5 training.batch_size 2.0 +977 5 training.label_smoothing 0.3187322855806704 +977 6 model.embedding_dim 0.0 +977 6 model.relation_dim 2.0 +977 6 model.dropout_0 0.20302901371703902 +977 6 model.dropout_1 0.2031547637311861 +977 6 model.dropout_2 0.37943639632574594 +977 6 optimizer.lr 0.004259873372621865 +977 6 training.batch_size 2.0 +977 6 training.label_smoothing 0.20897552288098253 +977 7 model.embedding_dim 1.0 +977 7 model.relation_dim 1.0 +977 7 model.dropout_0 0.37958493487485184 +977 7 model.dropout_1 0.16422981213700238 +977 7 model.dropout_2 0.4783122314695884 +977 7 optimizer.lr 0.0026201463206360787 +977 7 training.batch_size 0.0 +977 7 training.label_smoothing 0.0018746159681149964 +977 8 model.embedding_dim 1.0 +977 8 model.relation_dim 2.0 +977 8 model.dropout_0 0.3076084417488891 +977 8 model.dropout_1 0.2740797464412728 +977 8 model.dropout_2 0.3132906403730207 +977 8 optimizer.lr 0.009681719734357386 +977 8 training.batch_size 1.0 +977 8 training.label_smoothing 0.014225627498496074 +977 9 model.embedding_dim 0.0 +977 9 model.relation_dim 2.0 +977 9 model.dropout_0 0.2252269069027024 +977 9 model.dropout_1 0.40038895777155403 +977 9 model.dropout_2 0.17497993582458016 +977 9 optimizer.lr 0.004015567202219074 +977 9 training.batch_size 2.0 +977 9 training.label_smoothing 0.7948091880183824 +977 10 model.embedding_dim 0.0 +977 10 model.relation_dim 0.0 +977 10 model.dropout_0 0.29313346811338253 +977 10 model.dropout_1 0.4777808428383451 +977 10 model.dropout_2 0.22668506658458756 +977 10 optimizer.lr 0.004163052192641337 +977 10 training.batch_size 1.0 +977 10 
training.label_smoothing 0.0015628934640011013 +977 11 model.embedding_dim 0.0 +977 11 model.relation_dim 0.0 +977 11 model.dropout_0 0.2503620276474395 +977 11 model.dropout_1 0.4446709268457567 +977 11 model.dropout_2 0.19062003073303224 +977 11 optimizer.lr 0.00386452084010186 +977 11 training.batch_size 0.0 +977 11 training.label_smoothing 0.02466212274746804 +977 12 model.embedding_dim 0.0 +977 12 model.relation_dim 1.0 +977 12 model.dropout_0 0.2646220439348172 +977 12 model.dropout_1 0.3350647317118547 +977 12 model.dropout_2 0.39155872763040833 +977 12 optimizer.lr 0.0017281361679948543 +977 12 training.batch_size 2.0 +977 12 training.label_smoothing 0.003329000861562389 +977 13 model.embedding_dim 2.0 +977 13 model.relation_dim 0.0 +977 13 model.dropout_0 0.23732124562160425 +977 13 model.dropout_1 0.16009086671696623 +977 13 model.dropout_2 0.3715579475802619 +977 13 optimizer.lr 0.07592560477160248 +977 13 training.batch_size 2.0 +977 13 training.label_smoothing 0.005045809127320789 +977 14 model.embedding_dim 0.0 +977 14 model.relation_dim 2.0 +977 14 model.dropout_0 0.11342472351342536 +977 14 model.dropout_1 0.3365749850961038 +977 14 model.dropout_2 0.35076819574274315 +977 14 optimizer.lr 0.0013029066275543684 +977 14 training.batch_size 2.0 +977 14 training.label_smoothing 0.0022724539618475276 +977 15 model.embedding_dim 0.0 +977 15 model.relation_dim 0.0 +977 15 model.dropout_0 0.10949782842113978 +977 15 model.dropout_1 0.35254213711886817 +977 15 model.dropout_2 0.25143527119724013 +977 15 optimizer.lr 0.06811458153796437 +977 15 training.batch_size 2.0 +977 15 training.label_smoothing 0.014627269529474393 +977 16 model.embedding_dim 2.0 +977 16 model.relation_dim 1.0 +977 16 model.dropout_0 0.1948789390367033 +977 16 model.dropout_1 0.3408355020721012 +977 16 model.dropout_2 0.2883941192359895 +977 16 optimizer.lr 0.014578443130011904 +977 16 training.batch_size 2.0 +977 16 training.label_smoothing 0.280935621176136 +977 17 model.embedding_dim 
1.0 +977 17 model.relation_dim 0.0 +977 17 model.dropout_0 0.30843112543814044 +977 17 model.dropout_1 0.3026908367165683 +977 17 model.dropout_2 0.1774785598657844 +977 17 optimizer.lr 0.001095302549452073 +977 17 training.batch_size 0.0 +977 17 training.label_smoothing 0.012778620829318129 +977 18 model.embedding_dim 2.0 +977 18 model.relation_dim 2.0 +977 18 model.dropout_0 0.149396576039178 +977 18 model.dropout_1 0.39446272494794377 +977 18 model.dropout_2 0.2850346615606177 +977 18 optimizer.lr 0.008518709753359088 +977 18 training.batch_size 0.0 +977 18 training.label_smoothing 0.01522647646725592 +977 19 model.embedding_dim 0.0 +977 19 model.relation_dim 2.0 +977 19 model.dropout_0 0.2708544894527201 +977 19 model.dropout_1 0.45599003628024737 +977 19 model.dropout_2 0.3773876779243888 +977 19 optimizer.lr 0.009232221969477305 +977 19 training.batch_size 1.0 +977 19 training.label_smoothing 0.006505516409401121 +977 20 model.embedding_dim 1.0 +977 20 model.relation_dim 1.0 +977 20 model.dropout_0 0.13980160359113883 +977 20 model.dropout_1 0.21228246744582804 +977 20 model.dropout_2 0.13286627693824848 +977 20 optimizer.lr 0.015519567625702679 +977 20 training.batch_size 1.0 +977 20 training.label_smoothing 0.10068304413128419 +977 21 model.embedding_dim 2.0 +977 21 model.relation_dim 1.0 +977 21 model.dropout_0 0.469192876604605 +977 21 model.dropout_1 0.2224488577590992 +977 21 model.dropout_2 0.1555635457009038 +977 21 optimizer.lr 0.009661752713220766 +977 21 training.batch_size 2.0 +977 21 training.label_smoothing 0.10069416839363103 +977 22 model.embedding_dim 2.0 +977 22 model.relation_dim 0.0 +977 22 model.dropout_0 0.47642662564617727 +977 22 model.dropout_1 0.4837798846388692 +977 22 model.dropout_2 0.18611326612565457 +977 22 optimizer.lr 0.09574483872742738 +977 22 training.batch_size 0.0 +977 22 training.label_smoothing 0.0025477676958583248 +977 23 model.embedding_dim 2.0 +977 23 model.relation_dim 0.0 +977 23 model.dropout_0 
0.10033377254086982 +977 23 model.dropout_1 0.26537833496818447 +977 23 model.dropout_2 0.4404715707405833 +977 23 optimizer.lr 0.01841291385847173 +977 23 training.batch_size 2.0 +977 23 training.label_smoothing 0.006601502530760716 +977 24 model.embedding_dim 2.0 +977 24 model.relation_dim 1.0 +977 24 model.dropout_0 0.474132788290144 +977 24 model.dropout_1 0.33856325602060855 +977 24 model.dropout_2 0.13767175188309996 +977 24 optimizer.lr 0.0011542649894990913 +977 24 training.batch_size 2.0 +977 24 training.label_smoothing 0.5303020468491432 +977 25 model.embedding_dim 0.0 +977 25 model.relation_dim 2.0 +977 25 model.dropout_0 0.2534948349628877 +977 25 model.dropout_1 0.3953874524552911 +977 25 model.dropout_2 0.3953112901471414 +977 25 optimizer.lr 0.0018599009063315643 +977 25 training.batch_size 2.0 +977 25 training.label_smoothing 0.29690616334515063 +977 26 model.embedding_dim 0.0 +977 26 model.relation_dim 0.0 +977 26 model.dropout_0 0.16641617743114898 +977 26 model.dropout_1 0.30179717283503377 +977 26 model.dropout_2 0.36682572067316804 +977 26 optimizer.lr 0.04656473627090099 +977 26 training.batch_size 0.0 +977 26 training.label_smoothing 0.002228487224756859 +977 27 model.embedding_dim 1.0 +977 27 model.relation_dim 2.0 +977 27 model.dropout_0 0.12040924734368308 +977 27 model.dropout_1 0.3331104812072294 +977 27 model.dropout_2 0.19645862349854665 +977 27 optimizer.lr 0.021845182534560036 +977 27 training.batch_size 1.0 +977 27 training.label_smoothing 0.04080347666380881 +977 28 model.embedding_dim 2.0 +977 28 model.relation_dim 0.0 +977 28 model.dropout_0 0.12831486846565854 +977 28 model.dropout_1 0.39790611628425143 +977 28 model.dropout_2 0.4181329055147014 +977 28 optimizer.lr 0.020126180181183215 +977 28 training.batch_size 2.0 +977 28 training.label_smoothing 0.005079458556776411 +977 29 model.embedding_dim 2.0 +977 29 model.relation_dim 1.0 +977 29 model.dropout_0 0.14513449852079813 +977 29 model.dropout_1 0.478668532465504 +977 29 
model.dropout_2 0.43742230083777883 +977 29 optimizer.lr 0.05396143735711297 +977 29 training.batch_size 0.0 +977 29 training.label_smoothing 0.057475551010105194 +977 30 model.embedding_dim 0.0 +977 30 model.relation_dim 2.0 +977 30 model.dropout_0 0.17596155075469883 +977 30 model.dropout_1 0.3501289681392932 +977 30 model.dropout_2 0.17189768161202018 +977 30 optimizer.lr 0.015315438848062449 +977 30 training.batch_size 2.0 +977 30 training.label_smoothing 0.19689480317111105 +977 31 model.embedding_dim 0.0 +977 31 model.relation_dim 2.0 +977 31 model.dropout_0 0.46688930956215247 +977 31 model.dropout_1 0.17423779169569326 +977 31 model.dropout_2 0.3344258403726432 +977 31 optimizer.lr 0.004089514879489028 +977 31 training.batch_size 1.0 +977 31 training.label_smoothing 0.26221751771701696 +977 32 model.embedding_dim 2.0 +977 32 model.relation_dim 2.0 +977 32 model.dropout_0 0.14063179323477587 +977 32 model.dropout_1 0.462031373469486 +977 32 model.dropout_2 0.16617907257911246 +977 32 optimizer.lr 0.025645263934327464 +977 32 training.batch_size 1.0 +977 32 training.label_smoothing 0.0014920148678846488 +977 33 model.embedding_dim 2.0 +977 33 model.relation_dim 2.0 +977 33 model.dropout_0 0.3032191976670608 +977 33 model.dropout_1 0.420618346706199 +977 33 model.dropout_2 0.21160553404861993 +977 33 optimizer.lr 0.0014970286178330947 +977 33 training.batch_size 0.0 +977 33 training.label_smoothing 0.07300921058548157 +977 34 model.embedding_dim 0.0 +977 34 model.relation_dim 2.0 +977 34 model.dropout_0 0.4259086336823905 +977 34 model.dropout_1 0.10354506039873597 +977 34 model.dropout_2 0.4300466062557199 +977 34 optimizer.lr 0.003315062708971845 +977 34 training.batch_size 1.0 +977 34 training.label_smoothing 0.12515402274312998 +977 35 model.embedding_dim 2.0 +977 35 model.relation_dim 2.0 +977 35 model.dropout_0 0.23046632754200613 +977 35 model.dropout_1 0.3931183812017152 +977 35 model.dropout_2 0.3811052318223413 +977 35 optimizer.lr 
0.0015155278314041792 +977 35 training.batch_size 1.0 +977 35 training.label_smoothing 0.5490637597440831 +977 36 model.embedding_dim 2.0 +977 36 model.relation_dim 2.0 +977 36 model.dropout_0 0.3047931258705764 +977 36 model.dropout_1 0.4713584483500969 +977 36 model.dropout_2 0.3769933231330562 +977 36 optimizer.lr 0.015318058771236993 +977 36 training.batch_size 1.0 +977 36 training.label_smoothing 0.0030456282360905675 +977 37 model.embedding_dim 0.0 +977 37 model.relation_dim 2.0 +977 37 model.dropout_0 0.2379182251414676 +977 37 model.dropout_1 0.26834665890775034 +977 37 model.dropout_2 0.3958019202206612 +977 37 optimizer.lr 0.0038381842359358044 +977 37 training.batch_size 1.0 +977 37 training.label_smoothing 0.009327151414042298 +977 38 model.embedding_dim 0.0 +977 38 model.relation_dim 1.0 +977 38 model.dropout_0 0.23631336420161642 +977 38 model.dropout_1 0.1701183046495253 +977 38 model.dropout_2 0.27903126625891195 +977 38 optimizer.lr 0.043481659940952995 +977 38 training.batch_size 2.0 +977 38 training.label_smoothing 0.0012875536717963866 +977 39 model.embedding_dim 1.0 +977 39 model.relation_dim 2.0 +977 39 model.dropout_0 0.2704671978136391 +977 39 model.dropout_1 0.24012162636271117 +977 39 model.dropout_2 0.1812019783614956 +977 39 optimizer.lr 0.039942079634121413 +977 39 training.batch_size 1.0 +977 39 training.label_smoothing 0.01767003084854667 +977 40 model.embedding_dim 1.0 +977 40 model.relation_dim 2.0 +977 40 model.dropout_0 0.4518998079325258 +977 40 model.dropout_1 0.117198254341197 +977 40 model.dropout_2 0.4290377650610016 +977 40 optimizer.lr 0.0024009406072158413 +977 40 training.batch_size 2.0 +977 40 training.label_smoothing 0.0040278293760662295 +977 41 model.embedding_dim 2.0 +977 41 model.relation_dim 0.0 +977 41 model.dropout_0 0.24545948395140274 +977 41 model.dropout_1 0.11040805753891791 +977 41 model.dropout_2 0.2819966388721911 +977 41 optimizer.lr 0.003358551764530529 +977 41 training.batch_size 2.0 +977 41 
training.label_smoothing 0.0012423826111996683 +977 42 model.embedding_dim 2.0 +977 42 model.relation_dim 0.0 +977 42 model.dropout_0 0.4811550948616019 +977 42 model.dropout_1 0.16676212966903156 +977 42 model.dropout_2 0.23367362354501378 +977 42 optimizer.lr 0.0010784073226626776 +977 42 training.batch_size 1.0 +977 42 training.label_smoothing 0.07419212741107709 +977 43 model.embedding_dim 2.0 +977 43 model.relation_dim 0.0 +977 43 model.dropout_0 0.30160226408311225 +977 43 model.dropout_1 0.15077888000005712 +977 43 model.dropout_2 0.21238609904297046 +977 43 optimizer.lr 0.003332402684456313 +977 43 training.batch_size 1.0 +977 43 training.label_smoothing 0.0011985285202505043 +977 44 model.embedding_dim 1.0 +977 44 model.relation_dim 0.0 +977 44 model.dropout_0 0.10572889086003943 +977 44 model.dropout_1 0.324426886443796 +977 44 model.dropout_2 0.2414612198499655 +977 44 optimizer.lr 0.0016711194261198462 +977 44 training.batch_size 0.0 +977 44 training.label_smoothing 0.05363656780340246 +977 45 model.embedding_dim 0.0 +977 45 model.relation_dim 2.0 +977 45 model.dropout_0 0.4235645560222903 +977 45 model.dropout_1 0.23035558392199929 +977 45 model.dropout_2 0.23381200059766122 +977 45 optimizer.lr 0.012779535842552708 +977 45 training.batch_size 2.0 +977 45 training.label_smoothing 0.815022161598655 +977 46 model.embedding_dim 2.0 +977 46 model.relation_dim 0.0 +977 46 model.dropout_0 0.4964351715378626 +977 46 model.dropout_1 0.1751589079765349 +977 46 model.dropout_2 0.3350197870190861 +977 46 optimizer.lr 0.006306728287535694 +977 46 training.batch_size 1.0 +977 46 training.label_smoothing 0.48986921732760913 +977 47 model.embedding_dim 2.0 +977 47 model.relation_dim 1.0 +977 47 model.dropout_0 0.11206254558108611 +977 47 model.dropout_1 0.24665833884203214 +977 47 model.dropout_2 0.34838509743898866 +977 47 optimizer.lr 0.05543542662506647 +977 47 training.batch_size 2.0 +977 47 training.label_smoothing 0.0011939126215352372 +977 48 
model.embedding_dim 1.0 +977 48 model.relation_dim 1.0 +977 48 model.dropout_0 0.3653813949579857 +977 48 model.dropout_1 0.44763479456005406 +977 48 model.dropout_2 0.1641319639138033 +977 48 optimizer.lr 0.01800616383915689 +977 48 training.batch_size 1.0 +977 48 training.label_smoothing 0.0012175677868941715 +977 49 model.embedding_dim 0.0 +977 49 model.relation_dim 1.0 +977 49 model.dropout_0 0.16440258859685591 +977 49 model.dropout_1 0.14851457544542687 +977 49 model.dropout_2 0.31653779806216964 +977 49 optimizer.lr 0.07018819506917445 +977 49 training.batch_size 1.0 +977 49 training.label_smoothing 0.0035774988838422456 +977 50 model.embedding_dim 2.0 +977 50 model.relation_dim 0.0 +977 50 model.dropout_0 0.21696132910797 +977 50 model.dropout_1 0.3255727356117398 +977 50 model.dropout_2 0.28388474838846955 +977 50 optimizer.lr 0.014082776807467938 +977 50 training.batch_size 1.0 +977 50 training.label_smoothing 0.0010722839777232685 +977 51 model.embedding_dim 1.0 +977 51 model.relation_dim 1.0 +977 51 model.dropout_0 0.344662353478861 +977 51 model.dropout_1 0.3036866024899338 +977 51 model.dropout_2 0.23578685446130987 +977 51 optimizer.lr 0.07969857732829556 +977 51 training.batch_size 2.0 +977 51 training.label_smoothing 0.013504156351393957 +977 52 model.embedding_dim 2.0 +977 52 model.relation_dim 2.0 +977 52 model.dropout_0 0.1902912408254286 +977 52 model.dropout_1 0.15178220007174317 +977 52 model.dropout_2 0.12788866293455184 +977 52 optimizer.lr 0.0066911147430480395 +977 52 training.batch_size 1.0 +977 52 training.label_smoothing 0.003759462685119528 +977 53 model.embedding_dim 0.0 +977 53 model.relation_dim 0.0 +977 53 model.dropout_0 0.4347979654933812 +977 53 model.dropout_1 0.15904341565105695 +977 53 model.dropout_2 0.12316183826910665 +977 53 optimizer.lr 0.002749177009303035 +977 53 training.batch_size 1.0 +977 53 training.label_smoothing 0.28424738526245097 +977 54 model.embedding_dim 0.0 +977 54 model.relation_dim 2.0 +977 54 
model.dropout_0 0.4909927654765564 +977 54 model.dropout_1 0.3633271890915672 +977 54 model.dropout_2 0.10893551069389931 +977 54 optimizer.lr 0.07306918510422143 +977 54 training.batch_size 2.0 +977 54 training.label_smoothing 0.011684245534978781 +977 55 model.embedding_dim 2.0 +977 55 model.relation_dim 1.0 +977 55 model.dropout_0 0.24191566074740814 +977 55 model.dropout_1 0.35249127977610933 +977 55 model.dropout_2 0.4612181403807044 +977 55 optimizer.lr 0.019851313007508982 +977 55 training.batch_size 0.0 +977 55 training.label_smoothing 0.007042021512069821 +977 56 model.embedding_dim 2.0 +977 56 model.relation_dim 2.0 +977 56 model.dropout_0 0.3194840010249207 +977 56 model.dropout_1 0.331539209162454 +977 56 model.dropout_2 0.4246056284691073 +977 56 optimizer.lr 0.0017287453653369734 +977 56 training.batch_size 2.0 +977 56 training.label_smoothing 0.004368234653349961 +977 57 model.embedding_dim 0.0 +977 57 model.relation_dim 2.0 +977 57 model.dropout_0 0.4854033659874403 +977 57 model.dropout_1 0.42582657467678664 +977 57 model.dropout_2 0.31800846059658916 +977 57 optimizer.lr 0.04222177299207591 +977 57 training.batch_size 0.0 +977 57 training.label_smoothing 0.019574719145020333 +977 58 model.embedding_dim 2.0 +977 58 model.relation_dim 1.0 +977 58 model.dropout_0 0.11760597683831052 +977 58 model.dropout_1 0.296386086777486 +977 58 model.dropout_2 0.2855025516528861 +977 58 optimizer.lr 0.005927456855631911 +977 58 training.batch_size 1.0 +977 58 training.label_smoothing 0.022612199250028688 +977 59 model.embedding_dim 0.0 +977 59 model.relation_dim 1.0 +977 59 model.dropout_0 0.4572316011473976 +977 59 model.dropout_1 0.2882972608058171 +977 59 model.dropout_2 0.17115680639447511 +977 59 optimizer.lr 0.05591315020129403 +977 59 training.batch_size 2.0 +977 59 training.label_smoothing 0.003135651822231711 +977 60 model.embedding_dim 2.0 +977 60 model.relation_dim 0.0 +977 60 model.dropout_0 0.3966215772288294 +977 60 model.dropout_1 
0.33464545547393754 +977 60 model.dropout_2 0.13616245738387733 +977 60 optimizer.lr 0.006812345491000562 +977 60 training.batch_size 0.0 +977 60 training.label_smoothing 0.08872167671806498 +977 61 model.embedding_dim 1.0 +977 61 model.relation_dim 0.0 +977 61 model.dropout_0 0.1187968977451396 +977 61 model.dropout_1 0.42466765122031697 +977 61 model.dropout_2 0.4196154183860554 +977 61 optimizer.lr 0.07936720599995063 +977 61 training.batch_size 1.0 +977 61 training.label_smoothing 0.06523616732643132 +977 62 model.embedding_dim 2.0 +977 62 model.relation_dim 0.0 +977 62 model.dropout_0 0.1139962091087094 +977 62 model.dropout_1 0.10965614019180664 +977 62 model.dropout_2 0.24179000978309695 +977 62 optimizer.lr 0.012438073060401041 +977 62 training.batch_size 2.0 +977 62 training.label_smoothing 0.004060421132568293 +977 63 model.embedding_dim 2.0 +977 63 model.relation_dim 1.0 +977 63 model.dropout_0 0.3241620641816304 +977 63 model.dropout_1 0.12445605127569165 +977 63 model.dropout_2 0.49352404743849715 +977 63 optimizer.lr 0.015904385319227107 +977 63 training.batch_size 1.0 +977 63 training.label_smoothing 0.0902297960067083 +977 64 model.embedding_dim 1.0 +977 64 model.relation_dim 0.0 +977 64 model.dropout_0 0.36728947731840306 +977 64 model.dropout_1 0.18529337410294022 +977 64 model.dropout_2 0.42379543555027177 +977 64 optimizer.lr 0.0755502655036738 +977 64 training.batch_size 1.0 +977 64 training.label_smoothing 0.17443734383990034 +977 65 model.embedding_dim 1.0 +977 65 model.relation_dim 1.0 +977 65 model.dropout_0 0.31520850643415743 +977 65 model.dropout_1 0.4104859658537482 +977 65 model.dropout_2 0.39217858275859574 +977 65 optimizer.lr 0.05075128316566703 +977 65 training.batch_size 0.0 +977 65 training.label_smoothing 0.1250150790733126 +977 66 model.embedding_dim 0.0 +977 66 model.relation_dim 1.0 +977 66 model.dropout_0 0.4665044017535923 +977 66 model.dropout_1 0.1886918655608895 +977 66 model.dropout_2 0.17149209320778125 +977 66 
optimizer.lr 0.004568370606048093 +977 66 training.batch_size 1.0 +977 66 training.label_smoothing 0.12453145199962246 +977 67 model.embedding_dim 1.0 +977 67 model.relation_dim 1.0 +977 67 model.dropout_0 0.31122060581234534 +977 67 model.dropout_1 0.42129602357868917 +977 67 model.dropout_2 0.4805602105408339 +977 67 optimizer.lr 0.001628708630540282 +977 67 training.batch_size 0.0 +977 67 training.label_smoothing 0.19462627001821947 +977 68 model.embedding_dim 1.0 +977 68 model.relation_dim 1.0 +977 68 model.dropout_0 0.25144302621894504 +977 68 model.dropout_1 0.13082855616800212 +977 68 model.dropout_2 0.4865668152350946 +977 68 optimizer.lr 0.003713894463076704 +977 68 training.batch_size 2.0 +977 68 training.label_smoothing 0.006250835694532508 +977 69 model.embedding_dim 2.0 +977 69 model.relation_dim 0.0 +977 69 model.dropout_0 0.1646463378338257 +977 69 model.dropout_1 0.11637810407776593 +977 69 model.dropout_2 0.3866802325529386 +977 69 optimizer.lr 0.0013701086476955804 +977 69 training.batch_size 2.0 +977 69 training.label_smoothing 0.7535167185306942 +977 70 model.embedding_dim 1.0 +977 70 model.relation_dim 1.0 +977 70 model.dropout_0 0.23078721342808792 +977 70 model.dropout_1 0.48110023001156355 +977 70 model.dropout_2 0.18053575862975735 +977 70 optimizer.lr 0.0013939122809955174 +977 70 training.batch_size 1.0 +977 70 training.label_smoothing 0.25621076103930424 +977 71 model.embedding_dim 2.0 +977 71 model.relation_dim 1.0 +977 71 model.dropout_0 0.36730131891622597 +977 71 model.dropout_1 0.27219430426800967 +977 71 model.dropout_2 0.17228929271597573 +977 71 optimizer.lr 0.0034165134992678154 +977 71 training.batch_size 0.0 +977 71 training.label_smoothing 0.05703195600252963 +977 72 model.embedding_dim 2.0 +977 72 model.relation_dim 2.0 +977 72 model.dropout_0 0.2061408324833856 +977 72 model.dropout_1 0.32549977319951484 +977 72 model.dropout_2 0.4739061360602405 +977 72 optimizer.lr 0.012655345566111455 +977 72 training.batch_size 1.0 +977 
72 training.label_smoothing 0.4126571310411031 +977 73 model.embedding_dim 2.0 +977 73 model.relation_dim 0.0 +977 73 model.dropout_0 0.3917256822281891 +977 73 model.dropout_1 0.10792375263750911 +977 73 model.dropout_2 0.28236471752113745 +977 73 optimizer.lr 0.013992435139896347 +977 73 training.batch_size 2.0 +977 73 training.label_smoothing 0.0011108793102058976 +977 74 model.embedding_dim 0.0 +977 74 model.relation_dim 1.0 +977 74 model.dropout_0 0.18681084697853023 +977 74 model.dropout_1 0.31079182682977785 +977 74 model.dropout_2 0.1912669063586927 +977 74 optimizer.lr 0.010908719809000404 +977 74 training.batch_size 0.0 +977 74 training.label_smoothing 0.0017196352421065062 +977 75 model.embedding_dim 0.0 +977 75 model.relation_dim 0.0 +977 75 model.dropout_0 0.3802622537439172 +977 75 model.dropout_1 0.3022166227360161 +977 75 model.dropout_2 0.20107452872841375 +977 75 optimizer.lr 0.0012586378383409676 +977 75 training.batch_size 0.0 +977 75 training.label_smoothing 0.0012121449141567111 +977 76 model.embedding_dim 2.0 +977 76 model.relation_dim 0.0 +977 76 model.dropout_0 0.11821099876931043 +977 76 model.dropout_1 0.4674051560054001 +977 76 model.dropout_2 0.13645368843046857 +977 76 optimizer.lr 0.011983573565340045 +977 76 training.batch_size 1.0 +977 76 training.label_smoothing 0.003364529397078371 +977 77 model.embedding_dim 1.0 +977 77 model.relation_dim 0.0 +977 77 model.dropout_0 0.19723071288844093 +977 77 model.dropout_1 0.23974596210344712 +977 77 model.dropout_2 0.45854697312092474 +977 77 optimizer.lr 0.032800093505219556 +977 77 training.batch_size 1.0 +977 77 training.label_smoothing 0.06850147611449992 +977 78 model.embedding_dim 0.0 +977 78 model.relation_dim 2.0 +977 78 model.dropout_0 0.13251371112458293 +977 78 model.dropout_1 0.16483116164003753 +977 78 model.dropout_2 0.38591049660507953 +977 78 optimizer.lr 0.02486284871439085 +977 78 training.batch_size 0.0 +977 78 training.label_smoothing 0.19662394563887808 +977 79 
model.embedding_dim 0.0 +977 79 model.relation_dim 0.0 +977 79 model.dropout_0 0.4215066694801218 +977 79 model.dropout_1 0.11347705851430626 +977 79 model.dropout_2 0.33667086248729083 +977 79 optimizer.lr 0.038970442824686445 +977 79 training.batch_size 0.0 +977 79 training.label_smoothing 0.0024699019364279673 +977 80 model.embedding_dim 0.0 +977 80 model.relation_dim 0.0 +977 80 model.dropout_0 0.10057606098864294 +977 80 model.dropout_1 0.13454518244635466 +977 80 model.dropout_2 0.338949345807898 +977 80 optimizer.lr 0.001406901987974952 +977 80 training.batch_size 1.0 +977 80 training.label_smoothing 0.392166689552288 +977 81 model.embedding_dim 1.0 +977 81 model.relation_dim 1.0 +977 81 model.dropout_0 0.2593488937577311 +977 81 model.dropout_1 0.377425214932356 +977 81 model.dropout_2 0.3663759391983816 +977 81 optimizer.lr 0.002832969718005276 +977 81 training.batch_size 2.0 +977 81 training.label_smoothing 0.27249857378549897 +977 82 model.embedding_dim 0.0 +977 82 model.relation_dim 0.0 +977 82 model.dropout_0 0.37802309320911337 +977 82 model.dropout_1 0.3635757598534984 +977 82 model.dropout_2 0.47284819113536036 +977 82 optimizer.lr 0.019901713945983283 +977 82 training.batch_size 1.0 +977 82 training.label_smoothing 0.0025378133463939117 +977 83 model.embedding_dim 2.0 +977 83 model.relation_dim 2.0 +977 83 model.dropout_0 0.3049616507513497 +977 83 model.dropout_1 0.4919501719080769 +977 83 model.dropout_2 0.14784463482625185 +977 83 optimizer.lr 0.05075150779180565 +977 83 training.batch_size 2.0 +977 83 training.label_smoothing 0.003978659490810372 +977 84 model.embedding_dim 1.0 +977 84 model.relation_dim 2.0 +977 84 model.dropout_0 0.27737416382415897 +977 84 model.dropout_1 0.2058580570448379 +977 84 model.dropout_2 0.23274904255818907 +977 84 optimizer.lr 0.0014950295620802746 +977 84 training.batch_size 1.0 +977 84 training.label_smoothing 0.0018276405257508942 +977 85 model.embedding_dim 2.0 +977 85 model.relation_dim 0.0 +977 85 
model.dropout_0 0.4211836117969264 +977 85 model.dropout_1 0.4412786820545458 +977 85 model.dropout_2 0.35190330569514655 +977 85 optimizer.lr 0.002048765237933961 +977 85 training.batch_size 0.0 +977 85 training.label_smoothing 0.00702738119954855 +977 86 model.embedding_dim 2.0 +977 86 model.relation_dim 2.0 +977 86 model.dropout_0 0.47079677050843166 +977 86 model.dropout_1 0.13951975854714993 +977 86 model.dropout_2 0.12226078952745274 +977 86 optimizer.lr 0.020340626780354486 +977 86 training.batch_size 2.0 +977 86 training.label_smoothing 0.023367226677450895 +977 87 model.embedding_dim 2.0 +977 87 model.relation_dim 0.0 +977 87 model.dropout_0 0.23531209826190833 +977 87 model.dropout_1 0.3370468994221112 +977 87 model.dropout_2 0.43183145749569835 +977 87 optimizer.lr 0.002385922111936451 +977 87 training.batch_size 0.0 +977 87 training.label_smoothing 0.1237357164502104 +977 88 model.embedding_dim 1.0 +977 88 model.relation_dim 0.0 +977 88 model.dropout_0 0.3832459668654475 +977 88 model.dropout_1 0.442851196325489 +977 88 model.dropout_2 0.42563385555161193 +977 88 optimizer.lr 0.019494048145623923 +977 88 training.batch_size 1.0 +977 88 training.label_smoothing 0.006275017877234707 +977 89 model.embedding_dim 2.0 +977 89 model.relation_dim 1.0 +977 89 model.dropout_0 0.2485198669214984 +977 89 model.dropout_1 0.13332812957312198 +977 89 model.dropout_2 0.13837743192737914 +977 89 optimizer.lr 0.013867393508019325 +977 89 training.batch_size 0.0 +977 89 training.label_smoothing 0.0025858614668496085 +977 90 model.embedding_dim 1.0 +977 90 model.relation_dim 2.0 +977 90 model.dropout_0 0.1712627273352757 +977 90 model.dropout_1 0.3004388127976083 +977 90 model.dropout_2 0.11304444489198812 +977 90 optimizer.lr 0.008125110399195492 +977 90 training.batch_size 1.0 +977 90 training.label_smoothing 0.0052978764289013875 +977 91 model.embedding_dim 2.0 +977 91 model.relation_dim 2.0 +977 91 model.dropout_0 0.38910542730863 +977 91 model.dropout_1 
0.10653704759655419 +977 91 model.dropout_2 0.40981077526008153 +977 91 optimizer.lr 0.016503080605003967 +977 91 training.batch_size 2.0 +977 91 training.label_smoothing 0.15699417438032837 +977 92 model.embedding_dim 0.0 +977 92 model.relation_dim 2.0 +977 92 model.dropout_0 0.17529314273909802 +977 92 model.dropout_1 0.1264772202586177 +977 92 model.dropout_2 0.2148861633765806 +977 92 optimizer.lr 0.0011446496015201752 +977 92 training.batch_size 2.0 +977 92 training.label_smoothing 0.6496477719020385 +977 93 model.embedding_dim 2.0 +977 93 model.relation_dim 2.0 +977 93 model.dropout_0 0.21520941397524004 +977 93 model.dropout_1 0.2814250753937575 +977 93 model.dropout_2 0.3293409696368534 +977 93 optimizer.lr 0.018986381861613756 +977 93 training.batch_size 1.0 +977 93 training.label_smoothing 0.00977243071003394 +977 94 model.embedding_dim 0.0 +977 94 model.relation_dim 0.0 +977 94 model.dropout_0 0.341700871515356 +977 94 model.dropout_1 0.3064716620001562 +977 94 model.dropout_2 0.3496133154847951 +977 94 optimizer.lr 0.0031663257606784974 +977 94 training.batch_size 1.0 +977 94 training.label_smoothing 0.03915758216126213 +977 95 model.embedding_dim 2.0 +977 95 model.relation_dim 2.0 +977 95 model.dropout_0 0.2917820816790249 +977 95 model.dropout_1 0.18889012560681884 +977 95 model.dropout_2 0.17954889212171304 +977 95 optimizer.lr 0.017666681806744324 +977 95 training.batch_size 0.0 +977 95 training.label_smoothing 0.0026348664119435414 +977 96 model.embedding_dim 0.0 +977 96 model.relation_dim 1.0 +977 96 model.dropout_0 0.1406803959289331 +977 96 model.dropout_1 0.4830302311032853 +977 96 model.dropout_2 0.43181849184091486 +977 96 optimizer.lr 0.018773366407009596 +977 96 training.batch_size 2.0 +977 96 training.label_smoothing 0.009830515834023133 +977 97 model.embedding_dim 2.0 +977 97 model.relation_dim 1.0 +977 97 model.dropout_0 0.32042036953801883 +977 97 model.dropout_1 0.3153510404556654 +977 97 model.dropout_2 0.22536713092636096 +977 97 
optimizer.lr 0.0019031890908793397 +977 97 training.batch_size 0.0 +977 97 training.label_smoothing 0.6669059639000596 +977 98 model.embedding_dim 2.0 +977 98 model.relation_dim 0.0 +977 98 model.dropout_0 0.3203865340922272 +977 98 model.dropout_1 0.27465219505723937 +977 98 model.dropout_2 0.4427925013300644 +977 98 optimizer.lr 0.05402352674651441 +977 98 training.batch_size 1.0 +977 98 training.label_smoothing 0.3531395317873203 +977 99 model.embedding_dim 1.0 +977 99 model.relation_dim 1.0 +977 99 model.dropout_0 0.25289131876531584 +977 99 model.dropout_1 0.15107624755524768 +977 99 model.dropout_2 0.2189620194860616 +977 99 optimizer.lr 0.0060118393785608985 +977 99 training.batch_size 2.0 +977 99 training.label_smoothing 0.0012767366862744634 +977 100 model.embedding_dim 1.0 +977 100 model.relation_dim 1.0 +977 100 model.dropout_0 0.232929015112848 +977 100 model.dropout_1 0.14373289076860002 +977 100 model.dropout_2 0.26885049077612727 +977 100 optimizer.lr 0.0019143883176407737 +977 100 training.batch_size 0.0 +977 100 training.label_smoothing 0.0010267617657293195 +977 1 dataset """kinships""" +977 1 model """tucker""" +977 1 loss """bceaftersigmoid""" +977 1 regularizer """no""" +977 1 optimizer """adam""" +977 1 training_loop """lcwa""" +977 1 evaluator """rankbased""" +977 2 dataset """kinships""" +977 2 model """tucker""" +977 2 loss """bceaftersigmoid""" +977 2 regularizer """no""" +977 2 optimizer """adam""" +977 2 training_loop """lcwa""" +977 2 evaluator """rankbased""" +977 3 dataset """kinships""" +977 3 model """tucker""" +977 3 loss """bceaftersigmoid""" +977 3 regularizer """no""" +977 3 optimizer """adam""" +977 3 training_loop """lcwa""" +977 3 evaluator """rankbased""" +977 4 dataset """kinships""" +977 4 model """tucker""" +977 4 loss """bceaftersigmoid""" +977 4 regularizer """no""" +977 4 optimizer """adam""" +977 4 training_loop """lcwa""" +977 4 evaluator """rankbased""" +977 5 dataset """kinships""" +977 5 model """tucker""" +977 5 
loss """bceaftersigmoid""" +977 5 regularizer """no""" +977 5 optimizer """adam""" +977 5 training_loop """lcwa""" +977 5 evaluator """rankbased""" +977 6 dataset """kinships""" +977 6 model """tucker""" +977 6 loss """bceaftersigmoid""" +977 6 regularizer """no""" +977 6 optimizer """adam""" +977 6 training_loop """lcwa""" +977 6 evaluator """rankbased""" +977 7 dataset """kinships""" +977 7 model """tucker""" +977 7 loss """bceaftersigmoid""" +977 7 regularizer """no""" +977 7 optimizer """adam""" +977 7 training_loop """lcwa""" +977 7 evaluator """rankbased""" +977 8 dataset """kinships""" +977 8 model """tucker""" +977 8 loss """bceaftersigmoid""" +977 8 regularizer """no""" +977 8 optimizer """adam""" +977 8 training_loop """lcwa""" +977 8 evaluator """rankbased""" +977 9 dataset """kinships""" +977 9 model """tucker""" +977 9 loss """bceaftersigmoid""" +977 9 regularizer """no""" +977 9 optimizer """adam""" +977 9 training_loop """lcwa""" +977 9 evaluator """rankbased""" +977 10 dataset """kinships""" +977 10 model """tucker""" +977 10 loss """bceaftersigmoid""" +977 10 regularizer """no""" +977 10 optimizer """adam""" +977 10 training_loop """lcwa""" +977 10 evaluator """rankbased""" +977 11 dataset """kinships""" +977 11 model """tucker""" +977 11 loss """bceaftersigmoid""" +977 11 regularizer """no""" +977 11 optimizer """adam""" +977 11 training_loop """lcwa""" +977 11 evaluator """rankbased""" +977 12 dataset """kinships""" +977 12 model """tucker""" +977 12 loss """bceaftersigmoid""" +977 12 regularizer """no""" +977 12 optimizer """adam""" +977 12 training_loop """lcwa""" +977 12 evaluator """rankbased""" +977 13 dataset """kinships""" +977 13 model """tucker""" +977 13 loss """bceaftersigmoid""" +977 13 regularizer """no""" +977 13 optimizer """adam""" +977 13 training_loop """lcwa""" +977 13 evaluator """rankbased""" +977 14 dataset """kinships""" +977 14 model """tucker""" +977 14 loss """bceaftersigmoid""" +977 14 regularizer """no""" +977 14 
optimizer """adam""" +977 14 training_loop """lcwa""" +977 14 evaluator """rankbased""" +977 15 dataset """kinships""" +977 15 model """tucker""" +977 15 loss """bceaftersigmoid""" +977 15 regularizer """no""" +977 15 optimizer """adam""" +977 15 training_loop """lcwa""" +977 15 evaluator """rankbased""" +977 16 dataset """kinships""" +977 16 model """tucker""" +977 16 loss """bceaftersigmoid""" +977 16 regularizer """no""" +977 16 optimizer """adam""" +977 16 training_loop """lcwa""" +977 16 evaluator """rankbased""" +977 17 dataset """kinships""" +977 17 model """tucker""" +977 17 loss """bceaftersigmoid""" +977 17 regularizer """no""" +977 17 optimizer """adam""" +977 17 training_loop """lcwa""" +977 17 evaluator """rankbased""" +977 18 dataset """kinships""" +977 18 model """tucker""" +977 18 loss """bceaftersigmoid""" +977 18 regularizer """no""" +977 18 optimizer """adam""" +977 18 training_loop """lcwa""" +977 18 evaluator """rankbased""" +977 19 dataset """kinships""" +977 19 model """tucker""" +977 19 loss """bceaftersigmoid""" +977 19 regularizer """no""" +977 19 optimizer """adam""" +977 19 training_loop """lcwa""" +977 19 evaluator """rankbased""" +977 20 dataset """kinships""" +977 20 model """tucker""" +977 20 loss """bceaftersigmoid""" +977 20 regularizer """no""" +977 20 optimizer """adam""" +977 20 training_loop """lcwa""" +977 20 evaluator """rankbased""" +977 21 dataset """kinships""" +977 21 model """tucker""" +977 21 loss """bceaftersigmoid""" +977 21 regularizer """no""" +977 21 optimizer """adam""" +977 21 training_loop """lcwa""" +977 21 evaluator """rankbased""" +977 22 dataset """kinships""" +977 22 model """tucker""" +977 22 loss """bceaftersigmoid""" +977 22 regularizer """no""" +977 22 optimizer """adam""" +977 22 training_loop """lcwa""" +977 22 evaluator """rankbased""" +977 23 dataset """kinships""" +977 23 model """tucker""" +977 23 loss """bceaftersigmoid""" +977 23 regularizer """no""" +977 23 optimizer """adam""" +977 23 
training_loop """lcwa""" +977 23 evaluator """rankbased""" +977 24 dataset """kinships""" +977 24 model """tucker""" +977 24 loss """bceaftersigmoid""" +977 24 regularizer """no""" +977 24 optimizer """adam""" +977 24 training_loop """lcwa""" +977 24 evaluator """rankbased""" +977 25 dataset """kinships""" +977 25 model """tucker""" +977 25 loss """bceaftersigmoid""" +977 25 regularizer """no""" +977 25 optimizer """adam""" +977 25 training_loop """lcwa""" +977 25 evaluator """rankbased""" +977 26 dataset """kinships""" +977 26 model """tucker""" +977 26 loss """bceaftersigmoid""" +977 26 regularizer """no""" +977 26 optimizer """adam""" +977 26 training_loop """lcwa""" +977 26 evaluator """rankbased""" +977 27 dataset """kinships""" +977 27 model """tucker""" +977 27 loss """bceaftersigmoid""" +977 27 regularizer """no""" +977 27 optimizer """adam""" +977 27 training_loop """lcwa""" +977 27 evaluator """rankbased""" +977 28 dataset """kinships""" +977 28 model """tucker""" +977 28 loss """bceaftersigmoid""" +977 28 regularizer """no""" +977 28 optimizer """adam""" +977 28 training_loop """lcwa""" +977 28 evaluator """rankbased""" +977 29 dataset """kinships""" +977 29 model """tucker""" +977 29 loss """bceaftersigmoid""" +977 29 regularizer """no""" +977 29 optimizer """adam""" +977 29 training_loop """lcwa""" +977 29 evaluator """rankbased""" +977 30 dataset """kinships""" +977 30 model """tucker""" +977 30 loss """bceaftersigmoid""" +977 30 regularizer """no""" +977 30 optimizer """adam""" +977 30 training_loop """lcwa""" +977 30 evaluator """rankbased""" +977 31 dataset """kinships""" +977 31 model """tucker""" +977 31 loss """bceaftersigmoid""" +977 31 regularizer """no""" +977 31 optimizer """adam""" +977 31 training_loop """lcwa""" +977 31 evaluator """rankbased""" +977 32 dataset """kinships""" +977 32 model """tucker""" +977 32 loss """bceaftersigmoid""" +977 32 regularizer """no""" +977 32 optimizer """adam""" +977 32 training_loop """lcwa""" +977 32 
evaluator """rankbased""" +977 33 dataset """kinships""" +977 33 model """tucker""" +977 33 loss """bceaftersigmoid""" +977 33 regularizer """no""" +977 33 optimizer """adam""" +977 33 training_loop """lcwa""" +977 33 evaluator """rankbased""" +977 34 dataset """kinships""" +977 34 model """tucker""" +977 34 loss """bceaftersigmoid""" +977 34 regularizer """no""" +977 34 optimizer """adam""" +977 34 training_loop """lcwa""" +977 34 evaluator """rankbased""" +977 35 dataset """kinships""" +977 35 model """tucker""" +977 35 loss """bceaftersigmoid""" +977 35 regularizer """no""" +977 35 optimizer """adam""" +977 35 training_loop """lcwa""" +977 35 evaluator """rankbased""" +977 36 dataset """kinships""" +977 36 model """tucker""" +977 36 loss """bceaftersigmoid""" +977 36 regularizer """no""" +977 36 optimizer """adam""" +977 36 training_loop """lcwa""" +977 36 evaluator """rankbased""" +977 37 dataset """kinships""" +977 37 model """tucker""" +977 37 loss """bceaftersigmoid""" +977 37 regularizer """no""" +977 37 optimizer """adam""" +977 37 training_loop """lcwa""" +977 37 evaluator """rankbased""" +977 38 dataset """kinships""" +977 38 model """tucker""" +977 38 loss """bceaftersigmoid""" +977 38 regularizer """no""" +977 38 optimizer """adam""" +977 38 training_loop """lcwa""" +977 38 evaluator """rankbased""" +977 39 dataset """kinships""" +977 39 model """tucker""" +977 39 loss """bceaftersigmoid""" +977 39 regularizer """no""" +977 39 optimizer """adam""" +977 39 training_loop """lcwa""" +977 39 evaluator """rankbased""" +977 40 dataset """kinships""" +977 40 model """tucker""" +977 40 loss """bceaftersigmoid""" +977 40 regularizer """no""" +977 40 optimizer """adam""" +977 40 training_loop """lcwa""" +977 40 evaluator """rankbased""" +977 41 dataset """kinships""" +977 41 model """tucker""" +977 41 loss """bceaftersigmoid""" +977 41 regularizer """no""" +977 41 optimizer """adam""" +977 41 training_loop """lcwa""" +977 41 evaluator """rankbased""" +977 42 
dataset """kinships""" +977 42 model """tucker""" +977 42 loss """bceaftersigmoid""" +977 42 regularizer """no""" +977 42 optimizer """adam""" +977 42 training_loop """lcwa""" +977 42 evaluator """rankbased""" +977 43 dataset """kinships""" +977 43 model """tucker""" +977 43 loss """bceaftersigmoid""" +977 43 regularizer """no""" +977 43 optimizer """adam""" +977 43 training_loop """lcwa""" +977 43 evaluator """rankbased""" +977 44 dataset """kinships""" +977 44 model """tucker""" +977 44 loss """bceaftersigmoid""" +977 44 regularizer """no""" +977 44 optimizer """adam""" +977 44 training_loop """lcwa""" +977 44 evaluator """rankbased""" +977 45 dataset """kinships""" +977 45 model """tucker""" +977 45 loss """bceaftersigmoid""" +977 45 regularizer """no""" +977 45 optimizer """adam""" +977 45 training_loop """lcwa""" +977 45 evaluator """rankbased""" +977 46 dataset """kinships""" +977 46 model """tucker""" +977 46 loss """bceaftersigmoid""" +977 46 regularizer """no""" +977 46 optimizer """adam""" +977 46 training_loop """lcwa""" +977 46 evaluator """rankbased""" +977 47 dataset """kinships""" +977 47 model """tucker""" +977 47 loss """bceaftersigmoid""" +977 47 regularizer """no""" +977 47 optimizer """adam""" +977 47 training_loop """lcwa""" +977 47 evaluator """rankbased""" +977 48 dataset """kinships""" +977 48 model """tucker""" +977 48 loss """bceaftersigmoid""" +977 48 regularizer """no""" +977 48 optimizer """adam""" +977 48 training_loop """lcwa""" +977 48 evaluator """rankbased""" +977 49 dataset """kinships""" +977 49 model """tucker""" +977 49 loss """bceaftersigmoid""" +977 49 regularizer """no""" +977 49 optimizer """adam""" +977 49 training_loop """lcwa""" +977 49 evaluator """rankbased""" +977 50 dataset """kinships""" +977 50 model """tucker""" +977 50 loss """bceaftersigmoid""" +977 50 regularizer """no""" +977 50 optimizer """adam""" +977 50 training_loop """lcwa""" +977 50 evaluator """rankbased""" +977 51 dataset """kinships""" +977 51 model 
"""tucker""" +977 51 loss """bceaftersigmoid""" +977 51 regularizer """no""" +977 51 optimizer """adam""" +977 51 training_loop """lcwa""" +977 51 evaluator """rankbased""" +977 52 dataset """kinships""" +977 52 model """tucker""" +977 52 loss """bceaftersigmoid""" +977 52 regularizer """no""" +977 52 optimizer """adam""" +977 52 training_loop """lcwa""" +977 52 evaluator """rankbased""" +977 53 dataset """kinships""" +977 53 model """tucker""" +977 53 loss """bceaftersigmoid""" +977 53 regularizer """no""" +977 53 optimizer """adam""" +977 53 training_loop """lcwa""" +977 53 evaluator """rankbased""" +977 54 dataset """kinships""" +977 54 model """tucker""" +977 54 loss """bceaftersigmoid""" +977 54 regularizer """no""" +977 54 optimizer """adam""" +977 54 training_loop """lcwa""" +977 54 evaluator """rankbased""" +977 55 dataset """kinships""" +977 55 model """tucker""" +977 55 loss """bceaftersigmoid""" +977 55 regularizer """no""" +977 55 optimizer """adam""" +977 55 training_loop """lcwa""" +977 55 evaluator """rankbased""" +977 56 dataset """kinships""" +977 56 model """tucker""" +977 56 loss """bceaftersigmoid""" +977 56 regularizer """no""" +977 56 optimizer """adam""" +977 56 training_loop """lcwa""" +977 56 evaluator """rankbased""" +977 57 dataset """kinships""" +977 57 model """tucker""" +977 57 loss """bceaftersigmoid""" +977 57 regularizer """no""" +977 57 optimizer """adam""" +977 57 training_loop """lcwa""" +977 57 evaluator """rankbased""" +977 58 dataset """kinships""" +977 58 model """tucker""" +977 58 loss """bceaftersigmoid""" +977 58 regularizer """no""" +977 58 optimizer """adam""" +977 58 training_loop """lcwa""" +977 58 evaluator """rankbased""" +977 59 dataset """kinships""" +977 59 model """tucker""" +977 59 loss """bceaftersigmoid""" +977 59 regularizer """no""" +977 59 optimizer """adam""" +977 59 training_loop """lcwa""" +977 59 evaluator """rankbased""" +977 60 dataset """kinships""" +977 60 model """tucker""" +977 60 loss 
"""bceaftersigmoid""" +977 60 regularizer """no""" +977 60 optimizer """adam""" +977 60 training_loop """lcwa""" +977 60 evaluator """rankbased""" +977 61 dataset """kinships""" +977 61 model """tucker""" +977 61 loss """bceaftersigmoid""" +977 61 regularizer """no""" +977 61 optimizer """adam""" +977 61 training_loop """lcwa""" +977 61 evaluator """rankbased""" +977 62 dataset """kinships""" +977 62 model """tucker""" +977 62 loss """bceaftersigmoid""" +977 62 regularizer """no""" +977 62 optimizer """adam""" +977 62 training_loop """lcwa""" +977 62 evaluator """rankbased""" +977 63 dataset """kinships""" +977 63 model """tucker""" +977 63 loss """bceaftersigmoid""" +977 63 regularizer """no""" +977 63 optimizer """adam""" +977 63 training_loop """lcwa""" +977 63 evaluator """rankbased""" +977 64 dataset """kinships""" +977 64 model """tucker""" +977 64 loss """bceaftersigmoid""" +977 64 regularizer """no""" +977 64 optimizer """adam""" +977 64 training_loop """lcwa""" +977 64 evaluator """rankbased""" +977 65 dataset """kinships""" +977 65 model """tucker""" +977 65 loss """bceaftersigmoid""" +977 65 regularizer """no""" +977 65 optimizer """adam""" +977 65 training_loop """lcwa""" +977 65 evaluator """rankbased""" +977 66 dataset """kinships""" +977 66 model """tucker""" +977 66 loss """bceaftersigmoid""" +977 66 regularizer """no""" +977 66 optimizer """adam""" +977 66 training_loop """lcwa""" +977 66 evaluator """rankbased""" +977 67 dataset """kinships""" +977 67 model """tucker""" +977 67 loss """bceaftersigmoid""" +977 67 regularizer """no""" +977 67 optimizer """adam""" +977 67 training_loop """lcwa""" +977 67 evaluator """rankbased""" +977 68 dataset """kinships""" +977 68 model """tucker""" +977 68 loss """bceaftersigmoid""" +977 68 regularizer """no""" +977 68 optimizer """adam""" +977 68 training_loop """lcwa""" +977 68 evaluator """rankbased""" +977 69 dataset """kinships""" +977 69 model """tucker""" +977 69 loss """bceaftersigmoid""" +977 69 
regularizer """no""" +977 69 optimizer """adam""" +977 69 training_loop """lcwa""" +977 69 evaluator """rankbased""" +977 70 dataset """kinships""" +977 70 model """tucker""" +977 70 loss """bceaftersigmoid""" +977 70 regularizer """no""" +977 70 optimizer """adam""" +977 70 training_loop """lcwa""" +977 70 evaluator """rankbased""" +977 71 dataset """kinships""" +977 71 model """tucker""" +977 71 loss """bceaftersigmoid""" +977 71 regularizer """no""" +977 71 optimizer """adam""" +977 71 training_loop """lcwa""" +977 71 evaluator """rankbased""" +977 72 dataset """kinships""" +977 72 model """tucker""" +977 72 loss """bceaftersigmoid""" +977 72 regularizer """no""" +977 72 optimizer """adam""" +977 72 training_loop """lcwa""" +977 72 evaluator """rankbased""" +977 73 dataset """kinships""" +977 73 model """tucker""" +977 73 loss """bceaftersigmoid""" +977 73 regularizer """no""" +977 73 optimizer """adam""" +977 73 training_loop """lcwa""" +977 73 evaluator """rankbased""" +977 74 dataset """kinships""" +977 74 model """tucker""" +977 74 loss """bceaftersigmoid""" +977 74 regularizer """no""" +977 74 optimizer """adam""" +977 74 training_loop """lcwa""" +977 74 evaluator """rankbased""" +977 75 dataset """kinships""" +977 75 model """tucker""" +977 75 loss """bceaftersigmoid""" +977 75 regularizer """no""" +977 75 optimizer """adam""" +977 75 training_loop """lcwa""" +977 75 evaluator """rankbased""" +977 76 dataset """kinships""" +977 76 model """tucker""" +977 76 loss """bceaftersigmoid""" +977 76 regularizer """no""" +977 76 optimizer """adam""" +977 76 training_loop """lcwa""" +977 76 evaluator """rankbased""" +977 77 dataset """kinships""" +977 77 model """tucker""" +977 77 loss """bceaftersigmoid""" +977 77 regularizer """no""" +977 77 optimizer """adam""" +977 77 training_loop """lcwa""" +977 77 evaluator """rankbased""" +977 78 dataset """kinships""" +977 78 model """tucker""" +977 78 loss """bceaftersigmoid""" +977 78 regularizer """no""" +977 78 
optimizer """adam""" +977 78 training_loop """lcwa""" +977 78 evaluator """rankbased""" +977 79 dataset """kinships""" +977 79 model """tucker""" +977 79 loss """bceaftersigmoid""" +977 79 regularizer """no""" +977 79 optimizer """adam""" +977 79 training_loop """lcwa""" +977 79 evaluator """rankbased""" +977 80 dataset """kinships""" +977 80 model """tucker""" +977 80 loss """bceaftersigmoid""" +977 80 regularizer """no""" +977 80 optimizer """adam""" +977 80 training_loop """lcwa""" +977 80 evaluator """rankbased""" +977 81 dataset """kinships""" +977 81 model """tucker""" +977 81 loss """bceaftersigmoid""" +977 81 regularizer """no""" +977 81 optimizer """adam""" +977 81 training_loop """lcwa""" +977 81 evaluator """rankbased""" +977 82 dataset """kinships""" +977 82 model """tucker""" +977 82 loss """bceaftersigmoid""" +977 82 regularizer """no""" +977 82 optimizer """adam""" +977 82 training_loop """lcwa""" +977 82 evaluator """rankbased""" +977 83 dataset """kinships""" +977 83 model """tucker""" +977 83 loss """bceaftersigmoid""" +977 83 regularizer """no""" +977 83 optimizer """adam""" +977 83 training_loop """lcwa""" +977 83 evaluator """rankbased""" +977 84 dataset """kinships""" +977 84 model """tucker""" +977 84 loss """bceaftersigmoid""" +977 84 regularizer """no""" +977 84 optimizer """adam""" +977 84 training_loop """lcwa""" +977 84 evaluator """rankbased""" +977 85 dataset """kinships""" +977 85 model """tucker""" +977 85 loss """bceaftersigmoid""" +977 85 regularizer """no""" +977 85 optimizer """adam""" +977 85 training_loop """lcwa""" +977 85 evaluator """rankbased""" +977 86 dataset """kinships""" +977 86 model """tucker""" +977 86 loss """bceaftersigmoid""" +977 86 regularizer """no""" +977 86 optimizer """adam""" +977 86 training_loop """lcwa""" +977 86 evaluator """rankbased""" +977 87 dataset """kinships""" +977 87 model """tucker""" +977 87 loss """bceaftersigmoid""" +977 87 regularizer """no""" +977 87 optimizer """adam""" +977 87 
training_loop """lcwa""" +977 87 evaluator """rankbased""" +977 88 dataset """kinships""" +977 88 model """tucker""" +977 88 loss """bceaftersigmoid""" +977 88 regularizer """no""" +977 88 optimizer """adam""" +977 88 training_loop """lcwa""" +977 88 evaluator """rankbased""" +977 89 dataset """kinships""" +977 89 model """tucker""" +977 89 loss """bceaftersigmoid""" +977 89 regularizer """no""" +977 89 optimizer """adam""" +977 89 training_loop """lcwa""" +977 89 evaluator """rankbased""" +977 90 dataset """kinships""" +977 90 model """tucker""" +977 90 loss """bceaftersigmoid""" +977 90 regularizer """no""" +977 90 optimizer """adam""" +977 90 training_loop """lcwa""" +977 90 evaluator """rankbased""" +977 91 dataset """kinships""" +977 91 model """tucker""" +977 91 loss """bceaftersigmoid""" +977 91 regularizer """no""" +977 91 optimizer """adam""" +977 91 training_loop """lcwa""" +977 91 evaluator """rankbased""" +977 92 dataset """kinships""" +977 92 model """tucker""" +977 92 loss """bceaftersigmoid""" +977 92 regularizer """no""" +977 92 optimizer """adam""" +977 92 training_loop """lcwa""" +977 92 evaluator """rankbased""" +977 93 dataset """kinships""" +977 93 model """tucker""" +977 93 loss """bceaftersigmoid""" +977 93 regularizer """no""" +977 93 optimizer """adam""" +977 93 training_loop """lcwa""" +977 93 evaluator """rankbased""" +977 94 dataset """kinships""" +977 94 model """tucker""" +977 94 loss """bceaftersigmoid""" +977 94 regularizer """no""" +977 94 optimizer """adam""" +977 94 training_loop """lcwa""" +977 94 evaluator """rankbased""" +977 95 dataset """kinships""" +977 95 model """tucker""" +977 95 loss """bceaftersigmoid""" +977 95 regularizer """no""" +977 95 optimizer """adam""" +977 95 training_loop """lcwa""" +977 95 evaluator """rankbased""" +977 96 dataset """kinships""" +977 96 model """tucker""" +977 96 loss """bceaftersigmoid""" +977 96 regularizer """no""" +977 96 optimizer """adam""" +977 96 training_loop """lcwa""" +977 96 
evaluator """rankbased""" +977 97 dataset """kinships""" +977 97 model """tucker""" +977 97 loss """bceaftersigmoid""" +977 97 regularizer """no""" +977 97 optimizer """adam""" +977 97 training_loop """lcwa""" +977 97 evaluator """rankbased""" +977 98 dataset """kinships""" +977 98 model """tucker""" +977 98 loss """bceaftersigmoid""" +977 98 regularizer """no""" +977 98 optimizer """adam""" +977 98 training_loop """lcwa""" +977 98 evaluator """rankbased""" +977 99 dataset """kinships""" +977 99 model """tucker""" +977 99 loss """bceaftersigmoid""" +977 99 regularizer """no""" +977 99 optimizer """adam""" +977 99 training_loop """lcwa""" +977 99 evaluator """rankbased""" +977 100 dataset """kinships""" +977 100 model """tucker""" +977 100 loss """bceaftersigmoid""" +977 100 regularizer """no""" +977 100 optimizer """adam""" +977 100 training_loop """lcwa""" +977 100 evaluator """rankbased""" +978 1 model.embedding_dim 2.0 +978 1 model.relation_dim 0.0 +978 1 model.dropout_0 0.18934803482236595 +978 1 model.dropout_1 0.3653672639075396 +978 1 model.dropout_2 0.14096505565558776 +978 1 optimizer.lr 0.05639493271232813 +978 1 training.batch_size 2.0 +978 1 training.label_smoothing 0.6939291777340795 +978 2 model.embedding_dim 0.0 +978 2 model.relation_dim 1.0 +978 2 model.dropout_0 0.3197305646656988 +978 2 model.dropout_1 0.33511754733233184 +978 2 model.dropout_2 0.32270519787143814 +978 2 optimizer.lr 0.004780448401117509 +978 2 training.batch_size 1.0 +978 2 training.label_smoothing 0.05078997871041331 +978 3 model.embedding_dim 2.0 +978 3 model.relation_dim 1.0 +978 3 model.dropout_0 0.3240126552562291 +978 3 model.dropout_1 0.3410521972945217 +978 3 model.dropout_2 0.42907445959529983 +978 3 optimizer.lr 0.06452780668067494 +978 3 training.batch_size 2.0 +978 3 training.label_smoothing 0.008146653414587897 +978 4 model.embedding_dim 0.0 +978 4 model.relation_dim 2.0 +978 4 model.dropout_0 0.3421313047712233 +978 4 model.dropout_1 0.4953016359149806 +978 4 
model.dropout_2 0.3927131943518883 +978 4 optimizer.lr 0.0601395833671203 +978 4 training.batch_size 0.0 +978 4 training.label_smoothing 0.42871760927983055 +978 5 model.embedding_dim 2.0 +978 5 model.relation_dim 2.0 +978 5 model.dropout_0 0.3797397510693101 +978 5 model.dropout_1 0.41560183217464375 +978 5 model.dropout_2 0.1453078840218341 +978 5 optimizer.lr 0.0023286793988223217 +978 5 training.batch_size 1.0 +978 5 training.label_smoothing 0.11641146697447344 +978 6 model.embedding_dim 2.0 +978 6 model.relation_dim 0.0 +978 6 model.dropout_0 0.21431831249264463 +978 6 model.dropout_1 0.11108469139809261 +978 6 model.dropout_2 0.10997503979053853 +978 6 optimizer.lr 0.0010967056758489446 +978 6 training.batch_size 0.0 +978 6 training.label_smoothing 0.06807168869464633 +978 7 model.embedding_dim 1.0 +978 7 model.relation_dim 0.0 +978 7 model.dropout_0 0.3938178581717052 +978 7 model.dropout_1 0.18102951767728817 +978 7 model.dropout_2 0.17054223598715704 +978 7 optimizer.lr 0.0023863483902140403 +978 7 training.batch_size 0.0 +978 7 training.label_smoothing 0.39714446734676967 +978 8 model.embedding_dim 1.0 +978 8 model.relation_dim 1.0 +978 8 model.dropout_0 0.16670480105998242 +978 8 model.dropout_1 0.2139462678116607 +978 8 model.dropout_2 0.40032665332873935 +978 8 optimizer.lr 0.019899481342530227 +978 8 training.batch_size 1.0 +978 8 training.label_smoothing 0.17570831656619398 +978 9 model.embedding_dim 2.0 +978 9 model.relation_dim 0.0 +978 9 model.dropout_0 0.16677255910488853 +978 9 model.dropout_1 0.38383687488345225 +978 9 model.dropout_2 0.27620004583250635 +978 9 optimizer.lr 0.0013686010022365953 +978 9 training.batch_size 0.0 +978 9 training.label_smoothing 0.3475327570285884 +978 10 model.embedding_dim 0.0 +978 10 model.relation_dim 2.0 +978 10 model.dropout_0 0.12921986780496267 +978 10 model.dropout_1 0.40020545529641716 +978 10 model.dropout_2 0.3218317866778188 +978 10 optimizer.lr 0.001517140861276942 +978 10 training.batch_size 0.0 +978 
10 training.label_smoothing 0.04116940871771104 +978 11 model.embedding_dim 2.0 +978 11 model.relation_dim 1.0 +978 11 model.dropout_0 0.22738269489287555 +978 11 model.dropout_1 0.15018529460496022 +978 11 model.dropout_2 0.3422675706661381 +978 11 optimizer.lr 0.0021826643448036452 +978 11 training.batch_size 0.0 +978 11 training.label_smoothing 0.33755058781969854 +978 12 model.embedding_dim 2.0 +978 12 model.relation_dim 2.0 +978 12 model.dropout_0 0.21651055734633395 +978 12 model.dropout_1 0.29354563276415785 +978 12 model.dropout_2 0.3790423916950794 +978 12 optimizer.lr 0.011365466421365062 +978 12 training.batch_size 0.0 +978 12 training.label_smoothing 0.0014636120951887009 +978 13 model.embedding_dim 0.0 +978 13 model.relation_dim 0.0 +978 13 model.dropout_0 0.16428992589517444 +978 13 model.dropout_1 0.3350836295413471 +978 13 model.dropout_2 0.3040752006836226 +978 13 optimizer.lr 0.001043477401540346 +978 13 training.batch_size 1.0 +978 13 training.label_smoothing 0.4400425730809248 +978 14 model.embedding_dim 1.0 +978 14 model.relation_dim 0.0 +978 14 model.dropout_0 0.3191623412246835 +978 14 model.dropout_1 0.21256068065091496 +978 14 model.dropout_2 0.4229381620608186 +978 14 optimizer.lr 0.010590111963390322 +978 14 training.batch_size 0.0 +978 14 training.label_smoothing 0.008803265298974102 +978 15 model.embedding_dim 1.0 +978 15 model.relation_dim 2.0 +978 15 model.dropout_0 0.33871608964206856 +978 15 model.dropout_1 0.10179501845436274 +978 15 model.dropout_2 0.3644254444754835 +978 15 optimizer.lr 0.0024023651195859874 +978 15 training.batch_size 2.0 +978 15 training.label_smoothing 0.10437301085827812 +978 16 model.embedding_dim 1.0 +978 16 model.relation_dim 2.0 +978 16 model.dropout_0 0.15809777127721458 +978 16 model.dropout_1 0.44862625203579465 +978 16 model.dropout_2 0.2499634442406567 +978 16 optimizer.lr 0.0651945551717548 +978 16 training.batch_size 0.0 +978 16 training.label_smoothing 0.009384957422591543 +978 17 
model.embedding_dim 0.0 +978 17 model.relation_dim 2.0 +978 17 model.dropout_0 0.41242012974661757 +978 17 model.dropout_1 0.10963568211775576 +978 17 model.dropout_2 0.345152474276887 +978 17 optimizer.lr 0.0046875836643696725 +978 17 training.batch_size 0.0 +978 17 training.label_smoothing 0.7093733460707214 +978 18 model.embedding_dim 2.0 +978 18 model.relation_dim 2.0 +978 18 model.dropout_0 0.2625319986206875 +978 18 model.dropout_1 0.40181320106348684 +978 18 model.dropout_2 0.17466446398458857 +978 18 optimizer.lr 0.006192709364106441 +978 18 training.batch_size 0.0 +978 18 training.label_smoothing 0.0017059443752616777 +978 19 model.embedding_dim 1.0 +978 19 model.relation_dim 1.0 +978 19 model.dropout_0 0.3268055271216596 +978 19 model.dropout_1 0.3395761845397934 +978 19 model.dropout_2 0.4457934633667228 +978 19 optimizer.lr 0.011198067410227623 +978 19 training.batch_size 2.0 +978 19 training.label_smoothing 0.27213434110190204 +978 20 model.embedding_dim 2.0 +978 20 model.relation_dim 1.0 +978 20 model.dropout_0 0.3067062897464259 +978 20 model.dropout_1 0.2518022840184843 +978 20 model.dropout_2 0.25495040470354946 +978 20 optimizer.lr 0.03216107116778394 +978 20 training.batch_size 0.0 +978 20 training.label_smoothing 0.22274575878482689 +978 21 model.embedding_dim 0.0 +978 21 model.relation_dim 1.0 +978 21 model.dropout_0 0.21433133282291858 +978 21 model.dropout_1 0.24407878692317686 +978 21 model.dropout_2 0.38478096590170213 +978 21 optimizer.lr 0.01906380655117395 +978 21 training.batch_size 0.0 +978 21 training.label_smoothing 0.29866025293594534 +978 22 model.embedding_dim 1.0 +978 22 model.relation_dim 1.0 +978 22 model.dropout_0 0.40228752308215604 +978 22 model.dropout_1 0.10627839938674306 +978 22 model.dropout_2 0.24946248510442645 +978 22 optimizer.lr 0.035896281567137235 +978 22 training.batch_size 0.0 +978 22 training.label_smoothing 0.0010181637429428 +978 23 model.embedding_dim 2.0 +978 23 model.relation_dim 0.0 +978 23 
model.dropout_0 0.18952871492018067 +978 23 model.dropout_1 0.2965875444994765 +978 23 model.dropout_2 0.2605302721606831 +978 23 optimizer.lr 0.013056097623414094 +978 23 training.batch_size 0.0 +978 23 training.label_smoothing 0.03280321708275062 +978 24 model.embedding_dim 0.0 +978 24 model.relation_dim 1.0 +978 24 model.dropout_0 0.46209191488912965 +978 24 model.dropout_1 0.43608604559448727 +978 24 model.dropout_2 0.35985301353690446 +978 24 optimizer.lr 0.0050913203603219145 +978 24 training.batch_size 0.0 +978 24 training.label_smoothing 0.004649678445635634 +978 25 model.embedding_dim 2.0 +978 25 model.relation_dim 1.0 +978 25 model.dropout_0 0.4183432945690377 +978 25 model.dropout_1 0.37253329764457443 +978 25 model.dropout_2 0.18267742238852724 +978 25 optimizer.lr 0.03212156665603926 +978 25 training.batch_size 2.0 +978 25 training.label_smoothing 0.41510525108683144 +978 26 model.embedding_dim 0.0 +978 26 model.relation_dim 0.0 +978 26 model.dropout_0 0.4652855915168879 +978 26 model.dropout_1 0.31732256886432886 +978 26 model.dropout_2 0.18092586923213505 +978 26 optimizer.lr 0.0013562852895021573 +978 26 training.batch_size 0.0 +978 26 training.label_smoothing 0.0825926887594325 +978 27 model.embedding_dim 1.0 +978 27 model.relation_dim 2.0 +978 27 model.dropout_0 0.3803731218438728 +978 27 model.dropout_1 0.4847365521920176 +978 27 model.dropout_2 0.32432759637645264 +978 27 optimizer.lr 0.07069566612225593 +978 27 training.batch_size 0.0 +978 27 training.label_smoothing 0.04987851946458031 +978 28 model.embedding_dim 0.0 +978 28 model.relation_dim 2.0 +978 28 model.dropout_0 0.28731485677627383 +978 28 model.dropout_1 0.4820869464530988 +978 28 model.dropout_2 0.2606505954005659 +978 28 optimizer.lr 0.03029000388144922 +978 28 training.batch_size 1.0 +978 28 training.label_smoothing 0.02262716015007749 +978 29 model.embedding_dim 2.0 +978 29 model.relation_dim 2.0 +978 29 model.dropout_0 0.18909127998617375 +978 29 model.dropout_1 
0.3335458181242431 +978 29 model.dropout_2 0.14464032563559037 +978 29 optimizer.lr 0.09832723028950108 +978 29 training.batch_size 2.0 +978 29 training.label_smoothing 0.020816162350644644 +978 30 model.embedding_dim 2.0 +978 30 model.relation_dim 2.0 +978 30 model.dropout_0 0.3016124278187063 +978 30 model.dropout_1 0.2093974732745577 +978 30 model.dropout_2 0.4090816276868042 +978 30 optimizer.lr 0.0013053727635100687 +978 30 training.batch_size 1.0 +978 30 training.label_smoothing 0.0018190134861526621 +978 31 model.embedding_dim 0.0 +978 31 model.relation_dim 2.0 +978 31 model.dropout_0 0.12023911892045094 +978 31 model.dropout_1 0.18101752118135203 +978 31 model.dropout_2 0.2603936837600053 +978 31 optimizer.lr 0.03989756253325578 +978 31 training.batch_size 2.0 +978 31 training.label_smoothing 0.9570430015648952 +978 32 model.embedding_dim 1.0 +978 32 model.relation_dim 1.0 +978 32 model.dropout_0 0.13045774634026489 +978 32 model.dropout_1 0.14421946762300106 +978 32 model.dropout_2 0.3967952202618278 +978 32 optimizer.lr 0.001605225137000122 +978 32 training.batch_size 1.0 +978 32 training.label_smoothing 0.03575244875275173 +978 33 model.embedding_dim 2.0 +978 33 model.relation_dim 0.0 +978 33 model.dropout_0 0.18139892761719661 +978 33 model.dropout_1 0.46474877944321674 +978 33 model.dropout_2 0.2335368958850561 +978 33 optimizer.lr 0.0012985685126101652 +978 33 training.batch_size 0.0 +978 33 training.label_smoothing 0.08640706463441512 +978 34 model.embedding_dim 0.0 +978 34 model.relation_dim 1.0 +978 34 model.dropout_0 0.3661873252836069 +978 34 model.dropout_1 0.291213668995715 +978 34 model.dropout_2 0.3469620797351681 +978 34 optimizer.lr 0.04730623794573513 +978 34 training.batch_size 0.0 +978 34 training.label_smoothing 0.013104170391478868 +978 35 model.embedding_dim 2.0 +978 35 model.relation_dim 1.0 +978 35 model.dropout_0 0.245079701575792 +978 35 model.dropout_1 0.10751162763486422 +978 35 model.dropout_2 0.13308607023436148 +978 35 
optimizer.lr 0.020690338529377444 +978 35 training.batch_size 0.0 +978 35 training.label_smoothing 0.10961082542906057 +978 36 model.embedding_dim 0.0 +978 36 model.relation_dim 0.0 +978 36 model.dropout_0 0.1522817133347285 +978 36 model.dropout_1 0.4250200537397138 +978 36 model.dropout_2 0.2397452598184947 +978 36 optimizer.lr 0.001766060563512624 +978 36 training.batch_size 0.0 +978 36 training.label_smoothing 0.0777705540159131 +978 37 model.embedding_dim 2.0 +978 37 model.relation_dim 2.0 +978 37 model.dropout_0 0.4395911351003095 +978 37 model.dropout_1 0.495937325141997 +978 37 model.dropout_2 0.24364865749663234 +978 37 optimizer.lr 0.002565625577700839 +978 37 training.batch_size 1.0 +978 37 training.label_smoothing 0.5219075435959721 +978 38 model.embedding_dim 1.0 +978 38 model.relation_dim 2.0 +978 38 model.dropout_0 0.4717296318641687 +978 38 model.dropout_1 0.2004229447242415 +978 38 model.dropout_2 0.2896327580121875 +978 38 optimizer.lr 0.0014571189108707279 +978 38 training.batch_size 2.0 +978 38 training.label_smoothing 0.9253404235973985 +978 39 model.embedding_dim 1.0 +978 39 model.relation_dim 1.0 +978 39 model.dropout_0 0.18515032608493853 +978 39 model.dropout_1 0.23348016277949443 +978 39 model.dropout_2 0.44903395774315147 +978 39 optimizer.lr 0.006641623363082323 +978 39 training.batch_size 2.0 +978 39 training.label_smoothing 0.01819869740362822 +978 40 model.embedding_dim 0.0 +978 40 model.relation_dim 1.0 +978 40 model.dropout_0 0.4229895919150755 +978 40 model.dropout_1 0.22711915138081162 +978 40 model.dropout_2 0.37841229622235617 +978 40 optimizer.lr 0.008876070821370699 +978 40 training.batch_size 1.0 +978 40 training.label_smoothing 0.0037924785455734473 +978 41 model.embedding_dim 0.0 +978 41 model.relation_dim 0.0 +978 41 model.dropout_0 0.4642817985841467 +978 41 model.dropout_1 0.2155020650805644 +978 41 model.dropout_2 0.1210316713948692 +978 41 optimizer.lr 0.07836665004603174 +978 41 training.batch_size 2.0 +978 41 
training.label_smoothing 0.06071100197249852 +978 42 model.embedding_dim 2.0 +978 42 model.relation_dim 1.0 +978 42 model.dropout_0 0.2649695535481607 +978 42 model.dropout_1 0.1317036625826353 +978 42 model.dropout_2 0.4412796719636907 +978 42 optimizer.lr 0.001448843998532923 +978 42 training.batch_size 0.0 +978 42 training.label_smoothing 0.0109378043821728 +978 43 model.embedding_dim 1.0 +978 43 model.relation_dim 0.0 +978 43 model.dropout_0 0.31165275041703705 +978 43 model.dropout_1 0.2735614037813506 +978 43 model.dropout_2 0.3489725880691273 +978 43 optimizer.lr 0.004656620177876758 +978 43 training.batch_size 1.0 +978 43 training.label_smoothing 0.13981302565371662 +978 44 model.embedding_dim 2.0 +978 44 model.relation_dim 1.0 +978 44 model.dropout_0 0.1116813435326256 +978 44 model.dropout_1 0.4004886931131139 +978 44 model.dropout_2 0.22075646452262343 +978 44 optimizer.lr 0.006625967241481149 +978 44 training.batch_size 1.0 +978 44 training.label_smoothing 0.6779027338513891 +978 45 model.embedding_dim 1.0 +978 45 model.relation_dim 2.0 +978 45 model.dropout_0 0.12130849375002067 +978 45 model.dropout_1 0.34979923666127605 +978 45 model.dropout_2 0.15660173624125828 +978 45 optimizer.lr 0.009838786878013435 +978 45 training.batch_size 0.0 +978 45 training.label_smoothing 0.05759062570722492 +978 46 model.embedding_dim 0.0 +978 46 model.relation_dim 1.0 +978 46 model.dropout_0 0.4544739580888543 +978 46 model.dropout_1 0.4729611958236844 +978 46 model.dropout_2 0.21815828329881462 +978 46 optimizer.lr 0.062071378883782856 +978 46 training.batch_size 1.0 +978 46 training.label_smoothing 0.011277233737629287 +978 47 model.embedding_dim 1.0 +978 47 model.relation_dim 1.0 +978 47 model.dropout_0 0.43387049598768934 +978 47 model.dropout_1 0.23959771811134162 +978 47 model.dropout_2 0.15023311010567064 +978 47 optimizer.lr 0.0987438120818281 +978 47 training.batch_size 1.0 +978 47 training.label_smoothing 0.002788059605949565 +978 48 model.embedding_dim 0.0 
+978 48 model.relation_dim 0.0 +978 48 model.dropout_0 0.2055765989092636 +978 48 model.dropout_1 0.16706853969717855 +978 48 model.dropout_2 0.34637149969705827 +978 48 optimizer.lr 0.007981142974674131 +978 48 training.batch_size 2.0 +978 48 training.label_smoothing 0.45422534766101935 +978 49 model.embedding_dim 0.0 +978 49 model.relation_dim 0.0 +978 49 model.dropout_0 0.2630334507542536 +978 49 model.dropout_1 0.14050179317724218 +978 49 model.dropout_2 0.2962613651653313 +978 49 optimizer.lr 0.0014046373456420196 +978 49 training.batch_size 0.0 +978 49 training.label_smoothing 0.02305281227831082 +978 50 model.embedding_dim 2.0 +978 50 model.relation_dim 2.0 +978 50 model.dropout_0 0.29586420598309016 +978 50 model.dropout_1 0.41799519618873215 +978 50 model.dropout_2 0.29712066692294814 +978 50 optimizer.lr 0.04942943596926137 +978 50 training.batch_size 0.0 +978 50 training.label_smoothing 0.12329133169507342 +978 51 model.embedding_dim 2.0 +978 51 model.relation_dim 1.0 +978 51 model.dropout_0 0.41213759178552967 +978 51 model.dropout_1 0.18458386980705754 +978 51 model.dropout_2 0.3872173010627926 +978 51 optimizer.lr 0.0011483116642642842 +978 51 training.batch_size 2.0 +978 51 training.label_smoothing 0.04065195866390297 +978 52 model.embedding_dim 2.0 +978 52 model.relation_dim 0.0 +978 52 model.dropout_0 0.45174726374288054 +978 52 model.dropout_1 0.3747335370278605 +978 52 model.dropout_2 0.3135087598869432 +978 52 optimizer.lr 0.006707543059846339 +978 52 training.batch_size 2.0 +978 52 training.label_smoothing 0.023305824073696524 +978 53 model.embedding_dim 0.0 +978 53 model.relation_dim 2.0 +978 53 model.dropout_0 0.20960301302978937 +978 53 model.dropout_1 0.3732857079689139 +978 53 model.dropout_2 0.39127011344606005 +978 53 optimizer.lr 0.00479355765126933 +978 53 training.batch_size 2.0 +978 53 training.label_smoothing 0.13109997079549626 +978 54 model.embedding_dim 0.0 +978 54 model.relation_dim 1.0 +978 54 model.dropout_0 
0.15365993839964226 +978 54 model.dropout_1 0.2672146620138846 +978 54 model.dropout_2 0.4940452842227264 +978 54 optimizer.lr 0.0046260679814373006 +978 54 training.batch_size 1.0 +978 54 training.label_smoothing 0.5915796038662501 +978 55 model.embedding_dim 0.0 +978 55 model.relation_dim 1.0 +978 55 model.dropout_0 0.15204977677263126 +978 55 model.dropout_1 0.468358430671871 +978 55 model.dropout_2 0.42812727320273164 +978 55 optimizer.lr 0.024634342104928975 +978 55 training.batch_size 0.0 +978 55 training.label_smoothing 0.0015586311644453807 +978 56 model.embedding_dim 2.0 +978 56 model.relation_dim 2.0 +978 56 model.dropout_0 0.3486534886331849 +978 56 model.dropout_1 0.4084891954129055 +978 56 model.dropout_2 0.1396726707045448 +978 56 optimizer.lr 0.0021614194511236488 +978 56 training.batch_size 0.0 +978 56 training.label_smoothing 0.06335712073478214 +978 57 model.embedding_dim 0.0 +978 57 model.relation_dim 0.0 +978 57 model.dropout_0 0.13928348021156234 +978 57 model.dropout_1 0.4494862996918456 +978 57 model.dropout_2 0.32828753565473257 +978 57 optimizer.lr 0.0021319108435969157 +978 57 training.batch_size 1.0 +978 57 training.label_smoothing 0.22951798007266783 +978 58 model.embedding_dim 0.0 +978 58 model.relation_dim 2.0 +978 58 model.dropout_0 0.43264798274747013 +978 58 model.dropout_1 0.19125186990026402 +978 58 model.dropout_2 0.4841231686155942 +978 58 optimizer.lr 0.002884562968519968 +978 58 training.batch_size 1.0 +978 58 training.label_smoothing 0.024144373959464744 +978 59 model.embedding_dim 0.0 +978 59 model.relation_dim 2.0 +978 59 model.dropout_0 0.4212600870675834 +978 59 model.dropout_1 0.30770195853192833 +978 59 model.dropout_2 0.4745387893830845 +978 59 optimizer.lr 0.013313494799866252 +978 59 training.batch_size 0.0 +978 59 training.label_smoothing 0.00859601057074011 +978 60 model.embedding_dim 2.0 +978 60 model.relation_dim 2.0 +978 60 model.dropout_0 0.2771471855308289 +978 60 model.dropout_1 0.3283878372358505 +978 60 
model.dropout_2 0.36041885053984346 +978 60 optimizer.lr 0.011001483852457493 +978 60 training.batch_size 1.0 +978 60 training.label_smoothing 0.001629061836424711 +978 61 model.embedding_dim 0.0 +978 61 model.relation_dim 1.0 +978 61 model.dropout_0 0.4513283866528346 +978 61 model.dropout_1 0.41491963112441965 +978 61 model.dropout_2 0.49653299436434856 +978 61 optimizer.lr 0.0017582352980459754 +978 61 training.batch_size 2.0 +978 61 training.label_smoothing 0.17558469927995812 +978 62 model.embedding_dim 1.0 +978 62 model.relation_dim 2.0 +978 62 model.dropout_0 0.24028282773513024 +978 62 model.dropout_1 0.45989774381933307 +978 62 model.dropout_2 0.2827325552539291 +978 62 optimizer.lr 0.037395767249340454 +978 62 training.batch_size 2.0 +978 62 training.label_smoothing 0.009070249406993561 +978 63 model.embedding_dim 1.0 +978 63 model.relation_dim 1.0 +978 63 model.dropout_0 0.38328749167356835 +978 63 model.dropout_1 0.319166834022639 +978 63 model.dropout_2 0.2676590753362293 +978 63 optimizer.lr 0.004162270731019836 +978 63 training.batch_size 0.0 +978 63 training.label_smoothing 0.0012256788941711744 +978 64 model.embedding_dim 0.0 +978 64 model.relation_dim 2.0 +978 64 model.dropout_0 0.4254402282028699 +978 64 model.dropout_1 0.2746251487524317 +978 64 model.dropout_2 0.3212027205332157 +978 64 optimizer.lr 0.003491613604864998 +978 64 training.batch_size 2.0 +978 64 training.label_smoothing 0.023179528063622733 +978 65 model.embedding_dim 0.0 +978 65 model.relation_dim 0.0 +978 65 model.dropout_0 0.3601691196835183 +978 65 model.dropout_1 0.21547172854585261 +978 65 model.dropout_2 0.2961330226518168 +978 65 optimizer.lr 0.002627303960027473 +978 65 training.batch_size 0.0 +978 65 training.label_smoothing 0.016492296710464472 +978 66 model.embedding_dim 1.0 +978 66 model.relation_dim 2.0 +978 66 model.dropout_0 0.18429299651195952 +978 66 model.dropout_1 0.22145535112918738 +978 66 model.dropout_2 0.18178828873470898 +978 66 optimizer.lr 
0.008686325849257869 +978 66 training.batch_size 2.0 +978 66 training.label_smoothing 0.025028000905653464 +978 67 model.embedding_dim 0.0 +978 67 model.relation_dim 2.0 +978 67 model.dropout_0 0.20294285164750403 +978 67 model.dropout_1 0.42945573032263135 +978 67 model.dropout_2 0.2645618251744902 +978 67 optimizer.lr 0.0701597537914461 +978 67 training.batch_size 0.0 +978 67 training.label_smoothing 0.34702442759781094 +978 68 model.embedding_dim 1.0 +978 68 model.relation_dim 0.0 +978 68 model.dropout_0 0.39354084247693605 +978 68 model.dropout_1 0.15467149851628462 +978 68 model.dropout_2 0.385264962546614 +978 68 optimizer.lr 0.006906916970564825 +978 68 training.batch_size 1.0 +978 68 training.label_smoothing 0.0010057888052330852 +978 69 model.embedding_dim 2.0 +978 69 model.relation_dim 1.0 +978 69 model.dropout_0 0.37527429453764904 +978 69 model.dropout_1 0.4327542814063554 +978 69 model.dropout_2 0.2632584747200117 +978 69 optimizer.lr 0.016716761892081346 +978 69 training.batch_size 1.0 +978 69 training.label_smoothing 0.0022239083054224422 +978 70 model.embedding_dim 2.0 +978 70 model.relation_dim 0.0 +978 70 model.dropout_0 0.21281307902453578 +978 70 model.dropout_1 0.35647017440100615 +978 70 model.dropout_2 0.29021912679834183 +978 70 optimizer.lr 0.004583009747791376 +978 70 training.batch_size 1.0 +978 70 training.label_smoothing 0.006899167754272912 +978 71 model.embedding_dim 2.0 +978 71 model.relation_dim 2.0 +978 71 model.dropout_0 0.1778308165875831 +978 71 model.dropout_1 0.15465672284862686 +978 71 model.dropout_2 0.28429869714840544 +978 71 optimizer.lr 0.012798802139549854 +978 71 training.batch_size 1.0 +978 71 training.label_smoothing 0.04951579943806588 +978 72 model.embedding_dim 1.0 +978 72 model.relation_dim 2.0 +978 72 model.dropout_0 0.2637258900140494 +978 72 model.dropout_1 0.3936572227406969 +978 72 model.dropout_2 0.23288122056413246 +978 72 optimizer.lr 0.047585626441334405 +978 72 training.batch_size 1.0 +978 72 
training.label_smoothing 0.010085076104301519 +978 73 model.embedding_dim 0.0 +978 73 model.relation_dim 2.0 +978 73 model.dropout_0 0.33727749007011276 +978 73 model.dropout_1 0.19799194814295673 +978 73 model.dropout_2 0.4876702572491465 +978 73 optimizer.lr 0.010223784883384424 +978 73 training.batch_size 2.0 +978 73 training.label_smoothing 0.04788602992135902 +978 74 model.embedding_dim 1.0 +978 74 model.relation_dim 1.0 +978 74 model.dropout_0 0.281696871938279 +978 74 model.dropout_1 0.19147146464123216 +978 74 model.dropout_2 0.49467230896904996 +978 74 optimizer.lr 0.015073456915991403 +978 74 training.batch_size 2.0 +978 74 training.label_smoothing 0.45647909885713406 +978 75 model.embedding_dim 0.0 +978 75 model.relation_dim 0.0 +978 75 model.dropout_0 0.293800507484916 +978 75 model.dropout_1 0.13192540640322056 +978 75 model.dropout_2 0.27048591594102817 +978 75 optimizer.lr 0.03771148544078919 +978 75 training.batch_size 0.0 +978 75 training.label_smoothing 0.0026299333573109244 +978 76 model.embedding_dim 0.0 +978 76 model.relation_dim 1.0 +978 76 model.dropout_0 0.1505671792397529 +978 76 model.dropout_1 0.46048263574713877 +978 76 model.dropout_2 0.2733703057814586 +978 76 optimizer.lr 0.002103356760978584 +978 76 training.batch_size 0.0 +978 76 training.label_smoothing 0.006001395260488337 +978 77 model.embedding_dim 1.0 +978 77 model.relation_dim 2.0 +978 77 model.dropout_0 0.1762975862907663 +978 77 model.dropout_1 0.1516574641673373 +978 77 model.dropout_2 0.17452127621566846 +978 77 optimizer.lr 0.00172588164636875 +978 77 training.batch_size 2.0 +978 77 training.label_smoothing 0.0034558829745663924 +978 78 model.embedding_dim 2.0 +978 78 model.relation_dim 1.0 +978 78 model.dropout_0 0.18006957426426973 +978 78 model.dropout_1 0.2433285904472546 +978 78 model.dropout_2 0.2986997007220586 +978 78 optimizer.lr 0.007438711309460224 +978 78 training.batch_size 0.0 +978 78 training.label_smoothing 0.23549975985955063 +978 79 model.embedding_dim 
0.0 +978 79 model.relation_dim 0.0 +978 79 model.dropout_0 0.3067818571208945 +978 79 model.dropout_1 0.4440996209591991 +978 79 model.dropout_2 0.34430328562912216 +978 79 optimizer.lr 0.009032758300428026 +978 79 training.batch_size 1.0 +978 79 training.label_smoothing 0.9875044189483616 +978 80 model.embedding_dim 0.0 +978 80 model.relation_dim 2.0 +978 80 model.dropout_0 0.4397845953555714 +978 80 model.dropout_1 0.44685121817542517 +978 80 model.dropout_2 0.23317227061652446 +978 80 optimizer.lr 0.00737911072774501 +978 80 training.batch_size 0.0 +978 80 training.label_smoothing 0.25169910397572975 +978 81 model.embedding_dim 1.0 +978 81 model.relation_dim 2.0 +978 81 model.dropout_0 0.3391698550361868 +978 81 model.dropout_1 0.32786688391749386 +978 81 model.dropout_2 0.30776175169544473 +978 81 optimizer.lr 0.018941966919522494 +978 81 training.batch_size 0.0 +978 81 training.label_smoothing 0.017124082838417515 +978 82 model.embedding_dim 1.0 +978 82 model.relation_dim 1.0 +978 82 model.dropout_0 0.2214089329057798 +978 82 model.dropout_1 0.20553260275578855 +978 82 model.dropout_2 0.24714144921338055 +978 82 optimizer.lr 0.0968177133134923 +978 82 training.batch_size 0.0 +978 82 training.label_smoothing 0.0021937712748333854 +978 83 model.embedding_dim 0.0 +978 83 model.relation_dim 1.0 +978 83 model.dropout_0 0.47271960132294066 +978 83 model.dropout_1 0.18676513539100148 +978 83 model.dropout_2 0.13604969419437354 +978 83 optimizer.lr 0.0020278904882000456 +978 83 training.batch_size 0.0 +978 83 training.label_smoothing 0.7253251012576087 +978 84 model.embedding_dim 0.0 +978 84 model.relation_dim 2.0 +978 84 model.dropout_0 0.3805471320106368 +978 84 model.dropout_1 0.2192366716401252 +978 84 model.dropout_2 0.18902637713400097 +978 84 optimizer.lr 0.06775966232610693 +978 84 training.batch_size 2.0 +978 84 training.label_smoothing 0.003528097567271092 +978 85 model.embedding_dim 1.0 +978 85 model.relation_dim 1.0 +978 85 model.dropout_0 
0.17501587521177792 +978 85 model.dropout_1 0.3241764383468077 +978 85 model.dropout_2 0.24038366413273168 +978 85 optimizer.lr 0.0017380373329683608 +978 85 training.batch_size 0.0 +978 85 training.label_smoothing 0.5002527195873312 +978 86 model.embedding_dim 2.0 +978 86 model.relation_dim 0.0 +978 86 model.dropout_0 0.22726333441909607 +978 86 model.dropout_1 0.4916651485006871 +978 86 model.dropout_2 0.39775518095320883 +978 86 optimizer.lr 0.07814238771323237 +978 86 training.batch_size 1.0 +978 86 training.label_smoothing 0.008981068127225671 +978 87 model.embedding_dim 0.0 +978 87 model.relation_dim 2.0 +978 87 model.dropout_0 0.3549855809721889 +978 87 model.dropout_1 0.49787572580010725 +978 87 model.dropout_2 0.3200510909149316 +978 87 optimizer.lr 0.007988135488267397 +978 87 training.batch_size 0.0 +978 87 training.label_smoothing 0.006509032750876136 +978 88 model.embedding_dim 0.0 +978 88 model.relation_dim 1.0 +978 88 model.dropout_0 0.4974250232494001 +978 88 model.dropout_1 0.4897132177631523 +978 88 model.dropout_2 0.18673540623762494 +978 88 optimizer.lr 0.0023165044410955677 +978 88 training.batch_size 1.0 +978 88 training.label_smoothing 0.0012220324774033504 +978 89 model.embedding_dim 1.0 +978 89 model.relation_dim 2.0 +978 89 model.dropout_0 0.1914240122502993 +978 89 model.dropout_1 0.3469577587205423 +978 89 model.dropout_2 0.1501237161971735 +978 89 optimizer.lr 0.01568582359295027 +978 89 training.batch_size 2.0 +978 89 training.label_smoothing 0.09565963753422961 +978 90 model.embedding_dim 0.0 +978 90 model.relation_dim 2.0 +978 90 model.dropout_0 0.4465324948511625 +978 90 model.dropout_1 0.22725725954160328 +978 90 model.dropout_2 0.3015696571977442 +978 90 optimizer.lr 0.0021569318651578273 +978 90 training.batch_size 2.0 +978 90 training.label_smoothing 0.016088616644469755 +978 91 model.embedding_dim 1.0 +978 91 model.relation_dim 1.0 +978 91 model.dropout_0 0.24031818549547254 +978 91 model.dropout_1 0.24624100693872975 +978 91 
model.dropout_2 0.45267081384516744 +978 91 optimizer.lr 0.0013817652784770527 +978 91 training.batch_size 0.0 +978 91 training.label_smoothing 0.007307363386679547 +978 92 model.embedding_dim 0.0 +978 92 model.relation_dim 2.0 +978 92 model.dropout_0 0.1753956092691007 +978 92 model.dropout_1 0.4874341094029024 +978 92 model.dropout_2 0.2712896097361255 +978 92 optimizer.lr 0.03408580379109323 +978 92 training.batch_size 2.0 +978 92 training.label_smoothing 0.6428339469621159 +978 93 model.embedding_dim 0.0 +978 93 model.relation_dim 1.0 +978 93 model.dropout_0 0.4232248975466938 +978 93 model.dropout_1 0.3063297147754134 +978 93 model.dropout_2 0.40816947910644713 +978 93 optimizer.lr 0.004292531610253152 +978 93 training.batch_size 0.0 +978 93 training.label_smoothing 0.6374167352174409 +978 94 model.embedding_dim 1.0 +978 94 model.relation_dim 0.0 +978 94 model.dropout_0 0.12361842961258414 +978 94 model.dropout_1 0.18789094365697198 +978 94 model.dropout_2 0.2320272796167022 +978 94 optimizer.lr 0.003262836737141667 +978 94 training.batch_size 2.0 +978 94 training.label_smoothing 0.6306652866372054 +978 95 model.embedding_dim 2.0 +978 95 model.relation_dim 2.0 +978 95 model.dropout_0 0.16264772778118017 +978 95 model.dropout_1 0.35348390037587785 +978 95 model.dropout_2 0.4831824336946338 +978 95 optimizer.lr 0.044884092393315915 +978 95 training.batch_size 1.0 +978 95 training.label_smoothing 0.007735294752478997 +978 96 model.embedding_dim 1.0 +978 96 model.relation_dim 1.0 +978 96 model.dropout_0 0.22590671827231323 +978 96 model.dropout_1 0.32244254624335206 +978 96 model.dropout_2 0.2693931696284927 +978 96 optimizer.lr 0.011577488457180254 +978 96 training.batch_size 1.0 +978 96 training.label_smoothing 0.10099494315297042 +978 97 model.embedding_dim 2.0 +978 97 model.relation_dim 2.0 +978 97 model.dropout_0 0.25474217840509533 +978 97 model.dropout_1 0.41631337272210056 +978 97 model.dropout_2 0.3904747342906755 +978 97 optimizer.lr 0.06786269772424687 
+978 97 training.batch_size 2.0 +978 97 training.label_smoothing 0.7249941085870919 +978 98 model.embedding_dim 2.0 +978 98 model.relation_dim 0.0 +978 98 model.dropout_0 0.21732993923695518 +978 98 model.dropout_1 0.1318651783095904 +978 98 model.dropout_2 0.20798388914515453 +978 98 optimizer.lr 0.0025865180733201944 +978 98 training.batch_size 1.0 +978 98 training.label_smoothing 0.8108813676342715 +978 99 model.embedding_dim 2.0 +978 99 model.relation_dim 0.0 +978 99 model.dropout_0 0.14999270782255994 +978 99 model.dropout_1 0.3571915039949385 +978 99 model.dropout_2 0.10062601025210847 +978 99 optimizer.lr 0.010277253917048429 +978 99 training.batch_size 0.0 +978 99 training.label_smoothing 0.1047144050243286 +978 100 model.embedding_dim 0.0 +978 100 model.relation_dim 1.0 +978 100 model.dropout_0 0.4678258743222316 +978 100 model.dropout_1 0.39462591182549384 +978 100 model.dropout_2 0.342907827304274 +978 100 optimizer.lr 0.04284985467108941 +978 100 training.batch_size 2.0 +978 100 training.label_smoothing 0.08309386988370321 +978 1 dataset """kinships""" +978 1 model """tucker""" +978 1 loss """softplus""" +978 1 regularizer """no""" +978 1 optimizer """adam""" +978 1 training_loop """lcwa""" +978 1 evaluator """rankbased""" +978 2 dataset """kinships""" +978 2 model """tucker""" +978 2 loss """softplus""" +978 2 regularizer """no""" +978 2 optimizer """adam""" +978 2 training_loop """lcwa""" +978 2 evaluator """rankbased""" +978 3 dataset """kinships""" +978 3 model """tucker""" +978 3 loss """softplus""" +978 3 regularizer """no""" +978 3 optimizer """adam""" +978 3 training_loop """lcwa""" +978 3 evaluator """rankbased""" +978 4 dataset """kinships""" +978 4 model """tucker""" +978 4 loss """softplus""" +978 4 regularizer """no""" +978 4 optimizer """adam""" +978 4 training_loop """lcwa""" +978 4 evaluator """rankbased""" +978 5 dataset """kinships""" +978 5 model """tucker""" +978 5 loss """softplus""" +978 5 regularizer """no""" +978 5 optimizer 
"""adam""" +978 5 training_loop """lcwa""" +978 5 evaluator """rankbased""" +978 6 dataset """kinships""" +978 6 model """tucker""" +978 6 loss """softplus""" +978 6 regularizer """no""" +978 6 optimizer """adam""" +978 6 training_loop """lcwa""" +978 6 evaluator """rankbased""" +978 7 dataset """kinships""" +978 7 model """tucker""" +978 7 loss """softplus""" +978 7 regularizer """no""" +978 7 optimizer """adam""" +978 7 training_loop """lcwa""" +978 7 evaluator """rankbased""" +978 8 dataset """kinships""" +978 8 model """tucker""" +978 8 loss """softplus""" +978 8 regularizer """no""" +978 8 optimizer """adam""" +978 8 training_loop """lcwa""" +978 8 evaluator """rankbased""" +978 9 dataset """kinships""" +978 9 model """tucker""" +978 9 loss """softplus""" +978 9 regularizer """no""" +978 9 optimizer """adam""" +978 9 training_loop """lcwa""" +978 9 evaluator """rankbased""" +978 10 dataset """kinships""" +978 10 model """tucker""" +978 10 loss """softplus""" +978 10 regularizer """no""" +978 10 optimizer """adam""" +978 10 training_loop """lcwa""" +978 10 evaluator """rankbased""" +978 11 dataset """kinships""" +978 11 model """tucker""" +978 11 loss """softplus""" +978 11 regularizer """no""" +978 11 optimizer """adam""" +978 11 training_loop """lcwa""" +978 11 evaluator """rankbased""" +978 12 dataset """kinships""" +978 12 model """tucker""" +978 12 loss """softplus""" +978 12 regularizer """no""" +978 12 optimizer """adam""" +978 12 training_loop """lcwa""" +978 12 evaluator """rankbased""" +978 13 dataset """kinships""" +978 13 model """tucker""" +978 13 loss """softplus""" +978 13 regularizer """no""" +978 13 optimizer """adam""" +978 13 training_loop """lcwa""" +978 13 evaluator """rankbased""" +978 14 dataset """kinships""" +978 14 model """tucker""" +978 14 loss """softplus""" +978 14 regularizer """no""" +978 14 optimizer """adam""" +978 14 training_loop """lcwa""" +978 14 evaluator """rankbased""" +978 15 dataset """kinships""" +978 15 model 
"""tucker""" +978 15 loss """softplus""" +978 15 regularizer """no""" +978 15 optimizer """adam""" +978 15 training_loop """lcwa""" +978 15 evaluator """rankbased""" +978 16 dataset """kinships""" +978 16 model """tucker""" +978 16 loss """softplus""" +978 16 regularizer """no""" +978 16 optimizer """adam""" +978 16 training_loop """lcwa""" +978 16 evaluator """rankbased""" +978 17 dataset """kinships""" +978 17 model """tucker""" +978 17 loss """softplus""" +978 17 regularizer """no""" +978 17 optimizer """adam""" +978 17 training_loop """lcwa""" +978 17 evaluator """rankbased""" +978 18 dataset """kinships""" +978 18 model """tucker""" +978 18 loss """softplus""" +978 18 regularizer """no""" +978 18 optimizer """adam""" +978 18 training_loop """lcwa""" +978 18 evaluator """rankbased""" +978 19 dataset """kinships""" +978 19 model """tucker""" +978 19 loss """softplus""" +978 19 regularizer """no""" +978 19 optimizer """adam""" +978 19 training_loop """lcwa""" +978 19 evaluator """rankbased""" +978 20 dataset """kinships""" +978 20 model """tucker""" +978 20 loss """softplus""" +978 20 regularizer """no""" +978 20 optimizer """adam""" +978 20 training_loop """lcwa""" +978 20 evaluator """rankbased""" +978 21 dataset """kinships""" +978 21 model """tucker""" +978 21 loss """softplus""" +978 21 regularizer """no""" +978 21 optimizer """adam""" +978 21 training_loop """lcwa""" +978 21 evaluator """rankbased""" +978 22 dataset """kinships""" +978 22 model """tucker""" +978 22 loss """softplus""" +978 22 regularizer """no""" +978 22 optimizer """adam""" +978 22 training_loop """lcwa""" +978 22 evaluator """rankbased""" +978 23 dataset """kinships""" +978 23 model """tucker""" +978 23 loss """softplus""" +978 23 regularizer """no""" +978 23 optimizer """adam""" +978 23 training_loop """lcwa""" +978 23 evaluator """rankbased""" +978 24 dataset """kinships""" +978 24 model """tucker""" +978 24 loss """softplus""" +978 24 regularizer """no""" +978 24 optimizer """adam""" 
+978 24 training_loop """lcwa""" +978 24 evaluator """rankbased""" +978 25 dataset """kinships""" +978 25 model """tucker""" +978 25 loss """softplus""" +978 25 regularizer """no""" +978 25 optimizer """adam""" +978 25 training_loop """lcwa""" +978 25 evaluator """rankbased""" +978 26 dataset """kinships""" +978 26 model """tucker""" +978 26 loss """softplus""" +978 26 regularizer """no""" +978 26 optimizer """adam""" +978 26 training_loop """lcwa""" +978 26 evaluator """rankbased""" +978 27 dataset """kinships""" +978 27 model """tucker""" +978 27 loss """softplus""" +978 27 regularizer """no""" +978 27 optimizer """adam""" +978 27 training_loop """lcwa""" +978 27 evaluator """rankbased""" +978 28 dataset """kinships""" +978 28 model """tucker""" +978 28 loss """softplus""" +978 28 regularizer """no""" +978 28 optimizer """adam""" +978 28 training_loop """lcwa""" +978 28 evaluator """rankbased""" +978 29 dataset """kinships""" +978 29 model """tucker""" +978 29 loss """softplus""" +978 29 regularizer """no""" +978 29 optimizer """adam""" +978 29 training_loop """lcwa""" +978 29 evaluator """rankbased""" +978 30 dataset """kinships""" +978 30 model """tucker""" +978 30 loss """softplus""" +978 30 regularizer """no""" +978 30 optimizer """adam""" +978 30 training_loop """lcwa""" +978 30 evaluator """rankbased""" +978 31 dataset """kinships""" +978 31 model """tucker""" +978 31 loss """softplus""" +978 31 regularizer """no""" +978 31 optimizer """adam""" +978 31 training_loop """lcwa""" +978 31 evaluator """rankbased""" +978 32 dataset """kinships""" +978 32 model """tucker""" +978 32 loss """softplus""" +978 32 regularizer """no""" +978 32 optimizer """adam""" +978 32 training_loop """lcwa""" +978 32 evaluator """rankbased""" +978 33 dataset """kinships""" +978 33 model """tucker""" +978 33 loss """softplus""" +978 33 regularizer """no""" +978 33 optimizer """adam""" +978 33 training_loop """lcwa""" +978 33 evaluator """rankbased""" +978 34 dataset """kinships""" 
+978 34 model """tucker""" +978 34 loss """softplus""" +978 34 regularizer """no""" +978 34 optimizer """adam""" +978 34 training_loop """lcwa""" +978 34 evaluator """rankbased""" +978 35 dataset """kinships""" +978 35 model """tucker""" +978 35 loss """softplus""" +978 35 regularizer """no""" +978 35 optimizer """adam""" +978 35 training_loop """lcwa""" +978 35 evaluator """rankbased""" +978 36 dataset """kinships""" +978 36 model """tucker""" +978 36 loss """softplus""" +978 36 regularizer """no""" +978 36 optimizer """adam""" +978 36 training_loop """lcwa""" +978 36 evaluator """rankbased""" +978 37 dataset """kinships""" +978 37 model """tucker""" +978 37 loss """softplus""" +978 37 regularizer """no""" +978 37 optimizer """adam""" +978 37 training_loop """lcwa""" +978 37 evaluator """rankbased""" +978 38 dataset """kinships""" +978 38 model """tucker""" +978 38 loss """softplus""" +978 38 regularizer """no""" +978 38 optimizer """adam""" +978 38 training_loop """lcwa""" +978 38 evaluator """rankbased""" +978 39 dataset """kinships""" +978 39 model """tucker""" +978 39 loss """softplus""" +978 39 regularizer """no""" +978 39 optimizer """adam""" +978 39 training_loop """lcwa""" +978 39 evaluator """rankbased""" +978 40 dataset """kinships""" +978 40 model """tucker""" +978 40 loss """softplus""" +978 40 regularizer """no""" +978 40 optimizer """adam""" +978 40 training_loop """lcwa""" +978 40 evaluator """rankbased""" +978 41 dataset """kinships""" +978 41 model """tucker""" +978 41 loss """softplus""" +978 41 regularizer """no""" +978 41 optimizer """adam""" +978 41 training_loop """lcwa""" +978 41 evaluator """rankbased""" +978 42 dataset """kinships""" +978 42 model """tucker""" +978 42 loss """softplus""" +978 42 regularizer """no""" +978 42 optimizer """adam""" +978 42 training_loop """lcwa""" +978 42 evaluator """rankbased""" +978 43 dataset """kinships""" +978 43 model """tucker""" +978 43 loss """softplus""" +978 43 regularizer """no""" +978 43 
optimizer """adam""" +978 43 training_loop """lcwa""" +978 43 evaluator """rankbased""" +978 44 dataset """kinships""" +978 44 model """tucker""" +978 44 loss """softplus""" +978 44 regularizer """no""" +978 44 optimizer """adam""" +978 44 training_loop """lcwa""" +978 44 evaluator """rankbased""" +978 45 dataset """kinships""" +978 45 model """tucker""" +978 45 loss """softplus""" +978 45 regularizer """no""" +978 45 optimizer """adam""" +978 45 training_loop """lcwa""" +978 45 evaluator """rankbased""" +978 46 dataset """kinships""" +978 46 model """tucker""" +978 46 loss """softplus""" +978 46 regularizer """no""" +978 46 optimizer """adam""" +978 46 training_loop """lcwa""" +978 46 evaluator """rankbased""" +978 47 dataset """kinships""" +978 47 model """tucker""" +978 47 loss """softplus""" +978 47 regularizer """no""" +978 47 optimizer """adam""" +978 47 training_loop """lcwa""" +978 47 evaluator """rankbased""" +978 48 dataset """kinships""" +978 48 model """tucker""" +978 48 loss """softplus""" +978 48 regularizer """no""" +978 48 optimizer """adam""" +978 48 training_loop """lcwa""" +978 48 evaluator """rankbased""" +978 49 dataset """kinships""" +978 49 model """tucker""" +978 49 loss """softplus""" +978 49 regularizer """no""" +978 49 optimizer """adam""" +978 49 training_loop """lcwa""" +978 49 evaluator """rankbased""" +978 50 dataset """kinships""" +978 50 model """tucker""" +978 50 loss """softplus""" +978 50 regularizer """no""" +978 50 optimizer """adam""" +978 50 training_loop """lcwa""" +978 50 evaluator """rankbased""" +978 51 dataset """kinships""" +978 51 model """tucker""" +978 51 loss """softplus""" +978 51 regularizer """no""" +978 51 optimizer """adam""" +978 51 training_loop """lcwa""" +978 51 evaluator """rankbased""" +978 52 dataset """kinships""" +978 52 model """tucker""" +978 52 loss """softplus""" +978 52 regularizer """no""" +978 52 optimizer """adam""" +978 52 training_loop """lcwa""" +978 52 evaluator """rankbased""" +978 53 
dataset """kinships""" +978 53 model """tucker""" +978 53 loss """softplus""" +978 53 regularizer """no""" +978 53 optimizer """adam""" +978 53 training_loop """lcwa""" +978 53 evaluator """rankbased""" +978 54 dataset """kinships""" +978 54 model """tucker""" +978 54 loss """softplus""" +978 54 regularizer """no""" +978 54 optimizer """adam""" +978 54 training_loop """lcwa""" +978 54 evaluator """rankbased""" +978 55 dataset """kinships""" +978 55 model """tucker""" +978 55 loss """softplus""" +978 55 regularizer """no""" +978 55 optimizer """adam""" +978 55 training_loop """lcwa""" +978 55 evaluator """rankbased""" +978 56 dataset """kinships""" +978 56 model """tucker""" +978 56 loss """softplus""" +978 56 regularizer """no""" +978 56 optimizer """adam""" +978 56 training_loop """lcwa""" +978 56 evaluator """rankbased""" +978 57 dataset """kinships""" +978 57 model """tucker""" +978 57 loss """softplus""" +978 57 regularizer """no""" +978 57 optimizer """adam""" +978 57 training_loop """lcwa""" +978 57 evaluator """rankbased""" +978 58 dataset """kinships""" +978 58 model """tucker""" +978 58 loss """softplus""" +978 58 regularizer """no""" +978 58 optimizer """adam""" +978 58 training_loop """lcwa""" +978 58 evaluator """rankbased""" +978 59 dataset """kinships""" +978 59 model """tucker""" +978 59 loss """softplus""" +978 59 regularizer """no""" +978 59 optimizer """adam""" +978 59 training_loop """lcwa""" +978 59 evaluator """rankbased""" +978 60 dataset """kinships""" +978 60 model """tucker""" +978 60 loss """softplus""" +978 60 regularizer """no""" +978 60 optimizer """adam""" +978 60 training_loop """lcwa""" +978 60 evaluator """rankbased""" +978 61 dataset """kinships""" +978 61 model """tucker""" +978 61 loss """softplus""" +978 61 regularizer """no""" +978 61 optimizer """adam""" +978 61 training_loop """lcwa""" +978 61 evaluator """rankbased""" +978 62 dataset """kinships""" +978 62 model """tucker""" +978 62 loss """softplus""" +978 62 regularizer 
"""no""" +978 62 optimizer """adam""" +978 62 training_loop """lcwa""" +978 62 evaluator """rankbased""" +978 63 dataset """kinships""" +978 63 model """tucker""" +978 63 loss """softplus""" +978 63 regularizer """no""" +978 63 optimizer """adam""" +978 63 training_loop """lcwa""" +978 63 evaluator """rankbased""" +978 64 dataset """kinships""" +978 64 model """tucker""" +978 64 loss """softplus""" +978 64 regularizer """no""" +978 64 optimizer """adam""" +978 64 training_loop """lcwa""" +978 64 evaluator """rankbased""" +978 65 dataset """kinships""" +978 65 model """tucker""" +978 65 loss """softplus""" +978 65 regularizer """no""" +978 65 optimizer """adam""" +978 65 training_loop """lcwa""" +978 65 evaluator """rankbased""" +978 66 dataset """kinships""" +978 66 model """tucker""" +978 66 loss """softplus""" +978 66 regularizer """no""" +978 66 optimizer """adam""" +978 66 training_loop """lcwa""" +978 66 evaluator """rankbased""" +978 67 dataset """kinships""" +978 67 model """tucker""" +978 67 loss """softplus""" +978 67 regularizer """no""" +978 67 optimizer """adam""" +978 67 training_loop """lcwa""" +978 67 evaluator """rankbased""" +978 68 dataset """kinships""" +978 68 model """tucker""" +978 68 loss """softplus""" +978 68 regularizer """no""" +978 68 optimizer """adam""" +978 68 training_loop """lcwa""" +978 68 evaluator """rankbased""" +978 69 dataset """kinships""" +978 69 model """tucker""" +978 69 loss """softplus""" +978 69 regularizer """no""" +978 69 optimizer """adam""" +978 69 training_loop """lcwa""" +978 69 evaluator """rankbased""" +978 70 dataset """kinships""" +978 70 model """tucker""" +978 70 loss """softplus""" +978 70 regularizer """no""" +978 70 optimizer """adam""" +978 70 training_loop """lcwa""" +978 70 evaluator """rankbased""" +978 71 dataset """kinships""" +978 71 model """tucker""" +978 71 loss """softplus""" +978 71 regularizer """no""" +978 71 optimizer """adam""" +978 71 training_loop """lcwa""" +978 71 evaluator 
"""rankbased""" +978 72 dataset """kinships""" +978 72 model """tucker""" +978 72 loss """softplus""" +978 72 regularizer """no""" +978 72 optimizer """adam""" +978 72 training_loop """lcwa""" +978 72 evaluator """rankbased""" +978 73 dataset """kinships""" +978 73 model """tucker""" +978 73 loss """softplus""" +978 73 regularizer """no""" +978 73 optimizer """adam""" +978 73 training_loop """lcwa""" +978 73 evaluator """rankbased""" +978 74 dataset """kinships""" +978 74 model """tucker""" +978 74 loss """softplus""" +978 74 regularizer """no""" +978 74 optimizer """adam""" +978 74 training_loop """lcwa""" +978 74 evaluator """rankbased""" +978 75 dataset """kinships""" +978 75 model """tucker""" +978 75 loss """softplus""" +978 75 regularizer """no""" +978 75 optimizer """adam""" +978 75 training_loop """lcwa""" +978 75 evaluator """rankbased""" +978 76 dataset """kinships""" +978 76 model """tucker""" +978 76 loss """softplus""" +978 76 regularizer """no""" +978 76 optimizer """adam""" +978 76 training_loop """lcwa""" +978 76 evaluator """rankbased""" +978 77 dataset """kinships""" +978 77 model """tucker""" +978 77 loss """softplus""" +978 77 regularizer """no""" +978 77 optimizer """adam""" +978 77 training_loop """lcwa""" +978 77 evaluator """rankbased""" +978 78 dataset """kinships""" +978 78 model """tucker""" +978 78 loss """softplus""" +978 78 regularizer """no""" +978 78 optimizer """adam""" +978 78 training_loop """lcwa""" +978 78 evaluator """rankbased""" +978 79 dataset """kinships""" +978 79 model """tucker""" +978 79 loss """softplus""" +978 79 regularizer """no""" +978 79 optimizer """adam""" +978 79 training_loop """lcwa""" +978 79 evaluator """rankbased""" +978 80 dataset """kinships""" +978 80 model """tucker""" +978 80 loss """softplus""" +978 80 regularizer """no""" +978 80 optimizer """adam""" +978 80 training_loop """lcwa""" +978 80 evaluator """rankbased""" +978 81 dataset """kinships""" +978 81 model """tucker""" +978 81 loss 
"""softplus""" +978 81 regularizer """no""" +978 81 optimizer """adam""" +978 81 training_loop """lcwa""" +978 81 evaluator """rankbased""" +978 82 dataset """kinships""" +978 82 model """tucker""" +978 82 loss """softplus""" +978 82 regularizer """no""" +978 82 optimizer """adam""" +978 82 training_loop """lcwa""" +978 82 evaluator """rankbased""" +978 83 dataset """kinships""" +978 83 model """tucker""" +978 83 loss """softplus""" +978 83 regularizer """no""" +978 83 optimizer """adam""" +978 83 training_loop """lcwa""" +978 83 evaluator """rankbased""" +978 84 dataset """kinships""" +978 84 model """tucker""" +978 84 loss """softplus""" +978 84 regularizer """no""" +978 84 optimizer """adam""" +978 84 training_loop """lcwa""" +978 84 evaluator """rankbased""" +978 85 dataset """kinships""" +978 85 model """tucker""" +978 85 loss """softplus""" +978 85 regularizer """no""" +978 85 optimizer """adam""" +978 85 training_loop """lcwa""" +978 85 evaluator """rankbased""" +978 86 dataset """kinships""" +978 86 model """tucker""" +978 86 loss """softplus""" +978 86 regularizer """no""" +978 86 optimizer """adam""" +978 86 training_loop """lcwa""" +978 86 evaluator """rankbased""" +978 87 dataset """kinships""" +978 87 model """tucker""" +978 87 loss """softplus""" +978 87 regularizer """no""" +978 87 optimizer """adam""" +978 87 training_loop """lcwa""" +978 87 evaluator """rankbased""" +978 88 dataset """kinships""" +978 88 model """tucker""" +978 88 loss """softplus""" +978 88 regularizer """no""" +978 88 optimizer """adam""" +978 88 training_loop """lcwa""" +978 88 evaluator """rankbased""" +978 89 dataset """kinships""" +978 89 model """tucker""" +978 89 loss """softplus""" +978 89 regularizer """no""" +978 89 optimizer """adam""" +978 89 training_loop """lcwa""" +978 89 evaluator """rankbased""" +978 90 dataset """kinships""" +978 90 model """tucker""" +978 90 loss """softplus""" +978 90 regularizer """no""" +978 90 optimizer """adam""" +978 90 training_loop 
"""lcwa""" +978 90 evaluator """rankbased""" +978 91 dataset """kinships""" +978 91 model """tucker""" +978 91 loss """softplus""" +978 91 regularizer """no""" +978 91 optimizer """adam""" +978 91 training_loop """lcwa""" +978 91 evaluator """rankbased""" +978 92 dataset """kinships""" +978 92 model """tucker""" +978 92 loss """softplus""" +978 92 regularizer """no""" +978 92 optimizer """adam""" +978 92 training_loop """lcwa""" +978 92 evaluator """rankbased""" +978 93 dataset """kinships""" +978 93 model """tucker""" +978 93 loss """softplus""" +978 93 regularizer """no""" +978 93 optimizer """adam""" +978 93 training_loop """lcwa""" +978 93 evaluator """rankbased""" +978 94 dataset """kinships""" +978 94 model """tucker""" +978 94 loss """softplus""" +978 94 regularizer """no""" +978 94 optimizer """adam""" +978 94 training_loop """lcwa""" +978 94 evaluator """rankbased""" +978 95 dataset """kinships""" +978 95 model """tucker""" +978 95 loss """softplus""" +978 95 regularizer """no""" +978 95 optimizer """adam""" +978 95 training_loop """lcwa""" +978 95 evaluator """rankbased""" +978 96 dataset """kinships""" +978 96 model """tucker""" +978 96 loss """softplus""" +978 96 regularizer """no""" +978 96 optimizer """adam""" +978 96 training_loop """lcwa""" +978 96 evaluator """rankbased""" +978 97 dataset """kinships""" +978 97 model """tucker""" +978 97 loss """softplus""" +978 97 regularizer """no""" +978 97 optimizer """adam""" +978 97 training_loop """lcwa""" +978 97 evaluator """rankbased""" +978 98 dataset """kinships""" +978 98 model """tucker""" +978 98 loss """softplus""" +978 98 regularizer """no""" +978 98 optimizer """adam""" +978 98 training_loop """lcwa""" +978 98 evaluator """rankbased""" +978 99 dataset """kinships""" +978 99 model """tucker""" +978 99 loss """softplus""" +978 99 regularizer """no""" +978 99 optimizer """adam""" +978 99 training_loop """lcwa""" +978 99 evaluator """rankbased""" +978 100 dataset """kinships""" +978 100 model 
"""tucker""" +978 100 loss """softplus""" +978 100 regularizer """no""" +978 100 optimizer """adam""" +978 100 training_loop """lcwa""" +978 100 evaluator """rankbased""" +979 1 model.embedding_dim 1.0 +979 1 model.relation_dim 1.0 +979 1 model.dropout_0 0.14914780027908395 +979 1 model.dropout_1 0.31653193023360326 +979 1 model.dropout_2 0.411970226347962 +979 1 loss.margin 9.164011262649678 +979 1 optimizer.lr 0.06759626938159076 +979 1 negative_sampler.num_negs_per_pos 25.0 +979 1 training.batch_size 1.0 +979 2 model.embedding_dim 0.0 +979 2 model.relation_dim 2.0 +979 2 model.dropout_0 0.2800083059299391 +979 2 model.dropout_1 0.46596904870998523 +979 2 model.dropout_2 0.47023523534192757 +979 2 loss.margin 5.896871575333718 +979 2 optimizer.lr 0.016526026242218984 +979 2 negative_sampler.num_negs_per_pos 2.0 +979 2 training.batch_size 1.0 +979 3 model.embedding_dim 1.0 +979 3 model.relation_dim 2.0 +979 3 model.dropout_0 0.2129711215155159 +979 3 model.dropout_1 0.4742215217279132 +979 3 model.dropout_2 0.3491639097273565 +979 3 loss.margin 4.584495249758027 +979 3 optimizer.lr 0.023631148903861844 +979 3 negative_sampler.num_negs_per_pos 73.0 +979 3 training.batch_size 1.0 +979 4 model.embedding_dim 2.0 +979 4 model.relation_dim 2.0 +979 4 model.dropout_0 0.1698766337927981 +979 4 model.dropout_1 0.17693546316364725 +979 4 model.dropout_2 0.3814807654452326 +979 4 loss.margin 9.721291275422724 +979 4 optimizer.lr 0.0031520143793149122 +979 4 negative_sampler.num_negs_per_pos 58.0 +979 4 training.batch_size 0.0 +979 5 model.embedding_dim 2.0 +979 5 model.relation_dim 0.0 +979 5 model.dropout_0 0.20548350852111852 +979 5 model.dropout_1 0.21412281056151256 +979 5 model.dropout_2 0.42099794326982765 +979 5 loss.margin 0.6804734967511601 +979 5 optimizer.lr 0.007470117600838532 +979 5 negative_sampler.num_negs_per_pos 9.0 +979 5 training.batch_size 1.0 +979 6 model.embedding_dim 0.0 +979 6 model.relation_dim 0.0 +979 6 model.dropout_0 0.20288068267545195 +979 6 
model.dropout_1 0.3382712417565079 +979 6 model.dropout_2 0.4845376181901303 +979 6 loss.margin 7.482513990832015 +979 6 optimizer.lr 0.05226705121954873 +979 6 negative_sampler.num_negs_per_pos 20.0 +979 6 training.batch_size 2.0 +979 1 dataset """kinships""" +979 1 model """tucker""" +979 1 loss """marginranking""" +979 1 regularizer """no""" +979 1 optimizer """adam""" +979 1 training_loop """owa""" +979 1 negative_sampler """basic""" +979 1 evaluator """rankbased""" +979 2 dataset """kinships""" +979 2 model """tucker""" +979 2 loss """marginranking""" +979 2 regularizer """no""" +979 2 optimizer """adam""" +979 2 training_loop """owa""" +979 2 negative_sampler """basic""" +979 2 evaluator """rankbased""" +979 3 dataset """kinships""" +979 3 model """tucker""" +979 3 loss """marginranking""" +979 3 regularizer """no""" +979 3 optimizer """adam""" +979 3 training_loop """owa""" +979 3 negative_sampler """basic""" +979 3 evaluator """rankbased""" +979 4 dataset """kinships""" +979 4 model """tucker""" +979 4 loss """marginranking""" +979 4 regularizer """no""" +979 4 optimizer """adam""" +979 4 training_loop """owa""" +979 4 negative_sampler """basic""" +979 4 evaluator """rankbased""" +979 5 dataset """kinships""" +979 5 model """tucker""" +979 5 loss """marginranking""" +979 5 regularizer """no""" +979 5 optimizer """adam""" +979 5 training_loop """owa""" +979 5 negative_sampler """basic""" +979 5 evaluator """rankbased""" +979 6 dataset """kinships""" +979 6 model """tucker""" +979 6 loss """marginranking""" +979 6 regularizer """no""" +979 6 optimizer """adam""" +979 6 training_loop """owa""" +979 6 negative_sampler """basic""" +979 6 evaluator """rankbased""" +980 1 model.embedding_dim 2.0 +980 1 model.relation_dim 2.0 +980 1 model.dropout_0 0.2598848132895184 +980 1 model.dropout_1 0.3004133017640081 +980 1 model.dropout_2 0.3998325442183678 +980 1 loss.margin 5.8254783255617815 +980 1 optimizer.lr 0.0129279492206491 +980 1 negative_sampler.num_negs_per_pos 
63.0 +980 1 training.batch_size 0.0 +980 2 model.embedding_dim 1.0 +980 2 model.relation_dim 2.0 +980 2 model.dropout_0 0.2415264181913347 +980 2 model.dropout_1 0.3025258642177302 +980 2 model.dropout_2 0.4893485429969817 +980 2 loss.margin 9.435582064157746 +980 2 optimizer.lr 0.007053189545499643 +980 2 negative_sampler.num_negs_per_pos 22.0 +980 2 training.batch_size 2.0 +980 3 model.embedding_dim 0.0 +980 3 model.relation_dim 2.0 +980 3 model.dropout_0 0.1961995857925851 +980 3 model.dropout_1 0.27233490768022606 +980 3 model.dropout_2 0.4065491617972672 +980 3 loss.margin 7.440957398754656 +980 3 optimizer.lr 0.006103140718657813 +980 3 negative_sampler.num_negs_per_pos 44.0 +980 3 training.batch_size 1.0 +980 4 model.embedding_dim 0.0 +980 4 model.relation_dim 1.0 +980 4 model.dropout_0 0.292718864175066 +980 4 model.dropout_1 0.4284143682998989 +980 4 model.dropout_2 0.33989897397782387 +980 4 loss.margin 6.458932679858148 +980 4 optimizer.lr 0.0142111297168568 +980 4 negative_sampler.num_negs_per_pos 47.0 +980 4 training.batch_size 0.0 +980 5 model.embedding_dim 2.0 +980 5 model.relation_dim 2.0 +980 5 model.dropout_0 0.2918850384376669 +980 5 model.dropout_1 0.32892080143819036 +980 5 model.dropout_2 0.26527229129275964 +980 5 loss.margin 2.7187732397897886 +980 5 optimizer.lr 0.0015433113649067115 +980 5 negative_sampler.num_negs_per_pos 60.0 +980 5 training.batch_size 0.0 +980 6 model.embedding_dim 1.0 +980 6 model.relation_dim 2.0 +980 6 model.dropout_0 0.41566522986408827 +980 6 model.dropout_1 0.4771847770792699 +980 6 model.dropout_2 0.24651117874051076 +980 6 loss.margin 1.8984015950756887 +980 6 optimizer.lr 0.060573251107751254 +980 6 negative_sampler.num_negs_per_pos 49.0 +980 6 training.batch_size 1.0 +980 7 model.embedding_dim 0.0 +980 7 model.relation_dim 0.0 +980 7 model.dropout_0 0.27761059380057185 +980 7 model.dropout_1 0.3405213072120167 +980 7 model.dropout_2 0.4771967432701292 +980 7 loss.margin 9.605797009632123 +980 7 optimizer.lr 
0.00988956146386061 +980 7 negative_sampler.num_negs_per_pos 66.0 +980 7 training.batch_size 1.0 +980 8 model.embedding_dim 0.0 +980 8 model.relation_dim 0.0 +980 8 model.dropout_0 0.45340341733759193 +980 8 model.dropout_1 0.4048976114467686 +980 8 model.dropout_2 0.3839000549301149 +980 8 loss.margin 4.148011702499473 +980 8 optimizer.lr 0.04566174277012506 +980 8 negative_sampler.num_negs_per_pos 58.0 +980 8 training.batch_size 1.0 +980 9 model.embedding_dim 2.0 +980 9 model.relation_dim 2.0 +980 9 model.dropout_0 0.45348561233056595 +980 9 model.dropout_1 0.4131025927784371 +980 9 model.dropout_2 0.27789743533566585 +980 9 loss.margin 4.668292372106494 +980 9 optimizer.lr 0.001922266801483265 +980 9 negative_sampler.num_negs_per_pos 90.0 +980 9 training.batch_size 1.0 +980 10 model.embedding_dim 2.0 +980 10 model.relation_dim 1.0 +980 10 model.dropout_0 0.3426513264387878 +980 10 model.dropout_1 0.28131972876109584 +980 10 model.dropout_2 0.36542759724384166 +980 10 loss.margin 6.018289340386895 +980 10 optimizer.lr 0.005589008564078465 +980 10 negative_sampler.num_negs_per_pos 5.0 +980 10 training.batch_size 0.0 +980 11 model.embedding_dim 1.0 +980 11 model.relation_dim 1.0 +980 11 model.dropout_0 0.15231338403772657 +980 11 model.dropout_1 0.16956568662299612 +980 11 model.dropout_2 0.22748974987878656 +980 11 loss.margin 3.0753259081265223 +980 11 optimizer.lr 0.00604761215060796 +980 11 negative_sampler.num_negs_per_pos 78.0 +980 11 training.batch_size 0.0 +980 12 model.embedding_dim 2.0 +980 12 model.relation_dim 0.0 +980 12 model.dropout_0 0.4101708878384711 +980 12 model.dropout_1 0.3056304819946152 +980 12 model.dropout_2 0.4900945759825065 +980 12 loss.margin 9.33396038517226 +980 12 optimizer.lr 0.006796158843024388 +980 12 negative_sampler.num_negs_per_pos 45.0 +980 12 training.batch_size 2.0 +980 1 dataset """kinships""" +980 1 model """tucker""" +980 1 loss """marginranking""" +980 1 regularizer """no""" +980 1 optimizer """adam""" +980 1 
training_loop """owa""" +980 1 negative_sampler """basic""" +980 1 evaluator """rankbased""" +980 2 dataset """kinships""" +980 2 model """tucker""" +980 2 loss """marginranking""" +980 2 regularizer """no""" +980 2 optimizer """adam""" +980 2 training_loop """owa""" +980 2 negative_sampler """basic""" +980 2 evaluator """rankbased""" +980 3 dataset """kinships""" +980 3 model """tucker""" +980 3 loss """marginranking""" +980 3 regularizer """no""" +980 3 optimizer """adam""" +980 3 training_loop """owa""" +980 3 negative_sampler """basic""" +980 3 evaluator """rankbased""" +980 4 dataset """kinships""" +980 4 model """tucker""" +980 4 loss """marginranking""" +980 4 regularizer """no""" +980 4 optimizer """adam""" +980 4 training_loop """owa""" +980 4 negative_sampler """basic""" +980 4 evaluator """rankbased""" +980 5 dataset """kinships""" +980 5 model """tucker""" +980 5 loss """marginranking""" +980 5 regularizer """no""" +980 5 optimizer """adam""" +980 5 training_loop """owa""" +980 5 negative_sampler """basic""" +980 5 evaluator """rankbased""" +980 6 dataset """kinships""" +980 6 model """tucker""" +980 6 loss """marginranking""" +980 6 regularizer """no""" +980 6 optimizer """adam""" +980 6 training_loop """owa""" +980 6 negative_sampler """basic""" +980 6 evaluator """rankbased""" +980 7 dataset """kinships""" +980 7 model """tucker""" +980 7 loss """marginranking""" +980 7 regularizer """no""" +980 7 optimizer """adam""" +980 7 training_loop """owa""" +980 7 negative_sampler """basic""" +980 7 evaluator """rankbased""" +980 8 dataset """kinships""" +980 8 model """tucker""" +980 8 loss """marginranking""" +980 8 regularizer """no""" +980 8 optimizer """adam""" +980 8 training_loop """owa""" +980 8 negative_sampler """basic""" +980 8 evaluator """rankbased""" +980 9 dataset """kinships""" +980 9 model """tucker""" +980 9 loss """marginranking""" +980 9 regularizer """no""" +980 9 optimizer """adam""" +980 9 training_loop """owa""" +980 9 negative_sampler 
"""basic""" +980 9 evaluator """rankbased""" +980 10 dataset """kinships""" +980 10 model """tucker""" +980 10 loss """marginranking""" +980 10 regularizer """no""" +980 10 optimizer """adam""" +980 10 training_loop """owa""" +980 10 negative_sampler """basic""" +980 10 evaluator """rankbased""" +980 11 dataset """kinships""" +980 11 model """tucker""" +980 11 loss """marginranking""" +980 11 regularizer """no""" +980 11 optimizer """adam""" +980 11 training_loop """owa""" +980 11 negative_sampler """basic""" +980 11 evaluator """rankbased""" +980 12 dataset """kinships""" +980 12 model """tucker""" +980 12 loss """marginranking""" +980 12 regularizer """no""" +980 12 optimizer """adam""" +980 12 training_loop """owa""" +980 12 negative_sampler """basic""" +980 12 evaluator """rankbased""" +981 1 model.embedding_dim 0.0 +981 1 model.relation_dim 2.0 +981 1 model.dropout_0 0.46018515432250273 +981 1 model.dropout_1 0.33229336901051953 +981 1 model.dropout_2 0.14031536994140836 +981 1 loss.margin 13.206055716948816 +981 1 loss.adversarial_temperature 0.7074030361112719 +981 1 optimizer.lr 0.006188843354656909 +981 1 negative_sampler.num_negs_per_pos 69.0 +981 1 training.batch_size 2.0 +981 2 model.embedding_dim 2.0 +981 2 model.relation_dim 2.0 +981 2 model.dropout_0 0.342491625869742 +981 2 model.dropout_1 0.15879407227946968 +981 2 model.dropout_2 0.2245639150398828 +981 2 loss.margin 10.256498240560807 +981 2 loss.adversarial_temperature 0.5792244034412629 +981 2 optimizer.lr 0.0015350960544259814 +981 2 negative_sampler.num_negs_per_pos 83.0 +981 2 training.batch_size 2.0 +981 3 model.embedding_dim 0.0 +981 3 model.relation_dim 1.0 +981 3 model.dropout_0 0.10507894385825033 +981 3 model.dropout_1 0.16804497338607477 +981 3 model.dropout_2 0.31374664833460103 +981 3 loss.margin 25.13719786455084 +981 3 loss.adversarial_temperature 0.950997704848869 +981 3 optimizer.lr 0.00714747804755759 +981 3 negative_sampler.num_negs_per_pos 21.0 +981 3 training.batch_size 1.0 
+981 4 model.embedding_dim 1.0 +981 4 model.relation_dim 1.0 +981 4 model.dropout_0 0.11316853542185129 +981 4 model.dropout_1 0.3534934858026354 +981 4 model.dropout_2 0.3436935559941169 +981 4 loss.margin 12.15824761839796 +981 4 loss.adversarial_temperature 0.21244044009307428 +981 4 optimizer.lr 0.005637421589357629 +981 4 negative_sampler.num_negs_per_pos 80.0 +981 4 training.batch_size 0.0 +981 1 dataset """kinships""" +981 1 model """tucker""" +981 1 loss """nssa""" +981 1 regularizer """no""" +981 1 optimizer """adam""" +981 1 training_loop """owa""" +981 1 negative_sampler """basic""" +981 1 evaluator """rankbased""" +981 2 dataset """kinships""" +981 2 model """tucker""" +981 2 loss """nssa""" +981 2 regularizer """no""" +981 2 optimizer """adam""" +981 2 training_loop """owa""" +981 2 negative_sampler """basic""" +981 2 evaluator """rankbased""" +981 3 dataset """kinships""" +981 3 model """tucker""" +981 3 loss """nssa""" +981 3 regularizer """no""" +981 3 optimizer """adam""" +981 3 training_loop """owa""" +981 3 negative_sampler """basic""" +981 3 evaluator """rankbased""" +981 4 dataset """kinships""" +981 4 model """tucker""" +981 4 loss """nssa""" +981 4 regularizer """no""" +981 4 optimizer """adam""" +981 4 training_loop """owa""" +981 4 negative_sampler """basic""" +981 4 evaluator """rankbased""" +982 1 model.embedding_dim 1.0 +982 1 model.relation_dim 0.0 +982 1 model.dropout_0 0.2711679911945289 +982 1 model.dropout_1 0.1702901221886943 +982 1 model.dropout_2 0.3885443686504667 +982 1 optimizer.lr 0.014262424770117946 +982 1 negative_sampler.num_negs_per_pos 86.0 +982 1 training.batch_size 2.0 +982 1 dataset """wn18rr""" +982 1 model """tucker""" +982 1 loss """softplus""" +982 1 regularizer """no""" +982 1 optimizer """adam""" +982 1 training_loop """owa""" +982 1 negative_sampler """basic""" +982 1 evaluator """rankbased""" +983 1 model.embedding_dim 0.0 +983 1 model.relation_dim 2.0 +983 1 model.dropout_0 0.33988292960986316 +983 1 
model.dropout_1 0.3412637770445201 +983 1 model.dropout_2 0.1702435201718724 +983 1 optimizer.lr 0.015376255397288855 +983 1 negative_sampler.num_negs_per_pos 17.0 +983 1 training.batch_size 1.0 +983 2 model.embedding_dim 1.0 +983 2 model.relation_dim 0.0 +983 2 model.dropout_0 0.3402799709979906 +983 2 model.dropout_1 0.38171323361717774 +983 2 model.dropout_2 0.4717529706810797 +983 2 optimizer.lr 0.004175909948411946 +983 2 negative_sampler.num_negs_per_pos 91.0 +983 2 training.batch_size 0.0 +983 1 dataset """wn18rr""" +983 1 model """tucker""" +983 1 loss """softplus""" +983 1 regularizer """no""" +983 1 optimizer """adam""" +983 1 training_loop """owa""" +983 1 negative_sampler """basic""" +983 1 evaluator """rankbased""" +983 2 dataset """wn18rr""" +983 2 model """tucker""" +983 2 loss """softplus""" +983 2 regularizer """no""" +983 2 optimizer """adam""" +983 2 training_loop """owa""" +983 2 negative_sampler """basic""" +983 2 evaluator """rankbased""" +984 1 model.embedding_dim 0.0 +984 1 model.relation_dim 2.0 +984 1 model.dropout_0 0.13605780918965232 +984 1 model.dropout_1 0.26208600895011663 +984 1 model.dropout_2 0.12246236070582178 +984 1 optimizer.lr 0.04761522396382778 +984 1 training.batch_size 0.0 +984 1 training.label_smoothing 0.08554141213490259 +984 2 model.embedding_dim 2.0 +984 2 model.relation_dim 0.0 +984 2 model.dropout_0 0.1864621194348673 +984 2 model.dropout_1 0.12090807475708645 +984 2 model.dropout_2 0.44641358670246534 +984 2 optimizer.lr 0.07276824999481525 +984 2 training.batch_size 2.0 +984 2 training.label_smoothing 0.02769150617222908 +984 3 model.embedding_dim 1.0 +984 3 model.relation_dim 0.0 +984 3 model.dropout_0 0.2695307210715214 +984 3 model.dropout_1 0.4754193987378743 +984 3 model.dropout_2 0.40995184893453757 +984 3 optimizer.lr 0.016232581004840527 +984 3 training.batch_size 2.0 +984 3 training.label_smoothing 0.13447212886731616 +984 4 model.embedding_dim 1.0 +984 4 model.relation_dim 2.0 +984 4 model.dropout_0 
0.17465906791051777 +984 4 model.dropout_1 0.3673184062469266 +984 4 model.dropout_2 0.4553928550638435 +984 4 optimizer.lr 0.039961092080922175 +984 4 training.batch_size 1.0 +984 4 training.label_smoothing 0.027130340860344028 +984 5 model.embedding_dim 0.0 +984 5 model.relation_dim 1.0 +984 5 model.dropout_0 0.34872384461054395 +984 5 model.dropout_1 0.21781733444080587 +984 5 model.dropout_2 0.1853971564292659 +984 5 optimizer.lr 0.017137561574352558 +984 5 training.batch_size 2.0 +984 5 training.label_smoothing 0.1381039455248635 +984 6 model.embedding_dim 1.0 +984 6 model.relation_dim 1.0 +984 6 model.dropout_0 0.1785494823249726 +984 6 model.dropout_1 0.2769343716872536 +984 6 model.dropout_2 0.2080462720337795 +984 6 optimizer.lr 0.007652729445485501 +984 6 training.batch_size 2.0 +984 6 training.label_smoothing 0.2772830396665846 +984 7 model.embedding_dim 2.0 +984 7 model.relation_dim 0.0 +984 7 model.dropout_0 0.1421474962065806 +984 7 model.dropout_1 0.4745865150465027 +984 7 model.dropout_2 0.39768499071797 +984 7 optimizer.lr 0.012840835010770785 +984 7 training.batch_size 2.0 +984 7 training.label_smoothing 0.06417694025617239 +984 1 dataset """wn18rr""" +984 1 model """tucker""" +984 1 loss """crossentropy""" +984 1 regularizer """no""" +984 1 optimizer """adam""" +984 1 training_loop """lcwa""" +984 1 evaluator """rankbased""" +984 2 dataset """wn18rr""" +984 2 model """tucker""" +984 2 loss """crossentropy""" +984 2 regularizer """no""" +984 2 optimizer """adam""" +984 2 training_loop """lcwa""" +984 2 evaluator """rankbased""" +984 3 dataset """wn18rr""" +984 3 model """tucker""" +984 3 loss """crossentropy""" +984 3 regularizer """no""" +984 3 optimizer """adam""" +984 3 training_loop """lcwa""" +984 3 evaluator """rankbased""" +984 4 dataset """wn18rr""" +984 4 model """tucker""" +984 4 loss """crossentropy""" +984 4 regularizer """no""" +984 4 optimizer """adam""" +984 4 training_loop """lcwa""" +984 4 evaluator """rankbased""" +984 5 dataset 
"""wn18rr""" +984 5 model """tucker""" +984 5 loss """crossentropy""" +984 5 regularizer """no""" +984 5 optimizer """adam""" +984 5 training_loop """lcwa""" +984 5 evaluator """rankbased""" +984 6 dataset """wn18rr""" +984 6 model """tucker""" +984 6 loss """crossentropy""" +984 6 regularizer """no""" +984 6 optimizer """adam""" +984 6 training_loop """lcwa""" +984 6 evaluator """rankbased""" +984 7 dataset """wn18rr""" +984 7 model """tucker""" +984 7 loss """crossentropy""" +984 7 regularizer """no""" +984 7 optimizer """adam""" +984 7 training_loop """lcwa""" +984 7 evaluator """rankbased""" +985 1 model.embedding_dim 0.0 +985 1 model.relation_dim 0.0 +985 1 model.dropout_0 0.16655296736519595 +985 1 model.dropout_1 0.23662327466054647 +985 1 model.dropout_2 0.3986421439899943 +985 1 optimizer.lr 0.006097148110698481 +985 1 training.batch_size 1.0 +985 1 training.label_smoothing 0.07344351372081577 +985 2 model.embedding_dim 1.0 +985 2 model.relation_dim 1.0 +985 2 model.dropout_0 0.279229477337749 +985 2 model.dropout_1 0.4638626173723217 +985 2 model.dropout_2 0.20768520850487754 +985 2 optimizer.lr 0.00196363039037731 +985 2 training.batch_size 1.0 +985 2 training.label_smoothing 0.0016140106231435493 +985 3 model.embedding_dim 0.0 +985 3 model.relation_dim 1.0 +985 3 model.dropout_0 0.4543466018927722 +985 3 model.dropout_1 0.3869032314172344 +985 3 model.dropout_2 0.3681164717760632 +985 3 optimizer.lr 0.03539392173860044 +985 3 training.batch_size 1.0 +985 3 training.label_smoothing 0.07411871331760424 +985 4 model.embedding_dim 0.0 +985 4 model.relation_dim 0.0 +985 4 model.dropout_0 0.1794294828454116 +985 4 model.dropout_1 0.45545745651140207 +985 4 model.dropout_2 0.4902519161718377 +985 4 optimizer.lr 0.03454252134073891 +985 4 training.batch_size 2.0 +985 4 training.label_smoothing 0.003109764741267157 +985 5 model.embedding_dim 2.0 +985 5 model.relation_dim 1.0 +985 5 model.dropout_0 0.24374583719712473 +985 5 model.dropout_1 0.48037270247942265 
+985 5 model.dropout_2 0.31260808800060413 +985 5 optimizer.lr 0.022732442155230258 +985 5 training.batch_size 1.0 +985 5 training.label_smoothing 0.00162485211055068 +985 6 model.embedding_dim 1.0 +985 6 model.relation_dim 2.0 +985 6 model.dropout_0 0.11735929055730653 +985 6 model.dropout_1 0.3102834181611185 +985 6 model.dropout_2 0.49248760492673727 +985 6 optimizer.lr 0.002415212658497207 +985 6 training.batch_size 1.0 +985 6 training.label_smoothing 0.0024663172392640634 +985 7 model.embedding_dim 1.0 +985 7 model.relation_dim 1.0 +985 7 model.dropout_0 0.15907432681043676 +985 7 model.dropout_1 0.41425372269514416 +985 7 model.dropout_2 0.29115046655741933 +985 7 optimizer.lr 0.015830611645529985 +985 7 training.batch_size 1.0 +985 7 training.label_smoothing 0.0744636073572405 +985 8 model.embedding_dim 0.0 +985 8 model.relation_dim 2.0 +985 8 model.dropout_0 0.16352995392463832 +985 8 model.dropout_1 0.3058871710735924 +985 8 model.dropout_2 0.42556062591611854 +985 8 optimizer.lr 0.006014993259581756 +985 8 training.batch_size 1.0 +985 8 training.label_smoothing 0.20671289924559447 +985 1 dataset """wn18rr""" +985 1 model """tucker""" +985 1 loss """crossentropy""" +985 1 regularizer """no""" +985 1 optimizer """adam""" +985 1 training_loop """lcwa""" +985 1 evaluator """rankbased""" +985 2 dataset """wn18rr""" +985 2 model """tucker""" +985 2 loss """crossentropy""" +985 2 regularizer """no""" +985 2 optimizer """adam""" +985 2 training_loop """lcwa""" +985 2 evaluator """rankbased""" +985 3 dataset """wn18rr""" +985 3 model """tucker""" +985 3 loss """crossentropy""" +985 3 regularizer """no""" +985 3 optimizer """adam""" +985 3 training_loop """lcwa""" +985 3 evaluator """rankbased""" +985 4 dataset """wn18rr""" +985 4 model """tucker""" +985 4 loss """crossentropy""" +985 4 regularizer """no""" +985 4 optimizer """adam""" +985 4 training_loop """lcwa""" +985 4 evaluator """rankbased""" +985 5 dataset """wn18rr""" +985 5 model """tucker""" +985 5 loss 
"""crossentropy""" +985 5 regularizer """no""" +985 5 optimizer """adam""" +985 5 training_loop """lcwa""" +985 5 evaluator """rankbased""" +985 6 dataset """wn18rr""" +985 6 model """tucker""" +985 6 loss """crossentropy""" +985 6 regularizer """no""" +985 6 optimizer """adam""" +985 6 training_loop """lcwa""" +985 6 evaluator """rankbased""" +985 7 dataset """wn18rr""" +985 7 model """tucker""" +985 7 loss """crossentropy""" +985 7 regularizer """no""" +985 7 optimizer """adam""" +985 7 training_loop """lcwa""" +985 7 evaluator """rankbased""" +985 8 dataset """wn18rr""" +985 8 model """tucker""" +985 8 loss """crossentropy""" +985 8 regularizer """no""" +985 8 optimizer """adam""" +985 8 training_loop """lcwa""" +985 8 evaluator """rankbased""" +986 1 model.embedding_dim 0.0 +986 1 model.relation_dim 2.0 +986 1 model.dropout_0 0.4161637839250648 +986 1 model.dropout_1 0.19269858288502484 +986 1 model.dropout_2 0.3163582191831703 +986 1 loss.margin 9.987398598515869 +986 1 optimizer.lr 0.0018523327076703942 +986 1 negative_sampler.num_negs_per_pos 33.0 +986 1 training.batch_size 1.0 +986 1 dataset """wn18rr""" +986 1 model """tucker""" +986 1 loss """marginranking""" +986 1 regularizer """no""" +986 1 optimizer """adam""" +986 1 training_loop """owa""" +986 1 negative_sampler """basic""" +986 1 evaluator """rankbased""" +987 1 model.embedding_dim 1.0 +987 1 model.relation_dim 0.0 +987 1 model.dropout_0 0.4041338058251436 +987 1 model.dropout_1 0.20498781644387526 +987 1 model.dropout_2 0.15243233228768843 +987 1 loss.margin 5.209989453942851 +987 1 optimizer.lr 0.003067991762523819 +987 1 negative_sampler.num_negs_per_pos 93.0 +987 1 training.batch_size 0.0 +987 1 dataset """wn18rr""" +987 1 model """tucker""" +987 1 loss """marginranking""" +987 1 regularizer """no""" +987 1 optimizer """adam""" +987 1 training_loop """owa""" +987 1 negative_sampler """basic""" +987 1 evaluator """rankbased""" +988 1 model.embedding_dim 1.0 +988 1 model.relation_dim 2.0 +988 1 
model.dropout_0 0.24799643489279283 +988 1 model.dropout_1 0.1810712816088357 +988 1 model.dropout_2 0.11933032483903167 +988 1 optimizer.lr 0.0035930432951429897 +988 1 training.batch_size 0.0 +988 1 training.label_smoothing 0.27789145590863934 +988 2 model.embedding_dim 0.0 +988 2 model.relation_dim 0.0 +988 2 model.dropout_0 0.22013012229570894 +988 2 model.dropout_1 0.4463300348009671 +988 2 model.dropout_2 0.4740517734937212 +988 2 optimizer.lr 0.00796955944917666 +988 2 training.batch_size 0.0 +988 2 training.label_smoothing 0.016535570992904647 +988 3 model.embedding_dim 1.0 +988 3 model.relation_dim 0.0 +988 3 model.dropout_0 0.3325592174660085 +988 3 model.dropout_1 0.31966773868339327 +988 3 model.dropout_2 0.1743636959671777 +988 3 optimizer.lr 0.030943702721915472 +988 3 training.batch_size 0.0 +988 3 training.label_smoothing 0.08510946458222704 +988 4 model.embedding_dim 0.0 +988 4 model.relation_dim 2.0 +988 4 model.dropout_0 0.12352480223084444 +988 4 model.dropout_1 0.4506300721320499 +988 4 model.dropout_2 0.26187324854204946 +988 4 optimizer.lr 0.0032191971640985664 +988 4 training.batch_size 1.0 +988 4 training.label_smoothing 0.0016589793391418165 +988 5 model.embedding_dim 0.0 +988 5 model.relation_dim 2.0 +988 5 model.dropout_0 0.1290177245100106 +988 5 model.dropout_1 0.3431496468745532 +988 5 model.dropout_2 0.18376184315760705 +988 5 optimizer.lr 0.01636500576930285 +988 5 training.batch_size 1.0 +988 5 training.label_smoothing 0.03307561470540049 +988 6 model.embedding_dim 2.0 +988 6 model.relation_dim 1.0 +988 6 model.dropout_0 0.4066068791161488 +988 6 model.dropout_1 0.49653076624180625 +988 6 model.dropout_2 0.3971567750866426 +988 6 optimizer.lr 0.0014145008598932863 +988 6 training.batch_size 1.0 +988 6 training.label_smoothing 0.0018900373923601528 +988 7 model.embedding_dim 2.0 +988 7 model.relation_dim 2.0 +988 7 model.dropout_0 0.11121663681161637 +988 7 model.dropout_1 0.10938550136154204 +988 7 model.dropout_2 0.498592831480434 
+988 7 optimizer.lr 0.002597083174679281 +988 7 training.batch_size 2.0 +988 7 training.label_smoothing 0.010598652226851624 +988 8 model.embedding_dim 2.0 +988 8 model.relation_dim 1.0 +988 8 model.dropout_0 0.47043059168681595 +988 8 model.dropout_1 0.3072714867810588 +988 8 model.dropout_2 0.2086535303663274 +988 8 optimizer.lr 0.009048794065829638 +988 8 training.batch_size 0.0 +988 8 training.label_smoothing 0.15449418687828828 +988 9 model.embedding_dim 0.0 +988 9 model.relation_dim 1.0 +988 9 model.dropout_0 0.16818449858384896 +988 9 model.dropout_1 0.22109810851462913 +988 9 model.dropout_2 0.3151089674103895 +988 9 optimizer.lr 0.03730968454833588 +988 9 training.batch_size 1.0 +988 9 training.label_smoothing 0.042701309330389285 +988 10 model.embedding_dim 2.0 +988 10 model.relation_dim 1.0 +988 10 model.dropout_0 0.43540411617492675 +988 10 model.dropout_1 0.3129485640026341 +988 10 model.dropout_2 0.22875279525408188 +988 10 optimizer.lr 0.001160502407935446 +988 10 training.batch_size 0.0 +988 10 training.label_smoothing 0.0016912650757860634 +988 1 dataset """wn18rr""" +988 1 model """tucker""" +988 1 loss """softplus""" +988 1 regularizer """no""" +988 1 optimizer """adam""" +988 1 training_loop """lcwa""" +988 1 evaluator """rankbased""" +988 2 dataset """wn18rr""" +988 2 model """tucker""" +988 2 loss """softplus""" +988 2 regularizer """no""" +988 2 optimizer """adam""" +988 2 training_loop """lcwa""" +988 2 evaluator """rankbased""" +988 3 dataset """wn18rr""" +988 3 model """tucker""" +988 3 loss """softplus""" +988 3 regularizer """no""" +988 3 optimizer """adam""" +988 3 training_loop """lcwa""" +988 3 evaluator """rankbased""" +988 4 dataset """wn18rr""" +988 4 model """tucker""" +988 4 loss """softplus""" +988 4 regularizer """no""" +988 4 optimizer """adam""" +988 4 training_loop """lcwa""" +988 4 evaluator """rankbased""" +988 5 dataset """wn18rr""" +988 5 model """tucker""" +988 5 loss """softplus""" +988 5 regularizer """no""" +988 5 
optimizer """adam""" +988 5 training_loop """lcwa""" +988 5 evaluator """rankbased""" +988 6 dataset """wn18rr""" +988 6 model """tucker""" +988 6 loss """softplus""" +988 6 regularizer """no""" +988 6 optimizer """adam""" +988 6 training_loop """lcwa""" +988 6 evaluator """rankbased""" +988 7 dataset """wn18rr""" +988 7 model """tucker""" +988 7 loss """softplus""" +988 7 regularizer """no""" +988 7 optimizer """adam""" +988 7 training_loop """lcwa""" +988 7 evaluator """rankbased""" +988 8 dataset """wn18rr""" +988 8 model """tucker""" +988 8 loss """softplus""" +988 8 regularizer """no""" +988 8 optimizer """adam""" +988 8 training_loop """lcwa""" +988 8 evaluator """rankbased""" +988 9 dataset """wn18rr""" +988 9 model """tucker""" +988 9 loss """softplus""" +988 9 regularizer """no""" +988 9 optimizer """adam""" +988 9 training_loop """lcwa""" +988 9 evaluator """rankbased""" +988 10 dataset """wn18rr""" +988 10 model """tucker""" +988 10 loss """softplus""" +988 10 regularizer """no""" +988 10 optimizer """adam""" +988 10 training_loop """lcwa""" +988 10 evaluator """rankbased""" +989 1 model.embedding_dim 1.0 +989 1 model.relation_dim 2.0 +989 1 model.dropout_0 0.13136766777665537 +989 1 model.dropout_1 0.16886017161957617 +989 1 model.dropout_2 0.4179973547873568 +989 1 optimizer.lr 0.08295516357701958 +989 1 training.batch_size 1.0 +989 1 training.label_smoothing 0.05197085452133192 +989 2 model.embedding_dim 1.0 +989 2 model.relation_dim 2.0 +989 2 model.dropout_0 0.10264275798304974 +989 2 model.dropout_1 0.3581754439815482 +989 2 model.dropout_2 0.19170003168275734 +989 2 optimizer.lr 0.06315845576479472 +989 2 training.batch_size 0.0 +989 2 training.label_smoothing 0.0013886732725927554 +989 3 model.embedding_dim 0.0 +989 3 model.relation_dim 1.0 +989 3 model.dropout_0 0.352653222932608 +989 3 model.dropout_1 0.3647541360715138 +989 3 model.dropout_2 0.2209625567538115 +989 3 optimizer.lr 0.0011880045255851725 +989 3 training.batch_size 0.0 +989 3 
training.label_smoothing 0.0019668076661676368 +989 4 model.embedding_dim 0.0 +989 4 model.relation_dim 2.0 +989 4 model.dropout_0 0.2918102710488966 +989 4 model.dropout_1 0.26359513273991325 +989 4 model.dropout_2 0.22520960823477726 +989 4 optimizer.lr 0.0017530806136075749 +989 4 training.batch_size 0.0 +989 4 training.label_smoothing 0.0015129924736032032 +989 5 model.embedding_dim 0.0 +989 5 model.relation_dim 0.0 +989 5 model.dropout_0 0.39164904107113396 +989 5 model.dropout_1 0.30587001204218445 +989 5 model.dropout_2 0.49888872549095814 +989 5 optimizer.lr 0.003596572806280185 +989 5 training.batch_size 2.0 +989 5 training.label_smoothing 0.26330704520072623 +989 6 model.embedding_dim 0.0 +989 6 model.relation_dim 2.0 +989 6 model.dropout_0 0.4835902901307738 +989 6 model.dropout_1 0.28633922799280487 +989 6 model.dropout_2 0.4927537774006341 +989 6 optimizer.lr 0.0023667706942396933 +989 6 training.batch_size 0.0 +989 6 training.label_smoothing 0.0014694026637349885 +989 7 model.embedding_dim 0.0 +989 7 model.relation_dim 1.0 +989 7 model.dropout_0 0.11314003984984772 +989 7 model.dropout_1 0.255544638489303 +989 7 model.dropout_2 0.15257475256324582 +989 7 optimizer.lr 0.002820127484659549 +989 7 training.batch_size 2.0 +989 7 training.label_smoothing 0.08383764236895587 +989 8 model.embedding_dim 2.0 +989 8 model.relation_dim 1.0 +989 8 model.dropout_0 0.24602927405016192 +989 8 model.dropout_1 0.4047542743428477 +989 8 model.dropout_2 0.44460845588120734 +989 8 optimizer.lr 0.06080343198426993 +989 8 training.batch_size 2.0 +989 8 training.label_smoothing 0.002428322761349274 +989 9 model.embedding_dim 2.0 +989 9 model.relation_dim 1.0 +989 9 model.dropout_0 0.43356749702718633 +989 9 model.dropout_1 0.25195509243748326 +989 9 model.dropout_2 0.48770615877013834 +989 9 optimizer.lr 0.006870998868482153 +989 9 training.batch_size 0.0 +989 9 training.label_smoothing 0.005309928745461188 +989 1 dataset """wn18rr""" +989 1 model """tucker""" +989 1 loss 
"""softplus""" +989 1 regularizer """no""" +989 1 optimizer """adam""" +989 1 training_loop """lcwa""" +989 1 evaluator """rankbased""" +989 2 dataset """wn18rr""" +989 2 model """tucker""" +989 2 loss """softplus""" +989 2 regularizer """no""" +989 2 optimizer """adam""" +989 2 training_loop """lcwa""" +989 2 evaluator """rankbased""" +989 3 dataset """wn18rr""" +989 3 model """tucker""" +989 3 loss """softplus""" +989 3 regularizer """no""" +989 3 optimizer """adam""" +989 3 training_loop """lcwa""" +989 3 evaluator """rankbased""" +989 4 dataset """wn18rr""" +989 4 model """tucker""" +989 4 loss """softplus""" +989 4 regularizer """no""" +989 4 optimizer """adam""" +989 4 training_loop """lcwa""" +989 4 evaluator """rankbased""" +989 5 dataset """wn18rr""" +989 5 model """tucker""" +989 5 loss """softplus""" +989 5 regularizer """no""" +989 5 optimizer """adam""" +989 5 training_loop """lcwa""" +989 5 evaluator """rankbased""" +989 6 dataset """wn18rr""" +989 6 model """tucker""" +989 6 loss """softplus""" +989 6 regularizer """no""" +989 6 optimizer """adam""" +989 6 training_loop """lcwa""" +989 6 evaluator """rankbased""" +989 7 dataset """wn18rr""" +989 7 model """tucker""" +989 7 loss """softplus""" +989 7 regularizer """no""" +989 7 optimizer """adam""" +989 7 training_loop """lcwa""" +989 7 evaluator """rankbased""" +989 8 dataset """wn18rr""" +989 8 model """tucker""" +989 8 loss """softplus""" +989 8 regularizer """no""" +989 8 optimizer """adam""" +989 8 training_loop """lcwa""" +989 8 evaluator """rankbased""" +989 9 dataset """wn18rr""" +989 9 model """tucker""" +989 9 loss """softplus""" +989 9 regularizer """no""" +989 9 optimizer """adam""" +989 9 training_loop """lcwa""" +989 9 evaluator """rankbased""" +990 1 model.embedding_dim 2.0 +990 1 model.relation_dim 1.0 +990 1 model.dropout_0 0.22910488183017633 +990 1 model.dropout_1 0.29103077787714476 +990 1 model.dropout_2 0.12112451512420624 +990 1 optimizer.lr 0.0018606000202216946 +990 1 
training.batch_size 0.0 +990 1 training.label_smoothing 0.028062979726742647 +990 2 model.embedding_dim 2.0 +990 2 model.relation_dim 1.0 +990 2 model.dropout_0 0.49485729350522667 +990 2 model.dropout_1 0.4937255009585502 +990 2 model.dropout_2 0.27934270414271006 +990 2 optimizer.lr 0.0356178376748366 +990 2 training.batch_size 1.0 +990 2 training.label_smoothing 0.4169924008417257 +990 3 model.embedding_dim 0.0 +990 3 model.relation_dim 0.0 +990 3 model.dropout_0 0.2808049873656835 +990 3 model.dropout_1 0.37728808644231454 +990 3 model.dropout_2 0.4305826769807141 +990 3 optimizer.lr 0.0029769380819921797 +990 3 training.batch_size 1.0 +990 3 training.label_smoothing 0.2870281664259176 +990 4 model.embedding_dim 2.0 +990 4 model.relation_dim 2.0 +990 4 model.dropout_0 0.3460915603546916 +990 4 model.dropout_1 0.46347693717548216 +990 4 model.dropout_2 0.1018037653422669 +990 4 optimizer.lr 0.0011303558871675575 +990 4 training.batch_size 2.0 +990 4 training.label_smoothing 0.118886205134609 +990 1 dataset """wn18rr""" +990 1 model """tucker""" +990 1 loss """bceaftersigmoid""" +990 1 regularizer """no""" +990 1 optimizer """adam""" +990 1 training_loop """lcwa""" +990 1 evaluator """rankbased""" +990 2 dataset """wn18rr""" +990 2 model """tucker""" +990 2 loss """bceaftersigmoid""" +990 2 regularizer """no""" +990 2 optimizer """adam""" +990 2 training_loop """lcwa""" +990 2 evaluator """rankbased""" +990 3 dataset """wn18rr""" +990 3 model """tucker""" +990 3 loss """bceaftersigmoid""" +990 3 regularizer """no""" +990 3 optimizer """adam""" +990 3 training_loop """lcwa""" +990 3 evaluator """rankbased""" +990 4 dataset """wn18rr""" +990 4 model """tucker""" +990 4 loss """bceaftersigmoid""" +990 4 regularizer """no""" +990 4 optimizer """adam""" +990 4 training_loop """lcwa""" +990 4 evaluator """rankbased""" +991 1 model.embedding_dim 1.0 +991 1 model.relation_dim 0.0 +991 1 model.dropout_0 0.2008821294180502 +991 1 model.dropout_1 0.20591248882526628 +991 1 
model.dropout_2 0.45610836629897844 +991 1 optimizer.lr 0.0156264092218112 +991 1 training.batch_size 0.0 +991 1 training.label_smoothing 0.4700125881451457 +991 2 model.embedding_dim 0.0 +991 2 model.relation_dim 1.0 +991 2 model.dropout_0 0.2063926074846641 +991 2 model.dropout_1 0.2075436824259386 +991 2 model.dropout_2 0.1740611975380269 +991 2 optimizer.lr 0.0010966599418987468 +991 2 training.batch_size 0.0 +991 2 training.label_smoothing 0.03503953582097937 +991 3 model.embedding_dim 0.0 +991 3 model.relation_dim 2.0 +991 3 model.dropout_0 0.4886868798802256 +991 3 model.dropout_1 0.4088221238968688 +991 3 model.dropout_2 0.2850331967920795 +991 3 optimizer.lr 0.004203060870595781 +991 3 training.batch_size 0.0 +991 3 training.label_smoothing 0.011544257825492094 +991 4 model.embedding_dim 2.0 +991 4 model.relation_dim 2.0 +991 4 model.dropout_0 0.42091986297507855 +991 4 model.dropout_1 0.4622803275267244 +991 4 model.dropout_2 0.19015554786645136 +991 4 optimizer.lr 0.042803444205414 +991 4 training.batch_size 1.0 +991 4 training.label_smoothing 0.2985466661404277 +991 5 model.embedding_dim 1.0 +991 5 model.relation_dim 2.0 +991 5 model.dropout_0 0.11624374317571413 +991 5 model.dropout_1 0.10991688702632524 +991 5 model.dropout_2 0.28579417054963174 +991 5 optimizer.lr 0.007664189714123859 +991 5 training.batch_size 0.0 +991 5 training.label_smoothing 0.01959619756798587 +991 6 model.embedding_dim 0.0 +991 6 model.relation_dim 0.0 +991 6 model.dropout_0 0.393001059618745 +991 6 model.dropout_1 0.2493396055130156 +991 6 model.dropout_2 0.16658740695708124 +991 6 optimizer.lr 0.03809040979573119 +991 6 training.batch_size 1.0 +991 6 training.label_smoothing 0.0010178240632993992 +991 7 model.embedding_dim 1.0 +991 7 model.relation_dim 1.0 +991 7 model.dropout_0 0.15233774503690697 +991 7 model.dropout_1 0.46582701758453815 +991 7 model.dropout_2 0.492368319878267 +991 7 optimizer.lr 0.07002742078868242 +991 7 training.batch_size 1.0 +991 7 
training.label_smoothing 0.013082227682104127 +991 8 model.embedding_dim 0.0 +991 8 model.relation_dim 2.0 +991 8 model.dropout_0 0.23046956126254836 +991 8 model.dropout_1 0.38073541508096925 +991 8 model.dropout_2 0.4618406850575524 +991 8 optimizer.lr 0.008717893939789719 +991 8 training.batch_size 0.0 +991 8 training.label_smoothing 0.029490926541943248 +991 9 model.embedding_dim 2.0 +991 9 model.relation_dim 2.0 +991 9 model.dropout_0 0.3335997963671366 +991 9 model.dropout_1 0.19809662515190585 +991 9 model.dropout_2 0.4893332086630371 +991 9 optimizer.lr 0.010871083649600639 +991 9 training.batch_size 2.0 +991 9 training.label_smoothing 0.12754568586900736 +991 10 model.embedding_dim 2.0 +991 10 model.relation_dim 2.0 +991 10 model.dropout_0 0.49133957215658475 +991 10 model.dropout_1 0.48032943848866744 +991 10 model.dropout_2 0.21071849010140187 +991 10 optimizer.lr 0.024683068754221438 +991 10 training.batch_size 0.0 +991 10 training.label_smoothing 0.14240720980227317 +991 11 model.embedding_dim 1.0 +991 11 model.relation_dim 2.0 +991 11 model.dropout_0 0.11733416282947542 +991 11 model.dropout_1 0.16386822032689727 +991 11 model.dropout_2 0.2857754455592083 +991 11 optimizer.lr 0.05105989064775857 +991 11 training.batch_size 0.0 +991 11 training.label_smoothing 0.22478953559169224 +991 12 model.embedding_dim 0.0 +991 12 model.relation_dim 0.0 +991 12 model.dropout_0 0.2776193283977237 +991 12 model.dropout_1 0.13900460995109096 +991 12 model.dropout_2 0.42083662987126863 +991 12 optimizer.lr 0.0016959746569559892 +991 12 training.batch_size 1.0 +991 12 training.label_smoothing 0.17935069494369657 +991 13 model.embedding_dim 2.0 +991 13 model.relation_dim 2.0 +991 13 model.dropout_0 0.17439105348189338 +991 13 model.dropout_1 0.20078201895264766 +991 13 model.dropout_2 0.19161585417980276 +991 13 optimizer.lr 0.01313488636683393 +991 13 training.batch_size 2.0 +991 13 training.label_smoothing 0.1146934163504386 +991 14 model.embedding_dim 0.0 +991 14 
model.relation_dim 2.0 +991 14 model.dropout_0 0.34371321195080096 +991 14 model.dropout_1 0.21934457738429988 +991 14 model.dropout_2 0.2979974412814578 +991 14 optimizer.lr 0.07856478222002142 +991 14 training.batch_size 2.0 +991 14 training.label_smoothing 0.10561917772517004 +991 15 model.embedding_dim 0.0 +991 15 model.relation_dim 2.0 +991 15 model.dropout_0 0.3333969696971871 +991 15 model.dropout_1 0.3661897446512289 +991 15 model.dropout_2 0.2708967199321446 +991 15 optimizer.lr 0.003973748070247437 +991 15 training.batch_size 1.0 +991 15 training.label_smoothing 0.006785315389455934 +991 16 model.embedding_dim 0.0 +991 16 model.relation_dim 1.0 +991 16 model.dropout_0 0.4163635266499308 +991 16 model.dropout_1 0.1916145488027861 +991 16 model.dropout_2 0.30899524487420016 +991 16 optimizer.lr 0.0024292610501338973 +991 16 training.batch_size 0.0 +991 16 training.label_smoothing 0.0028208509444391018 +991 17 model.embedding_dim 2.0 +991 17 model.relation_dim 1.0 +991 17 model.dropout_0 0.3854206678076263 +991 17 model.dropout_1 0.3289946238381426 +991 17 model.dropout_2 0.41086001063486877 +991 17 optimizer.lr 0.07223139988048653 +991 17 training.batch_size 2.0 +991 17 training.label_smoothing 0.005008581372681701 +991 1 dataset """wn18rr""" +991 1 model """tucker""" +991 1 loss """bceaftersigmoid""" +991 1 regularizer """no""" +991 1 optimizer """adam""" +991 1 training_loop """lcwa""" +991 1 evaluator """rankbased""" +991 2 dataset """wn18rr""" +991 2 model """tucker""" +991 2 loss """bceaftersigmoid""" +991 2 regularizer """no""" +991 2 optimizer """adam""" +991 2 training_loop """lcwa""" +991 2 evaluator """rankbased""" +991 3 dataset """wn18rr""" +991 3 model """tucker""" +991 3 loss """bceaftersigmoid""" +991 3 regularizer """no""" +991 3 optimizer """adam""" +991 3 training_loop """lcwa""" +991 3 evaluator """rankbased""" +991 4 dataset """wn18rr""" +991 4 model """tucker""" +991 4 loss """bceaftersigmoid""" +991 4 regularizer """no""" +991 4 
optimizer """adam""" +991 4 training_loop """lcwa""" +991 4 evaluator """rankbased""" +991 5 dataset """wn18rr""" +991 5 model """tucker""" +991 5 loss """bceaftersigmoid""" +991 5 regularizer """no""" +991 5 optimizer """adam""" +991 5 training_loop """lcwa""" +991 5 evaluator """rankbased""" +991 6 dataset """wn18rr""" +991 6 model """tucker""" +991 6 loss """bceaftersigmoid""" +991 6 regularizer """no""" +991 6 optimizer """adam""" +991 6 training_loop """lcwa""" +991 6 evaluator """rankbased""" +991 7 dataset """wn18rr""" +991 7 model """tucker""" +991 7 loss """bceaftersigmoid""" +991 7 regularizer """no""" +991 7 optimizer """adam""" +991 7 training_loop """lcwa""" +991 7 evaluator """rankbased""" +991 8 dataset """wn18rr""" +991 8 model """tucker""" +991 8 loss """bceaftersigmoid""" +991 8 regularizer """no""" +991 8 optimizer """adam""" +991 8 training_loop """lcwa""" +991 8 evaluator """rankbased""" +991 9 dataset """wn18rr""" +991 9 model """tucker""" +991 9 loss """bceaftersigmoid""" +991 9 regularizer """no""" +991 9 optimizer """adam""" +991 9 training_loop """lcwa""" +991 9 evaluator """rankbased""" +991 10 dataset """wn18rr""" +991 10 model """tucker""" +991 10 loss """bceaftersigmoid""" +991 10 regularizer """no""" +991 10 optimizer """adam""" +991 10 training_loop """lcwa""" +991 10 evaluator """rankbased""" +991 11 dataset """wn18rr""" +991 11 model """tucker""" +991 11 loss """bceaftersigmoid""" +991 11 regularizer """no""" +991 11 optimizer """adam""" +991 11 training_loop """lcwa""" +991 11 evaluator """rankbased""" +991 12 dataset """wn18rr""" +991 12 model """tucker""" +991 12 loss """bceaftersigmoid""" +991 12 regularizer """no""" +991 12 optimizer """adam""" +991 12 training_loop """lcwa""" +991 12 evaluator """rankbased""" +991 13 dataset """wn18rr""" +991 13 model """tucker""" +991 13 loss """bceaftersigmoid""" +991 13 regularizer """no""" +991 13 optimizer """adam""" +991 13 training_loop """lcwa""" +991 13 evaluator """rankbased""" +991 
14 dataset """wn18rr""" +991 14 model """tucker""" +991 14 loss """bceaftersigmoid""" +991 14 regularizer """no""" +991 14 optimizer """adam""" +991 14 training_loop """lcwa""" +991 14 evaluator """rankbased""" +991 15 dataset """wn18rr""" +991 15 model """tucker""" +991 15 loss """bceaftersigmoid""" +991 15 regularizer """no""" +991 15 optimizer """adam""" +991 15 training_loop """lcwa""" +991 15 evaluator """rankbased""" +991 16 dataset """wn18rr""" +991 16 model """tucker""" +991 16 loss """bceaftersigmoid""" +991 16 regularizer """no""" +991 16 optimizer """adam""" +991 16 training_loop """lcwa""" +991 16 evaluator """rankbased""" +991 17 dataset """wn18rr""" +991 17 model """tucker""" +991 17 loss """bceaftersigmoid""" +991 17 regularizer """no""" +991 17 optimizer """adam""" +991 17 training_loop """lcwa""" +991 17 evaluator """rankbased""" +992 1 model.embedding_dim 2.0 +992 1 model.scoring_fct_norm 1.0 +992 1 optimizer.lr 0.0353150596984259 +992 1 training.batch_size 2.0 +992 1 training.label_smoothing 0.7499758761925286 +992 2 model.embedding_dim 0.0 +992 2 model.scoring_fct_norm 2.0 +992 2 optimizer.lr 0.00207982883469127 +992 2 training.batch_size 0.0 +992 2 training.label_smoothing 0.0011220693207877929 +992 3 model.embedding_dim 2.0 +992 3 model.scoring_fct_norm 2.0 +992 3 optimizer.lr 0.002083597242468191 +992 3 training.batch_size 1.0 +992 3 training.label_smoothing 0.02038181502353733 +992 4 model.embedding_dim 0.0 +992 4 model.scoring_fct_norm 2.0 +992 4 optimizer.lr 0.009205850150595773 +992 4 training.batch_size 2.0 +992 4 training.label_smoothing 0.017897942046724683 +992 5 model.embedding_dim 1.0 +992 5 model.scoring_fct_norm 1.0 +992 5 optimizer.lr 0.0012068105341949825 +992 5 training.batch_size 1.0 +992 5 training.label_smoothing 0.11204968524483286 +992 6 model.embedding_dim 0.0 +992 6 model.scoring_fct_norm 1.0 +992 6 optimizer.lr 0.007796016382502257 +992 6 training.batch_size 0.0 +992 6 training.label_smoothing 0.010352957912778245 +992 
7 model.embedding_dim 2.0 +992 7 model.scoring_fct_norm 1.0 +992 7 optimizer.lr 0.0014241041785307605 +992 7 training.batch_size 0.0 +992 7 training.label_smoothing 0.008575375609567922 +992 8 model.embedding_dim 1.0 +992 8 model.scoring_fct_norm 2.0 +992 8 optimizer.lr 0.012979363487133085 +992 8 training.batch_size 0.0 +992 8 training.label_smoothing 0.17490912576565049 +992 9 model.embedding_dim 1.0 +992 9 model.scoring_fct_norm 2.0 +992 9 optimizer.lr 0.011882575294559581 +992 9 training.batch_size 0.0 +992 9 training.label_smoothing 0.5460126860329405 +992 10 model.embedding_dim 2.0 +992 10 model.scoring_fct_norm 1.0 +992 10 optimizer.lr 0.006666214885730066 +992 10 training.batch_size 1.0 +992 10 training.label_smoothing 0.008994659014435174 +992 11 model.embedding_dim 1.0 +992 11 model.scoring_fct_norm 2.0 +992 11 optimizer.lr 0.018082517597093447 +992 11 training.batch_size 2.0 +992 11 training.label_smoothing 0.6724027988281513 +992 12 model.embedding_dim 2.0 +992 12 model.scoring_fct_norm 1.0 +992 12 optimizer.lr 0.024065653317131338 +992 12 training.batch_size 1.0 +992 12 training.label_smoothing 0.05124611875581385 +992 13 model.embedding_dim 0.0 +992 13 model.scoring_fct_norm 1.0 +992 13 optimizer.lr 0.002395868853267959 +992 13 training.batch_size 0.0 +992 13 training.label_smoothing 0.06752173684105153 +992 14 model.embedding_dim 0.0 +992 14 model.scoring_fct_norm 2.0 +992 14 optimizer.lr 0.015233090601219119 +992 14 training.batch_size 1.0 +992 14 training.label_smoothing 0.04688245946222432 +992 15 model.embedding_dim 0.0 +992 15 model.scoring_fct_norm 1.0 +992 15 optimizer.lr 0.0687250572786446 +992 15 training.batch_size 2.0 +992 15 training.label_smoothing 0.7408758476169168 +992 16 model.embedding_dim 2.0 +992 16 model.scoring_fct_norm 2.0 +992 16 optimizer.lr 0.027820391069219894 +992 16 training.batch_size 0.0 +992 16 training.label_smoothing 0.0025059443166392053 +992 17 model.embedding_dim 0.0 +992 17 model.scoring_fct_norm 1.0 +992 17 
optimizer.lr 0.0033768220824745656 +992 17 training.batch_size 2.0 +992 17 training.label_smoothing 0.002216535127724696 +992 18 model.embedding_dim 2.0 +992 18 model.scoring_fct_norm 2.0 +992 18 optimizer.lr 0.017474716260041444 +992 18 training.batch_size 2.0 +992 18 training.label_smoothing 0.0019106665360605476 +992 19 model.embedding_dim 0.0 +992 19 model.scoring_fct_norm 1.0 +992 19 optimizer.lr 0.06252285825329777 +992 19 training.batch_size 2.0 +992 19 training.label_smoothing 0.7061878997041583 +992 20 model.embedding_dim 2.0 +992 20 model.scoring_fct_norm 1.0 +992 20 optimizer.lr 0.0064754362456125834 +992 20 training.batch_size 0.0 +992 20 training.label_smoothing 0.07662930298167539 +992 21 model.embedding_dim 1.0 +992 21 model.scoring_fct_norm 1.0 +992 21 optimizer.lr 0.0016047008927854074 +992 21 training.batch_size 0.0 +992 21 training.label_smoothing 0.0016943027968775333 +992 22 model.embedding_dim 2.0 +992 22 model.scoring_fct_norm 2.0 +992 22 optimizer.lr 0.0016559587688712877 +992 22 training.batch_size 1.0 +992 22 training.label_smoothing 0.012224980227832449 +992 1 dataset """fb15k237""" +992 1 model """unstructuredmodel""" +992 1 loss """bceaftersigmoid""" +992 1 regularizer """no""" +992 1 optimizer """adam""" +992 1 training_loop """lcwa""" +992 1 evaluator """rankbased""" +992 2 dataset """fb15k237""" +992 2 model """unstructuredmodel""" +992 2 loss """bceaftersigmoid""" +992 2 regularizer """no""" +992 2 optimizer """adam""" +992 2 training_loop """lcwa""" +992 2 evaluator """rankbased""" +992 3 dataset """fb15k237""" +992 3 model """unstructuredmodel""" +992 3 loss """bceaftersigmoid""" +992 3 regularizer """no""" +992 3 optimizer """adam""" +992 3 training_loop """lcwa""" +992 3 evaluator """rankbased""" +992 4 dataset """fb15k237""" +992 4 model """unstructuredmodel""" +992 4 loss """bceaftersigmoid""" +992 4 regularizer """no""" +992 4 optimizer """adam""" +992 4 training_loop """lcwa""" +992 4 evaluator """rankbased""" +992 5 dataset 
"""fb15k237""" +992 5 model """unstructuredmodel""" +992 5 loss """bceaftersigmoid""" +992 5 regularizer """no""" +992 5 optimizer """adam""" +992 5 training_loop """lcwa""" +992 5 evaluator """rankbased""" +992 6 dataset """fb15k237""" +992 6 model """unstructuredmodel""" +992 6 loss """bceaftersigmoid""" +992 6 regularizer """no""" +992 6 optimizer """adam""" +992 6 training_loop """lcwa""" +992 6 evaluator """rankbased""" +992 7 dataset """fb15k237""" +992 7 model """unstructuredmodel""" +992 7 loss """bceaftersigmoid""" +992 7 regularizer """no""" +992 7 optimizer """adam""" +992 7 training_loop """lcwa""" +992 7 evaluator """rankbased""" +992 8 dataset """fb15k237""" +992 8 model """unstructuredmodel""" +992 8 loss """bceaftersigmoid""" +992 8 regularizer """no""" +992 8 optimizer """adam""" +992 8 training_loop """lcwa""" +992 8 evaluator """rankbased""" +992 9 dataset """fb15k237""" +992 9 model """unstructuredmodel""" +992 9 loss """bceaftersigmoid""" +992 9 regularizer """no""" +992 9 optimizer """adam""" +992 9 training_loop """lcwa""" +992 9 evaluator """rankbased""" +992 10 dataset """fb15k237""" +992 10 model """unstructuredmodel""" +992 10 loss """bceaftersigmoid""" +992 10 regularizer """no""" +992 10 optimizer """adam""" +992 10 training_loop """lcwa""" +992 10 evaluator """rankbased""" +992 11 dataset """fb15k237""" +992 11 model """unstructuredmodel""" +992 11 loss """bceaftersigmoid""" +992 11 regularizer """no""" +992 11 optimizer """adam""" +992 11 training_loop """lcwa""" +992 11 evaluator """rankbased""" +992 12 dataset """fb15k237""" +992 12 model """unstructuredmodel""" +992 12 loss """bceaftersigmoid""" +992 12 regularizer """no""" +992 12 optimizer """adam""" +992 12 training_loop """lcwa""" +992 12 evaluator """rankbased""" +992 13 dataset """fb15k237""" +992 13 model """unstructuredmodel""" +992 13 loss """bceaftersigmoid""" +992 13 regularizer """no""" +992 13 optimizer """adam""" +992 13 training_loop """lcwa""" +992 13 evaluator 
"""rankbased""" +992 14 dataset """fb15k237""" +992 14 model """unstructuredmodel""" +992 14 loss """bceaftersigmoid""" +992 14 regularizer """no""" +992 14 optimizer """adam""" +992 14 training_loop """lcwa""" +992 14 evaluator """rankbased""" +992 15 dataset """fb15k237""" +992 15 model """unstructuredmodel""" +992 15 loss """bceaftersigmoid""" +992 15 regularizer """no""" +992 15 optimizer """adam""" +992 15 training_loop """lcwa""" +992 15 evaluator """rankbased""" +992 16 dataset """fb15k237""" +992 16 model """unstructuredmodel""" +992 16 loss """bceaftersigmoid""" +992 16 regularizer """no""" +992 16 optimizer """adam""" +992 16 training_loop """lcwa""" +992 16 evaluator """rankbased""" +992 17 dataset """fb15k237""" +992 17 model """unstructuredmodel""" +992 17 loss """bceaftersigmoid""" +992 17 regularizer """no""" +992 17 optimizer """adam""" +992 17 training_loop """lcwa""" +992 17 evaluator """rankbased""" +992 18 dataset """fb15k237""" +992 18 model """unstructuredmodel""" +992 18 loss """bceaftersigmoid""" +992 18 regularizer """no""" +992 18 optimizer """adam""" +992 18 training_loop """lcwa""" +992 18 evaluator """rankbased""" +992 19 dataset """fb15k237""" +992 19 model """unstructuredmodel""" +992 19 loss """bceaftersigmoid""" +992 19 regularizer """no""" +992 19 optimizer """adam""" +992 19 training_loop """lcwa""" +992 19 evaluator """rankbased""" +992 20 dataset """fb15k237""" +992 20 model """unstructuredmodel""" +992 20 loss """bceaftersigmoid""" +992 20 regularizer """no""" +992 20 optimizer """adam""" +992 20 training_loop """lcwa""" +992 20 evaluator """rankbased""" +992 21 dataset """fb15k237""" +992 21 model """unstructuredmodel""" +992 21 loss """bceaftersigmoid""" +992 21 regularizer """no""" +992 21 optimizer """adam""" +992 21 training_loop """lcwa""" +992 21 evaluator """rankbased""" +992 22 dataset """fb15k237""" +992 22 model """unstructuredmodel""" +992 22 loss """bceaftersigmoid""" +992 22 regularizer """no""" +992 22 optimizer 
"""adam""" +992 22 training_loop """lcwa""" +992 22 evaluator """rankbased""" +993 1 model.embedding_dim 0.0 +993 1 model.scoring_fct_norm 2.0 +993 1 optimizer.lr 0.0027180883155425825 +993 1 training.batch_size 1.0 +993 1 training.label_smoothing 0.10916639054874169 +993 2 model.embedding_dim 2.0 +993 2 model.scoring_fct_norm 1.0 +993 2 optimizer.lr 0.009151806270644146 +993 2 training.batch_size 1.0 +993 2 training.label_smoothing 0.16730946362650087 +993 3 model.embedding_dim 2.0 +993 3 model.scoring_fct_norm 1.0 +993 3 optimizer.lr 0.013096166988964193 +993 3 training.batch_size 1.0 +993 3 training.label_smoothing 0.06615251408069014 +993 4 model.embedding_dim 0.0 +993 4 model.scoring_fct_norm 2.0 +993 4 optimizer.lr 0.0025871843013223093 +993 4 training.batch_size 2.0 +993 4 training.label_smoothing 0.002259927219015664 +993 5 model.embedding_dim 2.0 +993 5 model.scoring_fct_norm 1.0 +993 5 optimizer.lr 0.011406656641680795 +993 5 training.batch_size 1.0 +993 5 training.label_smoothing 0.0032778804363988766 +993 6 model.embedding_dim 0.0 +993 6 model.scoring_fct_norm 2.0 +993 6 optimizer.lr 0.0026023298798389916 +993 6 training.batch_size 2.0 +993 6 training.label_smoothing 0.33510057498790874 +993 7 model.embedding_dim 2.0 +993 7 model.scoring_fct_norm 2.0 +993 7 optimizer.lr 0.032723530460045315 +993 7 training.batch_size 0.0 +993 7 training.label_smoothing 0.2714148780389219 +993 8 model.embedding_dim 2.0 +993 8 model.scoring_fct_norm 2.0 +993 8 optimizer.lr 0.017562165629449174 +993 8 training.batch_size 0.0 +993 8 training.label_smoothing 0.029221962616471787 +993 9 model.embedding_dim 0.0 +993 9 model.scoring_fct_norm 2.0 +993 9 optimizer.lr 0.001190749532120861 +993 9 training.batch_size 2.0 +993 9 training.label_smoothing 0.6063953209201457 +993 10 model.embedding_dim 1.0 +993 10 model.scoring_fct_norm 1.0 +993 10 optimizer.lr 0.05564747600890148 +993 10 training.batch_size 1.0 +993 10 training.label_smoothing 0.010609364609923858 +993 11 
model.embedding_dim 1.0 +993 11 model.scoring_fct_norm 2.0 +993 11 optimizer.lr 0.07544878385238764 +993 11 training.batch_size 1.0 +993 11 training.label_smoothing 0.017239350199005778 +993 12 model.embedding_dim 2.0 +993 12 model.scoring_fct_norm 1.0 +993 12 optimizer.lr 0.07386434524203954 +993 12 training.batch_size 2.0 +993 12 training.label_smoothing 0.19449727889242455 +993 13 model.embedding_dim 0.0 +993 13 model.scoring_fct_norm 2.0 +993 13 optimizer.lr 0.0017059407096558432 +993 13 training.batch_size 0.0 +993 13 training.label_smoothing 0.42777823844012197 +993 14 model.embedding_dim 0.0 +993 14 model.scoring_fct_norm 1.0 +993 14 optimizer.lr 0.008330906006075493 +993 14 training.batch_size 1.0 +993 14 training.label_smoothing 0.011584969099680294 +993 15 model.embedding_dim 2.0 +993 15 model.scoring_fct_norm 1.0 +993 15 optimizer.lr 0.002014503505756636 +993 15 training.batch_size 1.0 +993 15 training.label_smoothing 0.36188493922240655 +993 16 model.embedding_dim 0.0 +993 16 model.scoring_fct_norm 2.0 +993 16 optimizer.lr 0.029374814537503725 +993 16 training.batch_size 0.0 +993 16 training.label_smoothing 0.0011662686739174301 +993 17 model.embedding_dim 0.0 +993 17 model.scoring_fct_norm 2.0 +993 17 optimizer.lr 0.007351272368172753 +993 17 training.batch_size 0.0 +993 17 training.label_smoothing 0.0027727227961558847 +993 18 model.embedding_dim 2.0 +993 18 model.scoring_fct_norm 1.0 +993 18 optimizer.lr 0.04942937007435632 +993 18 training.batch_size 2.0 +993 18 training.label_smoothing 0.019737470416468523 +993 19 model.embedding_dim 0.0 +993 19 model.scoring_fct_norm 2.0 +993 19 optimizer.lr 0.0010182210580502009 +993 19 training.batch_size 1.0 +993 19 training.label_smoothing 0.004898917212770493 +993 20 model.embedding_dim 2.0 +993 20 model.scoring_fct_norm 1.0 +993 20 optimizer.lr 0.0014379806832594418 +993 20 training.batch_size 0.0 +993 20 training.label_smoothing 0.08223436033753631 +993 1 dataset """fb15k237""" +993 1 model 
"""unstructuredmodel""" +993 1 loss """softplus""" +993 1 regularizer """no""" +993 1 optimizer """adam""" +993 1 training_loop """lcwa""" +993 1 evaluator """rankbased""" +993 2 dataset """fb15k237""" +993 2 model """unstructuredmodel""" +993 2 loss """softplus""" +993 2 regularizer """no""" +993 2 optimizer """adam""" +993 2 training_loop """lcwa""" +993 2 evaluator """rankbased""" +993 3 dataset """fb15k237""" +993 3 model """unstructuredmodel""" +993 3 loss """softplus""" +993 3 regularizer """no""" +993 3 optimizer """adam""" +993 3 training_loop """lcwa""" +993 3 evaluator """rankbased""" +993 4 dataset """fb15k237""" +993 4 model """unstructuredmodel""" +993 4 loss """softplus""" +993 4 regularizer """no""" +993 4 optimizer """adam""" +993 4 training_loop """lcwa""" +993 4 evaluator """rankbased""" +993 5 dataset """fb15k237""" +993 5 model """unstructuredmodel""" +993 5 loss """softplus""" +993 5 regularizer """no""" +993 5 optimizer """adam""" +993 5 training_loop """lcwa""" +993 5 evaluator """rankbased""" +993 6 dataset """fb15k237""" +993 6 model """unstructuredmodel""" +993 6 loss """softplus""" +993 6 regularizer """no""" +993 6 optimizer """adam""" +993 6 training_loop """lcwa""" +993 6 evaluator """rankbased""" +993 7 dataset """fb15k237""" +993 7 model """unstructuredmodel""" +993 7 loss """softplus""" +993 7 regularizer """no""" +993 7 optimizer """adam""" +993 7 training_loop """lcwa""" +993 7 evaluator """rankbased""" +993 8 dataset """fb15k237""" +993 8 model """unstructuredmodel""" +993 8 loss """softplus""" +993 8 regularizer """no""" +993 8 optimizer """adam""" +993 8 training_loop """lcwa""" +993 8 evaluator """rankbased""" +993 9 dataset """fb15k237""" +993 9 model """unstructuredmodel""" +993 9 loss """softplus""" +993 9 regularizer """no""" +993 9 optimizer """adam""" +993 9 training_loop """lcwa""" +993 9 evaluator """rankbased""" +993 10 dataset """fb15k237""" +993 10 model """unstructuredmodel""" +993 10 loss """softplus""" +993 10 
regularizer """no""" +993 10 optimizer """adam""" +993 10 training_loop """lcwa""" +993 10 evaluator """rankbased""" +993 11 dataset """fb15k237""" +993 11 model """unstructuredmodel""" +993 11 loss """softplus""" +993 11 regularizer """no""" +993 11 optimizer """adam""" +993 11 training_loop """lcwa""" +993 11 evaluator """rankbased""" +993 12 dataset """fb15k237""" +993 12 model """unstructuredmodel""" +993 12 loss """softplus""" +993 12 regularizer """no""" +993 12 optimizer """adam""" +993 12 training_loop """lcwa""" +993 12 evaluator """rankbased""" +993 13 dataset """fb15k237""" +993 13 model """unstructuredmodel""" +993 13 loss """softplus""" +993 13 regularizer """no""" +993 13 optimizer """adam""" +993 13 training_loop """lcwa""" +993 13 evaluator """rankbased""" +993 14 dataset """fb15k237""" +993 14 model """unstructuredmodel""" +993 14 loss """softplus""" +993 14 regularizer """no""" +993 14 optimizer """adam""" +993 14 training_loop """lcwa""" +993 14 evaluator """rankbased""" +993 15 dataset """fb15k237""" +993 15 model """unstructuredmodel""" +993 15 loss """softplus""" +993 15 regularizer """no""" +993 15 optimizer """adam""" +993 15 training_loop """lcwa""" +993 15 evaluator """rankbased""" +993 16 dataset """fb15k237""" +993 16 model """unstructuredmodel""" +993 16 loss """softplus""" +993 16 regularizer """no""" +993 16 optimizer """adam""" +993 16 training_loop """lcwa""" +993 16 evaluator """rankbased""" +993 17 dataset """fb15k237""" +993 17 model """unstructuredmodel""" +993 17 loss """softplus""" +993 17 regularizer """no""" +993 17 optimizer """adam""" +993 17 training_loop """lcwa""" +993 17 evaluator """rankbased""" +993 18 dataset """fb15k237""" +993 18 model """unstructuredmodel""" +993 18 loss """softplus""" +993 18 regularizer """no""" +993 18 optimizer """adam""" +993 18 training_loop """lcwa""" +993 18 evaluator """rankbased""" +993 19 dataset """fb15k237""" +993 19 model """unstructuredmodel""" +993 19 loss """softplus""" +993 19 
regularizer """no""" +993 19 optimizer """adam""" +993 19 training_loop """lcwa""" +993 19 evaluator """rankbased""" +993 20 dataset """fb15k237""" +993 20 model """unstructuredmodel""" +993 20 loss """softplus""" +993 20 regularizer """no""" +993 20 optimizer """adam""" +993 20 training_loop """lcwa""" +993 20 evaluator """rankbased""" +994 1 model.embedding_dim 1.0 +994 1 model.scoring_fct_norm 1.0 +994 1 optimizer.lr 0.04956230684387593 +994 1 training.batch_size 2.0 +994 1 training.label_smoothing 0.22333793203866148 +994 2 model.embedding_dim 2.0 +994 2 model.scoring_fct_norm 1.0 +994 2 optimizer.lr 0.09777146211168491 +994 2 training.batch_size 1.0 +994 2 training.label_smoothing 0.06374899818783444 +994 3 model.embedding_dim 2.0 +994 3 model.scoring_fct_norm 1.0 +994 3 optimizer.lr 0.0030705264763995744 +994 3 training.batch_size 2.0 +994 3 training.label_smoothing 0.18346639384347474 +994 4 model.embedding_dim 2.0 +994 4 model.scoring_fct_norm 1.0 +994 4 optimizer.lr 0.0027692650900358476 +994 4 training.batch_size 1.0 +994 4 training.label_smoothing 0.01931907328996779 +994 5 model.embedding_dim 2.0 +994 5 model.scoring_fct_norm 1.0 +994 5 optimizer.lr 0.0011975109459946287 +994 5 training.batch_size 0.0 +994 5 training.label_smoothing 0.26424990355836553 +994 6 model.embedding_dim 0.0 +994 6 model.scoring_fct_norm 1.0 +994 6 optimizer.lr 0.0010628032292446775 +994 6 training.batch_size 0.0 +994 6 training.label_smoothing 0.021181070024395336 +994 7 model.embedding_dim 2.0 +994 7 model.scoring_fct_norm 2.0 +994 7 optimizer.lr 0.0013286140983305922 +994 7 training.batch_size 0.0 +994 7 training.label_smoothing 0.010986556502394908 +994 8 model.embedding_dim 2.0 +994 8 model.scoring_fct_norm 1.0 +994 8 optimizer.lr 0.042200393837061534 +994 8 training.batch_size 0.0 +994 8 training.label_smoothing 0.29787717263899116 +994 9 model.embedding_dim 2.0 +994 9 model.scoring_fct_norm 1.0 +994 9 optimizer.lr 0.0018616591682488251 +994 9 training.batch_size 2.0 +994 
9 training.label_smoothing 0.0017378081877437923 +994 10 model.embedding_dim 0.0 +994 10 model.scoring_fct_norm 1.0 +994 10 optimizer.lr 0.004979193144910875 +994 10 training.batch_size 0.0 +994 10 training.label_smoothing 0.0011525069515140624 +994 11 model.embedding_dim 2.0 +994 11 model.scoring_fct_norm 2.0 +994 11 optimizer.lr 0.0034315683127299243 +994 11 training.batch_size 2.0 +994 11 training.label_smoothing 0.0014386654557167597 +994 12 model.embedding_dim 0.0 +994 12 model.scoring_fct_norm 1.0 +994 12 optimizer.lr 0.016314019493889904 +994 12 training.batch_size 1.0 +994 12 training.label_smoothing 0.2030533236318551 +994 13 model.embedding_dim 0.0 +994 13 model.scoring_fct_norm 2.0 +994 13 optimizer.lr 0.05830504496541641 +994 13 training.batch_size 1.0 +994 13 training.label_smoothing 0.10904307461778322 +994 14 model.embedding_dim 1.0 +994 14 model.scoring_fct_norm 2.0 +994 14 optimizer.lr 0.00455732061628773 +994 14 training.batch_size 2.0 +994 14 training.label_smoothing 0.4320729249168824 +994 15 model.embedding_dim 0.0 +994 15 model.scoring_fct_norm 2.0 +994 15 optimizer.lr 0.0024684996827391086 +994 15 training.batch_size 1.0 +994 15 training.label_smoothing 0.005469477166605616 +994 16 model.embedding_dim 0.0 +994 16 model.scoring_fct_norm 1.0 +994 16 optimizer.lr 0.052495322817708946 +994 16 training.batch_size 1.0 +994 16 training.label_smoothing 0.002966922237842685 +994 17 model.embedding_dim 0.0 +994 17 model.scoring_fct_norm 1.0 +994 17 optimizer.lr 0.09183420671506541 +994 17 training.batch_size 2.0 +994 17 training.label_smoothing 0.0025604888535247588 +994 18 model.embedding_dim 1.0 +994 18 model.scoring_fct_norm 2.0 +994 18 optimizer.lr 0.023686024782003472 +994 18 training.batch_size 0.0 +994 18 training.label_smoothing 0.001601901976974391 +994 19 model.embedding_dim 0.0 +994 19 model.scoring_fct_norm 1.0 +994 19 optimizer.lr 0.002678251596423231 +994 19 training.batch_size 2.0 +994 19 training.label_smoothing 0.04021914938172937 +994 
20 model.embedding_dim 1.0 +994 20 model.scoring_fct_norm 1.0 +994 20 optimizer.lr 0.00457430404345947 +994 20 training.batch_size 0.0 +994 20 training.label_smoothing 0.005805547164639313 +994 21 model.embedding_dim 1.0 +994 21 model.scoring_fct_norm 1.0 +994 21 optimizer.lr 0.0026462198437281455 +994 21 training.batch_size 1.0 +994 21 training.label_smoothing 0.005359834231180314 +994 22 model.embedding_dim 0.0 +994 22 model.scoring_fct_norm 2.0 +994 22 optimizer.lr 0.09927393831105029 +994 22 training.batch_size 2.0 +994 22 training.label_smoothing 0.6853455599736737 +994 23 model.embedding_dim 2.0 +994 23 model.scoring_fct_norm 1.0 +994 23 optimizer.lr 0.06541128652514436 +994 23 training.batch_size 0.0 +994 23 training.label_smoothing 0.13098999283896093 +994 24 model.embedding_dim 0.0 +994 24 model.scoring_fct_norm 2.0 +994 24 optimizer.lr 0.010569198357515023 +994 24 training.batch_size 1.0 +994 24 training.label_smoothing 0.009549032735571289 +994 25 model.embedding_dim 1.0 +994 25 model.scoring_fct_norm 2.0 +994 25 optimizer.lr 0.0020946669871166556 +994 25 training.batch_size 2.0 +994 25 training.label_smoothing 0.051217823105736214 +994 26 model.embedding_dim 1.0 +994 26 model.scoring_fct_norm 2.0 +994 26 optimizer.lr 0.0011486012674516675 +994 26 training.batch_size 1.0 +994 26 training.label_smoothing 0.0687598780068834 +994 27 model.embedding_dim 0.0 +994 27 model.scoring_fct_norm 1.0 +994 27 optimizer.lr 0.002353939427561364 +994 27 training.batch_size 0.0 +994 27 training.label_smoothing 0.005756505298907252 +994 28 model.embedding_dim 1.0 +994 28 model.scoring_fct_norm 2.0 +994 28 optimizer.lr 0.0015571485914295973 +994 28 training.batch_size 1.0 +994 28 training.label_smoothing 0.014591373528940105 +994 29 model.embedding_dim 1.0 +994 29 model.scoring_fct_norm 2.0 +994 29 optimizer.lr 0.007437388734791972 +994 29 training.batch_size 0.0 +994 29 training.label_smoothing 0.2823214935597654 +994 30 model.embedding_dim 0.0 +994 30 
model.scoring_fct_norm 2.0 +994 30 optimizer.lr 0.08151048717948549 +994 30 training.batch_size 2.0 +994 30 training.label_smoothing 0.6691290588240002 +994 31 model.embedding_dim 2.0 +994 31 model.scoring_fct_norm 1.0 +994 31 optimizer.lr 0.002035008233719746 +994 31 training.batch_size 1.0 +994 31 training.label_smoothing 0.7073449171552173 +994 32 model.embedding_dim 1.0 +994 32 model.scoring_fct_norm 2.0 +994 32 optimizer.lr 0.0021887399674701984 +994 32 training.batch_size 1.0 +994 32 training.label_smoothing 0.0022725538671665092 +994 33 model.embedding_dim 0.0 +994 33 model.scoring_fct_norm 1.0 +994 33 optimizer.lr 0.027840746050511393 +994 33 training.batch_size 2.0 +994 33 training.label_smoothing 0.0012250001258692054 +994 34 model.embedding_dim 2.0 +994 34 model.scoring_fct_norm 1.0 +994 34 optimizer.lr 0.011288030222271967 +994 34 training.batch_size 0.0 +994 34 training.label_smoothing 0.8083090537647502 +994 35 model.embedding_dim 0.0 +994 35 model.scoring_fct_norm 1.0 +994 35 optimizer.lr 0.006036354403818991 +994 35 training.batch_size 0.0 +994 35 training.label_smoothing 0.0012147100879162445 +994 36 model.embedding_dim 0.0 +994 36 model.scoring_fct_norm 2.0 +994 36 optimizer.lr 0.05499125049804451 +994 36 training.batch_size 2.0 +994 36 training.label_smoothing 0.31314239125148025 +994 37 model.embedding_dim 0.0 +994 37 model.scoring_fct_norm 2.0 +994 37 optimizer.lr 0.005112867578535092 +994 37 training.batch_size 1.0 +994 37 training.label_smoothing 0.008789374461883544 +994 38 model.embedding_dim 2.0 +994 38 model.scoring_fct_norm 2.0 +994 38 optimizer.lr 0.048676436762756285 +994 38 training.batch_size 1.0 +994 38 training.label_smoothing 0.052800759338556054 +994 39 model.embedding_dim 1.0 +994 39 model.scoring_fct_norm 1.0 +994 39 optimizer.lr 0.004202602992667864 +994 39 training.batch_size 2.0 +994 39 training.label_smoothing 0.01643504072658827 +994 40 model.embedding_dim 2.0 +994 40 model.scoring_fct_norm 2.0 +994 40 optimizer.lr 
0.001836557225317173 +994 40 training.batch_size 1.0 +994 40 training.label_smoothing 0.00437803509181198 +994 1 dataset """fb15k237""" +994 1 model """unstructuredmodel""" +994 1 loss """bceaftersigmoid""" +994 1 regularizer """no""" +994 1 optimizer """adam""" +994 1 training_loop """lcwa""" +994 1 evaluator """rankbased""" +994 2 dataset """fb15k237""" +994 2 model """unstructuredmodel""" +994 2 loss """bceaftersigmoid""" +994 2 regularizer """no""" +994 2 optimizer """adam""" +994 2 training_loop """lcwa""" +994 2 evaluator """rankbased""" +994 3 dataset """fb15k237""" +994 3 model """unstructuredmodel""" +994 3 loss """bceaftersigmoid""" +994 3 regularizer """no""" +994 3 optimizer """adam""" +994 3 training_loop """lcwa""" +994 3 evaluator """rankbased""" +994 4 dataset """fb15k237""" +994 4 model """unstructuredmodel""" +994 4 loss """bceaftersigmoid""" +994 4 regularizer """no""" +994 4 optimizer """adam""" +994 4 training_loop """lcwa""" +994 4 evaluator """rankbased""" +994 5 dataset """fb15k237""" +994 5 model """unstructuredmodel""" +994 5 loss """bceaftersigmoid""" +994 5 regularizer """no""" +994 5 optimizer """adam""" +994 5 training_loop """lcwa""" +994 5 evaluator """rankbased""" +994 6 dataset """fb15k237""" +994 6 model """unstructuredmodel""" +994 6 loss """bceaftersigmoid""" +994 6 regularizer """no""" +994 6 optimizer """adam""" +994 6 training_loop """lcwa""" +994 6 evaluator """rankbased""" +994 7 dataset """fb15k237""" +994 7 model """unstructuredmodel""" +994 7 loss """bceaftersigmoid""" +994 7 regularizer """no""" +994 7 optimizer """adam""" +994 7 training_loop """lcwa""" +994 7 evaluator """rankbased""" +994 8 dataset """fb15k237""" +994 8 model """unstructuredmodel""" +994 8 loss """bceaftersigmoid""" +994 8 regularizer """no""" +994 8 optimizer """adam""" +994 8 training_loop """lcwa""" +994 8 evaluator """rankbased""" +994 9 dataset """fb15k237""" +994 9 model """unstructuredmodel""" +994 9 loss """bceaftersigmoid""" +994 9 
regularizer """no""" +994 9 optimizer """adam""" +994 9 training_loop """lcwa""" +994 9 evaluator """rankbased""" +994 10 dataset """fb15k237""" +994 10 model """unstructuredmodel""" +994 10 loss """bceaftersigmoid""" +994 10 regularizer """no""" +994 10 optimizer """adam""" +994 10 training_loop """lcwa""" +994 10 evaluator """rankbased""" +994 11 dataset """fb15k237""" +994 11 model """unstructuredmodel""" +994 11 loss """bceaftersigmoid""" +994 11 regularizer """no""" +994 11 optimizer """adam""" +994 11 training_loop """lcwa""" +994 11 evaluator """rankbased""" +994 12 dataset """fb15k237""" +994 12 model """unstructuredmodel""" +994 12 loss """bceaftersigmoid""" +994 12 regularizer """no""" +994 12 optimizer """adam""" +994 12 training_loop """lcwa""" +994 12 evaluator """rankbased""" +994 13 dataset """fb15k237""" +994 13 model """unstructuredmodel""" +994 13 loss """bceaftersigmoid""" +994 13 regularizer """no""" +994 13 optimizer """adam""" +994 13 training_loop """lcwa""" +994 13 evaluator """rankbased""" +994 14 dataset """fb15k237""" +994 14 model """unstructuredmodel""" +994 14 loss """bceaftersigmoid""" +994 14 regularizer """no""" +994 14 optimizer """adam""" +994 14 training_loop """lcwa""" +994 14 evaluator """rankbased""" +994 15 dataset """fb15k237""" +994 15 model """unstructuredmodel""" +994 15 loss """bceaftersigmoid""" +994 15 regularizer """no""" +994 15 optimizer """adam""" +994 15 training_loop """lcwa""" +994 15 evaluator """rankbased""" +994 16 dataset """fb15k237""" +994 16 model """unstructuredmodel""" +994 16 loss """bceaftersigmoid""" +994 16 regularizer """no""" +994 16 optimizer """adam""" +994 16 training_loop """lcwa""" +994 16 evaluator """rankbased""" +994 17 dataset """fb15k237""" +994 17 model """unstructuredmodel""" +994 17 loss """bceaftersigmoid""" +994 17 regularizer """no""" +994 17 optimizer """adam""" +994 17 training_loop """lcwa""" +994 17 evaluator """rankbased""" +994 18 dataset """fb15k237""" +994 18 model 
"""unstructuredmodel""" +994 18 loss """bceaftersigmoid""" +994 18 regularizer """no""" +994 18 optimizer """adam""" +994 18 training_loop """lcwa""" +994 18 evaluator """rankbased""" +994 19 dataset """fb15k237""" +994 19 model """unstructuredmodel""" +994 19 loss """bceaftersigmoid""" +994 19 regularizer """no""" +994 19 optimizer """adam""" +994 19 training_loop """lcwa""" +994 19 evaluator """rankbased""" +994 20 dataset """fb15k237""" +994 20 model """unstructuredmodel""" +994 20 loss """bceaftersigmoid""" +994 20 regularizer """no""" +994 20 optimizer """adam""" +994 20 training_loop """lcwa""" +994 20 evaluator """rankbased""" +994 21 dataset """fb15k237""" +994 21 model """unstructuredmodel""" +994 21 loss """bceaftersigmoid""" +994 21 regularizer """no""" +994 21 optimizer """adam""" +994 21 training_loop """lcwa""" +994 21 evaluator """rankbased""" +994 22 dataset """fb15k237""" +994 22 model """unstructuredmodel""" +994 22 loss """bceaftersigmoid""" +994 22 regularizer """no""" +994 22 optimizer """adam""" +994 22 training_loop """lcwa""" +994 22 evaluator """rankbased""" +994 23 dataset """fb15k237""" +994 23 model """unstructuredmodel""" +994 23 loss """bceaftersigmoid""" +994 23 regularizer """no""" +994 23 optimizer """adam""" +994 23 training_loop """lcwa""" +994 23 evaluator """rankbased""" +994 24 dataset """fb15k237""" +994 24 model """unstructuredmodel""" +994 24 loss """bceaftersigmoid""" +994 24 regularizer """no""" +994 24 optimizer """adam""" +994 24 training_loop """lcwa""" +994 24 evaluator """rankbased""" +994 25 dataset """fb15k237""" +994 25 model """unstructuredmodel""" +994 25 loss """bceaftersigmoid""" +994 25 regularizer """no""" +994 25 optimizer """adam""" +994 25 training_loop """lcwa""" +994 25 evaluator """rankbased""" +994 26 dataset """fb15k237""" +994 26 model """unstructuredmodel""" +994 26 loss """bceaftersigmoid""" +994 26 regularizer """no""" +994 26 optimizer """adam""" +994 26 training_loop """lcwa""" +994 26 evaluator 
"""rankbased""" +994 27 dataset """fb15k237""" +994 27 model """unstructuredmodel""" +994 27 loss """bceaftersigmoid""" +994 27 regularizer """no""" +994 27 optimizer """adam""" +994 27 training_loop """lcwa""" +994 27 evaluator """rankbased""" +994 28 dataset """fb15k237""" +994 28 model """unstructuredmodel""" +994 28 loss """bceaftersigmoid""" +994 28 regularizer """no""" +994 28 optimizer """adam""" +994 28 training_loop """lcwa""" +994 28 evaluator """rankbased""" +994 29 dataset """fb15k237""" +994 29 model """unstructuredmodel""" +994 29 loss """bceaftersigmoid""" +994 29 regularizer """no""" +994 29 optimizer """adam""" +994 29 training_loop """lcwa""" +994 29 evaluator """rankbased""" +994 30 dataset """fb15k237""" +994 30 model """unstructuredmodel""" +994 30 loss """bceaftersigmoid""" +994 30 regularizer """no""" +994 30 optimizer """adam""" +994 30 training_loop """lcwa""" +994 30 evaluator """rankbased""" +994 31 dataset """fb15k237""" +994 31 model """unstructuredmodel""" +994 31 loss """bceaftersigmoid""" +994 31 regularizer """no""" +994 31 optimizer """adam""" +994 31 training_loop """lcwa""" +994 31 evaluator """rankbased""" +994 32 dataset """fb15k237""" +994 32 model """unstructuredmodel""" +994 32 loss """bceaftersigmoid""" +994 32 regularizer """no""" +994 32 optimizer """adam""" +994 32 training_loop """lcwa""" +994 32 evaluator """rankbased""" +994 33 dataset """fb15k237""" +994 33 model """unstructuredmodel""" +994 33 loss """bceaftersigmoid""" +994 33 regularizer """no""" +994 33 optimizer """adam""" +994 33 training_loop """lcwa""" +994 33 evaluator """rankbased""" +994 34 dataset """fb15k237""" +994 34 model """unstructuredmodel""" +994 34 loss """bceaftersigmoid""" +994 34 regularizer """no""" +994 34 optimizer """adam""" +994 34 training_loop """lcwa""" +994 34 evaluator """rankbased""" +994 35 dataset """fb15k237""" +994 35 model """unstructuredmodel""" +994 35 loss """bceaftersigmoid""" +994 35 regularizer """no""" +994 35 optimizer 
"""adam""" +994 35 training_loop """lcwa""" +994 35 evaluator """rankbased""" +994 36 dataset """fb15k237""" +994 36 model """unstructuredmodel""" +994 36 loss """bceaftersigmoid""" +994 36 regularizer """no""" +994 36 optimizer """adam""" +994 36 training_loop """lcwa""" +994 36 evaluator """rankbased""" +994 37 dataset """fb15k237""" +994 37 model """unstructuredmodel""" +994 37 loss """bceaftersigmoid""" +994 37 regularizer """no""" +994 37 optimizer """adam""" +994 37 training_loop """lcwa""" +994 37 evaluator """rankbased""" +994 38 dataset """fb15k237""" +994 38 model """unstructuredmodel""" +994 38 loss """bceaftersigmoid""" +994 38 regularizer """no""" +994 38 optimizer """adam""" +994 38 training_loop """lcwa""" +994 38 evaluator """rankbased""" +994 39 dataset """fb15k237""" +994 39 model """unstructuredmodel""" +994 39 loss """bceaftersigmoid""" +994 39 regularizer """no""" +994 39 optimizer """adam""" +994 39 training_loop """lcwa""" +994 39 evaluator """rankbased""" +994 40 dataset """fb15k237""" +994 40 model """unstructuredmodel""" +994 40 loss """bceaftersigmoid""" +994 40 regularizer """no""" +994 40 optimizer """adam""" +994 40 training_loop """lcwa""" +994 40 evaluator """rankbased""" +995 1 model.embedding_dim 2.0 +995 1 model.scoring_fct_norm 2.0 +995 1 optimizer.lr 0.08589682409107076 +995 1 training.batch_size 0.0 +995 1 training.label_smoothing 0.1263360923510398 +995 2 model.embedding_dim 2.0 +995 2 model.scoring_fct_norm 2.0 +995 2 optimizer.lr 0.0017486942303809752 +995 2 training.batch_size 1.0 +995 2 training.label_smoothing 0.048837245293987246 +995 3 model.embedding_dim 2.0 +995 3 model.scoring_fct_norm 2.0 +995 3 optimizer.lr 0.002313179511431622 +995 3 training.batch_size 0.0 +995 3 training.label_smoothing 0.18136566434665477 +995 4 model.embedding_dim 0.0 +995 4 model.scoring_fct_norm 1.0 +995 4 optimizer.lr 0.00788637512772404 +995 4 training.batch_size 2.0 +995 4 training.label_smoothing 0.003762261936925913 +995 5 
model.embedding_dim 2.0 +995 5 model.scoring_fct_norm 2.0 +995 5 optimizer.lr 0.028821701861291314 +995 5 training.batch_size 2.0 +995 5 training.label_smoothing 0.004218109488598666 +995 6 model.embedding_dim 1.0 +995 6 model.scoring_fct_norm 1.0 +995 6 optimizer.lr 0.007210820652499935 +995 6 training.batch_size 1.0 +995 6 training.label_smoothing 0.09695465311939123 +995 7 model.embedding_dim 1.0 +995 7 model.scoring_fct_norm 2.0 +995 7 optimizer.lr 0.01054364738573546 +995 7 training.batch_size 1.0 +995 7 training.label_smoothing 0.7283707695210038 +995 8 model.embedding_dim 2.0 +995 8 model.scoring_fct_norm 1.0 +995 8 optimizer.lr 0.00170981484827827 +995 8 training.batch_size 2.0 +995 8 training.label_smoothing 0.00550613210633197 +995 9 model.embedding_dim 2.0 +995 9 model.scoring_fct_norm 2.0 +995 9 optimizer.lr 0.005857778822921541 +995 9 training.batch_size 2.0 +995 9 training.label_smoothing 0.04333383217440709 +995 10 model.embedding_dim 0.0 +995 10 model.scoring_fct_norm 2.0 +995 10 optimizer.lr 0.01021501615746534 +995 10 training.batch_size 0.0 +995 10 training.label_smoothing 0.005646285971102303 +995 11 model.embedding_dim 2.0 +995 11 model.scoring_fct_norm 1.0 +995 11 optimizer.lr 0.06504232911961712 +995 11 training.batch_size 2.0 +995 11 training.label_smoothing 0.008047372176893698 +995 12 model.embedding_dim 2.0 +995 12 model.scoring_fct_norm 1.0 +995 12 optimizer.lr 0.0016119104158084532 +995 12 training.batch_size 2.0 +995 12 training.label_smoothing 0.00714067057693111 +995 13 model.embedding_dim 1.0 +995 13 model.scoring_fct_norm 1.0 +995 13 optimizer.lr 0.008871767345846668 +995 13 training.batch_size 1.0 +995 13 training.label_smoothing 0.00907846183971281 +995 14 model.embedding_dim 0.0 +995 14 model.scoring_fct_norm 2.0 +995 14 optimizer.lr 0.012274483630066791 +995 14 training.batch_size 2.0 +995 14 training.label_smoothing 0.042463671254597754 +995 15 model.embedding_dim 2.0 +995 15 model.scoring_fct_norm 2.0 +995 15 optimizer.lr 
0.024813820759318556 +995 15 training.batch_size 2.0 +995 15 training.label_smoothing 0.3585258398522485 +995 16 model.embedding_dim 1.0 +995 16 model.scoring_fct_norm 2.0 +995 16 optimizer.lr 0.0018321395760377782 +995 16 training.batch_size 0.0 +995 16 training.label_smoothing 0.015132460864857278 +995 17 model.embedding_dim 1.0 +995 17 model.scoring_fct_norm 2.0 +995 17 optimizer.lr 0.00141459992153574 +995 17 training.batch_size 1.0 +995 17 training.label_smoothing 0.011125968910291105 +995 18 model.embedding_dim 2.0 +995 18 model.scoring_fct_norm 1.0 +995 18 optimizer.lr 0.036566443209744455 +995 18 training.batch_size 1.0 +995 18 training.label_smoothing 0.0013278398673943418 +995 19 model.embedding_dim 1.0 +995 19 model.scoring_fct_norm 2.0 +995 19 optimizer.lr 0.01399120748886788 +995 19 training.batch_size 2.0 +995 19 training.label_smoothing 0.01353032275625272 +995 20 model.embedding_dim 0.0 +995 20 model.scoring_fct_norm 1.0 +995 20 optimizer.lr 0.04896627927742088 +995 20 training.batch_size 0.0 +995 20 training.label_smoothing 0.11892752658563312 +995 21 model.embedding_dim 2.0 +995 21 model.scoring_fct_norm 1.0 +995 21 optimizer.lr 0.03315916659766707 +995 21 training.batch_size 1.0 +995 21 training.label_smoothing 0.0017570786073485114 +995 22 model.embedding_dim 2.0 +995 22 model.scoring_fct_norm 2.0 +995 22 optimizer.lr 0.0011820427840619778 +995 22 training.batch_size 0.0 +995 22 training.label_smoothing 0.8576030453835805 +995 23 model.embedding_dim 2.0 +995 23 model.scoring_fct_norm 1.0 +995 23 optimizer.lr 0.007663767230301297 +995 23 training.batch_size 2.0 +995 23 training.label_smoothing 0.576288402180275 +995 24 model.embedding_dim 0.0 +995 24 model.scoring_fct_norm 1.0 +995 24 optimizer.lr 0.0015981933328701968 +995 24 training.batch_size 0.0 +995 24 training.label_smoothing 0.09442115742572249 +995 25 model.embedding_dim 1.0 +995 25 model.scoring_fct_norm 1.0 +995 25 optimizer.lr 0.08973954220380768 +995 25 training.batch_size 2.0 +995 
25 training.label_smoothing 0.006589053608402081 +995 26 model.embedding_dim 2.0 +995 26 model.scoring_fct_norm 1.0 +995 26 optimizer.lr 0.030956350308112023 +995 26 training.batch_size 1.0 +995 26 training.label_smoothing 0.3590395822887818 +995 27 model.embedding_dim 1.0 +995 27 model.scoring_fct_norm 2.0 +995 27 optimizer.lr 0.010871152536891873 +995 27 training.batch_size 2.0 +995 27 training.label_smoothing 0.011969885891140822 +995 28 model.embedding_dim 1.0 +995 28 model.scoring_fct_norm 2.0 +995 28 optimizer.lr 0.06003924053769668 +995 28 training.batch_size 2.0 +995 28 training.label_smoothing 0.009014019621491608 +995 29 model.embedding_dim 1.0 +995 29 model.scoring_fct_norm 2.0 +995 29 optimizer.lr 0.030086452417188263 +995 29 training.batch_size 1.0 +995 29 training.label_smoothing 0.3568953497779305 +995 30 model.embedding_dim 0.0 +995 30 model.scoring_fct_norm 1.0 +995 30 optimizer.lr 0.011410720052153203 +995 30 training.batch_size 2.0 +995 30 training.label_smoothing 0.02123901748310271 +995 31 model.embedding_dim 2.0 +995 31 model.scoring_fct_norm 2.0 +995 31 optimizer.lr 0.038201346692688355 +995 31 training.batch_size 0.0 +995 31 training.label_smoothing 0.0031046224715519637 +995 1 dataset """fb15k237""" +995 1 model """unstructuredmodel""" +995 1 loss """softplus""" +995 1 regularizer """no""" +995 1 optimizer """adam""" +995 1 training_loop """lcwa""" +995 1 evaluator """rankbased""" +995 2 dataset """fb15k237""" +995 2 model """unstructuredmodel""" +995 2 loss """softplus""" +995 2 regularizer """no""" +995 2 optimizer """adam""" +995 2 training_loop """lcwa""" +995 2 evaluator """rankbased""" +995 3 dataset """fb15k237""" +995 3 model """unstructuredmodel""" +995 3 loss """softplus""" +995 3 regularizer """no""" +995 3 optimizer """adam""" +995 3 training_loop """lcwa""" +995 3 evaluator """rankbased""" +995 4 dataset """fb15k237""" +995 4 model """unstructuredmodel""" +995 4 loss """softplus""" +995 4 regularizer """no""" +995 4 optimizer 
"""adam""" +995 4 training_loop """lcwa""" +995 4 evaluator """rankbased""" +995 5 dataset """fb15k237""" +995 5 model """unstructuredmodel""" +995 5 loss """softplus""" +995 5 regularizer """no""" +995 5 optimizer """adam""" +995 5 training_loop """lcwa""" +995 5 evaluator """rankbased""" +995 6 dataset """fb15k237""" +995 6 model """unstructuredmodel""" +995 6 loss """softplus""" +995 6 regularizer """no""" +995 6 optimizer """adam""" +995 6 training_loop """lcwa""" +995 6 evaluator """rankbased""" +995 7 dataset """fb15k237""" +995 7 model """unstructuredmodel""" +995 7 loss """softplus""" +995 7 regularizer """no""" +995 7 optimizer """adam""" +995 7 training_loop """lcwa""" +995 7 evaluator """rankbased""" +995 8 dataset """fb15k237""" +995 8 model """unstructuredmodel""" +995 8 loss """softplus""" +995 8 regularizer """no""" +995 8 optimizer """adam""" +995 8 training_loop """lcwa""" +995 8 evaluator """rankbased""" +995 9 dataset """fb15k237""" +995 9 model """unstructuredmodel""" +995 9 loss """softplus""" +995 9 regularizer """no""" +995 9 optimizer """adam""" +995 9 training_loop """lcwa""" +995 9 evaluator """rankbased""" +995 10 dataset """fb15k237""" +995 10 model """unstructuredmodel""" +995 10 loss """softplus""" +995 10 regularizer """no""" +995 10 optimizer """adam""" +995 10 training_loop """lcwa""" +995 10 evaluator """rankbased""" +995 11 dataset """fb15k237""" +995 11 model """unstructuredmodel""" +995 11 loss """softplus""" +995 11 regularizer """no""" +995 11 optimizer """adam""" +995 11 training_loop """lcwa""" +995 11 evaluator """rankbased""" +995 12 dataset """fb15k237""" +995 12 model """unstructuredmodel""" +995 12 loss """softplus""" +995 12 regularizer """no""" +995 12 optimizer """adam""" +995 12 training_loop """lcwa""" +995 12 evaluator """rankbased""" +995 13 dataset """fb15k237""" +995 13 model """unstructuredmodel""" +995 13 loss """softplus""" +995 13 regularizer """no""" +995 13 optimizer """adam""" +995 13 training_loop 
"""lcwa""" +995 13 evaluator """rankbased""" +995 14 dataset """fb15k237""" +995 14 model """unstructuredmodel""" +995 14 loss """softplus""" +995 14 regularizer """no""" +995 14 optimizer """adam""" +995 14 training_loop """lcwa""" +995 14 evaluator """rankbased""" +995 15 dataset """fb15k237""" +995 15 model """unstructuredmodel""" +995 15 loss """softplus""" +995 15 regularizer """no""" +995 15 optimizer """adam""" +995 15 training_loop """lcwa""" +995 15 evaluator """rankbased""" +995 16 dataset """fb15k237""" +995 16 model """unstructuredmodel""" +995 16 loss """softplus""" +995 16 regularizer """no""" +995 16 optimizer """adam""" +995 16 training_loop """lcwa""" +995 16 evaluator """rankbased""" +995 17 dataset """fb15k237""" +995 17 model """unstructuredmodel""" +995 17 loss """softplus""" +995 17 regularizer """no""" +995 17 optimizer """adam""" +995 17 training_loop """lcwa""" +995 17 evaluator """rankbased""" +995 18 dataset """fb15k237""" +995 18 model """unstructuredmodel""" +995 18 loss """softplus""" +995 18 regularizer """no""" +995 18 optimizer """adam""" +995 18 training_loop """lcwa""" +995 18 evaluator """rankbased""" +995 19 dataset """fb15k237""" +995 19 model """unstructuredmodel""" +995 19 loss """softplus""" +995 19 regularizer """no""" +995 19 optimizer """adam""" +995 19 training_loop """lcwa""" +995 19 evaluator """rankbased""" +995 20 dataset """fb15k237""" +995 20 model """unstructuredmodel""" +995 20 loss """softplus""" +995 20 regularizer """no""" +995 20 optimizer """adam""" +995 20 training_loop """lcwa""" +995 20 evaluator """rankbased""" +995 21 dataset """fb15k237""" +995 21 model """unstructuredmodel""" +995 21 loss """softplus""" +995 21 regularizer """no""" +995 21 optimizer """adam""" +995 21 training_loop """lcwa""" +995 21 evaluator """rankbased""" +995 22 dataset """fb15k237""" +995 22 model """unstructuredmodel""" +995 22 loss """softplus""" +995 22 regularizer """no""" +995 22 optimizer """adam""" +995 22 training_loop 
"""lcwa""" +995 22 evaluator """rankbased""" +995 23 dataset """fb15k237""" +995 23 model """unstructuredmodel""" +995 23 loss """softplus""" +995 23 regularizer """no""" +995 23 optimizer """adam""" +995 23 training_loop """lcwa""" +995 23 evaluator """rankbased""" +995 24 dataset """fb15k237""" +995 24 model """unstructuredmodel""" +995 24 loss """softplus""" +995 24 regularizer """no""" +995 24 optimizer """adam""" +995 24 training_loop """lcwa""" +995 24 evaluator """rankbased""" +995 25 dataset """fb15k237""" +995 25 model """unstructuredmodel""" +995 25 loss """softplus""" +995 25 regularizer """no""" +995 25 optimizer """adam""" +995 25 training_loop """lcwa""" +995 25 evaluator """rankbased""" +995 26 dataset """fb15k237""" +995 26 model """unstructuredmodel""" +995 26 loss """softplus""" +995 26 regularizer """no""" +995 26 optimizer """adam""" +995 26 training_loop """lcwa""" +995 26 evaluator """rankbased""" +995 27 dataset """fb15k237""" +995 27 model """unstructuredmodel""" +995 27 loss """softplus""" +995 27 regularizer """no""" +995 27 optimizer """adam""" +995 27 training_loop """lcwa""" +995 27 evaluator """rankbased""" +995 28 dataset """fb15k237""" +995 28 model """unstructuredmodel""" +995 28 loss """softplus""" +995 28 regularizer """no""" +995 28 optimizer """adam""" +995 28 training_loop """lcwa""" +995 28 evaluator """rankbased""" +995 29 dataset """fb15k237""" +995 29 model """unstructuredmodel""" +995 29 loss """softplus""" +995 29 regularizer """no""" +995 29 optimizer """adam""" +995 29 training_loop """lcwa""" +995 29 evaluator """rankbased""" +995 30 dataset """fb15k237""" +995 30 model """unstructuredmodel""" +995 30 loss """softplus""" +995 30 regularizer """no""" +995 30 optimizer """adam""" +995 30 training_loop """lcwa""" +995 30 evaluator """rankbased""" +995 31 dataset """fb15k237""" +995 31 model """unstructuredmodel""" +995 31 loss """softplus""" +995 31 regularizer """no""" +995 31 optimizer """adam""" +995 31 training_loop 
"""lcwa""" +995 31 evaluator """rankbased""" +996 1 model.embedding_dim 2.0 +996 1 model.scoring_fct_norm 2.0 +996 1 optimizer.lr 0.05007989586227654 +996 1 training.batch_size 1.0 +996 1 training.label_smoothing 0.034284026256858155 +996 2 model.embedding_dim 1.0 +996 2 model.scoring_fct_norm 2.0 +996 2 optimizer.lr 0.0027940209379308614 +996 2 training.batch_size 1.0 +996 2 training.label_smoothing 0.01756009210255748 +996 3 model.embedding_dim 2.0 +996 3 model.scoring_fct_norm 2.0 +996 3 optimizer.lr 0.001788492163016688 +996 3 training.batch_size 0.0 +996 3 training.label_smoothing 0.11751614168047438 +996 4 model.embedding_dim 1.0 +996 4 model.scoring_fct_norm 1.0 +996 4 optimizer.lr 0.009845179255195917 +996 4 training.batch_size 0.0 +996 4 training.label_smoothing 0.046456612619588625 +996 5 model.embedding_dim 1.0 +996 5 model.scoring_fct_norm 2.0 +996 5 optimizer.lr 0.011089440544374262 +996 5 training.batch_size 2.0 +996 5 training.label_smoothing 0.178404586403738 +996 6 model.embedding_dim 1.0 +996 6 model.scoring_fct_norm 2.0 +996 6 optimizer.lr 0.010225669217396293 +996 6 training.batch_size 2.0 +996 6 training.label_smoothing 0.08550928525776191 +996 7 model.embedding_dim 0.0 +996 7 model.scoring_fct_norm 2.0 +996 7 optimizer.lr 0.09990187807455762 +996 7 training.batch_size 2.0 +996 7 training.label_smoothing 0.0063626353497217535 +996 8 model.embedding_dim 2.0 +996 8 model.scoring_fct_norm 2.0 +996 8 optimizer.lr 0.05596796091859905 +996 8 training.batch_size 1.0 +996 8 training.label_smoothing 0.546515857920716 +996 9 model.embedding_dim 2.0 +996 9 model.scoring_fct_norm 2.0 +996 9 optimizer.lr 0.006339251379702486 +996 9 training.batch_size 1.0 +996 9 training.label_smoothing 0.2614163513417801 +996 10 model.embedding_dim 1.0 +996 10 model.scoring_fct_norm 1.0 +996 10 optimizer.lr 0.015454549991863716 +996 10 training.batch_size 0.0 +996 10 training.label_smoothing 0.00983896126259929 +996 11 model.embedding_dim 1.0 +996 11 model.scoring_fct_norm 
1.0 +996 11 optimizer.lr 0.06929023723440039 +996 11 training.batch_size 1.0 +996 11 training.label_smoothing 0.2714667490187892 +996 12 model.embedding_dim 2.0 +996 12 model.scoring_fct_norm 1.0 +996 12 optimizer.lr 0.0013003624415795542 +996 12 training.batch_size 2.0 +996 12 training.label_smoothing 0.25055036536046343 +996 13 model.embedding_dim 2.0 +996 13 model.scoring_fct_norm 2.0 +996 13 optimizer.lr 0.004206581970819865 +996 13 training.batch_size 2.0 +996 13 training.label_smoothing 0.01125731961614021 +996 14 model.embedding_dim 2.0 +996 14 model.scoring_fct_norm 1.0 +996 14 optimizer.lr 0.023108395731943843 +996 14 training.batch_size 0.0 +996 14 training.label_smoothing 0.004663441170601265 +996 15 model.embedding_dim 2.0 +996 15 model.scoring_fct_norm 1.0 +996 15 optimizer.lr 0.007959238461713324 +996 15 training.batch_size 1.0 +996 15 training.label_smoothing 0.05909423074814932 +996 16 model.embedding_dim 0.0 +996 16 model.scoring_fct_norm 2.0 +996 16 optimizer.lr 0.05775158349963467 +996 16 training.batch_size 1.0 +996 16 training.label_smoothing 0.001491455658980862 +996 17 model.embedding_dim 2.0 +996 17 model.scoring_fct_norm 2.0 +996 17 optimizer.lr 0.0028132984159188316 +996 17 training.batch_size 0.0 +996 17 training.label_smoothing 0.610174224283614 +996 18 model.embedding_dim 2.0 +996 18 model.scoring_fct_norm 2.0 +996 18 optimizer.lr 0.00656591588043553 +996 18 training.batch_size 2.0 +996 18 training.label_smoothing 0.005579362083228855 +996 19 model.embedding_dim 2.0 +996 19 model.scoring_fct_norm 2.0 +996 19 optimizer.lr 0.023629264953759187 +996 19 training.batch_size 1.0 +996 19 training.label_smoothing 0.026629709455099845 +996 20 model.embedding_dim 1.0 +996 20 model.scoring_fct_norm 1.0 +996 20 optimizer.lr 0.007296962313875222 +996 20 training.batch_size 0.0 +996 20 training.label_smoothing 0.07267101379091744 +996 1 dataset """fb15k237""" +996 1 model """unstructuredmodel""" +996 1 loss """crossentropy""" +996 1 regularizer 
"""no""" +996 1 optimizer """adam""" +996 1 training_loop """lcwa""" +996 1 evaluator """rankbased""" +996 2 dataset """fb15k237""" +996 2 model """unstructuredmodel""" +996 2 loss """crossentropy""" +996 2 regularizer """no""" +996 2 optimizer """adam""" +996 2 training_loop """lcwa""" +996 2 evaluator """rankbased""" +996 3 dataset """fb15k237""" +996 3 model """unstructuredmodel""" +996 3 loss """crossentropy""" +996 3 regularizer """no""" +996 3 optimizer """adam""" +996 3 training_loop """lcwa""" +996 3 evaluator """rankbased""" +996 4 dataset """fb15k237""" +996 4 model """unstructuredmodel""" +996 4 loss """crossentropy""" +996 4 regularizer """no""" +996 4 optimizer """adam""" +996 4 training_loop """lcwa""" +996 4 evaluator """rankbased""" +996 5 dataset """fb15k237""" +996 5 model """unstructuredmodel""" +996 5 loss """crossentropy""" +996 5 regularizer """no""" +996 5 optimizer """adam""" +996 5 training_loop """lcwa""" +996 5 evaluator """rankbased""" +996 6 dataset """fb15k237""" +996 6 model """unstructuredmodel""" +996 6 loss """crossentropy""" +996 6 regularizer """no""" +996 6 optimizer """adam""" +996 6 training_loop """lcwa""" +996 6 evaluator """rankbased""" +996 7 dataset """fb15k237""" +996 7 model """unstructuredmodel""" +996 7 loss """crossentropy""" +996 7 regularizer """no""" +996 7 optimizer """adam""" +996 7 training_loop """lcwa""" +996 7 evaluator """rankbased""" +996 8 dataset """fb15k237""" +996 8 model """unstructuredmodel""" +996 8 loss """crossentropy""" +996 8 regularizer """no""" +996 8 optimizer """adam""" +996 8 training_loop """lcwa""" +996 8 evaluator """rankbased""" +996 9 dataset """fb15k237""" +996 9 model """unstructuredmodel""" +996 9 loss """crossentropy""" +996 9 regularizer """no""" +996 9 optimizer """adam""" +996 9 training_loop """lcwa""" +996 9 evaluator """rankbased""" +996 10 dataset """fb15k237""" +996 10 model """unstructuredmodel""" +996 10 loss """crossentropy""" +996 10 regularizer """no""" +996 10 
optimizer """adam""" +996 10 training_loop """lcwa""" +996 10 evaluator """rankbased""" +996 11 dataset """fb15k237""" +996 11 model """unstructuredmodel""" +996 11 loss """crossentropy""" +996 11 regularizer """no""" +996 11 optimizer """adam""" +996 11 training_loop """lcwa""" +996 11 evaluator """rankbased""" +996 12 dataset """fb15k237""" +996 12 model """unstructuredmodel""" +996 12 loss """crossentropy""" +996 12 regularizer """no""" +996 12 optimizer """adam""" +996 12 training_loop """lcwa""" +996 12 evaluator """rankbased""" +996 13 dataset """fb15k237""" +996 13 model """unstructuredmodel""" +996 13 loss """crossentropy""" +996 13 regularizer """no""" +996 13 optimizer """adam""" +996 13 training_loop """lcwa""" +996 13 evaluator """rankbased""" +996 14 dataset """fb15k237""" +996 14 model """unstructuredmodel""" +996 14 loss """crossentropy""" +996 14 regularizer """no""" +996 14 optimizer """adam""" +996 14 training_loop """lcwa""" +996 14 evaluator """rankbased""" +996 15 dataset """fb15k237""" +996 15 model """unstructuredmodel""" +996 15 loss """crossentropy""" +996 15 regularizer """no""" +996 15 optimizer """adam""" +996 15 training_loop """lcwa""" +996 15 evaluator """rankbased""" +996 16 dataset """fb15k237""" +996 16 model """unstructuredmodel""" +996 16 loss """crossentropy""" +996 16 regularizer """no""" +996 16 optimizer """adam""" +996 16 training_loop """lcwa""" +996 16 evaluator """rankbased""" +996 17 dataset """fb15k237""" +996 17 model """unstructuredmodel""" +996 17 loss """crossentropy""" +996 17 regularizer """no""" +996 17 optimizer """adam""" +996 17 training_loop """lcwa""" +996 17 evaluator """rankbased""" +996 18 dataset """fb15k237""" +996 18 model """unstructuredmodel""" +996 18 loss """crossentropy""" +996 18 regularizer """no""" +996 18 optimizer """adam""" +996 18 training_loop """lcwa""" +996 18 evaluator """rankbased""" +996 19 dataset """fb15k237""" +996 19 model """unstructuredmodel""" +996 19 loss """crossentropy""" 
+996 19 regularizer """no""" +996 19 optimizer """adam""" +996 19 training_loop """lcwa""" +996 19 evaluator """rankbased""" +996 20 dataset """fb15k237""" +996 20 model """unstructuredmodel""" +996 20 loss """crossentropy""" +996 20 regularizer """no""" +996 20 optimizer """adam""" +996 20 training_loop """lcwa""" +996 20 evaluator """rankbased""" +997 1 model.embedding_dim 1.0 +997 1 model.scoring_fct_norm 2.0 +997 1 optimizer.lr 0.0017859646249394486 +997 1 training.batch_size 2.0 +997 1 training.label_smoothing 0.017033012896736387 +997 2 model.embedding_dim 1.0 +997 2 model.scoring_fct_norm 1.0 +997 2 optimizer.lr 0.04106100609560898 +997 2 training.batch_size 0.0 +997 2 training.label_smoothing 0.03640556909628706 +997 3 model.embedding_dim 0.0 +997 3 model.scoring_fct_norm 1.0 +997 3 optimizer.lr 0.09585568459452926 +997 3 training.batch_size 2.0 +997 3 training.label_smoothing 0.0032002592215337427 +997 4 model.embedding_dim 1.0 +997 4 model.scoring_fct_norm 2.0 +997 4 optimizer.lr 0.0013842404471382315 +997 4 training.batch_size 1.0 +997 4 training.label_smoothing 0.6881721011585398 +997 5 model.embedding_dim 2.0 +997 5 model.scoring_fct_norm 1.0 +997 5 optimizer.lr 0.012534199326548251 +997 5 training.batch_size 2.0 +997 5 training.label_smoothing 0.002857312011247234 +997 6 model.embedding_dim 2.0 +997 6 model.scoring_fct_norm 1.0 +997 6 optimizer.lr 0.010505609904920611 +997 6 training.batch_size 1.0 +997 6 training.label_smoothing 0.22920511442939265 +997 7 model.embedding_dim 1.0 +997 7 model.scoring_fct_norm 1.0 +997 7 optimizer.lr 0.0024258221038426964 +997 7 training.batch_size 0.0 +997 7 training.label_smoothing 0.8015480581248711 +997 8 model.embedding_dim 0.0 +997 8 model.scoring_fct_norm 1.0 +997 8 optimizer.lr 0.00839297505146053 +997 8 training.batch_size 2.0 +997 8 training.label_smoothing 0.4512133167855031 +997 9 model.embedding_dim 2.0 +997 9 model.scoring_fct_norm 1.0 +997 9 optimizer.lr 0.0015436186117706375 +997 9 training.batch_size 
2.0 +997 9 training.label_smoothing 0.0189984198755803 +997 10 model.embedding_dim 2.0 +997 10 model.scoring_fct_norm 1.0 +997 10 optimizer.lr 0.007444367233114981 +997 10 training.batch_size 1.0 +997 10 training.label_smoothing 0.08915105194031143 +997 11 model.embedding_dim 1.0 +997 11 model.scoring_fct_norm 2.0 +997 11 optimizer.lr 0.0039015780973918218 +997 11 training.batch_size 1.0 +997 11 training.label_smoothing 0.09248790645044312 +997 12 model.embedding_dim 1.0 +997 12 model.scoring_fct_norm 1.0 +997 12 optimizer.lr 0.003319528754191165 +997 12 training.batch_size 2.0 +997 12 training.label_smoothing 0.002013129965777973 +997 13 model.embedding_dim 1.0 +997 13 model.scoring_fct_norm 2.0 +997 13 optimizer.lr 0.0013378469490159177 +997 13 training.batch_size 1.0 +997 13 training.label_smoothing 0.3274939986539274 +997 14 model.embedding_dim 2.0 +997 14 model.scoring_fct_norm 2.0 +997 14 optimizer.lr 0.08991493676165034 +997 14 training.batch_size 2.0 +997 14 training.label_smoothing 0.04720648780545217 +997 15 model.embedding_dim 0.0 +997 15 model.scoring_fct_norm 2.0 +997 15 optimizer.lr 0.009067432236337849 +997 15 training.batch_size 1.0 +997 15 training.label_smoothing 0.2240473456837116 +997 16 model.embedding_dim 2.0 +997 16 model.scoring_fct_norm 2.0 +997 16 optimizer.lr 0.006096466947381441 +997 16 training.batch_size 2.0 +997 16 training.label_smoothing 0.0022357635667172348 +997 17 model.embedding_dim 2.0 +997 17 model.scoring_fct_norm 2.0 +997 17 optimizer.lr 0.013489172596886595 +997 17 training.batch_size 2.0 +997 17 training.label_smoothing 0.0012127583075967718 +997 18 model.embedding_dim 1.0 +997 18 model.scoring_fct_norm 2.0 +997 18 optimizer.lr 0.0014852268872341545 +997 18 training.batch_size 2.0 +997 18 training.label_smoothing 0.004814244074940013 +997 19 model.embedding_dim 2.0 +997 19 model.scoring_fct_norm 1.0 +997 19 optimizer.lr 0.0018856472458650987 +997 19 training.batch_size 1.0 +997 19 training.label_smoothing 
0.0020836896118140266 +997 20 model.embedding_dim 1.0 +997 20 model.scoring_fct_norm 2.0 +997 20 optimizer.lr 0.038349691623753304 +997 20 training.batch_size 0.0 +997 20 training.label_smoothing 0.06501187807778978 +997 21 model.embedding_dim 1.0 +997 21 model.scoring_fct_norm 2.0 +997 21 optimizer.lr 0.0013544289467618941 +997 21 training.batch_size 1.0 +997 21 training.label_smoothing 0.38126856595082925 +997 22 model.embedding_dim 0.0 +997 22 model.scoring_fct_norm 1.0 +997 22 optimizer.lr 0.0014737305051181098 +997 22 training.batch_size 2.0 +997 22 training.label_smoothing 0.002020542129732367 +997 23 model.embedding_dim 0.0 +997 23 model.scoring_fct_norm 2.0 +997 23 optimizer.lr 0.007655546990837202 +997 23 training.batch_size 2.0 +997 23 training.label_smoothing 0.0453461056454895 +997 24 model.embedding_dim 2.0 +997 24 model.scoring_fct_norm 1.0 +997 24 optimizer.lr 0.02128057785160429 +997 24 training.batch_size 2.0 +997 24 training.label_smoothing 0.007820162682033905 +997 25 model.embedding_dim 2.0 +997 25 model.scoring_fct_norm 2.0 +997 25 optimizer.lr 0.05689950306499224 +997 25 training.batch_size 1.0 +997 25 training.label_smoothing 0.00695240451838766 +997 26 model.embedding_dim 0.0 +997 26 model.scoring_fct_norm 1.0 +997 26 optimizer.lr 0.024368636068027635 +997 26 training.batch_size 2.0 +997 26 training.label_smoothing 0.04604485279545301 +997 27 model.embedding_dim 2.0 +997 27 model.scoring_fct_norm 1.0 +997 27 optimizer.lr 0.02744215933107183 +997 27 training.batch_size 2.0 +997 27 training.label_smoothing 0.003258452973333913 +997 28 model.embedding_dim 2.0 +997 28 model.scoring_fct_norm 2.0 +997 28 optimizer.lr 0.0033599170935669255 +997 28 training.batch_size 2.0 +997 28 training.label_smoothing 0.049180034766231076 +997 29 model.embedding_dim 1.0 +997 29 model.scoring_fct_norm 1.0 +997 29 optimizer.lr 0.04719008792832793 +997 29 training.batch_size 0.0 +997 29 training.label_smoothing 0.1319389415429543 +997 30 model.embedding_dim 1.0 +997 
30 model.scoring_fct_norm 2.0 +997 30 optimizer.lr 0.02532009884823637 +997 30 training.batch_size 1.0 +997 30 training.label_smoothing 0.02830117004233622 +997 31 model.embedding_dim 0.0 +997 31 model.scoring_fct_norm 2.0 +997 31 optimizer.lr 0.07116800290167721 +997 31 training.batch_size 0.0 +997 31 training.label_smoothing 0.0097835714590569 +997 32 model.embedding_dim 1.0 +997 32 model.scoring_fct_norm 2.0 +997 32 optimizer.lr 0.0015682150935718033 +997 32 training.batch_size 1.0 +997 32 training.label_smoothing 0.03920998652801954 +997 33 model.embedding_dim 2.0 +997 33 model.scoring_fct_norm 1.0 +997 33 optimizer.lr 0.02064673412112814 +997 33 training.batch_size 2.0 +997 33 training.label_smoothing 0.18298415479544963 +997 34 model.embedding_dim 0.0 +997 34 model.scoring_fct_norm 1.0 +997 34 optimizer.lr 0.0028977363516118335 +997 34 training.batch_size 2.0 +997 34 training.label_smoothing 0.17122503208812354 +997 35 model.embedding_dim 1.0 +997 35 model.scoring_fct_norm 1.0 +997 35 optimizer.lr 0.006911337091708388 +997 35 training.batch_size 0.0 +997 35 training.label_smoothing 0.0632450865161539 +997 1 dataset """fb15k237""" +997 1 model """unstructuredmodel""" +997 1 loss """crossentropy""" +997 1 regularizer """no""" +997 1 optimizer """adam""" +997 1 training_loop """lcwa""" +997 1 evaluator """rankbased""" +997 2 dataset """fb15k237""" +997 2 model """unstructuredmodel""" +997 2 loss """crossentropy""" +997 2 regularizer """no""" +997 2 optimizer """adam""" +997 2 training_loop """lcwa""" +997 2 evaluator """rankbased""" +997 3 dataset """fb15k237""" +997 3 model """unstructuredmodel""" +997 3 loss """crossentropy""" +997 3 regularizer """no""" +997 3 optimizer """adam""" +997 3 training_loop """lcwa""" +997 3 evaluator """rankbased""" +997 4 dataset """fb15k237""" +997 4 model """unstructuredmodel""" +997 4 loss """crossentropy""" +997 4 regularizer """no""" +997 4 optimizer """adam""" +997 4 training_loop """lcwa""" +997 4 evaluator """rankbased""" 
+997 5 dataset """fb15k237""" +997 5 model """unstructuredmodel""" +997 5 loss """crossentropy""" +997 5 regularizer """no""" +997 5 optimizer """adam""" +997 5 training_loop """lcwa""" +997 5 evaluator """rankbased""" +997 6 dataset """fb15k237""" +997 6 model """unstructuredmodel""" +997 6 loss """crossentropy""" +997 6 regularizer """no""" +997 6 optimizer """adam""" +997 6 training_loop """lcwa""" +997 6 evaluator """rankbased""" +997 7 dataset """fb15k237""" +997 7 model """unstructuredmodel""" +997 7 loss """crossentropy""" +997 7 regularizer """no""" +997 7 optimizer """adam""" +997 7 training_loop """lcwa""" +997 7 evaluator """rankbased""" +997 8 dataset """fb15k237""" +997 8 model """unstructuredmodel""" +997 8 loss """crossentropy""" +997 8 regularizer """no""" +997 8 optimizer """adam""" +997 8 training_loop """lcwa""" +997 8 evaluator """rankbased""" +997 9 dataset """fb15k237""" +997 9 model """unstructuredmodel""" +997 9 loss """crossentropy""" +997 9 regularizer """no""" +997 9 optimizer """adam""" +997 9 training_loop """lcwa""" +997 9 evaluator """rankbased""" +997 10 dataset """fb15k237""" +997 10 model """unstructuredmodel""" +997 10 loss """crossentropy""" +997 10 regularizer """no""" +997 10 optimizer """adam""" +997 10 training_loop """lcwa""" +997 10 evaluator """rankbased""" +997 11 dataset """fb15k237""" +997 11 model """unstructuredmodel""" +997 11 loss """crossentropy""" +997 11 regularizer """no""" +997 11 optimizer """adam""" +997 11 training_loop """lcwa""" +997 11 evaluator """rankbased""" +997 12 dataset """fb15k237""" +997 12 model """unstructuredmodel""" +997 12 loss """crossentropy""" +997 12 regularizer """no""" +997 12 optimizer """adam""" +997 12 training_loop """lcwa""" +997 12 evaluator """rankbased""" +997 13 dataset """fb15k237""" +997 13 model """unstructuredmodel""" +997 13 loss """crossentropy""" +997 13 regularizer """no""" +997 13 optimizer """adam""" +997 13 training_loop """lcwa""" +997 13 evaluator """rankbased""" 
+997 14 dataset """fb15k237""" +997 14 model """unstructuredmodel""" +997 14 loss """crossentropy""" +997 14 regularizer """no""" +997 14 optimizer """adam""" +997 14 training_loop """lcwa""" +997 14 evaluator """rankbased""" +997 15 dataset """fb15k237""" +997 15 model """unstructuredmodel""" +997 15 loss """crossentropy""" +997 15 regularizer """no""" +997 15 optimizer """adam""" +997 15 training_loop """lcwa""" +997 15 evaluator """rankbased""" +997 16 dataset """fb15k237""" +997 16 model """unstructuredmodel""" +997 16 loss """crossentropy""" +997 16 regularizer """no""" +997 16 optimizer """adam""" +997 16 training_loop """lcwa""" +997 16 evaluator """rankbased""" +997 17 dataset """fb15k237""" +997 17 model """unstructuredmodel""" +997 17 loss """crossentropy""" +997 17 regularizer """no""" +997 17 optimizer """adam""" +997 17 training_loop """lcwa""" +997 17 evaluator """rankbased""" +997 18 dataset """fb15k237""" +997 18 model """unstructuredmodel""" +997 18 loss """crossentropy""" +997 18 regularizer """no""" +997 18 optimizer """adam""" +997 18 training_loop """lcwa""" +997 18 evaluator """rankbased""" +997 19 dataset """fb15k237""" +997 19 model """unstructuredmodel""" +997 19 loss """crossentropy""" +997 19 regularizer """no""" +997 19 optimizer """adam""" +997 19 training_loop """lcwa""" +997 19 evaluator """rankbased""" +997 20 dataset """fb15k237""" +997 20 model """unstructuredmodel""" +997 20 loss """crossentropy""" +997 20 regularizer """no""" +997 20 optimizer """adam""" +997 20 training_loop """lcwa""" +997 20 evaluator """rankbased""" +997 21 dataset """fb15k237""" +997 21 model """unstructuredmodel""" +997 21 loss """crossentropy""" +997 21 regularizer """no""" +997 21 optimizer """adam""" +997 21 training_loop """lcwa""" +997 21 evaluator """rankbased""" +997 22 dataset """fb15k237""" +997 22 model """unstructuredmodel""" +997 22 loss """crossentropy""" +997 22 regularizer """no""" +997 22 optimizer """adam""" +997 22 training_loop """lcwa""" 
+997 22 evaluator """rankbased""" +997 23 dataset """fb15k237""" +997 23 model """unstructuredmodel""" +997 23 loss """crossentropy""" +997 23 regularizer """no""" +997 23 optimizer """adam""" +997 23 training_loop """lcwa""" +997 23 evaluator """rankbased""" +997 24 dataset """fb15k237""" +997 24 model """unstructuredmodel""" +997 24 loss """crossentropy""" +997 24 regularizer """no""" +997 24 optimizer """adam""" +997 24 training_loop """lcwa""" +997 24 evaluator """rankbased""" +997 25 dataset """fb15k237""" +997 25 model """unstructuredmodel""" +997 25 loss """crossentropy""" +997 25 regularizer """no""" +997 25 optimizer """adam""" +997 25 training_loop """lcwa""" +997 25 evaluator """rankbased""" +997 26 dataset """fb15k237""" +997 26 model """unstructuredmodel""" +997 26 loss """crossentropy""" +997 26 regularizer """no""" +997 26 optimizer """adam""" +997 26 training_loop """lcwa""" +997 26 evaluator """rankbased""" +997 27 dataset """fb15k237""" +997 27 model """unstructuredmodel""" +997 27 loss """crossentropy""" +997 27 regularizer """no""" +997 27 optimizer """adam""" +997 27 training_loop """lcwa""" +997 27 evaluator """rankbased""" +997 28 dataset """fb15k237""" +997 28 model """unstructuredmodel""" +997 28 loss """crossentropy""" +997 28 regularizer """no""" +997 28 optimizer """adam""" +997 28 training_loop """lcwa""" +997 28 evaluator """rankbased""" +997 29 dataset """fb15k237""" +997 29 model """unstructuredmodel""" +997 29 loss """crossentropy""" +997 29 regularizer """no""" +997 29 optimizer """adam""" +997 29 training_loop """lcwa""" +997 29 evaluator """rankbased""" +997 30 dataset """fb15k237""" +997 30 model """unstructuredmodel""" +997 30 loss """crossentropy""" +997 30 regularizer """no""" +997 30 optimizer """adam""" +997 30 training_loop """lcwa""" +997 30 evaluator """rankbased""" +997 31 dataset """fb15k237""" +997 31 model """unstructuredmodel""" +997 31 loss """crossentropy""" +997 31 regularizer """no""" +997 31 optimizer 
"""adam""" +997 31 training_loop """lcwa""" +997 31 evaluator """rankbased""" +997 32 dataset """fb15k237""" +997 32 model """unstructuredmodel""" +997 32 loss """crossentropy""" +997 32 regularizer """no""" +997 32 optimizer """adam""" +997 32 training_loop """lcwa""" +997 32 evaluator """rankbased""" +997 33 dataset """fb15k237""" +997 33 model """unstructuredmodel""" +997 33 loss """crossentropy""" +997 33 regularizer """no""" +997 33 optimizer """adam""" +997 33 training_loop """lcwa""" +997 33 evaluator """rankbased""" +997 34 dataset """fb15k237""" +997 34 model """unstructuredmodel""" +997 34 loss """crossentropy""" +997 34 regularizer """no""" +997 34 optimizer """adam""" +997 34 training_loop """lcwa""" +997 34 evaluator """rankbased""" +997 35 dataset """fb15k237""" +997 35 model """unstructuredmodel""" +997 35 loss """crossentropy""" +997 35 regularizer """no""" +997 35 optimizer """adam""" +997 35 training_loop """lcwa""" +997 35 evaluator """rankbased""" +998 1 model.embedding_dim 2.0 +998 1 model.scoring_fct_norm 1.0 +998 1 loss.margin 29.773953082586402 +998 1 loss.adversarial_temperature 0.5599402116560028 +998 1 optimizer.lr 0.009924555580393875 +998 1 negative_sampler.num_negs_per_pos 33.0 +998 1 training.batch_size 2.0 +998 2 model.embedding_dim 2.0 +998 2 model.scoring_fct_norm 1.0 +998 2 loss.margin 16.571093497324934 +998 2 loss.adversarial_temperature 0.30801306861646044 +998 2 optimizer.lr 0.004742657070370641 +998 2 negative_sampler.num_negs_per_pos 9.0 +998 2 training.batch_size 2.0 +998 3 model.embedding_dim 1.0 +998 3 model.scoring_fct_norm 2.0 +998 3 loss.margin 29.60740359856088 +998 3 loss.adversarial_temperature 0.5189221140192007 +998 3 optimizer.lr 0.012993320444745144 +998 3 negative_sampler.num_negs_per_pos 34.0 +998 3 training.batch_size 2.0 +998 4 model.embedding_dim 2.0 +998 4 model.scoring_fct_norm 1.0 +998 4 loss.margin 3.1776988181508705 +998 4 loss.adversarial_temperature 0.3038255867753224 +998 4 optimizer.lr 
0.004081688928666748 +998 4 negative_sampler.num_negs_per_pos 35.0 +998 4 training.batch_size 2.0 +998 5 model.embedding_dim 2.0 +998 5 model.scoring_fct_norm 2.0 +998 5 loss.margin 9.860526971582422 +998 5 loss.adversarial_temperature 0.3668470108269061 +998 5 optimizer.lr 0.0033884107056489088 +998 5 negative_sampler.num_negs_per_pos 46.0 +998 5 training.batch_size 1.0 +998 6 model.embedding_dim 2.0 +998 6 model.scoring_fct_norm 2.0 +998 6 loss.margin 20.102531915752454 +998 6 loss.adversarial_temperature 0.22176837124358914 +998 6 optimizer.lr 0.02856773875058855 +998 6 negative_sampler.num_negs_per_pos 64.0 +998 6 training.batch_size 0.0 +998 7 model.embedding_dim 2.0 +998 7 model.scoring_fct_norm 1.0 +998 7 loss.margin 8.568360707444114 +998 7 loss.adversarial_temperature 0.6558448906096106 +998 7 optimizer.lr 0.0012044403826790271 +998 7 negative_sampler.num_negs_per_pos 94.0 +998 7 training.batch_size 2.0 +998 8 model.embedding_dim 2.0 +998 8 model.scoring_fct_norm 1.0 +998 8 loss.margin 3.116988765368505 +998 8 loss.adversarial_temperature 0.2589662695713143 +998 8 optimizer.lr 0.03206005988651255 +998 8 negative_sampler.num_negs_per_pos 31.0 +998 8 training.batch_size 0.0 +998 9 model.embedding_dim 2.0 +998 9 model.scoring_fct_norm 2.0 +998 9 loss.margin 14.530614288174528 +998 9 loss.adversarial_temperature 0.35638373416085756 +998 9 optimizer.lr 0.041285751276358015 +998 9 negative_sampler.num_negs_per_pos 12.0 +998 9 training.batch_size 1.0 +998 10 model.embedding_dim 0.0 +998 10 model.scoring_fct_norm 1.0 +998 10 loss.margin 16.257162680187395 +998 10 loss.adversarial_temperature 0.9811134339553813 +998 10 optimizer.lr 0.0014070981169555742 +998 10 negative_sampler.num_negs_per_pos 22.0 +998 10 training.batch_size 1.0 +998 11 model.embedding_dim 2.0 +998 11 model.scoring_fct_norm 1.0 +998 11 loss.margin 24.09689696235783 +998 11 loss.adversarial_temperature 0.18772049383282186 +998 11 optimizer.lr 0.0553365858730827 +998 11 
negative_sampler.num_negs_per_pos 32.0 +998 11 training.batch_size 1.0 +998 12 model.embedding_dim 0.0 +998 12 model.scoring_fct_norm 2.0 +998 12 loss.margin 27.750245062295612 +998 12 loss.adversarial_temperature 0.20560137084845254 +998 12 optimizer.lr 0.002997945219182169 +998 12 negative_sampler.num_negs_per_pos 86.0 +998 12 training.batch_size 0.0 +998 13 model.embedding_dim 1.0 +998 13 model.scoring_fct_norm 2.0 +998 13 loss.margin 24.17469194607996 +998 13 loss.adversarial_temperature 0.7857373664963869 +998 13 optimizer.lr 0.0036287055198829035 +998 13 negative_sampler.num_negs_per_pos 92.0 +998 13 training.batch_size 0.0 +998 14 model.embedding_dim 0.0 +998 14 model.scoring_fct_norm 1.0 +998 14 loss.margin 18.410733177942358 +998 14 loss.adversarial_temperature 0.3977473304310224 +998 14 optimizer.lr 0.023340680415763457 +998 14 negative_sampler.num_negs_per_pos 20.0 +998 14 training.batch_size 0.0 +998 15 model.embedding_dim 0.0 +998 15 model.scoring_fct_norm 2.0 +998 15 loss.margin 16.47437737421096 +998 15 loss.adversarial_temperature 0.5618599165889135 +998 15 optimizer.lr 0.0018905836451357586 +998 15 negative_sampler.num_negs_per_pos 4.0 +998 15 training.batch_size 1.0 +998 16 model.embedding_dim 0.0 +998 16 model.scoring_fct_norm 2.0 +998 16 loss.margin 22.344198748810463 +998 16 loss.adversarial_temperature 0.24093175294866442 +998 16 optimizer.lr 0.016243808354577685 +998 16 negative_sampler.num_negs_per_pos 68.0 +998 16 training.batch_size 1.0 +998 17 model.embedding_dim 0.0 +998 17 model.scoring_fct_norm 1.0 +998 17 loss.margin 21.249497920806107 +998 17 loss.adversarial_temperature 0.2548951895004574 +998 17 optimizer.lr 0.02356179738904684 +998 17 negative_sampler.num_negs_per_pos 55.0 +998 17 training.batch_size 0.0 +998 18 model.embedding_dim 2.0 +998 18 model.scoring_fct_norm 2.0 +998 18 loss.margin 25.355569237643653 +998 18 loss.adversarial_temperature 0.7280027721026445 +998 18 optimizer.lr 0.007961772845900668 +998 18 
negative_sampler.num_negs_per_pos 51.0 +998 18 training.batch_size 2.0 +998 19 model.embedding_dim 1.0 +998 19 model.scoring_fct_norm 1.0 +998 19 loss.margin 14.8111596910743 +998 19 loss.adversarial_temperature 0.80577377628519 +998 19 optimizer.lr 0.008754543786453281 +998 19 negative_sampler.num_negs_per_pos 38.0 +998 19 training.batch_size 0.0 +998 20 model.embedding_dim 2.0 +998 20 model.scoring_fct_norm 1.0 +998 20 loss.margin 2.971507807336403 +998 20 loss.adversarial_temperature 0.798297551106655 +998 20 optimizer.lr 0.00348303504267176 +998 20 negative_sampler.num_negs_per_pos 44.0 +998 20 training.batch_size 0.0 +998 21 model.embedding_dim 1.0 +998 21 model.scoring_fct_norm 1.0 +998 21 loss.margin 24.21351514374205 +998 21 loss.adversarial_temperature 0.878117440130462 +998 21 optimizer.lr 0.0022269317133096034 +998 21 negative_sampler.num_negs_per_pos 60.0 +998 21 training.batch_size 0.0 +998 22 model.embedding_dim 0.0 +998 22 model.scoring_fct_norm 1.0 +998 22 loss.margin 25.493836350341056 +998 22 loss.adversarial_temperature 0.7437627833736942 +998 22 optimizer.lr 0.017120537229897936 +998 22 negative_sampler.num_negs_per_pos 32.0 +998 22 training.batch_size 0.0 +998 23 model.embedding_dim 1.0 +998 23 model.scoring_fct_norm 1.0 +998 23 loss.margin 2.578162760966611 +998 23 loss.adversarial_temperature 0.40072342937539174 +998 23 optimizer.lr 0.028125955439843458 +998 23 negative_sampler.num_negs_per_pos 90.0 +998 23 training.batch_size 1.0 +998 24 model.embedding_dim 0.0 +998 24 model.scoring_fct_norm 2.0 +998 24 loss.margin 24.878187927564266 +998 24 loss.adversarial_temperature 0.45374036746290414 +998 24 optimizer.lr 0.056417771005772055 +998 24 negative_sampler.num_negs_per_pos 4.0 +998 24 training.batch_size 2.0 +998 25 model.embedding_dim 0.0 +998 25 model.scoring_fct_norm 1.0 +998 25 loss.margin 26.658832567359994 +998 25 loss.adversarial_temperature 0.5589042709234553 +998 25 optimizer.lr 0.006485562496740607 +998 25 
negative_sampler.num_negs_per_pos 85.0 +998 25 training.batch_size 0.0 +998 26 model.embedding_dim 2.0 +998 26 model.scoring_fct_norm 2.0 +998 26 loss.margin 3.286682374970658 +998 26 loss.adversarial_temperature 0.8808740119319429 +998 26 optimizer.lr 0.011688257522267653 +998 26 negative_sampler.num_negs_per_pos 66.0 +998 26 training.batch_size 2.0 +998 27 model.embedding_dim 1.0 +998 27 model.scoring_fct_norm 1.0 +998 27 loss.margin 12.379243804592205 +998 27 loss.adversarial_temperature 0.16273572625826285 +998 27 optimizer.lr 0.0021503503461529094 +998 27 negative_sampler.num_negs_per_pos 91.0 +998 27 training.batch_size 0.0 +998 28 model.embedding_dim 2.0 +998 28 model.scoring_fct_norm 2.0 +998 28 loss.margin 5.252492942748583 +998 28 loss.adversarial_temperature 0.40672580004012204 +998 28 optimizer.lr 0.001294331503101361 +998 28 negative_sampler.num_negs_per_pos 4.0 +998 28 training.batch_size 2.0 +998 29 model.embedding_dim 0.0 +998 29 model.scoring_fct_norm 1.0 +998 29 loss.margin 7.528443802548436 +998 29 loss.adversarial_temperature 0.5935841782622229 +998 29 optimizer.lr 0.0064512987091007975 +998 29 negative_sampler.num_negs_per_pos 32.0 +998 29 training.batch_size 2.0 +998 30 model.embedding_dim 2.0 +998 30 model.scoring_fct_norm 2.0 +998 30 loss.margin 17.199543602872517 +998 30 loss.adversarial_temperature 0.6281054504680894 +998 30 optimizer.lr 0.04099694859772205 +998 30 negative_sampler.num_negs_per_pos 43.0 +998 30 training.batch_size 0.0 +998 31 model.embedding_dim 2.0 +998 31 model.scoring_fct_norm 2.0 +998 31 loss.margin 23.230310183565134 +998 31 loss.adversarial_temperature 0.8945661498594669 +998 31 optimizer.lr 0.005130712407553863 +998 31 negative_sampler.num_negs_per_pos 16.0 +998 31 training.batch_size 1.0 +998 32 model.embedding_dim 1.0 +998 32 model.scoring_fct_norm 1.0 +998 32 loss.margin 21.86997833670631 +998 32 loss.adversarial_temperature 0.6890409369552394 +998 32 optimizer.lr 0.006294722446069955 +998 32 
negative_sampler.num_negs_per_pos 29.0 +998 32 training.batch_size 2.0 +998 33 model.embedding_dim 2.0 +998 33 model.scoring_fct_norm 2.0 +998 33 loss.margin 16.131597463858878 +998 33 loss.adversarial_temperature 0.3493289574360289 +998 33 optimizer.lr 0.05485211400420459 +998 33 negative_sampler.num_negs_per_pos 71.0 +998 33 training.batch_size 0.0 +998 34 model.embedding_dim 2.0 +998 34 model.scoring_fct_norm 1.0 +998 34 loss.margin 29.88891841286811 +998 34 loss.adversarial_temperature 0.7401747916556207 +998 34 optimizer.lr 0.005986962357598053 +998 34 negative_sampler.num_negs_per_pos 77.0 +998 34 training.batch_size 0.0 +998 35 model.embedding_dim 1.0 +998 35 model.scoring_fct_norm 1.0 +998 35 loss.margin 20.373901317869738 +998 35 loss.adversarial_temperature 0.9138614038849843 +998 35 optimizer.lr 0.0023237605323949613 +998 35 negative_sampler.num_negs_per_pos 52.0 +998 35 training.batch_size 2.0 +998 36 model.embedding_dim 0.0 +998 36 model.scoring_fct_norm 1.0 +998 36 loss.margin 17.9563364268096 +998 36 loss.adversarial_temperature 0.19569067481745012 +998 36 optimizer.lr 0.020150585630831875 +998 36 negative_sampler.num_negs_per_pos 82.0 +998 36 training.batch_size 1.0 +998 37 model.embedding_dim 1.0 +998 37 model.scoring_fct_norm 1.0 +998 37 loss.margin 22.89822190143946 +998 37 loss.adversarial_temperature 0.6506372653369438 +998 37 optimizer.lr 0.006829736834742156 +998 37 negative_sampler.num_negs_per_pos 50.0 +998 37 training.batch_size 2.0 +998 38 model.embedding_dim 1.0 +998 38 model.scoring_fct_norm 1.0 +998 38 loss.margin 28.871571514574462 +998 38 loss.adversarial_temperature 0.34737377019759597 +998 38 optimizer.lr 0.020876028958253787 +998 38 negative_sampler.num_negs_per_pos 63.0 +998 38 training.batch_size 1.0 +998 39 model.embedding_dim 1.0 +998 39 model.scoring_fct_norm 1.0 +998 39 loss.margin 27.475654269697905 +998 39 loss.adversarial_temperature 0.738957606018149 +998 39 optimizer.lr 0.09250201708258661 +998 39 
negative_sampler.num_negs_per_pos 97.0 +998 39 training.batch_size 0.0 +998 40 model.embedding_dim 1.0 +998 40 model.scoring_fct_norm 2.0 +998 40 loss.margin 14.551284575600006 +998 40 loss.adversarial_temperature 0.5818969418703749 +998 40 optimizer.lr 0.0013964804552796292 +998 40 negative_sampler.num_negs_per_pos 85.0 +998 40 training.batch_size 2.0 +998 41 model.embedding_dim 2.0 +998 41 model.scoring_fct_norm 1.0 +998 41 loss.margin 27.95931480895065 +998 41 loss.adversarial_temperature 0.6248881164324668 +998 41 optimizer.lr 0.053225126582108095 +998 41 negative_sampler.num_negs_per_pos 14.0 +998 41 training.batch_size 0.0 +998 42 model.embedding_dim 0.0 +998 42 model.scoring_fct_norm 2.0 +998 42 loss.margin 20.831187888006365 +998 42 loss.adversarial_temperature 0.4210468722681079 +998 42 optimizer.lr 0.005375664383048531 +998 42 negative_sampler.num_negs_per_pos 41.0 +998 42 training.batch_size 0.0 +998 43 model.embedding_dim 0.0 +998 43 model.scoring_fct_norm 1.0 +998 43 loss.margin 26.176674160517393 +998 43 loss.adversarial_temperature 0.3224496851160762 +998 43 optimizer.lr 0.009827481374101893 +998 43 negative_sampler.num_negs_per_pos 47.0 +998 43 training.batch_size 0.0 +998 44 model.embedding_dim 2.0 +998 44 model.scoring_fct_norm 1.0 +998 44 loss.margin 9.974730109398884 +998 44 loss.adversarial_temperature 0.4997205402801662 +998 44 optimizer.lr 0.0026675833965570717 +998 44 negative_sampler.num_negs_per_pos 23.0 +998 44 training.batch_size 1.0 +998 45 model.embedding_dim 1.0 +998 45 model.scoring_fct_norm 1.0 +998 45 loss.margin 27.690894011247508 +998 45 loss.adversarial_temperature 0.1261477009714922 +998 45 optimizer.lr 0.001072958997409739 +998 45 negative_sampler.num_negs_per_pos 71.0 +998 45 training.batch_size 0.0 +998 46 model.embedding_dim 1.0 +998 46 model.scoring_fct_norm 2.0 +998 46 loss.margin 8.476946613936551 +998 46 loss.adversarial_temperature 0.782397957175027 +998 46 optimizer.lr 0.0111408817804304 +998 46 
negative_sampler.num_negs_per_pos 17.0 +998 46 training.batch_size 0.0 +998 47 model.embedding_dim 0.0 +998 47 model.scoring_fct_norm 1.0 +998 47 loss.margin 1.1813031536846261 +998 47 loss.adversarial_temperature 0.373325303042485 +998 47 optimizer.lr 0.04242759934740591 +998 47 negative_sampler.num_negs_per_pos 59.0 +998 47 training.batch_size 0.0 +998 48 model.embedding_dim 0.0 +998 48 model.scoring_fct_norm 1.0 +998 48 loss.margin 8.04628237825522 +998 48 loss.adversarial_temperature 0.893134887133768 +998 48 optimizer.lr 0.036748847196667295 +998 48 negative_sampler.num_negs_per_pos 74.0 +998 48 training.batch_size 1.0 +998 49 model.embedding_dim 1.0 +998 49 model.scoring_fct_norm 1.0 +998 49 loss.margin 15.54743668152731 +998 49 loss.adversarial_temperature 0.6665199837916502 +998 49 optimizer.lr 0.005761989158355924 +998 49 negative_sampler.num_negs_per_pos 77.0 +998 49 training.batch_size 1.0 +998 1 dataset """fb15k237""" +998 1 model """unstructuredmodel""" +998 1 loss """nssa""" +998 1 regularizer """no""" +998 1 optimizer """adam""" +998 1 training_loop """owa""" +998 1 negative_sampler """basic""" +998 1 evaluator """rankbased""" +998 2 dataset """fb15k237""" +998 2 model """unstructuredmodel""" +998 2 loss """nssa""" +998 2 regularizer """no""" +998 2 optimizer """adam""" +998 2 training_loop """owa""" +998 2 negative_sampler """basic""" +998 2 evaluator """rankbased""" +998 3 dataset """fb15k237""" +998 3 model """unstructuredmodel""" +998 3 loss """nssa""" +998 3 regularizer """no""" +998 3 optimizer """adam""" +998 3 training_loop """owa""" +998 3 negative_sampler """basic""" +998 3 evaluator """rankbased""" +998 4 dataset """fb15k237""" +998 4 model """unstructuredmodel""" +998 4 loss """nssa""" +998 4 regularizer """no""" +998 4 optimizer """adam""" +998 4 training_loop """owa""" +998 4 negative_sampler """basic""" +998 4 evaluator """rankbased""" +998 5 dataset """fb15k237""" +998 5 model """unstructuredmodel""" +998 5 loss """nssa""" +998 5 
regularizer """no""" +998 5 optimizer """adam""" +998 5 training_loop """owa""" +998 5 negative_sampler """basic""" +998 5 evaluator """rankbased""" +998 6 dataset """fb15k237""" +998 6 model """unstructuredmodel""" +998 6 loss """nssa""" +998 6 regularizer """no""" +998 6 optimizer """adam""" +998 6 training_loop """owa""" +998 6 negative_sampler """basic""" +998 6 evaluator """rankbased""" +998 7 dataset """fb15k237""" +998 7 model """unstructuredmodel""" +998 7 loss """nssa""" +998 7 regularizer """no""" +998 7 optimizer """adam""" +998 7 training_loop """owa""" +998 7 negative_sampler """basic""" +998 7 evaluator """rankbased""" +998 8 dataset """fb15k237""" +998 8 model """unstructuredmodel""" +998 8 loss """nssa""" +998 8 regularizer """no""" +998 8 optimizer """adam""" +998 8 training_loop """owa""" +998 8 negative_sampler """basic""" +998 8 evaluator """rankbased""" +998 9 dataset """fb15k237""" +998 9 model """unstructuredmodel""" +998 9 loss """nssa""" +998 9 regularizer """no""" +998 9 optimizer """adam""" +998 9 training_loop """owa""" +998 9 negative_sampler """basic""" +998 9 evaluator """rankbased""" +998 10 dataset """fb15k237""" +998 10 model """unstructuredmodel""" +998 10 loss """nssa""" +998 10 regularizer """no""" +998 10 optimizer """adam""" +998 10 training_loop """owa""" +998 10 negative_sampler """basic""" +998 10 evaluator """rankbased""" +998 11 dataset """fb15k237""" +998 11 model """unstructuredmodel""" +998 11 loss """nssa""" +998 11 regularizer """no""" +998 11 optimizer """adam""" +998 11 training_loop """owa""" +998 11 negative_sampler """basic""" +998 11 evaluator """rankbased""" +998 12 dataset """fb15k237""" +998 12 model """unstructuredmodel""" +998 12 loss """nssa""" +998 12 regularizer """no""" +998 12 optimizer """adam""" +998 12 training_loop """owa""" +998 12 negative_sampler """basic""" +998 12 evaluator """rankbased""" +998 13 dataset """fb15k237""" +998 13 model """unstructuredmodel""" +998 13 loss """nssa""" +998 13 
regularizer """no""" +998 13 optimizer """adam""" +998 13 training_loop """owa""" +998 13 negative_sampler """basic""" +998 13 evaluator """rankbased""" +998 14 dataset """fb15k237""" +998 14 model """unstructuredmodel""" +998 14 loss """nssa""" +998 14 regularizer """no""" +998 14 optimizer """adam""" +998 14 training_loop """owa""" +998 14 negative_sampler """basic""" +998 14 evaluator """rankbased""" +998 15 dataset """fb15k237""" +998 15 model """unstructuredmodel""" +998 15 loss """nssa""" +998 15 regularizer """no""" +998 15 optimizer """adam""" +998 15 training_loop """owa""" +998 15 negative_sampler """basic""" +998 15 evaluator """rankbased""" +998 16 dataset """fb15k237""" +998 16 model """unstructuredmodel""" +998 16 loss """nssa""" +998 16 regularizer """no""" +998 16 optimizer """adam""" +998 16 training_loop """owa""" +998 16 negative_sampler """basic""" +998 16 evaluator """rankbased""" +998 17 dataset """fb15k237""" +998 17 model """unstructuredmodel""" +998 17 loss """nssa""" +998 17 regularizer """no""" +998 17 optimizer """adam""" +998 17 training_loop """owa""" +998 17 negative_sampler """basic""" +998 17 evaluator """rankbased""" +998 18 dataset """fb15k237""" +998 18 model """unstructuredmodel""" +998 18 loss """nssa""" +998 18 regularizer """no""" +998 18 optimizer """adam""" +998 18 training_loop """owa""" +998 18 negative_sampler """basic""" +998 18 evaluator """rankbased""" +998 19 dataset """fb15k237""" +998 19 model """unstructuredmodel""" +998 19 loss """nssa""" +998 19 regularizer """no""" +998 19 optimizer """adam""" +998 19 training_loop """owa""" +998 19 negative_sampler """basic""" +998 19 evaluator """rankbased""" +998 20 dataset """fb15k237""" +998 20 model """unstructuredmodel""" +998 20 loss """nssa""" +998 20 regularizer """no""" +998 20 optimizer """adam""" +998 20 training_loop """owa""" +998 20 negative_sampler """basic""" +998 20 evaluator """rankbased""" +998 21 dataset """fb15k237""" +998 21 model """unstructuredmodel""" 
+998 21 loss """nssa""" +998 21 regularizer """no""" +998 21 optimizer """adam""" +998 21 training_loop """owa""" +998 21 negative_sampler """basic""" +998 21 evaluator """rankbased""" +998 22 dataset """fb15k237""" +998 22 model """unstructuredmodel""" +998 22 loss """nssa""" +998 22 regularizer """no""" +998 22 optimizer """adam""" +998 22 training_loop """owa""" +998 22 negative_sampler """basic""" +998 22 evaluator """rankbased""" +998 23 dataset """fb15k237""" +998 23 model """unstructuredmodel""" +998 23 loss """nssa""" +998 23 regularizer """no""" +998 23 optimizer """adam""" +998 23 training_loop """owa""" +998 23 negative_sampler """basic""" +998 23 evaluator """rankbased""" +998 24 dataset """fb15k237""" +998 24 model """unstructuredmodel""" +998 24 loss """nssa""" +998 24 regularizer """no""" +998 24 optimizer """adam""" +998 24 training_loop """owa""" +998 24 negative_sampler """basic""" +998 24 evaluator """rankbased""" +998 25 dataset """fb15k237""" +998 25 model """unstructuredmodel""" +998 25 loss """nssa""" +998 25 regularizer """no""" +998 25 optimizer """adam""" +998 25 training_loop """owa""" +998 25 negative_sampler """basic""" +998 25 evaluator """rankbased""" +998 26 dataset """fb15k237""" +998 26 model """unstructuredmodel""" +998 26 loss """nssa""" +998 26 regularizer """no""" +998 26 optimizer """adam""" +998 26 training_loop """owa""" +998 26 negative_sampler """basic""" +998 26 evaluator """rankbased""" +998 27 dataset """fb15k237""" +998 27 model """unstructuredmodel""" +998 27 loss """nssa""" +998 27 regularizer """no""" +998 27 optimizer """adam""" +998 27 training_loop """owa""" +998 27 negative_sampler """basic""" +998 27 evaluator """rankbased""" +998 28 dataset """fb15k237""" +998 28 model """unstructuredmodel""" +998 28 loss """nssa""" +998 28 regularizer """no""" +998 28 optimizer """adam""" +998 28 training_loop """owa""" +998 28 negative_sampler """basic""" +998 28 evaluator """rankbased""" +998 29 dataset """fb15k237""" +998 
29 model """unstructuredmodel""" +998 29 loss """nssa""" +998 29 regularizer """no""" +998 29 optimizer """adam""" +998 29 training_loop """owa""" +998 29 negative_sampler """basic""" +998 29 evaluator """rankbased""" +998 30 dataset """fb15k237""" +998 30 model """unstructuredmodel""" +998 30 loss """nssa""" +998 30 regularizer """no""" +998 30 optimizer """adam""" +998 30 training_loop """owa""" +998 30 negative_sampler """basic""" +998 30 evaluator """rankbased""" +998 31 dataset """fb15k237""" +998 31 model """unstructuredmodel""" +998 31 loss """nssa""" +998 31 regularizer """no""" +998 31 optimizer """adam""" +998 31 training_loop """owa""" +998 31 negative_sampler """basic""" +998 31 evaluator """rankbased""" +998 32 dataset """fb15k237""" +998 32 model """unstructuredmodel""" +998 32 loss """nssa""" +998 32 regularizer """no""" +998 32 optimizer """adam""" +998 32 training_loop """owa""" +998 32 negative_sampler """basic""" +998 32 evaluator """rankbased""" +998 33 dataset """fb15k237""" +998 33 model """unstructuredmodel""" +998 33 loss """nssa""" +998 33 regularizer """no""" +998 33 optimizer """adam""" +998 33 training_loop """owa""" +998 33 negative_sampler """basic""" +998 33 evaluator """rankbased""" +998 34 dataset """fb15k237""" +998 34 model """unstructuredmodel""" +998 34 loss """nssa""" +998 34 regularizer """no""" +998 34 optimizer """adam""" +998 34 training_loop """owa""" +998 34 negative_sampler """basic""" +998 34 evaluator """rankbased""" +998 35 dataset """fb15k237""" +998 35 model """unstructuredmodel""" +998 35 loss """nssa""" +998 35 regularizer """no""" +998 35 optimizer """adam""" +998 35 training_loop """owa""" +998 35 negative_sampler """basic""" +998 35 evaluator """rankbased""" +998 36 dataset """fb15k237""" +998 36 model """unstructuredmodel""" +998 36 loss """nssa""" +998 36 regularizer """no""" +998 36 optimizer """adam""" +998 36 training_loop """owa""" +998 36 negative_sampler """basic""" +998 36 evaluator """rankbased""" 
+998 37 dataset """fb15k237""" +998 37 model """unstructuredmodel""" +998 37 loss """nssa""" +998 37 regularizer """no""" +998 37 optimizer """adam""" +998 37 training_loop """owa""" +998 37 negative_sampler """basic""" +998 37 evaluator """rankbased""" +998 38 dataset """fb15k237""" +998 38 model """unstructuredmodel""" +998 38 loss """nssa""" +998 38 regularizer """no""" +998 38 optimizer """adam""" +998 38 training_loop """owa""" +998 38 negative_sampler """basic""" +998 38 evaluator """rankbased""" +998 39 dataset """fb15k237""" +998 39 model """unstructuredmodel""" +998 39 loss """nssa""" +998 39 regularizer """no""" +998 39 optimizer """adam""" +998 39 training_loop """owa""" +998 39 negative_sampler """basic""" +998 39 evaluator """rankbased""" +998 40 dataset """fb15k237""" +998 40 model """unstructuredmodel""" +998 40 loss """nssa""" +998 40 regularizer """no""" +998 40 optimizer """adam""" +998 40 training_loop """owa""" +998 40 negative_sampler """basic""" +998 40 evaluator """rankbased""" +998 41 dataset """fb15k237""" +998 41 model """unstructuredmodel""" +998 41 loss """nssa""" +998 41 regularizer """no""" +998 41 optimizer """adam""" +998 41 training_loop """owa""" +998 41 negative_sampler """basic""" +998 41 evaluator """rankbased""" +998 42 dataset """fb15k237""" +998 42 model """unstructuredmodel""" +998 42 loss """nssa""" +998 42 regularizer """no""" +998 42 optimizer """adam""" +998 42 training_loop """owa""" +998 42 negative_sampler """basic""" +998 42 evaluator """rankbased""" +998 43 dataset """fb15k237""" +998 43 model """unstructuredmodel""" +998 43 loss """nssa""" +998 43 regularizer """no""" +998 43 optimizer """adam""" +998 43 training_loop """owa""" +998 43 negative_sampler """basic""" +998 43 evaluator """rankbased""" +998 44 dataset """fb15k237""" +998 44 model """unstructuredmodel""" +998 44 loss """nssa""" +998 44 regularizer """no""" +998 44 optimizer """adam""" +998 44 training_loop """owa""" +998 44 negative_sampler """basic""" 
+998 44 evaluator """rankbased""" +998 45 dataset """fb15k237""" +998 45 model """unstructuredmodel""" +998 45 loss """nssa""" +998 45 regularizer """no""" +998 45 optimizer """adam""" +998 45 training_loop """owa""" +998 45 negative_sampler """basic""" +998 45 evaluator """rankbased""" +998 46 dataset """fb15k237""" +998 46 model """unstructuredmodel""" +998 46 loss """nssa""" +998 46 regularizer """no""" +998 46 optimizer """adam""" +998 46 training_loop """owa""" +998 46 negative_sampler """basic""" +998 46 evaluator """rankbased""" +998 47 dataset """fb15k237""" +998 47 model """unstructuredmodel""" +998 47 loss """nssa""" +998 47 regularizer """no""" +998 47 optimizer """adam""" +998 47 training_loop """owa""" +998 47 negative_sampler """basic""" +998 47 evaluator """rankbased""" +998 48 dataset """fb15k237""" +998 48 model """unstructuredmodel""" +998 48 loss """nssa""" +998 48 regularizer """no""" +998 48 optimizer """adam""" +998 48 training_loop """owa""" +998 48 negative_sampler """basic""" +998 48 evaluator """rankbased""" +998 49 dataset """fb15k237""" +998 49 model """unstructuredmodel""" +998 49 loss """nssa""" +998 49 regularizer """no""" +998 49 optimizer """adam""" +998 49 training_loop """owa""" +998 49 negative_sampler """basic""" +998 49 evaluator """rankbased""" +999 1 model.embedding_dim 0.0 +999 1 model.scoring_fct_norm 2.0 +999 1 loss.margin 6.4346200864782706 +999 1 loss.adversarial_temperature 0.9415386630960946 +999 1 optimizer.lr 0.01908478036042632 +999 1 negative_sampler.num_negs_per_pos 5.0 +999 1 training.batch_size 2.0 +999 2 model.embedding_dim 1.0 +999 2 model.scoring_fct_norm 2.0 +999 2 loss.margin 18.18082119937817 +999 2 loss.adversarial_temperature 0.7048975608023432 +999 2 optimizer.lr 0.019356816994216163 +999 2 negative_sampler.num_negs_per_pos 56.0 +999 2 training.batch_size 0.0 +999 3 model.embedding_dim 0.0 +999 3 model.scoring_fct_norm 1.0 +999 3 loss.margin 22.236806347538337 +999 3 loss.adversarial_temperature 
0.1117223662429795 +999 3 optimizer.lr 0.004360070062058542 +999 3 negative_sampler.num_negs_per_pos 98.0 +999 3 training.batch_size 1.0 +999 4 model.embedding_dim 2.0 +999 4 model.scoring_fct_norm 2.0 +999 4 loss.margin 16.303881552135547 +999 4 loss.adversarial_temperature 0.9792059370083227 +999 4 optimizer.lr 0.07125583171346464 +999 4 negative_sampler.num_negs_per_pos 99.0 +999 4 training.batch_size 0.0 +999 5 model.embedding_dim 2.0 +999 5 model.scoring_fct_norm 1.0 +999 5 loss.margin 29.361430838060176 +999 5 loss.adversarial_temperature 0.8938917310886664 +999 5 optimizer.lr 0.001556662918238312 +999 5 negative_sampler.num_negs_per_pos 49.0 +999 5 training.batch_size 2.0 +999 6 model.embedding_dim 1.0 +999 6 model.scoring_fct_norm 2.0 +999 6 loss.margin 14.794654693062117 +999 6 loss.adversarial_temperature 0.5732508229889509 +999 6 optimizer.lr 0.017263181107174518 +999 6 negative_sampler.num_negs_per_pos 20.0 +999 6 training.batch_size 0.0 +999 7 model.embedding_dim 1.0 +999 7 model.scoring_fct_norm 1.0 +999 7 loss.margin 5.685408163273237 +999 7 loss.adversarial_temperature 0.8800862829012323 +999 7 optimizer.lr 0.0015488818114900635 +999 7 negative_sampler.num_negs_per_pos 15.0 +999 7 training.batch_size 0.0 +999 8 model.embedding_dim 0.0 +999 8 model.scoring_fct_norm 1.0 +999 8 loss.margin 4.567005843622969 +999 8 loss.adversarial_temperature 0.9124198679273325 +999 8 optimizer.lr 0.0740875045913203 +999 8 negative_sampler.num_negs_per_pos 46.0 +999 8 training.batch_size 1.0 +999 9 model.embedding_dim 1.0 +999 9 model.scoring_fct_norm 2.0 +999 9 loss.margin 8.190416027981357 +999 9 loss.adversarial_temperature 0.7460391363851903 +999 9 optimizer.lr 0.04597240486935956 +999 9 negative_sampler.num_negs_per_pos 28.0 +999 9 training.batch_size 2.0 +999 10 model.embedding_dim 1.0 +999 10 model.scoring_fct_norm 2.0 +999 10 loss.margin 11.144515207571144 +999 10 loss.adversarial_temperature 0.8233806322021344 +999 10 optimizer.lr 0.002221570701936348 +999 10 
negative_sampler.num_negs_per_pos 65.0 +999 10 training.batch_size 0.0 +999 11 model.embedding_dim 0.0 +999 11 model.scoring_fct_norm 2.0 +999 11 loss.margin 20.727885854128445 +999 11 loss.adversarial_temperature 0.32175064064415554 +999 11 optimizer.lr 0.0012416543855583325 +999 11 negative_sampler.num_negs_per_pos 14.0 +999 11 training.batch_size 2.0 +999 12 model.embedding_dim 1.0 +999 12 model.scoring_fct_norm 2.0 +999 12 loss.margin 1.1303247178780356 +999 12 loss.adversarial_temperature 0.31387297374300527 +999 12 optimizer.lr 0.010203620316668481 +999 12 negative_sampler.num_negs_per_pos 29.0 +999 12 training.batch_size 2.0 +999 13 model.embedding_dim 2.0 +999 13 model.scoring_fct_norm 2.0 +999 13 loss.margin 10.892150775177695 +999 13 loss.adversarial_temperature 0.176975929253671 +999 13 optimizer.lr 0.0018916865756964602 +999 13 negative_sampler.num_negs_per_pos 99.0 +999 13 training.batch_size 1.0 +999 14 model.embedding_dim 2.0 +999 14 model.scoring_fct_norm 2.0 +999 14 loss.margin 21.80512860873135 +999 14 loss.adversarial_temperature 0.3599991034759895 +999 14 optimizer.lr 0.0012245246140258155 +999 14 negative_sampler.num_negs_per_pos 0.0 +999 14 training.batch_size 1.0 +999 15 model.embedding_dim 2.0 +999 15 model.scoring_fct_norm 1.0 +999 15 loss.margin 14.777734657067896 +999 15 loss.adversarial_temperature 0.7912364780975426 +999 15 optimizer.lr 0.07807860232544997 +999 15 negative_sampler.num_negs_per_pos 96.0 +999 15 training.batch_size 2.0 +999 16 model.embedding_dim 1.0 +999 16 model.scoring_fct_norm 1.0 +999 16 loss.margin 18.607815233047255 +999 16 loss.adversarial_temperature 0.4480480292971819 +999 16 optimizer.lr 0.0010943806401264016 +999 16 negative_sampler.num_negs_per_pos 89.0 +999 16 training.batch_size 2.0 +999 17 model.embedding_dim 2.0 +999 17 model.scoring_fct_norm 1.0 +999 17 loss.margin 20.86791239424263 +999 17 loss.adversarial_temperature 0.8800114678288905 +999 17 optimizer.lr 0.0014844655748717614 +999 17 
negative_sampler.num_negs_per_pos 95.0 +999 17 training.batch_size 1.0 +999 18 model.embedding_dim 1.0 +999 18 model.scoring_fct_norm 2.0 +999 18 loss.margin 27.11744591121539 +999 18 loss.adversarial_temperature 0.4766962423208312 +999 18 optimizer.lr 0.011454204990663373 +999 18 negative_sampler.num_negs_per_pos 79.0 +999 18 training.batch_size 0.0 +999 19 model.embedding_dim 1.0 +999 19 model.scoring_fct_norm 1.0 +999 19 loss.margin 3.2277138164418346 +999 19 loss.adversarial_temperature 0.13007603289105168 +999 19 optimizer.lr 0.008672159496662803 +999 19 negative_sampler.num_negs_per_pos 18.0 +999 19 training.batch_size 1.0 +999 20 model.embedding_dim 1.0 +999 20 model.scoring_fct_norm 1.0 +999 20 loss.margin 5.1669788268092764 +999 20 loss.adversarial_temperature 0.9862086475748003 +999 20 optimizer.lr 0.03885920255341916 +999 20 negative_sampler.num_negs_per_pos 7.0 +999 20 training.batch_size 0.0 +999 21 model.embedding_dim 0.0 +999 21 model.scoring_fct_norm 1.0 +999 21 loss.margin 21.942812461868677 +999 21 loss.adversarial_temperature 0.7477553280566555 +999 21 optimizer.lr 0.0012084987974291094 +999 21 negative_sampler.num_negs_per_pos 13.0 +999 21 training.batch_size 2.0 +999 22 model.embedding_dim 1.0 +999 22 model.scoring_fct_norm 1.0 +999 22 loss.margin 27.427032821146273 +999 22 loss.adversarial_temperature 0.9876908601266218 +999 22 optimizer.lr 0.09599494275562226 +999 22 negative_sampler.num_negs_per_pos 69.0 +999 22 training.batch_size 0.0 +999 23 model.embedding_dim 0.0 +999 23 model.scoring_fct_norm 1.0 +999 23 loss.margin 6.137829019356771 +999 23 loss.adversarial_temperature 0.9495583149670804 +999 23 optimizer.lr 0.0022335057006266155 +999 23 negative_sampler.num_negs_per_pos 35.0 +999 23 training.batch_size 0.0 +999 24 model.embedding_dim 0.0 +999 24 model.scoring_fct_norm 2.0 +999 24 loss.margin 21.75090040764131 +999 24 loss.adversarial_temperature 0.49504500445921495 +999 24 optimizer.lr 0.010211807284394676 +999 24 
negative_sampler.num_negs_per_pos 91.0 +999 24 training.batch_size 2.0 +999 25 model.embedding_dim 1.0 +999 25 model.scoring_fct_norm 2.0 +999 25 loss.margin 2.603280690623988 +999 25 loss.adversarial_temperature 0.7963273516646026 +999 25 optimizer.lr 0.07992429816579695 +999 25 negative_sampler.num_negs_per_pos 71.0 +999 25 training.batch_size 1.0 +999 26 model.embedding_dim 1.0 +999 26 model.scoring_fct_norm 1.0 +999 26 loss.margin 19.079968075341622 +999 26 loss.adversarial_temperature 0.6813263885984373 +999 26 optimizer.lr 0.0018323924954535243 +999 26 negative_sampler.num_negs_per_pos 81.0 +999 26 training.batch_size 0.0 +999 27 model.embedding_dim 1.0 +999 27 model.scoring_fct_norm 2.0 +999 27 loss.margin 21.851228328028007 +999 27 loss.adversarial_temperature 0.8955689330936741 +999 27 optimizer.lr 0.008478807366573212 +999 27 negative_sampler.num_negs_per_pos 97.0 +999 27 training.batch_size 2.0 +999 28 model.embedding_dim 1.0 +999 28 model.scoring_fct_norm 2.0 +999 28 loss.margin 28.094238175400356 +999 28 loss.adversarial_temperature 0.7805943848353786 +999 28 optimizer.lr 0.023239336685010163 +999 28 negative_sampler.num_negs_per_pos 34.0 +999 28 training.batch_size 1.0 +999 29 model.embedding_dim 0.0 +999 29 model.scoring_fct_norm 1.0 +999 29 loss.margin 10.525486380503832 +999 29 loss.adversarial_temperature 0.9670415442195336 +999 29 optimizer.lr 0.020856630012879098 +999 29 negative_sampler.num_negs_per_pos 72.0 +999 29 training.batch_size 2.0 +999 30 model.embedding_dim 1.0 +999 30 model.scoring_fct_norm 2.0 +999 30 loss.margin 11.07469136554006 +999 30 loss.adversarial_temperature 0.6253196453928465 +999 30 optimizer.lr 0.021325015527175018 +999 30 negative_sampler.num_negs_per_pos 38.0 +999 30 training.batch_size 1.0 +999 31 model.embedding_dim 1.0 +999 31 model.scoring_fct_norm 1.0 +999 31 loss.margin 29.579820086528837 +999 31 loss.adversarial_temperature 0.5412758243207297 +999 31 optimizer.lr 0.03117537154503976 +999 31 
negative_sampler.num_negs_per_pos 82.0 +999 31 training.batch_size 1.0 +999 32 model.embedding_dim 2.0 +999 32 model.scoring_fct_norm 1.0 +999 32 loss.margin 11.62145329535502 +999 32 loss.adversarial_temperature 0.6387196334800483 +999 32 optimizer.lr 0.0015239879332891333 +999 32 negative_sampler.num_negs_per_pos 47.0 +999 32 training.batch_size 1.0 +999 33 model.embedding_dim 1.0 +999 33 model.scoring_fct_norm 1.0 +999 33 loss.margin 24.462509542088622 +999 33 loss.adversarial_temperature 0.45277373241600016 +999 33 optimizer.lr 0.0016225040264676084 +999 33 negative_sampler.num_negs_per_pos 88.0 +999 33 training.batch_size 1.0 +999 34 model.embedding_dim 1.0 +999 34 model.scoring_fct_norm 2.0 +999 34 loss.margin 29.321277059196277 +999 34 loss.adversarial_temperature 0.9421999012564152 +999 34 optimizer.lr 0.06857704049137495 +999 34 negative_sampler.num_negs_per_pos 27.0 +999 34 training.batch_size 1.0 +999 35 model.embedding_dim 2.0 +999 35 model.scoring_fct_norm 2.0 +999 35 loss.margin 13.80960718154189 +999 35 loss.adversarial_temperature 0.1377827124338998 +999 35 optimizer.lr 0.004506582687243194 +999 35 negative_sampler.num_negs_per_pos 9.0 +999 35 training.batch_size 0.0 +999 36 model.embedding_dim 1.0 +999 36 model.scoring_fct_norm 1.0 +999 36 loss.margin 6.391460141091543 +999 36 loss.adversarial_temperature 0.6562923433809053 +999 36 optimizer.lr 0.01422781525802785 +999 36 negative_sampler.num_negs_per_pos 88.0 +999 36 training.batch_size 1.0 +999 37 model.embedding_dim 2.0 +999 37 model.scoring_fct_norm 1.0 +999 37 loss.margin 13.452350520538538 +999 37 loss.adversarial_temperature 0.35013638426796223 +999 37 optimizer.lr 0.0566892593470993 +999 37 negative_sampler.num_negs_per_pos 95.0 +999 37 training.batch_size 1.0 +999 38 model.embedding_dim 1.0 +999 38 model.scoring_fct_norm 2.0 +999 38 loss.margin 20.470389793582722 +999 38 loss.adversarial_temperature 0.8299160055479121 +999 38 optimizer.lr 0.0019960256299483468 +999 38 
negative_sampler.num_negs_per_pos 81.0 +999 38 training.batch_size 0.0 +999 39 model.embedding_dim 1.0 +999 39 model.scoring_fct_norm 2.0 +999 39 loss.margin 6.398452434113399 +999 39 loss.adversarial_temperature 0.9231232911340657 +999 39 optimizer.lr 0.0010779781115293407 +999 39 negative_sampler.num_negs_per_pos 7.0 +999 39 training.batch_size 1.0 +999 40 model.embedding_dim 0.0 +999 40 model.scoring_fct_norm 1.0 +999 40 loss.margin 10.20232381019392 +999 40 loss.adversarial_temperature 0.745869397337135 +999 40 optimizer.lr 0.0011069134849732449 +999 40 negative_sampler.num_negs_per_pos 19.0 +999 40 training.batch_size 0.0 +999 41 model.embedding_dim 2.0 +999 41 model.scoring_fct_norm 2.0 +999 41 loss.margin 4.390721643430902 +999 41 loss.adversarial_temperature 0.4350277016421634 +999 41 optimizer.lr 0.003754427738754801 +999 41 negative_sampler.num_negs_per_pos 40.0 +999 41 training.batch_size 2.0 +999 42 model.embedding_dim 1.0 +999 42 model.scoring_fct_norm 2.0 +999 42 loss.margin 17.121533254573038 +999 42 loss.adversarial_temperature 0.8163249671044039 +999 42 optimizer.lr 0.005045269680725447 +999 42 negative_sampler.num_negs_per_pos 74.0 +999 42 training.batch_size 0.0 +999 43 model.embedding_dim 1.0 +999 43 model.scoring_fct_norm 2.0 +999 43 loss.margin 28.249728724706166 +999 43 loss.adversarial_temperature 0.6334375107934925 +999 43 optimizer.lr 0.013309396429038448 +999 43 negative_sampler.num_negs_per_pos 19.0 +999 43 training.batch_size 0.0 +999 44 model.embedding_dim 0.0 +999 44 model.scoring_fct_norm 1.0 +999 44 loss.margin 1.7060769293239484 +999 44 loss.adversarial_temperature 0.5415722487843772 +999 44 optimizer.lr 0.002882869455952105 +999 44 negative_sampler.num_negs_per_pos 81.0 +999 44 training.batch_size 0.0 +999 45 model.embedding_dim 0.0 +999 45 model.scoring_fct_norm 1.0 +999 45 loss.margin 12.875516855699843 +999 45 loss.adversarial_temperature 0.11800563611807637 +999 45 optimizer.lr 0.0024710782551314024 +999 45 
negative_sampler.num_negs_per_pos 79.0 +999 45 training.batch_size 1.0 +999 46 model.embedding_dim 0.0 +999 46 model.scoring_fct_norm 1.0 +999 46 loss.margin 1.3498127239011257 +999 46 loss.adversarial_temperature 0.22736484067061158 +999 46 optimizer.lr 0.08979621860784227 +999 46 negative_sampler.num_negs_per_pos 5.0 +999 46 training.batch_size 0.0 +999 47 model.embedding_dim 1.0 +999 47 model.scoring_fct_norm 2.0 +999 47 loss.margin 5.65624325411239 +999 47 loss.adversarial_temperature 0.415727265688288 +999 47 optimizer.lr 0.01649222836439914 +999 47 negative_sampler.num_negs_per_pos 13.0 +999 47 training.batch_size 2.0 +999 48 model.embedding_dim 2.0 +999 48 model.scoring_fct_norm 1.0 +999 48 loss.margin 26.077802814301045 +999 48 loss.adversarial_temperature 0.5826711194745746 +999 48 optimizer.lr 0.010547107009796263 +999 48 negative_sampler.num_negs_per_pos 62.0 +999 48 training.batch_size 2.0 +999 49 model.embedding_dim 1.0 +999 49 model.scoring_fct_norm 2.0 +999 49 loss.margin 7.302882318226301 +999 49 loss.adversarial_temperature 0.2573370158074426 +999 49 optimizer.lr 0.012224564055436569 +999 49 negative_sampler.num_negs_per_pos 41.0 +999 49 training.batch_size 1.0 +999 50 model.embedding_dim 1.0 +999 50 model.scoring_fct_norm 2.0 +999 50 loss.margin 19.580614669137645 +999 50 loss.adversarial_temperature 0.7690045048986539 +999 50 optimizer.lr 0.0012390091749124596 +999 50 negative_sampler.num_negs_per_pos 11.0 +999 50 training.batch_size 1.0 +999 51 model.embedding_dim 2.0 +999 51 model.scoring_fct_norm 1.0 +999 51 loss.margin 22.02405352783656 +999 51 loss.adversarial_temperature 0.20761835243032384 +999 51 optimizer.lr 0.04554379985435451 +999 51 negative_sampler.num_negs_per_pos 40.0 +999 51 training.batch_size 0.0 +999 52 model.embedding_dim 0.0 +999 52 model.scoring_fct_norm 1.0 +999 52 loss.margin 26.456705598993988 +999 52 loss.adversarial_temperature 0.43214140437314374 +999 52 optimizer.lr 0.0033468148130262566 +999 52 
negative_sampler.num_negs_per_pos 68.0 +999 52 training.batch_size 2.0 +999 53 model.embedding_dim 1.0 +999 53 model.scoring_fct_norm 2.0 +999 53 loss.margin 28.54909868219556 +999 53 loss.adversarial_temperature 0.6938469913039731 +999 53 optimizer.lr 0.009144441434008594 +999 53 negative_sampler.num_negs_per_pos 2.0 +999 53 training.batch_size 2.0 +999 54 model.embedding_dim 0.0 +999 54 model.scoring_fct_norm 2.0 +999 54 loss.margin 27.3176521893849 +999 54 loss.adversarial_temperature 0.9180340359917291 +999 54 optimizer.lr 0.08846391187907232 +999 54 negative_sampler.num_negs_per_pos 70.0 +999 54 training.batch_size 0.0 +999 55 model.embedding_dim 1.0 +999 55 model.scoring_fct_norm 1.0 +999 55 loss.margin 20.27033538522819 +999 55 loss.adversarial_temperature 0.8126966805437911 +999 55 optimizer.lr 0.07198703216570305 +999 55 negative_sampler.num_negs_per_pos 36.0 +999 55 training.batch_size 1.0 +999 56 model.embedding_dim 0.0 +999 56 model.scoring_fct_norm 1.0 +999 56 loss.margin 8.143308604348473 +999 56 loss.adversarial_temperature 0.8220435979921837 +999 56 optimizer.lr 0.02363788147783451 +999 56 negative_sampler.num_negs_per_pos 73.0 +999 56 training.batch_size 1.0 +999 57 model.embedding_dim 2.0 +999 57 model.scoring_fct_norm 2.0 +999 57 loss.margin 22.767659292393365 +999 57 loss.adversarial_temperature 0.7581679349311943 +999 57 optimizer.lr 0.09899319437229512 +999 57 negative_sampler.num_negs_per_pos 78.0 +999 57 training.batch_size 2.0 +999 58 model.embedding_dim 0.0 +999 58 model.scoring_fct_norm 1.0 +999 58 loss.margin 20.531838365143273 +999 58 loss.adversarial_temperature 0.6678270731571772 +999 58 optimizer.lr 0.008421731372075794 +999 58 negative_sampler.num_negs_per_pos 46.0 +999 58 training.batch_size 0.0 +999 59 model.embedding_dim 2.0 +999 59 model.scoring_fct_norm 2.0 +999 59 loss.margin 9.903682584109976 +999 59 loss.adversarial_temperature 0.9501397178494564 +999 59 optimizer.lr 0.05893369706256945 +999 59 
negative_sampler.num_negs_per_pos 18.0 +999 59 training.batch_size 0.0 +999 60 model.embedding_dim 2.0 +999 60 model.scoring_fct_norm 1.0 +999 60 loss.margin 21.406203905273284 +999 60 loss.adversarial_temperature 0.11221328472592762 +999 60 optimizer.lr 0.002481011744080169 +999 60 negative_sampler.num_negs_per_pos 9.0 +999 60 training.batch_size 2.0 +999 61 model.embedding_dim 1.0 +999 61 model.scoring_fct_norm 1.0 +999 61 loss.margin 9.304951439547928 +999 61 loss.adversarial_temperature 0.4285270378581092 +999 61 optimizer.lr 0.0013718889924857402 +999 61 negative_sampler.num_negs_per_pos 51.0 +999 61 training.batch_size 1.0 +999 62 model.embedding_dim 1.0 +999 62 model.scoring_fct_norm 1.0 +999 62 loss.margin 14.189995368271733 +999 62 loss.adversarial_temperature 0.7199114684896275 +999 62 optimizer.lr 0.0033745961993856466 +999 62 negative_sampler.num_negs_per_pos 78.0 +999 62 training.batch_size 1.0 +999 63 model.embedding_dim 1.0 +999 63 model.scoring_fct_norm 1.0 +999 63 loss.margin 24.97502508461423 +999 63 loss.adversarial_temperature 0.9487332178812757 +999 63 optimizer.lr 0.04718799062463138 +999 63 negative_sampler.num_negs_per_pos 69.0 +999 63 training.batch_size 2.0 +999 64 model.embedding_dim 1.0 +999 64 model.scoring_fct_norm 2.0 +999 64 loss.margin 10.349093458339462 +999 64 loss.adversarial_temperature 0.24943451380520554 +999 64 optimizer.lr 0.0012523953573851372 +999 64 negative_sampler.num_negs_per_pos 1.0 +999 64 training.batch_size 2.0 +999 65 model.embedding_dim 2.0 +999 65 model.scoring_fct_norm 1.0 +999 65 loss.margin 5.551061138872654 +999 65 loss.adversarial_temperature 0.6341619233382809 +999 65 optimizer.lr 0.003158856974897389 +999 65 negative_sampler.num_negs_per_pos 6.0 +999 65 training.batch_size 1.0 +999 66 model.embedding_dim 0.0 +999 66 model.scoring_fct_norm 2.0 +999 66 loss.margin 2.3886475031061485 +999 66 loss.adversarial_temperature 0.8377427164604162 +999 66 optimizer.lr 0.02813613868017478 +999 66 
negative_sampler.num_negs_per_pos 34.0 +999 66 training.batch_size 0.0 +999 67 model.embedding_dim 2.0 +999 67 model.scoring_fct_norm 1.0 +999 67 loss.margin 10.214816182156325 +999 67 loss.adversarial_temperature 0.30792231366034784 +999 67 optimizer.lr 0.004317677186968748 +999 67 negative_sampler.num_negs_per_pos 18.0 +999 67 training.batch_size 2.0 +999 68 model.embedding_dim 2.0 +999 68 model.scoring_fct_norm 1.0 +999 68 loss.margin 23.433919109671365 +999 68 loss.adversarial_temperature 0.15061915751632318 +999 68 optimizer.lr 0.00418203754666534 +999 68 negative_sampler.num_negs_per_pos 34.0 +999 68 training.batch_size 1.0 +999 69 model.embedding_dim 1.0 +999 69 model.scoring_fct_norm 1.0 +999 69 loss.margin 3.3591211183870078 +999 69 loss.adversarial_temperature 0.3186292438332429 +999 69 optimizer.lr 0.0032289771797565595 +999 69 negative_sampler.num_negs_per_pos 74.0 +999 69 training.batch_size 1.0 +999 70 model.embedding_dim 2.0 +999 70 model.scoring_fct_norm 2.0 +999 70 loss.margin 20.125650516884804 +999 70 loss.adversarial_temperature 0.8825488318855099 +999 70 optimizer.lr 0.046290325055868255 +999 70 negative_sampler.num_negs_per_pos 31.0 +999 70 training.batch_size 2.0 +999 71 model.embedding_dim 2.0 +999 71 model.scoring_fct_norm 1.0 +999 71 loss.margin 26.763954418308998 +999 71 loss.adversarial_temperature 0.12817367428040013 +999 71 optimizer.lr 0.012531810682651537 +999 71 negative_sampler.num_negs_per_pos 77.0 +999 71 training.batch_size 2.0 +999 72 model.embedding_dim 0.0 +999 72 model.scoring_fct_norm 1.0 +999 72 loss.margin 20.427934494789056 +999 72 loss.adversarial_temperature 0.7830828416695771 +999 72 optimizer.lr 0.059983743828201284 +999 72 negative_sampler.num_negs_per_pos 17.0 +999 72 training.batch_size 2.0 +999 73 model.embedding_dim 1.0 +999 73 model.scoring_fct_norm 1.0 +999 73 loss.margin 29.35983047078375 +999 73 loss.adversarial_temperature 0.6480310147991915 +999 73 optimizer.lr 0.006253151800861899 +999 73 
negative_sampler.num_negs_per_pos 61.0 +999 73 training.batch_size 2.0 +999 74 model.embedding_dim 0.0 +999 74 model.scoring_fct_norm 2.0 +999 74 loss.margin 21.677676891286122 +999 74 loss.adversarial_temperature 0.3241464483779516 +999 74 optimizer.lr 0.0013304546896614276 +999 74 negative_sampler.num_negs_per_pos 51.0 +999 74 training.batch_size 1.0 +999 75 model.embedding_dim 1.0 +999 75 model.scoring_fct_norm 2.0 +999 75 loss.margin 21.18700918598715 +999 75 loss.adversarial_temperature 0.5544413565314791 +999 75 optimizer.lr 0.019961323653756417 +999 75 negative_sampler.num_negs_per_pos 46.0 +999 75 training.batch_size 0.0 +999 76 model.embedding_dim 0.0 +999 76 model.scoring_fct_norm 1.0 +999 76 loss.margin 15.079112278794318 +999 76 loss.adversarial_temperature 0.6693677098454738 +999 76 optimizer.lr 0.0010365608948957874 +999 76 negative_sampler.num_negs_per_pos 44.0 +999 76 training.batch_size 1.0 +999 77 model.embedding_dim 2.0 +999 77 model.scoring_fct_norm 2.0 +999 77 loss.margin 2.0114271544711917 +999 77 loss.adversarial_temperature 0.69075298989559 +999 77 optimizer.lr 0.00831641847787984 +999 77 negative_sampler.num_negs_per_pos 63.0 +999 77 training.batch_size 1.0 +999 78 model.embedding_dim 0.0 +999 78 model.scoring_fct_norm 2.0 +999 78 loss.margin 21.190221523126684 +999 78 loss.adversarial_temperature 0.7206725946887728 +999 78 optimizer.lr 0.03779934102641631 +999 78 negative_sampler.num_negs_per_pos 83.0 +999 78 training.batch_size 1.0 +999 79 model.embedding_dim 0.0 +999 79 model.scoring_fct_norm 1.0 +999 79 loss.margin 17.351850282356413 +999 79 loss.adversarial_temperature 0.6307131367506744 +999 79 optimizer.lr 0.05729100295360845 +999 79 negative_sampler.num_negs_per_pos 4.0 +999 79 training.batch_size 2.0 +999 80 model.embedding_dim 1.0 +999 80 model.scoring_fct_norm 1.0 +999 80 loss.margin 8.239263717146233 +999 80 loss.adversarial_temperature 0.868532664052764 +999 80 optimizer.lr 0.004513410529552212 +999 80 
negative_sampler.num_negs_per_pos 52.0 +999 80 training.batch_size 0.0 +999 81 model.embedding_dim 2.0 +999 81 model.scoring_fct_norm 2.0 +999 81 loss.margin 18.5491899154182 +999 81 loss.adversarial_temperature 0.26767408017905625 +999 81 optimizer.lr 0.08925908550853703 +999 81 negative_sampler.num_negs_per_pos 65.0 +999 81 training.batch_size 2.0 +999 82 model.embedding_dim 2.0 +999 82 model.scoring_fct_norm 2.0 +999 82 loss.margin 20.287493935190135 +999 82 loss.adversarial_temperature 0.8618356237486406 +999 82 optimizer.lr 0.036650313340094075 +999 82 negative_sampler.num_negs_per_pos 0.0 +999 82 training.batch_size 2.0 +999 83 model.embedding_dim 2.0 +999 83 model.scoring_fct_norm 2.0 +999 83 loss.margin 7.07458882746167 +999 83 loss.adversarial_temperature 0.31500271399707136 +999 83 optimizer.lr 0.002931735459944962 +999 83 negative_sampler.num_negs_per_pos 33.0 +999 83 training.batch_size 1.0 +999 84 model.embedding_dim 1.0 +999 84 model.scoring_fct_norm 2.0 +999 84 loss.margin 12.749138484662158 +999 84 loss.adversarial_temperature 0.2938790433403181 +999 84 optimizer.lr 0.01904041474677595 +999 84 negative_sampler.num_negs_per_pos 94.0 +999 84 training.batch_size 0.0 +999 85 model.embedding_dim 1.0 +999 85 model.scoring_fct_norm 2.0 +999 85 loss.margin 3.351493269426007 +999 85 loss.adversarial_temperature 0.6176730256498231 +999 85 optimizer.lr 0.0015981230894359447 +999 85 negative_sampler.num_negs_per_pos 35.0 +999 85 training.batch_size 2.0 +999 86 model.embedding_dim 0.0 +999 86 model.scoring_fct_norm 2.0 +999 86 loss.margin 6.014082696730222 +999 86 loss.adversarial_temperature 0.7951499611284246 +999 86 optimizer.lr 0.0070778760050030105 +999 86 negative_sampler.num_negs_per_pos 35.0 +999 86 training.batch_size 2.0 +999 87 model.embedding_dim 0.0 +999 87 model.scoring_fct_norm 2.0 +999 87 loss.margin 27.975845432030052 +999 87 loss.adversarial_temperature 0.32669694322482 +999 87 optimizer.lr 0.005454639095588804 +999 87 
negative_sampler.num_negs_per_pos 0.0 +999 87 training.batch_size 2.0 +999 88 model.embedding_dim 0.0 +999 88 model.scoring_fct_norm 2.0 +999 88 loss.margin 13.658220591969618 +999 88 loss.adversarial_temperature 0.1854329758784189 +999 88 optimizer.lr 0.06564602900351396 +999 88 negative_sampler.num_negs_per_pos 6.0 +999 88 training.batch_size 2.0 +999 89 model.embedding_dim 0.0 +999 89 model.scoring_fct_norm 2.0 +999 89 loss.margin 7.305123306611625 +999 89 loss.adversarial_temperature 0.8221069170502273 +999 89 optimizer.lr 0.05379613292159417 +999 89 negative_sampler.num_negs_per_pos 96.0 +999 89 training.batch_size 0.0 +999 90 model.embedding_dim 0.0 +999 90 model.scoring_fct_norm 2.0 +999 90 loss.margin 22.294784061847533 +999 90 loss.adversarial_temperature 0.8332011796810829 +999 90 optimizer.lr 0.00990867026693238 +999 90 negative_sampler.num_negs_per_pos 33.0 +999 90 training.batch_size 1.0 +999 91 model.embedding_dim 0.0 +999 91 model.scoring_fct_norm 1.0 +999 91 loss.margin 14.77559948262527 +999 91 loss.adversarial_temperature 0.5858824564159087 +999 91 optimizer.lr 0.014368263087850192 +999 91 negative_sampler.num_negs_per_pos 30.0 +999 91 training.batch_size 1.0 +999 92 model.embedding_dim 0.0 +999 92 model.scoring_fct_norm 2.0 +999 92 loss.margin 12.844324649215892 +999 92 loss.adversarial_temperature 0.3423746149433027 +999 92 optimizer.lr 0.09361218312733378 +999 92 negative_sampler.num_negs_per_pos 32.0 +999 92 training.batch_size 1.0 +999 93 model.embedding_dim 0.0 +999 93 model.scoring_fct_norm 2.0 +999 93 loss.margin 8.00602413414719 +999 93 loss.adversarial_temperature 0.6409025325091635 +999 93 optimizer.lr 0.0444563122875194 +999 93 negative_sampler.num_negs_per_pos 7.0 +999 93 training.batch_size 1.0 +999 94 model.embedding_dim 0.0 +999 94 model.scoring_fct_norm 1.0 +999 94 loss.margin 21.050347899768493 +999 94 loss.adversarial_temperature 0.9327070925612706 +999 94 optimizer.lr 0.0019480026518350162 +999 94 
negative_sampler.num_negs_per_pos 3.0 +999 94 training.batch_size 2.0 +999 95 model.embedding_dim 1.0 +999 95 model.scoring_fct_norm 2.0 +999 95 loss.margin 18.345074378151914 +999 95 loss.adversarial_temperature 0.3170603885069012 +999 95 optimizer.lr 0.00336931280530793 +999 95 negative_sampler.num_negs_per_pos 39.0 +999 95 training.batch_size 0.0 +999 96 model.embedding_dim 0.0 +999 96 model.scoring_fct_norm 2.0 +999 96 loss.margin 10.319543066265974 +999 96 loss.adversarial_temperature 0.2637006236673058 +999 96 optimizer.lr 0.08258972198928917 +999 96 negative_sampler.num_negs_per_pos 15.0 +999 96 training.batch_size 0.0 +999 97 model.embedding_dim 2.0 +999 97 model.scoring_fct_norm 2.0 +999 97 loss.margin 20.16925296238067 +999 97 loss.adversarial_temperature 0.6042795057955795 +999 97 optimizer.lr 0.01853538075674883 +999 97 negative_sampler.num_negs_per_pos 70.0 +999 97 training.batch_size 0.0 +999 98 model.embedding_dim 2.0 +999 98 model.scoring_fct_norm 1.0 +999 98 loss.margin 4.615203672330788 +999 98 loss.adversarial_temperature 0.32350881604487514 +999 98 optimizer.lr 0.014465618532582537 +999 98 negative_sampler.num_negs_per_pos 26.0 +999 98 training.batch_size 2.0 +999 99 model.embedding_dim 1.0 +999 99 model.scoring_fct_norm 2.0 +999 99 loss.margin 16.459581840977208 +999 99 loss.adversarial_temperature 0.5396907244349576 +999 99 optimizer.lr 0.0016438455599311176 +999 99 negative_sampler.num_negs_per_pos 87.0 +999 99 training.batch_size 0.0 +999 100 model.embedding_dim 1.0 +999 100 model.scoring_fct_norm 2.0 +999 100 loss.margin 25.208633280699118 +999 100 loss.adversarial_temperature 0.2901208714589546 +999 100 optimizer.lr 0.01877520927257773 +999 100 negative_sampler.num_negs_per_pos 96.0 +999 100 training.batch_size 1.0 +999 1 dataset """fb15k237""" +999 1 model """unstructuredmodel""" +999 1 loss """nssa""" +999 1 regularizer """no""" +999 1 optimizer """adam""" +999 1 training_loop """owa""" +999 1 negative_sampler """basic""" +999 1 
evaluator """rankbased""" +999 2 dataset """fb15k237""" +999 2 model """unstructuredmodel""" +999 2 loss """nssa""" +999 2 regularizer """no""" +999 2 optimizer """adam""" +999 2 training_loop """owa""" +999 2 negative_sampler """basic""" +999 2 evaluator """rankbased""" +999 3 dataset """fb15k237""" +999 3 model """unstructuredmodel""" +999 3 loss """nssa""" +999 3 regularizer """no""" +999 3 optimizer """adam""" +999 3 training_loop """owa""" +999 3 negative_sampler """basic""" +999 3 evaluator """rankbased""" +999 4 dataset """fb15k237""" +999 4 model """unstructuredmodel""" +999 4 loss """nssa""" +999 4 regularizer """no""" +999 4 optimizer """adam""" +999 4 training_loop """owa""" +999 4 negative_sampler """basic""" +999 4 evaluator """rankbased""" +999 5 dataset """fb15k237""" +999 5 model """unstructuredmodel""" +999 5 loss """nssa""" +999 5 regularizer """no""" +999 5 optimizer """adam""" +999 5 training_loop """owa""" +999 5 negative_sampler """basic""" +999 5 evaluator """rankbased""" +999 6 dataset """fb15k237""" +999 6 model """unstructuredmodel""" +999 6 loss """nssa""" +999 6 regularizer """no""" +999 6 optimizer """adam""" +999 6 training_loop """owa""" +999 6 negative_sampler """basic""" +999 6 evaluator """rankbased""" +999 7 dataset """fb15k237""" +999 7 model """unstructuredmodel""" +999 7 loss """nssa""" +999 7 regularizer """no""" +999 7 optimizer """adam""" +999 7 training_loop """owa""" +999 7 negative_sampler """basic""" +999 7 evaluator """rankbased""" +999 8 dataset """fb15k237""" +999 8 model """unstructuredmodel""" +999 8 loss """nssa""" +999 8 regularizer """no""" +999 8 optimizer """adam""" +999 8 training_loop """owa""" +999 8 negative_sampler """basic""" +999 8 evaluator """rankbased""" +999 9 dataset """fb15k237""" +999 9 model """unstructuredmodel""" +999 9 loss """nssa""" +999 9 regularizer """no""" +999 9 optimizer """adam""" +999 9 training_loop """owa""" +999 9 negative_sampler """basic""" +999 9 evaluator """rankbased""" +999 
10 dataset """fb15k237""" +999 10 model """unstructuredmodel""" +999 10 loss """nssa""" +999 10 regularizer """no""" +999 10 optimizer """adam""" +999 10 training_loop """owa""" +999 10 negative_sampler """basic""" +999 10 evaluator """rankbased""" +999 11 dataset """fb15k237""" +999 11 model """unstructuredmodel""" +999 11 loss """nssa""" +999 11 regularizer """no""" +999 11 optimizer """adam""" +999 11 training_loop """owa""" +999 11 negative_sampler """basic""" +999 11 evaluator """rankbased""" +999 12 dataset """fb15k237""" +999 12 model """unstructuredmodel""" +999 12 loss """nssa""" +999 12 regularizer """no""" +999 12 optimizer """adam""" +999 12 training_loop """owa""" +999 12 negative_sampler """basic""" +999 12 evaluator """rankbased""" +999 13 dataset """fb15k237""" +999 13 model """unstructuredmodel""" +999 13 loss """nssa""" +999 13 regularizer """no""" +999 13 optimizer """adam""" +999 13 training_loop """owa""" +999 13 negative_sampler """basic""" +999 13 evaluator """rankbased""" +999 14 dataset """fb15k237""" +999 14 model """unstructuredmodel""" +999 14 loss """nssa""" +999 14 regularizer """no""" +999 14 optimizer """adam""" +999 14 training_loop """owa""" +999 14 negative_sampler """basic""" +999 14 evaluator """rankbased""" +999 15 dataset """fb15k237""" +999 15 model """unstructuredmodel""" +999 15 loss """nssa""" +999 15 regularizer """no""" +999 15 optimizer """adam""" +999 15 training_loop """owa""" +999 15 negative_sampler """basic""" +999 15 evaluator """rankbased""" +999 16 dataset """fb15k237""" +999 16 model """unstructuredmodel""" +999 16 loss """nssa""" +999 16 regularizer """no""" +999 16 optimizer """adam""" +999 16 training_loop """owa""" +999 16 negative_sampler """basic""" +999 16 evaluator """rankbased""" +999 17 dataset """fb15k237""" +999 17 model """unstructuredmodel""" +999 17 loss """nssa""" +999 17 regularizer """no""" +999 17 optimizer """adam""" +999 17 training_loop """owa""" +999 17 negative_sampler """basic""" +999 
17 evaluator """rankbased""" +999 18 dataset """fb15k237""" +999 18 model """unstructuredmodel""" +999 18 loss """nssa""" +999 18 regularizer """no""" +999 18 optimizer """adam""" +999 18 training_loop """owa""" +999 18 negative_sampler """basic""" +999 18 evaluator """rankbased""" +999 19 dataset """fb15k237""" +999 19 model """unstructuredmodel""" +999 19 loss """nssa""" +999 19 regularizer """no""" +999 19 optimizer """adam""" +999 19 training_loop """owa""" +999 19 negative_sampler """basic""" +999 19 evaluator """rankbased""" +999 20 dataset """fb15k237""" +999 20 model """unstructuredmodel""" +999 20 loss """nssa""" +999 20 regularizer """no""" +999 20 optimizer """adam""" +999 20 training_loop """owa""" +999 20 negative_sampler """basic""" +999 20 evaluator """rankbased""" +999 21 dataset """fb15k237""" +999 21 model """unstructuredmodel""" +999 21 loss """nssa""" +999 21 regularizer """no""" +999 21 optimizer """adam""" +999 21 training_loop """owa""" +999 21 negative_sampler """basic""" +999 21 evaluator """rankbased""" +999 22 dataset """fb15k237""" +999 22 model """unstructuredmodel""" +999 22 loss """nssa""" +999 22 regularizer """no""" +999 22 optimizer """adam""" +999 22 training_loop """owa""" +999 22 negative_sampler """basic""" +999 22 evaluator """rankbased""" +999 23 dataset """fb15k237""" +999 23 model """unstructuredmodel""" +999 23 loss """nssa""" +999 23 regularizer """no""" +999 23 optimizer """adam""" +999 23 training_loop """owa""" +999 23 negative_sampler """basic""" +999 23 evaluator """rankbased""" +999 24 dataset """fb15k237""" +999 24 model """unstructuredmodel""" +999 24 loss """nssa""" +999 24 regularizer """no""" +999 24 optimizer """adam""" +999 24 training_loop """owa""" +999 24 negative_sampler """basic""" +999 24 evaluator """rankbased""" +999 25 dataset """fb15k237""" +999 25 model """unstructuredmodel""" +999 25 loss """nssa""" +999 25 regularizer """no""" +999 25 optimizer """adam""" +999 25 training_loop """owa""" +999 25 
negative_sampler """basic""" +999 25 evaluator """rankbased""" +999 26 dataset """fb15k237""" +999 26 model """unstructuredmodel""" +999 26 loss """nssa""" +999 26 regularizer """no""" +999 26 optimizer """adam""" +999 26 training_loop """owa""" +999 26 negative_sampler """basic""" +999 26 evaluator """rankbased""" +999 27 dataset """fb15k237""" +999 27 model """unstructuredmodel""" +999 27 loss """nssa""" +999 27 regularizer """no""" +999 27 optimizer """adam""" +999 27 training_loop """owa""" +999 27 negative_sampler """basic""" +999 27 evaluator """rankbased""" +999 28 dataset """fb15k237""" +999 28 model """unstructuredmodel""" +999 28 loss """nssa""" +999 28 regularizer """no""" +999 28 optimizer """adam""" +999 28 training_loop """owa""" +999 28 negative_sampler """basic""" +999 28 evaluator """rankbased""" +999 29 dataset """fb15k237""" +999 29 model """unstructuredmodel""" +999 29 loss """nssa""" +999 29 regularizer """no""" +999 29 optimizer """adam""" +999 29 training_loop """owa""" +999 29 negative_sampler """basic""" +999 29 evaluator """rankbased""" +999 30 dataset """fb15k237""" +999 30 model """unstructuredmodel""" +999 30 loss """nssa""" +999 30 regularizer """no""" +999 30 optimizer """adam""" +999 30 training_loop """owa""" +999 30 negative_sampler """basic""" +999 30 evaluator """rankbased""" +999 31 dataset """fb15k237""" +999 31 model """unstructuredmodel""" +999 31 loss """nssa""" +999 31 regularizer """no""" +999 31 optimizer """adam""" +999 31 training_loop """owa""" +999 31 negative_sampler """basic""" +999 31 evaluator """rankbased""" +999 32 dataset """fb15k237""" +999 32 model """unstructuredmodel""" +999 32 loss """nssa""" +999 32 regularizer """no""" +999 32 optimizer """adam""" +999 32 training_loop """owa""" +999 32 negative_sampler """basic""" +999 32 evaluator """rankbased""" +999 33 dataset """fb15k237""" +999 33 model """unstructuredmodel""" +999 33 loss """nssa""" +999 33 regularizer """no""" +999 33 optimizer """adam""" +999 33 
training_loop """owa""" +999 33 negative_sampler """basic""" +999 33 evaluator """rankbased""" +999 34 dataset """fb15k237""" +999 34 model """unstructuredmodel""" +999 34 loss """nssa""" +999 34 regularizer """no""" +999 34 optimizer """adam""" +999 34 training_loop """owa""" +999 34 negative_sampler """basic""" +999 34 evaluator """rankbased""" +999 35 dataset """fb15k237""" +999 35 model """unstructuredmodel""" +999 35 loss """nssa""" +999 35 regularizer """no""" +999 35 optimizer """adam""" +999 35 training_loop """owa""" +999 35 negative_sampler """basic""" +999 35 evaluator """rankbased""" +999 36 dataset """fb15k237""" +999 36 model """unstructuredmodel""" +999 36 loss """nssa""" +999 36 regularizer """no""" +999 36 optimizer """adam""" +999 36 training_loop """owa""" +999 36 negative_sampler """basic""" +999 36 evaluator """rankbased""" +999 37 dataset """fb15k237""" +999 37 model """unstructuredmodel""" +999 37 loss """nssa""" +999 37 regularizer """no""" +999 37 optimizer """adam""" +999 37 training_loop """owa""" +999 37 negative_sampler """basic""" +999 37 evaluator """rankbased""" +999 38 dataset """fb15k237""" +999 38 model """unstructuredmodel""" +999 38 loss """nssa""" +999 38 regularizer """no""" +999 38 optimizer """adam""" +999 38 training_loop """owa""" +999 38 negative_sampler """basic""" +999 38 evaluator """rankbased""" +999 39 dataset """fb15k237""" +999 39 model """unstructuredmodel""" +999 39 loss """nssa""" +999 39 regularizer """no""" +999 39 optimizer """adam""" +999 39 training_loop """owa""" +999 39 negative_sampler """basic""" +999 39 evaluator """rankbased""" +999 40 dataset """fb15k237""" +999 40 model """unstructuredmodel""" +999 40 loss """nssa""" +999 40 regularizer """no""" +999 40 optimizer """adam""" +999 40 training_loop """owa""" +999 40 negative_sampler """basic""" +999 40 evaluator """rankbased""" +999 41 dataset """fb15k237""" +999 41 model """unstructuredmodel""" +999 41 loss """nssa""" +999 41 regularizer """no""" +999 
41 optimizer """adam""" +999 41 training_loop """owa""" +999 41 negative_sampler """basic""" +999 41 evaluator """rankbased""" +999 42 dataset """fb15k237""" +999 42 model """unstructuredmodel""" +999 42 loss """nssa""" +999 42 regularizer """no""" +999 42 optimizer """adam""" +999 42 training_loop """owa""" +999 42 negative_sampler """basic""" +999 42 evaluator """rankbased""" +999 43 dataset """fb15k237""" +999 43 model """unstructuredmodel""" +999 43 loss """nssa""" +999 43 regularizer """no""" +999 43 optimizer """adam""" +999 43 training_loop """owa""" +999 43 negative_sampler """basic""" +999 43 evaluator """rankbased""" +999 44 dataset """fb15k237""" +999 44 model """unstructuredmodel""" +999 44 loss """nssa""" +999 44 regularizer """no""" +999 44 optimizer """adam""" +999 44 training_loop """owa""" +999 44 negative_sampler """basic""" +999 44 evaluator """rankbased""" +999 45 dataset """fb15k237""" +999 45 model """unstructuredmodel""" +999 45 loss """nssa""" +999 45 regularizer """no""" +999 45 optimizer """adam""" +999 45 training_loop """owa""" +999 45 negative_sampler """basic""" +999 45 evaluator """rankbased""" +999 46 dataset """fb15k237""" +999 46 model """unstructuredmodel""" +999 46 loss """nssa""" +999 46 regularizer """no""" +999 46 optimizer """adam""" +999 46 training_loop """owa""" +999 46 negative_sampler """basic""" +999 46 evaluator """rankbased""" +999 47 dataset """fb15k237""" +999 47 model """unstructuredmodel""" +999 47 loss """nssa""" +999 47 regularizer """no""" +999 47 optimizer """adam""" +999 47 training_loop """owa""" +999 47 negative_sampler """basic""" +999 47 evaluator """rankbased""" +999 48 dataset """fb15k237""" +999 48 model """unstructuredmodel""" +999 48 loss """nssa""" +999 48 regularizer """no""" +999 48 optimizer """adam""" +999 48 training_loop """owa""" +999 48 negative_sampler """basic""" +999 48 evaluator """rankbased""" +999 49 dataset """fb15k237""" +999 49 model """unstructuredmodel""" +999 49 loss """nssa""" 
+999 49 regularizer """no""" +999 49 optimizer """adam""" +999 49 training_loop """owa""" +999 49 negative_sampler """basic""" +999 49 evaluator """rankbased""" +999 50 dataset """fb15k237""" +999 50 model """unstructuredmodel""" +999 50 loss """nssa""" +999 50 regularizer """no""" +999 50 optimizer """adam""" +999 50 training_loop """owa""" +999 50 negative_sampler """basic""" +999 50 evaluator """rankbased""" +999 51 dataset """fb15k237""" +999 51 model """unstructuredmodel""" +999 51 loss """nssa""" +999 51 regularizer """no""" +999 51 optimizer """adam""" +999 51 training_loop """owa""" +999 51 negative_sampler """basic""" +999 51 evaluator """rankbased""" +999 52 dataset """fb15k237""" +999 52 model """unstructuredmodel""" +999 52 loss """nssa""" +999 52 regularizer """no""" +999 52 optimizer """adam""" +999 52 training_loop """owa""" +999 52 negative_sampler """basic""" +999 52 evaluator """rankbased""" +999 53 dataset """fb15k237""" +999 53 model """unstructuredmodel""" +999 53 loss """nssa""" +999 53 regularizer """no""" +999 53 optimizer """adam""" +999 53 training_loop """owa""" +999 53 negative_sampler """basic""" +999 53 evaluator """rankbased""" +999 54 dataset """fb15k237""" +999 54 model """unstructuredmodel""" +999 54 loss """nssa""" +999 54 regularizer """no""" +999 54 optimizer """adam""" +999 54 training_loop """owa""" +999 54 negative_sampler """basic""" +999 54 evaluator """rankbased""" +999 55 dataset """fb15k237""" +999 55 model """unstructuredmodel""" +999 55 loss """nssa""" +999 55 regularizer """no""" +999 55 optimizer """adam""" +999 55 training_loop """owa""" +999 55 negative_sampler """basic""" +999 55 evaluator """rankbased""" +999 56 dataset """fb15k237""" +999 56 model """unstructuredmodel""" +999 56 loss """nssa""" +999 56 regularizer """no""" +999 56 optimizer """adam""" +999 56 training_loop """owa""" +999 56 negative_sampler """basic""" +999 56 evaluator """rankbased""" +999 57 dataset """fb15k237""" +999 57 model 
"""unstructuredmodel""" +999 57 loss """nssa""" +999 57 regularizer """no""" +999 57 optimizer """adam""" +999 57 training_loop """owa""" +999 57 negative_sampler """basic""" +999 57 evaluator """rankbased""" +999 58 dataset """fb15k237""" +999 58 model """unstructuredmodel""" +999 58 loss """nssa""" +999 58 regularizer """no""" +999 58 optimizer """adam""" +999 58 training_loop """owa""" +999 58 negative_sampler """basic""" +999 58 evaluator """rankbased""" +999 59 dataset """fb15k237""" +999 59 model """unstructuredmodel""" +999 59 loss """nssa""" +999 59 regularizer """no""" +999 59 optimizer """adam""" +999 59 training_loop """owa""" +999 59 negative_sampler """basic""" +999 59 evaluator """rankbased""" +999 60 dataset """fb15k237""" +999 60 model """unstructuredmodel""" +999 60 loss """nssa""" +999 60 regularizer """no""" +999 60 optimizer """adam""" +999 60 training_loop """owa""" +999 60 negative_sampler """basic""" +999 60 evaluator """rankbased""" +999 61 dataset """fb15k237""" +999 61 model """unstructuredmodel""" +999 61 loss """nssa""" +999 61 regularizer """no""" +999 61 optimizer """adam""" +999 61 training_loop """owa""" +999 61 negative_sampler """basic""" +999 61 evaluator """rankbased""" +999 62 dataset """fb15k237""" +999 62 model """unstructuredmodel""" +999 62 loss """nssa""" +999 62 regularizer """no""" +999 62 optimizer """adam""" +999 62 training_loop """owa""" +999 62 negative_sampler """basic""" +999 62 evaluator """rankbased""" +999 63 dataset """fb15k237""" +999 63 model """unstructuredmodel""" +999 63 loss """nssa""" +999 63 regularizer """no""" +999 63 optimizer """adam""" +999 63 training_loop """owa""" +999 63 negative_sampler """basic""" +999 63 evaluator """rankbased""" +999 64 dataset """fb15k237""" +999 64 model """unstructuredmodel""" +999 64 loss """nssa""" +999 64 regularizer """no""" +999 64 optimizer """adam""" +999 64 training_loop """owa""" +999 64 negative_sampler """basic""" +999 64 evaluator """rankbased""" +999 65 
dataset """fb15k237""" +999 65 model """unstructuredmodel""" +999 65 loss """nssa""" +999 65 regularizer """no""" +999 65 optimizer """adam""" +999 65 training_loop """owa""" +999 65 negative_sampler """basic""" +999 65 evaluator """rankbased""" +999 66 dataset """fb15k237""" +999 66 model """unstructuredmodel""" +999 66 loss """nssa""" +999 66 regularizer """no""" +999 66 optimizer """adam""" +999 66 training_loop """owa""" +999 66 negative_sampler """basic""" +999 66 evaluator """rankbased""" +999 67 dataset """fb15k237""" +999 67 model """unstructuredmodel""" +999 67 loss """nssa""" +999 67 regularizer """no""" +999 67 optimizer """adam""" +999 67 training_loop """owa""" +999 67 negative_sampler """basic""" +999 67 evaluator """rankbased""" +999 68 dataset """fb15k237""" +999 68 model """unstructuredmodel""" +999 68 loss """nssa""" +999 68 regularizer """no""" +999 68 optimizer """adam""" +999 68 training_loop """owa""" +999 68 negative_sampler """basic""" +999 68 evaluator """rankbased""" +999 69 dataset """fb15k237""" +999 69 model """unstructuredmodel""" +999 69 loss """nssa""" +999 69 regularizer """no""" +999 69 optimizer """adam""" +999 69 training_loop """owa""" +999 69 negative_sampler """basic""" +999 69 evaluator """rankbased""" +999 70 dataset """fb15k237""" +999 70 model """unstructuredmodel""" +999 70 loss """nssa""" +999 70 regularizer """no""" +999 70 optimizer """adam""" +999 70 training_loop """owa""" +999 70 negative_sampler """basic""" +999 70 evaluator """rankbased""" +999 71 dataset """fb15k237""" +999 71 model """unstructuredmodel""" +999 71 loss """nssa""" +999 71 regularizer """no""" +999 71 optimizer """adam""" +999 71 training_loop """owa""" +999 71 negative_sampler """basic""" +999 71 evaluator """rankbased""" +999 72 dataset """fb15k237""" +999 72 model """unstructuredmodel""" +999 72 loss """nssa""" +999 72 regularizer """no""" +999 72 optimizer """adam""" +999 72 training_loop """owa""" +999 72 negative_sampler """basic""" +999 72 
evaluator """rankbased""" +999 73 dataset """fb15k237""" +999 73 model """unstructuredmodel""" +999 73 loss """nssa""" +999 73 regularizer """no""" +999 73 optimizer """adam""" +999 73 training_loop """owa""" +999 73 negative_sampler """basic""" +999 73 evaluator """rankbased""" +999 74 dataset """fb15k237""" +999 74 model """unstructuredmodel""" +999 74 loss """nssa""" +999 74 regularizer """no""" +999 74 optimizer """adam""" +999 74 training_loop """owa""" +999 74 negative_sampler """basic""" +999 74 evaluator """rankbased""" +999 75 dataset """fb15k237""" +999 75 model """unstructuredmodel""" +999 75 loss """nssa""" +999 75 regularizer """no""" +999 75 optimizer """adam""" +999 75 training_loop """owa""" +999 75 negative_sampler """basic""" +999 75 evaluator """rankbased""" +999 76 dataset """fb15k237""" +999 76 model """unstructuredmodel""" +999 76 loss """nssa""" +999 76 regularizer """no""" +999 76 optimizer """adam""" +999 76 training_loop """owa""" +999 76 negative_sampler """basic""" +999 76 evaluator """rankbased""" +999 77 dataset """fb15k237""" +999 77 model """unstructuredmodel""" +999 77 loss """nssa""" +999 77 regularizer """no""" +999 77 optimizer """adam""" +999 77 training_loop """owa""" +999 77 negative_sampler """basic""" +999 77 evaluator """rankbased""" +999 78 dataset """fb15k237""" +999 78 model """unstructuredmodel""" +999 78 loss """nssa""" +999 78 regularizer """no""" +999 78 optimizer """adam""" +999 78 training_loop """owa""" +999 78 negative_sampler """basic""" +999 78 evaluator """rankbased""" +999 79 dataset """fb15k237""" +999 79 model """unstructuredmodel""" +999 79 loss """nssa""" +999 79 regularizer """no""" +999 79 optimizer """adam""" +999 79 training_loop """owa""" +999 79 negative_sampler """basic""" +999 79 evaluator """rankbased""" +999 80 dataset """fb15k237""" +999 80 model """unstructuredmodel""" +999 80 loss """nssa""" +999 80 regularizer """no""" +999 80 optimizer """adam""" +999 80 training_loop """owa""" +999 80 
negative_sampler """basic""" +999 80 evaluator """rankbased""" +999 81 dataset """fb15k237""" +999 81 model """unstructuredmodel""" +999 81 loss """nssa""" +999 81 regularizer """no""" +999 81 optimizer """adam""" +999 81 training_loop """owa""" +999 81 negative_sampler """basic""" +999 81 evaluator """rankbased""" +999 82 dataset """fb15k237""" +999 82 model """unstructuredmodel""" +999 82 loss """nssa""" +999 82 regularizer """no""" +999 82 optimizer """adam""" +999 82 training_loop """owa""" +999 82 negative_sampler """basic""" +999 82 evaluator """rankbased""" +999 83 dataset """fb15k237""" +999 83 model """unstructuredmodel""" +999 83 loss """nssa""" +999 83 regularizer """no""" +999 83 optimizer """adam""" +999 83 training_loop """owa""" +999 83 negative_sampler """basic""" +999 83 evaluator """rankbased""" +999 84 dataset """fb15k237""" +999 84 model """unstructuredmodel""" +999 84 loss """nssa""" +999 84 regularizer """no""" +999 84 optimizer """adam""" +999 84 training_loop """owa""" +999 84 negative_sampler """basic""" +999 84 evaluator """rankbased""" +999 85 dataset """fb15k237""" +999 85 model """unstructuredmodel""" +999 85 loss """nssa""" +999 85 regularizer """no""" +999 85 optimizer """adam""" +999 85 training_loop """owa""" +999 85 negative_sampler """basic""" +999 85 evaluator """rankbased""" +999 86 dataset """fb15k237""" +999 86 model """unstructuredmodel""" +999 86 loss """nssa""" +999 86 regularizer """no""" +999 86 optimizer """adam""" +999 86 training_loop """owa""" +999 86 negative_sampler """basic""" +999 86 evaluator """rankbased""" +999 87 dataset """fb15k237""" +999 87 model """unstructuredmodel""" +999 87 loss """nssa""" +999 87 regularizer """no""" +999 87 optimizer """adam""" +999 87 training_loop """owa""" +999 87 negative_sampler """basic""" +999 87 evaluator """rankbased""" +999 88 dataset """fb15k237""" +999 88 model """unstructuredmodel""" +999 88 loss """nssa""" +999 88 regularizer """no""" +999 88 optimizer """adam""" +999 88 
training_loop """owa""" +999 88 negative_sampler """basic""" +999 88 evaluator """rankbased""" +999 89 dataset """fb15k237""" +999 89 model """unstructuredmodel""" +999 89 loss """nssa""" +999 89 regularizer """no""" +999 89 optimizer """adam""" +999 89 training_loop """owa""" +999 89 negative_sampler """basic""" +999 89 evaluator """rankbased""" +999 90 dataset """fb15k237""" +999 90 model """unstructuredmodel""" +999 90 loss """nssa""" +999 90 regularizer """no""" +999 90 optimizer """adam""" +999 90 training_loop """owa""" +999 90 negative_sampler """basic""" +999 90 evaluator """rankbased""" +999 91 dataset """fb15k237""" +999 91 model """unstructuredmodel""" +999 91 loss """nssa""" +999 91 regularizer """no""" +999 91 optimizer """adam""" +999 91 training_loop """owa""" +999 91 negative_sampler """basic""" +999 91 evaluator """rankbased""" +999 92 dataset """fb15k237""" +999 92 model """unstructuredmodel""" +999 92 loss """nssa""" +999 92 regularizer """no""" +999 92 optimizer """adam""" +999 92 training_loop """owa""" +999 92 negative_sampler """basic""" +999 92 evaluator """rankbased""" +999 93 dataset """fb15k237""" +999 93 model """unstructuredmodel""" +999 93 loss """nssa""" +999 93 regularizer """no""" +999 93 optimizer """adam""" +999 93 training_loop """owa""" +999 93 negative_sampler """basic""" +999 93 evaluator """rankbased""" +999 94 dataset """fb15k237""" +999 94 model """unstructuredmodel""" +999 94 loss """nssa""" +999 94 regularizer """no""" +999 94 optimizer """adam""" +999 94 training_loop """owa""" +999 94 negative_sampler """basic""" +999 94 evaluator """rankbased""" +999 95 dataset """fb15k237""" +999 95 model """unstructuredmodel""" +999 95 loss """nssa""" +999 95 regularizer """no""" +999 95 optimizer """adam""" +999 95 training_loop """owa""" +999 95 negative_sampler """basic""" +999 95 evaluator """rankbased""" +999 96 dataset """fb15k237""" +999 96 model """unstructuredmodel""" +999 96 loss """nssa""" +999 96 regularizer """no""" +999 
96 optimizer """adam""" +999 96 training_loop """owa""" +999 96 negative_sampler """basic""" +999 96 evaluator """rankbased""" +999 97 dataset """fb15k237""" +999 97 model """unstructuredmodel""" +999 97 loss """nssa""" +999 97 regularizer """no""" +999 97 optimizer """adam""" +999 97 training_loop """owa""" +999 97 negative_sampler """basic""" +999 97 evaluator """rankbased""" +999 98 dataset """fb15k237""" +999 98 model """unstructuredmodel""" +999 98 loss """nssa""" +999 98 regularizer """no""" +999 98 optimizer """adam""" +999 98 training_loop """owa""" +999 98 negative_sampler """basic""" +999 98 evaluator """rankbased""" +999 99 dataset """fb15k237""" +999 99 model """unstructuredmodel""" +999 99 loss """nssa""" +999 99 regularizer """no""" +999 99 optimizer """adam""" +999 99 training_loop """owa""" +999 99 negative_sampler """basic""" +999 99 evaluator """rankbased""" +999 100 dataset """fb15k237""" +999 100 model """unstructuredmodel""" +999 100 loss """nssa""" +999 100 regularizer """no""" +999 100 optimizer """adam""" +999 100 training_loop """owa""" +999 100 negative_sampler """basic""" +999 100 evaluator """rankbased""" +1000 1 model.embedding_dim 0.0 +1000 1 model.scoring_fct_norm 2.0 +1000 1 loss.margin 9.11875242633977 +1000 1 optimizer.lr 0.0011135728840371622 +1000 1 negative_sampler.num_negs_per_pos 83.0 +1000 1 training.batch_size 0.0 +1000 2 model.embedding_dim 0.0 +1000 2 model.scoring_fct_norm 1.0 +1000 2 loss.margin 6.816502779107478 +1000 2 optimizer.lr 0.0729109776656288 +1000 2 negative_sampler.num_negs_per_pos 84.0 +1000 2 training.batch_size 0.0 +1000 3 model.embedding_dim 0.0 +1000 3 model.scoring_fct_norm 2.0 +1000 3 loss.margin 3.7540557952024662 +1000 3 optimizer.lr 0.00237514885928834 +1000 3 negative_sampler.num_negs_per_pos 38.0 +1000 3 training.batch_size 0.0 +1000 4 model.embedding_dim 0.0 +1000 4 model.scoring_fct_norm 2.0 +1000 4 loss.margin 6.986760199421981 +1000 4 optimizer.lr 0.028558696949251155 +1000 4 
negative_sampler.num_negs_per_pos 35.0 +1000 4 training.batch_size 0.0 +1000 5 model.embedding_dim 1.0 +1000 5 model.scoring_fct_norm 2.0 +1000 5 loss.margin 2.343983960103005 +1000 5 optimizer.lr 0.009652419531334966 +1000 5 negative_sampler.num_negs_per_pos 5.0 +1000 5 training.batch_size 2.0 +1000 6 model.embedding_dim 1.0 +1000 6 model.scoring_fct_norm 2.0 +1000 6 loss.margin 7.436472357478039 +1000 6 optimizer.lr 0.00763285144552634 +1000 6 negative_sampler.num_negs_per_pos 19.0 +1000 6 training.batch_size 1.0 +1000 7 model.embedding_dim 0.0 +1000 7 model.scoring_fct_norm 1.0 +1000 7 loss.margin 3.062533197777565 +1000 7 optimizer.lr 0.002091301671975713 +1000 7 negative_sampler.num_negs_per_pos 9.0 +1000 7 training.batch_size 1.0 +1000 8 model.embedding_dim 2.0 +1000 8 model.scoring_fct_norm 1.0 +1000 8 loss.margin 9.767596846714472 +1000 8 optimizer.lr 0.023968221721668154 +1000 8 negative_sampler.num_negs_per_pos 79.0 +1000 8 training.batch_size 1.0 +1000 9 model.embedding_dim 2.0 +1000 9 model.scoring_fct_norm 1.0 +1000 9 loss.margin 8.79232447661057 +1000 9 optimizer.lr 0.045075179256043915 +1000 9 negative_sampler.num_negs_per_pos 51.0 +1000 9 training.batch_size 1.0 +1000 10 model.embedding_dim 1.0 +1000 10 model.scoring_fct_norm 1.0 +1000 10 loss.margin 8.509775973366857 +1000 10 optimizer.lr 0.015623348045004933 +1000 10 negative_sampler.num_negs_per_pos 48.0 +1000 10 training.batch_size 0.0 +1000 11 model.embedding_dim 1.0 +1000 11 model.scoring_fct_norm 2.0 +1000 11 loss.margin 3.687012906867293 +1000 11 optimizer.lr 0.013841787043259047 +1000 11 negative_sampler.num_negs_per_pos 46.0 +1000 11 training.batch_size 0.0 +1000 12 model.embedding_dim 1.0 +1000 12 model.scoring_fct_norm 2.0 +1000 12 loss.margin 7.416677093775148 +1000 12 optimizer.lr 0.00527011022643971 +1000 12 negative_sampler.num_negs_per_pos 50.0 +1000 12 training.batch_size 2.0 +1000 13 model.embedding_dim 1.0 +1000 13 model.scoring_fct_norm 1.0 +1000 13 loss.margin 5.628969515842629 
+1000 13 optimizer.lr 0.0216668995159082 +1000 13 negative_sampler.num_negs_per_pos 95.0 +1000 13 training.batch_size 0.0 +1000 14 model.embedding_dim 1.0 +1000 14 model.scoring_fct_norm 2.0 +1000 14 loss.margin 4.8581615558545135 +1000 14 optimizer.lr 0.004943832157466938 +1000 14 negative_sampler.num_negs_per_pos 35.0 +1000 14 training.batch_size 1.0 +1000 15 model.embedding_dim 0.0 +1000 15 model.scoring_fct_norm 1.0 +1000 15 loss.margin 2.3871056769848145 +1000 15 optimizer.lr 0.052114056690008334 +1000 15 negative_sampler.num_negs_per_pos 61.0 +1000 15 training.batch_size 0.0 +1000 16 model.embedding_dim 0.0 +1000 16 model.scoring_fct_norm 1.0 +1000 16 loss.margin 2.70691972228485 +1000 16 optimizer.lr 0.008814172830879631 +1000 16 negative_sampler.num_negs_per_pos 26.0 +1000 16 training.batch_size 0.0 +1000 17 model.embedding_dim 2.0 +1000 17 model.scoring_fct_norm 2.0 +1000 17 loss.margin 5.055184261529989 +1000 17 optimizer.lr 0.021190913336237564 +1000 17 negative_sampler.num_negs_per_pos 51.0 +1000 17 training.batch_size 1.0 +1000 18 model.embedding_dim 0.0 +1000 18 model.scoring_fct_norm 2.0 +1000 18 loss.margin 5.85687091888178 +1000 18 optimizer.lr 0.0035610303557038225 +1000 18 negative_sampler.num_negs_per_pos 71.0 +1000 18 training.batch_size 2.0 +1000 19 model.embedding_dim 2.0 +1000 19 model.scoring_fct_norm 1.0 +1000 19 loss.margin 2.021397387408287 +1000 19 optimizer.lr 0.0014000586452661945 +1000 19 negative_sampler.num_negs_per_pos 7.0 +1000 19 training.batch_size 1.0 +1000 20 model.embedding_dim 2.0 +1000 20 model.scoring_fct_norm 1.0 +1000 20 loss.margin 9.531311388899425 +1000 20 optimizer.lr 0.07410415171471804 +1000 20 negative_sampler.num_negs_per_pos 18.0 +1000 20 training.batch_size 2.0 +1000 21 model.embedding_dim 1.0 +1000 21 model.scoring_fct_norm 1.0 +1000 21 loss.margin 7.808246136763673 +1000 21 optimizer.lr 0.0191432516854345 +1000 21 negative_sampler.num_negs_per_pos 99.0 +1000 21 training.batch_size 1.0 +1000 22 
model.embedding_dim 0.0 +1000 22 model.scoring_fct_norm 1.0 +1000 22 loss.margin 5.038775649637975 +1000 22 optimizer.lr 0.026953641907008993 +1000 22 negative_sampler.num_negs_per_pos 66.0 +1000 22 training.batch_size 1.0 +1000 23 model.embedding_dim 0.0 +1000 23 model.scoring_fct_norm 2.0 +1000 23 loss.margin 7.037262771134375 +1000 23 optimizer.lr 0.03396487544952586 +1000 23 negative_sampler.num_negs_per_pos 0.0 +1000 23 training.batch_size 2.0 +1000 24 model.embedding_dim 1.0 +1000 24 model.scoring_fct_norm 1.0 +1000 24 loss.margin 2.905364215820802 +1000 24 optimizer.lr 0.03873128745333346 +1000 24 negative_sampler.num_negs_per_pos 0.0 +1000 24 training.batch_size 1.0 +1000 25 model.embedding_dim 2.0 +1000 25 model.scoring_fct_norm 2.0 +1000 25 loss.margin 1.6792169318338912 +1000 25 optimizer.lr 0.0368444755304445 +1000 25 negative_sampler.num_negs_per_pos 63.0 +1000 25 training.batch_size 1.0 +1000 26 model.embedding_dim 1.0 +1000 26 model.scoring_fct_norm 2.0 +1000 26 loss.margin 7.159531571497209 +1000 26 optimizer.lr 0.01805585371475578 +1000 26 negative_sampler.num_negs_per_pos 11.0 +1000 26 training.batch_size 1.0 +1000 27 model.embedding_dim 0.0 +1000 27 model.scoring_fct_norm 2.0 +1000 27 loss.margin 6.040830096933074 +1000 27 optimizer.lr 0.05206479036515996 +1000 27 negative_sampler.num_negs_per_pos 29.0 +1000 27 training.batch_size 0.0 +1000 28 model.embedding_dim 0.0 +1000 28 model.scoring_fct_norm 1.0 +1000 28 loss.margin 9.358743551070656 +1000 28 optimizer.lr 0.003791239278462225 +1000 28 negative_sampler.num_negs_per_pos 76.0 +1000 28 training.batch_size 2.0 +1000 29 model.embedding_dim 2.0 +1000 29 model.scoring_fct_norm 1.0 +1000 29 loss.margin 2.7905600460983564 +1000 29 optimizer.lr 0.0022693564995990615 +1000 29 negative_sampler.num_negs_per_pos 40.0 +1000 29 training.batch_size 2.0 +1000 30 model.embedding_dim 0.0 +1000 30 model.scoring_fct_norm 2.0 +1000 30 loss.margin 2.6839525302490403 +1000 30 optimizer.lr 0.009348721102670085 +1000 
30 negative_sampler.num_negs_per_pos 82.0 +1000 30 training.batch_size 0.0 +1000 31 model.embedding_dim 2.0 +1000 31 model.scoring_fct_norm 2.0 +1000 31 loss.margin 1.1769431862475757 +1000 31 optimizer.lr 0.015435108335313303 +1000 31 negative_sampler.num_negs_per_pos 69.0 +1000 31 training.batch_size 2.0 +1000 32 model.embedding_dim 2.0 +1000 32 model.scoring_fct_norm 2.0 +1000 32 loss.margin 4.2031679114047416 +1000 32 optimizer.lr 0.0029298479587684463 +1000 32 negative_sampler.num_negs_per_pos 97.0 +1000 32 training.batch_size 0.0 +1000 33 model.embedding_dim 2.0 +1000 33 model.scoring_fct_norm 1.0 +1000 33 loss.margin 4.369535706834471 +1000 33 optimizer.lr 0.0037678308131319474 +1000 33 negative_sampler.num_negs_per_pos 30.0 +1000 33 training.batch_size 0.0 +1000 34 model.embedding_dim 2.0 +1000 34 model.scoring_fct_norm 1.0 +1000 34 loss.margin 5.090131909490706 +1000 34 optimizer.lr 0.007248918205745503 +1000 34 negative_sampler.num_negs_per_pos 45.0 +1000 34 training.batch_size 0.0 +1000 35 model.embedding_dim 0.0 +1000 35 model.scoring_fct_norm 1.0 +1000 35 loss.margin 6.284830874441592 +1000 35 optimizer.lr 0.0730331280569831 +1000 35 negative_sampler.num_negs_per_pos 52.0 +1000 35 training.batch_size 0.0 +1000 36 model.embedding_dim 0.0 +1000 36 model.scoring_fct_norm 1.0 +1000 36 loss.margin 8.812598009521587 +1000 36 optimizer.lr 0.059427916139427404 +1000 36 negative_sampler.num_negs_per_pos 27.0 +1000 36 training.batch_size 0.0 +1000 37 model.embedding_dim 2.0 +1000 37 model.scoring_fct_norm 1.0 +1000 37 loss.margin 7.858009137661914 +1000 37 optimizer.lr 0.0019390681399422656 +1000 37 negative_sampler.num_negs_per_pos 11.0 +1000 37 training.batch_size 1.0 +1000 38 model.embedding_dim 0.0 +1000 38 model.scoring_fct_norm 2.0 +1000 38 loss.margin 9.38226844977931 +1000 38 optimizer.lr 0.03158206511888844 +1000 38 negative_sampler.num_negs_per_pos 92.0 +1000 38 training.batch_size 0.0 +1000 39 model.embedding_dim 0.0 +1000 39 model.scoring_fct_norm 
1.0 +1000 39 loss.margin 4.771947198238952 +1000 39 optimizer.lr 0.03200974174519754 +1000 39 negative_sampler.num_negs_per_pos 55.0 +1000 39 training.batch_size 1.0 +1000 40 model.embedding_dim 2.0 +1000 40 model.scoring_fct_norm 1.0 +1000 40 loss.margin 9.478663666376367 +1000 40 optimizer.lr 0.012281118026775376 +1000 40 negative_sampler.num_negs_per_pos 8.0 +1000 40 training.batch_size 2.0 +1000 41 model.embedding_dim 2.0 +1000 41 model.scoring_fct_norm 2.0 +1000 41 loss.margin 9.622901473785294 +1000 41 optimizer.lr 0.002379620735887046 +1000 41 negative_sampler.num_negs_per_pos 52.0 +1000 41 training.batch_size 2.0 +1000 42 model.embedding_dim 1.0 +1000 42 model.scoring_fct_norm 2.0 +1000 42 loss.margin 8.64920535194763 +1000 42 optimizer.lr 0.01415497437297276 +1000 42 negative_sampler.num_negs_per_pos 64.0 +1000 42 training.batch_size 1.0 +1000 43 model.embedding_dim 2.0 +1000 43 model.scoring_fct_norm 1.0 +1000 43 loss.margin 8.157468521686585 +1000 43 optimizer.lr 0.006608994179106528 +1000 43 negative_sampler.num_negs_per_pos 45.0 +1000 43 training.batch_size 1.0 +1000 44 model.embedding_dim 0.0 +1000 44 model.scoring_fct_norm 2.0 +1000 44 loss.margin 8.09407045403086 +1000 44 optimizer.lr 0.0041165216616923335 +1000 44 negative_sampler.num_negs_per_pos 57.0 +1000 44 training.batch_size 0.0 +1000 45 model.embedding_dim 2.0 +1000 45 model.scoring_fct_norm 1.0 +1000 45 loss.margin 2.7603591112686607 +1000 45 optimizer.lr 0.03570557124397404 +1000 45 negative_sampler.num_negs_per_pos 85.0 +1000 45 training.batch_size 1.0 +1000 46 model.embedding_dim 1.0 +1000 46 model.scoring_fct_norm 1.0 +1000 46 loss.margin 5.528583094739588 +1000 46 optimizer.lr 0.004582401997497633 +1000 46 negative_sampler.num_negs_per_pos 29.0 +1000 46 training.batch_size 0.0 +1000 47 model.embedding_dim 2.0 +1000 47 model.scoring_fct_norm 2.0 +1000 47 loss.margin 3.2150151956272603 +1000 47 optimizer.lr 0.001141522215221401 +1000 47 negative_sampler.num_negs_per_pos 34.0 +1000 47 
training.batch_size 2.0 +1000 48 model.embedding_dim 1.0 +1000 48 model.scoring_fct_norm 2.0 +1000 48 loss.margin 5.323406412462193 +1000 48 optimizer.lr 0.0037105375463007996 +1000 48 negative_sampler.num_negs_per_pos 1.0 +1000 48 training.batch_size 1.0 +1000 49 model.embedding_dim 2.0 +1000 49 model.scoring_fct_norm 2.0 +1000 49 loss.margin 4.518725388791851 +1000 49 optimizer.lr 0.05470339771695602 +1000 49 negative_sampler.num_negs_per_pos 51.0 +1000 49 training.batch_size 0.0 +1000 50 model.embedding_dim 0.0 +1000 50 model.scoring_fct_norm 2.0 +1000 50 loss.margin 8.738743386164233 +1000 50 optimizer.lr 0.005842226349165958 +1000 50 negative_sampler.num_negs_per_pos 41.0 +1000 50 training.batch_size 1.0 +1000 51 model.embedding_dim 1.0 +1000 51 model.scoring_fct_norm 1.0 +1000 51 loss.margin 3.776076202447652 +1000 51 optimizer.lr 0.010101347135164437 +1000 51 negative_sampler.num_negs_per_pos 11.0 +1000 51 training.batch_size 1.0 +1000 52 model.embedding_dim 0.0 +1000 52 model.scoring_fct_norm 2.0 +1000 52 loss.margin 5.228468322584744 +1000 52 optimizer.lr 0.02667456506520992 +1000 52 negative_sampler.num_negs_per_pos 22.0 +1000 52 training.batch_size 0.0 +1000 53 model.embedding_dim 2.0 +1000 53 model.scoring_fct_norm 1.0 +1000 53 loss.margin 7.930401860142877 +1000 53 optimizer.lr 0.0179305681205513 +1000 53 negative_sampler.num_negs_per_pos 97.0 +1000 53 training.batch_size 2.0 +1000 54 model.embedding_dim 0.0 +1000 54 model.scoring_fct_norm 2.0 +1000 54 loss.margin 2.646606408052445 +1000 54 optimizer.lr 0.004626362500800245 +1000 54 negative_sampler.num_negs_per_pos 54.0 +1000 54 training.batch_size 2.0 +1000 55 model.embedding_dim 2.0 +1000 55 model.scoring_fct_norm 2.0 +1000 55 loss.margin 7.84239033690917 +1000 55 optimizer.lr 0.0010874034332008559 +1000 55 negative_sampler.num_negs_per_pos 39.0 +1000 55 training.batch_size 1.0 +1000 56 model.embedding_dim 2.0 +1000 56 model.scoring_fct_norm 1.0 +1000 56 loss.margin 3.9140141359698446 +1000 56 
optimizer.lr 0.04113265670932667 +1000 56 negative_sampler.num_negs_per_pos 85.0 +1000 56 training.batch_size 2.0 +1000 57 model.embedding_dim 0.0 +1000 57 model.scoring_fct_norm 1.0 +1000 57 loss.margin 3.9479353351460187 +1000 57 optimizer.lr 0.002116947491952414 +1000 57 negative_sampler.num_negs_per_pos 56.0 +1000 57 training.batch_size 2.0 +1000 58 model.embedding_dim 2.0 +1000 58 model.scoring_fct_norm 2.0 +1000 58 loss.margin 1.152260389585758 +1000 58 optimizer.lr 0.002828738788901356 +1000 58 negative_sampler.num_negs_per_pos 49.0 +1000 58 training.batch_size 2.0 +1000 59 model.embedding_dim 2.0 +1000 59 model.scoring_fct_norm 1.0 +1000 59 loss.margin 5.710099290876622 +1000 59 optimizer.lr 0.04676312386341838 +1000 59 negative_sampler.num_negs_per_pos 4.0 +1000 59 training.batch_size 2.0 +1000 60 model.embedding_dim 0.0 +1000 60 model.scoring_fct_norm 1.0 +1000 60 loss.margin 7.606925876086808 +1000 60 optimizer.lr 0.003356577590321658 +1000 60 negative_sampler.num_negs_per_pos 24.0 +1000 60 training.batch_size 1.0 +1000 61 model.embedding_dim 0.0 +1000 61 model.scoring_fct_norm 1.0 +1000 61 loss.margin 4.5937650996797466 +1000 61 optimizer.lr 0.0014742452005475529 +1000 61 negative_sampler.num_negs_per_pos 80.0 +1000 61 training.batch_size 1.0 +1000 1 dataset """fb15k237""" +1000 1 model """unstructuredmodel""" +1000 1 loss """marginranking""" +1000 1 regularizer """no""" +1000 1 optimizer """adam""" +1000 1 training_loop """owa""" +1000 1 negative_sampler """basic""" +1000 1 evaluator """rankbased""" +1000 2 dataset """fb15k237""" +1000 2 model """unstructuredmodel""" +1000 2 loss """marginranking""" +1000 2 regularizer """no""" +1000 2 optimizer """adam""" +1000 2 training_loop """owa""" +1000 2 negative_sampler """basic""" +1000 2 evaluator """rankbased""" +1000 3 dataset """fb15k237""" +1000 3 model """unstructuredmodel""" +1000 3 loss """marginranking""" +1000 3 regularizer """no""" +1000 3 optimizer """adam""" +1000 3 training_loop """owa""" +1000 
3 negative_sampler """basic""" +1000 3 evaluator """rankbased""" +1000 4 dataset """fb15k237""" +1000 4 model """unstructuredmodel""" +1000 4 loss """marginranking""" +1000 4 regularizer """no""" +1000 4 optimizer """adam""" +1000 4 training_loop """owa""" +1000 4 negative_sampler """basic""" +1000 4 evaluator """rankbased""" +1000 5 dataset """fb15k237""" +1000 5 model """unstructuredmodel""" +1000 5 loss """marginranking""" +1000 5 regularizer """no""" +1000 5 optimizer """adam""" +1000 5 training_loop """owa""" +1000 5 negative_sampler """basic""" +1000 5 evaluator """rankbased""" +1000 6 dataset """fb15k237""" +1000 6 model """unstructuredmodel""" +1000 6 loss """marginranking""" +1000 6 regularizer """no""" +1000 6 optimizer """adam""" +1000 6 training_loop """owa""" +1000 6 negative_sampler """basic""" +1000 6 evaluator """rankbased""" +1000 7 dataset """fb15k237""" +1000 7 model """unstructuredmodel""" +1000 7 loss """marginranking""" +1000 7 regularizer """no""" +1000 7 optimizer """adam""" +1000 7 training_loop """owa""" +1000 7 negative_sampler """basic""" +1000 7 evaluator """rankbased""" +1000 8 dataset """fb15k237""" +1000 8 model """unstructuredmodel""" +1000 8 loss """marginranking""" +1000 8 regularizer """no""" +1000 8 optimizer """adam""" +1000 8 training_loop """owa""" +1000 8 negative_sampler """basic""" +1000 8 evaluator """rankbased""" +1000 9 dataset """fb15k237""" +1000 9 model """unstructuredmodel""" +1000 9 loss """marginranking""" +1000 9 regularizer """no""" +1000 9 optimizer """adam""" +1000 9 training_loop """owa""" +1000 9 negative_sampler """basic""" +1000 9 evaluator """rankbased""" +1000 10 dataset """fb15k237""" +1000 10 model """unstructuredmodel""" +1000 10 loss """marginranking""" +1000 10 regularizer """no""" +1000 10 optimizer """adam""" +1000 10 training_loop """owa""" +1000 10 negative_sampler """basic""" +1000 10 evaluator """rankbased""" +1000 11 dataset """fb15k237""" +1000 11 model """unstructuredmodel""" +1000 11 loss 
"""marginranking""" +1000 11 regularizer """no""" +1000 11 optimizer """adam""" +1000 11 training_loop """owa""" +1000 11 negative_sampler """basic""" +1000 11 evaluator """rankbased""" +1000 12 dataset """fb15k237""" +1000 12 model """unstructuredmodel""" +1000 12 loss """marginranking""" +1000 12 regularizer """no""" +1000 12 optimizer """adam""" +1000 12 training_loop """owa""" +1000 12 negative_sampler """basic""" +1000 12 evaluator """rankbased""" +1000 13 dataset """fb15k237""" +1000 13 model """unstructuredmodel""" +1000 13 loss """marginranking""" +1000 13 regularizer """no""" +1000 13 optimizer """adam""" +1000 13 training_loop """owa""" +1000 13 negative_sampler """basic""" +1000 13 evaluator """rankbased""" +1000 14 dataset """fb15k237""" +1000 14 model """unstructuredmodel""" +1000 14 loss """marginranking""" +1000 14 regularizer """no""" +1000 14 optimizer """adam""" +1000 14 training_loop """owa""" +1000 14 negative_sampler """basic""" +1000 14 evaluator """rankbased""" +1000 15 dataset """fb15k237""" +1000 15 model """unstructuredmodel""" +1000 15 loss """marginranking""" +1000 15 regularizer """no""" +1000 15 optimizer """adam""" +1000 15 training_loop """owa""" +1000 15 negative_sampler """basic""" +1000 15 evaluator """rankbased""" +1000 16 dataset """fb15k237""" +1000 16 model """unstructuredmodel""" +1000 16 loss """marginranking""" +1000 16 regularizer """no""" +1000 16 optimizer """adam""" +1000 16 training_loop """owa""" +1000 16 negative_sampler """basic""" +1000 16 evaluator """rankbased""" +1000 17 dataset """fb15k237""" +1000 17 model """unstructuredmodel""" +1000 17 loss """marginranking""" +1000 17 regularizer """no""" +1000 17 optimizer """adam""" +1000 17 training_loop """owa""" +1000 17 negative_sampler """basic""" +1000 17 evaluator """rankbased""" +1000 18 dataset """fb15k237""" +1000 18 model """unstructuredmodel""" +1000 18 loss """marginranking""" +1000 18 regularizer """no""" +1000 18 optimizer """adam""" +1000 18 training_loop 
"""owa""" +1000 18 negative_sampler """basic""" +1000 18 evaluator """rankbased""" +1000 19 dataset """fb15k237""" +1000 19 model """unstructuredmodel""" +1000 19 loss """marginranking""" +1000 19 regularizer """no""" +1000 19 optimizer """adam""" +1000 19 training_loop """owa""" +1000 19 negative_sampler """basic""" +1000 19 evaluator """rankbased""" +1000 20 dataset """fb15k237""" +1000 20 model """unstructuredmodel""" +1000 20 loss """marginranking""" +1000 20 regularizer """no""" +1000 20 optimizer """adam""" +1000 20 training_loop """owa""" +1000 20 negative_sampler """basic""" +1000 20 evaluator """rankbased""" +1000 21 dataset """fb15k237""" +1000 21 model """unstructuredmodel""" +1000 21 loss """marginranking""" +1000 21 regularizer """no""" +1000 21 optimizer """adam""" +1000 21 training_loop """owa""" +1000 21 negative_sampler """basic""" +1000 21 evaluator """rankbased""" +1000 22 dataset """fb15k237""" +1000 22 model """unstructuredmodel""" +1000 22 loss """marginranking""" +1000 22 regularizer """no""" +1000 22 optimizer """adam""" +1000 22 training_loop """owa""" +1000 22 negative_sampler """basic""" +1000 22 evaluator """rankbased""" +1000 23 dataset """fb15k237""" +1000 23 model """unstructuredmodel""" +1000 23 loss """marginranking""" +1000 23 regularizer """no""" +1000 23 optimizer """adam""" +1000 23 training_loop """owa""" +1000 23 negative_sampler """basic""" +1000 23 evaluator """rankbased""" +1000 24 dataset """fb15k237""" +1000 24 model """unstructuredmodel""" +1000 24 loss """marginranking""" +1000 24 regularizer """no""" +1000 24 optimizer """adam""" +1000 24 training_loop """owa""" +1000 24 negative_sampler """basic""" +1000 24 evaluator """rankbased""" +1000 25 dataset """fb15k237""" +1000 25 model """unstructuredmodel""" +1000 25 loss """marginranking""" +1000 25 regularizer """no""" +1000 25 optimizer """adam""" +1000 25 training_loop """owa""" +1000 25 negative_sampler """basic""" +1000 25 evaluator """rankbased""" +1000 26 dataset 
"""fb15k237""" +1000 26 model """unstructuredmodel""" +1000 26 loss """marginranking""" +1000 26 regularizer """no""" +1000 26 optimizer """adam""" +1000 26 training_loop """owa""" +1000 26 negative_sampler """basic""" +1000 26 evaluator """rankbased""" +1000 27 dataset """fb15k237""" +1000 27 model """unstructuredmodel""" +1000 27 loss """marginranking""" +1000 27 regularizer """no""" +1000 27 optimizer """adam""" +1000 27 training_loop """owa""" +1000 27 negative_sampler """basic""" +1000 27 evaluator """rankbased""" +1000 28 dataset """fb15k237""" +1000 28 model """unstructuredmodel""" +1000 28 loss """marginranking""" +1000 28 regularizer """no""" +1000 28 optimizer """adam""" +1000 28 training_loop """owa""" +1000 28 negative_sampler """basic""" +1000 28 evaluator """rankbased""" +1000 29 dataset """fb15k237""" +1000 29 model """unstructuredmodel""" +1000 29 loss """marginranking""" +1000 29 regularizer """no""" +1000 29 optimizer """adam""" +1000 29 training_loop """owa""" +1000 29 negative_sampler """basic""" +1000 29 evaluator """rankbased""" +1000 30 dataset """fb15k237""" +1000 30 model """unstructuredmodel""" +1000 30 loss """marginranking""" +1000 30 regularizer """no""" +1000 30 optimizer """adam""" +1000 30 training_loop """owa""" +1000 30 negative_sampler """basic""" +1000 30 evaluator """rankbased""" +1000 31 dataset """fb15k237""" +1000 31 model """unstructuredmodel""" +1000 31 loss """marginranking""" +1000 31 regularizer """no""" +1000 31 optimizer """adam""" +1000 31 training_loop """owa""" +1000 31 negative_sampler """basic""" +1000 31 evaluator """rankbased""" +1000 32 dataset """fb15k237""" +1000 32 model """unstructuredmodel""" +1000 32 loss """marginranking""" +1000 32 regularizer """no""" +1000 32 optimizer """adam""" +1000 32 training_loop """owa""" +1000 32 negative_sampler """basic""" +1000 32 evaluator """rankbased""" +1000 33 dataset """fb15k237""" +1000 33 model """unstructuredmodel""" +1000 33 loss """marginranking""" +1000 33 
regularizer """no""" +1000 33 optimizer """adam""" +1000 33 training_loop """owa""" +1000 33 negative_sampler """basic""" +1000 33 evaluator """rankbased""" +1000 34 dataset """fb15k237""" +1000 34 model """unstructuredmodel""" +1000 34 loss """marginranking""" +1000 34 regularizer """no""" +1000 34 optimizer """adam""" +1000 34 training_loop """owa""" +1000 34 negative_sampler """basic""" +1000 34 evaluator """rankbased""" +1000 35 dataset """fb15k237""" +1000 35 model """unstructuredmodel""" +1000 35 loss """marginranking""" +1000 35 regularizer """no""" +1000 35 optimizer """adam""" +1000 35 training_loop """owa""" +1000 35 negative_sampler """basic""" +1000 35 evaluator """rankbased""" +1000 36 dataset """fb15k237""" +1000 36 model """unstructuredmodel""" +1000 36 loss """marginranking""" +1000 36 regularizer """no""" +1000 36 optimizer """adam""" +1000 36 training_loop """owa""" +1000 36 negative_sampler """basic""" +1000 36 evaluator """rankbased""" +1000 37 dataset """fb15k237""" +1000 37 model """unstructuredmodel""" +1000 37 loss """marginranking""" +1000 37 regularizer """no""" +1000 37 optimizer """adam""" +1000 37 training_loop """owa""" +1000 37 negative_sampler """basic""" +1000 37 evaluator """rankbased""" +1000 38 dataset """fb15k237""" +1000 38 model """unstructuredmodel""" +1000 38 loss """marginranking""" +1000 38 regularizer """no""" +1000 38 optimizer """adam""" +1000 38 training_loop """owa""" +1000 38 negative_sampler """basic""" +1000 38 evaluator """rankbased""" +1000 39 dataset """fb15k237""" +1000 39 model """unstructuredmodel""" +1000 39 loss """marginranking""" +1000 39 regularizer """no""" +1000 39 optimizer """adam""" +1000 39 training_loop """owa""" +1000 39 negative_sampler """basic""" +1000 39 evaluator """rankbased""" +1000 40 dataset """fb15k237""" +1000 40 model """unstructuredmodel""" +1000 40 loss """marginranking""" +1000 40 regularizer """no""" +1000 40 optimizer """adam""" +1000 40 training_loop """owa""" +1000 40 
negative_sampler """basic""" +1000 40 evaluator """rankbased""" +1000 41 dataset """fb15k237""" +1000 41 model """unstructuredmodel""" +1000 41 loss """marginranking""" +1000 41 regularizer """no""" +1000 41 optimizer """adam""" +1000 41 training_loop """owa""" +1000 41 negative_sampler """basic""" +1000 41 evaluator """rankbased""" +1000 42 dataset """fb15k237""" +1000 42 model """unstructuredmodel""" +1000 42 loss """marginranking""" +1000 42 regularizer """no""" +1000 42 optimizer """adam""" +1000 42 training_loop """owa""" +1000 42 negative_sampler """basic""" +1000 42 evaluator """rankbased""" +1000 43 dataset """fb15k237""" +1000 43 model """unstructuredmodel""" +1000 43 loss """marginranking""" +1000 43 regularizer """no""" +1000 43 optimizer """adam""" +1000 43 training_loop """owa""" +1000 43 negative_sampler """basic""" +1000 43 evaluator """rankbased""" +1000 44 dataset """fb15k237""" +1000 44 model """unstructuredmodel""" +1000 44 loss """marginranking""" +1000 44 regularizer """no""" +1000 44 optimizer """adam""" +1000 44 training_loop """owa""" +1000 44 negative_sampler """basic""" +1000 44 evaluator """rankbased""" +1000 45 dataset """fb15k237""" +1000 45 model """unstructuredmodel""" +1000 45 loss """marginranking""" +1000 45 regularizer """no""" +1000 45 optimizer """adam""" +1000 45 training_loop """owa""" +1000 45 negative_sampler """basic""" +1000 45 evaluator """rankbased""" +1000 46 dataset """fb15k237""" +1000 46 model """unstructuredmodel""" +1000 46 loss """marginranking""" +1000 46 regularizer """no""" +1000 46 optimizer """adam""" +1000 46 training_loop """owa""" +1000 46 negative_sampler """basic""" +1000 46 evaluator """rankbased""" +1000 47 dataset """fb15k237""" +1000 47 model """unstructuredmodel""" +1000 47 loss """marginranking""" +1000 47 regularizer """no""" +1000 47 optimizer """adam""" +1000 47 training_loop """owa""" +1000 47 negative_sampler """basic""" +1000 47 evaluator """rankbased""" +1000 48 dataset """fb15k237""" +1000 
48 model """unstructuredmodel""" +1000 48 loss """marginranking""" +1000 48 regularizer """no""" +1000 48 optimizer """adam""" +1000 48 training_loop """owa""" +1000 48 negative_sampler """basic""" +1000 48 evaluator """rankbased""" +1000 49 dataset """fb15k237""" +1000 49 model """unstructuredmodel""" +1000 49 loss """marginranking""" +1000 49 regularizer """no""" +1000 49 optimizer """adam""" +1000 49 training_loop """owa""" +1000 49 negative_sampler """basic""" +1000 49 evaluator """rankbased""" +1000 50 dataset """fb15k237""" +1000 50 model """unstructuredmodel""" +1000 50 loss """marginranking""" +1000 50 regularizer """no""" +1000 50 optimizer """adam""" +1000 50 training_loop """owa""" +1000 50 negative_sampler """basic""" +1000 50 evaluator """rankbased""" +1000 51 dataset """fb15k237""" +1000 51 model """unstructuredmodel""" +1000 51 loss """marginranking""" +1000 51 regularizer """no""" +1000 51 optimizer """adam""" +1000 51 training_loop """owa""" +1000 51 negative_sampler """basic""" +1000 51 evaluator """rankbased""" +1000 52 dataset """fb15k237""" +1000 52 model """unstructuredmodel""" +1000 52 loss """marginranking""" +1000 52 regularizer """no""" +1000 52 optimizer """adam""" +1000 52 training_loop """owa""" +1000 52 negative_sampler """basic""" +1000 52 evaluator """rankbased""" +1000 53 dataset """fb15k237""" +1000 53 model """unstructuredmodel""" +1000 53 loss """marginranking""" +1000 53 regularizer """no""" +1000 53 optimizer """adam""" +1000 53 training_loop """owa""" +1000 53 negative_sampler """basic""" +1000 53 evaluator """rankbased""" +1000 54 dataset """fb15k237""" +1000 54 model """unstructuredmodel""" +1000 54 loss """marginranking""" +1000 54 regularizer """no""" +1000 54 optimizer """adam""" +1000 54 training_loop """owa""" +1000 54 negative_sampler """basic""" +1000 54 evaluator """rankbased""" +1000 55 dataset """fb15k237""" +1000 55 model """unstructuredmodel""" +1000 55 loss """marginranking""" +1000 55 regularizer """no""" +1000 
55 optimizer """adam""" +1000 55 training_loop """owa""" +1000 55 negative_sampler """basic""" +1000 55 evaluator """rankbased""" +1000 56 dataset """fb15k237""" +1000 56 model """unstructuredmodel""" +1000 56 loss """marginranking""" +1000 56 regularizer """no""" +1000 56 optimizer """adam""" +1000 56 training_loop """owa""" +1000 56 negative_sampler """basic""" +1000 56 evaluator """rankbased""" +1000 57 dataset """fb15k237""" +1000 57 model """unstructuredmodel""" +1000 57 loss """marginranking""" +1000 57 regularizer """no""" +1000 57 optimizer """adam""" +1000 57 training_loop """owa""" +1000 57 negative_sampler """basic""" +1000 57 evaluator """rankbased""" +1000 58 dataset """fb15k237""" +1000 58 model """unstructuredmodel""" +1000 58 loss """marginranking""" +1000 58 regularizer """no""" +1000 58 optimizer """adam""" +1000 58 training_loop """owa""" +1000 58 negative_sampler """basic""" +1000 58 evaluator """rankbased""" +1000 59 dataset """fb15k237""" +1000 59 model """unstructuredmodel""" +1000 59 loss """marginranking""" +1000 59 regularizer """no""" +1000 59 optimizer """adam""" +1000 59 training_loop """owa""" +1000 59 negative_sampler """basic""" +1000 59 evaluator """rankbased""" +1000 60 dataset """fb15k237""" +1000 60 model """unstructuredmodel""" +1000 60 loss """marginranking""" +1000 60 regularizer """no""" +1000 60 optimizer """adam""" +1000 60 training_loop """owa""" +1000 60 negative_sampler """basic""" +1000 60 evaluator """rankbased""" +1000 61 dataset """fb15k237""" +1000 61 model """unstructuredmodel""" +1000 61 loss """marginranking""" +1000 61 regularizer """no""" +1000 61 optimizer """adam""" +1000 61 training_loop """owa""" +1000 61 negative_sampler """basic""" +1000 61 evaluator """rankbased""" +1001 1 model.embedding_dim 1.0 +1001 1 model.scoring_fct_norm 2.0 +1001 1 loss.margin 9.139984673431513 +1001 1 optimizer.lr 0.08257767521742007 +1001 1 negative_sampler.num_negs_per_pos 33.0 +1001 1 training.batch_size 1.0 +1001 2 
model.embedding_dim 1.0 +1001 2 model.scoring_fct_norm 1.0 +1001 2 loss.margin 1.3518652179304782 +1001 2 optimizer.lr 0.006971431333686981 +1001 2 negative_sampler.num_negs_per_pos 68.0 +1001 2 training.batch_size 0.0 +1001 3 model.embedding_dim 0.0 +1001 3 model.scoring_fct_norm 1.0 +1001 3 loss.margin 2.7768529860030786 +1001 3 optimizer.lr 0.08529076072951945 +1001 3 negative_sampler.num_negs_per_pos 59.0 +1001 3 training.batch_size 1.0 +1001 4 model.embedding_dim 2.0 +1001 4 model.scoring_fct_norm 2.0 +1001 4 loss.margin 3.528301402246492 +1001 4 optimizer.lr 0.004551616244563894 +1001 4 negative_sampler.num_negs_per_pos 69.0 +1001 4 training.batch_size 2.0 +1001 5 model.embedding_dim 2.0 +1001 5 model.scoring_fct_norm 2.0 +1001 5 loss.margin 7.683024575483555 +1001 5 optimizer.lr 0.016229859651409908 +1001 5 negative_sampler.num_negs_per_pos 40.0 +1001 5 training.batch_size 2.0 +1001 6 model.embedding_dim 0.0 +1001 6 model.scoring_fct_norm 2.0 +1001 6 loss.margin 6.171308953943018 +1001 6 optimizer.lr 0.0012541603999167324 +1001 6 negative_sampler.num_negs_per_pos 41.0 +1001 6 training.batch_size 1.0 +1001 7 model.embedding_dim 1.0 +1001 7 model.scoring_fct_norm 2.0 +1001 7 loss.margin 2.8059400544553186 +1001 7 optimizer.lr 0.021499899458016964 +1001 7 negative_sampler.num_negs_per_pos 7.0 +1001 7 training.batch_size 1.0 +1001 8 model.embedding_dim 1.0 +1001 8 model.scoring_fct_norm 1.0 +1001 8 loss.margin 2.8021514557563223 +1001 8 optimizer.lr 0.04357258163383782 +1001 8 negative_sampler.num_negs_per_pos 22.0 +1001 8 training.batch_size 1.0 +1001 9 model.embedding_dim 1.0 +1001 9 model.scoring_fct_norm 2.0 +1001 9 loss.margin 6.278474766709328 +1001 9 optimizer.lr 0.012337719988592464 +1001 9 negative_sampler.num_negs_per_pos 62.0 +1001 9 training.batch_size 2.0 +1001 10 model.embedding_dim 1.0 +1001 10 model.scoring_fct_norm 2.0 +1001 10 loss.margin 2.3461578040709745 +1001 10 optimizer.lr 0.008862702414696716 +1001 10 negative_sampler.num_negs_per_pos 
62.0 +1001 10 training.batch_size 0.0 +1001 11 model.embedding_dim 0.0 +1001 11 model.scoring_fct_norm 2.0 +1001 11 loss.margin 0.6781285734916553 +1001 11 optimizer.lr 0.0014386585700924878 +1001 11 negative_sampler.num_negs_per_pos 69.0 +1001 11 training.batch_size 0.0 +1001 12 model.embedding_dim 1.0 +1001 12 model.scoring_fct_norm 2.0 +1001 12 loss.margin 9.36413175349465 +1001 12 optimizer.lr 0.0059949930461621805 +1001 12 negative_sampler.num_negs_per_pos 70.0 +1001 12 training.batch_size 1.0 +1001 13 model.embedding_dim 1.0 +1001 13 model.scoring_fct_norm 1.0 +1001 13 loss.margin 5.651298945718084 +1001 13 optimizer.lr 0.010594114974744825 +1001 13 negative_sampler.num_negs_per_pos 77.0 +1001 13 training.batch_size 0.0 +1001 14 model.embedding_dim 0.0 +1001 14 model.scoring_fct_norm 2.0 +1001 14 loss.margin 6.820962956393857 +1001 14 optimizer.lr 0.009159118149084195 +1001 14 negative_sampler.num_negs_per_pos 21.0 +1001 14 training.batch_size 1.0 +1001 15 model.embedding_dim 0.0 +1001 15 model.scoring_fct_norm 2.0 +1001 15 loss.margin 9.84762307198284 +1001 15 optimizer.lr 0.009502074474508121 +1001 15 negative_sampler.num_negs_per_pos 91.0 +1001 15 training.batch_size 2.0 +1001 16 model.embedding_dim 1.0 +1001 16 model.scoring_fct_norm 2.0 +1001 16 loss.margin 5.39078183923 +1001 16 optimizer.lr 0.003537856149662185 +1001 16 negative_sampler.num_negs_per_pos 38.0 +1001 16 training.batch_size 2.0 +1001 17 model.embedding_dim 2.0 +1001 17 model.scoring_fct_norm 2.0 +1001 17 loss.margin 9.288218547699708 +1001 17 optimizer.lr 0.0025026555846365674 +1001 17 negative_sampler.num_negs_per_pos 26.0 +1001 17 training.batch_size 0.0 +1001 18 model.embedding_dim 1.0 +1001 18 model.scoring_fct_norm 1.0 +1001 18 loss.margin 8.836539027040102 +1001 18 optimizer.lr 0.04844510833484849 +1001 18 negative_sampler.num_negs_per_pos 69.0 +1001 18 training.batch_size 2.0 +1001 19 model.embedding_dim 0.0 +1001 19 model.scoring_fct_norm 1.0 +1001 19 loss.margin 8.436481893829123 
+1001 19 optimizer.lr 0.01400040421395254 +1001 19 negative_sampler.num_negs_per_pos 20.0 +1001 19 training.batch_size 0.0 +1001 20 model.embedding_dim 0.0 +1001 20 model.scoring_fct_norm 2.0 +1001 20 loss.margin 5.512723513166901 +1001 20 optimizer.lr 0.03914097999477234 +1001 20 negative_sampler.num_negs_per_pos 9.0 +1001 20 training.batch_size 0.0 +1001 21 model.embedding_dim 2.0 +1001 21 model.scoring_fct_norm 2.0 +1001 21 loss.margin 7.268496132171568 +1001 21 optimizer.lr 0.03411213480470097 +1001 21 negative_sampler.num_negs_per_pos 96.0 +1001 21 training.batch_size 1.0 +1001 22 model.embedding_dim 1.0 +1001 22 model.scoring_fct_norm 2.0 +1001 22 loss.margin 6.836807509842438 +1001 22 optimizer.lr 0.01029077338018972 +1001 22 negative_sampler.num_negs_per_pos 21.0 +1001 22 training.batch_size 0.0 +1001 23 model.embedding_dim 2.0 +1001 23 model.scoring_fct_norm 1.0 +1001 23 loss.margin 9.386719300217633 +1001 23 optimizer.lr 0.03582384640077503 +1001 23 negative_sampler.num_negs_per_pos 93.0 +1001 23 training.batch_size 0.0 +1001 24 model.embedding_dim 2.0 +1001 24 model.scoring_fct_norm 1.0 +1001 24 loss.margin 5.758311326179175 +1001 24 optimizer.lr 0.0027169761967430323 +1001 24 negative_sampler.num_negs_per_pos 35.0 +1001 24 training.batch_size 1.0 +1001 25 model.embedding_dim 2.0 +1001 25 model.scoring_fct_norm 2.0 +1001 25 loss.margin 7.57921244467985 +1001 25 optimizer.lr 0.006916564488764 +1001 25 negative_sampler.num_negs_per_pos 49.0 +1001 25 training.batch_size 2.0 +1001 26 model.embedding_dim 1.0 +1001 26 model.scoring_fct_norm 1.0 +1001 26 loss.margin 3.988103637539738 +1001 26 optimizer.lr 0.002243427082074656 +1001 26 negative_sampler.num_negs_per_pos 68.0 +1001 26 training.batch_size 2.0 +1001 27 model.embedding_dim 0.0 +1001 27 model.scoring_fct_norm 2.0 +1001 27 loss.margin 9.732891442297866 +1001 27 optimizer.lr 0.0012083818553096687 +1001 27 negative_sampler.num_negs_per_pos 72.0 +1001 27 training.batch_size 1.0 +1001 28 
model.embedding_dim 1.0 +1001 28 model.scoring_fct_norm 2.0 +1001 28 loss.margin 3.6034335895618685 +1001 28 optimizer.lr 0.023518225674357525 +1001 28 negative_sampler.num_negs_per_pos 10.0 +1001 28 training.batch_size 0.0 +1001 29 model.embedding_dim 2.0 +1001 29 model.scoring_fct_norm 2.0 +1001 29 loss.margin 0.5845802125507069 +1001 29 optimizer.lr 0.0027286806478911407 +1001 29 negative_sampler.num_negs_per_pos 53.0 +1001 29 training.batch_size 0.0 +1001 30 model.embedding_dim 1.0 +1001 30 model.scoring_fct_norm 2.0 +1001 30 loss.margin 2.854817223866304 +1001 30 optimizer.lr 0.0027514379744563574 +1001 30 negative_sampler.num_negs_per_pos 55.0 +1001 30 training.batch_size 2.0 +1001 31 model.embedding_dim 0.0 +1001 31 model.scoring_fct_norm 1.0 +1001 31 loss.margin 3.1436034448534462 +1001 31 optimizer.lr 0.012891646619473725 +1001 31 negative_sampler.num_negs_per_pos 19.0 +1001 31 training.batch_size 1.0 +1001 32 model.embedding_dim 2.0 +1001 32 model.scoring_fct_norm 1.0 +1001 32 loss.margin 8.040179939102796 +1001 32 optimizer.lr 0.07274111612104436 +1001 32 negative_sampler.num_negs_per_pos 83.0 +1001 32 training.batch_size 1.0 +1001 33 model.embedding_dim 2.0 +1001 33 model.scoring_fct_norm 2.0 +1001 33 loss.margin 9.359079364082035 +1001 33 optimizer.lr 0.004802072390221299 +1001 33 negative_sampler.num_negs_per_pos 16.0 +1001 33 training.batch_size 2.0 +1001 34 model.embedding_dim 2.0 +1001 34 model.scoring_fct_norm 1.0 +1001 34 loss.margin 4.472050531418613 +1001 34 optimizer.lr 0.0033702989570302674 +1001 34 negative_sampler.num_negs_per_pos 13.0 +1001 34 training.batch_size 1.0 +1001 35 model.embedding_dim 1.0 +1001 35 model.scoring_fct_norm 1.0 +1001 35 loss.margin 2.3651093060164423 +1001 35 optimizer.lr 0.0014087074655176367 +1001 35 negative_sampler.num_negs_per_pos 34.0 +1001 35 training.batch_size 1.0 +1001 36 model.embedding_dim 2.0 +1001 36 model.scoring_fct_norm 2.0 +1001 36 loss.margin 3.919456392472317 +1001 36 optimizer.lr 
0.0013463466792419903 +1001 36 negative_sampler.num_negs_per_pos 15.0 +1001 36 training.batch_size 0.0 +1001 37 model.embedding_dim 2.0 +1001 37 model.scoring_fct_norm 2.0 +1001 37 loss.margin 6.6589240467009745 +1001 37 optimizer.lr 0.04282112149571496 +1001 37 negative_sampler.num_negs_per_pos 24.0 +1001 37 training.batch_size 0.0 +1001 38 model.embedding_dim 0.0 +1001 38 model.scoring_fct_norm 1.0 +1001 38 loss.margin 9.187386279064686 +1001 38 optimizer.lr 0.01246233879391579 +1001 38 negative_sampler.num_negs_per_pos 37.0 +1001 38 training.batch_size 2.0 +1001 39 model.embedding_dim 2.0 +1001 39 model.scoring_fct_norm 1.0 +1001 39 loss.margin 9.015821704026633 +1001 39 optimizer.lr 0.0037191750480951666 +1001 39 negative_sampler.num_negs_per_pos 71.0 +1001 39 training.batch_size 0.0 +1001 40 model.embedding_dim 2.0 +1001 40 model.scoring_fct_norm 2.0 +1001 40 loss.margin 4.430358797730813 +1001 40 optimizer.lr 0.07696964150328225 +1001 40 negative_sampler.num_negs_per_pos 84.0 +1001 40 training.batch_size 1.0 +1001 41 model.embedding_dim 0.0 +1001 41 model.scoring_fct_norm 1.0 +1001 41 loss.margin 5.630062869973191 +1001 41 optimizer.lr 0.05220494194552908 +1001 41 negative_sampler.num_negs_per_pos 9.0 +1001 41 training.batch_size 2.0 +1001 42 model.embedding_dim 2.0 +1001 42 model.scoring_fct_norm 1.0 +1001 42 loss.margin 3.4156908931641783 +1001 42 optimizer.lr 0.001140420768319295 +1001 42 negative_sampler.num_negs_per_pos 13.0 +1001 42 training.batch_size 2.0 +1001 43 model.embedding_dim 1.0 +1001 43 model.scoring_fct_norm 1.0 +1001 43 loss.margin 8.414775926741267 +1001 43 optimizer.lr 0.02099838285333606 +1001 43 negative_sampler.num_negs_per_pos 22.0 +1001 43 training.batch_size 1.0 +1001 44 model.embedding_dim 2.0 +1001 44 model.scoring_fct_norm 2.0 +1001 44 loss.margin 8.118285557073452 +1001 44 optimizer.lr 0.025231445685892633 +1001 44 negative_sampler.num_negs_per_pos 77.0 +1001 44 training.batch_size 0.0 +1001 45 model.embedding_dim 1.0 +1001 45 
model.scoring_fct_norm 1.0 +1001 45 loss.margin 9.419682965566665 +1001 45 optimizer.lr 0.07442606376363411 +1001 45 negative_sampler.num_negs_per_pos 8.0 +1001 45 training.batch_size 1.0 +1001 46 model.embedding_dim 2.0 +1001 46 model.scoring_fct_norm 1.0 +1001 46 loss.margin 2.640716557557097 +1001 46 optimizer.lr 0.0031765092989720946 +1001 46 negative_sampler.num_negs_per_pos 21.0 +1001 46 training.batch_size 1.0 +1001 47 model.embedding_dim 0.0 +1001 47 model.scoring_fct_norm 2.0 +1001 47 loss.margin 7.474083665276229 +1001 47 optimizer.lr 0.07042899607833998 +1001 47 negative_sampler.num_negs_per_pos 39.0 +1001 47 training.batch_size 2.0 +1001 48 model.embedding_dim 0.0 +1001 48 model.scoring_fct_norm 2.0 +1001 48 loss.margin 7.986290842658019 +1001 48 optimizer.lr 0.012318149190085588 +1001 48 negative_sampler.num_negs_per_pos 12.0 +1001 48 training.batch_size 2.0 +1001 49 model.embedding_dim 2.0 +1001 49 model.scoring_fct_norm 2.0 +1001 49 loss.margin 8.752988347265116 +1001 49 optimizer.lr 0.00333375122647867 +1001 49 negative_sampler.num_negs_per_pos 93.0 +1001 49 training.batch_size 1.0 +1001 50 model.embedding_dim 2.0 +1001 50 model.scoring_fct_norm 1.0 +1001 50 loss.margin 0.6535742501642405 +1001 50 optimizer.lr 0.001368919222447291 +1001 50 negative_sampler.num_negs_per_pos 42.0 +1001 50 training.batch_size 2.0 +1001 51 model.embedding_dim 0.0 +1001 51 model.scoring_fct_norm 1.0 +1001 51 loss.margin 6.221891387340042 +1001 51 optimizer.lr 0.08772067561666361 +1001 51 negative_sampler.num_negs_per_pos 1.0 +1001 51 training.batch_size 2.0 +1001 52 model.embedding_dim 2.0 +1001 52 model.scoring_fct_norm 1.0 +1001 52 loss.margin 5.850806599306834 +1001 52 optimizer.lr 0.0034110551349530436 +1001 52 negative_sampler.num_negs_per_pos 70.0 +1001 52 training.batch_size 1.0 +1001 53 model.embedding_dim 2.0 +1001 53 model.scoring_fct_norm 1.0 +1001 53 loss.margin 2.838810686725244 +1001 53 optimizer.lr 0.016926703641491764 +1001 53 
negative_sampler.num_negs_per_pos 22.0 +1001 53 training.batch_size 1.0 +1001 54 model.embedding_dim 2.0 +1001 54 model.scoring_fct_norm 2.0 +1001 54 loss.margin 0.7347780064944283 +1001 54 optimizer.lr 0.0019682449101125613 +1001 54 negative_sampler.num_negs_per_pos 60.0 +1001 54 training.batch_size 1.0 +1001 55 model.embedding_dim 2.0 +1001 55 model.scoring_fct_norm 1.0 +1001 55 loss.margin 2.789144719643381 +1001 55 optimizer.lr 0.0060024495488478664 +1001 55 negative_sampler.num_negs_per_pos 20.0 +1001 55 training.batch_size 0.0 +1001 56 model.embedding_dim 1.0 +1001 56 model.scoring_fct_norm 2.0 +1001 56 loss.margin 1.585086853298441 +1001 56 optimizer.lr 0.0027203177422045013 +1001 56 negative_sampler.num_negs_per_pos 53.0 +1001 56 training.batch_size 1.0 +1001 57 model.embedding_dim 0.0 +1001 57 model.scoring_fct_norm 1.0 +1001 57 loss.margin 2.7921827161019275 +1001 57 optimizer.lr 0.0015980609964026404 +1001 57 negative_sampler.num_negs_per_pos 18.0 +1001 57 training.batch_size 0.0 +1001 58 model.embedding_dim 0.0 +1001 58 model.scoring_fct_norm 1.0 +1001 58 loss.margin 8.779890923581524 +1001 58 optimizer.lr 0.0014976588178118736 +1001 58 negative_sampler.num_negs_per_pos 72.0 +1001 58 training.batch_size 0.0 +1001 59 model.embedding_dim 0.0 +1001 59 model.scoring_fct_norm 1.0 +1001 59 loss.margin 3.7846786483863624 +1001 59 optimizer.lr 0.022493674153295708 +1001 59 negative_sampler.num_negs_per_pos 21.0 +1001 59 training.batch_size 1.0 +1001 60 model.embedding_dim 0.0 +1001 60 model.scoring_fct_norm 2.0 +1001 60 loss.margin 5.920087335995436 +1001 60 optimizer.lr 0.005553153742181278 +1001 60 negative_sampler.num_negs_per_pos 48.0 +1001 60 training.batch_size 2.0 +1001 61 model.embedding_dim 0.0 +1001 61 model.scoring_fct_norm 2.0 +1001 61 loss.margin 1.1463353640222786 +1001 61 optimizer.lr 0.0031302643045606113 +1001 61 negative_sampler.num_negs_per_pos 86.0 +1001 61 training.batch_size 2.0 +1001 62 model.embedding_dim 0.0 +1001 62 
model.scoring_fct_norm 1.0 +1001 62 loss.margin 2.0402706357999993 +1001 62 optimizer.lr 0.02368197311407856 +1001 62 negative_sampler.num_negs_per_pos 8.0 +1001 62 training.batch_size 2.0 +1001 63 model.embedding_dim 2.0 +1001 63 model.scoring_fct_norm 2.0 +1001 63 loss.margin 6.11684448084979 +1001 63 optimizer.lr 0.036479516950029635 +1001 63 negative_sampler.num_negs_per_pos 52.0 +1001 63 training.batch_size 1.0 +1001 64 model.embedding_dim 0.0 +1001 64 model.scoring_fct_norm 2.0 +1001 64 loss.margin 5.651883308838194 +1001 64 optimizer.lr 0.0011419904046656567 +1001 64 negative_sampler.num_negs_per_pos 5.0 +1001 64 training.batch_size 1.0 +1001 65 model.embedding_dim 0.0 +1001 65 model.scoring_fct_norm 1.0 +1001 65 loss.margin 2.2288768303474904 +1001 65 optimizer.lr 0.006322942125373589 +1001 65 negative_sampler.num_negs_per_pos 13.0 +1001 65 training.batch_size 0.0 +1001 66 model.embedding_dim 1.0 +1001 66 model.scoring_fct_norm 1.0 +1001 66 loss.margin 8.139519644901391 +1001 66 optimizer.lr 0.008376317004368944 +1001 66 negative_sampler.num_negs_per_pos 67.0 +1001 66 training.batch_size 1.0 +1001 67 model.embedding_dim 1.0 +1001 67 model.scoring_fct_norm 2.0 +1001 67 loss.margin 8.351380607416768 +1001 67 optimizer.lr 0.0073869884313865625 +1001 67 negative_sampler.num_negs_per_pos 17.0 +1001 67 training.batch_size 1.0 +1001 68 model.embedding_dim 0.0 +1001 68 model.scoring_fct_norm 2.0 +1001 68 loss.margin 6.045033713316171 +1001 68 optimizer.lr 0.025254976101564366 +1001 68 negative_sampler.num_negs_per_pos 99.0 +1001 68 training.batch_size 0.0 +1001 69 model.embedding_dim 1.0 +1001 69 model.scoring_fct_norm 1.0 +1001 69 loss.margin 4.447346463894188 +1001 69 optimizer.lr 0.06718299853557384 +1001 69 negative_sampler.num_negs_per_pos 11.0 +1001 69 training.batch_size 0.0 +1001 70 model.embedding_dim 2.0 +1001 70 model.scoring_fct_norm 2.0 +1001 70 loss.margin 9.387611576276088 +1001 70 optimizer.lr 0.004017341110746027 +1001 70 
negative_sampler.num_negs_per_pos 72.0 +1001 70 training.batch_size 1.0 +1001 71 model.embedding_dim 0.0 +1001 71 model.scoring_fct_norm 1.0 +1001 71 loss.margin 3.1400362415943404 +1001 71 optimizer.lr 0.011675557167527318 +1001 71 negative_sampler.num_negs_per_pos 66.0 +1001 71 training.batch_size 0.0 +1001 72 model.embedding_dim 2.0 +1001 72 model.scoring_fct_norm 2.0 +1001 72 loss.margin 8.614544045349138 +1001 72 optimizer.lr 0.007906436113461217 +1001 72 negative_sampler.num_negs_per_pos 84.0 +1001 72 training.batch_size 1.0 +1001 73 model.embedding_dim 0.0 +1001 73 model.scoring_fct_norm 1.0 +1001 73 loss.margin 3.829275360963285 +1001 73 optimizer.lr 0.0013577938091784798 +1001 73 negative_sampler.num_negs_per_pos 95.0 +1001 73 training.batch_size 0.0 +1001 74 model.embedding_dim 0.0 +1001 74 model.scoring_fct_norm 1.0 +1001 74 loss.margin 2.582945347448372 +1001 74 optimizer.lr 0.001319928117491129 +1001 74 negative_sampler.num_negs_per_pos 51.0 +1001 74 training.batch_size 2.0 +1001 75 model.embedding_dim 1.0 +1001 75 model.scoring_fct_norm 2.0 +1001 75 loss.margin 8.113923634464479 +1001 75 optimizer.lr 0.09385140235264255 +1001 75 negative_sampler.num_negs_per_pos 29.0 +1001 75 training.batch_size 2.0 +1001 76 model.embedding_dim 0.0 +1001 76 model.scoring_fct_norm 2.0 +1001 76 loss.margin 9.920363531455408 +1001 76 optimizer.lr 0.007276172372407171 +1001 76 negative_sampler.num_negs_per_pos 48.0 +1001 76 training.batch_size 0.0 +1001 77 model.embedding_dim 2.0 +1001 77 model.scoring_fct_norm 1.0 +1001 77 loss.margin 3.587541648623224 +1001 77 optimizer.lr 0.05830735442272663 +1001 77 negative_sampler.num_negs_per_pos 11.0 +1001 77 training.batch_size 0.0 +1001 78 model.embedding_dim 1.0 +1001 78 model.scoring_fct_norm 1.0 +1001 78 loss.margin 9.063781759055766 +1001 78 optimizer.lr 0.05453112349525184 +1001 78 negative_sampler.num_negs_per_pos 53.0 +1001 78 training.batch_size 1.0 +1001 79 model.embedding_dim 1.0 +1001 79 model.scoring_fct_norm 2.0 
+1001 79 loss.margin 0.8934650881802353 +1001 79 optimizer.lr 0.01706882802598921 +1001 79 negative_sampler.num_negs_per_pos 40.0 +1001 79 training.batch_size 2.0 +1001 80 model.embedding_dim 1.0 +1001 80 model.scoring_fct_norm 2.0 +1001 80 loss.margin 3.888244663372453 +1001 80 optimizer.lr 0.0017828800157291083 +1001 80 negative_sampler.num_negs_per_pos 51.0 +1001 80 training.batch_size 1.0 +1001 81 model.embedding_dim 0.0 +1001 81 model.scoring_fct_norm 2.0 +1001 81 loss.margin 5.997542062337066 +1001 81 optimizer.lr 0.007839940542632552 +1001 81 negative_sampler.num_negs_per_pos 80.0 +1001 81 training.batch_size 0.0 +1001 82 model.embedding_dim 2.0 +1001 82 model.scoring_fct_norm 2.0 +1001 82 loss.margin 8.536127415670048 +1001 82 optimizer.lr 0.006266763630315015 +1001 82 negative_sampler.num_negs_per_pos 18.0 +1001 82 training.batch_size 2.0 +1001 83 model.embedding_dim 0.0 +1001 83 model.scoring_fct_norm 1.0 +1001 83 loss.margin 7.747068836944437 +1001 83 optimizer.lr 0.0044714497709442 +1001 83 negative_sampler.num_negs_per_pos 12.0 +1001 83 training.batch_size 0.0 +1001 84 model.embedding_dim 0.0 +1001 84 model.scoring_fct_norm 2.0 +1001 84 loss.margin 1.2455028853402539 +1001 84 optimizer.lr 0.0904545660801849 +1001 84 negative_sampler.num_negs_per_pos 68.0 +1001 84 training.batch_size 0.0 +1001 85 model.embedding_dim 0.0 +1001 85 model.scoring_fct_norm 1.0 +1001 85 loss.margin 7.003947403097425 +1001 85 optimizer.lr 0.0017538489955304301 +1001 85 negative_sampler.num_negs_per_pos 93.0 +1001 85 training.batch_size 2.0 +1001 86 model.embedding_dim 0.0 +1001 86 model.scoring_fct_norm 1.0 +1001 86 loss.margin 8.995465376848015 +1001 86 optimizer.lr 0.007328113942696558 +1001 86 negative_sampler.num_negs_per_pos 69.0 +1001 86 training.batch_size 0.0 +1001 87 model.embedding_dim 0.0 +1001 87 model.scoring_fct_norm 2.0 +1001 87 loss.margin 4.2600663686548845 +1001 87 optimizer.lr 0.010075061106587152 +1001 87 negative_sampler.num_negs_per_pos 46.0 +1001 87 
training.batch_size 2.0 +1001 88 model.embedding_dim 0.0 +1001 88 model.scoring_fct_norm 1.0 +1001 88 loss.margin 3.224298817114916 +1001 88 optimizer.lr 0.010331319948194218 +1001 88 negative_sampler.num_negs_per_pos 23.0 +1001 88 training.batch_size 0.0 +1001 89 model.embedding_dim 1.0 +1001 89 model.scoring_fct_norm 2.0 +1001 89 loss.margin 7.003116536925805 +1001 89 optimizer.lr 0.05918527231013428 +1001 89 negative_sampler.num_negs_per_pos 47.0 +1001 89 training.batch_size 1.0 +1001 90 model.embedding_dim 2.0 +1001 90 model.scoring_fct_norm 1.0 +1001 90 loss.margin 6.575872882994401 +1001 90 optimizer.lr 0.08947847669207322 +1001 90 negative_sampler.num_negs_per_pos 94.0 +1001 90 training.batch_size 2.0 +1001 91 model.embedding_dim 0.0 +1001 91 model.scoring_fct_norm 2.0 +1001 91 loss.margin 7.041921312366052 +1001 91 optimizer.lr 0.07557840227685522 +1001 91 negative_sampler.num_negs_per_pos 6.0 +1001 91 training.batch_size 0.0 +1001 92 model.embedding_dim 0.0 +1001 92 model.scoring_fct_norm 1.0 +1001 92 loss.margin 5.842291418185209 +1001 92 optimizer.lr 0.001489079681302194 +1001 92 negative_sampler.num_negs_per_pos 55.0 +1001 92 training.batch_size 1.0 +1001 93 model.embedding_dim 2.0 +1001 93 model.scoring_fct_norm 1.0 +1001 93 loss.margin 4.750922157595973 +1001 93 optimizer.lr 0.0011460795962148 +1001 93 negative_sampler.num_negs_per_pos 39.0 +1001 93 training.batch_size 2.0 +1001 94 model.embedding_dim 2.0 +1001 94 model.scoring_fct_norm 1.0 +1001 94 loss.margin 7.664694506293274 +1001 94 optimizer.lr 0.01503305901505033 +1001 94 negative_sampler.num_negs_per_pos 54.0 +1001 94 training.batch_size 1.0 +1001 95 model.embedding_dim 0.0 +1001 95 model.scoring_fct_norm 1.0 +1001 95 loss.margin 2.696523641526305 +1001 95 optimizer.lr 0.002907871376355653 +1001 95 negative_sampler.num_negs_per_pos 72.0 +1001 95 training.batch_size 1.0 +1001 96 model.embedding_dim 1.0 +1001 96 model.scoring_fct_norm 2.0 +1001 96 loss.margin 6.561096085278557 +1001 96 
optimizer.lr 0.022517562861690488 +1001 96 negative_sampler.num_negs_per_pos 81.0 +1001 96 training.batch_size 2.0 +1001 97 model.embedding_dim 2.0 +1001 97 model.scoring_fct_norm 2.0 +1001 97 loss.margin 5.290304865812787 +1001 97 optimizer.lr 0.0036623056936453432 +1001 97 negative_sampler.num_negs_per_pos 40.0 +1001 97 training.batch_size 0.0 +1001 98 model.embedding_dim 1.0 +1001 98 model.scoring_fct_norm 2.0 +1001 98 loss.margin 1.2815589278451363 +1001 98 optimizer.lr 0.003114450603088563 +1001 98 negative_sampler.num_negs_per_pos 77.0 +1001 98 training.batch_size 0.0 +1001 99 model.embedding_dim 0.0 +1001 99 model.scoring_fct_norm 1.0 +1001 99 loss.margin 4.636445373647715 +1001 99 optimizer.lr 0.021206410252316104 +1001 99 negative_sampler.num_negs_per_pos 9.0 +1001 99 training.batch_size 1.0 +1001 100 model.embedding_dim 1.0 +1001 100 model.scoring_fct_norm 2.0 +1001 100 loss.margin 7.076293372109996 +1001 100 optimizer.lr 0.0019097096364017756 +1001 100 negative_sampler.num_negs_per_pos 55.0 +1001 100 training.batch_size 2.0 +1001 1 dataset """fb15k237""" +1001 1 model """unstructuredmodel""" +1001 1 loss """marginranking""" +1001 1 regularizer """no""" +1001 1 optimizer """adam""" +1001 1 training_loop """owa""" +1001 1 negative_sampler """basic""" +1001 1 evaluator """rankbased""" +1001 2 dataset """fb15k237""" +1001 2 model """unstructuredmodel""" +1001 2 loss """marginranking""" +1001 2 regularizer """no""" +1001 2 optimizer """adam""" +1001 2 training_loop """owa""" +1001 2 negative_sampler """basic""" +1001 2 evaluator """rankbased""" +1001 3 dataset """fb15k237""" +1001 3 model """unstructuredmodel""" +1001 3 loss """marginranking""" +1001 3 regularizer """no""" +1001 3 optimizer """adam""" +1001 3 training_loop """owa""" +1001 3 negative_sampler """basic""" +1001 3 evaluator """rankbased""" +1001 4 dataset """fb15k237""" +1001 4 model """unstructuredmodel""" +1001 4 loss """marginranking""" +1001 4 regularizer """no""" +1001 4 optimizer """adam""" 
+1001 4 training_loop """owa""" +1001 4 negative_sampler """basic""" +1001 4 evaluator """rankbased""" +1001 5 dataset """fb15k237""" +1001 5 model """unstructuredmodel""" +1001 5 loss """marginranking""" +1001 5 regularizer """no""" +1001 5 optimizer """adam""" +1001 5 training_loop """owa""" +1001 5 negative_sampler """basic""" +1001 5 evaluator """rankbased""" +1001 6 dataset """fb15k237""" +1001 6 model """unstructuredmodel""" +1001 6 loss """marginranking""" +1001 6 regularizer """no""" +1001 6 optimizer """adam""" +1001 6 training_loop """owa""" +1001 6 negative_sampler """basic""" +1001 6 evaluator """rankbased""" +1001 7 dataset """fb15k237""" +1001 7 model """unstructuredmodel""" +1001 7 loss """marginranking""" +1001 7 regularizer """no""" +1001 7 optimizer """adam""" +1001 7 training_loop """owa""" +1001 7 negative_sampler """basic""" +1001 7 evaluator """rankbased""" +1001 8 dataset """fb15k237""" +1001 8 model """unstructuredmodel""" +1001 8 loss """marginranking""" +1001 8 regularizer """no""" +1001 8 optimizer """adam""" +1001 8 training_loop """owa""" +1001 8 negative_sampler """basic""" +1001 8 evaluator """rankbased""" +1001 9 dataset """fb15k237""" +1001 9 model """unstructuredmodel""" +1001 9 loss """marginranking""" +1001 9 regularizer """no""" +1001 9 optimizer """adam""" +1001 9 training_loop """owa""" +1001 9 negative_sampler """basic""" +1001 9 evaluator """rankbased""" +1001 10 dataset """fb15k237""" +1001 10 model """unstructuredmodel""" +1001 10 loss """marginranking""" +1001 10 regularizer """no""" +1001 10 optimizer """adam""" +1001 10 training_loop """owa""" +1001 10 negative_sampler """basic""" +1001 10 evaluator """rankbased""" +1001 11 dataset """fb15k237""" +1001 11 model """unstructuredmodel""" +1001 11 loss """marginranking""" +1001 11 regularizer """no""" +1001 11 optimizer """adam""" +1001 11 training_loop """owa""" +1001 11 negative_sampler """basic""" +1001 11 evaluator """rankbased""" +1001 12 dataset """fb15k237""" +1001 
12 model """unstructuredmodel""" +1001 12 loss """marginranking""" +1001 12 regularizer """no""" +1001 12 optimizer """adam""" +1001 12 training_loop """owa""" +1001 12 negative_sampler """basic""" +1001 12 evaluator """rankbased""" +1001 13 dataset """fb15k237""" +1001 13 model """unstructuredmodel""" +1001 13 loss """marginranking""" +1001 13 regularizer """no""" +1001 13 optimizer """adam""" +1001 13 training_loop """owa""" +1001 13 negative_sampler """basic""" +1001 13 evaluator """rankbased""" +1001 14 dataset """fb15k237""" +1001 14 model """unstructuredmodel""" +1001 14 loss """marginranking""" +1001 14 regularizer """no""" +1001 14 optimizer """adam""" +1001 14 training_loop """owa""" +1001 14 negative_sampler """basic""" +1001 14 evaluator """rankbased""" +1001 15 dataset """fb15k237""" +1001 15 model """unstructuredmodel""" +1001 15 loss """marginranking""" +1001 15 regularizer """no""" +1001 15 optimizer """adam""" +1001 15 training_loop """owa""" +1001 15 negative_sampler """basic""" +1001 15 evaluator """rankbased""" +1001 16 dataset """fb15k237""" +1001 16 model """unstructuredmodel""" +1001 16 loss """marginranking""" +1001 16 regularizer """no""" +1001 16 optimizer """adam""" +1001 16 training_loop """owa""" +1001 16 negative_sampler """basic""" +1001 16 evaluator """rankbased""" +1001 17 dataset """fb15k237""" +1001 17 model """unstructuredmodel""" +1001 17 loss """marginranking""" +1001 17 regularizer """no""" +1001 17 optimizer """adam""" +1001 17 training_loop """owa""" +1001 17 negative_sampler """basic""" +1001 17 evaluator """rankbased""" +1001 18 dataset """fb15k237""" +1001 18 model """unstructuredmodel""" +1001 18 loss """marginranking""" +1001 18 regularizer """no""" +1001 18 optimizer """adam""" +1001 18 training_loop """owa""" +1001 18 negative_sampler """basic""" +1001 18 evaluator """rankbased""" +1001 19 dataset """fb15k237""" +1001 19 model """unstructuredmodel""" +1001 19 loss """marginranking""" +1001 19 regularizer """no""" +1001 
19 optimizer """adam""" +1001 19 training_loop """owa""" +1001 19 negative_sampler """basic""" +1001 19 evaluator """rankbased""" +1001 20 dataset """fb15k237""" +1001 20 model """unstructuredmodel""" +1001 20 loss """marginranking""" +1001 20 regularizer """no""" +1001 20 optimizer """adam""" +1001 20 training_loop """owa""" +1001 20 negative_sampler """basic""" +1001 20 evaluator """rankbased""" +1001 21 dataset """fb15k237""" +1001 21 model """unstructuredmodel""" +1001 21 loss """marginranking""" +1001 21 regularizer """no""" +1001 21 optimizer """adam""" +1001 21 training_loop """owa""" +1001 21 negative_sampler """basic""" +1001 21 evaluator """rankbased""" +1001 22 dataset """fb15k237""" +1001 22 model """unstructuredmodel""" +1001 22 loss """marginranking""" +1001 22 regularizer """no""" +1001 22 optimizer """adam""" +1001 22 training_loop """owa""" +1001 22 negative_sampler """basic""" +1001 22 evaluator """rankbased""" +1001 23 dataset """fb15k237""" +1001 23 model """unstructuredmodel""" +1001 23 loss """marginranking""" +1001 23 regularizer """no""" +1001 23 optimizer """adam""" +1001 23 training_loop """owa""" +1001 23 negative_sampler """basic""" +1001 23 evaluator """rankbased""" +1001 24 dataset """fb15k237""" +1001 24 model """unstructuredmodel""" +1001 24 loss """marginranking""" +1001 24 regularizer """no""" +1001 24 optimizer """adam""" +1001 24 training_loop """owa""" +1001 24 negative_sampler """basic""" +1001 24 evaluator """rankbased""" +1001 25 dataset """fb15k237""" +1001 25 model """unstructuredmodel""" +1001 25 loss """marginranking""" +1001 25 regularizer """no""" +1001 25 optimizer """adam""" +1001 25 training_loop """owa""" +1001 25 negative_sampler """basic""" +1001 25 evaluator """rankbased""" +1001 26 dataset """fb15k237""" +1001 26 model """unstructuredmodel""" +1001 26 loss """marginranking""" +1001 26 regularizer """no""" +1001 26 optimizer """adam""" +1001 26 training_loop """owa""" +1001 26 negative_sampler """basic""" +1001 
26 evaluator """rankbased""" +1001 27 dataset """fb15k237""" +1001 27 model """unstructuredmodel""" +1001 27 loss """marginranking""" +1001 27 regularizer """no""" +1001 27 optimizer """adam""" +1001 27 training_loop """owa""" +1001 27 negative_sampler """basic""" +1001 27 evaluator """rankbased""" +1001 28 dataset """fb15k237""" +1001 28 model """unstructuredmodel""" +1001 28 loss """marginranking""" +1001 28 regularizer """no""" +1001 28 optimizer """adam""" +1001 28 training_loop """owa""" +1001 28 negative_sampler """basic""" +1001 28 evaluator """rankbased""" +1001 29 dataset """fb15k237""" +1001 29 model """unstructuredmodel""" +1001 29 loss """marginranking""" +1001 29 regularizer """no""" +1001 29 optimizer """adam""" +1001 29 training_loop """owa""" +1001 29 negative_sampler """basic""" +1001 29 evaluator """rankbased""" +1001 30 dataset """fb15k237""" +1001 30 model """unstructuredmodel""" +1001 30 loss """marginranking""" +1001 30 regularizer """no""" +1001 30 optimizer """adam""" +1001 30 training_loop """owa""" +1001 30 negative_sampler """basic""" +1001 30 evaluator """rankbased""" +1001 31 dataset """fb15k237""" +1001 31 model """unstructuredmodel""" +1001 31 loss """marginranking""" +1001 31 regularizer """no""" +1001 31 optimizer """adam""" +1001 31 training_loop """owa""" +1001 31 negative_sampler """basic""" +1001 31 evaluator """rankbased""" +1001 32 dataset """fb15k237""" +1001 32 model """unstructuredmodel""" +1001 32 loss """marginranking""" +1001 32 regularizer """no""" +1001 32 optimizer """adam""" +1001 32 training_loop """owa""" +1001 32 negative_sampler """basic""" +1001 32 evaluator """rankbased""" +1001 33 dataset """fb15k237""" +1001 33 model """unstructuredmodel""" +1001 33 loss """marginranking""" +1001 33 regularizer """no""" +1001 33 optimizer """adam""" +1001 33 training_loop """owa""" +1001 33 negative_sampler """basic""" +1001 33 evaluator """rankbased""" +1001 34 dataset """fb15k237""" +1001 34 model """unstructuredmodel""" 
+1001 34 loss """marginranking""" +1001 34 regularizer """no""" +1001 34 optimizer """adam""" +1001 34 training_loop """owa""" +1001 34 negative_sampler """basic""" +1001 34 evaluator """rankbased""" +1001 35 dataset """fb15k237""" +1001 35 model """unstructuredmodel""" +1001 35 loss """marginranking""" +1001 35 regularizer """no""" +1001 35 optimizer """adam""" +1001 35 training_loop """owa""" +1001 35 negative_sampler """basic""" +1001 35 evaluator """rankbased""" +1001 36 dataset """fb15k237""" +1001 36 model """unstructuredmodel""" +1001 36 loss """marginranking""" +1001 36 regularizer """no""" +1001 36 optimizer """adam""" +1001 36 training_loop """owa""" +1001 36 negative_sampler """basic""" +1001 36 evaluator """rankbased""" +1001 37 dataset """fb15k237""" +1001 37 model """unstructuredmodel""" +1001 37 loss """marginranking""" +1001 37 regularizer """no""" +1001 37 optimizer """adam""" +1001 37 training_loop """owa""" +1001 37 negative_sampler """basic""" +1001 37 evaluator """rankbased""" +1001 38 dataset """fb15k237""" +1001 38 model """unstructuredmodel""" +1001 38 loss """marginranking""" +1001 38 regularizer """no""" +1001 38 optimizer """adam""" +1001 38 training_loop """owa""" +1001 38 negative_sampler """basic""" +1001 38 evaluator """rankbased""" +1001 39 dataset """fb15k237""" +1001 39 model """unstructuredmodel""" +1001 39 loss """marginranking""" +1001 39 regularizer """no""" +1001 39 optimizer """adam""" +1001 39 training_loop """owa""" +1001 39 negative_sampler """basic""" +1001 39 evaluator """rankbased""" +1001 40 dataset """fb15k237""" +1001 40 model """unstructuredmodel""" +1001 40 loss """marginranking""" +1001 40 regularizer """no""" +1001 40 optimizer """adam""" +1001 40 training_loop """owa""" +1001 40 negative_sampler """basic""" +1001 40 evaluator """rankbased""" +1001 41 dataset """fb15k237""" +1001 41 model """unstructuredmodel""" +1001 41 loss """marginranking""" +1001 41 regularizer """no""" +1001 41 optimizer """adam""" +1001 41 
training_loop """owa""" +1001 41 negative_sampler """basic""" +1001 41 evaluator """rankbased""" +1001 42 dataset """fb15k237""" +1001 42 model """unstructuredmodel""" +1001 42 loss """marginranking""" +1001 42 regularizer """no""" +1001 42 optimizer """adam""" +1001 42 training_loop """owa""" +1001 42 negative_sampler """basic""" +1001 42 evaluator """rankbased""" +1001 43 dataset """fb15k237""" +1001 43 model """unstructuredmodel""" +1001 43 loss """marginranking""" +1001 43 regularizer """no""" +1001 43 optimizer """adam""" +1001 43 training_loop """owa""" +1001 43 negative_sampler """basic""" +1001 43 evaluator """rankbased""" +1001 44 dataset """fb15k237""" +1001 44 model """unstructuredmodel""" +1001 44 loss """marginranking""" +1001 44 regularizer """no""" +1001 44 optimizer """adam""" +1001 44 training_loop """owa""" +1001 44 negative_sampler """basic""" +1001 44 evaluator """rankbased""" +1001 45 dataset """fb15k237""" +1001 45 model """unstructuredmodel""" +1001 45 loss """marginranking""" +1001 45 regularizer """no""" +1001 45 optimizer """adam""" +1001 45 training_loop """owa""" +1001 45 negative_sampler """basic""" +1001 45 evaluator """rankbased""" +1001 46 dataset """fb15k237""" +1001 46 model """unstructuredmodel""" +1001 46 loss """marginranking""" +1001 46 regularizer """no""" +1001 46 optimizer """adam""" +1001 46 training_loop """owa""" +1001 46 negative_sampler """basic""" +1001 46 evaluator """rankbased""" +1001 47 dataset """fb15k237""" +1001 47 model """unstructuredmodel""" +1001 47 loss """marginranking""" +1001 47 regularizer """no""" +1001 47 optimizer """adam""" +1001 47 training_loop """owa""" +1001 47 negative_sampler """basic""" +1001 47 evaluator """rankbased""" +1001 48 dataset """fb15k237""" +1001 48 model """unstructuredmodel""" +1001 48 loss """marginranking""" +1001 48 regularizer """no""" +1001 48 optimizer """adam""" +1001 48 training_loop """owa""" +1001 48 negative_sampler """basic""" +1001 48 evaluator """rankbased""" +1001 
49 dataset """fb15k237""" +1001 49 model """unstructuredmodel""" +1001 49 loss """marginranking""" +1001 49 regularizer """no""" +1001 49 optimizer """adam""" +1001 49 training_loop """owa""" +1001 49 negative_sampler """basic""" +1001 49 evaluator """rankbased""" +1001 50 dataset """fb15k237""" +1001 50 model """unstructuredmodel""" +1001 50 loss """marginranking""" +1001 50 regularizer """no""" +1001 50 optimizer """adam""" +1001 50 training_loop """owa""" +1001 50 negative_sampler """basic""" +1001 50 evaluator """rankbased""" +1001 51 dataset """fb15k237""" +1001 51 model """unstructuredmodel""" +1001 51 loss """marginranking""" +1001 51 regularizer """no""" +1001 51 optimizer """adam""" +1001 51 training_loop """owa""" +1001 51 negative_sampler """basic""" +1001 51 evaluator """rankbased""" +1001 52 dataset """fb15k237""" +1001 52 model """unstructuredmodel""" +1001 52 loss """marginranking""" +1001 52 regularizer """no""" +1001 52 optimizer """adam""" +1001 52 training_loop """owa""" +1001 52 negative_sampler """basic""" +1001 52 evaluator """rankbased""" +1001 53 dataset """fb15k237""" +1001 53 model """unstructuredmodel""" +1001 53 loss """marginranking""" +1001 53 regularizer """no""" +1001 53 optimizer """adam""" +1001 53 training_loop """owa""" +1001 53 negative_sampler """basic""" +1001 53 evaluator """rankbased""" +1001 54 dataset """fb15k237""" +1001 54 model """unstructuredmodel""" +1001 54 loss """marginranking""" +1001 54 regularizer """no""" +1001 54 optimizer """adam""" +1001 54 training_loop """owa""" +1001 54 negative_sampler """basic""" +1001 54 evaluator """rankbased""" +1001 55 dataset """fb15k237""" +1001 55 model """unstructuredmodel""" +1001 55 loss """marginranking""" +1001 55 regularizer """no""" +1001 55 optimizer """adam""" +1001 55 training_loop """owa""" +1001 55 negative_sampler """basic""" +1001 55 evaluator """rankbased""" +1001 56 dataset """fb15k237""" +1001 56 model """unstructuredmodel""" +1001 56 loss """marginranking""" 
+1001 56 regularizer """no""" +1001 56 optimizer """adam""" +1001 56 training_loop """owa""" +1001 56 negative_sampler """basic""" +1001 56 evaluator """rankbased""" +1001 57 dataset """fb15k237""" +1001 57 model """unstructuredmodel""" +1001 57 loss """marginranking""" +1001 57 regularizer """no""" +1001 57 optimizer """adam""" +1001 57 training_loop """owa""" +1001 57 negative_sampler """basic""" +1001 57 evaluator """rankbased""" +1001 58 dataset """fb15k237""" +1001 58 model """unstructuredmodel""" +1001 58 loss """marginranking""" +1001 58 regularizer """no""" +1001 58 optimizer """adam""" +1001 58 training_loop """owa""" +1001 58 negative_sampler """basic""" +1001 58 evaluator """rankbased""" +1001 59 dataset """fb15k237""" +1001 59 model """unstructuredmodel""" +1001 59 loss """marginranking""" +1001 59 regularizer """no""" +1001 59 optimizer """adam""" +1001 59 training_loop """owa""" +1001 59 negative_sampler """basic""" +1001 59 evaluator """rankbased""" +1001 60 dataset """fb15k237""" +1001 60 model """unstructuredmodel""" +1001 60 loss """marginranking""" +1001 60 regularizer """no""" +1001 60 optimizer """adam""" +1001 60 training_loop """owa""" +1001 60 negative_sampler """basic""" +1001 60 evaluator """rankbased""" +1001 61 dataset """fb15k237""" +1001 61 model """unstructuredmodel""" +1001 61 loss """marginranking""" +1001 61 regularizer """no""" +1001 61 optimizer """adam""" +1001 61 training_loop """owa""" +1001 61 negative_sampler """basic""" +1001 61 evaluator """rankbased""" +1001 62 dataset """fb15k237""" +1001 62 model """unstructuredmodel""" +1001 62 loss """marginranking""" +1001 62 regularizer """no""" +1001 62 optimizer """adam""" +1001 62 training_loop """owa""" +1001 62 negative_sampler """basic""" +1001 62 evaluator """rankbased""" +1001 63 dataset """fb15k237""" +1001 63 model """unstructuredmodel""" +1001 63 loss """marginranking""" +1001 63 regularizer """no""" +1001 63 optimizer """adam""" +1001 63 training_loop """owa""" +1001 63 
negative_sampler """basic""" +1001 63 evaluator """rankbased""" +1001 64 dataset """fb15k237""" +1001 64 model """unstructuredmodel""" +1001 64 loss """marginranking""" +1001 64 regularizer """no""" +1001 64 optimizer """adam""" +1001 64 training_loop """owa""" +1001 64 negative_sampler """basic""" +1001 64 evaluator """rankbased""" +1001 65 dataset """fb15k237""" +1001 65 model """unstructuredmodel""" +1001 65 loss """marginranking""" +1001 65 regularizer """no""" +1001 65 optimizer """adam""" +1001 65 training_loop """owa""" +1001 65 negative_sampler """basic""" +1001 65 evaluator """rankbased""" +1001 66 dataset """fb15k237""" +1001 66 model """unstructuredmodel""" +1001 66 loss """marginranking""" +1001 66 regularizer """no""" +1001 66 optimizer """adam""" +1001 66 training_loop """owa""" +1001 66 negative_sampler """basic""" +1001 66 evaluator """rankbased""" +1001 67 dataset """fb15k237""" +1001 67 model """unstructuredmodel""" +1001 67 loss """marginranking""" +1001 67 regularizer """no""" +1001 67 optimizer """adam""" +1001 67 training_loop """owa""" +1001 67 negative_sampler """basic""" +1001 67 evaluator """rankbased""" +1001 68 dataset """fb15k237""" +1001 68 model """unstructuredmodel""" +1001 68 loss """marginranking""" +1001 68 regularizer """no""" +1001 68 optimizer """adam""" +1001 68 training_loop """owa""" +1001 68 negative_sampler """basic""" +1001 68 evaluator """rankbased""" +1001 69 dataset """fb15k237""" +1001 69 model """unstructuredmodel""" +1001 69 loss """marginranking""" +1001 69 regularizer """no""" +1001 69 optimizer """adam""" +1001 69 training_loop """owa""" +1001 69 negative_sampler """basic""" +1001 69 evaluator """rankbased""" +1001 70 dataset """fb15k237""" +1001 70 model """unstructuredmodel""" +1001 70 loss """marginranking""" +1001 70 regularizer """no""" +1001 70 optimizer """adam""" +1001 70 training_loop """owa""" +1001 70 negative_sampler """basic""" +1001 70 evaluator """rankbased""" +1001 71 dataset """fb15k237""" +1001 
71 model """unstructuredmodel""" +1001 71 loss """marginranking""" +1001 71 regularizer """no""" +1001 71 optimizer """adam""" +1001 71 training_loop """owa""" +1001 71 negative_sampler """basic""" +1001 71 evaluator """rankbased""" +1001 72 dataset """fb15k237""" +1001 72 model """unstructuredmodel""" +1001 72 loss """marginranking""" +1001 72 regularizer """no""" +1001 72 optimizer """adam""" +1001 72 training_loop """owa""" +1001 72 negative_sampler """basic""" +1001 72 evaluator """rankbased""" +1001 73 dataset """fb15k237""" +1001 73 model """unstructuredmodel""" +1001 73 loss """marginranking""" +1001 73 regularizer """no""" +1001 73 optimizer """adam""" +1001 73 training_loop """owa""" +1001 73 negative_sampler """basic""" +1001 73 evaluator """rankbased""" +1001 74 dataset """fb15k237""" +1001 74 model """unstructuredmodel""" +1001 74 loss """marginranking""" +1001 74 regularizer """no""" +1001 74 optimizer """adam""" +1001 74 training_loop """owa""" +1001 74 negative_sampler """basic""" +1001 74 evaluator """rankbased""" +1001 75 dataset """fb15k237""" +1001 75 model """unstructuredmodel""" +1001 75 loss """marginranking""" +1001 75 regularizer """no""" +1001 75 optimizer """adam""" +1001 75 training_loop """owa""" +1001 75 negative_sampler """basic""" +1001 75 evaluator """rankbased""" +1001 76 dataset """fb15k237""" +1001 76 model """unstructuredmodel""" +1001 76 loss """marginranking""" +1001 76 regularizer """no""" +1001 76 optimizer """adam""" +1001 76 training_loop """owa""" +1001 76 negative_sampler """basic""" +1001 76 evaluator """rankbased""" +1001 77 dataset """fb15k237""" +1001 77 model """unstructuredmodel""" +1001 77 loss """marginranking""" +1001 77 regularizer """no""" +1001 77 optimizer """adam""" +1001 77 training_loop """owa""" +1001 77 negative_sampler """basic""" +1001 77 evaluator """rankbased""" +1001 78 dataset """fb15k237""" +1001 78 model """unstructuredmodel""" +1001 78 loss """marginranking""" +1001 78 regularizer """no""" +1001 
78 optimizer """adam""" +1001 78 training_loop """owa""" +1001 78 negative_sampler """basic""" +1001 78 evaluator """rankbased""" +1001 79 dataset """fb15k237""" +1001 79 model """unstructuredmodel""" +1001 79 loss """marginranking""" +1001 79 regularizer """no""" +1001 79 optimizer """adam""" +1001 79 training_loop """owa""" +1001 79 negative_sampler """basic""" +1001 79 evaluator """rankbased""" +1001 80 dataset """fb15k237""" +1001 80 model """unstructuredmodel""" +1001 80 loss """marginranking""" +1001 80 regularizer """no""" +1001 80 optimizer """adam""" +1001 80 training_loop """owa""" +1001 80 negative_sampler """basic""" +1001 80 evaluator """rankbased""" +1001 81 dataset """fb15k237""" +1001 81 model """unstructuredmodel""" +1001 81 loss """marginranking""" +1001 81 regularizer """no""" +1001 81 optimizer """adam""" +1001 81 training_loop """owa""" +1001 81 negative_sampler """basic""" +1001 81 evaluator """rankbased""" +1001 82 dataset """fb15k237""" +1001 82 model """unstructuredmodel""" +1001 82 loss """marginranking""" +1001 82 regularizer """no""" +1001 82 optimizer """adam""" +1001 82 training_loop """owa""" +1001 82 negative_sampler """basic""" +1001 82 evaluator """rankbased""" +1001 83 dataset """fb15k237""" +1001 83 model """unstructuredmodel""" +1001 83 loss """marginranking""" +1001 83 regularizer """no""" +1001 83 optimizer """adam""" +1001 83 training_loop """owa""" +1001 83 negative_sampler """basic""" +1001 83 evaluator """rankbased""" +1001 84 dataset """fb15k237""" +1001 84 model """unstructuredmodel""" +1001 84 loss """marginranking""" +1001 84 regularizer """no""" +1001 84 optimizer """adam""" +1001 84 training_loop """owa""" +1001 84 negative_sampler """basic""" +1001 84 evaluator """rankbased""" +1001 85 dataset """fb15k237""" +1001 85 model """unstructuredmodel""" +1001 85 loss """marginranking""" +1001 85 regularizer """no""" +1001 85 optimizer """adam""" +1001 85 training_loop """owa""" +1001 85 negative_sampler """basic""" +1001 
85 evaluator """rankbased""" +1001 86 dataset """fb15k237""" +1001 86 model """unstructuredmodel""" +1001 86 loss """marginranking""" +1001 86 regularizer """no""" +1001 86 optimizer """adam""" +1001 86 training_loop """owa""" +1001 86 negative_sampler """basic""" +1001 86 evaluator """rankbased""" +1001 87 dataset """fb15k237""" +1001 87 model """unstructuredmodel""" +1001 87 loss """marginranking""" +1001 87 regularizer """no""" +1001 87 optimizer """adam""" +1001 87 training_loop """owa""" +1001 87 negative_sampler """basic""" +1001 87 evaluator """rankbased""" +1001 88 dataset """fb15k237""" +1001 88 model """unstructuredmodel""" +1001 88 loss """marginranking""" +1001 88 regularizer """no""" +1001 88 optimizer """adam""" +1001 88 training_loop """owa""" +1001 88 negative_sampler """basic""" +1001 88 evaluator """rankbased""" +1001 89 dataset """fb15k237""" +1001 89 model """unstructuredmodel""" +1001 89 loss """marginranking""" +1001 89 regularizer """no""" +1001 89 optimizer """adam""" +1001 89 training_loop """owa""" +1001 89 negative_sampler """basic""" +1001 89 evaluator """rankbased""" +1001 90 dataset """fb15k237""" +1001 90 model """unstructuredmodel""" +1001 90 loss """marginranking""" +1001 90 regularizer """no""" +1001 90 optimizer """adam""" +1001 90 training_loop """owa""" +1001 90 negative_sampler """basic""" +1001 90 evaluator """rankbased""" +1001 91 dataset """fb15k237""" +1001 91 model """unstructuredmodel""" +1001 91 loss """marginranking""" +1001 91 regularizer """no""" +1001 91 optimizer """adam""" +1001 91 training_loop """owa""" +1001 91 negative_sampler """basic""" +1001 91 evaluator """rankbased""" +1001 92 dataset """fb15k237""" +1001 92 model """unstructuredmodel""" +1001 92 loss """marginranking""" +1001 92 regularizer """no""" +1001 92 optimizer """adam""" +1001 92 training_loop """owa""" +1001 92 negative_sampler """basic""" +1001 92 evaluator """rankbased""" +1001 93 dataset """fb15k237""" +1001 93 model """unstructuredmodel""" 
+1001 93 loss """marginranking""" +1001 93 regularizer """no""" +1001 93 optimizer """adam""" +1001 93 training_loop """owa""" +1001 93 negative_sampler """basic""" +1001 93 evaluator """rankbased""" +1001 94 dataset """fb15k237""" +1001 94 model """unstructuredmodel""" +1001 94 loss """marginranking""" +1001 94 regularizer """no""" +1001 94 optimizer """adam""" +1001 94 training_loop """owa""" +1001 94 negative_sampler """basic""" +1001 94 evaluator """rankbased""" +1001 95 dataset """fb15k237""" +1001 95 model """unstructuredmodel""" +1001 95 loss """marginranking""" +1001 95 regularizer """no""" +1001 95 optimizer """adam""" +1001 95 training_loop """owa""" +1001 95 negative_sampler """basic""" +1001 95 evaluator """rankbased""" +1001 96 dataset """fb15k237""" +1001 96 model """unstructuredmodel""" +1001 96 loss """marginranking""" +1001 96 regularizer """no""" +1001 96 optimizer """adam""" +1001 96 training_loop """owa""" +1001 96 negative_sampler """basic""" +1001 96 evaluator """rankbased""" +1001 97 dataset """fb15k237""" +1001 97 model """unstructuredmodel""" +1001 97 loss """marginranking""" +1001 97 regularizer """no""" +1001 97 optimizer """adam""" +1001 97 training_loop """owa""" +1001 97 negative_sampler """basic""" +1001 97 evaluator """rankbased""" +1001 98 dataset """fb15k237""" +1001 98 model """unstructuredmodel""" +1001 98 loss """marginranking""" +1001 98 regularizer """no""" +1001 98 optimizer """adam""" +1001 98 training_loop """owa""" +1001 98 negative_sampler """basic""" +1001 98 evaluator """rankbased""" +1001 99 dataset """fb15k237""" +1001 99 model """unstructuredmodel""" +1001 99 loss """marginranking""" +1001 99 regularizer """no""" +1001 99 optimizer """adam""" +1001 99 training_loop """owa""" +1001 99 negative_sampler """basic""" +1001 99 evaluator """rankbased""" +1001 100 dataset """fb15k237""" +1001 100 model """unstructuredmodel""" +1001 100 loss """marginranking""" +1001 100 regularizer """no""" +1001 100 optimizer """adam""" 
+1001 100 training_loop """owa""" +1001 100 negative_sampler """basic""" +1001 100 evaluator """rankbased""" +1002 1 model.embedding_dim 0.0 +1002 1 model.scoring_fct_norm 1.0 +1002 1 optimizer.lr 0.07195754456493139 +1002 1 negative_sampler.num_negs_per_pos 61.0 +1002 1 training.batch_size 2.0 +1002 2 model.embedding_dim 0.0 +1002 2 model.scoring_fct_norm 2.0 +1002 2 optimizer.lr 0.0034654777906048353 +1002 2 negative_sampler.num_negs_per_pos 48.0 +1002 2 training.batch_size 1.0 +1002 3 model.embedding_dim 1.0 +1002 3 model.scoring_fct_norm 1.0 +1002 3 optimizer.lr 0.0029681550836456326 +1002 3 negative_sampler.num_negs_per_pos 37.0 +1002 3 training.batch_size 2.0 +1002 4 model.embedding_dim 1.0 +1002 4 model.scoring_fct_norm 2.0 +1002 4 optimizer.lr 0.004191210521509939 +1002 4 negative_sampler.num_negs_per_pos 75.0 +1002 4 training.batch_size 1.0 +1002 5 model.embedding_dim 1.0 +1002 5 model.scoring_fct_norm 2.0 +1002 5 optimizer.lr 0.08857324144741006 +1002 5 negative_sampler.num_negs_per_pos 42.0 +1002 5 training.batch_size 2.0 +1002 6 model.embedding_dim 0.0 +1002 6 model.scoring_fct_norm 2.0 +1002 6 optimizer.lr 0.03133995041499329 +1002 6 negative_sampler.num_negs_per_pos 18.0 +1002 6 training.batch_size 0.0 +1002 7 model.embedding_dim 2.0 +1002 7 model.scoring_fct_norm 2.0 +1002 7 optimizer.lr 0.023630914305804607 +1002 7 negative_sampler.num_negs_per_pos 20.0 +1002 7 training.batch_size 1.0 +1002 8 model.embedding_dim 2.0 +1002 8 model.scoring_fct_norm 2.0 +1002 8 optimizer.lr 0.0011537393280958554 +1002 8 negative_sampler.num_negs_per_pos 58.0 +1002 8 training.batch_size 2.0 +1002 9 model.embedding_dim 1.0 +1002 9 model.scoring_fct_norm 2.0 +1002 9 optimizer.lr 0.0016360324422017513 +1002 9 negative_sampler.num_negs_per_pos 60.0 +1002 9 training.batch_size 0.0 +1002 10 model.embedding_dim 1.0 +1002 10 model.scoring_fct_norm 1.0 +1002 10 optimizer.lr 0.0016064032490644134 +1002 10 negative_sampler.num_negs_per_pos 40.0 +1002 10 training.batch_size 2.0 
+1002 11 model.embedding_dim 2.0 +1002 11 model.scoring_fct_norm 2.0 +1002 11 optimizer.lr 0.002018391918044986 +1002 11 negative_sampler.num_negs_per_pos 35.0 +1002 11 training.batch_size 2.0 +1002 12 model.embedding_dim 1.0 +1002 12 model.scoring_fct_norm 2.0 +1002 12 optimizer.lr 0.0468099652003049 +1002 12 negative_sampler.num_negs_per_pos 80.0 +1002 12 training.batch_size 1.0 +1002 13 model.embedding_dim 2.0 +1002 13 model.scoring_fct_norm 1.0 +1002 13 optimizer.lr 0.05732972841451761 +1002 13 negative_sampler.num_negs_per_pos 22.0 +1002 13 training.batch_size 0.0 +1002 14 model.embedding_dim 0.0 +1002 14 model.scoring_fct_norm 2.0 +1002 14 optimizer.lr 0.019684012635460785 +1002 14 negative_sampler.num_negs_per_pos 80.0 +1002 14 training.batch_size 2.0 +1002 15 model.embedding_dim 0.0 +1002 15 model.scoring_fct_norm 1.0 +1002 15 optimizer.lr 0.0012248597376172136 +1002 15 negative_sampler.num_negs_per_pos 23.0 +1002 15 training.batch_size 0.0 +1002 16 model.embedding_dim 1.0 +1002 16 model.scoring_fct_norm 2.0 +1002 16 optimizer.lr 0.04065701399468606 +1002 16 negative_sampler.num_negs_per_pos 39.0 +1002 16 training.batch_size 1.0 +1002 17 model.embedding_dim 0.0 +1002 17 model.scoring_fct_norm 1.0 +1002 17 optimizer.lr 0.019326624142070908 +1002 17 negative_sampler.num_negs_per_pos 34.0 +1002 17 training.batch_size 2.0 +1002 18 model.embedding_dim 0.0 +1002 18 model.scoring_fct_norm 2.0 +1002 18 optimizer.lr 0.006957954399156449 +1002 18 negative_sampler.num_negs_per_pos 53.0 +1002 18 training.batch_size 0.0 +1002 19 model.embedding_dim 1.0 +1002 19 model.scoring_fct_norm 1.0 +1002 19 optimizer.lr 0.005731888698157765 +1002 19 negative_sampler.num_negs_per_pos 96.0 +1002 19 training.batch_size 2.0 +1002 20 model.embedding_dim 2.0 +1002 20 model.scoring_fct_norm 1.0 +1002 20 optimizer.lr 0.00911986816317139 +1002 20 negative_sampler.num_negs_per_pos 17.0 +1002 20 training.batch_size 2.0 +1002 21 model.embedding_dim 0.0 +1002 21 model.scoring_fct_norm 2.0 
+1002 21 optimizer.lr 0.0630761962529182 +1002 21 negative_sampler.num_negs_per_pos 52.0 +1002 21 training.batch_size 2.0 +1002 22 model.embedding_dim 0.0 +1002 22 model.scoring_fct_norm 1.0 +1002 22 optimizer.lr 0.01855075092517443 +1002 22 negative_sampler.num_negs_per_pos 59.0 +1002 22 training.batch_size 0.0 +1002 23 model.embedding_dim 1.0 +1002 23 model.scoring_fct_norm 1.0 +1002 23 optimizer.lr 0.0028182508916765945 +1002 23 negative_sampler.num_negs_per_pos 29.0 +1002 23 training.batch_size 0.0 +1002 24 model.embedding_dim 0.0 +1002 24 model.scoring_fct_norm 1.0 +1002 24 optimizer.lr 0.005830331368036977 +1002 24 negative_sampler.num_negs_per_pos 9.0 +1002 24 training.batch_size 2.0 +1002 25 model.embedding_dim 2.0 +1002 25 model.scoring_fct_norm 1.0 +1002 25 optimizer.lr 0.06398715528411704 +1002 25 negative_sampler.num_negs_per_pos 45.0 +1002 25 training.batch_size 1.0 +1002 26 model.embedding_dim 0.0 +1002 26 model.scoring_fct_norm 1.0 +1002 26 optimizer.lr 0.009700484108000307 +1002 26 negative_sampler.num_negs_per_pos 38.0 +1002 26 training.batch_size 0.0 +1002 27 model.embedding_dim 2.0 +1002 27 model.scoring_fct_norm 2.0 +1002 27 optimizer.lr 0.03211584863047269 +1002 27 negative_sampler.num_negs_per_pos 89.0 +1002 27 training.batch_size 2.0 +1002 28 model.embedding_dim 0.0 +1002 28 model.scoring_fct_norm 1.0 +1002 28 optimizer.lr 0.04795017388690871 +1002 28 negative_sampler.num_negs_per_pos 0.0 +1002 28 training.batch_size 2.0 +1002 29 model.embedding_dim 0.0 +1002 29 model.scoring_fct_norm 1.0 +1002 29 optimizer.lr 0.03289252179253562 +1002 29 negative_sampler.num_negs_per_pos 96.0 +1002 29 training.batch_size 0.0 +1002 30 model.embedding_dim 0.0 +1002 30 model.scoring_fct_norm 2.0 +1002 30 optimizer.lr 0.04454885992538632 +1002 30 negative_sampler.num_negs_per_pos 23.0 +1002 30 training.batch_size 2.0 +1002 31 model.embedding_dim 1.0 +1002 31 model.scoring_fct_norm 2.0 +1002 31 optimizer.lr 0.0011113856508807739 +1002 31 
negative_sampler.num_negs_per_pos 82.0 +1002 31 training.batch_size 2.0 +1002 32 model.embedding_dim 2.0 +1002 32 model.scoring_fct_norm 1.0 +1002 32 optimizer.lr 0.06619068151338404 +1002 32 negative_sampler.num_negs_per_pos 66.0 +1002 32 training.batch_size 2.0 +1002 33 model.embedding_dim 1.0 +1002 33 model.scoring_fct_norm 2.0 +1002 33 optimizer.lr 0.00295179019725155 +1002 33 negative_sampler.num_negs_per_pos 22.0 +1002 33 training.batch_size 0.0 +1002 34 model.embedding_dim 0.0 +1002 34 model.scoring_fct_norm 1.0 +1002 34 optimizer.lr 0.0011404730132243157 +1002 34 negative_sampler.num_negs_per_pos 67.0 +1002 34 training.batch_size 1.0 +1002 35 model.embedding_dim 0.0 +1002 35 model.scoring_fct_norm 2.0 +1002 35 optimizer.lr 0.00344279740014495 +1002 35 negative_sampler.num_negs_per_pos 90.0 +1002 35 training.batch_size 2.0 +1002 36 model.embedding_dim 2.0 +1002 36 model.scoring_fct_norm 1.0 +1002 36 optimizer.lr 0.0013994230161090341 +1002 36 negative_sampler.num_negs_per_pos 51.0 +1002 36 training.batch_size 2.0 +1002 37 model.embedding_dim 2.0 +1002 37 model.scoring_fct_norm 2.0 +1002 37 optimizer.lr 0.0051993737676037455 +1002 37 negative_sampler.num_negs_per_pos 74.0 +1002 37 training.batch_size 1.0 +1002 38 model.embedding_dim 2.0 +1002 38 model.scoring_fct_norm 2.0 +1002 38 optimizer.lr 0.015685733283585707 +1002 38 negative_sampler.num_negs_per_pos 58.0 +1002 38 training.batch_size 2.0 +1002 39 model.embedding_dim 2.0 +1002 39 model.scoring_fct_norm 1.0 +1002 39 optimizer.lr 0.050283254517421924 +1002 39 negative_sampler.num_negs_per_pos 51.0 +1002 39 training.batch_size 0.0 +1002 40 model.embedding_dim 0.0 +1002 40 model.scoring_fct_norm 2.0 +1002 40 optimizer.lr 0.001580636413490356 +1002 40 negative_sampler.num_negs_per_pos 16.0 +1002 40 training.batch_size 0.0 +1002 41 model.embedding_dim 1.0 +1002 41 model.scoring_fct_norm 2.0 +1002 41 optimizer.lr 0.01175184193744043 +1002 41 negative_sampler.num_negs_per_pos 80.0 +1002 41 training.batch_size 
2.0 +1002 42 model.embedding_dim 2.0 +1002 42 model.scoring_fct_norm 1.0 +1002 42 optimizer.lr 0.05721767219886727 +1002 42 negative_sampler.num_negs_per_pos 64.0 +1002 42 training.batch_size 0.0 +1002 43 model.embedding_dim 2.0 +1002 43 model.scoring_fct_norm 1.0 +1002 43 optimizer.lr 0.0012102792460816365 +1002 43 negative_sampler.num_negs_per_pos 13.0 +1002 43 training.batch_size 1.0 +1002 44 model.embedding_dim 1.0 +1002 44 model.scoring_fct_norm 1.0 +1002 44 optimizer.lr 0.0028795414038490993 +1002 44 negative_sampler.num_negs_per_pos 56.0 +1002 44 training.batch_size 0.0 +1002 45 model.embedding_dim 0.0 +1002 45 model.scoring_fct_norm 2.0 +1002 45 optimizer.lr 0.0425546784683584 +1002 45 negative_sampler.num_negs_per_pos 15.0 +1002 45 training.batch_size 0.0 +1002 46 model.embedding_dim 1.0 +1002 46 model.scoring_fct_norm 2.0 +1002 46 optimizer.lr 0.0020549950166137455 +1002 46 negative_sampler.num_negs_per_pos 56.0 +1002 46 training.batch_size 2.0 +1002 47 model.embedding_dim 2.0 +1002 47 model.scoring_fct_norm 1.0 +1002 47 optimizer.lr 0.0070969600763569505 +1002 47 negative_sampler.num_negs_per_pos 2.0 +1002 47 training.batch_size 1.0 +1002 48 model.embedding_dim 1.0 +1002 48 model.scoring_fct_norm 1.0 +1002 48 optimizer.lr 0.016907945562352023 +1002 48 negative_sampler.num_negs_per_pos 9.0 +1002 48 training.batch_size 2.0 +1002 49 model.embedding_dim 0.0 +1002 49 model.scoring_fct_norm 2.0 +1002 49 optimizer.lr 0.008507166969989287 +1002 49 negative_sampler.num_negs_per_pos 62.0 +1002 49 training.batch_size 1.0 +1002 50 model.embedding_dim 0.0 +1002 50 model.scoring_fct_norm 2.0 +1002 50 optimizer.lr 0.01782020442271896 +1002 50 negative_sampler.num_negs_per_pos 93.0 +1002 50 training.batch_size 2.0 +1002 51 model.embedding_dim 0.0 +1002 51 model.scoring_fct_norm 2.0 +1002 51 optimizer.lr 0.005635255413858733 +1002 51 negative_sampler.num_negs_per_pos 30.0 +1002 51 training.batch_size 0.0 +1002 52 model.embedding_dim 2.0 +1002 52 model.scoring_fct_norm 
2.0 +1002 52 optimizer.lr 0.09852755629978674 +1002 52 negative_sampler.num_negs_per_pos 76.0 +1002 52 training.batch_size 0.0 +1002 53 model.embedding_dim 1.0 +1002 53 model.scoring_fct_norm 2.0 +1002 53 optimizer.lr 0.002264041062792191 +1002 53 negative_sampler.num_negs_per_pos 34.0 +1002 53 training.batch_size 1.0 +1002 54 model.embedding_dim 2.0 +1002 54 model.scoring_fct_norm 1.0 +1002 54 optimizer.lr 0.01123245990761361 +1002 54 negative_sampler.num_negs_per_pos 55.0 +1002 54 training.batch_size 1.0 +1002 55 model.embedding_dim 1.0 +1002 55 model.scoring_fct_norm 1.0 +1002 55 optimizer.lr 0.0018065256771074257 +1002 55 negative_sampler.num_negs_per_pos 85.0 +1002 55 training.batch_size 2.0 +1002 56 model.embedding_dim 0.0 +1002 56 model.scoring_fct_norm 1.0 +1002 56 optimizer.lr 0.005957926090566023 +1002 56 negative_sampler.num_negs_per_pos 51.0 +1002 56 training.batch_size 1.0 +1002 57 model.embedding_dim 0.0 +1002 57 model.scoring_fct_norm 2.0 +1002 57 optimizer.lr 0.01219519745751947 +1002 57 negative_sampler.num_negs_per_pos 72.0 +1002 57 training.batch_size 0.0 +1002 58 model.embedding_dim 2.0 +1002 58 model.scoring_fct_norm 1.0 +1002 58 optimizer.lr 0.0016477369418795016 +1002 58 negative_sampler.num_negs_per_pos 25.0 +1002 58 training.batch_size 2.0 +1002 59 model.embedding_dim 1.0 +1002 59 model.scoring_fct_norm 2.0 +1002 59 optimizer.lr 0.0705417286726397 +1002 59 negative_sampler.num_negs_per_pos 37.0 +1002 59 training.batch_size 2.0 +1002 60 model.embedding_dim 0.0 +1002 60 model.scoring_fct_norm 2.0 +1002 60 optimizer.lr 0.050601836412420255 +1002 60 negative_sampler.num_negs_per_pos 53.0 +1002 60 training.batch_size 2.0 +1002 61 model.embedding_dim 1.0 +1002 61 model.scoring_fct_norm 1.0 +1002 61 optimizer.lr 0.0890340198057435 +1002 61 negative_sampler.num_negs_per_pos 93.0 +1002 61 training.batch_size 1.0 +1002 62 model.embedding_dim 1.0 +1002 62 model.scoring_fct_norm 2.0 +1002 62 optimizer.lr 0.06314674816911828 +1002 62 
negative_sampler.num_negs_per_pos 52.0 +1002 62 training.batch_size 2.0 +1002 63 model.embedding_dim 2.0 +1002 63 model.scoring_fct_norm 2.0 +1002 63 optimizer.lr 0.01459568559318578 +1002 63 negative_sampler.num_negs_per_pos 41.0 +1002 63 training.batch_size 0.0 +1002 64 model.embedding_dim 0.0 +1002 64 model.scoring_fct_norm 2.0 +1002 64 optimizer.lr 0.005094343976015771 +1002 64 negative_sampler.num_negs_per_pos 94.0 +1002 64 training.batch_size 1.0 +1002 65 model.embedding_dim 1.0 +1002 65 model.scoring_fct_norm 1.0 +1002 65 optimizer.lr 0.030806231432641034 +1002 65 negative_sampler.num_negs_per_pos 97.0 +1002 65 training.batch_size 2.0 +1002 66 model.embedding_dim 0.0 +1002 66 model.scoring_fct_norm 1.0 +1002 66 optimizer.lr 0.003909792022950626 +1002 66 negative_sampler.num_negs_per_pos 62.0 +1002 66 training.batch_size 2.0 +1002 67 model.embedding_dim 0.0 +1002 67 model.scoring_fct_norm 1.0 +1002 67 optimizer.lr 0.00251829146473835 +1002 67 negative_sampler.num_negs_per_pos 90.0 +1002 67 training.batch_size 0.0 +1002 1 dataset """fb15k237""" +1002 1 model """unstructuredmodel""" +1002 1 loss """bceaftersigmoid""" +1002 1 regularizer """no""" +1002 1 optimizer """adam""" +1002 1 training_loop """owa""" +1002 1 negative_sampler """basic""" +1002 1 evaluator """rankbased""" +1002 2 dataset """fb15k237""" +1002 2 model """unstructuredmodel""" +1002 2 loss """bceaftersigmoid""" +1002 2 regularizer """no""" +1002 2 optimizer """adam""" +1002 2 training_loop """owa""" +1002 2 negative_sampler """basic""" +1002 2 evaluator """rankbased""" +1002 3 dataset """fb15k237""" +1002 3 model """unstructuredmodel""" +1002 3 loss """bceaftersigmoid""" +1002 3 regularizer """no""" +1002 3 optimizer """adam""" +1002 3 training_loop """owa""" +1002 3 negative_sampler """basic""" +1002 3 evaluator """rankbased""" +1002 4 dataset """fb15k237""" +1002 4 model """unstructuredmodel""" +1002 4 loss """bceaftersigmoid""" +1002 4 regularizer """no""" +1002 4 optimizer """adam""" +1002 4 
training_loop """owa""" +1002 4 negative_sampler """basic""" +1002 4 evaluator """rankbased""" +1002 5 dataset """fb15k237""" +1002 5 model """unstructuredmodel""" +1002 5 loss """bceaftersigmoid""" +1002 5 regularizer """no""" +1002 5 optimizer """adam""" +1002 5 training_loop """owa""" +1002 5 negative_sampler """basic""" +1002 5 evaluator """rankbased""" +1002 6 dataset """fb15k237""" +1002 6 model """unstructuredmodel""" +1002 6 loss """bceaftersigmoid""" +1002 6 regularizer """no""" +1002 6 optimizer """adam""" +1002 6 training_loop """owa""" +1002 6 negative_sampler """basic""" +1002 6 evaluator """rankbased""" +1002 7 dataset """fb15k237""" +1002 7 model """unstructuredmodel""" +1002 7 loss """bceaftersigmoid""" +1002 7 regularizer """no""" +1002 7 optimizer """adam""" +1002 7 training_loop """owa""" +1002 7 negative_sampler """basic""" +1002 7 evaluator """rankbased""" +1002 8 dataset """fb15k237""" +1002 8 model """unstructuredmodel""" +1002 8 loss """bceaftersigmoid""" +1002 8 regularizer """no""" +1002 8 optimizer """adam""" +1002 8 training_loop """owa""" +1002 8 negative_sampler """basic""" +1002 8 evaluator """rankbased""" +1002 9 dataset """fb15k237""" +1002 9 model """unstructuredmodel""" +1002 9 loss """bceaftersigmoid""" +1002 9 regularizer """no""" +1002 9 optimizer """adam""" +1002 9 training_loop """owa""" +1002 9 negative_sampler """basic""" +1002 9 evaluator """rankbased""" +1002 10 dataset """fb15k237""" +1002 10 model """unstructuredmodel""" +1002 10 loss """bceaftersigmoid""" +1002 10 regularizer """no""" +1002 10 optimizer """adam""" +1002 10 training_loop """owa""" +1002 10 negative_sampler """basic""" +1002 10 evaluator """rankbased""" +1002 11 dataset """fb15k237""" +1002 11 model """unstructuredmodel""" +1002 11 loss """bceaftersigmoid""" +1002 11 regularizer """no""" +1002 11 optimizer """adam""" +1002 11 training_loop """owa""" +1002 11 negative_sampler """basic""" +1002 11 evaluator """rankbased""" +1002 12 dataset """fb15k237""" 
+1002 12 model """unstructuredmodel""" +1002 12 loss """bceaftersigmoid""" +1002 12 regularizer """no""" +1002 12 optimizer """adam""" +1002 12 training_loop """owa""" +1002 12 negative_sampler """basic""" +1002 12 evaluator """rankbased""" +1002 13 dataset """fb15k237""" +1002 13 model """unstructuredmodel""" +1002 13 loss """bceaftersigmoid""" +1002 13 regularizer """no""" +1002 13 optimizer """adam""" +1002 13 training_loop """owa""" +1002 13 negative_sampler """basic""" +1002 13 evaluator """rankbased""" +1002 14 dataset """fb15k237""" +1002 14 model """unstructuredmodel""" +1002 14 loss """bceaftersigmoid""" +1002 14 regularizer """no""" +1002 14 optimizer """adam""" +1002 14 training_loop """owa""" +1002 14 negative_sampler """basic""" +1002 14 evaluator """rankbased""" +1002 15 dataset """fb15k237""" +1002 15 model """unstructuredmodel""" +1002 15 loss """bceaftersigmoid""" +1002 15 regularizer """no""" +1002 15 optimizer """adam""" +1002 15 training_loop """owa""" +1002 15 negative_sampler """basic""" +1002 15 evaluator """rankbased""" +1002 16 dataset """fb15k237""" +1002 16 model """unstructuredmodel""" +1002 16 loss """bceaftersigmoid""" +1002 16 regularizer """no""" +1002 16 optimizer """adam""" +1002 16 training_loop """owa""" +1002 16 negative_sampler """basic""" +1002 16 evaluator """rankbased""" +1002 17 dataset """fb15k237""" +1002 17 model """unstructuredmodel""" +1002 17 loss """bceaftersigmoid""" +1002 17 regularizer """no""" +1002 17 optimizer """adam""" +1002 17 training_loop """owa""" +1002 17 negative_sampler """basic""" +1002 17 evaluator """rankbased""" +1002 18 dataset """fb15k237""" +1002 18 model """unstructuredmodel""" +1002 18 loss """bceaftersigmoid""" +1002 18 regularizer """no""" +1002 18 optimizer """adam""" +1002 18 training_loop """owa""" +1002 18 negative_sampler """basic""" +1002 18 evaluator """rankbased""" +1002 19 dataset """fb15k237""" +1002 19 model """unstructuredmodel""" +1002 19 loss """bceaftersigmoid""" +1002 19 
regularizer """no""" +1002 19 optimizer """adam""" +1002 19 training_loop """owa""" +1002 19 negative_sampler """basic""" +1002 19 evaluator """rankbased""" +1002 20 dataset """fb15k237""" +1002 20 model """unstructuredmodel""" +1002 20 loss """bceaftersigmoid""" +1002 20 regularizer """no""" +1002 20 optimizer """adam""" +1002 20 training_loop """owa""" +1002 20 negative_sampler """basic""" +1002 20 evaluator """rankbased""" +1002 21 dataset """fb15k237""" +1002 21 model """unstructuredmodel""" +1002 21 loss """bceaftersigmoid""" +1002 21 regularizer """no""" +1002 21 optimizer """adam""" +1002 21 training_loop """owa""" +1002 21 negative_sampler """basic""" +1002 21 evaluator """rankbased""" +1002 22 dataset """fb15k237""" +1002 22 model """unstructuredmodel""" +1002 22 loss """bceaftersigmoid""" +1002 22 regularizer """no""" +1002 22 optimizer """adam""" +1002 22 training_loop """owa""" +1002 22 negative_sampler """basic""" +1002 22 evaluator """rankbased""" +1002 23 dataset """fb15k237""" +1002 23 model """unstructuredmodel""" +1002 23 loss """bceaftersigmoid""" +1002 23 regularizer """no""" +1002 23 optimizer """adam""" +1002 23 training_loop """owa""" +1002 23 negative_sampler """basic""" +1002 23 evaluator """rankbased""" +1002 24 dataset """fb15k237""" +1002 24 model """unstructuredmodel""" +1002 24 loss """bceaftersigmoid""" +1002 24 regularizer """no""" +1002 24 optimizer """adam""" +1002 24 training_loop """owa""" +1002 24 negative_sampler """basic""" +1002 24 evaluator """rankbased""" +1002 25 dataset """fb15k237""" +1002 25 model """unstructuredmodel""" +1002 25 loss """bceaftersigmoid""" +1002 25 regularizer """no""" +1002 25 optimizer """adam""" +1002 25 training_loop """owa""" +1002 25 negative_sampler """basic""" +1002 25 evaluator """rankbased""" +1002 26 dataset """fb15k237""" +1002 26 model """unstructuredmodel""" +1002 26 loss """bceaftersigmoid""" +1002 26 regularizer """no""" +1002 26 optimizer """adam""" +1002 26 training_loop """owa""" 
+1002 26 negative_sampler """basic""" +1002 26 evaluator """rankbased""" +1002 27 dataset """fb15k237""" +1002 27 model """unstructuredmodel""" +1002 27 loss """bceaftersigmoid""" +1002 27 regularizer """no""" +1002 27 optimizer """adam""" +1002 27 training_loop """owa""" +1002 27 negative_sampler """basic""" +1002 27 evaluator """rankbased""" +1002 28 dataset """fb15k237""" +1002 28 model """unstructuredmodel""" +1002 28 loss """bceaftersigmoid""" +1002 28 regularizer """no""" +1002 28 optimizer """adam""" +1002 28 training_loop """owa""" +1002 28 negative_sampler """basic""" +1002 28 evaluator """rankbased""" +1002 29 dataset """fb15k237""" +1002 29 model """unstructuredmodel""" +1002 29 loss """bceaftersigmoid""" +1002 29 regularizer """no""" +1002 29 optimizer """adam""" +1002 29 training_loop """owa""" +1002 29 negative_sampler """basic""" +1002 29 evaluator """rankbased""" +1002 30 dataset """fb15k237""" +1002 30 model """unstructuredmodel""" +1002 30 loss """bceaftersigmoid""" +1002 30 regularizer """no""" +1002 30 optimizer """adam""" +1002 30 training_loop """owa""" +1002 30 negative_sampler """basic""" +1002 30 evaluator """rankbased""" +1002 31 dataset """fb15k237""" +1002 31 model """unstructuredmodel""" +1002 31 loss """bceaftersigmoid""" +1002 31 regularizer """no""" +1002 31 optimizer """adam""" +1002 31 training_loop """owa""" +1002 31 negative_sampler """basic""" +1002 31 evaluator """rankbased""" +1002 32 dataset """fb15k237""" +1002 32 model """unstructuredmodel""" +1002 32 loss """bceaftersigmoid""" +1002 32 regularizer """no""" +1002 32 optimizer """adam""" +1002 32 training_loop """owa""" +1002 32 negative_sampler """basic""" +1002 32 evaluator """rankbased""" +1002 33 dataset """fb15k237""" +1002 33 model """unstructuredmodel""" +1002 33 loss """bceaftersigmoid""" +1002 33 regularizer """no""" +1002 33 optimizer """adam""" +1002 33 training_loop """owa""" +1002 33 negative_sampler """basic""" +1002 33 evaluator """rankbased""" +1002 34 
dataset """fb15k237""" +1002 34 model """unstructuredmodel""" +1002 34 loss """bceaftersigmoid""" +1002 34 regularizer """no""" +1002 34 optimizer """adam""" +1002 34 training_loop """owa""" +1002 34 negative_sampler """basic""" +1002 34 evaluator """rankbased""" +1002 35 dataset """fb15k237""" +1002 35 model """unstructuredmodel""" +1002 35 loss """bceaftersigmoid""" +1002 35 regularizer """no""" +1002 35 optimizer """adam""" +1002 35 training_loop """owa""" +1002 35 negative_sampler """basic""" +1002 35 evaluator """rankbased""" +1002 36 dataset """fb15k237""" +1002 36 model """unstructuredmodel""" +1002 36 loss """bceaftersigmoid""" +1002 36 regularizer """no""" +1002 36 optimizer """adam""" +1002 36 training_loop """owa""" +1002 36 negative_sampler """basic""" +1002 36 evaluator """rankbased""" +1002 37 dataset """fb15k237""" +1002 37 model """unstructuredmodel""" +1002 37 loss """bceaftersigmoid""" +1002 37 regularizer """no""" +1002 37 optimizer """adam""" +1002 37 training_loop """owa""" +1002 37 negative_sampler """basic""" +1002 37 evaluator """rankbased""" +1002 38 dataset """fb15k237""" +1002 38 model """unstructuredmodel""" +1002 38 loss """bceaftersigmoid""" +1002 38 regularizer """no""" +1002 38 optimizer """adam""" +1002 38 training_loop """owa""" +1002 38 negative_sampler """basic""" +1002 38 evaluator """rankbased""" +1002 39 dataset """fb15k237""" +1002 39 model """unstructuredmodel""" +1002 39 loss """bceaftersigmoid""" +1002 39 regularizer """no""" +1002 39 optimizer """adam""" +1002 39 training_loop """owa""" +1002 39 negative_sampler """basic""" +1002 39 evaluator """rankbased""" +1002 40 dataset """fb15k237""" +1002 40 model """unstructuredmodel""" +1002 40 loss """bceaftersigmoid""" +1002 40 regularizer """no""" +1002 40 optimizer """adam""" +1002 40 training_loop """owa""" +1002 40 negative_sampler """basic""" +1002 40 evaluator """rankbased""" +1002 41 dataset """fb15k237""" +1002 41 model """unstructuredmodel""" +1002 41 loss 
"""bceaftersigmoid""" +1002 41 regularizer """no""" +1002 41 optimizer """adam""" +1002 41 training_loop """owa""" +1002 41 negative_sampler """basic""" +1002 41 evaluator """rankbased""" +1002 42 dataset """fb15k237""" +1002 42 model """unstructuredmodel""" +1002 42 loss """bceaftersigmoid""" +1002 42 regularizer """no""" +1002 42 optimizer """adam""" +1002 42 training_loop """owa""" +1002 42 negative_sampler """basic""" +1002 42 evaluator """rankbased""" +1002 43 dataset """fb15k237""" +1002 43 model """unstructuredmodel""" +1002 43 loss """bceaftersigmoid""" +1002 43 regularizer """no""" +1002 43 optimizer """adam""" +1002 43 training_loop """owa""" +1002 43 negative_sampler """basic""" +1002 43 evaluator """rankbased""" +1002 44 dataset """fb15k237""" +1002 44 model """unstructuredmodel""" +1002 44 loss """bceaftersigmoid""" +1002 44 regularizer """no""" +1002 44 optimizer """adam""" +1002 44 training_loop """owa""" +1002 44 negative_sampler """basic""" +1002 44 evaluator """rankbased""" +1002 45 dataset """fb15k237""" +1002 45 model """unstructuredmodel""" +1002 45 loss """bceaftersigmoid""" +1002 45 regularizer """no""" +1002 45 optimizer """adam""" +1002 45 training_loop """owa""" +1002 45 negative_sampler """basic""" +1002 45 evaluator """rankbased""" +1002 46 dataset """fb15k237""" +1002 46 model """unstructuredmodel""" +1002 46 loss """bceaftersigmoid""" +1002 46 regularizer """no""" +1002 46 optimizer """adam""" +1002 46 training_loop """owa""" +1002 46 negative_sampler """basic""" +1002 46 evaluator """rankbased""" +1002 47 dataset """fb15k237""" +1002 47 model """unstructuredmodel""" +1002 47 loss """bceaftersigmoid""" +1002 47 regularizer """no""" +1002 47 optimizer """adam""" +1002 47 training_loop """owa""" +1002 47 negative_sampler """basic""" +1002 47 evaluator """rankbased""" +1002 48 dataset """fb15k237""" +1002 48 model """unstructuredmodel""" +1002 48 loss """bceaftersigmoid""" +1002 48 regularizer """no""" +1002 48 optimizer """adam""" +1002 
48 training_loop """owa""" +1002 48 negative_sampler """basic""" +1002 48 evaluator """rankbased""" +1002 49 dataset """fb15k237""" +1002 49 model """unstructuredmodel""" +1002 49 loss """bceaftersigmoid""" +1002 49 regularizer """no""" +1002 49 optimizer """adam""" +1002 49 training_loop """owa""" +1002 49 negative_sampler """basic""" +1002 49 evaluator """rankbased""" +1002 50 dataset """fb15k237""" +1002 50 model """unstructuredmodel""" +1002 50 loss """bceaftersigmoid""" +1002 50 regularizer """no""" +1002 50 optimizer """adam""" +1002 50 training_loop """owa""" +1002 50 negative_sampler """basic""" +1002 50 evaluator """rankbased""" +1002 51 dataset """fb15k237""" +1002 51 model """unstructuredmodel""" +1002 51 loss """bceaftersigmoid""" +1002 51 regularizer """no""" +1002 51 optimizer """adam""" +1002 51 training_loop """owa""" +1002 51 negative_sampler """basic""" +1002 51 evaluator """rankbased""" +1002 52 dataset """fb15k237""" +1002 52 model """unstructuredmodel""" +1002 52 loss """bceaftersigmoid""" +1002 52 regularizer """no""" +1002 52 optimizer """adam""" +1002 52 training_loop """owa""" +1002 52 negative_sampler """basic""" +1002 52 evaluator """rankbased""" +1002 53 dataset """fb15k237""" +1002 53 model """unstructuredmodel""" +1002 53 loss """bceaftersigmoid""" +1002 53 regularizer """no""" +1002 53 optimizer """adam""" +1002 53 training_loop """owa""" +1002 53 negative_sampler """basic""" +1002 53 evaluator """rankbased""" +1002 54 dataset """fb15k237""" +1002 54 model """unstructuredmodel""" +1002 54 loss """bceaftersigmoid""" +1002 54 regularizer """no""" +1002 54 optimizer """adam""" +1002 54 training_loop """owa""" +1002 54 negative_sampler """basic""" +1002 54 evaluator """rankbased""" +1002 55 dataset """fb15k237""" +1002 55 model """unstructuredmodel""" +1002 55 loss """bceaftersigmoid""" +1002 55 regularizer """no""" +1002 55 optimizer """adam""" +1002 55 training_loop """owa""" +1002 55 negative_sampler """basic""" +1002 55 evaluator 
"""rankbased""" +1002 56 dataset """fb15k237""" +1002 56 model """unstructuredmodel""" +1002 56 loss """bceaftersigmoid""" +1002 56 regularizer """no""" +1002 56 optimizer """adam""" +1002 56 training_loop """owa""" +1002 56 negative_sampler """basic""" +1002 56 evaluator """rankbased""" +1002 57 dataset """fb15k237""" +1002 57 model """unstructuredmodel""" +1002 57 loss """bceaftersigmoid""" +1002 57 regularizer """no""" +1002 57 optimizer """adam""" +1002 57 training_loop """owa""" +1002 57 negative_sampler """basic""" +1002 57 evaluator """rankbased""" +1002 58 dataset """fb15k237""" +1002 58 model """unstructuredmodel""" +1002 58 loss """bceaftersigmoid""" +1002 58 regularizer """no""" +1002 58 optimizer """adam""" +1002 58 training_loop """owa""" +1002 58 negative_sampler """basic""" +1002 58 evaluator """rankbased""" +1002 59 dataset """fb15k237""" +1002 59 model """unstructuredmodel""" +1002 59 loss """bceaftersigmoid""" +1002 59 regularizer """no""" +1002 59 optimizer """adam""" +1002 59 training_loop """owa""" +1002 59 negative_sampler """basic""" +1002 59 evaluator """rankbased""" +1002 60 dataset """fb15k237""" +1002 60 model """unstructuredmodel""" +1002 60 loss """bceaftersigmoid""" +1002 60 regularizer """no""" +1002 60 optimizer """adam""" +1002 60 training_loop """owa""" +1002 60 negative_sampler """basic""" +1002 60 evaluator """rankbased""" +1002 61 dataset """fb15k237""" +1002 61 model """unstructuredmodel""" +1002 61 loss """bceaftersigmoid""" +1002 61 regularizer """no""" +1002 61 optimizer """adam""" +1002 61 training_loop """owa""" +1002 61 negative_sampler """basic""" +1002 61 evaluator """rankbased""" +1002 62 dataset """fb15k237""" +1002 62 model """unstructuredmodel""" +1002 62 loss """bceaftersigmoid""" +1002 62 regularizer """no""" +1002 62 optimizer """adam""" +1002 62 training_loop """owa""" +1002 62 negative_sampler """basic""" +1002 62 evaluator """rankbased""" +1002 63 dataset """fb15k237""" +1002 63 model """unstructuredmodel""" 
+1002 63 loss """bceaftersigmoid""" +1002 63 regularizer """no""" +1002 63 optimizer """adam""" +1002 63 training_loop """owa""" +1002 63 negative_sampler """basic""" +1002 63 evaluator """rankbased""" +1002 64 dataset """fb15k237""" +1002 64 model """unstructuredmodel""" +1002 64 loss """bceaftersigmoid""" +1002 64 regularizer """no""" +1002 64 optimizer """adam""" +1002 64 training_loop """owa""" +1002 64 negative_sampler """basic""" +1002 64 evaluator """rankbased""" +1002 65 dataset """fb15k237""" +1002 65 model """unstructuredmodel""" +1002 65 loss """bceaftersigmoid""" +1002 65 regularizer """no""" +1002 65 optimizer """adam""" +1002 65 training_loop """owa""" +1002 65 negative_sampler """basic""" +1002 65 evaluator """rankbased""" +1002 66 dataset """fb15k237""" +1002 66 model """unstructuredmodel""" +1002 66 loss """bceaftersigmoid""" +1002 66 regularizer """no""" +1002 66 optimizer """adam""" +1002 66 training_loop """owa""" +1002 66 negative_sampler """basic""" +1002 66 evaluator """rankbased""" +1002 67 dataset """fb15k237""" +1002 67 model """unstructuredmodel""" +1002 67 loss """bceaftersigmoid""" +1002 67 regularizer """no""" +1002 67 optimizer """adam""" +1002 67 training_loop """owa""" +1002 67 negative_sampler """basic""" +1002 67 evaluator """rankbased""" +1003 1 model.embedding_dim 2.0 +1003 1 model.scoring_fct_norm 2.0 +1003 1 optimizer.lr 0.010541752004873867 +1003 1 negative_sampler.num_negs_per_pos 16.0 +1003 1 training.batch_size 1.0 +1003 2 model.embedding_dim 2.0 +1003 2 model.scoring_fct_norm 2.0 +1003 2 optimizer.lr 0.002896483309782621 +1003 2 negative_sampler.num_negs_per_pos 51.0 +1003 2 training.batch_size 1.0 +1003 3 model.embedding_dim 0.0 +1003 3 model.scoring_fct_norm 1.0 +1003 3 optimizer.lr 0.012785418945784043 +1003 3 negative_sampler.num_negs_per_pos 63.0 +1003 3 training.batch_size 1.0 +1003 4 model.embedding_dim 1.0 +1003 4 model.scoring_fct_norm 2.0 +1003 4 optimizer.lr 0.06316596753212049 +1003 4 
negative_sampler.num_negs_per_pos 45.0 +1003 4 training.batch_size 0.0 +1003 5 model.embedding_dim 2.0 +1003 5 model.scoring_fct_norm 2.0 +1003 5 optimizer.lr 0.0027822191216905208 +1003 5 negative_sampler.num_negs_per_pos 45.0 +1003 5 training.batch_size 1.0 +1003 6 model.embedding_dim 2.0 +1003 6 model.scoring_fct_norm 1.0 +1003 6 optimizer.lr 0.012261037038049557 +1003 6 negative_sampler.num_negs_per_pos 36.0 +1003 6 training.batch_size 0.0 +1003 7 model.embedding_dim 1.0 +1003 7 model.scoring_fct_norm 2.0 +1003 7 optimizer.lr 0.08888827077875074 +1003 7 negative_sampler.num_negs_per_pos 52.0 +1003 7 training.batch_size 2.0 +1003 8 model.embedding_dim 2.0 +1003 8 model.scoring_fct_norm 1.0 +1003 8 optimizer.lr 0.0014949834782067933 +1003 8 negative_sampler.num_negs_per_pos 0.0 +1003 8 training.batch_size 0.0 +1003 9 model.embedding_dim 1.0 +1003 9 model.scoring_fct_norm 1.0 +1003 9 optimizer.lr 0.007966114499137431 +1003 9 negative_sampler.num_negs_per_pos 91.0 +1003 9 training.batch_size 1.0 +1003 10 model.embedding_dim 1.0 +1003 10 model.scoring_fct_norm 2.0 +1003 10 optimizer.lr 0.0032717466342214252 +1003 10 negative_sampler.num_negs_per_pos 35.0 +1003 10 training.batch_size 2.0 +1003 11 model.embedding_dim 0.0 +1003 11 model.scoring_fct_norm 2.0 +1003 11 optimizer.lr 0.005332087243581074 +1003 11 negative_sampler.num_negs_per_pos 6.0 +1003 11 training.batch_size 1.0 +1003 12 model.embedding_dim 0.0 +1003 12 model.scoring_fct_norm 2.0 +1003 12 optimizer.lr 0.0648368800483826 +1003 12 negative_sampler.num_negs_per_pos 31.0 +1003 12 training.batch_size 0.0 +1003 13 model.embedding_dim 2.0 +1003 13 model.scoring_fct_norm 2.0 +1003 13 optimizer.lr 0.02244539074060096 +1003 13 negative_sampler.num_negs_per_pos 97.0 +1003 13 training.batch_size 2.0 +1003 14 model.embedding_dim 1.0 +1003 14 model.scoring_fct_norm 1.0 +1003 14 optimizer.lr 0.014739770547627705 +1003 14 negative_sampler.num_negs_per_pos 3.0 +1003 14 training.batch_size 2.0 +1003 15 
model.embedding_dim 1.0 +1003 15 model.scoring_fct_norm 1.0 +1003 15 optimizer.lr 0.03674457015518787 +1003 15 negative_sampler.num_negs_per_pos 13.0 +1003 15 training.batch_size 1.0 +1003 16 model.embedding_dim 2.0 +1003 16 model.scoring_fct_norm 1.0 +1003 16 optimizer.lr 0.008117987221515575 +1003 16 negative_sampler.num_negs_per_pos 93.0 +1003 16 training.batch_size 0.0 +1003 17 model.embedding_dim 0.0 +1003 17 model.scoring_fct_norm 1.0 +1003 17 optimizer.lr 0.0065062570684205736 +1003 17 negative_sampler.num_negs_per_pos 31.0 +1003 17 training.batch_size 1.0 +1003 18 model.embedding_dim 0.0 +1003 18 model.scoring_fct_norm 2.0 +1003 18 optimizer.lr 0.002613130701190637 +1003 18 negative_sampler.num_negs_per_pos 94.0 +1003 18 training.batch_size 2.0 +1003 19 model.embedding_dim 1.0 +1003 19 model.scoring_fct_norm 2.0 +1003 19 optimizer.lr 0.02627263279541285 +1003 19 negative_sampler.num_negs_per_pos 0.0 +1003 19 training.batch_size 1.0 +1003 20 model.embedding_dim 1.0 +1003 20 model.scoring_fct_norm 1.0 +1003 20 optimizer.lr 0.00136370129413277 +1003 20 negative_sampler.num_negs_per_pos 28.0 +1003 20 training.batch_size 2.0 +1003 21 model.embedding_dim 2.0 +1003 21 model.scoring_fct_norm 2.0 +1003 21 optimizer.lr 0.003328903920066744 +1003 21 negative_sampler.num_negs_per_pos 24.0 +1003 21 training.batch_size 1.0 +1003 22 model.embedding_dim 2.0 +1003 22 model.scoring_fct_norm 1.0 +1003 22 optimizer.lr 0.007860491222226616 +1003 22 negative_sampler.num_negs_per_pos 68.0 +1003 22 training.batch_size 2.0 +1003 23 model.embedding_dim 1.0 +1003 23 model.scoring_fct_norm 2.0 +1003 23 optimizer.lr 0.0012504687805255164 +1003 23 negative_sampler.num_negs_per_pos 96.0 +1003 23 training.batch_size 2.0 +1003 24 model.embedding_dim 0.0 +1003 24 model.scoring_fct_norm 1.0 +1003 24 optimizer.lr 0.0011551898709640123 +1003 24 negative_sampler.num_negs_per_pos 32.0 +1003 24 training.batch_size 1.0 +1003 25 model.embedding_dim 1.0 +1003 25 model.scoring_fct_norm 2.0 +1003 25 
optimizer.lr 0.0010599804195164208 +1003 25 negative_sampler.num_negs_per_pos 67.0 +1003 25 training.batch_size 2.0 +1003 26 model.embedding_dim 2.0 +1003 26 model.scoring_fct_norm 1.0 +1003 26 optimizer.lr 0.03178699638736986 +1003 26 negative_sampler.num_negs_per_pos 25.0 +1003 26 training.batch_size 2.0 +1003 27 model.embedding_dim 2.0 +1003 27 model.scoring_fct_norm 2.0 +1003 27 optimizer.lr 0.003119341491849647 +1003 27 negative_sampler.num_negs_per_pos 85.0 +1003 27 training.batch_size 1.0 +1003 28 model.embedding_dim 1.0 +1003 28 model.scoring_fct_norm 1.0 +1003 28 optimizer.lr 0.0016762257506074928 +1003 28 negative_sampler.num_negs_per_pos 12.0 +1003 28 training.batch_size 2.0 +1003 29 model.embedding_dim 0.0 +1003 29 model.scoring_fct_norm 1.0 +1003 29 optimizer.lr 0.01603458835023829 +1003 29 negative_sampler.num_negs_per_pos 75.0 +1003 29 training.batch_size 1.0 +1003 30 model.embedding_dim 1.0 +1003 30 model.scoring_fct_norm 1.0 +1003 30 optimizer.lr 0.0014369684529252174 +1003 30 negative_sampler.num_negs_per_pos 9.0 +1003 30 training.batch_size 0.0 +1003 31 model.embedding_dim 2.0 +1003 31 model.scoring_fct_norm 2.0 +1003 31 optimizer.lr 0.009442911410583366 +1003 31 negative_sampler.num_negs_per_pos 74.0 +1003 31 training.batch_size 1.0 +1003 32 model.embedding_dim 0.0 +1003 32 model.scoring_fct_norm 2.0 +1003 32 optimizer.lr 0.07685332163214709 +1003 32 negative_sampler.num_negs_per_pos 27.0 +1003 32 training.batch_size 1.0 +1003 33 model.embedding_dim 2.0 +1003 33 model.scoring_fct_norm 1.0 +1003 33 optimizer.lr 0.08603620094902638 +1003 33 negative_sampler.num_negs_per_pos 19.0 +1003 33 training.batch_size 1.0 +1003 34 model.embedding_dim 2.0 +1003 34 model.scoring_fct_norm 1.0 +1003 34 optimizer.lr 0.001797599515344101 +1003 34 negative_sampler.num_negs_per_pos 19.0 +1003 34 training.batch_size 2.0 +1003 35 model.embedding_dim 1.0 +1003 35 model.scoring_fct_norm 1.0 +1003 35 optimizer.lr 0.00690906664614703 +1003 35 
negative_sampler.num_negs_per_pos 92.0 +1003 35 training.batch_size 0.0 +1003 36 model.embedding_dim 2.0 +1003 36 model.scoring_fct_norm 1.0 +1003 36 optimizer.lr 0.0211899441027378 +1003 36 negative_sampler.num_negs_per_pos 7.0 +1003 36 training.batch_size 2.0 +1003 37 model.embedding_dim 1.0 +1003 37 model.scoring_fct_norm 1.0 +1003 37 optimizer.lr 0.040165306753038414 +1003 37 negative_sampler.num_negs_per_pos 27.0 +1003 37 training.batch_size 1.0 +1003 38 model.embedding_dim 2.0 +1003 38 model.scoring_fct_norm 2.0 +1003 38 optimizer.lr 0.05449248751643971 +1003 38 negative_sampler.num_negs_per_pos 33.0 +1003 38 training.batch_size 1.0 +1003 39 model.embedding_dim 2.0 +1003 39 model.scoring_fct_norm 2.0 +1003 39 optimizer.lr 0.031165027558713927 +1003 39 negative_sampler.num_negs_per_pos 29.0 +1003 39 training.batch_size 1.0 +1003 40 model.embedding_dim 2.0 +1003 40 model.scoring_fct_norm 2.0 +1003 40 optimizer.lr 0.007814397012489746 +1003 40 negative_sampler.num_negs_per_pos 55.0 +1003 40 training.batch_size 2.0 +1003 41 model.embedding_dim 1.0 +1003 41 model.scoring_fct_norm 2.0 +1003 41 optimizer.lr 0.025190266104917054 +1003 41 negative_sampler.num_negs_per_pos 66.0 +1003 41 training.batch_size 0.0 +1003 42 model.embedding_dim 1.0 +1003 42 model.scoring_fct_norm 1.0 +1003 42 optimizer.lr 0.006004495438868648 +1003 42 negative_sampler.num_negs_per_pos 32.0 +1003 42 training.batch_size 1.0 +1003 43 model.embedding_dim 0.0 +1003 43 model.scoring_fct_norm 2.0 +1003 43 optimizer.lr 0.004253984002645738 +1003 43 negative_sampler.num_negs_per_pos 81.0 +1003 43 training.batch_size 2.0 +1003 44 model.embedding_dim 0.0 +1003 44 model.scoring_fct_norm 2.0 +1003 44 optimizer.lr 0.0013667714356944886 +1003 44 negative_sampler.num_negs_per_pos 32.0 +1003 44 training.batch_size 2.0 +1003 45 model.embedding_dim 1.0 +1003 45 model.scoring_fct_norm 2.0 +1003 45 optimizer.lr 0.05763645779747032 +1003 45 negative_sampler.num_negs_per_pos 2.0 +1003 45 training.batch_size 2.0 
+1003 46 model.embedding_dim 0.0 +1003 46 model.scoring_fct_norm 1.0 +1003 46 optimizer.lr 0.016984264150888443 +1003 46 negative_sampler.num_negs_per_pos 10.0 +1003 46 training.batch_size 2.0 +1003 47 model.embedding_dim 2.0 +1003 47 model.scoring_fct_norm 1.0 +1003 47 optimizer.lr 0.002541491277650299 +1003 47 negative_sampler.num_negs_per_pos 47.0 +1003 47 training.batch_size 2.0 +1003 48 model.embedding_dim 1.0 +1003 48 model.scoring_fct_norm 2.0 +1003 48 optimizer.lr 0.06951536811906775 +1003 48 negative_sampler.num_negs_per_pos 30.0 +1003 48 training.batch_size 0.0 +1003 49 model.embedding_dim 2.0 +1003 49 model.scoring_fct_norm 2.0 +1003 49 optimizer.lr 0.026908407146025926 +1003 49 negative_sampler.num_negs_per_pos 91.0 +1003 49 training.batch_size 1.0 +1003 50 model.embedding_dim 0.0 +1003 50 model.scoring_fct_norm 2.0 +1003 50 optimizer.lr 0.0013725596779164664 +1003 50 negative_sampler.num_negs_per_pos 78.0 +1003 50 training.batch_size 0.0 +1003 51 model.embedding_dim 0.0 +1003 51 model.scoring_fct_norm 1.0 +1003 51 optimizer.lr 0.004824209110680476 +1003 51 negative_sampler.num_negs_per_pos 59.0 +1003 51 training.batch_size 1.0 +1003 52 model.embedding_dim 1.0 +1003 52 model.scoring_fct_norm 1.0 +1003 52 optimizer.lr 0.0012601357109171892 +1003 52 negative_sampler.num_negs_per_pos 16.0 +1003 52 training.batch_size 2.0 +1003 53 model.embedding_dim 1.0 +1003 53 model.scoring_fct_norm 2.0 +1003 53 optimizer.lr 0.029433360386044292 +1003 53 negative_sampler.num_negs_per_pos 73.0 +1003 53 training.batch_size 2.0 +1003 54 model.embedding_dim 0.0 +1003 54 model.scoring_fct_norm 2.0 +1003 54 optimizer.lr 0.004544837926910416 +1003 54 negative_sampler.num_negs_per_pos 44.0 +1003 54 training.batch_size 2.0 +1003 55 model.embedding_dim 0.0 +1003 55 model.scoring_fct_norm 1.0 +1003 55 optimizer.lr 0.07651465978786079 +1003 55 negative_sampler.num_negs_per_pos 48.0 +1003 55 training.batch_size 2.0 +1003 56 model.embedding_dim 1.0 +1003 56 model.scoring_fct_norm 2.0 
+1003 56 optimizer.lr 0.01455118112045799 +1003 56 negative_sampler.num_negs_per_pos 46.0 +1003 56 training.batch_size 1.0 +1003 57 model.embedding_dim 2.0 +1003 57 model.scoring_fct_norm 1.0 +1003 57 optimizer.lr 0.0028929593107701003 +1003 57 negative_sampler.num_negs_per_pos 93.0 +1003 57 training.batch_size 0.0 +1003 58 model.embedding_dim 1.0 +1003 58 model.scoring_fct_norm 2.0 +1003 58 optimizer.lr 0.021744743726267193 +1003 58 negative_sampler.num_negs_per_pos 27.0 +1003 58 training.batch_size 1.0 +1003 1 dataset """fb15k237""" +1003 1 model """unstructuredmodel""" +1003 1 loss """softplus""" +1003 1 regularizer """no""" +1003 1 optimizer """adam""" +1003 1 training_loop """owa""" +1003 1 negative_sampler """basic""" +1003 1 evaluator """rankbased""" +1003 2 dataset """fb15k237""" +1003 2 model """unstructuredmodel""" +1003 2 loss """softplus""" +1003 2 regularizer """no""" +1003 2 optimizer """adam""" +1003 2 training_loop """owa""" +1003 2 negative_sampler """basic""" +1003 2 evaluator """rankbased""" +1003 3 dataset """fb15k237""" +1003 3 model """unstructuredmodel""" +1003 3 loss """softplus""" +1003 3 regularizer """no""" +1003 3 optimizer """adam""" +1003 3 training_loop """owa""" +1003 3 negative_sampler """basic""" +1003 3 evaluator """rankbased""" +1003 4 dataset """fb15k237""" +1003 4 model """unstructuredmodel""" +1003 4 loss """softplus""" +1003 4 regularizer """no""" +1003 4 optimizer """adam""" +1003 4 training_loop """owa""" +1003 4 negative_sampler """basic""" +1003 4 evaluator """rankbased""" +1003 5 dataset """fb15k237""" +1003 5 model """unstructuredmodel""" +1003 5 loss """softplus""" +1003 5 regularizer """no""" +1003 5 optimizer """adam""" +1003 5 training_loop """owa""" +1003 5 negative_sampler """basic""" +1003 5 evaluator """rankbased""" +1003 6 dataset """fb15k237""" +1003 6 model """unstructuredmodel""" +1003 6 loss """softplus""" +1003 6 regularizer """no""" +1003 6 optimizer """adam""" +1003 6 training_loop """owa""" +1003 6 
negative_sampler """basic""" +1003 6 evaluator """rankbased""" +1003 7 dataset """fb15k237""" +1003 7 model """unstructuredmodel""" +1003 7 loss """softplus""" +1003 7 regularizer """no""" +1003 7 optimizer """adam""" +1003 7 training_loop """owa""" +1003 7 negative_sampler """basic""" +1003 7 evaluator """rankbased""" +1003 8 dataset """fb15k237""" +1003 8 model """unstructuredmodel""" +1003 8 loss """softplus""" +1003 8 regularizer """no""" +1003 8 optimizer """adam""" +1003 8 training_loop """owa""" +1003 8 negative_sampler """basic""" +1003 8 evaluator """rankbased""" +1003 9 dataset """fb15k237""" +1003 9 model """unstructuredmodel""" +1003 9 loss """softplus""" +1003 9 regularizer """no""" +1003 9 optimizer """adam""" +1003 9 training_loop """owa""" +1003 9 negative_sampler """basic""" +1003 9 evaluator """rankbased""" +1003 10 dataset """fb15k237""" +1003 10 model """unstructuredmodel""" +1003 10 loss """softplus""" +1003 10 regularizer """no""" +1003 10 optimizer """adam""" +1003 10 training_loop """owa""" +1003 10 negative_sampler """basic""" +1003 10 evaluator """rankbased""" +1003 11 dataset """fb15k237""" +1003 11 model """unstructuredmodel""" +1003 11 loss """softplus""" +1003 11 regularizer """no""" +1003 11 optimizer """adam""" +1003 11 training_loop """owa""" +1003 11 negative_sampler """basic""" +1003 11 evaluator """rankbased""" +1003 12 dataset """fb15k237""" +1003 12 model """unstructuredmodel""" +1003 12 loss """softplus""" +1003 12 regularizer """no""" +1003 12 optimizer """adam""" +1003 12 training_loop """owa""" +1003 12 negative_sampler """basic""" +1003 12 evaluator """rankbased""" +1003 13 dataset """fb15k237""" +1003 13 model """unstructuredmodel""" +1003 13 loss """softplus""" +1003 13 regularizer """no""" +1003 13 optimizer """adam""" +1003 13 training_loop """owa""" +1003 13 negative_sampler """basic""" +1003 13 evaluator """rankbased""" +1003 14 dataset """fb15k237""" +1003 14 model """unstructuredmodel""" +1003 14 loss 
"""softplus""" +1003 14 regularizer """no""" +1003 14 optimizer """adam""" +1003 14 training_loop """owa""" +1003 14 negative_sampler """basic""" +1003 14 evaluator """rankbased""" +1003 15 dataset """fb15k237""" +1003 15 model """unstructuredmodel""" +1003 15 loss """softplus""" +1003 15 regularizer """no""" +1003 15 optimizer """adam""" +1003 15 training_loop """owa""" +1003 15 negative_sampler """basic""" +1003 15 evaluator """rankbased""" +1003 16 dataset """fb15k237""" +1003 16 model """unstructuredmodel""" +1003 16 loss """softplus""" +1003 16 regularizer """no""" +1003 16 optimizer """adam""" +1003 16 training_loop """owa""" +1003 16 negative_sampler """basic""" +1003 16 evaluator """rankbased""" +1003 17 dataset """fb15k237""" +1003 17 model """unstructuredmodel""" +1003 17 loss """softplus""" +1003 17 regularizer """no""" +1003 17 optimizer """adam""" +1003 17 training_loop """owa""" +1003 17 negative_sampler """basic""" +1003 17 evaluator """rankbased""" +1003 18 dataset """fb15k237""" +1003 18 model """unstructuredmodel""" +1003 18 loss """softplus""" +1003 18 regularizer """no""" +1003 18 optimizer """adam""" +1003 18 training_loop """owa""" +1003 18 negative_sampler """basic""" +1003 18 evaluator """rankbased""" +1003 19 dataset """fb15k237""" +1003 19 model """unstructuredmodel""" +1003 19 loss """softplus""" +1003 19 regularizer """no""" +1003 19 optimizer """adam""" +1003 19 training_loop """owa""" +1003 19 negative_sampler """basic""" +1003 19 evaluator """rankbased""" +1003 20 dataset """fb15k237""" +1003 20 model """unstructuredmodel""" +1003 20 loss """softplus""" +1003 20 regularizer """no""" +1003 20 optimizer """adam""" +1003 20 training_loop """owa""" +1003 20 negative_sampler """basic""" +1003 20 evaluator """rankbased""" +1003 21 dataset """fb15k237""" +1003 21 model """unstructuredmodel""" +1003 21 loss """softplus""" +1003 21 regularizer """no""" +1003 21 optimizer """adam""" +1003 21 training_loop """owa""" +1003 21 negative_sampler 
"""basic""" +1003 21 evaluator """rankbased""" +1003 22 dataset """fb15k237""" +1003 22 model """unstructuredmodel""" +1003 22 loss """softplus""" +1003 22 regularizer """no""" +1003 22 optimizer """adam""" +1003 22 training_loop """owa""" +1003 22 negative_sampler """basic""" +1003 22 evaluator """rankbased""" +1003 23 dataset """fb15k237""" +1003 23 model """unstructuredmodel""" +1003 23 loss """softplus""" +1003 23 regularizer """no""" +1003 23 optimizer """adam""" +1003 23 training_loop """owa""" +1003 23 negative_sampler """basic""" +1003 23 evaluator """rankbased""" +1003 24 dataset """fb15k237""" +1003 24 model """unstructuredmodel""" +1003 24 loss """softplus""" +1003 24 regularizer """no""" +1003 24 optimizer """adam""" +1003 24 training_loop """owa""" +1003 24 negative_sampler """basic""" +1003 24 evaluator """rankbased""" +1003 25 dataset """fb15k237""" +1003 25 model """unstructuredmodel""" +1003 25 loss """softplus""" +1003 25 regularizer """no""" +1003 25 optimizer """adam""" +1003 25 training_loop """owa""" +1003 25 negative_sampler """basic""" +1003 25 evaluator """rankbased""" +1003 26 dataset """fb15k237""" +1003 26 model """unstructuredmodel""" +1003 26 loss """softplus""" +1003 26 regularizer """no""" +1003 26 optimizer """adam""" +1003 26 training_loop """owa""" +1003 26 negative_sampler """basic""" +1003 26 evaluator """rankbased""" +1003 27 dataset """fb15k237""" +1003 27 model """unstructuredmodel""" +1003 27 loss """softplus""" +1003 27 regularizer """no""" +1003 27 optimizer """adam""" +1003 27 training_loop """owa""" +1003 27 negative_sampler """basic""" +1003 27 evaluator """rankbased""" +1003 28 dataset """fb15k237""" +1003 28 model """unstructuredmodel""" +1003 28 loss """softplus""" +1003 28 regularizer """no""" +1003 28 optimizer """adam""" +1003 28 training_loop """owa""" +1003 28 negative_sampler """basic""" +1003 28 evaluator """rankbased""" +1003 29 dataset """fb15k237""" +1003 29 model """unstructuredmodel""" +1003 29 loss 
"""softplus""" +1003 29 regularizer """no""" +1003 29 optimizer """adam""" +1003 29 training_loop """owa""" +1003 29 negative_sampler """basic""" +1003 29 evaluator """rankbased""" +1003 30 dataset """fb15k237""" +1003 30 model """unstructuredmodel""" +1003 30 loss """softplus""" +1003 30 regularizer """no""" +1003 30 optimizer """adam""" +1003 30 training_loop """owa""" +1003 30 negative_sampler """basic""" +1003 30 evaluator """rankbased""" +1003 31 dataset """fb15k237""" +1003 31 model """unstructuredmodel""" +1003 31 loss """softplus""" +1003 31 regularizer """no""" +1003 31 optimizer """adam""" +1003 31 training_loop """owa""" +1003 31 negative_sampler """basic""" +1003 31 evaluator """rankbased""" +1003 32 dataset """fb15k237""" +1003 32 model """unstructuredmodel""" +1003 32 loss """softplus""" +1003 32 regularizer """no""" +1003 32 optimizer """adam""" +1003 32 training_loop """owa""" +1003 32 negative_sampler """basic""" +1003 32 evaluator """rankbased""" +1003 33 dataset """fb15k237""" +1003 33 model """unstructuredmodel""" +1003 33 loss """softplus""" +1003 33 regularizer """no""" +1003 33 optimizer """adam""" +1003 33 training_loop """owa""" +1003 33 negative_sampler """basic""" +1003 33 evaluator """rankbased""" +1003 34 dataset """fb15k237""" +1003 34 model """unstructuredmodel""" +1003 34 loss """softplus""" +1003 34 regularizer """no""" +1003 34 optimizer """adam""" +1003 34 training_loop """owa""" +1003 34 negative_sampler """basic""" +1003 34 evaluator """rankbased""" +1003 35 dataset """fb15k237""" +1003 35 model """unstructuredmodel""" +1003 35 loss """softplus""" +1003 35 regularizer """no""" +1003 35 optimizer """adam""" +1003 35 training_loop """owa""" +1003 35 negative_sampler """basic""" +1003 35 evaluator """rankbased""" +1003 36 dataset """fb15k237""" +1003 36 model """unstructuredmodel""" +1003 36 loss """softplus""" +1003 36 regularizer """no""" +1003 36 optimizer """adam""" +1003 36 training_loop """owa""" +1003 36 negative_sampler 
"""basic""" +1003 36 evaluator """rankbased""" +1003 37 dataset """fb15k237""" +1003 37 model """unstructuredmodel""" +1003 37 loss """softplus""" +1003 37 regularizer """no""" +1003 37 optimizer """adam""" +1003 37 training_loop """owa""" +1003 37 negative_sampler """basic""" +1003 37 evaluator """rankbased""" +1003 38 dataset """fb15k237""" +1003 38 model """unstructuredmodel""" +1003 38 loss """softplus""" +1003 38 regularizer """no""" +1003 38 optimizer """adam""" +1003 38 training_loop """owa""" +1003 38 negative_sampler """basic""" +1003 38 evaluator """rankbased""" +1003 39 dataset """fb15k237""" +1003 39 model """unstructuredmodel""" +1003 39 loss """softplus""" +1003 39 regularizer """no""" +1003 39 optimizer """adam""" +1003 39 training_loop """owa""" +1003 39 negative_sampler """basic""" +1003 39 evaluator """rankbased""" +1003 40 dataset """fb15k237""" +1003 40 model """unstructuredmodel""" +1003 40 loss """softplus""" +1003 40 regularizer """no""" +1003 40 optimizer """adam""" +1003 40 training_loop """owa""" +1003 40 negative_sampler """basic""" +1003 40 evaluator """rankbased""" +1003 41 dataset """fb15k237""" +1003 41 model """unstructuredmodel""" +1003 41 loss """softplus""" +1003 41 regularizer """no""" +1003 41 optimizer """adam""" +1003 41 training_loop """owa""" +1003 41 negative_sampler """basic""" +1003 41 evaluator """rankbased""" +1003 42 dataset """fb15k237""" +1003 42 model """unstructuredmodel""" +1003 42 loss """softplus""" +1003 42 regularizer """no""" +1003 42 optimizer """adam""" +1003 42 training_loop """owa""" +1003 42 negative_sampler """basic""" +1003 42 evaluator """rankbased""" +1003 43 dataset """fb15k237""" +1003 43 model """unstructuredmodel""" +1003 43 loss """softplus""" +1003 43 regularizer """no""" +1003 43 optimizer """adam""" +1003 43 training_loop """owa""" +1003 43 negative_sampler """basic""" +1003 43 evaluator """rankbased""" +1003 44 dataset """fb15k237""" +1003 44 model """unstructuredmodel""" +1003 44 loss 
"""softplus""" +1003 44 regularizer """no""" +1003 44 optimizer """adam""" +1003 44 training_loop """owa""" +1003 44 negative_sampler """basic""" +1003 44 evaluator """rankbased""" +1003 45 dataset """fb15k237""" +1003 45 model """unstructuredmodel""" +1003 45 loss """softplus""" +1003 45 regularizer """no""" +1003 45 optimizer """adam""" +1003 45 training_loop """owa""" +1003 45 negative_sampler """basic""" +1003 45 evaluator """rankbased""" +1003 46 dataset """fb15k237""" +1003 46 model """unstructuredmodel""" +1003 46 loss """softplus""" +1003 46 regularizer """no""" +1003 46 optimizer """adam""" +1003 46 training_loop """owa""" +1003 46 negative_sampler """basic""" +1003 46 evaluator """rankbased""" +1003 47 dataset """fb15k237""" +1003 47 model """unstructuredmodel""" +1003 47 loss """softplus""" +1003 47 regularizer """no""" +1003 47 optimizer """adam""" +1003 47 training_loop """owa""" +1003 47 negative_sampler """basic""" +1003 47 evaluator """rankbased""" +1003 48 dataset """fb15k237""" +1003 48 model """unstructuredmodel""" +1003 48 loss """softplus""" +1003 48 regularizer """no""" +1003 48 optimizer """adam""" +1003 48 training_loop """owa""" +1003 48 negative_sampler """basic""" +1003 48 evaluator """rankbased""" +1003 49 dataset """fb15k237""" +1003 49 model """unstructuredmodel""" +1003 49 loss """softplus""" +1003 49 regularizer """no""" +1003 49 optimizer """adam""" +1003 49 training_loop """owa""" +1003 49 negative_sampler """basic""" +1003 49 evaluator """rankbased""" +1003 50 dataset """fb15k237""" +1003 50 model """unstructuredmodel""" +1003 50 loss """softplus""" +1003 50 regularizer """no""" +1003 50 optimizer """adam""" +1003 50 training_loop """owa""" +1003 50 negative_sampler """basic""" +1003 50 evaluator """rankbased""" +1003 51 dataset """fb15k237""" +1003 51 model """unstructuredmodel""" +1003 51 loss """softplus""" +1003 51 regularizer """no""" +1003 51 optimizer """adam""" +1003 51 training_loop """owa""" +1003 51 negative_sampler 
"""basic""" +1003 51 evaluator """rankbased""" +1003 52 dataset """fb15k237""" +1003 52 model """unstructuredmodel""" +1003 52 loss """softplus""" +1003 52 regularizer """no""" +1003 52 optimizer """adam""" +1003 52 training_loop """owa""" +1003 52 negative_sampler """basic""" +1003 52 evaluator """rankbased""" +1003 53 dataset """fb15k237""" +1003 53 model """unstructuredmodel""" +1003 53 loss """softplus""" +1003 53 regularizer """no""" +1003 53 optimizer """adam""" +1003 53 training_loop """owa""" +1003 53 negative_sampler """basic""" +1003 53 evaluator """rankbased""" +1003 54 dataset """fb15k237""" +1003 54 model """unstructuredmodel""" +1003 54 loss """softplus""" +1003 54 regularizer """no""" +1003 54 optimizer """adam""" +1003 54 training_loop """owa""" +1003 54 negative_sampler """basic""" +1003 54 evaluator """rankbased""" +1003 55 dataset """fb15k237""" +1003 55 model """unstructuredmodel""" +1003 55 loss """softplus""" +1003 55 regularizer """no""" +1003 55 optimizer """adam""" +1003 55 training_loop """owa""" +1003 55 negative_sampler """basic""" +1003 55 evaluator """rankbased""" +1003 56 dataset """fb15k237""" +1003 56 model """unstructuredmodel""" +1003 56 loss """softplus""" +1003 56 regularizer """no""" +1003 56 optimizer """adam""" +1003 56 training_loop """owa""" +1003 56 negative_sampler """basic""" +1003 56 evaluator """rankbased""" +1003 57 dataset """fb15k237""" +1003 57 model """unstructuredmodel""" +1003 57 loss """softplus""" +1003 57 regularizer """no""" +1003 57 optimizer """adam""" +1003 57 training_loop """owa""" +1003 57 negative_sampler """basic""" +1003 57 evaluator """rankbased""" +1003 58 dataset """fb15k237""" +1003 58 model """unstructuredmodel""" +1003 58 loss """softplus""" +1003 58 regularizer """no""" +1003 58 optimizer """adam""" +1003 58 training_loop """owa""" +1003 58 negative_sampler """basic""" +1003 58 evaluator """rankbased""" +1004 1 model.embedding_dim 2.0 +1004 1 model.scoring_fct_norm 1.0 +1004 1 optimizer.lr 
0.03236356335111042 +1004 1 negative_sampler.num_negs_per_pos 12.0 +1004 1 training.batch_size 1.0 +1004 2 model.embedding_dim 1.0 +1004 2 model.scoring_fct_norm 1.0 +1004 2 optimizer.lr 0.016023742133205038 +1004 2 negative_sampler.num_negs_per_pos 2.0 +1004 2 training.batch_size 2.0 +1004 3 model.embedding_dim 0.0 +1004 3 model.scoring_fct_norm 1.0 +1004 3 optimizer.lr 0.016013285415878677 +1004 3 negative_sampler.num_negs_per_pos 84.0 +1004 3 training.batch_size 2.0 +1004 4 model.embedding_dim 1.0 +1004 4 model.scoring_fct_norm 1.0 +1004 4 optimizer.lr 0.00936675120953429 +1004 4 negative_sampler.num_negs_per_pos 4.0 +1004 4 training.batch_size 1.0 +1004 5 model.embedding_dim 0.0 +1004 5 model.scoring_fct_norm 1.0 +1004 5 optimizer.lr 0.010058484472642525 +1004 5 negative_sampler.num_negs_per_pos 70.0 +1004 5 training.batch_size 2.0 +1004 6 model.embedding_dim 1.0 +1004 6 model.scoring_fct_norm 1.0 +1004 6 optimizer.lr 0.00446315731453215 +1004 6 negative_sampler.num_negs_per_pos 4.0 +1004 6 training.batch_size 2.0 +1004 7 model.embedding_dim 1.0 +1004 7 model.scoring_fct_norm 2.0 +1004 7 optimizer.lr 0.009609631770999034 +1004 7 negative_sampler.num_negs_per_pos 15.0 +1004 7 training.batch_size 0.0 +1004 8 model.embedding_dim 1.0 +1004 8 model.scoring_fct_norm 1.0 +1004 8 optimizer.lr 0.008889620462995415 +1004 8 negative_sampler.num_negs_per_pos 9.0 +1004 8 training.batch_size 1.0 +1004 9 model.embedding_dim 2.0 +1004 9 model.scoring_fct_norm 1.0 +1004 9 optimizer.lr 0.02048356767327726 +1004 9 negative_sampler.num_negs_per_pos 79.0 +1004 9 training.batch_size 1.0 +1004 10 model.embedding_dim 0.0 +1004 10 model.scoring_fct_norm 1.0 +1004 10 optimizer.lr 0.04461865073743593 +1004 10 negative_sampler.num_negs_per_pos 19.0 +1004 10 training.batch_size 2.0 +1004 11 model.embedding_dim 1.0 +1004 11 model.scoring_fct_norm 2.0 +1004 11 optimizer.lr 0.030958258652921237 +1004 11 negative_sampler.num_negs_per_pos 18.0 +1004 11 training.batch_size 1.0 +1004 12 
model.embedding_dim 1.0 +1004 12 model.scoring_fct_norm 2.0 +1004 12 optimizer.lr 0.02902858191901904 +1004 12 negative_sampler.num_negs_per_pos 12.0 +1004 12 training.batch_size 1.0 +1004 13 model.embedding_dim 1.0 +1004 13 model.scoring_fct_norm 1.0 +1004 13 optimizer.lr 0.029013776450981868 +1004 13 negative_sampler.num_negs_per_pos 94.0 +1004 13 training.batch_size 0.0 +1004 14 model.embedding_dim 1.0 +1004 14 model.scoring_fct_norm 2.0 +1004 14 optimizer.lr 0.00953303774335484 +1004 14 negative_sampler.num_negs_per_pos 21.0 +1004 14 training.batch_size 2.0 +1004 15 model.embedding_dim 1.0 +1004 15 model.scoring_fct_norm 2.0 +1004 15 optimizer.lr 0.0460008530043304 +1004 15 negative_sampler.num_negs_per_pos 8.0 +1004 15 training.batch_size 2.0 +1004 16 model.embedding_dim 2.0 +1004 16 model.scoring_fct_norm 2.0 +1004 16 optimizer.lr 0.012753328878973026 +1004 16 negative_sampler.num_negs_per_pos 67.0 +1004 16 training.batch_size 2.0 +1004 17 model.embedding_dim 1.0 +1004 17 model.scoring_fct_norm 2.0 +1004 17 optimizer.lr 0.01572115899804351 +1004 17 negative_sampler.num_negs_per_pos 14.0 +1004 17 training.batch_size 0.0 +1004 18 model.embedding_dim 2.0 +1004 18 model.scoring_fct_norm 1.0 +1004 18 optimizer.lr 0.036221482852634185 +1004 18 negative_sampler.num_negs_per_pos 82.0 +1004 18 training.batch_size 1.0 +1004 19 model.embedding_dim 0.0 +1004 19 model.scoring_fct_norm 2.0 +1004 19 optimizer.lr 0.0916138353706808 +1004 19 negative_sampler.num_negs_per_pos 10.0 +1004 19 training.batch_size 2.0 +1004 20 model.embedding_dim 1.0 +1004 20 model.scoring_fct_norm 1.0 +1004 20 optimizer.lr 0.030773961946022027 +1004 20 negative_sampler.num_negs_per_pos 28.0 +1004 20 training.batch_size 1.0 +1004 21 model.embedding_dim 2.0 +1004 21 model.scoring_fct_norm 1.0 +1004 21 optimizer.lr 0.009840607760703205 +1004 21 negative_sampler.num_negs_per_pos 7.0 +1004 21 training.batch_size 1.0 +1004 22 model.embedding_dim 1.0 +1004 22 model.scoring_fct_norm 1.0 +1004 22 
optimizer.lr 0.001625100315146693 +1004 22 negative_sampler.num_negs_per_pos 92.0 +1004 22 training.batch_size 0.0 +1004 23 model.embedding_dim 0.0 +1004 23 model.scoring_fct_norm 1.0 +1004 23 optimizer.lr 0.04003995575956664 +1004 23 negative_sampler.num_negs_per_pos 81.0 +1004 23 training.batch_size 0.0 +1004 24 model.embedding_dim 1.0 +1004 24 model.scoring_fct_norm 2.0 +1004 24 optimizer.lr 0.0031976746867515195 +1004 24 negative_sampler.num_negs_per_pos 98.0 +1004 24 training.batch_size 0.0 +1004 25 model.embedding_dim 1.0 +1004 25 model.scoring_fct_norm 1.0 +1004 25 optimizer.lr 0.001693212483365101 +1004 25 negative_sampler.num_negs_per_pos 89.0 +1004 25 training.batch_size 0.0 +1004 26 model.embedding_dim 2.0 +1004 26 model.scoring_fct_norm 1.0 +1004 26 optimizer.lr 0.002735065947572092 +1004 26 negative_sampler.num_negs_per_pos 41.0 +1004 26 training.batch_size 2.0 +1004 27 model.embedding_dim 0.0 +1004 27 model.scoring_fct_norm 2.0 +1004 27 optimizer.lr 0.004707103496722045 +1004 27 negative_sampler.num_negs_per_pos 87.0 +1004 27 training.batch_size 2.0 +1004 28 model.embedding_dim 1.0 +1004 28 model.scoring_fct_norm 1.0 +1004 28 optimizer.lr 0.0011456027367201017 +1004 28 negative_sampler.num_negs_per_pos 58.0 +1004 28 training.batch_size 1.0 +1004 29 model.embedding_dim 1.0 +1004 29 model.scoring_fct_norm 1.0 +1004 29 optimizer.lr 0.011067289906120404 +1004 29 negative_sampler.num_negs_per_pos 96.0 +1004 29 training.batch_size 1.0 +1004 30 model.embedding_dim 2.0 +1004 30 model.scoring_fct_norm 1.0 +1004 30 optimizer.lr 0.04952118275676485 +1004 30 negative_sampler.num_negs_per_pos 59.0 +1004 30 training.batch_size 2.0 +1004 31 model.embedding_dim 1.0 +1004 31 model.scoring_fct_norm 1.0 +1004 31 optimizer.lr 0.026352913883589395 +1004 31 negative_sampler.num_negs_per_pos 25.0 +1004 31 training.batch_size 2.0 +1004 32 model.embedding_dim 2.0 +1004 32 model.scoring_fct_norm 2.0 +1004 32 optimizer.lr 0.002792548535861454 +1004 32 
negative_sampler.num_negs_per_pos 7.0 +1004 32 training.batch_size 0.0 +1004 33 model.embedding_dim 0.0 +1004 33 model.scoring_fct_norm 2.0 +1004 33 optimizer.lr 0.01700770803054441 +1004 33 negative_sampler.num_negs_per_pos 42.0 +1004 33 training.batch_size 1.0 +1004 34 model.embedding_dim 2.0 +1004 34 model.scoring_fct_norm 2.0 +1004 34 optimizer.lr 0.0038471504241819367 +1004 34 negative_sampler.num_negs_per_pos 52.0 +1004 34 training.batch_size 1.0 +1004 35 model.embedding_dim 2.0 +1004 35 model.scoring_fct_norm 1.0 +1004 35 optimizer.lr 0.003403039767218264 +1004 35 negative_sampler.num_negs_per_pos 2.0 +1004 35 training.batch_size 1.0 +1004 36 model.embedding_dim 0.0 +1004 36 model.scoring_fct_norm 2.0 +1004 36 optimizer.lr 0.008859646556448232 +1004 36 negative_sampler.num_negs_per_pos 93.0 +1004 36 training.batch_size 1.0 +1004 37 model.embedding_dim 1.0 +1004 37 model.scoring_fct_norm 2.0 +1004 37 optimizer.lr 0.024014638026945697 +1004 37 negative_sampler.num_negs_per_pos 46.0 +1004 37 training.batch_size 1.0 +1004 38 model.embedding_dim 1.0 +1004 38 model.scoring_fct_norm 2.0 +1004 38 optimizer.lr 0.02201988917528821 +1004 38 negative_sampler.num_negs_per_pos 5.0 +1004 38 training.batch_size 2.0 +1004 39 model.embedding_dim 2.0 +1004 39 model.scoring_fct_norm 1.0 +1004 39 optimizer.lr 0.01656835269342107 +1004 39 negative_sampler.num_negs_per_pos 55.0 +1004 39 training.batch_size 0.0 +1004 40 model.embedding_dim 0.0 +1004 40 model.scoring_fct_norm 2.0 +1004 40 optimizer.lr 0.0013524255617833186 +1004 40 negative_sampler.num_negs_per_pos 76.0 +1004 40 training.batch_size 2.0 +1004 41 model.embedding_dim 1.0 +1004 41 model.scoring_fct_norm 2.0 +1004 41 optimizer.lr 0.0873383421530552 +1004 41 negative_sampler.num_negs_per_pos 22.0 +1004 41 training.batch_size 0.0 +1004 42 model.embedding_dim 1.0 +1004 42 model.scoring_fct_norm 1.0 +1004 42 optimizer.lr 0.008895970268139312 +1004 42 negative_sampler.num_negs_per_pos 83.0 +1004 42 training.batch_size 0.0 
+1004 43 model.embedding_dim 2.0 +1004 43 model.scoring_fct_norm 2.0 +1004 43 optimizer.lr 0.04293927735984986 +1004 43 negative_sampler.num_negs_per_pos 26.0 +1004 43 training.batch_size 1.0 +1004 44 model.embedding_dim 1.0 +1004 44 model.scoring_fct_norm 2.0 +1004 44 optimizer.lr 0.008116646676981167 +1004 44 negative_sampler.num_negs_per_pos 70.0 +1004 44 training.batch_size 0.0 +1004 45 model.embedding_dim 0.0 +1004 45 model.scoring_fct_norm 1.0 +1004 45 optimizer.lr 0.03775841882419417 +1004 45 negative_sampler.num_negs_per_pos 53.0 +1004 45 training.batch_size 2.0 +1004 46 model.embedding_dim 2.0 +1004 46 model.scoring_fct_norm 1.0 +1004 46 optimizer.lr 0.0017712571587050142 +1004 46 negative_sampler.num_negs_per_pos 12.0 +1004 46 training.batch_size 1.0 +1004 47 model.embedding_dim 2.0 +1004 47 model.scoring_fct_norm 1.0 +1004 47 optimizer.lr 0.01506808015914477 +1004 47 negative_sampler.num_negs_per_pos 43.0 +1004 47 training.batch_size 1.0 +1004 48 model.embedding_dim 0.0 +1004 48 model.scoring_fct_norm 1.0 +1004 48 optimizer.lr 0.004377351897933309 +1004 48 negative_sampler.num_negs_per_pos 15.0 +1004 48 training.batch_size 2.0 +1004 49 model.embedding_dim 2.0 +1004 49 model.scoring_fct_norm 2.0 +1004 49 optimizer.lr 0.0061359174767126766 +1004 49 negative_sampler.num_negs_per_pos 80.0 +1004 49 training.batch_size 1.0 +1004 50 model.embedding_dim 1.0 +1004 50 model.scoring_fct_norm 1.0 +1004 50 optimizer.lr 0.03441964812143521 +1004 50 negative_sampler.num_negs_per_pos 29.0 +1004 50 training.batch_size 0.0 +1004 51 model.embedding_dim 1.0 +1004 51 model.scoring_fct_norm 1.0 +1004 51 optimizer.lr 0.008165896076632323 +1004 51 negative_sampler.num_negs_per_pos 31.0 +1004 51 training.batch_size 1.0 +1004 52 model.embedding_dim 0.0 +1004 52 model.scoring_fct_norm 1.0 +1004 52 optimizer.lr 0.0022541142853106703 +1004 52 negative_sampler.num_negs_per_pos 89.0 +1004 52 training.batch_size 1.0 +1004 53 model.embedding_dim 0.0 +1004 53 model.scoring_fct_norm 2.0 
+1004 53 optimizer.lr 0.002726778516002405 +1004 53 negative_sampler.num_negs_per_pos 27.0 +1004 53 training.batch_size 1.0 +1004 54 model.embedding_dim 1.0 +1004 54 model.scoring_fct_norm 2.0 +1004 54 optimizer.lr 0.004193933978238065 +1004 54 negative_sampler.num_negs_per_pos 53.0 +1004 54 training.batch_size 2.0 +1004 55 model.embedding_dim 2.0 +1004 55 model.scoring_fct_norm 1.0 +1004 55 optimizer.lr 0.0025259538670934067 +1004 55 negative_sampler.num_negs_per_pos 45.0 +1004 55 training.batch_size 0.0 +1004 56 model.embedding_dim 2.0 +1004 56 model.scoring_fct_norm 2.0 +1004 56 optimizer.lr 0.032117820796255137 +1004 56 negative_sampler.num_negs_per_pos 90.0 +1004 56 training.batch_size 0.0 +1004 57 model.embedding_dim 1.0 +1004 57 model.scoring_fct_norm 2.0 +1004 57 optimizer.lr 0.010786350174158525 +1004 57 negative_sampler.num_negs_per_pos 89.0 +1004 57 training.batch_size 1.0 +1004 58 model.embedding_dim 0.0 +1004 58 model.scoring_fct_norm 1.0 +1004 58 optimizer.lr 0.007693738288757682 +1004 58 negative_sampler.num_negs_per_pos 38.0 +1004 58 training.batch_size 2.0 +1004 59 model.embedding_dim 1.0 +1004 59 model.scoring_fct_norm 2.0 +1004 59 optimizer.lr 0.012151451242213706 +1004 59 negative_sampler.num_negs_per_pos 54.0 +1004 59 training.batch_size 1.0 +1004 60 model.embedding_dim 2.0 +1004 60 model.scoring_fct_norm 2.0 +1004 60 optimizer.lr 0.0040276877689149684 +1004 60 negative_sampler.num_negs_per_pos 2.0 +1004 60 training.batch_size 0.0 +1004 61 model.embedding_dim 2.0 +1004 61 model.scoring_fct_norm 2.0 +1004 61 optimizer.lr 0.015480594130816504 +1004 61 negative_sampler.num_negs_per_pos 92.0 +1004 61 training.batch_size 2.0 +1004 62 model.embedding_dim 0.0 +1004 62 model.scoring_fct_norm 2.0 +1004 62 optimizer.lr 0.002619411077916223 +1004 62 negative_sampler.num_negs_per_pos 13.0 +1004 62 training.batch_size 0.0 +1004 63 model.embedding_dim 0.0 +1004 63 model.scoring_fct_norm 1.0 +1004 63 optimizer.lr 0.07093861175456102 +1004 63 
negative_sampler.num_negs_per_pos 5.0 +1004 63 training.batch_size 0.0 +1004 64 model.embedding_dim 0.0 +1004 64 model.scoring_fct_norm 1.0 +1004 64 optimizer.lr 0.018354261017877113 +1004 64 negative_sampler.num_negs_per_pos 59.0 +1004 64 training.batch_size 2.0 +1004 65 model.embedding_dim 2.0 +1004 65 model.scoring_fct_norm 1.0 +1004 65 optimizer.lr 0.0025974116975055017 +1004 65 negative_sampler.num_negs_per_pos 28.0 +1004 65 training.batch_size 1.0 +1004 66 model.embedding_dim 1.0 +1004 66 model.scoring_fct_norm 1.0 +1004 66 optimizer.lr 0.05343232797569754 +1004 66 negative_sampler.num_negs_per_pos 74.0 +1004 66 training.batch_size 2.0 +1004 67 model.embedding_dim 0.0 +1004 67 model.scoring_fct_norm 1.0 +1004 67 optimizer.lr 0.043551110239815075 +1004 67 negative_sampler.num_negs_per_pos 51.0 +1004 67 training.batch_size 1.0 +1004 68 model.embedding_dim 1.0 +1004 68 model.scoring_fct_norm 1.0 +1004 68 optimizer.lr 0.05952380423864874 +1004 68 negative_sampler.num_negs_per_pos 90.0 +1004 68 training.batch_size 2.0 +1004 69 model.embedding_dim 0.0 +1004 69 model.scoring_fct_norm 2.0 +1004 69 optimizer.lr 0.017622420978153728 +1004 69 negative_sampler.num_negs_per_pos 19.0 +1004 69 training.batch_size 1.0 +1004 70 model.embedding_dim 0.0 +1004 70 model.scoring_fct_norm 2.0 +1004 70 optimizer.lr 0.0014023046638452059 +1004 70 negative_sampler.num_negs_per_pos 22.0 +1004 70 training.batch_size 1.0 +1004 71 model.embedding_dim 1.0 +1004 71 model.scoring_fct_norm 2.0 +1004 71 optimizer.lr 0.09022183261986429 +1004 71 negative_sampler.num_negs_per_pos 38.0 +1004 71 training.batch_size 2.0 +1004 72 model.embedding_dim 2.0 +1004 72 model.scoring_fct_norm 1.0 +1004 72 optimizer.lr 0.001335966759795569 +1004 72 negative_sampler.num_negs_per_pos 40.0 +1004 72 training.batch_size 1.0 +1004 73 model.embedding_dim 2.0 +1004 73 model.scoring_fct_norm 2.0 +1004 73 optimizer.lr 0.09245245990807022 +1004 73 negative_sampler.num_negs_per_pos 66.0 +1004 73 training.batch_size 1.0 
+1004 74 model.embedding_dim 2.0 +1004 74 model.scoring_fct_norm 1.0 +1004 74 optimizer.lr 0.012332268473094278 +1004 74 negative_sampler.num_negs_per_pos 8.0 +1004 74 training.batch_size 1.0 +1004 75 model.embedding_dim 1.0 +1004 75 model.scoring_fct_norm 1.0 +1004 75 optimizer.lr 0.01663804559902834 +1004 75 negative_sampler.num_negs_per_pos 46.0 +1004 75 training.batch_size 0.0 +1004 76 model.embedding_dim 2.0 +1004 76 model.scoring_fct_norm 1.0 +1004 76 optimizer.lr 0.04209312298958615 +1004 76 negative_sampler.num_negs_per_pos 46.0 +1004 76 training.batch_size 0.0 +1004 77 model.embedding_dim 2.0 +1004 77 model.scoring_fct_norm 1.0 +1004 77 optimizer.lr 0.007748217583349752 +1004 77 negative_sampler.num_negs_per_pos 7.0 +1004 77 training.batch_size 1.0 +1004 78 model.embedding_dim 1.0 +1004 78 model.scoring_fct_norm 1.0 +1004 78 optimizer.lr 0.006352052254156576 +1004 78 negative_sampler.num_negs_per_pos 66.0 +1004 78 training.batch_size 1.0 +1004 79 model.embedding_dim 0.0 +1004 79 model.scoring_fct_norm 1.0 +1004 79 optimizer.lr 0.0029206292286088243 +1004 79 negative_sampler.num_negs_per_pos 85.0 +1004 79 training.batch_size 1.0 +1004 80 model.embedding_dim 0.0 +1004 80 model.scoring_fct_norm 1.0 +1004 80 optimizer.lr 0.09821231707139338 +1004 80 negative_sampler.num_negs_per_pos 67.0 +1004 80 training.batch_size 0.0 +1004 81 model.embedding_dim 2.0 +1004 81 model.scoring_fct_norm 2.0 +1004 81 optimizer.lr 0.009121372891962269 +1004 81 negative_sampler.num_negs_per_pos 35.0 +1004 81 training.batch_size 1.0 +1004 82 model.embedding_dim 2.0 +1004 82 model.scoring_fct_norm 2.0 +1004 82 optimizer.lr 0.04688363964809122 +1004 82 negative_sampler.num_negs_per_pos 79.0 +1004 82 training.batch_size 2.0 +1004 83 model.embedding_dim 0.0 +1004 83 model.scoring_fct_norm 1.0 +1004 83 optimizer.lr 0.0107632042257307 +1004 83 negative_sampler.num_negs_per_pos 2.0 +1004 83 training.batch_size 0.0 +1004 84 model.embedding_dim 1.0 +1004 84 model.scoring_fct_norm 2.0 +1004 84 
optimizer.lr 0.0032017743112773608 +1004 84 negative_sampler.num_negs_per_pos 79.0 +1004 84 training.batch_size 1.0 +1004 85 model.embedding_dim 0.0 +1004 85 model.scoring_fct_norm 1.0 +1004 85 optimizer.lr 0.0056097400811712505 +1004 85 negative_sampler.num_negs_per_pos 71.0 +1004 85 training.batch_size 0.0 +1004 86 model.embedding_dim 0.0 +1004 86 model.scoring_fct_norm 1.0 +1004 86 optimizer.lr 0.0017044096857923487 +1004 86 negative_sampler.num_negs_per_pos 18.0 +1004 86 training.batch_size 1.0 +1004 87 model.embedding_dim 1.0 +1004 87 model.scoring_fct_norm 2.0 +1004 87 optimizer.lr 0.024512097024256888 +1004 87 negative_sampler.num_negs_per_pos 39.0 +1004 87 training.batch_size 0.0 +1004 88 model.embedding_dim 0.0 +1004 88 model.scoring_fct_norm 1.0 +1004 88 optimizer.lr 0.020829083278414647 +1004 88 negative_sampler.num_negs_per_pos 30.0 +1004 88 training.batch_size 1.0 +1004 89 model.embedding_dim 1.0 +1004 89 model.scoring_fct_norm 1.0 +1004 89 optimizer.lr 0.004078113066793704 +1004 89 negative_sampler.num_negs_per_pos 25.0 +1004 89 training.batch_size 0.0 +1004 90 model.embedding_dim 2.0 +1004 90 model.scoring_fct_norm 2.0 +1004 90 optimizer.lr 0.05847414160621003 +1004 90 negative_sampler.num_negs_per_pos 96.0 +1004 90 training.batch_size 1.0 +1004 91 model.embedding_dim 2.0 +1004 91 model.scoring_fct_norm 1.0 +1004 91 optimizer.lr 0.04826513113005904 +1004 91 negative_sampler.num_negs_per_pos 56.0 +1004 91 training.batch_size 1.0 +1004 92 model.embedding_dim 2.0 +1004 92 model.scoring_fct_norm 1.0 +1004 92 optimizer.lr 0.016792915014951112 +1004 92 negative_sampler.num_negs_per_pos 8.0 +1004 92 training.batch_size 2.0 +1004 93 model.embedding_dim 0.0 +1004 93 model.scoring_fct_norm 2.0 +1004 93 optimizer.lr 0.0010257670513793768 +1004 93 negative_sampler.num_negs_per_pos 20.0 +1004 93 training.batch_size 0.0 +1004 94 model.embedding_dim 0.0 +1004 94 model.scoring_fct_norm 1.0 +1004 94 optimizer.lr 0.02339529029857976 +1004 94 
negative_sampler.num_negs_per_pos 41.0 +1004 94 training.batch_size 2.0 +1004 95 model.embedding_dim 2.0 +1004 95 model.scoring_fct_norm 2.0 +1004 95 optimizer.lr 0.018548890865324102 +1004 95 negative_sampler.num_negs_per_pos 62.0 +1004 95 training.batch_size 2.0 +1004 96 model.embedding_dim 0.0 +1004 96 model.scoring_fct_norm 2.0 +1004 96 optimizer.lr 0.06088533523736531 +1004 96 negative_sampler.num_negs_per_pos 38.0 +1004 96 training.batch_size 2.0 +1004 97 model.embedding_dim 2.0 +1004 97 model.scoring_fct_norm 1.0 +1004 97 optimizer.lr 0.03633729047048514 +1004 97 negative_sampler.num_negs_per_pos 91.0 +1004 97 training.batch_size 1.0 +1004 98 model.embedding_dim 1.0 +1004 98 model.scoring_fct_norm 2.0 +1004 98 optimizer.lr 0.0015569463126260679 +1004 98 negative_sampler.num_negs_per_pos 95.0 +1004 98 training.batch_size 0.0 +1004 99 model.embedding_dim 0.0 +1004 99 model.scoring_fct_norm 2.0 +1004 99 optimizer.lr 0.002455684636059034 +1004 99 negative_sampler.num_negs_per_pos 91.0 +1004 99 training.batch_size 0.0 +1004 100 model.embedding_dim 1.0 +1004 100 model.scoring_fct_norm 1.0 +1004 100 optimizer.lr 0.0045589868447825825 +1004 100 negative_sampler.num_negs_per_pos 64.0 +1004 100 training.batch_size 0.0 +1004 1 dataset """fb15k237""" +1004 1 model """unstructuredmodel""" +1004 1 loss """bceaftersigmoid""" +1004 1 regularizer """no""" +1004 1 optimizer """adam""" +1004 1 training_loop """owa""" +1004 1 negative_sampler """basic""" +1004 1 evaluator """rankbased""" +1004 2 dataset """fb15k237""" +1004 2 model """unstructuredmodel""" +1004 2 loss """bceaftersigmoid""" +1004 2 regularizer """no""" +1004 2 optimizer """adam""" +1004 2 training_loop """owa""" +1004 2 negative_sampler """basic""" +1004 2 evaluator """rankbased""" +1004 3 dataset """fb15k237""" +1004 3 model """unstructuredmodel""" +1004 3 loss """bceaftersigmoid""" +1004 3 regularizer """no""" +1004 3 optimizer """adam""" +1004 3 training_loop """owa""" +1004 3 negative_sampler """basic""" 
+1004 3 evaluator """rankbased""" +1004 4 dataset """fb15k237""" +1004 4 model """unstructuredmodel""" +1004 4 loss """bceaftersigmoid""" +1004 4 regularizer """no""" +1004 4 optimizer """adam""" +1004 4 training_loop """owa""" +1004 4 negative_sampler """basic""" +1004 4 evaluator """rankbased""" +1004 5 dataset """fb15k237""" +1004 5 model """unstructuredmodel""" +1004 5 loss """bceaftersigmoid""" +1004 5 regularizer """no""" +1004 5 optimizer """adam""" +1004 5 training_loop """owa""" +1004 5 negative_sampler """basic""" +1004 5 evaluator """rankbased""" +1004 6 dataset """fb15k237""" +1004 6 model """unstructuredmodel""" +1004 6 loss """bceaftersigmoid""" +1004 6 regularizer """no""" +1004 6 optimizer """adam""" +1004 6 training_loop """owa""" +1004 6 negative_sampler """basic""" +1004 6 evaluator """rankbased""" +1004 7 dataset """fb15k237""" +1004 7 model """unstructuredmodel""" +1004 7 loss """bceaftersigmoid""" +1004 7 regularizer """no""" +1004 7 optimizer """adam""" +1004 7 training_loop """owa""" +1004 7 negative_sampler """basic""" +1004 7 evaluator """rankbased""" +1004 8 dataset """fb15k237""" +1004 8 model """unstructuredmodel""" +1004 8 loss """bceaftersigmoid""" +1004 8 regularizer """no""" +1004 8 optimizer """adam""" +1004 8 training_loop """owa""" +1004 8 negative_sampler """basic""" +1004 8 evaluator """rankbased""" +1004 9 dataset """fb15k237""" +1004 9 model """unstructuredmodel""" +1004 9 loss """bceaftersigmoid""" +1004 9 regularizer """no""" +1004 9 optimizer """adam""" +1004 9 training_loop """owa""" +1004 9 negative_sampler """basic""" +1004 9 evaluator """rankbased""" +1004 10 dataset """fb15k237""" +1004 10 model """unstructuredmodel""" +1004 10 loss """bceaftersigmoid""" +1004 10 regularizer """no""" +1004 10 optimizer """adam""" +1004 10 training_loop """owa""" +1004 10 negative_sampler """basic""" +1004 10 evaluator """rankbased""" +1004 11 dataset """fb15k237""" +1004 11 model """unstructuredmodel""" +1004 11 loss 
"""bceaftersigmoid""" +1004 11 regularizer """no""" +1004 11 optimizer """adam""" +1004 11 training_loop """owa""" +1004 11 negative_sampler """basic""" +1004 11 evaluator """rankbased""" +1004 12 dataset """fb15k237""" +1004 12 model """unstructuredmodel""" +1004 12 loss """bceaftersigmoid""" +1004 12 regularizer """no""" +1004 12 optimizer """adam""" +1004 12 training_loop """owa""" +1004 12 negative_sampler """basic""" +1004 12 evaluator """rankbased""" +1004 13 dataset """fb15k237""" +1004 13 model """unstructuredmodel""" +1004 13 loss """bceaftersigmoid""" +1004 13 regularizer """no""" +1004 13 optimizer """adam""" +1004 13 training_loop """owa""" +1004 13 negative_sampler """basic""" +1004 13 evaluator """rankbased""" +1004 14 dataset """fb15k237""" +1004 14 model """unstructuredmodel""" +1004 14 loss """bceaftersigmoid""" +1004 14 regularizer """no""" +1004 14 optimizer """adam""" +1004 14 training_loop """owa""" +1004 14 negative_sampler """basic""" +1004 14 evaluator """rankbased""" +1004 15 dataset """fb15k237""" +1004 15 model """unstructuredmodel""" +1004 15 loss """bceaftersigmoid""" +1004 15 regularizer """no""" +1004 15 optimizer """adam""" +1004 15 training_loop """owa""" +1004 15 negative_sampler """basic""" +1004 15 evaluator """rankbased""" +1004 16 dataset """fb15k237""" +1004 16 model """unstructuredmodel""" +1004 16 loss """bceaftersigmoid""" +1004 16 regularizer """no""" +1004 16 optimizer """adam""" +1004 16 training_loop """owa""" +1004 16 negative_sampler """basic""" +1004 16 evaluator """rankbased""" +1004 17 dataset """fb15k237""" +1004 17 model """unstructuredmodel""" +1004 17 loss """bceaftersigmoid""" +1004 17 regularizer """no""" +1004 17 optimizer """adam""" +1004 17 training_loop """owa""" +1004 17 negative_sampler """basic""" +1004 17 evaluator """rankbased""" +1004 18 dataset """fb15k237""" +1004 18 model """unstructuredmodel""" +1004 18 loss """bceaftersigmoid""" +1004 18 regularizer """no""" +1004 18 optimizer """adam""" +1004 
18 training_loop """owa""" +1004 18 negative_sampler """basic""" +1004 18 evaluator """rankbased""" +1004 19 dataset """fb15k237""" +1004 19 model """unstructuredmodel""" +1004 19 loss """bceaftersigmoid""" +1004 19 regularizer """no""" +1004 19 optimizer """adam""" +1004 19 training_loop """owa""" +1004 19 negative_sampler """basic""" +1004 19 evaluator """rankbased""" +1004 20 dataset """fb15k237""" +1004 20 model """unstructuredmodel""" +1004 20 loss """bceaftersigmoid""" +1004 20 regularizer """no""" +1004 20 optimizer """adam""" +1004 20 training_loop """owa""" +1004 20 negative_sampler """basic""" +1004 20 evaluator """rankbased""" +1004 21 dataset """fb15k237""" +1004 21 model """unstructuredmodel""" +1004 21 loss """bceaftersigmoid""" +1004 21 regularizer """no""" +1004 21 optimizer """adam""" +1004 21 training_loop """owa""" +1004 21 negative_sampler """basic""" +1004 21 evaluator """rankbased""" +1004 22 dataset """fb15k237""" +1004 22 model """unstructuredmodel""" +1004 22 loss """bceaftersigmoid""" +1004 22 regularizer """no""" +1004 22 optimizer """adam""" +1004 22 training_loop """owa""" +1004 22 negative_sampler """basic""" +1004 22 evaluator """rankbased""" +1004 23 dataset """fb15k237""" +1004 23 model """unstructuredmodel""" +1004 23 loss """bceaftersigmoid""" +1004 23 regularizer """no""" +1004 23 optimizer """adam""" +1004 23 training_loop """owa""" +1004 23 negative_sampler """basic""" +1004 23 evaluator """rankbased""" +1004 24 dataset """fb15k237""" +1004 24 model """unstructuredmodel""" +1004 24 loss """bceaftersigmoid""" +1004 24 regularizer """no""" +1004 24 optimizer """adam""" +1004 24 training_loop """owa""" +1004 24 negative_sampler """basic""" +1004 24 evaluator """rankbased""" +1004 25 dataset """fb15k237""" +1004 25 model """unstructuredmodel""" +1004 25 loss """bceaftersigmoid""" +1004 25 regularizer """no""" +1004 25 optimizer """adam""" +1004 25 training_loop """owa""" +1004 25 negative_sampler """basic""" +1004 25 evaluator 
"""rankbased""" +1004 26 dataset """fb15k237""" +1004 26 model """unstructuredmodel""" +1004 26 loss """bceaftersigmoid""" +1004 26 regularizer """no""" +1004 26 optimizer """adam""" +1004 26 training_loop """owa""" +1004 26 negative_sampler """basic""" +1004 26 evaluator """rankbased""" +1004 27 dataset """fb15k237""" +1004 27 model """unstructuredmodel""" +1004 27 loss """bceaftersigmoid""" +1004 27 regularizer """no""" +1004 27 optimizer """adam""" +1004 27 training_loop """owa""" +1004 27 negative_sampler """basic""" +1004 27 evaluator """rankbased""" +1004 28 dataset """fb15k237""" +1004 28 model """unstructuredmodel""" +1004 28 loss """bceaftersigmoid""" +1004 28 regularizer """no""" +1004 28 optimizer """adam""" +1004 28 training_loop """owa""" +1004 28 negative_sampler """basic""" +1004 28 evaluator """rankbased""" +1004 29 dataset """fb15k237""" +1004 29 model """unstructuredmodel""" +1004 29 loss """bceaftersigmoid""" +1004 29 regularizer """no""" +1004 29 optimizer """adam""" +1004 29 training_loop """owa""" +1004 29 negative_sampler """basic""" +1004 29 evaluator """rankbased""" +1004 30 dataset """fb15k237""" +1004 30 model """unstructuredmodel""" +1004 30 loss """bceaftersigmoid""" +1004 30 regularizer """no""" +1004 30 optimizer """adam""" +1004 30 training_loop """owa""" +1004 30 negative_sampler """basic""" +1004 30 evaluator """rankbased""" +1004 31 dataset """fb15k237""" +1004 31 model """unstructuredmodel""" +1004 31 loss """bceaftersigmoid""" +1004 31 regularizer """no""" +1004 31 optimizer """adam""" +1004 31 training_loop """owa""" +1004 31 negative_sampler """basic""" +1004 31 evaluator """rankbased""" +1004 32 dataset """fb15k237""" +1004 32 model """unstructuredmodel""" +1004 32 loss """bceaftersigmoid""" +1004 32 regularizer """no""" +1004 32 optimizer """adam""" +1004 32 training_loop """owa""" +1004 32 negative_sampler """basic""" +1004 32 evaluator """rankbased""" +1004 33 dataset """fb15k237""" +1004 33 model """unstructuredmodel""" 
+1004 33 loss """bceaftersigmoid""" +1004 33 regularizer """no""" +1004 33 optimizer """adam""" +1004 33 training_loop """owa""" +1004 33 negative_sampler """basic""" +1004 33 evaluator """rankbased""" +1004 34 dataset """fb15k237""" +1004 34 model """unstructuredmodel""" +1004 34 loss """bceaftersigmoid""" +1004 34 regularizer """no""" +1004 34 optimizer """adam""" +1004 34 training_loop """owa""" +1004 34 negative_sampler """basic""" +1004 34 evaluator """rankbased""" +1004 35 dataset """fb15k237""" +1004 35 model """unstructuredmodel""" +1004 35 loss """bceaftersigmoid""" +1004 35 regularizer """no""" +1004 35 optimizer """adam""" +1004 35 training_loop """owa""" +1004 35 negative_sampler """basic""" +1004 35 evaluator """rankbased""" +1004 36 dataset """fb15k237""" +1004 36 model """unstructuredmodel""" +1004 36 loss """bceaftersigmoid""" +1004 36 regularizer """no""" +1004 36 optimizer """adam""" +1004 36 training_loop """owa""" +1004 36 negative_sampler """basic""" +1004 36 evaluator """rankbased""" +1004 37 dataset """fb15k237""" +1004 37 model """unstructuredmodel""" +1004 37 loss """bceaftersigmoid""" +1004 37 regularizer """no""" +1004 37 optimizer """adam""" +1004 37 training_loop """owa""" +1004 37 negative_sampler """basic""" +1004 37 evaluator """rankbased""" +1004 38 dataset """fb15k237""" +1004 38 model """unstructuredmodel""" +1004 38 loss """bceaftersigmoid""" +1004 38 regularizer """no""" +1004 38 optimizer """adam""" +1004 38 training_loop """owa""" +1004 38 negative_sampler """basic""" +1004 38 evaluator """rankbased""" +1004 39 dataset """fb15k237""" +1004 39 model """unstructuredmodel""" +1004 39 loss """bceaftersigmoid""" +1004 39 regularizer """no""" +1004 39 optimizer """adam""" +1004 39 training_loop """owa""" +1004 39 negative_sampler """basic""" +1004 39 evaluator """rankbased""" +1004 40 dataset """fb15k237""" +1004 40 model """unstructuredmodel""" +1004 40 loss """bceaftersigmoid""" +1004 40 regularizer """no""" +1004 40 optimizer 
"""adam""" +1004 40 training_loop """owa""" +1004 40 negative_sampler """basic""" +1004 40 evaluator """rankbased""" +1004 41 dataset """fb15k237""" +1004 41 model """unstructuredmodel""" +1004 41 loss """bceaftersigmoid""" +1004 41 regularizer """no""" +1004 41 optimizer """adam""" +1004 41 training_loop """owa""" +1004 41 negative_sampler """basic""" +1004 41 evaluator """rankbased""" +1004 42 dataset """fb15k237""" +1004 42 model """unstructuredmodel""" +1004 42 loss """bceaftersigmoid""" +1004 42 regularizer """no""" +1004 42 optimizer """adam""" +1004 42 training_loop """owa""" +1004 42 negative_sampler """basic""" +1004 42 evaluator """rankbased""" +1004 43 dataset """fb15k237""" +1004 43 model """unstructuredmodel""" +1004 43 loss """bceaftersigmoid""" +1004 43 regularizer """no""" +1004 43 optimizer """adam""" +1004 43 training_loop """owa""" +1004 43 negative_sampler """basic""" +1004 43 evaluator """rankbased""" +1004 44 dataset """fb15k237""" +1004 44 model """unstructuredmodel""" +1004 44 loss """bceaftersigmoid""" +1004 44 regularizer """no""" +1004 44 optimizer """adam""" +1004 44 training_loop """owa""" +1004 44 negative_sampler """basic""" +1004 44 evaluator """rankbased""" +1004 45 dataset """fb15k237""" +1004 45 model """unstructuredmodel""" +1004 45 loss """bceaftersigmoid""" +1004 45 regularizer """no""" +1004 45 optimizer """adam""" +1004 45 training_loop """owa""" +1004 45 negative_sampler """basic""" +1004 45 evaluator """rankbased""" +1004 46 dataset """fb15k237""" +1004 46 model """unstructuredmodel""" +1004 46 loss """bceaftersigmoid""" +1004 46 regularizer """no""" +1004 46 optimizer """adam""" +1004 46 training_loop """owa""" +1004 46 negative_sampler """basic""" +1004 46 evaluator """rankbased""" +1004 47 dataset """fb15k237""" +1004 47 model """unstructuredmodel""" +1004 47 loss """bceaftersigmoid""" +1004 47 regularizer """no""" +1004 47 optimizer """adam""" +1004 47 training_loop """owa""" +1004 47 negative_sampler """basic""" +1004 
47 evaluator """rankbased""" +1004 48 dataset """fb15k237""" +1004 48 model """unstructuredmodel""" +1004 48 loss """bceaftersigmoid""" +1004 48 regularizer """no""" +1004 48 optimizer """adam""" +1004 48 training_loop """owa""" +1004 48 negative_sampler """basic""" +1004 48 evaluator """rankbased""" +1004 49 dataset """fb15k237""" +1004 49 model """unstructuredmodel""" +1004 49 loss """bceaftersigmoid""" +1004 49 regularizer """no""" +1004 49 optimizer """adam""" +1004 49 training_loop """owa""" +1004 49 negative_sampler """basic""" +1004 49 evaluator """rankbased""" +1004 50 dataset """fb15k237""" +1004 50 model """unstructuredmodel""" +1004 50 loss """bceaftersigmoid""" +1004 50 regularizer """no""" +1004 50 optimizer """adam""" +1004 50 training_loop """owa""" +1004 50 negative_sampler """basic""" +1004 50 evaluator """rankbased""" +1004 51 dataset """fb15k237""" +1004 51 model """unstructuredmodel""" +1004 51 loss """bceaftersigmoid""" +1004 51 regularizer """no""" +1004 51 optimizer """adam""" +1004 51 training_loop """owa""" +1004 51 negative_sampler """basic""" +1004 51 evaluator """rankbased""" +1004 52 dataset """fb15k237""" +1004 52 model """unstructuredmodel""" +1004 52 loss """bceaftersigmoid""" +1004 52 regularizer """no""" +1004 52 optimizer """adam""" +1004 52 training_loop """owa""" +1004 52 negative_sampler """basic""" +1004 52 evaluator """rankbased""" +1004 53 dataset """fb15k237""" +1004 53 model """unstructuredmodel""" +1004 53 loss """bceaftersigmoid""" +1004 53 regularizer """no""" +1004 53 optimizer """adam""" +1004 53 training_loop """owa""" +1004 53 negative_sampler """basic""" +1004 53 evaluator """rankbased""" +1004 54 dataset """fb15k237""" +1004 54 model """unstructuredmodel""" +1004 54 loss """bceaftersigmoid""" +1004 54 regularizer """no""" +1004 54 optimizer """adam""" +1004 54 training_loop """owa""" +1004 54 negative_sampler """basic""" +1004 54 evaluator """rankbased""" +1004 55 dataset """fb15k237""" +1004 55 model 
"""unstructuredmodel""" +1004 55 loss """bceaftersigmoid""" +1004 55 regularizer """no""" +1004 55 optimizer """adam""" +1004 55 training_loop """owa""" +1004 55 negative_sampler """basic""" +1004 55 evaluator """rankbased""" +1004 56 dataset """fb15k237""" +1004 56 model """unstructuredmodel""" +1004 56 loss """bceaftersigmoid""" +1004 56 regularizer """no""" +1004 56 optimizer """adam""" +1004 56 training_loop """owa""" +1004 56 negative_sampler """basic""" +1004 56 evaluator """rankbased""" +1004 57 dataset """fb15k237""" +1004 57 model """unstructuredmodel""" +1004 57 loss """bceaftersigmoid""" +1004 57 regularizer """no""" +1004 57 optimizer """adam""" +1004 57 training_loop """owa""" +1004 57 negative_sampler """basic""" +1004 57 evaluator """rankbased""" +1004 58 dataset """fb15k237""" +1004 58 model """unstructuredmodel""" +1004 58 loss """bceaftersigmoid""" +1004 58 regularizer """no""" +1004 58 optimizer """adam""" +1004 58 training_loop """owa""" +1004 58 negative_sampler """basic""" +1004 58 evaluator """rankbased""" +1004 59 dataset """fb15k237""" +1004 59 model """unstructuredmodel""" +1004 59 loss """bceaftersigmoid""" +1004 59 regularizer """no""" +1004 59 optimizer """adam""" +1004 59 training_loop """owa""" +1004 59 negative_sampler """basic""" +1004 59 evaluator """rankbased""" +1004 60 dataset """fb15k237""" +1004 60 model """unstructuredmodel""" +1004 60 loss """bceaftersigmoid""" +1004 60 regularizer """no""" +1004 60 optimizer """adam""" +1004 60 training_loop """owa""" +1004 60 negative_sampler """basic""" +1004 60 evaluator """rankbased""" +1004 61 dataset """fb15k237""" +1004 61 model """unstructuredmodel""" +1004 61 loss """bceaftersigmoid""" +1004 61 regularizer """no""" +1004 61 optimizer """adam""" +1004 61 training_loop """owa""" +1004 61 negative_sampler """basic""" +1004 61 evaluator """rankbased""" +1004 62 dataset """fb15k237""" +1004 62 model """unstructuredmodel""" +1004 62 loss """bceaftersigmoid""" +1004 62 regularizer 
"""no""" +1004 62 optimizer """adam""" +1004 62 training_loop """owa""" +1004 62 negative_sampler """basic""" +1004 62 evaluator """rankbased""" +1004 63 dataset """fb15k237""" +1004 63 model """unstructuredmodel""" +1004 63 loss """bceaftersigmoid""" +1004 63 regularizer """no""" +1004 63 optimizer """adam""" +1004 63 training_loop """owa""" +1004 63 negative_sampler """basic""" +1004 63 evaluator """rankbased""" +1004 64 dataset """fb15k237""" +1004 64 model """unstructuredmodel""" +1004 64 loss """bceaftersigmoid""" +1004 64 regularizer """no""" +1004 64 optimizer """adam""" +1004 64 training_loop """owa""" +1004 64 negative_sampler """basic""" +1004 64 evaluator """rankbased""" +1004 65 dataset """fb15k237""" +1004 65 model """unstructuredmodel""" +1004 65 loss """bceaftersigmoid""" +1004 65 regularizer """no""" +1004 65 optimizer """adam""" +1004 65 training_loop """owa""" +1004 65 negative_sampler """basic""" +1004 65 evaluator """rankbased""" +1004 66 dataset """fb15k237""" +1004 66 model """unstructuredmodel""" +1004 66 loss """bceaftersigmoid""" +1004 66 regularizer """no""" +1004 66 optimizer """adam""" +1004 66 training_loop """owa""" +1004 66 negative_sampler """basic""" +1004 66 evaluator """rankbased""" +1004 67 dataset """fb15k237""" +1004 67 model """unstructuredmodel""" +1004 67 loss """bceaftersigmoid""" +1004 67 regularizer """no""" +1004 67 optimizer """adam""" +1004 67 training_loop """owa""" +1004 67 negative_sampler """basic""" +1004 67 evaluator """rankbased""" +1004 68 dataset """fb15k237""" +1004 68 model """unstructuredmodel""" +1004 68 loss """bceaftersigmoid""" +1004 68 regularizer """no""" +1004 68 optimizer """adam""" +1004 68 training_loop """owa""" +1004 68 negative_sampler """basic""" +1004 68 evaluator """rankbased""" +1004 69 dataset """fb15k237""" +1004 69 model """unstructuredmodel""" +1004 69 loss """bceaftersigmoid""" +1004 69 regularizer """no""" +1004 69 optimizer """adam""" +1004 69 training_loop """owa""" +1004 69 
negative_sampler """basic""" +1004 69 evaluator """rankbased""" +1004 70 dataset """fb15k237""" +1004 70 model """unstructuredmodel""" +1004 70 loss """bceaftersigmoid""" +1004 70 regularizer """no""" +1004 70 optimizer """adam""" +1004 70 training_loop """owa""" +1004 70 negative_sampler """basic""" +1004 70 evaluator """rankbased""" +1004 71 dataset """fb15k237""" +1004 71 model """unstructuredmodel""" +1004 71 loss """bceaftersigmoid""" +1004 71 regularizer """no""" +1004 71 optimizer """adam""" +1004 71 training_loop """owa""" +1004 71 negative_sampler """basic""" +1004 71 evaluator """rankbased""" +1004 72 dataset """fb15k237""" +1004 72 model """unstructuredmodel""" +1004 72 loss """bceaftersigmoid""" +1004 72 regularizer """no""" +1004 72 optimizer """adam""" +1004 72 training_loop """owa""" +1004 72 negative_sampler """basic""" +1004 72 evaluator """rankbased""" +1004 73 dataset """fb15k237""" +1004 73 model """unstructuredmodel""" +1004 73 loss """bceaftersigmoid""" +1004 73 regularizer """no""" +1004 73 optimizer """adam""" +1004 73 training_loop """owa""" +1004 73 negative_sampler """basic""" +1004 73 evaluator """rankbased""" +1004 74 dataset """fb15k237""" +1004 74 model """unstructuredmodel""" +1004 74 loss """bceaftersigmoid""" +1004 74 regularizer """no""" +1004 74 optimizer """adam""" +1004 74 training_loop """owa""" +1004 74 negative_sampler """basic""" +1004 74 evaluator """rankbased""" +1004 75 dataset """fb15k237""" +1004 75 model """unstructuredmodel""" +1004 75 loss """bceaftersigmoid""" +1004 75 regularizer """no""" +1004 75 optimizer """adam""" +1004 75 training_loop """owa""" +1004 75 negative_sampler """basic""" +1004 75 evaluator """rankbased""" +1004 76 dataset """fb15k237""" +1004 76 model """unstructuredmodel""" +1004 76 loss """bceaftersigmoid""" +1004 76 regularizer """no""" +1004 76 optimizer """adam""" +1004 76 training_loop """owa""" +1004 76 negative_sampler """basic""" +1004 76 evaluator """rankbased""" +1004 77 dataset 
"""fb15k237""" +1004 77 model """unstructuredmodel""" +1004 77 loss """bceaftersigmoid""" +1004 77 regularizer """no""" +1004 77 optimizer """adam""" +1004 77 training_loop """owa""" +1004 77 negative_sampler """basic""" +1004 77 evaluator """rankbased""" +1004 78 dataset """fb15k237""" +1004 78 model """unstructuredmodel""" +1004 78 loss """bceaftersigmoid""" +1004 78 regularizer """no""" +1004 78 optimizer """adam""" +1004 78 training_loop """owa""" +1004 78 negative_sampler """basic""" +1004 78 evaluator """rankbased""" +1004 79 dataset """fb15k237""" +1004 79 model """unstructuredmodel""" +1004 79 loss """bceaftersigmoid""" +1004 79 regularizer """no""" +1004 79 optimizer """adam""" +1004 79 training_loop """owa""" +1004 79 negative_sampler """basic""" +1004 79 evaluator """rankbased""" +1004 80 dataset """fb15k237""" +1004 80 model """unstructuredmodel""" +1004 80 loss """bceaftersigmoid""" +1004 80 regularizer """no""" +1004 80 optimizer """adam""" +1004 80 training_loop """owa""" +1004 80 negative_sampler """basic""" +1004 80 evaluator """rankbased""" +1004 81 dataset """fb15k237""" +1004 81 model """unstructuredmodel""" +1004 81 loss """bceaftersigmoid""" +1004 81 regularizer """no""" +1004 81 optimizer """adam""" +1004 81 training_loop """owa""" +1004 81 negative_sampler """basic""" +1004 81 evaluator """rankbased""" +1004 82 dataset """fb15k237""" +1004 82 model """unstructuredmodel""" +1004 82 loss """bceaftersigmoid""" +1004 82 regularizer """no""" +1004 82 optimizer """adam""" +1004 82 training_loop """owa""" +1004 82 negative_sampler """basic""" +1004 82 evaluator """rankbased""" +1004 83 dataset """fb15k237""" +1004 83 model """unstructuredmodel""" +1004 83 loss """bceaftersigmoid""" +1004 83 regularizer """no""" +1004 83 optimizer """adam""" +1004 83 training_loop """owa""" +1004 83 negative_sampler """basic""" +1004 83 evaluator """rankbased""" +1004 84 dataset """fb15k237""" +1004 84 model """unstructuredmodel""" +1004 84 loss 
"""bceaftersigmoid""" +1004 84 regularizer """no""" +1004 84 optimizer """adam""" +1004 84 training_loop """owa""" +1004 84 negative_sampler """basic""" +1004 84 evaluator """rankbased""" +1004 85 dataset """fb15k237""" +1004 85 model """unstructuredmodel""" +1004 85 loss """bceaftersigmoid""" +1004 85 regularizer """no""" +1004 85 optimizer """adam""" +1004 85 training_loop """owa""" +1004 85 negative_sampler """basic""" +1004 85 evaluator """rankbased""" +1004 86 dataset """fb15k237""" +1004 86 model """unstructuredmodel""" +1004 86 loss """bceaftersigmoid""" +1004 86 regularizer """no""" +1004 86 optimizer """adam""" +1004 86 training_loop """owa""" +1004 86 negative_sampler """basic""" +1004 86 evaluator """rankbased""" +1004 87 dataset """fb15k237""" +1004 87 model """unstructuredmodel""" +1004 87 loss """bceaftersigmoid""" +1004 87 regularizer """no""" +1004 87 optimizer """adam""" +1004 87 training_loop """owa""" +1004 87 negative_sampler """basic""" +1004 87 evaluator """rankbased""" +1004 88 dataset """fb15k237""" +1004 88 model """unstructuredmodel""" +1004 88 loss """bceaftersigmoid""" +1004 88 regularizer """no""" +1004 88 optimizer """adam""" +1004 88 training_loop """owa""" +1004 88 negative_sampler """basic""" +1004 88 evaluator """rankbased""" +1004 89 dataset """fb15k237""" +1004 89 model """unstructuredmodel""" +1004 89 loss """bceaftersigmoid""" +1004 89 regularizer """no""" +1004 89 optimizer """adam""" +1004 89 training_loop """owa""" +1004 89 negative_sampler """basic""" +1004 89 evaluator """rankbased""" +1004 90 dataset """fb15k237""" +1004 90 model """unstructuredmodel""" +1004 90 loss """bceaftersigmoid""" +1004 90 regularizer """no""" +1004 90 optimizer """adam""" +1004 90 training_loop """owa""" +1004 90 negative_sampler """basic""" +1004 90 evaluator """rankbased""" +1004 91 dataset """fb15k237""" +1004 91 model """unstructuredmodel""" +1004 91 loss """bceaftersigmoid""" +1004 91 regularizer """no""" +1004 91 optimizer """adam""" +1004 
91 training_loop """owa""" +1004 91 negative_sampler """basic""" +1004 91 evaluator """rankbased""" +1004 92 dataset """fb15k237""" +1004 92 model """unstructuredmodel""" +1004 92 loss """bceaftersigmoid""" +1004 92 regularizer """no""" +1004 92 optimizer """adam""" +1004 92 training_loop """owa""" +1004 92 negative_sampler """basic""" +1004 92 evaluator """rankbased""" +1004 93 dataset """fb15k237""" +1004 93 model """unstructuredmodel""" +1004 93 loss """bceaftersigmoid""" +1004 93 regularizer """no""" +1004 93 optimizer """adam""" +1004 93 training_loop """owa""" +1004 93 negative_sampler """basic""" +1004 93 evaluator """rankbased""" +1004 94 dataset """fb15k237""" +1004 94 model """unstructuredmodel""" +1004 94 loss """bceaftersigmoid""" +1004 94 regularizer """no""" +1004 94 optimizer """adam""" +1004 94 training_loop """owa""" +1004 94 negative_sampler """basic""" +1004 94 evaluator """rankbased""" +1004 95 dataset """fb15k237""" +1004 95 model """unstructuredmodel""" +1004 95 loss """bceaftersigmoid""" +1004 95 regularizer """no""" +1004 95 optimizer """adam""" +1004 95 training_loop """owa""" +1004 95 negative_sampler """basic""" +1004 95 evaluator """rankbased""" +1004 96 dataset """fb15k237""" +1004 96 model """unstructuredmodel""" +1004 96 loss """bceaftersigmoid""" +1004 96 regularizer """no""" +1004 96 optimizer """adam""" +1004 96 training_loop """owa""" +1004 96 negative_sampler """basic""" +1004 96 evaluator """rankbased""" +1004 97 dataset """fb15k237""" +1004 97 model """unstructuredmodel""" +1004 97 loss """bceaftersigmoid""" +1004 97 regularizer """no""" +1004 97 optimizer """adam""" +1004 97 training_loop """owa""" +1004 97 negative_sampler """basic""" +1004 97 evaluator """rankbased""" +1004 98 dataset """fb15k237""" +1004 98 model """unstructuredmodel""" +1004 98 loss """bceaftersigmoid""" +1004 98 regularizer """no""" +1004 98 optimizer """adam""" +1004 98 training_loop """owa""" +1004 98 negative_sampler """basic""" +1004 98 evaluator 
"""rankbased""" +1004 99 dataset """fb15k237""" +1004 99 model """unstructuredmodel""" +1004 99 loss """bceaftersigmoid""" +1004 99 regularizer """no""" +1004 99 optimizer """adam""" +1004 99 training_loop """owa""" +1004 99 negative_sampler """basic""" +1004 99 evaluator """rankbased""" +1004 100 dataset """fb15k237""" +1004 100 model """unstructuredmodel""" +1004 100 loss """bceaftersigmoid""" +1004 100 regularizer """no""" +1004 100 optimizer """adam""" +1004 100 training_loop """owa""" +1004 100 negative_sampler """basic""" +1004 100 evaluator """rankbased""" +1005 1 model.embedding_dim 2.0 +1005 1 model.scoring_fct_norm 2.0 +1005 1 optimizer.lr 0.0015445323292437303 +1005 1 negative_sampler.num_negs_per_pos 58.0 +1005 1 training.batch_size 1.0 +1005 2 model.embedding_dim 2.0 +1005 2 model.scoring_fct_norm 2.0 +1005 2 optimizer.lr 0.010519745727329107 +1005 2 negative_sampler.num_negs_per_pos 51.0 +1005 2 training.batch_size 2.0 +1005 3 model.embedding_dim 1.0 +1005 3 model.scoring_fct_norm 1.0 +1005 3 optimizer.lr 0.01690895270237877 +1005 3 negative_sampler.num_negs_per_pos 66.0 +1005 3 training.batch_size 2.0 +1005 4 model.embedding_dim 0.0 +1005 4 model.scoring_fct_norm 1.0 +1005 4 optimizer.lr 0.014874417575378664 +1005 4 negative_sampler.num_negs_per_pos 16.0 +1005 4 training.batch_size 2.0 +1005 5 model.embedding_dim 2.0 +1005 5 model.scoring_fct_norm 1.0 +1005 5 optimizer.lr 0.0022234773649769525 +1005 5 negative_sampler.num_negs_per_pos 86.0 +1005 5 training.batch_size 0.0 +1005 6 model.embedding_dim 2.0 +1005 6 model.scoring_fct_norm 2.0 +1005 6 optimizer.lr 0.016921480114955402 +1005 6 negative_sampler.num_negs_per_pos 49.0 +1005 6 training.batch_size 1.0 +1005 7 model.embedding_dim 0.0 +1005 7 model.scoring_fct_norm 1.0 +1005 7 optimizer.lr 0.0271586098814142 +1005 7 negative_sampler.num_negs_per_pos 58.0 +1005 7 training.batch_size 1.0 +1005 8 model.embedding_dim 1.0 +1005 8 model.scoring_fct_norm 2.0 +1005 8 optimizer.lr 0.005564560142818137 +1005 
8 negative_sampler.num_negs_per_pos 39.0 +1005 8 training.batch_size 0.0 +1005 9 model.embedding_dim 2.0 +1005 9 model.scoring_fct_norm 2.0 +1005 9 optimizer.lr 0.013941469307978637 +1005 9 negative_sampler.num_negs_per_pos 85.0 +1005 9 training.batch_size 1.0 +1005 10 model.embedding_dim 0.0 +1005 10 model.scoring_fct_norm 2.0 +1005 10 optimizer.lr 0.001079732247000891 +1005 10 negative_sampler.num_negs_per_pos 5.0 +1005 10 training.batch_size 2.0 +1005 11 model.embedding_dim 2.0 +1005 11 model.scoring_fct_norm 1.0 +1005 11 optimizer.lr 0.008269247571482567 +1005 11 negative_sampler.num_negs_per_pos 7.0 +1005 11 training.batch_size 0.0 +1005 12 model.embedding_dim 1.0 +1005 12 model.scoring_fct_norm 1.0 +1005 12 optimizer.lr 0.002898968240868792 +1005 12 negative_sampler.num_negs_per_pos 56.0 +1005 12 training.batch_size 2.0 +1005 13 model.embedding_dim 2.0 +1005 13 model.scoring_fct_norm 1.0 +1005 13 optimizer.lr 0.07720857169093844 +1005 13 negative_sampler.num_negs_per_pos 37.0 +1005 13 training.batch_size 1.0 +1005 14 model.embedding_dim 0.0 +1005 14 model.scoring_fct_norm 2.0 +1005 14 optimizer.lr 0.04366962342858223 +1005 14 negative_sampler.num_negs_per_pos 69.0 +1005 14 training.batch_size 1.0 +1005 15 model.embedding_dim 0.0 +1005 15 model.scoring_fct_norm 1.0 +1005 15 optimizer.lr 0.06601049043132183 +1005 15 negative_sampler.num_negs_per_pos 68.0 +1005 15 training.batch_size 2.0 +1005 16 model.embedding_dim 2.0 +1005 16 model.scoring_fct_norm 2.0 +1005 16 optimizer.lr 0.05257210006364073 +1005 16 negative_sampler.num_negs_per_pos 73.0 +1005 16 training.batch_size 2.0 +1005 17 model.embedding_dim 1.0 +1005 17 model.scoring_fct_norm 1.0 +1005 17 optimizer.lr 0.015023551668396766 +1005 17 negative_sampler.num_negs_per_pos 52.0 +1005 17 training.batch_size 1.0 +1005 18 model.embedding_dim 2.0 +1005 18 model.scoring_fct_norm 1.0 +1005 18 optimizer.lr 0.0017126789196081915 +1005 18 negative_sampler.num_negs_per_pos 74.0 +1005 18 training.batch_size 2.0 +1005 
19 model.embedding_dim 2.0 +1005 19 model.scoring_fct_norm 1.0 +1005 19 optimizer.lr 0.05813323116270665 +1005 19 negative_sampler.num_negs_per_pos 4.0 +1005 19 training.batch_size 0.0 +1005 20 model.embedding_dim 2.0 +1005 20 model.scoring_fct_norm 2.0 +1005 20 optimizer.lr 0.06116940835912117 +1005 20 negative_sampler.num_negs_per_pos 13.0 +1005 20 training.batch_size 1.0 +1005 21 model.embedding_dim 1.0 +1005 21 model.scoring_fct_norm 1.0 +1005 21 optimizer.lr 0.00118279146044219 +1005 21 negative_sampler.num_negs_per_pos 98.0 +1005 21 training.batch_size 2.0 +1005 22 model.embedding_dim 1.0 +1005 22 model.scoring_fct_norm 1.0 +1005 22 optimizer.lr 0.004385205342858366 +1005 22 negative_sampler.num_negs_per_pos 30.0 +1005 22 training.batch_size 0.0 +1005 23 model.embedding_dim 1.0 +1005 23 model.scoring_fct_norm 2.0 +1005 23 optimizer.lr 0.020158458236913635 +1005 23 negative_sampler.num_negs_per_pos 12.0 +1005 23 training.batch_size 2.0 +1005 24 model.embedding_dim 2.0 +1005 24 model.scoring_fct_norm 2.0 +1005 24 optimizer.lr 0.07079894656397882 +1005 24 negative_sampler.num_negs_per_pos 71.0 +1005 24 training.batch_size 1.0 +1005 25 model.embedding_dim 0.0 +1005 25 model.scoring_fct_norm 1.0 +1005 25 optimizer.lr 0.006445649124810469 +1005 25 negative_sampler.num_negs_per_pos 75.0 +1005 25 training.batch_size 0.0 +1005 26 model.embedding_dim 2.0 +1005 26 model.scoring_fct_norm 1.0 +1005 26 optimizer.lr 0.006617285926297135 +1005 26 negative_sampler.num_negs_per_pos 9.0 +1005 26 training.batch_size 0.0 +1005 27 model.embedding_dim 2.0 +1005 27 model.scoring_fct_norm 2.0 +1005 27 optimizer.lr 0.003563917396370049 +1005 27 negative_sampler.num_negs_per_pos 70.0 +1005 27 training.batch_size 0.0 +1005 28 model.embedding_dim 2.0 +1005 28 model.scoring_fct_norm 2.0 +1005 28 optimizer.lr 0.0020010441006783784 +1005 28 negative_sampler.num_negs_per_pos 74.0 +1005 28 training.batch_size 0.0 +1005 29 model.embedding_dim 1.0 +1005 29 model.scoring_fct_norm 2.0 +1005 29 
optimizer.lr 0.0011351020590248456 +1005 29 negative_sampler.num_negs_per_pos 75.0 +1005 29 training.batch_size 2.0 +1005 30 model.embedding_dim 2.0 +1005 30 model.scoring_fct_norm 1.0 +1005 30 optimizer.lr 0.0013190882702394256 +1005 30 negative_sampler.num_negs_per_pos 38.0 +1005 30 training.batch_size 1.0 +1005 31 model.embedding_dim 1.0 +1005 31 model.scoring_fct_norm 2.0 +1005 31 optimizer.lr 0.003641893086504388 +1005 31 negative_sampler.num_negs_per_pos 46.0 +1005 31 training.batch_size 0.0 +1005 32 model.embedding_dim 1.0 +1005 32 model.scoring_fct_norm 1.0 +1005 32 optimizer.lr 0.0011885717770588069 +1005 32 negative_sampler.num_negs_per_pos 73.0 +1005 32 training.batch_size 2.0 +1005 33 model.embedding_dim 1.0 +1005 33 model.scoring_fct_norm 1.0 +1005 33 optimizer.lr 0.07596765950283124 +1005 33 negative_sampler.num_negs_per_pos 87.0 +1005 33 training.batch_size 2.0 +1005 34 model.embedding_dim 0.0 +1005 34 model.scoring_fct_norm 1.0 +1005 34 optimizer.lr 0.020164329604230785 +1005 34 negative_sampler.num_negs_per_pos 23.0 +1005 34 training.batch_size 1.0 +1005 35 model.embedding_dim 2.0 +1005 35 model.scoring_fct_norm 1.0 +1005 35 optimizer.lr 0.056137042081022354 +1005 35 negative_sampler.num_negs_per_pos 43.0 +1005 35 training.batch_size 1.0 +1005 36 model.embedding_dim 1.0 +1005 36 model.scoring_fct_norm 2.0 +1005 36 optimizer.lr 0.001634413232299629 +1005 36 negative_sampler.num_negs_per_pos 41.0 +1005 36 training.batch_size 1.0 +1005 37 model.embedding_dim 2.0 +1005 37 model.scoring_fct_norm 1.0 +1005 37 optimizer.lr 0.0021097991560549813 +1005 37 negative_sampler.num_negs_per_pos 12.0 +1005 37 training.batch_size 2.0 +1005 38 model.embedding_dim 0.0 +1005 38 model.scoring_fct_norm 2.0 +1005 38 optimizer.lr 0.036358949511259585 +1005 38 negative_sampler.num_negs_per_pos 53.0 +1005 38 training.batch_size 0.0 +1005 39 model.embedding_dim 0.0 +1005 39 model.scoring_fct_norm 2.0 +1005 39 optimizer.lr 0.019463957942527126 +1005 39 
negative_sampler.num_negs_per_pos 21.0 +1005 39 training.batch_size 1.0 +1005 40 model.embedding_dim 1.0 +1005 40 model.scoring_fct_norm 1.0 +1005 40 optimizer.lr 0.002287608863418629 +1005 40 negative_sampler.num_negs_per_pos 45.0 +1005 40 training.batch_size 2.0 +1005 41 model.embedding_dim 0.0 +1005 41 model.scoring_fct_norm 1.0 +1005 41 optimizer.lr 0.04537760191943721 +1005 41 negative_sampler.num_negs_per_pos 27.0 +1005 41 training.batch_size 0.0 +1005 42 model.embedding_dim 2.0 +1005 42 model.scoring_fct_norm 1.0 +1005 42 optimizer.lr 0.0010375784895440595 +1005 42 negative_sampler.num_negs_per_pos 93.0 +1005 42 training.batch_size 0.0 +1005 43 model.embedding_dim 0.0 +1005 43 model.scoring_fct_norm 2.0 +1005 43 optimizer.lr 0.08986376806232303 +1005 43 negative_sampler.num_negs_per_pos 82.0 +1005 43 training.batch_size 0.0 +1005 44 model.embedding_dim 1.0 +1005 44 model.scoring_fct_norm 2.0 +1005 44 optimizer.lr 0.042647156737904306 +1005 44 negative_sampler.num_negs_per_pos 65.0 +1005 44 training.batch_size 2.0 +1005 45 model.embedding_dim 0.0 +1005 45 model.scoring_fct_norm 1.0 +1005 45 optimizer.lr 0.01567646927942449 +1005 45 negative_sampler.num_negs_per_pos 18.0 +1005 45 training.batch_size 2.0 +1005 46 model.embedding_dim 2.0 +1005 46 model.scoring_fct_norm 2.0 +1005 46 optimizer.lr 0.005261661152239424 +1005 46 negative_sampler.num_negs_per_pos 1.0 +1005 46 training.batch_size 2.0 +1005 47 model.embedding_dim 1.0 +1005 47 model.scoring_fct_norm 2.0 +1005 47 optimizer.lr 0.0027027407508194833 +1005 47 negative_sampler.num_negs_per_pos 53.0 +1005 47 training.batch_size 1.0 +1005 48 model.embedding_dim 1.0 +1005 48 model.scoring_fct_norm 2.0 +1005 48 optimizer.lr 0.004798315549271477 +1005 48 negative_sampler.num_negs_per_pos 10.0 +1005 48 training.batch_size 2.0 +1005 49 model.embedding_dim 1.0 +1005 49 model.scoring_fct_norm 2.0 +1005 49 optimizer.lr 0.001650787568495099 +1005 49 negative_sampler.num_negs_per_pos 74.0 +1005 49 training.batch_size 1.0 
+1005 50 model.embedding_dim 0.0 +1005 50 model.scoring_fct_norm 2.0 +1005 50 optimizer.lr 0.015917171790857144 +1005 50 negative_sampler.num_negs_per_pos 24.0 +1005 50 training.batch_size 2.0 +1005 51 model.embedding_dim 1.0 +1005 51 model.scoring_fct_norm 1.0 +1005 51 optimizer.lr 0.0015971585135452046 +1005 51 negative_sampler.num_negs_per_pos 9.0 +1005 51 training.batch_size 1.0 +1005 52 model.embedding_dim 2.0 +1005 52 model.scoring_fct_norm 2.0 +1005 52 optimizer.lr 0.010186130707873251 +1005 52 negative_sampler.num_negs_per_pos 81.0 +1005 52 training.batch_size 0.0 +1005 53 model.embedding_dim 1.0 +1005 53 model.scoring_fct_norm 2.0 +1005 53 optimizer.lr 0.006393697942049038 +1005 53 negative_sampler.num_negs_per_pos 48.0 +1005 53 training.batch_size 2.0 +1005 54 model.embedding_dim 1.0 +1005 54 model.scoring_fct_norm 1.0 +1005 54 optimizer.lr 0.05161040694717531 +1005 54 negative_sampler.num_negs_per_pos 84.0 +1005 54 training.batch_size 1.0 +1005 55 model.embedding_dim 1.0 +1005 55 model.scoring_fct_norm 2.0 +1005 55 optimizer.lr 0.003947557535980309 +1005 55 negative_sampler.num_negs_per_pos 96.0 +1005 55 training.batch_size 0.0 +1005 56 model.embedding_dim 0.0 +1005 56 model.scoring_fct_norm 1.0 +1005 56 optimizer.lr 0.014682799980932982 +1005 56 negative_sampler.num_negs_per_pos 31.0 +1005 56 training.batch_size 1.0 +1005 57 model.embedding_dim 1.0 +1005 57 model.scoring_fct_norm 2.0 +1005 57 optimizer.lr 0.0019465549078177841 +1005 57 negative_sampler.num_negs_per_pos 91.0 +1005 57 training.batch_size 1.0 +1005 58 model.embedding_dim 0.0 +1005 58 model.scoring_fct_norm 1.0 +1005 58 optimizer.lr 0.0778605756377262 +1005 58 negative_sampler.num_negs_per_pos 63.0 +1005 58 training.batch_size 1.0 +1005 59 model.embedding_dim 0.0 +1005 59 model.scoring_fct_norm 2.0 +1005 59 optimizer.lr 0.01989975126997039 +1005 59 negative_sampler.num_negs_per_pos 16.0 +1005 59 training.batch_size 0.0 +1005 60 model.embedding_dim 2.0 +1005 60 model.scoring_fct_norm 2.0 
+1005 60 optimizer.lr 0.005530085922417849 +1005 60 negative_sampler.num_negs_per_pos 18.0 +1005 60 training.batch_size 2.0 +1005 61 model.embedding_dim 1.0 +1005 61 model.scoring_fct_norm 1.0 +1005 61 optimizer.lr 0.052247011217035705 +1005 61 negative_sampler.num_negs_per_pos 56.0 +1005 61 training.batch_size 0.0 +1005 62 model.embedding_dim 1.0 +1005 62 model.scoring_fct_norm 1.0 +1005 62 optimizer.lr 0.0025637017574020446 +1005 62 negative_sampler.num_negs_per_pos 60.0 +1005 62 training.batch_size 0.0 +1005 63 model.embedding_dim 0.0 +1005 63 model.scoring_fct_norm 2.0 +1005 63 optimizer.lr 0.011746762964875082 +1005 63 negative_sampler.num_negs_per_pos 84.0 +1005 63 training.batch_size 2.0 +1005 64 model.embedding_dim 1.0 +1005 64 model.scoring_fct_norm 1.0 +1005 64 optimizer.lr 0.0741064834262959 +1005 64 negative_sampler.num_negs_per_pos 51.0 +1005 64 training.batch_size 0.0 +1005 65 model.embedding_dim 0.0 +1005 65 model.scoring_fct_norm 2.0 +1005 65 optimizer.lr 0.04977308840346096 +1005 65 negative_sampler.num_negs_per_pos 74.0 +1005 65 training.batch_size 1.0 +1005 66 model.embedding_dim 2.0 +1005 66 model.scoring_fct_norm 1.0 +1005 66 optimizer.lr 0.01378468291948651 +1005 66 negative_sampler.num_negs_per_pos 34.0 +1005 66 training.batch_size 0.0 +1005 67 model.embedding_dim 0.0 +1005 67 model.scoring_fct_norm 2.0 +1005 67 optimizer.lr 0.008959208813239364 +1005 67 negative_sampler.num_negs_per_pos 70.0 +1005 67 training.batch_size 1.0 +1005 68 model.embedding_dim 2.0 +1005 68 model.scoring_fct_norm 2.0 +1005 68 optimizer.lr 0.017400299212842037 +1005 68 negative_sampler.num_negs_per_pos 70.0 +1005 68 training.batch_size 1.0 +1005 69 model.embedding_dim 1.0 +1005 69 model.scoring_fct_norm 1.0 +1005 69 optimizer.lr 0.031307590339200685 +1005 69 negative_sampler.num_negs_per_pos 89.0 +1005 69 training.batch_size 1.0 +1005 70 model.embedding_dim 2.0 +1005 70 model.scoring_fct_norm 2.0 +1005 70 optimizer.lr 0.008630064408830527 +1005 70 
negative_sampler.num_negs_per_pos 44.0 +1005 70 training.batch_size 2.0 +1005 71 model.embedding_dim 1.0 +1005 71 model.scoring_fct_norm 2.0 +1005 71 optimizer.lr 0.04998850994019677 +1005 71 negative_sampler.num_negs_per_pos 69.0 +1005 71 training.batch_size 1.0 +1005 72 model.embedding_dim 1.0 +1005 72 model.scoring_fct_norm 1.0 +1005 72 optimizer.lr 0.0010841394170618276 +1005 72 negative_sampler.num_negs_per_pos 80.0 +1005 72 training.batch_size 1.0 +1005 73 model.embedding_dim 2.0 +1005 73 model.scoring_fct_norm 1.0 +1005 73 optimizer.lr 0.0517912211664011 +1005 73 negative_sampler.num_negs_per_pos 25.0 +1005 73 training.batch_size 2.0 +1005 74 model.embedding_dim 2.0 +1005 74 model.scoring_fct_norm 2.0 +1005 74 optimizer.lr 0.006728961689622877 +1005 74 negative_sampler.num_negs_per_pos 84.0 +1005 74 training.batch_size 1.0 +1005 75 model.embedding_dim 1.0 +1005 75 model.scoring_fct_norm 1.0 +1005 75 optimizer.lr 0.013730522794017219 +1005 75 negative_sampler.num_negs_per_pos 69.0 +1005 75 training.batch_size 0.0 +1005 76 model.embedding_dim 2.0 +1005 76 model.scoring_fct_norm 1.0 +1005 76 optimizer.lr 0.028165841492387763 +1005 76 negative_sampler.num_negs_per_pos 32.0 +1005 76 training.batch_size 2.0 +1005 77 model.embedding_dim 1.0 +1005 77 model.scoring_fct_norm 2.0 +1005 77 optimizer.lr 0.049214328387410265 +1005 77 negative_sampler.num_negs_per_pos 81.0 +1005 77 training.batch_size 2.0 +1005 78 model.embedding_dim 1.0 +1005 78 model.scoring_fct_norm 2.0 +1005 78 optimizer.lr 0.01037756358627981 +1005 78 negative_sampler.num_negs_per_pos 75.0 +1005 78 training.batch_size 1.0 +1005 79 model.embedding_dim 0.0 +1005 79 model.scoring_fct_norm 2.0 +1005 79 optimizer.lr 0.02195575869933628 +1005 79 negative_sampler.num_negs_per_pos 24.0 +1005 79 training.batch_size 2.0 +1005 80 model.embedding_dim 2.0 +1005 80 model.scoring_fct_norm 1.0 +1005 80 optimizer.lr 0.07748677639803175 +1005 80 negative_sampler.num_negs_per_pos 42.0 +1005 80 training.batch_size 2.0 
+1005 81 model.embedding_dim 1.0 +1005 81 model.scoring_fct_norm 1.0 +1005 81 optimizer.lr 0.01058525342289056 +1005 81 negative_sampler.num_negs_per_pos 95.0 +1005 81 training.batch_size 0.0 +1005 82 model.embedding_dim 2.0 +1005 82 model.scoring_fct_norm 1.0 +1005 82 optimizer.lr 0.001910303119189772 +1005 82 negative_sampler.num_negs_per_pos 38.0 +1005 82 training.batch_size 2.0 +1005 83 model.embedding_dim 0.0 +1005 83 model.scoring_fct_norm 2.0 +1005 83 optimizer.lr 0.06683271445285907 +1005 83 negative_sampler.num_negs_per_pos 77.0 +1005 83 training.batch_size 0.0 +1005 84 model.embedding_dim 1.0 +1005 84 model.scoring_fct_norm 2.0 +1005 84 optimizer.lr 0.0024492212706532066 +1005 84 negative_sampler.num_negs_per_pos 91.0 +1005 84 training.batch_size 1.0 +1005 85 model.embedding_dim 1.0 +1005 85 model.scoring_fct_norm 2.0 +1005 85 optimizer.lr 0.00367596456501526 +1005 85 negative_sampler.num_negs_per_pos 65.0 +1005 85 training.batch_size 1.0 +1005 86 model.embedding_dim 0.0 +1005 86 model.scoring_fct_norm 2.0 +1005 86 optimizer.lr 0.001285270427857856 +1005 86 negative_sampler.num_negs_per_pos 69.0 +1005 86 training.batch_size 2.0 +1005 87 model.embedding_dim 2.0 +1005 87 model.scoring_fct_norm 2.0 +1005 87 optimizer.lr 0.04937699204389825 +1005 87 negative_sampler.num_negs_per_pos 56.0 +1005 87 training.batch_size 1.0 +1005 88 model.embedding_dim 2.0 +1005 88 model.scoring_fct_norm 2.0 +1005 88 optimizer.lr 0.006491265600076624 +1005 88 negative_sampler.num_negs_per_pos 63.0 +1005 88 training.batch_size 1.0 +1005 89 model.embedding_dim 0.0 +1005 89 model.scoring_fct_norm 2.0 +1005 89 optimizer.lr 0.0055832131130685225 +1005 89 negative_sampler.num_negs_per_pos 99.0 +1005 89 training.batch_size 2.0 +1005 90 model.embedding_dim 1.0 +1005 90 model.scoring_fct_norm 1.0 +1005 90 optimizer.lr 0.004465752527339644 +1005 90 negative_sampler.num_negs_per_pos 44.0 +1005 90 training.batch_size 1.0 +1005 91 model.embedding_dim 0.0 +1005 91 model.scoring_fct_norm 2.0 
+1005 91 optimizer.lr 0.0018303121118582034 +1005 91 negative_sampler.num_negs_per_pos 56.0 +1005 91 training.batch_size 0.0 +1005 92 model.embedding_dim 2.0 +1005 92 model.scoring_fct_norm 1.0 +1005 92 optimizer.lr 0.025500760386998136 +1005 92 negative_sampler.num_negs_per_pos 36.0 +1005 92 training.batch_size 1.0 +1005 93 model.embedding_dim 1.0 +1005 93 model.scoring_fct_norm 2.0 +1005 93 optimizer.lr 0.013775132554250225 +1005 93 negative_sampler.num_negs_per_pos 42.0 +1005 93 training.batch_size 2.0 +1005 94 model.embedding_dim 2.0 +1005 94 model.scoring_fct_norm 1.0 +1005 94 optimizer.lr 0.0014397338722491881 +1005 94 negative_sampler.num_negs_per_pos 16.0 +1005 94 training.batch_size 0.0 +1005 95 model.embedding_dim 0.0 +1005 95 model.scoring_fct_norm 1.0 +1005 95 optimizer.lr 0.0015270958466402085 +1005 95 negative_sampler.num_negs_per_pos 22.0 +1005 95 training.batch_size 1.0 +1005 96 model.embedding_dim 1.0 +1005 96 model.scoring_fct_norm 2.0 +1005 96 optimizer.lr 0.022608471602983873 +1005 96 negative_sampler.num_negs_per_pos 67.0 +1005 96 training.batch_size 1.0 +1005 97 model.embedding_dim 2.0 +1005 97 model.scoring_fct_norm 2.0 +1005 97 optimizer.lr 0.02957876047675972 +1005 97 negative_sampler.num_negs_per_pos 23.0 +1005 97 training.batch_size 2.0 +1005 98 model.embedding_dim 1.0 +1005 98 model.scoring_fct_norm 1.0 +1005 98 optimizer.lr 0.018679698684456923 +1005 98 negative_sampler.num_negs_per_pos 91.0 +1005 98 training.batch_size 2.0 +1005 99 model.embedding_dim 1.0 +1005 99 model.scoring_fct_norm 2.0 +1005 99 optimizer.lr 0.0031216380844831183 +1005 99 negative_sampler.num_negs_per_pos 15.0 +1005 99 training.batch_size 1.0 +1005 100 model.embedding_dim 1.0 +1005 100 model.scoring_fct_norm 1.0 +1005 100 optimizer.lr 0.024683895490003962 +1005 100 negative_sampler.num_negs_per_pos 22.0 +1005 100 training.batch_size 0.0 +1005 1 dataset """fb15k237""" +1005 1 model """unstructuredmodel""" +1005 1 loss """softplus""" +1005 1 regularizer """no""" 
+1005 1 optimizer """adam""" +1005 1 training_loop """owa""" +1005 1 negative_sampler """basic""" +1005 1 evaluator """rankbased""" +1005 2 dataset """fb15k237""" +1005 2 model """unstructuredmodel""" +1005 2 loss """softplus""" +1005 2 regularizer """no""" +1005 2 optimizer """adam""" +1005 2 training_loop """owa""" +1005 2 negative_sampler """basic""" +1005 2 evaluator """rankbased""" +1005 3 dataset """fb15k237""" +1005 3 model """unstructuredmodel""" +1005 3 loss """softplus""" +1005 3 regularizer """no""" +1005 3 optimizer """adam""" +1005 3 training_loop """owa""" +1005 3 negative_sampler """basic""" +1005 3 evaluator """rankbased""" +1005 4 dataset """fb15k237""" +1005 4 model """unstructuredmodel""" +1005 4 loss """softplus""" +1005 4 regularizer """no""" +1005 4 optimizer """adam""" +1005 4 training_loop """owa""" +1005 4 negative_sampler """basic""" +1005 4 evaluator """rankbased""" +1005 5 dataset """fb15k237""" +1005 5 model """unstructuredmodel""" +1005 5 loss """softplus""" +1005 5 regularizer """no""" +1005 5 optimizer """adam""" +1005 5 training_loop """owa""" +1005 5 negative_sampler """basic""" +1005 5 evaluator """rankbased""" +1005 6 dataset """fb15k237""" +1005 6 model """unstructuredmodel""" +1005 6 loss """softplus""" +1005 6 regularizer """no""" +1005 6 optimizer """adam""" +1005 6 training_loop """owa""" +1005 6 negative_sampler """basic""" +1005 6 evaluator """rankbased""" +1005 7 dataset """fb15k237""" +1005 7 model """unstructuredmodel""" +1005 7 loss """softplus""" +1005 7 regularizer """no""" +1005 7 optimizer """adam""" +1005 7 training_loop """owa""" +1005 7 negative_sampler """basic""" +1005 7 evaluator """rankbased""" +1005 8 dataset """fb15k237""" +1005 8 model """unstructuredmodel""" +1005 8 loss """softplus""" +1005 8 regularizer """no""" +1005 8 optimizer """adam""" +1005 8 training_loop """owa""" +1005 8 negative_sampler """basic""" +1005 8 evaluator """rankbased""" +1005 9 dataset """fb15k237""" +1005 9 model 
"""unstructuredmodel""" +1005 9 loss """softplus""" +1005 9 regularizer """no""" +1005 9 optimizer """adam""" +1005 9 training_loop """owa""" +1005 9 negative_sampler """basic""" +1005 9 evaluator """rankbased""" +1005 10 dataset """fb15k237""" +1005 10 model """unstructuredmodel""" +1005 10 loss """softplus""" +1005 10 regularizer """no""" +1005 10 optimizer """adam""" +1005 10 training_loop """owa""" +1005 10 negative_sampler """basic""" +1005 10 evaluator """rankbased""" +1005 11 dataset """fb15k237""" +1005 11 model """unstructuredmodel""" +1005 11 loss """softplus""" +1005 11 regularizer """no""" +1005 11 optimizer """adam""" +1005 11 training_loop """owa""" +1005 11 negative_sampler """basic""" +1005 11 evaluator """rankbased""" +1005 12 dataset """fb15k237""" +1005 12 model """unstructuredmodel""" +1005 12 loss """softplus""" +1005 12 regularizer """no""" +1005 12 optimizer """adam""" +1005 12 training_loop """owa""" +1005 12 negative_sampler """basic""" +1005 12 evaluator """rankbased""" +1005 13 dataset """fb15k237""" +1005 13 model """unstructuredmodel""" +1005 13 loss """softplus""" +1005 13 regularizer """no""" +1005 13 optimizer """adam""" +1005 13 training_loop """owa""" +1005 13 negative_sampler """basic""" +1005 13 evaluator """rankbased""" +1005 14 dataset """fb15k237""" +1005 14 model """unstructuredmodel""" +1005 14 loss """softplus""" +1005 14 regularizer """no""" +1005 14 optimizer """adam""" +1005 14 training_loop """owa""" +1005 14 negative_sampler """basic""" +1005 14 evaluator """rankbased""" +1005 15 dataset """fb15k237""" +1005 15 model """unstructuredmodel""" +1005 15 loss """softplus""" +1005 15 regularizer """no""" +1005 15 optimizer """adam""" +1005 15 training_loop """owa""" +1005 15 negative_sampler """basic""" +1005 15 evaluator """rankbased""" +1005 16 dataset """fb15k237""" +1005 16 model """unstructuredmodel""" +1005 16 loss """softplus""" +1005 16 regularizer """no""" +1005 16 optimizer """adam""" +1005 16 training_loop 
"""owa""" +1005 16 negative_sampler """basic""" +1005 16 evaluator """rankbased""" +1005 17 dataset """fb15k237""" +1005 17 model """unstructuredmodel""" +1005 17 loss """softplus""" +1005 17 regularizer """no""" +1005 17 optimizer """adam""" +1005 17 training_loop """owa""" +1005 17 negative_sampler """basic""" +1005 17 evaluator """rankbased""" +1005 18 dataset """fb15k237""" +1005 18 model """unstructuredmodel""" +1005 18 loss """softplus""" +1005 18 regularizer """no""" +1005 18 optimizer """adam""" +1005 18 training_loop """owa""" +1005 18 negative_sampler """basic""" +1005 18 evaluator """rankbased""" +1005 19 dataset """fb15k237""" +1005 19 model """unstructuredmodel""" +1005 19 loss """softplus""" +1005 19 regularizer """no""" +1005 19 optimizer """adam""" +1005 19 training_loop """owa""" +1005 19 negative_sampler """basic""" +1005 19 evaluator """rankbased""" +1005 20 dataset """fb15k237""" +1005 20 model """unstructuredmodel""" +1005 20 loss """softplus""" +1005 20 regularizer """no""" +1005 20 optimizer """adam""" +1005 20 training_loop """owa""" +1005 20 negative_sampler """basic""" +1005 20 evaluator """rankbased""" +1005 21 dataset """fb15k237""" +1005 21 model """unstructuredmodel""" +1005 21 loss """softplus""" +1005 21 regularizer """no""" +1005 21 optimizer """adam""" +1005 21 training_loop """owa""" +1005 21 negative_sampler """basic""" +1005 21 evaluator """rankbased""" +1005 22 dataset """fb15k237""" +1005 22 model """unstructuredmodel""" +1005 22 loss """softplus""" +1005 22 regularizer """no""" +1005 22 optimizer """adam""" +1005 22 training_loop """owa""" +1005 22 negative_sampler """basic""" +1005 22 evaluator """rankbased""" +1005 23 dataset """fb15k237""" +1005 23 model """unstructuredmodel""" +1005 23 loss """softplus""" +1005 23 regularizer """no""" +1005 23 optimizer """adam""" +1005 23 training_loop """owa""" +1005 23 negative_sampler """basic""" +1005 23 evaluator """rankbased""" +1005 24 dataset """fb15k237""" +1005 24 model 
"""unstructuredmodel""" +1005 24 loss """softplus""" +1005 24 regularizer """no""" +1005 24 optimizer """adam""" +1005 24 training_loop """owa""" +1005 24 negative_sampler """basic""" +1005 24 evaluator """rankbased""" +1005 25 dataset """fb15k237""" +1005 25 model """unstructuredmodel""" +1005 25 loss """softplus""" +1005 25 regularizer """no""" +1005 25 optimizer """adam""" +1005 25 training_loop """owa""" +1005 25 negative_sampler """basic""" +1005 25 evaluator """rankbased""" +1005 26 dataset """fb15k237""" +1005 26 model """unstructuredmodel""" +1005 26 loss """softplus""" +1005 26 regularizer """no""" +1005 26 optimizer """adam""" +1005 26 training_loop """owa""" +1005 26 negative_sampler """basic""" +1005 26 evaluator """rankbased""" +1005 27 dataset """fb15k237""" +1005 27 model """unstructuredmodel""" +1005 27 loss """softplus""" +1005 27 regularizer """no""" +1005 27 optimizer """adam""" +1005 27 training_loop """owa""" +1005 27 negative_sampler """basic""" +1005 27 evaluator """rankbased""" +1005 28 dataset """fb15k237""" +1005 28 model """unstructuredmodel""" +1005 28 loss """softplus""" +1005 28 regularizer """no""" +1005 28 optimizer """adam""" +1005 28 training_loop """owa""" +1005 28 negative_sampler """basic""" +1005 28 evaluator """rankbased""" +1005 29 dataset """fb15k237""" +1005 29 model """unstructuredmodel""" +1005 29 loss """softplus""" +1005 29 regularizer """no""" +1005 29 optimizer """adam""" +1005 29 training_loop """owa""" +1005 29 negative_sampler """basic""" +1005 29 evaluator """rankbased""" +1005 30 dataset """fb15k237""" +1005 30 model """unstructuredmodel""" +1005 30 loss """softplus""" +1005 30 regularizer """no""" +1005 30 optimizer """adam""" +1005 30 training_loop """owa""" +1005 30 negative_sampler """basic""" +1005 30 evaluator """rankbased""" +1005 31 dataset """fb15k237""" +1005 31 model """unstructuredmodel""" +1005 31 loss """softplus""" +1005 31 regularizer """no""" +1005 31 optimizer """adam""" +1005 31 training_loop 
"""owa""" +1005 31 negative_sampler """basic""" +1005 31 evaluator """rankbased""" +1005 32 dataset """fb15k237""" +1005 32 model """unstructuredmodel""" +1005 32 loss """softplus""" +1005 32 regularizer """no""" +1005 32 optimizer """adam""" +1005 32 training_loop """owa""" +1005 32 negative_sampler """basic""" +1005 32 evaluator """rankbased""" +1005 33 dataset """fb15k237""" +1005 33 model """unstructuredmodel""" +1005 33 loss """softplus""" +1005 33 regularizer """no""" +1005 33 optimizer """adam""" +1005 33 training_loop """owa""" +1005 33 negative_sampler """basic""" +1005 33 evaluator """rankbased""" +1005 34 dataset """fb15k237""" +1005 34 model """unstructuredmodel""" +1005 34 loss """softplus""" +1005 34 regularizer """no""" +1005 34 optimizer """adam""" +1005 34 training_loop """owa""" +1005 34 negative_sampler """basic""" +1005 34 evaluator """rankbased""" +1005 35 dataset """fb15k237""" +1005 35 model """unstructuredmodel""" +1005 35 loss """softplus""" +1005 35 regularizer """no""" +1005 35 optimizer """adam""" +1005 35 training_loop """owa""" +1005 35 negative_sampler """basic""" +1005 35 evaluator """rankbased""" +1005 36 dataset """fb15k237""" +1005 36 model """unstructuredmodel""" +1005 36 loss """softplus""" +1005 36 regularizer """no""" +1005 36 optimizer """adam""" +1005 36 training_loop """owa""" +1005 36 negative_sampler """basic""" +1005 36 evaluator """rankbased""" +1005 37 dataset """fb15k237""" +1005 37 model """unstructuredmodel""" +1005 37 loss """softplus""" +1005 37 regularizer """no""" +1005 37 optimizer """adam""" +1005 37 training_loop """owa""" +1005 37 negative_sampler """basic""" +1005 37 evaluator """rankbased""" +1005 38 dataset """fb15k237""" +1005 38 model """unstructuredmodel""" +1005 38 loss """softplus""" +1005 38 regularizer """no""" +1005 38 optimizer """adam""" +1005 38 training_loop """owa""" +1005 38 negative_sampler """basic""" +1005 38 evaluator """rankbased""" +1005 39 dataset """fb15k237""" +1005 39 model 
"""unstructuredmodel""" +1005 39 loss """softplus""" +1005 39 regularizer """no""" +1005 39 optimizer """adam""" +1005 39 training_loop """owa""" +1005 39 negative_sampler """basic""" +1005 39 evaluator """rankbased""" +1005 40 dataset """fb15k237""" +1005 40 model """unstructuredmodel""" +1005 40 loss """softplus""" +1005 40 regularizer """no""" +1005 40 optimizer """adam""" +1005 40 training_loop """owa""" +1005 40 negative_sampler """basic""" +1005 40 evaluator """rankbased""" +1005 41 dataset """fb15k237""" +1005 41 model """unstructuredmodel""" +1005 41 loss """softplus""" +1005 41 regularizer """no""" +1005 41 optimizer """adam""" +1005 41 training_loop """owa""" +1005 41 negative_sampler """basic""" +1005 41 evaluator """rankbased""" +1005 42 dataset """fb15k237""" +1005 42 model """unstructuredmodel""" +1005 42 loss """softplus""" +1005 42 regularizer """no""" +1005 42 optimizer """adam""" +1005 42 training_loop """owa""" +1005 42 negative_sampler """basic""" +1005 42 evaluator """rankbased""" +1005 43 dataset """fb15k237""" +1005 43 model """unstructuredmodel""" +1005 43 loss """softplus""" +1005 43 regularizer """no""" +1005 43 optimizer """adam""" +1005 43 training_loop """owa""" +1005 43 negative_sampler """basic""" +1005 43 evaluator """rankbased""" +1005 44 dataset """fb15k237""" +1005 44 model """unstructuredmodel""" +1005 44 loss """softplus""" +1005 44 regularizer """no""" +1005 44 optimizer """adam""" +1005 44 training_loop """owa""" +1005 44 negative_sampler """basic""" +1005 44 evaluator """rankbased""" +1005 45 dataset """fb15k237""" +1005 45 model """unstructuredmodel""" +1005 45 loss """softplus""" +1005 45 regularizer """no""" +1005 45 optimizer """adam""" +1005 45 training_loop """owa""" +1005 45 negative_sampler """basic""" +1005 45 evaluator """rankbased""" +1005 46 dataset """fb15k237""" +1005 46 model """unstructuredmodel""" +1005 46 loss """softplus""" +1005 46 regularizer """no""" +1005 46 optimizer """adam""" +1005 46 training_loop 
"""owa""" +1005 46 negative_sampler """basic""" +1005 46 evaluator """rankbased""" +1005 47 dataset """fb15k237""" +1005 47 model """unstructuredmodel""" +1005 47 loss """softplus""" +1005 47 regularizer """no""" +1005 47 optimizer """adam""" +1005 47 training_loop """owa""" +1005 47 negative_sampler """basic""" +1005 47 evaluator """rankbased""" +1005 48 dataset """fb15k237""" +1005 48 model """unstructuredmodel""" +1005 48 loss """softplus""" +1005 48 regularizer """no""" +1005 48 optimizer """adam""" +1005 48 training_loop """owa""" +1005 48 negative_sampler """basic""" +1005 48 evaluator """rankbased""" +1005 49 dataset """fb15k237""" +1005 49 model """unstructuredmodel""" +1005 49 loss """softplus""" +1005 49 regularizer """no""" +1005 49 optimizer """adam""" +1005 49 training_loop """owa""" +1005 49 negative_sampler """basic""" +1005 49 evaluator """rankbased""" +1005 50 dataset """fb15k237""" +1005 50 model """unstructuredmodel""" +1005 50 loss """softplus""" +1005 50 regularizer """no""" +1005 50 optimizer """adam""" +1005 50 training_loop """owa""" +1005 50 negative_sampler """basic""" +1005 50 evaluator """rankbased""" +1005 51 dataset """fb15k237""" +1005 51 model """unstructuredmodel""" +1005 51 loss """softplus""" +1005 51 regularizer """no""" +1005 51 optimizer """adam""" +1005 51 training_loop """owa""" +1005 51 negative_sampler """basic""" +1005 51 evaluator """rankbased""" +1005 52 dataset """fb15k237""" +1005 52 model """unstructuredmodel""" +1005 52 loss """softplus""" +1005 52 regularizer """no""" +1005 52 optimizer """adam""" +1005 52 training_loop """owa""" +1005 52 negative_sampler """basic""" +1005 52 evaluator """rankbased""" +1005 53 dataset """fb15k237""" +1005 53 model """unstructuredmodel""" +1005 53 loss """softplus""" +1005 53 regularizer """no""" +1005 53 optimizer """adam""" +1005 53 training_loop """owa""" +1005 53 negative_sampler """basic""" +1005 53 evaluator """rankbased""" +1005 54 dataset """fb15k237""" +1005 54 model 
"""unstructuredmodel""" +1005 54 loss """softplus""" +1005 54 regularizer """no""" +1005 54 optimizer """adam""" +1005 54 training_loop """owa""" +1005 54 negative_sampler """basic""" +1005 54 evaluator """rankbased""" +1005 55 dataset """fb15k237""" +1005 55 model """unstructuredmodel""" +1005 55 loss """softplus""" +1005 55 regularizer """no""" +1005 55 optimizer """adam""" +1005 55 training_loop """owa""" +1005 55 negative_sampler """basic""" +1005 55 evaluator """rankbased""" +1005 56 dataset """fb15k237""" +1005 56 model """unstructuredmodel""" +1005 56 loss """softplus""" +1005 56 regularizer """no""" +1005 56 optimizer """adam""" +1005 56 training_loop """owa""" +1005 56 negative_sampler """basic""" +1005 56 evaluator """rankbased""" +1005 57 dataset """fb15k237""" +1005 57 model """unstructuredmodel""" +1005 57 loss """softplus""" +1005 57 regularizer """no""" +1005 57 optimizer """adam""" +1005 57 training_loop """owa""" +1005 57 negative_sampler """basic""" +1005 57 evaluator """rankbased""" +1005 58 dataset """fb15k237""" +1005 58 model """unstructuredmodel""" +1005 58 loss """softplus""" +1005 58 regularizer """no""" +1005 58 optimizer """adam""" +1005 58 training_loop """owa""" +1005 58 negative_sampler """basic""" +1005 58 evaluator """rankbased""" +1005 59 dataset """fb15k237""" +1005 59 model """unstructuredmodel""" +1005 59 loss """softplus""" +1005 59 regularizer """no""" +1005 59 optimizer """adam""" +1005 59 training_loop """owa""" +1005 59 negative_sampler """basic""" +1005 59 evaluator """rankbased""" +1005 60 dataset """fb15k237""" +1005 60 model """unstructuredmodel""" +1005 60 loss """softplus""" +1005 60 regularizer """no""" +1005 60 optimizer """adam""" +1005 60 training_loop """owa""" +1005 60 negative_sampler """basic""" +1005 60 evaluator """rankbased""" +1005 61 dataset """fb15k237""" +1005 61 model """unstructuredmodel""" +1005 61 loss """softplus""" +1005 61 regularizer """no""" +1005 61 optimizer """adam""" +1005 61 training_loop 
"""owa""" +1005 61 negative_sampler """basic""" +1005 61 evaluator """rankbased""" +1005 62 dataset """fb15k237""" +1005 62 model """unstructuredmodel""" +1005 62 loss """softplus""" +1005 62 regularizer """no""" +1005 62 optimizer """adam""" +1005 62 training_loop """owa""" +1005 62 negative_sampler """basic""" +1005 62 evaluator """rankbased""" +1005 63 dataset """fb15k237""" +1005 63 model """unstructuredmodel""" +1005 63 loss """softplus""" +1005 63 regularizer """no""" +1005 63 optimizer """adam""" +1005 63 training_loop """owa""" +1005 63 negative_sampler """basic""" +1005 63 evaluator """rankbased""" +1005 64 dataset """fb15k237""" +1005 64 model """unstructuredmodel""" +1005 64 loss """softplus""" +1005 64 regularizer """no""" +1005 64 optimizer """adam""" +1005 64 training_loop """owa""" +1005 64 negative_sampler """basic""" +1005 64 evaluator """rankbased""" +1005 65 dataset """fb15k237""" +1005 65 model """unstructuredmodel""" +1005 65 loss """softplus""" +1005 65 regularizer """no""" +1005 65 optimizer """adam""" +1005 65 training_loop """owa""" +1005 65 negative_sampler """basic""" +1005 65 evaluator """rankbased""" +1005 66 dataset """fb15k237""" +1005 66 model """unstructuredmodel""" +1005 66 loss """softplus""" +1005 66 regularizer """no""" +1005 66 optimizer """adam""" +1005 66 training_loop """owa""" +1005 66 negative_sampler """basic""" +1005 66 evaluator """rankbased""" +1005 67 dataset """fb15k237""" +1005 67 model """unstructuredmodel""" +1005 67 loss """softplus""" +1005 67 regularizer """no""" +1005 67 optimizer """adam""" +1005 67 training_loop """owa""" +1005 67 negative_sampler """basic""" +1005 67 evaluator """rankbased""" +1005 68 dataset """fb15k237""" +1005 68 model """unstructuredmodel""" +1005 68 loss """softplus""" +1005 68 regularizer """no""" +1005 68 optimizer """adam""" +1005 68 training_loop """owa""" +1005 68 negative_sampler """basic""" +1005 68 evaluator """rankbased""" +1005 69 dataset """fb15k237""" +1005 69 model 
"""unstructuredmodel""" +1005 69 loss """softplus""" +1005 69 regularizer """no""" +1005 69 optimizer """adam""" +1005 69 training_loop """owa""" +1005 69 negative_sampler """basic""" +1005 69 evaluator """rankbased""" +1005 70 dataset """fb15k237""" +1005 70 model """unstructuredmodel""" +1005 70 loss """softplus""" +1005 70 regularizer """no""" +1005 70 optimizer """adam""" +1005 70 training_loop """owa""" +1005 70 negative_sampler """basic""" +1005 70 evaluator """rankbased""" +1005 71 dataset """fb15k237""" +1005 71 model """unstructuredmodel""" +1005 71 loss """softplus""" +1005 71 regularizer """no""" +1005 71 optimizer """adam""" +1005 71 training_loop """owa""" +1005 71 negative_sampler """basic""" +1005 71 evaluator """rankbased""" +1005 72 dataset """fb15k237""" +1005 72 model """unstructuredmodel""" +1005 72 loss """softplus""" +1005 72 regularizer """no""" +1005 72 optimizer """adam""" +1005 72 training_loop """owa""" +1005 72 negative_sampler """basic""" +1005 72 evaluator """rankbased""" +1005 73 dataset """fb15k237""" +1005 73 model """unstructuredmodel""" +1005 73 loss """softplus""" +1005 73 regularizer """no""" +1005 73 optimizer """adam""" +1005 73 training_loop """owa""" +1005 73 negative_sampler """basic""" +1005 73 evaluator """rankbased""" +1005 74 dataset """fb15k237""" +1005 74 model """unstructuredmodel""" +1005 74 loss """softplus""" +1005 74 regularizer """no""" +1005 74 optimizer """adam""" +1005 74 training_loop """owa""" +1005 74 negative_sampler """basic""" +1005 74 evaluator """rankbased""" +1005 75 dataset """fb15k237""" +1005 75 model """unstructuredmodel""" +1005 75 loss """softplus""" +1005 75 regularizer """no""" +1005 75 optimizer """adam""" +1005 75 training_loop """owa""" +1005 75 negative_sampler """basic""" +1005 75 evaluator """rankbased""" +1005 76 dataset """fb15k237""" +1005 76 model """unstructuredmodel""" +1005 76 loss """softplus""" +1005 76 regularizer """no""" +1005 76 optimizer """adam""" +1005 76 training_loop 
"""owa""" +1005 76 negative_sampler """basic""" +1005 76 evaluator """rankbased""" +1005 77 dataset """fb15k237""" +1005 77 model """unstructuredmodel""" +1005 77 loss """softplus""" +1005 77 regularizer """no""" +1005 77 optimizer """adam""" +1005 77 training_loop """owa""" +1005 77 negative_sampler """basic""" +1005 77 evaluator """rankbased""" +1005 78 dataset """fb15k237""" +1005 78 model """unstructuredmodel""" +1005 78 loss """softplus""" +1005 78 regularizer """no""" +1005 78 optimizer """adam""" +1005 78 training_loop """owa""" +1005 78 negative_sampler """basic""" +1005 78 evaluator """rankbased""" +1005 79 dataset """fb15k237""" +1005 79 model """unstructuredmodel""" +1005 79 loss """softplus""" +1005 79 regularizer """no""" +1005 79 optimizer """adam""" +1005 79 training_loop """owa""" +1005 79 negative_sampler """basic""" +1005 79 evaluator """rankbased""" +1005 80 dataset """fb15k237""" +1005 80 model """unstructuredmodel""" +1005 80 loss """softplus""" +1005 80 regularizer """no""" +1005 80 optimizer """adam""" +1005 80 training_loop """owa""" +1005 80 negative_sampler """basic""" +1005 80 evaluator """rankbased""" +1005 81 dataset """fb15k237""" +1005 81 model """unstructuredmodel""" +1005 81 loss """softplus""" +1005 81 regularizer """no""" +1005 81 optimizer """adam""" +1005 81 training_loop """owa""" +1005 81 negative_sampler """basic""" +1005 81 evaluator """rankbased""" +1005 82 dataset """fb15k237""" +1005 82 model """unstructuredmodel""" +1005 82 loss """softplus""" +1005 82 regularizer """no""" +1005 82 optimizer """adam""" +1005 82 training_loop """owa""" +1005 82 negative_sampler """basic""" +1005 82 evaluator """rankbased""" +1005 83 dataset """fb15k237""" +1005 83 model """unstructuredmodel""" +1005 83 loss """softplus""" +1005 83 regularizer """no""" +1005 83 optimizer """adam""" +1005 83 training_loop """owa""" +1005 83 negative_sampler """basic""" +1005 83 evaluator """rankbased""" +1005 84 dataset """fb15k237""" +1005 84 model 
"""unstructuredmodel""" +1005 84 loss """softplus""" +1005 84 regularizer """no""" +1005 84 optimizer """adam""" +1005 84 training_loop """owa""" +1005 84 negative_sampler """basic""" +1005 84 evaluator """rankbased""" +1005 85 dataset """fb15k237""" +1005 85 model """unstructuredmodel""" +1005 85 loss """softplus""" +1005 85 regularizer """no""" +1005 85 optimizer """adam""" +1005 85 training_loop """owa""" +1005 85 negative_sampler """basic""" +1005 85 evaluator """rankbased""" +1005 86 dataset """fb15k237""" +1005 86 model """unstructuredmodel""" +1005 86 loss """softplus""" +1005 86 regularizer """no""" +1005 86 optimizer """adam""" +1005 86 training_loop """owa""" +1005 86 negative_sampler """basic""" +1005 86 evaluator """rankbased""" +1005 87 dataset """fb15k237""" +1005 87 model """unstructuredmodel""" +1005 87 loss """softplus""" +1005 87 regularizer """no""" +1005 87 optimizer """adam""" +1005 87 training_loop """owa""" +1005 87 negative_sampler """basic""" +1005 87 evaluator """rankbased""" +1005 88 dataset """fb15k237""" +1005 88 model """unstructuredmodel""" +1005 88 loss """softplus""" +1005 88 regularizer """no""" +1005 88 optimizer """adam""" +1005 88 training_loop """owa""" +1005 88 negative_sampler """basic""" +1005 88 evaluator """rankbased""" +1005 89 dataset """fb15k237""" +1005 89 model """unstructuredmodel""" +1005 89 loss """softplus""" +1005 89 regularizer """no""" +1005 89 optimizer """adam""" +1005 89 training_loop """owa""" +1005 89 negative_sampler """basic""" +1005 89 evaluator """rankbased""" +1005 90 dataset """fb15k237""" +1005 90 model """unstructuredmodel""" +1005 90 loss """softplus""" +1005 90 regularizer """no""" +1005 90 optimizer """adam""" +1005 90 training_loop """owa""" +1005 90 negative_sampler """basic""" +1005 90 evaluator """rankbased""" +1005 91 dataset """fb15k237""" +1005 91 model """unstructuredmodel""" +1005 91 loss """softplus""" +1005 91 regularizer """no""" +1005 91 optimizer """adam""" +1005 91 training_loop 
"""owa""" +1005 91 negative_sampler """basic""" +1005 91 evaluator """rankbased""" +1005 92 dataset """fb15k237""" +1005 92 model """unstructuredmodel""" +1005 92 loss """softplus""" +1005 92 regularizer """no""" +1005 92 optimizer """adam""" +1005 92 training_loop """owa""" +1005 92 negative_sampler """basic""" +1005 92 evaluator """rankbased""" +1005 93 dataset """fb15k237""" +1005 93 model """unstructuredmodel""" +1005 93 loss """softplus""" +1005 93 regularizer """no""" +1005 93 optimizer """adam""" +1005 93 training_loop """owa""" +1005 93 negative_sampler """basic""" +1005 93 evaluator """rankbased""" +1005 94 dataset """fb15k237""" +1005 94 model """unstructuredmodel""" +1005 94 loss """softplus""" +1005 94 regularizer """no""" +1005 94 optimizer """adam""" +1005 94 training_loop """owa""" +1005 94 negative_sampler """basic""" +1005 94 evaluator """rankbased""" +1005 95 dataset """fb15k237""" +1005 95 model """unstructuredmodel""" +1005 95 loss """softplus""" +1005 95 regularizer """no""" +1005 95 optimizer """adam""" +1005 95 training_loop """owa""" +1005 95 negative_sampler """basic""" +1005 95 evaluator """rankbased""" +1005 96 dataset """fb15k237""" +1005 96 model """unstructuredmodel""" +1005 96 loss """softplus""" +1005 96 regularizer """no""" +1005 96 optimizer """adam""" +1005 96 training_loop """owa""" +1005 96 negative_sampler """basic""" +1005 96 evaluator """rankbased""" +1005 97 dataset """fb15k237""" +1005 97 model """unstructuredmodel""" +1005 97 loss """softplus""" +1005 97 regularizer """no""" +1005 97 optimizer """adam""" +1005 97 training_loop """owa""" +1005 97 negative_sampler """basic""" +1005 97 evaluator """rankbased""" +1005 98 dataset """fb15k237""" +1005 98 model """unstructuredmodel""" +1005 98 loss """softplus""" +1005 98 regularizer """no""" +1005 98 optimizer """adam""" +1005 98 training_loop """owa""" +1005 98 negative_sampler """basic""" +1005 98 evaluator """rankbased""" +1005 99 dataset """fb15k237""" +1005 99 model 
"""unstructuredmodel""" +1005 99 loss """softplus""" +1005 99 regularizer """no""" +1005 99 optimizer """adam""" +1005 99 training_loop """owa""" +1005 99 negative_sampler """basic""" +1005 99 evaluator """rankbased""" +1005 100 dataset """fb15k237""" +1005 100 model """unstructuredmodel""" +1005 100 loss """softplus""" +1005 100 regularizer """no""" +1005 100 optimizer """adam""" +1005 100 training_loop """owa""" +1005 100 negative_sampler """basic""" +1005 100 evaluator """rankbased""" +1006 1 model.embedding_dim 1.0 +1006 1 model.scoring_fct_norm 1.0 +1006 1 training.batch_size 0.0 +1006 1 training.label_smoothing 0.012605250789456592 +1006 2 model.embedding_dim 0.0 +1006 2 model.scoring_fct_norm 1.0 +1006 2 training.batch_size 2.0 +1006 2 training.label_smoothing 0.03847232382220154 +1006 3 model.embedding_dim 2.0 +1006 3 model.scoring_fct_norm 1.0 +1006 3 training.batch_size 1.0 +1006 3 training.label_smoothing 0.7457230313993845 +1006 4 model.embedding_dim 1.0 +1006 4 model.scoring_fct_norm 2.0 +1006 4 training.batch_size 1.0 +1006 4 training.label_smoothing 0.0021818626623867333 +1006 5 model.embedding_dim 2.0 +1006 5 model.scoring_fct_norm 1.0 +1006 5 training.batch_size 2.0 +1006 5 training.label_smoothing 0.5541873868827704 +1006 6 model.embedding_dim 2.0 +1006 6 model.scoring_fct_norm 2.0 +1006 6 training.batch_size 2.0 +1006 6 training.label_smoothing 0.001262317448648176 +1006 7 model.embedding_dim 1.0 +1006 7 model.scoring_fct_norm 2.0 +1006 7 training.batch_size 1.0 +1006 7 training.label_smoothing 0.013696167073310353 +1006 8 model.embedding_dim 1.0 +1006 8 model.scoring_fct_norm 2.0 +1006 8 training.batch_size 2.0 +1006 8 training.label_smoothing 0.002149731838205727 +1006 9 model.embedding_dim 2.0 +1006 9 model.scoring_fct_norm 2.0 +1006 9 training.batch_size 1.0 +1006 9 training.label_smoothing 0.05732864240875983 +1006 10 model.embedding_dim 2.0 +1006 10 model.scoring_fct_norm 2.0 +1006 10 training.batch_size 2.0 +1006 10 
training.label_smoothing 0.2571344310494094 +1006 11 model.embedding_dim 1.0 +1006 11 model.scoring_fct_norm 2.0 +1006 11 training.batch_size 2.0 +1006 11 training.label_smoothing 0.02437377766986229 +1006 12 model.embedding_dim 1.0 +1006 12 model.scoring_fct_norm 1.0 +1006 12 training.batch_size 1.0 +1006 12 training.label_smoothing 0.03649487881553969 +1006 13 model.embedding_dim 2.0 +1006 13 model.scoring_fct_norm 1.0 +1006 13 training.batch_size 1.0 +1006 13 training.label_smoothing 0.704410459314649 +1006 14 model.embedding_dim 2.0 +1006 14 model.scoring_fct_norm 1.0 +1006 14 training.batch_size 1.0 +1006 14 training.label_smoothing 0.005519256204649218 +1006 15 model.embedding_dim 2.0 +1006 15 model.scoring_fct_norm 1.0 +1006 15 training.batch_size 0.0 +1006 15 training.label_smoothing 0.00867171146621864 +1006 16 model.embedding_dim 1.0 +1006 16 model.scoring_fct_norm 1.0 +1006 16 training.batch_size 0.0 +1006 16 training.label_smoothing 0.01629899775412296 +1006 17 model.embedding_dim 1.0 +1006 17 model.scoring_fct_norm 2.0 +1006 17 training.batch_size 0.0 +1006 17 training.label_smoothing 0.0015141612629311898 +1006 18 model.embedding_dim 0.0 +1006 18 model.scoring_fct_norm 2.0 +1006 18 training.batch_size 2.0 +1006 18 training.label_smoothing 0.04877093164536244 +1006 19 model.embedding_dim 1.0 +1006 19 model.scoring_fct_norm 2.0 +1006 19 training.batch_size 1.0 +1006 19 training.label_smoothing 0.0014334718540760228 +1006 20 model.embedding_dim 2.0 +1006 20 model.scoring_fct_norm 2.0 +1006 20 training.batch_size 1.0 +1006 20 training.label_smoothing 0.0022852809825052354 +1006 21 model.embedding_dim 2.0 +1006 21 model.scoring_fct_norm 2.0 +1006 21 training.batch_size 0.0 +1006 21 training.label_smoothing 0.005745539232394021 +1006 22 model.embedding_dim 2.0 +1006 22 model.scoring_fct_norm 2.0 +1006 22 training.batch_size 1.0 +1006 22 training.label_smoothing 0.023218302651383384 +1006 23 model.embedding_dim 1.0 +1006 23 model.scoring_fct_norm 2.0 +1006 
23 training.batch_size 0.0 +1006 23 training.label_smoothing 0.05775929306136312 +1006 24 model.embedding_dim 1.0 +1006 24 model.scoring_fct_norm 2.0 +1006 24 training.batch_size 2.0 +1006 24 training.label_smoothing 0.20210779079138483 +1006 25 model.embedding_dim 1.0 +1006 25 model.scoring_fct_norm 1.0 +1006 25 training.batch_size 2.0 +1006 25 training.label_smoothing 0.1044301393916001 +1006 26 model.embedding_dim 0.0 +1006 26 model.scoring_fct_norm 1.0 +1006 26 training.batch_size 0.0 +1006 26 training.label_smoothing 0.04080632915892372 +1006 27 model.embedding_dim 2.0 +1006 27 model.scoring_fct_norm 2.0 +1006 27 training.batch_size 2.0 +1006 27 training.label_smoothing 0.07305082790962586 +1006 28 model.embedding_dim 0.0 +1006 28 model.scoring_fct_norm 2.0 +1006 28 training.batch_size 1.0 +1006 28 training.label_smoothing 0.07720933260436616 +1006 29 model.embedding_dim 1.0 +1006 29 model.scoring_fct_norm 2.0 +1006 29 training.batch_size 2.0 +1006 29 training.label_smoothing 0.05182254372846499 +1006 30 model.embedding_dim 0.0 +1006 30 model.scoring_fct_norm 2.0 +1006 30 training.batch_size 0.0 +1006 30 training.label_smoothing 0.010334616800501165 +1006 31 model.embedding_dim 0.0 +1006 31 model.scoring_fct_norm 2.0 +1006 31 training.batch_size 1.0 +1006 31 training.label_smoothing 0.003059235969268075 +1006 32 model.embedding_dim 2.0 +1006 32 model.scoring_fct_norm 1.0 +1006 32 training.batch_size 0.0 +1006 32 training.label_smoothing 0.4445044566808884 +1006 33 model.embedding_dim 2.0 +1006 33 model.scoring_fct_norm 1.0 +1006 33 training.batch_size 0.0 +1006 33 training.label_smoothing 0.0051315898654385435 +1006 34 model.embedding_dim 0.0 +1006 34 model.scoring_fct_norm 1.0 +1006 34 training.batch_size 1.0 +1006 34 training.label_smoothing 0.3940835928667975 +1006 35 model.embedding_dim 1.0 +1006 35 model.scoring_fct_norm 1.0 +1006 35 training.batch_size 0.0 +1006 35 training.label_smoothing 0.5406869466075417 +1006 36 model.embedding_dim 2.0 +1006 36 
model.scoring_fct_norm 2.0 +1006 36 training.batch_size 2.0 +1006 36 training.label_smoothing 0.020865093794767225 +1006 37 model.embedding_dim 1.0 +1006 37 model.scoring_fct_norm 1.0 +1006 37 training.batch_size 0.0 +1006 37 training.label_smoothing 0.0018825107697724155 +1006 38 model.embedding_dim 2.0 +1006 38 model.scoring_fct_norm 1.0 +1006 38 training.batch_size 1.0 +1006 38 training.label_smoothing 0.002593888529607267 +1006 39 model.embedding_dim 1.0 +1006 39 model.scoring_fct_norm 2.0 +1006 39 training.batch_size 2.0 +1006 39 training.label_smoothing 0.29353204136386457 +1006 40 model.embedding_dim 0.0 +1006 40 model.scoring_fct_norm 2.0 +1006 40 training.batch_size 2.0 +1006 40 training.label_smoothing 0.18182276335439965 +1006 41 model.embedding_dim 2.0 +1006 41 model.scoring_fct_norm 2.0 +1006 41 training.batch_size 0.0 +1006 41 training.label_smoothing 0.23312366613573388 +1006 42 model.embedding_dim 0.0 +1006 42 model.scoring_fct_norm 2.0 +1006 42 training.batch_size 1.0 +1006 42 training.label_smoothing 0.07567327380145147 +1006 43 model.embedding_dim 2.0 +1006 43 model.scoring_fct_norm 1.0 +1006 43 training.batch_size 0.0 +1006 43 training.label_smoothing 0.08826736415944417 +1006 44 model.embedding_dim 2.0 +1006 44 model.scoring_fct_norm 1.0 +1006 44 training.batch_size 1.0 +1006 44 training.label_smoothing 0.033551982496467264 +1006 45 model.embedding_dim 2.0 +1006 45 model.scoring_fct_norm 1.0 +1006 45 training.batch_size 0.0 +1006 45 training.label_smoothing 0.030405016053575228 +1006 46 model.embedding_dim 1.0 +1006 46 model.scoring_fct_norm 1.0 +1006 46 training.batch_size 0.0 +1006 46 training.label_smoothing 0.10969622078308718 +1006 47 model.embedding_dim 1.0 +1006 47 model.scoring_fct_norm 2.0 +1006 47 training.batch_size 2.0 +1006 47 training.label_smoothing 0.10041958336672697 +1006 48 model.embedding_dim 2.0 +1006 48 model.scoring_fct_norm 1.0 +1006 48 training.batch_size 2.0 +1006 48 training.label_smoothing 0.02659192521325166 +1006 
49 model.embedding_dim 2.0 +1006 49 model.scoring_fct_norm 2.0 +1006 49 training.batch_size 0.0 +1006 49 training.label_smoothing 0.009569832139770954 +1006 50 model.embedding_dim 0.0 +1006 50 model.scoring_fct_norm 1.0 +1006 50 training.batch_size 0.0 +1006 50 training.label_smoothing 0.44911452017973147 +1006 51 model.embedding_dim 2.0 +1006 51 model.scoring_fct_norm 1.0 +1006 51 training.batch_size 0.0 +1006 51 training.label_smoothing 0.03179857831675973 +1006 52 model.embedding_dim 1.0 +1006 52 model.scoring_fct_norm 1.0 +1006 52 training.batch_size 1.0 +1006 52 training.label_smoothing 0.0010392125096832172 +1006 53 model.embedding_dim 2.0 +1006 53 model.scoring_fct_norm 2.0 +1006 53 training.batch_size 0.0 +1006 53 training.label_smoothing 0.010622055724670029 +1006 54 model.embedding_dim 0.0 +1006 54 model.scoring_fct_norm 2.0 +1006 54 training.batch_size 0.0 +1006 54 training.label_smoothing 0.0021478417032537956 +1006 55 model.embedding_dim 2.0 +1006 55 model.scoring_fct_norm 1.0 +1006 55 training.batch_size 2.0 +1006 55 training.label_smoothing 0.002601592612557256 +1006 56 model.embedding_dim 0.0 +1006 56 model.scoring_fct_norm 1.0 +1006 56 training.batch_size 2.0 +1006 56 training.label_smoothing 0.4132547078016809 +1006 57 model.embedding_dim 2.0 +1006 57 model.scoring_fct_norm 1.0 +1006 57 training.batch_size 0.0 +1006 57 training.label_smoothing 0.008412227346261989 +1006 58 model.embedding_dim 0.0 +1006 58 model.scoring_fct_norm 1.0 +1006 58 training.batch_size 2.0 +1006 58 training.label_smoothing 0.07549081219730382 +1006 59 model.embedding_dim 2.0 +1006 59 model.scoring_fct_norm 2.0 +1006 59 training.batch_size 0.0 +1006 59 training.label_smoothing 0.021745081132567866 +1006 60 model.embedding_dim 1.0 +1006 60 model.scoring_fct_norm 1.0 +1006 60 training.batch_size 0.0 +1006 60 training.label_smoothing 0.020828498511879567 +1006 61 model.embedding_dim 1.0 +1006 61 model.scoring_fct_norm 2.0 +1006 61 training.batch_size 1.0 +1006 61 
training.label_smoothing 0.005256938124411319 +1006 62 model.embedding_dim 2.0 +1006 62 model.scoring_fct_norm 1.0 +1006 62 training.batch_size 1.0 +1006 62 training.label_smoothing 0.033876975389343235 +1006 63 model.embedding_dim 2.0 +1006 63 model.scoring_fct_norm 2.0 +1006 63 training.batch_size 1.0 +1006 63 training.label_smoothing 0.38061788049228457 +1006 64 model.embedding_dim 1.0 +1006 64 model.scoring_fct_norm 2.0 +1006 64 training.batch_size 0.0 +1006 64 training.label_smoothing 0.008395722646479517 +1006 65 model.embedding_dim 0.0 +1006 65 model.scoring_fct_norm 2.0 +1006 65 training.batch_size 0.0 +1006 65 training.label_smoothing 0.49578828051782636 +1006 66 model.embedding_dim 0.0 +1006 66 model.scoring_fct_norm 1.0 +1006 66 training.batch_size 1.0 +1006 66 training.label_smoothing 0.012700028276308825 +1006 67 model.embedding_dim 0.0 +1006 67 model.scoring_fct_norm 1.0 +1006 67 training.batch_size 2.0 +1006 67 training.label_smoothing 0.01787883724623212 +1006 68 model.embedding_dim 0.0 +1006 68 model.scoring_fct_norm 2.0 +1006 68 training.batch_size 2.0 +1006 68 training.label_smoothing 0.409563406332308 +1006 69 model.embedding_dim 1.0 +1006 69 model.scoring_fct_norm 2.0 +1006 69 training.batch_size 1.0 +1006 69 training.label_smoothing 0.26514880319716133 +1006 70 model.embedding_dim 0.0 +1006 70 model.scoring_fct_norm 1.0 +1006 70 training.batch_size 2.0 +1006 70 training.label_smoothing 0.37780790858184615 +1006 71 model.embedding_dim 1.0 +1006 71 model.scoring_fct_norm 2.0 +1006 71 training.batch_size 0.0 +1006 71 training.label_smoothing 0.0017279274808953565 +1006 72 model.embedding_dim 0.0 +1006 72 model.scoring_fct_norm 2.0 +1006 72 training.batch_size 1.0 +1006 72 training.label_smoothing 0.32749163860939096 +1006 73 model.embedding_dim 2.0 +1006 73 model.scoring_fct_norm 2.0 +1006 73 training.batch_size 2.0 +1006 73 training.label_smoothing 0.02608937244615943 +1006 74 model.embedding_dim 2.0 +1006 74 model.scoring_fct_norm 2.0 +1006 74 
training.batch_size 1.0 +1006 74 training.label_smoothing 0.22935344704814745 +1006 75 model.embedding_dim 0.0 +1006 75 model.scoring_fct_norm 2.0 +1006 75 training.batch_size 1.0 +1006 75 training.label_smoothing 0.13313336953687685 +1006 76 model.embedding_dim 0.0 +1006 76 model.scoring_fct_norm 1.0 +1006 76 training.batch_size 1.0 +1006 76 training.label_smoothing 0.01615793112238681 +1006 77 model.embedding_dim 1.0 +1006 77 model.scoring_fct_norm 1.0 +1006 77 training.batch_size 0.0 +1006 77 training.label_smoothing 0.01004478591576039 +1006 78 model.embedding_dim 0.0 +1006 78 model.scoring_fct_norm 1.0 +1006 78 training.batch_size 1.0 +1006 78 training.label_smoothing 0.41962641001668277 +1006 79 model.embedding_dim 2.0 +1006 79 model.scoring_fct_norm 2.0 +1006 79 training.batch_size 1.0 +1006 79 training.label_smoothing 0.36579109302776014 +1006 80 model.embedding_dim 2.0 +1006 80 model.scoring_fct_norm 2.0 +1006 80 training.batch_size 0.0 +1006 80 training.label_smoothing 0.04483473670497006 +1006 81 model.embedding_dim 2.0 +1006 81 model.scoring_fct_norm 2.0 +1006 81 training.batch_size 2.0 +1006 81 training.label_smoothing 0.09194240487772697 +1006 82 model.embedding_dim 0.0 +1006 82 model.scoring_fct_norm 1.0 +1006 82 training.batch_size 1.0 +1006 82 training.label_smoothing 0.08474055572242524 +1006 83 model.embedding_dim 0.0 +1006 83 model.scoring_fct_norm 1.0 +1006 83 training.batch_size 0.0 +1006 83 training.label_smoothing 0.8862065300211014 +1006 84 model.embedding_dim 2.0 +1006 84 model.scoring_fct_norm 1.0 +1006 84 training.batch_size 0.0 +1006 84 training.label_smoothing 0.2424546919575106 +1006 85 model.embedding_dim 2.0 +1006 85 model.scoring_fct_norm 1.0 +1006 85 training.batch_size 0.0 +1006 85 training.label_smoothing 0.9540816584907228 +1006 86 model.embedding_dim 0.0 +1006 86 model.scoring_fct_norm 1.0 +1006 86 training.batch_size 1.0 +1006 86 training.label_smoothing 0.09561874867387105 +1006 87 model.embedding_dim 0.0 +1006 87 
model.scoring_fct_norm 2.0 +1006 87 training.batch_size 0.0 +1006 87 training.label_smoothing 0.05916057212577291 +1006 88 model.embedding_dim 0.0 +1006 88 model.scoring_fct_norm 1.0 +1006 88 training.batch_size 2.0 +1006 88 training.label_smoothing 0.2617285975750814 +1006 89 model.embedding_dim 2.0 +1006 89 model.scoring_fct_norm 2.0 +1006 89 training.batch_size 1.0 +1006 89 training.label_smoothing 0.029013758388706762 +1006 90 model.embedding_dim 1.0 +1006 90 model.scoring_fct_norm 1.0 +1006 90 training.batch_size 2.0 +1006 90 training.label_smoothing 0.0026597805379225286 +1006 91 model.embedding_dim 1.0 +1006 91 model.scoring_fct_norm 1.0 +1006 91 training.batch_size 2.0 +1006 91 training.label_smoothing 0.016335932594986276 +1006 92 model.embedding_dim 0.0 +1006 92 model.scoring_fct_norm 1.0 +1006 92 training.batch_size 2.0 +1006 92 training.label_smoothing 0.003900128177521359 +1006 93 model.embedding_dim 0.0 +1006 93 model.scoring_fct_norm 1.0 +1006 93 training.batch_size 0.0 +1006 93 training.label_smoothing 0.006049547489004875 +1006 94 model.embedding_dim 0.0 +1006 94 model.scoring_fct_norm 2.0 +1006 94 training.batch_size 1.0 +1006 94 training.label_smoothing 0.004741956866156056 +1006 95 model.embedding_dim 2.0 +1006 95 model.scoring_fct_norm 1.0 +1006 95 training.batch_size 2.0 +1006 95 training.label_smoothing 0.10970277646385472 +1006 96 model.embedding_dim 0.0 +1006 96 model.scoring_fct_norm 2.0 +1006 96 training.batch_size 2.0 +1006 96 training.label_smoothing 0.10569627483325732 +1006 97 model.embedding_dim 1.0 +1006 97 model.scoring_fct_norm 2.0 +1006 97 training.batch_size 2.0 +1006 97 training.label_smoothing 0.7806796228375165 +1006 98 model.embedding_dim 1.0 +1006 98 model.scoring_fct_norm 1.0 +1006 98 training.batch_size 1.0 +1006 98 training.label_smoothing 0.5529710653755979 +1006 99 model.embedding_dim 1.0 +1006 99 model.scoring_fct_norm 1.0 +1006 99 training.batch_size 2.0 +1006 99 training.label_smoothing 0.0014050689163170838 +1006 
100 model.embedding_dim 1.0 +1006 100 model.scoring_fct_norm 2.0 +1006 100 training.batch_size 0.0 +1006 100 training.label_smoothing 0.007209637844033664 +1006 1 dataset """kinships""" +1006 1 model """unstructuredmodel""" +1006 1 loss """bceaftersigmoid""" +1006 1 regularizer """no""" +1006 1 optimizer """adadelta""" +1006 1 training_loop """lcwa""" +1006 1 evaluator """rankbased""" +1006 2 dataset """kinships""" +1006 2 model """unstructuredmodel""" +1006 2 loss """bceaftersigmoid""" +1006 2 regularizer """no""" +1006 2 optimizer """adadelta""" +1006 2 training_loop """lcwa""" +1006 2 evaluator """rankbased""" +1006 3 dataset """kinships""" +1006 3 model """unstructuredmodel""" +1006 3 loss """bceaftersigmoid""" +1006 3 regularizer """no""" +1006 3 optimizer """adadelta""" +1006 3 training_loop """lcwa""" +1006 3 evaluator """rankbased""" +1006 4 dataset """kinships""" +1006 4 model """unstructuredmodel""" +1006 4 loss """bceaftersigmoid""" +1006 4 regularizer """no""" +1006 4 optimizer """adadelta""" +1006 4 training_loop """lcwa""" +1006 4 evaluator """rankbased""" +1006 5 dataset """kinships""" +1006 5 model """unstructuredmodel""" +1006 5 loss """bceaftersigmoid""" +1006 5 regularizer """no""" +1006 5 optimizer """adadelta""" +1006 5 training_loop """lcwa""" +1006 5 evaluator """rankbased""" +1006 6 dataset """kinships""" +1006 6 model """unstructuredmodel""" +1006 6 loss """bceaftersigmoid""" +1006 6 regularizer """no""" +1006 6 optimizer """adadelta""" +1006 6 training_loop """lcwa""" +1006 6 evaluator """rankbased""" +1006 7 dataset """kinships""" +1006 7 model """unstructuredmodel""" +1006 7 loss """bceaftersigmoid""" +1006 7 regularizer """no""" +1006 7 optimizer """adadelta""" +1006 7 training_loop """lcwa""" +1006 7 evaluator """rankbased""" +1006 8 dataset """kinships""" +1006 8 model """unstructuredmodel""" +1006 8 loss """bceaftersigmoid""" +1006 8 regularizer """no""" +1006 8 optimizer """adadelta""" +1006 8 training_loop """lcwa""" +1006 8 
evaluator """rankbased""" +1006 9 dataset """kinships""" +1006 9 model """unstructuredmodel""" +1006 9 loss """bceaftersigmoid""" +1006 9 regularizer """no""" +1006 9 optimizer """adadelta""" +1006 9 training_loop """lcwa""" +1006 9 evaluator """rankbased""" +1006 10 dataset """kinships""" +1006 10 model """unstructuredmodel""" +1006 10 loss """bceaftersigmoid""" +1006 10 regularizer """no""" +1006 10 optimizer """adadelta""" +1006 10 training_loop """lcwa""" +1006 10 evaluator """rankbased""" +1006 11 dataset """kinships""" +1006 11 model """unstructuredmodel""" +1006 11 loss """bceaftersigmoid""" +1006 11 regularizer """no""" +1006 11 optimizer """adadelta""" +1006 11 training_loop """lcwa""" +1006 11 evaluator """rankbased""" +1006 12 dataset """kinships""" +1006 12 model """unstructuredmodel""" +1006 12 loss """bceaftersigmoid""" +1006 12 regularizer """no""" +1006 12 optimizer """adadelta""" +1006 12 training_loop """lcwa""" +1006 12 evaluator """rankbased""" +1006 13 dataset """kinships""" +1006 13 model """unstructuredmodel""" +1006 13 loss """bceaftersigmoid""" +1006 13 regularizer """no""" +1006 13 optimizer """adadelta""" +1006 13 training_loop """lcwa""" +1006 13 evaluator """rankbased""" +1006 14 dataset """kinships""" +1006 14 model """unstructuredmodel""" +1006 14 loss """bceaftersigmoid""" +1006 14 regularizer """no""" +1006 14 optimizer """adadelta""" +1006 14 training_loop """lcwa""" +1006 14 evaluator """rankbased""" +1006 15 dataset """kinships""" +1006 15 model """unstructuredmodel""" +1006 15 loss """bceaftersigmoid""" +1006 15 regularizer """no""" +1006 15 optimizer """adadelta""" +1006 15 training_loop """lcwa""" +1006 15 evaluator """rankbased""" +1006 16 dataset """kinships""" +1006 16 model """unstructuredmodel""" +1006 16 loss """bceaftersigmoid""" +1006 16 regularizer """no""" +1006 16 optimizer """adadelta""" +1006 16 training_loop """lcwa""" +1006 16 evaluator """rankbased""" +1006 17 dataset """kinships""" +1006 17 model 
"""unstructuredmodel""" +1006 17 loss """bceaftersigmoid""" +1006 17 regularizer """no""" +1006 17 optimizer """adadelta""" +1006 17 training_loop """lcwa""" +1006 17 evaluator """rankbased""" +1006 18 dataset """kinships""" +1006 18 model """unstructuredmodel""" +1006 18 loss """bceaftersigmoid""" +1006 18 regularizer """no""" +1006 18 optimizer """adadelta""" +1006 18 training_loop """lcwa""" +1006 18 evaluator """rankbased""" +1006 19 dataset """kinships""" +1006 19 model """unstructuredmodel""" +1006 19 loss """bceaftersigmoid""" +1006 19 regularizer """no""" +1006 19 optimizer """adadelta""" +1006 19 training_loop """lcwa""" +1006 19 evaluator """rankbased""" +1006 20 dataset """kinships""" +1006 20 model """unstructuredmodel""" +1006 20 loss """bceaftersigmoid""" +1006 20 regularizer """no""" +1006 20 optimizer """adadelta""" +1006 20 training_loop """lcwa""" +1006 20 evaluator """rankbased""" +1006 21 dataset """kinships""" +1006 21 model """unstructuredmodel""" +1006 21 loss """bceaftersigmoid""" +1006 21 regularizer """no""" +1006 21 optimizer """adadelta""" +1006 21 training_loop """lcwa""" +1006 21 evaluator """rankbased""" +1006 22 dataset """kinships""" +1006 22 model """unstructuredmodel""" +1006 22 loss """bceaftersigmoid""" +1006 22 regularizer """no""" +1006 22 optimizer """adadelta""" +1006 22 training_loop """lcwa""" +1006 22 evaluator """rankbased""" +1006 23 dataset """kinships""" +1006 23 model """unstructuredmodel""" +1006 23 loss """bceaftersigmoid""" +1006 23 regularizer """no""" +1006 23 optimizer """adadelta""" +1006 23 training_loop """lcwa""" +1006 23 evaluator """rankbased""" +1006 24 dataset """kinships""" +1006 24 model """unstructuredmodel""" +1006 24 loss """bceaftersigmoid""" +1006 24 regularizer """no""" +1006 24 optimizer """adadelta""" +1006 24 training_loop """lcwa""" +1006 24 evaluator """rankbased""" +1006 25 dataset """kinships""" +1006 25 model """unstructuredmodel""" +1006 25 loss """bceaftersigmoid""" +1006 25 
regularizer """no""" +1006 25 optimizer """adadelta""" +1006 25 training_loop """lcwa""" +1006 25 evaluator """rankbased""" +1006 26 dataset """kinships""" +1006 26 model """unstructuredmodel""" +1006 26 loss """bceaftersigmoid""" +1006 26 regularizer """no""" +1006 26 optimizer """adadelta""" +1006 26 training_loop """lcwa""" +1006 26 evaluator """rankbased""" +1006 27 dataset """kinships""" +1006 27 model """unstructuredmodel""" +1006 27 loss """bceaftersigmoid""" +1006 27 regularizer """no""" +1006 27 optimizer """adadelta""" +1006 27 training_loop """lcwa""" +1006 27 evaluator """rankbased""" +1006 28 dataset """kinships""" +1006 28 model """unstructuredmodel""" +1006 28 loss """bceaftersigmoid""" +1006 28 regularizer """no""" +1006 28 optimizer """adadelta""" +1006 28 training_loop """lcwa""" +1006 28 evaluator """rankbased""" +1006 29 dataset """kinships""" +1006 29 model """unstructuredmodel""" +1006 29 loss """bceaftersigmoid""" +1006 29 regularizer """no""" +1006 29 optimizer """adadelta""" +1006 29 training_loop """lcwa""" +1006 29 evaluator """rankbased""" +1006 30 dataset """kinships""" +1006 30 model """unstructuredmodel""" +1006 30 loss """bceaftersigmoid""" +1006 30 regularizer """no""" +1006 30 optimizer """adadelta""" +1006 30 training_loop """lcwa""" +1006 30 evaluator """rankbased""" +1006 31 dataset """kinships""" +1006 31 model """unstructuredmodel""" +1006 31 loss """bceaftersigmoid""" +1006 31 regularizer """no""" +1006 31 optimizer """adadelta""" +1006 31 training_loop """lcwa""" +1006 31 evaluator """rankbased""" +1006 32 dataset """kinships""" +1006 32 model """unstructuredmodel""" +1006 32 loss """bceaftersigmoid""" +1006 32 regularizer """no""" +1006 32 optimizer """adadelta""" +1006 32 training_loop """lcwa""" +1006 32 evaluator """rankbased""" +1006 33 dataset """kinships""" +1006 33 model """unstructuredmodel""" +1006 33 loss """bceaftersigmoid""" +1006 33 regularizer """no""" +1006 33 optimizer """adadelta""" +1006 33 training_loop 
"""lcwa""" +1006 33 evaluator """rankbased""" +1006 34 dataset """kinships""" +1006 34 model """unstructuredmodel""" +1006 34 loss """bceaftersigmoid""" +1006 34 regularizer """no""" +1006 34 optimizer """adadelta""" +1006 34 training_loop """lcwa""" +1006 34 evaluator """rankbased""" +1006 35 dataset """kinships""" +1006 35 model """unstructuredmodel""" +1006 35 loss """bceaftersigmoid""" +1006 35 regularizer """no""" +1006 35 optimizer """adadelta""" +1006 35 training_loop """lcwa""" +1006 35 evaluator """rankbased""" +1006 36 dataset """kinships""" +1006 36 model """unstructuredmodel""" +1006 36 loss """bceaftersigmoid""" +1006 36 regularizer """no""" +1006 36 optimizer """adadelta""" +1006 36 training_loop """lcwa""" +1006 36 evaluator """rankbased""" +1006 37 dataset """kinships""" +1006 37 model """unstructuredmodel""" +1006 37 loss """bceaftersigmoid""" +1006 37 regularizer """no""" +1006 37 optimizer """adadelta""" +1006 37 training_loop """lcwa""" +1006 37 evaluator """rankbased""" +1006 38 dataset """kinships""" +1006 38 model """unstructuredmodel""" +1006 38 loss """bceaftersigmoid""" +1006 38 regularizer """no""" +1006 38 optimizer """adadelta""" +1006 38 training_loop """lcwa""" +1006 38 evaluator """rankbased""" +1006 39 dataset """kinships""" +1006 39 model """unstructuredmodel""" +1006 39 loss """bceaftersigmoid""" +1006 39 regularizer """no""" +1006 39 optimizer """adadelta""" +1006 39 training_loop """lcwa""" +1006 39 evaluator """rankbased""" +1006 40 dataset """kinships""" +1006 40 model """unstructuredmodel""" +1006 40 loss """bceaftersigmoid""" +1006 40 regularizer """no""" +1006 40 optimizer """adadelta""" +1006 40 training_loop """lcwa""" +1006 40 evaluator """rankbased""" +1006 41 dataset """kinships""" +1006 41 model """unstructuredmodel""" +1006 41 loss """bceaftersigmoid""" +1006 41 regularizer """no""" +1006 41 optimizer """adadelta""" +1006 41 training_loop """lcwa""" +1006 41 evaluator """rankbased""" +1006 42 dataset """kinships""" 
+1006 42 model """unstructuredmodel""" +1006 42 loss """bceaftersigmoid""" +1006 42 regularizer """no""" +1006 42 optimizer """adadelta""" +1006 42 training_loop """lcwa""" +1006 42 evaluator """rankbased""" +1006 43 dataset """kinships""" +1006 43 model """unstructuredmodel""" +1006 43 loss """bceaftersigmoid""" +1006 43 regularizer """no""" +1006 43 optimizer """adadelta""" +1006 43 training_loop """lcwa""" +1006 43 evaluator """rankbased""" +1006 44 dataset """kinships""" +1006 44 model """unstructuredmodel""" +1006 44 loss """bceaftersigmoid""" +1006 44 regularizer """no""" +1006 44 optimizer """adadelta""" +1006 44 training_loop """lcwa""" +1006 44 evaluator """rankbased""" +1006 45 dataset """kinships""" +1006 45 model """unstructuredmodel""" +1006 45 loss """bceaftersigmoid""" +1006 45 regularizer """no""" +1006 45 optimizer """adadelta""" +1006 45 training_loop """lcwa""" +1006 45 evaluator """rankbased""" +1006 46 dataset """kinships""" +1006 46 model """unstructuredmodel""" +1006 46 loss """bceaftersigmoid""" +1006 46 regularizer """no""" +1006 46 optimizer """adadelta""" +1006 46 training_loop """lcwa""" +1006 46 evaluator """rankbased""" +1006 47 dataset """kinships""" +1006 47 model """unstructuredmodel""" +1006 47 loss """bceaftersigmoid""" +1006 47 regularizer """no""" +1006 47 optimizer """adadelta""" +1006 47 training_loop """lcwa""" +1006 47 evaluator """rankbased""" +1006 48 dataset """kinships""" +1006 48 model """unstructuredmodel""" +1006 48 loss """bceaftersigmoid""" +1006 48 regularizer """no""" +1006 48 optimizer """adadelta""" +1006 48 training_loop """lcwa""" +1006 48 evaluator """rankbased""" +1006 49 dataset """kinships""" +1006 49 model """unstructuredmodel""" +1006 49 loss """bceaftersigmoid""" +1006 49 regularizer """no""" +1006 49 optimizer """adadelta""" +1006 49 training_loop """lcwa""" +1006 49 evaluator """rankbased""" +1006 50 dataset """kinships""" +1006 50 model """unstructuredmodel""" +1006 50 loss """bceaftersigmoid""" 
+1006 50 regularizer """no""" +1006 50 optimizer """adadelta""" +1006 50 training_loop """lcwa""" +1006 50 evaluator """rankbased""" +1006 51 dataset """kinships""" +1006 51 model """unstructuredmodel""" +1006 51 loss """bceaftersigmoid""" +1006 51 regularizer """no""" +1006 51 optimizer """adadelta""" +1006 51 training_loop """lcwa""" +1006 51 evaluator """rankbased""" +1006 52 dataset """kinships""" +1006 52 model """unstructuredmodel""" +1006 52 loss """bceaftersigmoid""" +1006 52 regularizer """no""" +1006 52 optimizer """adadelta""" +1006 52 training_loop """lcwa""" +1006 52 evaluator """rankbased""" +1006 53 dataset """kinships""" +1006 53 model """unstructuredmodel""" +1006 53 loss """bceaftersigmoid""" +1006 53 regularizer """no""" +1006 53 optimizer """adadelta""" +1006 53 training_loop """lcwa""" +1006 53 evaluator """rankbased""" +1006 54 dataset """kinships""" +1006 54 model """unstructuredmodel""" +1006 54 loss """bceaftersigmoid""" +1006 54 regularizer """no""" +1006 54 optimizer """adadelta""" +1006 54 training_loop """lcwa""" +1006 54 evaluator """rankbased""" +1006 55 dataset """kinships""" +1006 55 model """unstructuredmodel""" +1006 55 loss """bceaftersigmoid""" +1006 55 regularizer """no""" +1006 55 optimizer """adadelta""" +1006 55 training_loop """lcwa""" +1006 55 evaluator """rankbased""" +1006 56 dataset """kinships""" +1006 56 model """unstructuredmodel""" +1006 56 loss """bceaftersigmoid""" +1006 56 regularizer """no""" +1006 56 optimizer """adadelta""" +1006 56 training_loop """lcwa""" +1006 56 evaluator """rankbased""" +1006 57 dataset """kinships""" +1006 57 model """unstructuredmodel""" +1006 57 loss """bceaftersigmoid""" +1006 57 regularizer """no""" +1006 57 optimizer """adadelta""" +1006 57 training_loop """lcwa""" +1006 57 evaluator """rankbased""" +1006 58 dataset """kinships""" +1006 58 model """unstructuredmodel""" +1006 58 loss """bceaftersigmoid""" +1006 58 regularizer """no""" +1006 58 optimizer """adadelta""" +1006 58 
training_loop """lcwa""" +1006 58 evaluator """rankbased""" +1006 59 dataset """kinships""" +1006 59 model """unstructuredmodel""" +1006 59 loss """bceaftersigmoid""" +1006 59 regularizer """no""" +1006 59 optimizer """adadelta""" +1006 59 training_loop """lcwa""" +1006 59 evaluator """rankbased""" +1006 60 dataset """kinships""" +1006 60 model """unstructuredmodel""" +1006 60 loss """bceaftersigmoid""" +1006 60 regularizer """no""" +1006 60 optimizer """adadelta""" +1006 60 training_loop """lcwa""" +1006 60 evaluator """rankbased""" +1006 61 dataset """kinships""" +1006 61 model """unstructuredmodel""" +1006 61 loss """bceaftersigmoid""" +1006 61 regularizer """no""" +1006 61 optimizer """adadelta""" +1006 61 training_loop """lcwa""" +1006 61 evaluator """rankbased""" +1006 62 dataset """kinships""" +1006 62 model """unstructuredmodel""" +1006 62 loss """bceaftersigmoid""" +1006 62 regularizer """no""" +1006 62 optimizer """adadelta""" +1006 62 training_loop """lcwa""" +1006 62 evaluator """rankbased""" +1006 63 dataset """kinships""" +1006 63 model """unstructuredmodel""" +1006 63 loss """bceaftersigmoid""" +1006 63 regularizer """no""" +1006 63 optimizer """adadelta""" +1006 63 training_loop """lcwa""" +1006 63 evaluator """rankbased""" +1006 64 dataset """kinships""" +1006 64 model """unstructuredmodel""" +1006 64 loss """bceaftersigmoid""" +1006 64 regularizer """no""" +1006 64 optimizer """adadelta""" +1006 64 training_loop """lcwa""" +1006 64 evaluator """rankbased""" +1006 65 dataset """kinships""" +1006 65 model """unstructuredmodel""" +1006 65 loss """bceaftersigmoid""" +1006 65 regularizer """no""" +1006 65 optimizer """adadelta""" +1006 65 training_loop """lcwa""" +1006 65 evaluator """rankbased""" +1006 66 dataset """kinships""" +1006 66 model """unstructuredmodel""" +1006 66 loss """bceaftersigmoid""" +1006 66 regularizer """no""" +1006 66 optimizer """adadelta""" +1006 66 training_loop """lcwa""" +1006 66 evaluator """rankbased""" +1006 67 dataset 
"""kinships""" +1006 67 model """unstructuredmodel""" +1006 67 loss """bceaftersigmoid""" +1006 67 regularizer """no""" +1006 67 optimizer """adadelta""" +1006 67 training_loop """lcwa""" +1006 67 evaluator """rankbased""" +1006 68 dataset """kinships""" +1006 68 model """unstructuredmodel""" +1006 68 loss """bceaftersigmoid""" +1006 68 regularizer """no""" +1006 68 optimizer """adadelta""" +1006 68 training_loop """lcwa""" +1006 68 evaluator """rankbased""" +1006 69 dataset """kinships""" +1006 69 model """unstructuredmodel""" +1006 69 loss """bceaftersigmoid""" +1006 69 regularizer """no""" +1006 69 optimizer """adadelta""" +1006 69 training_loop """lcwa""" +1006 69 evaluator """rankbased""" +1006 70 dataset """kinships""" +1006 70 model """unstructuredmodel""" +1006 70 loss """bceaftersigmoid""" +1006 70 regularizer """no""" +1006 70 optimizer """adadelta""" +1006 70 training_loop """lcwa""" +1006 70 evaluator """rankbased""" +1006 71 dataset """kinships""" +1006 71 model """unstructuredmodel""" +1006 71 loss """bceaftersigmoid""" +1006 71 regularizer """no""" +1006 71 optimizer """adadelta""" +1006 71 training_loop """lcwa""" +1006 71 evaluator """rankbased""" +1006 72 dataset """kinships""" +1006 72 model """unstructuredmodel""" +1006 72 loss """bceaftersigmoid""" +1006 72 regularizer """no""" +1006 72 optimizer """adadelta""" +1006 72 training_loop """lcwa""" +1006 72 evaluator """rankbased""" +1006 73 dataset """kinships""" +1006 73 model """unstructuredmodel""" +1006 73 loss """bceaftersigmoid""" +1006 73 regularizer """no""" +1006 73 optimizer """adadelta""" +1006 73 training_loop """lcwa""" +1006 73 evaluator """rankbased""" +1006 74 dataset """kinships""" +1006 74 model """unstructuredmodel""" +1006 74 loss """bceaftersigmoid""" +1006 74 regularizer """no""" +1006 74 optimizer """adadelta""" +1006 74 training_loop """lcwa""" +1006 74 evaluator """rankbased""" +1006 75 dataset """kinships""" +1006 75 model """unstructuredmodel""" +1006 75 loss 
"""bceaftersigmoid""" +1006 75 regularizer """no""" +1006 75 optimizer """adadelta""" +1006 75 training_loop """lcwa""" +1006 75 evaluator """rankbased""" +1006 76 dataset """kinships""" +1006 76 model """unstructuredmodel""" +1006 76 loss """bceaftersigmoid""" +1006 76 regularizer """no""" +1006 76 optimizer """adadelta""" +1006 76 training_loop """lcwa""" +1006 76 evaluator """rankbased""" +1006 77 dataset """kinships""" +1006 77 model """unstructuredmodel""" +1006 77 loss """bceaftersigmoid""" +1006 77 regularizer """no""" +1006 77 optimizer """adadelta""" +1006 77 training_loop """lcwa""" +1006 77 evaluator """rankbased""" +1006 78 dataset """kinships""" +1006 78 model """unstructuredmodel""" +1006 78 loss """bceaftersigmoid""" +1006 78 regularizer """no""" +1006 78 optimizer """adadelta""" +1006 78 training_loop """lcwa""" +1006 78 evaluator """rankbased""" +1006 79 dataset """kinships""" +1006 79 model """unstructuredmodel""" +1006 79 loss """bceaftersigmoid""" +1006 79 regularizer """no""" +1006 79 optimizer """adadelta""" +1006 79 training_loop """lcwa""" +1006 79 evaluator """rankbased""" +1006 80 dataset """kinships""" +1006 80 model """unstructuredmodel""" +1006 80 loss """bceaftersigmoid""" +1006 80 regularizer """no""" +1006 80 optimizer """adadelta""" +1006 80 training_loop """lcwa""" +1006 80 evaluator """rankbased""" +1006 81 dataset """kinships""" +1006 81 model """unstructuredmodel""" +1006 81 loss """bceaftersigmoid""" +1006 81 regularizer """no""" +1006 81 optimizer """adadelta""" +1006 81 training_loop """lcwa""" +1006 81 evaluator """rankbased""" +1006 82 dataset """kinships""" +1006 82 model """unstructuredmodel""" +1006 82 loss """bceaftersigmoid""" +1006 82 regularizer """no""" +1006 82 optimizer """adadelta""" +1006 82 training_loop """lcwa""" +1006 82 evaluator """rankbased""" +1006 83 dataset """kinships""" +1006 83 model """unstructuredmodel""" +1006 83 loss """bceaftersigmoid""" +1006 83 regularizer """no""" +1006 83 optimizer 
"""adadelta""" +1006 83 training_loop """lcwa""" +1006 83 evaluator """rankbased""" +1006 84 dataset """kinships""" +1006 84 model """unstructuredmodel""" +1006 84 loss """bceaftersigmoid""" +1006 84 regularizer """no""" +1006 84 optimizer """adadelta""" +1006 84 training_loop """lcwa""" +1006 84 evaluator """rankbased""" +1006 85 dataset """kinships""" +1006 85 model """unstructuredmodel""" +1006 85 loss """bceaftersigmoid""" +1006 85 regularizer """no""" +1006 85 optimizer """adadelta""" +1006 85 training_loop """lcwa""" +1006 85 evaluator """rankbased""" +1006 86 dataset """kinships""" +1006 86 model """unstructuredmodel""" +1006 86 loss """bceaftersigmoid""" +1006 86 regularizer """no""" +1006 86 optimizer """adadelta""" +1006 86 training_loop """lcwa""" +1006 86 evaluator """rankbased""" +1006 87 dataset """kinships""" +1006 87 model """unstructuredmodel""" +1006 87 loss """bceaftersigmoid""" +1006 87 regularizer """no""" +1006 87 optimizer """adadelta""" +1006 87 training_loop """lcwa""" +1006 87 evaluator """rankbased""" +1006 88 dataset """kinships""" +1006 88 model """unstructuredmodel""" +1006 88 loss """bceaftersigmoid""" +1006 88 regularizer """no""" +1006 88 optimizer """adadelta""" +1006 88 training_loop """lcwa""" +1006 88 evaluator """rankbased""" +1006 89 dataset """kinships""" +1006 89 model """unstructuredmodel""" +1006 89 loss """bceaftersigmoid""" +1006 89 regularizer """no""" +1006 89 optimizer """adadelta""" +1006 89 training_loop """lcwa""" +1006 89 evaluator """rankbased""" +1006 90 dataset """kinships""" +1006 90 model """unstructuredmodel""" +1006 90 loss """bceaftersigmoid""" +1006 90 regularizer """no""" +1006 90 optimizer """adadelta""" +1006 90 training_loop """lcwa""" +1006 90 evaluator """rankbased""" +1006 91 dataset """kinships""" +1006 91 model """unstructuredmodel""" +1006 91 loss """bceaftersigmoid""" +1006 91 regularizer """no""" +1006 91 optimizer """adadelta""" +1006 91 training_loop """lcwa""" +1006 91 evaluator 
"""rankbased""" +1006 92 dataset """kinships""" +1006 92 model """unstructuredmodel""" +1006 92 loss """bceaftersigmoid""" +1006 92 regularizer """no""" +1006 92 optimizer """adadelta""" +1006 92 training_loop """lcwa""" +1006 92 evaluator """rankbased""" +1006 93 dataset """kinships""" +1006 93 model """unstructuredmodel""" +1006 93 loss """bceaftersigmoid""" +1006 93 regularizer """no""" +1006 93 optimizer """adadelta""" +1006 93 training_loop """lcwa""" +1006 93 evaluator """rankbased""" +1006 94 dataset """kinships""" +1006 94 model """unstructuredmodel""" +1006 94 loss """bceaftersigmoid""" +1006 94 regularizer """no""" +1006 94 optimizer """adadelta""" +1006 94 training_loop """lcwa""" +1006 94 evaluator """rankbased""" +1006 95 dataset """kinships""" +1006 95 model """unstructuredmodel""" +1006 95 loss """bceaftersigmoid""" +1006 95 regularizer """no""" +1006 95 optimizer """adadelta""" +1006 95 training_loop """lcwa""" +1006 95 evaluator """rankbased""" +1006 96 dataset """kinships""" +1006 96 model """unstructuredmodel""" +1006 96 loss """bceaftersigmoid""" +1006 96 regularizer """no""" +1006 96 optimizer """adadelta""" +1006 96 training_loop """lcwa""" +1006 96 evaluator """rankbased""" +1006 97 dataset """kinships""" +1006 97 model """unstructuredmodel""" +1006 97 loss """bceaftersigmoid""" +1006 97 regularizer """no""" +1006 97 optimizer """adadelta""" +1006 97 training_loop """lcwa""" +1006 97 evaluator """rankbased""" +1006 98 dataset """kinships""" +1006 98 model """unstructuredmodel""" +1006 98 loss """bceaftersigmoid""" +1006 98 regularizer """no""" +1006 98 optimizer """adadelta""" +1006 98 training_loop """lcwa""" +1006 98 evaluator """rankbased""" +1006 99 dataset """kinships""" +1006 99 model """unstructuredmodel""" +1006 99 loss """bceaftersigmoid""" +1006 99 regularizer """no""" +1006 99 optimizer """adadelta""" +1006 99 training_loop """lcwa""" +1006 99 evaluator """rankbased""" +1006 100 dataset """kinships""" +1006 100 model 
"""unstructuredmodel""" +1006 100 loss """bceaftersigmoid""" +1006 100 regularizer """no""" +1006 100 optimizer """adadelta""" +1006 100 training_loop """lcwa""" +1006 100 evaluator """rankbased""" +1007 1 model.embedding_dim 1.0 +1007 1 model.scoring_fct_norm 1.0 +1007 1 training.batch_size 1.0 +1007 1 training.label_smoothing 0.016821503199363142 +1007 2 model.embedding_dim 1.0 +1007 2 model.scoring_fct_norm 2.0 +1007 2 training.batch_size 2.0 +1007 2 training.label_smoothing 0.0025102332587383855 +1007 3 model.embedding_dim 1.0 +1007 3 model.scoring_fct_norm 1.0 +1007 3 training.batch_size 2.0 +1007 3 training.label_smoothing 0.05833498442196471 +1007 4 model.embedding_dim 0.0 +1007 4 model.scoring_fct_norm 1.0 +1007 4 training.batch_size 1.0 +1007 4 training.label_smoothing 0.09408534451411625 +1007 5 model.embedding_dim 1.0 +1007 5 model.scoring_fct_norm 2.0 +1007 5 training.batch_size 1.0 +1007 5 training.label_smoothing 0.3631794466133357 +1007 6 model.embedding_dim 2.0 +1007 6 model.scoring_fct_norm 1.0 +1007 6 training.batch_size 0.0 +1007 6 training.label_smoothing 0.6358815562451441 +1007 7 model.embedding_dim 2.0 +1007 7 model.scoring_fct_norm 2.0 +1007 7 training.batch_size 0.0 +1007 7 training.label_smoothing 0.4054125923661306 +1007 8 model.embedding_dim 2.0 +1007 8 model.scoring_fct_norm 1.0 +1007 8 training.batch_size 2.0 +1007 8 training.label_smoothing 0.7992945901254396 +1007 9 model.embedding_dim 0.0 +1007 9 model.scoring_fct_norm 1.0 +1007 9 training.batch_size 0.0 +1007 9 training.label_smoothing 0.0074275740724235 +1007 10 model.embedding_dim 2.0 +1007 10 model.scoring_fct_norm 1.0 +1007 10 training.batch_size 0.0 +1007 10 training.label_smoothing 0.33309043107696107 +1007 11 model.embedding_dim 0.0 +1007 11 model.scoring_fct_norm 1.0 +1007 11 training.batch_size 1.0 +1007 11 training.label_smoothing 0.4876644854733814 +1007 12 model.embedding_dim 0.0 +1007 12 model.scoring_fct_norm 2.0 +1007 12 training.batch_size 2.0 +1007 12 
training.label_smoothing 0.11321960457393443 +1007 13 model.embedding_dim 2.0 +1007 13 model.scoring_fct_norm 1.0 +1007 13 training.batch_size 2.0 +1007 13 training.label_smoothing 0.042660088051307145 +1007 14 model.embedding_dim 0.0 +1007 14 model.scoring_fct_norm 2.0 +1007 14 training.batch_size 1.0 +1007 14 training.label_smoothing 0.7176488926565563 +1007 15 model.embedding_dim 2.0 +1007 15 model.scoring_fct_norm 1.0 +1007 15 training.batch_size 2.0 +1007 15 training.label_smoothing 0.0034972168504889237 +1007 16 model.embedding_dim 2.0 +1007 16 model.scoring_fct_norm 2.0 +1007 16 training.batch_size 1.0 +1007 16 training.label_smoothing 0.04105762638876317 +1007 17 model.embedding_dim 1.0 +1007 17 model.scoring_fct_norm 1.0 +1007 17 training.batch_size 0.0 +1007 17 training.label_smoothing 0.6065388208337873 +1007 18 model.embedding_dim 2.0 +1007 18 model.scoring_fct_norm 2.0 +1007 18 training.batch_size 2.0 +1007 18 training.label_smoothing 0.007683218001697946 +1007 19 model.embedding_dim 0.0 +1007 19 model.scoring_fct_norm 2.0 +1007 19 training.batch_size 2.0 +1007 19 training.label_smoothing 0.3728002834481649 +1007 20 model.embedding_dim 0.0 +1007 20 model.scoring_fct_norm 2.0 +1007 20 training.batch_size 0.0 +1007 20 training.label_smoothing 0.001855526072299 +1007 21 model.embedding_dim 2.0 +1007 21 model.scoring_fct_norm 2.0 +1007 21 training.batch_size 0.0 +1007 21 training.label_smoothing 0.00149062272411704 +1007 22 model.embedding_dim 0.0 +1007 22 model.scoring_fct_norm 2.0 +1007 22 training.batch_size 1.0 +1007 22 training.label_smoothing 0.0028455105046431566 +1007 23 model.embedding_dim 1.0 +1007 23 model.scoring_fct_norm 1.0 +1007 23 training.batch_size 1.0 +1007 23 training.label_smoothing 0.029554595827317398 +1007 24 model.embedding_dim 0.0 +1007 24 model.scoring_fct_norm 1.0 +1007 24 training.batch_size 0.0 +1007 24 training.label_smoothing 0.057660555143967895 +1007 25 model.embedding_dim 2.0 +1007 25 model.scoring_fct_norm 2.0 +1007 25 
training.batch_size 0.0 +1007 25 training.label_smoothing 0.0024844744712163276 +1007 26 model.embedding_dim 1.0 +1007 26 model.scoring_fct_norm 2.0 +1007 26 training.batch_size 0.0 +1007 26 training.label_smoothing 0.039937968585366065 +1007 27 model.embedding_dim 2.0 +1007 27 model.scoring_fct_norm 1.0 +1007 27 training.batch_size 0.0 +1007 27 training.label_smoothing 0.028259306311357014 +1007 28 model.embedding_dim 2.0 +1007 28 model.scoring_fct_norm 1.0 +1007 28 training.batch_size 0.0 +1007 28 training.label_smoothing 0.010367713377331526 +1007 29 model.embedding_dim 0.0 +1007 29 model.scoring_fct_norm 1.0 +1007 29 training.batch_size 1.0 +1007 29 training.label_smoothing 0.41287238451132535 +1007 30 model.embedding_dim 1.0 +1007 30 model.scoring_fct_norm 1.0 +1007 30 training.batch_size 2.0 +1007 30 training.label_smoothing 0.6554862057126993 +1007 31 model.embedding_dim 2.0 +1007 31 model.scoring_fct_norm 2.0 +1007 31 training.batch_size 1.0 +1007 31 training.label_smoothing 0.006826846931923245 +1007 32 model.embedding_dim 1.0 +1007 32 model.scoring_fct_norm 2.0 +1007 32 training.batch_size 1.0 +1007 32 training.label_smoothing 0.003209447046165455 +1007 33 model.embedding_dim 2.0 +1007 33 model.scoring_fct_norm 1.0 +1007 33 training.batch_size 0.0 +1007 33 training.label_smoothing 0.004768119138119472 +1007 34 model.embedding_dim 2.0 +1007 34 model.scoring_fct_norm 1.0 +1007 34 training.batch_size 1.0 +1007 34 training.label_smoothing 0.004283061475835942 +1007 35 model.embedding_dim 2.0 +1007 35 model.scoring_fct_norm 1.0 +1007 35 training.batch_size 0.0 +1007 35 training.label_smoothing 0.0724352346978406 +1007 36 model.embedding_dim 1.0 +1007 36 model.scoring_fct_norm 1.0 +1007 36 training.batch_size 0.0 +1007 36 training.label_smoothing 0.22168374658983211 +1007 37 model.embedding_dim 1.0 +1007 37 model.scoring_fct_norm 2.0 +1007 37 training.batch_size 0.0 +1007 37 training.label_smoothing 0.022063298078332745 +1007 38 model.embedding_dim 2.0 +1007 38 
model.scoring_fct_norm 2.0 +1007 38 training.batch_size 0.0 +1007 38 training.label_smoothing 0.7282032833052791 +1007 39 model.embedding_dim 2.0 +1007 39 model.scoring_fct_norm 2.0 +1007 39 training.batch_size 0.0 +1007 39 training.label_smoothing 0.04651011300534748 +1007 40 model.embedding_dim 2.0 +1007 40 model.scoring_fct_norm 1.0 +1007 40 training.batch_size 0.0 +1007 40 training.label_smoothing 0.0014039387770166277 +1007 41 model.embedding_dim 2.0 +1007 41 model.scoring_fct_norm 1.0 +1007 41 training.batch_size 0.0 +1007 41 training.label_smoothing 0.790643964641142 +1007 42 model.embedding_dim 0.0 +1007 42 model.scoring_fct_norm 1.0 +1007 42 training.batch_size 0.0 +1007 42 training.label_smoothing 0.06205168106782713 +1007 43 model.embedding_dim 1.0 +1007 43 model.scoring_fct_norm 1.0 +1007 43 training.batch_size 2.0 +1007 43 training.label_smoothing 0.03474245681074884 +1007 44 model.embedding_dim 0.0 +1007 44 model.scoring_fct_norm 1.0 +1007 44 training.batch_size 2.0 +1007 44 training.label_smoothing 0.02362236792803719 +1007 45 model.embedding_dim 0.0 +1007 45 model.scoring_fct_norm 2.0 +1007 45 training.batch_size 2.0 +1007 45 training.label_smoothing 0.0959896524021014 +1007 46 model.embedding_dim 0.0 +1007 46 model.scoring_fct_norm 1.0 +1007 46 training.batch_size 1.0 +1007 46 training.label_smoothing 0.6470510899480493 +1007 47 model.embedding_dim 2.0 +1007 47 model.scoring_fct_norm 1.0 +1007 47 training.batch_size 0.0 +1007 47 training.label_smoothing 0.012034370943748552 +1007 48 model.embedding_dim 0.0 +1007 48 model.scoring_fct_norm 2.0 +1007 48 training.batch_size 2.0 +1007 48 training.label_smoothing 0.041612351432660566 +1007 49 model.embedding_dim 1.0 +1007 49 model.scoring_fct_norm 1.0 +1007 49 training.batch_size 0.0 +1007 49 training.label_smoothing 0.30654069267657025 +1007 50 model.embedding_dim 0.0 +1007 50 model.scoring_fct_norm 1.0 +1007 50 training.batch_size 1.0 +1007 50 training.label_smoothing 0.0030385019759618464 +1007 51 
model.embedding_dim 0.0 +1007 51 model.scoring_fct_norm 2.0 +1007 51 training.batch_size 1.0 +1007 51 training.label_smoothing 0.05969005690147755 +1007 52 model.embedding_dim 0.0 +1007 52 model.scoring_fct_norm 2.0 +1007 52 training.batch_size 2.0 +1007 52 training.label_smoothing 0.058584839545007344 +1007 53 model.embedding_dim 0.0 +1007 53 model.scoring_fct_norm 2.0 +1007 53 training.batch_size 2.0 +1007 53 training.label_smoothing 0.9211682234824063 +1007 54 model.embedding_dim 2.0 +1007 54 model.scoring_fct_norm 1.0 +1007 54 training.batch_size 0.0 +1007 54 training.label_smoothing 0.0014276933649707898 +1007 55 model.embedding_dim 1.0 +1007 55 model.scoring_fct_norm 1.0 +1007 55 training.batch_size 2.0 +1007 55 training.label_smoothing 0.01626173172131393 +1007 56 model.embedding_dim 2.0 +1007 56 model.scoring_fct_norm 2.0 +1007 56 training.batch_size 1.0 +1007 56 training.label_smoothing 0.08622492290428665 +1007 57 model.embedding_dim 1.0 +1007 57 model.scoring_fct_norm 1.0 +1007 57 training.batch_size 1.0 +1007 57 training.label_smoothing 0.4647387656255673 +1007 58 model.embedding_dim 1.0 +1007 58 model.scoring_fct_norm 2.0 +1007 58 training.batch_size 2.0 +1007 58 training.label_smoothing 0.05778681422517731 +1007 59 model.embedding_dim 0.0 +1007 59 model.scoring_fct_norm 1.0 +1007 59 training.batch_size 2.0 +1007 59 training.label_smoothing 0.002339573125868351 +1007 60 model.embedding_dim 1.0 +1007 60 model.scoring_fct_norm 2.0 +1007 60 training.batch_size 0.0 +1007 60 training.label_smoothing 0.1211901325321552 +1007 61 model.embedding_dim 0.0 +1007 61 model.scoring_fct_norm 2.0 +1007 61 training.batch_size 0.0 +1007 61 training.label_smoothing 0.004244112416576079 +1007 62 model.embedding_dim 1.0 +1007 62 model.scoring_fct_norm 2.0 +1007 62 training.batch_size 2.0 +1007 62 training.label_smoothing 0.24116470467748727 +1007 63 model.embedding_dim 2.0 +1007 63 model.scoring_fct_norm 2.0 +1007 63 training.batch_size 2.0 +1007 63 
training.label_smoothing 0.08528865465195068 +1007 64 model.embedding_dim 2.0 +1007 64 model.scoring_fct_norm 2.0 +1007 64 training.batch_size 2.0 +1007 64 training.label_smoothing 0.00642450202480521 +1007 65 model.embedding_dim 0.0 +1007 65 model.scoring_fct_norm 2.0 +1007 65 training.batch_size 2.0 +1007 65 training.label_smoothing 0.01582991994305307 +1007 66 model.embedding_dim 1.0 +1007 66 model.scoring_fct_norm 1.0 +1007 66 training.batch_size 0.0 +1007 66 training.label_smoothing 0.014491957590396044 +1007 67 model.embedding_dim 0.0 +1007 67 model.scoring_fct_norm 1.0 +1007 67 training.batch_size 1.0 +1007 67 training.label_smoothing 0.0023711764911361734 +1007 68 model.embedding_dim 1.0 +1007 68 model.scoring_fct_norm 1.0 +1007 68 training.batch_size 2.0 +1007 68 training.label_smoothing 0.0019910458234981532 +1007 69 model.embedding_dim 1.0 +1007 69 model.scoring_fct_norm 1.0 +1007 69 training.batch_size 2.0 +1007 69 training.label_smoothing 0.0012029144303277485 +1007 70 model.embedding_dim 2.0 +1007 70 model.scoring_fct_norm 2.0 +1007 70 training.batch_size 2.0 +1007 70 training.label_smoothing 0.027982135727216053 +1007 71 model.embedding_dim 0.0 +1007 71 model.scoring_fct_norm 2.0 +1007 71 training.batch_size 1.0 +1007 71 training.label_smoothing 0.6377070631030145 +1007 72 model.embedding_dim 2.0 +1007 72 model.scoring_fct_norm 1.0 +1007 72 training.batch_size 2.0 +1007 72 training.label_smoothing 0.001310743892565088 +1007 73 model.embedding_dim 1.0 +1007 73 model.scoring_fct_norm 2.0 +1007 73 training.batch_size 0.0 +1007 73 training.label_smoothing 0.3177831698046825 +1007 74 model.embedding_dim 2.0 +1007 74 model.scoring_fct_norm 2.0 +1007 74 training.batch_size 2.0 +1007 74 training.label_smoothing 0.02729420939475015 +1007 75 model.embedding_dim 2.0 +1007 75 model.scoring_fct_norm 2.0 +1007 75 training.batch_size 0.0 +1007 75 training.label_smoothing 0.011578950304556676 +1007 76 model.embedding_dim 1.0 +1007 76 model.scoring_fct_norm 1.0 +1007 
76 training.batch_size 1.0 +1007 76 training.label_smoothing 0.007752727363288574 +1007 77 model.embedding_dim 0.0 +1007 77 model.scoring_fct_norm 2.0 +1007 77 training.batch_size 0.0 +1007 77 training.label_smoothing 0.2970491099480584 +1007 78 model.embedding_dim 2.0 +1007 78 model.scoring_fct_norm 1.0 +1007 78 training.batch_size 2.0 +1007 78 training.label_smoothing 0.5536375956588597 +1007 79 model.embedding_dim 2.0 +1007 79 model.scoring_fct_norm 1.0 +1007 79 training.batch_size 2.0 +1007 79 training.label_smoothing 0.2365685247514297 +1007 80 model.embedding_dim 1.0 +1007 80 model.scoring_fct_norm 1.0 +1007 80 training.batch_size 0.0 +1007 80 training.label_smoothing 0.0014049093077228825 +1007 81 model.embedding_dim 2.0 +1007 81 model.scoring_fct_norm 1.0 +1007 81 training.batch_size 1.0 +1007 81 training.label_smoothing 0.5933183568539593 +1007 82 model.embedding_dim 1.0 +1007 82 model.scoring_fct_norm 2.0 +1007 82 training.batch_size 0.0 +1007 82 training.label_smoothing 0.20035528652770696 +1007 83 model.embedding_dim 1.0 +1007 83 model.scoring_fct_norm 1.0 +1007 83 training.batch_size 0.0 +1007 83 training.label_smoothing 0.0044298077682155225 +1007 84 model.embedding_dim 0.0 +1007 84 model.scoring_fct_norm 2.0 +1007 84 training.batch_size 1.0 +1007 84 training.label_smoothing 0.298161044411536 +1007 85 model.embedding_dim 1.0 +1007 85 model.scoring_fct_norm 1.0 +1007 85 training.batch_size 0.0 +1007 85 training.label_smoothing 0.05688117954637664 +1007 86 model.embedding_dim 0.0 +1007 86 model.scoring_fct_norm 1.0 +1007 86 training.batch_size 0.0 +1007 86 training.label_smoothing 0.010489604652955276 +1007 87 model.embedding_dim 1.0 +1007 87 model.scoring_fct_norm 2.0 +1007 87 training.batch_size 0.0 +1007 87 training.label_smoothing 0.003396590300228821 +1007 88 model.embedding_dim 0.0 +1007 88 model.scoring_fct_norm 2.0 +1007 88 training.batch_size 0.0 +1007 88 training.label_smoothing 0.0020710181103368846 +1007 89 model.embedding_dim 2.0 +1007 89 
model.scoring_fct_norm 1.0 +1007 89 training.batch_size 0.0 +1007 89 training.label_smoothing 0.030340305798198727 +1007 90 model.embedding_dim 0.0 +1007 90 model.scoring_fct_norm 2.0 +1007 90 training.batch_size 0.0 +1007 90 training.label_smoothing 0.3050279347549941 +1007 91 model.embedding_dim 2.0 +1007 91 model.scoring_fct_norm 2.0 +1007 91 training.batch_size 2.0 +1007 91 training.label_smoothing 0.005877799411418676 +1007 92 model.embedding_dim 2.0 +1007 92 model.scoring_fct_norm 1.0 +1007 92 training.batch_size 2.0 +1007 92 training.label_smoothing 0.021106920936787125 +1007 93 model.embedding_dim 1.0 +1007 93 model.scoring_fct_norm 1.0 +1007 93 training.batch_size 1.0 +1007 93 training.label_smoothing 0.403530662007219 +1007 94 model.embedding_dim 1.0 +1007 94 model.scoring_fct_norm 1.0 +1007 94 training.batch_size 1.0 +1007 94 training.label_smoothing 0.0014335866311176375 +1007 95 model.embedding_dim 1.0 +1007 95 model.scoring_fct_norm 2.0 +1007 95 training.batch_size 2.0 +1007 95 training.label_smoothing 0.10941367772337639 +1007 96 model.embedding_dim 2.0 +1007 96 model.scoring_fct_norm 2.0 +1007 96 training.batch_size 2.0 +1007 96 training.label_smoothing 0.6391710893548186 +1007 97 model.embedding_dim 1.0 +1007 97 model.scoring_fct_norm 1.0 +1007 97 training.batch_size 1.0 +1007 97 training.label_smoothing 0.2854048124756126 +1007 98 model.embedding_dim 0.0 +1007 98 model.scoring_fct_norm 1.0 +1007 98 training.batch_size 2.0 +1007 98 training.label_smoothing 0.6521159627473487 +1007 99 model.embedding_dim 0.0 +1007 99 model.scoring_fct_norm 2.0 +1007 99 training.batch_size 2.0 +1007 99 training.label_smoothing 0.01859203642867078 +1007 100 model.embedding_dim 1.0 +1007 100 model.scoring_fct_norm 2.0 +1007 100 training.batch_size 2.0 +1007 100 training.label_smoothing 0.0012148414797909478 +1007 1 dataset """kinships""" +1007 1 model """unstructuredmodel""" +1007 1 loss """softplus""" +1007 1 regularizer """no""" +1007 1 optimizer """adadelta""" +1007 
1 training_loop """lcwa""" +1007 1 evaluator """rankbased""" +1007 2 dataset """kinships""" +1007 2 model """unstructuredmodel""" +1007 2 loss """softplus""" +1007 2 regularizer """no""" +1007 2 optimizer """adadelta""" +1007 2 training_loop """lcwa""" +1007 2 evaluator """rankbased""" +1007 3 dataset """kinships""" +1007 3 model """unstructuredmodel""" +1007 3 loss """softplus""" +1007 3 regularizer """no""" +1007 3 optimizer """adadelta""" +1007 3 training_loop """lcwa""" +1007 3 evaluator """rankbased""" +1007 4 dataset """kinships""" +1007 4 model """unstructuredmodel""" +1007 4 loss """softplus""" +1007 4 regularizer """no""" +1007 4 optimizer """adadelta""" +1007 4 training_loop """lcwa""" +1007 4 evaluator """rankbased""" +1007 5 dataset """kinships""" +1007 5 model """unstructuredmodel""" +1007 5 loss """softplus""" +1007 5 regularizer """no""" +1007 5 optimizer """adadelta""" +1007 5 training_loop """lcwa""" +1007 5 evaluator """rankbased""" +1007 6 dataset """kinships""" +1007 6 model """unstructuredmodel""" +1007 6 loss """softplus""" +1007 6 regularizer """no""" +1007 6 optimizer """adadelta""" +1007 6 training_loop """lcwa""" +1007 6 evaluator """rankbased""" +1007 7 dataset """kinships""" +1007 7 model """unstructuredmodel""" +1007 7 loss """softplus""" +1007 7 regularizer """no""" +1007 7 optimizer """adadelta""" +1007 7 training_loop """lcwa""" +1007 7 evaluator """rankbased""" +1007 8 dataset """kinships""" +1007 8 model """unstructuredmodel""" +1007 8 loss """softplus""" +1007 8 regularizer """no""" +1007 8 optimizer """adadelta""" +1007 8 training_loop """lcwa""" +1007 8 evaluator """rankbased""" +1007 9 dataset """kinships""" +1007 9 model """unstructuredmodel""" +1007 9 loss """softplus""" +1007 9 regularizer """no""" +1007 9 optimizer """adadelta""" +1007 9 training_loop """lcwa""" +1007 9 evaluator """rankbased""" +1007 10 dataset """kinships""" +1007 10 model """unstructuredmodel""" +1007 10 loss """softplus""" +1007 10 regularizer """no""" 
+1007 10 optimizer """adadelta""" +1007 10 training_loop """lcwa""" +1007 10 evaluator """rankbased""" +1007 11 dataset """kinships""" +1007 11 model """unstructuredmodel""" +1007 11 loss """softplus""" +1007 11 regularizer """no""" +1007 11 optimizer """adadelta""" +1007 11 training_loop """lcwa""" +1007 11 evaluator """rankbased""" +1007 12 dataset """kinships""" +1007 12 model """unstructuredmodel""" +1007 12 loss """softplus""" +1007 12 regularizer """no""" +1007 12 optimizer """adadelta""" +1007 12 training_loop """lcwa""" +1007 12 evaluator """rankbased""" +1007 13 dataset """kinships""" +1007 13 model """unstructuredmodel""" +1007 13 loss """softplus""" +1007 13 regularizer """no""" +1007 13 optimizer """adadelta""" +1007 13 training_loop """lcwa""" +1007 13 evaluator """rankbased""" +1007 14 dataset """kinships""" +1007 14 model """unstructuredmodel""" +1007 14 loss """softplus""" +1007 14 regularizer """no""" +1007 14 optimizer """adadelta""" +1007 14 training_loop """lcwa""" +1007 14 evaluator """rankbased""" +1007 15 dataset """kinships""" +1007 15 model """unstructuredmodel""" +1007 15 loss """softplus""" +1007 15 regularizer """no""" +1007 15 optimizer """adadelta""" +1007 15 training_loop """lcwa""" +1007 15 evaluator """rankbased""" +1007 16 dataset """kinships""" +1007 16 model """unstructuredmodel""" +1007 16 loss """softplus""" +1007 16 regularizer """no""" +1007 16 optimizer """adadelta""" +1007 16 training_loop """lcwa""" +1007 16 evaluator """rankbased""" +1007 17 dataset """kinships""" +1007 17 model """unstructuredmodel""" +1007 17 loss """softplus""" +1007 17 regularizer """no""" +1007 17 optimizer """adadelta""" +1007 17 training_loop """lcwa""" +1007 17 evaluator """rankbased""" +1007 18 dataset """kinships""" +1007 18 model """unstructuredmodel""" +1007 18 loss """softplus""" +1007 18 regularizer """no""" +1007 18 optimizer """adadelta""" +1007 18 training_loop """lcwa""" +1007 18 evaluator """rankbased""" +1007 19 dataset """kinships""" 
+1007 19 model """unstructuredmodel""" +1007 19 loss """softplus""" +1007 19 regularizer """no""" +1007 19 optimizer """adadelta""" +1007 19 training_loop """lcwa""" +1007 19 evaluator """rankbased""" +1007 20 dataset """kinships""" +1007 20 model """unstructuredmodel""" +1007 20 loss """softplus""" +1007 20 regularizer """no""" +1007 20 optimizer """adadelta""" +1007 20 training_loop """lcwa""" +1007 20 evaluator """rankbased""" +1007 21 dataset """kinships""" +1007 21 model """unstructuredmodel""" +1007 21 loss """softplus""" +1007 21 regularizer """no""" +1007 21 optimizer """adadelta""" +1007 21 training_loop """lcwa""" +1007 21 evaluator """rankbased""" +1007 22 dataset """kinships""" +1007 22 model """unstructuredmodel""" +1007 22 loss """softplus""" +1007 22 regularizer """no""" +1007 22 optimizer """adadelta""" +1007 22 training_loop """lcwa""" +1007 22 evaluator """rankbased""" +1007 23 dataset """kinships""" +1007 23 model """unstructuredmodel""" +1007 23 loss """softplus""" +1007 23 regularizer """no""" +1007 23 optimizer """adadelta""" +1007 23 training_loop """lcwa""" +1007 23 evaluator """rankbased""" +1007 24 dataset """kinships""" +1007 24 model """unstructuredmodel""" +1007 24 loss """softplus""" +1007 24 regularizer """no""" +1007 24 optimizer """adadelta""" +1007 24 training_loop """lcwa""" +1007 24 evaluator """rankbased""" +1007 25 dataset """kinships""" +1007 25 model """unstructuredmodel""" +1007 25 loss """softplus""" +1007 25 regularizer """no""" +1007 25 optimizer """adadelta""" +1007 25 training_loop """lcwa""" +1007 25 evaluator """rankbased""" +1007 26 dataset """kinships""" +1007 26 model """unstructuredmodel""" +1007 26 loss """softplus""" +1007 26 regularizer """no""" +1007 26 optimizer """adadelta""" +1007 26 training_loop """lcwa""" +1007 26 evaluator """rankbased""" +1007 27 dataset """kinships""" +1007 27 model """unstructuredmodel""" +1007 27 loss """softplus""" +1007 27 regularizer """no""" +1007 27 optimizer """adadelta""" 
+1007 27 training_loop """lcwa""" +1007 27 evaluator """rankbased""" +1007 28 dataset """kinships""" +1007 28 model """unstructuredmodel""" +1007 28 loss """softplus""" +1007 28 regularizer """no""" +1007 28 optimizer """adadelta""" +1007 28 training_loop """lcwa""" +1007 28 evaluator """rankbased""" +1007 29 dataset """kinships""" +1007 29 model """unstructuredmodel""" +1007 29 loss """softplus""" +1007 29 regularizer """no""" +1007 29 optimizer """adadelta""" +1007 29 training_loop """lcwa""" +1007 29 evaluator """rankbased""" +1007 30 dataset """kinships""" +1007 30 model """unstructuredmodel""" +1007 30 loss """softplus""" +1007 30 regularizer """no""" +1007 30 optimizer """adadelta""" +1007 30 training_loop """lcwa""" +1007 30 evaluator """rankbased""" +1007 31 dataset """kinships""" +1007 31 model """unstructuredmodel""" +1007 31 loss """softplus""" +1007 31 regularizer """no""" +1007 31 optimizer """adadelta""" +1007 31 training_loop """lcwa""" +1007 31 evaluator """rankbased""" +1007 32 dataset """kinships""" +1007 32 model """unstructuredmodel""" +1007 32 loss """softplus""" +1007 32 regularizer """no""" +1007 32 optimizer """adadelta""" +1007 32 training_loop """lcwa""" +1007 32 evaluator """rankbased""" +1007 33 dataset """kinships""" +1007 33 model """unstructuredmodel""" +1007 33 loss """softplus""" +1007 33 regularizer """no""" +1007 33 optimizer """adadelta""" +1007 33 training_loop """lcwa""" +1007 33 evaluator """rankbased""" +1007 34 dataset """kinships""" +1007 34 model """unstructuredmodel""" +1007 34 loss """softplus""" +1007 34 regularizer """no""" +1007 34 optimizer """adadelta""" +1007 34 training_loop """lcwa""" +1007 34 evaluator """rankbased""" +1007 35 dataset """kinships""" +1007 35 model """unstructuredmodel""" +1007 35 loss """softplus""" +1007 35 regularizer """no""" +1007 35 optimizer """adadelta""" +1007 35 training_loop """lcwa""" +1007 35 evaluator """rankbased""" +1007 36 dataset """kinships""" +1007 36 model 
"""unstructuredmodel""" +1007 36 loss """softplus""" +1007 36 regularizer """no""" +1007 36 optimizer """adadelta""" +1007 36 training_loop """lcwa""" +1007 36 evaluator """rankbased""" +1007 37 dataset """kinships""" +1007 37 model """unstructuredmodel""" +1007 37 loss """softplus""" +1007 37 regularizer """no""" +1007 37 optimizer """adadelta""" +1007 37 training_loop """lcwa""" +1007 37 evaluator """rankbased""" +1007 38 dataset """kinships""" +1007 38 model """unstructuredmodel""" +1007 38 loss """softplus""" +1007 38 regularizer """no""" +1007 38 optimizer """adadelta""" +1007 38 training_loop """lcwa""" +1007 38 evaluator """rankbased""" +1007 39 dataset """kinships""" +1007 39 model """unstructuredmodel""" +1007 39 loss """softplus""" +1007 39 regularizer """no""" +1007 39 optimizer """adadelta""" +1007 39 training_loop """lcwa""" +1007 39 evaluator """rankbased""" +1007 40 dataset """kinships""" +1007 40 model """unstructuredmodel""" +1007 40 loss """softplus""" +1007 40 regularizer """no""" +1007 40 optimizer """adadelta""" +1007 40 training_loop """lcwa""" +1007 40 evaluator """rankbased""" +1007 41 dataset """kinships""" +1007 41 model """unstructuredmodel""" +1007 41 loss """softplus""" +1007 41 regularizer """no""" +1007 41 optimizer """adadelta""" +1007 41 training_loop """lcwa""" +1007 41 evaluator """rankbased""" +1007 42 dataset """kinships""" +1007 42 model """unstructuredmodel""" +1007 42 loss """softplus""" +1007 42 regularizer """no""" +1007 42 optimizer """adadelta""" +1007 42 training_loop """lcwa""" +1007 42 evaluator """rankbased""" +1007 43 dataset """kinships""" +1007 43 model """unstructuredmodel""" +1007 43 loss """softplus""" +1007 43 regularizer """no""" +1007 43 optimizer """adadelta""" +1007 43 training_loop """lcwa""" +1007 43 evaluator """rankbased""" +1007 44 dataset """kinships""" +1007 44 model """unstructuredmodel""" +1007 44 loss """softplus""" +1007 44 regularizer """no""" +1007 44 optimizer """adadelta""" +1007 44 
training_loop """lcwa""" +1007 44 evaluator """rankbased""" +1007 45 dataset """kinships""" +1007 45 model """unstructuredmodel""" +1007 45 loss """softplus""" +1007 45 regularizer """no""" +1007 45 optimizer """adadelta""" +1007 45 training_loop """lcwa""" +1007 45 evaluator """rankbased""" +1007 46 dataset """kinships""" +1007 46 model """unstructuredmodel""" +1007 46 loss """softplus""" +1007 46 regularizer """no""" +1007 46 optimizer """adadelta""" +1007 46 training_loop """lcwa""" +1007 46 evaluator """rankbased""" +1007 47 dataset """kinships""" +1007 47 model """unstructuredmodel""" +1007 47 loss """softplus""" +1007 47 regularizer """no""" +1007 47 optimizer """adadelta""" +1007 47 training_loop """lcwa""" +1007 47 evaluator """rankbased""" +1007 48 dataset """kinships""" +1007 48 model """unstructuredmodel""" +1007 48 loss """softplus""" +1007 48 regularizer """no""" +1007 48 optimizer """adadelta""" +1007 48 training_loop """lcwa""" +1007 48 evaluator """rankbased""" +1007 49 dataset """kinships""" +1007 49 model """unstructuredmodel""" +1007 49 loss """softplus""" +1007 49 regularizer """no""" +1007 49 optimizer """adadelta""" +1007 49 training_loop """lcwa""" +1007 49 evaluator """rankbased""" +1007 50 dataset """kinships""" +1007 50 model """unstructuredmodel""" +1007 50 loss """softplus""" +1007 50 regularizer """no""" +1007 50 optimizer """adadelta""" +1007 50 training_loop """lcwa""" +1007 50 evaluator """rankbased""" +1007 51 dataset """kinships""" +1007 51 model """unstructuredmodel""" +1007 51 loss """softplus""" +1007 51 regularizer """no""" +1007 51 optimizer """adadelta""" +1007 51 training_loop """lcwa""" +1007 51 evaluator """rankbased""" +1007 52 dataset """kinships""" +1007 52 model """unstructuredmodel""" +1007 52 loss """softplus""" +1007 52 regularizer """no""" +1007 52 optimizer """adadelta""" +1007 52 training_loop """lcwa""" +1007 52 evaluator """rankbased""" +1007 53 dataset """kinships""" +1007 53 model """unstructuredmodel""" 
+1007 53 loss """softplus""" +1007 53 regularizer """no""" +1007 53 optimizer """adadelta""" +1007 53 training_loop """lcwa""" +1007 53 evaluator """rankbased""" +1007 54 dataset """kinships""" +1007 54 model """unstructuredmodel""" +1007 54 loss """softplus""" +1007 54 regularizer """no""" +1007 54 optimizer """adadelta""" +1007 54 training_loop """lcwa""" +1007 54 evaluator """rankbased""" +1007 55 dataset """kinships""" +1007 55 model """unstructuredmodel""" +1007 55 loss """softplus""" +1007 55 regularizer """no""" +1007 55 optimizer """adadelta""" +1007 55 training_loop """lcwa""" +1007 55 evaluator """rankbased""" +1007 56 dataset """kinships""" +1007 56 model """unstructuredmodel""" +1007 56 loss """softplus""" +1007 56 regularizer """no""" +1007 56 optimizer """adadelta""" +1007 56 training_loop """lcwa""" +1007 56 evaluator """rankbased""" +1007 57 dataset """kinships""" +1007 57 model """unstructuredmodel""" +1007 57 loss """softplus""" +1007 57 regularizer """no""" +1007 57 optimizer """adadelta""" +1007 57 training_loop """lcwa""" +1007 57 evaluator """rankbased""" +1007 58 dataset """kinships""" +1007 58 model """unstructuredmodel""" +1007 58 loss """softplus""" +1007 58 regularizer """no""" +1007 58 optimizer """adadelta""" +1007 58 training_loop """lcwa""" +1007 58 evaluator """rankbased""" +1007 59 dataset """kinships""" +1007 59 model """unstructuredmodel""" +1007 59 loss """softplus""" +1007 59 regularizer """no""" +1007 59 optimizer """adadelta""" +1007 59 training_loop """lcwa""" +1007 59 evaluator """rankbased""" +1007 60 dataset """kinships""" +1007 60 model """unstructuredmodel""" +1007 60 loss """softplus""" +1007 60 regularizer """no""" +1007 60 optimizer """adadelta""" +1007 60 training_loop """lcwa""" +1007 60 evaluator """rankbased""" +1007 61 dataset """kinships""" +1007 61 model """unstructuredmodel""" +1007 61 loss """softplus""" +1007 61 regularizer """no""" +1007 61 optimizer """adadelta""" +1007 61 training_loop """lcwa""" +1007 61 
evaluator """rankbased""" +1007 62 dataset """kinships""" +1007 62 model """unstructuredmodel""" +1007 62 loss """softplus""" +1007 62 regularizer """no""" +1007 62 optimizer """adadelta""" +1007 62 training_loop """lcwa""" +1007 62 evaluator """rankbased""" +1007 63 dataset """kinships""" +1007 63 model """unstructuredmodel""" +1007 63 loss """softplus""" +1007 63 regularizer """no""" +1007 63 optimizer """adadelta""" +1007 63 training_loop """lcwa""" +1007 63 evaluator """rankbased""" +1007 64 dataset """kinships""" +1007 64 model """unstructuredmodel""" +1007 64 loss """softplus""" +1007 64 regularizer """no""" +1007 64 optimizer """adadelta""" +1007 64 training_loop """lcwa""" +1007 64 evaluator """rankbased""" +1007 65 dataset """kinships""" +1007 65 model """unstructuredmodel""" +1007 65 loss """softplus""" +1007 65 regularizer """no""" +1007 65 optimizer """adadelta""" +1007 65 training_loop """lcwa""" +1007 65 evaluator """rankbased""" +1007 66 dataset """kinships""" +1007 66 model """unstructuredmodel""" +1007 66 loss """softplus""" +1007 66 regularizer """no""" +1007 66 optimizer """adadelta""" +1007 66 training_loop """lcwa""" +1007 66 evaluator """rankbased""" +1007 67 dataset """kinships""" +1007 67 model """unstructuredmodel""" +1007 67 loss """softplus""" +1007 67 regularizer """no""" +1007 67 optimizer """adadelta""" +1007 67 training_loop """lcwa""" +1007 67 evaluator """rankbased""" +1007 68 dataset """kinships""" +1007 68 model """unstructuredmodel""" +1007 68 loss """softplus""" +1007 68 regularizer """no""" +1007 68 optimizer """adadelta""" +1007 68 training_loop """lcwa""" +1007 68 evaluator """rankbased""" +1007 69 dataset """kinships""" +1007 69 model """unstructuredmodel""" +1007 69 loss """softplus""" +1007 69 regularizer """no""" +1007 69 optimizer """adadelta""" +1007 69 training_loop """lcwa""" +1007 69 evaluator """rankbased""" +1007 70 dataset """kinships""" +1007 70 model """unstructuredmodel""" +1007 70 loss """softplus""" +1007 70 
regularizer """no""" +1007 70 optimizer """adadelta""" +1007 70 training_loop """lcwa""" +1007 70 evaluator """rankbased""" +1007 71 dataset """kinships""" +1007 71 model """unstructuredmodel""" +1007 71 loss """softplus""" +1007 71 regularizer """no""" +1007 71 optimizer """adadelta""" +1007 71 training_loop """lcwa""" +1007 71 evaluator """rankbased""" +1007 72 dataset """kinships""" +1007 72 model """unstructuredmodel""" +1007 72 loss """softplus""" +1007 72 regularizer """no""" +1007 72 optimizer """adadelta""" +1007 72 training_loop """lcwa""" +1007 72 evaluator """rankbased""" +1007 73 dataset """kinships""" +1007 73 model """unstructuredmodel""" +1007 73 loss """softplus""" +1007 73 regularizer """no""" +1007 73 optimizer """adadelta""" +1007 73 training_loop """lcwa""" +1007 73 evaluator """rankbased""" +1007 74 dataset """kinships""" +1007 74 model """unstructuredmodel""" +1007 74 loss """softplus""" +1007 74 regularizer """no""" +1007 74 optimizer """adadelta""" +1007 74 training_loop """lcwa""" +1007 74 evaluator """rankbased""" +1007 75 dataset """kinships""" +1007 75 model """unstructuredmodel""" +1007 75 loss """softplus""" +1007 75 regularizer """no""" +1007 75 optimizer """adadelta""" +1007 75 training_loop """lcwa""" +1007 75 evaluator """rankbased""" +1007 76 dataset """kinships""" +1007 76 model """unstructuredmodel""" +1007 76 loss """softplus""" +1007 76 regularizer """no""" +1007 76 optimizer """adadelta""" +1007 76 training_loop """lcwa""" +1007 76 evaluator """rankbased""" +1007 77 dataset """kinships""" +1007 77 model """unstructuredmodel""" +1007 77 loss """softplus""" +1007 77 regularizer """no""" +1007 77 optimizer """adadelta""" +1007 77 training_loop """lcwa""" +1007 77 evaluator """rankbased""" +1007 78 dataset """kinships""" +1007 78 model """unstructuredmodel""" +1007 78 loss """softplus""" +1007 78 regularizer """no""" +1007 78 optimizer """adadelta""" +1007 78 training_loop """lcwa""" +1007 78 evaluator """rankbased""" +1007 79 
dataset """kinships""" +1007 79 model """unstructuredmodel""" +1007 79 loss """softplus""" +1007 79 regularizer """no""" +1007 79 optimizer """adadelta""" +1007 79 training_loop """lcwa""" +1007 79 evaluator """rankbased""" +1007 80 dataset """kinships""" +1007 80 model """unstructuredmodel""" +1007 80 loss """softplus""" +1007 80 regularizer """no""" +1007 80 optimizer """adadelta""" +1007 80 training_loop """lcwa""" +1007 80 evaluator """rankbased""" +1007 81 dataset """kinships""" +1007 81 model """unstructuredmodel""" +1007 81 loss """softplus""" +1007 81 regularizer """no""" +1007 81 optimizer """adadelta""" +1007 81 training_loop """lcwa""" +1007 81 evaluator """rankbased""" +1007 82 dataset """kinships""" +1007 82 model """unstructuredmodel""" +1007 82 loss """softplus""" +1007 82 regularizer """no""" +1007 82 optimizer """adadelta""" +1007 82 training_loop """lcwa""" +1007 82 evaluator """rankbased""" +1007 83 dataset """kinships""" +1007 83 model """unstructuredmodel""" +1007 83 loss """softplus""" +1007 83 regularizer """no""" +1007 83 optimizer """adadelta""" +1007 83 training_loop """lcwa""" +1007 83 evaluator """rankbased""" +1007 84 dataset """kinships""" +1007 84 model """unstructuredmodel""" +1007 84 loss """softplus""" +1007 84 regularizer """no""" +1007 84 optimizer """adadelta""" +1007 84 training_loop """lcwa""" +1007 84 evaluator """rankbased""" +1007 85 dataset """kinships""" +1007 85 model """unstructuredmodel""" +1007 85 loss """softplus""" +1007 85 regularizer """no""" +1007 85 optimizer """adadelta""" +1007 85 training_loop """lcwa""" +1007 85 evaluator """rankbased""" +1007 86 dataset """kinships""" +1007 86 model """unstructuredmodel""" +1007 86 loss """softplus""" +1007 86 regularizer """no""" +1007 86 optimizer """adadelta""" +1007 86 training_loop """lcwa""" +1007 86 evaluator """rankbased""" +1007 87 dataset """kinships""" +1007 87 model """unstructuredmodel""" +1007 87 loss """softplus""" +1007 87 regularizer """no""" +1007 87 
optimizer """adadelta""" +1007 87 training_loop """lcwa""" +1007 87 evaluator """rankbased""" +1007 88 dataset """kinships""" +1007 88 model """unstructuredmodel""" +1007 88 loss """softplus""" +1007 88 regularizer """no""" +1007 88 optimizer """adadelta""" +1007 88 training_loop """lcwa""" +1007 88 evaluator """rankbased""" +1007 89 dataset """kinships""" +1007 89 model """unstructuredmodel""" +1007 89 loss """softplus""" +1007 89 regularizer """no""" +1007 89 optimizer """adadelta""" +1007 89 training_loop """lcwa""" +1007 89 evaluator """rankbased""" +1007 90 dataset """kinships""" +1007 90 model """unstructuredmodel""" +1007 90 loss """softplus""" +1007 90 regularizer """no""" +1007 90 optimizer """adadelta""" +1007 90 training_loop """lcwa""" +1007 90 evaluator """rankbased""" +1007 91 dataset """kinships""" +1007 91 model """unstructuredmodel""" +1007 91 loss """softplus""" +1007 91 regularizer """no""" +1007 91 optimizer """adadelta""" +1007 91 training_loop """lcwa""" +1007 91 evaluator """rankbased""" +1007 92 dataset """kinships""" +1007 92 model """unstructuredmodel""" +1007 92 loss """softplus""" +1007 92 regularizer """no""" +1007 92 optimizer """adadelta""" +1007 92 training_loop """lcwa""" +1007 92 evaluator """rankbased""" +1007 93 dataset """kinships""" +1007 93 model """unstructuredmodel""" +1007 93 loss """softplus""" +1007 93 regularizer """no""" +1007 93 optimizer """adadelta""" +1007 93 training_loop """lcwa""" +1007 93 evaluator """rankbased""" +1007 94 dataset """kinships""" +1007 94 model """unstructuredmodel""" +1007 94 loss """softplus""" +1007 94 regularizer """no""" +1007 94 optimizer """adadelta""" +1007 94 training_loop """lcwa""" +1007 94 evaluator """rankbased""" +1007 95 dataset """kinships""" +1007 95 model """unstructuredmodel""" +1007 95 loss """softplus""" +1007 95 regularizer """no""" +1007 95 optimizer """adadelta""" +1007 95 training_loop """lcwa""" +1007 95 evaluator """rankbased""" +1007 96 dataset """kinships""" +1007 96 
model """unstructuredmodel""" +1007 96 loss """softplus""" +1007 96 regularizer """no""" +1007 96 optimizer """adadelta""" +1007 96 training_loop """lcwa""" +1007 96 evaluator """rankbased""" +1007 97 dataset """kinships""" +1007 97 model """unstructuredmodel""" +1007 97 loss """softplus""" +1007 97 regularizer """no""" +1007 97 optimizer """adadelta""" +1007 97 training_loop """lcwa""" +1007 97 evaluator """rankbased""" +1007 98 dataset """kinships""" +1007 98 model """unstructuredmodel""" +1007 98 loss """softplus""" +1007 98 regularizer """no""" +1007 98 optimizer """adadelta""" +1007 98 training_loop """lcwa""" +1007 98 evaluator """rankbased""" +1007 99 dataset """kinships""" +1007 99 model """unstructuredmodel""" +1007 99 loss """softplus""" +1007 99 regularizer """no""" +1007 99 optimizer """adadelta""" +1007 99 training_loop """lcwa""" +1007 99 evaluator """rankbased""" +1007 100 dataset """kinships""" +1007 100 model """unstructuredmodel""" +1007 100 loss """softplus""" +1007 100 regularizer """no""" +1007 100 optimizer """adadelta""" +1007 100 training_loop """lcwa""" +1007 100 evaluator """rankbased""" +1008 1 model.embedding_dim 2.0 +1008 1 model.scoring_fct_norm 1.0 +1008 1 training.batch_size 1.0 +1008 1 training.label_smoothing 0.005316433452274392 +1008 2 model.embedding_dim 0.0 +1008 2 model.scoring_fct_norm 2.0 +1008 2 training.batch_size 1.0 +1008 2 training.label_smoothing 0.019097340449062587 +1008 3 model.embedding_dim 0.0 +1008 3 model.scoring_fct_norm 1.0 +1008 3 training.batch_size 0.0 +1008 3 training.label_smoothing 0.030832144629193585 +1008 4 model.embedding_dim 1.0 +1008 4 model.scoring_fct_norm 1.0 +1008 4 training.batch_size 0.0 +1008 4 training.label_smoothing 0.0022591670788342307 +1008 5 model.embedding_dim 0.0 +1008 5 model.scoring_fct_norm 2.0 +1008 5 training.batch_size 0.0 +1008 5 training.label_smoothing 0.003348953749452311 +1008 6 model.embedding_dim 1.0 +1008 6 model.scoring_fct_norm 1.0 +1008 6 training.batch_size 0.0 
+1008 6 training.label_smoothing 0.10323452834783592 +1008 7 model.embedding_dim 2.0 +1008 7 model.scoring_fct_norm 2.0 +1008 7 training.batch_size 1.0 +1008 7 training.label_smoothing 0.4318495571898537 +1008 8 model.embedding_dim 1.0 +1008 8 model.scoring_fct_norm 1.0 +1008 8 training.batch_size 1.0 +1008 8 training.label_smoothing 0.0021080765017101114 +1008 9 model.embedding_dim 0.0 +1008 9 model.scoring_fct_norm 1.0 +1008 9 training.batch_size 1.0 +1008 9 training.label_smoothing 0.09325133422023445 +1008 10 model.embedding_dim 1.0 +1008 10 model.scoring_fct_norm 2.0 +1008 10 training.batch_size 0.0 +1008 10 training.label_smoothing 0.0026301107523664887 +1008 11 model.embedding_dim 2.0 +1008 11 model.scoring_fct_norm 1.0 +1008 11 training.batch_size 1.0 +1008 11 training.label_smoothing 0.004354300600492507 +1008 12 model.embedding_dim 1.0 +1008 12 model.scoring_fct_norm 1.0 +1008 12 training.batch_size 0.0 +1008 12 training.label_smoothing 0.01423353616938819 +1008 13 model.embedding_dim 2.0 +1008 13 model.scoring_fct_norm 2.0 +1008 13 training.batch_size 1.0 +1008 13 training.label_smoothing 0.12143453679201956 +1008 14 model.embedding_dim 0.0 +1008 14 model.scoring_fct_norm 1.0 +1008 14 training.batch_size 0.0 +1008 14 training.label_smoothing 0.0946641021528477 +1008 15 model.embedding_dim 2.0 +1008 15 model.scoring_fct_norm 1.0 +1008 15 training.batch_size 0.0 +1008 15 training.label_smoothing 0.006537649855352819 +1008 16 model.embedding_dim 0.0 +1008 16 model.scoring_fct_norm 1.0 +1008 16 training.batch_size 2.0 +1008 16 training.label_smoothing 0.026617193312595894 +1008 17 model.embedding_dim 2.0 +1008 17 model.scoring_fct_norm 1.0 +1008 17 training.batch_size 2.0 +1008 17 training.label_smoothing 0.002754306722868265 +1008 18 model.embedding_dim 1.0 +1008 18 model.scoring_fct_norm 1.0 +1008 18 training.batch_size 2.0 +1008 18 training.label_smoothing 0.005656185429489431 +1008 19 model.embedding_dim 0.0 +1008 19 model.scoring_fct_norm 1.0 +1008 19 
training.batch_size 1.0 +1008 19 training.label_smoothing 0.2289596896728884 +1008 20 model.embedding_dim 1.0 +1008 20 model.scoring_fct_norm 1.0 +1008 20 training.batch_size 1.0 +1008 20 training.label_smoothing 0.03451980242538374 +1008 21 model.embedding_dim 2.0 +1008 21 model.scoring_fct_norm 2.0 +1008 21 training.batch_size 2.0 +1008 21 training.label_smoothing 0.0020267113903677447 +1008 22 model.embedding_dim 0.0 +1008 22 model.scoring_fct_norm 2.0 +1008 22 training.batch_size 1.0 +1008 22 training.label_smoothing 0.0071883649969992506 +1008 23 model.embedding_dim 2.0 +1008 23 model.scoring_fct_norm 2.0 +1008 23 training.batch_size 1.0 +1008 23 training.label_smoothing 0.018125911776236396 +1008 24 model.embedding_dim 2.0 +1008 24 model.scoring_fct_norm 1.0 +1008 24 training.batch_size 0.0 +1008 24 training.label_smoothing 0.037241392825557135 +1008 25 model.embedding_dim 2.0 +1008 25 model.scoring_fct_norm 1.0 +1008 25 training.batch_size 1.0 +1008 25 training.label_smoothing 0.004139217499196066 +1008 26 model.embedding_dim 0.0 +1008 26 model.scoring_fct_norm 2.0 +1008 26 training.batch_size 1.0 +1008 26 training.label_smoothing 0.0014798161366756063 +1008 27 model.embedding_dim 1.0 +1008 27 model.scoring_fct_norm 1.0 +1008 27 training.batch_size 1.0 +1008 27 training.label_smoothing 0.0849798494676245 +1008 28 model.embedding_dim 0.0 +1008 28 model.scoring_fct_norm 1.0 +1008 28 training.batch_size 1.0 +1008 28 training.label_smoothing 0.04493271528564892 +1008 29 model.embedding_dim 2.0 +1008 29 model.scoring_fct_norm 1.0 +1008 29 training.batch_size 1.0 +1008 29 training.label_smoothing 0.10497460835717277 +1008 30 model.embedding_dim 0.0 +1008 30 model.scoring_fct_norm 2.0 +1008 30 training.batch_size 0.0 +1008 30 training.label_smoothing 0.15095311079779422 +1008 31 model.embedding_dim 2.0 +1008 31 model.scoring_fct_norm 1.0 +1008 31 training.batch_size 1.0 +1008 31 training.label_smoothing 0.0030687478504132196 +1008 32 model.embedding_dim 0.0 +1008 
32 model.scoring_fct_norm 1.0 +1008 32 training.batch_size 1.0 +1008 32 training.label_smoothing 0.36977601066192667 +1008 33 model.embedding_dim 0.0 +1008 33 model.scoring_fct_norm 1.0 +1008 33 training.batch_size 1.0 +1008 33 training.label_smoothing 0.42377717058030856 +1008 34 model.embedding_dim 0.0 +1008 34 model.scoring_fct_norm 1.0 +1008 34 training.batch_size 2.0 +1008 34 training.label_smoothing 0.018009027235892094 +1008 35 model.embedding_dim 2.0 +1008 35 model.scoring_fct_norm 2.0 +1008 35 training.batch_size 2.0 +1008 35 training.label_smoothing 0.009490351473721886 +1008 36 model.embedding_dim 2.0 +1008 36 model.scoring_fct_norm 2.0 +1008 36 training.batch_size 0.0 +1008 36 training.label_smoothing 0.27977543550060957 +1008 37 model.embedding_dim 2.0 +1008 37 model.scoring_fct_norm 2.0 +1008 37 training.batch_size 0.0 +1008 37 training.label_smoothing 0.28842537057451795 +1008 38 model.embedding_dim 0.0 +1008 38 model.scoring_fct_norm 2.0 +1008 38 training.batch_size 1.0 +1008 38 training.label_smoothing 0.003784905223239814 +1008 39 model.embedding_dim 2.0 +1008 39 model.scoring_fct_norm 2.0 +1008 39 training.batch_size 1.0 +1008 39 training.label_smoothing 0.04103066792839736 +1008 40 model.embedding_dim 1.0 +1008 40 model.scoring_fct_norm 2.0 +1008 40 training.batch_size 2.0 +1008 40 training.label_smoothing 0.0017453249710927213 +1008 41 model.embedding_dim 0.0 +1008 41 model.scoring_fct_norm 2.0 +1008 41 training.batch_size 2.0 +1008 41 training.label_smoothing 0.052000235230521615 +1008 42 model.embedding_dim 2.0 +1008 42 model.scoring_fct_norm 2.0 +1008 42 training.batch_size 2.0 +1008 42 training.label_smoothing 0.12680241579852894 +1008 43 model.embedding_dim 2.0 +1008 43 model.scoring_fct_norm 1.0 +1008 43 training.batch_size 1.0 +1008 43 training.label_smoothing 0.005362862896207222 +1008 44 model.embedding_dim 2.0 +1008 44 model.scoring_fct_norm 1.0 +1008 44 training.batch_size 1.0 +1008 44 training.label_smoothing 0.012866028039207835 
+1008 45 model.embedding_dim 2.0 +1008 45 model.scoring_fct_norm 2.0 +1008 45 training.batch_size 0.0 +1008 45 training.label_smoothing 0.00723735478392108 +1008 46 model.embedding_dim 1.0 +1008 46 model.scoring_fct_norm 1.0 +1008 46 training.batch_size 0.0 +1008 46 training.label_smoothing 0.0034013557932640595 +1008 47 model.embedding_dim 2.0 +1008 47 model.scoring_fct_norm 1.0 +1008 47 training.batch_size 1.0 +1008 47 training.label_smoothing 0.0023128875780631824 +1008 48 model.embedding_dim 0.0 +1008 48 model.scoring_fct_norm 1.0 +1008 48 training.batch_size 0.0 +1008 48 training.label_smoothing 0.35891707528750405 +1008 49 model.embedding_dim 1.0 +1008 49 model.scoring_fct_norm 1.0 +1008 49 training.batch_size 1.0 +1008 49 training.label_smoothing 0.041621713583938354 +1008 50 model.embedding_dim 1.0 +1008 50 model.scoring_fct_norm 1.0 +1008 50 training.batch_size 1.0 +1008 50 training.label_smoothing 0.051237178979242355 +1008 51 model.embedding_dim 0.0 +1008 51 model.scoring_fct_norm 1.0 +1008 51 training.batch_size 1.0 +1008 51 training.label_smoothing 0.22526952502038688 +1008 52 model.embedding_dim 2.0 +1008 52 model.scoring_fct_norm 1.0 +1008 52 training.batch_size 2.0 +1008 52 training.label_smoothing 0.8117885651731896 +1008 53 model.embedding_dim 2.0 +1008 53 model.scoring_fct_norm 1.0 +1008 53 training.batch_size 2.0 +1008 53 training.label_smoothing 0.5062019412013713 +1008 54 model.embedding_dim 2.0 +1008 54 model.scoring_fct_norm 1.0 +1008 54 training.batch_size 0.0 +1008 54 training.label_smoothing 0.014721759983847036 +1008 55 model.embedding_dim 0.0 +1008 55 model.scoring_fct_norm 2.0 +1008 55 training.batch_size 2.0 +1008 55 training.label_smoothing 0.1324608529321172 +1008 56 model.embedding_dim 2.0 +1008 56 model.scoring_fct_norm 2.0 +1008 56 training.batch_size 0.0 +1008 56 training.label_smoothing 0.09584339890151547 +1008 57 model.embedding_dim 2.0 +1008 57 model.scoring_fct_norm 1.0 +1008 57 training.batch_size 2.0 +1008 57 
training.label_smoothing 0.0014031782182206062 +1008 58 model.embedding_dim 1.0 +1008 58 model.scoring_fct_norm 1.0 +1008 58 training.batch_size 1.0 +1008 58 training.label_smoothing 0.667458074986301 +1008 59 model.embedding_dim 1.0 +1008 59 model.scoring_fct_norm 1.0 +1008 59 training.batch_size 2.0 +1008 59 training.label_smoothing 0.0032421060799684813 +1008 60 model.embedding_dim 0.0 +1008 60 model.scoring_fct_norm 2.0 +1008 60 training.batch_size 1.0 +1008 60 training.label_smoothing 0.21700508881639635 +1008 61 model.embedding_dim 0.0 +1008 61 model.scoring_fct_norm 1.0 +1008 61 training.batch_size 2.0 +1008 61 training.label_smoothing 0.14743335063809254 +1008 62 model.embedding_dim 2.0 +1008 62 model.scoring_fct_norm 1.0 +1008 62 training.batch_size 2.0 +1008 62 training.label_smoothing 0.13850983921889937 +1008 63 model.embedding_dim 1.0 +1008 63 model.scoring_fct_norm 2.0 +1008 63 training.batch_size 1.0 +1008 63 training.label_smoothing 0.08084128671963539 +1008 64 model.embedding_dim 0.0 +1008 64 model.scoring_fct_norm 2.0 +1008 64 training.batch_size 0.0 +1008 64 training.label_smoothing 0.14012545971108786 +1008 65 model.embedding_dim 2.0 +1008 65 model.scoring_fct_norm 1.0 +1008 65 training.batch_size 1.0 +1008 65 training.label_smoothing 0.3752071726231309 +1008 66 model.embedding_dim 2.0 +1008 66 model.scoring_fct_norm 2.0 +1008 66 training.batch_size 2.0 +1008 66 training.label_smoothing 0.0019150332053202129 +1008 67 model.embedding_dim 1.0 +1008 67 model.scoring_fct_norm 2.0 +1008 67 training.batch_size 0.0 +1008 67 training.label_smoothing 0.02740869743071464 +1008 68 model.embedding_dim 2.0 +1008 68 model.scoring_fct_norm 1.0 +1008 68 training.batch_size 0.0 +1008 68 training.label_smoothing 0.3786401154907381 +1008 69 model.embedding_dim 0.0 +1008 69 model.scoring_fct_norm 2.0 +1008 69 training.batch_size 0.0 +1008 69 training.label_smoothing 0.005603141696221833 +1008 70 model.embedding_dim 0.0 +1008 70 model.scoring_fct_norm 2.0 +1008 70 
training.batch_size 1.0 +1008 70 training.label_smoothing 0.06322472578377271 +1008 71 model.embedding_dim 0.0 +1008 71 model.scoring_fct_norm 2.0 +1008 71 training.batch_size 1.0 +1008 71 training.label_smoothing 0.05044899938568265 +1008 72 model.embedding_dim 2.0 +1008 72 model.scoring_fct_norm 1.0 +1008 72 training.batch_size 0.0 +1008 72 training.label_smoothing 0.03679238040145642 +1008 73 model.embedding_dim 0.0 +1008 73 model.scoring_fct_norm 2.0 +1008 73 training.batch_size 2.0 +1008 73 training.label_smoothing 0.0193781212263295 +1008 74 model.embedding_dim 2.0 +1008 74 model.scoring_fct_norm 2.0 +1008 74 training.batch_size 2.0 +1008 74 training.label_smoothing 0.048885821473965724 +1008 75 model.embedding_dim 0.0 +1008 75 model.scoring_fct_norm 2.0 +1008 75 training.batch_size 2.0 +1008 75 training.label_smoothing 0.01039758758231943 +1008 76 model.embedding_dim 2.0 +1008 76 model.scoring_fct_norm 2.0 +1008 76 training.batch_size 1.0 +1008 76 training.label_smoothing 0.7544314744394378 +1008 77 model.embedding_dim 0.0 +1008 77 model.scoring_fct_norm 1.0 +1008 77 training.batch_size 0.0 +1008 77 training.label_smoothing 0.03821426158965007 +1008 78 model.embedding_dim 1.0 +1008 78 model.scoring_fct_norm 1.0 +1008 78 training.batch_size 1.0 +1008 78 training.label_smoothing 0.14398336659458796 +1008 79 model.embedding_dim 0.0 +1008 79 model.scoring_fct_norm 1.0 +1008 79 training.batch_size 1.0 +1008 79 training.label_smoothing 0.0016594718390797048 +1008 80 model.embedding_dim 1.0 +1008 80 model.scoring_fct_norm 2.0 +1008 80 training.batch_size 0.0 +1008 80 training.label_smoothing 0.0015622232978656538 +1008 81 model.embedding_dim 1.0 +1008 81 model.scoring_fct_norm 2.0 +1008 81 training.batch_size 0.0 +1008 81 training.label_smoothing 0.012100964557096024 +1008 82 model.embedding_dim 2.0 +1008 82 model.scoring_fct_norm 2.0 +1008 82 training.batch_size 2.0 +1008 82 training.label_smoothing 0.23167789968148286 +1008 83 model.embedding_dim 2.0 +1008 83 
model.scoring_fct_norm 1.0 +1008 83 training.batch_size 0.0 +1008 83 training.label_smoothing 0.02419130525004753 +1008 84 model.embedding_dim 2.0 +1008 84 model.scoring_fct_norm 2.0 +1008 84 training.batch_size 0.0 +1008 84 training.label_smoothing 0.008648227103172437 +1008 85 model.embedding_dim 2.0 +1008 85 model.scoring_fct_norm 2.0 +1008 85 training.batch_size 0.0 +1008 85 training.label_smoothing 0.07010394830728298 +1008 86 model.embedding_dim 2.0 +1008 86 model.scoring_fct_norm 2.0 +1008 86 training.batch_size 0.0 +1008 86 training.label_smoothing 0.008627256770631305 +1008 87 model.embedding_dim 1.0 +1008 87 model.scoring_fct_norm 1.0 +1008 87 training.batch_size 2.0 +1008 87 training.label_smoothing 0.0032593544254323286 +1008 88 model.embedding_dim 1.0 +1008 88 model.scoring_fct_norm 2.0 +1008 88 training.batch_size 0.0 +1008 88 training.label_smoothing 0.0024952324634463802 +1008 89 model.embedding_dim 1.0 +1008 89 model.scoring_fct_norm 1.0 +1008 89 training.batch_size 1.0 +1008 89 training.label_smoothing 0.0015630974948631661 +1008 90 model.embedding_dim 0.0 +1008 90 model.scoring_fct_norm 1.0 +1008 90 training.batch_size 1.0 +1008 90 training.label_smoothing 0.013961538421106819 +1008 91 model.embedding_dim 1.0 +1008 91 model.scoring_fct_norm 2.0 +1008 91 training.batch_size 2.0 +1008 91 training.label_smoothing 0.03213852604763279 +1008 92 model.embedding_dim 0.0 +1008 92 model.scoring_fct_norm 2.0 +1008 92 training.batch_size 1.0 +1008 92 training.label_smoothing 0.25652749865690083 +1008 93 model.embedding_dim 1.0 +1008 93 model.scoring_fct_norm 2.0 +1008 93 training.batch_size 0.0 +1008 93 training.label_smoothing 0.23319941084545118 +1008 94 model.embedding_dim 2.0 +1008 94 model.scoring_fct_norm 2.0 +1008 94 training.batch_size 1.0 +1008 94 training.label_smoothing 0.1890951997500522 +1008 95 model.embedding_dim 2.0 +1008 95 model.scoring_fct_norm 1.0 +1008 95 training.batch_size 1.0 +1008 95 training.label_smoothing 0.0014577690695756786 
+1008 96 model.embedding_dim 2.0 +1008 96 model.scoring_fct_norm 1.0 +1008 96 training.batch_size 1.0 +1008 96 training.label_smoothing 0.13333460407348985 +1008 97 model.embedding_dim 0.0 +1008 97 model.scoring_fct_norm 1.0 +1008 97 training.batch_size 0.0 +1008 97 training.label_smoothing 0.0032902317344871766 +1008 98 model.embedding_dim 1.0 +1008 98 model.scoring_fct_norm 1.0 +1008 98 training.batch_size 2.0 +1008 98 training.label_smoothing 0.16362765942807253 +1008 99 model.embedding_dim 2.0 +1008 99 model.scoring_fct_norm 2.0 +1008 99 training.batch_size 2.0 +1008 99 training.label_smoothing 0.03745626500487815 +1008 100 model.embedding_dim 0.0 +1008 100 model.scoring_fct_norm 2.0 +1008 100 training.batch_size 1.0 +1008 100 training.label_smoothing 0.27722835464621376 +1008 1 dataset """kinships""" +1008 1 model """unstructuredmodel""" +1008 1 loss """bceaftersigmoid""" +1008 1 regularizer """no""" +1008 1 optimizer """adadelta""" +1008 1 training_loop """lcwa""" +1008 1 evaluator """rankbased""" +1008 2 dataset """kinships""" +1008 2 model """unstructuredmodel""" +1008 2 loss """bceaftersigmoid""" +1008 2 regularizer """no""" +1008 2 optimizer """adadelta""" +1008 2 training_loop """lcwa""" +1008 2 evaluator """rankbased""" +1008 3 dataset """kinships""" +1008 3 model """unstructuredmodel""" +1008 3 loss """bceaftersigmoid""" +1008 3 regularizer """no""" +1008 3 optimizer """adadelta""" +1008 3 training_loop """lcwa""" +1008 3 evaluator """rankbased""" +1008 4 dataset """kinships""" +1008 4 model """unstructuredmodel""" +1008 4 loss """bceaftersigmoid""" +1008 4 regularizer """no""" +1008 4 optimizer """adadelta""" +1008 4 training_loop """lcwa""" +1008 4 evaluator """rankbased""" +1008 5 dataset """kinships""" +1008 5 model """unstructuredmodel""" +1008 5 loss """bceaftersigmoid""" +1008 5 regularizer """no""" +1008 5 optimizer """adadelta""" +1008 5 training_loop """lcwa""" +1008 5 evaluator """rankbased""" +1008 6 dataset """kinships""" +1008 6 model 
"""unstructuredmodel""" +1008 6 loss """bceaftersigmoid""" +1008 6 regularizer """no""" +1008 6 optimizer """adadelta""" +1008 6 training_loop """lcwa""" +1008 6 evaluator """rankbased""" +1008 7 dataset """kinships""" +1008 7 model """unstructuredmodel""" +1008 7 loss """bceaftersigmoid""" +1008 7 regularizer """no""" +1008 7 optimizer """adadelta""" +1008 7 training_loop """lcwa""" +1008 7 evaluator """rankbased""" +1008 8 dataset """kinships""" +1008 8 model """unstructuredmodel""" +1008 8 loss """bceaftersigmoid""" +1008 8 regularizer """no""" +1008 8 optimizer """adadelta""" +1008 8 training_loop """lcwa""" +1008 8 evaluator """rankbased""" +1008 9 dataset """kinships""" +1008 9 model """unstructuredmodel""" +1008 9 loss """bceaftersigmoid""" +1008 9 regularizer """no""" +1008 9 optimizer """adadelta""" +1008 9 training_loop """lcwa""" +1008 9 evaluator """rankbased""" +1008 10 dataset """kinships""" +1008 10 model """unstructuredmodel""" +1008 10 loss """bceaftersigmoid""" +1008 10 regularizer """no""" +1008 10 optimizer """adadelta""" +1008 10 training_loop """lcwa""" +1008 10 evaluator """rankbased""" +1008 11 dataset """kinships""" +1008 11 model """unstructuredmodel""" +1008 11 loss """bceaftersigmoid""" +1008 11 regularizer """no""" +1008 11 optimizer """adadelta""" +1008 11 training_loop """lcwa""" +1008 11 evaluator """rankbased""" +1008 12 dataset """kinships""" +1008 12 model """unstructuredmodel""" +1008 12 loss """bceaftersigmoid""" +1008 12 regularizer """no""" +1008 12 optimizer """adadelta""" +1008 12 training_loop """lcwa""" +1008 12 evaluator """rankbased""" +1008 13 dataset """kinships""" +1008 13 model """unstructuredmodel""" +1008 13 loss """bceaftersigmoid""" +1008 13 regularizer """no""" +1008 13 optimizer """adadelta""" +1008 13 training_loop """lcwa""" +1008 13 evaluator """rankbased""" +1008 14 dataset """kinships""" +1008 14 model """unstructuredmodel""" +1008 14 loss """bceaftersigmoid""" +1008 14 regularizer """no""" +1008 14 
optimizer """adadelta""" +1008 14 training_loop """lcwa""" +1008 14 evaluator """rankbased""" +1008 15 dataset """kinships""" +1008 15 model """unstructuredmodel""" +1008 15 loss """bceaftersigmoid""" +1008 15 regularizer """no""" +1008 15 optimizer """adadelta""" +1008 15 training_loop """lcwa""" +1008 15 evaluator """rankbased""" +1008 16 dataset """kinships""" +1008 16 model """unstructuredmodel""" +1008 16 loss """bceaftersigmoid""" +1008 16 regularizer """no""" +1008 16 optimizer """adadelta""" +1008 16 training_loop """lcwa""" +1008 16 evaluator """rankbased""" +1008 17 dataset """kinships""" +1008 17 model """unstructuredmodel""" +1008 17 loss """bceaftersigmoid""" +1008 17 regularizer """no""" +1008 17 optimizer """adadelta""" +1008 17 training_loop """lcwa""" +1008 17 evaluator """rankbased""" +1008 18 dataset """kinships""" +1008 18 model """unstructuredmodel""" +1008 18 loss """bceaftersigmoid""" +1008 18 regularizer """no""" +1008 18 optimizer """adadelta""" +1008 18 training_loop """lcwa""" +1008 18 evaluator """rankbased""" +1008 19 dataset """kinships""" +1008 19 model """unstructuredmodel""" +1008 19 loss """bceaftersigmoid""" +1008 19 regularizer """no""" +1008 19 optimizer """adadelta""" +1008 19 training_loop """lcwa""" +1008 19 evaluator """rankbased""" +1008 20 dataset """kinships""" +1008 20 model """unstructuredmodel""" +1008 20 loss """bceaftersigmoid""" +1008 20 regularizer """no""" +1008 20 optimizer """adadelta""" +1008 20 training_loop """lcwa""" +1008 20 evaluator """rankbased""" +1008 21 dataset """kinships""" +1008 21 model """unstructuredmodel""" +1008 21 loss """bceaftersigmoid""" +1008 21 regularizer """no""" +1008 21 optimizer """adadelta""" +1008 21 training_loop """lcwa""" +1008 21 evaluator """rankbased""" +1008 22 dataset """kinships""" +1008 22 model """unstructuredmodel""" +1008 22 loss """bceaftersigmoid""" +1008 22 regularizer """no""" +1008 22 optimizer """adadelta""" +1008 22 training_loop """lcwa""" +1008 22 evaluator 
"""rankbased""" +1008 23 dataset """kinships""" +1008 23 model """unstructuredmodel""" +1008 23 loss """bceaftersigmoid""" +1008 23 regularizer """no""" +1008 23 optimizer """adadelta""" +1008 23 training_loop """lcwa""" +1008 23 evaluator """rankbased""" +1008 24 dataset """kinships""" +1008 24 model """unstructuredmodel""" +1008 24 loss """bceaftersigmoid""" +1008 24 regularizer """no""" +1008 24 optimizer """adadelta""" +1008 24 training_loop """lcwa""" +1008 24 evaluator """rankbased""" +1008 25 dataset """kinships""" +1008 25 model """unstructuredmodel""" +1008 25 loss """bceaftersigmoid""" +1008 25 regularizer """no""" +1008 25 optimizer """adadelta""" +1008 25 training_loop """lcwa""" +1008 25 evaluator """rankbased""" +1008 26 dataset """kinships""" +1008 26 model """unstructuredmodel""" +1008 26 loss """bceaftersigmoid""" +1008 26 regularizer """no""" +1008 26 optimizer """adadelta""" +1008 26 training_loop """lcwa""" +1008 26 evaluator """rankbased""" +1008 27 dataset """kinships""" +1008 27 model """unstructuredmodel""" +1008 27 loss """bceaftersigmoid""" +1008 27 regularizer """no""" +1008 27 optimizer """adadelta""" +1008 27 training_loop """lcwa""" +1008 27 evaluator """rankbased""" +1008 28 dataset """kinships""" +1008 28 model """unstructuredmodel""" +1008 28 loss """bceaftersigmoid""" +1008 28 regularizer """no""" +1008 28 optimizer """adadelta""" +1008 28 training_loop """lcwa""" +1008 28 evaluator """rankbased""" +1008 29 dataset """kinships""" +1008 29 model """unstructuredmodel""" +1008 29 loss """bceaftersigmoid""" +1008 29 regularizer """no""" +1008 29 optimizer """adadelta""" +1008 29 training_loop """lcwa""" +1008 29 evaluator """rankbased""" +1008 30 dataset """kinships""" +1008 30 model """unstructuredmodel""" +1008 30 loss """bceaftersigmoid""" +1008 30 regularizer """no""" +1008 30 optimizer """adadelta""" +1008 30 training_loop """lcwa""" +1008 30 evaluator """rankbased""" +1008 31 dataset """kinships""" +1008 31 model 
"""unstructuredmodel""" +1008 31 loss """bceaftersigmoid""" +1008 31 regularizer """no""" +1008 31 optimizer """adadelta""" +1008 31 training_loop """lcwa""" +1008 31 evaluator """rankbased""" +1008 32 dataset """kinships""" +1008 32 model """unstructuredmodel""" +1008 32 loss """bceaftersigmoid""" +1008 32 regularizer """no""" +1008 32 optimizer """adadelta""" +1008 32 training_loop """lcwa""" +1008 32 evaluator """rankbased""" +1008 33 dataset """kinships""" +1008 33 model """unstructuredmodel""" +1008 33 loss """bceaftersigmoid""" +1008 33 regularizer """no""" +1008 33 optimizer """adadelta""" +1008 33 training_loop """lcwa""" +1008 33 evaluator """rankbased""" +1008 34 dataset """kinships""" +1008 34 model """unstructuredmodel""" +1008 34 loss """bceaftersigmoid""" +1008 34 regularizer """no""" +1008 34 optimizer """adadelta""" +1008 34 training_loop """lcwa""" +1008 34 evaluator """rankbased""" +1008 35 dataset """kinships""" +1008 35 model """unstructuredmodel""" +1008 35 loss """bceaftersigmoid""" +1008 35 regularizer """no""" +1008 35 optimizer """adadelta""" +1008 35 training_loop """lcwa""" +1008 35 evaluator """rankbased""" +1008 36 dataset """kinships""" +1008 36 model """unstructuredmodel""" +1008 36 loss """bceaftersigmoid""" +1008 36 regularizer """no""" +1008 36 optimizer """adadelta""" +1008 36 training_loop """lcwa""" +1008 36 evaluator """rankbased""" +1008 37 dataset """kinships""" +1008 37 model """unstructuredmodel""" +1008 37 loss """bceaftersigmoid""" +1008 37 regularizer """no""" +1008 37 optimizer """adadelta""" +1008 37 training_loop """lcwa""" +1008 37 evaluator """rankbased""" +1008 38 dataset """kinships""" +1008 38 model """unstructuredmodel""" +1008 38 loss """bceaftersigmoid""" +1008 38 regularizer """no""" +1008 38 optimizer """adadelta""" +1008 38 training_loop """lcwa""" +1008 38 evaluator """rankbased""" +1008 39 dataset """kinships""" +1008 39 model """unstructuredmodel""" +1008 39 loss """bceaftersigmoid""" +1008 39 
regularizer """no""" +1008 39 optimizer """adadelta""" +1008 39 training_loop """lcwa""" +1008 39 evaluator """rankbased""" +1008 40 dataset """kinships""" +1008 40 model """unstructuredmodel""" +1008 40 loss """bceaftersigmoid""" +1008 40 regularizer """no""" +1008 40 optimizer """adadelta""" +1008 40 training_loop """lcwa""" +1008 40 evaluator """rankbased""" +1008 41 dataset """kinships""" +1008 41 model """unstructuredmodel""" +1008 41 loss """bceaftersigmoid""" +1008 41 regularizer """no""" +1008 41 optimizer """adadelta""" +1008 41 training_loop """lcwa""" +1008 41 evaluator """rankbased""" +1008 42 dataset """kinships""" +1008 42 model """unstructuredmodel""" +1008 42 loss """bceaftersigmoid""" +1008 42 regularizer """no""" +1008 42 optimizer """adadelta""" +1008 42 training_loop """lcwa""" +1008 42 evaluator """rankbased""" +1008 43 dataset """kinships""" +1008 43 model """unstructuredmodel""" +1008 43 loss """bceaftersigmoid""" +1008 43 regularizer """no""" +1008 43 optimizer """adadelta""" +1008 43 training_loop """lcwa""" +1008 43 evaluator """rankbased""" +1008 44 dataset """kinships""" +1008 44 model """unstructuredmodel""" +1008 44 loss """bceaftersigmoid""" +1008 44 regularizer """no""" +1008 44 optimizer """adadelta""" +1008 44 training_loop """lcwa""" +1008 44 evaluator """rankbased""" +1008 45 dataset """kinships""" +1008 45 model """unstructuredmodel""" +1008 45 loss """bceaftersigmoid""" +1008 45 regularizer """no""" +1008 45 optimizer """adadelta""" +1008 45 training_loop """lcwa""" +1008 45 evaluator """rankbased""" +1008 46 dataset """kinships""" +1008 46 model """unstructuredmodel""" +1008 46 loss """bceaftersigmoid""" +1008 46 regularizer """no""" +1008 46 optimizer """adadelta""" +1008 46 training_loop """lcwa""" +1008 46 evaluator """rankbased""" +1008 47 dataset """kinships""" +1008 47 model """unstructuredmodel""" +1008 47 loss """bceaftersigmoid""" +1008 47 regularizer """no""" +1008 47 optimizer """adadelta""" +1008 47 training_loop 
"""lcwa""" +1008 47 evaluator """rankbased""" +1008 48 dataset """kinships""" +1008 48 model """unstructuredmodel""" +1008 48 loss """bceaftersigmoid""" +1008 48 regularizer """no""" +1008 48 optimizer """adadelta""" +1008 48 training_loop """lcwa""" +1008 48 evaluator """rankbased""" +1008 49 dataset """kinships""" +1008 49 model """unstructuredmodel""" +1008 49 loss """bceaftersigmoid""" +1008 49 regularizer """no""" +1008 49 optimizer """adadelta""" +1008 49 training_loop """lcwa""" +1008 49 evaluator """rankbased""" +1008 50 dataset """kinships""" +1008 50 model """unstructuredmodel""" +1008 50 loss """bceaftersigmoid""" +1008 50 regularizer """no""" +1008 50 optimizer """adadelta""" +1008 50 training_loop """lcwa""" +1008 50 evaluator """rankbased""" +1008 51 dataset """kinships""" +1008 51 model """unstructuredmodel""" +1008 51 loss """bceaftersigmoid""" +1008 51 regularizer """no""" +1008 51 optimizer """adadelta""" +1008 51 training_loop """lcwa""" +1008 51 evaluator """rankbased""" +1008 52 dataset """kinships""" +1008 52 model """unstructuredmodel""" +1008 52 loss """bceaftersigmoid""" +1008 52 regularizer """no""" +1008 52 optimizer """adadelta""" +1008 52 training_loop """lcwa""" +1008 52 evaluator """rankbased""" +1008 53 dataset """kinships""" +1008 53 model """unstructuredmodel""" +1008 53 loss """bceaftersigmoid""" +1008 53 regularizer """no""" +1008 53 optimizer """adadelta""" +1008 53 training_loop """lcwa""" +1008 53 evaluator """rankbased""" +1008 54 dataset """kinships""" +1008 54 model """unstructuredmodel""" +1008 54 loss """bceaftersigmoid""" +1008 54 regularizer """no""" +1008 54 optimizer """adadelta""" +1008 54 training_loop """lcwa""" +1008 54 evaluator """rankbased""" +1008 55 dataset """kinships""" +1008 55 model """unstructuredmodel""" +1008 55 loss """bceaftersigmoid""" +1008 55 regularizer """no""" +1008 55 optimizer """adadelta""" +1008 55 training_loop """lcwa""" +1008 55 evaluator """rankbased""" +1008 56 dataset """kinships""" 
+1008 56 model """unstructuredmodel""" +1008 56 loss """bceaftersigmoid""" +1008 56 regularizer """no""" +1008 56 optimizer """adadelta""" +1008 56 training_loop """lcwa""" +1008 56 evaluator """rankbased""" +1008 57 dataset """kinships""" +1008 57 model """unstructuredmodel""" +1008 57 loss """bceaftersigmoid""" +1008 57 regularizer """no""" +1008 57 optimizer """adadelta""" +1008 57 training_loop """lcwa""" +1008 57 evaluator """rankbased""" +1008 58 dataset """kinships""" +1008 58 model """unstructuredmodel""" +1008 58 loss """bceaftersigmoid""" +1008 58 regularizer """no""" +1008 58 optimizer """adadelta""" +1008 58 training_loop """lcwa""" +1008 58 evaluator """rankbased""" +1008 59 dataset """kinships""" +1008 59 model """unstructuredmodel""" +1008 59 loss """bceaftersigmoid""" +1008 59 regularizer """no""" +1008 59 optimizer """adadelta""" +1008 59 training_loop """lcwa""" +1008 59 evaluator """rankbased""" +1008 60 dataset """kinships""" +1008 60 model """unstructuredmodel""" +1008 60 loss """bceaftersigmoid""" +1008 60 regularizer """no""" +1008 60 optimizer """adadelta""" +1008 60 training_loop """lcwa""" +1008 60 evaluator """rankbased""" +1008 61 dataset """kinships""" +1008 61 model """unstructuredmodel""" +1008 61 loss """bceaftersigmoid""" +1008 61 regularizer """no""" +1008 61 optimizer """adadelta""" +1008 61 training_loop """lcwa""" +1008 61 evaluator """rankbased""" +1008 62 dataset """kinships""" +1008 62 model """unstructuredmodel""" +1008 62 loss """bceaftersigmoid""" +1008 62 regularizer """no""" +1008 62 optimizer """adadelta""" +1008 62 training_loop """lcwa""" +1008 62 evaluator """rankbased""" +1008 63 dataset """kinships""" +1008 63 model """unstructuredmodel""" +1008 63 loss """bceaftersigmoid""" +1008 63 regularizer """no""" +1008 63 optimizer """adadelta""" +1008 63 training_loop """lcwa""" +1008 63 evaluator """rankbased""" +1008 64 dataset """kinships""" +1008 64 model """unstructuredmodel""" +1008 64 loss """bceaftersigmoid""" 
+1008 64 regularizer """no""" +1008 64 optimizer """adadelta""" +1008 64 training_loop """lcwa""" +1008 64 evaluator """rankbased""" +1008 65 dataset """kinships""" +1008 65 model """unstructuredmodel""" +1008 65 loss """bceaftersigmoid""" +1008 65 regularizer """no""" +1008 65 optimizer """adadelta""" +1008 65 training_loop """lcwa""" +1008 65 evaluator """rankbased""" +1008 66 dataset """kinships""" +1008 66 model """unstructuredmodel""" +1008 66 loss """bceaftersigmoid""" +1008 66 regularizer """no""" +1008 66 optimizer """adadelta""" +1008 66 training_loop """lcwa""" +1008 66 evaluator """rankbased""" +1008 67 dataset """kinships""" +1008 67 model """unstructuredmodel""" +1008 67 loss """bceaftersigmoid""" +1008 67 regularizer """no""" +1008 67 optimizer """adadelta""" +1008 67 training_loop """lcwa""" +1008 67 evaluator """rankbased""" +1008 68 dataset """kinships""" +1008 68 model """unstructuredmodel""" +1008 68 loss """bceaftersigmoid""" +1008 68 regularizer """no""" +1008 68 optimizer """adadelta""" +1008 68 training_loop """lcwa""" +1008 68 evaluator """rankbased""" +1008 69 dataset """kinships""" +1008 69 model """unstructuredmodel""" +1008 69 loss """bceaftersigmoid""" +1008 69 regularizer """no""" +1008 69 optimizer """adadelta""" +1008 69 training_loop """lcwa""" +1008 69 evaluator """rankbased""" +1008 70 dataset """kinships""" +1008 70 model """unstructuredmodel""" +1008 70 loss """bceaftersigmoid""" +1008 70 regularizer """no""" +1008 70 optimizer """adadelta""" +1008 70 training_loop """lcwa""" +1008 70 evaluator """rankbased""" +1008 71 dataset """kinships""" +1008 71 model """unstructuredmodel""" +1008 71 loss """bceaftersigmoid""" +1008 71 regularizer """no""" +1008 71 optimizer """adadelta""" +1008 71 training_loop """lcwa""" +1008 71 evaluator """rankbased""" +1008 72 dataset """kinships""" +1008 72 model """unstructuredmodel""" +1008 72 loss """bceaftersigmoid""" +1008 72 regularizer """no""" +1008 72 optimizer """adadelta""" +1008 72 
training_loop """lcwa""" +1008 72 evaluator """rankbased""" +1008 73 dataset """kinships""" +1008 73 model """unstructuredmodel""" +1008 73 loss """bceaftersigmoid""" +1008 73 regularizer """no""" +1008 73 optimizer """adadelta""" +1008 73 training_loop """lcwa""" +1008 73 evaluator """rankbased""" +1008 74 dataset """kinships""" +1008 74 model """unstructuredmodel""" +1008 74 loss """bceaftersigmoid""" +1008 74 regularizer """no""" +1008 74 optimizer """adadelta""" +1008 74 training_loop """lcwa""" +1008 74 evaluator """rankbased""" +1008 75 dataset """kinships""" +1008 75 model """unstructuredmodel""" +1008 75 loss """bceaftersigmoid""" +1008 75 regularizer """no""" +1008 75 optimizer """adadelta""" +1008 75 training_loop """lcwa""" +1008 75 evaluator """rankbased""" +1008 76 dataset """kinships""" +1008 76 model """unstructuredmodel""" +1008 76 loss """bceaftersigmoid""" +1008 76 regularizer """no""" +1008 76 optimizer """adadelta""" +1008 76 training_loop """lcwa""" +1008 76 evaluator """rankbased""" +1008 77 dataset """kinships""" +1008 77 model """unstructuredmodel""" +1008 77 loss """bceaftersigmoid""" +1008 77 regularizer """no""" +1008 77 optimizer """adadelta""" +1008 77 training_loop """lcwa""" +1008 77 evaluator """rankbased""" +1008 78 dataset """kinships""" +1008 78 model """unstructuredmodel""" +1008 78 loss """bceaftersigmoid""" +1008 78 regularizer """no""" +1008 78 optimizer """adadelta""" +1008 78 training_loop """lcwa""" +1008 78 evaluator """rankbased""" +1008 79 dataset """kinships""" +1008 79 model """unstructuredmodel""" +1008 79 loss """bceaftersigmoid""" +1008 79 regularizer """no""" +1008 79 optimizer """adadelta""" +1008 79 training_loop """lcwa""" +1008 79 evaluator """rankbased""" +1008 80 dataset """kinships""" +1008 80 model """unstructuredmodel""" +1008 80 loss """bceaftersigmoid""" +1008 80 regularizer """no""" +1008 80 optimizer """adadelta""" +1008 80 training_loop """lcwa""" +1008 80 evaluator """rankbased""" +1008 81 dataset 
"""kinships""" +1008 81 model """unstructuredmodel""" +1008 81 loss """bceaftersigmoid""" +1008 81 regularizer """no""" +1008 81 optimizer """adadelta""" +1008 81 training_loop """lcwa""" +1008 81 evaluator """rankbased""" +1008 82 dataset """kinships""" +1008 82 model """unstructuredmodel""" +1008 82 loss """bceaftersigmoid""" +1008 82 regularizer """no""" +1008 82 optimizer """adadelta""" +1008 82 training_loop """lcwa""" +1008 82 evaluator """rankbased""" +1008 83 dataset """kinships""" +1008 83 model """unstructuredmodel""" +1008 83 loss """bceaftersigmoid""" +1008 83 regularizer """no""" +1008 83 optimizer """adadelta""" +1008 83 training_loop """lcwa""" +1008 83 evaluator """rankbased""" +1008 84 dataset """kinships""" +1008 84 model """unstructuredmodel""" +1008 84 loss """bceaftersigmoid""" +1008 84 regularizer """no""" +1008 84 optimizer """adadelta""" +1008 84 training_loop """lcwa""" +1008 84 evaluator """rankbased""" +1008 85 dataset """kinships""" +1008 85 model """unstructuredmodel""" +1008 85 loss """bceaftersigmoid""" +1008 85 regularizer """no""" +1008 85 optimizer """adadelta""" +1008 85 training_loop """lcwa""" +1008 85 evaluator """rankbased""" +1008 86 dataset """kinships""" +1008 86 model """unstructuredmodel""" +1008 86 loss """bceaftersigmoid""" +1008 86 regularizer """no""" +1008 86 optimizer """adadelta""" +1008 86 training_loop """lcwa""" +1008 86 evaluator """rankbased""" +1008 87 dataset """kinships""" +1008 87 model """unstructuredmodel""" +1008 87 loss """bceaftersigmoid""" +1008 87 regularizer """no""" +1008 87 optimizer """adadelta""" +1008 87 training_loop """lcwa""" +1008 87 evaluator """rankbased""" +1008 88 dataset """kinships""" +1008 88 model """unstructuredmodel""" +1008 88 loss """bceaftersigmoid""" +1008 88 regularizer """no""" +1008 88 optimizer """adadelta""" +1008 88 training_loop """lcwa""" +1008 88 evaluator """rankbased""" +1008 89 dataset """kinships""" +1008 89 model """unstructuredmodel""" +1008 89 loss 
"""bceaftersigmoid""" +1008 89 regularizer """no""" +1008 89 optimizer """adadelta""" +1008 89 training_loop """lcwa""" +1008 89 evaluator """rankbased""" +1008 90 dataset """kinships""" +1008 90 model """unstructuredmodel""" +1008 90 loss """bceaftersigmoid""" +1008 90 regularizer """no""" +1008 90 optimizer """adadelta""" +1008 90 training_loop """lcwa""" +1008 90 evaluator """rankbased""" +1008 91 dataset """kinships""" +1008 91 model """unstructuredmodel""" +1008 91 loss """bceaftersigmoid""" +1008 91 regularizer """no""" +1008 91 optimizer """adadelta""" +1008 91 training_loop """lcwa""" +1008 91 evaluator """rankbased""" +1008 92 dataset """kinships""" +1008 92 model """unstructuredmodel""" +1008 92 loss """bceaftersigmoid""" +1008 92 regularizer """no""" +1008 92 optimizer """adadelta""" +1008 92 training_loop """lcwa""" +1008 92 evaluator """rankbased""" +1008 93 dataset """kinships""" +1008 93 model """unstructuredmodel""" +1008 93 loss """bceaftersigmoid""" +1008 93 regularizer """no""" +1008 93 optimizer """adadelta""" +1008 93 training_loop """lcwa""" +1008 93 evaluator """rankbased""" +1008 94 dataset """kinships""" +1008 94 model """unstructuredmodel""" +1008 94 loss """bceaftersigmoid""" +1008 94 regularizer """no""" +1008 94 optimizer """adadelta""" +1008 94 training_loop """lcwa""" +1008 94 evaluator """rankbased""" +1008 95 dataset """kinships""" +1008 95 model """unstructuredmodel""" +1008 95 loss """bceaftersigmoid""" +1008 95 regularizer """no""" +1008 95 optimizer """adadelta""" +1008 95 training_loop """lcwa""" +1008 95 evaluator """rankbased""" +1008 96 dataset """kinships""" +1008 96 model """unstructuredmodel""" +1008 96 loss """bceaftersigmoid""" +1008 96 regularizer """no""" +1008 96 optimizer """adadelta""" +1008 96 training_loop """lcwa""" +1008 96 evaluator """rankbased""" +1008 97 dataset """kinships""" +1008 97 model """unstructuredmodel""" +1008 97 loss """bceaftersigmoid""" +1008 97 regularizer """no""" +1008 97 optimizer 
"""adadelta""" +1008 97 training_loop """lcwa""" +1008 97 evaluator """rankbased""" +1008 98 dataset """kinships""" +1008 98 model """unstructuredmodel""" +1008 98 loss """bceaftersigmoid""" +1008 98 regularizer """no""" +1008 98 optimizer """adadelta""" +1008 98 training_loop """lcwa""" +1008 98 evaluator """rankbased""" +1008 99 dataset """kinships""" +1008 99 model """unstructuredmodel""" +1008 99 loss """bceaftersigmoid""" +1008 99 regularizer """no""" +1008 99 optimizer """adadelta""" +1008 99 training_loop """lcwa""" +1008 99 evaluator """rankbased""" +1008 100 dataset """kinships""" +1008 100 model """unstructuredmodel""" +1008 100 loss """bceaftersigmoid""" +1008 100 regularizer """no""" +1008 100 optimizer """adadelta""" +1008 100 training_loop """lcwa""" +1008 100 evaluator """rankbased""" +1009 1 model.embedding_dim 1.0 +1009 1 model.scoring_fct_norm 1.0 +1009 1 training.batch_size 1.0 +1009 1 training.label_smoothing 0.0026765688070167757 +1009 2 model.embedding_dim 2.0 +1009 2 model.scoring_fct_norm 1.0 +1009 2 training.batch_size 1.0 +1009 2 training.label_smoothing 0.032112523661599536 +1009 3 model.embedding_dim 1.0 +1009 3 model.scoring_fct_norm 1.0 +1009 3 training.batch_size 1.0 +1009 3 training.label_smoothing 0.013040593479369552 +1009 4 model.embedding_dim 1.0 +1009 4 model.scoring_fct_norm 2.0 +1009 4 training.batch_size 1.0 +1009 4 training.label_smoothing 0.03630667965949822 +1009 5 model.embedding_dim 0.0 +1009 5 model.scoring_fct_norm 1.0 +1009 5 training.batch_size 1.0 +1009 5 training.label_smoothing 0.05462868117820888 +1009 6 model.embedding_dim 2.0 +1009 6 model.scoring_fct_norm 2.0 +1009 6 training.batch_size 0.0 +1009 6 training.label_smoothing 0.004161457834010419 +1009 7 model.embedding_dim 0.0 +1009 7 model.scoring_fct_norm 2.0 +1009 7 training.batch_size 0.0 +1009 7 training.label_smoothing 0.2574672203109131 +1009 8 model.embedding_dim 2.0 +1009 8 model.scoring_fct_norm 2.0 +1009 8 training.batch_size 2.0 +1009 8 
training.label_smoothing 0.07489580940153069 +1009 9 model.embedding_dim 0.0 +1009 9 model.scoring_fct_norm 2.0 +1009 9 training.batch_size 2.0 +1009 9 training.label_smoothing 0.023091169703870883 +1009 10 model.embedding_dim 1.0 +1009 10 model.scoring_fct_norm 2.0 +1009 10 training.batch_size 1.0 +1009 10 training.label_smoothing 0.0037822410111323632 +1009 11 model.embedding_dim 2.0 +1009 11 model.scoring_fct_norm 2.0 +1009 11 training.batch_size 1.0 +1009 11 training.label_smoothing 0.07906450525488098 +1009 12 model.embedding_dim 2.0 +1009 12 model.scoring_fct_norm 1.0 +1009 12 training.batch_size 1.0 +1009 12 training.label_smoothing 0.001507075660530177 +1009 13 model.embedding_dim 0.0 +1009 13 model.scoring_fct_norm 1.0 +1009 13 training.batch_size 2.0 +1009 13 training.label_smoothing 0.2212548298285753 +1009 14 model.embedding_dim 1.0 +1009 14 model.scoring_fct_norm 2.0 +1009 14 training.batch_size 1.0 +1009 14 training.label_smoothing 0.9237612314414193 +1009 15 model.embedding_dim 0.0 +1009 15 model.scoring_fct_norm 2.0 +1009 15 training.batch_size 2.0 +1009 15 training.label_smoothing 0.15248848535650705 +1009 16 model.embedding_dim 1.0 +1009 16 model.scoring_fct_norm 2.0 +1009 16 training.batch_size 1.0 +1009 16 training.label_smoothing 0.006506924160854582 +1009 17 model.embedding_dim 0.0 +1009 17 model.scoring_fct_norm 2.0 +1009 17 training.batch_size 1.0 +1009 17 training.label_smoothing 0.0016296167377822474 +1009 18 model.embedding_dim 0.0 +1009 18 model.scoring_fct_norm 2.0 +1009 18 training.batch_size 0.0 +1009 18 training.label_smoothing 0.2660166191952194 +1009 19 model.embedding_dim 2.0 +1009 19 model.scoring_fct_norm 2.0 +1009 19 training.batch_size 2.0 +1009 19 training.label_smoothing 0.0014130732994846221 +1009 20 model.embedding_dim 0.0 +1009 20 model.scoring_fct_norm 2.0 +1009 20 training.batch_size 2.0 +1009 20 training.label_smoothing 0.0040250654930850055 +1009 21 model.embedding_dim 2.0 +1009 21 model.scoring_fct_norm 1.0 +1009 21 
training.batch_size 0.0 +1009 21 training.label_smoothing 0.0015263834172863627 +1009 22 model.embedding_dim 0.0 +1009 22 model.scoring_fct_norm 2.0 +1009 22 training.batch_size 2.0 +1009 22 training.label_smoothing 0.020037119192992357 +1009 23 model.embedding_dim 1.0 +1009 23 model.scoring_fct_norm 1.0 +1009 23 training.batch_size 1.0 +1009 23 training.label_smoothing 0.05915568265773369 +1009 24 model.embedding_dim 1.0 +1009 24 model.scoring_fct_norm 1.0 +1009 24 training.batch_size 2.0 +1009 24 training.label_smoothing 0.03869419722956053 +1009 25 model.embedding_dim 0.0 +1009 25 model.scoring_fct_norm 1.0 +1009 25 training.batch_size 0.0 +1009 25 training.label_smoothing 0.0076724369051633465 +1009 26 model.embedding_dim 1.0 +1009 26 model.scoring_fct_norm 1.0 +1009 26 training.batch_size 1.0 +1009 26 training.label_smoothing 0.0010081529908731234 +1009 27 model.embedding_dim 0.0 +1009 27 model.scoring_fct_norm 1.0 +1009 27 training.batch_size 0.0 +1009 27 training.label_smoothing 0.2465863474750005 +1009 28 model.embedding_dim 1.0 +1009 28 model.scoring_fct_norm 2.0 +1009 28 training.batch_size 1.0 +1009 28 training.label_smoothing 0.06882203181903529 +1009 29 model.embedding_dim 2.0 +1009 29 model.scoring_fct_norm 1.0 +1009 29 training.batch_size 1.0 +1009 29 training.label_smoothing 0.040423511246456115 +1009 30 model.embedding_dim 0.0 +1009 30 model.scoring_fct_norm 1.0 +1009 30 training.batch_size 2.0 +1009 30 training.label_smoothing 0.06953953988083644 +1009 31 model.embedding_dim 2.0 +1009 31 model.scoring_fct_norm 1.0 +1009 31 training.batch_size 0.0 +1009 31 training.label_smoothing 0.34342932650713154 +1009 32 model.embedding_dim 0.0 +1009 32 model.scoring_fct_norm 2.0 +1009 32 training.batch_size 2.0 +1009 32 training.label_smoothing 0.05794016328184925 +1009 33 model.embedding_dim 1.0 +1009 33 model.scoring_fct_norm 2.0 +1009 33 training.batch_size 2.0 +1009 33 training.label_smoothing 0.009927801542647741 +1009 34 model.embedding_dim 2.0 +1009 34 
model.scoring_fct_norm 2.0 +1009 34 training.batch_size 1.0 +1009 34 training.label_smoothing 0.04086835455378159 +1009 35 model.embedding_dim 2.0 +1009 35 model.scoring_fct_norm 1.0 +1009 35 training.batch_size 0.0 +1009 35 training.label_smoothing 0.007535999841231615 +1009 36 model.embedding_dim 1.0 +1009 36 model.scoring_fct_norm 1.0 +1009 36 training.batch_size 0.0 +1009 36 training.label_smoothing 0.011102932421667391 +1009 37 model.embedding_dim 0.0 +1009 37 model.scoring_fct_norm 1.0 +1009 37 training.batch_size 2.0 +1009 37 training.label_smoothing 0.07227128787531298 +1009 38 model.embedding_dim 2.0 +1009 38 model.scoring_fct_norm 2.0 +1009 38 training.batch_size 2.0 +1009 38 training.label_smoothing 0.0017847232168676079 +1009 39 model.embedding_dim 2.0 +1009 39 model.scoring_fct_norm 1.0 +1009 39 training.batch_size 2.0 +1009 39 training.label_smoothing 0.046016417457927165 +1009 40 model.embedding_dim 0.0 +1009 40 model.scoring_fct_norm 1.0 +1009 40 training.batch_size 2.0 +1009 40 training.label_smoothing 0.07351146877466617 +1009 41 model.embedding_dim 0.0 +1009 41 model.scoring_fct_norm 2.0 +1009 41 training.batch_size 1.0 +1009 41 training.label_smoothing 0.0020754767584704632 +1009 42 model.embedding_dim 2.0 +1009 42 model.scoring_fct_norm 1.0 +1009 42 training.batch_size 0.0 +1009 42 training.label_smoothing 0.002340538331474243 +1009 43 model.embedding_dim 0.0 +1009 43 model.scoring_fct_norm 1.0 +1009 43 training.batch_size 0.0 +1009 43 training.label_smoothing 0.008458813766306764 +1009 44 model.embedding_dim 2.0 +1009 44 model.scoring_fct_norm 1.0 +1009 44 training.batch_size 2.0 +1009 44 training.label_smoothing 0.0012264554638340658 +1009 45 model.embedding_dim 2.0 +1009 45 model.scoring_fct_norm 2.0 +1009 45 training.batch_size 2.0 +1009 45 training.label_smoothing 0.020153293469811852 +1009 46 model.embedding_dim 0.0 +1009 46 model.scoring_fct_norm 1.0 +1009 46 training.batch_size 1.0 +1009 46 training.label_smoothing 0.04883015099025076 
+1009 47 model.embedding_dim 1.0 +1009 47 model.scoring_fct_norm 1.0 +1009 47 training.batch_size 1.0 +1009 47 training.label_smoothing 0.48684100505510797 +1009 48 model.embedding_dim 0.0 +1009 48 model.scoring_fct_norm 2.0 +1009 48 training.batch_size 1.0 +1009 48 training.label_smoothing 0.16848508292380673 +1009 49 model.embedding_dim 1.0 +1009 49 model.scoring_fct_norm 2.0 +1009 49 training.batch_size 0.0 +1009 49 training.label_smoothing 0.00168309828656237 +1009 50 model.embedding_dim 0.0 +1009 50 model.scoring_fct_norm 1.0 +1009 50 training.batch_size 1.0 +1009 50 training.label_smoothing 0.5088025917460993 +1009 51 model.embedding_dim 1.0 +1009 51 model.scoring_fct_norm 1.0 +1009 51 training.batch_size 0.0 +1009 51 training.label_smoothing 0.07639159983404453 +1009 52 model.embedding_dim 1.0 +1009 52 model.scoring_fct_norm 2.0 +1009 52 training.batch_size 2.0 +1009 52 training.label_smoothing 0.00874590359895125 +1009 53 model.embedding_dim 0.0 +1009 53 model.scoring_fct_norm 2.0 +1009 53 training.batch_size 2.0 +1009 53 training.label_smoothing 0.006305778625026747 +1009 54 model.embedding_dim 1.0 +1009 54 model.scoring_fct_norm 1.0 +1009 54 training.batch_size 1.0 +1009 54 training.label_smoothing 0.019842739403612426 +1009 55 model.embedding_dim 0.0 +1009 55 model.scoring_fct_norm 2.0 +1009 55 training.batch_size 2.0 +1009 55 training.label_smoothing 0.005413990009377063 +1009 56 model.embedding_dim 1.0 +1009 56 model.scoring_fct_norm 1.0 +1009 56 training.batch_size 1.0 +1009 56 training.label_smoothing 0.9613948823445128 +1009 57 model.embedding_dim 0.0 +1009 57 model.scoring_fct_norm 2.0 +1009 57 training.batch_size 2.0 +1009 57 training.label_smoothing 0.8056512556393172 +1009 58 model.embedding_dim 1.0 +1009 58 model.scoring_fct_norm 1.0 +1009 58 training.batch_size 1.0 +1009 58 training.label_smoothing 0.8059813969402962 +1009 59 model.embedding_dim 0.0 +1009 59 model.scoring_fct_norm 1.0 +1009 59 training.batch_size 0.0 +1009 59 
training.label_smoothing 0.10528875826540733 +1009 60 model.embedding_dim 0.0 +1009 60 model.scoring_fct_norm 1.0 +1009 60 training.batch_size 1.0 +1009 60 training.label_smoothing 0.014865130185666603 +1009 61 model.embedding_dim 2.0 +1009 61 model.scoring_fct_norm 2.0 +1009 61 training.batch_size 1.0 +1009 61 training.label_smoothing 0.06452006123681513 +1009 62 model.embedding_dim 1.0 +1009 62 model.scoring_fct_norm 1.0 +1009 62 training.batch_size 2.0 +1009 62 training.label_smoothing 0.7418187864632869 +1009 63 model.embedding_dim 0.0 +1009 63 model.scoring_fct_norm 1.0 +1009 63 training.batch_size 1.0 +1009 63 training.label_smoothing 0.13042013714179454 +1009 64 model.embedding_dim 2.0 +1009 64 model.scoring_fct_norm 1.0 +1009 64 training.batch_size 2.0 +1009 64 training.label_smoothing 0.1983607917944902 +1009 65 model.embedding_dim 2.0 +1009 65 model.scoring_fct_norm 2.0 +1009 65 training.batch_size 0.0 +1009 65 training.label_smoothing 0.11838950726889325 +1009 66 model.embedding_dim 2.0 +1009 66 model.scoring_fct_norm 2.0 +1009 66 training.batch_size 2.0 +1009 66 training.label_smoothing 0.00841714057898418 +1009 67 model.embedding_dim 0.0 +1009 67 model.scoring_fct_norm 1.0 +1009 67 training.batch_size 2.0 +1009 67 training.label_smoothing 0.956409229551333 +1009 68 model.embedding_dim 1.0 +1009 68 model.scoring_fct_norm 2.0 +1009 68 training.batch_size 1.0 +1009 68 training.label_smoothing 0.0049117905712333 +1009 69 model.embedding_dim 0.0 +1009 69 model.scoring_fct_norm 2.0 +1009 69 training.batch_size 2.0 +1009 69 training.label_smoothing 0.0012544900054265661 +1009 70 model.embedding_dim 1.0 +1009 70 model.scoring_fct_norm 1.0 +1009 70 training.batch_size 2.0 +1009 70 training.label_smoothing 0.0015587375919317694 +1009 71 model.embedding_dim 1.0 +1009 71 model.scoring_fct_norm 2.0 +1009 71 training.batch_size 0.0 +1009 71 training.label_smoothing 0.001044911505217065 +1009 72 model.embedding_dim 0.0 +1009 72 model.scoring_fct_norm 2.0 +1009 72 
training.batch_size 2.0 +1009 72 training.label_smoothing 0.0023430756626283723 +1009 73 model.embedding_dim 0.0 +1009 73 model.scoring_fct_norm 2.0 +1009 73 training.batch_size 0.0 +1009 73 training.label_smoothing 0.2920482409463197 +1009 74 model.embedding_dim 2.0 +1009 74 model.scoring_fct_norm 1.0 +1009 74 training.batch_size 1.0 +1009 74 training.label_smoothing 0.03216510571731179 +1009 75 model.embedding_dim 0.0 +1009 75 model.scoring_fct_norm 2.0 +1009 75 training.batch_size 1.0 +1009 75 training.label_smoothing 0.002527605162549893 +1009 76 model.embedding_dim 1.0 +1009 76 model.scoring_fct_norm 2.0 +1009 76 training.batch_size 0.0 +1009 76 training.label_smoothing 0.01129885241861121 +1009 77 model.embedding_dim 2.0 +1009 77 model.scoring_fct_norm 2.0 +1009 77 training.batch_size 1.0 +1009 77 training.label_smoothing 0.561485422424688 +1009 78 model.embedding_dim 0.0 +1009 78 model.scoring_fct_norm 1.0 +1009 78 training.batch_size 0.0 +1009 78 training.label_smoothing 0.23431140385042337 +1009 79 model.embedding_dim 0.0 +1009 79 model.scoring_fct_norm 1.0 +1009 79 training.batch_size 0.0 +1009 79 training.label_smoothing 0.09294894078497959 +1009 80 model.embedding_dim 1.0 +1009 80 model.scoring_fct_norm 1.0 +1009 80 training.batch_size 2.0 +1009 80 training.label_smoothing 0.07106216200641209 +1009 81 model.embedding_dim 2.0 +1009 81 model.scoring_fct_norm 2.0 +1009 81 training.batch_size 0.0 +1009 81 training.label_smoothing 0.4729781905843298 +1009 82 model.embedding_dim 2.0 +1009 82 model.scoring_fct_norm 2.0 +1009 82 training.batch_size 0.0 +1009 82 training.label_smoothing 0.0012990603175990622 +1009 83 model.embedding_dim 0.0 +1009 83 model.scoring_fct_norm 1.0 +1009 83 training.batch_size 2.0 +1009 83 training.label_smoothing 0.00204603538114531 +1009 84 model.embedding_dim 0.0 +1009 84 model.scoring_fct_norm 2.0 +1009 84 training.batch_size 2.0 +1009 84 training.label_smoothing 0.409426814162669 +1009 85 model.embedding_dim 0.0 +1009 85 
model.scoring_fct_norm 2.0 +1009 85 training.batch_size 2.0 +1009 85 training.label_smoothing 0.03686012638733939 +1009 86 model.embedding_dim 2.0 +1009 86 model.scoring_fct_norm 2.0 +1009 86 training.batch_size 2.0 +1009 86 training.label_smoothing 0.28853048107744206 +1009 87 model.embedding_dim 1.0 +1009 87 model.scoring_fct_norm 1.0 +1009 87 training.batch_size 0.0 +1009 87 training.label_smoothing 0.007108977449276884 +1009 88 model.embedding_dim 1.0 +1009 88 model.scoring_fct_norm 1.0 +1009 88 training.batch_size 0.0 +1009 88 training.label_smoothing 0.1314462510296249 +1009 89 model.embedding_dim 0.0 +1009 89 model.scoring_fct_norm 1.0 +1009 89 training.batch_size 0.0 +1009 89 training.label_smoothing 0.22118967476671858 +1009 90 model.embedding_dim 0.0 +1009 90 model.scoring_fct_norm 1.0 +1009 90 training.batch_size 1.0 +1009 90 training.label_smoothing 0.27993955173566615 +1009 91 model.embedding_dim 1.0 +1009 91 model.scoring_fct_norm 1.0 +1009 91 training.batch_size 0.0 +1009 91 training.label_smoothing 0.37212689838644164 +1009 92 model.embedding_dim 0.0 +1009 92 model.scoring_fct_norm 1.0 +1009 92 training.batch_size 0.0 +1009 92 training.label_smoothing 0.2270270647732193 +1009 93 model.embedding_dim 1.0 +1009 93 model.scoring_fct_norm 1.0 +1009 93 training.batch_size 0.0 +1009 93 training.label_smoothing 0.0024059501815976306 +1009 94 model.embedding_dim 1.0 +1009 94 model.scoring_fct_norm 2.0 +1009 94 training.batch_size 2.0 +1009 94 training.label_smoothing 0.0012101509529186356 +1009 95 model.embedding_dim 2.0 +1009 95 model.scoring_fct_norm 2.0 +1009 95 training.batch_size 0.0 +1009 95 training.label_smoothing 0.008597480692553462 +1009 96 model.embedding_dim 0.0 +1009 96 model.scoring_fct_norm 1.0 +1009 96 training.batch_size 1.0 +1009 96 training.label_smoothing 0.00337018008209568 +1009 97 model.embedding_dim 2.0 +1009 97 model.scoring_fct_norm 2.0 +1009 97 training.batch_size 0.0 +1009 97 training.label_smoothing 0.021585427054405373 +1009 98 
model.embedding_dim 0.0 +1009 98 model.scoring_fct_norm 1.0 +1009 98 training.batch_size 2.0 +1009 98 training.label_smoothing 0.6819357100747051 +1009 99 model.embedding_dim 2.0 +1009 99 model.scoring_fct_norm 1.0 +1009 99 training.batch_size 2.0 +1009 99 training.label_smoothing 0.01732599879498541 +1009 100 model.embedding_dim 2.0 +1009 100 model.scoring_fct_norm 1.0 +1009 100 training.batch_size 1.0 +1009 100 training.label_smoothing 0.13822827101027146 +1009 1 dataset """kinships""" +1009 1 model """unstructuredmodel""" +1009 1 loss """softplus""" +1009 1 regularizer """no""" +1009 1 optimizer """adadelta""" +1009 1 training_loop """lcwa""" +1009 1 evaluator """rankbased""" +1009 2 dataset """kinships""" +1009 2 model """unstructuredmodel""" +1009 2 loss """softplus""" +1009 2 regularizer """no""" +1009 2 optimizer """adadelta""" +1009 2 training_loop """lcwa""" +1009 2 evaluator """rankbased""" +1009 3 dataset """kinships""" +1009 3 model """unstructuredmodel""" +1009 3 loss """softplus""" +1009 3 regularizer """no""" +1009 3 optimizer """adadelta""" +1009 3 training_loop """lcwa""" +1009 3 evaluator """rankbased""" +1009 4 dataset """kinships""" +1009 4 model """unstructuredmodel""" +1009 4 loss """softplus""" +1009 4 regularizer """no""" +1009 4 optimizer """adadelta""" +1009 4 training_loop """lcwa""" +1009 4 evaluator """rankbased""" +1009 5 dataset """kinships""" +1009 5 model """unstructuredmodel""" +1009 5 loss """softplus""" +1009 5 regularizer """no""" +1009 5 optimizer """adadelta""" +1009 5 training_loop """lcwa""" +1009 5 evaluator """rankbased""" +1009 6 dataset """kinships""" +1009 6 model """unstructuredmodel""" +1009 6 loss """softplus""" +1009 6 regularizer """no""" +1009 6 optimizer """adadelta""" +1009 6 training_loop """lcwa""" +1009 6 evaluator """rankbased""" +1009 7 dataset """kinships""" +1009 7 model """unstructuredmodel""" +1009 7 loss """softplus""" +1009 7 regularizer """no""" +1009 7 optimizer """adadelta""" +1009 7 training_loop 
"""lcwa""" +1009 7 evaluator """rankbased""" +1009 8 dataset """kinships""" +1009 8 model """unstructuredmodel""" +1009 8 loss """softplus""" +1009 8 regularizer """no""" +1009 8 optimizer """adadelta""" +1009 8 training_loop """lcwa""" +1009 8 evaluator """rankbased""" +1009 9 dataset """kinships""" +1009 9 model """unstructuredmodel""" +1009 9 loss """softplus""" +1009 9 regularizer """no""" +1009 9 optimizer """adadelta""" +1009 9 training_loop """lcwa""" +1009 9 evaluator """rankbased""" +1009 10 dataset """kinships""" +1009 10 model """unstructuredmodel""" +1009 10 loss """softplus""" +1009 10 regularizer """no""" +1009 10 optimizer """adadelta""" +1009 10 training_loop """lcwa""" +1009 10 evaluator """rankbased""" +1009 11 dataset """kinships""" +1009 11 model """unstructuredmodel""" +1009 11 loss """softplus""" +1009 11 regularizer """no""" +1009 11 optimizer """adadelta""" +1009 11 training_loop """lcwa""" +1009 11 evaluator """rankbased""" +1009 12 dataset """kinships""" +1009 12 model """unstructuredmodel""" +1009 12 loss """softplus""" +1009 12 regularizer """no""" +1009 12 optimizer """adadelta""" +1009 12 training_loop """lcwa""" +1009 12 evaluator """rankbased""" +1009 13 dataset """kinships""" +1009 13 model """unstructuredmodel""" +1009 13 loss """softplus""" +1009 13 regularizer """no""" +1009 13 optimizer """adadelta""" +1009 13 training_loop """lcwa""" +1009 13 evaluator """rankbased""" +1009 14 dataset """kinships""" +1009 14 model """unstructuredmodel""" +1009 14 loss """softplus""" +1009 14 regularizer """no""" +1009 14 optimizer """adadelta""" +1009 14 training_loop """lcwa""" +1009 14 evaluator """rankbased""" +1009 15 dataset """kinships""" +1009 15 model """unstructuredmodel""" +1009 15 loss """softplus""" +1009 15 regularizer """no""" +1009 15 optimizer """adadelta""" +1009 15 training_loop """lcwa""" +1009 15 evaluator """rankbased""" +1009 16 dataset """kinships""" +1009 16 model """unstructuredmodel""" +1009 16 loss """softplus""" 
+1009 16 regularizer """no""" +1009 16 optimizer """adadelta""" +1009 16 training_loop """lcwa""" +1009 16 evaluator """rankbased""" +1009 17 dataset """kinships""" +1009 17 model """unstructuredmodel""" +1009 17 loss """softplus""" +1009 17 regularizer """no""" +1009 17 optimizer """adadelta""" +1009 17 training_loop """lcwa""" +1009 17 evaluator """rankbased""" +1009 18 dataset """kinships""" +1009 18 model """unstructuredmodel""" +1009 18 loss """softplus""" +1009 18 regularizer """no""" +1009 18 optimizer """adadelta""" +1009 18 training_loop """lcwa""" +1009 18 evaluator """rankbased""" +1009 19 dataset """kinships""" +1009 19 model """unstructuredmodel""" +1009 19 loss """softplus""" +1009 19 regularizer """no""" +1009 19 optimizer """adadelta""" +1009 19 training_loop """lcwa""" +1009 19 evaluator """rankbased""" +1009 20 dataset """kinships""" +1009 20 model """unstructuredmodel""" +1009 20 loss """softplus""" +1009 20 regularizer """no""" +1009 20 optimizer """adadelta""" +1009 20 training_loop """lcwa""" +1009 20 evaluator """rankbased""" +1009 21 dataset """kinships""" +1009 21 model """unstructuredmodel""" +1009 21 loss """softplus""" +1009 21 regularizer """no""" +1009 21 optimizer """adadelta""" +1009 21 training_loop """lcwa""" +1009 21 evaluator """rankbased""" +1009 22 dataset """kinships""" +1009 22 model """unstructuredmodel""" +1009 22 loss """softplus""" +1009 22 regularizer """no""" +1009 22 optimizer """adadelta""" +1009 22 training_loop """lcwa""" +1009 22 evaluator """rankbased""" +1009 23 dataset """kinships""" +1009 23 model """unstructuredmodel""" +1009 23 loss """softplus""" +1009 23 regularizer """no""" +1009 23 optimizer """adadelta""" +1009 23 training_loop """lcwa""" +1009 23 evaluator """rankbased""" +1009 24 dataset """kinships""" +1009 24 model """unstructuredmodel""" +1009 24 loss """softplus""" +1009 24 regularizer """no""" +1009 24 optimizer """adadelta""" +1009 24 training_loop """lcwa""" +1009 24 evaluator """rankbased""" 
+1009 25 dataset """kinships""" +1009 25 model """unstructuredmodel""" +1009 25 loss """softplus""" +1009 25 regularizer """no""" +1009 25 optimizer """adadelta""" +1009 25 training_loop """lcwa""" +1009 25 evaluator """rankbased""" +1009 26 dataset """kinships""" +1009 26 model """unstructuredmodel""" +1009 26 loss """softplus""" +1009 26 regularizer """no""" +1009 26 optimizer """adadelta""" +1009 26 training_loop """lcwa""" +1009 26 evaluator """rankbased""" +1009 27 dataset """kinships""" +1009 27 model """unstructuredmodel""" +1009 27 loss """softplus""" +1009 27 regularizer """no""" +1009 27 optimizer """adadelta""" +1009 27 training_loop """lcwa""" +1009 27 evaluator """rankbased""" +1009 28 dataset """kinships""" +1009 28 model """unstructuredmodel""" +1009 28 loss """softplus""" +1009 28 regularizer """no""" +1009 28 optimizer """adadelta""" +1009 28 training_loop """lcwa""" +1009 28 evaluator """rankbased""" +1009 29 dataset """kinships""" +1009 29 model """unstructuredmodel""" +1009 29 loss """softplus""" +1009 29 regularizer """no""" +1009 29 optimizer """adadelta""" +1009 29 training_loop """lcwa""" +1009 29 evaluator """rankbased""" +1009 30 dataset """kinships""" +1009 30 model """unstructuredmodel""" +1009 30 loss """softplus""" +1009 30 regularizer """no""" +1009 30 optimizer """adadelta""" +1009 30 training_loop """lcwa""" +1009 30 evaluator """rankbased""" +1009 31 dataset """kinships""" +1009 31 model """unstructuredmodel""" +1009 31 loss """softplus""" +1009 31 regularizer """no""" +1009 31 optimizer """adadelta""" +1009 31 training_loop """lcwa""" +1009 31 evaluator """rankbased""" +1009 32 dataset """kinships""" +1009 32 model """unstructuredmodel""" +1009 32 loss """softplus""" +1009 32 regularizer """no""" +1009 32 optimizer """adadelta""" +1009 32 training_loop """lcwa""" +1009 32 evaluator """rankbased""" +1009 33 dataset """kinships""" +1009 33 model """unstructuredmodel""" +1009 33 loss """softplus""" +1009 33 regularizer """no""" +1009 
33 optimizer """adadelta""" +1009 33 training_loop """lcwa""" +1009 33 evaluator """rankbased""" +1009 34 dataset """kinships""" +1009 34 model """unstructuredmodel""" +1009 34 loss """softplus""" +1009 34 regularizer """no""" +1009 34 optimizer """adadelta""" +1009 34 training_loop """lcwa""" +1009 34 evaluator """rankbased""" +1009 35 dataset """kinships""" +1009 35 model """unstructuredmodel""" +1009 35 loss """softplus""" +1009 35 regularizer """no""" +1009 35 optimizer """adadelta""" +1009 35 training_loop """lcwa""" +1009 35 evaluator """rankbased""" +1009 36 dataset """kinships""" +1009 36 model """unstructuredmodel""" +1009 36 loss """softplus""" +1009 36 regularizer """no""" +1009 36 optimizer """adadelta""" +1009 36 training_loop """lcwa""" +1009 36 evaluator """rankbased""" +1009 37 dataset """kinships""" +1009 37 model """unstructuredmodel""" +1009 37 loss """softplus""" +1009 37 regularizer """no""" +1009 37 optimizer """adadelta""" +1009 37 training_loop """lcwa""" +1009 37 evaluator """rankbased""" +1009 38 dataset """kinships""" +1009 38 model """unstructuredmodel""" +1009 38 loss """softplus""" +1009 38 regularizer """no""" +1009 38 optimizer """adadelta""" +1009 38 training_loop """lcwa""" +1009 38 evaluator """rankbased""" +1009 39 dataset """kinships""" +1009 39 model """unstructuredmodel""" +1009 39 loss """softplus""" +1009 39 regularizer """no""" +1009 39 optimizer """adadelta""" +1009 39 training_loop """lcwa""" +1009 39 evaluator """rankbased""" +1009 40 dataset """kinships""" +1009 40 model """unstructuredmodel""" +1009 40 loss """softplus""" +1009 40 regularizer """no""" +1009 40 optimizer """adadelta""" +1009 40 training_loop """lcwa""" +1009 40 evaluator """rankbased""" +1009 41 dataset """kinships""" +1009 41 model """unstructuredmodel""" +1009 41 loss """softplus""" +1009 41 regularizer """no""" +1009 41 optimizer """adadelta""" +1009 41 training_loop """lcwa""" +1009 41 evaluator """rankbased""" +1009 42 dataset """kinships""" +1009 
42 model """unstructuredmodel""" +1009 42 loss """softplus""" +1009 42 regularizer """no""" +1009 42 optimizer """adadelta""" +1009 42 training_loop """lcwa""" +1009 42 evaluator """rankbased""" +1009 43 dataset """kinships""" +1009 43 model """unstructuredmodel""" +1009 43 loss """softplus""" +1009 43 regularizer """no""" +1009 43 optimizer """adadelta""" +1009 43 training_loop """lcwa""" +1009 43 evaluator """rankbased""" +1009 44 dataset """kinships""" +1009 44 model """unstructuredmodel""" +1009 44 loss """softplus""" +1009 44 regularizer """no""" +1009 44 optimizer """adadelta""" +1009 44 training_loop """lcwa""" +1009 44 evaluator """rankbased""" +1009 45 dataset """kinships""" +1009 45 model """unstructuredmodel""" +1009 45 loss """softplus""" +1009 45 regularizer """no""" +1009 45 optimizer """adadelta""" +1009 45 training_loop """lcwa""" +1009 45 evaluator """rankbased""" +1009 46 dataset """kinships""" +1009 46 model """unstructuredmodel""" +1009 46 loss """softplus""" +1009 46 regularizer """no""" +1009 46 optimizer """adadelta""" +1009 46 training_loop """lcwa""" +1009 46 evaluator """rankbased""" +1009 47 dataset """kinships""" +1009 47 model """unstructuredmodel""" +1009 47 loss """softplus""" +1009 47 regularizer """no""" +1009 47 optimizer """adadelta""" +1009 47 training_loop """lcwa""" +1009 47 evaluator """rankbased""" +1009 48 dataset """kinships""" +1009 48 model """unstructuredmodel""" +1009 48 loss """softplus""" +1009 48 regularizer """no""" +1009 48 optimizer """adadelta""" +1009 48 training_loop """lcwa""" +1009 48 evaluator """rankbased""" +1009 49 dataset """kinships""" +1009 49 model """unstructuredmodel""" +1009 49 loss """softplus""" +1009 49 regularizer """no""" +1009 49 optimizer """adadelta""" +1009 49 training_loop """lcwa""" +1009 49 evaluator """rankbased""" +1009 50 dataset """kinships""" +1009 50 model """unstructuredmodel""" +1009 50 loss """softplus""" +1009 50 regularizer """no""" +1009 50 optimizer """adadelta""" +1009 50 
training_loop """lcwa""" +1009 50 evaluator """rankbased""" +1009 51 dataset """kinships""" +1009 51 model """unstructuredmodel""" +1009 51 loss """softplus""" +1009 51 regularizer """no""" +1009 51 optimizer """adadelta""" +1009 51 training_loop """lcwa""" +1009 51 evaluator """rankbased""" +1009 52 dataset """kinships""" +1009 52 model """unstructuredmodel""" +1009 52 loss """softplus""" +1009 52 regularizer """no""" +1009 52 optimizer """adadelta""" +1009 52 training_loop """lcwa""" +1009 52 evaluator """rankbased""" +1009 53 dataset """kinships""" +1009 53 model """unstructuredmodel""" +1009 53 loss """softplus""" +1009 53 regularizer """no""" +1009 53 optimizer """adadelta""" +1009 53 training_loop """lcwa""" +1009 53 evaluator """rankbased""" +1009 54 dataset """kinships""" +1009 54 model """unstructuredmodel""" +1009 54 loss """softplus""" +1009 54 regularizer """no""" +1009 54 optimizer """adadelta""" +1009 54 training_loop """lcwa""" +1009 54 evaluator """rankbased""" +1009 55 dataset """kinships""" +1009 55 model """unstructuredmodel""" +1009 55 loss """softplus""" +1009 55 regularizer """no""" +1009 55 optimizer """adadelta""" +1009 55 training_loop """lcwa""" +1009 55 evaluator """rankbased""" +1009 56 dataset """kinships""" +1009 56 model """unstructuredmodel""" +1009 56 loss """softplus""" +1009 56 regularizer """no""" +1009 56 optimizer """adadelta""" +1009 56 training_loop """lcwa""" +1009 56 evaluator """rankbased""" +1009 57 dataset """kinships""" +1009 57 model """unstructuredmodel""" +1009 57 loss """softplus""" +1009 57 regularizer """no""" +1009 57 optimizer """adadelta""" +1009 57 training_loop """lcwa""" +1009 57 evaluator """rankbased""" +1009 58 dataset """kinships""" +1009 58 model """unstructuredmodel""" +1009 58 loss """softplus""" +1009 58 regularizer """no""" +1009 58 optimizer """adadelta""" +1009 58 training_loop """lcwa""" +1009 58 evaluator """rankbased""" +1009 59 dataset """kinships""" +1009 59 model """unstructuredmodel""" 
+1009 59 loss """softplus""" +1009 59 regularizer """no""" +1009 59 optimizer """adadelta""" +1009 59 training_loop """lcwa""" +1009 59 evaluator """rankbased""" +1009 60 dataset """kinships""" +1009 60 model """unstructuredmodel""" +1009 60 loss """softplus""" +1009 60 regularizer """no""" +1009 60 optimizer """adadelta""" +1009 60 training_loop """lcwa""" +1009 60 evaluator """rankbased""" +1009 61 dataset """kinships""" +1009 61 model """unstructuredmodel""" +1009 61 loss """softplus""" +1009 61 regularizer """no""" +1009 61 optimizer """adadelta""" +1009 61 training_loop """lcwa""" +1009 61 evaluator """rankbased""" +1009 62 dataset """kinships""" +1009 62 model """unstructuredmodel""" +1009 62 loss """softplus""" +1009 62 regularizer """no""" +1009 62 optimizer """adadelta""" +1009 62 training_loop """lcwa""" +1009 62 evaluator """rankbased""" +1009 63 dataset """kinships""" +1009 63 model """unstructuredmodel""" +1009 63 loss """softplus""" +1009 63 regularizer """no""" +1009 63 optimizer """adadelta""" +1009 63 training_loop """lcwa""" +1009 63 evaluator """rankbased""" +1009 64 dataset """kinships""" +1009 64 model """unstructuredmodel""" +1009 64 loss """softplus""" +1009 64 regularizer """no""" +1009 64 optimizer """adadelta""" +1009 64 training_loop """lcwa""" +1009 64 evaluator """rankbased""" +1009 65 dataset """kinships""" +1009 65 model """unstructuredmodel""" +1009 65 loss """softplus""" +1009 65 regularizer """no""" +1009 65 optimizer """adadelta""" +1009 65 training_loop """lcwa""" +1009 65 evaluator """rankbased""" +1009 66 dataset """kinships""" +1009 66 model """unstructuredmodel""" +1009 66 loss """softplus""" +1009 66 regularizer """no""" +1009 66 optimizer """adadelta""" +1009 66 training_loop """lcwa""" +1009 66 evaluator """rankbased""" +1009 67 dataset """kinships""" +1009 67 model """unstructuredmodel""" +1009 67 loss """softplus""" +1009 67 regularizer """no""" +1009 67 optimizer """adadelta""" +1009 67 training_loop """lcwa""" +1009 67 
evaluator """rankbased""" +1009 68 dataset """kinships""" +1009 68 model """unstructuredmodel""" +1009 68 loss """softplus""" +1009 68 regularizer """no""" +1009 68 optimizer """adadelta""" +1009 68 training_loop """lcwa""" +1009 68 evaluator """rankbased""" +1009 69 dataset """kinships""" +1009 69 model """unstructuredmodel""" +1009 69 loss """softplus""" +1009 69 regularizer """no""" +1009 69 optimizer """adadelta""" +1009 69 training_loop """lcwa""" +1009 69 evaluator """rankbased""" +1009 70 dataset """kinships""" +1009 70 model """unstructuredmodel""" +1009 70 loss """softplus""" +1009 70 regularizer """no""" +1009 70 optimizer """adadelta""" +1009 70 training_loop """lcwa""" +1009 70 evaluator """rankbased""" +1009 71 dataset """kinships""" +1009 71 model """unstructuredmodel""" +1009 71 loss """softplus""" +1009 71 regularizer """no""" +1009 71 optimizer """adadelta""" +1009 71 training_loop """lcwa""" +1009 71 evaluator """rankbased""" +1009 72 dataset """kinships""" +1009 72 model """unstructuredmodel""" +1009 72 loss """softplus""" +1009 72 regularizer """no""" +1009 72 optimizer """adadelta""" +1009 72 training_loop """lcwa""" +1009 72 evaluator """rankbased""" +1009 73 dataset """kinships""" +1009 73 model """unstructuredmodel""" +1009 73 loss """softplus""" +1009 73 regularizer """no""" +1009 73 optimizer """adadelta""" +1009 73 training_loop """lcwa""" +1009 73 evaluator """rankbased""" +1009 74 dataset """kinships""" +1009 74 model """unstructuredmodel""" +1009 74 loss """softplus""" +1009 74 regularizer """no""" +1009 74 optimizer """adadelta""" +1009 74 training_loop """lcwa""" +1009 74 evaluator """rankbased""" +1009 75 dataset """kinships""" +1009 75 model """unstructuredmodel""" +1009 75 loss """softplus""" +1009 75 regularizer """no""" +1009 75 optimizer """adadelta""" +1009 75 training_loop """lcwa""" +1009 75 evaluator """rankbased""" +1009 76 dataset """kinships""" +1009 76 model """unstructuredmodel""" +1009 76 loss """softplus""" +1009 76 
regularizer """no""" +1009 76 optimizer """adadelta""" +1009 76 training_loop """lcwa""" +1009 76 evaluator """rankbased""" +1009 77 dataset """kinships""" +1009 77 model """unstructuredmodel""" +1009 77 loss """softplus""" +1009 77 regularizer """no""" +1009 77 optimizer """adadelta""" +1009 77 training_loop """lcwa""" +1009 77 evaluator """rankbased""" +1009 78 dataset """kinships""" +1009 78 model """unstructuredmodel""" +1009 78 loss """softplus""" +1009 78 regularizer """no""" +1009 78 optimizer """adadelta""" +1009 78 training_loop """lcwa""" +1009 78 evaluator """rankbased""" +1009 79 dataset """kinships""" +1009 79 model """unstructuredmodel""" +1009 79 loss """softplus""" +1009 79 regularizer """no""" +1009 79 optimizer """adadelta""" +1009 79 training_loop """lcwa""" +1009 79 evaluator """rankbased""" +1009 80 dataset """kinships""" +1009 80 model """unstructuredmodel""" +1009 80 loss """softplus""" +1009 80 regularizer """no""" +1009 80 optimizer """adadelta""" +1009 80 training_loop """lcwa""" +1009 80 evaluator """rankbased""" +1009 81 dataset """kinships""" +1009 81 model """unstructuredmodel""" +1009 81 loss """softplus""" +1009 81 regularizer """no""" +1009 81 optimizer """adadelta""" +1009 81 training_loop """lcwa""" +1009 81 evaluator """rankbased""" +1009 82 dataset """kinships""" +1009 82 model """unstructuredmodel""" +1009 82 loss """softplus""" +1009 82 regularizer """no""" +1009 82 optimizer """adadelta""" +1009 82 training_loop """lcwa""" +1009 82 evaluator """rankbased""" +1009 83 dataset """kinships""" +1009 83 model """unstructuredmodel""" +1009 83 loss """softplus""" +1009 83 regularizer """no""" +1009 83 optimizer """adadelta""" +1009 83 training_loop """lcwa""" +1009 83 evaluator """rankbased""" +1009 84 dataset """kinships""" +1009 84 model """unstructuredmodel""" +1009 84 loss """softplus""" +1009 84 regularizer """no""" +1009 84 optimizer """adadelta""" +1009 84 training_loop """lcwa""" +1009 84 evaluator """rankbased""" +1009 85 
dataset """kinships""" +1009 85 model """unstructuredmodel""" +1009 85 loss """softplus""" +1009 85 regularizer """no""" +1009 85 optimizer """adadelta""" +1009 85 training_loop """lcwa""" +1009 85 evaluator """rankbased""" +1009 86 dataset """kinships""" +1009 86 model """unstructuredmodel""" +1009 86 loss """softplus""" +1009 86 regularizer """no""" +1009 86 optimizer """adadelta""" +1009 86 training_loop """lcwa""" +1009 86 evaluator """rankbased""" +1009 87 dataset """kinships""" +1009 87 model """unstructuredmodel""" +1009 87 loss """softplus""" +1009 87 regularizer """no""" +1009 87 optimizer """adadelta""" +1009 87 training_loop """lcwa""" +1009 87 evaluator """rankbased""" +1009 88 dataset """kinships""" +1009 88 model """unstructuredmodel""" +1009 88 loss """softplus""" +1009 88 regularizer """no""" +1009 88 optimizer """adadelta""" +1009 88 training_loop """lcwa""" +1009 88 evaluator """rankbased""" +1009 89 dataset """kinships""" +1009 89 model """unstructuredmodel""" +1009 89 loss """softplus""" +1009 89 regularizer """no""" +1009 89 optimizer """adadelta""" +1009 89 training_loop """lcwa""" +1009 89 evaluator """rankbased""" +1009 90 dataset """kinships""" +1009 90 model """unstructuredmodel""" +1009 90 loss """softplus""" +1009 90 regularizer """no""" +1009 90 optimizer """adadelta""" +1009 90 training_loop """lcwa""" +1009 90 evaluator """rankbased""" +1009 91 dataset """kinships""" +1009 91 model """unstructuredmodel""" +1009 91 loss """softplus""" +1009 91 regularizer """no""" +1009 91 optimizer """adadelta""" +1009 91 training_loop """lcwa""" +1009 91 evaluator """rankbased""" +1009 92 dataset """kinships""" +1009 92 model """unstructuredmodel""" +1009 92 loss """softplus""" +1009 92 regularizer """no""" +1009 92 optimizer """adadelta""" +1009 92 training_loop """lcwa""" +1009 92 evaluator """rankbased""" +1009 93 dataset """kinships""" +1009 93 model """unstructuredmodel""" +1009 93 loss """softplus""" +1009 93 regularizer """no""" +1009 93 
optimizer """adadelta""" +1009 93 training_loop """lcwa""" +1009 93 evaluator """rankbased""" +1009 94 dataset """kinships""" +1009 94 model """unstructuredmodel""" +1009 94 loss """softplus""" +1009 94 regularizer """no""" +1009 94 optimizer """adadelta""" +1009 94 training_loop """lcwa""" +1009 94 evaluator """rankbased""" +1009 95 dataset """kinships""" +1009 95 model """unstructuredmodel""" +1009 95 loss """softplus""" +1009 95 regularizer """no""" +1009 95 optimizer """adadelta""" +1009 95 training_loop """lcwa""" +1009 95 evaluator """rankbased""" +1009 96 dataset """kinships""" +1009 96 model """unstructuredmodel""" +1009 96 loss """softplus""" +1009 96 regularizer """no""" +1009 96 optimizer """adadelta""" +1009 96 training_loop """lcwa""" +1009 96 evaluator """rankbased""" +1009 97 dataset """kinships""" +1009 97 model """unstructuredmodel""" +1009 97 loss """softplus""" +1009 97 regularizer """no""" +1009 97 optimizer """adadelta""" +1009 97 training_loop """lcwa""" +1009 97 evaluator """rankbased""" +1009 98 dataset """kinships""" +1009 98 model """unstructuredmodel""" +1009 98 loss """softplus""" +1009 98 regularizer """no""" +1009 98 optimizer """adadelta""" +1009 98 training_loop """lcwa""" +1009 98 evaluator """rankbased""" +1009 99 dataset """kinships""" +1009 99 model """unstructuredmodel""" +1009 99 loss """softplus""" +1009 99 regularizer """no""" +1009 99 optimizer """adadelta""" +1009 99 training_loop """lcwa""" +1009 99 evaluator """rankbased""" +1009 100 dataset """kinships""" +1009 100 model """unstructuredmodel""" +1009 100 loss """softplus""" +1009 100 regularizer """no""" +1009 100 optimizer """adadelta""" +1009 100 training_loop """lcwa""" +1009 100 evaluator """rankbased""" +1010 1 model.embedding_dim 2.0 +1010 1 model.scoring_fct_norm 2.0 +1010 1 training.batch_size 2.0 +1010 1 training.label_smoothing 0.04171774318917126 +1010 2 model.embedding_dim 0.0 +1010 2 model.scoring_fct_norm 1.0 +1010 2 training.batch_size 1.0 +1010 2 
training.label_smoothing 0.007859316465866446 +1010 3 model.embedding_dim 2.0 +1010 3 model.scoring_fct_norm 2.0 +1010 3 training.batch_size 1.0 +1010 3 training.label_smoothing 0.0022260594640584773 +1010 4 model.embedding_dim 1.0 +1010 4 model.scoring_fct_norm 2.0 +1010 4 training.batch_size 2.0 +1010 4 training.label_smoothing 0.007148061800236135 +1010 5 model.embedding_dim 2.0 +1010 5 model.scoring_fct_norm 1.0 +1010 5 training.batch_size 0.0 +1010 5 training.label_smoothing 0.0059071215272791045 +1010 6 model.embedding_dim 2.0 +1010 6 model.scoring_fct_norm 2.0 +1010 6 training.batch_size 2.0 +1010 6 training.label_smoothing 0.16518399920207327 +1010 7 model.embedding_dim 0.0 +1010 7 model.scoring_fct_norm 1.0 +1010 7 training.batch_size 2.0 +1010 7 training.label_smoothing 0.8412993752535028 +1010 8 model.embedding_dim 2.0 +1010 8 model.scoring_fct_norm 1.0 +1010 8 training.batch_size 1.0 +1010 8 training.label_smoothing 0.16507520360905303 +1010 9 model.embedding_dim 1.0 +1010 9 model.scoring_fct_norm 2.0 +1010 9 training.batch_size 2.0 +1010 9 training.label_smoothing 0.004867244433677676 +1010 10 model.embedding_dim 1.0 +1010 10 model.scoring_fct_norm 2.0 +1010 10 training.batch_size 1.0 +1010 10 training.label_smoothing 0.028649224258203515 +1010 11 model.embedding_dim 0.0 +1010 11 model.scoring_fct_norm 1.0 +1010 11 training.batch_size 1.0 +1010 11 training.label_smoothing 0.6543979881716785 +1010 12 model.embedding_dim 2.0 +1010 12 model.scoring_fct_norm 2.0 +1010 12 training.batch_size 1.0 +1010 12 training.label_smoothing 0.2015344114728093 +1010 13 model.embedding_dim 2.0 +1010 13 model.scoring_fct_norm 1.0 +1010 13 training.batch_size 2.0 +1010 13 training.label_smoothing 0.8065787989695439 +1010 14 model.embedding_dim 0.0 +1010 14 model.scoring_fct_norm 2.0 +1010 14 training.batch_size 0.0 +1010 14 training.label_smoothing 0.016904782831520783 +1010 15 model.embedding_dim 1.0 +1010 15 model.scoring_fct_norm 2.0 +1010 15 training.batch_size 1.0 
+1010 15 training.label_smoothing 0.041500175380279246 +1010 16 model.embedding_dim 0.0 +1010 16 model.scoring_fct_norm 1.0 +1010 16 training.batch_size 0.0 +1010 16 training.label_smoothing 0.03297249827053217 +1010 17 model.embedding_dim 0.0 +1010 17 model.scoring_fct_norm 1.0 +1010 17 training.batch_size 0.0 +1010 17 training.label_smoothing 0.005007192096814115 +1010 18 model.embedding_dim 0.0 +1010 18 model.scoring_fct_norm 2.0 +1010 18 training.batch_size 2.0 +1010 18 training.label_smoothing 0.018457459732015076 +1010 19 model.embedding_dim 0.0 +1010 19 model.scoring_fct_norm 1.0 +1010 19 training.batch_size 0.0 +1010 19 training.label_smoothing 0.004343242912045723 +1010 20 model.embedding_dim 1.0 +1010 20 model.scoring_fct_norm 1.0 +1010 20 training.batch_size 0.0 +1010 20 training.label_smoothing 0.05656734024251969 +1010 21 model.embedding_dim 1.0 +1010 21 model.scoring_fct_norm 2.0 +1010 21 training.batch_size 1.0 +1010 21 training.label_smoothing 0.04734617056265757 +1010 22 model.embedding_dim 2.0 +1010 22 model.scoring_fct_norm 1.0 +1010 22 training.batch_size 2.0 +1010 22 training.label_smoothing 0.048552585785995826 +1010 23 model.embedding_dim 2.0 +1010 23 model.scoring_fct_norm 2.0 +1010 23 training.batch_size 1.0 +1010 23 training.label_smoothing 0.09036209516482085 +1010 24 model.embedding_dim 0.0 +1010 24 model.scoring_fct_norm 1.0 +1010 24 training.batch_size 2.0 +1010 24 training.label_smoothing 0.2251480594382274 +1010 25 model.embedding_dim 1.0 +1010 25 model.scoring_fct_norm 2.0 +1010 25 training.batch_size 2.0 +1010 25 training.label_smoothing 0.008590805601898576 +1010 26 model.embedding_dim 1.0 +1010 26 model.scoring_fct_norm 1.0 +1010 26 training.batch_size 1.0 +1010 26 training.label_smoothing 0.39312125726129216 +1010 27 model.embedding_dim 2.0 +1010 27 model.scoring_fct_norm 1.0 +1010 27 training.batch_size 2.0 +1010 27 training.label_smoothing 0.44289704914892447 +1010 28 model.embedding_dim 0.0 +1010 28 model.scoring_fct_norm 1.0 
+1010 28 training.batch_size 1.0 +1010 28 training.label_smoothing 0.001238140947123468 +1010 29 model.embedding_dim 1.0 +1010 29 model.scoring_fct_norm 1.0 +1010 29 training.batch_size 0.0 +1010 29 training.label_smoothing 0.0023966201774995935 +1010 30 model.embedding_dim 2.0 +1010 30 model.scoring_fct_norm 2.0 +1010 30 training.batch_size 1.0 +1010 30 training.label_smoothing 0.012349882602113713 +1010 31 model.embedding_dim 2.0 +1010 31 model.scoring_fct_norm 2.0 +1010 31 training.batch_size 1.0 +1010 31 training.label_smoothing 0.19624296388270013 +1010 32 model.embedding_dim 2.0 +1010 32 model.scoring_fct_norm 1.0 +1010 32 training.batch_size 0.0 +1010 32 training.label_smoothing 0.03275775228034868 +1010 33 model.embedding_dim 0.0 +1010 33 model.scoring_fct_norm 1.0 +1010 33 training.batch_size 1.0 +1010 33 training.label_smoothing 0.16149599734544723 +1010 34 model.embedding_dim 1.0 +1010 34 model.scoring_fct_norm 2.0 +1010 34 training.batch_size 1.0 +1010 34 training.label_smoothing 0.47869432143270335 +1010 35 model.embedding_dim 0.0 +1010 35 model.scoring_fct_norm 1.0 +1010 35 training.batch_size 0.0 +1010 35 training.label_smoothing 0.006215072078738305 +1010 36 model.embedding_dim 1.0 +1010 36 model.scoring_fct_norm 2.0 +1010 36 training.batch_size 2.0 +1010 36 training.label_smoothing 0.8088246970537136 +1010 37 model.embedding_dim 1.0 +1010 37 model.scoring_fct_norm 1.0 +1010 37 training.batch_size 1.0 +1010 37 training.label_smoothing 0.22588637524454883 +1010 38 model.embedding_dim 0.0 +1010 38 model.scoring_fct_norm 1.0 +1010 38 training.batch_size 0.0 +1010 38 training.label_smoothing 0.0037582831478616295 +1010 39 model.embedding_dim 1.0 +1010 39 model.scoring_fct_norm 1.0 +1010 39 training.batch_size 1.0 +1010 39 training.label_smoothing 0.015696176644263423 +1010 40 model.embedding_dim 2.0 +1010 40 model.scoring_fct_norm 2.0 +1010 40 training.batch_size 2.0 +1010 40 training.label_smoothing 0.01786153528035224 +1010 41 model.embedding_dim 0.0 
+1010 41 model.scoring_fct_norm 1.0 +1010 41 training.batch_size 2.0 +1010 41 training.label_smoothing 0.0030167342166290113 +1010 42 model.embedding_dim 1.0 +1010 42 model.scoring_fct_norm 2.0 +1010 42 training.batch_size 1.0 +1010 42 training.label_smoothing 0.4276425823474997 +1010 43 model.embedding_dim 2.0 +1010 43 model.scoring_fct_norm 1.0 +1010 43 training.batch_size 2.0 +1010 43 training.label_smoothing 0.005883931889826917 +1010 44 model.embedding_dim 1.0 +1010 44 model.scoring_fct_norm 2.0 +1010 44 training.batch_size 2.0 +1010 44 training.label_smoothing 0.06130812168754581 +1010 45 model.embedding_dim 2.0 +1010 45 model.scoring_fct_norm 2.0 +1010 45 training.batch_size 2.0 +1010 45 training.label_smoothing 0.1924458455069385 +1010 46 model.embedding_dim 1.0 +1010 46 model.scoring_fct_norm 2.0 +1010 46 training.batch_size 1.0 +1010 46 training.label_smoothing 0.0022857302415669723 +1010 47 model.embedding_dim 1.0 +1010 47 model.scoring_fct_norm 1.0 +1010 47 training.batch_size 0.0 +1010 47 training.label_smoothing 0.2369294189928116 +1010 48 model.embedding_dim 1.0 +1010 48 model.scoring_fct_norm 2.0 +1010 48 training.batch_size 2.0 +1010 48 training.label_smoothing 0.0016217590382085832 +1010 49 model.embedding_dim 0.0 +1010 49 model.scoring_fct_norm 2.0 +1010 49 training.batch_size 2.0 +1010 49 training.label_smoothing 0.12948662593007107 +1010 50 model.embedding_dim 2.0 +1010 50 model.scoring_fct_norm 1.0 +1010 50 training.batch_size 0.0 +1010 50 training.label_smoothing 0.10804530653817605 +1010 51 model.embedding_dim 1.0 +1010 51 model.scoring_fct_norm 2.0 +1010 51 training.batch_size 1.0 +1010 51 training.label_smoothing 0.0011806255113020498 +1010 52 model.embedding_dim 2.0 +1010 52 model.scoring_fct_norm 1.0 +1010 52 training.batch_size 0.0 +1010 52 training.label_smoothing 0.022815982119952102 +1010 53 model.embedding_dim 0.0 +1010 53 model.scoring_fct_norm 1.0 +1010 53 training.batch_size 1.0 +1010 53 training.label_smoothing 
0.0016260222510311251 +1010 54 model.embedding_dim 1.0 +1010 54 model.scoring_fct_norm 1.0 +1010 54 training.batch_size 0.0 +1010 54 training.label_smoothing 0.08442840232520334 +1010 55 model.embedding_dim 2.0 +1010 55 model.scoring_fct_norm 1.0 +1010 55 training.batch_size 1.0 +1010 55 training.label_smoothing 0.013262243692280344 +1010 56 model.embedding_dim 1.0 +1010 56 model.scoring_fct_norm 2.0 +1010 56 training.batch_size 2.0 +1010 56 training.label_smoothing 0.16100587862743315 +1010 57 model.embedding_dim 0.0 +1010 57 model.scoring_fct_norm 2.0 +1010 57 training.batch_size 1.0 +1010 57 training.label_smoothing 0.008163719318485384 +1010 58 model.embedding_dim 2.0 +1010 58 model.scoring_fct_norm 1.0 +1010 58 training.batch_size 2.0 +1010 58 training.label_smoothing 0.2318590231742873 +1010 59 model.embedding_dim 2.0 +1010 59 model.scoring_fct_norm 1.0 +1010 59 training.batch_size 0.0 +1010 59 training.label_smoothing 0.9699022197709352 +1010 60 model.embedding_dim 0.0 +1010 60 model.scoring_fct_norm 2.0 +1010 60 training.batch_size 0.0 +1010 60 training.label_smoothing 0.6797666692166554 +1010 61 model.embedding_dim 2.0 +1010 61 model.scoring_fct_norm 1.0 +1010 61 training.batch_size 0.0 +1010 61 training.label_smoothing 0.07122888260961298 +1010 62 model.embedding_dim 0.0 +1010 62 model.scoring_fct_norm 2.0 +1010 62 training.batch_size 0.0 +1010 62 training.label_smoothing 0.00825038796211103 +1010 63 model.embedding_dim 1.0 +1010 63 model.scoring_fct_norm 1.0 +1010 63 training.batch_size 1.0 +1010 63 training.label_smoothing 0.20903463170952669 +1010 64 model.embedding_dim 1.0 +1010 64 model.scoring_fct_norm 2.0 +1010 64 training.batch_size 1.0 +1010 64 training.label_smoothing 0.0016459296201647626 +1010 65 model.embedding_dim 1.0 +1010 65 model.scoring_fct_norm 1.0 +1010 65 training.batch_size 2.0 +1010 65 training.label_smoothing 0.014978452507479307 +1010 66 model.embedding_dim 0.0 +1010 66 model.scoring_fct_norm 2.0 +1010 66 training.batch_size 2.0 
+1010 66 training.label_smoothing 0.9586367915387926 +1010 67 model.embedding_dim 2.0 +1010 67 model.scoring_fct_norm 2.0 +1010 67 training.batch_size 2.0 +1010 67 training.label_smoothing 0.16663847303889698 +1010 68 model.embedding_dim 0.0 +1010 68 model.scoring_fct_norm 1.0 +1010 68 training.batch_size 1.0 +1010 68 training.label_smoothing 0.5133660725186675 +1010 69 model.embedding_dim 2.0 +1010 69 model.scoring_fct_norm 1.0 +1010 69 training.batch_size 0.0 +1010 69 training.label_smoothing 0.22866023395319332 +1010 70 model.embedding_dim 1.0 +1010 70 model.scoring_fct_norm 1.0 +1010 70 training.batch_size 0.0 +1010 70 training.label_smoothing 0.003298878576595382 +1010 71 model.embedding_dim 2.0 +1010 71 model.scoring_fct_norm 1.0 +1010 71 training.batch_size 1.0 +1010 71 training.label_smoothing 0.004775534349829034 +1010 72 model.embedding_dim 0.0 +1010 72 model.scoring_fct_norm 1.0 +1010 72 training.batch_size 2.0 +1010 72 training.label_smoothing 0.003934466604211594 +1010 73 model.embedding_dim 1.0 +1010 73 model.scoring_fct_norm 2.0 +1010 73 training.batch_size 0.0 +1010 73 training.label_smoothing 0.0038373099749181095 +1010 74 model.embedding_dim 1.0 +1010 74 model.scoring_fct_norm 2.0 +1010 74 training.batch_size 1.0 +1010 74 training.label_smoothing 0.004814817898537179 +1010 75 model.embedding_dim 1.0 +1010 75 model.scoring_fct_norm 1.0 +1010 75 training.batch_size 0.0 +1010 75 training.label_smoothing 0.057915607810665265 +1010 76 model.embedding_dim 1.0 +1010 76 model.scoring_fct_norm 2.0 +1010 76 training.batch_size 2.0 +1010 76 training.label_smoothing 0.0012023448107357545 +1010 77 model.embedding_dim 1.0 +1010 77 model.scoring_fct_norm 1.0 +1010 77 training.batch_size 2.0 +1010 77 training.label_smoothing 0.38742418941340223 +1010 78 model.embedding_dim 0.0 +1010 78 model.scoring_fct_norm 1.0 +1010 78 training.batch_size 1.0 +1010 78 training.label_smoothing 0.0031123536219879394 +1010 79 model.embedding_dim 0.0 +1010 79 model.scoring_fct_norm 
1.0 +1010 79 training.batch_size 0.0 +1010 79 training.label_smoothing 0.1992101439832134 +1010 80 model.embedding_dim 1.0 +1010 80 model.scoring_fct_norm 1.0 +1010 80 training.batch_size 1.0 +1010 80 training.label_smoothing 0.8253378532555093 +1010 81 model.embedding_dim 2.0 +1010 81 model.scoring_fct_norm 2.0 +1010 81 training.batch_size 1.0 +1010 81 training.label_smoothing 0.16315962146001092 +1010 82 model.embedding_dim 1.0 +1010 82 model.scoring_fct_norm 1.0 +1010 82 training.batch_size 1.0 +1010 82 training.label_smoothing 0.009168974266622925 +1010 83 model.embedding_dim 1.0 +1010 83 model.scoring_fct_norm 2.0 +1010 83 training.batch_size 2.0 +1010 83 training.label_smoothing 0.5382042780464333 +1010 84 model.embedding_dim 2.0 +1010 84 model.scoring_fct_norm 2.0 +1010 84 training.batch_size 1.0 +1010 84 training.label_smoothing 0.0017621351882404401 +1010 85 model.embedding_dim 2.0 +1010 85 model.scoring_fct_norm 2.0 +1010 85 training.batch_size 1.0 +1010 85 training.label_smoothing 0.062276246981733965 +1010 86 model.embedding_dim 1.0 +1010 86 model.scoring_fct_norm 1.0 +1010 86 training.batch_size 1.0 +1010 86 training.label_smoothing 0.004774081291495484 +1010 87 model.embedding_dim 2.0 +1010 87 model.scoring_fct_norm 2.0 +1010 87 training.batch_size 1.0 +1010 87 training.label_smoothing 0.2057346796281943 +1010 88 model.embedding_dim 1.0 +1010 88 model.scoring_fct_norm 2.0 +1010 88 training.batch_size 2.0 +1010 88 training.label_smoothing 0.41133589707145923 +1010 89 model.embedding_dim 1.0 +1010 89 model.scoring_fct_norm 1.0 +1010 89 training.batch_size 1.0 +1010 89 training.label_smoothing 0.7658751812551603 +1010 90 model.embedding_dim 0.0 +1010 90 model.scoring_fct_norm 2.0 +1010 90 training.batch_size 1.0 +1010 90 training.label_smoothing 0.035433092216521245 +1010 91 model.embedding_dim 1.0 +1010 91 model.scoring_fct_norm 2.0 +1010 91 training.batch_size 1.0 +1010 91 training.label_smoothing 0.03184800208197583 +1010 92 model.embedding_dim 2.0 
+1010 92 model.scoring_fct_norm 2.0 +1010 92 training.batch_size 1.0 +1010 92 training.label_smoothing 0.10476902572885105 +1010 93 model.embedding_dim 1.0 +1010 93 model.scoring_fct_norm 1.0 +1010 93 training.batch_size 1.0 +1010 93 training.label_smoothing 0.4675133408447603 +1010 94 model.embedding_dim 2.0 +1010 94 model.scoring_fct_norm 1.0 +1010 94 training.batch_size 0.0 +1010 94 training.label_smoothing 0.00348513215013554 +1010 95 model.embedding_dim 0.0 +1010 95 model.scoring_fct_norm 1.0 +1010 95 training.batch_size 0.0 +1010 95 training.label_smoothing 0.0014833511881607113 +1010 96 model.embedding_dim 1.0 +1010 96 model.scoring_fct_norm 1.0 +1010 96 training.batch_size 2.0 +1010 96 training.label_smoothing 0.0042875938847236825 +1010 97 model.embedding_dim 1.0 +1010 97 model.scoring_fct_norm 2.0 +1010 97 training.batch_size 2.0 +1010 97 training.label_smoothing 0.03259736874074592 +1010 98 model.embedding_dim 0.0 +1010 98 model.scoring_fct_norm 1.0 +1010 98 training.batch_size 2.0 +1010 98 training.label_smoothing 0.004513128078706561 +1010 99 model.embedding_dim 1.0 +1010 99 model.scoring_fct_norm 1.0 +1010 99 training.batch_size 2.0 +1010 99 training.label_smoothing 0.003984935045088935 +1010 100 model.embedding_dim 2.0 +1010 100 model.scoring_fct_norm 1.0 +1010 100 training.batch_size 1.0 +1010 100 training.label_smoothing 0.009820646642141759 +1010 1 dataset """kinships""" +1010 1 model """unstructuredmodel""" +1010 1 loss """crossentropy""" +1010 1 regularizer """no""" +1010 1 optimizer """adadelta""" +1010 1 training_loop """lcwa""" +1010 1 evaluator """rankbased""" +1010 2 dataset """kinships""" +1010 2 model """unstructuredmodel""" +1010 2 loss """crossentropy""" +1010 2 regularizer """no""" +1010 2 optimizer """adadelta""" +1010 2 training_loop """lcwa""" +1010 2 evaluator """rankbased""" +1010 3 dataset """kinships""" +1010 3 model """unstructuredmodel""" +1010 3 loss """crossentropy""" +1010 3 regularizer """no""" +1010 3 optimizer 
"""adadelta""" +1010 3 training_loop """lcwa""" +1010 3 evaluator """rankbased""" +1010 4 dataset """kinships""" +1010 4 model """unstructuredmodel""" +1010 4 loss """crossentropy""" +1010 4 regularizer """no""" +1010 4 optimizer """adadelta""" +1010 4 training_loop """lcwa""" +1010 4 evaluator """rankbased""" +1010 5 dataset """kinships""" +1010 5 model """unstructuredmodel""" +1010 5 loss """crossentropy""" +1010 5 regularizer """no""" +1010 5 optimizer """adadelta""" +1010 5 training_loop """lcwa""" +1010 5 evaluator """rankbased""" +1010 6 dataset """kinships""" +1010 6 model """unstructuredmodel""" +1010 6 loss """crossentropy""" +1010 6 regularizer """no""" +1010 6 optimizer """adadelta""" +1010 6 training_loop """lcwa""" +1010 6 evaluator """rankbased""" +1010 7 dataset """kinships""" +1010 7 model """unstructuredmodel""" +1010 7 loss """crossentropy""" +1010 7 regularizer """no""" +1010 7 optimizer """adadelta""" +1010 7 training_loop """lcwa""" +1010 7 evaluator """rankbased""" +1010 8 dataset """kinships""" +1010 8 model """unstructuredmodel""" +1010 8 loss """crossentropy""" +1010 8 regularizer """no""" +1010 8 optimizer """adadelta""" +1010 8 training_loop """lcwa""" +1010 8 evaluator """rankbased""" +1010 9 dataset """kinships""" +1010 9 model """unstructuredmodel""" +1010 9 loss """crossentropy""" +1010 9 regularizer """no""" +1010 9 optimizer """adadelta""" +1010 9 training_loop """lcwa""" +1010 9 evaluator """rankbased""" +1010 10 dataset """kinships""" +1010 10 model """unstructuredmodel""" +1010 10 loss """crossentropy""" +1010 10 regularizer """no""" +1010 10 optimizer """adadelta""" +1010 10 training_loop """lcwa""" +1010 10 evaluator """rankbased""" +1010 11 dataset """kinships""" +1010 11 model """unstructuredmodel""" +1010 11 loss """crossentropy""" +1010 11 regularizer """no""" +1010 11 optimizer """adadelta""" +1010 11 training_loop """lcwa""" +1010 11 evaluator """rankbased""" +1010 12 dataset """kinships""" +1010 12 model 
"""unstructuredmodel""" +1010 12 loss """crossentropy""" +1010 12 regularizer """no""" +1010 12 optimizer """adadelta""" +1010 12 training_loop """lcwa""" +1010 12 evaluator """rankbased""" +1010 13 dataset """kinships""" +1010 13 model """unstructuredmodel""" +1010 13 loss """crossentropy""" +1010 13 regularizer """no""" +1010 13 optimizer """adadelta""" +1010 13 training_loop """lcwa""" +1010 13 evaluator """rankbased""" +1010 14 dataset """kinships""" +1010 14 model """unstructuredmodel""" +1010 14 loss """crossentropy""" +1010 14 regularizer """no""" +1010 14 optimizer """adadelta""" +1010 14 training_loop """lcwa""" +1010 14 evaluator """rankbased""" +1010 15 dataset """kinships""" +1010 15 model """unstructuredmodel""" +1010 15 loss """crossentropy""" +1010 15 regularizer """no""" +1010 15 optimizer """adadelta""" +1010 15 training_loop """lcwa""" +1010 15 evaluator """rankbased""" +1010 16 dataset """kinships""" +1010 16 model """unstructuredmodel""" +1010 16 loss """crossentropy""" +1010 16 regularizer """no""" +1010 16 optimizer """adadelta""" +1010 16 training_loop """lcwa""" +1010 16 evaluator """rankbased""" +1010 17 dataset """kinships""" +1010 17 model """unstructuredmodel""" +1010 17 loss """crossentropy""" +1010 17 regularizer """no""" +1010 17 optimizer """adadelta""" +1010 17 training_loop """lcwa""" +1010 17 evaluator """rankbased""" +1010 18 dataset """kinships""" +1010 18 model """unstructuredmodel""" +1010 18 loss """crossentropy""" +1010 18 regularizer """no""" +1010 18 optimizer """adadelta""" +1010 18 training_loop """lcwa""" +1010 18 evaluator """rankbased""" +1010 19 dataset """kinships""" +1010 19 model """unstructuredmodel""" +1010 19 loss """crossentropy""" +1010 19 regularizer """no""" +1010 19 optimizer """adadelta""" +1010 19 training_loop """lcwa""" +1010 19 evaluator """rankbased""" +1010 20 dataset """kinships""" +1010 20 model """unstructuredmodel""" +1010 20 loss """crossentropy""" +1010 20 regularizer """no""" +1010 20 
optimizer """adadelta""" +1010 20 training_loop """lcwa""" +1010 20 evaluator """rankbased""" +1010 21 dataset """kinships""" +1010 21 model """unstructuredmodel""" +1010 21 loss """crossentropy""" +1010 21 regularizer """no""" +1010 21 optimizer """adadelta""" +1010 21 training_loop """lcwa""" +1010 21 evaluator """rankbased""" +1010 22 dataset """kinships""" +1010 22 model """unstructuredmodel""" +1010 22 loss """crossentropy""" +1010 22 regularizer """no""" +1010 22 optimizer """adadelta""" +1010 22 training_loop """lcwa""" +1010 22 evaluator """rankbased""" +1010 23 dataset """kinships""" +1010 23 model """unstructuredmodel""" +1010 23 loss """crossentropy""" +1010 23 regularizer """no""" +1010 23 optimizer """adadelta""" +1010 23 training_loop """lcwa""" +1010 23 evaluator """rankbased""" +1010 24 dataset """kinships""" +1010 24 model """unstructuredmodel""" +1010 24 loss """crossentropy""" +1010 24 regularizer """no""" +1010 24 optimizer """adadelta""" +1010 24 training_loop """lcwa""" +1010 24 evaluator """rankbased""" +1010 25 dataset """kinships""" +1010 25 model """unstructuredmodel""" +1010 25 loss """crossentropy""" +1010 25 regularizer """no""" +1010 25 optimizer """adadelta""" +1010 25 training_loop """lcwa""" +1010 25 evaluator """rankbased""" +1010 26 dataset """kinships""" +1010 26 model """unstructuredmodel""" +1010 26 loss """crossentropy""" +1010 26 regularizer """no""" +1010 26 optimizer """adadelta""" +1010 26 training_loop """lcwa""" +1010 26 evaluator """rankbased""" +1010 27 dataset """kinships""" +1010 27 model """unstructuredmodel""" +1010 27 loss """crossentropy""" +1010 27 regularizer """no""" +1010 27 optimizer """adadelta""" +1010 27 training_loop """lcwa""" +1010 27 evaluator """rankbased""" +1010 28 dataset """kinships""" +1010 28 model """unstructuredmodel""" +1010 28 loss """crossentropy""" +1010 28 regularizer """no""" +1010 28 optimizer """adadelta""" +1010 28 training_loop """lcwa""" +1010 28 evaluator """rankbased""" +1010 29 
dataset """kinships""" +1010 29 model """unstructuredmodel""" +1010 29 loss """crossentropy""" +1010 29 regularizer """no""" +1010 29 optimizer """adadelta""" +1010 29 training_loop """lcwa""" +1010 29 evaluator """rankbased""" +1010 30 dataset """kinships""" +1010 30 model """unstructuredmodel""" +1010 30 loss """crossentropy""" +1010 30 regularizer """no""" +1010 30 optimizer """adadelta""" +1010 30 training_loop """lcwa""" +1010 30 evaluator """rankbased""" +1010 31 dataset """kinships""" +1010 31 model """unstructuredmodel""" +1010 31 loss """crossentropy""" +1010 31 regularizer """no""" +1010 31 optimizer """adadelta""" +1010 31 training_loop """lcwa""" +1010 31 evaluator """rankbased""" +1010 32 dataset """kinships""" +1010 32 model """unstructuredmodel""" +1010 32 loss """crossentropy""" +1010 32 regularizer """no""" +1010 32 optimizer """adadelta""" +1010 32 training_loop """lcwa""" +1010 32 evaluator """rankbased""" +1010 33 dataset """kinships""" +1010 33 model """unstructuredmodel""" +1010 33 loss """crossentropy""" +1010 33 regularizer """no""" +1010 33 optimizer """adadelta""" +1010 33 training_loop """lcwa""" +1010 33 evaluator """rankbased""" +1010 34 dataset """kinships""" +1010 34 model """unstructuredmodel""" +1010 34 loss """crossentropy""" +1010 34 regularizer """no""" +1010 34 optimizer """adadelta""" +1010 34 training_loop """lcwa""" +1010 34 evaluator """rankbased""" +1010 35 dataset """kinships""" +1010 35 model """unstructuredmodel""" +1010 35 loss """crossentropy""" +1010 35 regularizer """no""" +1010 35 optimizer """adadelta""" +1010 35 training_loop """lcwa""" +1010 35 evaluator """rankbased""" +1010 36 dataset """kinships""" +1010 36 model """unstructuredmodel""" +1010 36 loss """crossentropy""" +1010 36 regularizer """no""" +1010 36 optimizer """adadelta""" +1010 36 training_loop """lcwa""" +1010 36 evaluator """rankbased""" +1010 37 dataset """kinships""" +1010 37 model """unstructuredmodel""" +1010 37 loss """crossentropy""" +1010 37 
regularizer """no""" +1010 37 optimizer """adadelta""" +1010 37 training_loop """lcwa""" +1010 37 evaluator """rankbased""" +1010 38 dataset """kinships""" +1010 38 model """unstructuredmodel""" +1010 38 loss """crossentropy""" +1010 38 regularizer """no""" +1010 38 optimizer """adadelta""" +1010 38 training_loop """lcwa""" +1010 38 evaluator """rankbased""" +1010 39 dataset """kinships""" +1010 39 model """unstructuredmodel""" +1010 39 loss """crossentropy""" +1010 39 regularizer """no""" +1010 39 optimizer """adadelta""" +1010 39 training_loop """lcwa""" +1010 39 evaluator """rankbased""" +1010 40 dataset """kinships""" +1010 40 model """unstructuredmodel""" +1010 40 loss """crossentropy""" +1010 40 regularizer """no""" +1010 40 optimizer """adadelta""" +1010 40 training_loop """lcwa""" +1010 40 evaluator """rankbased""" +1010 41 dataset """kinships""" +1010 41 model """unstructuredmodel""" +1010 41 loss """crossentropy""" +1010 41 regularizer """no""" +1010 41 optimizer """adadelta""" +1010 41 training_loop """lcwa""" +1010 41 evaluator """rankbased""" +1010 42 dataset """kinships""" +1010 42 model """unstructuredmodel""" +1010 42 loss """crossentropy""" +1010 42 regularizer """no""" +1010 42 optimizer """adadelta""" +1010 42 training_loop """lcwa""" +1010 42 evaluator """rankbased""" +1010 43 dataset """kinships""" +1010 43 model """unstructuredmodel""" +1010 43 loss """crossentropy""" +1010 43 regularizer """no""" +1010 43 optimizer """adadelta""" +1010 43 training_loop """lcwa""" +1010 43 evaluator """rankbased""" +1010 44 dataset """kinships""" +1010 44 model """unstructuredmodel""" +1010 44 loss """crossentropy""" +1010 44 regularizer """no""" +1010 44 optimizer """adadelta""" +1010 44 training_loop """lcwa""" +1010 44 evaluator """rankbased""" +1010 45 dataset """kinships""" +1010 45 model """unstructuredmodel""" +1010 45 loss """crossentropy""" +1010 45 regularizer """no""" +1010 45 optimizer """adadelta""" +1010 45 training_loop """lcwa""" +1010 45 
evaluator """rankbased""" +1010 46 dataset """kinships""" +1010 46 model """unstructuredmodel""" +1010 46 loss """crossentropy""" +1010 46 regularizer """no""" +1010 46 optimizer """adadelta""" +1010 46 training_loop """lcwa""" +1010 46 evaluator """rankbased""" +1010 47 dataset """kinships""" +1010 47 model """unstructuredmodel""" +1010 47 loss """crossentropy""" +1010 47 regularizer """no""" +1010 47 optimizer """adadelta""" +1010 47 training_loop """lcwa""" +1010 47 evaluator """rankbased""" +1010 48 dataset """kinships""" +1010 48 model """unstructuredmodel""" +1010 48 loss """crossentropy""" +1010 48 regularizer """no""" +1010 48 optimizer """adadelta""" +1010 48 training_loop """lcwa""" +1010 48 evaluator """rankbased""" +1010 49 dataset """kinships""" +1010 49 model """unstructuredmodel""" +1010 49 loss """crossentropy""" +1010 49 regularizer """no""" +1010 49 optimizer """adadelta""" +1010 49 training_loop """lcwa""" +1010 49 evaluator """rankbased""" +1010 50 dataset """kinships""" +1010 50 model """unstructuredmodel""" +1010 50 loss """crossentropy""" +1010 50 regularizer """no""" +1010 50 optimizer """adadelta""" +1010 50 training_loop """lcwa""" +1010 50 evaluator """rankbased""" +1010 51 dataset """kinships""" +1010 51 model """unstructuredmodel""" +1010 51 loss """crossentropy""" +1010 51 regularizer """no""" +1010 51 optimizer """adadelta""" +1010 51 training_loop """lcwa""" +1010 51 evaluator """rankbased""" +1010 52 dataset """kinships""" +1010 52 model """unstructuredmodel""" +1010 52 loss """crossentropy""" +1010 52 regularizer """no""" +1010 52 optimizer """adadelta""" +1010 52 training_loop """lcwa""" +1010 52 evaluator """rankbased""" +1010 53 dataset """kinships""" +1010 53 model """unstructuredmodel""" +1010 53 loss """crossentropy""" +1010 53 regularizer """no""" +1010 53 optimizer """adadelta""" +1010 53 training_loop """lcwa""" +1010 53 evaluator """rankbased""" +1010 54 dataset """kinships""" +1010 54 model """unstructuredmodel""" +1010 
54 loss """crossentropy""" +1010 54 regularizer """no""" +1010 54 optimizer """adadelta""" +1010 54 training_loop """lcwa""" +1010 54 evaluator """rankbased""" +1010 55 dataset """kinships""" +1010 55 model """unstructuredmodel""" +1010 55 loss """crossentropy""" +1010 55 regularizer """no""" +1010 55 optimizer """adadelta""" +1010 55 training_loop """lcwa""" +1010 55 evaluator """rankbased""" +1010 56 dataset """kinships""" +1010 56 model """unstructuredmodel""" +1010 56 loss """crossentropy""" +1010 56 regularizer """no""" +1010 56 optimizer """adadelta""" +1010 56 training_loop """lcwa""" +1010 56 evaluator """rankbased""" +1010 57 dataset """kinships""" +1010 57 model """unstructuredmodel""" +1010 57 loss """crossentropy""" +1010 57 regularizer """no""" +1010 57 optimizer """adadelta""" +1010 57 training_loop """lcwa""" +1010 57 evaluator """rankbased""" +1010 58 dataset """kinships""" +1010 58 model """unstructuredmodel""" +1010 58 loss """crossentropy""" +1010 58 regularizer """no""" +1010 58 optimizer """adadelta""" +1010 58 training_loop """lcwa""" +1010 58 evaluator """rankbased""" +1010 59 dataset """kinships""" +1010 59 model """unstructuredmodel""" +1010 59 loss """crossentropy""" +1010 59 regularizer """no""" +1010 59 optimizer """adadelta""" +1010 59 training_loop """lcwa""" +1010 59 evaluator """rankbased""" +1010 60 dataset """kinships""" +1010 60 model """unstructuredmodel""" +1010 60 loss """crossentropy""" +1010 60 regularizer """no""" +1010 60 optimizer """adadelta""" +1010 60 training_loop """lcwa""" +1010 60 evaluator """rankbased""" +1010 61 dataset """kinships""" +1010 61 model """unstructuredmodel""" +1010 61 loss """crossentropy""" +1010 61 regularizer """no""" +1010 61 optimizer """adadelta""" +1010 61 training_loop """lcwa""" +1010 61 evaluator """rankbased""" +1010 62 dataset """kinships""" +1010 62 model """unstructuredmodel""" +1010 62 loss """crossentropy""" +1010 62 regularizer """no""" +1010 62 optimizer """adadelta""" +1010 62 
training_loop """lcwa""" +1010 62 evaluator """rankbased""" +1010 63 dataset """kinships""" +1010 63 model """unstructuredmodel""" +1010 63 loss """crossentropy""" +1010 63 regularizer """no""" +1010 63 optimizer """adadelta""" +1010 63 training_loop """lcwa""" +1010 63 evaluator """rankbased""" +1010 64 dataset """kinships""" +1010 64 model """unstructuredmodel""" +1010 64 loss """crossentropy""" +1010 64 regularizer """no""" +1010 64 optimizer """adadelta""" +1010 64 training_loop """lcwa""" +1010 64 evaluator """rankbased""" +1010 65 dataset """kinships""" +1010 65 model """unstructuredmodel""" +1010 65 loss """crossentropy""" +1010 65 regularizer """no""" +1010 65 optimizer """adadelta""" +1010 65 training_loop """lcwa""" +1010 65 evaluator """rankbased""" +1010 66 dataset """kinships""" +1010 66 model """unstructuredmodel""" +1010 66 loss """crossentropy""" +1010 66 regularizer """no""" +1010 66 optimizer """adadelta""" +1010 66 training_loop """lcwa""" +1010 66 evaluator """rankbased""" +1010 67 dataset """kinships""" +1010 67 model """unstructuredmodel""" +1010 67 loss """crossentropy""" +1010 67 regularizer """no""" +1010 67 optimizer """adadelta""" +1010 67 training_loop """lcwa""" +1010 67 evaluator """rankbased""" +1010 68 dataset """kinships""" +1010 68 model """unstructuredmodel""" +1010 68 loss """crossentropy""" +1010 68 regularizer """no""" +1010 68 optimizer """adadelta""" +1010 68 training_loop """lcwa""" +1010 68 evaluator """rankbased""" +1010 69 dataset """kinships""" +1010 69 model """unstructuredmodel""" +1010 69 loss """crossentropy""" +1010 69 regularizer """no""" +1010 69 optimizer """adadelta""" +1010 69 training_loop """lcwa""" +1010 69 evaluator """rankbased""" +1010 70 dataset """kinships""" +1010 70 model """unstructuredmodel""" +1010 70 loss """crossentropy""" +1010 70 regularizer """no""" +1010 70 optimizer """adadelta""" +1010 70 training_loop """lcwa""" +1010 70 evaluator """rankbased""" +1010 71 dataset """kinships""" +1010 71 
model """unstructuredmodel""" +1010 71 loss """crossentropy""" +1010 71 regularizer """no""" +1010 71 optimizer """adadelta""" +1010 71 training_loop """lcwa""" +1010 71 evaluator """rankbased""" +1010 72 dataset """kinships""" +1010 72 model """unstructuredmodel""" +1010 72 loss """crossentropy""" +1010 72 regularizer """no""" +1010 72 optimizer """adadelta""" +1010 72 training_loop """lcwa""" +1010 72 evaluator """rankbased""" +1010 73 dataset """kinships""" +1010 73 model """unstructuredmodel""" +1010 73 loss """crossentropy""" +1010 73 regularizer """no""" +1010 73 optimizer """adadelta""" +1010 73 training_loop """lcwa""" +1010 73 evaluator """rankbased""" +1010 74 dataset """kinships""" +1010 74 model """unstructuredmodel""" +1010 74 loss """crossentropy""" +1010 74 regularizer """no""" +1010 74 optimizer """adadelta""" +1010 74 training_loop """lcwa""" +1010 74 evaluator """rankbased""" +1010 75 dataset """kinships""" +1010 75 model """unstructuredmodel""" +1010 75 loss """crossentropy""" +1010 75 regularizer """no""" +1010 75 optimizer """adadelta""" +1010 75 training_loop """lcwa""" +1010 75 evaluator """rankbased""" +1010 76 dataset """kinships""" +1010 76 model """unstructuredmodel""" +1010 76 loss """crossentropy""" +1010 76 regularizer """no""" +1010 76 optimizer """adadelta""" +1010 76 training_loop """lcwa""" +1010 76 evaluator """rankbased""" +1010 77 dataset """kinships""" +1010 77 model """unstructuredmodel""" +1010 77 loss """crossentropy""" +1010 77 regularizer """no""" +1010 77 optimizer """adadelta""" +1010 77 training_loop """lcwa""" +1010 77 evaluator """rankbased""" +1010 78 dataset """kinships""" +1010 78 model """unstructuredmodel""" +1010 78 loss """crossentropy""" +1010 78 regularizer """no""" +1010 78 optimizer """adadelta""" +1010 78 training_loop """lcwa""" +1010 78 evaluator """rankbased""" +1010 79 dataset """kinships""" +1010 79 model """unstructuredmodel""" +1010 79 loss """crossentropy""" +1010 79 regularizer """no""" +1010 79 
optimizer """adadelta""" +1010 79 training_loop """lcwa""" +1010 79 evaluator """rankbased""" +1010 80 dataset """kinships""" +1010 80 model """unstructuredmodel""" +1010 80 loss """crossentropy""" +1010 80 regularizer """no""" +1010 80 optimizer """adadelta""" +1010 80 training_loop """lcwa""" +1010 80 evaluator """rankbased""" +1010 81 dataset """kinships""" +1010 81 model """unstructuredmodel""" +1010 81 loss """crossentropy""" +1010 81 regularizer """no""" +1010 81 optimizer """adadelta""" +1010 81 training_loop """lcwa""" +1010 81 evaluator """rankbased""" +1010 82 dataset """kinships""" +1010 82 model """unstructuredmodel""" +1010 82 loss """crossentropy""" +1010 82 regularizer """no""" +1010 82 optimizer """adadelta""" +1010 82 training_loop """lcwa""" +1010 82 evaluator """rankbased""" +1010 83 dataset """kinships""" +1010 83 model """unstructuredmodel""" +1010 83 loss """crossentropy""" +1010 83 regularizer """no""" +1010 83 optimizer """adadelta""" +1010 83 training_loop """lcwa""" +1010 83 evaluator """rankbased""" +1010 84 dataset """kinships""" +1010 84 model """unstructuredmodel""" +1010 84 loss """crossentropy""" +1010 84 regularizer """no""" +1010 84 optimizer """adadelta""" +1010 84 training_loop """lcwa""" +1010 84 evaluator """rankbased""" +1010 85 dataset """kinships""" +1010 85 model """unstructuredmodel""" +1010 85 loss """crossentropy""" +1010 85 regularizer """no""" +1010 85 optimizer """adadelta""" +1010 85 training_loop """lcwa""" +1010 85 evaluator """rankbased""" +1010 86 dataset """kinships""" +1010 86 model """unstructuredmodel""" +1010 86 loss """crossentropy""" +1010 86 regularizer """no""" +1010 86 optimizer """adadelta""" +1010 86 training_loop """lcwa""" +1010 86 evaluator """rankbased""" +1010 87 dataset """kinships""" +1010 87 model """unstructuredmodel""" +1010 87 loss """crossentropy""" +1010 87 regularizer """no""" +1010 87 optimizer """adadelta""" +1010 87 training_loop """lcwa""" +1010 87 evaluator """rankbased""" +1010 88 
dataset """kinships""" +1010 88 model """unstructuredmodel""" +1010 88 loss """crossentropy""" +1010 88 regularizer """no""" +1010 88 optimizer """adadelta""" +1010 88 training_loop """lcwa""" +1010 88 evaluator """rankbased""" +1010 89 dataset """kinships""" +1010 89 model """unstructuredmodel""" +1010 89 loss """crossentropy""" +1010 89 regularizer """no""" +1010 89 optimizer """adadelta""" +1010 89 training_loop """lcwa""" +1010 89 evaluator """rankbased""" +1010 90 dataset """kinships""" +1010 90 model """unstructuredmodel""" +1010 90 loss """crossentropy""" +1010 90 regularizer """no""" +1010 90 optimizer """adadelta""" +1010 90 training_loop """lcwa""" +1010 90 evaluator """rankbased""" +1010 91 dataset """kinships""" +1010 91 model """unstructuredmodel""" +1010 91 loss """crossentropy""" +1010 91 regularizer """no""" +1010 91 optimizer """adadelta""" +1010 91 training_loop """lcwa""" +1010 91 evaluator """rankbased""" +1010 92 dataset """kinships""" +1010 92 model """unstructuredmodel""" +1010 92 loss """crossentropy""" +1010 92 regularizer """no""" +1010 92 optimizer """adadelta""" +1010 92 training_loop """lcwa""" +1010 92 evaluator """rankbased""" +1010 93 dataset """kinships""" +1010 93 model """unstructuredmodel""" +1010 93 loss """crossentropy""" +1010 93 regularizer """no""" +1010 93 optimizer """adadelta""" +1010 93 training_loop """lcwa""" +1010 93 evaluator """rankbased""" +1010 94 dataset """kinships""" +1010 94 model """unstructuredmodel""" +1010 94 loss """crossentropy""" +1010 94 regularizer """no""" +1010 94 optimizer """adadelta""" +1010 94 training_loop """lcwa""" +1010 94 evaluator """rankbased""" +1010 95 dataset """kinships""" +1010 95 model """unstructuredmodel""" +1010 95 loss """crossentropy""" +1010 95 regularizer """no""" +1010 95 optimizer """adadelta""" +1010 95 training_loop """lcwa""" +1010 95 evaluator """rankbased""" +1010 96 dataset """kinships""" +1010 96 model """unstructuredmodel""" +1010 96 loss """crossentropy""" +1010 96 
regularizer """no""" +1010 96 optimizer """adadelta""" +1010 96 training_loop """lcwa""" +1010 96 evaluator """rankbased""" +1010 97 dataset """kinships""" +1010 97 model """unstructuredmodel""" +1010 97 loss """crossentropy""" +1010 97 regularizer """no""" +1010 97 optimizer """adadelta""" +1010 97 training_loop """lcwa""" +1010 97 evaluator """rankbased""" +1010 98 dataset """kinships""" +1010 98 model """unstructuredmodel""" +1010 98 loss """crossentropy""" +1010 98 regularizer """no""" +1010 98 optimizer """adadelta""" +1010 98 training_loop """lcwa""" +1010 98 evaluator """rankbased""" +1010 99 dataset """kinships""" +1010 99 model """unstructuredmodel""" +1010 99 loss """crossentropy""" +1010 99 regularizer """no""" +1010 99 optimizer """adadelta""" +1010 99 training_loop """lcwa""" +1010 99 evaluator """rankbased""" +1010 100 dataset """kinships""" +1010 100 model """unstructuredmodel""" +1010 100 loss """crossentropy""" +1010 100 regularizer """no""" +1010 100 optimizer """adadelta""" +1010 100 training_loop """lcwa""" +1010 100 evaluator """rankbased""" +1011 1 model.embedding_dim 1.0 +1011 1 model.scoring_fct_norm 2.0 +1011 1 training.batch_size 1.0 +1011 1 training.label_smoothing 0.14349544852286894 +1011 2 model.embedding_dim 1.0 +1011 2 model.scoring_fct_norm 1.0 +1011 2 training.batch_size 2.0 +1011 2 training.label_smoothing 0.0059194570189286845 +1011 3 model.embedding_dim 2.0 +1011 3 model.scoring_fct_norm 1.0 +1011 3 training.batch_size 2.0 +1011 3 training.label_smoothing 0.19791062157777672 +1011 4 model.embedding_dim 0.0 +1011 4 model.scoring_fct_norm 2.0 +1011 4 training.batch_size 1.0 +1011 4 training.label_smoothing 0.9326698705533818 +1011 5 model.embedding_dim 1.0 +1011 5 model.scoring_fct_norm 1.0 +1011 5 training.batch_size 1.0 +1011 5 training.label_smoothing 0.1968086398443264 +1011 6 model.embedding_dim 1.0 +1011 6 model.scoring_fct_norm 1.0 +1011 6 training.batch_size 2.0 +1011 6 training.label_smoothing 0.008196952503080107 +1011 7 
model.embedding_dim 1.0 +1011 7 model.scoring_fct_norm 1.0 +1011 7 training.batch_size 2.0 +1011 7 training.label_smoothing 0.6298923181909821 +1011 8 model.embedding_dim 1.0 +1011 8 model.scoring_fct_norm 1.0 +1011 8 training.batch_size 0.0 +1011 8 training.label_smoothing 0.04512684486111655 +1011 9 model.embedding_dim 0.0 +1011 9 model.scoring_fct_norm 1.0 +1011 9 training.batch_size 0.0 +1011 9 training.label_smoothing 0.04767306075434364 +1011 10 model.embedding_dim 1.0 +1011 10 model.scoring_fct_norm 1.0 +1011 10 training.batch_size 0.0 +1011 10 training.label_smoothing 0.23017798349536414 +1011 11 model.embedding_dim 2.0 +1011 11 model.scoring_fct_norm 1.0 +1011 11 training.batch_size 0.0 +1011 11 training.label_smoothing 0.44566542875934445 +1011 12 model.embedding_dim 1.0 +1011 12 model.scoring_fct_norm 2.0 +1011 12 training.batch_size 2.0 +1011 12 training.label_smoothing 0.011623359038235227 +1011 13 model.embedding_dim 2.0 +1011 13 model.scoring_fct_norm 1.0 +1011 13 training.batch_size 1.0 +1011 13 training.label_smoothing 0.03323555670464234 +1011 14 model.embedding_dim 1.0 +1011 14 model.scoring_fct_norm 2.0 +1011 14 training.batch_size 1.0 +1011 14 training.label_smoothing 0.005856027925465546 +1011 15 model.embedding_dim 2.0 +1011 15 model.scoring_fct_norm 1.0 +1011 15 training.batch_size 1.0 +1011 15 training.label_smoothing 0.007397124624056693 +1011 16 model.embedding_dim 2.0 +1011 16 model.scoring_fct_norm 1.0 +1011 16 training.batch_size 1.0 +1011 16 training.label_smoothing 0.0010104344714954871 +1011 17 model.embedding_dim 2.0 +1011 17 model.scoring_fct_norm 1.0 +1011 17 training.batch_size 0.0 +1011 17 training.label_smoothing 0.33385868973956434 +1011 18 model.embedding_dim 2.0 +1011 18 model.scoring_fct_norm 2.0 +1011 18 training.batch_size 1.0 +1011 18 training.label_smoothing 0.023997328240706384 +1011 19 model.embedding_dim 1.0 +1011 19 model.scoring_fct_norm 2.0 +1011 19 training.batch_size 2.0 +1011 19 training.label_smoothing 
0.04457982219272441 +1011 20 model.embedding_dim 1.0 +1011 20 model.scoring_fct_norm 1.0 +1011 20 training.batch_size 0.0 +1011 20 training.label_smoothing 0.019874851618882076 +1011 21 model.embedding_dim 1.0 +1011 21 model.scoring_fct_norm 2.0 +1011 21 training.batch_size 1.0 +1011 21 training.label_smoothing 0.3621638580372603 +1011 22 model.embedding_dim 2.0 +1011 22 model.scoring_fct_norm 2.0 +1011 22 training.batch_size 1.0 +1011 22 training.label_smoothing 0.47920268795587434 +1011 23 model.embedding_dim 2.0 +1011 23 model.scoring_fct_norm 1.0 +1011 23 training.batch_size 1.0 +1011 23 training.label_smoothing 0.7244923033307189 +1011 24 model.embedding_dim 0.0 +1011 24 model.scoring_fct_norm 1.0 +1011 24 training.batch_size 0.0 +1011 24 training.label_smoothing 0.4057266946529661 +1011 25 model.embedding_dim 0.0 +1011 25 model.scoring_fct_norm 1.0 +1011 25 training.batch_size 2.0 +1011 25 training.label_smoothing 0.10387475488233273 +1011 26 model.embedding_dim 2.0 +1011 26 model.scoring_fct_norm 2.0 +1011 26 training.batch_size 0.0 +1011 26 training.label_smoothing 0.02711250627093868 +1011 27 model.embedding_dim 1.0 +1011 27 model.scoring_fct_norm 2.0 +1011 27 training.batch_size 1.0 +1011 27 training.label_smoothing 0.007237667383000014 +1011 28 model.embedding_dim 2.0 +1011 28 model.scoring_fct_norm 1.0 +1011 28 training.batch_size 2.0 +1011 28 training.label_smoothing 0.002035007824065451 +1011 29 model.embedding_dim 1.0 +1011 29 model.scoring_fct_norm 2.0 +1011 29 training.batch_size 2.0 +1011 29 training.label_smoothing 0.9031689918511953 +1011 30 model.embedding_dim 2.0 +1011 30 model.scoring_fct_norm 2.0 +1011 30 training.batch_size 0.0 +1011 30 training.label_smoothing 0.005730262088772685 +1011 31 model.embedding_dim 1.0 +1011 31 model.scoring_fct_norm 1.0 +1011 31 training.batch_size 1.0 +1011 31 training.label_smoothing 0.3937030565764654 +1011 32 model.embedding_dim 2.0 +1011 32 model.scoring_fct_norm 1.0 +1011 32 training.batch_size 1.0 +1011 
32 training.label_smoothing 0.08171969609456978 +1011 33 model.embedding_dim 2.0 +1011 33 model.scoring_fct_norm 2.0 +1011 33 training.batch_size 0.0 +1011 33 training.label_smoothing 0.004283647548609954 +1011 34 model.embedding_dim 0.0 +1011 34 model.scoring_fct_norm 1.0 +1011 34 training.batch_size 2.0 +1011 34 training.label_smoothing 0.09504195166812843 +1011 35 model.embedding_dim 0.0 +1011 35 model.scoring_fct_norm 2.0 +1011 35 training.batch_size 1.0 +1011 35 training.label_smoothing 0.050979306740959476 +1011 36 model.embedding_dim 0.0 +1011 36 model.scoring_fct_norm 1.0 +1011 36 training.batch_size 2.0 +1011 36 training.label_smoothing 0.005563457307123989 +1011 37 model.embedding_dim 0.0 +1011 37 model.scoring_fct_norm 2.0 +1011 37 training.batch_size 2.0 +1011 37 training.label_smoothing 0.06321590672213602 +1011 38 model.embedding_dim 2.0 +1011 38 model.scoring_fct_norm 1.0 +1011 38 training.batch_size 1.0 +1011 38 training.label_smoothing 0.0014167868635821283 +1011 39 model.embedding_dim 1.0 +1011 39 model.scoring_fct_norm 2.0 +1011 39 training.batch_size 2.0 +1011 39 training.label_smoothing 0.05235134089824447 +1011 40 model.embedding_dim 2.0 +1011 40 model.scoring_fct_norm 2.0 +1011 40 training.batch_size 2.0 +1011 40 training.label_smoothing 0.03886186935915746 +1011 41 model.embedding_dim 2.0 +1011 41 model.scoring_fct_norm 1.0 +1011 41 training.batch_size 0.0 +1011 41 training.label_smoothing 0.005707049704869829 +1011 42 model.embedding_dim 1.0 +1011 42 model.scoring_fct_norm 2.0 +1011 42 training.batch_size 0.0 +1011 42 training.label_smoothing 0.022837500287101197 +1011 43 model.embedding_dim 0.0 +1011 43 model.scoring_fct_norm 2.0 +1011 43 training.batch_size 2.0 +1011 43 training.label_smoothing 0.015376197741792965 +1011 44 model.embedding_dim 2.0 +1011 44 model.scoring_fct_norm 2.0 +1011 44 training.batch_size 1.0 +1011 44 training.label_smoothing 0.0012982559297101798 +1011 45 model.embedding_dim 0.0 +1011 45 model.scoring_fct_norm 1.0 
+1011 45 training.batch_size 2.0 +1011 45 training.label_smoothing 0.0012580909336203899 +1011 46 model.embedding_dim 2.0 +1011 46 model.scoring_fct_norm 1.0 +1011 46 training.batch_size 2.0 +1011 46 training.label_smoothing 0.1446867516893484 +1011 47 model.embedding_dim 2.0 +1011 47 model.scoring_fct_norm 1.0 +1011 47 training.batch_size 2.0 +1011 47 training.label_smoothing 0.7946783603559102 +1011 48 model.embedding_dim 1.0 +1011 48 model.scoring_fct_norm 1.0 +1011 48 training.batch_size 1.0 +1011 48 training.label_smoothing 0.0015943820877300215 +1011 49 model.embedding_dim 1.0 +1011 49 model.scoring_fct_norm 1.0 +1011 49 training.batch_size 2.0 +1011 49 training.label_smoothing 0.006134506623335117 +1011 50 model.embedding_dim 0.0 +1011 50 model.scoring_fct_norm 2.0 +1011 50 training.batch_size 1.0 +1011 50 training.label_smoothing 0.15381552074065086 +1011 51 model.embedding_dim 2.0 +1011 51 model.scoring_fct_norm 1.0 +1011 51 training.batch_size 2.0 +1011 51 training.label_smoothing 0.0013086169366796677 +1011 52 model.embedding_dim 2.0 +1011 52 model.scoring_fct_norm 1.0 +1011 52 training.batch_size 1.0 +1011 52 training.label_smoothing 0.002578827216203264 +1011 53 model.embedding_dim 1.0 +1011 53 model.scoring_fct_norm 2.0 +1011 53 training.batch_size 2.0 +1011 53 training.label_smoothing 0.0745345483337305 +1011 54 model.embedding_dim 2.0 +1011 54 model.scoring_fct_norm 1.0 +1011 54 training.batch_size 2.0 +1011 54 training.label_smoothing 0.12529666380571283 +1011 55 model.embedding_dim 1.0 +1011 55 model.scoring_fct_norm 2.0 +1011 55 training.batch_size 0.0 +1011 55 training.label_smoothing 0.4032129815691014 +1011 56 model.embedding_dim 0.0 +1011 56 model.scoring_fct_norm 1.0 +1011 56 training.batch_size 0.0 +1011 56 training.label_smoothing 0.019693702363467912 +1011 57 model.embedding_dim 0.0 +1011 57 model.scoring_fct_norm 2.0 +1011 57 training.batch_size 1.0 +1011 57 training.label_smoothing 0.001192403285973431 +1011 58 model.embedding_dim 1.0 
+1011 58 model.scoring_fct_norm 1.0 +1011 58 training.batch_size 1.0 +1011 58 training.label_smoothing 0.017462700421870685 +1011 59 model.embedding_dim 1.0 +1011 59 model.scoring_fct_norm 1.0 +1011 59 training.batch_size 1.0 +1011 59 training.label_smoothing 0.09047586006323742 +1011 60 model.embedding_dim 1.0 +1011 60 model.scoring_fct_norm 1.0 +1011 60 training.batch_size 0.0 +1011 60 training.label_smoothing 0.0010031942893423446 +1011 61 model.embedding_dim 1.0 +1011 61 model.scoring_fct_norm 1.0 +1011 61 training.batch_size 0.0 +1011 61 training.label_smoothing 0.0045707102491691996 +1011 62 model.embedding_dim 2.0 +1011 62 model.scoring_fct_norm 1.0 +1011 62 training.batch_size 2.0 +1011 62 training.label_smoothing 0.10933700245374298 +1011 63 model.embedding_dim 2.0 +1011 63 model.scoring_fct_norm 1.0 +1011 63 training.batch_size 2.0 +1011 63 training.label_smoothing 0.7893597669227623 +1011 64 model.embedding_dim 1.0 +1011 64 model.scoring_fct_norm 2.0 +1011 64 training.batch_size 1.0 +1011 64 training.label_smoothing 0.0013359782090395843 +1011 65 model.embedding_dim 0.0 +1011 65 model.scoring_fct_norm 2.0 +1011 65 training.batch_size 0.0 +1011 65 training.label_smoothing 0.053728381231968596 +1011 66 model.embedding_dim 1.0 +1011 66 model.scoring_fct_norm 1.0 +1011 66 training.batch_size 1.0 +1011 66 training.label_smoothing 0.0646697816274574 +1011 67 model.embedding_dim 2.0 +1011 67 model.scoring_fct_norm 2.0 +1011 67 training.batch_size 0.0 +1011 67 training.label_smoothing 0.005347943334322919 +1011 68 model.embedding_dim 1.0 +1011 68 model.scoring_fct_norm 2.0 +1011 68 training.batch_size 2.0 +1011 68 training.label_smoothing 0.29848285629688276 +1011 69 model.embedding_dim 2.0 +1011 69 model.scoring_fct_norm 1.0 +1011 69 training.batch_size 1.0 +1011 69 training.label_smoothing 0.02668502037551488 +1011 70 model.embedding_dim 2.0 +1011 70 model.scoring_fct_norm 1.0 +1011 70 training.batch_size 0.0 +1011 70 training.label_smoothing 0.978396919151514 
+1011 71 model.embedding_dim 2.0 +1011 71 model.scoring_fct_norm 2.0 +1011 71 training.batch_size 1.0 +1011 71 training.label_smoothing 0.12901016224878464 +1011 72 model.embedding_dim 2.0 +1011 72 model.scoring_fct_norm 1.0 +1011 72 training.batch_size 2.0 +1011 72 training.label_smoothing 0.7384607987867945 +1011 73 model.embedding_dim 1.0 +1011 73 model.scoring_fct_norm 2.0 +1011 73 training.batch_size 1.0 +1011 73 training.label_smoothing 0.012855264412112296 +1011 74 model.embedding_dim 0.0 +1011 74 model.scoring_fct_norm 2.0 +1011 74 training.batch_size 0.0 +1011 74 training.label_smoothing 0.0011669561028095979 +1011 75 model.embedding_dim 0.0 +1011 75 model.scoring_fct_norm 2.0 +1011 75 training.batch_size 2.0 +1011 75 training.label_smoothing 0.005658123215309187 +1011 76 model.embedding_dim 1.0 +1011 76 model.scoring_fct_norm 2.0 +1011 76 training.batch_size 0.0 +1011 76 training.label_smoothing 0.1395975685426156 +1011 77 model.embedding_dim 2.0 +1011 77 model.scoring_fct_norm 1.0 +1011 77 training.batch_size 1.0 +1011 77 training.label_smoothing 0.01644649061039823 +1011 78 model.embedding_dim 0.0 +1011 78 model.scoring_fct_norm 1.0 +1011 78 training.batch_size 0.0 +1011 78 training.label_smoothing 0.40988385734290117 +1011 79 model.embedding_dim 1.0 +1011 79 model.scoring_fct_norm 1.0 +1011 79 training.batch_size 2.0 +1011 79 training.label_smoothing 0.059769576251978214 +1011 80 model.embedding_dim 1.0 +1011 80 model.scoring_fct_norm 1.0 +1011 80 training.batch_size 1.0 +1011 80 training.label_smoothing 0.022969812972657544 +1011 81 model.embedding_dim 2.0 +1011 81 model.scoring_fct_norm 1.0 +1011 81 training.batch_size 0.0 +1011 81 training.label_smoothing 0.06252542493253271 +1011 82 model.embedding_dim 0.0 +1011 82 model.scoring_fct_norm 1.0 +1011 82 training.batch_size 1.0 +1011 82 training.label_smoothing 0.0018756629508014289 +1011 83 model.embedding_dim 0.0 +1011 83 model.scoring_fct_norm 2.0 +1011 83 training.batch_size 0.0 +1011 83 
training.label_smoothing 0.2125296445661109 +1011 84 model.embedding_dim 2.0 +1011 84 model.scoring_fct_norm 2.0 +1011 84 training.batch_size 1.0 +1011 84 training.label_smoothing 0.027644896046190227 +1011 85 model.embedding_dim 2.0 +1011 85 model.scoring_fct_norm 2.0 +1011 85 training.batch_size 2.0 +1011 85 training.label_smoothing 0.5437288724776606 +1011 86 model.embedding_dim 2.0 +1011 86 model.scoring_fct_norm 1.0 +1011 86 training.batch_size 2.0 +1011 86 training.label_smoothing 0.0708077047873479 +1011 87 model.embedding_dim 2.0 +1011 87 model.scoring_fct_norm 1.0 +1011 87 training.batch_size 1.0 +1011 87 training.label_smoothing 0.07001231554989666 +1011 88 model.embedding_dim 2.0 +1011 88 model.scoring_fct_norm 2.0 +1011 88 training.batch_size 1.0 +1011 88 training.label_smoothing 0.2785083024025302 +1011 89 model.embedding_dim 1.0 +1011 89 model.scoring_fct_norm 1.0 +1011 89 training.batch_size 0.0 +1011 89 training.label_smoothing 0.11919288035370514 +1011 90 model.embedding_dim 0.0 +1011 90 model.scoring_fct_norm 2.0 +1011 90 training.batch_size 0.0 +1011 90 training.label_smoothing 0.07533110416337103 +1011 91 model.embedding_dim 0.0 +1011 91 model.scoring_fct_norm 1.0 +1011 91 training.batch_size 1.0 +1011 91 training.label_smoothing 0.005238228726888232 +1011 92 model.embedding_dim 1.0 +1011 92 model.scoring_fct_norm 1.0 +1011 92 training.batch_size 1.0 +1011 92 training.label_smoothing 0.002425033101061627 +1011 93 model.embedding_dim 2.0 +1011 93 model.scoring_fct_norm 2.0 +1011 93 training.batch_size 1.0 +1011 93 training.label_smoothing 0.3637510656256514 +1011 94 model.embedding_dim 0.0 +1011 94 model.scoring_fct_norm 1.0 +1011 94 training.batch_size 0.0 +1011 94 training.label_smoothing 0.048150672370598294 +1011 95 model.embedding_dim 0.0 +1011 95 model.scoring_fct_norm 2.0 +1011 95 training.batch_size 2.0 +1011 95 training.label_smoothing 0.0024877680109283524 +1011 96 model.embedding_dim 2.0 +1011 96 model.scoring_fct_norm 1.0 +1011 96 
training.batch_size 2.0 +1011 96 training.label_smoothing 0.003401491286054628 +1011 97 model.embedding_dim 1.0 +1011 97 model.scoring_fct_norm 2.0 +1011 97 training.batch_size 0.0 +1011 97 training.label_smoothing 0.01795219920618108 +1011 98 model.embedding_dim 2.0 +1011 98 model.scoring_fct_norm 1.0 +1011 98 training.batch_size 0.0 +1011 98 training.label_smoothing 0.0543033879461457 +1011 99 model.embedding_dim 2.0 +1011 99 model.scoring_fct_norm 1.0 +1011 99 training.batch_size 1.0 +1011 99 training.label_smoothing 0.014247998222655033 +1011 100 model.embedding_dim 0.0 +1011 100 model.scoring_fct_norm 1.0 +1011 100 training.batch_size 1.0 +1011 100 training.label_smoothing 0.19936798393737984 +1011 1 dataset """kinships""" +1011 1 model """unstructuredmodel""" +1011 1 loss """crossentropy""" +1011 1 regularizer """no""" +1011 1 optimizer """adadelta""" +1011 1 training_loop """lcwa""" +1011 1 evaluator """rankbased""" +1011 2 dataset """kinships""" +1011 2 model """unstructuredmodel""" +1011 2 loss """crossentropy""" +1011 2 regularizer """no""" +1011 2 optimizer """adadelta""" +1011 2 training_loop """lcwa""" +1011 2 evaluator """rankbased""" +1011 3 dataset """kinships""" +1011 3 model """unstructuredmodel""" +1011 3 loss """crossentropy""" +1011 3 regularizer """no""" +1011 3 optimizer """adadelta""" +1011 3 training_loop """lcwa""" +1011 3 evaluator """rankbased""" +1011 4 dataset """kinships""" +1011 4 model """unstructuredmodel""" +1011 4 loss """crossentropy""" +1011 4 regularizer """no""" +1011 4 optimizer """adadelta""" +1011 4 training_loop """lcwa""" +1011 4 evaluator """rankbased""" +1011 5 dataset """kinships""" +1011 5 model """unstructuredmodel""" +1011 5 loss """crossentropy""" +1011 5 regularizer """no""" +1011 5 optimizer """adadelta""" +1011 5 training_loop """lcwa""" +1011 5 evaluator """rankbased""" +1011 6 dataset """kinships""" +1011 6 model """unstructuredmodel""" +1011 6 loss """crossentropy""" +1011 6 regularizer """no""" +1011 6 
optimizer """adadelta""" +1011 6 training_loop """lcwa""" +1011 6 evaluator """rankbased""" +1011 7 dataset """kinships""" +1011 7 model """unstructuredmodel""" +1011 7 loss """crossentropy""" +1011 7 regularizer """no""" +1011 7 optimizer """adadelta""" +1011 7 training_loop """lcwa""" +1011 7 evaluator """rankbased""" +1011 8 dataset """kinships""" +1011 8 model """unstructuredmodel""" +1011 8 loss """crossentropy""" +1011 8 regularizer """no""" +1011 8 optimizer """adadelta""" +1011 8 training_loop """lcwa""" +1011 8 evaluator """rankbased""" +1011 9 dataset """kinships""" +1011 9 model """unstructuredmodel""" +1011 9 loss """crossentropy""" +1011 9 regularizer """no""" +1011 9 optimizer """adadelta""" +1011 9 training_loop """lcwa""" +1011 9 evaluator """rankbased""" +1011 10 dataset """kinships""" +1011 10 model """unstructuredmodel""" +1011 10 loss """crossentropy""" +1011 10 regularizer """no""" +1011 10 optimizer """adadelta""" +1011 10 training_loop """lcwa""" +1011 10 evaluator """rankbased""" +1011 11 dataset """kinships""" +1011 11 model """unstructuredmodel""" +1011 11 loss """crossentropy""" +1011 11 regularizer """no""" +1011 11 optimizer """adadelta""" +1011 11 training_loop """lcwa""" +1011 11 evaluator """rankbased""" +1011 12 dataset """kinships""" +1011 12 model """unstructuredmodel""" +1011 12 loss """crossentropy""" +1011 12 regularizer """no""" +1011 12 optimizer """adadelta""" +1011 12 training_loop """lcwa""" +1011 12 evaluator """rankbased""" +1011 13 dataset """kinships""" +1011 13 model """unstructuredmodel""" +1011 13 loss """crossentropy""" +1011 13 regularizer """no""" +1011 13 optimizer """adadelta""" +1011 13 training_loop """lcwa""" +1011 13 evaluator """rankbased""" +1011 14 dataset """kinships""" +1011 14 model """unstructuredmodel""" +1011 14 loss """crossentropy""" +1011 14 regularizer """no""" +1011 14 optimizer """adadelta""" +1011 14 training_loop """lcwa""" +1011 14 evaluator """rankbased""" +1011 15 dataset """kinships""" 
+1011 15 model """unstructuredmodel""" +1011 15 loss """crossentropy""" +1011 15 regularizer """no""" +1011 15 optimizer """adadelta""" +1011 15 training_loop """lcwa""" +1011 15 evaluator """rankbased""" +1011 16 dataset """kinships""" +1011 16 model """unstructuredmodel""" +1011 16 loss """crossentropy""" +1011 16 regularizer """no""" +1011 16 optimizer """adadelta""" +1011 16 training_loop """lcwa""" +1011 16 evaluator """rankbased""" +1011 17 dataset """kinships""" +1011 17 model """unstructuredmodel""" +1011 17 loss """crossentropy""" +1011 17 regularizer """no""" +1011 17 optimizer """adadelta""" +1011 17 training_loop """lcwa""" +1011 17 evaluator """rankbased""" +1011 18 dataset """kinships""" +1011 18 model """unstructuredmodel""" +1011 18 loss """crossentropy""" +1011 18 regularizer """no""" +1011 18 optimizer """adadelta""" +1011 18 training_loop """lcwa""" +1011 18 evaluator """rankbased""" +1011 19 dataset """kinships""" +1011 19 model """unstructuredmodel""" +1011 19 loss """crossentropy""" +1011 19 regularizer """no""" +1011 19 optimizer """adadelta""" +1011 19 training_loop """lcwa""" +1011 19 evaluator """rankbased""" +1011 20 dataset """kinships""" +1011 20 model """unstructuredmodel""" +1011 20 loss """crossentropy""" +1011 20 regularizer """no""" +1011 20 optimizer """adadelta""" +1011 20 training_loop """lcwa""" +1011 20 evaluator """rankbased""" +1011 21 dataset """kinships""" +1011 21 model """unstructuredmodel""" +1011 21 loss """crossentropy""" +1011 21 regularizer """no""" +1011 21 optimizer """adadelta""" +1011 21 training_loop """lcwa""" +1011 21 evaluator """rankbased""" +1011 22 dataset """kinships""" +1011 22 model """unstructuredmodel""" +1011 22 loss """crossentropy""" +1011 22 regularizer """no""" +1011 22 optimizer """adadelta""" +1011 22 training_loop """lcwa""" +1011 22 evaluator """rankbased""" +1011 23 dataset """kinships""" +1011 23 model """unstructuredmodel""" +1011 23 loss """crossentropy""" +1011 23 regularizer """no""" 
+1011 23 optimizer """adadelta""" +1011 23 training_loop """lcwa""" +1011 23 evaluator """rankbased""" +1011 24 dataset """kinships""" +1011 24 model """unstructuredmodel""" +1011 24 loss """crossentropy""" +1011 24 regularizer """no""" +1011 24 optimizer """adadelta""" +1011 24 training_loop """lcwa""" +1011 24 evaluator """rankbased""" +1011 25 dataset """kinships""" +1011 25 model """unstructuredmodel""" +1011 25 loss """crossentropy""" +1011 25 regularizer """no""" +1011 25 optimizer """adadelta""" +1011 25 training_loop """lcwa""" +1011 25 evaluator """rankbased""" +1011 26 dataset """kinships""" +1011 26 model """unstructuredmodel""" +1011 26 loss """crossentropy""" +1011 26 regularizer """no""" +1011 26 optimizer """adadelta""" +1011 26 training_loop """lcwa""" +1011 26 evaluator """rankbased""" +1011 27 dataset """kinships""" +1011 27 model """unstructuredmodel""" +1011 27 loss """crossentropy""" +1011 27 regularizer """no""" +1011 27 optimizer """adadelta""" +1011 27 training_loop """lcwa""" +1011 27 evaluator """rankbased""" +1011 28 dataset """kinships""" +1011 28 model """unstructuredmodel""" +1011 28 loss """crossentropy""" +1011 28 regularizer """no""" +1011 28 optimizer """adadelta""" +1011 28 training_loop """lcwa""" +1011 28 evaluator """rankbased""" +1011 29 dataset """kinships""" +1011 29 model """unstructuredmodel""" +1011 29 loss """crossentropy""" +1011 29 regularizer """no""" +1011 29 optimizer """adadelta""" +1011 29 training_loop """lcwa""" +1011 29 evaluator """rankbased""" +1011 30 dataset """kinships""" +1011 30 model """unstructuredmodel""" +1011 30 loss """crossentropy""" +1011 30 regularizer """no""" +1011 30 optimizer """adadelta""" +1011 30 training_loop """lcwa""" +1011 30 evaluator """rankbased""" +1011 31 dataset """kinships""" +1011 31 model """unstructuredmodel""" +1011 31 loss """crossentropy""" +1011 31 regularizer """no""" +1011 31 optimizer """adadelta""" +1011 31 training_loop """lcwa""" +1011 31 evaluator """rankbased""" 
+1011 32 dataset """kinships""" +1011 32 model """unstructuredmodel""" +1011 32 loss """crossentropy""" +1011 32 regularizer """no""" +1011 32 optimizer """adadelta""" +1011 32 training_loop """lcwa""" +1011 32 evaluator """rankbased""" +1011 33 dataset """kinships""" +1011 33 model """unstructuredmodel""" +1011 33 loss """crossentropy""" +1011 33 regularizer """no""" +1011 33 optimizer """adadelta""" +1011 33 training_loop """lcwa""" +1011 33 evaluator """rankbased""" +1011 34 dataset """kinships""" +1011 34 model """unstructuredmodel""" +1011 34 loss """crossentropy""" +1011 34 regularizer """no""" +1011 34 optimizer """adadelta""" +1011 34 training_loop """lcwa""" +1011 34 evaluator """rankbased""" +1011 35 dataset """kinships""" +1011 35 model """unstructuredmodel""" +1011 35 loss """crossentropy""" +1011 35 regularizer """no""" +1011 35 optimizer """adadelta""" +1011 35 training_loop """lcwa""" +1011 35 evaluator """rankbased""" +1011 36 dataset """kinships""" +1011 36 model """unstructuredmodel""" +1011 36 loss """crossentropy""" +1011 36 regularizer """no""" +1011 36 optimizer """adadelta""" +1011 36 training_loop """lcwa""" +1011 36 evaluator """rankbased""" +1011 37 dataset """kinships""" +1011 37 model """unstructuredmodel""" +1011 37 loss """crossentropy""" +1011 37 regularizer """no""" +1011 37 optimizer """adadelta""" +1011 37 training_loop """lcwa""" +1011 37 evaluator """rankbased""" +1011 38 dataset """kinships""" +1011 38 model """unstructuredmodel""" +1011 38 loss """crossentropy""" +1011 38 regularizer """no""" +1011 38 optimizer """adadelta""" +1011 38 training_loop """lcwa""" +1011 38 evaluator """rankbased""" +1011 39 dataset """kinships""" +1011 39 model """unstructuredmodel""" +1011 39 loss """crossentropy""" +1011 39 regularizer """no""" +1011 39 optimizer """adadelta""" +1011 39 training_loop """lcwa""" +1011 39 evaluator """rankbased""" +1011 40 dataset """kinships""" +1011 40 model """unstructuredmodel""" +1011 40 loss """crossentropy""" 
+1011 40 regularizer """no""" +1011 40 optimizer """adadelta""" +1011 40 training_loop """lcwa""" +1011 40 evaluator """rankbased""" +1011 41 dataset """kinships""" +1011 41 model """unstructuredmodel""" +1011 41 loss """crossentropy""" +1011 41 regularizer """no""" +1011 41 optimizer """adadelta""" +1011 41 training_loop """lcwa""" +1011 41 evaluator """rankbased""" +1011 42 dataset """kinships""" +1011 42 model """unstructuredmodel""" +1011 42 loss """crossentropy""" +1011 42 regularizer """no""" +1011 42 optimizer """adadelta""" +1011 42 training_loop """lcwa""" +1011 42 evaluator """rankbased""" +1011 43 dataset """kinships""" +1011 43 model """unstructuredmodel""" +1011 43 loss """crossentropy""" +1011 43 regularizer """no""" +1011 43 optimizer """adadelta""" +1011 43 training_loop """lcwa""" +1011 43 evaluator """rankbased""" +1011 44 dataset """kinships""" +1011 44 model """unstructuredmodel""" +1011 44 loss """crossentropy""" +1011 44 regularizer """no""" +1011 44 optimizer """adadelta""" +1011 44 training_loop """lcwa""" +1011 44 evaluator """rankbased""" +1011 45 dataset """kinships""" +1011 45 model """unstructuredmodel""" +1011 45 loss """crossentropy""" +1011 45 regularizer """no""" +1011 45 optimizer """adadelta""" +1011 45 training_loop """lcwa""" +1011 45 evaluator """rankbased""" +1011 46 dataset """kinships""" +1011 46 model """unstructuredmodel""" +1011 46 loss """crossentropy""" +1011 46 regularizer """no""" +1011 46 optimizer """adadelta""" +1011 46 training_loop """lcwa""" +1011 46 evaluator """rankbased""" +1011 47 dataset """kinships""" +1011 47 model """unstructuredmodel""" +1011 47 loss """crossentropy""" +1011 47 regularizer """no""" +1011 47 optimizer """adadelta""" +1011 47 training_loop """lcwa""" +1011 47 evaluator """rankbased""" +1011 48 dataset """kinships""" +1011 48 model """unstructuredmodel""" +1011 48 loss """crossentropy""" +1011 48 regularizer """no""" +1011 48 optimizer """adadelta""" +1011 48 training_loop """lcwa""" +1011 
48 evaluator """rankbased""" +1011 49 dataset """kinships""" +1011 49 model """unstructuredmodel""" +1011 49 loss """crossentropy""" +1011 49 regularizer """no""" +1011 49 optimizer """adadelta""" +1011 49 training_loop """lcwa""" +1011 49 evaluator """rankbased""" +1011 50 dataset """kinships""" +1011 50 model """unstructuredmodel""" +1011 50 loss """crossentropy""" +1011 50 regularizer """no""" +1011 50 optimizer """adadelta""" +1011 50 training_loop """lcwa""" +1011 50 evaluator """rankbased""" +1011 51 dataset """kinships""" +1011 51 model """unstructuredmodel""" +1011 51 loss """crossentropy""" +1011 51 regularizer """no""" +1011 51 optimizer """adadelta""" +1011 51 training_loop """lcwa""" +1011 51 evaluator """rankbased""" +1011 52 dataset """kinships""" +1011 52 model """unstructuredmodel""" +1011 52 loss """crossentropy""" +1011 52 regularizer """no""" +1011 52 optimizer """adadelta""" +1011 52 training_loop """lcwa""" +1011 52 evaluator """rankbased""" +1011 53 dataset """kinships""" +1011 53 model """unstructuredmodel""" +1011 53 loss """crossentropy""" +1011 53 regularizer """no""" +1011 53 optimizer """adadelta""" +1011 53 training_loop """lcwa""" +1011 53 evaluator """rankbased""" +1011 54 dataset """kinships""" +1011 54 model """unstructuredmodel""" +1011 54 loss """crossentropy""" +1011 54 regularizer """no""" +1011 54 optimizer """adadelta""" +1011 54 training_loop """lcwa""" +1011 54 evaluator """rankbased""" +1011 55 dataset """kinships""" +1011 55 model """unstructuredmodel""" +1011 55 loss """crossentropy""" +1011 55 regularizer """no""" +1011 55 optimizer """adadelta""" +1011 55 training_loop """lcwa""" +1011 55 evaluator """rankbased""" +1011 56 dataset """kinships""" +1011 56 model """unstructuredmodel""" +1011 56 loss """crossentropy""" +1011 56 regularizer """no""" +1011 56 optimizer """adadelta""" +1011 56 training_loop """lcwa""" +1011 56 evaluator """rankbased""" +1011 57 dataset """kinships""" +1011 57 model """unstructuredmodel""" 
+1011 57 loss """crossentropy""" +1011 57 regularizer """no""" +1011 57 optimizer """adadelta""" +1011 57 training_loop """lcwa""" +1011 57 evaluator """rankbased""" +1011 58 dataset """kinships""" +1011 58 model """unstructuredmodel""" +1011 58 loss """crossentropy""" +1011 58 regularizer """no""" +1011 58 optimizer """adadelta""" +1011 58 training_loop """lcwa""" +1011 58 evaluator """rankbased""" +1011 59 dataset """kinships""" +1011 59 model """unstructuredmodel""" +1011 59 loss """crossentropy""" +1011 59 regularizer """no""" +1011 59 optimizer """adadelta""" +1011 59 training_loop """lcwa""" +1011 59 evaluator """rankbased""" +1011 60 dataset """kinships""" +1011 60 model """unstructuredmodel""" +1011 60 loss """crossentropy""" +1011 60 regularizer """no""" +1011 60 optimizer """adadelta""" +1011 60 training_loop """lcwa""" +1011 60 evaluator """rankbased""" +1011 61 dataset """kinships""" +1011 61 model """unstructuredmodel""" +1011 61 loss """crossentropy""" +1011 61 regularizer """no""" +1011 61 optimizer """adadelta""" +1011 61 training_loop """lcwa""" +1011 61 evaluator """rankbased""" +1011 62 dataset """kinships""" +1011 62 model """unstructuredmodel""" +1011 62 loss """crossentropy""" +1011 62 regularizer """no""" +1011 62 optimizer """adadelta""" +1011 62 training_loop """lcwa""" +1011 62 evaluator """rankbased""" +1011 63 dataset """kinships""" +1011 63 model """unstructuredmodel""" +1011 63 loss """crossentropy""" +1011 63 regularizer """no""" +1011 63 optimizer """adadelta""" +1011 63 training_loop """lcwa""" +1011 63 evaluator """rankbased""" +1011 64 dataset """kinships""" +1011 64 model """unstructuredmodel""" +1011 64 loss """crossentropy""" +1011 64 regularizer """no""" +1011 64 optimizer """adadelta""" +1011 64 training_loop """lcwa""" +1011 64 evaluator """rankbased""" +1011 65 dataset """kinships""" +1011 65 model """unstructuredmodel""" +1011 65 loss """crossentropy""" +1011 65 regularizer """no""" +1011 65 optimizer """adadelta""" +1011 
65 training_loop """lcwa""" +1011 65 evaluator """rankbased""" +1011 66 dataset """kinships""" +1011 66 model """unstructuredmodel""" +1011 66 loss """crossentropy""" +1011 66 regularizer """no""" +1011 66 optimizer """adadelta""" +1011 66 training_loop """lcwa""" +1011 66 evaluator """rankbased""" +1011 67 dataset """kinships""" +1011 67 model """unstructuredmodel""" +1011 67 loss """crossentropy""" +1011 67 regularizer """no""" +1011 67 optimizer """adadelta""" +1011 67 training_loop """lcwa""" +1011 67 evaluator """rankbased""" +1011 68 dataset """kinships""" +1011 68 model """unstructuredmodel""" +1011 68 loss """crossentropy""" +1011 68 regularizer """no""" +1011 68 optimizer """adadelta""" +1011 68 training_loop """lcwa""" +1011 68 evaluator """rankbased""" +1011 69 dataset """kinships""" +1011 69 model """unstructuredmodel""" +1011 69 loss """crossentropy""" +1011 69 regularizer """no""" +1011 69 optimizer """adadelta""" +1011 69 training_loop """lcwa""" +1011 69 evaluator """rankbased""" +1011 70 dataset """kinships""" +1011 70 model """unstructuredmodel""" +1011 70 loss """crossentropy""" +1011 70 regularizer """no""" +1011 70 optimizer """adadelta""" +1011 70 training_loop """lcwa""" +1011 70 evaluator """rankbased""" +1011 71 dataset """kinships""" +1011 71 model """unstructuredmodel""" +1011 71 loss """crossentropy""" +1011 71 regularizer """no""" +1011 71 optimizer """adadelta""" +1011 71 training_loop """lcwa""" +1011 71 evaluator """rankbased""" +1011 72 dataset """kinships""" +1011 72 model """unstructuredmodel""" +1011 72 loss """crossentropy""" +1011 72 regularizer """no""" +1011 72 optimizer """adadelta""" +1011 72 training_loop """lcwa""" +1011 72 evaluator """rankbased""" +1011 73 dataset """kinships""" +1011 73 model """unstructuredmodel""" +1011 73 loss """crossentropy""" +1011 73 regularizer """no""" +1011 73 optimizer """adadelta""" +1011 73 training_loop """lcwa""" +1011 73 evaluator """rankbased""" +1011 74 dataset """kinships""" +1011 74 
model """unstructuredmodel""" +1011 74 loss """crossentropy""" +1011 74 regularizer """no""" +1011 74 optimizer """adadelta""" +1011 74 training_loop """lcwa""" +1011 74 evaluator """rankbased""" +1011 75 dataset """kinships""" +1011 75 model """unstructuredmodel""" +1011 75 loss """crossentropy""" +1011 75 regularizer """no""" +1011 75 optimizer """adadelta""" +1011 75 training_loop """lcwa""" +1011 75 evaluator """rankbased""" +1011 76 dataset """kinships""" +1011 76 model """unstructuredmodel""" +1011 76 loss """crossentropy""" +1011 76 regularizer """no""" +1011 76 optimizer """adadelta""" +1011 76 training_loop """lcwa""" +1011 76 evaluator """rankbased""" +1011 77 dataset """kinships""" +1011 77 model """unstructuredmodel""" +1011 77 loss """crossentropy""" +1011 77 regularizer """no""" +1011 77 optimizer """adadelta""" +1011 77 training_loop """lcwa""" +1011 77 evaluator """rankbased""" +1011 78 dataset """kinships""" +1011 78 model """unstructuredmodel""" +1011 78 loss """crossentropy""" +1011 78 regularizer """no""" +1011 78 optimizer """adadelta""" +1011 78 training_loop """lcwa""" +1011 78 evaluator """rankbased""" +1011 79 dataset """kinships""" +1011 79 model """unstructuredmodel""" +1011 79 loss """crossentropy""" +1011 79 regularizer """no""" +1011 79 optimizer """adadelta""" +1011 79 training_loop """lcwa""" +1011 79 evaluator """rankbased""" +1011 80 dataset """kinships""" +1011 80 model """unstructuredmodel""" +1011 80 loss """crossentropy""" +1011 80 regularizer """no""" +1011 80 optimizer """adadelta""" +1011 80 training_loop """lcwa""" +1011 80 evaluator """rankbased""" +1011 81 dataset """kinships""" +1011 81 model """unstructuredmodel""" +1011 81 loss """crossentropy""" +1011 81 regularizer """no""" +1011 81 optimizer """adadelta""" +1011 81 training_loop """lcwa""" +1011 81 evaluator """rankbased""" +1011 82 dataset """kinships""" +1011 82 model """unstructuredmodel""" +1011 82 loss """crossentropy""" +1011 82 regularizer """no""" +1011 82 
optimizer """adadelta""" +1011 82 training_loop """lcwa""" +1011 82 evaluator """rankbased""" +1011 83 dataset """kinships""" +1011 83 model """unstructuredmodel""" +1011 83 loss """crossentropy""" +1011 83 regularizer """no""" +1011 83 optimizer """adadelta""" +1011 83 training_loop """lcwa""" +1011 83 evaluator """rankbased""" +1011 84 dataset """kinships""" +1011 84 model """unstructuredmodel""" +1011 84 loss """crossentropy""" +1011 84 regularizer """no""" +1011 84 optimizer """adadelta""" +1011 84 training_loop """lcwa""" +1011 84 evaluator """rankbased""" +1011 85 dataset """kinships""" +1011 85 model """unstructuredmodel""" +1011 85 loss """crossentropy""" +1011 85 regularizer """no""" +1011 85 optimizer """adadelta""" +1011 85 training_loop """lcwa""" +1011 85 evaluator """rankbased""" +1011 86 dataset """kinships""" +1011 86 model """unstructuredmodel""" +1011 86 loss """crossentropy""" +1011 86 regularizer """no""" +1011 86 optimizer """adadelta""" +1011 86 training_loop """lcwa""" +1011 86 evaluator """rankbased""" +1011 87 dataset """kinships""" +1011 87 model """unstructuredmodel""" +1011 87 loss """crossentropy""" +1011 87 regularizer """no""" +1011 87 optimizer """adadelta""" +1011 87 training_loop """lcwa""" +1011 87 evaluator """rankbased""" +1011 88 dataset """kinships""" +1011 88 model """unstructuredmodel""" +1011 88 loss """crossentropy""" +1011 88 regularizer """no""" +1011 88 optimizer """adadelta""" +1011 88 training_loop """lcwa""" +1011 88 evaluator """rankbased""" +1011 89 dataset """kinships""" +1011 89 model """unstructuredmodel""" +1011 89 loss """crossentropy""" +1011 89 regularizer """no""" +1011 89 optimizer """adadelta""" +1011 89 training_loop """lcwa""" +1011 89 evaluator """rankbased""" +1011 90 dataset """kinships""" +1011 90 model """unstructuredmodel""" +1011 90 loss """crossentropy""" +1011 90 regularizer """no""" +1011 90 optimizer """adadelta""" +1011 90 training_loop """lcwa""" +1011 90 evaluator """rankbased""" +1011 91 
dataset """kinships""" +1011 91 model """unstructuredmodel""" +1011 91 loss """crossentropy""" +1011 91 regularizer """no""" +1011 91 optimizer """adadelta""" +1011 91 training_loop """lcwa""" +1011 91 evaluator """rankbased""" +1011 92 dataset """kinships""" +1011 92 model """unstructuredmodel""" +1011 92 loss """crossentropy""" +1011 92 regularizer """no""" +1011 92 optimizer """adadelta""" +1011 92 training_loop """lcwa""" +1011 92 evaluator """rankbased""" +1011 93 dataset """kinships""" +1011 93 model """unstructuredmodel""" +1011 93 loss """crossentropy""" +1011 93 regularizer """no""" +1011 93 optimizer """adadelta""" +1011 93 training_loop """lcwa""" +1011 93 evaluator """rankbased""" +1011 94 dataset """kinships""" +1011 94 model """unstructuredmodel""" +1011 94 loss """crossentropy""" +1011 94 regularizer """no""" +1011 94 optimizer """adadelta""" +1011 94 training_loop """lcwa""" +1011 94 evaluator """rankbased""" +1011 95 dataset """kinships""" +1011 95 model """unstructuredmodel""" +1011 95 loss """crossentropy""" +1011 95 regularizer """no""" +1011 95 optimizer """adadelta""" +1011 95 training_loop """lcwa""" +1011 95 evaluator """rankbased""" +1011 96 dataset """kinships""" +1011 96 model """unstructuredmodel""" +1011 96 loss """crossentropy""" +1011 96 regularizer """no""" +1011 96 optimizer """adadelta""" +1011 96 training_loop """lcwa""" +1011 96 evaluator """rankbased""" +1011 97 dataset """kinships""" +1011 97 model """unstructuredmodel""" +1011 97 loss """crossentropy""" +1011 97 regularizer """no""" +1011 97 optimizer """adadelta""" +1011 97 training_loop """lcwa""" +1011 97 evaluator """rankbased""" +1011 98 dataset """kinships""" +1011 98 model """unstructuredmodel""" +1011 98 loss """crossentropy""" +1011 98 regularizer """no""" +1011 98 optimizer """adadelta""" +1011 98 training_loop """lcwa""" +1011 98 evaluator """rankbased""" +1011 99 dataset """kinships""" +1011 99 model """unstructuredmodel""" +1011 99 loss """crossentropy""" +1011 99 
regularizer """no""" +1011 99 optimizer """adadelta""" +1011 99 training_loop """lcwa""" +1011 99 evaluator """rankbased""" +1011 100 dataset """kinships""" +1011 100 model """unstructuredmodel""" +1011 100 loss """crossentropy""" +1011 100 regularizer """no""" +1011 100 optimizer """adadelta""" +1011 100 training_loop """lcwa""" +1011 100 evaluator """rankbased""" +1012 1 model.embedding_dim 2.0 +1012 1 model.scoring_fct_norm 2.0 +1012 1 negative_sampler.num_negs_per_pos 13.0 +1012 1 training.batch_size 1.0 +1012 2 model.embedding_dim 2.0 +1012 2 model.scoring_fct_norm 2.0 +1012 2 negative_sampler.num_negs_per_pos 70.0 +1012 2 training.batch_size 1.0 +1012 3 model.embedding_dim 2.0 +1012 3 model.scoring_fct_norm 2.0 +1012 3 negative_sampler.num_negs_per_pos 53.0 +1012 3 training.batch_size 0.0 +1012 4 model.embedding_dim 2.0 +1012 4 model.scoring_fct_norm 1.0 +1012 4 negative_sampler.num_negs_per_pos 28.0 +1012 4 training.batch_size 0.0 +1012 5 model.embedding_dim 0.0 +1012 5 model.scoring_fct_norm 2.0 +1012 5 negative_sampler.num_negs_per_pos 29.0 +1012 5 training.batch_size 1.0 +1012 6 model.embedding_dim 1.0 +1012 6 model.scoring_fct_norm 2.0 +1012 6 negative_sampler.num_negs_per_pos 30.0 +1012 6 training.batch_size 0.0 +1012 7 model.embedding_dim 2.0 +1012 7 model.scoring_fct_norm 1.0 +1012 7 negative_sampler.num_negs_per_pos 18.0 +1012 7 training.batch_size 1.0 +1012 8 model.embedding_dim 1.0 +1012 8 model.scoring_fct_norm 1.0 +1012 8 negative_sampler.num_negs_per_pos 49.0 +1012 8 training.batch_size 2.0 +1012 9 model.embedding_dim 2.0 +1012 9 model.scoring_fct_norm 1.0 +1012 9 negative_sampler.num_negs_per_pos 58.0 +1012 9 training.batch_size 1.0 +1012 10 model.embedding_dim 0.0 +1012 10 model.scoring_fct_norm 2.0 +1012 10 negative_sampler.num_negs_per_pos 2.0 +1012 10 training.batch_size 2.0 +1012 11 model.embedding_dim 0.0 +1012 11 model.scoring_fct_norm 2.0 +1012 11 negative_sampler.num_negs_per_pos 99.0 +1012 11 training.batch_size 0.0 +1012 12 
model.embedding_dim 1.0 +1012 12 model.scoring_fct_norm 2.0 +1012 12 negative_sampler.num_negs_per_pos 97.0 +1012 12 training.batch_size 1.0 +1012 13 model.embedding_dim 0.0 +1012 13 model.scoring_fct_norm 2.0 +1012 13 negative_sampler.num_negs_per_pos 83.0 +1012 13 training.batch_size 2.0 +1012 14 model.embedding_dim 2.0 +1012 14 model.scoring_fct_norm 2.0 +1012 14 negative_sampler.num_negs_per_pos 48.0 +1012 14 training.batch_size 1.0 +1012 15 model.embedding_dim 2.0 +1012 15 model.scoring_fct_norm 1.0 +1012 15 negative_sampler.num_negs_per_pos 14.0 +1012 15 training.batch_size 1.0 +1012 16 model.embedding_dim 2.0 +1012 16 model.scoring_fct_norm 2.0 +1012 16 negative_sampler.num_negs_per_pos 68.0 +1012 16 training.batch_size 1.0 +1012 17 model.embedding_dim 2.0 +1012 17 model.scoring_fct_norm 2.0 +1012 17 negative_sampler.num_negs_per_pos 61.0 +1012 17 training.batch_size 1.0 +1012 18 model.embedding_dim 0.0 +1012 18 model.scoring_fct_norm 1.0 +1012 18 negative_sampler.num_negs_per_pos 69.0 +1012 18 training.batch_size 2.0 +1012 19 model.embedding_dim 0.0 +1012 19 model.scoring_fct_norm 2.0 +1012 19 negative_sampler.num_negs_per_pos 86.0 +1012 19 training.batch_size 0.0 +1012 20 model.embedding_dim 0.0 +1012 20 model.scoring_fct_norm 2.0 +1012 20 negative_sampler.num_negs_per_pos 9.0 +1012 20 training.batch_size 2.0 +1012 21 model.embedding_dim 1.0 +1012 21 model.scoring_fct_norm 2.0 +1012 21 negative_sampler.num_negs_per_pos 11.0 +1012 21 training.batch_size 1.0 +1012 22 model.embedding_dim 2.0 +1012 22 model.scoring_fct_norm 1.0 +1012 22 negative_sampler.num_negs_per_pos 74.0 +1012 22 training.batch_size 0.0 +1012 23 model.embedding_dim 1.0 +1012 23 model.scoring_fct_norm 2.0 +1012 23 negative_sampler.num_negs_per_pos 12.0 +1012 23 training.batch_size 1.0 +1012 24 model.embedding_dim 2.0 +1012 24 model.scoring_fct_norm 2.0 +1012 24 negative_sampler.num_negs_per_pos 5.0 +1012 24 training.batch_size 0.0 +1012 25 model.embedding_dim 0.0 +1012 25 
model.scoring_fct_norm 2.0 +1012 25 negative_sampler.num_negs_per_pos 56.0 +1012 25 training.batch_size 0.0 +1012 26 model.embedding_dim 0.0 +1012 26 model.scoring_fct_norm 2.0 +1012 26 negative_sampler.num_negs_per_pos 21.0 +1012 26 training.batch_size 2.0 +1012 27 model.embedding_dim 2.0 +1012 27 model.scoring_fct_norm 1.0 +1012 27 negative_sampler.num_negs_per_pos 46.0 +1012 27 training.batch_size 2.0 +1012 28 model.embedding_dim 2.0 +1012 28 model.scoring_fct_norm 2.0 +1012 28 negative_sampler.num_negs_per_pos 63.0 +1012 28 training.batch_size 1.0 +1012 29 model.embedding_dim 2.0 +1012 29 model.scoring_fct_norm 2.0 +1012 29 negative_sampler.num_negs_per_pos 16.0 +1012 29 training.batch_size 2.0 +1012 30 model.embedding_dim 2.0 +1012 30 model.scoring_fct_norm 1.0 +1012 30 negative_sampler.num_negs_per_pos 56.0 +1012 30 training.batch_size 2.0 +1012 31 model.embedding_dim 2.0 +1012 31 model.scoring_fct_norm 1.0 +1012 31 negative_sampler.num_negs_per_pos 42.0 +1012 31 training.batch_size 2.0 +1012 32 model.embedding_dim 1.0 +1012 32 model.scoring_fct_norm 1.0 +1012 32 negative_sampler.num_negs_per_pos 29.0 +1012 32 training.batch_size 2.0 +1012 33 model.embedding_dim 0.0 +1012 33 model.scoring_fct_norm 1.0 +1012 33 negative_sampler.num_negs_per_pos 81.0 +1012 33 training.batch_size 2.0 +1012 34 model.embedding_dim 2.0 +1012 34 model.scoring_fct_norm 2.0 +1012 34 negative_sampler.num_negs_per_pos 97.0 +1012 34 training.batch_size 0.0 +1012 35 model.embedding_dim 1.0 +1012 35 model.scoring_fct_norm 1.0 +1012 35 negative_sampler.num_negs_per_pos 16.0 +1012 35 training.batch_size 1.0 +1012 36 model.embedding_dim 0.0 +1012 36 model.scoring_fct_norm 1.0 +1012 36 negative_sampler.num_negs_per_pos 85.0 +1012 36 training.batch_size 2.0 +1012 37 model.embedding_dim 2.0 +1012 37 model.scoring_fct_norm 1.0 +1012 37 negative_sampler.num_negs_per_pos 7.0 +1012 37 training.batch_size 1.0 +1012 38 model.embedding_dim 1.0 +1012 38 model.scoring_fct_norm 2.0 +1012 38 
negative_sampler.num_negs_per_pos 37.0 +1012 38 training.batch_size 0.0 +1012 39 model.embedding_dim 1.0 +1012 39 model.scoring_fct_norm 2.0 +1012 39 negative_sampler.num_negs_per_pos 94.0 +1012 39 training.batch_size 0.0 +1012 40 model.embedding_dim 0.0 +1012 40 model.scoring_fct_norm 1.0 +1012 40 negative_sampler.num_negs_per_pos 59.0 +1012 40 training.batch_size 2.0 +1012 41 model.embedding_dim 1.0 +1012 41 model.scoring_fct_norm 2.0 +1012 41 negative_sampler.num_negs_per_pos 31.0 +1012 41 training.batch_size 2.0 +1012 42 model.embedding_dim 1.0 +1012 42 model.scoring_fct_norm 1.0 +1012 42 negative_sampler.num_negs_per_pos 92.0 +1012 42 training.batch_size 1.0 +1012 43 model.embedding_dim 2.0 +1012 43 model.scoring_fct_norm 2.0 +1012 43 negative_sampler.num_negs_per_pos 61.0 +1012 43 training.batch_size 1.0 +1012 44 model.embedding_dim 2.0 +1012 44 model.scoring_fct_norm 1.0 +1012 44 negative_sampler.num_negs_per_pos 18.0 +1012 44 training.batch_size 1.0 +1012 45 model.embedding_dim 2.0 +1012 45 model.scoring_fct_norm 1.0 +1012 45 negative_sampler.num_negs_per_pos 79.0 +1012 45 training.batch_size 2.0 +1012 46 model.embedding_dim 0.0 +1012 46 model.scoring_fct_norm 2.0 +1012 46 negative_sampler.num_negs_per_pos 28.0 +1012 46 training.batch_size 1.0 +1012 47 model.embedding_dim 1.0 +1012 47 model.scoring_fct_norm 2.0 +1012 47 negative_sampler.num_negs_per_pos 65.0 +1012 47 training.batch_size 0.0 +1012 48 model.embedding_dim 0.0 +1012 48 model.scoring_fct_norm 2.0 +1012 48 negative_sampler.num_negs_per_pos 59.0 +1012 48 training.batch_size 2.0 +1012 49 model.embedding_dim 1.0 +1012 49 model.scoring_fct_norm 2.0 +1012 49 negative_sampler.num_negs_per_pos 58.0 +1012 49 training.batch_size 2.0 +1012 50 model.embedding_dim 2.0 +1012 50 model.scoring_fct_norm 1.0 +1012 50 negative_sampler.num_negs_per_pos 74.0 +1012 50 training.batch_size 0.0 +1012 51 model.embedding_dim 1.0 +1012 51 model.scoring_fct_norm 2.0 +1012 51 negative_sampler.num_negs_per_pos 70.0 +1012 51 
training.batch_size 0.0 +1012 52 model.embedding_dim 2.0 +1012 52 model.scoring_fct_norm 1.0 +1012 52 negative_sampler.num_negs_per_pos 63.0 +1012 52 training.batch_size 0.0 +1012 53 model.embedding_dim 0.0 +1012 53 model.scoring_fct_norm 1.0 +1012 53 negative_sampler.num_negs_per_pos 46.0 +1012 53 training.batch_size 1.0 +1012 54 model.embedding_dim 2.0 +1012 54 model.scoring_fct_norm 1.0 +1012 54 negative_sampler.num_negs_per_pos 15.0 +1012 54 training.batch_size 2.0 +1012 55 model.embedding_dim 1.0 +1012 55 model.scoring_fct_norm 2.0 +1012 55 negative_sampler.num_negs_per_pos 73.0 +1012 55 training.batch_size 1.0 +1012 56 model.embedding_dim 2.0 +1012 56 model.scoring_fct_norm 1.0 +1012 56 negative_sampler.num_negs_per_pos 79.0 +1012 56 training.batch_size 2.0 +1012 57 model.embedding_dim 2.0 +1012 57 model.scoring_fct_norm 1.0 +1012 57 negative_sampler.num_negs_per_pos 51.0 +1012 57 training.batch_size 2.0 +1012 58 model.embedding_dim 1.0 +1012 58 model.scoring_fct_norm 2.0 +1012 58 negative_sampler.num_negs_per_pos 97.0 +1012 58 training.batch_size 2.0 +1012 59 model.embedding_dim 0.0 +1012 59 model.scoring_fct_norm 1.0 +1012 59 negative_sampler.num_negs_per_pos 11.0 +1012 59 training.batch_size 1.0 +1012 60 model.embedding_dim 1.0 +1012 60 model.scoring_fct_norm 1.0 +1012 60 negative_sampler.num_negs_per_pos 3.0 +1012 60 training.batch_size 2.0 +1012 61 model.embedding_dim 0.0 +1012 61 model.scoring_fct_norm 1.0 +1012 61 negative_sampler.num_negs_per_pos 28.0 +1012 61 training.batch_size 0.0 +1012 62 model.embedding_dim 1.0 +1012 62 model.scoring_fct_norm 1.0 +1012 62 negative_sampler.num_negs_per_pos 11.0 +1012 62 training.batch_size 0.0 +1012 63 model.embedding_dim 2.0 +1012 63 model.scoring_fct_norm 1.0 +1012 63 negative_sampler.num_negs_per_pos 66.0 +1012 63 training.batch_size 0.0 +1012 64 model.embedding_dim 1.0 +1012 64 model.scoring_fct_norm 1.0 +1012 64 negative_sampler.num_negs_per_pos 47.0 +1012 64 training.batch_size 2.0 +1012 65 
model.embedding_dim 1.0 +1012 65 model.scoring_fct_norm 1.0 +1012 65 negative_sampler.num_negs_per_pos 78.0 +1012 65 training.batch_size 2.0 +1012 66 model.embedding_dim 2.0 +1012 66 model.scoring_fct_norm 2.0 +1012 66 negative_sampler.num_negs_per_pos 23.0 +1012 66 training.batch_size 1.0 +1012 67 model.embedding_dim 1.0 +1012 67 model.scoring_fct_norm 1.0 +1012 67 negative_sampler.num_negs_per_pos 3.0 +1012 67 training.batch_size 2.0 +1012 68 model.embedding_dim 1.0 +1012 68 model.scoring_fct_norm 1.0 +1012 68 negative_sampler.num_negs_per_pos 95.0 +1012 68 training.batch_size 1.0 +1012 69 model.embedding_dim 0.0 +1012 69 model.scoring_fct_norm 2.0 +1012 69 negative_sampler.num_negs_per_pos 93.0 +1012 69 training.batch_size 2.0 +1012 70 model.embedding_dim 2.0 +1012 70 model.scoring_fct_norm 2.0 +1012 70 negative_sampler.num_negs_per_pos 24.0 +1012 70 training.batch_size 1.0 +1012 71 model.embedding_dim 2.0 +1012 71 model.scoring_fct_norm 1.0 +1012 71 negative_sampler.num_negs_per_pos 81.0 +1012 71 training.batch_size 1.0 +1012 72 model.embedding_dim 1.0 +1012 72 model.scoring_fct_norm 2.0 +1012 72 negative_sampler.num_negs_per_pos 54.0 +1012 72 training.batch_size 0.0 +1012 73 model.embedding_dim 0.0 +1012 73 model.scoring_fct_norm 1.0 +1012 73 negative_sampler.num_negs_per_pos 22.0 +1012 73 training.batch_size 0.0 +1012 74 model.embedding_dim 1.0 +1012 74 model.scoring_fct_norm 1.0 +1012 74 negative_sampler.num_negs_per_pos 13.0 +1012 74 training.batch_size 2.0 +1012 75 model.embedding_dim 0.0 +1012 75 model.scoring_fct_norm 1.0 +1012 75 negative_sampler.num_negs_per_pos 91.0 +1012 75 training.batch_size 1.0 +1012 76 model.embedding_dim 1.0 +1012 76 model.scoring_fct_norm 1.0 +1012 76 negative_sampler.num_negs_per_pos 28.0 +1012 76 training.batch_size 0.0 +1012 77 model.embedding_dim 1.0 +1012 77 model.scoring_fct_norm 1.0 +1012 77 negative_sampler.num_negs_per_pos 23.0 +1012 77 training.batch_size 0.0 +1012 78 model.embedding_dim 2.0 +1012 78 
model.scoring_fct_norm 2.0 +1012 78 negative_sampler.num_negs_per_pos 92.0 +1012 78 training.batch_size 1.0 +1012 79 model.embedding_dim 1.0 +1012 79 model.scoring_fct_norm 1.0 +1012 79 negative_sampler.num_negs_per_pos 49.0 +1012 79 training.batch_size 0.0 +1012 80 model.embedding_dim 1.0 +1012 80 model.scoring_fct_norm 2.0 +1012 80 negative_sampler.num_negs_per_pos 59.0 +1012 80 training.batch_size 2.0 +1012 81 model.embedding_dim 1.0 +1012 81 model.scoring_fct_norm 1.0 +1012 81 negative_sampler.num_negs_per_pos 80.0 +1012 81 training.batch_size 0.0 +1012 82 model.embedding_dim 0.0 +1012 82 model.scoring_fct_norm 1.0 +1012 82 negative_sampler.num_negs_per_pos 90.0 +1012 82 training.batch_size 1.0 +1012 83 model.embedding_dim 0.0 +1012 83 model.scoring_fct_norm 1.0 +1012 83 negative_sampler.num_negs_per_pos 51.0 +1012 83 training.batch_size 0.0 +1012 84 model.embedding_dim 0.0 +1012 84 model.scoring_fct_norm 2.0 +1012 84 negative_sampler.num_negs_per_pos 9.0 +1012 84 training.batch_size 0.0 +1012 85 model.embedding_dim 1.0 +1012 85 model.scoring_fct_norm 2.0 +1012 85 negative_sampler.num_negs_per_pos 92.0 +1012 85 training.batch_size 0.0 +1012 86 model.embedding_dim 1.0 +1012 86 model.scoring_fct_norm 1.0 +1012 86 negative_sampler.num_negs_per_pos 10.0 +1012 86 training.batch_size 0.0 +1012 87 model.embedding_dim 1.0 +1012 87 model.scoring_fct_norm 2.0 +1012 87 negative_sampler.num_negs_per_pos 12.0 +1012 87 training.batch_size 1.0 +1012 88 model.embedding_dim 0.0 +1012 88 model.scoring_fct_norm 2.0 +1012 88 negative_sampler.num_negs_per_pos 74.0 +1012 88 training.batch_size 1.0 +1012 89 model.embedding_dim 2.0 +1012 89 model.scoring_fct_norm 2.0 +1012 89 negative_sampler.num_negs_per_pos 54.0 +1012 89 training.batch_size 2.0 +1012 90 model.embedding_dim 0.0 +1012 90 model.scoring_fct_norm 1.0 +1012 90 negative_sampler.num_negs_per_pos 76.0 +1012 90 training.batch_size 0.0 +1012 91 model.embedding_dim 0.0 +1012 91 model.scoring_fct_norm 2.0 +1012 91 
negative_sampler.num_negs_per_pos 29.0 +1012 91 training.batch_size 1.0 +1012 92 model.embedding_dim 1.0 +1012 92 model.scoring_fct_norm 2.0 +1012 92 negative_sampler.num_negs_per_pos 78.0 +1012 92 training.batch_size 1.0 +1012 93 model.embedding_dim 0.0 +1012 93 model.scoring_fct_norm 1.0 +1012 93 negative_sampler.num_negs_per_pos 13.0 +1012 93 training.batch_size 0.0 +1012 94 model.embedding_dim 2.0 +1012 94 model.scoring_fct_norm 1.0 +1012 94 negative_sampler.num_negs_per_pos 77.0 +1012 94 training.batch_size 2.0 +1012 95 model.embedding_dim 1.0 +1012 95 model.scoring_fct_norm 1.0 +1012 95 negative_sampler.num_negs_per_pos 41.0 +1012 95 training.batch_size 1.0 +1012 96 model.embedding_dim 2.0 +1012 96 model.scoring_fct_norm 1.0 +1012 96 negative_sampler.num_negs_per_pos 10.0 +1012 96 training.batch_size 2.0 +1012 97 model.embedding_dim 0.0 +1012 97 model.scoring_fct_norm 2.0 +1012 97 negative_sampler.num_negs_per_pos 29.0 +1012 97 training.batch_size 1.0 +1012 98 model.embedding_dim 2.0 +1012 98 model.scoring_fct_norm 1.0 +1012 98 negative_sampler.num_negs_per_pos 6.0 +1012 98 training.batch_size 1.0 +1012 99 model.embedding_dim 2.0 +1012 99 model.scoring_fct_norm 2.0 +1012 99 negative_sampler.num_negs_per_pos 17.0 +1012 99 training.batch_size 1.0 +1012 100 model.embedding_dim 1.0 +1012 100 model.scoring_fct_norm 2.0 +1012 100 negative_sampler.num_negs_per_pos 24.0 +1012 100 training.batch_size 2.0 +1012 1 dataset """kinships""" +1012 1 model """unstructuredmodel""" +1012 1 loss """bceaftersigmoid""" +1012 1 regularizer """no""" +1012 1 optimizer """adadelta""" +1012 1 training_loop """owa""" +1012 1 negative_sampler """basic""" +1012 1 evaluator """rankbased""" +1012 2 dataset """kinships""" +1012 2 model """unstructuredmodel""" +1012 2 loss """bceaftersigmoid""" +1012 2 regularizer """no""" +1012 2 optimizer """adadelta""" +1012 2 training_loop """owa""" +1012 2 negative_sampler """basic""" +1012 2 evaluator """rankbased""" +1012 3 dataset """kinships""" +1012 
3 model """unstructuredmodel""" +1012 3 loss """bceaftersigmoid""" +1012 3 regularizer """no""" +1012 3 optimizer """adadelta""" +1012 3 training_loop """owa""" +1012 3 negative_sampler """basic""" +1012 3 evaluator """rankbased""" +1012 4 dataset """kinships""" +1012 4 model """unstructuredmodel""" +1012 4 loss """bceaftersigmoid""" +1012 4 regularizer """no""" +1012 4 optimizer """adadelta""" +1012 4 training_loop """owa""" +1012 4 negative_sampler """basic""" +1012 4 evaluator """rankbased""" +1012 5 dataset """kinships""" +1012 5 model """unstructuredmodel""" +1012 5 loss """bceaftersigmoid""" +1012 5 regularizer """no""" +1012 5 optimizer """adadelta""" +1012 5 training_loop """owa""" +1012 5 negative_sampler """basic""" +1012 5 evaluator """rankbased""" +1012 6 dataset """kinships""" +1012 6 model """unstructuredmodel""" +1012 6 loss """bceaftersigmoid""" +1012 6 regularizer """no""" +1012 6 optimizer """adadelta""" +1012 6 training_loop """owa""" +1012 6 negative_sampler """basic""" +1012 6 evaluator """rankbased""" +1012 7 dataset """kinships""" +1012 7 model """unstructuredmodel""" +1012 7 loss """bceaftersigmoid""" +1012 7 regularizer """no""" +1012 7 optimizer """adadelta""" +1012 7 training_loop """owa""" +1012 7 negative_sampler """basic""" +1012 7 evaluator """rankbased""" +1012 8 dataset """kinships""" +1012 8 model """unstructuredmodel""" +1012 8 loss """bceaftersigmoid""" +1012 8 regularizer """no""" +1012 8 optimizer """adadelta""" +1012 8 training_loop """owa""" +1012 8 negative_sampler """basic""" +1012 8 evaluator """rankbased""" +1012 9 dataset """kinships""" +1012 9 model """unstructuredmodel""" +1012 9 loss """bceaftersigmoid""" +1012 9 regularizer """no""" +1012 9 optimizer """adadelta""" +1012 9 training_loop """owa""" +1012 9 negative_sampler """basic""" +1012 9 evaluator """rankbased""" +1012 10 dataset """kinships""" +1012 10 model """unstructuredmodel""" +1012 10 loss """bceaftersigmoid""" +1012 10 regularizer """no""" +1012 10 
optimizer """adadelta""" +1012 10 training_loop """owa""" +1012 10 negative_sampler """basic""" +1012 10 evaluator """rankbased""" +1012 11 dataset """kinships""" +1012 11 model """unstructuredmodel""" +1012 11 loss """bceaftersigmoid""" +1012 11 regularizer """no""" +1012 11 optimizer """adadelta""" +1012 11 training_loop """owa""" +1012 11 negative_sampler """basic""" +1012 11 evaluator """rankbased""" +1012 12 dataset """kinships""" +1012 12 model """unstructuredmodel""" +1012 12 loss """bceaftersigmoid""" +1012 12 regularizer """no""" +1012 12 optimizer """adadelta""" +1012 12 training_loop """owa""" +1012 12 negative_sampler """basic""" +1012 12 evaluator """rankbased""" +1012 13 dataset """kinships""" +1012 13 model """unstructuredmodel""" +1012 13 loss """bceaftersigmoid""" +1012 13 regularizer """no""" +1012 13 optimizer """adadelta""" +1012 13 training_loop """owa""" +1012 13 negative_sampler """basic""" +1012 13 evaluator """rankbased""" +1012 14 dataset """kinships""" +1012 14 model """unstructuredmodel""" +1012 14 loss """bceaftersigmoid""" +1012 14 regularizer """no""" +1012 14 optimizer """adadelta""" +1012 14 training_loop """owa""" +1012 14 negative_sampler """basic""" +1012 14 evaluator """rankbased""" +1012 15 dataset """kinships""" +1012 15 model """unstructuredmodel""" +1012 15 loss """bceaftersigmoid""" +1012 15 regularizer """no""" +1012 15 optimizer """adadelta""" +1012 15 training_loop """owa""" +1012 15 negative_sampler """basic""" +1012 15 evaluator """rankbased""" +1012 16 dataset """kinships""" +1012 16 model """unstructuredmodel""" +1012 16 loss """bceaftersigmoid""" +1012 16 regularizer """no""" +1012 16 optimizer """adadelta""" +1012 16 training_loop """owa""" +1012 16 negative_sampler """basic""" +1012 16 evaluator """rankbased""" +1012 17 dataset """kinships""" +1012 17 model """unstructuredmodel""" +1012 17 loss """bceaftersigmoid""" +1012 17 regularizer """no""" +1012 17 optimizer """adadelta""" +1012 17 training_loop """owa""" 
+1012 17 negative_sampler """basic""" +1012 17 evaluator """rankbased""" +1012 18 dataset """kinships""" +1012 18 model """unstructuredmodel""" +1012 18 loss """bceaftersigmoid""" +1012 18 regularizer """no""" +1012 18 optimizer """adadelta""" +1012 18 training_loop """owa""" +1012 18 negative_sampler """basic""" +1012 18 evaluator """rankbased""" +1012 19 dataset """kinships""" +1012 19 model """unstructuredmodel""" +1012 19 loss """bceaftersigmoid""" +1012 19 regularizer """no""" +1012 19 optimizer """adadelta""" +1012 19 training_loop """owa""" +1012 19 negative_sampler """basic""" +1012 19 evaluator """rankbased""" +1012 20 dataset """kinships""" +1012 20 model """unstructuredmodel""" +1012 20 loss """bceaftersigmoid""" +1012 20 regularizer """no""" +1012 20 optimizer """adadelta""" +1012 20 training_loop """owa""" +1012 20 negative_sampler """basic""" +1012 20 evaluator """rankbased""" +1012 21 dataset """kinships""" +1012 21 model """unstructuredmodel""" +1012 21 loss """bceaftersigmoid""" +1012 21 regularizer """no""" +1012 21 optimizer """adadelta""" +1012 21 training_loop """owa""" +1012 21 negative_sampler """basic""" +1012 21 evaluator """rankbased""" +1012 22 dataset """kinships""" +1012 22 model """unstructuredmodel""" +1012 22 loss """bceaftersigmoid""" +1012 22 regularizer """no""" +1012 22 optimizer """adadelta""" +1012 22 training_loop """owa""" +1012 22 negative_sampler """basic""" +1012 22 evaluator """rankbased""" +1012 23 dataset """kinships""" +1012 23 model """unstructuredmodel""" +1012 23 loss """bceaftersigmoid""" +1012 23 regularizer """no""" +1012 23 optimizer """adadelta""" +1012 23 training_loop """owa""" +1012 23 negative_sampler """basic""" +1012 23 evaluator """rankbased""" +1012 24 dataset """kinships""" +1012 24 model """unstructuredmodel""" +1012 24 loss """bceaftersigmoid""" +1012 24 regularizer """no""" +1012 24 optimizer """adadelta""" +1012 24 training_loop """owa""" +1012 24 negative_sampler """basic""" +1012 24 evaluator 
"""rankbased""" +1012 25 dataset """kinships""" +1012 25 model """unstructuredmodel""" +1012 25 loss """bceaftersigmoid""" +1012 25 regularizer """no""" +1012 25 optimizer """adadelta""" +1012 25 training_loop """owa""" +1012 25 negative_sampler """basic""" +1012 25 evaluator """rankbased""" +1012 26 dataset """kinships""" +1012 26 model """unstructuredmodel""" +1012 26 loss """bceaftersigmoid""" +1012 26 regularizer """no""" +1012 26 optimizer """adadelta""" +1012 26 training_loop """owa""" +1012 26 negative_sampler """basic""" +1012 26 evaluator """rankbased""" +1012 27 dataset """kinships""" +1012 27 model """unstructuredmodel""" +1012 27 loss """bceaftersigmoid""" +1012 27 regularizer """no""" +1012 27 optimizer """adadelta""" +1012 27 training_loop """owa""" +1012 27 negative_sampler """basic""" +1012 27 evaluator """rankbased""" +1012 28 dataset """kinships""" +1012 28 model """unstructuredmodel""" +1012 28 loss """bceaftersigmoid""" +1012 28 regularizer """no""" +1012 28 optimizer """adadelta""" +1012 28 training_loop """owa""" +1012 28 negative_sampler """basic""" +1012 28 evaluator """rankbased""" +1012 29 dataset """kinships""" +1012 29 model """unstructuredmodel""" +1012 29 loss """bceaftersigmoid""" +1012 29 regularizer """no""" +1012 29 optimizer """adadelta""" +1012 29 training_loop """owa""" +1012 29 negative_sampler """basic""" +1012 29 evaluator """rankbased""" +1012 30 dataset """kinships""" +1012 30 model """unstructuredmodel""" +1012 30 loss """bceaftersigmoid""" +1012 30 regularizer """no""" +1012 30 optimizer """adadelta""" +1012 30 training_loop """owa""" +1012 30 negative_sampler """basic""" +1012 30 evaluator """rankbased""" +1012 31 dataset """kinships""" +1012 31 model """unstructuredmodel""" +1012 31 loss """bceaftersigmoid""" +1012 31 regularizer """no""" +1012 31 optimizer """adadelta""" +1012 31 training_loop """owa""" +1012 31 negative_sampler """basic""" +1012 31 evaluator """rankbased""" +1012 32 dataset """kinships""" +1012 32 
model """unstructuredmodel""" +1012 32 loss """bceaftersigmoid""" +1012 32 regularizer """no""" +1012 32 optimizer """adadelta""" +1012 32 training_loop """owa""" +1012 32 negative_sampler """basic""" +1012 32 evaluator """rankbased""" +1012 33 dataset """kinships""" +1012 33 model """unstructuredmodel""" +1012 33 loss """bceaftersigmoid""" +1012 33 regularizer """no""" +1012 33 optimizer """adadelta""" +1012 33 training_loop """owa""" +1012 33 negative_sampler """basic""" +1012 33 evaluator """rankbased""" +1012 34 dataset """kinships""" +1012 34 model """unstructuredmodel""" +1012 34 loss """bceaftersigmoid""" +1012 34 regularizer """no""" +1012 34 optimizer """adadelta""" +1012 34 training_loop """owa""" +1012 34 negative_sampler """basic""" +1012 34 evaluator """rankbased""" +1012 35 dataset """kinships""" +1012 35 model """unstructuredmodel""" +1012 35 loss """bceaftersigmoid""" +1012 35 regularizer """no""" +1012 35 optimizer """adadelta""" +1012 35 training_loop """owa""" +1012 35 negative_sampler """basic""" +1012 35 evaluator """rankbased""" +1012 36 dataset """kinships""" +1012 36 model """unstructuredmodel""" +1012 36 loss """bceaftersigmoid""" +1012 36 regularizer """no""" +1012 36 optimizer """adadelta""" +1012 36 training_loop """owa""" +1012 36 negative_sampler """basic""" +1012 36 evaluator """rankbased""" +1012 37 dataset """kinships""" +1012 37 model """unstructuredmodel""" +1012 37 loss """bceaftersigmoid""" +1012 37 regularizer """no""" +1012 37 optimizer """adadelta""" +1012 37 training_loop """owa""" +1012 37 negative_sampler """basic""" +1012 37 evaluator """rankbased""" +1012 38 dataset """kinships""" +1012 38 model """unstructuredmodel""" +1012 38 loss """bceaftersigmoid""" +1012 38 regularizer """no""" +1012 38 optimizer """adadelta""" +1012 38 training_loop """owa""" +1012 38 negative_sampler """basic""" +1012 38 evaluator """rankbased""" +1012 39 dataset """kinships""" +1012 39 model """unstructuredmodel""" +1012 39 loss 
"""bceaftersigmoid""" +1012 39 regularizer """no""" +1012 39 optimizer """adadelta""" +1012 39 training_loop """owa""" +1012 39 negative_sampler """basic""" +1012 39 evaluator """rankbased""" +1012 40 dataset """kinships""" +1012 40 model """unstructuredmodel""" +1012 40 loss """bceaftersigmoid""" +1012 40 regularizer """no""" +1012 40 optimizer """adadelta""" +1012 40 training_loop """owa""" +1012 40 negative_sampler """basic""" +1012 40 evaluator """rankbased""" +1012 41 dataset """kinships""" +1012 41 model """unstructuredmodel""" +1012 41 loss """bceaftersigmoid""" +1012 41 regularizer """no""" +1012 41 optimizer """adadelta""" +1012 41 training_loop """owa""" +1012 41 negative_sampler """basic""" +1012 41 evaluator """rankbased""" +1012 42 dataset """kinships""" +1012 42 model """unstructuredmodel""" +1012 42 loss """bceaftersigmoid""" +1012 42 regularizer """no""" +1012 42 optimizer """adadelta""" +1012 42 training_loop """owa""" +1012 42 negative_sampler """basic""" +1012 42 evaluator """rankbased""" +1012 43 dataset """kinships""" +1012 43 model """unstructuredmodel""" +1012 43 loss """bceaftersigmoid""" +1012 43 regularizer """no""" +1012 43 optimizer """adadelta""" +1012 43 training_loop """owa""" +1012 43 negative_sampler """basic""" +1012 43 evaluator """rankbased""" +1012 44 dataset """kinships""" +1012 44 model """unstructuredmodel""" +1012 44 loss """bceaftersigmoid""" +1012 44 regularizer """no""" +1012 44 optimizer """adadelta""" +1012 44 training_loop """owa""" +1012 44 negative_sampler """basic""" +1012 44 evaluator """rankbased""" +1012 45 dataset """kinships""" +1012 45 model """unstructuredmodel""" +1012 45 loss """bceaftersigmoid""" +1012 45 regularizer """no""" +1012 45 optimizer """adadelta""" +1012 45 training_loop """owa""" +1012 45 negative_sampler """basic""" +1012 45 evaluator """rankbased""" +1012 46 dataset """kinships""" +1012 46 model """unstructuredmodel""" +1012 46 loss """bceaftersigmoid""" +1012 46 regularizer """no""" +1012 46 
optimizer """adadelta""" +1012 46 training_loop """owa""" +1012 46 negative_sampler """basic""" +1012 46 evaluator """rankbased""" +1012 47 dataset """kinships""" +1012 47 model """unstructuredmodel""" +1012 47 loss """bceaftersigmoid""" +1012 47 regularizer """no""" +1012 47 optimizer """adadelta""" +1012 47 training_loop """owa""" +1012 47 negative_sampler """basic""" +1012 47 evaluator """rankbased""" +1012 48 dataset """kinships""" +1012 48 model """unstructuredmodel""" +1012 48 loss """bceaftersigmoid""" +1012 48 regularizer """no""" +1012 48 optimizer """adadelta""" +1012 48 training_loop """owa""" +1012 48 negative_sampler """basic""" +1012 48 evaluator """rankbased""" +1012 49 dataset """kinships""" +1012 49 model """unstructuredmodel""" +1012 49 loss """bceaftersigmoid""" +1012 49 regularizer """no""" +1012 49 optimizer """adadelta""" +1012 49 training_loop """owa""" +1012 49 negative_sampler """basic""" +1012 49 evaluator """rankbased""" +1012 50 dataset """kinships""" +1012 50 model """unstructuredmodel""" +1012 50 loss """bceaftersigmoid""" +1012 50 regularizer """no""" +1012 50 optimizer """adadelta""" +1012 50 training_loop """owa""" +1012 50 negative_sampler """basic""" +1012 50 evaluator """rankbased""" +1012 51 dataset """kinships""" +1012 51 model """unstructuredmodel""" +1012 51 loss """bceaftersigmoid""" +1012 51 regularizer """no""" +1012 51 optimizer """adadelta""" +1012 51 training_loop """owa""" +1012 51 negative_sampler """basic""" +1012 51 evaluator """rankbased""" +1012 52 dataset """kinships""" +1012 52 model """unstructuredmodel""" +1012 52 loss """bceaftersigmoid""" +1012 52 regularizer """no""" +1012 52 optimizer """adadelta""" +1012 52 training_loop """owa""" +1012 52 negative_sampler """basic""" +1012 52 evaluator """rankbased""" +1012 53 dataset """kinships""" +1012 53 model """unstructuredmodel""" +1012 53 loss """bceaftersigmoid""" +1012 53 regularizer """no""" +1012 53 optimizer """adadelta""" +1012 53 training_loop """owa""" 
+1012 53 negative_sampler """basic""" +1012 53 evaluator """rankbased""" +1012 54 dataset """kinships""" +1012 54 model """unstructuredmodel""" +1012 54 loss """bceaftersigmoid""" +1012 54 regularizer """no""" +1012 54 optimizer """adadelta""" +1012 54 training_loop """owa""" +1012 54 negative_sampler """basic""" +1012 54 evaluator """rankbased""" +1012 55 dataset """kinships""" +1012 55 model """unstructuredmodel""" +1012 55 loss """bceaftersigmoid""" +1012 55 regularizer """no""" +1012 55 optimizer """adadelta""" +1012 55 training_loop """owa""" +1012 55 negative_sampler """basic""" +1012 55 evaluator """rankbased""" +1012 56 dataset """kinships""" +1012 56 model """unstructuredmodel""" +1012 56 loss """bceaftersigmoid""" +1012 56 regularizer """no""" +1012 56 optimizer """adadelta""" +1012 56 training_loop """owa""" +1012 56 negative_sampler """basic""" +1012 56 evaluator """rankbased""" +1012 57 dataset """kinships""" +1012 57 model """unstructuredmodel""" +1012 57 loss """bceaftersigmoid""" +1012 57 regularizer """no""" +1012 57 optimizer """adadelta""" +1012 57 training_loop """owa""" +1012 57 negative_sampler """basic""" +1012 57 evaluator """rankbased""" +1012 58 dataset """kinships""" +1012 58 model """unstructuredmodel""" +1012 58 loss """bceaftersigmoid""" +1012 58 regularizer """no""" +1012 58 optimizer """adadelta""" +1012 58 training_loop """owa""" +1012 58 negative_sampler """basic""" +1012 58 evaluator """rankbased""" +1012 59 dataset """kinships""" +1012 59 model """unstructuredmodel""" +1012 59 loss """bceaftersigmoid""" +1012 59 regularizer """no""" +1012 59 optimizer """adadelta""" +1012 59 training_loop """owa""" +1012 59 negative_sampler """basic""" +1012 59 evaluator """rankbased""" +1012 60 dataset """kinships""" +1012 60 model """unstructuredmodel""" +1012 60 loss """bceaftersigmoid""" +1012 60 regularizer """no""" +1012 60 optimizer """adadelta""" +1012 60 training_loop """owa""" +1012 60 negative_sampler """basic""" +1012 60 evaluator 
"""rankbased""" +1012 61 dataset """kinships""" +1012 61 model """unstructuredmodel""" +1012 61 loss """bceaftersigmoid""" +1012 61 regularizer """no""" +1012 61 optimizer """adadelta""" +1012 61 training_loop """owa""" +1012 61 negative_sampler """basic""" +1012 61 evaluator """rankbased""" +1012 62 dataset """kinships""" +1012 62 model """unstructuredmodel""" +1012 62 loss """bceaftersigmoid""" +1012 62 regularizer """no""" +1012 62 optimizer """adadelta""" +1012 62 training_loop """owa""" +1012 62 negative_sampler """basic""" +1012 62 evaluator """rankbased""" +1012 63 dataset """kinships""" +1012 63 model """unstructuredmodel""" +1012 63 loss """bceaftersigmoid""" +1012 63 regularizer """no""" +1012 63 optimizer """adadelta""" +1012 63 training_loop """owa""" +1012 63 negative_sampler """basic""" +1012 63 evaluator """rankbased""" +1012 64 dataset """kinships""" +1012 64 model """unstructuredmodel""" +1012 64 loss """bceaftersigmoid""" +1012 64 regularizer """no""" +1012 64 optimizer """adadelta""" +1012 64 training_loop """owa""" +1012 64 negative_sampler """basic""" +1012 64 evaluator """rankbased""" +1012 65 dataset """kinships""" +1012 65 model """unstructuredmodel""" +1012 65 loss """bceaftersigmoid""" +1012 65 regularizer """no""" +1012 65 optimizer """adadelta""" +1012 65 training_loop """owa""" +1012 65 negative_sampler """basic""" +1012 65 evaluator """rankbased""" +1012 66 dataset """kinships""" +1012 66 model """unstructuredmodel""" +1012 66 loss """bceaftersigmoid""" +1012 66 regularizer """no""" +1012 66 optimizer """adadelta""" +1012 66 training_loop """owa""" +1012 66 negative_sampler """basic""" +1012 66 evaluator """rankbased""" +1012 67 dataset """kinships""" +1012 67 model """unstructuredmodel""" +1012 67 loss """bceaftersigmoid""" +1012 67 regularizer """no""" +1012 67 optimizer """adadelta""" +1012 67 training_loop """owa""" +1012 67 negative_sampler """basic""" +1012 67 evaluator """rankbased""" +1012 68 dataset """kinships""" +1012 68 
model """unstructuredmodel""" +1012 68 loss """bceaftersigmoid""" +1012 68 regularizer """no""" +1012 68 optimizer """adadelta""" +1012 68 training_loop """owa""" +1012 68 negative_sampler """basic""" +1012 68 evaluator """rankbased""" +1012 69 dataset """kinships""" +1012 69 model """unstructuredmodel""" +1012 69 loss """bceaftersigmoid""" +1012 69 regularizer """no""" +1012 69 optimizer """adadelta""" +1012 69 training_loop """owa""" +1012 69 negative_sampler """basic""" +1012 69 evaluator """rankbased""" +1012 70 dataset """kinships""" +1012 70 model """unstructuredmodel""" +1012 70 loss """bceaftersigmoid""" +1012 70 regularizer """no""" +1012 70 optimizer """adadelta""" +1012 70 training_loop """owa""" +1012 70 negative_sampler """basic""" +1012 70 evaluator """rankbased""" +1012 71 dataset """kinships""" +1012 71 model """unstructuredmodel""" +1012 71 loss """bceaftersigmoid""" +1012 71 regularizer """no""" +1012 71 optimizer """adadelta""" +1012 71 training_loop """owa""" +1012 71 negative_sampler """basic""" +1012 71 evaluator """rankbased""" +1012 72 dataset """kinships""" +1012 72 model """unstructuredmodel""" +1012 72 loss """bceaftersigmoid""" +1012 72 regularizer """no""" +1012 72 optimizer """adadelta""" +1012 72 training_loop """owa""" +1012 72 negative_sampler """basic""" +1012 72 evaluator """rankbased""" +1012 73 dataset """kinships""" +1012 73 model """unstructuredmodel""" +1012 73 loss """bceaftersigmoid""" +1012 73 regularizer """no""" +1012 73 optimizer """adadelta""" +1012 73 training_loop """owa""" +1012 73 negative_sampler """basic""" +1012 73 evaluator """rankbased""" +1012 74 dataset """kinships""" +1012 74 model """unstructuredmodel""" +1012 74 loss """bceaftersigmoid""" +1012 74 regularizer """no""" +1012 74 optimizer """adadelta""" +1012 74 training_loop """owa""" +1012 74 negative_sampler """basic""" +1012 74 evaluator """rankbased""" +1012 75 dataset """kinships""" +1012 75 model """unstructuredmodel""" +1012 75 loss 
"""bceaftersigmoid""" +1012 75 regularizer """no""" +1012 75 optimizer """adadelta""" +1012 75 training_loop """owa""" +1012 75 negative_sampler """basic""" +1012 75 evaluator """rankbased""" +1012 76 dataset """kinships""" +1012 76 model """unstructuredmodel""" +1012 76 loss """bceaftersigmoid""" +1012 76 regularizer """no""" +1012 76 optimizer """adadelta""" +1012 76 training_loop """owa""" +1012 76 negative_sampler """basic""" +1012 76 evaluator """rankbased""" +1012 77 dataset """kinships""" +1012 77 model """unstructuredmodel""" +1012 77 loss """bceaftersigmoid""" +1012 77 regularizer """no""" +1012 77 optimizer """adadelta""" +1012 77 training_loop """owa""" +1012 77 negative_sampler """basic""" +1012 77 evaluator """rankbased""" +1012 78 dataset """kinships""" +1012 78 model """unstructuredmodel""" +1012 78 loss """bceaftersigmoid""" +1012 78 regularizer """no""" +1012 78 optimizer """adadelta""" +1012 78 training_loop """owa""" +1012 78 negative_sampler """basic""" +1012 78 evaluator """rankbased""" +1012 79 dataset """kinships""" +1012 79 model """unstructuredmodel""" +1012 79 loss """bceaftersigmoid""" +1012 79 regularizer """no""" +1012 79 optimizer """adadelta""" +1012 79 training_loop """owa""" +1012 79 negative_sampler """basic""" +1012 79 evaluator """rankbased""" +1012 80 dataset """kinships""" +1012 80 model """unstructuredmodel""" +1012 80 loss """bceaftersigmoid""" +1012 80 regularizer """no""" +1012 80 optimizer """adadelta""" +1012 80 training_loop """owa""" +1012 80 negative_sampler """basic""" +1012 80 evaluator """rankbased""" +1012 81 dataset """kinships""" +1012 81 model """unstructuredmodel""" +1012 81 loss """bceaftersigmoid""" +1012 81 regularizer """no""" +1012 81 optimizer """adadelta""" +1012 81 training_loop """owa""" +1012 81 negative_sampler """basic""" +1012 81 evaluator """rankbased""" +1012 82 dataset """kinships""" +1012 82 model """unstructuredmodel""" +1012 82 loss """bceaftersigmoid""" +1012 82 regularizer """no""" +1012 82 
optimizer """adadelta""" +1012 82 training_loop """owa""" +1012 82 negative_sampler """basic""" +1012 82 evaluator """rankbased""" +1012 83 dataset """kinships""" +1012 83 model """unstructuredmodel""" +1012 83 loss """bceaftersigmoid""" +1012 83 regularizer """no""" +1012 83 optimizer """adadelta""" +1012 83 training_loop """owa""" +1012 83 negative_sampler """basic""" +1012 83 evaluator """rankbased""" +1012 84 dataset """kinships""" +1012 84 model """unstructuredmodel""" +1012 84 loss """bceaftersigmoid""" +1012 84 regularizer """no""" +1012 84 optimizer """adadelta""" +1012 84 training_loop """owa""" +1012 84 negative_sampler """basic""" +1012 84 evaluator """rankbased""" +1012 85 dataset """kinships""" +1012 85 model """unstructuredmodel""" +1012 85 loss """bceaftersigmoid""" +1012 85 regularizer """no""" +1012 85 optimizer """adadelta""" +1012 85 training_loop """owa""" +1012 85 negative_sampler """basic""" +1012 85 evaluator """rankbased""" +1012 86 dataset """kinships""" +1012 86 model """unstructuredmodel""" +1012 86 loss """bceaftersigmoid""" +1012 86 regularizer """no""" +1012 86 optimizer """adadelta""" +1012 86 training_loop """owa""" +1012 86 negative_sampler """basic""" +1012 86 evaluator """rankbased""" +1012 87 dataset """kinships""" +1012 87 model """unstructuredmodel""" +1012 87 loss """bceaftersigmoid""" +1012 87 regularizer """no""" +1012 87 optimizer """adadelta""" +1012 87 training_loop """owa""" +1012 87 negative_sampler """basic""" +1012 87 evaluator """rankbased""" +1012 88 dataset """kinships""" +1012 88 model """unstructuredmodel""" +1012 88 loss """bceaftersigmoid""" +1012 88 regularizer """no""" +1012 88 optimizer """adadelta""" +1012 88 training_loop """owa""" +1012 88 negative_sampler """basic""" +1012 88 evaluator """rankbased""" +1012 89 dataset """kinships""" +1012 89 model """unstructuredmodel""" +1012 89 loss """bceaftersigmoid""" +1012 89 regularizer """no""" +1012 89 optimizer """adadelta""" +1012 89 training_loop """owa""" 
+1012 89 negative_sampler """basic""" +1012 89 evaluator """rankbased""" +1012 90 dataset """kinships""" +1012 90 model """unstructuredmodel""" +1012 90 loss """bceaftersigmoid""" +1012 90 regularizer """no""" +1012 90 optimizer """adadelta""" +1012 90 training_loop """owa""" +1012 90 negative_sampler """basic""" +1012 90 evaluator """rankbased""" +1012 91 dataset """kinships""" +1012 91 model """unstructuredmodel""" +1012 91 loss """bceaftersigmoid""" +1012 91 regularizer """no""" +1012 91 optimizer """adadelta""" +1012 91 training_loop """owa""" +1012 91 negative_sampler """basic""" +1012 91 evaluator """rankbased""" +1012 92 dataset """kinships""" +1012 92 model """unstructuredmodel""" +1012 92 loss """bceaftersigmoid""" +1012 92 regularizer """no""" +1012 92 optimizer """adadelta""" +1012 92 training_loop """owa""" +1012 92 negative_sampler """basic""" +1012 92 evaluator """rankbased""" +1012 93 dataset """kinships""" +1012 93 model """unstructuredmodel""" +1012 93 loss """bceaftersigmoid""" +1012 93 regularizer """no""" +1012 93 optimizer """adadelta""" +1012 93 training_loop """owa""" +1012 93 negative_sampler """basic""" +1012 93 evaluator """rankbased""" +1012 94 dataset """kinships""" +1012 94 model """unstructuredmodel""" +1012 94 loss """bceaftersigmoid""" +1012 94 regularizer """no""" +1012 94 optimizer """adadelta""" +1012 94 training_loop """owa""" +1012 94 negative_sampler """basic""" +1012 94 evaluator """rankbased""" +1012 95 dataset """kinships""" +1012 95 model """unstructuredmodel""" +1012 95 loss """bceaftersigmoid""" +1012 95 regularizer """no""" +1012 95 optimizer """adadelta""" +1012 95 training_loop """owa""" +1012 95 negative_sampler """basic""" +1012 95 evaluator """rankbased""" +1012 96 dataset """kinships""" +1012 96 model """unstructuredmodel""" +1012 96 loss """bceaftersigmoid""" +1012 96 regularizer """no""" +1012 96 optimizer """adadelta""" +1012 96 training_loop """owa""" +1012 96 negative_sampler """basic""" +1012 96 evaluator 
"""rankbased""" +1012 97 dataset """kinships""" +1012 97 model """unstructuredmodel""" +1012 97 loss """bceaftersigmoid""" +1012 97 regularizer """no""" +1012 97 optimizer """adadelta""" +1012 97 training_loop """owa""" +1012 97 negative_sampler """basic""" +1012 97 evaluator """rankbased""" +1012 98 dataset """kinships""" +1012 98 model """unstructuredmodel""" +1012 98 loss """bceaftersigmoid""" +1012 98 regularizer """no""" +1012 98 optimizer """adadelta""" +1012 98 training_loop """owa""" +1012 98 negative_sampler """basic""" +1012 98 evaluator """rankbased""" +1012 99 dataset """kinships""" +1012 99 model """unstructuredmodel""" +1012 99 loss """bceaftersigmoid""" +1012 99 regularizer """no""" +1012 99 optimizer """adadelta""" +1012 99 training_loop """owa""" +1012 99 negative_sampler """basic""" +1012 99 evaluator """rankbased""" +1012 100 dataset """kinships""" +1012 100 model """unstructuredmodel""" +1012 100 loss """bceaftersigmoid""" +1012 100 regularizer """no""" +1012 100 optimizer """adadelta""" +1012 100 training_loop """owa""" +1012 100 negative_sampler """basic""" +1012 100 evaluator """rankbased""" +1013 1 model.embedding_dim 2.0 +1013 1 model.scoring_fct_norm 2.0 +1013 1 negative_sampler.num_negs_per_pos 28.0 +1013 1 training.batch_size 0.0 +1013 2 model.embedding_dim 2.0 +1013 2 model.scoring_fct_norm 2.0 +1013 2 negative_sampler.num_negs_per_pos 38.0 +1013 2 training.batch_size 1.0 +1013 3 model.embedding_dim 1.0 +1013 3 model.scoring_fct_norm 2.0 +1013 3 negative_sampler.num_negs_per_pos 49.0 +1013 3 training.batch_size 0.0 +1013 4 model.embedding_dim 2.0 +1013 4 model.scoring_fct_norm 2.0 +1013 4 negative_sampler.num_negs_per_pos 6.0 +1013 4 training.batch_size 0.0 +1013 5 model.embedding_dim 0.0 +1013 5 model.scoring_fct_norm 2.0 +1013 5 negative_sampler.num_negs_per_pos 36.0 +1013 5 training.batch_size 0.0 +1013 6 model.embedding_dim 1.0 +1013 6 model.scoring_fct_norm 1.0 +1013 6 negative_sampler.num_negs_per_pos 75.0 +1013 6 
training.batch_size 2.0 +1013 7 model.embedding_dim 1.0 +1013 7 model.scoring_fct_norm 2.0 +1013 7 negative_sampler.num_negs_per_pos 9.0 +1013 7 training.batch_size 0.0 +1013 8 model.embedding_dim 1.0 +1013 8 model.scoring_fct_norm 2.0 +1013 8 negative_sampler.num_negs_per_pos 24.0 +1013 8 training.batch_size 0.0 +1013 9 model.embedding_dim 2.0 +1013 9 model.scoring_fct_norm 1.0 +1013 9 negative_sampler.num_negs_per_pos 98.0 +1013 9 training.batch_size 1.0 +1013 10 model.embedding_dim 1.0 +1013 10 model.scoring_fct_norm 1.0 +1013 10 negative_sampler.num_negs_per_pos 55.0 +1013 10 training.batch_size 0.0 +1013 11 model.embedding_dim 2.0 +1013 11 model.scoring_fct_norm 1.0 +1013 11 negative_sampler.num_negs_per_pos 61.0 +1013 11 training.batch_size 0.0 +1013 12 model.embedding_dim 1.0 +1013 12 model.scoring_fct_norm 2.0 +1013 12 negative_sampler.num_negs_per_pos 26.0 +1013 12 training.batch_size 1.0 +1013 13 model.embedding_dim 0.0 +1013 13 model.scoring_fct_norm 1.0 +1013 13 negative_sampler.num_negs_per_pos 96.0 +1013 13 training.batch_size 2.0 +1013 14 model.embedding_dim 1.0 +1013 14 model.scoring_fct_norm 2.0 +1013 14 negative_sampler.num_negs_per_pos 81.0 +1013 14 training.batch_size 2.0 +1013 15 model.embedding_dim 0.0 +1013 15 model.scoring_fct_norm 2.0 +1013 15 negative_sampler.num_negs_per_pos 47.0 +1013 15 training.batch_size 1.0 +1013 16 model.embedding_dim 1.0 +1013 16 model.scoring_fct_norm 2.0 +1013 16 negative_sampler.num_negs_per_pos 41.0 +1013 16 training.batch_size 1.0 +1013 17 model.embedding_dim 2.0 +1013 17 model.scoring_fct_norm 2.0 +1013 17 negative_sampler.num_negs_per_pos 30.0 +1013 17 training.batch_size 1.0 +1013 18 model.embedding_dim 2.0 +1013 18 model.scoring_fct_norm 2.0 +1013 18 negative_sampler.num_negs_per_pos 46.0 +1013 18 training.batch_size 1.0 +1013 19 model.embedding_dim 2.0 +1013 19 model.scoring_fct_norm 2.0 +1013 19 negative_sampler.num_negs_per_pos 1.0 +1013 19 training.batch_size 2.0 +1013 20 model.embedding_dim 2.0 +1013 
20 model.scoring_fct_norm 2.0 +1013 20 negative_sampler.num_negs_per_pos 19.0 +1013 20 training.batch_size 1.0 +1013 21 model.embedding_dim 2.0 +1013 21 model.scoring_fct_norm 1.0 +1013 21 negative_sampler.num_negs_per_pos 24.0 +1013 21 training.batch_size 2.0 +1013 22 model.embedding_dim 2.0 +1013 22 model.scoring_fct_norm 1.0 +1013 22 negative_sampler.num_negs_per_pos 69.0 +1013 22 training.batch_size 1.0 +1013 23 model.embedding_dim 0.0 +1013 23 model.scoring_fct_norm 1.0 +1013 23 negative_sampler.num_negs_per_pos 35.0 +1013 23 training.batch_size 1.0 +1013 24 model.embedding_dim 1.0 +1013 24 model.scoring_fct_norm 1.0 +1013 24 negative_sampler.num_negs_per_pos 31.0 +1013 24 training.batch_size 2.0 +1013 25 model.embedding_dim 0.0 +1013 25 model.scoring_fct_norm 2.0 +1013 25 negative_sampler.num_negs_per_pos 44.0 +1013 25 training.batch_size 0.0 +1013 26 model.embedding_dim 1.0 +1013 26 model.scoring_fct_norm 1.0 +1013 26 negative_sampler.num_negs_per_pos 25.0 +1013 26 training.batch_size 1.0 +1013 27 model.embedding_dim 0.0 +1013 27 model.scoring_fct_norm 1.0 +1013 27 negative_sampler.num_negs_per_pos 80.0 +1013 27 training.batch_size 0.0 +1013 28 model.embedding_dim 2.0 +1013 28 model.scoring_fct_norm 2.0 +1013 28 negative_sampler.num_negs_per_pos 65.0 +1013 28 training.batch_size 0.0 +1013 29 model.embedding_dim 2.0 +1013 29 model.scoring_fct_norm 1.0 +1013 29 negative_sampler.num_negs_per_pos 45.0 +1013 29 training.batch_size 2.0 +1013 30 model.embedding_dim 1.0 +1013 30 model.scoring_fct_norm 2.0 +1013 30 negative_sampler.num_negs_per_pos 60.0 +1013 30 training.batch_size 0.0 +1013 31 model.embedding_dim 2.0 +1013 31 model.scoring_fct_norm 1.0 +1013 31 negative_sampler.num_negs_per_pos 38.0 +1013 31 training.batch_size 1.0 +1013 32 model.embedding_dim 0.0 +1013 32 model.scoring_fct_norm 2.0 +1013 32 negative_sampler.num_negs_per_pos 75.0 +1013 32 training.batch_size 1.0 +1013 33 model.embedding_dim 0.0 +1013 33 model.scoring_fct_norm 2.0 +1013 33 
negative_sampler.num_negs_per_pos 44.0 +1013 33 training.batch_size 1.0 +1013 34 model.embedding_dim 2.0 +1013 34 model.scoring_fct_norm 2.0 +1013 34 negative_sampler.num_negs_per_pos 44.0 +1013 34 training.batch_size 0.0 +1013 35 model.embedding_dim 0.0 +1013 35 model.scoring_fct_norm 1.0 +1013 35 negative_sampler.num_negs_per_pos 5.0 +1013 35 training.batch_size 2.0 +1013 36 model.embedding_dim 2.0 +1013 36 model.scoring_fct_norm 1.0 +1013 36 negative_sampler.num_negs_per_pos 55.0 +1013 36 training.batch_size 1.0 +1013 37 model.embedding_dim 2.0 +1013 37 model.scoring_fct_norm 2.0 +1013 37 negative_sampler.num_negs_per_pos 64.0 +1013 37 training.batch_size 2.0 +1013 38 model.embedding_dim 0.0 +1013 38 model.scoring_fct_norm 1.0 +1013 38 negative_sampler.num_negs_per_pos 60.0 +1013 38 training.batch_size 2.0 +1013 39 model.embedding_dim 0.0 +1013 39 model.scoring_fct_norm 1.0 +1013 39 negative_sampler.num_negs_per_pos 22.0 +1013 39 training.batch_size 1.0 +1013 40 model.embedding_dim 2.0 +1013 40 model.scoring_fct_norm 1.0 +1013 40 negative_sampler.num_negs_per_pos 23.0 +1013 40 training.batch_size 2.0 +1013 41 model.embedding_dim 2.0 +1013 41 model.scoring_fct_norm 1.0 +1013 41 negative_sampler.num_negs_per_pos 75.0 +1013 41 training.batch_size 0.0 +1013 42 model.embedding_dim 0.0 +1013 42 model.scoring_fct_norm 2.0 +1013 42 negative_sampler.num_negs_per_pos 63.0 +1013 42 training.batch_size 2.0 +1013 43 model.embedding_dim 1.0 +1013 43 model.scoring_fct_norm 2.0 +1013 43 negative_sampler.num_negs_per_pos 35.0 +1013 43 training.batch_size 2.0 +1013 44 model.embedding_dim 0.0 +1013 44 model.scoring_fct_norm 2.0 +1013 44 negative_sampler.num_negs_per_pos 86.0 +1013 44 training.batch_size 1.0 +1013 45 model.embedding_dim 2.0 +1013 45 model.scoring_fct_norm 2.0 +1013 45 negative_sampler.num_negs_per_pos 78.0 +1013 45 training.batch_size 1.0 +1013 46 model.embedding_dim 0.0 +1013 46 model.scoring_fct_norm 2.0 +1013 46 negative_sampler.num_negs_per_pos 12.0 +1013 46 
training.batch_size 1.0 +1013 47 model.embedding_dim 0.0 +1013 47 model.scoring_fct_norm 1.0 +1013 47 negative_sampler.num_negs_per_pos 94.0 +1013 47 training.batch_size 2.0 +1013 48 model.embedding_dim 0.0 +1013 48 model.scoring_fct_norm 2.0 +1013 48 negative_sampler.num_negs_per_pos 21.0 +1013 48 training.batch_size 2.0 +1013 49 model.embedding_dim 1.0 +1013 49 model.scoring_fct_norm 1.0 +1013 49 negative_sampler.num_negs_per_pos 21.0 +1013 49 training.batch_size 2.0 +1013 50 model.embedding_dim 1.0 +1013 50 model.scoring_fct_norm 1.0 +1013 50 negative_sampler.num_negs_per_pos 81.0 +1013 50 training.batch_size 1.0 +1013 51 model.embedding_dim 2.0 +1013 51 model.scoring_fct_norm 1.0 +1013 51 negative_sampler.num_negs_per_pos 7.0 +1013 51 training.batch_size 1.0 +1013 52 model.embedding_dim 1.0 +1013 52 model.scoring_fct_norm 2.0 +1013 52 negative_sampler.num_negs_per_pos 8.0 +1013 52 training.batch_size 2.0 +1013 53 model.embedding_dim 2.0 +1013 53 model.scoring_fct_norm 2.0 +1013 53 negative_sampler.num_negs_per_pos 70.0 +1013 53 training.batch_size 0.0 +1013 54 model.embedding_dim 2.0 +1013 54 model.scoring_fct_norm 1.0 +1013 54 negative_sampler.num_negs_per_pos 76.0 +1013 54 training.batch_size 0.0 +1013 55 model.embedding_dim 2.0 +1013 55 model.scoring_fct_norm 1.0 +1013 55 negative_sampler.num_negs_per_pos 61.0 +1013 55 training.batch_size 1.0 +1013 56 model.embedding_dim 1.0 +1013 56 model.scoring_fct_norm 2.0 +1013 56 negative_sampler.num_negs_per_pos 57.0 +1013 56 training.batch_size 2.0 +1013 57 model.embedding_dim 2.0 +1013 57 model.scoring_fct_norm 1.0 +1013 57 negative_sampler.num_negs_per_pos 17.0 +1013 57 training.batch_size 2.0 +1013 58 model.embedding_dim 1.0 +1013 58 model.scoring_fct_norm 1.0 +1013 58 negative_sampler.num_negs_per_pos 33.0 +1013 58 training.batch_size 2.0 +1013 59 model.embedding_dim 0.0 +1013 59 model.scoring_fct_norm 1.0 +1013 59 negative_sampler.num_negs_per_pos 62.0 +1013 59 training.batch_size 1.0 +1013 60 
model.embedding_dim 0.0 +1013 60 model.scoring_fct_norm 1.0 +1013 60 negative_sampler.num_negs_per_pos 84.0 +1013 60 training.batch_size 2.0 +1013 61 model.embedding_dim 1.0 +1013 61 model.scoring_fct_norm 2.0 +1013 61 negative_sampler.num_negs_per_pos 30.0 +1013 61 training.batch_size 2.0 +1013 62 model.embedding_dim 1.0 +1013 62 model.scoring_fct_norm 1.0 +1013 62 negative_sampler.num_negs_per_pos 50.0 +1013 62 training.batch_size 2.0 +1013 63 model.embedding_dim 0.0 +1013 63 model.scoring_fct_norm 2.0 +1013 63 negative_sampler.num_negs_per_pos 7.0 +1013 63 training.batch_size 2.0 +1013 64 model.embedding_dim 2.0 +1013 64 model.scoring_fct_norm 1.0 +1013 64 negative_sampler.num_negs_per_pos 40.0 +1013 64 training.batch_size 2.0 +1013 65 model.embedding_dim 0.0 +1013 65 model.scoring_fct_norm 2.0 +1013 65 negative_sampler.num_negs_per_pos 33.0 +1013 65 training.batch_size 0.0 +1013 66 model.embedding_dim 0.0 +1013 66 model.scoring_fct_norm 2.0 +1013 66 negative_sampler.num_negs_per_pos 58.0 +1013 66 training.batch_size 1.0 +1013 67 model.embedding_dim 1.0 +1013 67 model.scoring_fct_norm 2.0 +1013 67 negative_sampler.num_negs_per_pos 79.0 +1013 67 training.batch_size 2.0 +1013 68 model.embedding_dim 0.0 +1013 68 model.scoring_fct_norm 1.0 +1013 68 negative_sampler.num_negs_per_pos 44.0 +1013 68 training.batch_size 2.0 +1013 69 model.embedding_dim 2.0 +1013 69 model.scoring_fct_norm 1.0 +1013 69 negative_sampler.num_negs_per_pos 49.0 +1013 69 training.batch_size 1.0 +1013 70 model.embedding_dim 1.0 +1013 70 model.scoring_fct_norm 2.0 +1013 70 negative_sampler.num_negs_per_pos 85.0 +1013 70 training.batch_size 2.0 +1013 71 model.embedding_dim 1.0 +1013 71 model.scoring_fct_norm 1.0 +1013 71 negative_sampler.num_negs_per_pos 94.0 +1013 71 training.batch_size 1.0 +1013 72 model.embedding_dim 1.0 +1013 72 model.scoring_fct_norm 1.0 +1013 72 negative_sampler.num_negs_per_pos 57.0 +1013 72 training.batch_size 2.0 +1013 73 model.embedding_dim 1.0 +1013 73 
model.scoring_fct_norm 1.0 +1013 73 negative_sampler.num_negs_per_pos 69.0 +1013 73 training.batch_size 0.0 +1013 74 model.embedding_dim 0.0 +1013 74 model.scoring_fct_norm 1.0 +1013 74 negative_sampler.num_negs_per_pos 88.0 +1013 74 training.batch_size 1.0 +1013 75 model.embedding_dim 1.0 +1013 75 model.scoring_fct_norm 2.0 +1013 75 negative_sampler.num_negs_per_pos 33.0 +1013 75 training.batch_size 1.0 +1013 76 model.embedding_dim 1.0 +1013 76 model.scoring_fct_norm 1.0 +1013 76 negative_sampler.num_negs_per_pos 17.0 +1013 76 training.batch_size 0.0 +1013 77 model.embedding_dim 0.0 +1013 77 model.scoring_fct_norm 1.0 +1013 77 negative_sampler.num_negs_per_pos 95.0 +1013 77 training.batch_size 2.0 +1013 78 model.embedding_dim 1.0 +1013 78 model.scoring_fct_norm 2.0 +1013 78 negative_sampler.num_negs_per_pos 4.0 +1013 78 training.batch_size 0.0 +1013 79 model.embedding_dim 2.0 +1013 79 model.scoring_fct_norm 2.0 +1013 79 negative_sampler.num_negs_per_pos 38.0 +1013 79 training.batch_size 2.0 +1013 80 model.embedding_dim 0.0 +1013 80 model.scoring_fct_norm 2.0 +1013 80 negative_sampler.num_negs_per_pos 95.0 +1013 80 training.batch_size 0.0 +1013 81 model.embedding_dim 0.0 +1013 81 model.scoring_fct_norm 2.0 +1013 81 negative_sampler.num_negs_per_pos 58.0 +1013 81 training.batch_size 2.0 +1013 82 model.embedding_dim 0.0 +1013 82 model.scoring_fct_norm 2.0 +1013 82 negative_sampler.num_negs_per_pos 81.0 +1013 82 training.batch_size 2.0 +1013 83 model.embedding_dim 2.0 +1013 83 model.scoring_fct_norm 2.0 +1013 83 negative_sampler.num_negs_per_pos 14.0 +1013 83 training.batch_size 2.0 +1013 84 model.embedding_dim 2.0 +1013 84 model.scoring_fct_norm 2.0 +1013 84 negative_sampler.num_negs_per_pos 98.0 +1013 84 training.batch_size 0.0 +1013 85 model.embedding_dim 1.0 +1013 85 model.scoring_fct_norm 1.0 +1013 85 negative_sampler.num_negs_per_pos 48.0 +1013 85 training.batch_size 2.0 +1013 86 model.embedding_dim 2.0 +1013 86 model.scoring_fct_norm 2.0 +1013 86 
negative_sampler.num_negs_per_pos 38.0 +1013 86 training.batch_size 1.0 +1013 87 model.embedding_dim 1.0 +1013 87 model.scoring_fct_norm 2.0 +1013 87 negative_sampler.num_negs_per_pos 2.0 +1013 87 training.batch_size 1.0 +1013 88 model.embedding_dim 0.0 +1013 88 model.scoring_fct_norm 2.0 +1013 88 negative_sampler.num_negs_per_pos 27.0 +1013 88 training.batch_size 0.0 +1013 89 model.embedding_dim 0.0 +1013 89 model.scoring_fct_norm 2.0 +1013 89 negative_sampler.num_negs_per_pos 70.0 +1013 89 training.batch_size 1.0 +1013 90 model.embedding_dim 2.0 +1013 90 model.scoring_fct_norm 1.0 +1013 90 negative_sampler.num_negs_per_pos 94.0 +1013 90 training.batch_size 2.0 +1013 91 model.embedding_dim 2.0 +1013 91 model.scoring_fct_norm 1.0 +1013 91 negative_sampler.num_negs_per_pos 46.0 +1013 91 training.batch_size 2.0 +1013 92 model.embedding_dim 2.0 +1013 92 model.scoring_fct_norm 2.0 +1013 92 negative_sampler.num_negs_per_pos 62.0 +1013 92 training.batch_size 1.0 +1013 93 model.embedding_dim 1.0 +1013 93 model.scoring_fct_norm 1.0 +1013 93 negative_sampler.num_negs_per_pos 8.0 +1013 93 training.batch_size 2.0 +1013 94 model.embedding_dim 0.0 +1013 94 model.scoring_fct_norm 1.0 +1013 94 negative_sampler.num_negs_per_pos 23.0 +1013 94 training.batch_size 0.0 +1013 95 model.embedding_dim 1.0 +1013 95 model.scoring_fct_norm 2.0 +1013 95 negative_sampler.num_negs_per_pos 33.0 +1013 95 training.batch_size 0.0 +1013 96 model.embedding_dim 1.0 +1013 96 model.scoring_fct_norm 1.0 +1013 96 negative_sampler.num_negs_per_pos 89.0 +1013 96 training.batch_size 0.0 +1013 97 model.embedding_dim 2.0 +1013 97 model.scoring_fct_norm 2.0 +1013 97 negative_sampler.num_negs_per_pos 47.0 +1013 97 training.batch_size 2.0 +1013 98 model.embedding_dim 0.0 +1013 98 model.scoring_fct_norm 1.0 +1013 98 negative_sampler.num_negs_per_pos 42.0 +1013 98 training.batch_size 0.0 +1013 99 model.embedding_dim 0.0 +1013 99 model.scoring_fct_norm 1.0 +1013 99 negative_sampler.num_negs_per_pos 35.0 +1013 99 
training.batch_size 2.0 +1013 100 model.embedding_dim 0.0 +1013 100 model.scoring_fct_norm 2.0 +1013 100 negative_sampler.num_negs_per_pos 58.0 +1013 100 training.batch_size 1.0 +1013 1 dataset """kinships""" +1013 1 model """unstructuredmodel""" +1013 1 loss """softplus""" +1013 1 regularizer """no""" +1013 1 optimizer """adadelta""" +1013 1 training_loop """owa""" +1013 1 negative_sampler """basic""" +1013 1 evaluator """rankbased""" +1013 2 dataset """kinships""" +1013 2 model """unstructuredmodel""" +1013 2 loss """softplus""" +1013 2 regularizer """no""" +1013 2 optimizer """adadelta""" +1013 2 training_loop """owa""" +1013 2 negative_sampler """basic""" +1013 2 evaluator """rankbased""" +1013 3 dataset """kinships""" +1013 3 model """unstructuredmodel""" +1013 3 loss """softplus""" +1013 3 regularizer """no""" +1013 3 optimizer """adadelta""" +1013 3 training_loop """owa""" +1013 3 negative_sampler """basic""" +1013 3 evaluator """rankbased""" +1013 4 dataset """kinships""" +1013 4 model """unstructuredmodel""" +1013 4 loss """softplus""" +1013 4 regularizer """no""" +1013 4 optimizer """adadelta""" +1013 4 training_loop """owa""" +1013 4 negative_sampler """basic""" +1013 4 evaluator """rankbased""" +1013 5 dataset """kinships""" +1013 5 model """unstructuredmodel""" +1013 5 loss """softplus""" +1013 5 regularizer """no""" +1013 5 optimizer """adadelta""" +1013 5 training_loop """owa""" +1013 5 negative_sampler """basic""" +1013 5 evaluator """rankbased""" +1013 6 dataset """kinships""" +1013 6 model """unstructuredmodel""" +1013 6 loss """softplus""" +1013 6 regularizer """no""" +1013 6 optimizer """adadelta""" +1013 6 training_loop """owa""" +1013 6 negative_sampler """basic""" +1013 6 evaluator """rankbased""" +1013 7 dataset """kinships""" +1013 7 model """unstructuredmodel""" +1013 7 loss """softplus""" +1013 7 regularizer """no""" +1013 7 optimizer """adadelta""" +1013 7 training_loop """owa""" +1013 7 negative_sampler """basic""" +1013 7 evaluator 
"""rankbased""" +1013 8 dataset """kinships""" +1013 8 model """unstructuredmodel""" +1013 8 loss """softplus""" +1013 8 regularizer """no""" +1013 8 optimizer """adadelta""" +1013 8 training_loop """owa""" +1013 8 negative_sampler """basic""" +1013 8 evaluator """rankbased""" +1013 9 dataset """kinships""" +1013 9 model """unstructuredmodel""" +1013 9 loss """softplus""" +1013 9 regularizer """no""" +1013 9 optimizer """adadelta""" +1013 9 training_loop """owa""" +1013 9 negative_sampler """basic""" +1013 9 evaluator """rankbased""" +1013 10 dataset """kinships""" +1013 10 model """unstructuredmodel""" +1013 10 loss """softplus""" +1013 10 regularizer """no""" +1013 10 optimizer """adadelta""" +1013 10 training_loop """owa""" +1013 10 negative_sampler """basic""" +1013 10 evaluator """rankbased""" +1013 11 dataset """kinships""" +1013 11 model """unstructuredmodel""" +1013 11 loss """softplus""" +1013 11 regularizer """no""" +1013 11 optimizer """adadelta""" +1013 11 training_loop """owa""" +1013 11 negative_sampler """basic""" +1013 11 evaluator """rankbased""" +1013 12 dataset """kinships""" +1013 12 model """unstructuredmodel""" +1013 12 loss """softplus""" +1013 12 regularizer """no""" +1013 12 optimizer """adadelta""" +1013 12 training_loop """owa""" +1013 12 negative_sampler """basic""" +1013 12 evaluator """rankbased""" +1013 13 dataset """kinships""" +1013 13 model """unstructuredmodel""" +1013 13 loss """softplus""" +1013 13 regularizer """no""" +1013 13 optimizer """adadelta""" +1013 13 training_loop """owa""" +1013 13 negative_sampler """basic""" +1013 13 evaluator """rankbased""" +1013 14 dataset """kinships""" +1013 14 model """unstructuredmodel""" +1013 14 loss """softplus""" +1013 14 regularizer """no""" +1013 14 optimizer """adadelta""" +1013 14 training_loop """owa""" +1013 14 negative_sampler """basic""" +1013 14 evaluator """rankbased""" +1013 15 dataset """kinships""" +1013 15 model """unstructuredmodel""" +1013 15 loss """softplus""" +1013 15 
regularizer """no""" +1013 15 optimizer """adadelta""" +1013 15 training_loop """owa""" +1013 15 negative_sampler """basic""" +1013 15 evaluator """rankbased""" +1013 16 dataset """kinships""" +1013 16 model """unstructuredmodel""" +1013 16 loss """softplus""" +1013 16 regularizer """no""" +1013 16 optimizer """adadelta""" +1013 16 training_loop """owa""" +1013 16 negative_sampler """basic""" +1013 16 evaluator """rankbased""" +1013 17 dataset """kinships""" +1013 17 model """unstructuredmodel""" +1013 17 loss """softplus""" +1013 17 regularizer """no""" +1013 17 optimizer """adadelta""" +1013 17 training_loop """owa""" +1013 17 negative_sampler """basic""" +1013 17 evaluator """rankbased""" +1013 18 dataset """kinships""" +1013 18 model """unstructuredmodel""" +1013 18 loss """softplus""" +1013 18 regularizer """no""" +1013 18 optimizer """adadelta""" +1013 18 training_loop """owa""" +1013 18 negative_sampler """basic""" +1013 18 evaluator """rankbased""" +1013 19 dataset """kinships""" +1013 19 model """unstructuredmodel""" +1013 19 loss """softplus""" +1013 19 regularizer """no""" +1013 19 optimizer """adadelta""" +1013 19 training_loop """owa""" +1013 19 negative_sampler """basic""" +1013 19 evaluator """rankbased""" +1013 20 dataset """kinships""" +1013 20 model """unstructuredmodel""" +1013 20 loss """softplus""" +1013 20 regularizer """no""" +1013 20 optimizer """adadelta""" +1013 20 training_loop """owa""" +1013 20 negative_sampler """basic""" +1013 20 evaluator """rankbased""" +1013 21 dataset """kinships""" +1013 21 model """unstructuredmodel""" +1013 21 loss """softplus""" +1013 21 regularizer """no""" +1013 21 optimizer """adadelta""" +1013 21 training_loop """owa""" +1013 21 negative_sampler """basic""" +1013 21 evaluator """rankbased""" +1013 22 dataset """kinships""" +1013 22 model """unstructuredmodel""" +1013 22 loss """softplus""" +1013 22 regularizer """no""" +1013 22 optimizer """adadelta""" +1013 22 training_loop """owa""" +1013 22 
negative_sampler """basic""" +1013 22 evaluator """rankbased""" +1013 23 dataset """kinships""" +1013 23 model """unstructuredmodel""" +1013 23 loss """softplus""" +1013 23 regularizer """no""" +1013 23 optimizer """adadelta""" +1013 23 training_loop """owa""" +1013 23 negative_sampler """basic""" +1013 23 evaluator """rankbased""" +1013 24 dataset """kinships""" +1013 24 model """unstructuredmodel""" +1013 24 loss """softplus""" +1013 24 regularizer """no""" +1013 24 optimizer """adadelta""" +1013 24 training_loop """owa""" +1013 24 negative_sampler """basic""" +1013 24 evaluator """rankbased""" +1013 25 dataset """kinships""" +1013 25 model """unstructuredmodel""" +1013 25 loss """softplus""" +1013 25 regularizer """no""" +1013 25 optimizer """adadelta""" +1013 25 training_loop """owa""" +1013 25 negative_sampler """basic""" +1013 25 evaluator """rankbased""" +1013 26 dataset """kinships""" +1013 26 model """unstructuredmodel""" +1013 26 loss """softplus""" +1013 26 regularizer """no""" +1013 26 optimizer """adadelta""" +1013 26 training_loop """owa""" +1013 26 negative_sampler """basic""" +1013 26 evaluator """rankbased""" +1013 27 dataset """kinships""" +1013 27 model """unstructuredmodel""" +1013 27 loss """softplus""" +1013 27 regularizer """no""" +1013 27 optimizer """adadelta""" +1013 27 training_loop """owa""" +1013 27 negative_sampler """basic""" +1013 27 evaluator """rankbased""" +1013 28 dataset """kinships""" +1013 28 model """unstructuredmodel""" +1013 28 loss """softplus""" +1013 28 regularizer """no""" +1013 28 optimizer """adadelta""" +1013 28 training_loop """owa""" +1013 28 negative_sampler """basic""" +1013 28 evaluator """rankbased""" +1013 29 dataset """kinships""" +1013 29 model """unstructuredmodel""" +1013 29 loss """softplus""" +1013 29 regularizer """no""" +1013 29 optimizer """adadelta""" +1013 29 training_loop """owa""" +1013 29 negative_sampler """basic""" +1013 29 evaluator """rankbased""" +1013 30 dataset """kinships""" +1013 30 
model """unstructuredmodel""" +1013 30 loss """softplus""" +1013 30 regularizer """no""" +1013 30 optimizer """adadelta""" +1013 30 training_loop """owa""" +1013 30 negative_sampler """basic""" +1013 30 evaluator """rankbased""" +1013 31 dataset """kinships""" +1013 31 model """unstructuredmodel""" +1013 31 loss """softplus""" +1013 31 regularizer """no""" +1013 31 optimizer """adadelta""" +1013 31 training_loop """owa""" +1013 31 negative_sampler """basic""" +1013 31 evaluator """rankbased""" +1013 32 dataset """kinships""" +1013 32 model """unstructuredmodel""" +1013 32 loss """softplus""" +1013 32 regularizer """no""" +1013 32 optimizer """adadelta""" +1013 32 training_loop """owa""" +1013 32 negative_sampler """basic""" +1013 32 evaluator """rankbased""" +1013 33 dataset """kinships""" +1013 33 model """unstructuredmodel""" +1013 33 loss """softplus""" +1013 33 regularizer """no""" +1013 33 optimizer """adadelta""" +1013 33 training_loop """owa""" +1013 33 negative_sampler """basic""" +1013 33 evaluator """rankbased""" +1013 34 dataset """kinships""" +1013 34 model """unstructuredmodel""" +1013 34 loss """softplus""" +1013 34 regularizer """no""" +1013 34 optimizer """adadelta""" +1013 34 training_loop """owa""" +1013 34 negative_sampler """basic""" +1013 34 evaluator """rankbased""" +1013 35 dataset """kinships""" +1013 35 model """unstructuredmodel""" +1013 35 loss """softplus""" +1013 35 regularizer """no""" +1013 35 optimizer """adadelta""" +1013 35 training_loop """owa""" +1013 35 negative_sampler """basic""" +1013 35 evaluator """rankbased""" +1013 36 dataset """kinships""" +1013 36 model """unstructuredmodel""" +1013 36 loss """softplus""" +1013 36 regularizer """no""" +1013 36 optimizer """adadelta""" +1013 36 training_loop """owa""" +1013 36 negative_sampler """basic""" +1013 36 evaluator """rankbased""" +1013 37 dataset """kinships""" +1013 37 model """unstructuredmodel""" +1013 37 loss """softplus""" +1013 37 regularizer """no""" +1013 37 optimizer 
"""adadelta""" +1013 37 training_loop """owa""" +1013 37 negative_sampler """basic""" +1013 37 evaluator """rankbased""" +1013 38 dataset """kinships""" +1013 38 model """unstructuredmodel""" +1013 38 loss """softplus""" +1013 38 regularizer """no""" +1013 38 optimizer """adadelta""" +1013 38 training_loop """owa""" +1013 38 negative_sampler """basic""" +1013 38 evaluator """rankbased""" +1013 39 dataset """kinships""" +1013 39 model """unstructuredmodel""" +1013 39 loss """softplus""" +1013 39 regularizer """no""" +1013 39 optimizer """adadelta""" +1013 39 training_loop """owa""" +1013 39 negative_sampler """basic""" +1013 39 evaluator """rankbased""" +1013 40 dataset """kinships""" +1013 40 model """unstructuredmodel""" +1013 40 loss """softplus""" +1013 40 regularizer """no""" +1013 40 optimizer """adadelta""" +1013 40 training_loop """owa""" +1013 40 negative_sampler """basic""" +1013 40 evaluator """rankbased""" +1013 41 dataset """kinships""" +1013 41 model """unstructuredmodel""" +1013 41 loss """softplus""" +1013 41 regularizer """no""" +1013 41 optimizer """adadelta""" +1013 41 training_loop """owa""" +1013 41 negative_sampler """basic""" +1013 41 evaluator """rankbased""" +1013 42 dataset """kinships""" +1013 42 model """unstructuredmodel""" +1013 42 loss """softplus""" +1013 42 regularizer """no""" +1013 42 optimizer """adadelta""" +1013 42 training_loop """owa""" +1013 42 negative_sampler """basic""" +1013 42 evaluator """rankbased""" +1013 43 dataset """kinships""" +1013 43 model """unstructuredmodel""" +1013 43 loss """softplus""" +1013 43 regularizer """no""" +1013 43 optimizer """adadelta""" +1013 43 training_loop """owa""" +1013 43 negative_sampler """basic""" +1013 43 evaluator """rankbased""" +1013 44 dataset """kinships""" +1013 44 model """unstructuredmodel""" +1013 44 loss """softplus""" +1013 44 regularizer """no""" +1013 44 optimizer """adadelta""" +1013 44 training_loop """owa""" +1013 44 negative_sampler """basic""" +1013 44 evaluator 
"""rankbased""" +1013 45 dataset """kinships""" +1013 45 model """unstructuredmodel""" +1013 45 loss """softplus""" +1013 45 regularizer """no""" +1013 45 optimizer """adadelta""" +1013 45 training_loop """owa""" +1013 45 negative_sampler """basic""" +1013 45 evaluator """rankbased""" +1013 46 dataset """kinships""" +1013 46 model """unstructuredmodel""" +1013 46 loss """softplus""" +1013 46 regularizer """no""" +1013 46 optimizer """adadelta""" +1013 46 training_loop """owa""" +1013 46 negative_sampler """basic""" +1013 46 evaluator """rankbased""" +1013 47 dataset """kinships""" +1013 47 model """unstructuredmodel""" +1013 47 loss """softplus""" +1013 47 regularizer """no""" +1013 47 optimizer """adadelta""" +1013 47 training_loop """owa""" +1013 47 negative_sampler """basic""" +1013 47 evaluator """rankbased""" +1013 48 dataset """kinships""" +1013 48 model """unstructuredmodel""" +1013 48 loss """softplus""" +1013 48 regularizer """no""" +1013 48 optimizer """adadelta""" +1013 48 training_loop """owa""" +1013 48 negative_sampler """basic""" +1013 48 evaluator """rankbased""" +1013 49 dataset """kinships""" +1013 49 model """unstructuredmodel""" +1013 49 loss """softplus""" +1013 49 regularizer """no""" +1013 49 optimizer """adadelta""" +1013 49 training_loop """owa""" +1013 49 negative_sampler """basic""" +1013 49 evaluator """rankbased""" +1013 50 dataset """kinships""" +1013 50 model """unstructuredmodel""" +1013 50 loss """softplus""" +1013 50 regularizer """no""" +1013 50 optimizer """adadelta""" +1013 50 training_loop """owa""" +1013 50 negative_sampler """basic""" +1013 50 evaluator """rankbased""" +1013 51 dataset """kinships""" +1013 51 model """unstructuredmodel""" +1013 51 loss """softplus""" +1013 51 regularizer """no""" +1013 51 optimizer """adadelta""" +1013 51 training_loop """owa""" +1013 51 negative_sampler """basic""" +1013 51 evaluator """rankbased""" +1013 52 dataset """kinships""" +1013 52 model """unstructuredmodel""" +1013 52 loss 
"""softplus""" +1013 52 regularizer """no""" +1013 52 optimizer """adadelta""" +1013 52 training_loop """owa""" +1013 52 negative_sampler """basic""" +1013 52 evaluator """rankbased""" +1013 53 dataset """kinships""" +1013 53 model """unstructuredmodel""" +1013 53 loss """softplus""" +1013 53 regularizer """no""" +1013 53 optimizer """adadelta""" +1013 53 training_loop """owa""" +1013 53 negative_sampler """basic""" +1013 53 evaluator """rankbased""" +1013 54 dataset """kinships""" +1013 54 model """unstructuredmodel""" +1013 54 loss """softplus""" +1013 54 regularizer """no""" +1013 54 optimizer """adadelta""" +1013 54 training_loop """owa""" +1013 54 negative_sampler """basic""" +1013 54 evaluator """rankbased""" +1013 55 dataset """kinships""" +1013 55 model """unstructuredmodel""" +1013 55 loss """softplus""" +1013 55 regularizer """no""" +1013 55 optimizer """adadelta""" +1013 55 training_loop """owa""" +1013 55 negative_sampler """basic""" +1013 55 evaluator """rankbased""" +1013 56 dataset """kinships""" +1013 56 model """unstructuredmodel""" +1013 56 loss """softplus""" +1013 56 regularizer """no""" +1013 56 optimizer """adadelta""" +1013 56 training_loop """owa""" +1013 56 negative_sampler """basic""" +1013 56 evaluator """rankbased""" +1013 57 dataset """kinships""" +1013 57 model """unstructuredmodel""" +1013 57 loss """softplus""" +1013 57 regularizer """no""" +1013 57 optimizer """adadelta""" +1013 57 training_loop """owa""" +1013 57 negative_sampler """basic""" +1013 57 evaluator """rankbased""" +1013 58 dataset """kinships""" +1013 58 model """unstructuredmodel""" +1013 58 loss """softplus""" +1013 58 regularizer """no""" +1013 58 optimizer """adadelta""" +1013 58 training_loop """owa""" +1013 58 negative_sampler """basic""" +1013 58 evaluator """rankbased""" +1013 59 dataset """kinships""" +1013 59 model """unstructuredmodel""" +1013 59 loss """softplus""" +1013 59 regularizer """no""" +1013 59 optimizer """adadelta""" +1013 59 training_loop 
"""owa""" +1013 59 negative_sampler """basic""" +1013 59 evaluator """rankbased""" +1013 60 dataset """kinships""" +1013 60 model """unstructuredmodel""" +1013 60 loss """softplus""" +1013 60 regularizer """no""" +1013 60 optimizer """adadelta""" +1013 60 training_loop """owa""" +1013 60 negative_sampler """basic""" +1013 60 evaluator """rankbased""" +1013 61 dataset """kinships""" +1013 61 model """unstructuredmodel""" +1013 61 loss """softplus""" +1013 61 regularizer """no""" +1013 61 optimizer """adadelta""" +1013 61 training_loop """owa""" +1013 61 negative_sampler """basic""" +1013 61 evaluator """rankbased""" +1013 62 dataset """kinships""" +1013 62 model """unstructuredmodel""" +1013 62 loss """softplus""" +1013 62 regularizer """no""" +1013 62 optimizer """adadelta""" +1013 62 training_loop """owa""" +1013 62 negative_sampler """basic""" +1013 62 evaluator """rankbased""" +1013 63 dataset """kinships""" +1013 63 model """unstructuredmodel""" +1013 63 loss """softplus""" +1013 63 regularizer """no""" +1013 63 optimizer """adadelta""" +1013 63 training_loop """owa""" +1013 63 negative_sampler """basic""" +1013 63 evaluator """rankbased""" +1013 64 dataset """kinships""" +1013 64 model """unstructuredmodel""" +1013 64 loss """softplus""" +1013 64 regularizer """no""" +1013 64 optimizer """adadelta""" +1013 64 training_loop """owa""" +1013 64 negative_sampler """basic""" +1013 64 evaluator """rankbased""" +1013 65 dataset """kinships""" +1013 65 model """unstructuredmodel""" +1013 65 loss """softplus""" +1013 65 regularizer """no""" +1013 65 optimizer """adadelta""" +1013 65 training_loop """owa""" +1013 65 negative_sampler """basic""" +1013 65 evaluator """rankbased""" +1013 66 dataset """kinships""" +1013 66 model """unstructuredmodel""" +1013 66 loss """softplus""" +1013 66 regularizer """no""" +1013 66 optimizer """adadelta""" +1013 66 training_loop """owa""" +1013 66 negative_sampler """basic""" +1013 66 evaluator """rankbased""" +1013 67 dataset 
"""kinships""" +1013 67 model """unstructuredmodel""" +1013 67 loss """softplus""" +1013 67 regularizer """no""" +1013 67 optimizer """adadelta""" +1013 67 training_loop """owa""" +1013 67 negative_sampler """basic""" +1013 67 evaluator """rankbased""" +1013 68 dataset """kinships""" +1013 68 model """unstructuredmodel""" +1013 68 loss """softplus""" +1013 68 regularizer """no""" +1013 68 optimizer """adadelta""" +1013 68 training_loop """owa""" +1013 68 negative_sampler """basic""" +1013 68 evaluator """rankbased""" +1013 69 dataset """kinships""" +1013 69 model """unstructuredmodel""" +1013 69 loss """softplus""" +1013 69 regularizer """no""" +1013 69 optimizer """adadelta""" +1013 69 training_loop """owa""" +1013 69 negative_sampler """basic""" +1013 69 evaluator """rankbased""" +1013 70 dataset """kinships""" +1013 70 model """unstructuredmodel""" +1013 70 loss """softplus""" +1013 70 regularizer """no""" +1013 70 optimizer """adadelta""" +1013 70 training_loop """owa""" +1013 70 negative_sampler """basic""" +1013 70 evaluator """rankbased""" +1013 71 dataset """kinships""" +1013 71 model """unstructuredmodel""" +1013 71 loss """softplus""" +1013 71 regularizer """no""" +1013 71 optimizer """adadelta""" +1013 71 training_loop """owa""" +1013 71 negative_sampler """basic""" +1013 71 evaluator """rankbased""" +1013 72 dataset """kinships""" +1013 72 model """unstructuredmodel""" +1013 72 loss """softplus""" +1013 72 regularizer """no""" +1013 72 optimizer """adadelta""" +1013 72 training_loop """owa""" +1013 72 negative_sampler """basic""" +1013 72 evaluator """rankbased""" +1013 73 dataset """kinships""" +1013 73 model """unstructuredmodel""" +1013 73 loss """softplus""" +1013 73 regularizer """no""" +1013 73 optimizer """adadelta""" +1013 73 training_loop """owa""" +1013 73 negative_sampler """basic""" +1013 73 evaluator """rankbased""" +1013 74 dataset """kinships""" +1013 74 model """unstructuredmodel""" +1013 74 loss """softplus""" +1013 74 regularizer 
"""no""" +1013 74 optimizer """adadelta""" +1013 74 training_loop """owa""" +1013 74 negative_sampler """basic""" +1013 74 evaluator """rankbased""" +1013 75 dataset """kinships""" +1013 75 model """unstructuredmodel""" +1013 75 loss """softplus""" +1013 75 regularizer """no""" +1013 75 optimizer """adadelta""" +1013 75 training_loop """owa""" +1013 75 negative_sampler """basic""" +1013 75 evaluator """rankbased""" +1013 76 dataset """kinships""" +1013 76 model """unstructuredmodel""" +1013 76 loss """softplus""" +1013 76 regularizer """no""" +1013 76 optimizer """adadelta""" +1013 76 training_loop """owa""" +1013 76 negative_sampler """basic""" +1013 76 evaluator """rankbased""" +1013 77 dataset """kinships""" +1013 77 model """unstructuredmodel""" +1013 77 loss """softplus""" +1013 77 regularizer """no""" +1013 77 optimizer """adadelta""" +1013 77 training_loop """owa""" +1013 77 negative_sampler """basic""" +1013 77 evaluator """rankbased""" +1013 78 dataset """kinships""" +1013 78 model """unstructuredmodel""" +1013 78 loss """softplus""" +1013 78 regularizer """no""" +1013 78 optimizer """adadelta""" +1013 78 training_loop """owa""" +1013 78 negative_sampler """basic""" +1013 78 evaluator """rankbased""" +1013 79 dataset """kinships""" +1013 79 model """unstructuredmodel""" +1013 79 loss """softplus""" +1013 79 regularizer """no""" +1013 79 optimizer """adadelta""" +1013 79 training_loop """owa""" +1013 79 negative_sampler """basic""" +1013 79 evaluator """rankbased""" +1013 80 dataset """kinships""" +1013 80 model """unstructuredmodel""" +1013 80 loss """softplus""" +1013 80 regularizer """no""" +1013 80 optimizer """adadelta""" +1013 80 training_loop """owa""" +1013 80 negative_sampler """basic""" +1013 80 evaluator """rankbased""" +1013 81 dataset """kinships""" +1013 81 model """unstructuredmodel""" +1013 81 loss """softplus""" +1013 81 regularizer """no""" +1013 81 optimizer """adadelta""" +1013 81 training_loop """owa""" +1013 81 negative_sampler 
"""basic""" +1013 81 evaluator """rankbased""" +1013 82 dataset """kinships""" +1013 82 model """unstructuredmodel""" +1013 82 loss """softplus""" +1013 82 regularizer """no""" +1013 82 optimizer """adadelta""" +1013 82 training_loop """owa""" +1013 82 negative_sampler """basic""" +1013 82 evaluator """rankbased""" +1013 83 dataset """kinships""" +1013 83 model """unstructuredmodel""" +1013 83 loss """softplus""" +1013 83 regularizer """no""" +1013 83 optimizer """adadelta""" +1013 83 training_loop """owa""" +1013 83 negative_sampler """basic""" +1013 83 evaluator """rankbased""" +1013 84 dataset """kinships""" +1013 84 model """unstructuredmodel""" +1013 84 loss """softplus""" +1013 84 regularizer """no""" +1013 84 optimizer """adadelta""" +1013 84 training_loop """owa""" +1013 84 negative_sampler """basic""" +1013 84 evaluator """rankbased""" +1013 85 dataset """kinships""" +1013 85 model """unstructuredmodel""" +1013 85 loss """softplus""" +1013 85 regularizer """no""" +1013 85 optimizer """adadelta""" +1013 85 training_loop """owa""" +1013 85 negative_sampler """basic""" +1013 85 evaluator """rankbased""" +1013 86 dataset """kinships""" +1013 86 model """unstructuredmodel""" +1013 86 loss """softplus""" +1013 86 regularizer """no""" +1013 86 optimizer """adadelta""" +1013 86 training_loop """owa""" +1013 86 negative_sampler """basic""" +1013 86 evaluator """rankbased""" +1013 87 dataset """kinships""" +1013 87 model """unstructuredmodel""" +1013 87 loss """softplus""" +1013 87 regularizer """no""" +1013 87 optimizer """adadelta""" +1013 87 training_loop """owa""" +1013 87 negative_sampler """basic""" +1013 87 evaluator """rankbased""" +1013 88 dataset """kinships""" +1013 88 model """unstructuredmodel""" +1013 88 loss """softplus""" +1013 88 regularizer """no""" +1013 88 optimizer """adadelta""" +1013 88 training_loop """owa""" +1013 88 negative_sampler """basic""" +1013 88 evaluator """rankbased""" +1013 89 dataset """kinships""" +1013 89 model 
"""unstructuredmodel""" +1013 89 loss """softplus""" +1013 89 regularizer """no""" +1013 89 optimizer """adadelta""" +1013 89 training_loop """owa""" +1013 89 negative_sampler """basic""" +1013 89 evaluator """rankbased""" +1013 90 dataset """kinships""" +1013 90 model """unstructuredmodel""" +1013 90 loss """softplus""" +1013 90 regularizer """no""" +1013 90 optimizer """adadelta""" +1013 90 training_loop """owa""" +1013 90 negative_sampler """basic""" +1013 90 evaluator """rankbased""" +1013 91 dataset """kinships""" +1013 91 model """unstructuredmodel""" +1013 91 loss """softplus""" +1013 91 regularizer """no""" +1013 91 optimizer """adadelta""" +1013 91 training_loop """owa""" +1013 91 negative_sampler """basic""" +1013 91 evaluator """rankbased""" +1013 92 dataset """kinships""" +1013 92 model """unstructuredmodel""" +1013 92 loss """softplus""" +1013 92 regularizer """no""" +1013 92 optimizer """adadelta""" +1013 92 training_loop """owa""" +1013 92 negative_sampler """basic""" +1013 92 evaluator """rankbased""" +1013 93 dataset """kinships""" +1013 93 model """unstructuredmodel""" +1013 93 loss """softplus""" +1013 93 regularizer """no""" +1013 93 optimizer """adadelta""" +1013 93 training_loop """owa""" +1013 93 negative_sampler """basic""" +1013 93 evaluator """rankbased""" +1013 94 dataset """kinships""" +1013 94 model """unstructuredmodel""" +1013 94 loss """softplus""" +1013 94 regularizer """no""" +1013 94 optimizer """adadelta""" +1013 94 training_loop """owa""" +1013 94 negative_sampler """basic""" +1013 94 evaluator """rankbased""" +1013 95 dataset """kinships""" +1013 95 model """unstructuredmodel""" +1013 95 loss """softplus""" +1013 95 regularizer """no""" +1013 95 optimizer """adadelta""" +1013 95 training_loop """owa""" +1013 95 negative_sampler """basic""" +1013 95 evaluator """rankbased""" +1013 96 dataset """kinships""" +1013 96 model """unstructuredmodel""" +1013 96 loss """softplus""" +1013 96 regularizer """no""" +1013 96 optimizer 
"""adadelta""" +1013 96 training_loop """owa""" +1013 96 negative_sampler """basic""" +1013 96 evaluator """rankbased""" +1013 97 dataset """kinships""" +1013 97 model """unstructuredmodel""" +1013 97 loss """softplus""" +1013 97 regularizer """no""" +1013 97 optimizer """adadelta""" +1013 97 training_loop """owa""" +1013 97 negative_sampler """basic""" +1013 97 evaluator """rankbased""" +1013 98 dataset """kinships""" +1013 98 model """unstructuredmodel""" +1013 98 loss """softplus""" +1013 98 regularizer """no""" +1013 98 optimizer """adadelta""" +1013 98 training_loop """owa""" +1013 98 negative_sampler """basic""" +1013 98 evaluator """rankbased""" +1013 99 dataset """kinships""" +1013 99 model """unstructuredmodel""" +1013 99 loss """softplus""" +1013 99 regularizer """no""" +1013 99 optimizer """adadelta""" +1013 99 training_loop """owa""" +1013 99 negative_sampler """basic""" +1013 99 evaluator """rankbased""" +1013 100 dataset """kinships""" +1013 100 model """unstructuredmodel""" +1013 100 loss """softplus""" +1013 100 regularizer """no""" +1013 100 optimizer """adadelta""" +1013 100 training_loop """owa""" +1013 100 negative_sampler """basic""" +1013 100 evaluator """rankbased""" +1014 1 model.embedding_dim 2.0 +1014 1 model.scoring_fct_norm 2.0 +1014 1 negative_sampler.num_negs_per_pos 68.0 +1014 1 training.batch_size 0.0 +1014 2 model.embedding_dim 2.0 +1014 2 model.scoring_fct_norm 2.0 +1014 2 negative_sampler.num_negs_per_pos 79.0 +1014 2 training.batch_size 2.0 +1014 3 model.embedding_dim 2.0 +1014 3 model.scoring_fct_norm 2.0 +1014 3 negative_sampler.num_negs_per_pos 5.0 +1014 3 training.batch_size 0.0 +1014 4 model.embedding_dim 1.0 +1014 4 model.scoring_fct_norm 1.0 +1014 4 negative_sampler.num_negs_per_pos 69.0 +1014 4 training.batch_size 2.0 +1014 5 model.embedding_dim 0.0 +1014 5 model.scoring_fct_norm 1.0 +1014 5 negative_sampler.num_negs_per_pos 77.0 +1014 5 training.batch_size 2.0 +1014 6 model.embedding_dim 0.0 +1014 6 
model.scoring_fct_norm 1.0 +1014 6 negative_sampler.num_negs_per_pos 2.0 +1014 6 training.batch_size 1.0 +1014 7 model.embedding_dim 2.0 +1014 7 model.scoring_fct_norm 1.0 +1014 7 negative_sampler.num_negs_per_pos 90.0 +1014 7 training.batch_size 0.0 +1014 8 model.embedding_dim 2.0 +1014 8 model.scoring_fct_norm 2.0 +1014 8 negative_sampler.num_negs_per_pos 41.0 +1014 8 training.batch_size 1.0 +1014 9 model.embedding_dim 2.0 +1014 9 model.scoring_fct_norm 2.0 +1014 9 negative_sampler.num_negs_per_pos 42.0 +1014 9 training.batch_size 1.0 +1014 10 model.embedding_dim 2.0 +1014 10 model.scoring_fct_norm 1.0 +1014 10 negative_sampler.num_negs_per_pos 11.0 +1014 10 training.batch_size 2.0 +1014 11 model.embedding_dim 2.0 +1014 11 model.scoring_fct_norm 2.0 +1014 11 negative_sampler.num_negs_per_pos 84.0 +1014 11 training.batch_size 0.0 +1014 12 model.embedding_dim 2.0 +1014 12 model.scoring_fct_norm 2.0 +1014 12 negative_sampler.num_negs_per_pos 48.0 +1014 12 training.batch_size 2.0 +1014 13 model.embedding_dim 2.0 +1014 13 model.scoring_fct_norm 2.0 +1014 13 negative_sampler.num_negs_per_pos 77.0 +1014 13 training.batch_size 0.0 +1014 14 model.embedding_dim 2.0 +1014 14 model.scoring_fct_norm 1.0 +1014 14 negative_sampler.num_negs_per_pos 88.0 +1014 14 training.batch_size 2.0 +1014 15 model.embedding_dim 2.0 +1014 15 model.scoring_fct_norm 1.0 +1014 15 negative_sampler.num_negs_per_pos 7.0 +1014 15 training.batch_size 1.0 +1014 16 model.embedding_dim 2.0 +1014 16 model.scoring_fct_norm 1.0 +1014 16 negative_sampler.num_negs_per_pos 53.0 +1014 16 training.batch_size 0.0 +1014 17 model.embedding_dim 1.0 +1014 17 model.scoring_fct_norm 2.0 +1014 17 negative_sampler.num_negs_per_pos 60.0 +1014 17 training.batch_size 1.0 +1014 18 model.embedding_dim 2.0 +1014 18 model.scoring_fct_norm 2.0 +1014 18 negative_sampler.num_negs_per_pos 0.0 +1014 18 training.batch_size 0.0 +1014 19 model.embedding_dim 1.0 +1014 19 model.scoring_fct_norm 1.0 +1014 19 
negative_sampler.num_negs_per_pos 65.0 +1014 19 training.batch_size 1.0 +1014 20 model.embedding_dim 1.0 +1014 20 model.scoring_fct_norm 2.0 +1014 20 negative_sampler.num_negs_per_pos 9.0 +1014 20 training.batch_size 1.0 +1014 21 model.embedding_dim 0.0 +1014 21 model.scoring_fct_norm 1.0 +1014 21 negative_sampler.num_negs_per_pos 58.0 +1014 21 training.batch_size 2.0 +1014 22 model.embedding_dim 1.0 +1014 22 model.scoring_fct_norm 2.0 +1014 22 negative_sampler.num_negs_per_pos 55.0 +1014 22 training.batch_size 0.0 +1014 23 model.embedding_dim 2.0 +1014 23 model.scoring_fct_norm 2.0 +1014 23 negative_sampler.num_negs_per_pos 47.0 +1014 23 training.batch_size 1.0 +1014 24 model.embedding_dim 2.0 +1014 24 model.scoring_fct_norm 2.0 +1014 24 negative_sampler.num_negs_per_pos 33.0 +1014 24 training.batch_size 2.0 +1014 25 model.embedding_dim 2.0 +1014 25 model.scoring_fct_norm 2.0 +1014 25 negative_sampler.num_negs_per_pos 14.0 +1014 25 training.batch_size 1.0 +1014 26 model.embedding_dim 1.0 +1014 26 model.scoring_fct_norm 2.0 +1014 26 negative_sampler.num_negs_per_pos 10.0 +1014 26 training.batch_size 1.0 +1014 27 model.embedding_dim 1.0 +1014 27 model.scoring_fct_norm 1.0 +1014 27 negative_sampler.num_negs_per_pos 96.0 +1014 27 training.batch_size 2.0 +1014 28 model.embedding_dim 0.0 +1014 28 model.scoring_fct_norm 2.0 +1014 28 negative_sampler.num_negs_per_pos 74.0 +1014 28 training.batch_size 0.0 +1014 29 model.embedding_dim 1.0 +1014 29 model.scoring_fct_norm 2.0 +1014 29 negative_sampler.num_negs_per_pos 1.0 +1014 29 training.batch_size 1.0 +1014 30 model.embedding_dim 1.0 +1014 30 model.scoring_fct_norm 1.0 +1014 30 negative_sampler.num_negs_per_pos 61.0 +1014 30 training.batch_size 0.0 +1014 31 model.embedding_dim 0.0 +1014 31 model.scoring_fct_norm 2.0 +1014 31 negative_sampler.num_negs_per_pos 74.0 +1014 31 training.batch_size 1.0 +1014 32 model.embedding_dim 0.0 +1014 32 model.scoring_fct_norm 2.0 +1014 32 negative_sampler.num_negs_per_pos 61.0 +1014 32 
training.batch_size 0.0 +1014 33 model.embedding_dim 2.0 +1014 33 model.scoring_fct_norm 1.0 +1014 33 negative_sampler.num_negs_per_pos 36.0 +1014 33 training.batch_size 1.0 +1014 34 model.embedding_dim 1.0 +1014 34 model.scoring_fct_norm 2.0 +1014 34 negative_sampler.num_negs_per_pos 59.0 +1014 34 training.batch_size 2.0 +1014 35 model.embedding_dim 0.0 +1014 35 model.scoring_fct_norm 2.0 +1014 35 negative_sampler.num_negs_per_pos 53.0 +1014 35 training.batch_size 2.0 +1014 36 model.embedding_dim 2.0 +1014 36 model.scoring_fct_norm 1.0 +1014 36 negative_sampler.num_negs_per_pos 42.0 +1014 36 training.batch_size 2.0 +1014 37 model.embedding_dim 1.0 +1014 37 model.scoring_fct_norm 2.0 +1014 37 negative_sampler.num_negs_per_pos 96.0 +1014 37 training.batch_size 1.0 +1014 38 model.embedding_dim 2.0 +1014 38 model.scoring_fct_norm 1.0 +1014 38 negative_sampler.num_negs_per_pos 34.0 +1014 38 training.batch_size 2.0 +1014 39 model.embedding_dim 1.0 +1014 39 model.scoring_fct_norm 2.0 +1014 39 negative_sampler.num_negs_per_pos 84.0 +1014 39 training.batch_size 1.0 +1014 40 model.embedding_dim 0.0 +1014 40 model.scoring_fct_norm 2.0 +1014 40 negative_sampler.num_negs_per_pos 45.0 +1014 40 training.batch_size 2.0 +1014 41 model.embedding_dim 2.0 +1014 41 model.scoring_fct_norm 1.0 +1014 41 negative_sampler.num_negs_per_pos 32.0 +1014 41 training.batch_size 0.0 +1014 42 model.embedding_dim 1.0 +1014 42 model.scoring_fct_norm 1.0 +1014 42 negative_sampler.num_negs_per_pos 89.0 +1014 42 training.batch_size 2.0 +1014 43 model.embedding_dim 1.0 +1014 43 model.scoring_fct_norm 1.0 +1014 43 negative_sampler.num_negs_per_pos 6.0 +1014 43 training.batch_size 2.0 +1014 44 model.embedding_dim 1.0 +1014 44 model.scoring_fct_norm 1.0 +1014 44 negative_sampler.num_negs_per_pos 58.0 +1014 44 training.batch_size 1.0 +1014 45 model.embedding_dim 1.0 +1014 45 model.scoring_fct_norm 2.0 +1014 45 negative_sampler.num_negs_per_pos 32.0 +1014 45 training.batch_size 2.0 +1014 46 
model.embedding_dim 1.0 +1014 46 model.scoring_fct_norm 1.0 +1014 46 negative_sampler.num_negs_per_pos 10.0 +1014 46 training.batch_size 2.0 +1014 47 model.embedding_dim 1.0 +1014 47 model.scoring_fct_norm 2.0 +1014 47 negative_sampler.num_negs_per_pos 45.0 +1014 47 training.batch_size 0.0 +1014 48 model.embedding_dim 2.0 +1014 48 model.scoring_fct_norm 1.0 +1014 48 negative_sampler.num_negs_per_pos 77.0 +1014 48 training.batch_size 1.0 +1014 49 model.embedding_dim 1.0 +1014 49 model.scoring_fct_norm 2.0 +1014 49 negative_sampler.num_negs_per_pos 10.0 +1014 49 training.batch_size 2.0 +1014 50 model.embedding_dim 0.0 +1014 50 model.scoring_fct_norm 2.0 +1014 50 negative_sampler.num_negs_per_pos 64.0 +1014 50 training.batch_size 1.0 +1014 51 model.embedding_dim 1.0 +1014 51 model.scoring_fct_norm 2.0 +1014 51 negative_sampler.num_negs_per_pos 13.0 +1014 51 training.batch_size 0.0 +1014 52 model.embedding_dim 2.0 +1014 52 model.scoring_fct_norm 1.0 +1014 52 negative_sampler.num_negs_per_pos 70.0 +1014 52 training.batch_size 0.0 +1014 53 model.embedding_dim 0.0 +1014 53 model.scoring_fct_norm 2.0 +1014 53 negative_sampler.num_negs_per_pos 38.0 +1014 53 training.batch_size 1.0 +1014 54 model.embedding_dim 0.0 +1014 54 model.scoring_fct_norm 2.0 +1014 54 negative_sampler.num_negs_per_pos 1.0 +1014 54 training.batch_size 1.0 +1014 55 model.embedding_dim 0.0 +1014 55 model.scoring_fct_norm 2.0 +1014 55 negative_sampler.num_negs_per_pos 56.0 +1014 55 training.batch_size 1.0 +1014 56 model.embedding_dim 1.0 +1014 56 model.scoring_fct_norm 1.0 +1014 56 negative_sampler.num_negs_per_pos 57.0 +1014 56 training.batch_size 2.0 +1014 57 model.embedding_dim 0.0 +1014 57 model.scoring_fct_norm 2.0 +1014 57 negative_sampler.num_negs_per_pos 90.0 +1014 57 training.batch_size 1.0 +1014 58 model.embedding_dim 1.0 +1014 58 model.scoring_fct_norm 1.0 +1014 58 negative_sampler.num_negs_per_pos 61.0 +1014 58 training.batch_size 0.0 +1014 59 model.embedding_dim 1.0 +1014 59 
model.scoring_fct_norm 1.0 +1014 59 negative_sampler.num_negs_per_pos 33.0 +1014 59 training.batch_size 0.0 +1014 60 model.embedding_dim 0.0 +1014 60 model.scoring_fct_norm 2.0 +1014 60 negative_sampler.num_negs_per_pos 3.0 +1014 60 training.batch_size 1.0 +1014 61 model.embedding_dim 2.0 +1014 61 model.scoring_fct_norm 2.0 +1014 61 negative_sampler.num_negs_per_pos 83.0 +1014 61 training.batch_size 1.0 +1014 62 model.embedding_dim 1.0 +1014 62 model.scoring_fct_norm 1.0 +1014 62 negative_sampler.num_negs_per_pos 28.0 +1014 62 training.batch_size 0.0 +1014 63 model.embedding_dim 1.0 +1014 63 model.scoring_fct_norm 1.0 +1014 63 negative_sampler.num_negs_per_pos 8.0 +1014 63 training.batch_size 2.0 +1014 64 model.embedding_dim 2.0 +1014 64 model.scoring_fct_norm 2.0 +1014 64 negative_sampler.num_negs_per_pos 3.0 +1014 64 training.batch_size 1.0 +1014 65 model.embedding_dim 1.0 +1014 65 model.scoring_fct_norm 2.0 +1014 65 negative_sampler.num_negs_per_pos 65.0 +1014 65 training.batch_size 1.0 +1014 66 model.embedding_dim 1.0 +1014 66 model.scoring_fct_norm 1.0 +1014 66 negative_sampler.num_negs_per_pos 40.0 +1014 66 training.batch_size 1.0 +1014 67 model.embedding_dim 2.0 +1014 67 model.scoring_fct_norm 2.0 +1014 67 negative_sampler.num_negs_per_pos 92.0 +1014 67 training.batch_size 1.0 +1014 68 model.embedding_dim 1.0 +1014 68 model.scoring_fct_norm 2.0 +1014 68 negative_sampler.num_negs_per_pos 31.0 +1014 68 training.batch_size 2.0 +1014 69 model.embedding_dim 2.0 +1014 69 model.scoring_fct_norm 1.0 +1014 69 negative_sampler.num_negs_per_pos 70.0 +1014 69 training.batch_size 2.0 +1014 70 model.embedding_dim 2.0 +1014 70 model.scoring_fct_norm 1.0 +1014 70 negative_sampler.num_negs_per_pos 39.0 +1014 70 training.batch_size 0.0 +1014 71 model.embedding_dim 2.0 +1014 71 model.scoring_fct_norm 1.0 +1014 71 negative_sampler.num_negs_per_pos 50.0 +1014 71 training.batch_size 0.0 +1014 72 model.embedding_dim 2.0 +1014 72 model.scoring_fct_norm 2.0 +1014 72 
negative_sampler.num_negs_per_pos 76.0 +1014 72 training.batch_size 0.0 +1014 73 model.embedding_dim 0.0 +1014 73 model.scoring_fct_norm 1.0 +1014 73 negative_sampler.num_negs_per_pos 81.0 +1014 73 training.batch_size 2.0 +1014 74 model.embedding_dim 0.0 +1014 74 model.scoring_fct_norm 2.0 +1014 74 negative_sampler.num_negs_per_pos 42.0 +1014 74 training.batch_size 1.0 +1014 75 model.embedding_dim 2.0 +1014 75 model.scoring_fct_norm 1.0 +1014 75 negative_sampler.num_negs_per_pos 13.0 +1014 75 training.batch_size 0.0 +1014 76 model.embedding_dim 1.0 +1014 76 model.scoring_fct_norm 1.0 +1014 76 negative_sampler.num_negs_per_pos 77.0 +1014 76 training.batch_size 0.0 +1014 77 model.embedding_dim 1.0 +1014 77 model.scoring_fct_norm 2.0 +1014 77 negative_sampler.num_negs_per_pos 4.0 +1014 77 training.batch_size 2.0 +1014 78 model.embedding_dim 2.0 +1014 78 model.scoring_fct_norm 1.0 +1014 78 negative_sampler.num_negs_per_pos 82.0 +1014 78 training.batch_size 1.0 +1014 79 model.embedding_dim 2.0 +1014 79 model.scoring_fct_norm 1.0 +1014 79 negative_sampler.num_negs_per_pos 52.0 +1014 79 training.batch_size 1.0 +1014 80 model.embedding_dim 2.0 +1014 80 model.scoring_fct_norm 2.0 +1014 80 negative_sampler.num_negs_per_pos 6.0 +1014 80 training.batch_size 1.0 +1014 81 model.embedding_dim 0.0 +1014 81 model.scoring_fct_norm 2.0 +1014 81 negative_sampler.num_negs_per_pos 12.0 +1014 81 training.batch_size 2.0 +1014 82 model.embedding_dim 0.0 +1014 82 model.scoring_fct_norm 1.0 +1014 82 negative_sampler.num_negs_per_pos 38.0 +1014 82 training.batch_size 2.0 +1014 83 model.embedding_dim 2.0 +1014 83 model.scoring_fct_norm 2.0 +1014 83 negative_sampler.num_negs_per_pos 16.0 +1014 83 training.batch_size 2.0 +1014 84 model.embedding_dim 2.0 +1014 84 model.scoring_fct_norm 2.0 +1014 84 negative_sampler.num_negs_per_pos 93.0 +1014 84 training.batch_size 2.0 +1014 85 model.embedding_dim 0.0 +1014 85 model.scoring_fct_norm 1.0 +1014 85 negative_sampler.num_negs_per_pos 35.0 +1014 85 
training.batch_size 2.0 +1014 86 model.embedding_dim 0.0 +1014 86 model.scoring_fct_norm 2.0 +1014 86 negative_sampler.num_negs_per_pos 69.0 +1014 86 training.batch_size 0.0 +1014 87 model.embedding_dim 1.0 +1014 87 model.scoring_fct_norm 1.0 +1014 87 negative_sampler.num_negs_per_pos 14.0 +1014 87 training.batch_size 1.0 +1014 88 model.embedding_dim 1.0 +1014 88 model.scoring_fct_norm 2.0 +1014 88 negative_sampler.num_negs_per_pos 40.0 +1014 88 training.batch_size 1.0 +1014 89 model.embedding_dim 1.0 +1014 89 model.scoring_fct_norm 1.0 +1014 89 negative_sampler.num_negs_per_pos 32.0 +1014 89 training.batch_size 0.0 +1014 90 model.embedding_dim 2.0 +1014 90 model.scoring_fct_norm 1.0 +1014 90 negative_sampler.num_negs_per_pos 42.0 +1014 90 training.batch_size 0.0 +1014 91 model.embedding_dim 2.0 +1014 91 model.scoring_fct_norm 1.0 +1014 91 negative_sampler.num_negs_per_pos 31.0 +1014 91 training.batch_size 2.0 +1014 92 model.embedding_dim 1.0 +1014 92 model.scoring_fct_norm 2.0 +1014 92 negative_sampler.num_negs_per_pos 18.0 +1014 92 training.batch_size 2.0 +1014 93 model.embedding_dim 0.0 +1014 93 model.scoring_fct_norm 1.0 +1014 93 negative_sampler.num_negs_per_pos 24.0 +1014 93 training.batch_size 2.0 +1014 94 model.embedding_dim 2.0 +1014 94 model.scoring_fct_norm 1.0 +1014 94 negative_sampler.num_negs_per_pos 73.0 +1014 94 training.batch_size 0.0 +1014 95 model.embedding_dim 0.0 +1014 95 model.scoring_fct_norm 1.0 +1014 95 negative_sampler.num_negs_per_pos 55.0 +1014 95 training.batch_size 1.0 +1014 96 model.embedding_dim 2.0 +1014 96 model.scoring_fct_norm 2.0 +1014 96 negative_sampler.num_negs_per_pos 57.0 +1014 96 training.batch_size 2.0 +1014 97 model.embedding_dim 2.0 +1014 97 model.scoring_fct_norm 1.0 +1014 97 negative_sampler.num_negs_per_pos 28.0 +1014 97 training.batch_size 2.0 +1014 98 model.embedding_dim 1.0 +1014 98 model.scoring_fct_norm 2.0 +1014 98 negative_sampler.num_negs_per_pos 60.0 +1014 98 training.batch_size 0.0 +1014 99 
model.embedding_dim 0.0 +1014 99 model.scoring_fct_norm 1.0 +1014 99 negative_sampler.num_negs_per_pos 36.0 +1014 99 training.batch_size 0.0 +1014 100 model.embedding_dim 0.0 +1014 100 model.scoring_fct_norm 1.0 +1014 100 negative_sampler.num_negs_per_pos 60.0 +1014 100 training.batch_size 2.0 +1014 1 dataset """kinships""" +1014 1 model """unstructuredmodel""" +1014 1 loss """bceaftersigmoid""" +1014 1 regularizer """no""" +1014 1 optimizer """adadelta""" +1014 1 training_loop """owa""" +1014 1 negative_sampler """basic""" +1014 1 evaluator """rankbased""" +1014 2 dataset """kinships""" +1014 2 model """unstructuredmodel""" +1014 2 loss """bceaftersigmoid""" +1014 2 regularizer """no""" +1014 2 optimizer """adadelta""" +1014 2 training_loop """owa""" +1014 2 negative_sampler """basic""" +1014 2 evaluator """rankbased""" +1014 3 dataset """kinships""" +1014 3 model """unstructuredmodel""" +1014 3 loss """bceaftersigmoid""" +1014 3 regularizer """no""" +1014 3 optimizer """adadelta""" +1014 3 training_loop """owa""" +1014 3 negative_sampler """basic""" +1014 3 evaluator """rankbased""" +1014 4 dataset """kinships""" +1014 4 model """unstructuredmodel""" +1014 4 loss """bceaftersigmoid""" +1014 4 regularizer """no""" +1014 4 optimizer """adadelta""" +1014 4 training_loop """owa""" +1014 4 negative_sampler """basic""" +1014 4 evaluator """rankbased""" +1014 5 dataset """kinships""" +1014 5 model """unstructuredmodel""" +1014 5 loss """bceaftersigmoid""" +1014 5 regularizer """no""" +1014 5 optimizer """adadelta""" +1014 5 training_loop """owa""" +1014 5 negative_sampler """basic""" +1014 5 evaluator """rankbased""" +1014 6 dataset """kinships""" +1014 6 model """unstructuredmodel""" +1014 6 loss """bceaftersigmoid""" +1014 6 regularizer """no""" +1014 6 optimizer """adadelta""" +1014 6 training_loop """owa""" +1014 6 negative_sampler """basic""" +1014 6 evaluator """rankbased""" +1014 7 dataset """kinships""" +1014 7 model """unstructuredmodel""" +1014 7 loss 
"""bceaftersigmoid""" +1014 7 regularizer """no""" +1014 7 optimizer """adadelta""" +1014 7 training_loop """owa""" +1014 7 negative_sampler """basic""" +1014 7 evaluator """rankbased""" +1014 8 dataset """kinships""" +1014 8 model """unstructuredmodel""" +1014 8 loss """bceaftersigmoid""" +1014 8 regularizer """no""" +1014 8 optimizer """adadelta""" +1014 8 training_loop """owa""" +1014 8 negative_sampler """basic""" +1014 8 evaluator """rankbased""" +1014 9 dataset """kinships""" +1014 9 model """unstructuredmodel""" +1014 9 loss """bceaftersigmoid""" +1014 9 regularizer """no""" +1014 9 optimizer """adadelta""" +1014 9 training_loop """owa""" +1014 9 negative_sampler """basic""" +1014 9 evaluator """rankbased""" +1014 10 dataset """kinships""" +1014 10 model """unstructuredmodel""" +1014 10 loss """bceaftersigmoid""" +1014 10 regularizer """no""" +1014 10 optimizer """adadelta""" +1014 10 training_loop """owa""" +1014 10 negative_sampler """basic""" +1014 10 evaluator """rankbased""" +1014 11 dataset """kinships""" +1014 11 model """unstructuredmodel""" +1014 11 loss """bceaftersigmoid""" +1014 11 regularizer """no""" +1014 11 optimizer """adadelta""" +1014 11 training_loop """owa""" +1014 11 negative_sampler """basic""" +1014 11 evaluator """rankbased""" +1014 12 dataset """kinships""" +1014 12 model """unstructuredmodel""" +1014 12 loss """bceaftersigmoid""" +1014 12 regularizer """no""" +1014 12 optimizer """adadelta""" +1014 12 training_loop """owa""" +1014 12 negative_sampler """basic""" +1014 12 evaluator """rankbased""" +1014 13 dataset """kinships""" +1014 13 model """unstructuredmodel""" +1014 13 loss """bceaftersigmoid""" +1014 13 regularizer """no""" +1014 13 optimizer """adadelta""" +1014 13 training_loop """owa""" +1014 13 negative_sampler """basic""" +1014 13 evaluator """rankbased""" +1014 14 dataset """kinships""" +1014 14 model """unstructuredmodel""" +1014 14 loss """bceaftersigmoid""" +1014 14 regularizer """no""" +1014 14 optimizer 
"""adadelta""" +1014 14 training_loop """owa""" +1014 14 negative_sampler """basic""" +1014 14 evaluator """rankbased""" +1014 15 dataset """kinships""" +1014 15 model """unstructuredmodel""" +1014 15 loss """bceaftersigmoid""" +1014 15 regularizer """no""" +1014 15 optimizer """adadelta""" +1014 15 training_loop """owa""" +1014 15 negative_sampler """basic""" +1014 15 evaluator """rankbased""" +1014 16 dataset """kinships""" +1014 16 model """unstructuredmodel""" +1014 16 loss """bceaftersigmoid""" +1014 16 regularizer """no""" +1014 16 optimizer """adadelta""" +1014 16 training_loop """owa""" +1014 16 negative_sampler """basic""" +1014 16 evaluator """rankbased""" +1014 17 dataset """kinships""" +1014 17 model """unstructuredmodel""" +1014 17 loss """bceaftersigmoid""" +1014 17 regularizer """no""" +1014 17 optimizer """adadelta""" +1014 17 training_loop """owa""" +1014 17 negative_sampler """basic""" +1014 17 evaluator """rankbased""" +1014 18 dataset """kinships""" +1014 18 model """unstructuredmodel""" +1014 18 loss """bceaftersigmoid""" +1014 18 regularizer """no""" +1014 18 optimizer """adadelta""" +1014 18 training_loop """owa""" +1014 18 negative_sampler """basic""" +1014 18 evaluator """rankbased""" +1014 19 dataset """kinships""" +1014 19 model """unstructuredmodel""" +1014 19 loss """bceaftersigmoid""" +1014 19 regularizer """no""" +1014 19 optimizer """adadelta""" +1014 19 training_loop """owa""" +1014 19 negative_sampler """basic""" +1014 19 evaluator """rankbased""" +1014 20 dataset """kinships""" +1014 20 model """unstructuredmodel""" +1014 20 loss """bceaftersigmoid""" +1014 20 regularizer """no""" +1014 20 optimizer """adadelta""" +1014 20 training_loop """owa""" +1014 20 negative_sampler """basic""" +1014 20 evaluator """rankbased""" +1014 21 dataset """kinships""" +1014 21 model """unstructuredmodel""" +1014 21 loss """bceaftersigmoid""" +1014 21 regularizer """no""" +1014 21 optimizer """adadelta""" +1014 21 training_loop """owa""" +1014 21 
negative_sampler """basic""" +1014 21 evaluator """rankbased""" +1014 22 dataset """kinships""" +1014 22 model """unstructuredmodel""" +1014 22 loss """bceaftersigmoid""" +1014 22 regularizer """no""" +1014 22 optimizer """adadelta""" +1014 22 training_loop """owa""" +1014 22 negative_sampler """basic""" +1014 22 evaluator """rankbased""" +1014 23 dataset """kinships""" +1014 23 model """unstructuredmodel""" +1014 23 loss """bceaftersigmoid""" +1014 23 regularizer """no""" +1014 23 optimizer """adadelta""" +1014 23 training_loop """owa""" +1014 23 negative_sampler """basic""" +1014 23 evaluator """rankbased""" +1014 24 dataset """kinships""" +1014 24 model """unstructuredmodel""" +1014 24 loss """bceaftersigmoid""" +1014 24 regularizer """no""" +1014 24 optimizer """adadelta""" +1014 24 training_loop """owa""" +1014 24 negative_sampler """basic""" +1014 24 evaluator """rankbased""" +1014 25 dataset """kinships""" +1014 25 model """unstructuredmodel""" +1014 25 loss """bceaftersigmoid""" +1014 25 regularizer """no""" +1014 25 optimizer """adadelta""" +1014 25 training_loop """owa""" +1014 25 negative_sampler """basic""" +1014 25 evaluator """rankbased""" +1014 26 dataset """kinships""" +1014 26 model """unstructuredmodel""" +1014 26 loss """bceaftersigmoid""" +1014 26 regularizer """no""" +1014 26 optimizer """adadelta""" +1014 26 training_loop """owa""" +1014 26 negative_sampler """basic""" +1014 26 evaluator """rankbased""" +1014 27 dataset """kinships""" +1014 27 model """unstructuredmodel""" +1014 27 loss """bceaftersigmoid""" +1014 27 regularizer """no""" +1014 27 optimizer """adadelta""" +1014 27 training_loop """owa""" +1014 27 negative_sampler """basic""" +1014 27 evaluator """rankbased""" +1014 28 dataset """kinships""" +1014 28 model """unstructuredmodel""" +1014 28 loss """bceaftersigmoid""" +1014 28 regularizer """no""" +1014 28 optimizer """adadelta""" +1014 28 training_loop """owa""" +1014 28 negative_sampler """basic""" +1014 28 evaluator 
"""rankbased""" +1014 29 dataset """kinships""" +1014 29 model """unstructuredmodel""" +1014 29 loss """bceaftersigmoid""" +1014 29 regularizer """no""" +1014 29 optimizer """adadelta""" +1014 29 training_loop """owa""" +1014 29 negative_sampler """basic""" +1014 29 evaluator """rankbased""" +1014 30 dataset """kinships""" +1014 30 model """unstructuredmodel""" +1014 30 loss """bceaftersigmoid""" +1014 30 regularizer """no""" +1014 30 optimizer """adadelta""" +1014 30 training_loop """owa""" +1014 30 negative_sampler """basic""" +1014 30 evaluator """rankbased""" +1014 31 dataset """kinships""" +1014 31 model """unstructuredmodel""" +1014 31 loss """bceaftersigmoid""" +1014 31 regularizer """no""" +1014 31 optimizer """adadelta""" +1014 31 training_loop """owa""" +1014 31 negative_sampler """basic""" +1014 31 evaluator """rankbased""" +1014 32 dataset """kinships""" +1014 32 model """unstructuredmodel""" +1014 32 loss """bceaftersigmoid""" +1014 32 regularizer """no""" +1014 32 optimizer """adadelta""" +1014 32 training_loop """owa""" +1014 32 negative_sampler """basic""" +1014 32 evaluator """rankbased""" +1014 33 dataset """kinships""" +1014 33 model """unstructuredmodel""" +1014 33 loss """bceaftersigmoid""" +1014 33 regularizer """no""" +1014 33 optimizer """adadelta""" +1014 33 training_loop """owa""" +1014 33 negative_sampler """basic""" +1014 33 evaluator """rankbased""" +1014 34 dataset """kinships""" +1014 34 model """unstructuredmodel""" +1014 34 loss """bceaftersigmoid""" +1014 34 regularizer """no""" +1014 34 optimizer """adadelta""" +1014 34 training_loop """owa""" +1014 34 negative_sampler """basic""" +1014 34 evaluator """rankbased""" +1014 35 dataset """kinships""" +1014 35 model """unstructuredmodel""" +1014 35 loss """bceaftersigmoid""" +1014 35 regularizer """no""" +1014 35 optimizer """adadelta""" +1014 35 training_loop """owa""" +1014 35 negative_sampler """basic""" +1014 35 evaluator """rankbased""" +1014 36 dataset """kinships""" +1014 36 
model """unstructuredmodel""" +1014 36 loss """bceaftersigmoid""" +1014 36 regularizer """no""" +1014 36 optimizer """adadelta""" +1014 36 training_loop """owa""" +1014 36 negative_sampler """basic""" +1014 36 evaluator """rankbased""" +1014 37 dataset """kinships""" +1014 37 model """unstructuredmodel""" +1014 37 loss """bceaftersigmoid""" +1014 37 regularizer """no""" +1014 37 optimizer """adadelta""" +1014 37 training_loop """owa""" +1014 37 negative_sampler """basic""" +1014 37 evaluator """rankbased""" +1014 38 dataset """kinships""" +1014 38 model """unstructuredmodel""" +1014 38 loss """bceaftersigmoid""" +1014 38 regularizer """no""" +1014 38 optimizer """adadelta""" +1014 38 training_loop """owa""" +1014 38 negative_sampler """basic""" +1014 38 evaluator """rankbased""" +1014 39 dataset """kinships""" +1014 39 model """unstructuredmodel""" +1014 39 loss """bceaftersigmoid""" +1014 39 regularizer """no""" +1014 39 optimizer """adadelta""" +1014 39 training_loop """owa""" +1014 39 negative_sampler """basic""" +1014 39 evaluator """rankbased""" +1014 40 dataset """kinships""" +1014 40 model """unstructuredmodel""" +1014 40 loss """bceaftersigmoid""" +1014 40 regularizer """no""" +1014 40 optimizer """adadelta""" +1014 40 training_loop """owa""" +1014 40 negative_sampler """basic""" +1014 40 evaluator """rankbased""" +1014 41 dataset """kinships""" +1014 41 model """unstructuredmodel""" +1014 41 loss """bceaftersigmoid""" +1014 41 regularizer """no""" +1014 41 optimizer """adadelta""" +1014 41 training_loop """owa""" +1014 41 negative_sampler """basic""" +1014 41 evaluator """rankbased""" +1014 42 dataset """kinships""" +1014 42 model """unstructuredmodel""" +1014 42 loss """bceaftersigmoid""" +1014 42 regularizer """no""" +1014 42 optimizer """adadelta""" +1014 42 training_loop """owa""" +1014 42 negative_sampler """basic""" +1014 42 evaluator """rankbased""" +1014 43 dataset """kinships""" +1014 43 model """unstructuredmodel""" +1014 43 loss 
"""bceaftersigmoid""" +1014 43 regularizer """no""" +1014 43 optimizer """adadelta""" +1014 43 training_loop """owa""" +1014 43 negative_sampler """basic""" +1014 43 evaluator """rankbased""" +1014 44 dataset """kinships""" +1014 44 model """unstructuredmodel""" +1014 44 loss """bceaftersigmoid""" +1014 44 regularizer """no""" +1014 44 optimizer """adadelta""" +1014 44 training_loop """owa""" +1014 44 negative_sampler """basic""" +1014 44 evaluator """rankbased""" +1014 45 dataset """kinships""" +1014 45 model """unstructuredmodel""" +1014 45 loss """bceaftersigmoid""" +1014 45 regularizer """no""" +1014 45 optimizer """adadelta""" +1014 45 training_loop """owa""" +1014 45 negative_sampler """basic""" +1014 45 evaluator """rankbased""" +1014 46 dataset """kinships""" +1014 46 model """unstructuredmodel""" +1014 46 loss """bceaftersigmoid""" +1014 46 regularizer """no""" +1014 46 optimizer """adadelta""" +1014 46 training_loop """owa""" +1014 46 negative_sampler """basic""" +1014 46 evaluator """rankbased""" +1014 47 dataset """kinships""" +1014 47 model """unstructuredmodel""" +1014 47 loss """bceaftersigmoid""" +1014 47 regularizer """no""" +1014 47 optimizer """adadelta""" +1014 47 training_loop """owa""" +1014 47 negative_sampler """basic""" +1014 47 evaluator """rankbased""" +1014 48 dataset """kinships""" +1014 48 model """unstructuredmodel""" +1014 48 loss """bceaftersigmoid""" +1014 48 regularizer """no""" +1014 48 optimizer """adadelta""" +1014 48 training_loop """owa""" +1014 48 negative_sampler """basic""" +1014 48 evaluator """rankbased""" +1014 49 dataset """kinships""" +1014 49 model """unstructuredmodel""" +1014 49 loss """bceaftersigmoid""" +1014 49 regularizer """no""" +1014 49 optimizer """adadelta""" +1014 49 training_loop """owa""" +1014 49 negative_sampler """basic""" +1014 49 evaluator """rankbased""" +1014 50 dataset """kinships""" +1014 50 model """unstructuredmodel""" +1014 50 loss """bceaftersigmoid""" +1014 50 regularizer """no""" +1014 50 
optimizer """adadelta""" +1014 50 training_loop """owa""" +1014 50 negative_sampler """basic""" +1014 50 evaluator """rankbased""" +1014 51 dataset """kinships""" +1014 51 model """unstructuredmodel""" +1014 51 loss """bceaftersigmoid""" +1014 51 regularizer """no""" +1014 51 optimizer """adadelta""" +1014 51 training_loop """owa""" +1014 51 negative_sampler """basic""" +1014 51 evaluator """rankbased""" +1014 52 dataset """kinships""" +1014 52 model """unstructuredmodel""" +1014 52 loss """bceaftersigmoid""" +1014 52 regularizer """no""" +1014 52 optimizer """adadelta""" +1014 52 training_loop """owa""" +1014 52 negative_sampler """basic""" +1014 52 evaluator """rankbased""" +1014 53 dataset """kinships""" +1014 53 model """unstructuredmodel""" +1014 53 loss """bceaftersigmoid""" +1014 53 regularizer """no""" +1014 53 optimizer """adadelta""" +1014 53 training_loop """owa""" +1014 53 negative_sampler """basic""" +1014 53 evaluator """rankbased""" +1014 54 dataset """kinships""" +1014 54 model """unstructuredmodel""" +1014 54 loss """bceaftersigmoid""" +1014 54 regularizer """no""" +1014 54 optimizer """adadelta""" +1014 54 training_loop """owa""" +1014 54 negative_sampler """basic""" +1014 54 evaluator """rankbased""" +1014 55 dataset """kinships""" +1014 55 model """unstructuredmodel""" +1014 55 loss """bceaftersigmoid""" +1014 55 regularizer """no""" +1014 55 optimizer """adadelta""" +1014 55 training_loop """owa""" +1014 55 negative_sampler """basic""" +1014 55 evaluator """rankbased""" +1014 56 dataset """kinships""" +1014 56 model """unstructuredmodel""" +1014 56 loss """bceaftersigmoid""" +1014 56 regularizer """no""" +1014 56 optimizer """adadelta""" +1014 56 training_loop """owa""" +1014 56 negative_sampler """basic""" +1014 56 evaluator """rankbased""" +1014 57 dataset """kinships""" +1014 57 model """unstructuredmodel""" +1014 57 loss """bceaftersigmoid""" +1014 57 regularizer """no""" +1014 57 optimizer """adadelta""" +1014 57 training_loop """owa""" 
+1014 57 negative_sampler """basic""" +1014 57 evaluator """rankbased""" +1014 58 dataset """kinships""" +1014 58 model """unstructuredmodel""" +1014 58 loss """bceaftersigmoid""" +1014 58 regularizer """no""" +1014 58 optimizer """adadelta""" +1014 58 training_loop """owa""" +1014 58 negative_sampler """basic""" +1014 58 evaluator """rankbased""" +1014 59 dataset """kinships""" +1014 59 model """unstructuredmodel""" +1014 59 loss """bceaftersigmoid""" +1014 59 regularizer """no""" +1014 59 optimizer """adadelta""" +1014 59 training_loop """owa""" +1014 59 negative_sampler """basic""" +1014 59 evaluator """rankbased""" +1014 60 dataset """kinships""" +1014 60 model """unstructuredmodel""" +1014 60 loss """bceaftersigmoid""" +1014 60 regularizer """no""" +1014 60 optimizer """adadelta""" +1014 60 training_loop """owa""" +1014 60 negative_sampler """basic""" +1014 60 evaluator """rankbased""" +1014 61 dataset """kinships""" +1014 61 model """unstructuredmodel""" +1014 61 loss """bceaftersigmoid""" +1014 61 regularizer """no""" +1014 61 optimizer """adadelta""" +1014 61 training_loop """owa""" +1014 61 negative_sampler """basic""" +1014 61 evaluator """rankbased""" +1014 62 dataset """kinships""" +1014 62 model """unstructuredmodel""" +1014 62 loss """bceaftersigmoid""" +1014 62 regularizer """no""" +1014 62 optimizer """adadelta""" +1014 62 training_loop """owa""" +1014 62 negative_sampler """basic""" +1014 62 evaluator """rankbased""" +1014 63 dataset """kinships""" +1014 63 model """unstructuredmodel""" +1014 63 loss """bceaftersigmoid""" +1014 63 regularizer """no""" +1014 63 optimizer """adadelta""" +1014 63 training_loop """owa""" +1014 63 negative_sampler """basic""" +1014 63 evaluator """rankbased""" +1014 64 dataset """kinships""" +1014 64 model """unstructuredmodel""" +1014 64 loss """bceaftersigmoid""" +1014 64 regularizer """no""" +1014 64 optimizer """adadelta""" +1014 64 training_loop """owa""" +1014 64 negative_sampler """basic""" +1014 64 evaluator 
"""rankbased""" +1014 65 dataset """kinships""" +1014 65 model """unstructuredmodel""" +1014 65 loss """bceaftersigmoid""" +1014 65 regularizer """no""" +1014 65 optimizer """adadelta""" +1014 65 training_loop """owa""" +1014 65 negative_sampler """basic""" +1014 65 evaluator """rankbased""" +1014 66 dataset """kinships""" +1014 66 model """unstructuredmodel""" +1014 66 loss """bceaftersigmoid""" +1014 66 regularizer """no""" +1014 66 optimizer """adadelta""" +1014 66 training_loop """owa""" +1014 66 negative_sampler """basic""" +1014 66 evaluator """rankbased""" +1014 67 dataset """kinships""" +1014 67 model """unstructuredmodel""" +1014 67 loss """bceaftersigmoid""" +1014 67 regularizer """no""" +1014 67 optimizer """adadelta""" +1014 67 training_loop """owa""" +1014 67 negative_sampler """basic""" +1014 67 evaluator """rankbased""" +1014 68 dataset """kinships""" +1014 68 model """unstructuredmodel""" +1014 68 loss """bceaftersigmoid""" +1014 68 regularizer """no""" +1014 68 optimizer """adadelta""" +1014 68 training_loop """owa""" +1014 68 negative_sampler """basic""" +1014 68 evaluator """rankbased""" +1014 69 dataset """kinships""" +1014 69 model """unstructuredmodel""" +1014 69 loss """bceaftersigmoid""" +1014 69 regularizer """no""" +1014 69 optimizer """adadelta""" +1014 69 training_loop """owa""" +1014 69 negative_sampler """basic""" +1014 69 evaluator """rankbased""" +1014 70 dataset """kinships""" +1014 70 model """unstructuredmodel""" +1014 70 loss """bceaftersigmoid""" +1014 70 regularizer """no""" +1014 70 optimizer """adadelta""" +1014 70 training_loop """owa""" +1014 70 negative_sampler """basic""" +1014 70 evaluator """rankbased""" +1014 71 dataset """kinships""" +1014 71 model """unstructuredmodel""" +1014 71 loss """bceaftersigmoid""" +1014 71 regularizer """no""" +1014 71 optimizer """adadelta""" +1014 71 training_loop """owa""" +1014 71 negative_sampler """basic""" +1014 71 evaluator """rankbased""" +1014 72 dataset """kinships""" +1014 72 
model """unstructuredmodel""" +1014 72 loss """bceaftersigmoid""" +1014 72 regularizer """no""" +1014 72 optimizer """adadelta""" +1014 72 training_loop """owa""" +1014 72 negative_sampler """basic""" +1014 72 evaluator """rankbased""" +1014 73 dataset """kinships""" +1014 73 model """unstructuredmodel""" +1014 73 loss """bceaftersigmoid""" +1014 73 regularizer """no""" +1014 73 optimizer """adadelta""" +1014 73 training_loop """owa""" +1014 73 negative_sampler """basic""" +1014 73 evaluator """rankbased""" +1014 74 dataset """kinships""" +1014 74 model """unstructuredmodel""" +1014 74 loss """bceaftersigmoid""" +1014 74 regularizer """no""" +1014 74 optimizer """adadelta""" +1014 74 training_loop """owa""" +1014 74 negative_sampler """basic""" +1014 74 evaluator """rankbased""" +1014 75 dataset """kinships""" +1014 75 model """unstructuredmodel""" +1014 75 loss """bceaftersigmoid""" +1014 75 regularizer """no""" +1014 75 optimizer """adadelta""" +1014 75 training_loop """owa""" +1014 75 negative_sampler """basic""" +1014 75 evaluator """rankbased""" +1014 76 dataset """kinships""" +1014 76 model """unstructuredmodel""" +1014 76 loss """bceaftersigmoid""" +1014 76 regularizer """no""" +1014 76 optimizer """adadelta""" +1014 76 training_loop """owa""" +1014 76 negative_sampler """basic""" +1014 76 evaluator """rankbased""" +1014 77 dataset """kinships""" +1014 77 model """unstructuredmodel""" +1014 77 loss """bceaftersigmoid""" +1014 77 regularizer """no""" +1014 77 optimizer """adadelta""" +1014 77 training_loop """owa""" +1014 77 negative_sampler """basic""" +1014 77 evaluator """rankbased""" +1014 78 dataset """kinships""" +1014 78 model """unstructuredmodel""" +1014 78 loss """bceaftersigmoid""" +1014 78 regularizer """no""" +1014 78 optimizer """adadelta""" +1014 78 training_loop """owa""" +1014 78 negative_sampler """basic""" +1014 78 evaluator """rankbased""" +1014 79 dataset """kinships""" +1014 79 model """unstructuredmodel""" +1014 79 loss 
"""bceaftersigmoid""" +1014 79 regularizer """no""" +1014 79 optimizer """adadelta""" +1014 79 training_loop """owa""" +1014 79 negative_sampler """basic""" +1014 79 evaluator """rankbased""" +1014 80 dataset """kinships""" +1014 80 model """unstructuredmodel""" +1014 80 loss """bceaftersigmoid""" +1014 80 regularizer """no""" +1014 80 optimizer """adadelta""" +1014 80 training_loop """owa""" +1014 80 negative_sampler """basic""" +1014 80 evaluator """rankbased""" +1014 81 dataset """kinships""" +1014 81 model """unstructuredmodel""" +1014 81 loss """bceaftersigmoid""" +1014 81 regularizer """no""" +1014 81 optimizer """adadelta""" +1014 81 training_loop """owa""" +1014 81 negative_sampler """basic""" +1014 81 evaluator """rankbased""" +1014 82 dataset """kinships""" +1014 82 model """unstructuredmodel""" +1014 82 loss """bceaftersigmoid""" +1014 82 regularizer """no""" +1014 82 optimizer """adadelta""" +1014 82 training_loop """owa""" +1014 82 negative_sampler """basic""" +1014 82 evaluator """rankbased""" +1014 83 dataset """kinships""" +1014 83 model """unstructuredmodel""" +1014 83 loss """bceaftersigmoid""" +1014 83 regularizer """no""" +1014 83 optimizer """adadelta""" +1014 83 training_loop """owa""" +1014 83 negative_sampler """basic""" +1014 83 evaluator """rankbased""" +1014 84 dataset """kinships""" +1014 84 model """unstructuredmodel""" +1014 84 loss """bceaftersigmoid""" +1014 84 regularizer """no""" +1014 84 optimizer """adadelta""" +1014 84 training_loop """owa""" +1014 84 negative_sampler """basic""" +1014 84 evaluator """rankbased""" +1014 85 dataset """kinships""" +1014 85 model """unstructuredmodel""" +1014 85 loss """bceaftersigmoid""" +1014 85 regularizer """no""" +1014 85 optimizer """adadelta""" +1014 85 training_loop """owa""" +1014 85 negative_sampler """basic""" +1014 85 evaluator """rankbased""" +1014 86 dataset """kinships""" +1014 86 model """unstructuredmodel""" +1014 86 loss """bceaftersigmoid""" +1014 86 regularizer """no""" +1014 86 
optimizer """adadelta""" +1014 86 training_loop """owa""" +1014 86 negative_sampler """basic""" +1014 86 evaluator """rankbased""" +1014 87 dataset """kinships""" +1014 87 model """unstructuredmodel""" +1014 87 loss """bceaftersigmoid""" +1014 87 regularizer """no""" +1014 87 optimizer """adadelta""" +1014 87 training_loop """owa""" +1014 87 negative_sampler """basic""" +1014 87 evaluator """rankbased""" +1014 88 dataset """kinships""" +1014 88 model """unstructuredmodel""" +1014 88 loss """bceaftersigmoid""" +1014 88 regularizer """no""" +1014 88 optimizer """adadelta""" +1014 88 training_loop """owa""" +1014 88 negative_sampler """basic""" +1014 88 evaluator """rankbased""" +1014 89 dataset """kinships""" +1014 89 model """unstructuredmodel""" +1014 89 loss """bceaftersigmoid""" +1014 89 regularizer """no""" +1014 89 optimizer """adadelta""" +1014 89 training_loop """owa""" +1014 89 negative_sampler """basic""" +1014 89 evaluator """rankbased""" +1014 90 dataset """kinships""" +1014 90 model """unstructuredmodel""" +1014 90 loss """bceaftersigmoid""" +1014 90 regularizer """no""" +1014 90 optimizer """adadelta""" +1014 90 training_loop """owa""" +1014 90 negative_sampler """basic""" +1014 90 evaluator """rankbased""" +1014 91 dataset """kinships""" +1014 91 model """unstructuredmodel""" +1014 91 loss """bceaftersigmoid""" +1014 91 regularizer """no""" +1014 91 optimizer """adadelta""" +1014 91 training_loop """owa""" +1014 91 negative_sampler """basic""" +1014 91 evaluator """rankbased""" +1014 92 dataset """kinships""" +1014 92 model """unstructuredmodel""" +1014 92 loss """bceaftersigmoid""" +1014 92 regularizer """no""" +1014 92 optimizer """adadelta""" +1014 92 training_loop """owa""" +1014 92 negative_sampler """basic""" +1014 92 evaluator """rankbased""" +1014 93 dataset """kinships""" +1014 93 model """unstructuredmodel""" +1014 93 loss """bceaftersigmoid""" +1014 93 regularizer """no""" +1014 93 optimizer """adadelta""" +1014 93 training_loop """owa""" 
+1014 93 negative_sampler """basic""" +1014 93 evaluator """rankbased""" +1014 94 dataset """kinships""" +1014 94 model """unstructuredmodel""" +1014 94 loss """bceaftersigmoid""" +1014 94 regularizer """no""" +1014 94 optimizer """adadelta""" +1014 94 training_loop """owa""" +1014 94 negative_sampler """basic""" +1014 94 evaluator """rankbased""" +1014 95 dataset """kinships""" +1014 95 model """unstructuredmodel""" +1014 95 loss """bceaftersigmoid""" +1014 95 regularizer """no""" +1014 95 optimizer """adadelta""" +1014 95 training_loop """owa""" +1014 95 negative_sampler """basic""" +1014 95 evaluator """rankbased""" +1014 96 dataset """kinships""" +1014 96 model """unstructuredmodel""" +1014 96 loss """bceaftersigmoid""" +1014 96 regularizer """no""" +1014 96 optimizer """adadelta""" +1014 96 training_loop """owa""" +1014 96 negative_sampler """basic""" +1014 96 evaluator """rankbased""" +1014 97 dataset """kinships""" +1014 97 model """unstructuredmodel""" +1014 97 loss """bceaftersigmoid""" +1014 97 regularizer """no""" +1014 97 optimizer """adadelta""" +1014 97 training_loop """owa""" +1014 97 negative_sampler """basic""" +1014 97 evaluator """rankbased""" +1014 98 dataset """kinships""" +1014 98 model """unstructuredmodel""" +1014 98 loss """bceaftersigmoid""" +1014 98 regularizer """no""" +1014 98 optimizer """adadelta""" +1014 98 training_loop """owa""" +1014 98 negative_sampler """basic""" +1014 98 evaluator """rankbased""" +1014 99 dataset """kinships""" +1014 99 model """unstructuredmodel""" +1014 99 loss """bceaftersigmoid""" +1014 99 regularizer """no""" +1014 99 optimizer """adadelta""" +1014 99 training_loop """owa""" +1014 99 negative_sampler """basic""" +1014 99 evaluator """rankbased""" +1014 100 dataset """kinships""" +1014 100 model """unstructuredmodel""" +1014 100 loss """bceaftersigmoid""" +1014 100 regularizer """no""" +1014 100 optimizer """adadelta""" +1014 100 training_loop """owa""" +1014 100 negative_sampler """basic""" +1014 100 
evaluator """rankbased""" +1015 1 model.embedding_dim 2.0 +1015 1 model.scoring_fct_norm 2.0 +1015 1 negative_sampler.num_negs_per_pos 86.0 +1015 1 training.batch_size 0.0 +1015 2 model.embedding_dim 1.0 +1015 2 model.scoring_fct_norm 2.0 +1015 2 negative_sampler.num_negs_per_pos 56.0 +1015 2 training.batch_size 0.0 +1015 3 model.embedding_dim 0.0 +1015 3 model.scoring_fct_norm 2.0 +1015 3 negative_sampler.num_negs_per_pos 57.0 +1015 3 training.batch_size 1.0 +1015 4 model.embedding_dim 0.0 +1015 4 model.scoring_fct_norm 2.0 +1015 4 negative_sampler.num_negs_per_pos 24.0 +1015 4 training.batch_size 1.0 +1015 5 model.embedding_dim 1.0 +1015 5 model.scoring_fct_norm 2.0 +1015 5 negative_sampler.num_negs_per_pos 47.0 +1015 5 training.batch_size 2.0 +1015 6 model.embedding_dim 1.0 +1015 6 model.scoring_fct_norm 2.0 +1015 6 negative_sampler.num_negs_per_pos 27.0 +1015 6 training.batch_size 1.0 +1015 7 model.embedding_dim 1.0 +1015 7 model.scoring_fct_norm 2.0 +1015 7 negative_sampler.num_negs_per_pos 76.0 +1015 7 training.batch_size 1.0 +1015 8 model.embedding_dim 1.0 +1015 8 model.scoring_fct_norm 1.0 +1015 8 negative_sampler.num_negs_per_pos 94.0 +1015 8 training.batch_size 0.0 +1015 9 model.embedding_dim 1.0 +1015 9 model.scoring_fct_norm 1.0 +1015 9 negative_sampler.num_negs_per_pos 91.0 +1015 9 training.batch_size 1.0 +1015 10 model.embedding_dim 2.0 +1015 10 model.scoring_fct_norm 2.0 +1015 10 negative_sampler.num_negs_per_pos 68.0 +1015 10 training.batch_size 1.0 +1015 11 model.embedding_dim 2.0 +1015 11 model.scoring_fct_norm 2.0 +1015 11 negative_sampler.num_negs_per_pos 82.0 +1015 11 training.batch_size 1.0 +1015 12 model.embedding_dim 2.0 +1015 12 model.scoring_fct_norm 2.0 +1015 12 negative_sampler.num_negs_per_pos 53.0 +1015 12 training.batch_size 2.0 +1015 13 model.embedding_dim 2.0 +1015 13 model.scoring_fct_norm 2.0 +1015 13 negative_sampler.num_negs_per_pos 64.0 +1015 13 training.batch_size 1.0 +1015 14 model.embedding_dim 2.0 +1015 14 
model.scoring_fct_norm 2.0 +1015 14 negative_sampler.num_negs_per_pos 35.0 +1015 14 training.batch_size 0.0 +1015 15 model.embedding_dim 0.0 +1015 15 model.scoring_fct_norm 1.0 +1015 15 negative_sampler.num_negs_per_pos 31.0 +1015 15 training.batch_size 2.0 +1015 16 model.embedding_dim 0.0 +1015 16 model.scoring_fct_norm 1.0 +1015 16 negative_sampler.num_negs_per_pos 10.0 +1015 16 training.batch_size 0.0 +1015 17 model.embedding_dim 0.0 +1015 17 model.scoring_fct_norm 1.0 +1015 17 negative_sampler.num_negs_per_pos 19.0 +1015 17 training.batch_size 0.0 +1015 18 model.embedding_dim 2.0 +1015 18 model.scoring_fct_norm 1.0 +1015 18 negative_sampler.num_negs_per_pos 99.0 +1015 18 training.batch_size 2.0 +1015 19 model.embedding_dim 2.0 +1015 19 model.scoring_fct_norm 1.0 +1015 19 negative_sampler.num_negs_per_pos 34.0 +1015 19 training.batch_size 0.0 +1015 20 model.embedding_dim 1.0 +1015 20 model.scoring_fct_norm 1.0 +1015 20 negative_sampler.num_negs_per_pos 1.0 +1015 20 training.batch_size 2.0 +1015 21 model.embedding_dim 0.0 +1015 21 model.scoring_fct_norm 2.0 +1015 21 negative_sampler.num_negs_per_pos 25.0 +1015 21 training.batch_size 0.0 +1015 22 model.embedding_dim 1.0 +1015 22 model.scoring_fct_norm 1.0 +1015 22 negative_sampler.num_negs_per_pos 91.0 +1015 22 training.batch_size 0.0 +1015 23 model.embedding_dim 0.0 +1015 23 model.scoring_fct_norm 2.0 +1015 23 negative_sampler.num_negs_per_pos 41.0 +1015 23 training.batch_size 2.0 +1015 24 model.embedding_dim 1.0 +1015 24 model.scoring_fct_norm 1.0 +1015 24 negative_sampler.num_negs_per_pos 14.0 +1015 24 training.batch_size 1.0 +1015 25 model.embedding_dim 2.0 +1015 25 model.scoring_fct_norm 1.0 +1015 25 negative_sampler.num_negs_per_pos 50.0 +1015 25 training.batch_size 0.0 +1015 26 model.embedding_dim 2.0 +1015 26 model.scoring_fct_norm 2.0 +1015 26 negative_sampler.num_negs_per_pos 73.0 +1015 26 training.batch_size 2.0 +1015 27 model.embedding_dim 1.0 +1015 27 model.scoring_fct_norm 2.0 +1015 27 
negative_sampler.num_negs_per_pos 15.0 +1015 27 training.batch_size 0.0 +1015 28 model.embedding_dim 0.0 +1015 28 model.scoring_fct_norm 1.0 +1015 28 negative_sampler.num_negs_per_pos 25.0 +1015 28 training.batch_size 0.0 +1015 29 model.embedding_dim 2.0 +1015 29 model.scoring_fct_norm 1.0 +1015 29 negative_sampler.num_negs_per_pos 67.0 +1015 29 training.batch_size 2.0 +1015 30 model.embedding_dim 2.0 +1015 30 model.scoring_fct_norm 2.0 +1015 30 negative_sampler.num_negs_per_pos 2.0 +1015 30 training.batch_size 0.0 +1015 31 model.embedding_dim 0.0 +1015 31 model.scoring_fct_norm 2.0 +1015 31 negative_sampler.num_negs_per_pos 61.0 +1015 31 training.batch_size 1.0 +1015 32 model.embedding_dim 2.0 +1015 32 model.scoring_fct_norm 1.0 +1015 32 negative_sampler.num_negs_per_pos 66.0 +1015 32 training.batch_size 2.0 +1015 33 model.embedding_dim 0.0 +1015 33 model.scoring_fct_norm 1.0 +1015 33 negative_sampler.num_negs_per_pos 11.0 +1015 33 training.batch_size 1.0 +1015 34 model.embedding_dim 1.0 +1015 34 model.scoring_fct_norm 2.0 +1015 34 negative_sampler.num_negs_per_pos 74.0 +1015 34 training.batch_size 0.0 +1015 35 model.embedding_dim 2.0 +1015 35 model.scoring_fct_norm 1.0 +1015 35 negative_sampler.num_negs_per_pos 44.0 +1015 35 training.batch_size 1.0 +1015 36 model.embedding_dim 0.0 +1015 36 model.scoring_fct_norm 1.0 +1015 36 negative_sampler.num_negs_per_pos 89.0 +1015 36 training.batch_size 1.0 +1015 37 model.embedding_dim 0.0 +1015 37 model.scoring_fct_norm 2.0 +1015 37 negative_sampler.num_negs_per_pos 99.0 +1015 37 training.batch_size 0.0 +1015 38 model.embedding_dim 2.0 +1015 38 model.scoring_fct_norm 2.0 +1015 38 negative_sampler.num_negs_per_pos 10.0 +1015 38 training.batch_size 1.0 +1015 39 model.embedding_dim 1.0 +1015 39 model.scoring_fct_norm 1.0 +1015 39 negative_sampler.num_negs_per_pos 81.0 +1015 39 training.batch_size 2.0 +1015 40 model.embedding_dim 0.0 +1015 40 model.scoring_fct_norm 2.0 +1015 40 negative_sampler.num_negs_per_pos 63.0 +1015 40 
training.batch_size 1.0 +1015 41 model.embedding_dim 1.0 +1015 41 model.scoring_fct_norm 1.0 +1015 41 negative_sampler.num_negs_per_pos 0.0 +1015 41 training.batch_size 0.0 +1015 42 model.embedding_dim 2.0 +1015 42 model.scoring_fct_norm 1.0 +1015 42 negative_sampler.num_negs_per_pos 82.0 +1015 42 training.batch_size 0.0 +1015 43 model.embedding_dim 1.0 +1015 43 model.scoring_fct_norm 2.0 +1015 43 negative_sampler.num_negs_per_pos 31.0 +1015 43 training.batch_size 1.0 +1015 44 model.embedding_dim 1.0 +1015 44 model.scoring_fct_norm 1.0 +1015 44 negative_sampler.num_negs_per_pos 99.0 +1015 44 training.batch_size 0.0 +1015 45 model.embedding_dim 0.0 +1015 45 model.scoring_fct_norm 1.0 +1015 45 negative_sampler.num_negs_per_pos 50.0 +1015 45 training.batch_size 0.0 +1015 46 model.embedding_dim 0.0 +1015 46 model.scoring_fct_norm 1.0 +1015 46 negative_sampler.num_negs_per_pos 42.0 +1015 46 training.batch_size 0.0 +1015 47 model.embedding_dim 2.0 +1015 47 model.scoring_fct_norm 2.0 +1015 47 negative_sampler.num_negs_per_pos 38.0 +1015 47 training.batch_size 2.0 +1015 48 model.embedding_dim 1.0 +1015 48 model.scoring_fct_norm 2.0 +1015 48 negative_sampler.num_negs_per_pos 14.0 +1015 48 training.batch_size 1.0 +1015 49 model.embedding_dim 2.0 +1015 49 model.scoring_fct_norm 1.0 +1015 49 negative_sampler.num_negs_per_pos 0.0 +1015 49 training.batch_size 1.0 +1015 50 model.embedding_dim 1.0 +1015 50 model.scoring_fct_norm 1.0 +1015 50 negative_sampler.num_negs_per_pos 62.0 +1015 50 training.batch_size 1.0 +1015 51 model.embedding_dim 2.0 +1015 51 model.scoring_fct_norm 1.0 +1015 51 negative_sampler.num_negs_per_pos 91.0 +1015 51 training.batch_size 0.0 +1015 52 model.embedding_dim 2.0 +1015 52 model.scoring_fct_norm 2.0 +1015 52 negative_sampler.num_negs_per_pos 95.0 +1015 52 training.batch_size 1.0 +1015 53 model.embedding_dim 0.0 +1015 53 model.scoring_fct_norm 2.0 +1015 53 negative_sampler.num_negs_per_pos 60.0 +1015 53 training.batch_size 0.0 +1015 54 
model.embedding_dim 1.0 +1015 54 model.scoring_fct_norm 2.0 +1015 54 negative_sampler.num_negs_per_pos 56.0 +1015 54 training.batch_size 0.0 +1015 55 model.embedding_dim 0.0 +1015 55 model.scoring_fct_norm 2.0 +1015 55 negative_sampler.num_negs_per_pos 86.0 +1015 55 training.batch_size 2.0 +1015 56 model.embedding_dim 1.0 +1015 56 model.scoring_fct_norm 1.0 +1015 56 negative_sampler.num_negs_per_pos 19.0 +1015 56 training.batch_size 2.0 +1015 57 model.embedding_dim 2.0 +1015 57 model.scoring_fct_norm 2.0 +1015 57 negative_sampler.num_negs_per_pos 29.0 +1015 57 training.batch_size 2.0 +1015 58 model.embedding_dim 0.0 +1015 58 model.scoring_fct_norm 2.0 +1015 58 negative_sampler.num_negs_per_pos 2.0 +1015 58 training.batch_size 0.0 +1015 59 model.embedding_dim 2.0 +1015 59 model.scoring_fct_norm 2.0 +1015 59 negative_sampler.num_negs_per_pos 32.0 +1015 59 training.batch_size 0.0 +1015 60 model.embedding_dim 2.0 +1015 60 model.scoring_fct_norm 2.0 +1015 60 negative_sampler.num_negs_per_pos 50.0 +1015 60 training.batch_size 2.0 +1015 61 model.embedding_dim 0.0 +1015 61 model.scoring_fct_norm 1.0 +1015 61 negative_sampler.num_negs_per_pos 44.0 +1015 61 training.batch_size 2.0 +1015 62 model.embedding_dim 0.0 +1015 62 model.scoring_fct_norm 2.0 +1015 62 negative_sampler.num_negs_per_pos 56.0 +1015 62 training.batch_size 2.0 +1015 63 model.embedding_dim 2.0 +1015 63 model.scoring_fct_norm 1.0 +1015 63 negative_sampler.num_negs_per_pos 7.0 +1015 63 training.batch_size 0.0 +1015 64 model.embedding_dim 1.0 +1015 64 model.scoring_fct_norm 1.0 +1015 64 negative_sampler.num_negs_per_pos 27.0 +1015 64 training.batch_size 2.0 +1015 65 model.embedding_dim 0.0 +1015 65 model.scoring_fct_norm 1.0 +1015 65 negative_sampler.num_negs_per_pos 50.0 +1015 65 training.batch_size 2.0 +1015 66 model.embedding_dim 0.0 +1015 66 model.scoring_fct_norm 2.0 +1015 66 negative_sampler.num_negs_per_pos 19.0 +1015 66 training.batch_size 2.0 +1015 67 model.embedding_dim 0.0 +1015 67 
model.scoring_fct_norm 1.0 +1015 67 negative_sampler.num_negs_per_pos 34.0 +1015 67 training.batch_size 2.0 +1015 68 model.embedding_dim 1.0 +1015 68 model.scoring_fct_norm 2.0 +1015 68 negative_sampler.num_negs_per_pos 48.0 +1015 68 training.batch_size 1.0 +1015 69 model.embedding_dim 1.0 +1015 69 model.scoring_fct_norm 2.0 +1015 69 negative_sampler.num_negs_per_pos 33.0 +1015 69 training.batch_size 1.0 +1015 70 model.embedding_dim 1.0 +1015 70 model.scoring_fct_norm 2.0 +1015 70 negative_sampler.num_negs_per_pos 21.0 +1015 70 training.batch_size 2.0 +1015 71 model.embedding_dim 0.0 +1015 71 model.scoring_fct_norm 2.0 +1015 71 negative_sampler.num_negs_per_pos 15.0 +1015 71 training.batch_size 0.0 +1015 72 model.embedding_dim 1.0 +1015 72 model.scoring_fct_norm 1.0 +1015 72 negative_sampler.num_negs_per_pos 78.0 +1015 72 training.batch_size 1.0 +1015 73 model.embedding_dim 1.0 +1015 73 model.scoring_fct_norm 1.0 +1015 73 negative_sampler.num_negs_per_pos 85.0 +1015 73 training.batch_size 2.0 +1015 74 model.embedding_dim 2.0 +1015 74 model.scoring_fct_norm 2.0 +1015 74 negative_sampler.num_negs_per_pos 10.0 +1015 74 training.batch_size 0.0 +1015 75 model.embedding_dim 1.0 +1015 75 model.scoring_fct_norm 2.0 +1015 75 negative_sampler.num_negs_per_pos 6.0 +1015 75 training.batch_size 2.0 +1015 76 model.embedding_dim 1.0 +1015 76 model.scoring_fct_norm 2.0 +1015 76 negative_sampler.num_negs_per_pos 61.0 +1015 76 training.batch_size 1.0 +1015 77 model.embedding_dim 0.0 +1015 77 model.scoring_fct_norm 1.0 +1015 77 negative_sampler.num_negs_per_pos 87.0 +1015 77 training.batch_size 2.0 +1015 78 model.embedding_dim 0.0 +1015 78 model.scoring_fct_norm 2.0 +1015 78 negative_sampler.num_negs_per_pos 62.0 +1015 78 training.batch_size 0.0 +1015 79 model.embedding_dim 1.0 +1015 79 model.scoring_fct_norm 1.0 +1015 79 negative_sampler.num_negs_per_pos 35.0 +1015 79 training.batch_size 1.0 +1015 80 model.embedding_dim 0.0 +1015 80 model.scoring_fct_norm 1.0 +1015 80 
negative_sampler.num_negs_per_pos 64.0 +1015 80 training.batch_size 1.0 +1015 81 model.embedding_dim 0.0 +1015 81 model.scoring_fct_norm 1.0 +1015 81 negative_sampler.num_negs_per_pos 74.0 +1015 81 training.batch_size 1.0 +1015 82 model.embedding_dim 2.0 +1015 82 model.scoring_fct_norm 2.0 +1015 82 negative_sampler.num_negs_per_pos 2.0 +1015 82 training.batch_size 1.0 +1015 83 model.embedding_dim 2.0 +1015 83 model.scoring_fct_norm 1.0 +1015 83 negative_sampler.num_negs_per_pos 10.0 +1015 83 training.batch_size 2.0 +1015 84 model.embedding_dim 0.0 +1015 84 model.scoring_fct_norm 2.0 +1015 84 negative_sampler.num_negs_per_pos 65.0 +1015 84 training.batch_size 2.0 +1015 85 model.embedding_dim 2.0 +1015 85 model.scoring_fct_norm 2.0 +1015 85 negative_sampler.num_negs_per_pos 90.0 +1015 85 training.batch_size 0.0 +1015 86 model.embedding_dim 0.0 +1015 86 model.scoring_fct_norm 1.0 +1015 86 negative_sampler.num_negs_per_pos 51.0 +1015 86 training.batch_size 2.0 +1015 87 model.embedding_dim 0.0 +1015 87 model.scoring_fct_norm 1.0 +1015 87 negative_sampler.num_negs_per_pos 13.0 +1015 87 training.batch_size 2.0 +1015 88 model.embedding_dim 2.0 +1015 88 model.scoring_fct_norm 1.0 +1015 88 negative_sampler.num_negs_per_pos 37.0 +1015 88 training.batch_size 0.0 +1015 89 model.embedding_dim 0.0 +1015 89 model.scoring_fct_norm 1.0 +1015 89 negative_sampler.num_negs_per_pos 28.0 +1015 89 training.batch_size 1.0 +1015 90 model.embedding_dim 0.0 +1015 90 model.scoring_fct_norm 2.0 +1015 90 negative_sampler.num_negs_per_pos 61.0 +1015 90 training.batch_size 1.0 +1015 91 model.embedding_dim 2.0 +1015 91 model.scoring_fct_norm 1.0 +1015 91 negative_sampler.num_negs_per_pos 96.0 +1015 91 training.batch_size 1.0 +1015 92 model.embedding_dim 2.0 +1015 92 model.scoring_fct_norm 1.0 +1015 92 negative_sampler.num_negs_per_pos 86.0 +1015 92 training.batch_size 0.0 +1015 93 model.embedding_dim 1.0 +1015 93 model.scoring_fct_norm 1.0 +1015 93 negative_sampler.num_negs_per_pos 94.0 +1015 93 
training.batch_size 0.0 +1015 94 model.embedding_dim 1.0 +1015 94 model.scoring_fct_norm 1.0 +1015 94 negative_sampler.num_negs_per_pos 70.0 +1015 94 training.batch_size 2.0 +1015 95 model.embedding_dim 0.0 +1015 95 model.scoring_fct_norm 2.0 +1015 95 negative_sampler.num_negs_per_pos 9.0 +1015 95 training.batch_size 0.0 +1015 96 model.embedding_dim 1.0 +1015 96 model.scoring_fct_norm 2.0 +1015 96 negative_sampler.num_negs_per_pos 39.0 +1015 96 training.batch_size 2.0 +1015 97 model.embedding_dim 1.0 +1015 97 model.scoring_fct_norm 1.0 +1015 97 negative_sampler.num_negs_per_pos 43.0 +1015 97 training.batch_size 1.0 +1015 98 model.embedding_dim 1.0 +1015 98 model.scoring_fct_norm 2.0 +1015 98 negative_sampler.num_negs_per_pos 23.0 +1015 98 training.batch_size 2.0 +1015 99 model.embedding_dim 0.0 +1015 99 model.scoring_fct_norm 1.0 +1015 99 negative_sampler.num_negs_per_pos 69.0 +1015 99 training.batch_size 2.0 +1015 100 model.embedding_dim 2.0 +1015 100 model.scoring_fct_norm 1.0 +1015 100 negative_sampler.num_negs_per_pos 53.0 +1015 100 training.batch_size 0.0 +1015 1 dataset """kinships""" +1015 1 model """unstructuredmodel""" +1015 1 loss """softplus""" +1015 1 regularizer """no""" +1015 1 optimizer """adadelta""" +1015 1 training_loop """owa""" +1015 1 negative_sampler """basic""" +1015 1 evaluator """rankbased""" +1015 2 dataset """kinships""" +1015 2 model """unstructuredmodel""" +1015 2 loss """softplus""" +1015 2 regularizer """no""" +1015 2 optimizer """adadelta""" +1015 2 training_loop """owa""" +1015 2 negative_sampler """basic""" +1015 2 evaluator """rankbased""" +1015 3 dataset """kinships""" +1015 3 model """unstructuredmodel""" +1015 3 loss """softplus""" +1015 3 regularizer """no""" +1015 3 optimizer """adadelta""" +1015 3 training_loop """owa""" +1015 3 negative_sampler """basic""" +1015 3 evaluator """rankbased""" +1015 4 dataset """kinships""" +1015 4 model """unstructuredmodel""" +1015 4 loss """softplus""" +1015 4 regularizer """no""" +1015 4 
optimizer """adadelta""" +1015 4 training_loop """owa""" +1015 4 negative_sampler """basic""" +1015 4 evaluator """rankbased""" +1015 5 dataset """kinships""" +1015 5 model """unstructuredmodel""" +1015 5 loss """softplus""" +1015 5 regularizer """no""" +1015 5 optimizer """adadelta""" +1015 5 training_loop """owa""" +1015 5 negative_sampler """basic""" +1015 5 evaluator """rankbased""" +1015 6 dataset """kinships""" +1015 6 model """unstructuredmodel""" +1015 6 loss """softplus""" +1015 6 regularizer """no""" +1015 6 optimizer """adadelta""" +1015 6 training_loop """owa""" +1015 6 negative_sampler """basic""" +1015 6 evaluator """rankbased""" +1015 7 dataset """kinships""" +1015 7 model """unstructuredmodel""" +1015 7 loss """softplus""" +1015 7 regularizer """no""" +1015 7 optimizer """adadelta""" +1015 7 training_loop """owa""" +1015 7 negative_sampler """basic""" +1015 7 evaluator """rankbased""" +1015 8 dataset """kinships""" +1015 8 model """unstructuredmodel""" +1015 8 loss """softplus""" +1015 8 regularizer """no""" +1015 8 optimizer """adadelta""" +1015 8 training_loop """owa""" +1015 8 negative_sampler """basic""" +1015 8 evaluator """rankbased""" +1015 9 dataset """kinships""" +1015 9 model """unstructuredmodel""" +1015 9 loss """softplus""" +1015 9 regularizer """no""" +1015 9 optimizer """adadelta""" +1015 9 training_loop """owa""" +1015 9 negative_sampler """basic""" +1015 9 evaluator """rankbased""" +1015 10 dataset """kinships""" +1015 10 model """unstructuredmodel""" +1015 10 loss """softplus""" +1015 10 regularizer """no""" +1015 10 optimizer """adadelta""" +1015 10 training_loop """owa""" +1015 10 negative_sampler """basic""" +1015 10 evaluator """rankbased""" +1015 11 dataset """kinships""" +1015 11 model """unstructuredmodel""" +1015 11 loss """softplus""" +1015 11 regularizer """no""" +1015 11 optimizer """adadelta""" +1015 11 training_loop """owa""" +1015 11 negative_sampler """basic""" +1015 11 evaluator """rankbased""" +1015 12 dataset 
"""kinships""" +1015 12 model """unstructuredmodel""" +1015 12 loss """softplus""" +1015 12 regularizer """no""" +1015 12 optimizer """adadelta""" +1015 12 training_loop """owa""" +1015 12 negative_sampler """basic""" +1015 12 evaluator """rankbased""" +1015 13 dataset """kinships""" +1015 13 model """unstructuredmodel""" +1015 13 loss """softplus""" +1015 13 regularizer """no""" +1015 13 optimizer """adadelta""" +1015 13 training_loop """owa""" +1015 13 negative_sampler """basic""" +1015 13 evaluator """rankbased""" +1015 14 dataset """kinships""" +1015 14 model """unstructuredmodel""" +1015 14 loss """softplus""" +1015 14 regularizer """no""" +1015 14 optimizer """adadelta""" +1015 14 training_loop """owa""" +1015 14 negative_sampler """basic""" +1015 14 evaluator """rankbased""" +1015 15 dataset """kinships""" +1015 15 model """unstructuredmodel""" +1015 15 loss """softplus""" +1015 15 regularizer """no""" +1015 15 optimizer """adadelta""" +1015 15 training_loop """owa""" +1015 15 negative_sampler """basic""" +1015 15 evaluator """rankbased""" +1015 16 dataset """kinships""" +1015 16 model """unstructuredmodel""" +1015 16 loss """softplus""" +1015 16 regularizer """no""" +1015 16 optimizer """adadelta""" +1015 16 training_loop """owa""" +1015 16 negative_sampler """basic""" +1015 16 evaluator """rankbased""" +1015 17 dataset """kinships""" +1015 17 model """unstructuredmodel""" +1015 17 loss """softplus""" +1015 17 regularizer """no""" +1015 17 optimizer """adadelta""" +1015 17 training_loop """owa""" +1015 17 negative_sampler """basic""" +1015 17 evaluator """rankbased""" +1015 18 dataset """kinships""" +1015 18 model """unstructuredmodel""" +1015 18 loss """softplus""" +1015 18 regularizer """no""" +1015 18 optimizer """adadelta""" +1015 18 training_loop """owa""" +1015 18 negative_sampler """basic""" +1015 18 evaluator """rankbased""" +1015 19 dataset """kinships""" +1015 19 model """unstructuredmodel""" +1015 19 loss """softplus""" +1015 19 regularizer 
"""no""" +1015 19 optimizer """adadelta""" +1015 19 training_loop """owa""" +1015 19 negative_sampler """basic""" +1015 19 evaluator """rankbased""" +1015 20 dataset """kinships""" +1015 20 model """unstructuredmodel""" +1015 20 loss """softplus""" +1015 20 regularizer """no""" +1015 20 optimizer """adadelta""" +1015 20 training_loop """owa""" +1015 20 negative_sampler """basic""" +1015 20 evaluator """rankbased""" +1015 21 dataset """kinships""" +1015 21 model """unstructuredmodel""" +1015 21 loss """softplus""" +1015 21 regularizer """no""" +1015 21 optimizer """adadelta""" +1015 21 training_loop """owa""" +1015 21 negative_sampler """basic""" +1015 21 evaluator """rankbased""" +1015 22 dataset """kinships""" +1015 22 model """unstructuredmodel""" +1015 22 loss """softplus""" +1015 22 regularizer """no""" +1015 22 optimizer """adadelta""" +1015 22 training_loop """owa""" +1015 22 negative_sampler """basic""" +1015 22 evaluator """rankbased""" +1015 23 dataset """kinships""" +1015 23 model """unstructuredmodel""" +1015 23 loss """softplus""" +1015 23 regularizer """no""" +1015 23 optimizer """adadelta""" +1015 23 training_loop """owa""" +1015 23 negative_sampler """basic""" +1015 23 evaluator """rankbased""" +1015 24 dataset """kinships""" +1015 24 model """unstructuredmodel""" +1015 24 loss """softplus""" +1015 24 regularizer """no""" +1015 24 optimizer """adadelta""" +1015 24 training_loop """owa""" +1015 24 negative_sampler """basic""" +1015 24 evaluator """rankbased""" +1015 25 dataset """kinships""" +1015 25 model """unstructuredmodel""" +1015 25 loss """softplus""" +1015 25 regularizer """no""" +1015 25 optimizer """adadelta""" +1015 25 training_loop """owa""" +1015 25 negative_sampler """basic""" +1015 25 evaluator """rankbased""" +1015 26 dataset """kinships""" +1015 26 model """unstructuredmodel""" +1015 26 loss """softplus""" +1015 26 regularizer """no""" +1015 26 optimizer """adadelta""" +1015 26 training_loop """owa""" +1015 26 negative_sampler 
"""basic""" +1015 26 evaluator """rankbased""" +1015 27 dataset """kinships""" +1015 27 model """unstructuredmodel""" +1015 27 loss """softplus""" +1015 27 regularizer """no""" +1015 27 optimizer """adadelta""" +1015 27 training_loop """owa""" +1015 27 negative_sampler """basic""" +1015 27 evaluator """rankbased""" +1015 28 dataset """kinships""" +1015 28 model """unstructuredmodel""" +1015 28 loss """softplus""" +1015 28 regularizer """no""" +1015 28 optimizer """adadelta""" +1015 28 training_loop """owa""" +1015 28 negative_sampler """basic""" +1015 28 evaluator """rankbased""" +1015 29 dataset """kinships""" +1015 29 model """unstructuredmodel""" +1015 29 loss """softplus""" +1015 29 regularizer """no""" +1015 29 optimizer """adadelta""" +1015 29 training_loop """owa""" +1015 29 negative_sampler """basic""" +1015 29 evaluator """rankbased""" +1015 30 dataset """kinships""" +1015 30 model """unstructuredmodel""" +1015 30 loss """softplus""" +1015 30 regularizer """no""" +1015 30 optimizer """adadelta""" +1015 30 training_loop """owa""" +1015 30 negative_sampler """basic""" +1015 30 evaluator """rankbased""" +1015 31 dataset """kinships""" +1015 31 model """unstructuredmodel""" +1015 31 loss """softplus""" +1015 31 regularizer """no""" +1015 31 optimizer """adadelta""" +1015 31 training_loop """owa""" +1015 31 negative_sampler """basic""" +1015 31 evaluator """rankbased""" +1015 32 dataset """kinships""" +1015 32 model """unstructuredmodel""" +1015 32 loss """softplus""" +1015 32 regularizer """no""" +1015 32 optimizer """adadelta""" +1015 32 training_loop """owa""" +1015 32 negative_sampler """basic""" +1015 32 evaluator """rankbased""" +1015 33 dataset """kinships""" +1015 33 model """unstructuredmodel""" +1015 33 loss """softplus""" +1015 33 regularizer """no""" +1015 33 optimizer """adadelta""" +1015 33 training_loop """owa""" +1015 33 negative_sampler """basic""" +1015 33 evaluator """rankbased""" +1015 34 dataset """kinships""" +1015 34 model 
"""unstructuredmodel""" +1015 34 loss """softplus""" +1015 34 regularizer """no""" +1015 34 optimizer """adadelta""" +1015 34 training_loop """owa""" +1015 34 negative_sampler """basic""" +1015 34 evaluator """rankbased""" +1015 35 dataset """kinships""" +1015 35 model """unstructuredmodel""" +1015 35 loss """softplus""" +1015 35 regularizer """no""" +1015 35 optimizer """adadelta""" +1015 35 training_loop """owa""" +1015 35 negative_sampler """basic""" +1015 35 evaluator """rankbased""" +1015 36 dataset """kinships""" +1015 36 model """unstructuredmodel""" +1015 36 loss """softplus""" +1015 36 regularizer """no""" +1015 36 optimizer """adadelta""" +1015 36 training_loop """owa""" +1015 36 negative_sampler """basic""" +1015 36 evaluator """rankbased""" +1015 37 dataset """kinships""" +1015 37 model """unstructuredmodel""" +1015 37 loss """softplus""" +1015 37 regularizer """no""" +1015 37 optimizer """adadelta""" +1015 37 training_loop """owa""" +1015 37 negative_sampler """basic""" +1015 37 evaluator """rankbased""" +1015 38 dataset """kinships""" +1015 38 model """unstructuredmodel""" +1015 38 loss """softplus""" +1015 38 regularizer """no""" +1015 38 optimizer """adadelta""" +1015 38 training_loop """owa""" +1015 38 negative_sampler """basic""" +1015 38 evaluator """rankbased""" +1015 39 dataset """kinships""" +1015 39 model """unstructuredmodel""" +1015 39 loss """softplus""" +1015 39 regularizer """no""" +1015 39 optimizer """adadelta""" +1015 39 training_loop """owa""" +1015 39 negative_sampler """basic""" +1015 39 evaluator """rankbased""" +1015 40 dataset """kinships""" +1015 40 model """unstructuredmodel""" +1015 40 loss """softplus""" +1015 40 regularizer """no""" +1015 40 optimizer """adadelta""" +1015 40 training_loop """owa""" +1015 40 negative_sampler """basic""" +1015 40 evaluator """rankbased""" +1015 41 dataset """kinships""" +1015 41 model """unstructuredmodel""" +1015 41 loss """softplus""" +1015 41 regularizer """no""" +1015 41 optimizer 
"""adadelta""" +1015 41 training_loop """owa""" +1015 41 negative_sampler """basic""" +1015 41 evaluator """rankbased""" +1015 42 dataset """kinships""" +1015 42 model """unstructuredmodel""" +1015 42 loss """softplus""" +1015 42 regularizer """no""" +1015 42 optimizer """adadelta""" +1015 42 training_loop """owa""" +1015 42 negative_sampler """basic""" +1015 42 evaluator """rankbased""" +1015 43 dataset """kinships""" +1015 43 model """unstructuredmodel""" +1015 43 loss """softplus""" +1015 43 regularizer """no""" +1015 43 optimizer """adadelta""" +1015 43 training_loop """owa""" +1015 43 negative_sampler """basic""" +1015 43 evaluator """rankbased""" +1015 44 dataset """kinships""" +1015 44 model """unstructuredmodel""" +1015 44 loss """softplus""" +1015 44 regularizer """no""" +1015 44 optimizer """adadelta""" +1015 44 training_loop """owa""" +1015 44 negative_sampler """basic""" +1015 44 evaluator """rankbased""" +1015 45 dataset """kinships""" +1015 45 model """unstructuredmodel""" +1015 45 loss """softplus""" +1015 45 regularizer """no""" +1015 45 optimizer """adadelta""" +1015 45 training_loop """owa""" +1015 45 negative_sampler """basic""" +1015 45 evaluator """rankbased""" +1015 46 dataset """kinships""" +1015 46 model """unstructuredmodel""" +1015 46 loss """softplus""" +1015 46 regularizer """no""" +1015 46 optimizer """adadelta""" +1015 46 training_loop """owa""" +1015 46 negative_sampler """basic""" +1015 46 evaluator """rankbased""" +1015 47 dataset """kinships""" +1015 47 model """unstructuredmodel""" +1015 47 loss """softplus""" +1015 47 regularizer """no""" +1015 47 optimizer """adadelta""" +1015 47 training_loop """owa""" +1015 47 negative_sampler """basic""" +1015 47 evaluator """rankbased""" +1015 48 dataset """kinships""" +1015 48 model """unstructuredmodel""" +1015 48 loss """softplus""" +1015 48 regularizer """no""" +1015 48 optimizer """adadelta""" +1015 48 training_loop """owa""" +1015 48 negative_sampler """basic""" +1015 48 evaluator 
"""rankbased""" +1015 49 dataset """kinships""" +1015 49 model """unstructuredmodel""" +1015 49 loss """softplus""" +1015 49 regularizer """no""" +1015 49 optimizer """adadelta""" +1015 49 training_loop """owa""" +1015 49 negative_sampler """basic""" +1015 49 evaluator """rankbased""" +1015 50 dataset """kinships""" +1015 50 model """unstructuredmodel""" +1015 50 loss """softplus""" +1015 50 regularizer """no""" +1015 50 optimizer """adadelta""" +1015 50 training_loop """owa""" +1015 50 negative_sampler """basic""" +1015 50 evaluator """rankbased""" +1015 51 dataset """kinships""" +1015 51 model """unstructuredmodel""" +1015 51 loss """softplus""" +1015 51 regularizer """no""" +1015 51 optimizer """adadelta""" +1015 51 training_loop """owa""" +1015 51 negative_sampler """basic""" +1015 51 evaluator """rankbased""" +1015 52 dataset """kinships""" +1015 52 model """unstructuredmodel""" +1015 52 loss """softplus""" +1015 52 regularizer """no""" +1015 52 optimizer """adadelta""" +1015 52 training_loop """owa""" +1015 52 negative_sampler """basic""" +1015 52 evaluator """rankbased""" +1015 53 dataset """kinships""" +1015 53 model """unstructuredmodel""" +1015 53 loss """softplus""" +1015 53 regularizer """no""" +1015 53 optimizer """adadelta""" +1015 53 training_loop """owa""" +1015 53 negative_sampler """basic""" +1015 53 evaluator """rankbased""" +1015 54 dataset """kinships""" +1015 54 model """unstructuredmodel""" +1015 54 loss """softplus""" +1015 54 regularizer """no""" +1015 54 optimizer """adadelta""" +1015 54 training_loop """owa""" +1015 54 negative_sampler """basic""" +1015 54 evaluator """rankbased""" +1015 55 dataset """kinships""" +1015 55 model """unstructuredmodel""" +1015 55 loss """softplus""" +1015 55 regularizer """no""" +1015 55 optimizer """adadelta""" +1015 55 training_loop """owa""" +1015 55 negative_sampler """basic""" +1015 55 evaluator """rankbased""" +1015 56 dataset """kinships""" +1015 56 model """unstructuredmodel""" +1015 56 loss 
"""softplus""" +1015 56 regularizer """no""" +1015 56 optimizer """adadelta""" +1015 56 training_loop """owa""" +1015 56 negative_sampler """basic""" +1015 56 evaluator """rankbased""" +1015 57 dataset """kinships""" +1015 57 model """unstructuredmodel""" +1015 57 loss """softplus""" +1015 57 regularizer """no""" +1015 57 optimizer """adadelta""" +1015 57 training_loop """owa""" +1015 57 negative_sampler """basic""" +1015 57 evaluator """rankbased""" +1015 58 dataset """kinships""" +1015 58 model """unstructuredmodel""" +1015 58 loss """softplus""" +1015 58 regularizer """no""" +1015 58 optimizer """adadelta""" +1015 58 training_loop """owa""" +1015 58 negative_sampler """basic""" +1015 58 evaluator """rankbased""" +1015 59 dataset """kinships""" +1015 59 model """unstructuredmodel""" +1015 59 loss """softplus""" +1015 59 regularizer """no""" +1015 59 optimizer """adadelta""" +1015 59 training_loop """owa""" +1015 59 negative_sampler """basic""" +1015 59 evaluator """rankbased""" +1015 60 dataset """kinships""" +1015 60 model """unstructuredmodel""" +1015 60 loss """softplus""" +1015 60 regularizer """no""" +1015 60 optimizer """adadelta""" +1015 60 training_loop """owa""" +1015 60 negative_sampler """basic""" +1015 60 evaluator """rankbased""" +1015 61 dataset """kinships""" +1015 61 model """unstructuredmodel""" +1015 61 loss """softplus""" +1015 61 regularizer """no""" +1015 61 optimizer """adadelta""" +1015 61 training_loop """owa""" +1015 61 negative_sampler """basic""" +1015 61 evaluator """rankbased""" +1015 62 dataset """kinships""" +1015 62 model """unstructuredmodel""" +1015 62 loss """softplus""" +1015 62 regularizer """no""" +1015 62 optimizer """adadelta""" +1015 62 training_loop """owa""" +1015 62 negative_sampler """basic""" +1015 62 evaluator """rankbased""" +1015 63 dataset """kinships""" +1015 63 model """unstructuredmodel""" +1015 63 loss """softplus""" +1015 63 regularizer """no""" +1015 63 optimizer """adadelta""" +1015 63 training_loop 
"""owa""" +1015 63 negative_sampler """basic""" +1015 63 evaluator """rankbased""" +1015 64 dataset """kinships""" +1015 64 model """unstructuredmodel""" +1015 64 loss """softplus""" +1015 64 regularizer """no""" +1015 64 optimizer """adadelta""" +1015 64 training_loop """owa""" +1015 64 negative_sampler """basic""" +1015 64 evaluator """rankbased""" +1015 65 dataset """kinships""" +1015 65 model """unstructuredmodel""" +1015 65 loss """softplus""" +1015 65 regularizer """no""" +1015 65 optimizer """adadelta""" +1015 65 training_loop """owa""" +1015 65 negative_sampler """basic""" +1015 65 evaluator """rankbased""" +1015 66 dataset """kinships""" +1015 66 model """unstructuredmodel""" +1015 66 loss """softplus""" +1015 66 regularizer """no""" +1015 66 optimizer """adadelta""" +1015 66 training_loop """owa""" +1015 66 negative_sampler """basic""" +1015 66 evaluator """rankbased""" +1015 67 dataset """kinships""" +1015 67 model """unstructuredmodel""" +1015 67 loss """softplus""" +1015 67 regularizer """no""" +1015 67 optimizer """adadelta""" +1015 67 training_loop """owa""" +1015 67 negative_sampler """basic""" +1015 67 evaluator """rankbased""" +1015 68 dataset """kinships""" +1015 68 model """unstructuredmodel""" +1015 68 loss """softplus""" +1015 68 regularizer """no""" +1015 68 optimizer """adadelta""" +1015 68 training_loop """owa""" +1015 68 negative_sampler """basic""" +1015 68 evaluator """rankbased""" +1015 69 dataset """kinships""" +1015 69 model """unstructuredmodel""" +1015 69 loss """softplus""" +1015 69 regularizer """no""" +1015 69 optimizer """adadelta""" +1015 69 training_loop """owa""" +1015 69 negative_sampler """basic""" +1015 69 evaluator """rankbased""" +1015 70 dataset """kinships""" +1015 70 model """unstructuredmodel""" +1015 70 loss """softplus""" +1015 70 regularizer """no""" +1015 70 optimizer """adadelta""" +1015 70 training_loop """owa""" +1015 70 negative_sampler """basic""" +1015 70 evaluator """rankbased""" +1015 71 dataset 
"""kinships""" +1015 71 model """unstructuredmodel""" +1015 71 loss """softplus""" +1015 71 regularizer """no""" +1015 71 optimizer """adadelta""" +1015 71 training_loop """owa""" +1015 71 negative_sampler """basic""" +1015 71 evaluator """rankbased""" +1015 72 dataset """kinships""" +1015 72 model """unstructuredmodel""" +1015 72 loss """softplus""" +1015 72 regularizer """no""" +1015 72 optimizer """adadelta""" +1015 72 training_loop """owa""" +1015 72 negative_sampler """basic""" +1015 72 evaluator """rankbased""" +1015 73 dataset """kinships""" +1015 73 model """unstructuredmodel""" +1015 73 loss """softplus""" +1015 73 regularizer """no""" +1015 73 optimizer """adadelta""" +1015 73 training_loop """owa""" +1015 73 negative_sampler """basic""" +1015 73 evaluator """rankbased""" +1015 74 dataset """kinships""" +1015 74 model """unstructuredmodel""" +1015 74 loss """softplus""" +1015 74 regularizer """no""" +1015 74 optimizer """adadelta""" +1015 74 training_loop """owa""" +1015 74 negative_sampler """basic""" +1015 74 evaluator """rankbased""" +1015 75 dataset """kinships""" +1015 75 model """unstructuredmodel""" +1015 75 loss """softplus""" +1015 75 regularizer """no""" +1015 75 optimizer """adadelta""" +1015 75 training_loop """owa""" +1015 75 negative_sampler """basic""" +1015 75 evaluator """rankbased""" +1015 76 dataset """kinships""" +1015 76 model """unstructuredmodel""" +1015 76 loss """softplus""" +1015 76 regularizer """no""" +1015 76 optimizer """adadelta""" +1015 76 training_loop """owa""" +1015 76 negative_sampler """basic""" +1015 76 evaluator """rankbased""" +1015 77 dataset """kinships""" +1015 77 model """unstructuredmodel""" +1015 77 loss """softplus""" +1015 77 regularizer """no""" +1015 77 optimizer """adadelta""" +1015 77 training_loop """owa""" +1015 77 negative_sampler """basic""" +1015 77 evaluator """rankbased""" +1015 78 dataset """kinships""" +1015 78 model """unstructuredmodel""" +1015 78 loss """softplus""" +1015 78 regularizer 
"""no""" +1015 78 optimizer """adadelta""" +1015 78 training_loop """owa""" +1015 78 negative_sampler """basic""" +1015 78 evaluator """rankbased""" +1015 79 dataset """kinships""" +1015 79 model """unstructuredmodel""" +1015 79 loss """softplus""" +1015 79 regularizer """no""" +1015 79 optimizer """adadelta""" +1015 79 training_loop """owa""" +1015 79 negative_sampler """basic""" +1015 79 evaluator """rankbased""" +1015 80 dataset """kinships""" +1015 80 model """unstructuredmodel""" +1015 80 loss """softplus""" +1015 80 regularizer """no""" +1015 80 optimizer """adadelta""" +1015 80 training_loop """owa""" +1015 80 negative_sampler """basic""" +1015 80 evaluator """rankbased""" +1015 81 dataset """kinships""" +1015 81 model """unstructuredmodel""" +1015 81 loss """softplus""" +1015 81 regularizer """no""" +1015 81 optimizer """adadelta""" +1015 81 training_loop """owa""" +1015 81 negative_sampler """basic""" +1015 81 evaluator """rankbased""" +1015 82 dataset """kinships""" +1015 82 model """unstructuredmodel""" +1015 82 loss """softplus""" +1015 82 regularizer """no""" +1015 82 optimizer """adadelta""" +1015 82 training_loop """owa""" +1015 82 negative_sampler """basic""" +1015 82 evaluator """rankbased""" +1015 83 dataset """kinships""" +1015 83 model """unstructuredmodel""" +1015 83 loss """softplus""" +1015 83 regularizer """no""" +1015 83 optimizer """adadelta""" +1015 83 training_loop """owa""" +1015 83 negative_sampler """basic""" +1015 83 evaluator """rankbased""" +1015 84 dataset """kinships""" +1015 84 model """unstructuredmodel""" +1015 84 loss """softplus""" +1015 84 regularizer """no""" +1015 84 optimizer """adadelta""" +1015 84 training_loop """owa""" +1015 84 negative_sampler """basic""" +1015 84 evaluator """rankbased""" +1015 85 dataset """kinships""" +1015 85 model """unstructuredmodel""" +1015 85 loss """softplus""" +1015 85 regularizer """no""" +1015 85 optimizer """adadelta""" +1015 85 training_loop """owa""" +1015 85 negative_sampler 
"""basic""" +1015 85 evaluator """rankbased""" +1015 86 dataset """kinships""" +1015 86 model """unstructuredmodel""" +1015 86 loss """softplus""" +1015 86 regularizer """no""" +1015 86 optimizer """adadelta""" +1015 86 training_loop """owa""" +1015 86 negative_sampler """basic""" +1015 86 evaluator """rankbased""" +1015 87 dataset """kinships""" +1015 87 model """unstructuredmodel""" +1015 87 loss """softplus""" +1015 87 regularizer """no""" +1015 87 optimizer """adadelta""" +1015 87 training_loop """owa""" +1015 87 negative_sampler """basic""" +1015 87 evaluator """rankbased""" +1015 88 dataset """kinships""" +1015 88 model """unstructuredmodel""" +1015 88 loss """softplus""" +1015 88 regularizer """no""" +1015 88 optimizer """adadelta""" +1015 88 training_loop """owa""" +1015 88 negative_sampler """basic""" +1015 88 evaluator """rankbased""" +1015 89 dataset """kinships""" +1015 89 model """unstructuredmodel""" +1015 89 loss """softplus""" +1015 89 regularizer """no""" +1015 89 optimizer """adadelta""" +1015 89 training_loop """owa""" +1015 89 negative_sampler """basic""" +1015 89 evaluator """rankbased""" +1015 90 dataset """kinships""" +1015 90 model """unstructuredmodel""" +1015 90 loss """softplus""" +1015 90 regularizer """no""" +1015 90 optimizer """adadelta""" +1015 90 training_loop """owa""" +1015 90 negative_sampler """basic""" +1015 90 evaluator """rankbased""" +1015 91 dataset """kinships""" +1015 91 model """unstructuredmodel""" +1015 91 loss """softplus""" +1015 91 regularizer """no""" +1015 91 optimizer """adadelta""" +1015 91 training_loop """owa""" +1015 91 negative_sampler """basic""" +1015 91 evaluator """rankbased""" +1015 92 dataset """kinships""" +1015 92 model """unstructuredmodel""" +1015 92 loss """softplus""" +1015 92 regularizer """no""" +1015 92 optimizer """adadelta""" +1015 92 training_loop """owa""" +1015 92 negative_sampler """basic""" +1015 92 evaluator """rankbased""" +1015 93 dataset """kinships""" +1015 93 model 
"""unstructuredmodel""" +1015 93 loss """softplus""" +1015 93 regularizer """no""" +1015 93 optimizer """adadelta""" +1015 93 training_loop """owa""" +1015 93 negative_sampler """basic""" +1015 93 evaluator """rankbased""" +1015 94 dataset """kinships""" +1015 94 model """unstructuredmodel""" +1015 94 loss """softplus""" +1015 94 regularizer """no""" +1015 94 optimizer """adadelta""" +1015 94 training_loop """owa""" +1015 94 negative_sampler """basic""" +1015 94 evaluator """rankbased""" +1015 95 dataset """kinships""" +1015 95 model """unstructuredmodel""" +1015 95 loss """softplus""" +1015 95 regularizer """no""" +1015 95 optimizer """adadelta""" +1015 95 training_loop """owa""" +1015 95 negative_sampler """basic""" +1015 95 evaluator """rankbased""" +1015 96 dataset """kinships""" +1015 96 model """unstructuredmodel""" +1015 96 loss """softplus""" +1015 96 regularizer """no""" +1015 96 optimizer """adadelta""" +1015 96 training_loop """owa""" +1015 96 negative_sampler """basic""" +1015 96 evaluator """rankbased""" +1015 97 dataset """kinships""" +1015 97 model """unstructuredmodel""" +1015 97 loss """softplus""" +1015 97 regularizer """no""" +1015 97 optimizer """adadelta""" +1015 97 training_loop """owa""" +1015 97 negative_sampler """basic""" +1015 97 evaluator """rankbased""" +1015 98 dataset """kinships""" +1015 98 model """unstructuredmodel""" +1015 98 loss """softplus""" +1015 98 regularizer """no""" +1015 98 optimizer """adadelta""" +1015 98 training_loop """owa""" +1015 98 negative_sampler """basic""" +1015 98 evaluator """rankbased""" +1015 99 dataset """kinships""" +1015 99 model """unstructuredmodel""" +1015 99 loss """softplus""" +1015 99 regularizer """no""" +1015 99 optimizer """adadelta""" +1015 99 training_loop """owa""" +1015 99 negative_sampler """basic""" +1015 99 evaluator """rankbased""" +1015 100 dataset """kinships""" +1015 100 model """unstructuredmodel""" +1015 100 loss """softplus""" +1015 100 regularizer """no""" +1015 100 optimizer 
"""adadelta""" +1015 100 training_loop """owa""" +1015 100 negative_sampler """basic""" +1015 100 evaluator """rankbased""" +1016 1 model.embedding_dim 0.0 +1016 1 model.scoring_fct_norm 1.0 +1016 1 loss.margin 21.18126814649138 +1016 1 loss.adversarial_temperature 0.8236266827727876 +1016 1 negative_sampler.num_negs_per_pos 93.0 +1016 1 training.batch_size 0.0 +1016 2 model.embedding_dim 0.0 +1016 2 model.scoring_fct_norm 2.0 +1016 2 loss.margin 8.931828140582814 +1016 2 loss.adversarial_temperature 0.2504750080059651 +1016 2 negative_sampler.num_negs_per_pos 17.0 +1016 2 training.batch_size 2.0 +1016 3 model.embedding_dim 1.0 +1016 3 model.scoring_fct_norm 2.0 +1016 3 loss.margin 17.518716596196374 +1016 3 loss.adversarial_temperature 0.9685990611121639 +1016 3 negative_sampler.num_negs_per_pos 82.0 +1016 3 training.batch_size 0.0 +1016 4 model.embedding_dim 1.0 +1016 4 model.scoring_fct_norm 2.0 +1016 4 loss.margin 16.72075400011285 +1016 4 loss.adversarial_temperature 0.8965626800113766 +1016 4 negative_sampler.num_negs_per_pos 21.0 +1016 4 training.batch_size 2.0 +1016 5 model.embedding_dim 2.0 +1016 5 model.scoring_fct_norm 1.0 +1016 5 loss.margin 12.915665459872669 +1016 5 loss.adversarial_temperature 0.46170470381380835 +1016 5 negative_sampler.num_negs_per_pos 75.0 +1016 5 training.batch_size 0.0 +1016 6 model.embedding_dim 1.0 +1016 6 model.scoring_fct_norm 2.0 +1016 6 loss.margin 14.676131755164937 +1016 6 loss.adversarial_temperature 0.399979871106684 +1016 6 negative_sampler.num_negs_per_pos 86.0 +1016 6 training.batch_size 1.0 +1016 7 model.embedding_dim 2.0 +1016 7 model.scoring_fct_norm 2.0 +1016 7 loss.margin 11.108954831095325 +1016 7 loss.adversarial_temperature 0.11067222626809393 +1016 7 negative_sampler.num_negs_per_pos 35.0 +1016 7 training.batch_size 1.0 +1016 8 model.embedding_dim 2.0 +1016 8 model.scoring_fct_norm 1.0 +1016 8 loss.margin 13.08921922956658 +1016 8 loss.adversarial_temperature 0.14919756198397494 +1016 8 
negative_sampler.num_negs_per_pos 93.0 +1016 8 training.batch_size 0.0 +1016 9 model.embedding_dim 2.0 +1016 9 model.scoring_fct_norm 1.0 +1016 9 loss.margin 16.409960963415124 +1016 9 loss.adversarial_temperature 0.2230179646464144 +1016 9 negative_sampler.num_negs_per_pos 80.0 +1016 9 training.batch_size 1.0 +1016 10 model.embedding_dim 0.0 +1016 10 model.scoring_fct_norm 1.0 +1016 10 loss.margin 22.670537158263215 +1016 10 loss.adversarial_temperature 0.5689033696535588 +1016 10 negative_sampler.num_negs_per_pos 19.0 +1016 10 training.batch_size 2.0 +1016 11 model.embedding_dim 2.0 +1016 11 model.scoring_fct_norm 1.0 +1016 11 loss.margin 11.18283898939066 +1016 11 loss.adversarial_temperature 0.17610016015161492 +1016 11 negative_sampler.num_negs_per_pos 46.0 +1016 11 training.batch_size 0.0 +1016 12 model.embedding_dim 2.0 +1016 12 model.scoring_fct_norm 1.0 +1016 12 loss.margin 22.50877065541106 +1016 12 loss.adversarial_temperature 0.9955603077409736 +1016 12 negative_sampler.num_negs_per_pos 67.0 +1016 12 training.batch_size 0.0 +1016 13 model.embedding_dim 1.0 +1016 13 model.scoring_fct_norm 1.0 +1016 13 loss.margin 13.393454777637235 +1016 13 loss.adversarial_temperature 0.7062787154195374 +1016 13 negative_sampler.num_negs_per_pos 27.0 +1016 13 training.batch_size 1.0 +1016 14 model.embedding_dim 1.0 +1016 14 model.scoring_fct_norm 2.0 +1016 14 loss.margin 22.667180329323372 +1016 14 loss.adversarial_temperature 0.35258990290342307 +1016 14 negative_sampler.num_negs_per_pos 43.0 +1016 14 training.batch_size 0.0 +1016 15 model.embedding_dim 2.0 +1016 15 model.scoring_fct_norm 2.0 +1016 15 loss.margin 22.733158649906205 +1016 15 loss.adversarial_temperature 0.1777187415390427 +1016 15 negative_sampler.num_negs_per_pos 58.0 +1016 15 training.batch_size 2.0 +1016 16 model.embedding_dim 1.0 +1016 16 model.scoring_fct_norm 2.0 +1016 16 loss.margin 15.451045245763687 +1016 16 loss.adversarial_temperature 0.6922281023270157 +1016 16 
negative_sampler.num_negs_per_pos 13.0 +1016 16 training.batch_size 0.0 +1016 17 model.embedding_dim 0.0 +1016 17 model.scoring_fct_norm 2.0 +1016 17 loss.margin 29.54631705817821 +1016 17 loss.adversarial_temperature 0.5950636914606157 +1016 17 negative_sampler.num_negs_per_pos 75.0 +1016 17 training.batch_size 1.0 +1016 18 model.embedding_dim 2.0 +1016 18 model.scoring_fct_norm 1.0 +1016 18 loss.margin 23.318659117618527 +1016 18 loss.adversarial_temperature 0.6496325102832554 +1016 18 negative_sampler.num_negs_per_pos 1.0 +1016 18 training.batch_size 0.0 +1016 19 model.embedding_dim 1.0 +1016 19 model.scoring_fct_norm 2.0 +1016 19 loss.margin 14.159253039543119 +1016 19 loss.adversarial_temperature 0.210071223590511 +1016 19 negative_sampler.num_negs_per_pos 22.0 +1016 19 training.batch_size 0.0 +1016 20 model.embedding_dim 2.0 +1016 20 model.scoring_fct_norm 1.0 +1016 20 loss.margin 28.179114598392417 +1016 20 loss.adversarial_temperature 0.8643812910434522 +1016 20 negative_sampler.num_negs_per_pos 85.0 +1016 20 training.batch_size 0.0 +1016 21 model.embedding_dim 1.0 +1016 21 model.scoring_fct_norm 2.0 +1016 21 loss.margin 11.45830600882007 +1016 21 loss.adversarial_temperature 0.6652692581641383 +1016 21 negative_sampler.num_negs_per_pos 13.0 +1016 21 training.batch_size 2.0 +1016 22 model.embedding_dim 2.0 +1016 22 model.scoring_fct_norm 2.0 +1016 22 loss.margin 17.526959688954857 +1016 22 loss.adversarial_temperature 0.8610273162544574 +1016 22 negative_sampler.num_negs_per_pos 51.0 +1016 22 training.batch_size 2.0 +1016 23 model.embedding_dim 2.0 +1016 23 model.scoring_fct_norm 2.0 +1016 23 loss.margin 2.298222153043265 +1016 23 loss.adversarial_temperature 0.7093769170217684 +1016 23 negative_sampler.num_negs_per_pos 51.0 +1016 23 training.batch_size 2.0 +1016 24 model.embedding_dim 2.0 +1016 24 model.scoring_fct_norm 1.0 +1016 24 loss.margin 23.945441114886524 +1016 24 loss.adversarial_temperature 0.41027938504243855 +1016 24 
negative_sampler.num_negs_per_pos 78.0 +1016 24 training.batch_size 0.0 +1016 25 model.embedding_dim 0.0 +1016 25 model.scoring_fct_norm 1.0 +1016 25 loss.margin 16.730645551519636 +1016 25 loss.adversarial_temperature 0.7510172619756967 +1016 25 negative_sampler.num_negs_per_pos 79.0 +1016 25 training.batch_size 2.0 +1016 26 model.embedding_dim 2.0 +1016 26 model.scoring_fct_norm 1.0 +1016 26 loss.margin 11.068718967357903 +1016 26 loss.adversarial_temperature 0.5529510073621947 +1016 26 negative_sampler.num_negs_per_pos 99.0 +1016 26 training.batch_size 1.0 +1016 27 model.embedding_dim 1.0 +1016 27 model.scoring_fct_norm 2.0 +1016 27 loss.margin 4.628489956373988 +1016 27 loss.adversarial_temperature 0.8915636231449604 +1016 27 negative_sampler.num_negs_per_pos 65.0 +1016 27 training.batch_size 0.0 +1016 28 model.embedding_dim 1.0 +1016 28 model.scoring_fct_norm 2.0 +1016 28 loss.margin 11.439885101710992 +1016 28 loss.adversarial_temperature 0.43386016082021883 +1016 28 negative_sampler.num_negs_per_pos 27.0 +1016 28 training.batch_size 0.0 +1016 29 model.embedding_dim 0.0 +1016 29 model.scoring_fct_norm 1.0 +1016 29 loss.margin 25.5749346649419 +1016 29 loss.adversarial_temperature 0.9090889782549761 +1016 29 negative_sampler.num_negs_per_pos 30.0 +1016 29 training.batch_size 0.0 +1016 30 model.embedding_dim 0.0 +1016 30 model.scoring_fct_norm 1.0 +1016 30 loss.margin 2.631954177527403 +1016 30 loss.adversarial_temperature 0.6667488655625848 +1016 30 negative_sampler.num_negs_per_pos 58.0 +1016 30 training.batch_size 0.0 +1016 31 model.embedding_dim 0.0 +1016 31 model.scoring_fct_norm 2.0 +1016 31 loss.margin 12.423418868732934 +1016 31 loss.adversarial_temperature 0.4382312221479614 +1016 31 negative_sampler.num_negs_per_pos 36.0 +1016 31 training.batch_size 0.0 +1016 32 model.embedding_dim 2.0 +1016 32 model.scoring_fct_norm 2.0 +1016 32 loss.margin 10.552778261543114 +1016 32 loss.adversarial_temperature 0.7586165902688254 +1016 32 
negative_sampler.num_negs_per_pos 20.0 +1016 32 training.batch_size 1.0 +1016 33 model.embedding_dim 1.0 +1016 33 model.scoring_fct_norm 2.0 +1016 33 loss.margin 21.84327933940104 +1016 33 loss.adversarial_temperature 0.7198567515509149 +1016 33 negative_sampler.num_negs_per_pos 51.0 +1016 33 training.batch_size 0.0 +1016 34 model.embedding_dim 1.0 +1016 34 model.scoring_fct_norm 2.0 +1016 34 loss.margin 1.2301005242138578 +1016 34 loss.adversarial_temperature 0.1285925417207847 +1016 34 negative_sampler.num_negs_per_pos 50.0 +1016 34 training.batch_size 1.0 +1016 35 model.embedding_dim 2.0 +1016 35 model.scoring_fct_norm 1.0 +1016 35 loss.margin 25.3911620618767 +1016 35 loss.adversarial_temperature 0.45175213459954644 +1016 35 negative_sampler.num_negs_per_pos 10.0 +1016 35 training.batch_size 1.0 +1016 36 model.embedding_dim 1.0 +1016 36 model.scoring_fct_norm 1.0 +1016 36 loss.margin 22.30049070013691 +1016 36 loss.adversarial_temperature 0.11739307952417549 +1016 36 negative_sampler.num_negs_per_pos 80.0 +1016 36 training.batch_size 0.0 +1016 37 model.embedding_dim 0.0 +1016 37 model.scoring_fct_norm 2.0 +1016 37 loss.margin 28.400983773705484 +1016 37 loss.adversarial_temperature 0.7599657255575827 +1016 37 negative_sampler.num_negs_per_pos 88.0 +1016 37 training.batch_size 0.0 +1016 38 model.embedding_dim 2.0 +1016 38 model.scoring_fct_norm 2.0 +1016 38 loss.margin 24.619768506371262 +1016 38 loss.adversarial_temperature 0.46018199800052406 +1016 38 negative_sampler.num_negs_per_pos 54.0 +1016 38 training.batch_size 0.0 +1016 39 model.embedding_dim 2.0 +1016 39 model.scoring_fct_norm 2.0 +1016 39 loss.margin 18.787165468528425 +1016 39 loss.adversarial_temperature 0.5378701513702168 +1016 39 negative_sampler.num_negs_per_pos 44.0 +1016 39 training.batch_size 1.0 +1016 40 model.embedding_dim 0.0 +1016 40 model.scoring_fct_norm 2.0 +1016 40 loss.margin 25.535445537424877 +1016 40 loss.adversarial_temperature 0.8905933405870465 +1016 40 
negative_sampler.num_negs_per_pos 0.0 +1016 40 training.batch_size 1.0 +1016 41 model.embedding_dim 2.0 +1016 41 model.scoring_fct_norm 1.0 +1016 41 loss.margin 14.526602229653905 +1016 41 loss.adversarial_temperature 0.14044636421561985 +1016 41 negative_sampler.num_negs_per_pos 96.0 +1016 41 training.batch_size 2.0 +1016 42 model.embedding_dim 2.0 +1016 42 model.scoring_fct_norm 2.0 +1016 42 loss.margin 19.194353504295094 +1016 42 loss.adversarial_temperature 0.8096562609566428 +1016 42 negative_sampler.num_negs_per_pos 26.0 +1016 42 training.batch_size 1.0 +1016 43 model.embedding_dim 1.0 +1016 43 model.scoring_fct_norm 1.0 +1016 43 loss.margin 3.522009758845884 +1016 43 loss.adversarial_temperature 0.28758501662679614 +1016 43 negative_sampler.num_negs_per_pos 15.0 +1016 43 training.batch_size 0.0 +1016 44 model.embedding_dim 0.0 +1016 44 model.scoring_fct_norm 1.0 +1016 44 loss.margin 3.298558054568069 +1016 44 loss.adversarial_temperature 0.5076099004549713 +1016 44 negative_sampler.num_negs_per_pos 98.0 +1016 44 training.batch_size 1.0 +1016 45 model.embedding_dim 1.0 +1016 45 model.scoring_fct_norm 1.0 +1016 45 loss.margin 25.451802828480144 +1016 45 loss.adversarial_temperature 0.4546678832070645 +1016 45 negative_sampler.num_negs_per_pos 70.0 +1016 45 training.batch_size 0.0 +1016 46 model.embedding_dim 0.0 +1016 46 model.scoring_fct_norm 2.0 +1016 46 loss.margin 24.223883151514293 +1016 46 loss.adversarial_temperature 0.48523990959481855 +1016 46 negative_sampler.num_negs_per_pos 24.0 +1016 46 training.batch_size 2.0 +1016 47 model.embedding_dim 1.0 +1016 47 model.scoring_fct_norm 1.0 +1016 47 loss.margin 8.683401142430398 +1016 47 loss.adversarial_temperature 0.4455499214544657 +1016 47 negative_sampler.num_negs_per_pos 6.0 +1016 47 training.batch_size 1.0 +1016 48 model.embedding_dim 1.0 +1016 48 model.scoring_fct_norm 1.0 +1016 48 loss.margin 27.104285276587408 +1016 48 loss.adversarial_temperature 0.8401801249495183 +1016 48 
negative_sampler.num_negs_per_pos 41.0 +1016 48 training.batch_size 2.0 +1016 49 model.embedding_dim 1.0 +1016 49 model.scoring_fct_norm 1.0 +1016 49 loss.margin 26.63520300021781 +1016 49 loss.adversarial_temperature 0.7110177464908618 +1016 49 negative_sampler.num_negs_per_pos 69.0 +1016 49 training.batch_size 0.0 +1016 50 model.embedding_dim 0.0 +1016 50 model.scoring_fct_norm 1.0 +1016 50 loss.margin 17.39850270393177 +1016 50 loss.adversarial_temperature 0.8847871752218323 +1016 50 negative_sampler.num_negs_per_pos 99.0 +1016 50 training.batch_size 0.0 +1016 51 model.embedding_dim 1.0 +1016 51 model.scoring_fct_norm 2.0 +1016 51 loss.margin 2.3930099118012 +1016 51 loss.adversarial_temperature 0.3162070955591403 +1016 51 negative_sampler.num_negs_per_pos 55.0 +1016 51 training.batch_size 2.0 +1016 52 model.embedding_dim 1.0 +1016 52 model.scoring_fct_norm 1.0 +1016 52 loss.margin 26.37911992805285 +1016 52 loss.adversarial_temperature 0.3602405698636224 +1016 52 negative_sampler.num_negs_per_pos 14.0 +1016 52 training.batch_size 1.0 +1016 53 model.embedding_dim 1.0 +1016 53 model.scoring_fct_norm 2.0 +1016 53 loss.margin 1.4260385894874226 +1016 53 loss.adversarial_temperature 0.14742898978413072 +1016 53 negative_sampler.num_negs_per_pos 61.0 +1016 53 training.batch_size 1.0 +1016 54 model.embedding_dim 1.0 +1016 54 model.scoring_fct_norm 2.0 +1016 54 loss.margin 16.404304795852934 +1016 54 loss.adversarial_temperature 0.5164557313101827 +1016 54 negative_sampler.num_negs_per_pos 31.0 +1016 54 training.batch_size 0.0 +1016 55 model.embedding_dim 2.0 +1016 55 model.scoring_fct_norm 2.0 +1016 55 loss.margin 27.03465435721385 +1016 55 loss.adversarial_temperature 0.5260957566120944 +1016 55 negative_sampler.num_negs_per_pos 80.0 +1016 55 training.batch_size 1.0 +1016 56 model.embedding_dim 0.0 +1016 56 model.scoring_fct_norm 2.0 +1016 56 loss.margin 20.94925364301028 +1016 56 loss.adversarial_temperature 0.88846707011448 +1016 56 
negative_sampler.num_negs_per_pos 71.0 +1016 56 training.batch_size 2.0 +1016 57 model.embedding_dim 0.0 +1016 57 model.scoring_fct_norm 2.0 +1016 57 loss.margin 6.37217756244985 +1016 57 loss.adversarial_temperature 0.2906913903427085 +1016 57 negative_sampler.num_negs_per_pos 63.0 +1016 57 training.batch_size 1.0 +1016 58 model.embedding_dim 0.0 +1016 58 model.scoring_fct_norm 1.0 +1016 58 loss.margin 21.099784024885103 +1016 58 loss.adversarial_temperature 0.8809273127784801 +1016 58 negative_sampler.num_negs_per_pos 85.0 +1016 58 training.batch_size 1.0 +1016 59 model.embedding_dim 2.0 +1016 59 model.scoring_fct_norm 2.0 +1016 59 loss.margin 13.39727099992334 +1016 59 loss.adversarial_temperature 0.6987561412987423 +1016 59 negative_sampler.num_negs_per_pos 43.0 +1016 59 training.batch_size 1.0 +1016 60 model.embedding_dim 0.0 +1016 60 model.scoring_fct_norm 2.0 +1016 60 loss.margin 7.789603536490497 +1016 60 loss.adversarial_temperature 0.430308126858042 +1016 60 negative_sampler.num_negs_per_pos 27.0 +1016 60 training.batch_size 0.0 +1016 61 model.embedding_dim 0.0 +1016 61 model.scoring_fct_norm 2.0 +1016 61 loss.margin 15.180481783740886 +1016 61 loss.adversarial_temperature 0.14423845123630158 +1016 61 negative_sampler.num_negs_per_pos 53.0 +1016 61 training.batch_size 2.0 +1016 62 model.embedding_dim 0.0 +1016 62 model.scoring_fct_norm 2.0 +1016 62 loss.margin 2.903643921121572 +1016 62 loss.adversarial_temperature 0.16197925504224 +1016 62 negative_sampler.num_negs_per_pos 27.0 +1016 62 training.batch_size 0.0 +1016 63 model.embedding_dim 1.0 +1016 63 model.scoring_fct_norm 2.0 +1016 63 loss.margin 14.945337422379952 +1016 63 loss.adversarial_temperature 0.6571863141943615 +1016 63 negative_sampler.num_negs_per_pos 2.0 +1016 63 training.batch_size 2.0 +1016 64 model.embedding_dim 1.0 +1016 64 model.scoring_fct_norm 2.0 +1016 64 loss.margin 8.781849027859714 +1016 64 loss.adversarial_temperature 0.9002059085482232 +1016 64 
negative_sampler.num_negs_per_pos 0.0 +1016 64 training.batch_size 2.0 +1016 65 model.embedding_dim 2.0 +1016 65 model.scoring_fct_norm 2.0 +1016 65 loss.margin 4.003632571661868 +1016 65 loss.adversarial_temperature 0.6781242666566698 +1016 65 negative_sampler.num_negs_per_pos 53.0 +1016 65 training.batch_size 0.0 +1016 66 model.embedding_dim 0.0 +1016 66 model.scoring_fct_norm 2.0 +1016 66 loss.margin 12.212125674800783 +1016 66 loss.adversarial_temperature 0.8261250806299545 +1016 66 negative_sampler.num_negs_per_pos 18.0 +1016 66 training.batch_size 0.0 +1016 67 model.embedding_dim 0.0 +1016 67 model.scoring_fct_norm 1.0 +1016 67 loss.margin 3.4353924593969754 +1016 67 loss.adversarial_temperature 0.7785998060386325 +1016 67 negative_sampler.num_negs_per_pos 90.0 +1016 67 training.batch_size 0.0 +1016 68 model.embedding_dim 1.0 +1016 68 model.scoring_fct_norm 1.0 +1016 68 loss.margin 3.8725990955237646 +1016 68 loss.adversarial_temperature 0.2230096535912833 +1016 68 negative_sampler.num_negs_per_pos 18.0 +1016 68 training.batch_size 0.0 +1016 69 model.embedding_dim 1.0 +1016 69 model.scoring_fct_norm 2.0 +1016 69 loss.margin 20.976324125318076 +1016 69 loss.adversarial_temperature 0.33969408174048293 +1016 69 negative_sampler.num_negs_per_pos 73.0 +1016 69 training.batch_size 1.0 +1016 70 model.embedding_dim 2.0 +1016 70 model.scoring_fct_norm 2.0 +1016 70 loss.margin 4.129145880395263 +1016 70 loss.adversarial_temperature 0.6338175335089283 +1016 70 negative_sampler.num_negs_per_pos 55.0 +1016 70 training.batch_size 1.0 +1016 71 model.embedding_dim 1.0 +1016 71 model.scoring_fct_norm 2.0 +1016 71 loss.margin 11.086457535414146 +1016 71 loss.adversarial_temperature 0.7420757818510355 +1016 71 negative_sampler.num_negs_per_pos 81.0 +1016 71 training.batch_size 0.0 +1016 72 model.embedding_dim 0.0 +1016 72 model.scoring_fct_norm 2.0 +1016 72 loss.margin 14.57185408230034 +1016 72 loss.adversarial_temperature 0.48121917091859956 +1016 72 
negative_sampler.num_negs_per_pos 70.0 +1016 72 training.batch_size 0.0 +1016 73 model.embedding_dim 1.0 +1016 73 model.scoring_fct_norm 1.0 +1016 73 loss.margin 8.338351984133283 +1016 73 loss.adversarial_temperature 0.12904901142379788 +1016 73 negative_sampler.num_negs_per_pos 23.0 +1016 73 training.batch_size 2.0 +1016 74 model.embedding_dim 2.0 +1016 74 model.scoring_fct_norm 1.0 +1016 74 loss.margin 28.880513525250045 +1016 74 loss.adversarial_temperature 0.1410348803441661 +1016 74 negative_sampler.num_negs_per_pos 17.0 +1016 74 training.batch_size 1.0 +1016 75 model.embedding_dim 0.0 +1016 75 model.scoring_fct_norm 2.0 +1016 75 loss.margin 20.79790294561235 +1016 75 loss.adversarial_temperature 0.4992827039721017 +1016 75 negative_sampler.num_negs_per_pos 77.0 +1016 75 training.batch_size 0.0 +1016 76 model.embedding_dim 2.0 +1016 76 model.scoring_fct_norm 1.0 +1016 76 loss.margin 29.87300589249492 +1016 76 loss.adversarial_temperature 0.27864724971175914 +1016 76 negative_sampler.num_negs_per_pos 47.0 +1016 76 training.batch_size 1.0 +1016 77 model.embedding_dim 1.0 +1016 77 model.scoring_fct_norm 1.0 +1016 77 loss.margin 28.943204522623454 +1016 77 loss.adversarial_temperature 0.17817391693520362 +1016 77 negative_sampler.num_negs_per_pos 20.0 +1016 77 training.batch_size 2.0 +1016 78 model.embedding_dim 2.0 +1016 78 model.scoring_fct_norm 1.0 +1016 78 loss.margin 4.934434103113517 +1016 78 loss.adversarial_temperature 0.43103871438938923 +1016 78 negative_sampler.num_negs_per_pos 86.0 +1016 78 training.batch_size 2.0 +1016 79 model.embedding_dim 1.0 +1016 79 model.scoring_fct_norm 2.0 +1016 79 loss.margin 13.73891058754334 +1016 79 loss.adversarial_temperature 0.6433363576144818 +1016 79 negative_sampler.num_negs_per_pos 13.0 +1016 79 training.batch_size 1.0 +1016 80 model.embedding_dim 1.0 +1016 80 model.scoring_fct_norm 1.0 +1016 80 loss.margin 8.696292402019758 +1016 80 loss.adversarial_temperature 0.8800334608706674 +1016 80 
negative_sampler.num_negs_per_pos 18.0 +1016 80 training.batch_size 1.0 +1016 81 model.embedding_dim 1.0 +1016 81 model.scoring_fct_norm 1.0 +1016 81 loss.margin 26.920882640959963 +1016 81 loss.adversarial_temperature 0.4777940889973281 +1016 81 negative_sampler.num_negs_per_pos 79.0 +1016 81 training.batch_size 0.0 +1016 82 model.embedding_dim 0.0 +1016 82 model.scoring_fct_norm 1.0 +1016 82 loss.margin 2.4647825505954923 +1016 82 loss.adversarial_temperature 0.2662591774464701 +1016 82 negative_sampler.num_negs_per_pos 82.0 +1016 82 training.batch_size 1.0 +1016 83 model.embedding_dim 1.0 +1016 83 model.scoring_fct_norm 2.0 +1016 83 loss.margin 21.471015506460045 +1016 83 loss.adversarial_temperature 0.21778706328062652 +1016 83 negative_sampler.num_negs_per_pos 7.0 +1016 83 training.batch_size 1.0 +1016 84 model.embedding_dim 0.0 +1016 84 model.scoring_fct_norm 2.0 +1016 84 loss.margin 15.579945154671103 +1016 84 loss.adversarial_temperature 0.19943275524381182 +1016 84 negative_sampler.num_negs_per_pos 97.0 +1016 84 training.batch_size 0.0 +1016 85 model.embedding_dim 1.0 +1016 85 model.scoring_fct_norm 1.0 +1016 85 loss.margin 25.246510115681982 +1016 85 loss.adversarial_temperature 0.4377993711370516 +1016 85 negative_sampler.num_negs_per_pos 96.0 +1016 85 training.batch_size 1.0 +1016 86 model.embedding_dim 2.0 +1016 86 model.scoring_fct_norm 1.0 +1016 86 loss.margin 6.719757086752793 +1016 86 loss.adversarial_temperature 0.8587855384466925 +1016 86 negative_sampler.num_negs_per_pos 60.0 +1016 86 training.batch_size 1.0 +1016 87 model.embedding_dim 1.0 +1016 87 model.scoring_fct_norm 1.0 +1016 87 loss.margin 15.77870012405916 +1016 87 loss.adversarial_temperature 0.21808171757827038 +1016 87 negative_sampler.num_negs_per_pos 33.0 +1016 87 training.batch_size 1.0 +1016 88 model.embedding_dim 2.0 +1016 88 model.scoring_fct_norm 2.0 +1016 88 loss.margin 25.499403123631225 +1016 88 loss.adversarial_temperature 0.7648721151331069 +1016 88 
negative_sampler.num_negs_per_pos 93.0 +1016 88 training.batch_size 1.0 +1016 89 model.embedding_dim 1.0 +1016 89 model.scoring_fct_norm 2.0 +1016 89 loss.margin 6.931297733581821 +1016 89 loss.adversarial_temperature 0.712737258865303 +1016 89 negative_sampler.num_negs_per_pos 64.0 +1016 89 training.batch_size 0.0 +1016 90 model.embedding_dim 2.0 +1016 90 model.scoring_fct_norm 2.0 +1016 90 loss.margin 12.375868956846366 +1016 90 loss.adversarial_temperature 0.5455644333444704 +1016 90 negative_sampler.num_negs_per_pos 16.0 +1016 90 training.batch_size 1.0 +1016 91 model.embedding_dim 1.0 +1016 91 model.scoring_fct_norm 1.0 +1016 91 loss.margin 24.845939700756126 +1016 91 loss.adversarial_temperature 0.7943019135671159 +1016 91 negative_sampler.num_negs_per_pos 21.0 +1016 91 training.batch_size 0.0 +1016 92 model.embedding_dim 0.0 +1016 92 model.scoring_fct_norm 2.0 +1016 92 loss.margin 17.942785409077363 +1016 92 loss.adversarial_temperature 0.4556792193968553 +1016 92 negative_sampler.num_negs_per_pos 17.0 +1016 92 training.batch_size 2.0 +1016 93 model.embedding_dim 0.0 +1016 93 model.scoring_fct_norm 1.0 +1016 93 loss.margin 10.892949019319904 +1016 93 loss.adversarial_temperature 0.7684050524657561 +1016 93 negative_sampler.num_negs_per_pos 93.0 +1016 93 training.batch_size 0.0 +1016 94 model.embedding_dim 2.0 +1016 94 model.scoring_fct_norm 1.0 +1016 94 loss.margin 6.520955430612207 +1016 94 loss.adversarial_temperature 0.33188015193826337 +1016 94 negative_sampler.num_negs_per_pos 86.0 +1016 94 training.batch_size 1.0 +1016 95 model.embedding_dim 0.0 +1016 95 model.scoring_fct_norm 1.0 +1016 95 loss.margin 16.51842660457961 +1016 95 loss.adversarial_temperature 0.6123556601301596 +1016 95 negative_sampler.num_negs_per_pos 35.0 +1016 95 training.batch_size 2.0 +1016 96 model.embedding_dim 1.0 +1016 96 model.scoring_fct_norm 2.0 +1016 96 loss.margin 5.637860878981636 +1016 96 loss.adversarial_temperature 0.5424691211334634 +1016 96 
negative_sampler.num_negs_per_pos 42.0 +1016 96 training.batch_size 0.0 +1016 97 model.embedding_dim 0.0 +1016 97 model.scoring_fct_norm 1.0 +1016 97 loss.margin 7.030922601399427 +1016 97 loss.adversarial_temperature 0.8415299919664578 +1016 97 negative_sampler.num_negs_per_pos 25.0 +1016 97 training.batch_size 2.0 +1016 98 model.embedding_dim 0.0 +1016 98 model.scoring_fct_norm 2.0 +1016 98 loss.margin 20.645104545266697 +1016 98 loss.adversarial_temperature 0.4889538384361567 +1016 98 negative_sampler.num_negs_per_pos 55.0 +1016 98 training.batch_size 0.0 +1016 99 model.embedding_dim 2.0 +1016 99 model.scoring_fct_norm 2.0 +1016 99 loss.margin 8.727268440523305 +1016 99 loss.adversarial_temperature 0.5275846582818455 +1016 99 negative_sampler.num_negs_per_pos 42.0 +1016 99 training.batch_size 0.0 +1016 100 model.embedding_dim 2.0 +1016 100 model.scoring_fct_norm 1.0 +1016 100 loss.margin 12.08352350056236 +1016 100 loss.adversarial_temperature 0.45022399586688533 +1016 100 negative_sampler.num_negs_per_pos 95.0 +1016 100 training.batch_size 2.0 +1016 1 dataset """kinships""" +1016 1 model """unstructuredmodel""" +1016 1 loss """nssa""" +1016 1 regularizer """no""" +1016 1 optimizer """adadelta""" +1016 1 training_loop """owa""" +1016 1 negative_sampler """basic""" +1016 1 evaluator """rankbased""" +1016 2 dataset """kinships""" +1016 2 model """unstructuredmodel""" +1016 2 loss """nssa""" +1016 2 regularizer """no""" +1016 2 optimizer """adadelta""" +1016 2 training_loop """owa""" +1016 2 negative_sampler """basic""" +1016 2 evaluator """rankbased""" +1016 3 dataset """kinships""" +1016 3 model """unstructuredmodel""" +1016 3 loss """nssa""" +1016 3 regularizer """no""" +1016 3 optimizer """adadelta""" +1016 3 training_loop """owa""" +1016 3 negative_sampler """basic""" +1016 3 evaluator """rankbased""" +1016 4 dataset """kinships""" +1016 4 model """unstructuredmodel""" +1016 4 loss """nssa""" +1016 4 regularizer """no""" +1016 4 optimizer """adadelta""" +1016 
4 training_loop """owa""" +1016 4 negative_sampler """basic""" +1016 4 evaluator """rankbased""" +1016 5 dataset """kinships""" +1016 5 model """unstructuredmodel""" +1016 5 loss """nssa""" +1016 5 regularizer """no""" +1016 5 optimizer """adadelta""" +1016 5 training_loop """owa""" +1016 5 negative_sampler """basic""" +1016 5 evaluator """rankbased""" +1016 6 dataset """kinships""" +1016 6 model """unstructuredmodel""" +1016 6 loss """nssa""" +1016 6 regularizer """no""" +1016 6 optimizer """adadelta""" +1016 6 training_loop """owa""" +1016 6 negative_sampler """basic""" +1016 6 evaluator """rankbased""" +1016 7 dataset """kinships""" +1016 7 model """unstructuredmodel""" +1016 7 loss """nssa""" +1016 7 regularizer """no""" +1016 7 optimizer """adadelta""" +1016 7 training_loop """owa""" +1016 7 negative_sampler """basic""" +1016 7 evaluator """rankbased""" +1016 8 dataset """kinships""" +1016 8 model """unstructuredmodel""" +1016 8 loss """nssa""" +1016 8 regularizer """no""" +1016 8 optimizer """adadelta""" +1016 8 training_loop """owa""" +1016 8 negative_sampler """basic""" +1016 8 evaluator """rankbased""" +1016 9 dataset """kinships""" +1016 9 model """unstructuredmodel""" +1016 9 loss """nssa""" +1016 9 regularizer """no""" +1016 9 optimizer """adadelta""" +1016 9 training_loop """owa""" +1016 9 negative_sampler """basic""" +1016 9 evaluator """rankbased""" +1016 10 dataset """kinships""" +1016 10 model """unstructuredmodel""" +1016 10 loss """nssa""" +1016 10 regularizer """no""" +1016 10 optimizer """adadelta""" +1016 10 training_loop """owa""" +1016 10 negative_sampler """basic""" +1016 10 evaluator """rankbased""" +1016 11 dataset """kinships""" +1016 11 model """unstructuredmodel""" +1016 11 loss """nssa""" +1016 11 regularizer """no""" +1016 11 optimizer """adadelta""" +1016 11 training_loop """owa""" +1016 11 negative_sampler """basic""" +1016 11 evaluator """rankbased""" +1016 12 dataset """kinships""" +1016 12 model """unstructuredmodel""" +1016 12 
loss """nssa""" +1016 12 regularizer """no""" +1016 12 optimizer """adadelta""" +1016 12 training_loop """owa""" +1016 12 negative_sampler """basic""" +1016 12 evaluator """rankbased""" +1016 13 dataset """kinships""" +1016 13 model """unstructuredmodel""" +1016 13 loss """nssa""" +1016 13 regularizer """no""" +1016 13 optimizer """adadelta""" +1016 13 training_loop """owa""" +1016 13 negative_sampler """basic""" +1016 13 evaluator """rankbased""" +1016 14 dataset """kinships""" +1016 14 model """unstructuredmodel""" +1016 14 loss """nssa""" +1016 14 regularizer """no""" +1016 14 optimizer """adadelta""" +1016 14 training_loop """owa""" +1016 14 negative_sampler """basic""" +1016 14 evaluator """rankbased""" +1016 15 dataset """kinships""" +1016 15 model """unstructuredmodel""" +1016 15 loss """nssa""" +1016 15 regularizer """no""" +1016 15 optimizer """adadelta""" +1016 15 training_loop """owa""" +1016 15 negative_sampler """basic""" +1016 15 evaluator """rankbased""" +1016 16 dataset """kinships""" +1016 16 model """unstructuredmodel""" +1016 16 loss """nssa""" +1016 16 regularizer """no""" +1016 16 optimizer """adadelta""" +1016 16 training_loop """owa""" +1016 16 negative_sampler """basic""" +1016 16 evaluator """rankbased""" +1016 17 dataset """kinships""" +1016 17 model """unstructuredmodel""" +1016 17 loss """nssa""" +1016 17 regularizer """no""" +1016 17 optimizer """adadelta""" +1016 17 training_loop """owa""" +1016 17 negative_sampler """basic""" +1016 17 evaluator """rankbased""" +1016 18 dataset """kinships""" +1016 18 model """unstructuredmodel""" +1016 18 loss """nssa""" +1016 18 regularizer """no""" +1016 18 optimizer """adadelta""" +1016 18 training_loop """owa""" +1016 18 negative_sampler """basic""" +1016 18 evaluator """rankbased""" +1016 19 dataset """kinships""" +1016 19 model """unstructuredmodel""" +1016 19 loss """nssa""" +1016 19 regularizer """no""" +1016 19 optimizer """adadelta""" +1016 19 training_loop """owa""" +1016 19 
negative_sampler """basic""" +1016 19 evaluator """rankbased""" +1016 20 dataset """kinships""" +1016 20 model """unstructuredmodel""" +1016 20 loss """nssa""" +1016 20 regularizer """no""" +1016 20 optimizer """adadelta""" +1016 20 training_loop """owa""" +1016 20 negative_sampler """basic""" +1016 20 evaluator """rankbased""" +1016 21 dataset """kinships""" +1016 21 model """unstructuredmodel""" +1016 21 loss """nssa""" +1016 21 regularizer """no""" +1016 21 optimizer """adadelta""" +1016 21 training_loop """owa""" +1016 21 negative_sampler """basic""" +1016 21 evaluator """rankbased""" +1016 22 dataset """kinships""" +1016 22 model """unstructuredmodel""" +1016 22 loss """nssa""" +1016 22 regularizer """no""" +1016 22 optimizer """adadelta""" +1016 22 training_loop """owa""" +1016 22 negative_sampler """basic""" +1016 22 evaluator """rankbased""" +1016 23 dataset """kinships""" +1016 23 model """unstructuredmodel""" +1016 23 loss """nssa""" +1016 23 regularizer """no""" +1016 23 optimizer """adadelta""" +1016 23 training_loop """owa""" +1016 23 negative_sampler """basic""" +1016 23 evaluator """rankbased""" +1016 24 dataset """kinships""" +1016 24 model """unstructuredmodel""" +1016 24 loss """nssa""" +1016 24 regularizer """no""" +1016 24 optimizer """adadelta""" +1016 24 training_loop """owa""" +1016 24 negative_sampler """basic""" +1016 24 evaluator """rankbased""" +1016 25 dataset """kinships""" +1016 25 model """unstructuredmodel""" +1016 25 loss """nssa""" +1016 25 regularizer """no""" +1016 25 optimizer """adadelta""" +1016 25 training_loop """owa""" +1016 25 negative_sampler """basic""" +1016 25 evaluator """rankbased""" +1016 26 dataset """kinships""" +1016 26 model """unstructuredmodel""" +1016 26 loss """nssa""" +1016 26 regularizer """no""" +1016 26 optimizer """adadelta""" +1016 26 training_loop """owa""" +1016 26 negative_sampler """basic""" +1016 26 evaluator """rankbased""" +1016 27 dataset """kinships""" +1016 27 model """unstructuredmodel""" 
+1016 27 loss """nssa""" +1016 27 regularizer """no""" +1016 27 optimizer """adadelta""" +1016 27 training_loop """owa""" +1016 27 negative_sampler """basic""" +1016 27 evaluator """rankbased""" +1016 28 dataset """kinships""" +1016 28 model """unstructuredmodel""" +1016 28 loss """nssa""" +1016 28 regularizer """no""" +1016 28 optimizer """adadelta""" +1016 28 training_loop """owa""" +1016 28 negative_sampler """basic""" +1016 28 evaluator """rankbased""" +1016 29 dataset """kinships""" +1016 29 model """unstructuredmodel""" +1016 29 loss """nssa""" +1016 29 regularizer """no""" +1016 29 optimizer """adadelta""" +1016 29 training_loop """owa""" +1016 29 negative_sampler """basic""" +1016 29 evaluator """rankbased""" +1016 30 dataset """kinships""" +1016 30 model """unstructuredmodel""" +1016 30 loss """nssa""" +1016 30 regularizer """no""" +1016 30 optimizer """adadelta""" +1016 30 training_loop """owa""" +1016 30 negative_sampler """basic""" +1016 30 evaluator """rankbased""" +1016 31 dataset """kinships""" +1016 31 model """unstructuredmodel""" +1016 31 loss """nssa""" +1016 31 regularizer """no""" +1016 31 optimizer """adadelta""" +1016 31 training_loop """owa""" +1016 31 negative_sampler """basic""" +1016 31 evaluator """rankbased""" +1016 32 dataset """kinships""" +1016 32 model """unstructuredmodel""" +1016 32 loss """nssa""" +1016 32 regularizer """no""" +1016 32 optimizer """adadelta""" +1016 32 training_loop """owa""" +1016 32 negative_sampler """basic""" +1016 32 evaluator """rankbased""" +1016 33 dataset """kinships""" +1016 33 model """unstructuredmodel""" +1016 33 loss """nssa""" +1016 33 regularizer """no""" +1016 33 optimizer """adadelta""" +1016 33 training_loop """owa""" +1016 33 negative_sampler """basic""" +1016 33 evaluator """rankbased""" +1016 34 dataset """kinships""" +1016 34 model """unstructuredmodel""" +1016 34 loss """nssa""" +1016 34 regularizer """no""" +1016 34 optimizer """adadelta""" +1016 34 training_loop """owa""" +1016 34 
negative_sampler """basic""" +1016 34 evaluator """rankbased""" +1016 35 dataset """kinships""" +1016 35 model """unstructuredmodel""" +1016 35 loss """nssa""" +1016 35 regularizer """no""" +1016 35 optimizer """adadelta""" +1016 35 training_loop """owa""" +1016 35 negative_sampler """basic""" +1016 35 evaluator """rankbased""" +1016 36 dataset """kinships""" +1016 36 model """unstructuredmodel""" +1016 36 loss """nssa""" +1016 36 regularizer """no""" +1016 36 optimizer """adadelta""" +1016 36 training_loop """owa""" +1016 36 negative_sampler """basic""" +1016 36 evaluator """rankbased""" +1016 37 dataset """kinships""" +1016 37 model """unstructuredmodel""" +1016 37 loss """nssa""" +1016 37 regularizer """no""" +1016 37 optimizer """adadelta""" +1016 37 training_loop """owa""" +1016 37 negative_sampler """basic""" +1016 37 evaluator """rankbased""" +1016 38 dataset """kinships""" +1016 38 model """unstructuredmodel""" +1016 38 loss """nssa""" +1016 38 regularizer """no""" +1016 38 optimizer """adadelta""" +1016 38 training_loop """owa""" +1016 38 negative_sampler """basic""" +1016 38 evaluator """rankbased""" +1016 39 dataset """kinships""" +1016 39 model """unstructuredmodel""" +1016 39 loss """nssa""" +1016 39 regularizer """no""" +1016 39 optimizer """adadelta""" +1016 39 training_loop """owa""" +1016 39 negative_sampler """basic""" +1016 39 evaluator """rankbased""" +1016 40 dataset """kinships""" +1016 40 model """unstructuredmodel""" +1016 40 loss """nssa""" +1016 40 regularizer """no""" +1016 40 optimizer """adadelta""" +1016 40 training_loop """owa""" +1016 40 negative_sampler """basic""" +1016 40 evaluator """rankbased""" +1016 41 dataset """kinships""" +1016 41 model """unstructuredmodel""" +1016 41 loss """nssa""" +1016 41 regularizer """no""" +1016 41 optimizer """adadelta""" +1016 41 training_loop """owa""" +1016 41 negative_sampler """basic""" +1016 41 evaluator """rankbased""" +1016 42 dataset """kinships""" +1016 42 model """unstructuredmodel""" 
+1016 42 loss """nssa""" +1016 42 regularizer """no""" +1016 42 optimizer """adadelta""" +1016 42 training_loop """owa""" +1016 42 negative_sampler """basic""" +1016 42 evaluator """rankbased""" +1016 43 dataset """kinships""" +1016 43 model """unstructuredmodel""" +1016 43 loss """nssa""" +1016 43 regularizer """no""" +1016 43 optimizer """adadelta""" +1016 43 training_loop """owa""" +1016 43 negative_sampler """basic""" +1016 43 evaluator """rankbased""" +1016 44 dataset """kinships""" +1016 44 model """unstructuredmodel""" +1016 44 loss """nssa""" +1016 44 regularizer """no""" +1016 44 optimizer """adadelta""" +1016 44 training_loop """owa""" +1016 44 negative_sampler """basic""" +1016 44 evaluator """rankbased""" +1016 45 dataset """kinships""" +1016 45 model """unstructuredmodel""" +1016 45 loss """nssa""" +1016 45 regularizer """no""" +1016 45 optimizer """adadelta""" +1016 45 training_loop """owa""" +1016 45 negative_sampler """basic""" +1016 45 evaluator """rankbased""" +1016 46 dataset """kinships""" +1016 46 model """unstructuredmodel""" +1016 46 loss """nssa""" +1016 46 regularizer """no""" +1016 46 optimizer """adadelta""" +1016 46 training_loop """owa""" +1016 46 negative_sampler """basic""" +1016 46 evaluator """rankbased""" +1016 47 dataset """kinships""" +1016 47 model """unstructuredmodel""" +1016 47 loss """nssa""" +1016 47 regularizer """no""" +1016 47 optimizer """adadelta""" +1016 47 training_loop """owa""" +1016 47 negative_sampler """basic""" +1016 47 evaluator """rankbased""" +1016 48 dataset """kinships""" +1016 48 model """unstructuredmodel""" +1016 48 loss """nssa""" +1016 48 regularizer """no""" +1016 48 optimizer """adadelta""" +1016 48 training_loop """owa""" +1016 48 negative_sampler """basic""" +1016 48 evaluator """rankbased""" +1016 49 dataset """kinships""" +1016 49 model """unstructuredmodel""" +1016 49 loss """nssa""" +1016 49 regularizer """no""" +1016 49 optimizer """adadelta""" +1016 49 training_loop """owa""" +1016 49 
negative_sampler """basic""" +1016 49 evaluator """rankbased""" +1016 50 dataset """kinships""" +1016 50 model """unstructuredmodel""" +1016 50 loss """nssa""" +1016 50 regularizer """no""" +1016 50 optimizer """adadelta""" +1016 50 training_loop """owa""" +1016 50 negative_sampler """basic""" +1016 50 evaluator """rankbased""" +1016 51 dataset """kinships""" +1016 51 model """unstructuredmodel""" +1016 51 loss """nssa""" +1016 51 regularizer """no""" +1016 51 optimizer """adadelta""" +1016 51 training_loop """owa""" +1016 51 negative_sampler """basic""" +1016 51 evaluator """rankbased""" +1016 52 dataset """kinships""" +1016 52 model """unstructuredmodel""" +1016 52 loss """nssa""" +1016 52 regularizer """no""" +1016 52 optimizer """adadelta""" +1016 52 training_loop """owa""" +1016 52 negative_sampler """basic""" +1016 52 evaluator """rankbased""" +1016 53 dataset """kinships""" +1016 53 model """unstructuredmodel""" +1016 53 loss """nssa""" +1016 53 regularizer """no""" +1016 53 optimizer """adadelta""" +1016 53 training_loop """owa""" +1016 53 negative_sampler """basic""" +1016 53 evaluator """rankbased""" +1016 54 dataset """kinships""" +1016 54 model """unstructuredmodel""" +1016 54 loss """nssa""" +1016 54 regularizer """no""" +1016 54 optimizer """adadelta""" +1016 54 training_loop """owa""" +1016 54 negative_sampler """basic""" +1016 54 evaluator """rankbased""" +1016 55 dataset """kinships""" +1016 55 model """unstructuredmodel""" +1016 55 loss """nssa""" +1016 55 regularizer """no""" +1016 55 optimizer """adadelta""" +1016 55 training_loop """owa""" +1016 55 negative_sampler """basic""" +1016 55 evaluator """rankbased""" +1016 56 dataset """kinships""" +1016 56 model """unstructuredmodel""" +1016 56 loss """nssa""" +1016 56 regularizer """no""" +1016 56 optimizer """adadelta""" +1016 56 training_loop """owa""" +1016 56 negative_sampler """basic""" +1016 56 evaluator """rankbased""" +1016 57 dataset """kinships""" +1016 57 model """unstructuredmodel""" 
+1016 57 loss """nssa""" +1016 57 regularizer """no""" +1016 57 optimizer """adadelta""" +1016 57 training_loop """owa""" +1016 57 negative_sampler """basic""" +1016 57 evaluator """rankbased""" +1016 58 dataset """kinships""" +1016 58 model """unstructuredmodel""" +1016 58 loss """nssa""" +1016 58 regularizer """no""" +1016 58 optimizer """adadelta""" +1016 58 training_loop """owa""" +1016 58 negative_sampler """basic""" +1016 58 evaluator """rankbased""" +1016 59 dataset """kinships""" +1016 59 model """unstructuredmodel""" +1016 59 loss """nssa""" +1016 59 regularizer """no""" +1016 59 optimizer """adadelta""" +1016 59 training_loop """owa""" +1016 59 negative_sampler """basic""" +1016 59 evaluator """rankbased""" +1016 60 dataset """kinships""" +1016 60 model """unstructuredmodel""" +1016 60 loss """nssa""" +1016 60 regularizer """no""" +1016 60 optimizer """adadelta""" +1016 60 training_loop """owa""" +1016 60 negative_sampler """basic""" +1016 60 evaluator """rankbased""" +1016 61 dataset """kinships""" +1016 61 model """unstructuredmodel""" +1016 61 loss """nssa""" +1016 61 regularizer """no""" +1016 61 optimizer """adadelta""" +1016 61 training_loop """owa""" +1016 61 negative_sampler """basic""" +1016 61 evaluator """rankbased""" +1016 62 dataset """kinships""" +1016 62 model """unstructuredmodel""" +1016 62 loss """nssa""" +1016 62 regularizer """no""" +1016 62 optimizer """adadelta""" +1016 62 training_loop """owa""" +1016 62 negative_sampler """basic""" +1016 62 evaluator """rankbased""" +1016 63 dataset """kinships""" +1016 63 model """unstructuredmodel""" +1016 63 loss """nssa""" +1016 63 regularizer """no""" +1016 63 optimizer """adadelta""" +1016 63 training_loop """owa""" +1016 63 negative_sampler """basic""" +1016 63 evaluator """rankbased""" +1016 64 dataset """kinships""" +1016 64 model """unstructuredmodel""" +1016 64 loss """nssa""" +1016 64 regularizer """no""" +1016 64 optimizer """adadelta""" +1016 64 training_loop """owa""" +1016 64 
negative_sampler """basic""" +1016 64 evaluator """rankbased""" +1016 65 dataset """kinships""" +1016 65 model """unstructuredmodel""" +1016 65 loss """nssa""" +1016 65 regularizer """no""" +1016 65 optimizer """adadelta""" +1016 65 training_loop """owa""" +1016 65 negative_sampler """basic""" +1016 65 evaluator """rankbased""" +1016 66 dataset """kinships""" +1016 66 model """unstructuredmodel""" +1016 66 loss """nssa""" +1016 66 regularizer """no""" +1016 66 optimizer """adadelta""" +1016 66 training_loop """owa""" +1016 66 negative_sampler """basic""" +1016 66 evaluator """rankbased""" +1016 67 dataset """kinships""" +1016 67 model """unstructuredmodel""" +1016 67 loss """nssa""" +1016 67 regularizer """no""" +1016 67 optimizer """adadelta""" +1016 67 training_loop """owa""" +1016 67 negative_sampler """basic""" +1016 67 evaluator """rankbased""" +1016 68 dataset """kinships""" +1016 68 model """unstructuredmodel""" +1016 68 loss """nssa""" +1016 68 regularizer """no""" +1016 68 optimizer """adadelta""" +1016 68 training_loop """owa""" +1016 68 negative_sampler """basic""" +1016 68 evaluator """rankbased""" +1016 69 dataset """kinships""" +1016 69 model """unstructuredmodel""" +1016 69 loss """nssa""" +1016 69 regularizer """no""" +1016 69 optimizer """adadelta""" +1016 69 training_loop """owa""" +1016 69 negative_sampler """basic""" +1016 69 evaluator """rankbased""" +1016 70 dataset """kinships""" +1016 70 model """unstructuredmodel""" +1016 70 loss """nssa""" +1016 70 regularizer """no""" +1016 70 optimizer """adadelta""" +1016 70 training_loop """owa""" +1016 70 negative_sampler """basic""" +1016 70 evaluator """rankbased""" +1016 71 dataset """kinships""" +1016 71 model """unstructuredmodel""" +1016 71 loss """nssa""" +1016 71 regularizer """no""" +1016 71 optimizer """adadelta""" +1016 71 training_loop """owa""" +1016 71 negative_sampler """basic""" +1016 71 evaluator """rankbased""" +1016 72 dataset """kinships""" +1016 72 model """unstructuredmodel""" 
+1016 72 loss """nssa""" +1016 72 regularizer """no""" +1016 72 optimizer """adadelta""" +1016 72 training_loop """owa""" +1016 72 negative_sampler """basic""" +1016 72 evaluator """rankbased""" +1016 73 dataset """kinships""" +1016 73 model """unstructuredmodel""" +1016 73 loss """nssa""" +1016 73 regularizer """no""" +1016 73 optimizer """adadelta""" +1016 73 training_loop """owa""" +1016 73 negative_sampler """basic""" +1016 73 evaluator """rankbased""" +1016 74 dataset """kinships""" +1016 74 model """unstructuredmodel""" +1016 74 loss """nssa""" +1016 74 regularizer """no""" +1016 74 optimizer """adadelta""" +1016 74 training_loop """owa""" +1016 74 negative_sampler """basic""" +1016 74 evaluator """rankbased""" +1016 75 dataset """kinships""" +1016 75 model """unstructuredmodel""" +1016 75 loss """nssa""" +1016 75 regularizer """no""" +1016 75 optimizer """adadelta""" +1016 75 training_loop """owa""" +1016 75 negative_sampler """basic""" +1016 75 evaluator """rankbased""" +1016 76 dataset """kinships""" +1016 76 model """unstructuredmodel""" +1016 76 loss """nssa""" +1016 76 regularizer """no""" +1016 76 optimizer """adadelta""" +1016 76 training_loop """owa""" +1016 76 negative_sampler """basic""" +1016 76 evaluator """rankbased""" +1016 77 dataset """kinships""" +1016 77 model """unstructuredmodel""" +1016 77 loss """nssa""" +1016 77 regularizer """no""" +1016 77 optimizer """adadelta""" +1016 77 training_loop """owa""" +1016 77 negative_sampler """basic""" +1016 77 evaluator """rankbased""" +1016 78 dataset """kinships""" +1016 78 model """unstructuredmodel""" +1016 78 loss """nssa""" +1016 78 regularizer """no""" +1016 78 optimizer """adadelta""" +1016 78 training_loop """owa""" +1016 78 negative_sampler """basic""" +1016 78 evaluator """rankbased""" +1016 79 dataset """kinships""" +1016 79 model """unstructuredmodel""" +1016 79 loss """nssa""" +1016 79 regularizer """no""" +1016 79 optimizer """adadelta""" +1016 79 training_loop """owa""" +1016 79 
negative_sampler """basic""" +1016 79 evaluator """rankbased""" +1016 80 dataset """kinships""" +1016 80 model """unstructuredmodel""" +1016 80 loss """nssa""" +1016 80 regularizer """no""" +1016 80 optimizer """adadelta""" +1016 80 training_loop """owa""" +1016 80 negative_sampler """basic""" +1016 80 evaluator """rankbased""" +1016 81 dataset """kinships""" +1016 81 model """unstructuredmodel""" +1016 81 loss """nssa""" +1016 81 regularizer """no""" +1016 81 optimizer """adadelta""" +1016 81 training_loop """owa""" +1016 81 negative_sampler """basic""" +1016 81 evaluator """rankbased""" +1016 82 dataset """kinships""" +1016 82 model """unstructuredmodel""" +1016 82 loss """nssa""" +1016 82 regularizer """no""" +1016 82 optimizer """adadelta""" +1016 82 training_loop """owa""" +1016 82 negative_sampler """basic""" +1016 82 evaluator """rankbased""" +1016 83 dataset """kinships""" +1016 83 model """unstructuredmodel""" +1016 83 loss """nssa""" +1016 83 regularizer """no""" +1016 83 optimizer """adadelta""" +1016 83 training_loop """owa""" +1016 83 negative_sampler """basic""" +1016 83 evaluator """rankbased""" +1016 84 dataset """kinships""" +1016 84 model """unstructuredmodel""" +1016 84 loss """nssa""" +1016 84 regularizer """no""" +1016 84 optimizer """adadelta""" +1016 84 training_loop """owa""" +1016 84 negative_sampler """basic""" +1016 84 evaluator """rankbased""" +1016 85 dataset """kinships""" +1016 85 model """unstructuredmodel""" +1016 85 loss """nssa""" +1016 85 regularizer """no""" +1016 85 optimizer """adadelta""" +1016 85 training_loop """owa""" +1016 85 negative_sampler """basic""" +1016 85 evaluator """rankbased""" +1016 86 dataset """kinships""" +1016 86 model """unstructuredmodel""" +1016 86 loss """nssa""" +1016 86 regularizer """no""" +1016 86 optimizer """adadelta""" +1016 86 training_loop """owa""" +1016 86 negative_sampler """basic""" +1016 86 evaluator """rankbased""" +1016 87 dataset """kinships""" +1016 87 model """unstructuredmodel""" 
+1016 87 loss """nssa""" +1016 87 regularizer """no""" +1016 87 optimizer """adadelta""" +1016 87 training_loop """owa""" +1016 87 negative_sampler """basic""" +1016 87 evaluator """rankbased""" +1016 88 dataset """kinships""" +1016 88 model """unstructuredmodel""" +1016 88 loss """nssa""" +1016 88 regularizer """no""" +1016 88 optimizer """adadelta""" +1016 88 training_loop """owa""" +1016 88 negative_sampler """basic""" +1016 88 evaluator """rankbased""" +1016 89 dataset """kinships""" +1016 89 model """unstructuredmodel""" +1016 89 loss """nssa""" +1016 89 regularizer """no""" +1016 89 optimizer """adadelta""" +1016 89 training_loop """owa""" +1016 89 negative_sampler """basic""" +1016 89 evaluator """rankbased""" +1016 90 dataset """kinships""" +1016 90 model """unstructuredmodel""" +1016 90 loss """nssa""" +1016 90 regularizer """no""" +1016 90 optimizer """adadelta""" +1016 90 training_loop """owa""" +1016 90 negative_sampler """basic""" +1016 90 evaluator """rankbased""" +1016 91 dataset """kinships""" +1016 91 model """unstructuredmodel""" +1016 91 loss """nssa""" +1016 91 regularizer """no""" +1016 91 optimizer """adadelta""" +1016 91 training_loop """owa""" +1016 91 negative_sampler """basic""" +1016 91 evaluator """rankbased""" +1016 92 dataset """kinships""" +1016 92 model """unstructuredmodel""" +1016 92 loss """nssa""" +1016 92 regularizer """no""" +1016 92 optimizer """adadelta""" +1016 92 training_loop """owa""" +1016 92 negative_sampler """basic""" +1016 92 evaluator """rankbased""" +1016 93 dataset """kinships""" +1016 93 model """unstructuredmodel""" +1016 93 loss """nssa""" +1016 93 regularizer """no""" +1016 93 optimizer """adadelta""" +1016 93 training_loop """owa""" +1016 93 negative_sampler """basic""" +1016 93 evaluator """rankbased""" +1016 94 dataset """kinships""" +1016 94 model """unstructuredmodel""" +1016 94 loss """nssa""" +1016 94 regularizer """no""" +1016 94 optimizer """adadelta""" +1016 94 training_loop """owa""" +1016 94 
negative_sampler """basic""" +1016 94 evaluator """rankbased""" +1016 95 dataset """kinships""" +1016 95 model """unstructuredmodel""" +1016 95 loss """nssa""" +1016 95 regularizer """no""" +1016 95 optimizer """adadelta""" +1016 95 training_loop """owa""" +1016 95 negative_sampler """basic""" +1016 95 evaluator """rankbased""" +1016 96 dataset """kinships""" +1016 96 model """unstructuredmodel""" +1016 96 loss """nssa""" +1016 96 regularizer """no""" +1016 96 optimizer """adadelta""" +1016 96 training_loop """owa""" +1016 96 negative_sampler """basic""" +1016 96 evaluator """rankbased""" +1016 97 dataset """kinships""" +1016 97 model """unstructuredmodel""" +1016 97 loss """nssa""" +1016 97 regularizer """no""" +1016 97 optimizer """adadelta""" +1016 97 training_loop """owa""" +1016 97 negative_sampler """basic""" +1016 97 evaluator """rankbased""" +1016 98 dataset """kinships""" +1016 98 model """unstructuredmodel""" +1016 98 loss """nssa""" +1016 98 regularizer """no""" +1016 98 optimizer """adadelta""" +1016 98 training_loop """owa""" +1016 98 negative_sampler """basic""" +1016 98 evaluator """rankbased""" +1016 99 dataset """kinships""" +1016 99 model """unstructuredmodel""" +1016 99 loss """nssa""" +1016 99 regularizer """no""" +1016 99 optimizer """adadelta""" +1016 99 training_loop """owa""" +1016 99 negative_sampler """basic""" +1016 99 evaluator """rankbased""" +1016 100 dataset """kinships""" +1016 100 model """unstructuredmodel""" +1016 100 loss """nssa""" +1016 100 regularizer """no""" +1016 100 optimizer """adadelta""" +1016 100 training_loop """owa""" +1016 100 negative_sampler """basic""" +1016 100 evaluator """rankbased""" +1017 1 model.embedding_dim 2.0 +1017 1 model.scoring_fct_norm 2.0 +1017 1 loss.margin 5.471473172693054 +1017 1 loss.adversarial_temperature 0.3028755114787223 +1017 1 negative_sampler.num_negs_per_pos 81.0 +1017 1 training.batch_size 1.0 +1017 2 model.embedding_dim 0.0 +1017 2 model.scoring_fct_norm 2.0 +1017 2 loss.margin 
19.075339629171435 +1017 2 loss.adversarial_temperature 0.9627258399506917 +1017 2 negative_sampler.num_negs_per_pos 75.0 +1017 2 training.batch_size 1.0 +1017 3 model.embedding_dim 0.0 +1017 3 model.scoring_fct_norm 2.0 +1017 3 loss.margin 7.74362186973892 +1017 3 loss.adversarial_temperature 0.7309294291868321 +1017 3 negative_sampler.num_negs_per_pos 8.0 +1017 3 training.batch_size 1.0 +1017 4 model.embedding_dim 1.0 +1017 4 model.scoring_fct_norm 1.0 +1017 4 loss.margin 19.919382000838272 +1017 4 loss.adversarial_temperature 0.8865064117453921 +1017 4 negative_sampler.num_negs_per_pos 5.0 +1017 4 training.batch_size 2.0 +1017 5 model.embedding_dim 2.0 +1017 5 model.scoring_fct_norm 1.0 +1017 5 loss.margin 5.749556248695775 +1017 5 loss.adversarial_temperature 0.5237108994498628 +1017 5 negative_sampler.num_negs_per_pos 65.0 +1017 5 training.batch_size 2.0 +1017 6 model.embedding_dim 0.0 +1017 6 model.scoring_fct_norm 2.0 +1017 6 loss.margin 18.56012921933164 +1017 6 loss.adversarial_temperature 0.3532990298067392 +1017 6 negative_sampler.num_negs_per_pos 63.0 +1017 6 training.batch_size 2.0 +1017 7 model.embedding_dim 2.0 +1017 7 model.scoring_fct_norm 2.0 +1017 7 loss.margin 28.374112431555993 +1017 7 loss.adversarial_temperature 0.44487111514888444 +1017 7 negative_sampler.num_negs_per_pos 3.0 +1017 7 training.batch_size 0.0 +1017 8 model.embedding_dim 1.0 +1017 8 model.scoring_fct_norm 1.0 +1017 8 loss.margin 4.493777927871061 +1017 8 loss.adversarial_temperature 0.12102318859035667 +1017 8 negative_sampler.num_negs_per_pos 45.0 +1017 8 training.batch_size 1.0 +1017 9 model.embedding_dim 1.0 +1017 9 model.scoring_fct_norm 2.0 +1017 9 loss.margin 29.593186944185195 +1017 9 loss.adversarial_temperature 0.4039028565514038 +1017 9 negative_sampler.num_negs_per_pos 11.0 +1017 9 training.batch_size 1.0 +1017 10 model.embedding_dim 2.0 +1017 10 model.scoring_fct_norm 1.0 +1017 10 loss.margin 28.643870939079388 +1017 10 loss.adversarial_temperature 0.638619830909752 
+1017 10 negative_sampler.num_negs_per_pos 4.0 +1017 10 training.batch_size 1.0 +1017 11 model.embedding_dim 0.0 +1017 11 model.scoring_fct_norm 2.0 +1017 11 loss.margin 23.173347642429402 +1017 11 loss.adversarial_temperature 0.23360474670324521 +1017 11 negative_sampler.num_negs_per_pos 75.0 +1017 11 training.batch_size 2.0 +1017 12 model.embedding_dim 2.0 +1017 12 model.scoring_fct_norm 2.0 +1017 12 loss.margin 26.720272039099896 +1017 12 loss.adversarial_temperature 0.37524403942494 +1017 12 negative_sampler.num_negs_per_pos 9.0 +1017 12 training.batch_size 1.0 +1017 13 model.embedding_dim 2.0 +1017 13 model.scoring_fct_norm 2.0 +1017 13 loss.margin 7.495189247687393 +1017 13 loss.adversarial_temperature 0.9673834789443546 +1017 13 negative_sampler.num_negs_per_pos 85.0 +1017 13 training.batch_size 1.0 +1017 14 model.embedding_dim 2.0 +1017 14 model.scoring_fct_norm 1.0 +1017 14 loss.margin 17.16626829485492 +1017 14 loss.adversarial_temperature 0.2727777354994544 +1017 14 negative_sampler.num_negs_per_pos 36.0 +1017 14 training.batch_size 1.0 +1017 15 model.embedding_dim 2.0 +1017 15 model.scoring_fct_norm 1.0 +1017 15 loss.margin 9.004463182138087 +1017 15 loss.adversarial_temperature 0.9818207430686681 +1017 15 negative_sampler.num_negs_per_pos 15.0 +1017 15 training.batch_size 2.0 +1017 16 model.embedding_dim 2.0 +1017 16 model.scoring_fct_norm 1.0 +1017 16 loss.margin 7.694238387547768 +1017 16 loss.adversarial_temperature 0.46128925751751226 +1017 16 negative_sampler.num_negs_per_pos 3.0 +1017 16 training.batch_size 1.0 +1017 17 model.embedding_dim 1.0 +1017 17 model.scoring_fct_norm 1.0 +1017 17 loss.margin 29.334557273153575 +1017 17 loss.adversarial_temperature 0.856991171974883 +1017 17 negative_sampler.num_negs_per_pos 93.0 +1017 17 training.batch_size 1.0 +1017 18 model.embedding_dim 1.0 +1017 18 model.scoring_fct_norm 2.0 +1017 18 loss.margin 5.284183782364672 +1017 18 loss.adversarial_temperature 0.40671344935222997 +1017 18 
negative_sampler.num_negs_per_pos 62.0 +1017 18 training.batch_size 2.0 +1017 19 model.embedding_dim 1.0 +1017 19 model.scoring_fct_norm 1.0 +1017 19 loss.margin 2.8404099659252937 +1017 19 loss.adversarial_temperature 0.37683306050586907 +1017 19 negative_sampler.num_negs_per_pos 73.0 +1017 19 training.batch_size 2.0 +1017 20 model.embedding_dim 0.0 +1017 20 model.scoring_fct_norm 1.0 +1017 20 loss.margin 21.521400008811458 +1017 20 loss.adversarial_temperature 0.918932672105208 +1017 20 negative_sampler.num_negs_per_pos 45.0 +1017 20 training.batch_size 1.0 +1017 21 model.embedding_dim 0.0 +1017 21 model.scoring_fct_norm 1.0 +1017 21 loss.margin 10.463891115071002 +1017 21 loss.adversarial_temperature 0.7965628441781709 +1017 21 negative_sampler.num_negs_per_pos 43.0 +1017 21 training.batch_size 0.0 +1017 22 model.embedding_dim 0.0 +1017 22 model.scoring_fct_norm 1.0 +1017 22 loss.margin 25.212158488901753 +1017 22 loss.adversarial_temperature 0.14155193325519197 +1017 22 negative_sampler.num_negs_per_pos 29.0 +1017 22 training.batch_size 2.0 +1017 23 model.embedding_dim 0.0 +1017 23 model.scoring_fct_norm 1.0 +1017 23 loss.margin 16.534688449169106 +1017 23 loss.adversarial_temperature 0.31878319173483927 +1017 23 negative_sampler.num_negs_per_pos 63.0 +1017 23 training.batch_size 0.0 +1017 24 model.embedding_dim 1.0 +1017 24 model.scoring_fct_norm 2.0 +1017 24 loss.margin 9.793754712769594 +1017 24 loss.adversarial_temperature 0.45581722571208816 +1017 24 negative_sampler.num_negs_per_pos 41.0 +1017 24 training.batch_size 0.0 +1017 25 model.embedding_dim 2.0 +1017 25 model.scoring_fct_norm 1.0 +1017 25 loss.margin 4.091057920698612 +1017 25 loss.adversarial_temperature 0.8409799020133649 +1017 25 negative_sampler.num_negs_per_pos 69.0 +1017 25 training.batch_size 2.0 +1017 26 model.embedding_dim 2.0 +1017 26 model.scoring_fct_norm 2.0 +1017 26 loss.margin 17.992429243322796 +1017 26 loss.adversarial_temperature 0.6728433108636266 +1017 26 
negative_sampler.num_negs_per_pos 47.0 +1017 26 training.batch_size 0.0 +1017 27 model.embedding_dim 1.0 +1017 27 model.scoring_fct_norm 2.0 +1017 27 loss.margin 11.730039530864422 +1017 27 loss.adversarial_temperature 0.2859722500967859 +1017 27 negative_sampler.num_negs_per_pos 69.0 +1017 27 training.batch_size 1.0 +1017 28 model.embedding_dim 2.0 +1017 28 model.scoring_fct_norm 2.0 +1017 28 loss.margin 1.0650990502704731 +1017 28 loss.adversarial_temperature 0.21154736032940127 +1017 28 negative_sampler.num_negs_per_pos 46.0 +1017 28 training.batch_size 0.0 +1017 29 model.embedding_dim 1.0 +1017 29 model.scoring_fct_norm 2.0 +1017 29 loss.margin 5.139284473007965 +1017 29 loss.adversarial_temperature 0.7550848111253572 +1017 29 negative_sampler.num_negs_per_pos 82.0 +1017 29 training.batch_size 2.0 +1017 30 model.embedding_dim 1.0 +1017 30 model.scoring_fct_norm 1.0 +1017 30 loss.margin 3.5198996604298847 +1017 30 loss.adversarial_temperature 0.8351206736975851 +1017 30 negative_sampler.num_negs_per_pos 40.0 +1017 30 training.batch_size 1.0 +1017 31 model.embedding_dim 2.0 +1017 31 model.scoring_fct_norm 1.0 +1017 31 loss.margin 28.56612086447198 +1017 31 loss.adversarial_temperature 0.981224229897883 +1017 31 negative_sampler.num_negs_per_pos 83.0 +1017 31 training.batch_size 2.0 +1017 32 model.embedding_dim 1.0 +1017 32 model.scoring_fct_norm 1.0 +1017 32 loss.margin 16.377731128209447 +1017 32 loss.adversarial_temperature 0.22399890330814826 +1017 32 negative_sampler.num_negs_per_pos 56.0 +1017 32 training.batch_size 0.0 +1017 33 model.embedding_dim 2.0 +1017 33 model.scoring_fct_norm 2.0 +1017 33 loss.margin 6.329800800756489 +1017 33 loss.adversarial_temperature 0.5965881913270494 +1017 33 negative_sampler.num_negs_per_pos 33.0 +1017 33 training.batch_size 0.0 +1017 34 model.embedding_dim 2.0 +1017 34 model.scoring_fct_norm 1.0 +1017 34 loss.margin 4.016257876975747 +1017 34 loss.adversarial_temperature 0.7830752650862801 +1017 34 
negative_sampler.num_negs_per_pos 62.0 +1017 34 training.batch_size 0.0 +1017 35 model.embedding_dim 1.0 +1017 35 model.scoring_fct_norm 1.0 +1017 35 loss.margin 13.824463251828766 +1017 35 loss.adversarial_temperature 0.9798746326511415 +1017 35 negative_sampler.num_negs_per_pos 13.0 +1017 35 training.batch_size 0.0 +1017 36 model.embedding_dim 2.0 +1017 36 model.scoring_fct_norm 2.0 +1017 36 loss.margin 12.257892751345311 +1017 36 loss.adversarial_temperature 0.15120866002974892 +1017 36 negative_sampler.num_negs_per_pos 86.0 +1017 36 training.batch_size 2.0 +1017 37 model.embedding_dim 0.0 +1017 37 model.scoring_fct_norm 2.0 +1017 37 loss.margin 25.88121654109264 +1017 37 loss.adversarial_temperature 0.6866059504265272 +1017 37 negative_sampler.num_negs_per_pos 57.0 +1017 37 training.batch_size 2.0 +1017 38 model.embedding_dim 0.0 +1017 38 model.scoring_fct_norm 1.0 +1017 38 loss.margin 19.806995494641274 +1017 38 loss.adversarial_temperature 0.16618935156326148 +1017 38 negative_sampler.num_negs_per_pos 6.0 +1017 38 training.batch_size 0.0 +1017 39 model.embedding_dim 0.0 +1017 39 model.scoring_fct_norm 1.0 +1017 39 loss.margin 6.038096874237269 +1017 39 loss.adversarial_temperature 0.8494965911046135 +1017 39 negative_sampler.num_negs_per_pos 94.0 +1017 39 training.batch_size 1.0 +1017 40 model.embedding_dim 2.0 +1017 40 model.scoring_fct_norm 2.0 +1017 40 loss.margin 24.48923745888298 +1017 40 loss.adversarial_temperature 0.7346338571050997 +1017 40 negative_sampler.num_negs_per_pos 43.0 +1017 40 training.batch_size 2.0 +1017 41 model.embedding_dim 1.0 +1017 41 model.scoring_fct_norm 2.0 +1017 41 loss.margin 9.133307812965581 +1017 41 loss.adversarial_temperature 0.6529367502571171 +1017 41 negative_sampler.num_negs_per_pos 56.0 +1017 41 training.batch_size 0.0 +1017 42 model.embedding_dim 1.0 +1017 42 model.scoring_fct_norm 1.0 +1017 42 loss.margin 13.057966199308979 +1017 42 loss.adversarial_temperature 0.4721596095164512 +1017 42 
negative_sampler.num_negs_per_pos 76.0 +1017 42 training.batch_size 0.0 +1017 43 model.embedding_dim 1.0 +1017 43 model.scoring_fct_norm 2.0 +1017 43 loss.margin 24.244584771327936 +1017 43 loss.adversarial_temperature 0.10942511905211506 +1017 43 negative_sampler.num_negs_per_pos 4.0 +1017 43 training.batch_size 2.0 +1017 44 model.embedding_dim 0.0 +1017 44 model.scoring_fct_norm 2.0 +1017 44 loss.margin 5.69534303335964 +1017 44 loss.adversarial_temperature 0.6574314559492972 +1017 44 negative_sampler.num_negs_per_pos 45.0 +1017 44 training.batch_size 0.0 +1017 45 model.embedding_dim 0.0 +1017 45 model.scoring_fct_norm 1.0 +1017 45 loss.margin 22.687959192863335 +1017 45 loss.adversarial_temperature 0.6053634481055093 +1017 45 negative_sampler.num_negs_per_pos 5.0 +1017 45 training.batch_size 2.0 +1017 46 model.embedding_dim 0.0 +1017 46 model.scoring_fct_norm 1.0 +1017 46 loss.margin 16.83957950621975 +1017 46 loss.adversarial_temperature 0.47260304929897234 +1017 46 negative_sampler.num_negs_per_pos 9.0 +1017 46 training.batch_size 2.0 +1017 47 model.embedding_dim 0.0 +1017 47 model.scoring_fct_norm 1.0 +1017 47 loss.margin 9.368912598978364 +1017 47 loss.adversarial_temperature 0.9016994054147089 +1017 47 negative_sampler.num_negs_per_pos 28.0 +1017 47 training.batch_size 1.0 +1017 48 model.embedding_dim 1.0 +1017 48 model.scoring_fct_norm 2.0 +1017 48 loss.margin 17.397684806413746 +1017 48 loss.adversarial_temperature 0.6238248026811105 +1017 48 negative_sampler.num_negs_per_pos 34.0 +1017 48 training.batch_size 1.0 +1017 49 model.embedding_dim 1.0 +1017 49 model.scoring_fct_norm 1.0 +1017 49 loss.margin 4.746064062295144 +1017 49 loss.adversarial_temperature 0.5642643058887595 +1017 49 negative_sampler.num_negs_per_pos 81.0 +1017 49 training.batch_size 0.0 +1017 50 model.embedding_dim 1.0 +1017 50 model.scoring_fct_norm 1.0 +1017 50 loss.margin 2.162212684090272 +1017 50 loss.adversarial_temperature 0.8769612977809609 +1017 50 
negative_sampler.num_negs_per_pos 4.0 +1017 50 training.batch_size 1.0 +1017 51 model.embedding_dim 0.0 +1017 51 model.scoring_fct_norm 1.0 +1017 51 loss.margin 11.38048097070566 +1017 51 loss.adversarial_temperature 0.25122345098870497 +1017 51 negative_sampler.num_negs_per_pos 38.0 +1017 51 training.batch_size 2.0 +1017 52 model.embedding_dim 1.0 +1017 52 model.scoring_fct_norm 2.0 +1017 52 loss.margin 10.334563497152475 +1017 52 loss.adversarial_temperature 0.9585864394253498 +1017 52 negative_sampler.num_negs_per_pos 30.0 +1017 52 training.batch_size 2.0 +1017 53 model.embedding_dim 1.0 +1017 53 model.scoring_fct_norm 2.0 +1017 53 loss.margin 8.860518473052304 +1017 53 loss.adversarial_temperature 0.8938684125800421 +1017 53 negative_sampler.num_negs_per_pos 82.0 +1017 53 training.batch_size 2.0 +1017 54 model.embedding_dim 0.0 +1017 54 model.scoring_fct_norm 1.0 +1017 54 loss.margin 9.937999192571978 +1017 54 loss.adversarial_temperature 0.47961322739585055 +1017 54 negative_sampler.num_negs_per_pos 1.0 +1017 54 training.batch_size 0.0 +1017 55 model.embedding_dim 2.0 +1017 55 model.scoring_fct_norm 1.0 +1017 55 loss.margin 23.408024939554426 +1017 55 loss.adversarial_temperature 0.9300229776323927 +1017 55 negative_sampler.num_negs_per_pos 72.0 +1017 55 training.batch_size 1.0 +1017 56 model.embedding_dim 1.0 +1017 56 model.scoring_fct_norm 2.0 +1017 56 loss.margin 5.962007661594478 +1017 56 loss.adversarial_temperature 0.22070955182784774 +1017 56 negative_sampler.num_negs_per_pos 55.0 +1017 56 training.batch_size 2.0 +1017 57 model.embedding_dim 2.0 +1017 57 model.scoring_fct_norm 1.0 +1017 57 loss.margin 2.407359113014685 +1017 57 loss.adversarial_temperature 0.7229608758098512 +1017 57 negative_sampler.num_negs_per_pos 34.0 +1017 57 training.batch_size 0.0 +1017 58 model.embedding_dim 1.0 +1017 58 model.scoring_fct_norm 2.0 +1017 58 loss.margin 25.40551231251832 +1017 58 loss.adversarial_temperature 0.3087965059922975 +1017 58 
negative_sampler.num_negs_per_pos 67.0 +1017 58 training.batch_size 2.0 +1017 59 model.embedding_dim 2.0 +1017 59 model.scoring_fct_norm 1.0 +1017 59 loss.margin 22.543243341704894 +1017 59 loss.adversarial_temperature 0.9999530790126891 +1017 59 negative_sampler.num_negs_per_pos 91.0 +1017 59 training.batch_size 2.0 +1017 60 model.embedding_dim 2.0 +1017 60 model.scoring_fct_norm 2.0 +1017 60 loss.margin 23.948931956216505 +1017 60 loss.adversarial_temperature 0.2634908125655697 +1017 60 negative_sampler.num_negs_per_pos 60.0 +1017 60 training.batch_size 0.0 +1017 61 model.embedding_dim 2.0 +1017 61 model.scoring_fct_norm 2.0 +1017 61 loss.margin 27.29268583229198 +1017 61 loss.adversarial_temperature 0.1762885323340739 +1017 61 negative_sampler.num_negs_per_pos 34.0 +1017 61 training.batch_size 1.0 +1017 62 model.embedding_dim 1.0 +1017 62 model.scoring_fct_norm 2.0 +1017 62 loss.margin 12.752848861440157 +1017 62 loss.adversarial_temperature 0.25261414067551574 +1017 62 negative_sampler.num_negs_per_pos 56.0 +1017 62 training.batch_size 1.0 +1017 63 model.embedding_dim 1.0 +1017 63 model.scoring_fct_norm 2.0 +1017 63 loss.margin 6.4271668338395855 +1017 63 loss.adversarial_temperature 0.5414848460285598 +1017 63 negative_sampler.num_negs_per_pos 84.0 +1017 63 training.batch_size 1.0 +1017 64 model.embedding_dim 1.0 +1017 64 model.scoring_fct_norm 2.0 +1017 64 loss.margin 27.489778513983936 +1017 64 loss.adversarial_temperature 0.4480029805178182 +1017 64 negative_sampler.num_negs_per_pos 64.0 +1017 64 training.batch_size 1.0 +1017 65 model.embedding_dim 2.0 +1017 65 model.scoring_fct_norm 1.0 +1017 65 loss.margin 18.4708303846579 +1017 65 loss.adversarial_temperature 0.5548842105460842 +1017 65 negative_sampler.num_negs_per_pos 50.0 +1017 65 training.batch_size 2.0 +1017 66 model.embedding_dim 2.0 +1017 66 model.scoring_fct_norm 1.0 +1017 66 loss.margin 25.171246569475624 +1017 66 loss.adversarial_temperature 0.20310163948835325 +1017 66 
negative_sampler.num_negs_per_pos 21.0 +1017 66 training.batch_size 1.0 +1017 67 model.embedding_dim 2.0 +1017 67 model.scoring_fct_norm 2.0 +1017 67 loss.margin 15.541507659730462 +1017 67 loss.adversarial_temperature 0.7487916447849989 +1017 67 negative_sampler.num_negs_per_pos 7.0 +1017 67 training.batch_size 0.0 +1017 68 model.embedding_dim 1.0 +1017 68 model.scoring_fct_norm 1.0 +1017 68 loss.margin 25.51143591779274 +1017 68 loss.adversarial_temperature 0.27319624897453876 +1017 68 negative_sampler.num_negs_per_pos 75.0 +1017 68 training.batch_size 2.0 +1017 69 model.embedding_dim 2.0 +1017 69 model.scoring_fct_norm 1.0 +1017 69 loss.margin 8.92903761222411 +1017 69 loss.adversarial_temperature 0.3817381450508086 +1017 69 negative_sampler.num_negs_per_pos 30.0 +1017 69 training.batch_size 1.0 +1017 70 model.embedding_dim 2.0 +1017 70 model.scoring_fct_norm 2.0 +1017 70 loss.margin 1.232618044454657 +1017 70 loss.adversarial_temperature 0.9755612327750012 +1017 70 negative_sampler.num_negs_per_pos 87.0 +1017 70 training.batch_size 0.0 +1017 71 model.embedding_dim 2.0 +1017 71 model.scoring_fct_norm 2.0 +1017 71 loss.margin 15.627536513348613 +1017 71 loss.adversarial_temperature 0.21824875722156706 +1017 71 negative_sampler.num_negs_per_pos 51.0 +1017 71 training.batch_size 1.0 +1017 72 model.embedding_dim 2.0 +1017 72 model.scoring_fct_norm 1.0 +1017 72 loss.margin 2.548430113203091 +1017 72 loss.adversarial_temperature 0.45075175550097213 +1017 72 negative_sampler.num_negs_per_pos 4.0 +1017 72 training.batch_size 1.0 +1017 73 model.embedding_dim 1.0 +1017 73 model.scoring_fct_norm 2.0 +1017 73 loss.margin 21.222055060998247 +1017 73 loss.adversarial_temperature 0.4036799220296129 +1017 73 negative_sampler.num_negs_per_pos 49.0 +1017 73 training.batch_size 1.0 +1017 74 model.embedding_dim 0.0 +1017 74 model.scoring_fct_norm 1.0 +1017 74 loss.margin 22.871825581545917 +1017 74 loss.adversarial_temperature 0.7789043740897003 +1017 74 
negative_sampler.num_negs_per_pos 66.0 +1017 74 training.batch_size 2.0 +1017 75 model.embedding_dim 2.0 +1017 75 model.scoring_fct_norm 2.0 +1017 75 loss.margin 4.832661315932159 +1017 75 loss.adversarial_temperature 0.3390924829266138 +1017 75 negative_sampler.num_negs_per_pos 89.0 +1017 75 training.batch_size 0.0 +1017 76 model.embedding_dim 2.0 +1017 76 model.scoring_fct_norm 1.0 +1017 76 loss.margin 26.81548753023303 +1017 76 loss.adversarial_temperature 0.2506438515971693 +1017 76 negative_sampler.num_negs_per_pos 2.0 +1017 76 training.batch_size 1.0 +1017 77 model.embedding_dim 0.0 +1017 77 model.scoring_fct_norm 1.0 +1017 77 loss.margin 23.73890250368804 +1017 77 loss.adversarial_temperature 0.30355278823869913 +1017 77 negative_sampler.num_negs_per_pos 6.0 +1017 77 training.batch_size 0.0 +1017 78 model.embedding_dim 0.0 +1017 78 model.scoring_fct_norm 2.0 +1017 78 loss.margin 18.18003244094846 +1017 78 loss.adversarial_temperature 0.4807390426275939 +1017 78 negative_sampler.num_negs_per_pos 24.0 +1017 78 training.batch_size 2.0 +1017 79 model.embedding_dim 0.0 +1017 79 model.scoring_fct_norm 2.0 +1017 79 loss.margin 29.086032509370437 +1017 79 loss.adversarial_temperature 0.6769112899181393 +1017 79 negative_sampler.num_negs_per_pos 71.0 +1017 79 training.batch_size 1.0 +1017 80 model.embedding_dim 1.0 +1017 80 model.scoring_fct_norm 1.0 +1017 80 loss.margin 2.232005903764926 +1017 80 loss.adversarial_temperature 0.251267352296823 +1017 80 negative_sampler.num_negs_per_pos 54.0 +1017 80 training.batch_size 0.0 +1017 81 model.embedding_dim 2.0 +1017 81 model.scoring_fct_norm 1.0 +1017 81 loss.margin 10.121574076794293 +1017 81 loss.adversarial_temperature 0.5856614771611796 +1017 81 negative_sampler.num_negs_per_pos 47.0 +1017 81 training.batch_size 0.0 +1017 82 model.embedding_dim 1.0 +1017 82 model.scoring_fct_norm 2.0 +1017 82 loss.margin 16.246509428253603 +1017 82 loss.adversarial_temperature 0.6508667412336753 +1017 82 
negative_sampler.num_negs_per_pos 76.0 +1017 82 training.batch_size 2.0 +1017 83 model.embedding_dim 1.0 +1017 83 model.scoring_fct_norm 1.0 +1017 83 loss.margin 6.242398647794044 +1017 83 loss.adversarial_temperature 0.7250499644122043 +1017 83 negative_sampler.num_negs_per_pos 61.0 +1017 83 training.batch_size 1.0 +1017 84 model.embedding_dim 0.0 +1017 84 model.scoring_fct_norm 1.0 +1017 84 loss.margin 28.440386644125287 +1017 84 loss.adversarial_temperature 0.1325884754284336 +1017 84 negative_sampler.num_negs_per_pos 4.0 +1017 84 training.batch_size 1.0 +1017 85 model.embedding_dim 2.0 +1017 85 model.scoring_fct_norm 2.0 +1017 85 loss.margin 12.331324939116007 +1017 85 loss.adversarial_temperature 0.6419757091580388 +1017 85 negative_sampler.num_negs_per_pos 30.0 +1017 85 training.batch_size 1.0 +1017 86 model.embedding_dim 1.0 +1017 86 model.scoring_fct_norm 2.0 +1017 86 loss.margin 22.893169115651226 +1017 86 loss.adversarial_temperature 0.14029444688853918 +1017 86 negative_sampler.num_negs_per_pos 58.0 +1017 86 training.batch_size 2.0 +1017 87 model.embedding_dim 1.0 +1017 87 model.scoring_fct_norm 2.0 +1017 87 loss.margin 6.623649863890705 +1017 87 loss.adversarial_temperature 0.6074208790824307 +1017 87 negative_sampler.num_negs_per_pos 43.0 +1017 87 training.batch_size 2.0 +1017 88 model.embedding_dim 0.0 +1017 88 model.scoring_fct_norm 1.0 +1017 88 loss.margin 28.88087192199076 +1017 88 loss.adversarial_temperature 0.23901317979482098 +1017 88 negative_sampler.num_negs_per_pos 66.0 +1017 88 training.batch_size 2.0 +1017 89 model.embedding_dim 0.0 +1017 89 model.scoring_fct_norm 2.0 +1017 89 loss.margin 28.085335338085113 +1017 89 loss.adversarial_temperature 0.8596090867414642 +1017 89 negative_sampler.num_negs_per_pos 92.0 +1017 89 training.batch_size 1.0 +1017 90 model.embedding_dim 2.0 +1017 90 model.scoring_fct_norm 1.0 +1017 90 loss.margin 24.16863262264411 +1017 90 loss.adversarial_temperature 0.7208040252524732 +1017 90 
negative_sampler.num_negs_per_pos 28.0 +1017 90 training.batch_size 2.0 +1017 91 model.embedding_dim 0.0 +1017 91 model.scoring_fct_norm 2.0 +1017 91 loss.margin 25.896314642364576 +1017 91 loss.adversarial_temperature 0.2786847567603642 +1017 91 negative_sampler.num_negs_per_pos 11.0 +1017 91 training.batch_size 0.0 +1017 92 model.embedding_dim 0.0 +1017 92 model.scoring_fct_norm 1.0 +1017 92 loss.margin 11.18698240908781 +1017 92 loss.adversarial_temperature 0.8756732217693757 +1017 92 negative_sampler.num_negs_per_pos 79.0 +1017 92 training.batch_size 2.0 +1017 93 model.embedding_dim 1.0 +1017 93 model.scoring_fct_norm 1.0 +1017 93 loss.margin 8.973192135635935 +1017 93 loss.adversarial_temperature 0.5907024347741733 +1017 93 negative_sampler.num_negs_per_pos 30.0 +1017 93 training.batch_size 2.0 +1017 94 model.embedding_dim 0.0 +1017 94 model.scoring_fct_norm 1.0 +1017 94 loss.margin 25.935955378936008 +1017 94 loss.adversarial_temperature 0.8371006117714822 +1017 94 negative_sampler.num_negs_per_pos 10.0 +1017 94 training.batch_size 2.0 +1017 95 model.embedding_dim 2.0 +1017 95 model.scoring_fct_norm 1.0 +1017 95 loss.margin 16.153896009454392 +1017 95 loss.adversarial_temperature 0.7177397566706671 +1017 95 negative_sampler.num_negs_per_pos 51.0 +1017 95 training.batch_size 1.0 +1017 96 model.embedding_dim 0.0 +1017 96 model.scoring_fct_norm 1.0 +1017 96 loss.margin 6.660373562064721 +1017 96 loss.adversarial_temperature 0.34674625282142446 +1017 96 negative_sampler.num_negs_per_pos 16.0 +1017 96 training.batch_size 2.0 +1017 97 model.embedding_dim 2.0 +1017 97 model.scoring_fct_norm 1.0 +1017 97 loss.margin 4.322507726488045 +1017 97 loss.adversarial_temperature 0.2221829278273964 +1017 97 negative_sampler.num_negs_per_pos 59.0 +1017 97 training.batch_size 0.0 +1017 98 model.embedding_dim 1.0 +1017 98 model.scoring_fct_norm 1.0 +1017 98 loss.margin 24.74308040200496 +1017 98 loss.adversarial_temperature 0.4086650025208107 +1017 98 
negative_sampler.num_negs_per_pos 30.0 +1017 98 training.batch_size 2.0 +1017 99 model.embedding_dim 1.0 +1017 99 model.scoring_fct_norm 2.0 +1017 99 loss.margin 18.01935896839926 +1017 99 loss.adversarial_temperature 0.4659544698537491 +1017 99 negative_sampler.num_negs_per_pos 82.0 +1017 99 training.batch_size 0.0 +1017 100 model.embedding_dim 0.0 +1017 100 model.scoring_fct_norm 2.0 +1017 100 loss.margin 28.93670361116813 +1017 100 loss.adversarial_temperature 0.10984004754643212 +1017 100 negative_sampler.num_negs_per_pos 45.0 +1017 100 training.batch_size 1.0 +1017 1 dataset """kinships""" +1017 1 model """unstructuredmodel""" +1017 1 loss """nssa""" +1017 1 regularizer """no""" +1017 1 optimizer """adadelta""" +1017 1 training_loop """owa""" +1017 1 negative_sampler """basic""" +1017 1 evaluator """rankbased""" +1017 2 dataset """kinships""" +1017 2 model """unstructuredmodel""" +1017 2 loss """nssa""" +1017 2 regularizer """no""" +1017 2 optimizer """adadelta""" +1017 2 training_loop """owa""" +1017 2 negative_sampler """basic""" +1017 2 evaluator """rankbased""" +1017 3 dataset """kinships""" +1017 3 model """unstructuredmodel""" +1017 3 loss """nssa""" +1017 3 regularizer """no""" +1017 3 optimizer """adadelta""" +1017 3 training_loop """owa""" +1017 3 negative_sampler """basic""" +1017 3 evaluator """rankbased""" +1017 4 dataset """kinships""" +1017 4 model """unstructuredmodel""" +1017 4 loss """nssa""" +1017 4 regularizer """no""" +1017 4 optimizer """adadelta""" +1017 4 training_loop """owa""" +1017 4 negative_sampler """basic""" +1017 4 evaluator """rankbased""" +1017 5 dataset """kinships""" +1017 5 model """unstructuredmodel""" +1017 5 loss """nssa""" +1017 5 regularizer """no""" +1017 5 optimizer """adadelta""" +1017 5 training_loop """owa""" +1017 5 negative_sampler """basic""" +1017 5 evaluator """rankbased""" +1017 6 dataset """kinships""" +1017 6 model """unstructuredmodel""" +1017 6 loss """nssa""" +1017 6 regularizer """no""" +1017 6 
optimizer """adadelta""" +1017 6 training_loop """owa""" +1017 6 negative_sampler """basic""" +1017 6 evaluator """rankbased""" +1017 7 dataset """kinships""" +1017 7 model """unstructuredmodel""" +1017 7 loss """nssa""" +1017 7 regularizer """no""" +1017 7 optimizer """adadelta""" +1017 7 training_loop """owa""" +1017 7 negative_sampler """basic""" +1017 7 evaluator """rankbased""" +1017 8 dataset """kinships""" +1017 8 model """unstructuredmodel""" +1017 8 loss """nssa""" +1017 8 regularizer """no""" +1017 8 optimizer """adadelta""" +1017 8 training_loop """owa""" +1017 8 negative_sampler """basic""" +1017 8 evaluator """rankbased""" +1017 9 dataset """kinships""" +1017 9 model """unstructuredmodel""" +1017 9 loss """nssa""" +1017 9 regularizer """no""" +1017 9 optimizer """adadelta""" +1017 9 training_loop """owa""" +1017 9 negative_sampler """basic""" +1017 9 evaluator """rankbased""" +1017 10 dataset """kinships""" +1017 10 model """unstructuredmodel""" +1017 10 loss """nssa""" +1017 10 regularizer """no""" +1017 10 optimizer """adadelta""" +1017 10 training_loop """owa""" +1017 10 negative_sampler """basic""" +1017 10 evaluator """rankbased""" +1017 11 dataset """kinships""" +1017 11 model """unstructuredmodel""" +1017 11 loss """nssa""" +1017 11 regularizer """no""" +1017 11 optimizer """adadelta""" +1017 11 training_loop """owa""" +1017 11 negative_sampler """basic""" +1017 11 evaluator """rankbased""" +1017 12 dataset """kinships""" +1017 12 model """unstructuredmodel""" +1017 12 loss """nssa""" +1017 12 regularizer """no""" +1017 12 optimizer """adadelta""" +1017 12 training_loop """owa""" +1017 12 negative_sampler """basic""" +1017 12 evaluator """rankbased""" +1017 13 dataset """kinships""" +1017 13 model """unstructuredmodel""" +1017 13 loss """nssa""" +1017 13 regularizer """no""" +1017 13 optimizer """adadelta""" +1017 13 training_loop """owa""" +1017 13 negative_sampler """basic""" +1017 13 evaluator """rankbased""" +1017 14 dataset """kinships""" 
+1017 14 model """unstructuredmodel""" +1017 14 loss """nssa""" +1017 14 regularizer """no""" +1017 14 optimizer """adadelta""" +1017 14 training_loop """owa""" +1017 14 negative_sampler """basic""" +1017 14 evaluator """rankbased""" +1017 15 dataset """kinships""" +1017 15 model """unstructuredmodel""" +1017 15 loss """nssa""" +1017 15 regularizer """no""" +1017 15 optimizer """adadelta""" +1017 15 training_loop """owa""" +1017 15 negative_sampler """basic""" +1017 15 evaluator """rankbased""" +1017 16 dataset """kinships""" +1017 16 model """unstructuredmodel""" +1017 16 loss """nssa""" +1017 16 regularizer """no""" +1017 16 optimizer """adadelta""" +1017 16 training_loop """owa""" +1017 16 negative_sampler """basic""" +1017 16 evaluator """rankbased""" +1017 17 dataset """kinships""" +1017 17 model """unstructuredmodel""" +1017 17 loss """nssa""" +1017 17 regularizer """no""" +1017 17 optimizer """adadelta""" +1017 17 training_loop """owa""" +1017 17 negative_sampler """basic""" +1017 17 evaluator """rankbased""" +1017 18 dataset """kinships""" +1017 18 model """unstructuredmodel""" +1017 18 loss """nssa""" +1017 18 regularizer """no""" +1017 18 optimizer """adadelta""" +1017 18 training_loop """owa""" +1017 18 negative_sampler """basic""" +1017 18 evaluator """rankbased""" +1017 19 dataset """kinships""" +1017 19 model """unstructuredmodel""" +1017 19 loss """nssa""" +1017 19 regularizer """no""" +1017 19 optimizer """adadelta""" +1017 19 training_loop """owa""" +1017 19 negative_sampler """basic""" +1017 19 evaluator """rankbased""" +1017 20 dataset """kinships""" +1017 20 model """unstructuredmodel""" +1017 20 loss """nssa""" +1017 20 regularizer """no""" +1017 20 optimizer """adadelta""" +1017 20 training_loop """owa""" +1017 20 negative_sampler """basic""" +1017 20 evaluator """rankbased""" +1017 21 dataset """kinships""" +1017 21 model """unstructuredmodel""" +1017 21 loss """nssa""" +1017 21 regularizer """no""" +1017 21 optimizer """adadelta""" +1017 21 
training_loop """owa""" +1017 21 negative_sampler """basic""" +1017 21 evaluator """rankbased""" +1017 22 dataset """kinships""" +1017 22 model """unstructuredmodel""" +1017 22 loss """nssa""" +1017 22 regularizer """no""" +1017 22 optimizer """adadelta""" +1017 22 training_loop """owa""" +1017 22 negative_sampler """basic""" +1017 22 evaluator """rankbased""" +1017 23 dataset """kinships""" +1017 23 model """unstructuredmodel""" +1017 23 loss """nssa""" +1017 23 regularizer """no""" +1017 23 optimizer """adadelta""" +1017 23 training_loop """owa""" +1017 23 negative_sampler """basic""" +1017 23 evaluator """rankbased""" +1017 24 dataset """kinships""" +1017 24 model """unstructuredmodel""" +1017 24 loss """nssa""" +1017 24 regularizer """no""" +1017 24 optimizer """adadelta""" +1017 24 training_loop """owa""" +1017 24 negative_sampler """basic""" +1017 24 evaluator """rankbased""" +1017 25 dataset """kinships""" +1017 25 model """unstructuredmodel""" +1017 25 loss """nssa""" +1017 25 regularizer """no""" +1017 25 optimizer """adadelta""" +1017 25 training_loop """owa""" +1017 25 negative_sampler """basic""" +1017 25 evaluator """rankbased""" +1017 26 dataset """kinships""" +1017 26 model """unstructuredmodel""" +1017 26 loss """nssa""" +1017 26 regularizer """no""" +1017 26 optimizer """adadelta""" +1017 26 training_loop """owa""" +1017 26 negative_sampler """basic""" +1017 26 evaluator """rankbased""" +1017 27 dataset """kinships""" +1017 27 model """unstructuredmodel""" +1017 27 loss """nssa""" +1017 27 regularizer """no""" +1017 27 optimizer """adadelta""" +1017 27 training_loop """owa""" +1017 27 negative_sampler """basic""" +1017 27 evaluator """rankbased""" +1017 28 dataset """kinships""" +1017 28 model """unstructuredmodel""" +1017 28 loss """nssa""" +1017 28 regularizer """no""" +1017 28 optimizer """adadelta""" +1017 28 training_loop """owa""" +1017 28 negative_sampler """basic""" +1017 28 evaluator """rankbased""" +1017 29 dataset """kinships""" +1017 29 
model """unstructuredmodel""" +1017 29 loss """nssa""" +1017 29 regularizer """no""" +1017 29 optimizer """adadelta""" +1017 29 training_loop """owa""" +1017 29 negative_sampler """basic""" +1017 29 evaluator """rankbased""" +1017 30 dataset """kinships""" +1017 30 model """unstructuredmodel""" +1017 30 loss """nssa""" +1017 30 regularizer """no""" +1017 30 optimizer """adadelta""" +1017 30 training_loop """owa""" +1017 30 negative_sampler """basic""" +1017 30 evaluator """rankbased""" +1017 31 dataset """kinships""" +1017 31 model """unstructuredmodel""" +1017 31 loss """nssa""" +1017 31 regularizer """no""" +1017 31 optimizer """adadelta""" +1017 31 training_loop """owa""" +1017 31 negative_sampler """basic""" +1017 31 evaluator """rankbased""" +1017 32 dataset """kinships""" +1017 32 model """unstructuredmodel""" +1017 32 loss """nssa""" +1017 32 regularizer """no""" +1017 32 optimizer """adadelta""" +1017 32 training_loop """owa""" +1017 32 negative_sampler """basic""" +1017 32 evaluator """rankbased""" +1017 33 dataset """kinships""" +1017 33 model """unstructuredmodel""" +1017 33 loss """nssa""" +1017 33 regularizer """no""" +1017 33 optimizer """adadelta""" +1017 33 training_loop """owa""" +1017 33 negative_sampler """basic""" +1017 33 evaluator """rankbased""" +1017 34 dataset """kinships""" +1017 34 model """unstructuredmodel""" +1017 34 loss """nssa""" +1017 34 regularizer """no""" +1017 34 optimizer """adadelta""" +1017 34 training_loop """owa""" +1017 34 negative_sampler """basic""" +1017 34 evaluator """rankbased""" +1017 35 dataset """kinships""" +1017 35 model """unstructuredmodel""" +1017 35 loss """nssa""" +1017 35 regularizer """no""" +1017 35 optimizer """adadelta""" +1017 35 training_loop """owa""" +1017 35 negative_sampler """basic""" +1017 35 evaluator """rankbased""" +1017 36 dataset """kinships""" +1017 36 model """unstructuredmodel""" +1017 36 loss """nssa""" +1017 36 regularizer """no""" +1017 36 optimizer """adadelta""" +1017 36 
training_loop """owa""" +1017 36 negative_sampler """basic""" +1017 36 evaluator """rankbased""" +1017 37 dataset """kinships""" +1017 37 model """unstructuredmodel""" +1017 37 loss """nssa""" +1017 37 regularizer """no""" +1017 37 optimizer """adadelta""" +1017 37 training_loop """owa""" +1017 37 negative_sampler """basic""" +1017 37 evaluator """rankbased""" +1017 38 dataset """kinships""" +1017 38 model """unstructuredmodel""" +1017 38 loss """nssa""" +1017 38 regularizer """no""" +1017 38 optimizer """adadelta""" +1017 38 training_loop """owa""" +1017 38 negative_sampler """basic""" +1017 38 evaluator """rankbased""" +1017 39 dataset """kinships""" +1017 39 model """unstructuredmodel""" +1017 39 loss """nssa""" +1017 39 regularizer """no""" +1017 39 optimizer """adadelta""" +1017 39 training_loop """owa""" +1017 39 negative_sampler """basic""" +1017 39 evaluator """rankbased""" +1017 40 dataset """kinships""" +1017 40 model """unstructuredmodel""" +1017 40 loss """nssa""" +1017 40 regularizer """no""" +1017 40 optimizer """adadelta""" +1017 40 training_loop """owa""" +1017 40 negative_sampler """basic""" +1017 40 evaluator """rankbased""" +1017 41 dataset """kinships""" +1017 41 model """unstructuredmodel""" +1017 41 loss """nssa""" +1017 41 regularizer """no""" +1017 41 optimizer """adadelta""" +1017 41 training_loop """owa""" +1017 41 negative_sampler """basic""" +1017 41 evaluator """rankbased""" +1017 42 dataset """kinships""" +1017 42 model """unstructuredmodel""" +1017 42 loss """nssa""" +1017 42 regularizer """no""" +1017 42 optimizer """adadelta""" +1017 42 training_loop """owa""" +1017 42 negative_sampler """basic""" +1017 42 evaluator """rankbased""" +1017 43 dataset """kinships""" +1017 43 model """unstructuredmodel""" +1017 43 loss """nssa""" +1017 43 regularizer """no""" +1017 43 optimizer """adadelta""" +1017 43 training_loop """owa""" +1017 43 negative_sampler """basic""" +1017 43 evaluator """rankbased""" +1017 44 dataset """kinships""" +1017 44 
model """unstructuredmodel""" +1017 44 loss """nssa""" +1017 44 regularizer """no""" +1017 44 optimizer """adadelta""" +1017 44 training_loop """owa""" +1017 44 negative_sampler """basic""" +1017 44 evaluator """rankbased""" +1017 45 dataset """kinships""" +1017 45 model """unstructuredmodel""" +1017 45 loss """nssa""" +1017 45 regularizer """no""" +1017 45 optimizer """adadelta""" +1017 45 training_loop """owa""" +1017 45 negative_sampler """basic""" +1017 45 evaluator """rankbased""" +1017 46 dataset """kinships""" +1017 46 model """unstructuredmodel""" +1017 46 loss """nssa""" +1017 46 regularizer """no""" +1017 46 optimizer """adadelta""" +1017 46 training_loop """owa""" +1017 46 negative_sampler """basic""" +1017 46 evaluator """rankbased""" +1017 47 dataset """kinships""" +1017 47 model """unstructuredmodel""" +1017 47 loss """nssa""" +1017 47 regularizer """no""" +1017 47 optimizer """adadelta""" +1017 47 training_loop """owa""" +1017 47 negative_sampler """basic""" +1017 47 evaluator """rankbased""" +1017 48 dataset """kinships""" +1017 48 model """unstructuredmodel""" +1017 48 loss """nssa""" +1017 48 regularizer """no""" +1017 48 optimizer """adadelta""" +1017 48 training_loop """owa""" +1017 48 negative_sampler """basic""" +1017 48 evaluator """rankbased""" +1017 49 dataset """kinships""" +1017 49 model """unstructuredmodel""" +1017 49 loss """nssa""" +1017 49 regularizer """no""" +1017 49 optimizer """adadelta""" +1017 49 training_loop """owa""" +1017 49 negative_sampler """basic""" +1017 49 evaluator """rankbased""" +1017 50 dataset """kinships""" +1017 50 model """unstructuredmodel""" +1017 50 loss """nssa""" +1017 50 regularizer """no""" +1017 50 optimizer """adadelta""" +1017 50 training_loop """owa""" +1017 50 negative_sampler """basic""" +1017 50 evaluator """rankbased""" +1017 51 dataset """kinships""" +1017 51 model """unstructuredmodel""" +1017 51 loss """nssa""" +1017 51 regularizer """no""" +1017 51 optimizer """adadelta""" +1017 51 
training_loop """owa""" +1017 51 negative_sampler """basic""" +1017 51 evaluator """rankbased""" +1017 52 dataset """kinships""" +1017 52 model """unstructuredmodel""" +1017 52 loss """nssa""" +1017 52 regularizer """no""" +1017 52 optimizer """adadelta""" +1017 52 training_loop """owa""" +1017 52 negative_sampler """basic""" +1017 52 evaluator """rankbased""" +1017 53 dataset """kinships""" +1017 53 model """unstructuredmodel""" +1017 53 loss """nssa""" +1017 53 regularizer """no""" +1017 53 optimizer """adadelta""" +1017 53 training_loop """owa""" +1017 53 negative_sampler """basic""" +1017 53 evaluator """rankbased""" +1017 54 dataset """kinships""" +1017 54 model """unstructuredmodel""" +1017 54 loss """nssa""" +1017 54 regularizer """no""" +1017 54 optimizer """adadelta""" +1017 54 training_loop """owa""" +1017 54 negative_sampler """basic""" +1017 54 evaluator """rankbased""" +1017 55 dataset """kinships""" +1017 55 model """unstructuredmodel""" +1017 55 loss """nssa""" +1017 55 regularizer """no""" +1017 55 optimizer """adadelta""" +1017 55 training_loop """owa""" +1017 55 negative_sampler """basic""" +1017 55 evaluator """rankbased""" +1017 56 dataset """kinships""" +1017 56 model """unstructuredmodel""" +1017 56 loss """nssa""" +1017 56 regularizer """no""" +1017 56 optimizer """adadelta""" +1017 56 training_loop """owa""" +1017 56 negative_sampler """basic""" +1017 56 evaluator """rankbased""" +1017 57 dataset """kinships""" +1017 57 model """unstructuredmodel""" +1017 57 loss """nssa""" +1017 57 regularizer """no""" +1017 57 optimizer """adadelta""" +1017 57 training_loop """owa""" +1017 57 negative_sampler """basic""" +1017 57 evaluator """rankbased""" +1017 58 dataset """kinships""" +1017 58 model """unstructuredmodel""" +1017 58 loss """nssa""" +1017 58 regularizer """no""" +1017 58 optimizer """adadelta""" +1017 58 training_loop """owa""" +1017 58 negative_sampler """basic""" +1017 58 evaluator """rankbased""" +1017 59 dataset """kinships""" +1017 59 
model """unstructuredmodel""" +1017 59 loss """nssa""" +1017 59 regularizer """no""" +1017 59 optimizer """adadelta""" +1017 59 training_loop """owa""" +1017 59 negative_sampler """basic""" +1017 59 evaluator """rankbased""" +1017 60 dataset """kinships""" +1017 60 model """unstructuredmodel""" +1017 60 loss """nssa""" +1017 60 regularizer """no""" +1017 60 optimizer """adadelta""" +1017 60 training_loop """owa""" +1017 60 negative_sampler """basic""" +1017 60 evaluator """rankbased""" +1017 61 dataset """kinships""" +1017 61 model """unstructuredmodel""" +1017 61 loss """nssa""" +1017 61 regularizer """no""" +1017 61 optimizer """adadelta""" +1017 61 training_loop """owa""" +1017 61 negative_sampler """basic""" +1017 61 evaluator """rankbased""" +1017 62 dataset """kinships""" +1017 62 model """unstructuredmodel""" +1017 62 loss """nssa""" +1017 62 regularizer """no""" +1017 62 optimizer """adadelta""" +1017 62 training_loop """owa""" +1017 62 negative_sampler """basic""" +1017 62 evaluator """rankbased""" +1017 63 dataset """kinships""" +1017 63 model """unstructuredmodel""" +1017 63 loss """nssa""" +1017 63 regularizer """no""" +1017 63 optimizer """adadelta""" +1017 63 training_loop """owa""" +1017 63 negative_sampler """basic""" +1017 63 evaluator """rankbased""" +1017 64 dataset """kinships""" +1017 64 model """unstructuredmodel""" +1017 64 loss """nssa""" +1017 64 regularizer """no""" +1017 64 optimizer """adadelta""" +1017 64 training_loop """owa""" +1017 64 negative_sampler """basic""" +1017 64 evaluator """rankbased""" +1017 65 dataset """kinships""" +1017 65 model """unstructuredmodel""" +1017 65 loss """nssa""" +1017 65 regularizer """no""" +1017 65 optimizer """adadelta""" +1017 65 training_loop """owa""" +1017 65 negative_sampler """basic""" +1017 65 evaluator """rankbased""" +1017 66 dataset """kinships""" +1017 66 model """unstructuredmodel""" +1017 66 loss """nssa""" +1017 66 regularizer """no""" +1017 66 optimizer """adadelta""" +1017 66 
training_loop """owa""" +1017 66 negative_sampler """basic""" +1017 66 evaluator """rankbased""" +1017 67 dataset """kinships""" +1017 67 model """unstructuredmodel""" +1017 67 loss """nssa""" +1017 67 regularizer """no""" +1017 67 optimizer """adadelta""" +1017 67 training_loop """owa""" +1017 67 negative_sampler """basic""" +1017 67 evaluator """rankbased""" +1017 68 dataset """kinships""" +1017 68 model """unstructuredmodel""" +1017 68 loss """nssa""" +1017 68 regularizer """no""" +1017 68 optimizer """adadelta""" +1017 68 training_loop """owa""" +1017 68 negative_sampler """basic""" +1017 68 evaluator """rankbased""" +1017 69 dataset """kinships""" +1017 69 model """unstructuredmodel""" +1017 69 loss """nssa""" +1017 69 regularizer """no""" +1017 69 optimizer """adadelta""" +1017 69 training_loop """owa""" +1017 69 negative_sampler """basic""" +1017 69 evaluator """rankbased""" +1017 70 dataset """kinships""" +1017 70 model """unstructuredmodel""" +1017 70 loss """nssa""" +1017 70 regularizer """no""" +1017 70 optimizer """adadelta""" +1017 70 training_loop """owa""" +1017 70 negative_sampler """basic""" +1017 70 evaluator """rankbased""" +1017 71 dataset """kinships""" +1017 71 model """unstructuredmodel""" +1017 71 loss """nssa""" +1017 71 regularizer """no""" +1017 71 optimizer """adadelta""" +1017 71 training_loop """owa""" +1017 71 negative_sampler """basic""" +1017 71 evaluator """rankbased""" +1017 72 dataset """kinships""" +1017 72 model """unstructuredmodel""" +1017 72 loss """nssa""" +1017 72 regularizer """no""" +1017 72 optimizer """adadelta""" +1017 72 training_loop """owa""" +1017 72 negative_sampler """basic""" +1017 72 evaluator """rankbased""" +1017 73 dataset """kinships""" +1017 73 model """unstructuredmodel""" +1017 73 loss """nssa""" +1017 73 regularizer """no""" +1017 73 optimizer """adadelta""" +1017 73 training_loop """owa""" +1017 73 negative_sampler """basic""" +1017 73 evaluator """rankbased""" +1017 74 dataset """kinships""" +1017 74 
model """unstructuredmodel""" +1017 74 loss """nssa""" +1017 74 regularizer """no""" +1017 74 optimizer """adadelta""" +1017 74 training_loop """owa""" +1017 74 negative_sampler """basic""" +1017 74 evaluator """rankbased""" +1017 75 dataset """kinships""" +1017 75 model """unstructuredmodel""" +1017 75 loss """nssa""" +1017 75 regularizer """no""" +1017 75 optimizer """adadelta""" +1017 75 training_loop """owa""" +1017 75 negative_sampler """basic""" +1017 75 evaluator """rankbased""" +1017 76 dataset """kinships""" +1017 76 model """unstructuredmodel""" +1017 76 loss """nssa""" +1017 76 regularizer """no""" +1017 76 optimizer """adadelta""" +1017 76 training_loop """owa""" +1017 76 negative_sampler """basic""" +1017 76 evaluator """rankbased""" +1017 77 dataset """kinships""" +1017 77 model """unstructuredmodel""" +1017 77 loss """nssa""" +1017 77 regularizer """no""" +1017 77 optimizer """adadelta""" +1017 77 training_loop """owa""" +1017 77 negative_sampler """basic""" +1017 77 evaluator """rankbased""" +1017 78 dataset """kinships""" +1017 78 model """unstructuredmodel""" +1017 78 loss """nssa""" +1017 78 regularizer """no""" +1017 78 optimizer """adadelta""" +1017 78 training_loop """owa""" +1017 78 negative_sampler """basic""" +1017 78 evaluator """rankbased""" +1017 79 dataset """kinships""" +1017 79 model """unstructuredmodel""" +1017 79 loss """nssa""" +1017 79 regularizer """no""" +1017 79 optimizer """adadelta""" +1017 79 training_loop """owa""" +1017 79 negative_sampler """basic""" +1017 79 evaluator """rankbased""" +1017 80 dataset """kinships""" +1017 80 model """unstructuredmodel""" +1017 80 loss """nssa""" +1017 80 regularizer """no""" +1017 80 optimizer """adadelta""" +1017 80 training_loop """owa""" +1017 80 negative_sampler """basic""" +1017 80 evaluator """rankbased""" +1017 81 dataset """kinships""" +1017 81 model """unstructuredmodel""" +1017 81 loss """nssa""" +1017 81 regularizer """no""" +1017 81 optimizer """adadelta""" +1017 81 
training_loop """owa""" +1017 81 negative_sampler """basic""" +1017 81 evaluator """rankbased""" +1017 82 dataset """kinships""" +1017 82 model """unstructuredmodel""" +1017 82 loss """nssa""" +1017 82 regularizer """no""" +1017 82 optimizer """adadelta""" +1017 82 training_loop """owa""" +1017 82 negative_sampler """basic""" +1017 82 evaluator """rankbased""" +1017 83 dataset """kinships""" +1017 83 model """unstructuredmodel""" +1017 83 loss """nssa""" +1017 83 regularizer """no""" +1017 83 optimizer """adadelta""" +1017 83 training_loop """owa""" +1017 83 negative_sampler """basic""" +1017 83 evaluator """rankbased""" +1017 84 dataset """kinships""" +1017 84 model """unstructuredmodel""" +1017 84 loss """nssa""" +1017 84 regularizer """no""" +1017 84 optimizer """adadelta""" +1017 84 training_loop """owa""" +1017 84 negative_sampler """basic""" +1017 84 evaluator """rankbased""" +1017 85 dataset """kinships""" +1017 85 model """unstructuredmodel""" +1017 85 loss """nssa""" +1017 85 regularizer """no""" +1017 85 optimizer """adadelta""" +1017 85 training_loop """owa""" +1017 85 negative_sampler """basic""" +1017 85 evaluator """rankbased""" +1017 86 dataset """kinships""" +1017 86 model """unstructuredmodel""" +1017 86 loss """nssa""" +1017 86 regularizer """no""" +1017 86 optimizer """adadelta""" +1017 86 training_loop """owa""" +1017 86 negative_sampler """basic""" +1017 86 evaluator """rankbased""" +1017 87 dataset """kinships""" +1017 87 model """unstructuredmodel""" +1017 87 loss """nssa""" +1017 87 regularizer """no""" +1017 87 optimizer """adadelta""" +1017 87 training_loop """owa""" +1017 87 negative_sampler """basic""" +1017 87 evaluator """rankbased""" +1017 88 dataset """kinships""" +1017 88 model """unstructuredmodel""" +1017 88 loss """nssa""" +1017 88 regularizer """no""" +1017 88 optimizer """adadelta""" +1017 88 training_loop """owa""" +1017 88 negative_sampler """basic""" +1017 88 evaluator """rankbased""" +1017 89 dataset """kinships""" +1017 89 
model """unstructuredmodel""" +1017 89 loss """nssa""" +1017 89 regularizer """no""" +1017 89 optimizer """adadelta""" +1017 89 training_loop """owa""" +1017 89 negative_sampler """basic""" +1017 89 evaluator """rankbased""" +1017 90 dataset """kinships""" +1017 90 model """unstructuredmodel""" +1017 90 loss """nssa""" +1017 90 regularizer """no""" +1017 90 optimizer """adadelta""" +1017 90 training_loop """owa""" +1017 90 negative_sampler """basic""" +1017 90 evaluator """rankbased""" +1017 91 dataset """kinships""" +1017 91 model """unstructuredmodel""" +1017 91 loss """nssa""" +1017 91 regularizer """no""" +1017 91 optimizer """adadelta""" +1017 91 training_loop """owa""" +1017 91 negative_sampler """basic""" +1017 91 evaluator """rankbased""" +1017 92 dataset """kinships""" +1017 92 model """unstructuredmodel""" +1017 92 loss """nssa""" +1017 92 regularizer """no""" +1017 92 optimizer """adadelta""" +1017 92 training_loop """owa""" +1017 92 negative_sampler """basic""" +1017 92 evaluator """rankbased""" +1017 93 dataset """kinships""" +1017 93 model """unstructuredmodel""" +1017 93 loss """nssa""" +1017 93 regularizer """no""" +1017 93 optimizer """adadelta""" +1017 93 training_loop """owa""" +1017 93 negative_sampler """basic""" +1017 93 evaluator """rankbased""" +1017 94 dataset """kinships""" +1017 94 model """unstructuredmodel""" +1017 94 loss """nssa""" +1017 94 regularizer """no""" +1017 94 optimizer """adadelta""" +1017 94 training_loop """owa""" +1017 94 negative_sampler """basic""" +1017 94 evaluator """rankbased""" +1017 95 dataset """kinships""" +1017 95 model """unstructuredmodel""" +1017 95 loss """nssa""" +1017 95 regularizer """no""" +1017 95 optimizer """adadelta""" +1017 95 training_loop """owa""" +1017 95 negative_sampler """basic""" +1017 95 evaluator """rankbased""" +1017 96 dataset """kinships""" +1017 96 model """unstructuredmodel""" +1017 96 loss """nssa""" +1017 96 regularizer """no""" +1017 96 optimizer """adadelta""" +1017 96 
training_loop """owa""" +1017 96 negative_sampler """basic""" +1017 96 evaluator """rankbased""" +1017 97 dataset """kinships""" +1017 97 model """unstructuredmodel""" +1017 97 loss """nssa""" +1017 97 regularizer """no""" +1017 97 optimizer """adadelta""" +1017 97 training_loop """owa""" +1017 97 negative_sampler """basic""" +1017 97 evaluator """rankbased""" +1017 98 dataset """kinships""" +1017 98 model """unstructuredmodel""" +1017 98 loss """nssa""" +1017 98 regularizer """no""" +1017 98 optimizer """adadelta""" +1017 98 training_loop """owa""" +1017 98 negative_sampler """basic""" +1017 98 evaluator """rankbased""" +1017 99 dataset """kinships""" +1017 99 model """unstructuredmodel""" +1017 99 loss """nssa""" +1017 99 regularizer """no""" +1017 99 optimizer """adadelta""" +1017 99 training_loop """owa""" +1017 99 negative_sampler """basic""" +1017 99 evaluator """rankbased""" +1017 100 dataset """kinships""" +1017 100 model """unstructuredmodel""" +1017 100 loss """nssa""" +1017 100 regularizer """no""" +1017 100 optimizer """adadelta""" +1017 100 training_loop """owa""" +1017 100 negative_sampler """basic""" +1017 100 evaluator """rankbased""" +1018 1 model.embedding_dim 2.0 +1018 1 model.scoring_fct_norm 1.0 +1018 1 loss.margin 2.8209411285719863 +1018 1 negative_sampler.num_negs_per_pos 95.0 +1018 1 training.batch_size 2.0 +1018 2 model.embedding_dim 0.0 +1018 2 model.scoring_fct_norm 1.0 +1018 2 loss.margin 5.319981015433393 +1018 2 negative_sampler.num_negs_per_pos 34.0 +1018 2 training.batch_size 1.0 +1018 3 model.embedding_dim 1.0 +1018 3 model.scoring_fct_norm 2.0 +1018 3 loss.margin 6.380536822381778 +1018 3 negative_sampler.num_negs_per_pos 54.0 +1018 3 training.batch_size 2.0 +1018 4 model.embedding_dim 1.0 +1018 4 model.scoring_fct_norm 1.0 +1018 4 loss.margin 2.9281860382568237 +1018 4 negative_sampler.num_negs_per_pos 24.0 +1018 4 training.batch_size 0.0 +1018 5 model.embedding_dim 2.0 +1018 5 model.scoring_fct_norm 1.0 +1018 5 loss.margin 
4.888399950472372 +1018 5 negative_sampler.num_negs_per_pos 76.0 +1018 5 training.batch_size 1.0 +1018 6 model.embedding_dim 1.0 +1018 6 model.scoring_fct_norm 2.0 +1018 6 loss.margin 9.6423436469828 +1018 6 negative_sampler.num_negs_per_pos 54.0 +1018 6 training.batch_size 2.0 +1018 7 model.embedding_dim 2.0 +1018 7 model.scoring_fct_norm 1.0 +1018 7 loss.margin 3.122134825161724 +1018 7 negative_sampler.num_negs_per_pos 59.0 +1018 7 training.batch_size 1.0 +1018 8 model.embedding_dim 2.0 +1018 8 model.scoring_fct_norm 2.0 +1018 8 loss.margin 1.4814879062901298 +1018 8 negative_sampler.num_negs_per_pos 87.0 +1018 8 training.batch_size 2.0 +1018 9 model.embedding_dim 0.0 +1018 9 model.scoring_fct_norm 1.0 +1018 9 loss.margin 2.21936465951723 +1018 9 negative_sampler.num_negs_per_pos 56.0 +1018 9 training.batch_size 2.0 +1018 10 model.embedding_dim 1.0 +1018 10 model.scoring_fct_norm 1.0 +1018 10 loss.margin 2.859728903620395 +1018 10 negative_sampler.num_negs_per_pos 73.0 +1018 10 training.batch_size 0.0 +1018 11 model.embedding_dim 1.0 +1018 11 model.scoring_fct_norm 2.0 +1018 11 loss.margin 8.59965799124913 +1018 11 negative_sampler.num_negs_per_pos 17.0 +1018 11 training.batch_size 0.0 +1018 12 model.embedding_dim 1.0 +1018 12 model.scoring_fct_norm 1.0 +1018 12 loss.margin 8.096264739489325 +1018 12 negative_sampler.num_negs_per_pos 33.0 +1018 12 training.batch_size 1.0 +1018 13 model.embedding_dim 0.0 +1018 13 model.scoring_fct_norm 1.0 +1018 13 loss.margin 4.019525318952969 +1018 13 negative_sampler.num_negs_per_pos 94.0 +1018 13 training.batch_size 1.0 +1018 14 model.embedding_dim 0.0 +1018 14 model.scoring_fct_norm 1.0 +1018 14 loss.margin 6.039455130838887 +1018 14 negative_sampler.num_negs_per_pos 78.0 +1018 14 training.batch_size 1.0 +1018 15 model.embedding_dim 0.0 +1018 15 model.scoring_fct_norm 1.0 +1018 15 loss.margin 8.506302105683249 +1018 15 negative_sampler.num_negs_per_pos 19.0 +1018 15 training.batch_size 0.0 +1018 16 model.embedding_dim 1.0 
+1018 16 model.scoring_fct_norm 1.0 +1018 16 loss.margin 3.4536341840385605 +1018 16 negative_sampler.num_negs_per_pos 76.0 +1018 16 training.batch_size 2.0 +1018 17 model.embedding_dim 2.0 +1018 17 model.scoring_fct_norm 1.0 +1018 17 loss.margin 4.7340778628063 +1018 17 negative_sampler.num_negs_per_pos 61.0 +1018 17 training.batch_size 1.0 +1018 18 model.embedding_dim 1.0 +1018 18 model.scoring_fct_norm 1.0 +1018 18 loss.margin 3.136466795343231 +1018 18 negative_sampler.num_negs_per_pos 92.0 +1018 18 training.batch_size 0.0 +1018 19 model.embedding_dim 2.0 +1018 19 model.scoring_fct_norm 2.0 +1018 19 loss.margin 1.5318508164979625 +1018 19 negative_sampler.num_negs_per_pos 23.0 +1018 19 training.batch_size 2.0 +1018 20 model.embedding_dim 0.0 +1018 20 model.scoring_fct_norm 1.0 +1018 20 loss.margin 2.3360573382352756 +1018 20 negative_sampler.num_negs_per_pos 86.0 +1018 20 training.batch_size 1.0 +1018 21 model.embedding_dim 2.0 +1018 21 model.scoring_fct_norm 1.0 +1018 21 loss.margin 7.35943529044795 +1018 21 negative_sampler.num_negs_per_pos 17.0 +1018 21 training.batch_size 1.0 +1018 22 model.embedding_dim 2.0 +1018 22 model.scoring_fct_norm 1.0 +1018 22 loss.margin 2.215372218296626 +1018 22 negative_sampler.num_negs_per_pos 84.0 +1018 22 training.batch_size 2.0 +1018 23 model.embedding_dim 1.0 +1018 23 model.scoring_fct_norm 1.0 +1018 23 loss.margin 2.5461413205598387 +1018 23 negative_sampler.num_negs_per_pos 3.0 +1018 23 training.batch_size 2.0 +1018 24 model.embedding_dim 0.0 +1018 24 model.scoring_fct_norm 2.0 +1018 24 loss.margin 6.699215244289602 +1018 24 negative_sampler.num_negs_per_pos 27.0 +1018 24 training.batch_size 1.0 +1018 25 model.embedding_dim 1.0 +1018 25 model.scoring_fct_norm 1.0 +1018 25 loss.margin 8.779089728433355 +1018 25 negative_sampler.num_negs_per_pos 75.0 +1018 25 training.batch_size 1.0 +1018 26 model.embedding_dim 2.0 +1018 26 model.scoring_fct_norm 2.0 +1018 26 loss.margin 3.798417769657684 +1018 26 
negative_sampler.num_negs_per_pos 30.0 +1018 26 training.batch_size 0.0 +1018 27 model.embedding_dim 0.0 +1018 27 model.scoring_fct_norm 1.0 +1018 27 loss.margin 4.585221550826609 +1018 27 negative_sampler.num_negs_per_pos 80.0 +1018 27 training.batch_size 0.0 +1018 28 model.embedding_dim 1.0 +1018 28 model.scoring_fct_norm 2.0 +1018 28 loss.margin 6.124386503878115 +1018 28 negative_sampler.num_negs_per_pos 28.0 +1018 28 training.batch_size 0.0 +1018 29 model.embedding_dim 0.0 +1018 29 model.scoring_fct_norm 1.0 +1018 29 loss.margin 2.828798941242492 +1018 29 negative_sampler.num_negs_per_pos 90.0 +1018 29 training.batch_size 0.0 +1018 30 model.embedding_dim 2.0 +1018 30 model.scoring_fct_norm 1.0 +1018 30 loss.margin 4.985830704675904 +1018 30 negative_sampler.num_negs_per_pos 27.0 +1018 30 training.batch_size 0.0 +1018 31 model.embedding_dim 0.0 +1018 31 model.scoring_fct_norm 2.0 +1018 31 loss.margin 8.030232986858069 +1018 31 negative_sampler.num_negs_per_pos 58.0 +1018 31 training.batch_size 2.0 +1018 32 model.embedding_dim 1.0 +1018 32 model.scoring_fct_norm 2.0 +1018 32 loss.margin 4.893068233799726 +1018 32 negative_sampler.num_negs_per_pos 68.0 +1018 32 training.batch_size 1.0 +1018 33 model.embedding_dim 1.0 +1018 33 model.scoring_fct_norm 2.0 +1018 33 loss.margin 6.702748527507591 +1018 33 negative_sampler.num_negs_per_pos 58.0 +1018 33 training.batch_size 1.0 +1018 34 model.embedding_dim 1.0 +1018 34 model.scoring_fct_norm 2.0 +1018 34 loss.margin 4.9775596826402655 +1018 34 negative_sampler.num_negs_per_pos 7.0 +1018 34 training.batch_size 0.0 +1018 35 model.embedding_dim 2.0 +1018 35 model.scoring_fct_norm 2.0 +1018 35 loss.margin 7.788344813761991 +1018 35 negative_sampler.num_negs_per_pos 75.0 +1018 35 training.batch_size 0.0 +1018 36 model.embedding_dim 0.0 +1018 36 model.scoring_fct_norm 1.0 +1018 36 loss.margin 7.347628053458656 +1018 36 negative_sampler.num_negs_per_pos 63.0 +1018 36 training.batch_size 1.0 +1018 37 model.embedding_dim 2.0 
+1018 37 model.scoring_fct_norm 1.0 +1018 37 loss.margin 4.182170503067146 +1018 37 negative_sampler.num_negs_per_pos 77.0 +1018 37 training.batch_size 0.0 +1018 38 model.embedding_dim 0.0 +1018 38 model.scoring_fct_norm 1.0 +1018 38 loss.margin 7.2838630699616616 +1018 38 negative_sampler.num_negs_per_pos 25.0 +1018 38 training.batch_size 1.0 +1018 39 model.embedding_dim 2.0 +1018 39 model.scoring_fct_norm 1.0 +1018 39 loss.margin 4.131770808874679 +1018 39 negative_sampler.num_negs_per_pos 68.0 +1018 39 training.batch_size 1.0 +1018 40 model.embedding_dim 2.0 +1018 40 model.scoring_fct_norm 1.0 +1018 40 loss.margin 0.7563594657544712 +1018 40 negative_sampler.num_negs_per_pos 4.0 +1018 40 training.batch_size 0.0 +1018 41 model.embedding_dim 0.0 +1018 41 model.scoring_fct_norm 1.0 +1018 41 loss.margin 8.589218319417094 +1018 41 negative_sampler.num_negs_per_pos 93.0 +1018 41 training.batch_size 2.0 +1018 42 model.embedding_dim 1.0 +1018 42 model.scoring_fct_norm 2.0 +1018 42 loss.margin 9.765017475017821 +1018 42 negative_sampler.num_negs_per_pos 29.0 +1018 42 training.batch_size 1.0 +1018 43 model.embedding_dim 0.0 +1018 43 model.scoring_fct_norm 1.0 +1018 43 loss.margin 7.7729378252634795 +1018 43 negative_sampler.num_negs_per_pos 72.0 +1018 43 training.batch_size 0.0 +1018 44 model.embedding_dim 2.0 +1018 44 model.scoring_fct_norm 2.0 +1018 44 loss.margin 4.203206899193671 +1018 44 negative_sampler.num_negs_per_pos 65.0 +1018 44 training.batch_size 2.0 +1018 45 model.embedding_dim 0.0 +1018 45 model.scoring_fct_norm 1.0 +1018 45 loss.margin 4.558039283014849 +1018 45 negative_sampler.num_negs_per_pos 57.0 +1018 45 training.batch_size 1.0 +1018 46 model.embedding_dim 0.0 +1018 46 model.scoring_fct_norm 2.0 +1018 46 loss.margin 6.589003845443532 +1018 46 negative_sampler.num_negs_per_pos 21.0 +1018 46 training.batch_size 1.0 +1018 47 model.embedding_dim 0.0 +1018 47 model.scoring_fct_norm 1.0 +1018 47 loss.margin 7.528065759791013 +1018 47 
negative_sampler.num_negs_per_pos 16.0 +1018 47 training.batch_size 2.0 +1018 48 model.embedding_dim 1.0 +1018 48 model.scoring_fct_norm 1.0 +1018 48 loss.margin 6.935309396257847 +1018 48 negative_sampler.num_negs_per_pos 83.0 +1018 48 training.batch_size 2.0 +1018 49 model.embedding_dim 0.0 +1018 49 model.scoring_fct_norm 1.0 +1018 49 loss.margin 6.555637000741201 +1018 49 negative_sampler.num_negs_per_pos 40.0 +1018 49 training.batch_size 0.0 +1018 50 model.embedding_dim 2.0 +1018 50 model.scoring_fct_norm 1.0 +1018 50 loss.margin 3.356620142280854 +1018 50 negative_sampler.num_negs_per_pos 84.0 +1018 50 training.batch_size 1.0 +1018 51 model.embedding_dim 0.0 +1018 51 model.scoring_fct_norm 2.0 +1018 51 loss.margin 7.532503271915151 +1018 51 negative_sampler.num_negs_per_pos 31.0 +1018 51 training.batch_size 2.0 +1018 52 model.embedding_dim 2.0 +1018 52 model.scoring_fct_norm 1.0 +1018 52 loss.margin 9.693409579025813 +1018 52 negative_sampler.num_negs_per_pos 49.0 +1018 52 training.batch_size 1.0 +1018 53 model.embedding_dim 1.0 +1018 53 model.scoring_fct_norm 2.0 +1018 53 loss.margin 5.748946490064334 +1018 53 negative_sampler.num_negs_per_pos 84.0 +1018 53 training.batch_size 1.0 +1018 54 model.embedding_dim 0.0 +1018 54 model.scoring_fct_norm 1.0 +1018 54 loss.margin 4.1595560019578235 +1018 54 negative_sampler.num_negs_per_pos 72.0 +1018 54 training.batch_size 2.0 +1018 55 model.embedding_dim 1.0 +1018 55 model.scoring_fct_norm 2.0 +1018 55 loss.margin 5.549351969600339 +1018 55 negative_sampler.num_negs_per_pos 75.0 +1018 55 training.batch_size 2.0 +1018 56 model.embedding_dim 2.0 +1018 56 model.scoring_fct_norm 2.0 +1018 56 loss.margin 4.554259850551545 +1018 56 negative_sampler.num_negs_per_pos 2.0 +1018 56 training.batch_size 1.0 +1018 57 model.embedding_dim 0.0 +1018 57 model.scoring_fct_norm 1.0 +1018 57 loss.margin 6.545965048043624 +1018 57 negative_sampler.num_negs_per_pos 9.0 +1018 57 training.batch_size 0.0 +1018 58 model.embedding_dim 2.0 +1018 
58 model.scoring_fct_norm 1.0 +1018 58 loss.margin 0.5195283662427261 +1018 58 negative_sampler.num_negs_per_pos 90.0 +1018 58 training.batch_size 0.0 +1018 59 model.embedding_dim 0.0 +1018 59 model.scoring_fct_norm 1.0 +1018 59 loss.margin 3.1432989567684073 +1018 59 negative_sampler.num_negs_per_pos 12.0 +1018 59 training.batch_size 1.0 +1018 60 model.embedding_dim 2.0 +1018 60 model.scoring_fct_norm 2.0 +1018 60 loss.margin 1.296870036900693 +1018 60 negative_sampler.num_negs_per_pos 57.0 +1018 60 training.batch_size 0.0 +1018 61 model.embedding_dim 2.0 +1018 61 model.scoring_fct_norm 2.0 +1018 61 loss.margin 9.884266266336123 +1018 61 negative_sampler.num_negs_per_pos 77.0 +1018 61 training.batch_size 0.0 +1018 62 model.embedding_dim 0.0 +1018 62 model.scoring_fct_norm 1.0 +1018 62 loss.margin 0.5442027831537277 +1018 62 negative_sampler.num_negs_per_pos 94.0 +1018 62 training.batch_size 0.0 +1018 63 model.embedding_dim 1.0 +1018 63 model.scoring_fct_norm 1.0 +1018 63 loss.margin 3.6333696165749854 +1018 63 negative_sampler.num_negs_per_pos 28.0 +1018 63 training.batch_size 1.0 +1018 64 model.embedding_dim 0.0 +1018 64 model.scoring_fct_norm 1.0 +1018 64 loss.margin 4.756772183862247 +1018 64 negative_sampler.num_negs_per_pos 42.0 +1018 64 training.batch_size 0.0 +1018 65 model.embedding_dim 1.0 +1018 65 model.scoring_fct_norm 2.0 +1018 65 loss.margin 4.161839537774135 +1018 65 negative_sampler.num_negs_per_pos 44.0 +1018 65 training.batch_size 1.0 +1018 66 model.embedding_dim 1.0 +1018 66 model.scoring_fct_norm 1.0 +1018 66 loss.margin 8.420558657947936 +1018 66 negative_sampler.num_negs_per_pos 36.0 +1018 66 training.batch_size 0.0 +1018 67 model.embedding_dim 2.0 +1018 67 model.scoring_fct_norm 2.0 +1018 67 loss.margin 1.4537432885214965 +1018 67 negative_sampler.num_negs_per_pos 35.0 +1018 67 training.batch_size 1.0 +1018 68 model.embedding_dim 2.0 +1018 68 model.scoring_fct_norm 2.0 +1018 68 loss.margin 7.8587088091347646 +1018 68 
negative_sampler.num_negs_per_pos 56.0 +1018 68 training.batch_size 2.0 +1018 69 model.embedding_dim 2.0 +1018 69 model.scoring_fct_norm 2.0 +1018 69 loss.margin 8.048868305756722 +1018 69 negative_sampler.num_negs_per_pos 44.0 +1018 69 training.batch_size 2.0 +1018 70 model.embedding_dim 1.0 +1018 70 model.scoring_fct_norm 2.0 +1018 70 loss.margin 5.990581328389298 +1018 70 negative_sampler.num_negs_per_pos 29.0 +1018 70 training.batch_size 1.0 +1018 71 model.embedding_dim 1.0 +1018 71 model.scoring_fct_norm 1.0 +1018 71 loss.margin 1.9099831962351626 +1018 71 negative_sampler.num_negs_per_pos 11.0 +1018 71 training.batch_size 1.0 +1018 72 model.embedding_dim 2.0 +1018 72 model.scoring_fct_norm 1.0 +1018 72 loss.margin 9.492874639093142 +1018 72 negative_sampler.num_negs_per_pos 37.0 +1018 72 training.batch_size 1.0 +1018 73 model.embedding_dim 2.0 +1018 73 model.scoring_fct_norm 2.0 +1018 73 loss.margin 4.703251291270065 +1018 73 negative_sampler.num_negs_per_pos 88.0 +1018 73 training.batch_size 2.0 +1018 74 model.embedding_dim 1.0 +1018 74 model.scoring_fct_norm 1.0 +1018 74 loss.margin 1.408189694518314 +1018 74 negative_sampler.num_negs_per_pos 5.0 +1018 74 training.batch_size 2.0 +1018 75 model.embedding_dim 0.0 +1018 75 model.scoring_fct_norm 2.0 +1018 75 loss.margin 4.782137238962196 +1018 75 negative_sampler.num_negs_per_pos 92.0 +1018 75 training.batch_size 1.0 +1018 76 model.embedding_dim 2.0 +1018 76 model.scoring_fct_norm 2.0 +1018 76 loss.margin 0.8921161907589114 +1018 76 negative_sampler.num_negs_per_pos 85.0 +1018 76 training.batch_size 2.0 +1018 77 model.embedding_dim 2.0 +1018 77 model.scoring_fct_norm 1.0 +1018 77 loss.margin 9.663183196693504 +1018 77 negative_sampler.num_negs_per_pos 17.0 +1018 77 training.batch_size 0.0 +1018 78 model.embedding_dim 1.0 +1018 78 model.scoring_fct_norm 2.0 +1018 78 loss.margin 9.624324114205402 +1018 78 negative_sampler.num_negs_per_pos 87.0 +1018 78 training.batch_size 1.0 +1018 79 model.embedding_dim 2.0 
+1018 79 model.scoring_fct_norm 2.0 +1018 79 loss.margin 9.149820149959629 +1018 79 negative_sampler.num_negs_per_pos 27.0 +1018 79 training.batch_size 0.0 +1018 80 model.embedding_dim 0.0 +1018 80 model.scoring_fct_norm 1.0 +1018 80 loss.margin 9.706852999136991 +1018 80 negative_sampler.num_negs_per_pos 31.0 +1018 80 training.batch_size 1.0 +1018 81 model.embedding_dim 0.0 +1018 81 model.scoring_fct_norm 1.0 +1018 81 loss.margin 4.607832638711503 +1018 81 negative_sampler.num_negs_per_pos 63.0 +1018 81 training.batch_size 0.0 +1018 82 model.embedding_dim 0.0 +1018 82 model.scoring_fct_norm 1.0 +1018 82 loss.margin 9.930609115730517 +1018 82 negative_sampler.num_negs_per_pos 91.0 +1018 82 training.batch_size 1.0 +1018 83 model.embedding_dim 1.0 +1018 83 model.scoring_fct_norm 1.0 +1018 83 loss.margin 5.316447234965217 +1018 83 negative_sampler.num_negs_per_pos 77.0 +1018 83 training.batch_size 2.0 +1018 84 model.embedding_dim 0.0 +1018 84 model.scoring_fct_norm 1.0 +1018 84 loss.margin 1.564051430291428 +1018 84 negative_sampler.num_negs_per_pos 32.0 +1018 84 training.batch_size 2.0 +1018 85 model.embedding_dim 2.0 +1018 85 model.scoring_fct_norm 2.0 +1018 85 loss.margin 2.846672812056897 +1018 85 negative_sampler.num_negs_per_pos 66.0 +1018 85 training.batch_size 1.0 +1018 86 model.embedding_dim 1.0 +1018 86 model.scoring_fct_norm 2.0 +1018 86 loss.margin 6.548002927570673 +1018 86 negative_sampler.num_negs_per_pos 44.0 +1018 86 training.batch_size 1.0 +1018 87 model.embedding_dim 2.0 +1018 87 model.scoring_fct_norm 1.0 +1018 87 loss.margin 5.818375342792298 +1018 87 negative_sampler.num_negs_per_pos 36.0 +1018 87 training.batch_size 1.0 +1018 88 model.embedding_dim 0.0 +1018 88 model.scoring_fct_norm 2.0 +1018 88 loss.margin 8.182780016569826 +1018 88 negative_sampler.num_negs_per_pos 70.0 +1018 88 training.batch_size 0.0 +1018 89 model.embedding_dim 2.0 +1018 89 model.scoring_fct_norm 1.0 +1018 89 loss.margin 5.044979871710826 +1018 89 
negative_sampler.num_negs_per_pos 9.0 +1018 89 training.batch_size 1.0 +1018 90 model.embedding_dim 1.0 +1018 90 model.scoring_fct_norm 1.0 +1018 90 loss.margin 3.0709816796083205 +1018 90 negative_sampler.num_negs_per_pos 34.0 +1018 90 training.batch_size 0.0 +1018 91 model.embedding_dim 2.0 +1018 91 model.scoring_fct_norm 2.0 +1018 91 loss.margin 8.424711433030307 +1018 91 negative_sampler.num_negs_per_pos 12.0 +1018 91 training.batch_size 0.0 +1018 92 model.embedding_dim 0.0 +1018 92 model.scoring_fct_norm 2.0 +1018 92 loss.margin 5.708528140930845 +1018 92 negative_sampler.num_negs_per_pos 37.0 +1018 92 training.batch_size 0.0 +1018 93 model.embedding_dim 0.0 +1018 93 model.scoring_fct_norm 1.0 +1018 93 loss.margin 5.379076577054745 +1018 93 negative_sampler.num_negs_per_pos 7.0 +1018 93 training.batch_size 2.0 +1018 94 model.embedding_dim 0.0 +1018 94 model.scoring_fct_norm 2.0 +1018 94 loss.margin 6.4095883827118145 +1018 94 negative_sampler.num_negs_per_pos 36.0 +1018 94 training.batch_size 0.0 +1018 95 model.embedding_dim 0.0 +1018 95 model.scoring_fct_norm 2.0 +1018 95 loss.margin 8.444367918279024 +1018 95 negative_sampler.num_negs_per_pos 94.0 +1018 95 training.batch_size 0.0 +1018 96 model.embedding_dim 2.0 +1018 96 model.scoring_fct_norm 2.0 +1018 96 loss.margin 3.3484539954642165 +1018 96 negative_sampler.num_negs_per_pos 91.0 +1018 96 training.batch_size 1.0 +1018 97 model.embedding_dim 0.0 +1018 97 model.scoring_fct_norm 2.0 +1018 97 loss.margin 4.288743054170859 +1018 97 negative_sampler.num_negs_per_pos 47.0 +1018 97 training.batch_size 2.0 +1018 98 model.embedding_dim 2.0 +1018 98 model.scoring_fct_norm 1.0 +1018 98 loss.margin 4.573448727770467 +1018 98 negative_sampler.num_negs_per_pos 38.0 +1018 98 training.batch_size 0.0 +1018 99 model.embedding_dim 2.0 +1018 99 model.scoring_fct_norm 2.0 +1018 99 loss.margin 5.527343029255505 +1018 99 negative_sampler.num_negs_per_pos 50.0 +1018 99 training.batch_size 1.0 +1018 100 model.embedding_dim 2.0 
+1018 100 model.scoring_fct_norm 2.0 +1018 100 loss.margin 3.9612623017742794 +1018 100 negative_sampler.num_negs_per_pos 32.0 +1018 100 training.batch_size 2.0 +1018 1 dataset """kinships""" +1018 1 model """unstructuredmodel""" +1018 1 loss """marginranking""" +1018 1 regularizer """no""" +1018 1 optimizer """adadelta""" +1018 1 training_loop """owa""" +1018 1 negative_sampler """basic""" +1018 1 evaluator """rankbased""" +1018 2 dataset """kinships""" +1018 2 model """unstructuredmodel""" +1018 2 loss """marginranking""" +1018 2 regularizer """no""" +1018 2 optimizer """adadelta""" +1018 2 training_loop """owa""" +1018 2 negative_sampler """basic""" +1018 2 evaluator """rankbased""" +1018 3 dataset """kinships""" +1018 3 model """unstructuredmodel""" +1018 3 loss """marginranking""" +1018 3 regularizer """no""" +1018 3 optimizer """adadelta""" +1018 3 training_loop """owa""" +1018 3 negative_sampler """basic""" +1018 3 evaluator """rankbased""" +1018 4 dataset """kinships""" +1018 4 model """unstructuredmodel""" +1018 4 loss """marginranking""" +1018 4 regularizer """no""" +1018 4 optimizer """adadelta""" +1018 4 training_loop """owa""" +1018 4 negative_sampler """basic""" +1018 4 evaluator """rankbased""" +1018 5 dataset """kinships""" +1018 5 model """unstructuredmodel""" +1018 5 loss """marginranking""" +1018 5 regularizer """no""" +1018 5 optimizer """adadelta""" +1018 5 training_loop """owa""" +1018 5 negative_sampler """basic""" +1018 5 evaluator """rankbased""" +1018 6 dataset """kinships""" +1018 6 model """unstructuredmodel""" +1018 6 loss """marginranking""" +1018 6 regularizer """no""" +1018 6 optimizer """adadelta""" +1018 6 training_loop """owa""" +1018 6 negative_sampler """basic""" +1018 6 evaluator """rankbased""" +1018 7 dataset """kinships""" +1018 7 model """unstructuredmodel""" +1018 7 loss """marginranking""" +1018 7 regularizer """no""" +1018 7 optimizer """adadelta""" +1018 7 training_loop """owa""" +1018 7 negative_sampler """basic""" 
+1018 7 evaluator """rankbased""" +1018 8 dataset """kinships""" +1018 8 model """unstructuredmodel""" +1018 8 loss """marginranking""" +1018 8 regularizer """no""" +1018 8 optimizer """adadelta""" +1018 8 training_loop """owa""" +1018 8 negative_sampler """basic""" +1018 8 evaluator """rankbased""" +1018 9 dataset """kinships""" +1018 9 model """unstructuredmodel""" +1018 9 loss """marginranking""" +1018 9 regularizer """no""" +1018 9 optimizer """adadelta""" +1018 9 training_loop """owa""" +1018 9 negative_sampler """basic""" +1018 9 evaluator """rankbased""" +1018 10 dataset """kinships""" +1018 10 model """unstructuredmodel""" +1018 10 loss """marginranking""" +1018 10 regularizer """no""" +1018 10 optimizer """adadelta""" +1018 10 training_loop """owa""" +1018 10 negative_sampler """basic""" +1018 10 evaluator """rankbased""" +1018 11 dataset """kinships""" +1018 11 model """unstructuredmodel""" +1018 11 loss """marginranking""" +1018 11 regularizer """no""" +1018 11 optimizer """adadelta""" +1018 11 training_loop """owa""" +1018 11 negative_sampler """basic""" +1018 11 evaluator """rankbased""" +1018 12 dataset """kinships""" +1018 12 model """unstructuredmodel""" +1018 12 loss """marginranking""" +1018 12 regularizer """no""" +1018 12 optimizer """adadelta""" +1018 12 training_loop """owa""" +1018 12 negative_sampler """basic""" +1018 12 evaluator """rankbased""" +1018 13 dataset """kinships""" +1018 13 model """unstructuredmodel""" +1018 13 loss """marginranking""" +1018 13 regularizer """no""" +1018 13 optimizer """adadelta""" +1018 13 training_loop """owa""" +1018 13 negative_sampler """basic""" +1018 13 evaluator """rankbased""" +1018 14 dataset """kinships""" +1018 14 model """unstructuredmodel""" +1018 14 loss """marginranking""" +1018 14 regularizer """no""" +1018 14 optimizer """adadelta""" +1018 14 training_loop """owa""" +1018 14 negative_sampler """basic""" +1018 14 evaluator """rankbased""" +1018 15 dataset """kinships""" +1018 15 model 
"""unstructuredmodel""" +1018 15 loss """marginranking""" +1018 15 regularizer """no""" +1018 15 optimizer """adadelta""" +1018 15 training_loop """owa""" +1018 15 negative_sampler """basic""" +1018 15 evaluator """rankbased""" +1018 16 dataset """kinships""" +1018 16 model """unstructuredmodel""" +1018 16 loss """marginranking""" +1018 16 regularizer """no""" +1018 16 optimizer """adadelta""" +1018 16 training_loop """owa""" +1018 16 negative_sampler """basic""" +1018 16 evaluator """rankbased""" +1018 17 dataset """kinships""" +1018 17 model """unstructuredmodel""" +1018 17 loss """marginranking""" +1018 17 regularizer """no""" +1018 17 optimizer """adadelta""" +1018 17 training_loop """owa""" +1018 17 negative_sampler """basic""" +1018 17 evaluator """rankbased""" +1018 18 dataset """kinships""" +1018 18 model """unstructuredmodel""" +1018 18 loss """marginranking""" +1018 18 regularizer """no""" +1018 18 optimizer """adadelta""" +1018 18 training_loop """owa""" +1018 18 negative_sampler """basic""" +1018 18 evaluator """rankbased""" +1018 19 dataset """kinships""" +1018 19 model """unstructuredmodel""" +1018 19 loss """marginranking""" +1018 19 regularizer """no""" +1018 19 optimizer """adadelta""" +1018 19 training_loop """owa""" +1018 19 negative_sampler """basic""" +1018 19 evaluator """rankbased""" +1018 20 dataset """kinships""" +1018 20 model """unstructuredmodel""" +1018 20 loss """marginranking""" +1018 20 regularizer """no""" +1018 20 optimizer """adadelta""" +1018 20 training_loop """owa""" +1018 20 negative_sampler """basic""" +1018 20 evaluator """rankbased""" +1018 21 dataset """kinships""" +1018 21 model """unstructuredmodel""" +1018 21 loss """marginranking""" +1018 21 regularizer """no""" +1018 21 optimizer """adadelta""" +1018 21 training_loop """owa""" +1018 21 negative_sampler """basic""" +1018 21 evaluator """rankbased""" +1018 22 dataset """kinships""" +1018 22 model """unstructuredmodel""" +1018 22 loss """marginranking""" +1018 22 
regularizer """no""" +1018 22 optimizer """adadelta""" +1018 22 training_loop """owa""" +1018 22 negative_sampler """basic""" +1018 22 evaluator """rankbased""" +1018 23 dataset """kinships""" +1018 23 model """unstructuredmodel""" +1018 23 loss """marginranking""" +1018 23 regularizer """no""" +1018 23 optimizer """adadelta""" +1018 23 training_loop """owa""" +1018 23 negative_sampler """basic""" +1018 23 evaluator """rankbased""" +1018 24 dataset """kinships""" +1018 24 model """unstructuredmodel""" +1018 24 loss """marginranking""" +1018 24 regularizer """no""" +1018 24 optimizer """adadelta""" +1018 24 training_loop """owa""" +1018 24 negative_sampler """basic""" +1018 24 evaluator """rankbased""" +1018 25 dataset """kinships""" +1018 25 model """unstructuredmodel""" +1018 25 loss """marginranking""" +1018 25 regularizer """no""" +1018 25 optimizer """adadelta""" +1018 25 training_loop """owa""" +1018 25 negative_sampler """basic""" +1018 25 evaluator """rankbased""" +1018 26 dataset """kinships""" +1018 26 model """unstructuredmodel""" +1018 26 loss """marginranking""" +1018 26 regularizer """no""" +1018 26 optimizer """adadelta""" +1018 26 training_loop """owa""" +1018 26 negative_sampler """basic""" +1018 26 evaluator """rankbased""" +1018 27 dataset """kinships""" +1018 27 model """unstructuredmodel""" +1018 27 loss """marginranking""" +1018 27 regularizer """no""" +1018 27 optimizer """adadelta""" +1018 27 training_loop """owa""" +1018 27 negative_sampler """basic""" +1018 27 evaluator """rankbased""" +1018 28 dataset """kinships""" +1018 28 model """unstructuredmodel""" +1018 28 loss """marginranking""" +1018 28 regularizer """no""" +1018 28 optimizer """adadelta""" +1018 28 training_loop """owa""" +1018 28 negative_sampler """basic""" +1018 28 evaluator """rankbased""" +1018 29 dataset """kinships""" +1018 29 model """unstructuredmodel""" +1018 29 loss """marginranking""" +1018 29 regularizer """no""" +1018 29 optimizer """adadelta""" +1018 29 
training_loop """owa""" +1018 29 negative_sampler """basic""" +1018 29 evaluator """rankbased""" +1018 30 dataset """kinships""" +1018 30 model """unstructuredmodel""" +1018 30 loss """marginranking""" +1018 30 regularizer """no""" +1018 30 optimizer """adadelta""" +1018 30 training_loop """owa""" +1018 30 negative_sampler """basic""" +1018 30 evaluator """rankbased""" +1018 31 dataset """kinships""" +1018 31 model """unstructuredmodel""" +1018 31 loss """marginranking""" +1018 31 regularizer """no""" +1018 31 optimizer """adadelta""" +1018 31 training_loop """owa""" +1018 31 negative_sampler """basic""" +1018 31 evaluator """rankbased""" +1018 32 dataset """kinships""" +1018 32 model """unstructuredmodel""" +1018 32 loss """marginranking""" +1018 32 regularizer """no""" +1018 32 optimizer """adadelta""" +1018 32 training_loop """owa""" +1018 32 negative_sampler """basic""" +1018 32 evaluator """rankbased""" +1018 33 dataset """kinships""" +1018 33 model """unstructuredmodel""" +1018 33 loss """marginranking""" +1018 33 regularizer """no""" +1018 33 optimizer """adadelta""" +1018 33 training_loop """owa""" +1018 33 negative_sampler """basic""" +1018 33 evaluator """rankbased""" +1018 34 dataset """kinships""" +1018 34 model """unstructuredmodel""" +1018 34 loss """marginranking""" +1018 34 regularizer """no""" +1018 34 optimizer """adadelta""" +1018 34 training_loop """owa""" +1018 34 negative_sampler """basic""" +1018 34 evaluator """rankbased""" +1018 35 dataset """kinships""" +1018 35 model """unstructuredmodel""" +1018 35 loss """marginranking""" +1018 35 regularizer """no""" +1018 35 optimizer """adadelta""" +1018 35 training_loop """owa""" +1018 35 negative_sampler """basic""" +1018 35 evaluator """rankbased""" +1018 36 dataset """kinships""" +1018 36 model """unstructuredmodel""" +1018 36 loss """marginranking""" +1018 36 regularizer """no""" +1018 36 optimizer """adadelta""" +1018 36 training_loop """owa""" +1018 36 negative_sampler """basic""" +1018 36 
evaluator """rankbased""" +1018 37 dataset """kinships""" +1018 37 model """unstructuredmodel""" +1018 37 loss """marginranking""" +1018 37 regularizer """no""" +1018 37 optimizer """adadelta""" +1018 37 training_loop """owa""" +1018 37 negative_sampler """basic""" +1018 37 evaluator """rankbased""" +1018 38 dataset """kinships""" +1018 38 model """unstructuredmodel""" +1018 38 loss """marginranking""" +1018 38 regularizer """no""" +1018 38 optimizer """adadelta""" +1018 38 training_loop """owa""" +1018 38 negative_sampler """basic""" +1018 38 evaluator """rankbased""" +1018 39 dataset """kinships""" +1018 39 model """unstructuredmodel""" +1018 39 loss """marginranking""" +1018 39 regularizer """no""" +1018 39 optimizer """adadelta""" +1018 39 training_loop """owa""" +1018 39 negative_sampler """basic""" +1018 39 evaluator """rankbased""" +1018 40 dataset """kinships""" +1018 40 model """unstructuredmodel""" +1018 40 loss """marginranking""" +1018 40 regularizer """no""" +1018 40 optimizer """adadelta""" +1018 40 training_loop """owa""" +1018 40 negative_sampler """basic""" +1018 40 evaluator """rankbased""" +1018 41 dataset """kinships""" +1018 41 model """unstructuredmodel""" +1018 41 loss """marginranking""" +1018 41 regularizer """no""" +1018 41 optimizer """adadelta""" +1018 41 training_loop """owa""" +1018 41 negative_sampler """basic""" +1018 41 evaluator """rankbased""" +1018 42 dataset """kinships""" +1018 42 model """unstructuredmodel""" +1018 42 loss """marginranking""" +1018 42 regularizer """no""" +1018 42 optimizer """adadelta""" +1018 42 training_loop """owa""" +1018 42 negative_sampler """basic""" +1018 42 evaluator """rankbased""" +1018 43 dataset """kinships""" +1018 43 model """unstructuredmodel""" +1018 43 loss """marginranking""" +1018 43 regularizer """no""" +1018 43 optimizer """adadelta""" +1018 43 training_loop """owa""" +1018 43 negative_sampler """basic""" +1018 43 evaluator """rankbased""" +1018 44 dataset """kinships""" +1018 44 model 
"""unstructuredmodel""" +1018 44 loss """marginranking""" +1018 44 regularizer """no""" +1018 44 optimizer """adadelta""" +1018 44 training_loop """owa""" +1018 44 negative_sampler """basic""" +1018 44 evaluator """rankbased""" +1018 45 dataset """kinships""" +1018 45 model """unstructuredmodel""" +1018 45 loss """marginranking""" +1018 45 regularizer """no""" +1018 45 optimizer """adadelta""" +1018 45 training_loop """owa""" +1018 45 negative_sampler """basic""" +1018 45 evaluator """rankbased""" +1018 46 dataset """kinships""" +1018 46 model """unstructuredmodel""" +1018 46 loss """marginranking""" +1018 46 regularizer """no""" +1018 46 optimizer """adadelta""" +1018 46 training_loop """owa""" +1018 46 negative_sampler """basic""" +1018 46 evaluator """rankbased""" +1018 47 dataset """kinships""" +1018 47 model """unstructuredmodel""" +1018 47 loss """marginranking""" +1018 47 regularizer """no""" +1018 47 optimizer """adadelta""" +1018 47 training_loop """owa""" +1018 47 negative_sampler """basic""" +1018 47 evaluator """rankbased""" +1018 48 dataset """kinships""" +1018 48 model """unstructuredmodel""" +1018 48 loss """marginranking""" +1018 48 regularizer """no""" +1018 48 optimizer """adadelta""" +1018 48 training_loop """owa""" +1018 48 negative_sampler """basic""" +1018 48 evaluator """rankbased""" +1018 49 dataset """kinships""" +1018 49 model """unstructuredmodel""" +1018 49 loss """marginranking""" +1018 49 regularizer """no""" +1018 49 optimizer """adadelta""" +1018 49 training_loop """owa""" +1018 49 negative_sampler """basic""" +1018 49 evaluator """rankbased""" +1018 50 dataset """kinships""" +1018 50 model """unstructuredmodel""" +1018 50 loss """marginranking""" +1018 50 regularizer """no""" +1018 50 optimizer """adadelta""" +1018 50 training_loop """owa""" +1018 50 negative_sampler """basic""" +1018 50 evaluator """rankbased""" +1018 51 dataset """kinships""" +1018 51 model """unstructuredmodel""" +1018 51 loss """marginranking""" +1018 51 
regularizer """no""" +1018 51 optimizer """adadelta""" +1018 51 training_loop """owa""" +1018 51 negative_sampler """basic""" +1018 51 evaluator """rankbased""" +1018 52 dataset """kinships""" +1018 52 model """unstructuredmodel""" +1018 52 loss """marginranking""" +1018 52 regularizer """no""" +1018 52 optimizer """adadelta""" +1018 52 training_loop """owa""" +1018 52 negative_sampler """basic""" +1018 52 evaluator """rankbased""" +1018 53 dataset """kinships""" +1018 53 model """unstructuredmodel""" +1018 53 loss """marginranking""" +1018 53 regularizer """no""" +1018 53 optimizer """adadelta""" +1018 53 training_loop """owa""" +1018 53 negative_sampler """basic""" +1018 53 evaluator """rankbased""" +1018 54 dataset """kinships""" +1018 54 model """unstructuredmodel""" +1018 54 loss """marginranking""" +1018 54 regularizer """no""" +1018 54 optimizer """adadelta""" +1018 54 training_loop """owa""" +1018 54 negative_sampler """basic""" +1018 54 evaluator """rankbased""" +1018 55 dataset """kinships""" +1018 55 model """unstructuredmodel""" +1018 55 loss """marginranking""" +1018 55 regularizer """no""" +1018 55 optimizer """adadelta""" +1018 55 training_loop """owa""" +1018 55 negative_sampler """basic""" +1018 55 evaluator """rankbased""" +1018 56 dataset """kinships""" +1018 56 model """unstructuredmodel""" +1018 56 loss """marginranking""" +1018 56 regularizer """no""" +1018 56 optimizer """adadelta""" +1018 56 training_loop """owa""" +1018 56 negative_sampler """basic""" +1018 56 evaluator """rankbased""" +1018 57 dataset """kinships""" +1018 57 model """unstructuredmodel""" +1018 57 loss """marginranking""" +1018 57 regularizer """no""" +1018 57 optimizer """adadelta""" +1018 57 training_loop """owa""" +1018 57 negative_sampler """basic""" +1018 57 evaluator """rankbased""" +1018 58 dataset """kinships""" +1018 58 model """unstructuredmodel""" +1018 58 loss """marginranking""" +1018 58 regularizer """no""" +1018 58 optimizer """adadelta""" +1018 58 
training_loop """owa""" +1018 58 negative_sampler """basic""" +1018 58 evaluator """rankbased""" +1018 59 dataset """kinships""" +1018 59 model """unstructuredmodel""" +1018 59 loss """marginranking""" +1018 59 regularizer """no""" +1018 59 optimizer """adadelta""" +1018 59 training_loop """owa""" +1018 59 negative_sampler """basic""" +1018 59 evaluator """rankbased""" +1018 60 dataset """kinships""" +1018 60 model """unstructuredmodel""" +1018 60 loss """marginranking""" +1018 60 regularizer """no""" +1018 60 optimizer """adadelta""" +1018 60 training_loop """owa""" +1018 60 negative_sampler """basic""" +1018 60 evaluator """rankbased""" +1018 61 dataset """kinships""" +1018 61 model """unstructuredmodel""" +1018 61 loss """marginranking""" +1018 61 regularizer """no""" +1018 61 optimizer """adadelta""" +1018 61 training_loop """owa""" +1018 61 negative_sampler """basic""" +1018 61 evaluator """rankbased""" +1018 62 dataset """kinships""" +1018 62 model """unstructuredmodel""" +1018 62 loss """marginranking""" +1018 62 regularizer """no""" +1018 62 optimizer """adadelta""" +1018 62 training_loop """owa""" +1018 62 negative_sampler """basic""" +1018 62 evaluator """rankbased""" +1018 63 dataset """kinships""" +1018 63 model """unstructuredmodel""" +1018 63 loss """marginranking""" +1018 63 regularizer """no""" +1018 63 optimizer """adadelta""" +1018 63 training_loop """owa""" +1018 63 negative_sampler """basic""" +1018 63 evaluator """rankbased""" +1018 64 dataset """kinships""" +1018 64 model """unstructuredmodel""" +1018 64 loss """marginranking""" +1018 64 regularizer """no""" +1018 64 optimizer """adadelta""" +1018 64 training_loop """owa""" +1018 64 negative_sampler """basic""" +1018 64 evaluator """rankbased""" +1018 65 dataset """kinships""" +1018 65 model """unstructuredmodel""" +1018 65 loss """marginranking""" +1018 65 regularizer """no""" +1018 65 optimizer """adadelta""" +1018 65 training_loop """owa""" +1018 65 negative_sampler """basic""" +1018 65 
evaluator """rankbased""" +1018 66 dataset """kinships""" +1018 66 model """unstructuredmodel""" +1018 66 loss """marginranking""" +1018 66 regularizer """no""" +1018 66 optimizer """adadelta""" +1018 66 training_loop """owa""" +1018 66 negative_sampler """basic""" +1018 66 evaluator """rankbased""" +1018 67 dataset """kinships""" +1018 67 model """unstructuredmodel""" +1018 67 loss """marginranking""" +1018 67 regularizer """no""" +1018 67 optimizer """adadelta""" +1018 67 training_loop """owa""" +1018 67 negative_sampler """basic""" +1018 67 evaluator """rankbased""" +1018 68 dataset """kinships""" +1018 68 model """unstructuredmodel""" +1018 68 loss """marginranking""" +1018 68 regularizer """no""" +1018 68 optimizer """adadelta""" +1018 68 training_loop """owa""" +1018 68 negative_sampler """basic""" +1018 68 evaluator """rankbased""" +1018 69 dataset """kinships""" +1018 69 model """unstructuredmodel""" +1018 69 loss """marginranking""" +1018 69 regularizer """no""" +1018 69 optimizer """adadelta""" +1018 69 training_loop """owa""" +1018 69 negative_sampler """basic""" +1018 69 evaluator """rankbased""" +1018 70 dataset """kinships""" +1018 70 model """unstructuredmodel""" +1018 70 loss """marginranking""" +1018 70 regularizer """no""" +1018 70 optimizer """adadelta""" +1018 70 training_loop """owa""" +1018 70 negative_sampler """basic""" +1018 70 evaluator """rankbased""" +1018 71 dataset """kinships""" +1018 71 model """unstructuredmodel""" +1018 71 loss """marginranking""" +1018 71 regularizer """no""" +1018 71 optimizer """adadelta""" +1018 71 training_loop """owa""" +1018 71 negative_sampler """basic""" +1018 71 evaluator """rankbased""" +1018 72 dataset """kinships""" +1018 72 model """unstructuredmodel""" +1018 72 loss """marginranking""" +1018 72 regularizer """no""" +1018 72 optimizer """adadelta""" +1018 72 training_loop """owa""" +1018 72 negative_sampler """basic""" +1018 72 evaluator """rankbased""" +1018 73 dataset """kinships""" +1018 73 model 
"""unstructuredmodel""" +1018 73 loss """marginranking""" +1018 73 regularizer """no""" +1018 73 optimizer """adadelta""" +1018 73 training_loop """owa""" +1018 73 negative_sampler """basic""" +1018 73 evaluator """rankbased""" +1018 74 dataset """kinships""" +1018 74 model """unstructuredmodel""" +1018 74 loss """marginranking""" +1018 74 regularizer """no""" +1018 74 optimizer """adadelta""" +1018 74 training_loop """owa""" +1018 74 negative_sampler """basic""" +1018 74 evaluator """rankbased""" +1018 75 dataset """kinships""" +1018 75 model """unstructuredmodel""" +1018 75 loss """marginranking""" +1018 75 regularizer """no""" +1018 75 optimizer """adadelta""" +1018 75 training_loop """owa""" +1018 75 negative_sampler """basic""" +1018 75 evaluator """rankbased""" +1018 76 dataset """kinships""" +1018 76 model """unstructuredmodel""" +1018 76 loss """marginranking""" +1018 76 regularizer """no""" +1018 76 optimizer """adadelta""" +1018 76 training_loop """owa""" +1018 76 negative_sampler """basic""" +1018 76 evaluator """rankbased""" +1018 77 dataset """kinships""" +1018 77 model """unstructuredmodel""" +1018 77 loss """marginranking""" +1018 77 regularizer """no""" +1018 77 optimizer """adadelta""" +1018 77 training_loop """owa""" +1018 77 negative_sampler """basic""" +1018 77 evaluator """rankbased""" +1018 78 dataset """kinships""" +1018 78 model """unstructuredmodel""" +1018 78 loss """marginranking""" +1018 78 regularizer """no""" +1018 78 optimizer """adadelta""" +1018 78 training_loop """owa""" +1018 78 negative_sampler """basic""" +1018 78 evaluator """rankbased""" +1018 79 dataset """kinships""" +1018 79 model """unstructuredmodel""" +1018 79 loss """marginranking""" +1018 79 regularizer """no""" +1018 79 optimizer """adadelta""" +1018 79 training_loop """owa""" +1018 79 negative_sampler """basic""" +1018 79 evaluator """rankbased""" +1018 80 dataset """kinships""" +1018 80 model """unstructuredmodel""" +1018 80 loss """marginranking""" +1018 80 
regularizer """no""" +1018 80 optimizer """adadelta""" +1018 80 training_loop """owa""" +1018 80 negative_sampler """basic""" +1018 80 evaluator """rankbased""" +1018 81 dataset """kinships""" +1018 81 model """unstructuredmodel""" +1018 81 loss """marginranking""" +1018 81 regularizer """no""" +1018 81 optimizer """adadelta""" +1018 81 training_loop """owa""" +1018 81 negative_sampler """basic""" +1018 81 evaluator """rankbased""" +1018 82 dataset """kinships""" +1018 82 model """unstructuredmodel""" +1018 82 loss """marginranking""" +1018 82 regularizer """no""" +1018 82 optimizer """adadelta""" +1018 82 training_loop """owa""" +1018 82 negative_sampler """basic""" +1018 82 evaluator """rankbased""" +1018 83 dataset """kinships""" +1018 83 model """unstructuredmodel""" +1018 83 loss """marginranking""" +1018 83 regularizer """no""" +1018 83 optimizer """adadelta""" +1018 83 training_loop """owa""" +1018 83 negative_sampler """basic""" +1018 83 evaluator """rankbased""" +1018 84 dataset """kinships""" +1018 84 model """unstructuredmodel""" +1018 84 loss """marginranking""" +1018 84 regularizer """no""" +1018 84 optimizer """adadelta""" +1018 84 training_loop """owa""" +1018 84 negative_sampler """basic""" +1018 84 evaluator """rankbased""" +1018 85 dataset """kinships""" +1018 85 model """unstructuredmodel""" +1018 85 loss """marginranking""" +1018 85 regularizer """no""" +1018 85 optimizer """adadelta""" +1018 85 training_loop """owa""" +1018 85 negative_sampler """basic""" +1018 85 evaluator """rankbased""" +1018 86 dataset """kinships""" +1018 86 model """unstructuredmodel""" +1018 86 loss """marginranking""" +1018 86 regularizer """no""" +1018 86 optimizer """adadelta""" +1018 86 training_loop """owa""" +1018 86 negative_sampler """basic""" +1018 86 evaluator """rankbased""" +1018 87 dataset """kinships""" +1018 87 model """unstructuredmodel""" +1018 87 loss """marginranking""" +1018 87 regularizer """no""" +1018 87 optimizer """adadelta""" +1018 87 
training_loop """owa""" +1018 87 negative_sampler """basic""" +1018 87 evaluator """rankbased""" +1018 88 dataset """kinships""" +1018 88 model """unstructuredmodel""" +1018 88 loss """marginranking""" +1018 88 regularizer """no""" +1018 88 optimizer """adadelta""" +1018 88 training_loop """owa""" +1018 88 negative_sampler """basic""" +1018 88 evaluator """rankbased""" +1018 89 dataset """kinships""" +1018 89 model """unstructuredmodel""" +1018 89 loss """marginranking""" +1018 89 regularizer """no""" +1018 89 optimizer """adadelta""" +1018 89 training_loop """owa""" +1018 89 negative_sampler """basic""" +1018 89 evaluator """rankbased""" +1018 90 dataset """kinships""" +1018 90 model """unstructuredmodel""" +1018 90 loss """marginranking""" +1018 90 regularizer """no""" +1018 90 optimizer """adadelta""" +1018 90 training_loop """owa""" +1018 90 negative_sampler """basic""" +1018 90 evaluator """rankbased""" +1018 91 dataset """kinships""" +1018 91 model """unstructuredmodel""" +1018 91 loss """marginranking""" +1018 91 regularizer """no""" +1018 91 optimizer """adadelta""" +1018 91 training_loop """owa""" +1018 91 negative_sampler """basic""" +1018 91 evaluator """rankbased""" +1018 92 dataset """kinships""" +1018 92 model """unstructuredmodel""" +1018 92 loss """marginranking""" +1018 92 regularizer """no""" +1018 92 optimizer """adadelta""" +1018 92 training_loop """owa""" +1018 92 negative_sampler """basic""" +1018 92 evaluator """rankbased""" +1018 93 dataset """kinships""" +1018 93 model """unstructuredmodel""" +1018 93 loss """marginranking""" +1018 93 regularizer """no""" +1018 93 optimizer """adadelta""" +1018 93 training_loop """owa""" +1018 93 negative_sampler """basic""" +1018 93 evaluator """rankbased""" +1018 94 dataset """kinships""" +1018 94 model """unstructuredmodel""" +1018 94 loss """marginranking""" +1018 94 regularizer """no""" +1018 94 optimizer """adadelta""" +1018 94 training_loop """owa""" +1018 94 negative_sampler """basic""" +1018 94 
evaluator """rankbased""" +1018 95 dataset """kinships""" +1018 95 model """unstructuredmodel""" +1018 95 loss """marginranking""" +1018 95 regularizer """no""" +1018 95 optimizer """adadelta""" +1018 95 training_loop """owa""" +1018 95 negative_sampler """basic""" +1018 95 evaluator """rankbased""" +1018 96 dataset """kinships""" +1018 96 model """unstructuredmodel""" +1018 96 loss """marginranking""" +1018 96 regularizer """no""" +1018 96 optimizer """adadelta""" +1018 96 training_loop """owa""" +1018 96 negative_sampler """basic""" +1018 96 evaluator """rankbased""" +1018 97 dataset """kinships""" +1018 97 model """unstructuredmodel""" +1018 97 loss """marginranking""" +1018 97 regularizer """no""" +1018 97 optimizer """adadelta""" +1018 97 training_loop """owa""" +1018 97 negative_sampler """basic""" +1018 97 evaluator """rankbased""" +1018 98 dataset """kinships""" +1018 98 model """unstructuredmodel""" +1018 98 loss """marginranking""" +1018 98 regularizer """no""" +1018 98 optimizer """adadelta""" +1018 98 training_loop """owa""" +1018 98 negative_sampler """basic""" +1018 98 evaluator """rankbased""" +1018 99 dataset """kinships""" +1018 99 model """unstructuredmodel""" +1018 99 loss """marginranking""" +1018 99 regularizer """no""" +1018 99 optimizer """adadelta""" +1018 99 training_loop """owa""" +1018 99 negative_sampler """basic""" +1018 99 evaluator """rankbased""" +1018 100 dataset """kinships""" +1018 100 model """unstructuredmodel""" +1018 100 loss """marginranking""" +1018 100 regularizer """no""" +1018 100 optimizer """adadelta""" +1018 100 training_loop """owa""" +1018 100 negative_sampler """basic""" +1018 100 evaluator """rankbased""" +1019 1 model.embedding_dim 1.0 +1019 1 model.scoring_fct_norm 2.0 +1019 1 loss.margin 9.245782378810917 +1019 1 negative_sampler.num_negs_per_pos 39.0 +1019 1 training.batch_size 0.0 +1019 2 model.embedding_dim 2.0 +1019 2 model.scoring_fct_norm 1.0 +1019 2 loss.margin 5.109493431064738 +1019 2 
negative_sampler.num_negs_per_pos 14.0 +1019 2 training.batch_size 1.0 +1019 3 model.embedding_dim 2.0 +1019 3 model.scoring_fct_norm 1.0 +1019 3 loss.margin 5.643653922166341 +1019 3 negative_sampler.num_negs_per_pos 49.0 +1019 3 training.batch_size 0.0 +1019 4 model.embedding_dim 2.0 +1019 4 model.scoring_fct_norm 1.0 +1019 4 loss.margin 2.0487860248456755 +1019 4 negative_sampler.num_negs_per_pos 90.0 +1019 4 training.batch_size 2.0 +1019 5 model.embedding_dim 0.0 +1019 5 model.scoring_fct_norm 1.0 +1019 5 loss.margin 7.729231508719995 +1019 5 negative_sampler.num_negs_per_pos 7.0 +1019 5 training.batch_size 0.0 +1019 6 model.embedding_dim 1.0 +1019 6 model.scoring_fct_norm 2.0 +1019 6 loss.margin 7.745280876988503 +1019 6 negative_sampler.num_negs_per_pos 91.0 +1019 6 training.batch_size 0.0 +1019 7 model.embedding_dim 0.0 +1019 7 model.scoring_fct_norm 1.0 +1019 7 loss.margin 4.814685503372325 +1019 7 negative_sampler.num_negs_per_pos 7.0 +1019 7 training.batch_size 0.0 +1019 8 model.embedding_dim 1.0 +1019 8 model.scoring_fct_norm 2.0 +1019 8 loss.margin 6.656916611556158 +1019 8 negative_sampler.num_negs_per_pos 81.0 +1019 8 training.batch_size 2.0 +1019 9 model.embedding_dim 1.0 +1019 9 model.scoring_fct_norm 1.0 +1019 9 loss.margin 6.002521890439202 +1019 9 negative_sampler.num_negs_per_pos 27.0 +1019 9 training.batch_size 1.0 +1019 10 model.embedding_dim 1.0 +1019 10 model.scoring_fct_norm 2.0 +1019 10 loss.margin 1.1885175526291352 +1019 10 negative_sampler.num_negs_per_pos 23.0 +1019 10 training.batch_size 1.0 +1019 11 model.embedding_dim 1.0 +1019 11 model.scoring_fct_norm 1.0 +1019 11 loss.margin 7.143938361859379 +1019 11 negative_sampler.num_negs_per_pos 64.0 +1019 11 training.batch_size 0.0 +1019 12 model.embedding_dim 0.0 +1019 12 model.scoring_fct_norm 1.0 +1019 12 loss.margin 5.941442114573088 +1019 12 negative_sampler.num_negs_per_pos 71.0 +1019 12 training.batch_size 1.0 +1019 13 model.embedding_dim 0.0 +1019 13 model.scoring_fct_norm 1.0 
+1019 13 loss.margin 6.689611707476349 +1019 13 negative_sampler.num_negs_per_pos 21.0 +1019 13 training.batch_size 0.0 +1019 14 model.embedding_dim 0.0 +1019 14 model.scoring_fct_norm 2.0 +1019 14 loss.margin 4.045652827839568 +1019 14 negative_sampler.num_negs_per_pos 40.0 +1019 14 training.batch_size 1.0 +1019 15 model.embedding_dim 1.0 +1019 15 model.scoring_fct_norm 2.0 +1019 15 loss.margin 1.5307963418259383 +1019 15 negative_sampler.num_negs_per_pos 39.0 +1019 15 training.batch_size 1.0 +1019 16 model.embedding_dim 0.0 +1019 16 model.scoring_fct_norm 2.0 +1019 16 loss.margin 6.6016461072634 +1019 16 negative_sampler.num_negs_per_pos 81.0 +1019 16 training.batch_size 0.0 +1019 17 model.embedding_dim 2.0 +1019 17 model.scoring_fct_norm 2.0 +1019 17 loss.margin 1.473125265311067 +1019 17 negative_sampler.num_negs_per_pos 53.0 +1019 17 training.batch_size 0.0 +1019 18 model.embedding_dim 1.0 +1019 18 model.scoring_fct_norm 1.0 +1019 18 loss.margin 9.38318625321353 +1019 18 negative_sampler.num_negs_per_pos 5.0 +1019 18 training.batch_size 0.0 +1019 19 model.embedding_dim 2.0 +1019 19 model.scoring_fct_norm 1.0 +1019 19 loss.margin 1.305170232644095 +1019 19 negative_sampler.num_negs_per_pos 86.0 +1019 19 training.batch_size 2.0 +1019 20 model.embedding_dim 2.0 +1019 20 model.scoring_fct_norm 2.0 +1019 20 loss.margin 9.937239181345843 +1019 20 negative_sampler.num_negs_per_pos 54.0 +1019 20 training.batch_size 2.0 +1019 21 model.embedding_dim 0.0 +1019 21 model.scoring_fct_norm 1.0 +1019 21 loss.margin 7.995195803106898 +1019 21 negative_sampler.num_negs_per_pos 30.0 +1019 21 training.batch_size 0.0 +1019 22 model.embedding_dim 1.0 +1019 22 model.scoring_fct_norm 2.0 +1019 22 loss.margin 8.121066765029296 +1019 22 negative_sampler.num_negs_per_pos 71.0 +1019 22 training.batch_size 2.0 +1019 23 model.embedding_dim 1.0 +1019 23 model.scoring_fct_norm 2.0 +1019 23 loss.margin 9.072214176405813 +1019 23 negative_sampler.num_negs_per_pos 70.0 +1019 23 
training.batch_size 2.0 +1019 24 model.embedding_dim 1.0 +1019 24 model.scoring_fct_norm 1.0 +1019 24 loss.margin 5.946153201178649 +1019 24 negative_sampler.num_negs_per_pos 81.0 +1019 24 training.batch_size 2.0 +1019 25 model.embedding_dim 1.0 +1019 25 model.scoring_fct_norm 1.0 +1019 25 loss.margin 2.921939927077278 +1019 25 negative_sampler.num_negs_per_pos 53.0 +1019 25 training.batch_size 0.0 +1019 26 model.embedding_dim 2.0 +1019 26 model.scoring_fct_norm 1.0 +1019 26 loss.margin 9.703697312837594 +1019 26 negative_sampler.num_negs_per_pos 42.0 +1019 26 training.batch_size 2.0 +1019 27 model.embedding_dim 2.0 +1019 27 model.scoring_fct_norm 1.0 +1019 27 loss.margin 1.8248708233957784 +1019 27 negative_sampler.num_negs_per_pos 87.0 +1019 27 training.batch_size 1.0 +1019 28 model.embedding_dim 2.0 +1019 28 model.scoring_fct_norm 1.0 +1019 28 loss.margin 3.3946702278552006 +1019 28 negative_sampler.num_negs_per_pos 92.0 +1019 28 training.batch_size 0.0 +1019 29 model.embedding_dim 2.0 +1019 29 model.scoring_fct_norm 2.0 +1019 29 loss.margin 9.18066541138681 +1019 29 negative_sampler.num_negs_per_pos 73.0 +1019 29 training.batch_size 0.0 +1019 30 model.embedding_dim 2.0 +1019 30 model.scoring_fct_norm 1.0 +1019 30 loss.margin 8.74827668966609 +1019 30 negative_sampler.num_negs_per_pos 34.0 +1019 30 training.batch_size 1.0 +1019 31 model.embedding_dim 0.0 +1019 31 model.scoring_fct_norm 2.0 +1019 31 loss.margin 4.452157029872536 +1019 31 negative_sampler.num_negs_per_pos 12.0 +1019 31 training.batch_size 1.0 +1019 32 model.embedding_dim 2.0 +1019 32 model.scoring_fct_norm 2.0 +1019 32 loss.margin 4.4491714210159365 +1019 32 negative_sampler.num_negs_per_pos 50.0 +1019 32 training.batch_size 0.0 +1019 33 model.embedding_dim 1.0 +1019 33 model.scoring_fct_norm 2.0 +1019 33 loss.margin 9.034053643609907 +1019 33 negative_sampler.num_negs_per_pos 66.0 +1019 33 training.batch_size 2.0 +1019 34 model.embedding_dim 0.0 +1019 34 model.scoring_fct_norm 2.0 +1019 34 
loss.margin 5.144072023604368 +1019 34 negative_sampler.num_negs_per_pos 1.0 +1019 34 training.batch_size 1.0 +1019 35 model.embedding_dim 2.0 +1019 35 model.scoring_fct_norm 1.0 +1019 35 loss.margin 9.895811669655407 +1019 35 negative_sampler.num_negs_per_pos 35.0 +1019 35 training.batch_size 1.0 +1019 36 model.embedding_dim 0.0 +1019 36 model.scoring_fct_norm 2.0 +1019 36 loss.margin 1.506721245032175 +1019 36 negative_sampler.num_negs_per_pos 57.0 +1019 36 training.batch_size 0.0 +1019 37 model.embedding_dim 1.0 +1019 37 model.scoring_fct_norm 2.0 +1019 37 loss.margin 6.473816289804779 +1019 37 negative_sampler.num_negs_per_pos 47.0 +1019 37 training.batch_size 1.0 +1019 38 model.embedding_dim 2.0 +1019 38 model.scoring_fct_norm 2.0 +1019 38 loss.margin 2.9496823669312575 +1019 38 negative_sampler.num_negs_per_pos 71.0 +1019 38 training.batch_size 1.0 +1019 39 model.embedding_dim 0.0 +1019 39 model.scoring_fct_norm 2.0 +1019 39 loss.margin 5.843592952625308 +1019 39 negative_sampler.num_negs_per_pos 15.0 +1019 39 training.batch_size 1.0 +1019 40 model.embedding_dim 0.0 +1019 40 model.scoring_fct_norm 1.0 +1019 40 loss.margin 8.179470908935087 +1019 40 negative_sampler.num_negs_per_pos 2.0 +1019 40 training.batch_size 1.0 +1019 41 model.embedding_dim 0.0 +1019 41 model.scoring_fct_norm 2.0 +1019 41 loss.margin 7.266491916310681 +1019 41 negative_sampler.num_negs_per_pos 40.0 +1019 41 training.batch_size 2.0 +1019 42 model.embedding_dim 0.0 +1019 42 model.scoring_fct_norm 2.0 +1019 42 loss.margin 3.410108349911246 +1019 42 negative_sampler.num_negs_per_pos 48.0 +1019 42 training.batch_size 2.0 +1019 43 model.embedding_dim 2.0 +1019 43 model.scoring_fct_norm 2.0 +1019 43 loss.margin 7.404148270291762 +1019 43 negative_sampler.num_negs_per_pos 87.0 +1019 43 training.batch_size 0.0 +1019 44 model.embedding_dim 2.0 +1019 44 model.scoring_fct_norm 1.0 +1019 44 loss.margin 7.003357843284012 +1019 44 negative_sampler.num_negs_per_pos 23.0 +1019 44 training.batch_size 2.0 
+1019 45 model.embedding_dim 1.0 +1019 45 model.scoring_fct_norm 2.0 +1019 45 loss.margin 6.177732255518018 +1019 45 negative_sampler.num_negs_per_pos 41.0 +1019 45 training.batch_size 0.0 +1019 46 model.embedding_dim 2.0 +1019 46 model.scoring_fct_norm 2.0 +1019 46 loss.margin 6.1599277235817365 +1019 46 negative_sampler.num_negs_per_pos 21.0 +1019 46 training.batch_size 0.0 +1019 47 model.embedding_dim 0.0 +1019 47 model.scoring_fct_norm 2.0 +1019 47 loss.margin 1.698708400540998 +1019 47 negative_sampler.num_negs_per_pos 87.0 +1019 47 training.batch_size 2.0 +1019 48 model.embedding_dim 2.0 +1019 48 model.scoring_fct_norm 1.0 +1019 48 loss.margin 8.231172094705009 +1019 48 negative_sampler.num_negs_per_pos 80.0 +1019 48 training.batch_size 0.0 +1019 49 model.embedding_dim 2.0 +1019 49 model.scoring_fct_norm 1.0 +1019 49 loss.margin 9.913898537340074 +1019 49 negative_sampler.num_negs_per_pos 76.0 +1019 49 training.batch_size 2.0 +1019 50 model.embedding_dim 2.0 +1019 50 model.scoring_fct_norm 1.0 +1019 50 loss.margin 7.697853151034264 +1019 50 negative_sampler.num_negs_per_pos 55.0 +1019 50 training.batch_size 1.0 +1019 51 model.embedding_dim 1.0 +1019 51 model.scoring_fct_norm 2.0 +1019 51 loss.margin 4.17466151430968 +1019 51 negative_sampler.num_negs_per_pos 45.0 +1019 51 training.batch_size 2.0 +1019 52 model.embedding_dim 1.0 +1019 52 model.scoring_fct_norm 1.0 +1019 52 loss.margin 2.04081053283552 +1019 52 negative_sampler.num_negs_per_pos 67.0 +1019 52 training.batch_size 2.0 +1019 53 model.embedding_dim 1.0 +1019 53 model.scoring_fct_norm 1.0 +1019 53 loss.margin 3.1738670977755397 +1019 53 negative_sampler.num_negs_per_pos 79.0 +1019 53 training.batch_size 2.0 +1019 54 model.embedding_dim 2.0 +1019 54 model.scoring_fct_norm 2.0 +1019 54 loss.margin 9.965046366214713 +1019 54 negative_sampler.num_negs_per_pos 1.0 +1019 54 training.batch_size 0.0 +1019 55 model.embedding_dim 0.0 +1019 55 model.scoring_fct_norm 1.0 +1019 55 loss.margin 8.109911442200424 
+1019 55 negative_sampler.num_negs_per_pos 19.0 +1019 55 training.batch_size 1.0 +1019 56 model.embedding_dim 1.0 +1019 56 model.scoring_fct_norm 2.0 +1019 56 loss.margin 8.58970449669216 +1019 56 negative_sampler.num_negs_per_pos 87.0 +1019 56 training.batch_size 0.0 +1019 57 model.embedding_dim 2.0 +1019 57 model.scoring_fct_norm 2.0 +1019 57 loss.margin 5.456214203068054 +1019 57 negative_sampler.num_negs_per_pos 20.0 +1019 57 training.batch_size 2.0 +1019 58 model.embedding_dim 1.0 +1019 58 model.scoring_fct_norm 1.0 +1019 58 loss.margin 9.752145787456652 +1019 58 negative_sampler.num_negs_per_pos 93.0 +1019 58 training.batch_size 2.0 +1019 59 model.embedding_dim 1.0 +1019 59 model.scoring_fct_norm 1.0 +1019 59 loss.margin 6.722353619723099 +1019 59 negative_sampler.num_negs_per_pos 35.0 +1019 59 training.batch_size 1.0 +1019 60 model.embedding_dim 2.0 +1019 60 model.scoring_fct_norm 2.0 +1019 60 loss.margin 9.983498997045974 +1019 60 negative_sampler.num_negs_per_pos 29.0 +1019 60 training.batch_size 1.0 +1019 61 model.embedding_dim 2.0 +1019 61 model.scoring_fct_norm 1.0 +1019 61 loss.margin 7.200939952671558 +1019 61 negative_sampler.num_negs_per_pos 8.0 +1019 61 training.batch_size 1.0 +1019 62 model.embedding_dim 1.0 +1019 62 model.scoring_fct_norm 1.0 +1019 62 loss.margin 6.055700397350698 +1019 62 negative_sampler.num_negs_per_pos 0.0 +1019 62 training.batch_size 1.0 +1019 63 model.embedding_dim 1.0 +1019 63 model.scoring_fct_norm 2.0 +1019 63 loss.margin 4.579060768034712 +1019 63 negative_sampler.num_negs_per_pos 76.0 +1019 63 training.batch_size 2.0 +1019 64 model.embedding_dim 1.0 +1019 64 model.scoring_fct_norm 2.0 +1019 64 loss.margin 5.022219872296002 +1019 64 negative_sampler.num_negs_per_pos 74.0 +1019 64 training.batch_size 2.0 +1019 65 model.embedding_dim 0.0 +1019 65 model.scoring_fct_norm 2.0 +1019 65 loss.margin 4.836422343105266 +1019 65 negative_sampler.num_negs_per_pos 0.0 +1019 65 training.batch_size 0.0 +1019 66 model.embedding_dim 1.0 
+1019 66 model.scoring_fct_norm 1.0 +1019 66 loss.margin 1.602450402333408 +1019 66 negative_sampler.num_negs_per_pos 13.0 +1019 66 training.batch_size 1.0 +1019 67 model.embedding_dim 1.0 +1019 67 model.scoring_fct_norm 1.0 +1019 67 loss.margin 7.909657489487647 +1019 67 negative_sampler.num_negs_per_pos 47.0 +1019 67 training.batch_size 0.0 +1019 68 model.embedding_dim 0.0 +1019 68 model.scoring_fct_norm 2.0 +1019 68 loss.margin 0.8371425922898571 +1019 68 negative_sampler.num_negs_per_pos 80.0 +1019 68 training.batch_size 1.0 +1019 69 model.embedding_dim 1.0 +1019 69 model.scoring_fct_norm 1.0 +1019 69 loss.margin 2.901021904981384 +1019 69 negative_sampler.num_negs_per_pos 59.0 +1019 69 training.batch_size 1.0 +1019 70 model.embedding_dim 1.0 +1019 70 model.scoring_fct_norm 1.0 +1019 70 loss.margin 4.843760822160866 +1019 70 negative_sampler.num_negs_per_pos 14.0 +1019 70 training.batch_size 0.0 +1019 71 model.embedding_dim 2.0 +1019 71 model.scoring_fct_norm 1.0 +1019 71 loss.margin 2.8695490241423185 +1019 71 negative_sampler.num_negs_per_pos 54.0 +1019 71 training.batch_size 1.0 +1019 72 model.embedding_dim 1.0 +1019 72 model.scoring_fct_norm 1.0 +1019 72 loss.margin 5.189833728909472 +1019 72 negative_sampler.num_negs_per_pos 22.0 +1019 72 training.batch_size 1.0 +1019 73 model.embedding_dim 1.0 +1019 73 model.scoring_fct_norm 1.0 +1019 73 loss.margin 0.9469168194672153 +1019 73 negative_sampler.num_negs_per_pos 2.0 +1019 73 training.batch_size 1.0 +1019 74 model.embedding_dim 0.0 +1019 74 model.scoring_fct_norm 1.0 +1019 74 loss.margin 4.97185941347704 +1019 74 negative_sampler.num_negs_per_pos 19.0 +1019 74 training.batch_size 2.0 +1019 75 model.embedding_dim 2.0 +1019 75 model.scoring_fct_norm 2.0 +1019 75 loss.margin 5.08707222003167 +1019 75 negative_sampler.num_negs_per_pos 26.0 +1019 75 training.batch_size 0.0 +1019 76 model.embedding_dim 2.0 +1019 76 model.scoring_fct_norm 1.0 +1019 76 loss.margin 4.334006118224291 +1019 76 
negative_sampler.num_negs_per_pos 86.0 +1019 76 training.batch_size 2.0 +1019 77 model.embedding_dim 2.0 +1019 77 model.scoring_fct_norm 1.0 +1019 77 loss.margin 0.9327979647392279 +1019 77 negative_sampler.num_negs_per_pos 29.0 +1019 77 training.batch_size 1.0 +1019 78 model.embedding_dim 0.0 +1019 78 model.scoring_fct_norm 2.0 +1019 78 loss.margin 7.073352825489729 +1019 78 negative_sampler.num_negs_per_pos 71.0 +1019 78 training.batch_size 2.0 +1019 79 model.embedding_dim 1.0 +1019 79 model.scoring_fct_norm 1.0 +1019 79 loss.margin 7.230286212842681 +1019 79 negative_sampler.num_negs_per_pos 26.0 +1019 79 training.batch_size 0.0 +1019 80 model.embedding_dim 0.0 +1019 80 model.scoring_fct_norm 1.0 +1019 80 loss.margin 6.534457343823193 +1019 80 negative_sampler.num_negs_per_pos 4.0 +1019 80 training.batch_size 1.0 +1019 81 model.embedding_dim 0.0 +1019 81 model.scoring_fct_norm 1.0 +1019 81 loss.margin 1.7935817609579245 +1019 81 negative_sampler.num_negs_per_pos 41.0 +1019 81 training.batch_size 1.0 +1019 82 model.embedding_dim 1.0 +1019 82 model.scoring_fct_norm 2.0 +1019 82 loss.margin 3.6457439655562314 +1019 82 negative_sampler.num_negs_per_pos 92.0 +1019 82 training.batch_size 0.0 +1019 83 model.embedding_dim 2.0 +1019 83 model.scoring_fct_norm 1.0 +1019 83 loss.margin 8.003562749437195 +1019 83 negative_sampler.num_negs_per_pos 43.0 +1019 83 training.batch_size 2.0 +1019 84 model.embedding_dim 0.0 +1019 84 model.scoring_fct_norm 2.0 +1019 84 loss.margin 7.426885466498024 +1019 84 negative_sampler.num_negs_per_pos 47.0 +1019 84 training.batch_size 2.0 +1019 85 model.embedding_dim 1.0 +1019 85 model.scoring_fct_norm 2.0 +1019 85 loss.margin 6.442445356508821 +1019 85 negative_sampler.num_negs_per_pos 30.0 +1019 85 training.batch_size 1.0 +1019 86 model.embedding_dim 2.0 +1019 86 model.scoring_fct_norm 1.0 +1019 86 loss.margin 6.778222847584129 +1019 86 negative_sampler.num_negs_per_pos 12.0 +1019 86 training.batch_size 0.0 +1019 87 model.embedding_dim 1.0 
+1019 87 model.scoring_fct_norm 2.0 +1019 87 loss.margin 4.959939424460383 +1019 87 negative_sampler.num_negs_per_pos 75.0 +1019 87 training.batch_size 1.0 +1019 88 model.embedding_dim 2.0 +1019 88 model.scoring_fct_norm 2.0 +1019 88 loss.margin 9.907054957902654 +1019 88 negative_sampler.num_negs_per_pos 23.0 +1019 88 training.batch_size 2.0 +1019 89 model.embedding_dim 1.0 +1019 89 model.scoring_fct_norm 1.0 +1019 89 loss.margin 5.511814544220041 +1019 89 negative_sampler.num_negs_per_pos 85.0 +1019 89 training.batch_size 0.0 +1019 90 model.embedding_dim 1.0 +1019 90 model.scoring_fct_norm 2.0 +1019 90 loss.margin 8.375208943074213 +1019 90 negative_sampler.num_negs_per_pos 0.0 +1019 90 training.batch_size 2.0 +1019 91 model.embedding_dim 1.0 +1019 91 model.scoring_fct_norm 2.0 +1019 91 loss.margin 2.671934973865652 +1019 91 negative_sampler.num_negs_per_pos 88.0 +1019 91 training.batch_size 1.0 +1019 92 model.embedding_dim 2.0 +1019 92 model.scoring_fct_norm 1.0 +1019 92 loss.margin 2.021911607213813 +1019 92 negative_sampler.num_negs_per_pos 44.0 +1019 92 training.batch_size 1.0 +1019 93 model.embedding_dim 1.0 +1019 93 model.scoring_fct_norm 1.0 +1019 93 loss.margin 2.0850054050241584 +1019 93 negative_sampler.num_negs_per_pos 45.0 +1019 93 training.batch_size 2.0 +1019 94 model.embedding_dim 1.0 +1019 94 model.scoring_fct_norm 1.0 +1019 94 loss.margin 1.188300251134307 +1019 94 negative_sampler.num_negs_per_pos 13.0 +1019 94 training.batch_size 2.0 +1019 95 model.embedding_dim 0.0 +1019 95 model.scoring_fct_norm 2.0 +1019 95 loss.margin 7.609770712443388 +1019 95 negative_sampler.num_negs_per_pos 28.0 +1019 95 training.batch_size 2.0 +1019 96 model.embedding_dim 2.0 +1019 96 model.scoring_fct_norm 2.0 +1019 96 loss.margin 4.473887274571007 +1019 96 negative_sampler.num_negs_per_pos 89.0 +1019 96 training.batch_size 0.0 +1019 97 model.embedding_dim 2.0 +1019 97 model.scoring_fct_norm 1.0 +1019 97 loss.margin 4.1789198569820165 +1019 97 
negative_sampler.num_negs_per_pos 58.0 +1019 97 training.batch_size 2.0 +1019 98 model.embedding_dim 0.0 +1019 98 model.scoring_fct_norm 1.0 +1019 98 loss.margin 1.1634013334868698 +1019 98 negative_sampler.num_negs_per_pos 12.0 +1019 98 training.batch_size 0.0 +1019 99 model.embedding_dim 0.0 +1019 99 model.scoring_fct_norm 1.0 +1019 99 loss.margin 6.855815694212762 +1019 99 negative_sampler.num_negs_per_pos 97.0 +1019 99 training.batch_size 1.0 +1019 100 model.embedding_dim 2.0 +1019 100 model.scoring_fct_norm 1.0 +1019 100 loss.margin 3.5687136676773745 +1019 100 negative_sampler.num_negs_per_pos 29.0 +1019 100 training.batch_size 2.0 +1019 1 dataset """kinships""" +1019 1 model """unstructuredmodel""" +1019 1 loss """marginranking""" +1019 1 regularizer """no""" +1019 1 optimizer """adadelta""" +1019 1 training_loop """owa""" +1019 1 negative_sampler """basic""" +1019 1 evaluator """rankbased""" +1019 2 dataset """kinships""" +1019 2 model """unstructuredmodel""" +1019 2 loss """marginranking""" +1019 2 regularizer """no""" +1019 2 optimizer """adadelta""" +1019 2 training_loop """owa""" +1019 2 negative_sampler """basic""" +1019 2 evaluator """rankbased""" +1019 3 dataset """kinships""" +1019 3 model """unstructuredmodel""" +1019 3 loss """marginranking""" +1019 3 regularizer """no""" +1019 3 optimizer """adadelta""" +1019 3 training_loop """owa""" +1019 3 negative_sampler """basic""" +1019 3 evaluator """rankbased""" +1019 4 dataset """kinships""" +1019 4 model """unstructuredmodel""" +1019 4 loss """marginranking""" +1019 4 regularizer """no""" +1019 4 optimizer """adadelta""" +1019 4 training_loop """owa""" +1019 4 negative_sampler """basic""" +1019 4 evaluator """rankbased""" +1019 5 dataset """kinships""" +1019 5 model """unstructuredmodel""" +1019 5 loss """marginranking""" +1019 5 regularizer """no""" +1019 5 optimizer """adadelta""" +1019 5 training_loop """owa""" +1019 5 negative_sampler """basic""" +1019 5 evaluator """rankbased""" +1019 6 dataset 
"""kinships""" +1019 6 model """unstructuredmodel""" +1019 6 loss """marginranking""" +1019 6 regularizer """no""" +1019 6 optimizer """adadelta""" +1019 6 training_loop """owa""" +1019 6 negative_sampler """basic""" +1019 6 evaluator """rankbased""" +1019 7 dataset """kinships""" +1019 7 model """unstructuredmodel""" +1019 7 loss """marginranking""" +1019 7 regularizer """no""" +1019 7 optimizer """adadelta""" +1019 7 training_loop """owa""" +1019 7 negative_sampler """basic""" +1019 7 evaluator """rankbased""" +1019 8 dataset """kinships""" +1019 8 model """unstructuredmodel""" +1019 8 loss """marginranking""" +1019 8 regularizer """no""" +1019 8 optimizer """adadelta""" +1019 8 training_loop """owa""" +1019 8 negative_sampler """basic""" +1019 8 evaluator """rankbased""" +1019 9 dataset """kinships""" +1019 9 model """unstructuredmodel""" +1019 9 loss """marginranking""" +1019 9 regularizer """no""" +1019 9 optimizer """adadelta""" +1019 9 training_loop """owa""" +1019 9 negative_sampler """basic""" +1019 9 evaluator """rankbased""" +1019 10 dataset """kinships""" +1019 10 model """unstructuredmodel""" +1019 10 loss """marginranking""" +1019 10 regularizer """no""" +1019 10 optimizer """adadelta""" +1019 10 training_loop """owa""" +1019 10 negative_sampler """basic""" +1019 10 evaluator """rankbased""" +1019 11 dataset """kinships""" +1019 11 model """unstructuredmodel""" +1019 11 loss """marginranking""" +1019 11 regularizer """no""" +1019 11 optimizer """adadelta""" +1019 11 training_loop """owa""" +1019 11 negative_sampler """basic""" +1019 11 evaluator """rankbased""" +1019 12 dataset """kinships""" +1019 12 model """unstructuredmodel""" +1019 12 loss """marginranking""" +1019 12 regularizer """no""" +1019 12 optimizer """adadelta""" +1019 12 training_loop """owa""" +1019 12 negative_sampler """basic""" +1019 12 evaluator """rankbased""" +1019 13 dataset """kinships""" +1019 13 model """unstructuredmodel""" +1019 13 loss """marginranking""" +1019 13 
regularizer """no""" +1019 13 optimizer """adadelta""" +1019 13 training_loop """owa""" +1019 13 negative_sampler """basic""" +1019 13 evaluator """rankbased""" +1019 14 dataset """kinships""" +1019 14 model """unstructuredmodel""" +1019 14 loss """marginranking""" +1019 14 regularizer """no""" +1019 14 optimizer """adadelta""" +1019 14 training_loop """owa""" +1019 14 negative_sampler """basic""" +1019 14 evaluator """rankbased""" +1019 15 dataset """kinships""" +1019 15 model """unstructuredmodel""" +1019 15 loss """marginranking""" +1019 15 regularizer """no""" +1019 15 optimizer """adadelta""" +1019 15 training_loop """owa""" +1019 15 negative_sampler """basic""" +1019 15 evaluator """rankbased""" +1019 16 dataset """kinships""" +1019 16 model """unstructuredmodel""" +1019 16 loss """marginranking""" +1019 16 regularizer """no""" +1019 16 optimizer """adadelta""" +1019 16 training_loop """owa""" +1019 16 negative_sampler """basic""" +1019 16 evaluator """rankbased""" +1019 17 dataset """kinships""" +1019 17 model """unstructuredmodel""" +1019 17 loss """marginranking""" +1019 17 regularizer """no""" +1019 17 optimizer """adadelta""" +1019 17 training_loop """owa""" +1019 17 negative_sampler """basic""" +1019 17 evaluator """rankbased""" +1019 18 dataset """kinships""" +1019 18 model """unstructuredmodel""" +1019 18 loss """marginranking""" +1019 18 regularizer """no""" +1019 18 optimizer """adadelta""" +1019 18 training_loop """owa""" +1019 18 negative_sampler """basic""" +1019 18 evaluator """rankbased""" +1019 19 dataset """kinships""" +1019 19 model """unstructuredmodel""" +1019 19 loss """marginranking""" +1019 19 regularizer """no""" +1019 19 optimizer """adadelta""" +1019 19 training_loop """owa""" +1019 19 negative_sampler """basic""" +1019 19 evaluator """rankbased""" +1019 20 dataset """kinships""" +1019 20 model """unstructuredmodel""" +1019 20 loss """marginranking""" +1019 20 regularizer """no""" +1019 20 optimizer """adadelta""" +1019 20 
training_loop """owa""" +1019 20 negative_sampler """basic""" +1019 20 evaluator """rankbased""" +1019 21 dataset """kinships""" +1019 21 model """unstructuredmodel""" +1019 21 loss """marginranking""" +1019 21 regularizer """no""" +1019 21 optimizer """adadelta""" +1019 21 training_loop """owa""" +1019 21 negative_sampler """basic""" +1019 21 evaluator """rankbased""" +1019 22 dataset """kinships""" +1019 22 model """unstructuredmodel""" +1019 22 loss """marginranking""" +1019 22 regularizer """no""" +1019 22 optimizer """adadelta""" +1019 22 training_loop """owa""" +1019 22 negative_sampler """basic""" +1019 22 evaluator """rankbased""" +1019 23 dataset """kinships""" +1019 23 model """unstructuredmodel""" +1019 23 loss """marginranking""" +1019 23 regularizer """no""" +1019 23 optimizer """adadelta""" +1019 23 training_loop """owa""" +1019 23 negative_sampler """basic""" +1019 23 evaluator """rankbased""" +1019 24 dataset """kinships""" +1019 24 model """unstructuredmodel""" +1019 24 loss """marginranking""" +1019 24 regularizer """no""" +1019 24 optimizer """adadelta""" +1019 24 training_loop """owa""" +1019 24 negative_sampler """basic""" +1019 24 evaluator """rankbased""" +1019 25 dataset """kinships""" +1019 25 model """unstructuredmodel""" +1019 25 loss """marginranking""" +1019 25 regularizer """no""" +1019 25 optimizer """adadelta""" +1019 25 training_loop """owa""" +1019 25 negative_sampler """basic""" +1019 25 evaluator """rankbased""" +1019 26 dataset """kinships""" +1019 26 model """unstructuredmodel""" +1019 26 loss """marginranking""" +1019 26 regularizer """no""" +1019 26 optimizer """adadelta""" +1019 26 training_loop """owa""" +1019 26 negative_sampler """basic""" +1019 26 evaluator """rankbased""" +1019 27 dataset """kinships""" +1019 27 model """unstructuredmodel""" +1019 27 loss """marginranking""" +1019 27 regularizer """no""" +1019 27 optimizer """adadelta""" +1019 27 training_loop """owa""" +1019 27 negative_sampler """basic""" +1019 27 
evaluator """rankbased""" +1019 28 dataset """kinships""" +1019 28 model """unstructuredmodel""" +1019 28 loss """marginranking""" +1019 28 regularizer """no""" +1019 28 optimizer """adadelta""" +1019 28 training_loop """owa""" +1019 28 negative_sampler """basic""" +1019 28 evaluator """rankbased""" +1019 29 dataset """kinships""" +1019 29 model """unstructuredmodel""" +1019 29 loss """marginranking""" +1019 29 regularizer """no""" +1019 29 optimizer """adadelta""" +1019 29 training_loop """owa""" +1019 29 negative_sampler """basic""" +1019 29 evaluator """rankbased""" +1019 30 dataset """kinships""" +1019 30 model """unstructuredmodel""" +1019 30 loss """marginranking""" +1019 30 regularizer """no""" +1019 30 optimizer """adadelta""" +1019 30 training_loop """owa""" +1019 30 negative_sampler """basic""" +1019 30 evaluator """rankbased""" +1019 31 dataset """kinships""" +1019 31 model """unstructuredmodel""" +1019 31 loss """marginranking""" +1019 31 regularizer """no""" +1019 31 optimizer """adadelta""" +1019 31 training_loop """owa""" +1019 31 negative_sampler """basic""" +1019 31 evaluator """rankbased""" +1019 32 dataset """kinships""" +1019 32 model """unstructuredmodel""" +1019 32 loss """marginranking""" +1019 32 regularizer """no""" +1019 32 optimizer """adadelta""" +1019 32 training_loop """owa""" +1019 32 negative_sampler """basic""" +1019 32 evaluator """rankbased""" +1019 33 dataset """kinships""" +1019 33 model """unstructuredmodel""" +1019 33 loss """marginranking""" +1019 33 regularizer """no""" +1019 33 optimizer """adadelta""" +1019 33 training_loop """owa""" +1019 33 negative_sampler """basic""" +1019 33 evaluator """rankbased""" +1019 34 dataset """kinships""" +1019 34 model """unstructuredmodel""" +1019 34 loss """marginranking""" +1019 34 regularizer """no""" +1019 34 optimizer """adadelta""" +1019 34 training_loop """owa""" +1019 34 negative_sampler """basic""" +1019 34 evaluator """rankbased""" +1019 35 dataset """kinships""" +1019 35 model 
"""unstructuredmodel""" +1019 35 loss """marginranking""" +1019 35 regularizer """no""" +1019 35 optimizer """adadelta""" +1019 35 training_loop """owa""" +1019 35 negative_sampler """basic""" +1019 35 evaluator """rankbased""" +1019 36 dataset """kinships""" +1019 36 model """unstructuredmodel""" +1019 36 loss """marginranking""" +1019 36 regularizer """no""" +1019 36 optimizer """adadelta""" +1019 36 training_loop """owa""" +1019 36 negative_sampler """basic""" +1019 36 evaluator """rankbased""" +1019 37 dataset """kinships""" +1019 37 model """unstructuredmodel""" +1019 37 loss """marginranking""" +1019 37 regularizer """no""" +1019 37 optimizer """adadelta""" +1019 37 training_loop """owa""" +1019 37 negative_sampler """basic""" +1019 37 evaluator """rankbased""" +1019 38 dataset """kinships""" +1019 38 model """unstructuredmodel""" +1019 38 loss """marginranking""" +1019 38 regularizer """no""" +1019 38 optimizer """adadelta""" +1019 38 training_loop """owa""" +1019 38 negative_sampler """basic""" +1019 38 evaluator """rankbased""" +1019 39 dataset """kinships""" +1019 39 model """unstructuredmodel""" +1019 39 loss """marginranking""" +1019 39 regularizer """no""" +1019 39 optimizer """adadelta""" +1019 39 training_loop """owa""" +1019 39 negative_sampler """basic""" +1019 39 evaluator """rankbased""" +1019 40 dataset """kinships""" +1019 40 model """unstructuredmodel""" +1019 40 loss """marginranking""" +1019 40 regularizer """no""" +1019 40 optimizer """adadelta""" +1019 40 training_loop """owa""" +1019 40 negative_sampler """basic""" +1019 40 evaluator """rankbased""" +1019 41 dataset """kinships""" +1019 41 model """unstructuredmodel""" +1019 41 loss """marginranking""" +1019 41 regularizer """no""" +1019 41 optimizer """adadelta""" +1019 41 training_loop """owa""" +1019 41 negative_sampler """basic""" +1019 41 evaluator """rankbased""" +1019 42 dataset """kinships""" +1019 42 model """unstructuredmodel""" +1019 42 loss """marginranking""" +1019 42 
regularizer """no""" +1019 42 optimizer """adadelta""" +1019 42 training_loop """owa""" +1019 42 negative_sampler """basic""" +1019 42 evaluator """rankbased""" +1019 43 dataset """kinships""" +1019 43 model """unstructuredmodel""" +1019 43 loss """marginranking""" +1019 43 regularizer """no""" +1019 43 optimizer """adadelta""" +1019 43 training_loop """owa""" +1019 43 negative_sampler """basic""" +1019 43 evaluator """rankbased""" +1019 44 dataset """kinships""" +1019 44 model """unstructuredmodel""" +1019 44 loss """marginranking""" +1019 44 regularizer """no""" +1019 44 optimizer """adadelta""" +1019 44 training_loop """owa""" +1019 44 negative_sampler """basic""" +1019 44 evaluator """rankbased""" +1019 45 dataset """kinships""" +1019 45 model """unstructuredmodel""" +1019 45 loss """marginranking""" +1019 45 regularizer """no""" +1019 45 optimizer """adadelta""" +1019 45 training_loop """owa""" +1019 45 negative_sampler """basic""" +1019 45 evaluator """rankbased""" +1019 46 dataset """kinships""" +1019 46 model """unstructuredmodel""" +1019 46 loss """marginranking""" +1019 46 regularizer """no""" +1019 46 optimizer """adadelta""" +1019 46 training_loop """owa""" +1019 46 negative_sampler """basic""" +1019 46 evaluator """rankbased""" +1019 47 dataset """kinships""" +1019 47 model """unstructuredmodel""" +1019 47 loss """marginranking""" +1019 47 regularizer """no""" +1019 47 optimizer """adadelta""" +1019 47 training_loop """owa""" +1019 47 negative_sampler """basic""" +1019 47 evaluator """rankbased""" +1019 48 dataset """kinships""" +1019 48 model """unstructuredmodel""" +1019 48 loss """marginranking""" +1019 48 regularizer """no""" +1019 48 optimizer """adadelta""" +1019 48 training_loop """owa""" +1019 48 negative_sampler """basic""" +1019 48 evaluator """rankbased""" +1019 49 dataset """kinships""" +1019 49 model """unstructuredmodel""" +1019 49 loss """marginranking""" +1019 49 regularizer """no""" +1019 49 optimizer """adadelta""" +1019 49 
training_loop """owa""" +1019 49 negative_sampler """basic""" +1019 49 evaluator """rankbased""" +1019 50 dataset """kinships""" +1019 50 model """unstructuredmodel""" +1019 50 loss """marginranking""" +1019 50 regularizer """no""" +1019 50 optimizer """adadelta""" +1019 50 training_loop """owa""" +1019 50 negative_sampler """basic""" +1019 50 evaluator """rankbased""" +1019 51 dataset """kinships""" +1019 51 model """unstructuredmodel""" +1019 51 loss """marginranking""" +1019 51 regularizer """no""" +1019 51 optimizer """adadelta""" +1019 51 training_loop """owa""" +1019 51 negative_sampler """basic""" +1019 51 evaluator """rankbased""" +1019 52 dataset """kinships""" +1019 52 model """unstructuredmodel""" +1019 52 loss """marginranking""" +1019 52 regularizer """no""" +1019 52 optimizer """adadelta""" +1019 52 training_loop """owa""" +1019 52 negative_sampler """basic""" +1019 52 evaluator """rankbased""" +1019 53 dataset """kinships""" +1019 53 model """unstructuredmodel""" +1019 53 loss """marginranking""" +1019 53 regularizer """no""" +1019 53 optimizer """adadelta""" +1019 53 training_loop """owa""" +1019 53 negative_sampler """basic""" +1019 53 evaluator """rankbased""" +1019 54 dataset """kinships""" +1019 54 model """unstructuredmodel""" +1019 54 loss """marginranking""" +1019 54 regularizer """no""" +1019 54 optimizer """adadelta""" +1019 54 training_loop """owa""" +1019 54 negative_sampler """basic""" +1019 54 evaluator """rankbased""" +1019 55 dataset """kinships""" +1019 55 model """unstructuredmodel""" +1019 55 loss """marginranking""" +1019 55 regularizer """no""" +1019 55 optimizer """adadelta""" +1019 55 training_loop """owa""" +1019 55 negative_sampler """basic""" +1019 55 evaluator """rankbased""" +1019 56 dataset """kinships""" +1019 56 model """unstructuredmodel""" +1019 56 loss """marginranking""" +1019 56 regularizer """no""" +1019 56 optimizer """adadelta""" +1019 56 training_loop """owa""" +1019 56 negative_sampler """basic""" +1019 56 
evaluator """rankbased""" +1019 57 dataset """kinships""" +1019 57 model """unstructuredmodel""" +1019 57 loss """marginranking""" +1019 57 regularizer """no""" +1019 57 optimizer """adadelta""" +1019 57 training_loop """owa""" +1019 57 negative_sampler """basic""" +1019 57 evaluator """rankbased""" +1019 58 dataset """kinships""" +1019 58 model """unstructuredmodel""" +1019 58 loss """marginranking""" +1019 58 regularizer """no""" +1019 58 optimizer """adadelta""" +1019 58 training_loop """owa""" +1019 58 negative_sampler """basic""" +1019 58 evaluator """rankbased""" +1019 59 dataset """kinships""" +1019 59 model """unstructuredmodel""" +1019 59 loss """marginranking""" +1019 59 regularizer """no""" +1019 59 optimizer """adadelta""" +1019 59 training_loop """owa""" +1019 59 negative_sampler """basic""" +1019 59 evaluator """rankbased""" +1019 60 dataset """kinships""" +1019 60 model """unstructuredmodel""" +1019 60 loss """marginranking""" +1019 60 regularizer """no""" +1019 60 optimizer """adadelta""" +1019 60 training_loop """owa""" +1019 60 negative_sampler """basic""" +1019 60 evaluator """rankbased""" +1019 61 dataset """kinships""" +1019 61 model """unstructuredmodel""" +1019 61 loss """marginranking""" +1019 61 regularizer """no""" +1019 61 optimizer """adadelta""" +1019 61 training_loop """owa""" +1019 61 negative_sampler """basic""" +1019 61 evaluator """rankbased""" +1019 62 dataset """kinships""" +1019 62 model """unstructuredmodel""" +1019 62 loss """marginranking""" +1019 62 regularizer """no""" +1019 62 optimizer """adadelta""" +1019 62 training_loop """owa""" +1019 62 negative_sampler """basic""" +1019 62 evaluator """rankbased""" +1019 63 dataset """kinships""" +1019 63 model """unstructuredmodel""" +1019 63 loss """marginranking""" +1019 63 regularizer """no""" +1019 63 optimizer """adadelta""" +1019 63 training_loop """owa""" +1019 63 negative_sampler """basic""" +1019 63 evaluator """rankbased""" +1019 64 dataset """kinships""" +1019 64 model 
"""unstructuredmodel""" +1019 64 loss """marginranking""" +1019 64 regularizer """no""" +1019 64 optimizer """adadelta""" +1019 64 training_loop """owa""" +1019 64 negative_sampler """basic""" +1019 64 evaluator """rankbased""" +1019 65 dataset """kinships""" +1019 65 model """unstructuredmodel""" +1019 65 loss """marginranking""" +1019 65 regularizer """no""" +1019 65 optimizer """adadelta""" +1019 65 training_loop """owa""" +1019 65 negative_sampler """basic""" +1019 65 evaluator """rankbased""" +1019 66 dataset """kinships""" +1019 66 model """unstructuredmodel""" +1019 66 loss """marginranking""" +1019 66 regularizer """no""" +1019 66 optimizer """adadelta""" +1019 66 training_loop """owa""" +1019 66 negative_sampler """basic""" +1019 66 evaluator """rankbased""" +1019 67 dataset """kinships""" +1019 67 model """unstructuredmodel""" +1019 67 loss """marginranking""" +1019 67 regularizer """no""" +1019 67 optimizer """adadelta""" +1019 67 training_loop """owa""" +1019 67 negative_sampler """basic""" +1019 67 evaluator """rankbased""" +1019 68 dataset """kinships""" +1019 68 model """unstructuredmodel""" +1019 68 loss """marginranking""" +1019 68 regularizer """no""" +1019 68 optimizer """adadelta""" +1019 68 training_loop """owa""" +1019 68 negative_sampler """basic""" +1019 68 evaluator """rankbased""" +1019 69 dataset """kinships""" +1019 69 model """unstructuredmodel""" +1019 69 loss """marginranking""" +1019 69 regularizer """no""" +1019 69 optimizer """adadelta""" +1019 69 training_loop """owa""" +1019 69 negative_sampler """basic""" +1019 69 evaluator """rankbased""" +1019 70 dataset """kinships""" +1019 70 model """unstructuredmodel""" +1019 70 loss """marginranking""" +1019 70 regularizer """no""" +1019 70 optimizer """adadelta""" +1019 70 training_loop """owa""" +1019 70 negative_sampler """basic""" +1019 70 evaluator """rankbased""" +1019 71 dataset """kinships""" +1019 71 model """unstructuredmodel""" +1019 71 loss """marginranking""" +1019 71 
regularizer """no""" +1019 71 optimizer """adadelta""" +1019 71 training_loop """owa""" +1019 71 negative_sampler """basic""" +1019 71 evaluator """rankbased""" +1019 72 dataset """kinships""" +1019 72 model """unstructuredmodel""" +1019 72 loss """marginranking""" +1019 72 regularizer """no""" +1019 72 optimizer """adadelta""" +1019 72 training_loop """owa""" +1019 72 negative_sampler """basic""" +1019 72 evaluator """rankbased""" +1019 73 dataset """kinships""" +1019 73 model """unstructuredmodel""" +1019 73 loss """marginranking""" +1019 73 regularizer """no""" +1019 73 optimizer """adadelta""" +1019 73 training_loop """owa""" +1019 73 negative_sampler """basic""" +1019 73 evaluator """rankbased""" +1019 74 dataset """kinships""" +1019 74 model """unstructuredmodel""" +1019 74 loss """marginranking""" +1019 74 regularizer """no""" +1019 74 optimizer """adadelta""" +1019 74 training_loop """owa""" +1019 74 negative_sampler """basic""" +1019 74 evaluator """rankbased""" +1019 75 dataset """kinships""" +1019 75 model """unstructuredmodel""" +1019 75 loss """marginranking""" +1019 75 regularizer """no""" +1019 75 optimizer """adadelta""" +1019 75 training_loop """owa""" +1019 75 negative_sampler """basic""" +1019 75 evaluator """rankbased""" +1019 76 dataset """kinships""" +1019 76 model """unstructuredmodel""" +1019 76 loss """marginranking""" +1019 76 regularizer """no""" +1019 76 optimizer """adadelta""" +1019 76 training_loop """owa""" +1019 76 negative_sampler """basic""" +1019 76 evaluator """rankbased""" +1019 77 dataset """kinships""" +1019 77 model """unstructuredmodel""" +1019 77 loss """marginranking""" +1019 77 regularizer """no""" +1019 77 optimizer """adadelta""" +1019 77 training_loop """owa""" +1019 77 negative_sampler """basic""" +1019 77 evaluator """rankbased""" +1019 78 dataset """kinships""" +1019 78 model """unstructuredmodel""" +1019 78 loss """marginranking""" +1019 78 regularizer """no""" +1019 78 optimizer """adadelta""" +1019 78 
training_loop """owa""" +1019 78 negative_sampler """basic""" +1019 78 evaluator """rankbased""" +1019 79 dataset """kinships""" +1019 79 model """unstructuredmodel""" +1019 79 loss """marginranking""" +1019 79 regularizer """no""" +1019 79 optimizer """adadelta""" +1019 79 training_loop """owa""" +1019 79 negative_sampler """basic""" +1019 79 evaluator """rankbased""" +1019 80 dataset """kinships""" +1019 80 model """unstructuredmodel""" +1019 80 loss """marginranking""" +1019 80 regularizer """no""" +1019 80 optimizer """adadelta""" +1019 80 training_loop """owa""" +1019 80 negative_sampler """basic""" +1019 80 evaluator """rankbased""" +1019 81 dataset """kinships""" +1019 81 model """unstructuredmodel""" +1019 81 loss """marginranking""" +1019 81 regularizer """no""" +1019 81 optimizer """adadelta""" +1019 81 training_loop """owa""" +1019 81 negative_sampler """basic""" +1019 81 evaluator """rankbased""" +1019 82 dataset """kinships""" +1019 82 model """unstructuredmodel""" +1019 82 loss """marginranking""" +1019 82 regularizer """no""" +1019 82 optimizer """adadelta""" +1019 82 training_loop """owa""" +1019 82 negative_sampler """basic""" +1019 82 evaluator """rankbased""" +1019 83 dataset """kinships""" +1019 83 model """unstructuredmodel""" +1019 83 loss """marginranking""" +1019 83 regularizer """no""" +1019 83 optimizer """adadelta""" +1019 83 training_loop """owa""" +1019 83 negative_sampler """basic""" +1019 83 evaluator """rankbased""" +1019 84 dataset """kinships""" +1019 84 model """unstructuredmodel""" +1019 84 loss """marginranking""" +1019 84 regularizer """no""" +1019 84 optimizer """adadelta""" +1019 84 training_loop """owa""" +1019 84 negative_sampler """basic""" +1019 84 evaluator """rankbased""" +1019 85 dataset """kinships""" +1019 85 model """unstructuredmodel""" +1019 85 loss """marginranking""" +1019 85 regularizer """no""" +1019 85 optimizer """adadelta""" +1019 85 training_loop """owa""" +1019 85 negative_sampler """basic""" +1019 85 
evaluator """rankbased""" +1019 86 dataset """kinships""" +1019 86 model """unstructuredmodel""" +1019 86 loss """marginranking""" +1019 86 regularizer """no""" +1019 86 optimizer """adadelta""" +1019 86 training_loop """owa""" +1019 86 negative_sampler """basic""" +1019 86 evaluator """rankbased""" +1019 87 dataset """kinships""" +1019 87 model """unstructuredmodel""" +1019 87 loss """marginranking""" +1019 87 regularizer """no""" +1019 87 optimizer """adadelta""" +1019 87 training_loop """owa""" +1019 87 negative_sampler """basic""" +1019 87 evaluator """rankbased""" +1019 88 dataset """kinships""" +1019 88 model """unstructuredmodel""" +1019 88 loss """marginranking""" +1019 88 regularizer """no""" +1019 88 optimizer """adadelta""" +1019 88 training_loop """owa""" +1019 88 negative_sampler """basic""" +1019 88 evaluator """rankbased""" +1019 89 dataset """kinships""" +1019 89 model """unstructuredmodel""" +1019 89 loss """marginranking""" +1019 89 regularizer """no""" +1019 89 optimizer """adadelta""" +1019 89 training_loop """owa""" +1019 89 negative_sampler """basic""" +1019 89 evaluator """rankbased""" +1019 90 dataset """kinships""" +1019 90 model """unstructuredmodel""" +1019 90 loss """marginranking""" +1019 90 regularizer """no""" +1019 90 optimizer """adadelta""" +1019 90 training_loop """owa""" +1019 90 negative_sampler """basic""" +1019 90 evaluator """rankbased""" +1019 91 dataset """kinships""" +1019 91 model """unstructuredmodel""" +1019 91 loss """marginranking""" +1019 91 regularizer """no""" +1019 91 optimizer """adadelta""" +1019 91 training_loop """owa""" +1019 91 negative_sampler """basic""" +1019 91 evaluator """rankbased""" +1019 92 dataset """kinships""" +1019 92 model """unstructuredmodel""" +1019 92 loss """marginranking""" +1019 92 regularizer """no""" +1019 92 optimizer """adadelta""" +1019 92 training_loop """owa""" +1019 92 negative_sampler """basic""" +1019 92 evaluator """rankbased""" +1019 93 dataset """kinships""" +1019 93 model 
"""unstructuredmodel""" +1019 93 loss """marginranking""" +1019 93 regularizer """no""" +1019 93 optimizer """adadelta""" +1019 93 training_loop """owa""" +1019 93 negative_sampler """basic""" +1019 93 evaluator """rankbased""" +1019 94 dataset """kinships""" +1019 94 model """unstructuredmodel""" +1019 94 loss """marginranking""" +1019 94 regularizer """no""" +1019 94 optimizer """adadelta""" +1019 94 training_loop """owa""" +1019 94 negative_sampler """basic""" +1019 94 evaluator """rankbased""" +1019 95 dataset """kinships""" +1019 95 model """unstructuredmodel""" +1019 95 loss """marginranking""" +1019 95 regularizer """no""" +1019 95 optimizer """adadelta""" +1019 95 training_loop """owa""" +1019 95 negative_sampler """basic""" +1019 95 evaluator """rankbased""" +1019 96 dataset """kinships""" +1019 96 model """unstructuredmodel""" +1019 96 loss """marginranking""" +1019 96 regularizer """no""" +1019 96 optimizer """adadelta""" +1019 96 training_loop """owa""" +1019 96 negative_sampler """basic""" +1019 96 evaluator """rankbased""" +1019 97 dataset """kinships""" +1019 97 model """unstructuredmodel""" +1019 97 loss """marginranking""" +1019 97 regularizer """no""" +1019 97 optimizer """adadelta""" +1019 97 training_loop """owa""" +1019 97 negative_sampler """basic""" +1019 97 evaluator """rankbased""" +1019 98 dataset """kinships""" +1019 98 model """unstructuredmodel""" +1019 98 loss """marginranking""" +1019 98 regularizer """no""" +1019 98 optimizer """adadelta""" +1019 98 training_loop """owa""" +1019 98 negative_sampler """basic""" +1019 98 evaluator """rankbased""" +1019 99 dataset """kinships""" +1019 99 model """unstructuredmodel""" +1019 99 loss """marginranking""" +1019 99 regularizer """no""" +1019 99 optimizer """adadelta""" +1019 99 training_loop """owa""" +1019 99 negative_sampler """basic""" +1019 99 evaluator """rankbased""" +1019 100 dataset """kinships""" +1019 100 model """unstructuredmodel""" +1019 100 loss """marginranking""" +1019 100 
regularizer """no""" +1019 100 optimizer """adadelta""" +1019 100 training_loop """owa""" +1019 100 negative_sampler """basic""" +1019 100 evaluator """rankbased""" +1020 1 model.embedding_dim 0.0 +1020 1 model.scoring_fct_norm 1.0 +1020 1 optimizer.lr 0.017201459783924823 +1020 1 training.batch_size 0.0 +1020 1 training.label_smoothing 0.0030459715342517502 +1020 2 model.embedding_dim 2.0 +1020 2 model.scoring_fct_norm 1.0 +1020 2 optimizer.lr 0.0016068061792123209 +1020 2 training.batch_size 2.0 +1020 2 training.label_smoothing 0.009850968872898037 +1020 3 model.embedding_dim 2.0 +1020 3 model.scoring_fct_norm 2.0 +1020 3 optimizer.lr 0.0600255116230396 +1020 3 training.batch_size 1.0 +1020 3 training.label_smoothing 0.003935394423097281 +1020 4 model.embedding_dim 0.0 +1020 4 model.scoring_fct_norm 2.0 +1020 4 optimizer.lr 0.019724448831440828 +1020 4 training.batch_size 2.0 +1020 4 training.label_smoothing 0.0022436821696885777 +1020 5 model.embedding_dim 2.0 +1020 5 model.scoring_fct_norm 2.0 +1020 5 optimizer.lr 0.001139375878315003 +1020 5 training.batch_size 1.0 +1020 5 training.label_smoothing 0.8075589260841313 +1020 6 model.embedding_dim 0.0 +1020 6 model.scoring_fct_norm 1.0 +1020 6 optimizer.lr 0.00212504739136541 +1020 6 training.batch_size 2.0 +1020 6 training.label_smoothing 0.05562445454209144 +1020 7 model.embedding_dim 0.0 +1020 7 model.scoring_fct_norm 1.0 +1020 7 optimizer.lr 0.029219042924364048 +1020 7 training.batch_size 2.0 +1020 7 training.label_smoothing 0.14558690755764495 +1020 8 model.embedding_dim 0.0 +1020 8 model.scoring_fct_norm 1.0 +1020 8 optimizer.lr 0.01944961424480479 +1020 8 training.batch_size 2.0 +1020 8 training.label_smoothing 0.09014187340745347 +1020 9 model.embedding_dim 1.0 +1020 9 model.scoring_fct_norm 2.0 +1020 9 optimizer.lr 0.0010575423203336533 +1020 9 training.batch_size 2.0 +1020 9 training.label_smoothing 0.6549965936988649 +1020 10 model.embedding_dim 0.0 +1020 10 model.scoring_fct_norm 1.0 +1020 10 
optimizer.lr 0.0726440925896387 +1020 10 training.batch_size 1.0 +1020 10 training.label_smoothing 0.007392495669448527 +1020 11 model.embedding_dim 1.0 +1020 11 model.scoring_fct_norm 2.0 +1020 11 optimizer.lr 0.07125285354400028 +1020 11 training.batch_size 1.0 +1020 11 training.label_smoothing 0.753120243483761 +1020 12 model.embedding_dim 0.0 +1020 12 model.scoring_fct_norm 2.0 +1020 12 optimizer.lr 0.001969115747044389 +1020 12 training.batch_size 0.0 +1020 12 training.label_smoothing 0.0014406346485621412 +1020 13 model.embedding_dim 2.0 +1020 13 model.scoring_fct_norm 2.0 +1020 13 optimizer.lr 0.013224707668950335 +1020 13 training.batch_size 2.0 +1020 13 training.label_smoothing 0.001960171825742412 +1020 14 model.embedding_dim 2.0 +1020 14 model.scoring_fct_norm 1.0 +1020 14 optimizer.lr 0.027137001121601674 +1020 14 training.batch_size 0.0 +1020 14 training.label_smoothing 0.007441979660543032 +1020 15 model.embedding_dim 2.0 +1020 15 model.scoring_fct_norm 2.0 +1020 15 optimizer.lr 0.0032234778566991532 +1020 15 training.batch_size 2.0 +1020 15 training.label_smoothing 0.03648715833733988 +1020 16 model.embedding_dim 2.0 +1020 16 model.scoring_fct_norm 1.0 +1020 16 optimizer.lr 0.005352445357697727 +1020 16 training.batch_size 0.0 +1020 16 training.label_smoothing 0.0019712682122287697 +1020 17 model.embedding_dim 0.0 +1020 17 model.scoring_fct_norm 1.0 +1020 17 optimizer.lr 0.0014690728326266457 +1020 17 training.batch_size 0.0 +1020 17 training.label_smoothing 0.05026782030037603 +1020 18 model.embedding_dim 2.0 +1020 18 model.scoring_fct_norm 1.0 +1020 18 optimizer.lr 0.001579181794543177 +1020 18 training.batch_size 0.0 +1020 18 training.label_smoothing 0.005398508300319347 +1020 19 model.embedding_dim 2.0 +1020 19 model.scoring_fct_norm 2.0 +1020 19 optimizer.lr 0.01696866850809608 +1020 19 training.batch_size 2.0 +1020 19 training.label_smoothing 0.7058002813161316 +1020 20 model.embedding_dim 1.0 +1020 20 model.scoring_fct_norm 1.0 +1020 20 
optimizer.lr 0.003525725633821904 +1020 20 training.batch_size 0.0 +1020 20 training.label_smoothing 0.2671659961333982 +1020 21 model.embedding_dim 0.0 +1020 21 model.scoring_fct_norm 2.0 +1020 21 optimizer.lr 0.05439989239396657 +1020 21 training.batch_size 2.0 +1020 21 training.label_smoothing 0.19512390196605753 +1020 22 model.embedding_dim 1.0 +1020 22 model.scoring_fct_norm 1.0 +1020 22 optimizer.lr 0.0010643664717045775 +1020 22 training.batch_size 2.0 +1020 22 training.label_smoothing 0.005613470125394456 +1020 23 model.embedding_dim 2.0 +1020 23 model.scoring_fct_norm 1.0 +1020 23 optimizer.lr 0.0014244970350453243 +1020 23 training.batch_size 2.0 +1020 23 training.label_smoothing 0.0015365623357396377 +1020 24 model.embedding_dim 2.0 +1020 24 model.scoring_fct_norm 1.0 +1020 24 optimizer.lr 0.03856335204128142 +1020 24 training.batch_size 1.0 +1020 24 training.label_smoothing 0.021719183709957946 +1020 25 model.embedding_dim 0.0 +1020 25 model.scoring_fct_norm 1.0 +1020 25 optimizer.lr 0.05177082115064293 +1020 25 training.batch_size 1.0 +1020 25 training.label_smoothing 0.16483075955187765 +1020 26 model.embedding_dim 1.0 +1020 26 model.scoring_fct_norm 1.0 +1020 26 optimizer.lr 0.006351835097686101 +1020 26 training.batch_size 1.0 +1020 26 training.label_smoothing 0.09578800870739276 +1020 27 model.embedding_dim 1.0 +1020 27 model.scoring_fct_norm 1.0 +1020 27 optimizer.lr 0.0012245617368412509 +1020 27 training.batch_size 2.0 +1020 27 training.label_smoothing 0.03073528178789623 +1020 28 model.embedding_dim 1.0 +1020 28 model.scoring_fct_norm 2.0 +1020 28 optimizer.lr 0.015705745820521817 +1020 28 training.batch_size 0.0 +1020 28 training.label_smoothing 0.027838946205083946 +1020 29 model.embedding_dim 1.0 +1020 29 model.scoring_fct_norm 2.0 +1020 29 optimizer.lr 0.029940091507850385 +1020 29 training.batch_size 1.0 +1020 29 training.label_smoothing 0.008544214264327827 +1020 30 model.embedding_dim 2.0 +1020 30 model.scoring_fct_norm 2.0 +1020 30 
optimizer.lr 0.033554535562970694 +1020 30 training.batch_size 0.0 +1020 30 training.label_smoothing 0.04756330505260439 +1020 31 model.embedding_dim 2.0 +1020 31 model.scoring_fct_norm 2.0 +1020 31 optimizer.lr 0.0014994501895500177 +1020 31 training.batch_size 1.0 +1020 31 training.label_smoothing 0.15216021008823247 +1020 32 model.embedding_dim 1.0 +1020 32 model.scoring_fct_norm 1.0 +1020 32 optimizer.lr 0.010151929947932223 +1020 32 training.batch_size 0.0 +1020 32 training.label_smoothing 0.0012167370397733568 +1020 33 model.embedding_dim 0.0 +1020 33 model.scoring_fct_norm 2.0 +1020 33 optimizer.lr 0.06684992698688243 +1020 33 training.batch_size 1.0 +1020 33 training.label_smoothing 0.15284262412605606 +1020 34 model.embedding_dim 0.0 +1020 34 model.scoring_fct_norm 1.0 +1020 34 optimizer.lr 0.010029318978207512 +1020 34 training.batch_size 0.0 +1020 34 training.label_smoothing 0.11637381616519263 +1020 35 model.embedding_dim 2.0 +1020 35 model.scoring_fct_norm 1.0 +1020 35 optimizer.lr 0.09128976475654885 +1020 35 training.batch_size 0.0 +1020 35 training.label_smoothing 0.5064869013022424 +1020 36 model.embedding_dim 2.0 +1020 36 model.scoring_fct_norm 1.0 +1020 36 optimizer.lr 0.004131305402130111 +1020 36 training.batch_size 1.0 +1020 36 training.label_smoothing 0.3532551553946712 +1020 37 model.embedding_dim 1.0 +1020 37 model.scoring_fct_norm 1.0 +1020 37 optimizer.lr 0.047062321230858864 +1020 37 training.batch_size 2.0 +1020 37 training.label_smoothing 0.11202593887182023 +1020 38 model.embedding_dim 1.0 +1020 38 model.scoring_fct_norm 1.0 +1020 38 optimizer.lr 0.0015812364145628636 +1020 38 training.batch_size 1.0 +1020 38 training.label_smoothing 0.07642490493051594 +1020 39 model.embedding_dim 2.0 +1020 39 model.scoring_fct_norm 1.0 +1020 39 optimizer.lr 0.009547562054855864 +1020 39 training.batch_size 2.0 +1020 39 training.label_smoothing 0.005449454171366146 +1020 40 model.embedding_dim 0.0 +1020 40 model.scoring_fct_norm 1.0 +1020 40 
optimizer.lr 0.062348085779015384 +1020 40 training.batch_size 1.0 +1020 40 training.label_smoothing 0.03481924812991064 +1020 41 model.embedding_dim 2.0 +1020 41 model.scoring_fct_norm 1.0 +1020 41 optimizer.lr 0.001449986672679459 +1020 41 training.batch_size 0.0 +1020 41 training.label_smoothing 0.059994957105804915 +1020 42 model.embedding_dim 2.0 +1020 42 model.scoring_fct_norm 1.0 +1020 42 optimizer.lr 0.024440561202767285 +1020 42 training.batch_size 1.0 +1020 42 training.label_smoothing 0.014699252901726575 +1020 43 model.embedding_dim 0.0 +1020 43 model.scoring_fct_norm 2.0 +1020 43 optimizer.lr 0.007239971437186424 +1020 43 training.batch_size 1.0 +1020 43 training.label_smoothing 0.0014109460406567316 +1020 44 model.embedding_dim 1.0 +1020 44 model.scoring_fct_norm 2.0 +1020 44 optimizer.lr 0.05279217129450893 +1020 44 training.batch_size 1.0 +1020 44 training.label_smoothing 0.4185473799697064 +1020 45 model.embedding_dim 1.0 +1020 45 model.scoring_fct_norm 1.0 +1020 45 optimizer.lr 0.013291278343792866 +1020 45 training.batch_size 1.0 +1020 45 training.label_smoothing 0.004360932273060105 +1020 46 model.embedding_dim 0.0 +1020 46 model.scoring_fct_norm 2.0 +1020 46 optimizer.lr 0.04584620216608561 +1020 46 training.batch_size 2.0 +1020 46 training.label_smoothing 0.08453022175556911 +1020 47 model.embedding_dim 1.0 +1020 47 model.scoring_fct_norm 2.0 +1020 47 optimizer.lr 0.0011297606997013137 +1020 47 training.batch_size 1.0 +1020 47 training.label_smoothing 0.012283753937039083 +1020 48 model.embedding_dim 0.0 +1020 48 model.scoring_fct_norm 2.0 +1020 48 optimizer.lr 0.059191987400694535 +1020 48 training.batch_size 1.0 +1020 48 training.label_smoothing 0.003028320249049413 +1020 49 model.embedding_dim 1.0 +1020 49 model.scoring_fct_norm 1.0 +1020 49 optimizer.lr 0.05855165890545518 +1020 49 training.batch_size 2.0 +1020 49 training.label_smoothing 0.003660528382311119 +1020 50 model.embedding_dim 0.0 +1020 50 model.scoring_fct_norm 2.0 +1020 50 
optimizer.lr 0.0037149116882506284 +1020 50 training.batch_size 0.0 +1020 50 training.label_smoothing 0.01768792773173763 +1020 51 model.embedding_dim 1.0 +1020 51 model.scoring_fct_norm 2.0 +1020 51 optimizer.lr 0.002183394465041026 +1020 51 training.batch_size 2.0 +1020 51 training.label_smoothing 0.08854945625283528 +1020 52 model.embedding_dim 1.0 +1020 52 model.scoring_fct_norm 1.0 +1020 52 optimizer.lr 0.011832857643009592 +1020 52 training.batch_size 2.0 +1020 52 training.label_smoothing 0.0010718095243653304 +1020 53 model.embedding_dim 0.0 +1020 53 model.scoring_fct_norm 2.0 +1020 53 optimizer.lr 0.04548635321675551 +1020 53 training.batch_size 2.0 +1020 53 training.label_smoothing 0.844713280720985 +1020 54 model.embedding_dim 1.0 +1020 54 model.scoring_fct_norm 1.0 +1020 54 optimizer.lr 0.0024280843492286933 +1020 54 training.batch_size 0.0 +1020 54 training.label_smoothing 0.0013655395148915671 +1020 55 model.embedding_dim 0.0 +1020 55 model.scoring_fct_norm 1.0 +1020 55 optimizer.lr 0.003837913621023967 +1020 55 training.batch_size 0.0 +1020 55 training.label_smoothing 0.10922862400271316 +1020 56 model.embedding_dim 0.0 +1020 56 model.scoring_fct_norm 2.0 +1020 56 optimizer.lr 0.004357830049844721 +1020 56 training.batch_size 1.0 +1020 56 training.label_smoothing 0.1935354948270249 +1020 57 model.embedding_dim 0.0 +1020 57 model.scoring_fct_norm 2.0 +1020 57 optimizer.lr 0.025775775193221544 +1020 57 training.batch_size 2.0 +1020 57 training.label_smoothing 0.791439737677091 +1020 58 model.embedding_dim 2.0 +1020 58 model.scoring_fct_norm 2.0 +1020 58 optimizer.lr 0.0063398791679632974 +1020 58 training.batch_size 2.0 +1020 58 training.label_smoothing 0.26876662390877487 +1020 59 model.embedding_dim 0.0 +1020 59 model.scoring_fct_norm 1.0 +1020 59 optimizer.lr 0.033924761437458414 +1020 59 training.batch_size 0.0 +1020 59 training.label_smoothing 0.0013488244993746126 +1020 60 model.embedding_dim 1.0 +1020 60 model.scoring_fct_norm 1.0 +1020 60 
optimizer.lr 0.01177885440837956 +1020 60 training.batch_size 1.0 +1020 60 training.label_smoothing 0.041041770018106424 +1020 61 model.embedding_dim 2.0 +1020 61 model.scoring_fct_norm 1.0 +1020 61 optimizer.lr 0.01468874361432978 +1020 61 training.batch_size 0.0 +1020 61 training.label_smoothing 0.07891491661401781 +1020 62 model.embedding_dim 1.0 +1020 62 model.scoring_fct_norm 1.0 +1020 62 optimizer.lr 0.043961855705871265 +1020 62 training.batch_size 0.0 +1020 62 training.label_smoothing 0.37416070986266825 +1020 63 model.embedding_dim 2.0 +1020 63 model.scoring_fct_norm 2.0 +1020 63 optimizer.lr 0.03718352469207001 +1020 63 training.batch_size 1.0 +1020 63 training.label_smoothing 0.24203208544501778 +1020 64 model.embedding_dim 2.0 +1020 64 model.scoring_fct_norm 1.0 +1020 64 optimizer.lr 0.013389140679132553 +1020 64 training.batch_size 0.0 +1020 64 training.label_smoothing 0.1396623151037372 +1020 65 model.embedding_dim 1.0 +1020 65 model.scoring_fct_norm 1.0 +1020 65 optimizer.lr 0.0071174290337861 +1020 65 training.batch_size 0.0 +1020 65 training.label_smoothing 0.001429015579243391 +1020 66 model.embedding_dim 1.0 +1020 66 model.scoring_fct_norm 1.0 +1020 66 optimizer.lr 0.0026115559658472826 +1020 66 training.batch_size 1.0 +1020 66 training.label_smoothing 0.003457034751465174 +1020 67 model.embedding_dim 0.0 +1020 67 model.scoring_fct_norm 2.0 +1020 67 optimizer.lr 0.06299042571874158 +1020 67 training.batch_size 2.0 +1020 67 training.label_smoothing 0.0028637992245828738 +1020 68 model.embedding_dim 1.0 +1020 68 model.scoring_fct_norm 1.0 +1020 68 optimizer.lr 0.04158941488774733 +1020 68 training.batch_size 0.0 +1020 68 training.label_smoothing 0.4272596831945946 +1020 69 model.embedding_dim 1.0 +1020 69 model.scoring_fct_norm 1.0 +1020 69 optimizer.lr 0.0023262308662511323 +1020 69 training.batch_size 2.0 +1020 69 training.label_smoothing 0.042466716231661816 +1020 70 model.embedding_dim 0.0 +1020 70 model.scoring_fct_norm 1.0 +1020 70 
optimizer.lr 0.0036088535670937873 +1020 70 training.batch_size 1.0 +1020 70 training.label_smoothing 0.484692228620739 +1020 71 model.embedding_dim 1.0 +1020 71 model.scoring_fct_norm 1.0 +1020 71 optimizer.lr 0.0015477991120176392 +1020 71 training.batch_size 2.0 +1020 71 training.label_smoothing 0.12160546218804644 +1020 72 model.embedding_dim 1.0 +1020 72 model.scoring_fct_norm 1.0 +1020 72 optimizer.lr 0.01194159276126514 +1020 72 training.batch_size 2.0 +1020 72 training.label_smoothing 0.5907025472113372 +1020 73 model.embedding_dim 2.0 +1020 73 model.scoring_fct_norm 1.0 +1020 73 optimizer.lr 0.0053438967677091696 +1020 73 training.batch_size 2.0 +1020 73 training.label_smoothing 0.002303278661736677 +1020 74 model.embedding_dim 0.0 +1020 74 model.scoring_fct_norm 1.0 +1020 74 optimizer.lr 0.04041932431747894 +1020 74 training.batch_size 2.0 +1020 74 training.label_smoothing 0.011109202182035026 +1020 75 model.embedding_dim 2.0 +1020 75 model.scoring_fct_norm 2.0 +1020 75 optimizer.lr 0.0016847024584335701 +1020 75 training.batch_size 1.0 +1020 75 training.label_smoothing 0.16924767890563855 +1020 76 model.embedding_dim 1.0 +1020 76 model.scoring_fct_norm 1.0 +1020 76 optimizer.lr 0.002065271357286494 +1020 76 training.batch_size 1.0 +1020 76 training.label_smoothing 0.7596318837780802 +1020 77 model.embedding_dim 2.0 +1020 77 model.scoring_fct_norm 1.0 +1020 77 optimizer.lr 0.0019276589699653522 +1020 77 training.batch_size 2.0 +1020 77 training.label_smoothing 0.041271758758489985 +1020 78 model.embedding_dim 0.0 +1020 78 model.scoring_fct_norm 1.0 +1020 78 optimizer.lr 0.002942278339464383 +1020 78 training.batch_size 2.0 +1020 78 training.label_smoothing 0.5953957807503811 +1020 79 model.embedding_dim 1.0 +1020 79 model.scoring_fct_norm 1.0 +1020 79 optimizer.lr 0.0034200385523554338 +1020 79 training.batch_size 1.0 +1020 79 training.label_smoothing 0.6055812781182768 +1020 80 model.embedding_dim 1.0 +1020 80 model.scoring_fct_norm 2.0 +1020 80 
optimizer.lr 0.015821026836034712 +1020 80 training.batch_size 1.0 +1020 80 training.label_smoothing 0.23129552890186333 +1020 81 model.embedding_dim 1.0 +1020 81 model.scoring_fct_norm 1.0 +1020 81 optimizer.lr 0.003173523606878899 +1020 81 training.batch_size 1.0 +1020 81 training.label_smoothing 0.001174029997704264 +1020 82 model.embedding_dim 0.0 +1020 82 model.scoring_fct_norm 1.0 +1020 82 optimizer.lr 0.07171896669250274 +1020 82 training.batch_size 1.0 +1020 82 training.label_smoothing 0.15808758310962034 +1020 83 model.embedding_dim 0.0 +1020 83 model.scoring_fct_norm 1.0 +1020 83 optimizer.lr 0.004876987102210069 +1020 83 training.batch_size 0.0 +1020 83 training.label_smoothing 0.015029228713339638 +1020 84 model.embedding_dim 1.0 +1020 84 model.scoring_fct_norm 2.0 +1020 84 optimizer.lr 0.008149825538595664 +1020 84 training.batch_size 0.0 +1020 84 training.label_smoothing 0.0012644684720448138 +1020 85 model.embedding_dim 0.0 +1020 85 model.scoring_fct_norm 1.0 +1020 85 optimizer.lr 0.08846579089209539 +1020 85 training.batch_size 1.0 +1020 85 training.label_smoothing 0.14875211768745564 +1020 86 model.embedding_dim 2.0 +1020 86 model.scoring_fct_norm 2.0 +1020 86 optimizer.lr 0.001657819820405812 +1020 86 training.batch_size 0.0 +1020 86 training.label_smoothing 0.41979471227083237 +1020 87 model.embedding_dim 1.0 +1020 87 model.scoring_fct_norm 1.0 +1020 87 optimizer.lr 0.010467336560363385 +1020 87 training.batch_size 2.0 +1020 87 training.label_smoothing 0.1587327770315632 +1020 88 model.embedding_dim 0.0 +1020 88 model.scoring_fct_norm 2.0 +1020 88 optimizer.lr 0.003979545837634972 +1020 88 training.batch_size 2.0 +1020 88 training.label_smoothing 0.0010207226355624498 +1020 89 model.embedding_dim 2.0 +1020 89 model.scoring_fct_norm 1.0 +1020 89 optimizer.lr 0.0010177449418799658 +1020 89 training.batch_size 0.0 +1020 89 training.label_smoothing 0.16764653407820884 +1020 90 model.embedding_dim 2.0 +1020 90 model.scoring_fct_norm 1.0 +1020 90 
optimizer.lr 0.02798626551397788 +1020 90 training.batch_size 2.0 +1020 90 training.label_smoothing 0.22414955477853046 +1020 91 model.embedding_dim 1.0 +1020 91 model.scoring_fct_norm 1.0 +1020 91 optimizer.lr 0.048156155208889245 +1020 91 training.batch_size 2.0 +1020 91 training.label_smoothing 0.41234718879677434 +1020 92 model.embedding_dim 1.0 +1020 92 model.scoring_fct_norm 2.0 +1020 92 optimizer.lr 0.03837738656454792 +1020 92 training.batch_size 2.0 +1020 92 training.label_smoothing 0.03607715173837283 +1020 93 model.embedding_dim 2.0 +1020 93 model.scoring_fct_norm 1.0 +1020 93 optimizer.lr 0.0012138883446805987 +1020 93 training.batch_size 0.0 +1020 93 training.label_smoothing 0.0070350907918327885 +1020 94 model.embedding_dim 0.0 +1020 94 model.scoring_fct_norm 2.0 +1020 94 optimizer.lr 0.0821794894489537 +1020 94 training.batch_size 2.0 +1020 94 training.label_smoothing 0.11674913076241795 +1020 95 model.embedding_dim 2.0 +1020 95 model.scoring_fct_norm 2.0 +1020 95 optimizer.lr 0.005870241413902765 +1020 95 training.batch_size 0.0 +1020 95 training.label_smoothing 0.00335445464907668 +1020 96 model.embedding_dim 2.0 +1020 96 model.scoring_fct_norm 2.0 +1020 96 optimizer.lr 0.007548715247786369 +1020 96 training.batch_size 1.0 +1020 96 training.label_smoothing 0.007122578138076568 +1020 97 model.embedding_dim 1.0 +1020 97 model.scoring_fct_norm 1.0 +1020 97 optimizer.lr 0.016365832054909105 +1020 97 training.batch_size 1.0 +1020 97 training.label_smoothing 0.006367265513709687 +1020 98 model.embedding_dim 2.0 +1020 98 model.scoring_fct_norm 1.0 +1020 98 optimizer.lr 0.0018137452909367455 +1020 98 training.batch_size 0.0 +1020 98 training.label_smoothing 0.09735705024769291 +1020 99 model.embedding_dim 2.0 +1020 99 model.scoring_fct_norm 2.0 +1020 99 optimizer.lr 0.06238936464803121 +1020 99 training.batch_size 2.0 +1020 99 training.label_smoothing 0.016233163703432307 +1020 100 model.embedding_dim 2.0 +1020 100 model.scoring_fct_norm 1.0 +1020 100 
optimizer.lr 0.057474598591413344 +1020 100 training.batch_size 0.0 +1020 100 training.label_smoothing 0.008263358067785364 +1020 1 dataset """kinships""" +1020 1 model """unstructuredmodel""" +1020 1 loss """bceaftersigmoid""" +1020 1 regularizer """no""" +1020 1 optimizer """adam""" +1020 1 training_loop """lcwa""" +1020 1 evaluator """rankbased""" +1020 2 dataset """kinships""" +1020 2 model """unstructuredmodel""" +1020 2 loss """bceaftersigmoid""" +1020 2 regularizer """no""" +1020 2 optimizer """adam""" +1020 2 training_loop """lcwa""" +1020 2 evaluator """rankbased""" +1020 3 dataset """kinships""" +1020 3 model """unstructuredmodel""" +1020 3 loss """bceaftersigmoid""" +1020 3 regularizer """no""" +1020 3 optimizer """adam""" +1020 3 training_loop """lcwa""" +1020 3 evaluator """rankbased""" +1020 4 dataset """kinships""" +1020 4 model """unstructuredmodel""" +1020 4 loss """bceaftersigmoid""" +1020 4 regularizer """no""" +1020 4 optimizer """adam""" +1020 4 training_loop """lcwa""" +1020 4 evaluator """rankbased""" +1020 5 dataset """kinships""" +1020 5 model """unstructuredmodel""" +1020 5 loss """bceaftersigmoid""" +1020 5 regularizer """no""" +1020 5 optimizer """adam""" +1020 5 training_loop """lcwa""" +1020 5 evaluator """rankbased""" +1020 6 dataset """kinships""" +1020 6 model """unstructuredmodel""" +1020 6 loss """bceaftersigmoid""" +1020 6 regularizer """no""" +1020 6 optimizer """adam""" +1020 6 training_loop """lcwa""" +1020 6 evaluator """rankbased""" +1020 7 dataset """kinships""" +1020 7 model """unstructuredmodel""" +1020 7 loss """bceaftersigmoid""" +1020 7 regularizer """no""" +1020 7 optimizer """adam""" +1020 7 training_loop """lcwa""" +1020 7 evaluator """rankbased""" +1020 8 dataset """kinships""" +1020 8 model """unstructuredmodel""" +1020 8 loss """bceaftersigmoid""" +1020 8 regularizer """no""" +1020 8 optimizer """adam""" +1020 8 training_loop """lcwa""" +1020 8 evaluator """rankbased""" +1020 9 dataset """kinships""" +1020 9 
model """unstructuredmodel""" +1020 9 loss """bceaftersigmoid""" +1020 9 regularizer """no""" +1020 9 optimizer """adam""" +1020 9 training_loop """lcwa""" +1020 9 evaluator """rankbased""" +1020 10 dataset """kinships""" +1020 10 model """unstructuredmodel""" +1020 10 loss """bceaftersigmoid""" +1020 10 regularizer """no""" +1020 10 optimizer """adam""" +1020 10 training_loop """lcwa""" +1020 10 evaluator """rankbased""" +1020 11 dataset """kinships""" +1020 11 model """unstructuredmodel""" +1020 11 loss """bceaftersigmoid""" +1020 11 regularizer """no""" +1020 11 optimizer """adam""" +1020 11 training_loop """lcwa""" +1020 11 evaluator """rankbased""" +1020 12 dataset """kinships""" +1020 12 model """unstructuredmodel""" +1020 12 loss """bceaftersigmoid""" +1020 12 regularizer """no""" +1020 12 optimizer """adam""" +1020 12 training_loop """lcwa""" +1020 12 evaluator """rankbased""" +1020 13 dataset """kinships""" +1020 13 model """unstructuredmodel""" +1020 13 loss """bceaftersigmoid""" +1020 13 regularizer """no""" +1020 13 optimizer """adam""" +1020 13 training_loop """lcwa""" +1020 13 evaluator """rankbased""" +1020 14 dataset """kinships""" +1020 14 model """unstructuredmodel""" +1020 14 loss """bceaftersigmoid""" +1020 14 regularizer """no""" +1020 14 optimizer """adam""" +1020 14 training_loop """lcwa""" +1020 14 evaluator """rankbased""" +1020 15 dataset """kinships""" +1020 15 model """unstructuredmodel""" +1020 15 loss """bceaftersigmoid""" +1020 15 regularizer """no""" +1020 15 optimizer """adam""" +1020 15 training_loop """lcwa""" +1020 15 evaluator """rankbased""" +1020 16 dataset """kinships""" +1020 16 model """unstructuredmodel""" +1020 16 loss """bceaftersigmoid""" +1020 16 regularizer """no""" +1020 16 optimizer """adam""" +1020 16 training_loop """lcwa""" +1020 16 evaluator """rankbased""" +1020 17 dataset """kinships""" +1020 17 model """unstructuredmodel""" +1020 17 loss """bceaftersigmoid""" +1020 17 regularizer """no""" +1020 17 optimizer 
"""adam""" +1020 17 training_loop """lcwa""" +1020 17 evaluator """rankbased""" +1020 18 dataset """kinships""" +1020 18 model """unstructuredmodel""" +1020 18 loss """bceaftersigmoid""" +1020 18 regularizer """no""" +1020 18 optimizer """adam""" +1020 18 training_loop """lcwa""" +1020 18 evaluator """rankbased""" +1020 19 dataset """kinships""" +1020 19 model """unstructuredmodel""" +1020 19 loss """bceaftersigmoid""" +1020 19 regularizer """no""" +1020 19 optimizer """adam""" +1020 19 training_loop """lcwa""" +1020 19 evaluator """rankbased""" +1020 20 dataset """kinships""" +1020 20 model """unstructuredmodel""" +1020 20 loss """bceaftersigmoid""" +1020 20 regularizer """no""" +1020 20 optimizer """adam""" +1020 20 training_loop """lcwa""" +1020 20 evaluator """rankbased""" +1020 21 dataset """kinships""" +1020 21 model """unstructuredmodel""" +1020 21 loss """bceaftersigmoid""" +1020 21 regularizer """no""" +1020 21 optimizer """adam""" +1020 21 training_loop """lcwa""" +1020 21 evaluator """rankbased""" +1020 22 dataset """kinships""" +1020 22 model """unstructuredmodel""" +1020 22 loss """bceaftersigmoid""" +1020 22 regularizer """no""" +1020 22 optimizer """adam""" +1020 22 training_loop """lcwa""" +1020 22 evaluator """rankbased""" +1020 23 dataset """kinships""" +1020 23 model """unstructuredmodel""" +1020 23 loss """bceaftersigmoid""" +1020 23 regularizer """no""" +1020 23 optimizer """adam""" +1020 23 training_loop """lcwa""" +1020 23 evaluator """rankbased""" +1020 24 dataset """kinships""" +1020 24 model """unstructuredmodel""" +1020 24 loss """bceaftersigmoid""" +1020 24 regularizer """no""" +1020 24 optimizer """adam""" +1020 24 training_loop """lcwa""" +1020 24 evaluator """rankbased""" +1020 25 dataset """kinships""" +1020 25 model """unstructuredmodel""" +1020 25 loss """bceaftersigmoid""" +1020 25 regularizer """no""" +1020 25 optimizer """adam""" +1020 25 training_loop """lcwa""" +1020 25 evaluator """rankbased""" +1020 26 dataset """kinships""" 
+1020 26 model """unstructuredmodel""" +1020 26 loss """bceaftersigmoid""" +1020 26 regularizer """no""" +1020 26 optimizer """adam""" +1020 26 training_loop """lcwa""" +1020 26 evaluator """rankbased""" +1020 27 dataset """kinships""" +1020 27 model """unstructuredmodel""" +1020 27 loss """bceaftersigmoid""" +1020 27 regularizer """no""" +1020 27 optimizer """adam""" +1020 27 training_loop """lcwa""" +1020 27 evaluator """rankbased""" +1020 28 dataset """kinships""" +1020 28 model """unstructuredmodel""" +1020 28 loss """bceaftersigmoid""" +1020 28 regularizer """no""" +1020 28 optimizer """adam""" +1020 28 training_loop """lcwa""" +1020 28 evaluator """rankbased""" +1020 29 dataset """kinships""" +1020 29 model """unstructuredmodel""" +1020 29 loss """bceaftersigmoid""" +1020 29 regularizer """no""" +1020 29 optimizer """adam""" +1020 29 training_loop """lcwa""" +1020 29 evaluator """rankbased""" +1020 30 dataset """kinships""" +1020 30 model """unstructuredmodel""" +1020 30 loss """bceaftersigmoid""" +1020 30 regularizer """no""" +1020 30 optimizer """adam""" +1020 30 training_loop """lcwa""" +1020 30 evaluator """rankbased""" +1020 31 dataset """kinships""" +1020 31 model """unstructuredmodel""" +1020 31 loss """bceaftersigmoid""" +1020 31 regularizer """no""" +1020 31 optimizer """adam""" +1020 31 training_loop """lcwa""" +1020 31 evaluator """rankbased""" +1020 32 dataset """kinships""" +1020 32 model """unstructuredmodel""" +1020 32 loss """bceaftersigmoid""" +1020 32 regularizer """no""" +1020 32 optimizer """adam""" +1020 32 training_loop """lcwa""" +1020 32 evaluator """rankbased""" +1020 33 dataset """kinships""" +1020 33 model """unstructuredmodel""" +1020 33 loss """bceaftersigmoid""" +1020 33 regularizer """no""" +1020 33 optimizer """adam""" +1020 33 training_loop """lcwa""" +1020 33 evaluator """rankbased""" +1020 34 dataset """kinships""" +1020 34 model """unstructuredmodel""" +1020 34 loss """bceaftersigmoid""" +1020 34 regularizer """no""" +1020 
34 optimizer """adam""" +1020 34 training_loop """lcwa""" +1020 34 evaluator """rankbased""" +1020 35 dataset """kinships""" +1020 35 model """unstructuredmodel""" +1020 35 loss """bceaftersigmoid""" +1020 35 regularizer """no""" +1020 35 optimizer """adam""" +1020 35 training_loop """lcwa""" +1020 35 evaluator """rankbased""" +1020 36 dataset """kinships""" +1020 36 model """unstructuredmodel""" +1020 36 loss """bceaftersigmoid""" +1020 36 regularizer """no""" +1020 36 optimizer """adam""" +1020 36 training_loop """lcwa""" +1020 36 evaluator """rankbased""" +1020 37 dataset """kinships""" +1020 37 model """unstructuredmodel""" +1020 37 loss """bceaftersigmoid""" +1020 37 regularizer """no""" +1020 37 optimizer """adam""" +1020 37 training_loop """lcwa""" +1020 37 evaluator """rankbased""" +1020 38 dataset """kinships""" +1020 38 model """unstructuredmodel""" +1020 38 loss """bceaftersigmoid""" +1020 38 regularizer """no""" +1020 38 optimizer """adam""" +1020 38 training_loop """lcwa""" +1020 38 evaluator """rankbased""" +1020 39 dataset """kinships""" +1020 39 model """unstructuredmodel""" +1020 39 loss """bceaftersigmoid""" +1020 39 regularizer """no""" +1020 39 optimizer """adam""" +1020 39 training_loop """lcwa""" +1020 39 evaluator """rankbased""" +1020 40 dataset """kinships""" +1020 40 model """unstructuredmodel""" +1020 40 loss """bceaftersigmoid""" +1020 40 regularizer """no""" +1020 40 optimizer """adam""" +1020 40 training_loop """lcwa""" +1020 40 evaluator """rankbased""" +1020 41 dataset """kinships""" +1020 41 model """unstructuredmodel""" +1020 41 loss """bceaftersigmoid""" +1020 41 regularizer """no""" +1020 41 optimizer """adam""" +1020 41 training_loop """lcwa""" +1020 41 evaluator """rankbased""" +1020 42 dataset """kinships""" +1020 42 model """unstructuredmodel""" +1020 42 loss """bceaftersigmoid""" +1020 42 regularizer """no""" +1020 42 optimizer """adam""" +1020 42 training_loop """lcwa""" +1020 42 evaluator """rankbased""" +1020 43 dataset 
"""kinships""" +1020 43 model """unstructuredmodel""" +1020 43 loss """bceaftersigmoid""" +1020 43 regularizer """no""" +1020 43 optimizer """adam""" +1020 43 training_loop """lcwa""" +1020 43 evaluator """rankbased""" +1020 44 dataset """kinships""" +1020 44 model """unstructuredmodel""" +1020 44 loss """bceaftersigmoid""" +1020 44 regularizer """no""" +1020 44 optimizer """adam""" +1020 44 training_loop """lcwa""" +1020 44 evaluator """rankbased""" +1020 45 dataset """kinships""" +1020 45 model """unstructuredmodel""" +1020 45 loss """bceaftersigmoid""" +1020 45 regularizer """no""" +1020 45 optimizer """adam""" +1020 45 training_loop """lcwa""" +1020 45 evaluator """rankbased""" +1020 46 dataset """kinships""" +1020 46 model """unstructuredmodel""" +1020 46 loss """bceaftersigmoid""" +1020 46 regularizer """no""" +1020 46 optimizer """adam""" +1020 46 training_loop """lcwa""" +1020 46 evaluator """rankbased""" +1020 47 dataset """kinships""" +1020 47 model """unstructuredmodel""" +1020 47 loss """bceaftersigmoid""" +1020 47 regularizer """no""" +1020 47 optimizer """adam""" +1020 47 training_loop """lcwa""" +1020 47 evaluator """rankbased""" +1020 48 dataset """kinships""" +1020 48 model """unstructuredmodel""" +1020 48 loss """bceaftersigmoid""" +1020 48 regularizer """no""" +1020 48 optimizer """adam""" +1020 48 training_loop """lcwa""" +1020 48 evaluator """rankbased""" +1020 49 dataset """kinships""" +1020 49 model """unstructuredmodel""" +1020 49 loss """bceaftersigmoid""" +1020 49 regularizer """no""" +1020 49 optimizer """adam""" +1020 49 training_loop """lcwa""" +1020 49 evaluator """rankbased""" +1020 50 dataset """kinships""" +1020 50 model """unstructuredmodel""" +1020 50 loss """bceaftersigmoid""" +1020 50 regularizer """no""" +1020 50 optimizer """adam""" +1020 50 training_loop """lcwa""" +1020 50 evaluator """rankbased""" +1020 51 dataset """kinships""" +1020 51 model """unstructuredmodel""" +1020 51 loss """bceaftersigmoid""" +1020 51 regularizer 
"""no""" +1020 51 optimizer """adam""" +1020 51 training_loop """lcwa""" +1020 51 evaluator """rankbased""" +1020 52 dataset """kinships""" +1020 52 model """unstructuredmodel""" +1020 52 loss """bceaftersigmoid""" +1020 52 regularizer """no""" +1020 52 optimizer """adam""" +1020 52 training_loop """lcwa""" +1020 52 evaluator """rankbased""" +1020 53 dataset """kinships""" +1020 53 model """unstructuredmodel""" +1020 53 loss """bceaftersigmoid""" +1020 53 regularizer """no""" +1020 53 optimizer """adam""" +1020 53 training_loop """lcwa""" +1020 53 evaluator """rankbased""" +1020 54 dataset """kinships""" +1020 54 model """unstructuredmodel""" +1020 54 loss """bceaftersigmoid""" +1020 54 regularizer """no""" +1020 54 optimizer """adam""" +1020 54 training_loop """lcwa""" +1020 54 evaluator """rankbased""" +1020 55 dataset """kinships""" +1020 55 model """unstructuredmodel""" +1020 55 loss """bceaftersigmoid""" +1020 55 regularizer """no""" +1020 55 optimizer """adam""" +1020 55 training_loop """lcwa""" +1020 55 evaluator """rankbased""" +1020 56 dataset """kinships""" +1020 56 model """unstructuredmodel""" +1020 56 loss """bceaftersigmoid""" +1020 56 regularizer """no""" +1020 56 optimizer """adam""" +1020 56 training_loop """lcwa""" +1020 56 evaluator """rankbased""" +1020 57 dataset """kinships""" +1020 57 model """unstructuredmodel""" +1020 57 loss """bceaftersigmoid""" +1020 57 regularizer """no""" +1020 57 optimizer """adam""" +1020 57 training_loop """lcwa""" +1020 57 evaluator """rankbased""" +1020 58 dataset """kinships""" +1020 58 model """unstructuredmodel""" +1020 58 loss """bceaftersigmoid""" +1020 58 regularizer """no""" +1020 58 optimizer """adam""" +1020 58 training_loop """lcwa""" +1020 58 evaluator """rankbased""" +1020 59 dataset """kinships""" +1020 59 model """unstructuredmodel""" +1020 59 loss """bceaftersigmoid""" +1020 59 regularizer """no""" +1020 59 optimizer """adam""" +1020 59 training_loop """lcwa""" +1020 59 evaluator """rankbased""" 
+1020 60 dataset """kinships""" +1020 60 model """unstructuredmodel""" +1020 60 loss """bceaftersigmoid""" +1020 60 regularizer """no""" +1020 60 optimizer """adam""" +1020 60 training_loop """lcwa""" +1020 60 evaluator """rankbased""" +1020 61 dataset """kinships""" +1020 61 model """unstructuredmodel""" +1020 61 loss """bceaftersigmoid""" +1020 61 regularizer """no""" +1020 61 optimizer """adam""" +1020 61 training_loop """lcwa""" +1020 61 evaluator """rankbased""" +1020 62 dataset """kinships""" +1020 62 model """unstructuredmodel""" +1020 62 loss """bceaftersigmoid""" +1020 62 regularizer """no""" +1020 62 optimizer """adam""" +1020 62 training_loop """lcwa""" +1020 62 evaluator """rankbased""" +1020 63 dataset """kinships""" +1020 63 model """unstructuredmodel""" +1020 63 loss """bceaftersigmoid""" +1020 63 regularizer """no""" +1020 63 optimizer """adam""" +1020 63 training_loop """lcwa""" +1020 63 evaluator """rankbased""" +1020 64 dataset """kinships""" +1020 64 model """unstructuredmodel""" +1020 64 loss """bceaftersigmoid""" +1020 64 regularizer """no""" +1020 64 optimizer """adam""" +1020 64 training_loop """lcwa""" +1020 64 evaluator """rankbased""" +1020 65 dataset """kinships""" +1020 65 model """unstructuredmodel""" +1020 65 loss """bceaftersigmoid""" +1020 65 regularizer """no""" +1020 65 optimizer """adam""" +1020 65 training_loop """lcwa""" +1020 65 evaluator """rankbased""" +1020 66 dataset """kinships""" +1020 66 model """unstructuredmodel""" +1020 66 loss """bceaftersigmoid""" +1020 66 regularizer """no""" +1020 66 optimizer """adam""" +1020 66 training_loop """lcwa""" +1020 66 evaluator """rankbased""" +1020 67 dataset """kinships""" +1020 67 model """unstructuredmodel""" +1020 67 loss """bceaftersigmoid""" +1020 67 regularizer """no""" +1020 67 optimizer """adam""" +1020 67 training_loop """lcwa""" +1020 67 evaluator """rankbased""" +1020 68 dataset """kinships""" +1020 68 model """unstructuredmodel""" +1020 68 loss """bceaftersigmoid""" 
+1020 68 regularizer """no""" +1020 68 optimizer """adam""" +1020 68 training_loop """lcwa""" +1020 68 evaluator """rankbased""" +1020 69 dataset """kinships""" +1020 69 model """unstructuredmodel""" +1020 69 loss """bceaftersigmoid""" +1020 69 regularizer """no""" +1020 69 optimizer """adam""" +1020 69 training_loop """lcwa""" +1020 69 evaluator """rankbased""" +1020 70 dataset """kinships""" +1020 70 model """unstructuredmodel""" +1020 70 loss """bceaftersigmoid""" +1020 70 regularizer """no""" +1020 70 optimizer """adam""" +1020 70 training_loop """lcwa""" +1020 70 evaluator """rankbased""" +1020 71 dataset """kinships""" +1020 71 model """unstructuredmodel""" +1020 71 loss """bceaftersigmoid""" +1020 71 regularizer """no""" +1020 71 optimizer """adam""" +1020 71 training_loop """lcwa""" +1020 71 evaluator """rankbased""" +1020 72 dataset """kinships""" +1020 72 model """unstructuredmodel""" +1020 72 loss """bceaftersigmoid""" +1020 72 regularizer """no""" +1020 72 optimizer """adam""" +1020 72 training_loop """lcwa""" +1020 72 evaluator """rankbased""" +1020 73 dataset """kinships""" +1020 73 model """unstructuredmodel""" +1020 73 loss """bceaftersigmoid""" +1020 73 regularizer """no""" +1020 73 optimizer """adam""" +1020 73 training_loop """lcwa""" +1020 73 evaluator """rankbased""" +1020 74 dataset """kinships""" +1020 74 model """unstructuredmodel""" +1020 74 loss """bceaftersigmoid""" +1020 74 regularizer """no""" +1020 74 optimizer """adam""" +1020 74 training_loop """lcwa""" +1020 74 evaluator """rankbased""" +1020 75 dataset """kinships""" +1020 75 model """unstructuredmodel""" +1020 75 loss """bceaftersigmoid""" +1020 75 regularizer """no""" +1020 75 optimizer """adam""" +1020 75 training_loop """lcwa""" +1020 75 evaluator """rankbased""" +1020 76 dataset """kinships""" +1020 76 model """unstructuredmodel""" +1020 76 loss """bceaftersigmoid""" +1020 76 regularizer """no""" +1020 76 optimizer """adam""" +1020 76 training_loop """lcwa""" +1020 76 
evaluator """rankbased""" +1020 77 dataset """kinships""" +1020 77 model """unstructuredmodel""" +1020 77 loss """bceaftersigmoid""" +1020 77 regularizer """no""" +1020 77 optimizer """adam""" +1020 77 training_loop """lcwa""" +1020 77 evaluator """rankbased""" +1020 78 dataset """kinships""" +1020 78 model """unstructuredmodel""" +1020 78 loss """bceaftersigmoid""" +1020 78 regularizer """no""" +1020 78 optimizer """adam""" +1020 78 training_loop """lcwa""" +1020 78 evaluator """rankbased""" +1020 79 dataset """kinships""" +1020 79 model """unstructuredmodel""" +1020 79 loss """bceaftersigmoid""" +1020 79 regularizer """no""" +1020 79 optimizer """adam""" +1020 79 training_loop """lcwa""" +1020 79 evaluator """rankbased""" +1020 80 dataset """kinships""" +1020 80 model """unstructuredmodel""" +1020 80 loss """bceaftersigmoid""" +1020 80 regularizer """no""" +1020 80 optimizer """adam""" +1020 80 training_loop """lcwa""" +1020 80 evaluator """rankbased""" +1020 81 dataset """kinships""" +1020 81 model """unstructuredmodel""" +1020 81 loss """bceaftersigmoid""" +1020 81 regularizer """no""" +1020 81 optimizer """adam""" +1020 81 training_loop """lcwa""" +1020 81 evaluator """rankbased""" +1020 82 dataset """kinships""" +1020 82 model """unstructuredmodel""" +1020 82 loss """bceaftersigmoid""" +1020 82 regularizer """no""" +1020 82 optimizer """adam""" +1020 82 training_loop """lcwa""" +1020 82 evaluator """rankbased""" +1020 83 dataset """kinships""" +1020 83 model """unstructuredmodel""" +1020 83 loss """bceaftersigmoid""" +1020 83 regularizer """no""" +1020 83 optimizer """adam""" +1020 83 training_loop """lcwa""" +1020 83 evaluator """rankbased""" +1020 84 dataset """kinships""" +1020 84 model """unstructuredmodel""" +1020 84 loss """bceaftersigmoid""" +1020 84 regularizer """no""" +1020 84 optimizer """adam""" +1020 84 training_loop """lcwa""" +1020 84 evaluator """rankbased""" +1020 85 dataset """kinships""" +1020 85 model """unstructuredmodel""" +1020 85 loss 
"""bceaftersigmoid""" +1020 85 regularizer """no""" +1020 85 optimizer """adam""" +1020 85 training_loop """lcwa""" +1020 85 evaluator """rankbased""" +1020 86 dataset """kinships""" +1020 86 model """unstructuredmodel""" +1020 86 loss """bceaftersigmoid""" +1020 86 regularizer """no""" +1020 86 optimizer """adam""" +1020 86 training_loop """lcwa""" +1020 86 evaluator """rankbased""" +1020 87 dataset """kinships""" +1020 87 model """unstructuredmodel""" +1020 87 loss """bceaftersigmoid""" +1020 87 regularizer """no""" +1020 87 optimizer """adam""" +1020 87 training_loop """lcwa""" +1020 87 evaluator """rankbased""" +1020 88 dataset """kinships""" +1020 88 model """unstructuredmodel""" +1020 88 loss """bceaftersigmoid""" +1020 88 regularizer """no""" +1020 88 optimizer """adam""" +1020 88 training_loop """lcwa""" +1020 88 evaluator """rankbased""" +1020 89 dataset """kinships""" +1020 89 model """unstructuredmodel""" +1020 89 loss """bceaftersigmoid""" +1020 89 regularizer """no""" +1020 89 optimizer """adam""" +1020 89 training_loop """lcwa""" +1020 89 evaluator """rankbased""" +1020 90 dataset """kinships""" +1020 90 model """unstructuredmodel""" +1020 90 loss """bceaftersigmoid""" +1020 90 regularizer """no""" +1020 90 optimizer """adam""" +1020 90 training_loop """lcwa""" +1020 90 evaluator """rankbased""" +1020 91 dataset """kinships""" +1020 91 model """unstructuredmodel""" +1020 91 loss """bceaftersigmoid""" +1020 91 regularizer """no""" +1020 91 optimizer """adam""" +1020 91 training_loop """lcwa""" +1020 91 evaluator """rankbased""" +1020 92 dataset """kinships""" +1020 92 model """unstructuredmodel""" +1020 92 loss """bceaftersigmoid""" +1020 92 regularizer """no""" +1020 92 optimizer """adam""" +1020 92 training_loop """lcwa""" +1020 92 evaluator """rankbased""" +1020 93 dataset """kinships""" +1020 93 model """unstructuredmodel""" +1020 93 loss """bceaftersigmoid""" +1020 93 regularizer """no""" +1020 93 optimizer """adam""" +1020 93 training_loop 
"""lcwa""" +1020 93 evaluator """rankbased""" +1020 94 dataset """kinships""" +1020 94 model """unstructuredmodel""" +1020 94 loss """bceaftersigmoid""" +1020 94 regularizer """no""" +1020 94 optimizer """adam""" +1020 94 training_loop """lcwa""" +1020 94 evaluator """rankbased""" +1020 95 dataset """kinships""" +1020 95 model """unstructuredmodel""" +1020 95 loss """bceaftersigmoid""" +1020 95 regularizer """no""" +1020 95 optimizer """adam""" +1020 95 training_loop """lcwa""" +1020 95 evaluator """rankbased""" +1020 96 dataset """kinships""" +1020 96 model """unstructuredmodel""" +1020 96 loss """bceaftersigmoid""" +1020 96 regularizer """no""" +1020 96 optimizer """adam""" +1020 96 training_loop """lcwa""" +1020 96 evaluator """rankbased""" +1020 97 dataset """kinships""" +1020 97 model """unstructuredmodel""" +1020 97 loss """bceaftersigmoid""" +1020 97 regularizer """no""" +1020 97 optimizer """adam""" +1020 97 training_loop """lcwa""" +1020 97 evaluator """rankbased""" +1020 98 dataset """kinships""" +1020 98 model """unstructuredmodel""" +1020 98 loss """bceaftersigmoid""" +1020 98 regularizer """no""" +1020 98 optimizer """adam""" +1020 98 training_loop """lcwa""" +1020 98 evaluator """rankbased""" +1020 99 dataset """kinships""" +1020 99 model """unstructuredmodel""" +1020 99 loss """bceaftersigmoid""" +1020 99 regularizer """no""" +1020 99 optimizer """adam""" +1020 99 training_loop """lcwa""" +1020 99 evaluator """rankbased""" +1020 100 dataset """kinships""" +1020 100 model """unstructuredmodel""" +1020 100 loss """bceaftersigmoid""" +1020 100 regularizer """no""" +1020 100 optimizer """adam""" +1020 100 training_loop """lcwa""" +1020 100 evaluator """rankbased""" +1021 1 model.embedding_dim 2.0 +1021 1 model.scoring_fct_norm 2.0 +1021 1 optimizer.lr 0.0039050129819335807 +1021 1 training.batch_size 0.0 +1021 1 training.label_smoothing 0.04766822915994019 +1021 2 model.embedding_dim 0.0 +1021 2 model.scoring_fct_norm 1.0 +1021 2 optimizer.lr 
0.001126874050458203 +1021 2 training.batch_size 2.0 +1021 2 training.label_smoothing 0.48002448560792593 +1021 3 model.embedding_dim 0.0 +1021 3 model.scoring_fct_norm 2.0 +1021 3 optimizer.lr 0.08598221541261607 +1021 3 training.batch_size 0.0 +1021 3 training.label_smoothing 0.004300887085320657 +1021 4 model.embedding_dim 2.0 +1021 4 model.scoring_fct_norm 1.0 +1021 4 optimizer.lr 0.002956503516879133 +1021 4 training.batch_size 2.0 +1021 4 training.label_smoothing 0.021092673322739872 +1021 5 model.embedding_dim 0.0 +1021 5 model.scoring_fct_norm 2.0 +1021 5 optimizer.lr 0.0279697869783084 +1021 5 training.batch_size 2.0 +1021 5 training.label_smoothing 0.00530779966991198 +1021 6 model.embedding_dim 2.0 +1021 6 model.scoring_fct_norm 1.0 +1021 6 optimizer.lr 0.010461631289928568 +1021 6 training.batch_size 2.0 +1021 6 training.label_smoothing 0.5655951129255954 +1021 7 model.embedding_dim 0.0 +1021 7 model.scoring_fct_norm 1.0 +1021 7 optimizer.lr 0.05186834823888004 +1021 7 training.batch_size 0.0 +1021 7 training.label_smoothing 0.12859802048068147 +1021 8 model.embedding_dim 0.0 +1021 8 model.scoring_fct_norm 2.0 +1021 8 optimizer.lr 0.019640456687212766 +1021 8 training.batch_size 2.0 +1021 8 training.label_smoothing 0.5267941562019056 +1021 9 model.embedding_dim 1.0 +1021 9 model.scoring_fct_norm 1.0 +1021 9 optimizer.lr 0.036496493523551915 +1021 9 training.batch_size 1.0 +1021 9 training.label_smoothing 0.0011008573361367925 +1021 10 model.embedding_dim 0.0 +1021 10 model.scoring_fct_norm 2.0 +1021 10 optimizer.lr 0.0027751912505802187 +1021 10 training.batch_size 0.0 +1021 10 training.label_smoothing 0.024367934264333122 +1021 11 model.embedding_dim 0.0 +1021 11 model.scoring_fct_norm 1.0 +1021 11 optimizer.lr 0.05758856988516024 +1021 11 training.batch_size 0.0 +1021 11 training.label_smoothing 0.13013420570107959 +1021 12 model.embedding_dim 0.0 +1021 12 model.scoring_fct_norm 2.0 +1021 12 optimizer.lr 0.0015879931875561657 +1021 12 
training.batch_size 2.0 +1021 12 training.label_smoothing 0.037942127421137097 +1021 13 model.embedding_dim 1.0 +1021 13 model.scoring_fct_norm 2.0 +1021 13 optimizer.lr 0.012731125732050944 +1021 13 training.batch_size 2.0 +1021 13 training.label_smoothing 0.6710919621204013 +1021 14 model.embedding_dim 0.0 +1021 14 model.scoring_fct_norm 1.0 +1021 14 optimizer.lr 0.04037978756507916 +1021 14 training.batch_size 1.0 +1021 14 training.label_smoothing 0.03165360450608656 +1021 15 model.embedding_dim 0.0 +1021 15 model.scoring_fct_norm 2.0 +1021 15 optimizer.lr 0.008893642932400733 +1021 15 training.batch_size 2.0 +1021 15 training.label_smoothing 0.016306785138805714 +1021 16 model.embedding_dim 2.0 +1021 16 model.scoring_fct_norm 2.0 +1021 16 optimizer.lr 0.0036685831166343628 +1021 16 training.batch_size 0.0 +1021 16 training.label_smoothing 0.004046159848223075 +1021 17 model.embedding_dim 0.0 +1021 17 model.scoring_fct_norm 2.0 +1021 17 optimizer.lr 0.014135835831189663 +1021 17 training.batch_size 2.0 +1021 17 training.label_smoothing 0.10524293225156826 +1021 18 model.embedding_dim 2.0 +1021 18 model.scoring_fct_norm 2.0 +1021 18 optimizer.lr 0.0367603999982085 +1021 18 training.batch_size 0.0 +1021 18 training.label_smoothing 0.17815773742845648 +1021 19 model.embedding_dim 1.0 +1021 19 model.scoring_fct_norm 2.0 +1021 19 optimizer.lr 0.007909787426898516 +1021 19 training.batch_size 0.0 +1021 19 training.label_smoothing 0.009046154221111671 +1021 20 model.embedding_dim 2.0 +1021 20 model.scoring_fct_norm 1.0 +1021 20 optimizer.lr 0.0010298948422173193 +1021 20 training.batch_size 2.0 +1021 20 training.label_smoothing 0.26565159024038076 +1021 21 model.embedding_dim 2.0 +1021 21 model.scoring_fct_norm 1.0 +1021 21 optimizer.lr 0.0012036184705566581 +1021 21 training.batch_size 0.0 +1021 21 training.label_smoothing 0.006824641027171707 +1021 22 model.embedding_dim 0.0 +1021 22 model.scoring_fct_norm 1.0 +1021 22 optimizer.lr 0.04441691920255533 +1021 22 
training.batch_size 2.0 +1021 22 training.label_smoothing 0.020961969862190732 +1021 23 model.embedding_dim 0.0 +1021 23 model.scoring_fct_norm 2.0 +1021 23 optimizer.lr 0.04408412942586999 +1021 23 training.batch_size 0.0 +1021 23 training.label_smoothing 0.01791573115453107 +1021 24 model.embedding_dim 1.0 +1021 24 model.scoring_fct_norm 1.0 +1021 24 optimizer.lr 0.04549220889061384 +1021 24 training.batch_size 2.0 +1021 24 training.label_smoothing 0.21878229966435067 +1021 25 model.embedding_dim 2.0 +1021 25 model.scoring_fct_norm 1.0 +1021 25 optimizer.lr 0.00180181092496723 +1021 25 training.batch_size 0.0 +1021 25 training.label_smoothing 0.04373716680282169 +1021 26 model.embedding_dim 0.0 +1021 26 model.scoring_fct_norm 1.0 +1021 26 optimizer.lr 0.021544024490754606 +1021 26 training.batch_size 0.0 +1021 26 training.label_smoothing 0.49057215118950004 +1021 27 model.embedding_dim 1.0 +1021 27 model.scoring_fct_norm 1.0 +1021 27 optimizer.lr 0.00509091046040696 +1021 27 training.batch_size 2.0 +1021 27 training.label_smoothing 0.021836253210394865 +1021 28 model.embedding_dim 1.0 +1021 28 model.scoring_fct_norm 1.0 +1021 28 optimizer.lr 0.022750381668869963 +1021 28 training.batch_size 2.0 +1021 28 training.label_smoothing 0.007462696600281208 +1021 29 model.embedding_dim 1.0 +1021 29 model.scoring_fct_norm 1.0 +1021 29 optimizer.lr 0.005732474849865231 +1021 29 training.batch_size 1.0 +1021 29 training.label_smoothing 0.006817848315122347 +1021 30 model.embedding_dim 0.0 +1021 30 model.scoring_fct_norm 2.0 +1021 30 optimizer.lr 0.011691080155009966 +1021 30 training.batch_size 0.0 +1021 30 training.label_smoothing 0.09061580942305977 +1021 31 model.embedding_dim 1.0 +1021 31 model.scoring_fct_norm 1.0 +1021 31 optimizer.lr 0.035545868262992844 +1021 31 training.batch_size 2.0 +1021 31 training.label_smoothing 0.5732302120415551 +1021 32 model.embedding_dim 1.0 +1021 32 model.scoring_fct_norm 2.0 +1021 32 optimizer.lr 0.0010043457124668022 +1021 32 
training.batch_size 2.0 +1021 32 training.label_smoothing 0.01988518445972188 +1021 33 model.embedding_dim 1.0 +1021 33 model.scoring_fct_norm 2.0 +1021 33 optimizer.lr 0.02278771099205213 +1021 33 training.batch_size 1.0 +1021 33 training.label_smoothing 0.05740838270852856 +1021 34 model.embedding_dim 1.0 +1021 34 model.scoring_fct_norm 1.0 +1021 34 optimizer.lr 0.028485600159380967 +1021 34 training.batch_size 2.0 +1021 34 training.label_smoothing 0.003134008832199199 +1021 35 model.embedding_dim 2.0 +1021 35 model.scoring_fct_norm 2.0 +1021 35 optimizer.lr 0.0033121368356670857 +1021 35 training.batch_size 2.0 +1021 35 training.label_smoothing 0.029750565738650203 +1021 36 model.embedding_dim 1.0 +1021 36 model.scoring_fct_norm 1.0 +1021 36 optimizer.lr 0.011353361465854936 +1021 36 training.batch_size 2.0 +1021 36 training.label_smoothing 0.006402195154673966 +1021 37 model.embedding_dim 2.0 +1021 37 model.scoring_fct_norm 1.0 +1021 37 optimizer.lr 0.08935800535570525 +1021 37 training.batch_size 1.0 +1021 37 training.label_smoothing 0.012386202177468109 +1021 38 model.embedding_dim 0.0 +1021 38 model.scoring_fct_norm 2.0 +1021 38 optimizer.lr 0.0032078072538601096 +1021 38 training.batch_size 0.0 +1021 38 training.label_smoothing 0.3777685426131034 +1021 39 model.embedding_dim 1.0 +1021 39 model.scoring_fct_norm 1.0 +1021 39 optimizer.lr 0.015229739501611524 +1021 39 training.batch_size 0.0 +1021 39 training.label_smoothing 0.02736618385235834 +1021 40 model.embedding_dim 0.0 +1021 40 model.scoring_fct_norm 2.0 +1021 40 optimizer.lr 0.026397917748525172 +1021 40 training.batch_size 2.0 +1021 40 training.label_smoothing 0.045596173751819125 +1021 41 model.embedding_dim 1.0 +1021 41 model.scoring_fct_norm 2.0 +1021 41 optimizer.lr 0.005136416048990154 +1021 41 training.batch_size 1.0 +1021 41 training.label_smoothing 0.006531374038694103 +1021 42 model.embedding_dim 1.0 +1021 42 model.scoring_fct_norm 2.0 +1021 42 optimizer.lr 0.0015722453450179269 +1021 42 
training.batch_size 2.0 +1021 42 training.label_smoothing 0.0018255803122761815 +1021 43 model.embedding_dim 1.0 +1021 43 model.scoring_fct_norm 1.0 +1021 43 optimizer.lr 0.06477901476400857 +1021 43 training.batch_size 2.0 +1021 43 training.label_smoothing 0.10597335784011251 +1021 44 model.embedding_dim 2.0 +1021 44 model.scoring_fct_norm 1.0 +1021 44 optimizer.lr 0.062075976089027575 +1021 44 training.batch_size 1.0 +1021 44 training.label_smoothing 0.7567216339811657 +1021 45 model.embedding_dim 2.0 +1021 45 model.scoring_fct_norm 2.0 +1021 45 optimizer.lr 0.0039930833764490765 +1021 45 training.batch_size 2.0 +1021 45 training.label_smoothing 0.3333435283873097 +1021 46 model.embedding_dim 0.0 +1021 46 model.scoring_fct_norm 1.0 +1021 46 optimizer.lr 0.0010094385082810246 +1021 46 training.batch_size 2.0 +1021 46 training.label_smoothing 0.035406535487466924 +1021 47 model.embedding_dim 2.0 +1021 47 model.scoring_fct_norm 2.0 +1021 47 optimizer.lr 0.00496439006938322 +1021 47 training.batch_size 1.0 +1021 47 training.label_smoothing 0.10370821603975931 +1021 48 model.embedding_dim 1.0 +1021 48 model.scoring_fct_norm 1.0 +1021 48 optimizer.lr 0.006054880690150248 +1021 48 training.batch_size 2.0 +1021 48 training.label_smoothing 0.23209982083980873 +1021 49 model.embedding_dim 1.0 +1021 49 model.scoring_fct_norm 2.0 +1021 49 optimizer.lr 0.003975791357345545 +1021 49 training.batch_size 1.0 +1021 49 training.label_smoothing 0.0013746986180089905 +1021 50 model.embedding_dim 2.0 +1021 50 model.scoring_fct_norm 2.0 +1021 50 optimizer.lr 0.0018978290588599206 +1021 50 training.batch_size 1.0 +1021 50 training.label_smoothing 0.02939679366797847 +1021 51 model.embedding_dim 2.0 +1021 51 model.scoring_fct_norm 2.0 +1021 51 optimizer.lr 0.005902665315910749 +1021 51 training.batch_size 2.0 +1021 51 training.label_smoothing 0.006327659985849599 +1021 52 model.embedding_dim 0.0 +1021 52 model.scoring_fct_norm 1.0 +1021 52 optimizer.lr 0.001051199871292944 +1021 52 
training.batch_size 1.0 +1021 52 training.label_smoothing 0.00813978375997105 +1021 53 model.embedding_dim 2.0 +1021 53 model.scoring_fct_norm 1.0 +1021 53 optimizer.lr 0.0419349343493908 +1021 53 training.batch_size 2.0 +1021 53 training.label_smoothing 0.49585816369652186 +1021 54 model.embedding_dim 2.0 +1021 54 model.scoring_fct_norm 2.0 +1021 54 optimizer.lr 0.0011171550915730476 +1021 54 training.batch_size 1.0 +1021 54 training.label_smoothing 0.6621931752109532 +1021 55 model.embedding_dim 1.0 +1021 55 model.scoring_fct_norm 1.0 +1021 55 optimizer.lr 0.014845779098716276 +1021 55 training.batch_size 1.0 +1021 55 training.label_smoothing 0.05104094654785997 +1021 56 model.embedding_dim 2.0 +1021 56 model.scoring_fct_norm 1.0 +1021 56 optimizer.lr 0.0025704610364331265 +1021 56 training.batch_size 0.0 +1021 56 training.label_smoothing 0.0017175082983111821 +1021 57 model.embedding_dim 1.0 +1021 57 model.scoring_fct_norm 1.0 +1021 57 optimizer.lr 0.004121602155626126 +1021 57 training.batch_size 2.0 +1021 57 training.label_smoothing 0.012639678200703988 +1021 58 model.embedding_dim 2.0 +1021 58 model.scoring_fct_norm 2.0 +1021 58 optimizer.lr 0.07452377422402326 +1021 58 training.batch_size 2.0 +1021 58 training.label_smoothing 0.005294255193916698 +1021 59 model.embedding_dim 0.0 +1021 59 model.scoring_fct_norm 1.0 +1021 59 optimizer.lr 0.005606344665103449 +1021 59 training.batch_size 1.0 +1021 59 training.label_smoothing 0.0010291723083370474 +1021 60 model.embedding_dim 0.0 +1021 60 model.scoring_fct_norm 2.0 +1021 60 optimizer.lr 0.00645274075990997 +1021 60 training.batch_size 0.0 +1021 60 training.label_smoothing 0.6102034504743789 +1021 61 model.embedding_dim 1.0 +1021 61 model.scoring_fct_norm 1.0 +1021 61 optimizer.lr 0.001512870711879567 +1021 61 training.batch_size 1.0 +1021 61 training.label_smoothing 0.008725100118268618 +1021 62 model.embedding_dim 2.0 +1021 62 model.scoring_fct_norm 2.0 +1021 62 optimizer.lr 0.008613407257327467 +1021 62 
training.batch_size 2.0 +1021 62 training.label_smoothing 0.35456960892616285 +1021 63 model.embedding_dim 0.0 +1021 63 model.scoring_fct_norm 2.0 +1021 63 optimizer.lr 0.036905955023149896 +1021 63 training.batch_size 1.0 +1021 63 training.label_smoothing 0.021041031590496213 +1021 64 model.embedding_dim 1.0 +1021 64 model.scoring_fct_norm 1.0 +1021 64 optimizer.lr 0.044857037787108194 +1021 64 training.batch_size 0.0 +1021 64 training.label_smoothing 0.004143739870852154 +1021 65 model.embedding_dim 0.0 +1021 65 model.scoring_fct_norm 1.0 +1021 65 optimizer.lr 0.001056351093443448 +1021 65 training.batch_size 2.0 +1021 65 training.label_smoothing 0.01388096386215094 +1021 66 model.embedding_dim 2.0 +1021 66 model.scoring_fct_norm 2.0 +1021 66 optimizer.lr 0.0032476197584142114 +1021 66 training.batch_size 0.0 +1021 66 training.label_smoothing 0.03468476544716486 +1021 67 model.embedding_dim 1.0 +1021 67 model.scoring_fct_norm 2.0 +1021 67 optimizer.lr 0.09798192207905827 +1021 67 training.batch_size 1.0 +1021 67 training.label_smoothing 0.1322417491904317 +1021 68 model.embedding_dim 2.0 +1021 68 model.scoring_fct_norm 1.0 +1021 68 optimizer.lr 0.001474273822730663 +1021 68 training.batch_size 0.0 +1021 68 training.label_smoothing 0.13431177822691343 +1021 69 model.embedding_dim 0.0 +1021 69 model.scoring_fct_norm 2.0 +1021 69 optimizer.lr 0.003923024910168368 +1021 69 training.batch_size 2.0 +1021 69 training.label_smoothing 0.002018751402636759 +1021 70 model.embedding_dim 0.0 +1021 70 model.scoring_fct_norm 1.0 +1021 70 optimizer.lr 0.0056015882161615784 +1021 70 training.batch_size 1.0 +1021 70 training.label_smoothing 0.00635463781538719 +1021 71 model.embedding_dim 2.0 +1021 71 model.scoring_fct_norm 1.0 +1021 71 optimizer.lr 0.007038273935291227 +1021 71 training.batch_size 2.0 +1021 71 training.label_smoothing 0.0025364689282695494 +1021 72 model.embedding_dim 0.0 +1021 72 model.scoring_fct_norm 1.0 +1021 72 optimizer.lr 0.08758987220926036 +1021 72 
training.batch_size 2.0 +1021 72 training.label_smoothing 0.0012696363177965193 +1021 73 model.embedding_dim 1.0 +1021 73 model.scoring_fct_norm 1.0 +1021 73 optimizer.lr 0.0017914345780883636 +1021 73 training.batch_size 2.0 +1021 73 training.label_smoothing 0.006273581336863011 +1021 74 model.embedding_dim 2.0 +1021 74 model.scoring_fct_norm 2.0 +1021 74 optimizer.lr 0.05475591380615417 +1021 74 training.batch_size 2.0 +1021 74 training.label_smoothing 0.0018715793275663683 +1021 75 model.embedding_dim 1.0 +1021 75 model.scoring_fct_norm 2.0 +1021 75 optimizer.lr 0.0028618228237670294 +1021 75 training.batch_size 1.0 +1021 75 training.label_smoothing 0.1374510835198105 +1021 76 model.embedding_dim 2.0 +1021 76 model.scoring_fct_norm 1.0 +1021 76 optimizer.lr 0.0028765096148680737 +1021 76 training.batch_size 0.0 +1021 76 training.label_smoothing 0.0016225624557517097 +1021 77 model.embedding_dim 0.0 +1021 77 model.scoring_fct_norm 1.0 +1021 77 optimizer.lr 0.022350922067623603 +1021 77 training.batch_size 0.0 +1021 77 training.label_smoothing 0.07523262600608117 +1021 78 model.embedding_dim 0.0 +1021 78 model.scoring_fct_norm 2.0 +1021 78 optimizer.lr 0.012378690395186024 +1021 78 training.batch_size 2.0 +1021 78 training.label_smoothing 0.3408116898475586 +1021 79 model.embedding_dim 0.0 +1021 79 model.scoring_fct_norm 1.0 +1021 79 optimizer.lr 0.0017903086683214952 +1021 79 training.batch_size 0.0 +1021 79 training.label_smoothing 0.7500567738637212 +1021 80 model.embedding_dim 1.0 +1021 80 model.scoring_fct_norm 2.0 +1021 80 optimizer.lr 0.0013878419106494167 +1021 80 training.batch_size 0.0 +1021 80 training.label_smoothing 0.14464901410095543 +1021 81 model.embedding_dim 1.0 +1021 81 model.scoring_fct_norm 2.0 +1021 81 optimizer.lr 0.015024970470579501 +1021 81 training.batch_size 0.0 +1021 81 training.label_smoothing 0.8533605441260584 +1021 82 model.embedding_dim 0.0 +1021 82 model.scoring_fct_norm 1.0 +1021 82 optimizer.lr 0.006929255933459761 +1021 82 
training.batch_size 1.0 +1021 82 training.label_smoothing 0.014096150055155715 +1021 83 model.embedding_dim 1.0 +1021 83 model.scoring_fct_norm 2.0 +1021 83 optimizer.lr 0.026341048289803052 +1021 83 training.batch_size 1.0 +1021 83 training.label_smoothing 0.47345919809659703 +1021 84 model.embedding_dim 2.0 +1021 84 model.scoring_fct_norm 2.0 +1021 84 optimizer.lr 0.002182852003337756 +1021 84 training.batch_size 2.0 +1021 84 training.label_smoothing 0.008716170685287836 +1021 85 model.embedding_dim 0.0 +1021 85 model.scoring_fct_norm 1.0 +1021 85 optimizer.lr 0.004566745315776318 +1021 85 training.batch_size 2.0 +1021 85 training.label_smoothing 0.02642965237582784 +1021 86 model.embedding_dim 1.0 +1021 86 model.scoring_fct_norm 2.0 +1021 86 optimizer.lr 0.0035851612234507777 +1021 86 training.batch_size 1.0 +1021 86 training.label_smoothing 0.04451314155799292 +1021 87 model.embedding_dim 1.0 +1021 87 model.scoring_fct_norm 2.0 +1021 87 optimizer.lr 0.005798394285103929 +1021 87 training.batch_size 1.0 +1021 87 training.label_smoothing 0.004729960229136708 +1021 88 model.embedding_dim 1.0 +1021 88 model.scoring_fct_norm 1.0 +1021 88 optimizer.lr 0.0557209125679317 +1021 88 training.batch_size 2.0 +1021 88 training.label_smoothing 0.4825406587567529 +1021 89 model.embedding_dim 1.0 +1021 89 model.scoring_fct_norm 2.0 +1021 89 optimizer.lr 0.003476215394026768 +1021 89 training.batch_size 0.0 +1021 89 training.label_smoothing 0.0061542419550351165 +1021 90 model.embedding_dim 2.0 +1021 90 model.scoring_fct_norm 1.0 +1021 90 optimizer.lr 0.0028561879658766027 +1021 90 training.batch_size 1.0 +1021 90 training.label_smoothing 0.020643366763121014 +1021 91 model.embedding_dim 0.0 +1021 91 model.scoring_fct_norm 1.0 +1021 91 optimizer.lr 0.0029693904101228547 +1021 91 training.batch_size 1.0 +1021 91 training.label_smoothing 0.008320282570777894 +1021 92 model.embedding_dim 1.0 +1021 92 model.scoring_fct_norm 1.0 +1021 92 optimizer.lr 0.011062604696431068 +1021 92 
training.batch_size 1.0 +1021 92 training.label_smoothing 0.4573523488763063 +1021 93 model.embedding_dim 2.0 +1021 93 model.scoring_fct_norm 1.0 +1021 93 optimizer.lr 0.06758724323476802 +1021 93 training.batch_size 2.0 +1021 93 training.label_smoothing 0.01715247342060591 +1021 94 model.embedding_dim 0.0 +1021 94 model.scoring_fct_norm 1.0 +1021 94 optimizer.lr 0.060005202758103254 +1021 94 training.batch_size 0.0 +1021 94 training.label_smoothing 0.0022166442092360406 +1021 95 model.embedding_dim 1.0 +1021 95 model.scoring_fct_norm 1.0 +1021 95 optimizer.lr 0.002622426342469742 +1021 95 training.batch_size 2.0 +1021 95 training.label_smoothing 0.049046075474018805 +1021 96 model.embedding_dim 2.0 +1021 96 model.scoring_fct_norm 2.0 +1021 96 optimizer.lr 0.029119191004725313 +1021 96 training.batch_size 1.0 +1021 96 training.label_smoothing 0.5829785613511109 +1021 97 model.embedding_dim 2.0 +1021 97 model.scoring_fct_norm 1.0 +1021 97 optimizer.lr 0.005671272475886127 +1021 97 training.batch_size 1.0 +1021 97 training.label_smoothing 0.09370223494825274 +1021 98 model.embedding_dim 2.0 +1021 98 model.scoring_fct_norm 1.0 +1021 98 optimizer.lr 0.00818785488234532 +1021 98 training.batch_size 0.0 +1021 98 training.label_smoothing 0.002684345532341812 +1021 99 model.embedding_dim 2.0 +1021 99 model.scoring_fct_norm 2.0 +1021 99 optimizer.lr 0.0049577227567073655 +1021 99 training.batch_size 1.0 +1021 99 training.label_smoothing 0.05083139590526741 +1021 100 model.embedding_dim 0.0 +1021 100 model.scoring_fct_norm 2.0 +1021 100 optimizer.lr 0.0012173026318207103 +1021 100 training.batch_size 1.0 +1021 100 training.label_smoothing 0.0013475626004724687 +1021 1 dataset """kinships""" +1021 1 model """unstructuredmodel""" +1021 1 loss """softplus""" +1021 1 regularizer """no""" +1021 1 optimizer """adam""" +1021 1 training_loop """lcwa""" +1021 1 evaluator """rankbased""" +1021 2 dataset """kinships""" +1021 2 model """unstructuredmodel""" +1021 2 loss """softplus""" 
+1021 2 regularizer """no""" +1021 2 optimizer """adam""" +1021 2 training_loop """lcwa""" +1021 2 evaluator """rankbased""" +1021 3 dataset """kinships""" +1021 3 model """unstructuredmodel""" +1021 3 loss """softplus""" +1021 3 regularizer """no""" +1021 3 optimizer """adam""" +1021 3 training_loop """lcwa""" +1021 3 evaluator """rankbased""" +1021 4 dataset """kinships""" +1021 4 model """unstructuredmodel""" +1021 4 loss """softplus""" +1021 4 regularizer """no""" +1021 4 optimizer """adam""" +1021 4 training_loop """lcwa""" +1021 4 evaluator """rankbased""" +1021 5 dataset """kinships""" +1021 5 model """unstructuredmodel""" +1021 5 loss """softplus""" +1021 5 regularizer """no""" +1021 5 optimizer """adam""" +1021 5 training_loop """lcwa""" +1021 5 evaluator """rankbased""" +1021 6 dataset """kinships""" +1021 6 model """unstructuredmodel""" +1021 6 loss """softplus""" +1021 6 regularizer """no""" +1021 6 optimizer """adam""" +1021 6 training_loop """lcwa""" +1021 6 evaluator """rankbased""" +1021 7 dataset """kinships""" +1021 7 model """unstructuredmodel""" +1021 7 loss """softplus""" +1021 7 regularizer """no""" +1021 7 optimizer """adam""" +1021 7 training_loop """lcwa""" +1021 7 evaluator """rankbased""" +1021 8 dataset """kinships""" +1021 8 model """unstructuredmodel""" +1021 8 loss """softplus""" +1021 8 regularizer """no""" +1021 8 optimizer """adam""" +1021 8 training_loop """lcwa""" +1021 8 evaluator """rankbased""" +1021 9 dataset """kinships""" +1021 9 model """unstructuredmodel""" +1021 9 loss """softplus""" +1021 9 regularizer """no""" +1021 9 optimizer """adam""" +1021 9 training_loop """lcwa""" +1021 9 evaluator """rankbased""" +1021 10 dataset """kinships""" +1021 10 model """unstructuredmodel""" +1021 10 loss """softplus""" +1021 10 regularizer """no""" +1021 10 optimizer """adam""" +1021 10 training_loop """lcwa""" +1021 10 evaluator """rankbased""" +1021 11 dataset """kinships""" +1021 11 model """unstructuredmodel""" +1021 11 loss 
"""softplus""" +1021 11 regularizer """no""" +1021 11 optimizer """adam""" +1021 11 training_loop """lcwa""" +1021 11 evaluator """rankbased""" +1021 12 dataset """kinships""" +1021 12 model """unstructuredmodel""" +1021 12 loss """softplus""" +1021 12 regularizer """no""" +1021 12 optimizer """adam""" +1021 12 training_loop """lcwa""" +1021 12 evaluator """rankbased""" +1021 13 dataset """kinships""" +1021 13 model """unstructuredmodel""" +1021 13 loss """softplus""" +1021 13 regularizer """no""" +1021 13 optimizer """adam""" +1021 13 training_loop """lcwa""" +1021 13 evaluator """rankbased""" +1021 14 dataset """kinships""" +1021 14 model """unstructuredmodel""" +1021 14 loss """softplus""" +1021 14 regularizer """no""" +1021 14 optimizer """adam""" +1021 14 training_loop """lcwa""" +1021 14 evaluator """rankbased""" +1021 15 dataset """kinships""" +1021 15 model """unstructuredmodel""" +1021 15 loss """softplus""" +1021 15 regularizer """no""" +1021 15 optimizer """adam""" +1021 15 training_loop """lcwa""" +1021 15 evaluator """rankbased""" +1021 16 dataset """kinships""" +1021 16 model """unstructuredmodel""" +1021 16 loss """softplus""" +1021 16 regularizer """no""" +1021 16 optimizer """adam""" +1021 16 training_loop """lcwa""" +1021 16 evaluator """rankbased""" +1021 17 dataset """kinships""" +1021 17 model """unstructuredmodel""" +1021 17 loss """softplus""" +1021 17 regularizer """no""" +1021 17 optimizer """adam""" +1021 17 training_loop """lcwa""" +1021 17 evaluator """rankbased""" +1021 18 dataset """kinships""" +1021 18 model """unstructuredmodel""" +1021 18 loss """softplus""" +1021 18 regularizer """no""" +1021 18 optimizer """adam""" +1021 18 training_loop """lcwa""" +1021 18 evaluator """rankbased""" +1021 19 dataset """kinships""" +1021 19 model """unstructuredmodel""" +1021 19 loss """softplus""" +1021 19 regularizer """no""" +1021 19 optimizer """adam""" +1021 19 training_loop """lcwa""" +1021 19 evaluator """rankbased""" +1021 20 dataset 
"""kinships""" +1021 20 model """unstructuredmodel""" +1021 20 loss """softplus""" +1021 20 regularizer """no""" +1021 20 optimizer """adam""" +1021 20 training_loop """lcwa""" +1021 20 evaluator """rankbased""" +1021 21 dataset """kinships""" +1021 21 model """unstructuredmodel""" +1021 21 loss """softplus""" +1021 21 regularizer """no""" +1021 21 optimizer """adam""" +1021 21 training_loop """lcwa""" +1021 21 evaluator """rankbased""" +1021 22 dataset """kinships""" +1021 22 model """unstructuredmodel""" +1021 22 loss """softplus""" +1021 22 regularizer """no""" +1021 22 optimizer """adam""" +1021 22 training_loop """lcwa""" +1021 22 evaluator """rankbased""" +1021 23 dataset """kinships""" +1021 23 model """unstructuredmodel""" +1021 23 loss """softplus""" +1021 23 regularizer """no""" +1021 23 optimizer """adam""" +1021 23 training_loop """lcwa""" +1021 23 evaluator """rankbased""" +1021 24 dataset """kinships""" +1021 24 model """unstructuredmodel""" +1021 24 loss """softplus""" +1021 24 regularizer """no""" +1021 24 optimizer """adam""" +1021 24 training_loop """lcwa""" +1021 24 evaluator """rankbased""" +1021 25 dataset """kinships""" +1021 25 model """unstructuredmodel""" +1021 25 loss """softplus""" +1021 25 regularizer """no""" +1021 25 optimizer """adam""" +1021 25 training_loop """lcwa""" +1021 25 evaluator """rankbased""" +1021 26 dataset """kinships""" +1021 26 model """unstructuredmodel""" +1021 26 loss """softplus""" +1021 26 regularizer """no""" +1021 26 optimizer """adam""" +1021 26 training_loop """lcwa""" +1021 26 evaluator """rankbased""" +1021 27 dataset """kinships""" +1021 27 model """unstructuredmodel""" +1021 27 loss """softplus""" +1021 27 regularizer """no""" +1021 27 optimizer """adam""" +1021 27 training_loop """lcwa""" +1021 27 evaluator """rankbased""" +1021 28 dataset """kinships""" +1021 28 model """unstructuredmodel""" +1021 28 loss """softplus""" +1021 28 regularizer """no""" +1021 28 optimizer """adam""" +1021 28 training_loop 
"""lcwa""" +1021 28 evaluator """rankbased""" +1021 29 dataset """kinships""" +1021 29 model """unstructuredmodel""" +1021 29 loss """softplus""" +1021 29 regularizer """no""" +1021 29 optimizer """adam""" +1021 29 training_loop """lcwa""" +1021 29 evaluator """rankbased""" +1021 30 dataset """kinships""" +1021 30 model """unstructuredmodel""" +1021 30 loss """softplus""" +1021 30 regularizer """no""" +1021 30 optimizer """adam""" +1021 30 training_loop """lcwa""" +1021 30 evaluator """rankbased""" +1021 31 dataset """kinships""" +1021 31 model """unstructuredmodel""" +1021 31 loss """softplus""" +1021 31 regularizer """no""" +1021 31 optimizer """adam""" +1021 31 training_loop """lcwa""" +1021 31 evaluator """rankbased""" +1021 32 dataset """kinships""" +1021 32 model """unstructuredmodel""" +1021 32 loss """softplus""" +1021 32 regularizer """no""" +1021 32 optimizer """adam""" +1021 32 training_loop """lcwa""" +1021 32 evaluator """rankbased""" +1021 33 dataset """kinships""" +1021 33 model """unstructuredmodel""" +1021 33 loss """softplus""" +1021 33 regularizer """no""" +1021 33 optimizer """adam""" +1021 33 training_loop """lcwa""" +1021 33 evaluator """rankbased""" +1021 34 dataset """kinships""" +1021 34 model """unstructuredmodel""" +1021 34 loss """softplus""" +1021 34 regularizer """no""" +1021 34 optimizer """adam""" +1021 34 training_loop """lcwa""" +1021 34 evaluator """rankbased""" +1021 35 dataset """kinships""" +1021 35 model """unstructuredmodel""" +1021 35 loss """softplus""" +1021 35 regularizer """no""" +1021 35 optimizer """adam""" +1021 35 training_loop """lcwa""" +1021 35 evaluator """rankbased""" +1021 36 dataset """kinships""" +1021 36 model """unstructuredmodel""" +1021 36 loss """softplus""" +1021 36 regularizer """no""" +1021 36 optimizer """adam""" +1021 36 training_loop """lcwa""" +1021 36 evaluator """rankbased""" +1021 37 dataset """kinships""" +1021 37 model """unstructuredmodel""" +1021 37 loss """softplus""" +1021 37 regularizer 
"""no""" +1021 37 optimizer """adam""" +1021 37 training_loop """lcwa""" +1021 37 evaluator """rankbased""" +1021 38 dataset """kinships""" +1021 38 model """unstructuredmodel""" +1021 38 loss """softplus""" +1021 38 regularizer """no""" +1021 38 optimizer """adam""" +1021 38 training_loop """lcwa""" +1021 38 evaluator """rankbased""" +1021 39 dataset """kinships""" +1021 39 model """unstructuredmodel""" +1021 39 loss """softplus""" +1021 39 regularizer """no""" +1021 39 optimizer """adam""" +1021 39 training_loop """lcwa""" +1021 39 evaluator """rankbased""" +1021 40 dataset """kinships""" +1021 40 model """unstructuredmodel""" +1021 40 loss """softplus""" +1021 40 regularizer """no""" +1021 40 optimizer """adam""" +1021 40 training_loop """lcwa""" +1021 40 evaluator """rankbased""" +1021 41 dataset """kinships""" +1021 41 model """unstructuredmodel""" +1021 41 loss """softplus""" +1021 41 regularizer """no""" +1021 41 optimizer """adam""" +1021 41 training_loop """lcwa""" +1021 41 evaluator """rankbased""" +1021 42 dataset """kinships""" +1021 42 model """unstructuredmodel""" +1021 42 loss """softplus""" +1021 42 regularizer """no""" +1021 42 optimizer """adam""" +1021 42 training_loop """lcwa""" +1021 42 evaluator """rankbased""" +1021 43 dataset """kinships""" +1021 43 model """unstructuredmodel""" +1021 43 loss """softplus""" +1021 43 regularizer """no""" +1021 43 optimizer """adam""" +1021 43 training_loop """lcwa""" +1021 43 evaluator """rankbased""" +1021 44 dataset """kinships""" +1021 44 model """unstructuredmodel""" +1021 44 loss """softplus""" +1021 44 regularizer """no""" +1021 44 optimizer """adam""" +1021 44 training_loop """lcwa""" +1021 44 evaluator """rankbased""" +1021 45 dataset """kinships""" +1021 45 model """unstructuredmodel""" +1021 45 loss """softplus""" +1021 45 regularizer """no""" +1021 45 optimizer """adam""" +1021 45 training_loop """lcwa""" +1021 45 evaluator """rankbased""" +1021 46 dataset """kinships""" +1021 46 model 
"""unstructuredmodel""" +1021 46 loss """softplus""" +1021 46 regularizer """no""" +1021 46 optimizer """adam""" +1021 46 training_loop """lcwa""" +1021 46 evaluator """rankbased""" +1021 47 dataset """kinships""" +1021 47 model """unstructuredmodel""" +1021 47 loss """softplus""" +1021 47 regularizer """no""" +1021 47 optimizer """adam""" +1021 47 training_loop """lcwa""" +1021 47 evaluator """rankbased""" +1021 48 dataset """kinships""" +1021 48 model """unstructuredmodel""" +1021 48 loss """softplus""" +1021 48 regularizer """no""" +1021 48 optimizer """adam""" +1021 48 training_loop """lcwa""" +1021 48 evaluator """rankbased""" +1021 49 dataset """kinships""" +1021 49 model """unstructuredmodel""" +1021 49 loss """softplus""" +1021 49 regularizer """no""" +1021 49 optimizer """adam""" +1021 49 training_loop """lcwa""" +1021 49 evaluator """rankbased""" +1021 50 dataset """kinships""" +1021 50 model """unstructuredmodel""" +1021 50 loss """softplus""" +1021 50 regularizer """no""" +1021 50 optimizer """adam""" +1021 50 training_loop """lcwa""" +1021 50 evaluator """rankbased""" +1021 51 dataset """kinships""" +1021 51 model """unstructuredmodel""" +1021 51 loss """softplus""" +1021 51 regularizer """no""" +1021 51 optimizer """adam""" +1021 51 training_loop """lcwa""" +1021 51 evaluator """rankbased""" +1021 52 dataset """kinships""" +1021 52 model """unstructuredmodel""" +1021 52 loss """softplus""" +1021 52 regularizer """no""" +1021 52 optimizer """adam""" +1021 52 training_loop """lcwa""" +1021 52 evaluator """rankbased""" +1021 53 dataset """kinships""" +1021 53 model """unstructuredmodel""" +1021 53 loss """softplus""" +1021 53 regularizer """no""" +1021 53 optimizer """adam""" +1021 53 training_loop """lcwa""" +1021 53 evaluator """rankbased""" +1021 54 dataset """kinships""" +1021 54 model """unstructuredmodel""" +1021 54 loss """softplus""" +1021 54 regularizer """no""" +1021 54 optimizer """adam""" +1021 54 training_loop """lcwa""" +1021 54 evaluator 
"""rankbased""" +1021 55 dataset """kinships""" +1021 55 model """unstructuredmodel""" +1021 55 loss """softplus""" +1021 55 regularizer """no""" +1021 55 optimizer """adam""" +1021 55 training_loop """lcwa""" +1021 55 evaluator """rankbased""" +1021 56 dataset """kinships""" +1021 56 model """unstructuredmodel""" +1021 56 loss """softplus""" +1021 56 regularizer """no""" +1021 56 optimizer """adam""" +1021 56 training_loop """lcwa""" +1021 56 evaluator """rankbased""" +1021 57 dataset """kinships""" +1021 57 model """unstructuredmodel""" +1021 57 loss """softplus""" +1021 57 regularizer """no""" +1021 57 optimizer """adam""" +1021 57 training_loop """lcwa""" +1021 57 evaluator """rankbased""" +1021 58 dataset """kinships""" +1021 58 model """unstructuredmodel""" +1021 58 loss """softplus""" +1021 58 regularizer """no""" +1021 58 optimizer """adam""" +1021 58 training_loop """lcwa""" +1021 58 evaluator """rankbased""" +1021 59 dataset """kinships""" +1021 59 model """unstructuredmodel""" +1021 59 loss """softplus""" +1021 59 regularizer """no""" +1021 59 optimizer """adam""" +1021 59 training_loop """lcwa""" +1021 59 evaluator """rankbased""" +1021 60 dataset """kinships""" +1021 60 model """unstructuredmodel""" +1021 60 loss """softplus""" +1021 60 regularizer """no""" +1021 60 optimizer """adam""" +1021 60 training_loop """lcwa""" +1021 60 evaluator """rankbased""" +1021 61 dataset """kinships""" +1021 61 model """unstructuredmodel""" +1021 61 loss """softplus""" +1021 61 regularizer """no""" +1021 61 optimizer """adam""" +1021 61 training_loop """lcwa""" +1021 61 evaluator """rankbased""" +1021 62 dataset """kinships""" +1021 62 model """unstructuredmodel""" +1021 62 loss """softplus""" +1021 62 regularizer """no""" +1021 62 optimizer """adam""" +1021 62 training_loop """lcwa""" +1021 62 evaluator """rankbased""" +1021 63 dataset """kinships""" +1021 63 model """unstructuredmodel""" +1021 63 loss """softplus""" +1021 63 regularizer """no""" +1021 63 optimizer 
"""adam""" +1021 63 training_loop """lcwa""" +1021 63 evaluator """rankbased""" +1021 64 dataset """kinships""" +1021 64 model """unstructuredmodel""" +1021 64 loss """softplus""" +1021 64 regularizer """no""" +1021 64 optimizer """adam""" +1021 64 training_loop """lcwa""" +1021 64 evaluator """rankbased""" +1021 65 dataset """kinships""" +1021 65 model """unstructuredmodel""" +1021 65 loss """softplus""" +1021 65 regularizer """no""" +1021 65 optimizer """adam""" +1021 65 training_loop """lcwa""" +1021 65 evaluator """rankbased""" +1021 66 dataset """kinships""" +1021 66 model """unstructuredmodel""" +1021 66 loss """softplus""" +1021 66 regularizer """no""" +1021 66 optimizer """adam""" +1021 66 training_loop """lcwa""" +1021 66 evaluator """rankbased""" +1021 67 dataset """kinships""" +1021 67 model """unstructuredmodel""" +1021 67 loss """softplus""" +1021 67 regularizer """no""" +1021 67 optimizer """adam""" +1021 67 training_loop """lcwa""" +1021 67 evaluator """rankbased""" +1021 68 dataset """kinships""" +1021 68 model """unstructuredmodel""" +1021 68 loss """softplus""" +1021 68 regularizer """no""" +1021 68 optimizer """adam""" +1021 68 training_loop """lcwa""" +1021 68 evaluator """rankbased""" +1021 69 dataset """kinships""" +1021 69 model """unstructuredmodel""" +1021 69 loss """softplus""" +1021 69 regularizer """no""" +1021 69 optimizer """adam""" +1021 69 training_loop """lcwa""" +1021 69 evaluator """rankbased""" +1021 70 dataset """kinships""" +1021 70 model """unstructuredmodel""" +1021 70 loss """softplus""" +1021 70 regularizer """no""" +1021 70 optimizer """adam""" +1021 70 training_loop """lcwa""" +1021 70 evaluator """rankbased""" +1021 71 dataset """kinships""" +1021 71 model """unstructuredmodel""" +1021 71 loss """softplus""" +1021 71 regularizer """no""" +1021 71 optimizer """adam""" +1021 71 training_loop """lcwa""" +1021 71 evaluator """rankbased""" +1021 72 dataset """kinships""" +1021 72 model """unstructuredmodel""" +1021 72 loss 
"""softplus""" +1021 72 regularizer """no""" +1021 72 optimizer """adam""" +1021 72 training_loop """lcwa""" +1021 72 evaluator """rankbased""" +1021 73 dataset """kinships""" +1021 73 model """unstructuredmodel""" +1021 73 loss """softplus""" +1021 73 regularizer """no""" +1021 73 optimizer """adam""" +1021 73 training_loop """lcwa""" +1021 73 evaluator """rankbased""" +1021 74 dataset """kinships""" +1021 74 model """unstructuredmodel""" +1021 74 loss """softplus""" +1021 74 regularizer """no""" +1021 74 optimizer """adam""" +1021 74 training_loop """lcwa""" +1021 74 evaluator """rankbased""" +1021 75 dataset """kinships""" +1021 75 model """unstructuredmodel""" +1021 75 loss """softplus""" +1021 75 regularizer """no""" +1021 75 optimizer """adam""" +1021 75 training_loop """lcwa""" +1021 75 evaluator """rankbased""" +1021 76 dataset """kinships""" +1021 76 model """unstructuredmodel""" +1021 76 loss """softplus""" +1021 76 regularizer """no""" +1021 76 optimizer """adam""" +1021 76 training_loop """lcwa""" +1021 76 evaluator """rankbased""" +1021 77 dataset """kinships""" +1021 77 model """unstructuredmodel""" +1021 77 loss """softplus""" +1021 77 regularizer """no""" +1021 77 optimizer """adam""" +1021 77 training_loop """lcwa""" +1021 77 evaluator """rankbased""" +1021 78 dataset """kinships""" +1021 78 model """unstructuredmodel""" +1021 78 loss """softplus""" +1021 78 regularizer """no""" +1021 78 optimizer """adam""" +1021 78 training_loop """lcwa""" +1021 78 evaluator """rankbased""" +1021 79 dataset """kinships""" +1021 79 model """unstructuredmodel""" +1021 79 loss """softplus""" +1021 79 regularizer """no""" +1021 79 optimizer """adam""" +1021 79 training_loop """lcwa""" +1021 79 evaluator """rankbased""" +1021 80 dataset """kinships""" +1021 80 model """unstructuredmodel""" +1021 80 loss """softplus""" +1021 80 regularizer """no""" +1021 80 optimizer """adam""" +1021 80 training_loop """lcwa""" +1021 80 evaluator """rankbased""" +1021 81 dataset 
"""kinships""" +1021 81 model """unstructuredmodel""" +1021 81 loss """softplus""" +1021 81 regularizer """no""" +1021 81 optimizer """adam""" +1021 81 training_loop """lcwa""" +1021 81 evaluator """rankbased""" +1021 82 dataset """kinships""" +1021 82 model """unstructuredmodel""" +1021 82 loss """softplus""" +1021 82 regularizer """no""" +1021 82 optimizer """adam""" +1021 82 training_loop """lcwa""" +1021 82 evaluator """rankbased""" +1021 83 dataset """kinships""" +1021 83 model """unstructuredmodel""" +1021 83 loss """softplus""" +1021 83 regularizer """no""" +1021 83 optimizer """adam""" +1021 83 training_loop """lcwa""" +1021 83 evaluator """rankbased""" +1021 84 dataset """kinships""" +1021 84 model """unstructuredmodel""" +1021 84 loss """softplus""" +1021 84 regularizer """no""" +1021 84 optimizer """adam""" +1021 84 training_loop """lcwa""" +1021 84 evaluator """rankbased""" +1021 85 dataset """kinships""" +1021 85 model """unstructuredmodel""" +1021 85 loss """softplus""" +1021 85 regularizer """no""" +1021 85 optimizer """adam""" +1021 85 training_loop """lcwa""" +1021 85 evaluator """rankbased""" +1021 86 dataset """kinships""" +1021 86 model """unstructuredmodel""" +1021 86 loss """softplus""" +1021 86 regularizer """no""" +1021 86 optimizer """adam""" +1021 86 training_loop """lcwa""" +1021 86 evaluator """rankbased""" +1021 87 dataset """kinships""" +1021 87 model """unstructuredmodel""" +1021 87 loss """softplus""" +1021 87 regularizer """no""" +1021 87 optimizer """adam""" +1021 87 training_loop """lcwa""" +1021 87 evaluator """rankbased""" +1021 88 dataset """kinships""" +1021 88 model """unstructuredmodel""" +1021 88 loss """softplus""" +1021 88 regularizer """no""" +1021 88 optimizer """adam""" +1021 88 training_loop """lcwa""" +1021 88 evaluator """rankbased""" +1021 89 dataset """kinships""" +1021 89 model """unstructuredmodel""" +1021 89 loss """softplus""" +1021 89 regularizer """no""" +1021 89 optimizer """adam""" +1021 89 training_loop 
"""lcwa""" +1021 89 evaluator """rankbased""" +1021 90 dataset """kinships""" +1021 90 model """unstructuredmodel""" +1021 90 loss """softplus""" +1021 90 regularizer """no""" +1021 90 optimizer """adam""" +1021 90 training_loop """lcwa""" +1021 90 evaluator """rankbased""" +1021 91 dataset """kinships""" +1021 91 model """unstructuredmodel""" +1021 91 loss """softplus""" +1021 91 regularizer """no""" +1021 91 optimizer """adam""" +1021 91 training_loop """lcwa""" +1021 91 evaluator """rankbased""" +1021 92 dataset """kinships""" +1021 92 model """unstructuredmodel""" +1021 92 loss """softplus""" +1021 92 regularizer """no""" +1021 92 optimizer """adam""" +1021 92 training_loop """lcwa""" +1021 92 evaluator """rankbased""" +1021 93 dataset """kinships""" +1021 93 model """unstructuredmodel""" +1021 93 loss """softplus""" +1021 93 regularizer """no""" +1021 93 optimizer """adam""" +1021 93 training_loop """lcwa""" +1021 93 evaluator """rankbased""" +1021 94 dataset """kinships""" +1021 94 model """unstructuredmodel""" +1021 94 loss """softplus""" +1021 94 regularizer """no""" +1021 94 optimizer """adam""" +1021 94 training_loop """lcwa""" +1021 94 evaluator """rankbased""" +1021 95 dataset """kinships""" +1021 95 model """unstructuredmodel""" +1021 95 loss """softplus""" +1021 95 regularizer """no""" +1021 95 optimizer """adam""" +1021 95 training_loop """lcwa""" +1021 95 evaluator """rankbased""" +1021 96 dataset """kinships""" +1021 96 model """unstructuredmodel""" +1021 96 loss """softplus""" +1021 96 regularizer """no""" +1021 96 optimizer """adam""" +1021 96 training_loop """lcwa""" +1021 96 evaluator """rankbased""" +1021 97 dataset """kinships""" +1021 97 model """unstructuredmodel""" +1021 97 loss """softplus""" +1021 97 regularizer """no""" +1021 97 optimizer """adam""" +1021 97 training_loop """lcwa""" +1021 97 evaluator """rankbased""" +1021 98 dataset """kinships""" +1021 98 model """unstructuredmodel""" +1021 98 loss """softplus""" +1021 98 regularizer 
"""no""" +1021 98 optimizer """adam""" +1021 98 training_loop """lcwa""" +1021 98 evaluator """rankbased""" +1021 99 dataset """kinships""" +1021 99 model """unstructuredmodel""" +1021 99 loss """softplus""" +1021 99 regularizer """no""" +1021 99 optimizer """adam""" +1021 99 training_loop """lcwa""" +1021 99 evaluator """rankbased""" +1021 100 dataset """kinships""" +1021 100 model """unstructuredmodel""" +1021 100 loss """softplus""" +1021 100 regularizer """no""" +1021 100 optimizer """adam""" +1021 100 training_loop """lcwa""" +1021 100 evaluator """rankbased""" +1022 1 model.embedding_dim 1.0 +1022 1 model.scoring_fct_norm 2.0 +1022 1 optimizer.lr 0.04666459754377399 +1022 1 training.batch_size 1.0 +1022 1 training.label_smoothing 0.12023325080418165 +1022 2 model.embedding_dim 1.0 +1022 2 model.scoring_fct_norm 1.0 +1022 2 optimizer.lr 0.03269838699140321 +1022 2 training.batch_size 0.0 +1022 2 training.label_smoothing 0.1459024035875679 +1022 3 model.embedding_dim 1.0 +1022 3 model.scoring_fct_norm 1.0 +1022 3 optimizer.lr 0.021554637596669365 +1022 3 training.batch_size 0.0 +1022 3 training.label_smoothing 0.003717234061750332 +1022 4 model.embedding_dim 1.0 +1022 4 model.scoring_fct_norm 1.0 +1022 4 optimizer.lr 0.002990013501991347 +1022 4 training.batch_size 1.0 +1022 4 training.label_smoothing 0.01573257866290313 +1022 5 model.embedding_dim 0.0 +1022 5 model.scoring_fct_norm 2.0 +1022 5 optimizer.lr 0.0010420805921253482 +1022 5 training.batch_size 0.0 +1022 5 training.label_smoothing 0.005195053597018779 +1022 6 model.embedding_dim 1.0 +1022 6 model.scoring_fct_norm 2.0 +1022 6 optimizer.lr 0.06767098303430812 +1022 6 training.batch_size 0.0 +1022 6 training.label_smoothing 0.4390769360726314 +1022 7 model.embedding_dim 0.0 +1022 7 model.scoring_fct_norm 2.0 +1022 7 optimizer.lr 0.08494965929435998 +1022 7 training.batch_size 2.0 +1022 7 training.label_smoothing 0.07720725705224023 +1022 8 model.embedding_dim 2.0 +1022 8 model.scoring_fct_norm 1.0 
+1022 8 optimizer.lr 0.0036978315869720703 +1022 8 training.batch_size 2.0 +1022 8 training.label_smoothing 0.0012339745165765006 +1022 9 model.embedding_dim 2.0 +1022 9 model.scoring_fct_norm 2.0 +1022 9 optimizer.lr 0.006068602038819173 +1022 9 training.batch_size 2.0 +1022 9 training.label_smoothing 0.003473707110916187 +1022 10 model.embedding_dim 2.0 +1022 10 model.scoring_fct_norm 1.0 +1022 10 optimizer.lr 0.009163527163046293 +1022 10 training.batch_size 0.0 +1022 10 training.label_smoothing 0.003654056055376327 +1022 11 model.embedding_dim 0.0 +1022 11 model.scoring_fct_norm 1.0 +1022 11 optimizer.lr 0.001275317501442443 +1022 11 training.batch_size 2.0 +1022 11 training.label_smoothing 0.17090259247301012 +1022 12 model.embedding_dim 2.0 +1022 12 model.scoring_fct_norm 2.0 +1022 12 optimizer.lr 0.00495592706810463 +1022 12 training.batch_size 1.0 +1022 12 training.label_smoothing 0.003370532760916581 +1022 13 model.embedding_dim 0.0 +1022 13 model.scoring_fct_norm 1.0 +1022 13 optimizer.lr 0.03450624842024661 +1022 13 training.batch_size 0.0 +1022 13 training.label_smoothing 0.001227949047390518 +1022 14 model.embedding_dim 0.0 +1022 14 model.scoring_fct_norm 2.0 +1022 14 optimizer.lr 0.06254145400717312 +1022 14 training.batch_size 2.0 +1022 14 training.label_smoothing 0.015170493007210933 +1022 15 model.embedding_dim 1.0 +1022 15 model.scoring_fct_norm 2.0 +1022 15 optimizer.lr 0.002269696283954665 +1022 15 training.batch_size 2.0 +1022 15 training.label_smoothing 0.10480528589831106 +1022 16 model.embedding_dim 0.0 +1022 16 model.scoring_fct_norm 1.0 +1022 16 optimizer.lr 0.011429134688133018 +1022 16 training.batch_size 0.0 +1022 16 training.label_smoothing 0.46872239315776026 +1022 17 model.embedding_dim 1.0 +1022 17 model.scoring_fct_norm 1.0 +1022 17 optimizer.lr 0.0014628194514466264 +1022 17 training.batch_size 1.0 +1022 17 training.label_smoothing 0.01592673689436642 +1022 18 model.embedding_dim 2.0 +1022 18 model.scoring_fct_norm 2.0 +1022 18 
optimizer.lr 0.04172160749891552 +1022 18 training.batch_size 1.0 +1022 18 training.label_smoothing 0.5085765375972292 +1022 19 model.embedding_dim 2.0 +1022 19 model.scoring_fct_norm 1.0 +1022 19 optimizer.lr 0.010965659266293998 +1022 19 training.batch_size 1.0 +1022 19 training.label_smoothing 0.1643795001231615 +1022 20 model.embedding_dim 0.0 +1022 20 model.scoring_fct_norm 2.0 +1022 20 optimizer.lr 0.017213853949753707 +1022 20 training.batch_size 1.0 +1022 20 training.label_smoothing 0.0018724477122330934 +1022 21 model.embedding_dim 1.0 +1022 21 model.scoring_fct_norm 2.0 +1022 21 optimizer.lr 0.010115567910472764 +1022 21 training.batch_size 2.0 +1022 21 training.label_smoothing 0.0014727854633043596 +1022 22 model.embedding_dim 2.0 +1022 22 model.scoring_fct_norm 1.0 +1022 22 optimizer.lr 0.004918701057956767 +1022 22 training.batch_size 0.0 +1022 22 training.label_smoothing 0.22687232968005006 +1022 23 model.embedding_dim 0.0 +1022 23 model.scoring_fct_norm 1.0 +1022 23 optimizer.lr 0.0017318348659653938 +1022 23 training.batch_size 1.0 +1022 23 training.label_smoothing 0.04781591632273584 +1022 24 model.embedding_dim 2.0 +1022 24 model.scoring_fct_norm 1.0 +1022 24 optimizer.lr 0.02322752879896301 +1022 24 training.batch_size 0.0 +1022 24 training.label_smoothing 0.0012325578822181245 +1022 25 model.embedding_dim 1.0 +1022 25 model.scoring_fct_norm 1.0 +1022 25 optimizer.lr 0.0011526852652887075 +1022 25 training.batch_size 1.0 +1022 25 training.label_smoothing 0.1759455977632129 +1022 26 model.embedding_dim 1.0 +1022 26 model.scoring_fct_norm 2.0 +1022 26 optimizer.lr 0.006727412844830095 +1022 26 training.batch_size 1.0 +1022 26 training.label_smoothing 0.017127813551380506 +1022 27 model.embedding_dim 0.0 +1022 27 model.scoring_fct_norm 2.0 +1022 27 optimizer.lr 0.024317496001568448 +1022 27 training.batch_size 0.0 +1022 27 training.label_smoothing 0.0015857036019007414 +1022 28 model.embedding_dim 0.0 +1022 28 model.scoring_fct_norm 2.0 +1022 28 
optimizer.lr 0.00425967710562714 +1022 28 training.batch_size 0.0 +1022 28 training.label_smoothing 0.0024186423600151647 +1022 29 model.embedding_dim 2.0 +1022 29 model.scoring_fct_norm 2.0 +1022 29 optimizer.lr 0.0955433202370616 +1022 29 training.batch_size 2.0 +1022 29 training.label_smoothing 0.0012552443149837214 +1022 30 model.embedding_dim 2.0 +1022 30 model.scoring_fct_norm 1.0 +1022 30 optimizer.lr 0.002024474307778309 +1022 30 training.batch_size 0.0 +1022 30 training.label_smoothing 0.6251423550439611 +1022 31 model.embedding_dim 0.0 +1022 31 model.scoring_fct_norm 2.0 +1022 31 optimizer.lr 0.08466602351599305 +1022 31 training.batch_size 1.0 +1022 31 training.label_smoothing 0.7586142479057592 +1022 32 model.embedding_dim 0.0 +1022 32 model.scoring_fct_norm 1.0 +1022 32 optimizer.lr 0.0972766054974612 +1022 32 training.batch_size 0.0 +1022 32 training.label_smoothing 0.3861241516700991 +1022 33 model.embedding_dim 1.0 +1022 33 model.scoring_fct_norm 1.0 +1022 33 optimizer.lr 0.010140865444247789 +1022 33 training.batch_size 0.0 +1022 33 training.label_smoothing 0.11674916168647417 +1022 34 model.embedding_dim 1.0 +1022 34 model.scoring_fct_norm 2.0 +1022 34 optimizer.lr 0.03942962835125274 +1022 34 training.batch_size 1.0 +1022 34 training.label_smoothing 0.6800476978825966 +1022 35 model.embedding_dim 1.0 +1022 35 model.scoring_fct_norm 1.0 +1022 35 optimizer.lr 0.02658664824604733 +1022 35 training.batch_size 2.0 +1022 35 training.label_smoothing 0.0011403532536336262 +1022 36 model.embedding_dim 1.0 +1022 36 model.scoring_fct_norm 2.0 +1022 36 optimizer.lr 0.0011133889033765178 +1022 36 training.batch_size 2.0 +1022 36 training.label_smoothing 0.11223390536604014 +1022 37 model.embedding_dim 1.0 +1022 37 model.scoring_fct_norm 1.0 +1022 37 optimizer.lr 0.0010449113863362262 +1022 37 training.batch_size 0.0 +1022 37 training.label_smoothing 0.27946244553247257 +1022 38 model.embedding_dim 0.0 +1022 38 model.scoring_fct_norm 1.0 +1022 38 optimizer.lr 
0.05462377533467735 +1022 38 training.batch_size 2.0 +1022 38 training.label_smoothing 0.0069634714688074535 +1022 39 model.embedding_dim 0.0 +1022 39 model.scoring_fct_norm 2.0 +1022 39 optimizer.lr 0.023078754086122127 +1022 39 training.batch_size 0.0 +1022 39 training.label_smoothing 0.039957069936153514 +1022 40 model.embedding_dim 1.0 +1022 40 model.scoring_fct_norm 2.0 +1022 40 optimizer.lr 0.004463842211325347 +1022 40 training.batch_size 2.0 +1022 40 training.label_smoothing 0.026552575810761824 +1022 41 model.embedding_dim 1.0 +1022 41 model.scoring_fct_norm 2.0 +1022 41 optimizer.lr 0.04453155386448715 +1022 41 training.batch_size 2.0 +1022 41 training.label_smoothing 0.0010588678892999286 +1022 42 model.embedding_dim 0.0 +1022 42 model.scoring_fct_norm 1.0 +1022 42 optimizer.lr 0.08162673288483807 +1022 42 training.batch_size 1.0 +1022 42 training.label_smoothing 0.0013631492153858295 +1022 43 model.embedding_dim 0.0 +1022 43 model.scoring_fct_norm 1.0 +1022 43 optimizer.lr 0.00692830544800073 +1022 43 training.batch_size 0.0 +1022 43 training.label_smoothing 0.004087022513598835 +1022 44 model.embedding_dim 2.0 +1022 44 model.scoring_fct_norm 1.0 +1022 44 optimizer.lr 0.0062808955395549735 +1022 44 training.batch_size 2.0 +1022 44 training.label_smoothing 0.212723005372885 +1022 45 model.embedding_dim 1.0 +1022 45 model.scoring_fct_norm 2.0 +1022 45 optimizer.lr 0.005316286387542195 +1022 45 training.batch_size 2.0 +1022 45 training.label_smoothing 0.9277345350252262 +1022 46 model.embedding_dim 2.0 +1022 46 model.scoring_fct_norm 2.0 +1022 46 optimizer.lr 0.005085402745464984 +1022 46 training.batch_size 0.0 +1022 46 training.label_smoothing 0.018779220082192495 +1022 47 model.embedding_dim 1.0 +1022 47 model.scoring_fct_norm 1.0 +1022 47 optimizer.lr 0.007829778124520674 +1022 47 training.batch_size 1.0 +1022 47 training.label_smoothing 0.2883815177222711 +1022 48 model.embedding_dim 0.0 +1022 48 model.scoring_fct_norm 2.0 +1022 48 optimizer.lr 
0.013980752829742198 +1022 48 training.batch_size 1.0 +1022 48 training.label_smoothing 0.6524859472621826 +1022 49 model.embedding_dim 1.0 +1022 49 model.scoring_fct_norm 2.0 +1022 49 optimizer.lr 0.08126767077983715 +1022 49 training.batch_size 1.0 +1022 49 training.label_smoothing 0.4235751019124428 +1022 50 model.embedding_dim 1.0 +1022 50 model.scoring_fct_norm 1.0 +1022 50 optimizer.lr 0.0338047738127481 +1022 50 training.batch_size 1.0 +1022 50 training.label_smoothing 0.008341384030654351 +1022 51 model.embedding_dim 1.0 +1022 51 model.scoring_fct_norm 1.0 +1022 51 optimizer.lr 0.0011275876425267943 +1022 51 training.batch_size 0.0 +1022 51 training.label_smoothing 0.11407206099165437 +1022 52 model.embedding_dim 1.0 +1022 52 model.scoring_fct_norm 1.0 +1022 52 optimizer.lr 0.01001018095201492 +1022 52 training.batch_size 2.0 +1022 52 training.label_smoothing 0.02065443594028103 +1022 53 model.embedding_dim 1.0 +1022 53 model.scoring_fct_norm 2.0 +1022 53 optimizer.lr 0.004929917397330344 +1022 53 training.batch_size 0.0 +1022 53 training.label_smoothing 0.10108712767246596 +1022 54 model.embedding_dim 2.0 +1022 54 model.scoring_fct_norm 1.0 +1022 54 optimizer.lr 0.08685129846131223 +1022 54 training.batch_size 2.0 +1022 54 training.label_smoothing 0.31013243334721285 +1022 55 model.embedding_dim 2.0 +1022 55 model.scoring_fct_norm 1.0 +1022 55 optimizer.lr 0.0014991468577705178 +1022 55 training.batch_size 0.0 +1022 55 training.label_smoothing 0.08134728275267597 +1022 56 model.embedding_dim 0.0 +1022 56 model.scoring_fct_norm 2.0 +1022 56 optimizer.lr 0.0065484336695891205 +1022 56 training.batch_size 2.0 +1022 56 training.label_smoothing 0.5840335333093014 +1022 57 model.embedding_dim 1.0 +1022 57 model.scoring_fct_norm 2.0 +1022 57 optimizer.lr 0.0032893797618586875 +1022 57 training.batch_size 1.0 +1022 57 training.label_smoothing 0.002904292037820432 +1022 58 model.embedding_dim 1.0 +1022 58 model.scoring_fct_norm 2.0 +1022 58 optimizer.lr 
0.08022166312645675 +1022 58 training.batch_size 2.0 +1022 58 training.label_smoothing 0.01650603936582262 +1022 59 model.embedding_dim 2.0 +1022 59 model.scoring_fct_norm 1.0 +1022 59 optimizer.lr 0.035805348002412675 +1022 59 training.batch_size 1.0 +1022 59 training.label_smoothing 0.02374381833603294 +1022 60 model.embedding_dim 0.0 +1022 60 model.scoring_fct_norm 1.0 +1022 60 optimizer.lr 0.031767294038795905 +1022 60 training.batch_size 0.0 +1022 60 training.label_smoothing 0.006381486640621048 +1022 61 model.embedding_dim 1.0 +1022 61 model.scoring_fct_norm 1.0 +1022 61 optimizer.lr 0.008746168419521893 +1022 61 training.batch_size 2.0 +1022 61 training.label_smoothing 0.29435955479727516 +1022 62 model.embedding_dim 2.0 +1022 62 model.scoring_fct_norm 1.0 +1022 62 optimizer.lr 0.0027680989983566453 +1022 62 training.batch_size 0.0 +1022 62 training.label_smoothing 0.01369931745007087 +1022 63 model.embedding_dim 1.0 +1022 63 model.scoring_fct_norm 2.0 +1022 63 optimizer.lr 0.0026443714527083118 +1022 63 training.batch_size 2.0 +1022 63 training.label_smoothing 0.06535078422458358 +1022 64 model.embedding_dim 1.0 +1022 64 model.scoring_fct_norm 1.0 +1022 64 optimizer.lr 0.005595846667479357 +1022 64 training.batch_size 2.0 +1022 64 training.label_smoothing 0.2818353885908444 +1022 65 model.embedding_dim 0.0 +1022 65 model.scoring_fct_norm 1.0 +1022 65 optimizer.lr 0.0023545394642595437 +1022 65 training.batch_size 2.0 +1022 65 training.label_smoothing 0.0013053868128806558 +1022 66 model.embedding_dim 1.0 +1022 66 model.scoring_fct_norm 2.0 +1022 66 optimizer.lr 0.01645260597162516 +1022 66 training.batch_size 2.0 +1022 66 training.label_smoothing 0.0028991532607318205 +1022 67 model.embedding_dim 0.0 +1022 67 model.scoring_fct_norm 1.0 +1022 67 optimizer.lr 0.005035365176883335 +1022 67 training.batch_size 1.0 +1022 67 training.label_smoothing 0.03869946363181288 +1022 68 model.embedding_dim 1.0 +1022 68 model.scoring_fct_norm 1.0 +1022 68 optimizer.lr 
0.09150383347682134 +1022 68 training.batch_size 0.0 +1022 68 training.label_smoothing 0.10079401987494752 +1022 69 model.embedding_dim 0.0 +1022 69 model.scoring_fct_norm 1.0 +1022 69 optimizer.lr 0.0035976981507712578 +1022 69 training.batch_size 1.0 +1022 69 training.label_smoothing 0.01712675226194147 +1022 70 model.embedding_dim 2.0 +1022 70 model.scoring_fct_norm 1.0 +1022 70 optimizer.lr 0.02502913610385099 +1022 70 training.batch_size 0.0 +1022 70 training.label_smoothing 0.08277663696793643 +1022 71 model.embedding_dim 0.0 +1022 71 model.scoring_fct_norm 2.0 +1022 71 optimizer.lr 0.0012281504909934246 +1022 71 training.batch_size 1.0 +1022 71 training.label_smoothing 0.013367960889243689 +1022 72 model.embedding_dim 0.0 +1022 72 model.scoring_fct_norm 1.0 +1022 72 optimizer.lr 0.08425258255483488 +1022 72 training.batch_size 1.0 +1022 72 training.label_smoothing 0.07651603128908041 +1022 73 model.embedding_dim 0.0 +1022 73 model.scoring_fct_norm 1.0 +1022 73 optimizer.lr 0.038515855750383875 +1022 73 training.batch_size 2.0 +1022 73 training.label_smoothing 0.0074224585014331925 +1022 74 model.embedding_dim 1.0 +1022 74 model.scoring_fct_norm 1.0 +1022 74 optimizer.lr 0.05357178530260632 +1022 74 training.batch_size 2.0 +1022 74 training.label_smoothing 0.005876764715223921 +1022 75 model.embedding_dim 1.0 +1022 75 model.scoring_fct_norm 1.0 +1022 75 optimizer.lr 0.0998519543367495 +1022 75 training.batch_size 0.0 +1022 75 training.label_smoothing 0.054110432708584164 +1022 76 model.embedding_dim 2.0 +1022 76 model.scoring_fct_norm 2.0 +1022 76 optimizer.lr 0.08127713607283286 +1022 76 training.batch_size 1.0 +1022 76 training.label_smoothing 0.29631723134827487 +1022 77 model.embedding_dim 2.0 +1022 77 model.scoring_fct_norm 2.0 +1022 77 optimizer.lr 0.0020138148391209178 +1022 77 training.batch_size 2.0 +1022 77 training.label_smoothing 0.5653759646845187 +1022 78 model.embedding_dim 0.0 +1022 78 model.scoring_fct_norm 2.0 +1022 78 optimizer.lr 
0.011693056864080318 +1022 78 training.batch_size 2.0 +1022 78 training.label_smoothing 0.5150052293038881 +1022 79 model.embedding_dim 2.0 +1022 79 model.scoring_fct_norm 2.0 +1022 79 optimizer.lr 0.002169439533592201 +1022 79 training.batch_size 0.0 +1022 79 training.label_smoothing 0.7410920942551114 +1022 80 model.embedding_dim 2.0 +1022 80 model.scoring_fct_norm 1.0 +1022 80 optimizer.lr 0.0017549795059735246 +1022 80 training.batch_size 2.0 +1022 80 training.label_smoothing 0.001893454538567504 +1022 81 model.embedding_dim 2.0 +1022 81 model.scoring_fct_norm 1.0 +1022 81 optimizer.lr 0.0029003963094455716 +1022 81 training.batch_size 2.0 +1022 81 training.label_smoothing 0.18782997499091328 +1022 82 model.embedding_dim 0.0 +1022 82 model.scoring_fct_norm 2.0 +1022 82 optimizer.lr 0.015634832127031612 +1022 82 training.batch_size 2.0 +1022 82 training.label_smoothing 0.9494415155515092 +1022 83 model.embedding_dim 0.0 +1022 83 model.scoring_fct_norm 2.0 +1022 83 optimizer.lr 0.04205776062950439 +1022 83 training.batch_size 0.0 +1022 83 training.label_smoothing 0.005499751798356403 +1022 84 model.embedding_dim 2.0 +1022 84 model.scoring_fct_norm 1.0 +1022 84 optimizer.lr 0.006551213555186138 +1022 84 training.batch_size 2.0 +1022 84 training.label_smoothing 0.9098740659322746 +1022 85 model.embedding_dim 2.0 +1022 85 model.scoring_fct_norm 1.0 +1022 85 optimizer.lr 0.005448109968075897 +1022 85 training.batch_size 0.0 +1022 85 training.label_smoothing 0.1947465090483988 +1022 86 model.embedding_dim 1.0 +1022 86 model.scoring_fct_norm 1.0 +1022 86 optimizer.lr 0.0017217606527496375 +1022 86 training.batch_size 2.0 +1022 86 training.label_smoothing 0.08144866377777198 +1022 87 model.embedding_dim 2.0 +1022 87 model.scoring_fct_norm 2.0 +1022 87 optimizer.lr 0.0011251218040457525 +1022 87 training.batch_size 1.0 +1022 87 training.label_smoothing 0.29575008871271025 +1022 88 model.embedding_dim 1.0 +1022 88 model.scoring_fct_norm 1.0 +1022 88 optimizer.lr 
0.0019609006381342887 +1022 88 training.batch_size 1.0 +1022 88 training.label_smoothing 0.008045014284799165 +1022 89 model.embedding_dim 1.0 +1022 89 model.scoring_fct_norm 2.0 +1022 89 optimizer.lr 0.025653470060476732 +1022 89 training.batch_size 1.0 +1022 89 training.label_smoothing 0.006037469383387267 +1022 90 model.embedding_dim 1.0 +1022 90 model.scoring_fct_norm 1.0 +1022 90 optimizer.lr 0.01012395134806866 +1022 90 training.batch_size 0.0 +1022 90 training.label_smoothing 0.002967950184326103 +1022 91 model.embedding_dim 0.0 +1022 91 model.scoring_fct_norm 2.0 +1022 91 optimizer.lr 0.09451461456412175 +1022 91 training.batch_size 0.0 +1022 91 training.label_smoothing 0.541587357098535 +1022 92 model.embedding_dim 2.0 +1022 92 model.scoring_fct_norm 1.0 +1022 92 optimizer.lr 0.005408385722605436 +1022 92 training.batch_size 0.0 +1022 92 training.label_smoothing 0.008657150927513211 +1022 93 model.embedding_dim 2.0 +1022 93 model.scoring_fct_norm 1.0 +1022 93 optimizer.lr 0.0014288856217907484 +1022 93 training.batch_size 1.0 +1022 93 training.label_smoothing 0.580847694984301 +1022 94 model.embedding_dim 0.0 +1022 94 model.scoring_fct_norm 2.0 +1022 94 optimizer.lr 0.035242150090889025 +1022 94 training.batch_size 0.0 +1022 94 training.label_smoothing 0.19723538715902986 +1022 95 model.embedding_dim 0.0 +1022 95 model.scoring_fct_norm 2.0 +1022 95 optimizer.lr 0.07031889099669496 +1022 95 training.batch_size 1.0 +1022 95 training.label_smoothing 0.0021354920563188603 +1022 96 model.embedding_dim 1.0 +1022 96 model.scoring_fct_norm 1.0 +1022 96 optimizer.lr 0.0010841401690192955 +1022 96 training.batch_size 0.0 +1022 96 training.label_smoothing 0.01124382188522446 +1022 97 model.embedding_dim 0.0 +1022 97 model.scoring_fct_norm 1.0 +1022 97 optimizer.lr 0.02425753295272109 +1022 97 training.batch_size 0.0 +1022 97 training.label_smoothing 0.06830831140613534 +1022 98 model.embedding_dim 2.0 +1022 98 model.scoring_fct_norm 1.0 +1022 98 optimizer.lr 
0.06471669242179116 +1022 98 training.batch_size 1.0 +1022 98 training.label_smoothing 0.0044315313873924365 +1022 99 model.embedding_dim 2.0 +1022 99 model.scoring_fct_norm 2.0 +1022 99 optimizer.lr 0.03675875024342579 +1022 99 training.batch_size 1.0 +1022 99 training.label_smoothing 0.022691975570547637 +1022 100 model.embedding_dim 1.0 +1022 100 model.scoring_fct_norm 2.0 +1022 100 optimizer.lr 0.08033275280207047 +1022 100 training.batch_size 0.0 +1022 100 training.label_smoothing 0.00123319113664816 +1022 1 dataset """kinships""" +1022 1 model """unstructuredmodel""" +1022 1 loss """bceaftersigmoid""" +1022 1 regularizer """no""" +1022 1 optimizer """adam""" +1022 1 training_loop """lcwa""" +1022 1 evaluator """rankbased""" +1022 2 dataset """kinships""" +1022 2 model """unstructuredmodel""" +1022 2 loss """bceaftersigmoid""" +1022 2 regularizer """no""" +1022 2 optimizer """adam""" +1022 2 training_loop """lcwa""" +1022 2 evaluator """rankbased""" +1022 3 dataset """kinships""" +1022 3 model """unstructuredmodel""" +1022 3 loss """bceaftersigmoid""" +1022 3 regularizer """no""" +1022 3 optimizer """adam""" +1022 3 training_loop """lcwa""" +1022 3 evaluator """rankbased""" +1022 4 dataset """kinships""" +1022 4 model """unstructuredmodel""" +1022 4 loss """bceaftersigmoid""" +1022 4 regularizer """no""" +1022 4 optimizer """adam""" +1022 4 training_loop """lcwa""" +1022 4 evaluator """rankbased""" +1022 5 dataset """kinships""" +1022 5 model """unstructuredmodel""" +1022 5 loss """bceaftersigmoid""" +1022 5 regularizer """no""" +1022 5 optimizer """adam""" +1022 5 training_loop """lcwa""" +1022 5 evaluator """rankbased""" +1022 6 dataset """kinships""" +1022 6 model """unstructuredmodel""" +1022 6 loss """bceaftersigmoid""" +1022 6 regularizer """no""" +1022 6 optimizer """adam""" +1022 6 training_loop """lcwa""" +1022 6 evaluator """rankbased""" +1022 7 dataset """kinships""" +1022 7 model """unstructuredmodel""" +1022 7 loss """bceaftersigmoid""" +1022 7 
regularizer """no""" +1022 7 optimizer """adam""" +1022 7 training_loop """lcwa""" +1022 7 evaluator """rankbased""" +1022 8 dataset """kinships""" +1022 8 model """unstructuredmodel""" +1022 8 loss """bceaftersigmoid""" +1022 8 regularizer """no""" +1022 8 optimizer """adam""" +1022 8 training_loop """lcwa""" +1022 8 evaluator """rankbased""" +1022 9 dataset """kinships""" +1022 9 model """unstructuredmodel""" +1022 9 loss """bceaftersigmoid""" +1022 9 regularizer """no""" +1022 9 optimizer """adam""" +1022 9 training_loop """lcwa""" +1022 9 evaluator """rankbased""" +1022 10 dataset """kinships""" +1022 10 model """unstructuredmodel""" +1022 10 loss """bceaftersigmoid""" +1022 10 regularizer """no""" +1022 10 optimizer """adam""" +1022 10 training_loop """lcwa""" +1022 10 evaluator """rankbased""" +1022 11 dataset """kinships""" +1022 11 model """unstructuredmodel""" +1022 11 loss """bceaftersigmoid""" +1022 11 regularizer """no""" +1022 11 optimizer """adam""" +1022 11 training_loop """lcwa""" +1022 11 evaluator """rankbased""" +1022 12 dataset """kinships""" +1022 12 model """unstructuredmodel""" +1022 12 loss """bceaftersigmoid""" +1022 12 regularizer """no""" +1022 12 optimizer """adam""" +1022 12 training_loop """lcwa""" +1022 12 evaluator """rankbased""" +1022 13 dataset """kinships""" +1022 13 model """unstructuredmodel""" +1022 13 loss """bceaftersigmoid""" +1022 13 regularizer """no""" +1022 13 optimizer """adam""" +1022 13 training_loop """lcwa""" +1022 13 evaluator """rankbased""" +1022 14 dataset """kinships""" +1022 14 model """unstructuredmodel""" +1022 14 loss """bceaftersigmoid""" +1022 14 regularizer """no""" +1022 14 optimizer """adam""" +1022 14 training_loop """lcwa""" +1022 14 evaluator """rankbased""" +1022 15 dataset """kinships""" +1022 15 model """unstructuredmodel""" +1022 15 loss """bceaftersigmoid""" +1022 15 regularizer """no""" +1022 15 optimizer """adam""" +1022 15 training_loop """lcwa""" +1022 15 evaluator """rankbased""" +1022 16 
dataset """kinships""" +1022 16 model """unstructuredmodel""" +1022 16 loss """bceaftersigmoid""" +1022 16 regularizer """no""" +1022 16 optimizer """adam""" +1022 16 training_loop """lcwa""" +1022 16 evaluator """rankbased""" +1022 17 dataset """kinships""" +1022 17 model """unstructuredmodel""" +1022 17 loss """bceaftersigmoid""" +1022 17 regularizer """no""" +1022 17 optimizer """adam""" +1022 17 training_loop """lcwa""" +1022 17 evaluator """rankbased""" +1022 18 dataset """kinships""" +1022 18 model """unstructuredmodel""" +1022 18 loss """bceaftersigmoid""" +1022 18 regularizer """no""" +1022 18 optimizer """adam""" +1022 18 training_loop """lcwa""" +1022 18 evaluator """rankbased""" +1022 19 dataset """kinships""" +1022 19 model """unstructuredmodel""" +1022 19 loss """bceaftersigmoid""" +1022 19 regularizer """no""" +1022 19 optimizer """adam""" +1022 19 training_loop """lcwa""" +1022 19 evaluator """rankbased""" +1022 20 dataset """kinships""" +1022 20 model """unstructuredmodel""" +1022 20 loss """bceaftersigmoid""" +1022 20 regularizer """no""" +1022 20 optimizer """adam""" +1022 20 training_loop """lcwa""" +1022 20 evaluator """rankbased""" +1022 21 dataset """kinships""" +1022 21 model """unstructuredmodel""" +1022 21 loss """bceaftersigmoid""" +1022 21 regularizer """no""" +1022 21 optimizer """adam""" +1022 21 training_loop """lcwa""" +1022 21 evaluator """rankbased""" +1022 22 dataset """kinships""" +1022 22 model """unstructuredmodel""" +1022 22 loss """bceaftersigmoid""" +1022 22 regularizer """no""" +1022 22 optimizer """adam""" +1022 22 training_loop """lcwa""" +1022 22 evaluator """rankbased""" +1022 23 dataset """kinships""" +1022 23 model """unstructuredmodel""" +1022 23 loss """bceaftersigmoid""" +1022 23 regularizer """no""" +1022 23 optimizer """adam""" +1022 23 training_loop """lcwa""" +1022 23 evaluator """rankbased""" +1022 24 dataset """kinships""" +1022 24 model """unstructuredmodel""" +1022 24 loss """bceaftersigmoid""" +1022 24 
regularizer """no""" +1022 24 optimizer """adam""" +1022 24 training_loop """lcwa""" +1022 24 evaluator """rankbased""" +1022 25 dataset """kinships""" +1022 25 model """unstructuredmodel""" +1022 25 loss """bceaftersigmoid""" +1022 25 regularizer """no""" +1022 25 optimizer """adam""" +1022 25 training_loop """lcwa""" +1022 25 evaluator """rankbased""" +1022 26 dataset """kinships""" +1022 26 model """unstructuredmodel""" +1022 26 loss """bceaftersigmoid""" +1022 26 regularizer """no""" +1022 26 optimizer """adam""" +1022 26 training_loop """lcwa""" +1022 26 evaluator """rankbased""" +1022 27 dataset """kinships""" +1022 27 model """unstructuredmodel""" +1022 27 loss """bceaftersigmoid""" +1022 27 regularizer """no""" +1022 27 optimizer """adam""" +1022 27 training_loop """lcwa""" +1022 27 evaluator """rankbased""" +1022 28 dataset """kinships""" +1022 28 model """unstructuredmodel""" +1022 28 loss """bceaftersigmoid""" +1022 28 regularizer """no""" +1022 28 optimizer """adam""" +1022 28 training_loop """lcwa""" +1022 28 evaluator """rankbased""" +1022 29 dataset """kinships""" +1022 29 model """unstructuredmodel""" +1022 29 loss """bceaftersigmoid""" +1022 29 regularizer """no""" +1022 29 optimizer """adam""" +1022 29 training_loop """lcwa""" +1022 29 evaluator """rankbased""" +1022 30 dataset """kinships""" +1022 30 model """unstructuredmodel""" +1022 30 loss """bceaftersigmoid""" +1022 30 regularizer """no""" +1022 30 optimizer """adam""" +1022 30 training_loop """lcwa""" +1022 30 evaluator """rankbased""" +1022 31 dataset """kinships""" +1022 31 model """unstructuredmodel""" +1022 31 loss """bceaftersigmoid""" +1022 31 regularizer """no""" +1022 31 optimizer """adam""" +1022 31 training_loop """lcwa""" +1022 31 evaluator """rankbased""" +1022 32 dataset """kinships""" +1022 32 model """unstructuredmodel""" +1022 32 loss """bceaftersigmoid""" +1022 32 regularizer """no""" +1022 32 optimizer """adam""" +1022 32 training_loop """lcwa""" +1022 32 evaluator 
"""rankbased""" +1022 33 dataset """kinships""" +1022 33 model """unstructuredmodel""" +1022 33 loss """bceaftersigmoid""" +1022 33 regularizer """no""" +1022 33 optimizer """adam""" +1022 33 training_loop """lcwa""" +1022 33 evaluator """rankbased""" +1022 34 dataset """kinships""" +1022 34 model """unstructuredmodel""" +1022 34 loss """bceaftersigmoid""" +1022 34 regularizer """no""" +1022 34 optimizer """adam""" +1022 34 training_loop """lcwa""" +1022 34 evaluator """rankbased""" +1022 35 dataset """kinships""" +1022 35 model """unstructuredmodel""" +1022 35 loss """bceaftersigmoid""" +1022 35 regularizer """no""" +1022 35 optimizer """adam""" +1022 35 training_loop """lcwa""" +1022 35 evaluator """rankbased""" +1022 36 dataset """kinships""" +1022 36 model """unstructuredmodel""" +1022 36 loss """bceaftersigmoid""" +1022 36 regularizer """no""" +1022 36 optimizer """adam""" +1022 36 training_loop """lcwa""" +1022 36 evaluator """rankbased""" +1022 37 dataset """kinships""" +1022 37 model """unstructuredmodel""" +1022 37 loss """bceaftersigmoid""" +1022 37 regularizer """no""" +1022 37 optimizer """adam""" +1022 37 training_loop """lcwa""" +1022 37 evaluator """rankbased""" +1022 38 dataset """kinships""" +1022 38 model """unstructuredmodel""" +1022 38 loss """bceaftersigmoid""" +1022 38 regularizer """no""" +1022 38 optimizer """adam""" +1022 38 training_loop """lcwa""" +1022 38 evaluator """rankbased""" +1022 39 dataset """kinships""" +1022 39 model """unstructuredmodel""" +1022 39 loss """bceaftersigmoid""" +1022 39 regularizer """no""" +1022 39 optimizer """adam""" +1022 39 training_loop """lcwa""" +1022 39 evaluator """rankbased""" +1022 40 dataset """kinships""" +1022 40 model """unstructuredmodel""" +1022 40 loss """bceaftersigmoid""" +1022 40 regularizer """no""" +1022 40 optimizer """adam""" +1022 40 training_loop """lcwa""" +1022 40 evaluator """rankbased""" +1022 41 dataset """kinships""" +1022 41 model """unstructuredmodel""" +1022 41 loss 
"""bceaftersigmoid""" +1022 41 regularizer """no""" +1022 41 optimizer """adam""" +1022 41 training_loop """lcwa""" +1022 41 evaluator """rankbased""" +1022 42 dataset """kinships""" +1022 42 model """unstructuredmodel""" +1022 42 loss """bceaftersigmoid""" +1022 42 regularizer """no""" +1022 42 optimizer """adam""" +1022 42 training_loop """lcwa""" +1022 42 evaluator """rankbased""" +1022 43 dataset """kinships""" +1022 43 model """unstructuredmodel""" +1022 43 loss """bceaftersigmoid""" +1022 43 regularizer """no""" +1022 43 optimizer """adam""" +1022 43 training_loop """lcwa""" +1022 43 evaluator """rankbased""" +1022 44 dataset """kinships""" +1022 44 model """unstructuredmodel""" +1022 44 loss """bceaftersigmoid""" +1022 44 regularizer """no""" +1022 44 optimizer """adam""" +1022 44 training_loop """lcwa""" +1022 44 evaluator """rankbased""" +1022 45 dataset """kinships""" +1022 45 model """unstructuredmodel""" +1022 45 loss """bceaftersigmoid""" +1022 45 regularizer """no""" +1022 45 optimizer """adam""" +1022 45 training_loop """lcwa""" +1022 45 evaluator """rankbased""" +1022 46 dataset """kinships""" +1022 46 model """unstructuredmodel""" +1022 46 loss """bceaftersigmoid""" +1022 46 regularizer """no""" +1022 46 optimizer """adam""" +1022 46 training_loop """lcwa""" +1022 46 evaluator """rankbased""" +1022 47 dataset """kinships""" +1022 47 model """unstructuredmodel""" +1022 47 loss """bceaftersigmoid""" +1022 47 regularizer """no""" +1022 47 optimizer """adam""" +1022 47 training_loop """lcwa""" +1022 47 evaluator """rankbased""" +1022 48 dataset """kinships""" +1022 48 model """unstructuredmodel""" +1022 48 loss """bceaftersigmoid""" +1022 48 regularizer """no""" +1022 48 optimizer """adam""" +1022 48 training_loop """lcwa""" +1022 48 evaluator """rankbased""" +1022 49 dataset """kinships""" +1022 49 model """unstructuredmodel""" +1022 49 loss """bceaftersigmoid""" +1022 49 regularizer """no""" +1022 49 optimizer """adam""" +1022 49 training_loop 
"""lcwa""" +1022 49 evaluator """rankbased""" +1022 50 dataset """kinships""" +1022 50 model """unstructuredmodel""" +1022 50 loss """bceaftersigmoid""" +1022 50 regularizer """no""" +1022 50 optimizer """adam""" +1022 50 training_loop """lcwa""" +1022 50 evaluator """rankbased""" +1022 51 dataset """kinships""" +1022 51 model """unstructuredmodel""" +1022 51 loss """bceaftersigmoid""" +1022 51 regularizer """no""" +1022 51 optimizer """adam""" +1022 51 training_loop """lcwa""" +1022 51 evaluator """rankbased""" +1022 52 dataset """kinships""" +1022 52 model """unstructuredmodel""" +1022 52 loss """bceaftersigmoid""" +1022 52 regularizer """no""" +1022 52 optimizer """adam""" +1022 52 training_loop """lcwa""" +1022 52 evaluator """rankbased""" +1022 53 dataset """kinships""" +1022 53 model """unstructuredmodel""" +1022 53 loss """bceaftersigmoid""" +1022 53 regularizer """no""" +1022 53 optimizer """adam""" +1022 53 training_loop """lcwa""" +1022 53 evaluator """rankbased""" +1022 54 dataset """kinships""" +1022 54 model """unstructuredmodel""" +1022 54 loss """bceaftersigmoid""" +1022 54 regularizer """no""" +1022 54 optimizer """adam""" +1022 54 training_loop """lcwa""" +1022 54 evaluator """rankbased""" +1022 55 dataset """kinships""" +1022 55 model """unstructuredmodel""" +1022 55 loss """bceaftersigmoid""" +1022 55 regularizer """no""" +1022 55 optimizer """adam""" +1022 55 training_loop """lcwa""" +1022 55 evaluator """rankbased""" +1022 56 dataset """kinships""" +1022 56 model """unstructuredmodel""" +1022 56 loss """bceaftersigmoid""" +1022 56 regularizer """no""" +1022 56 optimizer """adam""" +1022 56 training_loop """lcwa""" +1022 56 evaluator """rankbased""" +1022 57 dataset """kinships""" +1022 57 model """unstructuredmodel""" +1022 57 loss """bceaftersigmoid""" +1022 57 regularizer """no""" +1022 57 optimizer """adam""" +1022 57 training_loop """lcwa""" +1022 57 evaluator """rankbased""" +1022 58 dataset """kinships""" +1022 58 model 
"""unstructuredmodel""" +1022 58 loss """bceaftersigmoid""" +1022 58 regularizer """no""" +1022 58 optimizer """adam""" +1022 58 training_loop """lcwa""" +1022 58 evaluator """rankbased""" +1022 59 dataset """kinships""" +1022 59 model """unstructuredmodel""" +1022 59 loss """bceaftersigmoid""" +1022 59 regularizer """no""" +1022 59 optimizer """adam""" +1022 59 training_loop """lcwa""" +1022 59 evaluator """rankbased""" +1022 60 dataset """kinships""" +1022 60 model """unstructuredmodel""" +1022 60 loss """bceaftersigmoid""" +1022 60 regularizer """no""" +1022 60 optimizer """adam""" +1022 60 training_loop """lcwa""" +1022 60 evaluator """rankbased""" +1022 61 dataset """kinships""" +1022 61 model """unstructuredmodel""" +1022 61 loss """bceaftersigmoid""" +1022 61 regularizer """no""" +1022 61 optimizer """adam""" +1022 61 training_loop """lcwa""" +1022 61 evaluator """rankbased""" +1022 62 dataset """kinships""" +1022 62 model """unstructuredmodel""" +1022 62 loss """bceaftersigmoid""" +1022 62 regularizer """no""" +1022 62 optimizer """adam""" +1022 62 training_loop """lcwa""" +1022 62 evaluator """rankbased""" +1022 63 dataset """kinships""" +1022 63 model """unstructuredmodel""" +1022 63 loss """bceaftersigmoid""" +1022 63 regularizer """no""" +1022 63 optimizer """adam""" +1022 63 training_loop """lcwa""" +1022 63 evaluator """rankbased""" +1022 64 dataset """kinships""" +1022 64 model """unstructuredmodel""" +1022 64 loss """bceaftersigmoid""" +1022 64 regularizer """no""" +1022 64 optimizer """adam""" +1022 64 training_loop """lcwa""" +1022 64 evaluator """rankbased""" +1022 65 dataset """kinships""" +1022 65 model """unstructuredmodel""" +1022 65 loss """bceaftersigmoid""" +1022 65 regularizer """no""" +1022 65 optimizer """adam""" +1022 65 training_loop """lcwa""" +1022 65 evaluator """rankbased""" +1022 66 dataset """kinships""" +1022 66 model """unstructuredmodel""" +1022 66 loss """bceaftersigmoid""" +1022 66 regularizer """no""" +1022 66 optimizer 
"""adam""" +1022 66 training_loop """lcwa""" +1022 66 evaluator """rankbased""" +1022 67 dataset """kinships""" +1022 67 model """unstructuredmodel""" +1022 67 loss """bceaftersigmoid""" +1022 67 regularizer """no""" +1022 67 optimizer """adam""" +1022 67 training_loop """lcwa""" +1022 67 evaluator """rankbased""" +1022 68 dataset """kinships""" +1022 68 model """unstructuredmodel""" +1022 68 loss """bceaftersigmoid""" +1022 68 regularizer """no""" +1022 68 optimizer """adam""" +1022 68 training_loop """lcwa""" +1022 68 evaluator """rankbased""" +1022 69 dataset """kinships""" +1022 69 model """unstructuredmodel""" +1022 69 loss """bceaftersigmoid""" +1022 69 regularizer """no""" +1022 69 optimizer """adam""" +1022 69 training_loop """lcwa""" +1022 69 evaluator """rankbased""" +1022 70 dataset """kinships""" +1022 70 model """unstructuredmodel""" +1022 70 loss """bceaftersigmoid""" +1022 70 regularizer """no""" +1022 70 optimizer """adam""" +1022 70 training_loop """lcwa""" +1022 70 evaluator """rankbased""" +1022 71 dataset """kinships""" +1022 71 model """unstructuredmodel""" +1022 71 loss """bceaftersigmoid""" +1022 71 regularizer """no""" +1022 71 optimizer """adam""" +1022 71 training_loop """lcwa""" +1022 71 evaluator """rankbased""" +1022 72 dataset """kinships""" +1022 72 model """unstructuredmodel""" +1022 72 loss """bceaftersigmoid""" +1022 72 regularizer """no""" +1022 72 optimizer """adam""" +1022 72 training_loop """lcwa""" +1022 72 evaluator """rankbased""" +1022 73 dataset """kinships""" +1022 73 model """unstructuredmodel""" +1022 73 loss """bceaftersigmoid""" +1022 73 regularizer """no""" +1022 73 optimizer """adam""" +1022 73 training_loop """lcwa""" +1022 73 evaluator """rankbased""" +1022 74 dataset """kinships""" +1022 74 model """unstructuredmodel""" +1022 74 loss """bceaftersigmoid""" +1022 74 regularizer """no""" +1022 74 optimizer """adam""" +1022 74 training_loop """lcwa""" +1022 74 evaluator """rankbased""" +1022 75 dataset """kinships""" 
+1022 75 model """unstructuredmodel""" +1022 75 loss """bceaftersigmoid""" +1022 75 regularizer """no""" +1022 75 optimizer """adam""" +1022 75 training_loop """lcwa""" +1022 75 evaluator """rankbased""" +1022 76 dataset """kinships""" +1022 76 model """unstructuredmodel""" +1022 76 loss """bceaftersigmoid""" +1022 76 regularizer """no""" +1022 76 optimizer """adam""" +1022 76 training_loop """lcwa""" +1022 76 evaluator """rankbased""" +1022 77 dataset """kinships""" +1022 77 model """unstructuredmodel""" +1022 77 loss """bceaftersigmoid""" +1022 77 regularizer """no""" +1022 77 optimizer """adam""" +1022 77 training_loop """lcwa""" +1022 77 evaluator """rankbased""" +1022 78 dataset """kinships""" +1022 78 model """unstructuredmodel""" +1022 78 loss """bceaftersigmoid""" +1022 78 regularizer """no""" +1022 78 optimizer """adam""" +1022 78 training_loop """lcwa""" +1022 78 evaluator """rankbased""" +1022 79 dataset """kinships""" +1022 79 model """unstructuredmodel""" +1022 79 loss """bceaftersigmoid""" +1022 79 regularizer """no""" +1022 79 optimizer """adam""" +1022 79 training_loop """lcwa""" +1022 79 evaluator """rankbased""" +1022 80 dataset """kinships""" +1022 80 model """unstructuredmodel""" +1022 80 loss """bceaftersigmoid""" +1022 80 regularizer """no""" +1022 80 optimizer """adam""" +1022 80 training_loop """lcwa""" +1022 80 evaluator """rankbased""" +1022 81 dataset """kinships""" +1022 81 model """unstructuredmodel""" +1022 81 loss """bceaftersigmoid""" +1022 81 regularizer """no""" +1022 81 optimizer """adam""" +1022 81 training_loop """lcwa""" +1022 81 evaluator """rankbased""" +1022 82 dataset """kinships""" +1022 82 model """unstructuredmodel""" +1022 82 loss """bceaftersigmoid""" +1022 82 regularizer """no""" +1022 82 optimizer """adam""" +1022 82 training_loop """lcwa""" +1022 82 evaluator """rankbased""" +1022 83 dataset """kinships""" +1022 83 model """unstructuredmodel""" +1022 83 loss """bceaftersigmoid""" +1022 83 regularizer """no""" +1022 
83 optimizer """adam""" +1022 83 training_loop """lcwa""" +1022 83 evaluator """rankbased""" +1022 84 dataset """kinships""" +1022 84 model """unstructuredmodel""" +1022 84 loss """bceaftersigmoid""" +1022 84 regularizer """no""" +1022 84 optimizer """adam""" +1022 84 training_loop """lcwa""" +1022 84 evaluator """rankbased""" +1022 85 dataset """kinships""" +1022 85 model """unstructuredmodel""" +1022 85 loss """bceaftersigmoid""" +1022 85 regularizer """no""" +1022 85 optimizer """adam""" +1022 85 training_loop """lcwa""" +1022 85 evaluator """rankbased""" +1022 86 dataset """kinships""" +1022 86 model """unstructuredmodel""" +1022 86 loss """bceaftersigmoid""" +1022 86 regularizer """no""" +1022 86 optimizer """adam""" +1022 86 training_loop """lcwa""" +1022 86 evaluator """rankbased""" +1022 87 dataset """kinships""" +1022 87 model """unstructuredmodel""" +1022 87 loss """bceaftersigmoid""" +1022 87 regularizer """no""" +1022 87 optimizer """adam""" +1022 87 training_loop """lcwa""" +1022 87 evaluator """rankbased""" +1022 88 dataset """kinships""" +1022 88 model """unstructuredmodel""" +1022 88 loss """bceaftersigmoid""" +1022 88 regularizer """no""" +1022 88 optimizer """adam""" +1022 88 training_loop """lcwa""" +1022 88 evaluator """rankbased""" +1022 89 dataset """kinships""" +1022 89 model """unstructuredmodel""" +1022 89 loss """bceaftersigmoid""" +1022 89 regularizer """no""" +1022 89 optimizer """adam""" +1022 89 training_loop """lcwa""" +1022 89 evaluator """rankbased""" +1022 90 dataset """kinships""" +1022 90 model """unstructuredmodel""" +1022 90 loss """bceaftersigmoid""" +1022 90 regularizer """no""" +1022 90 optimizer """adam""" +1022 90 training_loop """lcwa""" +1022 90 evaluator """rankbased""" +1022 91 dataset """kinships""" +1022 91 model """unstructuredmodel""" +1022 91 loss """bceaftersigmoid""" +1022 91 regularizer """no""" +1022 91 optimizer """adam""" +1022 91 training_loop """lcwa""" +1022 91 evaluator """rankbased""" +1022 92 dataset 
"""kinships""" +1022 92 model """unstructuredmodel""" +1022 92 loss """bceaftersigmoid""" +1022 92 regularizer """no""" +1022 92 optimizer """adam""" +1022 92 training_loop """lcwa""" +1022 92 evaluator """rankbased""" +1022 93 dataset """kinships""" +1022 93 model """unstructuredmodel""" +1022 93 loss """bceaftersigmoid""" +1022 93 regularizer """no""" +1022 93 optimizer """adam""" +1022 93 training_loop """lcwa""" +1022 93 evaluator """rankbased""" +1022 94 dataset """kinships""" +1022 94 model """unstructuredmodel""" +1022 94 loss """bceaftersigmoid""" +1022 94 regularizer """no""" +1022 94 optimizer """adam""" +1022 94 training_loop """lcwa""" +1022 94 evaluator """rankbased""" +1022 95 dataset """kinships""" +1022 95 model """unstructuredmodel""" +1022 95 loss """bceaftersigmoid""" +1022 95 regularizer """no""" +1022 95 optimizer """adam""" +1022 95 training_loop """lcwa""" +1022 95 evaluator """rankbased""" +1022 96 dataset """kinships""" +1022 96 model """unstructuredmodel""" +1022 96 loss """bceaftersigmoid""" +1022 96 regularizer """no""" +1022 96 optimizer """adam""" +1022 96 training_loop """lcwa""" +1022 96 evaluator """rankbased""" +1022 97 dataset """kinships""" +1022 97 model """unstructuredmodel""" +1022 97 loss """bceaftersigmoid""" +1022 97 regularizer """no""" +1022 97 optimizer """adam""" +1022 97 training_loop """lcwa""" +1022 97 evaluator """rankbased""" +1022 98 dataset """kinships""" +1022 98 model """unstructuredmodel""" +1022 98 loss """bceaftersigmoid""" +1022 98 regularizer """no""" +1022 98 optimizer """adam""" +1022 98 training_loop """lcwa""" +1022 98 evaluator """rankbased""" +1022 99 dataset """kinships""" +1022 99 model """unstructuredmodel""" +1022 99 loss """bceaftersigmoid""" +1022 99 regularizer """no""" +1022 99 optimizer """adam""" +1022 99 training_loop """lcwa""" +1022 99 evaluator """rankbased""" +1022 100 dataset """kinships""" +1022 100 model """unstructuredmodel""" +1022 100 loss """bceaftersigmoid""" +1022 100 
regularizer """no""" +1022 100 optimizer """adam""" +1022 100 training_loop """lcwa""" +1022 100 evaluator """rankbased""" +1023 1 model.embedding_dim 0.0 +1023 1 model.scoring_fct_norm 2.0 +1023 1 optimizer.lr 0.010792006384056358 +1023 1 training.batch_size 0.0 +1023 1 training.label_smoothing 0.9824205646445072 +1023 2 model.embedding_dim 0.0 +1023 2 model.scoring_fct_norm 1.0 +1023 2 optimizer.lr 0.011613868694799813 +1023 2 training.batch_size 1.0 +1023 2 training.label_smoothing 0.015584131958648651 +1023 3 model.embedding_dim 0.0 +1023 3 model.scoring_fct_norm 2.0 +1023 3 optimizer.lr 0.0065761310128884766 +1023 3 training.batch_size 0.0 +1023 3 training.label_smoothing 0.09604786644257045 +1023 4 model.embedding_dim 2.0 +1023 4 model.scoring_fct_norm 1.0 +1023 4 optimizer.lr 0.03631434809548696 +1023 4 training.batch_size 0.0 +1023 4 training.label_smoothing 0.08427367099969404 +1023 5 model.embedding_dim 1.0 +1023 5 model.scoring_fct_norm 2.0 +1023 5 optimizer.lr 0.007470723703426677 +1023 5 training.batch_size 2.0 +1023 5 training.label_smoothing 0.017747447983433 +1023 6 model.embedding_dim 1.0 +1023 6 model.scoring_fct_norm 2.0 +1023 6 optimizer.lr 0.018498676983624958 +1023 6 training.batch_size 1.0 +1023 6 training.label_smoothing 0.003051868623532437 +1023 7 model.embedding_dim 2.0 +1023 7 model.scoring_fct_norm 2.0 +1023 7 optimizer.lr 0.07458910195035956 +1023 7 training.batch_size 0.0 +1023 7 training.label_smoothing 0.2906219651543074 +1023 8 model.embedding_dim 2.0 +1023 8 model.scoring_fct_norm 2.0 +1023 8 optimizer.lr 0.01975024542417343 +1023 8 training.batch_size 0.0 +1023 8 training.label_smoothing 0.8855635203502319 +1023 9 model.embedding_dim 0.0 +1023 9 model.scoring_fct_norm 2.0 +1023 9 optimizer.lr 0.001294411327378174 +1023 9 training.batch_size 1.0 +1023 9 training.label_smoothing 0.001569165192279389 +1023 10 model.embedding_dim 1.0 +1023 10 model.scoring_fct_norm 1.0 +1023 10 optimizer.lr 0.015021849518038794 +1023 10 
training.batch_size 0.0 +1023 10 training.label_smoothing 0.011729550062730623 +1023 11 model.embedding_dim 2.0 +1023 11 model.scoring_fct_norm 2.0 +1023 11 optimizer.lr 0.023438567183426757 +1023 11 training.batch_size 0.0 +1023 11 training.label_smoothing 0.007505625688495735 +1023 12 model.embedding_dim 2.0 +1023 12 model.scoring_fct_norm 2.0 +1023 12 optimizer.lr 0.01304285392615868 +1023 12 training.batch_size 0.0 +1023 12 training.label_smoothing 0.021894015087145675 +1023 13 model.embedding_dim 2.0 +1023 13 model.scoring_fct_norm 1.0 +1023 13 optimizer.lr 0.022745031645721905 +1023 13 training.batch_size 0.0 +1023 13 training.label_smoothing 0.02558092380154482 +1023 14 model.embedding_dim 2.0 +1023 14 model.scoring_fct_norm 1.0 +1023 14 optimizer.lr 0.00436310411469617 +1023 14 training.batch_size 1.0 +1023 14 training.label_smoothing 0.01036898617101077 +1023 15 model.embedding_dim 0.0 +1023 15 model.scoring_fct_norm 1.0 +1023 15 optimizer.lr 0.0022775405757412093 +1023 15 training.batch_size 0.0 +1023 15 training.label_smoothing 0.2462150713070447 +1023 16 model.embedding_dim 0.0 +1023 16 model.scoring_fct_norm 1.0 +1023 16 optimizer.lr 0.0031443289264352337 +1023 16 training.batch_size 1.0 +1023 16 training.label_smoothing 0.5979710910177621 +1023 17 model.embedding_dim 2.0 +1023 17 model.scoring_fct_norm 2.0 +1023 17 optimizer.lr 0.0012230331148755766 +1023 17 training.batch_size 0.0 +1023 17 training.label_smoothing 0.2627686076973914 +1023 18 model.embedding_dim 0.0 +1023 18 model.scoring_fct_norm 1.0 +1023 18 optimizer.lr 0.01971458602094973 +1023 18 training.batch_size 1.0 +1023 18 training.label_smoothing 0.07992849844882659 +1023 19 model.embedding_dim 0.0 +1023 19 model.scoring_fct_norm 1.0 +1023 19 optimizer.lr 0.07024056632662735 +1023 19 training.batch_size 1.0 +1023 19 training.label_smoothing 0.9420627804412658 +1023 20 model.embedding_dim 2.0 +1023 20 model.scoring_fct_norm 1.0 +1023 20 optimizer.lr 0.0832581980723585 +1023 20 
training.batch_size 0.0 +1023 20 training.label_smoothing 0.0014972164625665226 +1023 21 model.embedding_dim 1.0 +1023 21 model.scoring_fct_norm 1.0 +1023 21 optimizer.lr 0.008306532386799619 +1023 21 training.batch_size 1.0 +1023 21 training.label_smoothing 0.35729834383891496 +1023 22 model.embedding_dim 1.0 +1023 22 model.scoring_fct_norm 2.0 +1023 22 optimizer.lr 0.0028639625926704495 +1023 22 training.batch_size 0.0 +1023 22 training.label_smoothing 0.11026675535868953 +1023 23 model.embedding_dim 2.0 +1023 23 model.scoring_fct_norm 1.0 +1023 23 optimizer.lr 0.01382545821064136 +1023 23 training.batch_size 1.0 +1023 23 training.label_smoothing 0.005479387392996672 +1023 24 model.embedding_dim 1.0 +1023 24 model.scoring_fct_norm 1.0 +1023 24 optimizer.lr 0.020548686863992346 +1023 24 training.batch_size 1.0 +1023 24 training.label_smoothing 0.0017813472096730569 +1023 25 model.embedding_dim 2.0 +1023 25 model.scoring_fct_norm 1.0 +1023 25 optimizer.lr 0.0017099182087141668 +1023 25 training.batch_size 1.0 +1023 25 training.label_smoothing 0.0022418169669507623 +1023 26 model.embedding_dim 1.0 +1023 26 model.scoring_fct_norm 2.0 +1023 26 optimizer.lr 0.007418436603115933 +1023 26 training.batch_size 2.0 +1023 26 training.label_smoothing 0.0025169207710829 +1023 27 model.embedding_dim 2.0 +1023 27 model.scoring_fct_norm 2.0 +1023 27 optimizer.lr 0.006766757245374072 +1023 27 training.batch_size 1.0 +1023 27 training.label_smoothing 0.025006963912199528 +1023 28 model.embedding_dim 1.0 +1023 28 model.scoring_fct_norm 2.0 +1023 28 optimizer.lr 0.0026490956526494464 +1023 28 training.batch_size 1.0 +1023 28 training.label_smoothing 0.007041901476772457 +1023 29 model.embedding_dim 2.0 +1023 29 model.scoring_fct_norm 1.0 +1023 29 optimizer.lr 0.009966429463296051 +1023 29 training.batch_size 1.0 +1023 29 training.label_smoothing 0.001394117708710649 +1023 30 model.embedding_dim 1.0 +1023 30 model.scoring_fct_norm 1.0 +1023 30 optimizer.lr 0.0022240743734071758 +1023 
30 training.batch_size 0.0 +1023 30 training.label_smoothing 0.10777737829188015 +1023 31 model.embedding_dim 2.0 +1023 31 model.scoring_fct_norm 1.0 +1023 31 optimizer.lr 0.0015309057364661363 +1023 31 training.batch_size 1.0 +1023 31 training.label_smoothing 0.0037346979497962507 +1023 32 model.embedding_dim 0.0 +1023 32 model.scoring_fct_norm 2.0 +1023 32 optimizer.lr 0.0010963747334630763 +1023 32 training.batch_size 1.0 +1023 32 training.label_smoothing 0.009959650824029949 +1023 33 model.embedding_dim 2.0 +1023 33 model.scoring_fct_norm 1.0 +1023 33 optimizer.lr 0.0029940173377077757 +1023 33 training.batch_size 2.0 +1023 33 training.label_smoothing 0.03036181132489758 +1023 34 model.embedding_dim 2.0 +1023 34 model.scoring_fct_norm 2.0 +1023 34 optimizer.lr 0.0010979049739601161 +1023 34 training.batch_size 2.0 +1023 34 training.label_smoothing 0.15055215752945994 +1023 35 model.embedding_dim 1.0 +1023 35 model.scoring_fct_norm 2.0 +1023 35 optimizer.lr 0.028940489266504964 +1023 35 training.batch_size 2.0 +1023 35 training.label_smoothing 0.1998213376666657 +1023 36 model.embedding_dim 1.0 +1023 36 model.scoring_fct_norm 2.0 +1023 36 optimizer.lr 0.030473205767619294 +1023 36 training.batch_size 0.0 +1023 36 training.label_smoothing 0.0099384457237412 +1023 37 model.embedding_dim 2.0 +1023 37 model.scoring_fct_norm 2.0 +1023 37 optimizer.lr 0.001765711488824363 +1023 37 training.batch_size 1.0 +1023 37 training.label_smoothing 0.026546825126261504 +1023 38 model.embedding_dim 1.0 +1023 38 model.scoring_fct_norm 1.0 +1023 38 optimizer.lr 0.001234584650009995 +1023 38 training.batch_size 1.0 +1023 38 training.label_smoothing 0.007890976479857982 +1023 39 model.embedding_dim 0.0 +1023 39 model.scoring_fct_norm 2.0 +1023 39 optimizer.lr 0.0654097960852479 +1023 39 training.batch_size 1.0 +1023 39 training.label_smoothing 0.07817792912925547 +1023 40 model.embedding_dim 2.0 +1023 40 model.scoring_fct_norm 1.0 +1023 40 optimizer.lr 0.0020014462226179506 +1023 40 
training.batch_size 1.0 +1023 40 training.label_smoothing 0.9251030683503658 +1023 41 model.embedding_dim 2.0 +1023 41 model.scoring_fct_norm 1.0 +1023 41 optimizer.lr 0.0019980822592785496 +1023 41 training.batch_size 2.0 +1023 41 training.label_smoothing 0.04559571040835672 +1023 42 model.embedding_dim 0.0 +1023 42 model.scoring_fct_norm 2.0 +1023 42 optimizer.lr 0.004391667916095348 +1023 42 training.batch_size 1.0 +1023 42 training.label_smoothing 0.18655522862409302 +1023 43 model.embedding_dim 0.0 +1023 43 model.scoring_fct_norm 2.0 +1023 43 optimizer.lr 0.01111831674406899 +1023 43 training.batch_size 0.0 +1023 43 training.label_smoothing 0.07563047537374919 +1023 44 model.embedding_dim 2.0 +1023 44 model.scoring_fct_norm 1.0 +1023 44 optimizer.lr 0.05081824500402342 +1023 44 training.batch_size 1.0 +1023 44 training.label_smoothing 0.0015927560690434303 +1023 45 model.embedding_dim 0.0 +1023 45 model.scoring_fct_norm 2.0 +1023 45 optimizer.lr 0.04694724274594701 +1023 45 training.batch_size 1.0 +1023 45 training.label_smoothing 0.031564095664513424 +1023 46 model.embedding_dim 0.0 +1023 46 model.scoring_fct_norm 1.0 +1023 46 optimizer.lr 0.005507923796854367 +1023 46 training.batch_size 2.0 +1023 46 training.label_smoothing 0.03473918860182556 +1023 47 model.embedding_dim 0.0 +1023 47 model.scoring_fct_norm 2.0 +1023 47 optimizer.lr 0.001396052496711757 +1023 47 training.batch_size 0.0 +1023 47 training.label_smoothing 0.013560474933638987 +1023 48 model.embedding_dim 1.0 +1023 48 model.scoring_fct_norm 1.0 +1023 48 optimizer.lr 0.0013049087293265584 +1023 48 training.batch_size 0.0 +1023 48 training.label_smoothing 0.00207694142470171 +1023 49 model.embedding_dim 1.0 +1023 49 model.scoring_fct_norm 1.0 +1023 49 optimizer.lr 0.022986455539929476 +1023 49 training.batch_size 0.0 +1023 49 training.label_smoothing 0.0063230586434146275 +1023 50 model.embedding_dim 1.0 +1023 50 model.scoring_fct_norm 1.0 +1023 50 optimizer.lr 0.07832707533805727 +1023 50 
training.batch_size 1.0 +1023 50 training.label_smoothing 0.07745297969807985 +1023 51 model.embedding_dim 2.0 +1023 51 model.scoring_fct_norm 2.0 +1023 51 optimizer.lr 0.00612068508455807 +1023 51 training.batch_size 2.0 +1023 51 training.label_smoothing 0.0017719300617577921 +1023 52 model.embedding_dim 0.0 +1023 52 model.scoring_fct_norm 2.0 +1023 52 optimizer.lr 0.0010333704378677523 +1023 52 training.batch_size 2.0 +1023 52 training.label_smoothing 0.6798117386342529 +1023 53 model.embedding_dim 1.0 +1023 53 model.scoring_fct_norm 1.0 +1023 53 optimizer.lr 0.019512842716195165 +1023 53 training.batch_size 2.0 +1023 53 training.label_smoothing 0.0084232298021917 +1023 54 model.embedding_dim 1.0 +1023 54 model.scoring_fct_norm 1.0 +1023 54 optimizer.lr 0.0018367979635125187 +1023 54 training.batch_size 2.0 +1023 54 training.label_smoothing 0.08555806033769972 +1023 55 model.embedding_dim 0.0 +1023 55 model.scoring_fct_norm 2.0 +1023 55 optimizer.lr 0.04920676261531341 +1023 55 training.batch_size 2.0 +1023 55 training.label_smoothing 0.007712850251601087 +1023 56 model.embedding_dim 1.0 +1023 56 model.scoring_fct_norm 2.0 +1023 56 optimizer.lr 0.003538172620502777 +1023 56 training.batch_size 0.0 +1023 56 training.label_smoothing 0.0070923869131212566 +1023 57 model.embedding_dim 0.0 +1023 57 model.scoring_fct_norm 2.0 +1023 57 optimizer.lr 0.035612416266465094 +1023 57 training.batch_size 1.0 +1023 57 training.label_smoothing 0.43013701606381927 +1023 58 model.embedding_dim 1.0 +1023 58 model.scoring_fct_norm 1.0 +1023 58 optimizer.lr 0.04760701278343385 +1023 58 training.batch_size 1.0 +1023 58 training.label_smoothing 0.04102040298505633 +1023 59 model.embedding_dim 1.0 +1023 59 model.scoring_fct_norm 2.0 +1023 59 optimizer.lr 0.07107047354578787 +1023 59 training.batch_size 2.0 +1023 59 training.label_smoothing 0.08242544704310428 +1023 60 model.embedding_dim 0.0 +1023 60 model.scoring_fct_norm 1.0 +1023 60 optimizer.lr 0.09313969755224903 +1023 60 
training.batch_size 0.0 +1023 60 training.label_smoothing 0.01948464969508709 +1023 61 model.embedding_dim 2.0 +1023 61 model.scoring_fct_norm 2.0 +1023 61 optimizer.lr 0.01911097147683604 +1023 61 training.batch_size 1.0 +1023 61 training.label_smoothing 0.060480323811089066 +1023 62 model.embedding_dim 0.0 +1023 62 model.scoring_fct_norm 2.0 +1023 62 optimizer.lr 0.04543410224607284 +1023 62 training.batch_size 2.0 +1023 62 training.label_smoothing 0.566409443687358 +1023 63 model.embedding_dim 2.0 +1023 63 model.scoring_fct_norm 1.0 +1023 63 optimizer.lr 0.0633415481037026 +1023 63 training.batch_size 2.0 +1023 63 training.label_smoothing 0.47090843998840914 +1023 64 model.embedding_dim 1.0 +1023 64 model.scoring_fct_norm 2.0 +1023 64 optimizer.lr 0.0222832141586961 +1023 64 training.batch_size 0.0 +1023 64 training.label_smoothing 0.0018508940431860505 +1023 65 model.embedding_dim 1.0 +1023 65 model.scoring_fct_norm 1.0 +1023 65 optimizer.lr 0.04789513846897211 +1023 65 training.batch_size 0.0 +1023 65 training.label_smoothing 0.0799563597650221 +1023 66 model.embedding_dim 2.0 +1023 66 model.scoring_fct_norm 2.0 +1023 66 optimizer.lr 0.009067535928403616 +1023 66 training.batch_size 0.0 +1023 66 training.label_smoothing 0.43033733045934486 +1023 67 model.embedding_dim 0.0 +1023 67 model.scoring_fct_norm 1.0 +1023 67 optimizer.lr 0.0781758249324336 +1023 67 training.batch_size 1.0 +1023 67 training.label_smoothing 0.09383293520261715 +1023 68 model.embedding_dim 1.0 +1023 68 model.scoring_fct_norm 1.0 +1023 68 optimizer.lr 0.001994943038939738 +1023 68 training.batch_size 2.0 +1023 68 training.label_smoothing 0.002965972207022713 +1023 69 model.embedding_dim 0.0 +1023 69 model.scoring_fct_norm 2.0 +1023 69 optimizer.lr 0.0010788808286352706 +1023 69 training.batch_size 1.0 +1023 69 training.label_smoothing 0.06407516389510372 +1023 70 model.embedding_dim 0.0 +1023 70 model.scoring_fct_norm 2.0 +1023 70 optimizer.lr 0.023211734425480413 +1023 70 
training.batch_size 1.0 +1023 70 training.label_smoothing 0.8033144170689274 +1023 71 model.embedding_dim 2.0 +1023 71 model.scoring_fct_norm 1.0 +1023 71 optimizer.lr 0.0015624005427769395 +1023 71 training.batch_size 0.0 +1023 71 training.label_smoothing 0.02229832245609873 +1023 72 model.embedding_dim 2.0 +1023 72 model.scoring_fct_norm 2.0 +1023 72 optimizer.lr 0.0022714536116492036 +1023 72 training.batch_size 1.0 +1023 72 training.label_smoothing 0.001111104341409376 +1023 73 model.embedding_dim 1.0 +1023 73 model.scoring_fct_norm 1.0 +1023 73 optimizer.lr 0.013037421914148747 +1023 73 training.batch_size 2.0 +1023 73 training.label_smoothing 0.010881538237059818 +1023 74 model.embedding_dim 2.0 +1023 74 model.scoring_fct_norm 1.0 +1023 74 optimizer.lr 0.039098585550036265 +1023 74 training.batch_size 2.0 +1023 74 training.label_smoothing 0.01385239800874611 +1023 75 model.embedding_dim 0.0 +1023 75 model.scoring_fct_norm 2.0 +1023 75 optimizer.lr 0.02723382306468885 +1023 75 training.batch_size 1.0 +1023 75 training.label_smoothing 0.0013502324423667984 +1023 76 model.embedding_dim 0.0 +1023 76 model.scoring_fct_norm 1.0 +1023 76 optimizer.lr 0.007603762592910461 +1023 76 training.batch_size 2.0 +1023 76 training.label_smoothing 0.010736645563180857 +1023 77 model.embedding_dim 1.0 +1023 77 model.scoring_fct_norm 1.0 +1023 77 optimizer.lr 0.05036831378951067 +1023 77 training.batch_size 1.0 +1023 77 training.label_smoothing 0.014046115830879622 +1023 78 model.embedding_dim 0.0 +1023 78 model.scoring_fct_norm 1.0 +1023 78 optimizer.lr 0.0017471962048813828 +1023 78 training.batch_size 2.0 +1023 78 training.label_smoothing 0.003772423148941833 +1023 79 model.embedding_dim 2.0 +1023 79 model.scoring_fct_norm 2.0 +1023 79 optimizer.lr 0.016813976296467716 +1023 79 training.batch_size 1.0 +1023 79 training.label_smoothing 0.9463387492241471 +1023 80 model.embedding_dim 2.0 +1023 80 model.scoring_fct_norm 2.0 +1023 80 optimizer.lr 0.0036013205631184745 +1023 80 
training.batch_size 1.0 +1023 80 training.label_smoothing 0.0012373704604983265 +1023 81 model.embedding_dim 2.0 +1023 81 model.scoring_fct_norm 1.0 +1023 81 optimizer.lr 0.001148914775965739 +1023 81 training.batch_size 0.0 +1023 81 training.label_smoothing 0.002073411099157353 +1023 82 model.embedding_dim 0.0 +1023 82 model.scoring_fct_norm 2.0 +1023 82 optimizer.lr 0.04103970711500593 +1023 82 training.batch_size 0.0 +1023 82 training.label_smoothing 0.03982770890635642 +1023 83 model.embedding_dim 0.0 +1023 83 model.scoring_fct_norm 1.0 +1023 83 optimizer.lr 0.05594294062073359 +1023 83 training.batch_size 0.0 +1023 83 training.label_smoothing 0.024487085969314544 +1023 84 model.embedding_dim 2.0 +1023 84 model.scoring_fct_norm 2.0 +1023 84 optimizer.lr 0.003689214874998463 +1023 84 training.batch_size 1.0 +1023 84 training.label_smoothing 0.021932961365345834 +1023 85 model.embedding_dim 0.0 +1023 85 model.scoring_fct_norm 2.0 +1023 85 optimizer.lr 0.0014522654545787177 +1023 85 training.batch_size 0.0 +1023 85 training.label_smoothing 0.6178405522759008 +1023 86 model.embedding_dim 1.0 +1023 86 model.scoring_fct_norm 2.0 +1023 86 optimizer.lr 0.004751672072032902 +1023 86 training.batch_size 2.0 +1023 86 training.label_smoothing 0.012256181090293979 +1023 87 model.embedding_dim 1.0 +1023 87 model.scoring_fct_norm 1.0 +1023 87 optimizer.lr 0.004988967665030058 +1023 87 training.batch_size 0.0 +1023 87 training.label_smoothing 0.0058228678035963264 +1023 88 model.embedding_dim 2.0 +1023 88 model.scoring_fct_norm 1.0 +1023 88 optimizer.lr 0.0032467412424920052 +1023 88 training.batch_size 2.0 +1023 88 training.label_smoothing 0.08276870086815108 +1023 89 model.embedding_dim 0.0 +1023 89 model.scoring_fct_norm 1.0 +1023 89 optimizer.lr 0.04620691253479419 +1023 89 training.batch_size 1.0 +1023 89 training.label_smoothing 0.7735243600130773 +1023 90 model.embedding_dim 1.0 +1023 90 model.scoring_fct_norm 1.0 +1023 90 optimizer.lr 0.038700112051924945 +1023 90 
training.batch_size 0.0 +1023 90 training.label_smoothing 0.10291588953818019 +1023 91 model.embedding_dim 2.0 +1023 91 model.scoring_fct_norm 2.0 +1023 91 optimizer.lr 0.05426639665115638 +1023 91 training.batch_size 0.0 +1023 91 training.label_smoothing 0.1614412159780158 +1023 92 model.embedding_dim 1.0 +1023 92 model.scoring_fct_norm 1.0 +1023 92 optimizer.lr 0.031176604309671268 +1023 92 training.batch_size 2.0 +1023 92 training.label_smoothing 0.013083604520736628 +1023 93 model.embedding_dim 2.0 +1023 93 model.scoring_fct_norm 2.0 +1023 93 optimizer.lr 0.004038467160123402 +1023 93 training.batch_size 1.0 +1023 93 training.label_smoothing 0.014316722887818208 +1023 94 model.embedding_dim 1.0 +1023 94 model.scoring_fct_norm 2.0 +1023 94 optimizer.lr 0.025054441117824254 +1023 94 training.batch_size 2.0 +1023 94 training.label_smoothing 0.0018546125742457944 +1023 95 model.embedding_dim 2.0 +1023 95 model.scoring_fct_norm 1.0 +1023 95 optimizer.lr 0.009987605144802472 +1023 95 training.batch_size 0.0 +1023 95 training.label_smoothing 0.002503587099272582 +1023 96 model.embedding_dim 0.0 +1023 96 model.scoring_fct_norm 2.0 +1023 96 optimizer.lr 0.006469065423319346 +1023 96 training.batch_size 2.0 +1023 96 training.label_smoothing 0.14169522215409103 +1023 97 model.embedding_dim 2.0 +1023 97 model.scoring_fct_norm 1.0 +1023 97 optimizer.lr 0.0017356832285034811 +1023 97 training.batch_size 0.0 +1023 97 training.label_smoothing 0.0028948947152529583 +1023 98 model.embedding_dim 0.0 +1023 98 model.scoring_fct_norm 2.0 +1023 98 optimizer.lr 0.0998911150922483 +1023 98 training.batch_size 0.0 +1023 98 training.label_smoothing 0.11351185260951406 +1023 99 model.embedding_dim 2.0 +1023 99 model.scoring_fct_norm 1.0 +1023 99 optimizer.lr 0.04033234961516786 +1023 99 training.batch_size 1.0 +1023 99 training.label_smoothing 0.0029555358289120918 +1023 100 model.embedding_dim 0.0 +1023 100 model.scoring_fct_norm 1.0 +1023 100 optimizer.lr 0.002557636252643034 +1023 100 
training.batch_size 2.0 +1023 100 training.label_smoothing 0.0016190170613718054 +1023 1 dataset """kinships""" +1023 1 model """unstructuredmodel""" +1023 1 loss """softplus""" +1023 1 regularizer """no""" +1023 1 optimizer """adam""" +1023 1 training_loop """lcwa""" +1023 1 evaluator """rankbased""" +1023 2 dataset """kinships""" +1023 2 model """unstructuredmodel""" +1023 2 loss """softplus""" +1023 2 regularizer """no""" +1023 2 optimizer """adam""" +1023 2 training_loop """lcwa""" +1023 2 evaluator """rankbased""" +1023 3 dataset """kinships""" +1023 3 model """unstructuredmodel""" +1023 3 loss """softplus""" +1023 3 regularizer """no""" +1023 3 optimizer """adam""" +1023 3 training_loop """lcwa""" +1023 3 evaluator """rankbased""" +1023 4 dataset """kinships""" +1023 4 model """unstructuredmodel""" +1023 4 loss """softplus""" +1023 4 regularizer """no""" +1023 4 optimizer """adam""" +1023 4 training_loop """lcwa""" +1023 4 evaluator """rankbased""" +1023 5 dataset """kinships""" +1023 5 model """unstructuredmodel""" +1023 5 loss """softplus""" +1023 5 regularizer """no""" +1023 5 optimizer """adam""" +1023 5 training_loop """lcwa""" +1023 5 evaluator """rankbased""" +1023 6 dataset """kinships""" +1023 6 model """unstructuredmodel""" +1023 6 loss """softplus""" +1023 6 regularizer """no""" +1023 6 optimizer """adam""" +1023 6 training_loop """lcwa""" +1023 6 evaluator """rankbased""" +1023 7 dataset """kinships""" +1023 7 model """unstructuredmodel""" +1023 7 loss """softplus""" +1023 7 regularizer """no""" +1023 7 optimizer """adam""" +1023 7 training_loop """lcwa""" +1023 7 evaluator """rankbased""" +1023 8 dataset """kinships""" +1023 8 model """unstructuredmodel""" +1023 8 loss """softplus""" +1023 8 regularizer """no""" +1023 8 optimizer """adam""" +1023 8 training_loop """lcwa""" +1023 8 evaluator """rankbased""" +1023 9 dataset """kinships""" +1023 9 model """unstructuredmodel""" +1023 9 loss """softplus""" +1023 9 regularizer """no""" +1023 9 
optimizer """adam""" +1023 9 training_loop """lcwa""" +1023 9 evaluator """rankbased""" +1023 10 dataset """kinships""" +1023 10 model """unstructuredmodel""" +1023 10 loss """softplus""" +1023 10 regularizer """no""" +1023 10 optimizer """adam""" +1023 10 training_loop """lcwa""" +1023 10 evaluator """rankbased""" +1023 11 dataset """kinships""" +1023 11 model """unstructuredmodel""" +1023 11 loss """softplus""" +1023 11 regularizer """no""" +1023 11 optimizer """adam""" +1023 11 training_loop """lcwa""" +1023 11 evaluator """rankbased""" +1023 12 dataset """kinships""" +1023 12 model """unstructuredmodel""" +1023 12 loss """softplus""" +1023 12 regularizer """no""" +1023 12 optimizer """adam""" +1023 12 training_loop """lcwa""" +1023 12 evaluator """rankbased""" +1023 13 dataset """kinships""" +1023 13 model """unstructuredmodel""" +1023 13 loss """softplus""" +1023 13 regularizer """no""" +1023 13 optimizer """adam""" +1023 13 training_loop """lcwa""" +1023 13 evaluator """rankbased""" +1023 14 dataset """kinships""" +1023 14 model """unstructuredmodel""" +1023 14 loss """softplus""" +1023 14 regularizer """no""" +1023 14 optimizer """adam""" +1023 14 training_loop """lcwa""" +1023 14 evaluator """rankbased""" +1023 15 dataset """kinships""" +1023 15 model """unstructuredmodel""" +1023 15 loss """softplus""" +1023 15 regularizer """no""" +1023 15 optimizer """adam""" +1023 15 training_loop """lcwa""" +1023 15 evaluator """rankbased""" +1023 16 dataset """kinships""" +1023 16 model """unstructuredmodel""" +1023 16 loss """softplus""" +1023 16 regularizer """no""" +1023 16 optimizer """adam""" +1023 16 training_loop """lcwa""" +1023 16 evaluator """rankbased""" +1023 17 dataset """kinships""" +1023 17 model """unstructuredmodel""" +1023 17 loss """softplus""" +1023 17 regularizer """no""" +1023 17 optimizer """adam""" +1023 17 training_loop """lcwa""" +1023 17 evaluator """rankbased""" +1023 18 dataset """kinships""" +1023 18 model """unstructuredmodel""" +1023 18 
loss """softplus""" +1023 18 regularizer """no""" +1023 18 optimizer """adam""" +1023 18 training_loop """lcwa""" +1023 18 evaluator """rankbased""" +1023 19 dataset """kinships""" +1023 19 model """unstructuredmodel""" +1023 19 loss """softplus""" +1023 19 regularizer """no""" +1023 19 optimizer """adam""" +1023 19 training_loop """lcwa""" +1023 19 evaluator """rankbased""" +1023 20 dataset """kinships""" +1023 20 model """unstructuredmodel""" +1023 20 loss """softplus""" +1023 20 regularizer """no""" +1023 20 optimizer """adam""" +1023 20 training_loop """lcwa""" +1023 20 evaluator """rankbased""" +1023 21 dataset """kinships""" +1023 21 model """unstructuredmodel""" +1023 21 loss """softplus""" +1023 21 regularizer """no""" +1023 21 optimizer """adam""" +1023 21 training_loop """lcwa""" +1023 21 evaluator """rankbased""" +1023 22 dataset """kinships""" +1023 22 model """unstructuredmodel""" +1023 22 loss """softplus""" +1023 22 regularizer """no""" +1023 22 optimizer """adam""" +1023 22 training_loop """lcwa""" +1023 22 evaluator """rankbased""" +1023 23 dataset """kinships""" +1023 23 model """unstructuredmodel""" +1023 23 loss """softplus""" +1023 23 regularizer """no""" +1023 23 optimizer """adam""" +1023 23 training_loop """lcwa""" +1023 23 evaluator """rankbased""" +1023 24 dataset """kinships""" +1023 24 model """unstructuredmodel""" +1023 24 loss """softplus""" +1023 24 regularizer """no""" +1023 24 optimizer """adam""" +1023 24 training_loop """lcwa""" +1023 24 evaluator """rankbased""" +1023 25 dataset """kinships""" +1023 25 model """unstructuredmodel""" +1023 25 loss """softplus""" +1023 25 regularizer """no""" +1023 25 optimizer """adam""" +1023 25 training_loop """lcwa""" +1023 25 evaluator """rankbased""" +1023 26 dataset """kinships""" +1023 26 model """unstructuredmodel""" +1023 26 loss """softplus""" +1023 26 regularizer """no""" +1023 26 optimizer """adam""" +1023 26 training_loop """lcwa""" +1023 26 evaluator """rankbased""" +1023 27 dataset 
"""kinships""" +1023 27 model """unstructuredmodel""" +1023 27 loss """softplus""" +1023 27 regularizer """no""" +1023 27 optimizer """adam""" +1023 27 training_loop """lcwa""" +1023 27 evaluator """rankbased""" +1023 28 dataset """kinships""" +1023 28 model """unstructuredmodel""" +1023 28 loss """softplus""" +1023 28 regularizer """no""" +1023 28 optimizer """adam""" +1023 28 training_loop """lcwa""" +1023 28 evaluator """rankbased""" +1023 29 dataset """kinships""" +1023 29 model """unstructuredmodel""" +1023 29 loss """softplus""" +1023 29 regularizer """no""" +1023 29 optimizer """adam""" +1023 29 training_loop """lcwa""" +1023 29 evaluator """rankbased""" +1023 30 dataset """kinships""" +1023 30 model """unstructuredmodel""" +1023 30 loss """softplus""" +1023 30 regularizer """no""" +1023 30 optimizer """adam""" +1023 30 training_loop """lcwa""" +1023 30 evaluator """rankbased""" +1023 31 dataset """kinships""" +1023 31 model """unstructuredmodel""" +1023 31 loss """softplus""" +1023 31 regularizer """no""" +1023 31 optimizer """adam""" +1023 31 training_loop """lcwa""" +1023 31 evaluator """rankbased""" +1023 32 dataset """kinships""" +1023 32 model """unstructuredmodel""" +1023 32 loss """softplus""" +1023 32 regularizer """no""" +1023 32 optimizer """adam""" +1023 32 training_loop """lcwa""" +1023 32 evaluator """rankbased""" +1023 33 dataset """kinships""" +1023 33 model """unstructuredmodel""" +1023 33 loss """softplus""" +1023 33 regularizer """no""" +1023 33 optimizer """adam""" +1023 33 training_loop """lcwa""" +1023 33 evaluator """rankbased""" +1023 34 dataset """kinships""" +1023 34 model """unstructuredmodel""" +1023 34 loss """softplus""" +1023 34 regularizer """no""" +1023 34 optimizer """adam""" +1023 34 training_loop """lcwa""" +1023 34 evaluator """rankbased""" +1023 35 dataset """kinships""" +1023 35 model """unstructuredmodel""" +1023 35 loss """softplus""" +1023 35 regularizer """no""" +1023 35 optimizer """adam""" +1023 35 training_loop 
"""lcwa""" +1023 35 evaluator """rankbased""" +1023 36 dataset """kinships""" +1023 36 model """unstructuredmodel""" +1023 36 loss """softplus""" +1023 36 regularizer """no""" +1023 36 optimizer """adam""" +1023 36 training_loop """lcwa""" +1023 36 evaluator """rankbased""" +1023 37 dataset """kinships""" +1023 37 model """unstructuredmodel""" +1023 37 loss """softplus""" +1023 37 regularizer """no""" +1023 37 optimizer """adam""" +1023 37 training_loop """lcwa""" +1023 37 evaluator """rankbased""" +1023 38 dataset """kinships""" +1023 38 model """unstructuredmodel""" +1023 38 loss """softplus""" +1023 38 regularizer """no""" +1023 38 optimizer """adam""" +1023 38 training_loop """lcwa""" +1023 38 evaluator """rankbased""" +1023 39 dataset """kinships""" +1023 39 model """unstructuredmodel""" +1023 39 loss """softplus""" +1023 39 regularizer """no""" +1023 39 optimizer """adam""" +1023 39 training_loop """lcwa""" +1023 39 evaluator """rankbased""" +1023 40 dataset """kinships""" +1023 40 model """unstructuredmodel""" +1023 40 loss """softplus""" +1023 40 regularizer """no""" +1023 40 optimizer """adam""" +1023 40 training_loop """lcwa""" +1023 40 evaluator """rankbased""" +1023 41 dataset """kinships""" +1023 41 model """unstructuredmodel""" +1023 41 loss """softplus""" +1023 41 regularizer """no""" +1023 41 optimizer """adam""" +1023 41 training_loop """lcwa""" +1023 41 evaluator """rankbased""" +1023 42 dataset """kinships""" +1023 42 model """unstructuredmodel""" +1023 42 loss """softplus""" +1023 42 regularizer """no""" +1023 42 optimizer """adam""" +1023 42 training_loop """lcwa""" +1023 42 evaluator """rankbased""" +1023 43 dataset """kinships""" +1023 43 model """unstructuredmodel""" +1023 43 loss """softplus""" +1023 43 regularizer """no""" +1023 43 optimizer """adam""" +1023 43 training_loop """lcwa""" +1023 43 evaluator """rankbased""" +1023 44 dataset """kinships""" +1023 44 model """unstructuredmodel""" +1023 44 loss """softplus""" +1023 44 regularizer 
"""no""" +1023 44 optimizer """adam""" +1023 44 training_loop """lcwa""" +1023 44 evaluator """rankbased""" +1023 45 dataset """kinships""" +1023 45 model """unstructuredmodel""" +1023 45 loss """softplus""" +1023 45 regularizer """no""" +1023 45 optimizer """adam""" +1023 45 training_loop """lcwa""" +1023 45 evaluator """rankbased""" +1023 46 dataset """kinships""" +1023 46 model """unstructuredmodel""" +1023 46 loss """softplus""" +1023 46 regularizer """no""" +1023 46 optimizer """adam""" +1023 46 training_loop """lcwa""" +1023 46 evaluator """rankbased""" +1023 47 dataset """kinships""" +1023 47 model """unstructuredmodel""" +1023 47 loss """softplus""" +1023 47 regularizer """no""" +1023 47 optimizer """adam""" +1023 47 training_loop """lcwa""" +1023 47 evaluator """rankbased""" +1023 48 dataset """kinships""" +1023 48 model """unstructuredmodel""" +1023 48 loss """softplus""" +1023 48 regularizer """no""" +1023 48 optimizer """adam""" +1023 48 training_loop """lcwa""" +1023 48 evaluator """rankbased""" +1023 49 dataset """kinships""" +1023 49 model """unstructuredmodel""" +1023 49 loss """softplus""" +1023 49 regularizer """no""" +1023 49 optimizer """adam""" +1023 49 training_loop """lcwa""" +1023 49 evaluator """rankbased""" +1023 50 dataset """kinships""" +1023 50 model """unstructuredmodel""" +1023 50 loss """softplus""" +1023 50 regularizer """no""" +1023 50 optimizer """adam""" +1023 50 training_loop """lcwa""" +1023 50 evaluator """rankbased""" +1023 51 dataset """kinships""" +1023 51 model """unstructuredmodel""" +1023 51 loss """softplus""" +1023 51 regularizer """no""" +1023 51 optimizer """adam""" +1023 51 training_loop """lcwa""" +1023 51 evaluator """rankbased""" +1023 52 dataset """kinships""" +1023 52 model """unstructuredmodel""" +1023 52 loss """softplus""" +1023 52 regularizer """no""" +1023 52 optimizer """adam""" +1023 52 training_loop """lcwa""" +1023 52 evaluator """rankbased""" +1023 53 dataset """kinships""" +1023 53 model 
"""unstructuredmodel""" +1023 53 loss """softplus""" +1023 53 regularizer """no""" +1023 53 optimizer """adam""" +1023 53 training_loop """lcwa""" +1023 53 evaluator """rankbased""" +1023 54 dataset """kinships""" +1023 54 model """unstructuredmodel""" +1023 54 loss """softplus""" +1023 54 regularizer """no""" +1023 54 optimizer """adam""" +1023 54 training_loop """lcwa""" +1023 54 evaluator """rankbased""" +1023 55 dataset """kinships""" +1023 55 model """unstructuredmodel""" +1023 55 loss """softplus""" +1023 55 regularizer """no""" +1023 55 optimizer """adam""" +1023 55 training_loop """lcwa""" +1023 55 evaluator """rankbased""" +1023 56 dataset """kinships""" +1023 56 model """unstructuredmodel""" +1023 56 loss """softplus""" +1023 56 regularizer """no""" +1023 56 optimizer """adam""" +1023 56 training_loop """lcwa""" +1023 56 evaluator """rankbased""" +1023 57 dataset """kinships""" +1023 57 model """unstructuredmodel""" +1023 57 loss """softplus""" +1023 57 regularizer """no""" +1023 57 optimizer """adam""" +1023 57 training_loop """lcwa""" +1023 57 evaluator """rankbased""" +1023 58 dataset """kinships""" +1023 58 model """unstructuredmodel""" +1023 58 loss """softplus""" +1023 58 regularizer """no""" +1023 58 optimizer """adam""" +1023 58 training_loop """lcwa""" +1023 58 evaluator """rankbased""" +1023 59 dataset """kinships""" +1023 59 model """unstructuredmodel""" +1023 59 loss """softplus""" +1023 59 regularizer """no""" +1023 59 optimizer """adam""" +1023 59 training_loop """lcwa""" +1023 59 evaluator """rankbased""" +1023 60 dataset """kinships""" +1023 60 model """unstructuredmodel""" +1023 60 loss """softplus""" +1023 60 regularizer """no""" +1023 60 optimizer """adam""" +1023 60 training_loop """lcwa""" +1023 60 evaluator """rankbased""" +1023 61 dataset """kinships""" +1023 61 model """unstructuredmodel""" +1023 61 loss """softplus""" +1023 61 regularizer """no""" +1023 61 optimizer """adam""" +1023 61 training_loop """lcwa""" +1023 61 evaluator 
"""rankbased""" +1023 62 dataset """kinships""" +1023 62 model """unstructuredmodel""" +1023 62 loss """softplus""" +1023 62 regularizer """no""" +1023 62 optimizer """adam""" +1023 62 training_loop """lcwa""" +1023 62 evaluator """rankbased""" +1023 63 dataset """kinships""" +1023 63 model """unstructuredmodel""" +1023 63 loss """softplus""" +1023 63 regularizer """no""" +1023 63 optimizer """adam""" +1023 63 training_loop """lcwa""" +1023 63 evaluator """rankbased""" +1023 64 dataset """kinships""" +1023 64 model """unstructuredmodel""" +1023 64 loss """softplus""" +1023 64 regularizer """no""" +1023 64 optimizer """adam""" +1023 64 training_loop """lcwa""" +1023 64 evaluator """rankbased""" +1023 65 dataset """kinships""" +1023 65 model """unstructuredmodel""" +1023 65 loss """softplus""" +1023 65 regularizer """no""" +1023 65 optimizer """adam""" +1023 65 training_loop """lcwa""" +1023 65 evaluator """rankbased""" +1023 66 dataset """kinships""" +1023 66 model """unstructuredmodel""" +1023 66 loss """softplus""" +1023 66 regularizer """no""" +1023 66 optimizer """adam""" +1023 66 training_loop """lcwa""" +1023 66 evaluator """rankbased""" +1023 67 dataset """kinships""" +1023 67 model """unstructuredmodel""" +1023 67 loss """softplus""" +1023 67 regularizer """no""" +1023 67 optimizer """adam""" +1023 67 training_loop """lcwa""" +1023 67 evaluator """rankbased""" +1023 68 dataset """kinships""" +1023 68 model """unstructuredmodel""" +1023 68 loss """softplus""" +1023 68 regularizer """no""" +1023 68 optimizer """adam""" +1023 68 training_loop """lcwa""" +1023 68 evaluator """rankbased""" +1023 69 dataset """kinships""" +1023 69 model """unstructuredmodel""" +1023 69 loss """softplus""" +1023 69 regularizer """no""" +1023 69 optimizer """adam""" +1023 69 training_loop """lcwa""" +1023 69 evaluator """rankbased""" +1023 70 dataset """kinships""" +1023 70 model """unstructuredmodel""" +1023 70 loss """softplus""" +1023 70 regularizer """no""" +1023 70 optimizer 
"""adam""" +1023 70 training_loop """lcwa""" +1023 70 evaluator """rankbased""" +1023 71 dataset """kinships""" +1023 71 model """unstructuredmodel""" +1023 71 loss """softplus""" +1023 71 regularizer """no""" +1023 71 optimizer """adam""" +1023 71 training_loop """lcwa""" +1023 71 evaluator """rankbased""" +1023 72 dataset """kinships""" +1023 72 model """unstructuredmodel""" +1023 72 loss """softplus""" +1023 72 regularizer """no""" +1023 72 optimizer """adam""" +1023 72 training_loop """lcwa""" +1023 72 evaluator """rankbased""" +1023 73 dataset """kinships""" +1023 73 model """unstructuredmodel""" +1023 73 loss """softplus""" +1023 73 regularizer """no""" +1023 73 optimizer """adam""" +1023 73 training_loop """lcwa""" +1023 73 evaluator """rankbased""" +1023 74 dataset """kinships""" +1023 74 model """unstructuredmodel""" +1023 74 loss """softplus""" +1023 74 regularizer """no""" +1023 74 optimizer """adam""" +1023 74 training_loop """lcwa""" +1023 74 evaluator """rankbased""" +1023 75 dataset """kinships""" +1023 75 model """unstructuredmodel""" +1023 75 loss """softplus""" +1023 75 regularizer """no""" +1023 75 optimizer """adam""" +1023 75 training_loop """lcwa""" +1023 75 evaluator """rankbased""" +1023 76 dataset """kinships""" +1023 76 model """unstructuredmodel""" +1023 76 loss """softplus""" +1023 76 regularizer """no""" +1023 76 optimizer """adam""" +1023 76 training_loop """lcwa""" +1023 76 evaluator """rankbased""" +1023 77 dataset """kinships""" +1023 77 model """unstructuredmodel""" +1023 77 loss """softplus""" +1023 77 regularizer """no""" +1023 77 optimizer """adam""" +1023 77 training_loop """lcwa""" +1023 77 evaluator """rankbased""" +1023 78 dataset """kinships""" +1023 78 model """unstructuredmodel""" +1023 78 loss """softplus""" +1023 78 regularizer """no""" +1023 78 optimizer """adam""" +1023 78 training_loop """lcwa""" +1023 78 evaluator """rankbased""" +1023 79 dataset """kinships""" +1023 79 model """unstructuredmodel""" +1023 79 loss 
"""softplus""" +1023 79 regularizer """no""" +1023 79 optimizer """adam""" +1023 79 training_loop """lcwa""" +1023 79 evaluator """rankbased""" +1023 80 dataset """kinships""" +1023 80 model """unstructuredmodel""" +1023 80 loss """softplus""" +1023 80 regularizer """no""" +1023 80 optimizer """adam""" +1023 80 training_loop """lcwa""" +1023 80 evaluator """rankbased""" +1023 81 dataset """kinships""" +1023 81 model """unstructuredmodel""" +1023 81 loss """softplus""" +1023 81 regularizer """no""" +1023 81 optimizer """adam""" +1023 81 training_loop """lcwa""" +1023 81 evaluator """rankbased""" +1023 82 dataset """kinships""" +1023 82 model """unstructuredmodel""" +1023 82 loss """softplus""" +1023 82 regularizer """no""" +1023 82 optimizer """adam""" +1023 82 training_loop """lcwa""" +1023 82 evaluator """rankbased""" +1023 83 dataset """kinships""" +1023 83 model """unstructuredmodel""" +1023 83 loss """softplus""" +1023 83 regularizer """no""" +1023 83 optimizer """adam""" +1023 83 training_loop """lcwa""" +1023 83 evaluator """rankbased""" +1023 84 dataset """kinships""" +1023 84 model """unstructuredmodel""" +1023 84 loss """softplus""" +1023 84 regularizer """no""" +1023 84 optimizer """adam""" +1023 84 training_loop """lcwa""" +1023 84 evaluator """rankbased""" +1023 85 dataset """kinships""" +1023 85 model """unstructuredmodel""" +1023 85 loss """softplus""" +1023 85 regularizer """no""" +1023 85 optimizer """adam""" +1023 85 training_loop """lcwa""" +1023 85 evaluator """rankbased""" +1023 86 dataset """kinships""" +1023 86 model """unstructuredmodel""" +1023 86 loss """softplus""" +1023 86 regularizer """no""" +1023 86 optimizer """adam""" +1023 86 training_loop """lcwa""" +1023 86 evaluator """rankbased""" +1023 87 dataset """kinships""" +1023 87 model """unstructuredmodel""" +1023 87 loss """softplus""" +1023 87 regularizer """no""" +1023 87 optimizer """adam""" +1023 87 training_loop """lcwa""" +1023 87 evaluator """rankbased""" +1023 88 dataset 
"""kinships""" +1023 88 model """unstructuredmodel""" +1023 88 loss """softplus""" +1023 88 regularizer """no""" +1023 88 optimizer """adam""" +1023 88 training_loop """lcwa""" +1023 88 evaluator """rankbased""" +1023 89 dataset """kinships""" +1023 89 model """unstructuredmodel""" +1023 89 loss """softplus""" +1023 89 regularizer """no""" +1023 89 optimizer """adam""" +1023 89 training_loop """lcwa""" +1023 89 evaluator """rankbased""" +1023 90 dataset """kinships""" +1023 90 model """unstructuredmodel""" +1023 90 loss """softplus""" +1023 90 regularizer """no""" +1023 90 optimizer """adam""" +1023 90 training_loop """lcwa""" +1023 90 evaluator """rankbased""" +1023 91 dataset """kinships""" +1023 91 model """unstructuredmodel""" +1023 91 loss """softplus""" +1023 91 regularizer """no""" +1023 91 optimizer """adam""" +1023 91 training_loop """lcwa""" +1023 91 evaluator """rankbased""" +1023 92 dataset """kinships""" +1023 92 model """unstructuredmodel""" +1023 92 loss """softplus""" +1023 92 regularizer """no""" +1023 92 optimizer """adam""" +1023 92 training_loop """lcwa""" +1023 92 evaluator """rankbased""" +1023 93 dataset """kinships""" +1023 93 model """unstructuredmodel""" +1023 93 loss """softplus""" +1023 93 regularizer """no""" +1023 93 optimizer """adam""" +1023 93 training_loop """lcwa""" +1023 93 evaluator """rankbased""" +1023 94 dataset """kinships""" +1023 94 model """unstructuredmodel""" +1023 94 loss """softplus""" +1023 94 regularizer """no""" +1023 94 optimizer """adam""" +1023 94 training_loop """lcwa""" +1023 94 evaluator """rankbased""" +1023 95 dataset """kinships""" +1023 95 model """unstructuredmodel""" +1023 95 loss """softplus""" +1023 95 regularizer """no""" +1023 95 optimizer """adam""" +1023 95 training_loop """lcwa""" +1023 95 evaluator """rankbased""" +1023 96 dataset """kinships""" +1023 96 model """unstructuredmodel""" +1023 96 loss """softplus""" +1023 96 regularizer """no""" +1023 96 optimizer """adam""" +1023 96 training_loop 
"""lcwa""" +1023 96 evaluator """rankbased""" +1023 97 dataset """kinships""" +1023 97 model """unstructuredmodel""" +1023 97 loss """softplus""" +1023 97 regularizer """no""" +1023 97 optimizer """adam""" +1023 97 training_loop """lcwa""" +1023 97 evaluator """rankbased""" +1023 98 dataset """kinships""" +1023 98 model """unstructuredmodel""" +1023 98 loss """softplus""" +1023 98 regularizer """no""" +1023 98 optimizer """adam""" +1023 98 training_loop """lcwa""" +1023 98 evaluator """rankbased""" +1023 99 dataset """kinships""" +1023 99 model """unstructuredmodel""" +1023 99 loss """softplus""" +1023 99 regularizer """no""" +1023 99 optimizer """adam""" +1023 99 training_loop """lcwa""" +1023 99 evaluator """rankbased""" +1023 100 dataset """kinships""" +1023 100 model """unstructuredmodel""" +1023 100 loss """softplus""" +1023 100 regularizer """no""" +1023 100 optimizer """adam""" +1023 100 training_loop """lcwa""" +1023 100 evaluator """rankbased""" +1024 1 model.embedding_dim 1.0 +1024 1 model.scoring_fct_norm 1.0 +1024 1 optimizer.lr 0.0344846315889543 +1024 1 training.batch_size 2.0 +1024 1 training.label_smoothing 0.038636153653408195 +1024 2 model.embedding_dim 0.0 +1024 2 model.scoring_fct_norm 2.0 +1024 2 optimizer.lr 0.029141431369838013 +1024 2 training.batch_size 1.0 +1024 2 training.label_smoothing 0.0023179495766306765 +1024 3 model.embedding_dim 1.0 +1024 3 model.scoring_fct_norm 2.0 +1024 3 optimizer.lr 0.006098345287280879 +1024 3 training.batch_size 0.0 +1024 3 training.label_smoothing 0.1649203707218102 +1024 4 model.embedding_dim 1.0 +1024 4 model.scoring_fct_norm 2.0 +1024 4 optimizer.lr 0.0011897306288678076 +1024 4 training.batch_size 2.0 +1024 4 training.label_smoothing 0.0022740061925440604 +1024 5 model.embedding_dim 2.0 +1024 5 model.scoring_fct_norm 1.0 +1024 5 optimizer.lr 0.011548290483677287 +1024 5 training.batch_size 0.0 +1024 5 training.label_smoothing 0.0021512062488004617 +1024 6 model.embedding_dim 0.0 +1024 6 
model.scoring_fct_norm 2.0 +1024 6 optimizer.lr 0.0019825296801145205 +1024 6 training.batch_size 0.0 +1024 6 training.label_smoothing 0.7219976573247067 +1024 7 model.embedding_dim 0.0 +1024 7 model.scoring_fct_norm 1.0 +1024 7 optimizer.lr 0.051087116777058204 +1024 7 training.batch_size 1.0 +1024 7 training.label_smoothing 0.01078259447904044 +1024 8 model.embedding_dim 2.0 +1024 8 model.scoring_fct_norm 1.0 +1024 8 optimizer.lr 0.00544100706884113 +1024 8 training.batch_size 0.0 +1024 8 training.label_smoothing 0.03803846124927357 +1024 9 model.embedding_dim 0.0 +1024 9 model.scoring_fct_norm 2.0 +1024 9 optimizer.lr 0.004777187597561432 +1024 9 training.batch_size 1.0 +1024 9 training.label_smoothing 0.11469251918451931 +1024 10 model.embedding_dim 2.0 +1024 10 model.scoring_fct_norm 2.0 +1024 10 optimizer.lr 0.0019668875673688243 +1024 10 training.batch_size 2.0 +1024 10 training.label_smoothing 0.07565646068677412 +1024 11 model.embedding_dim 1.0 +1024 11 model.scoring_fct_norm 2.0 +1024 11 optimizer.lr 0.007334506576929926 +1024 11 training.batch_size 2.0 +1024 11 training.label_smoothing 0.02367455780120047 +1024 12 model.embedding_dim 1.0 +1024 12 model.scoring_fct_norm 1.0 +1024 12 optimizer.lr 0.0011047952899108757 +1024 12 training.batch_size 0.0 +1024 12 training.label_smoothing 0.009361267493791547 +1024 13 model.embedding_dim 0.0 +1024 13 model.scoring_fct_norm 2.0 +1024 13 optimizer.lr 0.045203865123976644 +1024 13 training.batch_size 1.0 +1024 13 training.label_smoothing 0.04389515184607028 +1024 14 model.embedding_dim 2.0 +1024 14 model.scoring_fct_norm 2.0 +1024 14 optimizer.lr 0.008151615522308463 +1024 14 training.batch_size 2.0 +1024 14 training.label_smoothing 0.047569077383895826 +1024 15 model.embedding_dim 0.0 +1024 15 model.scoring_fct_norm 1.0 +1024 15 optimizer.lr 0.01075980175991366 +1024 15 training.batch_size 2.0 +1024 15 training.label_smoothing 0.006599384291395973 +1024 16 model.embedding_dim 2.0 +1024 16 model.scoring_fct_norm 
2.0 +1024 16 optimizer.lr 0.006327840589226325 +1024 16 training.batch_size 0.0 +1024 16 training.label_smoothing 0.0014065233584742698 +1024 17 model.embedding_dim 2.0 +1024 17 model.scoring_fct_norm 2.0 +1024 17 optimizer.lr 0.009060269061310184 +1024 17 training.batch_size 0.0 +1024 17 training.label_smoothing 0.003669389328978747 +1024 18 model.embedding_dim 2.0 +1024 18 model.scoring_fct_norm 1.0 +1024 18 optimizer.lr 0.0021146178634511422 +1024 18 training.batch_size 2.0 +1024 18 training.label_smoothing 0.009405240995896345 +1024 19 model.embedding_dim 2.0 +1024 19 model.scoring_fct_norm 2.0 +1024 19 optimizer.lr 0.009774334576644096 +1024 19 training.batch_size 1.0 +1024 19 training.label_smoothing 0.015160199063672478 +1024 20 model.embedding_dim 2.0 +1024 20 model.scoring_fct_norm 2.0 +1024 20 optimizer.lr 0.0055435723475628935 +1024 20 training.batch_size 1.0 +1024 20 training.label_smoothing 0.3456403983437242 +1024 21 model.embedding_dim 0.0 +1024 21 model.scoring_fct_norm 1.0 +1024 21 optimizer.lr 0.011786204741383172 +1024 21 training.batch_size 2.0 +1024 21 training.label_smoothing 0.9200076799038904 +1024 22 model.embedding_dim 1.0 +1024 22 model.scoring_fct_norm 1.0 +1024 22 optimizer.lr 0.04673029456737364 +1024 22 training.batch_size 0.0 +1024 22 training.label_smoothing 0.001333846394368266 +1024 23 model.embedding_dim 1.0 +1024 23 model.scoring_fct_norm 2.0 +1024 23 optimizer.lr 0.0011533086751790616 +1024 23 training.batch_size 0.0 +1024 23 training.label_smoothing 0.0056342231409534805 +1024 24 model.embedding_dim 0.0 +1024 24 model.scoring_fct_norm 1.0 +1024 24 optimizer.lr 0.020516811185717274 +1024 24 training.batch_size 2.0 +1024 24 training.label_smoothing 0.001936859310126728 +1024 25 model.embedding_dim 0.0 +1024 25 model.scoring_fct_norm 2.0 +1024 25 optimizer.lr 0.007011654211155037 +1024 25 training.batch_size 2.0 +1024 25 training.label_smoothing 0.006719247468229484 +1024 26 model.embedding_dim 1.0 +1024 26 model.scoring_fct_norm 
2.0 +1024 26 optimizer.lr 0.007826473819089129 +1024 26 training.batch_size 1.0 +1024 26 training.label_smoothing 0.0010573797722241794 +1024 27 model.embedding_dim 2.0 +1024 27 model.scoring_fct_norm 2.0 +1024 27 optimizer.lr 0.0030784803553329434 +1024 27 training.batch_size 0.0 +1024 27 training.label_smoothing 0.0027040933417710133 +1024 28 model.embedding_dim 0.0 +1024 28 model.scoring_fct_norm 1.0 +1024 28 optimizer.lr 0.002122108833215085 +1024 28 training.batch_size 2.0 +1024 28 training.label_smoothing 0.953404615008681 +1024 29 model.embedding_dim 1.0 +1024 29 model.scoring_fct_norm 1.0 +1024 29 optimizer.lr 0.0011611915511307066 +1024 29 training.batch_size 0.0 +1024 29 training.label_smoothing 0.001361670649886377 +1024 30 model.embedding_dim 2.0 +1024 30 model.scoring_fct_norm 2.0 +1024 30 optimizer.lr 0.0012707636773928975 +1024 30 training.batch_size 2.0 +1024 30 training.label_smoothing 0.002345527924541067 +1024 31 model.embedding_dim 2.0 +1024 31 model.scoring_fct_norm 2.0 +1024 31 optimizer.lr 0.004921621325589043 +1024 31 training.batch_size 1.0 +1024 31 training.label_smoothing 0.09290243751089586 +1024 32 model.embedding_dim 2.0 +1024 32 model.scoring_fct_norm 1.0 +1024 32 optimizer.lr 0.0263976852064209 +1024 32 training.batch_size 1.0 +1024 32 training.label_smoothing 0.004440083818215007 +1024 33 model.embedding_dim 0.0 +1024 33 model.scoring_fct_norm 2.0 +1024 33 optimizer.lr 0.001748564251679089 +1024 33 training.batch_size 0.0 +1024 33 training.label_smoothing 0.07382949833765239 +1024 34 model.embedding_dim 0.0 +1024 34 model.scoring_fct_norm 2.0 +1024 34 optimizer.lr 0.03290316561897466 +1024 34 training.batch_size 1.0 +1024 34 training.label_smoothing 0.008253626238686959 +1024 35 model.embedding_dim 2.0 +1024 35 model.scoring_fct_norm 2.0 +1024 35 optimizer.lr 0.0011894366493710611 +1024 35 training.batch_size 0.0 +1024 35 training.label_smoothing 0.0018426950819265039 +1024 36 model.embedding_dim 0.0 +1024 36 model.scoring_fct_norm 
1.0 +1024 36 optimizer.lr 0.001686515917834707 +1024 36 training.batch_size 2.0 +1024 36 training.label_smoothing 0.0022914809395171275 +1024 37 model.embedding_dim 2.0 +1024 37 model.scoring_fct_norm 2.0 +1024 37 optimizer.lr 0.0012248485721506927 +1024 37 training.batch_size 1.0 +1024 37 training.label_smoothing 0.4408037946372951 +1024 38 model.embedding_dim 1.0 +1024 38 model.scoring_fct_norm 1.0 +1024 38 optimizer.lr 0.05202395211863978 +1024 38 training.batch_size 0.0 +1024 38 training.label_smoothing 0.004652019928191234 +1024 39 model.embedding_dim 1.0 +1024 39 model.scoring_fct_norm 1.0 +1024 39 optimizer.lr 0.0014310983482665445 +1024 39 training.batch_size 1.0 +1024 39 training.label_smoothing 0.49844429319113903 +1024 40 model.embedding_dim 1.0 +1024 40 model.scoring_fct_norm 2.0 +1024 40 optimizer.lr 0.004770817292449735 +1024 40 training.batch_size 1.0 +1024 40 training.label_smoothing 0.7269905119032763 +1024 41 model.embedding_dim 1.0 +1024 41 model.scoring_fct_norm 2.0 +1024 41 optimizer.lr 0.00489609289296627 +1024 41 training.batch_size 0.0 +1024 41 training.label_smoothing 0.2033095951148103 +1024 42 model.embedding_dim 0.0 +1024 42 model.scoring_fct_norm 1.0 +1024 42 optimizer.lr 0.001459520972442421 +1024 42 training.batch_size 0.0 +1024 42 training.label_smoothing 0.4678391098229232 +1024 43 model.embedding_dim 2.0 +1024 43 model.scoring_fct_norm 1.0 +1024 43 optimizer.lr 0.02708420865793912 +1024 43 training.batch_size 1.0 +1024 43 training.label_smoothing 0.0025297072692063065 +1024 44 model.embedding_dim 0.0 +1024 44 model.scoring_fct_norm 1.0 +1024 44 optimizer.lr 0.011030955257563145 +1024 44 training.batch_size 2.0 +1024 44 training.label_smoothing 0.41337060578306706 +1024 45 model.embedding_dim 2.0 +1024 45 model.scoring_fct_norm 2.0 +1024 45 optimizer.lr 0.0010383672184138995 +1024 45 training.batch_size 1.0 +1024 45 training.label_smoothing 0.0011140894538227918 +1024 46 model.embedding_dim 1.0 +1024 46 model.scoring_fct_norm 1.0 
+1024 46 optimizer.lr 0.0010534480347907942 +1024 46 training.batch_size 2.0 +1024 46 training.label_smoothing 0.0035882294636314224 +1024 47 model.embedding_dim 1.0 +1024 47 model.scoring_fct_norm 2.0 +1024 47 optimizer.lr 0.0016197952514126874 +1024 47 training.batch_size 0.0 +1024 47 training.label_smoothing 0.038173419751731344 +1024 48 model.embedding_dim 2.0 +1024 48 model.scoring_fct_norm 1.0 +1024 48 optimizer.lr 0.01737388162412263 +1024 48 training.batch_size 2.0 +1024 48 training.label_smoothing 0.0111856704875489 +1024 49 model.embedding_dim 0.0 +1024 49 model.scoring_fct_norm 2.0 +1024 49 optimizer.lr 0.0846197954298825 +1024 49 training.batch_size 2.0 +1024 49 training.label_smoothing 0.0016206011573928962 +1024 50 model.embedding_dim 2.0 +1024 50 model.scoring_fct_norm 2.0 +1024 50 optimizer.lr 0.005004455652506222 +1024 50 training.batch_size 2.0 +1024 50 training.label_smoothing 0.08768960913484516 +1024 51 model.embedding_dim 0.0 +1024 51 model.scoring_fct_norm 2.0 +1024 51 optimizer.lr 0.0010477861678391191 +1024 51 training.batch_size 2.0 +1024 51 training.label_smoothing 0.006613367016220783 +1024 52 model.embedding_dim 1.0 +1024 52 model.scoring_fct_norm 1.0 +1024 52 optimizer.lr 0.00475369091177298 +1024 52 training.batch_size 1.0 +1024 52 training.label_smoothing 0.0010452167615145327 +1024 53 model.embedding_dim 0.0 +1024 53 model.scoring_fct_norm 1.0 +1024 53 optimizer.lr 0.043987115333995054 +1024 53 training.batch_size 1.0 +1024 53 training.label_smoothing 0.002101732037379843 +1024 54 model.embedding_dim 0.0 +1024 54 model.scoring_fct_norm 1.0 +1024 54 optimizer.lr 0.00652818915151672 +1024 54 training.batch_size 2.0 +1024 54 training.label_smoothing 0.012756248899493408 +1024 55 model.embedding_dim 1.0 +1024 55 model.scoring_fct_norm 1.0 +1024 55 optimizer.lr 0.025268624690055783 +1024 55 training.batch_size 1.0 +1024 55 training.label_smoothing 0.8486099189968836 +1024 56 model.embedding_dim 1.0 +1024 56 model.scoring_fct_norm 2.0 
+1024 56 optimizer.lr 0.0010861204597849927 +1024 56 training.batch_size 1.0 +1024 56 training.label_smoothing 0.32050300134415927 +1024 57 model.embedding_dim 2.0 +1024 57 model.scoring_fct_norm 1.0 +1024 57 optimizer.lr 0.0028103598697001154 +1024 57 training.batch_size 0.0 +1024 57 training.label_smoothing 0.012435594537756845 +1024 58 model.embedding_dim 0.0 +1024 58 model.scoring_fct_norm 1.0 +1024 58 optimizer.lr 0.003262788250638605 +1024 58 training.batch_size 1.0 +1024 58 training.label_smoothing 0.6410256115275924 +1024 59 model.embedding_dim 1.0 +1024 59 model.scoring_fct_norm 2.0 +1024 59 optimizer.lr 0.052485544078150065 +1024 59 training.batch_size 0.0 +1024 59 training.label_smoothing 0.18125824290535242 +1024 60 model.embedding_dim 2.0 +1024 60 model.scoring_fct_norm 2.0 +1024 60 optimizer.lr 0.012713560306644665 +1024 60 training.batch_size 1.0 +1024 60 training.label_smoothing 0.035963119935140155 +1024 61 model.embedding_dim 1.0 +1024 61 model.scoring_fct_norm 2.0 +1024 61 optimizer.lr 0.017635495643254114 +1024 61 training.batch_size 1.0 +1024 61 training.label_smoothing 0.019402665151500752 +1024 62 model.embedding_dim 1.0 +1024 62 model.scoring_fct_norm 2.0 +1024 62 optimizer.lr 0.08354850885559215 +1024 62 training.batch_size 0.0 +1024 62 training.label_smoothing 0.0799352735941351 +1024 63 model.embedding_dim 2.0 +1024 63 model.scoring_fct_norm 1.0 +1024 63 optimizer.lr 0.09327585027090257 +1024 63 training.batch_size 2.0 +1024 63 training.label_smoothing 0.03348184710526609 +1024 64 model.embedding_dim 2.0 +1024 64 model.scoring_fct_norm 1.0 +1024 64 optimizer.lr 0.006516747474538647 +1024 64 training.batch_size 2.0 +1024 64 training.label_smoothing 0.04308381070880526 +1024 65 model.embedding_dim 1.0 +1024 65 model.scoring_fct_norm 1.0 +1024 65 optimizer.lr 0.0016932160983806075 +1024 65 training.batch_size 1.0 +1024 65 training.label_smoothing 0.18609604241324712 +1024 66 model.embedding_dim 2.0 +1024 66 model.scoring_fct_norm 1.0 +1024 
66 optimizer.lr 0.05533533191671331 +1024 66 training.batch_size 1.0 +1024 66 training.label_smoothing 0.016601602005686004 +1024 67 model.embedding_dim 0.0 +1024 67 model.scoring_fct_norm 2.0 +1024 67 optimizer.lr 0.0010825813635931833 +1024 67 training.batch_size 2.0 +1024 67 training.label_smoothing 0.9772519704091135 +1024 68 model.embedding_dim 2.0 +1024 68 model.scoring_fct_norm 2.0 +1024 68 optimizer.lr 0.025832189408067755 +1024 68 training.batch_size 2.0 +1024 68 training.label_smoothing 0.012618887687500007 +1024 69 model.embedding_dim 2.0 +1024 69 model.scoring_fct_norm 1.0 +1024 69 optimizer.lr 0.007289688536163045 +1024 69 training.batch_size 0.0 +1024 69 training.label_smoothing 0.06484911018192695 +1024 70 model.embedding_dim 2.0 +1024 70 model.scoring_fct_norm 1.0 +1024 70 optimizer.lr 0.0013400902502047265 +1024 70 training.batch_size 1.0 +1024 70 training.label_smoothing 0.006191059853838221 +1024 71 model.embedding_dim 1.0 +1024 71 model.scoring_fct_norm 2.0 +1024 71 optimizer.lr 0.002236741582187101 +1024 71 training.batch_size 2.0 +1024 71 training.label_smoothing 0.017542244405822718 +1024 72 model.embedding_dim 1.0 +1024 72 model.scoring_fct_norm 1.0 +1024 72 optimizer.lr 0.06722148730677695 +1024 72 training.batch_size 0.0 +1024 72 training.label_smoothing 0.004449519652809614 +1024 73 model.embedding_dim 0.0 +1024 73 model.scoring_fct_norm 1.0 +1024 73 optimizer.lr 0.010797767559141329 +1024 73 training.batch_size 0.0 +1024 73 training.label_smoothing 0.0012923238863844103 +1024 74 model.embedding_dim 0.0 +1024 74 model.scoring_fct_norm 1.0 +1024 74 optimizer.lr 0.0017114540089481938 +1024 74 training.batch_size 1.0 +1024 74 training.label_smoothing 0.011063796242655813 +1024 75 model.embedding_dim 2.0 +1024 75 model.scoring_fct_norm 1.0 +1024 75 optimizer.lr 0.07914888569039545 +1024 75 training.batch_size 2.0 +1024 75 training.label_smoothing 0.0021907214140777114 +1024 76 model.embedding_dim 2.0 +1024 76 model.scoring_fct_norm 1.0 +1024 
76 optimizer.lr 0.07460714664548392 +1024 76 training.batch_size 0.0 +1024 76 training.label_smoothing 0.020366539535484385 +1024 77 model.embedding_dim 0.0 +1024 77 model.scoring_fct_norm 2.0 +1024 77 optimizer.lr 0.029878280740594537 +1024 77 training.batch_size 1.0 +1024 77 training.label_smoothing 0.019975025806557363 +1024 78 model.embedding_dim 1.0 +1024 78 model.scoring_fct_norm 2.0 +1024 78 optimizer.lr 0.028098034327787354 +1024 78 training.batch_size 0.0 +1024 78 training.label_smoothing 0.052063882531464226 +1024 79 model.embedding_dim 2.0 +1024 79 model.scoring_fct_norm 2.0 +1024 79 optimizer.lr 0.0024459043223051848 +1024 79 training.batch_size 2.0 +1024 79 training.label_smoothing 0.06986961648065056 +1024 80 model.embedding_dim 2.0 +1024 80 model.scoring_fct_norm 1.0 +1024 80 optimizer.lr 0.09467649405874191 +1024 80 training.batch_size 0.0 +1024 80 training.label_smoothing 0.002822134243332432 +1024 81 model.embedding_dim 2.0 +1024 81 model.scoring_fct_norm 2.0 +1024 81 optimizer.lr 0.0019063041258189735 +1024 81 training.batch_size 2.0 +1024 81 training.label_smoothing 0.005628350362978394 +1024 82 model.embedding_dim 2.0 +1024 82 model.scoring_fct_norm 1.0 +1024 82 optimizer.lr 0.0068327348312124375 +1024 82 training.batch_size 2.0 +1024 82 training.label_smoothing 0.007545643048730969 +1024 83 model.embedding_dim 2.0 +1024 83 model.scoring_fct_norm 1.0 +1024 83 optimizer.lr 0.04783775249547998 +1024 83 training.batch_size 2.0 +1024 83 training.label_smoothing 0.003934653167001288 +1024 84 model.embedding_dim 1.0 +1024 84 model.scoring_fct_norm 1.0 +1024 84 optimizer.lr 0.0022151067195161915 +1024 84 training.batch_size 2.0 +1024 84 training.label_smoothing 0.20488027233216236 +1024 85 model.embedding_dim 2.0 +1024 85 model.scoring_fct_norm 1.0 +1024 85 optimizer.lr 0.023992132161980286 +1024 85 training.batch_size 1.0 +1024 85 training.label_smoothing 0.02516300180669387 +1024 86 model.embedding_dim 2.0 +1024 86 model.scoring_fct_norm 2.0 +1024 
86 optimizer.lr 0.007356698239880807 +1024 86 training.batch_size 0.0 +1024 86 training.label_smoothing 0.09666929256898923 +1024 87 model.embedding_dim 0.0 +1024 87 model.scoring_fct_norm 2.0 +1024 87 optimizer.lr 0.026341809278099322 +1024 87 training.batch_size 1.0 +1024 87 training.label_smoothing 0.006166081203099581 +1024 88 model.embedding_dim 0.0 +1024 88 model.scoring_fct_norm 2.0 +1024 88 optimizer.lr 0.001742065641647749 +1024 88 training.batch_size 2.0 +1024 88 training.label_smoothing 0.0589175201214894 +1024 89 model.embedding_dim 0.0 +1024 89 model.scoring_fct_norm 1.0 +1024 89 optimizer.lr 0.0018090850836663926 +1024 89 training.batch_size 0.0 +1024 89 training.label_smoothing 0.003447226433977537 +1024 90 model.embedding_dim 2.0 +1024 90 model.scoring_fct_norm 2.0 +1024 90 optimizer.lr 0.04788403523358381 +1024 90 training.batch_size 0.0 +1024 90 training.label_smoothing 0.0028389193470925228 +1024 91 model.embedding_dim 2.0 +1024 91 model.scoring_fct_norm 2.0 +1024 91 optimizer.lr 0.004003246177343419 +1024 91 training.batch_size 1.0 +1024 91 training.label_smoothing 0.009576734350034066 +1024 92 model.embedding_dim 0.0 +1024 92 model.scoring_fct_norm 2.0 +1024 92 optimizer.lr 0.0011488166570692367 +1024 92 training.batch_size 1.0 +1024 92 training.label_smoothing 0.06594486911694561 +1024 93 model.embedding_dim 0.0 +1024 93 model.scoring_fct_norm 2.0 +1024 93 optimizer.lr 0.014509175296793698 +1024 93 training.batch_size 0.0 +1024 93 training.label_smoothing 0.002844810702201446 +1024 94 model.embedding_dim 0.0 +1024 94 model.scoring_fct_norm 2.0 +1024 94 optimizer.lr 0.011207530945365963 +1024 94 training.batch_size 1.0 +1024 94 training.label_smoothing 0.27406853683451715 +1024 95 model.embedding_dim 0.0 +1024 95 model.scoring_fct_norm 2.0 +1024 95 optimizer.lr 0.0012876754069985695 +1024 95 training.batch_size 0.0 +1024 95 training.label_smoothing 0.003970639920039213 +1024 96 model.embedding_dim 0.0 +1024 96 model.scoring_fct_norm 2.0 +1024 
96 optimizer.lr 0.002267835517971083 +1024 96 training.batch_size 2.0 +1024 96 training.label_smoothing 0.2115577608120169 +1024 97 model.embedding_dim 1.0 +1024 97 model.scoring_fct_norm 1.0 +1024 97 optimizer.lr 0.028993311530873065 +1024 97 training.batch_size 2.0 +1024 97 training.label_smoothing 0.043529040110694785 +1024 98 model.embedding_dim 2.0 +1024 98 model.scoring_fct_norm 1.0 +1024 98 optimizer.lr 0.05937135201593906 +1024 98 training.batch_size 0.0 +1024 98 training.label_smoothing 0.017389984507376065 +1024 99 model.embedding_dim 0.0 +1024 99 model.scoring_fct_norm 2.0 +1024 99 optimizer.lr 0.022394816775930974 +1024 99 training.batch_size 0.0 +1024 99 training.label_smoothing 0.04172511796353882 +1024 100 model.embedding_dim 0.0 +1024 100 model.scoring_fct_norm 2.0 +1024 100 optimizer.lr 0.0013140879104379078 +1024 100 training.batch_size 0.0 +1024 100 training.label_smoothing 0.052207775220961435 +1024 1 dataset """kinships""" +1024 1 model """unstructuredmodel""" +1024 1 loss """crossentropy""" +1024 1 regularizer """no""" +1024 1 optimizer """adam""" +1024 1 training_loop """lcwa""" +1024 1 evaluator """rankbased""" +1024 2 dataset """kinships""" +1024 2 model """unstructuredmodel""" +1024 2 loss """crossentropy""" +1024 2 regularizer """no""" +1024 2 optimizer """adam""" +1024 2 training_loop """lcwa""" +1024 2 evaluator """rankbased""" +1024 3 dataset """kinships""" +1024 3 model """unstructuredmodel""" +1024 3 loss """crossentropy""" +1024 3 regularizer """no""" +1024 3 optimizer """adam""" +1024 3 training_loop """lcwa""" +1024 3 evaluator """rankbased""" +1024 4 dataset """kinships""" +1024 4 model """unstructuredmodel""" +1024 4 loss """crossentropy""" +1024 4 regularizer """no""" +1024 4 optimizer """adam""" +1024 4 training_loop """lcwa""" +1024 4 evaluator """rankbased""" +1024 5 dataset """kinships""" +1024 5 model """unstructuredmodel""" +1024 5 loss """crossentropy""" +1024 5 regularizer """no""" +1024 5 optimizer """adam""" +1024 5 
training_loop """lcwa""" +1024 5 evaluator """rankbased""" +1024 6 dataset """kinships""" +1024 6 model """unstructuredmodel""" +1024 6 loss """crossentropy""" +1024 6 regularizer """no""" +1024 6 optimizer """adam""" +1024 6 training_loop """lcwa""" +1024 6 evaluator """rankbased""" +1024 7 dataset """kinships""" +1024 7 model """unstructuredmodel""" +1024 7 loss """crossentropy""" +1024 7 regularizer """no""" +1024 7 optimizer """adam""" +1024 7 training_loop """lcwa""" +1024 7 evaluator """rankbased""" +1024 8 dataset """kinships""" +1024 8 model """unstructuredmodel""" +1024 8 loss """crossentropy""" +1024 8 regularizer """no""" +1024 8 optimizer """adam""" +1024 8 training_loop """lcwa""" +1024 8 evaluator """rankbased""" +1024 9 dataset """kinships""" +1024 9 model """unstructuredmodel""" +1024 9 loss """crossentropy""" +1024 9 regularizer """no""" +1024 9 optimizer """adam""" +1024 9 training_loop """lcwa""" +1024 9 evaluator """rankbased""" +1024 10 dataset """kinships""" +1024 10 model """unstructuredmodel""" +1024 10 loss """crossentropy""" +1024 10 regularizer """no""" +1024 10 optimizer """adam""" +1024 10 training_loop """lcwa""" +1024 10 evaluator """rankbased""" +1024 11 dataset """kinships""" +1024 11 model """unstructuredmodel""" +1024 11 loss """crossentropy""" +1024 11 regularizer """no""" +1024 11 optimizer """adam""" +1024 11 training_loop """lcwa""" +1024 11 evaluator """rankbased""" +1024 12 dataset """kinships""" +1024 12 model """unstructuredmodel""" +1024 12 loss """crossentropy""" +1024 12 regularizer """no""" +1024 12 optimizer """adam""" +1024 12 training_loop """lcwa""" +1024 12 evaluator """rankbased""" +1024 13 dataset """kinships""" +1024 13 model """unstructuredmodel""" +1024 13 loss """crossentropy""" +1024 13 regularizer """no""" +1024 13 optimizer """adam""" +1024 13 training_loop """lcwa""" +1024 13 evaluator """rankbased""" +1024 14 dataset """kinships""" +1024 14 model """unstructuredmodel""" +1024 14 loss """crossentropy""" 
+1024 14 regularizer """no""" +1024 14 optimizer """adam""" +1024 14 training_loop """lcwa""" +1024 14 evaluator """rankbased""" +1024 15 dataset """kinships""" +1024 15 model """unstructuredmodel""" +1024 15 loss """crossentropy""" +1024 15 regularizer """no""" +1024 15 optimizer """adam""" +1024 15 training_loop """lcwa""" +1024 15 evaluator """rankbased""" +1024 16 dataset """kinships""" +1024 16 model """unstructuredmodel""" +1024 16 loss """crossentropy""" +1024 16 regularizer """no""" +1024 16 optimizer """adam""" +1024 16 training_loop """lcwa""" +1024 16 evaluator """rankbased""" +1024 17 dataset """kinships""" +1024 17 model """unstructuredmodel""" +1024 17 loss """crossentropy""" +1024 17 regularizer """no""" +1024 17 optimizer """adam""" +1024 17 training_loop """lcwa""" +1024 17 evaluator """rankbased""" +1024 18 dataset """kinships""" +1024 18 model """unstructuredmodel""" +1024 18 loss """crossentropy""" +1024 18 regularizer """no""" +1024 18 optimizer """adam""" +1024 18 training_loop """lcwa""" +1024 18 evaluator """rankbased""" +1024 19 dataset """kinships""" +1024 19 model """unstructuredmodel""" +1024 19 loss """crossentropy""" +1024 19 regularizer """no""" +1024 19 optimizer """adam""" +1024 19 training_loop """lcwa""" +1024 19 evaluator """rankbased""" +1024 20 dataset """kinships""" +1024 20 model """unstructuredmodel""" +1024 20 loss """crossentropy""" +1024 20 regularizer """no""" +1024 20 optimizer """adam""" +1024 20 training_loop """lcwa""" +1024 20 evaluator """rankbased""" +1024 21 dataset """kinships""" +1024 21 model """unstructuredmodel""" +1024 21 loss """crossentropy""" +1024 21 regularizer """no""" +1024 21 optimizer """adam""" +1024 21 training_loop """lcwa""" +1024 21 evaluator """rankbased""" +1024 22 dataset """kinships""" +1024 22 model """unstructuredmodel""" +1024 22 loss """crossentropy""" +1024 22 regularizer """no""" +1024 22 optimizer """adam""" +1024 22 training_loop """lcwa""" +1024 22 evaluator """rankbased""" +1024 
23 dataset """kinships""" +1024 23 model """unstructuredmodel""" +1024 23 loss """crossentropy""" +1024 23 regularizer """no""" +1024 23 optimizer """adam""" +1024 23 training_loop """lcwa""" +1024 23 evaluator """rankbased""" +1024 24 dataset """kinships""" +1024 24 model """unstructuredmodel""" +1024 24 loss """crossentropy""" +1024 24 regularizer """no""" +1024 24 optimizer """adam""" +1024 24 training_loop """lcwa""" +1024 24 evaluator """rankbased""" +1024 25 dataset """kinships""" +1024 25 model """unstructuredmodel""" +1024 25 loss """crossentropy""" +1024 25 regularizer """no""" +1024 25 optimizer """adam""" +1024 25 training_loop """lcwa""" +1024 25 evaluator """rankbased""" +1024 26 dataset """kinships""" +1024 26 model """unstructuredmodel""" +1024 26 loss """crossentropy""" +1024 26 regularizer """no""" +1024 26 optimizer """adam""" +1024 26 training_loop """lcwa""" +1024 26 evaluator """rankbased""" +1024 27 dataset """kinships""" +1024 27 model """unstructuredmodel""" +1024 27 loss """crossentropy""" +1024 27 regularizer """no""" +1024 27 optimizer """adam""" +1024 27 training_loop """lcwa""" +1024 27 evaluator """rankbased""" +1024 28 dataset """kinships""" +1024 28 model """unstructuredmodel""" +1024 28 loss """crossentropy""" +1024 28 regularizer """no""" +1024 28 optimizer """adam""" +1024 28 training_loop """lcwa""" +1024 28 evaluator """rankbased""" +1024 29 dataset """kinships""" +1024 29 model """unstructuredmodel""" +1024 29 loss """crossentropy""" +1024 29 regularizer """no""" +1024 29 optimizer """adam""" +1024 29 training_loop """lcwa""" +1024 29 evaluator """rankbased""" +1024 30 dataset """kinships""" +1024 30 model """unstructuredmodel""" +1024 30 loss """crossentropy""" +1024 30 regularizer """no""" +1024 30 optimizer """adam""" +1024 30 training_loop """lcwa""" +1024 30 evaluator """rankbased""" +1024 31 dataset """kinships""" +1024 31 model """unstructuredmodel""" +1024 31 loss """crossentropy""" +1024 31 regularizer """no""" +1024 
31 optimizer """adam""" +1024 31 training_loop """lcwa""" +1024 31 evaluator """rankbased""" +1024 32 dataset """kinships""" +1024 32 model """unstructuredmodel""" +1024 32 loss """crossentropy""" +1024 32 regularizer """no""" +1024 32 optimizer """adam""" +1024 32 training_loop """lcwa""" +1024 32 evaluator """rankbased""" +1024 33 dataset """kinships""" +1024 33 model """unstructuredmodel""" +1024 33 loss """crossentropy""" +1024 33 regularizer """no""" +1024 33 optimizer """adam""" +1024 33 training_loop """lcwa""" +1024 33 evaluator """rankbased""" +1024 34 dataset """kinships""" +1024 34 model """unstructuredmodel""" +1024 34 loss """crossentropy""" +1024 34 regularizer """no""" +1024 34 optimizer """adam""" +1024 34 training_loop """lcwa""" +1024 34 evaluator """rankbased""" +1024 35 dataset """kinships""" +1024 35 model """unstructuredmodel""" +1024 35 loss """crossentropy""" +1024 35 regularizer """no""" +1024 35 optimizer """adam""" +1024 35 training_loop """lcwa""" +1024 35 evaluator """rankbased""" +1024 36 dataset """kinships""" +1024 36 model """unstructuredmodel""" +1024 36 loss """crossentropy""" +1024 36 regularizer """no""" +1024 36 optimizer """adam""" +1024 36 training_loop """lcwa""" +1024 36 evaluator """rankbased""" +1024 37 dataset """kinships""" +1024 37 model """unstructuredmodel""" +1024 37 loss """crossentropy""" +1024 37 regularizer """no""" +1024 37 optimizer """adam""" +1024 37 training_loop """lcwa""" +1024 37 evaluator """rankbased""" +1024 38 dataset """kinships""" +1024 38 model """unstructuredmodel""" +1024 38 loss """crossentropy""" +1024 38 regularizer """no""" +1024 38 optimizer """adam""" +1024 38 training_loop """lcwa""" +1024 38 evaluator """rankbased""" +1024 39 dataset """kinships""" +1024 39 model """unstructuredmodel""" +1024 39 loss """crossentropy""" +1024 39 regularizer """no""" +1024 39 optimizer """adam""" +1024 39 training_loop """lcwa""" +1024 39 evaluator """rankbased""" +1024 40 dataset """kinships""" +1024 40 
model """unstructuredmodel""" +1024 40 loss """crossentropy""" +1024 40 regularizer """no""" +1024 40 optimizer """adam""" +1024 40 training_loop """lcwa""" +1024 40 evaluator """rankbased""" +1024 41 dataset """kinships""" +1024 41 model """unstructuredmodel""" +1024 41 loss """crossentropy""" +1024 41 regularizer """no""" +1024 41 optimizer """adam""" +1024 41 training_loop """lcwa""" +1024 41 evaluator """rankbased""" +1024 42 dataset """kinships""" +1024 42 model """unstructuredmodel""" +1024 42 loss """crossentropy""" +1024 42 regularizer """no""" +1024 42 optimizer """adam""" +1024 42 training_loop """lcwa""" +1024 42 evaluator """rankbased""" +1024 43 dataset """kinships""" +1024 43 model """unstructuredmodel""" +1024 43 loss """crossentropy""" +1024 43 regularizer """no""" +1024 43 optimizer """adam""" +1024 43 training_loop """lcwa""" +1024 43 evaluator """rankbased""" +1024 44 dataset """kinships""" +1024 44 model """unstructuredmodel""" +1024 44 loss """crossentropy""" +1024 44 regularizer """no""" +1024 44 optimizer """adam""" +1024 44 training_loop """lcwa""" +1024 44 evaluator """rankbased""" +1024 45 dataset """kinships""" +1024 45 model """unstructuredmodel""" +1024 45 loss """crossentropy""" +1024 45 regularizer """no""" +1024 45 optimizer """adam""" +1024 45 training_loop """lcwa""" +1024 45 evaluator """rankbased""" +1024 46 dataset """kinships""" +1024 46 model """unstructuredmodel""" +1024 46 loss """crossentropy""" +1024 46 regularizer """no""" +1024 46 optimizer """adam""" +1024 46 training_loop """lcwa""" +1024 46 evaluator """rankbased""" +1024 47 dataset """kinships""" +1024 47 model """unstructuredmodel""" +1024 47 loss """crossentropy""" +1024 47 regularizer """no""" +1024 47 optimizer """adam""" +1024 47 training_loop """lcwa""" +1024 47 evaluator """rankbased""" +1024 48 dataset """kinships""" +1024 48 model """unstructuredmodel""" +1024 48 loss """crossentropy""" +1024 48 regularizer """no""" +1024 48 optimizer """adam""" +1024 48 
training_loop """lcwa""" +1024 48 evaluator """rankbased""" +1024 49 dataset """kinships""" +1024 49 model """unstructuredmodel""" +1024 49 loss """crossentropy""" +1024 49 regularizer """no""" +1024 49 optimizer """adam""" +1024 49 training_loop """lcwa""" +1024 49 evaluator """rankbased""" +1024 50 dataset """kinships""" +1024 50 model """unstructuredmodel""" +1024 50 loss """crossentropy""" +1024 50 regularizer """no""" +1024 50 optimizer """adam""" +1024 50 training_loop """lcwa""" +1024 50 evaluator """rankbased""" +1024 51 dataset """kinships""" +1024 51 model """unstructuredmodel""" +1024 51 loss """crossentropy""" +1024 51 regularizer """no""" +1024 51 optimizer """adam""" +1024 51 training_loop """lcwa""" +1024 51 evaluator """rankbased""" +1024 52 dataset """kinships""" +1024 52 model """unstructuredmodel""" +1024 52 loss """crossentropy""" +1024 52 regularizer """no""" +1024 52 optimizer """adam""" +1024 52 training_loop """lcwa""" +1024 52 evaluator """rankbased""" +1024 53 dataset """kinships""" +1024 53 model """unstructuredmodel""" +1024 53 loss """crossentropy""" +1024 53 regularizer """no""" +1024 53 optimizer """adam""" +1024 53 training_loop """lcwa""" +1024 53 evaluator """rankbased""" +1024 54 dataset """kinships""" +1024 54 model """unstructuredmodel""" +1024 54 loss """crossentropy""" +1024 54 regularizer """no""" +1024 54 optimizer """adam""" +1024 54 training_loop """lcwa""" +1024 54 evaluator """rankbased""" +1024 55 dataset """kinships""" +1024 55 model """unstructuredmodel""" +1024 55 loss """crossentropy""" +1024 55 regularizer """no""" +1024 55 optimizer """adam""" +1024 55 training_loop """lcwa""" +1024 55 evaluator """rankbased""" +1024 56 dataset """kinships""" +1024 56 model """unstructuredmodel""" +1024 56 loss """crossentropy""" +1024 56 regularizer """no""" +1024 56 optimizer """adam""" +1024 56 training_loop """lcwa""" +1024 56 evaluator """rankbased""" +1024 57 dataset """kinships""" +1024 57 model """unstructuredmodel""" 
+1024 57 loss """crossentropy""" +1024 57 regularizer """no""" +1024 57 optimizer """adam""" +1024 57 training_loop """lcwa""" +1024 57 evaluator """rankbased""" +1024 58 dataset """kinships""" +1024 58 model """unstructuredmodel""" +1024 58 loss """crossentropy""" +1024 58 regularizer """no""" +1024 58 optimizer """adam""" +1024 58 training_loop """lcwa""" +1024 58 evaluator """rankbased""" +1024 59 dataset """kinships""" +1024 59 model """unstructuredmodel""" +1024 59 loss """crossentropy""" +1024 59 regularizer """no""" +1024 59 optimizer """adam""" +1024 59 training_loop """lcwa""" +1024 59 evaluator """rankbased""" +1024 60 dataset """kinships""" +1024 60 model """unstructuredmodel""" +1024 60 loss """crossentropy""" +1024 60 regularizer """no""" +1024 60 optimizer """adam""" +1024 60 training_loop """lcwa""" +1024 60 evaluator """rankbased""" +1024 61 dataset """kinships""" +1024 61 model """unstructuredmodel""" +1024 61 loss """crossentropy""" +1024 61 regularizer """no""" +1024 61 optimizer """adam""" +1024 61 training_loop """lcwa""" +1024 61 evaluator """rankbased""" +1024 62 dataset """kinships""" +1024 62 model """unstructuredmodel""" +1024 62 loss """crossentropy""" +1024 62 regularizer """no""" +1024 62 optimizer """adam""" +1024 62 training_loop """lcwa""" +1024 62 evaluator """rankbased""" +1024 63 dataset """kinships""" +1024 63 model """unstructuredmodel""" +1024 63 loss """crossentropy""" +1024 63 regularizer """no""" +1024 63 optimizer """adam""" +1024 63 training_loop """lcwa""" +1024 63 evaluator """rankbased""" +1024 64 dataset """kinships""" +1024 64 model """unstructuredmodel""" +1024 64 loss """crossentropy""" +1024 64 regularizer """no""" +1024 64 optimizer """adam""" +1024 64 training_loop """lcwa""" +1024 64 evaluator """rankbased""" +1024 65 dataset """kinships""" +1024 65 model """unstructuredmodel""" +1024 65 loss """crossentropy""" +1024 65 regularizer """no""" +1024 65 optimizer """adam""" +1024 65 training_loop """lcwa""" +1024 65 
evaluator """rankbased""" +1024 66 dataset """kinships""" +1024 66 model """unstructuredmodel""" +1024 66 loss """crossentropy""" +1024 66 regularizer """no""" +1024 66 optimizer """adam""" +1024 66 training_loop """lcwa""" +1024 66 evaluator """rankbased""" +1024 67 dataset """kinships""" +1024 67 model """unstructuredmodel""" +1024 67 loss """crossentropy""" +1024 67 regularizer """no""" +1024 67 optimizer """adam""" +1024 67 training_loop """lcwa""" +1024 67 evaluator """rankbased""" +1024 68 dataset """kinships""" +1024 68 model """unstructuredmodel""" +1024 68 loss """crossentropy""" +1024 68 regularizer """no""" +1024 68 optimizer """adam""" +1024 68 training_loop """lcwa""" +1024 68 evaluator """rankbased""" +1024 69 dataset """kinships""" +1024 69 model """unstructuredmodel""" +1024 69 loss """crossentropy""" +1024 69 regularizer """no""" +1024 69 optimizer """adam""" +1024 69 training_loop """lcwa""" +1024 69 evaluator """rankbased""" +1024 70 dataset """kinships""" +1024 70 model """unstructuredmodel""" +1024 70 loss """crossentropy""" +1024 70 regularizer """no""" +1024 70 optimizer """adam""" +1024 70 training_loop """lcwa""" +1024 70 evaluator """rankbased""" +1024 71 dataset """kinships""" +1024 71 model """unstructuredmodel""" +1024 71 loss """crossentropy""" +1024 71 regularizer """no""" +1024 71 optimizer """adam""" +1024 71 training_loop """lcwa""" +1024 71 evaluator """rankbased""" +1024 72 dataset """kinships""" +1024 72 model """unstructuredmodel""" +1024 72 loss """crossentropy""" +1024 72 regularizer """no""" +1024 72 optimizer """adam""" +1024 72 training_loop """lcwa""" +1024 72 evaluator """rankbased""" +1024 73 dataset """kinships""" +1024 73 model """unstructuredmodel""" +1024 73 loss """crossentropy""" +1024 73 regularizer """no""" +1024 73 optimizer """adam""" +1024 73 training_loop """lcwa""" +1024 73 evaluator """rankbased""" +1024 74 dataset """kinships""" +1024 74 model """unstructuredmodel""" +1024 74 loss """crossentropy""" +1024 
74 regularizer """no""" +1024 74 optimizer """adam""" +1024 74 training_loop """lcwa""" +1024 74 evaluator """rankbased""" +1024 75 dataset """kinships""" +1024 75 model """unstructuredmodel""" +1024 75 loss """crossentropy""" +1024 75 regularizer """no""" +1024 75 optimizer """adam""" +1024 75 training_loop """lcwa""" +1024 75 evaluator """rankbased""" +1024 76 dataset """kinships""" +1024 76 model """unstructuredmodel""" +1024 76 loss """crossentropy""" +1024 76 regularizer """no""" +1024 76 optimizer """adam""" +1024 76 training_loop """lcwa""" +1024 76 evaluator """rankbased""" +1024 77 dataset """kinships""" +1024 77 model """unstructuredmodel""" +1024 77 loss """crossentropy""" +1024 77 regularizer """no""" +1024 77 optimizer """adam""" +1024 77 training_loop """lcwa""" +1024 77 evaluator """rankbased""" +1024 78 dataset """kinships""" +1024 78 model """unstructuredmodel""" +1024 78 loss """crossentropy""" +1024 78 regularizer """no""" +1024 78 optimizer """adam""" +1024 78 training_loop """lcwa""" +1024 78 evaluator """rankbased""" +1024 79 dataset """kinships""" +1024 79 model """unstructuredmodel""" +1024 79 loss """crossentropy""" +1024 79 regularizer """no""" +1024 79 optimizer """adam""" +1024 79 training_loop """lcwa""" +1024 79 evaluator """rankbased""" +1024 80 dataset """kinships""" +1024 80 model """unstructuredmodel""" +1024 80 loss """crossentropy""" +1024 80 regularizer """no""" +1024 80 optimizer """adam""" +1024 80 training_loop """lcwa""" +1024 80 evaluator """rankbased""" +1024 81 dataset """kinships""" +1024 81 model """unstructuredmodel""" +1024 81 loss """crossentropy""" +1024 81 regularizer """no""" +1024 81 optimizer """adam""" +1024 81 training_loop """lcwa""" +1024 81 evaluator """rankbased""" +1024 82 dataset """kinships""" +1024 82 model """unstructuredmodel""" +1024 82 loss """crossentropy""" +1024 82 regularizer """no""" +1024 82 optimizer """adam""" +1024 82 training_loop """lcwa""" +1024 82 evaluator """rankbased""" +1024 83 
dataset """kinships""" +1024 83 model """unstructuredmodel""" +1024 83 loss """crossentropy""" +1024 83 regularizer """no""" +1024 83 optimizer """adam""" +1024 83 training_loop """lcwa""" +1024 83 evaluator """rankbased""" +1024 84 dataset """kinships""" +1024 84 model """unstructuredmodel""" +1024 84 loss """crossentropy""" +1024 84 regularizer """no""" +1024 84 optimizer """adam""" +1024 84 training_loop """lcwa""" +1024 84 evaluator """rankbased""" +1024 85 dataset """kinships""" +1024 85 model """unstructuredmodel""" +1024 85 loss """crossentropy""" +1024 85 regularizer """no""" +1024 85 optimizer """adam""" +1024 85 training_loop """lcwa""" +1024 85 evaluator """rankbased""" +1024 86 dataset """kinships""" +1024 86 model """unstructuredmodel""" +1024 86 loss """crossentropy""" +1024 86 regularizer """no""" +1024 86 optimizer """adam""" +1024 86 training_loop """lcwa""" +1024 86 evaluator """rankbased""" +1024 87 dataset """kinships""" +1024 87 model """unstructuredmodel""" +1024 87 loss """crossentropy""" +1024 87 regularizer """no""" +1024 87 optimizer """adam""" +1024 87 training_loop """lcwa""" +1024 87 evaluator """rankbased""" +1024 88 dataset """kinships""" +1024 88 model """unstructuredmodel""" +1024 88 loss """crossentropy""" +1024 88 regularizer """no""" +1024 88 optimizer """adam""" +1024 88 training_loop """lcwa""" +1024 88 evaluator """rankbased""" +1024 89 dataset """kinships""" +1024 89 model """unstructuredmodel""" +1024 89 loss """crossentropy""" +1024 89 regularizer """no""" +1024 89 optimizer """adam""" +1024 89 training_loop """lcwa""" +1024 89 evaluator """rankbased""" +1024 90 dataset """kinships""" +1024 90 model """unstructuredmodel""" +1024 90 loss """crossentropy""" +1024 90 regularizer """no""" +1024 90 optimizer """adam""" +1024 90 training_loop """lcwa""" +1024 90 evaluator """rankbased""" +1024 91 dataset """kinships""" +1024 91 model """unstructuredmodel""" +1024 91 loss """crossentropy""" +1024 91 regularizer """no""" +1024 91 
optimizer """adam""" +1024 91 training_loop """lcwa""" +1024 91 evaluator """rankbased""" +1024 92 dataset """kinships""" +1024 92 model """unstructuredmodel""" +1024 92 loss """crossentropy""" +1024 92 regularizer """no""" +1024 92 optimizer """adam""" +1024 92 training_loop """lcwa""" +1024 92 evaluator """rankbased""" +1024 93 dataset """kinships""" +1024 93 model """unstructuredmodel""" +1024 93 loss """crossentropy""" +1024 93 regularizer """no""" +1024 93 optimizer """adam""" +1024 93 training_loop """lcwa""" +1024 93 evaluator """rankbased""" +1024 94 dataset """kinships""" +1024 94 model """unstructuredmodel""" +1024 94 loss """crossentropy""" +1024 94 regularizer """no""" +1024 94 optimizer """adam""" +1024 94 training_loop """lcwa""" +1024 94 evaluator """rankbased""" +1024 95 dataset """kinships""" +1024 95 model """unstructuredmodel""" +1024 95 loss """crossentropy""" +1024 95 regularizer """no""" +1024 95 optimizer """adam""" +1024 95 training_loop """lcwa""" +1024 95 evaluator """rankbased""" +1024 96 dataset """kinships""" +1024 96 model """unstructuredmodel""" +1024 96 loss """crossentropy""" +1024 96 regularizer """no""" +1024 96 optimizer """adam""" +1024 96 training_loop """lcwa""" +1024 96 evaluator """rankbased""" +1024 97 dataset """kinships""" +1024 97 model """unstructuredmodel""" +1024 97 loss """crossentropy""" +1024 97 regularizer """no""" +1024 97 optimizer """adam""" +1024 97 training_loop """lcwa""" +1024 97 evaluator """rankbased""" +1024 98 dataset """kinships""" +1024 98 model """unstructuredmodel""" +1024 98 loss """crossentropy""" +1024 98 regularizer """no""" +1024 98 optimizer """adam""" +1024 98 training_loop """lcwa""" +1024 98 evaluator """rankbased""" +1024 99 dataset """kinships""" +1024 99 model """unstructuredmodel""" +1024 99 loss """crossentropy""" +1024 99 regularizer """no""" +1024 99 optimizer """adam""" +1024 99 training_loop """lcwa""" +1024 99 evaluator """rankbased""" +1024 100 dataset """kinships""" +1024 100 
model """unstructuredmodel""" +1024 100 loss """crossentropy""" +1024 100 regularizer """no""" +1024 100 optimizer """adam""" +1024 100 training_loop """lcwa""" +1024 100 evaluator """rankbased""" +1025 1 model.embedding_dim 0.0 +1025 1 model.scoring_fct_norm 1.0 +1025 1 optimizer.lr 0.006045443181750964 +1025 1 training.batch_size 0.0 +1025 1 training.label_smoothing 0.11463596773676486 +1025 2 model.embedding_dim 1.0 +1025 2 model.scoring_fct_norm 1.0 +1025 2 optimizer.lr 0.03449956052603216 +1025 2 training.batch_size 2.0 +1025 2 training.label_smoothing 0.0019398483813665797 +1025 3 model.embedding_dim 2.0 +1025 3 model.scoring_fct_norm 1.0 +1025 3 optimizer.lr 0.022670608558525318 +1025 3 training.batch_size 1.0 +1025 3 training.label_smoothing 0.2752879413050654 +1025 4 model.embedding_dim 1.0 +1025 4 model.scoring_fct_norm 1.0 +1025 4 optimizer.lr 0.07626378764381582 +1025 4 training.batch_size 1.0 +1025 4 training.label_smoothing 0.08611820108688674 +1025 5 model.embedding_dim 1.0 +1025 5 model.scoring_fct_norm 1.0 +1025 5 optimizer.lr 0.002594081341155056 +1025 5 training.batch_size 1.0 +1025 5 training.label_smoothing 0.0667974988803767 +1025 6 model.embedding_dim 2.0 +1025 6 model.scoring_fct_norm 1.0 +1025 6 optimizer.lr 0.007592814451495721 +1025 6 training.batch_size 0.0 +1025 6 training.label_smoothing 0.015453421452534562 +1025 7 model.embedding_dim 0.0 +1025 7 model.scoring_fct_norm 2.0 +1025 7 optimizer.lr 0.004055841908379158 +1025 7 training.batch_size 0.0 +1025 7 training.label_smoothing 0.4467622584368143 +1025 8 model.embedding_dim 0.0 +1025 8 model.scoring_fct_norm 1.0 +1025 8 optimizer.lr 0.006988657297355516 +1025 8 training.batch_size 1.0 +1025 8 training.label_smoothing 0.0020125644585637804 +1025 9 model.embedding_dim 0.0 +1025 9 model.scoring_fct_norm 1.0 +1025 9 optimizer.lr 0.04111358038081105 +1025 9 training.batch_size 2.0 +1025 9 training.label_smoothing 0.08069895517693174 +1025 10 model.embedding_dim 0.0 +1025 10 
model.scoring_fct_norm 2.0 +1025 10 optimizer.lr 0.006685329345960878 +1025 10 training.batch_size 2.0 +1025 10 training.label_smoothing 0.10628730346392318 +1025 11 model.embedding_dim 1.0 +1025 11 model.scoring_fct_norm 2.0 +1025 11 optimizer.lr 0.09398235688737767 +1025 11 training.batch_size 1.0 +1025 11 training.label_smoothing 0.38302467739261015 +1025 12 model.embedding_dim 1.0 +1025 12 model.scoring_fct_norm 1.0 +1025 12 optimizer.lr 0.0964187705418406 +1025 12 training.batch_size 1.0 +1025 12 training.label_smoothing 0.00150069453599226 +1025 13 model.embedding_dim 2.0 +1025 13 model.scoring_fct_norm 1.0 +1025 13 optimizer.lr 0.0013022811837972938 +1025 13 training.batch_size 0.0 +1025 13 training.label_smoothing 0.007260838516105621 +1025 14 model.embedding_dim 0.0 +1025 14 model.scoring_fct_norm 1.0 +1025 14 optimizer.lr 0.002272152853664437 +1025 14 training.batch_size 0.0 +1025 14 training.label_smoothing 0.0013373858711089044 +1025 15 model.embedding_dim 1.0 +1025 15 model.scoring_fct_norm 2.0 +1025 15 optimizer.lr 0.08240335618645452 +1025 15 training.batch_size 2.0 +1025 15 training.label_smoothing 0.004290798371826488 +1025 16 model.embedding_dim 1.0 +1025 16 model.scoring_fct_norm 2.0 +1025 16 optimizer.lr 0.008178033150117541 +1025 16 training.batch_size 2.0 +1025 16 training.label_smoothing 0.1853813698872767 +1025 17 model.embedding_dim 0.0 +1025 17 model.scoring_fct_norm 1.0 +1025 17 optimizer.lr 0.0025756221689000724 +1025 17 training.batch_size 2.0 +1025 17 training.label_smoothing 0.035099136379072686 +1025 18 model.embedding_dim 2.0 +1025 18 model.scoring_fct_norm 2.0 +1025 18 optimizer.lr 0.03652626588072937 +1025 18 training.batch_size 2.0 +1025 18 training.label_smoothing 0.007330079768442164 +1025 19 model.embedding_dim 2.0 +1025 19 model.scoring_fct_norm 2.0 +1025 19 optimizer.lr 0.014242853924431028 +1025 19 training.batch_size 1.0 +1025 19 training.label_smoothing 0.013786466812175522 +1025 20 model.embedding_dim 1.0 +1025 20 
model.scoring_fct_norm 1.0 +1025 20 optimizer.lr 0.002051847026458119 +1025 20 training.batch_size 1.0 +1025 20 training.label_smoothing 0.17910559529338818 +1025 21 model.embedding_dim 1.0 +1025 21 model.scoring_fct_norm 1.0 +1025 21 optimizer.lr 0.041778925245745314 +1025 21 training.batch_size 1.0 +1025 21 training.label_smoothing 0.029703513778736305 +1025 22 model.embedding_dim 1.0 +1025 22 model.scoring_fct_norm 2.0 +1025 22 optimizer.lr 0.0098368635219141 +1025 22 training.batch_size 2.0 +1025 22 training.label_smoothing 0.001887167695419566 +1025 23 model.embedding_dim 1.0 +1025 23 model.scoring_fct_norm 2.0 +1025 23 optimizer.lr 0.0012833444088966028 +1025 23 training.batch_size 2.0 +1025 23 training.label_smoothing 0.10064228246505447 +1025 24 model.embedding_dim 0.0 +1025 24 model.scoring_fct_norm 2.0 +1025 24 optimizer.lr 0.024614008844709656 +1025 24 training.batch_size 2.0 +1025 24 training.label_smoothing 0.03827515976314205 +1025 25 model.embedding_dim 1.0 +1025 25 model.scoring_fct_norm 2.0 +1025 25 optimizer.lr 0.0349830964615983 +1025 25 training.batch_size 1.0 +1025 25 training.label_smoothing 0.20481217377624455 +1025 26 model.embedding_dim 1.0 +1025 26 model.scoring_fct_norm 2.0 +1025 26 optimizer.lr 0.08848024627891636 +1025 26 training.batch_size 0.0 +1025 26 training.label_smoothing 0.08833892674233029 +1025 27 model.embedding_dim 2.0 +1025 27 model.scoring_fct_norm 2.0 +1025 27 optimizer.lr 0.002741599730193889 +1025 27 training.batch_size 1.0 +1025 27 training.label_smoothing 0.13398334589514804 +1025 28 model.embedding_dim 1.0 +1025 28 model.scoring_fct_norm 1.0 +1025 28 optimizer.lr 0.030282837295275623 +1025 28 training.batch_size 1.0 +1025 28 training.label_smoothing 0.07028837032049463 +1025 29 model.embedding_dim 0.0 +1025 29 model.scoring_fct_norm 2.0 +1025 29 optimizer.lr 0.020844088842079358 +1025 29 training.batch_size 0.0 +1025 29 training.label_smoothing 0.24728142559254285 +1025 30 model.embedding_dim 2.0 +1025 30 
model.scoring_fct_norm 2.0 +1025 30 optimizer.lr 0.0013855289754202327 +1025 30 training.batch_size 1.0 +1025 30 training.label_smoothing 0.007367115346718653 +1025 31 model.embedding_dim 0.0 +1025 31 model.scoring_fct_norm 1.0 +1025 31 optimizer.lr 0.021450584765933808 +1025 31 training.batch_size 0.0 +1025 31 training.label_smoothing 0.08978390481851005 +1025 32 model.embedding_dim 2.0 +1025 32 model.scoring_fct_norm 2.0 +1025 32 optimizer.lr 0.028149687787862678 +1025 32 training.batch_size 1.0 +1025 32 training.label_smoothing 0.0030456092988359828 +1025 33 model.embedding_dim 1.0 +1025 33 model.scoring_fct_norm 2.0 +1025 33 optimizer.lr 0.048330942743307924 +1025 33 training.batch_size 1.0 +1025 33 training.label_smoothing 0.3438311292081099 +1025 34 model.embedding_dim 0.0 +1025 34 model.scoring_fct_norm 2.0 +1025 34 optimizer.lr 0.001063039703583769 +1025 34 training.batch_size 0.0 +1025 34 training.label_smoothing 0.18574102710932908 +1025 35 model.embedding_dim 1.0 +1025 35 model.scoring_fct_norm 2.0 +1025 35 optimizer.lr 0.010074685562569696 +1025 35 training.batch_size 2.0 +1025 35 training.label_smoothing 0.008236456484758719 +1025 36 model.embedding_dim 0.0 +1025 36 model.scoring_fct_norm 1.0 +1025 36 optimizer.lr 0.03520458584545501 +1025 36 training.batch_size 1.0 +1025 36 training.label_smoothing 0.029439054347487344 +1025 37 model.embedding_dim 2.0 +1025 37 model.scoring_fct_norm 1.0 +1025 37 optimizer.lr 0.0013362262021692748 +1025 37 training.batch_size 1.0 +1025 37 training.label_smoothing 0.004850212605761861 +1025 38 model.embedding_dim 1.0 +1025 38 model.scoring_fct_norm 1.0 +1025 38 optimizer.lr 0.0011082204152533621 +1025 38 training.batch_size 2.0 +1025 38 training.label_smoothing 0.0010391179470062592 +1025 39 model.embedding_dim 2.0 +1025 39 model.scoring_fct_norm 2.0 +1025 39 optimizer.lr 0.009274921646922727 +1025 39 training.batch_size 1.0 +1025 39 training.label_smoothing 0.834084992416726 +1025 40 model.embedding_dim 1.0 +1025 40 
model.scoring_fct_norm 2.0 +1025 40 optimizer.lr 0.08937597594546927 +1025 40 training.batch_size 2.0 +1025 40 training.label_smoothing 0.06371632584762908 +1025 41 model.embedding_dim 2.0 +1025 41 model.scoring_fct_norm 2.0 +1025 41 optimizer.lr 0.04242704122907674 +1025 41 training.batch_size 2.0 +1025 41 training.label_smoothing 0.12240273191186825 +1025 42 model.embedding_dim 0.0 +1025 42 model.scoring_fct_norm 1.0 +1025 42 optimizer.lr 0.017467710423150238 +1025 42 training.batch_size 0.0 +1025 42 training.label_smoothing 0.007980780107286813 +1025 43 model.embedding_dim 1.0 +1025 43 model.scoring_fct_norm 1.0 +1025 43 optimizer.lr 0.0015184492094673977 +1025 43 training.batch_size 2.0 +1025 43 training.label_smoothing 0.05070658845468236 +1025 44 model.embedding_dim 1.0 +1025 44 model.scoring_fct_norm 1.0 +1025 44 optimizer.lr 0.006074392237216848 +1025 44 training.batch_size 0.0 +1025 44 training.label_smoothing 0.0024333271864938567 +1025 45 model.embedding_dim 1.0 +1025 45 model.scoring_fct_norm 2.0 +1025 45 optimizer.lr 0.0495412928151072 +1025 45 training.batch_size 0.0 +1025 45 training.label_smoothing 0.427671844452862 +1025 46 model.embedding_dim 0.0 +1025 46 model.scoring_fct_norm 1.0 +1025 46 optimizer.lr 0.07587502250509696 +1025 46 training.batch_size 0.0 +1025 46 training.label_smoothing 0.0022796063116108374 +1025 47 model.embedding_dim 2.0 +1025 47 model.scoring_fct_norm 1.0 +1025 47 optimizer.lr 0.013197076181197545 +1025 47 training.batch_size 1.0 +1025 47 training.label_smoothing 0.003775375851882792 +1025 48 model.embedding_dim 1.0 +1025 48 model.scoring_fct_norm 1.0 +1025 48 optimizer.lr 0.013593857549461666 +1025 48 training.batch_size 1.0 +1025 48 training.label_smoothing 0.1514658529064134 +1025 49 model.embedding_dim 1.0 +1025 49 model.scoring_fct_norm 1.0 +1025 49 optimizer.lr 0.001394507443006076 +1025 49 training.batch_size 0.0 +1025 49 training.label_smoothing 0.0013580663804154047 +1025 50 model.embedding_dim 1.0 +1025 50 
model.scoring_fct_norm 2.0 +1025 50 optimizer.lr 0.041060093292589306 +1025 50 training.batch_size 0.0 +1025 50 training.label_smoothing 0.010156844179170616 +1025 51 model.embedding_dim 2.0 +1025 51 model.scoring_fct_norm 1.0 +1025 51 optimizer.lr 0.03289846267569672 +1025 51 training.batch_size 0.0 +1025 51 training.label_smoothing 0.0042415608927155865 +1025 52 model.embedding_dim 1.0 +1025 52 model.scoring_fct_norm 2.0 +1025 52 optimizer.lr 0.0020653403655042928 +1025 52 training.batch_size 0.0 +1025 52 training.label_smoothing 0.08575273918312318 +1025 53 model.embedding_dim 0.0 +1025 53 model.scoring_fct_norm 2.0 +1025 53 optimizer.lr 0.06849963502646325 +1025 53 training.batch_size 1.0 +1025 53 training.label_smoothing 0.005410609019517112 +1025 54 model.embedding_dim 2.0 +1025 54 model.scoring_fct_norm 1.0 +1025 54 optimizer.lr 0.002532987145919638 +1025 54 training.batch_size 1.0 +1025 54 training.label_smoothing 0.029299805333231416 +1025 55 model.embedding_dim 1.0 +1025 55 model.scoring_fct_norm 1.0 +1025 55 optimizer.lr 0.04551074506862756 +1025 55 training.batch_size 0.0 +1025 55 training.label_smoothing 0.0035156249560410534 +1025 56 model.embedding_dim 1.0 +1025 56 model.scoring_fct_norm 2.0 +1025 56 optimizer.lr 0.006133619670424936 +1025 56 training.batch_size 0.0 +1025 56 training.label_smoothing 0.028752391887620618 +1025 57 model.embedding_dim 0.0 +1025 57 model.scoring_fct_norm 1.0 +1025 57 optimizer.lr 0.03196157642977251 +1025 57 training.batch_size 0.0 +1025 57 training.label_smoothing 0.0014781238160900686 +1025 58 model.embedding_dim 2.0 +1025 58 model.scoring_fct_norm 2.0 +1025 58 optimizer.lr 0.008112499246713162 +1025 58 training.batch_size 0.0 +1025 58 training.label_smoothing 0.12344101135771758 +1025 59 model.embedding_dim 1.0 +1025 59 model.scoring_fct_norm 1.0 +1025 59 optimizer.lr 0.0561145649260947 +1025 59 training.batch_size 2.0 +1025 59 training.label_smoothing 0.002810000226720934 +1025 60 model.embedding_dim 1.0 +1025 60 
model.scoring_fct_norm 1.0 +1025 60 optimizer.lr 0.023923939018307475 +1025 60 training.batch_size 2.0 +1025 60 training.label_smoothing 0.09638013950116892 +1025 61 model.embedding_dim 1.0 +1025 61 model.scoring_fct_norm 1.0 +1025 61 optimizer.lr 0.06673326651481662 +1025 61 training.batch_size 1.0 +1025 61 training.label_smoothing 0.0022591355383331734 +1025 62 model.embedding_dim 1.0 +1025 62 model.scoring_fct_norm 1.0 +1025 62 optimizer.lr 0.0074488336218576584 +1025 62 training.batch_size 0.0 +1025 62 training.label_smoothing 0.0021721275429204406 +1025 63 model.embedding_dim 1.0 +1025 63 model.scoring_fct_norm 2.0 +1025 63 optimizer.lr 0.00667611448302665 +1025 63 training.batch_size 2.0 +1025 63 training.label_smoothing 0.003773740216136036 +1025 64 model.embedding_dim 2.0 +1025 64 model.scoring_fct_norm 2.0 +1025 64 optimizer.lr 0.001964358235339088 +1025 64 training.batch_size 0.0 +1025 64 training.label_smoothing 0.44144917266744943 +1025 65 model.embedding_dim 2.0 +1025 65 model.scoring_fct_norm 1.0 +1025 65 optimizer.lr 0.015663852317013685 +1025 65 training.batch_size 2.0 +1025 65 training.label_smoothing 0.09739099836015451 +1025 66 model.embedding_dim 2.0 +1025 66 model.scoring_fct_norm 2.0 +1025 66 optimizer.lr 0.002701824215194498 +1025 66 training.batch_size 1.0 +1025 66 training.label_smoothing 0.013343690576789871 +1025 67 model.embedding_dim 1.0 +1025 67 model.scoring_fct_norm 2.0 +1025 67 optimizer.lr 0.010982906774797093 +1025 67 training.batch_size 0.0 +1025 67 training.label_smoothing 0.0014630836242177946 +1025 68 model.embedding_dim 2.0 +1025 68 model.scoring_fct_norm 1.0 +1025 68 optimizer.lr 0.010048131894537946 +1025 68 training.batch_size 1.0 +1025 68 training.label_smoothing 0.13058114947266028 +1025 69 model.embedding_dim 1.0 +1025 69 model.scoring_fct_norm 2.0 +1025 69 optimizer.lr 0.002861418123737759 +1025 69 training.batch_size 1.0 +1025 69 training.label_smoothing 0.004310657523724515 +1025 70 model.embedding_dim 2.0 +1025 70 
model.scoring_fct_norm 1.0 +1025 70 optimizer.lr 0.054198340756553186 +1025 70 training.batch_size 1.0 +1025 70 training.label_smoothing 0.005686844256582825 +1025 71 model.embedding_dim 2.0 +1025 71 model.scoring_fct_norm 1.0 +1025 71 optimizer.lr 0.09711490430823742 +1025 71 training.batch_size 0.0 +1025 71 training.label_smoothing 0.003390876766892594 +1025 72 model.embedding_dim 0.0 +1025 72 model.scoring_fct_norm 2.0 +1025 72 optimizer.lr 0.0010079476095565686 +1025 72 training.batch_size 1.0 +1025 72 training.label_smoothing 0.0415603002044801 +1025 73 model.embedding_dim 0.0 +1025 73 model.scoring_fct_norm 1.0 +1025 73 optimizer.lr 0.0028669370348368478 +1025 73 training.batch_size 1.0 +1025 73 training.label_smoothing 0.13457953741473946 +1025 74 model.embedding_dim 1.0 +1025 74 model.scoring_fct_norm 1.0 +1025 74 optimizer.lr 0.007222158509049669 +1025 74 training.batch_size 2.0 +1025 74 training.label_smoothing 0.10456693359821931 +1025 75 model.embedding_dim 1.0 +1025 75 model.scoring_fct_norm 1.0 +1025 75 optimizer.lr 0.005749595672892236 +1025 75 training.batch_size 0.0 +1025 75 training.label_smoothing 0.011072004083338263 +1025 76 model.embedding_dim 1.0 +1025 76 model.scoring_fct_norm 1.0 +1025 76 optimizer.lr 0.0072770191367050666 +1025 76 training.batch_size 1.0 +1025 76 training.label_smoothing 0.18411975579875559 +1025 77 model.embedding_dim 0.0 +1025 77 model.scoring_fct_norm 1.0 +1025 77 optimizer.lr 0.015360607286498063 +1025 77 training.batch_size 0.0 +1025 77 training.label_smoothing 0.004501273479918884 +1025 78 model.embedding_dim 0.0 +1025 78 model.scoring_fct_norm 1.0 +1025 78 optimizer.lr 0.002051626252639241 +1025 78 training.batch_size 0.0 +1025 78 training.label_smoothing 0.18108772134301598 +1025 79 model.embedding_dim 0.0 +1025 79 model.scoring_fct_norm 2.0 +1025 79 optimizer.lr 0.02897684223416158 +1025 79 training.batch_size 1.0 +1025 79 training.label_smoothing 0.06352166879901991 +1025 80 model.embedding_dim 0.0 +1025 80 
model.scoring_fct_norm 1.0 +1025 80 optimizer.lr 0.018465168558139292 +1025 80 training.batch_size 1.0 +1025 80 training.label_smoothing 0.4597401143671965 +1025 81 model.embedding_dim 1.0 +1025 81 model.scoring_fct_norm 2.0 +1025 81 optimizer.lr 0.044356607449071404 +1025 81 training.batch_size 1.0 +1025 81 training.label_smoothing 0.022543380043052695 +1025 82 model.embedding_dim 0.0 +1025 82 model.scoring_fct_norm 2.0 +1025 82 optimizer.lr 0.003856058967629141 +1025 82 training.batch_size 1.0 +1025 82 training.label_smoothing 0.03536120836063708 +1025 83 model.embedding_dim 1.0 +1025 83 model.scoring_fct_norm 2.0 +1025 83 optimizer.lr 0.005666549988141502 +1025 83 training.batch_size 1.0 +1025 83 training.label_smoothing 0.04173815971152541 +1025 84 model.embedding_dim 2.0 +1025 84 model.scoring_fct_norm 1.0 +1025 84 optimizer.lr 0.0016778610018395711 +1025 84 training.batch_size 1.0 +1025 84 training.label_smoothing 0.001405236292360449 +1025 85 model.embedding_dim 1.0 +1025 85 model.scoring_fct_norm 2.0 +1025 85 optimizer.lr 0.0024715498058115815 +1025 85 training.batch_size 2.0 +1025 85 training.label_smoothing 0.0015214856944825168 +1025 86 model.embedding_dim 2.0 +1025 86 model.scoring_fct_norm 1.0 +1025 86 optimizer.lr 0.002116652959658892 +1025 86 training.batch_size 0.0 +1025 86 training.label_smoothing 0.004128364355566301 +1025 87 model.embedding_dim 2.0 +1025 87 model.scoring_fct_norm 1.0 +1025 87 optimizer.lr 0.06580290642208854 +1025 87 training.batch_size 2.0 +1025 87 training.label_smoothing 0.23940382576801417 +1025 88 model.embedding_dim 0.0 +1025 88 model.scoring_fct_norm 1.0 +1025 88 optimizer.lr 0.035096769015574875 +1025 88 training.batch_size 1.0 +1025 88 training.label_smoothing 0.0012641321216680071 +1025 89 model.embedding_dim 2.0 +1025 89 model.scoring_fct_norm 1.0 +1025 89 optimizer.lr 0.042814698238617145 +1025 89 training.batch_size 1.0 +1025 89 training.label_smoothing 0.5667185739838481 +1025 90 model.embedding_dim 1.0 +1025 90 
model.scoring_fct_norm 1.0 +1025 90 optimizer.lr 0.004740677303496783 +1025 90 training.batch_size 2.0 +1025 90 training.label_smoothing 0.004893694739785868 +1025 91 model.embedding_dim 0.0 +1025 91 model.scoring_fct_norm 2.0 +1025 91 optimizer.lr 0.029254890205861955 +1025 91 training.batch_size 2.0 +1025 91 training.label_smoothing 0.0686114007000126 +1025 92 model.embedding_dim 1.0 +1025 92 model.scoring_fct_norm 2.0 +1025 92 optimizer.lr 0.0023013154499423125 +1025 92 training.batch_size 0.0 +1025 92 training.label_smoothing 0.027268914606279452 +1025 93 model.embedding_dim 1.0 +1025 93 model.scoring_fct_norm 1.0 +1025 93 optimizer.lr 0.049396515231247724 +1025 93 training.batch_size 2.0 +1025 93 training.label_smoothing 0.008994304852303714 +1025 94 model.embedding_dim 2.0 +1025 94 model.scoring_fct_norm 2.0 +1025 94 optimizer.lr 0.005252286071687382 +1025 94 training.batch_size 0.0 +1025 94 training.label_smoothing 0.004679880450656652 +1025 95 model.embedding_dim 0.0 +1025 95 model.scoring_fct_norm 1.0 +1025 95 optimizer.lr 0.0019008285217091757 +1025 95 training.batch_size 2.0 +1025 95 training.label_smoothing 0.2771044492315663 +1025 96 model.embedding_dim 0.0 +1025 96 model.scoring_fct_norm 2.0 +1025 96 optimizer.lr 0.02417509788982871 +1025 96 training.batch_size 1.0 +1025 96 training.label_smoothing 0.69882820431052 +1025 97 model.embedding_dim 2.0 +1025 97 model.scoring_fct_norm 2.0 +1025 97 optimizer.lr 0.011914973513229324 +1025 97 training.batch_size 0.0 +1025 97 training.label_smoothing 0.005761140201240473 +1025 98 model.embedding_dim 1.0 +1025 98 model.scoring_fct_norm 1.0 +1025 98 optimizer.lr 0.0176874193728874 +1025 98 training.batch_size 2.0 +1025 98 training.label_smoothing 0.026141476374822177 +1025 99 model.embedding_dim 1.0 +1025 99 model.scoring_fct_norm 2.0 +1025 99 optimizer.lr 0.0028762164453911554 +1025 99 training.batch_size 2.0 +1025 99 training.label_smoothing 0.35700975493968373 +1025 100 model.embedding_dim 0.0 +1025 100 
model.scoring_fct_norm 2.0 +1025 100 optimizer.lr 0.017979010468688005 +1025 100 training.batch_size 1.0 +1025 100 training.label_smoothing 0.004117442006566097 +1025 1 dataset """kinships""" +1025 1 model """unstructuredmodel""" +1025 1 loss """crossentropy""" +1025 1 regularizer """no""" +1025 1 optimizer """adam""" +1025 1 training_loop """lcwa""" +1025 1 evaluator """rankbased""" +1025 2 dataset """kinships""" +1025 2 model """unstructuredmodel""" +1025 2 loss """crossentropy""" +1025 2 regularizer """no""" +1025 2 optimizer """adam""" +1025 2 training_loop """lcwa""" +1025 2 evaluator """rankbased""" +1025 3 dataset """kinships""" +1025 3 model """unstructuredmodel""" +1025 3 loss """crossentropy""" +1025 3 regularizer """no""" +1025 3 optimizer """adam""" +1025 3 training_loop """lcwa""" +1025 3 evaluator """rankbased""" +1025 4 dataset """kinships""" +1025 4 model """unstructuredmodel""" +1025 4 loss """crossentropy""" +1025 4 regularizer """no""" +1025 4 optimizer """adam""" +1025 4 training_loop """lcwa""" +1025 4 evaluator """rankbased""" +1025 5 dataset """kinships""" +1025 5 model """unstructuredmodel""" +1025 5 loss """crossentropy""" +1025 5 regularizer """no""" +1025 5 optimizer """adam""" +1025 5 training_loop """lcwa""" +1025 5 evaluator """rankbased""" +1025 6 dataset """kinships""" +1025 6 model """unstructuredmodel""" +1025 6 loss """crossentropy""" +1025 6 regularizer """no""" +1025 6 optimizer """adam""" +1025 6 training_loop """lcwa""" +1025 6 evaluator """rankbased""" +1025 7 dataset """kinships""" +1025 7 model """unstructuredmodel""" +1025 7 loss """crossentropy""" +1025 7 regularizer """no""" +1025 7 optimizer """adam""" +1025 7 training_loop """lcwa""" +1025 7 evaluator """rankbased""" +1025 8 dataset """kinships""" +1025 8 model """unstructuredmodel""" +1025 8 loss """crossentropy""" +1025 8 regularizer """no""" +1025 8 optimizer """adam""" +1025 8 training_loop """lcwa""" +1025 8 evaluator """rankbased""" +1025 9 dataset """kinships""" 
+1025 9 model """unstructuredmodel""" +1025 9 loss """crossentropy""" +1025 9 regularizer """no""" +1025 9 optimizer """adam""" +1025 9 training_loop """lcwa""" +1025 9 evaluator """rankbased""" +1025 10 dataset """kinships""" +1025 10 model """unstructuredmodel""" +1025 10 loss """crossentropy""" +1025 10 regularizer """no""" +1025 10 optimizer """adam""" +1025 10 training_loop """lcwa""" +1025 10 evaluator """rankbased""" +1025 11 dataset """kinships""" +1025 11 model """unstructuredmodel""" +1025 11 loss """crossentropy""" +1025 11 regularizer """no""" +1025 11 optimizer """adam""" +1025 11 training_loop """lcwa""" +1025 11 evaluator """rankbased""" +1025 12 dataset """kinships""" +1025 12 model """unstructuredmodel""" +1025 12 loss """crossentropy""" +1025 12 regularizer """no""" +1025 12 optimizer """adam""" +1025 12 training_loop """lcwa""" +1025 12 evaluator """rankbased""" +1025 13 dataset """kinships""" +1025 13 model """unstructuredmodel""" +1025 13 loss """crossentropy""" +1025 13 regularizer """no""" +1025 13 optimizer """adam""" +1025 13 training_loop """lcwa""" +1025 13 evaluator """rankbased""" +1025 14 dataset """kinships""" +1025 14 model """unstructuredmodel""" +1025 14 loss """crossentropy""" +1025 14 regularizer """no""" +1025 14 optimizer """adam""" +1025 14 training_loop """lcwa""" +1025 14 evaluator """rankbased""" +1025 15 dataset """kinships""" +1025 15 model """unstructuredmodel""" +1025 15 loss """crossentropy""" +1025 15 regularizer """no""" +1025 15 optimizer """adam""" +1025 15 training_loop """lcwa""" +1025 15 evaluator """rankbased""" +1025 16 dataset """kinships""" +1025 16 model """unstructuredmodel""" +1025 16 loss """crossentropy""" +1025 16 regularizer """no""" +1025 16 optimizer """adam""" +1025 16 training_loop """lcwa""" +1025 16 evaluator """rankbased""" +1025 17 dataset """kinships""" +1025 17 model """unstructuredmodel""" +1025 17 loss """crossentropy""" +1025 17 regularizer """no""" +1025 17 optimizer """adam""" +1025 17 
training_loop """lcwa""" +1025 17 evaluator """rankbased""" +1025 18 dataset """kinships""" +1025 18 model """unstructuredmodel""" +1025 18 loss """crossentropy""" +1025 18 regularizer """no""" +1025 18 optimizer """adam""" +1025 18 training_loop """lcwa""" +1025 18 evaluator """rankbased""" +1025 19 dataset """kinships""" +1025 19 model """unstructuredmodel""" +1025 19 loss """crossentropy""" +1025 19 regularizer """no""" +1025 19 optimizer """adam""" +1025 19 training_loop """lcwa""" +1025 19 evaluator """rankbased""" +1025 20 dataset """kinships""" +1025 20 model """unstructuredmodel""" +1025 20 loss """crossentropy""" +1025 20 regularizer """no""" +1025 20 optimizer """adam""" +1025 20 training_loop """lcwa""" +1025 20 evaluator """rankbased""" +1025 21 dataset """kinships""" +1025 21 model """unstructuredmodel""" +1025 21 loss """crossentropy""" +1025 21 regularizer """no""" +1025 21 optimizer """adam""" +1025 21 training_loop """lcwa""" +1025 21 evaluator """rankbased""" +1025 22 dataset """kinships""" +1025 22 model """unstructuredmodel""" +1025 22 loss """crossentropy""" +1025 22 regularizer """no""" +1025 22 optimizer """adam""" +1025 22 training_loop """lcwa""" +1025 22 evaluator """rankbased""" +1025 23 dataset """kinships""" +1025 23 model """unstructuredmodel""" +1025 23 loss """crossentropy""" +1025 23 regularizer """no""" +1025 23 optimizer """adam""" +1025 23 training_loop """lcwa""" +1025 23 evaluator """rankbased""" +1025 24 dataset """kinships""" +1025 24 model """unstructuredmodel""" +1025 24 loss """crossentropy""" +1025 24 regularizer """no""" +1025 24 optimizer """adam""" +1025 24 training_loop """lcwa""" +1025 24 evaluator """rankbased""" +1025 25 dataset """kinships""" +1025 25 model """unstructuredmodel""" +1025 25 loss """crossentropy""" +1025 25 regularizer """no""" +1025 25 optimizer """adam""" +1025 25 training_loop """lcwa""" +1025 25 evaluator """rankbased""" +1025 26 dataset """kinships""" +1025 26 model """unstructuredmodel""" 
+1025 26 loss """crossentropy""" +1025 26 regularizer """no""" +1025 26 optimizer """adam""" +1025 26 training_loop """lcwa""" +1025 26 evaluator """rankbased""" +1025 27 dataset """kinships""" +1025 27 model """unstructuredmodel""" +1025 27 loss """crossentropy""" +1025 27 regularizer """no""" +1025 27 optimizer """adam""" +1025 27 training_loop """lcwa""" +1025 27 evaluator """rankbased""" +1025 28 dataset """kinships""" +1025 28 model """unstructuredmodel""" +1025 28 loss """crossentropy""" +1025 28 regularizer """no""" +1025 28 optimizer """adam""" +1025 28 training_loop """lcwa""" +1025 28 evaluator """rankbased""" +1025 29 dataset """kinships""" +1025 29 model """unstructuredmodel""" +1025 29 loss """crossentropy""" +1025 29 regularizer """no""" +1025 29 optimizer """adam""" +1025 29 training_loop """lcwa""" +1025 29 evaluator """rankbased""" +1025 30 dataset """kinships""" +1025 30 model """unstructuredmodel""" +1025 30 loss """crossentropy""" +1025 30 regularizer """no""" +1025 30 optimizer """adam""" +1025 30 training_loop """lcwa""" +1025 30 evaluator """rankbased""" +1025 31 dataset """kinships""" +1025 31 model """unstructuredmodel""" +1025 31 loss """crossentropy""" +1025 31 regularizer """no""" +1025 31 optimizer """adam""" +1025 31 training_loop """lcwa""" +1025 31 evaluator """rankbased""" +1025 32 dataset """kinships""" +1025 32 model """unstructuredmodel""" +1025 32 loss """crossentropy""" +1025 32 regularizer """no""" +1025 32 optimizer """adam""" +1025 32 training_loop """lcwa""" +1025 32 evaluator """rankbased""" +1025 33 dataset """kinships""" +1025 33 model """unstructuredmodel""" +1025 33 loss """crossentropy""" +1025 33 regularizer """no""" +1025 33 optimizer """adam""" +1025 33 training_loop """lcwa""" +1025 33 evaluator """rankbased""" +1025 34 dataset """kinships""" +1025 34 model """unstructuredmodel""" +1025 34 loss """crossentropy""" +1025 34 regularizer """no""" +1025 34 optimizer """adam""" +1025 34 training_loop """lcwa""" +1025 34 
evaluator """rankbased""" +1025 35 dataset """kinships""" +1025 35 model """unstructuredmodel""" +1025 35 loss """crossentropy""" +1025 35 regularizer """no""" +1025 35 optimizer """adam""" +1025 35 training_loop """lcwa""" +1025 35 evaluator """rankbased""" +1025 36 dataset """kinships""" +1025 36 model """unstructuredmodel""" +1025 36 loss """crossentropy""" +1025 36 regularizer """no""" +1025 36 optimizer """adam""" +1025 36 training_loop """lcwa""" +1025 36 evaluator """rankbased""" +1025 37 dataset """kinships""" +1025 37 model """unstructuredmodel""" +1025 37 loss """crossentropy""" +1025 37 regularizer """no""" +1025 37 optimizer """adam""" +1025 37 training_loop """lcwa""" +1025 37 evaluator """rankbased""" +1025 38 dataset """kinships""" +1025 38 model """unstructuredmodel""" +1025 38 loss """crossentropy""" +1025 38 regularizer """no""" +1025 38 optimizer """adam""" +1025 38 training_loop """lcwa""" +1025 38 evaluator """rankbased""" +1025 39 dataset """kinships""" +1025 39 model """unstructuredmodel""" +1025 39 loss """crossentropy""" +1025 39 regularizer """no""" +1025 39 optimizer """adam""" +1025 39 training_loop """lcwa""" +1025 39 evaluator """rankbased""" +1025 40 dataset """kinships""" +1025 40 model """unstructuredmodel""" +1025 40 loss """crossentropy""" +1025 40 regularizer """no""" +1025 40 optimizer """adam""" +1025 40 training_loop """lcwa""" +1025 40 evaluator """rankbased""" +1025 41 dataset """kinships""" +1025 41 model """unstructuredmodel""" +1025 41 loss """crossentropy""" +1025 41 regularizer """no""" +1025 41 optimizer """adam""" +1025 41 training_loop """lcwa""" +1025 41 evaluator """rankbased""" +1025 42 dataset """kinships""" +1025 42 model """unstructuredmodel""" +1025 42 loss """crossentropy""" +1025 42 regularizer """no""" +1025 42 optimizer """adam""" +1025 42 training_loop """lcwa""" +1025 42 evaluator """rankbased""" +1025 43 dataset """kinships""" +1025 43 model """unstructuredmodel""" +1025 43 loss """crossentropy""" +1025 
43 regularizer """no""" +1025 43 optimizer """adam""" +1025 43 training_loop """lcwa""" +1025 43 evaluator """rankbased""" +1025 44 dataset """kinships""" +1025 44 model """unstructuredmodel""" +1025 44 loss """crossentropy""" +1025 44 regularizer """no""" +1025 44 optimizer """adam""" +1025 44 training_loop """lcwa""" +1025 44 evaluator """rankbased""" +1025 45 dataset """kinships""" +1025 45 model """unstructuredmodel""" +1025 45 loss """crossentropy""" +1025 45 regularizer """no""" +1025 45 optimizer """adam""" +1025 45 training_loop """lcwa""" +1025 45 evaluator """rankbased""" +1025 46 dataset """kinships""" +1025 46 model """unstructuredmodel""" +1025 46 loss """crossentropy""" +1025 46 regularizer """no""" +1025 46 optimizer """adam""" +1025 46 training_loop """lcwa""" +1025 46 evaluator """rankbased""" +1025 47 dataset """kinships""" +1025 47 model """unstructuredmodel""" +1025 47 loss """crossentropy""" +1025 47 regularizer """no""" +1025 47 optimizer """adam""" +1025 47 training_loop """lcwa""" +1025 47 evaluator """rankbased""" +1025 48 dataset """kinships""" +1025 48 model """unstructuredmodel""" +1025 48 loss """crossentropy""" +1025 48 regularizer """no""" +1025 48 optimizer """adam""" +1025 48 training_loop """lcwa""" +1025 48 evaluator """rankbased""" +1025 49 dataset """kinships""" +1025 49 model """unstructuredmodel""" +1025 49 loss """crossentropy""" +1025 49 regularizer """no""" +1025 49 optimizer """adam""" +1025 49 training_loop """lcwa""" +1025 49 evaluator """rankbased""" +1025 50 dataset """kinships""" +1025 50 model """unstructuredmodel""" +1025 50 loss """crossentropy""" +1025 50 regularizer """no""" +1025 50 optimizer """adam""" +1025 50 training_loop """lcwa""" +1025 50 evaluator """rankbased""" +1025 51 dataset """kinships""" +1025 51 model """unstructuredmodel""" +1025 51 loss """crossentropy""" +1025 51 regularizer """no""" +1025 51 optimizer """adam""" +1025 51 training_loop """lcwa""" +1025 51 evaluator """rankbased""" +1025 52 
dataset """kinships""" +1025 52 model """unstructuredmodel""" +1025 52 loss """crossentropy""" +1025 52 regularizer """no""" +1025 52 optimizer """adam""" +1025 52 training_loop """lcwa""" +1025 52 evaluator """rankbased""" +1025 53 dataset """kinships""" +1025 53 model """unstructuredmodel""" +1025 53 loss """crossentropy""" +1025 53 regularizer """no""" +1025 53 optimizer """adam""" +1025 53 training_loop """lcwa""" +1025 53 evaluator """rankbased""" +1025 54 dataset """kinships""" +1025 54 model """unstructuredmodel""" +1025 54 loss """crossentropy""" +1025 54 regularizer """no""" +1025 54 optimizer """adam""" +1025 54 training_loop """lcwa""" +1025 54 evaluator """rankbased""" +1025 55 dataset """kinships""" +1025 55 model """unstructuredmodel""" +1025 55 loss """crossentropy""" +1025 55 regularizer """no""" +1025 55 optimizer """adam""" +1025 55 training_loop """lcwa""" +1025 55 evaluator """rankbased""" +1025 56 dataset """kinships""" +1025 56 model """unstructuredmodel""" +1025 56 loss """crossentropy""" +1025 56 regularizer """no""" +1025 56 optimizer """adam""" +1025 56 training_loop """lcwa""" +1025 56 evaluator """rankbased""" +1025 57 dataset """kinships""" +1025 57 model """unstructuredmodel""" +1025 57 loss """crossentropy""" +1025 57 regularizer """no""" +1025 57 optimizer """adam""" +1025 57 training_loop """lcwa""" +1025 57 evaluator """rankbased""" +1025 58 dataset """kinships""" +1025 58 model """unstructuredmodel""" +1025 58 loss """crossentropy""" +1025 58 regularizer """no""" +1025 58 optimizer """adam""" +1025 58 training_loop """lcwa""" +1025 58 evaluator """rankbased""" +1025 59 dataset """kinships""" +1025 59 model """unstructuredmodel""" +1025 59 loss """crossentropy""" +1025 59 regularizer """no""" +1025 59 optimizer """adam""" +1025 59 training_loop """lcwa""" +1025 59 evaluator """rankbased""" +1025 60 dataset """kinships""" +1025 60 model """unstructuredmodel""" +1025 60 loss """crossentropy""" +1025 60 regularizer """no""" +1025 60 
optimizer """adam""" +1025 60 training_loop """lcwa""" +1025 60 evaluator """rankbased""" +1025 61 dataset """kinships""" +1025 61 model """unstructuredmodel""" +1025 61 loss """crossentropy""" +1025 61 regularizer """no""" +1025 61 optimizer """adam""" +1025 61 training_loop """lcwa""" +1025 61 evaluator """rankbased""" +1025 62 dataset """kinships""" +1025 62 model """unstructuredmodel""" +1025 62 loss """crossentropy""" +1025 62 regularizer """no""" +1025 62 optimizer """adam""" +1025 62 training_loop """lcwa""" +1025 62 evaluator """rankbased""" +1025 63 dataset """kinships""" +1025 63 model """unstructuredmodel""" +1025 63 loss """crossentropy""" +1025 63 regularizer """no""" +1025 63 optimizer """adam""" +1025 63 training_loop """lcwa""" +1025 63 evaluator """rankbased""" +1025 64 dataset """kinships""" +1025 64 model """unstructuredmodel""" +1025 64 loss """crossentropy""" +1025 64 regularizer """no""" +1025 64 optimizer """adam""" +1025 64 training_loop """lcwa""" +1025 64 evaluator """rankbased""" +1025 65 dataset """kinships""" +1025 65 model """unstructuredmodel""" +1025 65 loss """crossentropy""" +1025 65 regularizer """no""" +1025 65 optimizer """adam""" +1025 65 training_loop """lcwa""" +1025 65 evaluator """rankbased""" +1025 66 dataset """kinships""" +1025 66 model """unstructuredmodel""" +1025 66 loss """crossentropy""" +1025 66 regularizer """no""" +1025 66 optimizer """adam""" +1025 66 training_loop """lcwa""" +1025 66 evaluator """rankbased""" +1025 67 dataset """kinships""" +1025 67 model """unstructuredmodel""" +1025 67 loss """crossentropy""" +1025 67 regularizer """no""" +1025 67 optimizer """adam""" +1025 67 training_loop """lcwa""" +1025 67 evaluator """rankbased""" +1025 68 dataset """kinships""" +1025 68 model """unstructuredmodel""" +1025 68 loss """crossentropy""" +1025 68 regularizer """no""" +1025 68 optimizer """adam""" +1025 68 training_loop """lcwa""" +1025 68 evaluator """rankbased""" +1025 69 dataset """kinships""" +1025 69 
model """unstructuredmodel""" +1025 69 loss """crossentropy""" +1025 69 regularizer """no""" +1025 69 optimizer """adam""" +1025 69 training_loop """lcwa""" +1025 69 evaluator """rankbased""" +1025 70 dataset """kinships""" +1025 70 model """unstructuredmodel""" +1025 70 loss """crossentropy""" +1025 70 regularizer """no""" +1025 70 optimizer """adam""" +1025 70 training_loop """lcwa""" +1025 70 evaluator """rankbased""" +1025 71 dataset """kinships""" +1025 71 model """unstructuredmodel""" +1025 71 loss """crossentropy""" +1025 71 regularizer """no""" +1025 71 optimizer """adam""" +1025 71 training_loop """lcwa""" +1025 71 evaluator """rankbased""" +1025 72 dataset """kinships""" +1025 72 model """unstructuredmodel""" +1025 72 loss """crossentropy""" +1025 72 regularizer """no""" +1025 72 optimizer """adam""" +1025 72 training_loop """lcwa""" +1025 72 evaluator """rankbased""" +1025 73 dataset """kinships""" +1025 73 model """unstructuredmodel""" +1025 73 loss """crossentropy""" +1025 73 regularizer """no""" +1025 73 optimizer """adam""" +1025 73 training_loop """lcwa""" +1025 73 evaluator """rankbased""" +1025 74 dataset """kinships""" +1025 74 model """unstructuredmodel""" +1025 74 loss """crossentropy""" +1025 74 regularizer """no""" +1025 74 optimizer """adam""" +1025 74 training_loop """lcwa""" +1025 74 evaluator """rankbased""" +1025 75 dataset """kinships""" +1025 75 model """unstructuredmodel""" +1025 75 loss """crossentropy""" +1025 75 regularizer """no""" +1025 75 optimizer """adam""" +1025 75 training_loop """lcwa""" +1025 75 evaluator """rankbased""" +1025 76 dataset """kinships""" +1025 76 model """unstructuredmodel""" +1025 76 loss """crossentropy""" +1025 76 regularizer """no""" +1025 76 optimizer """adam""" +1025 76 training_loop """lcwa""" +1025 76 evaluator """rankbased""" +1025 77 dataset """kinships""" +1025 77 model """unstructuredmodel""" +1025 77 loss """crossentropy""" +1025 77 regularizer """no""" +1025 77 optimizer """adam""" +1025 77 
training_loop """lcwa""" +1025 77 evaluator """rankbased""" +1025 78 dataset """kinships""" +1025 78 model """unstructuredmodel""" +1025 78 loss """crossentropy""" +1025 78 regularizer """no""" +1025 78 optimizer """adam""" +1025 78 training_loop """lcwa""" +1025 78 evaluator """rankbased""" +1025 79 dataset """kinships""" +1025 79 model """unstructuredmodel""" +1025 79 loss """crossentropy""" +1025 79 regularizer """no""" +1025 79 optimizer """adam""" +1025 79 training_loop """lcwa""" +1025 79 evaluator """rankbased""" +1025 80 dataset """kinships""" +1025 80 model """unstructuredmodel""" +1025 80 loss """crossentropy""" +1025 80 regularizer """no""" +1025 80 optimizer """adam""" +1025 80 training_loop """lcwa""" +1025 80 evaluator """rankbased""" +1025 81 dataset """kinships""" +1025 81 model """unstructuredmodel""" +1025 81 loss """crossentropy""" +1025 81 regularizer """no""" +1025 81 optimizer """adam""" +1025 81 training_loop """lcwa""" +1025 81 evaluator """rankbased""" +1025 82 dataset """kinships""" +1025 82 model """unstructuredmodel""" +1025 82 loss """crossentropy""" +1025 82 regularizer """no""" +1025 82 optimizer """adam""" +1025 82 training_loop """lcwa""" +1025 82 evaluator """rankbased""" +1025 83 dataset """kinships""" +1025 83 model """unstructuredmodel""" +1025 83 loss """crossentropy""" +1025 83 regularizer """no""" +1025 83 optimizer """adam""" +1025 83 training_loop """lcwa""" +1025 83 evaluator """rankbased""" +1025 84 dataset """kinships""" +1025 84 model """unstructuredmodel""" +1025 84 loss """crossentropy""" +1025 84 regularizer """no""" +1025 84 optimizer """adam""" +1025 84 training_loop """lcwa""" +1025 84 evaluator """rankbased""" +1025 85 dataset """kinships""" +1025 85 model """unstructuredmodel""" +1025 85 loss """crossentropy""" +1025 85 regularizer """no""" +1025 85 optimizer """adam""" +1025 85 training_loop """lcwa""" +1025 85 evaluator """rankbased""" +1025 86 dataset """kinships""" +1025 86 model """unstructuredmodel""" 
+1025 86 loss """crossentropy""" +1025 86 regularizer """no""" +1025 86 optimizer """adam""" +1025 86 training_loop """lcwa""" +1025 86 evaluator """rankbased""" +1025 87 dataset """kinships""" +1025 87 model """unstructuredmodel""" +1025 87 loss """crossentropy""" +1025 87 regularizer """no""" +1025 87 optimizer """adam""" +1025 87 training_loop """lcwa""" +1025 87 evaluator """rankbased""" +1025 88 dataset """kinships""" +1025 88 model """unstructuredmodel""" +1025 88 loss """crossentropy""" +1025 88 regularizer """no""" +1025 88 optimizer """adam""" +1025 88 training_loop """lcwa""" +1025 88 evaluator """rankbased""" +1025 89 dataset """kinships""" +1025 89 model """unstructuredmodel""" +1025 89 loss """crossentropy""" +1025 89 regularizer """no""" +1025 89 optimizer """adam""" +1025 89 training_loop """lcwa""" +1025 89 evaluator """rankbased""" +1025 90 dataset """kinships""" +1025 90 model """unstructuredmodel""" +1025 90 loss """crossentropy""" +1025 90 regularizer """no""" +1025 90 optimizer """adam""" +1025 90 training_loop """lcwa""" +1025 90 evaluator """rankbased""" +1025 91 dataset """kinships""" +1025 91 model """unstructuredmodel""" +1025 91 loss """crossentropy""" +1025 91 regularizer """no""" +1025 91 optimizer """adam""" +1025 91 training_loop """lcwa""" +1025 91 evaluator """rankbased""" +1025 92 dataset """kinships""" +1025 92 model """unstructuredmodel""" +1025 92 loss """crossentropy""" +1025 92 regularizer """no""" +1025 92 optimizer """adam""" +1025 92 training_loop """lcwa""" +1025 92 evaluator """rankbased""" +1025 93 dataset """kinships""" +1025 93 model """unstructuredmodel""" +1025 93 loss """crossentropy""" +1025 93 regularizer """no""" +1025 93 optimizer """adam""" +1025 93 training_loop """lcwa""" +1025 93 evaluator """rankbased""" +1025 94 dataset """kinships""" +1025 94 model """unstructuredmodel""" +1025 94 loss """crossentropy""" +1025 94 regularizer """no""" +1025 94 optimizer """adam""" +1025 94 training_loop """lcwa""" +1025 94 
evaluator """rankbased""" +1025 95 dataset """kinships""" +1025 95 model """unstructuredmodel""" +1025 95 loss """crossentropy""" +1025 95 regularizer """no""" +1025 95 optimizer """adam""" +1025 95 training_loop """lcwa""" +1025 95 evaluator """rankbased""" +1025 96 dataset """kinships""" +1025 96 model """unstructuredmodel""" +1025 96 loss """crossentropy""" +1025 96 regularizer """no""" +1025 96 optimizer """adam""" +1025 96 training_loop """lcwa""" +1025 96 evaluator """rankbased""" +1025 97 dataset """kinships""" +1025 97 model """unstructuredmodel""" +1025 97 loss """crossentropy""" +1025 97 regularizer """no""" +1025 97 optimizer """adam""" +1025 97 training_loop """lcwa""" +1025 97 evaluator """rankbased""" +1025 98 dataset """kinships""" +1025 98 model """unstructuredmodel""" +1025 98 loss """crossentropy""" +1025 98 regularizer """no""" +1025 98 optimizer """adam""" +1025 98 training_loop """lcwa""" +1025 98 evaluator """rankbased""" +1025 99 dataset """kinships""" +1025 99 model """unstructuredmodel""" +1025 99 loss """crossentropy""" +1025 99 regularizer """no""" +1025 99 optimizer """adam""" +1025 99 training_loop """lcwa""" +1025 99 evaluator """rankbased""" +1025 100 dataset """kinships""" +1025 100 model """unstructuredmodel""" +1025 100 loss """crossentropy""" +1025 100 regularizer """no""" +1025 100 optimizer """adam""" +1025 100 training_loop """lcwa""" +1025 100 evaluator """rankbased""" +1026 1 model.embedding_dim 2.0 +1026 1 model.scoring_fct_norm 2.0 +1026 1 optimizer.lr 0.07942514861644757 +1026 1 negative_sampler.num_negs_per_pos 68.0 +1026 1 training.batch_size 0.0 +1026 2 model.embedding_dim 1.0 +1026 2 model.scoring_fct_norm 1.0 +1026 2 optimizer.lr 0.007925348065176026 +1026 2 negative_sampler.num_negs_per_pos 68.0 +1026 2 training.batch_size 2.0 +1026 3 model.embedding_dim 0.0 +1026 3 model.scoring_fct_norm 1.0 +1026 3 optimizer.lr 0.0012832125958955338 +1026 3 negative_sampler.num_negs_per_pos 57.0 +1026 3 training.batch_size 1.0 
+1026 4 model.embedding_dim 0.0 +1026 4 model.scoring_fct_norm 1.0 +1026 4 optimizer.lr 0.02421331413285903 +1026 4 negative_sampler.num_negs_per_pos 81.0 +1026 4 training.batch_size 2.0 +1026 5 model.embedding_dim 2.0 +1026 5 model.scoring_fct_norm 1.0 +1026 5 optimizer.lr 0.006321510246882045 +1026 5 negative_sampler.num_negs_per_pos 72.0 +1026 5 training.batch_size 2.0 +1026 6 model.embedding_dim 0.0 +1026 6 model.scoring_fct_norm 1.0 +1026 6 optimizer.lr 0.08882649974371347 +1026 6 negative_sampler.num_negs_per_pos 31.0 +1026 6 training.batch_size 0.0 +1026 7 model.embedding_dim 1.0 +1026 7 model.scoring_fct_norm 2.0 +1026 7 optimizer.lr 0.014600618523481082 +1026 7 negative_sampler.num_negs_per_pos 27.0 +1026 7 training.batch_size 0.0 +1026 8 model.embedding_dim 1.0 +1026 8 model.scoring_fct_norm 2.0 +1026 8 optimizer.lr 0.031118237052353847 +1026 8 negative_sampler.num_negs_per_pos 22.0 +1026 8 training.batch_size 2.0 +1026 9 model.embedding_dim 0.0 +1026 9 model.scoring_fct_norm 2.0 +1026 9 optimizer.lr 0.023347664743944292 +1026 9 negative_sampler.num_negs_per_pos 0.0 +1026 9 training.batch_size 1.0 +1026 10 model.embedding_dim 2.0 +1026 10 model.scoring_fct_norm 1.0 +1026 10 optimizer.lr 0.0035018930444989 +1026 10 negative_sampler.num_negs_per_pos 47.0 +1026 10 training.batch_size 0.0 +1026 11 model.embedding_dim 0.0 +1026 11 model.scoring_fct_norm 1.0 +1026 11 optimizer.lr 0.002545142780004971 +1026 11 negative_sampler.num_negs_per_pos 85.0 +1026 11 training.batch_size 2.0 +1026 12 model.embedding_dim 1.0 +1026 12 model.scoring_fct_norm 1.0 +1026 12 optimizer.lr 0.001562910763592577 +1026 12 negative_sampler.num_negs_per_pos 66.0 +1026 12 training.batch_size 0.0 +1026 13 model.embedding_dim 2.0 +1026 13 model.scoring_fct_norm 2.0 +1026 13 optimizer.lr 0.003113498237694333 +1026 13 negative_sampler.num_negs_per_pos 23.0 +1026 13 training.batch_size 1.0 +1026 14 model.embedding_dim 0.0 +1026 14 model.scoring_fct_norm 2.0 +1026 14 optimizer.lr 
0.0023126277007372467 +1026 14 negative_sampler.num_negs_per_pos 68.0 +1026 14 training.batch_size 0.0 +1026 15 model.embedding_dim 0.0 +1026 15 model.scoring_fct_norm 2.0 +1026 15 optimizer.lr 0.0443891746166951 +1026 15 negative_sampler.num_negs_per_pos 24.0 +1026 15 training.batch_size 0.0 +1026 16 model.embedding_dim 1.0 +1026 16 model.scoring_fct_norm 2.0 +1026 16 optimizer.lr 0.011966546173172976 +1026 16 negative_sampler.num_negs_per_pos 25.0 +1026 16 training.batch_size 0.0 +1026 17 model.embedding_dim 0.0 +1026 17 model.scoring_fct_norm 2.0 +1026 17 optimizer.lr 0.018820143442654813 +1026 17 negative_sampler.num_negs_per_pos 65.0 +1026 17 training.batch_size 0.0 +1026 18 model.embedding_dim 1.0 +1026 18 model.scoring_fct_norm 1.0 +1026 18 optimizer.lr 0.0020009544065877896 +1026 18 negative_sampler.num_negs_per_pos 88.0 +1026 18 training.batch_size 2.0 +1026 19 model.embedding_dim 2.0 +1026 19 model.scoring_fct_norm 1.0 +1026 19 optimizer.lr 0.019633924250999853 +1026 19 negative_sampler.num_negs_per_pos 13.0 +1026 19 training.batch_size 0.0 +1026 20 model.embedding_dim 0.0 +1026 20 model.scoring_fct_norm 1.0 +1026 20 optimizer.lr 0.0476005196657812 +1026 20 negative_sampler.num_negs_per_pos 58.0 +1026 20 training.batch_size 1.0 +1026 21 model.embedding_dim 0.0 +1026 21 model.scoring_fct_norm 2.0 +1026 21 optimizer.lr 0.005780300220464984 +1026 21 negative_sampler.num_negs_per_pos 52.0 +1026 21 training.batch_size 1.0 +1026 22 model.embedding_dim 2.0 +1026 22 model.scoring_fct_norm 2.0 +1026 22 optimizer.lr 0.02568936891883257 +1026 22 negative_sampler.num_negs_per_pos 87.0 +1026 22 training.batch_size 2.0 +1026 23 model.embedding_dim 2.0 +1026 23 model.scoring_fct_norm 1.0 +1026 23 optimizer.lr 0.09083876304216691 +1026 23 negative_sampler.num_negs_per_pos 20.0 +1026 23 training.batch_size 2.0 +1026 24 model.embedding_dim 0.0 +1026 24 model.scoring_fct_norm 2.0 +1026 24 optimizer.lr 0.008242863435504606 +1026 24 negative_sampler.num_negs_per_pos 9.0 +1026 
24 training.batch_size 2.0 +1026 25 model.embedding_dim 2.0 +1026 25 model.scoring_fct_norm 1.0 +1026 25 optimizer.lr 0.02427280316913454 +1026 25 negative_sampler.num_negs_per_pos 11.0 +1026 25 training.batch_size 0.0 +1026 26 model.embedding_dim 0.0 +1026 26 model.scoring_fct_norm 2.0 +1026 26 optimizer.lr 0.017209263248062292 +1026 26 negative_sampler.num_negs_per_pos 86.0 +1026 26 training.batch_size 2.0 +1026 27 model.embedding_dim 2.0 +1026 27 model.scoring_fct_norm 1.0 +1026 27 optimizer.lr 0.02228825959270658 +1026 27 negative_sampler.num_negs_per_pos 82.0 +1026 27 training.batch_size 0.0 +1026 28 model.embedding_dim 2.0 +1026 28 model.scoring_fct_norm 1.0 +1026 28 optimizer.lr 0.010310533323615142 +1026 28 negative_sampler.num_negs_per_pos 93.0 +1026 28 training.batch_size 2.0 +1026 29 model.embedding_dim 1.0 +1026 29 model.scoring_fct_norm 2.0 +1026 29 optimizer.lr 0.005523999542463989 +1026 29 negative_sampler.num_negs_per_pos 66.0 +1026 29 training.batch_size 2.0 +1026 30 model.embedding_dim 0.0 +1026 30 model.scoring_fct_norm 2.0 +1026 30 optimizer.lr 0.0010756673774676823 +1026 30 negative_sampler.num_negs_per_pos 91.0 +1026 30 training.batch_size 2.0 +1026 31 model.embedding_dim 1.0 +1026 31 model.scoring_fct_norm 1.0 +1026 31 optimizer.lr 0.019970124051837998 +1026 31 negative_sampler.num_negs_per_pos 83.0 +1026 31 training.batch_size 0.0 +1026 32 model.embedding_dim 1.0 +1026 32 model.scoring_fct_norm 2.0 +1026 32 optimizer.lr 0.0028646551773739725 +1026 32 negative_sampler.num_negs_per_pos 80.0 +1026 32 training.batch_size 2.0 +1026 33 model.embedding_dim 0.0 +1026 33 model.scoring_fct_norm 1.0 +1026 33 optimizer.lr 0.002921520830634402 +1026 33 negative_sampler.num_negs_per_pos 41.0 +1026 33 training.batch_size 0.0 +1026 34 model.embedding_dim 1.0 +1026 34 model.scoring_fct_norm 1.0 +1026 34 optimizer.lr 0.029067109880086033 +1026 34 negative_sampler.num_negs_per_pos 13.0 +1026 34 training.batch_size 0.0 +1026 35 model.embedding_dim 0.0 +1026 35 
model.scoring_fct_norm 2.0 +1026 35 optimizer.lr 0.012893494297727924 +1026 35 negative_sampler.num_negs_per_pos 24.0 +1026 35 training.batch_size 1.0 +1026 36 model.embedding_dim 1.0 +1026 36 model.scoring_fct_norm 1.0 +1026 36 optimizer.lr 0.0033587721908063446 +1026 36 negative_sampler.num_negs_per_pos 28.0 +1026 36 training.batch_size 0.0 +1026 37 model.embedding_dim 2.0 +1026 37 model.scoring_fct_norm 1.0 +1026 37 optimizer.lr 0.0032342957006244654 +1026 37 negative_sampler.num_negs_per_pos 53.0 +1026 37 training.batch_size 1.0 +1026 38 model.embedding_dim 1.0 +1026 38 model.scoring_fct_norm 1.0 +1026 38 optimizer.lr 0.08418131766809721 +1026 38 negative_sampler.num_negs_per_pos 6.0 +1026 38 training.batch_size 0.0 +1026 39 model.embedding_dim 2.0 +1026 39 model.scoring_fct_norm 2.0 +1026 39 optimizer.lr 0.004057234207313087 +1026 39 negative_sampler.num_negs_per_pos 66.0 +1026 39 training.batch_size 2.0 +1026 40 model.embedding_dim 0.0 +1026 40 model.scoring_fct_norm 2.0 +1026 40 optimizer.lr 0.02204412536277634 +1026 40 negative_sampler.num_negs_per_pos 0.0 +1026 40 training.batch_size 0.0 +1026 41 model.embedding_dim 1.0 +1026 41 model.scoring_fct_norm 2.0 +1026 41 optimizer.lr 0.07353790554853491 +1026 41 negative_sampler.num_negs_per_pos 80.0 +1026 41 training.batch_size 0.0 +1026 42 model.embedding_dim 0.0 +1026 42 model.scoring_fct_norm 2.0 +1026 42 optimizer.lr 0.0013968792651524893 +1026 42 negative_sampler.num_negs_per_pos 99.0 +1026 42 training.batch_size 0.0 +1026 43 model.embedding_dim 1.0 +1026 43 model.scoring_fct_norm 2.0 +1026 43 optimizer.lr 0.0021102951582231974 +1026 43 negative_sampler.num_negs_per_pos 41.0 +1026 43 training.batch_size 1.0 +1026 44 model.embedding_dim 0.0 +1026 44 model.scoring_fct_norm 2.0 +1026 44 optimizer.lr 0.00868173027644105 +1026 44 negative_sampler.num_negs_per_pos 90.0 +1026 44 training.batch_size 0.0 +1026 45 model.embedding_dim 2.0 +1026 45 model.scoring_fct_norm 2.0 +1026 45 optimizer.lr 0.0034516619549812347 
+1026 45 negative_sampler.num_negs_per_pos 88.0 +1026 45 training.batch_size 1.0 +1026 46 model.embedding_dim 2.0 +1026 46 model.scoring_fct_norm 2.0 +1026 46 optimizer.lr 0.01206707510712706 +1026 46 negative_sampler.num_negs_per_pos 31.0 +1026 46 training.batch_size 2.0 +1026 47 model.embedding_dim 1.0 +1026 47 model.scoring_fct_norm 2.0 +1026 47 optimizer.lr 0.011200007493009128 +1026 47 negative_sampler.num_negs_per_pos 89.0 +1026 47 training.batch_size 1.0 +1026 48 model.embedding_dim 0.0 +1026 48 model.scoring_fct_norm 2.0 +1026 48 optimizer.lr 0.03804831379007883 +1026 48 negative_sampler.num_negs_per_pos 33.0 +1026 48 training.batch_size 0.0 +1026 49 model.embedding_dim 2.0 +1026 49 model.scoring_fct_norm 2.0 +1026 49 optimizer.lr 0.006186888547840437 +1026 49 negative_sampler.num_negs_per_pos 98.0 +1026 49 training.batch_size 2.0 +1026 50 model.embedding_dim 2.0 +1026 50 model.scoring_fct_norm 1.0 +1026 50 optimizer.lr 0.0020626470058595736 +1026 50 negative_sampler.num_negs_per_pos 99.0 +1026 50 training.batch_size 2.0 +1026 51 model.embedding_dim 2.0 +1026 51 model.scoring_fct_norm 2.0 +1026 51 optimizer.lr 0.04669696164277636 +1026 51 negative_sampler.num_negs_per_pos 51.0 +1026 51 training.batch_size 2.0 +1026 52 model.embedding_dim 0.0 +1026 52 model.scoring_fct_norm 2.0 +1026 52 optimizer.lr 0.0058834574637691124 +1026 52 negative_sampler.num_negs_per_pos 73.0 +1026 52 training.batch_size 0.0 +1026 53 model.embedding_dim 1.0 +1026 53 model.scoring_fct_norm 2.0 +1026 53 optimizer.lr 0.06496515971623802 +1026 53 negative_sampler.num_negs_per_pos 54.0 +1026 53 training.batch_size 0.0 +1026 54 model.embedding_dim 0.0 +1026 54 model.scoring_fct_norm 1.0 +1026 54 optimizer.lr 0.0834739737657781 +1026 54 negative_sampler.num_negs_per_pos 10.0 +1026 54 training.batch_size 1.0 +1026 55 model.embedding_dim 0.0 +1026 55 model.scoring_fct_norm 1.0 +1026 55 optimizer.lr 0.008094606536767369 +1026 55 negative_sampler.num_negs_per_pos 2.0 +1026 55 
training.batch_size 1.0 +1026 56 model.embedding_dim 0.0 +1026 56 model.scoring_fct_norm 2.0 +1026 56 optimizer.lr 0.024598515053348226 +1026 56 negative_sampler.num_negs_per_pos 93.0 +1026 56 training.batch_size 2.0 +1026 57 model.embedding_dim 2.0 +1026 57 model.scoring_fct_norm 1.0 +1026 57 optimizer.lr 0.09157543856881495 +1026 57 negative_sampler.num_negs_per_pos 66.0 +1026 57 training.batch_size 2.0 +1026 58 model.embedding_dim 1.0 +1026 58 model.scoring_fct_norm 1.0 +1026 58 optimizer.lr 0.007545953541842099 +1026 58 negative_sampler.num_negs_per_pos 2.0 +1026 58 training.batch_size 2.0 +1026 59 model.embedding_dim 2.0 +1026 59 model.scoring_fct_norm 2.0 +1026 59 optimizer.lr 0.02291608554766107 +1026 59 negative_sampler.num_negs_per_pos 42.0 +1026 59 training.batch_size 0.0 +1026 60 model.embedding_dim 1.0 +1026 60 model.scoring_fct_norm 1.0 +1026 60 optimizer.lr 0.032409742068647336 +1026 60 negative_sampler.num_negs_per_pos 89.0 +1026 60 training.batch_size 1.0 +1026 61 model.embedding_dim 1.0 +1026 61 model.scoring_fct_norm 2.0 +1026 61 optimizer.lr 0.03317582997301508 +1026 61 negative_sampler.num_negs_per_pos 23.0 +1026 61 training.batch_size 1.0 +1026 62 model.embedding_dim 0.0 +1026 62 model.scoring_fct_norm 1.0 +1026 62 optimizer.lr 0.008491459254640139 +1026 62 negative_sampler.num_negs_per_pos 89.0 +1026 62 training.batch_size 0.0 +1026 63 model.embedding_dim 2.0 +1026 63 model.scoring_fct_norm 2.0 +1026 63 optimizer.lr 0.0994500248604494 +1026 63 negative_sampler.num_negs_per_pos 10.0 +1026 63 training.batch_size 1.0 +1026 64 model.embedding_dim 2.0 +1026 64 model.scoring_fct_norm 2.0 +1026 64 optimizer.lr 0.055620549782913364 +1026 64 negative_sampler.num_negs_per_pos 95.0 +1026 64 training.batch_size 0.0 +1026 65 model.embedding_dim 2.0 +1026 65 model.scoring_fct_norm 2.0 +1026 65 optimizer.lr 0.001253613126394405 +1026 65 negative_sampler.num_negs_per_pos 30.0 +1026 65 training.batch_size 2.0 +1026 66 model.embedding_dim 0.0 +1026 66 
model.scoring_fct_norm 1.0 +1026 66 optimizer.lr 0.030129429705393493 +1026 66 negative_sampler.num_negs_per_pos 42.0 +1026 66 training.batch_size 0.0 +1026 67 model.embedding_dim 0.0 +1026 67 model.scoring_fct_norm 2.0 +1026 67 optimizer.lr 0.02700333443711123 +1026 67 negative_sampler.num_negs_per_pos 33.0 +1026 67 training.batch_size 0.0 +1026 68 model.embedding_dim 1.0 +1026 68 model.scoring_fct_norm 1.0 +1026 68 optimizer.lr 0.002259567217337989 +1026 68 negative_sampler.num_negs_per_pos 71.0 +1026 68 training.batch_size 0.0 +1026 69 model.embedding_dim 2.0 +1026 69 model.scoring_fct_norm 1.0 +1026 69 optimizer.lr 0.024298897490043714 +1026 69 negative_sampler.num_negs_per_pos 86.0 +1026 69 training.batch_size 1.0 +1026 70 model.embedding_dim 1.0 +1026 70 model.scoring_fct_norm 1.0 +1026 70 optimizer.lr 0.00580561536951029 +1026 70 negative_sampler.num_negs_per_pos 0.0 +1026 70 training.batch_size 2.0 +1026 71 model.embedding_dim 2.0 +1026 71 model.scoring_fct_norm 2.0 +1026 71 optimizer.lr 0.0331834543095784 +1026 71 negative_sampler.num_negs_per_pos 83.0 +1026 71 training.batch_size 1.0 +1026 72 model.embedding_dim 2.0 +1026 72 model.scoring_fct_norm 2.0 +1026 72 optimizer.lr 0.0646147102811264 +1026 72 negative_sampler.num_negs_per_pos 57.0 +1026 72 training.batch_size 0.0 +1026 73 model.embedding_dim 2.0 +1026 73 model.scoring_fct_norm 2.0 +1026 73 optimizer.lr 0.0018523602500049802 +1026 73 negative_sampler.num_negs_per_pos 8.0 +1026 73 training.batch_size 1.0 +1026 74 model.embedding_dim 1.0 +1026 74 model.scoring_fct_norm 2.0 +1026 74 optimizer.lr 0.0016969622339673193 +1026 74 negative_sampler.num_negs_per_pos 47.0 +1026 74 training.batch_size 0.0 +1026 75 model.embedding_dim 2.0 +1026 75 model.scoring_fct_norm 1.0 +1026 75 optimizer.lr 0.06162222683489593 +1026 75 negative_sampler.num_negs_per_pos 14.0 +1026 75 training.batch_size 2.0 +1026 76 model.embedding_dim 2.0 +1026 76 model.scoring_fct_norm 1.0 +1026 76 optimizer.lr 0.09947744171614825 +1026 
76 negative_sampler.num_negs_per_pos 21.0 +1026 76 training.batch_size 2.0 +1026 77 model.embedding_dim 1.0 +1026 77 model.scoring_fct_norm 2.0 +1026 77 optimizer.lr 0.003378429219445947 +1026 77 negative_sampler.num_negs_per_pos 9.0 +1026 77 training.batch_size 2.0 +1026 78 model.embedding_dim 1.0 +1026 78 model.scoring_fct_norm 2.0 +1026 78 optimizer.lr 0.002079322739746163 +1026 78 negative_sampler.num_negs_per_pos 16.0 +1026 78 training.batch_size 2.0 +1026 79 model.embedding_dim 0.0 +1026 79 model.scoring_fct_norm 1.0 +1026 79 optimizer.lr 0.02136186884111824 +1026 79 negative_sampler.num_negs_per_pos 25.0 +1026 79 training.batch_size 0.0 +1026 80 model.embedding_dim 1.0 +1026 80 model.scoring_fct_norm 2.0 +1026 80 optimizer.lr 0.09862059857752863 +1026 80 negative_sampler.num_negs_per_pos 57.0 +1026 80 training.batch_size 1.0 +1026 81 model.embedding_dim 2.0 +1026 81 model.scoring_fct_norm 2.0 +1026 81 optimizer.lr 0.010035040882266814 +1026 81 negative_sampler.num_negs_per_pos 76.0 +1026 81 training.batch_size 0.0 +1026 82 model.embedding_dim 2.0 +1026 82 model.scoring_fct_norm 1.0 +1026 82 optimizer.lr 0.07223326299435268 +1026 82 negative_sampler.num_negs_per_pos 7.0 +1026 82 training.batch_size 0.0 +1026 83 model.embedding_dim 0.0 +1026 83 model.scoring_fct_norm 2.0 +1026 83 optimizer.lr 0.00225636933282844 +1026 83 negative_sampler.num_negs_per_pos 26.0 +1026 83 training.batch_size 1.0 +1026 84 model.embedding_dim 0.0 +1026 84 model.scoring_fct_norm 2.0 +1026 84 optimizer.lr 0.0020514119154199854 +1026 84 negative_sampler.num_negs_per_pos 58.0 +1026 84 training.batch_size 1.0 +1026 85 model.embedding_dim 1.0 +1026 85 model.scoring_fct_norm 1.0 +1026 85 optimizer.lr 0.0010126373195645001 +1026 85 negative_sampler.num_negs_per_pos 70.0 +1026 85 training.batch_size 1.0 +1026 86 model.embedding_dim 2.0 +1026 86 model.scoring_fct_norm 2.0 +1026 86 optimizer.lr 0.003642650977121448 +1026 86 negative_sampler.num_negs_per_pos 15.0 +1026 86 training.batch_size 
0.0 +1026 87 model.embedding_dim 2.0 +1026 87 model.scoring_fct_norm 2.0 +1026 87 optimizer.lr 0.021200861662087688 +1026 87 negative_sampler.num_negs_per_pos 54.0 +1026 87 training.batch_size 2.0 +1026 88 model.embedding_dim 2.0 +1026 88 model.scoring_fct_norm 2.0 +1026 88 optimizer.lr 0.00311958015875314 +1026 88 negative_sampler.num_negs_per_pos 11.0 +1026 88 training.batch_size 0.0 +1026 89 model.embedding_dim 0.0 +1026 89 model.scoring_fct_norm 2.0 +1026 89 optimizer.lr 0.005714056862324449 +1026 89 negative_sampler.num_negs_per_pos 52.0 +1026 89 training.batch_size 0.0 +1026 90 model.embedding_dim 0.0 +1026 90 model.scoring_fct_norm 1.0 +1026 90 optimizer.lr 0.001336482281562541 +1026 90 negative_sampler.num_negs_per_pos 96.0 +1026 90 training.batch_size 2.0 +1026 91 model.embedding_dim 1.0 +1026 91 model.scoring_fct_norm 2.0 +1026 91 optimizer.lr 0.0025058709768652912 +1026 91 negative_sampler.num_negs_per_pos 0.0 +1026 91 training.batch_size 1.0 +1026 92 model.embedding_dim 2.0 +1026 92 model.scoring_fct_norm 2.0 +1026 92 optimizer.lr 0.0010054525955774103 +1026 92 negative_sampler.num_negs_per_pos 59.0 +1026 92 training.batch_size 2.0 +1026 93 model.embedding_dim 2.0 +1026 93 model.scoring_fct_norm 1.0 +1026 93 optimizer.lr 0.0016749729646829135 +1026 93 negative_sampler.num_negs_per_pos 92.0 +1026 93 training.batch_size 1.0 +1026 94 model.embedding_dim 2.0 +1026 94 model.scoring_fct_norm 2.0 +1026 94 optimizer.lr 0.0014696952173510012 +1026 94 negative_sampler.num_negs_per_pos 90.0 +1026 94 training.batch_size 1.0 +1026 95 model.embedding_dim 0.0 +1026 95 model.scoring_fct_norm 1.0 +1026 95 optimizer.lr 0.009161581035462432 +1026 95 negative_sampler.num_negs_per_pos 18.0 +1026 95 training.batch_size 2.0 +1026 96 model.embedding_dim 0.0 +1026 96 model.scoring_fct_norm 1.0 +1026 96 optimizer.lr 0.009713040511831823 +1026 96 negative_sampler.num_negs_per_pos 9.0 +1026 96 training.batch_size 2.0 +1026 97 model.embedding_dim 2.0 +1026 97 model.scoring_fct_norm 
2.0 +1026 97 optimizer.lr 0.0027461274371294864 +1026 97 negative_sampler.num_negs_per_pos 59.0 +1026 97 training.batch_size 1.0 +1026 98 model.embedding_dim 0.0 +1026 98 model.scoring_fct_norm 1.0 +1026 98 optimizer.lr 0.09444873153435078 +1026 98 negative_sampler.num_negs_per_pos 70.0 +1026 98 training.batch_size 0.0 +1026 99 model.embedding_dim 2.0 +1026 99 model.scoring_fct_norm 1.0 +1026 99 optimizer.lr 0.010728871452567684 +1026 99 negative_sampler.num_negs_per_pos 72.0 +1026 99 training.batch_size 2.0 +1026 100 model.embedding_dim 1.0 +1026 100 model.scoring_fct_norm 1.0 +1026 100 optimizer.lr 0.006065059050493249 +1026 100 negative_sampler.num_negs_per_pos 87.0 +1026 100 training.batch_size 2.0 +1026 1 dataset """kinships""" +1026 1 model """unstructuredmodel""" +1026 1 loss """bceaftersigmoid""" +1026 1 regularizer """no""" +1026 1 optimizer """adam""" +1026 1 training_loop """owa""" +1026 1 negative_sampler """basic""" +1026 1 evaluator """rankbased""" +1026 2 dataset """kinships""" +1026 2 model """unstructuredmodel""" +1026 2 loss """bceaftersigmoid""" +1026 2 regularizer """no""" +1026 2 optimizer """adam""" +1026 2 training_loop """owa""" +1026 2 negative_sampler """basic""" +1026 2 evaluator """rankbased""" +1026 3 dataset """kinships""" +1026 3 model """unstructuredmodel""" +1026 3 loss """bceaftersigmoid""" +1026 3 regularizer """no""" +1026 3 optimizer """adam""" +1026 3 training_loop """owa""" +1026 3 negative_sampler """basic""" +1026 3 evaluator """rankbased""" +1026 4 dataset """kinships""" +1026 4 model """unstructuredmodel""" +1026 4 loss """bceaftersigmoid""" +1026 4 regularizer """no""" +1026 4 optimizer """adam""" +1026 4 training_loop """owa""" +1026 4 negative_sampler """basic""" +1026 4 evaluator """rankbased""" +1026 5 dataset """kinships""" +1026 5 model """unstructuredmodel""" +1026 5 loss """bceaftersigmoid""" +1026 5 regularizer """no""" +1026 5 optimizer """adam""" +1026 5 training_loop """owa""" +1026 5 negative_sampler 
"""basic""" +1026 5 evaluator """rankbased""" +1026 6 dataset """kinships""" +1026 6 model """unstructuredmodel""" +1026 6 loss """bceaftersigmoid""" +1026 6 regularizer """no""" +1026 6 optimizer """adam""" +1026 6 training_loop """owa""" +1026 6 negative_sampler """basic""" +1026 6 evaluator """rankbased""" +1026 7 dataset """kinships""" +1026 7 model """unstructuredmodel""" +1026 7 loss """bceaftersigmoid""" +1026 7 regularizer """no""" +1026 7 optimizer """adam""" +1026 7 training_loop """owa""" +1026 7 negative_sampler """basic""" +1026 7 evaluator """rankbased""" +1026 8 dataset """kinships""" +1026 8 model """unstructuredmodel""" +1026 8 loss """bceaftersigmoid""" +1026 8 regularizer """no""" +1026 8 optimizer """adam""" +1026 8 training_loop """owa""" +1026 8 negative_sampler """basic""" +1026 8 evaluator """rankbased""" +1026 9 dataset """kinships""" +1026 9 model """unstructuredmodel""" +1026 9 loss """bceaftersigmoid""" +1026 9 regularizer """no""" +1026 9 optimizer """adam""" +1026 9 training_loop """owa""" +1026 9 negative_sampler """basic""" +1026 9 evaluator """rankbased""" +1026 10 dataset """kinships""" +1026 10 model """unstructuredmodel""" +1026 10 loss """bceaftersigmoid""" +1026 10 regularizer """no""" +1026 10 optimizer """adam""" +1026 10 training_loop """owa""" +1026 10 negative_sampler """basic""" +1026 10 evaluator """rankbased""" +1026 11 dataset """kinships""" +1026 11 model """unstructuredmodel""" +1026 11 loss """bceaftersigmoid""" +1026 11 regularizer """no""" +1026 11 optimizer """adam""" +1026 11 training_loop """owa""" +1026 11 negative_sampler """basic""" +1026 11 evaluator """rankbased""" +1026 12 dataset """kinships""" +1026 12 model """unstructuredmodel""" +1026 12 loss """bceaftersigmoid""" +1026 12 regularizer """no""" +1026 12 optimizer """adam""" +1026 12 training_loop """owa""" +1026 12 negative_sampler """basic""" +1026 12 evaluator """rankbased""" +1026 13 dataset """kinships""" +1026 13 model """unstructuredmodel""" 
+1026 13 loss """bceaftersigmoid""" +1026 13 regularizer """no""" +1026 13 optimizer """adam""" +1026 13 training_loop """owa""" +1026 13 negative_sampler """basic""" +1026 13 evaluator """rankbased""" +1026 14 dataset """kinships""" +1026 14 model """unstructuredmodel""" +1026 14 loss """bceaftersigmoid""" +1026 14 regularizer """no""" +1026 14 optimizer """adam""" +1026 14 training_loop """owa""" +1026 14 negative_sampler """basic""" +1026 14 evaluator """rankbased""" +1026 15 dataset """kinships""" +1026 15 model """unstructuredmodel""" +1026 15 loss """bceaftersigmoid""" +1026 15 regularizer """no""" +1026 15 optimizer """adam""" +1026 15 training_loop """owa""" +1026 15 negative_sampler """basic""" +1026 15 evaluator """rankbased""" +1026 16 dataset """kinships""" +1026 16 model """unstructuredmodel""" +1026 16 loss """bceaftersigmoid""" +1026 16 regularizer """no""" +1026 16 optimizer """adam""" +1026 16 training_loop """owa""" +1026 16 negative_sampler """basic""" +1026 16 evaluator """rankbased""" +1026 17 dataset """kinships""" +1026 17 model """unstructuredmodel""" +1026 17 loss """bceaftersigmoid""" +1026 17 regularizer """no""" +1026 17 optimizer """adam""" +1026 17 training_loop """owa""" +1026 17 negative_sampler """basic""" +1026 17 evaluator """rankbased""" +1026 18 dataset """kinships""" +1026 18 model """unstructuredmodel""" +1026 18 loss """bceaftersigmoid""" +1026 18 regularizer """no""" +1026 18 optimizer """adam""" +1026 18 training_loop """owa""" +1026 18 negative_sampler """basic""" +1026 18 evaluator """rankbased""" +1026 19 dataset """kinships""" +1026 19 model """unstructuredmodel""" +1026 19 loss """bceaftersigmoid""" +1026 19 regularizer """no""" +1026 19 optimizer """adam""" +1026 19 training_loop """owa""" +1026 19 negative_sampler """basic""" +1026 19 evaluator """rankbased""" +1026 20 dataset """kinships""" +1026 20 model """unstructuredmodel""" +1026 20 loss """bceaftersigmoid""" +1026 20 regularizer """no""" +1026 20 optimizer 
"""adam""" +1026 20 training_loop """owa""" +1026 20 negative_sampler """basic""" +1026 20 evaluator """rankbased""" +1026 21 dataset """kinships""" +1026 21 model """unstructuredmodel""" +1026 21 loss """bceaftersigmoid""" +1026 21 regularizer """no""" +1026 21 optimizer """adam""" +1026 21 training_loop """owa""" +1026 21 negative_sampler """basic""" +1026 21 evaluator """rankbased""" +1026 22 dataset """kinships""" +1026 22 model """unstructuredmodel""" +1026 22 loss """bceaftersigmoid""" +1026 22 regularizer """no""" +1026 22 optimizer """adam""" +1026 22 training_loop """owa""" +1026 22 negative_sampler """basic""" +1026 22 evaluator """rankbased""" +1026 23 dataset """kinships""" +1026 23 model """unstructuredmodel""" +1026 23 loss """bceaftersigmoid""" +1026 23 regularizer """no""" +1026 23 optimizer """adam""" +1026 23 training_loop """owa""" +1026 23 negative_sampler """basic""" +1026 23 evaluator """rankbased""" +1026 24 dataset """kinships""" +1026 24 model """unstructuredmodel""" +1026 24 loss """bceaftersigmoid""" +1026 24 regularizer """no""" +1026 24 optimizer """adam""" +1026 24 training_loop """owa""" +1026 24 negative_sampler """basic""" +1026 24 evaluator """rankbased""" +1026 25 dataset """kinships""" +1026 25 model """unstructuredmodel""" +1026 25 loss """bceaftersigmoid""" +1026 25 regularizer """no""" +1026 25 optimizer """adam""" +1026 25 training_loop """owa""" +1026 25 negative_sampler """basic""" +1026 25 evaluator """rankbased""" +1026 26 dataset """kinships""" +1026 26 model """unstructuredmodel""" +1026 26 loss """bceaftersigmoid""" +1026 26 regularizer """no""" +1026 26 optimizer """adam""" +1026 26 training_loop """owa""" +1026 26 negative_sampler """basic""" +1026 26 evaluator """rankbased""" +1026 27 dataset """kinships""" +1026 27 model """unstructuredmodel""" +1026 27 loss """bceaftersigmoid""" +1026 27 regularizer """no""" +1026 27 optimizer """adam""" +1026 27 training_loop """owa""" +1026 27 negative_sampler """basic""" +1026 
27 evaluator """rankbased""" +1026 28 dataset """kinships""" +1026 28 model """unstructuredmodel""" +1026 28 loss """bceaftersigmoid""" +1026 28 regularizer """no""" +1026 28 optimizer """adam""" +1026 28 training_loop """owa""" +1026 28 negative_sampler """basic""" +1026 28 evaluator """rankbased""" +1026 29 dataset """kinships""" +1026 29 model """unstructuredmodel""" +1026 29 loss """bceaftersigmoid""" +1026 29 regularizer """no""" +1026 29 optimizer """adam""" +1026 29 training_loop """owa""" +1026 29 negative_sampler """basic""" +1026 29 evaluator """rankbased""" +1026 30 dataset """kinships""" +1026 30 model """unstructuredmodel""" +1026 30 loss """bceaftersigmoid""" +1026 30 regularizer """no""" +1026 30 optimizer """adam""" +1026 30 training_loop """owa""" +1026 30 negative_sampler """basic""" +1026 30 evaluator """rankbased""" +1026 31 dataset """kinships""" +1026 31 model """unstructuredmodel""" +1026 31 loss """bceaftersigmoid""" +1026 31 regularizer """no""" +1026 31 optimizer """adam""" +1026 31 training_loop """owa""" +1026 31 negative_sampler """basic""" +1026 31 evaluator """rankbased""" +1026 32 dataset """kinships""" +1026 32 model """unstructuredmodel""" +1026 32 loss """bceaftersigmoid""" +1026 32 regularizer """no""" +1026 32 optimizer """adam""" +1026 32 training_loop """owa""" +1026 32 negative_sampler """basic""" +1026 32 evaluator """rankbased""" +1026 33 dataset """kinships""" +1026 33 model """unstructuredmodel""" +1026 33 loss """bceaftersigmoid""" +1026 33 regularizer """no""" +1026 33 optimizer """adam""" +1026 33 training_loop """owa""" +1026 33 negative_sampler """basic""" +1026 33 evaluator """rankbased""" +1026 34 dataset """kinships""" +1026 34 model """unstructuredmodel""" +1026 34 loss """bceaftersigmoid""" +1026 34 regularizer """no""" +1026 34 optimizer """adam""" +1026 34 training_loop """owa""" +1026 34 negative_sampler """basic""" +1026 34 evaluator """rankbased""" +1026 35 dataset """kinships""" +1026 35 model 
"""unstructuredmodel""" +1026 35 loss """bceaftersigmoid""" +1026 35 regularizer """no""" +1026 35 optimizer """adam""" +1026 35 training_loop """owa""" +1026 35 negative_sampler """basic""" +1026 35 evaluator """rankbased""" +1026 36 dataset """kinships""" +1026 36 model """unstructuredmodel""" +1026 36 loss """bceaftersigmoid""" +1026 36 regularizer """no""" +1026 36 optimizer """adam""" +1026 36 training_loop """owa""" +1026 36 negative_sampler """basic""" +1026 36 evaluator """rankbased""" +1026 37 dataset """kinships""" +1026 37 model """unstructuredmodel""" +1026 37 loss """bceaftersigmoid""" +1026 37 regularizer """no""" +1026 37 optimizer """adam""" +1026 37 training_loop """owa""" +1026 37 negative_sampler """basic""" +1026 37 evaluator """rankbased""" +1026 38 dataset """kinships""" +1026 38 model """unstructuredmodel""" +1026 38 loss """bceaftersigmoid""" +1026 38 regularizer """no""" +1026 38 optimizer """adam""" +1026 38 training_loop """owa""" +1026 38 negative_sampler """basic""" +1026 38 evaluator """rankbased""" +1026 39 dataset """kinships""" +1026 39 model """unstructuredmodel""" +1026 39 loss """bceaftersigmoid""" +1026 39 regularizer """no""" +1026 39 optimizer """adam""" +1026 39 training_loop """owa""" +1026 39 negative_sampler """basic""" +1026 39 evaluator """rankbased""" +1026 40 dataset """kinships""" +1026 40 model """unstructuredmodel""" +1026 40 loss """bceaftersigmoid""" +1026 40 regularizer """no""" +1026 40 optimizer """adam""" +1026 40 training_loop """owa""" +1026 40 negative_sampler """basic""" +1026 40 evaluator """rankbased""" +1026 41 dataset """kinships""" +1026 41 model """unstructuredmodel""" +1026 41 loss """bceaftersigmoid""" +1026 41 regularizer """no""" +1026 41 optimizer """adam""" +1026 41 training_loop """owa""" +1026 41 negative_sampler """basic""" +1026 41 evaluator """rankbased""" +1026 42 dataset """kinships""" +1026 42 model """unstructuredmodel""" +1026 42 loss """bceaftersigmoid""" +1026 42 regularizer 
"""no""" +1026 42 optimizer """adam""" +1026 42 training_loop """owa""" +1026 42 negative_sampler """basic""" +1026 42 evaluator """rankbased""" +1026 43 dataset """kinships""" +1026 43 model """unstructuredmodel""" +1026 43 loss """bceaftersigmoid""" +1026 43 regularizer """no""" +1026 43 optimizer """adam""" +1026 43 training_loop """owa""" +1026 43 negative_sampler """basic""" +1026 43 evaluator """rankbased""" +1026 44 dataset """kinships""" +1026 44 model """unstructuredmodel""" +1026 44 loss """bceaftersigmoid""" +1026 44 regularizer """no""" +1026 44 optimizer """adam""" +1026 44 training_loop """owa""" +1026 44 negative_sampler """basic""" +1026 44 evaluator """rankbased""" +1026 45 dataset """kinships""" +1026 45 model """unstructuredmodel""" +1026 45 loss """bceaftersigmoid""" +1026 45 regularizer """no""" +1026 45 optimizer """adam""" +1026 45 training_loop """owa""" +1026 45 negative_sampler """basic""" +1026 45 evaluator """rankbased""" +1026 46 dataset """kinships""" +1026 46 model """unstructuredmodel""" +1026 46 loss """bceaftersigmoid""" +1026 46 regularizer """no""" +1026 46 optimizer """adam""" +1026 46 training_loop """owa""" +1026 46 negative_sampler """basic""" +1026 46 evaluator """rankbased""" +1026 47 dataset """kinships""" +1026 47 model """unstructuredmodel""" +1026 47 loss """bceaftersigmoid""" +1026 47 regularizer """no""" +1026 47 optimizer """adam""" +1026 47 training_loop """owa""" +1026 47 negative_sampler """basic""" +1026 47 evaluator """rankbased""" +1026 48 dataset """kinships""" +1026 48 model """unstructuredmodel""" +1026 48 loss """bceaftersigmoid""" +1026 48 regularizer """no""" +1026 48 optimizer """adam""" +1026 48 training_loop """owa""" +1026 48 negative_sampler """basic""" +1026 48 evaluator """rankbased""" +1026 49 dataset """kinships""" +1026 49 model """unstructuredmodel""" +1026 49 loss """bceaftersigmoid""" +1026 49 regularizer """no""" +1026 49 optimizer """adam""" +1026 49 training_loop """owa""" +1026 49 
negative_sampler """basic""" +1026 49 evaluator """rankbased""" +1026 50 dataset """kinships""" +1026 50 model """unstructuredmodel""" +1026 50 loss """bceaftersigmoid""" +1026 50 regularizer """no""" +1026 50 optimizer """adam""" +1026 50 training_loop """owa""" +1026 50 negative_sampler """basic""" +1026 50 evaluator """rankbased""" +1026 51 dataset """kinships""" +1026 51 model """unstructuredmodel""" +1026 51 loss """bceaftersigmoid""" +1026 51 regularizer """no""" +1026 51 optimizer """adam""" +1026 51 training_loop """owa""" +1026 51 negative_sampler """basic""" +1026 51 evaluator """rankbased""" +1026 52 dataset """kinships""" +1026 52 model """unstructuredmodel""" +1026 52 loss """bceaftersigmoid""" +1026 52 regularizer """no""" +1026 52 optimizer """adam""" +1026 52 training_loop """owa""" +1026 52 negative_sampler """basic""" +1026 52 evaluator """rankbased""" +1026 53 dataset """kinships""" +1026 53 model """unstructuredmodel""" +1026 53 loss """bceaftersigmoid""" +1026 53 regularizer """no""" +1026 53 optimizer """adam""" +1026 53 training_loop """owa""" +1026 53 negative_sampler """basic""" +1026 53 evaluator """rankbased""" +1026 54 dataset """kinships""" +1026 54 model """unstructuredmodel""" +1026 54 loss """bceaftersigmoid""" +1026 54 regularizer """no""" +1026 54 optimizer """adam""" +1026 54 training_loop """owa""" +1026 54 negative_sampler """basic""" +1026 54 evaluator """rankbased""" +1026 55 dataset """kinships""" +1026 55 model """unstructuredmodel""" +1026 55 loss """bceaftersigmoid""" +1026 55 regularizer """no""" +1026 55 optimizer """adam""" +1026 55 training_loop """owa""" +1026 55 negative_sampler """basic""" +1026 55 evaluator """rankbased""" +1026 56 dataset """kinships""" +1026 56 model """unstructuredmodel""" +1026 56 loss """bceaftersigmoid""" +1026 56 regularizer """no""" +1026 56 optimizer """adam""" +1026 56 training_loop """owa""" +1026 56 negative_sampler """basic""" +1026 56 evaluator """rankbased""" +1026 57 dataset 
"""kinships""" +1026 57 model """unstructuredmodel""" +1026 57 loss """bceaftersigmoid""" +1026 57 regularizer """no""" +1026 57 optimizer """adam""" +1026 57 training_loop """owa""" +1026 57 negative_sampler """basic""" +1026 57 evaluator """rankbased""" +1026 58 dataset """kinships""" +1026 58 model """unstructuredmodel""" +1026 58 loss """bceaftersigmoid""" +1026 58 regularizer """no""" +1026 58 optimizer """adam""" +1026 58 training_loop """owa""" +1026 58 negative_sampler """basic""" +1026 58 evaluator """rankbased""" +1026 59 dataset """kinships""" +1026 59 model """unstructuredmodel""" +1026 59 loss """bceaftersigmoid""" +1026 59 regularizer """no""" +1026 59 optimizer """adam""" +1026 59 training_loop """owa""" +1026 59 negative_sampler """basic""" +1026 59 evaluator """rankbased""" +1026 60 dataset """kinships""" +1026 60 model """unstructuredmodel""" +1026 60 loss """bceaftersigmoid""" +1026 60 regularizer """no""" +1026 60 optimizer """adam""" +1026 60 training_loop """owa""" +1026 60 negative_sampler """basic""" +1026 60 evaluator """rankbased""" +1026 61 dataset """kinships""" +1026 61 model """unstructuredmodel""" +1026 61 loss """bceaftersigmoid""" +1026 61 regularizer """no""" +1026 61 optimizer """adam""" +1026 61 training_loop """owa""" +1026 61 negative_sampler """basic""" +1026 61 evaluator """rankbased""" +1026 62 dataset """kinships""" +1026 62 model """unstructuredmodel""" +1026 62 loss """bceaftersigmoid""" +1026 62 regularizer """no""" +1026 62 optimizer """adam""" +1026 62 training_loop """owa""" +1026 62 negative_sampler """basic""" +1026 62 evaluator """rankbased""" +1026 63 dataset """kinships""" +1026 63 model """unstructuredmodel""" +1026 63 loss """bceaftersigmoid""" +1026 63 regularizer """no""" +1026 63 optimizer """adam""" +1026 63 training_loop """owa""" +1026 63 negative_sampler """basic""" +1026 63 evaluator """rankbased""" +1026 64 dataset """kinships""" +1026 64 model """unstructuredmodel""" +1026 64 loss 
"""bceaftersigmoid""" +1026 64 regularizer """no""" +1026 64 optimizer """adam""" +1026 64 training_loop """owa""" +1026 64 negative_sampler """basic""" +1026 64 evaluator """rankbased""" +1026 65 dataset """kinships""" +1026 65 model """unstructuredmodel""" +1026 65 loss """bceaftersigmoid""" +1026 65 regularizer """no""" +1026 65 optimizer """adam""" +1026 65 training_loop """owa""" +1026 65 negative_sampler """basic""" +1026 65 evaluator """rankbased""" +1026 66 dataset """kinships""" +1026 66 model """unstructuredmodel""" +1026 66 loss """bceaftersigmoid""" +1026 66 regularizer """no""" +1026 66 optimizer """adam""" +1026 66 training_loop """owa""" +1026 66 negative_sampler """basic""" +1026 66 evaluator """rankbased""" +1026 67 dataset """kinships""" +1026 67 model """unstructuredmodel""" +1026 67 loss """bceaftersigmoid""" +1026 67 regularizer """no""" +1026 67 optimizer """adam""" +1026 67 training_loop """owa""" +1026 67 negative_sampler """basic""" +1026 67 evaluator """rankbased""" +1026 68 dataset """kinships""" +1026 68 model """unstructuredmodel""" +1026 68 loss """bceaftersigmoid""" +1026 68 regularizer """no""" +1026 68 optimizer """adam""" +1026 68 training_loop """owa""" +1026 68 negative_sampler """basic""" +1026 68 evaluator """rankbased""" +1026 69 dataset """kinships""" +1026 69 model """unstructuredmodel""" +1026 69 loss """bceaftersigmoid""" +1026 69 regularizer """no""" +1026 69 optimizer """adam""" +1026 69 training_loop """owa""" +1026 69 negative_sampler """basic""" +1026 69 evaluator """rankbased""" +1026 70 dataset """kinships""" +1026 70 model """unstructuredmodel""" +1026 70 loss """bceaftersigmoid""" +1026 70 regularizer """no""" +1026 70 optimizer """adam""" +1026 70 training_loop """owa""" +1026 70 negative_sampler """basic""" +1026 70 evaluator """rankbased""" +1026 71 dataset """kinships""" +1026 71 model """unstructuredmodel""" +1026 71 loss """bceaftersigmoid""" +1026 71 regularizer """no""" +1026 71 optimizer """adam""" +1026 
71 training_loop """owa""" +1026 71 negative_sampler """basic""" +1026 71 evaluator """rankbased""" +1026 72 dataset """kinships""" +1026 72 model """unstructuredmodel""" +1026 72 loss """bceaftersigmoid""" +1026 72 regularizer """no""" +1026 72 optimizer """adam""" +1026 72 training_loop """owa""" +1026 72 negative_sampler """basic""" +1026 72 evaluator """rankbased""" +1026 73 dataset """kinships""" +1026 73 model """unstructuredmodel""" +1026 73 loss """bceaftersigmoid""" +1026 73 regularizer """no""" +1026 73 optimizer """adam""" +1026 73 training_loop """owa""" +1026 73 negative_sampler """basic""" +1026 73 evaluator """rankbased""" +1026 74 dataset """kinships""" +1026 74 model """unstructuredmodel""" +1026 74 loss """bceaftersigmoid""" +1026 74 regularizer """no""" +1026 74 optimizer """adam""" +1026 74 training_loop """owa""" +1026 74 negative_sampler """basic""" +1026 74 evaluator """rankbased""" +1026 75 dataset """kinships""" +1026 75 model """unstructuredmodel""" +1026 75 loss """bceaftersigmoid""" +1026 75 regularizer """no""" +1026 75 optimizer """adam""" +1026 75 training_loop """owa""" +1026 75 negative_sampler """basic""" +1026 75 evaluator """rankbased""" +1026 76 dataset """kinships""" +1026 76 model """unstructuredmodel""" +1026 76 loss """bceaftersigmoid""" +1026 76 regularizer """no""" +1026 76 optimizer """adam""" +1026 76 training_loop """owa""" +1026 76 negative_sampler """basic""" +1026 76 evaluator """rankbased""" +1026 77 dataset """kinships""" +1026 77 model """unstructuredmodel""" +1026 77 loss """bceaftersigmoid""" +1026 77 regularizer """no""" +1026 77 optimizer """adam""" +1026 77 training_loop """owa""" +1026 77 negative_sampler """basic""" +1026 77 evaluator """rankbased""" +1026 78 dataset """kinships""" +1026 78 model """unstructuredmodel""" +1026 78 loss """bceaftersigmoid""" +1026 78 regularizer """no""" +1026 78 optimizer """adam""" +1026 78 training_loop """owa""" +1026 78 negative_sampler """basic""" +1026 78 evaluator 
"""rankbased""" +1026 79 dataset """kinships""" +1026 79 model """unstructuredmodel""" +1026 79 loss """bceaftersigmoid""" +1026 79 regularizer """no""" +1026 79 optimizer """adam""" +1026 79 training_loop """owa""" +1026 79 negative_sampler """basic""" +1026 79 evaluator """rankbased""" +1026 80 dataset """kinships""" +1026 80 model """unstructuredmodel""" +1026 80 loss """bceaftersigmoid""" +1026 80 regularizer """no""" +1026 80 optimizer """adam""" +1026 80 training_loop """owa""" +1026 80 negative_sampler """basic""" +1026 80 evaluator """rankbased""" +1026 81 dataset """kinships""" +1026 81 model """unstructuredmodel""" +1026 81 loss """bceaftersigmoid""" +1026 81 regularizer """no""" +1026 81 optimizer """adam""" +1026 81 training_loop """owa""" +1026 81 negative_sampler """basic""" +1026 81 evaluator """rankbased""" +1026 82 dataset """kinships""" +1026 82 model """unstructuredmodel""" +1026 82 loss """bceaftersigmoid""" +1026 82 regularizer """no""" +1026 82 optimizer """adam""" +1026 82 training_loop """owa""" +1026 82 negative_sampler """basic""" +1026 82 evaluator """rankbased""" +1026 83 dataset """kinships""" +1026 83 model """unstructuredmodel""" +1026 83 loss """bceaftersigmoid""" +1026 83 regularizer """no""" +1026 83 optimizer """adam""" +1026 83 training_loop """owa""" +1026 83 negative_sampler """basic""" +1026 83 evaluator """rankbased""" +1026 84 dataset """kinships""" +1026 84 model """unstructuredmodel""" +1026 84 loss """bceaftersigmoid""" +1026 84 regularizer """no""" +1026 84 optimizer """adam""" +1026 84 training_loop """owa""" +1026 84 negative_sampler """basic""" +1026 84 evaluator """rankbased""" +1026 85 dataset """kinships""" +1026 85 model """unstructuredmodel""" +1026 85 loss """bceaftersigmoid""" +1026 85 regularizer """no""" +1026 85 optimizer """adam""" +1026 85 training_loop """owa""" +1026 85 negative_sampler """basic""" +1026 85 evaluator """rankbased""" +1026 86 dataset """kinships""" +1026 86 model """unstructuredmodel""" 
+1026 86 loss """bceaftersigmoid""" +1026 86 regularizer """no""" +1026 86 optimizer """adam""" +1026 86 training_loop """owa""" +1026 86 negative_sampler """basic""" +1026 86 evaluator """rankbased""" +1026 87 dataset """kinships""" +1026 87 model """unstructuredmodel""" +1026 87 loss """bceaftersigmoid""" +1026 87 regularizer """no""" +1026 87 optimizer """adam""" +1026 87 training_loop """owa""" +1026 87 negative_sampler """basic""" +1026 87 evaluator """rankbased""" +1026 88 dataset """kinships""" +1026 88 model """unstructuredmodel""" +1026 88 loss """bceaftersigmoid""" +1026 88 regularizer """no""" +1026 88 optimizer """adam""" +1026 88 training_loop """owa""" +1026 88 negative_sampler """basic""" +1026 88 evaluator """rankbased""" +1026 89 dataset """kinships""" +1026 89 model """unstructuredmodel""" +1026 89 loss """bceaftersigmoid""" +1026 89 regularizer """no""" +1026 89 optimizer """adam""" +1026 89 training_loop """owa""" +1026 89 negative_sampler """basic""" +1026 89 evaluator """rankbased""" +1026 90 dataset """kinships""" +1026 90 model """unstructuredmodel""" +1026 90 loss """bceaftersigmoid""" +1026 90 regularizer """no""" +1026 90 optimizer """adam""" +1026 90 training_loop """owa""" +1026 90 negative_sampler """basic""" +1026 90 evaluator """rankbased""" +1026 91 dataset """kinships""" +1026 91 model """unstructuredmodel""" +1026 91 loss """bceaftersigmoid""" +1026 91 regularizer """no""" +1026 91 optimizer """adam""" +1026 91 training_loop """owa""" +1026 91 negative_sampler """basic""" +1026 91 evaluator """rankbased""" +1026 92 dataset """kinships""" +1026 92 model """unstructuredmodel""" +1026 92 loss """bceaftersigmoid""" +1026 92 regularizer """no""" +1026 92 optimizer """adam""" +1026 92 training_loop """owa""" +1026 92 negative_sampler """basic""" +1026 92 evaluator """rankbased""" +1026 93 dataset """kinships""" +1026 93 model """unstructuredmodel""" +1026 93 loss """bceaftersigmoid""" +1026 93 regularizer """no""" +1026 93 optimizer 
"""adam""" +1026 93 training_loop """owa""" +1026 93 negative_sampler """basic""" +1026 93 evaluator """rankbased""" +1026 94 dataset """kinships""" +1026 94 model """unstructuredmodel""" +1026 94 loss """bceaftersigmoid""" +1026 94 regularizer """no""" +1026 94 optimizer """adam""" +1026 94 training_loop """owa""" +1026 94 negative_sampler """basic""" +1026 94 evaluator """rankbased""" +1026 95 dataset """kinships""" +1026 95 model """unstructuredmodel""" +1026 95 loss """bceaftersigmoid""" +1026 95 regularizer """no""" +1026 95 optimizer """adam""" +1026 95 training_loop """owa""" +1026 95 negative_sampler """basic""" +1026 95 evaluator """rankbased""" +1026 96 dataset """kinships""" +1026 96 model """unstructuredmodel""" +1026 96 loss """bceaftersigmoid""" +1026 96 regularizer """no""" +1026 96 optimizer """adam""" +1026 96 training_loop """owa""" +1026 96 negative_sampler """basic""" +1026 96 evaluator """rankbased""" +1026 97 dataset """kinships""" +1026 97 model """unstructuredmodel""" +1026 97 loss """bceaftersigmoid""" +1026 97 regularizer """no""" +1026 97 optimizer """adam""" +1026 97 training_loop """owa""" +1026 97 negative_sampler """basic""" +1026 97 evaluator """rankbased""" +1026 98 dataset """kinships""" +1026 98 model """unstructuredmodel""" +1026 98 loss """bceaftersigmoid""" +1026 98 regularizer """no""" +1026 98 optimizer """adam""" +1026 98 training_loop """owa""" +1026 98 negative_sampler """basic""" +1026 98 evaluator """rankbased""" +1026 99 dataset """kinships""" +1026 99 model """unstructuredmodel""" +1026 99 loss """bceaftersigmoid""" +1026 99 regularizer """no""" +1026 99 optimizer """adam""" +1026 99 training_loop """owa""" +1026 99 negative_sampler """basic""" +1026 99 evaluator """rankbased""" +1026 100 dataset """kinships""" +1026 100 model """unstructuredmodel""" +1026 100 loss """bceaftersigmoid""" +1026 100 regularizer """no""" +1026 100 optimizer """adam""" +1026 100 training_loop """owa""" +1026 100 negative_sampler """basic""" 
+1026 100 evaluator """rankbased""" +1027 1 model.embedding_dim 1.0 +1027 1 model.scoring_fct_norm 1.0 +1027 1 optimizer.lr 0.003710659410491518 +1027 1 negative_sampler.num_negs_per_pos 31.0 +1027 1 training.batch_size 2.0 +1027 2 model.embedding_dim 0.0 +1027 2 model.scoring_fct_norm 1.0 +1027 2 optimizer.lr 0.014582170385403827 +1027 2 negative_sampler.num_negs_per_pos 71.0 +1027 2 training.batch_size 1.0 +1027 3 model.embedding_dim 0.0 +1027 3 model.scoring_fct_norm 2.0 +1027 3 optimizer.lr 0.004977973150645801 +1027 3 negative_sampler.num_negs_per_pos 37.0 +1027 3 training.batch_size 2.0 +1027 4 model.embedding_dim 2.0 +1027 4 model.scoring_fct_norm 1.0 +1027 4 optimizer.lr 0.023841508043387197 +1027 4 negative_sampler.num_negs_per_pos 97.0 +1027 4 training.batch_size 1.0 +1027 5 model.embedding_dim 2.0 +1027 5 model.scoring_fct_norm 2.0 +1027 5 optimizer.lr 0.005633371737218628 +1027 5 negative_sampler.num_negs_per_pos 75.0 +1027 5 training.batch_size 1.0 +1027 6 model.embedding_dim 1.0 +1027 6 model.scoring_fct_norm 1.0 +1027 6 optimizer.lr 0.03820616509119815 +1027 6 negative_sampler.num_negs_per_pos 44.0 +1027 6 training.batch_size 1.0 +1027 7 model.embedding_dim 2.0 +1027 7 model.scoring_fct_norm 1.0 +1027 7 optimizer.lr 0.02235007476961875 +1027 7 negative_sampler.num_negs_per_pos 35.0 +1027 7 training.batch_size 2.0 +1027 8 model.embedding_dim 0.0 +1027 8 model.scoring_fct_norm 1.0 +1027 8 optimizer.lr 0.018261678875513228 +1027 8 negative_sampler.num_negs_per_pos 76.0 +1027 8 training.batch_size 1.0 +1027 9 model.embedding_dim 2.0 +1027 9 model.scoring_fct_norm 2.0 +1027 9 optimizer.lr 0.03200665457447848 +1027 9 negative_sampler.num_negs_per_pos 30.0 +1027 9 training.batch_size 1.0 +1027 10 model.embedding_dim 0.0 +1027 10 model.scoring_fct_norm 1.0 +1027 10 optimizer.lr 0.058790194406067475 +1027 10 negative_sampler.num_negs_per_pos 4.0 +1027 10 training.batch_size 1.0 +1027 11 model.embedding_dim 2.0 +1027 11 model.scoring_fct_norm 2.0 +1027 11 
optimizer.lr 0.02156753080285898 +1027 11 negative_sampler.num_negs_per_pos 21.0 +1027 11 training.batch_size 2.0 +1027 12 model.embedding_dim 1.0 +1027 12 model.scoring_fct_norm 1.0 +1027 12 optimizer.lr 0.004304864352187477 +1027 12 negative_sampler.num_negs_per_pos 93.0 +1027 12 training.batch_size 0.0 +1027 13 model.embedding_dim 2.0 +1027 13 model.scoring_fct_norm 2.0 +1027 13 optimizer.lr 0.017832241660617077 +1027 13 negative_sampler.num_negs_per_pos 57.0 +1027 13 training.batch_size 2.0 +1027 14 model.embedding_dim 2.0 +1027 14 model.scoring_fct_norm 1.0 +1027 14 optimizer.lr 0.004846630838786051 +1027 14 negative_sampler.num_negs_per_pos 91.0 +1027 14 training.batch_size 0.0 +1027 15 model.embedding_dim 0.0 +1027 15 model.scoring_fct_norm 1.0 +1027 15 optimizer.lr 0.09432172131904747 +1027 15 negative_sampler.num_negs_per_pos 93.0 +1027 15 training.batch_size 2.0 +1027 16 model.embedding_dim 2.0 +1027 16 model.scoring_fct_norm 1.0 +1027 16 optimizer.lr 0.042734381310515705 +1027 16 negative_sampler.num_negs_per_pos 27.0 +1027 16 training.batch_size 1.0 +1027 17 model.embedding_dim 2.0 +1027 17 model.scoring_fct_norm 1.0 +1027 17 optimizer.lr 0.09237211593584099 +1027 17 negative_sampler.num_negs_per_pos 56.0 +1027 17 training.batch_size 2.0 +1027 18 model.embedding_dim 2.0 +1027 18 model.scoring_fct_norm 1.0 +1027 18 optimizer.lr 0.003143505398209912 +1027 18 negative_sampler.num_negs_per_pos 50.0 +1027 18 training.batch_size 2.0 +1027 19 model.embedding_dim 1.0 +1027 19 model.scoring_fct_norm 2.0 +1027 19 optimizer.lr 0.008518134277285793 +1027 19 negative_sampler.num_negs_per_pos 92.0 +1027 19 training.batch_size 0.0 +1027 20 model.embedding_dim 1.0 +1027 20 model.scoring_fct_norm 2.0 +1027 20 optimizer.lr 0.0031672463500275858 +1027 20 negative_sampler.num_negs_per_pos 27.0 +1027 20 training.batch_size 0.0 +1027 21 model.embedding_dim 1.0 +1027 21 model.scoring_fct_norm 1.0 +1027 21 optimizer.lr 0.0010533937017020044 +1027 21 
negative_sampler.num_negs_per_pos 36.0 +1027 21 training.batch_size 2.0 +1027 22 model.embedding_dim 2.0 +1027 22 model.scoring_fct_norm 2.0 +1027 22 optimizer.lr 0.05083523461078657 +1027 22 negative_sampler.num_negs_per_pos 92.0 +1027 22 training.batch_size 1.0 +1027 23 model.embedding_dim 1.0 +1027 23 model.scoring_fct_norm 1.0 +1027 23 optimizer.lr 0.052531054190331536 +1027 23 negative_sampler.num_negs_per_pos 60.0 +1027 23 training.batch_size 2.0 +1027 24 model.embedding_dim 0.0 +1027 24 model.scoring_fct_norm 2.0 +1027 24 optimizer.lr 0.01552997346011768 +1027 24 negative_sampler.num_negs_per_pos 9.0 +1027 24 training.batch_size 2.0 +1027 25 model.embedding_dim 0.0 +1027 25 model.scoring_fct_norm 2.0 +1027 25 optimizer.lr 0.0034604384502602676 +1027 25 negative_sampler.num_negs_per_pos 46.0 +1027 25 training.batch_size 0.0 +1027 26 model.embedding_dim 2.0 +1027 26 model.scoring_fct_norm 1.0 +1027 26 optimizer.lr 0.07185887636260521 +1027 26 negative_sampler.num_negs_per_pos 12.0 +1027 26 training.batch_size 1.0 +1027 27 model.embedding_dim 0.0 +1027 27 model.scoring_fct_norm 2.0 +1027 27 optimizer.lr 0.0014664044262426073 +1027 27 negative_sampler.num_negs_per_pos 93.0 +1027 27 training.batch_size 2.0 +1027 28 model.embedding_dim 2.0 +1027 28 model.scoring_fct_norm 2.0 +1027 28 optimizer.lr 0.03829243012102214 +1027 28 negative_sampler.num_negs_per_pos 32.0 +1027 28 training.batch_size 0.0 +1027 29 model.embedding_dim 2.0 +1027 29 model.scoring_fct_norm 2.0 +1027 29 optimizer.lr 0.003460993262990385 +1027 29 negative_sampler.num_negs_per_pos 5.0 +1027 29 training.batch_size 2.0 +1027 30 model.embedding_dim 0.0 +1027 30 model.scoring_fct_norm 2.0 +1027 30 optimizer.lr 0.0012908011009828233 +1027 30 negative_sampler.num_negs_per_pos 72.0 +1027 30 training.batch_size 1.0 +1027 31 model.embedding_dim 0.0 +1027 31 model.scoring_fct_norm 1.0 +1027 31 optimizer.lr 0.0025464889051279092 +1027 31 negative_sampler.num_negs_per_pos 19.0 +1027 31 training.batch_size 0.0 
+1027 32 model.embedding_dim 2.0 +1027 32 model.scoring_fct_norm 2.0 +1027 32 optimizer.lr 0.005602114539757526 +1027 32 negative_sampler.num_negs_per_pos 22.0 +1027 32 training.batch_size 1.0 +1027 33 model.embedding_dim 0.0 +1027 33 model.scoring_fct_norm 2.0 +1027 33 optimizer.lr 0.004463730634331363 +1027 33 negative_sampler.num_negs_per_pos 42.0 +1027 33 training.batch_size 2.0 +1027 34 model.embedding_dim 2.0 +1027 34 model.scoring_fct_norm 1.0 +1027 34 optimizer.lr 0.029515885908250234 +1027 34 negative_sampler.num_negs_per_pos 20.0 +1027 34 training.batch_size 1.0 +1027 35 model.embedding_dim 0.0 +1027 35 model.scoring_fct_norm 2.0 +1027 35 optimizer.lr 0.008286423566949122 +1027 35 negative_sampler.num_negs_per_pos 29.0 +1027 35 training.batch_size 1.0 +1027 36 model.embedding_dim 2.0 +1027 36 model.scoring_fct_norm 1.0 +1027 36 optimizer.lr 0.02414239605071978 +1027 36 negative_sampler.num_negs_per_pos 70.0 +1027 36 training.batch_size 0.0 +1027 37 model.embedding_dim 0.0 +1027 37 model.scoring_fct_norm 1.0 +1027 37 optimizer.lr 0.014682441375239729 +1027 37 negative_sampler.num_negs_per_pos 64.0 +1027 37 training.batch_size 2.0 +1027 38 model.embedding_dim 0.0 +1027 38 model.scoring_fct_norm 1.0 +1027 38 optimizer.lr 0.0020712727621810694 +1027 38 negative_sampler.num_negs_per_pos 79.0 +1027 38 training.batch_size 2.0 +1027 39 model.embedding_dim 1.0 +1027 39 model.scoring_fct_norm 2.0 +1027 39 optimizer.lr 0.033610095079908454 +1027 39 negative_sampler.num_negs_per_pos 99.0 +1027 39 training.batch_size 1.0 +1027 40 model.embedding_dim 0.0 +1027 40 model.scoring_fct_norm 2.0 +1027 40 optimizer.lr 0.0172395212924892 +1027 40 negative_sampler.num_negs_per_pos 29.0 +1027 40 training.batch_size 0.0 +1027 41 model.embedding_dim 1.0 +1027 41 model.scoring_fct_norm 1.0 +1027 41 optimizer.lr 0.0018899734600366868 +1027 41 negative_sampler.num_negs_per_pos 70.0 +1027 41 training.batch_size 2.0 +1027 42 model.embedding_dim 0.0 +1027 42 model.scoring_fct_norm 2.0 
+1027 42 optimizer.lr 0.009507531900939104 +1027 42 negative_sampler.num_negs_per_pos 18.0 +1027 42 training.batch_size 0.0 +1027 43 model.embedding_dim 0.0 +1027 43 model.scoring_fct_norm 2.0 +1027 43 optimizer.lr 0.07028909449895192 +1027 43 negative_sampler.num_negs_per_pos 79.0 +1027 43 training.batch_size 0.0 +1027 44 model.embedding_dim 2.0 +1027 44 model.scoring_fct_norm 1.0 +1027 44 optimizer.lr 0.0016926343210584274 +1027 44 negative_sampler.num_negs_per_pos 57.0 +1027 44 training.batch_size 1.0 +1027 45 model.embedding_dim 2.0 +1027 45 model.scoring_fct_norm 2.0 +1027 45 optimizer.lr 0.011905770566061407 +1027 45 negative_sampler.num_negs_per_pos 48.0 +1027 45 training.batch_size 2.0 +1027 46 model.embedding_dim 0.0 +1027 46 model.scoring_fct_norm 1.0 +1027 46 optimizer.lr 0.0011871932029359484 +1027 46 negative_sampler.num_negs_per_pos 5.0 +1027 46 training.batch_size 1.0 +1027 47 model.embedding_dim 0.0 +1027 47 model.scoring_fct_norm 1.0 +1027 47 optimizer.lr 0.006886854290036622 +1027 47 negative_sampler.num_negs_per_pos 3.0 +1027 47 training.batch_size 1.0 +1027 48 model.embedding_dim 1.0 +1027 48 model.scoring_fct_norm 2.0 +1027 48 optimizer.lr 0.06556563749064856 +1027 48 negative_sampler.num_negs_per_pos 49.0 +1027 48 training.batch_size 2.0 +1027 49 model.embedding_dim 2.0 +1027 49 model.scoring_fct_norm 2.0 +1027 49 optimizer.lr 0.09813102819268185 +1027 49 negative_sampler.num_negs_per_pos 20.0 +1027 49 training.batch_size 2.0 +1027 50 model.embedding_dim 2.0 +1027 50 model.scoring_fct_norm 2.0 +1027 50 optimizer.lr 0.012032150439283984 +1027 50 negative_sampler.num_negs_per_pos 33.0 +1027 50 training.batch_size 0.0 +1027 51 model.embedding_dim 2.0 +1027 51 model.scoring_fct_norm 1.0 +1027 51 optimizer.lr 0.003766685387667031 +1027 51 negative_sampler.num_negs_per_pos 91.0 +1027 51 training.batch_size 1.0 +1027 52 model.embedding_dim 0.0 +1027 52 model.scoring_fct_norm 2.0 +1027 52 optimizer.lr 0.010780131463281233 +1027 52 
negative_sampler.num_negs_per_pos 31.0 +1027 52 training.batch_size 2.0 +1027 53 model.embedding_dim 0.0 +1027 53 model.scoring_fct_norm 2.0 +1027 53 optimizer.lr 0.018842939928710997 +1027 53 negative_sampler.num_negs_per_pos 20.0 +1027 53 training.batch_size 0.0 +1027 54 model.embedding_dim 1.0 +1027 54 model.scoring_fct_norm 2.0 +1027 54 optimizer.lr 0.09796863283670225 +1027 54 negative_sampler.num_negs_per_pos 98.0 +1027 54 training.batch_size 2.0 +1027 55 model.embedding_dim 2.0 +1027 55 model.scoring_fct_norm 1.0 +1027 55 optimizer.lr 0.00985767099527127 +1027 55 negative_sampler.num_negs_per_pos 44.0 +1027 55 training.batch_size 2.0 +1027 56 model.embedding_dim 2.0 +1027 56 model.scoring_fct_norm 2.0 +1027 56 optimizer.lr 0.009606114011415645 +1027 56 negative_sampler.num_negs_per_pos 96.0 +1027 56 training.batch_size 0.0 +1027 57 model.embedding_dim 0.0 +1027 57 model.scoring_fct_norm 1.0 +1027 57 optimizer.lr 0.0065438627525060055 +1027 57 negative_sampler.num_negs_per_pos 5.0 +1027 57 training.batch_size 0.0 +1027 58 model.embedding_dim 0.0 +1027 58 model.scoring_fct_norm 1.0 +1027 58 optimizer.lr 0.0584953978023892 +1027 58 negative_sampler.num_negs_per_pos 68.0 +1027 58 training.batch_size 1.0 +1027 59 model.embedding_dim 2.0 +1027 59 model.scoring_fct_norm 2.0 +1027 59 optimizer.lr 0.003921767336060514 +1027 59 negative_sampler.num_negs_per_pos 69.0 +1027 59 training.batch_size 0.0 +1027 60 model.embedding_dim 1.0 +1027 60 model.scoring_fct_norm 1.0 +1027 60 optimizer.lr 0.059535449410469746 +1027 60 negative_sampler.num_negs_per_pos 63.0 +1027 60 training.batch_size 0.0 +1027 61 model.embedding_dim 1.0 +1027 61 model.scoring_fct_norm 1.0 +1027 61 optimizer.lr 0.008028003984021174 +1027 61 negative_sampler.num_negs_per_pos 1.0 +1027 61 training.batch_size 0.0 +1027 62 model.embedding_dim 0.0 +1027 62 model.scoring_fct_norm 1.0 +1027 62 optimizer.lr 0.049578485455247345 +1027 62 negative_sampler.num_negs_per_pos 99.0 +1027 62 training.batch_size 2.0 
+1027 63 model.embedding_dim 0.0 +1027 63 model.scoring_fct_norm 2.0 +1027 63 optimizer.lr 0.0017453319652509518 +1027 63 negative_sampler.num_negs_per_pos 87.0 +1027 63 training.batch_size 2.0 +1027 64 model.embedding_dim 2.0 +1027 64 model.scoring_fct_norm 1.0 +1027 64 optimizer.lr 0.05288994113236076 +1027 64 negative_sampler.num_negs_per_pos 67.0 +1027 64 training.batch_size 1.0 +1027 65 model.embedding_dim 2.0 +1027 65 model.scoring_fct_norm 1.0 +1027 65 optimizer.lr 0.032064857099845225 +1027 65 negative_sampler.num_negs_per_pos 94.0 +1027 65 training.batch_size 2.0 +1027 66 model.embedding_dim 1.0 +1027 66 model.scoring_fct_norm 1.0 +1027 66 optimizer.lr 0.01961670902249627 +1027 66 negative_sampler.num_negs_per_pos 97.0 +1027 66 training.batch_size 2.0 +1027 67 model.embedding_dim 2.0 +1027 67 model.scoring_fct_norm 2.0 +1027 67 optimizer.lr 0.0016746586976125496 +1027 67 negative_sampler.num_negs_per_pos 73.0 +1027 67 training.batch_size 1.0 +1027 68 model.embedding_dim 1.0 +1027 68 model.scoring_fct_norm 1.0 +1027 68 optimizer.lr 0.0302226263772614 +1027 68 negative_sampler.num_negs_per_pos 76.0 +1027 68 training.batch_size 2.0 +1027 69 model.embedding_dim 0.0 +1027 69 model.scoring_fct_norm 2.0 +1027 69 optimizer.lr 0.011653357129427777 +1027 69 negative_sampler.num_negs_per_pos 73.0 +1027 69 training.batch_size 1.0 +1027 70 model.embedding_dim 0.0 +1027 70 model.scoring_fct_norm 1.0 +1027 70 optimizer.lr 0.0513527263667469 +1027 70 negative_sampler.num_negs_per_pos 93.0 +1027 70 training.batch_size 0.0 +1027 71 model.embedding_dim 1.0 +1027 71 model.scoring_fct_norm 2.0 +1027 71 optimizer.lr 0.07333739116105428 +1027 71 negative_sampler.num_negs_per_pos 5.0 +1027 71 training.batch_size 2.0 +1027 72 model.embedding_dim 2.0 +1027 72 model.scoring_fct_norm 1.0 +1027 72 optimizer.lr 0.019141948959330045 +1027 72 negative_sampler.num_negs_per_pos 44.0 +1027 72 training.batch_size 2.0 +1027 73 model.embedding_dim 0.0 +1027 73 model.scoring_fct_norm 2.0 +1027 
73 optimizer.lr 0.006096056806515003 +1027 73 negative_sampler.num_negs_per_pos 5.0 +1027 73 training.batch_size 2.0 +1027 74 model.embedding_dim 2.0 +1027 74 model.scoring_fct_norm 1.0 +1027 74 optimizer.lr 0.00797888204679499 +1027 74 negative_sampler.num_negs_per_pos 98.0 +1027 74 training.batch_size 2.0 +1027 75 model.embedding_dim 1.0 +1027 75 model.scoring_fct_norm 2.0 +1027 75 optimizer.lr 0.007062139346419892 +1027 75 negative_sampler.num_negs_per_pos 55.0 +1027 75 training.batch_size 2.0 +1027 76 model.embedding_dim 2.0 +1027 76 model.scoring_fct_norm 1.0 +1027 76 optimizer.lr 0.0012211290574110794 +1027 76 negative_sampler.num_negs_per_pos 17.0 +1027 76 training.batch_size 1.0 +1027 77 model.embedding_dim 1.0 +1027 77 model.scoring_fct_norm 1.0 +1027 77 optimizer.lr 0.006920686456906386 +1027 77 negative_sampler.num_negs_per_pos 83.0 +1027 77 training.batch_size 0.0 +1027 78 model.embedding_dim 1.0 +1027 78 model.scoring_fct_norm 2.0 +1027 78 optimizer.lr 0.05823245994636648 +1027 78 negative_sampler.num_negs_per_pos 1.0 +1027 78 training.batch_size 1.0 +1027 79 model.embedding_dim 1.0 +1027 79 model.scoring_fct_norm 2.0 +1027 79 optimizer.lr 0.004589860128423 +1027 79 negative_sampler.num_negs_per_pos 67.0 +1027 79 training.batch_size 0.0 +1027 80 model.embedding_dim 1.0 +1027 80 model.scoring_fct_norm 1.0 +1027 80 optimizer.lr 0.023958262762026818 +1027 80 negative_sampler.num_negs_per_pos 33.0 +1027 80 training.batch_size 1.0 +1027 81 model.embedding_dim 1.0 +1027 81 model.scoring_fct_norm 1.0 +1027 81 optimizer.lr 0.004441921793927158 +1027 81 negative_sampler.num_negs_per_pos 46.0 +1027 81 training.batch_size 0.0 +1027 82 model.embedding_dim 1.0 +1027 82 model.scoring_fct_norm 2.0 +1027 82 optimizer.lr 0.007272246979603585 +1027 82 negative_sampler.num_negs_per_pos 82.0 +1027 82 training.batch_size 2.0 +1027 83 model.embedding_dim 0.0 +1027 83 model.scoring_fct_norm 2.0 +1027 83 optimizer.lr 0.002373030269649964 +1027 83 
negative_sampler.num_negs_per_pos 52.0 +1027 83 training.batch_size 1.0 +1027 84 model.embedding_dim 2.0 +1027 84 model.scoring_fct_norm 2.0 +1027 84 optimizer.lr 0.07350212244264029 +1027 84 negative_sampler.num_negs_per_pos 82.0 +1027 84 training.batch_size 0.0 +1027 85 model.embedding_dim 2.0 +1027 85 model.scoring_fct_norm 2.0 +1027 85 optimizer.lr 0.07071200225784434 +1027 85 negative_sampler.num_negs_per_pos 8.0 +1027 85 training.batch_size 0.0 +1027 86 model.embedding_dim 1.0 +1027 86 model.scoring_fct_norm 1.0 +1027 86 optimizer.lr 0.03866691980409349 +1027 86 negative_sampler.num_negs_per_pos 76.0 +1027 86 training.batch_size 2.0 +1027 87 model.embedding_dim 0.0 +1027 87 model.scoring_fct_norm 1.0 +1027 87 optimizer.lr 0.04225486712649652 +1027 87 negative_sampler.num_negs_per_pos 37.0 +1027 87 training.batch_size 2.0 +1027 88 model.embedding_dim 2.0 +1027 88 model.scoring_fct_norm 2.0 +1027 88 optimizer.lr 0.03806949047774803 +1027 88 negative_sampler.num_negs_per_pos 47.0 +1027 88 training.batch_size 1.0 +1027 89 model.embedding_dim 2.0 +1027 89 model.scoring_fct_norm 1.0 +1027 89 optimizer.lr 0.0023111770135123737 +1027 89 negative_sampler.num_negs_per_pos 95.0 +1027 89 training.batch_size 1.0 +1027 90 model.embedding_dim 1.0 +1027 90 model.scoring_fct_norm 1.0 +1027 90 optimizer.lr 0.008304327443107593 +1027 90 negative_sampler.num_negs_per_pos 2.0 +1027 90 training.batch_size 1.0 +1027 91 model.embedding_dim 0.0 +1027 91 model.scoring_fct_norm 1.0 +1027 91 optimizer.lr 0.0016500756686742596 +1027 91 negative_sampler.num_negs_per_pos 37.0 +1027 91 training.batch_size 1.0 +1027 92 model.embedding_dim 1.0 +1027 92 model.scoring_fct_norm 1.0 +1027 92 optimizer.lr 0.002465758210499756 +1027 92 negative_sampler.num_negs_per_pos 77.0 +1027 92 training.batch_size 1.0 +1027 93 model.embedding_dim 2.0 +1027 93 model.scoring_fct_norm 1.0 +1027 93 optimizer.lr 0.0023873982149431555 +1027 93 negative_sampler.num_negs_per_pos 71.0 +1027 93 training.batch_size 1.0 
+1027 94 model.embedding_dim 1.0 +1027 94 model.scoring_fct_norm 1.0 +1027 94 optimizer.lr 0.00656180736035348 +1027 94 negative_sampler.num_negs_per_pos 28.0 +1027 94 training.batch_size 1.0 +1027 95 model.embedding_dim 0.0 +1027 95 model.scoring_fct_norm 1.0 +1027 95 optimizer.lr 0.0036054707601961143 +1027 95 negative_sampler.num_negs_per_pos 35.0 +1027 95 training.batch_size 0.0 +1027 96 model.embedding_dim 2.0 +1027 96 model.scoring_fct_norm 2.0 +1027 96 optimizer.lr 0.007047532565365839 +1027 96 negative_sampler.num_negs_per_pos 38.0 +1027 96 training.batch_size 1.0 +1027 97 model.embedding_dim 2.0 +1027 97 model.scoring_fct_norm 2.0 +1027 97 optimizer.lr 0.0020898147366279554 +1027 97 negative_sampler.num_negs_per_pos 14.0 +1027 97 training.batch_size 0.0 +1027 98 model.embedding_dim 2.0 +1027 98 model.scoring_fct_norm 1.0 +1027 98 optimizer.lr 0.002675789804875314 +1027 98 negative_sampler.num_negs_per_pos 76.0 +1027 98 training.batch_size 1.0 +1027 99 model.embedding_dim 0.0 +1027 99 model.scoring_fct_norm 1.0 +1027 99 optimizer.lr 0.0033036153329893308 +1027 99 negative_sampler.num_negs_per_pos 18.0 +1027 99 training.batch_size 2.0 +1027 100 model.embedding_dim 1.0 +1027 100 model.scoring_fct_norm 2.0 +1027 100 optimizer.lr 0.002336727182243166 +1027 100 negative_sampler.num_negs_per_pos 8.0 +1027 100 training.batch_size 2.0 +1027 1 dataset """kinships""" +1027 1 model """unstructuredmodel""" +1027 1 loss """softplus""" +1027 1 regularizer """no""" +1027 1 optimizer """adam""" +1027 1 training_loop """owa""" +1027 1 negative_sampler """basic""" +1027 1 evaluator """rankbased""" +1027 2 dataset """kinships""" +1027 2 model """unstructuredmodel""" +1027 2 loss """softplus""" +1027 2 regularizer """no""" +1027 2 optimizer """adam""" +1027 2 training_loop """owa""" +1027 2 negative_sampler """basic""" +1027 2 evaluator """rankbased""" +1027 3 dataset """kinships""" +1027 3 model """unstructuredmodel""" +1027 3 loss """softplus""" +1027 3 regularizer """no""" 
+1027 3 optimizer """adam""" +1027 3 training_loop """owa""" +1027 3 negative_sampler """basic""" +1027 3 evaluator """rankbased""" +1027 4 dataset """kinships""" +1027 4 model """unstructuredmodel""" +1027 4 loss """softplus""" +1027 4 regularizer """no""" +1027 4 optimizer """adam""" +1027 4 training_loop """owa""" +1027 4 negative_sampler """basic""" +1027 4 evaluator """rankbased""" +1027 5 dataset """kinships""" +1027 5 model """unstructuredmodel""" +1027 5 loss """softplus""" +1027 5 regularizer """no""" +1027 5 optimizer """adam""" +1027 5 training_loop """owa""" +1027 5 negative_sampler """basic""" +1027 5 evaluator """rankbased""" +1027 6 dataset """kinships""" +1027 6 model """unstructuredmodel""" +1027 6 loss """softplus""" +1027 6 regularizer """no""" +1027 6 optimizer """adam""" +1027 6 training_loop """owa""" +1027 6 negative_sampler """basic""" +1027 6 evaluator """rankbased""" +1027 7 dataset """kinships""" +1027 7 model """unstructuredmodel""" +1027 7 loss """softplus""" +1027 7 regularizer """no""" +1027 7 optimizer """adam""" +1027 7 training_loop """owa""" +1027 7 negative_sampler """basic""" +1027 7 evaluator """rankbased""" +1027 8 dataset """kinships""" +1027 8 model """unstructuredmodel""" +1027 8 loss """softplus""" +1027 8 regularizer """no""" +1027 8 optimizer """adam""" +1027 8 training_loop """owa""" +1027 8 negative_sampler """basic""" +1027 8 evaluator """rankbased""" +1027 9 dataset """kinships""" +1027 9 model """unstructuredmodel""" +1027 9 loss """softplus""" +1027 9 regularizer """no""" +1027 9 optimizer """adam""" +1027 9 training_loop """owa""" +1027 9 negative_sampler """basic""" +1027 9 evaluator """rankbased""" +1027 10 dataset """kinships""" +1027 10 model """unstructuredmodel""" +1027 10 loss """softplus""" +1027 10 regularizer """no""" +1027 10 optimizer """adam""" +1027 10 training_loop """owa""" +1027 10 negative_sampler """basic""" +1027 10 evaluator """rankbased""" +1027 11 dataset """kinships""" +1027 11 model 
"""unstructuredmodel""" +1027 11 loss """softplus""" +1027 11 regularizer """no""" +1027 11 optimizer """adam""" +1027 11 training_loop """owa""" +1027 11 negative_sampler """basic""" +1027 11 evaluator """rankbased""" +1027 12 dataset """kinships""" +1027 12 model """unstructuredmodel""" +1027 12 loss """softplus""" +1027 12 regularizer """no""" +1027 12 optimizer """adam""" +1027 12 training_loop """owa""" +1027 12 negative_sampler """basic""" +1027 12 evaluator """rankbased""" +1027 13 dataset """kinships""" +1027 13 model """unstructuredmodel""" +1027 13 loss """softplus""" +1027 13 regularizer """no""" +1027 13 optimizer """adam""" +1027 13 training_loop """owa""" +1027 13 negative_sampler """basic""" +1027 13 evaluator """rankbased""" +1027 14 dataset """kinships""" +1027 14 model """unstructuredmodel""" +1027 14 loss """softplus""" +1027 14 regularizer """no""" +1027 14 optimizer """adam""" +1027 14 training_loop """owa""" +1027 14 negative_sampler """basic""" +1027 14 evaluator """rankbased""" +1027 15 dataset """kinships""" +1027 15 model """unstructuredmodel""" +1027 15 loss """softplus""" +1027 15 regularizer """no""" +1027 15 optimizer """adam""" +1027 15 training_loop """owa""" +1027 15 negative_sampler """basic""" +1027 15 evaluator """rankbased""" +1027 16 dataset """kinships""" +1027 16 model """unstructuredmodel""" +1027 16 loss """softplus""" +1027 16 regularizer """no""" +1027 16 optimizer """adam""" +1027 16 training_loop """owa""" +1027 16 negative_sampler """basic""" +1027 16 evaluator """rankbased""" +1027 17 dataset """kinships""" +1027 17 model """unstructuredmodel""" +1027 17 loss """softplus""" +1027 17 regularizer """no""" +1027 17 optimizer """adam""" +1027 17 training_loop """owa""" +1027 17 negative_sampler """basic""" +1027 17 evaluator """rankbased""" +1027 18 dataset """kinships""" +1027 18 model """unstructuredmodel""" +1027 18 loss """softplus""" +1027 18 regularizer """no""" +1027 18 optimizer """adam""" +1027 18 training_loop 
"""owa""" +1027 18 negative_sampler """basic""" +1027 18 evaluator """rankbased""" +1027 19 dataset """kinships""" +1027 19 model """unstructuredmodel""" +1027 19 loss """softplus""" +1027 19 regularizer """no""" +1027 19 optimizer """adam""" +1027 19 training_loop """owa""" +1027 19 negative_sampler """basic""" +1027 19 evaluator """rankbased""" +1027 20 dataset """kinships""" +1027 20 model """unstructuredmodel""" +1027 20 loss """softplus""" +1027 20 regularizer """no""" +1027 20 optimizer """adam""" +1027 20 training_loop """owa""" +1027 20 negative_sampler """basic""" +1027 20 evaluator """rankbased""" +1027 21 dataset """kinships""" +1027 21 model """unstructuredmodel""" +1027 21 loss """softplus""" +1027 21 regularizer """no""" +1027 21 optimizer """adam""" +1027 21 training_loop """owa""" +1027 21 negative_sampler """basic""" +1027 21 evaluator """rankbased""" +1027 22 dataset """kinships""" +1027 22 model """unstructuredmodel""" +1027 22 loss """softplus""" +1027 22 regularizer """no""" +1027 22 optimizer """adam""" +1027 22 training_loop """owa""" +1027 22 negative_sampler """basic""" +1027 22 evaluator """rankbased""" +1027 23 dataset """kinships""" +1027 23 model """unstructuredmodel""" +1027 23 loss """softplus""" +1027 23 regularizer """no""" +1027 23 optimizer """adam""" +1027 23 training_loop """owa""" +1027 23 negative_sampler """basic""" +1027 23 evaluator """rankbased""" +1027 24 dataset """kinships""" +1027 24 model """unstructuredmodel""" +1027 24 loss """softplus""" +1027 24 regularizer """no""" +1027 24 optimizer """adam""" +1027 24 training_loop """owa""" +1027 24 negative_sampler """basic""" +1027 24 evaluator """rankbased""" +1027 25 dataset """kinships""" +1027 25 model """unstructuredmodel""" +1027 25 loss """softplus""" +1027 25 regularizer """no""" +1027 25 optimizer """adam""" +1027 25 training_loop """owa""" +1027 25 negative_sampler """basic""" +1027 25 evaluator """rankbased""" +1027 26 dataset """kinships""" +1027 26 model 
"""unstructuredmodel""" +1027 26 loss """softplus""" +1027 26 regularizer """no""" +1027 26 optimizer """adam""" +1027 26 training_loop """owa""" +1027 26 negative_sampler """basic""" +1027 26 evaluator """rankbased""" +1027 27 dataset """kinships""" +1027 27 model """unstructuredmodel""" +1027 27 loss """softplus""" +1027 27 regularizer """no""" +1027 27 optimizer """adam""" +1027 27 training_loop """owa""" +1027 27 negative_sampler """basic""" +1027 27 evaluator """rankbased""" +1027 28 dataset """kinships""" +1027 28 model """unstructuredmodel""" +1027 28 loss """softplus""" +1027 28 regularizer """no""" +1027 28 optimizer """adam""" +1027 28 training_loop """owa""" +1027 28 negative_sampler """basic""" +1027 28 evaluator """rankbased""" +1027 29 dataset """kinships""" +1027 29 model """unstructuredmodel""" +1027 29 loss """softplus""" +1027 29 regularizer """no""" +1027 29 optimizer """adam""" +1027 29 training_loop """owa""" +1027 29 negative_sampler """basic""" +1027 29 evaluator """rankbased""" +1027 30 dataset """kinships""" +1027 30 model """unstructuredmodel""" +1027 30 loss """softplus""" +1027 30 regularizer """no""" +1027 30 optimizer """adam""" +1027 30 training_loop """owa""" +1027 30 negative_sampler """basic""" +1027 30 evaluator """rankbased""" +1027 31 dataset """kinships""" +1027 31 model """unstructuredmodel""" +1027 31 loss """softplus""" +1027 31 regularizer """no""" +1027 31 optimizer """adam""" +1027 31 training_loop """owa""" +1027 31 negative_sampler """basic""" +1027 31 evaluator """rankbased""" +1027 32 dataset """kinships""" +1027 32 model """unstructuredmodel""" +1027 32 loss """softplus""" +1027 32 regularizer """no""" +1027 32 optimizer """adam""" +1027 32 training_loop """owa""" +1027 32 negative_sampler """basic""" +1027 32 evaluator """rankbased""" +1027 33 dataset """kinships""" +1027 33 model """unstructuredmodel""" +1027 33 loss """softplus""" +1027 33 regularizer """no""" +1027 33 optimizer """adam""" +1027 33 training_loop 
"""owa""" +1027 33 negative_sampler """basic""" +1027 33 evaluator """rankbased""" +1027 34 dataset """kinships""" +1027 34 model """unstructuredmodel""" +1027 34 loss """softplus""" +1027 34 regularizer """no""" +1027 34 optimizer """adam""" +1027 34 training_loop """owa""" +1027 34 negative_sampler """basic""" +1027 34 evaluator """rankbased""" +1027 35 dataset """kinships""" +1027 35 model """unstructuredmodel""" +1027 35 loss """softplus""" +1027 35 regularizer """no""" +1027 35 optimizer """adam""" +1027 35 training_loop """owa""" +1027 35 negative_sampler """basic""" +1027 35 evaluator """rankbased""" +1027 36 dataset """kinships""" +1027 36 model """unstructuredmodel""" +1027 36 loss """softplus""" +1027 36 regularizer """no""" +1027 36 optimizer """adam""" +1027 36 training_loop """owa""" +1027 36 negative_sampler """basic""" +1027 36 evaluator """rankbased""" +1027 37 dataset """kinships""" +1027 37 model """unstructuredmodel""" +1027 37 loss """softplus""" +1027 37 regularizer """no""" +1027 37 optimizer """adam""" +1027 37 training_loop """owa""" +1027 37 negative_sampler """basic""" +1027 37 evaluator """rankbased""" +1027 38 dataset """kinships""" +1027 38 model """unstructuredmodel""" +1027 38 loss """softplus""" +1027 38 regularizer """no""" +1027 38 optimizer """adam""" +1027 38 training_loop """owa""" +1027 38 negative_sampler """basic""" +1027 38 evaluator """rankbased""" +1027 39 dataset """kinships""" +1027 39 model """unstructuredmodel""" +1027 39 loss """softplus""" +1027 39 regularizer """no""" +1027 39 optimizer """adam""" +1027 39 training_loop """owa""" +1027 39 negative_sampler """basic""" +1027 39 evaluator """rankbased""" +1027 40 dataset """kinships""" +1027 40 model """unstructuredmodel""" +1027 40 loss """softplus""" +1027 40 regularizer """no""" +1027 40 optimizer """adam""" +1027 40 training_loop """owa""" +1027 40 negative_sampler """basic""" +1027 40 evaluator """rankbased""" +1027 41 dataset """kinships""" +1027 41 model 
"""unstructuredmodel""" +1027 41 loss """softplus""" +1027 41 regularizer """no""" +1027 41 optimizer """adam""" +1027 41 training_loop """owa""" +1027 41 negative_sampler """basic""" +1027 41 evaluator """rankbased""" +1027 42 dataset """kinships""" +1027 42 model """unstructuredmodel""" +1027 42 loss """softplus""" +1027 42 regularizer """no""" +1027 42 optimizer """adam""" +1027 42 training_loop """owa""" +1027 42 negative_sampler """basic""" +1027 42 evaluator """rankbased""" +1027 43 dataset """kinships""" +1027 43 model """unstructuredmodel""" +1027 43 loss """softplus""" +1027 43 regularizer """no""" +1027 43 optimizer """adam""" +1027 43 training_loop """owa""" +1027 43 negative_sampler """basic""" +1027 43 evaluator """rankbased""" +1027 44 dataset """kinships""" +1027 44 model """unstructuredmodel""" +1027 44 loss """softplus""" +1027 44 regularizer """no""" +1027 44 optimizer """adam""" +1027 44 training_loop """owa""" +1027 44 negative_sampler """basic""" +1027 44 evaluator """rankbased""" +1027 45 dataset """kinships""" +1027 45 model """unstructuredmodel""" +1027 45 loss """softplus""" +1027 45 regularizer """no""" +1027 45 optimizer """adam""" +1027 45 training_loop """owa""" +1027 45 negative_sampler """basic""" +1027 45 evaluator """rankbased""" +1027 46 dataset """kinships""" +1027 46 model """unstructuredmodel""" +1027 46 loss """softplus""" +1027 46 regularizer """no""" +1027 46 optimizer """adam""" +1027 46 training_loop """owa""" +1027 46 negative_sampler """basic""" +1027 46 evaluator """rankbased""" +1027 47 dataset """kinships""" +1027 47 model """unstructuredmodel""" +1027 47 loss """softplus""" +1027 47 regularizer """no""" +1027 47 optimizer """adam""" +1027 47 training_loop """owa""" +1027 47 negative_sampler """basic""" +1027 47 evaluator """rankbased""" +1027 48 dataset """kinships""" +1027 48 model """unstructuredmodel""" +1027 48 loss """softplus""" +1027 48 regularizer """no""" +1027 48 optimizer """adam""" +1027 48 training_loop 
"""owa""" +1027 48 negative_sampler """basic""" +1027 48 evaluator """rankbased""" +1027 49 dataset """kinships""" +1027 49 model """unstructuredmodel""" +1027 49 loss """softplus""" +1027 49 regularizer """no""" +1027 49 optimizer """adam""" +1027 49 training_loop """owa""" +1027 49 negative_sampler """basic""" +1027 49 evaluator """rankbased""" +1027 50 dataset """kinships""" +1027 50 model """unstructuredmodel""" +1027 50 loss """softplus""" +1027 50 regularizer """no""" +1027 50 optimizer """adam""" +1027 50 training_loop """owa""" +1027 50 negative_sampler """basic""" +1027 50 evaluator """rankbased""" +1027 51 dataset """kinships""" +1027 51 model """unstructuredmodel""" +1027 51 loss """softplus""" +1027 51 regularizer """no""" +1027 51 optimizer """adam""" +1027 51 training_loop """owa""" +1027 51 negative_sampler """basic""" +1027 51 evaluator """rankbased""" +1027 52 dataset """kinships""" +1027 52 model """unstructuredmodel""" +1027 52 loss """softplus""" +1027 52 regularizer """no""" +1027 52 optimizer """adam""" +1027 52 training_loop """owa""" +1027 52 negative_sampler """basic""" +1027 52 evaluator """rankbased""" +1027 53 dataset """kinships""" +1027 53 model """unstructuredmodel""" +1027 53 loss """softplus""" +1027 53 regularizer """no""" +1027 53 optimizer """adam""" +1027 53 training_loop """owa""" +1027 53 negative_sampler """basic""" +1027 53 evaluator """rankbased""" +1027 54 dataset """kinships""" +1027 54 model """unstructuredmodel""" +1027 54 loss """softplus""" +1027 54 regularizer """no""" +1027 54 optimizer """adam""" +1027 54 training_loop """owa""" +1027 54 negative_sampler """basic""" +1027 54 evaluator """rankbased""" +1027 55 dataset """kinships""" +1027 55 model """unstructuredmodel""" +1027 55 loss """softplus""" +1027 55 regularizer """no""" +1027 55 optimizer """adam""" +1027 55 training_loop """owa""" +1027 55 negative_sampler """basic""" +1027 55 evaluator """rankbased""" +1027 56 dataset """kinships""" +1027 56 model 
"""unstructuredmodel""" +1027 56 loss """softplus""" +1027 56 regularizer """no""" +1027 56 optimizer """adam""" +1027 56 training_loop """owa""" +1027 56 negative_sampler """basic""" +1027 56 evaluator """rankbased""" +1027 57 dataset """kinships""" +1027 57 model """unstructuredmodel""" +1027 57 loss """softplus""" +1027 57 regularizer """no""" +1027 57 optimizer """adam""" +1027 57 training_loop """owa""" +1027 57 negative_sampler """basic""" +1027 57 evaluator """rankbased""" +1027 58 dataset """kinships""" +1027 58 model """unstructuredmodel""" +1027 58 loss """softplus""" +1027 58 regularizer """no""" +1027 58 optimizer """adam""" +1027 58 training_loop """owa""" +1027 58 negative_sampler """basic""" +1027 58 evaluator """rankbased""" +1027 59 dataset """kinships""" +1027 59 model """unstructuredmodel""" +1027 59 loss """softplus""" +1027 59 regularizer """no""" +1027 59 optimizer """adam""" +1027 59 training_loop """owa""" +1027 59 negative_sampler """basic""" +1027 59 evaluator """rankbased""" +1027 60 dataset """kinships""" +1027 60 model """unstructuredmodel""" +1027 60 loss """softplus""" +1027 60 regularizer """no""" +1027 60 optimizer """adam""" +1027 60 training_loop """owa""" +1027 60 negative_sampler """basic""" +1027 60 evaluator """rankbased""" +1027 61 dataset """kinships""" +1027 61 model """unstructuredmodel""" +1027 61 loss """softplus""" +1027 61 regularizer """no""" +1027 61 optimizer """adam""" +1027 61 training_loop """owa""" +1027 61 negative_sampler """basic""" +1027 61 evaluator """rankbased""" +1027 62 dataset """kinships""" +1027 62 model """unstructuredmodel""" +1027 62 loss """softplus""" +1027 62 regularizer """no""" +1027 62 optimizer """adam""" +1027 62 training_loop """owa""" +1027 62 negative_sampler """basic""" +1027 62 evaluator """rankbased""" +1027 63 dataset """kinships""" +1027 63 model """unstructuredmodel""" +1027 63 loss """softplus""" +1027 63 regularizer """no""" +1027 63 optimizer """adam""" +1027 63 training_loop 
"""owa""" +1027 63 negative_sampler """basic""" +1027 63 evaluator """rankbased""" +1027 64 dataset """kinships""" +1027 64 model """unstructuredmodel""" +1027 64 loss """softplus""" +1027 64 regularizer """no""" +1027 64 optimizer """adam""" +1027 64 training_loop """owa""" +1027 64 negative_sampler """basic""" +1027 64 evaluator """rankbased""" +1027 65 dataset """kinships""" +1027 65 model """unstructuredmodel""" +1027 65 loss """softplus""" +1027 65 regularizer """no""" +1027 65 optimizer """adam""" +1027 65 training_loop """owa""" +1027 65 negative_sampler """basic""" +1027 65 evaluator """rankbased""" +1027 66 dataset """kinships""" +1027 66 model """unstructuredmodel""" +1027 66 loss """softplus""" +1027 66 regularizer """no""" +1027 66 optimizer """adam""" +1027 66 training_loop """owa""" +1027 66 negative_sampler """basic""" +1027 66 evaluator """rankbased""" +1027 67 dataset """kinships""" +1027 67 model """unstructuredmodel""" +1027 67 loss """softplus""" +1027 67 regularizer """no""" +1027 67 optimizer """adam""" +1027 67 training_loop """owa""" +1027 67 negative_sampler """basic""" +1027 67 evaluator """rankbased""" +1027 68 dataset """kinships""" +1027 68 model """unstructuredmodel""" +1027 68 loss """softplus""" +1027 68 regularizer """no""" +1027 68 optimizer """adam""" +1027 68 training_loop """owa""" +1027 68 negative_sampler """basic""" +1027 68 evaluator """rankbased""" +1027 69 dataset """kinships""" +1027 69 model """unstructuredmodel""" +1027 69 loss """softplus""" +1027 69 regularizer """no""" +1027 69 optimizer """adam""" +1027 69 training_loop """owa""" +1027 69 negative_sampler """basic""" +1027 69 evaluator """rankbased""" +1027 70 dataset """kinships""" +1027 70 model """unstructuredmodel""" +1027 70 loss """softplus""" +1027 70 regularizer """no""" +1027 70 optimizer """adam""" +1027 70 training_loop """owa""" +1027 70 negative_sampler """basic""" +1027 70 evaluator """rankbased""" +1027 71 dataset """kinships""" +1027 71 model 
"""unstructuredmodel""" +1027 71 loss """softplus""" +1027 71 regularizer """no""" +1027 71 optimizer """adam""" +1027 71 training_loop """owa""" +1027 71 negative_sampler """basic""" +1027 71 evaluator """rankbased""" +1027 72 dataset """kinships""" +1027 72 model """unstructuredmodel""" +1027 72 loss """softplus""" +1027 72 regularizer """no""" +1027 72 optimizer """adam""" +1027 72 training_loop """owa""" +1027 72 negative_sampler """basic""" +1027 72 evaluator """rankbased""" +1027 73 dataset """kinships""" +1027 73 model """unstructuredmodel""" +1027 73 loss """softplus""" +1027 73 regularizer """no""" +1027 73 optimizer """adam""" +1027 73 training_loop """owa""" +1027 73 negative_sampler """basic""" +1027 73 evaluator """rankbased""" +1027 74 dataset """kinships""" +1027 74 model """unstructuredmodel""" +1027 74 loss """softplus""" +1027 74 regularizer """no""" +1027 74 optimizer """adam""" +1027 74 training_loop """owa""" +1027 74 negative_sampler """basic""" +1027 74 evaluator """rankbased""" +1027 75 dataset """kinships""" +1027 75 model """unstructuredmodel""" +1027 75 loss """softplus""" +1027 75 regularizer """no""" +1027 75 optimizer """adam""" +1027 75 training_loop """owa""" +1027 75 negative_sampler """basic""" +1027 75 evaluator """rankbased""" +1027 76 dataset """kinships""" +1027 76 model """unstructuredmodel""" +1027 76 loss """softplus""" +1027 76 regularizer """no""" +1027 76 optimizer """adam""" +1027 76 training_loop """owa""" +1027 76 negative_sampler """basic""" +1027 76 evaluator """rankbased""" +1027 77 dataset """kinships""" +1027 77 model """unstructuredmodel""" +1027 77 loss """softplus""" +1027 77 regularizer """no""" +1027 77 optimizer """adam""" +1027 77 training_loop """owa""" +1027 77 negative_sampler """basic""" +1027 77 evaluator """rankbased""" +1027 78 dataset """kinships""" +1027 78 model """unstructuredmodel""" +1027 78 loss """softplus""" +1027 78 regularizer """no""" +1027 78 optimizer """adam""" +1027 78 training_loop 
"""owa""" +1027 78 negative_sampler """basic""" +1027 78 evaluator """rankbased""" +1027 79 dataset """kinships""" +1027 79 model """unstructuredmodel""" +1027 79 loss """softplus""" +1027 79 regularizer """no""" +1027 79 optimizer """adam""" +1027 79 training_loop """owa""" +1027 79 negative_sampler """basic""" +1027 79 evaluator """rankbased""" +1027 80 dataset """kinships""" +1027 80 model """unstructuredmodel""" +1027 80 loss """softplus""" +1027 80 regularizer """no""" +1027 80 optimizer """adam""" +1027 80 training_loop """owa""" +1027 80 negative_sampler """basic""" +1027 80 evaluator """rankbased""" +1027 81 dataset """kinships""" +1027 81 model """unstructuredmodel""" +1027 81 loss """softplus""" +1027 81 regularizer """no""" +1027 81 optimizer """adam""" +1027 81 training_loop """owa""" +1027 81 negative_sampler """basic""" +1027 81 evaluator """rankbased""" +1027 82 dataset """kinships""" +1027 82 model """unstructuredmodel""" +1027 82 loss """softplus""" +1027 82 regularizer """no""" +1027 82 optimizer """adam""" +1027 82 training_loop """owa""" +1027 82 negative_sampler """basic""" +1027 82 evaluator """rankbased""" +1027 83 dataset """kinships""" +1027 83 model """unstructuredmodel""" +1027 83 loss """softplus""" +1027 83 regularizer """no""" +1027 83 optimizer """adam""" +1027 83 training_loop """owa""" +1027 83 negative_sampler """basic""" +1027 83 evaluator """rankbased""" +1027 84 dataset """kinships""" +1027 84 model """unstructuredmodel""" +1027 84 loss """softplus""" +1027 84 regularizer """no""" +1027 84 optimizer """adam""" +1027 84 training_loop """owa""" +1027 84 negative_sampler """basic""" +1027 84 evaluator """rankbased""" +1027 85 dataset """kinships""" +1027 85 model """unstructuredmodel""" +1027 85 loss """softplus""" +1027 85 regularizer """no""" +1027 85 optimizer """adam""" +1027 85 training_loop """owa""" +1027 85 negative_sampler """basic""" +1027 85 evaluator """rankbased""" +1027 86 dataset """kinships""" +1027 86 model 
"""unstructuredmodel""" +1027 86 loss """softplus""" +1027 86 regularizer """no""" +1027 86 optimizer """adam""" +1027 86 training_loop """owa""" +1027 86 negative_sampler """basic""" +1027 86 evaluator """rankbased""" +1027 87 dataset """kinships""" +1027 87 model """unstructuredmodel""" +1027 87 loss """softplus""" +1027 87 regularizer """no""" +1027 87 optimizer """adam""" +1027 87 training_loop """owa""" +1027 87 negative_sampler """basic""" +1027 87 evaluator """rankbased""" +1027 88 dataset """kinships""" +1027 88 model """unstructuredmodel""" +1027 88 loss """softplus""" +1027 88 regularizer """no""" +1027 88 optimizer """adam""" +1027 88 training_loop """owa""" +1027 88 negative_sampler """basic""" +1027 88 evaluator """rankbased""" +1027 89 dataset """kinships""" +1027 89 model """unstructuredmodel""" +1027 89 loss """softplus""" +1027 89 regularizer """no""" +1027 89 optimizer """adam""" +1027 89 training_loop """owa""" +1027 89 negative_sampler """basic""" +1027 89 evaluator """rankbased""" +1027 90 dataset """kinships""" +1027 90 model """unstructuredmodel""" +1027 90 loss """softplus""" +1027 90 regularizer """no""" +1027 90 optimizer """adam""" +1027 90 training_loop """owa""" +1027 90 negative_sampler """basic""" +1027 90 evaluator """rankbased""" +1027 91 dataset """kinships""" +1027 91 model """unstructuredmodel""" +1027 91 loss """softplus""" +1027 91 regularizer """no""" +1027 91 optimizer """adam""" +1027 91 training_loop """owa""" +1027 91 negative_sampler """basic""" +1027 91 evaluator """rankbased""" +1027 92 dataset """kinships""" +1027 92 model """unstructuredmodel""" +1027 92 loss """softplus""" +1027 92 regularizer """no""" +1027 92 optimizer """adam""" +1027 92 training_loop """owa""" +1027 92 negative_sampler """basic""" +1027 92 evaluator """rankbased""" +1027 93 dataset """kinships""" +1027 93 model """unstructuredmodel""" +1027 93 loss """softplus""" +1027 93 regularizer """no""" +1027 93 optimizer """adam""" +1027 93 training_loop 
"""owa""" +1027 93 negative_sampler """basic""" +1027 93 evaluator """rankbased""" +1027 94 dataset """kinships""" +1027 94 model """unstructuredmodel""" +1027 94 loss """softplus""" +1027 94 regularizer """no""" +1027 94 optimizer """adam""" +1027 94 training_loop """owa""" +1027 94 negative_sampler """basic""" +1027 94 evaluator """rankbased""" +1027 95 dataset """kinships""" +1027 95 model """unstructuredmodel""" +1027 95 loss """softplus""" +1027 95 regularizer """no""" +1027 95 optimizer """adam""" +1027 95 training_loop """owa""" +1027 95 negative_sampler """basic""" +1027 95 evaluator """rankbased""" +1027 96 dataset """kinships""" +1027 96 model """unstructuredmodel""" +1027 96 loss """softplus""" +1027 96 regularizer """no""" +1027 96 optimizer """adam""" +1027 96 training_loop """owa""" +1027 96 negative_sampler """basic""" +1027 96 evaluator """rankbased""" +1027 97 dataset """kinships""" +1027 97 model """unstructuredmodel""" +1027 97 loss """softplus""" +1027 97 regularizer """no""" +1027 97 optimizer """adam""" +1027 97 training_loop """owa""" +1027 97 negative_sampler """basic""" +1027 97 evaluator """rankbased""" +1027 98 dataset """kinships""" +1027 98 model """unstructuredmodel""" +1027 98 loss """softplus""" +1027 98 regularizer """no""" +1027 98 optimizer """adam""" +1027 98 training_loop """owa""" +1027 98 negative_sampler """basic""" +1027 98 evaluator """rankbased""" +1027 99 dataset """kinships""" +1027 99 model """unstructuredmodel""" +1027 99 loss """softplus""" +1027 99 regularizer """no""" +1027 99 optimizer """adam""" +1027 99 training_loop """owa""" +1027 99 negative_sampler """basic""" +1027 99 evaluator """rankbased""" +1027 100 dataset """kinships""" +1027 100 model """unstructuredmodel""" +1027 100 loss """softplus""" +1027 100 regularizer """no""" +1027 100 optimizer """adam""" +1027 100 training_loop """owa""" +1027 100 negative_sampler """basic""" +1027 100 evaluator """rankbased""" +1028 1 model.embedding_dim 0.0 +1028 1 
model.scoring_fct_norm 2.0 +1028 1 optimizer.lr 0.003439648900645845 +1028 1 negative_sampler.num_negs_per_pos 73.0 +1028 1 training.batch_size 1.0 +1028 2 model.embedding_dim 0.0 +1028 2 model.scoring_fct_norm 1.0 +1028 2 optimizer.lr 0.04126930602272396 +1028 2 negative_sampler.num_negs_per_pos 56.0 +1028 2 training.batch_size 0.0 +1028 3 model.embedding_dim 2.0 +1028 3 model.scoring_fct_norm 2.0 +1028 3 optimizer.lr 0.0014272368106501463 +1028 3 negative_sampler.num_negs_per_pos 89.0 +1028 3 training.batch_size 1.0 +1028 4 model.embedding_dim 2.0 +1028 4 model.scoring_fct_norm 2.0 +1028 4 optimizer.lr 0.048590535540995954 +1028 4 negative_sampler.num_negs_per_pos 93.0 +1028 4 training.batch_size 1.0 +1028 5 model.embedding_dim 0.0 +1028 5 model.scoring_fct_norm 1.0 +1028 5 optimizer.lr 0.0015812075900500495 +1028 5 negative_sampler.num_negs_per_pos 15.0 +1028 5 training.batch_size 2.0 +1028 6 model.embedding_dim 2.0 +1028 6 model.scoring_fct_norm 2.0 +1028 6 optimizer.lr 0.0018018508074869957 +1028 6 negative_sampler.num_negs_per_pos 8.0 +1028 6 training.batch_size 2.0 +1028 7 model.embedding_dim 2.0 +1028 7 model.scoring_fct_norm 2.0 +1028 7 optimizer.lr 0.008299802378142745 +1028 7 negative_sampler.num_negs_per_pos 49.0 +1028 7 training.batch_size 0.0 +1028 8 model.embedding_dim 1.0 +1028 8 model.scoring_fct_norm 2.0 +1028 8 optimizer.lr 0.013907171816507346 +1028 8 negative_sampler.num_negs_per_pos 82.0 +1028 8 training.batch_size 0.0 +1028 9 model.embedding_dim 2.0 +1028 9 model.scoring_fct_norm 2.0 +1028 9 optimizer.lr 0.06660238192375835 +1028 9 negative_sampler.num_negs_per_pos 8.0 +1028 9 training.batch_size 2.0 +1028 10 model.embedding_dim 2.0 +1028 10 model.scoring_fct_norm 1.0 +1028 10 optimizer.lr 0.015630115392269668 +1028 10 negative_sampler.num_negs_per_pos 92.0 +1028 10 training.batch_size 2.0 +1028 11 model.embedding_dim 2.0 +1028 11 model.scoring_fct_norm 1.0 +1028 11 optimizer.lr 0.06251412628760651 +1028 11 negative_sampler.num_negs_per_pos 
52.0 +1028 11 training.batch_size 0.0 +1028 12 model.embedding_dim 0.0 +1028 12 model.scoring_fct_norm 1.0 +1028 12 optimizer.lr 0.008383564975965822 +1028 12 negative_sampler.num_negs_per_pos 43.0 +1028 12 training.batch_size 1.0 +1028 13 model.embedding_dim 1.0 +1028 13 model.scoring_fct_norm 1.0 +1028 13 optimizer.lr 0.003996973205557849 +1028 13 negative_sampler.num_negs_per_pos 17.0 +1028 13 training.batch_size 0.0 +1028 14 model.embedding_dim 1.0 +1028 14 model.scoring_fct_norm 1.0 +1028 14 optimizer.lr 0.017626189557905923 +1028 14 negative_sampler.num_negs_per_pos 22.0 +1028 14 training.batch_size 1.0 +1028 15 model.embedding_dim 0.0 +1028 15 model.scoring_fct_norm 1.0 +1028 15 optimizer.lr 0.00842318471153704 +1028 15 negative_sampler.num_negs_per_pos 9.0 +1028 15 training.batch_size 0.0 +1028 16 model.embedding_dim 2.0 +1028 16 model.scoring_fct_norm 1.0 +1028 16 optimizer.lr 0.0033401402680993506 +1028 16 negative_sampler.num_negs_per_pos 87.0 +1028 16 training.batch_size 2.0 +1028 17 model.embedding_dim 2.0 +1028 17 model.scoring_fct_norm 1.0 +1028 17 optimizer.lr 0.003083101664034092 +1028 17 negative_sampler.num_negs_per_pos 69.0 +1028 17 training.batch_size 2.0 +1028 18 model.embedding_dim 1.0 +1028 18 model.scoring_fct_norm 2.0 +1028 18 optimizer.lr 0.032929063728760526 +1028 18 negative_sampler.num_negs_per_pos 91.0 +1028 18 training.batch_size 2.0 +1028 19 model.embedding_dim 1.0 +1028 19 model.scoring_fct_norm 2.0 +1028 19 optimizer.lr 0.007947043097705485 +1028 19 negative_sampler.num_negs_per_pos 97.0 +1028 19 training.batch_size 1.0 +1028 20 model.embedding_dim 0.0 +1028 20 model.scoring_fct_norm 2.0 +1028 20 optimizer.lr 0.05523645121910382 +1028 20 negative_sampler.num_negs_per_pos 5.0 +1028 20 training.batch_size 1.0 +1028 21 model.embedding_dim 2.0 +1028 21 model.scoring_fct_norm 1.0 +1028 21 optimizer.lr 0.05815409320640058 +1028 21 negative_sampler.num_negs_per_pos 78.0 +1028 21 training.batch_size 2.0 +1028 22 model.embedding_dim 1.0 
+1028 22 model.scoring_fct_norm 1.0 +1028 22 optimizer.lr 0.01553492141948181 +1028 22 negative_sampler.num_negs_per_pos 78.0 +1028 22 training.batch_size 0.0 +1028 23 model.embedding_dim 0.0 +1028 23 model.scoring_fct_norm 1.0 +1028 23 optimizer.lr 0.012135517248514908 +1028 23 negative_sampler.num_negs_per_pos 84.0 +1028 23 training.batch_size 0.0 +1028 24 model.embedding_dim 2.0 +1028 24 model.scoring_fct_norm 2.0 +1028 24 optimizer.lr 0.017602433831618497 +1028 24 negative_sampler.num_negs_per_pos 85.0 +1028 24 training.batch_size 1.0 +1028 25 model.embedding_dim 2.0 +1028 25 model.scoring_fct_norm 1.0 +1028 25 optimizer.lr 0.0017001071039294954 +1028 25 negative_sampler.num_negs_per_pos 98.0 +1028 25 training.batch_size 0.0 +1028 26 model.embedding_dim 0.0 +1028 26 model.scoring_fct_norm 1.0 +1028 26 optimizer.lr 0.048238199865748274 +1028 26 negative_sampler.num_negs_per_pos 79.0 +1028 26 training.batch_size 0.0 +1028 27 model.embedding_dim 0.0 +1028 27 model.scoring_fct_norm 2.0 +1028 27 optimizer.lr 0.009128295042832016 +1028 27 negative_sampler.num_negs_per_pos 15.0 +1028 27 training.batch_size 2.0 +1028 28 model.embedding_dim 1.0 +1028 28 model.scoring_fct_norm 2.0 +1028 28 optimizer.lr 0.013661075381166244 +1028 28 negative_sampler.num_negs_per_pos 36.0 +1028 28 training.batch_size 2.0 +1028 29 model.embedding_dim 1.0 +1028 29 model.scoring_fct_norm 1.0 +1028 29 optimizer.lr 0.003291769955714464 +1028 29 negative_sampler.num_negs_per_pos 82.0 +1028 29 training.batch_size 1.0 +1028 30 model.embedding_dim 1.0 +1028 30 model.scoring_fct_norm 1.0 +1028 30 optimizer.lr 0.003973209777158345 +1028 30 negative_sampler.num_negs_per_pos 91.0 +1028 30 training.batch_size 0.0 +1028 31 model.embedding_dim 1.0 +1028 31 model.scoring_fct_norm 1.0 +1028 31 optimizer.lr 0.0015049904131349042 +1028 31 negative_sampler.num_negs_per_pos 55.0 +1028 31 training.batch_size 0.0 +1028 32 model.embedding_dim 2.0 +1028 32 model.scoring_fct_norm 2.0 +1028 32 optimizer.lr 
0.04156749957345877 +1028 32 negative_sampler.num_negs_per_pos 34.0 +1028 32 training.batch_size 1.0 +1028 33 model.embedding_dim 0.0 +1028 33 model.scoring_fct_norm 1.0 +1028 33 optimizer.lr 0.0024913766511487838 +1028 33 negative_sampler.num_negs_per_pos 17.0 +1028 33 training.batch_size 2.0 +1028 34 model.embedding_dim 0.0 +1028 34 model.scoring_fct_norm 2.0 +1028 34 optimizer.lr 0.011170851711276703 +1028 34 negative_sampler.num_negs_per_pos 22.0 +1028 34 training.batch_size 2.0 +1028 35 model.embedding_dim 0.0 +1028 35 model.scoring_fct_norm 1.0 +1028 35 optimizer.lr 0.0023542013875426193 +1028 35 negative_sampler.num_negs_per_pos 66.0 +1028 35 training.batch_size 0.0 +1028 36 model.embedding_dim 0.0 +1028 36 model.scoring_fct_norm 2.0 +1028 36 optimizer.lr 0.005078692961437781 +1028 36 negative_sampler.num_negs_per_pos 3.0 +1028 36 training.batch_size 2.0 +1028 37 model.embedding_dim 0.0 +1028 37 model.scoring_fct_norm 1.0 +1028 37 optimizer.lr 0.0021305295074558602 +1028 37 negative_sampler.num_negs_per_pos 37.0 +1028 37 training.batch_size 2.0 +1028 38 model.embedding_dim 2.0 +1028 38 model.scoring_fct_norm 2.0 +1028 38 optimizer.lr 0.026509652329203888 +1028 38 negative_sampler.num_negs_per_pos 58.0 +1028 38 training.batch_size 0.0 +1028 39 model.embedding_dim 2.0 +1028 39 model.scoring_fct_norm 2.0 +1028 39 optimizer.lr 0.006332991144314214 +1028 39 negative_sampler.num_negs_per_pos 87.0 +1028 39 training.batch_size 0.0 +1028 40 model.embedding_dim 2.0 +1028 40 model.scoring_fct_norm 2.0 +1028 40 optimizer.lr 0.003757366361171155 +1028 40 negative_sampler.num_negs_per_pos 17.0 +1028 40 training.batch_size 0.0 +1028 41 model.embedding_dim 2.0 +1028 41 model.scoring_fct_norm 1.0 +1028 41 optimizer.lr 0.01203911263210675 +1028 41 negative_sampler.num_negs_per_pos 70.0 +1028 41 training.batch_size 2.0 +1028 42 model.embedding_dim 2.0 +1028 42 model.scoring_fct_norm 2.0 +1028 42 optimizer.lr 0.013872178422331615 +1028 42 negative_sampler.num_negs_per_pos 13.0 
+1028 42 training.batch_size 2.0 +1028 43 model.embedding_dim 1.0 +1028 43 model.scoring_fct_norm 2.0 +1028 43 optimizer.lr 0.0012676480388539814 +1028 43 negative_sampler.num_negs_per_pos 57.0 +1028 43 training.batch_size 1.0 +1028 44 model.embedding_dim 0.0 +1028 44 model.scoring_fct_norm 2.0 +1028 44 optimizer.lr 0.0012772617004290986 +1028 44 negative_sampler.num_negs_per_pos 27.0 +1028 44 training.batch_size 2.0 +1028 45 model.embedding_dim 2.0 +1028 45 model.scoring_fct_norm 2.0 +1028 45 optimizer.lr 0.07341656645619475 +1028 45 negative_sampler.num_negs_per_pos 30.0 +1028 45 training.batch_size 1.0 +1028 46 model.embedding_dim 0.0 +1028 46 model.scoring_fct_norm 1.0 +1028 46 optimizer.lr 0.02580617045963696 +1028 46 negative_sampler.num_negs_per_pos 28.0 +1028 46 training.batch_size 1.0 +1028 47 model.embedding_dim 2.0 +1028 47 model.scoring_fct_norm 1.0 +1028 47 optimizer.lr 0.0029123544939875915 +1028 47 negative_sampler.num_negs_per_pos 16.0 +1028 47 training.batch_size 1.0 +1028 48 model.embedding_dim 2.0 +1028 48 model.scoring_fct_norm 2.0 +1028 48 optimizer.lr 0.011593247404201914 +1028 48 negative_sampler.num_negs_per_pos 47.0 +1028 48 training.batch_size 0.0 +1028 49 model.embedding_dim 1.0 +1028 49 model.scoring_fct_norm 1.0 +1028 49 optimizer.lr 0.0044999425782511334 +1028 49 negative_sampler.num_negs_per_pos 3.0 +1028 49 training.batch_size 0.0 +1028 50 model.embedding_dim 1.0 +1028 50 model.scoring_fct_norm 2.0 +1028 50 optimizer.lr 0.002476428236303714 +1028 50 negative_sampler.num_negs_per_pos 9.0 +1028 50 training.batch_size 1.0 +1028 51 model.embedding_dim 0.0 +1028 51 model.scoring_fct_norm 1.0 +1028 51 optimizer.lr 0.09978685557087193 +1028 51 negative_sampler.num_negs_per_pos 48.0 +1028 51 training.batch_size 2.0 +1028 52 model.embedding_dim 2.0 +1028 52 model.scoring_fct_norm 2.0 +1028 52 optimizer.lr 0.0012619166815865208 +1028 52 negative_sampler.num_negs_per_pos 11.0 +1028 52 training.batch_size 2.0 +1028 53 model.embedding_dim 1.0 
+1028 53 model.scoring_fct_norm 1.0 +1028 53 optimizer.lr 0.003938701878483667 +1028 53 negative_sampler.num_negs_per_pos 16.0 +1028 53 training.batch_size 1.0 +1028 54 model.embedding_dim 0.0 +1028 54 model.scoring_fct_norm 2.0 +1028 54 optimizer.lr 0.0021518024886664393 +1028 54 negative_sampler.num_negs_per_pos 78.0 +1028 54 training.batch_size 2.0 +1028 55 model.embedding_dim 1.0 +1028 55 model.scoring_fct_norm 2.0 +1028 55 optimizer.lr 0.015080933016178959 +1028 55 negative_sampler.num_negs_per_pos 58.0 +1028 55 training.batch_size 1.0 +1028 56 model.embedding_dim 2.0 +1028 56 model.scoring_fct_norm 1.0 +1028 56 optimizer.lr 0.04024654960115774 +1028 56 negative_sampler.num_negs_per_pos 23.0 +1028 56 training.batch_size 0.0 +1028 57 model.embedding_dim 1.0 +1028 57 model.scoring_fct_norm 1.0 +1028 57 optimizer.lr 0.001065528668162438 +1028 57 negative_sampler.num_negs_per_pos 4.0 +1028 57 training.batch_size 1.0 +1028 58 model.embedding_dim 0.0 +1028 58 model.scoring_fct_norm 2.0 +1028 58 optimizer.lr 0.015953252042277922 +1028 58 negative_sampler.num_negs_per_pos 39.0 +1028 58 training.batch_size 1.0 +1028 59 model.embedding_dim 2.0 +1028 59 model.scoring_fct_norm 1.0 +1028 59 optimizer.lr 0.0617307955088461 +1028 59 negative_sampler.num_negs_per_pos 15.0 +1028 59 training.batch_size 2.0 +1028 60 model.embedding_dim 1.0 +1028 60 model.scoring_fct_norm 2.0 +1028 60 optimizer.lr 0.08929876056396907 +1028 60 negative_sampler.num_negs_per_pos 23.0 +1028 60 training.batch_size 2.0 +1028 61 model.embedding_dim 0.0 +1028 61 model.scoring_fct_norm 2.0 +1028 61 optimizer.lr 0.015901354208933207 +1028 61 negative_sampler.num_negs_per_pos 95.0 +1028 61 training.batch_size 0.0 +1028 62 model.embedding_dim 1.0 +1028 62 model.scoring_fct_norm 1.0 +1028 62 optimizer.lr 0.010719595606571259 +1028 62 negative_sampler.num_negs_per_pos 81.0 +1028 62 training.batch_size 2.0 +1028 63 model.embedding_dim 0.0 +1028 63 model.scoring_fct_norm 2.0 +1028 63 optimizer.lr 
0.003039160710916348 +1028 63 negative_sampler.num_negs_per_pos 62.0 +1028 63 training.batch_size 1.0 +1028 64 model.embedding_dim 2.0 +1028 64 model.scoring_fct_norm 1.0 +1028 64 optimizer.lr 0.0031611402581202858 +1028 64 negative_sampler.num_negs_per_pos 88.0 +1028 64 training.batch_size 2.0 +1028 65 model.embedding_dim 1.0 +1028 65 model.scoring_fct_norm 1.0 +1028 65 optimizer.lr 0.0025940385434230187 +1028 65 negative_sampler.num_negs_per_pos 4.0 +1028 65 training.batch_size 0.0 +1028 66 model.embedding_dim 1.0 +1028 66 model.scoring_fct_norm 1.0 +1028 66 optimizer.lr 0.009095865932951145 +1028 66 negative_sampler.num_negs_per_pos 61.0 +1028 66 training.batch_size 1.0 +1028 67 model.embedding_dim 2.0 +1028 67 model.scoring_fct_norm 2.0 +1028 67 optimizer.lr 0.009045571381554715 +1028 67 negative_sampler.num_negs_per_pos 74.0 +1028 67 training.batch_size 0.0 +1028 68 model.embedding_dim 0.0 +1028 68 model.scoring_fct_norm 2.0 +1028 68 optimizer.lr 0.0053161668400028145 +1028 68 negative_sampler.num_negs_per_pos 66.0 +1028 68 training.batch_size 2.0 +1028 69 model.embedding_dim 2.0 +1028 69 model.scoring_fct_norm 1.0 +1028 69 optimizer.lr 0.011772827403816443 +1028 69 negative_sampler.num_negs_per_pos 52.0 +1028 69 training.batch_size 1.0 +1028 70 model.embedding_dim 1.0 +1028 70 model.scoring_fct_norm 2.0 +1028 70 optimizer.lr 0.07700002626150516 +1028 70 negative_sampler.num_negs_per_pos 18.0 +1028 70 training.batch_size 2.0 +1028 71 model.embedding_dim 0.0 +1028 71 model.scoring_fct_norm 2.0 +1028 71 optimizer.lr 0.056881814245604186 +1028 71 negative_sampler.num_negs_per_pos 70.0 +1028 71 training.batch_size 0.0 +1028 72 model.embedding_dim 0.0 +1028 72 model.scoring_fct_norm 1.0 +1028 72 optimizer.lr 0.008655446658298123 +1028 72 negative_sampler.num_negs_per_pos 10.0 +1028 72 training.batch_size 1.0 +1028 73 model.embedding_dim 2.0 +1028 73 model.scoring_fct_norm 1.0 +1028 73 optimizer.lr 0.008391895392548133 +1028 73 negative_sampler.num_negs_per_pos 12.0 
+1028 73 training.batch_size 0.0 +1028 74 model.embedding_dim 2.0 +1028 74 model.scoring_fct_norm 1.0 +1028 74 optimizer.lr 0.004785036831603575 +1028 74 negative_sampler.num_negs_per_pos 39.0 +1028 74 training.batch_size 2.0 +1028 75 model.embedding_dim 0.0 +1028 75 model.scoring_fct_norm 2.0 +1028 75 optimizer.lr 0.004669961844350978 +1028 75 negative_sampler.num_negs_per_pos 56.0 +1028 75 training.batch_size 1.0 +1028 76 model.embedding_dim 0.0 +1028 76 model.scoring_fct_norm 1.0 +1028 76 optimizer.lr 0.007348188199134156 +1028 76 negative_sampler.num_negs_per_pos 86.0 +1028 76 training.batch_size 1.0 +1028 77 model.embedding_dim 1.0 +1028 77 model.scoring_fct_norm 2.0 +1028 77 optimizer.lr 0.010752441578423713 +1028 77 negative_sampler.num_negs_per_pos 66.0 +1028 77 training.batch_size 0.0 +1028 78 model.embedding_dim 1.0 +1028 78 model.scoring_fct_norm 1.0 +1028 78 optimizer.lr 0.02445299516898956 +1028 78 negative_sampler.num_negs_per_pos 42.0 +1028 78 training.batch_size 2.0 +1028 79 model.embedding_dim 1.0 +1028 79 model.scoring_fct_norm 1.0 +1028 79 optimizer.lr 0.05291758758536946 +1028 79 negative_sampler.num_negs_per_pos 72.0 +1028 79 training.batch_size 2.0 +1028 80 model.embedding_dim 0.0 +1028 80 model.scoring_fct_norm 2.0 +1028 80 optimizer.lr 0.06307405162195472 +1028 80 negative_sampler.num_negs_per_pos 64.0 +1028 80 training.batch_size 1.0 +1028 81 model.embedding_dim 1.0 +1028 81 model.scoring_fct_norm 2.0 +1028 81 optimizer.lr 0.029495544572312973 +1028 81 negative_sampler.num_negs_per_pos 4.0 +1028 81 training.batch_size 2.0 +1028 82 model.embedding_dim 0.0 +1028 82 model.scoring_fct_norm 1.0 +1028 82 optimizer.lr 0.0012115716839973705 +1028 82 negative_sampler.num_negs_per_pos 96.0 +1028 82 training.batch_size 2.0 +1028 83 model.embedding_dim 0.0 +1028 83 model.scoring_fct_norm 2.0 +1028 83 optimizer.lr 0.02723981766648741 +1028 83 negative_sampler.num_negs_per_pos 60.0 +1028 83 training.batch_size 0.0 +1028 84 model.embedding_dim 2.0 +1028 
84 model.scoring_fct_norm 1.0 +1028 84 optimizer.lr 0.020079426652071384 +1028 84 negative_sampler.num_negs_per_pos 55.0 +1028 84 training.batch_size 2.0 +1028 85 model.embedding_dim 0.0 +1028 85 model.scoring_fct_norm 1.0 +1028 85 optimizer.lr 0.027162570804678543 +1028 85 negative_sampler.num_negs_per_pos 60.0 +1028 85 training.batch_size 0.0 +1028 86 model.embedding_dim 2.0 +1028 86 model.scoring_fct_norm 1.0 +1028 86 optimizer.lr 0.02425264749047227 +1028 86 negative_sampler.num_negs_per_pos 61.0 +1028 86 training.batch_size 2.0 +1028 87 model.embedding_dim 1.0 +1028 87 model.scoring_fct_norm 1.0 +1028 87 optimizer.lr 0.035533448993757176 +1028 87 negative_sampler.num_negs_per_pos 43.0 +1028 87 training.batch_size 1.0 +1028 88 model.embedding_dim 1.0 +1028 88 model.scoring_fct_norm 2.0 +1028 88 optimizer.lr 0.0015046818201668543 +1028 88 negative_sampler.num_negs_per_pos 9.0 +1028 88 training.batch_size 0.0 +1028 89 model.embedding_dim 1.0 +1028 89 model.scoring_fct_norm 2.0 +1028 89 optimizer.lr 0.03863678846062261 +1028 89 negative_sampler.num_negs_per_pos 68.0 +1028 89 training.batch_size 1.0 +1028 90 model.embedding_dim 1.0 +1028 90 model.scoring_fct_norm 2.0 +1028 90 optimizer.lr 0.05736329051784153 +1028 90 negative_sampler.num_negs_per_pos 55.0 +1028 90 training.batch_size 2.0 +1028 91 model.embedding_dim 2.0 +1028 91 model.scoring_fct_norm 2.0 +1028 91 optimizer.lr 0.001973978318369545 +1028 91 negative_sampler.num_negs_per_pos 28.0 +1028 91 training.batch_size 2.0 +1028 92 model.embedding_dim 1.0 +1028 92 model.scoring_fct_norm 1.0 +1028 92 optimizer.lr 0.010477168273251172 +1028 92 negative_sampler.num_negs_per_pos 90.0 +1028 92 training.batch_size 0.0 +1028 93 model.embedding_dim 0.0 +1028 93 model.scoring_fct_norm 1.0 +1028 93 optimizer.lr 0.0041331229024735355 +1028 93 negative_sampler.num_negs_per_pos 97.0 +1028 93 training.batch_size 1.0 +1028 94 model.embedding_dim 2.0 +1028 94 model.scoring_fct_norm 2.0 +1028 94 optimizer.lr 
0.009126927157531065 +1028 94 negative_sampler.num_negs_per_pos 35.0 +1028 94 training.batch_size 1.0 +1028 95 model.embedding_dim 2.0 +1028 95 model.scoring_fct_norm 1.0 +1028 95 optimizer.lr 0.0018355830335642013 +1028 95 negative_sampler.num_negs_per_pos 20.0 +1028 95 training.batch_size 0.0 +1028 96 model.embedding_dim 1.0 +1028 96 model.scoring_fct_norm 2.0 +1028 96 optimizer.lr 0.05184828754424906 +1028 96 negative_sampler.num_negs_per_pos 13.0 +1028 96 training.batch_size 1.0 +1028 97 model.embedding_dim 1.0 +1028 97 model.scoring_fct_norm 1.0 +1028 97 optimizer.lr 0.003634942118678352 +1028 97 negative_sampler.num_negs_per_pos 18.0 +1028 97 training.batch_size 1.0 +1028 98 model.embedding_dim 0.0 +1028 98 model.scoring_fct_norm 2.0 +1028 98 optimizer.lr 0.0010652793381879092 +1028 98 negative_sampler.num_negs_per_pos 73.0 +1028 98 training.batch_size 2.0 +1028 99 model.embedding_dim 1.0 +1028 99 model.scoring_fct_norm 2.0 +1028 99 optimizer.lr 0.0016897365189757411 +1028 99 negative_sampler.num_negs_per_pos 69.0 +1028 99 training.batch_size 0.0 +1028 100 model.embedding_dim 2.0 +1028 100 model.scoring_fct_norm 2.0 +1028 100 optimizer.lr 0.003956893041577883 +1028 100 negative_sampler.num_negs_per_pos 16.0 +1028 100 training.batch_size 1.0 +1028 1 dataset """kinships""" +1028 1 model """unstructuredmodel""" +1028 1 loss """bceaftersigmoid""" +1028 1 regularizer """no""" +1028 1 optimizer """adam""" +1028 1 training_loop """owa""" +1028 1 negative_sampler """basic""" +1028 1 evaluator """rankbased""" +1028 2 dataset """kinships""" +1028 2 model """unstructuredmodel""" +1028 2 loss """bceaftersigmoid""" +1028 2 regularizer """no""" +1028 2 optimizer """adam""" +1028 2 training_loop """owa""" +1028 2 negative_sampler """basic""" +1028 2 evaluator """rankbased""" +1028 3 dataset """kinships""" +1028 3 model """unstructuredmodel""" +1028 3 loss """bceaftersigmoid""" +1028 3 regularizer """no""" +1028 3 optimizer """adam""" +1028 3 training_loop """owa""" +1028 3 
negative_sampler """basic""" +1028 3 evaluator """rankbased""" +1028 4 dataset """kinships""" +1028 4 model """unstructuredmodel""" +1028 4 loss """bceaftersigmoid""" +1028 4 regularizer """no""" +1028 4 optimizer """adam""" +1028 4 training_loop """owa""" +1028 4 negative_sampler """basic""" +1028 4 evaluator """rankbased""" +1028 5 dataset """kinships""" +1028 5 model """unstructuredmodel""" +1028 5 loss """bceaftersigmoid""" +1028 5 regularizer """no""" +1028 5 optimizer """adam""" +1028 5 training_loop """owa""" +1028 5 negative_sampler """basic""" +1028 5 evaluator """rankbased""" +1028 6 dataset """kinships""" +1028 6 model """unstructuredmodel""" +1028 6 loss """bceaftersigmoid""" +1028 6 regularizer """no""" +1028 6 optimizer """adam""" +1028 6 training_loop """owa""" +1028 6 negative_sampler """basic""" +1028 6 evaluator """rankbased""" +1028 7 dataset """kinships""" +1028 7 model """unstructuredmodel""" +1028 7 loss """bceaftersigmoid""" +1028 7 regularizer """no""" +1028 7 optimizer """adam""" +1028 7 training_loop """owa""" +1028 7 negative_sampler """basic""" +1028 7 evaluator """rankbased""" +1028 8 dataset """kinships""" +1028 8 model """unstructuredmodel""" +1028 8 loss """bceaftersigmoid""" +1028 8 regularizer """no""" +1028 8 optimizer """adam""" +1028 8 training_loop """owa""" +1028 8 negative_sampler """basic""" +1028 8 evaluator """rankbased""" +1028 9 dataset """kinships""" +1028 9 model """unstructuredmodel""" +1028 9 loss """bceaftersigmoid""" +1028 9 regularizer """no""" +1028 9 optimizer """adam""" +1028 9 training_loop """owa""" +1028 9 negative_sampler """basic""" +1028 9 evaluator """rankbased""" +1028 10 dataset """kinships""" +1028 10 model """unstructuredmodel""" +1028 10 loss """bceaftersigmoid""" +1028 10 regularizer """no""" +1028 10 optimizer """adam""" +1028 10 training_loop """owa""" +1028 10 negative_sampler """basic""" +1028 10 evaluator """rankbased""" +1028 11 dataset """kinships""" +1028 11 model """unstructuredmodel""" 
+1028 11 loss """bceaftersigmoid""" +1028 11 regularizer """no""" +1028 11 optimizer """adam""" +1028 11 training_loop """owa""" +1028 11 negative_sampler """basic""" +1028 11 evaluator """rankbased""" +1028 12 dataset """kinships""" +1028 12 model """unstructuredmodel""" +1028 12 loss """bceaftersigmoid""" +1028 12 regularizer """no""" +1028 12 optimizer """adam""" +1028 12 training_loop """owa""" +1028 12 negative_sampler """basic""" +1028 12 evaluator """rankbased""" +1028 13 dataset """kinships""" +1028 13 model """unstructuredmodel""" +1028 13 loss """bceaftersigmoid""" +1028 13 regularizer """no""" +1028 13 optimizer """adam""" +1028 13 training_loop """owa""" +1028 13 negative_sampler """basic""" +1028 13 evaluator """rankbased""" +1028 14 dataset """kinships""" +1028 14 model """unstructuredmodel""" +1028 14 loss """bceaftersigmoid""" +1028 14 regularizer """no""" +1028 14 optimizer """adam""" +1028 14 training_loop """owa""" +1028 14 negative_sampler """basic""" +1028 14 evaluator """rankbased""" +1028 15 dataset """kinships""" +1028 15 model """unstructuredmodel""" +1028 15 loss """bceaftersigmoid""" +1028 15 regularizer """no""" +1028 15 optimizer """adam""" +1028 15 training_loop """owa""" +1028 15 negative_sampler """basic""" +1028 15 evaluator """rankbased""" +1028 16 dataset """kinships""" +1028 16 model """unstructuredmodel""" +1028 16 loss """bceaftersigmoid""" +1028 16 regularizer """no""" +1028 16 optimizer """adam""" +1028 16 training_loop """owa""" +1028 16 negative_sampler """basic""" +1028 16 evaluator """rankbased""" +1028 17 dataset """kinships""" +1028 17 model """unstructuredmodel""" +1028 17 loss """bceaftersigmoid""" +1028 17 regularizer """no""" +1028 17 optimizer """adam""" +1028 17 training_loop """owa""" +1028 17 negative_sampler """basic""" +1028 17 evaluator """rankbased""" +1028 18 dataset """kinships""" +1028 18 model """unstructuredmodel""" +1028 18 loss """bceaftersigmoid""" +1028 18 regularizer """no""" +1028 18 optimizer 
"""adam""" +1028 18 training_loop """owa""" +1028 18 negative_sampler """basic""" +1028 18 evaluator """rankbased""" +1028 19 dataset """kinships""" +1028 19 model """unstructuredmodel""" +1028 19 loss """bceaftersigmoid""" +1028 19 regularizer """no""" +1028 19 optimizer """adam""" +1028 19 training_loop """owa""" +1028 19 negative_sampler """basic""" +1028 19 evaluator """rankbased""" +1028 20 dataset """kinships""" +1028 20 model """unstructuredmodel""" +1028 20 loss """bceaftersigmoid""" +1028 20 regularizer """no""" +1028 20 optimizer """adam""" +1028 20 training_loop """owa""" +1028 20 negative_sampler """basic""" +1028 20 evaluator """rankbased""" +1028 21 dataset """kinships""" +1028 21 model """unstructuredmodel""" +1028 21 loss """bceaftersigmoid""" +1028 21 regularizer """no""" +1028 21 optimizer """adam""" +1028 21 training_loop """owa""" +1028 21 negative_sampler """basic""" +1028 21 evaluator """rankbased""" +1028 22 dataset """kinships""" +1028 22 model """unstructuredmodel""" +1028 22 loss """bceaftersigmoid""" +1028 22 regularizer """no""" +1028 22 optimizer """adam""" +1028 22 training_loop """owa""" +1028 22 negative_sampler """basic""" +1028 22 evaluator """rankbased""" +1028 23 dataset """kinships""" +1028 23 model """unstructuredmodel""" +1028 23 loss """bceaftersigmoid""" +1028 23 regularizer """no""" +1028 23 optimizer """adam""" +1028 23 training_loop """owa""" +1028 23 negative_sampler """basic""" +1028 23 evaluator """rankbased""" +1028 24 dataset """kinships""" +1028 24 model """unstructuredmodel""" +1028 24 loss """bceaftersigmoid""" +1028 24 regularizer """no""" +1028 24 optimizer """adam""" +1028 24 training_loop """owa""" +1028 24 negative_sampler """basic""" +1028 24 evaluator """rankbased""" +1028 25 dataset """kinships""" +1028 25 model """unstructuredmodel""" +1028 25 loss """bceaftersigmoid""" +1028 25 regularizer """no""" +1028 25 optimizer """adam""" +1028 25 training_loop """owa""" +1028 25 negative_sampler """basic""" +1028 
25 evaluator """rankbased""" +1028 26 dataset """kinships""" +1028 26 model """unstructuredmodel""" +1028 26 loss """bceaftersigmoid""" +1028 26 regularizer """no""" +1028 26 optimizer """adam""" +1028 26 training_loop """owa""" +1028 26 negative_sampler """basic""" +1028 26 evaluator """rankbased""" +1028 27 dataset """kinships""" +1028 27 model """unstructuredmodel""" +1028 27 loss """bceaftersigmoid""" +1028 27 regularizer """no""" +1028 27 optimizer """adam""" +1028 27 training_loop """owa""" +1028 27 negative_sampler """basic""" +1028 27 evaluator """rankbased""" +1028 28 dataset """kinships""" +1028 28 model """unstructuredmodel""" +1028 28 loss """bceaftersigmoid""" +1028 28 regularizer """no""" +1028 28 optimizer """adam""" +1028 28 training_loop """owa""" +1028 28 negative_sampler """basic""" +1028 28 evaluator """rankbased""" +1028 29 dataset """kinships""" +1028 29 model """unstructuredmodel""" +1028 29 loss """bceaftersigmoid""" +1028 29 regularizer """no""" +1028 29 optimizer """adam""" +1028 29 training_loop """owa""" +1028 29 negative_sampler """basic""" +1028 29 evaluator """rankbased""" +1028 30 dataset """kinships""" +1028 30 model """unstructuredmodel""" +1028 30 loss """bceaftersigmoid""" +1028 30 regularizer """no""" +1028 30 optimizer """adam""" +1028 30 training_loop """owa""" +1028 30 negative_sampler """basic""" +1028 30 evaluator """rankbased""" +1028 31 dataset """kinships""" +1028 31 model """unstructuredmodel""" +1028 31 loss """bceaftersigmoid""" +1028 31 regularizer """no""" +1028 31 optimizer """adam""" +1028 31 training_loop """owa""" +1028 31 negative_sampler """basic""" +1028 31 evaluator """rankbased""" +1028 32 dataset """kinships""" +1028 32 model """unstructuredmodel""" +1028 32 loss """bceaftersigmoid""" +1028 32 regularizer """no""" +1028 32 optimizer """adam""" +1028 32 training_loop """owa""" +1028 32 negative_sampler """basic""" +1028 32 evaluator """rankbased""" +1028 33 dataset """kinships""" +1028 33 model 
"""unstructuredmodel""" +1028 33 loss """bceaftersigmoid""" +1028 33 regularizer """no""" +1028 33 optimizer """adam""" +1028 33 training_loop """owa""" +1028 33 negative_sampler """basic""" +1028 33 evaluator """rankbased""" +1028 34 dataset """kinships""" +1028 34 model """unstructuredmodel""" +1028 34 loss """bceaftersigmoid""" +1028 34 regularizer """no""" +1028 34 optimizer """adam""" +1028 34 training_loop """owa""" +1028 34 negative_sampler """basic""" +1028 34 evaluator """rankbased""" +1028 35 dataset """kinships""" +1028 35 model """unstructuredmodel""" +1028 35 loss """bceaftersigmoid""" +1028 35 regularizer """no""" +1028 35 optimizer """adam""" +1028 35 training_loop """owa""" +1028 35 negative_sampler """basic""" +1028 35 evaluator """rankbased""" +1028 36 dataset """kinships""" +1028 36 model """unstructuredmodel""" +1028 36 loss """bceaftersigmoid""" +1028 36 regularizer """no""" +1028 36 optimizer """adam""" +1028 36 training_loop """owa""" +1028 36 negative_sampler """basic""" +1028 36 evaluator """rankbased""" +1028 37 dataset """kinships""" +1028 37 model """unstructuredmodel""" +1028 37 loss """bceaftersigmoid""" +1028 37 regularizer """no""" +1028 37 optimizer """adam""" +1028 37 training_loop """owa""" +1028 37 negative_sampler """basic""" +1028 37 evaluator """rankbased""" +1028 38 dataset """kinships""" +1028 38 model """unstructuredmodel""" +1028 38 loss """bceaftersigmoid""" +1028 38 regularizer """no""" +1028 38 optimizer """adam""" +1028 38 training_loop """owa""" +1028 38 negative_sampler """basic""" +1028 38 evaluator """rankbased""" +1028 39 dataset """kinships""" +1028 39 model """unstructuredmodel""" +1028 39 loss """bceaftersigmoid""" +1028 39 regularizer """no""" +1028 39 optimizer """adam""" +1028 39 training_loop """owa""" +1028 39 negative_sampler """basic""" +1028 39 evaluator """rankbased""" +1028 40 dataset """kinships""" +1028 40 model """unstructuredmodel""" +1028 40 loss """bceaftersigmoid""" +1028 40 regularizer 
"""no""" +1028 40 optimizer """adam""" +1028 40 training_loop """owa""" +1028 40 negative_sampler """basic""" +1028 40 evaluator """rankbased""" +1028 41 dataset """kinships""" +1028 41 model """unstructuredmodel""" +1028 41 loss """bceaftersigmoid""" +1028 41 regularizer """no""" +1028 41 optimizer """adam""" +1028 41 training_loop """owa""" +1028 41 negative_sampler """basic""" +1028 41 evaluator """rankbased""" +1028 42 dataset """kinships""" +1028 42 model """unstructuredmodel""" +1028 42 loss """bceaftersigmoid""" +1028 42 regularizer """no""" +1028 42 optimizer """adam""" +1028 42 training_loop """owa""" +1028 42 negative_sampler """basic""" +1028 42 evaluator """rankbased""" +1028 43 dataset """kinships""" +1028 43 model """unstructuredmodel""" +1028 43 loss """bceaftersigmoid""" +1028 43 regularizer """no""" +1028 43 optimizer """adam""" +1028 43 training_loop """owa""" +1028 43 negative_sampler """basic""" +1028 43 evaluator """rankbased""" +1028 44 dataset """kinships""" +1028 44 model """unstructuredmodel""" +1028 44 loss """bceaftersigmoid""" +1028 44 regularizer """no""" +1028 44 optimizer """adam""" +1028 44 training_loop """owa""" +1028 44 negative_sampler """basic""" +1028 44 evaluator """rankbased""" +1028 45 dataset """kinships""" +1028 45 model """unstructuredmodel""" +1028 45 loss """bceaftersigmoid""" +1028 45 regularizer """no""" +1028 45 optimizer """adam""" +1028 45 training_loop """owa""" +1028 45 negative_sampler """basic""" +1028 45 evaluator """rankbased""" +1028 46 dataset """kinships""" +1028 46 model """unstructuredmodel""" +1028 46 loss """bceaftersigmoid""" +1028 46 regularizer """no""" +1028 46 optimizer """adam""" +1028 46 training_loop """owa""" +1028 46 negative_sampler """basic""" +1028 46 evaluator """rankbased""" +1028 47 dataset """kinships""" +1028 47 model """unstructuredmodel""" +1028 47 loss """bceaftersigmoid""" +1028 47 regularizer """no""" +1028 47 optimizer """adam""" +1028 47 training_loop """owa""" +1028 47 
negative_sampler """basic""" +1028 47 evaluator """rankbased""" +1028 48 dataset """kinships""" +1028 48 model """unstructuredmodel""" +1028 48 loss """bceaftersigmoid""" +1028 48 regularizer """no""" +1028 48 optimizer """adam""" +1028 48 training_loop """owa""" +1028 48 negative_sampler """basic""" +1028 48 evaluator """rankbased""" +1028 49 dataset """kinships""" +1028 49 model """unstructuredmodel""" +1028 49 loss """bceaftersigmoid""" +1028 49 regularizer """no""" +1028 49 optimizer """adam""" +1028 49 training_loop """owa""" +1028 49 negative_sampler """basic""" +1028 49 evaluator """rankbased""" +1028 50 dataset """kinships""" +1028 50 model """unstructuredmodel""" +1028 50 loss """bceaftersigmoid""" +1028 50 regularizer """no""" +1028 50 optimizer """adam""" +1028 50 training_loop """owa""" +1028 50 negative_sampler """basic""" +1028 50 evaluator """rankbased""" +1028 51 dataset """kinships""" +1028 51 model """unstructuredmodel""" +1028 51 loss """bceaftersigmoid""" +1028 51 regularizer """no""" +1028 51 optimizer """adam""" +1028 51 training_loop """owa""" +1028 51 negative_sampler """basic""" +1028 51 evaluator """rankbased""" +1028 52 dataset """kinships""" +1028 52 model """unstructuredmodel""" +1028 52 loss """bceaftersigmoid""" +1028 52 regularizer """no""" +1028 52 optimizer """adam""" +1028 52 training_loop """owa""" +1028 52 negative_sampler """basic""" +1028 52 evaluator """rankbased""" +1028 53 dataset """kinships""" +1028 53 model """unstructuredmodel""" +1028 53 loss """bceaftersigmoid""" +1028 53 regularizer """no""" +1028 53 optimizer """adam""" +1028 53 training_loop """owa""" +1028 53 negative_sampler """basic""" +1028 53 evaluator """rankbased""" +1028 54 dataset """kinships""" +1028 54 model """unstructuredmodel""" +1028 54 loss """bceaftersigmoid""" +1028 54 regularizer """no""" +1028 54 optimizer """adam""" +1028 54 training_loop """owa""" +1028 54 negative_sampler """basic""" +1028 54 evaluator """rankbased""" +1028 55 dataset 
"""kinships""" +1028 55 model """unstructuredmodel""" +1028 55 loss """bceaftersigmoid""" +1028 55 regularizer """no""" +1028 55 optimizer """adam""" +1028 55 training_loop """owa""" +1028 55 negative_sampler """basic""" +1028 55 evaluator """rankbased""" +1028 56 dataset """kinships""" +1028 56 model """unstructuredmodel""" +1028 56 loss """bceaftersigmoid""" +1028 56 regularizer """no""" +1028 56 optimizer """adam""" +1028 56 training_loop """owa""" +1028 56 negative_sampler """basic""" +1028 56 evaluator """rankbased""" +1028 57 dataset """kinships""" +1028 57 model """unstructuredmodel""" +1028 57 loss """bceaftersigmoid""" +1028 57 regularizer """no""" +1028 57 optimizer """adam""" +1028 57 training_loop """owa""" +1028 57 negative_sampler """basic""" +1028 57 evaluator """rankbased""" +1028 58 dataset """kinships""" +1028 58 model """unstructuredmodel""" +1028 58 loss """bceaftersigmoid""" +1028 58 regularizer """no""" +1028 58 optimizer """adam""" +1028 58 training_loop """owa""" +1028 58 negative_sampler """basic""" +1028 58 evaluator """rankbased""" +1028 59 dataset """kinships""" +1028 59 model """unstructuredmodel""" +1028 59 loss """bceaftersigmoid""" +1028 59 regularizer """no""" +1028 59 optimizer """adam""" +1028 59 training_loop """owa""" +1028 59 negative_sampler """basic""" +1028 59 evaluator """rankbased""" +1028 60 dataset """kinships""" +1028 60 model """unstructuredmodel""" +1028 60 loss """bceaftersigmoid""" +1028 60 regularizer """no""" +1028 60 optimizer """adam""" +1028 60 training_loop """owa""" +1028 60 negative_sampler """basic""" +1028 60 evaluator """rankbased""" +1028 61 dataset """kinships""" +1028 61 model """unstructuredmodel""" +1028 61 loss """bceaftersigmoid""" +1028 61 regularizer """no""" +1028 61 optimizer """adam""" +1028 61 training_loop """owa""" +1028 61 negative_sampler """basic""" +1028 61 evaluator """rankbased""" +1028 62 dataset """kinships""" +1028 62 model """unstructuredmodel""" +1028 62 loss 
"""bceaftersigmoid""" +1028 62 regularizer """no""" +1028 62 optimizer """adam""" +1028 62 training_loop """owa""" +1028 62 negative_sampler """basic""" +1028 62 evaluator """rankbased""" +1028 63 dataset """kinships""" +1028 63 model """unstructuredmodel""" +1028 63 loss """bceaftersigmoid""" +1028 63 regularizer """no""" +1028 63 optimizer """adam""" +1028 63 training_loop """owa""" +1028 63 negative_sampler """basic""" +1028 63 evaluator """rankbased""" +1028 64 dataset """kinships""" +1028 64 model """unstructuredmodel""" +1028 64 loss """bceaftersigmoid""" +1028 64 regularizer """no""" +1028 64 optimizer """adam""" +1028 64 training_loop """owa""" +1028 64 negative_sampler """basic""" +1028 64 evaluator """rankbased""" +1028 65 dataset """kinships""" +1028 65 model """unstructuredmodel""" +1028 65 loss """bceaftersigmoid""" +1028 65 regularizer """no""" +1028 65 optimizer """adam""" +1028 65 training_loop """owa""" +1028 65 negative_sampler """basic""" +1028 65 evaluator """rankbased""" +1028 66 dataset """kinships""" +1028 66 model """unstructuredmodel""" +1028 66 loss """bceaftersigmoid""" +1028 66 regularizer """no""" +1028 66 optimizer """adam""" +1028 66 training_loop """owa""" +1028 66 negative_sampler """basic""" +1028 66 evaluator """rankbased""" +1028 67 dataset """kinships""" +1028 67 model """unstructuredmodel""" +1028 67 loss """bceaftersigmoid""" +1028 67 regularizer """no""" +1028 67 optimizer """adam""" +1028 67 training_loop """owa""" +1028 67 negative_sampler """basic""" +1028 67 evaluator """rankbased""" +1028 68 dataset """kinships""" +1028 68 model """unstructuredmodel""" +1028 68 loss """bceaftersigmoid""" +1028 68 regularizer """no""" +1028 68 optimizer """adam""" +1028 68 training_loop """owa""" +1028 68 negative_sampler """basic""" +1028 68 evaluator """rankbased""" +1028 69 dataset """kinships""" +1028 69 model """unstructuredmodel""" +1028 69 loss """bceaftersigmoid""" +1028 69 regularizer """no""" +1028 69 optimizer """adam""" +1028 
69 training_loop """owa""" +1028 69 negative_sampler """basic""" +1028 69 evaluator """rankbased""" +1028 70 dataset """kinships""" +1028 70 model """unstructuredmodel""" +1028 70 loss """bceaftersigmoid""" +1028 70 regularizer """no""" +1028 70 optimizer """adam""" +1028 70 training_loop """owa""" +1028 70 negative_sampler """basic""" +1028 70 evaluator """rankbased""" +1028 71 dataset """kinships""" +1028 71 model """unstructuredmodel""" +1028 71 loss """bceaftersigmoid""" +1028 71 regularizer """no""" +1028 71 optimizer """adam""" +1028 71 training_loop """owa""" +1028 71 negative_sampler """basic""" +1028 71 evaluator """rankbased""" +1028 72 dataset """kinships""" +1028 72 model """unstructuredmodel""" +1028 72 loss """bceaftersigmoid""" +1028 72 regularizer """no""" +1028 72 optimizer """adam""" +1028 72 training_loop """owa""" +1028 72 negative_sampler """basic""" +1028 72 evaluator """rankbased""" +1028 73 dataset """kinships""" +1028 73 model """unstructuredmodel""" +1028 73 loss """bceaftersigmoid""" +1028 73 regularizer """no""" +1028 73 optimizer """adam""" +1028 73 training_loop """owa""" +1028 73 negative_sampler """basic""" +1028 73 evaluator """rankbased""" +1028 74 dataset """kinships""" +1028 74 model """unstructuredmodel""" +1028 74 loss """bceaftersigmoid""" +1028 74 regularizer """no""" +1028 74 optimizer """adam""" +1028 74 training_loop """owa""" +1028 74 negative_sampler """basic""" +1028 74 evaluator """rankbased""" +1028 75 dataset """kinships""" +1028 75 model """unstructuredmodel""" +1028 75 loss """bceaftersigmoid""" +1028 75 regularizer """no""" +1028 75 optimizer """adam""" +1028 75 training_loop """owa""" +1028 75 negative_sampler """basic""" +1028 75 evaluator """rankbased""" +1028 76 dataset """kinships""" +1028 76 model """unstructuredmodel""" +1028 76 loss """bceaftersigmoid""" +1028 76 regularizer """no""" +1028 76 optimizer """adam""" +1028 76 training_loop """owa""" +1028 76 negative_sampler """basic""" +1028 76 evaluator 
"""rankbased""" +1028 77 dataset """kinships""" +1028 77 model """unstructuredmodel""" +1028 77 loss """bceaftersigmoid""" +1028 77 regularizer """no""" +1028 77 optimizer """adam""" +1028 77 training_loop """owa""" +1028 77 negative_sampler """basic""" +1028 77 evaluator """rankbased""" +1028 78 dataset """kinships""" +1028 78 model """unstructuredmodel""" +1028 78 loss """bceaftersigmoid""" +1028 78 regularizer """no""" +1028 78 optimizer """adam""" +1028 78 training_loop """owa""" +1028 78 negative_sampler """basic""" +1028 78 evaluator """rankbased""" +1028 79 dataset """kinships""" +1028 79 model """unstructuredmodel""" +1028 79 loss """bceaftersigmoid""" +1028 79 regularizer """no""" +1028 79 optimizer """adam""" +1028 79 training_loop """owa""" +1028 79 negative_sampler """basic""" +1028 79 evaluator """rankbased""" +1028 80 dataset """kinships""" +1028 80 model """unstructuredmodel""" +1028 80 loss """bceaftersigmoid""" +1028 80 regularizer """no""" +1028 80 optimizer """adam""" +1028 80 training_loop """owa""" +1028 80 negative_sampler """basic""" +1028 80 evaluator """rankbased""" +1028 81 dataset """kinships""" +1028 81 model """unstructuredmodel""" +1028 81 loss """bceaftersigmoid""" +1028 81 regularizer """no""" +1028 81 optimizer """adam""" +1028 81 training_loop """owa""" +1028 81 negative_sampler """basic""" +1028 81 evaluator """rankbased""" +1028 82 dataset """kinships""" +1028 82 model """unstructuredmodel""" +1028 82 loss """bceaftersigmoid""" +1028 82 regularizer """no""" +1028 82 optimizer """adam""" +1028 82 training_loop """owa""" +1028 82 negative_sampler """basic""" +1028 82 evaluator """rankbased""" +1028 83 dataset """kinships""" +1028 83 model """unstructuredmodel""" +1028 83 loss """bceaftersigmoid""" +1028 83 regularizer """no""" +1028 83 optimizer """adam""" +1028 83 training_loop """owa""" +1028 83 negative_sampler """basic""" +1028 83 evaluator """rankbased""" +1028 84 dataset """kinships""" +1028 84 model """unstructuredmodel""" 
+1028 84 loss """bceaftersigmoid""" +1028 84 regularizer """no""" +1028 84 optimizer """adam""" +1028 84 training_loop """owa""" +1028 84 negative_sampler """basic""" +1028 84 evaluator """rankbased""" +1028 85 dataset """kinships""" +1028 85 model """unstructuredmodel""" +1028 85 loss """bceaftersigmoid""" +1028 85 regularizer """no""" +1028 85 optimizer """adam""" +1028 85 training_loop """owa""" +1028 85 negative_sampler """basic""" +1028 85 evaluator """rankbased""" +1028 86 dataset """kinships""" +1028 86 model """unstructuredmodel""" +1028 86 loss """bceaftersigmoid""" +1028 86 regularizer """no""" +1028 86 optimizer """adam""" +1028 86 training_loop """owa""" +1028 86 negative_sampler """basic""" +1028 86 evaluator """rankbased""" +1028 87 dataset """kinships""" +1028 87 model """unstructuredmodel""" +1028 87 loss """bceaftersigmoid""" +1028 87 regularizer """no""" +1028 87 optimizer """adam""" +1028 87 training_loop """owa""" +1028 87 negative_sampler """basic""" +1028 87 evaluator """rankbased""" +1028 88 dataset """kinships""" +1028 88 model """unstructuredmodel""" +1028 88 loss """bceaftersigmoid""" +1028 88 regularizer """no""" +1028 88 optimizer """adam""" +1028 88 training_loop """owa""" +1028 88 negative_sampler """basic""" +1028 88 evaluator """rankbased""" +1028 89 dataset """kinships""" +1028 89 model """unstructuredmodel""" +1028 89 loss """bceaftersigmoid""" +1028 89 regularizer """no""" +1028 89 optimizer """adam""" +1028 89 training_loop """owa""" +1028 89 negative_sampler """basic""" +1028 89 evaluator """rankbased""" +1028 90 dataset """kinships""" +1028 90 model """unstructuredmodel""" +1028 90 loss """bceaftersigmoid""" +1028 90 regularizer """no""" +1028 90 optimizer """adam""" +1028 90 training_loop """owa""" +1028 90 negative_sampler """basic""" +1028 90 evaluator """rankbased""" +1028 91 dataset """kinships""" +1028 91 model """unstructuredmodel""" +1028 91 loss """bceaftersigmoid""" +1028 91 regularizer """no""" +1028 91 optimizer 
"""adam""" +1028 91 training_loop """owa""" +1028 91 negative_sampler """basic""" +1028 91 evaluator """rankbased""" +1028 92 dataset """kinships""" +1028 92 model """unstructuredmodel""" +1028 92 loss """bceaftersigmoid""" +1028 92 regularizer """no""" +1028 92 optimizer """adam""" +1028 92 training_loop """owa""" +1028 92 negative_sampler """basic""" +1028 92 evaluator """rankbased""" +1028 93 dataset """kinships""" +1028 93 model """unstructuredmodel""" +1028 93 loss """bceaftersigmoid""" +1028 93 regularizer """no""" +1028 93 optimizer """adam""" +1028 93 training_loop """owa""" +1028 93 negative_sampler """basic""" +1028 93 evaluator """rankbased""" +1028 94 dataset """kinships""" +1028 94 model """unstructuredmodel""" +1028 94 loss """bceaftersigmoid""" +1028 94 regularizer """no""" +1028 94 optimizer """adam""" +1028 94 training_loop """owa""" +1028 94 negative_sampler """basic""" +1028 94 evaluator """rankbased""" +1028 95 dataset """kinships""" +1028 95 model """unstructuredmodel""" +1028 95 loss """bceaftersigmoid""" +1028 95 regularizer """no""" +1028 95 optimizer """adam""" +1028 95 training_loop """owa""" +1028 95 negative_sampler """basic""" +1028 95 evaluator """rankbased""" +1028 96 dataset """kinships""" +1028 96 model """unstructuredmodel""" +1028 96 loss """bceaftersigmoid""" +1028 96 regularizer """no""" +1028 96 optimizer """adam""" +1028 96 training_loop """owa""" +1028 96 negative_sampler """basic""" +1028 96 evaluator """rankbased""" +1028 97 dataset """kinships""" +1028 97 model """unstructuredmodel""" +1028 97 loss """bceaftersigmoid""" +1028 97 regularizer """no""" +1028 97 optimizer """adam""" +1028 97 training_loop """owa""" +1028 97 negative_sampler """basic""" +1028 97 evaluator """rankbased""" +1028 98 dataset """kinships""" +1028 98 model """unstructuredmodel""" +1028 98 loss """bceaftersigmoid""" +1028 98 regularizer """no""" +1028 98 optimizer """adam""" +1028 98 training_loop """owa""" +1028 98 negative_sampler """basic""" +1028 
98 evaluator """rankbased""" +1028 99 dataset """kinships""" +1028 99 model """unstructuredmodel""" +1028 99 loss """bceaftersigmoid""" +1028 99 regularizer """no""" +1028 99 optimizer """adam""" +1028 99 training_loop """owa""" +1028 99 negative_sampler """basic""" +1028 99 evaluator """rankbased""" +1028 100 dataset """kinships""" +1028 100 model """unstructuredmodel""" +1028 100 loss """bceaftersigmoid""" +1028 100 regularizer """no""" +1028 100 optimizer """adam""" +1028 100 training_loop """owa""" +1028 100 negative_sampler """basic""" +1028 100 evaluator """rankbased""" +1029 1 model.embedding_dim 0.0 +1029 1 model.scoring_fct_norm 1.0 +1029 1 optimizer.lr 0.01712459140842078 +1029 1 negative_sampler.num_negs_per_pos 44.0 +1029 1 training.batch_size 1.0 +1029 2 model.embedding_dim 0.0 +1029 2 model.scoring_fct_norm 1.0 +1029 2 optimizer.lr 0.08280069281876101 +1029 2 negative_sampler.num_negs_per_pos 65.0 +1029 2 training.batch_size 1.0 +1029 3 model.embedding_dim 0.0 +1029 3 model.scoring_fct_norm 1.0 +1029 3 optimizer.lr 0.031114580010965634 +1029 3 negative_sampler.num_negs_per_pos 81.0 +1029 3 training.batch_size 2.0 +1029 4 model.embedding_dim 1.0 +1029 4 model.scoring_fct_norm 2.0 +1029 4 optimizer.lr 0.009187263632198515 +1029 4 negative_sampler.num_negs_per_pos 37.0 +1029 4 training.batch_size 0.0 +1029 5 model.embedding_dim 1.0 +1029 5 model.scoring_fct_norm 1.0 +1029 5 optimizer.lr 0.005501976297638884 +1029 5 negative_sampler.num_negs_per_pos 32.0 +1029 5 training.batch_size 1.0 +1029 6 model.embedding_dim 2.0 +1029 6 model.scoring_fct_norm 1.0 +1029 6 optimizer.lr 0.0018584884225108396 +1029 6 negative_sampler.num_negs_per_pos 71.0 +1029 6 training.batch_size 0.0 +1029 7 model.embedding_dim 1.0 +1029 7 model.scoring_fct_norm 1.0 +1029 7 optimizer.lr 0.004287673002320632 +1029 7 negative_sampler.num_negs_per_pos 54.0 +1029 7 training.batch_size 0.0 +1029 8 model.embedding_dim 0.0 +1029 8 model.scoring_fct_norm 1.0 +1029 8 optimizer.lr 
0.0010837387590542777 +1029 8 negative_sampler.num_negs_per_pos 82.0 +1029 8 training.batch_size 1.0 +1029 9 model.embedding_dim 0.0 +1029 9 model.scoring_fct_norm 2.0 +1029 9 optimizer.lr 0.09469662162832441 +1029 9 negative_sampler.num_negs_per_pos 26.0 +1029 9 training.batch_size 0.0 +1029 10 model.embedding_dim 0.0 +1029 10 model.scoring_fct_norm 2.0 +1029 10 optimizer.lr 0.006230733693018435 +1029 10 negative_sampler.num_negs_per_pos 66.0 +1029 10 training.batch_size 2.0 +1029 11 model.embedding_dim 1.0 +1029 11 model.scoring_fct_norm 2.0 +1029 11 optimizer.lr 0.018336907914717746 +1029 11 negative_sampler.num_negs_per_pos 24.0 +1029 11 training.batch_size 2.0 +1029 12 model.embedding_dim 1.0 +1029 12 model.scoring_fct_norm 1.0 +1029 12 optimizer.lr 0.011036010987701805 +1029 12 negative_sampler.num_negs_per_pos 80.0 +1029 12 training.batch_size 1.0 +1029 13 model.embedding_dim 2.0 +1029 13 model.scoring_fct_norm 1.0 +1029 13 optimizer.lr 0.009819882132043966 +1029 13 negative_sampler.num_negs_per_pos 45.0 +1029 13 training.batch_size 1.0 +1029 14 model.embedding_dim 1.0 +1029 14 model.scoring_fct_norm 1.0 +1029 14 optimizer.lr 0.05515384590456826 +1029 14 negative_sampler.num_negs_per_pos 8.0 +1029 14 training.batch_size 0.0 +1029 15 model.embedding_dim 2.0 +1029 15 model.scoring_fct_norm 2.0 +1029 15 optimizer.lr 0.03657550976621134 +1029 15 negative_sampler.num_negs_per_pos 63.0 +1029 15 training.batch_size 2.0 +1029 16 model.embedding_dim 0.0 +1029 16 model.scoring_fct_norm 1.0 +1029 16 optimizer.lr 0.0120810950926683 +1029 16 negative_sampler.num_negs_per_pos 76.0 +1029 16 training.batch_size 2.0 +1029 17 model.embedding_dim 0.0 +1029 17 model.scoring_fct_norm 2.0 +1029 17 optimizer.lr 0.019249010880146605 +1029 17 negative_sampler.num_negs_per_pos 61.0 +1029 17 training.batch_size 0.0 +1029 18 model.embedding_dim 2.0 +1029 18 model.scoring_fct_norm 2.0 +1029 18 optimizer.lr 0.001358908711110947 +1029 18 negative_sampler.num_negs_per_pos 99.0 +1029 18 
training.batch_size 0.0 +1029 19 model.embedding_dim 2.0 +1029 19 model.scoring_fct_norm 1.0 +1029 19 optimizer.lr 0.016423843549966326 +1029 19 negative_sampler.num_negs_per_pos 29.0 +1029 19 training.batch_size 1.0 +1029 20 model.embedding_dim 0.0 +1029 20 model.scoring_fct_norm 2.0 +1029 20 optimizer.lr 0.08636021493134582 +1029 20 negative_sampler.num_negs_per_pos 18.0 +1029 20 training.batch_size 0.0 +1029 21 model.embedding_dim 1.0 +1029 21 model.scoring_fct_norm 1.0 +1029 21 optimizer.lr 0.006626771222319541 +1029 21 negative_sampler.num_negs_per_pos 19.0 +1029 21 training.batch_size 1.0 +1029 22 model.embedding_dim 1.0 +1029 22 model.scoring_fct_norm 1.0 +1029 22 optimizer.lr 0.04789431529282567 +1029 22 negative_sampler.num_negs_per_pos 34.0 +1029 22 training.batch_size 0.0 +1029 23 model.embedding_dim 0.0 +1029 23 model.scoring_fct_norm 2.0 +1029 23 optimizer.lr 0.03463253961027828 +1029 23 negative_sampler.num_negs_per_pos 54.0 +1029 23 training.batch_size 0.0 +1029 24 model.embedding_dim 0.0 +1029 24 model.scoring_fct_norm 2.0 +1029 24 optimizer.lr 0.03112135237628496 +1029 24 negative_sampler.num_negs_per_pos 54.0 +1029 24 training.batch_size 0.0 +1029 25 model.embedding_dim 1.0 +1029 25 model.scoring_fct_norm 1.0 +1029 25 optimizer.lr 0.0018694898112550638 +1029 25 negative_sampler.num_negs_per_pos 83.0 +1029 25 training.batch_size 2.0 +1029 26 model.embedding_dim 2.0 +1029 26 model.scoring_fct_norm 2.0 +1029 26 optimizer.lr 0.019963409513457632 +1029 26 negative_sampler.num_negs_per_pos 84.0 +1029 26 training.batch_size 0.0 +1029 27 model.embedding_dim 2.0 +1029 27 model.scoring_fct_norm 2.0 +1029 27 optimizer.lr 0.0020945698113256734 +1029 27 negative_sampler.num_negs_per_pos 77.0 +1029 27 training.batch_size 0.0 +1029 28 model.embedding_dim 0.0 +1029 28 model.scoring_fct_norm 2.0 +1029 28 optimizer.lr 0.02072897106413721 +1029 28 negative_sampler.num_negs_per_pos 91.0 +1029 28 training.batch_size 2.0 +1029 29 model.embedding_dim 1.0 +1029 29 
model.scoring_fct_norm 2.0 +1029 29 optimizer.lr 0.03674004381296121 +1029 29 negative_sampler.num_negs_per_pos 26.0 +1029 29 training.batch_size 0.0 +1029 30 model.embedding_dim 2.0 +1029 30 model.scoring_fct_norm 1.0 +1029 30 optimizer.lr 0.036930405640146456 +1029 30 negative_sampler.num_negs_per_pos 55.0 +1029 30 training.batch_size 1.0 +1029 31 model.embedding_dim 2.0 +1029 31 model.scoring_fct_norm 1.0 +1029 31 optimizer.lr 0.03796262809067944 +1029 31 negative_sampler.num_negs_per_pos 87.0 +1029 31 training.batch_size 1.0 +1029 32 model.embedding_dim 1.0 +1029 32 model.scoring_fct_norm 2.0 +1029 32 optimizer.lr 0.00224762565129041 +1029 32 negative_sampler.num_negs_per_pos 80.0 +1029 32 training.batch_size 1.0 +1029 33 model.embedding_dim 0.0 +1029 33 model.scoring_fct_norm 2.0 +1029 33 optimizer.lr 0.011904826015321777 +1029 33 negative_sampler.num_negs_per_pos 32.0 +1029 33 training.batch_size 1.0 +1029 34 model.embedding_dim 0.0 +1029 34 model.scoring_fct_norm 2.0 +1029 34 optimizer.lr 0.06494292444911295 +1029 34 negative_sampler.num_negs_per_pos 43.0 +1029 34 training.batch_size 2.0 +1029 35 model.embedding_dim 2.0 +1029 35 model.scoring_fct_norm 1.0 +1029 35 optimizer.lr 0.05272942151183068 +1029 35 negative_sampler.num_negs_per_pos 49.0 +1029 35 training.batch_size 2.0 +1029 36 model.embedding_dim 1.0 +1029 36 model.scoring_fct_norm 2.0 +1029 36 optimizer.lr 0.00788568904567587 +1029 36 negative_sampler.num_negs_per_pos 32.0 +1029 36 training.batch_size 1.0 +1029 37 model.embedding_dim 0.0 +1029 37 model.scoring_fct_norm 2.0 +1029 37 optimizer.lr 0.00124239124098052 +1029 37 negative_sampler.num_negs_per_pos 1.0 +1029 37 training.batch_size 2.0 +1029 38 model.embedding_dim 1.0 +1029 38 model.scoring_fct_norm 1.0 +1029 38 optimizer.lr 0.07477927853817187 +1029 38 negative_sampler.num_negs_per_pos 74.0 +1029 38 training.batch_size 2.0 +1029 39 model.embedding_dim 0.0 +1029 39 model.scoring_fct_norm 2.0 +1029 39 optimizer.lr 0.0017221492765820193 +1029 
39 negative_sampler.num_negs_per_pos 69.0 +1029 39 training.batch_size 2.0 +1029 40 model.embedding_dim 2.0 +1029 40 model.scoring_fct_norm 1.0 +1029 40 optimizer.lr 0.0011796686015391029 +1029 40 negative_sampler.num_negs_per_pos 78.0 +1029 40 training.batch_size 1.0 +1029 41 model.embedding_dim 2.0 +1029 41 model.scoring_fct_norm 1.0 +1029 41 optimizer.lr 0.0015551463649483791 +1029 41 negative_sampler.num_negs_per_pos 25.0 +1029 41 training.batch_size 0.0 +1029 42 model.embedding_dim 2.0 +1029 42 model.scoring_fct_norm 2.0 +1029 42 optimizer.lr 0.013990310900178065 +1029 42 negative_sampler.num_negs_per_pos 40.0 +1029 42 training.batch_size 1.0 +1029 43 model.embedding_dim 2.0 +1029 43 model.scoring_fct_norm 2.0 +1029 43 optimizer.lr 0.0024207235187967188 +1029 43 negative_sampler.num_negs_per_pos 0.0 +1029 43 training.batch_size 1.0 +1029 44 model.embedding_dim 2.0 +1029 44 model.scoring_fct_norm 1.0 +1029 44 optimizer.lr 0.042988201862320476 +1029 44 negative_sampler.num_negs_per_pos 10.0 +1029 44 training.batch_size 2.0 +1029 45 model.embedding_dim 2.0 +1029 45 model.scoring_fct_norm 2.0 +1029 45 optimizer.lr 0.03259943496218045 +1029 45 negative_sampler.num_negs_per_pos 52.0 +1029 45 training.batch_size 1.0 +1029 46 model.embedding_dim 0.0 +1029 46 model.scoring_fct_norm 2.0 +1029 46 optimizer.lr 0.006831347252457515 +1029 46 negative_sampler.num_negs_per_pos 48.0 +1029 46 training.batch_size 2.0 +1029 47 model.embedding_dim 1.0 +1029 47 model.scoring_fct_norm 1.0 +1029 47 optimizer.lr 0.003572330626402396 +1029 47 negative_sampler.num_negs_per_pos 95.0 +1029 47 training.batch_size 2.0 +1029 48 model.embedding_dim 0.0 +1029 48 model.scoring_fct_norm 2.0 +1029 48 optimizer.lr 0.00508486894135934 +1029 48 negative_sampler.num_negs_per_pos 85.0 +1029 48 training.batch_size 1.0 +1029 49 model.embedding_dim 2.0 +1029 49 model.scoring_fct_norm 2.0 +1029 49 optimizer.lr 0.06210119347842749 +1029 49 negative_sampler.num_negs_per_pos 4.0 +1029 49 training.batch_size 
0.0 +1029 50 model.embedding_dim 0.0 +1029 50 model.scoring_fct_norm 2.0 +1029 50 optimizer.lr 0.025762501381946454 +1029 50 negative_sampler.num_negs_per_pos 87.0 +1029 50 training.batch_size 2.0 +1029 51 model.embedding_dim 0.0 +1029 51 model.scoring_fct_norm 2.0 +1029 51 optimizer.lr 0.015186120544496959 +1029 51 negative_sampler.num_negs_per_pos 33.0 +1029 51 training.batch_size 1.0 +1029 52 model.embedding_dim 2.0 +1029 52 model.scoring_fct_norm 2.0 +1029 52 optimizer.lr 0.001002051985690018 +1029 52 negative_sampler.num_negs_per_pos 85.0 +1029 52 training.batch_size 2.0 +1029 53 model.embedding_dim 1.0 +1029 53 model.scoring_fct_norm 1.0 +1029 53 optimizer.lr 0.004600179928337705 +1029 53 negative_sampler.num_negs_per_pos 17.0 +1029 53 training.batch_size 2.0 +1029 54 model.embedding_dim 1.0 +1029 54 model.scoring_fct_norm 2.0 +1029 54 optimizer.lr 0.0010879290542794057 +1029 54 negative_sampler.num_negs_per_pos 62.0 +1029 54 training.batch_size 0.0 +1029 55 model.embedding_dim 2.0 +1029 55 model.scoring_fct_norm 1.0 +1029 55 optimizer.lr 0.061381610909212986 +1029 55 negative_sampler.num_negs_per_pos 38.0 +1029 55 training.batch_size 1.0 +1029 56 model.embedding_dim 1.0 +1029 56 model.scoring_fct_norm 1.0 +1029 56 optimizer.lr 0.0038259916646343492 +1029 56 negative_sampler.num_negs_per_pos 13.0 +1029 56 training.batch_size 2.0 +1029 57 model.embedding_dim 0.0 +1029 57 model.scoring_fct_norm 1.0 +1029 57 optimizer.lr 0.033548642376830674 +1029 57 negative_sampler.num_negs_per_pos 69.0 +1029 57 training.batch_size 2.0 +1029 58 model.embedding_dim 0.0 +1029 58 model.scoring_fct_norm 1.0 +1029 58 optimizer.lr 0.019653702098442694 +1029 58 negative_sampler.num_negs_per_pos 14.0 +1029 58 training.batch_size 2.0 +1029 59 model.embedding_dim 1.0 +1029 59 model.scoring_fct_norm 1.0 +1029 59 optimizer.lr 0.03913717354809125 +1029 59 negative_sampler.num_negs_per_pos 68.0 +1029 59 training.batch_size 2.0 +1029 60 model.embedding_dim 2.0 +1029 60 model.scoring_fct_norm 
2.0 +1029 60 optimizer.lr 0.00971051840819259 +1029 60 negative_sampler.num_negs_per_pos 86.0 +1029 60 training.batch_size 2.0 +1029 61 model.embedding_dim 2.0 +1029 61 model.scoring_fct_norm 2.0 +1029 61 optimizer.lr 0.0019424084442485892 +1029 61 negative_sampler.num_negs_per_pos 82.0 +1029 61 training.batch_size 2.0 +1029 62 model.embedding_dim 1.0 +1029 62 model.scoring_fct_norm 1.0 +1029 62 optimizer.lr 0.004184348770996109 +1029 62 negative_sampler.num_negs_per_pos 65.0 +1029 62 training.batch_size 2.0 +1029 63 model.embedding_dim 2.0 +1029 63 model.scoring_fct_norm 1.0 +1029 63 optimizer.lr 0.029132439594045417 +1029 63 negative_sampler.num_negs_per_pos 90.0 +1029 63 training.batch_size 1.0 +1029 64 model.embedding_dim 0.0 +1029 64 model.scoring_fct_norm 1.0 +1029 64 optimizer.lr 0.005065498702495614 +1029 64 negative_sampler.num_negs_per_pos 64.0 +1029 64 training.batch_size 1.0 +1029 65 model.embedding_dim 0.0 +1029 65 model.scoring_fct_norm 2.0 +1029 65 optimizer.lr 0.0016247399805106159 +1029 65 negative_sampler.num_negs_per_pos 58.0 +1029 65 training.batch_size 2.0 +1029 66 model.embedding_dim 2.0 +1029 66 model.scoring_fct_norm 1.0 +1029 66 optimizer.lr 0.004289680460554342 +1029 66 negative_sampler.num_negs_per_pos 80.0 +1029 66 training.batch_size 2.0 +1029 67 model.embedding_dim 1.0 +1029 67 model.scoring_fct_norm 2.0 +1029 67 optimizer.lr 0.07918320969031929 +1029 67 negative_sampler.num_negs_per_pos 35.0 +1029 67 training.batch_size 0.0 +1029 68 model.embedding_dim 1.0 +1029 68 model.scoring_fct_norm 2.0 +1029 68 optimizer.lr 0.01633972896657268 +1029 68 negative_sampler.num_negs_per_pos 5.0 +1029 68 training.batch_size 1.0 +1029 69 model.embedding_dim 2.0 +1029 69 model.scoring_fct_norm 1.0 +1029 69 optimizer.lr 0.004598143781325877 +1029 69 negative_sampler.num_negs_per_pos 9.0 +1029 69 training.batch_size 0.0 +1029 70 model.embedding_dim 1.0 +1029 70 model.scoring_fct_norm 1.0 +1029 70 optimizer.lr 0.07960106417441246 +1029 70 
negative_sampler.num_negs_per_pos 97.0 +1029 70 training.batch_size 1.0 +1029 71 model.embedding_dim 0.0 +1029 71 model.scoring_fct_norm 1.0 +1029 71 optimizer.lr 0.06700738283343657 +1029 71 negative_sampler.num_negs_per_pos 75.0 +1029 71 training.batch_size 1.0 +1029 72 model.embedding_dim 0.0 +1029 72 model.scoring_fct_norm 1.0 +1029 72 optimizer.lr 0.050201043581871135 +1029 72 negative_sampler.num_negs_per_pos 45.0 +1029 72 training.batch_size 1.0 +1029 73 model.embedding_dim 1.0 +1029 73 model.scoring_fct_norm 2.0 +1029 73 optimizer.lr 0.005638118591096402 +1029 73 negative_sampler.num_negs_per_pos 7.0 +1029 73 training.batch_size 1.0 +1029 74 model.embedding_dim 0.0 +1029 74 model.scoring_fct_norm 1.0 +1029 74 optimizer.lr 0.06370654580822824 +1029 74 negative_sampler.num_negs_per_pos 92.0 +1029 74 training.batch_size 1.0 +1029 75 model.embedding_dim 1.0 +1029 75 model.scoring_fct_norm 2.0 +1029 75 optimizer.lr 0.05610937593186536 +1029 75 negative_sampler.num_negs_per_pos 42.0 +1029 75 training.batch_size 0.0 +1029 76 model.embedding_dim 2.0 +1029 76 model.scoring_fct_norm 2.0 +1029 76 optimizer.lr 0.002421112557442926 +1029 76 negative_sampler.num_negs_per_pos 24.0 +1029 76 training.batch_size 2.0 +1029 77 model.embedding_dim 0.0 +1029 77 model.scoring_fct_norm 2.0 +1029 77 optimizer.lr 0.03275833144870509 +1029 77 negative_sampler.num_negs_per_pos 13.0 +1029 77 training.batch_size 0.0 +1029 78 model.embedding_dim 0.0 +1029 78 model.scoring_fct_norm 2.0 +1029 78 optimizer.lr 0.013027482898810741 +1029 78 negative_sampler.num_negs_per_pos 15.0 +1029 78 training.batch_size 2.0 +1029 79 model.embedding_dim 0.0 +1029 79 model.scoring_fct_norm 2.0 +1029 79 optimizer.lr 0.006063116947191748 +1029 79 negative_sampler.num_negs_per_pos 10.0 +1029 79 training.batch_size 0.0 +1029 80 model.embedding_dim 2.0 +1029 80 model.scoring_fct_norm 1.0 +1029 80 optimizer.lr 0.001462508129143944 +1029 80 negative_sampler.num_negs_per_pos 58.0 +1029 80 training.batch_size 1.0 
+1029 81 model.embedding_dim 1.0 +1029 81 model.scoring_fct_norm 2.0 +1029 81 optimizer.lr 0.055117488112853004 +1029 81 negative_sampler.num_negs_per_pos 90.0 +1029 81 training.batch_size 1.0 +1029 82 model.embedding_dim 2.0 +1029 82 model.scoring_fct_norm 1.0 +1029 82 optimizer.lr 0.011032439565892463 +1029 82 negative_sampler.num_negs_per_pos 69.0 +1029 82 training.batch_size 0.0 +1029 83 model.embedding_dim 2.0 +1029 83 model.scoring_fct_norm 1.0 +1029 83 optimizer.lr 0.06903377998514593 +1029 83 negative_sampler.num_negs_per_pos 33.0 +1029 83 training.batch_size 1.0 +1029 84 model.embedding_dim 1.0 +1029 84 model.scoring_fct_norm 2.0 +1029 84 optimizer.lr 0.030399083777821245 +1029 84 negative_sampler.num_negs_per_pos 10.0 +1029 84 training.batch_size 2.0 +1029 85 model.embedding_dim 2.0 +1029 85 model.scoring_fct_norm 1.0 +1029 85 optimizer.lr 0.06831916791766507 +1029 85 negative_sampler.num_negs_per_pos 81.0 +1029 85 training.batch_size 2.0 +1029 86 model.embedding_dim 0.0 +1029 86 model.scoring_fct_norm 2.0 +1029 86 optimizer.lr 0.002845444619157914 +1029 86 negative_sampler.num_negs_per_pos 18.0 +1029 86 training.batch_size 2.0 +1029 87 model.embedding_dim 2.0 +1029 87 model.scoring_fct_norm 2.0 +1029 87 optimizer.lr 0.007529978928886072 +1029 87 negative_sampler.num_negs_per_pos 29.0 +1029 87 training.batch_size 1.0 +1029 88 model.embedding_dim 1.0 +1029 88 model.scoring_fct_norm 1.0 +1029 88 optimizer.lr 0.018588002070775145 +1029 88 negative_sampler.num_negs_per_pos 55.0 +1029 88 training.batch_size 2.0 +1029 89 model.embedding_dim 0.0 +1029 89 model.scoring_fct_norm 1.0 +1029 89 optimizer.lr 0.00936518342211684 +1029 89 negative_sampler.num_negs_per_pos 8.0 +1029 89 training.batch_size 1.0 +1029 90 model.embedding_dim 0.0 +1029 90 model.scoring_fct_norm 2.0 +1029 90 optimizer.lr 0.007873101241813969 +1029 90 negative_sampler.num_negs_per_pos 25.0 +1029 90 training.batch_size 2.0 +1029 91 model.embedding_dim 2.0 +1029 91 model.scoring_fct_norm 2.0 
+1029 91 optimizer.lr 0.024945420931703252 +1029 91 negative_sampler.num_negs_per_pos 32.0 +1029 91 training.batch_size 1.0 +1029 92 model.embedding_dim 2.0 +1029 92 model.scoring_fct_norm 1.0 +1029 92 optimizer.lr 0.0580882960597822 +1029 92 negative_sampler.num_negs_per_pos 80.0 +1029 92 training.batch_size 1.0 +1029 93 model.embedding_dim 1.0 +1029 93 model.scoring_fct_norm 1.0 +1029 93 optimizer.lr 0.006231085717036652 +1029 93 negative_sampler.num_negs_per_pos 91.0 +1029 93 training.batch_size 2.0 +1029 94 model.embedding_dim 0.0 +1029 94 model.scoring_fct_norm 1.0 +1029 94 optimizer.lr 0.0022849818873714907 +1029 94 negative_sampler.num_negs_per_pos 9.0 +1029 94 training.batch_size 1.0 +1029 95 model.embedding_dim 1.0 +1029 95 model.scoring_fct_norm 2.0 +1029 95 optimizer.lr 0.002256194976758668 +1029 95 negative_sampler.num_negs_per_pos 19.0 +1029 95 training.batch_size 2.0 +1029 96 model.embedding_dim 0.0 +1029 96 model.scoring_fct_norm 2.0 +1029 96 optimizer.lr 0.009483568899602774 +1029 96 negative_sampler.num_negs_per_pos 17.0 +1029 96 training.batch_size 0.0 +1029 97 model.embedding_dim 2.0 +1029 97 model.scoring_fct_norm 2.0 +1029 97 optimizer.lr 0.004231691508944341 +1029 97 negative_sampler.num_negs_per_pos 56.0 +1029 97 training.batch_size 0.0 +1029 98 model.embedding_dim 0.0 +1029 98 model.scoring_fct_norm 2.0 +1029 98 optimizer.lr 0.07949431622372385 +1029 98 negative_sampler.num_negs_per_pos 9.0 +1029 98 training.batch_size 0.0 +1029 99 model.embedding_dim 1.0 +1029 99 model.scoring_fct_norm 2.0 +1029 99 optimizer.lr 0.0020807970914532067 +1029 99 negative_sampler.num_negs_per_pos 81.0 +1029 99 training.batch_size 0.0 +1029 100 model.embedding_dim 2.0 +1029 100 model.scoring_fct_norm 1.0 +1029 100 optimizer.lr 0.06452855919737122 +1029 100 negative_sampler.num_negs_per_pos 28.0 +1029 100 training.batch_size 2.0 +1029 1 dataset """kinships""" +1029 1 model """unstructuredmodel""" +1029 1 loss """softplus""" +1029 1 regularizer """no""" +1029 1 
optimizer """adam""" +1029 1 training_loop """owa""" +1029 1 negative_sampler """basic""" +1029 1 evaluator """rankbased""" +1029 2 dataset """kinships""" +1029 2 model """unstructuredmodel""" +1029 2 loss """softplus""" +1029 2 regularizer """no""" +1029 2 optimizer """adam""" +1029 2 training_loop """owa""" +1029 2 negative_sampler """basic""" +1029 2 evaluator """rankbased""" +1029 3 dataset """kinships""" +1029 3 model """unstructuredmodel""" +1029 3 loss """softplus""" +1029 3 regularizer """no""" +1029 3 optimizer """adam""" +1029 3 training_loop """owa""" +1029 3 negative_sampler """basic""" +1029 3 evaluator """rankbased""" +1029 4 dataset """kinships""" +1029 4 model """unstructuredmodel""" +1029 4 loss """softplus""" +1029 4 regularizer """no""" +1029 4 optimizer """adam""" +1029 4 training_loop """owa""" +1029 4 negative_sampler """basic""" +1029 4 evaluator """rankbased""" +1029 5 dataset """kinships""" +1029 5 model """unstructuredmodel""" +1029 5 loss """softplus""" +1029 5 regularizer """no""" +1029 5 optimizer """adam""" +1029 5 training_loop """owa""" +1029 5 negative_sampler """basic""" +1029 5 evaluator """rankbased""" +1029 6 dataset """kinships""" +1029 6 model """unstructuredmodel""" +1029 6 loss """softplus""" +1029 6 regularizer """no""" +1029 6 optimizer """adam""" +1029 6 training_loop """owa""" +1029 6 negative_sampler """basic""" +1029 6 evaluator """rankbased""" +1029 7 dataset """kinships""" +1029 7 model """unstructuredmodel""" +1029 7 loss """softplus""" +1029 7 regularizer """no""" +1029 7 optimizer """adam""" +1029 7 training_loop """owa""" +1029 7 negative_sampler """basic""" +1029 7 evaluator """rankbased""" +1029 8 dataset """kinships""" +1029 8 model """unstructuredmodel""" +1029 8 loss """softplus""" +1029 8 regularizer """no""" +1029 8 optimizer """adam""" +1029 8 training_loop """owa""" +1029 8 negative_sampler """basic""" +1029 8 evaluator """rankbased""" +1029 9 dataset """kinships""" +1029 9 model """unstructuredmodel""" 
+1029 9 loss """softplus""" +1029 9 regularizer """no""" +1029 9 optimizer """adam""" +1029 9 training_loop """owa""" +1029 9 negative_sampler """basic""" +1029 9 evaluator """rankbased""" +1029 10 dataset """kinships""" +1029 10 model """unstructuredmodel""" +1029 10 loss """softplus""" +1029 10 regularizer """no""" +1029 10 optimizer """adam""" +1029 10 training_loop """owa""" +1029 10 negative_sampler """basic""" +1029 10 evaluator """rankbased""" +1029 11 dataset """kinships""" +1029 11 model """unstructuredmodel""" +1029 11 loss """softplus""" +1029 11 regularizer """no""" +1029 11 optimizer """adam""" +1029 11 training_loop """owa""" +1029 11 negative_sampler """basic""" +1029 11 evaluator """rankbased""" +1029 12 dataset """kinships""" +1029 12 model """unstructuredmodel""" +1029 12 loss """softplus""" +1029 12 regularizer """no""" +1029 12 optimizer """adam""" +1029 12 training_loop """owa""" +1029 12 negative_sampler """basic""" +1029 12 evaluator """rankbased""" +1029 13 dataset """kinships""" +1029 13 model """unstructuredmodel""" +1029 13 loss """softplus""" +1029 13 regularizer """no""" +1029 13 optimizer """adam""" +1029 13 training_loop """owa""" +1029 13 negative_sampler """basic""" +1029 13 evaluator """rankbased""" +1029 14 dataset """kinships""" +1029 14 model """unstructuredmodel""" +1029 14 loss """softplus""" +1029 14 regularizer """no""" +1029 14 optimizer """adam""" +1029 14 training_loop """owa""" +1029 14 negative_sampler """basic""" +1029 14 evaluator """rankbased""" +1029 15 dataset """kinships""" +1029 15 model """unstructuredmodel""" +1029 15 loss """softplus""" +1029 15 regularizer """no""" +1029 15 optimizer """adam""" +1029 15 training_loop """owa""" +1029 15 negative_sampler """basic""" +1029 15 evaluator """rankbased""" +1029 16 dataset """kinships""" +1029 16 model """unstructuredmodel""" +1029 16 loss """softplus""" +1029 16 regularizer """no""" +1029 16 optimizer """adam""" +1029 16 training_loop """owa""" +1029 16 
negative_sampler """basic""" +1029 16 evaluator """rankbased""" +1029 17 dataset """kinships""" +1029 17 model """unstructuredmodel""" +1029 17 loss """softplus""" +1029 17 regularizer """no""" +1029 17 optimizer """adam""" +1029 17 training_loop """owa""" +1029 17 negative_sampler """basic""" +1029 17 evaluator """rankbased""" +1029 18 dataset """kinships""" +1029 18 model """unstructuredmodel""" +1029 18 loss """softplus""" +1029 18 regularizer """no""" +1029 18 optimizer """adam""" +1029 18 training_loop """owa""" +1029 18 negative_sampler """basic""" +1029 18 evaluator """rankbased""" +1029 19 dataset """kinships""" +1029 19 model """unstructuredmodel""" +1029 19 loss """softplus""" +1029 19 regularizer """no""" +1029 19 optimizer """adam""" +1029 19 training_loop """owa""" +1029 19 negative_sampler """basic""" +1029 19 evaluator """rankbased""" +1029 20 dataset """kinships""" +1029 20 model """unstructuredmodel""" +1029 20 loss """softplus""" +1029 20 regularizer """no""" +1029 20 optimizer """adam""" +1029 20 training_loop """owa""" +1029 20 negative_sampler """basic""" +1029 20 evaluator """rankbased""" +1029 21 dataset """kinships""" +1029 21 model """unstructuredmodel""" +1029 21 loss """softplus""" +1029 21 regularizer """no""" +1029 21 optimizer """adam""" +1029 21 training_loop """owa""" +1029 21 negative_sampler """basic""" +1029 21 evaluator """rankbased""" +1029 22 dataset """kinships""" +1029 22 model """unstructuredmodel""" +1029 22 loss """softplus""" +1029 22 regularizer """no""" +1029 22 optimizer """adam""" +1029 22 training_loop """owa""" +1029 22 negative_sampler """basic""" +1029 22 evaluator """rankbased""" +1029 23 dataset """kinships""" +1029 23 model """unstructuredmodel""" +1029 23 loss """softplus""" +1029 23 regularizer """no""" +1029 23 optimizer """adam""" +1029 23 training_loop """owa""" +1029 23 negative_sampler """basic""" +1029 23 evaluator """rankbased""" +1029 24 dataset """kinships""" +1029 24 model """unstructuredmodel""" 
+1029 24 loss """softplus""" +1029 24 regularizer """no""" +1029 24 optimizer """adam""" +1029 24 training_loop """owa""" +1029 24 negative_sampler """basic""" +1029 24 evaluator """rankbased""" +1029 25 dataset """kinships""" +1029 25 model """unstructuredmodel""" +1029 25 loss """softplus""" +1029 25 regularizer """no""" +1029 25 optimizer """adam""" +1029 25 training_loop """owa""" +1029 25 negative_sampler """basic""" +1029 25 evaluator """rankbased""" +1029 26 dataset """kinships""" +1029 26 model """unstructuredmodel""" +1029 26 loss """softplus""" +1029 26 regularizer """no""" +1029 26 optimizer """adam""" +1029 26 training_loop """owa""" +1029 26 negative_sampler """basic""" +1029 26 evaluator """rankbased""" +1029 27 dataset """kinships""" +1029 27 model """unstructuredmodel""" +1029 27 loss """softplus""" +1029 27 regularizer """no""" +1029 27 optimizer """adam""" +1029 27 training_loop """owa""" +1029 27 negative_sampler """basic""" +1029 27 evaluator """rankbased""" +1029 28 dataset """kinships""" +1029 28 model """unstructuredmodel""" +1029 28 loss """softplus""" +1029 28 regularizer """no""" +1029 28 optimizer """adam""" +1029 28 training_loop """owa""" +1029 28 negative_sampler """basic""" +1029 28 evaluator """rankbased""" +1029 29 dataset """kinships""" +1029 29 model """unstructuredmodel""" +1029 29 loss """softplus""" +1029 29 regularizer """no""" +1029 29 optimizer """adam""" +1029 29 training_loop """owa""" +1029 29 negative_sampler """basic""" +1029 29 evaluator """rankbased""" +1029 30 dataset """kinships""" +1029 30 model """unstructuredmodel""" +1029 30 loss """softplus""" +1029 30 regularizer """no""" +1029 30 optimizer """adam""" +1029 30 training_loop """owa""" +1029 30 negative_sampler """basic""" +1029 30 evaluator """rankbased""" +1029 31 dataset """kinships""" +1029 31 model """unstructuredmodel""" +1029 31 loss """softplus""" +1029 31 regularizer """no""" +1029 31 optimizer """adam""" +1029 31 training_loop """owa""" +1029 31 
negative_sampler """basic""" +1029 31 evaluator """rankbased""" +1029 32 dataset """kinships""" +1029 32 model """unstructuredmodel""" +1029 32 loss """softplus""" +1029 32 regularizer """no""" +1029 32 optimizer """adam""" +1029 32 training_loop """owa""" +1029 32 negative_sampler """basic""" +1029 32 evaluator """rankbased""" +1029 33 dataset """kinships""" +1029 33 model """unstructuredmodel""" +1029 33 loss """softplus""" +1029 33 regularizer """no""" +1029 33 optimizer """adam""" +1029 33 training_loop """owa""" +1029 33 negative_sampler """basic""" +1029 33 evaluator """rankbased""" +1029 34 dataset """kinships""" +1029 34 model """unstructuredmodel""" +1029 34 loss """softplus""" +1029 34 regularizer """no""" +1029 34 optimizer """adam""" +1029 34 training_loop """owa""" +1029 34 negative_sampler """basic""" +1029 34 evaluator """rankbased""" +1029 35 dataset """kinships""" +1029 35 model """unstructuredmodel""" +1029 35 loss """softplus""" +1029 35 regularizer """no""" +1029 35 optimizer """adam""" +1029 35 training_loop """owa""" +1029 35 negative_sampler """basic""" +1029 35 evaluator """rankbased""" +1029 36 dataset """kinships""" +1029 36 model """unstructuredmodel""" +1029 36 loss """softplus""" +1029 36 regularizer """no""" +1029 36 optimizer """adam""" +1029 36 training_loop """owa""" +1029 36 negative_sampler """basic""" +1029 36 evaluator """rankbased""" +1029 37 dataset """kinships""" +1029 37 model """unstructuredmodel""" +1029 37 loss """softplus""" +1029 37 regularizer """no""" +1029 37 optimizer """adam""" +1029 37 training_loop """owa""" +1029 37 negative_sampler """basic""" +1029 37 evaluator """rankbased""" +1029 38 dataset """kinships""" +1029 38 model """unstructuredmodel""" +1029 38 loss """softplus""" +1029 38 regularizer """no""" +1029 38 optimizer """adam""" +1029 38 training_loop """owa""" +1029 38 negative_sampler """basic""" +1029 38 evaluator """rankbased""" +1029 39 dataset """kinships""" +1029 39 model """unstructuredmodel""" 
+1029 39 loss """softplus""" +1029 39 regularizer """no""" +1029 39 optimizer """adam""" +1029 39 training_loop """owa""" +1029 39 negative_sampler """basic""" +1029 39 evaluator """rankbased""" +1029 40 dataset """kinships""" +1029 40 model """unstructuredmodel""" +1029 40 loss """softplus""" +1029 40 regularizer """no""" +1029 40 optimizer """adam""" +1029 40 training_loop """owa""" +1029 40 negative_sampler """basic""" +1029 40 evaluator """rankbased""" +1029 41 dataset """kinships""" +1029 41 model """unstructuredmodel""" +1029 41 loss """softplus""" +1029 41 regularizer """no""" +1029 41 optimizer """adam""" +1029 41 training_loop """owa""" +1029 41 negative_sampler """basic""" +1029 41 evaluator """rankbased""" +1029 42 dataset """kinships""" +1029 42 model """unstructuredmodel""" +1029 42 loss """softplus""" +1029 42 regularizer """no""" +1029 42 optimizer """adam""" +1029 42 training_loop """owa""" +1029 42 negative_sampler """basic""" +1029 42 evaluator """rankbased""" +1029 43 dataset """kinships""" +1029 43 model """unstructuredmodel""" +1029 43 loss """softplus""" +1029 43 regularizer """no""" +1029 43 optimizer """adam""" +1029 43 training_loop """owa""" +1029 43 negative_sampler """basic""" +1029 43 evaluator """rankbased""" +1029 44 dataset """kinships""" +1029 44 model """unstructuredmodel""" +1029 44 loss """softplus""" +1029 44 regularizer """no""" +1029 44 optimizer """adam""" +1029 44 training_loop """owa""" +1029 44 negative_sampler """basic""" +1029 44 evaluator """rankbased""" +1029 45 dataset """kinships""" +1029 45 model """unstructuredmodel""" +1029 45 loss """softplus""" +1029 45 regularizer """no""" +1029 45 optimizer """adam""" +1029 45 training_loop """owa""" +1029 45 negative_sampler """basic""" +1029 45 evaluator """rankbased""" +1029 46 dataset """kinships""" +1029 46 model """unstructuredmodel""" +1029 46 loss """softplus""" +1029 46 regularizer """no""" +1029 46 optimizer """adam""" +1029 46 training_loop """owa""" +1029 46 
negative_sampler """basic""" +1029 46 evaluator """rankbased""" +1029 47 dataset """kinships""" +1029 47 model """unstructuredmodel""" +1029 47 loss """softplus""" +1029 47 regularizer """no""" +1029 47 optimizer """adam""" +1029 47 training_loop """owa""" +1029 47 negative_sampler """basic""" +1029 47 evaluator """rankbased""" +1029 48 dataset """kinships""" +1029 48 model """unstructuredmodel""" +1029 48 loss """softplus""" +1029 48 regularizer """no""" +1029 48 optimizer """adam""" +1029 48 training_loop """owa""" +1029 48 negative_sampler """basic""" +1029 48 evaluator """rankbased""" +1029 49 dataset """kinships""" +1029 49 model """unstructuredmodel""" +1029 49 loss """softplus""" +1029 49 regularizer """no""" +1029 49 optimizer """adam""" +1029 49 training_loop """owa""" +1029 49 negative_sampler """basic""" +1029 49 evaluator """rankbased""" +1029 50 dataset """kinships""" +1029 50 model """unstructuredmodel""" +1029 50 loss """softplus""" +1029 50 regularizer """no""" +1029 50 optimizer """adam""" +1029 50 training_loop """owa""" +1029 50 negative_sampler """basic""" +1029 50 evaluator """rankbased""" +1029 51 dataset """kinships""" +1029 51 model """unstructuredmodel""" +1029 51 loss """softplus""" +1029 51 regularizer """no""" +1029 51 optimizer """adam""" +1029 51 training_loop """owa""" +1029 51 negative_sampler """basic""" +1029 51 evaluator """rankbased""" +1029 52 dataset """kinships""" +1029 52 model """unstructuredmodel""" +1029 52 loss """softplus""" +1029 52 regularizer """no""" +1029 52 optimizer """adam""" +1029 52 training_loop """owa""" +1029 52 negative_sampler """basic""" +1029 52 evaluator """rankbased""" +1029 53 dataset """kinships""" +1029 53 model """unstructuredmodel""" +1029 53 loss """softplus""" +1029 53 regularizer """no""" +1029 53 optimizer """adam""" +1029 53 training_loop """owa""" +1029 53 negative_sampler """basic""" +1029 53 evaluator """rankbased""" +1029 54 dataset """kinships""" +1029 54 model """unstructuredmodel""" 
+1029 54 loss """softplus""" +1029 54 regularizer """no""" +1029 54 optimizer """adam""" +1029 54 training_loop """owa""" +1029 54 negative_sampler """basic""" +1029 54 evaluator """rankbased""" +1029 55 dataset """kinships""" +1029 55 model """unstructuredmodel""" +1029 55 loss """softplus""" +1029 55 regularizer """no""" +1029 55 optimizer """adam""" +1029 55 training_loop """owa""" +1029 55 negative_sampler """basic""" +1029 55 evaluator """rankbased""" +1029 56 dataset """kinships""" +1029 56 model """unstructuredmodel""" +1029 56 loss """softplus""" +1029 56 regularizer """no""" +1029 56 optimizer """adam""" +1029 56 training_loop """owa""" +1029 56 negative_sampler """basic""" +1029 56 evaluator """rankbased""" +1029 57 dataset """kinships""" +1029 57 model """unstructuredmodel""" +1029 57 loss """softplus""" +1029 57 regularizer """no""" +1029 57 optimizer """adam""" +1029 57 training_loop """owa""" +1029 57 negative_sampler """basic""" +1029 57 evaluator """rankbased""" +1029 58 dataset """kinships""" +1029 58 model """unstructuredmodel""" +1029 58 loss """softplus""" +1029 58 regularizer """no""" +1029 58 optimizer """adam""" +1029 58 training_loop """owa""" +1029 58 negative_sampler """basic""" +1029 58 evaluator """rankbased""" +1029 59 dataset """kinships""" +1029 59 model """unstructuredmodel""" +1029 59 loss """softplus""" +1029 59 regularizer """no""" +1029 59 optimizer """adam""" +1029 59 training_loop """owa""" +1029 59 negative_sampler """basic""" +1029 59 evaluator """rankbased""" +1029 60 dataset """kinships""" +1029 60 model """unstructuredmodel""" +1029 60 loss """softplus""" +1029 60 regularizer """no""" +1029 60 optimizer """adam""" +1029 60 training_loop """owa""" +1029 60 negative_sampler """basic""" +1029 60 evaluator """rankbased""" +1029 61 dataset """kinships""" +1029 61 model """unstructuredmodel""" +1029 61 loss """softplus""" +1029 61 regularizer """no""" +1029 61 optimizer """adam""" +1029 61 training_loop """owa""" +1029 61 
negative_sampler """basic""" +1029 61 evaluator """rankbased""" +1029 62 dataset """kinships""" +1029 62 model """unstructuredmodel""" +1029 62 loss """softplus""" +1029 62 regularizer """no""" +1029 62 optimizer """adam""" +1029 62 training_loop """owa""" +1029 62 negative_sampler """basic""" +1029 62 evaluator """rankbased""" +1029 63 dataset """kinships""" +1029 63 model """unstructuredmodel""" +1029 63 loss """softplus""" +1029 63 regularizer """no""" +1029 63 optimizer """adam""" +1029 63 training_loop """owa""" +1029 63 negative_sampler """basic""" +1029 63 evaluator """rankbased""" +1029 64 dataset """kinships""" +1029 64 model """unstructuredmodel""" +1029 64 loss """softplus""" +1029 64 regularizer """no""" +1029 64 optimizer """adam""" +1029 64 training_loop """owa""" +1029 64 negative_sampler """basic""" +1029 64 evaluator """rankbased""" +1029 65 dataset """kinships""" +1029 65 model """unstructuredmodel""" +1029 65 loss """softplus""" +1029 65 regularizer """no""" +1029 65 optimizer """adam""" +1029 65 training_loop """owa""" +1029 65 negative_sampler """basic""" +1029 65 evaluator """rankbased""" +1029 66 dataset """kinships""" +1029 66 model """unstructuredmodel""" +1029 66 loss """softplus""" +1029 66 regularizer """no""" +1029 66 optimizer """adam""" +1029 66 training_loop """owa""" +1029 66 negative_sampler """basic""" +1029 66 evaluator """rankbased""" +1029 67 dataset """kinships""" +1029 67 model """unstructuredmodel""" +1029 67 loss """softplus""" +1029 67 regularizer """no""" +1029 67 optimizer """adam""" +1029 67 training_loop """owa""" +1029 67 negative_sampler """basic""" +1029 67 evaluator """rankbased""" +1029 68 dataset """kinships""" +1029 68 model """unstructuredmodel""" +1029 68 loss """softplus""" +1029 68 regularizer """no""" +1029 68 optimizer """adam""" +1029 68 training_loop """owa""" +1029 68 negative_sampler """basic""" +1029 68 evaluator """rankbased""" +1029 69 dataset """kinships""" +1029 69 model """unstructuredmodel""" 
+1029 69 loss """softplus""" +1029 69 regularizer """no""" +1029 69 optimizer """adam""" +1029 69 training_loop """owa""" +1029 69 negative_sampler """basic""" +1029 69 evaluator """rankbased""" +1029 70 dataset """kinships""" +1029 70 model """unstructuredmodel""" +1029 70 loss """softplus""" +1029 70 regularizer """no""" +1029 70 optimizer """adam""" +1029 70 training_loop """owa""" +1029 70 negative_sampler """basic""" +1029 70 evaluator """rankbased""" +1029 71 dataset """kinships""" +1029 71 model """unstructuredmodel""" +1029 71 loss """softplus""" +1029 71 regularizer """no""" +1029 71 optimizer """adam""" +1029 71 training_loop """owa""" +1029 71 negative_sampler """basic""" +1029 71 evaluator """rankbased""" +1029 72 dataset """kinships""" +1029 72 model """unstructuredmodel""" +1029 72 loss """softplus""" +1029 72 regularizer """no""" +1029 72 optimizer """adam""" +1029 72 training_loop """owa""" +1029 72 negative_sampler """basic""" +1029 72 evaluator """rankbased""" +1029 73 dataset """kinships""" +1029 73 model """unstructuredmodel""" +1029 73 loss """softplus""" +1029 73 regularizer """no""" +1029 73 optimizer """adam""" +1029 73 training_loop """owa""" +1029 73 negative_sampler """basic""" +1029 73 evaluator """rankbased""" +1029 74 dataset """kinships""" +1029 74 model """unstructuredmodel""" +1029 74 loss """softplus""" +1029 74 regularizer """no""" +1029 74 optimizer """adam""" +1029 74 training_loop """owa""" +1029 74 negative_sampler """basic""" +1029 74 evaluator """rankbased""" +1029 75 dataset """kinships""" +1029 75 model """unstructuredmodel""" +1029 75 loss """softplus""" +1029 75 regularizer """no""" +1029 75 optimizer """adam""" +1029 75 training_loop """owa""" +1029 75 negative_sampler """basic""" +1029 75 evaluator """rankbased""" +1029 76 dataset """kinships""" +1029 76 model """unstructuredmodel""" +1029 76 loss """softplus""" +1029 76 regularizer """no""" +1029 76 optimizer """adam""" +1029 76 training_loop """owa""" +1029 76 
negative_sampler """basic""" +1029 76 evaluator """rankbased""" +1029 77 dataset """kinships""" +1029 77 model """unstructuredmodel""" +1029 77 loss """softplus""" +1029 77 regularizer """no""" +1029 77 optimizer """adam""" +1029 77 training_loop """owa""" +1029 77 negative_sampler """basic""" +1029 77 evaluator """rankbased""" +1029 78 dataset """kinships""" +1029 78 model """unstructuredmodel""" +1029 78 loss """softplus""" +1029 78 regularizer """no""" +1029 78 optimizer """adam""" +1029 78 training_loop """owa""" +1029 78 negative_sampler """basic""" +1029 78 evaluator """rankbased""" +1029 79 dataset """kinships""" +1029 79 model """unstructuredmodel""" +1029 79 loss """softplus""" +1029 79 regularizer """no""" +1029 79 optimizer """adam""" +1029 79 training_loop """owa""" +1029 79 negative_sampler """basic""" +1029 79 evaluator """rankbased""" +1029 80 dataset """kinships""" +1029 80 model """unstructuredmodel""" +1029 80 loss """softplus""" +1029 80 regularizer """no""" +1029 80 optimizer """adam""" +1029 80 training_loop """owa""" +1029 80 negative_sampler """basic""" +1029 80 evaluator """rankbased""" +1029 81 dataset """kinships""" +1029 81 model """unstructuredmodel""" +1029 81 loss """softplus""" +1029 81 regularizer """no""" +1029 81 optimizer """adam""" +1029 81 training_loop """owa""" +1029 81 negative_sampler """basic""" +1029 81 evaluator """rankbased""" +1029 82 dataset """kinships""" +1029 82 model """unstructuredmodel""" +1029 82 loss """softplus""" +1029 82 regularizer """no""" +1029 82 optimizer """adam""" +1029 82 training_loop """owa""" +1029 82 negative_sampler """basic""" +1029 82 evaluator """rankbased""" +1029 83 dataset """kinships""" +1029 83 model """unstructuredmodel""" +1029 83 loss """softplus""" +1029 83 regularizer """no""" +1029 83 optimizer """adam""" +1029 83 training_loop """owa""" +1029 83 negative_sampler """basic""" +1029 83 evaluator """rankbased""" +1029 84 dataset """kinships""" +1029 84 model """unstructuredmodel""" 
+1029 84 loss """softplus""" +1029 84 regularizer """no""" +1029 84 optimizer """adam""" +1029 84 training_loop """owa""" +1029 84 negative_sampler """basic""" +1029 84 evaluator """rankbased""" +1029 85 dataset """kinships""" +1029 85 model """unstructuredmodel""" +1029 85 loss """softplus""" +1029 85 regularizer """no""" +1029 85 optimizer """adam""" +1029 85 training_loop """owa""" +1029 85 negative_sampler """basic""" +1029 85 evaluator """rankbased""" +1029 86 dataset """kinships""" +1029 86 model """unstructuredmodel""" +1029 86 loss """softplus""" +1029 86 regularizer """no""" +1029 86 optimizer """adam""" +1029 86 training_loop """owa""" +1029 86 negative_sampler """basic""" +1029 86 evaluator """rankbased""" +1029 87 dataset """kinships""" +1029 87 model """unstructuredmodel""" +1029 87 loss """softplus""" +1029 87 regularizer """no""" +1029 87 optimizer """adam""" +1029 87 training_loop """owa""" +1029 87 negative_sampler """basic""" +1029 87 evaluator """rankbased""" +1029 88 dataset """kinships""" +1029 88 model """unstructuredmodel""" +1029 88 loss """softplus""" +1029 88 regularizer """no""" +1029 88 optimizer """adam""" +1029 88 training_loop """owa""" +1029 88 negative_sampler """basic""" +1029 88 evaluator """rankbased""" +1029 89 dataset """kinships""" +1029 89 model """unstructuredmodel""" +1029 89 loss """softplus""" +1029 89 regularizer """no""" +1029 89 optimizer """adam""" +1029 89 training_loop """owa""" +1029 89 negative_sampler """basic""" +1029 89 evaluator """rankbased""" +1029 90 dataset """kinships""" +1029 90 model """unstructuredmodel""" +1029 90 loss """softplus""" +1029 90 regularizer """no""" +1029 90 optimizer """adam""" +1029 90 training_loop """owa""" +1029 90 negative_sampler """basic""" +1029 90 evaluator """rankbased""" +1029 91 dataset """kinships""" +1029 91 model """unstructuredmodel""" +1029 91 loss """softplus""" +1029 91 regularizer """no""" +1029 91 optimizer """adam""" +1029 91 training_loop """owa""" +1029 91 
negative_sampler """basic""" +1029 91 evaluator """rankbased""" +1029 92 dataset """kinships""" +1029 92 model """unstructuredmodel""" +1029 92 loss """softplus""" +1029 92 regularizer """no""" +1029 92 optimizer """adam""" +1029 92 training_loop """owa""" +1029 92 negative_sampler """basic""" +1029 92 evaluator """rankbased""" +1029 93 dataset """kinships""" +1029 93 model """unstructuredmodel""" +1029 93 loss """softplus""" +1029 93 regularizer """no""" +1029 93 optimizer """adam""" +1029 93 training_loop """owa""" +1029 93 negative_sampler """basic""" +1029 93 evaluator """rankbased""" +1029 94 dataset """kinships""" +1029 94 model """unstructuredmodel""" +1029 94 loss """softplus""" +1029 94 regularizer """no""" +1029 94 optimizer """adam""" +1029 94 training_loop """owa""" +1029 94 negative_sampler """basic""" +1029 94 evaluator """rankbased""" +1029 95 dataset """kinships""" +1029 95 model """unstructuredmodel""" +1029 95 loss """softplus""" +1029 95 regularizer """no""" +1029 95 optimizer """adam""" +1029 95 training_loop """owa""" +1029 95 negative_sampler """basic""" +1029 95 evaluator """rankbased""" +1029 96 dataset """kinships""" +1029 96 model """unstructuredmodel""" +1029 96 loss """softplus""" +1029 96 regularizer """no""" +1029 96 optimizer """adam""" +1029 96 training_loop """owa""" +1029 96 negative_sampler """basic""" +1029 96 evaluator """rankbased""" +1029 97 dataset """kinships""" +1029 97 model """unstructuredmodel""" +1029 97 loss """softplus""" +1029 97 regularizer """no""" +1029 97 optimizer """adam""" +1029 97 training_loop """owa""" +1029 97 negative_sampler """basic""" +1029 97 evaluator """rankbased""" +1029 98 dataset """kinships""" +1029 98 model """unstructuredmodel""" +1029 98 loss """softplus""" +1029 98 regularizer """no""" +1029 98 optimizer """adam""" +1029 98 training_loop """owa""" +1029 98 negative_sampler """basic""" +1029 98 evaluator """rankbased""" +1029 99 dataset """kinships""" +1029 99 model """unstructuredmodel""" 
+1029 99 loss """softplus""" +1029 99 regularizer """no""" +1029 99 optimizer """adam""" +1029 99 training_loop """owa""" +1029 99 negative_sampler """basic""" +1029 99 evaluator """rankbased""" +1029 100 dataset """kinships""" +1029 100 model """unstructuredmodel""" +1029 100 loss """softplus""" +1029 100 regularizer """no""" +1029 100 optimizer """adam""" +1029 100 training_loop """owa""" +1029 100 negative_sampler """basic""" +1029 100 evaluator """rankbased""" +1030 1 model.embedding_dim 0.0 +1030 1 model.scoring_fct_norm 2.0 +1030 1 loss.margin 3.294723076802766 +1030 1 loss.adversarial_temperature 0.7912205835423586 +1030 1 optimizer.lr 0.0011438700776511873 +1030 1 negative_sampler.num_negs_per_pos 3.0 +1030 1 training.batch_size 1.0 +1030 2 model.embedding_dim 2.0 +1030 2 model.scoring_fct_norm 2.0 +1030 2 loss.margin 17.592842658178387 +1030 2 loss.adversarial_temperature 0.5002859391879184 +1030 2 optimizer.lr 0.06096552610404024 +1030 2 negative_sampler.num_negs_per_pos 69.0 +1030 2 training.batch_size 0.0 +1030 3 model.embedding_dim 1.0 +1030 3 model.scoring_fct_norm 1.0 +1030 3 loss.margin 29.926144451353082 +1030 3 loss.adversarial_temperature 0.4147884660928426 +1030 3 optimizer.lr 0.002835124063851821 +1030 3 negative_sampler.num_negs_per_pos 84.0 +1030 3 training.batch_size 0.0 +1030 4 model.embedding_dim 2.0 +1030 4 model.scoring_fct_norm 1.0 +1030 4 loss.margin 5.022395059187289 +1030 4 loss.adversarial_temperature 0.2663833578945086 +1030 4 optimizer.lr 0.007271182166696507 +1030 4 negative_sampler.num_negs_per_pos 1.0 +1030 4 training.batch_size 2.0 +1030 5 model.embedding_dim 0.0 +1030 5 model.scoring_fct_norm 1.0 +1030 5 loss.margin 6.816902866224374 +1030 5 loss.adversarial_temperature 0.6577057762946252 +1030 5 optimizer.lr 0.0012004490858344 +1030 5 negative_sampler.num_negs_per_pos 17.0 +1030 5 training.batch_size 0.0 +1030 6 model.embedding_dim 0.0 +1030 6 model.scoring_fct_norm 2.0 +1030 6 loss.margin 16.172001267477068 +1030 6 
loss.adversarial_temperature 0.7606653543013495 +1030 6 optimizer.lr 0.03423162993820374 +1030 6 negative_sampler.num_negs_per_pos 30.0 +1030 6 training.batch_size 1.0 +1030 7 model.embedding_dim 0.0 +1030 7 model.scoring_fct_norm 2.0 +1030 7 loss.margin 17.489756868013288 +1030 7 loss.adversarial_temperature 0.941724136406145 +1030 7 optimizer.lr 0.02786855529681356 +1030 7 negative_sampler.num_negs_per_pos 57.0 +1030 7 training.batch_size 2.0 +1030 8 model.embedding_dim 1.0 +1030 8 model.scoring_fct_norm 1.0 +1030 8 loss.margin 6.084802607972188 +1030 8 loss.adversarial_temperature 0.9674375543761503 +1030 8 optimizer.lr 0.018719331067177734 +1030 8 negative_sampler.num_negs_per_pos 39.0 +1030 8 training.batch_size 2.0 +1030 9 model.embedding_dim 0.0 +1030 9 model.scoring_fct_norm 2.0 +1030 9 loss.margin 17.41489536440171 +1030 9 loss.adversarial_temperature 0.453821766761377 +1030 9 optimizer.lr 0.03063972395215961 +1030 9 negative_sampler.num_negs_per_pos 57.0 +1030 9 training.batch_size 2.0 +1030 10 model.embedding_dim 0.0 +1030 10 model.scoring_fct_norm 2.0 +1030 10 loss.margin 12.867012506567262 +1030 10 loss.adversarial_temperature 0.36470238909866115 +1030 10 optimizer.lr 0.0011599675234420822 +1030 10 negative_sampler.num_negs_per_pos 59.0 +1030 10 training.batch_size 1.0 +1030 11 model.embedding_dim 1.0 +1030 11 model.scoring_fct_norm 1.0 +1030 11 loss.margin 6.95751980444518 +1030 11 loss.adversarial_temperature 0.5850016126821286 +1030 11 optimizer.lr 0.07474738380967176 +1030 11 negative_sampler.num_negs_per_pos 11.0 +1030 11 training.batch_size 2.0 +1030 12 model.embedding_dim 0.0 +1030 12 model.scoring_fct_norm 2.0 +1030 12 loss.margin 17.563815249220987 +1030 12 loss.adversarial_temperature 0.5063050283069483 +1030 12 optimizer.lr 0.004408299014378173 +1030 12 negative_sampler.num_negs_per_pos 3.0 +1030 12 training.batch_size 0.0 +1030 13 model.embedding_dim 1.0 +1030 13 model.scoring_fct_norm 2.0 +1030 13 loss.margin 26.93691752566969 +1030 13 
loss.adversarial_temperature 0.9938017050893516 +1030 13 optimizer.lr 0.023795360556607286 +1030 13 negative_sampler.num_negs_per_pos 81.0 +1030 13 training.batch_size 0.0 +1030 14 model.embedding_dim 1.0 +1030 14 model.scoring_fct_norm 1.0 +1030 14 loss.margin 10.14106855476347 +1030 14 loss.adversarial_temperature 0.2122277986076993 +1030 14 optimizer.lr 0.0011892191321288559 +1030 14 negative_sampler.num_negs_per_pos 69.0 +1030 14 training.batch_size 2.0 +1030 15 model.embedding_dim 0.0 +1030 15 model.scoring_fct_norm 2.0 +1030 15 loss.margin 7.574121261085147 +1030 15 loss.adversarial_temperature 0.35459559457093326 +1030 15 optimizer.lr 0.00877723126786267 +1030 15 negative_sampler.num_negs_per_pos 91.0 +1030 15 training.batch_size 0.0 +1030 16 model.embedding_dim 1.0 +1030 16 model.scoring_fct_norm 1.0 +1030 16 loss.margin 13.785442700180026 +1030 16 loss.adversarial_temperature 0.27621205303443647 +1030 16 optimizer.lr 0.03983959771155975 +1030 16 negative_sampler.num_negs_per_pos 9.0 +1030 16 training.batch_size 2.0 +1030 17 model.embedding_dim 0.0 +1030 17 model.scoring_fct_norm 1.0 +1030 17 loss.margin 19.15641719755949 +1030 17 loss.adversarial_temperature 0.5870309527302084 +1030 17 optimizer.lr 0.008259467796675224 +1030 17 negative_sampler.num_negs_per_pos 35.0 +1030 17 training.batch_size 0.0 +1030 18 model.embedding_dim 0.0 +1030 18 model.scoring_fct_norm 2.0 +1030 18 loss.margin 14.438383708062368 +1030 18 loss.adversarial_temperature 0.5531207873162391 +1030 18 optimizer.lr 0.0031284702976632365 +1030 18 negative_sampler.num_negs_per_pos 77.0 +1030 18 training.batch_size 0.0 +1030 19 model.embedding_dim 2.0 +1030 19 model.scoring_fct_norm 2.0 +1030 19 loss.margin 13.941855452024404 +1030 19 loss.adversarial_temperature 0.23217193382284212 +1030 19 optimizer.lr 0.0010144248090458992 +1030 19 negative_sampler.num_negs_per_pos 65.0 +1030 19 training.batch_size 1.0 +1030 20 model.embedding_dim 2.0 +1030 20 model.scoring_fct_norm 2.0 +1030 20 
loss.margin 23.177953023910387 +1030 20 loss.adversarial_temperature 0.4047175624926125 +1030 20 optimizer.lr 0.0010719817100164402 +1030 20 negative_sampler.num_negs_per_pos 90.0 +1030 20 training.batch_size 2.0 +1030 21 model.embedding_dim 1.0 +1030 21 model.scoring_fct_norm 1.0 +1030 21 loss.margin 8.76183453714225 +1030 21 loss.adversarial_temperature 0.5696222213580826 +1030 21 optimizer.lr 0.033346773504593086 +1030 21 negative_sampler.num_negs_per_pos 38.0 +1030 21 training.batch_size 2.0 +1030 22 model.embedding_dim 1.0 +1030 22 model.scoring_fct_norm 1.0 +1030 22 loss.margin 29.87447812972061 +1030 22 loss.adversarial_temperature 0.45710192981677567 +1030 22 optimizer.lr 0.001985648404641135 +1030 22 negative_sampler.num_negs_per_pos 30.0 +1030 22 training.batch_size 1.0 +1030 23 model.embedding_dim 1.0 +1030 23 model.scoring_fct_norm 2.0 +1030 23 loss.margin 11.338022763369382 +1030 23 loss.adversarial_temperature 0.299719389883105 +1030 23 optimizer.lr 0.03582980965573986 +1030 23 negative_sampler.num_negs_per_pos 96.0 +1030 23 training.batch_size 2.0 +1030 24 model.embedding_dim 0.0 +1030 24 model.scoring_fct_norm 1.0 +1030 24 loss.margin 5.414549903404392 +1030 24 loss.adversarial_temperature 0.8063406482961182 +1030 24 optimizer.lr 0.004468067159399164 +1030 24 negative_sampler.num_negs_per_pos 76.0 +1030 24 training.batch_size 0.0 +1030 25 model.embedding_dim 2.0 +1030 25 model.scoring_fct_norm 2.0 +1030 25 loss.margin 22.32714992939112 +1030 25 loss.adversarial_temperature 0.3441834194138638 +1030 25 optimizer.lr 0.0015920176510681753 +1030 25 negative_sampler.num_negs_per_pos 61.0 +1030 25 training.batch_size 1.0 +1030 26 model.embedding_dim 2.0 +1030 26 model.scoring_fct_norm 1.0 +1030 26 loss.margin 15.600419787236332 +1030 26 loss.adversarial_temperature 0.18564661647758174 +1030 26 optimizer.lr 0.0044984044059977885 +1030 26 negative_sampler.num_negs_per_pos 59.0 +1030 26 training.batch_size 0.0 +1030 27 model.embedding_dim 2.0 +1030 27 
model.scoring_fct_norm 2.0 +1030 27 loss.margin 8.321127136766547 +1030 27 loss.adversarial_temperature 0.4446179657131296 +1030 27 optimizer.lr 0.0017109058460744655 +1030 27 negative_sampler.num_negs_per_pos 58.0 +1030 27 training.batch_size 0.0 +1030 28 model.embedding_dim 0.0 +1030 28 model.scoring_fct_norm 2.0 +1030 28 loss.margin 11.25413187777764 +1030 28 loss.adversarial_temperature 0.81032974405264 +1030 28 optimizer.lr 0.0611856794889141 +1030 28 negative_sampler.num_negs_per_pos 88.0 +1030 28 training.batch_size 0.0 +1030 29 model.embedding_dim 0.0 +1030 29 model.scoring_fct_norm 2.0 +1030 29 loss.margin 12.2550091907336 +1030 29 loss.adversarial_temperature 0.6990983007030525 +1030 29 optimizer.lr 0.002659087040891865 +1030 29 negative_sampler.num_negs_per_pos 80.0 +1030 29 training.batch_size 2.0 +1030 30 model.embedding_dim 2.0 +1030 30 model.scoring_fct_norm 2.0 +1030 30 loss.margin 27.710843901959908 +1030 30 loss.adversarial_temperature 0.45647040066387434 +1030 30 optimizer.lr 0.05713093782635319 +1030 30 negative_sampler.num_negs_per_pos 82.0 +1030 30 training.batch_size 2.0 +1030 31 model.embedding_dim 2.0 +1030 31 model.scoring_fct_norm 2.0 +1030 31 loss.margin 25.576221285339464 +1030 31 loss.adversarial_temperature 0.5200579216679598 +1030 31 optimizer.lr 0.006885630724567766 +1030 31 negative_sampler.num_negs_per_pos 32.0 +1030 31 training.batch_size 0.0 +1030 32 model.embedding_dim 2.0 +1030 32 model.scoring_fct_norm 2.0 +1030 32 loss.margin 14.005954778937298 +1030 32 loss.adversarial_temperature 0.31087295060479986 +1030 32 optimizer.lr 0.0011778128313858231 +1030 32 negative_sampler.num_negs_per_pos 98.0 +1030 32 training.batch_size 2.0 +1030 33 model.embedding_dim 2.0 +1030 33 model.scoring_fct_norm 1.0 +1030 33 loss.margin 4.3872254460515165 +1030 33 loss.adversarial_temperature 0.42944562662675245 +1030 33 optimizer.lr 0.051116061625977545 +1030 33 negative_sampler.num_negs_per_pos 84.0 +1030 33 training.batch_size 0.0 +1030 34 
model.embedding_dim 2.0 +1030 34 model.scoring_fct_norm 2.0 +1030 34 loss.margin 21.69757558795355 +1030 34 loss.adversarial_temperature 0.44654899674265314 +1030 34 optimizer.lr 0.0042059098569800215 +1030 34 negative_sampler.num_negs_per_pos 56.0 +1030 34 training.batch_size 2.0 +1030 35 model.embedding_dim 2.0 +1030 35 model.scoring_fct_norm 2.0 +1030 35 loss.margin 9.533675214074107 +1030 35 loss.adversarial_temperature 0.34345024993853035 +1030 35 optimizer.lr 0.0015462696294786314 +1030 35 negative_sampler.num_negs_per_pos 77.0 +1030 35 training.batch_size 1.0 +1030 36 model.embedding_dim 0.0 +1030 36 model.scoring_fct_norm 2.0 +1030 36 loss.margin 1.9188622788396559 +1030 36 loss.adversarial_temperature 0.24389707875743113 +1030 36 optimizer.lr 0.0026585349063658844 +1030 36 negative_sampler.num_negs_per_pos 45.0 +1030 36 training.batch_size 0.0 +1030 37 model.embedding_dim 1.0 +1030 37 model.scoring_fct_norm 2.0 +1030 37 loss.margin 7.543793670630268 +1030 37 loss.adversarial_temperature 0.5893171622545734 +1030 37 optimizer.lr 0.0016207085873118999 +1030 37 negative_sampler.num_negs_per_pos 6.0 +1030 37 training.batch_size 2.0 +1030 38 model.embedding_dim 0.0 +1030 38 model.scoring_fct_norm 2.0 +1030 38 loss.margin 19.005401415119074 +1030 38 loss.adversarial_temperature 0.7892743477749702 +1030 38 optimizer.lr 0.05392489188033557 +1030 38 negative_sampler.num_negs_per_pos 51.0 +1030 38 training.batch_size 2.0 +1030 39 model.embedding_dim 1.0 +1030 39 model.scoring_fct_norm 1.0 +1030 39 loss.margin 9.514711980219346 +1030 39 loss.adversarial_temperature 0.5558104981623038 +1030 39 optimizer.lr 0.08232959271709892 +1030 39 negative_sampler.num_negs_per_pos 4.0 +1030 39 training.batch_size 1.0 +1030 40 model.embedding_dim 1.0 +1030 40 model.scoring_fct_norm 2.0 +1030 40 loss.margin 18.957043437589242 +1030 40 loss.adversarial_temperature 0.3356561188772998 +1030 40 optimizer.lr 0.0055548350911092444 +1030 40 negative_sampler.num_negs_per_pos 2.0 +1030 40 
training.batch_size 2.0 +1030 41 model.embedding_dim 2.0 +1030 41 model.scoring_fct_norm 1.0 +1030 41 loss.margin 10.03289314037857 +1030 41 loss.adversarial_temperature 0.9833689060449972 +1030 41 optimizer.lr 0.06529193222306484 +1030 41 negative_sampler.num_negs_per_pos 81.0 +1030 41 training.batch_size 2.0 +1030 42 model.embedding_dim 1.0 +1030 42 model.scoring_fct_norm 1.0 +1030 42 loss.margin 17.139503897272903 +1030 42 loss.adversarial_temperature 0.6566523115043715 +1030 42 optimizer.lr 0.004685114668936127 +1030 42 negative_sampler.num_negs_per_pos 86.0 +1030 42 training.batch_size 1.0 +1030 43 model.embedding_dim 1.0 +1030 43 model.scoring_fct_norm 1.0 +1030 43 loss.margin 12.627761185775606 +1030 43 loss.adversarial_temperature 0.774722382476042 +1030 43 optimizer.lr 0.07083620415179001 +1030 43 negative_sampler.num_negs_per_pos 51.0 +1030 43 training.batch_size 2.0 +1030 44 model.embedding_dim 0.0 +1030 44 model.scoring_fct_norm 1.0 +1030 44 loss.margin 3.9764019406717237 +1030 44 loss.adversarial_temperature 0.6518218532998123 +1030 44 optimizer.lr 0.008802008267979845 +1030 44 negative_sampler.num_negs_per_pos 80.0 +1030 44 training.batch_size 0.0 +1030 45 model.embedding_dim 2.0 +1030 45 model.scoring_fct_norm 2.0 +1030 45 loss.margin 25.19408634991426 +1030 45 loss.adversarial_temperature 0.1784790722795378 +1030 45 optimizer.lr 0.0016178271994568107 +1030 45 negative_sampler.num_negs_per_pos 44.0 +1030 45 training.batch_size 0.0 +1030 46 model.embedding_dim 0.0 +1030 46 model.scoring_fct_norm 2.0 +1030 46 loss.margin 2.4668540634059015 +1030 46 loss.adversarial_temperature 0.976839531966793 +1030 46 optimizer.lr 0.003214725167278094 +1030 46 negative_sampler.num_negs_per_pos 70.0 +1030 46 training.batch_size 1.0 +1030 47 model.embedding_dim 0.0 +1030 47 model.scoring_fct_norm 1.0 +1030 47 loss.margin 9.234021144116753 +1030 47 loss.adversarial_temperature 0.8515810799569411 +1030 47 optimizer.lr 0.0016748181018268419 +1030 47 
negative_sampler.num_negs_per_pos 83.0 +1030 47 training.batch_size 2.0 +1030 48 model.embedding_dim 1.0 +1030 48 model.scoring_fct_norm 1.0 +1030 48 loss.margin 6.681573959797283 +1030 48 loss.adversarial_temperature 0.8010272108537352 +1030 48 optimizer.lr 0.035035091458711635 +1030 48 negative_sampler.num_negs_per_pos 23.0 +1030 48 training.batch_size 0.0 +1030 49 model.embedding_dim 2.0 +1030 49 model.scoring_fct_norm 1.0 +1030 49 loss.margin 7.298740769096585 +1030 49 loss.adversarial_temperature 0.37948152700673765 +1030 49 optimizer.lr 0.010355964002544477 +1030 49 negative_sampler.num_negs_per_pos 30.0 +1030 49 training.batch_size 1.0 +1030 50 model.embedding_dim 0.0 +1030 50 model.scoring_fct_norm 1.0 +1030 50 loss.margin 4.844298826282854 +1030 50 loss.adversarial_temperature 0.6712437182390656 +1030 50 optimizer.lr 0.0030041127308083897 +1030 50 negative_sampler.num_negs_per_pos 31.0 +1030 50 training.batch_size 0.0 +1030 51 model.embedding_dim 2.0 +1030 51 model.scoring_fct_norm 1.0 +1030 51 loss.margin 21.00402976376066 +1030 51 loss.adversarial_temperature 0.9088180476677622 +1030 51 optimizer.lr 0.0031288838328209865 +1030 51 negative_sampler.num_negs_per_pos 68.0 +1030 51 training.batch_size 2.0 +1030 52 model.embedding_dim 2.0 +1030 52 model.scoring_fct_norm 2.0 +1030 52 loss.margin 18.016893493549784 +1030 52 loss.adversarial_temperature 0.3217107804923353 +1030 52 optimizer.lr 0.020377501728434225 +1030 52 negative_sampler.num_negs_per_pos 78.0 +1030 52 training.batch_size 2.0 +1030 53 model.embedding_dim 2.0 +1030 53 model.scoring_fct_norm 2.0 +1030 53 loss.margin 7.166702975797549 +1030 53 loss.adversarial_temperature 0.8786621107691113 +1030 53 optimizer.lr 0.006260666075911539 +1030 53 negative_sampler.num_negs_per_pos 5.0 +1030 53 training.batch_size 2.0 +1030 54 model.embedding_dim 1.0 +1030 54 model.scoring_fct_norm 2.0 +1030 54 loss.margin 17.20361886324555 +1030 54 loss.adversarial_temperature 0.8424741482096652 +1030 54 optimizer.lr 
0.04685156092063624 +1030 54 negative_sampler.num_negs_per_pos 69.0 +1030 54 training.batch_size 2.0 +1030 55 model.embedding_dim 0.0 +1030 55 model.scoring_fct_norm 1.0 +1030 55 loss.margin 24.958685258444632 +1030 55 loss.adversarial_temperature 0.9134804966347391 +1030 55 optimizer.lr 0.08250577975514894 +1030 55 negative_sampler.num_negs_per_pos 39.0 +1030 55 training.batch_size 1.0 +1030 56 model.embedding_dim 1.0 +1030 56 model.scoring_fct_norm 2.0 +1030 56 loss.margin 22.135219402002054 +1030 56 loss.adversarial_temperature 0.8903475161171079 +1030 56 optimizer.lr 0.0032083424407010515 +1030 56 negative_sampler.num_negs_per_pos 98.0 +1030 56 training.batch_size 0.0 +1030 57 model.embedding_dim 2.0 +1030 57 model.scoring_fct_norm 1.0 +1030 57 loss.margin 9.9856222366938 +1030 57 loss.adversarial_temperature 0.31838023053524533 +1030 57 optimizer.lr 0.008197682026797047 +1030 57 negative_sampler.num_negs_per_pos 5.0 +1030 57 training.batch_size 2.0 +1030 58 model.embedding_dim 1.0 +1030 58 model.scoring_fct_norm 2.0 +1030 58 loss.margin 10.297216616527036 +1030 58 loss.adversarial_temperature 0.8115119377468678 +1030 58 optimizer.lr 0.006955092465033309 +1030 58 negative_sampler.num_negs_per_pos 66.0 +1030 58 training.batch_size 2.0 +1030 59 model.embedding_dim 1.0 +1030 59 model.scoring_fct_norm 1.0 +1030 59 loss.margin 22.51420470834569 +1030 59 loss.adversarial_temperature 0.5342929954701952 +1030 59 optimizer.lr 0.028825111686021643 +1030 59 negative_sampler.num_negs_per_pos 57.0 +1030 59 training.batch_size 2.0 +1030 60 model.embedding_dim 2.0 +1030 60 model.scoring_fct_norm 2.0 +1030 60 loss.margin 11.084176592724026 +1030 60 loss.adversarial_temperature 0.5598808518235229 +1030 60 optimizer.lr 0.003011427382523611 +1030 60 negative_sampler.num_negs_per_pos 94.0 +1030 60 training.batch_size 1.0 +1030 61 model.embedding_dim 1.0 +1030 61 model.scoring_fct_norm 1.0 +1030 61 loss.margin 27.13705430353071 +1030 61 loss.adversarial_temperature 
0.4204410263774049 +1030 61 optimizer.lr 0.0013908046236250683 +1030 61 negative_sampler.num_negs_per_pos 95.0 +1030 61 training.batch_size 2.0 +1030 62 model.embedding_dim 0.0 +1030 62 model.scoring_fct_norm 1.0 +1030 62 loss.margin 6.876284539183939 +1030 62 loss.adversarial_temperature 0.4635156868267096 +1030 62 optimizer.lr 0.004527720293306292 +1030 62 negative_sampler.num_negs_per_pos 76.0 +1030 62 training.batch_size 0.0 +1030 63 model.embedding_dim 1.0 +1030 63 model.scoring_fct_norm 1.0 +1030 63 loss.margin 3.5239092763147712 +1030 63 loss.adversarial_temperature 0.8209629900855124 +1030 63 optimizer.lr 0.0956164516299966 +1030 63 negative_sampler.num_negs_per_pos 4.0 +1030 63 training.batch_size 1.0 +1030 64 model.embedding_dim 1.0 +1030 64 model.scoring_fct_norm 2.0 +1030 64 loss.margin 13.854437291477478 +1030 64 loss.adversarial_temperature 0.2687405012069073 +1030 64 optimizer.lr 0.0025175011031039916 +1030 64 negative_sampler.num_negs_per_pos 19.0 +1030 64 training.batch_size 1.0 +1030 65 model.embedding_dim 1.0 +1030 65 model.scoring_fct_norm 1.0 +1030 65 loss.margin 25.411466742493456 +1030 65 loss.adversarial_temperature 0.8932802974890949 +1030 65 optimizer.lr 0.03377043814383318 +1030 65 negative_sampler.num_negs_per_pos 83.0 +1030 65 training.batch_size 1.0 +1030 66 model.embedding_dim 0.0 +1030 66 model.scoring_fct_norm 1.0 +1030 66 loss.margin 1.6197500257734108 +1030 66 loss.adversarial_temperature 0.16705195559510647 +1030 66 optimizer.lr 0.04246004962892114 +1030 66 negative_sampler.num_negs_per_pos 93.0 +1030 66 training.batch_size 0.0 +1030 67 model.embedding_dim 0.0 +1030 67 model.scoring_fct_norm 1.0 +1030 67 loss.margin 1.6547983189343485 +1030 67 loss.adversarial_temperature 0.6208928469543964 +1030 67 optimizer.lr 0.029508355035969148 +1030 67 negative_sampler.num_negs_per_pos 47.0 +1030 67 training.batch_size 2.0 +1030 68 model.embedding_dim 1.0 +1030 68 model.scoring_fct_norm 1.0 +1030 68 loss.margin 13.761183390275535 +1030 68 
loss.adversarial_temperature 0.8478777721038161 +1030 68 optimizer.lr 0.005292428168034044 +1030 68 negative_sampler.num_negs_per_pos 60.0 +1030 68 training.batch_size 1.0 +1030 69 model.embedding_dim 0.0 +1030 69 model.scoring_fct_norm 1.0 +1030 69 loss.margin 24.837132945461317 +1030 69 loss.adversarial_temperature 0.30493356775814673 +1030 69 optimizer.lr 0.0011889026679911543 +1030 69 negative_sampler.num_negs_per_pos 88.0 +1030 69 training.batch_size 0.0 +1030 70 model.embedding_dim 2.0 +1030 70 model.scoring_fct_norm 1.0 +1030 70 loss.margin 3.8710567327640057 +1030 70 loss.adversarial_temperature 0.9966865946193544 +1030 70 optimizer.lr 0.046679219003421944 +1030 70 negative_sampler.num_negs_per_pos 66.0 +1030 70 training.batch_size 2.0 +1030 71 model.embedding_dim 0.0 +1030 71 model.scoring_fct_norm 2.0 +1030 71 loss.margin 13.44776900911005 +1030 71 loss.adversarial_temperature 0.3189268094726653 +1030 71 optimizer.lr 0.0024197227555543973 +1030 71 negative_sampler.num_negs_per_pos 94.0 +1030 71 training.batch_size 0.0 +1030 72 model.embedding_dim 0.0 +1030 72 model.scoring_fct_norm 1.0 +1030 72 loss.margin 15.26832409847278 +1030 72 loss.adversarial_temperature 0.8567919760151212 +1030 72 optimizer.lr 0.0012136865864574168 +1030 72 negative_sampler.num_negs_per_pos 25.0 +1030 72 training.batch_size 2.0 +1030 73 model.embedding_dim 1.0 +1030 73 model.scoring_fct_norm 1.0 +1030 73 loss.margin 7.423585672210076 +1030 73 loss.adversarial_temperature 0.7633121851462541 +1030 73 optimizer.lr 0.007776359064862102 +1030 73 negative_sampler.num_negs_per_pos 68.0 +1030 73 training.batch_size 1.0 +1030 74 model.embedding_dim 1.0 +1030 74 model.scoring_fct_norm 1.0 +1030 74 loss.margin 15.021215987307697 +1030 74 loss.adversarial_temperature 0.3431011503439007 +1030 74 optimizer.lr 0.0028326934817039283 +1030 74 negative_sampler.num_negs_per_pos 18.0 +1030 74 training.batch_size 0.0 +1030 75 model.embedding_dim 1.0 +1030 75 model.scoring_fct_norm 2.0 +1030 75 
loss.margin 1.3019945765621586 +1030 75 loss.adversarial_temperature 0.14632160949040585 +1030 75 optimizer.lr 0.012244245930827352 +1030 75 negative_sampler.num_negs_per_pos 38.0 +1030 75 training.batch_size 2.0 +1030 76 model.embedding_dim 2.0 +1030 76 model.scoring_fct_norm 1.0 +1030 76 loss.margin 7.851689002589174 +1030 76 loss.adversarial_temperature 0.9499475564853426 +1030 76 optimizer.lr 0.026705087144267316 +1030 76 negative_sampler.num_negs_per_pos 18.0 +1030 76 training.batch_size 1.0 +1030 77 model.embedding_dim 2.0 +1030 77 model.scoring_fct_norm 2.0 +1030 77 loss.margin 20.17830203855376 +1030 77 loss.adversarial_temperature 0.9910918023798947 +1030 77 optimizer.lr 0.002233501953445512 +1030 77 negative_sampler.num_negs_per_pos 23.0 +1030 77 training.batch_size 0.0 +1030 78 model.embedding_dim 2.0 +1030 78 model.scoring_fct_norm 1.0 +1030 78 loss.margin 6.382616662109592 +1030 78 loss.adversarial_temperature 0.783222488676871 +1030 78 optimizer.lr 0.003925311812505736 +1030 78 negative_sampler.num_negs_per_pos 88.0 +1030 78 training.batch_size 0.0 +1030 79 model.embedding_dim 0.0 +1030 79 model.scoring_fct_norm 2.0 +1030 79 loss.margin 14.593864210409356 +1030 79 loss.adversarial_temperature 0.1428280875184673 +1030 79 optimizer.lr 0.003962580666729313 +1030 79 negative_sampler.num_negs_per_pos 85.0 +1030 79 training.batch_size 0.0 +1030 80 model.embedding_dim 1.0 +1030 80 model.scoring_fct_norm 2.0 +1030 80 loss.margin 12.754904430408825 +1030 80 loss.adversarial_temperature 0.6935970507773573 +1030 80 optimizer.lr 0.049974981662445545 +1030 80 negative_sampler.num_negs_per_pos 83.0 +1030 80 training.batch_size 0.0 +1030 81 model.embedding_dim 2.0 +1030 81 model.scoring_fct_norm 1.0 +1030 81 loss.margin 25.655055111190674 +1030 81 loss.adversarial_temperature 0.8851792550304671 +1030 81 optimizer.lr 0.012347531422955698 +1030 81 negative_sampler.num_negs_per_pos 1.0 +1030 81 training.batch_size 2.0 +1030 82 model.embedding_dim 2.0 +1030 82 
model.scoring_fct_norm 2.0 +1030 82 loss.margin 7.7490364406393 +1030 82 loss.adversarial_temperature 0.6688164955948903 +1030 82 optimizer.lr 0.006502440648778073 +1030 82 negative_sampler.num_negs_per_pos 57.0 +1030 82 training.batch_size 1.0 +1030 83 model.embedding_dim 1.0 +1030 83 model.scoring_fct_norm 1.0 +1030 83 loss.margin 1.6523757106169685 +1030 83 loss.adversarial_temperature 0.8733023645932446 +1030 83 optimizer.lr 0.05812544912374368 +1030 83 negative_sampler.num_negs_per_pos 99.0 +1030 83 training.batch_size 1.0 +1030 84 model.embedding_dim 0.0 +1030 84 model.scoring_fct_norm 2.0 +1030 84 loss.margin 12.1360485702477 +1030 84 loss.adversarial_temperature 0.3641261275929095 +1030 84 optimizer.lr 0.0012171171647379318 +1030 84 negative_sampler.num_negs_per_pos 74.0 +1030 84 training.batch_size 1.0 +1030 85 model.embedding_dim 1.0 +1030 85 model.scoring_fct_norm 2.0 +1030 85 loss.margin 13.030322172427093 +1030 85 loss.adversarial_temperature 0.9805236169790751 +1030 85 optimizer.lr 0.00466538016400583 +1030 85 negative_sampler.num_negs_per_pos 81.0 +1030 85 training.batch_size 2.0 +1030 86 model.embedding_dim 2.0 +1030 86 model.scoring_fct_norm 2.0 +1030 86 loss.margin 10.521216769928017 +1030 86 loss.adversarial_temperature 0.22375578218000175 +1030 86 optimizer.lr 0.0010700999714017554 +1030 86 negative_sampler.num_negs_per_pos 53.0 +1030 86 training.batch_size 2.0 +1030 87 model.embedding_dim 1.0 +1030 87 model.scoring_fct_norm 2.0 +1030 87 loss.margin 9.063729249355454 +1030 87 loss.adversarial_temperature 0.6707964922673381 +1030 87 optimizer.lr 0.0323233974760415 +1030 87 negative_sampler.num_negs_per_pos 75.0 +1030 87 training.batch_size 1.0 +1030 88 model.embedding_dim 0.0 +1030 88 model.scoring_fct_norm 1.0 +1030 88 loss.margin 22.239031953885394 +1030 88 loss.adversarial_temperature 0.6024322726625602 +1030 88 optimizer.lr 0.014070019122097146 +1030 88 negative_sampler.num_negs_per_pos 47.0 +1030 88 training.batch_size 2.0 +1030 89 
model.embedding_dim 0.0 +1030 89 model.scoring_fct_norm 2.0 +1030 89 loss.margin 20.483012104807027 +1030 89 loss.adversarial_temperature 0.5920493321049255 +1030 89 optimizer.lr 0.008093569082757596 +1030 89 negative_sampler.num_negs_per_pos 52.0 +1030 89 training.batch_size 1.0 +1030 90 model.embedding_dim 1.0 +1030 90 model.scoring_fct_norm 1.0 +1030 90 loss.margin 2.8841318944552885 +1030 90 loss.adversarial_temperature 0.8046989801589681 +1030 90 optimizer.lr 0.001954077063695611 +1030 90 negative_sampler.num_negs_per_pos 21.0 +1030 90 training.batch_size 1.0 +1030 91 model.embedding_dim 1.0 +1030 91 model.scoring_fct_norm 2.0 +1030 91 loss.margin 23.200149034558454 +1030 91 loss.adversarial_temperature 0.4042547022423921 +1030 91 optimizer.lr 0.0027685171272497974 +1030 91 negative_sampler.num_negs_per_pos 89.0 +1030 91 training.batch_size 0.0 +1030 92 model.embedding_dim 1.0 +1030 92 model.scoring_fct_norm 2.0 +1030 92 loss.margin 9.094709824088175 +1030 92 loss.adversarial_temperature 0.913788608500367 +1030 92 optimizer.lr 0.017511890577434255 +1030 92 negative_sampler.num_negs_per_pos 36.0 +1030 92 training.batch_size 0.0 +1030 93 model.embedding_dim 2.0 +1030 93 model.scoring_fct_norm 1.0 +1030 93 loss.margin 8.24854794820358 +1030 93 loss.adversarial_temperature 0.13163824078464867 +1030 93 optimizer.lr 0.003157451540590361 +1030 93 negative_sampler.num_negs_per_pos 1.0 +1030 93 training.batch_size 1.0 +1030 94 model.embedding_dim 2.0 +1030 94 model.scoring_fct_norm 2.0 +1030 94 loss.margin 5.483521359743552 +1030 94 loss.adversarial_temperature 0.9308168280299304 +1030 94 optimizer.lr 0.07370732332513394 +1030 94 negative_sampler.num_negs_per_pos 87.0 +1030 94 training.batch_size 0.0 +1030 95 model.embedding_dim 0.0 +1030 95 model.scoring_fct_norm 2.0 +1030 95 loss.margin 28.668094094084882 +1030 95 loss.adversarial_temperature 0.2173588078566835 +1030 95 optimizer.lr 0.014137801544924434 +1030 95 negative_sampler.num_negs_per_pos 81.0 +1030 95 
training.batch_size 1.0 +1030 96 model.embedding_dim 1.0 +1030 96 model.scoring_fct_norm 1.0 +1030 96 loss.margin 22.661963010692983 +1030 96 loss.adversarial_temperature 0.42098895056522734 +1030 96 optimizer.lr 0.01421467806844313 +1030 96 negative_sampler.num_negs_per_pos 81.0 +1030 96 training.batch_size 2.0 +1030 97 model.embedding_dim 2.0 +1030 97 model.scoring_fct_norm 2.0 +1030 97 loss.margin 26.889981646080102 +1030 97 loss.adversarial_temperature 0.4323852201475735 +1030 97 optimizer.lr 0.015002293266198532 +1030 97 negative_sampler.num_negs_per_pos 14.0 +1030 97 training.batch_size 1.0 +1030 98 model.embedding_dim 0.0 +1030 98 model.scoring_fct_norm 1.0 +1030 98 loss.margin 28.534369023629402 +1030 98 loss.adversarial_temperature 0.5949369103639165 +1030 98 optimizer.lr 0.003503499644012762 +1030 98 negative_sampler.num_negs_per_pos 17.0 +1030 98 training.batch_size 0.0 +1030 99 model.embedding_dim 0.0 +1030 99 model.scoring_fct_norm 2.0 +1030 99 loss.margin 14.259730956270749 +1030 99 loss.adversarial_temperature 0.16811451341817796 +1030 99 optimizer.lr 0.0028546970955265687 +1030 99 negative_sampler.num_negs_per_pos 92.0 +1030 99 training.batch_size 0.0 +1030 100 model.embedding_dim 0.0 +1030 100 model.scoring_fct_norm 1.0 +1030 100 loss.margin 11.983362363509798 +1030 100 loss.adversarial_temperature 0.8739102227839879 +1030 100 optimizer.lr 0.039948438560743066 +1030 100 negative_sampler.num_negs_per_pos 31.0 +1030 100 training.batch_size 2.0 +1030 1 dataset """kinships""" +1030 1 model """unstructuredmodel""" +1030 1 loss """nssa""" +1030 1 regularizer """no""" +1030 1 optimizer """adam""" +1030 1 training_loop """owa""" +1030 1 negative_sampler """basic""" +1030 1 evaluator """rankbased""" +1030 2 dataset """kinships""" +1030 2 model """unstructuredmodel""" +1030 2 loss """nssa""" +1030 2 regularizer """no""" +1030 2 optimizer """adam""" +1030 2 training_loop """owa""" +1030 2 negative_sampler """basic""" +1030 2 evaluator """rankbased""" +1030 3 
dataset """kinships""" +1030 3 model """unstructuredmodel""" +1030 3 loss """nssa""" +1030 3 regularizer """no""" +1030 3 optimizer """adam""" +1030 3 training_loop """owa""" +1030 3 negative_sampler """basic""" +1030 3 evaluator """rankbased""" +1030 4 dataset """kinships""" +1030 4 model """unstructuredmodel""" +1030 4 loss """nssa""" +1030 4 regularizer """no""" +1030 4 optimizer """adam""" +1030 4 training_loop """owa""" +1030 4 negative_sampler """basic""" +1030 4 evaluator """rankbased""" +1030 5 dataset """kinships""" +1030 5 model """unstructuredmodel""" +1030 5 loss """nssa""" +1030 5 regularizer """no""" +1030 5 optimizer """adam""" +1030 5 training_loop """owa""" +1030 5 negative_sampler """basic""" +1030 5 evaluator """rankbased""" +1030 6 dataset """kinships""" +1030 6 model """unstructuredmodel""" +1030 6 loss """nssa""" +1030 6 regularizer """no""" +1030 6 optimizer """adam""" +1030 6 training_loop """owa""" +1030 6 negative_sampler """basic""" +1030 6 evaluator """rankbased""" +1030 7 dataset """kinships""" +1030 7 model """unstructuredmodel""" +1030 7 loss """nssa""" +1030 7 regularizer """no""" +1030 7 optimizer """adam""" +1030 7 training_loop """owa""" +1030 7 negative_sampler """basic""" +1030 7 evaluator """rankbased""" +1030 8 dataset """kinships""" +1030 8 model """unstructuredmodel""" +1030 8 loss """nssa""" +1030 8 regularizer """no""" +1030 8 optimizer """adam""" +1030 8 training_loop """owa""" +1030 8 negative_sampler """basic""" +1030 8 evaluator """rankbased""" +1030 9 dataset """kinships""" +1030 9 model """unstructuredmodel""" +1030 9 loss """nssa""" +1030 9 regularizer """no""" +1030 9 optimizer """adam""" +1030 9 training_loop """owa""" +1030 9 negative_sampler """basic""" +1030 9 evaluator """rankbased""" +1030 10 dataset """kinships""" +1030 10 model """unstructuredmodel""" +1030 10 loss """nssa""" +1030 10 regularizer """no""" +1030 10 optimizer """adam""" +1030 10 training_loop """owa""" +1030 10 negative_sampler """basic""" 
+1030 10 evaluator """rankbased""" +1030 11 dataset """kinships""" +1030 11 model """unstructuredmodel""" +1030 11 loss """nssa""" +1030 11 regularizer """no""" +1030 11 optimizer """adam""" +1030 11 training_loop """owa""" +1030 11 negative_sampler """basic""" +1030 11 evaluator """rankbased""" +1030 12 dataset """kinships""" +1030 12 model """unstructuredmodel""" +1030 12 loss """nssa""" +1030 12 regularizer """no""" +1030 12 optimizer """adam""" +1030 12 training_loop """owa""" +1030 12 negative_sampler """basic""" +1030 12 evaluator """rankbased""" +1030 13 dataset """kinships""" +1030 13 model """unstructuredmodel""" +1030 13 loss """nssa""" +1030 13 regularizer """no""" +1030 13 optimizer """adam""" +1030 13 training_loop """owa""" +1030 13 negative_sampler """basic""" +1030 13 evaluator """rankbased""" +1030 14 dataset """kinships""" +1030 14 model """unstructuredmodel""" +1030 14 loss """nssa""" +1030 14 regularizer """no""" +1030 14 optimizer """adam""" +1030 14 training_loop """owa""" +1030 14 negative_sampler """basic""" +1030 14 evaluator """rankbased""" +1030 15 dataset """kinships""" +1030 15 model """unstructuredmodel""" +1030 15 loss """nssa""" +1030 15 regularizer """no""" +1030 15 optimizer """adam""" +1030 15 training_loop """owa""" +1030 15 negative_sampler """basic""" +1030 15 evaluator """rankbased""" +1030 16 dataset """kinships""" +1030 16 model """unstructuredmodel""" +1030 16 loss """nssa""" +1030 16 regularizer """no""" +1030 16 optimizer """adam""" +1030 16 training_loop """owa""" +1030 16 negative_sampler """basic""" +1030 16 evaluator """rankbased""" +1030 17 dataset """kinships""" +1030 17 model """unstructuredmodel""" +1030 17 loss """nssa""" +1030 17 regularizer """no""" +1030 17 optimizer """adam""" +1030 17 training_loop """owa""" +1030 17 negative_sampler """basic""" +1030 17 evaluator """rankbased""" +1030 18 dataset """kinships""" +1030 18 model """unstructuredmodel""" +1030 18 loss """nssa""" +1030 18 regularizer """no""" 
+1030 18 optimizer """adam""" +1030 18 training_loop """owa""" +1030 18 negative_sampler """basic""" +1030 18 evaluator """rankbased""" +1030 19 dataset """kinships""" +1030 19 model """unstructuredmodel""" +1030 19 loss """nssa""" +1030 19 regularizer """no""" +1030 19 optimizer """adam""" +1030 19 training_loop """owa""" +1030 19 negative_sampler """basic""" +1030 19 evaluator """rankbased""" +1030 20 dataset """kinships""" +1030 20 model """unstructuredmodel""" +1030 20 loss """nssa""" +1030 20 regularizer """no""" +1030 20 optimizer """adam""" +1030 20 training_loop """owa""" +1030 20 negative_sampler """basic""" +1030 20 evaluator """rankbased""" +1030 21 dataset """kinships""" +1030 21 model """unstructuredmodel""" +1030 21 loss """nssa""" +1030 21 regularizer """no""" +1030 21 optimizer """adam""" +1030 21 training_loop """owa""" +1030 21 negative_sampler """basic""" +1030 21 evaluator """rankbased""" +1030 22 dataset """kinships""" +1030 22 model """unstructuredmodel""" +1030 22 loss """nssa""" +1030 22 regularizer """no""" +1030 22 optimizer """adam""" +1030 22 training_loop """owa""" +1030 22 negative_sampler """basic""" +1030 22 evaluator """rankbased""" +1030 23 dataset """kinships""" +1030 23 model """unstructuredmodel""" +1030 23 loss """nssa""" +1030 23 regularizer """no""" +1030 23 optimizer """adam""" +1030 23 training_loop """owa""" +1030 23 negative_sampler """basic""" +1030 23 evaluator """rankbased""" +1030 24 dataset """kinships""" +1030 24 model """unstructuredmodel""" +1030 24 loss """nssa""" +1030 24 regularizer """no""" +1030 24 optimizer """adam""" +1030 24 training_loop """owa""" +1030 24 negative_sampler """basic""" +1030 24 evaluator """rankbased""" +1030 25 dataset """kinships""" +1030 25 model """unstructuredmodel""" +1030 25 loss """nssa""" +1030 25 regularizer """no""" +1030 25 optimizer """adam""" +1030 25 training_loop """owa""" +1030 25 negative_sampler """basic""" +1030 25 evaluator """rankbased""" +1030 26 dataset 
"""kinships""" +1030 26 model """unstructuredmodel""" +1030 26 loss """nssa""" +1030 26 regularizer """no""" +1030 26 optimizer """adam""" +1030 26 training_loop """owa""" +1030 26 negative_sampler """basic""" +1030 26 evaluator """rankbased""" +1030 27 dataset """kinships""" +1030 27 model """unstructuredmodel""" +1030 27 loss """nssa""" +1030 27 regularizer """no""" +1030 27 optimizer """adam""" +1030 27 training_loop """owa""" +1030 27 negative_sampler """basic""" +1030 27 evaluator """rankbased""" +1030 28 dataset """kinships""" +1030 28 model """unstructuredmodel""" +1030 28 loss """nssa""" +1030 28 regularizer """no""" +1030 28 optimizer """adam""" +1030 28 training_loop """owa""" +1030 28 negative_sampler """basic""" +1030 28 evaluator """rankbased""" +1030 29 dataset """kinships""" +1030 29 model """unstructuredmodel""" +1030 29 loss """nssa""" +1030 29 regularizer """no""" +1030 29 optimizer """adam""" +1030 29 training_loop """owa""" +1030 29 negative_sampler """basic""" +1030 29 evaluator """rankbased""" +1030 30 dataset """kinships""" +1030 30 model """unstructuredmodel""" +1030 30 loss """nssa""" +1030 30 regularizer """no""" +1030 30 optimizer """adam""" +1030 30 training_loop """owa""" +1030 30 negative_sampler """basic""" +1030 30 evaluator """rankbased""" +1030 31 dataset """kinships""" +1030 31 model """unstructuredmodel""" +1030 31 loss """nssa""" +1030 31 regularizer """no""" +1030 31 optimizer """adam""" +1030 31 training_loop """owa""" +1030 31 negative_sampler """basic""" +1030 31 evaluator """rankbased""" +1030 32 dataset """kinships""" +1030 32 model """unstructuredmodel""" +1030 32 loss """nssa""" +1030 32 regularizer """no""" +1030 32 optimizer """adam""" +1030 32 training_loop """owa""" +1030 32 negative_sampler """basic""" +1030 32 evaluator """rankbased""" +1030 33 dataset """kinships""" +1030 33 model """unstructuredmodel""" +1030 33 loss """nssa""" +1030 33 regularizer """no""" +1030 33 optimizer """adam""" +1030 33 training_loop 
"""owa""" +1030 33 negative_sampler """basic""" +1030 33 evaluator """rankbased""" +1030 34 dataset """kinships""" +1030 34 model """unstructuredmodel""" +1030 34 loss """nssa""" +1030 34 regularizer """no""" +1030 34 optimizer """adam""" +1030 34 training_loop """owa""" +1030 34 negative_sampler """basic""" +1030 34 evaluator """rankbased""" +1030 35 dataset """kinships""" +1030 35 model """unstructuredmodel""" +1030 35 loss """nssa""" +1030 35 regularizer """no""" +1030 35 optimizer """adam""" +1030 35 training_loop """owa""" +1030 35 negative_sampler """basic""" +1030 35 evaluator """rankbased""" +1030 36 dataset """kinships""" +1030 36 model """unstructuredmodel""" +1030 36 loss """nssa""" +1030 36 regularizer """no""" +1030 36 optimizer """adam""" +1030 36 training_loop """owa""" +1030 36 negative_sampler """basic""" +1030 36 evaluator """rankbased""" +1030 37 dataset """kinships""" +1030 37 model """unstructuredmodel""" +1030 37 loss """nssa""" +1030 37 regularizer """no""" +1030 37 optimizer """adam""" +1030 37 training_loop """owa""" +1030 37 negative_sampler """basic""" +1030 37 evaluator """rankbased""" +1030 38 dataset """kinships""" +1030 38 model """unstructuredmodel""" +1030 38 loss """nssa""" +1030 38 regularizer """no""" +1030 38 optimizer """adam""" +1030 38 training_loop """owa""" +1030 38 negative_sampler """basic""" +1030 38 evaluator """rankbased""" +1030 39 dataset """kinships""" +1030 39 model """unstructuredmodel""" +1030 39 loss """nssa""" +1030 39 regularizer """no""" +1030 39 optimizer """adam""" +1030 39 training_loop """owa""" +1030 39 negative_sampler """basic""" +1030 39 evaluator """rankbased""" +1030 40 dataset """kinships""" +1030 40 model """unstructuredmodel""" +1030 40 loss """nssa""" +1030 40 regularizer """no""" +1030 40 optimizer """adam""" +1030 40 training_loop """owa""" +1030 40 negative_sampler """basic""" +1030 40 evaluator """rankbased""" +1030 41 dataset """kinships""" +1030 41 model """unstructuredmodel""" +1030 41 
loss """nssa""" +1030 41 regularizer """no""" +1030 41 optimizer """adam""" +1030 41 training_loop """owa""" +1030 41 negative_sampler """basic""" +1030 41 evaluator """rankbased""" +1030 42 dataset """kinships""" +1030 42 model """unstructuredmodel""" +1030 42 loss """nssa""" +1030 42 regularizer """no""" +1030 42 optimizer """adam""" +1030 42 training_loop """owa""" +1030 42 negative_sampler """basic""" +1030 42 evaluator """rankbased""" +1030 43 dataset """kinships""" +1030 43 model """unstructuredmodel""" +1030 43 loss """nssa""" +1030 43 regularizer """no""" +1030 43 optimizer """adam""" +1030 43 training_loop """owa""" +1030 43 negative_sampler """basic""" +1030 43 evaluator """rankbased""" +1030 44 dataset """kinships""" +1030 44 model """unstructuredmodel""" +1030 44 loss """nssa""" +1030 44 regularizer """no""" +1030 44 optimizer """adam""" +1030 44 training_loop """owa""" +1030 44 negative_sampler """basic""" +1030 44 evaluator """rankbased""" +1030 45 dataset """kinships""" +1030 45 model """unstructuredmodel""" +1030 45 loss """nssa""" +1030 45 regularizer """no""" +1030 45 optimizer """adam""" +1030 45 training_loop """owa""" +1030 45 negative_sampler """basic""" +1030 45 evaluator """rankbased""" +1030 46 dataset """kinships""" +1030 46 model """unstructuredmodel""" +1030 46 loss """nssa""" +1030 46 regularizer """no""" +1030 46 optimizer """adam""" +1030 46 training_loop """owa""" +1030 46 negative_sampler """basic""" +1030 46 evaluator """rankbased""" +1030 47 dataset """kinships""" +1030 47 model """unstructuredmodel""" +1030 47 loss """nssa""" +1030 47 regularizer """no""" +1030 47 optimizer """adam""" +1030 47 training_loop """owa""" +1030 47 negative_sampler """basic""" +1030 47 evaluator """rankbased""" +1030 48 dataset """kinships""" +1030 48 model """unstructuredmodel""" +1030 48 loss """nssa""" +1030 48 regularizer """no""" +1030 48 optimizer """adam""" +1030 48 training_loop """owa""" +1030 48 negative_sampler """basic""" +1030 48 evaluator 
"""rankbased""" +1030 49 dataset """kinships""" +1030 49 model """unstructuredmodel""" +1030 49 loss """nssa""" +1030 49 regularizer """no""" +1030 49 optimizer """adam""" +1030 49 training_loop """owa""" +1030 49 negative_sampler """basic""" +1030 49 evaluator """rankbased""" +1030 50 dataset """kinships""" +1030 50 model """unstructuredmodel""" +1030 50 loss """nssa""" +1030 50 regularizer """no""" +1030 50 optimizer """adam""" +1030 50 training_loop """owa""" +1030 50 negative_sampler """basic""" +1030 50 evaluator """rankbased""" +1030 51 dataset """kinships""" +1030 51 model """unstructuredmodel""" +1030 51 loss """nssa""" +1030 51 regularizer """no""" +1030 51 optimizer """adam""" +1030 51 training_loop """owa""" +1030 51 negative_sampler """basic""" +1030 51 evaluator """rankbased""" +1030 52 dataset """kinships""" +1030 52 model """unstructuredmodel""" +1030 52 loss """nssa""" +1030 52 regularizer """no""" +1030 52 optimizer """adam""" +1030 52 training_loop """owa""" +1030 52 negative_sampler """basic""" +1030 52 evaluator """rankbased""" +1030 53 dataset """kinships""" +1030 53 model """unstructuredmodel""" +1030 53 loss """nssa""" +1030 53 regularizer """no""" +1030 53 optimizer """adam""" +1030 53 training_loop """owa""" +1030 53 negative_sampler """basic""" +1030 53 evaluator """rankbased""" +1030 54 dataset """kinships""" +1030 54 model """unstructuredmodel""" +1030 54 loss """nssa""" +1030 54 regularizer """no""" +1030 54 optimizer """adam""" +1030 54 training_loop """owa""" +1030 54 negative_sampler """basic""" +1030 54 evaluator """rankbased""" +1030 55 dataset """kinships""" +1030 55 model """unstructuredmodel""" +1030 55 loss """nssa""" +1030 55 regularizer """no""" +1030 55 optimizer """adam""" +1030 55 training_loop """owa""" +1030 55 negative_sampler """basic""" +1030 55 evaluator """rankbased""" +1030 56 dataset """kinships""" +1030 56 model """unstructuredmodel""" +1030 56 loss """nssa""" +1030 56 regularizer """no""" +1030 56 optimizer 
"""adam""" +1030 56 training_loop """owa""" +1030 56 negative_sampler """basic""" +1030 56 evaluator """rankbased""" +1030 57 dataset """kinships""" +1030 57 model """unstructuredmodel""" +1030 57 loss """nssa""" +1030 57 regularizer """no""" +1030 57 optimizer """adam""" +1030 57 training_loop """owa""" +1030 57 negative_sampler """basic""" +1030 57 evaluator """rankbased""" +1030 58 dataset """kinships""" +1030 58 model """unstructuredmodel""" +1030 58 loss """nssa""" +1030 58 regularizer """no""" +1030 58 optimizer """adam""" +1030 58 training_loop """owa""" +1030 58 negative_sampler """basic""" +1030 58 evaluator """rankbased""" +1030 59 dataset """kinships""" +1030 59 model """unstructuredmodel""" +1030 59 loss """nssa""" +1030 59 regularizer """no""" +1030 59 optimizer """adam""" +1030 59 training_loop """owa""" +1030 59 negative_sampler """basic""" +1030 59 evaluator """rankbased""" +1030 60 dataset """kinships""" +1030 60 model """unstructuredmodel""" +1030 60 loss """nssa""" +1030 60 regularizer """no""" +1030 60 optimizer """adam""" +1030 60 training_loop """owa""" +1030 60 negative_sampler """basic""" +1030 60 evaluator """rankbased""" +1030 61 dataset """kinships""" +1030 61 model """unstructuredmodel""" +1030 61 loss """nssa""" +1030 61 regularizer """no""" +1030 61 optimizer """adam""" +1030 61 training_loop """owa""" +1030 61 negative_sampler """basic""" +1030 61 evaluator """rankbased""" +1030 62 dataset """kinships""" +1030 62 model """unstructuredmodel""" +1030 62 loss """nssa""" +1030 62 regularizer """no""" +1030 62 optimizer """adam""" +1030 62 training_loop """owa""" +1030 62 negative_sampler """basic""" +1030 62 evaluator """rankbased""" +1030 63 dataset """kinships""" +1030 63 model """unstructuredmodel""" +1030 63 loss """nssa""" +1030 63 regularizer """no""" +1030 63 optimizer """adam""" +1030 63 training_loop """owa""" +1030 63 negative_sampler """basic""" +1030 63 evaluator """rankbased""" +1030 64 dataset """kinships""" +1030 64 model 
"""unstructuredmodel""" +1030 64 loss """nssa""" +1030 64 regularizer """no""" +1030 64 optimizer """adam""" +1030 64 training_loop """owa""" +1030 64 negative_sampler """basic""" +1030 64 evaluator """rankbased""" +1030 65 dataset """kinships""" +1030 65 model """unstructuredmodel""" +1030 65 loss """nssa""" +1030 65 regularizer """no""" +1030 65 optimizer """adam""" +1030 65 training_loop """owa""" +1030 65 negative_sampler """basic""" +1030 65 evaluator """rankbased""" +1030 66 dataset """kinships""" +1030 66 model """unstructuredmodel""" +1030 66 loss """nssa""" +1030 66 regularizer """no""" +1030 66 optimizer """adam""" +1030 66 training_loop """owa""" +1030 66 negative_sampler """basic""" +1030 66 evaluator """rankbased""" +1030 67 dataset """kinships""" +1030 67 model """unstructuredmodel""" +1030 67 loss """nssa""" +1030 67 regularizer """no""" +1030 67 optimizer """adam""" +1030 67 training_loop """owa""" +1030 67 negative_sampler """basic""" +1030 67 evaluator """rankbased""" +1030 68 dataset """kinships""" +1030 68 model """unstructuredmodel""" +1030 68 loss """nssa""" +1030 68 regularizer """no""" +1030 68 optimizer """adam""" +1030 68 training_loop """owa""" +1030 68 negative_sampler """basic""" +1030 68 evaluator """rankbased""" +1030 69 dataset """kinships""" +1030 69 model """unstructuredmodel""" +1030 69 loss """nssa""" +1030 69 regularizer """no""" +1030 69 optimizer """adam""" +1030 69 training_loop """owa""" +1030 69 negative_sampler """basic""" +1030 69 evaluator """rankbased""" +1030 70 dataset """kinships""" +1030 70 model """unstructuredmodel""" +1030 70 loss """nssa""" +1030 70 regularizer """no""" +1030 70 optimizer """adam""" +1030 70 training_loop """owa""" +1030 70 negative_sampler """basic""" +1030 70 evaluator """rankbased""" +1030 71 dataset """kinships""" +1030 71 model """unstructuredmodel""" +1030 71 loss """nssa""" +1030 71 regularizer """no""" +1030 71 optimizer """adam""" +1030 71 training_loop """owa""" +1030 71 
negative_sampler """basic""" +1030 71 evaluator """rankbased""" +1030 72 dataset """kinships""" +1030 72 model """unstructuredmodel""" +1030 72 loss """nssa""" +1030 72 regularizer """no""" +1030 72 optimizer """adam""" +1030 72 training_loop """owa""" +1030 72 negative_sampler """basic""" +1030 72 evaluator """rankbased""" +1030 73 dataset """kinships""" +1030 73 model """unstructuredmodel""" +1030 73 loss """nssa""" +1030 73 regularizer """no""" +1030 73 optimizer """adam""" +1030 73 training_loop """owa""" +1030 73 negative_sampler """basic""" +1030 73 evaluator """rankbased""" +1030 74 dataset """kinships""" +1030 74 model """unstructuredmodel""" +1030 74 loss """nssa""" +1030 74 regularizer """no""" +1030 74 optimizer """adam""" +1030 74 training_loop """owa""" +1030 74 negative_sampler """basic""" +1030 74 evaluator """rankbased""" +1030 75 dataset """kinships""" +1030 75 model """unstructuredmodel""" +1030 75 loss """nssa""" +1030 75 regularizer """no""" +1030 75 optimizer """adam""" +1030 75 training_loop """owa""" +1030 75 negative_sampler """basic""" +1030 75 evaluator """rankbased""" +1030 76 dataset """kinships""" +1030 76 model """unstructuredmodel""" +1030 76 loss """nssa""" +1030 76 regularizer """no""" +1030 76 optimizer """adam""" +1030 76 training_loop """owa""" +1030 76 negative_sampler """basic""" +1030 76 evaluator """rankbased""" +1030 77 dataset """kinships""" +1030 77 model """unstructuredmodel""" +1030 77 loss """nssa""" +1030 77 regularizer """no""" +1030 77 optimizer """adam""" +1030 77 training_loop """owa""" +1030 77 negative_sampler """basic""" +1030 77 evaluator """rankbased""" +1030 78 dataset """kinships""" +1030 78 model """unstructuredmodel""" +1030 78 loss """nssa""" +1030 78 regularizer """no""" +1030 78 optimizer """adam""" +1030 78 training_loop """owa""" +1030 78 negative_sampler """basic""" +1030 78 evaluator """rankbased""" +1030 79 dataset """kinships""" +1030 79 model """unstructuredmodel""" +1030 79 loss """nssa""" +1030 
79 regularizer """no""" +1030 79 optimizer """adam""" +1030 79 training_loop """owa""" +1030 79 negative_sampler """basic""" +1030 79 evaluator """rankbased""" +1030 80 dataset """kinships""" +1030 80 model """unstructuredmodel""" +1030 80 loss """nssa""" +1030 80 regularizer """no""" +1030 80 optimizer """adam""" +1030 80 training_loop """owa""" +1030 80 negative_sampler """basic""" +1030 80 evaluator """rankbased""" +1030 81 dataset """kinships""" +1030 81 model """unstructuredmodel""" +1030 81 loss """nssa""" +1030 81 regularizer """no""" +1030 81 optimizer """adam""" +1030 81 training_loop """owa""" +1030 81 negative_sampler """basic""" +1030 81 evaluator """rankbased""" +1030 82 dataset """kinships""" +1030 82 model """unstructuredmodel""" +1030 82 loss """nssa""" +1030 82 regularizer """no""" +1030 82 optimizer """adam""" +1030 82 training_loop """owa""" +1030 82 negative_sampler """basic""" +1030 82 evaluator """rankbased""" +1030 83 dataset """kinships""" +1030 83 model """unstructuredmodel""" +1030 83 loss """nssa""" +1030 83 regularizer """no""" +1030 83 optimizer """adam""" +1030 83 training_loop """owa""" +1030 83 negative_sampler """basic""" +1030 83 evaluator """rankbased""" +1030 84 dataset """kinships""" +1030 84 model """unstructuredmodel""" +1030 84 loss """nssa""" +1030 84 regularizer """no""" +1030 84 optimizer """adam""" +1030 84 training_loop """owa""" +1030 84 negative_sampler """basic""" +1030 84 evaluator """rankbased""" +1030 85 dataset """kinships""" +1030 85 model """unstructuredmodel""" +1030 85 loss """nssa""" +1030 85 regularizer """no""" +1030 85 optimizer """adam""" +1030 85 training_loop """owa""" +1030 85 negative_sampler """basic""" +1030 85 evaluator """rankbased""" +1030 86 dataset """kinships""" +1030 86 model """unstructuredmodel""" +1030 86 loss """nssa""" +1030 86 regularizer """no""" +1030 86 optimizer """adam""" +1030 86 training_loop """owa""" +1030 86 negative_sampler """basic""" +1030 86 evaluator """rankbased""" +1030 
87 dataset """kinships""" +1030 87 model """unstructuredmodel""" +1030 87 loss """nssa""" +1030 87 regularizer """no""" +1030 87 optimizer """adam""" +1030 87 training_loop """owa""" +1030 87 negative_sampler """basic""" +1030 87 evaluator """rankbased""" +1030 88 dataset """kinships""" +1030 88 model """unstructuredmodel""" +1030 88 loss """nssa""" +1030 88 regularizer """no""" +1030 88 optimizer """adam""" +1030 88 training_loop """owa""" +1030 88 negative_sampler """basic""" +1030 88 evaluator """rankbased""" +1030 89 dataset """kinships""" +1030 89 model """unstructuredmodel""" +1030 89 loss """nssa""" +1030 89 regularizer """no""" +1030 89 optimizer """adam""" +1030 89 training_loop """owa""" +1030 89 negative_sampler """basic""" +1030 89 evaluator """rankbased""" +1030 90 dataset """kinships""" +1030 90 model """unstructuredmodel""" +1030 90 loss """nssa""" +1030 90 regularizer """no""" +1030 90 optimizer """adam""" +1030 90 training_loop """owa""" +1030 90 negative_sampler """basic""" +1030 90 evaluator """rankbased""" +1030 91 dataset """kinships""" +1030 91 model """unstructuredmodel""" +1030 91 loss """nssa""" +1030 91 regularizer """no""" +1030 91 optimizer """adam""" +1030 91 training_loop """owa""" +1030 91 negative_sampler """basic""" +1030 91 evaluator """rankbased""" +1030 92 dataset """kinships""" +1030 92 model """unstructuredmodel""" +1030 92 loss """nssa""" +1030 92 regularizer """no""" +1030 92 optimizer """adam""" +1030 92 training_loop """owa""" +1030 92 negative_sampler """basic""" +1030 92 evaluator """rankbased""" +1030 93 dataset """kinships""" +1030 93 model """unstructuredmodel""" +1030 93 loss """nssa""" +1030 93 regularizer """no""" +1030 93 optimizer """adam""" +1030 93 training_loop """owa""" +1030 93 negative_sampler """basic""" +1030 93 evaluator """rankbased""" +1030 94 dataset """kinships""" +1030 94 model """unstructuredmodel""" +1030 94 loss """nssa""" +1030 94 regularizer """no""" +1030 94 optimizer """adam""" +1030 94 
training_loop """owa""" +1030 94 negative_sampler """basic""" +1030 94 evaluator """rankbased""" +1030 95 dataset """kinships""" +1030 95 model """unstructuredmodel""" +1030 95 loss """nssa""" +1030 95 regularizer """no""" +1030 95 optimizer """adam""" +1030 95 training_loop """owa""" +1030 95 negative_sampler """basic""" +1030 95 evaluator """rankbased""" +1030 96 dataset """kinships""" +1030 96 model """unstructuredmodel""" +1030 96 loss """nssa""" +1030 96 regularizer """no""" +1030 96 optimizer """adam""" +1030 96 training_loop """owa""" +1030 96 negative_sampler """basic""" +1030 96 evaluator """rankbased""" +1030 97 dataset """kinships""" +1030 97 model """unstructuredmodel""" +1030 97 loss """nssa""" +1030 97 regularizer """no""" +1030 97 optimizer """adam""" +1030 97 training_loop """owa""" +1030 97 negative_sampler """basic""" +1030 97 evaluator """rankbased""" +1030 98 dataset """kinships""" +1030 98 model """unstructuredmodel""" +1030 98 loss """nssa""" +1030 98 regularizer """no""" +1030 98 optimizer """adam""" +1030 98 training_loop """owa""" +1030 98 negative_sampler """basic""" +1030 98 evaluator """rankbased""" +1030 99 dataset """kinships""" +1030 99 model """unstructuredmodel""" +1030 99 loss """nssa""" +1030 99 regularizer """no""" +1030 99 optimizer """adam""" +1030 99 training_loop """owa""" +1030 99 negative_sampler """basic""" +1030 99 evaluator """rankbased""" +1030 100 dataset """kinships""" +1030 100 model """unstructuredmodel""" +1030 100 loss """nssa""" +1030 100 regularizer """no""" +1030 100 optimizer """adam""" +1030 100 training_loop """owa""" +1030 100 negative_sampler """basic""" +1030 100 evaluator """rankbased""" +1031 1 model.embedding_dim 0.0 +1031 1 model.scoring_fct_norm 2.0 +1031 1 loss.margin 3.202816995993306 +1031 1 loss.adversarial_temperature 0.7169627110376025 +1031 1 optimizer.lr 0.001982127310055253 +1031 1 negative_sampler.num_negs_per_pos 10.0 +1031 1 training.batch_size 1.0 +1031 2 model.embedding_dim 2.0 +1031 2 
model.scoring_fct_norm 1.0 +1031 2 loss.margin 12.950641896527516 +1031 2 loss.adversarial_temperature 0.9708501841542946 +1031 2 optimizer.lr 0.0011260586056066006 +1031 2 negative_sampler.num_negs_per_pos 29.0 +1031 2 training.batch_size 2.0 +1031 3 model.embedding_dim 2.0 +1031 3 model.scoring_fct_norm 1.0 +1031 3 loss.margin 17.67200227424512 +1031 3 loss.adversarial_temperature 0.6653778963293782 +1031 3 optimizer.lr 0.04693499674948378 +1031 3 negative_sampler.num_negs_per_pos 22.0 +1031 3 training.batch_size 1.0 +1031 4 model.embedding_dim 0.0 +1031 4 model.scoring_fct_norm 2.0 +1031 4 loss.margin 27.90029187878286 +1031 4 loss.adversarial_temperature 0.17704109806277474 +1031 4 optimizer.lr 0.0036227909974347015 +1031 4 negative_sampler.num_negs_per_pos 72.0 +1031 4 training.batch_size 1.0 +1031 5 model.embedding_dim 2.0 +1031 5 model.scoring_fct_norm 2.0 +1031 5 loss.margin 8.82361584359181 +1031 5 loss.adversarial_temperature 0.6804550351470693 +1031 5 optimizer.lr 0.0011011012059178112 +1031 5 negative_sampler.num_negs_per_pos 94.0 +1031 5 training.batch_size 0.0 +1031 6 model.embedding_dim 2.0 +1031 6 model.scoring_fct_norm 2.0 +1031 6 loss.margin 13.913108531954194 +1031 6 loss.adversarial_temperature 0.6422817976603467 +1031 6 optimizer.lr 0.0036586375708799147 +1031 6 negative_sampler.num_negs_per_pos 28.0 +1031 6 training.batch_size 0.0 +1031 7 model.embedding_dim 2.0 +1031 7 model.scoring_fct_norm 1.0 +1031 7 loss.margin 4.517116137449108 +1031 7 loss.adversarial_temperature 0.6046309279002277 +1031 7 optimizer.lr 0.0044026057046354225 +1031 7 negative_sampler.num_negs_per_pos 86.0 +1031 7 training.batch_size 1.0 +1031 8 model.embedding_dim 2.0 +1031 8 model.scoring_fct_norm 2.0 +1031 8 loss.margin 5.737694501374574 +1031 8 loss.adversarial_temperature 0.8880357649433203 +1031 8 optimizer.lr 0.017160811638245992 +1031 8 negative_sampler.num_negs_per_pos 74.0 +1031 8 training.batch_size 2.0 +1031 9 model.embedding_dim 0.0 +1031 9 
model.scoring_fct_norm 2.0 +1031 9 loss.margin 29.259062973385202 +1031 9 loss.adversarial_temperature 0.5373908289694359 +1031 9 optimizer.lr 0.002841482873300698 +1031 9 negative_sampler.num_negs_per_pos 64.0 +1031 9 training.batch_size 2.0 +1031 10 model.embedding_dim 0.0 +1031 10 model.scoring_fct_norm 1.0 +1031 10 loss.margin 5.425914914871002 +1031 10 loss.adversarial_temperature 0.992452490983993 +1031 10 optimizer.lr 0.006021747429306063 +1031 10 negative_sampler.num_negs_per_pos 87.0 +1031 10 training.batch_size 2.0 +1031 11 model.embedding_dim 1.0 +1031 11 model.scoring_fct_norm 1.0 +1031 11 loss.margin 10.678401944428114 +1031 11 loss.adversarial_temperature 0.6144766615781936 +1031 11 optimizer.lr 0.0032860352973750886 +1031 11 negative_sampler.num_negs_per_pos 30.0 +1031 11 training.batch_size 0.0 +1031 12 model.embedding_dim 2.0 +1031 12 model.scoring_fct_norm 2.0 +1031 12 loss.margin 12.677631337509226 +1031 12 loss.adversarial_temperature 0.5068208187199931 +1031 12 optimizer.lr 0.002526257142232376 +1031 12 negative_sampler.num_negs_per_pos 6.0 +1031 12 training.batch_size 0.0 +1031 13 model.embedding_dim 0.0 +1031 13 model.scoring_fct_norm 1.0 +1031 13 loss.margin 2.9398569635011627 +1031 13 loss.adversarial_temperature 0.5313551419369995 +1031 13 optimizer.lr 0.031561484944286726 +1031 13 negative_sampler.num_negs_per_pos 20.0 +1031 13 training.batch_size 2.0 +1031 14 model.embedding_dim 0.0 +1031 14 model.scoring_fct_norm 1.0 +1031 14 loss.margin 26.61534767577109 +1031 14 loss.adversarial_temperature 0.7327772995530247 +1031 14 optimizer.lr 0.03726661559968462 +1031 14 negative_sampler.num_negs_per_pos 37.0 +1031 14 training.batch_size 2.0 +1031 15 model.embedding_dim 1.0 +1031 15 model.scoring_fct_norm 2.0 +1031 15 loss.margin 18.60206553637282 +1031 15 loss.adversarial_temperature 0.9192970623276113 +1031 15 optimizer.lr 0.0015995632011582138 +1031 15 negative_sampler.num_negs_per_pos 7.0 +1031 15 training.batch_size 0.0 +1031 16 
model.embedding_dim 2.0 +1031 16 model.scoring_fct_norm 1.0 +1031 16 loss.margin 26.10748004731537 +1031 16 loss.adversarial_temperature 0.622060619952115 +1031 16 optimizer.lr 0.06248140208788917 +1031 16 negative_sampler.num_negs_per_pos 67.0 +1031 16 training.batch_size 2.0 +1031 17 model.embedding_dim 0.0 +1031 17 model.scoring_fct_norm 2.0 +1031 17 loss.margin 10.112450140846182 +1031 17 loss.adversarial_temperature 0.9538972668695338 +1031 17 optimizer.lr 0.02055086132391578 +1031 17 negative_sampler.num_negs_per_pos 9.0 +1031 17 training.batch_size 0.0 +1031 18 model.embedding_dim 2.0 +1031 18 model.scoring_fct_norm 1.0 +1031 18 loss.margin 27.58587420720506 +1031 18 loss.adversarial_temperature 0.9152115224099889 +1031 18 optimizer.lr 0.003404132942089427 +1031 18 negative_sampler.num_negs_per_pos 84.0 +1031 18 training.batch_size 0.0 +1031 19 model.embedding_dim 0.0 +1031 19 model.scoring_fct_norm 1.0 +1031 19 loss.margin 3.6487731285740987 +1031 19 loss.adversarial_temperature 0.3332342942932281 +1031 19 optimizer.lr 0.004265558511862383 +1031 19 negative_sampler.num_negs_per_pos 13.0 +1031 19 training.batch_size 2.0 +1031 20 model.embedding_dim 0.0 +1031 20 model.scoring_fct_norm 2.0 +1031 20 loss.margin 19.097027302214936 +1031 20 loss.adversarial_temperature 0.7906729199222011 +1031 20 optimizer.lr 0.01106152942966291 +1031 20 negative_sampler.num_negs_per_pos 99.0 +1031 20 training.batch_size 1.0 +1031 21 model.embedding_dim 2.0 +1031 21 model.scoring_fct_norm 2.0 +1031 21 loss.margin 6.092471927187372 +1031 21 loss.adversarial_temperature 0.6004636472412375 +1031 21 optimizer.lr 0.0030450061243203993 +1031 21 negative_sampler.num_negs_per_pos 4.0 +1031 21 training.batch_size 2.0 +1031 22 model.embedding_dim 1.0 +1031 22 model.scoring_fct_norm 2.0 +1031 22 loss.margin 28.672500613378094 +1031 22 loss.adversarial_temperature 0.9517209856279344 +1031 22 optimizer.lr 0.0010120538692646112 +1031 22 negative_sampler.num_negs_per_pos 97.0 +1031 22 
training.batch_size 1.0 +1031 23 model.embedding_dim 2.0 +1031 23 model.scoring_fct_norm 2.0 +1031 23 loss.margin 20.129608544962064 +1031 23 loss.adversarial_temperature 0.6102898870741497 +1031 23 optimizer.lr 0.04513978661048489 +1031 23 negative_sampler.num_negs_per_pos 78.0 +1031 23 training.batch_size 1.0 +1031 24 model.embedding_dim 2.0 +1031 24 model.scoring_fct_norm 1.0 +1031 24 loss.margin 11.248119573227982 +1031 24 loss.adversarial_temperature 0.48306711732221097 +1031 24 optimizer.lr 0.006419557406194425 +1031 24 negative_sampler.num_negs_per_pos 64.0 +1031 24 training.batch_size 2.0 +1031 25 model.embedding_dim 0.0 +1031 25 model.scoring_fct_norm 2.0 +1031 25 loss.margin 17.407206773711902 +1031 25 loss.adversarial_temperature 0.43438484115814746 +1031 25 optimizer.lr 0.009252270504664714 +1031 25 negative_sampler.num_negs_per_pos 75.0 +1031 25 training.batch_size 0.0 +1031 26 model.embedding_dim 2.0 +1031 26 model.scoring_fct_norm 2.0 +1031 26 loss.margin 2.862551877387488 +1031 26 loss.adversarial_temperature 0.7398961995904946 +1031 26 optimizer.lr 0.0013185954681006196 +1031 26 negative_sampler.num_negs_per_pos 66.0 +1031 26 training.batch_size 1.0 +1031 27 model.embedding_dim 0.0 +1031 27 model.scoring_fct_norm 1.0 +1031 27 loss.margin 18.9228299125774 +1031 27 loss.adversarial_temperature 0.5749779477753888 +1031 27 optimizer.lr 0.004772221767624398 +1031 27 negative_sampler.num_negs_per_pos 3.0 +1031 27 training.batch_size 0.0 +1031 28 model.embedding_dim 0.0 +1031 28 model.scoring_fct_norm 1.0 +1031 28 loss.margin 23.069890753538118 +1031 28 loss.adversarial_temperature 0.726232866598691 +1031 28 optimizer.lr 0.03867522305476739 +1031 28 negative_sampler.num_negs_per_pos 67.0 +1031 28 training.batch_size 0.0 +1031 29 model.embedding_dim 1.0 +1031 29 model.scoring_fct_norm 1.0 +1031 29 loss.margin 6.322887663296557 +1031 29 loss.adversarial_temperature 0.5024994207354344 +1031 29 optimizer.lr 0.03208172337401738 +1031 29 
negative_sampler.num_negs_per_pos 90.0 +1031 29 training.batch_size 1.0 +1031 30 model.embedding_dim 0.0 +1031 30 model.scoring_fct_norm 1.0 +1031 30 loss.margin 12.518175610314936 +1031 30 loss.adversarial_temperature 0.7876414477982921 +1031 30 optimizer.lr 0.0033673939010737735 +1031 30 negative_sampler.num_negs_per_pos 3.0 +1031 30 training.batch_size 2.0 +1031 31 model.embedding_dim 2.0 +1031 31 model.scoring_fct_norm 2.0 +1031 31 loss.margin 4.706337848702571 +1031 31 loss.adversarial_temperature 0.6322168104882046 +1031 31 optimizer.lr 0.014137544807210935 +1031 31 negative_sampler.num_negs_per_pos 10.0 +1031 31 training.batch_size 0.0 +1031 32 model.embedding_dim 0.0 +1031 32 model.scoring_fct_norm 2.0 +1031 32 loss.margin 7.349199358978287 +1031 32 loss.adversarial_temperature 0.2673945823502387 +1031 32 optimizer.lr 0.004047448300671831 +1031 32 negative_sampler.num_negs_per_pos 51.0 +1031 32 training.batch_size 0.0 +1031 33 model.embedding_dim 2.0 +1031 33 model.scoring_fct_norm 1.0 +1031 33 loss.margin 2.719903541826074 +1031 33 loss.adversarial_temperature 0.6261095677393724 +1031 33 optimizer.lr 0.05852118408792684 +1031 33 negative_sampler.num_negs_per_pos 16.0 +1031 33 training.batch_size 0.0 +1031 34 model.embedding_dim 1.0 +1031 34 model.scoring_fct_norm 1.0 +1031 34 loss.margin 27.30199998215643 +1031 34 loss.adversarial_temperature 0.9356993370394951 +1031 34 optimizer.lr 0.004661943115215767 +1031 34 negative_sampler.num_negs_per_pos 88.0 +1031 34 training.batch_size 1.0 +1031 35 model.embedding_dim 0.0 +1031 35 model.scoring_fct_norm 2.0 +1031 35 loss.margin 25.287577404989005 +1031 35 loss.adversarial_temperature 0.1615778085319976 +1031 35 optimizer.lr 0.008671841158963552 +1031 35 negative_sampler.num_negs_per_pos 59.0 +1031 35 training.batch_size 2.0 +1031 36 model.embedding_dim 2.0 +1031 36 model.scoring_fct_norm 2.0 +1031 36 loss.margin 3.3478132890110244 +1031 36 loss.adversarial_temperature 0.35065787416804983 +1031 36 optimizer.lr 
0.030048232957436204 +1031 36 negative_sampler.num_negs_per_pos 90.0 +1031 36 training.batch_size 0.0 +1031 37 model.embedding_dim 2.0 +1031 37 model.scoring_fct_norm 2.0 +1031 37 loss.margin 22.350193236242838 +1031 37 loss.adversarial_temperature 0.3741440555845079 +1031 37 optimizer.lr 0.013681876135596923 +1031 37 negative_sampler.num_negs_per_pos 47.0 +1031 37 training.batch_size 2.0 +1031 38 model.embedding_dim 1.0 +1031 38 model.scoring_fct_norm 2.0 +1031 38 loss.margin 8.304580329096424 +1031 38 loss.adversarial_temperature 0.4940543619161428 +1031 38 optimizer.lr 0.022313536101974535 +1031 38 negative_sampler.num_negs_per_pos 16.0 +1031 38 training.batch_size 0.0 +1031 39 model.embedding_dim 2.0 +1031 39 model.scoring_fct_norm 1.0 +1031 39 loss.margin 21.146069437208737 +1031 39 loss.adversarial_temperature 0.1228198205261973 +1031 39 optimizer.lr 0.01703273483571788 +1031 39 negative_sampler.num_negs_per_pos 55.0 +1031 39 training.batch_size 2.0 +1031 40 model.embedding_dim 2.0 +1031 40 model.scoring_fct_norm 2.0 +1031 40 loss.margin 21.683401019361863 +1031 40 loss.adversarial_temperature 0.9345283025322537 +1031 40 optimizer.lr 0.009531468149526505 +1031 40 negative_sampler.num_negs_per_pos 32.0 +1031 40 training.batch_size 0.0 +1031 41 model.embedding_dim 1.0 +1031 41 model.scoring_fct_norm 2.0 +1031 41 loss.margin 20.892713639095025 +1031 41 loss.adversarial_temperature 0.9023253150694786 +1031 41 optimizer.lr 0.0020274399112113813 +1031 41 negative_sampler.num_negs_per_pos 98.0 +1031 41 training.batch_size 1.0 +1031 42 model.embedding_dim 0.0 +1031 42 model.scoring_fct_norm 1.0 +1031 42 loss.margin 13.808307643540854 +1031 42 loss.adversarial_temperature 0.5448096753134856 +1031 42 optimizer.lr 0.012139295158361192 +1031 42 negative_sampler.num_negs_per_pos 3.0 +1031 42 training.batch_size 0.0 +1031 43 model.embedding_dim 1.0 +1031 43 model.scoring_fct_norm 1.0 +1031 43 loss.margin 25.867719917136377 +1031 43 loss.adversarial_temperature 
0.10229619486406125 +1031 43 optimizer.lr 0.010232450564003067 +1031 43 negative_sampler.num_negs_per_pos 83.0 +1031 43 training.batch_size 0.0 +1031 44 model.embedding_dim 0.0 +1031 44 model.scoring_fct_norm 1.0 +1031 44 loss.margin 12.0979760226688 +1031 44 loss.adversarial_temperature 0.5445693416745792 +1031 44 optimizer.lr 0.08204849216096505 +1031 44 negative_sampler.num_negs_per_pos 69.0 +1031 44 training.batch_size 0.0 +1031 45 model.embedding_dim 0.0 +1031 45 model.scoring_fct_norm 1.0 +1031 45 loss.margin 17.781077403512384 +1031 45 loss.adversarial_temperature 0.819339270047736 +1031 45 optimizer.lr 0.02677402898424278 +1031 45 negative_sampler.num_negs_per_pos 95.0 +1031 45 training.batch_size 1.0 +1031 46 model.embedding_dim 2.0 +1031 46 model.scoring_fct_norm 1.0 +1031 46 loss.margin 8.375563134674678 +1031 46 loss.adversarial_temperature 0.5160630133462467 +1031 46 optimizer.lr 0.010593308705622789 +1031 46 negative_sampler.num_negs_per_pos 76.0 +1031 46 training.batch_size 0.0 +1031 47 model.embedding_dim 2.0 +1031 47 model.scoring_fct_norm 1.0 +1031 47 loss.margin 16.473228146687813 +1031 47 loss.adversarial_temperature 0.16456359757719843 +1031 47 optimizer.lr 0.02590363154942848 +1031 47 negative_sampler.num_negs_per_pos 49.0 +1031 47 training.batch_size 0.0 +1031 48 model.embedding_dim 2.0 +1031 48 model.scoring_fct_norm 2.0 +1031 48 loss.margin 15.000808346914312 +1031 48 loss.adversarial_temperature 0.45469972849495055 +1031 48 optimizer.lr 0.039281875772809276 +1031 48 negative_sampler.num_negs_per_pos 87.0 +1031 48 training.batch_size 1.0 +1031 49 model.embedding_dim 0.0 +1031 49 model.scoring_fct_norm 1.0 +1031 49 loss.margin 15.768576554838964 +1031 49 loss.adversarial_temperature 0.6510771958068385 +1031 49 optimizer.lr 0.031066102106577773 +1031 49 negative_sampler.num_negs_per_pos 71.0 +1031 49 training.batch_size 2.0 +1031 50 model.embedding_dim 2.0 +1031 50 model.scoring_fct_norm 1.0 +1031 50 loss.margin 12.67209012652912 +1031 50 
loss.adversarial_temperature 0.8222459558064391 +1031 50 optimizer.lr 0.00809425100038671 +1031 50 negative_sampler.num_negs_per_pos 49.0 +1031 50 training.batch_size 1.0 +1031 51 model.embedding_dim 0.0 +1031 51 model.scoring_fct_norm 1.0 +1031 51 loss.margin 29.327813083062498 +1031 51 loss.adversarial_temperature 0.7621534726295971 +1031 51 optimizer.lr 0.04103338826654128 +1031 51 negative_sampler.num_negs_per_pos 61.0 +1031 51 training.batch_size 0.0 +1031 52 model.embedding_dim 0.0 +1031 52 model.scoring_fct_norm 2.0 +1031 52 loss.margin 23.87914125378746 +1031 52 loss.adversarial_temperature 0.39949210033792604 +1031 52 optimizer.lr 0.06586023150724823 +1031 52 negative_sampler.num_negs_per_pos 94.0 +1031 52 training.batch_size 2.0 +1031 53 model.embedding_dim 1.0 +1031 53 model.scoring_fct_norm 2.0 +1031 53 loss.margin 26.514463042771037 +1031 53 loss.adversarial_temperature 0.24170748074188741 +1031 53 optimizer.lr 0.01240647078861618 +1031 53 negative_sampler.num_negs_per_pos 45.0 +1031 53 training.batch_size 1.0 +1031 54 model.embedding_dim 1.0 +1031 54 model.scoring_fct_norm 2.0 +1031 54 loss.margin 25.978462410747692 +1031 54 loss.adversarial_temperature 0.7183688124149668 +1031 54 optimizer.lr 0.05930491267818316 +1031 54 negative_sampler.num_negs_per_pos 23.0 +1031 54 training.batch_size 1.0 +1031 55 model.embedding_dim 1.0 +1031 55 model.scoring_fct_norm 1.0 +1031 55 loss.margin 15.853484336504604 +1031 55 loss.adversarial_temperature 0.8207040039780158 +1031 55 optimizer.lr 0.010458799666573397 +1031 55 negative_sampler.num_negs_per_pos 85.0 +1031 55 training.batch_size 1.0 +1031 56 model.embedding_dim 0.0 +1031 56 model.scoring_fct_norm 2.0 +1031 56 loss.margin 24.82767520895274 +1031 56 loss.adversarial_temperature 0.33622994297884745 +1031 56 optimizer.lr 0.04910816101042622 +1031 56 negative_sampler.num_negs_per_pos 80.0 +1031 56 training.batch_size 2.0 +1031 57 model.embedding_dim 2.0 +1031 57 model.scoring_fct_norm 1.0 +1031 57 loss.margin 
28.4167827243103 +1031 57 loss.adversarial_temperature 0.2847294788514605 +1031 57 optimizer.lr 0.001404921004345578 +1031 57 negative_sampler.num_negs_per_pos 69.0 +1031 57 training.batch_size 1.0 +1031 58 model.embedding_dim 0.0 +1031 58 model.scoring_fct_norm 1.0 +1031 58 loss.margin 29.929526423481157 +1031 58 loss.adversarial_temperature 0.792004955054232 +1031 58 optimizer.lr 0.013329708806483357 +1031 58 negative_sampler.num_negs_per_pos 65.0 +1031 58 training.batch_size 0.0 +1031 59 model.embedding_dim 1.0 +1031 59 model.scoring_fct_norm 2.0 +1031 59 loss.margin 10.807587475428548 +1031 59 loss.adversarial_temperature 0.4286662254276268 +1031 59 optimizer.lr 0.004958514167130258 +1031 59 negative_sampler.num_negs_per_pos 83.0 +1031 59 training.batch_size 2.0 +1031 60 model.embedding_dim 0.0 +1031 60 model.scoring_fct_norm 1.0 +1031 60 loss.margin 10.732901717058448 +1031 60 loss.adversarial_temperature 0.8384721215976322 +1031 60 optimizer.lr 0.034233034131927416 +1031 60 negative_sampler.num_negs_per_pos 87.0 +1031 60 training.batch_size 2.0 +1031 61 model.embedding_dim 0.0 +1031 61 model.scoring_fct_norm 1.0 +1031 61 loss.margin 6.819575476384079 +1031 61 loss.adversarial_temperature 0.35359823648171584 +1031 61 optimizer.lr 0.0023250661958753747 +1031 61 negative_sampler.num_negs_per_pos 84.0 +1031 61 training.batch_size 0.0 +1031 62 model.embedding_dim 0.0 +1031 62 model.scoring_fct_norm 1.0 +1031 62 loss.margin 19.287024773435938 +1031 62 loss.adversarial_temperature 0.7866590497484164 +1031 62 optimizer.lr 0.016988866067314097 +1031 62 negative_sampler.num_negs_per_pos 1.0 +1031 62 training.batch_size 2.0 +1031 63 model.embedding_dim 0.0 +1031 63 model.scoring_fct_norm 2.0 +1031 63 loss.margin 9.76043659108451 +1031 63 loss.adversarial_temperature 0.47092156094013893 +1031 63 optimizer.lr 0.005143936443487488 +1031 63 negative_sampler.num_negs_per_pos 60.0 +1031 63 training.batch_size 1.0 +1031 64 model.embedding_dim 0.0 +1031 64 
model.scoring_fct_norm 2.0 +1031 64 loss.margin 27.703780078331743 +1031 64 loss.adversarial_temperature 0.3359833061488541 +1031 64 optimizer.lr 0.06628556973263673 +1031 64 negative_sampler.num_negs_per_pos 25.0 +1031 64 training.batch_size 1.0 +1031 65 model.embedding_dim 2.0 +1031 65 model.scoring_fct_norm 2.0 +1031 65 loss.margin 21.892999738736606 +1031 65 loss.adversarial_temperature 0.5183050120162005 +1031 65 optimizer.lr 0.001344040505987656 +1031 65 negative_sampler.num_negs_per_pos 89.0 +1031 65 training.batch_size 2.0 +1031 66 model.embedding_dim 0.0 +1031 66 model.scoring_fct_norm 1.0 +1031 66 loss.margin 29.152454918471317 +1031 66 loss.adversarial_temperature 0.9415716385003275 +1031 66 optimizer.lr 0.009056681080078809 +1031 66 negative_sampler.num_negs_per_pos 79.0 +1031 66 training.batch_size 1.0 +1031 67 model.embedding_dim 0.0 +1031 67 model.scoring_fct_norm 1.0 +1031 67 loss.margin 4.114618356814816 +1031 67 loss.adversarial_temperature 0.6255868388507424 +1031 67 optimizer.lr 0.055360488438025754 +1031 67 negative_sampler.num_negs_per_pos 36.0 +1031 67 training.batch_size 0.0 +1031 68 model.embedding_dim 0.0 +1031 68 model.scoring_fct_norm 1.0 +1031 68 loss.margin 22.62073697295134 +1031 68 loss.adversarial_temperature 0.9519633301284363 +1031 68 optimizer.lr 0.005399771493194039 +1031 68 negative_sampler.num_negs_per_pos 16.0 +1031 68 training.batch_size 1.0 +1031 69 model.embedding_dim 1.0 +1031 69 model.scoring_fct_norm 1.0 +1031 69 loss.margin 24.33670018821543 +1031 69 loss.adversarial_temperature 0.3690646046611783 +1031 69 optimizer.lr 0.005834006027440274 +1031 69 negative_sampler.num_negs_per_pos 13.0 +1031 69 training.batch_size 0.0 +1031 70 model.embedding_dim 0.0 +1031 70 model.scoring_fct_norm 2.0 +1031 70 loss.margin 17.661817630532024 +1031 70 loss.adversarial_temperature 0.43177755314713984 +1031 70 optimizer.lr 0.01921309273198323 +1031 70 negative_sampler.num_negs_per_pos 40.0 +1031 70 training.batch_size 0.0 +1031 71 
model.embedding_dim 2.0 +1031 71 model.scoring_fct_norm 1.0 +1031 71 loss.margin 28.999162276366253 +1031 71 loss.adversarial_temperature 0.796317728158963 +1031 71 optimizer.lr 0.012749413828783637 +1031 71 negative_sampler.num_negs_per_pos 1.0 +1031 71 training.batch_size 0.0 +1031 72 model.embedding_dim 0.0 +1031 72 model.scoring_fct_norm 1.0 +1031 72 loss.margin 27.738294335921523 +1031 72 loss.adversarial_temperature 0.9690477890734647 +1031 72 optimizer.lr 0.021867950885308098 +1031 72 negative_sampler.num_negs_per_pos 95.0 +1031 72 training.batch_size 0.0 +1031 73 model.embedding_dim 1.0 +1031 73 model.scoring_fct_norm 2.0 +1031 73 loss.margin 25.783948470440162 +1031 73 loss.adversarial_temperature 0.20963710998498275 +1031 73 optimizer.lr 0.0670363455630178 +1031 73 negative_sampler.num_negs_per_pos 31.0 +1031 73 training.batch_size 0.0 +1031 74 model.embedding_dim 1.0 +1031 74 model.scoring_fct_norm 1.0 +1031 74 loss.margin 22.18967540821968 +1031 74 loss.adversarial_temperature 0.24738560169052354 +1031 74 optimizer.lr 0.005017008196107748 +1031 74 negative_sampler.num_negs_per_pos 92.0 +1031 74 training.batch_size 1.0 +1031 75 model.embedding_dim 2.0 +1031 75 model.scoring_fct_norm 1.0 +1031 75 loss.margin 4.423193814289618 +1031 75 loss.adversarial_temperature 0.6520146338611862 +1031 75 optimizer.lr 0.08315905533576932 +1031 75 negative_sampler.num_negs_per_pos 35.0 +1031 75 training.batch_size 0.0 +1031 76 model.embedding_dim 1.0 +1031 76 model.scoring_fct_norm 2.0 +1031 76 loss.margin 1.3359966027306975 +1031 76 loss.adversarial_temperature 0.5272247242804343 +1031 76 optimizer.lr 0.015088368973199874 +1031 76 negative_sampler.num_negs_per_pos 94.0 +1031 76 training.batch_size 0.0 +1031 77 model.embedding_dim 2.0 +1031 77 model.scoring_fct_norm 1.0 +1031 77 loss.margin 11.348575174262168 +1031 77 loss.adversarial_temperature 0.7942371776543452 +1031 77 optimizer.lr 0.06785517296589653 +1031 77 negative_sampler.num_negs_per_pos 65.0 +1031 77 
training.batch_size 1.0 +1031 78 model.embedding_dim 0.0 +1031 78 model.scoring_fct_norm 2.0 +1031 78 loss.margin 22.825281460793992 +1031 78 loss.adversarial_temperature 0.35398554177932384 +1031 78 optimizer.lr 0.05489806493532903 +1031 78 negative_sampler.num_negs_per_pos 54.0 +1031 78 training.batch_size 0.0 +1031 79 model.embedding_dim 1.0 +1031 79 model.scoring_fct_norm 1.0 +1031 79 loss.margin 15.189765770566748 +1031 79 loss.adversarial_temperature 0.13318041258186133 +1031 79 optimizer.lr 0.0952786907428314 +1031 79 negative_sampler.num_negs_per_pos 76.0 +1031 79 training.batch_size 0.0 +1031 80 model.embedding_dim 2.0 +1031 80 model.scoring_fct_norm 1.0 +1031 80 loss.margin 29.822824854299824 +1031 80 loss.adversarial_temperature 0.8518537955414893 +1031 80 optimizer.lr 0.0034269436069801935 +1031 80 negative_sampler.num_negs_per_pos 55.0 +1031 80 training.batch_size 0.0 +1031 81 model.embedding_dim 2.0 +1031 81 model.scoring_fct_norm 1.0 +1031 81 loss.margin 12.333156679775685 +1031 81 loss.adversarial_temperature 0.6317516769791303 +1031 81 optimizer.lr 0.025716699105168953 +1031 81 negative_sampler.num_negs_per_pos 35.0 +1031 81 training.batch_size 0.0 +1031 82 model.embedding_dim 2.0 +1031 82 model.scoring_fct_norm 1.0 +1031 82 loss.margin 18.004362817637084 +1031 82 loss.adversarial_temperature 0.9050113961047449 +1031 82 optimizer.lr 0.012666662071651935 +1031 82 negative_sampler.num_negs_per_pos 75.0 +1031 82 training.batch_size 1.0 +1031 83 model.embedding_dim 0.0 +1031 83 model.scoring_fct_norm 2.0 +1031 83 loss.margin 24.44124496285672 +1031 83 loss.adversarial_temperature 0.5009554195794683 +1031 83 optimizer.lr 0.023532244560632756 +1031 83 negative_sampler.num_negs_per_pos 17.0 +1031 83 training.batch_size 0.0 +1031 84 model.embedding_dim 1.0 +1031 84 model.scoring_fct_norm 1.0 +1031 84 loss.margin 16.41989870767462 +1031 84 loss.adversarial_temperature 0.9171747230006032 +1031 84 optimizer.lr 0.0014665139212538105 +1031 84 
negative_sampler.num_negs_per_pos 45.0 +1031 84 training.batch_size 2.0 +1031 85 model.embedding_dim 0.0 +1031 85 model.scoring_fct_norm 1.0 +1031 85 loss.margin 28.299099644369655 +1031 85 loss.adversarial_temperature 0.8100554148777922 +1031 85 optimizer.lr 0.001322729288563825 +1031 85 negative_sampler.num_negs_per_pos 85.0 +1031 85 training.batch_size 2.0 +1031 86 model.embedding_dim 2.0 +1031 86 model.scoring_fct_norm 2.0 +1031 86 loss.margin 26.91986416286174 +1031 86 loss.adversarial_temperature 0.5426076800788561 +1031 86 optimizer.lr 0.018869474417711665 +1031 86 negative_sampler.num_negs_per_pos 98.0 +1031 86 training.batch_size 2.0 +1031 87 model.embedding_dim 1.0 +1031 87 model.scoring_fct_norm 1.0 +1031 87 loss.margin 17.551091932108623 +1031 87 loss.adversarial_temperature 0.5659282177844716 +1031 87 optimizer.lr 0.001310755838278018 +1031 87 negative_sampler.num_negs_per_pos 16.0 +1031 87 training.batch_size 0.0 +1031 88 model.embedding_dim 2.0 +1031 88 model.scoring_fct_norm 2.0 +1031 88 loss.margin 23.77311367505899 +1031 88 loss.adversarial_temperature 0.9444686639970601 +1031 88 optimizer.lr 0.03574067308413803 +1031 88 negative_sampler.num_negs_per_pos 79.0 +1031 88 training.batch_size 0.0 +1031 89 model.embedding_dim 1.0 +1031 89 model.scoring_fct_norm 2.0 +1031 89 loss.margin 25.980525562027072 +1031 89 loss.adversarial_temperature 0.7854833847069438 +1031 89 optimizer.lr 0.0015415275561165363 +1031 89 negative_sampler.num_negs_per_pos 88.0 +1031 89 training.batch_size 0.0 +1031 90 model.embedding_dim 1.0 +1031 90 model.scoring_fct_norm 1.0 +1031 90 loss.margin 4.963018431736063 +1031 90 loss.adversarial_temperature 0.3941625603857075 +1031 90 optimizer.lr 0.010529263157445854 +1031 90 negative_sampler.num_negs_per_pos 87.0 +1031 90 training.batch_size 2.0 +1031 91 model.embedding_dim 1.0 +1031 91 model.scoring_fct_norm 2.0 +1031 91 loss.margin 29.01518954569388 +1031 91 loss.adversarial_temperature 0.9964536175274699 +1031 91 optimizer.lr 
0.0010349148526169008 +1031 91 negative_sampler.num_negs_per_pos 24.0 +1031 91 training.batch_size 1.0 +1031 92 model.embedding_dim 1.0 +1031 92 model.scoring_fct_norm 2.0 +1031 92 loss.margin 17.110111165436145 +1031 92 loss.adversarial_temperature 0.5375881312902877 +1031 92 optimizer.lr 0.00864217719962241 +1031 92 negative_sampler.num_negs_per_pos 63.0 +1031 92 training.batch_size 2.0 +1031 93 model.embedding_dim 0.0 +1031 93 model.scoring_fct_norm 2.0 +1031 93 loss.margin 1.5692141606985182 +1031 93 loss.adversarial_temperature 0.25974649189748356 +1031 93 optimizer.lr 0.005086302224060394 +1031 93 negative_sampler.num_negs_per_pos 37.0 +1031 93 training.batch_size 2.0 +1031 94 model.embedding_dim 2.0 +1031 94 model.scoring_fct_norm 2.0 +1031 94 loss.margin 13.485883860975271 +1031 94 loss.adversarial_temperature 0.6931950417162892 +1031 94 optimizer.lr 0.09890493427455747 +1031 94 negative_sampler.num_negs_per_pos 65.0 +1031 94 training.batch_size 1.0 +1031 95 model.embedding_dim 1.0 +1031 95 model.scoring_fct_norm 2.0 +1031 95 loss.margin 12.23437086710268 +1031 95 loss.adversarial_temperature 0.6524082472183119 +1031 95 optimizer.lr 0.011454397511048976 +1031 95 negative_sampler.num_negs_per_pos 2.0 +1031 95 training.batch_size 2.0 +1031 96 model.embedding_dim 2.0 +1031 96 model.scoring_fct_norm 1.0 +1031 96 loss.margin 8.520140269891805 +1031 96 loss.adversarial_temperature 0.1613619727568379 +1031 96 optimizer.lr 0.008082758543186613 +1031 96 negative_sampler.num_negs_per_pos 23.0 +1031 96 training.batch_size 2.0 +1031 97 model.embedding_dim 2.0 +1031 97 model.scoring_fct_norm 2.0 +1031 97 loss.margin 8.09238406976911 +1031 97 loss.adversarial_temperature 0.2283039396897888 +1031 97 optimizer.lr 0.0010028202013833843 +1031 97 negative_sampler.num_negs_per_pos 23.0 +1031 97 training.batch_size 1.0 +1031 98 model.embedding_dim 1.0 +1031 98 model.scoring_fct_norm 1.0 +1031 98 loss.margin 23.65205025300524 +1031 98 loss.adversarial_temperature 
0.5031867308006587 +1031 98 optimizer.lr 0.002426496210898793 +1031 98 negative_sampler.num_negs_per_pos 39.0 +1031 98 training.batch_size 2.0 +1031 99 model.embedding_dim 0.0 +1031 99 model.scoring_fct_norm 1.0 +1031 99 loss.margin 28.198395279359197 +1031 99 loss.adversarial_temperature 0.5787899394600514 +1031 99 optimizer.lr 0.0014400204729191615 +1031 99 negative_sampler.num_negs_per_pos 88.0 +1031 99 training.batch_size 1.0 +1031 100 model.embedding_dim 0.0 +1031 100 model.scoring_fct_norm 2.0 +1031 100 loss.margin 28.943873622818455 +1031 100 loss.adversarial_temperature 0.8479243503633416 +1031 100 optimizer.lr 0.04031097439473728 +1031 100 negative_sampler.num_negs_per_pos 4.0 +1031 100 training.batch_size 1.0 +1031 1 dataset """kinships""" +1031 1 model """unstructuredmodel""" +1031 1 loss """nssa""" +1031 1 regularizer """no""" +1031 1 optimizer """adam""" +1031 1 training_loop """owa""" +1031 1 negative_sampler """basic""" +1031 1 evaluator """rankbased""" +1031 2 dataset """kinships""" +1031 2 model """unstructuredmodel""" +1031 2 loss """nssa""" +1031 2 regularizer """no""" +1031 2 optimizer """adam""" +1031 2 training_loop """owa""" +1031 2 negative_sampler """basic""" +1031 2 evaluator """rankbased""" +1031 3 dataset """kinships""" +1031 3 model """unstructuredmodel""" +1031 3 loss """nssa""" +1031 3 regularizer """no""" +1031 3 optimizer """adam""" +1031 3 training_loop """owa""" +1031 3 negative_sampler """basic""" +1031 3 evaluator """rankbased""" +1031 4 dataset """kinships""" +1031 4 model """unstructuredmodel""" +1031 4 loss """nssa""" +1031 4 regularizer """no""" +1031 4 optimizer """adam""" +1031 4 training_loop """owa""" +1031 4 negative_sampler """basic""" +1031 4 evaluator """rankbased""" +1031 5 dataset """kinships""" +1031 5 model """unstructuredmodel""" +1031 5 loss """nssa""" +1031 5 regularizer """no""" +1031 5 optimizer """adam""" +1031 5 training_loop """owa""" +1031 5 negative_sampler """basic""" +1031 5 evaluator """rankbased""" 
+1031 6 dataset """kinships""" +1031 6 model """unstructuredmodel""" +1031 6 loss """nssa""" +1031 6 regularizer """no""" +1031 6 optimizer """adam""" +1031 6 training_loop """owa""" +1031 6 negative_sampler """basic""" +1031 6 evaluator """rankbased""" +1031 7 dataset """kinships""" +1031 7 model """unstructuredmodel""" +1031 7 loss """nssa""" +1031 7 regularizer """no""" +1031 7 optimizer """adam""" +1031 7 training_loop """owa""" +1031 7 negative_sampler """basic""" +1031 7 evaluator """rankbased""" +1031 8 dataset """kinships""" +1031 8 model """unstructuredmodel""" +1031 8 loss """nssa""" +1031 8 regularizer """no""" +1031 8 optimizer """adam""" +1031 8 training_loop """owa""" +1031 8 negative_sampler """basic""" +1031 8 evaluator """rankbased""" +1031 9 dataset """kinships""" +1031 9 model """unstructuredmodel""" +1031 9 loss """nssa""" +1031 9 regularizer """no""" +1031 9 optimizer """adam""" +1031 9 training_loop """owa""" +1031 9 negative_sampler """basic""" +1031 9 evaluator """rankbased""" +1031 10 dataset """kinships""" +1031 10 model """unstructuredmodel""" +1031 10 loss """nssa""" +1031 10 regularizer """no""" +1031 10 optimizer """adam""" +1031 10 training_loop """owa""" +1031 10 negative_sampler """basic""" +1031 10 evaluator """rankbased""" +1031 11 dataset """kinships""" +1031 11 model """unstructuredmodel""" +1031 11 loss """nssa""" +1031 11 regularizer """no""" +1031 11 optimizer """adam""" +1031 11 training_loop """owa""" +1031 11 negative_sampler """basic""" +1031 11 evaluator """rankbased""" +1031 12 dataset """kinships""" +1031 12 model """unstructuredmodel""" +1031 12 loss """nssa""" +1031 12 regularizer """no""" +1031 12 optimizer """adam""" +1031 12 training_loop """owa""" +1031 12 negative_sampler """basic""" +1031 12 evaluator """rankbased""" +1031 13 dataset """kinships""" +1031 13 model """unstructuredmodel""" +1031 13 loss """nssa""" +1031 13 regularizer """no""" +1031 13 optimizer """adam""" +1031 13 training_loop """owa""" +1031 13 
negative_sampler """basic""" +1031 13 evaluator """rankbased""" +1031 14 dataset """kinships""" +1031 14 model """unstructuredmodel""" +1031 14 loss """nssa""" +1031 14 regularizer """no""" +1031 14 optimizer """adam""" +1031 14 training_loop """owa""" +1031 14 negative_sampler """basic""" +1031 14 evaluator """rankbased""" +1031 15 dataset """kinships""" +1031 15 model """unstructuredmodel""" +1031 15 loss """nssa""" +1031 15 regularizer """no""" +1031 15 optimizer """adam""" +1031 15 training_loop """owa""" +1031 15 negative_sampler """basic""" +1031 15 evaluator """rankbased""" +1031 16 dataset """kinships""" +1031 16 model """unstructuredmodel""" +1031 16 loss """nssa""" +1031 16 regularizer """no""" +1031 16 optimizer """adam""" +1031 16 training_loop """owa""" +1031 16 negative_sampler """basic""" +1031 16 evaluator """rankbased""" +1031 17 dataset """kinships""" +1031 17 model """unstructuredmodel""" +1031 17 loss """nssa""" +1031 17 regularizer """no""" +1031 17 optimizer """adam""" +1031 17 training_loop """owa""" +1031 17 negative_sampler """basic""" +1031 17 evaluator """rankbased""" +1031 18 dataset """kinships""" +1031 18 model """unstructuredmodel""" +1031 18 loss """nssa""" +1031 18 regularizer """no""" +1031 18 optimizer """adam""" +1031 18 training_loop """owa""" +1031 18 negative_sampler """basic""" +1031 18 evaluator """rankbased""" +1031 19 dataset """kinships""" +1031 19 model """unstructuredmodel""" +1031 19 loss """nssa""" +1031 19 regularizer """no""" +1031 19 optimizer """adam""" +1031 19 training_loop """owa""" +1031 19 negative_sampler """basic""" +1031 19 evaluator """rankbased""" +1031 20 dataset """kinships""" +1031 20 model """unstructuredmodel""" +1031 20 loss """nssa""" +1031 20 regularizer """no""" +1031 20 optimizer """adam""" +1031 20 training_loop """owa""" +1031 20 negative_sampler """basic""" +1031 20 evaluator """rankbased""" +1031 21 dataset """kinships""" +1031 21 model """unstructuredmodel""" +1031 21 loss """nssa""" +1031 
21 regularizer """no""" +1031 21 optimizer """adam""" +1031 21 training_loop """owa""" +1031 21 negative_sampler """basic""" +1031 21 evaluator """rankbased""" +1031 22 dataset """kinships""" +1031 22 model """unstructuredmodel""" +1031 22 loss """nssa""" +1031 22 regularizer """no""" +1031 22 optimizer """adam""" +1031 22 training_loop """owa""" +1031 22 negative_sampler """basic""" +1031 22 evaluator """rankbased""" +1031 23 dataset """kinships""" +1031 23 model """unstructuredmodel""" +1031 23 loss """nssa""" +1031 23 regularizer """no""" +1031 23 optimizer """adam""" +1031 23 training_loop """owa""" +1031 23 negative_sampler """basic""" +1031 23 evaluator """rankbased""" +1031 24 dataset """kinships""" +1031 24 model """unstructuredmodel""" +1031 24 loss """nssa""" +1031 24 regularizer """no""" +1031 24 optimizer """adam""" +1031 24 training_loop """owa""" +1031 24 negative_sampler """basic""" +1031 24 evaluator """rankbased""" +1031 25 dataset """kinships""" +1031 25 model """unstructuredmodel""" +1031 25 loss """nssa""" +1031 25 regularizer """no""" +1031 25 optimizer """adam""" +1031 25 training_loop """owa""" +1031 25 negative_sampler """basic""" +1031 25 evaluator """rankbased""" +1031 26 dataset """kinships""" +1031 26 model """unstructuredmodel""" +1031 26 loss """nssa""" +1031 26 regularizer """no""" +1031 26 optimizer """adam""" +1031 26 training_loop """owa""" +1031 26 negative_sampler """basic""" +1031 26 evaluator """rankbased""" +1031 27 dataset """kinships""" +1031 27 model """unstructuredmodel""" +1031 27 loss """nssa""" +1031 27 regularizer """no""" +1031 27 optimizer """adam""" +1031 27 training_loop """owa""" +1031 27 negative_sampler """basic""" +1031 27 evaluator """rankbased""" +1031 28 dataset """kinships""" +1031 28 model """unstructuredmodel""" +1031 28 loss """nssa""" +1031 28 regularizer """no""" +1031 28 optimizer """adam""" +1031 28 training_loop """owa""" +1031 28 negative_sampler """basic""" +1031 28 evaluator """rankbased""" +1031 
29 dataset """kinships""" +1031 29 model """unstructuredmodel""" +1031 29 loss """nssa""" +1031 29 regularizer """no""" +1031 29 optimizer """adam""" +1031 29 training_loop """owa""" +1031 29 negative_sampler """basic""" +1031 29 evaluator """rankbased""" +1031 30 dataset """kinships""" +1031 30 model """unstructuredmodel""" +1031 30 loss """nssa""" +1031 30 regularizer """no""" +1031 30 optimizer """adam""" +1031 30 training_loop """owa""" +1031 30 negative_sampler """basic""" +1031 30 evaluator """rankbased""" +1031 31 dataset """kinships""" +1031 31 model """unstructuredmodel""" +1031 31 loss """nssa""" +1031 31 regularizer """no""" +1031 31 optimizer """adam""" +1031 31 training_loop """owa""" +1031 31 negative_sampler """basic""" +1031 31 evaluator """rankbased""" +1031 32 dataset """kinships""" +1031 32 model """unstructuredmodel""" +1031 32 loss """nssa""" +1031 32 regularizer """no""" +1031 32 optimizer """adam""" +1031 32 training_loop """owa""" +1031 32 negative_sampler """basic""" +1031 32 evaluator """rankbased""" +1031 33 dataset """kinships""" +1031 33 model """unstructuredmodel""" +1031 33 loss """nssa""" +1031 33 regularizer """no""" +1031 33 optimizer """adam""" +1031 33 training_loop """owa""" +1031 33 negative_sampler """basic""" +1031 33 evaluator """rankbased""" +1031 34 dataset """kinships""" +1031 34 model """unstructuredmodel""" +1031 34 loss """nssa""" +1031 34 regularizer """no""" +1031 34 optimizer """adam""" +1031 34 training_loop """owa""" +1031 34 negative_sampler """basic""" +1031 34 evaluator """rankbased""" +1031 35 dataset """kinships""" +1031 35 model """unstructuredmodel""" +1031 35 loss """nssa""" +1031 35 regularizer """no""" +1031 35 optimizer """adam""" +1031 35 training_loop """owa""" +1031 35 negative_sampler """basic""" +1031 35 evaluator """rankbased""" +1031 36 dataset """kinships""" +1031 36 model """unstructuredmodel""" +1031 36 loss """nssa""" +1031 36 regularizer """no""" +1031 36 optimizer """adam""" +1031 36 
training_loop """owa""" +1031 36 negative_sampler """basic""" +1031 36 evaluator """rankbased""" +1031 37 dataset """kinships""" +1031 37 model """unstructuredmodel""" +1031 37 loss """nssa""" +1031 37 regularizer """no""" +1031 37 optimizer """adam""" +1031 37 training_loop """owa""" +1031 37 negative_sampler """basic""" +1031 37 evaluator """rankbased""" +1031 38 dataset """kinships""" +1031 38 model """unstructuredmodel""" +1031 38 loss """nssa""" +1031 38 regularizer """no""" +1031 38 optimizer """adam""" +1031 38 training_loop """owa""" +1031 38 negative_sampler """basic""" +1031 38 evaluator """rankbased""" +1031 39 dataset """kinships""" +1031 39 model """unstructuredmodel""" +1031 39 loss """nssa""" +1031 39 regularizer """no""" +1031 39 optimizer """adam""" +1031 39 training_loop """owa""" +1031 39 negative_sampler """basic""" +1031 39 evaluator """rankbased""" +1031 40 dataset """kinships""" +1031 40 model """unstructuredmodel""" +1031 40 loss """nssa""" +1031 40 regularizer """no""" +1031 40 optimizer """adam""" +1031 40 training_loop """owa""" +1031 40 negative_sampler """basic""" +1031 40 evaluator """rankbased""" +1031 41 dataset """kinships""" +1031 41 model """unstructuredmodel""" +1031 41 loss """nssa""" +1031 41 regularizer """no""" +1031 41 optimizer """adam""" +1031 41 training_loop """owa""" +1031 41 negative_sampler """basic""" +1031 41 evaluator """rankbased""" +1031 42 dataset """kinships""" +1031 42 model """unstructuredmodel""" +1031 42 loss """nssa""" +1031 42 regularizer """no""" +1031 42 optimizer """adam""" +1031 42 training_loop """owa""" +1031 42 negative_sampler """basic""" +1031 42 evaluator """rankbased""" +1031 43 dataset """kinships""" +1031 43 model """unstructuredmodel""" +1031 43 loss """nssa""" +1031 43 regularizer """no""" +1031 43 optimizer """adam""" +1031 43 training_loop """owa""" +1031 43 negative_sampler """basic""" +1031 43 evaluator """rankbased""" +1031 44 dataset """kinships""" +1031 44 model 
"""unstructuredmodel""" +1031 44 loss """nssa""" +1031 44 regularizer """no""" +1031 44 optimizer """adam""" +1031 44 training_loop """owa""" +1031 44 negative_sampler """basic""" +1031 44 evaluator """rankbased""" +1031 45 dataset """kinships""" +1031 45 model """unstructuredmodel""" +1031 45 loss """nssa""" +1031 45 regularizer """no""" +1031 45 optimizer """adam""" +1031 45 training_loop """owa""" +1031 45 negative_sampler """basic""" +1031 45 evaluator """rankbased""" +1031 46 dataset """kinships""" +1031 46 model """unstructuredmodel""" +1031 46 loss """nssa""" +1031 46 regularizer """no""" +1031 46 optimizer """adam""" +1031 46 training_loop """owa""" +1031 46 negative_sampler """basic""" +1031 46 evaluator """rankbased""" +1031 47 dataset """kinships""" +1031 47 model """unstructuredmodel""" +1031 47 loss """nssa""" +1031 47 regularizer """no""" +1031 47 optimizer """adam""" +1031 47 training_loop """owa""" +1031 47 negative_sampler """basic""" +1031 47 evaluator """rankbased""" +1031 48 dataset """kinships""" +1031 48 model """unstructuredmodel""" +1031 48 loss """nssa""" +1031 48 regularizer """no""" +1031 48 optimizer """adam""" +1031 48 training_loop """owa""" +1031 48 negative_sampler """basic""" +1031 48 evaluator """rankbased""" +1031 49 dataset """kinships""" +1031 49 model """unstructuredmodel""" +1031 49 loss """nssa""" +1031 49 regularizer """no""" +1031 49 optimizer """adam""" +1031 49 training_loop """owa""" +1031 49 negative_sampler """basic""" +1031 49 evaluator """rankbased""" +1031 50 dataset """kinships""" +1031 50 model """unstructuredmodel""" +1031 50 loss """nssa""" +1031 50 regularizer """no""" +1031 50 optimizer """adam""" +1031 50 training_loop """owa""" +1031 50 negative_sampler """basic""" +1031 50 evaluator """rankbased""" +1031 51 dataset """kinships""" +1031 51 model """unstructuredmodel""" +1031 51 loss """nssa""" +1031 51 regularizer """no""" +1031 51 optimizer """adam""" +1031 51 training_loop """owa""" +1031 51 
negative_sampler """basic""" +1031 51 evaluator """rankbased""" +1031 52 dataset """kinships""" +1031 52 model """unstructuredmodel""" +1031 52 loss """nssa""" +1031 52 regularizer """no""" +1031 52 optimizer """adam""" +1031 52 training_loop """owa""" +1031 52 negative_sampler """basic""" +1031 52 evaluator """rankbased""" +1031 53 dataset """kinships""" +1031 53 model """unstructuredmodel""" +1031 53 loss """nssa""" +1031 53 regularizer """no""" +1031 53 optimizer """adam""" +1031 53 training_loop """owa""" +1031 53 negative_sampler """basic""" +1031 53 evaluator """rankbased""" +1031 54 dataset """kinships""" +1031 54 model """unstructuredmodel""" +1031 54 loss """nssa""" +1031 54 regularizer """no""" +1031 54 optimizer """adam""" +1031 54 training_loop """owa""" +1031 54 negative_sampler """basic""" +1031 54 evaluator """rankbased""" +1031 55 dataset """kinships""" +1031 55 model """unstructuredmodel""" +1031 55 loss """nssa""" +1031 55 regularizer """no""" +1031 55 optimizer """adam""" +1031 55 training_loop """owa""" +1031 55 negative_sampler """basic""" +1031 55 evaluator """rankbased""" +1031 56 dataset """kinships""" +1031 56 model """unstructuredmodel""" +1031 56 loss """nssa""" +1031 56 regularizer """no""" +1031 56 optimizer """adam""" +1031 56 training_loop """owa""" +1031 56 negative_sampler """basic""" +1031 56 evaluator """rankbased""" +1031 57 dataset """kinships""" +1031 57 model """unstructuredmodel""" +1031 57 loss """nssa""" +1031 57 regularizer """no""" +1031 57 optimizer """adam""" +1031 57 training_loop """owa""" +1031 57 negative_sampler """basic""" +1031 57 evaluator """rankbased""" +1031 58 dataset """kinships""" +1031 58 model """unstructuredmodel""" +1031 58 loss """nssa""" +1031 58 regularizer """no""" +1031 58 optimizer """adam""" +1031 58 training_loop """owa""" +1031 58 negative_sampler """basic""" +1031 58 evaluator """rankbased""" +1031 59 dataset """kinships""" +1031 59 model """unstructuredmodel""" +1031 59 loss """nssa""" +1031 
59 regularizer """no""" +1031 59 optimizer """adam""" +1031 59 training_loop """owa""" +1031 59 negative_sampler """basic""" +1031 59 evaluator """rankbased""" +1031 60 dataset """kinships""" +1031 60 model """unstructuredmodel""" +1031 60 loss """nssa""" +1031 60 regularizer """no""" +1031 60 optimizer """adam""" +1031 60 training_loop """owa""" +1031 60 negative_sampler """basic""" +1031 60 evaluator """rankbased""" +1031 61 dataset """kinships""" +1031 61 model """unstructuredmodel""" +1031 61 loss """nssa""" +1031 61 regularizer """no""" +1031 61 optimizer """adam""" +1031 61 training_loop """owa""" +1031 61 negative_sampler """basic""" +1031 61 evaluator """rankbased""" +1031 62 dataset """kinships""" +1031 62 model """unstructuredmodel""" +1031 62 loss """nssa""" +1031 62 regularizer """no""" +1031 62 optimizer """adam""" +1031 62 training_loop """owa""" +1031 62 negative_sampler """basic""" +1031 62 evaluator """rankbased""" +1031 63 dataset """kinships""" +1031 63 model """unstructuredmodel""" +1031 63 loss """nssa""" +1031 63 regularizer """no""" +1031 63 optimizer """adam""" +1031 63 training_loop """owa""" +1031 63 negative_sampler """basic""" +1031 63 evaluator """rankbased""" +1031 64 dataset """kinships""" +1031 64 model """unstructuredmodel""" +1031 64 loss """nssa""" +1031 64 regularizer """no""" +1031 64 optimizer """adam""" +1031 64 training_loop """owa""" +1031 64 negative_sampler """basic""" +1031 64 evaluator """rankbased""" +1031 65 dataset """kinships""" +1031 65 model """unstructuredmodel""" +1031 65 loss """nssa""" +1031 65 regularizer """no""" +1031 65 optimizer """adam""" +1031 65 training_loop """owa""" +1031 65 negative_sampler """basic""" +1031 65 evaluator """rankbased""" +1031 66 dataset """kinships""" +1031 66 model """unstructuredmodel""" +1031 66 loss """nssa""" +1031 66 regularizer """no""" +1031 66 optimizer """adam""" +1031 66 training_loop """owa""" +1031 66 negative_sampler """basic""" +1031 66 evaluator """rankbased""" +1031 
67 dataset """kinships""" +1031 67 model """unstructuredmodel""" +1031 67 loss """nssa""" +1031 67 regularizer """no""" +1031 67 optimizer """adam""" +1031 67 training_loop """owa""" +1031 67 negative_sampler """basic""" +1031 67 evaluator """rankbased""" +1031 68 dataset """kinships""" +1031 68 model """unstructuredmodel""" +1031 68 loss """nssa""" +1031 68 regularizer """no""" +1031 68 optimizer """adam""" +1031 68 training_loop """owa""" +1031 68 negative_sampler """basic""" +1031 68 evaluator """rankbased""" +1031 69 dataset """kinships""" +1031 69 model """unstructuredmodel""" +1031 69 loss """nssa""" +1031 69 regularizer """no""" +1031 69 optimizer """adam""" +1031 69 training_loop """owa""" +1031 69 negative_sampler """basic""" +1031 69 evaluator """rankbased""" +1031 70 dataset """kinships""" +1031 70 model """unstructuredmodel""" +1031 70 loss """nssa""" +1031 70 regularizer """no""" +1031 70 optimizer """adam""" +1031 70 training_loop """owa""" +1031 70 negative_sampler """basic""" +1031 70 evaluator """rankbased""" +1031 71 dataset """kinships""" +1031 71 model """unstructuredmodel""" +1031 71 loss """nssa""" +1031 71 regularizer """no""" +1031 71 optimizer """adam""" +1031 71 training_loop """owa""" +1031 71 negative_sampler """basic""" +1031 71 evaluator """rankbased""" +1031 72 dataset """kinships""" +1031 72 model """unstructuredmodel""" +1031 72 loss """nssa""" +1031 72 regularizer """no""" +1031 72 optimizer """adam""" +1031 72 training_loop """owa""" +1031 72 negative_sampler """basic""" +1031 72 evaluator """rankbased""" +1031 73 dataset """kinships""" +1031 73 model """unstructuredmodel""" +1031 73 loss """nssa""" +1031 73 regularizer """no""" +1031 73 optimizer """adam""" +1031 73 training_loop """owa""" +1031 73 negative_sampler """basic""" +1031 73 evaluator """rankbased""" +1031 74 dataset """kinships""" +1031 74 model """unstructuredmodel""" +1031 74 loss """nssa""" +1031 74 regularizer """no""" +1031 74 optimizer """adam""" +1031 74 
training_loop """owa""" +1031 74 negative_sampler """basic""" +1031 74 evaluator """rankbased""" +1031 75 dataset """kinships""" +1031 75 model """unstructuredmodel""" +1031 75 loss """nssa""" +1031 75 regularizer """no""" +1031 75 optimizer """adam""" +1031 75 training_loop """owa""" +1031 75 negative_sampler """basic""" +1031 75 evaluator """rankbased""" +1031 76 dataset """kinships""" +1031 76 model """unstructuredmodel""" +1031 76 loss """nssa""" +1031 76 regularizer """no""" +1031 76 optimizer """adam""" +1031 76 training_loop """owa""" +1031 76 negative_sampler """basic""" +1031 76 evaluator """rankbased""" +1031 77 dataset """kinships""" +1031 77 model """unstructuredmodel""" +1031 77 loss """nssa""" +1031 77 regularizer """no""" +1031 77 optimizer """adam""" +1031 77 training_loop """owa""" +1031 77 negative_sampler """basic""" +1031 77 evaluator """rankbased""" +1031 78 dataset """kinships""" +1031 78 model """unstructuredmodel""" +1031 78 loss """nssa""" +1031 78 regularizer """no""" +1031 78 optimizer """adam""" +1031 78 training_loop """owa""" +1031 78 negative_sampler """basic""" +1031 78 evaluator """rankbased""" +1031 79 dataset """kinships""" +1031 79 model """unstructuredmodel""" +1031 79 loss """nssa""" +1031 79 regularizer """no""" +1031 79 optimizer """adam""" +1031 79 training_loop """owa""" +1031 79 negative_sampler """basic""" +1031 79 evaluator """rankbased""" +1031 80 dataset """kinships""" +1031 80 model """unstructuredmodel""" +1031 80 loss """nssa""" +1031 80 regularizer """no""" +1031 80 optimizer """adam""" +1031 80 training_loop """owa""" +1031 80 negative_sampler """basic""" +1031 80 evaluator """rankbased""" +1031 81 dataset """kinships""" +1031 81 model """unstructuredmodel""" +1031 81 loss """nssa""" +1031 81 regularizer """no""" +1031 81 optimizer """adam""" +1031 81 training_loop """owa""" +1031 81 negative_sampler """basic""" +1031 81 evaluator """rankbased""" +1031 82 dataset """kinships""" +1031 82 model 
"""unstructuredmodel""" +1031 82 loss """nssa""" +1031 82 regularizer """no""" +1031 82 optimizer """adam""" +1031 82 training_loop """owa""" +1031 82 negative_sampler """basic""" +1031 82 evaluator """rankbased""" +1031 83 dataset """kinships""" +1031 83 model """unstructuredmodel""" +1031 83 loss """nssa""" +1031 83 regularizer """no""" +1031 83 optimizer """adam""" +1031 83 training_loop """owa""" +1031 83 negative_sampler """basic""" +1031 83 evaluator """rankbased""" +1031 84 dataset """kinships""" +1031 84 model """unstructuredmodel""" +1031 84 loss """nssa""" +1031 84 regularizer """no""" +1031 84 optimizer """adam""" +1031 84 training_loop """owa""" +1031 84 negative_sampler """basic""" +1031 84 evaluator """rankbased""" +1031 85 dataset """kinships""" +1031 85 model """unstructuredmodel""" +1031 85 loss """nssa""" +1031 85 regularizer """no""" +1031 85 optimizer """adam""" +1031 85 training_loop """owa""" +1031 85 negative_sampler """basic""" +1031 85 evaluator """rankbased""" +1031 86 dataset """kinships""" +1031 86 model """unstructuredmodel""" +1031 86 loss """nssa""" +1031 86 regularizer """no""" +1031 86 optimizer """adam""" +1031 86 training_loop """owa""" +1031 86 negative_sampler """basic""" +1031 86 evaluator """rankbased""" +1031 87 dataset """kinships""" +1031 87 model """unstructuredmodel""" +1031 87 loss """nssa""" +1031 87 regularizer """no""" +1031 87 optimizer """adam""" +1031 87 training_loop """owa""" +1031 87 negative_sampler """basic""" +1031 87 evaluator """rankbased""" +1031 88 dataset """kinships""" +1031 88 model """unstructuredmodel""" +1031 88 loss """nssa""" +1031 88 regularizer """no""" +1031 88 optimizer """adam""" +1031 88 training_loop """owa""" +1031 88 negative_sampler """basic""" +1031 88 evaluator """rankbased""" +1031 89 dataset """kinships""" +1031 89 model """unstructuredmodel""" +1031 89 loss """nssa""" +1031 89 regularizer """no""" +1031 89 optimizer """adam""" +1031 89 training_loop """owa""" +1031 89 
negative_sampler """basic""" +1031 89 evaluator """rankbased""" +1031 90 dataset """kinships""" +1031 90 model """unstructuredmodel""" +1031 90 loss """nssa""" +1031 90 regularizer """no""" +1031 90 optimizer """adam""" +1031 90 training_loop """owa""" +1031 90 negative_sampler """basic""" +1031 90 evaluator """rankbased""" +1031 91 dataset """kinships""" +1031 91 model """unstructuredmodel""" +1031 91 loss """nssa""" +1031 91 regularizer """no""" +1031 91 optimizer """adam""" +1031 91 training_loop """owa""" +1031 91 negative_sampler """basic""" +1031 91 evaluator """rankbased""" +1031 92 dataset """kinships""" +1031 92 model """unstructuredmodel""" +1031 92 loss """nssa""" +1031 92 regularizer """no""" +1031 92 optimizer """adam""" +1031 92 training_loop """owa""" +1031 92 negative_sampler """basic""" +1031 92 evaluator """rankbased""" +1031 93 dataset """kinships""" +1031 93 model """unstructuredmodel""" +1031 93 loss """nssa""" +1031 93 regularizer """no""" +1031 93 optimizer """adam""" +1031 93 training_loop """owa""" +1031 93 negative_sampler """basic""" +1031 93 evaluator """rankbased""" +1031 94 dataset """kinships""" +1031 94 model """unstructuredmodel""" +1031 94 loss """nssa""" +1031 94 regularizer """no""" +1031 94 optimizer """adam""" +1031 94 training_loop """owa""" +1031 94 negative_sampler """basic""" +1031 94 evaluator """rankbased""" +1031 95 dataset """kinships""" +1031 95 model """unstructuredmodel""" +1031 95 loss """nssa""" +1031 95 regularizer """no""" +1031 95 optimizer """adam""" +1031 95 training_loop """owa""" +1031 95 negative_sampler """basic""" +1031 95 evaluator """rankbased""" +1031 96 dataset """kinships""" +1031 96 model """unstructuredmodel""" +1031 96 loss """nssa""" +1031 96 regularizer """no""" +1031 96 optimizer """adam""" +1031 96 training_loop """owa""" +1031 96 negative_sampler """basic""" +1031 96 evaluator """rankbased""" +1031 97 dataset """kinships""" +1031 97 model """unstructuredmodel""" +1031 97 loss """nssa""" +1031 
97 regularizer """no""" +1031 97 optimizer """adam""" +1031 97 training_loop """owa""" +1031 97 negative_sampler """basic""" +1031 97 evaluator """rankbased""" +1031 98 dataset """kinships""" +1031 98 model """unstructuredmodel""" +1031 98 loss """nssa""" +1031 98 regularizer """no""" +1031 98 optimizer """adam""" +1031 98 training_loop """owa""" +1031 98 negative_sampler """basic""" +1031 98 evaluator """rankbased""" +1031 99 dataset """kinships""" +1031 99 model """unstructuredmodel""" +1031 99 loss """nssa""" +1031 99 regularizer """no""" +1031 99 optimizer """adam""" +1031 99 training_loop """owa""" +1031 99 negative_sampler """basic""" +1031 99 evaluator """rankbased""" +1031 100 dataset """kinships""" +1031 100 model """unstructuredmodel""" +1031 100 loss """nssa""" +1031 100 regularizer """no""" +1031 100 optimizer """adam""" +1031 100 training_loop """owa""" +1031 100 negative_sampler """basic""" +1031 100 evaluator """rankbased""" +1032 1 model.embedding_dim 0.0 +1032 1 model.scoring_fct_norm 2.0 +1032 1 loss.margin 6.096171454489114 +1032 1 optimizer.lr 0.012994740814440327 +1032 1 negative_sampler.num_negs_per_pos 88.0 +1032 1 training.batch_size 1.0 +1032 2 model.embedding_dim 1.0 +1032 2 model.scoring_fct_norm 2.0 +1032 2 loss.margin 2.6122799903532736 +1032 2 optimizer.lr 0.0556257880770455 +1032 2 negative_sampler.num_negs_per_pos 70.0 +1032 2 training.batch_size 1.0 +1032 3 model.embedding_dim 1.0 +1032 3 model.scoring_fct_norm 2.0 +1032 3 loss.margin 8.866923300297021 +1032 3 optimizer.lr 0.0012012698538237992 +1032 3 negative_sampler.num_negs_per_pos 30.0 +1032 3 training.batch_size 0.0 +1032 4 model.embedding_dim 1.0 +1032 4 model.scoring_fct_norm 1.0 +1032 4 loss.margin 2.364927249653139 +1032 4 optimizer.lr 0.0057544535532136325 +1032 4 negative_sampler.num_negs_per_pos 20.0 +1032 4 training.batch_size 0.0 +1032 5 model.embedding_dim 1.0 +1032 5 model.scoring_fct_norm 2.0 +1032 5 loss.margin 4.1982710968689325 +1032 5 optimizer.lr 
0.0812700882825257 +1032 5 negative_sampler.num_negs_per_pos 70.0 +1032 5 training.batch_size 0.0 +1032 6 model.embedding_dim 0.0 +1032 6 model.scoring_fct_norm 2.0 +1032 6 loss.margin 9.997754526012127 +1032 6 optimizer.lr 0.0011095196427967123 +1032 6 negative_sampler.num_negs_per_pos 38.0 +1032 6 training.batch_size 2.0 +1032 7 model.embedding_dim 1.0 +1032 7 model.scoring_fct_norm 2.0 +1032 7 loss.margin 2.105771636931478 +1032 7 optimizer.lr 0.03267895055506864 +1032 7 negative_sampler.num_negs_per_pos 78.0 +1032 7 training.batch_size 2.0 +1032 8 model.embedding_dim 0.0 +1032 8 model.scoring_fct_norm 1.0 +1032 8 loss.margin 7.704796345915488 +1032 8 optimizer.lr 0.021592105972544587 +1032 8 negative_sampler.num_negs_per_pos 66.0 +1032 8 training.batch_size 2.0 +1032 9 model.embedding_dim 2.0 +1032 9 model.scoring_fct_norm 2.0 +1032 9 loss.margin 4.0652626662311455 +1032 9 optimizer.lr 0.002600963585685991 +1032 9 negative_sampler.num_negs_per_pos 2.0 +1032 9 training.batch_size 1.0 +1032 10 model.embedding_dim 0.0 +1032 10 model.scoring_fct_norm 2.0 +1032 10 loss.margin 4.640381729494189 +1032 10 optimizer.lr 0.046819916236288256 +1032 10 negative_sampler.num_negs_per_pos 89.0 +1032 10 training.batch_size 2.0 +1032 11 model.embedding_dim 2.0 +1032 11 model.scoring_fct_norm 1.0 +1032 11 loss.margin 6.571622965042179 +1032 11 optimizer.lr 0.008712405852073371 +1032 11 negative_sampler.num_negs_per_pos 3.0 +1032 11 training.batch_size 1.0 +1032 12 model.embedding_dim 1.0 +1032 12 model.scoring_fct_norm 2.0 +1032 12 loss.margin 8.106256542304045 +1032 12 optimizer.lr 0.00383879220112199 +1032 12 negative_sampler.num_negs_per_pos 32.0 +1032 12 training.batch_size 2.0 +1032 13 model.embedding_dim 0.0 +1032 13 model.scoring_fct_norm 1.0 +1032 13 loss.margin 9.49443302520866 +1032 13 optimizer.lr 0.0021413938507420438 +1032 13 negative_sampler.num_negs_per_pos 34.0 +1032 13 training.batch_size 2.0 +1032 14 model.embedding_dim 1.0 +1032 14 model.scoring_fct_norm 2.0 
+1032 14 loss.margin 3.461878987130871 +1032 14 optimizer.lr 0.0017015217015874957 +1032 14 negative_sampler.num_negs_per_pos 93.0 +1032 14 training.batch_size 0.0 +1032 15 model.embedding_dim 2.0 +1032 15 model.scoring_fct_norm 2.0 +1032 15 loss.margin 4.1409111137434325 +1032 15 optimizer.lr 0.057159108480299474 +1032 15 negative_sampler.num_negs_per_pos 95.0 +1032 15 training.batch_size 2.0 +1032 16 model.embedding_dim 2.0 +1032 16 model.scoring_fct_norm 2.0 +1032 16 loss.margin 8.511281594325247 +1032 16 optimizer.lr 0.001432980079875241 +1032 16 negative_sampler.num_negs_per_pos 5.0 +1032 16 training.batch_size 1.0 +1032 17 model.embedding_dim 0.0 +1032 17 model.scoring_fct_norm 1.0 +1032 17 loss.margin 4.56878283922479 +1032 17 optimizer.lr 0.005571401508255305 +1032 17 negative_sampler.num_negs_per_pos 13.0 +1032 17 training.batch_size 1.0 +1032 18 model.embedding_dim 2.0 +1032 18 model.scoring_fct_norm 2.0 +1032 18 loss.margin 8.960415356884898 +1032 18 optimizer.lr 0.008545620778839054 +1032 18 negative_sampler.num_negs_per_pos 51.0 +1032 18 training.batch_size 2.0 +1032 19 model.embedding_dim 0.0 +1032 19 model.scoring_fct_norm 2.0 +1032 19 loss.margin 2.0546901388024774 +1032 19 optimizer.lr 0.0027596349158466645 +1032 19 negative_sampler.num_negs_per_pos 86.0 +1032 19 training.batch_size 2.0 +1032 20 model.embedding_dim 0.0 +1032 20 model.scoring_fct_norm 2.0 +1032 20 loss.margin 0.7979400430061494 +1032 20 optimizer.lr 0.004302777379938672 +1032 20 negative_sampler.num_negs_per_pos 22.0 +1032 20 training.batch_size 1.0 +1032 21 model.embedding_dim 0.0 +1032 21 model.scoring_fct_norm 1.0 +1032 21 loss.margin 3.4557327523008015 +1032 21 optimizer.lr 0.002175966943993676 +1032 21 negative_sampler.num_negs_per_pos 8.0 +1032 21 training.batch_size 0.0 +1032 22 model.embedding_dim 0.0 +1032 22 model.scoring_fct_norm 1.0 +1032 22 loss.margin 0.8021924823317772 +1032 22 optimizer.lr 0.00739589651535352 +1032 22 negative_sampler.num_negs_per_pos 7.0 +1032 22 
training.batch_size 0.0 +1032 23 model.embedding_dim 2.0 +1032 23 model.scoring_fct_norm 1.0 +1032 23 loss.margin 2.3944847793695145 +1032 23 optimizer.lr 0.0025177728436624734 +1032 23 negative_sampler.num_negs_per_pos 25.0 +1032 23 training.batch_size 1.0 +1032 24 model.embedding_dim 2.0 +1032 24 model.scoring_fct_norm 1.0 +1032 24 loss.margin 9.830363296983524 +1032 24 optimizer.lr 0.0028682017642480974 +1032 24 negative_sampler.num_negs_per_pos 18.0 +1032 24 training.batch_size 0.0 +1032 25 model.embedding_dim 0.0 +1032 25 model.scoring_fct_norm 2.0 +1032 25 loss.margin 4.251805778999746 +1032 25 optimizer.lr 0.0018790013848151287 +1032 25 negative_sampler.num_negs_per_pos 1.0 +1032 25 training.batch_size 1.0 +1032 26 model.embedding_dim 2.0 +1032 26 model.scoring_fct_norm 2.0 +1032 26 loss.margin 2.6835781183719516 +1032 26 optimizer.lr 0.002844678386916558 +1032 26 negative_sampler.num_negs_per_pos 68.0 +1032 26 training.batch_size 0.0 +1032 27 model.embedding_dim 1.0 +1032 27 model.scoring_fct_norm 2.0 +1032 27 loss.margin 8.30965596385421 +1032 27 optimizer.lr 0.043234709531260795 +1032 27 negative_sampler.num_negs_per_pos 56.0 +1032 27 training.batch_size 1.0 +1032 28 model.embedding_dim 2.0 +1032 28 model.scoring_fct_norm 2.0 +1032 28 loss.margin 3.14738609000539 +1032 28 optimizer.lr 0.0011097711784623058 +1032 28 negative_sampler.num_negs_per_pos 46.0 +1032 28 training.batch_size 0.0 +1032 29 model.embedding_dim 0.0 +1032 29 model.scoring_fct_norm 1.0 +1032 29 loss.margin 5.204008385503481 +1032 29 optimizer.lr 0.03496597878185148 +1032 29 negative_sampler.num_negs_per_pos 72.0 +1032 29 training.batch_size 2.0 +1032 30 model.embedding_dim 1.0 +1032 30 model.scoring_fct_norm 1.0 +1032 30 loss.margin 7.003194405085546 +1032 30 optimizer.lr 0.001318913617498281 +1032 30 negative_sampler.num_negs_per_pos 38.0 +1032 30 training.batch_size 0.0 +1032 31 model.embedding_dim 1.0 +1032 31 model.scoring_fct_norm 1.0 +1032 31 loss.margin 8.111078598588238 +1032 31 
optimizer.lr 0.01160238728289307 +1032 31 negative_sampler.num_negs_per_pos 73.0 +1032 31 training.batch_size 1.0 +1032 32 model.embedding_dim 0.0 +1032 32 model.scoring_fct_norm 2.0 +1032 32 loss.margin 4.205106545109636 +1032 32 optimizer.lr 0.019799683737183223 +1032 32 negative_sampler.num_negs_per_pos 90.0 +1032 32 training.batch_size 2.0 +1032 33 model.embedding_dim 2.0 +1032 33 model.scoring_fct_norm 1.0 +1032 33 loss.margin 5.512654151067478 +1032 33 optimizer.lr 0.01335248992638195 +1032 33 negative_sampler.num_negs_per_pos 13.0 +1032 33 training.batch_size 0.0 +1032 34 model.embedding_dim 0.0 +1032 34 model.scoring_fct_norm 1.0 +1032 34 loss.margin 5.333263705259834 +1032 34 optimizer.lr 0.005165930219693575 +1032 34 negative_sampler.num_negs_per_pos 31.0 +1032 34 training.batch_size 2.0 +1032 35 model.embedding_dim 2.0 +1032 35 model.scoring_fct_norm 2.0 +1032 35 loss.margin 7.533580813051242 +1032 35 optimizer.lr 0.0053821888339916244 +1032 35 negative_sampler.num_negs_per_pos 86.0 +1032 35 training.batch_size 2.0 +1032 36 model.embedding_dim 0.0 +1032 36 model.scoring_fct_norm 2.0 +1032 36 loss.margin 4.6202441904523 +1032 36 optimizer.lr 0.0012829957647596398 +1032 36 negative_sampler.num_negs_per_pos 47.0 +1032 36 training.batch_size 1.0 +1032 37 model.embedding_dim 1.0 +1032 37 model.scoring_fct_norm 1.0 +1032 37 loss.margin 4.912712247781595 +1032 37 optimizer.lr 0.0012595469879225843 +1032 37 negative_sampler.num_negs_per_pos 31.0 +1032 37 training.batch_size 2.0 +1032 38 model.embedding_dim 1.0 +1032 38 model.scoring_fct_norm 2.0 +1032 38 loss.margin 5.874369318656299 +1032 38 optimizer.lr 0.0069485235460531435 +1032 38 negative_sampler.num_negs_per_pos 29.0 +1032 38 training.batch_size 2.0 +1032 39 model.embedding_dim 0.0 +1032 39 model.scoring_fct_norm 2.0 +1032 39 loss.margin 7.617694606511053 +1032 39 optimizer.lr 0.0012236306284206515 +1032 39 negative_sampler.num_negs_per_pos 33.0 +1032 39 training.batch_size 2.0 +1032 40 
model.embedding_dim 2.0 +1032 40 model.scoring_fct_norm 1.0 +1032 40 loss.margin 9.293425061272627 +1032 40 optimizer.lr 0.0033198431024434693 +1032 40 negative_sampler.num_negs_per_pos 94.0 +1032 40 training.batch_size 0.0 +1032 41 model.embedding_dim 0.0 +1032 41 model.scoring_fct_norm 1.0 +1032 41 loss.margin 6.910880231543757 +1032 41 optimizer.lr 0.001589081071640233 +1032 41 negative_sampler.num_negs_per_pos 76.0 +1032 41 training.batch_size 0.0 +1032 42 model.embedding_dim 1.0 +1032 42 model.scoring_fct_norm 2.0 +1032 42 loss.margin 8.390724619469228 +1032 42 optimizer.lr 0.02503155131234581 +1032 42 negative_sampler.num_negs_per_pos 95.0 +1032 42 training.batch_size 1.0 +1032 43 model.embedding_dim 2.0 +1032 43 model.scoring_fct_norm 2.0 +1032 43 loss.margin 4.726385258644072 +1032 43 optimizer.lr 0.034403898332554875 +1032 43 negative_sampler.num_negs_per_pos 42.0 +1032 43 training.batch_size 0.0 +1032 44 model.embedding_dim 0.0 +1032 44 model.scoring_fct_norm 2.0 +1032 44 loss.margin 5.070088439225595 +1032 44 optimizer.lr 0.007203327835504493 +1032 44 negative_sampler.num_negs_per_pos 10.0 +1032 44 training.batch_size 0.0 +1032 45 model.embedding_dim 1.0 +1032 45 model.scoring_fct_norm 1.0 +1032 45 loss.margin 6.640451611493062 +1032 45 optimizer.lr 0.08103509145057759 +1032 45 negative_sampler.num_negs_per_pos 34.0 +1032 45 training.batch_size 0.0 +1032 46 model.embedding_dim 0.0 +1032 46 model.scoring_fct_norm 1.0 +1032 46 loss.margin 5.2315555279961785 +1032 46 optimizer.lr 0.0012913123877671192 +1032 46 negative_sampler.num_negs_per_pos 74.0 +1032 46 training.batch_size 0.0 +1032 47 model.embedding_dim 0.0 +1032 47 model.scoring_fct_norm 2.0 +1032 47 loss.margin 5.108949987897387 +1032 47 optimizer.lr 0.0033579063380903577 +1032 47 negative_sampler.num_negs_per_pos 23.0 +1032 47 training.batch_size 2.0 +1032 48 model.embedding_dim 1.0 +1032 48 model.scoring_fct_norm 2.0 +1032 48 loss.margin 0.5096666710543927 +1032 48 optimizer.lr 0.03402165919485314 
+1032 48 negative_sampler.num_negs_per_pos 51.0 +1032 48 training.batch_size 1.0 +1032 49 model.embedding_dim 1.0 +1032 49 model.scoring_fct_norm 1.0 +1032 49 loss.margin 2.2355939148050545 +1032 49 optimizer.lr 0.016626217023201977 +1032 49 negative_sampler.num_negs_per_pos 14.0 +1032 49 training.batch_size 1.0 +1032 50 model.embedding_dim 1.0 +1032 50 model.scoring_fct_norm 1.0 +1032 50 loss.margin 8.953765565751283 +1032 50 optimizer.lr 0.009016860164742065 +1032 50 negative_sampler.num_negs_per_pos 48.0 +1032 50 training.batch_size 2.0 +1032 51 model.embedding_dim 0.0 +1032 51 model.scoring_fct_norm 1.0 +1032 51 loss.margin 5.85702234100453 +1032 51 optimizer.lr 0.005230520048307348 +1032 51 negative_sampler.num_negs_per_pos 71.0 +1032 51 training.batch_size 2.0 +1032 52 model.embedding_dim 1.0 +1032 52 model.scoring_fct_norm 1.0 +1032 52 loss.margin 4.7228049298707795 +1032 52 optimizer.lr 0.02437766626108564 +1032 52 negative_sampler.num_negs_per_pos 98.0 +1032 52 training.batch_size 1.0 +1032 53 model.embedding_dim 0.0 +1032 53 model.scoring_fct_norm 1.0 +1032 53 loss.margin 6.999035392868166 +1032 53 optimizer.lr 0.026116551583711744 +1032 53 negative_sampler.num_negs_per_pos 20.0 +1032 53 training.batch_size 2.0 +1032 54 model.embedding_dim 1.0 +1032 54 model.scoring_fct_norm 1.0 +1032 54 loss.margin 8.356880037939874 +1032 54 optimizer.lr 0.01515169252415438 +1032 54 negative_sampler.num_negs_per_pos 55.0 +1032 54 training.batch_size 1.0 +1032 55 model.embedding_dim 0.0 +1032 55 model.scoring_fct_norm 2.0 +1032 55 loss.margin 4.947960516827326 +1032 55 optimizer.lr 0.005818737703522339 +1032 55 negative_sampler.num_negs_per_pos 16.0 +1032 55 training.batch_size 2.0 +1032 56 model.embedding_dim 1.0 +1032 56 model.scoring_fct_norm 2.0 +1032 56 loss.margin 5.18092498887936 +1032 56 optimizer.lr 0.008284234371142332 +1032 56 negative_sampler.num_negs_per_pos 6.0 +1032 56 training.batch_size 2.0 +1032 57 model.embedding_dim 2.0 +1032 57 model.scoring_fct_norm 
2.0 +1032 57 loss.margin 4.106989377812074 +1032 57 optimizer.lr 0.0015423408291236956 +1032 57 negative_sampler.num_negs_per_pos 41.0 +1032 57 training.batch_size 2.0 +1032 58 model.embedding_dim 2.0 +1032 58 model.scoring_fct_norm 2.0 +1032 58 loss.margin 8.21071828298293 +1032 58 optimizer.lr 0.028633637598172133 +1032 58 negative_sampler.num_negs_per_pos 31.0 +1032 58 training.batch_size 1.0 +1032 59 model.embedding_dim 2.0 +1032 59 model.scoring_fct_norm 1.0 +1032 59 loss.margin 3.471797783233805 +1032 59 optimizer.lr 0.07181011484865493 +1032 59 negative_sampler.num_negs_per_pos 49.0 +1032 59 training.batch_size 2.0 +1032 60 model.embedding_dim 1.0 +1032 60 model.scoring_fct_norm 2.0 +1032 60 loss.margin 7.471247171365556 +1032 60 optimizer.lr 0.0010868439881343034 +1032 60 negative_sampler.num_negs_per_pos 20.0 +1032 60 training.batch_size 2.0 +1032 61 model.embedding_dim 1.0 +1032 61 model.scoring_fct_norm 1.0 +1032 61 loss.margin 4.18633953600291 +1032 61 optimizer.lr 0.02335828872131452 +1032 61 negative_sampler.num_negs_per_pos 2.0 +1032 61 training.batch_size 0.0 +1032 62 model.embedding_dim 1.0 +1032 62 model.scoring_fct_norm 2.0 +1032 62 loss.margin 9.888130239416286 +1032 62 optimizer.lr 0.08133118477720656 +1032 62 negative_sampler.num_negs_per_pos 60.0 +1032 62 training.batch_size 2.0 +1032 63 model.embedding_dim 0.0 +1032 63 model.scoring_fct_norm 1.0 +1032 63 loss.margin 7.702795991767969 +1032 63 optimizer.lr 0.006717151598555168 +1032 63 negative_sampler.num_negs_per_pos 81.0 +1032 63 training.batch_size 1.0 +1032 64 model.embedding_dim 0.0 +1032 64 model.scoring_fct_norm 2.0 +1032 64 loss.margin 7.55545089488601 +1032 64 optimizer.lr 0.0277125881724684 +1032 64 negative_sampler.num_negs_per_pos 61.0 +1032 64 training.batch_size 0.0 +1032 65 model.embedding_dim 1.0 +1032 65 model.scoring_fct_norm 1.0 +1032 65 loss.margin 5.847614660297238 +1032 65 optimizer.lr 0.039418736856360166 +1032 65 negative_sampler.num_negs_per_pos 8.0 +1032 65 
training.batch_size 1.0 +1032 66 model.embedding_dim 1.0 +1032 66 model.scoring_fct_norm 1.0 +1032 66 loss.margin 9.701510588721128 +1032 66 optimizer.lr 0.01294043832758242 +1032 66 negative_sampler.num_negs_per_pos 21.0 +1032 66 training.batch_size 0.0 +1032 67 model.embedding_dim 0.0 +1032 67 model.scoring_fct_norm 1.0 +1032 67 loss.margin 1.1431194943976213 +1032 67 optimizer.lr 0.05101145527569779 +1032 67 negative_sampler.num_negs_per_pos 28.0 +1032 67 training.batch_size 2.0 +1032 68 model.embedding_dim 0.0 +1032 68 model.scoring_fct_norm 1.0 +1032 68 loss.margin 8.579937151992187 +1032 68 optimizer.lr 0.05215052529063049 +1032 68 negative_sampler.num_negs_per_pos 94.0 +1032 68 training.batch_size 0.0 +1032 69 model.embedding_dim 1.0 +1032 69 model.scoring_fct_norm 2.0 +1032 69 loss.margin 3.5137002733556533 +1032 69 optimizer.lr 0.017778939478495712 +1032 69 negative_sampler.num_negs_per_pos 50.0 +1032 69 training.batch_size 2.0 +1032 70 model.embedding_dim 0.0 +1032 70 model.scoring_fct_norm 1.0 +1032 70 loss.margin 6.316294324299255 +1032 70 optimizer.lr 0.017837614998174645 +1032 70 negative_sampler.num_negs_per_pos 33.0 +1032 70 training.batch_size 2.0 +1032 71 model.embedding_dim 2.0 +1032 71 model.scoring_fct_norm 2.0 +1032 71 loss.margin 4.039888635678683 +1032 71 optimizer.lr 0.03297394171321116 +1032 71 negative_sampler.num_negs_per_pos 16.0 +1032 71 training.batch_size 1.0 +1032 72 model.embedding_dim 0.0 +1032 72 model.scoring_fct_norm 2.0 +1032 72 loss.margin 7.377326499343308 +1032 72 optimizer.lr 0.005383634283980506 +1032 72 negative_sampler.num_negs_per_pos 83.0 +1032 72 training.batch_size 0.0 +1032 73 model.embedding_dim 1.0 +1032 73 model.scoring_fct_norm 2.0 +1032 73 loss.margin 3.9296069834676093 +1032 73 optimizer.lr 0.012572530087865746 +1032 73 negative_sampler.num_negs_per_pos 73.0 +1032 73 training.batch_size 1.0 +1032 74 model.embedding_dim 1.0 +1032 74 model.scoring_fct_norm 1.0 +1032 74 loss.margin 3.449471203579888 +1032 74 
optimizer.lr 0.0032505100336643524 +1032 74 negative_sampler.num_negs_per_pos 51.0 +1032 74 training.batch_size 2.0 +1032 75 model.embedding_dim 0.0 +1032 75 model.scoring_fct_norm 1.0 +1032 75 loss.margin 7.391618433525926 +1032 75 optimizer.lr 0.022614392172492045 +1032 75 negative_sampler.num_negs_per_pos 77.0 +1032 75 training.batch_size 0.0 +1032 76 model.embedding_dim 2.0 +1032 76 model.scoring_fct_norm 1.0 +1032 76 loss.margin 3.8046212614695656 +1032 76 optimizer.lr 0.015426297867190615 +1032 76 negative_sampler.num_negs_per_pos 39.0 +1032 76 training.batch_size 0.0 +1032 77 model.embedding_dim 0.0 +1032 77 model.scoring_fct_norm 1.0 +1032 77 loss.margin 3.434127478271973 +1032 77 optimizer.lr 0.006935383643402676 +1032 77 negative_sampler.num_negs_per_pos 71.0 +1032 77 training.batch_size 2.0 +1032 78 model.embedding_dim 0.0 +1032 78 model.scoring_fct_norm 1.0 +1032 78 loss.margin 0.9912081481946827 +1032 78 optimizer.lr 0.06881809482375989 +1032 78 negative_sampler.num_negs_per_pos 22.0 +1032 78 training.batch_size 2.0 +1032 79 model.embedding_dim 2.0 +1032 79 model.scoring_fct_norm 2.0 +1032 79 loss.margin 0.6396572682384929 +1032 79 optimizer.lr 0.010831585160316638 +1032 79 negative_sampler.num_negs_per_pos 77.0 +1032 79 training.batch_size 2.0 +1032 80 model.embedding_dim 0.0 +1032 80 model.scoring_fct_norm 2.0 +1032 80 loss.margin 2.457371195141563 +1032 80 optimizer.lr 0.01553493948372222 +1032 80 negative_sampler.num_negs_per_pos 48.0 +1032 80 training.batch_size 2.0 +1032 81 model.embedding_dim 2.0 +1032 81 model.scoring_fct_norm 1.0 +1032 81 loss.margin 9.35986004741866 +1032 81 optimizer.lr 0.005139743104510054 +1032 81 negative_sampler.num_negs_per_pos 50.0 +1032 81 training.batch_size 0.0 +1032 82 model.embedding_dim 1.0 +1032 82 model.scoring_fct_norm 2.0 +1032 82 loss.margin 5.454864366267909 +1032 82 optimizer.lr 0.02220483676187879 +1032 82 negative_sampler.num_negs_per_pos 87.0 +1032 82 training.batch_size 1.0 +1032 83 model.embedding_dim 
1.0 +1032 83 model.scoring_fct_norm 1.0 +1032 83 loss.margin 7.9963320871637515 +1032 83 optimizer.lr 0.013753054217970814 +1032 83 negative_sampler.num_negs_per_pos 26.0 +1032 83 training.batch_size 0.0 +1032 84 model.embedding_dim 2.0 +1032 84 model.scoring_fct_norm 1.0 +1032 84 loss.margin 4.139241975960502 +1032 84 optimizer.lr 0.003272469076481971 +1032 84 negative_sampler.num_negs_per_pos 50.0 +1032 84 training.batch_size 1.0 +1032 85 model.embedding_dim 1.0 +1032 85 model.scoring_fct_norm 2.0 +1032 85 loss.margin 2.2357975443420006 +1032 85 optimizer.lr 0.08745029760889428 +1032 85 negative_sampler.num_negs_per_pos 8.0 +1032 85 training.batch_size 2.0 +1032 86 model.embedding_dim 2.0 +1032 86 model.scoring_fct_norm 1.0 +1032 86 loss.margin 8.771162190332161 +1032 86 optimizer.lr 0.010803988605959906 +1032 86 negative_sampler.num_negs_per_pos 58.0 +1032 86 training.batch_size 1.0 +1032 87 model.embedding_dim 2.0 +1032 87 model.scoring_fct_norm 1.0 +1032 87 loss.margin 1.8477679312165405 +1032 87 optimizer.lr 0.0040367650286426615 +1032 87 negative_sampler.num_negs_per_pos 5.0 +1032 87 training.batch_size 2.0 +1032 88 model.embedding_dim 1.0 +1032 88 model.scoring_fct_norm 1.0 +1032 88 loss.margin 9.870283862439178 +1032 88 optimizer.lr 0.03978938196027 +1032 88 negative_sampler.num_negs_per_pos 43.0 +1032 88 training.batch_size 1.0 +1032 89 model.embedding_dim 1.0 +1032 89 model.scoring_fct_norm 1.0 +1032 89 loss.margin 4.126047187051519 +1032 89 optimizer.lr 0.003963814095603697 +1032 89 negative_sampler.num_negs_per_pos 56.0 +1032 89 training.batch_size 0.0 +1032 90 model.embedding_dim 2.0 +1032 90 model.scoring_fct_norm 2.0 +1032 90 loss.margin 9.857773378724985 +1032 90 optimizer.lr 0.004026170346945661 +1032 90 negative_sampler.num_negs_per_pos 93.0 +1032 90 training.batch_size 1.0 +1032 91 model.embedding_dim 2.0 +1032 91 model.scoring_fct_norm 2.0 +1032 91 loss.margin 2.225708940035795 +1032 91 optimizer.lr 0.016713116912279762 +1032 91 
negative_sampler.num_negs_per_pos 68.0 +1032 91 training.batch_size 1.0 +1032 92 model.embedding_dim 1.0 +1032 92 model.scoring_fct_norm 1.0 +1032 92 loss.margin 8.520350439334258 +1032 92 optimizer.lr 0.029515884367711593 +1032 92 negative_sampler.num_negs_per_pos 97.0 +1032 92 training.batch_size 0.0 +1032 93 model.embedding_dim 0.0 +1032 93 model.scoring_fct_norm 2.0 +1032 93 loss.margin 8.722083123536875 +1032 93 optimizer.lr 0.04196251311286947 +1032 93 negative_sampler.num_negs_per_pos 16.0 +1032 93 training.batch_size 0.0 +1032 94 model.embedding_dim 2.0 +1032 94 model.scoring_fct_norm 2.0 +1032 94 loss.margin 3.693254838665499 +1032 94 optimizer.lr 0.020881665622659383 +1032 94 negative_sampler.num_negs_per_pos 25.0 +1032 94 training.batch_size 1.0 +1032 95 model.embedding_dim 1.0 +1032 95 model.scoring_fct_norm 1.0 +1032 95 loss.margin 2.9418407041918044 +1032 95 optimizer.lr 0.0024620088765719265 +1032 95 negative_sampler.num_negs_per_pos 31.0 +1032 95 training.batch_size 1.0 +1032 96 model.embedding_dim 0.0 +1032 96 model.scoring_fct_norm 2.0 +1032 96 loss.margin 7.55480583811247 +1032 96 optimizer.lr 0.0034069868313931252 +1032 96 negative_sampler.num_negs_per_pos 13.0 +1032 96 training.batch_size 0.0 +1032 97 model.embedding_dim 2.0 +1032 97 model.scoring_fct_norm 1.0 +1032 97 loss.margin 4.611655824197336 +1032 97 optimizer.lr 0.0021531573673966675 +1032 97 negative_sampler.num_negs_per_pos 50.0 +1032 97 training.batch_size 2.0 +1032 98 model.embedding_dim 0.0 +1032 98 model.scoring_fct_norm 2.0 +1032 98 loss.margin 5.769863987322615 +1032 98 optimizer.lr 0.07996375939098299 +1032 98 negative_sampler.num_negs_per_pos 46.0 +1032 98 training.batch_size 0.0 +1032 99 model.embedding_dim 0.0 +1032 99 model.scoring_fct_norm 2.0 +1032 99 loss.margin 9.578846161886927 +1032 99 optimizer.lr 0.0014092866838891372 +1032 99 negative_sampler.num_negs_per_pos 21.0 +1032 99 training.batch_size 0.0 +1032 100 model.embedding_dim 1.0 +1032 100 model.scoring_fct_norm 
2.0 +1032 100 loss.margin 6.178124743608553 +1032 100 optimizer.lr 0.002453654865075428 +1032 100 negative_sampler.num_negs_per_pos 80.0 +1032 100 training.batch_size 2.0 +1032 1 dataset """kinships""" +1032 1 model """unstructuredmodel""" +1032 1 loss """marginranking""" +1032 1 regularizer """no""" +1032 1 optimizer """adam""" +1032 1 training_loop """owa""" +1032 1 negative_sampler """basic""" +1032 1 evaluator """rankbased""" +1032 2 dataset """kinships""" +1032 2 model """unstructuredmodel""" +1032 2 loss """marginranking""" +1032 2 regularizer """no""" +1032 2 optimizer """adam""" +1032 2 training_loop """owa""" +1032 2 negative_sampler """basic""" +1032 2 evaluator """rankbased""" +1032 3 dataset """kinships""" +1032 3 model """unstructuredmodel""" +1032 3 loss """marginranking""" +1032 3 regularizer """no""" +1032 3 optimizer """adam""" +1032 3 training_loop """owa""" +1032 3 negative_sampler """basic""" +1032 3 evaluator """rankbased""" +1032 4 dataset """kinships""" +1032 4 model """unstructuredmodel""" +1032 4 loss """marginranking""" +1032 4 regularizer """no""" +1032 4 optimizer """adam""" +1032 4 training_loop """owa""" +1032 4 negative_sampler """basic""" +1032 4 evaluator """rankbased""" +1032 5 dataset """kinships""" +1032 5 model """unstructuredmodel""" +1032 5 loss """marginranking""" +1032 5 regularizer """no""" +1032 5 optimizer """adam""" +1032 5 training_loop """owa""" +1032 5 negative_sampler """basic""" +1032 5 evaluator """rankbased""" +1032 6 dataset """kinships""" +1032 6 model """unstructuredmodel""" +1032 6 loss """marginranking""" +1032 6 regularizer """no""" +1032 6 optimizer """adam""" +1032 6 training_loop """owa""" +1032 6 negative_sampler """basic""" +1032 6 evaluator """rankbased""" +1032 7 dataset """kinships""" +1032 7 model """unstructuredmodel""" +1032 7 loss """marginranking""" +1032 7 regularizer """no""" +1032 7 optimizer """adam""" +1032 7 training_loop """owa""" +1032 7 negative_sampler """basic""" +1032 7 evaluator 
"""rankbased""" +1032 8 dataset """kinships""" +1032 8 model """unstructuredmodel""" +1032 8 loss """marginranking""" +1032 8 regularizer """no""" +1032 8 optimizer """adam""" +1032 8 training_loop """owa""" +1032 8 negative_sampler """basic""" +1032 8 evaluator """rankbased""" +1032 9 dataset """kinships""" +1032 9 model """unstructuredmodel""" +1032 9 loss """marginranking""" +1032 9 regularizer """no""" +1032 9 optimizer """adam""" +1032 9 training_loop """owa""" +1032 9 negative_sampler """basic""" +1032 9 evaluator """rankbased""" +1032 10 dataset """kinships""" +1032 10 model """unstructuredmodel""" +1032 10 loss """marginranking""" +1032 10 regularizer """no""" +1032 10 optimizer """adam""" +1032 10 training_loop """owa""" +1032 10 negative_sampler """basic""" +1032 10 evaluator """rankbased""" +1032 11 dataset """kinships""" +1032 11 model """unstructuredmodel""" +1032 11 loss """marginranking""" +1032 11 regularizer """no""" +1032 11 optimizer """adam""" +1032 11 training_loop """owa""" +1032 11 negative_sampler """basic""" +1032 11 evaluator """rankbased""" +1032 12 dataset """kinships""" +1032 12 model """unstructuredmodel""" +1032 12 loss """marginranking""" +1032 12 regularizer """no""" +1032 12 optimizer """adam""" +1032 12 training_loop """owa""" +1032 12 negative_sampler """basic""" +1032 12 evaluator """rankbased""" +1032 13 dataset """kinships""" +1032 13 model """unstructuredmodel""" +1032 13 loss """marginranking""" +1032 13 regularizer """no""" +1032 13 optimizer """adam""" +1032 13 training_loop """owa""" +1032 13 negative_sampler """basic""" +1032 13 evaluator """rankbased""" +1032 14 dataset """kinships""" +1032 14 model """unstructuredmodel""" +1032 14 loss """marginranking""" +1032 14 regularizer """no""" +1032 14 optimizer """adam""" +1032 14 training_loop """owa""" +1032 14 negative_sampler """basic""" +1032 14 evaluator """rankbased""" +1032 15 dataset """kinships""" +1032 15 model """unstructuredmodel""" +1032 15 loss 
"""marginranking""" +1032 15 regularizer """no""" +1032 15 optimizer """adam""" +1032 15 training_loop """owa""" +1032 15 negative_sampler """basic""" +1032 15 evaluator """rankbased""" +1032 16 dataset """kinships""" +1032 16 model """unstructuredmodel""" +1032 16 loss """marginranking""" +1032 16 regularizer """no""" +1032 16 optimizer """adam""" +1032 16 training_loop """owa""" +1032 16 negative_sampler """basic""" +1032 16 evaluator """rankbased""" +1032 17 dataset """kinships""" +1032 17 model """unstructuredmodel""" +1032 17 loss """marginranking""" +1032 17 regularizer """no""" +1032 17 optimizer """adam""" +1032 17 training_loop """owa""" +1032 17 negative_sampler """basic""" +1032 17 evaluator """rankbased""" +1032 18 dataset """kinships""" +1032 18 model """unstructuredmodel""" +1032 18 loss """marginranking""" +1032 18 regularizer """no""" +1032 18 optimizer """adam""" +1032 18 training_loop """owa""" +1032 18 negative_sampler """basic""" +1032 18 evaluator """rankbased""" +1032 19 dataset """kinships""" +1032 19 model """unstructuredmodel""" +1032 19 loss """marginranking""" +1032 19 regularizer """no""" +1032 19 optimizer """adam""" +1032 19 training_loop """owa""" +1032 19 negative_sampler """basic""" +1032 19 evaluator """rankbased""" +1032 20 dataset """kinships""" +1032 20 model """unstructuredmodel""" +1032 20 loss """marginranking""" +1032 20 regularizer """no""" +1032 20 optimizer """adam""" +1032 20 training_loop """owa""" +1032 20 negative_sampler """basic""" +1032 20 evaluator """rankbased""" +1032 21 dataset """kinships""" +1032 21 model """unstructuredmodel""" +1032 21 loss """marginranking""" +1032 21 regularizer """no""" +1032 21 optimizer """adam""" +1032 21 training_loop """owa""" +1032 21 negative_sampler """basic""" +1032 21 evaluator """rankbased""" +1032 22 dataset """kinships""" +1032 22 model """unstructuredmodel""" +1032 22 loss """marginranking""" +1032 22 regularizer """no""" +1032 22 optimizer """adam""" +1032 22 training_loop 
"""owa""" +1032 22 negative_sampler """basic""" +1032 22 evaluator """rankbased""" +1032 23 dataset """kinships""" +1032 23 model """unstructuredmodel""" +1032 23 loss """marginranking""" +1032 23 regularizer """no""" +1032 23 optimizer """adam""" +1032 23 training_loop """owa""" +1032 23 negative_sampler """basic""" +1032 23 evaluator """rankbased""" +1032 24 dataset """kinships""" +1032 24 model """unstructuredmodel""" +1032 24 loss """marginranking""" +1032 24 regularizer """no""" +1032 24 optimizer """adam""" +1032 24 training_loop """owa""" +1032 24 negative_sampler """basic""" +1032 24 evaluator """rankbased""" +1032 25 dataset """kinships""" +1032 25 model """unstructuredmodel""" +1032 25 loss """marginranking""" +1032 25 regularizer """no""" +1032 25 optimizer """adam""" +1032 25 training_loop """owa""" +1032 25 negative_sampler """basic""" +1032 25 evaluator """rankbased""" +1032 26 dataset """kinships""" +1032 26 model """unstructuredmodel""" +1032 26 loss """marginranking""" +1032 26 regularizer """no""" +1032 26 optimizer """adam""" +1032 26 training_loop """owa""" +1032 26 negative_sampler """basic""" +1032 26 evaluator """rankbased""" +1032 27 dataset """kinships""" +1032 27 model """unstructuredmodel""" +1032 27 loss """marginranking""" +1032 27 regularizer """no""" +1032 27 optimizer """adam""" +1032 27 training_loop """owa""" +1032 27 negative_sampler """basic""" +1032 27 evaluator """rankbased""" +1032 28 dataset """kinships""" +1032 28 model """unstructuredmodel""" +1032 28 loss """marginranking""" +1032 28 regularizer """no""" +1032 28 optimizer """adam""" +1032 28 training_loop """owa""" +1032 28 negative_sampler """basic""" +1032 28 evaluator """rankbased""" +1032 29 dataset """kinships""" +1032 29 model """unstructuredmodel""" +1032 29 loss """marginranking""" +1032 29 regularizer """no""" +1032 29 optimizer """adam""" +1032 29 training_loop """owa""" +1032 29 negative_sampler """basic""" +1032 29 evaluator """rankbased""" +1032 30 dataset 
"""kinships""" +1032 30 model """unstructuredmodel""" +1032 30 loss """marginranking""" +1032 30 regularizer """no""" +1032 30 optimizer """adam""" +1032 30 training_loop """owa""" +1032 30 negative_sampler """basic""" +1032 30 evaluator """rankbased""" +1032 31 dataset """kinships""" +1032 31 model """unstructuredmodel""" +1032 31 loss """marginranking""" +1032 31 regularizer """no""" +1032 31 optimizer """adam""" +1032 31 training_loop """owa""" +1032 31 negative_sampler """basic""" +1032 31 evaluator """rankbased""" +1032 32 dataset """kinships""" +1032 32 model """unstructuredmodel""" +1032 32 loss """marginranking""" +1032 32 regularizer """no""" +1032 32 optimizer """adam""" +1032 32 training_loop """owa""" +1032 32 negative_sampler """basic""" +1032 32 evaluator """rankbased""" +1032 33 dataset """kinships""" +1032 33 model """unstructuredmodel""" +1032 33 loss """marginranking""" +1032 33 regularizer """no""" +1032 33 optimizer """adam""" +1032 33 training_loop """owa""" +1032 33 negative_sampler """basic""" +1032 33 evaluator """rankbased""" +1032 34 dataset """kinships""" +1032 34 model """unstructuredmodel""" +1032 34 loss """marginranking""" +1032 34 regularizer """no""" +1032 34 optimizer """adam""" +1032 34 training_loop """owa""" +1032 34 negative_sampler """basic""" +1032 34 evaluator """rankbased""" +1032 35 dataset """kinships""" +1032 35 model """unstructuredmodel""" +1032 35 loss """marginranking""" +1032 35 regularizer """no""" +1032 35 optimizer """adam""" +1032 35 training_loop """owa""" +1032 35 negative_sampler """basic""" +1032 35 evaluator """rankbased""" +1032 36 dataset """kinships""" +1032 36 model """unstructuredmodel""" +1032 36 loss """marginranking""" +1032 36 regularizer """no""" +1032 36 optimizer """adam""" +1032 36 training_loop """owa""" +1032 36 negative_sampler """basic""" +1032 36 evaluator """rankbased""" +1032 37 dataset """kinships""" +1032 37 model """unstructuredmodel""" +1032 37 loss """marginranking""" +1032 37 
regularizer """no""" +1032 37 optimizer """adam""" +1032 37 training_loop """owa""" +1032 37 negative_sampler """basic""" +1032 37 evaluator """rankbased""" +1032 38 dataset """kinships""" +1032 38 model """unstructuredmodel""" +1032 38 loss """marginranking""" +1032 38 regularizer """no""" +1032 38 optimizer """adam""" +1032 38 training_loop """owa""" +1032 38 negative_sampler """basic""" +1032 38 evaluator """rankbased""" +1032 39 dataset """kinships""" +1032 39 model """unstructuredmodel""" +1032 39 loss """marginranking""" +1032 39 regularizer """no""" +1032 39 optimizer """adam""" +1032 39 training_loop """owa""" +1032 39 negative_sampler """basic""" +1032 39 evaluator """rankbased""" +1032 40 dataset """kinships""" +1032 40 model """unstructuredmodel""" +1032 40 loss """marginranking""" +1032 40 regularizer """no""" +1032 40 optimizer """adam""" +1032 40 training_loop """owa""" +1032 40 negative_sampler """basic""" +1032 40 evaluator """rankbased""" +1032 41 dataset """kinships""" +1032 41 model """unstructuredmodel""" +1032 41 loss """marginranking""" +1032 41 regularizer """no""" +1032 41 optimizer """adam""" +1032 41 training_loop """owa""" +1032 41 negative_sampler """basic""" +1032 41 evaluator """rankbased""" +1032 42 dataset """kinships""" +1032 42 model """unstructuredmodel""" +1032 42 loss """marginranking""" +1032 42 regularizer """no""" +1032 42 optimizer """adam""" +1032 42 training_loop """owa""" +1032 42 negative_sampler """basic""" +1032 42 evaluator """rankbased""" +1032 43 dataset """kinships""" +1032 43 model """unstructuredmodel""" +1032 43 loss """marginranking""" +1032 43 regularizer """no""" +1032 43 optimizer """adam""" +1032 43 training_loop """owa""" +1032 43 negative_sampler """basic""" +1032 43 evaluator """rankbased""" +1032 44 dataset """kinships""" +1032 44 model """unstructuredmodel""" +1032 44 loss """marginranking""" +1032 44 regularizer """no""" +1032 44 optimizer """adam""" +1032 44 training_loop """owa""" +1032 44 
negative_sampler """basic""" +1032 44 evaluator """rankbased""" +1032 45 dataset """kinships""" +1032 45 model """unstructuredmodel""" +1032 45 loss """marginranking""" +1032 45 regularizer """no""" +1032 45 optimizer """adam""" +1032 45 training_loop """owa""" +1032 45 negative_sampler """basic""" +1032 45 evaluator """rankbased""" +1032 46 dataset """kinships""" +1032 46 model """unstructuredmodel""" +1032 46 loss """marginranking""" +1032 46 regularizer """no""" +1032 46 optimizer """adam""" +1032 46 training_loop """owa""" +1032 46 negative_sampler """basic""" +1032 46 evaluator """rankbased""" +1032 47 dataset """kinships""" +1032 47 model """unstructuredmodel""" +1032 47 loss """marginranking""" +1032 47 regularizer """no""" +1032 47 optimizer """adam""" +1032 47 training_loop """owa""" +1032 47 negative_sampler """basic""" +1032 47 evaluator """rankbased""" +1032 48 dataset """kinships""" +1032 48 model """unstructuredmodel""" +1032 48 loss """marginranking""" +1032 48 regularizer """no""" +1032 48 optimizer """adam""" +1032 48 training_loop """owa""" +1032 48 negative_sampler """basic""" +1032 48 evaluator """rankbased""" +1032 49 dataset """kinships""" +1032 49 model """unstructuredmodel""" +1032 49 loss """marginranking""" +1032 49 regularizer """no""" +1032 49 optimizer """adam""" +1032 49 training_loop """owa""" +1032 49 negative_sampler """basic""" +1032 49 evaluator """rankbased""" +1032 50 dataset """kinships""" +1032 50 model """unstructuredmodel""" +1032 50 loss """marginranking""" +1032 50 regularizer """no""" +1032 50 optimizer """adam""" +1032 50 training_loop """owa""" +1032 50 negative_sampler """basic""" +1032 50 evaluator """rankbased""" +1032 51 dataset """kinships""" +1032 51 model """unstructuredmodel""" +1032 51 loss """marginranking""" +1032 51 regularizer """no""" +1032 51 optimizer """adam""" +1032 51 training_loop """owa""" +1032 51 negative_sampler """basic""" +1032 51 evaluator """rankbased""" +1032 52 dataset """kinships""" +1032 
52 model """unstructuredmodel""" +1032 52 loss """marginranking""" +1032 52 regularizer """no""" +1032 52 optimizer """adam""" +1032 52 training_loop """owa""" +1032 52 negative_sampler """basic""" +1032 52 evaluator """rankbased""" +1032 53 dataset """kinships""" +1032 53 model """unstructuredmodel""" +1032 53 loss """marginranking""" +1032 53 regularizer """no""" +1032 53 optimizer """adam""" +1032 53 training_loop """owa""" +1032 53 negative_sampler """basic""" +1032 53 evaluator """rankbased""" +1032 54 dataset """kinships""" +1032 54 model """unstructuredmodel""" +1032 54 loss """marginranking""" +1032 54 regularizer """no""" +1032 54 optimizer """adam""" +1032 54 training_loop """owa""" +1032 54 negative_sampler """basic""" +1032 54 evaluator """rankbased""" +1032 55 dataset """kinships""" +1032 55 model """unstructuredmodel""" +1032 55 loss """marginranking""" +1032 55 regularizer """no""" +1032 55 optimizer """adam""" +1032 55 training_loop """owa""" +1032 55 negative_sampler """basic""" +1032 55 evaluator """rankbased""" +1032 56 dataset """kinships""" +1032 56 model """unstructuredmodel""" +1032 56 loss """marginranking""" +1032 56 regularizer """no""" +1032 56 optimizer """adam""" +1032 56 training_loop """owa""" +1032 56 negative_sampler """basic""" +1032 56 evaluator """rankbased""" +1032 57 dataset """kinships""" +1032 57 model """unstructuredmodel""" +1032 57 loss """marginranking""" +1032 57 regularizer """no""" +1032 57 optimizer """adam""" +1032 57 training_loop """owa""" +1032 57 negative_sampler """basic""" +1032 57 evaluator """rankbased""" +1032 58 dataset """kinships""" +1032 58 model """unstructuredmodel""" +1032 58 loss """marginranking""" +1032 58 regularizer """no""" +1032 58 optimizer """adam""" +1032 58 training_loop """owa""" +1032 58 negative_sampler """basic""" +1032 58 evaluator """rankbased""" +1032 59 dataset """kinships""" +1032 59 model """unstructuredmodel""" +1032 59 loss """marginranking""" +1032 59 regularizer """no""" +1032 
59 optimizer """adam""" +1032 59 training_loop """owa""" +1032 59 negative_sampler """basic""" +1032 59 evaluator """rankbased""" +1032 60 dataset """kinships""" +1032 60 model """unstructuredmodel""" +1032 60 loss """marginranking""" +1032 60 regularizer """no""" +1032 60 optimizer """adam""" +1032 60 training_loop """owa""" +1032 60 negative_sampler """basic""" +1032 60 evaluator """rankbased""" +1032 61 dataset """kinships""" +1032 61 model """unstructuredmodel""" +1032 61 loss """marginranking""" +1032 61 regularizer """no""" +1032 61 optimizer """adam""" +1032 61 training_loop """owa""" +1032 61 negative_sampler """basic""" +1032 61 evaluator """rankbased""" +1032 62 dataset """kinships""" +1032 62 model """unstructuredmodel""" +1032 62 loss """marginranking""" +1032 62 regularizer """no""" +1032 62 optimizer """adam""" +1032 62 training_loop """owa""" +1032 62 negative_sampler """basic""" +1032 62 evaluator """rankbased""" +1032 63 dataset """kinships""" +1032 63 model """unstructuredmodel""" +1032 63 loss """marginranking""" +1032 63 regularizer """no""" +1032 63 optimizer """adam""" +1032 63 training_loop """owa""" +1032 63 negative_sampler """basic""" +1032 63 evaluator """rankbased""" +1032 64 dataset """kinships""" +1032 64 model """unstructuredmodel""" +1032 64 loss """marginranking""" +1032 64 regularizer """no""" +1032 64 optimizer """adam""" +1032 64 training_loop """owa""" +1032 64 negative_sampler """basic""" +1032 64 evaluator """rankbased""" +1032 65 dataset """kinships""" +1032 65 model """unstructuredmodel""" +1032 65 loss """marginranking""" +1032 65 regularizer """no""" +1032 65 optimizer """adam""" +1032 65 training_loop """owa""" +1032 65 negative_sampler """basic""" +1032 65 evaluator """rankbased""" +1032 66 dataset """kinships""" +1032 66 model """unstructuredmodel""" +1032 66 loss """marginranking""" +1032 66 regularizer """no""" +1032 66 optimizer """adam""" +1032 66 training_loop """owa""" +1032 66 negative_sampler """basic""" +1032 
66 evaluator """rankbased""" +1032 67 dataset """kinships""" +1032 67 model """unstructuredmodel""" +1032 67 loss """marginranking""" +1032 67 regularizer """no""" +1032 67 optimizer """adam""" +1032 67 training_loop """owa""" +1032 67 negative_sampler """basic""" +1032 67 evaluator """rankbased""" +1032 68 dataset """kinships""" +1032 68 model """unstructuredmodel""" +1032 68 loss """marginranking""" +1032 68 regularizer """no""" +1032 68 optimizer """adam""" +1032 68 training_loop """owa""" +1032 68 negative_sampler """basic""" +1032 68 evaluator """rankbased""" +1032 69 dataset """kinships""" +1032 69 model """unstructuredmodel""" +1032 69 loss """marginranking""" +1032 69 regularizer """no""" +1032 69 optimizer """adam""" +1032 69 training_loop """owa""" +1032 69 negative_sampler """basic""" +1032 69 evaluator """rankbased""" +1032 70 dataset """kinships""" +1032 70 model """unstructuredmodel""" +1032 70 loss """marginranking""" +1032 70 regularizer """no""" +1032 70 optimizer """adam""" +1032 70 training_loop """owa""" +1032 70 negative_sampler """basic""" +1032 70 evaluator """rankbased""" +1032 71 dataset """kinships""" +1032 71 model """unstructuredmodel""" +1032 71 loss """marginranking""" +1032 71 regularizer """no""" +1032 71 optimizer """adam""" +1032 71 training_loop """owa""" +1032 71 negative_sampler """basic""" +1032 71 evaluator """rankbased""" +1032 72 dataset """kinships""" +1032 72 model """unstructuredmodel""" +1032 72 loss """marginranking""" +1032 72 regularizer """no""" +1032 72 optimizer """adam""" +1032 72 training_loop """owa""" +1032 72 negative_sampler """basic""" +1032 72 evaluator """rankbased""" +1032 73 dataset """kinships""" +1032 73 model """unstructuredmodel""" +1032 73 loss """marginranking""" +1032 73 regularizer """no""" +1032 73 optimizer """adam""" +1032 73 training_loop """owa""" +1032 73 negative_sampler """basic""" +1032 73 evaluator """rankbased""" +1032 74 dataset """kinships""" +1032 74 model """unstructuredmodel""" 
+1032 74 loss """marginranking""" +1032 74 regularizer """no""" +1032 74 optimizer """adam""" +1032 74 training_loop """owa""" +1032 74 negative_sampler """basic""" +1032 74 evaluator """rankbased""" +1032 75 dataset """kinships""" +1032 75 model """unstructuredmodel""" +1032 75 loss """marginranking""" +1032 75 regularizer """no""" +1032 75 optimizer """adam""" +1032 75 training_loop """owa""" +1032 75 negative_sampler """basic""" +1032 75 evaluator """rankbased""" +1032 76 dataset """kinships""" +1032 76 model """unstructuredmodel""" +1032 76 loss """marginranking""" +1032 76 regularizer """no""" +1032 76 optimizer """adam""" +1032 76 training_loop """owa""" +1032 76 negative_sampler """basic""" +1032 76 evaluator """rankbased""" +1032 77 dataset """kinships""" +1032 77 model """unstructuredmodel""" +1032 77 loss """marginranking""" +1032 77 regularizer """no""" +1032 77 optimizer """adam""" +1032 77 training_loop """owa""" +1032 77 negative_sampler """basic""" +1032 77 evaluator """rankbased""" +1032 78 dataset """kinships""" +1032 78 model """unstructuredmodel""" +1032 78 loss """marginranking""" +1032 78 regularizer """no""" +1032 78 optimizer """adam""" +1032 78 training_loop """owa""" +1032 78 negative_sampler """basic""" +1032 78 evaluator """rankbased""" +1032 79 dataset """kinships""" +1032 79 model """unstructuredmodel""" +1032 79 loss """marginranking""" +1032 79 regularizer """no""" +1032 79 optimizer """adam""" +1032 79 training_loop """owa""" +1032 79 negative_sampler """basic""" +1032 79 evaluator """rankbased""" +1032 80 dataset """kinships""" +1032 80 model """unstructuredmodel""" +1032 80 loss """marginranking""" +1032 80 regularizer """no""" +1032 80 optimizer """adam""" +1032 80 training_loop """owa""" +1032 80 negative_sampler """basic""" +1032 80 evaluator """rankbased""" +1032 81 dataset """kinships""" +1032 81 model """unstructuredmodel""" +1032 81 loss """marginranking""" +1032 81 regularizer """no""" +1032 81 optimizer """adam""" +1032 81 
training_loop """owa""" +1032 81 negative_sampler """basic""" +1032 81 evaluator """rankbased""" +1032 82 dataset """kinships""" +1032 82 model """unstructuredmodel""" +1032 82 loss """marginranking""" +1032 82 regularizer """no""" +1032 82 optimizer """adam""" +1032 82 training_loop """owa""" +1032 82 negative_sampler """basic""" +1032 82 evaluator """rankbased""" +1032 83 dataset """kinships""" +1032 83 model """unstructuredmodel""" +1032 83 loss """marginranking""" +1032 83 regularizer """no""" +1032 83 optimizer """adam""" +1032 83 training_loop """owa""" +1032 83 negative_sampler """basic""" +1032 83 evaluator """rankbased""" +1032 84 dataset """kinships""" +1032 84 model """unstructuredmodel""" +1032 84 loss """marginranking""" +1032 84 regularizer """no""" +1032 84 optimizer """adam""" +1032 84 training_loop """owa""" +1032 84 negative_sampler """basic""" +1032 84 evaluator """rankbased""" +1032 85 dataset """kinships""" +1032 85 model """unstructuredmodel""" +1032 85 loss """marginranking""" +1032 85 regularizer """no""" +1032 85 optimizer """adam""" +1032 85 training_loop """owa""" +1032 85 negative_sampler """basic""" +1032 85 evaluator """rankbased""" +1032 86 dataset """kinships""" +1032 86 model """unstructuredmodel""" +1032 86 loss """marginranking""" +1032 86 regularizer """no""" +1032 86 optimizer """adam""" +1032 86 training_loop """owa""" +1032 86 negative_sampler """basic""" +1032 86 evaluator """rankbased""" +1032 87 dataset """kinships""" +1032 87 model """unstructuredmodel""" +1032 87 loss """marginranking""" +1032 87 regularizer """no""" +1032 87 optimizer """adam""" +1032 87 training_loop """owa""" +1032 87 negative_sampler """basic""" +1032 87 evaluator """rankbased""" +1032 88 dataset """kinships""" +1032 88 model """unstructuredmodel""" +1032 88 loss """marginranking""" +1032 88 regularizer """no""" +1032 88 optimizer """adam""" +1032 88 training_loop """owa""" +1032 88 negative_sampler """basic""" +1032 88 evaluator """rankbased""" +1032 
89 dataset """kinships""" +1032 89 model """unstructuredmodel""" +1032 89 loss """marginranking""" +1032 89 regularizer """no""" +1032 89 optimizer """adam""" +1032 89 training_loop """owa""" +1032 89 negative_sampler """basic""" +1032 89 evaluator """rankbased""" +1032 90 dataset """kinships""" +1032 90 model """unstructuredmodel""" +1032 90 loss """marginranking""" +1032 90 regularizer """no""" +1032 90 optimizer """adam""" +1032 90 training_loop """owa""" +1032 90 negative_sampler """basic""" +1032 90 evaluator """rankbased""" +1032 91 dataset """kinships""" +1032 91 model """unstructuredmodel""" +1032 91 loss """marginranking""" +1032 91 regularizer """no""" +1032 91 optimizer """adam""" +1032 91 training_loop """owa""" +1032 91 negative_sampler """basic""" +1032 91 evaluator """rankbased""" +1032 92 dataset """kinships""" +1032 92 model """unstructuredmodel""" +1032 92 loss """marginranking""" +1032 92 regularizer """no""" +1032 92 optimizer """adam""" +1032 92 training_loop """owa""" +1032 92 negative_sampler """basic""" +1032 92 evaluator """rankbased""" +1032 93 dataset """kinships""" +1032 93 model """unstructuredmodel""" +1032 93 loss """marginranking""" +1032 93 regularizer """no""" +1032 93 optimizer """adam""" +1032 93 training_loop """owa""" +1032 93 negative_sampler """basic""" +1032 93 evaluator """rankbased""" +1032 94 dataset """kinships""" +1032 94 model """unstructuredmodel""" +1032 94 loss """marginranking""" +1032 94 regularizer """no""" +1032 94 optimizer """adam""" +1032 94 training_loop """owa""" +1032 94 negative_sampler """basic""" +1032 94 evaluator """rankbased""" +1032 95 dataset """kinships""" +1032 95 model """unstructuredmodel""" +1032 95 loss """marginranking""" +1032 95 regularizer """no""" +1032 95 optimizer """adam""" +1032 95 training_loop """owa""" +1032 95 negative_sampler """basic""" +1032 95 evaluator """rankbased""" +1032 96 dataset """kinships""" +1032 96 model """unstructuredmodel""" +1032 96 loss """marginranking""" 
+1032 96 regularizer """no""" +1032 96 optimizer """adam""" +1032 96 training_loop """owa""" +1032 96 negative_sampler """basic""" +1032 96 evaluator """rankbased""" +1032 97 dataset """kinships""" +1032 97 model """unstructuredmodel""" +1032 97 loss """marginranking""" +1032 97 regularizer """no""" +1032 97 optimizer """adam""" +1032 97 training_loop """owa""" +1032 97 negative_sampler """basic""" +1032 97 evaluator """rankbased""" +1032 98 dataset """kinships""" +1032 98 model """unstructuredmodel""" +1032 98 loss """marginranking""" +1032 98 regularizer """no""" +1032 98 optimizer """adam""" +1032 98 training_loop """owa""" +1032 98 negative_sampler """basic""" +1032 98 evaluator """rankbased""" +1032 99 dataset """kinships""" +1032 99 model """unstructuredmodel""" +1032 99 loss """marginranking""" +1032 99 regularizer """no""" +1032 99 optimizer """adam""" +1032 99 training_loop """owa""" +1032 99 negative_sampler """basic""" +1032 99 evaluator """rankbased""" +1032 100 dataset """kinships""" +1032 100 model """unstructuredmodel""" +1032 100 loss """marginranking""" +1032 100 regularizer """no""" +1032 100 optimizer """adam""" +1032 100 training_loop """owa""" +1032 100 negative_sampler """basic""" +1032 100 evaluator """rankbased""" +1033 1 model.embedding_dim 0.0 +1033 1 model.scoring_fct_norm 2.0 +1033 1 loss.margin 6.920655100949213 +1033 1 optimizer.lr 0.06537185782258795 +1033 1 negative_sampler.num_negs_per_pos 51.0 +1033 1 training.batch_size 2.0 +1033 2 model.embedding_dim 0.0 +1033 2 model.scoring_fct_norm 2.0 +1033 2 loss.margin 7.7902716881259835 +1033 2 optimizer.lr 0.05712528555358263 +1033 2 negative_sampler.num_negs_per_pos 58.0 +1033 2 training.batch_size 2.0 +1033 3 model.embedding_dim 2.0 +1033 3 model.scoring_fct_norm 2.0 +1033 3 loss.margin 8.51252816481884 +1033 3 optimizer.lr 0.06869528181375952 +1033 3 negative_sampler.num_negs_per_pos 21.0 +1033 3 training.batch_size 2.0 +1033 4 model.embedding_dim 2.0 +1033 4 model.scoring_fct_norm 2.0 
+1033 4 loss.margin 5.903225275493043 +1033 4 optimizer.lr 0.006484123952138863 +1033 4 negative_sampler.num_negs_per_pos 95.0 +1033 4 training.batch_size 2.0 +1033 5 model.embedding_dim 0.0 +1033 5 model.scoring_fct_norm 1.0 +1033 5 loss.margin 7.831017539250275 +1033 5 optimizer.lr 0.05036085587543692 +1033 5 negative_sampler.num_negs_per_pos 90.0 +1033 5 training.batch_size 0.0 +1033 6 model.embedding_dim 0.0 +1033 6 model.scoring_fct_norm 2.0 +1033 6 loss.margin 1.1584825425222718 +1033 6 optimizer.lr 0.014248763461251457 +1033 6 negative_sampler.num_negs_per_pos 0.0 +1033 6 training.batch_size 2.0 +1033 7 model.embedding_dim 2.0 +1033 7 model.scoring_fct_norm 2.0 +1033 7 loss.margin 8.810450554402863 +1033 7 optimizer.lr 0.03778747560199314 +1033 7 negative_sampler.num_negs_per_pos 82.0 +1033 7 training.batch_size 1.0 +1033 8 model.embedding_dim 1.0 +1033 8 model.scoring_fct_norm 1.0 +1033 8 loss.margin 2.288078424077291 +1033 8 optimizer.lr 0.0019230883147070966 +1033 8 negative_sampler.num_negs_per_pos 87.0 +1033 8 training.batch_size 1.0 +1033 9 model.embedding_dim 2.0 +1033 9 model.scoring_fct_norm 1.0 +1033 9 loss.margin 5.397457642667915 +1033 9 optimizer.lr 0.003124372005894972 +1033 9 negative_sampler.num_negs_per_pos 91.0 +1033 9 training.batch_size 1.0 +1033 10 model.embedding_dim 0.0 +1033 10 model.scoring_fct_norm 2.0 +1033 10 loss.margin 3.067223878323432 +1033 10 optimizer.lr 0.0025183687661785538 +1033 10 negative_sampler.num_negs_per_pos 46.0 +1033 10 training.batch_size 1.0 +1033 11 model.embedding_dim 1.0 +1033 11 model.scoring_fct_norm 2.0 +1033 11 loss.margin 3.214046098029965 +1033 11 optimizer.lr 0.007662134235029266 +1033 11 negative_sampler.num_negs_per_pos 27.0 +1033 11 training.batch_size 0.0 +1033 12 model.embedding_dim 0.0 +1033 12 model.scoring_fct_norm 2.0 +1033 12 loss.margin 6.864503431926399 +1033 12 optimizer.lr 0.05122395972672901 +1033 12 negative_sampler.num_negs_per_pos 27.0 +1033 12 training.batch_size 0.0 +1033 13 
model.embedding_dim 1.0 +1033 13 model.scoring_fct_norm 1.0 +1033 13 loss.margin 7.16888376624741 +1033 13 optimizer.lr 0.004755790275878931 +1033 13 negative_sampler.num_negs_per_pos 32.0 +1033 13 training.batch_size 1.0 +1033 14 model.embedding_dim 2.0 +1033 14 model.scoring_fct_norm 2.0 +1033 14 loss.margin 2.96830307665006 +1033 14 optimizer.lr 0.0010532468712147933 +1033 14 negative_sampler.num_negs_per_pos 13.0 +1033 14 training.batch_size 0.0 +1033 15 model.embedding_dim 2.0 +1033 15 model.scoring_fct_norm 2.0 +1033 15 loss.margin 2.4005947144611097 +1033 15 optimizer.lr 0.015871452454825463 +1033 15 negative_sampler.num_negs_per_pos 30.0 +1033 15 training.batch_size 2.0 +1033 16 model.embedding_dim 2.0 +1033 16 model.scoring_fct_norm 2.0 +1033 16 loss.margin 7.46089562319756 +1033 16 optimizer.lr 0.002484532112904384 +1033 16 negative_sampler.num_negs_per_pos 77.0 +1033 16 training.batch_size 2.0 +1033 17 model.embedding_dim 2.0 +1033 17 model.scoring_fct_norm 2.0 +1033 17 loss.margin 6.562330859887579 +1033 17 optimizer.lr 0.01072741975188479 +1033 17 negative_sampler.num_negs_per_pos 40.0 +1033 17 training.batch_size 1.0 +1033 18 model.embedding_dim 0.0 +1033 18 model.scoring_fct_norm 2.0 +1033 18 loss.margin 5.702750612076884 +1033 18 optimizer.lr 0.08050403943285098 +1033 18 negative_sampler.num_negs_per_pos 92.0 +1033 18 training.batch_size 1.0 +1033 19 model.embedding_dim 2.0 +1033 19 model.scoring_fct_norm 2.0 +1033 19 loss.margin 4.5367166505024255 +1033 19 optimizer.lr 0.0014986590110702151 +1033 19 negative_sampler.num_negs_per_pos 11.0 +1033 19 training.batch_size 1.0 +1033 20 model.embedding_dim 0.0 +1033 20 model.scoring_fct_norm 2.0 +1033 20 loss.margin 9.844853230946368 +1033 20 optimizer.lr 0.005446241691337513 +1033 20 negative_sampler.num_negs_per_pos 76.0 +1033 20 training.batch_size 1.0 +1033 21 model.embedding_dim 1.0 +1033 21 model.scoring_fct_norm 1.0 +1033 21 loss.margin 2.7216745809526204 +1033 21 optimizer.lr 0.001490243636900452 
+1033 21 negative_sampler.num_negs_per_pos 58.0 +1033 21 training.batch_size 2.0 +1033 22 model.embedding_dim 2.0 +1033 22 model.scoring_fct_norm 1.0 +1033 22 loss.margin 7.432831738481911 +1033 22 optimizer.lr 0.03198838050812315 +1033 22 negative_sampler.num_negs_per_pos 14.0 +1033 22 training.batch_size 0.0 +1033 23 model.embedding_dim 2.0 +1033 23 model.scoring_fct_norm 2.0 +1033 23 loss.margin 8.068154339443202 +1033 23 optimizer.lr 0.07706849410615255 +1033 23 negative_sampler.num_negs_per_pos 28.0 +1033 23 training.batch_size 1.0 +1033 24 model.embedding_dim 0.0 +1033 24 model.scoring_fct_norm 2.0 +1033 24 loss.margin 1.0914770502334041 +1033 24 optimizer.lr 0.05257577481729161 +1033 24 negative_sampler.num_negs_per_pos 71.0 +1033 24 training.batch_size 2.0 +1033 25 model.embedding_dim 2.0 +1033 25 model.scoring_fct_norm 1.0 +1033 25 loss.margin 6.909524404838458 +1033 25 optimizer.lr 0.005686465922703457 +1033 25 negative_sampler.num_negs_per_pos 19.0 +1033 25 training.batch_size 2.0 +1033 26 model.embedding_dim 2.0 +1033 26 model.scoring_fct_norm 2.0 +1033 26 loss.margin 5.490340668012635 +1033 26 optimizer.lr 0.005492989814000133 +1033 26 negative_sampler.num_negs_per_pos 70.0 +1033 26 training.batch_size 2.0 +1033 27 model.embedding_dim 1.0 +1033 27 model.scoring_fct_norm 1.0 +1033 27 loss.margin 3.891927045511442 +1033 27 optimizer.lr 0.008150943103336946 +1033 27 negative_sampler.num_negs_per_pos 61.0 +1033 27 training.batch_size 1.0 +1033 28 model.embedding_dim 2.0 +1033 28 model.scoring_fct_norm 1.0 +1033 28 loss.margin 9.11273248151785 +1033 28 optimizer.lr 0.0011248241104375222 +1033 28 negative_sampler.num_negs_per_pos 60.0 +1033 28 training.batch_size 1.0 +1033 29 model.embedding_dim 2.0 +1033 29 model.scoring_fct_norm 2.0 +1033 29 loss.margin 4.412928882931761 +1033 29 optimizer.lr 0.0017487756388540816 +1033 29 negative_sampler.num_negs_per_pos 89.0 +1033 29 training.batch_size 0.0 +1033 30 model.embedding_dim 0.0 +1033 30 
model.scoring_fct_norm 1.0 +1033 30 loss.margin 0.5515201668780003 +1033 30 optimizer.lr 0.009959988596895581 +1033 30 negative_sampler.num_negs_per_pos 45.0 +1033 30 training.batch_size 1.0 +1033 31 model.embedding_dim 2.0 +1033 31 model.scoring_fct_norm 2.0 +1033 31 loss.margin 3.740509625106854 +1033 31 optimizer.lr 0.006380009642148803 +1033 31 negative_sampler.num_negs_per_pos 36.0 +1033 31 training.batch_size 1.0 +1033 32 model.embedding_dim 1.0 +1033 32 model.scoring_fct_norm 1.0 +1033 32 loss.margin 2.0155874242111933 +1033 32 optimizer.lr 0.014138171037109895 +1033 32 negative_sampler.num_negs_per_pos 16.0 +1033 32 training.batch_size 1.0 +1033 33 model.embedding_dim 1.0 +1033 33 model.scoring_fct_norm 2.0 +1033 33 loss.margin 7.740663157256465 +1033 33 optimizer.lr 0.0835889825563521 +1033 33 negative_sampler.num_negs_per_pos 26.0 +1033 33 training.batch_size 1.0 +1033 34 model.embedding_dim 0.0 +1033 34 model.scoring_fct_norm 1.0 +1033 34 loss.margin 6.923043412099217 +1033 34 optimizer.lr 0.008478275730938898 +1033 34 negative_sampler.num_negs_per_pos 80.0 +1033 34 training.batch_size 1.0 +1033 35 model.embedding_dim 0.0 +1033 35 model.scoring_fct_norm 1.0 +1033 35 loss.margin 7.0210855492655515 +1033 35 optimizer.lr 0.009492015983413185 +1033 35 negative_sampler.num_negs_per_pos 28.0 +1033 35 training.batch_size 1.0 +1033 36 model.embedding_dim 2.0 +1033 36 model.scoring_fct_norm 2.0 +1033 36 loss.margin 4.802185066286629 +1033 36 optimizer.lr 0.015643558772336585 +1033 36 negative_sampler.num_negs_per_pos 32.0 +1033 36 training.batch_size 0.0 +1033 37 model.embedding_dim 2.0 +1033 37 model.scoring_fct_norm 2.0 +1033 37 loss.margin 9.302545309831583 +1033 37 optimizer.lr 0.0014929482117844598 +1033 37 negative_sampler.num_negs_per_pos 79.0 +1033 37 training.batch_size 0.0 +1033 38 model.embedding_dim 2.0 +1033 38 model.scoring_fct_norm 1.0 +1033 38 loss.margin 1.5031092297198365 +1033 38 optimizer.lr 0.019756350357301396 +1033 38 
negative_sampler.num_negs_per_pos 97.0 +1033 38 training.batch_size 2.0 +1033 39 model.embedding_dim 2.0 +1033 39 model.scoring_fct_norm 2.0 +1033 39 loss.margin 8.04957670478597 +1033 39 optimizer.lr 0.04591064903208452 +1033 39 negative_sampler.num_negs_per_pos 94.0 +1033 39 training.batch_size 2.0 +1033 40 model.embedding_dim 2.0 +1033 40 model.scoring_fct_norm 2.0 +1033 40 loss.margin 6.1756113188263395 +1033 40 optimizer.lr 0.006792330387191412 +1033 40 negative_sampler.num_negs_per_pos 62.0 +1033 40 training.batch_size 2.0 +1033 41 model.embedding_dim 1.0 +1033 41 model.scoring_fct_norm 1.0 +1033 41 loss.margin 3.775402532321414 +1033 41 optimizer.lr 0.034816186222036444 +1033 41 negative_sampler.num_negs_per_pos 47.0 +1033 41 training.batch_size 1.0 +1033 42 model.embedding_dim 2.0 +1033 42 model.scoring_fct_norm 1.0 +1033 42 loss.margin 6.102440710124494 +1033 42 optimizer.lr 0.012990603254245036 +1033 42 negative_sampler.num_negs_per_pos 93.0 +1033 42 training.batch_size 0.0 +1033 43 model.embedding_dim 0.0 +1033 43 model.scoring_fct_norm 2.0 +1033 43 loss.margin 9.092638628725993 +1033 43 optimizer.lr 0.007066259662139482 +1033 43 negative_sampler.num_negs_per_pos 76.0 +1033 43 training.batch_size 2.0 +1033 44 model.embedding_dim 0.0 +1033 44 model.scoring_fct_norm 2.0 +1033 44 loss.margin 1.637315850683653 +1033 44 optimizer.lr 0.0018564099971476319 +1033 44 negative_sampler.num_negs_per_pos 92.0 +1033 44 training.batch_size 1.0 +1033 45 model.embedding_dim 2.0 +1033 45 model.scoring_fct_norm 1.0 +1033 45 loss.margin 1.2704149484247365 +1033 45 optimizer.lr 0.00459697123742535 +1033 45 negative_sampler.num_negs_per_pos 67.0 +1033 45 training.batch_size 1.0 +1033 46 model.embedding_dim 0.0 +1033 46 model.scoring_fct_norm 1.0 +1033 46 loss.margin 7.63765590351645 +1033 46 optimizer.lr 0.0016375386710285934 +1033 46 negative_sampler.num_negs_per_pos 15.0 +1033 46 training.batch_size 1.0 +1033 47 model.embedding_dim 2.0 +1033 47 model.scoring_fct_norm 2.0 
+1033 47 loss.margin 4.180416903218303 +1033 47 optimizer.lr 0.0055574166960771516 +1033 47 negative_sampler.num_negs_per_pos 58.0 +1033 47 training.batch_size 2.0 +1033 48 model.embedding_dim 2.0 +1033 48 model.scoring_fct_norm 1.0 +1033 48 loss.margin 9.408995636699183 +1033 48 optimizer.lr 0.0017736436274548766 +1033 48 negative_sampler.num_negs_per_pos 59.0 +1033 48 training.batch_size 0.0 +1033 49 model.embedding_dim 1.0 +1033 49 model.scoring_fct_norm 1.0 +1033 49 loss.margin 4.84449763241665 +1033 49 optimizer.lr 0.03902884715859748 +1033 49 negative_sampler.num_negs_per_pos 25.0 +1033 49 training.batch_size 0.0 +1033 50 model.embedding_dim 0.0 +1033 50 model.scoring_fct_norm 1.0 +1033 50 loss.margin 3.4017962299585056 +1033 50 optimizer.lr 0.04262030568402302 +1033 50 negative_sampler.num_negs_per_pos 47.0 +1033 50 training.batch_size 2.0 +1033 51 model.embedding_dim 1.0 +1033 51 model.scoring_fct_norm 2.0 +1033 51 loss.margin 7.694051689102567 +1033 51 optimizer.lr 0.0769428685811488 +1033 51 negative_sampler.num_negs_per_pos 11.0 +1033 51 training.batch_size 1.0 +1033 52 model.embedding_dim 0.0 +1033 52 model.scoring_fct_norm 2.0 +1033 52 loss.margin 6.378513676377607 +1033 52 optimizer.lr 0.0030089912321341382 +1033 52 negative_sampler.num_negs_per_pos 94.0 +1033 52 training.batch_size 2.0 +1033 53 model.embedding_dim 2.0 +1033 53 model.scoring_fct_norm 1.0 +1033 53 loss.margin 5.03374929585662 +1033 53 optimizer.lr 0.0015923847880231973 +1033 53 negative_sampler.num_negs_per_pos 6.0 +1033 53 training.batch_size 1.0 +1033 54 model.embedding_dim 1.0 +1033 54 model.scoring_fct_norm 1.0 +1033 54 loss.margin 7.8528818974009384 +1033 54 optimizer.lr 0.003121280121623525 +1033 54 negative_sampler.num_negs_per_pos 19.0 +1033 54 training.batch_size 0.0 +1033 55 model.embedding_dim 1.0 +1033 55 model.scoring_fct_norm 1.0 +1033 55 loss.margin 8.398244465682023 +1033 55 optimizer.lr 0.030784368050080866 +1033 55 negative_sampler.num_negs_per_pos 8.0 +1033 55 
training.batch_size 1.0 +1033 56 model.embedding_dim 1.0 +1033 56 model.scoring_fct_norm 1.0 +1033 56 loss.margin 6.29697390159985 +1033 56 optimizer.lr 0.0056443339654763895 +1033 56 negative_sampler.num_negs_per_pos 74.0 +1033 56 training.batch_size 0.0 +1033 57 model.embedding_dim 1.0 +1033 57 model.scoring_fct_norm 2.0 +1033 57 loss.margin 7.179486714688141 +1033 57 optimizer.lr 0.02017138652694954 +1033 57 negative_sampler.num_negs_per_pos 65.0 +1033 57 training.batch_size 2.0 +1033 58 model.embedding_dim 0.0 +1033 58 model.scoring_fct_norm 1.0 +1033 58 loss.margin 7.819931793414744 +1033 58 optimizer.lr 0.0014457001703895324 +1033 58 negative_sampler.num_negs_per_pos 30.0 +1033 58 training.batch_size 0.0 +1033 59 model.embedding_dim 2.0 +1033 59 model.scoring_fct_norm 1.0 +1033 59 loss.margin 6.942272687300621 +1033 59 optimizer.lr 0.09996095207009094 +1033 59 negative_sampler.num_negs_per_pos 57.0 +1033 59 training.batch_size 2.0 +1033 60 model.embedding_dim 2.0 +1033 60 model.scoring_fct_norm 1.0 +1033 60 loss.margin 7.350684905696169 +1033 60 optimizer.lr 0.027801934707128984 +1033 60 negative_sampler.num_negs_per_pos 4.0 +1033 60 training.batch_size 2.0 +1033 61 model.embedding_dim 0.0 +1033 61 model.scoring_fct_norm 2.0 +1033 61 loss.margin 7.164336510851797 +1033 61 optimizer.lr 0.001033780292014927 +1033 61 negative_sampler.num_negs_per_pos 17.0 +1033 61 training.batch_size 2.0 +1033 62 model.embedding_dim 2.0 +1033 62 model.scoring_fct_norm 2.0 +1033 62 loss.margin 0.8887152101873415 +1033 62 optimizer.lr 0.01412665822611976 +1033 62 negative_sampler.num_negs_per_pos 38.0 +1033 62 training.batch_size 0.0 +1033 63 model.embedding_dim 0.0 +1033 63 model.scoring_fct_norm 2.0 +1033 63 loss.margin 0.6819882090039688 +1033 63 optimizer.lr 0.015525632621561695 +1033 63 negative_sampler.num_negs_per_pos 13.0 +1033 63 training.batch_size 2.0 +1033 64 model.embedding_dim 0.0 +1033 64 model.scoring_fct_norm 1.0 +1033 64 loss.margin 1.5542786001818358 +1033 64 
optimizer.lr 0.007760217264535014 +1033 64 negative_sampler.num_negs_per_pos 29.0 +1033 64 training.batch_size 1.0 +1033 65 model.embedding_dim 1.0 +1033 65 model.scoring_fct_norm 1.0 +1033 65 loss.margin 7.652624543825639 +1033 65 optimizer.lr 0.002466588279976698 +1033 65 negative_sampler.num_negs_per_pos 60.0 +1033 65 training.batch_size 2.0 +1033 66 model.embedding_dim 0.0 +1033 66 model.scoring_fct_norm 1.0 +1033 66 loss.margin 7.8771738763095245 +1033 66 optimizer.lr 0.03867232658853046 +1033 66 negative_sampler.num_negs_per_pos 34.0 +1033 66 training.batch_size 1.0 +1033 67 model.embedding_dim 1.0 +1033 67 model.scoring_fct_norm 2.0 +1033 67 loss.margin 9.407535237843017 +1033 67 optimizer.lr 0.09687404387359089 +1033 67 negative_sampler.num_negs_per_pos 63.0 +1033 67 training.batch_size 2.0 +1033 68 model.embedding_dim 1.0 +1033 68 model.scoring_fct_norm 1.0 +1033 68 loss.margin 2.134655599868244 +1033 68 optimizer.lr 0.08159793244074562 +1033 68 negative_sampler.num_negs_per_pos 91.0 +1033 68 training.batch_size 1.0 +1033 69 model.embedding_dim 2.0 +1033 69 model.scoring_fct_norm 1.0 +1033 69 loss.margin 5.150593103274598 +1033 69 optimizer.lr 0.005428943792339281 +1033 69 negative_sampler.num_negs_per_pos 64.0 +1033 69 training.batch_size 0.0 +1033 70 model.embedding_dim 1.0 +1033 70 model.scoring_fct_norm 1.0 +1033 70 loss.margin 4.950137531178655 +1033 70 optimizer.lr 0.001790678789295874 +1033 70 negative_sampler.num_negs_per_pos 70.0 +1033 70 training.batch_size 1.0 +1033 71 model.embedding_dim 2.0 +1033 71 model.scoring_fct_norm 1.0 +1033 71 loss.margin 9.94481447287562 +1033 71 optimizer.lr 0.011925240412837613 +1033 71 negative_sampler.num_negs_per_pos 56.0 +1033 71 training.batch_size 2.0 +1033 72 model.embedding_dim 0.0 +1033 72 model.scoring_fct_norm 2.0 +1033 72 loss.margin 9.145925592327059 +1033 72 optimizer.lr 0.004394563741452495 +1033 72 negative_sampler.num_negs_per_pos 90.0 +1033 72 training.batch_size 1.0 +1033 73 model.embedding_dim 
0.0 +1033 73 model.scoring_fct_norm 1.0 +1033 73 loss.margin 2.7510650866494526 +1033 73 optimizer.lr 0.03137489118717051 +1033 73 negative_sampler.num_negs_per_pos 51.0 +1033 73 training.batch_size 0.0 +1033 74 model.embedding_dim 2.0 +1033 74 model.scoring_fct_norm 2.0 +1033 74 loss.margin 6.206949913824962 +1033 74 optimizer.lr 0.0020621395785997802 +1033 74 negative_sampler.num_negs_per_pos 57.0 +1033 74 training.batch_size 1.0 +1033 75 model.embedding_dim 2.0 +1033 75 model.scoring_fct_norm 2.0 +1033 75 loss.margin 4.777648178995494 +1033 75 optimizer.lr 0.005707808135873483 +1033 75 negative_sampler.num_negs_per_pos 39.0 +1033 75 training.batch_size 2.0 +1033 76 model.embedding_dim 2.0 +1033 76 model.scoring_fct_norm 1.0 +1033 76 loss.margin 2.4791104971363676 +1033 76 optimizer.lr 0.0011720362381733157 +1033 76 negative_sampler.num_negs_per_pos 96.0 +1033 76 training.batch_size 1.0 +1033 77 model.embedding_dim 2.0 +1033 77 model.scoring_fct_norm 2.0 +1033 77 loss.margin 8.886046134143626 +1033 77 optimizer.lr 0.0027350564936815475 +1033 77 negative_sampler.num_negs_per_pos 30.0 +1033 77 training.batch_size 1.0 +1033 78 model.embedding_dim 1.0 +1033 78 model.scoring_fct_norm 1.0 +1033 78 loss.margin 4.0358193629800345 +1033 78 optimizer.lr 0.009496032335427536 +1033 78 negative_sampler.num_negs_per_pos 81.0 +1033 78 training.batch_size 0.0 +1033 79 model.embedding_dim 2.0 +1033 79 model.scoring_fct_norm 1.0 +1033 79 loss.margin 6.542947794429524 +1033 79 optimizer.lr 0.011262013374993118 +1033 79 negative_sampler.num_negs_per_pos 55.0 +1033 79 training.batch_size 2.0 +1033 80 model.embedding_dim 2.0 +1033 80 model.scoring_fct_norm 2.0 +1033 80 loss.margin 2.0256969156857196 +1033 80 optimizer.lr 0.01375390559645528 +1033 80 negative_sampler.num_negs_per_pos 61.0 +1033 80 training.batch_size 0.0 +1033 81 model.embedding_dim 0.0 +1033 81 model.scoring_fct_norm 2.0 +1033 81 loss.margin 0.9727889881310516 +1033 81 optimizer.lr 0.002268086738698064 +1033 81 
negative_sampler.num_negs_per_pos 90.0 +1033 81 training.batch_size 1.0 +1033 82 model.embedding_dim 1.0 +1033 82 model.scoring_fct_norm 1.0 +1033 82 loss.margin 7.571131977021622 +1033 82 optimizer.lr 0.0012031600072056404 +1033 82 negative_sampler.num_negs_per_pos 53.0 +1033 82 training.batch_size 0.0 +1033 83 model.embedding_dim 2.0 +1033 83 model.scoring_fct_norm 1.0 +1033 83 loss.margin 5.534974213580776 +1033 83 optimizer.lr 0.005646432920937685 +1033 83 negative_sampler.num_negs_per_pos 85.0 +1033 83 training.batch_size 2.0 +1033 84 model.embedding_dim 2.0 +1033 84 model.scoring_fct_norm 2.0 +1033 84 loss.margin 5.149710908372231 +1033 84 optimizer.lr 0.0035224739275771973 +1033 84 negative_sampler.num_negs_per_pos 93.0 +1033 84 training.batch_size 2.0 +1033 85 model.embedding_dim 2.0 +1033 85 model.scoring_fct_norm 2.0 +1033 85 loss.margin 9.461695115732288 +1033 85 optimizer.lr 0.0016256891993825975 +1033 85 negative_sampler.num_negs_per_pos 92.0 +1033 85 training.batch_size 0.0 +1033 86 model.embedding_dim 0.0 +1033 86 model.scoring_fct_norm 2.0 +1033 86 loss.margin 3.6920456588558155 +1033 86 optimizer.lr 0.011634250736337778 +1033 86 negative_sampler.num_negs_per_pos 48.0 +1033 86 training.batch_size 0.0 +1033 87 model.embedding_dim 2.0 +1033 87 model.scoring_fct_norm 2.0 +1033 87 loss.margin 1.8321125638813605 +1033 87 optimizer.lr 0.021376257122915132 +1033 87 negative_sampler.num_negs_per_pos 0.0 +1033 87 training.batch_size 0.0 +1033 88 model.embedding_dim 0.0 +1033 88 model.scoring_fct_norm 1.0 +1033 88 loss.margin 7.188300406552208 +1033 88 optimizer.lr 0.002015393308116632 +1033 88 negative_sampler.num_negs_per_pos 71.0 +1033 88 training.batch_size 0.0 +1033 89 model.embedding_dim 0.0 +1033 89 model.scoring_fct_norm 1.0 +1033 89 loss.margin 2.4408156290382923 +1033 89 optimizer.lr 0.08142004431966555 +1033 89 negative_sampler.num_negs_per_pos 61.0 +1033 89 training.batch_size 1.0 +1033 90 model.embedding_dim 2.0 +1033 90 model.scoring_fct_norm 
1.0 +1033 90 loss.margin 3.217019120709755 +1033 90 optimizer.lr 0.03788516748604274 +1033 90 negative_sampler.num_negs_per_pos 48.0 +1033 90 training.batch_size 0.0 +1033 91 model.embedding_dim 1.0 +1033 91 model.scoring_fct_norm 1.0 +1033 91 loss.margin 5.1446848926718625 +1033 91 optimizer.lr 0.03560834314837511 +1033 91 negative_sampler.num_negs_per_pos 5.0 +1033 91 training.batch_size 0.0 +1033 92 model.embedding_dim 2.0 +1033 92 model.scoring_fct_norm 1.0 +1033 92 loss.margin 7.847807157498289 +1033 92 optimizer.lr 0.041627870133150086 +1033 92 negative_sampler.num_negs_per_pos 98.0 +1033 92 training.batch_size 0.0 +1033 93 model.embedding_dim 1.0 +1033 93 model.scoring_fct_norm 1.0 +1033 93 loss.margin 8.424922293271214 +1033 93 optimizer.lr 0.022602130052614158 +1033 93 negative_sampler.num_negs_per_pos 60.0 +1033 93 training.batch_size 2.0 +1033 94 model.embedding_dim 1.0 +1033 94 model.scoring_fct_norm 2.0 +1033 94 loss.margin 5.825856133361521 +1033 94 optimizer.lr 0.0030516118618689595 +1033 94 negative_sampler.num_negs_per_pos 17.0 +1033 94 training.batch_size 0.0 +1033 95 model.embedding_dim 1.0 +1033 95 model.scoring_fct_norm 1.0 +1033 95 loss.margin 3.279765489382187 +1033 95 optimizer.lr 0.0032783142080336143 +1033 95 negative_sampler.num_negs_per_pos 96.0 +1033 95 training.batch_size 1.0 +1033 96 model.embedding_dim 2.0 +1033 96 model.scoring_fct_norm 2.0 +1033 96 loss.margin 9.349446682765672 +1033 96 optimizer.lr 0.0011573076697522148 +1033 96 negative_sampler.num_negs_per_pos 15.0 +1033 96 training.batch_size 0.0 +1033 97 model.embedding_dim 1.0 +1033 97 model.scoring_fct_norm 2.0 +1033 97 loss.margin 7.525492100414622 +1033 97 optimizer.lr 0.002117636567697487 +1033 97 negative_sampler.num_negs_per_pos 59.0 +1033 97 training.batch_size 1.0 +1033 98 model.embedding_dim 1.0 +1033 98 model.scoring_fct_norm 2.0 +1033 98 loss.margin 9.116626143168123 +1033 98 optimizer.lr 0.06390860177258094 +1033 98 negative_sampler.num_negs_per_pos 79.0 +1033 98 
training.batch_size 2.0 +1033 99 model.embedding_dim 1.0 +1033 99 model.scoring_fct_norm 2.0 +1033 99 loss.margin 7.992554345937378 +1033 99 optimizer.lr 0.007594537353084633 +1033 99 negative_sampler.num_negs_per_pos 6.0 +1033 99 training.batch_size 1.0 +1033 100 model.embedding_dim 0.0 +1033 100 model.scoring_fct_norm 1.0 +1033 100 loss.margin 8.107015054172589 +1033 100 optimizer.lr 0.05665313002900674 +1033 100 negative_sampler.num_negs_per_pos 95.0 +1033 100 training.batch_size 1.0 +1033 1 dataset """kinships""" +1033 1 model """unstructuredmodel""" +1033 1 loss """marginranking""" +1033 1 regularizer """no""" +1033 1 optimizer """adam""" +1033 1 training_loop """owa""" +1033 1 negative_sampler """basic""" +1033 1 evaluator """rankbased""" +1033 2 dataset """kinships""" +1033 2 model """unstructuredmodel""" +1033 2 loss """marginranking""" +1033 2 regularizer """no""" +1033 2 optimizer """adam""" +1033 2 training_loop """owa""" +1033 2 negative_sampler """basic""" +1033 2 evaluator """rankbased""" +1033 3 dataset """kinships""" +1033 3 model """unstructuredmodel""" +1033 3 loss """marginranking""" +1033 3 regularizer """no""" +1033 3 optimizer """adam""" +1033 3 training_loop """owa""" +1033 3 negative_sampler """basic""" +1033 3 evaluator """rankbased""" +1033 4 dataset """kinships""" +1033 4 model """unstructuredmodel""" +1033 4 loss """marginranking""" +1033 4 regularizer """no""" +1033 4 optimizer """adam""" +1033 4 training_loop """owa""" +1033 4 negative_sampler """basic""" +1033 4 evaluator """rankbased""" +1033 5 dataset """kinships""" +1033 5 model """unstructuredmodel""" +1033 5 loss """marginranking""" +1033 5 regularizer """no""" +1033 5 optimizer """adam""" +1033 5 training_loop """owa""" +1033 5 negative_sampler """basic""" +1033 5 evaluator """rankbased""" +1033 6 dataset """kinships""" +1033 6 model """unstructuredmodel""" +1033 6 loss """marginranking""" +1033 6 regularizer """no""" +1033 6 optimizer """adam""" +1033 6 training_loop """owa""" 
+1033 6 negative_sampler """basic""" +1033 6 evaluator """rankbased""" +1033 7 dataset """kinships""" +1033 7 model """unstructuredmodel""" +1033 7 loss """marginranking""" +1033 7 regularizer """no""" +1033 7 optimizer """adam""" +1033 7 training_loop """owa""" +1033 7 negative_sampler """basic""" +1033 7 evaluator """rankbased""" +1033 8 dataset """kinships""" +1033 8 model """unstructuredmodel""" +1033 8 loss """marginranking""" +1033 8 regularizer """no""" +1033 8 optimizer """adam""" +1033 8 training_loop """owa""" +1033 8 negative_sampler """basic""" +1033 8 evaluator """rankbased""" +1033 9 dataset """kinships""" +1033 9 model """unstructuredmodel""" +1033 9 loss """marginranking""" +1033 9 regularizer """no""" +1033 9 optimizer """adam""" +1033 9 training_loop """owa""" +1033 9 negative_sampler """basic""" +1033 9 evaluator """rankbased""" +1033 10 dataset """kinships""" +1033 10 model """unstructuredmodel""" +1033 10 loss """marginranking""" +1033 10 regularizer """no""" +1033 10 optimizer """adam""" +1033 10 training_loop """owa""" +1033 10 negative_sampler """basic""" +1033 10 evaluator """rankbased""" +1033 11 dataset """kinships""" +1033 11 model """unstructuredmodel""" +1033 11 loss """marginranking""" +1033 11 regularizer """no""" +1033 11 optimizer """adam""" +1033 11 training_loop """owa""" +1033 11 negative_sampler """basic""" +1033 11 evaluator """rankbased""" +1033 12 dataset """kinships""" +1033 12 model """unstructuredmodel""" +1033 12 loss """marginranking""" +1033 12 regularizer """no""" +1033 12 optimizer """adam""" +1033 12 training_loop """owa""" +1033 12 negative_sampler """basic""" +1033 12 evaluator """rankbased""" +1033 13 dataset """kinships""" +1033 13 model """unstructuredmodel""" +1033 13 loss """marginranking""" +1033 13 regularizer """no""" +1033 13 optimizer """adam""" +1033 13 training_loop """owa""" +1033 13 negative_sampler """basic""" +1033 13 evaluator """rankbased""" +1033 14 dataset """kinships""" +1033 14 model 
"""unstructuredmodel""" +1033 14 loss """marginranking""" +1033 14 regularizer """no""" +1033 14 optimizer """adam""" +1033 14 training_loop """owa""" +1033 14 negative_sampler """basic""" +1033 14 evaluator """rankbased""" +1033 15 dataset """kinships""" +1033 15 model """unstructuredmodel""" +1033 15 loss """marginranking""" +1033 15 regularizer """no""" +1033 15 optimizer """adam""" +1033 15 training_loop """owa""" +1033 15 negative_sampler """basic""" +1033 15 evaluator """rankbased""" +1033 16 dataset """kinships""" +1033 16 model """unstructuredmodel""" +1033 16 loss """marginranking""" +1033 16 regularizer """no""" +1033 16 optimizer """adam""" +1033 16 training_loop """owa""" +1033 16 negative_sampler """basic""" +1033 16 evaluator """rankbased""" +1033 17 dataset """kinships""" +1033 17 model """unstructuredmodel""" +1033 17 loss """marginranking""" +1033 17 regularizer """no""" +1033 17 optimizer """adam""" +1033 17 training_loop """owa""" +1033 17 negative_sampler """basic""" +1033 17 evaluator """rankbased""" +1033 18 dataset """kinships""" +1033 18 model """unstructuredmodel""" +1033 18 loss """marginranking""" +1033 18 regularizer """no""" +1033 18 optimizer """adam""" +1033 18 training_loop """owa""" +1033 18 negative_sampler """basic""" +1033 18 evaluator """rankbased""" +1033 19 dataset """kinships""" +1033 19 model """unstructuredmodel""" +1033 19 loss """marginranking""" +1033 19 regularizer """no""" +1033 19 optimizer """adam""" +1033 19 training_loop """owa""" +1033 19 negative_sampler """basic""" +1033 19 evaluator """rankbased""" +1033 20 dataset """kinships""" +1033 20 model """unstructuredmodel""" +1033 20 loss """marginranking""" +1033 20 regularizer """no""" +1033 20 optimizer """adam""" +1033 20 training_loop """owa""" +1033 20 negative_sampler """basic""" +1033 20 evaluator """rankbased""" +1033 21 dataset """kinships""" +1033 21 model """unstructuredmodel""" +1033 21 loss """marginranking""" +1033 21 regularizer """no""" +1033 21 
optimizer """adam""" +1033 21 training_loop """owa""" +1033 21 negative_sampler """basic""" +1033 21 evaluator """rankbased""" +1033 22 dataset """kinships""" +1033 22 model """unstructuredmodel""" +1033 22 loss """marginranking""" +1033 22 regularizer """no""" +1033 22 optimizer """adam""" +1033 22 training_loop """owa""" +1033 22 negative_sampler """basic""" +1033 22 evaluator """rankbased""" +1033 23 dataset """kinships""" +1033 23 model """unstructuredmodel""" +1033 23 loss """marginranking""" +1033 23 regularizer """no""" +1033 23 optimizer """adam""" +1033 23 training_loop """owa""" +1033 23 negative_sampler """basic""" +1033 23 evaluator """rankbased""" +1033 24 dataset """kinships""" +1033 24 model """unstructuredmodel""" +1033 24 loss """marginranking""" +1033 24 regularizer """no""" +1033 24 optimizer """adam""" +1033 24 training_loop """owa""" +1033 24 negative_sampler """basic""" +1033 24 evaluator """rankbased""" +1033 25 dataset """kinships""" +1033 25 model """unstructuredmodel""" +1033 25 loss """marginranking""" +1033 25 regularizer """no""" +1033 25 optimizer """adam""" +1033 25 training_loop """owa""" +1033 25 negative_sampler """basic""" +1033 25 evaluator """rankbased""" +1033 26 dataset """kinships""" +1033 26 model """unstructuredmodel""" +1033 26 loss """marginranking""" +1033 26 regularizer """no""" +1033 26 optimizer """adam""" +1033 26 training_loop """owa""" +1033 26 negative_sampler """basic""" +1033 26 evaluator """rankbased""" +1033 27 dataset """kinships""" +1033 27 model """unstructuredmodel""" +1033 27 loss """marginranking""" +1033 27 regularizer """no""" +1033 27 optimizer """adam""" +1033 27 training_loop """owa""" +1033 27 negative_sampler """basic""" +1033 27 evaluator """rankbased""" +1033 28 dataset """kinships""" +1033 28 model """unstructuredmodel""" +1033 28 loss """marginranking""" +1033 28 regularizer """no""" +1033 28 optimizer """adam""" +1033 28 training_loop """owa""" +1033 28 negative_sampler """basic""" +1033 28 
evaluator """rankbased""" +1033 29 dataset """kinships""" +1033 29 model """unstructuredmodel""" +1033 29 loss """marginranking""" +1033 29 regularizer """no""" +1033 29 optimizer """adam""" +1033 29 training_loop """owa""" +1033 29 negative_sampler """basic""" +1033 29 evaluator """rankbased""" +1033 30 dataset """kinships""" +1033 30 model """unstructuredmodel""" +1033 30 loss """marginranking""" +1033 30 regularizer """no""" +1033 30 optimizer """adam""" +1033 30 training_loop """owa""" +1033 30 negative_sampler """basic""" +1033 30 evaluator """rankbased""" +1033 31 dataset """kinships""" +1033 31 model """unstructuredmodel""" +1033 31 loss """marginranking""" +1033 31 regularizer """no""" +1033 31 optimizer """adam""" +1033 31 training_loop """owa""" +1033 31 negative_sampler """basic""" +1033 31 evaluator """rankbased""" +1033 32 dataset """kinships""" +1033 32 model """unstructuredmodel""" +1033 32 loss """marginranking""" +1033 32 regularizer """no""" +1033 32 optimizer """adam""" +1033 32 training_loop """owa""" +1033 32 negative_sampler """basic""" +1033 32 evaluator """rankbased""" +1033 33 dataset """kinships""" +1033 33 model """unstructuredmodel""" +1033 33 loss """marginranking""" +1033 33 regularizer """no""" +1033 33 optimizer """adam""" +1033 33 training_loop """owa""" +1033 33 negative_sampler """basic""" +1033 33 evaluator """rankbased""" +1033 34 dataset """kinships""" +1033 34 model """unstructuredmodel""" +1033 34 loss """marginranking""" +1033 34 regularizer """no""" +1033 34 optimizer """adam""" +1033 34 training_loop """owa""" +1033 34 negative_sampler """basic""" +1033 34 evaluator """rankbased""" +1033 35 dataset """kinships""" +1033 35 model """unstructuredmodel""" +1033 35 loss """marginranking""" +1033 35 regularizer """no""" +1033 35 optimizer """adam""" +1033 35 training_loop """owa""" +1033 35 negative_sampler """basic""" +1033 35 evaluator """rankbased""" +1033 36 dataset """kinships""" +1033 36 model """unstructuredmodel""" +1033 
36 loss """marginranking""" +1033 36 regularizer """no""" +1033 36 optimizer """adam""" +1033 36 training_loop """owa""" +1033 36 negative_sampler """basic""" +1033 36 evaluator """rankbased""" +1033 37 dataset """kinships""" +1033 37 model """unstructuredmodel""" +1033 37 loss """marginranking""" +1033 37 regularizer """no""" +1033 37 optimizer """adam""" +1033 37 training_loop """owa""" +1033 37 negative_sampler """basic""" +1033 37 evaluator """rankbased""" +1033 38 dataset """kinships""" +1033 38 model """unstructuredmodel""" +1033 38 loss """marginranking""" +1033 38 regularizer """no""" +1033 38 optimizer """adam""" +1033 38 training_loop """owa""" +1033 38 negative_sampler """basic""" +1033 38 evaluator """rankbased""" +1033 39 dataset """kinships""" +1033 39 model """unstructuredmodel""" +1033 39 loss """marginranking""" +1033 39 regularizer """no""" +1033 39 optimizer """adam""" +1033 39 training_loop """owa""" +1033 39 negative_sampler """basic""" +1033 39 evaluator """rankbased""" +1033 40 dataset """kinships""" +1033 40 model """unstructuredmodel""" +1033 40 loss """marginranking""" +1033 40 regularizer """no""" +1033 40 optimizer """adam""" +1033 40 training_loop """owa""" +1033 40 negative_sampler """basic""" +1033 40 evaluator """rankbased""" +1033 41 dataset """kinships""" +1033 41 model """unstructuredmodel""" +1033 41 loss """marginranking""" +1033 41 regularizer """no""" +1033 41 optimizer """adam""" +1033 41 training_loop """owa""" +1033 41 negative_sampler """basic""" +1033 41 evaluator """rankbased""" +1033 42 dataset """kinships""" +1033 42 model """unstructuredmodel""" +1033 42 loss """marginranking""" +1033 42 regularizer """no""" +1033 42 optimizer """adam""" +1033 42 training_loop """owa""" +1033 42 negative_sampler """basic""" +1033 42 evaluator """rankbased""" +1033 43 dataset """kinships""" +1033 43 model """unstructuredmodel""" +1033 43 loss """marginranking""" +1033 43 regularizer """no""" +1033 43 optimizer """adam""" +1033 43 
training_loop """owa""" +1033 43 negative_sampler """basic""" +1033 43 evaluator """rankbased""" +1033 44 dataset """kinships""" +1033 44 model """unstructuredmodel""" +1033 44 loss """marginranking""" +1033 44 regularizer """no""" +1033 44 optimizer """adam""" +1033 44 training_loop """owa""" +1033 44 negative_sampler """basic""" +1033 44 evaluator """rankbased""" +1033 45 dataset """kinships""" +1033 45 model """unstructuredmodel""" +1033 45 loss """marginranking""" +1033 45 regularizer """no""" +1033 45 optimizer """adam""" +1033 45 training_loop """owa""" +1033 45 negative_sampler """basic""" +1033 45 evaluator """rankbased""" +1033 46 dataset """kinships""" +1033 46 model """unstructuredmodel""" +1033 46 loss """marginranking""" +1033 46 regularizer """no""" +1033 46 optimizer """adam""" +1033 46 training_loop """owa""" +1033 46 negative_sampler """basic""" +1033 46 evaluator """rankbased""" +1033 47 dataset """kinships""" +1033 47 model """unstructuredmodel""" +1033 47 loss """marginranking""" +1033 47 regularizer """no""" +1033 47 optimizer """adam""" +1033 47 training_loop """owa""" +1033 47 negative_sampler """basic""" +1033 47 evaluator """rankbased""" +1033 48 dataset """kinships""" +1033 48 model """unstructuredmodel""" +1033 48 loss """marginranking""" +1033 48 regularizer """no""" +1033 48 optimizer """adam""" +1033 48 training_loop """owa""" +1033 48 negative_sampler """basic""" +1033 48 evaluator """rankbased""" +1033 49 dataset """kinships""" +1033 49 model """unstructuredmodel""" +1033 49 loss """marginranking""" +1033 49 regularizer """no""" +1033 49 optimizer """adam""" +1033 49 training_loop """owa""" +1033 49 negative_sampler """basic""" +1033 49 evaluator """rankbased""" +1033 50 dataset """kinships""" +1033 50 model """unstructuredmodel""" +1033 50 loss """marginranking""" +1033 50 regularizer """no""" +1033 50 optimizer """adam""" +1033 50 training_loop """owa""" +1033 50 negative_sampler """basic""" +1033 50 evaluator """rankbased""" +1033 
51 dataset """kinships""" +1033 51 model """unstructuredmodel""" +1033 51 loss """marginranking""" +1033 51 regularizer """no""" +1033 51 optimizer """adam""" +1033 51 training_loop """owa""" +1033 51 negative_sampler """basic""" +1033 51 evaluator """rankbased""" +1033 52 dataset """kinships""" +1033 52 model """unstructuredmodel""" +1033 52 loss """marginranking""" +1033 52 regularizer """no""" +1033 52 optimizer """adam""" +1033 52 training_loop """owa""" +1033 52 negative_sampler """basic""" +1033 52 evaluator """rankbased""" +1033 53 dataset """kinships""" +1033 53 model """unstructuredmodel""" +1033 53 loss """marginranking""" +1033 53 regularizer """no""" +1033 53 optimizer """adam""" +1033 53 training_loop """owa""" +1033 53 negative_sampler """basic""" +1033 53 evaluator """rankbased""" +1033 54 dataset """kinships""" +1033 54 model """unstructuredmodel""" +1033 54 loss """marginranking""" +1033 54 regularizer """no""" +1033 54 optimizer """adam""" +1033 54 training_loop """owa""" +1033 54 negative_sampler """basic""" +1033 54 evaluator """rankbased""" +1033 55 dataset """kinships""" +1033 55 model """unstructuredmodel""" +1033 55 loss """marginranking""" +1033 55 regularizer """no""" +1033 55 optimizer """adam""" +1033 55 training_loop """owa""" +1033 55 negative_sampler """basic""" +1033 55 evaluator """rankbased""" +1033 56 dataset """kinships""" +1033 56 model """unstructuredmodel""" +1033 56 loss """marginranking""" +1033 56 regularizer """no""" +1033 56 optimizer """adam""" +1033 56 training_loop """owa""" +1033 56 negative_sampler """basic""" +1033 56 evaluator """rankbased""" +1033 57 dataset """kinships""" +1033 57 model """unstructuredmodel""" +1033 57 loss """marginranking""" +1033 57 regularizer """no""" +1033 57 optimizer """adam""" +1033 57 training_loop """owa""" +1033 57 negative_sampler """basic""" +1033 57 evaluator """rankbased""" +1033 58 dataset """kinships""" +1033 58 model """unstructuredmodel""" +1033 58 loss """marginranking""" 
+1033 58 regularizer """no""" +1033 58 optimizer """adam""" +1033 58 training_loop """owa""" +1033 58 negative_sampler """basic""" +1033 58 evaluator """rankbased""" +1033 59 dataset """kinships""" +1033 59 model """unstructuredmodel""" +1033 59 loss """marginranking""" +1033 59 regularizer """no""" +1033 59 optimizer """adam""" +1033 59 training_loop """owa""" +1033 59 negative_sampler """basic""" +1033 59 evaluator """rankbased""" +1033 60 dataset """kinships""" +1033 60 model """unstructuredmodel""" +1033 60 loss """marginranking""" +1033 60 regularizer """no""" +1033 60 optimizer """adam""" +1033 60 training_loop """owa""" +1033 60 negative_sampler """basic""" +1033 60 evaluator """rankbased""" +1033 61 dataset """kinships""" +1033 61 model """unstructuredmodel""" +1033 61 loss """marginranking""" +1033 61 regularizer """no""" +1033 61 optimizer """adam""" +1033 61 training_loop """owa""" +1033 61 negative_sampler """basic""" +1033 61 evaluator """rankbased""" +1033 62 dataset """kinships""" +1033 62 model """unstructuredmodel""" +1033 62 loss """marginranking""" +1033 62 regularizer """no""" +1033 62 optimizer """adam""" +1033 62 training_loop """owa""" +1033 62 negative_sampler """basic""" +1033 62 evaluator """rankbased""" +1033 63 dataset """kinships""" +1033 63 model """unstructuredmodel""" +1033 63 loss """marginranking""" +1033 63 regularizer """no""" +1033 63 optimizer """adam""" +1033 63 training_loop """owa""" +1033 63 negative_sampler """basic""" +1033 63 evaluator """rankbased""" +1033 64 dataset """kinships""" +1033 64 model """unstructuredmodel""" +1033 64 loss """marginranking""" +1033 64 regularizer """no""" +1033 64 optimizer """adam""" +1033 64 training_loop """owa""" +1033 64 negative_sampler """basic""" +1033 64 evaluator """rankbased""" +1033 65 dataset """kinships""" +1033 65 model """unstructuredmodel""" +1033 65 loss """marginranking""" +1033 65 regularizer """no""" +1033 65 optimizer """adam""" +1033 65 training_loop """owa""" +1033 65 
negative_sampler """basic""" +1033 65 evaluator """rankbased""" +1033 66 dataset """kinships""" +1033 66 model """unstructuredmodel""" +1033 66 loss """marginranking""" +1033 66 regularizer """no""" +1033 66 optimizer """adam""" +1033 66 training_loop """owa""" +1033 66 negative_sampler """basic""" +1033 66 evaluator """rankbased""" +1033 67 dataset """kinships""" +1033 67 model """unstructuredmodel""" +1033 67 loss """marginranking""" +1033 67 regularizer """no""" +1033 67 optimizer """adam""" +1033 67 training_loop """owa""" +1033 67 negative_sampler """basic""" +1033 67 evaluator """rankbased""" +1033 68 dataset """kinships""" +1033 68 model """unstructuredmodel""" +1033 68 loss """marginranking""" +1033 68 regularizer """no""" +1033 68 optimizer """adam""" +1033 68 training_loop """owa""" +1033 68 negative_sampler """basic""" +1033 68 evaluator """rankbased""" +1033 69 dataset """kinships""" +1033 69 model """unstructuredmodel""" +1033 69 loss """marginranking""" +1033 69 regularizer """no""" +1033 69 optimizer """adam""" +1033 69 training_loop """owa""" +1033 69 negative_sampler """basic""" +1033 69 evaluator """rankbased""" +1033 70 dataset """kinships""" +1033 70 model """unstructuredmodel""" +1033 70 loss """marginranking""" +1033 70 regularizer """no""" +1033 70 optimizer """adam""" +1033 70 training_loop """owa""" +1033 70 negative_sampler """basic""" +1033 70 evaluator """rankbased""" +1033 71 dataset """kinships""" +1033 71 model """unstructuredmodel""" +1033 71 loss """marginranking""" +1033 71 regularizer """no""" +1033 71 optimizer """adam""" +1033 71 training_loop """owa""" +1033 71 negative_sampler """basic""" +1033 71 evaluator """rankbased""" +1033 72 dataset """kinships""" +1033 72 model """unstructuredmodel""" +1033 72 loss """marginranking""" +1033 72 regularizer """no""" +1033 72 optimizer """adam""" +1033 72 training_loop """owa""" +1033 72 negative_sampler """basic""" +1033 72 evaluator """rankbased""" +1033 73 dataset """kinships""" +1033 
73 model """unstructuredmodel""" +1033 73 loss """marginranking""" +1033 73 regularizer """no""" +1033 73 optimizer """adam""" +1033 73 training_loop """owa""" +1033 73 negative_sampler """basic""" +1033 73 evaluator """rankbased""" +1033 74 dataset """kinships""" +1033 74 model """unstructuredmodel""" +1033 74 loss """marginranking""" +1033 74 regularizer """no""" +1033 74 optimizer """adam""" +1033 74 training_loop """owa""" +1033 74 negative_sampler """basic""" +1033 74 evaluator """rankbased""" +1033 75 dataset """kinships""" +1033 75 model """unstructuredmodel""" +1033 75 loss """marginranking""" +1033 75 regularizer """no""" +1033 75 optimizer """adam""" +1033 75 training_loop """owa""" +1033 75 negative_sampler """basic""" +1033 75 evaluator """rankbased""" +1033 76 dataset """kinships""" +1033 76 model """unstructuredmodel""" +1033 76 loss """marginranking""" +1033 76 regularizer """no""" +1033 76 optimizer """adam""" +1033 76 training_loop """owa""" +1033 76 negative_sampler """basic""" +1033 76 evaluator """rankbased""" +1033 77 dataset """kinships""" +1033 77 model """unstructuredmodel""" +1033 77 loss """marginranking""" +1033 77 regularizer """no""" +1033 77 optimizer """adam""" +1033 77 training_loop """owa""" +1033 77 negative_sampler """basic""" +1033 77 evaluator """rankbased""" +1033 78 dataset """kinships""" +1033 78 model """unstructuredmodel""" +1033 78 loss """marginranking""" +1033 78 regularizer """no""" +1033 78 optimizer """adam""" +1033 78 training_loop """owa""" +1033 78 negative_sampler """basic""" +1033 78 evaluator """rankbased""" +1033 79 dataset """kinships""" +1033 79 model """unstructuredmodel""" +1033 79 loss """marginranking""" +1033 79 regularizer """no""" +1033 79 optimizer """adam""" +1033 79 training_loop """owa""" +1033 79 negative_sampler """basic""" +1033 79 evaluator """rankbased""" +1033 80 dataset """kinships""" +1033 80 model """unstructuredmodel""" +1033 80 loss """marginranking""" +1033 80 regularizer """no""" +1033 
80 optimizer """adam""" +1033 80 training_loop """owa""" +1033 80 negative_sampler """basic""" +1033 80 evaluator """rankbased""" +1033 81 dataset """kinships""" +1033 81 model """unstructuredmodel""" +1033 81 loss """marginranking""" +1033 81 regularizer """no""" +1033 81 optimizer """adam""" +1033 81 training_loop """owa""" +1033 81 negative_sampler """basic""" +1033 81 evaluator """rankbased""" +1033 82 dataset """kinships""" +1033 82 model """unstructuredmodel""" +1033 82 loss """marginranking""" +1033 82 regularizer """no""" +1033 82 optimizer """adam""" +1033 82 training_loop """owa""" +1033 82 negative_sampler """basic""" +1033 82 evaluator """rankbased""" +1033 83 dataset """kinships""" +1033 83 model """unstructuredmodel""" +1033 83 loss """marginranking""" +1033 83 regularizer """no""" +1033 83 optimizer """adam""" +1033 83 training_loop """owa""" +1033 83 negative_sampler """basic""" +1033 83 evaluator """rankbased""" +1033 84 dataset """kinships""" +1033 84 model """unstructuredmodel""" +1033 84 loss """marginranking""" +1033 84 regularizer """no""" +1033 84 optimizer """adam""" +1033 84 training_loop """owa""" +1033 84 negative_sampler """basic""" +1033 84 evaluator """rankbased""" +1033 85 dataset """kinships""" +1033 85 model """unstructuredmodel""" +1033 85 loss """marginranking""" +1033 85 regularizer """no""" +1033 85 optimizer """adam""" +1033 85 training_loop """owa""" +1033 85 negative_sampler """basic""" +1033 85 evaluator """rankbased""" +1033 86 dataset """kinships""" +1033 86 model """unstructuredmodel""" +1033 86 loss """marginranking""" +1033 86 regularizer """no""" +1033 86 optimizer """adam""" +1033 86 training_loop """owa""" +1033 86 negative_sampler """basic""" +1033 86 evaluator """rankbased""" +1033 87 dataset """kinships""" +1033 87 model """unstructuredmodel""" +1033 87 loss """marginranking""" +1033 87 regularizer """no""" +1033 87 optimizer """adam""" +1033 87 training_loop """owa""" +1033 87 negative_sampler """basic""" +1033 
87 evaluator """rankbased""" +1033 88 dataset """kinships""" +1033 88 model """unstructuredmodel""" +1033 88 loss """marginranking""" +1033 88 regularizer """no""" +1033 88 optimizer """adam""" +1033 88 training_loop """owa""" +1033 88 negative_sampler """basic""" +1033 88 evaluator """rankbased""" +1033 89 dataset """kinships""" +1033 89 model """unstructuredmodel""" +1033 89 loss """marginranking""" +1033 89 regularizer """no""" +1033 89 optimizer """adam""" +1033 89 training_loop """owa""" +1033 89 negative_sampler """basic""" +1033 89 evaluator """rankbased""" +1033 90 dataset """kinships""" +1033 90 model """unstructuredmodel""" +1033 90 loss """marginranking""" +1033 90 regularizer """no""" +1033 90 optimizer """adam""" +1033 90 training_loop """owa""" +1033 90 negative_sampler """basic""" +1033 90 evaluator """rankbased""" +1033 91 dataset """kinships""" +1033 91 model """unstructuredmodel""" +1033 91 loss """marginranking""" +1033 91 regularizer """no""" +1033 91 optimizer """adam""" +1033 91 training_loop """owa""" +1033 91 negative_sampler """basic""" +1033 91 evaluator """rankbased""" +1033 92 dataset """kinships""" +1033 92 model """unstructuredmodel""" +1033 92 loss """marginranking""" +1033 92 regularizer """no""" +1033 92 optimizer """adam""" +1033 92 training_loop """owa""" +1033 92 negative_sampler """basic""" +1033 92 evaluator """rankbased""" +1033 93 dataset """kinships""" +1033 93 model """unstructuredmodel""" +1033 93 loss """marginranking""" +1033 93 regularizer """no""" +1033 93 optimizer """adam""" +1033 93 training_loop """owa""" +1033 93 negative_sampler """basic""" +1033 93 evaluator """rankbased""" +1033 94 dataset """kinships""" +1033 94 model """unstructuredmodel""" +1033 94 loss """marginranking""" +1033 94 regularizer """no""" +1033 94 optimizer """adam""" +1033 94 training_loop """owa""" +1033 94 negative_sampler """basic""" +1033 94 evaluator """rankbased""" +1033 95 dataset """kinships""" +1033 95 model """unstructuredmodel""" 
+1033 95 loss """marginranking""" +1033 95 regularizer """no""" +1033 95 optimizer """adam""" +1033 95 training_loop """owa""" +1033 95 negative_sampler """basic""" +1033 95 evaluator """rankbased""" +1033 96 dataset """kinships""" +1033 96 model """unstructuredmodel""" +1033 96 loss """marginranking""" +1033 96 regularizer """no""" +1033 96 optimizer """adam""" +1033 96 training_loop """owa""" +1033 96 negative_sampler """basic""" +1033 96 evaluator """rankbased""" +1033 97 dataset """kinships""" +1033 97 model """unstructuredmodel""" +1033 97 loss """marginranking""" +1033 97 regularizer """no""" +1033 97 optimizer """adam""" +1033 97 training_loop """owa""" +1033 97 negative_sampler """basic""" +1033 97 evaluator """rankbased""" +1033 98 dataset """kinships""" +1033 98 model """unstructuredmodel""" +1033 98 loss """marginranking""" +1033 98 regularizer """no""" +1033 98 optimizer """adam""" +1033 98 training_loop """owa""" +1033 98 negative_sampler """basic""" +1033 98 evaluator """rankbased""" +1033 99 dataset """kinships""" +1033 99 model """unstructuredmodel""" +1033 99 loss """marginranking""" +1033 99 regularizer """no""" +1033 99 optimizer """adam""" +1033 99 training_loop """owa""" +1033 99 negative_sampler """basic""" +1033 99 evaluator """rankbased""" +1033 100 dataset """kinships""" +1033 100 model """unstructuredmodel""" +1033 100 loss """marginranking""" +1033 100 regularizer """no""" +1033 100 optimizer """adam""" +1033 100 training_loop """owa""" +1033 100 negative_sampler """basic""" +1033 100 evaluator """rankbased""" +1034 1 model.embedding_dim 2.0 +1034 1 model.scoring_fct_norm 2.0 +1034 1 optimizer.lr 0.0012771855680586348 +1034 1 negative_sampler.num_negs_per_pos 85.0 +1034 1 training.batch_size 1.0 +1034 2 model.embedding_dim 0.0 +1034 2 model.scoring_fct_norm 1.0 +1034 2 optimizer.lr 0.014165744870692491 +1034 2 negative_sampler.num_negs_per_pos 55.0 +1034 2 training.batch_size 2.0 +1034 3 model.embedding_dim 1.0 +1034 3 
model.scoring_fct_norm 1.0 +1034 3 optimizer.lr 0.010332419455595059 +1034 3 negative_sampler.num_negs_per_pos 66.0 +1034 3 training.batch_size 0.0 +1034 4 model.embedding_dim 1.0 +1034 4 model.scoring_fct_norm 2.0 +1034 4 optimizer.lr 0.052580465613473876 +1034 4 negative_sampler.num_negs_per_pos 43.0 +1034 4 training.batch_size 2.0 +1034 5 model.embedding_dim 0.0 +1034 5 model.scoring_fct_norm 1.0 +1034 5 optimizer.lr 0.08165950038468457 +1034 5 negative_sampler.num_negs_per_pos 38.0 +1034 5 training.batch_size 1.0 +1034 6 model.embedding_dim 2.0 +1034 6 model.scoring_fct_norm 2.0 +1034 6 optimizer.lr 0.017618893420126847 +1034 6 negative_sampler.num_negs_per_pos 66.0 +1034 6 training.batch_size 2.0 +1034 7 model.embedding_dim 2.0 +1034 7 model.scoring_fct_norm 2.0 +1034 7 optimizer.lr 0.011841065445261733 +1034 7 negative_sampler.num_negs_per_pos 89.0 +1034 7 training.batch_size 2.0 +1034 8 model.embedding_dim 0.0 +1034 8 model.scoring_fct_norm 1.0 +1034 8 optimizer.lr 0.0034457369161779417 +1034 8 negative_sampler.num_negs_per_pos 86.0 +1034 8 training.batch_size 1.0 +1034 9 model.embedding_dim 2.0 +1034 9 model.scoring_fct_norm 2.0 +1034 9 optimizer.lr 0.048814930790852536 +1034 9 negative_sampler.num_negs_per_pos 15.0 +1034 9 training.batch_size 2.0 +1034 10 model.embedding_dim 0.0 +1034 10 model.scoring_fct_norm 2.0 +1034 10 optimizer.lr 0.0019211727479835066 +1034 10 negative_sampler.num_negs_per_pos 78.0 +1034 10 training.batch_size 2.0 +1034 11 model.embedding_dim 0.0 +1034 11 model.scoring_fct_norm 1.0 +1034 11 optimizer.lr 0.038389491666501616 +1034 11 negative_sampler.num_negs_per_pos 63.0 +1034 11 training.batch_size 2.0 +1034 12 model.embedding_dim 2.0 +1034 12 model.scoring_fct_norm 1.0 +1034 12 optimizer.lr 0.0015168636493135866 +1034 12 negative_sampler.num_negs_per_pos 16.0 +1034 12 training.batch_size 2.0 +1034 13 model.embedding_dim 1.0 +1034 13 model.scoring_fct_norm 1.0 +1034 13 optimizer.lr 0.02394899455856992 +1034 13 
negative_sampler.num_negs_per_pos 73.0 +1034 13 training.batch_size 2.0 +1034 14 model.embedding_dim 1.0 +1034 14 model.scoring_fct_norm 2.0 +1034 14 optimizer.lr 0.05713586704706039 +1034 14 negative_sampler.num_negs_per_pos 0.0 +1034 14 training.batch_size 2.0 +1034 15 model.embedding_dim 1.0 +1034 15 model.scoring_fct_norm 1.0 +1034 15 optimizer.lr 0.002425319696463292 +1034 15 negative_sampler.num_negs_per_pos 58.0 +1034 15 training.batch_size 1.0 +1034 16 model.embedding_dim 2.0 +1034 16 model.scoring_fct_norm 2.0 +1034 16 optimizer.lr 0.00627609620501491 +1034 16 negative_sampler.num_negs_per_pos 23.0 +1034 16 training.batch_size 2.0 +1034 17 model.embedding_dim 0.0 +1034 17 model.scoring_fct_norm 1.0 +1034 17 optimizer.lr 0.0043716577338977994 +1034 17 negative_sampler.num_negs_per_pos 62.0 +1034 17 training.batch_size 2.0 +1034 18 model.embedding_dim 0.0 +1034 18 model.scoring_fct_norm 1.0 +1034 18 optimizer.lr 0.07386240621361723 +1034 18 negative_sampler.num_negs_per_pos 38.0 +1034 18 training.batch_size 0.0 +1034 19 model.embedding_dim 2.0 +1034 19 model.scoring_fct_norm 2.0 +1034 19 optimizer.lr 0.04061038004670263 +1034 19 negative_sampler.num_negs_per_pos 73.0 +1034 19 training.batch_size 2.0 +1034 20 model.embedding_dim 1.0 +1034 20 model.scoring_fct_norm 1.0 +1034 20 optimizer.lr 0.002871374598159227 +1034 20 negative_sampler.num_negs_per_pos 69.0 +1034 20 training.batch_size 0.0 +1034 21 model.embedding_dim 2.0 +1034 21 model.scoring_fct_norm 1.0 +1034 21 optimizer.lr 0.04601147119328239 +1034 21 negative_sampler.num_negs_per_pos 61.0 +1034 21 training.batch_size 2.0 +1034 22 model.embedding_dim 1.0 +1034 22 model.scoring_fct_norm 2.0 +1034 22 optimizer.lr 0.00993694767412437 +1034 22 negative_sampler.num_negs_per_pos 49.0 +1034 22 training.batch_size 2.0 +1034 23 model.embedding_dim 2.0 +1034 23 model.scoring_fct_norm 1.0 +1034 23 optimizer.lr 0.002008383138655724 +1034 23 negative_sampler.num_negs_per_pos 87.0 +1034 23 training.batch_size 0.0 
+1034 24 model.embedding_dim 1.0 +1034 24 model.scoring_fct_norm 1.0 +1034 24 optimizer.lr 0.00884726022886829 +1034 24 negative_sampler.num_negs_per_pos 80.0 +1034 24 training.batch_size 0.0 +1034 25 model.embedding_dim 0.0 +1034 25 model.scoring_fct_norm 1.0 +1034 25 optimizer.lr 0.03561766869078596 +1034 25 negative_sampler.num_negs_per_pos 13.0 +1034 25 training.batch_size 2.0 +1034 26 model.embedding_dim 0.0 +1034 26 model.scoring_fct_norm 1.0 +1034 26 optimizer.lr 0.08868502799067655 +1034 26 negative_sampler.num_negs_per_pos 96.0 +1034 26 training.batch_size 0.0 +1034 27 model.embedding_dim 1.0 +1034 27 model.scoring_fct_norm 2.0 +1034 27 optimizer.lr 0.005834383548804757 +1034 27 negative_sampler.num_negs_per_pos 23.0 +1034 27 training.batch_size 0.0 +1034 28 model.embedding_dim 0.0 +1034 28 model.scoring_fct_norm 2.0 +1034 28 optimizer.lr 0.0053342819606739 +1034 28 negative_sampler.num_negs_per_pos 68.0 +1034 28 training.batch_size 2.0 +1034 29 model.embedding_dim 1.0 +1034 29 model.scoring_fct_norm 1.0 +1034 29 optimizer.lr 0.0020938856973343033 +1034 29 negative_sampler.num_negs_per_pos 20.0 +1034 29 training.batch_size 1.0 +1034 30 model.embedding_dim 2.0 +1034 30 model.scoring_fct_norm 1.0 +1034 30 optimizer.lr 0.009797970072970346 +1034 30 negative_sampler.num_negs_per_pos 50.0 +1034 30 training.batch_size 1.0 +1034 31 model.embedding_dim 0.0 +1034 31 model.scoring_fct_norm 1.0 +1034 31 optimizer.lr 0.0028925072387057335 +1034 31 negative_sampler.num_negs_per_pos 71.0 +1034 31 training.batch_size 2.0 +1034 32 model.embedding_dim 0.0 +1034 32 model.scoring_fct_norm 1.0 +1034 32 optimizer.lr 0.007657516219653069 +1034 32 negative_sampler.num_negs_per_pos 39.0 +1034 32 training.batch_size 0.0 +1034 33 model.embedding_dim 2.0 +1034 33 model.scoring_fct_norm 2.0 +1034 33 optimizer.lr 0.003379119880141514 +1034 33 negative_sampler.num_negs_per_pos 5.0 +1034 33 training.batch_size 1.0 +1034 34 model.embedding_dim 2.0 +1034 34 model.scoring_fct_norm 1.0 
+1034 34 optimizer.lr 0.0067514659970074106 +1034 34 negative_sampler.num_negs_per_pos 45.0 +1034 34 training.batch_size 1.0 +1034 35 model.embedding_dim 0.0 +1034 35 model.scoring_fct_norm 2.0 +1034 35 optimizer.lr 0.01729941382510998 +1034 35 negative_sampler.num_negs_per_pos 56.0 +1034 35 training.batch_size 2.0 +1034 36 model.embedding_dim 2.0 +1034 36 model.scoring_fct_norm 2.0 +1034 36 optimizer.lr 0.009870662600162623 +1034 36 negative_sampler.num_negs_per_pos 24.0 +1034 36 training.batch_size 0.0 +1034 37 model.embedding_dim 1.0 +1034 37 model.scoring_fct_norm 1.0 +1034 37 optimizer.lr 0.0023609084936496456 +1034 37 negative_sampler.num_negs_per_pos 68.0 +1034 37 training.batch_size 2.0 +1034 38 model.embedding_dim 2.0 +1034 38 model.scoring_fct_norm 2.0 +1034 38 optimizer.lr 0.00706697585033396 +1034 38 negative_sampler.num_negs_per_pos 47.0 +1034 38 training.batch_size 0.0 +1034 39 model.embedding_dim 1.0 +1034 39 model.scoring_fct_norm 1.0 +1034 39 optimizer.lr 0.0946915811820786 +1034 39 negative_sampler.num_negs_per_pos 58.0 +1034 39 training.batch_size 1.0 +1034 40 model.embedding_dim 0.0 +1034 40 model.scoring_fct_norm 2.0 +1034 40 optimizer.lr 0.0017453290028149055 +1034 40 negative_sampler.num_negs_per_pos 34.0 +1034 40 training.batch_size 2.0 +1034 41 model.embedding_dim 2.0 +1034 41 model.scoring_fct_norm 1.0 +1034 41 optimizer.lr 0.021989104491234153 +1034 41 negative_sampler.num_negs_per_pos 22.0 +1034 41 training.batch_size 0.0 +1034 42 model.embedding_dim 1.0 +1034 42 model.scoring_fct_norm 2.0 +1034 42 optimizer.lr 0.03269764242363148 +1034 42 negative_sampler.num_negs_per_pos 66.0 +1034 42 training.batch_size 1.0 +1034 43 model.embedding_dim 0.0 +1034 43 model.scoring_fct_norm 2.0 +1034 43 optimizer.lr 0.08213444983687658 +1034 43 negative_sampler.num_negs_per_pos 29.0 +1034 43 training.batch_size 0.0 +1034 44 model.embedding_dim 2.0 +1034 44 model.scoring_fct_norm 1.0 +1034 44 optimizer.lr 0.010191949107530117 +1034 44 
negative_sampler.num_negs_per_pos 33.0 +1034 44 training.batch_size 0.0 +1034 45 model.embedding_dim 0.0 +1034 45 model.scoring_fct_norm 2.0 +1034 45 optimizer.lr 0.001780473475279862 +1034 45 negative_sampler.num_negs_per_pos 6.0 +1034 45 training.batch_size 0.0 +1034 46 model.embedding_dim 0.0 +1034 46 model.scoring_fct_norm 1.0 +1034 46 optimizer.lr 0.003598226348003066 +1034 46 negative_sampler.num_negs_per_pos 19.0 +1034 46 training.batch_size 2.0 +1034 47 model.embedding_dim 1.0 +1034 47 model.scoring_fct_norm 2.0 +1034 47 optimizer.lr 0.06165891875771759 +1034 47 negative_sampler.num_negs_per_pos 48.0 +1034 47 training.batch_size 1.0 +1034 48 model.embedding_dim 1.0 +1034 48 model.scoring_fct_norm 1.0 +1034 48 optimizer.lr 0.007018676011780632 +1034 48 negative_sampler.num_negs_per_pos 25.0 +1034 48 training.batch_size 1.0 +1034 49 model.embedding_dim 0.0 +1034 49 model.scoring_fct_norm 2.0 +1034 49 optimizer.lr 0.018635622592269422 +1034 49 negative_sampler.num_negs_per_pos 51.0 +1034 49 training.batch_size 2.0 +1034 50 model.embedding_dim 2.0 +1034 50 model.scoring_fct_norm 2.0 +1034 50 optimizer.lr 0.005255446430159192 +1034 50 negative_sampler.num_negs_per_pos 62.0 +1034 50 training.batch_size 0.0 +1034 51 model.embedding_dim 2.0 +1034 51 model.scoring_fct_norm 2.0 +1034 51 optimizer.lr 0.004485688382904143 +1034 51 negative_sampler.num_negs_per_pos 54.0 +1034 51 training.batch_size 2.0 +1034 52 model.embedding_dim 2.0 +1034 52 model.scoring_fct_norm 2.0 +1034 52 optimizer.lr 0.005172075329179978 +1034 52 negative_sampler.num_negs_per_pos 7.0 +1034 52 training.batch_size 2.0 +1034 53 model.embedding_dim 0.0 +1034 53 model.scoring_fct_norm 2.0 +1034 53 optimizer.lr 0.001723797945132952 +1034 53 negative_sampler.num_negs_per_pos 57.0 +1034 53 training.batch_size 2.0 +1034 54 model.embedding_dim 0.0 +1034 54 model.scoring_fct_norm 2.0 +1034 54 optimizer.lr 0.03745223979794026 +1034 54 negative_sampler.num_negs_per_pos 36.0 +1034 54 training.batch_size 0.0 
+1034 55 model.embedding_dim 1.0 +1034 55 model.scoring_fct_norm 1.0 +1034 55 optimizer.lr 0.0022534044567993315 +1034 55 negative_sampler.num_negs_per_pos 10.0 +1034 55 training.batch_size 1.0 +1034 56 model.embedding_dim 2.0 +1034 56 model.scoring_fct_norm 1.0 +1034 56 optimizer.lr 0.0012822575688421559 +1034 56 negative_sampler.num_negs_per_pos 50.0 +1034 56 training.batch_size 1.0 +1034 57 model.embedding_dim 1.0 +1034 57 model.scoring_fct_norm 2.0 +1034 57 optimizer.lr 0.002390732752947499 +1034 57 negative_sampler.num_negs_per_pos 55.0 +1034 57 training.batch_size 0.0 +1034 58 model.embedding_dim 2.0 +1034 58 model.scoring_fct_norm 2.0 +1034 58 optimizer.lr 0.01704290816801749 +1034 58 negative_sampler.num_negs_per_pos 15.0 +1034 58 training.batch_size 0.0 +1034 59 model.embedding_dim 0.0 +1034 59 model.scoring_fct_norm 2.0 +1034 59 optimizer.lr 0.08869316351883305 +1034 59 negative_sampler.num_negs_per_pos 67.0 +1034 59 training.batch_size 1.0 +1034 60 model.embedding_dim 0.0 +1034 60 model.scoring_fct_norm 1.0 +1034 60 optimizer.lr 0.01134528399117839 +1034 60 negative_sampler.num_negs_per_pos 13.0 +1034 60 training.batch_size 0.0 +1034 61 model.embedding_dim 1.0 +1034 61 model.scoring_fct_norm 1.0 +1034 61 optimizer.lr 0.049678438945282954 +1034 61 negative_sampler.num_negs_per_pos 34.0 +1034 61 training.batch_size 2.0 +1034 62 model.embedding_dim 0.0 +1034 62 model.scoring_fct_norm 2.0 +1034 62 optimizer.lr 0.00486295347885951 +1034 62 negative_sampler.num_negs_per_pos 25.0 +1034 62 training.batch_size 2.0 +1034 63 model.embedding_dim 1.0 +1034 63 model.scoring_fct_norm 2.0 +1034 63 optimizer.lr 0.02850006375107847 +1034 63 negative_sampler.num_negs_per_pos 90.0 +1034 63 training.batch_size 0.0 +1034 64 model.embedding_dim 1.0 +1034 64 model.scoring_fct_norm 2.0 +1034 64 optimizer.lr 0.0019825422752033687 +1034 64 negative_sampler.num_negs_per_pos 81.0 +1034 64 training.batch_size 0.0 +1034 65 model.embedding_dim 2.0 +1034 65 model.scoring_fct_norm 1.0 
+1034 65 optimizer.lr 0.005898233499856266 +1034 65 negative_sampler.num_negs_per_pos 34.0 +1034 65 training.batch_size 2.0 +1034 66 model.embedding_dim 0.0 +1034 66 model.scoring_fct_norm 1.0 +1034 66 optimizer.lr 0.003570274398204941 +1034 66 negative_sampler.num_negs_per_pos 11.0 +1034 66 training.batch_size 0.0 +1034 67 model.embedding_dim 2.0 +1034 67 model.scoring_fct_norm 1.0 +1034 67 optimizer.lr 0.0017427181821258334 +1034 67 negative_sampler.num_negs_per_pos 37.0 +1034 67 training.batch_size 2.0 +1034 68 model.embedding_dim 2.0 +1034 68 model.scoring_fct_norm 1.0 +1034 68 optimizer.lr 0.06972658257093382 +1034 68 negative_sampler.num_negs_per_pos 67.0 +1034 68 training.batch_size 2.0 +1034 69 model.embedding_dim 0.0 +1034 69 model.scoring_fct_norm 1.0 +1034 69 optimizer.lr 0.0180575660662761 +1034 69 negative_sampler.num_negs_per_pos 60.0 +1034 69 training.batch_size 1.0 +1034 70 model.embedding_dim 0.0 +1034 70 model.scoring_fct_norm 1.0 +1034 70 optimizer.lr 0.020701823019730472 +1034 70 negative_sampler.num_negs_per_pos 35.0 +1034 70 training.batch_size 1.0 +1034 71 model.embedding_dim 0.0 +1034 71 model.scoring_fct_norm 1.0 +1034 71 optimizer.lr 0.016729158892079436 +1034 71 negative_sampler.num_negs_per_pos 11.0 +1034 71 training.batch_size 1.0 +1034 72 model.embedding_dim 2.0 +1034 72 model.scoring_fct_norm 2.0 +1034 72 optimizer.lr 0.001183235930507799 +1034 72 negative_sampler.num_negs_per_pos 17.0 +1034 72 training.batch_size 0.0 +1034 73 model.embedding_dim 0.0 +1034 73 model.scoring_fct_norm 1.0 +1034 73 optimizer.lr 0.0077125874344410985 +1034 73 negative_sampler.num_negs_per_pos 80.0 +1034 73 training.batch_size 0.0 +1034 74 model.embedding_dim 2.0 +1034 74 model.scoring_fct_norm 2.0 +1034 74 optimizer.lr 0.004164624326611894 +1034 74 negative_sampler.num_negs_per_pos 86.0 +1034 74 training.batch_size 0.0 +1034 75 model.embedding_dim 2.0 +1034 75 model.scoring_fct_norm 1.0 +1034 75 optimizer.lr 0.03753443856056754 +1034 75 
negative_sampler.num_negs_per_pos 28.0 +1034 75 training.batch_size 2.0 +1034 76 model.embedding_dim 2.0 +1034 76 model.scoring_fct_norm 2.0 +1034 76 optimizer.lr 0.0025611016335257966 +1034 76 negative_sampler.num_negs_per_pos 53.0 +1034 76 training.batch_size 2.0 +1034 77 model.embedding_dim 1.0 +1034 77 model.scoring_fct_norm 2.0 +1034 77 optimizer.lr 0.030155238369342325 +1034 77 negative_sampler.num_negs_per_pos 98.0 +1034 77 training.batch_size 2.0 +1034 78 model.embedding_dim 1.0 +1034 78 model.scoring_fct_norm 2.0 +1034 78 optimizer.lr 0.05215649985567293 +1034 78 negative_sampler.num_negs_per_pos 97.0 +1034 78 training.batch_size 2.0 +1034 79 model.embedding_dim 1.0 +1034 79 model.scoring_fct_norm 2.0 +1034 79 optimizer.lr 0.023066032270485502 +1034 79 negative_sampler.num_negs_per_pos 93.0 +1034 79 training.batch_size 2.0 +1034 80 model.embedding_dim 2.0 +1034 80 model.scoring_fct_norm 1.0 +1034 80 optimizer.lr 0.0016185234698947708 +1034 80 negative_sampler.num_negs_per_pos 48.0 +1034 80 training.batch_size 2.0 +1034 81 model.embedding_dim 2.0 +1034 81 model.scoring_fct_norm 2.0 +1034 81 optimizer.lr 0.008025571515649747 +1034 81 negative_sampler.num_negs_per_pos 90.0 +1034 81 training.batch_size 1.0 +1034 82 model.embedding_dim 0.0 +1034 82 model.scoring_fct_norm 2.0 +1034 82 optimizer.lr 0.015041565094124312 +1034 82 negative_sampler.num_negs_per_pos 67.0 +1034 82 training.batch_size 2.0 +1034 83 model.embedding_dim 2.0 +1034 83 model.scoring_fct_norm 1.0 +1034 83 optimizer.lr 0.0020294793439232417 +1034 83 negative_sampler.num_negs_per_pos 33.0 +1034 83 training.batch_size 1.0 +1034 84 model.embedding_dim 2.0 +1034 84 model.scoring_fct_norm 2.0 +1034 84 optimizer.lr 0.011292818135320163 +1034 84 negative_sampler.num_negs_per_pos 41.0 +1034 84 training.batch_size 0.0 +1034 85 model.embedding_dim 2.0 +1034 85 model.scoring_fct_norm 1.0 +1034 85 optimizer.lr 0.0035736047301560252 +1034 85 negative_sampler.num_negs_per_pos 49.0 +1034 85 
training.batch_size 1.0 +1034 86 model.embedding_dim 2.0 +1034 86 model.scoring_fct_norm 1.0 +1034 86 optimizer.lr 0.01122509651291494 +1034 86 negative_sampler.num_negs_per_pos 52.0 +1034 86 training.batch_size 1.0 +1034 87 model.embedding_dim 2.0 +1034 87 model.scoring_fct_norm 2.0 +1034 87 optimizer.lr 0.01142261753516333 +1034 87 negative_sampler.num_negs_per_pos 32.0 +1034 87 training.batch_size 1.0 +1034 88 model.embedding_dim 1.0 +1034 88 model.scoring_fct_norm 2.0 +1034 88 optimizer.lr 0.0022015975386590865 +1034 88 negative_sampler.num_negs_per_pos 98.0 +1034 88 training.batch_size 1.0 +1034 89 model.embedding_dim 0.0 +1034 89 model.scoring_fct_norm 2.0 +1034 89 optimizer.lr 0.0023026467546179786 +1034 89 negative_sampler.num_negs_per_pos 21.0 +1034 89 training.batch_size 2.0 +1034 90 model.embedding_dim 2.0 +1034 90 model.scoring_fct_norm 1.0 +1034 90 optimizer.lr 0.00580899086981333 +1034 90 negative_sampler.num_negs_per_pos 99.0 +1034 90 training.batch_size 2.0 +1034 91 model.embedding_dim 0.0 +1034 91 model.scoring_fct_norm 1.0 +1034 91 optimizer.lr 0.0017373071860636562 +1034 91 negative_sampler.num_negs_per_pos 66.0 +1034 91 training.batch_size 1.0 +1034 92 model.embedding_dim 1.0 +1034 92 model.scoring_fct_norm 2.0 +1034 92 optimizer.lr 0.0015892241261881775 +1034 92 negative_sampler.num_negs_per_pos 46.0 +1034 92 training.batch_size 2.0 +1034 93 model.embedding_dim 0.0 +1034 93 model.scoring_fct_norm 1.0 +1034 93 optimizer.lr 0.01448340606982053 +1034 93 negative_sampler.num_negs_per_pos 95.0 +1034 93 training.batch_size 1.0 +1034 94 model.embedding_dim 1.0 +1034 94 model.scoring_fct_norm 2.0 +1034 94 optimizer.lr 0.0012067790259534352 +1034 94 negative_sampler.num_negs_per_pos 14.0 +1034 94 training.batch_size 2.0 +1034 95 model.embedding_dim 2.0 +1034 95 model.scoring_fct_norm 2.0 +1034 95 optimizer.lr 0.020250897288443034 +1034 95 negative_sampler.num_negs_per_pos 24.0 +1034 95 training.batch_size 1.0 +1034 96 model.embedding_dim 0.0 +1034 96 
model.scoring_fct_norm 2.0 +1034 96 optimizer.lr 0.038045507845408756 +1034 96 negative_sampler.num_negs_per_pos 57.0 +1034 96 training.batch_size 1.0 +1034 97 model.embedding_dim 0.0 +1034 97 model.scoring_fct_norm 2.0 +1034 97 optimizer.lr 0.02725181497107339 +1034 97 negative_sampler.num_negs_per_pos 95.0 +1034 97 training.batch_size 2.0 +1034 98 model.embedding_dim 1.0 +1034 98 model.scoring_fct_norm 2.0 +1034 98 optimizer.lr 0.004511986154820929 +1034 98 negative_sampler.num_negs_per_pos 63.0 +1034 98 training.batch_size 0.0 +1034 99 model.embedding_dim 2.0 +1034 99 model.scoring_fct_norm 2.0 +1034 99 optimizer.lr 0.014732640965824275 +1034 99 negative_sampler.num_negs_per_pos 87.0 +1034 99 training.batch_size 1.0 +1034 100 model.embedding_dim 0.0 +1034 100 model.scoring_fct_norm 2.0 +1034 100 optimizer.lr 0.08409241087717632 +1034 100 negative_sampler.num_negs_per_pos 32.0 +1034 100 training.batch_size 2.0 +1034 1 dataset """wn18rr""" +1034 1 model """unstructuredmodel""" +1034 1 loss """bceaftersigmoid""" +1034 1 regularizer """no""" +1034 1 optimizer """adam""" +1034 1 training_loop """owa""" +1034 1 negative_sampler """basic""" +1034 1 evaluator """rankbased""" +1034 2 dataset """wn18rr""" +1034 2 model """unstructuredmodel""" +1034 2 loss """bceaftersigmoid""" +1034 2 regularizer """no""" +1034 2 optimizer """adam""" +1034 2 training_loop """owa""" +1034 2 negative_sampler """basic""" +1034 2 evaluator """rankbased""" +1034 3 dataset """wn18rr""" +1034 3 model """unstructuredmodel""" +1034 3 loss """bceaftersigmoid""" +1034 3 regularizer """no""" +1034 3 optimizer """adam""" +1034 3 training_loop """owa""" +1034 3 negative_sampler """basic""" +1034 3 evaluator """rankbased""" +1034 4 dataset """wn18rr""" +1034 4 model """unstructuredmodel""" +1034 4 loss """bceaftersigmoid""" +1034 4 regularizer """no""" +1034 4 optimizer """adam""" +1034 4 training_loop """owa""" +1034 4 negative_sampler """basic""" +1034 4 evaluator """rankbased""" +1034 5 dataset 
"""wn18rr""" +1034 5 model """unstructuredmodel""" +1034 5 loss """bceaftersigmoid""" +1034 5 regularizer """no""" +1034 5 optimizer """adam""" +1034 5 training_loop """owa""" +1034 5 negative_sampler """basic""" +1034 5 evaluator """rankbased""" +1034 6 dataset """wn18rr""" +1034 6 model """unstructuredmodel""" +1034 6 loss """bceaftersigmoid""" +1034 6 regularizer """no""" +1034 6 optimizer """adam""" +1034 6 training_loop """owa""" +1034 6 negative_sampler """basic""" +1034 6 evaluator """rankbased""" +1034 7 dataset """wn18rr""" +1034 7 model """unstructuredmodel""" +1034 7 loss """bceaftersigmoid""" +1034 7 regularizer """no""" +1034 7 optimizer """adam""" +1034 7 training_loop """owa""" +1034 7 negative_sampler """basic""" +1034 7 evaluator """rankbased""" +1034 8 dataset """wn18rr""" +1034 8 model """unstructuredmodel""" +1034 8 loss """bceaftersigmoid""" +1034 8 regularizer """no""" +1034 8 optimizer """adam""" +1034 8 training_loop """owa""" +1034 8 negative_sampler """basic""" +1034 8 evaluator """rankbased""" +1034 9 dataset """wn18rr""" +1034 9 model """unstructuredmodel""" +1034 9 loss """bceaftersigmoid""" +1034 9 regularizer """no""" +1034 9 optimizer """adam""" +1034 9 training_loop """owa""" +1034 9 negative_sampler """basic""" +1034 9 evaluator """rankbased""" +1034 10 dataset """wn18rr""" +1034 10 model """unstructuredmodel""" +1034 10 loss """bceaftersigmoid""" +1034 10 regularizer """no""" +1034 10 optimizer """adam""" +1034 10 training_loop """owa""" +1034 10 negative_sampler """basic""" +1034 10 evaluator """rankbased""" +1034 11 dataset """wn18rr""" +1034 11 model """unstructuredmodel""" +1034 11 loss """bceaftersigmoid""" +1034 11 regularizer """no""" +1034 11 optimizer """adam""" +1034 11 training_loop """owa""" +1034 11 negative_sampler """basic""" +1034 11 evaluator """rankbased""" +1034 12 dataset """wn18rr""" +1034 12 model """unstructuredmodel""" +1034 12 loss """bceaftersigmoid""" +1034 12 regularizer """no""" +1034 12 optimizer 
"""adam""" +1034 12 training_loop """owa""" +1034 12 negative_sampler """basic""" +1034 12 evaluator """rankbased""" +1034 13 dataset """wn18rr""" +1034 13 model """unstructuredmodel""" +1034 13 loss """bceaftersigmoid""" +1034 13 regularizer """no""" +1034 13 optimizer """adam""" +1034 13 training_loop """owa""" +1034 13 negative_sampler """basic""" +1034 13 evaluator """rankbased""" +1034 14 dataset """wn18rr""" +1034 14 model """unstructuredmodel""" +1034 14 loss """bceaftersigmoid""" +1034 14 regularizer """no""" +1034 14 optimizer """adam""" +1034 14 training_loop """owa""" +1034 14 negative_sampler """basic""" +1034 14 evaluator """rankbased""" +1034 15 dataset """wn18rr""" +1034 15 model """unstructuredmodel""" +1034 15 loss """bceaftersigmoid""" +1034 15 regularizer """no""" +1034 15 optimizer """adam""" +1034 15 training_loop """owa""" +1034 15 negative_sampler """basic""" +1034 15 evaluator """rankbased""" +1034 16 dataset """wn18rr""" +1034 16 model """unstructuredmodel""" +1034 16 loss """bceaftersigmoid""" +1034 16 regularizer """no""" +1034 16 optimizer """adam""" +1034 16 training_loop """owa""" +1034 16 negative_sampler """basic""" +1034 16 evaluator """rankbased""" +1034 17 dataset """wn18rr""" +1034 17 model """unstructuredmodel""" +1034 17 loss """bceaftersigmoid""" +1034 17 regularizer """no""" +1034 17 optimizer """adam""" +1034 17 training_loop """owa""" +1034 17 negative_sampler """basic""" +1034 17 evaluator """rankbased""" +1034 18 dataset """wn18rr""" +1034 18 model """unstructuredmodel""" +1034 18 loss """bceaftersigmoid""" +1034 18 regularizer """no""" +1034 18 optimizer """adam""" +1034 18 training_loop """owa""" +1034 18 negative_sampler """basic""" +1034 18 evaluator """rankbased""" +1034 19 dataset """wn18rr""" +1034 19 model """unstructuredmodel""" +1034 19 loss """bceaftersigmoid""" +1034 19 regularizer """no""" +1034 19 optimizer """adam""" +1034 19 training_loop """owa""" +1034 19 negative_sampler """basic""" +1034 19 evaluator 
"""rankbased""" +1034 20 dataset """wn18rr""" +1034 20 model """unstructuredmodel""" +1034 20 loss """bceaftersigmoid""" +1034 20 regularizer """no""" +1034 20 optimizer """adam""" +1034 20 training_loop """owa""" +1034 20 negative_sampler """basic""" +1034 20 evaluator """rankbased""" +1034 21 dataset """wn18rr""" +1034 21 model """unstructuredmodel""" +1034 21 loss """bceaftersigmoid""" +1034 21 regularizer """no""" +1034 21 optimizer """adam""" +1034 21 training_loop """owa""" +1034 21 negative_sampler """basic""" +1034 21 evaluator """rankbased""" +1034 22 dataset """wn18rr""" +1034 22 model """unstructuredmodel""" +1034 22 loss """bceaftersigmoid""" +1034 22 regularizer """no""" +1034 22 optimizer """adam""" +1034 22 training_loop """owa""" +1034 22 negative_sampler """basic""" +1034 22 evaluator """rankbased""" +1034 23 dataset """wn18rr""" +1034 23 model """unstructuredmodel""" +1034 23 loss """bceaftersigmoid""" +1034 23 regularizer """no""" +1034 23 optimizer """adam""" +1034 23 training_loop """owa""" +1034 23 negative_sampler """basic""" +1034 23 evaluator """rankbased""" +1034 24 dataset """wn18rr""" +1034 24 model """unstructuredmodel""" +1034 24 loss """bceaftersigmoid""" +1034 24 regularizer """no""" +1034 24 optimizer """adam""" +1034 24 training_loop """owa""" +1034 24 negative_sampler """basic""" +1034 24 evaluator """rankbased""" +1034 25 dataset """wn18rr""" +1034 25 model """unstructuredmodel""" +1034 25 loss """bceaftersigmoid""" +1034 25 regularizer """no""" +1034 25 optimizer """adam""" +1034 25 training_loop """owa""" +1034 25 negative_sampler """basic""" +1034 25 evaluator """rankbased""" +1034 26 dataset """wn18rr""" +1034 26 model """unstructuredmodel""" +1034 26 loss """bceaftersigmoid""" +1034 26 regularizer """no""" +1034 26 optimizer """adam""" +1034 26 training_loop """owa""" +1034 26 negative_sampler """basic""" +1034 26 evaluator """rankbased""" +1034 27 dataset """wn18rr""" +1034 27 model """unstructuredmodel""" +1034 27 loss 
"""bceaftersigmoid""" +1034 27 regularizer """no""" +1034 27 optimizer """adam""" +1034 27 training_loop """owa""" +1034 27 negative_sampler """basic""" +1034 27 evaluator """rankbased""" +1034 28 dataset """wn18rr""" +1034 28 model """unstructuredmodel""" +1034 28 loss """bceaftersigmoid""" +1034 28 regularizer """no""" +1034 28 optimizer """adam""" +1034 28 training_loop """owa""" +1034 28 negative_sampler """basic""" +1034 28 evaluator """rankbased""" +1034 29 dataset """wn18rr""" +1034 29 model """unstructuredmodel""" +1034 29 loss """bceaftersigmoid""" +1034 29 regularizer """no""" +1034 29 optimizer """adam""" +1034 29 training_loop """owa""" +1034 29 negative_sampler """basic""" +1034 29 evaluator """rankbased""" +1034 30 dataset """wn18rr""" +1034 30 model """unstructuredmodel""" +1034 30 loss """bceaftersigmoid""" +1034 30 regularizer """no""" +1034 30 optimizer """adam""" +1034 30 training_loop """owa""" +1034 30 negative_sampler """basic""" +1034 30 evaluator """rankbased""" +1034 31 dataset """wn18rr""" +1034 31 model """unstructuredmodel""" +1034 31 loss """bceaftersigmoid""" +1034 31 regularizer """no""" +1034 31 optimizer """adam""" +1034 31 training_loop """owa""" +1034 31 negative_sampler """basic""" +1034 31 evaluator """rankbased""" +1034 32 dataset """wn18rr""" +1034 32 model """unstructuredmodel""" +1034 32 loss """bceaftersigmoid""" +1034 32 regularizer """no""" +1034 32 optimizer """adam""" +1034 32 training_loop """owa""" +1034 32 negative_sampler """basic""" +1034 32 evaluator """rankbased""" +1034 33 dataset """wn18rr""" +1034 33 model """unstructuredmodel""" +1034 33 loss """bceaftersigmoid""" +1034 33 regularizer """no""" +1034 33 optimizer """adam""" +1034 33 training_loop """owa""" +1034 33 negative_sampler """basic""" +1034 33 evaluator """rankbased""" +1034 34 dataset """wn18rr""" +1034 34 model """unstructuredmodel""" +1034 34 loss """bceaftersigmoid""" +1034 34 regularizer """no""" +1034 34 optimizer """adam""" +1034 34 
training_loop """owa""" +1034 34 negative_sampler """basic""" +1034 34 evaluator """rankbased""" +1034 35 dataset """wn18rr""" +1034 35 model """unstructuredmodel""" +1034 35 loss """bceaftersigmoid""" +1034 35 regularizer """no""" +1034 35 optimizer """adam""" +1034 35 training_loop """owa""" +1034 35 negative_sampler """basic""" +1034 35 evaluator """rankbased""" +1034 36 dataset """wn18rr""" +1034 36 model """unstructuredmodel""" +1034 36 loss """bceaftersigmoid""" +1034 36 regularizer """no""" +1034 36 optimizer """adam""" +1034 36 training_loop """owa""" +1034 36 negative_sampler """basic""" +1034 36 evaluator """rankbased""" +1034 37 dataset """wn18rr""" +1034 37 model """unstructuredmodel""" +1034 37 loss """bceaftersigmoid""" +1034 37 regularizer """no""" +1034 37 optimizer """adam""" +1034 37 training_loop """owa""" +1034 37 negative_sampler """basic""" +1034 37 evaluator """rankbased""" +1034 38 dataset """wn18rr""" +1034 38 model """unstructuredmodel""" +1034 38 loss """bceaftersigmoid""" +1034 38 regularizer """no""" +1034 38 optimizer """adam""" +1034 38 training_loop """owa""" +1034 38 negative_sampler """basic""" +1034 38 evaluator """rankbased""" +1034 39 dataset """wn18rr""" +1034 39 model """unstructuredmodel""" +1034 39 loss """bceaftersigmoid""" +1034 39 regularizer """no""" +1034 39 optimizer """adam""" +1034 39 training_loop """owa""" +1034 39 negative_sampler """basic""" +1034 39 evaluator """rankbased""" +1034 40 dataset """wn18rr""" +1034 40 model """unstructuredmodel""" +1034 40 loss """bceaftersigmoid""" +1034 40 regularizer """no""" +1034 40 optimizer """adam""" +1034 40 training_loop """owa""" +1034 40 negative_sampler """basic""" +1034 40 evaluator """rankbased""" +1034 41 dataset """wn18rr""" +1034 41 model """unstructuredmodel""" +1034 41 loss """bceaftersigmoid""" +1034 41 regularizer """no""" +1034 41 optimizer """adam""" +1034 41 training_loop """owa""" +1034 41 negative_sampler """basic""" +1034 41 evaluator """rankbased""" +1034 
42 dataset """wn18rr""" +1034 42 model """unstructuredmodel""" +1034 42 loss """bceaftersigmoid""" +1034 42 regularizer """no""" +1034 42 optimizer """adam""" +1034 42 training_loop """owa""" +1034 42 negative_sampler """basic""" +1034 42 evaluator """rankbased""" +1034 43 dataset """wn18rr""" +1034 43 model """unstructuredmodel""" +1034 43 loss """bceaftersigmoid""" +1034 43 regularizer """no""" +1034 43 optimizer """adam""" +1034 43 training_loop """owa""" +1034 43 negative_sampler """basic""" +1034 43 evaluator """rankbased""" +1034 44 dataset """wn18rr""" +1034 44 model """unstructuredmodel""" +1034 44 loss """bceaftersigmoid""" +1034 44 regularizer """no""" +1034 44 optimizer """adam""" +1034 44 training_loop """owa""" +1034 44 negative_sampler """basic""" +1034 44 evaluator """rankbased""" +1034 45 dataset """wn18rr""" +1034 45 model """unstructuredmodel""" +1034 45 loss """bceaftersigmoid""" +1034 45 regularizer """no""" +1034 45 optimizer """adam""" +1034 45 training_loop """owa""" +1034 45 negative_sampler """basic""" +1034 45 evaluator """rankbased""" +1034 46 dataset """wn18rr""" +1034 46 model """unstructuredmodel""" +1034 46 loss """bceaftersigmoid""" +1034 46 regularizer """no""" +1034 46 optimizer """adam""" +1034 46 training_loop """owa""" +1034 46 negative_sampler """basic""" +1034 46 evaluator """rankbased""" +1034 47 dataset """wn18rr""" +1034 47 model """unstructuredmodel""" +1034 47 loss """bceaftersigmoid""" +1034 47 regularizer """no""" +1034 47 optimizer """adam""" +1034 47 training_loop """owa""" +1034 47 negative_sampler """basic""" +1034 47 evaluator """rankbased""" +1034 48 dataset """wn18rr""" +1034 48 model """unstructuredmodel""" +1034 48 loss """bceaftersigmoid""" +1034 48 regularizer """no""" +1034 48 optimizer """adam""" +1034 48 training_loop """owa""" +1034 48 negative_sampler """basic""" +1034 48 evaluator """rankbased""" +1034 49 dataset """wn18rr""" +1034 49 model """unstructuredmodel""" +1034 49 loss """bceaftersigmoid""" 
+1034 49 regularizer """no""" +1034 49 optimizer """adam""" +1034 49 training_loop """owa""" +1034 49 negative_sampler """basic""" +1034 49 evaluator """rankbased""" +1034 50 dataset """wn18rr""" +1034 50 model """unstructuredmodel""" +1034 50 loss """bceaftersigmoid""" +1034 50 regularizer """no""" +1034 50 optimizer """adam""" +1034 50 training_loop """owa""" +1034 50 negative_sampler """basic""" +1034 50 evaluator """rankbased""" +1034 51 dataset """wn18rr""" +1034 51 model """unstructuredmodel""" +1034 51 loss """bceaftersigmoid""" +1034 51 regularizer """no""" +1034 51 optimizer """adam""" +1034 51 training_loop """owa""" +1034 51 negative_sampler """basic""" +1034 51 evaluator """rankbased""" +1034 52 dataset """wn18rr""" +1034 52 model """unstructuredmodel""" +1034 52 loss """bceaftersigmoid""" +1034 52 regularizer """no""" +1034 52 optimizer """adam""" +1034 52 training_loop """owa""" +1034 52 negative_sampler """basic""" +1034 52 evaluator """rankbased""" +1034 53 dataset """wn18rr""" +1034 53 model """unstructuredmodel""" +1034 53 loss """bceaftersigmoid""" +1034 53 regularizer """no""" +1034 53 optimizer """adam""" +1034 53 training_loop """owa""" +1034 53 negative_sampler """basic""" +1034 53 evaluator """rankbased""" +1034 54 dataset """wn18rr""" +1034 54 model """unstructuredmodel""" +1034 54 loss """bceaftersigmoid""" +1034 54 regularizer """no""" +1034 54 optimizer """adam""" +1034 54 training_loop """owa""" +1034 54 negative_sampler """basic""" +1034 54 evaluator """rankbased""" +1034 55 dataset """wn18rr""" +1034 55 model """unstructuredmodel""" +1034 55 loss """bceaftersigmoid""" +1034 55 regularizer """no""" +1034 55 optimizer """adam""" +1034 55 training_loop """owa""" +1034 55 negative_sampler """basic""" +1034 55 evaluator """rankbased""" +1034 56 dataset """wn18rr""" +1034 56 model """unstructuredmodel""" +1034 56 loss """bceaftersigmoid""" +1034 56 regularizer """no""" +1034 56 optimizer """adam""" +1034 56 training_loop """owa""" +1034 56 
negative_sampler """basic""" +1034 56 evaluator """rankbased""" +1034 57 dataset """wn18rr""" +1034 57 model """unstructuredmodel""" +1034 57 loss """bceaftersigmoid""" +1034 57 regularizer """no""" +1034 57 optimizer """adam""" +1034 57 training_loop """owa""" +1034 57 negative_sampler """basic""" +1034 57 evaluator """rankbased""" +1034 58 dataset """wn18rr""" +1034 58 model """unstructuredmodel""" +1034 58 loss """bceaftersigmoid""" +1034 58 regularizer """no""" +1034 58 optimizer """adam""" +1034 58 training_loop """owa""" +1034 58 negative_sampler """basic""" +1034 58 evaluator """rankbased""" +1034 59 dataset """wn18rr""" +1034 59 model """unstructuredmodel""" +1034 59 loss """bceaftersigmoid""" +1034 59 regularizer """no""" +1034 59 optimizer """adam""" +1034 59 training_loop """owa""" +1034 59 negative_sampler """basic""" +1034 59 evaluator """rankbased""" +1034 60 dataset """wn18rr""" +1034 60 model """unstructuredmodel""" +1034 60 loss """bceaftersigmoid""" +1034 60 regularizer """no""" +1034 60 optimizer """adam""" +1034 60 training_loop """owa""" +1034 60 negative_sampler """basic""" +1034 60 evaluator """rankbased""" +1034 61 dataset """wn18rr""" +1034 61 model """unstructuredmodel""" +1034 61 loss """bceaftersigmoid""" +1034 61 regularizer """no""" +1034 61 optimizer """adam""" +1034 61 training_loop """owa""" +1034 61 negative_sampler """basic""" +1034 61 evaluator """rankbased""" +1034 62 dataset """wn18rr""" +1034 62 model """unstructuredmodel""" +1034 62 loss """bceaftersigmoid""" +1034 62 regularizer """no""" +1034 62 optimizer """adam""" +1034 62 training_loop """owa""" +1034 62 negative_sampler """basic""" +1034 62 evaluator """rankbased""" +1034 63 dataset """wn18rr""" +1034 63 model """unstructuredmodel""" +1034 63 loss """bceaftersigmoid""" +1034 63 regularizer """no""" +1034 63 optimizer """adam""" +1034 63 training_loop """owa""" +1034 63 negative_sampler """basic""" +1034 63 evaluator """rankbased""" +1034 64 dataset """wn18rr""" +1034 64 
model """unstructuredmodel""" +1034 64 loss """bceaftersigmoid""" +1034 64 regularizer """no""" +1034 64 optimizer """adam""" +1034 64 training_loop """owa""" +1034 64 negative_sampler """basic""" +1034 64 evaluator """rankbased""" +1034 65 dataset """wn18rr""" +1034 65 model """unstructuredmodel""" +1034 65 loss """bceaftersigmoid""" +1034 65 regularizer """no""" +1034 65 optimizer """adam""" +1034 65 training_loop """owa""" +1034 65 negative_sampler """basic""" +1034 65 evaluator """rankbased""" +1034 66 dataset """wn18rr""" +1034 66 model """unstructuredmodel""" +1034 66 loss """bceaftersigmoid""" +1034 66 regularizer """no""" +1034 66 optimizer """adam""" +1034 66 training_loop """owa""" +1034 66 negative_sampler """basic""" +1034 66 evaluator """rankbased""" +1034 67 dataset """wn18rr""" +1034 67 model """unstructuredmodel""" +1034 67 loss """bceaftersigmoid""" +1034 67 regularizer """no""" +1034 67 optimizer """adam""" +1034 67 training_loop """owa""" +1034 67 negative_sampler """basic""" +1034 67 evaluator """rankbased""" +1034 68 dataset """wn18rr""" +1034 68 model """unstructuredmodel""" +1034 68 loss """bceaftersigmoid""" +1034 68 regularizer """no""" +1034 68 optimizer """adam""" +1034 68 training_loop """owa""" +1034 68 negative_sampler """basic""" +1034 68 evaluator """rankbased""" +1034 69 dataset """wn18rr""" +1034 69 model """unstructuredmodel""" +1034 69 loss """bceaftersigmoid""" +1034 69 regularizer """no""" +1034 69 optimizer """adam""" +1034 69 training_loop """owa""" +1034 69 negative_sampler """basic""" +1034 69 evaluator """rankbased""" +1034 70 dataset """wn18rr""" +1034 70 model """unstructuredmodel""" +1034 70 loss """bceaftersigmoid""" +1034 70 regularizer """no""" +1034 70 optimizer """adam""" +1034 70 training_loop """owa""" +1034 70 negative_sampler """basic""" +1034 70 evaluator """rankbased""" +1034 71 dataset """wn18rr""" +1034 71 model """unstructuredmodel""" +1034 71 loss """bceaftersigmoid""" +1034 71 regularizer """no""" +1034 
71 optimizer """adam""" +1034 71 training_loop """owa""" +1034 71 negative_sampler """basic""" +1034 71 evaluator """rankbased""" +1034 72 dataset """wn18rr""" +1034 72 model """unstructuredmodel""" +1034 72 loss """bceaftersigmoid""" +1034 72 regularizer """no""" +1034 72 optimizer """adam""" +1034 72 training_loop """owa""" +1034 72 negative_sampler """basic""" +1034 72 evaluator """rankbased""" +1034 73 dataset """wn18rr""" +1034 73 model """unstructuredmodel""" +1034 73 loss """bceaftersigmoid""" +1034 73 regularizer """no""" +1034 73 optimizer """adam""" +1034 73 training_loop """owa""" +1034 73 negative_sampler """basic""" +1034 73 evaluator """rankbased""" +1034 74 dataset """wn18rr""" +1034 74 model """unstructuredmodel""" +1034 74 loss """bceaftersigmoid""" +1034 74 regularizer """no""" +1034 74 optimizer """adam""" +1034 74 training_loop """owa""" +1034 74 negative_sampler """basic""" +1034 74 evaluator """rankbased""" +1034 75 dataset """wn18rr""" +1034 75 model """unstructuredmodel""" +1034 75 loss """bceaftersigmoid""" +1034 75 regularizer """no""" +1034 75 optimizer """adam""" +1034 75 training_loop """owa""" +1034 75 negative_sampler """basic""" +1034 75 evaluator """rankbased""" +1034 76 dataset """wn18rr""" +1034 76 model """unstructuredmodel""" +1034 76 loss """bceaftersigmoid""" +1034 76 regularizer """no""" +1034 76 optimizer """adam""" +1034 76 training_loop """owa""" +1034 76 negative_sampler """basic""" +1034 76 evaluator """rankbased""" +1034 77 dataset """wn18rr""" +1034 77 model """unstructuredmodel""" +1034 77 loss """bceaftersigmoid""" +1034 77 regularizer """no""" +1034 77 optimizer """adam""" +1034 77 training_loop """owa""" +1034 77 negative_sampler """basic""" +1034 77 evaluator """rankbased""" +1034 78 dataset """wn18rr""" +1034 78 model """unstructuredmodel""" +1034 78 loss """bceaftersigmoid""" +1034 78 regularizer """no""" +1034 78 optimizer """adam""" +1034 78 training_loop """owa""" +1034 78 negative_sampler """basic""" +1034 
78 evaluator """rankbased""" +1034 79 dataset """wn18rr""" +1034 79 model """unstructuredmodel""" +1034 79 loss """bceaftersigmoid""" +1034 79 regularizer """no""" +1034 79 optimizer """adam""" +1034 79 training_loop """owa""" +1034 79 negative_sampler """basic""" +1034 79 evaluator """rankbased""" +1034 80 dataset """wn18rr""" +1034 80 model """unstructuredmodel""" +1034 80 loss """bceaftersigmoid""" +1034 80 regularizer """no""" +1034 80 optimizer """adam""" +1034 80 training_loop """owa""" +1034 80 negative_sampler """basic""" +1034 80 evaluator """rankbased""" +1034 81 dataset """wn18rr""" +1034 81 model """unstructuredmodel""" +1034 81 loss """bceaftersigmoid""" +1034 81 regularizer """no""" +1034 81 optimizer """adam""" +1034 81 training_loop """owa""" +1034 81 negative_sampler """basic""" +1034 81 evaluator """rankbased""" +1034 82 dataset """wn18rr""" +1034 82 model """unstructuredmodel""" +1034 82 loss """bceaftersigmoid""" +1034 82 regularizer """no""" +1034 82 optimizer """adam""" +1034 82 training_loop """owa""" +1034 82 negative_sampler """basic""" +1034 82 evaluator """rankbased""" +1034 83 dataset """wn18rr""" +1034 83 model """unstructuredmodel""" +1034 83 loss """bceaftersigmoid""" +1034 83 regularizer """no""" +1034 83 optimizer """adam""" +1034 83 training_loop """owa""" +1034 83 negative_sampler """basic""" +1034 83 evaluator """rankbased""" +1034 84 dataset """wn18rr""" +1034 84 model """unstructuredmodel""" +1034 84 loss """bceaftersigmoid""" +1034 84 regularizer """no""" +1034 84 optimizer """adam""" +1034 84 training_loop """owa""" +1034 84 negative_sampler """basic""" +1034 84 evaluator """rankbased""" +1034 85 dataset """wn18rr""" +1034 85 model """unstructuredmodel""" +1034 85 loss """bceaftersigmoid""" +1034 85 regularizer """no""" +1034 85 optimizer """adam""" +1034 85 training_loop """owa""" +1034 85 negative_sampler """basic""" +1034 85 evaluator """rankbased""" +1034 86 dataset """wn18rr""" +1034 86 model """unstructuredmodel""" 
+1034 86 loss """bceaftersigmoid""" +1034 86 regularizer """no""" +1034 86 optimizer """adam""" +1034 86 training_loop """owa""" +1034 86 negative_sampler """basic""" +1034 86 evaluator """rankbased""" +1034 87 dataset """wn18rr""" +1034 87 model """unstructuredmodel""" +1034 87 loss """bceaftersigmoid""" +1034 87 regularizer """no""" +1034 87 optimizer """adam""" +1034 87 training_loop """owa""" +1034 87 negative_sampler """basic""" +1034 87 evaluator """rankbased""" +1034 88 dataset """wn18rr""" +1034 88 model """unstructuredmodel""" +1034 88 loss """bceaftersigmoid""" +1034 88 regularizer """no""" +1034 88 optimizer """adam""" +1034 88 training_loop """owa""" +1034 88 negative_sampler """basic""" +1034 88 evaluator """rankbased""" +1034 89 dataset """wn18rr""" +1034 89 model """unstructuredmodel""" +1034 89 loss """bceaftersigmoid""" +1034 89 regularizer """no""" +1034 89 optimizer """adam""" +1034 89 training_loop """owa""" +1034 89 negative_sampler """basic""" +1034 89 evaluator """rankbased""" +1034 90 dataset """wn18rr""" +1034 90 model """unstructuredmodel""" +1034 90 loss """bceaftersigmoid""" +1034 90 regularizer """no""" +1034 90 optimizer """adam""" +1034 90 training_loop """owa""" +1034 90 negative_sampler """basic""" +1034 90 evaluator """rankbased""" +1034 91 dataset """wn18rr""" +1034 91 model """unstructuredmodel""" +1034 91 loss """bceaftersigmoid""" +1034 91 regularizer """no""" +1034 91 optimizer """adam""" +1034 91 training_loop """owa""" +1034 91 negative_sampler """basic""" +1034 91 evaluator """rankbased""" +1034 92 dataset """wn18rr""" +1034 92 model """unstructuredmodel""" +1034 92 loss """bceaftersigmoid""" +1034 92 regularizer """no""" +1034 92 optimizer """adam""" +1034 92 training_loop """owa""" +1034 92 negative_sampler """basic""" +1034 92 evaluator """rankbased""" +1034 93 dataset """wn18rr""" +1034 93 model """unstructuredmodel""" +1034 93 loss """bceaftersigmoid""" +1034 93 regularizer """no""" +1034 93 optimizer """adam""" +1034 
93 training_loop """owa""" +1034 93 negative_sampler """basic""" +1034 93 evaluator """rankbased""" +1034 94 dataset """wn18rr""" +1034 94 model """unstructuredmodel""" +1034 94 loss """bceaftersigmoid""" +1034 94 regularizer """no""" +1034 94 optimizer """adam""" +1034 94 training_loop """owa""" +1034 94 negative_sampler """basic""" +1034 94 evaluator """rankbased""" +1034 95 dataset """wn18rr""" +1034 95 model """unstructuredmodel""" +1034 95 loss """bceaftersigmoid""" +1034 95 regularizer """no""" +1034 95 optimizer """adam""" +1034 95 training_loop """owa""" +1034 95 negative_sampler """basic""" +1034 95 evaluator """rankbased""" +1034 96 dataset """wn18rr""" +1034 96 model """unstructuredmodel""" +1034 96 loss """bceaftersigmoid""" +1034 96 regularizer """no""" +1034 96 optimizer """adam""" +1034 96 training_loop """owa""" +1034 96 negative_sampler """basic""" +1034 96 evaluator """rankbased""" +1034 97 dataset """wn18rr""" +1034 97 model """unstructuredmodel""" +1034 97 loss """bceaftersigmoid""" +1034 97 regularizer """no""" +1034 97 optimizer """adam""" +1034 97 training_loop """owa""" +1034 97 negative_sampler """basic""" +1034 97 evaluator """rankbased""" +1034 98 dataset """wn18rr""" +1034 98 model """unstructuredmodel""" +1034 98 loss """bceaftersigmoid""" +1034 98 regularizer """no""" +1034 98 optimizer """adam""" +1034 98 training_loop """owa""" +1034 98 negative_sampler """basic""" +1034 98 evaluator """rankbased""" +1034 99 dataset """wn18rr""" +1034 99 model """unstructuredmodel""" +1034 99 loss """bceaftersigmoid""" +1034 99 regularizer """no""" +1034 99 optimizer """adam""" +1034 99 training_loop """owa""" +1034 99 negative_sampler """basic""" +1034 99 evaluator """rankbased""" +1034 100 dataset """wn18rr""" +1034 100 model """unstructuredmodel""" +1034 100 loss """bceaftersigmoid""" +1034 100 regularizer """no""" +1034 100 optimizer """adam""" +1034 100 training_loop """owa""" +1034 100 negative_sampler """basic""" +1034 100 evaluator 
"""rankbased""" +1035 1 model.embedding_dim 0.0 +1035 1 model.scoring_fct_norm 1.0 +1035 1 optimizer.lr 0.002149251026135892 +1035 1 negative_sampler.num_negs_per_pos 51.0 +1035 1 training.batch_size 2.0 +1035 2 model.embedding_dim 1.0 +1035 2 model.scoring_fct_norm 1.0 +1035 2 optimizer.lr 0.022301515963250973 +1035 2 negative_sampler.num_negs_per_pos 88.0 +1035 2 training.batch_size 1.0 +1035 3 model.embedding_dim 1.0 +1035 3 model.scoring_fct_norm 1.0 +1035 3 optimizer.lr 0.03221449722765232 +1035 3 negative_sampler.num_negs_per_pos 55.0 +1035 3 training.batch_size 2.0 +1035 4 model.embedding_dim 1.0 +1035 4 model.scoring_fct_norm 1.0 +1035 4 optimizer.lr 0.0036362790899345104 +1035 4 negative_sampler.num_negs_per_pos 7.0 +1035 4 training.batch_size 1.0 +1035 5 model.embedding_dim 1.0 +1035 5 model.scoring_fct_norm 2.0 +1035 5 optimizer.lr 0.00174193769066852 +1035 5 negative_sampler.num_negs_per_pos 93.0 +1035 5 training.batch_size 2.0 +1035 6 model.embedding_dim 1.0 +1035 6 model.scoring_fct_norm 2.0 +1035 6 optimizer.lr 0.002028023768846823 +1035 6 negative_sampler.num_negs_per_pos 53.0 +1035 6 training.batch_size 0.0 +1035 7 model.embedding_dim 0.0 +1035 7 model.scoring_fct_norm 2.0 +1035 7 optimizer.lr 0.022238038839715604 +1035 7 negative_sampler.num_negs_per_pos 15.0 +1035 7 training.batch_size 1.0 +1035 8 model.embedding_dim 2.0 +1035 8 model.scoring_fct_norm 1.0 +1035 8 optimizer.lr 0.0018253757145995976 +1035 8 negative_sampler.num_negs_per_pos 95.0 +1035 8 training.batch_size 0.0 +1035 9 model.embedding_dim 0.0 +1035 9 model.scoring_fct_norm 2.0 +1035 9 optimizer.lr 0.006294999212914696 +1035 9 negative_sampler.num_negs_per_pos 93.0 +1035 9 training.batch_size 2.0 +1035 10 model.embedding_dim 1.0 +1035 10 model.scoring_fct_norm 1.0 +1035 10 optimizer.lr 0.04525615150483605 +1035 10 negative_sampler.num_negs_per_pos 37.0 +1035 10 training.batch_size 1.0 +1035 11 model.embedding_dim 2.0 +1035 11 model.scoring_fct_norm 2.0 +1035 11 optimizer.lr 
0.006562991784299593 +1035 11 negative_sampler.num_negs_per_pos 82.0 +1035 11 training.batch_size 2.0 +1035 12 model.embedding_dim 1.0 +1035 12 model.scoring_fct_norm 2.0 +1035 12 optimizer.lr 0.017603849237806907 +1035 12 negative_sampler.num_negs_per_pos 72.0 +1035 12 training.batch_size 0.0 +1035 13 model.embedding_dim 1.0 +1035 13 model.scoring_fct_norm 2.0 +1035 13 optimizer.lr 0.055590700514547814 +1035 13 negative_sampler.num_negs_per_pos 7.0 +1035 13 training.batch_size 2.0 +1035 14 model.embedding_dim 0.0 +1035 14 model.scoring_fct_norm 2.0 +1035 14 optimizer.lr 0.01245988631641007 +1035 14 negative_sampler.num_negs_per_pos 0.0 +1035 14 training.batch_size 0.0 +1035 15 model.embedding_dim 2.0 +1035 15 model.scoring_fct_norm 2.0 +1035 15 optimizer.lr 0.07845170803015229 +1035 15 negative_sampler.num_negs_per_pos 60.0 +1035 15 training.batch_size 2.0 +1035 16 model.embedding_dim 2.0 +1035 16 model.scoring_fct_norm 2.0 +1035 16 optimizer.lr 0.005065885461190506 +1035 16 negative_sampler.num_negs_per_pos 37.0 +1035 16 training.batch_size 2.0 +1035 17 model.embedding_dim 0.0 +1035 17 model.scoring_fct_norm 2.0 +1035 17 optimizer.lr 0.014815363065713037 +1035 17 negative_sampler.num_negs_per_pos 20.0 +1035 17 training.batch_size 0.0 +1035 18 model.embedding_dim 2.0 +1035 18 model.scoring_fct_norm 2.0 +1035 18 optimizer.lr 0.01175731483789765 +1035 18 negative_sampler.num_negs_per_pos 9.0 +1035 18 training.batch_size 2.0 +1035 19 model.embedding_dim 1.0 +1035 19 model.scoring_fct_norm 1.0 +1035 19 optimizer.lr 0.002921354595150473 +1035 19 negative_sampler.num_negs_per_pos 39.0 +1035 19 training.batch_size 0.0 +1035 20 model.embedding_dim 1.0 +1035 20 model.scoring_fct_norm 1.0 +1035 20 optimizer.lr 0.01206023743866005 +1035 20 negative_sampler.num_negs_per_pos 19.0 +1035 20 training.batch_size 1.0 +1035 21 model.embedding_dim 2.0 +1035 21 model.scoring_fct_norm 1.0 +1035 21 optimizer.lr 0.0046756344782300325 +1035 21 negative_sampler.num_negs_per_pos 42.0 +1035 
21 training.batch_size 0.0 +1035 22 model.embedding_dim 0.0 +1035 22 model.scoring_fct_norm 2.0 +1035 22 optimizer.lr 0.009365601549019142 +1035 22 negative_sampler.num_negs_per_pos 76.0 +1035 22 training.batch_size 2.0 +1035 23 model.embedding_dim 0.0 +1035 23 model.scoring_fct_norm 2.0 +1035 23 optimizer.lr 0.005796257213740082 +1035 23 negative_sampler.num_negs_per_pos 61.0 +1035 23 training.batch_size 1.0 +1035 24 model.embedding_dim 0.0 +1035 24 model.scoring_fct_norm 1.0 +1035 24 optimizer.lr 0.004015171723702764 +1035 24 negative_sampler.num_negs_per_pos 28.0 +1035 24 training.batch_size 2.0 +1035 25 model.embedding_dim 0.0 +1035 25 model.scoring_fct_norm 2.0 +1035 25 optimizer.lr 0.005008046894481032 +1035 25 negative_sampler.num_negs_per_pos 32.0 +1035 25 training.batch_size 1.0 +1035 26 model.embedding_dim 1.0 +1035 26 model.scoring_fct_norm 2.0 +1035 26 optimizer.lr 0.006367096519864704 +1035 26 negative_sampler.num_negs_per_pos 84.0 +1035 26 training.batch_size 1.0 +1035 27 model.embedding_dim 1.0 +1035 27 model.scoring_fct_norm 2.0 +1035 27 optimizer.lr 0.054279038817338324 +1035 27 negative_sampler.num_negs_per_pos 66.0 +1035 27 training.batch_size 1.0 +1035 28 model.embedding_dim 0.0 +1035 28 model.scoring_fct_norm 1.0 +1035 28 optimizer.lr 0.02347767408360855 +1035 28 negative_sampler.num_negs_per_pos 28.0 +1035 28 training.batch_size 0.0 +1035 29 model.embedding_dim 0.0 +1035 29 model.scoring_fct_norm 2.0 +1035 29 optimizer.lr 0.014858612951524455 +1035 29 negative_sampler.num_negs_per_pos 53.0 +1035 29 training.batch_size 1.0 +1035 30 model.embedding_dim 2.0 +1035 30 model.scoring_fct_norm 2.0 +1035 30 optimizer.lr 0.0013361176338175604 +1035 30 negative_sampler.num_negs_per_pos 59.0 +1035 30 training.batch_size 0.0 +1035 31 model.embedding_dim 1.0 +1035 31 model.scoring_fct_norm 1.0 +1035 31 optimizer.lr 0.007047126180143251 +1035 31 negative_sampler.num_negs_per_pos 16.0 +1035 31 training.batch_size 0.0 +1035 32 model.embedding_dim 0.0 +1035 32 
model.scoring_fct_norm 2.0 +1035 32 optimizer.lr 0.02190849967771881 +1035 32 negative_sampler.num_negs_per_pos 0.0 +1035 32 training.batch_size 0.0 +1035 33 model.embedding_dim 2.0 +1035 33 model.scoring_fct_norm 1.0 +1035 33 optimizer.lr 0.004462695527486886 +1035 33 negative_sampler.num_negs_per_pos 84.0 +1035 33 training.batch_size 1.0 +1035 34 model.embedding_dim 2.0 +1035 34 model.scoring_fct_norm 1.0 +1035 34 optimizer.lr 0.010549118296621607 +1035 34 negative_sampler.num_negs_per_pos 72.0 +1035 34 training.batch_size 2.0 +1035 35 model.embedding_dim 2.0 +1035 35 model.scoring_fct_norm 2.0 +1035 35 optimizer.lr 0.055757941136746335 +1035 35 negative_sampler.num_negs_per_pos 17.0 +1035 35 training.batch_size 1.0 +1035 36 model.embedding_dim 1.0 +1035 36 model.scoring_fct_norm 2.0 +1035 36 optimizer.lr 0.021972863539139482 +1035 36 negative_sampler.num_negs_per_pos 94.0 +1035 36 training.batch_size 1.0 +1035 37 model.embedding_dim 1.0 +1035 37 model.scoring_fct_norm 2.0 +1035 37 optimizer.lr 0.0031608300532928574 +1035 37 negative_sampler.num_negs_per_pos 91.0 +1035 37 training.batch_size 1.0 +1035 38 model.embedding_dim 1.0 +1035 38 model.scoring_fct_norm 1.0 +1035 38 optimizer.lr 0.0869268287805119 +1035 38 negative_sampler.num_negs_per_pos 25.0 +1035 38 training.batch_size 0.0 +1035 39 model.embedding_dim 0.0 +1035 39 model.scoring_fct_norm 2.0 +1035 39 optimizer.lr 0.0028435592337803096 +1035 39 negative_sampler.num_negs_per_pos 17.0 +1035 39 training.batch_size 2.0 +1035 40 model.embedding_dim 0.0 +1035 40 model.scoring_fct_norm 1.0 +1035 40 optimizer.lr 0.05633198167005139 +1035 40 negative_sampler.num_negs_per_pos 76.0 +1035 40 training.batch_size 0.0 +1035 41 model.embedding_dim 0.0 +1035 41 model.scoring_fct_norm 1.0 +1035 41 optimizer.lr 0.00243911096086695 +1035 41 negative_sampler.num_negs_per_pos 12.0 +1035 41 training.batch_size 2.0 +1035 42 model.embedding_dim 1.0 +1035 42 model.scoring_fct_norm 2.0 +1035 42 optimizer.lr 0.034129609233101564 
+1035 42 negative_sampler.num_negs_per_pos 80.0 +1035 42 training.batch_size 1.0 +1035 43 model.embedding_dim 0.0 +1035 43 model.scoring_fct_norm 2.0 +1035 43 optimizer.lr 0.06043466426717316 +1035 43 negative_sampler.num_negs_per_pos 37.0 +1035 43 training.batch_size 2.0 +1035 44 model.embedding_dim 2.0 +1035 44 model.scoring_fct_norm 2.0 +1035 44 optimizer.lr 0.044937726845410315 +1035 44 negative_sampler.num_negs_per_pos 5.0 +1035 44 training.batch_size 2.0 +1035 45 model.embedding_dim 2.0 +1035 45 model.scoring_fct_norm 1.0 +1035 45 optimizer.lr 0.01132555051508979 +1035 45 negative_sampler.num_negs_per_pos 90.0 +1035 45 training.batch_size 0.0 +1035 46 model.embedding_dim 2.0 +1035 46 model.scoring_fct_norm 1.0 +1035 46 optimizer.lr 0.0013363668395955357 +1035 46 negative_sampler.num_negs_per_pos 74.0 +1035 46 training.batch_size 2.0 +1035 47 model.embedding_dim 2.0 +1035 47 model.scoring_fct_norm 1.0 +1035 47 optimizer.lr 0.0010789235126598337 +1035 47 negative_sampler.num_negs_per_pos 79.0 +1035 47 training.batch_size 2.0 +1035 48 model.embedding_dim 0.0 +1035 48 model.scoring_fct_norm 1.0 +1035 48 optimizer.lr 0.014440915086109615 +1035 48 negative_sampler.num_negs_per_pos 45.0 +1035 48 training.batch_size 2.0 +1035 49 model.embedding_dim 1.0 +1035 49 model.scoring_fct_norm 2.0 +1035 49 optimizer.lr 0.03143938847643318 +1035 49 negative_sampler.num_negs_per_pos 82.0 +1035 49 training.batch_size 1.0 +1035 50 model.embedding_dim 0.0 +1035 50 model.scoring_fct_norm 1.0 +1035 50 optimizer.lr 0.08707707184452702 +1035 50 negative_sampler.num_negs_per_pos 4.0 +1035 50 training.batch_size 1.0 +1035 51 model.embedding_dim 1.0 +1035 51 model.scoring_fct_norm 1.0 +1035 51 optimizer.lr 0.08617321995199503 +1035 51 negative_sampler.num_negs_per_pos 91.0 +1035 51 training.batch_size 0.0 +1035 52 model.embedding_dim 2.0 +1035 52 model.scoring_fct_norm 1.0 +1035 52 optimizer.lr 0.007165178848450184 +1035 52 negative_sampler.num_negs_per_pos 10.0 +1035 52 
training.batch_size 1.0 +1035 53 model.embedding_dim 0.0 +1035 53 model.scoring_fct_norm 1.0 +1035 53 optimizer.lr 0.02703666691762888 +1035 53 negative_sampler.num_negs_per_pos 57.0 +1035 53 training.batch_size 0.0 +1035 54 model.embedding_dim 0.0 +1035 54 model.scoring_fct_norm 2.0 +1035 54 optimizer.lr 0.07003234003430958 +1035 54 negative_sampler.num_negs_per_pos 85.0 +1035 54 training.batch_size 2.0 +1035 55 model.embedding_dim 0.0 +1035 55 model.scoring_fct_norm 2.0 +1035 55 optimizer.lr 0.03301909234821428 +1035 55 negative_sampler.num_negs_per_pos 1.0 +1035 55 training.batch_size 0.0 +1035 56 model.embedding_dim 1.0 +1035 56 model.scoring_fct_norm 2.0 +1035 56 optimizer.lr 0.038056459665079825 +1035 56 negative_sampler.num_negs_per_pos 16.0 +1035 56 training.batch_size 2.0 +1035 57 model.embedding_dim 2.0 +1035 57 model.scoring_fct_norm 2.0 +1035 57 optimizer.lr 0.04296977185848928 +1035 57 negative_sampler.num_negs_per_pos 13.0 +1035 57 training.batch_size 1.0 +1035 58 model.embedding_dim 1.0 +1035 58 model.scoring_fct_norm 2.0 +1035 58 optimizer.lr 0.008393251710146304 +1035 58 negative_sampler.num_negs_per_pos 35.0 +1035 58 training.batch_size 1.0 +1035 59 model.embedding_dim 0.0 +1035 59 model.scoring_fct_norm 2.0 +1035 59 optimizer.lr 0.0020143696061017055 +1035 59 negative_sampler.num_negs_per_pos 44.0 +1035 59 training.batch_size 1.0 +1035 60 model.embedding_dim 0.0 +1035 60 model.scoring_fct_norm 2.0 +1035 60 optimizer.lr 0.00145793869368732 +1035 60 negative_sampler.num_negs_per_pos 14.0 +1035 60 training.batch_size 1.0 +1035 61 model.embedding_dim 1.0 +1035 61 model.scoring_fct_norm 1.0 +1035 61 optimizer.lr 0.030388426677936346 +1035 61 negative_sampler.num_negs_per_pos 86.0 +1035 61 training.batch_size 1.0 +1035 62 model.embedding_dim 2.0 +1035 62 model.scoring_fct_norm 1.0 +1035 62 optimizer.lr 0.06397001547180936 +1035 62 negative_sampler.num_negs_per_pos 72.0 +1035 62 training.batch_size 0.0 +1035 63 model.embedding_dim 1.0 +1035 63 
model.scoring_fct_norm 1.0 +1035 63 optimizer.lr 0.0016281353572432163 +1035 63 negative_sampler.num_negs_per_pos 13.0 +1035 63 training.batch_size 0.0 +1035 64 model.embedding_dim 2.0 +1035 64 model.scoring_fct_norm 1.0 +1035 64 optimizer.lr 0.001809132050988438 +1035 64 negative_sampler.num_negs_per_pos 66.0 +1035 64 training.batch_size 1.0 +1035 65 model.embedding_dim 1.0 +1035 65 model.scoring_fct_norm 2.0 +1035 65 optimizer.lr 0.0010775383587262022 +1035 65 negative_sampler.num_negs_per_pos 21.0 +1035 65 training.batch_size 0.0 +1035 66 model.embedding_dim 1.0 +1035 66 model.scoring_fct_norm 1.0 +1035 66 optimizer.lr 0.05985617258266121 +1035 66 negative_sampler.num_negs_per_pos 18.0 +1035 66 training.batch_size 2.0 +1035 67 model.embedding_dim 2.0 +1035 67 model.scoring_fct_norm 2.0 +1035 67 optimizer.lr 0.001757343198021501 +1035 67 negative_sampler.num_negs_per_pos 26.0 +1035 67 training.batch_size 1.0 +1035 68 model.embedding_dim 0.0 +1035 68 model.scoring_fct_norm 1.0 +1035 68 optimizer.lr 0.011704856387390734 +1035 68 negative_sampler.num_negs_per_pos 18.0 +1035 68 training.batch_size 2.0 +1035 69 model.embedding_dim 2.0 +1035 69 model.scoring_fct_norm 2.0 +1035 69 optimizer.lr 0.016147752611745718 +1035 69 negative_sampler.num_negs_per_pos 13.0 +1035 69 training.batch_size 2.0 +1035 70 model.embedding_dim 2.0 +1035 70 model.scoring_fct_norm 2.0 +1035 70 optimizer.lr 0.0238108223537411 +1035 70 negative_sampler.num_negs_per_pos 8.0 +1035 70 training.batch_size 2.0 +1035 71 model.embedding_dim 0.0 +1035 71 model.scoring_fct_norm 2.0 +1035 71 optimizer.lr 0.016281400267035187 +1035 71 negative_sampler.num_negs_per_pos 69.0 +1035 71 training.batch_size 0.0 +1035 72 model.embedding_dim 2.0 +1035 72 model.scoring_fct_norm 2.0 +1035 72 optimizer.lr 0.0014023773256436989 +1035 72 negative_sampler.num_negs_per_pos 49.0 +1035 72 training.batch_size 0.0 +1035 73 model.embedding_dim 0.0 +1035 73 model.scoring_fct_norm 1.0 +1035 73 optimizer.lr 0.006297035063649332 
+1035 73 negative_sampler.num_negs_per_pos 29.0 +1035 73 training.batch_size 0.0 +1035 74 model.embedding_dim 0.0 +1035 74 model.scoring_fct_norm 2.0 +1035 74 optimizer.lr 0.003782803931157682 +1035 74 negative_sampler.num_negs_per_pos 42.0 +1035 74 training.batch_size 0.0 +1035 75 model.embedding_dim 0.0 +1035 75 model.scoring_fct_norm 2.0 +1035 75 optimizer.lr 0.009981576219304043 +1035 75 negative_sampler.num_negs_per_pos 78.0 +1035 75 training.batch_size 1.0 +1035 76 model.embedding_dim 0.0 +1035 76 model.scoring_fct_norm 1.0 +1035 76 optimizer.lr 0.0020334097993279436 +1035 76 negative_sampler.num_negs_per_pos 68.0 +1035 76 training.batch_size 1.0 +1035 77 model.embedding_dim 0.0 +1035 77 model.scoring_fct_norm 2.0 +1035 77 optimizer.lr 0.010488395212327582 +1035 77 negative_sampler.num_negs_per_pos 69.0 +1035 77 training.batch_size 1.0 +1035 78 model.embedding_dim 0.0 +1035 78 model.scoring_fct_norm 2.0 +1035 78 optimizer.lr 0.0013333965880064284 +1035 78 negative_sampler.num_negs_per_pos 67.0 +1035 78 training.batch_size 2.0 +1035 79 model.embedding_dim 2.0 +1035 79 model.scoring_fct_norm 2.0 +1035 79 optimizer.lr 0.003010216265188642 +1035 79 negative_sampler.num_negs_per_pos 58.0 +1035 79 training.batch_size 2.0 +1035 80 model.embedding_dim 1.0 +1035 80 model.scoring_fct_norm 2.0 +1035 80 optimizer.lr 0.003563712646134241 +1035 80 negative_sampler.num_negs_per_pos 19.0 +1035 80 training.batch_size 0.0 +1035 81 model.embedding_dim 0.0 +1035 81 model.scoring_fct_norm 1.0 +1035 81 optimizer.lr 0.006595395060955117 +1035 81 negative_sampler.num_negs_per_pos 9.0 +1035 81 training.batch_size 2.0 +1035 82 model.embedding_dim 2.0 +1035 82 model.scoring_fct_norm 1.0 +1035 82 optimizer.lr 0.010741176659543973 +1035 82 negative_sampler.num_negs_per_pos 92.0 +1035 82 training.batch_size 1.0 +1035 83 model.embedding_dim 0.0 +1035 83 model.scoring_fct_norm 1.0 +1035 83 optimizer.lr 0.0031349751892915385 +1035 83 negative_sampler.num_negs_per_pos 88.0 +1035 83 
training.batch_size 1.0 +1035 84 model.embedding_dim 1.0 +1035 84 model.scoring_fct_norm 1.0 +1035 84 optimizer.lr 0.013138483285526099 +1035 84 negative_sampler.num_negs_per_pos 0.0 +1035 84 training.batch_size 0.0 +1035 85 model.embedding_dim 0.0 +1035 85 model.scoring_fct_norm 2.0 +1035 85 optimizer.lr 0.005938143058127455 +1035 85 negative_sampler.num_negs_per_pos 27.0 +1035 85 training.batch_size 2.0 +1035 86 model.embedding_dim 0.0 +1035 86 model.scoring_fct_norm 2.0 +1035 86 optimizer.lr 0.025746338210993234 +1035 86 negative_sampler.num_negs_per_pos 2.0 +1035 86 training.batch_size 1.0 +1035 87 model.embedding_dim 0.0 +1035 87 model.scoring_fct_norm 1.0 +1035 87 optimizer.lr 0.03614115259036976 +1035 87 negative_sampler.num_negs_per_pos 9.0 +1035 87 training.batch_size 0.0 +1035 88 model.embedding_dim 2.0 +1035 88 model.scoring_fct_norm 1.0 +1035 88 optimizer.lr 0.0027686127117191183 +1035 88 negative_sampler.num_negs_per_pos 97.0 +1035 88 training.batch_size 1.0 +1035 89 model.embedding_dim 0.0 +1035 89 model.scoring_fct_norm 2.0 +1035 89 optimizer.lr 0.019956993735782153 +1035 89 negative_sampler.num_negs_per_pos 83.0 +1035 89 training.batch_size 2.0 +1035 90 model.embedding_dim 0.0 +1035 90 model.scoring_fct_norm 2.0 +1035 90 optimizer.lr 0.019459321078498067 +1035 90 negative_sampler.num_negs_per_pos 57.0 +1035 90 training.batch_size 1.0 +1035 91 model.embedding_dim 2.0 +1035 91 model.scoring_fct_norm 1.0 +1035 91 optimizer.lr 0.0035300129803986 +1035 91 negative_sampler.num_negs_per_pos 52.0 +1035 91 training.batch_size 2.0 +1035 92 model.embedding_dim 2.0 +1035 92 model.scoring_fct_norm 1.0 +1035 92 optimizer.lr 0.05767126372287894 +1035 92 negative_sampler.num_negs_per_pos 19.0 +1035 92 training.batch_size 1.0 +1035 93 model.embedding_dim 1.0 +1035 93 model.scoring_fct_norm 2.0 +1035 93 optimizer.lr 0.005381410023839685 +1035 93 negative_sampler.num_negs_per_pos 71.0 +1035 93 training.batch_size 1.0 +1035 94 model.embedding_dim 0.0 +1035 94 
model.scoring_fct_norm 1.0 +1035 94 optimizer.lr 0.029797899166589065 +1035 94 negative_sampler.num_negs_per_pos 41.0 +1035 94 training.batch_size 2.0 +1035 95 model.embedding_dim 1.0 +1035 95 model.scoring_fct_norm 1.0 +1035 95 optimizer.lr 0.003092709770483591 +1035 95 negative_sampler.num_negs_per_pos 58.0 +1035 95 training.batch_size 0.0 +1035 96 model.embedding_dim 1.0 +1035 96 model.scoring_fct_norm 1.0 +1035 96 optimizer.lr 0.0117086335433829 +1035 96 negative_sampler.num_negs_per_pos 31.0 +1035 96 training.batch_size 0.0 +1035 97 model.embedding_dim 0.0 +1035 97 model.scoring_fct_norm 2.0 +1035 97 optimizer.lr 0.00917404951100168 +1035 97 negative_sampler.num_negs_per_pos 4.0 +1035 97 training.batch_size 2.0 +1035 98 model.embedding_dim 1.0 +1035 98 model.scoring_fct_norm 2.0 +1035 98 optimizer.lr 0.007481924390923398 +1035 98 negative_sampler.num_negs_per_pos 56.0 +1035 98 training.batch_size 2.0 +1035 99 model.embedding_dim 2.0 +1035 99 model.scoring_fct_norm 2.0 +1035 99 optimizer.lr 0.03838961001966002 +1035 99 negative_sampler.num_negs_per_pos 0.0 +1035 99 training.batch_size 2.0 +1035 100 model.embedding_dim 2.0 +1035 100 model.scoring_fct_norm 1.0 +1035 100 optimizer.lr 0.0012707986109710399 +1035 100 negative_sampler.num_negs_per_pos 4.0 +1035 100 training.batch_size 0.0 +1035 1 dataset """wn18rr""" +1035 1 model """unstructuredmodel""" +1035 1 loss """softplus""" +1035 1 regularizer """no""" +1035 1 optimizer """adam""" +1035 1 training_loop """owa""" +1035 1 negative_sampler """basic""" +1035 1 evaluator """rankbased""" +1035 2 dataset """wn18rr""" +1035 2 model """unstructuredmodel""" +1035 2 loss """softplus""" +1035 2 regularizer """no""" +1035 2 optimizer """adam""" +1035 2 training_loop """owa""" +1035 2 negative_sampler """basic""" +1035 2 evaluator """rankbased""" +1035 3 dataset """wn18rr""" +1035 3 model """unstructuredmodel""" +1035 3 loss """softplus""" +1035 3 regularizer """no""" +1035 3 optimizer """adam""" +1035 3 training_loop 
"""owa""" +1035 3 negative_sampler """basic""" +1035 3 evaluator """rankbased""" +1035 4 dataset """wn18rr""" +1035 4 model """unstructuredmodel""" +1035 4 loss """softplus""" +1035 4 regularizer """no""" +1035 4 optimizer """adam""" +1035 4 training_loop """owa""" +1035 4 negative_sampler """basic""" +1035 4 evaluator """rankbased""" +1035 5 dataset """wn18rr""" +1035 5 model """unstructuredmodel""" +1035 5 loss """softplus""" +1035 5 regularizer """no""" +1035 5 optimizer """adam""" +1035 5 training_loop """owa""" +1035 5 negative_sampler """basic""" +1035 5 evaluator """rankbased""" +1035 6 dataset """wn18rr""" +1035 6 model """unstructuredmodel""" +1035 6 loss """softplus""" +1035 6 regularizer """no""" +1035 6 optimizer """adam""" +1035 6 training_loop """owa""" +1035 6 negative_sampler """basic""" +1035 6 evaluator """rankbased""" +1035 7 dataset """wn18rr""" +1035 7 model """unstructuredmodel""" +1035 7 loss """softplus""" +1035 7 regularizer """no""" +1035 7 optimizer """adam""" +1035 7 training_loop """owa""" +1035 7 negative_sampler """basic""" +1035 7 evaluator """rankbased""" +1035 8 dataset """wn18rr""" +1035 8 model """unstructuredmodel""" +1035 8 loss """softplus""" +1035 8 regularizer """no""" +1035 8 optimizer """adam""" +1035 8 training_loop """owa""" +1035 8 negative_sampler """basic""" +1035 8 evaluator """rankbased""" +1035 9 dataset """wn18rr""" +1035 9 model """unstructuredmodel""" +1035 9 loss """softplus""" +1035 9 regularizer """no""" +1035 9 optimizer """adam""" +1035 9 training_loop """owa""" +1035 9 negative_sampler """basic""" +1035 9 evaluator """rankbased""" +1035 10 dataset """wn18rr""" +1035 10 model """unstructuredmodel""" +1035 10 loss """softplus""" +1035 10 regularizer """no""" +1035 10 optimizer """adam""" +1035 10 training_loop """owa""" +1035 10 negative_sampler """basic""" +1035 10 evaluator """rankbased""" +1035 11 dataset """wn18rr""" +1035 11 model """unstructuredmodel""" +1035 11 loss """softplus""" +1035 11 regularizer 
"""no""" +1035 11 optimizer """adam""" +1035 11 training_loop """owa""" +1035 11 negative_sampler """basic""" +1035 11 evaluator """rankbased""" +1035 12 dataset """wn18rr""" +1035 12 model """unstructuredmodel""" +1035 12 loss """softplus""" +1035 12 regularizer """no""" +1035 12 optimizer """adam""" +1035 12 training_loop """owa""" +1035 12 negative_sampler """basic""" +1035 12 evaluator """rankbased""" +1035 13 dataset """wn18rr""" +1035 13 model """unstructuredmodel""" +1035 13 loss """softplus""" +1035 13 regularizer """no""" +1035 13 optimizer """adam""" +1035 13 training_loop """owa""" +1035 13 negative_sampler """basic""" +1035 13 evaluator """rankbased""" +1035 14 dataset """wn18rr""" +1035 14 model """unstructuredmodel""" +1035 14 loss """softplus""" +1035 14 regularizer """no""" +1035 14 optimizer """adam""" +1035 14 training_loop """owa""" +1035 14 negative_sampler """basic""" +1035 14 evaluator """rankbased""" +1035 15 dataset """wn18rr""" +1035 15 model """unstructuredmodel""" +1035 15 loss """softplus""" +1035 15 regularizer """no""" +1035 15 optimizer """adam""" +1035 15 training_loop """owa""" +1035 15 negative_sampler """basic""" +1035 15 evaluator """rankbased""" +1035 16 dataset """wn18rr""" +1035 16 model """unstructuredmodel""" +1035 16 loss """softplus""" +1035 16 regularizer """no""" +1035 16 optimizer """adam""" +1035 16 training_loop """owa""" +1035 16 negative_sampler """basic""" +1035 16 evaluator """rankbased""" +1035 17 dataset """wn18rr""" +1035 17 model """unstructuredmodel""" +1035 17 loss """softplus""" +1035 17 regularizer """no""" +1035 17 optimizer """adam""" +1035 17 training_loop """owa""" +1035 17 negative_sampler """basic""" +1035 17 evaluator """rankbased""" +1035 18 dataset """wn18rr""" +1035 18 model """unstructuredmodel""" +1035 18 loss """softplus""" +1035 18 regularizer """no""" +1035 18 optimizer """adam""" +1035 18 training_loop """owa""" +1035 18 negative_sampler """basic""" +1035 18 evaluator """rankbased""" +1035 
19 dataset """wn18rr""" +1035 19 model """unstructuredmodel""" +1035 19 loss """softplus""" +1035 19 regularizer """no""" +1035 19 optimizer """adam""" +1035 19 training_loop """owa""" +1035 19 negative_sampler """basic""" +1035 19 evaluator """rankbased""" +1035 20 dataset """wn18rr""" +1035 20 model """unstructuredmodel""" +1035 20 loss """softplus""" +1035 20 regularizer """no""" +1035 20 optimizer """adam""" +1035 20 training_loop """owa""" +1035 20 negative_sampler """basic""" +1035 20 evaluator """rankbased""" +1035 21 dataset """wn18rr""" +1035 21 model """unstructuredmodel""" +1035 21 loss """softplus""" +1035 21 regularizer """no""" +1035 21 optimizer """adam""" +1035 21 training_loop """owa""" +1035 21 negative_sampler """basic""" +1035 21 evaluator """rankbased""" +1035 22 dataset """wn18rr""" +1035 22 model """unstructuredmodel""" +1035 22 loss """softplus""" +1035 22 regularizer """no""" +1035 22 optimizer """adam""" +1035 22 training_loop """owa""" +1035 22 negative_sampler """basic""" +1035 22 evaluator """rankbased""" +1035 23 dataset """wn18rr""" +1035 23 model """unstructuredmodel""" +1035 23 loss """softplus""" +1035 23 regularizer """no""" +1035 23 optimizer """adam""" +1035 23 training_loop """owa""" +1035 23 negative_sampler """basic""" +1035 23 evaluator """rankbased""" +1035 24 dataset """wn18rr""" +1035 24 model """unstructuredmodel""" +1035 24 loss """softplus""" +1035 24 regularizer """no""" +1035 24 optimizer """adam""" +1035 24 training_loop """owa""" +1035 24 negative_sampler """basic""" +1035 24 evaluator """rankbased""" +1035 25 dataset """wn18rr""" +1035 25 model """unstructuredmodel""" +1035 25 loss """softplus""" +1035 25 regularizer """no""" +1035 25 optimizer """adam""" +1035 25 training_loop """owa""" +1035 25 negative_sampler """basic""" +1035 25 evaluator """rankbased""" +1035 26 dataset """wn18rr""" +1035 26 model """unstructuredmodel""" +1035 26 loss """softplus""" +1035 26 regularizer """no""" +1035 26 optimizer """adam""" 
+1035 26 training_loop """owa""" +1035 26 negative_sampler """basic""" +1035 26 evaluator """rankbased""" +1035 27 dataset """wn18rr""" +1035 27 model """unstructuredmodel""" +1035 27 loss """softplus""" +1035 27 regularizer """no""" +1035 27 optimizer """adam""" +1035 27 training_loop """owa""" +1035 27 negative_sampler """basic""" +1035 27 evaluator """rankbased""" +1035 28 dataset """wn18rr""" +1035 28 model """unstructuredmodel""" +1035 28 loss """softplus""" +1035 28 regularizer """no""" +1035 28 optimizer """adam""" +1035 28 training_loop """owa""" +1035 28 negative_sampler """basic""" +1035 28 evaluator """rankbased""" +1035 29 dataset """wn18rr""" +1035 29 model """unstructuredmodel""" +1035 29 loss """softplus""" +1035 29 regularizer """no""" +1035 29 optimizer """adam""" +1035 29 training_loop """owa""" +1035 29 negative_sampler """basic""" +1035 29 evaluator """rankbased""" +1035 30 dataset """wn18rr""" +1035 30 model """unstructuredmodel""" +1035 30 loss """softplus""" +1035 30 regularizer """no""" +1035 30 optimizer """adam""" +1035 30 training_loop """owa""" +1035 30 negative_sampler """basic""" +1035 30 evaluator """rankbased""" +1035 31 dataset """wn18rr""" +1035 31 model """unstructuredmodel""" +1035 31 loss """softplus""" +1035 31 regularizer """no""" +1035 31 optimizer """adam""" +1035 31 training_loop """owa""" +1035 31 negative_sampler """basic""" +1035 31 evaluator """rankbased""" +1035 32 dataset """wn18rr""" +1035 32 model """unstructuredmodel""" +1035 32 loss """softplus""" +1035 32 regularizer """no""" +1035 32 optimizer """adam""" +1035 32 training_loop """owa""" +1035 32 negative_sampler """basic""" +1035 32 evaluator """rankbased""" +1035 33 dataset """wn18rr""" +1035 33 model """unstructuredmodel""" +1035 33 loss """softplus""" +1035 33 regularizer """no""" +1035 33 optimizer """adam""" +1035 33 training_loop """owa""" +1035 33 negative_sampler """basic""" +1035 33 evaluator """rankbased""" +1035 34 dataset """wn18rr""" +1035 34 model 
"""unstructuredmodel""" +1035 34 loss """softplus""" +1035 34 regularizer """no""" +1035 34 optimizer """adam""" +1035 34 training_loop """owa""" +1035 34 negative_sampler """basic""" +1035 34 evaluator """rankbased""" +1035 35 dataset """wn18rr""" +1035 35 model """unstructuredmodel""" +1035 35 loss """softplus""" +1035 35 regularizer """no""" +1035 35 optimizer """adam""" +1035 35 training_loop """owa""" +1035 35 negative_sampler """basic""" +1035 35 evaluator """rankbased""" +1035 36 dataset """wn18rr""" +1035 36 model """unstructuredmodel""" +1035 36 loss """softplus""" +1035 36 regularizer """no""" +1035 36 optimizer """adam""" +1035 36 training_loop """owa""" +1035 36 negative_sampler """basic""" +1035 36 evaluator """rankbased""" +1035 37 dataset """wn18rr""" +1035 37 model """unstructuredmodel""" +1035 37 loss """softplus""" +1035 37 regularizer """no""" +1035 37 optimizer """adam""" +1035 37 training_loop """owa""" +1035 37 negative_sampler """basic""" +1035 37 evaluator """rankbased""" +1035 38 dataset """wn18rr""" +1035 38 model """unstructuredmodel""" +1035 38 loss """softplus""" +1035 38 regularizer """no""" +1035 38 optimizer """adam""" +1035 38 training_loop """owa""" +1035 38 negative_sampler """basic""" +1035 38 evaluator """rankbased""" +1035 39 dataset """wn18rr""" +1035 39 model """unstructuredmodel""" +1035 39 loss """softplus""" +1035 39 regularizer """no""" +1035 39 optimizer """adam""" +1035 39 training_loop """owa""" +1035 39 negative_sampler """basic""" +1035 39 evaluator """rankbased""" +1035 40 dataset """wn18rr""" +1035 40 model """unstructuredmodel""" +1035 40 loss """softplus""" +1035 40 regularizer """no""" +1035 40 optimizer """adam""" +1035 40 training_loop """owa""" +1035 40 negative_sampler """basic""" +1035 40 evaluator """rankbased""" +1035 41 dataset """wn18rr""" +1035 41 model """unstructuredmodel""" +1035 41 loss """softplus""" +1035 41 regularizer """no""" +1035 41 optimizer """adam""" +1035 41 training_loop """owa""" +1035 
41 negative_sampler """basic""" +1035 41 evaluator """rankbased""" +1035 42 dataset """wn18rr""" +1035 42 model """unstructuredmodel""" +1035 42 loss """softplus""" +1035 42 regularizer """no""" +1035 42 optimizer """adam""" +1035 42 training_loop """owa""" +1035 42 negative_sampler """basic""" +1035 42 evaluator """rankbased""" +1035 43 dataset """wn18rr""" +1035 43 model """unstructuredmodel""" +1035 43 loss """softplus""" +1035 43 regularizer """no""" +1035 43 optimizer """adam""" +1035 43 training_loop """owa""" +1035 43 negative_sampler """basic""" +1035 43 evaluator """rankbased""" +1035 44 dataset """wn18rr""" +1035 44 model """unstructuredmodel""" +1035 44 loss """softplus""" +1035 44 regularizer """no""" +1035 44 optimizer """adam""" +1035 44 training_loop """owa""" +1035 44 negative_sampler """basic""" +1035 44 evaluator """rankbased""" +1035 45 dataset """wn18rr""" +1035 45 model """unstructuredmodel""" +1035 45 loss """softplus""" +1035 45 regularizer """no""" +1035 45 optimizer """adam""" +1035 45 training_loop """owa""" +1035 45 negative_sampler """basic""" +1035 45 evaluator """rankbased""" +1035 46 dataset """wn18rr""" +1035 46 model """unstructuredmodel""" +1035 46 loss """softplus""" +1035 46 regularizer """no""" +1035 46 optimizer """adam""" +1035 46 training_loop """owa""" +1035 46 negative_sampler """basic""" +1035 46 evaluator """rankbased""" +1035 47 dataset """wn18rr""" +1035 47 model """unstructuredmodel""" +1035 47 loss """softplus""" +1035 47 regularizer """no""" +1035 47 optimizer """adam""" +1035 47 training_loop """owa""" +1035 47 negative_sampler """basic""" +1035 47 evaluator """rankbased""" +1035 48 dataset """wn18rr""" +1035 48 model """unstructuredmodel""" +1035 48 loss """softplus""" +1035 48 regularizer """no""" +1035 48 optimizer """adam""" +1035 48 training_loop """owa""" +1035 48 negative_sampler """basic""" +1035 48 evaluator """rankbased""" +1035 49 dataset """wn18rr""" +1035 49 model """unstructuredmodel""" +1035 49 loss 
"""softplus""" +1035 49 regularizer """no""" +1035 49 optimizer """adam""" +1035 49 training_loop """owa""" +1035 49 negative_sampler """basic""" +1035 49 evaluator """rankbased""" +1035 50 dataset """wn18rr""" +1035 50 model """unstructuredmodel""" +1035 50 loss """softplus""" +1035 50 regularizer """no""" +1035 50 optimizer """adam""" +1035 50 training_loop """owa""" +1035 50 negative_sampler """basic""" +1035 50 evaluator """rankbased""" +1035 51 dataset """wn18rr""" +1035 51 model """unstructuredmodel""" +1035 51 loss """softplus""" +1035 51 regularizer """no""" +1035 51 optimizer """adam""" +1035 51 training_loop """owa""" +1035 51 negative_sampler """basic""" +1035 51 evaluator """rankbased""" +1035 52 dataset """wn18rr""" +1035 52 model """unstructuredmodel""" +1035 52 loss """softplus""" +1035 52 regularizer """no""" +1035 52 optimizer """adam""" +1035 52 training_loop """owa""" +1035 52 negative_sampler """basic""" +1035 52 evaluator """rankbased""" +1035 53 dataset """wn18rr""" +1035 53 model """unstructuredmodel""" +1035 53 loss """softplus""" +1035 53 regularizer """no""" +1035 53 optimizer """adam""" +1035 53 training_loop """owa""" +1035 53 negative_sampler """basic""" +1035 53 evaluator """rankbased""" +1035 54 dataset """wn18rr""" +1035 54 model """unstructuredmodel""" +1035 54 loss """softplus""" +1035 54 regularizer """no""" +1035 54 optimizer """adam""" +1035 54 training_loop """owa""" +1035 54 negative_sampler """basic""" +1035 54 evaluator """rankbased""" +1035 55 dataset """wn18rr""" +1035 55 model """unstructuredmodel""" +1035 55 loss """softplus""" +1035 55 regularizer """no""" +1035 55 optimizer """adam""" +1035 55 training_loop """owa""" +1035 55 negative_sampler """basic""" +1035 55 evaluator """rankbased""" +1035 56 dataset """wn18rr""" +1035 56 model """unstructuredmodel""" +1035 56 loss """softplus""" +1035 56 regularizer """no""" +1035 56 optimizer """adam""" +1035 56 training_loop """owa""" +1035 56 negative_sampler """basic""" +1035 
56 evaluator """rankbased""" +1035 57 dataset """wn18rr""" +1035 57 model """unstructuredmodel""" +1035 57 loss """softplus""" +1035 57 regularizer """no""" +1035 57 optimizer """adam""" +1035 57 training_loop """owa""" +1035 57 negative_sampler """basic""" +1035 57 evaluator """rankbased""" +1035 58 dataset """wn18rr""" +1035 58 model """unstructuredmodel""" +1035 58 loss """softplus""" +1035 58 regularizer """no""" +1035 58 optimizer """adam""" +1035 58 training_loop """owa""" +1035 58 negative_sampler """basic""" +1035 58 evaluator """rankbased""" +1035 59 dataset """wn18rr""" +1035 59 model """unstructuredmodel""" +1035 59 loss """softplus""" +1035 59 regularizer """no""" +1035 59 optimizer """adam""" +1035 59 training_loop """owa""" +1035 59 negative_sampler """basic""" +1035 59 evaluator """rankbased""" +1035 60 dataset """wn18rr""" +1035 60 model """unstructuredmodel""" +1035 60 loss """softplus""" +1035 60 regularizer """no""" +1035 60 optimizer """adam""" +1035 60 training_loop """owa""" +1035 60 negative_sampler """basic""" +1035 60 evaluator """rankbased""" +1035 61 dataset """wn18rr""" +1035 61 model """unstructuredmodel""" +1035 61 loss """softplus""" +1035 61 regularizer """no""" +1035 61 optimizer """adam""" +1035 61 training_loop """owa""" +1035 61 negative_sampler """basic""" +1035 61 evaluator """rankbased""" +1035 62 dataset """wn18rr""" +1035 62 model """unstructuredmodel""" +1035 62 loss """softplus""" +1035 62 regularizer """no""" +1035 62 optimizer """adam""" +1035 62 training_loop """owa""" +1035 62 negative_sampler """basic""" +1035 62 evaluator """rankbased""" +1035 63 dataset """wn18rr""" +1035 63 model """unstructuredmodel""" +1035 63 loss """softplus""" +1035 63 regularizer """no""" +1035 63 optimizer """adam""" +1035 63 training_loop """owa""" +1035 63 negative_sampler """basic""" +1035 63 evaluator """rankbased""" +1035 64 dataset """wn18rr""" +1035 64 model """unstructuredmodel""" +1035 64 loss """softplus""" +1035 64 regularizer 
"""no""" +1035 64 optimizer """adam""" +1035 64 training_loop """owa""" +1035 64 negative_sampler """basic""" +1035 64 evaluator """rankbased""" +1035 65 dataset """wn18rr""" +1035 65 model """unstructuredmodel""" +1035 65 loss """softplus""" +1035 65 regularizer """no""" +1035 65 optimizer """adam""" +1035 65 training_loop """owa""" +1035 65 negative_sampler """basic""" +1035 65 evaluator """rankbased""" +1035 66 dataset """wn18rr""" +1035 66 model """unstructuredmodel""" +1035 66 loss """softplus""" +1035 66 regularizer """no""" +1035 66 optimizer """adam""" +1035 66 training_loop """owa""" +1035 66 negative_sampler """basic""" +1035 66 evaluator """rankbased""" +1035 67 dataset """wn18rr""" +1035 67 model """unstructuredmodel""" +1035 67 loss """softplus""" +1035 67 regularizer """no""" +1035 67 optimizer """adam""" +1035 67 training_loop """owa""" +1035 67 negative_sampler """basic""" +1035 67 evaluator """rankbased""" +1035 68 dataset """wn18rr""" +1035 68 model """unstructuredmodel""" +1035 68 loss """softplus""" +1035 68 regularizer """no""" +1035 68 optimizer """adam""" +1035 68 training_loop """owa""" +1035 68 negative_sampler """basic""" +1035 68 evaluator """rankbased""" +1035 69 dataset """wn18rr""" +1035 69 model """unstructuredmodel""" +1035 69 loss """softplus""" +1035 69 regularizer """no""" +1035 69 optimizer """adam""" +1035 69 training_loop """owa""" +1035 69 negative_sampler """basic""" +1035 69 evaluator """rankbased""" +1035 70 dataset """wn18rr""" +1035 70 model """unstructuredmodel""" +1035 70 loss """softplus""" +1035 70 regularizer """no""" +1035 70 optimizer """adam""" +1035 70 training_loop """owa""" +1035 70 negative_sampler """basic""" +1035 70 evaluator """rankbased""" +1035 71 dataset """wn18rr""" +1035 71 model """unstructuredmodel""" +1035 71 loss """softplus""" +1035 71 regularizer """no""" +1035 71 optimizer """adam""" +1035 71 training_loop """owa""" +1035 71 negative_sampler """basic""" +1035 71 evaluator """rankbased""" +1035 
72 dataset """wn18rr""" +1035 72 model """unstructuredmodel""" +1035 72 loss """softplus""" +1035 72 regularizer """no""" +1035 72 optimizer """adam""" +1035 72 training_loop """owa""" +1035 72 negative_sampler """basic""" +1035 72 evaluator """rankbased""" +1035 73 dataset """wn18rr""" +1035 73 model """unstructuredmodel""" +1035 73 loss """softplus""" +1035 73 regularizer """no""" +1035 73 optimizer """adam""" +1035 73 training_loop """owa""" +1035 73 negative_sampler """basic""" +1035 73 evaluator """rankbased""" +1035 74 dataset """wn18rr""" +1035 74 model """unstructuredmodel""" +1035 74 loss """softplus""" +1035 74 regularizer """no""" +1035 74 optimizer """adam""" +1035 74 training_loop """owa""" +1035 74 negative_sampler """basic""" +1035 74 evaluator """rankbased""" +1035 75 dataset """wn18rr""" +1035 75 model """unstructuredmodel""" +1035 75 loss """softplus""" +1035 75 regularizer """no""" +1035 75 optimizer """adam""" +1035 75 training_loop """owa""" +1035 75 negative_sampler """basic""" +1035 75 evaluator """rankbased""" +1035 76 dataset """wn18rr""" +1035 76 model """unstructuredmodel""" +1035 76 loss """softplus""" +1035 76 regularizer """no""" +1035 76 optimizer """adam""" +1035 76 training_loop """owa""" +1035 76 negative_sampler """basic""" +1035 76 evaluator """rankbased""" +1035 77 dataset """wn18rr""" +1035 77 model """unstructuredmodel""" +1035 77 loss """softplus""" +1035 77 regularizer """no""" +1035 77 optimizer """adam""" +1035 77 training_loop """owa""" +1035 77 negative_sampler """basic""" +1035 77 evaluator """rankbased""" +1035 78 dataset """wn18rr""" +1035 78 model """unstructuredmodel""" +1035 78 loss """softplus""" +1035 78 regularizer """no""" +1035 78 optimizer """adam""" +1035 78 training_loop """owa""" +1035 78 negative_sampler """basic""" +1035 78 evaluator """rankbased""" +1035 79 dataset """wn18rr""" +1035 79 model """unstructuredmodel""" +1035 79 loss """softplus""" +1035 79 regularizer """no""" +1035 79 optimizer """adam""" 
+1035 79 training_loop """owa""" +1035 79 negative_sampler """basic""" +1035 79 evaluator """rankbased""" +1035 80 dataset """wn18rr""" +1035 80 model """unstructuredmodel""" +1035 80 loss """softplus""" +1035 80 regularizer """no""" +1035 80 optimizer """adam""" +1035 80 training_loop """owa""" +1035 80 negative_sampler """basic""" +1035 80 evaluator """rankbased""" +1035 81 dataset """wn18rr""" +1035 81 model """unstructuredmodel""" +1035 81 loss """softplus""" +1035 81 regularizer """no""" +1035 81 optimizer """adam""" +1035 81 training_loop """owa""" +1035 81 negative_sampler """basic""" +1035 81 evaluator """rankbased""" +1035 82 dataset """wn18rr""" +1035 82 model """unstructuredmodel""" +1035 82 loss """softplus""" +1035 82 regularizer """no""" +1035 82 optimizer """adam""" +1035 82 training_loop """owa""" +1035 82 negative_sampler """basic""" +1035 82 evaluator """rankbased""" +1035 83 dataset """wn18rr""" +1035 83 model """unstructuredmodel""" +1035 83 loss """softplus""" +1035 83 regularizer """no""" +1035 83 optimizer """adam""" +1035 83 training_loop """owa""" +1035 83 negative_sampler """basic""" +1035 83 evaluator """rankbased""" +1035 84 dataset """wn18rr""" +1035 84 model """unstructuredmodel""" +1035 84 loss """softplus""" +1035 84 regularizer """no""" +1035 84 optimizer """adam""" +1035 84 training_loop """owa""" +1035 84 negative_sampler """basic""" +1035 84 evaluator """rankbased""" +1035 85 dataset """wn18rr""" +1035 85 model """unstructuredmodel""" +1035 85 loss """softplus""" +1035 85 regularizer """no""" +1035 85 optimizer """adam""" +1035 85 training_loop """owa""" +1035 85 negative_sampler """basic""" +1035 85 evaluator """rankbased""" +1035 86 dataset """wn18rr""" +1035 86 model """unstructuredmodel""" +1035 86 loss """softplus""" +1035 86 regularizer """no""" +1035 86 optimizer """adam""" +1035 86 training_loop """owa""" +1035 86 negative_sampler """basic""" +1035 86 evaluator """rankbased""" +1035 87 dataset """wn18rr""" +1035 87 model 
"""unstructuredmodel""" +1035 87 loss """softplus""" +1035 87 regularizer """no""" +1035 87 optimizer """adam""" +1035 87 training_loop """owa""" +1035 87 negative_sampler """basic""" +1035 87 evaluator """rankbased""" +1035 88 dataset """wn18rr""" +1035 88 model """unstructuredmodel""" +1035 88 loss """softplus""" +1035 88 regularizer """no""" +1035 88 optimizer """adam""" +1035 88 training_loop """owa""" +1035 88 negative_sampler """basic""" +1035 88 evaluator """rankbased""" +1035 89 dataset """wn18rr""" +1035 89 model """unstructuredmodel""" +1035 89 loss """softplus""" +1035 89 regularizer """no""" +1035 89 optimizer """adam""" +1035 89 training_loop """owa""" +1035 89 negative_sampler """basic""" +1035 89 evaluator """rankbased""" +1035 90 dataset """wn18rr""" +1035 90 model """unstructuredmodel""" +1035 90 loss """softplus""" +1035 90 regularizer """no""" +1035 90 optimizer """adam""" +1035 90 training_loop """owa""" +1035 90 negative_sampler """basic""" +1035 90 evaluator """rankbased""" +1035 91 dataset """wn18rr""" +1035 91 model """unstructuredmodel""" +1035 91 loss """softplus""" +1035 91 regularizer """no""" +1035 91 optimizer """adam""" +1035 91 training_loop """owa""" +1035 91 negative_sampler """basic""" +1035 91 evaluator """rankbased""" +1035 92 dataset """wn18rr""" +1035 92 model """unstructuredmodel""" +1035 92 loss """softplus""" +1035 92 regularizer """no""" +1035 92 optimizer """adam""" +1035 92 training_loop """owa""" +1035 92 negative_sampler """basic""" +1035 92 evaluator """rankbased""" +1035 93 dataset """wn18rr""" +1035 93 model """unstructuredmodel""" +1035 93 loss """softplus""" +1035 93 regularizer """no""" +1035 93 optimizer """adam""" +1035 93 training_loop """owa""" +1035 93 negative_sampler """basic""" +1035 93 evaluator """rankbased""" +1035 94 dataset """wn18rr""" +1035 94 model """unstructuredmodel""" +1035 94 loss """softplus""" +1035 94 regularizer """no""" +1035 94 optimizer """adam""" +1035 94 training_loop """owa""" +1035 
94 negative_sampler """basic""" +1035 94 evaluator """rankbased""" +1035 95 dataset """wn18rr""" +1035 95 model """unstructuredmodel""" +1035 95 loss """softplus""" +1035 95 regularizer """no""" +1035 95 optimizer """adam""" +1035 95 training_loop """owa""" +1035 95 negative_sampler """basic""" +1035 95 evaluator """rankbased""" +1035 96 dataset """wn18rr""" +1035 96 model """unstructuredmodel""" +1035 96 loss """softplus""" +1035 96 regularizer """no""" +1035 96 optimizer """adam""" +1035 96 training_loop """owa""" +1035 96 negative_sampler """basic""" +1035 96 evaluator """rankbased""" +1035 97 dataset """wn18rr""" +1035 97 model """unstructuredmodel""" +1035 97 loss """softplus""" +1035 97 regularizer """no""" +1035 97 optimizer """adam""" +1035 97 training_loop """owa""" +1035 97 negative_sampler """basic""" +1035 97 evaluator """rankbased""" +1035 98 dataset """wn18rr""" +1035 98 model """unstructuredmodel""" +1035 98 loss """softplus""" +1035 98 regularizer """no""" +1035 98 optimizer """adam""" +1035 98 training_loop """owa""" +1035 98 negative_sampler """basic""" +1035 98 evaluator """rankbased""" +1035 99 dataset """wn18rr""" +1035 99 model """unstructuredmodel""" +1035 99 loss """softplus""" +1035 99 regularizer """no""" +1035 99 optimizer """adam""" +1035 99 training_loop """owa""" +1035 99 negative_sampler """basic""" +1035 99 evaluator """rankbased""" +1035 100 dataset """wn18rr""" +1035 100 model """unstructuredmodel""" +1035 100 loss """softplus""" +1035 100 regularizer """no""" +1035 100 optimizer """adam""" +1035 100 training_loop """owa""" +1035 100 negative_sampler """basic""" +1035 100 evaluator """rankbased""" +1036 1 model.embedding_dim 1.0 +1036 1 model.scoring_fct_norm 1.0 +1036 1 optimizer.lr 0.011824669481361941 +1036 1 negative_sampler.num_negs_per_pos 68.0 +1036 1 training.batch_size 1.0 +1036 2 model.embedding_dim 0.0 +1036 2 model.scoring_fct_norm 1.0 +1036 2 optimizer.lr 0.001028850512593234 +1036 2 negative_sampler.num_negs_per_pos 
22.0 +1036 2 training.batch_size 0.0 +1036 3 model.embedding_dim 2.0 +1036 3 model.scoring_fct_norm 2.0 +1036 3 optimizer.lr 0.01585270330892662 +1036 3 negative_sampler.num_negs_per_pos 46.0 +1036 3 training.batch_size 1.0 +1036 4 model.embedding_dim 2.0 +1036 4 model.scoring_fct_norm 1.0 +1036 4 optimizer.lr 0.011492292079722494 +1036 4 negative_sampler.num_negs_per_pos 86.0 +1036 4 training.batch_size 2.0 +1036 5 model.embedding_dim 2.0 +1036 5 model.scoring_fct_norm 2.0 +1036 5 optimizer.lr 0.007844524610077525 +1036 5 negative_sampler.num_negs_per_pos 59.0 +1036 5 training.batch_size 0.0 +1036 6 model.embedding_dim 0.0 +1036 6 model.scoring_fct_norm 2.0 +1036 6 optimizer.lr 0.03793667074381844 +1036 6 negative_sampler.num_negs_per_pos 87.0 +1036 6 training.batch_size 1.0 +1036 7 model.embedding_dim 0.0 +1036 7 model.scoring_fct_norm 1.0 +1036 7 optimizer.lr 0.018888141618919767 +1036 7 negative_sampler.num_negs_per_pos 16.0 +1036 7 training.batch_size 0.0 +1036 8 model.embedding_dim 1.0 +1036 8 model.scoring_fct_norm 1.0 +1036 8 optimizer.lr 0.02078625875809931 +1036 8 negative_sampler.num_negs_per_pos 60.0 +1036 8 training.batch_size 2.0 +1036 9 model.embedding_dim 1.0 +1036 9 model.scoring_fct_norm 1.0 +1036 9 optimizer.lr 0.05841895760944403 +1036 9 negative_sampler.num_negs_per_pos 75.0 +1036 9 training.batch_size 0.0 +1036 10 model.embedding_dim 0.0 +1036 10 model.scoring_fct_norm 2.0 +1036 10 optimizer.lr 0.028512116223849873 +1036 10 negative_sampler.num_negs_per_pos 32.0 +1036 10 training.batch_size 2.0 +1036 11 model.embedding_dim 0.0 +1036 11 model.scoring_fct_norm 1.0 +1036 11 optimizer.lr 0.008570183152148409 +1036 11 negative_sampler.num_negs_per_pos 86.0 +1036 11 training.batch_size 0.0 +1036 12 model.embedding_dim 2.0 +1036 12 model.scoring_fct_norm 1.0 +1036 12 optimizer.lr 0.09203753804027512 +1036 12 negative_sampler.num_negs_per_pos 36.0 +1036 12 training.batch_size 0.0 +1036 13 model.embedding_dim 0.0 +1036 13 model.scoring_fct_norm 2.0 
+1036 13 optimizer.lr 0.002393772698399401 +1036 13 negative_sampler.num_negs_per_pos 9.0 +1036 13 training.batch_size 1.0 +1036 14 model.embedding_dim 1.0 +1036 14 model.scoring_fct_norm 2.0 +1036 14 optimizer.lr 0.028818072290579047 +1036 14 negative_sampler.num_negs_per_pos 60.0 +1036 14 training.batch_size 0.0 +1036 15 model.embedding_dim 2.0 +1036 15 model.scoring_fct_norm 1.0 +1036 15 optimizer.lr 0.05911272740531313 +1036 15 negative_sampler.num_negs_per_pos 28.0 +1036 15 training.batch_size 0.0 +1036 16 model.embedding_dim 2.0 +1036 16 model.scoring_fct_norm 2.0 +1036 16 optimizer.lr 0.011373457252890455 +1036 16 negative_sampler.num_negs_per_pos 7.0 +1036 16 training.batch_size 0.0 +1036 17 model.embedding_dim 0.0 +1036 17 model.scoring_fct_norm 1.0 +1036 17 optimizer.lr 0.00635487787989132 +1036 17 negative_sampler.num_negs_per_pos 99.0 +1036 17 training.batch_size 0.0 +1036 18 model.embedding_dim 0.0 +1036 18 model.scoring_fct_norm 2.0 +1036 18 optimizer.lr 0.0011458334085743202 +1036 18 negative_sampler.num_negs_per_pos 96.0 +1036 18 training.batch_size 2.0 +1036 19 model.embedding_dim 1.0 +1036 19 model.scoring_fct_norm 1.0 +1036 19 optimizer.lr 0.0018273352655454162 +1036 19 negative_sampler.num_negs_per_pos 68.0 +1036 19 training.batch_size 0.0 +1036 20 model.embedding_dim 1.0 +1036 20 model.scoring_fct_norm 2.0 +1036 20 optimizer.lr 0.008865128858615408 +1036 20 negative_sampler.num_negs_per_pos 94.0 +1036 20 training.batch_size 2.0 +1036 21 model.embedding_dim 0.0 +1036 21 model.scoring_fct_norm 2.0 +1036 21 optimizer.lr 0.006304310794163422 +1036 21 negative_sampler.num_negs_per_pos 32.0 +1036 21 training.batch_size 0.0 +1036 22 model.embedding_dim 0.0 +1036 22 model.scoring_fct_norm 2.0 +1036 22 optimizer.lr 0.001225852383410165 +1036 22 negative_sampler.num_negs_per_pos 35.0 +1036 22 training.batch_size 1.0 +1036 23 model.embedding_dim 0.0 +1036 23 model.scoring_fct_norm 2.0 +1036 23 optimizer.lr 0.0017767848279971405 +1036 23 
negative_sampler.num_negs_per_pos 5.0 +1036 23 training.batch_size 0.0 +1036 24 model.embedding_dim 2.0 +1036 24 model.scoring_fct_norm 1.0 +1036 24 optimizer.lr 0.0038658471502708547 +1036 24 negative_sampler.num_negs_per_pos 49.0 +1036 24 training.batch_size 1.0 +1036 25 model.embedding_dim 0.0 +1036 25 model.scoring_fct_norm 2.0 +1036 25 optimizer.lr 0.00274793734688588 +1036 25 negative_sampler.num_negs_per_pos 67.0 +1036 25 training.batch_size 2.0 +1036 26 model.embedding_dim 1.0 +1036 26 model.scoring_fct_norm 2.0 +1036 26 optimizer.lr 0.031109733298122114 +1036 26 negative_sampler.num_negs_per_pos 79.0 +1036 26 training.batch_size 0.0 +1036 27 model.embedding_dim 1.0 +1036 27 model.scoring_fct_norm 1.0 +1036 27 optimizer.lr 0.0026822302212261714 +1036 27 negative_sampler.num_negs_per_pos 28.0 +1036 27 training.batch_size 2.0 +1036 28 model.embedding_dim 0.0 +1036 28 model.scoring_fct_norm 2.0 +1036 28 optimizer.lr 0.021348927358409795 +1036 28 negative_sampler.num_negs_per_pos 40.0 +1036 28 training.batch_size 2.0 +1036 29 model.embedding_dim 1.0 +1036 29 model.scoring_fct_norm 1.0 +1036 29 optimizer.lr 0.005319985022964069 +1036 29 negative_sampler.num_negs_per_pos 6.0 +1036 29 training.batch_size 1.0 +1036 30 model.embedding_dim 2.0 +1036 30 model.scoring_fct_norm 2.0 +1036 30 optimizer.lr 0.005916976573124153 +1036 30 negative_sampler.num_negs_per_pos 37.0 +1036 30 training.batch_size 1.0 +1036 31 model.embedding_dim 1.0 +1036 31 model.scoring_fct_norm 1.0 +1036 31 optimizer.lr 0.001375161175367074 +1036 31 negative_sampler.num_negs_per_pos 39.0 +1036 31 training.batch_size 0.0 +1036 32 model.embedding_dim 2.0 +1036 32 model.scoring_fct_norm 2.0 +1036 32 optimizer.lr 0.003425639021005735 +1036 32 negative_sampler.num_negs_per_pos 75.0 +1036 32 training.batch_size 2.0 +1036 33 model.embedding_dim 2.0 +1036 33 model.scoring_fct_norm 1.0 +1036 33 optimizer.lr 0.007698146707920607 +1036 33 negative_sampler.num_negs_per_pos 23.0 +1036 33 training.batch_size 
2.0 +1036 34 model.embedding_dim 2.0 +1036 34 model.scoring_fct_norm 1.0 +1036 34 optimizer.lr 0.01567613794789651 +1036 34 negative_sampler.num_negs_per_pos 82.0 +1036 34 training.batch_size 1.0 +1036 35 model.embedding_dim 2.0 +1036 35 model.scoring_fct_norm 2.0 +1036 35 optimizer.lr 0.03877540766204587 +1036 35 negative_sampler.num_negs_per_pos 78.0 +1036 35 training.batch_size 2.0 +1036 36 model.embedding_dim 0.0 +1036 36 model.scoring_fct_norm 2.0 +1036 36 optimizer.lr 0.010679108069502043 +1036 36 negative_sampler.num_negs_per_pos 41.0 +1036 36 training.batch_size 1.0 +1036 37 model.embedding_dim 0.0 +1036 37 model.scoring_fct_norm 2.0 +1036 37 optimizer.lr 0.0026666002974712762 +1036 37 negative_sampler.num_negs_per_pos 24.0 +1036 37 training.batch_size 2.0 +1036 38 model.embedding_dim 1.0 +1036 38 model.scoring_fct_norm 1.0 +1036 38 optimizer.lr 0.008999999164968723 +1036 38 negative_sampler.num_negs_per_pos 9.0 +1036 38 training.batch_size 2.0 +1036 39 model.embedding_dim 0.0 +1036 39 model.scoring_fct_norm 2.0 +1036 39 optimizer.lr 0.06226471273942769 +1036 39 negative_sampler.num_negs_per_pos 91.0 +1036 39 training.batch_size 2.0 +1036 40 model.embedding_dim 0.0 +1036 40 model.scoring_fct_norm 2.0 +1036 40 optimizer.lr 0.008498513666887109 +1036 40 negative_sampler.num_negs_per_pos 44.0 +1036 40 training.batch_size 2.0 +1036 41 model.embedding_dim 2.0 +1036 41 model.scoring_fct_norm 1.0 +1036 41 optimizer.lr 0.048465823957256 +1036 41 negative_sampler.num_negs_per_pos 37.0 +1036 41 training.batch_size 2.0 +1036 42 model.embedding_dim 1.0 +1036 42 model.scoring_fct_norm 1.0 +1036 42 optimizer.lr 0.05119090114774917 +1036 42 negative_sampler.num_negs_per_pos 91.0 +1036 42 training.batch_size 2.0 +1036 43 model.embedding_dim 0.0 +1036 43 model.scoring_fct_norm 1.0 +1036 43 optimizer.lr 0.028607701328237346 +1036 43 negative_sampler.num_negs_per_pos 66.0 +1036 43 training.batch_size 2.0 +1036 44 model.embedding_dim 0.0 +1036 44 model.scoring_fct_norm 1.0 
+1036 44 optimizer.lr 0.00245604080988639 +1036 44 negative_sampler.num_negs_per_pos 18.0 +1036 44 training.batch_size 1.0 +1036 45 model.embedding_dim 2.0 +1036 45 model.scoring_fct_norm 1.0 +1036 45 optimizer.lr 0.0764387287983095 +1036 45 negative_sampler.num_negs_per_pos 51.0 +1036 45 training.batch_size 1.0 +1036 46 model.embedding_dim 1.0 +1036 46 model.scoring_fct_norm 1.0 +1036 46 optimizer.lr 0.056074350568796144 +1036 46 negative_sampler.num_negs_per_pos 44.0 +1036 46 training.batch_size 0.0 +1036 47 model.embedding_dim 1.0 +1036 47 model.scoring_fct_norm 2.0 +1036 47 optimizer.lr 0.00701025338611896 +1036 47 negative_sampler.num_negs_per_pos 88.0 +1036 47 training.batch_size 0.0 +1036 48 model.embedding_dim 2.0 +1036 48 model.scoring_fct_norm 1.0 +1036 48 optimizer.lr 0.0016022803832856332 +1036 48 negative_sampler.num_negs_per_pos 22.0 +1036 48 training.batch_size 2.0 +1036 49 model.embedding_dim 0.0 +1036 49 model.scoring_fct_norm 2.0 +1036 49 optimizer.lr 0.08665780901462433 +1036 49 negative_sampler.num_negs_per_pos 60.0 +1036 49 training.batch_size 0.0 +1036 50 model.embedding_dim 0.0 +1036 50 model.scoring_fct_norm 1.0 +1036 50 optimizer.lr 0.0013578384009551143 +1036 50 negative_sampler.num_negs_per_pos 14.0 +1036 50 training.batch_size 1.0 +1036 51 model.embedding_dim 2.0 +1036 51 model.scoring_fct_norm 2.0 +1036 51 optimizer.lr 0.0050578975733989225 +1036 51 negative_sampler.num_negs_per_pos 59.0 +1036 51 training.batch_size 2.0 +1036 52 model.embedding_dim 1.0 +1036 52 model.scoring_fct_norm 2.0 +1036 52 optimizer.lr 0.015116942666457958 +1036 52 negative_sampler.num_negs_per_pos 93.0 +1036 52 training.batch_size 1.0 +1036 53 model.embedding_dim 1.0 +1036 53 model.scoring_fct_norm 2.0 +1036 53 optimizer.lr 0.09255290527711818 +1036 53 negative_sampler.num_negs_per_pos 71.0 +1036 53 training.batch_size 2.0 +1036 54 model.embedding_dim 2.0 +1036 54 model.scoring_fct_norm 1.0 +1036 54 optimizer.lr 0.006724602905035645 +1036 54 
negative_sampler.num_negs_per_pos 58.0 +1036 54 training.batch_size 1.0 +1036 55 model.embedding_dim 1.0 +1036 55 model.scoring_fct_norm 2.0 +1036 55 optimizer.lr 0.00646766682944935 +1036 55 negative_sampler.num_negs_per_pos 91.0 +1036 55 training.batch_size 2.0 +1036 56 model.embedding_dim 1.0 +1036 56 model.scoring_fct_norm 1.0 +1036 56 optimizer.lr 0.03423574005735411 +1036 56 negative_sampler.num_negs_per_pos 29.0 +1036 56 training.batch_size 1.0 +1036 57 model.embedding_dim 1.0 +1036 57 model.scoring_fct_norm 1.0 +1036 57 optimizer.lr 0.020988133466698543 +1036 57 negative_sampler.num_negs_per_pos 5.0 +1036 57 training.batch_size 0.0 +1036 58 model.embedding_dim 1.0 +1036 58 model.scoring_fct_norm 1.0 +1036 58 optimizer.lr 0.03468829644220838 +1036 58 negative_sampler.num_negs_per_pos 77.0 +1036 58 training.batch_size 0.0 +1036 59 model.embedding_dim 0.0 +1036 59 model.scoring_fct_norm 2.0 +1036 59 optimizer.lr 0.002341254705853244 +1036 59 negative_sampler.num_negs_per_pos 38.0 +1036 59 training.batch_size 2.0 +1036 60 model.embedding_dim 0.0 +1036 60 model.scoring_fct_norm 2.0 +1036 60 optimizer.lr 0.01763245589306343 +1036 60 negative_sampler.num_negs_per_pos 47.0 +1036 60 training.batch_size 0.0 +1036 61 model.embedding_dim 2.0 +1036 61 model.scoring_fct_norm 2.0 +1036 61 optimizer.lr 0.03934245150426207 +1036 61 negative_sampler.num_negs_per_pos 20.0 +1036 61 training.batch_size 0.0 +1036 62 model.embedding_dim 0.0 +1036 62 model.scoring_fct_norm 1.0 +1036 62 optimizer.lr 0.003212415788191394 +1036 62 negative_sampler.num_negs_per_pos 4.0 +1036 62 training.batch_size 2.0 +1036 63 model.embedding_dim 2.0 +1036 63 model.scoring_fct_norm 2.0 +1036 63 optimizer.lr 0.03248247866271892 +1036 63 negative_sampler.num_negs_per_pos 81.0 +1036 63 training.batch_size 2.0 +1036 64 model.embedding_dim 1.0 +1036 64 model.scoring_fct_norm 1.0 +1036 64 optimizer.lr 0.006722180631045757 +1036 64 negative_sampler.num_negs_per_pos 76.0 +1036 64 training.batch_size 1.0 +1036 
65 model.embedding_dim 2.0 +1036 65 model.scoring_fct_norm 1.0 +1036 65 optimizer.lr 0.058755907987542204 +1036 65 negative_sampler.num_negs_per_pos 35.0 +1036 65 training.batch_size 0.0 +1036 66 model.embedding_dim 2.0 +1036 66 model.scoring_fct_norm 2.0 +1036 66 optimizer.lr 0.030840877842550893 +1036 66 negative_sampler.num_negs_per_pos 91.0 +1036 66 training.batch_size 0.0 +1036 67 model.embedding_dim 0.0 +1036 67 model.scoring_fct_norm 2.0 +1036 67 optimizer.lr 0.02639016915151888 +1036 67 negative_sampler.num_negs_per_pos 22.0 +1036 67 training.batch_size 1.0 +1036 68 model.embedding_dim 1.0 +1036 68 model.scoring_fct_norm 2.0 +1036 68 optimizer.lr 0.03293323821931814 +1036 68 negative_sampler.num_negs_per_pos 33.0 +1036 68 training.batch_size 1.0 +1036 69 model.embedding_dim 2.0 +1036 69 model.scoring_fct_norm 2.0 +1036 69 optimizer.lr 0.025533494222452113 +1036 69 negative_sampler.num_negs_per_pos 28.0 +1036 69 training.batch_size 2.0 +1036 70 model.embedding_dim 0.0 +1036 70 model.scoring_fct_norm 2.0 +1036 70 optimizer.lr 0.010689517137509992 +1036 70 negative_sampler.num_negs_per_pos 67.0 +1036 70 training.batch_size 2.0 +1036 71 model.embedding_dim 0.0 +1036 71 model.scoring_fct_norm 1.0 +1036 71 optimizer.lr 0.00451773250959013 +1036 71 negative_sampler.num_negs_per_pos 33.0 +1036 71 training.batch_size 0.0 +1036 72 model.embedding_dim 1.0 +1036 72 model.scoring_fct_norm 2.0 +1036 72 optimizer.lr 0.023996086734881298 +1036 72 negative_sampler.num_negs_per_pos 78.0 +1036 72 training.batch_size 1.0 +1036 73 model.embedding_dim 0.0 +1036 73 model.scoring_fct_norm 1.0 +1036 73 optimizer.lr 0.006489128304184635 +1036 73 negative_sampler.num_negs_per_pos 2.0 +1036 73 training.batch_size 0.0 +1036 74 model.embedding_dim 0.0 +1036 74 model.scoring_fct_norm 2.0 +1036 74 optimizer.lr 0.004931920559257883 +1036 74 negative_sampler.num_negs_per_pos 54.0 +1036 74 training.batch_size 0.0 +1036 75 model.embedding_dim 1.0 +1036 75 model.scoring_fct_norm 2.0 +1036 75 
optimizer.lr 0.007006012475829935 +1036 75 negative_sampler.num_negs_per_pos 41.0 +1036 75 training.batch_size 1.0 +1036 76 model.embedding_dim 2.0 +1036 76 model.scoring_fct_norm 2.0 +1036 76 optimizer.lr 0.0015749064127813213 +1036 76 negative_sampler.num_negs_per_pos 38.0 +1036 76 training.batch_size 2.0 +1036 77 model.embedding_dim 2.0 +1036 77 model.scoring_fct_norm 1.0 +1036 77 optimizer.lr 0.012877955967815899 +1036 77 negative_sampler.num_negs_per_pos 78.0 +1036 77 training.batch_size 2.0 +1036 78 model.embedding_dim 2.0 +1036 78 model.scoring_fct_norm 2.0 +1036 78 optimizer.lr 0.0024784522997247315 +1036 78 negative_sampler.num_negs_per_pos 55.0 +1036 78 training.batch_size 0.0 +1036 79 model.embedding_dim 2.0 +1036 79 model.scoring_fct_norm 2.0 +1036 79 optimizer.lr 0.001737241914908088 +1036 79 negative_sampler.num_negs_per_pos 50.0 +1036 79 training.batch_size 2.0 +1036 80 model.embedding_dim 1.0 +1036 80 model.scoring_fct_norm 2.0 +1036 80 optimizer.lr 0.0023441180776206153 +1036 80 negative_sampler.num_negs_per_pos 36.0 +1036 80 training.batch_size 2.0 +1036 81 model.embedding_dim 1.0 +1036 81 model.scoring_fct_norm 2.0 +1036 81 optimizer.lr 0.0011491791974039514 +1036 81 negative_sampler.num_negs_per_pos 67.0 +1036 81 training.batch_size 2.0 +1036 82 model.embedding_dim 1.0 +1036 82 model.scoring_fct_norm 2.0 +1036 82 optimizer.lr 0.0040153055917528276 +1036 82 negative_sampler.num_negs_per_pos 92.0 +1036 82 training.batch_size 0.0 +1036 83 model.embedding_dim 0.0 +1036 83 model.scoring_fct_norm 2.0 +1036 83 optimizer.lr 0.051253711277184595 +1036 83 negative_sampler.num_negs_per_pos 62.0 +1036 83 training.batch_size 0.0 +1036 84 model.embedding_dim 2.0 +1036 84 model.scoring_fct_norm 1.0 +1036 84 optimizer.lr 0.0026406169355566705 +1036 84 negative_sampler.num_negs_per_pos 93.0 +1036 84 training.batch_size 2.0 +1036 85 model.embedding_dim 0.0 +1036 85 model.scoring_fct_norm 2.0 +1036 85 optimizer.lr 0.005946291753715887 +1036 85 
negative_sampler.num_negs_per_pos 57.0 +1036 85 training.batch_size 1.0 +1036 86 model.embedding_dim 0.0 +1036 86 model.scoring_fct_norm 2.0 +1036 86 optimizer.lr 0.02255803976596731 +1036 86 negative_sampler.num_negs_per_pos 4.0 +1036 86 training.batch_size 2.0 +1036 87 model.embedding_dim 2.0 +1036 87 model.scoring_fct_norm 2.0 +1036 87 optimizer.lr 0.00968427444335225 +1036 87 negative_sampler.num_negs_per_pos 57.0 +1036 87 training.batch_size 1.0 +1036 88 model.embedding_dim 1.0 +1036 88 model.scoring_fct_norm 1.0 +1036 88 optimizer.lr 0.003819209241411661 +1036 88 negative_sampler.num_negs_per_pos 44.0 +1036 88 training.batch_size 2.0 +1036 89 model.embedding_dim 2.0 +1036 89 model.scoring_fct_norm 2.0 +1036 89 optimizer.lr 0.016167692935121065 +1036 89 negative_sampler.num_negs_per_pos 78.0 +1036 89 training.batch_size 1.0 +1036 90 model.embedding_dim 2.0 +1036 90 model.scoring_fct_norm 2.0 +1036 90 optimizer.lr 0.07779794400080835 +1036 90 negative_sampler.num_negs_per_pos 28.0 +1036 90 training.batch_size 1.0 +1036 91 model.embedding_dim 2.0 +1036 91 model.scoring_fct_norm 2.0 +1036 91 optimizer.lr 0.001391394821297714 +1036 91 negative_sampler.num_negs_per_pos 30.0 +1036 91 training.batch_size 2.0 +1036 92 model.embedding_dim 1.0 +1036 92 model.scoring_fct_norm 2.0 +1036 92 optimizer.lr 0.02841851966492398 +1036 92 negative_sampler.num_negs_per_pos 41.0 +1036 92 training.batch_size 2.0 +1036 93 model.embedding_dim 1.0 +1036 93 model.scoring_fct_norm 1.0 +1036 93 optimizer.lr 0.05602772308587794 +1036 93 negative_sampler.num_negs_per_pos 53.0 +1036 93 training.batch_size 1.0 +1036 94 model.embedding_dim 2.0 +1036 94 model.scoring_fct_norm 1.0 +1036 94 optimizer.lr 0.0037107489024853215 +1036 94 negative_sampler.num_negs_per_pos 80.0 +1036 94 training.batch_size 0.0 +1036 95 model.embedding_dim 2.0 +1036 95 model.scoring_fct_norm 1.0 +1036 95 optimizer.lr 0.009388132413046662 +1036 95 negative_sampler.num_negs_per_pos 74.0 +1036 95 training.batch_size 2.0 
+1036 96 model.embedding_dim 0.0 +1036 96 model.scoring_fct_norm 2.0 +1036 96 optimizer.lr 0.003091739605802511 +1036 96 negative_sampler.num_negs_per_pos 82.0 +1036 96 training.batch_size 0.0 +1036 97 model.embedding_dim 2.0 +1036 97 model.scoring_fct_norm 1.0 +1036 97 optimizer.lr 0.013767040039819448 +1036 97 negative_sampler.num_negs_per_pos 21.0 +1036 97 training.batch_size 1.0 +1036 98 model.embedding_dim 2.0 +1036 98 model.scoring_fct_norm 1.0 +1036 98 optimizer.lr 0.01658425416204804 +1036 98 negative_sampler.num_negs_per_pos 76.0 +1036 98 training.batch_size 1.0 +1036 99 model.embedding_dim 0.0 +1036 99 model.scoring_fct_norm 1.0 +1036 99 optimizer.lr 0.013100932775855707 +1036 99 negative_sampler.num_negs_per_pos 96.0 +1036 99 training.batch_size 0.0 +1036 100 model.embedding_dim 1.0 +1036 100 model.scoring_fct_norm 2.0 +1036 100 optimizer.lr 0.005727630092416809 +1036 100 negative_sampler.num_negs_per_pos 71.0 +1036 100 training.batch_size 1.0 +1036 1 dataset """wn18rr""" +1036 1 model """unstructuredmodel""" +1036 1 loss """bceaftersigmoid""" +1036 1 regularizer """no""" +1036 1 optimizer """adam""" +1036 1 training_loop """owa""" +1036 1 negative_sampler """basic""" +1036 1 evaluator """rankbased""" +1036 2 dataset """wn18rr""" +1036 2 model """unstructuredmodel""" +1036 2 loss """bceaftersigmoid""" +1036 2 regularizer """no""" +1036 2 optimizer """adam""" +1036 2 training_loop """owa""" +1036 2 negative_sampler """basic""" +1036 2 evaluator """rankbased""" +1036 3 dataset """wn18rr""" +1036 3 model """unstructuredmodel""" +1036 3 loss """bceaftersigmoid""" +1036 3 regularizer """no""" +1036 3 optimizer """adam""" +1036 3 training_loop """owa""" +1036 3 negative_sampler """basic""" +1036 3 evaluator """rankbased""" +1036 4 dataset """wn18rr""" +1036 4 model """unstructuredmodel""" +1036 4 loss """bceaftersigmoid""" +1036 4 regularizer """no""" +1036 4 optimizer """adam""" +1036 4 training_loop """owa""" +1036 4 negative_sampler """basic""" +1036 4 
evaluator """rankbased""" +1036 5 dataset """wn18rr""" +1036 5 model """unstructuredmodel""" +1036 5 loss """bceaftersigmoid""" +1036 5 regularizer """no""" +1036 5 optimizer """adam""" +1036 5 training_loop """owa""" +1036 5 negative_sampler """basic""" +1036 5 evaluator """rankbased""" +1036 6 dataset """wn18rr""" +1036 6 model """unstructuredmodel""" +1036 6 loss """bceaftersigmoid""" +1036 6 regularizer """no""" +1036 6 optimizer """adam""" +1036 6 training_loop """owa""" +1036 6 negative_sampler """basic""" +1036 6 evaluator """rankbased""" +1036 7 dataset """wn18rr""" +1036 7 model """unstructuredmodel""" +1036 7 loss """bceaftersigmoid""" +1036 7 regularizer """no""" +1036 7 optimizer """adam""" +1036 7 training_loop """owa""" +1036 7 negative_sampler """basic""" +1036 7 evaluator """rankbased""" +1036 8 dataset """wn18rr""" +1036 8 model """unstructuredmodel""" +1036 8 loss """bceaftersigmoid""" +1036 8 regularizer """no""" +1036 8 optimizer """adam""" +1036 8 training_loop """owa""" +1036 8 negative_sampler """basic""" +1036 8 evaluator """rankbased""" +1036 9 dataset """wn18rr""" +1036 9 model """unstructuredmodel""" +1036 9 loss """bceaftersigmoid""" +1036 9 regularizer """no""" +1036 9 optimizer """adam""" +1036 9 training_loop """owa""" +1036 9 negative_sampler """basic""" +1036 9 evaluator """rankbased""" +1036 10 dataset """wn18rr""" +1036 10 model """unstructuredmodel""" +1036 10 loss """bceaftersigmoid""" +1036 10 regularizer """no""" +1036 10 optimizer """adam""" +1036 10 training_loop """owa""" +1036 10 negative_sampler """basic""" +1036 10 evaluator """rankbased""" +1036 11 dataset """wn18rr""" +1036 11 model """unstructuredmodel""" +1036 11 loss """bceaftersigmoid""" +1036 11 regularizer """no""" +1036 11 optimizer """adam""" +1036 11 training_loop """owa""" +1036 11 negative_sampler """basic""" +1036 11 evaluator """rankbased""" +1036 12 dataset """wn18rr""" +1036 12 model """unstructuredmodel""" +1036 12 loss """bceaftersigmoid""" +1036 12 
regularizer """no""" +1036 12 optimizer """adam""" +1036 12 training_loop """owa""" +1036 12 negative_sampler """basic""" +1036 12 evaluator """rankbased""" +1036 13 dataset """wn18rr""" +1036 13 model """unstructuredmodel""" +1036 13 loss """bceaftersigmoid""" +1036 13 regularizer """no""" +1036 13 optimizer """adam""" +1036 13 training_loop """owa""" +1036 13 negative_sampler """basic""" +1036 13 evaluator """rankbased""" +1036 14 dataset """wn18rr""" +1036 14 model """unstructuredmodel""" +1036 14 loss """bceaftersigmoid""" +1036 14 regularizer """no""" +1036 14 optimizer """adam""" +1036 14 training_loop """owa""" +1036 14 negative_sampler """basic""" +1036 14 evaluator """rankbased""" +1036 15 dataset """wn18rr""" +1036 15 model """unstructuredmodel""" +1036 15 loss """bceaftersigmoid""" +1036 15 regularizer """no""" +1036 15 optimizer """adam""" +1036 15 training_loop """owa""" +1036 15 negative_sampler """basic""" +1036 15 evaluator """rankbased""" +1036 16 dataset """wn18rr""" +1036 16 model """unstructuredmodel""" +1036 16 loss """bceaftersigmoid""" +1036 16 regularizer """no""" +1036 16 optimizer """adam""" +1036 16 training_loop """owa""" +1036 16 negative_sampler """basic""" +1036 16 evaluator """rankbased""" +1036 17 dataset """wn18rr""" +1036 17 model """unstructuredmodel""" +1036 17 loss """bceaftersigmoid""" +1036 17 regularizer """no""" +1036 17 optimizer """adam""" +1036 17 training_loop """owa""" +1036 17 negative_sampler """basic""" +1036 17 evaluator """rankbased""" +1036 18 dataset """wn18rr""" +1036 18 model """unstructuredmodel""" +1036 18 loss """bceaftersigmoid""" +1036 18 regularizer """no""" +1036 18 optimizer """adam""" +1036 18 training_loop """owa""" +1036 18 negative_sampler """basic""" +1036 18 evaluator """rankbased""" +1036 19 dataset """wn18rr""" +1036 19 model """unstructuredmodel""" +1036 19 loss """bceaftersigmoid""" +1036 19 regularizer """no""" +1036 19 optimizer """adam""" +1036 19 training_loop """owa""" +1036 19 
negative_sampler """basic""" +1036 19 evaluator """rankbased""" +1036 20 dataset """wn18rr""" +1036 20 model """unstructuredmodel""" +1036 20 loss """bceaftersigmoid""" +1036 20 regularizer """no""" +1036 20 optimizer """adam""" +1036 20 training_loop """owa""" +1036 20 negative_sampler """basic""" +1036 20 evaluator """rankbased""" +1036 21 dataset """wn18rr""" +1036 21 model """unstructuredmodel""" +1036 21 loss """bceaftersigmoid""" +1036 21 regularizer """no""" +1036 21 optimizer """adam""" +1036 21 training_loop """owa""" +1036 21 negative_sampler """basic""" +1036 21 evaluator """rankbased""" +1036 22 dataset """wn18rr""" +1036 22 model """unstructuredmodel""" +1036 22 loss """bceaftersigmoid""" +1036 22 regularizer """no""" +1036 22 optimizer """adam""" +1036 22 training_loop """owa""" +1036 22 negative_sampler """basic""" +1036 22 evaluator """rankbased""" +1036 23 dataset """wn18rr""" +1036 23 model """unstructuredmodel""" +1036 23 loss """bceaftersigmoid""" +1036 23 regularizer """no""" +1036 23 optimizer """adam""" +1036 23 training_loop """owa""" +1036 23 negative_sampler """basic""" +1036 23 evaluator """rankbased""" +1036 24 dataset """wn18rr""" +1036 24 model """unstructuredmodel""" +1036 24 loss """bceaftersigmoid""" +1036 24 regularizer """no""" +1036 24 optimizer """adam""" +1036 24 training_loop """owa""" +1036 24 negative_sampler """basic""" +1036 24 evaluator """rankbased""" +1036 25 dataset """wn18rr""" +1036 25 model """unstructuredmodel""" +1036 25 loss """bceaftersigmoid""" +1036 25 regularizer """no""" +1036 25 optimizer """adam""" +1036 25 training_loop """owa""" +1036 25 negative_sampler """basic""" +1036 25 evaluator """rankbased""" +1036 26 dataset """wn18rr""" +1036 26 model """unstructuredmodel""" +1036 26 loss """bceaftersigmoid""" +1036 26 regularizer """no""" +1036 26 optimizer """adam""" +1036 26 training_loop """owa""" +1036 26 negative_sampler """basic""" +1036 26 evaluator """rankbased""" +1036 27 dataset """wn18rr""" +1036 27 
model """unstructuredmodel""" +1036 27 loss """bceaftersigmoid""" +1036 27 regularizer """no""" +1036 27 optimizer """adam""" +1036 27 training_loop """owa""" +1036 27 negative_sampler """basic""" +1036 27 evaluator """rankbased""" +1036 28 dataset """wn18rr""" +1036 28 model """unstructuredmodel""" +1036 28 loss """bceaftersigmoid""" +1036 28 regularizer """no""" +1036 28 optimizer """adam""" +1036 28 training_loop """owa""" +1036 28 negative_sampler """basic""" +1036 28 evaluator """rankbased""" +1036 29 dataset """wn18rr""" +1036 29 model """unstructuredmodel""" +1036 29 loss """bceaftersigmoid""" +1036 29 regularizer """no""" +1036 29 optimizer """adam""" +1036 29 training_loop """owa""" +1036 29 negative_sampler """basic""" +1036 29 evaluator """rankbased""" +1036 30 dataset """wn18rr""" +1036 30 model """unstructuredmodel""" +1036 30 loss """bceaftersigmoid""" +1036 30 regularizer """no""" +1036 30 optimizer """adam""" +1036 30 training_loop """owa""" +1036 30 negative_sampler """basic""" +1036 30 evaluator """rankbased""" +1036 31 dataset """wn18rr""" +1036 31 model """unstructuredmodel""" +1036 31 loss """bceaftersigmoid""" +1036 31 regularizer """no""" +1036 31 optimizer """adam""" +1036 31 training_loop """owa""" +1036 31 negative_sampler """basic""" +1036 31 evaluator """rankbased""" +1036 32 dataset """wn18rr""" +1036 32 model """unstructuredmodel""" +1036 32 loss """bceaftersigmoid""" +1036 32 regularizer """no""" +1036 32 optimizer """adam""" +1036 32 training_loop """owa""" +1036 32 negative_sampler """basic""" +1036 32 evaluator """rankbased""" +1036 33 dataset """wn18rr""" +1036 33 model """unstructuredmodel""" +1036 33 loss """bceaftersigmoid""" +1036 33 regularizer """no""" +1036 33 optimizer """adam""" +1036 33 training_loop """owa""" +1036 33 negative_sampler """basic""" +1036 33 evaluator """rankbased""" +1036 34 dataset """wn18rr""" +1036 34 model """unstructuredmodel""" +1036 34 loss """bceaftersigmoid""" +1036 34 regularizer """no""" +1036 
34 optimizer """adam""" +1036 34 training_loop """owa""" +1036 34 negative_sampler """basic""" +1036 34 evaluator """rankbased""" +1036 35 dataset """wn18rr""" +1036 35 model """unstructuredmodel""" +1036 35 loss """bceaftersigmoid""" +1036 35 regularizer """no""" +1036 35 optimizer """adam""" +1036 35 training_loop """owa""" +1036 35 negative_sampler """basic""" +1036 35 evaluator """rankbased""" +1036 36 dataset """wn18rr""" +1036 36 model """unstructuredmodel""" +1036 36 loss """bceaftersigmoid""" +1036 36 regularizer """no""" +1036 36 optimizer """adam""" +1036 36 training_loop """owa""" +1036 36 negative_sampler """basic""" +1036 36 evaluator """rankbased""" +1036 37 dataset """wn18rr""" +1036 37 model """unstructuredmodel""" +1036 37 loss """bceaftersigmoid""" +1036 37 regularizer """no""" +1036 37 optimizer """adam""" +1036 37 training_loop """owa""" +1036 37 negative_sampler """basic""" +1036 37 evaluator """rankbased""" +1036 38 dataset """wn18rr""" +1036 38 model """unstructuredmodel""" +1036 38 loss """bceaftersigmoid""" +1036 38 regularizer """no""" +1036 38 optimizer """adam""" +1036 38 training_loop """owa""" +1036 38 negative_sampler """basic""" +1036 38 evaluator """rankbased""" +1036 39 dataset """wn18rr""" +1036 39 model """unstructuredmodel""" +1036 39 loss """bceaftersigmoid""" +1036 39 regularizer """no""" +1036 39 optimizer """adam""" +1036 39 training_loop """owa""" +1036 39 negative_sampler """basic""" +1036 39 evaluator """rankbased""" +1036 40 dataset """wn18rr""" +1036 40 model """unstructuredmodel""" +1036 40 loss """bceaftersigmoid""" +1036 40 regularizer """no""" +1036 40 optimizer """adam""" +1036 40 training_loop """owa""" +1036 40 negative_sampler """basic""" +1036 40 evaluator """rankbased""" +1036 41 dataset """wn18rr""" +1036 41 model """unstructuredmodel""" +1036 41 loss """bceaftersigmoid""" +1036 41 regularizer """no""" +1036 41 optimizer """adam""" +1036 41 training_loop """owa""" +1036 41 negative_sampler """basic""" +1036 
41 evaluator """rankbased""" +1036 42 dataset """wn18rr""" +1036 42 model """unstructuredmodel""" +1036 42 loss """bceaftersigmoid""" +1036 42 regularizer """no""" +1036 42 optimizer """adam""" +1036 42 training_loop """owa""" +1036 42 negative_sampler """basic""" +1036 42 evaluator """rankbased""" +1036 43 dataset """wn18rr""" +1036 43 model """unstructuredmodel""" +1036 43 loss """bceaftersigmoid""" +1036 43 regularizer """no""" +1036 43 optimizer """adam""" +1036 43 training_loop """owa""" +1036 43 negative_sampler """basic""" +1036 43 evaluator """rankbased""" +1036 44 dataset """wn18rr""" +1036 44 model """unstructuredmodel""" +1036 44 loss """bceaftersigmoid""" +1036 44 regularizer """no""" +1036 44 optimizer """adam""" +1036 44 training_loop """owa""" +1036 44 negative_sampler """basic""" +1036 44 evaluator """rankbased""" +1036 45 dataset """wn18rr""" +1036 45 model """unstructuredmodel""" +1036 45 loss """bceaftersigmoid""" +1036 45 regularizer """no""" +1036 45 optimizer """adam""" +1036 45 training_loop """owa""" +1036 45 negative_sampler """basic""" +1036 45 evaluator """rankbased""" +1036 46 dataset """wn18rr""" +1036 46 model """unstructuredmodel""" +1036 46 loss """bceaftersigmoid""" +1036 46 regularizer """no""" +1036 46 optimizer """adam""" +1036 46 training_loop """owa""" +1036 46 negative_sampler """basic""" +1036 46 evaluator """rankbased""" +1036 47 dataset """wn18rr""" +1036 47 model """unstructuredmodel""" +1036 47 loss """bceaftersigmoid""" +1036 47 regularizer """no""" +1036 47 optimizer """adam""" +1036 47 training_loop """owa""" +1036 47 negative_sampler """basic""" +1036 47 evaluator """rankbased""" +1036 48 dataset """wn18rr""" +1036 48 model """unstructuredmodel""" +1036 48 loss """bceaftersigmoid""" +1036 48 regularizer """no""" +1036 48 optimizer """adam""" +1036 48 training_loop """owa""" +1036 48 negative_sampler """basic""" +1036 48 evaluator """rankbased""" +1036 49 dataset """wn18rr""" +1036 49 model """unstructuredmodel""" 
+1036 49 loss """bceaftersigmoid""" +1036 49 regularizer """no""" +1036 49 optimizer """adam""" +1036 49 training_loop """owa""" +1036 49 negative_sampler """basic""" +1036 49 evaluator """rankbased""" +1036 50 dataset """wn18rr""" +1036 50 model """unstructuredmodel""" +1036 50 loss """bceaftersigmoid""" +1036 50 regularizer """no""" +1036 50 optimizer """adam""" +1036 50 training_loop """owa""" +1036 50 negative_sampler """basic""" +1036 50 evaluator """rankbased""" +1036 51 dataset """wn18rr""" +1036 51 model """unstructuredmodel""" +1036 51 loss """bceaftersigmoid""" +1036 51 regularizer """no""" +1036 51 optimizer """adam""" +1036 51 training_loop """owa""" +1036 51 negative_sampler """basic""" +1036 51 evaluator """rankbased""" +1036 52 dataset """wn18rr""" +1036 52 model """unstructuredmodel""" +1036 52 loss """bceaftersigmoid""" +1036 52 regularizer """no""" +1036 52 optimizer """adam""" +1036 52 training_loop """owa""" +1036 52 negative_sampler """basic""" +1036 52 evaluator """rankbased""" +1036 53 dataset """wn18rr""" +1036 53 model """unstructuredmodel""" +1036 53 loss """bceaftersigmoid""" +1036 53 regularizer """no""" +1036 53 optimizer """adam""" +1036 53 training_loop """owa""" +1036 53 negative_sampler """basic""" +1036 53 evaluator """rankbased""" +1036 54 dataset """wn18rr""" +1036 54 model """unstructuredmodel""" +1036 54 loss """bceaftersigmoid""" +1036 54 regularizer """no""" +1036 54 optimizer """adam""" +1036 54 training_loop """owa""" +1036 54 negative_sampler """basic""" +1036 54 evaluator """rankbased""" +1036 55 dataset """wn18rr""" +1036 55 model """unstructuredmodel""" +1036 55 loss """bceaftersigmoid""" +1036 55 regularizer """no""" +1036 55 optimizer """adam""" +1036 55 training_loop """owa""" +1036 55 negative_sampler """basic""" +1036 55 evaluator """rankbased""" +1036 56 dataset """wn18rr""" +1036 56 model """unstructuredmodel""" +1036 56 loss """bceaftersigmoid""" +1036 56 regularizer """no""" +1036 56 optimizer """adam""" +1036 
56 training_loop """owa""" +1036 56 negative_sampler """basic""" +1036 56 evaluator """rankbased""" +1036 57 dataset """wn18rr""" +1036 57 model """unstructuredmodel""" +1036 57 loss """bceaftersigmoid""" +1036 57 regularizer """no""" +1036 57 optimizer """adam""" +1036 57 training_loop """owa""" +1036 57 negative_sampler """basic""" +1036 57 evaluator """rankbased""" +1036 58 dataset """wn18rr""" +1036 58 model """unstructuredmodel""" +1036 58 loss """bceaftersigmoid""" +1036 58 regularizer """no""" +1036 58 optimizer """adam""" +1036 58 training_loop """owa""" +1036 58 negative_sampler """basic""" +1036 58 evaluator """rankbased""" +1036 59 dataset """wn18rr""" +1036 59 model """unstructuredmodel""" +1036 59 loss """bceaftersigmoid""" +1036 59 regularizer """no""" +1036 59 optimizer """adam""" +1036 59 training_loop """owa""" +1036 59 negative_sampler """basic""" +1036 59 evaluator """rankbased""" +1036 60 dataset """wn18rr""" +1036 60 model """unstructuredmodel""" +1036 60 loss """bceaftersigmoid""" +1036 60 regularizer """no""" +1036 60 optimizer """adam""" +1036 60 training_loop """owa""" +1036 60 negative_sampler """basic""" +1036 60 evaluator """rankbased""" +1036 61 dataset """wn18rr""" +1036 61 model """unstructuredmodel""" +1036 61 loss """bceaftersigmoid""" +1036 61 regularizer """no""" +1036 61 optimizer """adam""" +1036 61 training_loop """owa""" +1036 61 negative_sampler """basic""" +1036 61 evaluator """rankbased""" +1036 62 dataset """wn18rr""" +1036 62 model """unstructuredmodel""" +1036 62 loss """bceaftersigmoid""" +1036 62 regularizer """no""" +1036 62 optimizer """adam""" +1036 62 training_loop """owa""" +1036 62 negative_sampler """basic""" +1036 62 evaluator """rankbased""" +1036 63 dataset """wn18rr""" +1036 63 model """unstructuredmodel""" +1036 63 loss """bceaftersigmoid""" +1036 63 regularizer """no""" +1036 63 optimizer """adam""" +1036 63 training_loop """owa""" +1036 63 negative_sampler """basic""" +1036 63 evaluator """rankbased""" 
+1036 64 dataset """wn18rr""" +1036 64 model """unstructuredmodel""" +1036 64 loss """bceaftersigmoid""" +1036 64 regularizer """no""" +1036 64 optimizer """adam""" +1036 64 training_loop """owa""" +1036 64 negative_sampler """basic""" +1036 64 evaluator """rankbased""" +1036 65 dataset """wn18rr""" +1036 65 model """unstructuredmodel""" +1036 65 loss """bceaftersigmoid""" +1036 65 regularizer """no""" +1036 65 optimizer """adam""" +1036 65 training_loop """owa""" +1036 65 negative_sampler """basic""" +1036 65 evaluator """rankbased""" +1036 66 dataset """wn18rr""" +1036 66 model """unstructuredmodel""" +1036 66 loss """bceaftersigmoid""" +1036 66 regularizer """no""" +1036 66 optimizer """adam""" +1036 66 training_loop """owa""" +1036 66 negative_sampler """basic""" +1036 66 evaluator """rankbased""" +1036 67 dataset """wn18rr""" +1036 67 model """unstructuredmodel""" +1036 67 loss """bceaftersigmoid""" +1036 67 regularizer """no""" +1036 67 optimizer """adam""" +1036 67 training_loop """owa""" +1036 67 negative_sampler """basic""" +1036 67 evaluator """rankbased""" +1036 68 dataset """wn18rr""" +1036 68 model """unstructuredmodel""" +1036 68 loss """bceaftersigmoid""" +1036 68 regularizer """no""" +1036 68 optimizer """adam""" +1036 68 training_loop """owa""" +1036 68 negative_sampler """basic""" +1036 68 evaluator """rankbased""" +1036 69 dataset """wn18rr""" +1036 69 model """unstructuredmodel""" +1036 69 loss """bceaftersigmoid""" +1036 69 regularizer """no""" +1036 69 optimizer """adam""" +1036 69 training_loop """owa""" +1036 69 negative_sampler """basic""" +1036 69 evaluator """rankbased""" +1036 70 dataset """wn18rr""" +1036 70 model """unstructuredmodel""" +1036 70 loss """bceaftersigmoid""" +1036 70 regularizer """no""" +1036 70 optimizer """adam""" +1036 70 training_loop """owa""" +1036 70 negative_sampler """basic""" +1036 70 evaluator """rankbased""" +1036 71 dataset """wn18rr""" +1036 71 model """unstructuredmodel""" +1036 71 loss 
"""bceaftersigmoid""" +1036 71 regularizer """no""" +1036 71 optimizer """adam""" +1036 71 training_loop """owa""" +1036 71 negative_sampler """basic""" +1036 71 evaluator """rankbased""" +1036 72 dataset """wn18rr""" +1036 72 model """unstructuredmodel""" +1036 72 loss """bceaftersigmoid""" +1036 72 regularizer """no""" +1036 72 optimizer """adam""" +1036 72 training_loop """owa""" +1036 72 negative_sampler """basic""" +1036 72 evaluator """rankbased""" +1036 73 dataset """wn18rr""" +1036 73 model """unstructuredmodel""" +1036 73 loss """bceaftersigmoid""" +1036 73 regularizer """no""" +1036 73 optimizer """adam""" +1036 73 training_loop """owa""" +1036 73 negative_sampler """basic""" +1036 73 evaluator """rankbased""" +1036 74 dataset """wn18rr""" +1036 74 model """unstructuredmodel""" +1036 74 loss """bceaftersigmoid""" +1036 74 regularizer """no""" +1036 74 optimizer """adam""" +1036 74 training_loop """owa""" +1036 74 negative_sampler """basic""" +1036 74 evaluator """rankbased""" +1036 75 dataset """wn18rr""" +1036 75 model """unstructuredmodel""" +1036 75 loss """bceaftersigmoid""" +1036 75 regularizer """no""" +1036 75 optimizer """adam""" +1036 75 training_loop """owa""" +1036 75 negative_sampler """basic""" +1036 75 evaluator """rankbased""" +1036 76 dataset """wn18rr""" +1036 76 model """unstructuredmodel""" +1036 76 loss """bceaftersigmoid""" +1036 76 regularizer """no""" +1036 76 optimizer """adam""" +1036 76 training_loop """owa""" +1036 76 negative_sampler """basic""" +1036 76 evaluator """rankbased""" +1036 77 dataset """wn18rr""" +1036 77 model """unstructuredmodel""" +1036 77 loss """bceaftersigmoid""" +1036 77 regularizer """no""" +1036 77 optimizer """adam""" +1036 77 training_loop """owa""" +1036 77 negative_sampler """basic""" +1036 77 evaluator """rankbased""" +1036 78 dataset """wn18rr""" +1036 78 model """unstructuredmodel""" +1036 78 loss """bceaftersigmoid""" +1036 78 regularizer """no""" +1036 78 optimizer """adam""" +1036 78 
training_loop """owa""" +1036 78 negative_sampler """basic""" +1036 78 evaluator """rankbased""" +1036 79 dataset """wn18rr""" +1036 79 model """unstructuredmodel""" +1036 79 loss """bceaftersigmoid""" +1036 79 regularizer """no""" +1036 79 optimizer """adam""" +1036 79 training_loop """owa""" +1036 79 negative_sampler """basic""" +1036 79 evaluator """rankbased""" +1036 80 dataset """wn18rr""" +1036 80 model """unstructuredmodel""" +1036 80 loss """bceaftersigmoid""" +1036 80 regularizer """no""" +1036 80 optimizer """adam""" +1036 80 training_loop """owa""" +1036 80 negative_sampler """basic""" +1036 80 evaluator """rankbased""" +1036 81 dataset """wn18rr""" +1036 81 model """unstructuredmodel""" +1036 81 loss """bceaftersigmoid""" +1036 81 regularizer """no""" +1036 81 optimizer """adam""" +1036 81 training_loop """owa""" +1036 81 negative_sampler """basic""" +1036 81 evaluator """rankbased""" +1036 82 dataset """wn18rr""" +1036 82 model """unstructuredmodel""" +1036 82 loss """bceaftersigmoid""" +1036 82 regularizer """no""" +1036 82 optimizer """adam""" +1036 82 training_loop """owa""" +1036 82 negative_sampler """basic""" +1036 82 evaluator """rankbased""" +1036 83 dataset """wn18rr""" +1036 83 model """unstructuredmodel""" +1036 83 loss """bceaftersigmoid""" +1036 83 regularizer """no""" +1036 83 optimizer """adam""" +1036 83 training_loop """owa""" +1036 83 negative_sampler """basic""" +1036 83 evaluator """rankbased""" +1036 84 dataset """wn18rr""" +1036 84 model """unstructuredmodel""" +1036 84 loss """bceaftersigmoid""" +1036 84 regularizer """no""" +1036 84 optimizer """adam""" +1036 84 training_loop """owa""" +1036 84 negative_sampler """basic""" +1036 84 evaluator """rankbased""" +1036 85 dataset """wn18rr""" +1036 85 model """unstructuredmodel""" +1036 85 loss """bceaftersigmoid""" +1036 85 regularizer """no""" +1036 85 optimizer """adam""" +1036 85 training_loop """owa""" +1036 85 negative_sampler """basic""" +1036 85 evaluator """rankbased""" +1036 
86 dataset """wn18rr""" +1036 86 model """unstructuredmodel""" +1036 86 loss """bceaftersigmoid""" +1036 86 regularizer """no""" +1036 86 optimizer """adam""" +1036 86 training_loop """owa""" +1036 86 negative_sampler """basic""" +1036 86 evaluator """rankbased""" +1036 87 dataset """wn18rr""" +1036 87 model """unstructuredmodel""" +1036 87 loss """bceaftersigmoid""" +1036 87 regularizer """no""" +1036 87 optimizer """adam""" +1036 87 training_loop """owa""" +1036 87 negative_sampler """basic""" +1036 87 evaluator """rankbased""" +1036 88 dataset """wn18rr""" +1036 88 model """unstructuredmodel""" +1036 88 loss """bceaftersigmoid""" +1036 88 regularizer """no""" +1036 88 optimizer """adam""" +1036 88 training_loop """owa""" +1036 88 negative_sampler """basic""" +1036 88 evaluator """rankbased""" +1036 89 dataset """wn18rr""" +1036 89 model """unstructuredmodel""" +1036 89 loss """bceaftersigmoid""" +1036 89 regularizer """no""" +1036 89 optimizer """adam""" +1036 89 training_loop """owa""" +1036 89 negative_sampler """basic""" +1036 89 evaluator """rankbased""" +1036 90 dataset """wn18rr""" +1036 90 model """unstructuredmodel""" +1036 90 loss """bceaftersigmoid""" +1036 90 regularizer """no""" +1036 90 optimizer """adam""" +1036 90 training_loop """owa""" +1036 90 negative_sampler """basic""" +1036 90 evaluator """rankbased""" +1036 91 dataset """wn18rr""" +1036 91 model """unstructuredmodel""" +1036 91 loss """bceaftersigmoid""" +1036 91 regularizer """no""" +1036 91 optimizer """adam""" +1036 91 training_loop """owa""" +1036 91 negative_sampler """basic""" +1036 91 evaluator """rankbased""" +1036 92 dataset """wn18rr""" +1036 92 model """unstructuredmodel""" +1036 92 loss """bceaftersigmoid""" +1036 92 regularizer """no""" +1036 92 optimizer """adam""" +1036 92 training_loop """owa""" +1036 92 negative_sampler """basic""" +1036 92 evaluator """rankbased""" +1036 93 dataset """wn18rr""" +1036 93 model """unstructuredmodel""" +1036 93 loss """bceaftersigmoid""" 
+1036 93 regularizer """no""" +1036 93 optimizer """adam""" +1036 93 training_loop """owa""" +1036 93 negative_sampler """basic""" +1036 93 evaluator """rankbased""" +1036 94 dataset """wn18rr""" +1036 94 model """unstructuredmodel""" +1036 94 loss """bceaftersigmoid""" +1036 94 regularizer """no""" +1036 94 optimizer """adam""" +1036 94 training_loop """owa""" +1036 94 negative_sampler """basic""" +1036 94 evaluator """rankbased""" +1036 95 dataset """wn18rr""" +1036 95 model """unstructuredmodel""" +1036 95 loss """bceaftersigmoid""" +1036 95 regularizer """no""" +1036 95 optimizer """adam""" +1036 95 training_loop """owa""" +1036 95 negative_sampler """basic""" +1036 95 evaluator """rankbased""" +1036 96 dataset """wn18rr""" +1036 96 model """unstructuredmodel""" +1036 96 loss """bceaftersigmoid""" +1036 96 regularizer """no""" +1036 96 optimizer """adam""" +1036 96 training_loop """owa""" +1036 96 negative_sampler """basic""" +1036 96 evaluator """rankbased""" +1036 97 dataset """wn18rr""" +1036 97 model """unstructuredmodel""" +1036 97 loss """bceaftersigmoid""" +1036 97 regularizer """no""" +1036 97 optimizer """adam""" +1036 97 training_loop """owa""" +1036 97 negative_sampler """basic""" +1036 97 evaluator """rankbased""" +1036 98 dataset """wn18rr""" +1036 98 model """unstructuredmodel""" +1036 98 loss """bceaftersigmoid""" +1036 98 regularizer """no""" +1036 98 optimizer """adam""" +1036 98 training_loop """owa""" +1036 98 negative_sampler """basic""" +1036 98 evaluator """rankbased""" +1036 99 dataset """wn18rr""" +1036 99 model """unstructuredmodel""" +1036 99 loss """bceaftersigmoid""" +1036 99 regularizer """no""" +1036 99 optimizer """adam""" +1036 99 training_loop """owa""" +1036 99 negative_sampler """basic""" +1036 99 evaluator """rankbased""" +1036 100 dataset """wn18rr""" +1036 100 model """unstructuredmodel""" +1036 100 loss """bceaftersigmoid""" +1036 100 regularizer """no""" +1036 100 optimizer """adam""" +1036 100 training_loop """owa""" 
+1036 100 negative_sampler """basic""" +1036 100 evaluator """rankbased""" +1037 1 model.embedding_dim 1.0 +1037 1 model.scoring_fct_norm 2.0 +1037 1 optimizer.lr 0.013628128170392504 +1037 1 negative_sampler.num_negs_per_pos 59.0 +1037 1 training.batch_size 2.0 +1037 2 model.embedding_dim 1.0 +1037 2 model.scoring_fct_norm 1.0 +1037 2 optimizer.lr 0.04906100940790034 +1037 2 negative_sampler.num_negs_per_pos 19.0 +1037 2 training.batch_size 2.0 +1037 3 model.embedding_dim 2.0 +1037 3 model.scoring_fct_norm 1.0 +1037 3 optimizer.lr 0.0039434689087217084 +1037 3 negative_sampler.num_negs_per_pos 55.0 +1037 3 training.batch_size 0.0 +1037 4 model.embedding_dim 0.0 +1037 4 model.scoring_fct_norm 1.0 +1037 4 optimizer.lr 0.014565369384181254 +1037 4 negative_sampler.num_negs_per_pos 28.0 +1037 4 training.batch_size 0.0 +1037 5 model.embedding_dim 0.0 +1037 5 model.scoring_fct_norm 1.0 +1037 5 optimizer.lr 0.08668400816864039 +1037 5 negative_sampler.num_negs_per_pos 92.0 +1037 5 training.batch_size 2.0 +1037 6 model.embedding_dim 2.0 +1037 6 model.scoring_fct_norm 2.0 +1037 6 optimizer.lr 0.004882151358675631 +1037 6 negative_sampler.num_negs_per_pos 66.0 +1037 6 training.batch_size 1.0 +1037 7 model.embedding_dim 1.0 +1037 7 model.scoring_fct_norm 1.0 +1037 7 optimizer.lr 0.07326049005836233 +1037 7 negative_sampler.num_negs_per_pos 65.0 +1037 7 training.batch_size 1.0 +1037 8 model.embedding_dim 0.0 +1037 8 model.scoring_fct_norm 1.0 +1037 8 optimizer.lr 0.0014483431481295546 +1037 8 negative_sampler.num_negs_per_pos 76.0 +1037 8 training.batch_size 0.0 +1037 9 model.embedding_dim 1.0 +1037 9 model.scoring_fct_norm 1.0 +1037 9 optimizer.lr 0.015887580933020586 +1037 9 negative_sampler.num_negs_per_pos 90.0 +1037 9 training.batch_size 2.0 +1037 10 model.embedding_dim 1.0 +1037 10 model.scoring_fct_norm 2.0 +1037 10 optimizer.lr 0.0010463288041134085 +1037 10 negative_sampler.num_negs_per_pos 55.0 +1037 10 training.batch_size 2.0 +1037 11 model.embedding_dim 0.0 +1037 
11 model.scoring_fct_norm 2.0 +1037 11 optimizer.lr 0.0059156854435291305 +1037 11 negative_sampler.num_negs_per_pos 22.0 +1037 11 training.batch_size 0.0 +1037 12 model.embedding_dim 2.0 +1037 12 model.scoring_fct_norm 2.0 +1037 12 optimizer.lr 0.00567076714995114 +1037 12 negative_sampler.num_negs_per_pos 44.0 +1037 12 training.batch_size 1.0 +1037 13 model.embedding_dim 0.0 +1037 13 model.scoring_fct_norm 2.0 +1037 13 optimizer.lr 0.0025707863878994884 +1037 13 negative_sampler.num_negs_per_pos 90.0 +1037 13 training.batch_size 1.0 +1037 14 model.embedding_dim 1.0 +1037 14 model.scoring_fct_norm 1.0 +1037 14 optimizer.lr 0.0024361044845545 +1037 14 negative_sampler.num_negs_per_pos 99.0 +1037 14 training.batch_size 1.0 +1037 15 model.embedding_dim 2.0 +1037 15 model.scoring_fct_norm 1.0 +1037 15 optimizer.lr 0.009694076104965707 +1037 15 negative_sampler.num_negs_per_pos 75.0 +1037 15 training.batch_size 1.0 +1037 16 model.embedding_dim 1.0 +1037 16 model.scoring_fct_norm 1.0 +1037 16 optimizer.lr 0.0026305344307754387 +1037 16 negative_sampler.num_negs_per_pos 15.0 +1037 16 training.batch_size 2.0 +1037 17 model.embedding_dim 0.0 +1037 17 model.scoring_fct_norm 1.0 +1037 17 optimizer.lr 0.03193745089393313 +1037 17 negative_sampler.num_negs_per_pos 50.0 +1037 17 training.batch_size 0.0 +1037 18 model.embedding_dim 0.0 +1037 18 model.scoring_fct_norm 1.0 +1037 18 optimizer.lr 0.014773145110659134 +1037 18 negative_sampler.num_negs_per_pos 71.0 +1037 18 training.batch_size 2.0 +1037 19 model.embedding_dim 0.0 +1037 19 model.scoring_fct_norm 2.0 +1037 19 optimizer.lr 0.008069902146753809 +1037 19 negative_sampler.num_negs_per_pos 60.0 +1037 19 training.batch_size 2.0 +1037 20 model.embedding_dim 2.0 +1037 20 model.scoring_fct_norm 1.0 +1037 20 optimizer.lr 0.05325285101777268 +1037 20 negative_sampler.num_negs_per_pos 73.0 +1037 20 training.batch_size 1.0 +1037 21 model.embedding_dim 1.0 +1037 21 model.scoring_fct_norm 1.0 +1037 21 optimizer.lr 0.02904934402443454 
+1037 21 negative_sampler.num_negs_per_pos 61.0 +1037 21 training.batch_size 0.0 +1037 22 model.embedding_dim 2.0 +1037 22 model.scoring_fct_norm 2.0 +1037 22 optimizer.lr 0.012329532994127673 +1037 22 negative_sampler.num_negs_per_pos 10.0 +1037 22 training.batch_size 1.0 +1037 23 model.embedding_dim 1.0 +1037 23 model.scoring_fct_norm 1.0 +1037 23 optimizer.lr 0.005801375265841349 +1037 23 negative_sampler.num_negs_per_pos 75.0 +1037 23 training.batch_size 2.0 +1037 24 model.embedding_dim 1.0 +1037 24 model.scoring_fct_norm 1.0 +1037 24 optimizer.lr 0.0010297374778358694 +1037 24 negative_sampler.num_negs_per_pos 83.0 +1037 24 training.batch_size 0.0 +1037 25 model.embedding_dim 2.0 +1037 25 model.scoring_fct_norm 1.0 +1037 25 optimizer.lr 0.001789901387179095 +1037 25 negative_sampler.num_negs_per_pos 49.0 +1037 25 training.batch_size 2.0 +1037 26 model.embedding_dim 0.0 +1037 26 model.scoring_fct_norm 1.0 +1037 26 optimizer.lr 0.028502005982815545 +1037 26 negative_sampler.num_negs_per_pos 22.0 +1037 26 training.batch_size 0.0 +1037 27 model.embedding_dim 2.0 +1037 27 model.scoring_fct_norm 1.0 +1037 27 optimizer.lr 0.09141571257140135 +1037 27 negative_sampler.num_negs_per_pos 32.0 +1037 27 training.batch_size 2.0 +1037 28 model.embedding_dim 1.0 +1037 28 model.scoring_fct_norm 2.0 +1037 28 optimizer.lr 0.03206466940962283 +1037 28 negative_sampler.num_negs_per_pos 12.0 +1037 28 training.batch_size 0.0 +1037 29 model.embedding_dim 1.0 +1037 29 model.scoring_fct_norm 2.0 +1037 29 optimizer.lr 0.0012061513082329853 +1037 29 negative_sampler.num_negs_per_pos 72.0 +1037 29 training.batch_size 2.0 +1037 30 model.embedding_dim 0.0 +1037 30 model.scoring_fct_norm 2.0 +1037 30 optimizer.lr 0.038629593863315353 +1037 30 negative_sampler.num_negs_per_pos 41.0 +1037 30 training.batch_size 0.0 +1037 31 model.embedding_dim 1.0 +1037 31 model.scoring_fct_norm 2.0 +1037 31 optimizer.lr 0.06931496648799199 +1037 31 negative_sampler.num_negs_per_pos 32.0 +1037 31 
training.batch_size 1.0 +1037 32 model.embedding_dim 0.0 +1037 32 model.scoring_fct_norm 2.0 +1037 32 optimizer.lr 0.0010224327505263754 +1037 32 negative_sampler.num_negs_per_pos 34.0 +1037 32 training.batch_size 1.0 +1037 33 model.embedding_dim 0.0 +1037 33 model.scoring_fct_norm 2.0 +1037 33 optimizer.lr 0.0022618670550196736 +1037 33 negative_sampler.num_negs_per_pos 15.0 +1037 33 training.batch_size 2.0 +1037 34 model.embedding_dim 0.0 +1037 34 model.scoring_fct_norm 1.0 +1037 34 optimizer.lr 0.031677344332605904 +1037 34 negative_sampler.num_negs_per_pos 39.0 +1037 34 training.batch_size 1.0 +1037 35 model.embedding_dim 1.0 +1037 35 model.scoring_fct_norm 2.0 +1037 35 optimizer.lr 0.003077789910201698 +1037 35 negative_sampler.num_negs_per_pos 49.0 +1037 35 training.batch_size 2.0 +1037 36 model.embedding_dim 1.0 +1037 36 model.scoring_fct_norm 2.0 +1037 36 optimizer.lr 0.0015544320594618668 +1037 36 negative_sampler.num_negs_per_pos 47.0 +1037 36 training.batch_size 0.0 +1037 37 model.embedding_dim 0.0 +1037 37 model.scoring_fct_norm 2.0 +1037 37 optimizer.lr 0.0011998377711887774 +1037 37 negative_sampler.num_negs_per_pos 11.0 +1037 37 training.batch_size 1.0 +1037 38 model.embedding_dim 1.0 +1037 38 model.scoring_fct_norm 2.0 +1037 38 optimizer.lr 0.0021310295058685268 +1037 38 negative_sampler.num_negs_per_pos 77.0 +1037 38 training.batch_size 2.0 +1037 39 model.embedding_dim 1.0 +1037 39 model.scoring_fct_norm 1.0 +1037 39 optimizer.lr 0.002338994445280527 +1037 39 negative_sampler.num_negs_per_pos 85.0 +1037 39 training.batch_size 0.0 +1037 40 model.embedding_dim 2.0 +1037 40 model.scoring_fct_norm 1.0 +1037 40 optimizer.lr 0.0018651546964558502 +1037 40 negative_sampler.num_negs_per_pos 16.0 +1037 40 training.batch_size 1.0 +1037 41 model.embedding_dim 2.0 +1037 41 model.scoring_fct_norm 1.0 +1037 41 optimizer.lr 0.012785162381761844 +1037 41 negative_sampler.num_negs_per_pos 57.0 +1037 41 training.batch_size 1.0 +1037 42 model.embedding_dim 1.0 +1037 
42 model.scoring_fct_norm 2.0 +1037 42 optimizer.lr 0.002304093787719204 +1037 42 negative_sampler.num_negs_per_pos 80.0 +1037 42 training.batch_size 0.0 +1037 43 model.embedding_dim 2.0 +1037 43 model.scoring_fct_norm 2.0 +1037 43 optimizer.lr 0.0028558045765712608 +1037 43 negative_sampler.num_negs_per_pos 15.0 +1037 43 training.batch_size 1.0 +1037 44 model.embedding_dim 0.0 +1037 44 model.scoring_fct_norm 2.0 +1037 44 optimizer.lr 0.004009182066343883 +1037 44 negative_sampler.num_negs_per_pos 81.0 +1037 44 training.batch_size 2.0 +1037 45 model.embedding_dim 2.0 +1037 45 model.scoring_fct_norm 1.0 +1037 45 optimizer.lr 0.009794577803951874 +1037 45 negative_sampler.num_negs_per_pos 44.0 +1037 45 training.batch_size 1.0 +1037 46 model.embedding_dim 0.0 +1037 46 model.scoring_fct_norm 2.0 +1037 46 optimizer.lr 0.007076758930263112 +1037 46 negative_sampler.num_negs_per_pos 56.0 +1037 46 training.batch_size 1.0 +1037 47 model.embedding_dim 0.0 +1037 47 model.scoring_fct_norm 1.0 +1037 47 optimizer.lr 0.0018061314247253813 +1037 47 negative_sampler.num_negs_per_pos 71.0 +1037 47 training.batch_size 1.0 +1037 48 model.embedding_dim 2.0 +1037 48 model.scoring_fct_norm 2.0 +1037 48 optimizer.lr 0.00493650546853235 +1037 48 negative_sampler.num_negs_per_pos 36.0 +1037 48 training.batch_size 0.0 +1037 49 model.embedding_dim 0.0 +1037 49 model.scoring_fct_norm 1.0 +1037 49 optimizer.lr 0.043514810573652266 +1037 49 negative_sampler.num_negs_per_pos 58.0 +1037 49 training.batch_size 1.0 +1037 50 model.embedding_dim 1.0 +1037 50 model.scoring_fct_norm 1.0 +1037 50 optimizer.lr 0.009522208520479126 +1037 50 negative_sampler.num_negs_per_pos 54.0 +1037 50 training.batch_size 1.0 +1037 51 model.embedding_dim 0.0 +1037 51 model.scoring_fct_norm 1.0 +1037 51 optimizer.lr 0.0020209973203232064 +1037 51 negative_sampler.num_negs_per_pos 92.0 +1037 51 training.batch_size 1.0 +1037 52 model.embedding_dim 0.0 +1037 52 model.scoring_fct_norm 1.0 +1037 52 optimizer.lr 
0.021084253720746463 +1037 52 negative_sampler.num_negs_per_pos 71.0 +1037 52 training.batch_size 2.0 +1037 53 model.embedding_dim 2.0 +1037 53 model.scoring_fct_norm 2.0 +1037 53 optimizer.lr 0.0013462276021939494 +1037 53 negative_sampler.num_negs_per_pos 50.0 +1037 53 training.batch_size 0.0 +1037 54 model.embedding_dim 2.0 +1037 54 model.scoring_fct_norm 2.0 +1037 54 optimizer.lr 0.002660609785697702 +1037 54 negative_sampler.num_negs_per_pos 45.0 +1037 54 training.batch_size 1.0 +1037 55 model.embedding_dim 2.0 +1037 55 model.scoring_fct_norm 2.0 +1037 55 optimizer.lr 0.003630997923303474 +1037 55 negative_sampler.num_negs_per_pos 72.0 +1037 55 training.batch_size 1.0 +1037 56 model.embedding_dim 0.0 +1037 56 model.scoring_fct_norm 2.0 +1037 56 optimizer.lr 0.0013789432139198505 +1037 56 negative_sampler.num_negs_per_pos 15.0 +1037 56 training.batch_size 2.0 +1037 57 model.embedding_dim 1.0 +1037 57 model.scoring_fct_norm 1.0 +1037 57 optimizer.lr 0.014792338053376555 +1037 57 negative_sampler.num_negs_per_pos 27.0 +1037 57 training.batch_size 1.0 +1037 58 model.embedding_dim 2.0 +1037 58 model.scoring_fct_norm 2.0 +1037 58 optimizer.lr 0.005922417908675332 +1037 58 negative_sampler.num_negs_per_pos 15.0 +1037 58 training.batch_size 2.0 +1037 59 model.embedding_dim 1.0 +1037 59 model.scoring_fct_norm 1.0 +1037 59 optimizer.lr 0.010383690225996296 +1037 59 negative_sampler.num_negs_per_pos 41.0 +1037 59 training.batch_size 1.0 +1037 60 model.embedding_dim 1.0 +1037 60 model.scoring_fct_norm 1.0 +1037 60 optimizer.lr 0.03332752734140291 +1037 60 negative_sampler.num_negs_per_pos 23.0 +1037 60 training.batch_size 1.0 +1037 61 model.embedding_dim 0.0 +1037 61 model.scoring_fct_norm 1.0 +1037 61 optimizer.lr 0.010286382662654055 +1037 61 negative_sampler.num_negs_per_pos 10.0 +1037 61 training.batch_size 0.0 +1037 62 model.embedding_dim 1.0 +1037 62 model.scoring_fct_norm 2.0 +1037 62 optimizer.lr 0.0017041241051508412 +1037 62 negative_sampler.num_negs_per_pos 
12.0 +1037 62 training.batch_size 2.0 +1037 63 model.embedding_dim 2.0 +1037 63 model.scoring_fct_norm 2.0 +1037 63 optimizer.lr 0.03560533270073921 +1037 63 negative_sampler.num_negs_per_pos 62.0 +1037 63 training.batch_size 0.0 +1037 64 model.embedding_dim 1.0 +1037 64 model.scoring_fct_norm 2.0 +1037 64 optimizer.lr 0.07927044073651295 +1037 64 negative_sampler.num_negs_per_pos 28.0 +1037 64 training.batch_size 1.0 +1037 65 model.embedding_dim 1.0 +1037 65 model.scoring_fct_norm 2.0 +1037 65 optimizer.lr 0.00663193698332455 +1037 65 negative_sampler.num_negs_per_pos 74.0 +1037 65 training.batch_size 2.0 +1037 66 model.embedding_dim 0.0 +1037 66 model.scoring_fct_norm 1.0 +1037 66 optimizer.lr 0.011246564640589376 +1037 66 negative_sampler.num_negs_per_pos 89.0 +1037 66 training.batch_size 0.0 +1037 67 model.embedding_dim 1.0 +1037 67 model.scoring_fct_norm 2.0 +1037 67 optimizer.lr 0.007843546212519016 +1037 67 negative_sampler.num_negs_per_pos 73.0 +1037 67 training.batch_size 2.0 +1037 68 model.embedding_dim 1.0 +1037 68 model.scoring_fct_norm 1.0 +1037 68 optimizer.lr 0.02925154195073124 +1037 68 negative_sampler.num_negs_per_pos 30.0 +1037 68 training.batch_size 1.0 +1037 69 model.embedding_dim 2.0 +1037 69 model.scoring_fct_norm 2.0 +1037 69 optimizer.lr 0.03121257259841224 +1037 69 negative_sampler.num_negs_per_pos 28.0 +1037 69 training.batch_size 1.0 +1037 70 model.embedding_dim 0.0 +1037 70 model.scoring_fct_norm 1.0 +1037 70 optimizer.lr 0.061968905354693245 +1037 70 negative_sampler.num_negs_per_pos 92.0 +1037 70 training.batch_size 1.0 +1037 71 model.embedding_dim 1.0 +1037 71 model.scoring_fct_norm 2.0 +1037 71 optimizer.lr 0.01047739221332497 +1037 71 negative_sampler.num_negs_per_pos 80.0 +1037 71 training.batch_size 2.0 +1037 72 model.embedding_dim 0.0 +1037 72 model.scoring_fct_norm 2.0 +1037 72 optimizer.lr 0.0033884704384250213 +1037 72 negative_sampler.num_negs_per_pos 7.0 +1037 72 training.batch_size 2.0 +1037 73 model.embedding_dim 0.0 
+1037 73 model.scoring_fct_norm 2.0 +1037 73 optimizer.lr 0.00502829767092021 +1037 73 negative_sampler.num_negs_per_pos 40.0 +1037 73 training.batch_size 0.0 +1037 74 model.embedding_dim 2.0 +1037 74 model.scoring_fct_norm 1.0 +1037 74 optimizer.lr 0.06129196165516643 +1037 74 negative_sampler.num_negs_per_pos 3.0 +1037 74 training.batch_size 0.0 +1037 75 model.embedding_dim 0.0 +1037 75 model.scoring_fct_norm 1.0 +1037 75 optimizer.lr 0.0014985714835119145 +1037 75 negative_sampler.num_negs_per_pos 42.0 +1037 75 training.batch_size 2.0 +1037 76 model.embedding_dim 1.0 +1037 76 model.scoring_fct_norm 1.0 +1037 76 optimizer.lr 0.014096322227182418 +1037 76 negative_sampler.num_negs_per_pos 53.0 +1037 76 training.batch_size 2.0 +1037 77 model.embedding_dim 2.0 +1037 77 model.scoring_fct_norm 1.0 +1037 77 optimizer.lr 0.00371892576765902 +1037 77 negative_sampler.num_negs_per_pos 15.0 +1037 77 training.batch_size 2.0 +1037 78 model.embedding_dim 1.0 +1037 78 model.scoring_fct_norm 1.0 +1037 78 optimizer.lr 0.002854706736314068 +1037 78 negative_sampler.num_negs_per_pos 33.0 +1037 78 training.batch_size 1.0 +1037 79 model.embedding_dim 1.0 +1037 79 model.scoring_fct_norm 1.0 +1037 79 optimizer.lr 0.010179589984572694 +1037 79 negative_sampler.num_negs_per_pos 18.0 +1037 79 training.batch_size 1.0 +1037 80 model.embedding_dim 2.0 +1037 80 model.scoring_fct_norm 2.0 +1037 80 optimizer.lr 0.024363771617342447 +1037 80 negative_sampler.num_negs_per_pos 38.0 +1037 80 training.batch_size 1.0 +1037 81 model.embedding_dim 1.0 +1037 81 model.scoring_fct_norm 2.0 +1037 81 optimizer.lr 0.007999910491677973 +1037 81 negative_sampler.num_negs_per_pos 17.0 +1037 81 training.batch_size 2.0 +1037 82 model.embedding_dim 0.0 +1037 82 model.scoring_fct_norm 1.0 +1037 82 optimizer.lr 0.0025319206993708395 +1037 82 negative_sampler.num_negs_per_pos 32.0 +1037 82 training.batch_size 0.0 +1037 83 model.embedding_dim 2.0 +1037 83 model.scoring_fct_norm 2.0 +1037 83 optimizer.lr 
0.03877811410172725 +1037 83 negative_sampler.num_negs_per_pos 86.0 +1037 83 training.batch_size 1.0 +1037 84 model.embedding_dim 0.0 +1037 84 model.scoring_fct_norm 1.0 +1037 84 optimizer.lr 0.0016084963336822542 +1037 84 negative_sampler.num_negs_per_pos 97.0 +1037 84 training.batch_size 1.0 +1037 85 model.embedding_dim 2.0 +1037 85 model.scoring_fct_norm 2.0 +1037 85 optimizer.lr 0.08145227646616757 +1037 85 negative_sampler.num_negs_per_pos 90.0 +1037 85 training.batch_size 0.0 +1037 86 model.embedding_dim 0.0 +1037 86 model.scoring_fct_norm 1.0 +1037 86 optimizer.lr 0.018472858444838383 +1037 86 negative_sampler.num_negs_per_pos 38.0 +1037 86 training.batch_size 2.0 +1037 87 model.embedding_dim 2.0 +1037 87 model.scoring_fct_norm 2.0 +1037 87 optimizer.lr 0.0041173104082066255 +1037 87 negative_sampler.num_negs_per_pos 91.0 +1037 87 training.batch_size 0.0 +1037 88 model.embedding_dim 0.0 +1037 88 model.scoring_fct_norm 2.0 +1037 88 optimizer.lr 0.04642637404753517 +1037 88 negative_sampler.num_negs_per_pos 8.0 +1037 88 training.batch_size 2.0 +1037 89 model.embedding_dim 0.0 +1037 89 model.scoring_fct_norm 1.0 +1037 89 optimizer.lr 0.04371810431689948 +1037 89 negative_sampler.num_negs_per_pos 84.0 +1037 89 training.batch_size 1.0 +1037 90 model.embedding_dim 1.0 +1037 90 model.scoring_fct_norm 1.0 +1037 90 optimizer.lr 0.018163085263652644 +1037 90 negative_sampler.num_negs_per_pos 9.0 +1037 90 training.batch_size 1.0 +1037 91 model.embedding_dim 2.0 +1037 91 model.scoring_fct_norm 1.0 +1037 91 optimizer.lr 0.022577435702251323 +1037 91 negative_sampler.num_negs_per_pos 35.0 +1037 91 training.batch_size 0.0 +1037 92 model.embedding_dim 2.0 +1037 92 model.scoring_fct_norm 1.0 +1037 92 optimizer.lr 0.0022800545364110004 +1037 92 negative_sampler.num_negs_per_pos 78.0 +1037 92 training.batch_size 1.0 +1037 93 model.embedding_dim 2.0 +1037 93 model.scoring_fct_norm 2.0 +1037 93 optimizer.lr 0.003855619321384726 +1037 93 negative_sampler.num_negs_per_pos 95.0 
+1037 93 training.batch_size 0.0 +1037 94 model.embedding_dim 0.0 +1037 94 model.scoring_fct_norm 1.0 +1037 94 optimizer.lr 0.024897841532082188 +1037 94 negative_sampler.num_negs_per_pos 59.0 +1037 94 training.batch_size 0.0 +1037 95 model.embedding_dim 1.0 +1037 95 model.scoring_fct_norm 1.0 +1037 95 optimizer.lr 0.003210762125523001 +1037 95 negative_sampler.num_negs_per_pos 7.0 +1037 95 training.batch_size 2.0 +1037 96 model.embedding_dim 1.0 +1037 96 model.scoring_fct_norm 1.0 +1037 96 optimizer.lr 0.021967356235447134 +1037 96 negative_sampler.num_negs_per_pos 51.0 +1037 96 training.batch_size 1.0 +1037 97 model.embedding_dim 0.0 +1037 97 model.scoring_fct_norm 2.0 +1037 97 optimizer.lr 0.02208475057479882 +1037 97 negative_sampler.num_negs_per_pos 1.0 +1037 97 training.batch_size 1.0 +1037 98 model.embedding_dim 0.0 +1037 98 model.scoring_fct_norm 2.0 +1037 98 optimizer.lr 0.06796853169757555 +1037 98 negative_sampler.num_negs_per_pos 9.0 +1037 98 training.batch_size 2.0 +1037 99 model.embedding_dim 2.0 +1037 99 model.scoring_fct_norm 2.0 +1037 99 optimizer.lr 0.018559855731578725 +1037 99 negative_sampler.num_negs_per_pos 25.0 +1037 99 training.batch_size 1.0 +1037 100 model.embedding_dim 1.0 +1037 100 model.scoring_fct_norm 2.0 +1037 100 optimizer.lr 0.016461766172132908 +1037 100 negative_sampler.num_negs_per_pos 87.0 +1037 100 training.batch_size 1.0 +1037 1 dataset """wn18rr""" +1037 1 model """unstructuredmodel""" +1037 1 loss """softplus""" +1037 1 regularizer """no""" +1037 1 optimizer """adam""" +1037 1 training_loop """owa""" +1037 1 negative_sampler """basic""" +1037 1 evaluator """rankbased""" +1037 2 dataset """wn18rr""" +1037 2 model """unstructuredmodel""" +1037 2 loss """softplus""" +1037 2 regularizer """no""" +1037 2 optimizer """adam""" +1037 2 training_loop """owa""" +1037 2 negative_sampler """basic""" +1037 2 evaluator """rankbased""" +1037 3 dataset """wn18rr""" +1037 3 model """unstructuredmodel""" +1037 3 loss """softplus""" +1037 3 
regularizer """no""" +1037 3 optimizer """adam""" +1037 3 training_loop """owa""" +1037 3 negative_sampler """basic""" +1037 3 evaluator """rankbased""" +1037 4 dataset """wn18rr""" +1037 4 model """unstructuredmodel""" +1037 4 loss """softplus""" +1037 4 regularizer """no""" +1037 4 optimizer """adam""" +1037 4 training_loop """owa""" +1037 4 negative_sampler """basic""" +1037 4 evaluator """rankbased""" +1037 5 dataset """wn18rr""" +1037 5 model """unstructuredmodel""" +1037 5 loss """softplus""" +1037 5 regularizer """no""" +1037 5 optimizer """adam""" +1037 5 training_loop """owa""" +1037 5 negative_sampler """basic""" +1037 5 evaluator """rankbased""" +1037 6 dataset """wn18rr""" +1037 6 model """unstructuredmodel""" +1037 6 loss """softplus""" +1037 6 regularizer """no""" +1037 6 optimizer """adam""" +1037 6 training_loop """owa""" +1037 6 negative_sampler """basic""" +1037 6 evaluator """rankbased""" +1037 7 dataset """wn18rr""" +1037 7 model """unstructuredmodel""" +1037 7 loss """softplus""" +1037 7 regularizer """no""" +1037 7 optimizer """adam""" +1037 7 training_loop """owa""" +1037 7 negative_sampler """basic""" +1037 7 evaluator """rankbased""" +1037 8 dataset """wn18rr""" +1037 8 model """unstructuredmodel""" +1037 8 loss """softplus""" +1037 8 regularizer """no""" +1037 8 optimizer """adam""" +1037 8 training_loop """owa""" +1037 8 negative_sampler """basic""" +1037 8 evaluator """rankbased""" +1037 9 dataset """wn18rr""" +1037 9 model """unstructuredmodel""" +1037 9 loss """softplus""" +1037 9 regularizer """no""" +1037 9 optimizer """adam""" +1037 9 training_loop """owa""" +1037 9 negative_sampler """basic""" +1037 9 evaluator """rankbased""" +1037 10 dataset """wn18rr""" +1037 10 model """unstructuredmodel""" +1037 10 loss """softplus""" +1037 10 regularizer """no""" +1037 10 optimizer """adam""" +1037 10 training_loop """owa""" +1037 10 negative_sampler """basic""" +1037 10 evaluator """rankbased""" +1037 11 dataset """wn18rr""" +1037 11 model 
"""unstructuredmodel""" +1037 11 loss """softplus""" +1037 11 regularizer """no""" +1037 11 optimizer """adam""" +1037 11 training_loop """owa""" +1037 11 negative_sampler """basic""" +1037 11 evaluator """rankbased""" +1037 12 dataset """wn18rr""" +1037 12 model """unstructuredmodel""" +1037 12 loss """softplus""" +1037 12 regularizer """no""" +1037 12 optimizer """adam""" +1037 12 training_loop """owa""" +1037 12 negative_sampler """basic""" +1037 12 evaluator """rankbased""" +1037 13 dataset """wn18rr""" +1037 13 model """unstructuredmodel""" +1037 13 loss """softplus""" +1037 13 regularizer """no""" +1037 13 optimizer """adam""" +1037 13 training_loop """owa""" +1037 13 negative_sampler """basic""" +1037 13 evaluator """rankbased""" +1037 14 dataset """wn18rr""" +1037 14 model """unstructuredmodel""" +1037 14 loss """softplus""" +1037 14 regularizer """no""" +1037 14 optimizer """adam""" +1037 14 training_loop """owa""" +1037 14 negative_sampler """basic""" +1037 14 evaluator """rankbased""" +1037 15 dataset """wn18rr""" +1037 15 model """unstructuredmodel""" +1037 15 loss """softplus""" +1037 15 regularizer """no""" +1037 15 optimizer """adam""" +1037 15 training_loop """owa""" +1037 15 negative_sampler """basic""" +1037 15 evaluator """rankbased""" +1037 16 dataset """wn18rr""" +1037 16 model """unstructuredmodel""" +1037 16 loss """softplus""" +1037 16 regularizer """no""" +1037 16 optimizer """adam""" +1037 16 training_loop """owa""" +1037 16 negative_sampler """basic""" +1037 16 evaluator """rankbased""" +1037 17 dataset """wn18rr""" +1037 17 model """unstructuredmodel""" +1037 17 loss """softplus""" +1037 17 regularizer """no""" +1037 17 optimizer """adam""" +1037 17 training_loop """owa""" +1037 17 negative_sampler """basic""" +1037 17 evaluator """rankbased""" +1037 18 dataset """wn18rr""" +1037 18 model """unstructuredmodel""" +1037 18 loss """softplus""" +1037 18 regularizer """no""" +1037 18 optimizer """adam""" +1037 18 training_loop """owa""" +1037 
18 negative_sampler """basic""" +1037 18 evaluator """rankbased""" +1037 19 dataset """wn18rr""" +1037 19 model """unstructuredmodel""" +1037 19 loss """softplus""" +1037 19 regularizer """no""" +1037 19 optimizer """adam""" +1037 19 training_loop """owa""" +1037 19 negative_sampler """basic""" +1037 19 evaluator """rankbased""" +1037 20 dataset """wn18rr""" +1037 20 model """unstructuredmodel""" +1037 20 loss """softplus""" +1037 20 regularizer """no""" +1037 20 optimizer """adam""" +1037 20 training_loop """owa""" +1037 20 negative_sampler """basic""" +1037 20 evaluator """rankbased""" +1037 21 dataset """wn18rr""" +1037 21 model """unstructuredmodel""" +1037 21 loss """softplus""" +1037 21 regularizer """no""" +1037 21 optimizer """adam""" +1037 21 training_loop """owa""" +1037 21 negative_sampler """basic""" +1037 21 evaluator """rankbased""" +1037 22 dataset """wn18rr""" +1037 22 model """unstructuredmodel""" +1037 22 loss """softplus""" +1037 22 regularizer """no""" +1037 22 optimizer """adam""" +1037 22 training_loop """owa""" +1037 22 negative_sampler """basic""" +1037 22 evaluator """rankbased""" +1037 23 dataset """wn18rr""" +1037 23 model """unstructuredmodel""" +1037 23 loss """softplus""" +1037 23 regularizer """no""" +1037 23 optimizer """adam""" +1037 23 training_loop """owa""" +1037 23 negative_sampler """basic""" +1037 23 evaluator """rankbased""" +1037 24 dataset """wn18rr""" +1037 24 model """unstructuredmodel""" +1037 24 loss """softplus""" +1037 24 regularizer """no""" +1037 24 optimizer """adam""" +1037 24 training_loop """owa""" +1037 24 negative_sampler """basic""" +1037 24 evaluator """rankbased""" +1037 25 dataset """wn18rr""" +1037 25 model """unstructuredmodel""" +1037 25 loss """softplus""" +1037 25 regularizer """no""" +1037 25 optimizer """adam""" +1037 25 training_loop """owa""" +1037 25 negative_sampler """basic""" +1037 25 evaluator """rankbased""" +1037 26 dataset """wn18rr""" +1037 26 model """unstructuredmodel""" +1037 26 loss 
"""softplus""" +1037 26 regularizer """no""" +1037 26 optimizer """adam""" +1037 26 training_loop """owa""" +1037 26 negative_sampler """basic""" +1037 26 evaluator """rankbased""" +1037 27 dataset """wn18rr""" +1037 27 model """unstructuredmodel""" +1037 27 loss """softplus""" +1037 27 regularizer """no""" +1037 27 optimizer """adam""" +1037 27 training_loop """owa""" +1037 27 negative_sampler """basic""" +1037 27 evaluator """rankbased""" +1037 28 dataset """wn18rr""" +1037 28 model """unstructuredmodel""" +1037 28 loss """softplus""" +1037 28 regularizer """no""" +1037 28 optimizer """adam""" +1037 28 training_loop """owa""" +1037 28 negative_sampler """basic""" +1037 28 evaluator """rankbased""" +1037 29 dataset """wn18rr""" +1037 29 model """unstructuredmodel""" +1037 29 loss """softplus""" +1037 29 regularizer """no""" +1037 29 optimizer """adam""" +1037 29 training_loop """owa""" +1037 29 negative_sampler """basic""" +1037 29 evaluator """rankbased""" +1037 30 dataset """wn18rr""" +1037 30 model """unstructuredmodel""" +1037 30 loss """softplus""" +1037 30 regularizer """no""" +1037 30 optimizer """adam""" +1037 30 training_loop """owa""" +1037 30 negative_sampler """basic""" +1037 30 evaluator """rankbased""" +1037 31 dataset """wn18rr""" +1037 31 model """unstructuredmodel""" +1037 31 loss """softplus""" +1037 31 regularizer """no""" +1037 31 optimizer """adam""" +1037 31 training_loop """owa""" +1037 31 negative_sampler """basic""" +1037 31 evaluator """rankbased""" +1037 32 dataset """wn18rr""" +1037 32 model """unstructuredmodel""" +1037 32 loss """softplus""" +1037 32 regularizer """no""" +1037 32 optimizer """adam""" +1037 32 training_loop """owa""" +1037 32 negative_sampler """basic""" +1037 32 evaluator """rankbased""" +1037 33 dataset """wn18rr""" +1037 33 model """unstructuredmodel""" +1037 33 loss """softplus""" +1037 33 regularizer """no""" +1037 33 optimizer """adam""" +1037 33 training_loop """owa""" +1037 33 negative_sampler """basic""" +1037 
33 evaluator """rankbased""" +1037 34 dataset """wn18rr""" +1037 34 model """unstructuredmodel""" +1037 34 loss """softplus""" +1037 34 regularizer """no""" +1037 34 optimizer """adam""" +1037 34 training_loop """owa""" +1037 34 negative_sampler """basic""" +1037 34 evaluator """rankbased""" +1037 35 dataset """wn18rr""" +1037 35 model """unstructuredmodel""" +1037 35 loss """softplus""" +1037 35 regularizer """no""" +1037 35 optimizer """adam""" +1037 35 training_loop """owa""" +1037 35 negative_sampler """basic""" +1037 35 evaluator """rankbased""" +1037 36 dataset """wn18rr""" +1037 36 model """unstructuredmodel""" +1037 36 loss """softplus""" +1037 36 regularizer """no""" +1037 36 optimizer """adam""" +1037 36 training_loop """owa""" +1037 36 negative_sampler """basic""" +1037 36 evaluator """rankbased""" +1037 37 dataset """wn18rr""" +1037 37 model """unstructuredmodel""" +1037 37 loss """softplus""" +1037 37 regularizer """no""" +1037 37 optimizer """adam""" +1037 37 training_loop """owa""" +1037 37 negative_sampler """basic""" +1037 37 evaluator """rankbased""" +1037 38 dataset """wn18rr""" +1037 38 model """unstructuredmodel""" +1037 38 loss """softplus""" +1037 38 regularizer """no""" +1037 38 optimizer """adam""" +1037 38 training_loop """owa""" +1037 38 negative_sampler """basic""" +1037 38 evaluator """rankbased""" +1037 39 dataset """wn18rr""" +1037 39 model """unstructuredmodel""" +1037 39 loss """softplus""" +1037 39 regularizer """no""" +1037 39 optimizer """adam""" +1037 39 training_loop """owa""" +1037 39 negative_sampler """basic""" +1037 39 evaluator """rankbased""" +1037 40 dataset """wn18rr""" +1037 40 model """unstructuredmodel""" +1037 40 loss """softplus""" +1037 40 regularizer """no""" +1037 40 optimizer """adam""" +1037 40 training_loop """owa""" +1037 40 negative_sampler """basic""" +1037 40 evaluator """rankbased""" +1037 41 dataset """wn18rr""" +1037 41 model """unstructuredmodel""" +1037 41 loss """softplus""" +1037 41 regularizer 
"""no""" +1037 41 optimizer """adam""" +1037 41 training_loop """owa""" +1037 41 negative_sampler """basic""" +1037 41 evaluator """rankbased""" +1037 42 dataset """wn18rr""" +1037 42 model """unstructuredmodel""" +1037 42 loss """softplus""" +1037 42 regularizer """no""" +1037 42 optimizer """adam""" +1037 42 training_loop """owa""" +1037 42 negative_sampler """basic""" +1037 42 evaluator """rankbased""" +1037 43 dataset """wn18rr""" +1037 43 model """unstructuredmodel""" +1037 43 loss """softplus""" +1037 43 regularizer """no""" +1037 43 optimizer """adam""" +1037 43 training_loop """owa""" +1037 43 negative_sampler """basic""" +1037 43 evaluator """rankbased""" +1037 44 dataset """wn18rr""" +1037 44 model """unstructuredmodel""" +1037 44 loss """softplus""" +1037 44 regularizer """no""" +1037 44 optimizer """adam""" +1037 44 training_loop """owa""" +1037 44 negative_sampler """basic""" +1037 44 evaluator """rankbased""" +1037 45 dataset """wn18rr""" +1037 45 model """unstructuredmodel""" +1037 45 loss """softplus""" +1037 45 regularizer """no""" +1037 45 optimizer """adam""" +1037 45 training_loop """owa""" +1037 45 negative_sampler """basic""" +1037 45 evaluator """rankbased""" +1037 46 dataset """wn18rr""" +1037 46 model """unstructuredmodel""" +1037 46 loss """softplus""" +1037 46 regularizer """no""" +1037 46 optimizer """adam""" +1037 46 training_loop """owa""" +1037 46 negative_sampler """basic""" +1037 46 evaluator """rankbased""" +1037 47 dataset """wn18rr""" +1037 47 model """unstructuredmodel""" +1037 47 loss """softplus""" +1037 47 regularizer """no""" +1037 47 optimizer """adam""" +1037 47 training_loop """owa""" +1037 47 negative_sampler """basic""" +1037 47 evaluator """rankbased""" +1037 48 dataset """wn18rr""" +1037 48 model """unstructuredmodel""" +1037 48 loss """softplus""" +1037 48 regularizer """no""" +1037 48 optimizer """adam""" +1037 48 training_loop """owa""" +1037 48 negative_sampler """basic""" +1037 48 evaluator """rankbased""" +1037 
49 dataset """wn18rr""" +1037 49 model """unstructuredmodel""" +1037 49 loss """softplus""" +1037 49 regularizer """no""" +1037 49 optimizer """adam""" +1037 49 training_loop """owa""" +1037 49 negative_sampler """basic""" +1037 49 evaluator """rankbased""" +1037 50 dataset """wn18rr""" +1037 50 model """unstructuredmodel""" +1037 50 loss """softplus""" +1037 50 regularizer """no""" +1037 50 optimizer """adam""" +1037 50 training_loop """owa""" +1037 50 negative_sampler """basic""" +1037 50 evaluator """rankbased""" +1037 51 dataset """wn18rr""" +1037 51 model """unstructuredmodel""" +1037 51 loss """softplus""" +1037 51 regularizer """no""" +1037 51 optimizer """adam""" +1037 51 training_loop """owa""" +1037 51 negative_sampler """basic""" +1037 51 evaluator """rankbased""" +1037 52 dataset """wn18rr""" +1037 52 model """unstructuredmodel""" +1037 52 loss """softplus""" +1037 52 regularizer """no""" +1037 52 optimizer """adam""" +1037 52 training_loop """owa""" +1037 52 negative_sampler """basic""" +1037 52 evaluator """rankbased""" +1037 53 dataset """wn18rr""" +1037 53 model """unstructuredmodel""" +1037 53 loss """softplus""" +1037 53 regularizer """no""" +1037 53 optimizer """adam""" +1037 53 training_loop """owa""" +1037 53 negative_sampler """basic""" +1037 53 evaluator """rankbased""" +1037 54 dataset """wn18rr""" +1037 54 model """unstructuredmodel""" +1037 54 loss """softplus""" +1037 54 regularizer """no""" +1037 54 optimizer """adam""" +1037 54 training_loop """owa""" +1037 54 negative_sampler """basic""" +1037 54 evaluator """rankbased""" +1037 55 dataset """wn18rr""" +1037 55 model """unstructuredmodel""" +1037 55 loss """softplus""" +1037 55 regularizer """no""" +1037 55 optimizer """adam""" +1037 55 training_loop """owa""" +1037 55 negative_sampler """basic""" +1037 55 evaluator """rankbased""" +1037 56 dataset """wn18rr""" +1037 56 model """unstructuredmodel""" +1037 56 loss """softplus""" +1037 56 regularizer """no""" +1037 56 optimizer """adam""" 
+1037 56 training_loop """owa""" +1037 56 negative_sampler """basic""" +1037 56 evaluator """rankbased""" +1037 57 dataset """wn18rr""" +1037 57 model """unstructuredmodel""" +1037 57 loss """softplus""" +1037 57 regularizer """no""" +1037 57 optimizer """adam""" +1037 57 training_loop """owa""" +1037 57 negative_sampler """basic""" +1037 57 evaluator """rankbased""" +1037 58 dataset """wn18rr""" +1037 58 model """unstructuredmodel""" +1037 58 loss """softplus""" +1037 58 regularizer """no""" +1037 58 optimizer """adam""" +1037 58 training_loop """owa""" +1037 58 negative_sampler """basic""" +1037 58 evaluator """rankbased""" +1037 59 dataset """wn18rr""" +1037 59 model """unstructuredmodel""" +1037 59 loss """softplus""" +1037 59 regularizer """no""" +1037 59 optimizer """adam""" +1037 59 training_loop """owa""" +1037 59 negative_sampler """basic""" +1037 59 evaluator """rankbased""" +1037 60 dataset """wn18rr""" +1037 60 model """unstructuredmodel""" +1037 60 loss """softplus""" +1037 60 regularizer """no""" +1037 60 optimizer """adam""" +1037 60 training_loop """owa""" +1037 60 negative_sampler """basic""" +1037 60 evaluator """rankbased""" +1037 61 dataset """wn18rr""" +1037 61 model """unstructuredmodel""" +1037 61 loss """softplus""" +1037 61 regularizer """no""" +1037 61 optimizer """adam""" +1037 61 training_loop """owa""" +1037 61 negative_sampler """basic""" +1037 61 evaluator """rankbased""" +1037 62 dataset """wn18rr""" +1037 62 model """unstructuredmodel""" +1037 62 loss """softplus""" +1037 62 regularizer """no""" +1037 62 optimizer """adam""" +1037 62 training_loop """owa""" +1037 62 negative_sampler """basic""" +1037 62 evaluator """rankbased""" +1037 63 dataset """wn18rr""" +1037 63 model """unstructuredmodel""" +1037 63 loss """softplus""" +1037 63 regularizer """no""" +1037 63 optimizer """adam""" +1037 63 training_loop """owa""" +1037 63 negative_sampler """basic""" +1037 63 evaluator """rankbased""" +1037 64 dataset """wn18rr""" +1037 64 model 
"""unstructuredmodel""" +1037 64 loss """softplus""" +1037 64 regularizer """no""" +1037 64 optimizer """adam""" +1037 64 training_loop """owa""" +1037 64 negative_sampler """basic""" +1037 64 evaluator """rankbased""" +1037 65 dataset """wn18rr""" +1037 65 model """unstructuredmodel""" +1037 65 loss """softplus""" +1037 65 regularizer """no""" +1037 65 optimizer """adam""" +1037 65 training_loop """owa""" +1037 65 negative_sampler """basic""" +1037 65 evaluator """rankbased""" +1037 66 dataset """wn18rr""" +1037 66 model """unstructuredmodel""" +1037 66 loss """softplus""" +1037 66 regularizer """no""" +1037 66 optimizer """adam""" +1037 66 training_loop """owa""" +1037 66 negative_sampler """basic""" +1037 66 evaluator """rankbased""" +1037 67 dataset """wn18rr""" +1037 67 model """unstructuredmodel""" +1037 67 loss """softplus""" +1037 67 regularizer """no""" +1037 67 optimizer """adam""" +1037 67 training_loop """owa""" +1037 67 negative_sampler """basic""" +1037 67 evaluator """rankbased""" +1037 68 dataset """wn18rr""" +1037 68 model """unstructuredmodel""" +1037 68 loss """softplus""" +1037 68 regularizer """no""" +1037 68 optimizer """adam""" +1037 68 training_loop """owa""" +1037 68 negative_sampler """basic""" +1037 68 evaluator """rankbased""" +1037 69 dataset """wn18rr""" +1037 69 model """unstructuredmodel""" +1037 69 loss """softplus""" +1037 69 regularizer """no""" +1037 69 optimizer """adam""" +1037 69 training_loop """owa""" +1037 69 negative_sampler """basic""" +1037 69 evaluator """rankbased""" +1037 70 dataset """wn18rr""" +1037 70 model """unstructuredmodel""" +1037 70 loss """softplus""" +1037 70 regularizer """no""" +1037 70 optimizer """adam""" +1037 70 training_loop """owa""" +1037 70 negative_sampler """basic""" +1037 70 evaluator """rankbased""" +1037 71 dataset """wn18rr""" +1037 71 model """unstructuredmodel""" +1037 71 loss """softplus""" +1037 71 regularizer """no""" +1037 71 optimizer """adam""" +1037 71 training_loop """owa""" +1037 
71 negative_sampler """basic""" +1037 71 evaluator """rankbased""" +1037 72 dataset """wn18rr""" +1037 72 model """unstructuredmodel""" +1037 72 loss """softplus""" +1037 72 regularizer """no""" +1037 72 optimizer """adam""" +1037 72 training_loop """owa""" +1037 72 negative_sampler """basic""" +1037 72 evaluator """rankbased""" +1037 73 dataset """wn18rr""" +1037 73 model """unstructuredmodel""" +1037 73 loss """softplus""" +1037 73 regularizer """no""" +1037 73 optimizer """adam""" +1037 73 training_loop """owa""" +1037 73 negative_sampler """basic""" +1037 73 evaluator """rankbased""" +1037 74 dataset """wn18rr""" +1037 74 model """unstructuredmodel""" +1037 74 loss """softplus""" +1037 74 regularizer """no""" +1037 74 optimizer """adam""" +1037 74 training_loop """owa""" +1037 74 negative_sampler """basic""" +1037 74 evaluator """rankbased""" +1037 75 dataset """wn18rr""" +1037 75 model """unstructuredmodel""" +1037 75 loss """softplus""" +1037 75 regularizer """no""" +1037 75 optimizer """adam""" +1037 75 training_loop """owa""" +1037 75 negative_sampler """basic""" +1037 75 evaluator """rankbased""" +1037 76 dataset """wn18rr""" +1037 76 model """unstructuredmodel""" +1037 76 loss """softplus""" +1037 76 regularizer """no""" +1037 76 optimizer """adam""" +1037 76 training_loop """owa""" +1037 76 negative_sampler """basic""" +1037 76 evaluator """rankbased""" +1037 77 dataset """wn18rr""" +1037 77 model """unstructuredmodel""" +1037 77 loss """softplus""" +1037 77 regularizer """no""" +1037 77 optimizer """adam""" +1037 77 training_loop """owa""" +1037 77 negative_sampler """basic""" +1037 77 evaluator """rankbased""" +1037 78 dataset """wn18rr""" +1037 78 model """unstructuredmodel""" +1037 78 loss """softplus""" +1037 78 regularizer """no""" +1037 78 optimizer """adam""" +1037 78 training_loop """owa""" +1037 78 negative_sampler """basic""" +1037 78 evaluator """rankbased""" +1037 79 dataset """wn18rr""" +1037 79 model """unstructuredmodel""" +1037 79 loss 
"""softplus""" +1037 79 regularizer """no""" +1037 79 optimizer """adam""" +1037 79 training_loop """owa""" +1037 79 negative_sampler """basic""" +1037 79 evaluator """rankbased""" +1037 80 dataset """wn18rr""" +1037 80 model """unstructuredmodel""" +1037 80 loss """softplus""" +1037 80 regularizer """no""" +1037 80 optimizer """adam""" +1037 80 training_loop """owa""" +1037 80 negative_sampler """basic""" +1037 80 evaluator """rankbased""" +1037 81 dataset """wn18rr""" +1037 81 model """unstructuredmodel""" +1037 81 loss """softplus""" +1037 81 regularizer """no""" +1037 81 optimizer """adam""" +1037 81 training_loop """owa""" +1037 81 negative_sampler """basic""" +1037 81 evaluator """rankbased""" +1037 82 dataset """wn18rr""" +1037 82 model """unstructuredmodel""" +1037 82 loss """softplus""" +1037 82 regularizer """no""" +1037 82 optimizer """adam""" +1037 82 training_loop """owa""" +1037 82 negative_sampler """basic""" +1037 82 evaluator """rankbased""" +1037 83 dataset """wn18rr""" +1037 83 model """unstructuredmodel""" +1037 83 loss """softplus""" +1037 83 regularizer """no""" +1037 83 optimizer """adam""" +1037 83 training_loop """owa""" +1037 83 negative_sampler """basic""" +1037 83 evaluator """rankbased""" +1037 84 dataset """wn18rr""" +1037 84 model """unstructuredmodel""" +1037 84 loss """softplus""" +1037 84 regularizer """no""" +1037 84 optimizer """adam""" +1037 84 training_loop """owa""" +1037 84 negative_sampler """basic""" +1037 84 evaluator """rankbased""" +1037 85 dataset """wn18rr""" +1037 85 model """unstructuredmodel""" +1037 85 loss """softplus""" +1037 85 regularizer """no""" +1037 85 optimizer """adam""" +1037 85 training_loop """owa""" +1037 85 negative_sampler """basic""" +1037 85 evaluator """rankbased""" +1037 86 dataset """wn18rr""" +1037 86 model """unstructuredmodel""" +1037 86 loss """softplus""" +1037 86 regularizer """no""" +1037 86 optimizer """adam""" +1037 86 training_loop """owa""" +1037 86 negative_sampler """basic""" +1037 
86 evaluator """rankbased""" +1037 87 dataset """wn18rr""" +1037 87 model """unstructuredmodel""" +1037 87 loss """softplus""" +1037 87 regularizer """no""" +1037 87 optimizer """adam""" +1037 87 training_loop """owa""" +1037 87 negative_sampler """basic""" +1037 87 evaluator """rankbased""" +1037 88 dataset """wn18rr""" +1037 88 model """unstructuredmodel""" +1037 88 loss """softplus""" +1037 88 regularizer """no""" +1037 88 optimizer """adam""" +1037 88 training_loop """owa""" +1037 88 negative_sampler """basic""" +1037 88 evaluator """rankbased""" +1037 89 dataset """wn18rr""" +1037 89 model """unstructuredmodel""" +1037 89 loss """softplus""" +1037 89 regularizer """no""" +1037 89 optimizer """adam""" +1037 89 training_loop """owa""" +1037 89 negative_sampler """basic""" +1037 89 evaluator """rankbased""" +1037 90 dataset """wn18rr""" +1037 90 model """unstructuredmodel""" +1037 90 loss """softplus""" +1037 90 regularizer """no""" +1037 90 optimizer """adam""" +1037 90 training_loop """owa""" +1037 90 negative_sampler """basic""" +1037 90 evaluator """rankbased""" +1037 91 dataset """wn18rr""" +1037 91 model """unstructuredmodel""" +1037 91 loss """softplus""" +1037 91 regularizer """no""" +1037 91 optimizer """adam""" +1037 91 training_loop """owa""" +1037 91 negative_sampler """basic""" +1037 91 evaluator """rankbased""" +1037 92 dataset """wn18rr""" +1037 92 model """unstructuredmodel""" +1037 92 loss """softplus""" +1037 92 regularizer """no""" +1037 92 optimizer """adam""" +1037 92 training_loop """owa""" +1037 92 negative_sampler """basic""" +1037 92 evaluator """rankbased""" +1037 93 dataset """wn18rr""" +1037 93 model """unstructuredmodel""" +1037 93 loss """softplus""" +1037 93 regularizer """no""" +1037 93 optimizer """adam""" +1037 93 training_loop """owa""" +1037 93 negative_sampler """basic""" +1037 93 evaluator """rankbased""" +1037 94 dataset """wn18rr""" +1037 94 model """unstructuredmodel""" +1037 94 loss """softplus""" +1037 94 regularizer 
"""no""" +1037 94 optimizer """adam""" +1037 94 training_loop """owa""" +1037 94 negative_sampler """basic""" +1037 94 evaluator """rankbased""" +1037 95 dataset """wn18rr""" +1037 95 model """unstructuredmodel""" +1037 95 loss """softplus""" +1037 95 regularizer """no""" +1037 95 optimizer """adam""" +1037 95 training_loop """owa""" +1037 95 negative_sampler """basic""" +1037 95 evaluator """rankbased""" +1037 96 dataset """wn18rr""" +1037 96 model """unstructuredmodel""" +1037 96 loss """softplus""" +1037 96 regularizer """no""" +1037 96 optimizer """adam""" +1037 96 training_loop """owa""" +1037 96 negative_sampler """basic""" +1037 96 evaluator """rankbased""" +1037 97 dataset """wn18rr""" +1037 97 model """unstructuredmodel""" +1037 97 loss """softplus""" +1037 97 regularizer """no""" +1037 97 optimizer """adam""" +1037 97 training_loop """owa""" +1037 97 negative_sampler """basic""" +1037 97 evaluator """rankbased""" +1037 98 dataset """wn18rr""" +1037 98 model """unstructuredmodel""" +1037 98 loss """softplus""" +1037 98 regularizer """no""" +1037 98 optimizer """adam""" +1037 98 training_loop """owa""" +1037 98 negative_sampler """basic""" +1037 98 evaluator """rankbased""" +1037 99 dataset """wn18rr""" +1037 99 model """unstructuredmodel""" +1037 99 loss """softplus""" +1037 99 regularizer """no""" +1037 99 optimizer """adam""" +1037 99 training_loop """owa""" +1037 99 negative_sampler """basic""" +1037 99 evaluator """rankbased""" +1037 100 dataset """wn18rr""" +1037 100 model """unstructuredmodel""" +1037 100 loss """softplus""" +1037 100 regularizer """no""" +1037 100 optimizer """adam""" +1037 100 training_loop """owa""" +1037 100 negative_sampler """basic""" +1037 100 evaluator """rankbased""" +1038 1 model.embedding_dim 1.0 +1038 1 model.scoring_fct_norm 2.0 +1038 1 loss.margin 4.330002581844037 +1038 1 optimizer.lr 0.0025146313918213295 +1038 1 negative_sampler.num_negs_per_pos 38.0 +1038 1 training.batch_size 1.0 +1038 2 model.embedding_dim 0.0 
+1038 2 model.scoring_fct_norm 1.0 +1038 2 loss.margin 9.147439717409657 +1038 2 optimizer.lr 0.014840695735806826 +1038 2 negative_sampler.num_negs_per_pos 35.0 +1038 2 training.batch_size 0.0 +1038 3 model.embedding_dim 1.0 +1038 3 model.scoring_fct_norm 1.0 +1038 3 loss.margin 3.376612660690699 +1038 3 optimizer.lr 0.02508765183943397 +1038 3 negative_sampler.num_negs_per_pos 41.0 +1038 3 training.batch_size 0.0 +1038 4 model.embedding_dim 0.0 +1038 4 model.scoring_fct_norm 1.0 +1038 4 loss.margin 7.67230602750086 +1038 4 optimizer.lr 0.031670833968938196 +1038 4 negative_sampler.num_negs_per_pos 12.0 +1038 4 training.batch_size 1.0 +1038 5 model.embedding_dim 1.0 +1038 5 model.scoring_fct_norm 2.0 +1038 5 loss.margin 1.6620584136853092 +1038 5 optimizer.lr 0.012638626988431762 +1038 5 negative_sampler.num_negs_per_pos 2.0 +1038 5 training.batch_size 2.0 +1038 6 model.embedding_dim 2.0 +1038 6 model.scoring_fct_norm 1.0 +1038 6 loss.margin 2.0363966168848036 +1038 6 optimizer.lr 0.0012466862395001854 +1038 6 negative_sampler.num_negs_per_pos 16.0 +1038 6 training.batch_size 0.0 +1038 7 model.embedding_dim 1.0 +1038 7 model.scoring_fct_norm 2.0 +1038 7 loss.margin 6.908965709510549 +1038 7 optimizer.lr 0.004038815067990456 +1038 7 negative_sampler.num_negs_per_pos 62.0 +1038 7 training.batch_size 1.0 +1038 8 model.embedding_dim 1.0 +1038 8 model.scoring_fct_norm 2.0 +1038 8 loss.margin 4.471134706699714 +1038 8 optimizer.lr 0.09393919561114154 +1038 8 negative_sampler.num_negs_per_pos 71.0 +1038 8 training.batch_size 2.0 +1038 9 model.embedding_dim 2.0 +1038 9 model.scoring_fct_norm 2.0 +1038 9 loss.margin 4.006137605495716 +1038 9 optimizer.lr 0.011024735683346335 +1038 9 negative_sampler.num_negs_per_pos 70.0 +1038 9 training.batch_size 0.0 +1038 10 model.embedding_dim 2.0 +1038 10 model.scoring_fct_norm 2.0 +1038 10 loss.margin 6.822936752180079 +1038 10 optimizer.lr 0.008978528893723801 +1038 10 negative_sampler.num_negs_per_pos 5.0 +1038 10 
training.batch_size 1.0 +1038 11 model.embedding_dim 0.0 +1038 11 model.scoring_fct_norm 1.0 +1038 11 loss.margin 8.9106719573232 +1038 11 optimizer.lr 0.003114552306711365 +1038 11 negative_sampler.num_negs_per_pos 8.0 +1038 11 training.batch_size 1.0 +1038 12 model.embedding_dim 1.0 +1038 12 model.scoring_fct_norm 2.0 +1038 12 loss.margin 6.130397236950902 +1038 12 optimizer.lr 0.007648037145209683 +1038 12 negative_sampler.num_negs_per_pos 41.0 +1038 12 training.batch_size 1.0 +1038 13 model.embedding_dim 2.0 +1038 13 model.scoring_fct_norm 1.0 +1038 13 loss.margin 2.610761216270468 +1038 13 optimizer.lr 0.0076501020723538605 +1038 13 negative_sampler.num_negs_per_pos 91.0 +1038 13 training.batch_size 1.0 +1038 14 model.embedding_dim 2.0 +1038 14 model.scoring_fct_norm 1.0 +1038 14 loss.margin 5.411901140938638 +1038 14 optimizer.lr 0.04188760656175523 +1038 14 negative_sampler.num_negs_per_pos 21.0 +1038 14 training.batch_size 1.0 +1038 15 model.embedding_dim 1.0 +1038 15 model.scoring_fct_norm 1.0 +1038 15 loss.margin 3.2362745599640257 +1038 15 optimizer.lr 0.03734482362335251 +1038 15 negative_sampler.num_negs_per_pos 24.0 +1038 15 training.batch_size 2.0 +1038 16 model.embedding_dim 1.0 +1038 16 model.scoring_fct_norm 2.0 +1038 16 loss.margin 2.6282947973426394 +1038 16 optimizer.lr 0.001483310233618108 +1038 16 negative_sampler.num_negs_per_pos 81.0 +1038 16 training.batch_size 0.0 +1038 17 model.embedding_dim 1.0 +1038 17 model.scoring_fct_norm 2.0 +1038 17 loss.margin 1.8512827647024437 +1038 17 optimizer.lr 0.08155648750810507 +1038 17 negative_sampler.num_negs_per_pos 99.0 +1038 17 training.batch_size 1.0 +1038 18 model.embedding_dim 0.0 +1038 18 model.scoring_fct_norm 1.0 +1038 18 loss.margin 5.136179080309888 +1038 18 optimizer.lr 0.0033169015336824968 +1038 18 negative_sampler.num_negs_per_pos 44.0 +1038 18 training.batch_size 0.0 +1038 19 model.embedding_dim 2.0 +1038 19 model.scoring_fct_norm 2.0 +1038 19 loss.margin 8.845697249770685 +1038 19 
optimizer.lr 0.008632898648518295 +1038 19 negative_sampler.num_negs_per_pos 21.0 +1038 19 training.batch_size 0.0 +1038 20 model.embedding_dim 0.0 +1038 20 model.scoring_fct_norm 2.0 +1038 20 loss.margin 1.869885776893054 +1038 20 optimizer.lr 0.02903366489309267 +1038 20 negative_sampler.num_negs_per_pos 87.0 +1038 20 training.batch_size 1.0 +1038 21 model.embedding_dim 2.0 +1038 21 model.scoring_fct_norm 1.0 +1038 21 loss.margin 1.8609421939813369 +1038 21 optimizer.lr 0.06371998636181044 +1038 21 negative_sampler.num_negs_per_pos 72.0 +1038 21 training.batch_size 1.0 +1038 22 model.embedding_dim 1.0 +1038 22 model.scoring_fct_norm 1.0 +1038 22 loss.margin 4.925912768935282 +1038 22 optimizer.lr 0.04290159502035178 +1038 22 negative_sampler.num_negs_per_pos 86.0 +1038 22 training.batch_size 1.0 +1038 23 model.embedding_dim 1.0 +1038 23 model.scoring_fct_norm 1.0 +1038 23 loss.margin 3.0825518670944483 +1038 23 optimizer.lr 0.02934480122491451 +1038 23 negative_sampler.num_negs_per_pos 48.0 +1038 23 training.batch_size 0.0 +1038 24 model.embedding_dim 2.0 +1038 24 model.scoring_fct_norm 1.0 +1038 24 loss.margin 3.248809805110647 +1038 24 optimizer.lr 0.0019261422581345548 +1038 24 negative_sampler.num_negs_per_pos 59.0 +1038 24 training.batch_size 2.0 +1038 25 model.embedding_dim 0.0 +1038 25 model.scoring_fct_norm 2.0 +1038 25 loss.margin 1.5542421645280193 +1038 25 optimizer.lr 0.061542089238269704 +1038 25 negative_sampler.num_negs_per_pos 40.0 +1038 25 training.batch_size 2.0 +1038 26 model.embedding_dim 1.0 +1038 26 model.scoring_fct_norm 1.0 +1038 26 loss.margin 5.827234912511717 +1038 26 optimizer.lr 0.01591308907728722 +1038 26 negative_sampler.num_negs_per_pos 70.0 +1038 26 training.batch_size 2.0 +1038 27 model.embedding_dim 2.0 +1038 27 model.scoring_fct_norm 1.0 +1038 27 loss.margin 1.7652689745827712 +1038 27 optimizer.lr 0.01882086084859096 +1038 27 negative_sampler.num_negs_per_pos 97.0 +1038 27 training.batch_size 2.0 +1038 28 model.embedding_dim 
0.0 +1038 28 model.scoring_fct_norm 2.0 +1038 28 loss.margin 8.391131005872905 +1038 28 optimizer.lr 0.002573926662569287 +1038 28 negative_sampler.num_negs_per_pos 57.0 +1038 28 training.batch_size 1.0 +1038 29 model.embedding_dim 0.0 +1038 29 model.scoring_fct_norm 1.0 +1038 29 loss.margin 6.9424711548817495 +1038 29 optimizer.lr 0.0011126057091608063 +1038 29 negative_sampler.num_negs_per_pos 63.0 +1038 29 training.batch_size 0.0 +1038 30 model.embedding_dim 0.0 +1038 30 model.scoring_fct_norm 2.0 +1038 30 loss.margin 9.758350165002495 +1038 30 optimizer.lr 0.0032325120403506233 +1038 30 negative_sampler.num_negs_per_pos 55.0 +1038 30 training.batch_size 0.0 +1038 31 model.embedding_dim 0.0 +1038 31 model.scoring_fct_norm 2.0 +1038 31 loss.margin 8.136040673415433 +1038 31 optimizer.lr 0.0019982519380225467 +1038 31 negative_sampler.num_negs_per_pos 48.0 +1038 31 training.batch_size 1.0 +1038 32 model.embedding_dim 2.0 +1038 32 model.scoring_fct_norm 2.0 +1038 32 loss.margin 5.375270291759117 +1038 32 optimizer.lr 0.005807384287552015 +1038 32 negative_sampler.num_negs_per_pos 8.0 +1038 32 training.batch_size 2.0 +1038 33 model.embedding_dim 2.0 +1038 33 model.scoring_fct_norm 2.0 +1038 33 loss.margin 6.444165517311693 +1038 33 optimizer.lr 0.0012041296175703783 +1038 33 negative_sampler.num_negs_per_pos 10.0 +1038 33 training.batch_size 2.0 +1038 34 model.embedding_dim 1.0 +1038 34 model.scoring_fct_norm 1.0 +1038 34 loss.margin 3.0760449583770533 +1038 34 optimizer.lr 0.0021709380899169965 +1038 34 negative_sampler.num_negs_per_pos 75.0 +1038 34 training.batch_size 2.0 +1038 35 model.embedding_dim 1.0 +1038 35 model.scoring_fct_norm 2.0 +1038 35 loss.margin 2.5074904450606534 +1038 35 optimizer.lr 0.007855559814124894 +1038 35 negative_sampler.num_negs_per_pos 44.0 +1038 35 training.batch_size 1.0 +1038 36 model.embedding_dim 2.0 +1038 36 model.scoring_fct_norm 1.0 +1038 36 loss.margin 3.85542822238153 +1038 36 optimizer.lr 0.0015893668821664593 +1038 36 
negative_sampler.num_negs_per_pos 50.0 +1038 36 training.batch_size 2.0 +1038 37 model.embedding_dim 2.0 +1038 37 model.scoring_fct_norm 1.0 +1038 37 loss.margin 8.827909623146315 +1038 37 optimizer.lr 0.0063562169978293726 +1038 37 negative_sampler.num_negs_per_pos 6.0 +1038 37 training.batch_size 0.0 +1038 38 model.embedding_dim 2.0 +1038 38 model.scoring_fct_norm 2.0 +1038 38 loss.margin 4.281594744637613 +1038 38 optimizer.lr 0.010432913157241137 +1038 38 negative_sampler.num_negs_per_pos 60.0 +1038 38 training.batch_size 2.0 +1038 39 model.embedding_dim 1.0 +1038 39 model.scoring_fct_norm 1.0 +1038 39 loss.margin 2.39169537698306 +1038 39 optimizer.lr 0.0010636383245450549 +1038 39 negative_sampler.num_negs_per_pos 75.0 +1038 39 training.batch_size 2.0 +1038 40 model.embedding_dim 1.0 +1038 40 model.scoring_fct_norm 1.0 +1038 40 loss.margin 1.3417179034729771 +1038 40 optimizer.lr 0.0021273924923080082 +1038 40 negative_sampler.num_negs_per_pos 85.0 +1038 40 training.batch_size 0.0 +1038 41 model.embedding_dim 0.0 +1038 41 model.scoring_fct_norm 2.0 +1038 41 loss.margin 2.906757794159455 +1038 41 optimizer.lr 0.030393758381115802 +1038 41 negative_sampler.num_negs_per_pos 44.0 +1038 41 training.batch_size 1.0 +1038 42 model.embedding_dim 1.0 +1038 42 model.scoring_fct_norm 2.0 +1038 42 loss.margin 5.754364123797573 +1038 42 optimizer.lr 0.034025893415659966 +1038 42 negative_sampler.num_negs_per_pos 22.0 +1038 42 training.batch_size 2.0 +1038 43 model.embedding_dim 2.0 +1038 43 model.scoring_fct_norm 1.0 +1038 43 loss.margin 8.294988542556823 +1038 43 optimizer.lr 0.02017080406789545 +1038 43 negative_sampler.num_negs_per_pos 29.0 +1038 43 training.batch_size 2.0 +1038 44 model.embedding_dim 1.0 +1038 44 model.scoring_fct_norm 2.0 +1038 44 loss.margin 1.8139077675607043 +1038 44 optimizer.lr 0.002663637581195822 +1038 44 negative_sampler.num_negs_per_pos 71.0 +1038 44 training.batch_size 0.0 +1038 45 model.embedding_dim 1.0 +1038 45 model.scoring_fct_norm 2.0 
+1038 45 loss.margin 7.455337776851788 +1038 45 optimizer.lr 0.0015278780480552543 +1038 45 negative_sampler.num_negs_per_pos 51.0 +1038 45 training.batch_size 2.0 +1038 46 model.embedding_dim 2.0 +1038 46 model.scoring_fct_norm 2.0 +1038 46 loss.margin 4.584956927751124 +1038 46 optimizer.lr 0.08793374703799582 +1038 46 negative_sampler.num_negs_per_pos 45.0 +1038 46 training.batch_size 2.0 +1038 47 model.embedding_dim 0.0 +1038 47 model.scoring_fct_norm 2.0 +1038 47 loss.margin 7.989703560290702 +1038 47 optimizer.lr 0.09647991951667145 +1038 47 negative_sampler.num_negs_per_pos 62.0 +1038 47 training.batch_size 0.0 +1038 48 model.embedding_dim 0.0 +1038 48 model.scoring_fct_norm 1.0 +1038 48 loss.margin 4.9890482837464125 +1038 48 optimizer.lr 0.0728128016217464 +1038 48 negative_sampler.num_negs_per_pos 7.0 +1038 48 training.batch_size 0.0 +1038 1 dataset """wn18rr""" +1038 1 model """unstructuredmodel""" +1038 1 loss """marginranking""" +1038 1 regularizer """no""" +1038 1 optimizer """adam""" +1038 1 training_loop """owa""" +1038 1 negative_sampler """basic""" +1038 1 evaluator """rankbased""" +1038 2 dataset """wn18rr""" +1038 2 model """unstructuredmodel""" +1038 2 loss """marginranking""" +1038 2 regularizer """no""" +1038 2 optimizer """adam""" +1038 2 training_loop """owa""" +1038 2 negative_sampler """basic""" +1038 2 evaluator """rankbased""" +1038 3 dataset """wn18rr""" +1038 3 model """unstructuredmodel""" +1038 3 loss """marginranking""" +1038 3 regularizer """no""" +1038 3 optimizer """adam""" +1038 3 training_loop """owa""" +1038 3 negative_sampler """basic""" +1038 3 evaluator """rankbased""" +1038 4 dataset """wn18rr""" +1038 4 model """unstructuredmodel""" +1038 4 loss """marginranking""" +1038 4 regularizer """no""" +1038 4 optimizer """adam""" +1038 4 training_loop """owa""" +1038 4 negative_sampler """basic""" +1038 4 evaluator """rankbased""" +1038 5 dataset """wn18rr""" +1038 5 model """unstructuredmodel""" +1038 5 loss """marginranking""" 
+1038 5 regularizer """no""" +1038 5 optimizer """adam""" +1038 5 training_loop """owa""" +1038 5 negative_sampler """basic""" +1038 5 evaluator """rankbased""" +1038 6 dataset """wn18rr""" +1038 6 model """unstructuredmodel""" +1038 6 loss """marginranking""" +1038 6 regularizer """no""" +1038 6 optimizer """adam""" +1038 6 training_loop """owa""" +1038 6 negative_sampler """basic""" +1038 6 evaluator """rankbased""" +1038 7 dataset """wn18rr""" +1038 7 model """unstructuredmodel""" +1038 7 loss """marginranking""" +1038 7 regularizer """no""" +1038 7 optimizer """adam""" +1038 7 training_loop """owa""" +1038 7 negative_sampler """basic""" +1038 7 evaluator """rankbased""" +1038 8 dataset """wn18rr""" +1038 8 model """unstructuredmodel""" +1038 8 loss """marginranking""" +1038 8 regularizer """no""" +1038 8 optimizer """adam""" +1038 8 training_loop """owa""" +1038 8 negative_sampler """basic""" +1038 8 evaluator """rankbased""" +1038 9 dataset """wn18rr""" +1038 9 model """unstructuredmodel""" +1038 9 loss """marginranking""" +1038 9 regularizer """no""" +1038 9 optimizer """adam""" +1038 9 training_loop """owa""" +1038 9 negative_sampler """basic""" +1038 9 evaluator """rankbased""" +1038 10 dataset """wn18rr""" +1038 10 model """unstructuredmodel""" +1038 10 loss """marginranking""" +1038 10 regularizer """no""" +1038 10 optimizer """adam""" +1038 10 training_loop """owa""" +1038 10 negative_sampler """basic""" +1038 10 evaluator """rankbased""" +1038 11 dataset """wn18rr""" +1038 11 model """unstructuredmodel""" +1038 11 loss """marginranking""" +1038 11 regularizer """no""" +1038 11 optimizer """adam""" +1038 11 training_loop """owa""" +1038 11 negative_sampler """basic""" +1038 11 evaluator """rankbased""" +1038 12 dataset """wn18rr""" +1038 12 model """unstructuredmodel""" +1038 12 loss """marginranking""" +1038 12 regularizer """no""" +1038 12 optimizer """adam""" +1038 12 training_loop """owa""" +1038 12 negative_sampler """basic""" +1038 12 evaluator 
"""rankbased""" +1038 13 dataset """wn18rr""" +1038 13 model """unstructuredmodel""" +1038 13 loss """marginranking""" +1038 13 regularizer """no""" +1038 13 optimizer """adam""" +1038 13 training_loop """owa""" +1038 13 negative_sampler """basic""" +1038 13 evaluator """rankbased""" +1038 14 dataset """wn18rr""" +1038 14 model """unstructuredmodel""" +1038 14 loss """marginranking""" +1038 14 regularizer """no""" +1038 14 optimizer """adam""" +1038 14 training_loop """owa""" +1038 14 negative_sampler """basic""" +1038 14 evaluator """rankbased""" +1038 15 dataset """wn18rr""" +1038 15 model """unstructuredmodel""" +1038 15 loss """marginranking""" +1038 15 regularizer """no""" +1038 15 optimizer """adam""" +1038 15 training_loop """owa""" +1038 15 negative_sampler """basic""" +1038 15 evaluator """rankbased""" +1038 16 dataset """wn18rr""" +1038 16 model """unstructuredmodel""" +1038 16 loss """marginranking""" +1038 16 regularizer """no""" +1038 16 optimizer """adam""" +1038 16 training_loop """owa""" +1038 16 negative_sampler """basic""" +1038 16 evaluator """rankbased""" +1038 17 dataset """wn18rr""" +1038 17 model """unstructuredmodel""" +1038 17 loss """marginranking""" +1038 17 regularizer """no""" +1038 17 optimizer """adam""" +1038 17 training_loop """owa""" +1038 17 negative_sampler """basic""" +1038 17 evaluator """rankbased""" +1038 18 dataset """wn18rr""" +1038 18 model """unstructuredmodel""" +1038 18 loss """marginranking""" +1038 18 regularizer """no""" +1038 18 optimizer """adam""" +1038 18 training_loop """owa""" +1038 18 negative_sampler """basic""" +1038 18 evaluator """rankbased""" +1038 19 dataset """wn18rr""" +1038 19 model """unstructuredmodel""" +1038 19 loss """marginranking""" +1038 19 regularizer """no""" +1038 19 optimizer """adam""" +1038 19 training_loop """owa""" +1038 19 negative_sampler """basic""" +1038 19 evaluator """rankbased""" +1038 20 dataset """wn18rr""" +1038 20 model """unstructuredmodel""" +1038 20 loss 
"""marginranking""" +1038 20 regularizer """no""" +1038 20 optimizer """adam""" +1038 20 training_loop """owa""" +1038 20 negative_sampler """basic""" +1038 20 evaluator """rankbased""" +1038 21 dataset """wn18rr""" +1038 21 model """unstructuredmodel""" +1038 21 loss """marginranking""" +1038 21 regularizer """no""" +1038 21 optimizer """adam""" +1038 21 training_loop """owa""" +1038 21 negative_sampler """basic""" +1038 21 evaluator """rankbased""" +1038 22 dataset """wn18rr""" +1038 22 model """unstructuredmodel""" +1038 22 loss """marginranking""" +1038 22 regularizer """no""" +1038 22 optimizer """adam""" +1038 22 training_loop """owa""" +1038 22 negative_sampler """basic""" +1038 22 evaluator """rankbased""" +1038 23 dataset """wn18rr""" +1038 23 model """unstructuredmodel""" +1038 23 loss """marginranking""" +1038 23 regularizer """no""" +1038 23 optimizer """adam""" +1038 23 training_loop """owa""" +1038 23 negative_sampler """basic""" +1038 23 evaluator """rankbased""" +1038 24 dataset """wn18rr""" +1038 24 model """unstructuredmodel""" +1038 24 loss """marginranking""" +1038 24 regularizer """no""" +1038 24 optimizer """adam""" +1038 24 training_loop """owa""" +1038 24 negative_sampler """basic""" +1038 24 evaluator """rankbased""" +1038 25 dataset """wn18rr""" +1038 25 model """unstructuredmodel""" +1038 25 loss """marginranking""" +1038 25 regularizer """no""" +1038 25 optimizer """adam""" +1038 25 training_loop """owa""" +1038 25 negative_sampler """basic""" +1038 25 evaluator """rankbased""" +1038 26 dataset """wn18rr""" +1038 26 model """unstructuredmodel""" +1038 26 loss """marginranking""" +1038 26 regularizer """no""" +1038 26 optimizer """adam""" +1038 26 training_loop """owa""" +1038 26 negative_sampler """basic""" +1038 26 evaluator """rankbased""" +1038 27 dataset """wn18rr""" +1038 27 model """unstructuredmodel""" +1038 27 loss """marginranking""" +1038 27 regularizer """no""" +1038 27 optimizer """adam""" +1038 27 training_loop """owa""" 
+1038 27 negative_sampler """basic""" +1038 27 evaluator """rankbased""" +1038 28 dataset """wn18rr""" +1038 28 model """unstructuredmodel""" +1038 28 loss """marginranking""" +1038 28 regularizer """no""" +1038 28 optimizer """adam""" +1038 28 training_loop """owa""" +1038 28 negative_sampler """basic""" +1038 28 evaluator """rankbased""" +1038 29 dataset """wn18rr""" +1038 29 model """unstructuredmodel""" +1038 29 loss """marginranking""" +1038 29 regularizer """no""" +1038 29 optimizer """adam""" +1038 29 training_loop """owa""" +1038 29 negative_sampler """basic""" +1038 29 evaluator """rankbased""" +1038 30 dataset """wn18rr""" +1038 30 model """unstructuredmodel""" +1038 30 loss """marginranking""" +1038 30 regularizer """no""" +1038 30 optimizer """adam""" +1038 30 training_loop """owa""" +1038 30 negative_sampler """basic""" +1038 30 evaluator """rankbased""" +1038 31 dataset """wn18rr""" +1038 31 model """unstructuredmodel""" +1038 31 loss """marginranking""" +1038 31 regularizer """no""" +1038 31 optimizer """adam""" +1038 31 training_loop """owa""" +1038 31 negative_sampler """basic""" +1038 31 evaluator """rankbased""" +1038 32 dataset """wn18rr""" +1038 32 model """unstructuredmodel""" +1038 32 loss """marginranking""" +1038 32 regularizer """no""" +1038 32 optimizer """adam""" +1038 32 training_loop """owa""" +1038 32 negative_sampler """basic""" +1038 32 evaluator """rankbased""" +1038 33 dataset """wn18rr""" +1038 33 model """unstructuredmodel""" +1038 33 loss """marginranking""" +1038 33 regularizer """no""" +1038 33 optimizer """adam""" +1038 33 training_loop """owa""" +1038 33 negative_sampler """basic""" +1038 33 evaluator """rankbased""" +1038 34 dataset """wn18rr""" +1038 34 model """unstructuredmodel""" +1038 34 loss """marginranking""" +1038 34 regularizer """no""" +1038 34 optimizer """adam""" +1038 34 training_loop """owa""" +1038 34 negative_sampler """basic""" +1038 34 evaluator """rankbased""" +1038 35 dataset """wn18rr""" +1038 35 
model """unstructuredmodel""" +1038 35 loss """marginranking""" +1038 35 regularizer """no""" +1038 35 optimizer """adam""" +1038 35 training_loop """owa""" +1038 35 negative_sampler """basic""" +1038 35 evaluator """rankbased""" +1038 36 dataset """wn18rr""" +1038 36 model """unstructuredmodel""" +1038 36 loss """marginranking""" +1038 36 regularizer """no""" +1038 36 optimizer """adam""" +1038 36 training_loop """owa""" +1038 36 negative_sampler """basic""" +1038 36 evaluator """rankbased""" +1038 37 dataset """wn18rr""" +1038 37 model """unstructuredmodel""" +1038 37 loss """marginranking""" +1038 37 regularizer """no""" +1038 37 optimizer """adam""" +1038 37 training_loop """owa""" +1038 37 negative_sampler """basic""" +1038 37 evaluator """rankbased""" +1038 38 dataset """wn18rr""" +1038 38 model """unstructuredmodel""" +1038 38 loss """marginranking""" +1038 38 regularizer """no""" +1038 38 optimizer """adam""" +1038 38 training_loop """owa""" +1038 38 negative_sampler """basic""" +1038 38 evaluator """rankbased""" +1038 39 dataset """wn18rr""" +1038 39 model """unstructuredmodel""" +1038 39 loss """marginranking""" +1038 39 regularizer """no""" +1038 39 optimizer """adam""" +1038 39 training_loop """owa""" +1038 39 negative_sampler """basic""" +1038 39 evaluator """rankbased""" +1038 40 dataset """wn18rr""" +1038 40 model """unstructuredmodel""" +1038 40 loss """marginranking""" +1038 40 regularizer """no""" +1038 40 optimizer """adam""" +1038 40 training_loop """owa""" +1038 40 negative_sampler """basic""" +1038 40 evaluator """rankbased""" +1038 41 dataset """wn18rr""" +1038 41 model """unstructuredmodel""" +1038 41 loss """marginranking""" +1038 41 regularizer """no""" +1038 41 optimizer """adam""" +1038 41 training_loop """owa""" +1038 41 negative_sampler """basic""" +1038 41 evaluator """rankbased""" +1038 42 dataset """wn18rr""" +1038 42 model """unstructuredmodel""" +1038 42 loss """marginranking""" +1038 42 regularizer """no""" +1038 42 optimizer 
"""adam""" +1038 42 training_loop """owa""" +1038 42 negative_sampler """basic""" +1038 42 evaluator """rankbased""" +1038 43 dataset """wn18rr""" +1038 43 model """unstructuredmodel""" +1038 43 loss """marginranking""" +1038 43 regularizer """no""" +1038 43 optimizer """adam""" +1038 43 training_loop """owa""" +1038 43 negative_sampler """basic""" +1038 43 evaluator """rankbased""" +1038 44 dataset """wn18rr""" +1038 44 model """unstructuredmodel""" +1038 44 loss """marginranking""" +1038 44 regularizer """no""" +1038 44 optimizer """adam""" +1038 44 training_loop """owa""" +1038 44 negative_sampler """basic""" +1038 44 evaluator """rankbased""" +1038 45 dataset """wn18rr""" +1038 45 model """unstructuredmodel""" +1038 45 loss """marginranking""" +1038 45 regularizer """no""" +1038 45 optimizer """adam""" +1038 45 training_loop """owa""" +1038 45 negative_sampler """basic""" +1038 45 evaluator """rankbased""" +1038 46 dataset """wn18rr""" +1038 46 model """unstructuredmodel""" +1038 46 loss """marginranking""" +1038 46 regularizer """no""" +1038 46 optimizer """adam""" +1038 46 training_loop """owa""" +1038 46 negative_sampler """basic""" +1038 46 evaluator """rankbased""" +1038 47 dataset """wn18rr""" +1038 47 model """unstructuredmodel""" +1038 47 loss """marginranking""" +1038 47 regularizer """no""" +1038 47 optimizer """adam""" +1038 47 training_loop """owa""" +1038 47 negative_sampler """basic""" +1038 47 evaluator """rankbased""" +1038 48 dataset """wn18rr""" +1038 48 model """unstructuredmodel""" +1038 48 loss """marginranking""" +1038 48 regularizer """no""" +1038 48 optimizer """adam""" +1038 48 training_loop """owa""" +1038 48 negative_sampler """basic""" +1038 48 evaluator """rankbased""" +1039 1 model.embedding_dim 1.0 +1039 1 model.scoring_fct_norm 2.0 +1039 1 loss.margin 4.236547497788847 +1039 1 optimizer.lr 0.004642279028700308 +1039 1 negative_sampler.num_negs_per_pos 18.0 +1039 1 training.batch_size 0.0 +1039 2 model.embedding_dim 2.0 +1039 2 
model.scoring_fct_norm 2.0 +1039 2 loss.margin 5.444131277724818 +1039 2 optimizer.lr 0.0023423296164405303 +1039 2 negative_sampler.num_negs_per_pos 32.0 +1039 2 training.batch_size 1.0 +1039 3 model.embedding_dim 2.0 +1039 3 model.scoring_fct_norm 2.0 +1039 3 loss.margin 1.1601572177225148 +1039 3 optimizer.lr 0.001946457462570581 +1039 3 negative_sampler.num_negs_per_pos 99.0 +1039 3 training.batch_size 2.0 +1039 4 model.embedding_dim 2.0 +1039 4 model.scoring_fct_norm 1.0 +1039 4 loss.margin 9.970372566941556 +1039 4 optimizer.lr 0.09403129091332874 +1039 4 negative_sampler.num_negs_per_pos 63.0 +1039 4 training.batch_size 1.0 +1039 5 model.embedding_dim 1.0 +1039 5 model.scoring_fct_norm 2.0 +1039 5 loss.margin 4.004524263349692 +1039 5 optimizer.lr 0.010369865337709882 +1039 5 negative_sampler.num_negs_per_pos 49.0 +1039 5 training.batch_size 1.0 +1039 6 model.embedding_dim 1.0 +1039 6 model.scoring_fct_norm 2.0 +1039 6 loss.margin 6.982352142159387 +1039 6 optimizer.lr 0.0017360455396836206 +1039 6 negative_sampler.num_negs_per_pos 69.0 +1039 6 training.batch_size 1.0 +1039 7 model.embedding_dim 2.0 +1039 7 model.scoring_fct_norm 2.0 +1039 7 loss.margin 7.998146114623861 +1039 7 optimizer.lr 0.00889898796416061 +1039 7 negative_sampler.num_negs_per_pos 47.0 +1039 7 training.batch_size 1.0 +1039 8 model.embedding_dim 1.0 +1039 8 model.scoring_fct_norm 2.0 +1039 8 loss.margin 8.078110582394983 +1039 8 optimizer.lr 0.005274528758885808 +1039 8 negative_sampler.num_negs_per_pos 86.0 +1039 8 training.batch_size 0.0 +1039 9 model.embedding_dim 1.0 +1039 9 model.scoring_fct_norm 2.0 +1039 9 loss.margin 8.18705846544098 +1039 9 optimizer.lr 0.006569614254158829 +1039 9 negative_sampler.num_negs_per_pos 47.0 +1039 9 training.batch_size 2.0 +1039 10 model.embedding_dim 1.0 +1039 10 model.scoring_fct_norm 1.0 +1039 10 loss.margin 3.215786645127265 +1039 10 optimizer.lr 0.03822355093673895 +1039 10 negative_sampler.num_negs_per_pos 73.0 +1039 10 training.batch_size 0.0 
+1039 11 model.embedding_dim 2.0 +1039 11 model.scoring_fct_norm 1.0 +1039 11 loss.margin 6.503921046394234 +1039 11 optimizer.lr 0.017472374953850716 +1039 11 negative_sampler.num_negs_per_pos 13.0 +1039 11 training.batch_size 0.0 +1039 12 model.embedding_dim 2.0 +1039 12 model.scoring_fct_norm 1.0 +1039 12 loss.margin 9.186209735118211 +1039 12 optimizer.lr 0.00859331765433116 +1039 12 negative_sampler.num_negs_per_pos 54.0 +1039 12 training.batch_size 2.0 +1039 13 model.embedding_dim 2.0 +1039 13 model.scoring_fct_norm 2.0 +1039 13 loss.margin 4.765498303964354 +1039 13 optimizer.lr 0.0011177133163019536 +1039 13 negative_sampler.num_negs_per_pos 25.0 +1039 13 training.batch_size 1.0 +1039 14 model.embedding_dim 0.0 +1039 14 model.scoring_fct_norm 2.0 +1039 14 loss.margin 9.034405519669763 +1039 14 optimizer.lr 0.0024670889728799577 +1039 14 negative_sampler.num_negs_per_pos 20.0 +1039 14 training.batch_size 1.0 +1039 15 model.embedding_dim 2.0 +1039 15 model.scoring_fct_norm 2.0 +1039 15 loss.margin 9.408407040220354 +1039 15 optimizer.lr 0.001634850245187436 +1039 15 negative_sampler.num_negs_per_pos 56.0 +1039 15 training.batch_size 1.0 +1039 16 model.embedding_dim 0.0 +1039 16 model.scoring_fct_norm 2.0 +1039 16 loss.margin 2.808133287927111 +1039 16 optimizer.lr 0.04578701156907902 +1039 16 negative_sampler.num_negs_per_pos 31.0 +1039 16 training.batch_size 0.0 +1039 17 model.embedding_dim 1.0 +1039 17 model.scoring_fct_norm 1.0 +1039 17 loss.margin 7.881354901213142 +1039 17 optimizer.lr 0.0014586798815635336 +1039 17 negative_sampler.num_negs_per_pos 31.0 +1039 17 training.batch_size 2.0 +1039 18 model.embedding_dim 0.0 +1039 18 model.scoring_fct_norm 1.0 +1039 18 loss.margin 8.75514032119965 +1039 18 optimizer.lr 0.002765551032559282 +1039 18 negative_sampler.num_negs_per_pos 26.0 +1039 18 training.batch_size 1.0 +1039 19 model.embedding_dim 2.0 +1039 19 model.scoring_fct_norm 1.0 +1039 19 loss.margin 7.581035151927148 +1039 19 optimizer.lr 
0.004101176834422205 +1039 19 negative_sampler.num_negs_per_pos 72.0 +1039 19 training.batch_size 1.0 +1039 20 model.embedding_dim 2.0 +1039 20 model.scoring_fct_norm 1.0 +1039 20 loss.margin 3.9062057689604543 +1039 20 optimizer.lr 0.0020608894534993104 +1039 20 negative_sampler.num_negs_per_pos 47.0 +1039 20 training.batch_size 2.0 +1039 21 model.embedding_dim 0.0 +1039 21 model.scoring_fct_norm 1.0 +1039 21 loss.margin 4.404635136136046 +1039 21 optimizer.lr 0.0015236052420708828 +1039 21 negative_sampler.num_negs_per_pos 94.0 +1039 21 training.batch_size 1.0 +1039 22 model.embedding_dim 1.0 +1039 22 model.scoring_fct_norm 2.0 +1039 22 loss.margin 7.140527180107457 +1039 22 optimizer.lr 0.07750857485909478 +1039 22 negative_sampler.num_negs_per_pos 13.0 +1039 22 training.batch_size 1.0 +1039 23 model.embedding_dim 0.0 +1039 23 model.scoring_fct_norm 2.0 +1039 23 loss.margin 8.112112489603732 +1039 23 optimizer.lr 0.0031728579274851094 +1039 23 negative_sampler.num_negs_per_pos 85.0 +1039 23 training.batch_size 1.0 +1039 24 model.embedding_dim 1.0 +1039 24 model.scoring_fct_norm 1.0 +1039 24 loss.margin 7.087748674478669 +1039 24 optimizer.lr 0.03739162588327966 +1039 24 negative_sampler.num_negs_per_pos 60.0 +1039 24 training.batch_size 2.0 +1039 25 model.embedding_dim 2.0 +1039 25 model.scoring_fct_norm 2.0 +1039 25 loss.margin 5.1076689383626945 +1039 25 optimizer.lr 0.0013859222817824645 +1039 25 negative_sampler.num_negs_per_pos 65.0 +1039 25 training.batch_size 1.0 +1039 26 model.embedding_dim 0.0 +1039 26 model.scoring_fct_norm 1.0 +1039 26 loss.margin 2.965152382168249 +1039 26 optimizer.lr 0.011151607741941254 +1039 26 negative_sampler.num_negs_per_pos 86.0 +1039 26 training.batch_size 1.0 +1039 27 model.embedding_dim 2.0 +1039 27 model.scoring_fct_norm 2.0 +1039 27 loss.margin 3.1315610573682946 +1039 27 optimizer.lr 0.0012243192510395736 +1039 27 negative_sampler.num_negs_per_pos 38.0 +1039 27 training.batch_size 2.0 +1039 28 model.embedding_dim 0.0 
+1039 28 model.scoring_fct_norm 2.0 +1039 28 loss.margin 7.343703099180138 +1039 28 optimizer.lr 0.0011028472621084306 +1039 28 negative_sampler.num_negs_per_pos 43.0 +1039 28 training.batch_size 2.0 +1039 29 model.embedding_dim 0.0 +1039 29 model.scoring_fct_norm 2.0 +1039 29 loss.margin 3.4845797580621234 +1039 29 optimizer.lr 0.007508038305075502 +1039 29 negative_sampler.num_negs_per_pos 77.0 +1039 29 training.batch_size 2.0 +1039 30 model.embedding_dim 0.0 +1039 30 model.scoring_fct_norm 2.0 +1039 30 loss.margin 5.8306827391169245 +1039 30 optimizer.lr 0.006495122769930367 +1039 30 negative_sampler.num_negs_per_pos 15.0 +1039 30 training.batch_size 2.0 +1039 31 model.embedding_dim 2.0 +1039 31 model.scoring_fct_norm 2.0 +1039 31 loss.margin 1.0926014412478693 +1039 31 optimizer.lr 0.009048929072619765 +1039 31 negative_sampler.num_negs_per_pos 89.0 +1039 31 training.batch_size 1.0 +1039 32 model.embedding_dim 0.0 +1039 32 model.scoring_fct_norm 2.0 +1039 32 loss.margin 2.665207238128197 +1039 32 optimizer.lr 0.00542291349377555 +1039 32 negative_sampler.num_negs_per_pos 21.0 +1039 32 training.batch_size 1.0 +1039 33 model.embedding_dim 2.0 +1039 33 model.scoring_fct_norm 1.0 +1039 33 loss.margin 2.9495766539066586 +1039 33 optimizer.lr 0.0017153369004389585 +1039 33 negative_sampler.num_negs_per_pos 39.0 +1039 33 training.batch_size 2.0 +1039 34 model.embedding_dim 1.0 +1039 34 model.scoring_fct_norm 2.0 +1039 34 loss.margin 6.55917729363102 +1039 34 optimizer.lr 0.05682461804769615 +1039 34 negative_sampler.num_negs_per_pos 6.0 +1039 34 training.batch_size 0.0 +1039 35 model.embedding_dim 0.0 +1039 35 model.scoring_fct_norm 1.0 +1039 35 loss.margin 6.268641864457546 +1039 35 optimizer.lr 0.005619283959351781 +1039 35 negative_sampler.num_negs_per_pos 66.0 +1039 35 training.batch_size 2.0 +1039 36 model.embedding_dim 1.0 +1039 36 model.scoring_fct_norm 2.0 +1039 36 loss.margin 6.144459302303038 +1039 36 optimizer.lr 0.03772622608877285 +1039 36 
negative_sampler.num_negs_per_pos 39.0 +1039 36 training.batch_size 0.0 +1039 37 model.embedding_dim 1.0 +1039 37 model.scoring_fct_norm 1.0 +1039 37 loss.margin 1.6816574074202866 +1039 37 optimizer.lr 0.0020551114498567643 +1039 37 negative_sampler.num_negs_per_pos 95.0 +1039 37 training.batch_size 2.0 +1039 38 model.embedding_dim 1.0 +1039 38 model.scoring_fct_norm 2.0 +1039 38 loss.margin 1.0724499481624656 +1039 38 optimizer.lr 0.0039550149672363324 +1039 38 negative_sampler.num_negs_per_pos 40.0 +1039 38 training.batch_size 1.0 +1039 39 model.embedding_dim 0.0 +1039 39 model.scoring_fct_norm 1.0 +1039 39 loss.margin 2.690435936038504 +1039 39 optimizer.lr 0.024120042217154195 +1039 39 negative_sampler.num_negs_per_pos 3.0 +1039 39 training.batch_size 2.0 +1039 40 model.embedding_dim 1.0 +1039 40 model.scoring_fct_norm 1.0 +1039 40 loss.margin 1.4571064022388511 +1039 40 optimizer.lr 0.06410062977504705 +1039 40 negative_sampler.num_negs_per_pos 32.0 +1039 40 training.batch_size 1.0 +1039 41 model.embedding_dim 1.0 +1039 41 model.scoring_fct_norm 2.0 +1039 41 loss.margin 9.934635050470442 +1039 41 optimizer.lr 0.004058225570749886 +1039 41 negative_sampler.num_negs_per_pos 70.0 +1039 41 training.batch_size 2.0 +1039 42 model.embedding_dim 1.0 +1039 42 model.scoring_fct_norm 1.0 +1039 42 loss.margin 5.709530652728306 +1039 42 optimizer.lr 0.003018368727889149 +1039 42 negative_sampler.num_negs_per_pos 41.0 +1039 42 training.batch_size 1.0 +1039 43 model.embedding_dim 1.0 +1039 43 model.scoring_fct_norm 2.0 +1039 43 loss.margin 5.544030619833202 +1039 43 optimizer.lr 0.00447242804192096 +1039 43 negative_sampler.num_negs_per_pos 63.0 +1039 43 training.batch_size 1.0 +1039 44 model.embedding_dim 2.0 +1039 44 model.scoring_fct_norm 2.0 +1039 44 loss.margin 3.22152502460786 +1039 44 optimizer.lr 0.007649230385694627 +1039 44 negative_sampler.num_negs_per_pos 82.0 +1039 44 training.batch_size 1.0 +1039 45 model.embedding_dim 2.0 +1039 45 model.scoring_fct_norm 2.0 
+1039 45 loss.margin 8.276723862709565 +1039 45 optimizer.lr 0.05927941218342697 +1039 45 negative_sampler.num_negs_per_pos 42.0 +1039 45 training.batch_size 2.0 +1039 46 model.embedding_dim 2.0 +1039 46 model.scoring_fct_norm 1.0 +1039 46 loss.margin 8.440078849437112 +1039 46 optimizer.lr 0.03490366627081581 +1039 46 negative_sampler.num_negs_per_pos 13.0 +1039 46 training.batch_size 2.0 +1039 47 model.embedding_dim 2.0 +1039 47 model.scoring_fct_norm 1.0 +1039 47 loss.margin 3.9159919909631067 +1039 47 optimizer.lr 0.003766103837715512 +1039 47 negative_sampler.num_negs_per_pos 96.0 +1039 47 training.batch_size 0.0 +1039 48 model.embedding_dim 1.0 +1039 48 model.scoring_fct_norm 2.0 +1039 48 loss.margin 7.251529173883834 +1039 48 optimizer.lr 0.07223623060018898 +1039 48 negative_sampler.num_negs_per_pos 46.0 +1039 48 training.batch_size 1.0 +1039 49 model.embedding_dim 2.0 +1039 49 model.scoring_fct_norm 2.0 +1039 49 loss.margin 9.288746665854134 +1039 49 optimizer.lr 0.009101162693508882 +1039 49 negative_sampler.num_negs_per_pos 28.0 +1039 49 training.batch_size 1.0 +1039 50 model.embedding_dim 1.0 +1039 50 model.scoring_fct_norm 2.0 +1039 50 loss.margin 6.052706874672618 +1039 50 optimizer.lr 0.0011278884007965288 +1039 50 negative_sampler.num_negs_per_pos 9.0 +1039 50 training.batch_size 0.0 +1039 51 model.embedding_dim 2.0 +1039 51 model.scoring_fct_norm 2.0 +1039 51 loss.margin 8.402787607807703 +1039 51 optimizer.lr 0.001278445808700143 +1039 51 negative_sampler.num_negs_per_pos 5.0 +1039 51 training.batch_size 0.0 +1039 52 model.embedding_dim 2.0 +1039 52 model.scoring_fct_norm 2.0 +1039 52 loss.margin 8.1926112024308 +1039 52 optimizer.lr 0.03187479223342065 +1039 52 negative_sampler.num_negs_per_pos 44.0 +1039 52 training.batch_size 1.0 +1039 53 model.embedding_dim 0.0 +1039 53 model.scoring_fct_norm 2.0 +1039 53 loss.margin 8.710485310693809 +1039 53 optimizer.lr 0.0835355622172664 +1039 53 negative_sampler.num_negs_per_pos 37.0 +1039 53 
training.batch_size 1.0 +1039 54 model.embedding_dim 2.0 +1039 54 model.scoring_fct_norm 2.0 +1039 54 loss.margin 9.036520775077507 +1039 54 optimizer.lr 0.014371844014297088 +1039 54 negative_sampler.num_negs_per_pos 82.0 +1039 54 training.batch_size 0.0 +1039 55 model.embedding_dim 0.0 +1039 55 model.scoring_fct_norm 1.0 +1039 55 loss.margin 1.597963141229872 +1039 55 optimizer.lr 0.035289520829733564 +1039 55 negative_sampler.num_negs_per_pos 21.0 +1039 55 training.batch_size 0.0 +1039 56 model.embedding_dim 0.0 +1039 56 model.scoring_fct_norm 1.0 +1039 56 loss.margin 1.00895666068205 +1039 56 optimizer.lr 0.010507788326067927 +1039 56 negative_sampler.num_negs_per_pos 90.0 +1039 56 training.batch_size 0.0 +1039 57 model.embedding_dim 2.0 +1039 57 model.scoring_fct_norm 1.0 +1039 57 loss.margin 7.479847240040948 +1039 57 optimizer.lr 0.004245764733855745 +1039 57 negative_sampler.num_negs_per_pos 35.0 +1039 57 training.batch_size 2.0 +1039 58 model.embedding_dim 2.0 +1039 58 model.scoring_fct_norm 2.0 +1039 58 loss.margin 7.452596520942439 +1039 58 optimizer.lr 0.019565914439957817 +1039 58 negative_sampler.num_negs_per_pos 7.0 +1039 58 training.batch_size 0.0 +1039 59 model.embedding_dim 2.0 +1039 59 model.scoring_fct_norm 2.0 +1039 59 loss.margin 7.1466586648861385 +1039 59 optimizer.lr 0.005694844612852388 +1039 59 negative_sampler.num_negs_per_pos 57.0 +1039 59 training.batch_size 0.0 +1039 60 model.embedding_dim 1.0 +1039 60 model.scoring_fct_norm 2.0 +1039 60 loss.margin 6.0656440280946065 +1039 60 optimizer.lr 0.00156050582936545 +1039 60 negative_sampler.num_negs_per_pos 15.0 +1039 60 training.batch_size 2.0 +1039 61 model.embedding_dim 0.0 +1039 61 model.scoring_fct_norm 2.0 +1039 61 loss.margin 5.745263639788978 +1039 61 optimizer.lr 0.0010176553237026147 +1039 61 negative_sampler.num_negs_per_pos 83.0 +1039 61 training.batch_size 2.0 +1039 62 model.embedding_dim 0.0 +1039 62 model.scoring_fct_norm 2.0 +1039 62 loss.margin 3.683318565872087 +1039 62 
optimizer.lr 0.0012123441264933084 +1039 62 negative_sampler.num_negs_per_pos 90.0 +1039 62 training.batch_size 2.0 +1039 63 model.embedding_dim 0.0 +1039 63 model.scoring_fct_norm 2.0 +1039 63 loss.margin 3.5303743110976606 +1039 63 optimizer.lr 0.011296313541571111 +1039 63 negative_sampler.num_negs_per_pos 37.0 +1039 63 training.batch_size 2.0 +1039 64 model.embedding_dim 0.0 +1039 64 model.scoring_fct_norm 1.0 +1039 64 loss.margin 3.6908155013981916 +1039 64 optimizer.lr 0.003227060661275544 +1039 64 negative_sampler.num_negs_per_pos 95.0 +1039 64 training.batch_size 0.0 +1039 65 model.embedding_dim 2.0 +1039 65 model.scoring_fct_norm 1.0 +1039 65 loss.margin 6.883297908576425 +1039 65 optimizer.lr 0.005826633066016418 +1039 65 negative_sampler.num_negs_per_pos 47.0 +1039 65 training.batch_size 0.0 +1039 66 model.embedding_dim 0.0 +1039 66 model.scoring_fct_norm 2.0 +1039 66 loss.margin 2.103034832280103 +1039 66 optimizer.lr 0.027198374062111796 +1039 66 negative_sampler.num_negs_per_pos 89.0 +1039 66 training.batch_size 2.0 +1039 67 model.embedding_dim 2.0 +1039 67 model.scoring_fct_norm 2.0 +1039 67 loss.margin 6.500307417050713 +1039 67 optimizer.lr 0.04040794638966638 +1039 67 negative_sampler.num_negs_per_pos 68.0 +1039 67 training.batch_size 1.0 +1039 68 model.embedding_dim 0.0 +1039 68 model.scoring_fct_norm 1.0 +1039 68 loss.margin 6.3341378460371915 +1039 68 optimizer.lr 0.09885893163120732 +1039 68 negative_sampler.num_negs_per_pos 59.0 +1039 68 training.batch_size 0.0 +1039 69 model.embedding_dim 2.0 +1039 69 model.scoring_fct_norm 1.0 +1039 69 loss.margin 1.4218731115977108 +1039 69 optimizer.lr 0.06430604574839106 +1039 69 negative_sampler.num_negs_per_pos 5.0 +1039 69 training.batch_size 2.0 +1039 70 model.embedding_dim 0.0 +1039 70 model.scoring_fct_norm 2.0 +1039 70 loss.margin 3.7576120463219422 +1039 70 optimizer.lr 0.0011319990447833472 +1039 70 negative_sampler.num_negs_per_pos 84.0 +1039 70 training.batch_size 1.0 +1039 71 
model.embedding_dim 1.0 +1039 71 model.scoring_fct_norm 1.0 +1039 71 loss.margin 1.1352834248414028 +1039 71 optimizer.lr 0.0018186345769479841 +1039 71 negative_sampler.num_negs_per_pos 71.0 +1039 71 training.batch_size 2.0 +1039 72 model.embedding_dim 0.0 +1039 72 model.scoring_fct_norm 2.0 +1039 72 loss.margin 6.103211433500705 +1039 72 optimizer.lr 0.001030989548915746 +1039 72 negative_sampler.num_negs_per_pos 54.0 +1039 72 training.batch_size 2.0 +1039 73 model.embedding_dim 0.0 +1039 73 model.scoring_fct_norm 1.0 +1039 73 loss.margin 5.011672870223296 +1039 73 optimizer.lr 0.015501950892554643 +1039 73 negative_sampler.num_negs_per_pos 5.0 +1039 73 training.batch_size 2.0 +1039 74 model.embedding_dim 1.0 +1039 74 model.scoring_fct_norm 2.0 +1039 74 loss.margin 9.904775186859172 +1039 74 optimizer.lr 0.003526191277090752 +1039 74 negative_sampler.num_negs_per_pos 55.0 +1039 74 training.batch_size 2.0 +1039 75 model.embedding_dim 2.0 +1039 75 model.scoring_fct_norm 1.0 +1039 75 loss.margin 3.5118157858054566 +1039 75 optimizer.lr 0.009554354020666681 +1039 75 negative_sampler.num_negs_per_pos 97.0 +1039 75 training.batch_size 2.0 +1039 76 model.embedding_dim 2.0 +1039 76 model.scoring_fct_norm 1.0 +1039 76 loss.margin 8.114199362971139 +1039 76 optimizer.lr 0.0017646273280607064 +1039 76 negative_sampler.num_negs_per_pos 4.0 +1039 76 training.batch_size 1.0 +1039 77 model.embedding_dim 0.0 +1039 77 model.scoring_fct_norm 2.0 +1039 77 loss.margin 5.366413893301094 +1039 77 optimizer.lr 0.05734281719819712 +1039 77 negative_sampler.num_negs_per_pos 96.0 +1039 77 training.batch_size 1.0 +1039 78 model.embedding_dim 2.0 +1039 78 model.scoring_fct_norm 2.0 +1039 78 loss.margin 4.648973988640954 +1039 78 optimizer.lr 0.024607625062113735 +1039 78 negative_sampler.num_negs_per_pos 80.0 +1039 78 training.batch_size 1.0 +1039 79 model.embedding_dim 0.0 +1039 79 model.scoring_fct_norm 1.0 +1039 79 loss.margin 5.269681117844947 +1039 79 optimizer.lr 0.0010237319510500603 
+1039 79 negative_sampler.num_negs_per_pos 77.0 +1039 79 training.batch_size 0.0 +1039 80 model.embedding_dim 1.0 +1039 80 model.scoring_fct_norm 2.0 +1039 80 loss.margin 9.424177046014375 +1039 80 optimizer.lr 0.005064165024057352 +1039 80 negative_sampler.num_negs_per_pos 96.0 +1039 80 training.batch_size 0.0 +1039 81 model.embedding_dim 0.0 +1039 81 model.scoring_fct_norm 2.0 +1039 81 loss.margin 0.6766497457704471 +1039 81 optimizer.lr 0.08948033298023884 +1039 81 negative_sampler.num_negs_per_pos 55.0 +1039 81 training.batch_size 2.0 +1039 82 model.embedding_dim 0.0 +1039 82 model.scoring_fct_norm 1.0 +1039 82 loss.margin 6.4228927442124055 +1039 82 optimizer.lr 0.001274267117310879 +1039 82 negative_sampler.num_negs_per_pos 19.0 +1039 82 training.batch_size 0.0 +1039 83 model.embedding_dim 0.0 +1039 83 model.scoring_fct_norm 1.0 +1039 83 loss.margin 7.64046039149359 +1039 83 optimizer.lr 0.05324283185820733 +1039 83 negative_sampler.num_negs_per_pos 45.0 +1039 83 training.batch_size 1.0 +1039 84 model.embedding_dim 2.0 +1039 84 model.scoring_fct_norm 1.0 +1039 84 loss.margin 0.5304626380202391 +1039 84 optimizer.lr 0.006117611413444652 +1039 84 negative_sampler.num_negs_per_pos 92.0 +1039 84 training.batch_size 2.0 +1039 85 model.embedding_dim 0.0 +1039 85 model.scoring_fct_norm 2.0 +1039 85 loss.margin 1.484063197341036 +1039 85 optimizer.lr 0.00288699468312127 +1039 85 negative_sampler.num_negs_per_pos 83.0 +1039 85 training.batch_size 1.0 +1039 86 model.embedding_dim 2.0 +1039 86 model.scoring_fct_norm 1.0 +1039 86 loss.margin 9.647844615458332 +1039 86 optimizer.lr 0.0016624454980514235 +1039 86 negative_sampler.num_negs_per_pos 10.0 +1039 86 training.batch_size 1.0 +1039 87 model.embedding_dim 0.0 +1039 87 model.scoring_fct_norm 1.0 +1039 87 loss.margin 2.170560626948527 +1039 87 optimizer.lr 0.0021471200725055317 +1039 87 negative_sampler.num_negs_per_pos 87.0 +1039 87 training.batch_size 1.0 +1039 88 model.embedding_dim 0.0 +1039 88 
model.scoring_fct_norm 2.0 +1039 88 loss.margin 5.217145246849303 +1039 88 optimizer.lr 0.0014288481205408715 +1039 88 negative_sampler.num_negs_per_pos 0.0 +1039 88 training.batch_size 1.0 +1039 89 model.embedding_dim 0.0 +1039 89 model.scoring_fct_norm 1.0 +1039 89 loss.margin 7.382470482019859 +1039 89 optimizer.lr 0.08059751426649536 +1039 89 negative_sampler.num_negs_per_pos 25.0 +1039 89 training.batch_size 1.0 +1039 90 model.embedding_dim 1.0 +1039 90 model.scoring_fct_norm 1.0 +1039 90 loss.margin 2.7024065108845643 +1039 90 optimizer.lr 0.07678825169752668 +1039 90 negative_sampler.num_negs_per_pos 63.0 +1039 90 training.batch_size 1.0 +1039 91 model.embedding_dim 0.0 +1039 91 model.scoring_fct_norm 1.0 +1039 91 loss.margin 8.933674277061412 +1039 91 optimizer.lr 0.04813700774617068 +1039 91 negative_sampler.num_negs_per_pos 14.0 +1039 91 training.batch_size 0.0 +1039 92 model.embedding_dim 2.0 +1039 92 model.scoring_fct_norm 2.0 +1039 92 loss.margin 7.653889366080781 +1039 92 optimizer.lr 0.03781702979493293 +1039 92 negative_sampler.num_negs_per_pos 73.0 +1039 92 training.batch_size 2.0 +1039 93 model.embedding_dim 1.0 +1039 93 model.scoring_fct_norm 2.0 +1039 93 loss.margin 8.763830380617204 +1039 93 optimizer.lr 0.04171161570438487 +1039 93 negative_sampler.num_negs_per_pos 71.0 +1039 93 training.batch_size 0.0 +1039 94 model.embedding_dim 0.0 +1039 94 model.scoring_fct_norm 2.0 +1039 94 loss.margin 8.120876619155759 +1039 94 optimizer.lr 0.001396593062146782 +1039 94 negative_sampler.num_negs_per_pos 1.0 +1039 94 training.batch_size 1.0 +1039 1 dataset """wn18rr""" +1039 1 model """unstructuredmodel""" +1039 1 loss """marginranking""" +1039 1 regularizer """no""" +1039 1 optimizer """adam""" +1039 1 training_loop """owa""" +1039 1 negative_sampler """basic""" +1039 1 evaluator """rankbased""" +1039 2 dataset """wn18rr""" +1039 2 model """unstructuredmodel""" +1039 2 loss """marginranking""" +1039 2 regularizer """no""" +1039 2 optimizer """adam""" 
+1039 2 training_loop """owa""" +1039 2 negative_sampler """basic""" +1039 2 evaluator """rankbased""" +1039 3 dataset """wn18rr""" +1039 3 model """unstructuredmodel""" +1039 3 loss """marginranking""" +1039 3 regularizer """no""" +1039 3 optimizer """adam""" +1039 3 training_loop """owa""" +1039 3 negative_sampler """basic""" +1039 3 evaluator """rankbased""" +1039 4 dataset """wn18rr""" +1039 4 model """unstructuredmodel""" +1039 4 loss """marginranking""" +1039 4 regularizer """no""" +1039 4 optimizer """adam""" +1039 4 training_loop """owa""" +1039 4 negative_sampler """basic""" +1039 4 evaluator """rankbased""" +1039 5 dataset """wn18rr""" +1039 5 model """unstructuredmodel""" +1039 5 loss """marginranking""" +1039 5 regularizer """no""" +1039 5 optimizer """adam""" +1039 5 training_loop """owa""" +1039 5 negative_sampler """basic""" +1039 5 evaluator """rankbased""" +1039 6 dataset """wn18rr""" +1039 6 model """unstructuredmodel""" +1039 6 loss """marginranking""" +1039 6 regularizer """no""" +1039 6 optimizer """adam""" +1039 6 training_loop """owa""" +1039 6 negative_sampler """basic""" +1039 6 evaluator """rankbased""" +1039 7 dataset """wn18rr""" +1039 7 model """unstructuredmodel""" +1039 7 loss """marginranking""" +1039 7 regularizer """no""" +1039 7 optimizer """adam""" +1039 7 training_loop """owa""" +1039 7 negative_sampler """basic""" +1039 7 evaluator """rankbased""" +1039 8 dataset """wn18rr""" +1039 8 model """unstructuredmodel""" +1039 8 loss """marginranking""" +1039 8 regularizer """no""" +1039 8 optimizer """adam""" +1039 8 training_loop """owa""" +1039 8 negative_sampler """basic""" +1039 8 evaluator """rankbased""" +1039 9 dataset """wn18rr""" +1039 9 model """unstructuredmodel""" +1039 9 loss """marginranking""" +1039 9 regularizer """no""" +1039 9 optimizer """adam""" +1039 9 training_loop """owa""" +1039 9 negative_sampler """basic""" +1039 9 evaluator """rankbased""" +1039 10 dataset """wn18rr""" +1039 10 model """unstructuredmodel""" 
+1039 10 loss """marginranking""" +1039 10 regularizer """no""" +1039 10 optimizer """adam""" +1039 10 training_loop """owa""" +1039 10 negative_sampler """basic""" +1039 10 evaluator """rankbased""" +1039 11 dataset """wn18rr""" +1039 11 model """unstructuredmodel""" +1039 11 loss """marginranking""" +1039 11 regularizer """no""" +1039 11 optimizer """adam""" +1039 11 training_loop """owa""" +1039 11 negative_sampler """basic""" +1039 11 evaluator """rankbased""" +1039 12 dataset """wn18rr""" +1039 12 model """unstructuredmodel""" +1039 12 loss """marginranking""" +1039 12 regularizer """no""" +1039 12 optimizer """adam""" +1039 12 training_loop """owa""" +1039 12 negative_sampler """basic""" +1039 12 evaluator """rankbased""" +1039 13 dataset """wn18rr""" +1039 13 model """unstructuredmodel""" +1039 13 loss """marginranking""" +1039 13 regularizer """no""" +1039 13 optimizer """adam""" +1039 13 training_loop """owa""" +1039 13 negative_sampler """basic""" +1039 13 evaluator """rankbased""" +1039 14 dataset """wn18rr""" +1039 14 model """unstructuredmodel""" +1039 14 loss """marginranking""" +1039 14 regularizer """no""" +1039 14 optimizer """adam""" +1039 14 training_loop """owa""" +1039 14 negative_sampler """basic""" +1039 14 evaluator """rankbased""" +1039 15 dataset """wn18rr""" +1039 15 model """unstructuredmodel""" +1039 15 loss """marginranking""" +1039 15 regularizer """no""" +1039 15 optimizer """adam""" +1039 15 training_loop """owa""" +1039 15 negative_sampler """basic""" +1039 15 evaluator """rankbased""" +1039 16 dataset """wn18rr""" +1039 16 model """unstructuredmodel""" +1039 16 loss """marginranking""" +1039 16 regularizer """no""" +1039 16 optimizer """adam""" +1039 16 training_loop """owa""" +1039 16 negative_sampler """basic""" +1039 16 evaluator """rankbased""" +1039 17 dataset """wn18rr""" +1039 17 model """unstructuredmodel""" +1039 17 loss """marginranking""" +1039 17 regularizer """no""" +1039 17 optimizer """adam""" +1039 17 training_loop 
"""owa""" +1039 17 negative_sampler """basic""" +1039 17 evaluator """rankbased""" +1039 18 dataset """wn18rr""" +1039 18 model """unstructuredmodel""" +1039 18 loss """marginranking""" +1039 18 regularizer """no""" +1039 18 optimizer """adam""" +1039 18 training_loop """owa""" +1039 18 negative_sampler """basic""" +1039 18 evaluator """rankbased""" +1039 19 dataset """wn18rr""" +1039 19 model """unstructuredmodel""" +1039 19 loss """marginranking""" +1039 19 regularizer """no""" +1039 19 optimizer """adam""" +1039 19 training_loop """owa""" +1039 19 negative_sampler """basic""" +1039 19 evaluator """rankbased""" +1039 20 dataset """wn18rr""" +1039 20 model """unstructuredmodel""" +1039 20 loss """marginranking""" +1039 20 regularizer """no""" +1039 20 optimizer """adam""" +1039 20 training_loop """owa""" +1039 20 negative_sampler """basic""" +1039 20 evaluator """rankbased""" +1039 21 dataset """wn18rr""" +1039 21 model """unstructuredmodel""" +1039 21 loss """marginranking""" +1039 21 regularizer """no""" +1039 21 optimizer """adam""" +1039 21 training_loop """owa""" +1039 21 negative_sampler """basic""" +1039 21 evaluator """rankbased""" +1039 22 dataset """wn18rr""" +1039 22 model """unstructuredmodel""" +1039 22 loss """marginranking""" +1039 22 regularizer """no""" +1039 22 optimizer """adam""" +1039 22 training_loop """owa""" +1039 22 negative_sampler """basic""" +1039 22 evaluator """rankbased""" +1039 23 dataset """wn18rr""" +1039 23 model """unstructuredmodel""" +1039 23 loss """marginranking""" +1039 23 regularizer """no""" +1039 23 optimizer """adam""" +1039 23 training_loop """owa""" +1039 23 negative_sampler """basic""" +1039 23 evaluator """rankbased""" +1039 24 dataset """wn18rr""" +1039 24 model """unstructuredmodel""" +1039 24 loss """marginranking""" +1039 24 regularizer """no""" +1039 24 optimizer """adam""" +1039 24 training_loop """owa""" +1039 24 negative_sampler """basic""" +1039 24 evaluator """rankbased""" +1039 25 dataset """wn18rr""" 
+1039 25 model """unstructuredmodel""" +1039 25 loss """marginranking""" +1039 25 regularizer """no""" +1039 25 optimizer """adam""" +1039 25 training_loop """owa""" +1039 25 negative_sampler """basic""" +1039 25 evaluator """rankbased""" +1039 26 dataset """wn18rr""" +1039 26 model """unstructuredmodel""" +1039 26 loss """marginranking""" +1039 26 regularizer """no""" +1039 26 optimizer """adam""" +1039 26 training_loop """owa""" +1039 26 negative_sampler """basic""" +1039 26 evaluator """rankbased""" +1039 27 dataset """wn18rr""" +1039 27 model """unstructuredmodel""" +1039 27 loss """marginranking""" +1039 27 regularizer """no""" +1039 27 optimizer """adam""" +1039 27 training_loop """owa""" +1039 27 negative_sampler """basic""" +1039 27 evaluator """rankbased""" +1039 28 dataset """wn18rr""" +1039 28 model """unstructuredmodel""" +1039 28 loss """marginranking""" +1039 28 regularizer """no""" +1039 28 optimizer """adam""" +1039 28 training_loop """owa""" +1039 28 negative_sampler """basic""" +1039 28 evaluator """rankbased""" +1039 29 dataset """wn18rr""" +1039 29 model """unstructuredmodel""" +1039 29 loss """marginranking""" +1039 29 regularizer """no""" +1039 29 optimizer """adam""" +1039 29 training_loop """owa""" +1039 29 negative_sampler """basic""" +1039 29 evaluator """rankbased""" +1039 30 dataset """wn18rr""" +1039 30 model """unstructuredmodel""" +1039 30 loss """marginranking""" +1039 30 regularizer """no""" +1039 30 optimizer """adam""" +1039 30 training_loop """owa""" +1039 30 negative_sampler """basic""" +1039 30 evaluator """rankbased""" +1039 31 dataset """wn18rr""" +1039 31 model """unstructuredmodel""" +1039 31 loss """marginranking""" +1039 31 regularizer """no""" +1039 31 optimizer """adam""" +1039 31 training_loop """owa""" +1039 31 negative_sampler """basic""" +1039 31 evaluator """rankbased""" +1039 32 dataset """wn18rr""" +1039 32 model """unstructuredmodel""" +1039 32 loss """marginranking""" +1039 32 regularizer """no""" +1039 32 
optimizer """adam""" +1039 32 training_loop """owa""" +1039 32 negative_sampler """basic""" +1039 32 evaluator """rankbased""" +1039 33 dataset """wn18rr""" +1039 33 model """unstructuredmodel""" +1039 33 loss """marginranking""" +1039 33 regularizer """no""" +1039 33 optimizer """adam""" +1039 33 training_loop """owa""" +1039 33 negative_sampler """basic""" +1039 33 evaluator """rankbased""" +1039 34 dataset """wn18rr""" +1039 34 model """unstructuredmodel""" +1039 34 loss """marginranking""" +1039 34 regularizer """no""" +1039 34 optimizer """adam""" +1039 34 training_loop """owa""" +1039 34 negative_sampler """basic""" +1039 34 evaluator """rankbased""" +1039 35 dataset """wn18rr""" +1039 35 model """unstructuredmodel""" +1039 35 loss """marginranking""" +1039 35 regularizer """no""" +1039 35 optimizer """adam""" +1039 35 training_loop """owa""" +1039 35 negative_sampler """basic""" +1039 35 evaluator """rankbased""" +1039 36 dataset """wn18rr""" +1039 36 model """unstructuredmodel""" +1039 36 loss """marginranking""" +1039 36 regularizer """no""" +1039 36 optimizer """adam""" +1039 36 training_loop """owa""" +1039 36 negative_sampler """basic""" +1039 36 evaluator """rankbased""" +1039 37 dataset """wn18rr""" +1039 37 model """unstructuredmodel""" +1039 37 loss """marginranking""" +1039 37 regularizer """no""" +1039 37 optimizer """adam""" +1039 37 training_loop """owa""" +1039 37 negative_sampler """basic""" +1039 37 evaluator """rankbased""" +1039 38 dataset """wn18rr""" +1039 38 model """unstructuredmodel""" +1039 38 loss """marginranking""" +1039 38 regularizer """no""" +1039 38 optimizer """adam""" +1039 38 training_loop """owa""" +1039 38 negative_sampler """basic""" +1039 38 evaluator """rankbased""" +1039 39 dataset """wn18rr""" +1039 39 model """unstructuredmodel""" +1039 39 loss """marginranking""" +1039 39 regularizer """no""" +1039 39 optimizer """adam""" +1039 39 training_loop """owa""" +1039 39 negative_sampler """basic""" +1039 39 evaluator 
"""rankbased""" +1039 40 dataset """wn18rr""" +1039 40 model """unstructuredmodel""" +1039 40 loss """marginranking""" +1039 40 regularizer """no""" +1039 40 optimizer """adam""" +1039 40 training_loop """owa""" +1039 40 negative_sampler """basic""" +1039 40 evaluator """rankbased""" +1039 41 dataset """wn18rr""" +1039 41 model """unstructuredmodel""" +1039 41 loss """marginranking""" +1039 41 regularizer """no""" +1039 41 optimizer """adam""" +1039 41 training_loop """owa""" +1039 41 negative_sampler """basic""" +1039 41 evaluator """rankbased""" +1039 42 dataset """wn18rr""" +1039 42 model """unstructuredmodel""" +1039 42 loss """marginranking""" +1039 42 regularizer """no""" +1039 42 optimizer """adam""" +1039 42 training_loop """owa""" +1039 42 negative_sampler """basic""" +1039 42 evaluator """rankbased""" +1039 43 dataset """wn18rr""" +1039 43 model """unstructuredmodel""" +1039 43 loss """marginranking""" +1039 43 regularizer """no""" +1039 43 optimizer """adam""" +1039 43 training_loop """owa""" +1039 43 negative_sampler """basic""" +1039 43 evaluator """rankbased""" +1039 44 dataset """wn18rr""" +1039 44 model """unstructuredmodel""" +1039 44 loss """marginranking""" +1039 44 regularizer """no""" +1039 44 optimizer """adam""" +1039 44 training_loop """owa""" +1039 44 negative_sampler """basic""" +1039 44 evaluator """rankbased""" +1039 45 dataset """wn18rr""" +1039 45 model """unstructuredmodel""" +1039 45 loss """marginranking""" +1039 45 regularizer """no""" +1039 45 optimizer """adam""" +1039 45 training_loop """owa""" +1039 45 negative_sampler """basic""" +1039 45 evaluator """rankbased""" +1039 46 dataset """wn18rr""" +1039 46 model """unstructuredmodel""" +1039 46 loss """marginranking""" +1039 46 regularizer """no""" +1039 46 optimizer """adam""" +1039 46 training_loop """owa""" +1039 46 negative_sampler """basic""" +1039 46 evaluator """rankbased""" +1039 47 dataset """wn18rr""" +1039 47 model """unstructuredmodel""" +1039 47 loss 
"""marginranking""" +1039 47 regularizer """no""" +1039 47 optimizer """adam""" +1039 47 training_loop """owa""" +1039 47 negative_sampler """basic""" +1039 47 evaluator """rankbased""" +1039 48 dataset """wn18rr""" +1039 48 model """unstructuredmodel""" +1039 48 loss """marginranking""" +1039 48 regularizer """no""" +1039 48 optimizer """adam""" +1039 48 training_loop """owa""" +1039 48 negative_sampler """basic""" +1039 48 evaluator """rankbased""" +1039 49 dataset """wn18rr""" +1039 49 model """unstructuredmodel""" +1039 49 loss """marginranking""" +1039 49 regularizer """no""" +1039 49 optimizer """adam""" +1039 49 training_loop """owa""" +1039 49 negative_sampler """basic""" +1039 49 evaluator """rankbased""" +1039 50 dataset """wn18rr""" +1039 50 model """unstructuredmodel""" +1039 50 loss """marginranking""" +1039 50 regularizer """no""" +1039 50 optimizer """adam""" +1039 50 training_loop """owa""" +1039 50 negative_sampler """basic""" +1039 50 evaluator """rankbased""" +1039 51 dataset """wn18rr""" +1039 51 model """unstructuredmodel""" +1039 51 loss """marginranking""" +1039 51 regularizer """no""" +1039 51 optimizer """adam""" +1039 51 training_loop """owa""" +1039 51 negative_sampler """basic""" +1039 51 evaluator """rankbased""" +1039 52 dataset """wn18rr""" +1039 52 model """unstructuredmodel""" +1039 52 loss """marginranking""" +1039 52 regularizer """no""" +1039 52 optimizer """adam""" +1039 52 training_loop """owa""" +1039 52 negative_sampler """basic""" +1039 52 evaluator """rankbased""" +1039 53 dataset """wn18rr""" +1039 53 model """unstructuredmodel""" +1039 53 loss """marginranking""" +1039 53 regularizer """no""" +1039 53 optimizer """adam""" +1039 53 training_loop """owa""" +1039 53 negative_sampler """basic""" +1039 53 evaluator """rankbased""" +1039 54 dataset """wn18rr""" +1039 54 model """unstructuredmodel""" +1039 54 loss """marginranking""" +1039 54 regularizer """no""" +1039 54 optimizer """adam""" +1039 54 training_loop """owa""" 
+1039 54 negative_sampler """basic""" +1039 54 evaluator """rankbased""" +1039 55 dataset """wn18rr""" +1039 55 model """unstructuredmodel""" +1039 55 loss """marginranking""" +1039 55 regularizer """no""" +1039 55 optimizer """adam""" +1039 55 training_loop """owa""" +1039 55 negative_sampler """basic""" +1039 55 evaluator """rankbased""" +1039 56 dataset """wn18rr""" +1039 56 model """unstructuredmodel""" +1039 56 loss """marginranking""" +1039 56 regularizer """no""" +1039 56 optimizer """adam""" +1039 56 training_loop """owa""" +1039 56 negative_sampler """basic""" +1039 56 evaluator """rankbased""" +1039 57 dataset """wn18rr""" +1039 57 model """unstructuredmodel""" +1039 57 loss """marginranking""" +1039 57 regularizer """no""" +1039 57 optimizer """adam""" +1039 57 training_loop """owa""" +1039 57 negative_sampler """basic""" +1039 57 evaluator """rankbased""" +1039 58 dataset """wn18rr""" +1039 58 model """unstructuredmodel""" +1039 58 loss """marginranking""" +1039 58 regularizer """no""" +1039 58 optimizer """adam""" +1039 58 training_loop """owa""" +1039 58 negative_sampler """basic""" +1039 58 evaluator """rankbased""" +1039 59 dataset """wn18rr""" +1039 59 model """unstructuredmodel""" +1039 59 loss """marginranking""" +1039 59 regularizer """no""" +1039 59 optimizer """adam""" +1039 59 training_loop """owa""" +1039 59 negative_sampler """basic""" +1039 59 evaluator """rankbased""" +1039 60 dataset """wn18rr""" +1039 60 model """unstructuredmodel""" +1039 60 loss """marginranking""" +1039 60 regularizer """no""" +1039 60 optimizer """adam""" +1039 60 training_loop """owa""" +1039 60 negative_sampler """basic""" +1039 60 evaluator """rankbased""" +1039 61 dataset """wn18rr""" +1039 61 model """unstructuredmodel""" +1039 61 loss """marginranking""" +1039 61 regularizer """no""" +1039 61 optimizer """adam""" +1039 61 training_loop """owa""" +1039 61 negative_sampler """basic""" +1039 61 evaluator """rankbased""" +1039 62 dataset """wn18rr""" +1039 62 
model """unstructuredmodel""" +1039 62 loss """marginranking""" +1039 62 regularizer """no""" +1039 62 optimizer """adam""" +1039 62 training_loop """owa""" +1039 62 negative_sampler """basic""" +1039 62 evaluator """rankbased""" +1039 63 dataset """wn18rr""" +1039 63 model """unstructuredmodel""" +1039 63 loss """marginranking""" +1039 63 regularizer """no""" +1039 63 optimizer """adam""" +1039 63 training_loop """owa""" +1039 63 negative_sampler """basic""" +1039 63 evaluator """rankbased""" +1039 64 dataset """wn18rr""" +1039 64 model """unstructuredmodel""" +1039 64 loss """marginranking""" +1039 64 regularizer """no""" +1039 64 optimizer """adam""" +1039 64 training_loop """owa""" +1039 64 negative_sampler """basic""" +1039 64 evaluator """rankbased""" +1039 65 dataset """wn18rr""" +1039 65 model """unstructuredmodel""" +1039 65 loss """marginranking""" +1039 65 regularizer """no""" +1039 65 optimizer """adam""" +1039 65 training_loop """owa""" +1039 65 negative_sampler """basic""" +1039 65 evaluator """rankbased""" +1039 66 dataset """wn18rr""" +1039 66 model """unstructuredmodel""" +1039 66 loss """marginranking""" +1039 66 regularizer """no""" +1039 66 optimizer """adam""" +1039 66 training_loop """owa""" +1039 66 negative_sampler """basic""" +1039 66 evaluator """rankbased""" +1039 67 dataset """wn18rr""" +1039 67 model """unstructuredmodel""" +1039 67 loss """marginranking""" +1039 67 regularizer """no""" +1039 67 optimizer """adam""" +1039 67 training_loop """owa""" +1039 67 negative_sampler """basic""" +1039 67 evaluator """rankbased""" +1039 68 dataset """wn18rr""" +1039 68 model """unstructuredmodel""" +1039 68 loss """marginranking""" +1039 68 regularizer """no""" +1039 68 optimizer """adam""" +1039 68 training_loop """owa""" +1039 68 negative_sampler """basic""" +1039 68 evaluator """rankbased""" +1039 69 dataset """wn18rr""" +1039 69 model """unstructuredmodel""" +1039 69 loss """marginranking""" +1039 69 regularizer """no""" +1039 69 optimizer 
"""adam""" +1039 69 training_loop """owa""" +1039 69 negative_sampler """basic""" +1039 69 evaluator """rankbased""" +1039 70 dataset """wn18rr""" +1039 70 model """unstructuredmodel""" +1039 70 loss """marginranking""" +1039 70 regularizer """no""" +1039 70 optimizer """adam""" +1039 70 training_loop """owa""" +1039 70 negative_sampler """basic""" +1039 70 evaluator """rankbased""" +1039 71 dataset """wn18rr""" +1039 71 model """unstructuredmodel""" +1039 71 loss """marginranking""" +1039 71 regularizer """no""" +1039 71 optimizer """adam""" +1039 71 training_loop """owa""" +1039 71 negative_sampler """basic""" +1039 71 evaluator """rankbased""" +1039 72 dataset """wn18rr""" +1039 72 model """unstructuredmodel""" +1039 72 loss """marginranking""" +1039 72 regularizer """no""" +1039 72 optimizer """adam""" +1039 72 training_loop """owa""" +1039 72 negative_sampler """basic""" +1039 72 evaluator """rankbased""" +1039 73 dataset """wn18rr""" +1039 73 model """unstructuredmodel""" +1039 73 loss """marginranking""" +1039 73 regularizer """no""" +1039 73 optimizer """adam""" +1039 73 training_loop """owa""" +1039 73 negative_sampler """basic""" +1039 73 evaluator """rankbased""" +1039 74 dataset """wn18rr""" +1039 74 model """unstructuredmodel""" +1039 74 loss """marginranking""" +1039 74 regularizer """no""" +1039 74 optimizer """adam""" +1039 74 training_loop """owa""" +1039 74 negative_sampler """basic""" +1039 74 evaluator """rankbased""" +1039 75 dataset """wn18rr""" +1039 75 model """unstructuredmodel""" +1039 75 loss """marginranking""" +1039 75 regularizer """no""" +1039 75 optimizer """adam""" +1039 75 training_loop """owa""" +1039 75 negative_sampler """basic""" +1039 75 evaluator """rankbased""" +1039 76 dataset """wn18rr""" +1039 76 model """unstructuredmodel""" +1039 76 loss """marginranking""" +1039 76 regularizer """no""" +1039 76 optimizer """adam""" +1039 76 training_loop """owa""" +1039 76 negative_sampler """basic""" +1039 76 evaluator """rankbased""" 
+1039 77 dataset """wn18rr""" +1039 77 model """unstructuredmodel""" +1039 77 loss """marginranking""" +1039 77 regularizer """no""" +1039 77 optimizer """adam""" +1039 77 training_loop """owa""" +1039 77 negative_sampler """basic""" +1039 77 evaluator """rankbased""" +1039 78 dataset """wn18rr""" +1039 78 model """unstructuredmodel""" +1039 78 loss """marginranking""" +1039 78 regularizer """no""" +1039 78 optimizer """adam""" +1039 78 training_loop """owa""" +1039 78 negative_sampler """basic""" +1039 78 evaluator """rankbased""" +1039 79 dataset """wn18rr""" +1039 79 model """unstructuredmodel""" +1039 79 loss """marginranking""" +1039 79 regularizer """no""" +1039 79 optimizer """adam""" +1039 79 training_loop """owa""" +1039 79 negative_sampler """basic""" +1039 79 evaluator """rankbased""" +1039 80 dataset """wn18rr""" +1039 80 model """unstructuredmodel""" +1039 80 loss """marginranking""" +1039 80 regularizer """no""" +1039 80 optimizer """adam""" +1039 80 training_loop """owa""" +1039 80 negative_sampler """basic""" +1039 80 evaluator """rankbased""" +1039 81 dataset """wn18rr""" +1039 81 model """unstructuredmodel""" +1039 81 loss """marginranking""" +1039 81 regularizer """no""" +1039 81 optimizer """adam""" +1039 81 training_loop """owa""" +1039 81 negative_sampler """basic""" +1039 81 evaluator """rankbased""" +1039 82 dataset """wn18rr""" +1039 82 model """unstructuredmodel""" +1039 82 loss """marginranking""" +1039 82 regularizer """no""" +1039 82 optimizer """adam""" +1039 82 training_loop """owa""" +1039 82 negative_sampler """basic""" +1039 82 evaluator """rankbased""" +1039 83 dataset """wn18rr""" +1039 83 model """unstructuredmodel""" +1039 83 loss """marginranking""" +1039 83 regularizer """no""" +1039 83 optimizer """adam""" +1039 83 training_loop """owa""" +1039 83 negative_sampler """basic""" +1039 83 evaluator """rankbased""" +1039 84 dataset """wn18rr""" +1039 84 model """unstructuredmodel""" +1039 84 loss """marginranking""" +1039 84 
regularizer """no""" +1039 84 optimizer """adam""" +1039 84 training_loop """owa""" +1039 84 negative_sampler """basic""" +1039 84 evaluator """rankbased""" +1039 85 dataset """wn18rr""" +1039 85 model """unstructuredmodel""" +1039 85 loss """marginranking""" +1039 85 regularizer """no""" +1039 85 optimizer """adam""" +1039 85 training_loop """owa""" +1039 85 negative_sampler """basic""" +1039 85 evaluator """rankbased""" +1039 86 dataset """wn18rr""" +1039 86 model """unstructuredmodel""" +1039 86 loss """marginranking""" +1039 86 regularizer """no""" +1039 86 optimizer """adam""" +1039 86 training_loop """owa""" +1039 86 negative_sampler """basic""" +1039 86 evaluator """rankbased""" +1039 87 dataset """wn18rr""" +1039 87 model """unstructuredmodel""" +1039 87 loss """marginranking""" +1039 87 regularizer """no""" +1039 87 optimizer """adam""" +1039 87 training_loop """owa""" +1039 87 negative_sampler """basic""" +1039 87 evaluator """rankbased""" +1039 88 dataset """wn18rr""" +1039 88 model """unstructuredmodel""" +1039 88 loss """marginranking""" +1039 88 regularizer """no""" +1039 88 optimizer """adam""" +1039 88 training_loop """owa""" +1039 88 negative_sampler """basic""" +1039 88 evaluator """rankbased""" +1039 89 dataset """wn18rr""" +1039 89 model """unstructuredmodel""" +1039 89 loss """marginranking""" +1039 89 regularizer """no""" +1039 89 optimizer """adam""" +1039 89 training_loop """owa""" +1039 89 negative_sampler """basic""" +1039 89 evaluator """rankbased""" +1039 90 dataset """wn18rr""" +1039 90 model """unstructuredmodel""" +1039 90 loss """marginranking""" +1039 90 regularizer """no""" +1039 90 optimizer """adam""" +1039 90 training_loop """owa""" +1039 90 negative_sampler """basic""" +1039 90 evaluator """rankbased""" +1039 91 dataset """wn18rr""" +1039 91 model """unstructuredmodel""" +1039 91 loss """marginranking""" +1039 91 regularizer """no""" +1039 91 optimizer """adam""" +1039 91 training_loop """owa""" +1039 91 negative_sampler 
"""basic""" +1039 91 evaluator """rankbased""" +1039 92 dataset """wn18rr""" +1039 92 model """unstructuredmodel""" +1039 92 loss """marginranking""" +1039 92 regularizer """no""" +1039 92 optimizer """adam""" +1039 92 training_loop """owa""" +1039 92 negative_sampler """basic""" +1039 92 evaluator """rankbased""" +1039 93 dataset """wn18rr""" +1039 93 model """unstructuredmodel""" +1039 93 loss """marginranking""" +1039 93 regularizer """no""" +1039 93 optimizer """adam""" +1039 93 training_loop """owa""" +1039 93 negative_sampler """basic""" +1039 93 evaluator """rankbased""" +1039 94 dataset """wn18rr""" +1039 94 model """unstructuredmodel""" +1039 94 loss """marginranking""" +1039 94 regularizer """no""" +1039 94 optimizer """adam""" +1039 94 training_loop """owa""" +1039 94 negative_sampler """basic""" +1039 94 evaluator """rankbased""" +1040 1 model.embedding_dim 1.0 +1040 1 model.scoring_fct_norm 1.0 +1040 1 loss.margin 13.762542464664696 +1040 1 loss.adversarial_temperature 0.7722304004264187 +1040 1 optimizer.lr 0.004539294158154274 +1040 1 negative_sampler.num_negs_per_pos 98.0 +1040 1 training.batch_size 0.0 +1040 2 model.embedding_dim 0.0 +1040 2 model.scoring_fct_norm 1.0 +1040 2 loss.margin 15.878163815927149 +1040 2 loss.adversarial_temperature 0.25780934812694045 +1040 2 optimizer.lr 0.002480441559295748 +1040 2 negative_sampler.num_negs_per_pos 57.0 +1040 2 training.batch_size 0.0 +1040 3 model.embedding_dim 1.0 +1040 3 model.scoring_fct_norm 1.0 +1040 3 loss.margin 19.800526498174925 +1040 3 loss.adversarial_temperature 0.6313775553100504 +1040 3 optimizer.lr 0.0031211124563350406 +1040 3 negative_sampler.num_negs_per_pos 49.0 +1040 3 training.batch_size 1.0 +1040 4 model.embedding_dim 2.0 +1040 4 model.scoring_fct_norm 1.0 +1040 4 loss.margin 4.567265183368379 +1040 4 loss.adversarial_temperature 0.175327368487728 +1040 4 optimizer.lr 0.005844946232650243 +1040 4 negative_sampler.num_negs_per_pos 87.0 +1040 4 training.batch_size 1.0 +1040 5 
model.embedding_dim 2.0 +1040 5 model.scoring_fct_norm 2.0 +1040 5 loss.margin 25.34271265244516 +1040 5 loss.adversarial_temperature 0.7189431428478185 +1040 5 optimizer.lr 0.02022061643549679 +1040 5 negative_sampler.num_negs_per_pos 98.0 +1040 5 training.batch_size 1.0 +1040 6 model.embedding_dim 2.0 +1040 6 model.scoring_fct_norm 2.0 +1040 6 loss.margin 17.74962973231689 +1040 6 loss.adversarial_temperature 0.31125675433312955 +1040 6 optimizer.lr 0.00184051588312568 +1040 6 negative_sampler.num_negs_per_pos 78.0 +1040 6 training.batch_size 2.0 +1040 7 model.embedding_dim 0.0 +1040 7 model.scoring_fct_norm 2.0 +1040 7 loss.margin 29.2060212565309 +1040 7 loss.adversarial_temperature 0.33446578830262835 +1040 7 optimizer.lr 0.003889812115720504 +1040 7 negative_sampler.num_negs_per_pos 40.0 +1040 7 training.batch_size 2.0 +1040 8 model.embedding_dim 0.0 +1040 8 model.scoring_fct_norm 1.0 +1040 8 loss.margin 8.398313406242094 +1040 8 loss.adversarial_temperature 0.5486678702727055 +1040 8 optimizer.lr 0.004924677654738782 +1040 8 negative_sampler.num_negs_per_pos 0.0 +1040 8 training.batch_size 2.0 +1040 9 model.embedding_dim 1.0 +1040 9 model.scoring_fct_norm 1.0 +1040 9 loss.margin 14.295824728573558 +1040 9 loss.adversarial_temperature 0.4633905046683571 +1040 9 optimizer.lr 0.0012151962340593602 +1040 9 negative_sampler.num_negs_per_pos 98.0 +1040 9 training.batch_size 0.0 +1040 10 model.embedding_dim 0.0 +1040 10 model.scoring_fct_norm 2.0 +1040 10 loss.margin 19.523242703132265 +1040 10 loss.adversarial_temperature 0.32452459821249835 +1040 10 optimizer.lr 0.002259701150020333 +1040 10 negative_sampler.num_negs_per_pos 34.0 +1040 10 training.batch_size 0.0 +1040 11 model.embedding_dim 1.0 +1040 11 model.scoring_fct_norm 2.0 +1040 11 loss.margin 1.4858683965800874 +1040 11 loss.adversarial_temperature 0.25492576934075817 +1040 11 optimizer.lr 0.011510462949602203 +1040 11 negative_sampler.num_negs_per_pos 57.0 +1040 11 training.batch_size 1.0 +1040 12 
model.embedding_dim 1.0 +1040 12 model.scoring_fct_norm 1.0 +1040 12 loss.margin 18.293767281287625 +1040 12 loss.adversarial_temperature 0.6462021533186169 +1040 12 optimizer.lr 0.0011816346164412335 +1040 12 negative_sampler.num_negs_per_pos 7.0 +1040 12 training.batch_size 1.0 +1040 13 model.embedding_dim 0.0 +1040 13 model.scoring_fct_norm 2.0 +1040 13 loss.margin 17.597208871223692 +1040 13 loss.adversarial_temperature 0.14595395713798717 +1040 13 optimizer.lr 0.0012441636858804503 +1040 13 negative_sampler.num_negs_per_pos 28.0 +1040 13 training.batch_size 2.0 +1040 14 model.embedding_dim 0.0 +1040 14 model.scoring_fct_norm 2.0 +1040 14 loss.margin 2.2454743729157784 +1040 14 loss.adversarial_temperature 0.8956899450251491 +1040 14 optimizer.lr 0.006289454362275361 +1040 14 negative_sampler.num_negs_per_pos 96.0 +1040 14 training.batch_size 1.0 +1040 15 model.embedding_dim 2.0 +1040 15 model.scoring_fct_norm 1.0 +1040 15 loss.margin 23.875592683112362 +1040 15 loss.adversarial_temperature 0.6334535388952486 +1040 15 optimizer.lr 0.04942700711572616 +1040 15 negative_sampler.num_negs_per_pos 39.0 +1040 15 training.batch_size 0.0 +1040 16 model.embedding_dim 1.0 +1040 16 model.scoring_fct_norm 2.0 +1040 16 loss.margin 13.751123723648147 +1040 16 loss.adversarial_temperature 0.6442721354522843 +1040 16 optimizer.lr 0.0015800029683868029 +1040 16 negative_sampler.num_negs_per_pos 16.0 +1040 16 training.batch_size 2.0 +1040 17 model.embedding_dim 2.0 +1040 17 model.scoring_fct_norm 1.0 +1040 17 loss.margin 13.7924150463353 +1040 17 loss.adversarial_temperature 0.8001240961086045 +1040 17 optimizer.lr 0.007043848262592125 +1040 17 negative_sampler.num_negs_per_pos 65.0 +1040 17 training.batch_size 2.0 +1040 18 model.embedding_dim 0.0 +1040 18 model.scoring_fct_norm 1.0 +1040 18 loss.margin 1.17890317041887 +1040 18 loss.adversarial_temperature 0.6503865075535268 +1040 18 optimizer.lr 0.011368560132477024 +1040 18 negative_sampler.num_negs_per_pos 46.0 +1040 18 
training.batch_size 0.0 +1040 19 model.embedding_dim 2.0 +1040 19 model.scoring_fct_norm 1.0 +1040 19 loss.margin 29.02907741012323 +1040 19 loss.adversarial_temperature 0.29702202654553933 +1040 19 optimizer.lr 0.07645506067448947 +1040 19 negative_sampler.num_negs_per_pos 87.0 +1040 19 training.batch_size 2.0 +1040 20 model.embedding_dim 2.0 +1040 20 model.scoring_fct_norm 2.0 +1040 20 loss.margin 19.877908164256528 +1040 20 loss.adversarial_temperature 0.23408209955378162 +1040 20 optimizer.lr 0.0695638360645669 +1040 20 negative_sampler.num_negs_per_pos 31.0 +1040 20 training.batch_size 2.0 +1040 21 model.embedding_dim 1.0 +1040 21 model.scoring_fct_norm 1.0 +1040 21 loss.margin 15.474470185530866 +1040 21 loss.adversarial_temperature 0.9255282482299176 +1040 21 optimizer.lr 0.05761944888392174 +1040 21 negative_sampler.num_negs_per_pos 92.0 +1040 21 training.batch_size 0.0 +1040 22 model.embedding_dim 1.0 +1040 22 model.scoring_fct_norm 2.0 +1040 22 loss.margin 20.676029904127695 +1040 22 loss.adversarial_temperature 0.4532987423176347 +1040 22 optimizer.lr 0.008997218370061564 +1040 22 negative_sampler.num_negs_per_pos 11.0 +1040 22 training.batch_size 0.0 +1040 23 model.embedding_dim 1.0 +1040 23 model.scoring_fct_norm 1.0 +1040 23 loss.margin 8.069906051203837 +1040 23 loss.adversarial_temperature 0.6220708931688015 +1040 23 optimizer.lr 0.007620599750960486 +1040 23 negative_sampler.num_negs_per_pos 87.0 +1040 23 training.batch_size 0.0 +1040 24 model.embedding_dim 2.0 +1040 24 model.scoring_fct_norm 2.0 +1040 24 loss.margin 27.293390871500378 +1040 24 loss.adversarial_temperature 0.5779280038145937 +1040 24 optimizer.lr 0.003402367780529121 +1040 24 negative_sampler.num_negs_per_pos 89.0 +1040 24 training.batch_size 2.0 +1040 25 model.embedding_dim 0.0 +1040 25 model.scoring_fct_norm 1.0 +1040 25 loss.margin 14.718421711366187 +1040 25 loss.adversarial_temperature 0.5557769813006416 +1040 25 optimizer.lr 0.08216440085125223 +1040 25 
negative_sampler.num_negs_per_pos 26.0 +1040 25 training.batch_size 2.0 +1040 26 model.embedding_dim 0.0 +1040 26 model.scoring_fct_norm 1.0 +1040 26 loss.margin 24.847797222671748 +1040 26 loss.adversarial_temperature 0.34556959442165 +1040 26 optimizer.lr 0.009025873226260006 +1040 26 negative_sampler.num_negs_per_pos 22.0 +1040 26 training.batch_size 2.0 +1040 27 model.embedding_dim 1.0 +1040 27 model.scoring_fct_norm 1.0 +1040 27 loss.margin 16.48977404348693 +1040 27 loss.adversarial_temperature 0.9662016772436562 +1040 27 optimizer.lr 0.0018366262183253097 +1040 27 negative_sampler.num_negs_per_pos 72.0 +1040 27 training.batch_size 0.0 +1040 28 model.embedding_dim 0.0 +1040 28 model.scoring_fct_norm 1.0 +1040 28 loss.margin 17.356143375825763 +1040 28 loss.adversarial_temperature 0.19527549503189603 +1040 28 optimizer.lr 0.08443311292900194 +1040 28 negative_sampler.num_negs_per_pos 43.0 +1040 28 training.batch_size 2.0 +1040 29 model.embedding_dim 0.0 +1040 29 model.scoring_fct_norm 1.0 +1040 29 loss.margin 10.376221086194299 +1040 29 loss.adversarial_temperature 0.5600789551537787 +1040 29 optimizer.lr 0.013855468551076063 +1040 29 negative_sampler.num_negs_per_pos 42.0 +1040 29 training.batch_size 2.0 +1040 30 model.embedding_dim 2.0 +1040 30 model.scoring_fct_norm 1.0 +1040 30 loss.margin 6.553915496055978 +1040 30 loss.adversarial_temperature 0.9200728154188393 +1040 30 optimizer.lr 0.008777314164763829 +1040 30 negative_sampler.num_negs_per_pos 72.0 +1040 30 training.batch_size 0.0 +1040 31 model.embedding_dim 2.0 +1040 31 model.scoring_fct_norm 1.0 +1040 31 loss.margin 23.03587951054406 +1040 31 loss.adversarial_temperature 0.6618097823973685 +1040 31 optimizer.lr 0.005845758476089596 +1040 31 negative_sampler.num_negs_per_pos 13.0 +1040 31 training.batch_size 0.0 +1040 32 model.embedding_dim 0.0 +1040 32 model.scoring_fct_norm 1.0 +1040 32 loss.margin 23.740712651141 +1040 32 loss.adversarial_temperature 0.989839499756238 +1040 32 optimizer.lr 
0.004734863461600906 +1040 32 negative_sampler.num_negs_per_pos 54.0 +1040 32 training.batch_size 1.0 +1040 33 model.embedding_dim 0.0 +1040 33 model.scoring_fct_norm 2.0 +1040 33 loss.margin 3.805116496713988 +1040 33 loss.adversarial_temperature 0.9753127953875937 +1040 33 optimizer.lr 0.0022441833437070596 +1040 33 negative_sampler.num_negs_per_pos 13.0 +1040 33 training.batch_size 1.0 +1040 34 model.embedding_dim 0.0 +1040 34 model.scoring_fct_norm 2.0 +1040 34 loss.margin 23.945665066155442 +1040 34 loss.adversarial_temperature 0.21206626447697008 +1040 34 optimizer.lr 0.046364233369492955 +1040 34 negative_sampler.num_negs_per_pos 35.0 +1040 34 training.batch_size 2.0 +1040 35 model.embedding_dim 0.0 +1040 35 model.scoring_fct_norm 2.0 +1040 35 loss.margin 1.9497979415885935 +1040 35 loss.adversarial_temperature 0.8353640864098594 +1040 35 optimizer.lr 0.0017792270422825943 +1040 35 negative_sampler.num_negs_per_pos 83.0 +1040 35 training.batch_size 2.0 +1040 36 model.embedding_dim 1.0 +1040 36 model.scoring_fct_norm 2.0 +1040 36 loss.margin 14.914315749940496 +1040 36 loss.adversarial_temperature 0.9829294915850718 +1040 36 optimizer.lr 0.003908793146026157 +1040 36 negative_sampler.num_negs_per_pos 89.0 +1040 36 training.batch_size 1.0 +1040 37 model.embedding_dim 1.0 +1040 37 model.scoring_fct_norm 1.0 +1040 37 loss.margin 21.373926013427205 +1040 37 loss.adversarial_temperature 0.47248239154562127 +1040 37 optimizer.lr 0.009702363705351522 +1040 37 negative_sampler.num_negs_per_pos 2.0 +1040 37 training.batch_size 1.0 +1040 38 model.embedding_dim 1.0 +1040 38 model.scoring_fct_norm 2.0 +1040 38 loss.margin 10.210045906203664 +1040 38 loss.adversarial_temperature 0.8915647569836268 +1040 38 optimizer.lr 0.02926502072265764 +1040 38 negative_sampler.num_negs_per_pos 31.0 +1040 38 training.batch_size 0.0 +1040 39 model.embedding_dim 1.0 +1040 39 model.scoring_fct_norm 2.0 +1040 39 loss.margin 17.011334019649638 +1040 39 loss.adversarial_temperature 
0.40031553485308485 +1040 39 optimizer.lr 0.025604582045194967 +1040 39 negative_sampler.num_negs_per_pos 49.0 +1040 39 training.batch_size 1.0 +1040 40 model.embedding_dim 1.0 +1040 40 model.scoring_fct_norm 1.0 +1040 40 loss.margin 4.803928243695601 +1040 40 loss.adversarial_temperature 0.27289038814556754 +1040 40 optimizer.lr 0.026626356814374474 +1040 40 negative_sampler.num_negs_per_pos 91.0 +1040 40 training.batch_size 0.0 +1040 41 model.embedding_dim 0.0 +1040 41 model.scoring_fct_norm 1.0 +1040 41 loss.margin 24.985938132973033 +1040 41 loss.adversarial_temperature 0.8282311106898197 +1040 41 optimizer.lr 0.009780446371729878 +1040 41 negative_sampler.num_negs_per_pos 19.0 +1040 41 training.batch_size 0.0 +1040 42 model.embedding_dim 1.0 +1040 42 model.scoring_fct_norm 1.0 +1040 42 loss.margin 13.969530871664839 +1040 42 loss.adversarial_temperature 0.8667076195130358 +1040 42 optimizer.lr 0.001123270319337355 +1040 42 negative_sampler.num_negs_per_pos 71.0 +1040 42 training.batch_size 1.0 +1040 43 model.embedding_dim 0.0 +1040 43 model.scoring_fct_norm 2.0 +1040 43 loss.margin 28.9681380257907 +1040 43 loss.adversarial_temperature 0.9559654834695964 +1040 43 optimizer.lr 0.05796479821201495 +1040 43 negative_sampler.num_negs_per_pos 2.0 +1040 43 training.batch_size 0.0 +1040 44 model.embedding_dim 0.0 +1040 44 model.scoring_fct_norm 2.0 +1040 44 loss.margin 8.021088665859653 +1040 44 loss.adversarial_temperature 0.543062274440928 +1040 44 optimizer.lr 0.001134407131138122 +1040 44 negative_sampler.num_negs_per_pos 90.0 +1040 44 training.batch_size 1.0 +1040 45 model.embedding_dim 0.0 +1040 45 model.scoring_fct_norm 1.0 +1040 45 loss.margin 6.319628118884712 +1040 45 loss.adversarial_temperature 0.2817045736885642 +1040 45 optimizer.lr 0.007307682324073801 +1040 45 negative_sampler.num_negs_per_pos 7.0 +1040 45 training.batch_size 0.0 +1040 46 model.embedding_dim 0.0 +1040 46 model.scoring_fct_norm 1.0 +1040 46 loss.margin 8.715337867111458 +1040 46 
loss.adversarial_temperature 0.25699468595474656 +1040 46 optimizer.lr 0.009092862801948586 +1040 46 negative_sampler.num_negs_per_pos 21.0 +1040 46 training.batch_size 2.0 +1040 47 model.embedding_dim 1.0 +1040 47 model.scoring_fct_norm 1.0 +1040 47 loss.margin 11.590366316307097 +1040 47 loss.adversarial_temperature 0.7887156206632142 +1040 47 optimizer.lr 0.0022921170362547997 +1040 47 negative_sampler.num_negs_per_pos 61.0 +1040 47 training.batch_size 0.0 +1040 48 model.embedding_dim 1.0 +1040 48 model.scoring_fct_norm 1.0 +1040 48 loss.margin 26.333374430339695 +1040 48 loss.adversarial_temperature 0.5538884134904594 +1040 48 optimizer.lr 0.010936713621566375 +1040 48 negative_sampler.num_negs_per_pos 74.0 +1040 48 training.batch_size 0.0 +1040 49 model.embedding_dim 0.0 +1040 49 model.scoring_fct_norm 2.0 +1040 49 loss.margin 1.50980140563637 +1040 49 loss.adversarial_temperature 0.44838100140076953 +1040 49 optimizer.lr 0.005526713948945878 +1040 49 negative_sampler.num_negs_per_pos 34.0 +1040 49 training.batch_size 0.0 +1040 50 model.embedding_dim 2.0 +1040 50 model.scoring_fct_norm 1.0 +1040 50 loss.margin 12.829647734865578 +1040 50 loss.adversarial_temperature 0.8164730117762069 +1040 50 optimizer.lr 0.04767265149307998 +1040 50 negative_sampler.num_negs_per_pos 70.0 +1040 50 training.batch_size 0.0 +1040 51 model.embedding_dim 1.0 +1040 51 model.scoring_fct_norm 1.0 +1040 51 loss.margin 1.1965185575931532 +1040 51 loss.adversarial_temperature 0.7468746864483161 +1040 51 optimizer.lr 0.06513664046597588 +1040 51 negative_sampler.num_negs_per_pos 60.0 +1040 51 training.batch_size 2.0 +1040 52 model.embedding_dim 2.0 +1040 52 model.scoring_fct_norm 2.0 +1040 52 loss.margin 19.143431233021925 +1040 52 loss.adversarial_temperature 0.8442137761476919 +1040 52 optimizer.lr 0.026898792220514122 +1040 52 negative_sampler.num_negs_per_pos 70.0 +1040 52 training.batch_size 0.0 +1040 53 model.embedding_dim 0.0 +1040 53 model.scoring_fct_norm 1.0 +1040 53 
loss.margin 17.641490204207717 +1040 53 loss.adversarial_temperature 0.39244507180376126 +1040 53 optimizer.lr 0.003542896824816924 +1040 53 negative_sampler.num_negs_per_pos 59.0 +1040 53 training.batch_size 1.0 +1040 54 model.embedding_dim 2.0 +1040 54 model.scoring_fct_norm 1.0 +1040 54 loss.margin 22.35773301278958 +1040 54 loss.adversarial_temperature 0.21345494162301182 +1040 54 optimizer.lr 0.001293137656165038 +1040 54 negative_sampler.num_negs_per_pos 98.0 +1040 54 training.batch_size 1.0 +1040 55 model.embedding_dim 2.0 +1040 55 model.scoring_fct_norm 1.0 +1040 55 loss.margin 12.416523009609335 +1040 55 loss.adversarial_temperature 0.5240616077490061 +1040 55 optimizer.lr 0.012348251933257295 +1040 55 negative_sampler.num_negs_per_pos 89.0 +1040 55 training.batch_size 0.0 +1040 56 model.embedding_dim 0.0 +1040 56 model.scoring_fct_norm 2.0 +1040 56 loss.margin 7.4707295871051524 +1040 56 loss.adversarial_temperature 0.9454805137659221 +1040 56 optimizer.lr 0.05524173371795213 +1040 56 negative_sampler.num_negs_per_pos 13.0 +1040 56 training.batch_size 1.0 +1040 57 model.embedding_dim 0.0 +1040 57 model.scoring_fct_norm 1.0 +1040 57 loss.margin 11.149657051576854 +1040 57 loss.adversarial_temperature 0.4768828909641151 +1040 57 optimizer.lr 0.012087714881753493 +1040 57 negative_sampler.num_negs_per_pos 25.0 +1040 57 training.batch_size 2.0 +1040 58 model.embedding_dim 0.0 +1040 58 model.scoring_fct_norm 1.0 +1040 58 loss.margin 21.53962756064273 +1040 58 loss.adversarial_temperature 0.2461261399143599 +1040 58 optimizer.lr 0.07520742306463075 +1040 58 negative_sampler.num_negs_per_pos 47.0 +1040 58 training.batch_size 0.0 +1040 59 model.embedding_dim 0.0 +1040 59 model.scoring_fct_norm 1.0 +1040 59 loss.margin 24.329909029874347 +1040 59 loss.adversarial_temperature 0.838263158735378 +1040 59 optimizer.lr 0.04511091343590061 +1040 59 negative_sampler.num_negs_per_pos 7.0 +1040 59 training.batch_size 1.0 +1040 60 model.embedding_dim 0.0 +1040 60 
model.scoring_fct_norm 1.0 +1040 60 loss.margin 29.12419997526501 +1040 60 loss.adversarial_temperature 0.37139595785505275 +1040 60 optimizer.lr 0.004869312050215897 +1040 60 negative_sampler.num_negs_per_pos 35.0 +1040 60 training.batch_size 0.0 +1040 61 model.embedding_dim 2.0 +1040 61 model.scoring_fct_norm 1.0 +1040 61 loss.margin 13.41785599913774 +1040 61 loss.adversarial_temperature 0.15587801535684875 +1040 61 optimizer.lr 0.057573987060945346 +1040 61 negative_sampler.num_negs_per_pos 40.0 +1040 61 training.batch_size 1.0 +1040 62 model.embedding_dim 0.0 +1040 62 model.scoring_fct_norm 1.0 +1040 62 loss.margin 16.885202560107096 +1040 62 loss.adversarial_temperature 0.4252135752273809 +1040 62 optimizer.lr 0.010339715114020423 +1040 62 negative_sampler.num_negs_per_pos 27.0 +1040 62 training.batch_size 2.0 +1040 63 model.embedding_dim 2.0 +1040 63 model.scoring_fct_norm 1.0 +1040 63 loss.margin 13.38435570487634 +1040 63 loss.adversarial_temperature 0.4783350511723974 +1040 63 optimizer.lr 0.0025071474812606363 +1040 63 negative_sampler.num_negs_per_pos 79.0 +1040 63 training.batch_size 0.0 +1040 64 model.embedding_dim 2.0 +1040 64 model.scoring_fct_norm 1.0 +1040 64 loss.margin 10.841364286861275 +1040 64 loss.adversarial_temperature 0.7441299988091576 +1040 64 optimizer.lr 0.013589470627588663 +1040 64 negative_sampler.num_negs_per_pos 19.0 +1040 64 training.batch_size 0.0 +1040 65 model.embedding_dim 0.0 +1040 65 model.scoring_fct_norm 2.0 +1040 65 loss.margin 1.674339225948623 +1040 65 loss.adversarial_temperature 0.9477011273208813 +1040 65 optimizer.lr 0.003032531065724188 +1040 65 negative_sampler.num_negs_per_pos 60.0 +1040 65 training.batch_size 0.0 +1040 66 model.embedding_dim 0.0 +1040 66 model.scoring_fct_norm 1.0 +1040 66 loss.margin 9.22226906002535 +1040 66 loss.adversarial_temperature 0.2717121051810887 +1040 66 optimizer.lr 0.0018108531803051255 +1040 66 negative_sampler.num_negs_per_pos 76.0 +1040 66 training.batch_size 0.0 +1040 67 
model.embedding_dim 1.0 +1040 67 model.scoring_fct_norm 1.0 +1040 67 loss.margin 4.917903192584552 +1040 67 loss.adversarial_temperature 0.8986200299182533 +1040 67 optimizer.lr 0.004001381907907793 +1040 67 negative_sampler.num_negs_per_pos 43.0 +1040 67 training.batch_size 0.0 +1040 68 model.embedding_dim 1.0 +1040 68 model.scoring_fct_norm 2.0 +1040 68 loss.margin 2.15610209605005 +1040 68 loss.adversarial_temperature 0.11634879150899084 +1040 68 optimizer.lr 0.020842146715031363 +1040 68 negative_sampler.num_negs_per_pos 52.0 +1040 68 training.batch_size 2.0 +1040 69 model.embedding_dim 2.0 +1040 69 model.scoring_fct_norm 2.0 +1040 69 loss.margin 22.162689724895696 +1040 69 loss.adversarial_temperature 0.9986656387050906 +1040 69 optimizer.lr 0.014138778229866903 +1040 69 negative_sampler.num_negs_per_pos 45.0 +1040 69 training.batch_size 1.0 +1040 70 model.embedding_dim 2.0 +1040 70 model.scoring_fct_norm 1.0 +1040 70 loss.margin 23.567389331293942 +1040 70 loss.adversarial_temperature 0.875500097336927 +1040 70 optimizer.lr 0.011801286587116438 +1040 70 negative_sampler.num_negs_per_pos 47.0 +1040 70 training.batch_size 2.0 +1040 71 model.embedding_dim 1.0 +1040 71 model.scoring_fct_norm 2.0 +1040 71 loss.margin 11.616167475357186 +1040 71 loss.adversarial_temperature 0.16595208924215687 +1040 71 optimizer.lr 0.06813769638193978 +1040 71 negative_sampler.num_negs_per_pos 76.0 +1040 71 training.batch_size 2.0 +1040 72 model.embedding_dim 2.0 +1040 72 model.scoring_fct_norm 1.0 +1040 72 loss.margin 15.05280326872567 +1040 72 loss.adversarial_temperature 0.44690523896501105 +1040 72 optimizer.lr 0.013368038520254621 +1040 72 negative_sampler.num_negs_per_pos 18.0 +1040 72 training.batch_size 1.0 +1040 73 model.embedding_dim 2.0 +1040 73 model.scoring_fct_norm 2.0 +1040 73 loss.margin 26.043711059942638 +1040 73 loss.adversarial_temperature 0.702948935868403 +1040 73 optimizer.lr 0.0068786471944965654 +1040 73 negative_sampler.num_negs_per_pos 23.0 +1040 73 
training.batch_size 2.0 +1040 74 model.embedding_dim 1.0 +1040 74 model.scoring_fct_norm 2.0 +1040 74 loss.margin 27.713559466585263 +1040 74 loss.adversarial_temperature 0.799740128439122 +1040 74 optimizer.lr 0.0047256463338221245 +1040 74 negative_sampler.num_negs_per_pos 89.0 +1040 74 training.batch_size 2.0 +1040 75 model.embedding_dim 1.0 +1040 75 model.scoring_fct_norm 1.0 +1040 75 loss.margin 20.814791269514632 +1040 75 loss.adversarial_temperature 0.6730898077158133 +1040 75 optimizer.lr 0.0882436999597653 +1040 75 negative_sampler.num_negs_per_pos 56.0 +1040 75 training.batch_size 1.0 +1040 76 model.embedding_dim 0.0 +1040 76 model.scoring_fct_norm 2.0 +1040 76 loss.margin 5.988617692609885 +1040 76 loss.adversarial_temperature 0.9646694072209675 +1040 76 optimizer.lr 0.020274898702938696 +1040 76 negative_sampler.num_negs_per_pos 84.0 +1040 76 training.batch_size 0.0 +1040 77 model.embedding_dim 1.0 +1040 77 model.scoring_fct_norm 1.0 +1040 77 loss.margin 12.920204081236658 +1040 77 loss.adversarial_temperature 0.678116603681846 +1040 77 optimizer.lr 0.008157045101790847 +1040 77 negative_sampler.num_negs_per_pos 28.0 +1040 77 training.batch_size 0.0 +1040 78 model.embedding_dim 1.0 +1040 78 model.scoring_fct_norm 2.0 +1040 78 loss.margin 11.844451595630023 +1040 78 loss.adversarial_temperature 0.511089338840731 +1040 78 optimizer.lr 0.0377347719532093 +1040 78 negative_sampler.num_negs_per_pos 47.0 +1040 78 training.batch_size 0.0 +1040 79 model.embedding_dim 2.0 +1040 79 model.scoring_fct_norm 2.0 +1040 79 loss.margin 7.0328823766496384 +1040 79 loss.adversarial_temperature 0.5079762376884575 +1040 79 optimizer.lr 0.026310203016913752 +1040 79 negative_sampler.num_negs_per_pos 51.0 +1040 79 training.batch_size 0.0 +1040 80 model.embedding_dim 0.0 +1040 80 model.scoring_fct_norm 1.0 +1040 80 loss.margin 22.027305408760594 +1040 80 loss.adversarial_temperature 0.881189269095716 +1040 80 optimizer.lr 0.004688556552640171 +1040 80 
negative_sampler.num_negs_per_pos 93.0 +1040 80 training.batch_size 2.0 +1040 81 model.embedding_dim 0.0 +1040 81 model.scoring_fct_norm 1.0 +1040 81 loss.margin 15.669810085683997 +1040 81 loss.adversarial_temperature 0.1707639384851061 +1040 81 optimizer.lr 0.0041999465634113345 +1040 81 negative_sampler.num_negs_per_pos 21.0 +1040 81 training.batch_size 0.0 +1040 82 model.embedding_dim 2.0 +1040 82 model.scoring_fct_norm 1.0 +1040 82 loss.margin 6.75874531623481 +1040 82 loss.adversarial_temperature 0.5450445062715952 +1040 82 optimizer.lr 0.010702596701832002 +1040 82 negative_sampler.num_negs_per_pos 40.0 +1040 82 training.batch_size 0.0 +1040 83 model.embedding_dim 0.0 +1040 83 model.scoring_fct_norm 1.0 +1040 83 loss.margin 27.900703332489375 +1040 83 loss.adversarial_temperature 0.12605974938088402 +1040 83 optimizer.lr 0.01601081056694396 +1040 83 negative_sampler.num_negs_per_pos 87.0 +1040 83 training.batch_size 0.0 +1040 84 model.embedding_dim 1.0 +1040 84 model.scoring_fct_norm 1.0 +1040 84 loss.margin 26.092936858826178 +1040 84 loss.adversarial_temperature 0.7111299193335553 +1040 84 optimizer.lr 0.01636446575967773 +1040 84 negative_sampler.num_negs_per_pos 62.0 +1040 84 training.batch_size 0.0 +1040 85 model.embedding_dim 0.0 +1040 85 model.scoring_fct_norm 2.0 +1040 85 loss.margin 18.687704878575598 +1040 85 loss.adversarial_temperature 0.24072721140251277 +1040 85 optimizer.lr 0.020401668460416193 +1040 85 negative_sampler.num_negs_per_pos 91.0 +1040 85 training.batch_size 0.0 +1040 86 model.embedding_dim 0.0 +1040 86 model.scoring_fct_norm 1.0 +1040 86 loss.margin 18.287508382844667 +1040 86 loss.adversarial_temperature 0.749876263996695 +1040 86 optimizer.lr 0.09749525873922878 +1040 86 negative_sampler.num_negs_per_pos 76.0 +1040 86 training.batch_size 2.0 +1040 87 model.embedding_dim 2.0 +1040 87 model.scoring_fct_norm 2.0 +1040 87 loss.margin 8.107742046998846 +1040 87 loss.adversarial_temperature 0.46159773887281025 +1040 87 optimizer.lr 
0.023232116391351428 +1040 87 negative_sampler.num_negs_per_pos 3.0 +1040 87 training.batch_size 0.0 +1040 88 model.embedding_dim 1.0 +1040 88 model.scoring_fct_norm 1.0 +1040 88 loss.margin 19.3672676498201 +1040 88 loss.adversarial_temperature 0.9943795925120571 +1040 88 optimizer.lr 0.00196129806693986 +1040 88 negative_sampler.num_negs_per_pos 70.0 +1040 88 training.batch_size 0.0 +1040 89 model.embedding_dim 1.0 +1040 89 model.scoring_fct_norm 1.0 +1040 89 loss.margin 6.7063046492168334 +1040 89 loss.adversarial_temperature 0.31065527248186225 +1040 89 optimizer.lr 0.0010778511838553527 +1040 89 negative_sampler.num_negs_per_pos 35.0 +1040 89 training.batch_size 1.0 +1040 90 model.embedding_dim 2.0 +1040 90 model.scoring_fct_norm 1.0 +1040 90 loss.margin 1.2849780750435915 +1040 90 loss.adversarial_temperature 0.44154934519271827 +1040 90 optimizer.lr 0.03050998900640335 +1040 90 negative_sampler.num_negs_per_pos 49.0 +1040 90 training.batch_size 1.0 +1040 91 model.embedding_dim 1.0 +1040 91 model.scoring_fct_norm 1.0 +1040 91 loss.margin 23.7874606074389 +1040 91 loss.adversarial_temperature 0.5523856676393271 +1040 91 optimizer.lr 0.09827740754883861 +1040 91 negative_sampler.num_negs_per_pos 11.0 +1040 91 training.batch_size 2.0 +1040 92 model.embedding_dim 2.0 +1040 92 model.scoring_fct_norm 1.0 +1040 92 loss.margin 6.526190775472051 +1040 92 loss.adversarial_temperature 0.14337223523498796 +1040 92 optimizer.lr 0.002680806463758715 +1040 92 negative_sampler.num_negs_per_pos 72.0 +1040 92 training.batch_size 2.0 +1040 93 model.embedding_dim 1.0 +1040 93 model.scoring_fct_norm 2.0 +1040 93 loss.margin 17.93617214216207 +1040 93 loss.adversarial_temperature 0.11939429216478342 +1040 93 optimizer.lr 0.0017141855301250001 +1040 93 negative_sampler.num_negs_per_pos 66.0 +1040 93 training.batch_size 1.0 +1040 94 model.embedding_dim 0.0 +1040 94 model.scoring_fct_norm 1.0 +1040 94 loss.margin 20.313917278394996 +1040 94 loss.adversarial_temperature 
0.8830353199198422 +1040 94 optimizer.lr 0.04473024921446816 +1040 94 negative_sampler.num_negs_per_pos 4.0 +1040 94 training.batch_size 2.0 +1040 95 model.embedding_dim 1.0 +1040 95 model.scoring_fct_norm 1.0 +1040 95 loss.margin 19.66280825353789 +1040 95 loss.adversarial_temperature 0.17867647559213543 +1040 95 optimizer.lr 0.01482771933621341 +1040 95 negative_sampler.num_negs_per_pos 81.0 +1040 95 training.batch_size 1.0 +1040 96 model.embedding_dim 1.0 +1040 96 model.scoring_fct_norm 2.0 +1040 96 loss.margin 29.4305102780738 +1040 96 loss.adversarial_temperature 0.8185909104417683 +1040 96 optimizer.lr 0.08608325688668629 +1040 96 negative_sampler.num_negs_per_pos 71.0 +1040 96 training.batch_size 0.0 +1040 97 model.embedding_dim 1.0 +1040 97 model.scoring_fct_norm 2.0 +1040 97 loss.margin 11.384138004276222 +1040 97 loss.adversarial_temperature 0.4998994275898853 +1040 97 optimizer.lr 0.01674651490112439 +1040 97 negative_sampler.num_negs_per_pos 85.0 +1040 97 training.batch_size 0.0 +1040 98 model.embedding_dim 1.0 +1040 98 model.scoring_fct_norm 2.0 +1040 98 loss.margin 24.020755874957 +1040 98 loss.adversarial_temperature 0.43583964689502297 +1040 98 optimizer.lr 0.011769172828124144 +1040 98 negative_sampler.num_negs_per_pos 93.0 +1040 98 training.batch_size 1.0 +1040 99 model.embedding_dim 2.0 +1040 99 model.scoring_fct_norm 2.0 +1040 99 loss.margin 14.148892044942773 +1040 99 loss.adversarial_temperature 0.13640716987560428 +1040 99 optimizer.lr 0.012790856925302919 +1040 99 negative_sampler.num_negs_per_pos 85.0 +1040 99 training.batch_size 1.0 +1040 100 model.embedding_dim 0.0 +1040 100 model.scoring_fct_norm 2.0 +1040 100 loss.margin 1.4739380989799384 +1040 100 loss.adversarial_temperature 0.3233919999870714 +1040 100 optimizer.lr 0.03372524564062345 +1040 100 negative_sampler.num_negs_per_pos 40.0 +1040 100 training.batch_size 1.0 +1040 1 dataset """wn18rr""" +1040 1 model """unstructuredmodel""" +1040 1 loss """nssa""" +1040 1 regularizer 
"""no""" +1040 1 optimizer """adam""" +1040 1 training_loop """owa""" +1040 1 negative_sampler """basic""" +1040 1 evaluator """rankbased""" +1040 2 dataset """wn18rr""" +1040 2 model """unstructuredmodel""" +1040 2 loss """nssa""" +1040 2 regularizer """no""" +1040 2 optimizer """adam""" +1040 2 training_loop """owa""" +1040 2 negative_sampler """basic""" +1040 2 evaluator """rankbased""" +1040 3 dataset """wn18rr""" +1040 3 model """unstructuredmodel""" +1040 3 loss """nssa""" +1040 3 regularizer """no""" +1040 3 optimizer """adam""" +1040 3 training_loop """owa""" +1040 3 negative_sampler """basic""" +1040 3 evaluator """rankbased""" +1040 4 dataset """wn18rr""" +1040 4 model """unstructuredmodel""" +1040 4 loss """nssa""" +1040 4 regularizer """no""" +1040 4 optimizer """adam""" +1040 4 training_loop """owa""" +1040 4 negative_sampler """basic""" +1040 4 evaluator """rankbased""" +1040 5 dataset """wn18rr""" +1040 5 model """unstructuredmodel""" +1040 5 loss """nssa""" +1040 5 regularizer """no""" +1040 5 optimizer """adam""" +1040 5 training_loop """owa""" +1040 5 negative_sampler """basic""" +1040 5 evaluator """rankbased""" +1040 6 dataset """wn18rr""" +1040 6 model """unstructuredmodel""" +1040 6 loss """nssa""" +1040 6 regularizer """no""" +1040 6 optimizer """adam""" +1040 6 training_loop """owa""" +1040 6 negative_sampler """basic""" +1040 6 evaluator """rankbased""" +1040 7 dataset """wn18rr""" +1040 7 model """unstructuredmodel""" +1040 7 loss """nssa""" +1040 7 regularizer """no""" +1040 7 optimizer """adam""" +1040 7 training_loop """owa""" +1040 7 negative_sampler """basic""" +1040 7 evaluator """rankbased""" +1040 8 dataset """wn18rr""" +1040 8 model """unstructuredmodel""" +1040 8 loss """nssa""" +1040 8 regularizer """no""" +1040 8 optimizer """adam""" +1040 8 training_loop """owa""" +1040 8 negative_sampler """basic""" +1040 8 evaluator """rankbased""" +1040 9 dataset """wn18rr""" +1040 9 model """unstructuredmodel""" +1040 9 loss """nssa""" 
+1040 9 regularizer """no""" +1040 9 optimizer """adam""" +1040 9 training_loop """owa""" +1040 9 negative_sampler """basic""" +1040 9 evaluator """rankbased""" +1040 10 dataset """wn18rr""" +1040 10 model """unstructuredmodel""" +1040 10 loss """nssa""" +1040 10 regularizer """no""" +1040 10 optimizer """adam""" +1040 10 training_loop """owa""" +1040 10 negative_sampler """basic""" +1040 10 evaluator """rankbased""" +1040 11 dataset """wn18rr""" +1040 11 model """unstructuredmodel""" +1040 11 loss """nssa""" +1040 11 regularizer """no""" +1040 11 optimizer """adam""" +1040 11 training_loop """owa""" +1040 11 negative_sampler """basic""" +1040 11 evaluator """rankbased""" +1040 12 dataset """wn18rr""" +1040 12 model """unstructuredmodel""" +1040 12 loss """nssa""" +1040 12 regularizer """no""" +1040 12 optimizer """adam""" +1040 12 training_loop """owa""" +1040 12 negative_sampler """basic""" +1040 12 evaluator """rankbased""" +1040 13 dataset """wn18rr""" +1040 13 model """unstructuredmodel""" +1040 13 loss """nssa""" +1040 13 regularizer """no""" +1040 13 optimizer """adam""" +1040 13 training_loop """owa""" +1040 13 negative_sampler """basic""" +1040 13 evaluator """rankbased""" +1040 14 dataset """wn18rr""" +1040 14 model """unstructuredmodel""" +1040 14 loss """nssa""" +1040 14 regularizer """no""" +1040 14 optimizer """adam""" +1040 14 training_loop """owa""" +1040 14 negative_sampler """basic""" +1040 14 evaluator """rankbased""" +1040 15 dataset """wn18rr""" +1040 15 model """unstructuredmodel""" +1040 15 loss """nssa""" +1040 15 regularizer """no""" +1040 15 optimizer """adam""" +1040 15 training_loop """owa""" +1040 15 negative_sampler """basic""" +1040 15 evaluator """rankbased""" +1040 16 dataset """wn18rr""" +1040 16 model """unstructuredmodel""" +1040 16 loss """nssa""" +1040 16 regularizer """no""" +1040 16 optimizer """adam""" +1040 16 training_loop """owa""" +1040 16 negative_sampler """basic""" +1040 16 evaluator """rankbased""" +1040 17 dataset 
"""wn18rr""" +1040 17 model """unstructuredmodel""" +1040 17 loss """nssa""" +1040 17 regularizer """no""" +1040 17 optimizer """adam""" +1040 17 training_loop """owa""" +1040 17 negative_sampler """basic""" +1040 17 evaluator """rankbased""" +1040 18 dataset """wn18rr""" +1040 18 model """unstructuredmodel""" +1040 18 loss """nssa""" +1040 18 regularizer """no""" +1040 18 optimizer """adam""" +1040 18 training_loop """owa""" +1040 18 negative_sampler """basic""" +1040 18 evaluator """rankbased""" +1040 19 dataset """wn18rr""" +1040 19 model """unstructuredmodel""" +1040 19 loss """nssa""" +1040 19 regularizer """no""" +1040 19 optimizer """adam""" +1040 19 training_loop """owa""" +1040 19 negative_sampler """basic""" +1040 19 evaluator """rankbased""" +1040 20 dataset """wn18rr""" +1040 20 model """unstructuredmodel""" +1040 20 loss """nssa""" +1040 20 regularizer """no""" +1040 20 optimizer """adam""" +1040 20 training_loop """owa""" +1040 20 negative_sampler """basic""" +1040 20 evaluator """rankbased""" +1040 21 dataset """wn18rr""" +1040 21 model """unstructuredmodel""" +1040 21 loss """nssa""" +1040 21 regularizer """no""" +1040 21 optimizer """adam""" +1040 21 training_loop """owa""" +1040 21 negative_sampler """basic""" +1040 21 evaluator """rankbased""" +1040 22 dataset """wn18rr""" +1040 22 model """unstructuredmodel""" +1040 22 loss """nssa""" +1040 22 regularizer """no""" +1040 22 optimizer """adam""" +1040 22 training_loop """owa""" +1040 22 negative_sampler """basic""" +1040 22 evaluator """rankbased""" +1040 23 dataset """wn18rr""" +1040 23 model """unstructuredmodel""" +1040 23 loss """nssa""" +1040 23 regularizer """no""" +1040 23 optimizer """adam""" +1040 23 training_loop """owa""" +1040 23 negative_sampler """basic""" +1040 23 evaluator """rankbased""" +1040 24 dataset """wn18rr""" +1040 24 model """unstructuredmodel""" +1040 24 loss """nssa""" +1040 24 regularizer """no""" +1040 24 optimizer """adam""" +1040 24 training_loop """owa""" +1040 24 
negative_sampler """basic""" +1040 24 evaluator """rankbased""" +1040 25 dataset """wn18rr""" +1040 25 model """unstructuredmodel""" +1040 25 loss """nssa""" +1040 25 regularizer """no""" +1040 25 optimizer """adam""" +1040 25 training_loop """owa""" +1040 25 negative_sampler """basic""" +1040 25 evaluator """rankbased""" +1040 26 dataset """wn18rr""" +1040 26 model """unstructuredmodel""" +1040 26 loss """nssa""" +1040 26 regularizer """no""" +1040 26 optimizer """adam""" +1040 26 training_loop """owa""" +1040 26 negative_sampler """basic""" +1040 26 evaluator """rankbased""" +1040 27 dataset """wn18rr""" +1040 27 model """unstructuredmodel""" +1040 27 loss """nssa""" +1040 27 regularizer """no""" +1040 27 optimizer """adam""" +1040 27 training_loop """owa""" +1040 27 negative_sampler """basic""" +1040 27 evaluator """rankbased""" +1040 28 dataset """wn18rr""" +1040 28 model """unstructuredmodel""" +1040 28 loss """nssa""" +1040 28 regularizer """no""" +1040 28 optimizer """adam""" +1040 28 training_loop """owa""" +1040 28 negative_sampler """basic""" +1040 28 evaluator """rankbased""" +1040 29 dataset """wn18rr""" +1040 29 model """unstructuredmodel""" +1040 29 loss """nssa""" +1040 29 regularizer """no""" +1040 29 optimizer """adam""" +1040 29 training_loop """owa""" +1040 29 negative_sampler """basic""" +1040 29 evaluator """rankbased""" +1040 30 dataset """wn18rr""" +1040 30 model """unstructuredmodel""" +1040 30 loss """nssa""" +1040 30 regularizer """no""" +1040 30 optimizer """adam""" +1040 30 training_loop """owa""" +1040 30 negative_sampler """basic""" +1040 30 evaluator """rankbased""" +1040 31 dataset """wn18rr""" +1040 31 model """unstructuredmodel""" +1040 31 loss """nssa""" +1040 31 regularizer """no""" +1040 31 optimizer """adam""" +1040 31 training_loop """owa""" +1040 31 negative_sampler """basic""" +1040 31 evaluator """rankbased""" +1040 32 dataset """wn18rr""" +1040 32 model """unstructuredmodel""" +1040 32 loss """nssa""" +1040 32 regularizer 
"""no""" +1040 32 optimizer """adam""" +1040 32 training_loop """owa""" +1040 32 negative_sampler """basic""" +1040 32 evaluator """rankbased""" +1040 33 dataset """wn18rr""" +1040 33 model """unstructuredmodel""" +1040 33 loss """nssa""" +1040 33 regularizer """no""" +1040 33 optimizer """adam""" +1040 33 training_loop """owa""" +1040 33 negative_sampler """basic""" +1040 33 evaluator """rankbased""" +1040 34 dataset """wn18rr""" +1040 34 model """unstructuredmodel""" +1040 34 loss """nssa""" +1040 34 regularizer """no""" +1040 34 optimizer """adam""" +1040 34 training_loop """owa""" +1040 34 negative_sampler """basic""" +1040 34 evaluator """rankbased""" +1040 35 dataset """wn18rr""" +1040 35 model """unstructuredmodel""" +1040 35 loss """nssa""" +1040 35 regularizer """no""" +1040 35 optimizer """adam""" +1040 35 training_loop """owa""" +1040 35 negative_sampler """basic""" +1040 35 evaluator """rankbased""" +1040 36 dataset """wn18rr""" +1040 36 model """unstructuredmodel""" +1040 36 loss """nssa""" +1040 36 regularizer """no""" +1040 36 optimizer """adam""" +1040 36 training_loop """owa""" +1040 36 negative_sampler """basic""" +1040 36 evaluator """rankbased""" +1040 37 dataset """wn18rr""" +1040 37 model """unstructuredmodel""" +1040 37 loss """nssa""" +1040 37 regularizer """no""" +1040 37 optimizer """adam""" +1040 37 training_loop """owa""" +1040 37 negative_sampler """basic""" +1040 37 evaluator """rankbased""" +1040 38 dataset """wn18rr""" +1040 38 model """unstructuredmodel""" +1040 38 loss """nssa""" +1040 38 regularizer """no""" +1040 38 optimizer """adam""" +1040 38 training_loop """owa""" +1040 38 negative_sampler """basic""" +1040 38 evaluator """rankbased""" +1040 39 dataset """wn18rr""" +1040 39 model """unstructuredmodel""" +1040 39 loss """nssa""" +1040 39 regularizer """no""" +1040 39 optimizer """adam""" +1040 39 training_loop """owa""" +1040 39 negative_sampler """basic""" +1040 39 evaluator """rankbased""" +1040 40 dataset """wn18rr""" 
+1040 40 model """unstructuredmodel""" +1040 40 loss """nssa""" +1040 40 regularizer """no""" +1040 40 optimizer """adam""" +1040 40 training_loop """owa""" +1040 40 negative_sampler """basic""" +1040 40 evaluator """rankbased""" +1040 41 dataset """wn18rr""" +1040 41 model """unstructuredmodel""" +1040 41 loss """nssa""" +1040 41 regularizer """no""" +1040 41 optimizer """adam""" +1040 41 training_loop """owa""" +1040 41 negative_sampler """basic""" +1040 41 evaluator """rankbased""" +1040 42 dataset """wn18rr""" +1040 42 model """unstructuredmodel""" +1040 42 loss """nssa""" +1040 42 regularizer """no""" +1040 42 optimizer """adam""" +1040 42 training_loop """owa""" +1040 42 negative_sampler """basic""" +1040 42 evaluator """rankbased""" +1040 43 dataset """wn18rr""" +1040 43 model """unstructuredmodel""" +1040 43 loss """nssa""" +1040 43 regularizer """no""" +1040 43 optimizer """adam""" +1040 43 training_loop """owa""" +1040 43 negative_sampler """basic""" +1040 43 evaluator """rankbased""" +1040 44 dataset """wn18rr""" +1040 44 model """unstructuredmodel""" +1040 44 loss """nssa""" +1040 44 regularizer """no""" +1040 44 optimizer """adam""" +1040 44 training_loop """owa""" +1040 44 negative_sampler """basic""" +1040 44 evaluator """rankbased""" +1040 45 dataset """wn18rr""" +1040 45 model """unstructuredmodel""" +1040 45 loss """nssa""" +1040 45 regularizer """no""" +1040 45 optimizer """adam""" +1040 45 training_loop """owa""" +1040 45 negative_sampler """basic""" +1040 45 evaluator """rankbased""" +1040 46 dataset """wn18rr""" +1040 46 model """unstructuredmodel""" +1040 46 loss """nssa""" +1040 46 regularizer """no""" +1040 46 optimizer """adam""" +1040 46 training_loop """owa""" +1040 46 negative_sampler """basic""" +1040 46 evaluator """rankbased""" +1040 47 dataset """wn18rr""" +1040 47 model """unstructuredmodel""" +1040 47 loss """nssa""" +1040 47 regularizer """no""" +1040 47 optimizer """adam""" +1040 47 training_loop """owa""" +1040 47 
negative_sampler """basic""" +1040 47 evaluator """rankbased""" +1040 48 dataset """wn18rr""" +1040 48 model """unstructuredmodel""" +1040 48 loss """nssa""" +1040 48 regularizer """no""" +1040 48 optimizer """adam""" +1040 48 training_loop """owa""" +1040 48 negative_sampler """basic""" +1040 48 evaluator """rankbased""" +1040 49 dataset """wn18rr""" +1040 49 model """unstructuredmodel""" +1040 49 loss """nssa""" +1040 49 regularizer """no""" +1040 49 optimizer """adam""" +1040 49 training_loop """owa""" +1040 49 negative_sampler """basic""" +1040 49 evaluator """rankbased""" +1040 50 dataset """wn18rr""" +1040 50 model """unstructuredmodel""" +1040 50 loss """nssa""" +1040 50 regularizer """no""" +1040 50 optimizer """adam""" +1040 50 training_loop """owa""" +1040 50 negative_sampler """basic""" +1040 50 evaluator """rankbased""" +1040 51 dataset """wn18rr""" +1040 51 model """unstructuredmodel""" +1040 51 loss """nssa""" +1040 51 regularizer """no""" +1040 51 optimizer """adam""" +1040 51 training_loop """owa""" +1040 51 negative_sampler """basic""" +1040 51 evaluator """rankbased""" +1040 52 dataset """wn18rr""" +1040 52 model """unstructuredmodel""" +1040 52 loss """nssa""" +1040 52 regularizer """no""" +1040 52 optimizer """adam""" +1040 52 training_loop """owa""" +1040 52 negative_sampler """basic""" +1040 52 evaluator """rankbased""" +1040 53 dataset """wn18rr""" +1040 53 model """unstructuredmodel""" +1040 53 loss """nssa""" +1040 53 regularizer """no""" +1040 53 optimizer """adam""" +1040 53 training_loop """owa""" +1040 53 negative_sampler """basic""" +1040 53 evaluator """rankbased""" +1040 54 dataset """wn18rr""" +1040 54 model """unstructuredmodel""" +1040 54 loss """nssa""" +1040 54 regularizer """no""" +1040 54 optimizer """adam""" +1040 54 training_loop """owa""" +1040 54 negative_sampler """basic""" +1040 54 evaluator """rankbased""" +1040 55 dataset """wn18rr""" +1040 55 model """unstructuredmodel""" +1040 55 loss """nssa""" +1040 55 regularizer 
"""no""" +1040 55 optimizer """adam""" +1040 55 training_loop """owa""" +1040 55 negative_sampler """basic""" +1040 55 evaluator """rankbased""" +1040 56 dataset """wn18rr""" +1040 56 model """unstructuredmodel""" +1040 56 loss """nssa""" +1040 56 regularizer """no""" +1040 56 optimizer """adam""" +1040 56 training_loop """owa""" +1040 56 negative_sampler """basic""" +1040 56 evaluator """rankbased""" +1040 57 dataset """wn18rr""" +1040 57 model """unstructuredmodel""" +1040 57 loss """nssa""" +1040 57 regularizer """no""" +1040 57 optimizer """adam""" +1040 57 training_loop """owa""" +1040 57 negative_sampler """basic""" +1040 57 evaluator """rankbased""" +1040 58 dataset """wn18rr""" +1040 58 model """unstructuredmodel""" +1040 58 loss """nssa""" +1040 58 regularizer """no""" +1040 58 optimizer """adam""" +1040 58 training_loop """owa""" +1040 58 negative_sampler """basic""" +1040 58 evaluator """rankbased""" +1040 59 dataset """wn18rr""" +1040 59 model """unstructuredmodel""" +1040 59 loss """nssa""" +1040 59 regularizer """no""" +1040 59 optimizer """adam""" +1040 59 training_loop """owa""" +1040 59 negative_sampler """basic""" +1040 59 evaluator """rankbased""" +1040 60 dataset """wn18rr""" +1040 60 model """unstructuredmodel""" +1040 60 loss """nssa""" +1040 60 regularizer """no""" +1040 60 optimizer """adam""" +1040 60 training_loop """owa""" +1040 60 negative_sampler """basic""" +1040 60 evaluator """rankbased""" +1040 61 dataset """wn18rr""" +1040 61 model """unstructuredmodel""" +1040 61 loss """nssa""" +1040 61 regularizer """no""" +1040 61 optimizer """adam""" +1040 61 training_loop """owa""" +1040 61 negative_sampler """basic""" +1040 61 evaluator """rankbased""" +1040 62 dataset """wn18rr""" +1040 62 model """unstructuredmodel""" +1040 62 loss """nssa""" +1040 62 regularizer """no""" +1040 62 optimizer """adam""" +1040 62 training_loop """owa""" +1040 62 negative_sampler """basic""" +1040 62 evaluator """rankbased""" +1040 63 dataset """wn18rr""" 
+1040 63 model """unstructuredmodel""" +1040 63 loss """nssa""" +1040 63 regularizer """no""" +1040 63 optimizer """adam""" +1040 63 training_loop """owa""" +1040 63 negative_sampler """basic""" +1040 63 evaluator """rankbased""" +1040 64 dataset """wn18rr""" +1040 64 model """unstructuredmodel""" +1040 64 loss """nssa""" +1040 64 regularizer """no""" +1040 64 optimizer """adam""" +1040 64 training_loop """owa""" +1040 64 negative_sampler """basic""" +1040 64 evaluator """rankbased""" +1040 65 dataset """wn18rr""" +1040 65 model """unstructuredmodel""" +1040 65 loss """nssa""" +1040 65 regularizer """no""" +1040 65 optimizer """adam""" +1040 65 training_loop """owa""" +1040 65 negative_sampler """basic""" +1040 65 evaluator """rankbased""" +1040 66 dataset """wn18rr""" +1040 66 model """unstructuredmodel""" +1040 66 loss """nssa""" +1040 66 regularizer """no""" +1040 66 optimizer """adam""" +1040 66 training_loop """owa""" +1040 66 negative_sampler """basic""" +1040 66 evaluator """rankbased""" +1040 67 dataset """wn18rr""" +1040 67 model """unstructuredmodel""" +1040 67 loss """nssa""" +1040 67 regularizer """no""" +1040 67 optimizer """adam""" +1040 67 training_loop """owa""" +1040 67 negative_sampler """basic""" +1040 67 evaluator """rankbased""" +1040 68 dataset """wn18rr""" +1040 68 model """unstructuredmodel""" +1040 68 loss """nssa""" +1040 68 regularizer """no""" +1040 68 optimizer """adam""" +1040 68 training_loop """owa""" +1040 68 negative_sampler """basic""" +1040 68 evaluator """rankbased""" +1040 69 dataset """wn18rr""" +1040 69 model """unstructuredmodel""" +1040 69 loss """nssa""" +1040 69 regularizer """no""" +1040 69 optimizer """adam""" +1040 69 training_loop """owa""" +1040 69 negative_sampler """basic""" +1040 69 evaluator """rankbased""" +1040 70 dataset """wn18rr""" +1040 70 model """unstructuredmodel""" +1040 70 loss """nssa""" +1040 70 regularizer """no""" +1040 70 optimizer """adam""" +1040 70 training_loop """owa""" +1040 70 
negative_sampler """basic""" +1040 70 evaluator """rankbased""" +1040 71 dataset """wn18rr""" +1040 71 model """unstructuredmodel""" +1040 71 loss """nssa""" +1040 71 regularizer """no""" +1040 71 optimizer """adam""" +1040 71 training_loop """owa""" +1040 71 negative_sampler """basic""" +1040 71 evaluator """rankbased""" +1040 72 dataset """wn18rr""" +1040 72 model """unstructuredmodel""" +1040 72 loss """nssa""" +1040 72 regularizer """no""" +1040 72 optimizer """adam""" +1040 72 training_loop """owa""" +1040 72 negative_sampler """basic""" +1040 72 evaluator """rankbased""" +1040 73 dataset """wn18rr""" +1040 73 model """unstructuredmodel""" +1040 73 loss """nssa""" +1040 73 regularizer """no""" +1040 73 optimizer """adam""" +1040 73 training_loop """owa""" +1040 73 negative_sampler """basic""" +1040 73 evaluator """rankbased""" +1040 74 dataset """wn18rr""" +1040 74 model """unstructuredmodel""" +1040 74 loss """nssa""" +1040 74 regularizer """no""" +1040 74 optimizer """adam""" +1040 74 training_loop """owa""" +1040 74 negative_sampler """basic""" +1040 74 evaluator """rankbased""" +1040 75 dataset """wn18rr""" +1040 75 model """unstructuredmodel""" +1040 75 loss """nssa""" +1040 75 regularizer """no""" +1040 75 optimizer """adam""" +1040 75 training_loop """owa""" +1040 75 negative_sampler """basic""" +1040 75 evaluator """rankbased""" +1040 76 dataset """wn18rr""" +1040 76 model """unstructuredmodel""" +1040 76 loss """nssa""" +1040 76 regularizer """no""" +1040 76 optimizer """adam""" +1040 76 training_loop """owa""" +1040 76 negative_sampler """basic""" +1040 76 evaluator """rankbased""" +1040 77 dataset """wn18rr""" +1040 77 model """unstructuredmodel""" +1040 77 loss """nssa""" +1040 77 regularizer """no""" +1040 77 optimizer """adam""" +1040 77 training_loop """owa""" +1040 77 negative_sampler """basic""" +1040 77 evaluator """rankbased""" +1040 78 dataset """wn18rr""" +1040 78 model """unstructuredmodel""" +1040 78 loss """nssa""" +1040 78 regularizer 
"""no""" +1040 78 optimizer """adam""" +1040 78 training_loop """owa""" +1040 78 negative_sampler """basic""" +1040 78 evaluator """rankbased""" +1040 79 dataset """wn18rr""" +1040 79 model """unstructuredmodel""" +1040 79 loss """nssa""" +1040 79 regularizer """no""" +1040 79 optimizer """adam""" +1040 79 training_loop """owa""" +1040 79 negative_sampler """basic""" +1040 79 evaluator """rankbased""" +1040 80 dataset """wn18rr""" +1040 80 model """unstructuredmodel""" +1040 80 loss """nssa""" +1040 80 regularizer """no""" +1040 80 optimizer """adam""" +1040 80 training_loop """owa""" +1040 80 negative_sampler """basic""" +1040 80 evaluator """rankbased""" +1040 81 dataset """wn18rr""" +1040 81 model """unstructuredmodel""" +1040 81 loss """nssa""" +1040 81 regularizer """no""" +1040 81 optimizer """adam""" +1040 81 training_loop """owa""" +1040 81 negative_sampler """basic""" +1040 81 evaluator """rankbased""" +1040 82 dataset """wn18rr""" +1040 82 model """unstructuredmodel""" +1040 82 loss """nssa""" +1040 82 regularizer """no""" +1040 82 optimizer """adam""" +1040 82 training_loop """owa""" +1040 82 negative_sampler """basic""" +1040 82 evaluator """rankbased""" +1040 83 dataset """wn18rr""" +1040 83 model """unstructuredmodel""" +1040 83 loss """nssa""" +1040 83 regularizer """no""" +1040 83 optimizer """adam""" +1040 83 training_loop """owa""" +1040 83 negative_sampler """basic""" +1040 83 evaluator """rankbased""" +1040 84 dataset """wn18rr""" +1040 84 model """unstructuredmodel""" +1040 84 loss """nssa""" +1040 84 regularizer """no""" +1040 84 optimizer """adam""" +1040 84 training_loop """owa""" +1040 84 negative_sampler """basic""" +1040 84 evaluator """rankbased""" +1040 85 dataset """wn18rr""" +1040 85 model """unstructuredmodel""" +1040 85 loss """nssa""" +1040 85 regularizer """no""" +1040 85 optimizer """adam""" +1040 85 training_loop """owa""" +1040 85 negative_sampler """basic""" +1040 85 evaluator """rankbased""" +1040 86 dataset """wn18rr""" 
+1040 86 model """unstructuredmodel""" +1040 86 loss """nssa""" +1040 86 regularizer """no""" +1040 86 optimizer """adam""" +1040 86 training_loop """owa""" +1040 86 negative_sampler """basic""" +1040 86 evaluator """rankbased""" +1040 87 dataset """wn18rr""" +1040 87 model """unstructuredmodel""" +1040 87 loss """nssa""" +1040 87 regularizer """no""" +1040 87 optimizer """adam""" +1040 87 training_loop """owa""" +1040 87 negative_sampler """basic""" +1040 87 evaluator """rankbased""" +1040 88 dataset """wn18rr""" +1040 88 model """unstructuredmodel""" +1040 88 loss """nssa""" +1040 88 regularizer """no""" +1040 88 optimizer """adam""" +1040 88 training_loop """owa""" +1040 88 negative_sampler """basic""" +1040 88 evaluator """rankbased""" +1040 89 dataset """wn18rr""" +1040 89 model """unstructuredmodel""" +1040 89 loss """nssa""" +1040 89 regularizer """no""" +1040 89 optimizer """adam""" +1040 89 training_loop """owa""" +1040 89 negative_sampler """basic""" +1040 89 evaluator """rankbased""" +1040 90 dataset """wn18rr""" +1040 90 model """unstructuredmodel""" +1040 90 loss """nssa""" +1040 90 regularizer """no""" +1040 90 optimizer """adam""" +1040 90 training_loop """owa""" +1040 90 negative_sampler """basic""" +1040 90 evaluator """rankbased""" +1040 91 dataset """wn18rr""" +1040 91 model """unstructuredmodel""" +1040 91 loss """nssa""" +1040 91 regularizer """no""" +1040 91 optimizer """adam""" +1040 91 training_loop """owa""" +1040 91 negative_sampler """basic""" +1040 91 evaluator """rankbased""" +1040 92 dataset """wn18rr""" +1040 92 model """unstructuredmodel""" +1040 92 loss """nssa""" +1040 92 regularizer """no""" +1040 92 optimizer """adam""" +1040 92 training_loop """owa""" +1040 92 negative_sampler """basic""" +1040 92 evaluator """rankbased""" +1040 93 dataset """wn18rr""" +1040 93 model """unstructuredmodel""" +1040 93 loss """nssa""" +1040 93 regularizer """no""" +1040 93 optimizer """adam""" +1040 93 training_loop """owa""" +1040 93 
negative_sampler """basic""" +1040 93 evaluator """rankbased""" +1040 94 dataset """wn18rr""" +1040 94 model """unstructuredmodel""" +1040 94 loss """nssa""" +1040 94 regularizer """no""" +1040 94 optimizer """adam""" +1040 94 training_loop """owa""" +1040 94 negative_sampler """basic""" +1040 94 evaluator """rankbased""" +1040 95 dataset """wn18rr""" +1040 95 model """unstructuredmodel""" +1040 95 loss """nssa""" +1040 95 regularizer """no""" +1040 95 optimizer """adam""" +1040 95 training_loop """owa""" +1040 95 negative_sampler """basic""" +1040 95 evaluator """rankbased""" +1040 96 dataset """wn18rr""" +1040 96 model """unstructuredmodel""" +1040 96 loss """nssa""" +1040 96 regularizer """no""" +1040 96 optimizer """adam""" +1040 96 training_loop """owa""" +1040 96 negative_sampler """basic""" +1040 96 evaluator """rankbased""" +1040 97 dataset """wn18rr""" +1040 97 model """unstructuredmodel""" +1040 97 loss """nssa""" +1040 97 regularizer """no""" +1040 97 optimizer """adam""" +1040 97 training_loop """owa""" +1040 97 negative_sampler """basic""" +1040 97 evaluator """rankbased""" +1040 98 dataset """wn18rr""" +1040 98 model """unstructuredmodel""" +1040 98 loss """nssa""" +1040 98 regularizer """no""" +1040 98 optimizer """adam""" +1040 98 training_loop """owa""" +1040 98 negative_sampler """basic""" +1040 98 evaluator """rankbased""" +1040 99 dataset """wn18rr""" +1040 99 model """unstructuredmodel""" +1040 99 loss """nssa""" +1040 99 regularizer """no""" +1040 99 optimizer """adam""" +1040 99 training_loop """owa""" +1040 99 negative_sampler """basic""" +1040 99 evaluator """rankbased""" +1040 100 dataset """wn18rr""" +1040 100 model """unstructuredmodel""" +1040 100 loss """nssa""" +1040 100 regularizer """no""" +1040 100 optimizer """adam""" +1040 100 training_loop """owa""" +1040 100 negative_sampler """basic""" +1040 100 evaluator """rankbased""" +1041 1 model.embedding_dim 2.0 +1041 1 model.scoring_fct_norm 1.0 +1041 1 loss.margin 29.013624232844585 
+1041 1 loss.adversarial_temperature 0.8368012175453701 +1041 1 optimizer.lr 0.001140003689606425 +1041 1 negative_sampler.num_negs_per_pos 44.0 +1041 1 training.batch_size 2.0 +1041 2 model.embedding_dim 0.0 +1041 2 model.scoring_fct_norm 2.0 +1041 2 loss.margin 17.51155476639437 +1041 2 loss.adversarial_temperature 0.8471549935324236 +1041 2 optimizer.lr 0.0026142757807019125 +1041 2 negative_sampler.num_negs_per_pos 30.0 +1041 2 training.batch_size 2.0 +1041 3 model.embedding_dim 0.0 +1041 3 model.scoring_fct_norm 1.0 +1041 3 loss.margin 2.886637227527364 +1041 3 loss.adversarial_temperature 0.3698034766542894 +1041 3 optimizer.lr 0.08484753114457586 +1041 3 negative_sampler.num_negs_per_pos 45.0 +1041 3 training.batch_size 2.0 +1041 4 model.embedding_dim 1.0 +1041 4 model.scoring_fct_norm 1.0 +1041 4 loss.margin 13.369158493801677 +1041 4 loss.adversarial_temperature 0.11040331187572885 +1041 4 optimizer.lr 0.006201720714341969 +1041 4 negative_sampler.num_negs_per_pos 12.0 +1041 4 training.batch_size 0.0 +1041 5 model.embedding_dim 2.0 +1041 5 model.scoring_fct_norm 1.0 +1041 5 loss.margin 4.140186113734569 +1041 5 loss.adversarial_temperature 0.41783541167045263 +1041 5 optimizer.lr 0.0049787449295255705 +1041 5 negative_sampler.num_negs_per_pos 58.0 +1041 5 training.batch_size 1.0 +1041 6 model.embedding_dim 0.0 +1041 6 model.scoring_fct_norm 2.0 +1041 6 loss.margin 28.60521896648744 +1041 6 loss.adversarial_temperature 0.3609107549429655 +1041 6 optimizer.lr 0.03324815206255882 +1041 6 negative_sampler.num_negs_per_pos 1.0 +1041 6 training.batch_size 0.0 +1041 7 model.embedding_dim 2.0 +1041 7 model.scoring_fct_norm 1.0 +1041 7 loss.margin 25.7345485585101 +1041 7 loss.adversarial_temperature 0.7199998583970022 +1041 7 optimizer.lr 0.006648041904253918 +1041 7 negative_sampler.num_negs_per_pos 98.0 +1041 7 training.batch_size 0.0 +1041 8 model.embedding_dim 2.0 +1041 8 model.scoring_fct_norm 1.0 +1041 8 loss.margin 4.6785509920619655 +1041 8 
loss.adversarial_temperature 0.39046386528206084 +1041 8 optimizer.lr 0.0210524577962977 +1041 8 negative_sampler.num_negs_per_pos 44.0 +1041 8 training.batch_size 0.0 +1041 9 model.embedding_dim 2.0 +1041 9 model.scoring_fct_norm 1.0 +1041 9 loss.margin 20.36358506742979 +1041 9 loss.adversarial_temperature 0.7920968477068342 +1041 9 optimizer.lr 0.005193521204137711 +1041 9 negative_sampler.num_negs_per_pos 77.0 +1041 9 training.batch_size 0.0 +1041 10 model.embedding_dim 1.0 +1041 10 model.scoring_fct_norm 2.0 +1041 10 loss.margin 19.42149870195891 +1041 10 loss.adversarial_temperature 0.7349287069655059 +1041 10 optimizer.lr 0.0031411147796046204 +1041 10 negative_sampler.num_negs_per_pos 59.0 +1041 10 training.batch_size 2.0 +1041 11 model.embedding_dim 2.0 +1041 11 model.scoring_fct_norm 1.0 +1041 11 loss.margin 2.7349476092306793 +1041 11 loss.adversarial_temperature 0.5564407229108113 +1041 11 optimizer.lr 0.09543941398410989 +1041 11 negative_sampler.num_negs_per_pos 60.0 +1041 11 training.batch_size 2.0 +1041 12 model.embedding_dim 0.0 +1041 12 model.scoring_fct_norm 2.0 +1041 12 loss.margin 28.895694080158535 +1041 12 loss.adversarial_temperature 0.6859917122867534 +1041 12 optimizer.lr 0.03265584973092788 +1041 12 negative_sampler.num_negs_per_pos 20.0 +1041 12 training.batch_size 1.0 +1041 13 model.embedding_dim 0.0 +1041 13 model.scoring_fct_norm 2.0 +1041 13 loss.margin 22.2413598453498 +1041 13 loss.adversarial_temperature 0.4594916513812584 +1041 13 optimizer.lr 0.09451870496194922 +1041 13 negative_sampler.num_negs_per_pos 47.0 +1041 13 training.batch_size 1.0 +1041 14 model.embedding_dim 1.0 +1041 14 model.scoring_fct_norm 2.0 +1041 14 loss.margin 28.32464564564673 +1041 14 loss.adversarial_temperature 0.3454205901661842 +1041 14 optimizer.lr 0.016449494238541414 +1041 14 negative_sampler.num_negs_per_pos 25.0 +1041 14 training.batch_size 0.0 +1041 15 model.embedding_dim 2.0 +1041 15 model.scoring_fct_norm 2.0 +1041 15 loss.margin 
24.80732870218814 +1041 15 loss.adversarial_temperature 0.6426340906713072 +1041 15 optimizer.lr 0.02170563378208567 +1041 15 negative_sampler.num_negs_per_pos 97.0 +1041 15 training.batch_size 2.0 +1041 16 model.embedding_dim 1.0 +1041 16 model.scoring_fct_norm 1.0 +1041 16 loss.margin 10.56813035531896 +1041 16 loss.adversarial_temperature 0.8234196947747738 +1041 16 optimizer.lr 0.0018510303213914456 +1041 16 negative_sampler.num_negs_per_pos 71.0 +1041 16 training.batch_size 1.0 +1041 17 model.embedding_dim 0.0 +1041 17 model.scoring_fct_norm 2.0 +1041 17 loss.margin 5.688853185986598 +1041 17 loss.adversarial_temperature 0.604910567787556 +1041 17 optimizer.lr 0.0516484068461828 +1041 17 negative_sampler.num_negs_per_pos 63.0 +1041 17 training.batch_size 1.0 +1041 18 model.embedding_dim 2.0 +1041 18 model.scoring_fct_norm 1.0 +1041 18 loss.margin 5.128916378388409 +1041 18 loss.adversarial_temperature 0.21471749016923755 +1041 18 optimizer.lr 0.001169299818771451 +1041 18 negative_sampler.num_negs_per_pos 26.0 +1041 18 training.batch_size 1.0 +1041 19 model.embedding_dim 2.0 +1041 19 model.scoring_fct_norm 2.0 +1041 19 loss.margin 27.462051970411643 +1041 19 loss.adversarial_temperature 0.6423170475143446 +1041 19 optimizer.lr 0.02302822833733389 +1041 19 negative_sampler.num_negs_per_pos 10.0 +1041 19 training.batch_size 0.0 +1041 20 model.embedding_dim 1.0 +1041 20 model.scoring_fct_norm 1.0 +1041 20 loss.margin 22.729589284616317 +1041 20 loss.adversarial_temperature 0.5244142101049472 +1041 20 optimizer.lr 0.002730001179953096 +1041 20 negative_sampler.num_negs_per_pos 62.0 +1041 20 training.batch_size 1.0 +1041 21 model.embedding_dim 0.0 +1041 21 model.scoring_fct_norm 2.0 +1041 21 loss.margin 13.654710294192181 +1041 21 loss.adversarial_temperature 0.25764839315382204 +1041 21 optimizer.lr 0.036641549646616436 +1041 21 negative_sampler.num_negs_per_pos 80.0 +1041 21 training.batch_size 1.0 +1041 22 model.embedding_dim 1.0 +1041 22 model.scoring_fct_norm 
1.0 +1041 22 loss.margin 23.689148380740484 +1041 22 loss.adversarial_temperature 0.47207232485400236 +1041 22 optimizer.lr 0.0017254957894686132 +1041 22 negative_sampler.num_negs_per_pos 25.0 +1041 22 training.batch_size 2.0 +1041 23 model.embedding_dim 0.0 +1041 23 model.scoring_fct_norm 2.0 +1041 23 loss.margin 29.37699482561671 +1041 23 loss.adversarial_temperature 0.3987988735528531 +1041 23 optimizer.lr 0.014289849956548509 +1041 23 negative_sampler.num_negs_per_pos 20.0 +1041 23 training.batch_size 1.0 +1041 24 model.embedding_dim 1.0 +1041 24 model.scoring_fct_norm 2.0 +1041 24 loss.margin 3.7705993768736095 +1041 24 loss.adversarial_temperature 0.2926643367777228 +1041 24 optimizer.lr 0.012743322227749031 +1041 24 negative_sampler.num_negs_per_pos 41.0 +1041 24 training.batch_size 2.0 +1041 25 model.embedding_dim 2.0 +1041 25 model.scoring_fct_norm 2.0 +1041 25 loss.margin 9.490257673999997 +1041 25 loss.adversarial_temperature 0.760826555868025 +1041 25 optimizer.lr 0.040946580011816544 +1041 25 negative_sampler.num_negs_per_pos 64.0 +1041 25 training.batch_size 0.0 +1041 26 model.embedding_dim 0.0 +1041 26 model.scoring_fct_norm 2.0 +1041 26 loss.margin 9.254862765286923 +1041 26 loss.adversarial_temperature 0.7844464116497202 +1041 26 optimizer.lr 0.004113192260451698 +1041 26 negative_sampler.num_negs_per_pos 65.0 +1041 26 training.batch_size 0.0 +1041 27 model.embedding_dim 1.0 +1041 27 model.scoring_fct_norm 1.0 +1041 27 loss.margin 20.021554039640186 +1041 27 loss.adversarial_temperature 0.2172484729232464 +1041 27 optimizer.lr 0.06268418778824338 +1041 27 negative_sampler.num_negs_per_pos 55.0 +1041 27 training.batch_size 2.0 +1041 28 model.embedding_dim 0.0 +1041 28 model.scoring_fct_norm 2.0 +1041 28 loss.margin 14.084243356343638 +1041 28 loss.adversarial_temperature 0.5209531539983113 +1041 28 optimizer.lr 0.012270250857629067 +1041 28 negative_sampler.num_negs_per_pos 22.0 +1041 28 training.batch_size 1.0 +1041 29 model.embedding_dim 1.0 
+1041 29 model.scoring_fct_norm 1.0 +1041 29 loss.margin 19.499635329356323 +1041 29 loss.adversarial_temperature 0.6924435219263917 +1041 29 optimizer.lr 0.001400108224406494 +1041 29 negative_sampler.num_negs_per_pos 60.0 +1041 29 training.batch_size 2.0 +1041 30 model.embedding_dim 2.0 +1041 30 model.scoring_fct_norm 1.0 +1041 30 loss.margin 22.467117893089963 +1041 30 loss.adversarial_temperature 0.687188895791621 +1041 30 optimizer.lr 0.01300682225405279 +1041 30 negative_sampler.num_negs_per_pos 91.0 +1041 30 training.batch_size 0.0 +1041 31 model.embedding_dim 0.0 +1041 31 model.scoring_fct_norm 1.0 +1041 31 loss.margin 16.522098607274923 +1041 31 loss.adversarial_temperature 0.6053015106497178 +1041 31 optimizer.lr 0.014270729085194115 +1041 31 negative_sampler.num_negs_per_pos 37.0 +1041 31 training.batch_size 2.0 +1041 32 model.embedding_dim 2.0 +1041 32 model.scoring_fct_norm 2.0 +1041 32 loss.margin 23.425360113806388 +1041 32 loss.adversarial_temperature 0.4281448983699294 +1041 32 optimizer.lr 0.004018767440215793 +1041 32 negative_sampler.num_negs_per_pos 37.0 +1041 32 training.batch_size 0.0 +1041 33 model.embedding_dim 2.0 +1041 33 model.scoring_fct_norm 2.0 +1041 33 loss.margin 10.694772469684313 +1041 33 loss.adversarial_temperature 0.16709096358168612 +1041 33 optimizer.lr 0.009216034932093919 +1041 33 negative_sampler.num_negs_per_pos 55.0 +1041 33 training.batch_size 2.0 +1041 34 model.embedding_dim 0.0 +1041 34 model.scoring_fct_norm 2.0 +1041 34 loss.margin 7.767059487248437 +1041 34 loss.adversarial_temperature 0.5119079382794892 +1041 34 optimizer.lr 0.02192038044442275 +1041 34 negative_sampler.num_negs_per_pos 8.0 +1041 34 training.batch_size 2.0 +1041 35 model.embedding_dim 2.0 +1041 35 model.scoring_fct_norm 1.0 +1041 35 loss.margin 24.028637246574004 +1041 35 loss.adversarial_temperature 0.5544487746550102 +1041 35 optimizer.lr 0.010191944109169331 +1041 35 negative_sampler.num_negs_per_pos 82.0 +1041 35 training.batch_size 1.0 +1041 
36 model.embedding_dim 2.0 +1041 36 model.scoring_fct_norm 2.0 +1041 36 loss.margin 23.388795245833318 +1041 36 loss.adversarial_temperature 0.30708983382046895 +1041 36 optimizer.lr 0.0032639419909413713 +1041 36 negative_sampler.num_negs_per_pos 24.0 +1041 36 training.batch_size 1.0 +1041 37 model.embedding_dim 1.0 +1041 37 model.scoring_fct_norm 2.0 +1041 37 loss.margin 11.00057255753444 +1041 37 loss.adversarial_temperature 0.47600269264094364 +1041 37 optimizer.lr 0.08217823990800384 +1041 37 negative_sampler.num_negs_per_pos 73.0 +1041 37 training.batch_size 0.0 +1041 38 model.embedding_dim 2.0 +1041 38 model.scoring_fct_norm 1.0 +1041 38 loss.margin 4.723525496883752 +1041 38 loss.adversarial_temperature 0.8203155889870469 +1041 38 optimizer.lr 0.0011650283906191746 +1041 38 negative_sampler.num_negs_per_pos 28.0 +1041 38 training.batch_size 1.0 +1041 39 model.embedding_dim 0.0 +1041 39 model.scoring_fct_norm 2.0 +1041 39 loss.margin 27.377224891700262 +1041 39 loss.adversarial_temperature 0.5594019349036534 +1041 39 optimizer.lr 0.07262300183365311 +1041 39 negative_sampler.num_negs_per_pos 81.0 +1041 39 training.batch_size 2.0 +1041 40 model.embedding_dim 0.0 +1041 40 model.scoring_fct_norm 2.0 +1041 40 loss.margin 16.320963752939196 +1041 40 loss.adversarial_temperature 0.8426721438326291 +1041 40 optimizer.lr 0.005531066938907557 +1041 40 negative_sampler.num_negs_per_pos 33.0 +1041 40 training.batch_size 1.0 +1041 41 model.embedding_dim 0.0 +1041 41 model.scoring_fct_norm 2.0 +1041 41 loss.margin 16.68311822984894 +1041 41 loss.adversarial_temperature 0.8446705519416663 +1041 41 optimizer.lr 0.00301676072000258 +1041 41 negative_sampler.num_negs_per_pos 87.0 +1041 41 training.batch_size 1.0 +1041 42 model.embedding_dim 0.0 +1041 42 model.scoring_fct_norm 1.0 +1041 42 loss.margin 2.1757894393707757 +1041 42 loss.adversarial_temperature 0.7667066404411852 +1041 42 optimizer.lr 0.016267835094201065 +1041 42 negative_sampler.num_negs_per_pos 60.0 +1041 42 
training.batch_size 1.0 +1041 43 model.embedding_dim 1.0 +1041 43 model.scoring_fct_norm 1.0 +1041 43 loss.margin 18.302794050207687 +1041 43 loss.adversarial_temperature 0.8721145673569979 +1041 43 optimizer.lr 0.00166436710205776 +1041 43 negative_sampler.num_negs_per_pos 76.0 +1041 43 training.batch_size 1.0 +1041 44 model.embedding_dim 2.0 +1041 44 model.scoring_fct_norm 1.0 +1041 44 loss.margin 24.90117093648055 +1041 44 loss.adversarial_temperature 0.541242908925168 +1041 44 optimizer.lr 0.008783006653679398 +1041 44 negative_sampler.num_negs_per_pos 43.0 +1041 44 training.batch_size 2.0 +1041 45 model.embedding_dim 1.0 +1041 45 model.scoring_fct_norm 1.0 +1041 45 loss.margin 25.10522819099437 +1041 45 loss.adversarial_temperature 0.7024478643393803 +1041 45 optimizer.lr 0.002030975441204859 +1041 45 negative_sampler.num_negs_per_pos 28.0 +1041 45 training.batch_size 0.0 +1041 46 model.embedding_dim 2.0 +1041 46 model.scoring_fct_norm 1.0 +1041 46 loss.margin 27.47024831985202 +1041 46 loss.adversarial_temperature 0.9225967142394007 +1041 46 optimizer.lr 0.0018531330782257016 +1041 46 negative_sampler.num_negs_per_pos 19.0 +1041 46 training.batch_size 0.0 +1041 47 model.embedding_dim 2.0 +1041 47 model.scoring_fct_norm 2.0 +1041 47 loss.margin 21.674609826033045 +1041 47 loss.adversarial_temperature 0.66399907634355 +1041 47 optimizer.lr 0.08772082821698961 +1041 47 negative_sampler.num_negs_per_pos 44.0 +1041 47 training.batch_size 2.0 +1041 48 model.embedding_dim 2.0 +1041 48 model.scoring_fct_norm 1.0 +1041 48 loss.margin 20.111721894147156 +1041 48 loss.adversarial_temperature 0.8162435770506014 +1041 48 optimizer.lr 0.0012061981890102583 +1041 48 negative_sampler.num_negs_per_pos 98.0 +1041 48 training.batch_size 0.0 +1041 49 model.embedding_dim 2.0 +1041 49 model.scoring_fct_norm 2.0 +1041 49 loss.margin 23.864646103508605 +1041 49 loss.adversarial_temperature 0.12289620619685361 +1041 49 optimizer.lr 0.009953292737168216 +1041 49 
negative_sampler.num_negs_per_pos 4.0 +1041 49 training.batch_size 1.0 +1041 50 model.embedding_dim 0.0 +1041 50 model.scoring_fct_norm 2.0 +1041 50 loss.margin 21.8421333356317 +1041 50 loss.adversarial_temperature 0.892439919788416 +1041 50 optimizer.lr 0.044386309318589175 +1041 50 negative_sampler.num_negs_per_pos 3.0 +1041 50 training.batch_size 1.0 +1041 51 model.embedding_dim 0.0 +1041 51 model.scoring_fct_norm 2.0 +1041 51 loss.margin 16.67453365825878 +1041 51 loss.adversarial_temperature 0.6527818263886966 +1041 51 optimizer.lr 0.015098735326216861 +1041 51 negative_sampler.num_negs_per_pos 39.0 +1041 51 training.batch_size 0.0 +1041 52 model.embedding_dim 1.0 +1041 52 model.scoring_fct_norm 2.0 +1041 52 loss.margin 17.340431562472133 +1041 52 loss.adversarial_temperature 0.5871767477831522 +1041 52 optimizer.lr 0.029860284780575157 +1041 52 negative_sampler.num_negs_per_pos 5.0 +1041 52 training.batch_size 0.0 +1041 53 model.embedding_dim 0.0 +1041 53 model.scoring_fct_norm 1.0 +1041 53 loss.margin 21.949818353112633 +1041 53 loss.adversarial_temperature 0.18765247609368227 +1041 53 optimizer.lr 0.004089418602147943 +1041 53 negative_sampler.num_negs_per_pos 45.0 +1041 53 training.batch_size 1.0 +1041 54 model.embedding_dim 2.0 +1041 54 model.scoring_fct_norm 2.0 +1041 54 loss.margin 3.018683581000672 +1041 54 loss.adversarial_temperature 0.44790355228470047 +1041 54 optimizer.lr 0.08333130707319858 +1041 54 negative_sampler.num_negs_per_pos 42.0 +1041 54 training.batch_size 0.0 +1041 55 model.embedding_dim 2.0 +1041 55 model.scoring_fct_norm 1.0 +1041 55 loss.margin 13.184822079500888 +1041 55 loss.adversarial_temperature 0.5244206389552881 +1041 55 optimizer.lr 0.007159817696070097 +1041 55 negative_sampler.num_negs_per_pos 79.0 +1041 55 training.batch_size 1.0 +1041 56 model.embedding_dim 0.0 +1041 56 model.scoring_fct_norm 2.0 +1041 56 loss.margin 1.7355614258836116 +1041 56 loss.adversarial_temperature 0.9763506886148506 +1041 56 optimizer.lr 
0.059808321919932385 +1041 56 negative_sampler.num_negs_per_pos 3.0 +1041 56 training.batch_size 0.0 +1041 57 model.embedding_dim 0.0 +1041 57 model.scoring_fct_norm 1.0 +1041 57 loss.margin 2.4267591977755827 +1041 57 loss.adversarial_temperature 0.5567819180146665 +1041 57 optimizer.lr 0.0029335114278230988 +1041 57 negative_sampler.num_negs_per_pos 46.0 +1041 57 training.batch_size 2.0 +1041 58 model.embedding_dim 2.0 +1041 58 model.scoring_fct_norm 2.0 +1041 58 loss.margin 27.625086467677868 +1041 58 loss.adversarial_temperature 0.4572208735009143 +1041 58 optimizer.lr 0.029699633717393502 +1041 58 negative_sampler.num_negs_per_pos 48.0 +1041 58 training.batch_size 1.0 +1041 59 model.embedding_dim 2.0 +1041 59 model.scoring_fct_norm 1.0 +1041 59 loss.margin 22.19520825813296 +1041 59 loss.adversarial_temperature 0.8927633387705328 +1041 59 optimizer.lr 0.06461435456264704 +1041 59 negative_sampler.num_negs_per_pos 81.0 +1041 59 training.batch_size 2.0 +1041 60 model.embedding_dim 0.0 +1041 60 model.scoring_fct_norm 1.0 +1041 60 loss.margin 4.008891364785188 +1041 60 loss.adversarial_temperature 0.34383539268174 +1041 60 optimizer.lr 0.001794324464685251 +1041 60 negative_sampler.num_negs_per_pos 15.0 +1041 60 training.batch_size 1.0 +1041 61 model.embedding_dim 0.0 +1041 61 model.scoring_fct_norm 1.0 +1041 61 loss.margin 11.321817905329187 +1041 61 loss.adversarial_temperature 0.5036691942816625 +1041 61 optimizer.lr 0.05797589168796162 +1041 61 negative_sampler.num_negs_per_pos 40.0 +1041 61 training.batch_size 1.0 +1041 62 model.embedding_dim 2.0 +1041 62 model.scoring_fct_norm 1.0 +1041 62 loss.margin 12.690604075637813 +1041 62 loss.adversarial_temperature 0.44405010736660505 +1041 62 optimizer.lr 0.012568402451142525 +1041 62 negative_sampler.num_negs_per_pos 98.0 +1041 62 training.batch_size 2.0 +1041 63 model.embedding_dim 1.0 +1041 63 model.scoring_fct_norm 1.0 +1041 63 loss.margin 13.122399404367666 +1041 63 loss.adversarial_temperature 
0.804160564896351 +1041 63 optimizer.lr 0.009394946995738273 +1041 63 negative_sampler.num_negs_per_pos 19.0 +1041 63 training.batch_size 2.0 +1041 64 model.embedding_dim 0.0 +1041 64 model.scoring_fct_norm 2.0 +1041 64 loss.margin 8.396395832095735 +1041 64 loss.adversarial_temperature 0.2308283562907131 +1041 64 optimizer.lr 0.01187050806017337 +1041 64 negative_sampler.num_negs_per_pos 79.0 +1041 64 training.batch_size 2.0 +1041 65 model.embedding_dim 1.0 +1041 65 model.scoring_fct_norm 1.0 +1041 65 loss.margin 5.206585039063111 +1041 65 loss.adversarial_temperature 0.7075894803770513 +1041 65 optimizer.lr 0.016828895930872025 +1041 65 negative_sampler.num_negs_per_pos 20.0 +1041 65 training.batch_size 2.0 +1041 66 model.embedding_dim 0.0 +1041 66 model.scoring_fct_norm 2.0 +1041 66 loss.margin 11.49688305559643 +1041 66 loss.adversarial_temperature 0.24780145540290066 +1041 66 optimizer.lr 0.07094336260930796 +1041 66 negative_sampler.num_negs_per_pos 32.0 +1041 66 training.batch_size 1.0 +1041 67 model.embedding_dim 2.0 +1041 67 model.scoring_fct_norm 2.0 +1041 67 loss.margin 18.578159003438888 +1041 67 loss.adversarial_temperature 0.8055738606285433 +1041 67 optimizer.lr 0.043291694403987994 +1041 67 negative_sampler.num_negs_per_pos 64.0 +1041 67 training.batch_size 2.0 +1041 68 model.embedding_dim 2.0 +1041 68 model.scoring_fct_norm 1.0 +1041 68 loss.margin 7.642250879431746 +1041 68 loss.adversarial_temperature 0.5488259628832861 +1041 68 optimizer.lr 0.0015445894965679867 +1041 68 negative_sampler.num_negs_per_pos 20.0 +1041 68 training.batch_size 0.0 +1041 69 model.embedding_dim 0.0 +1041 69 model.scoring_fct_norm 2.0 +1041 69 loss.margin 3.369403067902459 +1041 69 loss.adversarial_temperature 0.5612601804378408 +1041 69 optimizer.lr 0.0015150758457689142 +1041 69 negative_sampler.num_negs_per_pos 41.0 +1041 69 training.batch_size 0.0 +1041 70 model.embedding_dim 1.0 +1041 70 model.scoring_fct_norm 2.0 +1041 70 loss.margin 5.119816483896115 +1041 70 
loss.adversarial_temperature 0.5392968995352052 +1041 70 optimizer.lr 0.04258488470028106 +1041 70 negative_sampler.num_negs_per_pos 89.0 +1041 70 training.batch_size 1.0 +1041 71 model.embedding_dim 0.0 +1041 71 model.scoring_fct_norm 1.0 +1041 71 loss.margin 4.133388667971723 +1041 71 loss.adversarial_temperature 0.7798823681153644 +1041 71 optimizer.lr 0.005131536423197126 +1041 71 negative_sampler.num_negs_per_pos 85.0 +1041 71 training.batch_size 2.0 +1041 72 model.embedding_dim 2.0 +1041 72 model.scoring_fct_norm 2.0 +1041 72 loss.margin 12.719045111622817 +1041 72 loss.adversarial_temperature 0.6355903444193001 +1041 72 optimizer.lr 0.004609988124772361 +1041 72 negative_sampler.num_negs_per_pos 13.0 +1041 72 training.batch_size 2.0 +1041 73 model.embedding_dim 2.0 +1041 73 model.scoring_fct_norm 2.0 +1041 73 loss.margin 29.111331845448746 +1041 73 loss.adversarial_temperature 0.7123689485901812 +1041 73 optimizer.lr 0.06830048831740533 +1041 73 negative_sampler.num_negs_per_pos 12.0 +1041 73 training.batch_size 0.0 +1041 74 model.embedding_dim 0.0 +1041 74 model.scoring_fct_norm 2.0 +1041 74 loss.margin 28.63164655326026 +1041 74 loss.adversarial_temperature 0.8977005472351652 +1041 74 optimizer.lr 0.022667207092457297 +1041 74 negative_sampler.num_negs_per_pos 74.0 +1041 74 training.batch_size 1.0 +1041 75 model.embedding_dim 0.0 +1041 75 model.scoring_fct_norm 2.0 +1041 75 loss.margin 26.67788173961228 +1041 75 loss.adversarial_temperature 0.7628459113288877 +1041 75 optimizer.lr 0.003933907241702587 +1041 75 negative_sampler.num_negs_per_pos 89.0 +1041 75 training.batch_size 0.0 +1041 76 model.embedding_dim 1.0 +1041 76 model.scoring_fct_norm 1.0 +1041 76 loss.margin 2.770803669703811 +1041 76 loss.adversarial_temperature 0.3038468415703656 +1041 76 optimizer.lr 0.0024951587309229527 +1041 76 negative_sampler.num_negs_per_pos 74.0 +1041 76 training.batch_size 1.0 +1041 77 model.embedding_dim 2.0 +1041 77 model.scoring_fct_norm 1.0 +1041 77 loss.margin 
28.7997182982017 +1041 77 loss.adversarial_temperature 0.8737248414806575 +1041 77 optimizer.lr 0.001337593897257231 +1041 77 negative_sampler.num_negs_per_pos 20.0 +1041 77 training.batch_size 2.0 +1041 78 model.embedding_dim 1.0 +1041 78 model.scoring_fct_norm 1.0 +1041 78 loss.margin 17.458623636871348 +1041 78 loss.adversarial_temperature 0.16494135764942977 +1041 78 optimizer.lr 0.0029383384598212726 +1041 78 negative_sampler.num_negs_per_pos 37.0 +1041 78 training.batch_size 1.0 +1041 79 model.embedding_dim 1.0 +1041 79 model.scoring_fct_norm 1.0 +1041 79 loss.margin 24.442172067308654 +1041 79 loss.adversarial_temperature 0.767176194008581 +1041 79 optimizer.lr 0.0012262880881990347 +1041 79 negative_sampler.num_negs_per_pos 90.0 +1041 79 training.batch_size 1.0 +1041 80 model.embedding_dim 0.0 +1041 80 model.scoring_fct_norm 2.0 +1041 80 loss.margin 8.136926644083715 +1041 80 loss.adversarial_temperature 0.8009610268220162 +1041 80 optimizer.lr 0.013208281895029482 +1041 80 negative_sampler.num_negs_per_pos 31.0 +1041 80 training.batch_size 1.0 +1041 81 model.embedding_dim 2.0 +1041 81 model.scoring_fct_norm 2.0 +1041 81 loss.margin 11.798100496753197 +1041 81 loss.adversarial_temperature 0.837270733435768 +1041 81 optimizer.lr 0.006707003382911081 +1041 81 negative_sampler.num_negs_per_pos 72.0 +1041 81 training.batch_size 2.0 +1041 82 model.embedding_dim 0.0 +1041 82 model.scoring_fct_norm 2.0 +1041 82 loss.margin 14.401795119810135 +1041 82 loss.adversarial_temperature 0.1581937216423625 +1041 82 optimizer.lr 0.0025778815831552166 +1041 82 negative_sampler.num_negs_per_pos 78.0 +1041 82 training.batch_size 2.0 +1041 83 model.embedding_dim 2.0 +1041 83 model.scoring_fct_norm 2.0 +1041 83 loss.margin 9.427526949678944 +1041 83 loss.adversarial_temperature 0.18143970814415947 +1041 83 optimizer.lr 0.007471207984322013 +1041 83 negative_sampler.num_negs_per_pos 66.0 +1041 83 training.batch_size 2.0 +1041 84 model.embedding_dim 0.0 +1041 84 
model.scoring_fct_norm 2.0 +1041 84 loss.margin 21.19176673942028 +1041 84 loss.adversarial_temperature 0.8317128998777696 +1041 84 optimizer.lr 0.004974657556995706 +1041 84 negative_sampler.num_negs_per_pos 70.0 +1041 84 training.batch_size 1.0 +1041 85 model.embedding_dim 1.0 +1041 85 model.scoring_fct_norm 1.0 +1041 85 loss.margin 28.254280994394357 +1041 85 loss.adversarial_temperature 0.5969716452695858 +1041 85 optimizer.lr 0.0014832901613876304 +1041 85 negative_sampler.num_negs_per_pos 46.0 +1041 85 training.batch_size 1.0 +1041 86 model.embedding_dim 1.0 +1041 86 model.scoring_fct_norm 2.0 +1041 86 loss.margin 7.583825570882029 +1041 86 loss.adversarial_temperature 0.6678183133759658 +1041 86 optimizer.lr 0.0034918205853246833 +1041 86 negative_sampler.num_negs_per_pos 88.0 +1041 86 training.batch_size 0.0 +1041 87 model.embedding_dim 1.0 +1041 87 model.scoring_fct_norm 2.0 +1041 87 loss.margin 25.07213670905362 +1041 87 loss.adversarial_temperature 0.2168693151858009 +1041 87 optimizer.lr 0.0012800721497739475 +1041 87 negative_sampler.num_negs_per_pos 18.0 +1041 87 training.batch_size 2.0 +1041 88 model.embedding_dim 1.0 +1041 88 model.scoring_fct_norm 1.0 +1041 88 loss.margin 10.35364539136171 +1041 88 loss.adversarial_temperature 0.8421417659954736 +1041 88 optimizer.lr 0.0345176142100115 +1041 88 negative_sampler.num_negs_per_pos 84.0 +1041 88 training.batch_size 2.0 +1041 89 model.embedding_dim 1.0 +1041 89 model.scoring_fct_norm 1.0 +1041 89 loss.margin 3.891884918064988 +1041 89 loss.adversarial_temperature 0.5036083781805115 +1041 89 optimizer.lr 0.05963539395203836 +1041 89 negative_sampler.num_negs_per_pos 12.0 +1041 89 training.batch_size 2.0 +1041 90 model.embedding_dim 1.0 +1041 90 model.scoring_fct_norm 2.0 +1041 90 loss.margin 1.2461793931082434 +1041 90 loss.adversarial_temperature 0.45961274824472964 +1041 90 optimizer.lr 0.006045951467238823 +1041 90 negative_sampler.num_negs_per_pos 39.0 +1041 90 training.batch_size 2.0 +1041 91 
model.embedding_dim 2.0 +1041 91 model.scoring_fct_norm 1.0 +1041 91 loss.margin 4.817823170199285 +1041 91 loss.adversarial_temperature 0.43592299000765383 +1041 91 optimizer.lr 0.017695717121529494 +1041 91 negative_sampler.num_negs_per_pos 11.0 +1041 91 training.batch_size 0.0 +1041 92 model.embedding_dim 2.0 +1041 92 model.scoring_fct_norm 1.0 +1041 92 loss.margin 10.551229990887194 +1041 92 loss.adversarial_temperature 0.9928345113975715 +1041 92 optimizer.lr 0.022300019317461024 +1041 92 negative_sampler.num_negs_per_pos 42.0 +1041 92 training.batch_size 1.0 +1041 93 model.embedding_dim 1.0 +1041 93 model.scoring_fct_norm 1.0 +1041 93 loss.margin 5.4563320874025445 +1041 93 loss.adversarial_temperature 0.6319311823582198 +1041 93 optimizer.lr 0.015076884332841819 +1041 93 negative_sampler.num_negs_per_pos 66.0 +1041 93 training.batch_size 2.0 +1041 94 model.embedding_dim 1.0 +1041 94 model.scoring_fct_norm 2.0 +1041 94 loss.margin 6.650509444118083 +1041 94 loss.adversarial_temperature 0.7610423298934231 +1041 94 optimizer.lr 0.009023562499594146 +1041 94 negative_sampler.num_negs_per_pos 93.0 +1041 94 training.batch_size 2.0 +1041 95 model.embedding_dim 1.0 +1041 95 model.scoring_fct_norm 2.0 +1041 95 loss.margin 20.476474876382003 +1041 95 loss.adversarial_temperature 0.9982037710864197 +1041 95 optimizer.lr 0.0017345044526222034 +1041 95 negative_sampler.num_negs_per_pos 7.0 +1041 95 training.batch_size 1.0 +1041 96 model.embedding_dim 2.0 +1041 96 model.scoring_fct_norm 2.0 +1041 96 loss.margin 16.50956486886896 +1041 96 loss.adversarial_temperature 0.1773513372293363 +1041 96 optimizer.lr 0.00168936786683778 +1041 96 negative_sampler.num_negs_per_pos 55.0 +1041 96 training.batch_size 2.0 +1041 97 model.embedding_dim 2.0 +1041 97 model.scoring_fct_norm 2.0 +1041 97 loss.margin 24.67467755123379 +1041 97 loss.adversarial_temperature 0.9766136672256 +1041 97 optimizer.lr 0.07902471209702176 +1041 97 negative_sampler.num_negs_per_pos 24.0 +1041 97 
training.batch_size 2.0 +1041 98 model.embedding_dim 0.0 +1041 98 model.scoring_fct_norm 1.0 +1041 98 loss.margin 11.114619233509167 +1041 98 loss.adversarial_temperature 0.36587934407857786 +1041 98 optimizer.lr 0.0292127131529029 +1041 98 negative_sampler.num_negs_per_pos 45.0 +1041 98 training.batch_size 2.0 +1041 99 model.embedding_dim 0.0 +1041 99 model.scoring_fct_norm 2.0 +1041 99 loss.margin 29.468027053447756 +1041 99 loss.adversarial_temperature 0.4608992242607257 +1041 99 optimizer.lr 0.012337920248270089 +1041 99 negative_sampler.num_negs_per_pos 97.0 +1041 99 training.batch_size 2.0 +1041 100 model.embedding_dim 2.0 +1041 100 model.scoring_fct_norm 2.0 +1041 100 loss.margin 4.4177782567985115 +1041 100 loss.adversarial_temperature 0.5800435784018807 +1041 100 optimizer.lr 0.09085396132355066 +1041 100 negative_sampler.num_negs_per_pos 71.0 +1041 100 training.batch_size 1.0 +1041 1 dataset """wn18rr""" +1041 1 model """unstructuredmodel""" +1041 1 loss """nssa""" +1041 1 regularizer """no""" +1041 1 optimizer """adam""" +1041 1 training_loop """owa""" +1041 1 negative_sampler """basic""" +1041 1 evaluator """rankbased""" +1041 2 dataset """wn18rr""" +1041 2 model """unstructuredmodel""" +1041 2 loss """nssa""" +1041 2 regularizer """no""" +1041 2 optimizer """adam""" +1041 2 training_loop """owa""" +1041 2 negative_sampler """basic""" +1041 2 evaluator """rankbased""" +1041 3 dataset """wn18rr""" +1041 3 model """unstructuredmodel""" +1041 3 loss """nssa""" +1041 3 regularizer """no""" +1041 3 optimizer """adam""" +1041 3 training_loop """owa""" +1041 3 negative_sampler """basic""" +1041 3 evaluator """rankbased""" +1041 4 dataset """wn18rr""" +1041 4 model """unstructuredmodel""" +1041 4 loss """nssa""" +1041 4 regularizer """no""" +1041 4 optimizer """adam""" +1041 4 training_loop """owa""" +1041 4 negative_sampler """basic""" +1041 4 evaluator """rankbased""" +1041 5 dataset """wn18rr""" +1041 5 model """unstructuredmodel""" +1041 5 loss """nssa""" 
+1041 5 regularizer """no""" +1041 5 optimizer """adam""" +1041 5 training_loop """owa""" +1041 5 negative_sampler """basic""" +1041 5 evaluator """rankbased""" +1041 6 dataset """wn18rr""" +1041 6 model """unstructuredmodel""" +1041 6 loss """nssa""" +1041 6 regularizer """no""" +1041 6 optimizer """adam""" +1041 6 training_loop """owa""" +1041 6 negative_sampler """basic""" +1041 6 evaluator """rankbased""" +1041 7 dataset """wn18rr""" +1041 7 model """unstructuredmodel""" +1041 7 loss """nssa""" +1041 7 regularizer """no""" +1041 7 optimizer """adam""" +1041 7 training_loop """owa""" +1041 7 negative_sampler """basic""" +1041 7 evaluator """rankbased""" +1041 8 dataset """wn18rr""" +1041 8 model """unstructuredmodel""" +1041 8 loss """nssa""" +1041 8 regularizer """no""" +1041 8 optimizer """adam""" +1041 8 training_loop """owa""" +1041 8 negative_sampler """basic""" +1041 8 evaluator """rankbased""" +1041 9 dataset """wn18rr""" +1041 9 model """unstructuredmodel""" +1041 9 loss """nssa""" +1041 9 regularizer """no""" +1041 9 optimizer """adam""" +1041 9 training_loop """owa""" +1041 9 negative_sampler """basic""" +1041 9 evaluator """rankbased""" +1041 10 dataset """wn18rr""" +1041 10 model """unstructuredmodel""" +1041 10 loss """nssa""" +1041 10 regularizer """no""" +1041 10 optimizer """adam""" +1041 10 training_loop """owa""" +1041 10 negative_sampler """basic""" +1041 10 evaluator """rankbased""" +1041 11 dataset """wn18rr""" +1041 11 model """unstructuredmodel""" +1041 11 loss """nssa""" +1041 11 regularizer """no""" +1041 11 optimizer """adam""" +1041 11 training_loop """owa""" +1041 11 negative_sampler """basic""" +1041 11 evaluator """rankbased""" +1041 12 dataset """wn18rr""" +1041 12 model """unstructuredmodel""" +1041 12 loss """nssa""" +1041 12 regularizer """no""" +1041 12 optimizer """adam""" +1041 12 training_loop """owa""" +1041 12 negative_sampler """basic""" +1041 12 evaluator """rankbased""" +1041 13 dataset """wn18rr""" +1041 13 model 
"""unstructuredmodel""" +1041 13 loss """nssa""" +1041 13 regularizer """no""" +1041 13 optimizer """adam""" +1041 13 training_loop """owa""" +1041 13 negative_sampler """basic""" +1041 13 evaluator """rankbased""" +1041 14 dataset """wn18rr""" +1041 14 model """unstructuredmodel""" +1041 14 loss """nssa""" +1041 14 regularizer """no""" +1041 14 optimizer """adam""" +1041 14 training_loop """owa""" +1041 14 negative_sampler """basic""" +1041 14 evaluator """rankbased""" +1041 15 dataset """wn18rr""" +1041 15 model """unstructuredmodel""" +1041 15 loss """nssa""" +1041 15 regularizer """no""" +1041 15 optimizer """adam""" +1041 15 training_loop """owa""" +1041 15 negative_sampler """basic""" +1041 15 evaluator """rankbased""" +1041 16 dataset """wn18rr""" +1041 16 model """unstructuredmodel""" +1041 16 loss """nssa""" +1041 16 regularizer """no""" +1041 16 optimizer """adam""" +1041 16 training_loop """owa""" +1041 16 negative_sampler """basic""" +1041 16 evaluator """rankbased""" +1041 17 dataset """wn18rr""" +1041 17 model """unstructuredmodel""" +1041 17 loss """nssa""" +1041 17 regularizer """no""" +1041 17 optimizer """adam""" +1041 17 training_loop """owa""" +1041 17 negative_sampler """basic""" +1041 17 evaluator """rankbased""" +1041 18 dataset """wn18rr""" +1041 18 model """unstructuredmodel""" +1041 18 loss """nssa""" +1041 18 regularizer """no""" +1041 18 optimizer """adam""" +1041 18 training_loop """owa""" +1041 18 negative_sampler """basic""" +1041 18 evaluator """rankbased""" +1041 19 dataset """wn18rr""" +1041 19 model """unstructuredmodel""" +1041 19 loss """nssa""" +1041 19 regularizer """no""" +1041 19 optimizer """adam""" +1041 19 training_loop """owa""" +1041 19 negative_sampler """basic""" +1041 19 evaluator """rankbased""" +1041 20 dataset """wn18rr""" +1041 20 model """unstructuredmodel""" +1041 20 loss """nssa""" +1041 20 regularizer """no""" +1041 20 optimizer """adam""" +1041 20 training_loop """owa""" +1041 20 negative_sampler """basic""" 
+1041 20 evaluator """rankbased""" +1041 21 dataset """wn18rr""" +1041 21 model """unstructuredmodel""" +1041 21 loss """nssa""" +1041 21 regularizer """no""" +1041 21 optimizer """adam""" +1041 21 training_loop """owa""" +1041 21 negative_sampler """basic""" +1041 21 evaluator """rankbased""" +1041 22 dataset """wn18rr""" +1041 22 model """unstructuredmodel""" +1041 22 loss """nssa""" +1041 22 regularizer """no""" +1041 22 optimizer """adam""" +1041 22 training_loop """owa""" +1041 22 negative_sampler """basic""" +1041 22 evaluator """rankbased""" +1041 23 dataset """wn18rr""" +1041 23 model """unstructuredmodel""" +1041 23 loss """nssa""" +1041 23 regularizer """no""" +1041 23 optimizer """adam""" +1041 23 training_loop """owa""" +1041 23 negative_sampler """basic""" +1041 23 evaluator """rankbased""" +1041 24 dataset """wn18rr""" +1041 24 model """unstructuredmodel""" +1041 24 loss """nssa""" +1041 24 regularizer """no""" +1041 24 optimizer """adam""" +1041 24 training_loop """owa""" +1041 24 negative_sampler """basic""" +1041 24 evaluator """rankbased""" +1041 25 dataset """wn18rr""" +1041 25 model """unstructuredmodel""" +1041 25 loss """nssa""" +1041 25 regularizer """no""" +1041 25 optimizer """adam""" +1041 25 training_loop """owa""" +1041 25 negative_sampler """basic""" +1041 25 evaluator """rankbased""" +1041 26 dataset """wn18rr""" +1041 26 model """unstructuredmodel""" +1041 26 loss """nssa""" +1041 26 regularizer """no""" +1041 26 optimizer """adam""" +1041 26 training_loop """owa""" +1041 26 negative_sampler """basic""" +1041 26 evaluator """rankbased""" +1041 27 dataset """wn18rr""" +1041 27 model """unstructuredmodel""" +1041 27 loss """nssa""" +1041 27 regularizer """no""" +1041 27 optimizer """adam""" +1041 27 training_loop """owa""" +1041 27 negative_sampler """basic""" +1041 27 evaluator """rankbased""" +1041 28 dataset """wn18rr""" +1041 28 model """unstructuredmodel""" +1041 28 loss """nssa""" +1041 28 regularizer """no""" +1041 28 optimizer 
"""adam""" +1041 28 training_loop """owa""" +1041 28 negative_sampler """basic""" +1041 28 evaluator """rankbased""" +1041 29 dataset """wn18rr""" +1041 29 model """unstructuredmodel""" +1041 29 loss """nssa""" +1041 29 regularizer """no""" +1041 29 optimizer """adam""" +1041 29 training_loop """owa""" +1041 29 negative_sampler """basic""" +1041 29 evaluator """rankbased""" +1041 30 dataset """wn18rr""" +1041 30 model """unstructuredmodel""" +1041 30 loss """nssa""" +1041 30 regularizer """no""" +1041 30 optimizer """adam""" +1041 30 training_loop """owa""" +1041 30 negative_sampler """basic""" +1041 30 evaluator """rankbased""" +1041 31 dataset """wn18rr""" +1041 31 model """unstructuredmodel""" +1041 31 loss """nssa""" +1041 31 regularizer """no""" +1041 31 optimizer """adam""" +1041 31 training_loop """owa""" +1041 31 negative_sampler """basic""" +1041 31 evaluator """rankbased""" +1041 32 dataset """wn18rr""" +1041 32 model """unstructuredmodel""" +1041 32 loss """nssa""" +1041 32 regularizer """no""" +1041 32 optimizer """adam""" +1041 32 training_loop """owa""" +1041 32 negative_sampler """basic""" +1041 32 evaluator """rankbased""" +1041 33 dataset """wn18rr""" +1041 33 model """unstructuredmodel""" +1041 33 loss """nssa""" +1041 33 regularizer """no""" +1041 33 optimizer """adam""" +1041 33 training_loop """owa""" +1041 33 negative_sampler """basic""" +1041 33 evaluator """rankbased""" +1041 34 dataset """wn18rr""" +1041 34 model """unstructuredmodel""" +1041 34 loss """nssa""" +1041 34 regularizer """no""" +1041 34 optimizer """adam""" +1041 34 training_loop """owa""" +1041 34 negative_sampler """basic""" +1041 34 evaluator """rankbased""" +1041 35 dataset """wn18rr""" +1041 35 model """unstructuredmodel""" +1041 35 loss """nssa""" +1041 35 regularizer """no""" +1041 35 optimizer """adam""" +1041 35 training_loop """owa""" +1041 35 negative_sampler """basic""" +1041 35 evaluator """rankbased""" +1041 36 dataset """wn18rr""" +1041 36 model 
"""unstructuredmodel""" +1041 36 loss """nssa""" +1041 36 regularizer """no""" +1041 36 optimizer """adam""" +1041 36 training_loop """owa""" +1041 36 negative_sampler """basic""" +1041 36 evaluator """rankbased""" +1041 37 dataset """wn18rr""" +1041 37 model """unstructuredmodel""" +1041 37 loss """nssa""" +1041 37 regularizer """no""" +1041 37 optimizer """adam""" +1041 37 training_loop """owa""" +1041 37 negative_sampler """basic""" +1041 37 evaluator """rankbased""" +1041 38 dataset """wn18rr""" +1041 38 model """unstructuredmodel""" +1041 38 loss """nssa""" +1041 38 regularizer """no""" +1041 38 optimizer """adam""" +1041 38 training_loop """owa""" +1041 38 negative_sampler """basic""" +1041 38 evaluator """rankbased""" +1041 39 dataset """wn18rr""" +1041 39 model """unstructuredmodel""" +1041 39 loss """nssa""" +1041 39 regularizer """no""" +1041 39 optimizer """adam""" +1041 39 training_loop """owa""" +1041 39 negative_sampler """basic""" +1041 39 evaluator """rankbased""" +1041 40 dataset """wn18rr""" +1041 40 model """unstructuredmodel""" +1041 40 loss """nssa""" +1041 40 regularizer """no""" +1041 40 optimizer """adam""" +1041 40 training_loop """owa""" +1041 40 negative_sampler """basic""" +1041 40 evaluator """rankbased""" +1041 41 dataset """wn18rr""" +1041 41 model """unstructuredmodel""" +1041 41 loss """nssa""" +1041 41 regularizer """no""" +1041 41 optimizer """adam""" +1041 41 training_loop """owa""" +1041 41 negative_sampler """basic""" +1041 41 evaluator """rankbased""" +1041 42 dataset """wn18rr""" +1041 42 model """unstructuredmodel""" +1041 42 loss """nssa""" +1041 42 regularizer """no""" +1041 42 optimizer """adam""" +1041 42 training_loop """owa""" +1041 42 negative_sampler """basic""" +1041 42 evaluator """rankbased""" +1041 43 dataset """wn18rr""" +1041 43 model """unstructuredmodel""" +1041 43 loss """nssa""" +1041 43 regularizer """no""" +1041 43 optimizer """adam""" +1041 43 training_loop """owa""" +1041 43 negative_sampler """basic""" 
+1041 43 evaluator """rankbased""" +1041 44 dataset """wn18rr""" +1041 44 model """unstructuredmodel""" +1041 44 loss """nssa""" +1041 44 regularizer """no""" +1041 44 optimizer """adam""" +1041 44 training_loop """owa""" +1041 44 negative_sampler """basic""" +1041 44 evaluator """rankbased""" +1041 45 dataset """wn18rr""" +1041 45 model """unstructuredmodel""" +1041 45 loss """nssa""" +1041 45 regularizer """no""" +1041 45 optimizer """adam""" +1041 45 training_loop """owa""" +1041 45 negative_sampler """basic""" +1041 45 evaluator """rankbased""" +1041 46 dataset """wn18rr""" +1041 46 model """unstructuredmodel""" +1041 46 loss """nssa""" +1041 46 regularizer """no""" +1041 46 optimizer """adam""" +1041 46 training_loop """owa""" +1041 46 negative_sampler """basic""" +1041 46 evaluator """rankbased""" +1041 47 dataset """wn18rr""" +1041 47 model """unstructuredmodel""" +1041 47 loss """nssa""" +1041 47 regularizer """no""" +1041 47 optimizer """adam""" +1041 47 training_loop """owa""" +1041 47 negative_sampler """basic""" +1041 47 evaluator """rankbased""" +1041 48 dataset """wn18rr""" +1041 48 model """unstructuredmodel""" +1041 48 loss """nssa""" +1041 48 regularizer """no""" +1041 48 optimizer """adam""" +1041 48 training_loop """owa""" +1041 48 negative_sampler """basic""" +1041 48 evaluator """rankbased""" +1041 49 dataset """wn18rr""" +1041 49 model """unstructuredmodel""" +1041 49 loss """nssa""" +1041 49 regularizer """no""" +1041 49 optimizer """adam""" +1041 49 training_loop """owa""" +1041 49 negative_sampler """basic""" +1041 49 evaluator """rankbased""" +1041 50 dataset """wn18rr""" +1041 50 model """unstructuredmodel""" +1041 50 loss """nssa""" +1041 50 regularizer """no""" +1041 50 optimizer """adam""" +1041 50 training_loop """owa""" +1041 50 negative_sampler """basic""" +1041 50 evaluator """rankbased""" +1041 51 dataset """wn18rr""" +1041 51 model """unstructuredmodel""" +1041 51 loss """nssa""" +1041 51 regularizer """no""" +1041 51 optimizer 
"""adam""" +1041 51 training_loop """owa""" +1041 51 negative_sampler """basic""" +1041 51 evaluator """rankbased""" +1041 52 dataset """wn18rr""" +1041 52 model """unstructuredmodel""" +1041 52 loss """nssa""" +1041 52 regularizer """no""" +1041 52 optimizer """adam""" +1041 52 training_loop """owa""" +1041 52 negative_sampler """basic""" +1041 52 evaluator """rankbased""" +1041 53 dataset """wn18rr""" +1041 53 model """unstructuredmodel""" +1041 53 loss """nssa""" +1041 53 regularizer """no""" +1041 53 optimizer """adam""" +1041 53 training_loop """owa""" +1041 53 negative_sampler """basic""" +1041 53 evaluator """rankbased""" +1041 54 dataset """wn18rr""" +1041 54 model """unstructuredmodel""" +1041 54 loss """nssa""" +1041 54 regularizer """no""" +1041 54 optimizer """adam""" +1041 54 training_loop """owa""" +1041 54 negative_sampler """basic""" +1041 54 evaluator """rankbased""" +1041 55 dataset """wn18rr""" +1041 55 model """unstructuredmodel""" +1041 55 loss """nssa""" +1041 55 regularizer """no""" +1041 55 optimizer """adam""" +1041 55 training_loop """owa""" +1041 55 negative_sampler """basic""" +1041 55 evaluator """rankbased""" +1041 56 dataset """wn18rr""" +1041 56 model """unstructuredmodel""" +1041 56 loss """nssa""" +1041 56 regularizer """no""" +1041 56 optimizer """adam""" +1041 56 training_loop """owa""" +1041 56 negative_sampler """basic""" +1041 56 evaluator """rankbased""" +1041 57 dataset """wn18rr""" +1041 57 model """unstructuredmodel""" +1041 57 loss """nssa""" +1041 57 regularizer """no""" +1041 57 optimizer """adam""" +1041 57 training_loop """owa""" +1041 57 negative_sampler """basic""" +1041 57 evaluator """rankbased""" +1041 58 dataset """wn18rr""" +1041 58 model """unstructuredmodel""" +1041 58 loss """nssa""" +1041 58 regularizer """no""" +1041 58 optimizer """adam""" +1041 58 training_loop """owa""" +1041 58 negative_sampler """basic""" +1041 58 evaluator """rankbased""" +1041 59 dataset """wn18rr""" +1041 59 model 
"""unstructuredmodel""" +1041 59 loss """nssa""" +1041 59 regularizer """no""" +1041 59 optimizer """adam""" +1041 59 training_loop """owa""" +1041 59 negative_sampler """basic""" +1041 59 evaluator """rankbased""" +1041 60 dataset """wn18rr""" +1041 60 model """unstructuredmodel""" +1041 60 loss """nssa""" +1041 60 regularizer """no""" +1041 60 optimizer """adam""" +1041 60 training_loop """owa""" +1041 60 negative_sampler """basic""" +1041 60 evaluator """rankbased""" +1041 61 dataset """wn18rr""" +1041 61 model """unstructuredmodel""" +1041 61 loss """nssa""" +1041 61 regularizer """no""" +1041 61 optimizer """adam""" +1041 61 training_loop """owa""" +1041 61 negative_sampler """basic""" +1041 61 evaluator """rankbased""" +1041 62 dataset """wn18rr""" +1041 62 model """unstructuredmodel""" +1041 62 loss """nssa""" +1041 62 regularizer """no""" +1041 62 optimizer """adam""" +1041 62 training_loop """owa""" +1041 62 negative_sampler """basic""" +1041 62 evaluator """rankbased""" +1041 63 dataset """wn18rr""" +1041 63 model """unstructuredmodel""" +1041 63 loss """nssa""" +1041 63 regularizer """no""" +1041 63 optimizer """adam""" +1041 63 training_loop """owa""" +1041 63 negative_sampler """basic""" +1041 63 evaluator """rankbased""" +1041 64 dataset """wn18rr""" +1041 64 model """unstructuredmodel""" +1041 64 loss """nssa""" +1041 64 regularizer """no""" +1041 64 optimizer """adam""" +1041 64 training_loop """owa""" +1041 64 negative_sampler """basic""" +1041 64 evaluator """rankbased""" +1041 65 dataset """wn18rr""" +1041 65 model """unstructuredmodel""" +1041 65 loss """nssa""" +1041 65 regularizer """no""" +1041 65 optimizer """adam""" +1041 65 training_loop """owa""" +1041 65 negative_sampler """basic""" +1041 65 evaluator """rankbased""" +1041 66 dataset """wn18rr""" +1041 66 model """unstructuredmodel""" +1041 66 loss """nssa""" +1041 66 regularizer """no""" +1041 66 optimizer """adam""" +1041 66 training_loop """owa""" +1041 66 negative_sampler """basic""" 
+1041 66 evaluator """rankbased""" +1041 67 dataset """wn18rr""" +1041 67 model """unstructuredmodel""" +1041 67 loss """nssa""" +1041 67 regularizer """no""" +1041 67 optimizer """adam""" +1041 67 training_loop """owa""" +1041 67 negative_sampler """basic""" +1041 67 evaluator """rankbased""" +1041 68 dataset """wn18rr""" +1041 68 model """unstructuredmodel""" +1041 68 loss """nssa""" +1041 68 regularizer """no""" +1041 68 optimizer """adam""" +1041 68 training_loop """owa""" +1041 68 negative_sampler """basic""" +1041 68 evaluator """rankbased""" +1041 69 dataset """wn18rr""" +1041 69 model """unstructuredmodel""" +1041 69 loss """nssa""" +1041 69 regularizer """no""" +1041 69 optimizer """adam""" +1041 69 training_loop """owa""" +1041 69 negative_sampler """basic""" +1041 69 evaluator """rankbased""" +1041 70 dataset """wn18rr""" +1041 70 model """unstructuredmodel""" +1041 70 loss """nssa""" +1041 70 regularizer """no""" +1041 70 optimizer """adam""" +1041 70 training_loop """owa""" +1041 70 negative_sampler """basic""" +1041 70 evaluator """rankbased""" +1041 71 dataset """wn18rr""" +1041 71 model """unstructuredmodel""" +1041 71 loss """nssa""" +1041 71 regularizer """no""" +1041 71 optimizer """adam""" +1041 71 training_loop """owa""" +1041 71 negative_sampler """basic""" +1041 71 evaluator """rankbased""" +1041 72 dataset """wn18rr""" +1041 72 model """unstructuredmodel""" +1041 72 loss """nssa""" +1041 72 regularizer """no""" +1041 72 optimizer """adam""" +1041 72 training_loop """owa""" +1041 72 negative_sampler """basic""" +1041 72 evaluator """rankbased""" +1041 73 dataset """wn18rr""" +1041 73 model """unstructuredmodel""" +1041 73 loss """nssa""" +1041 73 regularizer """no""" +1041 73 optimizer """adam""" +1041 73 training_loop """owa""" +1041 73 negative_sampler """basic""" +1041 73 evaluator """rankbased""" +1041 74 dataset """wn18rr""" +1041 74 model """unstructuredmodel""" +1041 74 loss """nssa""" +1041 74 regularizer """no""" +1041 74 optimizer 
"""adam""" +1041 74 training_loop """owa""" +1041 74 negative_sampler """basic""" +1041 74 evaluator """rankbased""" +1041 75 dataset """wn18rr""" +1041 75 model """unstructuredmodel""" +1041 75 loss """nssa""" +1041 75 regularizer """no""" +1041 75 optimizer """adam""" +1041 75 training_loop """owa""" +1041 75 negative_sampler """basic""" +1041 75 evaluator """rankbased""" +1041 76 dataset """wn18rr""" +1041 76 model """unstructuredmodel""" +1041 76 loss """nssa""" +1041 76 regularizer """no""" +1041 76 optimizer """adam""" +1041 76 training_loop """owa""" +1041 76 negative_sampler """basic""" +1041 76 evaluator """rankbased""" +1041 77 dataset """wn18rr""" +1041 77 model """unstructuredmodel""" +1041 77 loss """nssa""" +1041 77 regularizer """no""" +1041 77 optimizer """adam""" +1041 77 training_loop """owa""" +1041 77 negative_sampler """basic""" +1041 77 evaluator """rankbased""" +1041 78 dataset """wn18rr""" +1041 78 model """unstructuredmodel""" +1041 78 loss """nssa""" +1041 78 regularizer """no""" +1041 78 optimizer """adam""" +1041 78 training_loop """owa""" +1041 78 negative_sampler """basic""" +1041 78 evaluator """rankbased""" +1041 79 dataset """wn18rr""" +1041 79 model """unstructuredmodel""" +1041 79 loss """nssa""" +1041 79 regularizer """no""" +1041 79 optimizer """adam""" +1041 79 training_loop """owa""" +1041 79 negative_sampler """basic""" +1041 79 evaluator """rankbased""" +1041 80 dataset """wn18rr""" +1041 80 model """unstructuredmodel""" +1041 80 loss """nssa""" +1041 80 regularizer """no""" +1041 80 optimizer """adam""" +1041 80 training_loop """owa""" +1041 80 negative_sampler """basic""" +1041 80 evaluator """rankbased""" +1041 81 dataset """wn18rr""" +1041 81 model """unstructuredmodel""" +1041 81 loss """nssa""" +1041 81 regularizer """no""" +1041 81 optimizer """adam""" +1041 81 training_loop """owa""" +1041 81 negative_sampler """basic""" +1041 81 evaluator """rankbased""" +1041 82 dataset """wn18rr""" +1041 82 model 
"""unstructuredmodel""" +1041 82 loss """nssa""" +1041 82 regularizer """no""" +1041 82 optimizer """adam""" +1041 82 training_loop """owa""" +1041 82 negative_sampler """basic""" +1041 82 evaluator """rankbased""" +1041 83 dataset """wn18rr""" +1041 83 model """unstructuredmodel""" +1041 83 loss """nssa""" +1041 83 regularizer """no""" +1041 83 optimizer """adam""" +1041 83 training_loop """owa""" +1041 83 negative_sampler """basic""" +1041 83 evaluator """rankbased""" +1041 84 dataset """wn18rr""" +1041 84 model """unstructuredmodel""" +1041 84 loss """nssa""" +1041 84 regularizer """no""" +1041 84 optimizer """adam""" +1041 84 training_loop """owa""" +1041 84 negative_sampler """basic""" +1041 84 evaluator """rankbased""" +1041 85 dataset """wn18rr""" +1041 85 model """unstructuredmodel""" +1041 85 loss """nssa""" +1041 85 regularizer """no""" +1041 85 optimizer """adam""" +1041 85 training_loop """owa""" +1041 85 negative_sampler """basic""" +1041 85 evaluator """rankbased""" +1041 86 dataset """wn18rr""" +1041 86 model """unstructuredmodel""" +1041 86 loss """nssa""" +1041 86 regularizer """no""" +1041 86 optimizer """adam""" +1041 86 training_loop """owa""" +1041 86 negative_sampler """basic""" +1041 86 evaluator """rankbased""" +1041 87 dataset """wn18rr""" +1041 87 model """unstructuredmodel""" +1041 87 loss """nssa""" +1041 87 regularizer """no""" +1041 87 optimizer """adam""" +1041 87 training_loop """owa""" +1041 87 negative_sampler """basic""" +1041 87 evaluator """rankbased""" +1041 88 dataset """wn18rr""" +1041 88 model """unstructuredmodel""" +1041 88 loss """nssa""" +1041 88 regularizer """no""" +1041 88 optimizer """adam""" +1041 88 training_loop """owa""" +1041 88 negative_sampler """basic""" +1041 88 evaluator """rankbased""" +1041 89 dataset """wn18rr""" +1041 89 model """unstructuredmodel""" +1041 89 loss """nssa""" +1041 89 regularizer """no""" +1041 89 optimizer """adam""" +1041 89 training_loop """owa""" +1041 89 negative_sampler """basic""" 
+1041 89 evaluator """rankbased""" +1041 90 dataset """wn18rr""" +1041 90 model """unstructuredmodel""" +1041 90 loss """nssa""" +1041 90 regularizer """no""" +1041 90 optimizer """adam""" +1041 90 training_loop """owa""" +1041 90 negative_sampler """basic""" +1041 90 evaluator """rankbased""" +1041 91 dataset """wn18rr""" +1041 91 model """unstructuredmodel""" +1041 91 loss """nssa""" +1041 91 regularizer """no""" +1041 91 optimizer """adam""" +1041 91 training_loop """owa""" +1041 91 negative_sampler """basic""" +1041 91 evaluator """rankbased""" +1041 92 dataset """wn18rr""" +1041 92 model """unstructuredmodel""" +1041 92 loss """nssa""" +1041 92 regularizer """no""" +1041 92 optimizer """adam""" +1041 92 training_loop """owa""" +1041 92 negative_sampler """basic""" +1041 92 evaluator """rankbased""" +1041 93 dataset """wn18rr""" +1041 93 model """unstructuredmodel""" +1041 93 loss """nssa""" +1041 93 regularizer """no""" +1041 93 optimizer """adam""" +1041 93 training_loop """owa""" +1041 93 negative_sampler """basic""" +1041 93 evaluator """rankbased""" +1041 94 dataset """wn18rr""" +1041 94 model """unstructuredmodel""" +1041 94 loss """nssa""" +1041 94 regularizer """no""" +1041 94 optimizer """adam""" +1041 94 training_loop """owa""" +1041 94 negative_sampler """basic""" +1041 94 evaluator """rankbased""" +1041 95 dataset """wn18rr""" +1041 95 model """unstructuredmodel""" +1041 95 loss """nssa""" +1041 95 regularizer """no""" +1041 95 optimizer """adam""" +1041 95 training_loop """owa""" +1041 95 negative_sampler """basic""" +1041 95 evaluator """rankbased""" +1041 96 dataset """wn18rr""" +1041 96 model """unstructuredmodel""" +1041 96 loss """nssa""" +1041 96 regularizer """no""" +1041 96 optimizer """adam""" +1041 96 training_loop """owa""" +1041 96 negative_sampler """basic""" +1041 96 evaluator """rankbased""" +1041 97 dataset """wn18rr""" +1041 97 model """unstructuredmodel""" +1041 97 loss """nssa""" +1041 97 regularizer """no""" +1041 97 optimizer 
"""adam""" +1041 97 training_loop """owa""" +1041 97 negative_sampler """basic""" +1041 97 evaluator """rankbased""" +1041 98 dataset """wn18rr""" +1041 98 model """unstructuredmodel""" +1041 98 loss """nssa""" +1041 98 regularizer """no""" +1041 98 optimizer """adam""" +1041 98 training_loop """owa""" +1041 98 negative_sampler """basic""" +1041 98 evaluator """rankbased""" +1041 99 dataset """wn18rr""" +1041 99 model """unstructuredmodel""" +1041 99 loss """nssa""" +1041 99 regularizer """no""" +1041 99 optimizer """adam""" +1041 99 training_loop """owa""" +1041 99 negative_sampler """basic""" +1041 99 evaluator """rankbased""" +1041 100 dataset """wn18rr""" +1041 100 model """unstructuredmodel""" +1041 100 loss """nssa""" +1041 100 regularizer """no""" +1041 100 optimizer """adam""" +1041 100 training_loop """owa""" +1041 100 negative_sampler """basic""" +1041 100 evaluator """rankbased""" +1042 1 model.embedding_dim 1.0 +1042 1 model.scoring_fct_norm 2.0 +1042 1 optimizer.lr 0.011358476155100073 +1042 1 training.batch_size 2.0 +1042 1 training.label_smoothing 0.23494671526495434 +1042 2 model.embedding_dim 1.0 +1042 2 model.scoring_fct_norm 1.0 +1042 2 optimizer.lr 0.026640822668379815 +1042 2 training.batch_size 0.0 +1042 2 training.label_smoothing 0.001195654933202636 +1042 3 model.embedding_dim 0.0 +1042 3 model.scoring_fct_norm 1.0 +1042 3 optimizer.lr 0.055725648169406214 +1042 3 training.batch_size 2.0 +1042 3 training.label_smoothing 0.0010246249286064055 +1042 4 model.embedding_dim 2.0 +1042 4 model.scoring_fct_norm 1.0 +1042 4 optimizer.lr 0.0013880621636746867 +1042 4 training.batch_size 2.0 +1042 4 training.label_smoothing 0.12137283684512873 +1042 5 model.embedding_dim 1.0 +1042 5 model.scoring_fct_norm 2.0 +1042 5 optimizer.lr 0.007962047425829021 +1042 5 training.batch_size 2.0 +1042 5 training.label_smoothing 0.04364425075614437 +1042 6 model.embedding_dim 2.0 +1042 6 model.scoring_fct_norm 1.0 +1042 6 optimizer.lr 0.0011718811345383386 +1042 6 
training.batch_size 0.0 +1042 6 training.label_smoothing 0.12148650799964546 +1042 7 model.embedding_dim 1.0 +1042 7 model.scoring_fct_norm 1.0 +1042 7 optimizer.lr 0.08246943699968481 +1042 7 training.batch_size 2.0 +1042 7 training.label_smoothing 0.0555099672344615 +1042 8 model.embedding_dim 1.0 +1042 8 model.scoring_fct_norm 1.0 +1042 8 optimizer.lr 0.027802598278366928 +1042 8 training.batch_size 2.0 +1042 8 training.label_smoothing 0.003302783343736581 +1042 9 model.embedding_dim 0.0 +1042 9 model.scoring_fct_norm 1.0 +1042 9 optimizer.lr 0.02621626981736768 +1042 9 training.batch_size 1.0 +1042 9 training.label_smoothing 0.690151919417886 +1042 10 model.embedding_dim 2.0 +1042 10 model.scoring_fct_norm 2.0 +1042 10 optimizer.lr 0.08282177372941034 +1042 10 training.batch_size 2.0 +1042 10 training.label_smoothing 0.1524922834195639 +1042 1 dataset """wn18rr""" +1042 1 model """unstructuredmodel""" +1042 1 loss """crossentropy""" +1042 1 regularizer """no""" +1042 1 optimizer """adam""" +1042 1 training_loop """lcwa""" +1042 1 evaluator """rankbased""" +1042 2 dataset """wn18rr""" +1042 2 model """unstructuredmodel""" +1042 2 loss """crossentropy""" +1042 2 regularizer """no""" +1042 2 optimizer """adam""" +1042 2 training_loop """lcwa""" +1042 2 evaluator """rankbased""" +1042 3 dataset """wn18rr""" +1042 3 model """unstructuredmodel""" +1042 3 loss """crossentropy""" +1042 3 regularizer """no""" +1042 3 optimizer """adam""" +1042 3 training_loop """lcwa""" +1042 3 evaluator """rankbased""" +1042 4 dataset """wn18rr""" +1042 4 model """unstructuredmodel""" +1042 4 loss """crossentropy""" +1042 4 regularizer """no""" +1042 4 optimizer """adam""" +1042 4 training_loop """lcwa""" +1042 4 evaluator """rankbased""" +1042 5 dataset """wn18rr""" +1042 5 model """unstructuredmodel""" +1042 5 loss """crossentropy""" +1042 5 regularizer """no""" +1042 5 optimizer """adam""" +1042 5 training_loop """lcwa""" +1042 5 evaluator """rankbased""" +1042 6 dataset 
"""wn18rr""" +1042 6 model """unstructuredmodel""" +1042 6 loss """crossentropy""" +1042 6 regularizer """no""" +1042 6 optimizer """adam""" +1042 6 training_loop """lcwa""" +1042 6 evaluator """rankbased""" +1042 7 dataset """wn18rr""" +1042 7 model """unstructuredmodel""" +1042 7 loss """crossentropy""" +1042 7 regularizer """no""" +1042 7 optimizer """adam""" +1042 7 training_loop """lcwa""" +1042 7 evaluator """rankbased""" +1042 8 dataset """wn18rr""" +1042 8 model """unstructuredmodel""" +1042 8 loss """crossentropy""" +1042 8 regularizer """no""" +1042 8 optimizer """adam""" +1042 8 training_loop """lcwa""" +1042 8 evaluator """rankbased""" +1042 9 dataset """wn18rr""" +1042 9 model """unstructuredmodel""" +1042 9 loss """crossentropy""" +1042 9 regularizer """no""" +1042 9 optimizer """adam""" +1042 9 training_loop """lcwa""" +1042 9 evaluator """rankbased""" +1042 10 dataset """wn18rr""" +1042 10 model """unstructuredmodel""" +1042 10 loss """crossentropy""" +1042 10 regularizer """no""" +1042 10 optimizer """adam""" +1042 10 training_loop """lcwa""" +1042 10 evaluator """rankbased""" +1043 1 model.embedding_dim 0.0 +1043 1 model.scoring_fct_norm 1.0 +1043 1 optimizer.lr 0.0011235290438163644 +1043 1 training.batch_size 1.0 +1043 1 training.label_smoothing 0.04995644512495971 +1043 2 model.embedding_dim 0.0 +1043 2 model.scoring_fct_norm 2.0 +1043 2 optimizer.lr 0.003437701530823122 +1043 2 training.batch_size 0.0 +1043 2 training.label_smoothing 0.38731736843374454 +1043 3 model.embedding_dim 1.0 +1043 3 model.scoring_fct_norm 2.0 +1043 3 optimizer.lr 0.0022211692696307048 +1043 3 training.batch_size 1.0 +1043 3 training.label_smoothing 0.7575754248504948 +1043 4 model.embedding_dim 1.0 +1043 4 model.scoring_fct_norm 2.0 +1043 4 optimizer.lr 0.009467673254159347 +1043 4 training.batch_size 1.0 +1043 4 training.label_smoothing 0.31305575629937404 +1043 5 model.embedding_dim 0.0 +1043 5 model.scoring_fct_norm 1.0 +1043 5 optimizer.lr 0.08584347103969277 
+1043 5 training.batch_size 0.0 +1043 5 training.label_smoothing 0.7611012955738367 +1043 6 model.embedding_dim 1.0 +1043 6 model.scoring_fct_norm 1.0 +1043 6 optimizer.lr 0.07116013554014504 +1043 6 training.batch_size 2.0 +1043 6 training.label_smoothing 0.009822555812136567 +1043 7 model.embedding_dim 0.0 +1043 7 model.scoring_fct_norm 1.0 +1043 7 optimizer.lr 0.09822694121537497 +1043 7 training.batch_size 2.0 +1043 7 training.label_smoothing 0.0020523746009668293 +1043 8 model.embedding_dim 1.0 +1043 8 model.scoring_fct_norm 2.0 +1043 8 optimizer.lr 0.01359891763148505 +1043 8 training.batch_size 2.0 +1043 8 training.label_smoothing 0.05635400338519808 +1043 9 model.embedding_dim 0.0 +1043 9 model.scoring_fct_norm 1.0 +1043 9 optimizer.lr 0.01768230459314133 +1043 9 training.batch_size 1.0 +1043 9 training.label_smoothing 0.07788212606986093 +1043 10 model.embedding_dim 2.0 +1043 10 model.scoring_fct_norm 1.0 +1043 10 optimizer.lr 0.06156630703422834 +1043 10 training.batch_size 2.0 +1043 10 training.label_smoothing 0.2012627875275361 +1043 11 model.embedding_dim 1.0 +1043 11 model.scoring_fct_norm 2.0 +1043 11 optimizer.lr 0.09593518039670959 +1043 11 training.batch_size 2.0 +1043 11 training.label_smoothing 0.09337405663337849 +1043 12 model.embedding_dim 0.0 +1043 12 model.scoring_fct_norm 1.0 +1043 12 optimizer.lr 0.028196394465382964 +1043 12 training.batch_size 0.0 +1043 12 training.label_smoothing 0.00932225016151858 +1043 13 model.embedding_dim 1.0 +1043 13 model.scoring_fct_norm 2.0 +1043 13 optimizer.lr 0.08806074503749829 +1043 13 training.batch_size 2.0 +1043 13 training.label_smoothing 0.572602014440911 +1043 14 model.embedding_dim 0.0 +1043 14 model.scoring_fct_norm 1.0 +1043 14 optimizer.lr 0.033760036623413016 +1043 14 training.batch_size 2.0 +1043 14 training.label_smoothing 0.08016366675167795 +1043 15 model.embedding_dim 2.0 +1043 15 model.scoring_fct_norm 2.0 +1043 15 optimizer.lr 0.005219801840306413 +1043 15 training.batch_size 1.0 +1043 
15 training.label_smoothing 0.009562260624715472 +1043 16 model.embedding_dim 0.0 +1043 16 model.scoring_fct_norm 2.0 +1043 16 optimizer.lr 0.001517404244587651 +1043 16 training.batch_size 1.0 +1043 16 training.label_smoothing 0.0010242991387723749 +1043 17 model.embedding_dim 1.0 +1043 17 model.scoring_fct_norm 2.0 +1043 17 optimizer.lr 0.06756135980079807 +1043 17 training.batch_size 1.0 +1043 17 training.label_smoothing 0.6597223191211248 +1043 18 model.embedding_dim 0.0 +1043 18 model.scoring_fct_norm 2.0 +1043 18 optimizer.lr 0.005527563491886539 +1043 18 training.batch_size 0.0 +1043 18 training.label_smoothing 0.0016826634970742733 +1043 19 model.embedding_dim 2.0 +1043 19 model.scoring_fct_norm 2.0 +1043 19 optimizer.lr 0.025608301228930666 +1043 19 training.batch_size 2.0 +1043 19 training.label_smoothing 0.007622666150967021 +1043 1 dataset """wn18rr""" +1043 1 model """unstructuredmodel""" +1043 1 loss """crossentropy""" +1043 1 regularizer """no""" +1043 1 optimizer """adam""" +1043 1 training_loop """lcwa""" +1043 1 evaluator """rankbased""" +1043 2 dataset """wn18rr""" +1043 2 model """unstructuredmodel""" +1043 2 loss """crossentropy""" +1043 2 regularizer """no""" +1043 2 optimizer """adam""" +1043 2 training_loop """lcwa""" +1043 2 evaluator """rankbased""" +1043 3 dataset """wn18rr""" +1043 3 model """unstructuredmodel""" +1043 3 loss """crossentropy""" +1043 3 regularizer """no""" +1043 3 optimizer """adam""" +1043 3 training_loop """lcwa""" +1043 3 evaluator """rankbased""" +1043 4 dataset """wn18rr""" +1043 4 model """unstructuredmodel""" +1043 4 loss """crossentropy""" +1043 4 regularizer """no""" +1043 4 optimizer """adam""" +1043 4 training_loop """lcwa""" +1043 4 evaluator """rankbased""" +1043 5 dataset """wn18rr""" +1043 5 model """unstructuredmodel""" +1043 5 loss """crossentropy""" +1043 5 regularizer """no""" +1043 5 optimizer """adam""" +1043 5 training_loop """lcwa""" +1043 5 evaluator """rankbased""" +1043 6 dataset """wn18rr""" 
+1043 6 model """unstructuredmodel""" +1043 6 loss """crossentropy""" +1043 6 regularizer """no""" +1043 6 optimizer """adam""" +1043 6 training_loop """lcwa""" +1043 6 evaluator """rankbased""" +1043 7 dataset """wn18rr""" +1043 7 model """unstructuredmodel""" +1043 7 loss """crossentropy""" +1043 7 regularizer """no""" +1043 7 optimizer """adam""" +1043 7 training_loop """lcwa""" +1043 7 evaluator """rankbased""" +1043 8 dataset """wn18rr""" +1043 8 model """unstructuredmodel""" +1043 8 loss """crossentropy""" +1043 8 regularizer """no""" +1043 8 optimizer """adam""" +1043 8 training_loop """lcwa""" +1043 8 evaluator """rankbased""" +1043 9 dataset """wn18rr""" +1043 9 model """unstructuredmodel""" +1043 9 loss """crossentropy""" +1043 9 regularizer """no""" +1043 9 optimizer """adam""" +1043 9 training_loop """lcwa""" +1043 9 evaluator """rankbased""" +1043 10 dataset """wn18rr""" +1043 10 model """unstructuredmodel""" +1043 10 loss """crossentropy""" +1043 10 regularizer """no""" +1043 10 optimizer """adam""" +1043 10 training_loop """lcwa""" +1043 10 evaluator """rankbased""" +1043 11 dataset """wn18rr""" +1043 11 model """unstructuredmodel""" +1043 11 loss """crossentropy""" +1043 11 regularizer """no""" +1043 11 optimizer """adam""" +1043 11 training_loop """lcwa""" +1043 11 evaluator """rankbased""" +1043 12 dataset """wn18rr""" +1043 12 model """unstructuredmodel""" +1043 12 loss """crossentropy""" +1043 12 regularizer """no""" +1043 12 optimizer """adam""" +1043 12 training_loop """lcwa""" +1043 12 evaluator """rankbased""" +1043 13 dataset """wn18rr""" +1043 13 model """unstructuredmodel""" +1043 13 loss """crossentropy""" +1043 13 regularizer """no""" +1043 13 optimizer """adam""" +1043 13 training_loop """lcwa""" +1043 13 evaluator """rankbased""" +1043 14 dataset """wn18rr""" +1043 14 model """unstructuredmodel""" +1043 14 loss """crossentropy""" +1043 14 regularizer """no""" +1043 14 optimizer """adam""" +1043 14 training_loop """lcwa""" +1043 14 
evaluator """rankbased""" +1043 15 dataset """wn18rr""" +1043 15 model """unstructuredmodel""" +1043 15 loss """crossentropy""" +1043 15 regularizer """no""" +1043 15 optimizer """adam""" +1043 15 training_loop """lcwa""" +1043 15 evaluator """rankbased""" +1043 16 dataset """wn18rr""" +1043 16 model """unstructuredmodel""" +1043 16 loss """crossentropy""" +1043 16 regularizer """no""" +1043 16 optimizer """adam""" +1043 16 training_loop """lcwa""" +1043 16 evaluator """rankbased""" +1043 17 dataset """wn18rr""" +1043 17 model """unstructuredmodel""" +1043 17 loss """crossentropy""" +1043 17 regularizer """no""" +1043 17 optimizer """adam""" +1043 17 training_loop """lcwa""" +1043 17 evaluator """rankbased""" +1043 18 dataset """wn18rr""" +1043 18 model """unstructuredmodel""" +1043 18 loss """crossentropy""" +1043 18 regularizer """no""" +1043 18 optimizer """adam""" +1043 18 training_loop """lcwa""" +1043 18 evaluator """rankbased""" +1043 19 dataset """wn18rr""" +1043 19 model """unstructuredmodel""" +1043 19 loss """crossentropy""" +1043 19 regularizer """no""" +1043 19 optimizer """adam""" +1043 19 training_loop """lcwa""" +1043 19 evaluator """rankbased""" +1044 1 model.embedding_dim 2.0 +1044 1 model.scoring_fct_norm 1.0 +1044 1 optimizer.lr 0.012943470224073573 +1044 1 training.batch_size 0.0 +1044 1 training.label_smoothing 0.0011409569279592407 +1044 2 model.embedding_dim 2.0 +1044 2 model.scoring_fct_norm 1.0 +1044 2 optimizer.lr 0.0018062003119241675 +1044 2 training.batch_size 0.0 +1044 2 training.label_smoothing 0.21653482150278247 +1044 3 model.embedding_dim 0.0 +1044 3 model.scoring_fct_norm 2.0 +1044 3 optimizer.lr 0.0024069523072522037 +1044 3 training.batch_size 2.0 +1044 3 training.label_smoothing 0.004889691310858438 +1044 4 model.embedding_dim 2.0 +1044 4 model.scoring_fct_norm 1.0 +1044 4 optimizer.lr 0.05289340832572961 +1044 4 training.batch_size 0.0 +1044 4 training.label_smoothing 0.006015860349462024 +1044 5 model.embedding_dim 0.0 +1044 
5 model.scoring_fct_norm 1.0 +1044 5 optimizer.lr 0.0017553818188277813 +1044 5 training.batch_size 1.0 +1044 5 training.label_smoothing 0.19054858209941308 +1044 6 model.embedding_dim 1.0 +1044 6 model.scoring_fct_norm 2.0 +1044 6 optimizer.lr 0.0031605672968648916 +1044 6 training.batch_size 1.0 +1044 6 training.label_smoothing 0.0018854172778200653 +1044 7 model.embedding_dim 0.0 +1044 7 model.scoring_fct_norm 1.0 +1044 7 optimizer.lr 0.02083986780560408 +1044 7 training.batch_size 1.0 +1044 7 training.label_smoothing 0.003254024466260543 +1044 8 model.embedding_dim 0.0 +1044 8 model.scoring_fct_norm 1.0 +1044 8 optimizer.lr 0.0020301490404278583 +1044 8 training.batch_size 0.0 +1044 8 training.label_smoothing 0.001118416360100519 +1044 9 model.embedding_dim 1.0 +1044 9 model.scoring_fct_norm 1.0 +1044 9 optimizer.lr 0.05200276976027116 +1044 9 training.batch_size 2.0 +1044 9 training.label_smoothing 0.2125504856077943 +1044 10 model.embedding_dim 1.0 +1044 10 model.scoring_fct_norm 2.0 +1044 10 optimizer.lr 0.0063287340062622875 +1044 10 training.batch_size 2.0 +1044 10 training.label_smoothing 0.1458898860511936 +1044 11 model.embedding_dim 1.0 +1044 11 model.scoring_fct_norm 1.0 +1044 11 optimizer.lr 0.002797815810678245 +1044 11 training.batch_size 1.0 +1044 11 training.label_smoothing 0.031456099517966074 +1044 1 dataset """wn18rr""" +1044 1 model """unstructuredmodel""" +1044 1 loss """softplus""" +1044 1 regularizer """no""" +1044 1 optimizer """adam""" +1044 1 training_loop """lcwa""" +1044 1 evaluator """rankbased""" +1044 2 dataset """wn18rr""" +1044 2 model """unstructuredmodel""" +1044 2 loss """softplus""" +1044 2 regularizer """no""" +1044 2 optimizer """adam""" +1044 2 training_loop """lcwa""" +1044 2 evaluator """rankbased""" +1044 3 dataset """wn18rr""" +1044 3 model """unstructuredmodel""" +1044 3 loss """softplus""" +1044 3 regularizer """no""" +1044 3 optimizer """adam""" +1044 3 training_loop """lcwa""" +1044 3 evaluator """rankbased""" 
+1044 4 dataset """wn18rr""" +1044 4 model """unstructuredmodel""" +1044 4 loss """softplus""" +1044 4 regularizer """no""" +1044 4 optimizer """adam""" +1044 4 training_loop """lcwa""" +1044 4 evaluator """rankbased""" +1044 5 dataset """wn18rr""" +1044 5 model """unstructuredmodel""" +1044 5 loss """softplus""" +1044 5 regularizer """no""" +1044 5 optimizer """adam""" +1044 5 training_loop """lcwa""" +1044 5 evaluator """rankbased""" +1044 6 dataset """wn18rr""" +1044 6 model """unstructuredmodel""" +1044 6 loss """softplus""" +1044 6 regularizer """no""" +1044 6 optimizer """adam""" +1044 6 training_loop """lcwa""" +1044 6 evaluator """rankbased""" +1044 7 dataset """wn18rr""" +1044 7 model """unstructuredmodel""" +1044 7 loss """softplus""" +1044 7 regularizer """no""" +1044 7 optimizer """adam""" +1044 7 training_loop """lcwa""" +1044 7 evaluator """rankbased""" +1044 8 dataset """wn18rr""" +1044 8 model """unstructuredmodel""" +1044 8 loss """softplus""" +1044 8 regularizer """no""" +1044 8 optimizer """adam""" +1044 8 training_loop """lcwa""" +1044 8 evaluator """rankbased""" +1044 9 dataset """wn18rr""" +1044 9 model """unstructuredmodel""" +1044 9 loss """softplus""" +1044 9 regularizer """no""" +1044 9 optimizer """adam""" +1044 9 training_loop """lcwa""" +1044 9 evaluator """rankbased""" +1044 10 dataset """wn18rr""" +1044 10 model """unstructuredmodel""" +1044 10 loss """softplus""" +1044 10 regularizer """no""" +1044 10 optimizer """adam""" +1044 10 training_loop """lcwa""" +1044 10 evaluator """rankbased""" +1044 11 dataset """wn18rr""" +1044 11 model """unstructuredmodel""" +1044 11 loss """softplus""" +1044 11 regularizer """no""" +1044 11 optimizer """adam""" +1044 11 training_loop """lcwa""" +1044 11 evaluator """rankbased""" +1045 1 model.embedding_dim 2.0 +1045 1 model.scoring_fct_norm 2.0 +1045 1 optimizer.lr 0.0019388417557766474 +1045 1 training.batch_size 0.0 +1045 1 training.label_smoothing 0.31915002520550506 +1045 2 model.embedding_dim 
0.0 +1045 2 model.scoring_fct_norm 2.0 +1045 2 optimizer.lr 0.0024087468953417806 +1045 2 training.batch_size 2.0 +1045 2 training.label_smoothing 0.013406321357544998 +1045 3 model.embedding_dim 0.0 +1045 3 model.scoring_fct_norm 2.0 +1045 3 optimizer.lr 0.02040720157619847 +1045 3 training.batch_size 0.0 +1045 3 training.label_smoothing 0.0047394143402451246 +1045 4 model.embedding_dim 1.0 +1045 4 model.scoring_fct_norm 1.0 +1045 4 optimizer.lr 0.0016504554262992677 +1045 4 training.batch_size 2.0 +1045 4 training.label_smoothing 0.04805124516703569 +1045 5 model.embedding_dim 1.0 +1045 5 model.scoring_fct_norm 1.0 +1045 5 optimizer.lr 0.0011151203404266724 +1045 5 training.batch_size 2.0 +1045 5 training.label_smoothing 0.0015531106825055867 +1045 6 model.embedding_dim 1.0 +1045 6 model.scoring_fct_norm 2.0 +1045 6 optimizer.lr 0.007313463636084285 +1045 6 training.batch_size 2.0 +1045 6 training.label_smoothing 0.01071194458238773 +1045 7 model.embedding_dim 2.0 +1045 7 model.scoring_fct_norm 2.0 +1045 7 optimizer.lr 0.011457174432749736 +1045 7 training.batch_size 1.0 +1045 7 training.label_smoothing 0.0038224425857330763 +1045 8 model.embedding_dim 1.0 +1045 8 model.scoring_fct_norm 2.0 +1045 8 optimizer.lr 0.054382625113570955 +1045 8 training.batch_size 1.0 +1045 8 training.label_smoothing 0.029096198456250114 +1045 9 model.embedding_dim 2.0 +1045 9 model.scoring_fct_norm 2.0 +1045 9 optimizer.lr 0.0031967581877869957 +1045 9 training.batch_size 1.0 +1045 9 training.label_smoothing 0.1289602026718098 +1045 10 model.embedding_dim 1.0 +1045 10 model.scoring_fct_norm 1.0 +1045 10 optimizer.lr 0.019706843076418623 +1045 10 training.batch_size 0.0 +1045 10 training.label_smoothing 0.469553486962106 +1045 11 model.embedding_dim 2.0 +1045 11 model.scoring_fct_norm 2.0 +1045 11 optimizer.lr 0.007403726221883584 +1045 11 training.batch_size 1.0 +1045 11 training.label_smoothing 0.011536607534137868 +1045 12 model.embedding_dim 1.0 +1045 12 model.scoring_fct_norm 2.0 
+1045 12 optimizer.lr 0.023800950688398666 +1045 12 training.batch_size 2.0 +1045 12 training.label_smoothing 0.24771849067879073 +1045 13 model.embedding_dim 1.0 +1045 13 model.scoring_fct_norm 1.0 +1045 13 optimizer.lr 0.033767018275786514 +1045 13 training.batch_size 0.0 +1045 13 training.label_smoothing 0.28495891400291934 +1045 14 model.embedding_dim 0.0 +1045 14 model.scoring_fct_norm 1.0 +1045 14 optimizer.lr 0.0025199665520217666 +1045 14 training.batch_size 2.0 +1045 14 training.label_smoothing 0.09795563206687594 +1045 15 model.embedding_dim 2.0 +1045 15 model.scoring_fct_norm 1.0 +1045 15 optimizer.lr 0.02435358233509711 +1045 15 training.batch_size 2.0 +1045 15 training.label_smoothing 0.3160089259165939 +1045 16 model.embedding_dim 0.0 +1045 16 model.scoring_fct_norm 2.0 +1045 16 optimizer.lr 0.0020935230996704133 +1045 16 training.batch_size 0.0 +1045 16 training.label_smoothing 0.04248629694616089 +1045 17 model.embedding_dim 0.0 +1045 17 model.scoring_fct_norm 2.0 +1045 17 optimizer.lr 0.002041134220289752 +1045 17 training.batch_size 0.0 +1045 17 training.label_smoothing 0.001798537875549853 +1045 18 model.embedding_dim 1.0 +1045 18 model.scoring_fct_norm 2.0 +1045 18 optimizer.lr 0.011229142732120773 +1045 18 training.batch_size 1.0 +1045 18 training.label_smoothing 0.10622733337520898 +1045 19 model.embedding_dim 1.0 +1045 19 model.scoring_fct_norm 2.0 +1045 19 optimizer.lr 0.07988563851561654 +1045 19 training.batch_size 2.0 +1045 19 training.label_smoothing 0.0019974875033134036 +1045 20 model.embedding_dim 0.0 +1045 20 model.scoring_fct_norm 2.0 +1045 20 optimizer.lr 0.013211548305959943 +1045 20 training.batch_size 1.0 +1045 20 training.label_smoothing 0.01066956656651771 +1045 21 model.embedding_dim 2.0 +1045 21 model.scoring_fct_norm 2.0 +1045 21 optimizer.lr 0.010529266104220123 +1045 21 training.batch_size 0.0 +1045 21 training.label_smoothing 0.1785949917831498 +1045 22 model.embedding_dim 2.0 +1045 22 model.scoring_fct_norm 1.0 +1045 22 
optimizer.lr 0.0057402931093904815 +1045 22 training.batch_size 1.0 +1045 22 training.label_smoothing 0.013545244727245428 +1045 23 model.embedding_dim 0.0 +1045 23 model.scoring_fct_norm 2.0 +1045 23 optimizer.lr 0.00825715739123714 +1045 23 training.batch_size 0.0 +1045 23 training.label_smoothing 0.0053535636139631915 +1045 24 model.embedding_dim 0.0 +1045 24 model.scoring_fct_norm 2.0 +1045 24 optimizer.lr 0.0043264451896345155 +1045 24 training.batch_size 1.0 +1045 24 training.label_smoothing 0.15027130802879762 +1045 1 dataset """wn18rr""" +1045 1 model """unstructuredmodel""" +1045 1 loss """softplus""" +1045 1 regularizer """no""" +1045 1 optimizer """adam""" +1045 1 training_loop """lcwa""" +1045 1 evaluator """rankbased""" +1045 2 dataset """wn18rr""" +1045 2 model """unstructuredmodel""" +1045 2 loss """softplus""" +1045 2 regularizer """no""" +1045 2 optimizer """adam""" +1045 2 training_loop """lcwa""" +1045 2 evaluator """rankbased""" +1045 3 dataset """wn18rr""" +1045 3 model """unstructuredmodel""" +1045 3 loss """softplus""" +1045 3 regularizer """no""" +1045 3 optimizer """adam""" +1045 3 training_loop """lcwa""" +1045 3 evaluator """rankbased""" +1045 4 dataset """wn18rr""" +1045 4 model """unstructuredmodel""" +1045 4 loss """softplus""" +1045 4 regularizer """no""" +1045 4 optimizer """adam""" +1045 4 training_loop """lcwa""" +1045 4 evaluator """rankbased""" +1045 5 dataset """wn18rr""" +1045 5 model """unstructuredmodel""" +1045 5 loss """softplus""" +1045 5 regularizer """no""" +1045 5 optimizer """adam""" +1045 5 training_loop """lcwa""" +1045 5 evaluator """rankbased""" +1045 6 dataset """wn18rr""" +1045 6 model """unstructuredmodel""" +1045 6 loss """softplus""" +1045 6 regularizer """no""" +1045 6 optimizer """adam""" +1045 6 training_loop """lcwa""" +1045 6 evaluator """rankbased""" +1045 7 dataset """wn18rr""" +1045 7 model """unstructuredmodel""" +1045 7 loss """softplus""" +1045 7 regularizer """no""" +1045 7 optimizer """adam""" 
+1045 7 training_loop """lcwa""" +1045 7 evaluator """rankbased""" +1045 8 dataset """wn18rr""" +1045 8 model """unstructuredmodel""" +1045 8 loss """softplus""" +1045 8 regularizer """no""" +1045 8 optimizer """adam""" +1045 8 training_loop """lcwa""" +1045 8 evaluator """rankbased""" +1045 9 dataset """wn18rr""" +1045 9 model """unstructuredmodel""" +1045 9 loss """softplus""" +1045 9 regularizer """no""" +1045 9 optimizer """adam""" +1045 9 training_loop """lcwa""" +1045 9 evaluator """rankbased""" +1045 10 dataset """wn18rr""" +1045 10 model """unstructuredmodel""" +1045 10 loss """softplus""" +1045 10 regularizer """no""" +1045 10 optimizer """adam""" +1045 10 training_loop """lcwa""" +1045 10 evaluator """rankbased""" +1045 11 dataset """wn18rr""" +1045 11 model """unstructuredmodel""" +1045 11 loss """softplus""" +1045 11 regularizer """no""" +1045 11 optimizer """adam""" +1045 11 training_loop """lcwa""" +1045 11 evaluator """rankbased""" +1045 12 dataset """wn18rr""" +1045 12 model """unstructuredmodel""" +1045 12 loss """softplus""" +1045 12 regularizer """no""" +1045 12 optimizer """adam""" +1045 12 training_loop """lcwa""" +1045 12 evaluator """rankbased""" +1045 13 dataset """wn18rr""" +1045 13 model """unstructuredmodel""" +1045 13 loss """softplus""" +1045 13 regularizer """no""" +1045 13 optimizer """adam""" +1045 13 training_loop """lcwa""" +1045 13 evaluator """rankbased""" +1045 14 dataset """wn18rr""" +1045 14 model """unstructuredmodel""" +1045 14 loss """softplus""" +1045 14 regularizer """no""" +1045 14 optimizer """adam""" +1045 14 training_loop """lcwa""" +1045 14 evaluator """rankbased""" +1045 15 dataset """wn18rr""" +1045 15 model """unstructuredmodel""" +1045 15 loss """softplus""" +1045 15 regularizer """no""" +1045 15 optimizer """adam""" +1045 15 training_loop """lcwa""" +1045 15 evaluator """rankbased""" +1045 16 dataset """wn18rr""" +1045 16 model """unstructuredmodel""" +1045 16 loss """softplus""" +1045 16 regularizer """no""" 
+1045 16 optimizer """adam""" +1045 16 training_loop """lcwa""" +1045 16 evaluator """rankbased""" +1045 17 dataset """wn18rr""" +1045 17 model """unstructuredmodel""" +1045 17 loss """softplus""" +1045 17 regularizer """no""" +1045 17 optimizer """adam""" +1045 17 training_loop """lcwa""" +1045 17 evaluator """rankbased""" +1045 18 dataset """wn18rr""" +1045 18 model """unstructuredmodel""" +1045 18 loss """softplus""" +1045 18 regularizer """no""" +1045 18 optimizer """adam""" +1045 18 training_loop """lcwa""" +1045 18 evaluator """rankbased""" +1045 19 dataset """wn18rr""" +1045 19 model """unstructuredmodel""" +1045 19 loss """softplus""" +1045 19 regularizer """no""" +1045 19 optimizer """adam""" +1045 19 training_loop """lcwa""" +1045 19 evaluator """rankbased""" +1045 20 dataset """wn18rr""" +1045 20 model """unstructuredmodel""" +1045 20 loss """softplus""" +1045 20 regularizer """no""" +1045 20 optimizer """adam""" +1045 20 training_loop """lcwa""" +1045 20 evaluator """rankbased""" +1045 21 dataset """wn18rr""" +1045 21 model """unstructuredmodel""" +1045 21 loss """softplus""" +1045 21 regularizer """no""" +1045 21 optimizer """adam""" +1045 21 training_loop """lcwa""" +1045 21 evaluator """rankbased""" +1045 22 dataset """wn18rr""" +1045 22 model """unstructuredmodel""" +1045 22 loss """softplus""" +1045 22 regularizer """no""" +1045 22 optimizer """adam""" +1045 22 training_loop """lcwa""" +1045 22 evaluator """rankbased""" +1045 23 dataset """wn18rr""" +1045 23 model """unstructuredmodel""" +1045 23 loss """softplus""" +1045 23 regularizer """no""" +1045 23 optimizer """adam""" +1045 23 training_loop """lcwa""" +1045 23 evaluator """rankbased""" +1045 24 dataset """wn18rr""" +1045 24 model """unstructuredmodel""" +1045 24 loss """softplus""" +1045 24 regularizer """no""" +1045 24 optimizer """adam""" +1045 24 training_loop """lcwa""" +1045 24 evaluator """rankbased""" +1046 1 model.embedding_dim 1.0 +1046 1 model.scoring_fct_norm 1.0 +1046 1 
optimizer.lr 0.008008640683284838 +1046 1 training.batch_size 0.0 +1046 1 training.label_smoothing 0.011768630350156769 +1046 2 model.embedding_dim 1.0 +1046 2 model.scoring_fct_norm 1.0 +1046 2 optimizer.lr 0.005050553566624761 +1046 2 training.batch_size 1.0 +1046 2 training.label_smoothing 0.13010528379942699 +1046 3 model.embedding_dim 0.0 +1046 3 model.scoring_fct_norm 1.0 +1046 3 optimizer.lr 0.0037962398618561315 +1046 3 training.batch_size 0.0 +1046 3 training.label_smoothing 0.0016772971260159222 +1046 4 model.embedding_dim 0.0 +1046 4 model.scoring_fct_norm 1.0 +1046 4 optimizer.lr 0.0016825177870422757 +1046 4 training.batch_size 1.0 +1046 4 training.label_smoothing 0.011204662321278092 +1046 5 model.embedding_dim 0.0 +1046 5 model.scoring_fct_norm 2.0 +1046 5 optimizer.lr 0.0010550365386847863 +1046 5 training.batch_size 2.0 +1046 5 training.label_smoothing 0.0010669782309587672 +1046 6 model.embedding_dim 1.0 +1046 6 model.scoring_fct_norm 1.0 +1046 6 optimizer.lr 0.0266928276570262 +1046 6 training.batch_size 2.0 +1046 6 training.label_smoothing 0.003958037645653355 +1046 7 model.embedding_dim 1.0 +1046 7 model.scoring_fct_norm 2.0 +1046 7 optimizer.lr 0.052661774962270265 +1046 7 training.batch_size 2.0 +1046 7 training.label_smoothing 0.020301706249591437 +1046 8 model.embedding_dim 0.0 +1046 8 model.scoring_fct_norm 2.0 +1046 8 optimizer.lr 0.0037563284263231454 +1046 8 training.batch_size 2.0 +1046 8 training.label_smoothing 0.004446151929752386 +1046 9 model.embedding_dim 0.0 +1046 9 model.scoring_fct_norm 1.0 +1046 9 optimizer.lr 0.047089830217987814 +1046 9 training.batch_size 1.0 +1046 9 training.label_smoothing 0.04066219824990153 +1046 10 model.embedding_dim 2.0 +1046 10 model.scoring_fct_norm 2.0 +1046 10 optimizer.lr 0.04118061796949609 +1046 10 training.batch_size 0.0 +1046 10 training.label_smoothing 0.002959427092890259 +1046 11 model.embedding_dim 0.0 +1046 11 model.scoring_fct_norm 1.0 +1046 11 optimizer.lr 0.02088889023179728 +1046 
11 training.batch_size 1.0 +1046 11 training.label_smoothing 0.8859817509160072 +1046 12 model.embedding_dim 0.0 +1046 12 model.scoring_fct_norm 2.0 +1046 12 optimizer.lr 0.0034191249668582744 +1046 12 training.batch_size 2.0 +1046 12 training.label_smoothing 0.0328745449775833 +1046 13 model.embedding_dim 1.0 +1046 13 model.scoring_fct_norm 1.0 +1046 13 optimizer.lr 0.02980210861030595 +1046 13 training.batch_size 2.0 +1046 13 training.label_smoothing 0.005959431263050101 +1046 14 model.embedding_dim 1.0 +1046 14 model.scoring_fct_norm 1.0 +1046 14 optimizer.lr 0.004452152813991384 +1046 14 training.batch_size 2.0 +1046 14 training.label_smoothing 0.026138471414857704 +1046 15 model.embedding_dim 1.0 +1046 15 model.scoring_fct_norm 2.0 +1046 15 optimizer.lr 0.028414912900337902 +1046 15 training.batch_size 2.0 +1046 15 training.label_smoothing 0.002797448139894867 +1046 16 model.embedding_dim 2.0 +1046 16 model.scoring_fct_norm 2.0 +1046 16 optimizer.lr 0.003528902751749043 +1046 16 training.batch_size 1.0 +1046 16 training.label_smoothing 0.06773521143125487 +1046 17 model.embedding_dim 1.0 +1046 17 model.scoring_fct_norm 2.0 +1046 17 optimizer.lr 0.09668241826894744 +1046 17 training.batch_size 2.0 +1046 17 training.label_smoothing 0.5657805136003488 +1046 1 dataset """wn18rr""" +1046 1 model """unstructuredmodel""" +1046 1 loss """bceaftersigmoid""" +1046 1 regularizer """no""" +1046 1 optimizer """adam""" +1046 1 training_loop """lcwa""" +1046 1 evaluator """rankbased""" +1046 2 dataset """wn18rr""" +1046 2 model """unstructuredmodel""" +1046 2 loss """bceaftersigmoid""" +1046 2 regularizer """no""" +1046 2 optimizer """adam""" +1046 2 training_loop """lcwa""" +1046 2 evaluator """rankbased""" +1046 3 dataset """wn18rr""" +1046 3 model """unstructuredmodel""" +1046 3 loss """bceaftersigmoid""" +1046 3 regularizer """no""" +1046 3 optimizer """adam""" +1046 3 training_loop """lcwa""" +1046 3 evaluator """rankbased""" +1046 4 dataset """wn18rr""" +1046 4 model 
"""unstructuredmodel""" +1046 4 loss """bceaftersigmoid""" +1046 4 regularizer """no""" +1046 4 optimizer """adam""" +1046 4 training_loop """lcwa""" +1046 4 evaluator """rankbased""" +1046 5 dataset """wn18rr""" +1046 5 model """unstructuredmodel""" +1046 5 loss """bceaftersigmoid""" +1046 5 regularizer """no""" +1046 5 optimizer """adam""" +1046 5 training_loop """lcwa""" +1046 5 evaluator """rankbased""" +1046 6 dataset """wn18rr""" +1046 6 model """unstructuredmodel""" +1046 6 loss """bceaftersigmoid""" +1046 6 regularizer """no""" +1046 6 optimizer """adam""" +1046 6 training_loop """lcwa""" +1046 6 evaluator """rankbased""" +1046 7 dataset """wn18rr""" +1046 7 model """unstructuredmodel""" +1046 7 loss """bceaftersigmoid""" +1046 7 regularizer """no""" +1046 7 optimizer """adam""" +1046 7 training_loop """lcwa""" +1046 7 evaluator """rankbased""" +1046 8 dataset """wn18rr""" +1046 8 model """unstructuredmodel""" +1046 8 loss """bceaftersigmoid""" +1046 8 regularizer """no""" +1046 8 optimizer """adam""" +1046 8 training_loop """lcwa""" +1046 8 evaluator """rankbased""" +1046 9 dataset """wn18rr""" +1046 9 model """unstructuredmodel""" +1046 9 loss """bceaftersigmoid""" +1046 9 regularizer """no""" +1046 9 optimizer """adam""" +1046 9 training_loop """lcwa""" +1046 9 evaluator """rankbased""" +1046 10 dataset """wn18rr""" +1046 10 model """unstructuredmodel""" +1046 10 loss """bceaftersigmoid""" +1046 10 regularizer """no""" +1046 10 optimizer """adam""" +1046 10 training_loop """lcwa""" +1046 10 evaluator """rankbased""" +1046 11 dataset """wn18rr""" +1046 11 model """unstructuredmodel""" +1046 11 loss """bceaftersigmoid""" +1046 11 regularizer """no""" +1046 11 optimizer """adam""" +1046 11 training_loop """lcwa""" +1046 11 evaluator """rankbased""" +1046 12 dataset """wn18rr""" +1046 12 model """unstructuredmodel""" +1046 12 loss """bceaftersigmoid""" +1046 12 regularizer """no""" +1046 12 optimizer """adam""" +1046 12 training_loop """lcwa""" +1046 12 
evaluator """rankbased""" +1046 13 dataset """wn18rr""" +1046 13 model """unstructuredmodel""" +1046 13 loss """bceaftersigmoid""" +1046 13 regularizer """no""" +1046 13 optimizer """adam""" +1046 13 training_loop """lcwa""" +1046 13 evaluator """rankbased""" +1046 14 dataset """wn18rr""" +1046 14 model """unstructuredmodel""" +1046 14 loss """bceaftersigmoid""" +1046 14 regularizer """no""" +1046 14 optimizer """adam""" +1046 14 training_loop """lcwa""" +1046 14 evaluator """rankbased""" +1046 15 dataset """wn18rr""" +1046 15 model """unstructuredmodel""" +1046 15 loss """bceaftersigmoid""" +1046 15 regularizer """no""" +1046 15 optimizer """adam""" +1046 15 training_loop """lcwa""" +1046 15 evaluator """rankbased""" +1046 16 dataset """wn18rr""" +1046 16 model """unstructuredmodel""" +1046 16 loss """bceaftersigmoid""" +1046 16 regularizer """no""" +1046 16 optimizer """adam""" +1046 16 training_loop """lcwa""" +1046 16 evaluator """rankbased""" +1046 17 dataset """wn18rr""" +1046 17 model """unstructuredmodel""" +1046 17 loss """bceaftersigmoid""" +1046 17 regularizer """no""" +1046 17 optimizer """adam""" +1046 17 training_loop """lcwa""" +1046 17 evaluator """rankbased""" +1047 1 model.embedding_dim 0.0 +1047 1 model.scoring_fct_norm 1.0 +1047 1 optimizer.lr 0.0020543127901716473 +1047 1 training.batch_size 0.0 +1047 1 training.label_smoothing 0.11426727987268638 +1047 2 model.embedding_dim 1.0 +1047 2 model.scoring_fct_norm 1.0 +1047 2 optimizer.lr 0.003547056513394591 +1047 2 training.batch_size 1.0 +1047 2 training.label_smoothing 0.25963893421591944 +1047 3 model.embedding_dim 1.0 +1047 3 model.scoring_fct_norm 1.0 +1047 3 optimizer.lr 0.06081634934231114 +1047 3 training.batch_size 2.0 +1047 3 training.label_smoothing 0.24124188326715576 +1047 4 model.embedding_dim 0.0 +1047 4 model.scoring_fct_norm 2.0 +1047 4 optimizer.lr 0.00533523615871343 +1047 4 training.batch_size 1.0 +1047 4 training.label_smoothing 0.019849220708344145 +1047 5 model.embedding_dim 
0.0 +1047 5 model.scoring_fct_norm 1.0 +1047 5 optimizer.lr 0.021460925434992447 +1047 5 training.batch_size 1.0 +1047 5 training.label_smoothing 0.005392355214017285 +1047 6 model.embedding_dim 0.0 +1047 6 model.scoring_fct_norm 1.0 +1047 6 optimizer.lr 0.0027558677627117093 +1047 6 training.batch_size 2.0 +1047 6 training.label_smoothing 0.02007412304981378 +1047 7 model.embedding_dim 1.0 +1047 7 model.scoring_fct_norm 2.0 +1047 7 optimizer.lr 0.005345418421298351 +1047 7 training.batch_size 1.0 +1047 7 training.label_smoothing 0.03159675945538726 +1047 8 model.embedding_dim 1.0 +1047 8 model.scoring_fct_norm 2.0 +1047 8 optimizer.lr 0.001047023193577762 +1047 8 training.batch_size 0.0 +1047 8 training.label_smoothing 0.001907936052627165 +1047 9 model.embedding_dim 2.0 +1047 9 model.scoring_fct_norm 1.0 +1047 9 optimizer.lr 0.03497689306366936 +1047 9 training.batch_size 0.0 +1047 9 training.label_smoothing 0.0011528333247339065 +1047 10 model.embedding_dim 2.0 +1047 10 model.scoring_fct_norm 2.0 +1047 10 optimizer.lr 0.002481500695030386 +1047 10 training.batch_size 2.0 +1047 10 training.label_smoothing 0.39539381580558547 +1047 11 model.embedding_dim 1.0 +1047 11 model.scoring_fct_norm 2.0 +1047 11 optimizer.lr 0.07717631595851976 +1047 11 training.batch_size 2.0 +1047 11 training.label_smoothing 0.10033178833604788 +1047 12 model.embedding_dim 1.0 +1047 12 model.scoring_fct_norm 2.0 +1047 12 optimizer.lr 0.015848106951390106 +1047 12 training.batch_size 1.0 +1047 12 training.label_smoothing 0.7877235426143814 +1047 13 model.embedding_dim 1.0 +1047 13 model.scoring_fct_norm 1.0 +1047 13 optimizer.lr 0.08455809464464063 +1047 13 training.batch_size 1.0 +1047 13 training.label_smoothing 0.28691053383822435 +1047 14 model.embedding_dim 1.0 +1047 14 model.scoring_fct_norm 1.0 +1047 14 optimizer.lr 0.005419185224339364 +1047 14 training.batch_size 1.0 +1047 14 training.label_smoothing 0.005434251635616444 +1047 15 model.embedding_dim 1.0 +1047 15 
model.scoring_fct_norm 2.0 +1047 15 optimizer.lr 0.01990202654154851 +1047 15 training.batch_size 2.0 +1047 15 training.label_smoothing 0.011878271823541757 +1047 16 model.embedding_dim 0.0 +1047 16 model.scoring_fct_norm 2.0 +1047 16 optimizer.lr 0.0026773673217484377 +1047 16 training.batch_size 1.0 +1047 16 training.label_smoothing 0.02834904457216349 +1047 17 model.embedding_dim 0.0 +1047 17 model.scoring_fct_norm 2.0 +1047 17 optimizer.lr 0.012584115531983244 +1047 17 training.batch_size 1.0 +1047 17 training.label_smoothing 0.017592023108903412 +1047 18 model.embedding_dim 0.0 +1047 18 model.scoring_fct_norm 2.0 +1047 18 optimizer.lr 0.0703589855314801 +1047 18 training.batch_size 0.0 +1047 18 training.label_smoothing 0.40153038123475 +1047 19 model.embedding_dim 0.0 +1047 19 model.scoring_fct_norm 1.0 +1047 19 optimizer.lr 0.005340409410933988 +1047 19 training.batch_size 2.0 +1047 19 training.label_smoothing 0.11103990338704592 +1047 20 model.embedding_dim 2.0 +1047 20 model.scoring_fct_norm 2.0 +1047 20 optimizer.lr 0.001063517796626406 +1047 20 training.batch_size 1.0 +1047 20 training.label_smoothing 0.006094357818989733 +1047 21 model.embedding_dim 2.0 +1047 21 model.scoring_fct_norm 2.0 +1047 21 optimizer.lr 0.008363502681255491 +1047 21 training.batch_size 0.0 +1047 21 training.label_smoothing 0.043539589225688284 +1047 22 model.embedding_dim 0.0 +1047 22 model.scoring_fct_norm 1.0 +1047 22 optimizer.lr 0.052637619793102906 +1047 22 training.batch_size 2.0 +1047 22 training.label_smoothing 0.11818469973243703 +1047 23 model.embedding_dim 2.0 +1047 23 model.scoring_fct_norm 1.0 +1047 23 optimizer.lr 0.022357364070479006 +1047 23 training.batch_size 2.0 +1047 23 training.label_smoothing 0.08753443781226763 +1047 24 model.embedding_dim 0.0 +1047 24 model.scoring_fct_norm 1.0 +1047 24 optimizer.lr 0.02372954798739383 +1047 24 training.batch_size 2.0 +1047 24 training.label_smoothing 0.00731176461294837 +1047 25 model.embedding_dim 2.0 +1047 25 
model.scoring_fct_norm 1.0 +1047 25 optimizer.lr 0.0019013277493017413 +1047 25 training.batch_size 2.0 +1047 25 training.label_smoothing 0.5802478394909187 +1047 1 dataset """wn18rr""" +1047 1 model """unstructuredmodel""" +1047 1 loss """bceaftersigmoid""" +1047 1 regularizer """no""" +1047 1 optimizer """adam""" +1047 1 training_loop """lcwa""" +1047 1 evaluator """rankbased""" +1047 2 dataset """wn18rr""" +1047 2 model """unstructuredmodel""" +1047 2 loss """bceaftersigmoid""" +1047 2 regularizer """no""" +1047 2 optimizer """adam""" +1047 2 training_loop """lcwa""" +1047 2 evaluator """rankbased""" +1047 3 dataset """wn18rr""" +1047 3 model """unstructuredmodel""" +1047 3 loss """bceaftersigmoid""" +1047 3 regularizer """no""" +1047 3 optimizer """adam""" +1047 3 training_loop """lcwa""" +1047 3 evaluator """rankbased""" +1047 4 dataset """wn18rr""" +1047 4 model """unstructuredmodel""" +1047 4 loss """bceaftersigmoid""" +1047 4 regularizer """no""" +1047 4 optimizer """adam""" +1047 4 training_loop """lcwa""" +1047 4 evaluator """rankbased""" +1047 5 dataset """wn18rr""" +1047 5 model """unstructuredmodel""" +1047 5 loss """bceaftersigmoid""" +1047 5 regularizer """no""" +1047 5 optimizer """adam""" +1047 5 training_loop """lcwa""" +1047 5 evaluator """rankbased""" +1047 6 dataset """wn18rr""" +1047 6 model """unstructuredmodel""" +1047 6 loss """bceaftersigmoid""" +1047 6 regularizer """no""" +1047 6 optimizer """adam""" +1047 6 training_loop """lcwa""" +1047 6 evaluator """rankbased""" +1047 7 dataset """wn18rr""" +1047 7 model """unstructuredmodel""" +1047 7 loss """bceaftersigmoid""" +1047 7 regularizer """no""" +1047 7 optimizer """adam""" +1047 7 training_loop """lcwa""" +1047 7 evaluator """rankbased""" +1047 8 dataset """wn18rr""" +1047 8 model """unstructuredmodel""" +1047 8 loss """bceaftersigmoid""" +1047 8 regularizer """no""" +1047 8 optimizer """adam""" +1047 8 training_loop """lcwa""" +1047 8 evaluator """rankbased""" +1047 9 dataset 
"""wn18rr""" +1047 9 model """unstructuredmodel""" +1047 9 loss """bceaftersigmoid""" +1047 9 regularizer """no""" +1047 9 optimizer """adam""" +1047 9 training_loop """lcwa""" +1047 9 evaluator """rankbased""" +1047 10 dataset """wn18rr""" +1047 10 model """unstructuredmodel""" +1047 10 loss """bceaftersigmoid""" +1047 10 regularizer """no""" +1047 10 optimizer """adam""" +1047 10 training_loop """lcwa""" +1047 10 evaluator """rankbased""" +1047 11 dataset """wn18rr""" +1047 11 model """unstructuredmodel""" +1047 11 loss """bceaftersigmoid""" +1047 11 regularizer """no""" +1047 11 optimizer """adam""" +1047 11 training_loop """lcwa""" +1047 11 evaluator """rankbased""" +1047 12 dataset """wn18rr""" +1047 12 model """unstructuredmodel""" +1047 12 loss """bceaftersigmoid""" +1047 12 regularizer """no""" +1047 12 optimizer """adam""" +1047 12 training_loop """lcwa""" +1047 12 evaluator """rankbased""" +1047 13 dataset """wn18rr""" +1047 13 model """unstructuredmodel""" +1047 13 loss """bceaftersigmoid""" +1047 13 regularizer """no""" +1047 13 optimizer """adam""" +1047 13 training_loop """lcwa""" +1047 13 evaluator """rankbased""" +1047 14 dataset """wn18rr""" +1047 14 model """unstructuredmodel""" +1047 14 loss """bceaftersigmoid""" +1047 14 regularizer """no""" +1047 14 optimizer """adam""" +1047 14 training_loop """lcwa""" +1047 14 evaluator """rankbased""" +1047 15 dataset """wn18rr""" +1047 15 model """unstructuredmodel""" +1047 15 loss """bceaftersigmoid""" +1047 15 regularizer """no""" +1047 15 optimizer """adam""" +1047 15 training_loop """lcwa""" +1047 15 evaluator """rankbased""" +1047 16 dataset """wn18rr""" +1047 16 model """unstructuredmodel""" +1047 16 loss """bceaftersigmoid""" +1047 16 regularizer """no""" +1047 16 optimizer """adam""" +1047 16 training_loop """lcwa""" +1047 16 evaluator """rankbased""" +1047 17 dataset """wn18rr""" +1047 17 model """unstructuredmodel""" +1047 17 loss """bceaftersigmoid""" +1047 17 regularizer """no""" +1047 17 
optimizer """adam""" +1047 17 training_loop """lcwa""" +1047 17 evaluator """rankbased""" +1047 18 dataset """wn18rr""" +1047 18 model """unstructuredmodel""" +1047 18 loss """bceaftersigmoid""" +1047 18 regularizer """no""" +1047 18 optimizer """adam""" +1047 18 training_loop """lcwa""" +1047 18 evaluator """rankbased""" +1047 19 dataset """wn18rr""" +1047 19 model """unstructuredmodel""" +1047 19 loss """bceaftersigmoid""" +1047 19 regularizer """no""" +1047 19 optimizer """adam""" +1047 19 training_loop """lcwa""" +1047 19 evaluator """rankbased""" +1047 20 dataset """wn18rr""" +1047 20 model """unstructuredmodel""" +1047 20 loss """bceaftersigmoid""" +1047 20 regularizer """no""" +1047 20 optimizer """adam""" +1047 20 training_loop """lcwa""" +1047 20 evaluator """rankbased""" +1047 21 dataset """wn18rr""" +1047 21 model """unstructuredmodel""" +1047 21 loss """bceaftersigmoid""" +1047 21 regularizer """no""" +1047 21 optimizer """adam""" +1047 21 training_loop """lcwa""" +1047 21 evaluator """rankbased""" +1047 22 dataset """wn18rr""" +1047 22 model """unstructuredmodel""" +1047 22 loss """bceaftersigmoid""" +1047 22 regularizer """no""" +1047 22 optimizer """adam""" +1047 22 training_loop """lcwa""" +1047 22 evaluator """rankbased""" +1047 23 dataset """wn18rr""" +1047 23 model """unstructuredmodel""" +1047 23 loss """bceaftersigmoid""" +1047 23 regularizer """no""" +1047 23 optimizer """adam""" +1047 23 training_loop """lcwa""" +1047 23 evaluator """rankbased""" +1047 24 dataset """wn18rr""" +1047 24 model """unstructuredmodel""" +1047 24 loss """bceaftersigmoid""" +1047 24 regularizer """no""" +1047 24 optimizer """adam""" +1047 24 training_loop """lcwa""" +1047 24 evaluator """rankbased""" +1047 25 dataset """wn18rr""" +1047 25 model """unstructuredmodel""" +1047 25 loss """bceaftersigmoid""" +1047 25 regularizer """no""" +1047 25 optimizer """adam""" +1047 25 training_loop """lcwa""" +1047 25 evaluator """rankbased"""